From 140164e8ea74a784c9257049c600636b77b8732e Mon Sep 17 00:00:00 2001
From: leonidk
Date: Wed, 5 Sep 2007 08:28:39 +0000
Subject: [PATCH] branch for adding NDI support

git-svn-id: svn://openib.tc.cornell.edu/gen1@778 ad392aa1-c5ef-ae45-8dd8-e69d62a5ef86
---
 branches/Ndi/core/al/al.c | 432 +
 branches/Ndi/core/al/al.h | 293 +
 branches/Ndi/core/al/al_av.c | 314 +
 branches/Ndi/core/al/al_av.h | 90 +
 branches/Ndi/core/al/al_ca.c | 424 +
 branches/Ndi/core/al/al_ca.h | 82 +
 branches/Ndi/core/al/al_ci_ca.h | 210 +
 branches/Ndi/core/al/al_ci_ca_shared.c | 595 +
 branches/Ndi/core/al/al_cm_cep.h | 437 +
 branches/Ndi/core/al/al_cm_conn.h | 1308 ++
 branches/Ndi/core/al/al_cm_qp.c | 2058 +++
 branches/Ndi/core/al/al_cm_sidr.h | 173 +
 branches/Ndi/core/al/al_common.c | 688 +
 branches/Ndi/core/al/al_common.h | 336 +
 branches/Ndi/core/al/al_cq.c | 489 +
 branches/Ndi/core/al/al_cq.h | 142 +
 branches/Ndi/core/al/al_debug.h | 251 +
 branches/Ndi/core/al/al_dev.h | 487 +
 branches/Ndi/core/al/al_dm.c | 1799 ++
 branches/Ndi/core/al/al_dm.h | 124 +
 branches/Ndi/core/al/al_init.c | 173 +
 branches/Ndi/core/al/al_init.h | 92 +
 branches/Ndi/core/al/al_ioc_pnp.h | 53 +
 branches/Ndi/core/al/al_mad.c | 3276 ++++
 branches/Ndi/core/al/al_mad.h | 236 +
 branches/Ndi/core/al/al_mad_pool.h | 281 +
 branches/Ndi/core/al/al_mcast.c | 690 +
 branches/Ndi/core/al/al_mcast.h | 114 +
 branches/Ndi/core/al/al_mgr.h | 122 +
 branches/Ndi/core/al/al_mgr_shared.c | 675 +
 branches/Ndi/core/al/al_mr.h | 181 +
 branches/Ndi/core/al/al_mr_shared.c | 634 +
 branches/Ndi/core/al/al_mw.c | 259 +
 branches/Ndi/core/al/al_mw.h | 74 +
 branches/Ndi/core/al/al_pd.c | 492 +
 branches/Ndi/core/al/al_pd.h | 80 +
 branches/Ndi/core/al/al_pnp.h | 234 +
 branches/Ndi/core/al/al_proxy.h | 258 +
 branches/Ndi/core/al/al_proxy_ioctl.h | 350 +
 branches/Ndi/core/al/al_qp.c | 2114 +++
 branches/Ndi/core/al/al_qp.h | 312 +
 branches/Ndi/core/al/al_query.c | 375 +
 branches/Ndi/core/al/al_query.h | 152 +
 branches/Ndi/core/al/al_reg_svc.c | 358 +
 branches/Ndi/core/al/al_reg_svc.h | 65 +
 branches/Ndi/core/al/al_res_mgr.c | 299 +
 branches/Ndi/core/al/al_res_mgr.h | 97 +
 branches/Ndi/core/al/al_srq.c | 438 +
 branches/Ndi/core/al/al_srq.h | 108 +
 branches/Ndi/core/al/al_sub.c | 92 +
 branches/Ndi/core/al/al_sub.h | 36 +
 branches/Ndi/core/al/al_verbs.h | 636 +
 branches/Ndi/core/al/dirs | 3 +
 branches/Ndi/core/al/ib_common.c | 133 +
 branches/Ndi/core/al/ib_common.h | 50 +
 branches/Ndi/core/al/ib_statustext.c | 260 +
 branches/Ndi/core/al/kernel/SOURCES | 85 +
 branches/Ndi/core/al/kernel/al_ca_pnp.c | 34 +
 branches/Ndi/core/al/kernel/al_ca_pnp.h | 36 +
 branches/Ndi/core/al/kernel/al_ci_ca.c | 520 +
 branches/Ndi/core/al/kernel/al_cm_cep.c | 6082 +++++++
 branches/Ndi/core/al/kernel/al_dev.c | 566 +
 branches/Ndi/core/al/kernel/al_driver.h | 209 +
 branches/Ndi/core/al/kernel/al_exports.def | 7 +
 branches/Ndi/core/al/kernel/al_fmr_pool.c | 749 +
 branches/Ndi/core/al/kernel/al_fmr_pool.h | 116 +
 branches/Ndi/core/al/kernel/al_ioc_pnp.c | 3320 ++++
 branches/Ndi/core/al/kernel/al_mad_pool.c | 961 ++
 branches/Ndi/core/al/kernel/al_mgr.c | 622 +
 branches/Ndi/core/al/kernel/al_mr.c | 614 +
 branches/Ndi/core/al/kernel/al_pnp.c | 1735 ++
 branches/Ndi/core/al/kernel/al_proxy.c | 1288 ++
 branches/Ndi/core/al/kernel/al_proxy_cep.c | 951 ++
 branches/Ndi/core/al/kernel/al_proxy_cm.c | 1459 ++
 branches/Ndi/core/al/kernel/al_proxy_ioc.c | 58 +
 branches/Ndi/core/al/kernel/al_proxy_subnet.c | 1158 ++
 branches/Ndi/core/al/kernel/al_proxy_verbs.c | 3947 +++++
 branches/Ndi/core/al/kernel/al_sa_req.c | 811 +
 branches/Ndi/core/al/kernel/al_smi.c | 3682 ++
 branches/Ndi/core/al/kernel/al_smi.h | 246 +
 branches/Ndi/core/al/kernel/ibal.rc | 47 +
 branches/Ndi/core/al/kernel/makefile | 7 +
 branches/Ndi/core/al/user/SOURCES | 97 +
 branches/Ndi/core/al/user/al_dll.c | 205 +
 branches/Ndi/core/al/user/al_exports.src | 219 +
 branches/Ndi/core/al/user/al_mad_pool.c | 1507 ++
 branches/Ndi/core/al/user/ibal.rc | 48 +
 branches/Ndi/core/al/user/makefile | 7 +
 branches/Ndi/core/al/user/ual_av.c | 384 +
 branches/Ndi/core/al/user/ual_ca.c | 533 +
 branches/Ndi/core/al/user/ual_ca.h | 59 +
 branches/Ndi/core/al/user/ual_ci_ca.c | 338 +
 branches/Ndi/core/al/user/ual_ci_ca.h | 286 +
 branches/Ndi/core/al/user/ual_cm_cep.c | 1426 ++
 branches/Ndi/core/al/user/ual_cq.c | 562 +
 branches/Ndi/core/al/user/ual_dm.c | 35 +
 branches/Ndi/core/al/user/ual_dm.h | 36 +
 branches/Ndi/core/al/user/ual_mad.c | 530 +
 branches/Ndi/core/al/user/ual_mad.h | 96 +
 branches/Ndi/core/al/user/ual_mad_pool.c | 139 +
 branches/Ndi/core/al/user/ual_mcast.c | 173 +
 branches/Ndi/core/al/user/ual_mcast.h | 44 +
 branches/Ndi/core/al/user/ual_mgr.c | 1293 ++
 branches/Ndi/core/al/user/ual_mgr.h | 147 +
 branches/Ndi/core/al/user/ual_mr.c | 353 +
 branches/Ndi/core/al/user/ual_mr.h | 36 +
 branches/Ndi/core/al/user/ual_mw.c | 301 +
 branches/Ndi/core/al/user/ual_mw.h | 36 +
 branches/Ndi/core/al/user/ual_pd.c | 169 +
 branches/Ndi/core/al/user/ual_pnp.c | 568 +
 branches/Ndi/core/al/user/ual_qp.c | 662 +
 branches/Ndi/core/al/user/ual_qp.h | 46 +
 branches/Ndi/core/al/user/ual_query.c | 34 +
 branches/Ndi/core/al/user/ual_query.h | 36 +
 branches/Ndi/core/al/user/ual_reg_svc.c | 34 +
 branches/Ndi/core/al/user/ual_res_mgr.h | 70 +
 branches/Ndi/core/al/user/ual_sa_req.c | 308 +
 branches/Ndi/core/al/user/ual_srq.c | 438 +
 branches/Ndi/core/al/user/ual_sub.c | 34 +
 branches/Ndi/core/al/user/ual_support.h | 131 +
 branches/Ndi/core/bus/dirs | 2 +
 branches/Ndi/core/bus/kernel/SOURCES | 44 +
 branches/Ndi/core/bus/kernel/bus_driver.c | 517 +
 branches/Ndi/core/bus/kernel/bus_driver.h | 207 +
 branches/Ndi/core/bus/kernel/bus_iou_mgr.c | 1501 ++
 branches/Ndi/core/bus/kernel/bus_iou_mgr.h | 66 +
 branches/Ndi/core/bus/kernel/bus_pnp.c | 1094 ++
 branches/Ndi/core/bus/kernel/bus_pnp.h | 89 +
 branches/Ndi/core/bus/kernel/bus_port_mgr.c | 1494 ++
 branches/Ndi/core/bus/kernel/bus_port_mgr.h | 66 +
 branches/Ndi/core/bus/kernel/ib_bus.inf | 207 +
 branches/Ndi/core/bus/kernel/ibbus.rc | 47 +
 branches/Ndi/core/bus/kernel/makefile | 7 +
 branches/Ndi/core/complib/cl_async_proc.c | 160 +
 branches/Ndi/core/complib/cl_list.c | 650 +
 branches/Ndi/core/complib/cl_map.c | 2218 +++
 branches/Ndi/core/complib/cl_memory.c | 353 +
 branches/Ndi/core/complib/cl_memtrack.h | 86 +
 branches/Ndi/core/complib/cl_obj.c | 781 +
 branches/Ndi/core/complib/cl_perf.c | 269 +
 branches/Ndi/core/complib/cl_pool.c | 706 +
 branches/Ndi/core/complib/cl_ptr_vector.c | 357 +
 branches/Ndi/core/complib/cl_reqmgr.c | 288 +
 branches/Ndi/core/complib/cl_statustext.c | 71 +
 branches/Ndi/core/complib/cl_threadpool.c | 237 +
 branches/Ndi/core/complib/cl_vector.c | 617 +
 branches/Ndi/core/complib/dirs | 3 +
 branches/Ndi/core/complib/kernel/SOURCES | 32 +
 branches/Ndi/core/complib/kernel/cl_bus_ifc.c | 86 +
 branches/Ndi/core/complib/kernel/cl_driver.c | 145 +
 branches/Ndi/core/complib/kernel/cl_event.c | 74 +
 .../Ndi/core/complib/kernel/cl_exports.def | 7 +
 branches/Ndi/core/complib/kernel/cl_log.c | 103 +
 .../Ndi/core/complib/kernel/cl_memory_osd.c | 60 +
 branches/Ndi/core/complib/kernel/cl_pnp_po.c | 1417 ++
 .../Ndi/core/complib/kernel/cl_syscallback.c | 88 +
 branches/Ndi/core/complib/kernel/cl_thread.c | 138 +
 branches/Ndi/core/complib/kernel/cl_timer.c | 160 +
 branches/Ndi/core/complib/kernel/makefile | 7 +
 branches/Ndi/core/complib/user/SOURCES | 43 +
 branches/Ndi/core/complib/user/cl_debug.c | 61 +
 branches/Ndi/core/complib/user/cl_dll.c | 89 +
 branches/Ndi/core/complib/user/cl_event.c | 64 +
 branches/Ndi/core/complib/user/cl_log.c | 78 +
 .../Ndi/core/complib/user/cl_memory_osd.c | 54 +
 .../Ndi/core/complib/user/cl_syscallback.c | 122 +
 branches/Ndi/core/complib/user/cl_thread.c | 127 +
 branches/Ndi/core/complib/user/cl_timer.c | 195 +
 branches/Ndi/core/complib/user/complib.rc | 48 +
 branches/Ndi/core/complib/user/complib.src | 293 +
 branches/Ndi/core/complib/user/makefile | 7 +
 branches/Ndi/core/dirs | 5 +
 branches/Ndi/core/iou/dirs | 2 +
 branches/Ndi/core/iou/kernel/SOURCES | 44 +
 branches/Ndi/core/iou/kernel/ibiou.rc | 47 +
 branches/Ndi/core/iou/kernel/iou_driver.c | 266 +
 branches/Ndi/core/iou/kernel/iou_driver.h | 258 +
 branches/Ndi/core/iou/kernel/iou_ioc_mgr.c | 1389 ++
 branches/Ndi/core/iou/kernel/iou_ioc_mgr.h | 77 +
 branches/Ndi/core/iou/kernel/iou_pnp.c | 623 +
 branches/Ndi/core/iou/kernel/iou_pnp.h | 80 +
 branches/Ndi/core/iou/kernel/makefile | 7 +
 branches/Ndi/dirs | 6 +
 branches/Ndi/docs/Manual.htm | 1608 ++
 .../Ndi/docs/complib/cl_async_proc_h.html | 309 +
 branches/Ndi/docs/complib/cl_atomic_h.html | 272 +
 branches/Ndi/docs/complib/cl_byteswap_h.html | 500 +
 branches/Ndi/docs/complib/cl_comppool_h.html | 604 +
 branches/Ndi/docs/complib/cl_debug_h.html | 534 +
 branches/Ndi/docs/complib/cl_event_h.html | 274 +
 branches/Ndi/docs/complib/cl_fleximap_h.html | 948 ++
 branches/Ndi/docs/complib/cl_ioctl_h.html | 609 +
 branches/Ndi/docs/complib/cl_irqlock_h.html | 221 +
 branches/Ndi/docs/complib/cl_list_h.html | 1412 ++
 branches/Ndi/docs/complib/cl_log_h.html | 117 +
 branches/Ndi/docs/complib/cl_map_h.html | 898 +
 branches/Ndi/docs/complib/cl_math_h.html | 103 +
 branches/Ndi/docs/complib/cl_memory_h.html | 629 +
 branches/Ndi/docs/complib/cl_mutex_h.html | 207 +
 branches/Ndi/docs/complib/cl_obj_h.html | 997 ++
 .../Ndi/docs/complib/cl_passivelock_h.html | 417 +
 branches/Ndi/docs/complib/cl_perf_h.html | 583 +
 branches/Ndi/docs/complib/cl_pool_h.html | 581 +
 .../Ndi/docs/complib/cl_ptr_vector_h.html | 890 +
 branches/Ndi/docs/complib/cl_qcomppool_h.html | 740 +
 branches/Ndi/docs/complib/cl_qlist_h.html | 1728 ++
 branches/Ndi/docs/complib/cl_qlockpool_h.html | 340 +
 branches/Ndi/docs/complib/cl_qmap_h.html | 998 ++
 branches/Ndi/docs/complib/cl_qpool_h.html | 628 +
 branches/Ndi/docs/complib/cl_rbmap_h.html | 563 +
 branches/Ndi/docs/complib/cl_reqmgr_h.html | 463 +
 branches/Ndi/docs/complib/cl_spinlock_h.html | 210 +
 .../Ndi/docs/complib/cl_syscallback_h.html | 243 +
 branches/Ndi/docs/complib/cl_thread_h.html | 164 +
 .../Ndi/docs/complib/cl_threadpool_h.html | 273 +
 branches/Ndi/docs/complib/cl_timer_h.html | 432 +
 branches/Ndi/docs/complib/cl_types_h.html | 410 +
 branches/Ndi/docs/complib/cl_vector_h.html | 984 ++
 branches/Ndi/docs/complib/cl_waitobj_h.html | 356 +
 branches/Ndi/docs/complib/comp_lib_h.html | 50 +
 branches/Ndi/docs/iba/ib_al_h.html | 10482 ++++++++++++
 branches/Ndi/docs/iba/ib_types_h.html | 10744 ++++++++++++
 branches/Ndi/docs/masterindex.html | 2741 +++
 branches/Ndi/docs/openfabrics.gif | Bin 0 -> 3660 bytes
 branches/Ndi/docs/robo_definitions.html | 655 +
 branches/Ndi/docs/robo_functions.html | 1535 ++
 branches/Ndi/docs/robo_modules.html | 116 +
 branches/Ndi/docs/robo_sourcefiles.html | 149 +
 branches/Ndi/docs/robo_strutures.html | 381 +
 branches/Ndi/docs/robodoc.css | 36 +
 branches/Ndi/etc/makebin.bat | 231 +
 branches/Ndi/etc/wpp/ALTraceRt.cmd | 14 +
 branches/Ndi/etc/wpp/CreateTrace.cmd | 12 +
 branches/Ndi/etc/wpp/IPoIBTraceRt.cmd | 13 +
 branches/Ndi/etc/wpp/MTHCATraceRt.cmd | 14 +
 branches/Ndi/etc/wpp/SDPTraceRt.cmd | 10 +
 branches/Ndi/etc/wpp/StartSdpTrace.cmd | 7 +
 branches/Ndi/etc/wpp/StartTrace.cmd | 22 +
 branches/Ndi/etc/wpp/StopSdpTrace.cmd | 10 +
 branches/Ndi/etc/wpp/StopTrace.cmd | 8 +
 branches/Ndi/hw/dirs | 3 +
 branches/Ndi/hw/mt23108/dirs | 4 +
 branches/Ndi/hw/mt23108/kernel/Makefile | 7 +
 branches/Ndi/hw/mt23108/kernel/SOURCES | 58 +
 branches/Ndi/hw/mt23108/kernel/hca.rc | 47 +
 branches/Ndi/hw/mt23108/kernel/hca_data.c | 2200 +++
 branches/Ndi/hw/mt23108/kernel/hca_data.h | 608 +
 branches/Ndi/hw/mt23108/kernel/hca_debug.h | 67 +
 branches/Ndi/hw/mt23108/kernel/hca_direct.c | 598 +
 branches/Ndi/hw/mt23108/kernel/hca_driver.c | 1897 +++
 branches/Ndi/hw/mt23108/kernel/hca_driver.h | 162 +
 branches/Ndi/hw/mt23108/kernel/hca_mcast.c | 204 +
 branches/Ndi/hw/mt23108/kernel/hca_memory.c | 979 ++
 branches/Ndi/hw/mt23108/kernel/hca_smp.c | 581 +
 branches/Ndi/hw/mt23108/kernel/hca_verbs.c | 2348 +++
 branches/Ndi/hw/mt23108/kernel/infinihost.inf | 191 +
 branches/Ndi/hw/mt23108/user/Makefile | 7 +
 branches/Ndi/hw/mt23108/user/SOURCES | 63 +
 branches/Ndi/hw/mt23108/user/hca_data.h | 98 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_av.c | 395 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_ca.c | 282 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_cq.c | 494 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_main.c | 190 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_main.h | 509 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_mcast.c | 121 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_mrw.c | 429 +
 .../Ndi/hw/mt23108/user/mlnx_ual_osbypass.c | 333 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_pd.c | 331 +
 branches/Ndi/hw/mt23108/user/mlnx_ual_qp.c | 531 +
 branches/Ndi/hw/mt23108/user/uvpd.rc | 48 +
 branches/Ndi/hw/mt23108/user/uvpd_exports.src | 10 +
 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.c | 265 +
 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.h | 867 +
 .../hw/mt23108/vapi/Hca/hcahal/hh_common.c | 128 +
 .../hw/mt23108/vapi/Hca/hcahal/hh_common.h | 158 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/hh_init.h | 52 +
 .../hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c | 390 +
 .../mt23108/vapi/Hca/hcahal/hh_stub_defines.h | 46 +
 .../hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c | 428 +
 .../vapi/Hca/hcahal/hh_tx_stub_defines.h | 46 +
 .../hw/mt23108/vapi/Hca/hcahal/hhenosys.ic | 513 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.c | 81 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.h | 1558 ++
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_obj.h | 93 +
 .../hw/mt23108/vapi/Hca/hcahal/hhul_stub.c | 91 +
 .../hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic | 382 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/invalid.ic | 480 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/rx_stub.c | 146 +
 .../vapi/Hca/hcahal/tavor/cmdif/cmd_types.h | 578 +
 .../vapi/Hca/hcahal/tavor/cmdif/cmdif.c | 1976 +++
 .../vapi/Hca/hcahal/tavor/cmdif/cmdif.h | 288 +
 .../vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h | 235 +
 .../vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c | 3088 ++++
 .../vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c | 354 +
 .../vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h | 254 +
 .../vapi/Hca/hcahal/tavor/eventp/event_irqh.c | 693 +
 .../vapi/Hca/hcahal/tavor/eventp/eventp.c | 1359 ++
 .../vapi/Hca/hcahal/tavor/eventp/eventp.h | 293 +
 .../Hca/hcahal/tavor/eventp/eventp_priv.h | 163 +
 .../mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c | 1017 ++
 .../mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h | 170 +
 .../vapi/Hca/hcahal/tavor/mrwm/tmrwm.c | 2910 ++++
 .../vapi/Hca/hcahal/tavor/mrwm/tmrwm.h | 427 +
 .../Hca/hcahal/tavor/os_dep/win/thh_kl.def | 160 +
 .../Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c | 214 +
 .../Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h | 78 +
 .../Hca/hcahal/tavor/os_dep/win/thhul_kl.def | 61 +
 .../hcahal/tavor/os_dep/win/thhul_mod_obj.c | 50 +
 .../hw/mt23108/vapi/Hca/hcahal/tavor/thh.h | 166 +
 .../vapi/Hca/hcahal/tavor/thh_common.h | 166 +
 .../vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c | 649 +
 .../vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h | 220 +
 .../Hca/hcahal/tavor/thh_default_profile.h | 121 +
 .../vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c | 6917 ++++++++
 .../vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h | 430 +
 .../Hca/hcahal/tavor/thh_hob/thh_hob_priv.h | 312 +
 .../mt23108/vapi/Hca/hcahal/tavor/thh_init.c | 60 +
 .../mt23108/vapi/Hca/hcahal/tavor/thh_init.h | 47 +
 .../vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c | 2681 +++
 .../vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h | 379 +
 .../Hca/hcahal/tavor/thh_requested_profile.h | 86 +
 .../vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c | 363 +
 .../vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h | 173 +
 .../hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h | 45 +
 .../Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c | 2084 +++
 .../Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h | 136 +
 .../Hca/hcahal/tavor/thhul_hob/thhul_hob.c | 415 +
 .../Hca/hcahal/tavor/thhul_hob/thhul_hob.h | 110 +
 .../Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c | 329 +
 .../Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h | 73 +
 .../Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c | 704 +
 .../Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h | 130 +
 .../hcahal/tavor/thhul_pdm/thhul_pdm_priv.h | 81 +
 .../Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c | 5103 ++++++
 .../Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h | 208 +
 .../hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h | 223 +
 .../Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c | 1171 ++
 .../Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h | 104 +
 .../mt23108/vapi/Hca/hcahal/tavor/uar/uar.c | 281 +
 .../mt23108/vapi/Hca/hcahal/tavor/uar/uar.h | 319 +
 .../vapi/Hca/hcahal/tavor/udavm/udavm.c | 723 +
 .../vapi/Hca/hcahal/tavor/udavm/udavm.h | 207 +
 .../vapi/Hca/hcahal/tavor/uldm/thh_uldm.c | 747 +
 .../vapi/Hca/hcahal/tavor/uldm/thh_uldm.h | 241 +
 .../Hca/hcahal/tavor/uldm/thh_uldm_priv.h | 96 +
 .../vapi/Hca/hcahal/tavor/util/epool.c | 255 +
 .../vapi/Hca/hcahal/tavor/util/epool.h | 152 +
 .../vapi/Hca/hcahal/tavor/util/extbuddy.c | 625 +
 .../vapi/Hca/hcahal/tavor/util/extbuddy.h | 78 +
 .../vapi/Hca/hcahal/tavor/util/sm_mad.c | 430 +
 .../vapi/Hca/hcahal/tavor/util/sm_mad.h | 180 +
 .../vapi/Hca/hcahal/tavor/util/tlog2.c | 232 +
 .../vapi/Hca/hcahal/tavor/util/tlog2.h | 120 +
 .../Ndi/hw/mt23108/vapi/Hca/hcahal/zombie.ic | 480 +
 .../mt23108/vapi/Hca/verbs/common/allocator.h | 52 +
 .../verbs/common/os_dep/win/vapi_common.def | 116 +
 .../common/os_dep/win/vapi_common_kl.def | 126 +
 .../verbs/common/os_dep/win/vapi_mod_obj.c | 58 +
 .../Hca/verbs/common/os_dep/win/vip_imp.h | 39 +
 .../vapi/Hca/verbs/common/vapi_common.c | 524 +
 .../vapi/Hca/verbs/common/vapi_common.h | 72 +
 .../mt23108/vapi/Hca/verbs/common/vip_array.c | 1116 ++
 .../mt23108/vapi/Hca/verbs/common/vip_array.h | 539 +
 .../mt23108/vapi/Hca/verbs/common/vip_cirq.c | 200 +
 .../mt23108/vapi/Hca/verbs/common/vip_cirq.h | 58 +
 .../vapi/Hca/verbs/common/vip_common.h | 103 +
 .../vapi/Hca/verbs/common/vip_delay_unlock.c | 128 +
 .../vapi/Hca/verbs/common/vip_delay_unlock.h | 51 +
 .../Hca/verbs/common/vip_delay_unlock_priv.h | 54 +
 .../mt23108/vapi/Hca/verbs/common/vip_hash.c | 115 +
 .../mt23108/vapi/Hca/verbs/common/vip_hash.h | 47 +
 .../mt23108/vapi/Hca/verbs/common/vip_hash.ic | 893 +
 .../mt23108/vapi/Hca/verbs/common/vip_hash.ih | 475 +
 .../vapi/Hca/verbs/common/vip_hash64p.h | 46 +
 .../mt23108/vapi/Hca/verbs/common/vip_hashp.h | 46 +
 .../vapi/Hca/verbs/common/vip_hashp2p.h | 46 +
 .../vapi/Hca/verbs/common/vip_hashv4p.h | 47 +
 .../Ndi/hw/mt23108/vapi/Hca/verbs/evapi.h | 1430 ++
 branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi.h | 2191 +++
 .../hw/mt23108/vapi/Hca/verbs/vapi_features.h | 80 +
 .../hw/mt23108/vapi/Hca/verbs/vapi_types.h | 877 +
 branches/Ndi/hw/mt23108/vapi/dirs | 3 +
 branches/Ndi/hw/mt23108/vapi/kernel/Makefile | 7 +
 branches/Ndi/hw/mt23108/vapi/kernel/SOURCES | 70 +
 .../hw/mt23108/vapi/kernel/hh_kl_sources.c | 37 +
 branches/Ndi/hw/mt23108/vapi/kernel/mdmsg.h | 94 +
 .../hw/mt23108/vapi/kernel/mosal_kl_sources.c | 48 +
 .../hw/mt23108/vapi/kernel/mpga_kl_sources.c | 41 +
 .../Ndi/hw/mt23108/vapi/kernel/mt23108.def | 7 +
 .../Ndi/hw/mt23108/vapi/kernel/mt23108.rc | 47 +
 .../vapi/kernel/mtl_common_kl_sources.c | 36 +
 .../hw/mt23108/vapi/kernel/tdriver_sources.c | 44 +
 .../hw/mt23108/vapi/kernel/thh_kl_sources.c | 54 +
 .../hw/mt23108/vapi/kernel/thhul_kl_sources.c | 40 +
 .../vapi/kernel/vapi_common_kl_sources.c | 40 +
 .../Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal.h | 126 +
 .../hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h | 288 +
 .../mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c | 135 +
 .../hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h | 237 +
 .../mt23108/vapi/mlxsys/mosal/mosal_iobuf.h | 282 +
 .../mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h | 106 +
 .../vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h | 90 +
 .../hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h | 478 +
 .../mt23108/vapi/mlxsys/mosal/mosal_mlock.h | 136 +
 .../vapi/mlxsys/mosal/mosal_prot_ctx.h | 49 +
 .../hw/mt23108/vapi/mlxsys/mosal/mosal_que.h | 173 +
 .../hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h | 460 +
 .../mt23108/vapi/mlxsys/mosal/mosal_thread.h | 95 +
 .../mt23108/vapi/mlxsys/mosal/mosal_timer.h | 615 +
 .../mt23108/vapi/mlxsys/mosal/mosalu_socket.h | 250 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal.def | 108 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_arch.h | 91 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_bus.c | 208 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_bus.h | 362 +
 .../mlxsys/mosal/os_dep/win/mosal_driver.c | 112 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_gen.c | 265 +
 .../mlxsys/mosal/os_dep/win/mosal_gen_priv.h | 82 +
 .../mlxsys/mosal/os_dep/win/mosal_iobuf.c | 639 +
 .../mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h | 73 +
 .../mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c | 311 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_kl.def | 178 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_mem.c | 1067 ++
 .../mlxsys/mosal/os_dep/win/mosal_mem_imp.h | 242 +
 .../mlxsys/mosal/os_dep/win/mosal_mem_priv.h | 217 +
 .../mlxsys/mosal/os_dep/win/mosal_mlock.c | 476 +
 .../mosal/os_dep/win/mosal_mlock_priv.h | 68 +
 .../mlxsys/mosal/os_dep/win/mosal_ntddk.c | 570 +
 .../mlxsys/mosal/os_dep/win/mosal_ntddk.h | 319 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_priv.h | 59 +
 .../mosal/os_dep/win/mosal_prot_ctx_imp.h | 47 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_que.c | 372 +
 .../mlxsys/mosal/os_dep/win/mosal_que_priv.h | 38 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_sync.c | 647 +
 .../mlxsys/mosal/os_dep/win/mosal_sync_imp.h | 313 +
 .../mlxsys/mosal/os_dep/win/mosal_sync_priv.h | 37 +
 .../mlxsys/mosal/os_dep/win/mosal_thread.c | 173 +
 .../mosal/os_dep/win/mosal_thread_imp.h | 166 +
 .../mlxsys/mosal/os_dep/win/mosal_timer.c | 726 +
 .../mlxsys/mosal/os_dep/win/mosal_timer_imp.h | 128 +
 .../mosal/os_dep/win/mosal_timer_priv.h | 69 +
 .../mlxsys/mosal/os_dep/win/mosal_types.h | 47 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_util.c | 426 +
 .../vapi/mlxsys/mosal/os_dep/win/mosal_util.h | 273 +
 .../mlxsys/mosal/os_dep/win/mosalu_driver.c | 110 +
 .../mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c | 272 +
 .../mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h | 52 +
 .../vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c | 230 +
 .../mlxsys/mosal/os_dep/win/mosalu_socket.c | 382 +
 .../mosal/os_dep/win/mosalu_socket_imp.h | 55 +
 .../mlxsys/mosal/os_dep/win/mosalu_sync.c | 558 +
 .../mlxsys/mosal/os_dep/win/mosalu_thread.c | 146 +
 .../mt23108/vapi/mlxsys/mpga/MPGA_headers.h | 2077 +++
 .../hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h | 140 +
 .../vapi/mlxsys/mpga/internal_functions.c | 548 +
 .../vapi/mlxsys/mpga/internal_functions.h | 338 +
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.c | 1215 ++
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.h | 872 +
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c | 193 +
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h | 148 +
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.c | 1420 ++
 .../Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.h | 255 +
 .../vapi/mlxsys/mpga/nMPGA_packet_append.c | 208 +
 .../vapi/mlxsys/mpga/nMPGA_packet_append.h | 253 +
 .../vapi/mlxsys/mpga/os_dep/win/mpga.def | 44 +
 .../vapi/mlxsys/mpga/os_dep/win/mpga_driver.c | 51 +
 .../vapi/mlxsys/mpga/os_dep/win/mpga_kl.def | 46 +
 .../mt23108/vapi/mlxsys/mpga/packet_append.c | 523 +
 .../mt23108/vapi/mlxsys/mpga/packet_append.h | 651 +
 .../vapi/mlxsys/mpga/packet_utilities.c | 102 +
 .../vapi/mlxsys/mpga/packet_utilities.h | 85 +
 .../hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h | 196 +
 .../vapi/mlxsys/mtl_common/mtl_common.c | 1029 ++
 .../vapi/mlxsys/mtl_common/mtl_common.h | 271 +
 .../mt23108/vapi/mlxsys/mtl_common/mtl_log.h | 306 +
 .../mtl_common/os_dep/win/mtl_common.def | 40 +
 .../mtl_common/os_dep/win/mtl_common_kl.def | 45 +
 .../mtl_common/os_dep/win/mtl_log_win.h | 263 +
 .../mt23108/vapi/mlxsys/mtl_types/bit_ops.h | 82 +
 .../mt23108/vapi/mlxsys/mtl_types/ib_defs.h | 427 +
 .../mt23108/vapi/mlxsys/mtl_types/mtl_errno.h | 172 +
 .../vapi/mlxsys/mtl_types/mtl_pci_types.h | 213 +
 .../mt23108/vapi/mlxsys/mtl_types/mtl_types.h | 116 +
 .../vapi/mlxsys/mtl_types/win/MdIoctl.h | 153 +
 .../vapi/mlxsys/mtl_types/win/MdIoctlSpec.h | 67 +
 .../vapi/mlxsys/mtl_types/win/endian.h | 36 +
 .../vapi/mlxsys/mtl_types/win/mtl_sys_defs.h | 375 +
 .../vapi/mlxsys/mtl_types/win/mtl_sys_types.h | 74 +
 .../vapi/mlxsys/mtl_types/win/unistd.h | 36 +
 .../mlxsys/mtl_types/win/win/mtl_arch_types.h | 88 +
 .../vapi/mlxsys/os_dep/win/tdriver/Md.c | 679 +
 .../vapi/mlxsys/os_dep/win/tdriver/Md.h | 60 +
 .../vapi/mlxsys/os_dep/win/tdriver/Md.rc | 181 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdCard.h | 740 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdConf.c | 1300 ++
 .../vapi/mlxsys/os_dep/win/tdriver/MdConf.h | 98 +
 .../mlxsys/os_dep/win/tdriver/MdConfPriv.h | 81 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdCtl.c | 126 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdCtl.h | 47 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdDbg.c | 72 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdDbg.h | 238 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdGen.h | 56 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdGuid.h | 43 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c | 210 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdPci.c | 294 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdPci.h | 64 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdPnp.c | 1244 ++
 .../vapi/mlxsys/os_dep/win/tdriver/MdPwr.c | 934 ++
 .../vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c | 329 +
 .../vapi/mlxsys/os_dep/win/tdriver/MdUtil.c | 1757 ++
 .../vapi/mlxsys/os_dep/win/tdriver/MdUtil.h | 400 +
 .../vapi/mlxsys/os_dep/win/tdriver/MddLib.h | 72 +
 .../mlxsys/os_dep/win/tdriver/infinihost.h | 57 +
 .../mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c | 62 +
 .../mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc | 65 +
 .../vapi/mlxsys/os_dep/win/tdriver/resource.h | 48 +
 .../mlxsys/os_dep/win/tdriver/tavor_csp.h | 13869 ++++++++++++++++
 .../vapi/mlxsys/os_dep/win/tdriver/version.h | 52 +
 .../Ndi/hw/mt23108/vapi/mlxsys/tools/mtperf.h | 140 +
 .../hw/mt23108/vapi/tavor_arch_db/MT23108.h | 42 +
 .../mt23108/vapi/tavor_arch_db/MT23108_PRM.h | 2506 +++
 .../vapi/tavor_arch_db/MT23108_PRM_append.h | 209 +
 .../hw/mt23108/vapi/tavor_arch_db/cr_types.h | 60 +
 .../vapi/tavor_arch_db/tavor_dev_defs.h | 55 +
 .../vapi/tavor_arch_db/tavor_if_defs.h | 548 +
 branches/Ndi/hw/mt23108/vapi/user/Makefile | 7 +
 branches/Ndi/hw/mt23108/vapi/user/SOURCES | 63 +
 .../Ndi/hw/mt23108/vapi/user/hh_ul_sources.c | 6 +
 .../hw/mt23108/vapi/user/mosal_ul_sources.c | 4 +
 .../hw/mt23108/vapi/user/mpga_ul_sources.c | 8 +
 .../mt23108/vapi/user/mtl_common_ul_sources.c | 2 +
 .../hw/mt23108/vapi/user/thhul_ul_sources.c | 11 +
 .../vapi/user/vapi_common_ul_sources.c | 4 +
 branches/Ndi/hw/mthca/dirs | 3 +
 branches/Ndi/hw/mthca/hca_utils.c | 79 +
 branches/Ndi/hw/mthca/hca_utils.h | 49 +
 branches/Ndi/hw/mthca/kernel/Makefile | 6 +
 branches/Ndi/hw/mthca/kernel/SOURCES | 86 +
 branches/Ndi/hw/mthca/kernel/hca.rc | 44 +
 branches/Ndi/hw/mthca/kernel/hca_data.c | 907 +
 branches/Ndi/hw/mthca/kernel/hca_data.h | 395 +
 branches/Ndi/hw/mthca/kernel/hca_debug.h | 180 +
 branches/Ndi/hw/mthca/kernel/hca_direct.c | 310 +
 branches/Ndi/hw/mthca/kernel/hca_driver.c | 1038 ++
 branches/Ndi/hw/mthca/kernel/hca_driver.h | 246 +
 branches/Ndi/hw/mthca/kernel/hca_mcast.c | 202 +
 branches/Ndi/hw/mthca/kernel/hca_memory.c | 609 +
 branches/Ndi/hw/mthca/kernel/hca_pci.c | 769 +
 branches/Ndi/hw/mthca/kernel/hca_pci.h | 24 +
 branches/Ndi/hw/mthca/kernel/hca_pnp.c | 1765 ++
 branches/Ndi/hw/mthca/kernel/hca_pnp.h | 46 +
 branches/Ndi/hw/mthca/kernel/hca_verbs.c | 1665 ++
 branches/Ndi/hw/mthca/kernel/ib_cache.h | 109 +
 branches/Ndi/hw/mthca/kernel/ib_mad.h | 579 +
 branches/Ndi/hw/mthca/kernel/ib_pack.h | 245 +
 branches/Ndi/hw/mthca/kernel/ib_smi.h | 95 +
 branches/Ndi/hw/mthca/kernel/ib_verbs.h | 1343 ++
 branches/Ndi/hw/mthca/kernel/mt_atomic.h | 49 +
 branches/Ndi/hw/mthca/kernel/mt_bitmap.h | 107 +
 branches/Ndi/hw/mthca/kernel/mt_cache.c | 415 +
 branches/Ndi/hw/mthca/kernel/mt_device.c | 567 +
 branches/Ndi/hw/mthca/kernel/mt_fmr_pool.c | 511 +
 branches/Ndi/hw/mthca/kernel/mt_fmr_pool.h | 95 +
 branches/Ndi/hw/mthca/kernel/mt_l2w.c | 132 +
 branches/Ndi/hw/mthca/kernel/mt_l2w.h | 92 +
 branches/Ndi/hw/mthca/kernel/mt_list.h | 168 +
 branches/Ndi/hw/mthca/kernel/mt_memory.c | 761 +
 branches/Ndi/hw/mthca/kernel/mt_memory.h | 307 +
 branches/Ndi/hw/mthca/kernel/mt_pa_cash.c | 364 +
 branches/Ndi/hw/mthca/kernel/mt_pa_cash.h | 51 +
 branches/Ndi/hw/mthca/kernel/mt_packer.c | 205 +
 branches/Ndi/hw/mthca/kernel/mt_pci.h | 131 +
 branches/Ndi/hw/mthca/kernel/mt_pcipool.h | 103 +
 branches/Ndi/hw/mthca/kernel/mt_reset_tavor.c | 485 +
 branches/Ndi/hw/mthca/kernel/mt_spinlock.h | 143 +
 branches/Ndi/hw/mthca/kernel/mt_sync.h | 109 +
 branches/Ndi/hw/mthca/kernel/mt_types.h | 60 +
 branches/Ndi/hw/mthca/kernel/mt_ud_header.c | 280 +
 branches/Ndi/hw/mthca/kernel/mt_uverbs.c | 101 +
 branches/Ndi/hw/mthca/kernel/mt_verbs.c | 935 ++
 branches/Ndi/hw/mthca/kernel/mthca.h | 9 +
 branches/Ndi/hw/mthca/kernel/mthca.inf | 204 +
 .../Ndi/hw/mthca/kernel/mthca_allocator.c | 294 +
 branches/Ndi/hw/mthca/kernel/mthca_av.c | 298 +
 branches/Ndi/hw/mthca/kernel/mthca_catas.c | 166 +
 branches/Ndi/hw/mthca/kernel/mthca_cmd.c | 1830 ++
 branches/Ndi/hw/mthca/kernel/mthca_cmd.h | 326 +
 .../Ndi/hw/mthca/kernel/mthca_config_reg.h | 50 +
 branches/Ndi/hw/mthca/kernel/mthca_cq.c | 963 ++
 branches/Ndi/hw/mthca/kernel/mthca_dev.h | 605 +
 branches/Ndi/hw/mthca/kernel/mthca_doorbell.h | 106 +
 branches/Ndi/hw/mthca/kernel/mthca_eq.c | 1106 ++
 branches/Ndi/hw/mthca/kernel/mthca_log.c | 234 +
 branches/Ndi/hw/mthca/kernel/mthca_log.mc | 56 +
 branches/Ndi/hw/mthca/kernel/mthca_log.rc | 2 +
 branches/Ndi/hw/mthca/kernel/mthca_mad.c | 293 +
 branches/Ndi/hw/mthca/kernel/mthca_main.c | 1108 ++
 branches/Ndi/hw/mthca/kernel/mthca_mcg.c | 408 +
 branches/Ndi/hw/mthca/kernel/mthca_memfree.c | 729 +
 branches/Ndi/hw/mthca/kernel/mthca_memfree.h | 177 +
 branches/Ndi/hw/mthca/kernel/mthca_mr.c | 970 ++
 branches/Ndi/hw/mthca/kernel/mthca_pd.c | 88 +
 branches/Ndi/hw/mthca/kernel/mthca_profile.c | 286 +
 branches/Ndi/hw/mthca/kernel/mthca_profile.h | 61 +
 branches/Ndi/hw/mthca/kernel/mthca_provider.c | 1327 ++
 branches/Ndi/hw/mthca/kernel/mthca_provider.h | 404 +
 branches/Ndi/hw/mthca/kernel/mthca_qp.c | 2369 +++
 branches/Ndi/hw/mthca/kernel/mthca_srq.c | 751 +
 branches/Ndi/hw/mthca/kernel/mthca_uar.c | 78 +
 branches/Ndi/hw/mthca/mt_utils.c | 50 +
 branches/Ndi/hw/mthca/mt_utils.h | 225 +
 branches/Ndi/hw/mthca/mthca_wqe.h | 137 +
 branches/Ndi/hw/mthca/mx_abi.h | 178 +
 branches/Ndi/hw/mthca/user/Makefile | 7 +
 branches/Ndi/hw/mthca/user/SOURCES | 80 +
 branches/Ndi/hw/mthca/user/arch.h | 53 +
 branches/Ndi/hw/mthca/user/mlnx_ual_av.c | 397 +
 branches/Ndi/hw/mthca/user/mlnx_ual_ca.c | 284 +
 branches/Ndi/hw/mthca/user/mlnx_ual_cq.c | 216 +
 branches/Ndi/hw/mthca/user/mlnx_ual_data.h | 58 +
 branches/Ndi/hw/mthca/user/mlnx_ual_main.c | 204 +
 branches/Ndi/hw/mthca/user/mlnx_ual_main.h | 576 +
 branches/Ndi/hw/mthca/user/mlnx_ual_mcast.c | 118 +
 branches/Ndi/hw/mthca/user/mlnx_ual_mrw.c | 229 +
 .../Ndi/hw/mthca/user/mlnx_ual_osbypass.c | 258 +
 branches/Ndi/hw/mthca/user/mlnx_ual_pd.c | 179 +
 branches/Ndi/hw/mthca/user/mlnx_ual_qp.c | 327 +
 branches/Ndi/hw/mthca/user/mlnx_ual_srq.c | 269 +
 branches/Ndi/hw/mthca/user/mlnx_uvp.c | 238 +
 branches/Ndi/hw/mthca/user/mlnx_uvp.def | 10 +
 branches/Ndi/hw/mthca/user/mlnx_uvp.h | 335 +
 branches/Ndi/hw/mthca/user/mlnx_uvp.rc | 48 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_ah.c | 190 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_cq.c | 626 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_debug.c | 85 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_debug.h | 145 +
 .../Ndi/hw/mthca/user/mlnx_uvp_doorbell.h | 80 +
 .../Ndi/hw/mthca/user/mlnx_uvp_kern_abi.h | 644 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_memfree.c | 211 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_qp.c | 1085 ++
 branches/Ndi/hw/mthca/user/mlnx_uvp_srq.c | 326 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.c | 532 +
 branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.h | 490 +
 branches/Ndi/hw/mthca/user/mt_l2w.h | 87 +
 branches/Ndi/hw/mthca/user/opcode.h | 149 +
 branches/Ndi/inc/complib/cl_async_proc.h | 342 +
 branches/Ndi/inc/complib/cl_atomic.h | 297 +
 branches/Ndi/inc/complib/cl_byteswap.h | 539 +
 branches/Ndi/inc/complib/cl_comppool.h | 619 +
 branches/Ndi/inc/complib/cl_debug.h | 597 +
 branches/Ndi/inc/complib/cl_event.h | 304 +
 branches/Ndi/inc/complib/cl_fleximap.h | 928 ++
 branches/Ndi/inc/complib/cl_ioctl.h | 626 +
 branches/Ndi/inc/complib/cl_irqlock.h | 248 +
 branches/Ndi/inc/complib/cl_list.h | 1364 ++
 branches/Ndi/inc/complib/cl_log.h | 155 +
 branches/Ndi/inc/complib/cl_map.h | 875 +
 branches/Ndi/inc/complib/cl_math.h | 138 +
 branches/Ndi/inc/complib/cl_memory.h | 963 ++
 branches/Ndi/inc/complib/cl_mutex.h | 235 +
 branches/Ndi/inc/complib/cl_obj.h | 998 ++
 branches/Ndi/inc/complib/cl_passivelock.h | 433 +
 branches/Ndi/inc/complib/cl_perf.h | 807 +
 branches/Ndi/inc/complib/cl_pool.h | 594 +
 branches/Ndi/inc/complib/cl_ptr_vector.h | 878 +
 branches/Ndi/inc/complib/cl_qcomppool.h | 785 +
 branches/Ndi/inc/complib/cl_qlist.h | 1770 ++
 branches/Ndi/inc/complib/cl_qlockpool.h | 369 +
 branches/Ndi/inc/complib/cl_qmap.h | 973 ++
 branches/Ndi/inc/complib/cl_qpool.h | 639 +
 branches/Ndi/inc/complib/cl_rbmap.h | 593 +
 branches/Ndi/inc/complib/cl_reqmgr.h | 481 +
 branches/Ndi/inc/complib/cl_spinlock.h | 238 +
 branches/Ndi/inc/complib/cl_syscallback.h | 368 +
 branches/Ndi/inc/complib/cl_thread.h | 396 +
 branches/Ndi/inc/complib/cl_threadpool.h | 304 +
 branches/Ndi/inc/complib/cl_timer.h | 446 +
 branches/Ndi/inc/complib/cl_types.h | 470 +
 branches/Ndi/inc/complib/cl_vector.h | 1004 ++
 branches/Ndi/inc/complib/cl_waitobj.h | 377 +
 branches/Ndi/inc/complib/comp_lib.h | 115 +
 branches/Ndi/inc/iba/ib_al.h | 10157 +++++++++++
 branches/Ndi/inc/iba/ib_al_ioctl.h | 3402 ++++
 branches/Ndi/inc/iba/ib_at_ioctl.h | 135 +
 branches/Ndi/inc/iba/ib_ci.h | 2917 ++++
 branches/Ndi/inc/iba/ib_types.h | 11333 +++++++++++++
 .../Ndi/inc/kernel/complib/cl_atomic_osd.h | 107 +
 branches/Ndi/inc/kernel/complib/cl_bus_ifc.h | 75 +
 .../Ndi/inc/kernel/complib/cl_byteswap_osd.h | 67 +
 .../Ndi/inc/kernel/complib/cl_debug_osd.h | 112 +
 .../Ndi/inc/kernel/complib/cl_event_osd.h | 119 +
 branches/Ndi/inc/kernel/complib/cl_init.h | 59 +
 .../Ndi/inc/kernel/complib/cl_ioctl_osd.h | 203 +
 .../Ndi/inc/kernel/complib/cl_irqlock_osd.h | 128 +
 .../Ndi/inc/kernel/complib/cl_memory_osd.h | 216 +
 .../Ndi/inc/kernel/complib/cl_mutex_osd.h | 106 +
 branches/Ndi/inc/kernel/complib/cl_packoff.h | 36 +
 branches/Ndi/inc/kernel/complib/cl_packon.h | 51 +
 branches/Ndi/inc/kernel/complib/cl_pnp_po.h | 994 ++
 .../Ndi/inc/kernel/complib/cl_spinlock_osd.h | 120 +
 .../inc/kernel/complib/cl_syscallback_osd.h | 45 +
 .../Ndi/inc/kernel/complib/cl_thread_osd.h | 89 +
 .../Ndi/inc/kernel/complib/cl_timer_osd.h | 102 +
 .../Ndi/inc/kernel/complib/cl_types_osd.h | 138 +
 .../Ndi/inc/kernel/complib/cl_waitobj_osd.h | 107 +
 branches/Ndi/inc/kernel/iba/ib_al_ifc.h | 775 +
 branches/Ndi/inc/kernel/iba/ib_ci_ifc.h | 132 +
 branches/Ndi/inc/kernel/iba/ioc_ifc.h | 96 +
 branches/Ndi/inc/kernel/iba/iou_ifc.h | 103 +
 branches/Ndi/inc/kernel/iba/ipoib_ifc.h | 102 +
 branches/Ndi/inc/kernel/ip_packet.h | 459 +
 branches/Ndi/inc/mthca/mthca_vc.h | 89 +
 branches/Ndi/inc/oib_ver.h | 70 +
 branches/Ndi/inc/openib.def | 34 +
 branches/Ndi/inc/user/complib/cl_atomic_osd.h | 107 +
 .../Ndi/inc/user/complib/cl_byteswap_osd.h | 67 +
 branches/Ndi/inc/user/complib/cl_debug_osd.h | 87 +
 branches/Ndi/inc/user/complib/cl_event_osd.h | 120 +
 branches/Ndi/inc/user/complib/cl_ioctl_osd.h | 118 +
 branches/Ndi/inc/user/complib/cl_memory_osd.h | 101 +
 branches/Ndi/inc/user/complib/cl_mutex_osd.h | 106 +
 branches/Ndi/inc/user/complib/cl_packoff.h | 36 +
 branches/Ndi/inc/user/complib/cl_packon.h | 51 +
 .../Ndi/inc/user/complib/cl_spinlock_osd.h | 127 +
 .../Ndi/inc/user/complib/cl_syscallback_osd.h | 51 +
 branches/Ndi/inc/user/complib/cl_thread_osd.h | 91 +
 branches/Ndi/inc/user/complib/cl_timer_osd.h | 54 +
 branches/Ndi/inc/user/complib/cl_types_osd.h | 146 +
 .../Ndi/inc/user/complib/cl_waitobj_osd.h | 120 +
 branches/Ndi/inc/user/iba/ib_uvp.h | 3370 ++++
 branches/Ndi/inc/user/wsd/ibsp_regpath.h | 66 +
 branches/Ndi/tests/alts/allocdeallocpd.c | 115 +
 branches/Ndi/tests/alts/alts_common.h | 301 +
 branches/Ndi/tests/alts/alts_debug.h | 79 +
 branches/Ndi/tests/alts/alts_misc.c | 254 +
 branches/Ndi/tests/alts/alts_readme.txt | 119 +
 branches/Ndi/tests/alts/cmtests.c | 4273 +++++
 branches/Ndi/tests/alts/createanddestroycq.c | 332 +
 branches/Ndi/tests/alts/createanddestroyqp.c | 284 +
 branches/Ndi/tests/alts/createdestroyav.c | 333 +
 branches/Ndi/tests/alts/creatememwindow.c | 240 +
 branches/Ndi/tests/alts/dirs | 3 +
 branches/Ndi/tests/alts/ibquery.c | 582 +
 branches/Ndi/tests/alts/kernel/SOURCES | 38 +
 branches/Ndi/tests/alts/kernel/alts.inf | 169 +
 branches/Ndi/tests/alts/kernel/alts.rc | 47 +
 branches/Ndi/tests/alts/kernel/alts_driver.c | 353 +
 branches/Ndi/tests/alts/kernel/alts_driver.h | 23 +
 branches/Ndi/tests/alts/kernel/makefile | 7 +
 branches/Ndi/tests/alts/madtests.c | 3037 ++++
 branches/Ndi/tests/alts/multisendrecv.c | 2371 +++
 branches/Ndi/tests/alts/openclose.c | 80 +
 branches/Ndi/tests/alts/querycaattr.c | 270 +
 branches/Ndi/tests/alts/registermemregion.c | 723 +
 branches/Ndi/tests/alts/registerpnp.c | 207 +
 branches/Ndi/tests/alts/reregister_hca.c | 104 +
 branches/Ndi/tests/alts/smatests.c | 438 +
 branches/Ndi/tests/alts/user/SOURCES | 36 +
 branches/Ndi/tests/alts/user/alts_main.c | 507 +
 branches/Ndi/tests/alts/user/makefile | 7 +
 branches/Ndi/tests/cmtest/dirs | 2 +
 branches/Ndi/tests/cmtest/user/SOURCES | 20 +
 branches/Ndi/tests/cmtest/user/cmtest_main.c | 2028 +++
 branches/Ndi/tests/cmtest/user/makefile | 7 +
 branches/Ndi/tests/dirs | 6 +
 branches/Ndi/tests/ibat/dirs | 2 +
 branches/Ndi/tests/ibat/user/PrintIp.c | 253 +
 branches/Ndi/tests/ibat/user/SOURCES | 18 +
 branches/Ndi/tests/ibat/user/makefile | 7 +
 branches/Ndi/tests/limits/dirs | 2 +
 branches/Ndi/tests/limits/user/SOURCES | 20 +
 branches/Ndi/tests/limits/user/limits_main.c | 529 +
 branches/Ndi/tests/limits/user/makefile | 7 +
 branches/Ndi/tests/wsd/dirs | 2 +
 branches/Ndi/tests/wsd/user/contest/contest.c | 200 +
 branches/Ndi/tests/wsd/user/contest/contest.h | 4 +
 branches/Ndi/tests/wsd/user/dirs | 2 +
 branches/Ndi/tests/wsd/user/test1/test1.c | 233 +
 branches/Ndi/tests/wsd/user/test2/ibwrap.c | 599 +
 branches/Ndi/tests/wsd/user/test2/ibwrap.h | 41 +
 branches/Ndi/tests/wsd/user/test2/test2.c | 64 +
 branches/Ndi/tests/wsd/user/test3/ibwrap.c | 610 +
 branches/Ndi/tests/wsd/user/test3/ibwrap.h | 44 +
 branches/Ndi/tests/wsd/user/test3/test3.c | 126 +
 branches/Ndi/tests/wsd/user/ttcp/SOURCES | 13 +
 branches/Ndi/tests/wsd/user/ttcp/makefile | 7 +
 branches/Ndi/tests/wsd/user/ttcp/ttcp.c | 860 +
 branches/Ndi/tools/coinstaller/dirs | 2 +
 .../Ndi/tools/coinstaller/user/IBInstaller.rc | 132 +
 .../tools/coinstaller/user/IbInstaller.cpp | 405 +
 .../tools/coinstaller/user/IbInstaller.def | 4 +
 branches/Ndi/tools/coinstaller/user/SOURCES | 18 +
 branches/Ndi/tools/coinstaller/user/makefile | 7 +
 .../Ndi/tools/coinstaller/user/resource.h | 46 +
 .../Ndi/tools/coinstaller/user/stdafx.cpp | 40 +
 branches/Ndi/tools/coinstaller/user/stdafx.h | 51 +
 branches/Ndi/tools/dirs | 12 +
 branches/Ndi/tools/flint/dirs | 2 +
 branches/Ndi/tools/flint/user/SOURCES | 63 +
 branches/Ndi/tools/flint/user/flint.cpp | 6451 +++++++
 branches/Ndi/tools/flint/user/flint.rc | 47 +
 branches/Ndi/tools/flint/user/makefile | 7 +
 branches/Ndi/tools/fwupdate/dirs | 2 +
 branches/Ndi/tools/fwupdate/user/SOURCES | 21 +
 .../Ndi/tools/fwupdate/user/flint-tools.cpp | 368 +
 .../Ndi/tools/fwupdate/user/flint-tools.h | 160 +
 branches/Ndi/tools/fwupdate/user/flint.cpp | 2756 +++
 branches/Ndi/tools/fwupdate/user/makefile | 8 +
 branches/Ndi/tools/fwupdate/user/mtcr.h | 227 +
 branches/Ndi/tools/mread/user/SOURCES | 40 +
 branches/Ndi/tools/mread/user/makefile | 8 +
 branches/Ndi/tools/mread/user/mread.c | 73 +
 branches/Ndi/tools/mst/dirs | 2 +
 branches/Ndi/tools/mst/user/SOURCES | 41 +
 branches/Ndi/tools/mst/user/makefile | 8 +
 branches/Ndi/tools/mst/user/mst.c | 46 +
 branches/Ndi/tools/mst/user/mst.rc | 47 +
 branches/Ndi/tools/mtcr/dirs | 2 +
 branches/Ndi/tools/mtcr/user/SOURCES | 88 +
 branches/Ndi/tools/mtcr/user/com_def.h | 194 +
 branches/Ndi/tools/mtcr/user/makefile | 8 +
 branches/Ndi/tools/mtcr/user/mtcr.c | 1031 ++
 branches/Ndi/tools/mtcr/user/mtcr.def | 1 +
 branches/Ndi/tools/mtcr/user/mtcr.h | 216 +
 branches/Ndi/tools/mtcr/user/mtcr.rc | 58 +
 branches/Ndi/tools/mtcr/user/mtcr_i2c.c | 612 +
 branches/Ndi/tools/mtcr/user/mtcr_i2c.h | 38 +
 branches/Ndi/tools/mtcr/user/usb.cpp | 510 +
 branches/Ndi/tools/mtcr/user/usb.h | 172 +
 branches/Ndi/tools/mtcr/user/usb/I2cBrdg.lib | Bin 0 -> 16838 bytes
 branches/Ndi/tools/mtcr/user/usb/UsbI2cIo.lib | Bin 0 -> 5962 bytes
 branches/Ndi/tools/mtcr/user/usb/i2cbridge.h | 168 +
 branches/Ndi/tools/mtcr/user/usb/usbi2cio.h | 83 +
 branches/Ndi/tools/mwrite/user/SOURCES | 40 +
 branches/Ndi/tools/mwrite/user/makefile | 8 +
 branches/Ndi/tools/mwrite/user/mwrite.c | 62 +
 branches/Ndi/tools/perftests/dirs | 2 +
 branches/Ndi/tools/perftests/user/README | 101 +
 branches/Ndi/tools/perftests/user/TODO | 1 +
 .../Ndi/tools/perftests/user/clock_test.c | 24 +
 branches/Ndi/tools/perftests/user/dirs | 9 +
 branches/Ndi/tools/perftests/user/get_clock.c | 185 +
 branches/Ndi/tools/perftests/user/get_clock.h | 79 +
 branches/Ndi/tools/perftests/user/getopt.c | 250 +
 branches/Ndi/tools/perftests/user/getopt.h | 117 +
 branches/Ndi/tools/perftests/user/perf_defs.h | 155 +
 .../Ndi/tools/perftests/user/perf_utils.c | 207 +
 .../Ndi/tools/perftests/user/read_bw/SOURCES | 28 +
 .../Ndi/tools/perftests/user/read_bw/makefile | 7 +
 .../tools/perftests/user/read_bw/read_bw.c | 785 +
 .../tools/perftests/user/read_bw/read_bw.rc | 47 +
 .../Ndi/tools/perftests/user/read_lat/SOURCES | 28 +
 .../tools/perftests/user/read_lat/makefile | 7 +
 .../tools/perftests/user/read_lat/read_lat.c | 807 +
 .../tools/perftests/user/read_lat/read_lat.rc | 47 +
 .../Ndi/tools/perftests/user/send_bw/SOURCES | 28 +
 .../Ndi/tools/perftests/user/send_bw/makefile | 7 +
 .../tools/perftests/user/send_bw/send_bw.c | 1167 ++
 .../tools/perftests/user/send_bw/send_bw.rc | 47 +
 .../Ndi/tools/perftests/user/send_lat/SOURCES | 28 +
 .../tools/perftests/user/send_lat/makefile | 7 +
 .../tools/perftests/user/send_lat/send_lat.c | 1022 ++
 .../tools/perftests/user/send_lat/send_lat.rc | 47 +
 .../Ndi/tools/perftests/user/write_bw/SOURCES | 28 +
 .../tools/perftests/user/write_bw/makefile | 7 +
 .../tools/perftests/user/write_bw/write_bw.c | 878 +
 .../tools/perftests/user/write_bw/write_bw.rc | 47 +
 .../tools/perftests/user/write_lat/SOURCES | 28 +
 .../tools/perftests/user/write_lat/makefile | 7 +
 .../perftests/user/write_lat/write_lat.c | 831 +
 .../perftests/user/write_lat/write_lat.rc | 47 +
 branches/Ndi/tools/spark/dirs | 2 +
 branches/Ndi/tools/spark/user/SOURCES | 57 +
 branches/Ndi/tools/spark/user/makefile | 7 +
 branches/Ndi/tools/spark/user/spark.cpp | 3498 ++++
 branches/Ndi/tools/spark/user/spark.rc | 47 +
 branches/Ndi/tools/vstat/dirs | 2 +
 branches/Ndi/tools/vstat/user/SOURCES | 23 +
 branches/Ndi/tools/vstat/user/makefile | 7 +
 branches/Ndi/tools/vstat/user/vstat.rc | 47 +
 branches/Ndi/tools/vstat/user/vstat_main.c | 642 +
 branches/Ndi/tools/wsdinstall/dirs | 2 +
 .../Ndi/tools/wsdinstall/user/InstallSP.sln | 21 +
 branches/Ndi/tools/wsdinstall/user/SOURCES | 23 +
 .../Ndi/tools/wsdinstall/user/installsp.c | 744 +
 .../Ndi/tools/wsdinstall/user/installsp.rc | 47 +
 branches/Ndi/tools/wsdinstall/user/makefile | 7 +
 .../ulp/dapl/dapl/common/dapl_adapter_util.h | 299 +
 .../ulp/dapl/dapl/common/dapl_cno_create.c | 104 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cno_free.c | 89 +
 .../dapl/dapl/common/dapl_cno_modify_agent.c | 82 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cno_query.c | 97 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cno_util.c | 195 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cno_util.h | 56 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cno_wait.c | 133 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cookie.c | 400 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cookie.h | 71 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cr_accept.c | 321 +
 .../ulp/dapl/dapl/common/dapl_cr_callback.c | 615 +
 .../ulp/dapl/dapl/common/dapl_cr_handoff.c | 78 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cr_query.c | 104 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cr_reject.c | 141 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cr_util.c | 115 +
 .../Ndi/ulp/dapl/dapl/common/dapl_cr_util.h | 58 +
 .../Ndi/ulp/dapl/dapl/common/dapl_debug.c | 91 +
 .../ulp/dapl/dapl/common/dapl_ep_connect.c | 373 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_create.c | 344 +
 .../ulp/dapl/dapl/common/dapl_ep_disconnect.c | 152 +
 .../dapl/dapl/common/dapl_ep_dup_connect.c | 129 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_free.c | 203 +
 .../ulp/dapl/dapl/common/dapl_ep_get_status.c | 119 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_modify.c | 756 +
 .../dapl/dapl/common/dapl_ep_post_rdma_read.c | 105 +
 .../dapl/common/dapl_ep_post_rdma_write.c | 104 +
 .../ulp/dapl/dapl/common/dapl_ep_post_recv.c | 133 +
 .../ulp/dapl/dapl/common/dapl_ep_post_send.c | 100 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_query.c | 124 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_reset.c | 106 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_util.c | 559 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ep_util.h | 79 +
 .../dapl/common/dapl_evd_clear_unwaitable.c | 81 +
 .../dapl/common/dapl_evd_connection_callb.c | 263 +
 .../common/dapl_evd_cq_async_error_callb.c | 92 +
 .../ulp/dapl/dapl/common/dapl_evd_create.c | 193 +
 .../ulp/dapl/dapl/common/dapl_evd_dequeue.c | 145 +
 .../ulp/dapl/dapl/common/dapl_evd_disable.c | 79 +
 .../ulp/dapl/dapl/common/dapl_evd_dto_callb.c | 168 +
 .../ulp/dapl/dapl/common/dapl_evd_enable.c | 93 +
 .../Ndi/ulp/dapl/dapl/common/dapl_evd_free.c | 126 +
 .../dapl/dapl/common/dapl_evd_modify_cno.c | 116 +
 .../ulp/dapl/dapl/common/dapl_evd_post_se.c | 103 +
 .../common/dapl_evd_qp_async_error_callb.c | 127 +
 .../Ndi/ulp/dapl/dapl/common/dapl_evd_query.c | 117 +
 .../ulp/dapl/dapl/common/dapl_evd_resize.c | 193 +
 .../dapl/common/dapl_evd_set_unwaitable.c | 104 +
 .../common/dapl_evd_un_async_error_callb.c | 96 +
 .../Ndi/ulp/dapl/dapl/common/dapl_evd_util.c | 1457 ++
 .../Ndi/ulp/dapl/dapl/common/dapl_evd_util.h | 150 +
 .../Ndi/ulp/dapl/dapl/common/dapl_evd_wait.c | 281 +
 .../dapl/common/dapl_get_consumer_context.c | 101 +
 .../dapl/dapl/common/dapl_get_handle_type.c | 88 +
 branches/Ndi/ulp/dapl/dapl/common/dapl_hash.c | 537 +
 branches/Ndi/ulp/dapl/dapl/common/dapl_hash.h | 103 +
 .../Ndi/ulp/dapl/dapl/common/dapl_hca_util.c | 175 +
 .../Ndi/ulp/dapl/dapl/common/dapl_hca_util.h | 59 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ia_close.c | 96 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ia_open.c | 529 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ia_query.c | 222 +
 .../Ndi/ulp/dapl/dapl/common/dapl_ia_util.c | 1244 ++
 .../Ndi/ulp/dapl/dapl/common/dapl_ia_util.h | 147 +
 branches/Ndi/ulp/dapl/dapl/common/dapl_init.h | 56 +
 .../Ndi/ulp/dapl/dapl/common/dapl_llist.c | 380 +
 .../ulp/dapl/dapl/common/dapl_lmr_create.c | 537 +
 .../Ndi/ulp/dapl/dapl/common/dapl_lmr_free.c | 134 +
 .../Ndi/ulp/dapl/dapl/common/dapl_lmr_query.c | 93 +
 .../Ndi/ulp/dapl/dapl/common/dapl_lmr_util.c | 109 +
 .../Ndi/ulp/dapl/dapl/common/dapl_lmr_util.h | 101 +
 .../Ndi/ulp/dapl/dapl/common/dapl_mr_util.c | 80 +
 .../Ndi/ulp/dapl/dapl/common/dapl_mr_util.h | 92 +
 .../Ndi/ulp/dapl/dapl/common/dapl_provider.c | 401 +
 .../Ndi/ulp/dapl/dapl/common/dapl_provider.h | 108 +
 .../ulp/dapl/dapl/common/dapl_psp_create.c | 213 +
 .../dapl/dapl/common/dapl_psp_create_any.c | 213 +
 .../Ndi/ulp/dapl/dapl/common/dapl_psp_free.c | 149 +
 .../Ndi/ulp/dapl/dapl/common/dapl_psp_query.c | 104 +
 .../Ndi/ulp/dapl/dapl/common/dapl_pz_create.c | 107 +
 .../Ndi/ulp/dapl/dapl/common/dapl_pz_free.c | 94 +
 .../Ndi/ulp/dapl/dapl/common/dapl_pz_query.c | 95 +
 .../Ndi/ulp/dapl/dapl/common/dapl_pz_util.c | 115 +
 .../Ndi/ulp/dapl/dapl/common/dapl_pz_util.h | 50 +
 .../dapl/dapl/common/dapl_ring_buffer_util.c | 348 +
 .../dapl/dapl/common/dapl_ring_buffer_util.h | 77 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rmr_bind.c | 348 +
 .../ulp/dapl/dapl/common/dapl_rmr_create.c | 110 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rmr_free.c | 99 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rmr_query.c | 95 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rmr_util.c | 96 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rmr_util.h | 108 +
 .../ulp/dapl/dapl/common/dapl_rsp_create.c | 218 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rsp_free.c | 161 +
 .../Ndi/ulp/dapl/dapl/common/dapl_rsp_query.c | 103 +
 .../dapl/common/dapl_set_consumer_context.c | 89 +
 .../Ndi/ulp/dapl/dapl/common/dapl_sp_util.c | 316 +
 .../Ndi/ulp/dapl/dapl/common/dapl_sp_util.h | 62 +
 branches/Ndi/ulp/dapl/dapl/dirs | 1 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_cm.c | 1970 +++
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_dto.h | 327 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h | 91 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c | 392 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h | 52 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_qp.c | 681 +
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.c | 2477 +++
 .../Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.h | 390 +
 branches/Ndi/ulp/dapl/dapl/include/dapl.h | 1042 ++
 .../Ndi/ulp/dapl/dapl/include/dapl_debug.h | 128 +
 .../ulp/dapl/dapl/include/dapl_ipoib_names.h | 261 +
 .../Ndi/ulp/dapl/dapl/include/dapl_vendor.h | 125 +
 .../Ndi/ulp/dapl/dapl/udapl/Makefile.cygwin | 396 +
 branches/Ndi/ulp/dapl/dapl/udapl/Makefile.org | 200 +
 .../Ndi/ulp/dapl/dapl/udapl/Makefile.orig | 305 +
 branches/Ndi/ulp/dapl/dapl/udapl/SOURCES | 39 +
 branches/Ndi/ulp/dapl/dapl/udapl/dapl_init.c | 318 +
 .../ulp/dapl/dapl/udapl/dapl_name_service.c | 333 +
 .../ulp/dapl/dapl/udapl/dapl_name_service.h | 70 +
 .../Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.c | 336 +
 .../Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.h | 46 +
 .../Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.c | 642 +
 .../Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.h | 553 +
 branches/Ndi/ulp/dapl/dapl/udapl/makefile | 7 +
 branches/Ndi/ulp/dapl/dapl/udapl/makefile.wnd | 283 +
 .../Ndi/ulp/dapl/dapl/udapl/udapl_exports.src | 11 +
 .../Ndi/ulp/dapl/dapl/udapl/udapl_sources.c | 88 +
 .../ulp/dapl/dapl/udapl/windows/dapl_osd.c | 253 +
 .../ulp/dapl/dapl/udapl/windows/dapl_osd.h | 538 +
 .../ulp/dapl/dapl/udapl/windows/dapl_win.def | 61 +
 .../ulp/dapl/dapl/udapl/windows/dapllib.rc | 50 +
 .../Ndi/ulp/dapl/dat/common/dat_dictionary.c | 467 +
 .../Ndi/ulp/dapl/dat/common/dat_dictionary.h | 115 +
 branches/Ndi/ulp/dapl/dat/common/dat_dr.c | 381 +
 branches/Ndi/ulp/dapl/dat/common/dat_dr.h | 105 +
 branches/Ndi/ulp/dapl/dat/common/dat_init.c | 161 +
 branches/Ndi/ulp/dapl/dat/common/dat_init.h | 77 +
 branches/Ndi/ulp/dapl/dat/common/dat_sr.c | 412 +
 branches/Ndi/ulp/dapl/dat/common/dat_sr.h | 106 +
 .../Ndi/ulp/dapl/dat/common/dat_strerror.c | 600 +
 branches/Ndi/ulp/dapl/dat/dirs | 1 +
 branches/Ndi/ulp/dapl/dat/include/dat/dat.h | 958 ++
 .../Ndi/ulp/dapl/dat/include/dat/dat_error.h | 333 +
 .../dat/include/dat/dat_platform_specific.h | 184 +
 .../dapl/dat/include/dat/dat_redirection.h | 333 +
 .../ulp/dapl/dat/include/dat/dat_registry.h | 103 +
 .../dat/include/dat/dat_vendor_specific.h | 144 +
 branches/Ndi/ulp/dapl/dat/include/dat/kdat.h | 240 +
 .../ulp/dapl/dat/include/dat/kdat_config.h | 78 +
 .../dapl/dat/include/dat/kdat_redirection.h | 163 +
 .../dat/include/dat/kdat_vendor_specific.h | 112 +
 branches/Ndi/ulp/dapl/dat/include/dat/udat.h | 332 +
 .../ulp/dapl/dat/include/dat/udat_config.h | 76 +
 .../dapl/dat/include/dat/udat_redirection.h | 225 +
 .../dat/include/dat/udat_vendor_specific.h | 143 +
 branches/Ndi/ulp/dapl/dat/kdat/Makefile | 113 +
 branches/Ndi/ulp/dapl/dat/kdat/dat_kdapl.c | 101 +
 branches/Ndi/ulp/dapl/dat/kdat/dat_module.c | 104 +
 .../Ndi/ulp/dapl/dat/kdat/linux/dat_osd.c | 262 +
 .../Ndi/ulp/dapl/dat/kdat/linux/dat_osd.h | 258 +
 .../Ndi/ulp/dapl/dat/udat/Makefile.cygwin | 271 +
 branches/Ndi/ulp/dapl/dat/udat/Makefile.org | 86 +
 branches/Ndi/ulp/dapl/dat/udat/Makefile.orig | 114 +
 branches/Ndi/ulp/dapl/dat/udat/SOURCES | 25 +
 branches/Ndi/ulp/dapl/dat/udat/dat.conf | 7 +
 branches/Ndi/ulp/dapl/dat/udat/ibhosts | 7 +
 .../Ndi/ulp/dapl/dat/udat/linux/dat-1.1.spec | 80 +
 .../Ndi/ulp/dapl/dat/udat/linux/dat_osd.c | 179 +
 .../Ndi/ulp/dapl/dat/udat/linux/dat_osd.h | 394 +
 branches/Ndi/ulp/dapl/dat/udat/makefile | 7 +
 branches/Ndi/ulp/dapl/dat/udat/makefile.wnd | 144 +
 branches/Ndi/ulp/dapl/dat/udat/udat.c | 420 +
 .../Ndi/ulp/dapl/dat/udat/udat_exports.src | 15 +
 branches/Ndi/ulp/dapl/dat/udat/udat_sources.c | 10 +
 .../Ndi/ulp/dapl/dat/udat/udat_sr_parser.c | 1551 ++
 .../Ndi/ulp/dapl/dat/udat/udat_sr_parser.h | 59 +
 .../Ndi/ulp/dapl/dat/udat/windows/dat_osd.c | 149 +
 .../Ndi/ulp/dapl/dat/udat/windows/dat_osd.h | 399 +
 .../ulp/dapl/dat/udat/windows/dat_osd_sr.h | 43 +
 .../Ndi/ulp/dapl/dat/udat/windows/dat_win.def | 9 +
 branches/Ndi/ulp/dapl/dirs | 4 +
 .../Ndi/ulp/dapl/doc/dapl_coding_style.txt | 264 +
 .../ulp/dapl/doc/dapl_end_point_design.txt | 908 +
 branches/Ndi/ulp/dapl/doc/dapl_environ.txt | 42 +
 .../Ndi/ulp/dapl/doc/dapl_event_design.txt | 875 +
 .../doc/dapl_memory_management_design.txt | 173 +
 .../Ndi/ulp/dapl/doc/dapl_registry_design.txt | 631 +
 .../dapl/doc/dapl_shared_memory_design.txt | 867 +
 .../dapl/doc/dapl_vendor_specific_changes.txt | 394 +
 branches/Ndi/ulp/dapl/doc/dat.conf | 11 +
 branches/Ndi/ulp/dapl/doc/dat_environ.txt | 45 +
 branches/Ndi/ulp/dapl/doc/ibhosts | 3 +
 branches/Ndi/ulp/dapl/doc/mv_dapl_readme.txt | 226 +
 .../Ndi/ulp/dapl/doc/mv_dapl_relnotes.txt | 167 +
 branches/Ndi/ulp/dapl/test/dirs | 1 +
 .../ulp/dapl/test/udapl/dapltest/.DT_defaults | 12 +
 .../ulp/dapl/test/udapl/dapltest/.DT_onetest | 35 +
 .../ulp/dapl/test/udapl/dapltest/.DT_perf.csh | 42 +
 .../test/udapl/dapltest/DaplTest_how_2.txt | 292 +
 .../dapl/test/udapl/dapltest/Makefile.cygwin | 250 +
 .../ulp/dapl/test/udapl/dapltest/Makefile.org | 54 +
 .../dapl/test/udapl/dapltest/Makefile.orig | 134 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/SOURCES | 63 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/bw.sh | 22 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/cl.sh | 30 +
 .../ulp/dapl/test/udapl/dapltest/dapl_bpool.c | 348 +
 .../ulp/dapl/test/udapl/dapltest/dapl_bpool.h | 61 +
 .../dapl/test/udapl/dapltest/dapl_client.c | 603 +
 .../test/udapl/dapltest/dapl_client_info.c | 51 +
 .../test/udapl/dapltest/dapl_client_info.h | 44 +
 .../ulp/dapl/test/udapl/dapltest/dapl_cnxn.c | 68 +
 .../ulp/dapl/test/udapl/dapltest/dapl_cnxn.h | 35 +
 .../dapl/test/udapl/dapltest/dapl_common.h | 47 +
 .../dapl/test/udapl/dapltest/dapl_endian.c | 96 +
 .../dapl/test/udapl/dapltest/dapl_fft_cmd.c | 356 +
 .../dapl/test/udapl/dapltest/dapl_fft_cmd.h | 117 +
 .../test/udapl/dapltest/dapl_fft_connmgt.c | 120 +
 .../test/udapl/dapltest/dapl_fft_dataxfer.c | 153 +
 .../udapl/dapltest/dapl_fft_dataxfer_client.c | 133 +
 .../test/udapl/dapltest/dapl_fft_endpoint.c | 281 +
 .../test/udapl/dapltest/dapl_fft_hwconn.c | 225 +
 .../dapl/test/udapl/dapltest/dapl_fft_mem.c | 237 +
 .../dapl/test/udapl/dapltest/dapl_fft_pz.c | 254 +
 .../test/udapl/dapltest/dapl_fft_queryinfo.c | 621 +
 .../dapl/test/udapl/dapltest/dapl_fft_test.c | 94 +
 .../dapl/test/udapl/dapltest/dapl_fft_util.c | 357 +
 .../dapl/test/udapl/dapltest/dapl_fft_util.h | 85 +
 .../ulp/dapl/test/udapl/dapltest/dapl_funcs.h | 58 +
 .../dapl/test/udapl/dapltest/dapl_getopt.c | 179 +
 .../dapl/test/udapl/dapltest/dapl_getopt.h | 48 +
 .../ulp/dapl/test/udapl/dapltest/dapl_limit.c | 1529 ++
 .../dapl/test/udapl/dapltest/dapl_limit_cmd.c | 230 +
 .../dapl/test/udapl/dapltest/dapl_limit_cmd.h | 62 +
 .../ulp/dapl/test/udapl/dapltest/dapl_main.c | 196 +
 .../ulp/dapl/test/udapl/dapltest/dapl_mdep.c | 943 ++
 .../ulp/dapl/test/udapl/dapltest/dapl_mdep.h | 333 +
 .../dapl/test/udapl/dapltest/dapl_memlist.c | 133 +
 .../dapl/test/udapl/dapltest/dapl_memlist.h | 54 +
 .../dapl/test/udapl/dapltest/dapl_netaddr.c | 149 +
 .../dapl/test/udapl/dapltest/dapl_params.c | 296 +
 .../dapl/test/udapl/dapltest/dapl_params.h | 66 +
 .../udapl/dapltest/dapl_performance_client.c | 545 +
 .../udapl/dapltest/dapl_performance_cmd.c | 355 +
 .../udapl/dapltest/dapl_performance_cmd.h | 65 +
 .../udapl/dapltest/dapl_performance_server.c | 411 +
 .../udapl/dapltest/dapl_performance_stats.c | 389 +
 .../udapl/dapltest/dapl_performance_stats.h | 61 +
 .../udapl/dapltest/dapl_performance_test.h | 86 +
 .../udapl/dapltest/dapl_performance_util.c | 652 +
 .../ulp/dapl/test/udapl/dapltest/dapl_proto.h | 611 +
 .../dapl/test/udapl/dapltest/dapl_quit_cmd.c | 148 +
 .../dapl/test/udapl/dapltest/dapl_quit_cmd.h | 41 +
 .../dapl/test/udapl/dapltest/dapl_server.c | 842 +
 .../test/udapl/dapltest/dapl_server_cmd.c | 124 +
 .../test/udapl/dapltest/dapl_server_cmd.h | 42 +
 .../test/udapl/dapltest/dapl_server_info.c | 50 +
 .../test/udapl/dapltest/dapl_server_info.h | 55 +
 .../dapl/test/udapl/dapltest/dapl_test_data.c | 59 +
 .../dapl/test/udapl/dapltest/dapl_test_data.h | 106 +
 .../dapl/test/udapl/dapltest/dapl_test_util.c | 743 +
 .../dapl/test/udapl/dapltest/dapl_thread.c | 132 +
 .../udapl/dapltest/dapl_transaction_cmd.c | 543 +
 .../udapl/dapltest/dapl_transaction_cmd.h | 66 +
 .../udapl/dapltest/dapl_transaction_stats.c | 170 +
 .../udapl/dapltest/dapl_transaction_stats.h | 47 +
 .../udapl/dapltest/dapl_transaction_test.c | 1922 +++
 .../udapl/dapltest/dapl_transaction_test.h | 99 +
 .../udapl/dapltest/dapl_transaction_util.c | 730 +
 .../ulp/dapl/test/udapl/dapltest/dapl_util.c | 297 +
 .../dapl/test/udapl/dapltest/dapl_version.h | 37 +
 .../ulp/dapl/test/udapl/dapltest/lat_block.sh | 22 +
 .../ulp/dapl/test/udapl/dapltest/lat_poll.sh | 22 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/lim.sh | 17 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/makefile | 7 +
 .../ulp/dapl/test/udapl/dapltest/makefile.wnd | 179 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/quit.sh | 19 +
 .../ulp/dapl/test/udapl/dapltest/regress.sh | 66 +
 .../Ndi/ulp/dapl/test/udapl/dapltest/srv.sh | 15 +
 branches/Ndi/ulp/dapl/test/udapl/dirs | 1 +
 branches/Ndi/ulp/dirs | 7 +
 branches/Ndi/ulp/inic/dirs | 2 +
 branches/Ndi/ulp/inic/kernel/SOURCES | 75 +
 branches/Ndi/ulp/inic/kernel/inic.rc | 47 +
 branches/Ndi/ulp/inic/kernel/makefile | 7 +
 branches/Ndi/ulp/inic/kernel/netvnic.inf | 146 +
 branches/Ndi/ulp/inic/kernel/vnic_adapter.c | 1120 ++
 branches/Ndi/ulp/inic/kernel/vnic_adapter.h | 227 +
 branches/Ndi/ulp/inic/kernel/vnic_config.h | 240 +
 branches/Ndi/ulp/inic/kernel/vnic_control.c | 2007 +++
 branches/Ndi/ulp/inic/kernel/vnic_control.h | 123 +
 .../Ndi/ulp/inic/kernel/vnic_controlpkt.h | 286 +
 branches/Ndi/ulp/inic/kernel/vnic_data.c | 1417 ++
 branches/Ndi/ulp/inic/kernel/vnic_data.h | 204 +
 branches/Ndi/ulp/inic/kernel/vnic_debug.h | 105 +
 branches/Ndi/ulp/inic/kernel/vnic_driver.c | 1922 +++
 branches/Ndi/ulp/inic/kernel/vnic_driver.h | 187 +
 branches/Ndi/ulp/inic/kernel/vnic_ib.c | 890 +
 branches/Ndi/ulp/inic/kernel/vnic_ib.h | 209 +
 branches/Ndi/ulp/inic/kernel/vnic_netpath.c | 217 +
 branches/Ndi/ulp/inic/kernel/vnic_trailer.h | 110 +
 branches/Ndi/ulp/inic/kernel/vnic_util.h | 82 +
 branches/Ndi/ulp/inic/kernel/vnic_viport.c | 1098 ++
 branches/Ndi/ulp/inic/kernel/vnic_viport.h | 287 +
 branches/Ndi/ulp/ipoib/dirs | 2 +
 branches/Ndi/ulp/ipoib/ip_stats.h | 150 +
 branches/Ndi/ulp/ipoib/kernel/SOURCES | 48 +
 branches/Ndi/ulp/ipoib/kernel/ipoib.rc | 48 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.c | 1337 ++
 branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.h | 424 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_debug.h | 295 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_driver.c | 2528 +++
 branches/Ndi/ulp/ipoib/kernel/ipoib_driver.h | 129 +
 .../Ndi/ulp/ipoib/kernel/ipoib_endpoint.c | 456 +
 .../Ndi/ulp/ipoib/kernel/ipoib_endpoint.h | 158 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.c | 495 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.h | 45 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_log.mc | 285 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_port.c | 5659 +++++++
 branches/Ndi/ulp/ipoib/kernel/ipoib_port.h | 620 +
 branches/Ndi/ulp/ipoib/kernel/ipoib_xfr_mgr.h | 537 +
 branches/Ndi/ulp/ipoib/kernel/makefile | 7 +
 branches/Ndi/ulp/ipoib/kernel/netipoib.inf | 196 +
 branches/Ndi/ulp/opensm/dirs | 2 +
 .../Ndi/ulp/opensm/user/README.opensm-build | 24 +
 branches/Ndi/ulp/opensm/user/TODO | 16 +
 branches/Ndi/ulp/opensm/user/config.h | 71 +
 branches/Ndi/ulp/opensm/user/dirs | 7 +
 .../ulp/opensm/user/doc/OpenSM_PKey_Mgr.txt | 79 +
 .../ulp/opensm/user/doc/OpenSM_RN_0_3_1.pdf | Bin 0 -> 83938 bytes
 .../Ndi/ulp/opensm/user/doc/OpenSM_UM_0_3.pdf | Bin 0 -> 223001 bytes
 .../ulp/opensm/user/doc/current-routing.txt | 202 +
 .../ulp/opensm/user/doc/modular-routing.txt | 78 +
 .../doc/opensm_release_notes_openib-2.0.5.txt | 487 +
 .../Ndi/ulp/opensm/user/doc/qos-config.txt | 45 +
 .../Ndi/ulp/opensm/user/ibtrapgen/Makefile | 7 +
 .../Ndi/ulp/opensm/user/ibtrapgen/SOURCES | 67 +
 .../Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.c | 442 +
 .../Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.h | 313 +
 branches/Ndi/ulp/opensm/user/ibtrapgen/main.c | 465 +
 .../opensm/user/include/complib/cl_byteswap.h | 547 +
 .../user/include/complib/cl_dispatcher.h | 660 +
 .../user/include/complib/cl_event_wheel.h | 493 +
 .../user/include/complib/cl_signal_osd.h | 127 +
 .../ulp/opensm/user/include/iba/ib_types.h | 10403 ++++++++++++
 .../user/include/iba/ib_types_extended.h | 2585 +++
 .../user/include/opensm/cl_dispatcher.h | 673 +
 .../user/include/opensm/cl_event_wheel.h | 497 +
 .../user/include/opensm/osm_attrib_req.h | 119 +
 .../ulp/opensm/user/include/opensm/osm_base.h | 811 +
 .../opensm/user/include/opensm/osm_console.h | 58 +
 .../ulp/opensm/user/include/opensm/osm_db.h | 455 +
 .../opensm/user/include/opensm/osm_db_pack.h | 255 +
 .../opensm/user/include/opensm/osm_drop_mgr.h | 259 +
 .../opensm/user/include/opensm/osm_errors.h | 181 +
 .../user/include/opensm/osm_ft_config_ctrl.h | 227 +
 .../opensm/user/include/opensm/osm_fwd_tbl.h | 388 +
 .../opensm/user/include/opensm/osm_helper.h | 620 +
 .../opensm/user/include/opensm/osm_inform.h | 305 +
 .../opensm/user/include/opensm/osm_lid_mgr.h | 322 +
 .../user/include/opensm/osm_lin_fwd_rcv.h | 255 +
 .../include/opensm/osm_lin_fwd_rcv_ctrl.h | 233 +
 .../user/include/opensm/osm_lin_fwd_tbl.h | 379 +
 .../opensm/user/include/opensm/osm_link_mgr.h | 271 +
 .../ulp/opensm/user/include/opensm/osm_log.h | 474 +
 .../opensm/user/include/opensm/osm_mad_pool.h | 405 +
 .../ulp/opensm/user/include/opensm/osm_madw.h | 1160 ++
 .../opensm/user/include/opensm/osm_matrix.h | 454 +
 .../include/opensm/osm_mcast_config_ctrl.h | 288 +
 .../user/include/opensm/osm_mcast_fwd_rcv.h | 259 +
 .../include/opensm/osm_mcast_fwd_rcv_ctrl.h | 235 +
 .../user/include/opensm/osm_mcast_mgr.h | 339 +
 .../user/include/opensm/osm_mcast_tbl.h | 484 +
 .../opensm/user/include/opensm/osm_mcm_info.h | 237 +
 .../opensm/user/include/opensm/osm_mcm_port.h | 269 +
 .../opensm/user/include/opensm/osm_mcmember.h | 157 +
 .../opensm/user/include/opensm/osm_msgdef.h | 208 +
 .../opensm/user/include/opensm/osm_mtl_bind.h | 144 +
 .../opensm/user/include/opensm/osm_mtree.h | 378 +
 .../user/include/opensm/osm_multicast.h | 769 +
 .../ulp/opensm/user/include/opensm/osm_node.h | 957 ++
 .../user/include/opensm/osm_node_desc_rcv.h | 256 +
 .../include/opensm/osm_node_desc_rcv_ctrl.h | 232 +
 .../user/include/opensm/osm_node_info_rcv.h | 305 +
 .../include/opensm/osm_node_info_rcv_ctrl.h | 261 +
 .../opensm/user/include/opensm/osm_opensm.h | 440 +
 .../user/include/opensm/osm_partition.h | 256 +
 .../ulp/opensm/user/include/opensm/osm_path.h | 267 +
 .../user/include/opensm/osm_pi_config_ctrl.h | 286 +
 .../ulp/opensm/user/include/opensm/osm_pkey.h | 755 +
 .../include/opensm/osm_pkey_config_ctrl.h | 286 +
 .../opensm/user/include/opensm/osm_pkey_mgr.h | 90 +
 .../opensm/user/include/opensm/osm_pkey_rcv.h | 253 +
 .../user/include/opensm/osm_pkey_rcv_ctrl.h | 248 +
 .../ulp/opensm/user/include/opensm/osm_port.h | 2122 +++
 .../user/include/opensm/osm_port_info_rcv.h | 274 +
 .../include/opensm/osm_port_info_rcv_ctrl.h | 261 +
 .../user/include/opensm/osm_port_profile.h | 291 +
 .../user/include/opensm/osm_rand_fwd_tbl.h | 354 +
 .../user/include/opensm/osm_remote_sm.h | 212 +
 .../ulp/opensm/user/include/opensm/osm_req.h | 354 +
 .../opensm/user/include/opensm/osm_req_ctrl.h | 228 +
 .../ulp/opensm/user/include/opensm/osm_resp.h | 279 +
 .../opensm/user/include/opensm/osm_router.h | 323 +
 .../ulp/opensm/user/include/opensm/osm_sa.h | 500 +
 .../include/opensm/osm_sa_class_port_info.h | 269 +
 .../opensm/osm_sa_class_port_info_ctrl.h | 261 +
 .../include/opensm/osm_sa_guidinfo_record.h | 279 +
 .../opensm/osm_sa_guidinfo_record_ctrl.h | 230 +
 .../user/include/opensm/osm_sa_informinfo.h | 299 +
 .../include/opensm/osm_sa_informinfo_ctrl.h | 261 +
 .../user/include/opensm/osm_sa_lft_record.h | 281 +
 .../include/opensm/osm_sa_lft_record_ctrl.h | 233 +
 .../user/include/opensm/osm_sa_link_record.h | 276 +
 .../include/opensm/osm_sa_link_record_ctrl.h | 261 +
 .../user/include/opensm/osm_sa_mad_ctrl.h | 352 +
 .../include/opensm/osm_sa_mcmember_record.h | 420 +
 .../opensm/osm_sa_mcmember_record_ctrl.h | 262 +
 .../user/include/opensm/osm_sa_mft_record.h | 280 +
 .../include/opensm/osm_sa_mft_record_ctrl.h | 231 +
 .../include/opensm/osm_sa_multipath_record.h | 273 +
 .../opensm/osm_sa_multipath_record_ctrl.h | 260 +
 .../user/include/opensm/osm_sa_node_record.h | 275 +
 .../include/opensm/osm_sa_node_record_ctrl.h | 231 +
 .../user/include/opensm/osm_sa_path_record.h | 274 +
 .../include/opensm/osm_sa_path_record_ctrl.h | 261 +
 .../user/include/opensm/osm_sa_pkey_record.h | 268 +
 .../include/opensm/osm_sa_pkey_record_ctrl.h | 218 +
 .../include/opensm/osm_sa_portinfo_record.h | 280 +
 .../opensm/osm_sa_portinfo_record_ctrl.h | 231 +
 .../user/include/opensm/osm_sa_response.h | 255 +
 .../include/opensm/osm_sa_service_record.h | 298 +
 .../opensm/osm_sa_service_record_ctrl.h | 230 +
 .../user/include/opensm/osm_sa_slvl_record.h | 281 +
 .../include/opensm/osm_sa_slvl_record_ctrl.h | 231 +
 .../include/opensm/osm_sa_sminfo_record.h | 262 +
 .../opensm/osm_sa_sminfo_record_ctrl.h | 231 +
 .../include/opensm/osm_sa_sw_info_record.h | 306 +
 .../opensm/osm_sa_sw_info_record_ctrl.h | 259 +
 .../user/include/opensm/osm_sa_vlarb_record.h | 280 +
 .../include/opensm/osm_sa_vlarb_record_ctrl.h | 231 +
 .../opensm/user/include/opensm/osm_service.h | 241 +
 .../user/include/opensm/osm_slvl_map_rcv.h | 265 +
 .../include/opensm/osm_slvl_map_rcv_ctrl.h | 261 +
 .../ulp/opensm/user/include/opensm/osm_sm.h | 566 +
 .../include/opensm/osm_sm_info_get_ctrl.h | 294 +
 .../user/include/opensm/osm_sm_mad_ctrl.h | 339 +
 .../user/include/opensm/osm_sm_state_mgr.h | 353 +
 .../user/include/opensm/osm_sminfo_rcv.h | 291 +
 .../user/include/opensm/osm_sminfo_rcv_ctrl.h | 232 +
 .../user/include/opensm/osm_state_mgr.h | 518 +
 .../user/include/opensm/osm_state_mgr_ctrl.h | 233 +
 .../opensm/user/include/opensm/osm_stats.h | 125 +
 .../opensm/user/include/opensm/osm_subnet.h | 1161 ++
 .../include/opensm/osm_subnet_config_ctrl.h | 287 +
 .../user/include/opensm/osm_sw_info_rcv.h | 304 +
 .../include/opensm/osm_sw_info_rcv_ctrl.h | 261 +
 .../user/include/opensm/osm_sweep_fail_ctrl.h | 239 +
 .../opensm/user/include/opensm/osm_switch.h | 1552 ++
 .../opensm/user/include/opensm/osm_trap_rcv.h | 326 +
 .../user/include/opensm/osm_trap_rcv_ctrl.h | 232 +
 .../user/include/opensm/osm_ts_useraccess.h | 54 +
 .../user/include/opensm/osm_ucast_mgr.h | 328 +
 .../user/include/opensm/osm_ucast_updn.h | 472 +
 .../opensm/user/include/opensm/osm_umadt.h | 143 +
 .../opensm/user/include/opensm/osm_version.h | 63 +
 .../opensm/user/include/opensm/osm_vl15intf.h | 417 +
 .../user/include/opensm/osm_vl_arb_rcv.h | 265 +
 .../user/include/opensm/osm_vl_arb_rcv_ctrl.h | 261 +
 .../Ndi/ulp/opensm/user/include/opensm/st.h | 107 +
 branches/Ndi/ulp/opensm/user/include/unistd.h | 38 +
 .../opensm/user/include/vendor/osm_vendor.h | 78 +
 .../user/include/vendor/osm_vendor_al.h | 371 +
 .../user/include/vendor/osm_vendor_api.h | 519 +
 .../user/include/vendor/osm_vendor_sa_api.h | 879 +
 .../user/include/vendor/osm_vendor_select.h | 76 +
 .../user/include/vendor/winosm_common.h | 246 +
 .../Ndi/ulp/opensm/user/libopensm/Makefile | 7 +
 .../Ndi/ulp/opensm/user/libopensm/SOURCES | 64 +
 .../ulp/opensm/user/libopensm/osm_helper.c | 2534 +++
 .../Ndi/ulp/opensm/user/libopensm/osm_log.c | 327 +
 .../ulp/opensm/user/libopensm/osm_mad_pool.c | 302 +
 .../Ndi/ulp/opensm/user/libvendor/Makefile | 7 +
 .../Ndi/ulp/opensm/user/libvendor/SOURCES | 62 +
 .../ulp/opensm/user/libvendor/osm_vendor_al.c | 1532 ++
 .../opensm/user/libvendor/osm_vendor_mlx_sa.c | 879 +
 .../ulp/opensm/user/libvendor/winosm_common.c | 249 +
 branches/Ndi/ulp/opensm/user/opensm/Makefile | 7 +
 branches/Ndi/ulp/opensm/user/opensm/SOURCES | 164 +
 .../ulp/opensm/user/opensm/cl_dispatcher.c | 405 +
 .../ulp/opensm/user/opensm/cl_event_wheel.c | 663 +
 branches/Ndi/ulp/opensm/user/opensm/main.c | 1158 ++
 .../Ndi/ulp/opensm/user/opensm/opensm.opts | 139 +
 branches/Ndi/ulp/opensm/user/opensm/opensm.rc | 46 +
 branches/Ndi/ulp/opensm/user/opensm/osm.h | 68 +
 branches/Ndi/ulp/opensm/user/opensm/osm.mc | 29 +
 branches/Ndi/ulp/opensm/user/opensm/osm.rc | 2 +
 .../Ndi/ulp/opensm/user/opensm/osm_console.c | 226 +
 .../Ndi/ulp/opensm/user/opensm/osm_db_files.c | 796 +
 .../Ndi/ulp/opensm/user/opensm/osm_db_pack.c | 172 +
 .../Ndi/ulp/opensm/user/opensm/osm_drop_mgr.c | 721 +
 .../Ndi/ulp/opensm/user/opensm/osm_fwd_tbl.c | 115 +
 .../Ndi/ulp/opensm/user/opensm/osm_inform.c | 763 +
 .../Ndi/ulp/opensm/user/opensm/osm_lid_mgr.c | 1486 ++
 .../ulp/opensm/user/opensm/osm_lin_fwd_rcv.c | 160 +
 .../opensm/user/opensm/osm_lin_fwd_rcv_ctrl.c | 125 +
 .../ulp/opensm/user/opensm/osm_lin_fwd_tbl.c | 102 +
 .../Ndi/ulp/opensm/user/opensm/osm_link_mgr.c | 514 +
 .../Ndi/ulp/opensm/user/opensm/osm_matrix.c | 156 +
 .../opensm/user/opensm/osm_mcast_fwd_rcv.c | 181 +
 .../user/opensm/osm_mcast_fwd_rcv_ctrl.c | 125 +
 .../ulp/opensm/user/opensm/osm_mcast_mgr.c | 1726 ++
 .../ulp/opensm/user/opensm/osm_mcast_tbl.c | 302 +
 .../Ndi/ulp/opensm/user/opensm/osm_mcm_info.c | 102 +
 .../Ndi/ulp/opensm/user/opensm/osm_mcm_port.c | 127 +
 .../Ndi/ulp/opensm/user/opensm/osm_mtree.c | 137 +
 .../ulp/opensm/user/opensm/osm_multicast.c | 403 +
 .../Ndi/ulp/opensm/user/opensm/osm_node.c | 335 +
 .../opensm/user/opensm/osm_node_desc_rcv.c | 182 +
 .../user/opensm/osm_node_desc_rcv_ctrl.c | 127 +
 .../opensm/user/opensm/osm_node_info_rcv.c | 1090 ++
 .../user/opensm/osm_node_info_rcv_ctrl.c | 127 +
 .../Ndi/ulp/opensm/user/opensm/osm_opensm.c | 320 +
 .../Ndi/ulp/opensm/user/opensm/osm_pkey.c | 547 +
 .../Ndi/ulp/opensm/user/opensm/osm_pkey_mgr.c | 596 +
 .../Ndi/ulp/opensm/user/opensm/osm_pkey_rcv.c | 220 +
 .../opensm/user/opensm/osm_pkey_rcv_ctrl.c | 116 +
 .../Ndi/ulp/opensm/user/opensm/osm_port.c | 936 ++
 .../opensm/user/opensm/osm_port_info_rcv.c | 870 +
 .../user/opensm/osm_port_info_rcv_ctrl.c | 128 +
 .../Ndi/ulp/opensm/user/opensm/osm_prtn.c | 399 +
 .../ulp/opensm/user/opensm/osm_prtn_config.c | 447 +
 branches/Ndi/ulp/opensm/user/opensm/osm_qos.c | 451 +
 .../ulp/opensm/user/opensm/osm_remote_sm.c | 90 +
 branches/Ndi/ulp/opensm/user/opensm/osm_req.c | 298 +
 .../Ndi/ulp/opensm/user/opensm/osm_req_ctrl.c | 136 +
 .../Ndi/ulp/opensm/user/opensm/osm_resp.c | 227 +
 .../Ndi/ulp/opensm/user/opensm/osm_router.c | 124 +
 branches/Ndi/ulp/opensm/user/opensm/osm_sa.c | 1207 ++
 .../user/opensm/osm_sa_class_port_info.c | 280 +
 .../user/opensm/osm_sa_class_port_info_ctrl.c | 126 +
 .../user/opensm/osm_sa_guidinfo_record.c | 611 +
 .../user/opensm/osm_sa_guidinfo_record_ctrl.c | 124 +
 .../opensm/user/opensm/osm_sa_informinfo.c | 922 +
 .../user/opensm/osm_sa_informinfo_ctrl.c | 154 +
 .../opensm/user/opensm/osm_sa_lft_record.c | 515 +
 .../user/opensm/osm_sa_lft_record_ctrl.c | 124 +
 .../opensm/user/opensm/osm_sa_link_record.c | 777 +
 .../user/opensm/osm_sa_link_record_ctrl.c | 128 +
 .../ulp/opensm/user/opensm/osm_sa_mad_ctrl.c | 651 +
 .../user/opensm/osm_sa_mcmember_record.c | 2383 +++
 .../user/opensm/osm_sa_mcmember_record_ctrl.c | 132 +
 .../opensm/user/opensm/osm_sa_mft_record.c | 547 +
 .../user/opensm/osm_sa_mft_record_ctrl.c | 123 +
 .../user/opensm/osm_sa_multipath_record.c | 1652 ++
 .../opensm/osm_sa_multipath_record_ctrl.c | 128 +
 .../opensm/user/opensm/osm_sa_node_record.c | 600 +
 .../user/opensm/osm_sa_node_record_ctrl.c | 125 +
 .../opensm/user/opensm/osm_sa_path_record.c | 2006 +++
 .../user/opensm/osm_sa_path_record_ctrl.c | 126 +
 .../opensm/user/opensm/osm_sa_pkey_record.c | 590 +
 .../user/opensm/osm_sa_pkey_record_ctrl.c | 113 +
 .../user/opensm/osm_sa_portinfo_record.c | 878 +
 .../user/opensm/osm_sa_portinfo_record_ctrl.c | 125 +
 .../ulp/opensm/user/opensm/osm_sa_response.c | 175 +
 .../user/opensm/osm_sa_service_record.c | 1204 ++
 .../user/opensm/osm_sa_service_record_ctrl.c | 125 +
 .../opensm/user/opensm/osm_sa_slvl_record.c | 557 +
 .../user/opensm/osm_sa_slvl_record_ctrl.c | 126 +
 .../opensm/user/opensm/osm_sa_sminfo_record.c | 583 +
 .../user/opensm/osm_sa_sminfo_record_ctrl.c | 125 +
 .../user/opensm/osm_sa_sw_info_record.c | 535 +
 .../user/opensm/osm_sa_sw_info_record_ctrl.c | 123 +
 .../opensm/user/opensm/osm_sa_vlarb_record.c | 577 +
 .../user/opensm/osm_sa_vlarb_record_ctrl.c | 126 +
 .../Ndi/ulp/opensm/user/opensm/osm_service.c | 202 +
 .../ulp/opensm/user/opensm/osm_slvl_map_rcv.c | 232 +
 .../user/opensm/osm_slvl_map_rcv_ctrl.c | 127 +
 branches/Ndi/ulp/opensm/user/opensm/osm_sm.c | 824 +
 .../ulp/opensm/user/opensm/osm_sm_mad_ctrl.c | 1049 ++
 .../ulp/opensm/user/opensm/osm_sm_state_mgr.c | 872 +
 .../ulp/opensm/user/opensm/osm_sminfo_rcv.c | 768 +
 .../opensm/user/opensm/osm_sminfo_rcv_ctrl.c | 127 +
 .../ulp/opensm/user/opensm/osm_state_mgr.c | 2986 ++++
 .../opensm/user/opensm/osm_state_mgr_ctrl.c | 127 +
 .../Ndi/ulp/opensm/user/opensm/osm_subnet.c | 1272 ++
 .../ulp/opensm/user/opensm/osm_sw_info_rcv.c | 681 +
 .../opensm/user/opensm/osm_sw_info_rcv_ctrl.c | 126 +
 .../opensm/user/opensm/osm_sweep_fail_ctrl.c | 133 +
 .../Ndi/ulp/opensm/user/opensm/osm_switch.c | 550 +
 .../Ndi/ulp/opensm/user/opensm/osm_trap_rcv.c | 771 +
 .../opensm/user/opensm/osm_trap_rcv_ctrl.c | 126 +
 .../ulp/opensm/user/opensm/osm_ucast_file.c | 413 +
 .../ulp/opensm/user/opensm/osm_ucast_ftree.c | 3141 ++++
 .../ulp/opensm/user/opensm/osm_ucast_mgr.c | 1277 ++
 .../ulp/opensm/user/opensm/osm_ucast_updn.c | 1281 ++
 .../Ndi/ulp/opensm/user/opensm/osm_vl15intf.c | 544 +
 .../ulp/opensm/user/opensm/osm_vl_arb_rcv.c | 240 +
 .../opensm/user/opensm/osm_vl_arb_rcv_ctrl.c | 127 +
 branches/Ndi/ulp/opensm/user/opensm/st.c | 625 +
 branches/Ndi/ulp/opensm/user/osmtest/Makefile | 7 +
 branches/Ndi/ulp/opensm/user/osmtest/SOURCES | 73 +
 .../ulp/opensm/user/osmtest/include/error.h | 57 +
 .../opensm/user/osmtest/include/osmt_inform.h | 88 +
 .../osmtest/include/osmt_mtl_regular_qp.h | 187 +
 .../ulp/opensm/user/osmtest/include/osmtest.h | 517 +
 .../user/osmtest/include/osmtest_base.h | 72 +
 .../user/osmtest/include/osmtest_subnet.h | 349 +
 branches/Ndi/ulp/opensm/user/osmtest/main.c | 534 +
 .../Ndi/ulp/opensm/user/osmtest/osmt_inform.c | 961 ++
 .../opensm/user/osmtest/osmt_mtl_regular_qp.c | 447 +
 .../ulp/opensm/user/osmtest/osmt_multicast.c | 3500 ++++
 .../ulp/opensm/user/osmtest/osmt_service.c | 1833 ++
 .../opensm/user/osmtest/osmt_slvl_vl_arb.c | 570 +
 .../Ndi/ulp/opensm/user/osmtest/osmtest.c | 7488 +++++++++
 branches/Ndi/ulp/srp/dirs | 3 +
 branches/Ndi/ulp/srp/kernel/SOURCES | 56 +
 branches/Ndi/ulp/srp/kernel/ib_srp.inf | 135 +
 branches/Ndi/ulp/srp/kernel/ibsrp.rc | 47 +
 branches/Ndi/ulp/srp/kernel/makefile | 7 +
 branches/Ndi/ulp/srp/kernel/srp.h | 370 +
 branches/Ndi/ulp/srp/kernel/srp_aer_req.h | 295 +
 branches/Ndi/ulp/srp/kernel/srp_aer_rsp.h | 163 +
 branches/Ndi/ulp/srp/kernel/srp_cmd.h | 648 +
 branches/Ndi/ulp/srp/kernel/srp_connection.c | 940 ++
 branches/Ndi/ulp/srp/kernel/srp_connection.h | 126 +
 branches/Ndi/ulp/srp/kernel/srp_cred_req.h | 201 +
 branches/Ndi/ulp/srp/kernel/srp_cred_rsp.h | 163 +
 branches/Ndi/ulp/srp/kernel/srp_data.h | 80 +
 branches/Ndi/ulp/srp/kernel/srp_data_path.c | 1538 ++
 branches/Ndi/ulp/srp/kernel/srp_data_path.h | 85 +
 branches/Ndi/ulp/srp/kernel/srp_debug.h | 149 +
 branches/Ndi/ulp/srp/kernel/srp_descriptors.c | 673 +
 branches/Ndi/ulp/srp/kernel/srp_descriptors.h | 140 +
 branches/Ndi/ulp/srp/kernel/srp_driver.c | 945 ++
 branches/Ndi/ulp/srp/kernel/srp_event.c | 82 +
 branches/Ndi/ulp/srp/kernel/srp_event.h | 42 +
 branches/Ndi/ulp/srp/kernel/srp_hba.c | 1114 ++
 branches/Ndi/ulp/srp/kernel/srp_hba.h | 96 +
 branches/Ndi/ulp/srp/kernel/srp_hca.c | 256 +
 branches/Ndi/ulp/srp/kernel/srp_hca.h | 76 +
 branches/Ndi/ulp/srp/kernel/srp_i_logout.h | 163 +
 .../Ndi/ulp/srp/kernel/srp_information_unit.h | 106 +
 branches/Ndi/ulp/srp/kernel/srp_iu_buffer.h | 142 +
 branches/Ndi/ulp/srp/kernel/srp_login_rej.h | 262 +
 branches/Ndi/ulp/srp/kernel/srp_login_req.h | 364 +
 branches/Ndi/ulp/srp/kernel/srp_login_rsp.h | 382 +
 branches/Ndi/ulp/srp/kernel/srp_rsp.h | 726 +
 branches/Ndi/ulp/srp/kernel/srp_session.c | 360 +
 branches/Ndi/ulp/srp/kernel/srp_session.h | 108 +
 branches/Ndi/ulp/srp/kernel/srp_t_logout.h | 200 +
 branches/Ndi/ulp/srp/kernel/srp_tsk_mgmt.h | 276 +
 branches/Ndi/ulp/wsd/dirs | 2 +
 branches/Ndi/ulp/wsd/user/README | 38 +
 branches/Ndi/ulp/wsd/user/SOURCES | 61 +
 branches/Ndi/ulp/wsd/user/extensions.c | 650 +
 branches/Ndi/ulp/wsd/user/ib_cm.c | 981 ++
 branches/Ndi/ulp/wsd/user/ibsp_duplicate.c | 322 +
 branches/Ndi/ulp/wsd/user/ibsp_iblow.c | 1319 ++
 branches/Ndi/ulp/wsd/user/ibsp_ip.c | 645 +
 branches/Ndi/ulp/wsd/user/ibsp_mem.c | 399 +
 branches/Ndi/ulp/wsd/user/ibsp_mem.h | 30 +
 branches/Ndi/ulp/wsd/user/ibsp_mngt.c | 289 +
 branches/Ndi/ulp/wsd/user/ibsp_perfmon.c | 560 +
 branches/Ndi/ulp/wsd/user/ibsp_perfmon.h | 113 +
 branches/Ndi/ulp/wsd/user/ibsp_pnp.c | 442 +
 branches/Ndi/ulp/wsd/user/ibspdebug.c | 291 +
 branches/Ndi/ulp/wsd/user/ibspdebug.h | 267 +
 branches/Ndi/ulp/wsd/user/ibspdefines.h | 90 +
 branches/Ndi/ulp/wsd/user/ibspdll.c | 2326 +++
 branches/Ndi/ulp/wsd/user/ibspdll.def | 6 +
 branches/Ndi/ulp/wsd/user/ibspdll.h | 72 +
 branches/Ndi/ulp/wsd/user/ibspdll.rc | 47 +
 branches/Ndi/ulp/wsd/user/ibspproto.h | 297 +
 branches/Ndi/ulp/wsd/user/ibspstruct.h | 467 +
 branches/Ndi/ulp/wsd/user/makefile | 7 +
 branches/Ndi/ulp/wsd/user/misc.c | 134 +
 branches/Ndi/ulp/wsd/user/sockinfo.c | 176 +
 1561 files changed, 660744 insertions(+)
 create mode 100644 branches/Ndi/core/al/al.c
 create mode 100644 branches/Ndi/core/al/al.h
 create mode 100644 branches/Ndi/core/al/al_av.c
 create mode 100644 branches/Ndi/core/al/al_av.h
 create mode 100644 branches/Ndi/core/al/al_ca.c
 create mode 100644 branches/Ndi/core/al/al_ca.h
 create mode 100644 branches/Ndi/core/al/al_ci_ca.h
 create mode 100644 branches/Ndi/core/al/al_ci_ca_shared.c
 create mode 100644 branches/Ndi/core/al/al_cm_cep.h
 create mode 100644 branches/Ndi/core/al/al_cm_conn.h
 create mode 100644 branches/Ndi/core/al/al_cm_qp.c
 create mode 100644 branches/Ndi/core/al/al_cm_sidr.h
 create mode 100644 branches/Ndi/core/al/al_common.c
 create mode 100644 branches/Ndi/core/al/al_common.h
 create mode 100644 branches/Ndi/core/al/al_cq.c
 create mode 100644 branches/Ndi/core/al/al_cq.h
 create mode 100644 branches/Ndi/core/al/al_debug.h
 create mode 100644 branches/Ndi/core/al/al_dev.h
 create mode 100644 branches/Ndi/core/al/al_dm.c
 create mode 100644 branches/Ndi/core/al/al_dm.h
 create mode 100644 branches/Ndi/core/al/al_init.c
 create mode 100644 branches/Ndi/core/al/al_init.h
 create mode 100644 branches/Ndi/core/al/al_ioc_pnp.h
 create mode 100644 branches/Ndi/core/al/al_mad.c
 create mode 100644 branches/Ndi/core/al/al_mad.h
 create mode 100644 branches/Ndi/core/al/al_mad_pool.h
 create mode 100644 branches/Ndi/core/al/al_mcast.c
 create mode 100644 branches/Ndi/core/al/al_mcast.h
 create mode 100644 branches/Ndi/core/al/al_mgr.h
 create mode 100644 branches/Ndi/core/al/al_mgr_shared.c
 create mode 100644 branches/Ndi/core/al/al_mr.h
 create mode 100644 branches/Ndi/core/al/al_mr_shared.c
 create mode 100644 branches/Ndi/core/al/al_mw.c
 create mode 100644 branches/Ndi/core/al/al_mw.h
 create mode 100644 branches/Ndi/core/al/al_pd.c
 create mode 100644 branches/Ndi/core/al/al_pd.h
 create mode 100644 branches/Ndi/core/al/al_pnp.h
 create mode 100644 branches/Ndi/core/al/al_proxy.h
 create mode 100644 branches/Ndi/core/al/al_proxy_ioctl.h
 create mode 100644 branches/Ndi/core/al/al_qp.c
 create mode 100644 branches/Ndi/core/al/al_qp.h
 create mode 100644 branches/Ndi/core/al/al_query.c
 create mode 100644 branches/Ndi/core/al/al_query.h
 create mode 100644 branches/Ndi/core/al/al_reg_svc.c
 create mode 100644 branches/Ndi/core/al/al_reg_svc.h
 create mode 100644 branches/Ndi/core/al/al_res_mgr.c
 create mode 100644 branches/Ndi/core/al/al_res_mgr.h
 create mode 100644 branches/Ndi/core/al/al_srq.c
 create mode 100644 branches/Ndi/core/al/al_srq.h
 create mode 100644 branches/Ndi/core/al/al_sub.c
 create mode 100644 branches/Ndi/core/al/al_sub.h
 create mode 100644 branches/Ndi/core/al/al_verbs.h
 create mode 100644 branches/Ndi/core/al/dirs
 create mode 100644 branches/Ndi/core/al/ib_common.c
 create mode 100644 branches/Ndi/core/al/ib_common.h
 create mode 100644 branches/Ndi/core/al/ib_statustext.c
 create mode 100644 branches/Ndi/core/al/kernel/SOURCES
 create mode 100644 branches/Ndi/core/al/kernel/al_ca_pnp.c
 create mode 100644 branches/Ndi/core/al/kernel/al_ca_pnp.h
 create mode 100644 branches/Ndi/core/al/kernel/al_ci_ca.c
 create mode 100644 branches/Ndi/core/al/kernel/al_cm_cep.c
 create mode 100644 branches/Ndi/core/al/kernel/al_dev.c
 create mode 100644 branches/Ndi/core/al/kernel/al_driver.h
 create mode 100644 branches/Ndi/core/al/kernel/al_exports.def
 create mode 100644 branches/Ndi/core/al/kernel/al_fmr_pool.c
 create mode 100644 branches/Ndi/core/al/kernel/al_fmr_pool.h
 create mode 100644 branches/Ndi/core/al/kernel/al_ioc_pnp.c
 create mode 100644 branches/Ndi/core/al/kernel/al_mad_pool.c
 create mode 100644 branches/Ndi/core/al/kernel/al_mgr.c
 create mode 100644 branches/Ndi/core/al/kernel/al_mr.c
 create mode 100644 branches/Ndi/core/al/kernel/al_pnp.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy_cep.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy_cm.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy_ioc.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy_subnet.c
 create mode 100644 branches/Ndi/core/al/kernel/al_proxy_verbs.c
 create mode 100644 branches/Ndi/core/al/kernel/al_sa_req.c
 create mode 100644 branches/Ndi/core/al/kernel/al_smi.c
 create mode 100644 branches/Ndi/core/al/kernel/al_smi.h
 create mode 100644 branches/Ndi/core/al/kernel/ibal.rc
 create mode 100644 branches/Ndi/core/al/kernel/makefile
 create mode 100644 branches/Ndi/core/al/user/SOURCES
 create mode 100644 branches/Ndi/core/al/user/al_dll.c
 create mode 100644 branches/Ndi/core/al/user/al_exports.src
 create mode 100644 branches/Ndi/core/al/user/al_mad_pool.c
 create mode 100644 branches/Ndi/core/al/user/ibal.rc
 create mode 100644 branches/Ndi/core/al/user/makefile
 create mode 100644 branches/Ndi/core/al/user/ual_av.c
 create mode 100644 branches/Ndi/core/al/user/ual_ca.c
 create mode 100644 branches/Ndi/core/al/user/ual_ca.h
 create mode 100644 branches/Ndi/core/al/user/ual_ci_ca.c
 create mode 100644 branches/Ndi/core/al/user/ual_ci_ca.h
 create mode 100644 branches/Ndi/core/al/user/ual_cm_cep.c
 create mode 100644 branches/Ndi/core/al/user/ual_cq.c
 create mode 100644 branches/Ndi/core/al/user/ual_dm.c
 create mode 100644 branches/Ndi/core/al/user/ual_dm.h
 create mode 100644 branches/Ndi/core/al/user/ual_mad.c
 create mode 100644 branches/Ndi/core/al/user/ual_mad.h
 create mode 100644 branches/Ndi/core/al/user/ual_mad_pool.c
 create mode 100644 branches/Ndi/core/al/user/ual_mcast.c
 create mode 100644 branches/Ndi/core/al/user/ual_mcast.h
 create mode 100644 branches/Ndi/core/al/user/ual_mgr.c
 create mode 100644 branches/Ndi/core/al/user/ual_mgr.h
 create mode 100644 branches/Ndi/core/al/user/ual_mr.c
 create mode 100644 branches/Ndi/core/al/user/ual_mr.h
 create mode 100644 branches/Ndi/core/al/user/ual_mw.c
 create mode 100644 branches/Ndi/core/al/user/ual_mw.h
 create mode 100644 branches/Ndi/core/al/user/ual_pd.c
 create mode 100644 branches/Ndi/core/al/user/ual_pnp.c
 create mode 100644 branches/Ndi/core/al/user/ual_qp.c
 create mode 100644 branches/Ndi/core/al/user/ual_qp.h
 create mode 100644 branches/Ndi/core/al/user/ual_query.c
 create mode 100644 branches/Ndi/core/al/user/ual_query.h
 create mode 100644 branches/Ndi/core/al/user/ual_reg_svc.c
 create mode 100644 branches/Ndi/core/al/user/ual_res_mgr.h
 create mode 100644 branches/Ndi/core/al/user/ual_sa_req.c
 create mode 100644 branches/Ndi/core/al/user/ual_srq.c
 create mode 100644 branches/Ndi/core/al/user/ual_sub.c
 create mode 100644 branches/Ndi/core/al/user/ual_support.h
 create mode 100644 branches/Ndi/core/bus/dirs
 create mode 100644 branches/Ndi/core/bus/kernel/SOURCES
 create mode 100644 branches/Ndi/core/bus/kernel/bus_driver.c
 create mode 100644 branches/Ndi/core/bus/kernel/bus_driver.h
 create mode 100644 branches/Ndi/core/bus/kernel/bus_iou_mgr.c
 create mode 100644 branches/Ndi/core/bus/kernel/bus_iou_mgr.h
 create mode 100644 branches/Ndi/core/bus/kernel/bus_pnp.c
 create mode 100644 branches/Ndi/core/bus/kernel/bus_pnp.h
 create mode 100644 branches/Ndi/core/bus/kernel/bus_port_mgr.c
 create mode 100644 branches/Ndi/core/bus/kernel/bus_port_mgr.h
 create mode 100644 branches/Ndi/core/bus/kernel/ib_bus.inf
 create mode 100644 branches/Ndi/core/bus/kernel/ibbus.rc
 create mode 100644 branches/Ndi/core/bus/kernel/makefile
 create mode 100644 branches/Ndi/core/complib/cl_async_proc.c
 create mode 100644 branches/Ndi/core/complib/cl_list.c
 create mode 100644 branches/Ndi/core/complib/cl_map.c
 create mode 100644 branches/Ndi/core/complib/cl_memory.c
 create mode 100644 branches/Ndi/core/complib/cl_memtrack.h
 create mode 100644 branches/Ndi/core/complib/cl_obj.c
 create mode 100644 branches/Ndi/core/complib/cl_perf.c
 create mode 100644 branches/Ndi/core/complib/cl_pool.c
 create mode 100644 branches/Ndi/core/complib/cl_ptr_vector.c
 create mode 100644 branches/Ndi/core/complib/cl_reqmgr.c
 create mode 100644 branches/Ndi/core/complib/cl_statustext.c
 create mode 100644 branches/Ndi/core/complib/cl_threadpool.c
 create mode 100644 branches/Ndi/core/complib/cl_vector.c
 create mode 100644 branches/Ndi/core/complib/dirs
 create mode 100644 branches/Ndi/core/complib/kernel/SOURCES
 create mode 100644 branches/Ndi/core/complib/kernel/cl_bus_ifc.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_driver.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_event.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_exports.def
 create mode 100644 branches/Ndi/core/complib/kernel/cl_log.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_memory_osd.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_pnp_po.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_syscallback.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_thread.c
 create mode 100644 branches/Ndi/core/complib/kernel/cl_timer.c
 create mode 100644 branches/Ndi/core/complib/kernel/makefile
 create mode 100644 branches/Ndi/core/complib/user/SOURCES
 create mode 100644 branches/Ndi/core/complib/user/cl_debug.c
 create mode 100644 branches/Ndi/core/complib/user/cl_dll.c
 create mode 100644 branches/Ndi/core/complib/user/cl_event.c
 create mode 100644 branches/Ndi/core/complib/user/cl_log.c
 create mode 100644 branches/Ndi/core/complib/user/cl_memory_osd.c
 create mode 100644 branches/Ndi/core/complib/user/cl_syscallback.c
 create mode 100644 branches/Ndi/core/complib/user/cl_thread.c
 create mode 100644 branches/Ndi/core/complib/user/cl_timer.c
 create mode 100644 branches/Ndi/core/complib/user/complib.rc
 create mode 100644 branches/Ndi/core/complib/user/complib.src
 create mode 100644 branches/Ndi/core/complib/user/makefile
 create mode 100644 branches/Ndi/core/dirs
 create mode 100644 branches/Ndi/core/iou/dirs
 create mode 100644 branches/Ndi/core/iou/kernel/SOURCES
 create mode 100644 branches/Ndi/core/iou/kernel/ibiou.rc
 create mode 100644 branches/Ndi/core/iou/kernel/iou_driver.c
 create mode 100644 branches/Ndi/core/iou/kernel/iou_driver.h
 create mode 100644 branches/Ndi/core/iou/kernel/iou_ioc_mgr.c
 create mode 100644 branches/Ndi/core/iou/kernel/iou_ioc_mgr.h
 create mode 100644 branches/Ndi/core/iou/kernel/iou_pnp.c
 create mode 100644 branches/Ndi/core/iou/kernel/iou_pnp.h
 create mode 100644 branches/Ndi/core/iou/kernel/makefile
 create mode 100644 branches/Ndi/dirs
 create mode 100644 branches/Ndi/docs/Manual.htm
 create mode 100644 branches/Ndi/docs/complib/cl_async_proc_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_atomic_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_byteswap_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_comppool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_debug_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_event_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_fleximap_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_ioctl_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_irqlock_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_list_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_log_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_map_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_math_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_memory_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_mutex_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_obj_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_passivelock_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_perf_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_pool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_ptr_vector_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_qcomppool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_qlist_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_qlockpool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_qmap_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_qpool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_rbmap_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_reqmgr_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_spinlock_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_syscallback_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_thread_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_threadpool_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_timer_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_types_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_vector_h.html
 create mode 100644 branches/Ndi/docs/complib/cl_waitobj_h.html
 create mode 100644 branches/Ndi/docs/complib/comp_lib_h.html
 create mode 100644 branches/Ndi/docs/iba/ib_al_h.html
 create mode 100644 branches/Ndi/docs/iba/ib_types_h.html
 create mode 100644 branches/Ndi/docs/masterindex.html
 create mode 100644 branches/Ndi/docs/openfabrics.gif
 create mode 100644 branches/Ndi/docs/robo_definitions.html
 create mode 100644 branches/Ndi/docs/robo_functions.html
 create mode 100644 branches/Ndi/docs/robo_modules.html
 create mode 100644 branches/Ndi/docs/robo_sourcefiles.html
 create mode 100644 branches/Ndi/docs/robo_strutures.html
 create mode 100644 branches/Ndi/docs/robodoc.css
 create mode 100644 branches/Ndi/etc/makebin.bat
 create mode 100644 branches/Ndi/etc/wpp/ALTraceRt.cmd
 create mode 100644 branches/Ndi/etc/wpp/CreateTrace.cmd
 create mode 100644 branches/Ndi/etc/wpp/IPoIBTraceRt.cmd
 create mode 100644 branches/Ndi/etc/wpp/MTHCATraceRt.cmd
 create mode 100644 branches/Ndi/etc/wpp/SDPTraceRt.cmd
 create mode 100644 branches/Ndi/etc/wpp/StartSdpTrace.cmd
 create mode 100644 branches/Ndi/etc/wpp/StartTrace.cmd
 create mode 100644 branches/Ndi/etc/wpp/StopSdpTrace.cmd
 create mode 100644 branches/Ndi/etc/wpp/StopTrace.cmd
 create mode 100644 branches/Ndi/hw/dirs
 create mode 100644 branches/Ndi/hw/mt23108/dirs
 create mode 100644 branches/Ndi/hw/mt23108/kernel/Makefile
 create mode 100644 branches/Ndi/hw/mt23108/kernel/SOURCES
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca.rc
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_data.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_data.h
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_debug.h
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_direct.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_driver.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_driver.h
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_mcast.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_memory.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_smp.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/hca_verbs.c
 create mode 100644 branches/Ndi/hw/mt23108/kernel/infinihost.inf
 create mode 100644 branches/Ndi/hw/mt23108/user/Makefile
 create mode 100644 branches/Ndi/hw/mt23108/user/SOURCES
 create mode 100644 branches/Ndi/hw/mt23108/user/hca_data.h
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_av.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_ca.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_cq.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_main.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_main.h
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_mcast.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_mrw.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_osbypass.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_pd.c
 create mode 100644 branches/Ndi/hw/mt23108/user/mlnx_ual_qp.c
 create mode 100644 branches/Ndi/hw/mt23108/user/uvpd.rc
 create mode 100644 branches/Ndi/hw/mt23108/user/uvpd_exports.src
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_init.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_stub_defines.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub_defines.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhenosys.ic
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_obj.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_stub.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/invalid.ic
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/rx_stub.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmd_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/event_irqh.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_mod_obj.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_common.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_default_profile.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_requested_profile.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/hcahal/zombie.ic
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/allocator.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_mod_obj.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vip_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_common.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ic
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ih
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash64p.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp2p.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashv4p.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/evapi.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_features.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/dirs
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/Makefile
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/SOURCES
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/hh_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mdmsg.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mosal_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mpga_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mt23108.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mt23108.rc
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/mtl_common_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/tdriver_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/thh_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/thhul_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/kernel/vapi_common_kl_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_iobuf.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mlock.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_prot_ctx.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_que.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_thread.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_timer.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosalu_socket.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_arch.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_driver.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_prot_ctx_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_priv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_driver.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket_imp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_sync.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_thread.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/MPGA_headers.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_driver.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_log.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common_kl.def
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_log_win.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/bit_ops.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/ib_defs.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_errno.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_pci_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctl.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctlSpec.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/endian.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_defs.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/unistd.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/win/mtl_arch_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.rc
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCard.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConfPriv.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGen.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGuid.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPnp.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPwr.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MddLib.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/infinihost.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/resource.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/tavor_csp.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/version.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/mlxsys/tools/mtperf.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM_append.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/cr_types.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_dev_defs.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_if_defs.h
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/Makefile
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/SOURCES
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/hh_ul_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/mosal_ul_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/mpga_ul_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/mtl_common_ul_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/thhul_ul_sources.c
 create mode 100644 branches/Ndi/hw/mt23108/vapi/user/vapi_common_ul_sources.c
 create mode 100644 branches/Ndi/hw/mthca/dirs
 create mode 100644 branches/Ndi/hw/mthca/hca_utils.c
 create mode 100644 branches/Ndi/hw/mthca/hca_utils.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/Makefile
 create mode 100644 branches/Ndi/hw/mthca/kernel/SOURCES
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca.rc
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_data.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_data.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_debug.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_direct.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_driver.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_driver.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_mcast.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_memory.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_pci.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_pci.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_pnp.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_pnp.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/hca_verbs.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/ib_cache.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/ib_mad.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/ib_pack.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/ib_smi.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/ib_verbs.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_atomic.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_bitmap.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_cache.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_device.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_fmr_pool.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_fmr_pool.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_l2w.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_l2w.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_list.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_memory.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_memory.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_pa_cash.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_pa_cash.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_packer.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_pci.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_pcipool.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_reset_tavor.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_spinlock.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_sync.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_types.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_ud_header.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_uverbs.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mt_verbs.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca.inf
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_allocator.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_av.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_catas.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_cmd.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_cmd.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_config_reg.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_cq.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_dev.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_doorbell.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_eq.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_log.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_log.mc
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_log.rc
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_mad.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_main.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_mcg.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_memfree.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_memfree.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_mr.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_pd.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_profile.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_profile.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_provider.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_provider.h
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_qp.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_srq.c
 create mode 100644 branches/Ndi/hw/mthca/kernel/mthca_uar.c
 create mode 100644 branches/Ndi/hw/mthca/mt_utils.c
 create mode 100644 branches/Ndi/hw/mthca/mt_utils.h
 create mode 100644 branches/Ndi/hw/mthca/mthca_wqe.h
 create mode 100644 branches/Ndi/hw/mthca/mx_abi.h
 create mode 100644 branches/Ndi/hw/mthca/user/Makefile
 create mode 100644 branches/Ndi/hw/mthca/user/SOURCES
 create mode 100644 branches/Ndi/hw/mthca/user/arch.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_av.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_ca.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_cq.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_data.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_main.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_main.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_mcast.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_mrw.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_osbypass.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_pd.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_qp.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_ual_srq.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp.def
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp.rc
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_ah.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_cq.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_debug.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_debug.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_doorbell.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_kern_abi.h
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_memfree.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_qp.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_srq.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.c
 create mode 100644 branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.h
 create mode 100644 branches/Ndi/hw/mthca/user/mt_l2w.h
 create mode 100644 branches/Ndi/hw/mthca/user/opcode.h
 create mode 100644 branches/Ndi/inc/complib/cl_async_proc.h
 create mode 100644 branches/Ndi/inc/complib/cl_atomic.h
 create mode 100644 branches/Ndi/inc/complib/cl_byteswap.h
 create mode 100644 branches/Ndi/inc/complib/cl_comppool.h
 create mode 100644 branches/Ndi/inc/complib/cl_debug.h
 create mode 100644 branches/Ndi/inc/complib/cl_event.h
 create mode 100644 branches/Ndi/inc/complib/cl_fleximap.h
 create mode 100644 branches/Ndi/inc/complib/cl_ioctl.h
 create mode 100644 branches/Ndi/inc/complib/cl_irqlock.h
 create mode 100644 branches/Ndi/inc/complib/cl_list.h
 create mode 100644 branches/Ndi/inc/complib/cl_log.h
 create mode 100644 branches/Ndi/inc/complib/cl_map.h
 create mode 100644 branches/Ndi/inc/complib/cl_math.h
 create mode 100644 branches/Ndi/inc/complib/cl_memory.h
 create mode 100644 branches/Ndi/inc/complib/cl_mutex.h
 create mode 100644 branches/Ndi/inc/complib/cl_obj.h
 create mode 100644 branches/Ndi/inc/complib/cl_passivelock.h
 create mode 100644 branches/Ndi/inc/complib/cl_perf.h
 create mode 100644 branches/Ndi/inc/complib/cl_pool.h
 create mode 100644 branches/Ndi/inc/complib/cl_ptr_vector.h
 create mode 100644 branches/Ndi/inc/complib/cl_qcomppool.h
 create mode 100644 branches/Ndi/inc/complib/cl_qlist.h
 create mode 100644 branches/Ndi/inc/complib/cl_qlockpool.h
 create mode 100644 branches/Ndi/inc/complib/cl_qmap.h
 create mode 100644 branches/Ndi/inc/complib/cl_qpool.h
 create mode 100644 branches/Ndi/inc/complib/cl_rbmap.h
 create mode 100644 branches/Ndi/inc/complib/cl_reqmgr.h
 create mode 100644 branches/Ndi/inc/complib/cl_spinlock.h
 create mode 100644 branches/Ndi/inc/complib/cl_syscallback.h
 create mode 100644 branches/Ndi/inc/complib/cl_thread.h
 create mode 100644 branches/Ndi/inc/complib/cl_threadpool.h
 create mode 100644 branches/Ndi/inc/complib/cl_timer.h
 create mode 100644 branches/Ndi/inc/complib/cl_types.h
 create mode 100644 branches/Ndi/inc/complib/cl_vector.h
 create mode 100644 branches/Ndi/inc/complib/cl_waitobj.h
 create mode 100644 branches/Ndi/inc/complib/comp_lib.h
 create mode 100644 branches/Ndi/inc/iba/ib_al.h
 create mode 100644 branches/Ndi/inc/iba/ib_al_ioctl.h
 create mode 100644 branches/Ndi/inc/iba/ib_at_ioctl.h
 create mode 100644 branches/Ndi/inc/iba/ib_ci.h
 create mode 100644 branches/Ndi/inc/iba/ib_types.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_atomic_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_bus_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_byteswap_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_debug_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_event_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_init.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_ioctl_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_irqlock_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_memory_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_mutex_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_packoff.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_packon.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_pnp_po.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_spinlock_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_syscallback_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_thread_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_timer_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_types_osd.h
 create mode 100644 branches/Ndi/inc/kernel/complib/cl_waitobj_osd.h
 create mode 100644 branches/Ndi/inc/kernel/iba/ib_al_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/iba/ib_ci_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/iba/ioc_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/iba/iou_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/iba/ipoib_ifc.h
 create mode 100644 branches/Ndi/inc/kernel/ip_packet.h
 create mode 100644 branches/Ndi/inc/mthca/mthca_vc.h
 create mode 100644 branches/Ndi/inc/oib_ver.h
 create mode 100644 branches/Ndi/inc/openib.def
 create mode 100644 branches/Ndi/inc/user/complib/cl_atomic_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_byteswap_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_debug_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_event_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_ioctl_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_memory_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_mutex_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_packoff.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_packon.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_spinlock_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_syscallback_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_thread_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_timer_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_types_osd.h
 create mode 100644 branches/Ndi/inc/user/complib/cl_waitobj_osd.h
 create mode 100644 branches/Ndi/inc/user/iba/ib_uvp.h
 create mode 100644 branches/Ndi/inc/user/wsd/ibsp_regpath.h
 create mode 100644 branches/Ndi/tests/alts/allocdeallocpd.c
 create mode 100644 branches/Ndi/tests/alts/alts_common.h
 create mode 100644 branches/Ndi/tests/alts/alts_debug.h
 create mode 100644 branches/Ndi/tests/alts/alts_misc.c
 create mode 100644 branches/Ndi/tests/alts/alts_readme.txt
 create mode 100644 branches/Ndi/tests/alts/cmtests.c
 create mode 100644 branches/Ndi/tests/alts/createanddestroycq.c
 create mode 100644 branches/Ndi/tests/alts/createanddestroyqp.c
 create mode 100644 branches/Ndi/tests/alts/createdestroyav.c
 create mode 100644 branches/Ndi/tests/alts/creatememwindow.c
 create mode 100644 branches/Ndi/tests/alts/dirs
 create mode 100644 branches/Ndi/tests/alts/ibquery.c
 create mode 100644 branches/Ndi/tests/alts/kernel/SOURCES
 create mode 100644 branches/Ndi/tests/alts/kernel/alts.inf
 create mode 100644 branches/Ndi/tests/alts/kernel/alts.rc
 create mode 100644 branches/Ndi/tests/alts/kernel/alts_driver.c
 create mode 100644 branches/Ndi/tests/alts/kernel/alts_driver.h
 create mode 100644 branches/Ndi/tests/alts/kernel/makefile
 create mode 100644 branches/Ndi/tests/alts/madtests.c
 create mode 100644 branches/Ndi/tests/alts/multisendrecv.c
 create mode 100644 branches/Ndi/tests/alts/openclose.c
 create mode 100644 branches/Ndi/tests/alts/querycaattr.c
 create mode 100644 branches/Ndi/tests/alts/registermemregion.c
 create mode 100644 branches/Ndi/tests/alts/registerpnp.c
 create mode 100644 branches/Ndi/tests/alts/reregister_hca.c
 create mode 100644 branches/Ndi/tests/alts/smatests.c
 create mode 100644 branches/Ndi/tests/alts/user/SOURCES
 create mode 100644 branches/Ndi/tests/alts/user/alts_main.c
 create
mode 100644 branches/Ndi/tests/alts/user/makefile create mode 100644 branches/Ndi/tests/cmtest/dirs create mode 100644 branches/Ndi/tests/cmtest/user/SOURCES create mode 100644 branches/Ndi/tests/cmtest/user/cmtest_main.c create mode 100644 branches/Ndi/tests/cmtest/user/makefile create mode 100644 branches/Ndi/tests/dirs create mode 100644 branches/Ndi/tests/ibat/dirs create mode 100644 branches/Ndi/tests/ibat/user/PrintIp.c create mode 100644 branches/Ndi/tests/ibat/user/SOURCES create mode 100644 branches/Ndi/tests/ibat/user/makefile create mode 100644 branches/Ndi/tests/limits/dirs create mode 100644 branches/Ndi/tests/limits/user/SOURCES create mode 100644 branches/Ndi/tests/limits/user/limits_main.c create mode 100644 branches/Ndi/tests/limits/user/makefile create mode 100644 branches/Ndi/tests/wsd/dirs create mode 100644 branches/Ndi/tests/wsd/user/contest/contest.c create mode 100644 branches/Ndi/tests/wsd/user/contest/contest.h create mode 100644 branches/Ndi/tests/wsd/user/dirs create mode 100644 branches/Ndi/tests/wsd/user/test1/test1.c create mode 100644 branches/Ndi/tests/wsd/user/test2/ibwrap.c create mode 100644 branches/Ndi/tests/wsd/user/test2/ibwrap.h create mode 100644 branches/Ndi/tests/wsd/user/test2/test2.c create mode 100644 branches/Ndi/tests/wsd/user/test3/ibwrap.c create mode 100644 branches/Ndi/tests/wsd/user/test3/ibwrap.h create mode 100644 branches/Ndi/tests/wsd/user/test3/test3.c create mode 100644 branches/Ndi/tests/wsd/user/ttcp/SOURCES create mode 100644 branches/Ndi/tests/wsd/user/ttcp/makefile create mode 100644 branches/Ndi/tests/wsd/user/ttcp/ttcp.c create mode 100644 branches/Ndi/tools/coinstaller/dirs create mode 100644 branches/Ndi/tools/coinstaller/user/IBInstaller.rc create mode 100644 branches/Ndi/tools/coinstaller/user/IbInstaller.cpp create mode 100644 branches/Ndi/tools/coinstaller/user/IbInstaller.def create mode 100644 branches/Ndi/tools/coinstaller/user/SOURCES create mode 100644 branches/Ndi/tools/coinstaller/user/makefile create mode 100644 branches/Ndi/tools/coinstaller/user/resource.h create mode 100644 branches/Ndi/tools/coinstaller/user/stdafx.cpp create mode 100644 branches/Ndi/tools/coinstaller/user/stdafx.h create mode 100644 branches/Ndi/tools/dirs create mode 100644 branches/Ndi/tools/flint/dirs create mode 100644 branches/Ndi/tools/flint/user/SOURCES create mode 100644 branches/Ndi/tools/flint/user/flint.cpp create mode 100644 branches/Ndi/tools/flint/user/flint.rc create mode 100644 branches/Ndi/tools/flint/user/makefile create mode 100644 branches/Ndi/tools/fwupdate/dirs create mode 100644 branches/Ndi/tools/fwupdate/user/SOURCES create mode 100644 branches/Ndi/tools/fwupdate/user/flint-tools.cpp create mode 100644 branches/Ndi/tools/fwupdate/user/flint-tools.h create mode 100644 branches/Ndi/tools/fwupdate/user/flint.cpp create mode 100644 branches/Ndi/tools/fwupdate/user/makefile create mode 100644 branches/Ndi/tools/fwupdate/user/mtcr.h create mode 100644 branches/Ndi/tools/mread/user/SOURCES create mode 100644 branches/Ndi/tools/mread/user/makefile create mode 100644 branches/Ndi/tools/mread/user/mread.c create mode 100644 branches/Ndi/tools/mst/dirs create mode 100644 branches/Ndi/tools/mst/user/SOURCES create mode 100644 branches/Ndi/tools/mst/user/makefile create mode 100644 branches/Ndi/tools/mst/user/mst.c create mode 100644 branches/Ndi/tools/mst/user/mst.rc create mode 100644 branches/Ndi/tools/mtcr/dirs create mode 100644 branches/Ndi/tools/mtcr/user/SOURCES create mode 100644 branches/Ndi/tools/mtcr/user/com_def.h 
create mode 100644 branches/Ndi/tools/mtcr/user/makefile create mode 100644 branches/Ndi/tools/mtcr/user/mtcr.c create mode 100644 branches/Ndi/tools/mtcr/user/mtcr.def create mode 100644 branches/Ndi/tools/mtcr/user/mtcr.h create mode 100644 branches/Ndi/tools/mtcr/user/mtcr.rc create mode 100644 branches/Ndi/tools/mtcr/user/mtcr_i2c.c create mode 100644 branches/Ndi/tools/mtcr/user/mtcr_i2c.h create mode 100644 branches/Ndi/tools/mtcr/user/usb.cpp create mode 100644 branches/Ndi/tools/mtcr/user/usb.h create mode 100644 branches/Ndi/tools/mtcr/user/usb/I2cBrdg.lib create mode 100644 branches/Ndi/tools/mtcr/user/usb/UsbI2cIo.lib create mode 100644 branches/Ndi/tools/mtcr/user/usb/i2cbridge.h create mode 100644 branches/Ndi/tools/mtcr/user/usb/usbi2cio.h create mode 100644 branches/Ndi/tools/mwrite/user/SOURCES create mode 100644 branches/Ndi/tools/mwrite/user/makefile create mode 100644 branches/Ndi/tools/mwrite/user/mwrite.c create mode 100644 branches/Ndi/tools/perftests/dirs create mode 100644 branches/Ndi/tools/perftests/user/README create mode 100644 branches/Ndi/tools/perftests/user/TODO create mode 100644 branches/Ndi/tools/perftests/user/clock_test.c create mode 100644 branches/Ndi/tools/perftests/user/dirs create mode 100644 branches/Ndi/tools/perftests/user/get_clock.c create mode 100644 branches/Ndi/tools/perftests/user/get_clock.h create mode 100644 branches/Ndi/tools/perftests/user/getopt.c create mode 100644 branches/Ndi/tools/perftests/user/getopt.h create mode 100644 branches/Ndi/tools/perftests/user/perf_defs.h create mode 100644 branches/Ndi/tools/perftests/user/perf_utils.c create mode 100644 branches/Ndi/tools/perftests/user/read_bw/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/read_bw/makefile create mode 100644 branches/Ndi/tools/perftests/user/read_bw/read_bw.c create mode 100644 branches/Ndi/tools/perftests/user/read_bw/read_bw.rc create mode 100644 branches/Ndi/tools/perftests/user/read_lat/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/read_lat/makefile create mode 100644 branches/Ndi/tools/perftests/user/read_lat/read_lat.c create mode 100644 branches/Ndi/tools/perftests/user/read_lat/read_lat.rc create mode 100644 branches/Ndi/tools/perftests/user/send_bw/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/send_bw/makefile create mode 100644 branches/Ndi/tools/perftests/user/send_bw/send_bw.c create mode 100644 branches/Ndi/tools/perftests/user/send_bw/send_bw.rc create mode 100644 branches/Ndi/tools/perftests/user/send_lat/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/send_lat/makefile create mode 100644 branches/Ndi/tools/perftests/user/send_lat/send_lat.c create mode 100644 branches/Ndi/tools/perftests/user/send_lat/send_lat.rc create mode 100644 branches/Ndi/tools/perftests/user/write_bw/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/write_bw/makefile create mode 100644 branches/Ndi/tools/perftests/user/write_bw/write_bw.c create mode 100644 branches/Ndi/tools/perftests/user/write_bw/write_bw.rc create mode 100644 branches/Ndi/tools/perftests/user/write_lat/SOURCES create mode 100644 branches/Ndi/tools/perftests/user/write_lat/makefile create mode 100644 branches/Ndi/tools/perftests/user/write_lat/write_lat.c create mode 100644 branches/Ndi/tools/perftests/user/write_lat/write_lat.rc create mode 100644 branches/Ndi/tools/spark/dirs create mode 100644 branches/Ndi/tools/spark/user/SOURCES create mode 100644 branches/Ndi/tools/spark/user/makefile create mode 100644 
branches/Ndi/tools/spark/user/spark.cpp create mode 100644 branches/Ndi/tools/spark/user/spark.rc create mode 100644 branches/Ndi/tools/vstat/dirs create mode 100644 branches/Ndi/tools/vstat/user/SOURCES create mode 100644 branches/Ndi/tools/vstat/user/makefile create mode 100644 branches/Ndi/tools/vstat/user/vstat.rc create mode 100644 branches/Ndi/tools/vstat/user/vstat_main.c create mode 100644 branches/Ndi/tools/wsdinstall/dirs create mode 100644 branches/Ndi/tools/wsdinstall/user/InstallSP.sln create mode 100644 branches/Ndi/tools/wsdinstall/user/SOURCES create mode 100644 branches/Ndi/tools/wsdinstall/user/installsp.c create mode 100644 branches/Ndi/tools/wsdinstall/user/installsp.rc create mode 100644 branches/Ndi/tools/wsdinstall/user/makefile create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_adapter_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_modify_agent.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cno_wait.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_accept.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_callback.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_handoff.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_reject.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_debug.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_connect.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_disconnect.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_dup_connect.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_get_status.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_modify.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_recv.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_send.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_reset.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_connection_callb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dequeue.c create mode 100644 
branches/Ndi/ulp/dapl/dapl/common/dapl_evd_disable.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dto_callb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_enable.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_modify_cno.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_post_se.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_resize.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_evd_wait.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_get_consumer_context.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_get_handle_type.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_hash.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_hash.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ia_close.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ia_open.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ia_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_init.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_llist.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_provider.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_provider.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create_any.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_psp_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_psp_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_pz_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_pz_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_pz_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_bind.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_free.c create mode 100644 
branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_create.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_free.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_query.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_set_consumer_context.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/dirs create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_cm.c create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_dto.h create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_qp.c create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/include/dapl.h create mode 100644 branches/Ndi/ulp/dapl/dapl/include/dapl_debug.h create mode 100644 branches/Ndi/ulp/dapl/dapl/include/dapl_ipoib_names.h create mode 100644 branches/Ndi/ulp/dapl/dapl/include/dapl_vendor.h create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/Makefile.cygwin create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/Makefile.org create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/Makefile.orig create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/SOURCES create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/dapl_init.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.h create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.h create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.h create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/makefile create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/makefile.wnd create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/udapl_exports.src create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/udapl_sources.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.c create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_win.def create mode 100644 branches/Ndi/ulp/dapl/dapl/udapl/windows/dapllib.rc create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_dictionary.c create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_dictionary.h create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_dr.c create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_dr.h create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_init.c create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_init.h create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_sr.c create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_sr.h create mode 100644 branches/Ndi/ulp/dapl/dat/common/dat_strerror.c create mode 100644 branches/Ndi/ulp/dapl/dat/dirs create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/dat.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/dat_error.h create mode 100644 
branches/Ndi/ulp/dapl/dat/include/dat/dat_platform_specific.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/dat_redirection.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/dat_registry.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/dat_vendor_specific.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/kdat.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/kdat_config.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/kdat_redirection.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/kdat_vendor_specific.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/udat.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/udat_config.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/udat_redirection.h create mode 100644 branches/Ndi/ulp/dapl/dat/include/dat/udat_vendor_specific.h create mode 100644 branches/Ndi/ulp/dapl/dat/kdat/Makefile create mode 100644 branches/Ndi/ulp/dapl/dat/kdat/dat_kdapl.c create mode 100644 branches/Ndi/ulp/dapl/dat/kdat/dat_module.c create mode 100644 branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.c create mode 100644 branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.h create mode 100644 branches/Ndi/ulp/dapl/dat/udat/Makefile.cygwin create mode 100644 branches/Ndi/ulp/dapl/dat/udat/Makefile.org create mode 100644 branches/Ndi/ulp/dapl/dat/udat/Makefile.orig create mode 100644 branches/Ndi/ulp/dapl/dat/udat/SOURCES create mode 100644 branches/Ndi/ulp/dapl/dat/udat/dat.conf create mode 100644 branches/Ndi/ulp/dapl/dat/udat/ibhosts create mode 100644 branches/Ndi/ulp/dapl/dat/udat/linux/dat-1.1.spec create mode 100644 branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.c create mode 100644 branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.h create mode 100644 branches/Ndi/ulp/dapl/dat/udat/makefile create mode 100644 branches/Ndi/ulp/dapl/dat/udat/makefile.wnd create mode 100644 branches/Ndi/ulp/dapl/dat/udat/udat.c create mode 100644 branches/Ndi/ulp/dapl/dat/udat/udat_exports.src create mode 100644 branches/Ndi/ulp/dapl/dat/udat/udat_sources.c create mode 100644 branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.c create mode 100644 branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.h create mode 100644 branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.c create mode 100644 branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.h create mode 100644 branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd_sr.h create mode 100644 branches/Ndi/ulp/dapl/dat/udat/windows/dat_win.def create mode 100644 branches/Ndi/ulp/dapl/dirs create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_coding_style.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_end_point_design.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_environ.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_event_design.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_memory_management_design.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_registry_design.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_shared_memory_design.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dapl_vendor_specific_changes.txt create mode 100644 branches/Ndi/ulp/dapl/doc/dat.conf create mode 100644 branches/Ndi/ulp/dapl/doc/dat_environ.txt create mode 100644 branches/Ndi/ulp/dapl/doc/ibhosts create mode 100644 branches/Ndi/ulp/dapl/doc/mv_dapl_readme.txt create mode 100644 branches/Ndi/ulp/dapl/doc/mv_dapl_relnotes.txt create mode 100644 branches/Ndi/ulp/dapl/test/dirs create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_defaults create mode 100644 
branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_onetest create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_perf.csh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/DaplTest_how_2.txt create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.cygwin create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.org create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.orig create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/SOURCES create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/bw.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/cl.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_common.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_endian.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_connmgt.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer_client.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_endpoint.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_hwconn.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_mem.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_pz.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_queryinfo.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_test.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_funcs.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_main.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_netaddr.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_client.c create mode 100644 
branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_server.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_test.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_util.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_proto.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_util.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_thread.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_util.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_util.c create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_version.h create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_block.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_poll.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/lim.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile.wnd create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/quit.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/regress.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dapltest/srv.sh create mode 100644 branches/Ndi/ulp/dapl/test/udapl/dirs create mode 100644 branches/Ndi/ulp/dirs create mode 100644 branches/Ndi/ulp/inic/dirs create mode 100644 branches/Ndi/ulp/inic/kernel/SOURCES create mode 100644 branches/Ndi/ulp/inic/kernel/inic.rc create mode 100644 branches/Ndi/ulp/inic/kernel/makefile create mode 100644 branches/Ndi/ulp/inic/kernel/netvnic.inf create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_adapter.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_adapter.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_config.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_control.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_control.h create mode 100644 
branches/Ndi/ulp/inic/kernel/vnic_controlpkt.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_data.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_data.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_debug.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_driver.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_driver.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_ib.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_ib.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_netpath.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_trailer.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_util.h create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_viport.c create mode 100644 branches/Ndi/ulp/inic/kernel/vnic_viport.h create mode 100644 branches/Ndi/ulp/ipoib/dirs create mode 100644 branches/Ndi/ulp/ipoib/ip_stats.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/SOURCES create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib.rc create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.c create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_debug.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_driver.c create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_driver.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.c create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.c create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_log.mc create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_port.c create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_port.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/ipoib_xfr_mgr.h create mode 100644 branches/Ndi/ulp/ipoib/kernel/makefile create mode 100644 branches/Ndi/ulp/ipoib/kernel/netipoib.inf create mode 100644 branches/Ndi/ulp/opensm/dirs create mode 100644 branches/Ndi/ulp/opensm/user/README.opensm-build create mode 100644 branches/Ndi/ulp/opensm/user/TODO create mode 100644 branches/Ndi/ulp/opensm/user/config.h create mode 100644 branches/Ndi/ulp/opensm/user/dirs create mode 100644 branches/Ndi/ulp/opensm/user/doc/OpenSM_PKey_Mgr.txt create mode 100644 branches/Ndi/ulp/opensm/user/doc/OpenSM_RN_0_3_1.pdf create mode 100644 branches/Ndi/ulp/opensm/user/doc/OpenSM_UM_0_3.pdf create mode 100644 branches/Ndi/ulp/opensm/user/doc/current-routing.txt create mode 100644 branches/Ndi/ulp/opensm/user/doc/modular-routing.txt create mode 100644 branches/Ndi/ulp/opensm/user/doc/opensm_release_notes_openib-2.0.5.txt create mode 100644 branches/Ndi/ulp/opensm/user/doc/qos-config.txt create mode 100644 branches/Ndi/ulp/opensm/user/ibtrapgen/Makefile create mode 100644 branches/Ndi/ulp/opensm/user/ibtrapgen/SOURCES create mode 100644 branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.c create mode 100644 branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.h create mode 100644 branches/Ndi/ulp/opensm/user/ibtrapgen/main.c create mode 100644 branches/Ndi/ulp/opensm/user/include/complib/cl_byteswap.h create mode 100644 branches/Ndi/ulp/opensm/user/include/complib/cl_dispatcher.h create mode 100644 branches/Ndi/ulp/opensm/user/include/complib/cl_event_wheel.h create mode 100644 branches/Ndi/ulp/opensm/user/include/complib/cl_signal_osd.h create mode 100644 branches/Ndi/ulp/opensm/user/include/iba/ib_types.h create mode 100644 branches/Ndi/ulp/opensm/user/include/iba/ib_types_extended.h create mode 
100644 branches/Ndi/ulp/opensm/user/include/opensm/cl_dispatcher.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/cl_event_wheel.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_attrib_req.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_base.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_console.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_db.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_db_pack.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_drop_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_errors.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_ft_config_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_fwd_tbl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_helper.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_inform.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_lid_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_tbl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_link_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_log.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mad_pool.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_madw.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_matrix.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_config_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_tbl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_info.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_port.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mcmember.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_msgdef.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mtl_bind.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_mtree.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_multicast.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_node.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_opensm.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_partition.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_path.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pi_config_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_config_ctrl.h create 
mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_port.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_port_profile.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_rand_fwd_tbl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_remote_sm.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_req.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_req_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_resp.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_router.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mad_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_response.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record_ctrl.h create 
mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_service.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sm.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_info_get_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_mad_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_state_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_stats.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet_config_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_sweep_fail_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_switch.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_ts_useraccess.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_mgr.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_updn.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_umadt.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_version.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_vl15intf.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv_ctrl.h create mode 100644 branches/Ndi/ulp/opensm/user/include/opensm/st.h create mode 100644 branches/Ndi/ulp/opensm/user/include/unistd.h create mode 100644 branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor.h create mode 100644 branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_al.h create mode 100644 branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_api.h create mode 100644 branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_sa_api.h create mode 100644 branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_select.h create mode 100644 
branches/Ndi/ulp/opensm/user/include/vendor/winosm_common.h create mode 100755 branches/Ndi/ulp/opensm/user/libopensm/Makefile create mode 100644 branches/Ndi/ulp/opensm/user/libopensm/SOURCES create mode 100644 branches/Ndi/ulp/opensm/user/libopensm/osm_helper.c create mode 100644 branches/Ndi/ulp/opensm/user/libopensm/osm_log.c create mode 100644 branches/Ndi/ulp/opensm/user/libopensm/osm_mad_pool.c create mode 100644 branches/Ndi/ulp/opensm/user/libvendor/Makefile create mode 100644 branches/Ndi/ulp/opensm/user/libvendor/SOURCES create mode 100644 branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_al.c create mode 100644 branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_mlx_sa.c create mode 100644 branches/Ndi/ulp/opensm/user/libvendor/winosm_common.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/Makefile create mode 100644 branches/Ndi/ulp/opensm/user/opensm/SOURCES create mode 100644 branches/Ndi/ulp/opensm/user/opensm/cl_dispatcher.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/cl_event_wheel.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/main.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/opensm.opts create mode 100644 branches/Ndi/ulp/opensm/user/opensm/opensm.rc create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm.h create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm.mc create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm.rc create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_console.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_db_files.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_db_pack.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_drop_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_fwd_tbl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_inform.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_lid_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_tbl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_link_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_matrix.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcast_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcast_tbl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcm_info.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mcm_port.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_mtree.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_multicast.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_node.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv_ctrl.c create mode 100755 branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_opensm.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_pkey.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_pkey_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv.c create mode 100644 
branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_port.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_prtn.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_prtn_config.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_qos.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_remote_sm.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_req.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_req_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_resp.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_router.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_mad_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_response.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record.c create mode 100644 
branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_service.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sm.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sm_mad_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sm_state_mgr.c create mode 100755 branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_subnet.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_sweep_fail_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_switch.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_ucast_file.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_ucast_ftree.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_ucast_mgr.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_ucast_updn.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_vl15intf.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv_ctrl.c create mode 100644 branches/Ndi/ulp/opensm/user/opensm/st.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/Makefile create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/SOURCES create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/error.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/osmt_inform.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/osmt_mtl_regular_qp.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/osmtest.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_base.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_subnet.h create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/main.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmt_inform.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmt_mtl_regular_qp.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmt_multicast.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmt_service.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmt_slvl_vl_arb.c create mode 100644 branches/Ndi/ulp/opensm/user/osmtest/osmtest.c create mode 100644 branches/Ndi/ulp/srp/dirs create mode 100644 branches/Ndi/ulp/srp/kernel/SOURCES create mode 100644 branches/Ndi/ulp/srp/kernel/ib_srp.inf create mode 100644 branches/Ndi/ulp/srp/kernel/ibsrp.rc create mode 100644 branches/Ndi/ulp/srp/kernel/makefile create mode 100644 branches/Ndi/ulp/srp/kernel/srp.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_aer_req.h create mode 100644 
branches/Ndi/ulp/srp/kernel/srp_aer_rsp.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_cmd.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_connection.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_connection.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_cred_req.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_cred_rsp.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_data.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_data_path.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_data_path.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_debug.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_descriptors.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_descriptors.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_driver.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_event.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_event.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_hba.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_hba.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_hca.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_hca.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_i_logout.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_information_unit.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_iu_buffer.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_login_rej.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_login_req.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_login_rsp.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_rsp.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_session.c create mode 100644 branches/Ndi/ulp/srp/kernel/srp_session.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_t_logout.h create mode 100644 branches/Ndi/ulp/srp/kernel/srp_tsk_mgmt.h create mode 100644 branches/Ndi/ulp/wsd/dirs create mode 100644 branches/Ndi/ulp/wsd/user/README create mode 100644 branches/Ndi/ulp/wsd/user/SOURCES create mode 100644 branches/Ndi/ulp/wsd/user/extensions.c create mode 100644 branches/Ndi/ulp/wsd/user/ib_cm.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_duplicate.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_iblow.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_ip.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_mem.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_mem.h create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_mngt.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_perfmon.c create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_perfmon.h create mode 100644 branches/Ndi/ulp/wsd/user/ibsp_pnp.c create mode 100644 branches/Ndi/ulp/wsd/user/ibspdebug.c create mode 100644 branches/Ndi/ulp/wsd/user/ibspdebug.h create mode 100644 branches/Ndi/ulp/wsd/user/ibspdefines.h create mode 100644 branches/Ndi/ulp/wsd/user/ibspdll.c create mode 100644 branches/Ndi/ulp/wsd/user/ibspdll.def create mode 100644 branches/Ndi/ulp/wsd/user/ibspdll.h create mode 100644 branches/Ndi/ulp/wsd/user/ibspdll.rc create mode 100644 branches/Ndi/ulp/wsd/user/ibspproto.h create mode 100644 branches/Ndi/ulp/wsd/user/ibspstruct.h create mode 100644 branches/Ndi/ulp/wsd/user/makefile create mode 100644 branches/Ndi/ulp/wsd/user/misc.c create mode 100644 branches/Ndi/ulp/wsd/user/sockinfo.c diff --git a/branches/Ndi/core/al/al.c b/branches/Ndi/core/al/al.c new file mode 100644 index 00000000..0a71d421 --- /dev/null +++ b/branches/Ndi/core/al/al.c @@ -0,0 +1,432 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al.h" +#include "al_ca.h" +#include "al_cm_cep.h" +#include "al_common.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al.tmh" +#endif + +#include "al_mad_pool.h" +#include "al_mgr.h" +#include "al_verbs.h" +#include "ib_common.h" + + +void +destroying_al( + IN al_obj_t *p_obj ); + + +void +free_al( + IN al_obj_t *p_obj ); + + + +/* + * Destroy an instance of the access layer. + */ +#ifdef CL_KERNEL +ib_api_status_t +ib_close_al( + IN const ib_al_handle_t h_al ) +#else +ib_api_status_t +do_close_al( + IN const ib_al_handle_t h_al ) +#endif +{ + AL_ENTER( AL_DBG_MGR ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + + ref_al_obj( &h_al->obj ); + h_al->obj.pfn_destroy( &h_al->obj, NULL ); + + AL_EXIT( AL_DBG_MGR ); + return IB_SUCCESS; +} + + + +void +destroying_al( + IN al_obj_t *p_obj ) +{ + ib_al_handle_t h_al; + cl_list_item_t *p_list_item; + al_sa_req_t *p_sa_req; + + CL_ASSERT( p_obj ); + h_al = PARENT_STRUCT( p_obj, ib_al_t, obj ); + + cl_spinlock_acquire( &p_obj->lock ); + + /* Cancel all outstanding queries. */ + for( p_list_item = cl_qlist_head( &h_al->query_list ); + p_list_item != cl_qlist_end( &h_al->query_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_sa_req = PARENT_STRUCT( p_list_item, al_sa_req_t, list_item ); + al_cancel_sa_req( p_sa_req ); + } + + cl_spinlock_release( &p_obj->lock ); + + /* Cleanup any left-over connections. */ + al_cep_cleanup_al( h_al ); +} + + + +static void +__free_mads( + IN const ib_al_handle_t h_al ) +{ + cl_list_item_t *p_list_item; + al_mad_element_t *p_mad_element; + ib_api_status_t status; + + /* Return all outstanding MADs to their MAD pools. 
*/ + for( p_list_item = cl_qlist_head( &h_al->mad_list ); + p_list_item != cl_qlist_end( &h_al->mad_list ); + p_list_item = cl_qlist_head( &h_al->mad_list ) ) + { + p_mad_element = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item ); + p_mad_element->element.p_next = NULL; + + status = ib_put_mad( &p_mad_element->element ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_put_mad failed with status %s, continuing.\n", + ib_get_err_str(status)) ); + } + } +} + + +void +free_al( + IN al_obj_t *p_obj ) +{ + ib_al_handle_t h_al; + + CL_ASSERT( p_obj ); + h_al = PARENT_STRUCT( p_obj, ib_al_t, obj ); + + /* Free any MADs not returned by the user. */ + __free_mads( h_al ); + +#ifdef CL_KERNEL + cl_vector_destroy( &h_al->hdl_vector ); +#endif + + cl_spinlock_destroy( &h_al->mad_lock ); + destroy_al_obj( &h_al->obj ); + cl_free( h_al ); +} + + +ib_api_status_t +ib_query_ca_by_guid( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ) +{ + ib_ca_handle_t h_ca; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_size ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + h_ca = acquire_ca( ca_guid ); + if( !h_ca ) + { + return IB_INVALID_GUID; + } + status = ib_query_ca( h_ca, p_ca_attr, p_size ); + deref_al_obj( &h_ca->obj ); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +void +al_insert_mad( + IN const ib_al_handle_t h_al, + IN al_mad_element_t* const p_mad ) +{ + /* Assert that the MAD does not already have an owner. */ + CL_ASSERT( !p_mad->h_al ); + + ref_al_obj( &h_al->obj ); + cl_spinlock_acquire( &h_al->mad_lock ); + + /* + * Initialize the h_al field. This field is used to locate the AL + * instance that owns a given MAD. + */ + p_mad->h_al = h_al; + cl_qlist_insert_tail( &h_al->mad_list, &p_mad->al_item ); + + cl_spinlock_release( &h_al->mad_lock ); +} + + + +void +al_remove_mad( + IN al_mad_element_t* const p_mad ) +{ + /* Return if the MAD is not in the AL instance MAD list. */ + if( !p_mad->h_al ) return; + + cl_spinlock_acquire( &p_mad->h_al->mad_lock ); + cl_qlist_remove_item( &p_mad->h_al->mad_list, &p_mad->al_item ); + cl_spinlock_release( &p_mad->h_al->mad_lock ); + + deref_al_obj( &p_mad->h_al->obj ); + p_mad->h_al = NULL; +} + + + +void +al_handoff_mad( + IN const ib_al_handle_t h_al, + IN ib_mad_element_t* const p_mad_element ) +{ + al_mad_element_t *p_mad; + + p_mad = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* + * See if we're handing off to the same AL instance. This can happen if + * we hand off to an internal service that uses the global AL instance. + */ + if( p_mad->h_al == h_al ) + return; + + al_remove_mad( p_mad ); + al_insert_mad( h_al, p_mad ); +} + + + +void +al_insert_key( + IN const ib_al_handle_t h_al, + IN al_pool_key_t* const p_pool_key ) +{ + ref_al_obj( &h_al->obj ); + p_pool_key->h_al = h_al; + + cl_spinlock_acquire( &h_al->obj.lock ); + p_pool_key->in_al_list = TRUE; + cl_qlist_insert_tail( &h_al->key_list, &p_pool_key->al_item ); + cl_spinlock_release( &h_al->obj.lock ); +} + + + +/* + * Remove the pool_key from AL's list. This is called from the pool_key's + * cleanup routine. 
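The list handling above (the MAD list walk, al_remove_mad, al_insert_key) leans on PARENT_STRUCT to recover a containing structure from an embedded cl_list_item_t. A minimal standalone sketch of the idiom follows; the macro definition here is an assumption modeled on the classic container_of pattern, not copied from complib, and the toy types are illustrative only.

#include <stddef.h>
#include <stdio.h>

/* Assumed definition; complib's actual PARENT_STRUCT may differ in detail. */
#define PARENT_STRUCT( p_member, type, member ) \
	((type*)((char*)(p_member) - offsetof( type, member )))

typedef struct _toy_list_item
{
	struct _toy_list_item	*p_next;
} toy_list_item_t;

typedef struct _toy_mad_element
{
	int				id;
	toy_list_item_t	al_item;	/* embedded link, as in al_mad_element_t */
} toy_mad_element_t;

int main( void )
{
	toy_mad_element_t	mad = { 42, { NULL } };
	toy_list_item_t		*p_item = &mad.al_item;
	toy_mad_element_t	*p_mad;

	/* Recover the containing element from its embedded list item. */
	p_mad = PARENT_STRUCT( p_item, toy_mad_element_t, al_item );
	printf( "id=%d\n", p_mad->id );	/* prints id=42 */
	return 0;
}

Because the link is embedded rather than allocated separately, list membership costs no extra allocation and removal never fails, which is why the AL objects above all carry their own list items.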
+ */ +void +al_remove_key( + IN al_pool_key_t* const p_pool_key ) +{ + /* Return if the pool key is not in the AL instance key list. */ + if( !p_pool_key->h_al ) return; + + cl_spinlock_acquire( &p_pool_key->h_al->obj.lock ); + if( p_pool_key->in_al_list ) + { + cl_qlist_remove_item( &p_pool_key->h_al->key_list, + &p_pool_key->al_item ); + } + cl_spinlock_release( &p_pool_key->h_al->obj.lock ); + + deref_al_obj( &p_pool_key->h_al->obj ); + p_pool_key->h_al = NULL; +} + + + +void +al_dereg_pool( + IN const ib_al_handle_t h_al, + IN ib_pool_handle_t const h_pool ) +{ + cl_qlist_t destroy_list; + cl_list_item_t *p_list_item, *p_next_item; + al_pool_key_t *p_pool_key; + + /* + * Deregister matching pool keys. This may deregister memory, so we + * cannot do this while holding a lock. So we need to move the pool + * keys to a destroy_list. + */ + cl_qlist_init( &destroy_list ); + + /* Search for keys associated with the given PD or MAD pool. */ + cl_spinlock_acquire( &h_al->obj.lock ); + for( p_list_item = cl_qlist_head( &h_al->key_list ); + p_list_item != cl_qlist_end( &h_al->key_list ); + p_list_item = p_next_item ) + { + /* Cache the next item in case we remove this one. */ + p_next_item = cl_qlist_next( p_list_item ); + p_pool_key = PARENT_STRUCT( p_list_item, al_pool_key_t, al_item ); + + if( p_pool_key->h_pool == h_pool ) + { + /* + * Destroy this pool key. This only deregisters memory in + * user-mode since we use phys reg in kernel mode, so we + * can do this while holding a lock. + */ + ref_al_obj( &p_pool_key->obj ); + p_pool_key->in_al_list = FALSE; + cl_qlist_remove_item( &h_al->key_list, &p_pool_key->al_item ); + cl_qlist_insert_tail( &destroy_list, &p_pool_key->al_item ); + } + } + cl_spinlock_release( &h_al->obj.lock ); + + /* Destroy all pool_keys associated with the given pool. */ + for( p_list_item = cl_qlist_remove_head( &destroy_list ); + p_list_item != cl_qlist_end( &destroy_list ); + p_list_item = cl_qlist_remove_head( &destroy_list ) ) + { + /* Mark that we've removed the item from the list.
*/ + p_pool_key = PARENT_STRUCT( p_list_item, al_pool_key_t, al_item ); + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + } +} + + +void +al_insert_query( + IN const ib_al_handle_t h_al, + IN al_query_t* const p_query ) +{ + p_query->h_al = h_al; + ref_al_obj( &h_al->obj ); + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_insert_tail( &h_al->query_list, &p_query->sa_req.list_item ); + cl_spinlock_release( &h_al->obj.lock ); +} + + +void +al_remove_query( + IN al_query_t* const p_query ) +{ + cl_spinlock_acquire( &p_query->h_al->obj.lock ); + cl_qlist_remove_item( &p_query->h_al->query_list, + &p_query->sa_req.list_item ); + cl_spinlock_release( &p_query->h_al->obj.lock ); + deref_al_obj( &p_query->h_al->obj ); +} + + + +static cl_status_t +__match_query( + IN const cl_list_item_t* const p_item, + IN void* context ) +{ + al_sa_req_t *p_sa_req; + + p_sa_req = PARENT_STRUCT( p_item, al_sa_req_t, list_item ); + if( context == PARENT_STRUCT( p_item, al_query_t, sa_req ) ) + return IB_SUCCESS; + + return IB_NOT_FOUND; +} + + +void +ib_cancel_query( + IN const ib_al_handle_t h_al, + IN const ib_query_handle_t h_query ) +{ + cl_list_item_t *p_item; + + AL_ENTER( AL_DBG_QUERY ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return; + } + + cl_spinlock_acquire( &h_al->obj.lock ); + p_item = + cl_qlist_find_from_head( &h_al->query_list, __match_query, h_query ); + if( p_item != cl_qlist_end( &h_al->query_list ) ) + al_cancel_sa_req( &h_query->sa_req ); + + cl_spinlock_release( &h_al->obj.lock ); + + AL_EXIT( AL_DBG_QUERY ); +} diff --git a/branches/Ndi/core/al/al.h b/branches/Ndi/core/al/al.h new file mode 100644 index 00000000..df8215e2 --- /dev/null +++ b/branches/Ndi/core/al/al.h @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_H__) +#define __AL_H__ + +#include +#include +#include +#include + +#include "al_common.h" +#include "al_mad_pool.h" +#include "al_query.h" +#include "al_reg_svc.h" +#ifdef CL_KERNEL +#include "al_proxy.h" +#endif + + + +typedef struct _al_handle +{ + uint32_t type; + al_obj_t *p_obj; + +} al_handle_t; + + +#define AL_INVALID_HANDLE 0 + + +/* + * AL instance structure. 
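al_dereg_pool above is a textbook two-phase teardown: unlink matching entries while holding the lock, then run the destructors from a private list after the lock is dropped, since destruction may block or re-enter AL. A condensed sketch of the shape, written against the same complib calls used above (the match callback and empty destroy step are placeholders):

static void
destroy_matching(
	IN cl_spinlock_t	*p_lock,
	IN cl_qlist_t		*p_src,
	IN int				(*pfn_match)( cl_list_item_t* ) )
{
	cl_qlist_t		destroy_list;
	cl_list_item_t	*p_item, *p_next;

	cl_qlist_init( &destroy_list );

	/* Phase 1: unlink matches while holding the lock. */
	cl_spinlock_acquire( p_lock );
	for( p_item = cl_qlist_head( p_src );
		p_item != cl_qlist_end( p_src );
		p_item = p_next )
	{
		/* Cache the successor before a possible removal. */
		p_next = cl_qlist_next( p_item );
		if( pfn_match( p_item ) )
		{
			cl_qlist_remove_item( p_src, p_item );
			cl_qlist_insert_tail( &destroy_list, p_item );
		}
	}
	cl_spinlock_release( p_lock );

	/* Phase 2: destroy outside the lock; destructors may block. */
	for( p_item = cl_qlist_remove_head( &destroy_list );
		p_item != cl_qlist_end( &destroy_list );
		p_item = cl_qlist_remove_head( &destroy_list ) )
	{
		/* ... invoke the item's destroy handler here ... */
	}
}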
+ */ +typedef struct _ib_al +{ + al_obj_t obj; + + /* Asynchronous processing item used to deregister services with the SA. */ + cl_async_proc_item_t dereg_svc_async; + + cl_qlist_t mad_list; + /* + * The MAD list must have a dedicated lock protecting it to prevent + * deadlocks. The MAD service gets/puts MADs from/to pools, which + * needs to track the MAD in the associated AL instance's mad_list. + * When cancelling SA requests, the AL instance's object lock is held + * and MAD cancellation takes the MAD service's lock. + */ + cl_spinlock_t mad_lock; + + cl_qlist_t key_list; + cl_qlist_t query_list; + cl_qlist_t cep_list; + +#ifdef CL_KERNEL + /* Handle manager is only needed in the kernel. */ + cl_vector_t hdl_vector; + uint64_t free_hdl; + + /* Proxy context. */ + al_dev_open_context_t *p_context; +#endif + +} ib_al_t; + + + +ib_api_status_t +init_al( + IN al_obj_t *p_parent_obj, + IN const ib_al_handle_t h_al ); + + +void +destroying_al( + IN al_obj_t *p_obj ); + + +void +free_al( + IN al_obj_t *p_obj ); + + + +/* + * Insert a pnp registration in the PnP vector. + */ +ib_api_status_t +al_insert_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_handle_t h_pnp ); + +/* + * Remove a pnp registration from the PnP vector. + */ +void +al_remove_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_handle_t h_pnp ); + + +void +al_insert_mad( + IN const ib_al_handle_t h_al, + IN al_mad_element_t* const p_mad ); + + +void +al_remove_mad( + IN al_mad_element_t* const p_mad ); + + +void +al_handoff_mad( + IN const ib_al_handle_t h_al, + IN ib_mad_element_t* const p_mad_element ); + + +void +al_insert_key( + IN const ib_al_handle_t h_al, + IN al_pool_key_t* const p_pool_key ); + + +void +al_remove_key( + IN al_pool_key_t* const p_pool_key ); + + +void +al_dereg_pool( + IN const ib_al_handle_t h_al, + IN ib_pool_handle_t const h_pool ); + + +void +al_insert_query( + IN const ib_al_handle_t h_al, + IN al_query_t* const p_query ); + + +void +al_remove_query( + IN al_query_t* const p_query ); + +void +al_insert_conn( + IN const ib_al_handle_t h_al, + IN const ib_cm_handle_t h_conn ); + +void +al_remove_conn( + IN const ib_cm_handle_t h_conn ); + +#ifdef CL_KERNEL +// TODO: Once all things in the handle vector are al_obj_t, +// TODO: we can remove the type parameter. 
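The kernel half keeps a per-instance cl_vector_t (hdl_vector above) so user-mode callers see opaque 64-bit handles instead of kernel pointers; the helpers declared just below wrap insertion and lookup in the instance lock. A hedged usage sketch against those declarations; it assumes the AL headers are included, that a valid h_al and p_obj exist, and that al_hdl_lock_insert reports failure by returning AL_INVALID_HANDLE (an assumption, since the failure value is not shown here):

#ifdef CL_KERNEL
static uint64_t
expose_obj_to_um(
	IN ib_al_handle_t	h_al,
	IN al_obj_t* const	p_obj )
{
	uint64_t	hdl;

	/* Insert under the instance lock; returns the new opaque handle. */
	hdl = al_hdl_lock_insert( h_al, p_obj, AL_BASE_TYPE( p_obj->type ) );
	if( hdl == AL_INVALID_HANDLE )
		return AL_INVALID_HANDLE;	/* assumed: vector growth failed */

	/* A later IOCTL path would validate and reference the object with:
	 * p_obj = al_hdl_ref( h_al, hdl, expected_type ); */
	return hdl;
}
#endif	/* CL_KERNEL */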
+uint64_t +al_hdl_insert( + IN const ib_al_handle_t h_al, + IN void* const p_obj, + IN const uint32_t type ); + + +static inline uint64_t +al_hdl_lock_insert( + IN const ib_al_handle_t h_al, + IN void* const p_obj, + IN const uint32_t type ) +{ + uint64_t hdl; + cl_spinlock_acquire( &h_al->obj.lock ); + hdl = al_hdl_insert( h_al, p_obj, type ); + cl_spinlock_release( &h_al->obj.lock ); + return hdl; +} + + +void +al_hdl_free( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl ); + + +static inline uint64_t +al_hdl_insert_obj( + IN al_obj_t* const p_obj ) +{ + uint64_t hdl; + + CL_ASSERT( p_obj->h_al ); + + cl_spinlock_acquire( &p_obj->h_al->obj.lock ); + hdl = al_hdl_insert( p_obj->h_al, p_obj, AL_BASE_TYPE( p_obj->type ) ); + cl_spinlock_release( &p_obj->h_al->obj.lock ); + + return hdl; +} + + +static inline void +al_hdl_free_obj( + IN al_obj_t* const p_obj ) +{ + CL_ASSERT( p_obj->h_al ); + CL_ASSERT( p_obj->hdl != AL_INVALID_HANDLE ); + cl_spinlock_acquire( &p_obj->h_al->obj.lock ); + + al_hdl_free( p_obj->h_al, p_obj->hdl ); + p_obj->hdl = AL_INVALID_HANDLE; + p_obj->hdl_valid = FALSE; + + cl_spinlock_release( &p_obj->h_al->obj.lock ); +} + + +al_obj_t* +al_hdl_ref( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ); + +/* Validate an object. */ +void* +al_hdl_chk( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ); + +/* Validate and remove an object. */ +void* +al_hdl_get( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ); + +/* Validate and removes a MAD element. */ +static inline ib_mad_element_t* +al_hdl_get_mad( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl ) +{ + return (ib_mad_element_t*)al_hdl_get( h_al, hdl, AL_OBJ_TYPE_H_MAD ); +} + +/* Validate and reference a connection. Used for MRA */ +struct _al_conn* +al_hdl_ref_conn( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t sub_type ); + +/* Validate, reference, and remove a connection. */ +struct _al_conn* +al_hdl_get_conn( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t sub_type ); + +#endif /* CL_KERNEL */ + +#endif /* __AL_H__ */ diff --git a/branches/Ndi/core/al/al_av.c b/branches/Ndi/core/al/al_av.c new file mode 100644 index 00000000..503b22bc --- /dev/null +++ b/branches/Ndi/core/al/al_av.c @@ -0,0 +1,314 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include "al.h" +#include "al_av.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_av.tmh" +#endif + +#include "al_pd.h" +#include "al_res_mgr.h" +#include "al_verbs.h" + + + +static void +__cleanup_av( + IN struct _al_obj *p_obj ); + + +static void +__return_av( + IN al_obj_t *p_obj ); + + + +cl_status_t +av_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + ib_api_status_t status; + ib_av_handle_t h_av; + + UNUSED_PARAM( context ); + + h_av = (ib_av_handle_t)p_object; + cl_memclr( h_av, sizeof( ib_av_t ) ); + + construct_al_obj( &h_av->obj, AL_OBJ_TYPE_H_AV ); + status = init_al_obj( &h_av->obj, NULL, FALSE, NULL, + __cleanup_av, __return_av ); + if( status != IB_SUCCESS ) + { + return CL_ERROR; + } + + *pp_pool_item = &((ib_av_handle_t)p_object)->obj.pool_item; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &h_av->obj ); + + return CL_SUCCESS; +} + + +void +av_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ) +{ + al_obj_t *p_obj; + + UNUSED_PARAM( context ); + + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * The AV is being totally destroyed. Modify the free_cb to destroy the + * AL object. + */ + p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj; + ref_al_obj( p_obj ); + p_obj->pfn_destroy( p_obj, NULL ); +} + + + +ib_api_status_t +create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + OUT ib_av_handle_t* const ph_av, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + ib_av_handle_t h_av; + + CL_ASSERT( h_pd ); + + if( !p_av_attr || !ph_av ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + /* Get an AV tracking structure. */ + h_av = alloc_av(); + if( !h_av ) + return IB_INSUFFICIENT_MEMORY; + + status = attach_al_obj( &h_pd->obj, &h_av->obj ); + if( status != IB_SUCCESS ) + { + h_av->obj.pfn_destroy( &h_av->obj, NULL ); + return status; + } + + /* Create the address vector. */ + status = verbs_create_av( h_pd, p_av_attr, h_av ); + if( status != IB_SUCCESS ) + { + h_av->obj.pfn_destroy( &h_av->obj, NULL ); + return status; + } + + /* keep a copy of the av for special qp access */ + h_av->av_attr = *p_av_attr; + *ph_av = h_av; + + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_destroy_av( + IN const ib_av_handle_t h_av ) +{ + AL_ENTER( AL_DBG_AV ); + + if( AL_OBJ_INVALID_HANDLE( h_av, AL_OBJ_TYPE_H_AV ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AV_HANDLE\n") ); + return IB_INVALID_AV_HANDLE; + } + + ref_al_obj( &h_av->obj ); + h_av->obj.pfn_destroy( &h_av->obj, NULL ); + + AL_EXIT( AL_DBG_AV ); + return IB_SUCCESS; +} + + + +static void +__cleanup_av( + IN struct _al_obj *p_obj ) +{ + ib_api_status_t status; + ib_av_handle_t h_av; + + CL_ASSERT( p_obj ); + h_av = PARENT_STRUCT( p_obj, ib_av_t, obj ); + + /* Destroy the AV. 
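av_ctor and av_dtor above show the pooled-object lifecycle: the ctor builds each AV once and releases the reference taken by init_al_obj, while the dtor swaps pfn_free to destroy_al_obj so that the very same destroy path frees the object for good instead of recycling it. A standalone sketch of that free-callback swap; the names here are illustrative, not taken from the patch:

#include <stdio.h>

typedef struct _toy_obj
{
	void	(*pfn_free)( struct _toy_obj* );
} toy_obj_t;

static void return_to_pool( toy_obj_t *p_obj )
{
	(void)p_obj;
	puts( "returned to pool" );	/* normal path: object is recycled */
}

static void final_destroy( toy_obj_t *p_obj )
{
	(void)p_obj;
	puts( "destroyed" );		/* pool teardown: object is freed for good */
}

int main( void )
{
	toy_obj_t	av = { return_to_pool };

	av.pfn_free( &av );				/* everyday destroy: recycle */

	av.pfn_free = final_destroy;	/* dtor swaps the callback... */
	av.pfn_free( &av );				/* ...so the same call now frees */
	return 0;
}

Routing both outcomes through one callback keeps every caller of the destroy path identical; only the pool decides whether an AV is recycled or truly torn down.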
*/ + if( verbs_check_av( h_av ) ) + { + status = verbs_destroy_av(h_av); + CL_ASSERT( status == IB_SUCCESS ); +#ifndef CL_KERNEL + h_av->obj.hdl = AL_INVALID_HANDLE; +#endif + h_av->h_ci_av = NULL; + } +} + + + +static void +__return_av( + IN al_obj_t *p_obj ) +{ + ib_av_handle_t h_av; + + h_av = PARENT_STRUCT( p_obj, ib_av_t, obj ); + reset_al_obj( p_obj ); + put_av( h_av ); +} + + + +ib_api_status_t +ib_query_av( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd ) +{ + return query_av( h_av, p_av_attr, ph_pd, NULL ); +} + + + +ib_api_status_t +query_av( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_AV ); + + if( AL_OBJ_INVALID_HANDLE( h_av, AL_OBJ_TYPE_H_AV ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AV_HANDLE\n") ); + return IB_INVALID_AV_HANDLE; + } + if( !p_av_attr || !ph_pd ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_av(h_av, p_av_attr, ph_pd); + + /* Record AL's PD handle. */ + if( status == IB_SUCCESS ) + { + *ph_pd = PARENT_STRUCT( h_av->obj.p_parent_obj, ib_pd_t, obj ); + h_av->av_attr = *p_av_attr; + } + + AL_EXIT( AL_DBG_AV ); + return status; +} + + + +ib_api_status_t +ib_modify_av( + IN const ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_mod ) +{ + return modify_av( h_av, p_av_mod, NULL ); +} + + +ib_api_status_t +modify_av( + IN const ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_AV ); + + if( AL_OBJ_INVALID_HANDLE( h_av, AL_OBJ_TYPE_H_AV ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AV_HANDLE\n") ); + return IB_INVALID_AV_HANDLE; + } + if( !p_av_mod ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_modify_av(h_av, p_av_mod); + + /* Record av for special qp access */ + if( status == IB_SUCCESS ) + { + h_av->av_attr = *p_av_mod; + } + + AL_EXIT( AL_DBG_AV ); + return status; +} diff --git a/branches/Ndi/core/al/al_av.h b/branches/Ndi/core/al/al_av.h new file mode 100644 index 00000000..8b4f6f9e --- /dev/null +++ b/branches/Ndi/core/al/al_av.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_AV_H__) +#define __AL_AV_H__ + + +#include "al_ca.h" +#include +#include + + +typedef struct _ib_av +{ + al_obj_t obj; + ib_av_handle_t h_ci_av; /* Actual HW CI AV. */ + + ib_av_attr_t av_attr; + + cl_list_item_t list_item; /* item to manage AL AV's */ + +} ib_av_t; + + + +cl_status_t +av_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + +void +av_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ); + + +ib_api_status_t +create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + OUT ib_av_handle_t* const ph_av, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +query_av( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +modify_av( + IN const ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +#endif /* __AL_AV_H__ */ diff --git a/branches/Ndi/core/al/al_ca.c b/branches/Ndi/core/al/al_ca.c new file mode 100644 index 00000000..9bfe173f --- /dev/null +++ b/branches/Ndi/core/al/al_ca.c @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include + +#include "al.h" +#include "al_av.h" +#include "al_ca.h" +#include "al_cq.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_ca.tmh" +#endif + +#include "al_mgr.h" +#include "al_mr.h" +#include "al_mw.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_verbs.h" +#include "ib_common.h" + + +static void +__destroying_ca( + IN struct _al_obj *p_obj ); + +static void +__cleanup_ca( + IN struct _al_obj *p_obj ); + +static void +__free_ca( + IN struct _al_obj *p_obj ); + + + +ib_api_status_t +ib_open_ca( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + IN const ib_pfn_event_cb_t pfn_ca_event_cb OPTIONAL, + IN const void* const ca_context, + OUT ib_ca_handle_t* const ph_ca ) +{ + ib_api_status_t status; + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + + status = open_ca( h_al, ca_guid, pfn_ca_event_cb, ca_context, ph_ca, NULL ); + + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_ca)->obj ); + + return status; +} + + +ib_api_status_t +open_ca( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + IN const ib_pfn_event_cb_t pfn_ca_event_cb OPTIONAL, + IN const void* const ca_context, + OUT ib_ca_handle_t* const ph_ca, + IN OUT ci_umv_buf_t* const p_umv_buf OPTIONAL ) +{ + ib_ca_handle_t h_ca; + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_CA; + + AL_ENTER( AL_DBG_CA ); + if( !ph_ca ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a CA instance. */ + h_ca = (ib_ca_handle_t)cl_zalloc( sizeof( ib_ca_t ) ); + if( !h_ca ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IB_INSUFFICIENT_MEMORY\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the CA. */ + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + construct_al_obj( &h_ca->obj, obj_type ); + h_ca->pfn_event_cb = pfn_ca_event_cb; + + status = init_al_obj( &h_ca->obj, ca_context, TRUE, + NULL, __cleanup_ca, __free_ca ); + if( status != IB_SUCCESS ) + { + __free_ca( &h_ca->obj ); + AL_EXIT( AL_DBG_CA ); + return status; + } + + status = attach_al_obj( &h_al->obj, &h_ca->obj ); + if( status != IB_SUCCESS ) + { + h_ca->obj.pfn_destroy( &h_ca->obj, NULL ); + AL_EXIT( AL_DBG_CA ); + return status; + } + + /* Obtain a reference to the correct CI CA. */ + h_ca->obj.p_ci_ca = acquire_ci_ca( ca_guid, h_ca ); + if( !h_ca->obj.p_ci_ca ) + { + h_ca->obj.pfn_destroy( &h_ca->obj, NULL ); + AL_EXIT( AL_DBG_CA ); + return IB_INVALID_GUID; + } + +#if defined(CL_KERNEL) + /* If a UM open, pass to the VPD to establish the UM CA context. */ + if( p_umv_buf ) + { + status = h_ca->obj.p_ci_ca->verbs.um_open_ca( + h_ca->obj.p_ci_ca->h_ci_ca, p_umv_buf, &h_ca->h_um_ca ); + if( status != IB_SUCCESS ) + { + h_ca->obj.pfn_destroy( &h_ca->obj, NULL ); + AL_EXIT( AL_DBG_CA ); + return status; + } + } +#endif /* defined(CL_KERNEL) */ + + *ph_ca = h_ca; + + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + +/* + * Destroy an instance of an AL channel adapter. 
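A caller's-eye sketch of the ib_open_ca contract implemented above and the ib_close_ca declared just below. It assumes an AL instance obtained elsewhere (e.g. via ib_open_al, which is not part of this file) and a CA GUID discovered through enumeration; the event callback body is a placeholder:

static void
my_ca_event_cb(
	IN ib_async_event_rec_t	*p_event_rec )
{
	/* Receives CA events; context is the ca_context given to ib_open_ca. */
	UNUSED_PARAM( p_event_rec );
}

static ib_api_status_t
open_and_close(
	IN ib_al_handle_t	h_al,
	IN ib_net64_t		ca_guid )
{
	ib_ca_handle_t	h_ca;
	ib_api_status_t	status;

	status = ib_open_ca( h_al, ca_guid, my_ca_event_cb, NULL, &h_ca );
	if( status != IB_SUCCESS )
		return status;	/* e.g. IB_INVALID_GUID for an unknown CA */

	/* ... use the CA ... */

	return ib_close_ca( h_ca, NULL );	/* NULL: no destroy notification */
}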
+ */ +ib_api_status_t +ib_close_ca( + IN const ib_ca_handle_t h_ca, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + + ref_al_obj( &h_ca->obj ); + h_ca->obj.pfn_destroy( &h_ca->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + +/* + * Release all resources associated with the CA. + */ +static void +__cleanup_ca( + IN struct _al_obj *p_obj ) +{ +#if defined(CL_KERNEL) + ib_ca_handle_t h_ca; + + CL_ASSERT( p_obj ); + h_ca = PARENT_STRUCT( p_obj, ib_ca_t, obj ); + if( h_ca->h_um_ca ) + { + h_ca->obj.p_ci_ca->verbs.um_close_ca( + h_ca->obj.p_ci_ca->h_ci_ca, h_ca->h_um_ca ); + } +#endif + + /* It is now safe to release the CI CA. */ + if( p_obj->p_ci_ca ) + release_ci_ca( PARENT_STRUCT( p_obj, ib_ca_t, obj ) ); +} + + + +static void +__free_ca( + IN struct _al_obj *p_obj ) +{ + ib_ca_handle_t h_ca; + + CL_ASSERT( p_obj ); + h_ca = PARENT_STRUCT( p_obj, ib_ca_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_ca ); +} + + + +ib_api_status_t +ib_query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ) +{ + return query_ca( h_ca, p_ca_attr, p_size, NULL ); +} + + + +ib_api_status_t +query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + if( !p_size ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_ca( h_ca, p_ca_attr, p_size ); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +ib_api_status_t +ib_modify_ca( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* const p_port_attr_mod ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + if( !p_port_attr_mod || (ca_mod & IB_CA_MOD_RESERVED_MASK) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +/* + * Allocate a new protection domain. + */ +ib_api_status_t +ib_alloc_pd( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN const void * const pd_context, + OUT ib_pd_handle_t* const ph_pd ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_PD ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + + status = alloc_pd( h_ca, pd_type, pd_context, ph_pd, NULL ); + + /* Release the reference taken in init_al_obj. 
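query_ca above forwards to the verbs provider, which fills p_size when the buffer is missing or too small; callers therefore probe once with a NULL buffer and retry, exactly as get_port_info does later in this patch. A minimal sketch of that sizing idiom, using the cl_malloc/cl_free helpers seen elsewhere in this file:

static ib_api_status_t
query_ca_attr(
	IN ib_ca_handle_t	h_ca,
	OUT ib_ca_attr_t	**pp_attr )
{
	uint32_t		size = 0;
	ib_ca_attr_t	*p_attr;
	ib_api_status_t	status;

	/* First call: NULL buffer; expect IB_INSUFFICIENT_MEMORY plus size. */
	status = ib_query_ca( h_ca, NULL, &size );
	if( status != IB_INSUFFICIENT_MEMORY )
		return status;

	p_attr = (ib_ca_attr_t*)cl_malloc( size );
	if( !p_attr )
		return IB_INSUFFICIENT_MEMORY;

	/* Second call: the buffer is now large enough. */
	status = ib_query_ca( h_ca, p_attr, &size );
	if( status != IB_SUCCESS )
	{
		cl_free( p_attr );
		return status;
	}

	*pp_attr = p_attr;	/* caller releases with cl_free */
	return IB_SUCCESS;
}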
*/ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_pd)->obj ); + + AL_EXIT( AL_DBG_PD ); + return status; +} + + + +ib_api_status_t +ib_create_cq( + IN const ib_ca_handle_t h_ca, + IN OUT ib_cq_create_t* const p_cq_create, + IN const void* const cq_context, + IN const ib_pfn_event_cb_t pfn_cq_event_cb OPTIONAL, + OUT ib_cq_handle_t* const ph_cq ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CQ ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + + status = create_cq( h_ca, p_cq_create, cq_context, pfn_cq_event_cb, + ph_cq, NULL ); + + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_cq)->obj ); + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +al_convert_to_ci_handles( + IN void* __ptr64 * const dst_handle_array, + IN const void* __ptr64 * const src_handle_array, + IN uint32_t num_handles ) +{ + uint32_t i; + al_obj_t *p_al_obj; + + for( i = 0; i < num_handles; i++ ) + { + p_al_obj = (al_obj_t*)(const void*)src_handle_array[i]; + switch( p_al_obj->type ) + { + case AL_OBJ_TYPE_H_PD: + dst_handle_array[i] = ((ib_pd_t*)p_al_obj)->h_ci_pd; + break; + case AL_OBJ_TYPE_H_CQ: + dst_handle_array[i] = ((ib_cq_t*)p_al_obj)->h_ci_cq; + break; + case AL_OBJ_TYPE_H_AV: + dst_handle_array[i] = ((ib_av_t*)p_al_obj)->h_ci_av; + break; + case AL_OBJ_TYPE_H_QP: + dst_handle_array[i] = ((ib_qp_t*)p_al_obj)->h_ci_qp; + break; + case AL_OBJ_TYPE_H_MR: + dst_handle_array[i] = ((ib_mr_t*)p_al_obj)->h_ci_mr; + break; + case AL_OBJ_TYPE_H_MW: + dst_handle_array[i] = ((ib_mw_t*)p_al_obj)->h_ci_mw; + break; + default: + /* Bad handle type. */ + CL_ASSERT( p_al_obj->type == AL_OBJ_TYPE_H_PD || + p_al_obj->type == AL_OBJ_TYPE_H_CQ || + p_al_obj->type == AL_OBJ_TYPE_H_AV || + p_al_obj->type == AL_OBJ_TYPE_H_QP || + p_al_obj->type == AL_OBJ_TYPE_H_MR || + p_al_obj->type == AL_OBJ_TYPE_H_MW ); + return IB_INVALID_HANDLE; + } + } + + return IB_SUCCESS; +} diff --git a/branches/Ndi/core/al/al_ca.h b/branches/Ndi/core/al/al_ca.h new file mode 100644 index 00000000..e23473c7 --- /dev/null +++ b/branches/Ndi/core/al/al_ca.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
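ib_alloc_pd and ib_create_cq above share one shape: create through AL, then drop the init_al_obj reference on success. A hedged bring-up sketch of chaining them; IB_PDT_NORMAL, the ib_cq_create_t field names (size, pfn_comp_cb), the completion-callback signature, and ib_dealloc_pd are all assumptions from the public AL headers and do not appear in this excerpt:

static void
my_comp_cb(
	IN const ib_cq_handle_t	h_cq,
	IN void					*cq_context )
{
	/* Assumed ib_pfn_comp_cb_t shape; poll completions here. */
	UNUSED_PARAM( h_cq );
	UNUSED_PARAM( cq_context );
}

static ib_api_status_t
bring_up_pd_and_cq(
	IN ib_ca_handle_t	h_ca,
	OUT ib_pd_handle_t	*ph_pd,
	OUT ib_cq_handle_t	*ph_cq )
{
	ib_cq_create_t	cq_create;
	ib_api_status_t	status;

	/* IB_PDT_NORMAL is an assumed ib_pd_type_t enumerator. */
	status = ib_alloc_pd( h_ca, IB_PDT_NORMAL, NULL, ph_pd );
	if( status != IB_SUCCESS )
		return status;

	cl_memclr( &cq_create, sizeof( cq_create ) );
	cq_create.size = 128;				/* assumed field: requested CQ depth */
	cq_create.pfn_comp_cb = my_comp_cb;	/* assumed field */

	/* p_cq_create is IN OUT: the provider may round the size up. */
	status = ib_create_cq( h_ca, &cq_create, NULL, NULL, ph_cq );
	if( status != IB_SUCCESS )
		ib_dealloc_pd( *ph_pd, NULL );	/* assumed counterpart to ib_alloc_pd */

	return status;
}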
+ * + * $Id$ + */ + +#if !defined(__AL_CA_H__) +#define __AL_CA_H__ + +#include +#include + +#include "al_common.h" +#include "al_ci_ca.h" + + + +typedef struct _ib_ca +{ + al_obj_t obj; + + ib_pfn_event_cb_t pfn_event_cb; + cl_list_item_t list_item; +#if defined(CL_KERNEL) + ib_ca_handle_t h_um_ca; +#endif + +} ib_ca_t; + + +ib_api_status_t +open_ca( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + IN const ib_pfn_event_cb_t pfn_ca_event_cb OPTIONAL, + IN const void* const ca_context, + OUT ib_ca_handle_t* const ph_ca, + IN OUT ci_umv_buf_t* const p_umv_buf OPTIONAL ); + + +ib_api_status_t +query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +al_convert_to_ci_handles( + IN void* __ptr64 * const dst_handle_array, + IN const void* __ptr64 * const src_handle_array, + IN uint32_t num_handles ); + + +#endif /* __AL_CA_H__ */ diff --git a/branches/Ndi/core/al/al_ci_ca.h b/branches/Ndi/core/al/al_ci_ca.h new file mode 100644 index 00000000..d0405ef2 --- /dev/null +++ b/branches/Ndi/core/al/al_ci_ca.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_CI_CA_H__) +#define __AL_CI_CA_H__ + +#include +#ifdef CL_KERNEL +#include +#include +#else +#include "ual_ci_ca.h" +#endif /* CL_KERNEL */ + + +#include +#include +#include + +#include "al_common.h" + + +#ifdef CL_KERNEL +typedef ci_interface_t verbs_interface_t; + +ib_api_status_t +create_ci_ca( + IN al_obj_t *p_parent_obj, + IN const ci_interface_t* p_ci ); + +DEVICE_OBJECT* +get_ca_dev( + IN const ib_ca_handle_t h_ca ); +#endif + + + +typedef struct _al_ci_ca +{ + al_obj_t obj; + cl_list_item_t list_item; + + cl_async_proc_item_t dereg_async_item; + + verbs_interface_t verbs; + + ib_ca_handle_t h_ci_ca; /* CI handle */ + ib_ca_handle_t h_ca; /* AL handle */ + ib_pd_handle_t h_pd; /* AL handle */ + ib_pd_handle_t h_pd_alias; /* AL handle */ + ib_pool_key_t pool_key; /* AL handle */ + + /* Opened instances of this CA. */ + cl_qlist_t ca_list; + + /* + * Last known attributes as reported by the PnP manager. + * Updated by the PnP manager through the asynchronous processing thread. 
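The p_pnp_attr/p_user_attr pair declared below is two halves of one allocation: ci_ca_update_attr (later in this patch) sizes the buffer at twice the reported attribute size, keeps the PnP snapshot in the front half, and uses the back half as the user-facing copy guarded by attr_lock. A standalone sketch of that layout, with a fixed-size stand-in for the variable-length ib_ca_attr_t:

#include <stdlib.h>
#include <string.h>

typedef struct _toy_attr { int num_ports; /* ... */ } toy_attr_t;

int main( void )
{
	size_t		attr_size = sizeof( toy_attr_t );	/* really provider-reported */
	char		*p_buf = calloc( 2, attr_size );
	toy_attr_t	*p_pnp_attr, *p_user_attr;

	if( !p_buf )
		return 1;

	p_pnp_attr = (toy_attr_t*)p_buf;				/* front half: PnP snapshot */
	p_user_attr = (toy_attr_t*)(p_buf + attr_size);	/* back half: user copy */

	p_pnp_attr->num_ports = 2;
	memcpy( p_user_attr, p_pnp_attr, attr_size );	/* done under attr_lock in AL */

	free( p_buf );	/* one free releases both halves */
	return 0;
}

The single allocation guarantees both snapshots live and die together, so swapping in a fresh attribute buffer never leaves a dangling user-facing half.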
+ */ + ib_ca_attr_t *p_pnp_attr; + ib_ca_attr_t *p_user_attr; + cl_spinlock_t attr_lock; + + cl_qpool_t event_pool; + + uint8_t num_ports; + + /* Shared memory registrations across processes. */ + cl_qlist_t shmid_list; + + /* Array of port GUIDs on this CI CA. */ + ib_net64_t *port_array; + +} al_ci_ca_t; + + +ib_api_status_t +get_port_info( + IN al_ci_ca_t *p_ci_ca ); + + +/* + * Asynchronous event reporting. + */ +typedef struct _event_item +{ + cl_async_proc_item_t async_item; + ib_async_event_rec_t event_rec; + +} event_item_t; + + +void +add_ca( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_ca_handle_t h_ca ); + +void +remove_ca( + IN const ib_ca_handle_t h_ca ); + + +void +ca_event_cb( + IN ib_async_event_rec_t *p_event_rec ); + +void +free_ci_ca( + IN al_obj_t* p_obj ); + + +void +ci_ca_async_event( + IN const ib_async_event_rec_t* const p_event_rec ); + + +struct _al_shmid; + +void +add_shmid( + IN al_ci_ca_t* const p_ci_ca, + IN struct _al_shmid *p_shmid ); + +ib_api_status_t +acquire_shmid( + IN al_ci_ca_t* const p_ci_ca, + IN int shmid, + OUT struct _al_shmid **pp_shmid ); + +void +release_shmid( + IN struct _al_shmid *p_shmid ); + + + +ib_api_status_t +get_port_num( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_net64_t port_guid, + OUT uint8_t *p_port_num OPTIONAL ); + +ib_port_attr_t* +get_port_attr( + IN ib_ca_attr_t * const p_ca_attr, + IN ib_gid_t * const p_gid ); + + +#define BAD_PKEY_INDEX 0xFFFF + +uint16_t +get_pkey_index( + IN ib_port_attr_t * const p_port_attr, + IN const uint16_t pkey ); + +ib_api_status_t +ci_ca_update_attr( + IN al_ci_ca_t* p_ci_ca, + OUT ib_ca_attr_t** pp_old_pnp_attr ); + +void +ci_ca_lock_attr( + IN al_ci_ca_t* const p_ci_ca ); + +void +ci_ca_excl_lock_attr( + IN al_ci_ca_t* const p_ci_ca ); + +void +ci_ca_unlock_attr( + IN al_ci_ca_t* const p_ci_ca ); + +ib_api_status_t +ci_call( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN ci_umv_buf_t* const p_umv_buf OPTIONAL ); + + +#endif /* __AL_CI_CA_H__ */ diff --git a/branches/Ndi/core/al/al_ci_ca_shared.c b/branches/Ndi/core/al/al_ci_ca_shared.c new file mode 100644 index 00000000..41d7c0d9 --- /dev/null +++ b/branches/Ndi/core/al/al_ci_ca_shared.c @@ -0,0 +1,595 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
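The attr_lock declared in al_ci_ca_t serializes access to those cached attribute buffers; ci_ca_lock_attr and ci_ca_unlock_attr, implemented later in this file, are thin spinlock wrappers around it. A brief sketch of reading the PnP snapshot safely, assuming the declarations in this header:

static uint8_t
cached_num_ports(
	IN al_ci_ca_t* const	p_ci_ca )
{
	uint8_t	num_ports = 0;

	ci_ca_lock_attr( p_ci_ca );
	if( p_ci_ca->p_pnp_attr )
		num_ports = p_ci_ca->p_pnp_attr->num_ports;
	ci_ca_unlock_attr( p_ci_ca );

	return num_ports;
}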
+ * + * $Id$ + */ + +#include "al_ci_ca.h" +#include "al_common.h" +#include "al_cq.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_ci_ca_shared.tmh" +#endif + +#include "al_mgr.h" +#include "al_pnp.h" +#include "al_qp.h" +#include "al_srq.h" +#include "ib_common.h" + + +void +ci_ca_process_event_cb( + IN cl_async_proc_item_t* p_async_item ); + +void +ca_process_async_event_cb( + IN const ib_async_event_rec_t* const p_event_rec ); + +void +ca_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ); + + +void +free_ci_ca( + IN al_obj_t* p_obj ) +{ + al_ci_ca_t *p_ci_ca; + + CL_ASSERT( p_obj ); + p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj ); + + cl_spinlock_destroy( &p_ci_ca->attr_lock ); + cl_qpool_destroy( &p_ci_ca->event_pool ); + + if( p_ci_ca->port_array ) + cl_free( p_ci_ca->port_array ); + + /* Free the PnP attributes buffer. */ + if( p_ci_ca->p_pnp_attr ) + cl_free( p_ci_ca->p_pnp_attr ); + + destroy_al_obj( p_obj ); + cl_free( p_ci_ca ); +} + +void +add_ca( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_ca_handle_t h_ca ) +{ + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + cl_qlist_insert_tail( &p_ci_ca->ca_list, &h_ca->list_item ); + ref_al_obj( &p_ci_ca->obj ); + cl_spinlock_release( &p_ci_ca->obj.lock ); +} + + + +void +remove_ca( + IN const ib_ca_handle_t h_ca ) +{ + al_ci_ca_t *p_ci_ca; + + p_ci_ca = h_ca->obj.p_ci_ca; + + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + cl_qlist_remove_item( &p_ci_ca->ca_list, &h_ca->list_item ); + cl_spinlock_release( &p_ci_ca->obj.lock ); + deref_al_obj( &p_ci_ca->obj ); +} + + + +ib_api_status_t +get_port_info( + IN al_ci_ca_t *p_ci_ca ) +{ + ib_api_status_t status; + ib_ca_attr_t *p_ca_attr; + uint32_t attr_size; + uint8_t i; + + AL_ENTER( AL_DBG_CA ); + + /* Get the size of the CA attribute structure. */ + status = ib_query_ca( p_ci_ca->h_ca, NULL, &attr_size ); + if( status != IB_INSUFFICIENT_MEMORY ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_query_ca failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Allocate enough space to store the attribute structure. */ + p_ca_attr = cl_malloc( attr_size ); + if( !p_ca_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_malloc failed to allocate p_ca_attr!\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Query the CA attributes. */ + status = ib_query_ca( p_ci_ca->h_ca, p_ca_attr, &attr_size ); + if( status != IB_SUCCESS ) + { + cl_free( p_ca_attr ); + + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_query_ca failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Allocate the port GUID array. */ + p_ci_ca->port_array = cl_malloc( sizeof( ib_net64_t ) * + p_ca_attr->num_ports ); + if( !p_ci_ca->port_array ) + { + cl_free( p_ca_attr ); + + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_malloc failed to allocate port_array!\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + p_ci_ca->num_ports = p_ca_attr->num_ports; + + /* Copy the necessary port information. */ + for( i = 0; i < p_ca_attr->num_ports; i++ ) + { + p_ci_ca->port_array[i] = p_ca_attr->p_port_attr[i].port_guid; + +#ifdef CL_KERNEL + /* Set the port's client reregister bit. 
*/ + { + ib_port_attr_mod_t attr; + + attr.cap.client_reregister = TRUE; + ib_modify_ca( p_ci_ca->h_ca, i + 1, + IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED, &attr ); + } +#endif + } + + cl_free( p_ca_attr ); + + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + + +void +ci_ca_async_event( + IN const ib_async_event_rec_t* const p_event_rec ) +{ + al_obj_t* p_obj; + cl_pool_item_t* p_item; + event_item_t* p_event_item; + + AL_ENTER( AL_DBG_CA ); + + CL_ASSERT( p_event_rec ); + + p_obj = (al_obj_t* __ptr64)p_event_rec->context; + + /* Block the destruction of the object until a reference is taken. */ + cl_spinlock_acquire( &p_obj->lock ); + if( p_obj->state == CL_DESTROYING ) + { + /* Ignore events if the object is being destroyed. */ + cl_spinlock_release( &p_obj->lock ); + AL_EXIT( AL_DBG_CA ); + return; + } + + /* + * Get an event item from the pool. If an object is a child of + * a CA (e.g., CQ or QP) it will have a valid p_ci_ca pointer. + * For CAs, the object is the actual p_ci_ca pointer itself. + */ + if( p_obj->p_ci_ca ) + { + cl_spinlock_acquire( &p_obj->p_ci_ca->obj.lock ); + p_item = cl_qpool_get( &p_obj->p_ci_ca->event_pool ); + cl_spinlock_release( &p_obj->p_ci_ca->obj.lock ); + } + else + { + p_item = cl_qpool_get( &((al_ci_ca_t*)p_obj)->event_pool ); + } + if( !p_item ) + { + /* Could not get an item. This event will not be reported. */ + cl_spinlock_release( &p_obj->lock ); + AL_EXIT( AL_DBG_CA ); + return; + } + + /* Hold a reference to prevent destruction until the async_item runs. */ + ref_al_obj( p_obj ); + + cl_spinlock_release( &p_obj->lock ); + + /* Initialize the item with the asynchronous event information. */ + p_event_item = PARENT_STRUCT( p_item, event_item_t, async_item.pool_item ); + p_event_item->event_rec.code = p_event_rec->code; + p_event_item->event_rec.context = p_event_rec->context; + + /* Queue the item on the asynchronous callback thread for processing. */ + p_event_item->async_item.pfn_callback = ci_ca_process_event_cb; + cl_async_proc_queue( gp_async_proc_mgr, &p_event_item->async_item ); + + AL_EXIT( AL_DBG_CA ); +} + + + +void +ci_ca_process_event_cb( + IN cl_async_proc_item_t* p_async_item ) +{ + event_item_t* p_event_item; + al_obj_t* p_obj; + + AL_ENTER( AL_DBG_CA ); + + CL_ASSERT( p_async_item ); + + p_event_item = PARENT_STRUCT( p_async_item, event_item_t, + async_item.pool_item ); + + p_obj = (al_obj_t* __ptr64)p_event_item->event_rec.context; + + switch( p_event_item->event_rec.code ) + { + case IB_AE_QP_COMM: + case IB_AE_QP_APM: + case IB_AE_QP_APM_ERROR: + case IB_AE_QP_FATAL: + case IB_AE_RQ_ERROR: + case IB_AE_SQ_ERROR: + case IB_AE_SQ_DRAINED: + case IB_AE_WQ_REQ_ERROR: + case IB_AE_WQ_ACCESS_ERROR: + case IB_AE_SRQ_QP_LAST_WQE_REACHED: + qp_async_event_cb( &p_event_item->event_rec ); + break; + + case IB_AE_SRQ_LIMIT_REACHED: + case IB_AE_SRQ_CATAS_ERROR: + srq_async_event_cb( &p_event_item->event_rec ); + break; + + case IB_AE_CQ_ERROR: + cq_async_event_cb( &p_event_item->event_rec ); + break; + + case IB_AE_PORT_TRAP: + case IB_AE_PORT_DOWN: + case IB_AE_PORT_ACTIVE: + case IB_AE_CLIENT_REREGISTER: +#ifdef CL_KERNEL + /* The SMI polling routine may report a PnP event. */ + force_smi_poll(); +#endif + /* Fall through to the next case. */ + + case IB_AE_LOCAL_FATAL: + ca_process_async_event_cb( &p_event_item->event_rec ); + break; + + /* Unhandled events - optional per IBA spec.
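ci_ca_async_event above runs at DISPATCH_LEVEL, so it only copies the event into a pre-allocated pool item and queues it; the real work happens on the async proc thread. A condensed sketch of that hand-off, reusing the types and globals of this file (not standalone; the real code copies only the code and context fields rather than the whole record):

static void
my_deferred_cb(
	IN cl_async_proc_item_t	*p_async_item );

static void
queue_event(
	IN al_ci_ca_t* const					p_ci_ca,
	IN const ib_async_event_rec_t* const	p_event_rec )
{
	cl_pool_item_t	*p_item;
	event_item_t	*p_event_item;

	/* DISPATCH_LEVEL: no blocking, no allocation, only a pool get. */
	cl_spinlock_acquire( &p_ci_ca->obj.lock );
	p_item = cl_qpool_get( &p_ci_ca->event_pool );
	cl_spinlock_release( &p_ci_ca->obj.lock );
	if( !p_item )
		return;	/* pool exhausted: the event is simply dropped */

	p_event_item = PARENT_STRUCT( p_item, event_item_t,
		async_item.pool_item );
	p_event_item->event_rec = *p_event_rec;
	p_event_item->async_item.pfn_callback = my_deferred_cb;
	cl_async_proc_queue( gp_async_proc_mgr, &p_event_item->async_item );
}

static void
my_deferred_cb(
	IN cl_async_proc_item_t	*p_async_item )
{
	event_item_t	*p_event_item;

	/* Async proc thread: blocking work is safe here. */
	p_event_item = PARENT_STRUCT( p_async_item, event_item_t,
		async_item.pool_item );
	/* ... handle p_event_item->event_rec, then return the item to the
	 * pool and deref the object, as ci_ca_process_event_cb does. */
	UNUSED_PARAM( p_event_item );
}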
*/ + case IB_AE_QKEY_TRAP: + case IB_AE_PKEY_TRAP: + case IB_AE_MKEY_TRAP: + case IB_AE_BKEY_TRAP: + case IB_AE_BUF_OVERRUN: + case IB_AE_LINK_INTEGRITY: + case IB_AE_FLOW_CTRL_ERROR: + case IB_AE_SYSIMG_GUID_TRAP: + default: + break; + } + + /* + * Return the event item to the pool. If an object is a child of + * a CA (e.g., CQ or QP) it will have a valid p_ci_ca pointer. + * For CAs, the object is the actual p_ci_ca pointer itself. + */ + if( p_obj->p_ci_ca ) + { + cl_spinlock_acquire( &p_obj->p_ci_ca->obj.lock ); + cl_qpool_put( &p_obj->p_ci_ca->event_pool, + &p_event_item->async_item.pool_item ); + cl_spinlock_release( &p_obj->p_ci_ca->obj.lock ); + } + else + { + cl_spinlock_acquire( &p_obj->lock ); + cl_qpool_put( &((al_ci_ca_t*)p_obj)->event_pool, + &p_event_item->async_item.pool_item ); + cl_spinlock_release( &p_obj->lock ); + } + + /* Dereference the object. */ + deref_al_obj( p_obj ); + + AL_EXIT( AL_DBG_CA ); +} + + + +/* + * Process an asynchronous event on a CA. Notify all clients of the event. + */ +void +ca_process_async_event_cb( + IN const ib_async_event_rec_t* const p_event_rec ) +{ + al_ci_ca_t* p_ci_ca; + cl_list_item_t* p_list_item; + ib_ca_handle_t h_ca; + ib_async_event_rec_t event_rec; + + CL_ASSERT( p_event_rec ); + p_ci_ca = (al_ci_ca_t* __ptr64)p_event_rec->context; + + /* Report the CA event to all clients. */ + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + for( p_list_item = cl_qlist_head( &p_ci_ca->ca_list ); + p_list_item != cl_qlist_end( &p_ci_ca->ca_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + cl_spinlock_release( &p_ci_ca->obj.lock ); + + h_ca = PARENT_STRUCT( p_list_item, ib_ca_t, list_item ); + + event_rec.handle.h_ca = h_ca; + event_rec.code = p_event_rec->code; + ca_async_event_cb( &event_rec ); + + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + } + cl_spinlock_release( &p_ci_ca->obj.lock ); +} + + + +/* + * Process an asynchronous event on a CA. Notify the user of the event. + */ +void +ca_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ) +{ + ib_ca_handle_t h_ca; + + CL_ASSERT( p_event_rec ); + h_ca = p_event_rec->handle.h_ca; + + p_event_rec->context = (void*)h_ca->obj.context; + p_event_rec->handle.h_ca = h_ca; + + if( h_ca->pfn_event_cb ) + h_ca->pfn_event_cb( p_event_rec ); +} + + + +void +ca_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); +} + + + +/* + * Returns a port's 1-based port number on its CA for the given port GUID. + */ +ib_api_status_t +get_port_num( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_net64_t port_guid, + OUT uint8_t *p_port_num OPTIONAL ) +{ + uint8_t i; + + /* Find a matching port GUID on this CI CA. */ + for( i = 0; i < p_ci_ca->num_ports; i++ ) + { + if( p_ci_ca->port_array[i] == port_guid ) + { + /* The port number is the index plus one. */ + if( p_port_num ) + *p_port_num = (uint8_t)(i + 1); + return IB_SUCCESS; + } + } + + /* The port GUID was not found. */ + return IB_INVALID_GUID; +} + + + +ib_port_attr_t* +get_port_attr( + IN ib_ca_attr_t * const p_ca_attr, + IN ib_gid_t * const p_gid ) +{ + uintn_t port_index, gid_index; + ib_port_attr_t *p_port_attr; + + AL_ENTER( AL_DBG_CA ); + + if( !p_ca_attr || !p_gid ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("No p_ca_attr or p_gid.\n") ); + return NULL; + } + + /* Check all ports on this HCA for a matching GID.
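ca_process_async_event_cb above deliberately releases the CI CA lock around each client callback, so a client can block, or even call back into AL, without deadlocking; the pattern relies on list entries not being removed while callbacks are in flight. The shape, reduced to its essentials with an illustrative client type (a sketch against the complib calls used in this file, not a drop-in):

typedef struct _toy_client
{
	cl_list_item_t	list_item;
	void			(*pfn_notify)( struct _toy_client* );
} toy_client_t;

static void
notify_all(
	IN cl_spinlock_t	*p_lock,
	IN cl_qlist_t		*p_list )
{
	cl_list_item_t	*p_item;
	toy_client_t	*p_client;

	cl_spinlock_acquire( p_lock );
	for( p_item = cl_qlist_head( p_list );
		p_item != cl_qlist_end( p_list );
		p_item = cl_qlist_next( p_item ) )
	{
		/* Drop the lock across the callback: it may block or re-enter. */
		cl_spinlock_release( p_lock );

		p_client = PARENT_STRUCT( p_item, toy_client_t, list_item );
		p_client->pfn_notify( p_client );

		/* Retake before advancing; assumes no removals during iteration. */
		cl_spinlock_acquire( p_lock );
	}
	cl_spinlock_release( p_lock );
}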
*/ + for( port_index = 0; port_index < p_ca_attr->num_ports; port_index++ ) + { + p_port_attr = &p_ca_attr->p_port_attr[port_index]; + + for( gid_index = 0; gid_index < p_port_attr->num_gids; gid_index++ ) + { + if( !cl_memcmp( &p_port_attr->p_gid_table[gid_index], + p_gid, sizeof(ib_gid_t) ) ) + { + AL_EXIT( AL_DBG_CA ); + return p_port_attr; + } + } + } + + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("No match found.\n") ); + return NULL; +} + + + +uint16_t +get_pkey_index( + IN ib_port_attr_t * const p_port_attr, + IN const uint16_t pkey ) +{ + uint16_t pkey_index; + + if( !p_port_attr ) + return BAD_PKEY_INDEX; + + for( pkey_index = 0; pkey_index < p_port_attr->num_pkeys; pkey_index++ ) + { + if( p_port_attr->p_pkey_table[pkey_index] == pkey ) + return pkey_index; + } + return BAD_PKEY_INDEX; +} + + +/* + * Reads the CA attributes from verbs. + */ +ib_api_status_t +ci_ca_update_attr( + IN al_ci_ca_t* p_ci_ca, + OUT ib_ca_attr_t** pp_old_pnp_attr ) +{ + ib_ca_attr_t *p_pnp_attr; + uint32_t attr_size; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + /* Query to get the CA attributes size. */ + attr_size = 0; + status = ib_query_ca( p_ci_ca->h_ca, NULL, &attr_size ); + CL_ASSERT( status == IB_INSUFFICIENT_MEMORY ); + + /* + * Allocate the new CA attributes buffer. + * Double the buffer size for PnP and user reporting halves. + */ + p_pnp_attr = (ib_ca_attr_t*)cl_zalloc( attr_size * 2 ); + if( !p_pnp_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_CA, + ("Unable to allocate buffer for PnP attributes\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Read the attributes. */ + status = ib_query_ca( p_ci_ca->h_ca, p_pnp_attr, &attr_size ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_CA, + ("Unable to query attributes\n") ); + cl_free( p_pnp_attr ); + return status; + } + + ci_ca_excl_lock_attr( p_ci_ca ); + if( pp_old_pnp_attr ) + *pp_old_pnp_attr = p_ci_ca->p_pnp_attr; + p_ci_ca->p_pnp_attr = p_pnp_attr; + + /* + * Initialize pointer to the user reporting half. + * This buffer is used to report this CAs attributes to users. + */ + p_ci_ca->p_user_attr = (ib_ca_attr_t*)(((uint8_t*)p_pnp_attr) + attr_size); + ci_ca_unlock_attr( p_ci_ca ); + + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + + +void +ci_ca_lock_attr( + IN al_ci_ca_t* const p_ci_ca ) +{ + CL_ASSERT( p_ci_ca ); + + cl_spinlock_acquire( &p_ci_ca->attr_lock ); +} + + +void +ci_ca_excl_lock_attr( + IN al_ci_ca_t* const p_ci_ca ) +{ + CL_ASSERT( p_ci_ca ); + + cl_spinlock_acquire( &p_ci_ca->attr_lock ); +} + + +void +ci_ca_unlock_attr( + IN al_ci_ca_t* const p_ci_ca ) +{ + CL_ASSERT( p_ci_ca ); + + cl_spinlock_release( &p_ci_ca->attr_lock ); +} diff --git a/branches/Ndi/core/al/al_cm_cep.h b/branches/Ndi/core/al/al_cm_cep.h new file mode 100644 index 00000000..a3f33074 --- /dev/null +++ b/branches/Ndi/core/al/al_cm_cep.h @@ -0,0 +1,437 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#pragma once + +#ifndef _AL_CM_CEP_H_ +#define _AL_CM_CEP_H_ + + +#include +#include "al_common.h" + + +#define CEP_EVENT_TIMEOUT 0x80000000 +#define CEP_EVENT_RECV 0x40000000 +#define CEP_EVENT_REQ 0x00000001 +#define CEP_EVENT_REP 0x00000002 +#define CEP_EVENT_RTU 0x00000004 +#define CEP_EVENT_DREQ 0x00000008 +#define CEP_EVENT_DREP 0x00000010 +#define CEP_EVENT_MRA 0x00000020 +#define CEP_EVENT_REJ 0x00000040 +#define CEP_EVENT_LAP 0x00000080 +#define CEP_EVENT_APR 0x00000100 +#define CEP_EVENT_SIDR 0x00800000 + + +#define AL_INVALID_CID 0xFFFFFFFF + + +typedef void +(*al_pfn_cep_cb_t)( + IN const ib_al_handle_t h_al, + IN const net32_t cid ); +/* PARAMETERS +* h_al +* [in] Handle to the AL instance to pass into the al_cep_poll call. +* +* cid +* [in] CID of the CEP on which the event occurred. The CID should +* be passed into the al_cep_poll call. +* +* RETURN VALUES: +* This function does not return a value. +* +* NOTES +* The callback is invoked at DISPATCH_LEVEL. +* +* Recipients of the callback are expected to call al_cep_poll to retrieve +* event specific details until al_cep_poll returns IB_NOT_DONE. This may +* be done in a different thread context. +*********/ + + +ib_api_status_t +create_cep_mgr( + IN al_obj_t* const p_parent_obj ); + + +void +al_cep_cleanup_al( + IN const ib_al_handle_t h_al ); + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void* __ptr64 context, + OUT net32_t* const p_cid ); +/* +* NOTES +* This function may be invoked at DISPATCH_LEVEL +* +* The pfn_cb parameter may be NULL in the kernel if using IRPs for +* event notification. +*********/ + + +/* Destruction is asynchronous. 
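+ * The CID should be treated as invalid by the caller once this
+ * returns; internal teardown completes later, and pfn_destroy_cb (if
+ * provided) fires only then.  The CM QP code, for example, passes
+ * deref_al_obj so its object reference is released only after the
+ * CEP is fully destroyed.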
*/ +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb ); +/* +*********/ + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ); + + +ib_api_status_t +al_cep_pre_req( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_req_t* const p_cm_req, + OUT ib_qp_mod_t* const p_init ); + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void* __ptr64 context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ); + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rtr ); + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ); + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ); + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ); + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ); + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_lap_t* const p_cm_lap ); + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ); + + +ib_api_status_t +al_cep_send_apr( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata OPTIONAL, + IN const uint8_t pdata_len ); + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ); + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ); + + +ib_api_status_t +al_cep_migrate( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_established( + IN ib_al_handle_t h_al, + IN net32_t cid ); + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT void* __ptr64 * p_context, + OUT net32_t* const p_new_cid, + OUT ib_mad_element_t** const pp_mad ); + + +#ifdef CL_KERNEL +NTSTATUS +al_cep_queue_irp( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN IRP* const p_irp ); +#endif /* CL_KERNEL */ + + +/****s* Access Layer/al_cep_sreq_t +* NAME +* al_cep_sreq_t +* +* DESCRIPTION +* Connection request information used to establish a new connection. +* +* SYNOPSIS +*/ +typedef struct _al_cep_sreq +{ + ib_net64_t svc_id; + + ib_path_rec_t* __ptr64 p_path; + + const uint8_t* __ptr64 p_pdata; + uint8_t pdata_len; + + uint8_t max_cm_retries; + ib_net16_t pkey; + uint32_t timeout_ms; + +} al_cep_sreq_t; +/* +* FIELDS +* svc_id +* The ID of the remote service to which the SIDR request is +* being made. +* +* p_path +* Path information over which to send the request. +* +* p_pdata +* Optional user-defined private data sent as part of the SIDR request. +* +* pdata_len +* Defines the size of the user-defined private data. 
+* +* max_cm_retries +* The maximum number of times that either CM should +* resend a SIDR message. +* +* timeout_ms +* Timeout value in milli-seconds for the SIDR REQ to expire. The CM will +* add twice packet lifetime to this value to determine the actual timeout +* value used. +* +* pkey +* pkey to be used as part of the request. +* +* SEE ALSO +* al_cep_sreq +*****/ + +ib_api_status_t +al_cep_sreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const al_cep_sreq_t* const p_sreq ); + + +/****s* Access Layer/al_cep_srep_t +* NAME +* al_cep_srep_t +* +* DESCRIPTION +* SIDR reply information. +* +* SYNOPSIS +*/ +typedef struct _al_cep_srep +{ + net32_t qp_num; + net32_t qkey; + + const uint8_t* __ptr64 p_pdata; + const void* __ptr64 p_info; + + uint8_t pdata_len; + uint8_t info_len; + + ib_sidr_status_t status; + +} al_cep_srep_t; +/* +* FIELDS +* qp_num +* The number of the queue pair on which the requested service +* is supported. +* +* qp_key +* The QKEY of the returned queue pair. +* +* p_pdata +* Optional user-defined private data sent as part of the SIDR reply. +* +* p_info +* Optional "additonal information" sent as part of the SIDR reply. +* +* pdata_len +* Size of the user-defined private data. +* +* info_len +* Size of the "additional information". +* +* status +* sidr status value returned back to a previously received REQ. +* +* SEE ALSO +* al_cep_srep +*****/ + +ib_api_status_t +al_cep_srep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const al_cep_srep_t* const p_sreq ); + + + + +/* + * Return the local ACK timeout value based on the given packet lifetime + * and target ACK delay. Both input values are assumed to be in the form + * 4.096 x 2 ^ input. + */ +#define MAX_LOCAL_ACK_TIMEOUT 0x1F /* limited to 5 bits */ + +inline uint8_t +calc_lcl_ack_timeout( + IN const uint8_t round_trip_time, + IN const uint8_t target_ack_delay ) +{ + uint64_t timeout; + uint8_t local_ack_timeout; + + if( !target_ack_delay ) + { + if( round_trip_time > MAX_LOCAL_ACK_TIMEOUT ) + return MAX_LOCAL_ACK_TIMEOUT; + else + return round_trip_time; + } + + /* + * Since both input and the output values are in the same form, we + * can ignore the 4.096 portion by dividing it out. + */ + + /* The input parameter is the round trip time. */ + timeout = (uint64_t)1 << round_trip_time; + + /* Add in the target ack delay. */ + if( target_ack_delay ) + timeout += (uint64_t)1 << target_ack_delay; + + /* Calculate the local ACK timeout. */ + local_ack_timeout = 1; + while( (1ui64 << local_ack_timeout) <= timeout ) + { + local_ack_timeout++; + + /* Only 5-bits are valid. */ + if( local_ack_timeout > MAX_LOCAL_ACK_TIMEOUT ) + return MAX_LOCAL_ACK_TIMEOUT; + } + + return local_ack_timeout; +} + +#endif /* _AL_CM_CEP_H_ */ diff --git a/branches/Ndi/core/al/al_cm_conn.h b/branches/Ndi/core/al/al_cm_conn.h new file mode 100644 index 00000000..d11ab34f --- /dev/null +++ b/branches/Ndi/core/al/al_cm_conn.h @@ -0,0 +1,1308 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_AL_CM_CONN_H__) +#define __IB_AL_CM_CONN_H__ + + +#include +#include +#include "al_common.h" + + +/* + * Helper functions + */ +static inline void +__set_low24( + OUT ib_field32_t* const p_field, + IN const net32_t val ) +{ + const uint8_t* p8 = (const uint8_t*)&val; + + p_field->bytes[0] = p8[1]; + p_field->bytes[1] = p8[2]; + p_field->bytes[2] = p8[3]; +} + +/* + * Returns a network byte ordered 32-bit quantity equal to the 24-bit value + * stored in the lower 3-bytes of the supplied field + */ +static inline net32_t +__get_low24( + IN const ib_field32_t field ) +{ + return cl_hton32( cl_ntoh32( field.val ) >> 8 ); +} + +static inline net32_t +__get_low20( + IN const ib_field32_t field ) +{ + return cl_hton32( cl_ntoh32( field.val ) >> 12 ); +} + +/* + * CM MAD definitions. + */ +#include + +typedef struct _req_path_info +{ + ib_net16_t local_lid; + ib_net16_t remote_lid; + ib_gid_t local_gid; + ib_gid_t remote_gid; + + /* Version 2: Flow Label:20, rsvd:6, Packet Rate:6 */ + ib_field32_t offset36; + + uint8_t traffic_class; + uint8_t hop_limit; + /* SL:4, Subnet Local:1, rsvd:3 */ + uint8_t offset42; + /* Local ACK Timeout:5, rsvd:3 */ + uint8_t offset43; + +} PACK_SUFFIX req_path_info_t; + + +#define CM_REQ_ATTR_ID CL_HTON16(0x0010) +typedef struct _mad_cm_req +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t rsvd1; + ib_net64_t sid; + ib_net64_t local_ca_guid; + ib_net32_t rsvd2; + ib_net32_t local_qkey; + + /* Local QPN:24, Responder resources:8 */ + ib_field32_t offset32; + /* Local EECN:24, Initiator depth:8 */ + ib_field32_t offset36; + /* + * Remote EECN:24, Remote CM Response Timeout:5, + * Transport Service Type:2, End-to-End Flow Control:1 + */ + ib_field32_t offset40; + /* Starting PSN:24, Local CM Response Timeout:5, Retry Count:3. */ + ib_field32_t offset44; + + ib_net16_t pkey; + + /* Path MTU:4, RDC Exists:1, RNR Retry Count:3. */ + uint8_t offset50; + /* Max CM Retries:4, rsvd:4 */ + uint8_t offset51; + + req_path_info_t primary_path; + req_path_info_t alternate_path; + + uint8_t pdata[IB_REQ_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_req_t; +C_ASSERT( sizeof(mad_cm_req_t) == MAD_BLOCK_SIZE ); + +#include + +/* REQ functions */ + +/* REQ offset32 accessors. 
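+ * The offsetNN names give each field block's byte offset within the
+ * CM message body (after the MAD header); the accessors hide the bit
+ * packing, e.g. (illustration only):
+ *
+ *	conn_req_set_lcl_qpn( qpn, p_req );
+ *	conn_req_set_resp_res( resp_res, p_req );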
*/ +static inline ib_net32_t +conn_req_get_lcl_qpn( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return __get_low24( p_req->offset32 ); +} + +static inline void +conn_req_set_lcl_qpn( + IN const ib_net32_t qpn, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + __set_low24( &p_req->offset32, qpn ); +} + +static inline uint8_t +conn_req_get_resp_res( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return p_req->offset32.bytes[3]; +} + +static inline void +conn_req_set_resp_res( + IN const uint8_t resp_res, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset32.bytes[3] = resp_res; +} + +/* REQ offset36 accessors. */ +static inline uint8_t +conn_req_get_init_depth( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return p_req->offset36.bytes[3]; +} + +static inline void +conn_req_set_init_depth( + IN const uint8_t init_depth, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset36.bytes[3] = init_depth; +} + +static inline uint8_t +conn_req_get_resp_timeout( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (p_req->offset40.bytes[3] >> 3); +} + +static inline void +conn_req_set_remote_resp_timeout( + IN const uint8_t resp_timeout, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(resp_timeout & 0xE0) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset40.bytes[3] &= 0x07; + p_req->offset40.bytes[3] |= (resp_timeout << 3); +} + +static inline ib_qp_type_t +conn_req_get_qp_type( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( ib_qp_type_t )( (p_req->offset40.bytes[3] >> 1) & 0x3 ); +} + +static inline void +conn_req_set_qp_type( + IN const ib_qp_type_t qp_type, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(qp_type & 0xFC) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset40.bytes[3] &= 0xF9; + p_req->offset40.bytes[3] |= ( ((uint8_t)qp_type & 0x03) << 1 ); +} + +static inline boolean_t +conn_req_get_flow_ctrl( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT(p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( (p_req->offset40.bytes[3] & 0x01) != 0 ); +} + +static inline void +conn_req_set_flow_ctrl( + IN const boolean_t flow_ctrl, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + if( flow_ctrl ) + p_req->offset40.bytes[3] |= 0x01; + else + p_req->offset40.bytes[3] &= 0xFE; +} + +/* REQ offset44 accessors. 
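+ * offset44 packs "Starting PSN:24, Local CM Response Timeout:5,
+ * Retry Count:3"; byte 3 holds the timeout in its high five bits and
+ * the retry count in its low three.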
*/ +static inline ib_net32_t +conn_req_get_starting_psn( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return __get_low24( p_req->offset44 ); +} + +static inline void +conn_req_set_starting_psn( + IN const ib_net32_t starting_psn, + IN mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + __set_low24( &p_req->offset44, starting_psn ); +} + +static inline uint8_t +conn_req_get_retry_cnt( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return p_req->offset44.bytes[3] & 0x7; +} + +static inline void +conn_req_set_retry_cnt( + IN const uint8_t retry_cnt, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset44.bytes[3] &= 0xF8; + p_req->offset44.bytes[3] |= (retry_cnt & 0x7); +} + +static inline uint8_t +conn_req_get_lcl_resp_timeout( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (p_req->offset44.bytes[3] >> 3); +} + +static inline void +conn_req_set_lcl_resp_timeout( + IN const uint8_t resp_timeout, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(resp_timeout & 0xE0) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset44.bytes[3] &= 0x07; + p_req->offset44.bytes[3] |= (resp_timeout << 3); +} + +/* REQ offset50 accessors. */ +static inline uint8_t +conn_req_get_mtu( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( p_req->offset50 >> 4); +} + +static inline void +conn_req_set_mtu( + IN const uint8_t path_mtu, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(path_mtu & 0xF0) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset50 &= 0x0F; + p_req->offset50 |= (path_mtu << 4); +} + +static inline uint8_t +conn_req_get_rnr_retry_cnt( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( p_req->offset50 & 0x07 ); +} + +static inline void +conn_req_set_rnr_retry_cnt( + IN const uint8_t rnr_retry_cnt, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(rnr_retry_cnt & 0xF8) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset50 &= 0xF8; + p_req->offset50 |= (rnr_retry_cnt & 0x07); +} + +static inline uint8_t +conn_req_get_max_cm_retries( + IN const mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (uint8_t)( p_req->offset51 >> 4 ); +} + +static inline void +conn_req_set_max_cm_retries( + IN const uint8_t retries, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( !(retries & 0xF0) ); + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->offset51 = (retries << 4); +} + +static inline ib_api_status_t +conn_req_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + + if( p_data ) + { + if( data_len > IB_REQ_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_req->pdata, p_data, data_len ); + cl_memclr( p_req->pdata + data_len, IB_REQ_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_req->pdata, IB_REQ_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_req_clr_rsvd_fields( + IN OUT mad_cm_req_t* const p_req ) +{ + CL_ASSERT( p_req->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_req->rsvd1 = 0; + p_req->rsvd2 = 
0; + p_req->offset36.val &= CL_HTON32( 0x000000FF ); + p_req->offset40.val &= CL_HTON32( 0x000000FF ); + p_req->offset50 &= 0xF7; + p_req->offset51 &= 0xF0; +} + +/* REQ Path accessors. */ +static inline net32_t +conn_req_path_get_flow_lbl( + IN const req_path_info_t* const p_path ) +{ + return __get_low20( p_path->offset36 ); +} + +static inline void +conn_req_path_set_flow_lbl( + IN const net32_t flow_lbl, + IN OUT req_path_info_t* const p_path ) +{ + CL_ASSERT( !(cl_ntoh32( flow_lbl ) & 0xFFF00000) ); + __set_low24( &p_path->offset36, (flow_lbl & CL_HTON32( 0x000FFFFF )) ); +} + +static inline uint8_t +conn_req_path_get_pkt_rate( + IN const req_path_info_t* const p_path ) +{ + return p_path->offset36.bytes[3] & 0x3F; +} + +static inline void +conn_req_path_set_pkt_rate( + IN const uint8_t rate, + OUT req_path_info_t* const p_path ) +{ + CL_ASSERT( !(rate & 0xC0) ); + p_path->offset36.bytes[3] = (rate & 0x3F); +} + +static inline uint8_t +conn_req_path_get_svc_lvl( + IN const req_path_info_t* const p_path ) +{ + return ( p_path->offset42 >> 4 ); +} + +static inline void +conn_req_path_set_svc_lvl( + IN const uint8_t svc_lvl, + IN OUT req_path_info_t* const p_path ) +{ + CL_ASSERT( !(svc_lvl & 0xF0) ); + p_path->offset42 = ( ( p_path->offset42 & 0x08 ) | (svc_lvl << 4) ); +} + +static inline boolean_t +conn_req_path_get_subn_lcl( + IN const req_path_info_t* const p_path ) +{ + return (p_path->offset42 & 0x08) != 0; +} + +static inline void +conn_req_path_set_subn_lcl( + IN const boolean_t subn_lcl, + IN OUT req_path_info_t* const p_path ) +{ + if( subn_lcl ) + p_path->offset42 = ((p_path->offset42 & 0xF0) | 0x08); + else + p_path->offset42 = (p_path->offset42 & 0xF0); +} + +static inline uint8_t +conn_req_path_get_lcl_ack_timeout( + IN const req_path_info_t* const p_path ) +{ + return( p_path->offset43 >> 3 ); +} + +static inline void +conn_req_path_set_lcl_ack_timeout( + IN const uint8_t lcl_ack_timeout, + IN OUT req_path_info_t* const p_path ) +{ + CL_ASSERT( !(lcl_ack_timeout & 0xE0) ); + p_path->offset43 = (lcl_ack_timeout << 3); +} + +static inline void +conn_req_path_clr_rsvd_fields( + IN OUT req_path_info_t* const p_path ) +{ + p_path->offset36.val &= CL_HTON32( 0xFFFFF03F ); + p_path->offset42 &= 0xF8; + p_path->offset43 &= 0xF8; +} + + + +/* MRA */ +#include + +#define CM_MRA_ATTR_ID CL_HTON16(0x0011) +/* MRA is the same in ver1 and ver2. 
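+ * Only the class version in the MAD header differs, so these
+ * accessors assert no version.  "Message MRAed" records which message
+ * (0 = REQ, 1 = REP, 2 = LAP) is being acknowledged.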
*/ +typedef struct _mad_cm_mra +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + /* Message MRAed:2, rsvd:6 */ + uint8_t offset8; + /* Service Timeout:5, rsvd:3 */ + uint8_t offset9; + + uint8_t pdata[IB_MRA_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_mra_t; +C_ASSERT( sizeof(mad_cm_mra_t) == MAD_BLOCK_SIZE ); + +#include + +static inline uint8_t +conn_mra_get_msg_mraed( + IN const mad_cm_mra_t* const p_mra ) +{ + return (p_mra->offset8 >> 6); +} + +static inline void +conn_mra_set_msg_mraed( + IN const uint8_t msg, + OUT mad_cm_mra_t* const p_mra ) +{ + p_mra->offset8 = (msg << 6); +} + +static inline uint8_t +conn_mra_get_svc_timeout( + IN const mad_cm_mra_t* const p_mra ) +{ + return (p_mra->offset9 >> 3); +} + +static inline void +conn_mra_set_svc_timeout( + IN const uint8_t svc_timeout, + OUT mad_cm_mra_t* const p_mra ) +{ + p_mra->offset9 = (svc_timeout << 3); +} + +static inline ib_api_status_t +conn_mra_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_mra_t* const p_mra ) +{ + if( p_data ) + { + if( data_len > IB_MRA_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_mra->pdata, p_data, data_len ); + cl_memclr( p_mra->pdata + data_len, IB_MRA_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_mra->pdata, IB_MRA_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_mra_clr_rsvd_fields( + IN OUT mad_cm_mra_t* const p_mra ) +{ + p_mra->offset8 &= 0xC0; + p_mra->offset9 &= 0xF8; +} + + + +/* REJ */ + +#include + +#define CM_REJ_ATTR_ID CL_HTON16(0x0012) +/* REJ is the same in ver1 and ver2. */ +typedef struct _mad_cm_rej +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + + /* Message REJected:2, rsvd:6 */ + uint8_t offset8; + + /* Reject Info Length:7, rsvd:1. 
*/ + uint8_t offset9; + + ib_net16_t reason; + uint8_t ari[IB_ARI_SIZE]; + uint8_t pdata[IB_REJ_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_rej_t; +C_ASSERT( sizeof(mad_cm_rej_t) == MAD_BLOCK_SIZE ); + +#include + +static inline uint8_t +conn_rej_get_msg_rejected( + IN const mad_cm_rej_t* const p_rej ) +{ + return (p_rej->offset8 >> 6); +} + +static inline void +conn_rej_set_msg_rejected( + IN const uint8_t msg, + IN OUT mad_cm_rej_t* const p_rej ) +{ + p_rej->offset8 = (msg << 6); +} + +static inline uint8_t +conn_rej_get_ari_len( + IN const mad_cm_rej_t* const p_rej ) +{ + return (p_rej->offset9 >> 1); +} + +static inline ib_api_status_t +conn_rej_set_ari( + IN const uint8_t* const p_ari_info OPTIONAL, + IN const uint8_t info_len, + IN OUT mad_cm_rej_t* const p_rej ) +{ + if( p_ari_info && info_len > IB_ARI_SIZE ) + return IB_INVALID_PARAMETER; + + if( p_ari_info ) + { + cl_memcpy( p_rej->ari, p_ari_info, info_len ); + cl_memclr( p_rej->ari + info_len, IB_ARI_SIZE - info_len ); + } + else + { + cl_memclr( p_rej->ari, IB_ARI_SIZE ); + } + p_rej->offset9 = (info_len << 1); + return IB_SUCCESS; +} + + +static inline ib_api_status_t +conn_rej_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_rej_t* const p_rej ) +{ + if( p_data ) + { + if( data_len > IB_REJ_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_rej->pdata, p_data, data_len ); + cl_memclr( p_rej->pdata + data_len, IB_REJ_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_rej->pdata, IB_REJ_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_rej_clr_rsvd_fields( + IN OUT mad_cm_rej_t* const p_rej ) +{ + p_rej->offset8 &= 0xC0; + p_rej->offset9 &= 0xFE; +} + + + +/* REP */ + +#include + +#define CM_REP_ATTR_ID CL_HTON16(0x0013) +typedef struct _mad_cm_rep +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + + ib_net32_t local_qkey; + /* Local QPN:24, rsvd:8 */ + ib_field32_t offset12; + /* Local EECN:24, rsvd:8 */ + ib_field32_t offset16; + /* Starting PSN:24 rsvd:8 */ + ib_field32_t offset20; + + uint8_t resp_resources; + uint8_t initiator_depth; + /* Target ACK Delay:5, Failover Accepted:2, End-to-End Flow Control:1 */ + uint8_t offset26; + /* RNR Retry Count:3, rsvd:5 */ + uint8_t offset27; + + ib_net64_t local_ca_guid; + uint8_t pdata[IB_REP_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_rep_t; +C_ASSERT( sizeof(mad_cm_rep_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_net32_t +conn_rep_get_lcl_qpn( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return __get_low24( p_rep->offset12 ); +} + +static inline void +conn_rep_set_lcl_qpn( + IN const ib_net32_t qpn, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + __set_low24( &p_rep->offset12, qpn ); +} + +static inline ib_net32_t +conn_rep_get_starting_psn( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return __get_low24( p_rep->offset20 ); +} + +static inline void +conn_rep_set_starting_psn( + IN const ib_net32_t starting_psn, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + __set_low24( &p_rep->offset20, starting_psn ); +} + +static inline uint8_t +conn_rep_get_target_ack_delay( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (p_rep->offset26 >> 3); +} + +static inline void +conn_rep_set_target_ack_delay( + IN 
const uint8_t target_ack_delay, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( !(target_ack_delay & 0xE0) ); + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_rep->offset26 = + ((p_rep->offset26 & 0xE0) | (target_ack_delay << 3)); +} + +static inline uint8_t +conn_rep_get_failover( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( ( p_rep->offset26 >> 1 ) & 0x3 ); +} + +static inline void +conn_rep_set_failover( + IN const uint8_t failover, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_rep->offset26 = + ( (p_rep->offset26 & 0xF9) | (( failover & 0x03) << 1) ); +} + +static inline boolean_t +conn_rep_get_e2e_flow_ctl( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return ( p_rep->offset26 & 0x01 ); +} + +static inline void +conn_rep_set_e2e_flow_ctl( + IN const boolean_t e2e_flow_ctl, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + if( e2e_flow_ctl ) + p_rep->offset26 |= 0x01; + else + p_rep->offset26 &= 0xFE; +} + +static inline uint8_t +conn_rep_get_rnr_retry_cnt( + IN const mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (p_rep->offset27 >> 5); +} + +static inline void +conn_rep_set_rnr_retry_cnt( + IN const uint8_t rnr_retry_cnt, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_rep->offset27 = (rnr_retry_cnt << 5); +} + +static inline ib_api_status_t +conn_rep_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_rep_t* const p_rep ) +{ + CL_ASSERT( p_rep->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + + if( p_data ) + { + if( data_len > IB_REP_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_rep->pdata, p_data, data_len ); + cl_memclr( p_rep->pdata + data_len, IB_REP_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_rep->pdata, IB_REP_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_rep_clr_rsvd_fields( + IN OUT mad_cm_rep_t* const p_rep ) +{ + p_rep->offset12.bytes[3] = 0; + p_rep->offset16.val = 0; + p_rep->offset20.bytes[3] = 0; + p_rep->offset27 &= 0xE0; +} + + +/* RTU */ + +#include + +#define CM_RTU_ATTR_ID CL_HTON16(0x0014) +/* RTU is the same for ver1 and ver2. */ +typedef struct _mad_cm_rtu +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + uint8_t pdata[IB_RTU_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_rtu_t; +C_ASSERT( sizeof(mad_cm_rtu_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_api_status_t +conn_rtu_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_rtu_t* const p_rtu ) +{ + if( p_data ) + { + if( data_len > IB_RTU_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_rtu->pdata, p_data, data_len ); + cl_memclr( p_rtu->pdata + data_len, IB_RTU_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_rtu->pdata, IB_RTU_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +/* DREQ */ + +#include + +#define CM_DREQ_ATTR_ID CL_HTON16(0x0015) +/* DREQ is the same for ver1 and ver2. 
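+ * offset8 carries the remote QPN (or EECN), letting the receiver
+ * verify that the DREQ targets the QP bound to this connection before
+ * starting the disconnect handshake.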
*/ +typedef struct _mad_cm_dreq +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + /* Remote QPN/EECN:24, rsvd:8 */ + ib_field32_t offset8; + uint8_t pdata[IB_DREQ_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_dreq_t; +C_ASSERT( sizeof(mad_cm_dreq_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_net32_t +conn_dreq_get_remote_qpn( + IN const mad_cm_dreq_t* const p_dreq ) +{ + return __get_low24( p_dreq->offset8 ); +} + +static inline void +conn_dreq_set_remote_qpn( + IN const ib_net32_t qpn, + IN OUT mad_cm_dreq_t* const p_dreq ) +{ + __set_low24( &p_dreq->offset8, qpn ); +} + +static inline ib_api_status_t +conn_dreq_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_dreq_t* const p_dreq ) +{ + if( p_data ) + { + if( data_len > IB_DREQ_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_dreq->pdata, p_data, data_len ); + cl_memclr( p_dreq->pdata + data_len, IB_DREQ_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_dreq->pdata, IB_DREQ_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_dreq_clr_rsvd_fields( + IN OUT mad_cm_dreq_t* const p_dreq ) +{ + p_dreq->offset8.bytes[3] = 0; +} + + + +/* DREP */ + +#include + +#define CM_DREP_ATTR_ID CL_HTON16(0x0016) +/* DREP is the same for ver1 and ver2. */ +typedef struct _mad_cm_drep +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + uint8_t pdata[IB_DREP_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_drep_t; +C_ASSERT( sizeof(mad_cm_drep_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_api_status_t +conn_drep_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_drep_t* const p_drep ) +{ + if( p_data ) + { + if( data_len > IB_DREP_PDATA_SIZE ) + return IB_INVALID_SETTING; + + cl_memcpy( p_drep->pdata, p_data, data_len ); + cl_memclr( p_drep->pdata + data_len, IB_DREP_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_drep->pdata, IB_DREP_PDATA_SIZE ); + } + return IB_SUCCESS; +} + + +/* LAP */ + +#include + +typedef struct _lap_path_info +{ + ib_net16_t local_lid; + ib_net16_t remote_lid; + ib_gid_t local_gid; + ib_gid_t remote_gid; + /* Flow Label:20, rsvd:4, Traffic Class:8 */ + ib_field32_t offset36; + uint8_t hop_limit; + + /* rsvd:2, Packet Rate:6 */ + uint8_t offset41; + /* SL:4, Subnet Local:1, rsvd:3 */ + uint8_t offset42; + /* Local ACK Timeout:5, rsvd:3 */ + uint8_t offset43; + +} PACK_SUFFIX lap_path_info_t; + +#define CM_LAP_ATTR_ID CL_HTON16(0x0019) +typedef struct _mad_cm_lap +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + + ib_net32_t rsvd1; + /* Remote QPN/EECN:24, Remote CM Response Timeout:5, rsvd:3 */ + ib_field32_t offset12; + ib_net32_t rsvd2; + lap_path_info_t alternate_path; + uint8_t pdata[IB_LAP_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_lap_t; +C_ASSERT( sizeof(mad_cm_lap_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_net32_t +conn_lap_get_remote_qpn( + IN const mad_cm_lap_t* const p_lap ) +{ + CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return __get_low24( p_lap->offset12 ); +} + +static inline void +conn_lap_set_remote_qpn( + IN const ib_net32_t qpn, + IN OUT mad_cm_lap_t* const p_lap ) +{ + CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + __set_low24( &p_lap->offset12, qpn ); +} + +static inline uint8_t +conn_lap_get_resp_timeout( + IN const mad_cm_lap_t* const p_lap ) +{ + CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + return (p_lap->offset12.bytes[3] >> 3); +} + 
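+/*
+ * Like the other CM protocol timeouts, the LAP response timeout is a
+ * five-bit value encoded as 4.096us * 2^value; these accessors shift
+ * it into and out of the high bits of offset12's final byte.
+ */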
+static inline void +conn_lap_set_resp_timeout( + IN const uint8_t resp_timeout, + IN OUT mad_cm_lap_t* const p_lap ) +{ + CL_ASSERT( !(resp_timeout & 0xE0) ); + CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + p_lap->offset12.bytes[3] = (resp_timeout << 3); +} + +static inline ib_api_status_t +conn_lap_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_lap_t* const p_lap ) +{ + CL_ASSERT( p_lap->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + + cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE ); + if( p_data ) + { + if( data_len > IB_LAP_PDATA_SIZE ) + return IB_INVALID_PARAMETER; + + cl_memcpy( p_lap->pdata, p_data, data_len ); + cl_memclr( p_lap->pdata + data_len, + IB_LAP_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_lap->pdata, IB_LAP_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_lap_clr_rsvd_fields( + IN OUT mad_cm_lap_t* const p_lap ) +{ + p_lap->rsvd1 = 0; + p_lap->offset12.val &= CL_HTON32( 0xFFFFFFF8 ); + p_lap->rsvd2 = 0; +} + +static inline net32_t +conn_lap_path_get_flow_lbl( + IN const lap_path_info_t* const p_lap_path ) +{ + return __get_low20( p_lap_path->offset36 ); +} + +static inline void +conn_lap_path_set_flow_lbl( + IN const ib_net32_t flow_lbl, + IN OUT lap_path_info_t* const p_lap_path ) +{ + __set_low24( &p_lap_path->offset36, (flow_lbl & CL_HTON32( 0x000FFFFF )) ); +} + +static inline uint8_t +conn_lap_path_get_tclass( + IN const lap_path_info_t* const p_lap_path ) +{ + return p_lap_path->offset36.bytes[3]; +} + +static inline void +conn_lap_path_set_tclass( + IN const uint8_t tclass, + IN OUT lap_path_info_t* const p_lap_path ) +{ + p_lap_path->offset36.bytes[3] = tclass; +} + +static inline uint8_t +conn_lap_path_get_pkt_rate( + IN const lap_path_info_t* const p_lap_path ) +{ + return ( p_lap_path->offset41 & 0x7F ); +} + +static inline void +conn_lap_path_set_pkt_rate( + IN const uint8_t pkt_rate, + IN OUT lap_path_info_t* const p_lap_path ) +{ + CL_ASSERT( !( pkt_rate & 0xC0 ) ); + + p_lap_path->offset41 = ( pkt_rate & 0x7F ); +} + +static inline const uint8_t +conn_lap_path_get_svc_lvl( + IN const lap_path_info_t* const p_lap_path ) +{ + return ( p_lap_path->offset42 >> 4 ); +} + +static inline void +conn_lap_path_set_svc_lvl( + IN const uint8_t sl, + IN OUT lap_path_info_t* const p_lap_path ) +{ + CL_ASSERT( !( sl & 0xF0 ) ); + + p_lap_path->offset42 = ( (p_lap_path->offset42 & 0x08) | (sl & 0xF0) ); +} + +static inline boolean_t +conn_lap_path_get_subn_lcl( + IN const lap_path_info_t* const p_lap_path ) +{ + return (p_lap_path->offset42 & 0x08) != 0; +} + +static inline void +conn_lap_path_set_subn_lcl( + IN const boolean_t subn_lcl, + IN OUT lap_path_info_t* const p_lap_path ) +{ + if( subn_lcl ) + p_lap_path->offset42 |= 0x08; + else + p_lap_path->offset42 &= 0xF0; +} + +static inline const uint8_t +conn_lap_path_get_lcl_ack_timeout( + IN const lap_path_info_t* const p_lap_path ) +{ + return ( p_lap_path->offset43 >> 3 ); +} + +static inline void +conn_lap_path_set_lcl_ack_timeout( + IN const uint8_t timeout, + IN OUT lap_path_info_t* const p_lap_path ) +{ + CL_ASSERT( !( timeout & 0xE0 ) ); + p_lap_path->offset43 = (timeout << 3); +} + +static inline void +conn_lap_path_clr_rsvd_fields( + IN OUT lap_path_info_t* const p_lap_path ) +{ + p_lap_path->offset36.val &= CL_HTON32( 0xFFFFF0FF ); + p_lap_path->offset41 &= 0x3F; + p_lap_path->offset42 &= 0xF8; + p_lap_path->offset43 &= 0xF8; +} + + + +/* APR */ +#include + +#define CM_APR_ATTR_ID CL_HTON16(0x001A) +typedef struct 
_mad_cm_apr +{ + ib_mad_t hdr; + + ib_net32_t local_comm_id; + ib_net32_t remote_comm_id; + + uint8_t info_len; + uint8_t status; + ib_net16_t rsvd; + + uint8_t info[IB_APR_INFO_SIZE]; + uint8_t pdata[IB_APR_PDATA_SIZE]; + +} PACK_SUFFIX mad_cm_apr_t; +C_ASSERT( sizeof(mad_cm_apr_t) == MAD_BLOCK_SIZE ); + +#include + +static inline ib_api_status_t +conn_apr_set_apr_info( + IN const uint8_t* const p_info OPTIONAL, + IN const uint8_t info_len, + IN OUT mad_cm_apr_t* const p_apr ) +{ + CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + if( p_info && ( info_len > IB_APR_INFO_SIZE ) ) + return IB_INVALID_PARAMETER; + + if( p_info ) + { + cl_memcpy( p_apr->info, p_info, info_len ); + cl_memclr( p_apr->info + info_len, + IB_APR_INFO_SIZE - info_len ); + p_apr->info_len = info_len; + } + else + { + cl_memclr( p_apr->info, IB_APR_INFO_SIZE ); + p_apr->info_len = 0; + } + return IB_SUCCESS; +} + +static inline ib_api_status_t +conn_apr_set_pdata( + IN const uint8_t* const p_data OPTIONAL, + IN const uint8_t data_len, + IN OUT mad_cm_apr_t* const p_apr ) +{ + CL_ASSERT( p_apr->hdr.class_ver == IB_MCLASS_CM_VER_2 ); + if( p_data ) + { + if( data_len > IB_APR_PDATA_SIZE ) + return IB_INVALID_PARAMETER; + + cl_memcpy( p_apr->pdata, p_data, data_len ); + cl_memclr( p_apr->pdata + data_len, + IB_APR_PDATA_SIZE - data_len ); + } + else + { + cl_memclr( p_apr->pdata, IB_APR_PDATA_SIZE ); + } + return IB_SUCCESS; +} + +static inline void +conn_apr_clr_rsvd_fields( + IN OUT mad_cm_apr_t* const p_apr ) +{ + p_apr->rsvd = 0; +} + +#endif /* __IB_AL_CM_CONN_H__ */ diff --git a/branches/Ndi/core/al/al_cm_qp.c b/branches/Ndi/core/al/al_cm_qp.c new file mode 100644 index 00000000..187a81da --- /dev/null +++ b/branches/Ndi/core/al/al_cm_qp.c @@ -0,0 +1,2058 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include "al.h" +#include "al_qp.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_mgr.h" +#include "al_debug.h" + + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_cm_qp.tmh" +#endif + +typedef struct _al_listen +{ + al_obj_t obj; + net32_t cid; + + ib_pfn_cm_req_cb_t pfn_cm_req_cb; + + /* valid for ud qp_type only */ + const void* __ptr64 sidr_context; + +} al_listen_t; + + +#ifdef CL_KERNEL + +/* + * Structure for queuing received MADs to the asynchronous processing + * manager. 
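+ * The CEP callback runs at DISPATCH_LEVEL, so only the AL handle and
+ * CID are captured here; the item is handed to the async processing
+ * manager, whose thread then drains the MADs with al_cep_poll.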
+ */ +typedef struct _cep_async_mad +{ + cl_async_proc_item_t item; + ib_al_handle_t h_al; + net32_t cid; + +} cep_async_mad_t; + +#endif /* CL_KERNEL */ + + +/* + * Transition the QP to the error state to flush all oustanding work + * requests and sets the timewait time. This function may be called + * when destroying the QP in order to flush all work requests, so we + * cannot call through the main API, or the call will fail since the + * QP is no longer in the initialize state. + */ +static void +__cep_timewait_qp( + IN const ib_qp_handle_t h_qp ) +{ + uint64_t timewait = 0; + ib_qp_mod_t qp_mod; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* + * The CM should have set the proper timewait time-out value. Reset + * the QP and let it enter the timewait state. + */ + if( al_cep_get_timewait( h_qp->obj.h_al, + ((al_conn_qp_t*)h_qp)->cid, &timewait ) == IB_SUCCESS ) + { + /* Special checks on the QP state for error handling - see above. */ + if( !h_qp || !AL_OBJ_IS_TYPE( h_qp, AL_OBJ_TYPE_H_QP ) || + ( (h_qp->obj.state != CL_INITIALIZED) && + (h_qp->obj.state != CL_DESTROYING) ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return; + } + + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_ERROR; + + /* Modify to error state using function pointers - see above. */ + status = h_qp->pfn_modify_qp( h_qp, &qp_mod, NULL ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("pfn_modify_qp to IB_QPS_ERROR returned %s\n", + ib_get_err_str( status )) ); + return; + } + +#ifdef CL_KERNEL + /* Store the timestamp after which the QP exits timewait. */ + h_qp->timewait = cl_get_time_stamp() + timewait; +#endif /* CL_KERNEL */ + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_path_rec( + IN const mad_cm_req_t* const p_req, + IN const req_path_info_t* const p_path, + OUT ib_path_rec_t* const p_path_rec ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_path ); + CL_ASSERT( p_path_rec ); + + /* + * Format a local path record. The local ack timeout specified in the + * REQ is twice the packet life plus the sender's CA ACK delay. When + * reporting the packet life, we divide the local ack timeout by 2 to + * approach the path's packet lifetime. Since local ack timeout is + * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the + * time in half. + */ + ib_path_rec_init_local( p_path_rec, + &p_path->local_gid, + &p_path->remote_gid, + p_path->local_lid, + p_path->remote_lid, + 1, p_req->pkey, + conn_req_path_get_svc_lvl( p_path ), + IB_PATH_SELECTOR_EXACTLY, conn_req_get_mtu( p_req ), + IB_PATH_SELECTOR_EXACTLY, + conn_req_path_get_pkt_rate( p_path ), + IB_PATH_SELECTOR_EXACTLY, + (uint8_t)( conn_req_path_get_lcl_ack_timeout( p_path ) - 1 ), + 0 ); + + p_path_rec->hop_flow_raw.val = 0; + /* Add global routing info as necessary. 
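+ * A clear subnet-local bit means the REQ crossed a router and carried
+ * a GRH, so the hop limit, flow label, and traffic class are valid
+ * and are copied into the path record; otherwise they stay zeroed.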
*/ + if( !conn_req_path_get_subn_lcl( p_path ) ) + { + ib_path_rec_set_hop_flow_raw( p_path_rec, p_path->hop_limit, + conn_req_path_get_flow_lbl( p_path ), FALSE ); + p_path_rec->tclass = p_path->traffic_class; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_rec( + IN const mad_cm_req_t* const p_req, + OUT ib_cm_req_rec_t *p_req_rec ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_req_rec ); + + cl_memclr( p_req_rec, sizeof(ib_cm_req_rec_t) ); + + /* format version specific data */ + p_req_rec->p_req_pdata = p_req->pdata; + + p_req_rec->qp_type = conn_req_get_qp_type( p_req ); + + p_req_rec->resp_res = conn_req_get_resp_res( p_req ); + p_req_rec->flow_ctrl = conn_req_get_flow_ctrl( p_req ); + p_req_rec->rnr_retry_cnt = conn_req_get_rnr_retry_cnt( p_req ); + + __format_req_path_rec( p_req, &p_req->primary_path, + &p_req_rec->primary_path ); + __format_req_path_rec( p_req, &p_req->alternate_path, + &p_req_rec->alt_path ); + + /* These values are filled in later based on listen or peer connections + p_req_rec->context = ; + p_req_rec->h_cm_req = ; + p_req_rec->h_cm_listen = ; + */ + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle incoming REQs that matched to an outstanding listen. +* +*/ + + +static void +__listen_req( + IN al_listen_t* const p_listen, + IN const net32_t new_cid, + IN const mad_cm_req_t* const p_req ) +{ + ib_cm_req_rec_t req_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_listen ); + CL_ASSERT( new_cid != AL_INVALID_CID ); + CL_ASSERT( p_req ); + + /* Format the callback record. */ + __format_req_rec( p_req, &req_rec ); + + /* update listen based rec */ + req_rec.context = p_listen->obj.context; + + req_rec.h_cm_req.cid = new_cid; + req_rec.h_cm_req.h_al = p_listen->obj.h_al; + req_rec.h_cm_req.h_qp = NULL; + + req_rec.h_cm_listen = p_listen; + + /* Invoke the user's callback. */ + p_listen->pfn_cm_req_cb( &req_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_listen( + IN al_listen_t* const p_listen, + IN const net32_t new_cid, + IN const ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Context is a listen - MAD must be a REQ or SIDR REQ */ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + __listen_req( + p_listen, new_cid, (mad_cm_req_t*)p_mad ); + break; + + case CM_SIDR_REQ_ATTR_ID: + /* TODO - implement SIDR. */ + default: + CL_ASSERT( p_mad->attr_id == CM_REQ_ATTR_ID || + p_mad->attr_id == CM_SIDR_REQ_ATTR_ID ); + /* Destroy the new CEP as it won't ever be reported to the user. */ + al_destroy_cep( p_listen->obj.h_al, new_cid, NULL ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle send timeouts: +* +*/ + +/* + * callback to process a connection establishment timeout due to reply not + * being received. The connection object has a reference + * taken when the timer is set or when the send is sent. + */ +static void +__proc_conn_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_rej_rec_t rej_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* + * Format the reject record before aborting the connection since + * we need the QP context. + */ + cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) ); + rej_rec.h_qp = h_qp; + rej_rec.qp_context = h_qp->obj.context; + rej_rec.rej_status = IB_REJ_TIMEOUT; + + ref_al_obj( &h_qp->obj ); + + /* Unbind the QP from the CEP. 
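+ * Move the QP to the error state first so outstanding work requests
+ * flush and the timewait clock starts, then atomically invalidate the
+ * CID so a racing disconnect or destroy sees AL_INVALID_CID instead
+ * of a stale CEP.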
*/ + __cep_timewait_qp( h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + + /* Invoke the callback. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_rej_cb( &rej_rec ); + + if( cid == AL_INVALID_CID || + al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * callback to process a LAP timeout due to APR not being received. + */ +static void +__proc_lap_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_apr_rec_t apr_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_qp ); + + /* Report the timeout. */ + cl_memclr( &apr_rec, sizeof(ib_cm_apr_rec_t) ); + apr_rec.h_qp = h_qp; + apr_rec.qp_context = h_qp->obj.context; + apr_rec.cm_status = IB_TIMEOUT; + apr_rec.apr_status = IB_AP_REJECT; + + /* Notify the user that the LAP failed. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_apr_cb( &apr_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Callback to process a disconnection timeout due to not receiving the DREP + * within allowable time. + */ +static void +__proc_dconn_timeout( + IN const ib_qp_handle_t h_qp ) +{ + ib_cm_drep_rec_t drep_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + /* No response. We're done. Deliver a DREP callback. */ + cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) ); + drep_rec.h_qp = h_qp; + drep_rec.qp_context = h_qp->obj.context; + drep_rec.cm_status = IB_TIMEOUT; + + ref_al_obj( &h_qp->obj ); + + __cep_timewait_qp( h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + + /* Call the user back. */ + ((al_conn_qp_t*)h_qp)->pfn_cm_drep_cb( &drep_rec ); + + if( cid == AL_INVALID_CID || + al_destroy_cep( h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_failed_send( + IN ib_qp_handle_t h_qp, + IN const ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Failure indicates a send. */ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + case CM_REP_ATTR_ID: + __proc_conn_timeout( h_qp ); + break; + case CM_LAP_ATTR_ID: + __proc_lap_timeout( h_qp ); + break; + case CM_DREQ_ATTR_ID: + __proc_dconn_timeout( h_qp ); + break; + default: + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid CM send MAD attribute ID %d.\n", p_mad->attr_id) ); + break; + } + + AL_EXIT( AL_DBG_CM ); +} + + +/****************************************************************************** +* Functions that handle received MADs on a connection (not listen) +* +*/ + + +void +__proc_peer_req( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_req_t* const p_req ) +{ + ib_cm_req_rec_t req_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm ); + CL_ASSERT( p_cm->h_qp ); + /* Must be peer-to-peer. */ + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb ); + CL_ASSERT( p_req ); + + /* Format the callback record. */ + __format_req_rec( p_req, &req_rec ); + + /* update peer based rec handles and context values */ + req_rec.context = p_cm->h_qp->obj.context; + req_rec.h_cm_req = *p_cm; + req_rec.h_cm_listen = NULL; + + /* Invoke the user's callback. User must call ib_cm_rep or ib_cm_rej. */ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_req_cb( &req_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_mra( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_mra_t* const p_mra ) +{ + ib_cm_mra_rec_t mra_rec; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm->h_qp ); + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb ); + + /* Format the MRA callback record. 
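+ * Only the QP handle, its context, and the private data are reported;
+ * an MRA does not advance the connection, it only asks the local CM
+ * to keep waiting for the real response.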
*/ + cl_memclr( &mra_rec, sizeof(ib_cm_mra_rec_t) ); + + mra_rec.h_qp = p_cm->h_qp; + mra_rec.qp_context = p_cm->h_qp->obj.context; + mra_rec.p_mra_pdata = p_mra->pdata; + + /* + * Call the user back. Note that users will get a callback only + * for the first MRA received in response to a REQ, REP, or LAP. + */ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_mra_cb( &mra_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_rej( + IN const ib_cm_handle_t* const p_cm, + IN const mad_cm_rej_t* const p_rej ) +{ + ib_cm_rej_rec_t rej_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( p_cm->h_qp ) + { + /* Format the REJ callback record. */ + cl_memclr( &rej_rec, sizeof(ib_cm_rej_rec_t) ); + + rej_rec.h_qp = p_cm->h_qp; + rej_rec.qp_context = p_cm->h_qp->obj.context; + + rej_rec.p_rej_pdata = p_rej->pdata; + rej_rec.p_ari = p_rej->ari; + rej_rec.ari_length = conn_rej_get_ari_len( p_rej ); + rej_rec.rej_status = p_rej->reason; + + ref_al_obj( &p_cm->h_qp->obj ); + + /* + * Unbind the QP from the connection object. This allows the QP to + * be immediately reused in another connection request. + */ + __cep_timewait_qp( p_cm->h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID ); + CL_ASSERT( cid == p_cm->cid || cid == AL_INVALID_CID ); + if( cid == AL_INVALID_CID || + al_destroy_cep( p_cm->h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm->h_qp->obj ); + } + + /* Call the user back. */ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rej_cb( &rej_rec ); + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_rep( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_rep_t* const p_rep ) +{ + ib_cm_rep_rec_t rep_rec; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &rep_rec, sizeof(ib_cm_rep_rec_t) ); + + /* fill the rec callback data */ + rep_rec.p_rep_pdata = p_rep->pdata; + rep_rec.qp_type = p_cm->h_qp->type; + + rep_rec.h_cm_rep = *p_cm; + rep_rec.qp_context = p_cm->h_qp->obj.context; + rep_rec.resp_res = p_rep->resp_resources; + rep_rec.flow_ctrl = conn_rep_get_e2e_flow_ctl( p_rep ); + rep_rec.apr_status = conn_rep_get_failover( p_rep ); + + /* Notify the user of the reply. 
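+ * The user completes the handshake from this callback by calling
+ * ib_cm_rtu to accept or ib_cm_rej to reject, mirroring the REQ
+ * side's obligation to call ib_cm_rep or ib_cm_rej.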
*/ + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rep_cb( &rep_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_rtu( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_rtu_t* const p_rtu ) +{ + ib_cm_rtu_rec_t rtu_rec; + + AL_ENTER( AL_DBG_CM ); + + rtu_rec.p_rtu_pdata = p_rtu->pdata; + rtu_rec.h_qp = p_cm->h_qp; + rtu_rec.qp_context = p_cm->h_qp->obj.context; + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_rtu_cb( &rtu_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_dreq( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_dreq_t* const p_dreq ) +{ + ib_cm_dreq_rec_t dreq_rec; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &dreq_rec, sizeof(ib_cm_dreq_rec_t) ); + + dreq_rec.h_cm_dreq = *p_cm; + dreq_rec.p_dreq_pdata = p_dreq->pdata; + + dreq_rec.qp_context = p_cm->h_qp->obj.context; + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_dreq_cb( &dreq_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_drep( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_drep_t* const p_drep ) +{ + ib_cm_drep_rec_t drep_rec; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &drep_rec, sizeof(ib_cm_drep_rec_t) ); + + /* Copy qp context before the connection is released */ + drep_rec.cm_status = IB_SUCCESS; + drep_rec.p_drep_pdata = p_drep->pdata; + drep_rec.h_qp = p_cm->h_qp; + drep_rec.qp_context = p_cm->h_qp->obj.context; + + ref_al_obj( &p_cm->h_qp->obj ); + + __cep_timewait_qp( p_cm->h_qp ); + + cid = cl_atomic_xchg( &((al_conn_qp_t*)p_cm->h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + CL_ASSERT( cid == p_cm->cid ); + + if( al_destroy_cep( + p_cm->h_al, p_cm->cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm->h_qp->obj ); + } + } + else + { + deref_al_obj( &p_cm->h_qp->obj ); + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_drep_cb( &drep_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +void +__proc_lap( + IN ib_cm_handle_t* const p_cm, + IN const mad_cm_lap_t* const p_lap ) +{ + ib_cm_lap_rec_t lap_rec; + const lap_path_info_t* const p_path = &p_lap->alternate_path; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cm ); + CL_ASSERT( p_cm->h_qp ); + CL_ASSERT( p_lap ); + + cl_memclr( &lap_rec, sizeof(ib_cm_lap_rec_t) ); + lap_rec.qp_context = p_cm->h_qp->obj.context; + lap_rec.h_cm_lap = *p_cm; + + /* + * Format the path record. The local ack timeout specified in the + * LAP is twice the packet life plus the sender's CA ACK delay. When + * reporting the packet life, we divide the local ack timeout by 2 to + * approach the path's packet lifetime. Since local ack timeout is + * expressed as 4.096 * 2^x, subtracting 1 is equivalent to dividing the + * time in half. + */ + ib_path_rec_init_local( &lap_rec.alt_path, + &p_lap->alternate_path.local_gid, + &p_lap->alternate_path.remote_gid, + p_lap->alternate_path.local_lid, + p_lap->alternate_path.remote_lid, + 1, IB_DEFAULT_PKEY, + conn_lap_path_get_svc_lvl( &p_lap->alternate_path ), + IB_PATH_SELECTOR_EXACTLY, + IB_MTU_LEN_2048, + IB_PATH_SELECTOR_EXACTLY, + conn_lap_path_get_pkt_rate( p_path ), + IB_PATH_SELECTOR_EXACTLY, + (uint8_t)( conn_lap_path_get_lcl_ack_timeout( p_path ) - 1 ), + 0 ); + + lap_rec.alt_path.hop_flow_raw.val = 0; + /* Add global routing info as necessary. 
*/ + if( !conn_lap_path_get_subn_lcl( &p_lap->alternate_path ) ) + { + ib_path_rec_set_hop_flow_raw( &lap_rec.alt_path, + p_lap->alternate_path.hop_limit, + conn_lap_path_get_flow_lbl( &p_lap->alternate_path ), + FALSE ); + lap_rec.alt_path.tclass = + conn_lap_path_get_tclass( &p_lap->alternate_path ); + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_lap_cb( &lap_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__cep_lap_qp( + IN ib_cm_handle_t* const p_cm ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + status = al_cep_get_rts_attr( p_cm->h_al, p_cm->cid, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_get_rts_attr returned %s.\n", ib_get_err_str(status)) ); + goto done; + } + + status = ib_modify_qp( p_cm->h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_modify_qp for LAP returned %s.\n", ib_get_err_str(status)) ); + } + +done: + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__proc_apr( + IN ib_cm_handle_t* const p_cm, + IN mad_cm_apr_t* const p_apr ) +{ + ib_cm_apr_rec_t apr_rec; + + AL_ENTER( AL_DBG_CM ); + + apr_rec.h_qp = p_cm->h_qp; + apr_rec.qp_context = p_cm->h_qp->obj.context; + apr_rec.p_info = (const uint8_t*)&p_apr->info; + apr_rec.info_length = p_apr->info_len; + apr_rec.p_apr_pdata = p_apr->pdata; + apr_rec.apr_status = p_apr->status; + + if( apr_rec.apr_status == IB_AP_SUCCESS ) + { + apr_rec.cm_status = __cep_lap_qp( p_cm ); + } + else + { + apr_rec.cm_status = IB_ERROR; + } + + ((al_conn_qp_t*)p_cm->h_qp)->pfn_cm_apr_cb( &apr_rec ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__proc_conn( + IN ib_cm_handle_t* const p_cm, + IN ib_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Success indicates a receive. 
*/ + switch( p_mad->attr_id ) + { + case CM_REQ_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_peer_req( p_cm, (mad_cm_req_t*)p_mad ); + break; + + case CM_MRA_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_mra( p_cm, (mad_cm_mra_t*)p_mad ); + break; + + case CM_REJ_ATTR_ID: + __proc_rej( p_cm, (mad_cm_rej_t*)p_mad ); + break; + + case CM_REP_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_rep( p_cm, (mad_cm_rep_t*)p_mad ); + break; + + case CM_RTU_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_rtu( p_cm, (mad_cm_rtu_t*)p_mad ); + break; + + case CM_DREQ_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_dreq( p_cm, (mad_cm_dreq_t*)p_mad ); + break; + + case CM_DREP_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_drep( p_cm, (mad_cm_drep_t*)p_mad ); + break; + + case CM_LAP_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_lap( p_cm, (mad_cm_lap_t*)p_mad ); + break; + + case CM_APR_ATTR_ID: + CL_ASSERT( ((al_conn_qp_t*)p_cm->h_qp)->cid == (int32_t)p_cm->cid || + ((al_conn_qp_t*)p_cm->h_qp)->cid == AL_INVALID_CID ); + __proc_apr( p_cm, (mad_cm_apr_t*)p_mad ); + break; + + //case CM_SIDR_REQ_ATTR_ID: + // p_async_mad->item.pfn_callback = __process_cm_sidr_req; + // break; + + //case CM_SIDR_REP_ATTR_ID: + // p_async_mad->item.pfn_callback = __process_cm_sidr_rep; + // break; + + default: + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid CM recv MAD attribute ID %d.\n", p_mad->attr_id) ); + } + + AL_EXIT( AL_DBG_CM ); +} + +/****************************************************************************** +* CEP callback handler. +* +*/ + +#ifdef CL_KERNEL +static void +__process_cep_cb( +#else +static void +__cm_handler( +#endif + IN const ib_al_handle_t h_al, + IN const net32_t cid ) +{ + ib_api_status_t status; + void* __ptr64 context; + net32_t new_cid; + ib_mad_element_t *p_mad; + ib_cm_handle_t h_cm; + + AL_ENTER( AL_DBG_CM ); + + for( status = al_cep_poll( h_al, cid, &context, &new_cid, &p_mad ); + status == IB_SUCCESS; + status = al_cep_poll( h_al, cid, &context, &new_cid, &p_mad ) ) + { + /* Something to do - WOOT!!! */ + if( new_cid != AL_INVALID_CID ) + { + __proc_listen( (al_listen_t* __ptr64)context, + new_cid, ib_get_mad_buf( p_mad ) ); + } + else if( p_mad->status != IB_SUCCESS ) + { + /* Context is a QP handle, and a sent MAD timed out. 
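+		 * The meaning of the context is fixed at al_create_cep time;
+		 * for reference, both binding forms appear later in this file:
+		 *
+		 *	al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid );
+		 *	al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid );
+		 *
+		 * A listen context routes to __proc_listen, a QP context with a
+		 * failed send lands here, and anything else is a received MAD
+		 * for an established connection.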
*/
+			__proc_failed_send(
+				(ib_qp_handle_t)context, ib_get_mad_buf( p_mad ) );
+		}
+		else
+		{
+			h_cm.h_al = h_al;
+			h_cm.cid = cid;
+			h_cm.h_qp = (ib_qp_handle_t)context;
+			__proc_conn( &h_cm, ib_get_mad_buf( p_mad ) );
+		}
+		ib_put_mad( p_mad );
+	}
+}
+
+
+#ifdef CL_KERNEL
+
+static void
+__process_cep_async(
+	IN				cl_async_proc_item_t		*p_item )
+{
+	cep_async_mad_t	*p_async_mad;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_async_mad = PARENT_STRUCT( p_item, cep_async_mad_t, item );
+
+	__process_cep_cb( p_async_mad->h_al, p_async_mad->cid );
+
+	cl_free( p_async_mad );
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+/*
+ * The handler is invoked at DISPATCH_LEVEL in kernel mode.  We need to switch
+ * to a passive level thread context to perform QP modify and invoke user
+ * callbacks.
+ */
+static void
+__cm_handler(
+	IN		const	ib_al_handle_t				h_al,
+	IN		const	net32_t						cid )
+{
+	cep_async_mad_t	*p_async_mad;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_async_mad = (cep_async_mad_t*)cl_zalloc( sizeof(cep_async_mad_t) );
+	if( !p_async_mad )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("failed to cl_zalloc cep_async_mad_t (%d bytes)\n",
+			sizeof(cep_async_mad_t)) );
+		return;
+	}
+
+	p_async_mad->h_al = h_al;
+	p_async_mad->cid = cid;
+	p_async_mad->item.pfn_callback = __process_cep_async;
+
+	/* Queue the MAD for asynchronous processing. */
+	cl_async_proc_queue( gp_async_proc_mgr, &p_async_mad->item );
+
+	AL_EXIT( AL_DBG_CM );
+}
+#endif	/* CL_KERNEL */
+
+
+/*
+ * Transition the QP to the INIT state, if it is not already in the
+ * INIT state.
+ */
+ib_api_status_t
+__cep_init_qp(
+	IN		const	ib_qp_handle_t				h_qp,
+	IN				ib_qp_mod_t* const			p_init )
+{
+	ib_qp_mod_t			qp_mod;
+	ib_api_status_t		status;
+
+	/*
+	 * Move to the init state to allow posting of receive buffers.
+	 * Check the current state of the QP.  The user may have already
+	 * transitioned it and posted some receives to the QP, so we
+	 * should not reset the QP if it is already in the INIT state.
+	 */
+	if( h_qp->state != IB_QPS_INIT )
+	{
+		/* Reset the QP. */
+		cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );
+		qp_mod.req_state = IB_QPS_RESET;
+
+		status = ib_modify_qp( h_qp, &qp_mod );
+		if( status != IB_SUCCESS )
+		{
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("ib_modify_qp to IB_QPS_RESET returned %s\n",
+				ib_get_err_str(status) ) );
+		}
+
+		/* Initialize the QP. */
+		status = ib_modify_qp( h_qp, p_init );
+		if( status != IB_SUCCESS )
+		{
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("ib_modify_qp returned %s.\n", ib_get_err_str(status) ) );
+			return status;
+		}
+	}
+
+	return IB_SUCCESS;
+}
+
+static ib_api_status_t
+__cep_pre_req(
+	IN		const	ib_cm_req_t* const			p_cm_req )
+{
+	ib_api_status_t		status;
+	ib_qp_mod_t			qp_mod;
+
+	AL_ENTER( AL_DBG_CM );
+
+	status = al_cep_pre_req( qp_get_al( p_cm_req->h_qp ),
+		((al_conn_qp_t*)p_cm_req->h_qp)->cid, p_cm_req, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_pre_req returned %s.\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	/* Transition QP through state machine */
+	/*
+	 * Warning!  Using all access rights.  We need to modify
+	 * the ib_cm_req_t to include this. 
+ */ + qp_mod.state.init.access_ctrl |= + IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_ATOMIC; + status = __cep_init_qp( p_cm_req->h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__cep_init_qp returned %s\n", ib_get_err_str(status)) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__cep_conn_req( + IN const ib_al_handle_t h_al, + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + //cl_status_t cl_status; + //cl_event_t sync_event; + //cl_event_t *p_sync_event = NULL; + al_conn_qp_t *p_qp; + net32_t cid, old_cid; + + AL_ENTER( AL_DBG_CM ); + + /* event based mechanism */ + if( p_cm_req->flags & IB_FLAGS_SYNC ) + { + AL_EXIT( AL_DBG_CM ); + return IB_UNSUPPORTED; + //cl_event_construct( &sync_event ); + //cl_status = cl_event_init( &sync_event, FALSE ); + //if( cl_status != CL_SUCCESS ) + //{ + // __deref_conn( p_conn ); + // return ib_convert_cl_status( cl_status ); + //} + //p_conn->p_sync_event = p_sync_event = &sync_event; + } + + p_qp = (al_conn_qp_t*)p_cm_req->h_qp; + + /* Get a CEP and bind it to the QP. */ + status = al_create_cep( h_al, __cm_handler, p_cm_req->h_qp, &cid ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_create_cep returned %s.\n", ib_get_err_str( status )) ); + goto done; + } + + /* See if this QP has already been connected. */ + old_cid = cl_atomic_comp_xchg( &p_qp->cid, AL_INVALID_CID, cid ); + if( old_cid != AL_INVALID_CID ) + { + al_destroy_cep( h_al, cid, NULL ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_STATE; + } + + status = __cep_pre_req( p_cm_req ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__cep_pre_req returned %s.\n", ib_get_err_str( status )) ); + goto err; + } + + /* Store callback pointers. */ + p_qp->pfn_cm_req_cb = p_cm_req->pfn_cm_req_cb; + p_qp->pfn_cm_rep_cb = p_cm_req->pfn_cm_rep_cb; + p_qp->pfn_cm_mra_cb = p_cm_req->pfn_cm_mra_cb; + p_qp->pfn_cm_rej_cb = p_cm_req->pfn_cm_rej_cb; + + /* Send the REQ. 
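+	 * For illustration only (hypothetical caller; my_rep_cb and
+	 * my_rej_cb are placeholder names), a client reaches this point
+	 * via the public API below:
+	 *
+	 *	ib_cm_req_t	req;
+	 *
+	 *	cl_memclr( &req, sizeof(req) );
+	 *	req.qp_type = IB_QPT_RELIABLE_CONN;
+	 *	req.h_qp = h_qp;
+	 *	req.pfn_cm_rep_cb = my_rep_cb;
+	 *	req.pfn_cm_rej_cb = my_rej_cb;
+	 *	status = ib_cm_req( &req );
+	 *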
*/
+	status = al_cep_send_req( h_al, p_qp->cid );
+	if( status != IB_SUCCESS )
+	{
+		//if( p_sync_event )
+		//	cl_event_destroy( p_sync_event );
+
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_send_req returned %s.\n", ib_get_err_str(status)) );
+err:
+		ref_al_obj( &p_qp->qp.obj );
+		cl_atomic_xchg( &p_qp->cid, AL_INVALID_CID );
+		if( al_destroy_cep( h_al, cid, deref_al_obj ) != IB_SUCCESS )
+			deref_al_obj( &p_qp->qp.obj );
+	}
+
+	/* wait on event if synchronous operation */
+	//if( p_sync_event )
+	//{
+	//	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,
+	//		("event blocked on REQ...\n") );
+	//	cl_event_wait_on( p_sync_event, EVENT_NO_TIMEOUT, FALSE );
+
+	//	cl_event_destroy( p_sync_event );
+	//}
+
+done:
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+ib_api_status_t
+ib_cm_req(
+	IN		const	ib_cm_req_t* const			p_cm_req )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !p_cm_req )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Only supported qp types allowed */
+	switch( p_cm_req->qp_type )
+	{
+	default:
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid qp_type.\n") );
+		return IB_INVALID_SETTING;
+
+	case IB_QPT_RELIABLE_CONN:
+	case IB_QPT_UNRELIABLE_CONN:
+		if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_qp, AL_OBJ_TYPE_H_QP ) ||
+			(p_cm_req->h_qp->type != p_cm_req->qp_type) )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+			return IB_INVALID_QP_HANDLE;
+		}
+
+		status = __cep_conn_req( qp_get_al( p_cm_req->h_qp ), p_cm_req );
+		break;
+
+	case IB_QPT_UNRELIABLE_DGRM:
+		if( AL_OBJ_INVALID_HANDLE( p_cm_req->h_al, AL_OBJ_TYPE_H_AL ) )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );
+			return IB_INVALID_AL_HANDLE;
+		}
+		status = IB_UNSUPPORTED;
+//		status = cm_sidr_req( p_cm_req->h_al, p_cm_req );
+		break;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+/*
+ * Note: we pass in the QP handle separately because it comes from different
+ * sources.  It comes from the ib_cm_rep_t structure in the ib_cm_rep path, and
+ * from the ib_cm_handle_t structure in the ib_cm_rtu path.
+ */
+static ib_api_status_t
+__cep_rts_qp(
+	IN		const	ib_cm_handle_t				h_cm,
+	IN		const	ib_qp_handle_t				h_qp,
+	IN		const	ib_access_t					access_ctrl,
+	IN		const	uint32_t					sq_depth,
+	IN		const	uint32_t					rq_depth )
+{
+	ib_api_status_t		status;
+	ib_qp_mod_t			qp_mod;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Set the QP to RTR. */
+	status = al_cep_get_rtr_attr( h_cm.h_al, h_cm.cid, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_get_rtr_attr returned %s\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	if( access_ctrl )
+	{
+		qp_mod.state.rtr.access_ctrl = access_ctrl;
+		qp_mod.state.rtr.opts |= IB_MOD_QP_ACCESS_CTRL;
+	}
+
+	if( sq_depth )
+	{
+		qp_mod.state.rtr.sq_depth = sq_depth;
+		qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH;
+	}
+
+	if( rq_depth )
+	{
+		qp_mod.state.rtr.rq_depth = rq_depth;
+		qp_mod.state.rtr.opts |= IB_MOD_QP_RQ_DEPTH;
+	}
+
+	status = ib_modify_qp( h_qp, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("ib_modify_qp to RTR returned %s.\n", ib_get_err_str(status) ) );
+		return status;
+	}
+
+	/* Set the QP to RTS. 
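+	 * The RTS attributes, like the RTR attributes above, come from the
+	 * CEP state.  Note the pattern used for the optional overrides: a
+	 * caller-supplied value only takes effect when paired with its
+	 * IB_MOD_QP_* option bit, e.g. (illustrative):
+	 *
+	 *	qp_mod.state.rtr.sq_depth = 128;
+	 *	qp_mod.state.rtr.opts |= IB_MOD_QP_SQ_DEPTH;
+	 *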
*/
+	status = al_cep_get_rts_attr( h_cm.h_al, h_cm.cid, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_get_rts_attr returned %s\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	status = ib_modify_qp( h_qp, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("ib_modify_qp to RTS returned %s.\n", ib_get_err_str(status) ) );
+		return status;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+
+
+static ib_api_status_t
+__cep_pre_rep(
+	IN		const	ib_cm_handle_t				h_cm,
+	IN				ib_qp_mod_t* const			p_qp_mod,
+	IN		const	ib_cm_rep_t* const			p_cm_rep )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Transition the QP to the INIT state. */
+	p_qp_mod->state.init.access_ctrl = p_cm_rep->access_ctrl;
+	status = __cep_init_qp( p_cm_rep->h_qp, p_qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("__cep_init_qp returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Prepost receives. */
+	if( p_cm_rep->p_recv_wr )
+	{
+		status = ib_post_recv( p_cm_rep->h_qp, p_cm_rep->p_recv_wr,
+			(ib_recv_wr_t** __ptr64)p_cm_rep->pp_recv_failure );
+		if( status != IB_SUCCESS )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("ib_post_recv returned %s.\n", ib_get_err_str(status)) );
+			return status;
+		}
+	}
+
+	/* Transition the QP to the RTR and RTS states. */
+	status = __cep_rts_qp( h_cm, p_cm_rep->h_qp,
+		p_cm_rep->access_ctrl, p_cm_rep->sq_depth, p_cm_rep->rq_depth );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("__cep_rts_qp returned %s.\n", ib_get_err_str(status)) );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+static ib_api_status_t
+__cep_conn_rep(
+	IN				ib_cm_handle_t				h_cm,
+	IN		const	ib_cm_rep_t* const			p_cm_rep )
+{
+	ib_api_status_t		status;
+	net32_t				cid;
+	ib_qp_mod_t			qp_mod;
+	boolean_t			qp_linked = FALSE;
+
+	AL_ENTER( AL_DBG_CM );
+
+	cid = cl_atomic_comp_xchg(
+		&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID, h_cm.cid );
+
+	if( cid != AL_INVALID_CID )
+	{
+		/* We don't destroy the CEP to allow the user to retry accepting. */
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("QP already connected.\n") );
+		return IB_INVALID_QP_HANDLE;
+	}
+
+	/* Store the CM callbacks. */
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rej_cb = p_cm_rep->pfn_cm_rej_cb;
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_mra_cb = p_cm_rep->pfn_cm_mra_cb;
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_rtu_cb = p_cm_rep->pfn_cm_rtu_cb;
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_lap_cb = p_cm_rep->pfn_cm_lap_cb;
+	((al_conn_qp_t*)p_cm_rep->h_qp)->pfn_cm_dreq_cb = p_cm_rep->pfn_cm_dreq_cb;
+
+	status = al_cep_pre_rep(
+		h_cm.h_al, h_cm.cid, p_cm_rep->h_qp, p_cm_rep, &qp_mod );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_pre_rep returned %s.\n", ib_get_err_str( status )) );
+		goto err;
+	}
+
+	/* The QP has been set as the context for the CEP. 
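+	 * From this point the CEP holds a reference on the QP, so the
+	 * error path below pairs the destroy with that reference:
+	 *
+	 *	ref_al_obj( &p_cm_rep->h_qp->obj );
+	 *	al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj );
+	 *
+	 * and only calls deref_al_obj directly if al_destroy_cep fails.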
*/
+	qp_linked = TRUE;
+
+	/* Transition QP through state machine */
+	status = __cep_pre_rep( h_cm, &qp_mod, p_cm_rep );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("__cep_pre_rep returned %s\n", ib_get_err_str(status)) );
+		goto err;
+	}
+
+	status = al_cep_send_rep( h_cm.h_al, h_cm.cid );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("al_cep_send_rep returned %s\n", ib_get_err_str(status)) );
+err:
+		cl_atomic_xchg(
+			&((al_conn_qp_t*)p_cm_rep->h_qp)->cid, AL_INVALID_CID );
+
+		/* Reject and abort the connection. */
+		al_cep_rej( h_cm.h_al, h_cm.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );
+
+		if( qp_linked )
+		{
+			ref_al_obj( &p_cm_rep->h_qp->obj );
+
+			if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS )
+				deref_al_obj( &p_cm_rep->h_qp->obj );
+		}
+		else
+		{
+			al_destroy_cep( h_cm.h_al, h_cm.cid, NULL );
+		}
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+ib_api_status_t
+ib_cm_rep(
+	IN		const	ib_cm_handle_t				h_cm_req,
+	IN		const	ib_cm_rep_t* const			p_cm_rep )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !p_cm_rep )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Only supported qp types allowed */
+	status = IB_SUCCESS;
+	switch( p_cm_rep->qp_type )
+	{
+	default:
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid qp_type.\n") );
+		status = IB_INVALID_SETTING;
+		break;
+
+	case IB_QPT_RELIABLE_CONN:
+	case IB_QPT_UNRELIABLE_CONN:
+		if( AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||
+			(p_cm_rep->h_qp->type != p_cm_rep->qp_type) )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+			status = IB_INVALID_QP_HANDLE;
+		}
+		else if( p_cm_rep->h_qp->obj.h_al != h_cm_req.h_al )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+			status = IB_INVALID_QP_HANDLE;
+		}
+		break;
+
+	case IB_QPT_UNRELIABLE_DGRM:
+		if( ( p_cm_rep->status == IB_SIDR_SUCCESS ) &&
+			(AL_OBJ_INVALID_HANDLE( p_cm_rep->h_qp, AL_OBJ_TYPE_H_QP ) ||
+			(p_cm_rep->h_qp->type != p_cm_rep->qp_type) ) )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+			status = IB_INVALID_QP_HANDLE;
+		}
+		break;
+	}
+
+	if( status != IB_SUCCESS )
+	{
+		al_cep_rej(
+			h_cm_req.h_al, h_cm_req.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 );
+		al_destroy_cep( h_cm_req.h_al, h_cm_req.cid, NULL );
+
+		AL_EXIT( AL_DBG_CM );
+		return status;
+	}
+
+	if( p_cm_rep->qp_type == IB_QPT_UNRELIABLE_DGRM )
+		status = IB_UNSUPPORTED;//status = cm_sidr_rep( p_conn, p_cm_rep );
+	else
+		status = __cep_conn_rep( h_cm_req, p_cm_rep );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+ib_api_status_t
+ib_cm_rtu(
+	IN		const	ib_cm_handle_t				h_cm_rep,
+	IN		const	ib_cm_rtu_t* const			p_cm_rtu )
+{
+	ib_api_status_t		status;
+	net32_t				cid;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( !p_cm_rtu )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	///*
+	// * Call invalid if event is still processed.
+	// * User may have called rtu in rep callback.
+	// */
+	//if( p_conn->p_sync_event )
+	//{
+	//	AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+	//		("Connection in invalid state. 
Sync call in progress.\n" ) ); + + // cm_res_release( p_conn ); + // __deref_conn( p_conn ); + // return IB_INVALID_STATE; + //} + ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_apr_cb = p_cm_rtu->pfn_cm_apr_cb; + ((al_conn_qp_t*)h_cm_rep.h_qp)->pfn_cm_dreq_cb = p_cm_rtu->pfn_cm_dreq_cb; + + /* Transition QP through state machine */ + status = __cep_rts_qp( h_cm_rep, h_cm_rep.h_qp, + p_cm_rtu->access_ctrl, p_cm_rtu->sq_depth, p_cm_rtu->rq_depth ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__cep_rts_qp returned %s.\n", ib_get_err_str( status )) ); + goto err; + } + + status = al_cep_rtu( h_cm_rep.h_al, h_cm_rep.cid, + p_cm_rtu->p_rtu_pdata, p_cm_rtu->rtu_length ); + if( status != IB_SUCCESS && status != IB_INVALID_STATE ) + { +err: + /* Reject and abort the connection. */ + al_cep_rej( + h_cm_rep.h_al, h_cm_rep.cid, IB_REJ_INSUF_QP, NULL, 0, NULL, 0 ); + + __cep_timewait_qp( h_cm_rep.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm_rep.h_qp)->cid, AL_INVALID_CID ); + + if( cid != AL_INVALID_CID ) + { + CL_ASSERT( cid == h_cm_rep.cid ); + + ref_al_obj( &h_cm_rep.h_qp->obj ); + if( al_destroy_cep( + h_cm_rep.h_al, h_cm_rep.cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_cm_rep.h_qp->obj ); + } + } + + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_rtu returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_mra( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_mra ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_mra( h_cm.h_al, h_cm.cid, p_cm_mra ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_mra returned %s\n", ib_get_err_str( status )) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_rej( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_rej_t* const p_cm_rej ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_rej ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_rej( h_cm.h_al, h_cm.cid, p_cm_rej->rej_status, + p_cm_rej->p_ari->data, p_cm_rej->ari_length, + p_cm_rej->p_rej_pdata, p_cm_rej->rej_length ); + + if( h_cm.h_qp ) + { + __cep_timewait_qp( h_cm.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm.h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + ref_al_obj( &h_cm.h_qp->obj ); + if( al_destroy_cep( h_cm.h_al, h_cm.cid, deref_al_obj ) != IB_SUCCESS ) + deref_al_obj( &h_cm.h_qp->obj ); + } + } + else + { + al_destroy_cep( h_cm.h_al, h_cm.cid, NULL ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_dreq( + IN const ib_cm_dreq_t* const p_cm_dreq ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_dreq ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_dreq->qp_type ) + { + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_dreq->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_dreq->h_qp->type != 
p_cm_dreq->qp_type) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + /* Store the callback pointers. */ + ((al_conn_qp_t*)p_cm_dreq->h_qp)->pfn_cm_drep_cb = + p_cm_dreq->pfn_cm_drep_cb; + + status = al_cep_dreq( p_cm_dreq->h_qp->obj.h_al, + ((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, + p_cm_dreq->p_dreq_pdata, p_cm_dreq->dreq_length ); + switch( status ) + { + case IB_INVALID_STATE: + case IB_INVALID_HANDLE: + case IB_INVALID_PARAMETER: + case IB_INVALID_SETTING: + /* Bad call - don't touch the QP. */ + break; + + case IB_SUCCESS: + /* Wait for the DREP or timeout. */ + break; + + default: + /* + * If we failed to send the DREQ, just release the connection. It's + * unreliable anyway. The local port may be down. Note that we could + * not send the DREQ, but we still could have received one. The DREQ + * will have a reference on the connection until the user calls + * ib_cm_drep. + */ + __cep_timewait_qp( p_cm_dreq->h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)p_cm_dreq->h_qp)->cid, AL_INVALID_CID ); + ref_al_obj( &p_cm_dreq->h_qp->obj ); + if( cid == AL_INVALID_CID || al_destroy_cep( + p_cm_dreq->h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &p_cm_dreq->h_qp->obj ); + } + status = IB_SUCCESS; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + + +ib_api_status_t +ib_cm_drep( + IN const ib_cm_handle_t h_cm_dreq, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_drep ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = al_cep_drep( h_cm_dreq.h_al, h_cm_dreq.cid, p_cm_drep ); + switch( status ) + { + case IB_INVALID_SETTING: + case IB_INVALID_HANDLE: + case IB_INVALID_PARAMETER: + case IB_INVALID_STATE: + /* Bad call - don't touch the QP. */ + break; + + default: + /* + * Some other out-of-resource error - continue as if we succeeded in + * sending the DREP. 
+ */ + status = IB_SUCCESS; + /* Fall through */ + case IB_SUCCESS: + __cep_timewait_qp( h_cm_dreq.h_qp ); + + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_cm_dreq.h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + CL_ASSERT( cid == h_cm_dreq.cid ); + ref_al_obj( &h_cm_dreq.h_qp->obj ); + if( al_destroy_cep( + h_cm_dreq.h_al, h_cm_dreq.cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_cm_dreq.h_qp->obj ); + } + } + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_lap( + IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_lap ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_lap->qp_type ) + { + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_lap->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_lap->h_qp->type != p_cm_lap->qp_type) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + status = al_cep_lap( p_cm_lap->h_qp->obj.h_al, + ((al_conn_qp_t*)p_cm_lap->h_qp)->cid, p_cm_lap ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_lap returned %s.\n", ib_get_err_str( status )) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_apr( + IN const ib_cm_handle_t h_cm_lap, + IN const ib_cm_apr_t* const p_cm_apr ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_apr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Only supported qp types allowed */ + switch( p_cm_apr->qp_type ) + { + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid qp_type.\n") ); + return IB_INVALID_SETTING; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( AL_OBJ_INVALID_HANDLE( p_cm_apr->h_qp, AL_OBJ_TYPE_H_QP ) || + (p_cm_apr->h_qp->type != p_cm_apr->qp_type) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + break; + } + + status = al_cep_pre_apr( h_cm_lap.h_al, h_cm_lap.cid, p_cm_apr, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_pre_apr returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + /* Load alt path into QP */ + status = ib_modify_qp( h_cm_lap.h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_modify_qp for LAP returned %s.\n", + ib_get_err_str( status )) ); + return status; + } + + status = al_cep_send_apr( h_cm_lap.h_al, h_cm_lap.cid ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_force_apm( + IN const ib_qp_handle_t h_qp ) +{ + ib_api_status_t status; + al_conn_qp_t *p_conn_qp; + ib_qp_mod_t qp_mod; + + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + + p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp ); + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.apm_state = IB_APM_MIGRATED; + qp_mod.state.rts.opts = 
IB_MOD_QP_APM_STATE; + + /* Set the QP to RTS. */ + status = ib_modify_qp( h_qp, &qp_mod ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__destroying_listen( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + al_listen_t *p_listen; + + p_listen = PARENT_STRUCT( p_obj, al_listen_t, obj ); + + /* Destroy the listen's CEP. */ + status = al_destroy_cep( + p_obj->h_al, p_listen->cid, deref_al_obj ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_destroy_cep returned %s.\n", ib_get_err_str( status )) ); + deref_al_obj( p_obj ); + } +} + + + +static void +__free_listen( + IN al_obj_t* p_obj ) +{ + destroy_al_obj( p_obj ); + cl_free( PARENT_STRUCT( p_obj, al_listen_t, obj ) ); +} + + +static ib_api_status_t +__cep_listen( + IN const ib_al_handle_t h_al, + IN const ib_cm_listen_t* const p_cm_listen, + IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb, + IN const void* const listen_context, + OUT ib_listen_handle_t* const ph_cm_listen ) +{ + ib_api_status_t status; + al_listen_t *p_listen; + ib_cep_listen_t cep_listen; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( pfn_listen_err_cb ); + + /* Allocate the listen object. */ + p_listen = (al_listen_t*)cl_zalloc( sizeof(al_listen_t) ); + if( !p_listen ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Copy the listen request information for matching incoming requests. */ + p_listen->pfn_cm_req_cb = p_cm_listen->pfn_cm_req_cb; + + /* valid for ud qp_type only */ + p_listen->sidr_context = p_cm_listen->sidr_context; + + construct_al_obj( &p_listen->obj, AL_OBJ_TYPE_H_LISTEN ); + status = init_al_obj( &p_listen->obj, listen_context, TRUE, + __destroying_listen, NULL, __free_listen ); + if( status != IB_SUCCESS ) + { + __free_listen( &p_listen->obj ); + AL_EXIT( AL_DBG_CM ); + return status; + } + + /* Add the listen to the AL instance's object list. */ + status = attach_al_obj( &h_al->obj, &p_listen->obj ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Create a CEP to listen on. */ + status = al_create_cep( h_al, __cm_handler, p_listen, &p_listen->cid ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_create_cep returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Take a reference on behalf of the CEP. */ + ref_al_obj( &p_listen->obj ); + + cep_listen.cmp_len = p_cm_listen->compare_length; + cep_listen.cmp_offset = p_cm_listen->compare_offset; + cep_listen.p_cmp_buf = p_cm_listen->p_compare_buffer; + cep_listen.port_guid = p_cm_listen->port_guid; + cep_listen.svc_id = p_cm_listen->svc_id; + + status = al_cep_listen( h_al, p_listen->cid, &cep_listen ); + if( status != IB_SUCCESS ) + { + p_listen->obj.pfn_destroy( &p_listen->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_cep_listen returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + *ph_cm_listen = p_listen; + + /* Release the reference taken in init_al_obj. 
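+	 * The CEP's reference (taken just above) keeps the object alive.
+	 * For illustration, a hypothetical caller reaches this path via
+	 * the public API below (my_svc_id, my_req_cb, my_err_cb and my_ctx
+	 * are placeholder names):
+	 *
+	 *	ib_cm_listen_t	cm_listen;
+	 *
+	 *	cl_memclr( &cm_listen, sizeof(cm_listen) );
+	 *	cm_listen.svc_id = my_svc_id;
+	 *	cm_listen.port_guid = port_guid;
+	 *	cm_listen.pfn_cm_req_cb = my_req_cb;
+	 *	status = ib_cm_listen( h_al, &cm_listen, my_err_cb, my_ctx,
+	 *		&h_listen );
+	 *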
*/ + deref_al_obj( &p_listen->obj ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +ib_cm_listen( + IN const ib_al_handle_t h_al, + IN const ib_cm_listen_t* const p_cm_listen, + IN const ib_pfn_listen_err_cb_t pfn_listen_err_cb, + IN const void* const listen_context, + OUT ib_listen_handle_t* const ph_cm_listen ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_cm_listen || !pfn_listen_err_cb || !ph_cm_listen ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = __cep_listen(h_al, p_cm_listen, pfn_listen_err_cb, listen_context, + ph_cm_listen ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +ib_cm_cancel( + IN const ib_listen_handle_t h_cm_listen, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_CM ); + + if( AL_OBJ_INVALID_HANDLE( h_cm_listen, AL_OBJ_TYPE_H_LISTEN ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &h_cm_listen->obj ); + h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +ib_cm_handoff( + IN const ib_cm_handle_t h_cm_req, + IN const ib_net64_t svc_id ) +{ + UNUSED_PARAM( h_cm_req ); + UNUSED_PARAM( svc_id ); + return IB_UNSUPPORTED; +} diff --git a/branches/Ndi/core/al/al_cm_sidr.h b/branches/Ndi/core/al/al_cm_sidr.h new file mode 100644 index 00000000..2d37bbbc --- /dev/null +++ b/branches/Ndi/core/al/al_cm_sidr.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_AL_CM_SIDR_H__) +#define __IB_AL_CM_SIDR_H__ + + +#include +#include +#include "al_common.h" + +/* + * CM SIDR MAD definitions. 
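+ * SIDR (Service ID Resolution) maps a service ID to the QPN and Q_Key
+ * of an unreliable datagram service, using the REQ/REP MADs below.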
+ * Helper functions to handle version specific SIDR MADs
+ */
+
+
+/* SIDR REQ */
+#include <complib/cl_packon.h>
+
+#define CM_SIDR_REQ_ATTR_ID		CL_HTON16(0x0017)
+typedef struct _mad_cm_sidr_req
+{
+	ib_mad_t			hdr;
+
+	ib_net32_t			req_id;
+
+	ib_net16_t			pkey;
+	ib_net16_t			rsvd;
+
+	ib_net64_t			sid;
+
+	uint8_t				pdata[IB_SIDR_REQ_PDATA_SIZE];
+
+}	PACK_SUFFIX mad_cm_sidr_req_t;
+
+#include <complib/cl_packoff.h>
+
+static inline ib_api_status_t
+sidr_req_set_pdata(
+	IN		const	uint8_t*					p_data OPTIONAL,
+	IN		const	uint8_t						rep_length,
+	IN	OUT			mad_cm_sidr_req_t* const	p_sidr_req )
+{
+	if( p_data && rep_length > IB_SIDR_REQ_PDATA_SIZE )
+		return IB_INVALID_SETTING;
+
+	if( p_data )
+	{
+		cl_memcpy( p_sidr_req->pdata, p_data, rep_length );
+		cl_memclr( p_sidr_req->pdata + rep_length,
+			IB_SIDR_REQ_PDATA_SIZE - rep_length );
+	}
+	else
+	{
+		cl_memclr( p_sidr_req->pdata, IB_SIDR_REQ_PDATA_SIZE );
+	}
+	return IB_SUCCESS;
+}
+
+static inline void
+sidr_req_clr_rsvd_fields(
+	IN	OUT			mad_cm_sidr_req_t* const	p_sidr_req )
+{
+	p_sidr_req->rsvd = 0;
+}
+
+
+
+/* SIDR REP */
+#include <complib/cl_packon.h>
+
+#define CM_SIDR_REP_ATTR_ID		CL_HTON16(0x0018)
+typedef struct _mad_cm_sidr_rep
+{
+	ib_mad_t			hdr;
+
+	ib_net32_t			req_id;
+
+	uint8_t				status;
+	uint8_t				info_len;
+	ib_net16_t			rsvd;
+
+	/* QPN 24; rsvd 8 */
+	ib_field32_t		offset8;
+
+	ib_net64_t			sid;
+	ib_net32_t			qkey;
+
+	ib_class_port_info_t	class_info;
+
+	uint8_t				pdata[IB_SIDR_REP_PDATA_SIZE];
+
+}	PACK_SUFFIX mad_cm_sidr_rep_t;
+
+#include <complib/cl_packoff.h>
+
+static inline ib_net32_t
+sidr_rep_get_qpn(
+	IN		const	mad_cm_sidr_rep_t* const	p_sidr_rep )
+{
+	return __get_low24( p_sidr_rep->offset8 );
+}
+
+static inline void
+sidr_rep_set_qpn(
+	IN		const	ib_net32_t					qpn,
+	IN	OUT			mad_cm_sidr_rep_t* const	p_sidr_rep )
+{
+	CL_ASSERT( !( cl_ntoh32( qpn ) & 0xFF000000 ) );
+	__set_low24( &p_sidr_rep->offset8, qpn );
+}
+
+static inline ib_api_status_t
+sidr_rep_set_pdata(
+	IN		const	uint8_t*					p_data OPTIONAL,
+	IN		const	uint8_t						rep_length,
+	IN	OUT			mad_cm_sidr_rep_t* const	p_sidr_rep )
+{
+	if( p_data && rep_length > IB_SIDR_REP_PDATA_SIZE )
+		return IB_INVALID_SETTING;
+
+	if( p_data )
+	{
+		cl_memcpy( p_sidr_rep->pdata, p_data, rep_length );
+		cl_memclr( p_sidr_rep->pdata + rep_length,
+			IB_SIDR_REP_PDATA_SIZE - rep_length );
+	}
+	else
+	{
+		cl_memclr( p_sidr_rep->pdata, IB_SIDR_REP_PDATA_SIZE );
+	}
+	return IB_SUCCESS;
+}
+
+static inline void
+sidr_rep_clr_rsvd_fields(
+	IN	OUT			mad_cm_sidr_rep_t* const	p_sidr_rep )
+{
+	p_sidr_rep->rsvd = 0;
+	p_sidr_rep->offset8.bytes[3] = 0;
+}
+
+#endif	/* __IB_AL_CM_SIDR_H__ */
diff --git a/branches/Ndi/core/al/al_common.c b/branches/Ndi/core/al/al_common.c
new file mode 100644
index 00000000..722c32f5
--- /dev/null
+++ b/branches/Ndi/core/al/al_common.c
@@ -0,0 +1,688 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "al.h" +#include "al_ci_ca.h" +#include "al_common.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_common.tmh" +#endif + +#include "al_mgr.h" +#include +#include "ib_common.h" + + + +#if AL_OBJ_PRIVATE_ASYNC_PROC +cl_async_proc_t *gp_async_obj_mgr = NULL; +#endif + + +boolean_t +destroy_obj( + IN struct _al_obj *p_obj, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb ); + + +void +async_destroy_cb( + IN cl_async_proc_item_t *p_item ); + + +void +async_destroy_obj( + IN struct _al_obj *p_obj, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb ); + + +void +sync_destroy_obj( + IN struct _al_obj *p_obj, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb ); + + + +const char* ib_obj_type_str[] = +{ + "AL_OBJ_TYPE_UNKNOWN", + "AL_OBJ_TYPE_H_AL", + "AL_OBJ_TYPE_H_QP", + "AL_OBJ_TYPE_H_AV", + "AL_OBJ_TYPE_H_MR", + "AL_OBJ_TYPE_H_MW", + "AL_OBJ_TYPE_H_PD", + "AL_OBJ_TYPE_H_CA", + "AL_OBJ_TYPE_H_CQ", + "AL_OBJ_TYPE_H_CONN", + "AL_OBJ_TYPE_H_LISTEN", + "AL_OBJ_TYPE_H_IOC", + "AL_OBJ_TYPE_H_SVC_ENTRY", + "AL_OBJ_TYPE_H_PNP", + "AL_OBJ_TYPE_H_SA_REQ", + "AL_OBJ_TYPE_H_MCAST", + "AL_OBJ_TYPE_H_ATTACH", + "AL_OBJ_TYPE_H_MAD", + "AL_OBJ_TYPE_H_MAD_POOL", + "AL_OBJ_TYPE_H_POOL_KEY", + "AL_OBJ_TYPE_H_MAD_SVC", + "AL_OBJ_TYPE_CI_CA", + "AL_OBJ_TYPE_CM", + "AL_OBJ_TYPE_SMI", + "AL_OBJ_TYPE_DM", + "AL_OBJ_TYPE_IOU", + "AL_OBJ_TYPE_LOADER", + "AL_OBJ_TYPE_MAD_POOL", + "AL_OBJ_TYPE_MAD_DISP", + "AL_OBJ_TYPE_AL_MGR", + "AL_OBJ_TYPE_PNP_MGR", + "AL_OBJ_TYPE_IOC_PNP_MGR", + "AL_OBJ_TYPE_IOC_PNP_SVC", + "AL_OBJ_TYPE_QUERY_SVC", + "AL_OBJ_TYPE_MCAST_SVC", + "AL_OBJ_TYPE_SA_REQ_SVC", + "AL_OBJ_TYPE_RES_MGR", + "AL_OBJ_TYPE_H_CA_ATTR", + "AL_OBJ_TYPE_H_PNP_EVENT", + "AL_OBJ_TYPE_H_SA_REG", + "AL_OBJ_TYPE_H_FMR", + "AL_OBJ_TYPE_H_SRQ", + "AL_OBJ_TYPE_H_FMR_POOL" +}; + + +/* + * Used to force synchronous destruction of AL objects. + */ +void +__sync_destroy_cb( + IN void *context ) +{ + UNUSED_PARAM( context ); +} + + +void +construct_al_obj( + IN al_obj_t * const p_obj, + IN const al_obj_type_t obj_type ) +{ + CL_ASSERT( p_obj ); + cl_memclr( p_obj, sizeof( al_obj_t ) ); + + cl_spinlock_construct( &p_obj->lock ); + p_obj->state = CL_UNINITIALIZED; + p_obj->type = obj_type; + p_obj->timeout_ms = AL_DEFAULT_TIMEOUT_MS; + p_obj->ref_cnt = 1; + cl_event_construct( &p_obj->event ); + + /* Insert the object into the global tracking list. 
*/ + if( p_obj != &gp_al_mgr->obj ) + { + cl_spinlock_acquire( &gp_al_mgr->lock ); + cl_qlist_insert_tail( &gp_al_mgr->al_obj_list, &p_obj->list_item ); + cl_spinlock_release( &gp_al_mgr->lock ); + ref_al_obj( &gp_al_mgr->obj ); + } +} + + + +ib_api_status_t +init_al_obj( + IN al_obj_t * const p_obj, + IN const void* const context, + IN boolean_t async_destroy, + IN const al_pfn_destroying_t pfn_destroying, + IN const al_pfn_cleanup_t pfn_cleanup, + IN const al_pfn_free_t pfn_free ) +{ + cl_status_t cl_status; + + AL_ENTER( AL_DBG_AL_OBJ ); + CL_ASSERT( p_obj && pfn_free ); + CL_ASSERT( p_obj->state == CL_UNINITIALIZED ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, + ("%016I64x\n", (LONG_PTR)p_obj ) ); + + /* Initialize the object. */ + p_obj->async_item.pfn_callback = async_destroy_cb; + p_obj->pfn_free = pfn_free; + + p_obj->context = context; + + if( async_destroy && !(p_obj->type & AL_OBJ_SUBTYPE_UM_EXPORT) ) + p_obj->pfn_destroy = async_destroy_obj; + else + p_obj->pfn_destroy = sync_destroy_obj; + + p_obj->pfn_destroying = pfn_destroying; + + p_obj->pfn_cleanup = pfn_cleanup; + p_obj->user_destroy_cb = NULL; + + cl_qlist_init( &p_obj->obj_list ); + cl_status = cl_spinlock_init( &p_obj->lock ); + if( cl_status != CL_SUCCESS ) + { + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_event_init( &p_obj->event, FALSE ); + if( cl_status != CL_SUCCESS ) + { + return ib_convert_cl_status( cl_status ); + } + + p_obj->state = CL_INITIALIZED; + + /* + * Hold an extra reference on the object until creation is complete. + * This prevents a client's destruction of the object during asynchronous + * event callback processing from deallocating the object before the + * creation is complete. + */ + ref_al_obj( p_obj ); + + AL_EXIT( AL_DBG_AL_OBJ ); + return IB_SUCCESS; +} + + +void +reset_al_obj( + IN al_obj_t * const p_obj ) +{ + CL_ASSERT( p_obj && (p_obj->ref_cnt == 0) ); + CL_ASSERT( p_obj->state == CL_DESTROYING ); + + p_obj->ref_cnt = 1; + p_obj->desc_cnt = 0; + p_obj->state = CL_INITIALIZED; + p_obj->h_al = NULL; + p_obj->hdl = AL_INVALID_HANDLE; +} + + + +void +set_al_obj_timeout( + IN al_obj_t * const p_obj, + IN const uint32_t timeout_ms ) +{ + CL_ASSERT( p_obj ); + + /* Only increase timeout values. */ + p_obj->timeout_ms = MAX( p_obj->timeout_ms, timeout_ms ); +} + + + +void +inc_al_obj_desc( + IN al_obj_t * const p_obj, + IN const uint32_t desc_cnt ) +{ + CL_ASSERT( p_obj ); + + /* Increment the number of descendants. */ + p_obj->desc_cnt += desc_cnt; +} + + + +ib_api_status_t +attach_al_obj( + IN al_obj_t * const p_parent_obj, + IN al_obj_t * const p_child_obj ) +{ + AL_ENTER( AL_DBG_AL_OBJ ); + + CL_ASSERT( p_child_obj->state == CL_INITIALIZED ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, + ("%016I64x(%s) to %016I64x(%s)\n", + (LONG_PTR)p_child_obj, ib_get_obj_type( p_child_obj ), + (LONG_PTR)p_parent_obj, ib_get_obj_type( p_parent_obj ) ) ); + + /* Insert the object into the parent's object tracking list. 
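+	 * Attaching bounds the child's lifetime by the parent's: destroy_obj
+	 * tears down all children first.  The canonical construction pattern
+	 * (see __cep_listen earlier in this patch for a real instance; the
+	 * callbacks here are placeholder names) is:
+	 *
+	 *	construct_al_obj( &p_child->obj, AL_OBJ_TYPE_H_LISTEN );
+	 *	status = init_al_obj( &p_child->obj, context, TRUE,
+	 *		__destroying_cb, NULL, __free_cb );
+	 *	status = attach_al_obj( &p_parent->obj, &p_child->obj );
+	 *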
*/ + p_child_obj->p_ci_ca = p_parent_obj->p_ci_ca; + cl_spinlock_acquire( &p_parent_obj->lock ); + if( p_parent_obj->state != CL_INITIALIZED ) + { + cl_spinlock_release( &p_parent_obj->lock ); + return IB_INVALID_STATE; + } + cl_qlist_insert_tail( &p_parent_obj->obj_list, + (cl_list_item_t*)&p_child_obj->pool_item ); + p_child_obj->p_parent_obj = p_parent_obj; + cl_spinlock_release( &p_parent_obj->lock ); + + if( p_parent_obj->h_al ) + { + if( !p_child_obj->h_al ) + { + p_child_obj->h_al = p_parent_obj->h_al; +#ifdef CL_KERNEL + p_child_obj->hdl = al_hdl_insert_obj( p_child_obj ); + if( p_child_obj->hdl == AL_INVALID_HANDLE ) + { + cl_spinlock_acquire( &p_parent_obj->lock ); + cl_qlist_remove_item( &p_parent_obj->obj_list, + (cl_list_item_t*)&p_child_obj->pool_item ); + p_child_obj->p_parent_obj = NULL; + cl_spinlock_release( &p_parent_obj->lock ); + return IB_INSUFFICIENT_MEMORY; + } +#endif + } + else + { + CL_ASSERT( p_child_obj->h_al == p_parent_obj->h_al ); + } + } + + /* Reference the parent. */ + ref_al_obj( p_parent_obj ); + AL_EXIT( AL_DBG_AL_OBJ ); + return IB_SUCCESS; +} + + + +/* + * Called to release a child object from its parent. + */ +void +detach_al_obj( + IN al_obj_t * const p_obj ) +{ + al_obj_t *p_parent_obj; + + AL_ENTER( AL_DBG_AL_OBJ ); + + p_parent_obj = p_obj->p_parent_obj; + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_INITIALIZED || + p_obj->state == CL_DESTROYING ); + CL_ASSERT( p_parent_obj ); + CL_ASSERT( p_parent_obj->state == CL_INITIALIZED || + p_parent_obj->state == CL_DESTROYING ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, + ("%016I64x(%s) from %016I64x(%s)\n", + (LONG_PTR)p_obj, ib_get_obj_type( p_obj ), + (LONG_PTR)p_parent_obj, ib_get_obj_type( p_parent_obj ) ) ); + + /* Remove the object from the parent's list. */ + cl_spinlock_acquire( &p_parent_obj->lock ); + cl_qlist_remove_item( &p_parent_obj->obj_list, + (cl_list_item_t*)&p_obj->pool_item ); + cl_spinlock_release( &p_parent_obj->lock ); + AL_EXIT( AL_DBG_AL_OBJ ); +} + + + +/* + * Increment a reference count on an object. This object should not be + * an object's parent. + */ +int32_t +ref_al_obj( + IN al_obj_t * const p_obj ) +{ + uint32_t ref_cnt; + + AL_ENTER( AL_DBG_AL_OBJ ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, + ("%016I64x(%s)\n", (LONG_PTR)p_obj, ib_get_obj_type( p_obj ) ) ); + ref_cnt = cl_atomic_inc( &p_obj->ref_cnt ); + CL_ASSERT( ref_cnt != 1 || p_obj->type == AL_OBJ_TYPE_H_CQ ); + + AL_EXIT( AL_DBG_AL_OBJ ); + return ref_cnt; +} + + + +/* + * Decrement the reference count on an AL object. Destroy the object if + * it is no longer referenced. This object should not be an object's parent. + */ +int32_t +deref_al_obj( + IN al_obj_t * const p_obj ) +{ + int32_t ref_cnt; + + AL_ENTER( AL_DBG_AL_OBJ ); + + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_INITIALIZED || + p_obj->state == CL_DESTROYING ); + CL_ASSERT( p_obj->ref_cnt ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, + ("%016I64x(%s)\n", (LONG_PTR)p_obj, ib_get_obj_type( p_obj ) ) ); + + ref_cnt = cl_atomic_dec( &p_obj->ref_cnt ); + + /* If the reference count went to 0, the object should be destroyed. */ + if( ref_cnt == 0 ) + { + if( p_obj->pfn_destroy == async_destroy_obj && + p_obj->user_destroy_cb != __sync_destroy_cb ) + { + /* Queue the object for asynchronous destruction. 
*/ +#if AL_OBJ_PRIVATE_ASYNC_PROC + cl_async_proc_queue( gp_async_obj_mgr, &p_obj->async_item ); +#else + cl_async_proc_queue( gp_async_proc_mgr, &p_obj->async_item ); +#endif + } + else + { + /* Signal an event for synchronous destruction. */ + cl_event_signal( &p_obj->event ); + } + } + + AL_EXIT( AL_DBG_AL_OBJ ); + return ref_cnt; +} + + + +/* + * Called to cleanup all resources allocated by an object. + */ +void +destroy_al_obj( + IN al_obj_t * const p_obj ) +{ + AL_ENTER( AL_DBG_AL_OBJ ); + + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_DESTROYING || + p_obj->state == CL_UNINITIALIZED ); + CL_ASSERT( cl_is_qlist_empty( &p_obj->obj_list ) ); + + /* Remove the object from the global tracking list. */ + if( p_obj != &gp_al_mgr->obj ) + { + cl_spinlock_acquire( &gp_al_mgr->lock ); + cl_qlist_remove_item( &gp_al_mgr->al_obj_list, &p_obj->list_item ); + cl_spinlock_release( &gp_al_mgr->lock ); + deref_al_obj( &gp_al_mgr->obj ); + } + + cl_event_destroy( &p_obj->event ); + cl_spinlock_destroy( &p_obj->lock ); + p_obj->state = CL_DESTROYED; + + AL_EXIT( AL_DBG_AL_OBJ ); +} + + + +void +async_destroy_obj( + IN struct _al_obj *p_obj, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb ) +{ + AL_ENTER( AL_DBG_AL_OBJ ); + + if( pfn_destroy_cb == ib_sync_destroy ) + sync_destroy_obj( p_obj, pfn_destroy_cb ); + else if( destroy_obj( p_obj, pfn_destroy_cb ) ) + deref_al_obj( p_obj ); /* Only destroy the object once. */ + + AL_EXIT( AL_DBG_AL_OBJ ); +} + + + +void +sync_destroy_obj( + IN struct _al_obj *p_obj, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb ) +{ + cl_status_t cl_status; + + AL_ENTER( AL_DBG_AL_OBJ ); + + if( !destroy_obj( p_obj, pfn_destroy_cb ) ) + { + /* Object is already being destroyed... */ + AL_EXIT( AL_DBG_AL_OBJ ); + return; + } + + if( deref_al_obj( p_obj ) ) + { + #ifdef _DEBUG_ + uint32_t wait_us; + /* + * Wait for all other references to go away. We wait as long as the + * longest child will take, plus an additional amount based on the + * number of descendants. + */ + wait_us = (p_obj->timeout_ms * 1000) + + (AL_TIMEOUT_PER_DESC_US * p_obj->desc_cnt); + wait_us = MIN( wait_us, AL_MAX_TIMEOUT_US ); + do + { + cl_status = cl_event_wait_on( + &p_obj->event, wait_us, AL_WAIT_ALERTABLE ); + } while( cl_status == CL_NOT_DONE ); + + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Error waiting for references to be released - delaying.\n") ); + print_al_obj( p_obj ); + /* + * Wait some more to handle really long timeouts by referencing + * objects that are not descendants. 
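+			 *
+			 * For scale: with the default constants the initial wait
+			 * above is AL_DEFAULT_TIMEOUT_MS * 1000 (10,000,000 us)
+			 * plus AL_TIMEOUT_PER_DESC_US (10,000 us) per descendant,
+			 * capped at AL_MAX_TIMEOUT_US (100 seconds).  An object
+			 * with 50 descendants therefore waits at most 10.5 seconds
+			 * per pass.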
+			 */
+			do
+			{
+				cl_status = cl_event_wait_on(
+					&p_obj->event, AL_MAX_TIMEOUT_US, AL_WAIT_ALERTABLE );
+			} while( cl_status == CL_NOT_DONE );
+		}
+	#else
+		do
+		{
+			cl_status = cl_event_wait_on(
+				&p_obj->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE );
+		} while( cl_status == CL_NOT_DONE );
+	#endif
+		CL_ASSERT( cl_status == CL_SUCCESS );
+		if( cl_status != CL_SUCCESS )
+		{
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("Forcing object destruction.\n") );
+			print_al_obj( p_obj );
+			//print_tail_al_objs();
+			print_al_objs( p_obj->h_al );
+			p_obj->ref_cnt = 0;
+		}
+	}
+	async_destroy_cb( &p_obj->async_item );
+
+	AL_EXIT( AL_DBG_AL_OBJ );
+}
+
+
+
+boolean_t
+destroy_obj(
+	IN				struct _al_obj				*p_obj,
+	IN		const	ib_pfn_destroy_cb_t			pfn_destroy_cb )
+{
+	cl_list_item_t		*p_list_item;
+	al_obj_t			*p_child_obj;
+
+	AL_ENTER( AL_DBG_AL_OBJ );
+
+	CL_ASSERT( p_obj );
+	CL_ASSERT( p_obj->state == CL_INITIALIZED ||
+		p_obj->state == CL_DESTROYING );
+
+	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ,
+		("%016I64x(%s)\n", (LONG_PTR)p_obj, ib_get_obj_type( p_obj ) ) );
+
+	/*
+	 * Lock to synchronize with asynchronous event processing.
+	 * See ci_ca_async_event_cb for more information.
+	 */
+	cl_spinlock_acquire( &p_obj->lock );
+	if( p_obj->state == CL_DESTROYING )
+	{
+		cl_spinlock_release( &p_obj->lock );
+		deref_al_obj( p_obj );
+		AL_EXIT( AL_DBG_AL_OBJ );
+		return FALSE;
+	}
+	p_obj->state = CL_DESTROYING;
+	cl_spinlock_release( &p_obj->lock );
+	deref_al_obj( p_obj );
+
+	/* Notify the object that it is being destroyed. */
+	if( p_obj->pfn_destroying )
+		p_obj->pfn_destroying( p_obj );
+
+#ifdef CL_KERNEL
+	/* Release this object's handle. */
+	if( p_obj->hdl != AL_INVALID_HANDLE )
+	{
+		CL_ASSERT( p_obj->h_al );
+		al_hdl_free_obj( p_obj );
+	}
+#endif
+
+	if( p_obj->p_parent_obj )
+		detach_al_obj( p_obj );
+
+	/* Destroy all child resources.  No need to lock during destruction. */
+	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, ("destroying children\n") );
+	p_list_item = cl_qlist_tail( &p_obj->obj_list );
+	while( p_list_item != cl_qlist_end( &p_obj->obj_list ) )
+	{
+		p_child_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
+		CL_ASSERT( p_child_obj->pfn_destroy );
+		AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ,
+			("bye bye: %016I64x(%s)\n", (LONG_PTR)p_child_obj,
+			ib_get_obj_type( p_child_obj ) ) );
+		ref_al_obj( p_child_obj );
+		p_child_obj->pfn_destroy( p_child_obj, NULL );
+
+		p_list_item = cl_qlist_tail( &p_obj->obj_list );
+	}
+
+	/*
+	 * Update our parent's timeout value.  Ours could have been increased
+	 * when destroying one of our children.
+	 */
+	if( p_obj->p_parent_obj )
+	{
+		set_al_obj_timeout( p_obj->p_parent_obj, p_obj->timeout_ms );
+		inc_al_obj_desc( p_obj->p_parent_obj, p_obj->desc_cnt + 1 );
+	}
+
+	if( pfn_destroy_cb == ib_sync_destroy )
+		p_obj->user_destroy_cb = __sync_destroy_cb;
+	else
+		p_obj->user_destroy_cb = pfn_destroy_cb;
+
+	AL_EXIT( AL_DBG_AL_OBJ );
+	return TRUE;
+}
+
+
+
+void
+async_destroy_cb(
+	IN				cl_async_proc_item_t		*p_item )
+{
+	al_obj_t		*p_obj;
+	al_obj_t		*p_parent_obj = NULL;
+
+	AL_ENTER( AL_DBG_AL_OBJ );
+
+	CL_ASSERT( p_item );
+	p_obj = PARENT_STRUCT( p_item, al_obj_t, async_item );
+	CL_ASSERT( p_obj );
+	CL_ASSERT( p_obj->state == CL_DESTROYING );
+	CL_ASSERT( !p_obj->ref_cnt );
+
+	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ,
+		("%016I64x\n", (LONG_PTR)p_obj ) );
+
+	/* Cleanup any hardware related resources. 
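+	 * The remaining teardown runs in a strict order: pfn_cleanup
+	 * (below), then the user's destroy callback, then pfn_free, and
+	 * finally the deref of the parent, so the parent outlives every
+	 * stage of the child's destruction.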
*/
+	if( p_obj->pfn_cleanup )
+	{
+		AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, ("cleaning up\n" ) );
+		p_obj->pfn_cleanup( p_obj );
+	}
+
+	/* We can now safely dereference the parent. */
+	if( p_obj->p_parent_obj )
+	{
+		p_parent_obj = p_obj->p_parent_obj;
+		p_obj->p_parent_obj = NULL;
+	}
+
+	/* Notify the user that we're done. */
+	if( p_obj->user_destroy_cb )
+	{
+		AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, ("notifying user\n" ) );
+		p_obj->user_destroy_cb( (void*)p_obj->context );
+	}
+
+	/* Free the resources associated with the object. */
+	AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_AL_OBJ, ("freeing object\n" ) );
+	p_obj->pfn_free( p_obj );
+
+	/* Dereference the parent after freeing the child. */
+	if( p_parent_obj )
+		deref_al_obj( p_parent_obj );
+	AL_EXIT( AL_DBG_AL_OBJ );
+}
diff --git a/branches/Ndi/core/al/al_common.h b/branches/Ndi/core/al/al_common.h
new file mode 100644
index 00000000..84af9994
--- /dev/null
+++ b/branches/Ndi/core/al/al_common.h
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(__AL_COMMON_H__)
+#define __AL_COMMON_H__
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+/* Driver parameters */
+extern uint32_t		g_smi_poll_interval;
+extern uint32_t		g_ioc_query_timeout;
+extern uint32_t		g_ioc_query_retries;
+extern uint32_t		g_ioc_poll_interval;
+
+
+/* Wait operations performed in user-mode must be alertable. */
+#ifdef CL_KERNEL
+#define AL_WAIT_ALERTABLE	FALSE
+#else	/* CL_KERNEL */
+#define AL_WAIT_ALERTABLE	TRUE
+#endif	/* CL_KERNEL */
+
+/*
+ * Controls whether the al_objects use their own private
+ * thread pool for destruction.
+ */
+#define AL_OBJ_PRIVATE_ASYNC_PROC	1
+
+#if AL_OBJ_PRIVATE_ASYNC_PROC
+extern cl_async_proc_t	*gp_async_obj_mgr;
+#endif
+
+
+/*
+ * Macro to verify an AL object handle.  We ignore the upper byte of the type
+ * when making the type comparison.  The upper byte specifies a subtype. 
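+ * For example, a user-mode exported QP handle carries the type
+ * (AL_OBJ_SUBTYPE_UM_EXPORT | AL_OBJ_TYPE_H_QP); AL_BASE_TYPE masks
+ * with 0x00FFFFFF to recover AL_OBJ_TYPE_H_QP, and AL_SUBTYPE masks
+ * with 0xFF000000 to recover AL_OBJ_SUBTYPE_UM_EXPORT.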
+ */
+#define AL_BASE_TYPE( t )		( (t) & 0x00FFFFFF )
+#define AL_SUBTYPE( t )			( (t) & 0xFF000000 )
+
+#define AL_OBJ_BASE_TYPE( h )	( AL_BASE_TYPE( (h)->obj.type ) )
+#define AL_OBJ_SUBTYPE( h )		( AL_SUBTYPE( (h)->obj.type ) )
+
+#define AL_OBJ_IS_TYPE( h, t ) \
+	( AL_OBJ_BASE_TYPE( h ) == AL_BASE_TYPE( t ) )
+
+#define AL_OBJ_IS_SUBTYPE( h, t ) \
+	( AL_OBJ_SUBTYPE( h ) == AL_SUBTYPE( t ) )
+
+#define AL_OBJ_INVALID_HANDLE( h, t ) \
+	( !(h) || !AL_OBJ_IS_TYPE( h, t ) || ((h)->obj.state != CL_INITIALIZED) )
+
+
+typedef struct _al_obj	__p_al_obj_t;
+
+
+/* Function used to release AL items created by the object. */
+typedef void
+(*al_pfn_destroying_t)(
+	IN		struct _al_obj			*p_obj );
+
+
+/* Function used to cleanup any HW resources used by the object. */
+typedef void
+(*al_pfn_cleanup_t)(
+	IN		struct _al_obj			*p_obj );
+
+
+/* Function to free all resources used by the object. */
+typedef void
+(*al_pfn_free_t)(
+	IN		struct _al_obj			*p_obj );
+
+
+/* Function invoked to release HW resources. */
+typedef void
+(*al_pfn_destroy_t)(
+	IN		struct _al_obj			*p_obj,
+	IN		const ib_pfn_destroy_cb_t	pfn_destroy_cb );
+
+
+
+/*
+ * Different types of AL objects.  Note that the upper byte signifies
+ * a subtype.
+ */
+#define AL_OBJ_TYPE_UNKNOWN			0
+#define AL_OBJ_TYPE_H_AL			1
+#define AL_OBJ_TYPE_H_QP			2
+#define AL_OBJ_TYPE_H_AV			3
+#define AL_OBJ_TYPE_H_MR			4
+#define AL_OBJ_TYPE_H_MW			5
+#define AL_OBJ_TYPE_H_PD			6
+#define AL_OBJ_TYPE_H_CA			7
+#define AL_OBJ_TYPE_H_CQ			8
+#define AL_OBJ_TYPE_H_CONN			9
+#define AL_OBJ_TYPE_H_LISTEN		10
+#define AL_OBJ_TYPE_H_IOC			11
+#define AL_OBJ_TYPE_H_SVC_ENTRY		12
+#define AL_OBJ_TYPE_H_PNP			13
+#define AL_OBJ_TYPE_H_SA_REQ		14
+#define AL_OBJ_TYPE_H_MCAST			15
+#define AL_OBJ_TYPE_H_ATTACH		16
+#define AL_OBJ_TYPE_H_MAD			17
+#define AL_OBJ_TYPE_H_MAD_POOL		18
+#define AL_OBJ_TYPE_H_POOL_KEY		19
+#define AL_OBJ_TYPE_H_MAD_SVC		20
+#define AL_OBJ_TYPE_CI_CA			21
+#define AL_OBJ_TYPE_CM				22
+#define AL_OBJ_TYPE_SMI				23
+#define AL_OBJ_TYPE_DM				24
+#define AL_OBJ_TYPE_IOU				25
+#define AL_OBJ_TYPE_LOADER			26
+#define AL_OBJ_TYPE_MAD_POOL		27
+#define AL_OBJ_TYPE_MAD_DISP		28
+#define AL_OBJ_TYPE_AL_MGR			29
+#define AL_OBJ_TYPE_PNP_MGR			30
+#define AL_OBJ_TYPE_IOC_PNP_MGR		31
+#define AL_OBJ_TYPE_IOC_PNP_SVC		32
+#define AL_OBJ_TYPE_QUERY_SVC		33
+#define AL_OBJ_TYPE_MCAST_SVC		34
+#define AL_OBJ_TYPE_SA_REQ_SVC		35
+#define AL_OBJ_TYPE_RES_MGR			36
+#define AL_OBJ_TYPE_H_CA_ATTR		37
+#define AL_OBJ_TYPE_H_PNP_EVENT		38
+#define AL_OBJ_TYPE_H_SA_REG		39
+#define AL_OBJ_TYPE_H_FMR			40
+#define AL_OBJ_TYPE_H_SRQ			41
+#define AL_OBJ_TYPE_H_FMR_POOL		42
+#define AL_OBJ_TYPE_INVALID			43	/* Must be last type. */
+
+/* Kernel object for a user-mode app. */
+#define AL_OBJ_SUBTYPE_UM_EXPORT	0x80000000
+
+/* CM related subtypes, used by the CM proxy. */
+#define AL_OBJ_SUBTYPE_REQ			0x01000000
+#define AL_OBJ_SUBTYPE_REP			0x02000000
+#define AL_OBJ_SUBTYPE_DREQ			0x04000000
+#define AL_OBJ_SUBTYPE_LAP			0x08000000
+
+typedef uint32_t	al_obj_type_t;
+
+
+#define AL_DEFAULT_TIMEOUT_MS		10000	/* 10 seconds */
+#define AL_DEFAULT_TIMEOUT_US		(AL_DEFAULT_TIMEOUT_MS * 1000)
+#define AL_TIMEOUT_PER_DESC_US		10000
+#define AL_MAX_TIMEOUT_MS			(AL_DEFAULT_TIMEOUT_MS * 10)
+#define AL_MAX_TIMEOUT_US			(AL_MAX_TIMEOUT_MS * 1000)
+
+
+#if defined( _DEBUG_ )
+extern const char* ib_obj_type_str[];
+#endif
+
+
+/*
+ * Base object for AL resources.  This must be the first element of
+ * AL resources.
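+ *
+ * Keeping the al_obj_t as the first member lets a handle be recovered
+ * from a base object pointer with PARENT_STRUCT, as done throughout AL;
+ * e.g. (hypothetical resource):
+ *
+ *   typedef struct _my_res
+ *   {
+ *       al_obj_t    obj;        // must be first
+ *       uint32_t    my_state;
+ *   } my_res_t;
+ *
+ *   my_res_t *p_res = PARENT_STRUCT( p_obj, my_res_t, obj );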
+ */
+typedef struct _al_obj
+{
+	cl_pool_item_t			pool_item;
+
+	struct _al_obj			*p_parent_obj;
+	struct _al_ci_ca		*p_ci_ca;
+
+	const void				*context;
+
+	/* Asynchronous item used when destroying the object asynchronously. */
+	cl_async_proc_item_t	async_item;
+
+	/* Event used when destroying the object synchronously. */
+	cl_event_t				event;
+	uint32_t				timeout_ms;
+	uint32_t				desc_cnt;
+
+	al_pfn_destroy_t		pfn_destroy;
+	al_pfn_destroying_t		pfn_destroying;
+	al_pfn_cleanup_t		pfn_cleanup;
+	al_pfn_free_t			pfn_free;
+	ib_pfn_destroy_cb_t		user_destroy_cb;
+
+	cl_spinlock_t			lock;
+	cl_qlist_t				obj_list;
+	atomic32_t				ref_cnt;
+
+	cl_list_item_t			list_item;
+	al_obj_type_t			type;
+	cl_state_t				state;
+
+	uint64_t				hdl;		/* User Handle. */
+	ib_al_handle_t			h_al;		/* Owning AL instance. */
+
+#ifdef CL_KERNEL
+	/*
+	 * Flag to indicate that UM calls may proceed on the given object.
+	 * Set by the proxy when creation completes successfully.
+	 */
+	boolean_t				hdl_valid;
+#endif
+} al_obj_t;
+
+
+void
+construct_al_obj(
+	IN		al_obj_t * const		p_obj,
+	IN		const al_obj_type_t		obj_type );
+
+
+ib_api_status_t
+init_al_obj(
+	IN		al_obj_t * const		p_obj,
+	IN		const void* const		context,
+	IN		boolean_t				async_destroy,
+	IN		const al_pfn_destroying_t	pfn_destroying,
+	IN		const al_pfn_cleanup_t		pfn_cleanup,
+	IN		const al_pfn_free_t			pfn_free );
+
+/*
+ * Reset an object's state.  This is called after pfn_destroy() has
+ * been called on an object, but before destroy_al_obj() has been invoked.
+ * It allows an object to be initialized once, then returned to a pool
+ * on destruction, and later reused after being removed from the pool.
+ */
+void
+reset_al_obj(
+	IN		al_obj_t * const		p_obj );
+
+void
+set_al_obj_timeout(
+	IN		al_obj_t * const		p_obj,
+	IN		const uint32_t			timeout_ms );
+
+void
+inc_al_obj_desc(
+	IN		al_obj_t * const		p_obj,
+	IN		const uint32_t			desc_cnt );
+
+
+/*
+ * Attach to our parent object.  The parent will destroy the child when
+ * it is destroyed.  Attaching a child to the parent automatically
+ * increments the parent's reference count.
+ */
+ib_api_status_t
+attach_al_obj(
+	IN		al_obj_t * const		p_parent_obj,
+	IN		al_obj_t * const		p_child_obj );
+
+
+/*
+ * Increment the reference count on an AL object.
+ */
+int32_t
+ref_al_obj(
+	IN		al_obj_t * const		p_obj );
+
+
+/*
+ * Called to release a child object from its parent.  The child's
+ * reference to its parent is still held.
+ */
+void
+detach_al_obj(
+	IN		al_obj_t * const		p_obj );
+
+/*
+ * Decrement the reference count on an AL object.
+ */
+AL_EXPORT int32_t AL_API
+deref_al_obj(
+	IN		al_obj_t * const		p_obj );
+
+/*
+ * Called to cleanup all resources allocated by an object.
+ */
+void
+destroy_al_obj(
+	IN		al_obj_t * const		p_obj );
+
+
+
+
+extern const char* ib_obj_type_str[];
+
+static inline const char*
+ib_get_obj_type(
+	IN		al_obj_t * const		p_obj )
+{
+	if( AL_BASE_TYPE( p_obj->type ) > AL_OBJ_TYPE_INVALID )
+		return( ib_obj_type_str[AL_OBJ_TYPE_UNKNOWN] );
+
+	return( ib_obj_type_str[ AL_BASE_TYPE( p_obj->type ) ] );
+}
+
+
+
+
+#endif /* __AL_COMMON_H__ */
diff --git a/branches/Ndi/core/al/al_cq.c b/branches/Ndi/core/al/al_cq.c
new file mode 100644
index 00000000..243be6e1
--- /dev/null
+++ b/branches/Ndi/core/al/al_cq.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "al_cq.h"
+#include "al_debug.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "al_cq.tmh"
+#endif
+
+#include "al_ca.h"
+#include "al_pd.h"
+#include "al_qp.h"
+#include "al_verbs.h"
+
+/*
+ * Function prototypes.
+ */
+void
+destroying_cq(
+	IN		struct _al_obj			*p_obj );
+
+void
+cleanup_cq(
+	IN		al_obj_t				*p_obj );
+
+void
+free_cq(
+	IN		al_obj_t				*p_obj );
+
+
+
+
+/*
+ * Initializes the CQ information structure.
+ */
+ib_api_status_t
+create_cq(
+	IN		const ib_ca_handle_t		h_ca,
+	IN	OUT	ib_cq_create_t* const		p_cq_create,
+	IN		const void* const			cq_context,
+	IN		const ib_pfn_event_cb_t		pfn_cq_event_cb,
+	OUT		ib_cq_handle_t* const		ph_cq,
+	IN	OUT	ci_umv_buf_t* const			p_umv_buf )
+{
+	ib_cq_handle_t		h_cq;
+	ib_api_status_t		status;
+	al_obj_type_t		obj_type = AL_OBJ_TYPE_H_CQ;
+
+	if( !p_cq_create || !ph_cq )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+	if( (p_cq_create->pfn_comp_cb && p_cq_create->h_wait_obj) ||
+		(!p_cq_create->pfn_comp_cb && !p_cq_create->h_wait_obj) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") );
+		return IB_INVALID_SETTING;
+	}
+
+	h_cq = cl_zalloc( sizeof( ib_cq_t ) );
+	if( !h_cq )
+	{
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	if( p_umv_buf )
+		obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT;
+
+	/* Construct the CQ. */
+	construct_al_obj( &h_cq->obj, obj_type );
+
+	cl_qlist_init( &h_cq->qp_list );
+
+	/* Initialize the CQ. */
+	status = init_al_obj( &h_cq->obj, cq_context, TRUE,
+		destroying_cq, cleanup_cq, free_cq );
+	if( status != IB_SUCCESS )
+	{
+		free_cq( &h_cq->obj );
+		return status;
+	}
+	status = attach_al_obj( &h_ca->obj, &h_cq->obj );
+	if( status != IB_SUCCESS )
+	{
+		h_cq->obj.pfn_destroy( &h_cq->obj, NULL );
+		return status;
+	}
+
+	/*
+	 * Record which completion routine will be used to notify the CQ of
+	 * a completion.
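+	 *
+	 * The two notification models are mutually exclusive, as enforced by
+	 * the parameter check at the top of this function.  A sketch of the
+	 * callback model (my_comp_cb is hypothetical, and the size field is
+	 * assumed from ib_cq_create_t):
+	 *
+	 *   ib_cq_create_t cq_create;
+	 *   cl_memclr( &cq_create, sizeof( ib_cq_create_t ) );
+	 *   cq_create.size = 128;               // CQEs requested
+	 *   cq_create.pfn_comp_cb = my_comp_cb;
+	 *   cq_create.h_wait_obj = NULL;        // callback model: no wait object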
+	 */
+	h_cq->pfn_event_cb = pfn_cq_event_cb;
+	if( p_cq_create->pfn_comp_cb )
+	{
+		CL_ASSERT( !p_cq_create->h_wait_obj );
+		h_cq->pfn_user_comp_cb = p_cq_create->pfn_comp_cb;
+	}
+	else
+	{
+		CL_ASSERT( p_cq_create->h_wait_obj );
+		h_cq->h_wait_obj = p_cq_create->h_wait_obj;
+	}
+
+	/*
+	 * Note:
+	 * Because an extra reference is not held on the object during creation,
+	 * the h_cq handle may be destroyed by the client's asynchronous event
+	 * callback routine before the call to verbs returns.
+	 */
+	status = verbs_create_cq( h_ca, p_cq_create, h_cq, p_umv_buf );
+	if( status != IB_SUCCESS )
+	{
+		h_cq->obj.pfn_destroy( &h_cq->obj, NULL );
+		return status;
+	}
+
+	*ph_cq = h_cq;
+
+	return IB_SUCCESS;
+}
+
+
+
+ib_api_status_t
+ib_destroy_cq(
+	IN		const ib_cq_handle_t		h_cq,
+	IN		const ib_pfn_destroy_cb_t	pfn_destroy_cb OPTIONAL )
+{
+	AL_ENTER( AL_DBG_CQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") );
+		return IB_INVALID_CQ_HANDLE;
+	}
+
+	ref_al_obj( &h_cq->obj );
+	h_cq->obj.pfn_destroy( &h_cq->obj, pfn_destroy_cb );
+
+	AL_EXIT( AL_DBG_CQ );
+	return IB_SUCCESS;
+}
+
+
+
+void
+destroying_cq(
+	IN		struct _al_obj			*p_obj )
+{
+	ib_cq_handle_t		h_cq;
+	cl_list_item_t		*p_item;
+	cl_obj_rel_t		*p_rel;
+	ib_qp_handle_t		h_qp;
+
+	CL_ASSERT( p_obj );
+	h_cq = PARENT_STRUCT( p_obj, ib_cq_t, obj );
+
+	/* Initiate destruction of all bound QPs. */
+	cl_spinlock_acquire( &h_cq->obj.lock );
+	for( p_item = cl_qlist_remove_tail( &h_cq->qp_list );
+		p_item != cl_qlist_end( &h_cq->qp_list );
+		p_item = cl_qlist_remove_tail( &h_cq->qp_list ) )
+	{
+		p_rel = PARENT_STRUCT( p_item, cl_obj_rel_t, pool_item.list_item );
+		p_rel->p_parent_obj = NULL;
+		h_qp = (ib_qp_handle_t)p_rel->p_child_obj;
+		if( h_qp )
+		{
+			/* Take a reference to prevent the QP from being destroyed. */
+			ref_al_obj( &h_qp->obj );
+			cl_spinlock_release( &h_cq->obj.lock );
+			h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
+			cl_spinlock_acquire( &h_cq->obj.lock );
+		}
+	}
+	cl_spinlock_release( &h_cq->obj.lock );
+}
+
+
+void
+cleanup_cq(
+	IN		struct _al_obj			*p_obj )
+{
+	ib_cq_handle_t		h_cq;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_obj );
+	h_cq = PARENT_STRUCT( p_obj, ib_cq_t, obj );
+
+	/* Deallocate the CI cq. */
+	if( verbs_check_cq( h_cq ) )
+	{
+		status = verbs_destroy_cq( h_cq );
+		CL_ASSERT( status == IB_SUCCESS );
+	}
+}
+
+
+
+/*
+ * Release all resources associated with the completion queue.
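+ *
+ * free_cq is the last of the three teardown stages used for a CQ
+ * (destroying_cq, then cleanup_cq, then free_cq): by the time it runs,
+ * the bound QPs have been destroyed and the CI CQ released, so only the
+ * base object and the host memory remain.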
+ */
+void
+free_cq(
+	IN		al_obj_t				*p_obj )
+{
+	ib_cq_handle_t		h_cq;
+
+	CL_ASSERT( p_obj );
+	h_cq = PARENT_STRUCT( p_obj, ib_cq_t, obj );
+
+	destroy_al_obj( &h_cq->obj );
+	cl_free( h_cq );
+}
+
+
+void
+cq_attach_qp(
+	IN		const ib_cq_handle_t	h_cq,
+	IN		cl_obj_rel_t* const		p_qp_rel )
+{
+	p_qp_rel->p_parent_obj = (cl_obj_t*)h_cq;
+	ref_al_obj( &h_cq->obj );
+	cl_spinlock_acquire( &h_cq->obj.lock );
+	cl_qlist_insert_tail( &h_cq->qp_list, &p_qp_rel->pool_item.list_item );
+	cl_spinlock_release( &h_cq->obj.lock );
+}
+
+
+void
+cq_detach_qp(
+	IN		const ib_cq_handle_t	h_cq,
+	IN		cl_obj_rel_t* const		p_qp_rel )
+{
+	if( p_qp_rel->p_parent_obj )
+	{
+		CL_ASSERT( p_qp_rel->p_parent_obj == (cl_obj_t*)h_cq );
+		p_qp_rel->p_parent_obj = NULL;
+		cl_spinlock_acquire( &h_cq->obj.lock );
+		cl_qlist_remove_item( &h_cq->qp_list, &p_qp_rel->pool_item.list_item );
+		cl_spinlock_release( &h_cq->obj.lock );
+	}
+}
+
+
+ib_api_status_t
+ib_modify_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	IN	OUT	uint32_t* const			p_size )
+{
+	return modify_cq( h_cq, p_size, NULL );
+}
+
+
+ib_api_status_t
+modify_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	IN	OUT	uint32_t* const			p_size,
+	IN	OUT	ci_umv_buf_t* const		p_umv_buf )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") );
+		return IB_INVALID_CQ_HANDLE;
+	}
+	if( !p_size )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	status = verbs_modify_cq( h_cq, p_size );
+
+	AL_EXIT( AL_DBG_CQ );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_query_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	OUT		uint32_t* const			p_size )
+{
+	return query_cq( h_cq, p_size, NULL );
+}
+
+
+
+ib_api_status_t
+query_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	OUT		uint32_t* const			p_size,
+	IN	OUT	ci_umv_buf_t* const		p_umv_buf )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") );
+		return IB_INVALID_CQ_HANDLE;
+	}
+	if( !p_size )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	status = verbs_query_cq( h_cq, p_size );
+
+	AL_EXIT( AL_DBG_CQ );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_peek_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	OUT		uint32_t* const			p_n_cqes )
+{
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_CQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") );
+		return IB_INVALID_CQ_HANDLE;
+	}
+	if( !p_n_cqes )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	status = verbs_peek_cq( h_cq, p_n_cqes );
+
+	AL_EXIT( AL_DBG_CQ );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_poll_cq(
+	IN		const ib_cq_handle_t	h_cq,
+	IN	OUT	ib_wc_t** const			pp_free_wclist,
+	OUT		ib_wc_t** const			pp_done_wclist )
+{
+	ib_api_status_t		status;
+	PERF_DECLARE( IbPollCq );
+	PERF_DECLARE( VerbsPollCq );
+
+	cl_perf_start( IbPollCq );
+	AL_ENTER( AL_DBG_CQ );
+
+	if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") );
+		return IB_INVALID_CQ_HANDLE;
+	}
+	if( !pp_free_wclist || !pp_done_wclist )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + cl_perf_start( VerbsPollCq ); + status = verbs_poll_cq( h_cq, pp_free_wclist, pp_done_wclist ); + cl_perf_stop( &g_perf, VerbsPollCq ); + + AL_EXIT( AL_DBG_CQ ); + cl_perf_stop( &g_perf, IbPollCq ); + return status; +} + + + +ib_api_status_t +ib_rearm_cq( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CQ ); + + if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") ); + return IB_INVALID_CQ_HANDLE; + } + + status = verbs_rearm_cq( h_cq, solicited ); + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + + +ib_api_status_t +ib_rearm_n_cq( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CQ ); + + if( AL_OBJ_INVALID_HANDLE( h_cq, AL_OBJ_TYPE_H_CQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CQ_HANDLE\n") ); + return IB_INVALID_CQ_HANDLE; + } + if( !n_cqes ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_rearm_n_cq( h_cq, n_cqes ); + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + + +/* + * Process an asynchronous event on the CQ. Notify the user of the event. + */ +void +cq_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ) +{ + ib_cq_handle_t h_cq; + + CL_ASSERT( p_event_rec ); + h_cq = (ib_cq_handle_t)p_event_rec->context; + + p_event_rec->context = (void*)h_cq->obj.context; + p_event_rec->handle.h_cq = h_cq; + + if( h_cq->pfn_event_cb ) + h_cq->pfn_event_cb( p_event_rec ); +} diff --git a/branches/Ndi/core/al/al_cq.h b/branches/Ndi/core/al/al_cq.h new file mode 100644 index 00000000..d3ff4560 --- /dev/null +++ b/branches/Ndi/core/al/al_cq.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#if !defined(__AL_CQ_H__)
+#define __AL_CQ_H__
+
+#include "al_ca.h"
+
+
+
+typedef void
+(*pfn_proc_comp_t)(
+	IN		const ib_cq_handle_t	h_cq );
+
+typedef ib_api_status_t
+(*pfn_peek_cq_t)(
+	IN		const ib_cq_handle_t	h_cq,
+	OUT		uint32_t* const			p_n_cqes );

+typedef ib_api_status_t
+(*pfn_poll_cq_t)(
+	IN		const ib_cq_handle_t	h_cq,
+	IN	OUT	ib_wc_t** const			pp_free_wclist,
+	OUT		ib_wc_t** const			pp_done_wclist );
+
+typedef ib_api_status_t
+(*pfn_rearm_cq_t)(
+	IN		const ib_cq_handle_t	h_cq,
+	IN		const boolean_t			solicited );
+
+typedef ib_api_status_t
+(*pfn_rearm_n_cq_t)(
+	IN		const ib_cq_handle_t	h_cq,
+	IN		const uint32_t			n_cqes );
+
+
+/*
+ * Completion queue information required by the access layer.  This structure
+ * is referenced by a user's CQ handle.
+ */
+typedef struct _ib_cq
+{
+	al_obj_t				obj;			/* Must be first. */
+
+	cl_qlist_t				qp_list;		/* List of QPs bound to this CQ. */
+
+	ib_pfn_comp_cb_t		pfn_user_comp_cb;
+	cl_waitobj_handle_t		h_wait_obj;
+
+	ib_cq_handle_t			h_ci_cq;
+
+	/* Function pointers for the various speed path operations. */
+#ifndef CL_KERNEL
+	pfn_peek_cq_t			pfn_peek;
+	ib_cq_handle_t			h_peek_cq;
+
+	pfn_poll_cq_t			pfn_poll;
+	ib_cq_handle_t			h_poll_cq;
+
+	pfn_rearm_cq_t			pfn_rearm;
+	ib_cq_handle_t			h_rearm_cq;
+
+	pfn_rearm_n_cq_t		pfn_rearm_n;
+	ib_cq_handle_t			h_rearm_n_cq;
+#endif
+
+	ib_pfn_event_cb_t		pfn_event_cb;
+
+} ib_cq_t;
+
+
+
+ib_api_status_t
+create_cq(
+	IN		const ib_ca_handle_t		h_ca,
+	IN	OUT	ib_cq_create_t* const		p_cq_create,
+	IN		const void* const			cq_context,
+	IN		const ib_pfn_event_cb_t		pfn_cq_event_cb,
+	OUT		ib_cq_handle_t* const		ph_cq,
+	IN	OUT	ci_umv_buf_t* const			p_umv_buf );
+
+
+ib_api_status_t
+modify_cq(
+	IN		const ib_cq_handle_t		h_cq,
+	IN	OUT	uint32_t* const				p_size,
+	IN	OUT	ci_umv_buf_t* const			p_umv_buf );
+
+
+ib_api_status_t
+query_cq(
+	IN		const ib_cq_handle_t		h_cq,
+	OUT		uint32_t* const				p_size,
+	IN	OUT	ci_umv_buf_t* const			p_umv_buf );
+
+
+void
+cq_async_event_cb(
+	IN		ib_async_event_rec_t* const	p_event_rec );
+
+
+void
+cq_attach_qp(
+	IN		const ib_cq_handle_t		h_cq,
+	IN		cl_obj_rel_t* const			p_qp_rel );
+
+
+void
+cq_detach_qp(
+	IN		const ib_cq_handle_t		h_cq,
+	IN		cl_obj_rel_t* const			p_qp_rel );
+
+#endif /* __AL_CQ_H__ */
diff --git a/branches/Ndi/core/al/al_debug.h b/branches/Ndi/core/al/al_debug.h
new file mode 100644
index 00000000..3d24c195
--- /dev/null
+++ b/branches/Ndi/core/al/al_debug.h
@@ -0,0 +1,251 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(__AL_DEBUG_H__)
+#define __AL_DEBUG_H__
+
+#ifdef __MODULE__
+#undef __MODULE__
+#endif
+#define __MODULE__	"[AL]"
+
+
+#include
+#include
+
+extern uint32_t		g_al_dbg_level;
+extern uint32_t		g_al_dbg_flags;
+
+#if defined(EVENT_TRACING)
+//
+// Software Tracing Definitions
+//
+
+#ifndef CL_KERNEL
+
+#define WPP_CONTROL_GUIDS \
+	WPP_DEFINE_CONTROL_GUID(ALCtlGuid1,(B199CE55,F8BF,4147,B119,DACD1E5987A6),  \
+	WPP_DEFINE_BIT( AL_DBG_ERROR) \
+	WPP_DEFINE_BIT( AL_DBG_PNP) \
+	WPP_DEFINE_BIT( AL_DBG_HDL) \
+	WPP_DEFINE_BIT( AL_DBG_AL_OBJ) \
+	WPP_DEFINE_BIT( AL_DBG_SMI) \
+	WPP_DEFINE_BIT( AL_DBG_SMI_CB) \
+	WPP_DEFINE_BIT( AL_DBG_RES1) \
+	WPP_DEFINE_BIT( AL_DBG_MAD_POOL) \
+	WPP_DEFINE_BIT( AL_DBG_MAD_SVC) \
+	WPP_DEFINE_BIT( AL_DBG_RES2) \
+	WPP_DEFINE_BIT( AL_DBG_CM) \
+	WPP_DEFINE_BIT( AL_DBG_CA) \
+	WPP_DEFINE_BIT( AL_DBG_MR) \
+	WPP_DEFINE_BIT( AL_DBG_MGR)\
+	WPP_DEFINE_BIT( AL_DBG_DEV)\
+	WPP_DEFINE_BIT( AL_DBG_MCAST)\
+	WPP_DEFINE_BIT( AL_DBG_PD)\
+	WPP_DEFINE_BIT( AL_DBG_AV)\
+	WPP_DEFINE_BIT( AL_DBG_CQ)\
+	WPP_DEFINE_BIT( AL_DBG_QP)\
+	WPP_DEFINE_BIT( AL_DBG_SRQ)\
+	WPP_DEFINE_BIT( AL_DBG_MW)\
+	WPP_DEFINE_BIT( AL_DBG_RES4) \
+	WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\
+	WPP_DEFINE_BIT( AL_DBG_UAL)\
+	WPP_DEFINE_BIT( AL_DBG_QUERY)\
+	WPP_DEFINE_BIT( AL_DBG_SA_REQ)\
+	WPP_DEFINE_BIT( AL_DBG_IOC)\
+	WPP_DEFINE_BIT( AL_DBG_SUB)\
+	WPP_DEFINE_BIT( AL_DBG_MAD))
+
+#else
+
+#define WPP_CONTROL_GUIDS \
+	WPP_DEFINE_CONTROL_GUID(ALCtlGuid2,(99DC84E3,B106,431e,88A6,4DD20C9BBDE3),  \
+	WPP_DEFINE_BIT( AL_DBG_ERROR) \
+	WPP_DEFINE_BIT( AL_DBG_PNP) \
+	WPP_DEFINE_BIT( AL_DBG_HDL) \
+	WPP_DEFINE_BIT( AL_DBG_AL_OBJ) \
+	WPP_DEFINE_BIT( AL_DBG_SMI) \
+	WPP_DEFINE_BIT( AL_DBG_SMI_CB) \
+	WPP_DEFINE_BIT( AL_DBG_FMR_POOL) \
+	WPP_DEFINE_BIT( AL_DBG_MAD_POOL) \
+	WPP_DEFINE_BIT( AL_DBG_MAD_SVC) \
+	WPP_DEFINE_BIT( AL_DBG_RES2) \
+	WPP_DEFINE_BIT( AL_DBG_CM) \
+	WPP_DEFINE_BIT( AL_DBG_CA) \
+	WPP_DEFINE_BIT( AL_DBG_MR) \
+	WPP_DEFINE_BIT( AL_DBG_MGR)\
+	WPP_DEFINE_BIT( AL_DBG_DEV)\
+	WPP_DEFINE_BIT( AL_DBG_MCAST)\
+	WPP_DEFINE_BIT( AL_DBG_PD)\
+	WPP_DEFINE_BIT( AL_DBG_AV)\
+	WPP_DEFINE_BIT( AL_DBG_CQ)\
+	WPP_DEFINE_BIT( AL_DBG_QP)\
+	WPP_DEFINE_BIT( AL_DBG_SRQ)\
+	WPP_DEFINE_BIT( AL_DBG_MW)\
+	WPP_DEFINE_BIT( AL_DBG_RES4) \
+	WPP_DEFINE_BIT( AL_DBG_PROXY_CB)\
+	WPP_DEFINE_BIT( AL_DBG_UAL)\
+	WPP_DEFINE_BIT( AL_DBG_QUERY)\
+	WPP_DEFINE_BIT( AL_DBG_SA_REQ)\
+	WPP_DEFINE_BIT( AL_DBG_IOC)\
+	WPP_DEFINE_BIT( AL_DBG_SUB)\
+	WPP_DEFINE_BIT( AL_DBG_MAD))
+
+#endif
+
+
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)
+#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
+
+
+// begin_wpp config
+// AL_ENTER( FLAG );
+// AL_EXIT( FLAG );
+// USEPREFIX(AL_PRINT, "%!STDPREFIX! [AL] :%!FUNC!() :");
+// USESUFFIX(AL_ENTER, " [AL] :%!FUNC!():[");
+// USESUFFIX(AL_EXIT, " [AL] :%!FUNC!():]");
+// end_wpp
+
+
+
+#else
+
+#include
+#include
+
+/*
+ * Debug macros
+ */
+
+
+/* Debug message source */
+#define AL_DBG_ERR		(1 << 0)
+#define AL_DBG_PNP		(1 << 1)
+#define AL_DBG_HDL		(1 << 2)
+#define AL_DBG_AL_OBJ	(1 << 3)
+#define AL_DBG_SMI		(1 << 4)
+#define AL_DBG_SMI_CB	(1 << 5)
+#define AL_DBG_FMR_POOL	(1 << 6)
+#define AL_DBG_MAD_POOL	(1 << 7)
+#define AL_DBG_MAD_SVC	(1 << 8)
+#define AL_DBG_CM		(1 << 10)
+#define AL_DBG_CA		(1 << 11)
+#define AL_DBG_MR		(1 << 12)
+#define AL_DBG_MGR		(1 << 13)
+#define AL_DBG_DEV		(1 << 14)
+#define AL_DBG_MCAST	(1 << 15)
+#define AL_DBG_PD		(1 << 16)
+#define AL_DBG_AV		(1 << 17)
+#define AL_DBG_CQ		(1 << 18)
+#define AL_DBG_QP		(1 << 19)
+#define AL_DBG_SRQ		(1 << 20)
+#define AL_DBG_MW		(1 << 21)
+#define AL_DBG_PROXY_CB	(1 << 23)
+#define AL_DBG_UAL		(1 << 24)
+#define AL_DBG_QUERY	(1 << 25)
+#define AL_DBG_SA_REQ	(1 << 26)
+#define AL_DBG_IOC		(1 << 27)
+#define AL_DBG_SUB		(1 << 28)
+#define AL_DBG_MAD		(1 << 29)	//TODO
+
+#define AL_DBG_ERROR	(CL_DBG_ERROR | AL_DBG_ERR)
+
+#if DBG
+
+// assignment of _level_ is needed to overcome warning C4127
+#define AL_PRINT( _level_,_flag_,_msg_) \
+	{ \
+		if( g_al_dbg_level >= (_level_) ) \
+			CL_TRACE( _flag_, g_al_dbg_flags, _msg_ ); \
+	}
+
+
+#define AL_PRINT_EXIT( _level_,_flag_,_msg_) \
+	{ \
+		if( g_al_dbg_level >= (_level_) ) \
+			CL_TRACE( _flag_, g_al_dbg_flags, _msg_ );\
+		AL_EXIT( _flag_ );\
+	}
+
+#define AL_ENTER( _flag_) \
+	{ \
+		if( g_al_dbg_level >= TRACE_LEVEL_VERBOSE ) \
+			CL_ENTER( _flag_, g_al_dbg_flags ); \
+	}
+
+#define AL_EXIT( _flag_)\
+	{ \
+		if( g_al_dbg_level >= TRACE_LEVEL_VERBOSE ) \
+			CL_EXIT( _flag_, g_al_dbg_flags ); \
+	}
+
+
+#else
+
+#define AL_PRINT( lvl, flags, msg)
+
+#define AL_PRINT_EXIT( _level_,_flag_,_msg_)
+
+#define AL_ENTER( _flag_)
+
+#define AL_EXIT( _flag_)
+
+
+#endif
+
+#endif //EVENT_TRACING
+
+
+
+enum al_perf_counters
+{
+	IbPostSend,
+	PostSend,
+	VpPostSend,
+	UalDoPostSend,
+
+	IbPollCq,
+	VerbsPollCq,
+	VpPollCq,
+	UalDoPollCq,
+
+
+	AlMaxPerf
+
+};
+
+extern cl_perf_t	g_perf;
+
+
+#endif /* __AL_DEBUG_H__ */
diff --git a/branches/Ndi/core/al/al_dev.h b/branches/Ndi/core/al/al_dev.h
new file mode 100644
index 00000000..c201f326
--- /dev/null
+++ b/branches/Ndi/core/al/al_dev.h
@@ -0,0 +1,487 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ *	This header file defines data structures for the user-mode proxy
+ *	and UAL support
+ *
+ * Environment:
+ *	Kernel and User Mode.
+ */
+
+
+#ifndef _ALDEV_H_
+#define _ALDEV_H_
+
+
+#include
+#include
+#include
+#include
+#include
+
+#define AL_DEVICE_NAME	L"\\Device\\ibal"
+#define ALDEV_KEY		(0x3B)	/* Matches FILE_DEVICE_INFINIBAND from wdm.h */
+
+#define AL_IOCTL_VERSION	(3)
+
+#ifdef CL_KERNEL
+
+/* Function prototypes for al device framework */
+
+AL_EXPORT cl_status_t AL_API
+al_dev_open(
+	IN		cl_ioctl_handle_t		h_ioctl );
+
+AL_EXPORT cl_status_t AL_API
+al_dev_close(
+	IN		cl_ioctl_handle_t		h_ioctl );
+
+AL_EXPORT cl_status_t AL_API
+al_dev_ioctl(
+	IN		cl_ioctl_handle_t		h_ioctl );
+
+void
+al_dev_cancel_ioctl(
+	IN		cl_ioctl_handle_t		h_ioctl );
+
+void
+al_dev_cancel_io(
+	IN		DEVICE_OBJECT			*p_dev_obj,
+	IN		IRP						*p_irp );
+
+
+/* Define data structures for user-mode proxy */
+#else /* CL_KERNEL */
+
+
+/* Prototype for the ioctl support function */
+cl_status_t
+do_al_dev_ioctl(
+	IN		uint32_t				command,
+	IN		void					*p_buf,
+	IN		uintn_t					buf_size,
+	IN		void					*p_out_buf,
+	IN		uintn_t					out_buf_size,
+	OUT		uintn_t					*p_bytes_ret );
+
+
+#endif /* CL_KERNEL */
+
+/*
+ * Shared between user and kernel mode.
+ */
+
+
+/****d* AL device Helper/al_proxy_ops_t
+* NAME
+*	al_proxy_ops_t
+*
+* DESCRIPTION
+*	Define the enumeration for the UAL/proxy ioctls, excluding the
+*	AL-specific ones.  These are intended to be the support ioctls,
+*	such as the notification from the proxy to the UAL in user-mode.
+*
+* SYNOPSIS
+*/
+typedef enum al_proxy_ops
+{
+	al_proxy_ops_start = 0,
+
+	ual_get_cm_cb_info = al_proxy_ops_start+1,
+	ual_get_comp_cb_info,
+	ual_get_misc_cb_info,
+	ual_bind,
+	ual_bind_sa,
+	ual_bind_pnp,
+	ual_bind_misc,
+	ual_bind_cm,
+	ual_bind_cq,
+	ual_bind_destroy,
+
+	al_proxy_maxops
+
+} al_proxy_ops_t;
+/**********/
+/*
+ * Various Operations Allowed on the System Helper
+ */
+
+/* IOCTL to specify what notification wait objects are used by UAL
+ * for asynchronous event notifications from proxy
+ */
+
+#define UAL_GET_CM_CB_INFO		IOCTL_CODE(ALDEV_KEY, ual_get_cm_cb_info)
+#define UAL_GET_COMP_CB_INFO	IOCTL_CODE(ALDEV_KEY, ual_get_comp_cb_info)
+#define UAL_GET_MISC_CB_INFO	IOCTL_CODE(ALDEV_KEY, ual_get_misc_cb_info)
+#define UAL_BIND				IOCTL_CODE(ALDEV_KEY, ual_bind)
+#define UAL_BIND_SA				IOCTL_CODE(ALDEV_KEY, ual_bind_sa)
+#define UAL_BIND_PNP			IOCTL_CODE(ALDEV_KEY, ual_bind_pnp)
+#define UAL_BIND_MISC			IOCTL_CODE(ALDEV_KEY, ual_bind_misc)
+#define UAL_BIND_CM				IOCTL_CODE(ALDEV_KEY, ual_bind_cm)
+#define UAL_BIND_CQ				IOCTL_CODE(ALDEV_KEY, ual_bind_cq)
+#define UAL_BIND_DESTROY		IOCTL_CODE(ALDEV_KEY, ual_bind_destroy)
+
+#define AL_PROXY_OPS_START		IOCTL_CODE(ALDEV_KEY, al_proxy_ops_start)
+#define AL_PROXY_MAXOPS			IOCTL_CODE(ALDEV_KEY, al_proxy_maxops)
+
+#define IS_AL_PROXY_IOCTL(cmd)	\
+	((cmd) > AL_PROXY_OPS_START && (cmd) < AL_PROXY_MAXOPS)
+
+
+
+/****d* AL device Helper/al_dev_ops_t
+* NAME
+*	al_dev_ops_t
+*
+* DESCRIPTION
+*	AL device supports the following ioctls
+*	1. those meant strictly for the consumption of the proxy
+*	2. those meant to denote an AL api, in which case the call
+*	is further dispatched to the AL api and/or the KAL internal interface
+*
+* SYNOPSIS
+*/
+/* All verbs related ioctls */
+typedef enum _al_verbs_ops
+{
+	al_verbs_ops_start = al_proxy_maxops,
+
+	ual_get_uvp_name_cmd = al_verbs_ops_start + 1,
+	ual_open_ca_ioctl_cmd,
+	ual_query_ca_ioctl_cmd,
+	ual_modify_ca_ioctl_cmd,
+	ual_close_ca_ioctl_cmd,
+	ual_ci_call_ioctl_cmd,
+	ual_alloc_pd_ioctl_cmd,
+	ual_dealloc_pd_ioctl_cmd,
+	ual_create_av_ioctl_cmd,
+	ual_query_av_ioctl_cmd,
+	ual_modify_av_ioctl_cmd,
+	ual_destroy_av_ioctl_cmd,
+	ual_create_srq_ioctl_cmd,
+	ual_query_srq_ioctl_cmd,
+	ual_modify_srq_ioctl_cmd,
+	ual_destroy_srq_ioctl_cmd,
+	ual_create_qp_ioctl_cmd,
+	ual_query_qp_ioctl_cmd,
+	ual_modify_qp_ioctl_cmd,
+	ual_destroy_qp_ioctl_cmd,
+	ual_create_cq_ioctl_cmd,
+	ual_query_cq_ioctl_cmd,
+	ual_modify_cq_ioctl_cmd,
+	ual_destroy_cq_ioctl_cmd,
+	ual_reg_mr_ioctl_cmd,
+	ual_query_mr_ioctl_cmd,
+	ual_rereg_mem_ioctl_cmd,
+	ual_reg_shared_ioctl_cmd,
+	ual_dereg_mr_ioctl_cmd,
+	ual_create_mw_ioctl_cmd,
+	ual_query_mw_ioctl_cmd,
+	ual_bind_mw_ioctl_cmd,
+	ual_destroy_mw_ioctl_cmd,
+	ual_post_send_ioctl_cmd,
+	ual_post_recv_ioctl_cmd,
+	ual_post_srq_recv_ioctl_cmd,
+	ual_peek_cq_ioctl_cmd,
+	ual_poll_cq_ioctl_cmd,
+	ual_rearm_cq_ioctl_cmd,
+	ual_rearm_n_cq_ioctl_cmd,
+	ual_attach_mcast_ioctl_cmd,
+	ual_detach_mcast_ioctl_cmd,
+	ual_get_spl_qp_cmd,
+
+	al_verbs_maxops
+
+} al_verbs_ops_t;
+
+#define AL_VERBS_OPS_START		IOCTL_CODE(ALDEV_KEY, al_verbs_ops_start)
+#define AL_VERBS_MAXOPS			IOCTL_CODE(ALDEV_KEY, al_verbs_maxops)
+#define IS_VERBS_IOCTL(cmd)	\
+	((cmd) > AL_VERBS_OPS_START && (cmd) < AL_VERBS_MAXOPS)
+
+/* All subnet management related ioctls */
+
+typedef enum _al_subnet_ops
+{
+	al_subnet_ops_start = al_verbs_maxops,
+
+	ual_reg_svc_cmd = al_subnet_ops_start + 1,
+	ual_dereg_svc_cmd,
+	ual_send_sa_req_cmd,
+	ual_cancel_sa_req_cmd,
+	ual_mad_send_cmd,
+	ual_mad_recv_cmd,
+	ual_init_dgram_svc_cmd,
+	ual_reg_mad_svc_cmd,
+	ual_dereg_mad_svc_cmd,
+	ual_reg_mad_pool_cmd,
+	ual_dereg_mad_pool_cmd,
+	ual_cancel_mad_cmd,
+	ual_mad_recv_comp_cmd,
+	ual_local_mad_cmd,
+
+	al_subnet_maxops
+
+} al_subnet_ops_t;
+
+#define AL_SUBNET_OPS_START		IOCTL_CODE(ALDEV_KEY, al_subnet_ops_start)
+#define AL_SUBNET_MAXOPS		IOCTL_CODE(ALDEV_KEY, al_subnet_maxops)
+#define IS_SUBNET_IOCTL(cmd)	\
+	((cmd) > AL_SUBNET_OPS_START && (cmd) < AL_SUBNET_MAXOPS)
+
+/* All ioc related ioctls */
+
+typedef enum _al_ioc_ops
+{
+	al_ioc_ops_start = al_subnet_maxops,
+
+	ual_create_ioc_cmd = al_ioc_ops_start + 1,
+	ual_destroy_ioc_cmd,
+	ual_reg_ioc_cmd,
+	ual_reject_ioc_cmd,
+	ual_add_svc_entry_cmd,
+	ual_remove_svc_entry_cmd,
+
+	al_ioc_maxops
+
+} al_ioc_ops_t;
+
+#define AL_IOC_OPS_START		IOCTL_CODE(ALDEV_KEY, al_ioc_ops_start)
+#define AL_IOC_MAXOPS			IOCTL_CODE(ALDEV_KEY, al_ioc_maxops)
+#define IS_IOC_IOCTL(cmd)	\
+	((cmd) > AL_IOC_OPS_START && (cmd) < AL_IOC_MAXOPS)
+
+typedef enum _al_cm_sidr_ops
+{
+	al_cm_ops_start = al_ioc_maxops,
+	ual_cm_req_cmd = al_cm_ops_start + 1,
+	ual_cm_rep_cmd,
+	ual_cm_dreq_cmd,
+	ual_cm_drep_cmd,
+	ual_cm_listen_cmd,
+	ual_cm_cancel_cmd,
+	ual_cm_rtu_cmd,
+	ual_cm_rej_cmd,
+	ual_cm_handoff_cmd,
+	ual_cm_mra_cmd,
+	ual_cm_lap_cmd,
+	ual_cm_apr_cmd,
+	ual_force_apm_cmd,
+	ual_reg_sidr_cmd,
+	ual_sidr_req_cmd,
+	ual_sidr_rep_cmd,
+
+	al_cm_maxops
+
+} al_cm_sidr_ops_t;
+
+#define AL_CM_OPS_START			IOCTL_CODE(ALDEV_KEY, al_cm_ops_start)
+#define AL_CM_MAXOPS			IOCTL_CODE(ALDEV_KEY, al_cm_maxops)
+#define IS_CM_IOCTL(cmd)	\
+	((cmd) > AL_CM_OPS_START && (cmd) < AL_CM_MAXOPS)
+
+
+typedef enum _ual_cep_ops
+{
+	al_cep_ops_start = al_ioc_maxops,
+	ual_create_cep,
+	ual_destroy_cep,
+	ual_cep_listen,
+	ual_cep_pre_req,
+	ual_cep_send_req,
+	ual_cep_pre_rep,
+	ual_cep_send_rep,
+	ual_cep_get_rtr,
+	ual_cep_get_rts,
+	ual_cep_rtu,
+	ual_cep_rej,
+	ual_cep_mra,
+	ual_cep_lap,
+	ual_cep_pre_apr,
+	ual_cep_send_apr,
+	ual_cep_dreq,
+	ual_cep_drep,
+	ual_cep_get_timewait,
+	ual_cep_get_event,
+	ual_cep_poll,
+
+	al_cep_maxops
+
+} ual_cep_ops_t;
+
+#define UAL_CEP_OPS_START		IOCTL_CODE(ALDEV_KEY, al_cep_ops_start)
+#define UAL_CEP_MAXOPS			IOCTL_CODE(ALDEV_KEY, al_cep_maxops)
+#define IS_CEP_IOCTL(cmd)	\
+	((cmd) > UAL_CEP_OPS_START && (cmd) < UAL_CEP_MAXOPS)
+
+
+/* AL ioctls */
+
+typedef enum _al_dev_ops
+{
+	al_ops_start = al_cep_maxops,
+
+	ual_reg_shmid_cmd,
+	ual_get_ca_attr,
+	ual_reg_pnp_cmd,
+	ual_poll_pnp_cmd,
+	ual_rearm_pnp_cmd,
+	ual_dereg_pnp_cmd,
+
+	ual_access_flash,
+
+	al_maxops
+
+} al_dev_ops_t;
+
+#define AL_OPS_START			IOCTL_CODE(ALDEV_KEY, al_ops_start)
+#define AL_MAXOPS				IOCTL_CODE(ALDEV_KEY, al_maxops)
+
+#define IS_AL_IOCTL(cmd)	\
+	((cmd) > AL_OPS_START && (cmd) < AL_MAXOPS)
+
+/*
+ * Various Operations Allowed on the System Helper
+ */
+
+#define UAL_REG_SHMID			IOCTL_CODE(ALDEV_KEY, ual_reg_shmid_cmd)
+#define UAL_GET_VENDOR_LIBCFG	IOCTL_CODE(ALDEV_KEY, ual_get_uvp_name_cmd)
+#define UAL_OPEN_CA				IOCTL_CODE(ALDEV_KEY, ual_open_ca_ioctl_cmd)
+#define UAL_QUERY_CA			IOCTL_CODE(ALDEV_KEY, ual_query_ca_ioctl_cmd)
+#define UAL_MODIFY_CA			IOCTL_CODE(ALDEV_KEY, ual_modify_ca_ioctl_cmd)
+#define UAL_CLOSE_CA			IOCTL_CODE(ALDEV_KEY, ual_close_ca_ioctl_cmd)
+#define UAL_CI_CALL				IOCTL_CODE(ALDEV_KEY, ual_ci_call_ioctl_cmd)
+#define UAL_ALLOC_PD			IOCTL_CODE(ALDEV_KEY, ual_alloc_pd_ioctl_cmd)
+#define UAL_DEALLOC_PD			IOCTL_CODE(ALDEV_KEY, ual_dealloc_pd_ioctl_cmd)
+#define UAL_CREATE_AV			IOCTL_CODE(ALDEV_KEY, ual_create_av_ioctl_cmd)
+#define UAL_QUERY_AV			IOCTL_CODE(ALDEV_KEY, ual_query_av_ioctl_cmd)
+#define UAL_MODIFY_AV			IOCTL_CODE(ALDEV_KEY, ual_modify_av_ioctl_cmd)
+#define UAL_DESTROY_AV			IOCTL_CODE(ALDEV_KEY, ual_destroy_av_ioctl_cmd)
+#define UAL_CREATE_SRQ			IOCTL_CODE(ALDEV_KEY, ual_create_srq_ioctl_cmd)
+#define UAL_QUERY_SRQ			IOCTL_CODE(ALDEV_KEY, ual_query_srq_ioctl_cmd)
+#define UAL_MODIFY_SRQ			IOCTL_CODE(ALDEV_KEY, ual_modify_srq_ioctl_cmd)
+#define UAL_DESTROY_SRQ			IOCTL_CODE(ALDEV_KEY, ual_destroy_srq_ioctl_cmd)
+#define UAL_CREATE_QP			IOCTL_CODE(ALDEV_KEY, ual_create_qp_ioctl_cmd)
+#define UAL_QUERY_QP			IOCTL_CODE(ALDEV_KEY, ual_query_qp_ioctl_cmd)
+#define UAL_MODIFY_QP			IOCTL_CODE(ALDEV_KEY, ual_modify_qp_ioctl_cmd)
+#define UAL_DESTROY_QP			IOCTL_CODE(ALDEV_KEY, ual_destroy_qp_ioctl_cmd)
+#define UAL_CREATE_CQ			IOCTL_CODE(ALDEV_KEY, ual_create_cq_ioctl_cmd)
+#define UAL_QUERY_CQ			IOCTL_CODE(ALDEV_KEY, ual_query_cq_ioctl_cmd)
+#define UAL_MODIFY_CQ			IOCTL_CODE(ALDEV_KEY, ual_modify_cq_ioctl_cmd)
+#define UAL_DESTROY_CQ			IOCTL_CODE(ALDEV_KEY, ual_destroy_cq_ioctl_cmd)
+#define UAL_REG_MR				IOCTL_CODE(ALDEV_KEY, ual_reg_mr_ioctl_cmd)
+#define UAL_QUERY_MR			IOCTL_CODE(ALDEV_KEY, ual_query_mr_ioctl_cmd)
+#define UAL_MODIFY_MR			IOCTL_CODE(ALDEV_KEY, ual_rereg_mem_ioctl_cmd)
+#define UAL_REG_SHARED			IOCTL_CODE(ALDEV_KEY, ual_reg_shared_ioctl_cmd)
+#define UAL_DEREG_MR			IOCTL_CODE(ALDEV_KEY, ual_dereg_mr_ioctl_cmd)
+#define UAL_CREATE_MW			IOCTL_CODE(ALDEV_KEY, ual_create_mw_ioctl_cmd)
+#define UAL_QUERY_MW			IOCTL_CODE(ALDEV_KEY, ual_query_mw_ioctl_cmd)
+#define UAL_BIND_MW				IOCTL_CODE(ALDEV_KEY, ual_bind_mw_ioctl_cmd)
+#define UAL_DESTROY_MW			IOCTL_CODE(ALDEV_KEY, ual_destroy_mw_ioctl_cmd)
+#define UAL_POST_SEND			IOCTL_CODE(ALDEV_KEY, ual_post_send_ioctl_cmd)
+#define UAL_POST_RECV			IOCTL_CODE(ALDEV_KEY, ual_post_recv_ioctl_cmd)
+#define UAL_POST_SRQ_RECV		IOCTL_CODE(ALDEV_KEY, ual_post_srq_recv_ioctl_cmd)
+#define UAL_PEEK_CQ				IOCTL_CODE(ALDEV_KEY, ual_peek_cq_ioctl_cmd)
+#define UAL_POLL_CQ				IOCTL_CODE(ALDEV_KEY, ual_poll_cq_ioctl_cmd)
+#define UAL_REARM_CQ			IOCTL_CODE(ALDEV_KEY, ual_rearm_cq_ioctl_cmd)
+#define UAL_REARM_N_CQ			IOCTL_CODE(ALDEV_KEY, ual_rearm_n_cq_ioctl_cmd)
+#define UAL_ATTACH_MCAST		IOCTL_CODE(ALDEV_KEY, ual_attach_mcast_ioctl_cmd)
+#define UAL_DETACH_MCAST		IOCTL_CODE(ALDEV_KEY, ual_detach_mcast_ioctl_cmd)
+
+/* Subnet management related ioctl commands */
+#define UAL_REG_SVC				IOCTL_CODE(ALDEV_KEY, ual_reg_svc_cmd)
+#define UAL_DEREG_SVC			IOCTL_CODE(ALDEV_KEY, ual_dereg_svc_cmd)
+#define UAL_SEND_SA_REQ			IOCTL_CODE(ALDEV_KEY, ual_send_sa_req_cmd)
+#define UAL_CANCEL_SA_REQ		IOCTL_CODE(ALDEV_KEY, ual_cancel_sa_req_cmd)
+#define UAL_MAD_SEND			IOCTL_CODE(ALDEV_KEY, ual_mad_send_cmd)
+#define UAL_INIT_DGRM_SVC		IOCTL_CODE(ALDEV_KEY, ual_init_dgram_svc_cmd)
+#define UAL_REG_MAD_SVC			IOCTL_CODE(ALDEV_KEY, ual_reg_mad_svc_cmd)
+#define UAL_DEREG_MAD_SVC		IOCTL_CODE(ALDEV_KEY, ual_dereg_mad_svc_cmd)
+#define UAL_REG_MAD_POOL		IOCTL_CODE(ALDEV_KEY, ual_reg_mad_pool_cmd)
+#define UAL_DEREG_MAD_POOL		IOCTL_CODE(ALDEV_KEY, ual_dereg_mad_pool_cmd)
+#define UAL_CANCEL_MAD			IOCTL_CODE(ALDEV_KEY, ual_cancel_mad_cmd)
+#define UAL_GET_SPL_QP_ALIAS	IOCTL_CODE(ALDEV_KEY, ual_get_spl_qp_cmd)
+#define UAL_MAD_RECV_COMP		IOCTL_CODE(ALDEV_KEY, ual_mad_recv_comp_cmd)
+#define UAL_LOCAL_MAD			IOCTL_CODE(ALDEV_KEY, ual_local_mad_cmd)
+
+/* CM Related ioctl commands */
+#define UAL_CM_LISTEN			IOCTL_CODE(ALDEV_KEY, ual_cm_listen_cmd)
+#define UAL_CM_CANCEL			IOCTL_CODE(ALDEV_KEY, ual_cm_cancel_cmd)
+#define UAL_CM_REQ				IOCTL_CODE(ALDEV_KEY, ual_cm_req_cmd)
+#define UAL_CM_REP				IOCTL_CODE(ALDEV_KEY, ual_cm_rep_cmd)
+#define UAL_CM_RTU				IOCTL_CODE(ALDEV_KEY, ual_cm_rtu_cmd)
+#define UAL_CM_REJ				IOCTL_CODE(ALDEV_KEY, ual_cm_rej_cmd)
+#define UAL_CM_HANDOFF			IOCTL_CODE(ALDEV_KEY, ual_cm_handoff_cmd)
+#define UAL_CM_DREQ				IOCTL_CODE(ALDEV_KEY, ual_cm_dreq_cmd)
+#define UAL_CM_DREP				IOCTL_CODE(ALDEV_KEY, ual_cm_drep_cmd)
+#define UAL_CM_MRA				IOCTL_CODE(ALDEV_KEY, ual_cm_mra_cmd)
+#define UAL_CM_LAP				IOCTL_CODE(ALDEV_KEY, ual_cm_lap_cmd)
+#define UAL_CM_APR				IOCTL_CODE(ALDEV_KEY, ual_cm_apr_cmd)
+#define UAL_CM_FORCE_APM		IOCTL_CODE(ALDEV_KEY, ual_force_apm_cmd)
+
+/* CEP Related IOCTL commands */
+#define UAL_CREATE_CEP			IOCTL_CODE(ALDEV_KEY, ual_create_cep)
+#define UAL_DESTROY_CEP			IOCTL_CODE(ALDEV_KEY, ual_destroy_cep)
+#define UAL_CEP_LISTEN			IOCTL_CODE(ALDEV_KEY, ual_cep_listen)
+#define UAL_CEP_PRE_REQ			IOCTL_CODE(ALDEV_KEY, ual_cep_pre_req)
+#define UAL_CEP_SEND_REQ		IOCTL_CODE(ALDEV_KEY, ual_cep_send_req)
+#define UAL_CEP_PRE_REP			IOCTL_CODE(ALDEV_KEY, ual_cep_pre_rep)
+#define UAL_CEP_SEND_REP		IOCTL_CODE(ALDEV_KEY, ual_cep_send_rep)
+#define UAL_CEP_GET_RTR			IOCTL_CODE(ALDEV_KEY, ual_cep_get_rtr)
+#define UAL_CEP_GET_RTS			IOCTL_CODE(ALDEV_KEY, ual_cep_get_rts)
+#define UAL_CEP_RTU				IOCTL_CODE(ALDEV_KEY, ual_cep_rtu)
+#define UAL_CEP_REJ				IOCTL_CODE(ALDEV_KEY, ual_cep_rej)
+#define UAL_CEP_MRA				IOCTL_CODE(ALDEV_KEY, ual_cep_mra)
+#define UAL_CEP_LAP				IOCTL_CODE(ALDEV_KEY, ual_cep_lap)
+#define UAL_CEP_PRE_APR			IOCTL_CODE(ALDEV_KEY, ual_cep_pre_apr)
+#define UAL_CEP_SEND_APR		IOCTL_CODE(ALDEV_KEY, ual_cep_send_apr)
+#define UAL_CEP_DREQ			IOCTL_CODE(ALDEV_KEY, ual_cep_dreq)
+#define UAL_CEP_DREP			IOCTL_CODE(ALDEV_KEY, ual_cep_drep)
+#define UAL_CEP_GET_TIMEWAIT	IOCTL_CODE(ALDEV_KEY, ual_cep_get_timewait)
+#define UAL_CEP_GET_EVENT		IOCTL_CODE(ALDEV_KEY, ual_cep_get_event)
+#define UAL_CEP_POLL			IOCTL_CODE(ALDEV_KEY, ual_cep_poll)
+
+#define UAL_GET_CA_ATTR_INFO	IOCTL_CODE(ALDEV_KEY, ual_get_ca_attr)
+
+/* PnP related ioctl commands. */
+#define UAL_REG_PNP				IOCTL_CODE(ALDEV_KEY, ual_reg_pnp_cmd)
+#define UAL_POLL_PNP			IOCTL_CODE(ALDEV_KEY, ual_poll_pnp_cmd)
+#define UAL_REARM_PNP			IOCTL_CODE(ALDEV_KEY, ual_rearm_pnp_cmd)
+#define UAL_DEREG_PNP			IOCTL_CODE(ALDEV_KEY, ual_dereg_pnp_cmd)
+#define UAL_ACCESS_FLASH		IOCTL_CODE(ALDEV_KEY, ual_access_flash)
+
+#endif /* _ALDEV_H_ */
diff --git a/branches/Ndi/core/al/al_dm.c b/branches/Ndi/core/al/al_dm.c
new file mode 100644
index 00000000..75a2ef08
--- /dev/null
+++ b/branches/Ndi/core/al/al_dm.c
@@ -0,0 +1,1799 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "al_ca.h"
+#include "al_debug.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "al_dm.tmh"
+#endif
+
+#include "al_dm.h"
+#include "al_mgr.h"
+#include "ib_common.h"
+
+
+/*
+ * This code implements a minimal device management agent.
+ */
+
+
+static dm_agent_t*		gp_dm_agent = NULL;
+
+
+#define SVC_REG_TIMEOUT			2000		// Milliseconds
+#define SVC_REG_RETRY_CNT		3
+#define DM_CLASS_RESP_TIME_VALUE	20
+
+
+#define SET_NIBBLE( nibble_array, nibble_num, value ) \
+{ \
+	((uint8_t*)(nibble_array))[(nibble_num) >> 1] = (uint8_t) \
+		((((nibble_num) & 1) == 0) ? \
+		((uint8_t*)(nibble_array))[(nibble_num) >> 1] & 0x0f : \
+		((uint8_t*)(nibble_array))[(nibble_num) >> 1] & 0xf0); \
+	((uint8_t*)(nibble_array))[(nibble_num) >> 1] |= \
+		( ((nibble_num) & 1) == 0) ? ((value) << 4) : ((value) & 0x0f); \
+}
+
+
+void
+free_ioc(
+	IN		al_obj_t*			p_obj );
+
+void
+free_svc_entry(
+	IN		al_obj_t*			p_obj );
+
+al_iou_t*
+acquire_iou(
+	IN		const ib_net64_t	ca_guid );
+
+al_iou_t*
+get_iou(
+	IN		const ib_ioc_handle_t	h_ioc );
+
+ib_ioc_handle_t
+get_ioc(
+	IN		const ib_ca_handle_t	h_ca );
+
+ib_api_status_t
+add_ioc(
+	IN		al_iou_t*			p_iou,
+	IN		ib_ioc_handle_t		h_ioc );
+
+void
+ioc_change(
+	IN		ib_ioc_handle_t		h_ioc );
+
+void
+iou_change(
+	IN		al_iou_t*			p_iou );
+
+ib_api_status_t
+set_port_dm_attr(
+	IN		al_iou_port_t*		p_iou_port );
+
+void
+iou_port_svc_reg_cb(
+	IN		ib_reg_svc_rec_t*	p_reg_svc_rec );
+
+void
+destroying_dm_agent(
+	IN		al_obj_t*			p_obj );
+
+void
+free_dm_agent(
+	IN		al_obj_t*			p_obj );
+
+ib_api_status_t
+dm_agent_reg_pnp(
+	IN		ib_pnp_class_t		pnp_class,
+	IN		ib_pnp_handle_t*	ph_pnp );
+
+ib_api_status_t
+dm_agent_pnp_cb(
+	IN		ib_pnp_rec_t*		p_pnp_rec );
+
+ib_api_status_t
+create_iou(
+	IN		ib_pnp_rec_t*		p_pnp_rec );
+
+void
+cleanup_iou(
+	IN		al_obj_t*			p_obj );
+
+void
+free_iou(
+	IN		al_obj_t*			p_obj );
+
+ib_api_status_t
+create_iou_port(
+	IN		ib_pnp_port_rec_t*	p_pnp_rec );
+
+void
+destroying_iou_port(
+	IN		al_obj_t*			p_obj );
+
+void
+free_iou_port(
+	IN		al_obj_t*			p_obj );
+
+void
+iou_port_event_cb(
+	IN		ib_async_event_rec_t	*p_event_rec );
+
+void
+dm_agent_send_cb(
+	IN		ib_mad_svc_handle_t	h_mad_svc,
+	IN		void*				mad_svc_context,
+	IN		ib_mad_element_t*	p_mad_response );
+
+void
+dm_agent_recv_cb(
+	IN		ib_mad_svc_handle_t	h_mad_svc,
+	IN		void*				mad_svc_context,
+	IN		ib_mad_element_t*	p_mad_request );
+
+void
+dm_agent_get(
+	IN		al_iou_port_t*		p_iou_port,
+	IN		ib_mad_t*			p_mad_req,
+	IN		ib_mad_t*			p_mad_rsp );
+
+void
+dm_agent_set(
+	IN		al_iou_port_t*		p_iou_port,
+	IN		ib_mad_t*			p_mad_req,
+	IN		ib_mad_t*			p_mad_rsp );
+
+void
+get_class_port_info(
+	IN		al_iou_t*			p_iou,
+	IN		ib_dm_mad_t*		p_dm_mad );
+
+void
+get_io_unit_info(
+	IN		al_iou_t*			p_iou,
+	IN		ib_dm_mad_t*		p_dm_mad );
+
+void
+get_ioc_profile(
+	IN		al_iou_t*			p_iou,
+	IN		uint8_t				slot,
+	IN		ib_dm_mad_t*		p_dm_mad );
+
+void
+get_svc_entries(
+	IN		al_iou_t*			p_iou,
+	IN		uint8_t				slot,
+	IN		uint8_t				svc_num_lo,
+	IN		uint8_t				svc_num_hi,
+	IN		ib_dm_mad_t*		p_dm_mad );
+
+
+
+
+ib_api_status_t
+ib_create_ioc(
+	IN		const ib_ca_handle_t			h_ca,
+	IN		const ib_ioc_profile_t* const	p_ioc_profile,
+	OUT		ib_ioc_handle_t* const			ph_ioc )
+{
+	ib_ioc_handle_t		h_ioc;
+
+	AL_ENTER( AL_DBG_IOC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );
+		return IB_INVALID_CA_HANDLE;
+	}
+	if( !p_ioc_profile || !ph_ioc )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Get an IOC. */
+	h_ioc = get_ioc( h_ca );
+	if( !h_ioc )
+		return IB_INSUFFICIENT_MEMORY;
+
+	/* Save the IOC profile. */
+	cl_memcpy( &h_ioc->ioc_profile, p_ioc_profile, sizeof(ib_ioc_profile_t) );
+
+	/* Clear the service entry count. */
+	h_ioc->ioc_profile.num_svc_entries = 0;
+
+	/* Return the IOC handle to the user.
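+	 *
+	 * (Typical consumer flow, sketched with hypothetical handles and error
+	 * handling omitted - create the IOC, add its service entries, then
+	 * register it:
+	 *
+	 *   ib_create_ioc( h_ca, &ioc_profile, &h_ioc );
+	 *   ib_add_svc_entry( h_ioc, &svc_entry, &h_svc );
+	 *   ib_reg_ioc( h_ioc );
+	 *
+	 * num_svc_entries is cleared above because ib_add_svc_entry maintains
+	 * the count itself as entries are attached.)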
+	 */
+	*ph_ioc = h_ioc;
+
+	AL_EXIT( AL_DBG_IOC );
+	return IB_SUCCESS;
+}
+
+
+
+ib_api_status_t
+ib_destroy_ioc(
+	IN		const ib_ioc_handle_t		h_ioc )
+{
+	AL_ENTER( AL_DBG_IOC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	ref_al_obj( &h_ioc->obj );
+	h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );
+
+	AL_EXIT( AL_DBG_IOC );
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Free an IOC.
+ */
+void
+free_ioc(
+	IN		al_obj_t*			p_obj )
+{
+	ib_ioc_handle_t		h_ioc;
+
+	CL_ASSERT( p_obj );
+
+	h_ioc = PARENT_STRUCT( p_obj, al_ioc_t, obj );
+
+	/*
+	 * To maintain slot ordering, IOCs attached to an IO unit are freed when
+	 * the IO unit is destroyed.  Otherwise, unattached IOCs may be freed now.
+	 */
+	if( h_ioc->p_iou )
+	{
+		/* Mark the IOC slot as empty. */
+		h_ioc->state = EMPTY_SLOT;
+		reset_al_obj( p_obj );
+		deref_al_obj( &h_ioc->p_iou->obj );
+
+		/* Report that a change occurred on the IOC. */
+		ioc_change( h_ioc );
+	}
+	else
+	{
+		/* Unattached IOCs can be destroyed. */
+		destroy_al_obj( p_obj );
+		cl_free( h_ioc );
+	}
+}
+
+
+
+ib_api_status_t
+ib_reg_ioc(
+	IN		const ib_ioc_handle_t		h_ioc )
+{
+	al_iou_t*			p_iou;
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_IOC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* Get an IO unit for this IOC. */
+	p_iou = get_iou( h_ioc );
+	if( !p_iou )
+		return IB_INSUFFICIENT_MEMORY;
+
+	/* Register the IOC with the IO unit. */
+	status = add_ioc( p_iou, h_ioc );
+
+	AL_EXIT( AL_DBG_IOC );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_add_svc_entry(
+	IN		const ib_ioc_handle_t			h_ioc,
+	IN		const ib_svc_entry_t* const		p_svc_entry,
+	OUT		ib_svc_handle_t* const			ph_svc )
+{
+	ib_svc_handle_t		h_svc;
+	ib_api_status_t		status;
+
+	AL_ENTER( AL_DBG_IOC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_ioc, AL_OBJ_TYPE_H_IOC ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+	if( !p_svc_entry || !ph_svc )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Synchronize the addition of a service entry with the removal.
+	 * Cannot hold a lock on the IOC when attaching a service entry
+	 * object.  Wait here until the IOC is no longer in use.
+	 */
+	cl_spinlock_acquire( &h_ioc->obj.lock );
+	while( h_ioc->in_use_cnt )
+	{
+		cl_spinlock_release( &h_ioc->obj.lock );
+		cl_thread_suspend( 0 );
+		cl_spinlock_acquire( &h_ioc->obj.lock );
+	}
+	/* Flag the IOC as in use by this thread. */
+	cl_atomic_inc( &h_ioc->in_use_cnt );
+	cl_spinlock_release( &h_ioc->obj.lock );
+
+	/* Check the current service entry count. */
+	if( h_ioc->ioc_profile.num_svc_entries == MAX_NUM_SVC_ENTRIES )
+	{
+		cl_atomic_dec( &h_ioc->in_use_cnt );
+		AL_EXIT( AL_DBG_IOC );
+		return IB_INSUFFICIENT_RESOURCES;
+	}
+	h_svc = cl_zalloc( sizeof( al_svc_entry_t ) );
+	if( !h_svc )
+	{
+		cl_atomic_dec( &h_ioc->in_use_cnt );
+		AL_EXIT( AL_DBG_IOC );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Construct the service entry. */
+	construct_al_obj( &h_svc->obj, AL_OBJ_TYPE_H_SVC_ENTRY );
+
+	/* Save the service entry. */
+	cl_memcpy( &h_svc->svc_entry, p_svc_entry, sizeof( ib_svc_entry_t ) );
+
+	/* Initialize the service entry object.
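+	 *
+	 * (The FALSE argument below selects synchronous destruction - a
+	 * service entry is a small leaf object with no HW state, so no
+	 * pfn_destroying or pfn_cleanup callbacks are supplied, only
+	 * free_svc_entry.)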
+	 */
+	status = init_al_obj( &h_svc->obj, h_svc, FALSE, NULL, NULL,
+		free_svc_entry );
+	if( status != IB_SUCCESS )
+	{
+		free_svc_entry( &h_svc->obj );
+		AL_EXIT( AL_DBG_IOC );
+		return status;
+	}
+
+	/* Attach the service entry to the IOC. */
+	status = attach_al_obj( &h_ioc->obj, &h_svc->obj );
+	if( status != IB_SUCCESS )
+	{
+		h_svc->obj.pfn_destroy( &h_svc->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	h_ioc->ioc_profile.num_svc_entries++;
+
+	/* Indicate that a change occurred on the IOC. */
+	ioc_change( h_ioc );
+
+	/* No longer in use by this thread. */
+	cl_atomic_dec( &h_ioc->in_use_cnt );
+
+	/* Return the service entry handle to the user. */
+	*ph_svc = h_svc;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_svc->obj );
+
+	AL_EXIT( AL_DBG_IOC );
+	return IB_SUCCESS;
+}
+
+
+
+ib_api_status_t
+ib_remove_svc_entry(
+	IN		const ib_svc_handle_t		h_svc )
+{
+	ib_ioc_handle_t		h_ioc;
+
+	AL_ENTER( AL_DBG_IOC );
+
+	if( AL_OBJ_INVALID_HANDLE( h_svc, AL_OBJ_TYPE_H_SVC_ENTRY ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	h_ioc = PARENT_STRUCT( h_svc->obj.p_parent_obj, al_ioc_t, obj );
+
+	/*
+	 * Synchronize the removal of a service entry with the addition.
+	 * Cannot hold a lock on the IOC when detaching a service entry
+	 * object.  Wait here until the IOC is no longer in use.
+	 */
+	cl_spinlock_acquire( &h_ioc->obj.lock );
+	while( h_ioc->in_use_cnt )
+	{
+		cl_spinlock_release( &h_ioc->obj.lock );
+		cl_thread_suspend( 0 );
+		cl_spinlock_acquire( &h_ioc->obj.lock );
+	}
+	/* Flag the IOC as in use by this thread. */
+	cl_atomic_inc( &h_ioc->in_use_cnt );
+	cl_spinlock_release( &h_ioc->obj.lock );
+
+	/*
+	 * Synchronously destroy the service entry.
+	 * The service handle is invalid when this call returns.
+	 */
+	ref_al_obj( &h_svc->obj );
+	h_svc->obj.pfn_destroy( &h_svc->obj, NULL );
+
+	/* Decrement the service entry count. */
+	h_ioc->ioc_profile.num_svc_entries--;
+
+	/* Indicate that a change occurred on the IOC. */
+	ioc_change( h_ioc );
+
+	/* No longer in use by this thread. */
+	cl_atomic_dec( &h_ioc->in_use_cnt );
+
+	AL_EXIT( AL_DBG_IOC );
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Free a service entry.
+ */
+void
+free_svc_entry(
+	IN		al_obj_t*			p_obj )
+{
+	ib_svc_handle_t		h_svc;
+
+	CL_ASSERT( p_obj );
+	h_svc = PARENT_STRUCT( p_obj, al_svc_entry_t, obj );
+
+	destroy_al_obj( &h_svc->obj );
+	cl_free( h_svc );
+}
+
+
+
+/*
+ * Acquire the IO unit matching the given CA GUID.
+ */
+al_iou_t*
+acquire_iou(
+	IN		const ib_net64_t	ca_guid )
+{
+	cl_list_item_t*		p_iou_item;
+	al_obj_t*			p_obj;
+	al_iou_t*			p_iou;
+
+	/* Search for an existing IO unit matching the CA GUID. */
+	cl_spinlock_acquire( &gp_dm_agent->obj.lock );
+	for( p_iou_item = cl_qlist_head( &gp_dm_agent->obj.obj_list );
+		 p_iou_item != cl_qlist_end( &gp_dm_agent->obj.obj_list );
+		 p_iou_item = cl_qlist_next( p_iou_item ) )
+	{
+		p_obj = PARENT_STRUCT( p_iou_item, al_obj_t, pool_item );
+		p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );
+
+		/* Check for a GUID match. */
+		if( p_iou->obj.p_ci_ca->verbs.guid == ca_guid )
+		{
+			/* Reference the IO unit on behalf of the client. */
+			ref_al_obj( &p_iou->obj );
+
+			cl_spinlock_release( &gp_dm_agent->obj.lock );
+			return p_iou;
+		}
+	}
+	cl_spinlock_release( &gp_dm_agent->obj.lock );
+
+	return NULL;
+}
+
+
+
+/*
+ * Get the IO unit for the given IOC.
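+ *
+ * (An IOC that previously occupied a slot keeps its p_iou pointer across
+ * reset_al_obj - see free_ioc above - so a reused slot resolves its IO
+ * unit directly, without another CA GUID lookup through acquire_iou.)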
+ */
+al_iou_t*
+get_iou(
+	IN		const ib_ioc_handle_t	h_ioc )
+{
+	CL_ASSERT( h_ioc );
+
+	/* Check if the IOC is already attached to an IO unit. */
+	if( h_ioc->p_iou )
+		return h_ioc->p_iou;
+
+	/* The IOC is a new slot.  Acquire the IO unit. */
+	return acquire_iou( h_ioc->obj.p_ci_ca->verbs.guid );
+}
+
+
+
+ib_ioc_handle_t
+get_ioc(
+	IN		const ib_ca_handle_t	h_ca )
+{
+	cl_list_item_t*		p_ioc_item;
+	al_iou_t*			p_iou;
+	ib_ioc_handle_t		h_ioc;
+	boolean_t			found;
+	ib_api_status_t		status;
+
+	found = FALSE;
+	h_ioc = NULL;
+
+	/* Acquire the IO unit. */
+	p_iou = acquire_iou( h_ca->obj.p_ci_ca->verbs.guid );
+
+	if( p_iou )
+	{
+		/* Search for an empty IOC slot in the IO unit. */
+		cl_spinlock_acquire( &p_iou->obj.lock );
+		for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
+			 (p_ioc_item != cl_qlist_end( &p_iou->ioc_list )) && !found;
+			 p_ioc_item = cl_qlist_next( p_ioc_item ) )
+		{
+			h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );
+
+			if( h_ioc->state == EMPTY_SLOT )
+			{
+				/*
+				 * An empty slot was found.
+				 * Change the state to indicate that the slot is in use.
+				 */
+				h_ioc->state = SLOT_IN_USE;
+				found = TRUE;
+			}
+		}
+		cl_spinlock_release( &p_iou->obj.lock );
+	}
+
+	/* Allocate a new IOC if one was not found. */
+	if( !found )
+	{
+		h_ioc = cl_zalloc( sizeof( al_ioc_t ) );
+		if( !h_ioc )
+			return NULL;
+
+		/* Construct the IOC. */
+		construct_al_obj( &h_ioc->obj, AL_OBJ_TYPE_H_IOC );
+
+		/* Initialize the IOC object. */
+		status =
+			init_al_obj( &h_ioc->obj, h_ioc, FALSE, NULL, NULL, free_ioc );
+		if( status != IB_SUCCESS )
+		{
+			free_ioc( &h_ioc->obj );
+			return NULL;
+		}
+	}
+
+	/* Attach the IOC to the CA. */
+	status = attach_al_obj( &h_ca->obj, &h_ioc->obj );
+	if( status != IB_SUCCESS )
+	{
+		h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return NULL;
+	}
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_ioc->obj );
+
+	return h_ioc;
+}
+
+
+
+ib_api_status_t
+add_ioc(
+	IN		al_iou_t*			p_iou,
+	IN		ib_ioc_handle_t		h_ioc )
+{
+	cl_list_item_t*		p_list_item;
+	al_obj_t*			p_obj;
+	al_iou_port_t*		p_iou_port;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_iou );
+	CL_ASSERT( h_ioc );
+
+	/* Attach the IOC to the IO unit. */
+	if( !h_ioc->p_iou )
+	{
+		cl_spinlock_acquire( &p_iou->obj.lock );
+
+		/* Make sure the IO unit can support the new IOC slot. */
+		if( cl_qlist_count( &p_iou->ioc_list ) >=
+			( sizeof( ((ib_iou_info_t*)0)->controller_list ) - 1) )
+		{
+			cl_spinlock_release( &p_iou->obj.lock );
+			deref_al_obj( &p_iou->obj );
+			return IB_INSUFFICIENT_RESOURCES;
+		}
+
+		/* Add a new IOC slot to the IO unit. */
+		cl_qlist_insert_tail( &p_iou->ioc_list, &h_ioc->iou_item );
+		h_ioc->p_iou = p_iou;
+
+		cl_spinlock_release( &p_iou->obj.lock );
+	}
+	else
+	{
+		/* The IOC is being added to an empty IO unit slot. */
+		CL_ASSERT( h_ioc->p_iou == p_iou );
+		CL_ASSERT( h_ioc->state == SLOT_IN_USE );
+	}
+
+	/* Enable the IOC. */
+	h_ioc->state = IOC_ACTIVE;
+
+	/* Indicate that a change occurred on the IO unit. */
+	iou_change( p_iou );
+
+	/* Flag each port on the IO unit CA as supporting device management.
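+	 *
+	 * (For each port, set_port_dm_attr below does two things: it sets the
+	 * IB_CA_MOD_IS_DEV_MGMT_SUPPORTED capability through ib_modify_ca,
+	 * and, if no registration exists yet, registers the DM service with
+	 * the SA.)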
+
+
+
+ib_api_status_t
+add_ioc(
+	IN	al_iou_t*			p_iou,
+	IN	ib_ioc_handle_t		h_ioc )
+{
+	cl_list_item_t*		p_list_item;
+	al_obj_t*			p_obj;
+	al_iou_port_t*		p_iou_port;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_iou );
+	CL_ASSERT( h_ioc );
+
+	/* Attach the IOC to the IO unit. */
+	if( !h_ioc->p_iou )
+	{
+		cl_spinlock_acquire( &p_iou->obj.lock );
+
+		/* Make sure the IO unit can support the new IOC slot. */
+		if( cl_qlist_count( &p_iou->ioc_list ) >=
+			( sizeof( ((ib_iou_info_t*)0)->controller_list ) - 1) )
+		{
+			cl_spinlock_release( &p_iou->obj.lock );
+			deref_al_obj( &p_iou->obj );
+			return IB_INSUFFICIENT_RESOURCES;
+		}
+
+		/* Add a new IOC slot to the IO unit. */
+		cl_qlist_insert_tail( &p_iou->ioc_list, &h_ioc->iou_item );
+		h_ioc->p_iou = p_iou;
+
+		cl_spinlock_release( &p_iou->obj.lock );
+	}
+	else
+	{
+		/* The IOC is being added to an empty IO unit slot. */
+		CL_ASSERT( h_ioc->p_iou == p_iou );
+		CL_ASSERT( h_ioc->state == SLOT_IN_USE );
+	}
+
+	/* Enable the IOC. */
+	h_ioc->state = IOC_ACTIVE;
+
+	/* Indicate that a change occurred on the IO unit. */
+	iou_change( p_iou );
+
+	/* Flag each port on the IO unit CA as supporting device management. */
+	status = IB_SUCCESS;
+	cl_spinlock_acquire( &p_iou->obj.lock );
+	for( p_list_item = cl_qlist_head( &p_iou->obj.obj_list );
+		p_list_item != cl_qlist_end( &p_iou->obj.obj_list );
+		p_list_item = cl_qlist_next( p_list_item ) )
+	{
+		p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item );
+		p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj );
+
+		status = set_port_dm_attr( p_iou_port );
+		if( status != IB_SUCCESS )
+			break;
+	}
+	cl_spinlock_release( &p_iou->obj.lock );
+
+	if( status != IB_SUCCESS )
+		h_ioc->state = SLOT_IN_USE;
+
+	return status;
+}
+
+
+
+void
+ioc_change(
+	IN	ib_ioc_handle_t		h_ioc )
+{
+	CL_ASSERT( h_ioc );
+
+	/* Report a change to the IO unit to which the IOC is attached. */
+	if( h_ioc->p_iou ) iou_change( h_ioc->p_iou );
+}
+
+
+
+void
+iou_change(
+	IN	al_iou_t*	p_iou )
+{
+	CL_ASSERT( p_iou );
+
+	/* Increment the IO unit change counter. */
+	cl_spinlock_acquire( &p_iou->obj.lock );
+	p_iou->change_id++;
+	cl_spinlock_release( &p_iou->obj.lock );
+}
+
+
+
+ib_api_status_t
+set_port_dm_attr(
+	IN	al_iou_port_t*	p_iou_port )
+{
+	ib_port_attr_mod_t	port_attr_mod;
+	ib_reg_svc_req_t	reg_svc_req;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_iou_port );
+
+	/* Initialize a port attribute modification structure. */
+	cl_memclr( &port_attr_mod, sizeof( ib_port_attr_mod_t ) );
+	port_attr_mod.cap.dev_mgmt = TRUE;
+
+	/* Flag the port as supporting device management. */
+	status = ib_modify_ca( p_iou_port->obj.p_ci_ca->h_ca, p_iou_port->port_num,
+		IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, &port_attr_mod );
+
+	if( status != IB_SUCCESS )
+		return status;
+
+	/* Then register a service with the SA if one is needed. */
+	if( !p_iou_port->svc_handle )
+	{
+		/* Build the service registration request. */
+		cl_memclr( &reg_svc_req, sizeof( ib_reg_svc_req_t ) );
+
+		reg_svc_req.svc_rec.service_lease = 0xffffffff;
+		strncpy( (char*)reg_svc_req.svc_rec.service_name, DM_SVC_NAME,
+			sizeof( reg_svc_req.svc_rec.service_name ) );
+		reg_svc_req.svc_rec.service_gid = p_iou_port->port_gid;
+		reg_svc_req.port_guid = p_iou_port->port_guid;
+
+		reg_svc_req.timeout_ms = SVC_REG_TIMEOUT;
+		reg_svc_req.retry_cnt = SVC_REG_RETRY_CNT;
+		reg_svc_req.svc_context = p_iou_port;
+		reg_svc_req.pfn_reg_svc_cb = iou_port_svc_reg_cb;
+		reg_svc_req.svc_data_mask = IB_SR_COMPMASK_SGID |
+			IB_SR_COMPMASK_SPKEY |
+			IB_SR_COMPMASK_SLEASE |
+			IB_SR_COMPMASK_SNAME;
+
+		/* Reference the IO unit port on behalf of the ib_reg_svc call. */
+		ref_al_obj( &p_iou_port->obj );
+
+		status = ib_reg_svc( gh_al, &reg_svc_req, &p_iou_port->svc_handle );
+
+		if( status != IB_SUCCESS )
+		{
+			deref_al_obj( &p_iou_port->obj );
+
+			/* Ignore this error - the SM will sweep port attribute changes. */
+			status = IB_SUCCESS;
+		}
+	}
+
+	return status;
+}
+
+
+
+void
+iou_port_svc_reg_cb(
+	IN	ib_reg_svc_rec_t*	p_reg_svc_rec )
+{
+	al_iou_port_t*		p_iou_port;
+
+	CL_ASSERT( p_reg_svc_rec );
+
+	p_iou_port = (al_iou_port_t* __ptr64)p_reg_svc_rec->svc_context;
+
+	if( p_reg_svc_rec->req_status != IB_SUCCESS )
+		deref_al_obj( &p_iou_port->obj );
+}
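+
+/*
+ * set_port_dm_attr above follows the usual asynchronous reference rule
+ * for ib_reg_svc: take a reference before issuing the request, release
+ * it on immediate failure, and otherwise let the registration callback
+ * (or the destroy callback passed to ib_dereg_svc) release it.  A
+ * compiled-out restatement of that contract; p_obj, req and h_svc are
+ * placeholder names:
+ */
+#if 0
+	ref_al_obj( &p_obj );			/* on behalf of the request */
+	status = ib_reg_svc( gh_al, &req, &h_svc );
+	if( status != IB_SUCCESS )
+		deref_al_obj( &p_obj );		/* request never started */
+	/* On success, iou_port_svc_reg_cb releases the reference if the
+	 * registration fails, and ib_dereg_svc releases it via
+	 * (ib_pfn_destroy_cb_t)deref_al_obj at deregistration. */
+#endif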
+
+
+/*
+ * Device Management Agent
+ */
+
+
+/*
+ * Create the device management agent.
+ */
+ib_api_status_t
+create_dm_agent(
+	IN	al_obj_t* const		p_parent_obj )
+{
+	cl_status_t			cl_status;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_parent_obj );
+	CL_ASSERT( !gp_dm_agent );
+
+	gp_dm_agent = cl_zalloc( sizeof( dm_agent_t ) );
+	if( !gp_dm_agent )
+		return IB_INSUFFICIENT_MEMORY;
+
+	/* Construct the device management agent. */
+	construct_al_obj( &gp_dm_agent->obj, AL_OBJ_TYPE_DM );
+	cl_spinlock_construct( &gp_dm_agent->lock );
+
+	cl_status = cl_spinlock_init( &gp_dm_agent->lock );
+	if( cl_status != CL_SUCCESS )
+	{
+		free_dm_agent( &gp_dm_agent->obj );
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Initialize the device management agent object. */
+	status = init_al_obj( &gp_dm_agent->obj, gp_dm_agent, TRUE,
+		destroying_dm_agent, NULL, free_dm_agent );
+	if( status != IB_SUCCESS )
+	{
+		free_dm_agent( &gp_dm_agent->obj );
+		return status;
+	}
+
+	/* Attach the device management agent to the parent object. */
+	status = attach_al_obj( p_parent_obj, &gp_dm_agent->obj );
+	if( status != IB_SUCCESS )
+	{
+		gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Register for CA PnP events. */
+	status = dm_agent_reg_pnp( IB_PNP_CA, &gp_dm_agent->h_ca_pnp );
+	if (status != IB_SUCCESS)
+	{
+		gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );
+		return status;
+	}
+
+	/* Register for port PnP events. */
+	status = dm_agent_reg_pnp( IB_PNP_PORT, &gp_dm_agent->h_port_pnp );
+	if (status != IB_SUCCESS)
+	{
+		gp_dm_agent->obj.pfn_destroy( &gp_dm_agent->obj, NULL );
+		return status;
+	}
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &gp_dm_agent->obj );
+
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Pre-destroy the device management agent.
+ */
+void
+destroying_dm_agent(
+	IN	al_obj_t*	p_obj )
+{
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_obj );
+	CL_ASSERT( gp_dm_agent == PARENT_STRUCT( p_obj, dm_agent_t, obj ) );
+	UNUSED_PARAM( p_obj );
+
+	/* Mark that we're destroying the agent. */
+	cl_spinlock_acquire( &gp_dm_agent->lock );
+	gp_dm_agent->destroying = TRUE;
+	cl_spinlock_release( &gp_dm_agent->lock );
+
+	/* Deregister for port PnP events. */
+	if( gp_dm_agent->h_port_pnp )
+	{
+		status = ib_dereg_pnp( gp_dm_agent->h_port_pnp,
+			(ib_pfn_destroy_cb_t)deref_al_obj );
+		CL_ASSERT( status == IB_SUCCESS );
+	}
+
+	/* Deregister for CA PnP events. */
+	if( gp_dm_agent->h_ca_pnp )
+	{
+		status = ib_dereg_pnp( gp_dm_agent->h_ca_pnp,
+			(ib_pfn_destroy_cb_t)deref_al_obj );
+		CL_ASSERT( status == IB_SUCCESS );
+	}
+}
+
+
+
+/*
+ * Free the device management agent.
+ */
+void
+free_dm_agent(
+	IN	al_obj_t*	p_obj )
+{
+	CL_ASSERT( p_obj );
+	CL_ASSERT( gp_dm_agent == PARENT_STRUCT( p_obj, dm_agent_t, obj ) );
+	UNUSED_PARAM( p_obj );
+
+	destroy_al_obj( &gp_dm_agent->obj );
+	cl_free( gp_dm_agent );
+	gp_dm_agent = NULL;
+}
+
+
+
+/*
+ * Register the device management agent for the given PnP class events.
+ */
+ib_api_status_t
+dm_agent_reg_pnp(
+	IN	ib_pnp_class_t		pnp_class,
+	IN	ib_pnp_handle_t*	ph_pnp )
+{
+	ib_api_status_t		status;
+	ib_pnp_req_t		pnp_req;
+
+	CL_ASSERT( ph_pnp );
+
+	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
+	pnp_req.pnp_class = pnp_class;
+	pnp_req.pnp_context = gp_dm_agent;
+	pnp_req.pfn_pnp_cb = dm_agent_pnp_cb;
+
+	status = ib_reg_pnp( gh_al, &pnp_req, ph_pnp );
+
+	/* Reference the DM agent on behalf of the ib_reg_pnp call. */
+	if( status == IB_SUCCESS )
+		ref_al_obj( &gp_dm_agent->obj );
+
+	return status;
+}
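+
+/*
+ * dm_agent_reg_pnp above takes one reference on the DM agent per
+ * successful registration; destroying_dm_agent releases it by passing
+ * deref_al_obj as the destroy callback of ib_dereg_pnp.  A compiled-out
+ * view of the pairing (h_pnp is a placeholder name):
+ */
+#if 0
+	/* Registration: reference the agent once the handle exists. */
+	status = ib_reg_pnp( gh_al, &pnp_req, &h_pnp );
+	if( status == IB_SUCCESS )
+		ref_al_obj( &gp_dm_agent->obj );
+
+	/* Deregistration: the reference drops only after all PnP
+	 * callbacks have unwound. */
+	ib_dereg_pnp( h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj );
+#endif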
+
+
+
+/*
+ * Device management agent PnP event callback.
+ */
+ib_api_status_t
+dm_agent_pnp_cb(
+	IN	ib_pnp_rec_t*	p_pnp_rec )
+{
+	ib_api_status_t		status;
+	al_iou_t*			p_iou;
+	al_iou_port_t*		p_iou_port;
+
+	CL_ASSERT( p_pnp_rec );
+	CL_ASSERT( p_pnp_rec->pnp_context == gp_dm_agent );
+
+	/* Dispatch based on the PnP event type. */
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_CA_ADD:
+		status = create_iou( p_pnp_rec );
+		break;
+
+	case IB_PNP_CA_REMOVE:
+		CL_ASSERT( p_pnp_rec->context );
+		p_iou = p_pnp_rec->context;
+		ref_al_obj( &p_iou->obj );
+		p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
+		status = IB_SUCCESS;
+		break;
+
+	case IB_PNP_PORT_ADD:
+		CL_ASSERT( !p_pnp_rec->context );
+		status = create_iou_port( (ib_pnp_port_rec_t*)p_pnp_rec );
+		break;
+
+	case IB_PNP_PORT_REMOVE:
+		CL_ASSERT( p_pnp_rec->context );
+		p_iou_port = p_pnp_rec->context;
+		ref_al_obj( &p_iou_port->obj );
+		p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
+		/* Fall through to the default case to report success. */
+
+	default:
+		/* All other events are ignored. */
+		status = IB_SUCCESS;
+		break;
+	}
+
+	return status;
+}
+
+
+
+/*
+ * Create an IO unit.
+ */
+ib_api_status_t
+create_iou(
+	IN	ib_pnp_rec_t*	p_pnp_rec )
+{
+	al_iou_t*			p_iou;
+	ib_ca_handle_t		h_ca;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_pnp_rec );
+
+	p_iou = cl_zalloc( sizeof( al_iou_t ) );
+	if( !p_iou )
+		return IB_INSUFFICIENT_MEMORY;
+
+	/* Construct the IO unit object. */
+	construct_al_obj( &p_iou->obj, AL_OBJ_TYPE_IOU );
+
+	/* Initialize the IO unit object. */
+	status =
+		init_al_obj( &p_iou->obj, p_iou, TRUE, NULL, cleanup_iou, free_iou );
+	if( status != IB_SUCCESS )
+	{
+		free_iou( &p_iou->obj );
+		return status;
+	}
+
+	/*
+	 * Attach the IO unit to the device management agent.  Lock and
+	 * check to synchronize the destruction of the user-mode device
+	 * management agent with the creation of the IO unit through a
+	 * PnP callback.
+	 */
+	cl_spinlock_acquire( &gp_dm_agent->lock );
+	if( gp_dm_agent->destroying )
+	{
+		p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
+		cl_spinlock_release( &gp_dm_agent->lock );
+		return IB_INVALID_STATE;
+	}
+	status = attach_al_obj( &gp_dm_agent->obj, &p_iou->obj );
+	if( status != IB_SUCCESS )
+	{
+		p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
+		cl_spinlock_release( &gp_dm_agent->lock );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+	cl_spinlock_release( &gp_dm_agent->lock );
+
+	/* It is now safe to acquire the CA and initialize the p_ci_ca pointer. */
+	h_ca = acquire_ca( p_pnp_rec->guid );
+	if( !h_ca )
+	{
+		p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("acquire_ca for GUID %016I64x failed.\n", p_pnp_rec->guid) );
+		return IB_INVALID_CA_HANDLE;
+	}
+
+	p_iou->obj.p_ci_ca = h_ca->obj.p_ci_ca;
+
+	/* Initialize the IO unit IOC list. */
+	cl_qlist_init( &p_iou->ioc_list );
+
+	/* Set the context of the PnP event to this child object. */
+	p_pnp_rec->context = p_iou;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &p_iou->obj );
+
+	return IB_SUCCESS;
+}
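+
+/*
+ * The PnP context stored at the end of create_iou above is what
+ * dm_agent_pnp_cb reads back when the CA goes away.  A compiled-out
+ * summary of the round trip:
+ */
+#if 0
+	/* IB_PNP_CA_ADD - remembered by the PnP manager: */
+	p_pnp_rec->context = p_iou;
+
+	/* IB_PNP_CA_REMOVE, possibly much later: */
+	p_iou = p_pnp_rec->context;
+	ref_al_obj( &p_iou->obj );		/* pfn_destroy expects a reference */
+	p_iou->obj.pfn_destroy( &p_iou->obj, NULL );
+#endif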
+
+
+
+/*
+ * Cleanup an IO unit.
+ */
+void
+cleanup_iou(
+	IN	al_obj_t*	p_obj )
+{
+	al_iou_t*			p_iou;
+	cl_list_item_t*		p_ioc_item;
+	ib_ioc_handle_t		h_ioc;
+
+	CL_ASSERT( p_obj );
+	p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );
+
+	/* No need to lock during cleanup. */
+	for( p_ioc_item = cl_qlist_remove_head( &p_iou->ioc_list );
+		p_ioc_item != cl_qlist_end( &p_iou->ioc_list );
+		p_ioc_item = cl_qlist_remove_head( &p_iou->ioc_list ) )
+	{
+		/* The IOC list links through the iou_item field. */
+		h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );
+
+		CL_ASSERT( h_ioc->state == EMPTY_SLOT );
+
+		/* Detach the IOC from the IO unit. */
+		CL_ASSERT( h_ioc->p_iou == p_iou );
+		h_ioc->p_iou = NULL;
+
+		/* Destroy the IOC. */
+		ref_al_obj( &h_ioc->obj );
+		h_ioc->obj.pfn_destroy( &h_ioc->obj, NULL );
+	}
+}
+
+
+
+/*
+ * Free an IO unit.
+ */
+void
+free_iou(
+	IN	al_obj_t*	p_obj )
+{
+	al_iou_t*			p_iou;
+
+	CL_ASSERT( p_obj );
+
+	p_iou = PARENT_STRUCT( p_obj, al_iou_t, obj );
+
+	/* Dereference the CA. */
+	if( p_iou->obj.p_ci_ca )
+		deref_al_obj( &p_iou->obj.p_ci_ca->h_ca->obj );
+
+	destroy_al_obj( &p_iou->obj );
+	cl_free( p_iou );
+}
+
+
+
+/*
+ * Create an IO unit port.
+ */
+ib_api_status_t
+create_iou_port(
+	IN	ib_pnp_port_rec_t*	p_pnp_rec )
+{
+	al_iou_port_t*		p_iou_port;
+	al_iou_t*			p_iou;
+	ib_qp_create_t		qp_create;
+	ib_mad_svc_t		mad_svc;
+	ib_api_status_t		status;
+
+	CL_ASSERT( p_pnp_rec );
+
+	CL_ASSERT( p_pnp_rec->p_ca_attr );
+	CL_ASSERT( p_pnp_rec->p_port_attr );
+
+	p_iou_port = cl_zalloc( sizeof( al_iou_port_t ) );
+	if( !p_iou_port )
+		return IB_INSUFFICIENT_MEMORY;
+
+	/* Construct the IO unit port object. */
+	construct_al_obj( &p_iou_port->obj, AL_OBJ_TYPE_IOU );
+
+	/* Initialize the IO unit port object. */
+	status = init_al_obj( &p_iou_port->obj, p_iou_port, TRUE,
+		destroying_iou_port, NULL, free_iou_port );
+	if( status != IB_SUCCESS )
+	{
+		free_iou_port( &p_iou_port->obj );
+		return status;
+	}
+
+	/* Acquire the IO unit. */
+	p_iou = acquire_iou( p_pnp_rec->p_ca_attr->ca_guid );
+	if( !p_iou )
+	{
+		p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
+		return IB_INVALID_GUID;
+	}
+
+	/* Attach the IO unit port to the IO unit. */
+	status = attach_al_obj( &p_iou->obj, &p_iou_port->obj );
+	if( status != IB_SUCCESS )
+	{
+		p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+	deref_al_obj( &p_iou->obj );
+
+	/* Save the port number. */
+	p_iou_port->port_num = p_pnp_rec->p_port_attr->port_num;
+
+	/* Save the port GUID - used in svc reg. */
+	p_iou_port->port_guid = p_pnp_rec->pnp_rec.guid;
+
+	/* Save the default port gid and pkey. */
+	p_iou_port->port_gid = p_pnp_rec->p_port_attr->p_gid_table[0];
+	p_iou_port->port_pkey = p_pnp_rec->p_port_attr->p_pkey_table[0];
+
+	/* Create a QP alias. */
+	cl_memclr( &qp_create, sizeof( ib_qp_create_t ) );
+	qp_create.qp_type = IB_QPT_QP1_ALIAS;
+	qp_create.sq_depth = 1;
+	qp_create.sq_sge = 1;
+	qp_create.sq_signaled = TRUE;
+
+	status = ib_get_spl_qp( p_iou_port->obj.p_ci_ca->h_pd_alias,
+		p_pnp_rec->p_port_attr->port_guid, &qp_create,
+		p_iou_port, iou_port_event_cb, &p_iou_port->pool_key,
+		&p_iou_port->h_qp_alias );
+
+	if (status != IB_SUCCESS)
+	{
+		p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL );
+		return status;
+	}
+
+	/* Reference the IO unit port on behalf of ib_get_spl_qp. */
+	ref_al_obj( &p_iou_port->obj );
+
+	/* Register the MAD service for device management.
*/ + cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) ); + mad_svc.mad_svc_context = p_iou_port; + mad_svc.pfn_mad_send_cb = dm_agent_send_cb; + mad_svc.pfn_mad_recv_cb = dm_agent_recv_cb; + mad_svc.support_unsol = TRUE; + mad_svc.mgmt_class = IB_MCLASS_DEV_MGMT; + mad_svc.mgmt_version = 1; + mad_svc.method_array[ IB_MAD_METHOD_GET ] = TRUE; + mad_svc.method_array[ IB_MAD_METHOD_SET ] = TRUE; + + status = ib_reg_mad_svc( p_iou_port->h_qp_alias, &mad_svc, + &p_iou_port->h_mad_svc ); + if( status != IB_SUCCESS ) + { + p_iou_port->obj.pfn_destroy( &p_iou_port->obj, NULL ); + return status; + } + + /* Determine if any IOCs are attached to this IO unit. */ + cl_spinlock_acquire( &p_iou->obj.lock ); + if( !cl_is_qlist_empty( &p_iou->ioc_list ) ) + { + /* Set the device management port attribute. */ + status = set_port_dm_attr( p_iou_port ); + CL_ASSERT( status == IB_SUCCESS ); + } + cl_spinlock_release( &p_iou->obj.lock ); + + /* Set the context of the PnP event to this child object. */ + p_pnp_rec->pnp_rec.context = p_iou_port; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_iou_port->obj ); + + return IB_SUCCESS; +} + + + +/* + * Pre-destroy an IO unit port. + */ +void +destroying_iou_port( + IN al_obj_t* p_obj ) +{ + al_iou_port_t* p_iou_port; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj ); + + /* Deregister the device management service. */ + if( p_iou_port->svc_handle ) + { + status = ib_dereg_svc( p_iou_port->svc_handle, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + /* Destroy the QP alias. */ + if( p_iou_port->h_qp_alias ) + { + status = ib_destroy_qp( p_iou_port->h_qp_alias, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + + +/* + * Free an IO unit port. + */ +void +free_iou_port( + IN al_obj_t* p_obj ) +{ + al_iou_port_t* p_iou_port; + + CL_ASSERT( p_obj ); + + p_iou_port = PARENT_STRUCT( p_obj, al_iou_port_t, obj ); + + destroy_al_obj( &p_iou_port->obj ); + cl_free( p_iou_port ); +} + + + +/* + * IO unit port asynchronous event callback. + */ +void +iou_port_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); + + /* The QP is an alias, so if we've received an error, it is unusable. */ +} + + + +/* + * Device management agent send completion callback. + */ +void +dm_agent_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void* mad_svc_context, + IN ib_mad_element_t* p_mad_response ) +{ + ib_api_status_t status; + + CL_ASSERT( mad_svc_context ); + CL_ASSERT( p_mad_response ); + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + + /* Return the MAD. */ + status = ib_destroy_av( p_mad_response->h_av ); + CL_ASSERT( status == IB_SUCCESS ); + status = ib_put_mad( p_mad_response ); + CL_ASSERT( status == IB_SUCCESS ); +} + + + +/* + * Device management agent receive completion callback. + */ +void +dm_agent_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void* mad_svc_context, + IN ib_mad_element_t* p_mad_request ) +{ + al_iou_port_t* p_iou_port; + ib_mad_element_t* p_mad_response; + ib_mad_t* p_mad_req; + ib_mad_t* p_mad_rsp; + ib_av_attr_t av_attr; + ib_api_status_t status; + + CL_ASSERT( mad_svc_context ); + CL_ASSERT( p_mad_request ); + + p_iou_port = mad_svc_context; + p_mad_req = ib_get_mad_buf( p_mad_request ); + + /* Get a MAD element for the response. 
*/ + status = ib_get_mad( p_iou_port->pool_key, MAD_BLOCK_SIZE, + &p_mad_response ); + + if( status != IB_SUCCESS ) + { + status = ib_put_mad( p_mad_request ); + CL_ASSERT( status == IB_SUCCESS ); + return; + } + + /* Initialize the response MAD element. */ + p_mad_response->remote_qp = p_mad_request->remote_qp; + p_mad_response->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_mad_rsp = ib_get_mad_buf( p_mad_response ); + + /* Create an address vector for the response. */ + cl_memclr( &av_attr, sizeof( ib_av_attr_t ) ); + av_attr.port_num = p_iou_port->port_num; + av_attr.sl = p_mad_request->remote_sl; + av_attr.dlid = p_mad_request->remote_lid; + av_attr.path_bits = p_mad_request->path_bits; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + if( p_mad_request->grh_valid ) + { + av_attr.grh_valid = TRUE; + av_attr.grh = *p_mad_request->p_grh; + } + + status = ib_create_av( p_iou_port->obj.p_ci_ca->h_pd_alias, &av_attr, + &p_mad_response->h_av ); + + if( status != IB_SUCCESS ) + { + status = ib_put_mad( p_mad_request ); + CL_ASSERT( status == IB_SUCCESS ); + status = ib_put_mad( p_mad_response ); + CL_ASSERT( status == IB_SUCCESS ); + return; + } + + /* Initialize the response header. */ + ib_mad_init_response( p_mad_req, p_mad_rsp, 0 ); + + /* Process the MAD request. */ + switch( p_mad_req->method ) + { + case IB_MAD_METHOD_GET: + dm_agent_get( p_iou_port, p_mad_req, p_mad_rsp ); + break; + + case IB_MAD_METHOD_SET: + dm_agent_set( p_iou_port, p_mad_req, p_mad_rsp ); + break; + + default: + p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD; + break; + } + + /* Return the request to the pool. */ + status = ib_put_mad( p_mad_request ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Send the response. */ + status = ib_send_mad( h_mad_svc, p_mad_response, NULL ); + + if( status != IB_SUCCESS ) + { + status = ib_destroy_av( p_mad_response->h_av ); + CL_ASSERT( status == IB_SUCCESS ); + status = ib_put_mad( p_mad_response ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + + +/* + * Device management agent get method MAD. + */ +void +dm_agent_get( + IN al_iou_port_t* p_iou_port, + IN ib_mad_t* p_mad_req, + IN ib_mad_t* p_mad_rsp ) +{ + al_iou_t* p_iou; + ib_dm_mad_t* p_dm_mad; + + CL_ASSERT( p_iou_port ); + CL_ASSERT( p_mad_req ); + CL_ASSERT( p_mad_rsp ); + + p_iou = PARENT_STRUCT( p_iou_port->obj.p_parent_obj, al_iou_t, obj ); + + p_dm_mad = (ib_dm_mad_t*)p_mad_rsp; + + switch( p_mad_req->attr_id ) + { + case IB_MAD_ATTR_CLASS_PORT_INFO: + get_class_port_info( p_iou, p_dm_mad ); + break; + + case IB_MAD_ATTR_IO_UNIT_INFO: + get_io_unit_info( p_iou, p_dm_mad ); + break; + + case IB_MAD_ATTR_IO_CONTROLLER_PROFILE: + { + uint8_t slot; + + slot = (uint8_t)CL_NTOH32( p_dm_mad->hdr.attr_mod ); + + get_ioc_profile( p_iou, slot, p_dm_mad ); + break; + } + + case IB_MAD_ATTR_SERVICE_ENTRIES: + { + uint8_t slot; + uint8_t svc_num_hi; + uint8_t svc_num_lo; + + ib_dm_get_slot_lo_hi( p_dm_mad->hdr.attr_mod, &slot, + &svc_num_hi, &svc_num_lo ); + + get_svc_entries( p_iou, slot, svc_num_lo, svc_num_hi, p_dm_mad ); + break; + } + + case IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT: + case IB_MAD_ATTR_PREPARE_TO_TEST: + case IB_MAD_ATTR_DIAG_CODE: + default: + p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD_ATTR; + break; + } +} + + + +/* + * Device management agent set method MAD. 
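+ */
+
+/*
+ * A note on the receive path that feeds the Get/Set handlers here:
+ * dm_agent_recv_cb owns two resources per response - the MAD element
+ * from ib_get_mad and the address vector from ib_create_av.  A
+ * compiled-out sketch of its cleanup contract, using only calls that
+ * appear in this file:
+ */
+#if 0
+	/* ib_get_mad failed: only the request must be returned. */
+	ib_put_mad( p_mad_request );
+
+	/* ib_create_av failed: return both elements. */
+	ib_put_mad( p_mad_request );
+	ib_put_mad( p_mad_response );
+
+	/* ib_send_mad failed: no send completion will arrive, so undo
+	 * the AV and the response element here. */
+	ib_destroy_av( p_mad_response->h_av );
+	ib_put_mad( p_mad_response );
+
+	/* On success, dm_agent_send_cb destroys the AV and returns the
+	 * response element to its pool. */
+#endif
+
+/*
+ * Device management agent set method MAD.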
+ */
+void
+dm_agent_set(
+	IN	al_iou_port_t*	p_iou_port,
+	IN	ib_mad_t*		p_mad_req,
+	IN	ib_mad_t*		p_mad_rsp )
+{
+	ib_dm_mad_t*		p_dm_mad;
+
+	CL_ASSERT( p_iou_port );
+	CL_ASSERT( p_mad_req );
+	CL_ASSERT( p_mad_rsp );
+	UNUSED_PARAM( p_iou_port );
+
+	p_dm_mad = (ib_dm_mad_t*)p_mad_rsp;
+
+	switch( p_mad_req->attr_id )
+	{
+	case IB_MAD_ATTR_CLASS_PORT_INFO:
+		break;
+
+	case IB_MAD_ATTR_PREPARE_TO_TEST:
+	case IB_MAD_ATTR_TEST_DEVICE_ONCE:
+	case IB_MAD_ATTR_TEST_DEVICE_LOOP:
+	default:
+		p_mad_rsp->status = IB_MAD_STATUS_UNSUP_METHOD_ATTR;
+		break;
+	}
+}
+
+
+void
+get_class_port_info(
+	IN	al_iou_t*		p_iou,
+	IN	ib_dm_mad_t*	p_dm_mad )
+{
+	ib_class_port_info_t*	p_class_port_info;
+
+	CL_ASSERT( p_iou );
+	CL_ASSERT( p_dm_mad );
+	UNUSED_PARAM( p_iou );
+
+	p_class_port_info = (ib_class_port_info_t*)&p_dm_mad->data;
+
+	p_class_port_info->base_ver = 1;
+	p_class_port_info->class_ver = 1;
+	p_class_port_info->resp_time_val = CL_HTON32( DM_CLASS_RESP_TIME_VALUE );
+}
+
+
+
+void
+get_io_unit_info(
+	IN	al_iou_t*		p_iou,
+	IN	ib_dm_mad_t*	p_dm_mad )
+{
+	ib_iou_info_t*		p_iou_info;
+	cl_list_item_t*		p_ioc_item;
+	ib_ioc_handle_t		h_ioc;
+	uint8_t				slot;
+
+	CL_ASSERT( p_iou );
+	CL_ASSERT( p_dm_mad );
+
+	p_iou_info = (ib_iou_info_t*)&p_dm_mad->data;
+
+	cl_spinlock_acquire( &p_iou->obj.lock );
+
+	p_iou_info->change_id = p_iou->change_id;
+
+	/* Mark all slots as non-existent. */
+	SET_NIBBLE( &slot, 0, SLOT_DOES_NOT_EXIST );
+	SET_NIBBLE( &slot, 1, SLOT_DOES_NOT_EXIST );
+	cl_memset( p_iou_info->controller_list, slot,
+		sizeof( p_iou_info->controller_list ) );
+
+	/* Now mark the existing slots. */
+	slot = 1;
+	for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
+		p_ioc_item != cl_qlist_end( &p_iou->ioc_list );
+		p_ioc_item = cl_qlist_next( p_ioc_item ) )
+	{
+		h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );
+
+		switch( h_ioc->state )
+		{
+		case EMPTY_SLOT:
+		case SLOT_IN_USE:
+			SET_NIBBLE( p_iou_info->controller_list, slot, IOC_NOT_INSTALLED );
+			break;
+
+		case IOC_ACTIVE:
+			SET_NIBBLE( p_iou_info->controller_list, slot, IOC_INSTALLED );
+			break;
+
+		default:
+			break;
+		}
+		slot++;
+	}
+
+	p_iou_info->max_controllers = slot;
+
+	cl_spinlock_release( &p_iou->obj.lock );
+}
+
+
+
+void
+get_ioc_profile(
+	IN	al_iou_t*		p_iou,
+	IN	uint8_t			slot,
+	IN	ib_dm_mad_t*	p_dm_mad )
+{
+	ib_ioc_profile_t*	p_ioc_profile;
+	cl_list_item_t*		p_ioc_item;
+	ib_ioc_handle_t		h_ioc;
+
+	CL_ASSERT( p_iou );
+	CL_ASSERT( p_dm_mad );
+
+	p_ioc_profile = (ib_ioc_profile_t*)&p_dm_mad->data;
+
+	cl_spinlock_acquire( &p_iou->obj.lock );
+
+	/* Verify that the slot number is within range. */
+	if( ( slot == 0 ) ||
+		( slot > cl_qlist_count( &p_iou->ioc_list ) ) )
+	{
+		cl_spinlock_release( &p_iou->obj.lock );
+		p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD;
+		return;
+	}
+
+	/* The remaining code assumes the slot number starts at zero. */
+	for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list );
+		p_ioc_item != cl_qlist_end( &p_iou->ioc_list ) && slot;
+		p_ioc_item = cl_qlist_next( p_ioc_item ) )
+	{
+		slot--;
+	}
+
+	h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item );
+
+	cl_spinlock_acquire( &h_ioc->obj.lock );
+
+	/* Verify the IOC state. */
+	if( h_ioc->state != IOC_ACTIVE )
+	{
+		cl_spinlock_release( &h_ioc->obj.lock );
+		cl_spinlock_release( &p_iou->obj.lock );
+		p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_IOC_RESP;
+		return;
+	}
+
+	/* Copy the IOC profile.
*/ + *p_ioc_profile = h_ioc->ioc_profile; + + cl_spinlock_release( &h_ioc->obj.lock ); + cl_spinlock_release( &p_iou->obj.lock ); +} + + + +void +get_svc_entries( + IN al_iou_t* p_iou, + IN uint8_t slot, + IN uint8_t svc_num_lo, + IN uint8_t svc_num_hi, + IN ib_dm_mad_t* p_dm_mad ) +{ + ib_svc_entries_t* p_svc_entries; + cl_list_item_t* p_ioc_item; + cl_list_item_t* p_list_item; + ib_ioc_handle_t h_ioc; + al_obj_t* p_obj; + al_svc_entry_t* p_svc_entry; + uint8_t i, j, k; + + CL_ASSERT( p_iou ); + CL_ASSERT( p_dm_mad ); + + p_svc_entries = (ib_svc_entries_t*)&p_dm_mad->data; + + cl_spinlock_acquire( &p_iou->obj.lock ); + + /* + * Verify that the slot number is within range and + * a maximum of SVC_ENTRY_COUNT entries is requested. + */ + if( ( slot == 0 ) || + ( slot > cl_qlist_count( &p_iou->ioc_list ) ) || + ( ( svc_num_hi - svc_num_lo + 1) > SVC_ENTRY_COUNT ) ) + { + cl_spinlock_release( &p_iou->obj.lock ); + p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD; + return; + } + + /* The remaining code assumes the slot number starts at zero. */ + for( p_ioc_item = cl_qlist_head( &p_iou->ioc_list ); + p_ioc_item != cl_qlist_end( &p_iou->ioc_list ) && slot; + p_ioc_item = cl_qlist_next( p_ioc_item ) ) + { + slot--; + } + + h_ioc = PARENT_STRUCT( p_ioc_item, al_ioc_t, iou_item ); + + cl_spinlock_acquire( &h_ioc->obj.lock ); + + /* Verify the IOC state. */ + if( h_ioc->state != IOC_ACTIVE ) + { + cl_spinlock_release( &h_ioc->obj.lock ); + cl_spinlock_release( &p_iou->obj.lock ); + p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_IOC_RESP; + return; + } + + /* Verify the service entry range. */ + if( ( svc_num_lo > h_ioc->ioc_profile.num_svc_entries ) || + ( svc_num_hi >= h_ioc->ioc_profile.num_svc_entries ) ) + { + cl_spinlock_release( &h_ioc->obj.lock ); + cl_spinlock_release( &p_iou->obj.lock ); + p_dm_mad->hdr.status = IB_MAD_STATUS_INVALID_FIELD; + return; + } + + for( i = svc_num_lo, j = 0; j < ( svc_num_hi - svc_num_lo + 1 ); i++, j++ ) + { + k = i; + + /* Locate the service entry. Traverse until k=0. */ + for( p_list_item = cl_qlist_head( &h_ioc->obj.obj_list ); + k && ( p_list_item != cl_qlist_end( &h_ioc->obj.obj_list ) ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + k--; + } + + if( p_list_item == cl_qlist_end( &h_ioc->obj.obj_list ) ) + { + /* The service entry list was empty or the end was reached. */ + cl_spinlock_release( &h_ioc->obj.lock ); + cl_spinlock_release( &p_iou->obj.lock ); + p_dm_mad->hdr.status = IB_DM_MAD_STATUS_NO_SVC_ENTRIES; + return; + } + + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, obj_list ); + p_svc_entry = PARENT_STRUCT( p_obj, al_svc_entry_t, obj ); + + /* Copy the service entry. */ + p_svc_entries->service_entry[ j ] = p_svc_entry->svc_entry; + } + + cl_spinlock_release( &h_ioc->obj.lock ); + cl_spinlock_release( &p_iou->obj.lock ); +} diff --git a/branches/Ndi/core/al/al_dm.h b/branches/Ndi/core/al/al_dm.h new file mode 100644 index 00000000..f220dd87 --- /dev/null +++ b/branches/Ndi/core/al/al_dm.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_DM_H__) +#define __AL_DM_H__ + +#include +#include "al_common.h" + + +typedef struct _dm_agent /* Global device management agent struct */ +{ + al_obj_t obj; /* Child of al_mgr_t */ + ib_pnp_handle_t h_ca_pnp; /* Handle for CA PnP events */ + ib_pnp_handle_t h_port_pnp; /* Handle for Port PnP events */ + + /* + * Lock and state to synchronize user-mode device management + * agent destruction with PnP callbacks to create IO units. + */ + cl_spinlock_t lock; + boolean_t destroying; + +} dm_agent_t; + + +typedef struct _al_iou /* IO unit struct - max of one per CA */ +{ + al_obj_t obj; /* Child of dm_agent_t */ + + uint16_t change_id; + cl_qlist_t ioc_list; /* List of IOCs */ + +} al_iou_t; + + +typedef struct _al_iou_port /* Per-port object of an IO unit */ +{ + al_obj_t obj; /* Child of al_iou_t */ + + uint8_t port_num; + net64_t port_guid; + ib_gid_t port_gid; + ib_net16_t port_pkey; + ib_qp_handle_t h_qp_alias; + ib_mad_svc_handle_t h_mad_svc; + + ib_pool_key_t pool_key; + + ib_reg_svc_handle_t svc_handle; /* Service registration handle */ + +} al_iou_port_t; + + +typedef enum _ioc_state /* An IOC represents a slot in an IO unit */ +{ + IOC_INIT = 0, + EMPTY_SLOT, + SLOT_IN_USE, + IOC_ACTIVE + +} ioc_state_t; + + +#pragma warning(disable:4324) +typedef struct _al_ioc +{ + al_obj_t obj; /* Child of ib_ca_t */ + + cl_list_item_t iou_item; /* Item on IO Unit list */ + al_iou_t* p_iou; + + ioc_state_t state; + ib_ioc_profile_t ioc_profile; + + atomic32_t in_use_cnt; + +} al_ioc_t; +#pragma warning(default:4324) + + +typedef struct _al_svc_entry +{ + al_obj_t obj; /* Child of al_ioc_t */ + ib_svc_entry_t svc_entry; + +} al_svc_entry_t; + + +ib_api_status_t +create_dm_agent( + IN al_obj_t* const p_parent_obj ); + + +#endif /* __AL_DM_H__ */ diff --git a/branches/Ndi/core/al/al_init.c b/branches/Ndi/core/al/al_init.c new file mode 100644 index 00000000..2b2aca5e --- /dev/null +++ b/branches/Ndi/core/al/al_init.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_init.tmh" +#endif + +#include "al_dev.h" +#include "al_init.h" +#include "al_mgr.h" + +#include "ib_common.h" + + + +uint32_t g_al_dbg_level = TRACE_LEVEL_ERROR; +uint32_t g_al_dbg_flags = 0xf0; +/* + * Device driver initialization routine. + */ +ib_api_status_t +al_initialize( void ) +{ + cl_status_t cl_status; + ib_api_status_t status = IB_ERROR; + + AL_ENTER( AL_DBG_DEV ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, ("Hello World! =)\n") ); + + /* + * Initialize access layer services. + */ +#if AL_OBJ_PRIVATE_ASYNC_PROC + gp_async_proc_mgr = cl_malloc( sizeof(cl_async_proc_t) * 3 ); +#else + gp_async_proc_mgr = cl_malloc( sizeof(cl_async_proc_t) * 2 ); +#endif + if( !gp_async_proc_mgr ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("alloc_async_proc failed.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + gp_async_pnp_mgr = gp_async_proc_mgr + 1; + cl_async_proc_construct( gp_async_proc_mgr ); + cl_async_proc_construct( gp_async_pnp_mgr ); +#if AL_OBJ_PRIVATE_ASYNC_PROC + gp_async_obj_mgr = gp_async_proc_mgr + 2; + cl_async_proc_construct( gp_async_obj_mgr ); + cl_status = cl_async_proc_init( gp_async_obj_mgr, 1, "AL_OBJ" ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to init async_obj_mgr: status = 0x%x.\n", cl_status) ); + return ib_convert_cl_status( cl_status ); + } +#endif + cl_status = cl_async_proc_init( gp_async_proc_mgr, 1, "Althread" ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to init async_proc_mgr: status = 0x%x.\n", cl_status) ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_async_proc_init( gp_async_pnp_mgr, 1, "AL_PNP" ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to init async_pnp_mgr: status = 0x%x.\n", cl_status) ); + return ib_convert_cl_status( cl_status ); + } + + status = create_al_mgr(); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_mgr: status = 0x%x.\n", status) ); + return status; + } + + AL_EXIT( AL_DBG_DEV ); + return status; +} + + + +/* + * Device driver cleanup routine. + */ +void +al_cleanup( void ) +{ + AL_ENTER( AL_DBG_DEV ); + + /* + * Destroy access layer device interface. + */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, ("Destroying %s device.\n", + (const char *)AL_DEVICE_NAME) ); + + /* + * Destroy access layer services. 
+ */ + if( gp_al_mgr ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, ("Destroying AL Mgr.\n") ); + ref_al_obj( &gp_al_mgr->obj ); + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + } + +#if AL_OBJ_PRIVATE_ASYNC_PROC + if( gp_async_obj_mgr ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, + ("Destroying async obj mgr.\n") ); + cl_async_proc_destroy( gp_async_obj_mgr ); + gp_async_obj_mgr = NULL; + } +#endif + + if( gp_async_pnp_mgr ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, + ("Destroying async pnp mgr.\n") ); + cl_async_proc_destroy( gp_async_pnp_mgr ); + gp_async_pnp_mgr = NULL; + } + + if( gp_async_proc_mgr ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, + ("Destroying async proc mgr.\n") ); + cl_async_proc_destroy( gp_async_proc_mgr ); + cl_free( gp_async_proc_mgr ); + gp_async_proc_mgr = NULL; + } + + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_DEV, ("Goodbye Cruel World =(\n") ); +} diff --git a/branches/Ndi/core/al/al_init.h b/branches/Ndi/core/al/al_init.h new file mode 100644 index 00000000..4f100474 --- /dev/null +++ b/branches/Ndi/core/al/al_init.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined(_IB_AL_INIT_H_) +#define _IB_AL_INIT_H_ + +#include + + +/****i* AL/al_initialize +* NAME +* This function initializes the Access Layer. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +al_initialize( void ); +/* +* DESCRIPTION +* This function performs a global initialization of the +* Access Layer (AL) +* +* PARAMETERS +* None +* +* RETURN VALUE +* Status - +* TBD +* +* PORTABILITY +* Kernel mode only. +* +* SEE ALSO +* al_cleanup +*********/ + + +/****i* AL/al_cleanup +* NAME +* This function cleans up resources allocated during al_initialize. +* +* SYNOPSIS +*/ +AL_EXPORT void AL_API +al_cleanup( void ); +/* +* DESCRIPTION +* This function frees up resources used by the access layer. +* +* PARAMETERS +* None +* +* RETURN VALUE +* +* PORTABILITY +* Kernel mode only. +* +* SEE ALSO +* al_initialize +*********/ + +#endif /* _IB_AL_INIT_H_ */ diff --git a/branches/Ndi/core/al/al_ioc_pnp.h b/branches/Ndi/core/al/al_ioc_pnp.h new file mode 100644 index 00000000..5758ff46 --- /dev/null +++ b/branches/Ndi/core/al/al_ioc_pnp.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined(__IB_AL_IOC_PNP_H__) +#define __IB_AL_IOC_PNP_H__ + + +#include "al_common.h" + + +ib_api_status_t +create_ioc_pnp( + IN al_obj_t* const p_parent_obj ); + +void +ioc_pnp_process_reg( + IN cl_async_proc_item_t *p_item ); + +void +ioc_pnp_process_dereg( + IN cl_async_proc_item_t *p_item ); + +#endif /* __IB_AL_IOC_PNP_H__ */ diff --git a/branches/Ndi/core/al/al_mad.c b/branches/Ndi/core/al/al_mad.c new file mode 100644 index 00000000..bd8d473e --- /dev/null +++ b/branches/Ndi/core/al/al_mad.c @@ -0,0 +1,3276 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include + +#include "al.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mad.tmh" +#endif + +#include "al_cq.h" +#include "al_mad.h" +#include "al_qp.h" +#include "al_res_mgr.h" +#include "al_verbs.h" + +#include "ib_common.h" + + +#define MAX_TIME CL_CONST64(0xFFFFFFFFFFFFFFFF) +#define MAD_VECTOR_SIZE 8 +#define MAX_METHOD 127 +#define DEFAULT_RMPP_VERSION 1 + +#define AL_RMPP_WINDOW 16 /* Max size of RMPP window */ +#define AL_REASSEMBLY_TIMEOUT 5000 /* 5 seconds */ + +static void +__cleanup_mad_disp( + IN al_obj_t *p_obj ); + +static void +__free_mad_disp( + IN al_obj_t *p_obj ); + +static cl_status_t +__init_mad_reg( + IN void* const p_element, + IN void* context ); + +static cl_status_t +__init_version_entry( + IN void* const p_element, + IN void* context ); + +static void +__destroy_version_entry( + IN void* const p_element, + IN void* context ); + +static cl_status_t +__init_class_entry( + IN void* const p_element, + IN void* context ); + +static void +__destroy_class_entry( + IN void* const p_element, + IN void* context ); + +static __inline uint8_t +__mgmt_class_index( + IN const uint8_t mgmt_class ); + +static __inline uint8_t +__mgmt_version_index( + IN const uint8_t mgmt_version ); + +static boolean_t +__mad_disp_reg_unsol( + IN const al_mad_disp_handle_t h_mad_disp, + IN const al_mad_reg_handle_t h_mad_reg, + IN const ib_mad_svc_t *p_mad_svc ); + +static boolean_t +__use_tid_routing( + IN const ib_mad_t* const p_mad_hdr, + IN const boolean_t are_we_sender ); + +/* + * Issue a send request to the MAD dispatcher. + */ +static void +__mad_disp_queue_send( + IN const al_mad_reg_handle_t h_mad_reg, + IN al_mad_wr_t* const p_mad_wr ); + +static inline void +__mad_disp_resume_send( + IN const al_mad_reg_handle_t h_mad_reg ); + +static void +__destroying_mad_svc( + IN struct _al_obj *p_obj ); + +static void +__cleanup_mad_svc( + IN struct _al_obj *p_obj ); + +static void +__send_timer_cb( + IN void *context ); + +static void +__check_send_queue( + IN ib_mad_svc_handle_t h_mad_svc ); + +static void +__recv_timer_cb( + IN void *context ); + +static ib_api_status_t +__init_send_mad( + IN ib_mad_svc_handle_t h_mad_svc, + IN const ib_mad_send_handle_t h_send, + IN ib_mad_element_t* const p_mad_element ); + +static boolean_t +__does_send_req_rmpp( + IN const ib_mad_svc_type_t mad_svc_type, + IN const ib_mad_element_t* const p_mad_element, + OUT uint8_t *p_rmpp_version ); + +static void +__queue_mad_wr( + IN const al_mad_reg_handle_t h_mad_reg, + IN const ib_mad_send_handle_t h_send ); + +static void +__queue_rmpp_seg( + IN const al_mad_reg_handle_t h_mad_reg, + IN ib_mad_send_handle_t h_send ); + +static ib_api_status_t +__create_send_av( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_send_handle_t h_send ); + +static void +__cleanup_mad_send( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_send_handle_t h_send ); + +static __inline void +__set_retry_time( + IN ib_mad_send_handle_t h_send ); + +static void +__mad_svc_send_done( + IN ib_mad_svc_handle_t h_mad_svc, + IN al_mad_wr_t *p_mad_wr, + IN ib_wc_t *p_wc ); + +static boolean_t +__is_send_mad_done( + IN ib_mad_send_handle_t h_send, + IN ib_wc_t *p_wc ); + +static void +__notify_send_comp( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_send_handle_t h_send, + IN ib_wc_status_t wc_status ); + +static void +__mad_svc_recv_done( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + +static void 
+__process_recv_resp( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + +static cl_status_t +__do_rmpp_recv( + IN ib_mad_svc_handle_t h_mad_svc, + IN OUT ib_mad_element_t **pp_mad_element ); + +static __inline boolean_t +__recv_requires_rmpp( + IN const ib_mad_svc_type_t mad_svc_type, + IN const ib_mad_element_t* const p_mad_element ); + +static __inline boolean_t +__is_internal_send( + IN const ib_mad_svc_type_t mad_svc_type, + IN const ib_mad_element_t* const p_mad_element ); + +static cl_status_t +__process_rmpp_data( + IN ib_mad_svc_handle_t h_mad_svc, + IN OUT ib_mad_element_t **pp_mad_element ); + +static void +__process_rmpp_ack( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + +static void +__process_rmpp_nack( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + +static cl_status_t +__process_segment( + IN ib_mad_svc_handle_t h_mad_svc, + IN al_mad_rmpp_t *p_rmpp, + IN OUT ib_mad_element_t **pp_mad_element, + OUT ib_mad_element_t **pp_rmpp_resp_mad ); + +static al_mad_rmpp_t* +__find_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN OUT ib_mad_element_t *p_mad_element ); + +static al_mad_rmpp_t* +__get_mad_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + +static void +__put_mad_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN al_mad_rmpp_t *p_rmpp ); + +static void +__init_reply_element( + IN ib_mad_element_t *p_dst_element, + IN ib_mad_element_t *p_src_element ); + +static ib_mad_element_t* +__get_rmpp_ack( + IN al_mad_rmpp_t *p_rmpp ); + +ib_net64_t +__get_send_tid( + IN ib_mad_send_handle_t h_send ) +{ + return ((ib_mad_t*)ib_get_mad_buf( h_send->p_send_mad ))->trans_id; +} + + +ib_mad_t* +get_mad_hdr_from_wr( + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_send_handle_t h_send; + + CL_ASSERT( p_mad_wr ); + + h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr ); + return h_send->p_send_mad->p_mad_buf; +} + + + +/* + * Construct a MAD element from a receive work completion. + */ +void +build_mad_recv( + IN ib_mad_element_t* p_mad_element, + IN ib_wc_t* p_wc ) +{ + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_mad_element ); + CL_ASSERT( p_wc ); + + /* Build the MAD element from the work completion. */ + p_mad_element->status = p_wc->status; + p_mad_element->remote_qp = p_wc->recv.ud.remote_qp; + + /* + * We assume all communicating managers using MAD services use + * the same QKEY. + */ + + /* + * Mellanox workaround: + * The Q_KEY from the QP context must be used if the high bit is + * set in the Q_KEY part of the work request. See section 10.2.5 + * on Q_KEYS Compliance Statement C10-15. + * This must be enabled to permit future non special QP's to have + * MAD level service capability. To use SAR in a generic way. + */ + + /* + * p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY; + */ + + p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_mad_element->remote_lid = p_wc->recv.ud.remote_lid; + p_mad_element->remote_sl = p_wc->recv.ud.remote_sl; + p_mad_element->pkey_index = p_wc->recv.ud.pkey_index; + p_mad_element->path_bits = p_wc->recv.ud.path_bits; + p_mad_element->recv_opt = p_wc->recv.ud.recv_opt; + + p_mad_element->grh_valid = p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID; + + if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_IMMEDIATE ) + p_mad_element->immediate_data = p_wc->recv.ud.immediate_data; + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * + * MAD Dispatcher. 
+ * + */ + + +ib_api_status_t +create_mad_disp( + IN al_obj_t* const p_parent_obj, + IN const ib_qp_handle_t h_qp, + IN al_mad_disp_handle_t* const ph_mad_disp ) +{ + al_mad_disp_handle_t h_mad_disp; + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MAD_SVC ); + h_mad_disp = cl_zalloc( sizeof( al_mad_disp_t ) ); + if( !h_mad_disp ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("insufficient memory\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Initialize the MAD dispatcher. */ + cl_vector_construct( &h_mad_disp->client_vector ); + cl_vector_construct( &h_mad_disp->version_vector ); + construct_al_obj( &h_mad_disp->obj, AL_OBJ_TYPE_MAD_DISP ); + status = init_al_obj( &h_mad_disp->obj, h_mad_disp, TRUE, + NULL, __cleanup_mad_disp, __free_mad_disp ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("init obj: %s\n", + ib_get_err_str(status)) ); + __free_mad_disp( &h_mad_disp->obj ); + return status; + } + status = attach_al_obj( p_parent_obj, &h_mad_disp->obj ); + if( status != IB_SUCCESS ) + { + h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Obtain a reference to the QP to post sends to. */ + h_mad_disp->h_qp = h_qp; + ref_al_obj( &h_qp->obj ); + + /* Create the client vector. */ + cl_status = cl_vector_init( &h_mad_disp->client_vector, 1, MAD_VECTOR_SIZE, + sizeof( al_mad_disp_reg_t ), __init_mad_reg, NULL, h_mad_disp ); + if( cl_status != CL_SUCCESS ) + { + h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + + /* Create the version vector. */ + cl_status = cl_vector_init( &h_mad_disp->version_vector, + 1, 1, sizeof( cl_vector_t ), __init_version_entry, + __destroy_version_entry, &h_mad_disp->version_vector ); + if( cl_status != CL_SUCCESS ) + { + h_mad_disp->obj.pfn_destroy( &h_mad_disp->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + + *ph_mad_disp = h_mad_disp; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &h_mad_disp->obj ); + + AL_EXIT( AL_DBG_MAD_SVC ); + return IB_SUCCESS; +} + + + +static void +__cleanup_mad_disp( + IN al_obj_t *p_obj ) +{ + al_mad_disp_handle_t h_mad_disp; + + AL_ENTER( AL_DBG_MAD_SVC ); + CL_ASSERT( p_obj ); + h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj ); + + /* Detach from the QP that we were using. */ + if( h_mad_disp->h_qp ) + deref_al_obj( &h_mad_disp->h_qp->obj ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static void +__free_mad_disp( + IN al_obj_t *p_obj ) +{ + al_mad_disp_handle_t h_mad_disp; + + AL_ENTER( AL_DBG_MAD_SVC ); + CL_ASSERT( p_obj ); + h_mad_disp = PARENT_STRUCT( p_obj, al_mad_disp_t, obj ); + + cl_vector_destroy( &h_mad_disp->client_vector ); + cl_vector_destroy( &h_mad_disp->version_vector ); + destroy_al_obj( p_obj ); + cl_free( h_mad_disp ); + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static al_mad_reg_handle_t +__mad_disp_reg( + IN const al_mad_disp_handle_t h_mad_disp, + IN const ib_mad_svc_handle_t h_mad_svc, + IN const ib_mad_svc_t *p_mad_svc, + IN const pfn_mad_svc_send_done_t pfn_send_done, + IN const pfn_mad_svc_recv_done_t pfn_recv_done ) +{ + al_mad_reg_handle_t h_mad_reg; + size_t i; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MAD_SVC ); + cl_spinlock_acquire( &h_mad_disp->obj.lock ); + + /* Find an empty slot in the client vector for the registration. 
*/ + for( i = 0; i < cl_vector_get_size( &h_mad_disp->client_vector ); i++ ) + { + h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i ); + if( !h_mad_reg->ref_cnt ) + break; + } + /* Trap for ClientID overflow. */ + if( i >= 0xFFFFFFFF ) + { + cl_spinlock_release( &h_mad_disp->obj.lock ); + return NULL; + } + cl_status = cl_vector_set_min_size( &h_mad_disp->client_vector, i+1 ); + if( cl_status != CL_SUCCESS ) + { + cl_spinlock_release( &h_mad_disp->obj.lock ); + return NULL; + } + h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, i ); + + /* Record the registration. */ + h_mad_reg->client_id = (uint32_t)i; + h_mad_reg->support_unsol = p_mad_svc->support_unsol; + h_mad_reg->mgmt_class = p_mad_svc->mgmt_class; + h_mad_reg->mgmt_version = p_mad_svc->mgmt_version; + h_mad_reg->pfn_recv_done = pfn_recv_done; + h_mad_reg->pfn_send_done = pfn_send_done; + + /* If the client requires support for unsolicited MADs, add tracking. */ + if( p_mad_svc->support_unsol ) + { + if( !__mad_disp_reg_unsol( h_mad_disp, h_mad_reg, p_mad_svc ) ) + { + cl_spinlock_release( &h_mad_disp->obj.lock ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("reg unsol failed\n") ); + return NULL; + } + } + + /* Record that the registration was successful. */ + h_mad_reg->h_mad_svc = h_mad_svc; + h_mad_reg->ref_cnt = 1; + cl_spinlock_release( &h_mad_disp->obj.lock ); + + /* The MAD service needs to take a reference on the dispatcher. */ + ref_al_obj( &h_mad_disp->obj ); + + AL_EXIT( AL_DBG_MAD_SVC ); + return h_mad_reg; +} + + +static cl_status_t +__init_mad_reg( + IN void* const p_element, + IN void* context ) +{ + al_mad_reg_handle_t h_mad_reg; + + /* Record the MAD dispatcher for the registration structure. */ + h_mad_reg = p_element; + h_mad_reg->h_mad_disp = context; + h_mad_reg->ref_cnt = 0; + + return CL_SUCCESS; +} + + +/* + * Initialize an entry in the version vector. Each entry is a vector of + * classes. + */ +static cl_status_t +__init_version_entry( + IN void* const p_element, + IN void* context ) +{ + cl_vector_t *p_vector; + + p_vector = p_element; + UNUSED_PARAM( context ); + + cl_vector_construct( p_vector ); + return cl_vector_init( p_vector, MAD_VECTOR_SIZE, MAD_VECTOR_SIZE, + sizeof( cl_ptr_vector_t ), __init_class_entry, __destroy_class_entry, + p_vector ); +} + + +static void +__destroy_version_entry( + IN void* const p_element, + IN void* context ) +{ + cl_vector_t *p_vector; + + p_vector = p_element; + UNUSED_PARAM( context ); + + cl_vector_destroy( p_vector ); +} + + +/* + * Initialize an entry in the class vector. Each entry is a pointer vector + * of methods. + */ +static cl_status_t +__init_class_entry( + IN void* const p_element, + IN void* context ) +{ + cl_ptr_vector_t *p_ptr_vector; + + p_ptr_vector = p_element; + UNUSED_PARAM( context ); + + cl_ptr_vector_construct( p_ptr_vector ); + return cl_ptr_vector_init( p_ptr_vector, + MAD_VECTOR_SIZE, MAD_VECTOR_SIZE ); +} + + +static void +__destroy_class_entry( + IN void* const p_element, + IN void* context ) +{ + cl_ptr_vector_t *p_ptr_vector; + + p_ptr_vector = p_element; + UNUSED_PARAM( context ); + + cl_ptr_vector_destroy( p_ptr_vector ); +} + + +/* + * Add support for unsolicited MADs for the given MAD service. 
+ */ +static boolean_t +__mad_disp_reg_unsol( + IN const al_mad_disp_handle_t h_mad_disp, + IN const al_mad_reg_handle_t h_mad_reg, + IN const ib_mad_svc_t *p_mad_svc ) +{ + cl_status_t cl_status; + cl_vector_t *p_class_vector; + cl_ptr_vector_t *p_method_ptr_vector; + uint8_t i; + + /* Ensure that we are ready to handle this version number. */ + AL_ENTER( AL_DBG_MAD_SVC ); + cl_status = cl_vector_set_min_size( &h_mad_disp->version_vector, + __mgmt_version_index( p_mad_svc->mgmt_version ) + 1 ); + if( cl_status != CL_SUCCESS ) + return FALSE; + + /* Get the list of classes in use for this version. */ + p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector, + __mgmt_version_index( p_mad_svc->mgmt_version ) ); + + /* Ensure that we are ready to handle the specified class. */ + cl_status = cl_vector_set_min_size( p_class_vector, + __mgmt_class_index( p_mad_svc->mgmt_class ) + 1 ); + if( cl_status != CL_SUCCESS ) + return FALSE; + + /* Get the list of methods in use for this class. */ + p_method_ptr_vector = cl_vector_get_ptr( p_class_vector, + __mgmt_class_index( p_mad_svc->mgmt_class ) ); + + /* Ensure that we can handle all requested methods. */ + for( i = MAX_METHOD - 1; i > 0; i-- ) + { + if( p_mad_svc->method_array[i] ) + { + cl_status = cl_ptr_vector_set_min_size( p_method_ptr_vector, i+1 ); + if( cl_status != CL_SUCCESS ) + return FALSE; + + /* No one else can be registered for this method. */ + if( cl_ptr_vector_get( p_method_ptr_vector, i ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Other client already registered for Un-Solicited Method " + "%u for version %u of class %u.\n", i, p_mad_svc->mgmt_version, + p_mad_svc->mgmt_class ) ); + return FALSE; + } + } + } + + /* We can support the request. Record the methods. */ + for( i = 0; i < MAX_METHOD; i++ ) + { + if( p_mad_svc->method_array[i] ) + { + cl_ptr_vector_set( p_method_ptr_vector, i, h_mad_reg ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("Register version:%u (%u) class:0x%02X(%u) method:0x%02X Hdl:%016I64x\n", + p_mad_svc->mgmt_version, + __mgmt_version_index( p_mad_svc->mgmt_version ), + p_mad_svc->mgmt_class, + __mgmt_class_index( p_mad_svc->mgmt_class ), + i, + (LONG_PTR)h_mad_reg) ); + } + } + + AL_EXIT( AL_DBG_MAD_SVC ); + return TRUE; +} + + +static __inline uint8_t +__mgmt_version_index( + IN const uint8_t mgmt_version ) +{ + return (uint8_t)(mgmt_version - 1); +} + + +static __inline uint8_t +__mgmt_class_index( + IN const uint8_t mgmt_class ) +{ + /* Map class 0x81 to 0 to remove empty class values. */ + if( mgmt_class == IB_MCLASS_SUBN_DIR ) + return IB_MCLASS_SUBN_LID; + else + return mgmt_class; +} + + + +/* + * Deregister a MAD service from the dispatcher. + */ +static void +__mad_disp_dereg( + IN const al_mad_reg_handle_t h_mad_reg ) +{ + al_mad_disp_handle_t h_mad_disp; + cl_vector_t *p_class_vector; + cl_ptr_vector_t *p_method_ptr_vector; + size_t i; + + AL_ENTER( AL_DBG_MAD_SVC ); + h_mad_disp = h_mad_reg->h_mad_disp; + + cl_spinlock_acquire( &h_mad_disp->obj.lock ); + + if( h_mad_reg->support_unsol ) + { + /* Deregister the service from receiving unsolicited MADs. */ + p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector, + __mgmt_version_index( h_mad_reg->mgmt_version ) ); + + p_method_ptr_vector = cl_vector_get_ptr( p_class_vector, + __mgmt_class_index( h_mad_reg->mgmt_class ) ); + + /* Deregister all methods registered to the client. 
*/ + for( i = 0; i < cl_ptr_vector_get_size( p_method_ptr_vector ); i++ ) + { + if( cl_ptr_vector_get( p_method_ptr_vector, i ) == h_mad_reg ) + { + cl_ptr_vector_set( p_method_ptr_vector, i, NULL ); + } + } + } + + cl_spinlock_release( &h_mad_disp->obj.lock ); + + /* Decrement the reference count in the registration table. */ + cl_atomic_dec( &h_mad_reg->ref_cnt ); + + /* The MAD service no longer requires access to the MAD dispatcher. */ + deref_al_obj( &h_mad_disp->obj ); + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static void +__mad_disp_queue_send( + IN const al_mad_reg_handle_t h_mad_reg, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_t *p_mad_hdr; + + /* + * Increment the reference count on the registration to ensure that + * the MAD service does not go away until the send completes. + */ + AL_ENTER( AL_DBG_MAD_SVC ); + cl_atomic_inc( &h_mad_reg->ref_cnt ); + ref_al_obj( &h_mad_reg->h_mad_svc->obj ); + + /* Get the MAD header. */ + p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr ); + CL_ASSERT( !p_mad_wr->send_wr.wr_id ); + p_mad_wr->send_wr.wr_id = (uintn_t)p_mad_wr; + + /* + * If we are the originator of the transaction, we need to modify the + * TID to ensure that duplicate TIDs are not used by multiple clients. + */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("dispatching TID: 0x%I64x\n", + p_mad_hdr->trans_id) ); + p_mad_wr->client_tid = p_mad_hdr->trans_id; + if( __use_tid_routing( p_mad_hdr, TRUE ) ) + { + /* Clear the AL portion of the TID before setting. */ + ((al_tid_t*)&p_mad_hdr->trans_id)->tid32.al_tid = 0; + +#pragma warning( push, 3 ) + al_set_al_tid( &p_mad_hdr->trans_id, h_mad_reg->client_id ); +#pragma warning( pop ) + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("modified TID to: 0x%0I64x\n", p_mad_hdr->trans_id) ); + } + + /* Post the work request to the QP. */ + p_mad_wr->client_id = h_mad_reg->client_id; + h_mad_reg->h_mad_disp->h_qp->pfn_queue_mad( + h_mad_reg->h_mad_disp->h_qp, p_mad_wr ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + +static inline void +__mad_disp_resume_send( + IN const al_mad_reg_handle_t h_mad_reg ) +{ + AL_ENTER( AL_DBG_MAD_SVC ); + + h_mad_reg->h_mad_disp->h_qp->pfn_resume_mad( + h_mad_reg->h_mad_disp->h_qp ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + +/* + * Complete a sent MAD. Route the completion to the correct MAD service. + */ +void +mad_disp_send_done( + IN al_mad_disp_handle_t h_mad_disp, + IN al_mad_wr_t *p_mad_wr, + IN ib_wc_t *p_wc ) +{ + al_mad_reg_handle_t h_mad_reg; + ib_mad_t *p_mad_hdr; + + AL_ENTER( AL_DBG_MAD_SVC ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("p_mad_wr 0x%016I64x\n", (LONG_PTR)p_mad_wr ) ); + + /* Get the MAD header. */ + p_mad_hdr = get_mad_hdr_from_wr( p_mad_wr ); + + /* Get the MAD service that issued the send. */ + cl_spinlock_acquire( &h_mad_disp->obj.lock ); + h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector, + p_mad_wr->client_id ); + cl_spinlock_release( &h_mad_disp->obj.lock ); + CL_ASSERT( h_mad_reg && (h_mad_reg->client_id == p_mad_wr->client_id) ); + + /* Reset the TID and WR ID. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("send done TID: 0x%I64x\n", + p_mad_hdr->trans_id) ); + p_mad_hdr->trans_id = p_mad_wr->client_tid; + p_mad_wr->send_wr.wr_id = 0; + + /* Return the completed request to the MAD service. */ + CL_ASSERT( h_mad_reg->h_mad_svc ); + h_mad_reg->pfn_send_done( h_mad_reg->h_mad_svc, p_mad_wr, p_wc ); + + /* The MAD service is no longer referenced once the send completes. 
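+ * This drops the reference pair taken in __mad_disp_queue_send, so the
+ * service and its registration stay valid for exactly the lifetime of
+ * the posted work request.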
+ */
+ deref_al_obj( &h_mad_reg->h_mad_svc->obj );
+ cl_atomic_dec( &h_mad_reg->ref_cnt );
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+/*
+ * Process a received MAD. Route the completion to the correct MAD service.
+ */
+ib_api_status_t
+mad_disp_recv_done(
+ IN al_mad_disp_handle_t h_mad_disp,
+ IN ib_mad_element_t *p_mad_element )
+{
+ ib_mad_t *p_mad_hdr;
+ al_mad_reg_handle_t h_mad_reg;
+ ib_al_handle_t h_al;
+ ib_mad_svc_handle_t h_mad_svc;
+
+ cl_vector_t *p_class_vector;
+ cl_ptr_vector_t *p_method_ptr_vector;
+ uint8_t method;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+ p_mad_hdr = ib_get_mad_buf( p_mad_element );
+
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
+ ("TID = 0x%I64x\n"
+ "class = 0x%x.\n"
+ "version = 0x%x.\n"
+ "method = 0x%x.\n",
+ p_mad_hdr->trans_id,
+ p_mad_hdr->mgmt_class,
+ p_mad_hdr->class_ver,
+ p_mad_hdr->method) );
+
+ /* Get the client to route the receive to. */
+ cl_spinlock_acquire( &h_mad_disp->obj.lock );
+ if( __use_tid_routing( p_mad_hdr, FALSE ) )
+ {
+ /* The MAD was received in response to a send. */
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("routing based on TID\n"));
+
+ /* Verify that we have a registration entry. */
+ if( al_get_al_tid( p_mad_hdr->trans_id ) >=
+ cl_vector_get_size( &h_mad_disp->client_vector ) )
+ {
+ /* The client ID carried in the TID is out of range. */
+ cl_spinlock_release( &h_mad_disp->obj.lock );
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("invalid client ID\n") );
+ return IB_NOT_FOUND;
+ }
+
+ h_mad_reg = cl_vector_get_ptr( &h_mad_disp->client_vector,
+ al_get_al_tid( p_mad_hdr->trans_id ) );
+
+/*
+ * Disable warning about passing unaligned 64-bit value.
+ * The value is always aligned given how buffers are allocated
+ * and given the layout of a MAD.
+ */
+#pragma warning( push, 3 )
+ al_set_al_tid( &p_mad_hdr->trans_id, 0 );
+#pragma warning( pop )
+ }
+ else
+ {
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
+ ("routing based on version, class, method\n"));
+
+ /* The receive is unsolicited. Find the client. */
+ if( __mgmt_version_index( p_mad_hdr->class_ver ) >=
+ cl_vector_get_size( &h_mad_disp->version_vector ) )
+ {
+ /* No clients for this version of MADs. */
+ cl_spinlock_release( &h_mad_disp->obj.lock );
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("no clients registered for this class version\n") );
+ return IB_NOT_FOUND;
+ }
+
+ /* See if we have a client for this class of MADs. */
+ p_class_vector = cl_vector_get_ptr( &h_mad_disp->version_vector,
+ __mgmt_version_index( p_mad_hdr->class_ver ) );
+
+ if( __mgmt_class_index( p_mad_hdr->mgmt_class ) >=
+ cl_vector_get_size( p_class_vector ) )
+ {
+ /* No clients for this version-class. */
+ cl_spinlock_release( &h_mad_disp->obj.lock );
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("no clients registered for this class\n") );
+ return IB_NOT_FOUND;
+ }
+
+ /* See if we have a client for this method. */
+ p_method_ptr_vector = cl_vector_get_ptr( p_class_vector,
+ __mgmt_class_index( p_mad_hdr->mgmt_class ) );
+ method = (uint8_t)(p_mad_hdr->method & (~IB_MAD_METHOD_RESP_MASK));
+
+ if( method >= cl_ptr_vector_get_size( p_method_ptr_vector ) )
+ {
+ /* No clients for this version-class-method. */
+ cl_spinlock_release( &h_mad_disp->obj.lock );
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("no clients registered for this method (out of range)\n") );
+ return IB_NOT_FOUND;
+ }
+
+ h_mad_reg = cl_ptr_vector_get( p_method_ptr_vector, method );
+ if( !h_mad_reg )
+ {
+ /* No clients for this version-class-method.
*/ + cl_spinlock_release( &h_mad_disp->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("no clients registered for method %u of class %u(%u) version %u(%u)\n", + method, + p_mad_hdr->mgmt_class, + __mgmt_class_index( p_mad_hdr->mgmt_class ), + p_mad_hdr->class_ver, + __mgmt_version_index( p_mad_hdr->class_ver ) + ) ); + return IB_NOT_FOUND; + } + } + + /* Verify that the registration is still valid. */ + if( !h_mad_reg->ref_cnt ) + { + cl_spinlock_release( &h_mad_disp->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("no client registered\n") ); + return IB_NOT_FOUND; + } + + /* Take a reference on the MAD service in case it deregisters. */ + h_mad_svc = h_mad_reg->h_mad_svc; + ref_al_obj( &h_mad_svc->obj ); + cl_spinlock_release( &h_mad_disp->obj.lock ); + + /* Handoff the MAD to the correct AL instance. */ + h_al = qp_get_al( (ib_qp_handle_t)(h_mad_svc->obj.p_parent_obj) ); + al_handoff_mad( h_al, p_mad_element ); + + h_mad_reg->pfn_recv_done( h_mad_svc, p_mad_element ); + deref_al_obj( &h_mad_svc->obj ); + AL_EXIT( AL_DBG_MAD_SVC ); + return IB_SUCCESS; +} + + + +/* + * Return TRUE if we should route the MAD to the recipient based on the TID. + */ +static boolean_t +__use_tid_routing( + IN const ib_mad_t* const p_mad_hdr, + IN const boolean_t are_we_sender ) +{ + ib_rmpp_mad_t *p_rmpp_mad; + boolean_t is_orig; + + AL_ENTER( AL_DBG_MAD_SVC ); + + /* CM MADs are never TID routed. */ + if( p_mad_hdr->mgmt_class == IB_MCLASS_COMM_MGMT ) + { + AL_EXIT( AL_DBG_MAD_SVC ); + return FALSE; + } + + /* + * Determine originator for a sent MAD. Received MADs are just the + * opposite. + */ + + /* Non-DATA RMPP MADs are handled differently. */ + p_rmpp_mad = (ib_rmpp_mad_t*)p_mad_hdr; + if( (p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_ADM) && + ( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) && + (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) ) + { + /* + * We need to distinguish between ACKs sent after receiving + * a request, versus ACKs sent after receiving a response. ACKs + * to a request are from the responder. ACKs to a response are + * from the originator. + + * Note that we assume STOP and ABORT packets are initiated by + * receivers. If both senders and receivers can + * initiate STOP and ABORT MADs, then we can't distinguish which + * transaction is associated with the MAD. The TID for a + * send and receive can be the same. + */ + is_orig = !ib_mad_is_response( p_mad_hdr ); + } + else + { + /* + * See if the MAD is being sent in response to a previous MAD. If + * it is, then we're NOT the originator. Note that trap repress + * MADs are responses, even though the response bit isn't set. + */ + is_orig = !( ib_mad_is_response( p_mad_hdr ) || + (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) ); + } + + /* If we're the receiver, toggle the result. */ + if( !are_we_sender ) + is_orig = !is_orig; + + AL_EXIT( AL_DBG_MAD_SVC ); + return is_orig; +} + + + +/* + * + * MAD Service. + * + */ + + + +/* + * Create and initialize a MAD service for use. 
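+ *
+ * A minimal usage sketch (illustrative only - my_context and the two
+ * callbacks are caller-supplied placeholders, and error handling is
+ * omitted):
+ *
+ *   ib_mad_svc_t svc;
+ *   ib_mad_svc_handle_t h_svc;
+ *
+ *   cl_memclr( &svc, sizeof( svc ) );
+ *   svc.mad_svc_context = my_context;
+ *   svc.pfn_mad_recv_cb = my_recv_cb;
+ *   svc.pfn_mad_send_cb = my_send_cb;
+ *   svc.svc_type = IB_MAD_SVC_DEFAULT;
+ *   status = reg_mad_svc( h_qp, &svc, &h_svc );
+ *
+ * Clients that expect unsolicited MADs must also set support_unsol,
+ * mgmt_class, mgmt_version, and method_array before registering.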
+ */ +ib_api_status_t +reg_mad_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ) +{ + ib_api_status_t status; + cl_status_t cl_status; + ib_mad_svc_handle_t h_mad_svc; + al_qp_alias_t *p_qp_alias; + ib_qp_attr_t qp_attr; + + AL_ENTER( AL_DBG_MAD_SVC ); + CL_ASSERT( h_qp ); + + switch( h_qp->type ) + { + case IB_QPT_QP0: + case IB_QPT_QP1: + case IB_QPT_QP0_ALIAS: + case IB_QPT_QP1_ALIAS: + case IB_QPT_MAD: + break; + + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( !p_mad_svc || !ph_mad_svc ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t) ); + if( !h_mad_svc ) + { + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the MAD service. */ + construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC ); + cl_timer_construct( &h_mad_svc->send_timer ); + cl_timer_construct( &h_mad_svc->recv_timer ); + cl_qlist_init( &h_mad_svc->send_list ); + cl_qlist_init( &h_mad_svc->recv_list ); + + p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp ); + h_mad_svc->svc_type = p_mad_svc->svc_type; + h_mad_svc->obj.context = p_mad_svc->mad_svc_context; + h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb; + h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb; + + /* Initialize the MAD service. */ + status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context, + TRUE, __destroying_mad_svc, __cleanup_mad_svc, free_mad_svc ); + if( status != IB_SUCCESS ) + { + free_mad_svc( &h_mad_svc->obj ); + return status; + } + status = attach_al_obj( &h_qp->obj, &h_mad_svc->obj ); + if( status != IB_SUCCESS ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + h_mad_svc->h_mad_reg = __mad_disp_reg( p_qp_alias->h_mad_disp, + h_mad_svc, p_mad_svc, __mad_svc_send_done, __mad_svc_recv_done ); + if( !h_mad_svc->h_mad_reg ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Record which port this MAD service uses, to use when creating AVs. */ + status = ib_query_qp( h_qp, &qp_attr ); + if( status != IB_SUCCESS ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + return status; + } + h_mad_svc->h_pd = qp_attr.h_pd; + h_mad_svc->port_num = qp_attr.primary_port; + + cl_status = cl_timer_init( &h_mad_svc->send_timer, + __send_timer_cb, h_mad_svc ); + if( cl_status != CL_SUCCESS ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_timer_init( &h_mad_svc->recv_timer, + __recv_timer_cb, h_mad_svc ); + if( cl_status != CL_SUCCESS ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + + *ph_mad_svc = h_mad_svc; + + AL_EXIT( AL_DBG_MAD_SVC ); + return IB_SUCCESS; +} + + + +static void +__destroying_mad_svc( + IN struct _al_obj *p_obj ) +{ + ib_qp_handle_t h_qp; + ib_mad_svc_handle_t h_mad_svc; + ib_mad_send_handle_t h_send; + cl_list_item_t *p_list_item; + int32_t timeout_ms; +#ifdef CL_KERNEL + KIRQL old_irql; +#endif + + AL_ENTER( AL_DBG_MAD_SVC ); + CL_ASSERT( p_obj ); + h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj ); + + /* Deregister the MAD service. 
*/ + h_qp = (ib_qp_handle_t)p_obj->p_parent_obj; + if( h_qp->pfn_dereg_mad_svc ) + h_qp->pfn_dereg_mad_svc( h_mad_svc ); + + /* Wait here until the MAD service is no longer in use. */ + timeout_ms = (int32_t)h_mad_svc->obj.timeout_ms; + while( h_mad_svc->ref_cnt && timeout_ms > 0 ) + { + /* Use a timeout to avoid waiting forever - just in case. */ + cl_thread_suspend( 10 ); + timeout_ms -= 10; + } + + /* + * Deregister from the MAD dispatcher. The MAD dispatcher holds + * a reference on the MAD service when invoking callbacks. Since we + * issue sends, we know how many callbacks are expected for send + * completions. With receive completions, we need to wait until all + * receive callbacks have completed before cleaning up receives. + */ + if( h_mad_svc->h_mad_reg ) + __mad_disp_dereg( h_mad_svc->h_mad_reg ); + + /* Cancel all outstanding send requests. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + for( p_list_item = cl_qlist_head( &h_mad_svc->send_list ); + p_list_item != cl_qlist_end( &h_mad_svc->send_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("canceling MAD\n") ); + h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); + h_send->canceled = TRUE; + } + cl_spinlock_release( &h_mad_svc->obj.lock ); + + /* + * Invoke the timer callback to return the canceled MADs to the user. + * Since the MAD service is being destroyed, the user cannot be issuing + * sends. + */ + if( h_mad_svc->h_mad_reg ) + { +#ifdef CL_KERNEL + old_irql = KeRaiseIrqlToDpcLevel(); +#endif + __check_send_queue( h_mad_svc ); +#ifdef CL_KERNEL + KeLowerIrql( old_irql ); +#endif + } + + cl_timer_destroy( &h_mad_svc->send_timer ); + +#ifdef CL_KERNEL + /* + * Reclaim any pending receives sent to the proxy for UAL. + */ + if( h_mad_svc->obj.h_al->p_context ) + { + cl_qlist_t *p_cblist; + al_proxy_cb_info_t *p_cb_info; + + cl_spinlock_acquire( &h_mad_svc->obj.h_al->p_context->cb_lock ); + p_cblist = &h_mad_svc->obj.h_al->p_context->misc_cb_list; + p_list_item = cl_qlist_head( p_cblist ); + while( p_list_item != cl_qlist_end( p_cblist ) ) + { + p_cb_info = (al_proxy_cb_info_t*)p_list_item; + p_list_item = cl_qlist_next( p_list_item ); + + if( p_cb_info->p_al_obj && p_cb_info->p_al_obj == &h_mad_svc->obj ) + { + cl_qlist_remove_item( p_cblist, &p_cb_info->pool_item.list_item ); + deref_al_obj( p_cb_info->p_al_obj ); + proxy_cb_put( p_cb_info ); + } + } + cl_spinlock_release( &h_mad_svc->obj.h_al->p_context->cb_lock ); + } +#endif + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static void +__cleanup_mad_svc( + IN struct _al_obj *p_obj ) +{ + ib_mad_svc_handle_t h_mad_svc; + al_mad_rmpp_t *p_rmpp; + cl_list_item_t *p_list_item; + + CL_ASSERT( p_obj ); + h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj ); + + /* + * There are no more callbacks from the MAD dispatcher that are active. + * Cleanup any receives that may still be lying around. Stop the receive + * timer to avoid synchronizing with it. 
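+ * Marking each pending reassembly inactive lets the receive timer
+ * callback below return those partial MADs to their pool instead of
+ * waiting for further segments.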
+ */ + cl_timer_destroy( &h_mad_svc->recv_timer ); + for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list ); + p_list_item != cl_qlist_end( &h_mad_svc->recv_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item ); + p_rmpp->inactive = TRUE; + } + __recv_timer_cb( h_mad_svc ); + + CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->send_list ) ); + CL_ASSERT( cl_is_qlist_empty( &h_mad_svc->recv_list ) ); +} + + + +void +free_mad_svc( + IN al_obj_t *p_obj ) +{ + ib_mad_svc_handle_t h_mad_svc; + + CL_ASSERT( p_obj ); + h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_mad_svc ); +} + + + +ib_api_status_t +ib_send_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element_list, + OUT ib_mad_element_t **pp_mad_failure OPTIONAL ) +{ + ib_api_status_t status = IB_SUCCESS; +#ifdef CL_KERNEL + ib_mad_send_handle_t h_send; + ib_mad_element_t *p_cur_mad, *p_next_mad; +#endif + + AL_ENTER( AL_DBG_MAD_SVC ); + + if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + if( !p_mad_element_list || + ( p_mad_element_list->p_next && !pp_mad_failure ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + +#ifndef CL_KERNEL + /* This is a send from user mode using special QP alias */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("ib_send_mad: ual_context non-zero, TID = 0x%I64x.\n", + ((ib_mad_t*)(ib_get_mad_buf( p_mad_element_list )))->trans_id )); + status = spl_qp_mad_send( h_mad_svc, p_mad_element_list, + pp_mad_failure ); + AL_EXIT( AL_DBG_MAD_SVC ); + return status; +#else + /* Post each send on the list. */ + p_cur_mad = p_mad_element_list; + while( p_cur_mad ) + { + p_next_mad = p_cur_mad->p_next; + + /* Get an element to track the send. */ + h_send = get_mad_send( PARENT_STRUCT( p_cur_mad, + al_mad_element_t, element ) ); + if( !h_send ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("unable to get mad_send\n") ); + if( pp_mad_failure ) + *pp_mad_failure = p_cur_mad; + return IB_INSUFFICIENT_RESOURCES; + } + + /* Initialize the MAD for sending. */ + status = __init_send_mad( h_mad_svc, h_send, p_cur_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("init_send_mad failed: %s\n", + ib_get_err_str(status)) ); + put_mad_send( h_send ); + if( pp_mad_failure ) + *pp_mad_failure = p_cur_mad; + return status; + } + + /* Add the MADs to our list. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + cl_qlist_insert_tail( &h_mad_svc->send_list, + (cl_list_item_t*)&h_send->pool_item ); + + /* Post the MAD to the dispatcher, and check for failures. */ + ref_al_obj( &h_mad_svc->obj ); + p_cur_mad->p_next = NULL; + if( h_send->uses_rmpp ) + __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send ); + else + __queue_mad_wr( h_mad_svc->h_mad_reg, h_send ); + cl_spinlock_release( &h_mad_svc->obj.lock ); + + p_cur_mad = p_next_mad; + } + + /* + * Resume any sends that can now be sent without holding + * the mad service lock. 
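+ * (The dispatcher QP can hold back posts while it is busy; resuming
+ * only after the list manipulation is done keeps the QP's queuing
+ * logic from running under the MAD service lock.)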
+ */
+ __mad_disp_resume_send( h_mad_svc->h_mad_reg );
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return status;
+#endif
+}
+
+
+
+static ib_api_status_t
+__init_send_mad(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN const ib_mad_send_handle_t h_send,
+ IN ib_mad_element_t* const p_mad_element )
+{
+ ib_rmpp_mad_t *p_rmpp_hdr;
+ uint8_t rmpp_version;
+ ib_api_status_t status;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ /* Initialize tracking the send. */
+ h_send->p_send_mad = p_mad_element;
+ h_send->retry_time = MAX_TIME;
+ h_send->retry_cnt = p_mad_element->retry_cnt;
+
+ /* See if the send uses RMPP. */
+ h_send->uses_rmpp = __does_send_req_rmpp( h_mad_svc->svc_type,
+ p_mad_element, &rmpp_version );
+ if( h_send->uses_rmpp )
+ {
+ /* The RMPP header is present. */
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("RMPP is activated\n") );
+ p_rmpp_hdr = (ib_rmpp_mad_t*)p_mad_element->p_mad_buf;
+
+ /* We only support version 1. */
+ if( rmpp_version != DEFAULT_RMPP_VERSION )
+ {
+ AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("unsupported version\n") );
+ return IB_INVALID_SETTING;
+ }
+
+ p_rmpp_hdr->rmpp_version = rmpp_version;
+ p_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_DATA;
+ ib_rmpp_set_resp_time( p_rmpp_hdr, IB_RMPP_NO_RESP_TIME );
+ p_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;
+ /*
+ * The segment number, flags, and payload size are set when
+ * sending, so that they are set correctly when issuing retries.
+ */
+
+ h_send->ack_seg = 0;
+ h_send->seg_limit = 1;
+ h_send->cur_seg = 1;
+ /* SA RMPP MADs use a different data size and header size. */
+ if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ h_send->total_seg = ( (p_mad_element->size - IB_SA_MAD_HDR_SIZE) +
+ (IB_SA_DATA_SIZE - 1) ) / IB_SA_DATA_SIZE;
+ }
+ else
+ {
+ h_send->total_seg = ( (p_mad_element->size - MAD_RMPP_HDR_SIZE) +
+ (MAD_RMPP_DATA_SIZE - 1) ) / MAD_RMPP_DATA_SIZE;
+ }
+ /* Even when there is no data, we still need one segment. */
+ h_send->total_seg = h_send->total_seg ? h_send->total_seg : 1;
+ }
+
+ /*
+ * See if we need to create the address vector for the user. An AV is
+ * also created for local sends so the SLID and GRH are available if a
+ * trap is generated.
+ */
+ if( !p_mad_element->h_av )
+ {
+ status = __create_send_av( h_mad_svc, h_send );
+ if( status != IB_SUCCESS )
+ {
+ return status;
+ }
+ }
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return IB_SUCCESS;
+}
+
+
+
+static ib_api_status_t
+__create_send_av(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_send_handle_t h_send )
+{
+ ib_av_attr_t av_attr;
+ ib_mad_element_t *p_mad_element;
+
+ p_mad_element = h_send->p_send_mad;
+
+ av_attr.port_num = h_mad_svc->port_num;
+
+ av_attr.sl = p_mad_element->remote_sl;
+ av_attr.dlid = p_mad_element->remote_lid;
+
+ av_attr.grh_valid = p_mad_element->grh_valid;
+ if( av_attr.grh_valid )
+ av_attr.grh = *p_mad_element->p_grh;
+
+ av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
+ av_attr.path_bits = p_mad_element->path_bits;
+
+ return ib_create_av( h_mad_svc->h_pd, &av_attr, &h_send->h_av );
+}
+
+
+
+static boolean_t
+__does_send_req_rmpp(
+ IN const ib_mad_svc_type_t mad_svc_type,
+ IN const ib_mad_element_t* const p_mad_element,
+ OUT uint8_t *p_rmpp_version )
+{
+ switch( mad_svc_type )
+ {
+ case IB_MAD_SVC_DEFAULT:
+ case IB_MAD_SVC_RMPP:
+ /* Internally generated MADs do not use RMPP. */
+ if( __is_internal_send( mad_svc_type, p_mad_element ) )
+ return FALSE;
+
+ /* If the MAD has the version number set, just return it.
+ */
+ if( p_mad_element->rmpp_version )
+ {
+ *p_rmpp_version = p_mad_element->rmpp_version;
+ return TRUE;
+ }
+
+ /* If the class is well known and uses RMPP, use the default version. */
+ if( p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ switch( p_mad_element->p_mad_buf->method )
+ {
+ case IB_MAD_METHOD_GETTABLE_RESP:
+ case IB_MAD_METHOD_GETMULTI:
+ case IB_MAD_METHOD_GETMULTI_RESP:
+ *p_rmpp_version = DEFAULT_RMPP_VERSION;
+ return TRUE;
+
+ default:
+ return FALSE;
+ }
+ }
+
+ /* RMPP is not active for this MAD. */
+ return FALSE;
+
+ default:
+ return FALSE;
+ }
+}
+
+
+
+/*
+ * Sends the next RMPP segment of an RMPP transfer.
+ */
+static void
+__queue_rmpp_seg(
+ IN const al_mad_reg_handle_t h_mad_reg,
+ IN ib_mad_send_handle_t h_send )
+{
+ ib_rmpp_mad_t *p_rmpp_hdr;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ CL_ASSERT( h_mad_reg && h_send );
+ CL_ASSERT( h_send->cur_seg <= h_send->seg_limit );
+
+ /* Reset information to track the send. */
+ h_send->retry_time = MAX_TIME;
+
+ /* Set the RMPP header information. */
+ p_rmpp_hdr = (ib_rmpp_mad_t*)h_send->p_send_mad->p_mad_buf;
+ p_rmpp_hdr->seg_num = cl_hton32( h_send->cur_seg );
+ p_rmpp_hdr->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
+ p_rmpp_hdr->paylen_newwin = 0;
+
+ /* See if this is the first segment that needs to be sent. */
+ if( h_send->cur_seg == 1 )
+ {
+ p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_FIRST;
+
+ /*
+ * The RMPP layer supports SA MADs by duplicating the SA header in
+ * every segment, so the actual payload length is the original MAD
+ * size plus one extra SA header per additional segment.
+ */
+ if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ /* Add sa_ext_hdr to each segment over the first one. */
+ p_rmpp_hdr->paylen_newwin = cl_hton32(
+ h_send->p_send_mad->size - MAD_RMPP_HDR_SIZE +
+ (h_send->total_seg - 1) *
+ (IB_SA_MAD_HDR_SIZE - MAD_RMPP_HDR_SIZE) );
+ }
+ else
+ {
+ /* For other RMPP packets we simply use the given MAD. */
+ p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
+ MAD_RMPP_HDR_SIZE );
+ }
+ }
+
+ /* See if this is the last segment that needs to be sent. */
+ if( h_send->cur_seg == h_send->total_seg )
+ {
+ p_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_LAST;
+
+ /* SA MADs must account for the extra header size here as well. */
+ if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
+ (h_send->cur_seg -1)*IB_SA_DATA_SIZE - MAD_RMPP_HDR_SIZE );
+ }
+ else
+ {
+ p_rmpp_hdr->paylen_newwin = cl_hton32( h_send->p_send_mad->size -
+ (h_send->cur_seg -1)*MAD_RMPP_DATA_SIZE );
+ }
+ }
+
+ /* Set the current segment to the next one. */
+ h_send->cur_seg++;
+
+ /* Send the MAD. */
+ __queue_mad_wr( h_mad_reg, h_send );
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+/*
+ * Posts a send work request to the dispatcher for a MAD send.
+ */
+static void
+__queue_mad_wr(
+ IN const al_mad_reg_handle_t h_mad_reg,
+ IN const ib_mad_send_handle_t h_send )
+{
+ ib_send_wr_t *p_send_wr;
+ al_mad_element_t *p_al_element;
+ ib_rmpp_mad_t *p_rmpp_hdr;
+ uint8_t *p_rmpp_src, *p_rmpp_dst;
+ uintn_t hdr_len, offset, max_len;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+ p_send_wr = &h_send->mad_wr.send_wr;
+
+ cl_memclr( p_send_wr, sizeof( ib_send_wr_t ) );
+
+ p_send_wr->wr_type = WR_SEND;
+ p_send_wr->send_opt = h_send->p_send_mad->send_opt;
+
+ p_al_element = PARENT_STRUCT( h_send->p_send_mad,
+ al_mad_element_t, element );
+
+ /* See if the MAD requires RMPP support.
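+ * For RMPP sends, each segment re-sends the class header followed by
+ * the next window of payload. As an illustration, assuming the usual
+ * IBA sizes (256-byte MAD block, 56-byte SA header, hence 200 data
+ * bytes per SA segment), segment N copies source bytes
+ * [56 + 200*(N-1), 56 + 200*N) into the registered send buffer.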
*/ + if( h_send->uses_rmpp && p_al_element->p_al_mad_buf ) + { +#if defined( CL_KERNEL ) + p_rmpp_dst = p_al_element->mad_buf + sizeof(ib_grh_t); +#else + p_rmpp_dst = (uint8_t*)(uintn_t)p_al_element->mad_ds.vaddr; +#endif + p_rmpp_src = (uint8_t* __ptr64)h_send->p_send_mad->p_mad_buf; + p_rmpp_hdr = (ib_rmpp_mad_t*)p_rmpp_src; + + if( h_send->p_send_mad->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM ) + hdr_len = IB_SA_MAD_HDR_SIZE; + else + hdr_len = MAD_RMPP_HDR_SIZE; + + max_len = MAD_BLOCK_SIZE - hdr_len; + + offset = hdr_len + (max_len * (cl_ntoh32( p_rmpp_hdr->seg_num ) - 1)); + + /* Copy the header into the registered send buffer. */ + cl_memcpy( p_rmpp_dst, p_rmpp_src, hdr_len ); + + /* Copy this segment's payload into the registered send buffer. */ + CL_ASSERT( h_send->p_send_mad->size != offset ); + if( (h_send->p_send_mad->size - offset) < max_len ) + { + max_len = h_send->p_send_mad->size - offset; + /* Clear unused payload. */ + cl_memclr( p_rmpp_dst + hdr_len + max_len, + MAD_BLOCK_SIZE - hdr_len - max_len ); + } + + cl_memcpy( + p_rmpp_dst + hdr_len, p_rmpp_src + offset, max_len ); + } + + p_send_wr->num_ds = 1; + p_send_wr->ds_array = &p_al_element->mad_ds; + + p_send_wr->dgrm.ud.remote_qp = h_send->p_send_mad->remote_qp; + p_send_wr->dgrm.ud.remote_qkey = h_send->p_send_mad->remote_qkey; + p_send_wr->dgrm.ud.pkey_index = h_send->p_send_mad->pkey_index; + + /* See if we created the address vector on behalf of the user. */ + if( h_send->p_send_mad->h_av ) + p_send_wr->dgrm.ud.h_av = h_send->p_send_mad->h_av; + else + p_send_wr->dgrm.ud.h_av = h_send->h_av; + + __mad_disp_queue_send( h_mad_reg, &h_send->mad_wr ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static cl_status_t +__mad_svc_find_send( + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + ib_mad_send_handle_t h_send; + + h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); + + if( h_send->p_send_mad == context ) + return CL_SUCCESS; + else + return CL_NOT_FOUND; +} + + + +ib_api_status_t +ib_cancel_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element ) +{ +#ifdef CL_KERNEL + cl_list_item_t *p_list_item; + ib_mad_send_handle_t h_send; +#else + ib_api_status_t status; +#endif + + AL_ENTER( AL_DBG_MAD_SVC ); + + if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + if( !p_mad_element ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + +#ifndef CL_KERNEL + /* This is a send from user mode using special QP alias */ + status = spl_qp_cancel_mad( h_mad_svc, p_mad_element ); + AL_EXIT( AL_DBG_MAD_SVC ); + return status; +#else + /* Search for the MAD in our MAD list. It may have already completed. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list, + __mad_svc_find_send, p_mad_element ); + + if( p_list_item == cl_qlist_end( &h_mad_svc->send_list ) ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("mad not found\n") ); + return IB_NOT_FOUND; + } + + /* Mark the MAD as having been canceled. */ + h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); + h_send->canceled = TRUE; + + /* If the MAD is active, process it in the send callback. */ + if( h_send->retry_time != MAX_TIME ) + { + /* Process the canceled MAD using the timer thread. 
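+ * Trimming the send timer to zero makes it fire immediately, so
+ * __check_send_queue completes the canceled MAD from the timer
+ * callback rather than from the caller's context.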
+ */
+ cl_timer_trim( &h_mad_svc->send_timer, 0 );
+ }
+
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return IB_SUCCESS;
+#endif
+}
+
+
+ib_api_status_t
+ib_delay_mad(
+ IN const ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_element_t* const p_mad_element,
+ IN const uint32_t delay_ms )
+{
+#ifdef CL_KERNEL
+ cl_list_item_t *p_list_item;
+ ib_mad_send_handle_t h_send;
+#endif
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ if( AL_OBJ_INVALID_HANDLE( h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ) )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+ return IB_INVALID_HANDLE;
+ }
+ if( !p_mad_element )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+ return IB_INVALID_PARAMETER;
+ }
+
+#ifndef CL_KERNEL
+ UNUSED_PARAM( p_mad_element );
+ UNUSED_PARAM( delay_ms );
+ /* TODO: support for user-mode MAD QP's. */
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return IB_UNSUPPORTED;
+#else
+ /* Search for the MAD in our MAD list. It may have already completed. */
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );
+ p_list_item = cl_qlist_find_from_head( &h_mad_svc->send_list,
+ __mad_svc_find_send, p_mad_element );
+
+ if( p_list_item == cl_qlist_end( &h_mad_svc->send_list ) )
+ {
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("MAD not found\n") );
+ return IB_NOT_FOUND;
+ }
+
+ /* Extend the send's timeout by the requested delay. */
+ h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
+
+ if( h_send->retry_time == MAX_TIME )
+ h_send->delay = delay_ms;
+ else
+ h_send->retry_time += ((uint64_t)delay_ms * 1000Ui64);
+
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return IB_SUCCESS;
+#endif
+}
+
+
+/*
+ * Process a send completion.
+ */
+static void
+__mad_svc_send_done(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN al_mad_wr_t *p_mad_wr,
+ IN ib_wc_t *p_wc )
+{
+ ib_mad_send_handle_t h_send;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+ CL_ASSERT( h_mad_svc && p_mad_wr && !p_wc->p_next );
+
+ h_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr );
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("send callback TID:0x%I64x\n",
+ __get_send_tid( h_send )) );
+
+ /* We need to synchronize access to the list as well as the MAD request. */
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );
+
+ /* Complete internally sent MADs. */
+ if( __is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, ("internal send\n") );
+ cl_qlist_remove_item( &h_mad_svc->send_list,
+ (cl_list_item_t*)&h_send->pool_item );
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ ib_put_mad( h_send->p_send_mad );
+ __cleanup_mad_send( h_mad_svc, h_send );
+ return;
+ }
+
+ /* See if the send request has completed. */
+ if( __is_send_mad_done( h_send, p_wc ) )
+ {
+ /* The send has completed. */
+ cl_qlist_remove_item( &h_mad_svc->send_list,
+ (cl_list_item_t*)&h_send->pool_item );
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+
+ /* Report the send as canceled only if we don't have the response. */
+ if( h_send->canceled && !h_send->p_resp_mad )
+ __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );
+ else
+ __notify_send_comp( h_mad_svc, h_send, p_wc->status );
+ }
+ else
+ {
+ /* See if this is an RMPP MAD, and we should send more segments. */
+ if( h_send->uses_rmpp && (h_send->cur_seg <= h_send->seg_limit) )
+ {
+ /* Send the next segment.
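+ * (cur_seg trails seg_limit while the receiver-granted window is
+ * still open; __queue_rmpp_seg stamps the next segment number into
+ * the RMPP header and re-posts the work request.)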
*/ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("sending next RMPP segment for TID:0x%I64x\n", + __get_send_tid( h_send )) ); + + __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send ); + } + else + { + /* Continue waiting for a response or ACK. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("waiting for response for TID:0x%I64x\n", + __get_send_tid( h_send )) ); + + __set_retry_time( h_send ); + cl_timer_trim( &h_mad_svc->send_timer, + h_send->p_send_mad->timeout_ms ); + } + cl_spinlock_release( &h_mad_svc->obj.lock ); + } + + /* + * Resume any sends that can now be sent without holding + * the mad service lock. + */ + __mad_disp_resume_send( h_mad_svc->h_mad_reg ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +/* + * Notify the user of a completed send operation. + */ +static void +__notify_send_comp( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_send_handle_t h_send, + IN ib_wc_status_t wc_status ) +{ + AL_ENTER( AL_DBG_MAD_SVC ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("completing TID:0x%I64x\n", + __get_send_tid( h_send )) ); + + h_send->p_send_mad->status = wc_status; + + /* Notify the user of a received response, if one exists. */ + if( h_send->p_resp_mad ) + { + h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context, + h_send->p_resp_mad ); + } + + /* The transaction has completed, return the send MADs. */ + h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context, + h_send->p_send_mad ); + + __cleanup_mad_send( h_mad_svc, h_send ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +/* + * Return a send MAD tracking structure to its pool and cleanup any resources + * it may have allocated. + */ +static void +__cleanup_mad_send( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_send_handle_t h_send ) +{ + /* Release any address vectors that we may have created. */ + if( h_send->h_av ) + { + ib_destroy_av( h_send->h_av ); + } + + /* Return the send MAD tracking structure to its pool. */ + put_mad_send( h_send ); + + /* We no longer need to reference the MAD service. */ + deref_al_obj( &h_mad_svc->obj ); +} + + + +static boolean_t +__is_send_mad_done( + IN ib_mad_send_handle_t h_send, + IN ib_wc_t *p_wc ) +{ + AL_ENTER( AL_DBG_MAD_SVC ); + + /* Complete the send if the request failed. */ + if( p_wc->status != IB_WCS_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("y-send failed\n" ) ); + return TRUE; + } + + /* Complete the send if it has been canceled. */ + if( h_send->canceled ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("y-send was canceled\n") ); + return TRUE; + } + + /* Complete the send if we have its response. */ + if( h_send->p_resp_mad ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("y-response received\n") ); + return TRUE; + } + + /* RMPP sends cannot complete until all segments have been acked. */ + if( h_send->uses_rmpp && (h_send->ack_seg < h_send->total_seg) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("more RMPP segments to send\n") ); + return FALSE; + } + + /* + * All segments of this send have been sent. + * The send has completed if we are not waiting for a response. + */ + if( h_send->p_send_mad->resp_expected ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("no-waiting on response\n") ); + return FALSE; + } + else + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, + ("send completed\n") ); + return TRUE; + } +} + + + +/* + * Try to find a send that matches the received response. 
This call must
+ * be synchronized with access to the MAD service send_list.
+ */
+static ib_mad_send_handle_t
+__mad_svc_match_recv(
+ IN const ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_element_t* const p_recv_mad )
+{
+ ib_mad_t *p_recv_hdr;
+ cl_list_item_t *p_list_item;
+ ib_mad_send_handle_t h_send;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ p_recv_hdr = p_recv_mad->p_mad_buf;
+
+ /* Search the send list for a matching request. */
+ for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
+ p_list_item != cl_qlist_end( &h_mad_svc->send_list );
+ p_list_item = cl_qlist_next( p_list_item ) )
+ {
+ h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
+
+ /* Match on the transaction ID, ignoring internally generated sends. */
+ if( (p_recv_hdr->trans_id == h_send->mad_wr.client_tid) &&
+ !__is_internal_send( h_mad_svc->svc_type, h_send->p_send_mad ) )
+ {
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return h_send;
+ }
+ }
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return NULL;
+}
+
+
+
+static void
+__mad_svc_recv_done(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_element_t *p_mad_element )
+{
+ ib_mad_t *p_mad_hdr;
+ cl_status_t cl_status;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ p_mad_hdr = ib_get_mad_buf( p_mad_element );
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("recv done TID:0x%I64x\n",
+ p_mad_hdr->trans_id) );
+
+ /* Raw MAD services get all receives. */
+ if( h_mad_svc->svc_type == IB_MAD_SVC_RAW )
+ {
+ /* Report the receive. */
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("recv TID:0x%I64x\n", p_mad_hdr->trans_id) );
+ h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
+ p_mad_element );
+ return;
+ }
+
+ /* Fully reassemble received MADs before completing them. */
+ if( __recv_requires_rmpp( h_mad_svc->svc_type, p_mad_element ) )
+ {
+ /* Reassemble the receive. */
+ cl_status = __do_rmpp_recv( h_mad_svc, &p_mad_element );
+ if( cl_status != CL_SUCCESS )
+ {
+ /* The reassembly is not done. */
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("no RMPP receive to report\n") );
+ return;
+ }
+
+ /*
+ * Get the header to the MAD element to report to the user. This
+ * will be a MAD element received earlier.
+ */
+ p_mad_hdr = ib_get_mad_buf( p_mad_element );
+ }
+
+ /*
+ * If the response indicates that the responder was busy, continue
+ * retrying the request.
+ */
+ if( p_mad_hdr->status & IB_MAD_STATUS_BUSY )
+ {
+ AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+ ("responder busy TID:0x%I64x\n", p_mad_hdr->trans_id) );
+ ib_put_mad( p_mad_element );
+ return;
+ }
+
+ /*
+ * See if the MAD was sent in response to a previously sent MAD. Note
+ * that trap repress messages are responses, even though the response
+ * bit isn't set.
+ */
+ if( ib_mad_is_response( p_mad_hdr ) ||
+ (p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS) )
+ {
+ /* Process the received response. */
+ __process_recv_resp( h_mad_svc, p_mad_element );
+ }
+ else
+ {
+ /* Report the receive. */
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("unsol recv TID:0x%I64x\n",
+ p_mad_hdr->trans_id) );
+ h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
+ p_mad_element );
+ }
+ AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+/*
+ * A MAD was received in response to a send. Find the corresponding send
+ * and process the receive completion.
+ */
+static void
+__process_recv_resp(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_element_t *p_mad_element )
+{
+ ib_mad_t *p_mad_hdr;
+ ib_mad_send_handle_t h_send;
+
+ /*
+ * Try to find the send.
The send may have already timed out or
+ * have been canceled, so we need to search for it.
+ */
+ AL_ENTER( AL_DBG_MAD_SVC );
+ p_mad_hdr = ib_get_mad_buf( p_mad_element );
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );
+
+ h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element );
+ if( !h_send )
+ {
+ /* A matching send was not found. */
+ AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC,
+ ("unmatched resp TID:0x%I64x\n", p_mad_hdr->trans_id) );
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ ib_put_mad( p_mad_element );
+ return;
+ }
+
+ /* We've found the matching send. */
+ h_send->p_send_mad->status = IB_WCS_SUCCESS;
+
+ /* Record the send contexts with the receive. */
+ p_mad_element->send_context1 = (void* __ptr64)h_send->p_send_mad->context1;
+ p_mad_element->send_context2 = (void* __ptr64)h_send->p_send_mad->context2;
+
+ if( h_send->retry_time == MAX_TIME )
+ {
+ /* The send is currently active. Do not report it. */
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
+ ("resp send active TID:0x%I64x\n", p_mad_hdr->trans_id) );
+ /* Handle a duplicate receive happening before the send completion is processed. */
+ if( h_send->p_resp_mad )
+ ib_put_mad( h_send->p_resp_mad );
+ h_send->p_resp_mad = p_mad_element;
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ }
+ else
+ {
+ AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC,
+ ("resp received TID:0x%I64x\n", p_mad_hdr->trans_id) );
+
+ /* Report the send completion below. */
+ cl_qlist_remove_item( &h_mad_svc->send_list,
+ (cl_list_item_t*)&h_send->pool_item );
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+
+ /* Report the receive. */
+ h_mad_svc->pfn_user_recv_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
+ p_mad_element );
+
+ /* Report the send completion. */
+ h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context,
+ h_send->p_send_mad );
+ __cleanup_mad_send( h_mad_svc, h_send );
+ }
+ AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+/*
+ * Return TRUE if a received MAD requires RMPP processing.
+ */
+static __inline boolean_t
+__recv_requires_rmpp(
+ IN const ib_mad_svc_type_t mad_svc_type,
+ IN const ib_mad_element_t* const p_mad_element )
+{
+ ib_rmpp_mad_t *p_rmpp_mad;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+
+ switch( mad_svc_type )
+ {
+ case IB_MAD_SVC_DEFAULT:
+ /* Only subnet administration receives require RMPP. */
+ return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
+ ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
+
+ case IB_MAD_SVC_RMPP:
+ return( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
+
+ default:
+ return FALSE;
+ }
+}
+
+
+
+/*
+ * Return TRUE if the MAD was issued by AL itself.
+ */
+static __inline boolean_t
+__is_internal_send(
+ IN const ib_mad_svc_type_t mad_svc_type,
+ IN const ib_mad_element_t* const p_mad_element )
+{
+ ib_rmpp_mad_t *p_rmpp_mad;
+
+ p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );
+
+ /* See if the MAD service issues internal MADs. */
+ switch( mad_svc_type )
+ {
+ case IB_MAD_SVC_DEFAULT:
+ /* Internal sends are RMPP MADs that carry no data (ACK/STOP/ABORT). */
+ return( (p_rmpp_mad->common_hdr.mgmt_class == IB_MCLASS_SUBN_ADM) &&
+ (p_rmpp_mad->rmpp_type &&
+ (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) ) );
+
+ case IB_MAD_SVC_RMPP:
+ /* The RMPP header is present. Check its type. */
+ return( (p_rmpp_mad->rmpp_type) &&
+ (p_rmpp_mad->rmpp_type != IB_RMPP_TYPE_DATA) );
+
+ default:
+ return FALSE;
+ }
+}
+
+
+
+/*
+ * Fully reassemble a received MAD.
Return CL_SUCCESS once all segments of
+ * the MAD have been received, returning the fully reassembled MAD.
+ */
+static cl_status_t
+__do_rmpp_recv(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN OUT ib_mad_element_t **pp_mad_element )
+{
+ ib_rmpp_mad_t *p_rmpp_mad;
+ cl_status_t cl_status;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+
+ p_rmpp_mad = ib_get_mad_buf( *pp_mad_element );
+ CL_ASSERT( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_ACTIVE ) );
+
+ /* Perform the correct operation based on the RMPP MAD type. */
+ switch( p_rmpp_mad->rmpp_type )
+ {
+ case IB_RMPP_TYPE_DATA:
+ cl_status = __process_rmpp_data( h_mad_svc, pp_mad_element );
+ /* Return the received element back to its MAD pool if not needed. */
+ if( (cl_status != CL_SUCCESS) && (cl_status != CL_NOT_DONE) )
+ {
+ ib_put_mad( *pp_mad_element );
+ }
+ break;
+
+ case IB_RMPP_TYPE_ACK:
+ /* Process the ACK. */
+ __process_rmpp_ack( h_mad_svc, *pp_mad_element );
+ ib_put_mad( *pp_mad_element );
+ cl_status = CL_COMPLETED;
+ break;
+
+ case IB_RMPP_TYPE_STOP:
+ case IB_RMPP_TYPE_ABORT:
+ default:
+ /* Process the ABORT or STOP. */
+ __process_rmpp_nack( h_mad_svc, *pp_mad_element );
+ ib_put_mad( *pp_mad_element );
+ cl_status = CL_REJECT;
+ break;
+ }
+
+ AL_EXIT( AL_DBG_MAD_SVC );
+ return cl_status;
+}
+
+
+
+/*
+ * Process an RMPP DATA message. Reassemble the received data. If the
+ * received MAD is fully reassembled, this call returns CL_SUCCESS.
+ */
+static cl_status_t
+__process_rmpp_data(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN OUT ib_mad_element_t **pp_mad_element )
+{
+ ib_mad_element_t *p_rmpp_resp_mad = NULL;
+ al_mad_rmpp_t *p_rmpp;
+ ib_rmpp_mad_t *p_rmpp_hdr;
+ uint32_t cur_seg;
+ cl_status_t cl_status;
+ ib_api_status_t status;
+
+ p_rmpp_hdr = ib_get_mad_buf( *pp_mad_element );
+ CL_ASSERT( p_rmpp_hdr->rmpp_type == IB_RMPP_TYPE_DATA );
+
+ /* Try to find a receive already being reassembled. */
+ cl_spinlock_acquire( &h_mad_svc->obj.lock );
+ p_rmpp = __find_rmpp( h_mad_svc, *pp_mad_element );
+ if( !p_rmpp )
+ {
+ /* This receive is not being reassembled. It should be the first segment. */
+ if( cl_ntoh32( p_rmpp_hdr->seg_num ) != 1 )
+ {
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ return CL_NOT_FOUND;
+ }
+
+ /* Start tracking the new reassembly. */
+ p_rmpp = __get_mad_rmpp( h_mad_svc, *pp_mad_element );
+ if( !p_rmpp )
+ {
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+ return CL_INSUFFICIENT_MEMORY;
+ }
+ }
+
+ /* Verify that we just received the expected segment. */
+ cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );
+ if( cur_seg == p_rmpp->expected_seg )
+ {
+ /* Copy the new segment's data into our reassembly buffer. */
+ cl_status = __process_segment( h_mad_svc, p_rmpp,
+ pp_mad_element, &p_rmpp_resp_mad );
+
+ /* See if the RMPP is done. */
+ if( cl_status == CL_SUCCESS )
+ {
+ /* Stop tracking the reassembly. */
+ __put_mad_rmpp( h_mad_svc, p_rmpp );
+ }
+ else if( cl_status == CL_NOT_DONE )
+ {
+ /* Start the reassembly timer. */
+ cl_timer_trim( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );
+ }
+ }
+ else if( cur_seg < p_rmpp->expected_seg )
+ {
+ /* We received an old segment. Resend the last ACK. */
+ p_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
+ cl_status = CL_DUPLICATE;
+ }
+ else
+ {
+ /* The sender is confused; ignore this MAD. We could ABORT here. */
+ cl_status = CL_OVERRUN;
+ }
+
+ cl_spinlock_release( &h_mad_svc->obj.lock );
+
+ /*
+ * Send any response MAD (ACK, ABORT, etc.) to the sender. Note that
+ * we are currently in the callback from the MAD dispatcher.
The + * dispatcher holds a reference on the MAD service while in the callback, + * preventing the MAD service from being destroyed. This allows the + * call to ib_send_mad() to proceed even if the user tries to destroy + * the MAD service. + */ + if( p_rmpp_resp_mad ) + { + status = ib_send_mad( h_mad_svc, p_rmpp_resp_mad, NULL ); + if( status != IB_SUCCESS ) + { + /* Return the MAD. The MAD is considered dropped. */ + ib_put_mad( p_rmpp_resp_mad ); + } + } + + return cl_status; +} + + + +/* + * Locate an existing RMPP MAD being reassembled. Return NULL if one is not + * found. This call assumes access to the recv_list is synchronized. + */ +static al_mad_rmpp_t* +__find_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN OUT ib_mad_element_t *p_mad_element ) +{ + al_mad_rmpp_t *p_rmpp; + cl_list_item_t *p_list_item; + ib_mad_t *p_mad_hdr, *p_mad_hdr2; + ib_mad_element_t *p_mad_element2; + + + p_mad_hdr = ib_get_mad_buf( p_mad_element ); + + /* Search all MADs being reassembled. */ + for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list ); + p_list_item != cl_qlist_end( &h_mad_svc->recv_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item ); + + p_mad_element2 = p_rmpp->p_mad_element; + p_mad_hdr2 = ib_get_mad_buf( p_mad_element2 ); + + /* See if the incoming MAD matches - what a check. */ + if( (p_mad_hdr->trans_id == p_mad_hdr2->trans_id) && + (p_mad_hdr->class_ver == p_mad_hdr2->class_ver) && + (p_mad_hdr->mgmt_class == p_mad_hdr2->mgmt_class) && + (p_mad_hdr->method == p_mad_hdr2->method) && + (p_mad_element->remote_lid == p_mad_element2->remote_lid) && + (p_mad_element->remote_qp == p_mad_element2->remote_qp) ) + { + return p_rmpp; + } + } + + return NULL; +} + + + +/* + * Acquire a new RMPP tracking structure. This call assumes access to + * the recv_list is synchronized. + */ +static al_mad_rmpp_t* +__get_mad_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ) +{ + al_mad_rmpp_t *p_rmpp; + al_mad_element_t *p_al_element; + + p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* Get an RMPP tracking structure. */ + p_rmpp = get_mad_rmpp( p_al_element ); + if( !p_rmpp ) + return NULL; + + /* Initialize the tracking information. */ + p_rmpp->expected_seg = 1; + p_rmpp->seg_limit = 1; + p_rmpp->inactive = FALSE; + p_rmpp->p_mad_element = p_mad_element; + + /* Insert the tracking structure into the reassembly list. */ + cl_qlist_insert_tail( &h_mad_svc->recv_list, + (cl_list_item_t*)&p_rmpp->pool_item ); + + return p_rmpp; +} + + + +/* + * Return the RMPP tracking structure. This call assumes access to + * the recv_list is synchronized. + */ +static void +__put_mad_rmpp( + IN ib_mad_svc_handle_t h_mad_svc, + IN al_mad_rmpp_t *p_rmpp ) +{ + /* Remove the tracking structure from the reassembly list. */ + cl_qlist_remove_item( &h_mad_svc->recv_list, + (cl_list_item_t*)&p_rmpp->pool_item ); + + /* Return the RMPP tracking structure. */ + put_mad_rmpp( p_rmpp ); +} + + + +/* + * Process a received RMPP segment. Copy the data into our receive buffer, + * update the expected segment, and send an ACK if needed. 
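+ *
+ * Size bookkeeping illustration (assuming the usual IBA sizes, where
+ * MAD_RMPP_HDR_SIZE is 36 and MAD_RMPP_DATA_SIZE is 220): once the
+ * LAST segment arrives, the reassembled element size becomes
+ * 36 + 220*(cur_seg - 1) + paylen, paylen being the PayloadLength
+ * carried by that final segment.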
+ */
+static cl_status_t
+__process_segment(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN al_mad_rmpp_t *p_rmpp,
+ IN OUT ib_mad_element_t **pp_mad_element,
+ OUT ib_mad_element_t **pp_rmpp_resp_mad )
+{
+ ib_rmpp_mad_t *p_rmpp_hdr;
+ uint32_t cur_seg;
+ ib_api_status_t status;
+ cl_status_t cl_status;
+ uint8_t *p_dst_seg, *p_src_seg;
+ uint32_t paylen;
+
+ CL_ASSERT( h_mad_svc && p_rmpp && pp_mad_element && *pp_mad_element );
+
+ p_rmpp_hdr = (ib_rmpp_mad_t*)(*pp_mad_element)->p_mad_buf;
+ cur_seg = cl_ntoh32( p_rmpp_hdr->seg_num );
+ CL_ASSERT( cur_seg == p_rmpp->expected_seg );
+ CL_ASSERT( cur_seg <= p_rmpp->seg_limit );
+
+ /* See if the receive has been fully reassembled. */
+ if( ib_rmpp_is_flag_set( p_rmpp_hdr, IB_RMPP_FLAG_LAST ) )
+ cl_status = CL_SUCCESS;
+ else
+ cl_status = CL_NOT_DONE;
+
+ /* Save the payload length for later use. */
+ paylen = cl_ntoh32(p_rmpp_hdr->paylen_newwin);
+
+ /* The element of the first segment starts the reassembly. */
+ if( *pp_mad_element != p_rmpp->p_mad_element )
+ {
+ /* SA MADs carry the larger SA header. */
+ if( (*pp_mad_element)->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ /* Copy the received data into our reassembly buffer. */
+ p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +
+ IB_SA_MAD_HDR_SIZE;
+ p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +
+ IB_SA_MAD_HDR_SIZE + IB_SA_DATA_SIZE * (cur_seg - 1);
+ cl_memcpy( p_dst_seg, p_src_seg, IB_SA_DATA_SIZE );
+ }
+ else
+ {
+ /* Copy the received data into our reassembly buffer. */
+ p_src_seg = ((uint8_t* __ptr64)(*pp_mad_element)->p_mad_buf) +
+ MAD_RMPP_HDR_SIZE;
+ p_dst_seg = ((uint8_t* __ptr64)p_rmpp->p_mad_element->p_mad_buf) +
+ MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1);
+ cl_memcpy( p_dst_seg, p_src_seg, MAD_RMPP_DATA_SIZE );
+ }
+ /* This MAD is no longer needed. */
+ ib_put_mad( *pp_mad_element );
+ }
+
+ /* Update the size of the MAD if this is the last segment. */
+ if ( cl_status == CL_SUCCESS )
+ {
+ if (p_rmpp->p_mad_element->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_ADM )
+ {
+ /*
+ * Note that the paylen includes one extra SA header size,
+ * so we only add the RMPP header size for the first segment.
+ */
+ p_rmpp->p_mad_element->size =
+ MAD_RMPP_HDR_SIZE + IB_SA_DATA_SIZE *(cur_seg - 1) + paylen;
+ }
+ else
+ {
+ p_rmpp->p_mad_element->size =
+ MAD_RMPP_HDR_SIZE + MAD_RMPP_DATA_SIZE * (cur_seg - 1) + paylen;
+ }
+ }
+
+ /*
+ * We are ready to accept the next segment. We increment expected segment
+ * even if we're done, so that ACKs correctly report the last segment.
+ */
+ p_rmpp->expected_seg++;
+
+ /* Mark the reassembly inactive if the MAD service is being destroyed. */
+ p_rmpp->inactive = (h_mad_svc->obj.state == CL_DESTROYING);
+
+ /* See if the receive has been fully reassembled. */
+ if( cl_status == CL_NOT_DONE && cur_seg == p_rmpp->seg_limit )
+ {
+ /* Allocate more segments for the incoming receive. */
+ status = al_resize_mad( p_rmpp->p_mad_element,
+ p_rmpp->p_mad_element->size + AL_RMPP_WINDOW * MAD_RMPP_DATA_SIZE );
+
+ /* If we couldn't allocate a new buffer, just drop the MAD. */
+ if( status == IB_SUCCESS )
+ {
+ /* Send an ACK indicating that more space is available. */
+ p_rmpp->seg_limit += AL_RMPP_WINDOW;
+ *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
+ }
+ }
+ else if( cl_status == CL_SUCCESS )
+ {
+ /* Return the element referencing the reassembled MAD.
+ */
+ *pp_mad_element = p_rmpp->p_mad_element;
+ *pp_rmpp_resp_mad = __get_rmpp_ack( p_rmpp );
+ }
+
+ return cl_status;
+}
+
+
+
+/*
+ * Get an ACK message to return to the sender of an RMPP MAD.
+ */
+static ib_mad_element_t*
+__get_rmpp_ack(
+ IN al_mad_rmpp_t *p_rmpp )
+{
+ ib_mad_element_t *p_mad_element;
+ al_mad_element_t *p_al_element;
+ ib_api_status_t status;
+ ib_rmpp_mad_t *p_ack_rmpp_hdr, *p_data_rmpp_hdr;
+
+ /* Get a MAD to carry the ACK. */
+ p_al_element = PARENT_STRUCT( p_rmpp->p_mad_element,
+ al_mad_element_t, element );
+ status = ib_get_mad( p_al_element->pool_key, MAD_BLOCK_SIZE,
+ &p_mad_element );
+ if( status != IB_SUCCESS )
+ {
+ /* Just return. The ACK will be treated as being dropped. */
+ return NULL;
+ }
+
+ /* Format the ACK. */
+ p_ack_rmpp_hdr = ib_get_mad_buf( p_mad_element );
+ p_data_rmpp_hdr = ib_get_mad_buf( p_rmpp->p_mad_element );
+
+ __init_reply_element( p_mad_element, p_rmpp->p_mad_element );
+
+ /* Copy the MAD common header. */
+ cl_memcpy( &p_ack_rmpp_hdr->common_hdr, &p_data_rmpp_hdr->common_hdr,
+ sizeof( ib_mad_t ) );
+
+ /* Reset the status (in case the BUSY bit is set). */
+ p_ack_rmpp_hdr->common_hdr.status = 0;
+
+ /* Flip the response bit in the method. */
+ p_ack_rmpp_hdr->common_hdr.method ^= IB_MAD_METHOD_RESP_MASK;
+
+ p_ack_rmpp_hdr->rmpp_version = p_data_rmpp_hdr->rmpp_version;
+ p_ack_rmpp_hdr->rmpp_type = IB_RMPP_TYPE_ACK;
+ ib_rmpp_set_resp_time( p_ack_rmpp_hdr, IB_RMPP_NO_RESP_TIME );
+ p_ack_rmpp_hdr->rmpp_flags |= IB_RMPP_FLAG_ACTIVE;
+ p_ack_rmpp_hdr->rmpp_status = IB_RMPP_STATUS_SUCCESS;
+
+ p_ack_rmpp_hdr->seg_num = cl_hton32( p_rmpp->expected_seg - 1 );
+
+ if (p_rmpp->seg_limit == p_rmpp->expected_seg - 1 &&
+ !ib_rmpp_is_flag_set( p_data_rmpp_hdr, IB_RMPP_FLAG_LAST ) )
+ {
+ p_ack_rmpp_hdr->paylen_newwin = cl_hton32( 1 + p_rmpp->seg_limit);
+ }
+ else
+ {
+ p_ack_rmpp_hdr->paylen_newwin = cl_hton32( p_rmpp->seg_limit );
+ }
+
+ return p_mad_element;
+}
+
+
+
+/*
+ * Copy necessary data between MAD elements to allow the destination
+ * element to be sent to the sender of the source element.
+ */
+static void
+__init_reply_element(
+ IN ib_mad_element_t *p_dst_element,
+ IN ib_mad_element_t *p_src_element )
+{
+ p_dst_element->remote_qp = p_src_element->remote_qp;
+ p_dst_element->remote_qkey = p_src_element->remote_qkey;
+
+ if( p_src_element->grh_valid )
+ {
+ p_dst_element->grh_valid = p_src_element->grh_valid;
+ cl_memcpy( p_dst_element->p_grh, p_src_element->p_grh,
+ sizeof( ib_grh_t ) );
+ }
+
+ p_dst_element->remote_lid = p_src_element->remote_lid;
+ p_dst_element->remote_sl = p_src_element->remote_sl;
+ p_dst_element->pkey_index = p_src_element->pkey_index;
+ p_dst_element->path_bits = p_src_element->path_bits;
+}
+
+
+
+/*
+ * Process an RMPP ACK message. Continue sending additional segments.
+ */
+static void
+__process_rmpp_ack(
+ IN ib_mad_svc_handle_t h_mad_svc,
+ IN ib_mad_element_t *p_mad_element )
+{
+ ib_mad_send_handle_t h_send;
+ ib_rmpp_mad_t *p_rmpp_mad;
+ boolean_t send_done = FALSE;
+ ib_wc_status_t wc_status = IB_WCS_SUCCESS;
+
+ AL_ENTER( AL_DBG_MAD_SVC );
+ p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element );
+
+ /*
+ * Search for the send. The send may have timed out, been canceled,
+ * or received a response.
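+ * The ACK is matched against the send list by client TID, just like a
+ * response; a stale ACK arriving after the transaction has completed
+ * simply finds no send and is dropped.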
+ */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element ); + if( !h_send ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("ACK cannot find a matching send\n") ); + return; + } + + /* Drop old ACKs. */ + if( cl_ntoh32( p_rmpp_mad->seg_num ) < h_send->ack_seg ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("old ACK - being dropped\n") ); + return; + } + + /* Update the acknowledged segment and segment limit. */ + h_send->ack_seg = cl_ntoh32( p_rmpp_mad->seg_num ); + + /* Keep seg_limit <= total_seg to simplify checks. */ + if( cl_ntoh32( p_rmpp_mad->paylen_newwin ) > h_send->total_seg ) + h_send->seg_limit = h_send->total_seg; + else + h_send->seg_limit = cl_ntoh32( p_rmpp_mad->paylen_newwin ); + + /* Reset the current segment to start resending from the ACK. */ + h_send->cur_seg = h_send->ack_seg + 1; + + /* If the send is active, we will finish processing it once it completes. */ + if( h_send->retry_time == MAX_TIME ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("ACK processed, waiting for send to complete\n") ); + return; + } + + /* + * Complete the send if all segments have been ack'ed and no + * response is expected. (If the response for a send had already been + * received, we would have reported the completion regardless of the + * send having been ack'ed.) + */ + CL_ASSERT( !h_send->p_send_mad->resp_expected || !h_send->p_resp_mad ); + if( (h_send->ack_seg == h_send->total_seg) && + !h_send->p_send_mad->resp_expected ) + { + /* The send is done. All segments have been ack'ed. */ + send_done = TRUE; + } + else if( h_send->ack_seg < h_send->seg_limit ) + { + /* Send the next segment. */ + __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send ); + } + + if( send_done ) + { + /* Notify the user of a send completion or error. */ + cl_qlist_remove_item( &h_mad_svc->send_list, + (cl_list_item_t*)&h_send->pool_item ); + cl_spinlock_release( &h_mad_svc->obj.lock ); + __notify_send_comp( h_mad_svc, h_send, wc_status ); + } + else + { + /* Continue waiting for a response or a larger send window. */ + cl_spinlock_release( &h_mad_svc->obj.lock ); + } + + /* + * Resume any sends that can now be sent without holding + * the mad service lock. + */ + __mad_disp_resume_send( h_mad_svc->h_mad_reg ); + + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +/* + * Process an RMPP STOP or ABORT message. + */ +static void +__process_rmpp_nack( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ) +{ + ib_mad_send_handle_t h_send; + ib_rmpp_mad_t *p_rmpp_mad; + + AL_ENTER( AL_DBG_MAD_SVC ); + p_rmpp_mad = (ib_rmpp_mad_t*)ib_get_mad_buf( p_mad_element ); + + /* Search for the send. The send may have timed out or been canceled. */ + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + h_send = __mad_svc_match_recv( h_mad_svc, p_mad_element ); + if( !h_send ) + { + cl_spinlock_release( &h_mad_svc->obj.lock ); + return; + } + + /* If the send is active, we will finish processing it once it completes. */ + if( h_send->retry_time == MAX_TIME ) + { + h_send->canceled = TRUE; + cl_spinlock_release( &h_mad_svc->obj.lock ); + AL_EXIT( AL_DBG_MAD_SVC ); + return; + } + + /* Fail the send operation. 
+ */
+    cl_qlist_remove_item( &h_mad_svc->send_list,
+        (cl_list_item_t*)&h_send->pool_item );
+    cl_spinlock_release( &h_mad_svc->obj.lock );
+    __notify_send_comp( h_mad_svc, h_send, IB_WCS_CANCELED );
+
+    AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+static __inline void
+__set_retry_time(
+    IN ib_mad_send_handle_t h_send )
+{
+    h_send->retry_time =
+        (uint64_t)(h_send->p_send_mad->timeout_ms + h_send->delay) * 1000Ui64 +
+        cl_get_time_stamp();
+    h_send->delay = 0;
+}
+
+
+
+static void
+__send_timer_cb(
+    IN void *context )
+{
+    AL_ENTER( AL_DBG_MAD_SVC );
+
+    __check_send_queue( (ib_mad_svc_handle_t)context );
+
+    AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+/*
+ * Check the send queue for any sends that have timed out or were canceled
+ * by the user.
+ */
+static void
+__check_send_queue(
+    IN ib_mad_svc_handle_t h_mad_svc )
+{
+    ib_mad_send_handle_t h_send;
+    cl_list_item_t *p_list_item, *p_next_item;
+    uint64_t cur_time;
+    cl_qlist_t timeout_list;
+
+    AL_ENTER( AL_DBG_MAD_SVC );
+
+    /*
+     * The timeout list is used to call the user back without
+     * holding the lock on the MAD service.
+     */
+    cl_qlist_init( &timeout_list );
+    cur_time = cl_get_time_stamp();
+
+    cl_spinlock_acquire( &h_mad_svc->obj.lock );
+
+    /* Check all outstanding sends. */
+    for( p_list_item = cl_qlist_head( &h_mad_svc->send_list );
+        p_list_item != cl_qlist_end( &h_mad_svc->send_list );
+        p_list_item = p_next_item )
+    {
+        p_next_item = cl_qlist_next( p_list_item );
+        h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item );
+
+        /* Skip requests that are still active. */
+        if( h_send->retry_time == MAX_TIME )
+        {
+            AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("active TID:0x%I64x\n",
+                __get_send_tid( h_send )) );
+            continue;
+        }
+
+        /* The request is not active. See if it has been canceled. */
+        if( h_send->canceled )
+        {
+            AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("canceling TID:0x%I64x\n",
+                __get_send_tid( h_send )) );
+
+            h_send->p_send_mad->status = IB_WCS_CANCELED;
+            cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item );
+            cl_qlist_insert_tail( &timeout_list, p_list_item );
+            continue;
+        }
+
+        /* Skip requests that have not yet timed out. */
+        if( cur_time < h_send->retry_time )
+        {
+            AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("waiting on TID:0x%I64x\n",
+                __get_send_tid( h_send )) );
+
+            /* Set the retry timer to the minimum needed time, in ms. */
+            cl_timer_trim( &h_mad_svc->send_timer,
+                ((uint32_t)(h_send->retry_time - cur_time) / 1000) );
+            continue;
+        }
+
+        /* See if we need to retry the send operation. */
+        if( h_send->retry_cnt )
+        {
+            AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MAD_SVC, ("retrying TID:0x%I64x\n",
+                __get_send_tid( h_send )) );
+
+            /* Retry the send. */
+            h_send->retry_time = MAX_TIME;
+            h_send->retry_cnt--;
+
+            if( h_send->uses_rmpp )
+            {
+                if( h_send->ack_seg < h_send->seg_limit )
+                {
+                    /* Resend all unacknowledged segments. */
+                    h_send->cur_seg = h_send->ack_seg + 1;
+                    __queue_rmpp_seg( h_mad_svc->h_mad_reg, h_send );
+                }
+                else
+                {
+                    /* The send was delivered. Continue waiting. */
+                    __set_retry_time( h_send );
+                    cl_timer_trim( &h_mad_svc->send_timer,
+                        ((uint32_t)(h_send->retry_time - cur_time) / 1000) );
+                }
+            }
+            else
+            {
+                /* The work request should already be formatted properly. */
+                __mad_disp_queue_send( h_mad_svc->h_mad_reg,
+                    &h_send->mad_wr );
+            }
+            continue;
+        }
+        /* The request has timed out or failed to be retried.
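+         *
+         * A worked example of the timing involved (illustrative numbers):
+         * with timeout_ms = 500, delay = 0 and retry_cnt = 3,
+         * __set_retry_time arms retry_time = now + 500 * 1000 in the
+         * microsecond units of cl_get_time_stamp; each expiration above
+         * decrements retry_cnt and re-posts the send, so a request only
+         * reaches this point - failing with IB_WCS_TIMEOUT_RETRY_ERR
+         * below - after roughly (retry_cnt + 1) timeout periods with no
+         * response.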
*/ + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_MAD_SVC, + ("timing out TID:0x%I64x\n", __get_send_tid( h_send )) ); + + h_send->p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR; + cl_qlist_remove_item( &h_mad_svc->send_list, p_list_item ); + cl_qlist_insert_tail( &timeout_list, p_list_item ); + } + + cl_spinlock_release( &h_mad_svc->obj.lock ); + + /* + * Resume any sends that can now be sent without holding + * the mad service lock. + */ + __mad_disp_resume_send( h_mad_svc->h_mad_reg ); + + /* Report all timed out sends to the user. */ + p_list_item = cl_qlist_remove_head( &timeout_list ); + while( p_list_item != cl_qlist_end( &timeout_list ) ) + { + h_send = PARENT_STRUCT( p_list_item, al_mad_send_t, pool_item ); + + h_mad_svc->pfn_user_send_cb( h_mad_svc, (void*)h_mad_svc->obj.context, + h_send->p_send_mad ); + __cleanup_mad_send( h_mad_svc, h_send ); + p_list_item = cl_qlist_remove_head( &timeout_list ); + } + AL_EXIT( AL_DBG_MAD_SVC ); +} + + + +static void +__recv_timer_cb( + IN void *context ) +{ + ib_mad_svc_handle_t h_mad_svc; + al_mad_rmpp_t *p_rmpp; + cl_list_item_t *p_list_item, *p_next_item; + boolean_t restart_timer; + + AL_ENTER( AL_DBG_MAD_SVC ); + + h_mad_svc = (ib_mad_svc_handle_t)context; + + cl_spinlock_acquire( &h_mad_svc->obj.lock ); + + /* Check all outstanding receives. */ + for( p_list_item = cl_qlist_head( &h_mad_svc->recv_list ); + p_list_item != cl_qlist_end( &h_mad_svc->recv_list ); + p_list_item = p_next_item ) + { + p_next_item = cl_qlist_next( p_list_item ); + p_rmpp = PARENT_STRUCT( p_list_item, al_mad_rmpp_t, pool_item ); + + /* Fail all RMPP MADs that have remained inactive. */ + if( p_rmpp->inactive ) + { + ib_put_mad( p_rmpp->p_mad_element ); + __put_mad_rmpp( h_mad_svc, p_rmpp ); + } + else + { + /* Mark the RMPP as inactive. 
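+             *
+             * Reassembly timeouts therefore take two timer ticks: one pass
+             * through this routine marks an idle reassembly inactive, and
+             * the next pass (AL_REASSEMBLY_TIMEOUT later) frees it above.
+             * Any segment that arrives in between clears the flag again in
+             * __process_segment.  An illustrative timeline:
+             *
+             *   tick 1:          inactive = TRUE
+             *   segment arrives: inactive = FALSE (reassembly stays alive)
+             *   tick 2:          inactive = TRUE
+             *   tick 3:          ib_put_mad + __put_mad_rmpp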
+ */
+            p_rmpp->inactive = TRUE;
+        }
+    }
+
+    restart_timer = !cl_is_qlist_empty( &h_mad_svc->recv_list );
+    cl_spinlock_release( &h_mad_svc->obj.lock );
+
+    if( restart_timer )
+        cl_timer_start( &h_mad_svc->recv_timer, AL_REASSEMBLY_TIMEOUT );
+    AL_EXIT( AL_DBG_MAD_SVC );
+}
+
+
+
+ib_api_status_t
+ib_local_mad(
+    IN const ib_ca_handle_t h_ca,
+    IN const uint8_t port_num,
+    IN const void* const p_mad_in,
+    IN void* p_mad_out )
+{
+    ib_api_status_t status;
+
+    AL_ENTER( AL_DBG_MAD_SVC );
+
+    if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );
+        return IB_INVALID_CA_HANDLE;
+    }
+    if( !p_mad_in || !p_mad_out )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    status = al_local_mad( h_ca, port_num, NULL, p_mad_in, p_mad_out );
+
+    AL_EXIT( AL_DBG_MAD_SVC );
+    return status;
+}
+
+ib_api_status_t
+al_local_mad(
+    IN const ib_ca_handle_t h_ca,
+    IN const uint8_t port_num,
+    IN const ib_av_attr_t* p_src_av_attr,
+    IN const void* const p_mad_in,
+    IN void* p_mad_out )
+{
+    ib_api_status_t status;
+    void* p_mad_out_local = NULL;
+
+    AL_ENTER( AL_DBG_MAD_SVC );
+
+    if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") );
+        return IB_INVALID_CA_HANDLE;
+    }
+    if( !p_mad_in )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+    if( !p_mad_out )
+    {
+        /* The caller doesn't want the reply; use a scratch MAD block (256 bytes). */
+        p_mad_out_local = cl_zalloc( 256 );
+        if( !p_mad_out_local )
+        {
+            AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INSUFFICIENT_MEMORY\n") );
+            return IB_INSUFFICIENT_MEMORY;
+        }
+    }
+    else
+    {
+        p_mad_out_local = p_mad_out;
+    }
+
+    status = verbs_local_mad( h_ca, port_num, p_src_av_attr, p_mad_in, p_mad_out_local );
+
+    if( !p_mad_out )
+    {
+        cl_free( p_mad_out_local );
+    }
+
+    AL_EXIT( AL_DBG_MAD_SVC );
+    return status;
+}
+
+ib_net32_t
+al_get_user_tid(
+    IN const ib_net64_t tid64 )
+{
+    al_tid_t al_tid;
+
+    al_tid.tid64 = tid64;
+    return( al_tid.tid32.user_tid );
+}
+
+uint32_t
+al_get_al_tid(
+    IN const ib_net64_t tid64 )
+{
+    al_tid_t al_tid;
+
+    al_tid.tid64 = tid64;
+    return( cl_ntoh32( al_tid.tid32.al_tid ) );
+}
+
+void
+al_set_al_tid(
+    IN ib_net64_t* const p_tid64,
+    IN const uint32_t tid32 )
+{
+    al_tid_t *p_al_tid;
+
+    p_al_tid = (al_tid_t*)p_tid64;
+
+    if( tid32 )
+    {
+        CL_ASSERT( !p_al_tid->tid32.al_tid );
+    }
+
+    p_al_tid->tid32.al_tid = cl_hton32( tid32 );
+}
diff --git a/branches/Ndi/core/al/al_mad.h b/branches/Ndi/core/al/al_mad.h
new file mode 100644
index 00000000..84bdeee2
--- /dev/null
+++ b/branches/Ndi/core/al/al_mad.h
@@ -0,0 +1,236 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_AL_MAD_H__) +#define __IB_AL_MAD_H__ + + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "al_common.h" +#include "al_mad_pool.h" + + + +/* + * The MAD dispatcher routes completed MAD work requests to the correct + * MAD service. + */ +typedef struct _al_mad_disp +{ + al_obj_t obj; + ib_qp_handle_t h_qp; + + /* Client information. */ + cl_vector_t client_vector; + + /* + * Indicates the version of supported MADs. 1 based-index. This is + * a vector of class vectors. + */ + cl_vector_t version_vector; + +} al_mad_disp_t; + + +typedef al_mad_disp_t *al_mad_disp_handle_t; + + +typedef void +(*pfn_mad_svc_send_done_t)( + IN ib_mad_svc_handle_t h_mad_svc, + IN al_mad_wr_t *p_mad_wr, + IN ib_wc_t *p_wc ); + +typedef void +(*pfn_mad_svc_recv_done_t)( + IN ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t *p_mad_element ); + + +typedef struct _al_mad_disp_reg +{ + ib_mad_svc_handle_t h_mad_svc; + al_mad_disp_handle_t h_mad_disp; + uint32_t client_id; + atomic32_t ref_cnt; + + /* Clients must specify completion routines for user-mode support. */ + pfn_mad_svc_send_done_t pfn_send_done; + pfn_mad_svc_recv_done_t pfn_recv_done; + + /* Mgmt class supported by the client. Class 0x81 is mapped to 0x00. */ + uint8_t mgmt_class; + uint8_t mgmt_version; + boolean_t support_unsol; + +} al_mad_disp_reg_t; + + + +/* The registration handle is an index into the client_vector. */ +typedef al_mad_disp_reg_t* al_mad_reg_handle_t; + + + +ib_api_status_t +create_mad_disp( + IN al_obj_t* const p_parent_obj, + IN const ib_qp_handle_t h_qp, + IN al_mad_disp_handle_t* const ph_mad_disp ); + +void +mad_disp_send_done( + IN al_mad_disp_handle_t h_mad_disp, + IN al_mad_wr_t *p_mad_wr, + IN ib_wc_t *p_wc ); + +ib_api_status_t +mad_disp_recv_done( + IN al_mad_disp_handle_t h_mad_disp, + IN ib_mad_element_t *p_mad_element ); + + + + +/* + * MAD service used to send and receive MADs. MAD services are responsible + * for retransmissions and SAR. + */ +typedef struct _al_mad_svc +{ + al_obj_t obj; + + ib_mad_svc_type_t svc_type; + + atomic32_t ref_cnt; + + al_mad_reg_handle_t h_mad_reg; + ib_pfn_mad_comp_cb_t pfn_user_send_cb; + ib_pfn_mad_comp_cb_t pfn_user_recv_cb; + + cl_qlist_t send_list; + cl_timer_t send_timer; + + cl_qlist_t recv_list; + cl_timer_t recv_timer; + + /* The PD and port number are used to create address vectors on sends. */ + ib_pd_handle_t h_pd; + uint8_t port_num; + +} al_mad_svc_t; + + +void +free_mad_svc( + IN struct _al_obj *p_obj ); + + +/* + * Register a MAD service with a QP. 
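+ *
+ * A minimal kernel-side registration might look like the sketch below;
+ * the ib_mad_svc_t field names are assumptions taken from the public
+ * ib_al.h of this era, not definitions made by this header:
+ *
+ *   ib_mad_svc_t svc;
+ *   ib_mad_svc_handle_t h_svc;
+ *
+ *   cl_memclr( &svc, sizeof( svc ) );
+ *   svc.mad_svc_context = my_context;
+ *   svc.pfn_mad_send_cb = my_send_cb;
+ *   svc.pfn_mad_recv_cb = my_recv_cb;
+ *   svc.support_unsol = FALSE;
+ *   status = reg_mad_svc( h_qp, &svc, &h_svc );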
+ */ +ib_api_status_t +reg_mad_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ); + + +ib_api_status_t +al_local_mad( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_av_attr_t* p_av_attr, + IN const void* const p_mad_in, + IN void* p_mad_out ); + +/* + * TID management + */ +typedef union _al_tid +{ + ib_net64_t tid64; + struct _tid + { + ib_net32_t al_tid; + ib_net32_t user_tid; + } tid32; + +} al_tid_t; + + +ib_net32_t +al_get_user_tid( + IN const ib_net64_t tid64 ); + + +uint32_t +al_get_al_tid( + IN const ib_net64_t tid64 ); + + +void +al_set_al_tid( + IN ib_net64_t* const p_tid64, + IN const uint32_t tid32 ); + + + +void +build_mad_recv( + IN ib_mad_element_t* p_mad_element, + IN ib_wc_t* p_wc ); + + +ib_mad_t* +get_mad_hdr_from_wr( + IN al_mad_wr_t* const p_mad_wr ); + + +ib_api_status_t +ib_delay_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element, + IN const uint32_t delay_ms ); + + +#endif /* __IB_AL_MAD_H__ */ diff --git a/branches/Ndi/core/al/al_mad_pool.h b/branches/Ndi/core/al/al_mad_pool.h new file mode 100644 index 00000000..69544804 --- /dev/null +++ b/branches/Ndi/core/al/al_mad_pool.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined( __AL_MAD_POOL_H__ ) +#define __AL_MAD_POOL_H__ + +#include +#include +#include "al_common.h" + + +typedef struct _al_pool +{ + al_obj_t obj; /* Child of ib_al_handle_t */ +#if defined( CL_KERNEL ) + NPAGED_LOOKASIDE_LIST mad_stack; +#else + cl_qlist_t mad_stack; +#endif + cl_qlist_t key_list; + size_t max; + size_t actual; + size_t grow_size; +#if defined( CL_KERNEL ) + NPAGED_LOOKASIDE_LIST mad_send_pool; + NPAGED_LOOKASIDE_LIST mad_rmpp_pool; +#else + cl_qpool_t mad_send_pool; + cl_qpool_t mad_rmpp_pool; +#endif + +} al_pool_t; + + + +/* + * Pool key type used to distinguish between pool_keys allocated by the user + * and those that reference AL's internal MAD pool_key. + */ +typedef enum _al_key_type +{ + AL_KEY_NORMAL, + AL_KEY_ALIAS + +} al_key_type_t; + + + +typedef struct _al_pool_key +{ + al_obj_t obj; /* Not a child object. 
*/ + /* Parent of mad_reg_t */ + al_key_type_t type; + + /* + * Pool keys can be destroyed either by deregistering them, destroying + * the associated pool, or destroying the associated PD. We track the + * pool key with the AL instance in order to synchronize destruction. + */ + boolean_t in_al_list; + ib_al_handle_t h_al; + cl_list_item_t al_item; /* Chain in ib_al_t for dereg */ + cl_list_item_t pool_item; /* Chain in al_pool_t for grow */ + + ib_pool_handle_t h_pool; +#ifndef CL_KERNEL + ib_pd_handle_t h_pd; +#else + ib_mr_handle_t h_mr; + net32_t lkey; +#endif + + /* Number of MADs currently removed from pool using this key. */ + atomic32_t mad_cnt; + + /* For alias keys, maintain a reference to the actual pool key. */ + ib_pool_key_t pool_key; + +} al_pool_key_t; + + +ib_api_status_t +reg_mad_pool( + IN const ib_pool_handle_t h_pool, + IN const ib_pd_handle_t h_pd, + OUT ib_pool_key_t* const pp_pool_key ); + +/* Deregister a MAD pool key if it is of the expected type. */ +ib_api_status_t +dereg_mad_pool( + IN const ib_pool_key_t pool_key, + IN const al_key_type_t expected_type ); + + +typedef void +(* __ptr64 pfn_mad_dbg_t)(void); + +typedef struct _al_mad_element +{ + /* + * List item used to track free MADs by the MAD pool. Also used by + * the SMI and MAD QPs to track received MADs. + */ + cl_list_item_t list_item; + ib_al_handle_t h_al; /* Track out-of-pool MAD owner */ + cl_list_item_t al_item; /* Out-of-pool MAD owner chain */ + ib_mad_element_t element; + ib_pool_key_t pool_key; /* For getting mads for RMPP ACK */ + ib_local_ds_t grh_ds; /* GRH + 256 byte buffer. */ + ib_local_ds_t mad_ds; /* Registered 256-byte buffer. */ + ib_mad_t *p_al_mad_buf; /* Allocated send/recv buffer. */ +#if defined( CL_KERNEL ) + uint8_t mad_buf[MAD_BLOCK_GRH_SIZE]; +#endif + + ib_mad_element_t* __ptr64 h_proxy_element; /* For user-mode support */ + +} al_mad_element_t; + + + +ib_api_status_t +al_resize_mad( + OUT ib_mad_element_t *p_mad_element, + IN const size_t buf_size ); + + + +/* We don't have MAD array structures in the Windows kernel. */ +#if !defined( CL_KERNEL ) +typedef struct _mad_array +{ + al_obj_t obj; /* Child of al_pool_t */ + ib_pool_handle_t h_pool; + size_t sizeof_array; + void* p_data; + +} mad_array_t; +#endif + + +typedef struct _mad_item +{ + al_mad_element_t al_mad_element; +#if defined( CL_KERNEL ) + ib_pool_key_t pool_key; +#else + mad_array_t* p_mad_array; +#endif + +} mad_item_t; + + + +/* + * Work request structure to use when posting sends. + */ +typedef struct _al_mad_wr +{ + cl_list_item_t list_item; + + uint32_t client_id; + ib_net64_t client_tid; + + /* + * Work request used when sending MADs. This permits formatting the + * work request once and re-using it when issuing retries. + */ + ib_send_wr_t send_wr; + +} al_mad_wr_t; + + + +/* + * Structure used to track an outstanding send request. + */ +typedef struct _al_mad_send +{ + cl_pool_item_t pool_item; + ib_mad_element_t *p_send_mad; + ib_av_handle_t h_av; + + al_mad_wr_t mad_wr; + + /* + * Received MAD in response to a send. This is not set until + * the entire response has been received. + */ + ib_mad_element_t *p_resp_mad; + + /* Absolute time that the request should be retried. */ + uint64_t retry_time; + + /* Delay, in milliseconds, to add before the next retry. */ + uint32_t delay; + + /* Number of times that the request can be retried. */ + uint32_t retry_cnt; + boolean_t canceled; /* indicates if send was canceled */ + + /* + * SAR tracking information. 
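+     *
+     * The four counters below describe the send window.  For instance,
+     * with ack_seg = 4, seg_limit = 8 and total_seg = 12: segments 1..4
+     * have been acknowledged, 5..8 may be posted now, and 9..12 must wait
+     * until the receiver grants a larger window; the send is done once
+     * ack_seg == total_seg.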
+ */ + boolean_t uses_rmpp; + uint32_t ack_seg; /* last segment number ack'ed*/ + uint32_t cur_seg; /* current segment to send */ + uint32_t seg_limit; /* max. segment to send */ + uint32_t total_seg; /* total segments to send */ + +} al_mad_send_t; + + + +/* + * Structure used to track receiving an RMPP MAD. + */ +typedef struct _al_mad_rmpp +{ + cl_pool_item_t pool_item; + + boolean_t inactive; /* used to time out reassembly */ + + uint32_t expected_seg;/* next segment to receive */ + uint32_t seg_limit; /* upper bound of recv window */ + ib_mad_element_t *p_mad_element;/* reassembled recv */ + +} al_mad_rmpp_t; + + + +ib_mad_send_handle_t +get_mad_send( + IN const al_mad_element_t *p_mad_element ); + + +void +put_mad_send( + IN ib_mad_send_handle_t h_mad_send ); + + +al_mad_rmpp_t* +get_mad_rmpp( + IN const al_mad_element_t *p_mad_element ); + + +void +put_mad_rmpp( + IN al_mad_rmpp_t *p_rmpp ); + + +#endif // __AL_MAD_POOL_H__ diff --git a/branches/Ndi/core/al/al_mcast.c b/branches/Ndi/core/al/al_mcast.c new file mode 100644 index 00000000..89c1e3d9 --- /dev/null +++ b/branches/Ndi/core/al/al_mcast.c @@ -0,0 +1,690 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al.h" +#include "al_ca.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mcast.tmh" +#endif + +#include "al_mgr.h" +#include "al_qp.h" +#include "al_verbs.h" + +#include "ib_common.h" + + +/* + * Function prototypes. 
+ */ +static ib_api_status_t +send_join( + IN ib_mcast_t *p_mcast, + IN const ib_mcast_req_t* const p_mcast_req ); + +static void +join_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ); + +static void +join_async_cb( + IN cl_async_proc_item_t *p_item ); + +static void +leave_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ); + +static void +leave_async_cb( + IN cl_async_proc_item_t *p_item ); + +static void +__destroying_mcast( + IN al_obj_t *p_obj ); + +static void +__cleanup_mcast( + IN al_obj_t *p_obj ); + +static void +__free_mcast( + IN al_obj_t *p_obj ); + +#ifdef CL_KERNEL +static void +__cleanup_attach( + IN al_obj_t *p_obj ); + +static void +__free_attach( + IN al_obj_t *p_obj ); +#endif + + + +ib_api_status_t +al_join_mcast( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ) +{ + ib_mcast_handle_t h_mcast; + ib_api_status_t status; + cl_status_t cl_status; + boolean_t sync; + + AL_ENTER( AL_DBG_MCAST ); + + /* + * Validate the port GUID. There is no need to validate the pkey index as + * the user could change it later to make it invalid. There is also no + * need to perform any QP transitions as ib_init_dgrm_svc resets the QP and + * starts from scratch. + */ + status = get_port_num( h_qp->obj.p_ci_ca, p_mcast_req->port_guid, NULL ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("get_port_num failed, status: %s\n", ib_get_err_str(status)) ); + return status; + } + + /* Allocate a new multicast request. */ + h_mcast = cl_zalloc( sizeof( ib_mcast_t ) ); + if( !h_mcast ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("zalloc of h_mcast failed\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the AL object so we can call destroy_al_obj in case of failure. */ + construct_al_obj( &h_mcast->obj, AL_OBJ_TYPE_H_MCAST ); + + /* Check for synchronous operation. */ + h_mcast->flags = p_mcast_req->flags; + cl_event_construct( &h_mcast->event ); + sync = ( (h_mcast->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC ); + if( sync ) + { + if( !cl_is_blockable() ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Thread context not blockable\n") ); + __free_mcast( &h_mcast->obj ); + return IB_INVALID_SETTING; + } + + cl_status = cl_event_init( &h_mcast->event, TRUE ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to initialize event for sync operation\n") ); + __free_mcast( &h_mcast->obj ); + return ib_convert_cl_status( cl_status ); + } + } + + /* Initialize the AL object now. */ + status = init_al_obj( &h_mcast->obj, p_mcast_req->mcast_context, TRUE, + __destroying_mcast, __cleanup_mcast, __free_mcast ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj returned %s\n", ib_get_err_str( status )) ); + __free_mcast( &h_mcast->obj ); + return status; + } + + /* Copy the multicast context information. */ + h_mcast->pfn_mcast_cb = p_mcast_req->pfn_mcast_cb; + /* + * Copy the mcast member record so that we can leave without requiring the + * user to provide the settings. + */ + h_mcast->member_rec = p_mcast_req->member_rec; + h_mcast->port_guid = p_mcast_req->port_guid; + + /* Track the multicast with the QP instance. 
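+     *
+     * For reference, a synchronous join request reaching this point might
+     * have been built as follows (an illustrative sketch using only the
+     * ib_mcast_req_t fields this routine reads):
+     *
+     *   ib_mcast_req_t req;
+     *
+     *   cl_memclr( &req, sizeof( req ) );
+     *   req.create = TRUE;
+     *   req.flags = IB_FLAGS_SYNC;
+     *   req.port_guid = port_guid;
+     *   req.mcast_context = my_context;
+     *   req.pfn_mcast_cb = my_mcast_cb;
+     *   req.timeout_ms = 1000;
+     *   req.retry_cnt = 3;
+     *   req.member_rec.mgid = mgid;
+     *   status = al_join_mcast( h_qp, &req );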
*/ + status = attach_al_obj( &h_qp->obj, &h_mcast->obj ); + if( status != IB_SUCCESS ) + { + h_mcast->obj.pfn_destroy( &h_mcast->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Issue the MAD to the SA. */ + status = send_join( h_mcast, p_mcast_req ); + if( status == IB_SUCCESS ) + { + /* If synchronous, wait for the completion. */ + if( sync ) + { + do + { + cl_status = cl_event_wait_on( + &h_mcast->event, EVENT_NO_TIMEOUT, AL_WAIT_ALERTABLE ); + } while( cl_status == CL_NOT_DONE ); + CL_ASSERT( cl_status == CL_SUCCESS ); + } + } + else + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to send join request: %s\n", ib_get_err_str(status)) ); + h_mcast->obj.pfn_destroy( &h_mcast->obj, NULL ); + } + + /* + * Note: Don't release the reference taken in init_al_obj while we + * have the SA req outstanding. + */ + + AL_EXIT( AL_DBG_MCAST ); + return status; +} + + +static void +__destroying_mcast( + IN al_obj_t *p_obj ) +{ + ib_mcast_handle_t h_mcast; + ib_user_query_t sa_mad_data; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MCAST ); + + h_mcast = PARENT_STRUCT( p_obj, ib_mcast_t, obj ); + + if( h_mcast->state != SA_REG_STARTING && h_mcast->state != SA_REG_ACTIVE ) + { + AL_EXIT( AL_DBG_MCAST ); + return; + } + + if( h_mcast->state == SA_REG_STARTING ) + { + cl_spinlock_acquire( &h_mcast->obj.lock ); + /* Cancel all outstanding join requests. */ + h_mcast->state = SA_REG_CANCELING; +#if defined( CL_KERNEL ) + if( h_mcast->sa_reg_req.p_sa_req_svc ) + al_cancel_sa_req( &h_mcast->sa_reg_req ); +#else /* defined( CL_KERNEL ) */ + if( h_mcast->sa_reg_req.hdl ) + al_cancel_sa_req( &h_mcast->sa_reg_req ); +#endif /* defined( CL_KERNEL ) */ + cl_spinlock_release( &h_mcast->obj.lock ); + } + + /* Set the request information. */ + h_mcast->sa_dereg_req.pfn_sa_req_cb = leave_req_cb; + + /* Set the MAD attributes and component mask correctly. */ + sa_mad_data.method = IB_MAD_METHOD_DELETE; + sa_mad_data.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + sa_mad_data.attr_size = sizeof( ib_member_rec_t ); + + /* Set the component mask. */ + sa_mad_data.comp_mask = IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | IB_MCR_COMPMASK_JOIN_STATE; + + sa_mad_data.p_attr = &h_mcast->member_rec; + + ref_al_obj( &h_mcast->obj ); + status = al_send_sa_req( + &h_mcast->sa_dereg_req, h_mcast->port_guid, 500, 0, &sa_mad_data, 0 ); + if( status != IB_SUCCESS ) + deref_al_obj( &h_mcast->obj ); + + AL_EXIT( AL_DBG_MCAST ); +} + + +static void +__cleanup_mcast( + IN al_obj_t *p_obj ) +{ + ib_mcast_handle_t h_mcast; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MCAST ); + + h_mcast = PARENT_STRUCT( p_obj, ib_mcast_t, obj ); + + /* + * Detach from the multicast group to ensure that multicast messages + * are not received on this QP again. Note that we need to check for + * a valid verbs handle in case the attach failed earlier, and we are + * just calling ib_leave_mcast to notify the SA. 
+ */ + if( h_mcast->h_ci_mcast ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MCAST, + ("detaching from multicast group\n") ); + status = verbs_detach_mcast( h_mcast ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("detach failed: %s\n", ib_get_err_str(status)) ); + } + } + + AL_EXIT( AL_DBG_MCAST ); +} + + +static void +__free_mcast( + IN al_obj_t *p_obj ) +{ + ib_mcast_handle_t h_mcast; + + h_mcast = PARENT_STRUCT( p_obj, ib_mcast_t, obj ); + + cl_event_destroy( &h_mcast->event ); + destroy_al_obj( &h_mcast->obj ); + cl_free( h_mcast ); +} + + + +/* + * Format an SA request based on the user's request. + */ +static ib_api_status_t +send_join( + IN ib_mcast_t *p_mcast, + IN const ib_mcast_req_t* const p_mcast_req ) +{ + ib_user_query_t sa_mad_data; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MCAST ); + + /* Set the request information. */ + p_mcast->sa_reg_req.pfn_sa_req_cb = join_req_cb; + + ib_gid_set_default( &p_mcast->member_rec.port_gid, p_mcast_req->port_guid ); + + /* Set the MAD attributes and component mask correctly. */ + sa_mad_data.method = IB_MAD_METHOD_SET; + sa_mad_data.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + sa_mad_data.attr_size = sizeof( ib_member_rec_t ); + + /* Initialize the component mask. */ + sa_mad_data.comp_mask = IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | IB_MCR_COMPMASK_JOIN_STATE; + + if( p_mcast_req->create ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MCAST, + ("requesting creation of mcast group\n") ); + + /* Set the necessary creation components. */ + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_TCLASS | IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_FLOW | IB_MCR_COMPMASK_SL; + + /* Set the MTU mask if so requested. */ + if( p_mcast_req->member_rec.mtu ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_MTU_SEL; + if( (p_mcast_req->member_rec.mtu >> 6) != + IB_PATH_SELECTOR_LARGEST ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_MTU; + } + } + + /* Set the rate mask if so requested. */ + if( p_mcast_req->member_rec.rate ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_RATE_SEL; + if( (p_mcast_req->member_rec.rate >> 6) != + IB_PATH_SELECTOR_LARGEST ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_RATE; + } + } + + /* Set the packet lifetime mask if so requested. */ + if( p_mcast_req->member_rec.pkt_life ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_LIFE_SEL; + if( (p_mcast_req->member_rec.pkt_life >> 6) != + IB_PATH_SELECTOR_LARGEST ) + { + sa_mad_data.comp_mask |= IB_MCR_COMPMASK_LIFE; + } + } + } + + sa_mad_data.p_attr = &p_mcast->member_rec; + + p_mcast->state = SA_REG_STARTING; + status = al_send_sa_req( &p_mcast->sa_reg_req, p_mcast->port_guid, + p_mcast_req->timeout_ms, p_mcast_req->retry_cnt, &sa_mad_data, 0 ); + + AL_EXIT( AL_DBG_MCAST ); + return status; +} + + +/* + * Multicast join completion callback. + */ +static void +join_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ) +{ + ib_mcast_handle_t h_mcast; + ib_sa_mad_t *p_sa_mad; + + AL_ENTER( AL_DBG_MCAST ); + h_mcast = PARENT_STRUCT( p_sa_req, ib_mcast_t, sa_reg_req ); + + /* Record the status of the join request. */ + h_mcast->req_status = p_sa_req->status; + + if( p_mad_response ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MCAST, ("processing response\n") ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_mad_response ); + h_mcast->resp_status = p_sa_mad->status; + + /* Record the join membership information. 
*/ + if( h_mcast->req_status == IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MCAST, ("join successful\n") ); + h_mcast->member_rec = *((ib_member_rec_t*)p_sa_mad->data); + } + + /* We no longer need the response MAD. */ + ib_put_mad( p_mad_response ); + } + + /* + * Finish processing the join in the async callback context since + * we can't attach the QP to the mcast group at dispatch. + */ + h_mcast->async.pfn_callback = join_async_cb; + cl_async_proc_queue( gp_async_proc_mgr, &h_mcast->async ); + AL_EXIT( AL_DBG_MCAST ); +} + + +/* + * Process the results of a join request. This call is invoked from + * the asynchronous processing manager to allow invoking the + * VPD's attach_mcast entrypoint at passive level. + */ +static void +join_async_cb( + IN cl_async_proc_item_t *p_item ) +{ + ib_api_status_t status; + ib_mcast_handle_t h_mcast; + ib_mcast_rec_t mcast_rec; + boolean_t sync; + + AL_ENTER( AL_DBG_MCAST ); + + h_mcast = PARENT_STRUCT( p_item, ib_mcast_t, async ); + + cl_spinlock_acquire( &h_mcast->obj.lock ); +#if defined( CL_KERNEL ) + CL_ASSERT( h_mcast->sa_reg_req.p_sa_req_svc ); + h_mcast->sa_reg_req.p_sa_req_svc = NULL; +#else /* defined( CL_KERNEL ) */ + h_mcast->sa_reg_req.hdl = AL_INVALID_HANDLE; +#endif /* defined( CL_KERNEL ) */ + cl_spinlock_release( &h_mcast->obj.lock ); + + /* Initialize the user's response. */ + cl_memclr( &mcast_rec, sizeof( ib_mcast_rec_t ) ); + mcast_rec.mcast_context = h_mcast->obj.context; + status = h_mcast->req_status; + mcast_rec.error_status = h_mcast->resp_status; + mcast_rec.p_member_rec = &h_mcast->member_rec; + + /* If a synchronous join fails, the blocking thread needs to do cleanup. */ + sync = ((h_mcast->flags & IB_FLAGS_SYNC) == IB_FLAGS_SYNC); + + /* See if the join operation was successful. */ + if( status == IB_SUCCESS ) + { + /* Ensure that the user wants the join operation to proceed. */ + if( h_mcast->state == SA_REG_STARTING ) + { + /* + * Change the state here so that we avoid trying to cancel + * the request if the verb operation fails. + */ + h_mcast->state = SA_REG_ACTIVE; + /* Attach the QP to the multicast group. */ + if(ib_member_get_state(mcast_rec.p_member_rec->scope_state) == IB_MC_REC_STATE_FULL_MEMBER) + { + status = verbs_attach_mcast(h_mcast); + if( status != IB_SUCCESS ) + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_MCAST, ("attach_mcast failed\n") ); + } + mcast_rec.h_mcast = h_mcast; + + } + else + { + /* + * The operation was canceled as a result of destroying the QP. + * Invoke the user's callback notifying them that the join was + * canceled. The join succeeded with the SA, but we don't + * attach the QP to the multicast group, so the user will not + * be aware that the join succeeded. + */ + CL_ASSERT( h_mcast->state == SA_REG_CANCELING ); + status = IB_CANCELED; + } + } + + mcast_rec.status = status; + CL_ASSERT( h_mcast->pfn_mcast_cb ); + h_mcast->pfn_mcast_cb( &mcast_rec ); + + /* If synchronous, signal that the join is done. */ + if( sync ) + cl_event_signal( &h_mcast->event ); + + /* Dereference the mcast object now that the SA operation is complete. 
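+     *
+     * This balances the reference that init_al_obj took in al_join_mcast,
+     * which was deliberately held while the SA request was outstanding.
+     * The lifetime, sketched:
+     *
+     *   al_join_mcast: init_al_obj (reference held for the SA request)
+     *   join_req_cb:   SA response arrives, work queued to this routine
+     *   here:          success -> deref_al_obj (object lives on under the QP)
+     *                  failure -> pfn_destroy (tears the object down)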
+ */
+    if( status != IB_SUCCESS )
+        h_mcast->obj.pfn_destroy( &h_mcast->obj, NULL );
+    else
+        deref_al_obj( &h_mcast->obj );
+
+    AL_EXIT( AL_DBG_MCAST );
+}
+
+
+
+ib_api_status_t
+ib_leave_mcast(
+    IN const ib_mcast_handle_t h_mcast,
+    IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL )
+{
+    AL_ENTER( AL_DBG_MCAST );
+
+    if( !h_mcast )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+            ("IB_INVALID_MCAST_HANDLE\n") );
+        return IB_INVALID_MCAST_HANDLE;
+    }
+
+    /* Record that we're already leaving the multicast group. */
+    ref_al_obj( &h_mcast->obj );
+    h_mcast->obj.pfn_destroy( &h_mcast->obj, pfn_destroy_cb );
+    AL_EXIT( AL_DBG_MCAST );
+    return IB_SUCCESS;
+}
+
+
+/*
+ * Multicast leave completion callback.
+ */
+static void
+leave_req_cb(
+    IN al_sa_req_t *p_sa_req,
+    IN ib_mad_element_t *p_mad_response )
+{
+    ib_mcast_handle_t h_mcast;
+
+    AL_ENTER( AL_DBG_MCAST );
+    h_mcast = PARENT_STRUCT( p_sa_req, ib_mcast_t, sa_dereg_req );
+
+    if( p_mad_response )
+        ib_put_mad( p_mad_response );
+
+    /*
+     * Release the reference on the mcast object now that
+     * the SA operation is complete.
+     */
+    deref_al_obj( &h_mcast->obj );
+    AL_EXIT( AL_DBG_MCAST );
+}
+
+
+
+#if defined( CL_KERNEL )
+
+/*
+ * Called by proxy to attach a QP to a multicast group.
+ */
+ib_api_status_t
+al_attach_mcast(
+    IN const ib_qp_handle_t h_qp,
+    IN const ib_gid_t *p_mcast_gid,
+    IN const ib_net16_t mcast_lid,
+    OUT al_attach_handle_t *ph_attach,
+    IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL )
+{
+    al_attach_handle_t h_attach;
+    ib_api_status_t status;
+
+    CL_ASSERT( h_qp );
+    CL_ASSERT( ph_attach );
+
+    /* Allocate an attachment object. */
+    h_attach = (al_attach_handle_t)cl_zalloc( sizeof( al_attach_t ) );
+    if( !h_attach )
+    {
+        return IB_INSUFFICIENT_MEMORY;
+    }
+
+    /* Construct the attachment object. */
+    construct_al_obj( &h_attach->obj, AL_OBJ_TYPE_H_ATTACH );
+
+    status = init_al_obj( &h_attach->obj, NULL, FALSE,
+        NULL, __cleanup_attach, __free_attach );
+    if( status != IB_SUCCESS )
+    {
+        __free_attach( &h_attach->obj );
+        return status;
+    }
+
+    status = attach_al_obj( &h_qp->obj, &h_attach->obj );
+    if( status != IB_SUCCESS )
+    {
+        h_attach->obj.pfn_destroy( &h_attach->obj, NULL );
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+            ("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+        return status;
+    }
+
+    /* Attach the QP. */
+    status = h_qp->obj.p_ci_ca->verbs.attach_mcast( h_qp->h_ci_qp,
+        p_mcast_gid, mcast_lid, &h_attach->h_ci_mcast, p_umv_buf );
+    if( status != IB_SUCCESS )
+    {
+        h_attach->obj.pfn_destroy( &h_attach->obj, NULL );
+        return status;
+    }
+
+    /* The proxy will release the reference taken in init_al_obj.
*/ + *ph_attach = h_attach; + return status; +} + + + +static void +__cleanup_attach( + IN al_obj_t *p_obj ) +{ + ib_api_status_t status; + al_attach_handle_t h_attach; + + CL_ASSERT( p_obj ); + h_attach = PARENT_STRUCT( p_obj, al_attach_t, obj ); + + if( h_attach->h_ci_mcast ) + { + status = h_attach->obj.p_ci_ca->verbs.detach_mcast( + h_attach->h_ci_mcast ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + +static void +__free_attach( + IN al_obj_t *p_obj ) +{ + al_attach_handle_t h_attach; + + CL_ASSERT( p_obj ); + h_attach = PARENT_STRUCT( p_obj, al_attach_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_attach ); +} + +#endif /* CL_KERNEL */ diff --git a/branches/Ndi/core/al/al_mcast.h b/branches/Ndi/core/al/al_mcast.h new file mode 100644 index 00000000..e97ed377 --- /dev/null +++ b/branches/Ndi/core/al/al_mcast.h @@ -0,0 +1,114 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_MCAST_H__) +#define __AL_MCAST_H__ + +#include + +#include "al_common.h" +#include "al_query.h" +#include + + +/* + * Tracks attaching to a multicast group in the kernel for QPs allocated to + * user-mode clients. + */ +typedef struct _al_attach +{ + al_obj_t obj; + ib_mcast_handle_t h_ci_mcast; /* CI CA handle from attach */ + +} al_attach_t, * __ptr64 al_attach_handle_t; + + +typedef struct _ib_mcast +{ + al_obj_t obj; + + al_sa_req_t sa_reg_req; + al_sa_req_t sa_dereg_req; + + ib_mcast_handle_t h_ci_mcast; + + cl_async_proc_item_t async; + + /* Used to perform synchronous requests. */ + ib_al_flags_t flags; + cl_event_t event; + + /* Status of the join/leave request. */ + ib_api_status_t req_status; + + /* Additional status information returned in the join/leave response. */ + ib_net16_t resp_status; + + al_sa_reg_state_t state; + ib_pfn_mcast_cb_t pfn_mcast_cb; + + /* Store member record to report to SA later. */ + ib_member_rec_t member_rec; + ib_net64_t port_guid; + +} ib_mcast_t; + + + + +void +al_cancel_mcast( + IN const ib_mcast_handle_t h_mcast ); + + +ib_api_status_t +al_join_mcast( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ); + + +#if defined( CL_KERNEL ) +/* + * Called by proxy to attach a QP to a multicast group. 
+ */ +ib_api_status_t +al_attach_mcast( + IN const ib_qp_handle_t h_qp, + IN const ib_gid_t *p_mcast_gid, + IN const ib_net16_t mcast_lid, + OUT al_attach_handle_t *ph_attach, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL ); + +#endif /* CL_KERNEL */ + + +#endif /* __AL_MCAST_H__ */ diff --git a/branches/Ndi/core/al/al_mgr.h b/branches/Ndi/core/al/al_mgr.h new file mode 100644 index 00000000..f96ec9b4 --- /dev/null +++ b/branches/Ndi/core/al/al_mgr.h @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_MGR_H__) +#define __AL_MGR_H__ + + +#include +#include "al_ci_ca.h" +#include "al_common.h" +#include "al_proxy_ioctl.h" + +#ifndef CL_KERNEL +#include "ual_mgr.h" +#endif + + +typedef struct _al_mgr +{ + al_obj_t obj; + + /* List of all AL object's in the system. */ + cl_qlist_t al_obj_list; + cl_spinlock_t lock; + + /* Table of Channel Adapters. */ + cl_qlist_t ci_ca_list; + +#ifndef CL_KERNEL + ual_mgr_t ual_mgr; +#endif + +} al_mgr_t; + + +/* + * Globals used throughout AL + * + * Note that the exported symbols are only exported for the bus driver + * (loader) and are not intended for use by any normal AL clients. + */ +AL_EXPORT cl_async_proc_t *gp_async_proc_mgr; +AL_EXPORT cl_async_proc_t *gp_async_pnp_mgr; +AL_EXPORT al_mgr_t *gp_al_mgr; +AL_EXPORT ib_al_handle_t gh_al; +ib_pool_handle_t gh_mad_pool; + + +ib_api_status_t +create_al_mgr( void ); + + +void +print_al_objs( + IN const ib_al_handle_t h_al ); + +void +print_al_obj( + IN al_obj_t * const p_obj ); + +void +print_tail_al_objs( void ); + + +al_ci_ca_t* +acquire_ci_ca( + IN const ib_net64_t ci_ca_guid, + IN const ib_ca_handle_t h_ca ); + +void +release_ci_ca( + IN const ib_ca_handle_t h_ca ); + + +AL_EXPORT ib_ca_handle_t AL_API +acquire_ca( + IN const ib_net64_t ci_ca_guid ); + + +void +add_ci_ca( + IN al_ci_ca_t* const p_ci_ca ); + +void +remove_ci_ca( + IN al_ci_ca_t* const p_ci_ca ); + +al_ci_ca_t* +find_ci_ca( + IN const ib_net64_t ci_ca_guid ); + + +#endif /* __AL_MGR_H__ */ diff --git a/branches/Ndi/core/al/al_mgr_shared.c b/branches/Ndi/core/al/al_mgr_shared.c new file mode 100644 index 00000000..bfdb3c5b --- /dev/null +++ b/branches/Ndi/core/al/al_mgr_shared.c @@ -0,0 +1,675 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "al.h" +#include "al_common.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mgr_shared.tmh" +#endif +#include "al_ci_ca.h" +#include "ib_common.h" +#include "al_mgr.h" +#include "al_pnp.h" + +ib_al_handle_t gh_al = NULL; +ib_pool_handle_t gh_mad_pool = NULL; +al_mgr_t *gp_al_mgr = NULL; +cl_async_proc_t *gp_async_proc_mgr = NULL; +cl_async_proc_t *gp_async_pnp_mgr = NULL; + + + +void +print_al_obj( + IN al_obj_t * const p_obj ) +{ + CL_ASSERT( p_obj ); + + UNUSED_PARAM( p_obj ); + + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("AL object %016Ix(%s), parent: %016Ix ref_cnt: %d\n", + (LONG_PTR)p_obj, ib_get_obj_type( p_obj ), + (LONG_PTR)p_obj->p_parent_obj, p_obj->ref_cnt) ); +} + + +void +print_al_objs( + IN const ib_al_handle_t h_al ) +{ + al_obj_t *p_obj; + cl_list_item_t *p_list_item; + + if( !gp_al_mgr ) + return; + + /* Display all access layer objects. */ + for( p_list_item = cl_qlist_head( &gp_al_mgr->al_obj_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->al_obj_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, list_item ); + if( !h_al || p_obj->h_al == h_al ) + print_al_obj( p_obj ); + } +} + + + +void +print_tail_al_objs() +{ + al_obj_t *p_obj; + cl_list_item_t *p_list_item; + int count = 3; + + if( !gp_al_mgr ) + return; + + /* Display all access layer objects. */ + for( p_list_item = cl_qlist_tail( &gp_al_mgr->al_obj_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->al_obj_list ) && count; + p_list_item = cl_qlist_prev( p_list_item ) ) + { + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, list_item ); + print_al_obj( p_obj ); + count--; + } +} + + + +/* + * Search all available CI CAs in the system to see if one exists with the + * given GUID. 
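+ *
+ * The list walk itself is unsynchronized, so callers are expected to hold
+ * gp_al_mgr->obj.lock across the call, as acquire_ci_ca and acquire_ca
+ * below do:
+ *
+ *   cl_spinlock_acquire( &gp_al_mgr->obj.lock );
+ *   p_ci_ca = find_ci_ca( ci_ca_guid );
+ *   ... take a reference on the result while still locked ...
+ *   cl_spinlock_release( &gp_al_mgr->obj.lock );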
+ */ +al_ci_ca_t* +find_ci_ca( + IN const ib_net64_t ci_ca_guid ) +{ + cl_list_item_t *p_list_item; + al_ci_ca_t *p_ci_ca; + + AL_ENTER( AL_DBG_MGR ); + + for( p_list_item = cl_qlist_head( &gp_al_mgr->ci_ca_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->ci_ca_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_ci_ca = PARENT_STRUCT( p_list_item, al_ci_ca_t, list_item ); + if( p_ci_ca->verbs.guid == ci_ca_guid && + p_ci_ca->obj.state == CL_INITIALIZED ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MGR, + ("find_ci_ca:CA guid %I64x.\n", ci_ca_guid) ); + AL_EXIT( AL_DBG_MGR ); + return p_ci_ca; + } + } + + AL_EXIT( AL_DBG_MGR ); + return NULL; +} + + + +al_ci_ca_t* +acquire_ci_ca( + IN const ib_net64_t ci_ca_guid, + IN const ib_ca_handle_t h_ca ) +{ + al_ci_ca_t *p_ci_ca; + + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( ci_ca_guid ); + if( !p_ci_ca ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + return NULL; + } + + add_ca( p_ci_ca, h_ca ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + return p_ci_ca; +} + + + +void +release_ci_ca( + IN const ib_ca_handle_t h_ca ) +{ + AL_ENTER( AL_DBG_MGR ); + remove_ca( h_ca ); + AL_EXIT( AL_DBG_MGR ); +} + + + +void +add_ci_ca( + IN al_ci_ca_t* const p_ci_ca ) +{ + AL_ENTER( AL_DBG_MGR ); + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + cl_qlist_insert_tail( &gp_al_mgr->ci_ca_list, &p_ci_ca->list_item ); + ref_al_obj( &gp_al_mgr->obj ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + AL_EXIT( AL_DBG_MGR ); +} + + +void +remove_ci_ca( + IN al_ci_ca_t* const p_ci_ca ) +{ + AL_ENTER( AL_DBG_MGR ); + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + cl_qlist_remove_item( &gp_al_mgr->ci_ca_list, &p_ci_ca->list_item ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + deref_al_obj( &gp_al_mgr->obj ); + AL_EXIT( AL_DBG_MGR ); +} + + + +ib_ca_handle_t +acquire_ca( + IN const ib_net64_t ci_ca_guid ) +{ + al_ci_ca_t *p_ci_ca; + + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( ci_ca_guid ); + if( !p_ci_ca ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + return NULL; + } + + ref_al_obj( &p_ci_ca->h_ca->obj ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + return p_ci_ca->h_ca; +} + + + +#define SEARCH_CA_GUID (1) +#define SEARCH_PORT_GUID (2) + +/* + * Return the GUID of CA with the given port GID. + */ + +static ib_api_status_t +__get_guid_by_gid ( + IN ib_al_handle_t h_al, + IN const ib_gid_t* const p_gid, + IN const uintn_t type, + OUT ib_net64_t* const p_guid ) +{ + ib_net64_t *p_guid_array = NULL; + uint32_t size; + uintn_t ca_ind, port_ind, gid_ind, ca_cnt; + ib_api_status_t status = IB_SUCCESS; + ib_ca_attr_t *p_ca_attr = NULL; + ib_port_attr_t *p_port_attr = NULL; + + AL_ENTER( AL_DBG_MGR ); + + CL_ASSERT( h_al && p_gid && p_guid ); + + /* Get the number of CA GUIDs. */ + ca_cnt = 0; + p_guid_array = NULL; + status = ib_get_ca_guids( h_al, p_guid_array, &ca_cnt ); + if( status != IB_INSUFFICIENT_MEMORY ) + { + if( status == IB_SUCCESS ) + { + status = IB_NOT_FOUND; /* No CAs in the system */ + } + goto end; + } + + /* Allocate an array to store the CA GUIDs. */ + p_guid_array = cl_malloc( sizeof( ib_net64_t ) * ca_cnt ); + if( !p_guid_array ) + { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + + /* Get the list of CA GUIDs in the system. */ + status = ib_get_ca_guids( h_al, p_guid_array, &ca_cnt ); + if( status != IB_SUCCESS ) + goto end; + + /* Query each CA. 
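+     *
+     * ib_query_ca_by_guid follows the usual grow-and-requery convention:
+     * called with too small a buffer it fails with IB_INSUFFICIENT_MEMORY
+     * and writes the required byte count back into size, after which the
+     * buffer is (re)allocated and the query retried, as the loop below
+     * does:
+     *
+     *   size = 0;
+     *   status = ib_query_ca_by_guid( h_al, guid, NULL, &size );
+     *   ... status == IB_INSUFFICIENT_MEMORY, size == required bytes ...
+     *   p_ca_attr = cl_malloc( size );
+     *   status = ib_query_ca_by_guid( h_al, guid, p_ca_attr, &size );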
+ */
+    size = 0;
+    p_ca_attr = NULL;
+    for( ca_ind = 0; ca_ind < ca_cnt; ca_ind++ )
+    {
+        /* Query the CA and port information. */
+        status = ib_query_ca_by_guid( h_al, p_guid_array[ca_ind],
+            p_ca_attr, &size );
+
+        if( status == IB_INSUFFICIENT_MEMORY )
+        {
+            /* Allocate a larger buffer and requery. */
+            if( p_ca_attr )
+                cl_free( p_ca_attr );
+
+            p_ca_attr = cl_malloc( size );
+            if( !p_ca_attr )
+            {
+                status = IB_INSUFFICIENT_MEMORY;
+                goto end;
+            }
+
+            status = ib_query_ca_by_guid( h_al, p_guid_array[ca_ind],
+                p_ca_attr, &size );
+        }
+
+        if( status != IB_SUCCESS )
+            goto end;
+
+        /* Try to match the GID against each port's GID table. */
+        status = IB_NOT_FOUND;
+        for( port_ind = 0; port_ind < p_ca_attr->num_ports; port_ind++ )
+        {
+            p_port_attr = &p_ca_attr->p_port_attr[port_ind];
+
+            for( gid_ind = 0; gid_ind < p_port_attr->num_gids; gid_ind++ )
+            {
+                if( !cl_memcmp( &p_port_attr->p_gid_table[gid_ind], p_gid,
+                    sizeof( ib_gid_t ) ) )
+                {
+                    if( type == SEARCH_CA_GUID )
+                        *p_guid = p_guid_array[ca_ind];
+                    else
+                        *p_guid = p_port_attr->port_guid;
+                    status = IB_SUCCESS;
+                    goto end;
+                }
+            }
+        }
+    }
+
+end:
+    if( p_ca_attr )
+        cl_free( p_ca_attr );
+    if( p_guid_array )
+        cl_free( p_guid_array );
+
+    AL_EXIT( AL_DBG_MGR );
+    return status;
+}
+
+
+
+ib_api_status_t
+ib_get_ca_by_gid(
+    IN ib_al_handle_t h_al,
+    IN const ib_gid_t* const p_gid,
+    OUT ib_net64_t* const p_ca_guid )
+{
+    ib_api_status_t status;
+
+    AL_ENTER( AL_DBG_MGR );
+
+    if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );
+        return IB_INVALID_AL_HANDLE;
+    }
+    if( !p_gid || !p_ca_guid )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    status = __get_guid_by_gid( h_al, p_gid, SEARCH_CA_GUID, p_ca_guid );
+
+    AL_EXIT( AL_DBG_MGR );
+    return status;
+}
+
+
+
+ib_api_status_t
+ib_get_port_by_gid(
+    IN ib_al_handle_t h_al,
+    IN const ib_gid_t* const p_gid,
+    OUT ib_net64_t* const p_port_guid )
+{
+    ib_api_status_t status;
+
+    AL_ENTER( AL_DBG_MGR );
+
+    if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );
+        return IB_INVALID_AL_HANDLE;
+    }
+    if( !p_gid || !p_port_guid )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    status = __get_guid_by_gid( h_al, p_gid, SEARCH_PORT_GUID, p_port_guid );
+
+    AL_EXIT( AL_DBG_MGR );
+    return status;
+}
+
+
+
+/*
+ * Return the GUIDs of all CAs in the system.
+ */
+ib_api_status_t
+ib_get_ca_guids(
+    IN ib_al_handle_t h_al,
+    OUT ib_net64_t* const p_guid_array OPTIONAL,
+    IN OUT size_t* const p_guid_cnt )
+{
+    cl_list_item_t *p_list_item;
+    al_ci_ca_t *p_ci_ca;
+    uintn_t guid_cnt;
+
+    AL_ENTER( AL_DBG_MGR );
+
+    if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") );
+        return IB_INVALID_AL_HANDLE;
+    }
+    if( !p_guid_cnt )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    /* Prevent CA additions or removals. */
+    cl_spinlock_acquire( &gp_al_mgr->obj.lock );
+
+    /*
+     * Count the number of GUIDs available while CA additions and
+     * removals are blocked.
+     */
+    guid_cnt = cl_qlist_count( &gp_al_mgr->ci_ca_list );
+
+    /* Check if a GUID array of sufficient size was provided.
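+     *
+     * This supports the standard two-call pattern used by callers such as
+     * __get_guid_by_gid above:
+     *
+     *   ca_cnt = 0;
+     *   status = ib_get_ca_guids( h_al, NULL, &ca_cnt );
+     *   ... returns IB_INSUFFICIENT_MEMORY and sets ca_cnt ...
+     *   p_guids = cl_malloc( sizeof( ib_net64_t ) * ca_cnt );
+     *   status = ib_get_ca_guids( h_al, p_guids, &ca_cnt );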
*/ + if( !p_guid_array || (*p_guid_cnt < guid_cnt) ) + { + /* Array too small. */ + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + /* Return the actual count. */ + *p_guid_cnt = guid_cnt; + + AL_EXIT( AL_DBG_MGR ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Return the actual count. */ + *p_guid_cnt = guid_cnt; + + /* Copy the GUIDs into the array. */ + guid_cnt = 0; + for( p_list_item = cl_qlist_head( &gp_al_mgr->ci_ca_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->ci_ca_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_ci_ca = PARENT_STRUCT( p_list_item, al_ci_ca_t, list_item ); + p_guid_array[guid_cnt++] = p_ci_ca->verbs.guid; + } + + /* Allow CA additions or removals. */ + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + AL_EXIT( AL_DBG_MGR ); + return IB_SUCCESS; +} + + + +static boolean_t +__match_ca_attr( + IN al_ci_ca_t * const p_ci_ca, + IN const uint64_t attr_mask ) +{ + boolean_t match; + + ci_ca_lock_attr( p_ci_ca ); + + /* We don't match any attributes for CA's currently. */ + UNUSED_PARAM( attr_mask ); + match = TRUE; + + ci_ca_unlock_attr( p_ci_ca ); + + return match; +} + + + +static ib_api_status_t +__get_ca_guid( + IN const uint32_t index, + IN const uint64_t attr_mask, + OUT ib_net64_t* const p_guid ) +{ + uint32_t ca_index; + cl_list_item_t *p_list_item; + al_ci_ca_t *p_ci_ca; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MGR ); + + /* Prevent CA additions or removals. */ + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + + /* Check for a valid index. */ + if( index != IB_ANY_INDEX && + index >= cl_qlist_count( &gp_al_mgr->ci_ca_list ) ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_INDEX\n") ); + return IB_INVALID_INDEX; + } + + /* + * Find the CA at the correct index and check its attributes. Optimize + * for the "any index" case. + */ + status = IB_NO_MATCH; + ca_index = 0; + for( p_list_item = cl_qlist_head( &gp_al_mgr->ci_ca_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->ci_ca_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_ci_ca = PARENT_STRUCT( p_list_item, al_ci_ca_t, list_item ); + + if( (ca_index == index || index == IB_ANY_INDEX) && + __match_ca_attr( p_ci_ca, attr_mask ) ) + { + *p_guid = p_ci_ca->verbs.guid; + status = IB_SUCCESS; + break; + } + ca_index++; + } + + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + AL_EXIT( AL_DBG_MGR ); + return status; +} + + + +static boolean_t +__match_port_attr( + IN const ib_port_attr_t * const p_port_attr, + IN const uint64_t attr_mask ) +{ + if( attr_mask & IB_DEV_PORT_ACTIVE ) + return( p_port_attr->link_state == IB_LINK_ACTIVE ); + + return TRUE; +} + + + +static ib_api_status_t +__get_port_guid( + IN const uint32_t index, + IN const uint64_t attr_mask, + OUT ib_net64_t* const p_guid ) +{ + uint32_t port_index, i; + cl_list_item_t *p_list_item; + al_ci_ca_t *p_ci_ca; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MGR ); + + /* Prevent CA additions or removals. */ + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + + /* + * Find the port at the correct index and check its attributes. Optimize + * for the "any index" case. + */ + status = IB_NO_MATCH; + port_index = 0; + for( p_list_item = cl_qlist_head( &gp_al_mgr->ci_ca_list ); + p_list_item != cl_qlist_end( &gp_al_mgr->ci_ca_list ) && + status != IB_SUCCESS; + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_ci_ca = PARENT_STRUCT( p_list_item, al_ci_ca_t, list_item ); + + /* Check all ports on this CA. 
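+ * Note that port_index is flat across the whole CA list: it keeps + * incrementing from one CA to the next, so an index names a port + * system-wide rather than within a single CA.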
*/ + ci_ca_lock_attr( p_ci_ca ); + for( i = 0; i < p_ci_ca->p_pnp_attr->num_ports; i++ ) + { + /* Check the attributes. */ + if( (port_index == index || index == IB_ANY_INDEX) && + __match_port_attr( &p_ci_ca->p_pnp_attr->p_port_attr[i], + attr_mask ) ) + { + *p_guid = p_ci_ca->verbs.guid; + status = IB_SUCCESS; + break; + } + port_index++; + } + ci_ca_unlock_attr( p_ci_ca ); + } + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + /* + * See if the index was valid. We need to perform this check at the + * end of the routine, since we don't know how many ports we have. + */ + if( p_list_item == cl_qlist_end( &gp_al_mgr->ci_ca_list ) && + index != IB_ANY_INDEX ) + { + status = IB_INVALID_INDEX; + } + + AL_EXIT( AL_DBG_MGR ); + return status; +} + + + +ib_api_status_t +ib_get_guid( + IN ib_al_handle_t h_al, + IN const uint32_t index, + IN const ib_pnp_class_t device_type, + IN const uint64_t attr_mask, + OUT ib_net64_t* const p_guid ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MGR ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_guid ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + switch( device_type ) + { + case IB_PNP_CA: + status = __get_ca_guid( index, attr_mask, p_guid ); + break; + + case IB_PNP_PORT: + status = __get_port_guid( index, attr_mask, p_guid ); + break; + + case IB_PNP_IOC: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IOC GUIDs not supported at this time\n") ); + return IB_UNSUPPORTED; + + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + + AL_EXIT( AL_DBG_MGR ); + return status; +} + + + + + diff --git a/branches/Ndi/core/al/al_mr.h b/branches/Ndi/core/al/al_mr.h new file mode 100644 index 00000000..e01539bf --- /dev/null +++ b/branches/Ndi/core/al/al_mr.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_MR_H__) +#define __AL_MR_H__ + + +#include + +#include "al_ca.h" + + +typedef struct _al_shmid +{ + al_obj_t obj; + cl_list_item_t list_item; + + /* List of sharing registered memory regions. 
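+ * Presumably each al_shmid_t ties one shared memory id to the ib_mr_t + * registrations created against it (see reg_shmid below), letting + * later registrations of the same id find and share the region.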
*/ + cl_list_t mr_list; + int id; + +} al_shmid_t; + + + +/* + * The MR and MR alias structures must be the same in order to seamlessly + * handle posting of work requests. + */ +typedef struct _ib_mr +{ + al_obj_t obj; + ib_mr_handle_t h_ci_mr; /* Actual HW handle. */ + + /* Reference to any memory registrations shared between processes. */ + al_shmid_t *p_shmid; + +} ib_mr_t; + + +cl_status_t +mr_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + + +void +mr_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ); + + +ib_api_status_t +reg_mem( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ); + + +ib_api_status_t +reg_phys( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_phys_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ); + + +ib_api_status_t +reg_shared( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ); + + +ib_api_status_t +rereg_mem( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ); + + +ib_api_status_t +rereg_phys( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_phys_create_t* const p_phys_create OPTIONAL, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ); + + +ib_api_status_t +dereg_mr( + IN const ib_mr_handle_t h_mr ); + +ib_api_status_t +reg_shmid( + IN const ib_pd_handle_t h_pd, + IN const ib_shmid_t shmid, + IN const ib_mr_create_t* const p_mr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + + +#ifdef CL_KERNEL +typedef struct _mlnx_fmr +{ + al_obj_t obj; + mlnx_fmr_handle_t h_ci_fmr; /* Actual HW handle. */ + struct _mlnx_fmr* __ptr64 p_next; +} mlnx_fmr_t; + + + +cl_status_t +mlnx_fmr_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + + +void +mlnx_fmr_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ); + + + +#endif + + +#endif /* __AL_MR_H__ */ diff --git a/branches/Ndi/core/al/al_mr_shared.c b/branches/Ndi/core/al/al_mr_shared.c new file mode 100644 index 00000000..8fc389da --- /dev/null +++ b/branches/Ndi/core/al/al_mr_shared.c @@ -0,0 +1,634 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include "al.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mr_shared.tmh" +#endif +#include "al_mr.h" +#include "al_pd.h" +#include "al_res_mgr.h" +#include "al_verbs.h" + +#include "ib_common.h" + + +static void +__cleanup_mr( + IN struct _al_obj *p_obj ); + +static void +__return_mr( + IN al_obj_t *p_obj ); + + +cl_status_t +mr_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + ib_api_status_t status; + ib_mr_handle_t h_mr; + + UNUSED_PARAM( context ); + + h_mr = (ib_mr_handle_t)p_object; + cl_memclr( h_mr, sizeof( ib_mr_t ) ); + + construct_al_obj( &h_mr->obj, AL_OBJ_TYPE_H_MR ); + status = init_al_obj( &h_mr->obj, NULL, FALSE, NULL, + __cleanup_mr, __return_mr ); + if( status != IB_SUCCESS ) + { + return CL_ERROR; + } + + *pp_pool_item = &((ib_mr_handle_t)p_object)->obj.pool_item; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &h_mr->obj ); + + return CL_SUCCESS; +} + + + +void +mr_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ) +{ + al_obj_t *p_obj; + + UNUSED_PARAM( context ); + + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * The MR is being totally destroyed. Modify the free_cb to destroy the + * AL object. + */ + p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj; + ref_al_obj( p_obj ); + p_obj->pfn_destroy( p_obj, NULL ); +} + + + +static void +__cleanup_mr( + IN struct _al_obj *p_obj ) +{ + ib_api_status_t status; + ib_mr_handle_t h_mr; + + CL_ASSERT( p_obj ); + h_mr = PARENT_STRUCT( p_obj, ib_mr_t, obj ); + + /* Dereference any shared memory registrations. */ + verbs_release_shmid(h_mr); + + /* Deregister the memory. */ + if( verbs_check_mr(h_mr) ) + { + status = verbs_deregister_mr(h_mr); + + /* + * This was our last chance to deregister the MR. All MW's should + * be destroyed by now. 
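+ * (The PD enforces that ordering: destroying_pd in al_pd.c walks the + * PD's mw_list and destroys every MW before MR cleanup runs.)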
+ */ + CL_ASSERT( status == IB_SUCCESS ); + h_mr->h_ci_mr = NULL; +#ifndef CL_KERNEL + h_mr->obj.hdl = AL_INVALID_HANDLE; +#endif + } +} + + + +static void +__return_mr( + IN al_obj_t *p_obj ) +{ + ib_mr_handle_t h_mr; + + h_mr = PARENT_STRUCT( p_obj, ib_mr_t, obj ); + reset_al_obj( p_obj ); + put_mr( h_mr ); +} + + + +ib_api_status_t +ib_reg_mem( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = reg_mem( h_pd, p_mr_create, p_lkey, p_rkey, ph_mr, FALSE ); + + /* Release the reference taken in alloc_mr for initialization. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_mr)->obj ); + + AL_EXIT( AL_DBG_MR ); + return status; +} + + + +ib_api_status_t +reg_mem( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( !p_mr_create || !p_lkey || !p_rkey || !ph_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Get a MR tracking structure. */ + h_mr = alloc_mr(); + if( !h_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to allocate memory handle\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + status = attach_al_obj( &h_pd->obj, &h_mr->obj ); + if( status != IB_SUCCESS ) + { + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the memory region. */ + status = verbs_register_mr( h_pd, p_mr_create, p_lkey, p_rkey, h_mr ); + if( status != IB_SUCCESS ) + { + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to register memory: %s\n", ib_get_err_str(status)) ); + return status; + } + + *ph_mr = h_mr; + + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_reg_phys( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_phys_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( !p_vaddr || !p_lkey || !p_rkey || !ph_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Get a MR tracking structure. */ + h_mr = alloc_mr(); + if( !h_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to allocate memory handle\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + status = attach_al_obj( &h_pd->obj, &h_mr->obj ); + if( status != IB_SUCCESS ) + { + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the memory region. 
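+ * Note that p_vaddr is IN OUT: the caller passes the requested virtual + * address, and the verbs provider may hand back the address actually + * assigned to the physical registration.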
*/ + status = verbs_register_pmr( h_pd, p_phys_create, p_vaddr, + p_lkey, p_rkey, h_mr ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to register memory: %s\n", ib_get_err_str(status)) ); + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + return status; + } + + *ph_mr = h_mr; + + /* Release the reference taken in alloc_mr for initialization. */ + deref_al_obj( &h_mr->obj ); + + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_rereg_mem( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ) +{ + if( AL_OBJ_INVALID_HANDLE( h_mr, AL_OBJ_TYPE_H_MR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + + return rereg_mem( + h_mr, mr_mod_mask, p_mr_create, p_lkey, p_rkey, h_pd, FALSE ); +} + + +ib_api_status_t +rereg_mem( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( ( mr_mod_mask & IB_MR_MOD_PD ) ) + { + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( h_pd->obj.h_al != h_mr->obj.h_al ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + } + if( !p_lkey || !p_rkey ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Modify the registered memory region. */ + status = verbs_modify_mr( h_mr, mr_mod_mask, p_mr_create, + p_lkey, p_rkey, h_pd ); + + /* If we're changing the PD, we need to update the object hierarchy. */ + if( h_pd && (status == IB_SUCCESS) ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MR, ("associating MR with new PD\n") ); + detach_al_obj( &h_mr->obj ); + deref_al_obj( h_mr->obj.p_parent_obj ); + status = attach_al_obj( &h_pd->obj, &h_mr->obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + AL_EXIT( AL_DBG_MR ); + return status; +} + + + +ib_api_status_t +ib_rereg_phys( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_phys_create_t* const p_phys_create OPTIONAL, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_mr, AL_OBJ_TYPE_H_MR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + if( ( mr_mod_mask & IB_MR_MOD_PD ) ) + { + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( h_pd->obj.p_parent_obj != h_mr->obj.p_parent_obj->p_parent_obj ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + } + if( !p_vaddr || !p_lkey || !p_rkey ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Modify the registered memory region. 
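+ * As in rereg_mem above, a successful IB_MR_MOD_PD change re-homes the + * MR in the AL object tree: detach from the old PD, drop the old + * parent reference, then attach under the new PD.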
*/ + status = verbs_modify_pmr( h_mr, mr_mod_mask, p_phys_create, p_vaddr, + p_lkey, p_rkey, h_pd ); + + /* If we're changing the PD, we need to update the object hierarchy. */ + if( h_pd && (status == IB_SUCCESS) ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MR, ("associating MR with new PD\n") ); + detach_al_obj( &h_mr->obj ); + deref_al_obj( h_mr->obj.p_parent_obj ); + status = attach_al_obj( &h_pd->obj, &h_mr->obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + AL_EXIT( AL_DBG_MR ); + return status; +} + + + +ib_api_status_t +ib_reg_shared( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + ib_api_status_t status; + + if( AL_OBJ_INVALID_HANDLE( h_mr, AL_OBJ_TYPE_H_MR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = reg_shared( h_mr, h_pd, access_ctrl, p_vaddr, p_lkey, p_rkey, + ph_mr, FALSE ); + + /* Release the reference taken in alloc_mr for initialization. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_mr)->obj ); + + return status; +} + + + +ib_api_status_t +reg_shared( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + ib_mr_handle_t h_new_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( !p_vaddr || !p_lkey || !p_rkey || !ph_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Get a MR tracking structure. */ + h_new_mr = alloc_mr(); + if( !h_new_mr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to allocate memory handle\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + status = attach_al_obj( &h_pd->obj, &h_new_mr->obj ); + if( status != IB_SUCCESS ) + { + h_new_mr->obj.pfn_destroy( &h_new_mr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the memory region. 
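+ * verbs_register_smr creates a second, independently destroyable MR + * backed by the same memory as h_mr, with rights given by access_ctrl + * and new keys returned through p_lkey and p_rkey.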
*/ + status = verbs_register_smr( h_mr, h_pd, access_ctrl, p_vaddr, + p_lkey, p_rkey, h_new_mr ); + if( status != IB_SUCCESS ) + { + h_new_mr->obj.pfn_destroy( &h_new_mr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to register memory: %s\n", ib_get_err_str(status)) ); + return status; + } + + *ph_mr = h_new_mr; + + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_dereg_mr( + IN const ib_mr_handle_t h_mr ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_mr, AL_OBJ_TYPE_H_MR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + + ref_al_obj( &h_mr->obj ); + + status = dereg_mr( h_mr ); + if( status != IB_SUCCESS ) + deref_al_obj( &h_mr->obj ); + + AL_EXIT( AL_DBG_MR ); + return status; +} + + + +ib_api_status_t +dereg_mr( + IN const ib_mr_handle_t h_mr ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( !verbs_check_mr(h_mr) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + + /* + * MR's are destroyed synchronously. Go ahead and try to destroy it now. + * If we fail, then report the failure to the user. Failures could be + * a result of having a memory window bound to the region, which we cannot + * track in the kernel for user-mode clients. + */ + status = verbs_deregister_mr(h_mr); + + if( status == IB_SUCCESS ) + { + h_mr->h_ci_mr = NULL; +#ifndef CL_KERNEL + h_mr->obj.hdl = AL_INVALID_HANDLE; +#endif + + /* We're good to destroy the object. */ + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + } + + AL_EXIT( AL_DBG_MR ); + return status; +} + + + +ib_api_status_t +ib_query_mr( + IN const ib_mr_handle_t h_mr, + OUT ib_mr_attr_t* const p_mr_attr ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_mr, AL_OBJ_TYPE_H_MR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MR_HANDLE\n") ); + return IB_INVALID_MR_HANDLE; + } + if( !p_mr_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_mr(h_mr, p_mr_attr); + + /* Set AL's handles. */ + if( status == IB_SUCCESS ) + { + p_mr_attr->h_pd = PARENT_STRUCT( h_mr->obj.p_parent_obj, ib_pd_t, obj ); + } + else + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to query memory region: %s\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_MR ); + return status; +} diff --git a/branches/Ndi/core/al/al_mw.c b/branches/Ndi/core/al/al_mw.c new file mode 100644 index 00000000..b7a30a5d --- /dev/null +++ b/branches/Ndi/core/al/al_mw.c @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mw.tmh" +#endif +#include "al_mw.h" +#include "al_pd.h" +#include "al_verbs.h" + + + +void +destroying_mw( + IN struct _al_obj *p_obj ); + +void +cleanup_mw( + IN struct _al_obj *p_obj ); + +void +free_mw( + IN al_obj_t *p_obj ); + + + +ib_api_status_t +create_mw( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + OUT ib_mw_handle_t* const ph_mw, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_mw_handle_t h_mw; + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_MW; + + if( !p_rkey || !ph_mw ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a MW tracking structure. */ + h_mw = cl_zalloc( sizeof( ib_mw_t) ); + if( !h_mw ) + { + return IB_INSUFFICIENT_MEMORY; + } + + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + + /* Construct the mw. */ + construct_al_obj( &h_mw->obj, obj_type ); + + status = init_al_obj( &h_mw->obj, NULL, FALSE, + destroying_mw, NULL, free_mw ); + if( status != IB_SUCCESS ) + { + free_mw( &h_mw->obj ); + return status; + } + status = attach_al_obj( &h_pd->obj, &h_mw->obj ); + if( status != IB_SUCCESS ) + { + h_mw->obj.pfn_destroy( &h_mw->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Insert the MW into the PD's MW list used to order destruction. */ + pd_insert_mw( h_mw ); + + /* Create the memory window. */ + status = verbs_create_mw( h_pd, p_rkey, h_mw ); + if( status != IB_SUCCESS ) + { + h_mw->obj.pfn_destroy( &h_mw->obj, NULL ); + return status; + } + + *ph_mw = h_mw; + + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_destroy_mw( + IN const ib_mw_handle_t h_mw ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + if( AL_OBJ_INVALID_HANDLE( h_mw, AL_OBJ_TYPE_H_MW ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MW_HANDLE\n") ); + return IB_INVALID_MW_HANDLE; + } + + ref_al_obj( &h_mw->obj ); + + status = destroy_mw( h_mw ); + + if( status != IB_SUCCESS ) + deref_al_obj( &h_mw->obj ); + + AL_EXIT( AL_DBG_MW ); + return status; +} + + + +ib_api_status_t +destroy_mw( + IN const ib_mw_handle_t h_mw ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + if( !verbs_check_mw( h_mw ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MW_HANDLE\n") ); + return IB_INVALID_MW_HANDLE; + } + + /* + * MWs are destroyed synchronously. Go ahead and try to destroy it now. + * If we fail, then report the failure to the user. 
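+ * ib_destroy_mw takes a reference on the MW before calling in here so + * the object cannot disappear mid-destroy, and drops that reference + * again if we return a failure.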
*/ + status = verbs_destroy_mw( h_mw ); + + if( status == IB_SUCCESS ) + h_mw->obj.pfn_destroy( &h_mw->obj, NULL ); + + AL_EXIT( AL_DBG_MW ); + return status; +} + + + +void +destroying_mw( + IN struct _al_obj *p_obj ) +{ + ib_mw_handle_t h_mw; + + CL_ASSERT( p_obj ); + h_mw = PARENT_STRUCT( p_obj, ib_mw_t, obj ); + + /* Remove the MW from the PD's MW list. */ + pd_remove_mw( h_mw ); +} + + + +/* + * Release all resources associated with the memory window. + */ +void +free_mw( + IN al_obj_t *p_obj ) +{ + ib_mw_handle_t h_mw; + + CL_ASSERT( p_obj ); + h_mw = PARENT_STRUCT( p_obj, ib_mw_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_mw ); +} + + + +ib_api_status_t +ib_query_mw( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* const ph_pd, + OUT net32_t* const p_rkey ) +{ + return query_mw( h_mw, ph_pd, p_rkey, NULL ); +} + + +ib_api_status_t +query_mw( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* const ph_pd, + OUT net32_t* const p_rkey, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_pd_handle_t h_ci_pd; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + if( AL_OBJ_INVALID_HANDLE( h_mw, AL_OBJ_TYPE_H_MW ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MW_HANDLE\n") ); + return IB_INVALID_MW_HANDLE; + } + if( !ph_pd || !p_rkey ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_mw(h_mw, &h_ci_pd, p_rkey); + + /* Get the PD for AL associated with this memory window. */ + if( status == IB_SUCCESS ) + { + *ph_pd = PARENT_STRUCT( h_mw->obj.p_parent_obj, ib_pd_t, obj ); + CL_ASSERT( (*ph_pd)->h_ci_pd == h_ci_pd ); + } + + AL_EXIT( AL_DBG_MW ); + return status; +} diff --git a/branches/Ndi/core/al/al_mw.h b/branches/Ndi/core/al/al_mw.h new file mode 100644 index 00000000..5aac1657 --- /dev/null +++ b/branches/Ndi/core/al/al_mw.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_MW_H__) +#define __AL_MW_H__ + + +#include + +#include "al_ca.h" + + + +typedef struct _ib_mw +{ + al_obj_t obj; + + /* List item used to store MW in PD list for proper destruction order. */ + cl_list_item_t pd_list_item; + ib_mw_handle_t h_ci_mw; /* Actual HW handle. 
*/ + +} ib_mw_t; + + + +ib_api_status_t +create_mw( + IN const ib_pd_handle_t h_pd, + OUT uint32_t* const p_rkey, + OUT ib_mw_handle_t* const ph_mw, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +query_mw( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* const ph_pd, + OUT uint32_t* const p_rkey, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +ib_api_status_t +destroy_mw( + IN const ib_mw_handle_t h_mw ); + +#endif /* __AL_MW_H__ */ diff --git a/branches/Ndi/core/al/al_pd.c b/branches/Ndi/core/al/al_pd.c new file mode 100644 index 00000000..5e19729d --- /dev/null +++ b/branches/Ndi/core/al/al_pd.c @@ -0,0 +1,492 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al.h" +#include "al_av.h" +#include "al_ca.h" +#include "al_cq.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_pd.tmh" +#endif +#include "al_mgr.h" +#include "al_mr.h" +#include "al_mw.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_srq.h" +#include "al_verbs.h" + +#include "ib_common.h" + + +void +destroying_pd( + IN struct _al_obj *p_obj ); + + +void +cleanup_pd( + IN struct _al_obj *p_obj ); + + +void +free_pd( + IN al_obj_t *p_obj ); + + + +ib_api_status_t +alloc_pd( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN const void * const pd_context, + OUT ib_pd_handle_t* const ph_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_pd_handle_t h_pd; + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_PD; + + CL_ASSERT( h_ca ); + + if( !ph_pd ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a protection domain. */ + h_pd = (ib_pd_handle_t)cl_zalloc( sizeof( ib_pd_t ) ); + if( !h_pd ) + { + return IB_INSUFFICIENT_MEMORY; + } + + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + + /* Construct the PD. 
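+ * The PD follows AL's standard object lifecycle: construct_al_obj, + * then init_al_obj (which leaves a creation reference on the object), + * then attach_al_obj to the parent CA; on any failure after init, + * obj.pfn_destroy unwinds the partially built object.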
*/ + construct_al_obj( &h_pd->obj, obj_type ); + cl_qlist_init( &h_pd->mw_list ); + + status = init_al_obj( &h_pd->obj, pd_context, TRUE, + destroying_pd, cleanup_pd, free_pd ); + if( status != IB_SUCCESS ) + { + free_pd( &h_pd->obj ); + return status; + } + + status = attach_al_obj( &h_ca->obj, &h_pd->obj ); + if( status != IB_SUCCESS ) + { + h_pd->obj.pfn_destroy( &h_pd->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + h_pd->type = pd_type; + switch( h_pd->type ) + { + case IB_PDT_ALIAS: + status = allocate_pd_alias( h_ca, h_pd ); + break; + + case IB_PDT_NORMAL: + case IB_PDT_SQP: + case IB_PDT_UD: + /* Allocate the protection domain. */ + status = verbs_allocate_pd( h_ca, h_pd, p_umv_buf ); + break; + + default: + CL_ASSERT( h_pd->type == IB_PDT_ALIAS || h_pd->type == IB_PDT_NORMAL ); + status = IB_INVALID_PARAMETER; + } + + if( status != IB_SUCCESS ) + { + h_pd->obj.pfn_destroy( &h_pd->obj, NULL ); + return status; + } + + *ph_pd = h_pd; + + return status; +} + + + +ib_api_status_t +ib_dealloc_pd( + IN const ib_pd_handle_t h_pd, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_PD ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + ref_al_obj( &h_pd->obj ); + h_pd->obj.pfn_destroy( &h_pd->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_PD ); + return IB_SUCCESS; +} + + + +/* + * Pre-destroy the protection domain. + */ +void +destroying_pd( + IN al_obj_t *p_obj ) +{ + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ib_mw_handle_t h_mw; + cl_list_item_t *p_list_item; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + h_pd = PARENT_STRUCT( p_obj, ib_pd_t, obj ); + + /* Get the AL instance of this protection domain. */ + p_obj = h_pd->obj.p_parent_obj; + h_ca = PARENT_STRUCT( p_obj, ib_ca_t, obj ); + p_obj = h_ca->obj.p_parent_obj; + h_al = PARENT_STRUCT( p_obj, ib_al_t, obj ); + + /* + * Deallocate all MW's before proceeding with destruction. This ensures + * that all MW's have been destroyed before any MR's are. + */ + p_list_item = cl_qlist_head( &h_pd->mw_list ); + while( p_list_item != cl_qlist_end( &h_pd->mw_list ) ) + { + h_mw = PARENT_STRUCT( p_list_item, ib_mw_t, pd_list_item ); + status = ib_destroy_mw( h_mw ); + CL_ASSERT( status == IB_SUCCESS ); + + CL_ASSERT( p_list_item != cl_qlist_head( &h_pd->mw_list ) ); + p_list_item = cl_qlist_head( &h_pd->mw_list ); + } +} + + + +void +cleanup_pd( + IN struct _al_obj *p_obj ) +{ + ib_pd_handle_t h_pd; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + h_pd = PARENT_STRUCT( p_obj, ib_pd_t, obj ); + + /* Release the HW resources. */ + if( verbs_check_pd(h_pd)) + { + if( h_pd->type != IB_PDT_ALIAS ) + { + /* Deallocate the CI PD. */ + status = verbs_deallocate_pd(h_pd); + CL_ASSERT( status == IB_SUCCESS ); + } + else + { + deallocate_pd_alias( h_pd ); + } + } +} + + + +/* + * Release all resources associated with the protection domain. 
+ */ +void +free_pd( + IN al_obj_t *p_obj ) +{ + ib_pd_handle_t h_pd; + + CL_ASSERT( p_obj ); + h_pd = PARENT_STRUCT( p_obj, ib_pd_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_pd ); +} + +ib_api_status_t +ib_create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb OPTIONAL, + OUT ib_srq_handle_t* const ph_srq ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + if( !p_srq_attr || !ph_srq) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( !p_srq_attr->max_wr) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + + if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr) + { + if (p_srq_attr->max_wr > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + if (p_srq_attr->max_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_srq_sges) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") ); + return IB_INVALID_MAX_SGE; + } + } + + status = create_srq( + h_pd, p_srq_attr, srq_context, pfn_srq_event_cb, ph_srq, NULL ); + + /* Release the reference taken in init_al_obj (init_base_srq). */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_srq)->obj ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + +ib_api_status_t +ib_create_qp( + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb OPTIONAL, + OUT ib_qp_handle_t* const ph_qp ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + if( !p_qp_create->rq_depth && !p_qp_create->sq_depth ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + if( !p_qp_create->rq_sge && !p_qp_create->sq_sge ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") ); + return IB_INVALID_MAX_SGE; + } + if (h_pd->obj.p_ci_ca && h_pd->obj.p_ci_ca->p_pnp_attr) + { + if ((p_qp_create->rq_depth > h_pd->obj.p_ci_ca->p_pnp_attr->max_wrs) || + (p_qp_create->sq_depth > h_pd->obj.p_ci_ca->p_pnp_attr->max_wrs)) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + if ((p_qp_create->rq_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_sges) || + (p_qp_create->sq_sge > h_pd->obj.p_ci_ca->p_pnp_attr->max_sges)) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_SGE\n") ); + return IB_INVALID_MAX_SGE; + } + } + status = create_qp( + h_pd, p_qp_create, qp_context, pfn_qp_event_cb, ph_qp, NULL ); + + /* Release the reference taken in init_al_obj (init_base_qp). 
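+ * The same pattern closes every public create wrapper in this file + * (ib_create_srq, ib_create_qp, ib_get_spl_qp, ib_create_av, + * ib_create_mw): the internal create path leaves an extra reference so + * the new object cannot be destroyed during initialization, and the + * wrapper drops it once the handle has been returned to the caller.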
*/ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_qp)->obj ); + + AL_EXIT( AL_DBG_QP ); + return status; +} + + + +ib_api_status_t +ib_get_spl_qp( + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb OPTIONAL, + OUT ib_pool_key_t* const p_pool_key OPTIONAL, + OUT ib_qp_handle_t* const ph_qp ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = get_spl_qp( h_pd, port_guid, p_qp_create, qp_context, + pfn_qp_event_cb, p_pool_key, ph_qp, NULL ); + + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_qp)->obj ); + + AL_EXIT( AL_DBG_QP ); + return status; +} + + + +ib_api_status_t +ib_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + OUT ib_av_handle_t* const ph_av ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_AV ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = create_av( h_pd, p_av_attr, ph_av, NULL ); + + /* Release the reference taken in alloc_av. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_av)->obj ); + + AL_EXIT( AL_DBG_AV ); + return status; +} + + + +ib_api_status_t +ib_create_mw( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + OUT ib_mw_handle_t* const ph_mw ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = create_mw( h_pd, p_rkey, ph_mw, NULL ); + + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_mw)->obj ); + + AL_EXIT( AL_DBG_MW ); + return status; +} + + + +void +pd_insert_mw( + IN const ib_mw_handle_t h_mw ) +{ + ib_pd_handle_t h_pd; + + CL_ASSERT( h_mw ); + h_pd = PARENT_STRUCT( h_mw->obj.p_parent_obj, ib_pd_t, obj ); + + cl_spinlock_acquire( &h_pd->obj.lock ); + cl_qlist_insert_tail( &h_pd->mw_list, &h_mw->pd_list_item ); + cl_spinlock_release( &h_pd->obj.lock ); +} + + + +void +pd_remove_mw( + IN const ib_mw_handle_t h_mw ) +{ + ib_pd_handle_t h_pd; + + CL_ASSERT( h_mw ); + h_pd = PARENT_STRUCT( h_mw->obj.p_parent_obj, ib_pd_t, obj ); + + cl_spinlock_acquire( &h_pd->obj.lock ); + cl_qlist_remove_item( &h_pd->mw_list, &h_mw->pd_list_item ); + cl_spinlock_release( &h_pd->obj.lock ); +} diff --git a/branches/Ndi/core/al/al_pd.h b/branches/Ndi/core/al/al_pd.h new file mode 100644 index 00000000..beba13d8 --- /dev/null +++ b/branches/Ndi/core/al/al_pd.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_PD_H__) +#define __AL_PD_H__ + +#include +#include +#include + +#include "al_common.h" + + + +typedef struct _ib_pd +{ + al_obj_t obj; + + /* + * MW list used to order destruction between MW's and MR's. MR and MW + * can be created in any order, so we can't rely on their order in the + * al_obj list for proper destruction. + */ + cl_qlist_t mw_list; + + ib_pd_type_t type; + ib_pd_handle_t h_ci_pd; /* Actual CI PD handle. */ + +} ib_pd_t; + + + +ib_api_status_t +alloc_pd( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN const void * const pd_context, + OUT ib_pd_handle_t* const ph_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +void +pd_insert_mw( + IN const ib_mw_handle_t h_mw ); + +void +pd_remove_mw( + IN const ib_mw_handle_t h_mw ); + + +#endif /* __AL_PD_H__ */ diff --git a/branches/Ndi/core/al/al_pnp.h b/branches/Ndi/core/al/al_pnp.h new file mode 100644 index 00000000..4c56edb4 --- /dev/null +++ b/branches/Ndi/core/al/al_pnp.h @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined(__AL_PNP_H__) +#define __AL_PNP_H__ + + +#include "al_common.h" +#include "al_ca.h" +#include +#include +#include + + +extern char* ib_pnp_event_str[]; + + +typedef struct _al_pnp +{ + al_obj_t obj; + + cl_async_proc_item_t async_item; +#if defined( CL_KERNEL ) + KEVENT *p_sync_event; + cl_list_item_t list_item; + cl_async_proc_item_t dereg_item; + ib_pnp_class_t pnp_class; + cl_fmap_t context_map; + IRP *p_rearm_irp; + IRP *p_dereg_irp; +#else /* defined( CL_KERNEL ) */ + ual_rearm_pnp_ioctl_out_t rearm; + OVERLAPPED ov; + OVERLAPPED destroy_ov; +#endif /* defined( CL_KERNEL ) */ + ib_pfn_pnp_cb_t pfn_pnp_cb; + +} al_pnp_t; +/* +* FIELDS +* obj +* AL object, used to manage relationships and destruction +* synchronization. +* +* list_item +* Used to store the registration in the proper list for the class. +* +* async_item +* Asynchronous processing item used to handle registration and events. +* +* dereg_item +* Asynchronous processing item used to handle deregistration. This item +* is separate from the registration item to allow a user to immediately +* call ib_dereg_pnp after ib_reg_pnp. +* +* pnp_class +* Class of PnP events for this registration. +* +* pfn_pnp_cb +* Client's PnP notification callback. +* +* context_map +* map of client contexts. +*********/ + + +/* + * Context information stored in a registration structure. + */ +typedef struct _al_pnp_context +{ + /* List item must be first. */ + cl_fmap_item_t map_item; + ib_net64_t guid; + ib_net64_t ca_guid; + const void *context; + +} al_pnp_context_t; + +/****f* Access Layer/create_pnp +* DESCRIPTION +* Initialized the plug and play manager. +* +* SYNOPSIS +*/ +ib_api_status_t +create_pnp( + IN al_obj_t* const p_parent_obj ); +/******/ + + +#ifdef CL_KERNEL + +/****f* Access Layer/al_pnp_ca_event +* NAME +* pnp_ca_event +* +* DESCRIPTION +* Reports a CA event to the plug and play manager. +* +* SYNOPSIS +*/ +ib_api_status_t +pnp_ca_event( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_pnp_event_t event ); +/* +* PARAMETERS +* p_ci_ca +* Pointer to the al_ci_ca_t structure for the ca for which the event +* is being reported. +* +* event +* One of IB_PNP_CA_ADD, IB_PNP_CA_REMOVE to indicate the type of CA +* event being reported. +*****/ + + +/****f* Access Layer/pnp_ca_change +* NAME +* pnp_ca_change +* +* DESCRIPTION +* Called by user mode AL to report a CA attribute change. +* +* SYNOPSIS +*/ +ib_api_status_t +pnp_ca_change( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_ca_attr_t* p_ca_attr ); +/* +* PARAMETERS +* p_ci_ca +* Pointer to the al_ci_ca_t structure for the ca for which the change +* is being reported. +* +* p_ca_attr +* Pointer to the updated CA attributes. +*****/ + + +/****f* Access Layer/pnp_check_events +* NAME +* pnp_poll +* +* DESCRIPTION +* Check for PnP new events and report changes to registered clients. +* +* SYNOPSIS +*/ +void +pnp_poll( + void ); +/******/ + + +/****f* Access Layer/pnp_create_context +* NAME +* pnp_create_context +* +* DESCRIPTION +* Creates a context structure for a reported PnP event. 
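+* The new context is stored in the registration's context_map, keyed +* by p_key, and can be looked up later with pnp_get_context.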
+* +* SYNOPSIS +*/ +al_pnp_context_t* +pnp_create_context( + IN al_pnp_t* const p_reg, + IN const void* const p_key ); +/******/ + +al_pnp_context_t* +pnp_get_context( + IN const al_pnp_t* const p_reg, + IN const void* const p_key ); + +void +pnp_reg_complete( + IN al_pnp_t* const p_reg ); + +ib_api_status_t +al_reg_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_req_t* const p_pnp_req, + IN KEVENT *p_sync_event, + OUT ib_pnp_handle_t* const ph_pnp ); + +#endif /* CL_KERNEL */ + +static inline ib_pnp_class_t +pnp_get_class( + IN const ib_pnp_class_t pnp_class ) +{ + return pnp_class & IB_PNP_CLASS_MASK; +} + +static inline ib_pnp_class_t +pnp_get_flag( + IN const ib_pnp_class_t pnp_class ) +{ + return pnp_class & IB_PNP_FLAG_MASK; +} + +#endif /* __AL_PNP_H__ */ diff --git a/branches/Ndi/core/al/al_proxy.h b/branches/Ndi/core/al/al_proxy.h new file mode 100644 index 00000000..fccf903b --- /dev/null +++ b/branches/Ndi/core/al/al_proxy.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * This header file defines data structures for the user-mode proxy + * and UAL support + * + * Environment: + * Kernel and User Mode. + */ + + +#ifndef _ALPROXY_H_ +#define _ALPROXY_H_ + + + +/* Just include everything */ +#include +#include + +#include "al_proxy_ioctl.h" +#include "al_mcast.h" + + +#define AL_CB_POOL_START_SIZE 10 +#define AL_CB_POOL_GROW_SIZE 5 + + +#define PROXY_PNP_TIMEOUT_US (5000000) + + +typedef struct _proxy_pnp_evt +{ + cl_event_t event; + ib_api_status_t evt_status; + void* evt_context; + size_t rec_size; + +} proxy_pnp_evt_t; + + +typedef union _proxy_pnp_recs +{ + ib_pnp_rec_t pnp; + ib_pnp_ca_rec_t ca; + ib_pnp_port_rec_t port; + ib_pnp_iou_rec_t iou; + ib_pnp_ioc_rec_t ioc; + ib_pnp_ioc_path_rec_t ioc_path; + +} proxy_pnp_recs_t; + + + +/********************************************************** + * + * Per-process device context. + * + **********************************************************/ +typedef struct _al_dev_open_context +{ + volatile boolean_t closing; + atomic32_t ref_cnt; + cl_event_t close_event; + + /* General purpose pool of list objects */ + cl_qpool_t cb_pool; + cl_spinlock_t cb_pool_lock; + + /* User-mode callback queues. 
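+ * Kernel-side events wait on these lists until user mode collects + * them; the pending IOCTLs below (h_cm_ioctl and friends) are how the + * user-mode callback threads are woken, and cb_lock guards all three + * queues.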
*/ + cl_qlist_t cm_cb_list; + cl_qlist_t comp_cb_list; + cl_qlist_t misc_cb_list; + cl_spinlock_t cb_lock; + + /* PnP synchronization mutex. */ + cl_mutex_t pnp_mutex; + + /* Pending IOCTLs. */ + cl_ioctl_handle_t h_cm_ioctl; + cl_ioctl_handle_t h_comp_ioctl; + cl_ioctl_handle_t h_misc_ioctl; + + /* Per-process AL handle. */ + ib_al_handle_t h_al; + +} al_dev_open_context_t; + + + +/****f* Access Layer - Proxy/proxy_context_ref +* NAME +* proxy_context_ref +* +* DESCRIPTION +* Function to reference the open context. +* It fails if the context is closing. +* +* SYNOPSIS +*/ +inline boolean_t +proxy_context_ref( + IN al_dev_open_context_t *p_context ) +{ + cl_atomic_inc( &p_context->ref_cnt ); + + return( !p_context->closing ); +} +/*********/ + + +/****f* Access Layer - Proxy/proxy_context_deref +* NAME +* proxy_context_deref +* +* DESCRIPTION +* Releases a reference on an open context acquired via a call to +* proxy_context_ref. +* +* SYNOPSIS +*/ +inline void +proxy_context_deref( + IN al_dev_open_context_t *p_context ) +{ + cl_atomic_dec( &p_context->ref_cnt ); + cl_event_signal( &p_context->close_event ); +} +/*********/ + + + +/* + * Generic callback information. Used to report callbacks from kernel to + * user-mode. + */ +typedef struct _al_proxy_cb_info +{ + cl_pool_item_t pool_item; /* must be first */ + al_dev_open_context_t *p_context; + + union _cb_type + { + cm_cb_ioctl_info_t cm; + comp_cb_ioctl_info_t comp; + misc_cb_ioctl_info_t misc; + + } cb_type; + + /* + * AL object to dereference after processing callback. We use this to + * ensure that a kernel object is not destroyed while a callback is in + * progress to user-mode. Since user-mode objects are not destroyed until + * the associated kernel objects are, this ensures that all callbacks + * from the kernel reference valid user-mode objects. + */ + al_obj_t *p_al_obj; + boolean_t reported; + +} al_proxy_cb_info_t; + + +al_proxy_cb_info_t* +proxy_cb_get( + IN al_dev_open_context_t *p_context ); + + +void +proxy_cb_put( + IN al_proxy_cb_info_t *p_cbinfo ); + + + +void +proxy_cb_put_list( + IN al_dev_open_context_t *p_context, + IN cl_qlist_t *p_cb_list ); + + +cl_status_t proxy_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t al_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t verbs_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t subnet_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t cm_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t cep_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +cl_status_t ioc_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ); + +boolean_t +proxy_queue_cb_buf( + IN uintn_t cb_type, + IN al_dev_open_context_t *p_context, + IN void *p_cb_data, + IN al_obj_t *p_al_obj OPTIONAL ); + + +ib_api_status_t +proxy_pnp_ca_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + +ib_api_status_t +proxy_pnp_port_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + + +#endif /* _AL_PROXY_H_ */ diff --git a/branches/Ndi/core/al/al_proxy_ioctl.h b/branches/Ndi/core/al/al_proxy_ioctl.h new file mode 100644 index 00000000..3453aaea --- /dev/null +++ b/branches/Ndi/core/al/al_proxy_ioctl.h @@ -0,0 +1,350 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_PROXY_IOCTL_H__) +#define __AL_PROXY_IOCTL_H__ + + + +/* + * IOCTL structures for passing the callback contexts to user mode. + */ + +typedef enum _cm_cb_rec_type +{ + CM_REQ_REC, + CM_REP_REC, + CM_RTU_REC, + CM_REJ_REC, + CM_MRA_REC, + CM_LAP_REC, + CM_APR_REC, + CM_DREQ_REC, + CM_DREP_REC + +} cm_cb_rec_type; + + + +typedef enum _misc_cb_rec_type +{ + CA_ERROR_REC, + QP_ERROR_REC, + SRQ_ERROR_REC, + CQ_ERROR_REC, + MCAST_REC, + MAD_SEND_REC, + MAD_RECV_REC, + SVC_REG_REC, + QUERY_REC, + PNP_REC, + SUB_REC, + REPORT_REC, + LISTEN_ERROR_REC + +} misc_cb_rec_type; + + + +/* + * Information for CM callbacks. + */ +typedef union _cm_cb_ioctl_rec +{ + /* REQ callback */ + struct _cm_req_cb_ioctl_rec + { + ib_cm_req_rec_t req_rec; + + /* Attributes needed to modify the QP in user-mode. */ + ib_qp_mod_t qp_mod_rtr; + ib_qp_mod_t qp_mod_rts; + uint32_t timeout_ms; + + union _cm_req_pdata_rec + { + ib_req_pdata_t req_pdata; + ib_sidr_req_pdata_t sidr_req_pdata; + + } cm_req_pdata_rec; + + } cm_req_cb_ioctl_rec; + + + /* REP callback */ + struct _cm_rep_cb_ioctl_rec + { + ib_cm_rep_rec_t rep_rec; + + /* Attributes needed to modify the QP in user-mode. 
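+         * The kernel CM pre-computes these so that user-mode AL can take
+         * the QP through RTR and RTS without extra kernel transitions.
+         * A sketch of the intended use, where p_rec is a hypothetical
+         * pointer to this record:
+         *
+         *   status = ib_modify_qp( h_qp, &p_rec->qp_mod_rtr );
+         *   if( status == IB_SUCCESS )
+         *       status = ib_modify_qp( h_qp, &p_rec->qp_mod_rts );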
*/ + ib_qp_mod_t qp_mod_rtr; + ib_qp_mod_t qp_mod_rts; + + union _cm_rep_pdata_rec + { + ib_rep_pdata_t rep_pdata; + ib_sidr_rep_pdata_t sidr_rep_pdata; + + } cm_rep_pdata_rec; + + } cm_rep_cb_ioctl_rec; + + + /* RTU callback */ + struct _cm_rtu_cb_ioctl_rec + { + ib_cm_rtu_rec_t rtu_rec; + ib_rtu_pdata_t rtu_pdata; + + } cm_rtu_cb_ioctl_rec; + + + /* REJ callback */ + struct _cm_rej_cb_ioctl_rec + { + ib_cm_rej_rec_t rej_rec; + ib_ari_t ari_pdata; + ib_rej_pdata_t rej_pdata; + + } cm_rej_cb_ioctl_rec; + + + /* MRA callback */ + struct _cm_mra_cb_ioctl_rec + { + ib_cm_mra_rec_t mra_rec; + ib_mra_pdata_t mra_pdata; + + } cm_mra_cb_ioctl_rec; + + + /* LAP callback */ + struct _cm_lap_cb_ioctl_rec + { + ib_cm_lap_rec_t lap_rec; + ib_lap_pdata_t lap_pdata; + + } cm_lap_cb_ioctl_rec; + + + /* APR callback */ + struct _cm_apr_cb_ioctl_rec + { + ib_cm_apr_rec_t apr_rec; + ib_apr_pdata_t apr_pdata; + ib_apr_info_t apr_info; + + } cm_apr_cb_ioctl_rec; + + + /* DREQ callback */ + struct _cm_dreq_cb_ioctl_rec + { + ib_cm_dreq_rec_t dreq_rec; + ib_dreq_pdata_t dreq_pdata; + + } cm_dreq_cb_ioctl_rec; + + + /* DREP callback */ + struct _cm_drep_cb_ioctl_rec + { + ib_cm_drep_rec_t drep_rec; + ib_drep_pdata_t drep_pdata; + + } cm_drep_cb_ioctl_rec; +/* +*/ +} cm_cb_ioctl_rec_t; + + + +/* + * Information for most callbacks. This does not include callbacks for + * the CM or completions. + */ +typedef union _misc_cb_ioctl_rec +{ + void* __ptr64 context; + + /* Asynchronous event records */ + ib_async_event_rec_t event_rec; + + /* Multicast record */ + struct _mcast_cb_ioctl_rec + { + const void* __ptr64 mcast_context; + ib_api_status_t status; + ib_net16_t error_status; + ib_mcast_handle_t h_mcast; + ib_member_rec_t member_rec; + + } mcast_cb_ioctl_rec; + + + /* Mad send */ + struct _mad_send_cb_ioctl_rec + { + ib_mad_element_t* __ptr64 p_um_mad; + ib_wc_status_t wc_status; + void* __ptr64 mad_svc_context; + + } mad_send_cb_ioctl_rec; + + + /* Mad receive */ + struct _mad_recv_cb_ioctl_rec + { + uint64_t h_mad; + uint32_t elem_size; + void* __ptr64 mad_svc_context; + ib_mad_element_t* __ptr64 p_send_mad; + + } mad_recv_cb_ioctl_rec; + + + /* Service Registration records */ + ib_reg_svc_rec_t reg_svc_rec; + + + /* PNP Record as defined here is for UAL's consumption alone */ + struct _pnp_cb_ioctl_rec + { + ib_pnp_event_t pnp_event; + + union _pnp_info + { + /* pnp_ca is valid only for CA events + * UAL can query based on the ca_guid for more info + */ + struct _pnp_ca + { + ib_net64_t ca_guid; + uint32_t size; + uint64_t h_ca_attr; /* handle to kernel ca attr */ + + } ca; + + /* pnp_port is valid only for port events + * UAL can query based on the ca_guid for more info + */ + struct _pnp_port + { + ib_net64_t ca_guid; + uint8_t port_num; + + } port; + + /* pnp_ioc is valid only for IOC events + * If IOC is implemented, need to determine + * whether the ioc event handle should be passed + */ + struct _pnp_ioc + { + ib_ioc_info_t info; + ib_pnp_handle_t h_ioc_event; + /* TBD: service entry is variable-sized?? 
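+             * (svc_entry[1] below is the usual C89 trailing-array idiom:
+             * the record is over-allocated so the entries follow it in
+             * memory.  A sketch of the size computation, where
+             * num_svc_entries stands in for whatever entry count the IOC
+             * info carries:
+             *
+             *   size = sizeof( struct _pnp_ioc ) +
+             *       (num_svc_entries - 1) * sizeof( ib_svc_entry_t );
+             * )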
*/ + ib_svc_entry_t svc_entry[1]; + + } ioc; + + } pnp_info; + + } pnp_cb_ioctl_rec; + + + ib_listen_err_rec_t listen_err; + + + /* Subscription Record */ + ib_sub_rec_t sub_rec; + + + /* Report record */ + struct _report_cb_ioctl_rec + { + const void* __ptr64 report_context; + ib_mad_notice_attr_t notice; + + } report_cb_ioctl_rec; + + +} misc_cb_ioctl_rec_t; + + + + +typedef struct _cm_cb_ioctl_info +{ + cm_cb_rec_type rec_type; + cm_cb_ioctl_rec_t ioctl_rec; + +} cm_cb_ioctl_info_t; + + + +typedef struct _comp_cb_ioctl_info +{ + void* __ptr64 cq_context; + +} comp_cb_ioctl_info_t; + + + +typedef struct _misc_cb_ioctl_info +{ + misc_cb_rec_type rec_type; + misc_cb_ioctl_rec_t ioctl_rec; + +} misc_cb_ioctl_info_t; + + + +typedef union _ual_ca_attr_info +{ + struct _ual_ca_attr_info_in + { + ib_ca_attr_t* __ptr64 p_ca_attr; /* where to copy in user mode */ + uint64_t h_ca_attr; /* kernel handle to ca attribute */ + + } in; + struct _ual_ca_attr_info_out + { + ib_api_status_t status; + + } out; + +} ual_ca_attr_info_ioctl_t; + + + +#endif /* __AL_PROXY_IOCTL_H__ */ diff --git a/branches/Ndi/core/al/al_qp.c b/branches/Ndi/core/al/al_qp.c new file mode 100644 index 00000000..69aa89b4 --- /dev/null +++ b/branches/Ndi/core/al/al_qp.c @@ -0,0 +1,2114 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include + +#include "al.h" +#include "al_av.h" +#include "al_ca.h" +#include "al_cm_cep.h" +#include "al_cq.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_qp.tmh" +#endif +#include "al_mad.h" +#include "al_mad_pool.h" +#include "al_mcast.h" +#include "al_mgr.h" +#include "al_mr.h" +#include "al_mw.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_query.h" +#ifdef CL_KERNEL +#include "al_smi.h" +#endif /* CL_KERNEL */ +#include "al_verbs.h" + +#include "ib_common.h" + + +#define UNBOUND_PORT_GUID 0 + + +extern ib_pool_handle_t gh_mad_pool; + + +/* + * Function prototypes. 
+ */ +void +destroying_qp( + IN al_obj_t *p_obj ); + +void +cleanup_qp( + IN al_obj_t *p_obj ); + +void +free_qp( + IN al_obj_t *p_obj ); + + + +ib_api_status_t +init_base_qp( + IN ib_qp_t* const p_qp, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +ib_api_status_t +init_raw_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid OPTIONAL, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +ib_api_status_t +init_conn_qp( + IN al_conn_qp_t* const p_conn_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +ib_api_status_t +init_dgrm_qp( + IN al_dgrm_qp_t* const p_dgrm_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +ib_api_status_t +init_special_qp( + IN al_special_qp_t* const p_special_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create ); + +ib_api_status_t +init_qp_alias( + IN al_qp_alias_t* const p_qp_alias, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create ); + +ib_api_status_t +init_mad_qp( + IN al_mad_qp_t* const p_mad_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const ib_pfn_event_cb_t pfn_qp_event_cb ); + +ib_api_status_t +init_mad_dgrm_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info ); + + +ib_api_status_t +al_modify_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +init_dgrm_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info ); + +ib_api_status_t +mad_qp_post_recvs( + IN al_mad_qp_t* const p_mad_qp ); + +ib_api_status_t +ud_post_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t **pp_send_failure ); + +ib_api_status_t +special_qp_post_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t **pp_send_failure ); + +void +mad_qp_queue_mad( + IN const ib_qp_handle_t h_qp, + IN al_mad_wr_t* const p_mad_wr ); + +void +mad_qp_resume_sends( + IN ib_qp_handle_t h_qp ); + +void +mad_qp_flush_send( + IN al_mad_qp_t* p_mad_qp, + IN al_mad_wr_t* const p_mad_wr ); + +void +mad_recv_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +mad_send_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +mad_qp_comp( + IN al_mad_qp_t* p_mad_qp, + IN const ib_cq_handle_t h_cq, + IN ib_wc_type_t wc_type ); + +void +mad_qp_cq_event_cb( + IN ib_async_event_rec_t *p_event_rec ); + + + +/* + * Allocates a structure to store QP information. 
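+ *
+ * This is the first step of create_qp(), which backs ib_create_qp.
+ * For reference, a typical caller reaches it roughly as follows
+ * (sketch only; h_pd, h_cq, context, and event_cb assumed valid):
+ *
+ *   ib_qp_create_t create;
+ *   ib_qp_handle_t h_qp;
+ *
+ *   cl_memclr( &create, sizeof( ib_qp_create_t ) );
+ *   create.qp_type = IB_QPT_RELIABLE_CONN;
+ *   create.sq_depth = 16;
+ *   create.rq_depth = 16;
+ *   create.sq_sge = 1;
+ *   create.rq_sge = 1;
+ *   create.h_sq_cq = h_cq;
+ *   create.h_rq_cq = h_cq;
+ *   status = ib_create_qp( h_pd, &create, context, event_cb, &h_qp );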
+ */ +ib_api_status_t +alloc_qp( + IN const ib_qp_type_t qp_type, + OUT ib_qp_handle_t* const ph_qp ) +{ + ib_qp_handle_t h_qp; + + switch( qp_type ) + { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_conn_qp_t ) ); + break; + + case IB_QPT_UNRELIABLE_DGRM: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_dgrm_qp_t ) ); + break; + + case IB_QPT_QP0: + case IB_QPT_QP1: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_special_qp_t ) ); + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHER: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( ib_qp_t ) ); + break; + + case IB_QPT_MAD: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_mad_qp_t ) ); + break; + + case IB_QPT_QP0_ALIAS: + case IB_QPT_QP1_ALIAS: + h_qp = (ib_qp_handle_t)cl_zalloc( sizeof( al_qp_alias_t ) ); + break; + + default: + CL_ASSERT( qp_type == IB_QPT_RELIABLE_CONN || + qp_type == IB_QPT_UNRELIABLE_CONN || + qp_type == IB_QPT_UNRELIABLE_DGRM || + qp_type == IB_QPT_QP0 || + qp_type == IB_QPT_QP1 || + qp_type == IB_QPT_RAW_IPV6 || + qp_type == IB_QPT_RAW_ETHER || + qp_type == IB_QPT_MAD || + qp_type == IB_QPT_QP0_ALIAS || + qp_type == IB_QPT_QP1_ALIAS ); + return IB_INVALID_SETTING; + } + + if( !h_qp ) + { + return IB_INSUFFICIENT_MEMORY; + } + + h_qp->type = qp_type; + + *ph_qp = h_qp; + return IB_SUCCESS; +} + + + +/* + * Initializes the QP information structure. + */ +ib_api_status_t +create_qp( + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb, + OUT ib_qp_handle_t* const ph_qp, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + ib_qp_handle_t h_qp; + + if( !p_qp_create || !ph_qp ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if (p_qp_create->h_srq && + AL_OBJ_INVALID_HANDLE( p_qp_create->h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + + /* Allocate a QP. */ + status = alloc_qp( p_qp_create->qp_type, &h_qp ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Init the base QP first. */ + status = init_base_qp( h_qp, qp_context, pfn_qp_event_cb, p_umv_buf ); + if( status != IB_SUCCESS ) + return status; + + /* Initialize the QP based on its type. 
*/
+    switch( h_qp->type )
+    {
+    case IB_QPT_RELIABLE_CONN:
+    case IB_QPT_UNRELIABLE_CONN:
+        if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
+            AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
+        {
+            status = IB_INVALID_CQ_HANDLE;
+            break;
+        }
+        status = init_conn_qp( (al_conn_qp_t*)h_qp, h_pd, p_qp_create, p_umv_buf );
+        break;
+
+    case IB_QPT_UNRELIABLE_DGRM:
+        if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
+            AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
+        {
+            status = IB_INVALID_CQ_HANDLE;
+            break;
+        }
+        status = init_dgrm_qp( (al_dgrm_qp_t*)h_qp, h_pd, p_qp_create, p_umv_buf );
+        break;
+
+    case IB_QPT_MAD:
+        if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq )
+        {
+            status = IB_INVALID_CQ_HANDLE;
+            break;
+        }
+        status = init_mad_qp( (al_mad_qp_t*)h_qp, h_pd, p_qp_create,
+            pfn_qp_event_cb );
+        break;
+
+    default:
+        CL_ASSERT( h_qp->type == IB_QPT_RELIABLE_CONN ||
+            h_qp->type == IB_QPT_UNRELIABLE_CONN ||
+            h_qp->type == IB_QPT_UNRELIABLE_DGRM ||
+            h_qp->type == IB_QPT_MAD );
+        status = IB_INVALID_SETTING;
+        break;
+    }
+
+    if( status != IB_SUCCESS )
+    {
+        h_qp->obj.pfn_destroy( &h_qp->obj, NULL );
+        return status;
+    }
+
+    *ph_qp = h_qp;
+
+    /*
+     * Note that we don't release the reference taken in init_al_obj here.
+     * For kernel clients, it is released in ib_create_qp.  For user-mode
+     * clients it is released by the proxy after the handle is extracted.
+     */
+    return IB_SUCCESS;
+}
+
+
+
+ib_api_status_t
+get_spl_qp(
+    IN        const    ib_pd_handle_t            h_pd,
+    IN        const    ib_net64_t                port_guid,
+    IN        const    ib_qp_create_t* const     p_qp_create,
+    IN        const    void* const               qp_context,
+    IN        const    ib_pfn_event_cb_t         pfn_qp_event_cb,
+        OUT            ib_pool_key_t* const      p_pool_key OPTIONAL,
+        OUT            ib_qp_handle_t* const     ph_qp,
+    IN    OUT          ci_umv_buf_t* const       p_umv_buf )
+{
+    ib_api_status_t        status;
+    ib_qp_handle_t         h_qp;
+
+    if( !p_qp_create || !ph_qp )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    /* Only allow creation of the special QP types. */
+    switch( p_qp_create->qp_type )
+    {
+#ifdef CL_KERNEL
+    case IB_QPT_QP0:
+    case IB_QPT_QP1:
+#endif
+    case IB_QPT_QP0_ALIAS:
+    case IB_QPT_QP1_ALIAS:
+    case IB_QPT_RAW_IPV6:
+    case IB_QPT_RAW_ETHER:
+        break;    /* The QP type is valid. */
+
+    default:
+        return IB_INVALID_SETTING;
+    }
+
+    /* Allocate a QP. */
+    status = alloc_qp( p_qp_create->qp_type, &h_qp );
+    if( status != IB_SUCCESS )
+    {
+        return status;
+    }
+
+    /* Init the base QP first. */
+    status = init_base_qp( h_qp, qp_context, pfn_qp_event_cb, p_umv_buf );
+    if( status != IB_SUCCESS )
+        return status;
+
+    /* Initialize the QP based on its type. */
+    switch( h_qp->type )
+    {
+#ifdef CL_KERNEL
+    case IB_QPT_QP0:
+    case IB_QPT_QP1:
+        if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) ||
+            AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) )
+        {
+            status = IB_INVALID_CQ_HANDLE;
+            break;
+        }
+        status = init_special_qp( (al_special_qp_t*)h_qp, h_pd, port_guid,
+            p_qp_create );
+        break;
+#endif /* CL_KERNEL */
+
+    case IB_QPT_QP0_ALIAS:
+    case IB_QPT_QP1_ALIAS:
+        if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq )
+        {
+            status = IB_INVALID_CQ_HANDLE;
+            break;
+        }
+        status = init_qp_alias( (al_qp_alias_t*)h_qp, h_pd, port_guid,
+            p_qp_create );
+        if( status == IB_SUCCESS && p_pool_key )
+        {
+            /* Create a pool_key to access the global MAD pool.
*/ + status = ib_reg_mad_pool( gh_mad_pool, h_pd, + &((al_qp_alias_t*)h_qp)->pool_key ); + if( status == IB_SUCCESS ) + { + /* + * Take a reference on the pool key since we don't have a + * mechanism for the pool key to clear the QP's pointer to it. + */ + ref_al_obj( &((al_qp_alias_t*)h_qp)->pool_key->obj ); + *p_pool_key = ((al_qp_alias_t*)h_qp)->pool_key; + } + } + break; + + case IB_QPT_RAW_IPV6: + case IB_QPT_RAW_ETHER: + if( AL_OBJ_INVALID_HANDLE( p_qp_create->h_sq_cq, AL_OBJ_TYPE_H_CQ ) || + AL_OBJ_INVALID_HANDLE( p_qp_create->h_rq_cq, AL_OBJ_TYPE_H_CQ ) ) + { + status = IB_INVALID_CQ_HANDLE; + break; + } + status = init_raw_qp( h_qp, h_pd, port_guid, p_qp_create, p_umv_buf ); + break; + + default: + CL_ASSERT( h_qp->type == IB_QPT_QP0 || + h_qp->type == IB_QPT_QP1 || + h_qp->type == IB_QPT_QP0_ALIAS || + h_qp->type == IB_QPT_QP1_ALIAS || + h_qp->type == IB_QPT_RAW_IPV6 || + h_qp->type == IB_QPT_RAW_ETHER ); + + status = IB_INVALID_SETTING; + break; + } + + if( status != IB_SUCCESS ) + { + h_qp->obj.pfn_destroy( &h_qp->obj, NULL ); + return status; + } + + *ph_qp = h_qp; + + return IB_SUCCESS; +} + + +static ib_api_status_t +al_bad_modify_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_qp_mod ); + UNUSED_PARAM( p_umv_buf ); + return IB_INVALID_PARAMETER; +} + + +static ib_api_status_t +al_bad_post_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + IN ib_send_wr_t **pp_send_failure OPTIONAL ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_send_wr ); + UNUSED_PARAM( pp_send_failure ); + return IB_INVALID_PARAMETER; +} + + +static ib_api_status_t +al_bad_post_recv( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + IN ib_recv_wr_t **p_recv_failure OPTIONAL ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_recv_wr ); + UNUSED_PARAM( p_recv_failure ); + return IB_INVALID_PARAMETER; +} + + +static ib_api_status_t +al_bad_init_dgrm_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_dgrm_info ); + return IB_INVALID_PARAMETER; +} + + +static ib_api_status_t +al_bad_reg_mad_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_mad_svc ); + UNUSED_PARAM( ph_mad_svc ); + return IB_INVALID_PARAMETER; +} + + +static ib_api_status_t +al_bad_dereg_mad_svc( + IN const ib_mad_svc_handle_t h_mad_svc ) +{ + UNUSED_PARAM( h_mad_svc ); + return IB_INVALID_PARAMETER; +} + + +static void +al_bad_queue_mad( + IN const ib_qp_handle_t h_qp, + IN al_mad_wr_t* const p_mad_wr ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_mad_wr ); +} + + +static void +al_bad_resume_mad( + IN const ib_qp_handle_t h_qp ) +{ + UNUSED_PARAM( h_qp ); + return; +} + + +static ib_api_status_t +al_bad_join_mcast( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ) +{ + UNUSED_PARAM( h_qp ); + UNUSED_PARAM( p_mcast_req ); + return IB_INVALID_PARAMETER; +} + + +ib_api_status_t +init_base_qp( + IN ib_qp_t* const p_qp, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_QP; + + CL_ASSERT( p_qp ); + + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + + construct_al_obj( &p_qp->obj, obj_type ); + status = init_al_obj( &p_qp->obj, 
qp_context, TRUE, + destroying_qp, cleanup_qp, free_qp ); + if( status != IB_SUCCESS ) + { + free_qp( &p_qp->obj ); + return status; + } + + p_qp->pfn_event_cb = pfn_qp_event_cb; + + /* + * All function pointers should be invalid. They will be set by + * derived QP types where appropriate. + */ + p_qp->pfn_modify_qp = al_bad_modify_qp; + p_qp->pfn_post_recv = al_bad_post_recv; + p_qp->pfn_post_send = al_bad_post_send; + p_qp->pfn_reg_mad_svc = al_bad_reg_mad_svc; + p_qp->pfn_dereg_mad_svc = al_bad_dereg_mad_svc; + p_qp->pfn_queue_mad = al_bad_queue_mad; + p_qp->pfn_resume_mad = al_bad_resume_mad; + p_qp->pfn_init_dgrm_svc = al_bad_init_dgrm_svc; + p_qp->pfn_join_mcast = al_bad_join_mcast; + + if( p_qp->type == IB_QPT_RELIABLE_CONN || + p_qp->type == IB_QPT_UNRELIABLE_CONN ) + { + ((al_conn_qp_t*)p_qp)->cid = AL_INVALID_CID; + } + + return status; +} + + + +ib_api_status_t +init_raw_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid OPTIONAL, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + ib_qp_create_t qp_create; + ib_qp_attr_t qp_attr; + uint8_t port_num; + + status = attach_al_obj( &h_pd->obj, &h_qp->obj ); + if( status != IB_SUCCESS ) + return status; + + /* Convert AL handles to CI handles. */ + qp_create = *p_qp_create; + convert_qp_handle( qp_create ); + + /* Clear the QP attributes to ensure non-set values are 0. */ + cl_memclr( &qp_attr, sizeof( ib_qp_attr_t ) ); + + h_qp->port_guid = port_guid; + + /* + * Allocate a QP from the channel adapter. Note that these calls + * set the send and receive pointers appropriately for posting + * work requests. + */ + if( port_guid == UNBOUND_PORT_GUID ) + { + status = + verbs_create_qp( h_pd, h_qp, &qp_create, &qp_attr, p_umv_buf ); + } + else + { + status = get_port_num( h_pd->obj.p_ci_ca, port_guid, &port_num ); + if( status == IB_SUCCESS ) + { + status = verbs_get_spl_qp( h_pd, port_num, h_qp, + &qp_create, &qp_attr ); + } + } + if( status != IB_SUCCESS ) + { + return status; + } + + /* Override function pointers. */ + h_qp->pfn_modify_qp = al_modify_qp; + + if( h_qp->type == IB_QPT_UNRELIABLE_DGRM || + h_qp->type == IB_QPT_QP0 || + h_qp->type == IB_QPT_QP1 ) + { + /* We have to mess with the AV handles. */ + h_qp->pfn_ud_post_send = h_qp->pfn_post_send; + h_qp->h_ud_send_qp = h_qp->h_send_qp; + + h_qp->pfn_post_send = ud_post_send; + h_qp->h_send_qp = h_qp; + } + + h_qp->h_recv_cq = p_qp_create->h_rq_cq; + h_qp->h_send_cq = p_qp_create->h_sq_cq; + + h_qp->recv_cq_rel.p_child_obj = (cl_obj_t*)h_qp; + h_qp->send_cq_rel.p_child_obj = (cl_obj_t*)h_qp; + + cq_attach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel ); + cq_attach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel ); + + h_qp->h_srq = p_qp_create->h_srq; + h_qp->srq_rel.p_child_obj = (cl_obj_t*)h_qp; + if (h_qp->h_srq) + srq_attach_qp( h_qp->h_srq, &h_qp->srq_rel ); + + h_qp->num = qp_attr.num; + + return IB_SUCCESS; +} + + + +ib_api_status_t +init_conn_qp( + IN al_conn_qp_t* const p_conn_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + CL_ASSERT( p_conn_qp ); + + /* Initialize the inherited QP first. 
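+     *
+     * The derived QP types embed ib_qp_t as their first member (see
+     * al_qp.h), so the base initializer operates through a simple cast
+     * and the derived type can always be recovered from a base pointer:
+     *
+     *   al_conn_qp_t *p_conn_qp = PARENT_STRUCT( h_qp, al_conn_qp_t, qp );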
*/ + status = init_raw_qp( &p_conn_qp->qp, h_pd, UNBOUND_PORT_GUID, + p_qp_create, p_umv_buf ); + + + return status; +} + + + +ib_api_status_t +init_dgrm_qp( + IN al_dgrm_qp_t* const p_dgrm_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + CL_ASSERT( p_dgrm_qp ); + + /* Initialize the inherited QP first. */ + status = init_raw_qp( p_dgrm_qp, h_pd, UNBOUND_PORT_GUID, + p_qp_create, p_umv_buf ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Override function pointers. */ + p_dgrm_qp->pfn_init_dgrm_svc = init_dgrm_svc; + p_dgrm_qp->pfn_join_mcast = al_join_mcast; + + return IB_SUCCESS; +} + + +#ifdef CL_KERNEL +ib_api_status_t +init_special_qp( + IN al_special_qp_t* const p_special_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create ) +{ + ib_api_status_t status; + CL_ASSERT( p_special_qp ); + + /* Construct the special QP. */ + cl_qlist_init( &p_special_qp->to_send_queue ); + + /* Initialize the inherited QP first. */ + status = + init_raw_qp( &p_special_qp->qp, h_pd, port_guid, p_qp_create, NULL ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Override function pointers. */ + p_special_qp->qp.pfn_init_dgrm_svc = init_dgrm_svc; + p_special_qp->qp.pfn_queue_mad = special_qp_queue_mad; + p_special_qp->qp.pfn_resume_mad = special_qp_resume_sends; + + return IB_SUCCESS; +} + + +ib_api_status_t +init_qp_alias( + IN al_qp_alias_t* const p_qp_alias, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create ) +{ + ib_api_status_t status; + + CL_ASSERT( p_qp_alias ); + UNUSED_PARAM( p_qp_create ); + + if( h_pd->type != IB_PDT_ALIAS ) + { + return IB_INVALID_PD_HANDLE; + } + + status = attach_al_obj( &h_pd->obj, &p_qp_alias->qp.obj ); + if( status != IB_SUCCESS ) + return status; + + switch( p_qp_alias->qp.type ) + { + case IB_QPT_QP0_ALIAS: + status = acquire_smi_disp( port_guid, &p_qp_alias->h_mad_disp ); + break; + + case IB_QPT_QP1_ALIAS: + status = acquire_gsi_disp( port_guid, &p_qp_alias->h_mad_disp ); + break; + + default: + CL_ASSERT( p_qp_alias->qp.type == IB_QPT_QP0_ALIAS || + p_qp_alias->qp.type == IB_QPT_QP1_ALIAS ); + return IB_ERROR; + } + + if( status != IB_SUCCESS ) + return status; + + /* Get a copy of the QP used by the MAD dispatcher. */ + ref_al_obj( &p_qp_alias->h_mad_disp->h_qp->obj ); + p_qp_alias->qp.h_ci_qp = p_qp_alias->h_mad_disp->h_qp->h_ci_qp; + + /* Override function pointers. */ + p_qp_alias->qp.pfn_reg_mad_svc = reg_mad_svc; + + return IB_SUCCESS; +} +#endif /* CL_KERNEL */ + + + +ib_api_status_t +init_mad_qp( + IN al_mad_qp_t* const p_mad_qp, + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const ib_pfn_event_cb_t pfn_qp_event_cb ) +{ + ib_cq_create_t cq_create; + ib_qp_create_t qp_create; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + ib_api_status_t status; + + CL_ASSERT( p_mad_qp ); + + /* Initialize the send and receive tracking queues. */ + cl_qlist_init( &p_mad_qp->to_send_queue ); + cl_qlist_init( &p_mad_qp->send_queue ); + cl_qlist_init( &p_mad_qp->recv_queue ); + + /* The CQ handles must be NULL when creating a MAD queue pair. */ + if( p_qp_create->h_sq_cq || p_qp_create->h_rq_cq ) + { + return IB_INVALID_SETTING; + } + + /* Initialize the CQs used with the MAD QP. */ + cl_memclr( &cq_create, sizeof( ib_cq_create_t ) ); + + /* Create the send CQ. 
*/ + cq_create.size = p_qp_create->sq_depth; + cq_create.pfn_comp_cb = mad_send_comp_cb; + + status = ib_create_cq( h_pd->obj.p_ci_ca->h_ca, &cq_create, + p_mad_qp, mad_qp_cq_event_cb, &p_mad_qp->h_send_cq ); + + if( status != IB_SUCCESS ) + { + return status; + } + + /* Reference the MAD QP on behalf of ib_create_cq. */ + ref_al_obj( &p_mad_qp->qp.obj ); + + /* Create the receive CQ. */ + cq_create.size = p_qp_create->rq_depth; + cq_create.pfn_comp_cb = mad_recv_comp_cb; + + h_ca = PARENT_STRUCT( h_pd->obj.p_parent_obj, ib_ca_t, obj ); + status = ib_create_cq( h_ca, &cq_create, p_mad_qp, mad_qp_cq_event_cb, + &p_mad_qp->h_recv_cq ); + + if( status != IB_SUCCESS ) + { + return status; + } + + /* Reference the MAD QP on behalf of ib_create_cq. */ + ref_al_obj( &p_mad_qp->qp.obj ); + + /* Save the requested receive queue depth. This is used to post MADs. */ + p_mad_qp->max_rq_depth = p_qp_create->rq_depth; + + /* Allocate a datagram QP for the MAD QP. */ + qp_create = *p_qp_create; + qp_create.qp_type = IB_QPT_UNRELIABLE_DGRM; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + qp_create.h_rq_cq = p_mad_qp->h_recv_cq; + qp_create.h_sq_cq = p_mad_qp->h_send_cq; + + status = ib_create_qp( h_pd, &qp_create, p_mad_qp, pfn_qp_event_cb, + &p_mad_qp->h_dgrm_qp ); + + if( status != IB_SUCCESS ) + { + return status; + } + + /* Reference the MAD QP on behalf of ib_create_qp. */ + ref_al_obj( &p_mad_qp->qp.obj ); + + /* Create the MAD dispatch service. */ + status = create_mad_disp( &p_mad_qp->qp.obj, &p_mad_qp->qp, + &p_mad_qp->h_mad_disp ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Override function pointers. */ + p_mad_qp->qp.pfn_init_dgrm_svc = init_mad_dgrm_svc; + p_mad_qp->qp.pfn_queue_mad = mad_qp_queue_mad; + p_mad_qp->qp.pfn_resume_mad = mad_qp_resume_sends; + p_mad_qp->qp.pfn_reg_mad_svc = reg_mad_svc; + + /* The client's AL handle is the grandparent of the PD. */ + h_al = PARENT_STRUCT( h_pd->obj.p_parent_obj->p_parent_obj, ib_al_t, obj ); + + /* Create a receive MAD pool. */ + status = ib_create_mad_pool( h_al, p_mad_qp->max_rq_depth + 16, 0, 16, + &p_mad_qp->h_pool ); + + if (status != IB_SUCCESS) + { + return status; + } + + /* + * The MAD pool is a child of the client's AL instance. If the client + * closes AL, the MAD pool will be destroyed before the MAD queue pair. + * Therefore, we hold a reference on the MAD pool to keep it from being + * destroyed until the MAD queue pair is destroyed. Refer to the MAD + * queue pair cleanup code. + */ + ref_al_obj( &p_mad_qp->h_pool->obj ); + + /* Register the MAD pool with the PD. */ + status = ib_reg_mad_pool( p_mad_qp->h_pool, h_pd, &p_mad_qp->pool_key ); + + if (status != IB_SUCCESS) + { + return status; + } + + /* + * Attach the MAD queue pair to the protection domain. This must be + * done after creating the datagram queue pair and the MAD pool to set + * the correct order of object destruction. + */ + status = attach_al_obj( &h_pd->obj, &p_mad_qp->qp.obj ); + + /* Get a copy of the CI datagram QP for ib_query_qp. 
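+     * The MAD QP aliases the CI handle of its underlying datagram QP,
+     * so a query against the MAD QP reads the real hardware QP.
+     * Illustrative:
+     *
+     *   ib_qp_attr_t attr;
+     *   status = ib_query_qp( &p_mad_qp->qp, &attr );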
*/ + p_mad_qp->qp.h_ci_qp = p_mad_qp->h_dgrm_qp->h_ci_qp; + + return status; +} + + + +ib_api_status_t +ib_destroy_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_QP ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + + ref_al_obj( &h_qp->obj ); + h_qp->obj.pfn_destroy( &h_qp->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_QP ); + return IB_SUCCESS; +} + + + +/* + * Release any resources that must be cleaned up immediately, such as + * any AL resources acquired by calling through the main API. + */ +void +destroying_qp( + IN al_obj_t *p_obj ) +{ + ib_qp_handle_t h_qp; + al_mad_qp_t *p_mad_qp; + al_qp_alias_t *p_qp_alias; + net32_t cid; + + CL_ASSERT( p_obj ); + h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj ); + + switch( h_qp->type ) + { + case IB_QPT_MAD: + /* Destroy QP and CQ services required for MAD QP support. */ + p_mad_qp = PARENT_STRUCT( h_qp, al_mad_qp_t, qp ); + + if( p_mad_qp->h_dgrm_qp ) + { + ib_destroy_qp( p_mad_qp->h_dgrm_qp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + p_mad_qp->qp.h_ci_qp = NULL; + } + + if( p_mad_qp->h_recv_cq ) + { + ib_destroy_cq( p_mad_qp->h_recv_cq, + (ib_pfn_destroy_cb_t)deref_al_obj ); + } + + if( p_mad_qp->h_send_cq ) + { + ib_destroy_cq( p_mad_qp->h_send_cq, + (ib_pfn_destroy_cb_t)deref_al_obj ); + } + break; + + case IB_QPT_QP0_ALIAS: + case IB_QPT_QP1_ALIAS: + p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp ); + + if( p_qp_alias->pool_key ) + { + ib_api_status_t status; + /* Deregister the pool_key. */ + status = dereg_mad_pool( p_qp_alias->pool_key, AL_KEY_ALIAS ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("dereg_mad_pool returned %s.\n", + ib_get_err_str(status)) ); + /* Release the reference taken when we created the pool key. */ + deref_al_obj( &p_qp_alias->pool_key->obj ); + } + p_qp_alias->pool_key = NULL; + } + + if( p_qp_alias->qp.h_ci_qp ) + { + deref_al_obj( &p_qp_alias->h_mad_disp->h_qp->obj ); + p_qp_alias->qp.h_ci_qp = NULL; + } + + /* + * If the pool_key still exists here, then the QP is being destroyed + * by destroying its parent (the PD). Destruction of the PD will also + * destroy the pool_key. + */ + + if( p_qp_alias->h_mad_disp ) + deref_al_obj( &p_qp_alias->h_mad_disp->obj ); + break; + + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + cid = cl_atomic_xchg( + &((al_conn_qp_t*)h_qp)->cid, AL_INVALID_CID ); + if( cid != AL_INVALID_CID ) + { + ref_al_obj( &h_qp->obj ); + if( al_destroy_cep( + h_qp->obj.h_al, cid, deref_al_obj ) != IB_SUCCESS ) + { + deref_al_obj( &h_qp->obj ); + } + } + + /* Fall through. */ + case IB_QPT_UNRELIABLE_DGRM: + default: + /* Multicast membership gets cleaned up by object hierarchy. */ + cq_detach_qp( h_qp->h_recv_cq, &h_qp->recv_cq_rel ); + cq_detach_qp( h_qp->h_send_cq, &h_qp->send_cq_rel ); + if (h_qp->h_srq) + srq_detach_qp( h_qp->h_srq, &h_qp->srq_rel ); + } +} + + + +/* + * Release any HW resources. 
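+ *
+ * destroying_qp, cleanup_qp, and free_qp run in the order registered
+ * with init_al_obj() in init_base_qp().  The whole teardown sequence is
+ * normally triggered by a client call such as (illustrative):
+ *
+ *   status = ib_destroy_qp( h_qp, destroy_cb );
+ *
+ * where destroy_cb is an optional ib_pfn_destroy_cb_t.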
+ */ +void +cleanup_qp( + IN al_obj_t *p_obj ) +{ + ib_qp_handle_t h_qp; + al_mad_qp_t* p_mad_qp; + al_mad_wr_t* p_mad_wr; + cl_list_item_t* p_list_item; + al_mad_element_t* p_al_mad; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj ); + + if( verbs_check_qp( h_qp ) ) + { + status = verbs_destroy_qp( h_qp ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("verbs_destroy_qp failed with status %s.\n", + ib_get_err_str(status)) ); + } + h_qp->h_ci_qp = NULL; + } + + if( h_qp->type == IB_QPT_MAD ) + { + /* All MAD queue pair operations are complete. */ + p_mad_qp = PARENT_STRUCT( h_qp, al_mad_qp_t, qp ); + + /* Append the pending MAD send queue to the posted MAD send queue. */ + cl_qlist_insert_list_tail( &p_mad_qp->send_queue, + &p_mad_qp->to_send_queue ); + + /* Complete all MAD sends as "flushed". */ + for( p_list_item = cl_qlist_remove_head( &p_mad_qp->send_queue ); + p_list_item != cl_qlist_end( &p_mad_qp->send_queue ); + p_list_item = cl_qlist_remove_head( &p_mad_qp->send_queue ) ) + { + p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item ); + mad_qp_flush_send( p_mad_qp, p_mad_wr ); + } + + /* Return any posted receive MAD elements to the pool. */ + for( p_list_item = cl_qlist_remove_head( &p_mad_qp->recv_queue ); + p_list_item != cl_qlist_end( &p_mad_qp->recv_queue ); + p_list_item = cl_qlist_remove_head( &p_mad_qp->recv_queue ) ) + { + p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, + list_item ); + + status = ib_put_mad( &p_al_mad->element ); + CL_ASSERT( status == IB_SUCCESS ); + } + + if( p_mad_qp->h_pool ) + { + /* + * Destroy the receive MAD pool. If the client has closed the + * AL instance, the MAD pool should already be destroying. In + * this case, we simply release our reference on the pool to + * allow it to cleanup and deallocate. Otherwise, we initiate + * the destruction of the MAD pool and release our reference. + */ + cl_spinlock_acquire( &p_mad_qp->h_pool->obj.lock ); + if( p_mad_qp->h_pool->obj.state == CL_DESTROYING ) + { + cl_spinlock_release( &p_mad_qp->h_pool->obj.lock ); + } + else + { + cl_spinlock_release( &p_mad_qp->h_pool->obj.lock ); + ib_destroy_mad_pool( p_mad_qp->h_pool ); + } + deref_al_obj( &p_mad_qp->h_pool->obj ); + } + } + else + { + if( h_qp->h_recv_cq ) + deref_al_obj( &h_qp->h_recv_cq->obj ); + if( h_qp->h_send_cq ) + deref_al_obj( &h_qp->h_send_cq->obj ); + if( h_qp->h_srq ) + deref_al_obj( &h_qp->h_srq->obj ); + } +} + + + +void +free_qp( + IN al_obj_t *p_obj ) +{ + ib_qp_handle_t h_qp; + + CL_ASSERT( p_obj ); + h_qp = PARENT_STRUCT( p_obj, ib_qp_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_qp ); +} + + + +ib_api_status_t +ib_query_qp( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* const p_qp_attr ) +{ + return query_qp( h_qp, p_qp_attr, NULL ); +} + + +ib_api_status_t +query_qp( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* const p_qp_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + if( !p_qp_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_qp( h_qp, p_qp_attr ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_QP ); + return status; + } + + /* Convert to using AL's handles. 
*/
+    p_qp_attr->h_pd = PARENT_STRUCT( h_qp->obj.p_parent_obj, ib_pd_t, obj );
+    p_qp_attr->h_rq_cq = h_qp->h_recv_cq;
+    p_qp_attr->h_sq_cq = h_qp->h_send_cq;
+    p_qp_attr->qp_type = h_qp->type;
+    p_qp_attr->h_srq = h_qp->h_srq;
+
+    AL_EXIT( AL_DBG_QP );
+    return IB_SUCCESS;
+}
+
+
+
+ib_api_status_t
+ib_modify_qp(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        const    ib_qp_mod_t* const        p_qp_mod )
+{
+    return modify_qp( h_qp, p_qp_mod, NULL );
+}
+
+
+
+ib_api_status_t
+modify_qp(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        const    ib_qp_mod_t* const        p_qp_mod,
+    IN    OUT          ci_umv_buf_t* const       p_umv_buf )
+{
+    ib_api_status_t        status;
+
+    AL_ENTER( AL_DBG_QP );
+
+    if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+        return IB_INVALID_QP_HANDLE;
+    }
+    if( !p_qp_mod )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    status = h_qp->pfn_modify_qp( h_qp, p_qp_mod, p_umv_buf );
+
+    AL_EXIT( AL_DBG_QP );
+    return status;
+}
+
+
+
+ib_api_status_t
+al_modify_qp(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        const    ib_qp_mod_t* const        p_qp_mod,
+    IN    OUT          ci_umv_buf_t* const       p_umv_buf )
+{
+    ib_api_status_t        status;
+    ib_qp_attr_t           qp_attr;
+
+    CL_ASSERT( h_qp );
+
+#ifdef CL_KERNEL
+    /* Only allow ERROR, RESET, and INIT state changes during timewait. */
+    if( (h_qp->type == IB_QPT_RELIABLE_CONN ||
+        h_qp->type == IB_QPT_UNRELIABLE_CONN) &&
+        p_qp_mod->req_state != IB_QPS_ERROR &&
+        p_qp_mod->req_state != IB_QPS_RESET &&
+        p_qp_mod->req_state != IB_QPS_INIT &&
+        cl_get_time_stamp() < h_qp->timewait )
+    {
+        return IB_QP_IN_TIMEWAIT;
+    }
+#endif /* CL_KERNEL */
+
+    /* Modify the actual QP attributes. */
+    status = verbs_modify_qp( h_qp, p_qp_mod, qp_attr );
+
+    /* Record the QP state if the modify was successful. */
+    if( status == IB_SUCCESS )
+        h_qp->state = p_qp_mod->req_state;
+
+    return status;
+}
+
+
+
+ib_api_status_t
+ib_init_dgrm_svc(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        const    ib_dgrm_info_t* const     p_dgrm_info OPTIONAL )
+{
+    ib_api_status_t        status;
+
+    AL_ENTER( AL_DBG_QP );
+
+    if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+        return IB_INVALID_QP_HANDLE;
+    }
+
+    switch( h_qp->type )
+    {
+    case IB_QPT_QP0:
+    case IB_QPT_QP1:
+    case IB_QPT_RAW_IPV6:
+    case IB_QPT_RAW_ETHER:
+        break;
+
+    case IB_QPT_UNRELIABLE_DGRM:
+    case IB_QPT_MAD:
+        if( !p_dgrm_info )
+        {
+            AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+                ("IB_INVALID_PARAMETER\n") );
+            return IB_INVALID_PARAMETER;
+        }
+        break;
+
+    default:
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    status = h_qp->pfn_init_dgrm_svc( h_qp, p_dgrm_info );
+
+    AL_EXIT( AL_DBG_QP );
+    return status;
+}
+
+
+
+/*
+ * Initialize a datagram QP to send and receive datagrams.
+ */
+ib_api_status_t
+init_dgrm_svc(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        const    ib_dgrm_info_t* const     p_dgrm_info OPTIONAL )
+{
+    al_dgrm_qp_t        *p_dgrm_qp;
+    ib_qp_mod_t         qp_mod;
+    ib_api_status_t     status;
+
+    CL_ASSERT( h_qp );
+
+    p_dgrm_qp = (al_dgrm_qp_t*)h_qp;
+
+    /* Change to the RESET state. */
+    cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) );
+    qp_mod.req_state = IB_QPS_RESET;
+
+    status = ib_modify_qp( h_qp, &qp_mod );
+    if( status != IB_SUCCESS )
+    {
+        return status;
+    }
+
+    /* Change to the INIT state.
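+     * (For reference, a client drives this whole RESET->INIT->RTR->RTS
+     * ladder through a single ib_init_dgrm_svc call.  Sketch only, with
+     * h_qp and port_guid assumed valid:
+     *
+     *   ib_dgrm_info_t info;
+     *
+     *   info.port_guid = port_guid;
+     *   info.qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+     *   info.pkey_index = 0;
+     *   status = ib_init_dgrm_svc( h_qp, &info );
+     * )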
*/ + cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) ); + qp_mod.req_state = IB_QPS_INIT; + if( p_dgrm_info ) + { + qp_mod.state.init.qkey = p_dgrm_info->qkey; + qp_mod.state.init.pkey_index = p_dgrm_info->pkey_index; + status = get_port_num( h_qp->obj.p_ci_ca, p_dgrm_info->port_guid, + &qp_mod.state.init.primary_port ); + } + else + { + if( h_qp->type == IB_QPT_QP0 ) + qp_mod.state.init.qkey = 0; + else + qp_mod.state.init.qkey = IB_QP1_WELL_KNOWN_Q_KEY; + status = get_port_num( h_qp->obj.p_ci_ca, h_qp->port_guid, + &qp_mod.state.init.primary_port ); + } + if( status != IB_SUCCESS ) + { + return status; + } + + status = ib_modify_qp( h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Change to the RTR state. */ + cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) ); + qp_mod.req_state = IB_QPS_RTR; + + status = ib_modify_qp( h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Change to the RTS state. */ + cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) ); + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.sq_psn = CL_HTON32(cl_get_time_stamp_sec() & 0x00ffffff); + status = ib_modify_qp( h_qp, &qp_mod ); + + return status; +} + + + +ib_api_status_t +init_mad_dgrm_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info ) +{ + al_mad_qp_t *p_mad_qp; + ib_api_status_t status; + + CL_ASSERT( h_qp ); + + p_mad_qp = (al_mad_qp_t*)h_qp; + status = ib_init_dgrm_svc( p_mad_qp->h_dgrm_qp, p_dgrm_info ); + if( status != IB_SUCCESS ) + { + return status; + } + + /* Post receive buffers. */ + status = mad_qp_post_recvs( p_mad_qp ); + if (status != IB_SUCCESS) + { + return status; + } + + /* Force a completion callback to rearm the CQs. */ + mad_send_comp_cb( p_mad_qp->h_send_cq, p_mad_qp ); + mad_recv_comp_cb( p_mad_qp->h_recv_cq, p_mad_qp ); + + return status; +} + + + +ib_api_status_t +ib_reg_mad_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD_SVC ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + + status = h_qp->pfn_reg_mad_svc( h_qp, p_mad_svc, ph_mad_svc ); + + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*ph_mad_svc)->obj ); + + AL_EXIT( AL_DBG_MAD_SVC ); + return status; +} + + +ib_api_status_t +ib_join_mcast( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MCAST ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + if( !p_mcast_req ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = h_qp->pfn_join_mcast( h_qp, p_mcast_req ); + + AL_EXIT( AL_DBG_MCAST ); + return status; +} + + + +/* + * Post a work request to the send queue of the QP. 
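+ *
+ * Typical single-request usage, as a sketch (h_qp assumed valid,
+ * local_ds describing a registered buffer, my_context the caller's
+ * cookie; zeroing the request leaves p_next NULL):
+ *
+ *   ib_send_wr_t wr;
+ *
+ *   cl_memclr( &wr, sizeof( ib_send_wr_t ) );
+ *   wr.wr_id = (uintn_t)my_context;
+ *   wr.wr_type = WR_SEND;
+ *   wr.send_opt = IB_SEND_OPT_SIGNALED;
+ *   wr.num_ds = 1;
+ *   wr.ds_array = &local_ds;
+ *   status = ib_post_send( h_qp, &wr, NULL );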
+ */
+ib_api_status_t
+ib_post_send(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        ib_send_wr_t* const               p_send_wr,
+        OUT    ib_send_wr_t                     **pp_send_failure OPTIONAL )
+{
+    ib_api_status_t        status;
+    PERF_DECLARE( IbPostSend );
+    PERF_DECLARE( PostSend );
+
+    cl_perf_start( IbPostSend );
+    AL_ENTER( AL_DBG_QP );
+
+    if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") );
+        return IB_INVALID_QP_HANDLE;
+    }
+    if( !p_send_wr || ( p_send_wr->p_next && !pp_send_failure ) )
+    {
+        AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+        return IB_INVALID_PARAMETER;
+    }
+
+    cl_perf_start( PostSend );
+    status =
+        h_qp->pfn_post_send( h_qp->h_send_qp, p_send_wr, pp_send_failure );
+    cl_perf_stop( &g_perf, PostSend );
+
+    AL_EXIT( AL_DBG_QP );
+    cl_perf_stop( &g_perf, IbPostSend );
+    return status;
+}
+
+
+
+ib_api_status_t
+ud_post_send(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        ib_send_wr_t* const               p_send_wr,
+        OUT    ib_send_wr_t                     **pp_send_failure )
+{
+    ib_api_status_t        status;
+    ib_send_wr_t           *p_wr;
+
+    CL_ASSERT( h_qp );
+
+    /* Convert all AV handles for verb provider usage. */
+    for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next )
+    {
+        CL_ASSERT( p_wr->dgrm.ud.h_av );
+        p_wr->dgrm.ud.rsvd = p_wr->dgrm.ud.h_av;
+        p_wr->dgrm.ud.h_av = convert_av_handle( h_qp, p_wr->dgrm.ud.h_av );
+    }
+
+    status = h_qp->pfn_ud_post_send(
+        h_qp->h_ud_send_qp, p_send_wr, pp_send_failure );
+
+    /* Restore all AV handles. */
+    for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next )
+        p_wr->dgrm.ud.h_av = (ib_av_handle_t)p_wr->dgrm.ud.rsvd;
+
+    return status;
+}
+
+
+
+#ifdef CL_KERNEL
+/*
+ * Post a work request to the send queue of a special QP.
+ * The special QP is owned by the GSA or SMA, so care must be taken to prevent
+ * overrunning the QP by multiple owners.
+ */
+void
+special_qp_queue_mad(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        al_mad_wr_t* const                p_mad_wr )
+{
+    al_special_qp_t*    p_special_qp;
+
+    CL_ASSERT( h_qp );
+    CL_ASSERT( p_mad_wr );
+
+    p_special_qp = (al_special_qp_t*)h_qp;
+
+    /* Queue the send work request. */
+    cl_spinlock_acquire( &h_qp->obj.lock );
+    cl_qlist_insert_tail( &p_special_qp->to_send_queue, &p_mad_wr->list_item );
+    cl_spinlock_release( &h_qp->obj.lock );
+}
+
+
+
+void
+special_qp_resume_sends(
+    IN        const    ib_qp_handle_t            h_qp )
+{
+    al_special_qp_t*    p_special_qp;
+    cl_list_item_t*     p_list_item;
+    al_mad_wr_t*        p_mad_wr;
+    ib_api_status_t     status;
+
+    CL_ASSERT( h_qp );
+    p_special_qp = (al_special_qp_t*)h_qp;
+
+    cl_spinlock_acquire( &p_special_qp->qp.obj.lock );
+
+    for( p_list_item = cl_qlist_remove_head( &p_special_qp->to_send_queue );
+         p_list_item != cl_qlist_end( &p_special_qp->to_send_queue );
+         p_list_item = cl_qlist_remove_head( &p_special_qp->to_send_queue ) )
+    {
+        p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item );
+
+        cl_spinlock_release( &p_special_qp->qp.obj.lock );
+        status = spl_qp_svc_send( &p_special_qp->qp, &p_mad_wr->send_wr );
+        cl_spinlock_acquire( &p_special_qp->qp.obj.lock );
+
+        if( status != IB_SUCCESS )
+        {
+            cl_qlist_insert_head( &p_special_qp->to_send_queue, p_list_item );
+            break;
+        }
+    }
+
+    cl_spinlock_release( &p_special_qp->qp.obj.lock );
+}
+#endif /* CL_KERNEL */
+
+
+void
+mad_qp_queue_mad(
+    IN        const    ib_qp_handle_t            h_qp,
+    IN        al_mad_wr_t* const                p_mad_wr )
+{
+    al_mad_qp_t        *p_mad_qp;
+
+    CL_ASSERT( h_qp );
+    p_mad_qp = (al_mad_qp_t*)h_qp;
+
+    /* Queue the send work request on the to_send_queue.
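+     * The MAD dispatcher is expected to pair this with a resume, roughly:
+     *
+     *   h_qp->pfn_queue_mad( h_qp, p_mad_wr );
+     *   h_qp->pfn_resume_mad( h_qp );
+     *
+     * so anything queued here is drained by mad_qp_resume_sends() below.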
*/ + cl_spinlock_acquire( &p_mad_qp->qp.obj.lock ); + cl_qlist_insert_tail( &p_mad_qp->to_send_queue, &p_mad_wr->list_item ); + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); +} + + + +void +mad_qp_resume_sends( + IN ib_qp_handle_t h_qp ) +{ + al_mad_qp_t *p_mad_qp; + cl_list_item_t* p_list_item; + al_mad_wr_t* p_mad_wr; + ib_api_status_t status; + + CL_ASSERT( h_qp ); + + p_mad_qp = (al_mad_qp_t*)h_qp; + + cl_spinlock_acquire( &p_mad_qp->qp.obj.lock ); + + /* Do not post sends if the MAD queue pair is being destroyed. */ + if( p_mad_qp->qp.obj.state == CL_DESTROYING ) + { + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); + return; + } + + for( p_list_item = cl_qlist_remove_head( &p_mad_qp->to_send_queue ); + p_list_item != cl_qlist_end( &p_mad_qp->to_send_queue ); + p_list_item = cl_qlist_remove_head( &p_mad_qp->to_send_queue ) ) + { + p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item ); + + /* Always generate send completions. */ + p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED; + + status = ib_post_send( p_mad_qp->h_dgrm_qp, &p_mad_wr->send_wr, NULL ); + + if( status == IB_SUCCESS ) + { + /* Queue the MAD work request on the send tracking queue. */ + cl_qlist_insert_tail( &p_mad_qp->send_queue, &p_mad_wr->list_item ); + } + else + { + /* Re-queue the send work request on the to_send_queue. */ + cl_qlist_insert_head( &p_mad_qp->to_send_queue, p_list_item ); + break; + } + } + + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); +} + + + +void +mad_qp_flush_send( + IN al_mad_qp_t* p_mad_qp, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_wc_t wc; + + cl_memclr( &wc, sizeof( ib_wc_t ) ); + wc.wr_id = p_mad_wr->send_wr.wr_id; + wc.wc_type = IB_WC_SEND; + wc.status = IB_WCS_WR_FLUSHED_ERR; + + mad_disp_send_done( p_mad_qp->h_mad_disp, p_mad_wr, &wc ); +} + + + +ib_api_status_t +ib_post_recv( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = + h_qp->pfn_post_recv( h_qp->h_recv_qp, p_recv_wr, pp_recv_failure ); + + AL_EXIT( AL_DBG_QP ); + return status; +} + + + +/* + * Post receive buffers to a MAD QP. + */ +ib_api_status_t +mad_qp_post_recvs( + IN al_mad_qp_t* const p_mad_qp ) +{ + ib_mad_element_t* p_mad_element; + al_mad_element_t* p_al_element; + ib_recv_wr_t recv_wr; + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT( p_mad_qp ); + + /* Attempt to post receive buffers up to the max_rq_depth limit. */ + cl_spinlock_acquire( &p_mad_qp->qp.obj.lock ); + while( p_mad_qp->cur_rq_depth < (int32_t)p_mad_qp->max_rq_depth ) + { + /* Get a MAD element from the pool. */ + status = ib_get_mad( p_mad_qp->pool_key, MAD_BLOCK_SIZE, + &p_mad_element ); + + if( status != IB_SUCCESS ) break; + + p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, + element ); + + /* Build the receive work request. */ + recv_wr.p_next = NULL; + recv_wr.wr_id = (uintn_t)p_al_element; + recv_wr.num_ds = 1; + recv_wr.ds_array = &p_al_element->grh_ds; + + /* Queue the receive on the service tracking list. */ + cl_qlist_insert_tail( &p_mad_qp->recv_queue, &p_al_element->list_item ); + + /* Post the receive. 
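+         * wr_id carries the al_mad_element_t pointer; the completion
+         * path recovers it with
+         *
+         *   p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id);
+         *
+         * (see mad_qp_comp) so the element can be returned to its pool.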
*/ + status = ib_post_recv( p_mad_qp->h_dgrm_qp, &recv_wr, NULL ); + + if( status != IB_SUCCESS ) + { + cl_qlist_remove_item( &p_mad_qp->recv_queue, + &p_al_element->list_item ); + + ib_put_mad( p_mad_element ); + break; + } + + cl_atomic_inc( &p_mad_qp->cur_rq_depth ); + } + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); + + return status; +} + + + +void +mad_recv_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + al_mad_qp_t *p_mad_qp; + + CL_ASSERT( cq_context ); + p_mad_qp = (al_mad_qp_t*)cq_context; + + CL_ASSERT( h_cq == p_mad_qp->h_recv_cq ); + mad_qp_comp( p_mad_qp, h_cq, IB_WC_RECV ); +} + + + +void +mad_send_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + al_mad_qp_t *p_mad_qp; + + CL_ASSERT( cq_context ); + p_mad_qp = (al_mad_qp_t*)cq_context; + + CL_ASSERT( h_cq == p_mad_qp->h_send_cq ); + mad_qp_comp( p_mad_qp, h_cq, IB_WC_SEND ); + + /* Continue processing any queued MADs on the QP. */ + mad_qp_resume_sends( &p_mad_qp->qp ); +} + + + +void +mad_qp_comp( + IN al_mad_qp_t* p_mad_qp, + IN const ib_cq_handle_t h_cq, + IN ib_wc_type_t wc_type ) +{ + ib_wc_t wc; + ib_wc_t* p_free_wc = &wc; + ib_wc_t* p_done_wc; + al_mad_wr_t* p_mad_wr; + al_mad_element_t* p_al_mad; + ib_mad_element_t* p_mad_element; + ib_api_status_t status; + + CL_ASSERT( p_mad_qp ); + CL_ASSERT( h_cq ); + + /* Rearm the CQ before polling to avoid missing completions. */ + status = ib_rearm_cq( h_cq, FALSE ); + CL_ASSERT( status == IB_SUCCESS ); + + wc.p_next = NULL; + /* Process work completions. */ + while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS ) + { + /* Process completions one at a time. */ + + /* + * Process the work completion. Per IBA specification, the + * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS. + * Use the wc_type function parameter instead of wc.wc_type. + */ + switch( wc_type ) + { + case IB_WC_SEND: + /* Get a pointer to the MAD work request. */ + p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id); + + /* Remove the MAD work request from the send tracking queue. */ + cl_spinlock_acquire( &p_mad_qp->qp.obj.lock ); + cl_qlist_remove_item( &p_mad_qp->send_queue, &p_mad_wr->list_item ); + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); + + /* Report the send completion to the dispatcher. */ + mad_disp_send_done( p_mad_qp->h_mad_disp, p_mad_wr, &wc ); + break; + + case IB_WC_RECV: + /* A receive buffer was consumed. */ + cl_atomic_dec( &p_mad_qp->cur_rq_depth ); + + /* Replenish the receive buffer. */ + mad_qp_post_recvs( p_mad_qp ); + + /* Initialize pointers to the MAD element. */ + p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id); + p_mad_element = &p_al_mad->element; + + /* Remove the AL MAD element from the receive tracking queue. */ + cl_spinlock_acquire( &p_mad_qp->qp.obj.lock ); + cl_qlist_remove_item( &p_mad_qp->recv_queue, &p_al_mad->list_item ); + cl_spinlock_release( &p_mad_qp->qp.obj.lock ); + + /* Construct the MAD element from the receive work completion. */ + build_mad_recv( p_mad_element, &wc ); + + /* Process the received MAD. */ + status = mad_disp_recv_done( p_mad_qp->h_mad_disp, + p_mad_element ); + + /* Discard this MAD on error. */ + if( status != IB_SUCCESS ) + { + status = ib_put_mad( p_mad_element ); + CL_ASSERT( status == IB_SUCCESS ); + } + break; + + default: + CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV ); + break; + } + p_free_wc = &wc; + } +} + + + +/* + * Process an event on a CQ associated with a MAD QP. 
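+ *
+ * (Note that the completion path above follows the usual rearm-then-
+ * poll pattern:
+ *
+ *   status = ib_rearm_cq( h_cq, FALSE );
+ *   while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
+ *       ...consume *p_done_wc...
+ *
+ * so a completion arriving between the final poll and the return simply
+ * triggers another callback.)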
+ */ +void +mad_qp_cq_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + al_mad_qp_t *p_mad_qp; + + CL_ASSERT( p_event_rec ); + CL_ASSERT( p_event_rec->context ); + + if( p_event_rec->code == IB_AE_SQ_DRAINED ) + return; + + p_mad_qp = (al_mad_qp_t* __ptr64)p_event_rec->context; + + /* Nothing to do here. */ +} + + + +/* + * Process an asynchronous event on the QP. Notify the user of the event. + */ +void +qp_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ) +{ + ib_qp_handle_t h_qp; + + CL_ASSERT( p_event_rec ); + h_qp = (ib_qp_handle_t)p_event_rec->context; + +#if defined(CL_KERNEL) + switch( p_event_rec->code ) + { + case IB_AE_QP_COMM: + al_cep_established( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid ); + break; + + case IB_AE_QP_APM: + al_cep_migrate( h_qp->obj.h_al, ((al_conn_qp_t*)h_qp)->cid ); + break; + + case IB_AE_QP_APM_ERROR: + //***TODO: Figure out how to handle these errors. + break; + + default: + break; + } +#endif + + p_event_rec->context = (void*)h_qp->obj.context; + p_event_rec->handle.h_qp = h_qp; + + if( h_qp->pfn_event_cb ) + h_qp->pfn_event_cb( p_event_rec ); +} + + + +ib_api_status_t +ib_bind_mw( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t * const p_mw_bind, + OUT net32_t * const p_rkey ) +{ + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + if( AL_OBJ_INVALID_HANDLE( h_mw, AL_OBJ_TYPE_H_MW ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MW_HANDLE\n") ); + return IB_INVALID_MW_HANDLE; + } + if( AL_OBJ_INVALID_HANDLE( h_qp, AL_OBJ_TYPE_H_QP ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + if( !p_mw_bind || !p_rkey ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Convert to the CI handles. */ + h_mr = p_mw_bind->h_mr; + p_mw_bind->h_mr = convert_mr_handle( h_mr ); + + status = verbs_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey); + + p_mw_bind->h_mr = h_mr; + + AL_EXIT( AL_DBG_MW ); + return status; +} diff --git a/branches/Ndi/core/al/al_qp.h b/branches/Ndi/core/al/al_qp.h new file mode 100644 index 00000000..8b18d580 --- /dev/null +++ b/branches/Ndi/core/al/al_qp.h @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined(__AL_QP_H__) +#define __AL_QP_H__ + +#include +#include +#include +#include + +#include "al_ca.h" +#include "al_common.h" +#include "al_mad.h" +#include "al_mcast.h" +#ifdef CL_KERNEL +#include "al_smi.h" +#endif /* CL_KERNEL */ + + +typedef ib_api_status_t +(*ib_pfn_modify_qp_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ); + +typedef ib_api_status_t +(*ib_pfn_post_send_t)( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + IN ib_send_wr_t **pp_send_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_post_recv_t)( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + IN ib_recv_wr_t **p_recv_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_init_dgrm_svc_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info ); + +typedef ib_api_status_t +(*ib_pfn_reg_mad_svc_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ); + +typedef ib_api_status_t +(*ib_pfn_dereg_mad_svc_t)( + IN const ib_mad_svc_handle_t h_mad_svc ); + +typedef void +(*ib_pfn_queue_mad_t)( + IN const ib_qp_handle_t h_qp, + IN al_mad_wr_t* const p_mad_wr ); + +typedef void +(*ib_pfn_resume_mad_t)( + IN const ib_qp_handle_t h_qp ); + +typedef ib_api_status_t +(*ib_pfn_join_mcast_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ); + +typedef ib_api_status_t +(*ib_pfn_leave_mcast_t)( + IN const ib_mcast_handle_t h_mcast ); + + +/* + * Queue pair information required by the access layer. This structure + * is referenced by a user's QP handle. + * + * Other QP types are derived from this base object. + */ +typedef struct _ib_qp +{ + al_obj_t obj; /* Must be first. */ + + ib_qp_handle_t h_ci_qp; + ib_qp_type_t type; + ib_net32_t num; + ib_qp_state_t state; + net64_t port_guid; + +#ifdef CL_KERNEL + /* Timewait timeout time. */ + uint64_t timewait; +#endif /* CL_KERNEL */ + + /* Handles to pass to post_send and post_recv. */ + ib_qp_handle_t h_recv_qp; + ib_qp_handle_t h_send_qp; + + /* + * For UD QPs, we have to mess with AV handles. This is + * where we store the actual post send function and appropriate + * handle. + */ + ib_qp_handle_t h_ud_send_qp; + ib_pfn_post_send_t pfn_ud_post_send; + + ib_cq_handle_t h_recv_cq; + ib_cq_handle_t h_send_cq; + cl_obj_rel_t recv_cq_rel; + cl_obj_rel_t send_cq_rel; + + ib_srq_handle_t h_srq; + cl_obj_rel_t srq_rel; + + ib_pfn_event_cb_t pfn_event_cb; + + ib_pfn_modify_qp_t pfn_modify_qp; + ib_pfn_post_recv_t pfn_post_recv; + ib_pfn_post_send_t pfn_post_send; + ib_pfn_reg_mad_svc_t pfn_reg_mad_svc; + ib_pfn_dereg_mad_svc_t pfn_dereg_mad_svc; + ib_pfn_queue_mad_t pfn_queue_mad; + ib_pfn_resume_mad_t pfn_resume_mad; + ib_pfn_init_dgrm_svc_t pfn_init_dgrm_svc; + ib_pfn_join_mcast_t pfn_join_mcast; + +} ib_qp_t; + + +/* + * Connected QP type. + */ +typedef struct _al_conn_qp +{ + ib_qp_t qp; /* Must be first. */ + + ib_cm_handle_t p_conn; + + atomic32_t cid; + + /* Callback table. */ + ib_pfn_cm_req_cb_t pfn_cm_req_cb; + ib_pfn_cm_rep_cb_t pfn_cm_rep_cb; + ib_pfn_cm_mra_cb_t pfn_cm_mra_cb; + ib_pfn_cm_rtu_cb_t pfn_cm_rtu_cb; + ib_pfn_cm_lap_cb_t pfn_cm_lap_cb; + ib_pfn_cm_apr_cb_t pfn_cm_apr_cb; + ib_pfn_cm_dreq_cb_t pfn_cm_dreq_cb; + ib_pfn_cm_drep_cb_t pfn_cm_drep_cb; + ib_pfn_cm_rej_cb_t pfn_cm_rej_cb; /* If RTU times out */ + + +} al_conn_qp_t; + + +/* + * Datagram QP type. 
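+ *
+ * As with the connected QP above, each derived QP type keeps the base
+ * ib_qp_t as its first member (the datagram case is the base type
+ * itself), so the access layer can move between the base handle and
+ * the derived type with a plain cast; the async event handler does
+ * exactly this to reach the CID of a connected QP.  A sketch of the
+ * same downcast for a MAD QP (valid only when the handle really
+ * refers to a MAD QP):
+ *
+ *	al_mad_qp_t *p_mad_qp = (al_mad_qp_t*)h_qp;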
+ */ +typedef ib_qp_t al_dgrm_qp_t; + + +/* + * Special QPs - SMI, GSI. + */ +typedef struct _al_special_qp +{ + ib_qp_t qp; /* Must be first. */ + cl_qlist_t to_send_queue; + +} al_special_qp_t; + + +/* + * QP alias to SMI, GSI QPs. + */ +typedef struct _al_qp_alias +{ + ib_qp_t qp; /* Must be first. */ + al_mad_disp_handle_t h_mad_disp; + ib_pool_key_t pool_key; /* Global MAD pool. */ + +} al_qp_alias_t; + + +/* + * QPs that support MAD operations. + */ +typedef struct _al_mad_qp +{ + ib_qp_t qp; /* Must be first. */ + ib_qp_handle_t h_dgrm_qp; + al_mad_disp_handle_t h_mad_disp; + + ib_cq_handle_t h_recv_cq; + ib_cq_handle_t h_send_cq; + + cl_qlist_t to_send_queue; /* Waiting to be posted */ + cl_qlist_t send_queue; /* Posted sends */ + cl_qlist_t recv_queue; /* Posted receives */ + uint32_t max_rq_depth; /* Maximum recv queue depth */ + atomic32_t cur_rq_depth; /* Current recv queue depth */ + + ib_pool_handle_t h_pool; + ib_pool_key_t pool_key; + +} al_mad_qp_t; + + + +ib_api_status_t +create_qp( + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb, + OUT ib_qp_handle_t* const ph_qp, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +query_qp( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* const p_qp_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +modify_qp( + IN const ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +get_spl_qp( + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t pfn_qp_event_cb, + OUT ib_pool_key_t* const p_pool_key OPTIONAL, + OUT ib_qp_handle_t* const ph_qp, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + + +ib_mad_send_handle_t +get_send_mad_wp( + IN const al_qp_alias_t *p_qp_alias ); + + + +void +put_send_mad_wp( + IN const al_qp_alias_t *p_qp_alias, + IN const ib_mad_send_handle_t h_send_mad ); + + +void +special_qp_resume_sends( + IN const ib_qp_handle_t h_qp ); + + +void +special_qp_queue_mad( + IN const ib_qp_handle_t h_qp, + IN al_mad_wr_t* const p_mad_wr ); + + +void +qp_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ); + + +/* Return the AL instance associated with this QP. */ +static inline ib_al_handle_t +qp_get_al( + IN const ib_qp_handle_t h_qp ) +{ + return h_qp->obj.h_al; +} + + +#endif /* __AL_QP_H__ */ diff --git a/branches/Ndi/core/al/al_query.c b/branches/Ndi/core/al/al_query.c new file mode 100644 index 00000000..34e99f5a --- /dev/null +++ b/branches/Ndi/core/al/al_query.c @@ -0,0 +1,375 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_common.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_query.tmh" +#endif +#include "al_mgr.h" +#include "al_query.h" +#include "ib_common.h" + + +#define PR102982 + + +static ib_api_status_t +query_sa( + IN al_query_t *p_query, + IN const ib_query_req_t* const p_query_req, + IN const ib_al_flags_t flags ); + +void +query_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ); + + +ib_api_status_t +ib_query( + IN const ib_al_handle_t h_al, + IN const ib_query_req_t* const p_query_req, + OUT ib_query_handle_t* const ph_query OPTIONAL ) +{ + al_query_t *p_query; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QUERY ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_query_req ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + if( (p_query_req->flags & IB_FLAGS_SYNC) && !cl_is_blockable() ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_UNSUPPORTED\n") ); + return IB_UNSUPPORTED; + } + + /* Allocate a new query. */ + p_query = cl_zalloc( sizeof( al_query_t ) ); + if( !p_query ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("insufficient memory\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Copy the query context information. */ + p_query->sa_req.pfn_sa_req_cb = query_req_cb; + p_query->sa_req.user_context = p_query_req->query_context; + p_query->pfn_query_cb = p_query_req->pfn_query_cb; + p_query->query_type = p_query_req->query_type; + + /* Track the query with the AL instance. */ + al_insert_query( h_al, p_query ); + + /* Issue the MAD to the SA. */ + status = query_sa( p_query, p_query_req, p_query_req->flags ); + if( status != IB_SUCCESS && status != IB_INVALID_GUID ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("query_sa failed: %s\n", ib_get_err_str(status) ) ); + } + + /* Cleanup from issuing the query if it failed or was synchronous. */ + if( status != IB_SUCCESS ) + { + al_remove_query( p_query ); + cl_free( p_query ); + } + else if( ph_query ) + { + *ph_query = p_query; + } + + AL_EXIT( AL_DBG_QUERY ); + return status; +} + + + +/* + * Query the SA based on the user's request. + */ +static ib_api_status_t +query_sa( + IN al_query_t *p_query, + IN const ib_query_req_t* const p_query_req, + IN const ib_al_flags_t flags ) +{ + ib_user_query_t sa_req, *p_sa_req; + union _query_sa_recs + { + ib_service_record_t svc; + ib_node_record_t node; + ib_portinfo_record_t portinfo; + ib_path_rec_t path; + ib_class_port_info_t class_port_info; + } rec; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QUERY ); + + cl_memclr( &rec, sizeof(rec) ); + + /* Set the request information. 
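+	 * For reference, a caller-side sketch of driving this path through
+	 * ib_query(); my_query_cb and node_guid are hypothetical, the field
+	 * names come from the checks above:
+	 *
+	 *	ib_query_req_t req;
+	 *	cl_memclr( &req, sizeof(req) );
+	 *	req.query_type = IB_QUERY_NODE_REC_BY_NODE_GUID;
+	 *	req.p_query_input = &node_guid;
+	 *	req.port_guid = port_guid;
+	 *	req.timeout_ms = 500;
+	 *	req.retry_cnt = 3;
+	 *	req.pfn_query_cb = my_query_cb;
+	 *	status = ib_query( h_al, &req, NULL );
+	 *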
*/ + p_sa_req = &sa_req; + sa_req.method = IB_MAD_METHOD_GETTABLE; + + /* Set the MAD attributes and component mask correctly. */ + switch( p_query_req->query_type ) + { + case IB_QUERY_USER_DEFINED: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("USER_DEFINED\n") ); + p_sa_req = (ib_user_query_t* __ptr64)p_query_req->p_query_input; + if( !p_sa_req->method ) + { + AL_EXIT( AL_DBG_QUERY ); + return IB_INVALID_SETTING; + } + break; + + case IB_QUERY_ALL_SVC_RECS: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("IB_QUERY_ALL_SVC_RECS\n") ); + sa_req.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_req.attr_size = sizeof( ib_service_record_t ); + sa_req.comp_mask = 0; + sa_req.p_attr = &rec.svc; + break; + + case IB_QUERY_SVC_REC_BY_NAME: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("SVC_REC_BY_NAME\n") ); + sa_req.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_req.attr_size = sizeof( ib_service_record_t ); + sa_req.comp_mask = IB_SR_COMPMASK_SNAME; + sa_req.p_attr = &rec.svc; + cl_memcpy( rec.svc.service_name, p_query_req->p_query_input, + sizeof( ib_svc_name_t ) ); + break; + + case IB_QUERY_SVC_REC_BY_ID: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("SVC_REC_BY_ID\n") ); + sa_req.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_req.attr_size = sizeof( ib_service_record_t ); + sa_req.comp_mask = IB_SR_COMPMASK_SID; + sa_req.p_attr = &rec.svc; + rec.svc.service_id = *(ib_net64_t* __ptr64)(p_query_req->p_query_input); + break; + + case IB_QUERY_CLASS_PORT_INFO: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("IB_QUERY_CLASS_PORT_INFO\n") ); + sa_req.method = IB_MAD_METHOD_GET; + sa_req.attr_id = IB_MAD_ATTR_CLASS_PORT_INFO; + sa_req.attr_size = sizeof( ib_class_port_info_t ); + sa_req.comp_mask = 0; + sa_req.p_attr = &rec.class_port_info; + break; + + case IB_QUERY_NODE_REC_BY_NODE_GUID: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("NODE_REC_BY_NODE_GUID\n") ); + /* + * 15.2.5.2: + * if >1 ports on of a CA/RTR the subnet return multiple + * record + */ + sa_req.attr_id = IB_MAD_ATTR_NODE_RECORD; + sa_req.attr_size = sizeof( ib_node_record_t ); + sa_req.comp_mask = IB_NR_COMPMASK_NODEGUID; + sa_req.p_attr = &rec.node; + rec.node.node_info.node_guid = + *(ib_net64_t* __ptr64)(p_query_req->p_query_input); + break; + + case IB_QUERY_PORT_REC_BY_LID: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("PORT_REC_BY_LID\n") ); + sa_req.attr_id = IB_MAD_ATTR_PORTINFO_RECORD; + sa_req.attr_size = sizeof( ib_portinfo_record_t ); + sa_req.comp_mask = IB_PIR_COMPMASK_BASELID; + sa_req.p_attr = &rec.portinfo; + rec.portinfo.port_info.base_lid = + *(ib_net16_t* __ptr64)(p_query_req->p_query_input); + break; + + case IB_QUERY_PATH_REC_BY_PORT_GUIDS: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("PATH_REC_BY_PORT_GUIDS\n") ); + sa_req.attr_id = IB_MAD_ATTR_PATH_RECORD; + sa_req.attr_size = sizeof( ib_path_rec_t ); + sa_req.comp_mask = (IB_PR_COMPMASK_DGID | + IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUM_PATH); + sa_req.p_attr = &rec.path; + ib_gid_set_default( &rec.path.dgid, ((ib_guid_pair_t* __ptr64) + (p_query_req->p_query_input))->dest_guid ); + ib_gid_set_default( &rec.path.sgid, ((ib_guid_pair_t* __ptr64) + (p_query_req->p_query_input))->src_guid ); + rec.path.num_path = 1; + break; + + case IB_QUERY_PATH_REC_BY_GIDS: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("PATH_REC_BY_GIDS\n") ); + sa_req.attr_id = IB_MAD_ATTR_PATH_RECORD; + sa_req.attr_size = sizeof( ib_path_rec_t ); + sa_req.comp_mask = (IB_PR_COMPMASK_DGID | + IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUM_PATH); + 
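+		/*
+		 * Each comp_mask bit marks a field of the path record that the
+		 * SA must match; fields without a set bit are wildcards.  DGID,
+		 * SGID and num_path participate here, so only those fields are
+		 * filled in below.
+		 */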
sa_req.p_attr = &rec.path; + cl_memcpy( &rec.path.dgid, &((ib_gid_pair_t* __ptr64) + (p_query_req->p_query_input))->dest_gid, sizeof( ib_gid_t ) ); + cl_memcpy( &rec.path.sgid, &((ib_gid_pair_t* __ptr64) + (p_query_req->p_query_input))->src_gid, sizeof( ib_gid_t ) ); + rec.path.num_path = 1; + break; + + case IB_QUERY_PATH_REC_BY_LIDS: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("PATH_REC_BY_LIDS\n") ); + /* SGID must be provided for GET_TABLE requests. */ + sa_req.method = IB_MAD_METHOD_GET; + sa_req.attr_id = IB_MAD_ATTR_PATH_RECORD; + sa_req.attr_size = sizeof( ib_path_rec_t ); + sa_req.comp_mask = + (IB_PR_COMPMASK_DLID | IB_PR_COMPMASK_SLID); + sa_req.p_attr = &rec.path; + rec.path.dlid = + ((ib_lid_pair_t* __ptr64)(p_query_req->p_query_input))->dest_lid; + rec.path.slid = + ((ib_lid_pair_t* __ptr64)(p_query_req->p_query_input))->src_lid; +#ifdef PR102982 + rec.path.num_path = 1; +#endif + break; + + default: + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("UNKNOWN\n") ); + CL_ASSERT( p_query_req->query_type == IB_QUERY_USER_DEFINED || + p_query_req->query_type == IB_QUERY_ALL_SVC_RECS || + p_query_req->query_type == IB_QUERY_SVC_REC_BY_NAME || + p_query_req->query_type == IB_QUERY_SVC_REC_BY_ID || + p_query_req->query_type == IB_QUERY_CLASS_PORT_INFO || + p_query_req->query_type == IB_QUERY_NODE_REC_BY_NODE_GUID || + p_query_req->query_type == IB_QUERY_PORT_REC_BY_LID || + p_query_req->query_type == IB_QUERY_PATH_REC_BY_PORT_GUIDS || + p_query_req->query_type == IB_QUERY_PATH_REC_BY_GIDS || + p_query_req->query_type == IB_QUERY_PATH_REC_BY_LIDS ); + + return IB_ERROR; + } + + status = al_send_sa_req( + &p_query->sa_req, p_query_req->port_guid, p_query_req->timeout_ms, + p_query_req->retry_cnt, p_sa_req, flags ); + AL_EXIT( AL_DBG_QUERY ); + return status; +} + + + +/* + * Query request completion callback. + */ +void +query_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ) +{ + al_query_t *p_query; + ib_query_rec_t query_rec; + ib_sa_mad_t *p_sa_mad; + + AL_ENTER( AL_DBG_QUERY ); + p_query = PARENT_STRUCT( p_sa_req, al_query_t, sa_req ); + + /* Initialize the results of the query. */ + cl_memclr( &query_rec, sizeof( ib_query_rec_t ) ); + query_rec.status = p_sa_req->status; + query_rec.query_context = p_query->sa_req.user_context; + query_rec.query_type = p_query->query_type; + + /* Form the result of the query, if we got one. */ + if( query_rec.status == IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, + ("query succeeded\n") ); + + CL_ASSERT( p_mad_response ); + p_sa_mad = (ib_sa_mad_t*)p_mad_response->p_mad_buf; + + if (ib_get_attr_size( p_sa_mad->attr_offset ) != 0) + { + query_rec.result_cnt = + ( ( p_mad_response->size - IB_SA_MAD_HDR_SIZE ) / + ib_get_attr_size( p_sa_mad->attr_offset ) ); + } + else + { + query_rec.result_cnt = 0; + } + + query_rec.p_result_mad = p_mad_response; + } + else + { + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_QUERY, + ("query failed: %s\n", ib_get_err_str(query_rec.status) ) ); + if( p_mad_response ) + query_rec.p_result_mad = p_mad_response; + } + + /* + * Handing an internal MAD to a client. + * Track the MAD element with the client's AL instance. + */ + if( p_mad_response ) + al_handoff_mad( p_query->h_al, p_mad_response ); + + /* Notify the user of the result. */ + p_query->pfn_query_cb( &query_rec ); + + /* Cleanup from issuing the query. 
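+	 * A worked example of the result-count math above: a GetTableResp
+	 * of 256 bytes with a 56-byte SA MAD header and an attr_offset that
+	 * decodes to 40-byte records yields (256 - 56) / 40 = 5 records.
+	 * (The sizes are illustrative, not normative.)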
*/ + al_remove_query( p_query ); + cl_free( p_query ); + + AL_EXIT( AL_DBG_QUERY ); +} diff --git a/branches/Ndi/core/al/al_query.h b/branches/Ndi/core/al/al_query.h new file mode 100644 index 00000000..9186e9ee --- /dev/null +++ b/branches/Ndi/core/al/al_query.h @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_QUERY_H__) +#define __AL_QUERY_H__ + + +#include "al_common.h" +#include +#include + + +/* Per port sa_req service */ +typedef struct _sa_req_svc +{ + al_obj_t obj; + + ib_net64_t port_guid; + uint8_t port_num; + ib_net16_t sm_lid; + uint8_t sm_sl; + + ib_qp_handle_t h_qp; + ib_mad_svc_handle_t h_mad_svc; + ib_av_handle_t h_av; + + ib_pool_key_t pool_key; + atomic32_t trans_id; + +} sa_req_svc_t; + + + +struct _al_sa_req; + + +typedef void +(*pfn_sa_req_cb_t)( + IN struct _al_sa_req *p_sa_req, + IN ib_mad_element_t *p_mad_response ); + + + +typedef enum _al_sa_reg_state +{ + SA_REG_STARTING = 0, /* Request sent to SA - awaiting response. */ + SA_REG_ACTIVE, /* Request successfully ack'ed by SA. */ + SA_REG_CANCELING, /* Canceling STARTING request to SA. */ + SA_REG_HALTING, /* Deregistering from SA. */ + SA_REG_ERROR /* There was an error registering. */ + +} al_sa_reg_state_t; + + +/* Notes: Only the pfn_sa_req_cb field is required to send a request. */ +typedef struct _al_sa_req +{ + cl_list_item_t list_item; + + ib_api_status_t status; + +#ifdef CL_KERNEL + sa_req_svc_t *p_sa_req_svc; /* For cancellation */ + ib_mad_element_t *p_mad_response; + ib_mad_element_t *p_mad_request; /* For cancellation */ + KEVENT *p_sync_event; +#else /* defined( CL_KERNEL ) */ + uint64_t hdl; + ual_send_sa_req_ioctl_t ioctl; + OVERLAPPED ov; +#endif /* defined( CL_KERNEL ) */ + const void *user_context; + pfn_sa_req_cb_t pfn_sa_req_cb; + +} al_sa_req_t; + + + +typedef struct _al_query +{ + al_sa_req_t sa_req; /* Must be first. 
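+						 * query_req_cb() recovers the query with
+						 * PARENT_STRUCT( p_sa_req, al_query_t, sa_req ),
+						 * which relies on this layout.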
*/ + + ib_al_handle_t h_al; + ib_pfn_query_cb_t pfn_query_cb; + ib_query_type_t query_type; + +} al_query_t; + + + +ib_api_status_t +create_sa_req_mgr( + IN al_obj_t* const p_parent_obj ); + +ib_api_status_t +al_send_sa_req( + IN al_sa_req_t *p_sa_req, + IN const net64_t port_guid, + IN const uint32_t timeout_ms, + IN const uint32_t retry_cnt, + IN const ib_user_query_t* const p_sa_req_data, + IN const ib_al_flags_t flags ); + +#if defined( CL_KERNEL ) +static __inline void +al_cancel_sa_req( + IN const al_sa_req_t *p_sa_req ) +{ + ib_cancel_mad( p_sa_req->p_sa_req_svc->h_mad_svc, + p_sa_req->p_mad_request ); +} +#else /* defined( CL_KERNEL ) */ +void +al_cancel_sa_req( + IN const al_sa_req_t *p_sa_req ); +#endif /* defined( CL_KERNEL ) */ + +ib_api_status_t +convert_wc_status( + IN const ib_wc_status_t wc_status ); + + +#endif /* __AL_QUERY_H__ */ diff --git a/branches/Ndi/core/al/al_reg_svc.c b/branches/Ndi/core/al/al_reg_svc.c new file mode 100644 index 00000000..8c0debee --- /dev/null +++ b/branches/Ndi/core/al/al_reg_svc.c @@ -0,0 +1,358 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_reg_svc.tmh" +#endif +#include "al_reg_svc.h" +#include "ib_common.h" +#include "al_mgr.h" + + + +static void +__dereg_svc_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ) +{ + ib_reg_svc_handle_t h_reg_svc; + + /* + * Note that we come into this callback with a reference + * on the registration object. + */ + h_reg_svc = PARENT_STRUCT( p_sa_req, al_reg_svc_t, sa_req ); + + if( p_mad_response ) + ib_put_mad( p_mad_response ); + + h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL ); +} + + +static void +__sa_dereg_svc( + IN const ib_reg_svc_handle_t h_reg_svc ) +{ + ib_user_query_t sa_mad_data; + + ref_al_obj( &h_reg_svc->obj ); + + /* Set the request information. */ + h_reg_svc->sa_req.pfn_sa_req_cb = __dereg_svc_cb; + + /* Set the MAD attributes and component mask correctly. 
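+	 * For the deregistration below, the method is DELETE and the
+	 * component mask is set to all ones, so every field of the saved
+	 * service record must match and exactly that record is removed.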
*/ + sa_mad_data.method = IB_MAD_METHOD_DELETE; + sa_mad_data.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_mad_data.attr_size = sizeof(ib_service_record_t); + + sa_mad_data.p_attr = &h_reg_svc->svc_rec; + sa_mad_data.comp_mask = ~CL_CONST64(0); + + if( al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid, + 500, 0, &sa_mad_data, 0 ) != IB_SUCCESS ) + { + /* Cleanup from the registration. */ + deref_al_obj( &h_reg_svc->obj ); + } +} + + +void +reg_svc_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ) +{ + ib_reg_svc_handle_t h_reg_svc; + ib_sa_mad_t *p_sa_mad; + ib_reg_svc_rec_t reg_svc_rec; + + /* + * Note that we come into this callback with a reference + * on the registration object. + */ + h_reg_svc = PARENT_STRUCT( p_sa_req, al_reg_svc_t, sa_req ); + + CL_ASSERT( h_reg_svc->pfn_reg_svc_cb ); + + /* Record the status of the registration request. */ + h_reg_svc->req_status = p_sa_req->status; + + if( p_mad_response ) + { + p_sa_mad = (ib_sa_mad_t *)p_mad_response->p_mad_buf; + h_reg_svc->resp_status = p_sa_mad->status; + + if ( h_reg_svc->req_status == IB_SUCCESS ) + { + /* Save the service registration results. */ + h_reg_svc->svc_rec = *((ib_service_record_t *)p_sa_mad->data); + } + + /* We no longer need the response MAD. */ + ib_put_mad( p_mad_response ); + } + + /* Initialize the user's callback record. */ + cl_memclr( ®_svc_rec, sizeof( ib_reg_svc_rec_t ) ); + reg_svc_rec.svc_context = h_reg_svc->sa_req.user_context; + reg_svc_rec.req_status = h_reg_svc->req_status; + reg_svc_rec.resp_status = h_reg_svc->resp_status; + reg_svc_rec.svc_rec = h_reg_svc->svc_rec; + + cl_spinlock_acquire( &h_reg_svc->obj.lock ); + /* See if the registration was successful. */ + if( reg_svc_rec.req_status == IB_SUCCESS ) + { + /* Ensure that the user wants the registration to proceed. */ + if( h_reg_svc->state == SA_REG_STARTING ) + { + h_reg_svc->state = SA_REG_ACTIVE; + reg_svc_rec.h_reg_svc = h_reg_svc; + } + else + { + CL_ASSERT( h_reg_svc->state == SA_REG_CANCELING ); + reg_svc_rec.req_status = IB_CANCELED; + + /* Notify the SA that we're deregistering. */ + __sa_dereg_svc( h_reg_svc ); + } + } + else + { + h_reg_svc->state = SA_REG_ERROR; + } + cl_spinlock_release( &h_reg_svc->obj.lock ); + + h_reg_svc->pfn_reg_svc_cb( ®_svc_rec ); + + if( p_sa_req->status != IB_SUCCESS ) + { + h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, NULL ); + } + else + { + /* Release the reference taken when issuing the request. */ + deref_al_obj( &h_reg_svc->obj ); + } +} + + +static void +__destroying_sa_reg( + IN al_obj_t* const p_obj ) +{ + ib_reg_svc_handle_t h_sa_reg; + + AL_ENTER( AL_DBG_SA_REQ ); + + h_sa_reg = PARENT_STRUCT( p_obj, al_reg_svc_t, obj ); + + cl_spinlock_acquire( &p_obj->lock ); + + CL_ASSERT( h_sa_reg->state != SA_REG_HALTING ); + switch( h_sa_reg->state ) + { + case SA_REG_STARTING: + /* + * Cancel registration. Note that there is a reference held until + * this completes. + */ + h_sa_reg->state = SA_REG_CANCELING; + al_cancel_sa_req( &h_sa_reg->sa_req ); + break; + + case SA_REG_ERROR: + /* Nothing to do. 
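+		 * A failed registration never reached the SA, so no
+		 * deregistration is owed.  The state flow, roughly:
+		 *
+		 *	STARTING -> ACTIVE     (SA acked the request)
+		 *	STARTING -> CANCELING  (destroyed before the SA answered)
+		 *	STARTING -> ERROR      (request failed; freed directly)
+		 *	ACTIVE   -> HALTING    (normal destroy, dereg sent to SA)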
*/ + break; + + default: + h_sa_reg->state = SA_REG_HALTING; + + __sa_dereg_svc( h_sa_reg ); + } + cl_spinlock_release( &p_obj->lock ); + +} + + +static void +__free_sa_reg( + IN al_obj_t* const p_obj ) +{ + ib_reg_svc_handle_t h_sa_reg; + + AL_ENTER( AL_DBG_SA_REQ ); + + h_sa_reg = PARENT_STRUCT( p_obj, al_reg_svc_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( h_sa_reg ); + + AL_EXIT( AL_DBG_SA_REQ ); +} + + +static ib_api_status_t +sa_reg_svc( + IN const ib_reg_svc_handle_t h_reg_svc, + IN const ib_reg_svc_req_t* const p_reg_svc_req ) +{ + ib_user_query_t sa_mad_data; + + /* Set the request information. */ + h_reg_svc->sa_req.pfn_sa_req_cb = reg_svc_req_cb; + + /* Set the MAD attributes and component mask correctly. */ + sa_mad_data.method = IB_MAD_METHOD_SET; + sa_mad_data.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_mad_data.attr_size = sizeof(ib_service_record_t); + + /* Initialize the component mask. */ + sa_mad_data.comp_mask = p_reg_svc_req->svc_data_mask; + sa_mad_data.p_attr = &h_reg_svc->svc_rec; + + return al_send_sa_req( &h_reg_svc->sa_req, h_reg_svc->port_guid, + p_reg_svc_req->timeout_ms, p_reg_svc_req->retry_cnt, &sa_mad_data, + p_reg_svc_req->flags ); +} + + +ib_api_status_t +ib_reg_svc( + IN const ib_al_handle_t h_al, + IN const ib_reg_svc_req_t* const p_reg_svc_req, + OUT ib_reg_svc_handle_t* const ph_reg_svc ) +{ + ib_reg_svc_handle_t h_sa_reg = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SA_REQ ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_reg_svc_req ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a new service registration request. */ + h_sa_reg = cl_zalloc( sizeof( al_reg_svc_t ) ); + if( !h_sa_reg ) + { + AL_EXIT( AL_DBG_SA_REQ ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &h_sa_reg->obj, AL_OBJ_TYPE_H_SA_REG ); + + status = init_al_obj( &h_sa_reg->obj, p_reg_svc_req->svc_context, TRUE, + __destroying_sa_reg, NULL, __free_sa_reg ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj returned %s.\n", ib_get_err_str( status )) ); + __free_sa_reg( &h_sa_reg->obj ); + return status; + } + + /* Track the registered service with the AL instance. */ + status = attach_al_obj( &h_al->obj, &h_sa_reg->obj ); + if( status != IB_SUCCESS ) + { + h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + /* Store the port GUID on which to issue the request. */ + h_sa_reg->port_guid = p_reg_svc_req->port_guid; + + /* Copy the service registration information. */ + h_sa_reg->sa_req.user_context = p_reg_svc_req->svc_context; + h_sa_reg->pfn_reg_svc_cb = p_reg_svc_req->pfn_reg_svc_cb; + h_sa_reg->svc_rec = p_reg_svc_req->svc_rec; + + h_sa_reg->state = SA_REG_STARTING; + + /* Issue the MAD to the SA. 
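+	 * Caller-side sketch of a registration; my_reg_svc_cb, my_svc_rec
+	 * and the particular component-mask choice are hypothetical, the
+	 * field names come from the copies above:
+	 *
+	 *	ib_reg_svc_req_t req;
+	 *	cl_memclr( &req, sizeof(req) );
+	 *	req.svc_rec = my_svc_rec;
+	 *	req.svc_data_mask = IB_SR_COMPMASK_SID | IB_SR_COMPMASK_SNAME;
+	 *	req.port_guid = port_guid;
+	 *	req.timeout_ms = 500;
+	 *	req.retry_cnt = 3;
+	 *	req.pfn_reg_svc_cb = my_reg_svc_cb;
+	 *	status = ib_reg_svc( h_al, &req, &h_reg_svc );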
*/ + status = sa_reg_svc( h_sa_reg, p_reg_svc_req ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("sa_reg_svc failed: %s\n", ib_get_err_str(status) ) ); + h_sa_reg->state = SA_REG_ERROR; + + h_sa_reg->obj.pfn_destroy( &h_sa_reg->obj, NULL ); + } + else + { + *ph_reg_svc = h_sa_reg; + } + + AL_EXIT( AL_DBG_SA_REQ ); + return status; +} + + +ib_api_status_t +ib_dereg_svc( + IN const ib_reg_svc_handle_t h_reg_svc, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_SA_REQ ); + + if( AL_OBJ_INVALID_HANDLE( h_reg_svc, AL_OBJ_TYPE_H_SA_REG) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &h_reg_svc->obj ); + h_reg_svc->obj.pfn_destroy( &h_reg_svc->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_SA_REQ ); + return IB_SUCCESS; +} diff --git a/branches/Ndi/core/al/al_reg_svc.h b/branches/Ndi/core/al/al_reg_svc.h new file mode 100644 index 00000000..56f6dbc5 --- /dev/null +++ b/branches/Ndi/core/al/al_reg_svc.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_REG_SVC_H__) +#define __AL_REG_SVC_H__ + +#include + +#include "al_common.h" +#include "al_query.h" + + + +typedef struct _al_reg_svc +{ + al_obj_t obj; + + al_sa_req_t sa_req; + + /* Status of the registration request. */ + ib_api_status_t req_status; + /* Additional status information returned in the registration response. */ + ib_net16_t resp_status; + + al_sa_reg_state_t state; + ib_pfn_reg_svc_cb_t pfn_reg_svc_cb; + + /* Store service record to report to SA later. */ + ib_service_record_t svc_rec; + ib_net64_t port_guid; + +} al_reg_svc_t; + + + +#endif /* __AL_REG_SVC_H__ */ diff --git a/branches/Ndi/core/al/al_res_mgr.c b/branches/Ndi/core/al/al_res_mgr.c new file mode 100644 index 00000000..d6902d7b --- /dev/null +++ b/branches/Ndi/core/al/al_res_mgr.c @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al_mgr.h" +#include "ib_common.h" +#include "al_res_mgr.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_res_mgr.tmh" +#endif + + +#define AL_MR_POOL_SIZE (4096 / sizeof( ib_mr_t )) +#define AL_AV_POOL_SIZE (4096 / sizeof( ib_av_t )) +#ifdef CL_KERNEL +#define AL_FMR_POOL_SIZE (4096 / sizeof( mlnx_fmr_t )) +#endif + +al_res_mgr_t *gp_res_mgr; + + +void +free_res_mgr( + IN al_obj_t *p_obj ); + + + +ib_api_status_t +create_res_mgr( + IN al_obj_t *p_parent_obj ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + gp_res_mgr = (al_res_mgr_t*)cl_zalloc( sizeof( al_res_mgr_t ) ); + if( !gp_res_mgr ) + { + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the resource manager. */ + cl_qpool_construct( &gp_res_mgr->av_pool ); + cl_qpool_construct( &gp_res_mgr->mr_pool ); +#ifdef CL_KERNEL + cl_qpool_construct( &gp_res_mgr->fmr_pool ); +#endif + + construct_al_obj( &gp_res_mgr->obj, AL_OBJ_TYPE_RES_MGR ); + status = init_al_obj( &gp_res_mgr->obj, gp_res_mgr, TRUE, + NULL, NULL, free_res_mgr ); + if( status != IB_SUCCESS ) + { + free_res_mgr( &gp_res_mgr->obj ); + return status; + } + + status = attach_al_obj( p_parent_obj, &gp_res_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_res_mgr->obj.pfn_destroy( &gp_res_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Initialize the pool of address vectors. */ + cl_status = cl_qpool_init( &gp_res_mgr->av_pool, + AL_AV_POOL_SIZE, 0, AL_AV_POOL_SIZE, sizeof( ib_av_t ), + av_ctor, av_dtor, gp_res_mgr ); + if( cl_status != CL_SUCCESS ) + { + gp_res_mgr->obj.pfn_destroy( &gp_res_mgr->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + + /* Initialize the pool of memory regions. */ + cl_status = cl_qpool_init( &gp_res_mgr->mr_pool, + AL_MR_POOL_SIZE, 0, AL_MR_POOL_SIZE, sizeof( ib_mr_t ), + mr_ctor, mr_dtor, gp_res_mgr ); + if( cl_status != CL_SUCCESS ) + { + gp_res_mgr->obj.pfn_destroy( &gp_res_mgr->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } + +#ifdef CL_KERNEL + /* Initialize the pool of fast memory regions. 
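+	 * Sizing note: each AL_*_POOL_SIZE constant is 4096 divided by the
+	 * object size, i.e. the pools are seeded and grown in roughly
+	 * page-sized batches.  With a hypothetical 256-byte mlnx_fmr_t that
+	 * works out to 4096 / 256 = 16 objects per growth step.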
*/ + cl_status = cl_qpool_init( &gp_res_mgr->fmr_pool, + AL_FMR_POOL_SIZE, 0, AL_FMR_POOL_SIZE, sizeof(mlnx_fmr_t), + mlnx_fmr_ctor, mlnx_fmr_dtor, gp_res_mgr ); + if( cl_status != CL_SUCCESS ) + { + gp_res_mgr->obj.pfn_destroy( &gp_res_mgr->obj, NULL ); + return ib_convert_cl_status( cl_status ); + } +#endif + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &gp_res_mgr->obj ); + + return IB_SUCCESS; +} + + + +/* + * Destroy the resource manager. + */ +void +free_res_mgr( + IN al_obj_t *p_obj ) +{ + CL_ASSERT( p_obj == &gp_res_mgr->obj ); + + cl_qpool_destroy( &gp_res_mgr->av_pool ); + cl_qpool_destroy( &gp_res_mgr->mr_pool ); +#ifdef CL_KERNEL + cl_qpool_destroy( &gp_res_mgr->fmr_pool ); +#endif + + destroy_al_obj( p_obj ); + cl_free ( gp_res_mgr ); + gp_res_mgr = NULL; +} + + + +/* + * Get a memory region structure to track registration requests. + */ +ib_mr_handle_t +alloc_mr() +{ + al_obj_t *p_obj; + cl_pool_item_t *p_pool_item; + + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + p_pool_item = cl_qpool_get( &gp_res_mgr->mr_pool ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + + if( !p_pool_item ) + return NULL; + + ref_al_obj( &gp_res_mgr->obj ); + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * Hold an extra reference on the object until creation is complete. + * This prevents a client's destruction of the object during asynchronous + * event callback processing from deallocating the object before the + * creation is complete. + */ + ref_al_obj( p_obj ); + + return PARENT_STRUCT( p_obj, ib_mr_t, obj ); +} + + + +/* + * Return a memory region structure to the available pool. + */ +void +put_mr( + IN ib_mr_handle_t h_mr ) +{ + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + cl_qpool_put( &gp_res_mgr->mr_pool, &h_mr->obj.pool_item ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + deref_al_obj( &gp_res_mgr->obj ); +} + + +#ifdef CL_KERNEL +/* + * Get a fast memory region structure to track registration requests. + */ +mlnx_fmr_handle_t +alloc_mlnx_fmr() +{ + al_obj_t *p_obj; + cl_pool_item_t *p_pool_item; + + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + p_pool_item = cl_qpool_get( &gp_res_mgr->fmr_pool ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + + if( !p_pool_item ) + return NULL; + + ref_al_obj( &gp_res_mgr->obj ); + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * Hold an extra reference on the object until creation is complete. + * This prevents a client's destruction of the object during asynchronous + * event callback processing from deallocating the object before the + * creation is complete. + */ + ref_al_obj( p_obj ); + + return PARENT_STRUCT( p_obj, mlnx_fmr_t, obj ); +} + + + +/* + * Return a memory region structure to the available pool. + */ +void +put_mlnx_fmr( + IN mlnx_fmr_handle_t h_fmr ) +{ + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + cl_qpool_put( &gp_res_mgr->fmr_pool, &h_fmr->obj.pool_item ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + deref_al_obj( &gp_res_mgr->obj ); +} +#endif + + +/* + * Get an address vector from the available pool. 
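+ *
+ * As with alloc_mr() and alloc_mlnx_fmr() above, the allocation takes
+ * two references: one on the resource manager, dropped by the matching
+ * put_av(), and one extra on the object itself, released once creation
+ * of the address vector completes.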
+ */ +ib_av_handle_t +alloc_av() +{ + al_obj_t *p_obj; + cl_pool_item_t *p_pool_item; + + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + p_pool_item = cl_qpool_get( &gp_res_mgr->av_pool ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + + if( !p_pool_item ) + return NULL; + + ref_al_obj( &gp_res_mgr->obj ); + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * Hold an extra reference on the object until creation is complete. + * This prevents a client's destruction of the object during asynchronous + * event callback processing from deallocating the object before the + * creation is complete. + */ + ref_al_obj( p_obj ); + return PARENT_STRUCT( p_obj, ib_av_t, obj ); +} + + + +/* + * Return an address vector to the available pool. + */ +void +put_av( + IN ib_av_handle_t h_av ) +{ + cl_spinlock_acquire( &gp_res_mgr->obj.lock ); + cl_qpool_put( &gp_res_mgr->av_pool, &h_av->obj.pool_item ); + cl_spinlock_release( &gp_res_mgr->obj.lock ); + deref_al_obj( &gp_res_mgr->obj ); +} diff --git a/branches/Ndi/core/al/al_res_mgr.h b/branches/Ndi/core/al/al_res_mgr.h new file mode 100644 index 00000000..0ceac0f3 --- /dev/null +++ b/branches/Ndi/core/al/al_res_mgr.h @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_RES_MGR_H__) +#define __AL_RES_MGR_H__ + + +#include + +#include "al_av.h" +#include "al_mr.h" +#include "al_qp.h" +#include "al_mad.h" + +#include +#include +#include +#include + + + +typedef struct _al_res_mgr +{ + al_obj_t obj; + + cl_qpool_t mr_pool; + cl_qpool_t av_pool; +#ifdef CL_KERNEL + cl_qpool_t fmr_pool; +#endif + +} al_res_mgr_t; + + + +ib_api_status_t +create_res_mgr( + IN al_obj_t *p_parent_obj ); + + +ib_mr_handle_t +alloc_mr(void); + + +void +put_mr( + IN ib_mr_handle_t h_mr ); + +#ifdef CL_KERNEL +mlnx_fmr_handle_t +alloc_mlnx_fmr(void); + + +void +put_mlnx_fmr( + IN mlnx_fmr_handle_t h_fmr ); +#endif + +ib_av_handle_t +alloc_av(void); + + +void +put_av( + IN ib_av_handle_t h_av ); + + +#endif /* __AL_RES_MGR_H__ */ diff --git a/branches/Ndi/core/al/al_srq.c b/branches/Ndi/core/al/al_srq.c new file mode 100644 index 00000000..94cbcfe1 --- /dev/null +++ b/branches/Ndi/core/al/al_srq.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. 
All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al_qp.c 1611 2006-08-20 14:48:55Z leonid $ + */ + +#include +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_srq.tmh" +#endif +#include "al_mgr.h" +#include "al_mr.h" +#include "al_pd.h" +#include "al_srq.h" +#include "al_verbs.h" + +#include "ib_common.h" + +/* + * Function prototypes. + */ +void +destroying_srq( + IN struct _al_obj *p_obj ); + +void +cleanup_srq( + IN al_obj_t *p_obj ); + +void +free_srq( + IN al_obj_t *p_obj ); + + +ib_destroy_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + + /* Don't destroy while there are bound QPs. */ + cl_spinlock_acquire( &h_srq->obj.lock ); + if (!cl_is_qlist_empty( &h_srq->qp_list )) + { + cl_spinlock_release( &h_srq->obj.lock ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_RESOURCE_BUSY\n") ); + return IB_RESOURCE_BUSY; + } + cl_spinlock_release( &h_srq->obj.lock ); + + ref_al_obj( &h_srq->obj ); + h_srq->obj.pfn_destroy( &h_srq->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_SRQ ); + return IB_SUCCESS; +} + + +void +destroying_srq( + IN struct _al_obj *p_obj ) +{ + ib_srq_handle_t h_srq; + cl_list_item_t *p_item; + cl_obj_rel_t *p_rel; + ib_qp_handle_t h_qp; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + /* Initiate destruction of all bound QPs. */ + cl_spinlock_acquire( &h_srq->obj.lock ); + for( p_item = cl_qlist_remove_tail( &h_srq->qp_list ); + p_item != cl_qlist_end( &h_srq->qp_list ); + p_item = cl_qlist_remove_tail( &h_srq->qp_list ) ) + { + p_rel = PARENT_STRUCT( p_item, cl_obj_rel_t, pool_item.list_item ); + p_rel->p_parent_obj = NULL; + h_qp = (ib_qp_handle_t)p_rel->p_child_obj; + if( h_qp ) + { + /* Take a reference to prevent the QP from being destroyed. 
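+			 * The SRQ lock is released around the QP's pfn_destroy()
+			 * call below since QP teardown re-enters srq_detach_qp(),
+			 * which acquires this same lock.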
*/ + ref_al_obj( &h_qp->obj ); + cl_spinlock_release( &h_srq->obj.lock ); + h_qp->obj.pfn_destroy( &h_qp->obj, NULL ); + cl_spinlock_acquire( &h_srq->obj.lock ); + } + } + cl_spinlock_release( &h_srq->obj.lock ); +} + +void +cleanup_srq( + IN struct _al_obj *p_obj ) +{ + ib_srq_handle_t h_srq; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + /* Deallocate the CI srq. */ + if( verbs_check_srq( h_srq ) ) + { + status = verbs_destroy_srq( h_srq ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + +/* + * Release all resources associated with the completion queue. + */ +void +free_srq( + IN al_obj_t *p_obj ) +{ + ib_srq_handle_t h_srq; + + CL_ASSERT( p_obj ); + h_srq = PARENT_STRUCT( p_obj, ib_srq_t, obj ); + + destroy_al_obj( &h_srq->obj ); + cl_free( h_srq ); +} + + +void +srq_attach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ) +{ + p_qp_rel->p_parent_obj = (cl_obj_t*)h_srq; + ref_al_obj( &h_srq->obj ); + cl_spinlock_acquire( &h_srq->obj.lock ); + cl_qlist_insert_tail( &h_srq->qp_list, &p_qp_rel->pool_item.list_item ); + cl_spinlock_release( &h_srq->obj.lock ); +} + + +void +srq_detach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ) +{ + if( p_qp_rel->p_parent_obj ) + { + CL_ASSERT( p_qp_rel->p_parent_obj == (cl_obj_t*)h_srq ); + p_qp_rel->p_parent_obj = NULL; + cl_spinlock_acquire( &h_srq->obj.lock ); + cl_qlist_remove_item( &h_srq->qp_list, &p_qp_rel->pool_item.list_item ); + cl_spinlock_release( &h_srq->obj.lock ); + } +} + + +ib_api_status_t +ib_modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ) +{ + return modify_srq( h_srq, p_srq_attr, srq_attr_mask, NULL ); +} + + +ib_api_status_t +modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + + if( !p_srq_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( !( srq_attr_mask & (IB_SRQ_MAX_WR |IB_SRQ_LIMIT)) || + ( srq_attr_mask & ~(IB_SRQ_MAX_WR |IB_SRQ_LIMIT))) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_SETTING; + } + + if((srq_attr_mask & IB_SRQ_LIMIT) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr ) + { + if (p_srq_attr->srq_limit > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + } + + if((srq_attr_mask & IB_SRQ_MAX_WR) && !p_srq_attr->max_wr) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SETTING\n") ); + return IB_INVALID_SETTING; + } + + if ((srq_attr_mask & IB_SRQ_MAX_WR) && h_srq->obj.p_ci_ca && h_srq->obj.p_ci_ca->p_pnp_attr) + { + if (p_srq_attr->max_wr > h_srq->obj.p_ci_ca->p_pnp_attr->max_srq_wrs) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_MAX_WRS\n") ); + return IB_INVALID_MAX_WRS; + } + } + + status = verbs_modify_srq( h_srq, p_srq_attr, srq_attr_mask ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + + +ib_api_status_t +ib_query_srq( + IN const ib_srq_handle_t h_srq, + OUT 
ib_srq_attr_t* const p_srq_attr ) +{ + return query_srq( h_srq, p_srq_attr, NULL ); +} + + + +ib_api_status_t +query_srq( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_SRQ_HANDLE; + } + if( !p_srq_attr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = verbs_query_srq( h_srq, p_srq_attr ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + +/* + * Initializes the QP information structure. + */ +ib_api_status_t +create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb, + OUT ib_srq_handle_t* const ph_srq, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_srq_handle_t h_srq; + ib_api_status_t status; + al_obj_type_t obj_type = AL_OBJ_TYPE_H_SRQ; + + h_srq = cl_zalloc( sizeof( ib_srq_t ) ); + if( !h_srq ) + { + return IB_INSUFFICIENT_MEMORY; + } + + if( p_umv_buf ) + obj_type |= AL_OBJ_SUBTYPE_UM_EXPORT; + + /* Construct the SRQ. */ + construct_al_obj( &h_srq->obj, obj_type ); + + cl_qlist_init( &h_srq->qp_list ); + h_srq->pfn_event_cb = pfn_srq_event_cb; + + /* Initialize the SRQ. */ + status = init_al_obj( &h_srq->obj, srq_context, TRUE, + destroying_srq, cleanup_srq, free_srq ); + if( status != IB_SUCCESS ) + { + free_srq( &h_srq->obj ); + return status; + } + status = attach_al_obj( &h_pd->obj, &h_srq->obj ); + if( status != IB_SUCCESS ) + { + h_srq->obj.pfn_destroy( &h_srq->obj, NULL ); + return status; + } + + status = verbs_create_srq( h_pd, h_srq, p_srq_attr, p_umv_buf ); + if( status != IB_SUCCESS ) + { + h_srq->obj.pfn_destroy( &h_srq->obj, NULL ); + return status; + } + + *ph_srq = h_srq; + + /* + * Note that we don't release the reference taken in init_al_obj here. + * For kernel clients, it is release in ib_create_srq. For user-mode + * clients is released by the proxy after the handle is extracted. + */ + return IB_SUCCESS; +} + + +/* + * Process an asynchronous event on the QP. Notify the user of the event. + */ +void +srq_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ) +{ + ib_srq_handle_t h_srq; + + CL_ASSERT( p_event_rec ); + h_srq = (ib_srq_handle_t)p_event_rec->context; + +#if defined(CL_KERNEL) + switch( p_event_rec->code ) + { + case IB_AE_SRQ_LIMIT_REACHED: + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, + ("IB_AE_SRQ_LIMIT_REACHED for srq %p \n", h_srq) ); + //TODO: handle this error. + break; + case IB_AE_SRQ_CATAS_ERROR: + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_SRQ, + ("IB_AE_SRQ_CATAS_ERROR for srq %p \n", h_srq) ); + //TODO: handle this error. 
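+		// (A catastrophic SRQ error generally cannot be repaired in
+		// place; a plausible handling sketch would be to fail the
+		// bound QPs and have the owner destroy and recreate the SRQ.)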
+ break; + default: + break; + } +#endif + + p_event_rec->context = (void*)h_srq->obj.context; + p_event_rec->handle.h_srq = h_srq; + + if( h_srq->pfn_event_cb ) + h_srq->pfn_event_cb( p_event_rec ); +} + +ib_api_status_t +ib_post_srq_recv( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + if( AL_OBJ_INVALID_HANDLE( h_srq, AL_OBJ_TYPE_H_SRQ ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + return IB_INVALID_QP_HANDLE; + } + if( !p_recv_wr || ( p_recv_wr->p_next && !pp_recv_failure ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = + h_srq->pfn_post_srq_recv( h_srq->h_recv_srq, p_recv_wr, pp_recv_failure ); + + AL_EXIT( AL_DBG_SRQ ); + return status; +} + + + diff --git a/branches/Ndi/core/al/al_srq.h b/branches/Ndi/core/al/al_srq.h new file mode 100644 index 00000000..28ad8187 --- /dev/null +++ b/branches/Ndi/core/al/al_srq.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al_srq.h 1611 2006-08-20 14:48:55Z leonid $ + */ + +#if !defined(__AL_SRQ_H__) +#define __AL_SRQ_H__ + +#include +#include +#include +#include + +#include "al_ca.h" +#include "al_common.h" + + +typedef ib_api_status_t +(*ib_pfn_post_srq_recv_t)( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + IN ib_recv_wr_t **p_recv_failure OPTIONAL ); + + +/* + * Shared queue pair information required by the access layer. This structure + * is referenced by a user's SRQ handle. + */ +typedef struct _ib_srq +{ + al_obj_t obj; /* Must be first. */ + + ib_srq_handle_t h_ci_srq; /* kernel SRQ handle */ + ib_pfn_post_srq_recv_t pfn_post_srq_recv; /* post_srq_recv call */ + ib_srq_handle_t h_recv_srq; /* srq handle for the post_srq_recv call */ + ib_pfn_event_cb_t pfn_event_cb; /* user async event handler */ + cl_qlist_t qp_list; /* List of QPs bound to this CQ. 
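+								 * (sic: the list tracks QPs bound
+								 * to this SRQ, maintained by
+								 * srq_attach_qp()/srq_detach_qp().)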
*/ + +} ib_srq_t; + +ib_api_status_t +create_srq( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t pfn_srq_event_cb, + OUT ib_srq_handle_t* const ph_srq, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +query_srq( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +ib_api_status_t +modify_srq( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t* const p_umv_buf ); + + +void +srq_async_event_cb( + IN ib_async_event_rec_t* const p_event_rec ); + +void +srq_attach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ); + +void +srq_detach_qp( + IN const ib_srq_handle_t h_srq, + IN cl_obj_rel_t* const p_qp_rel ); + +#endif /* __AL_QP_H__ */ + diff --git a/branches/Ndi/core/al/al_sub.c b/branches/Ndi/core/al/al_sub.c new file mode 100644 index 00000000..edc2b71c --- /dev/null +++ b/branches/Ndi/core/al/al_sub.c @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include + +#include "al.h" +#include "al_common.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_sub.tmh" +#endif +#include "al_sub.h" + + +ib_api_status_t +ib_subscribe( + IN const ib_al_handle_t h_al, + IN const ib_sub_req_t* const p_sub_req, + OUT ib_sub_handle_t* const ph_sub ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SUB ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_sub_req || !ph_sub ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = IB_UNSUPPORTED; + + AL_EXIT( AL_DBG_SUB ); + return status; +} + + +ib_api_status_t +ib_unsubscribe( + IN const ib_sub_handle_t h_sub, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_SUB ); + + if( !h_sub ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + UNUSED_PARAM( pfn_destroy_cb ); + + AL_EXIT( AL_DBG_SUB ); + return IB_UNSUPPORTED; +} diff --git a/branches/Ndi/core/al/al_sub.h b/branches/Ndi/core/al/al_sub.h new file mode 100644 index 00000000..fbfe422a --- /dev/null +++ b/branches/Ndi/core/al/al_sub.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_SUB_H__) +#define __AL_SUB_H__ + +#endif /* __AL_SUB_H__ */ diff --git a/branches/Ndi/core/al/al_verbs.h b/branches/Ndi/core/al/al_verbs.h new file mode 100644 index 00000000..dec7a33f --- /dev/null +++ b/branches/Ndi/core/al/al_verbs.h @@ -0,0 +1,636 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__AL_VERBS_H__) +#define __AL_VERBS_H__ + +#include "al_ca.h" +#include "al_cq.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_srq.h" + +#ifndef CL_KERNEL +#include "ual_mad.h" +#include "ual_qp.h" +#include "ual_mcast.h" +#endif + +#ifdef CL_KERNEL + + /* Macros for kernel-mode only */ +#define verbs_create_av(h_pd, p_av_attr, h_av) \ + h_av->obj.p_ci_ca->verbs.create_av( h_pd->h_ci_pd,\ + p_av_attr, &h_av->h_ci_av, p_umv_buf ) + +#define verbs_check_av(h_av) ((h_av)->h_ci_av) +#define convert_av_handle(h_qp, h_av) ((h_av)->h_ci_av) +#define verbs_destroy_av(h_av) \ + h_av->obj.p_ci_ca->verbs.destroy_av( h_av->h_ci_av ) + +#define verbs_query_av(h_av, p_av_attr, ph_pd) \ + h_av->obj.p_ci_ca->verbs.query_av( h_av->h_ci_av,\ + p_av_attr, ph_pd, p_umv_buf ) + +#define verbs_modify_av(h_av, p_av_mod) \ + h_av->obj.p_ci_ca->verbs.modify_av( h_av->h_ci_av, p_av_mod, p_umv_buf ) + +#define verbs_query_ca(h_ca, p_ca_attr, p_size) \ + h_ca->obj.p_ci_ca->verbs.query_ca( h_ca->obj.p_ci_ca->h_ci_ca,\ + p_ca_attr, p_size, p_umv_buf ) + +#define verbs_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod) \ + h_ca->obj.p_ci_ca->verbs.modify_ca( h_ca->obj.p_ci_ca->h_ci_ca,\ + port_num, ca_mod, p_port_attr_mod ) + +static inline ib_api_status_t +verbs_create_cq( + IN const ib_ca_handle_t h_ca, + IN OUT ib_cq_create_t* const p_cq_create, + IN ib_cq_handle_t h_cq, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + return h_ca->obj.p_ci_ca->verbs.create_cq( + (p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca, + h_cq, &p_cq_create->size, &h_cq->h_ci_cq, p_umv_buf ); +} + +#define verbs_check_cq(h_cq) ((h_cq)->h_ci_cq) +#define verbs_destroy_cq(h_cq) \ + h_cq->obj.p_ci_ca->verbs.destroy_cq( h_cq->h_ci_cq ) + +#define verbs_modify_cq(h_cq, p_size) \ + h_cq->obj.p_ci_ca->verbs.resize_cq( h_cq->h_ci_cq, p_size, p_umv_buf ) + +#define verbs_query_cq(h_cq, p_size) \ + h_cq->obj.p_ci_ca->verbs.query_cq( h_cq->h_ci_cq, p_size, p_umv_buf ) + +#define verbs_peek_cq(h_cq, p_n_cqes) \ + ( ( h_cq->obj.p_ci_ca->verbs.peek_cq ) ? \ + h_cq->obj.p_ci_ca->verbs.peek_cq( h_cq->h_ci_cq, p_n_cqes) : \ + IB_UNSUPPORTED ) + +#define verbs_poll_cq(h_cq, pp_free_wclist, pp_done_wclist) \ + h_cq->obj.p_ci_ca->verbs.poll_cq( h_cq->h_ci_cq,\ + pp_free_wclist, pp_done_wclist ) + +#define verbs_rearm_cq(h_cq, solicited) \ + h_cq->obj.p_ci_ca->verbs.enable_cq_notify( h_cq->h_ci_cq,\ + solicited ) + +#define verbs_rearm_n_cq(h_cq, n_cqes) \ + ( ( h_cq->obj.p_ci_ca->verbs.enable_ncomp_cq_notify ) ? 
\ + h_cq->obj.p_ci_ca->verbs.enable_ncomp_cq_notify(h_cq->h_ci_cq,n_cqes): \ + IB_UNSUPPORTED ) + +#define verbs_register_mr(h_pd, p_mr_create, p_lkey, p_rkey, h_mr) \ + h_mr->obj.p_ci_ca->verbs.register_mr( h_pd->h_ci_pd,\ + p_mr_create, p_lkey, p_rkey, &h_mr->h_ci_mr, um_call ) + +#define verbs_register_pmr(h_pd, p_phys_create, p_vaddr,\ + p_lkey, p_rkey, h_mr) \ + h_mr->obj.p_ci_ca->verbs.register_pmr( h_pd->h_ci_pd,\ + p_phys_create, p_vaddr, p_lkey, p_rkey, &h_mr->h_ci_mr, FALSE ) + +#define verbs_check_mr(h_mr) ((h_mr)->h_ci_mr) +#define verbs_check_mlnx_fmr(h_fmr) ((h_fmr)->h_ci_fmr) +#define verbs_deregister_mr(h_mr) \ + h_mr->obj.p_ci_ca->verbs.deregister_mr( h_mr->h_ci_mr ) + +/* + * Remove this registration from the shmid's list to prevent any + * new registrations from accessing it once it is deregistered. + */ +#define verbs_release_shmid(h_mr) \ + if( h_mr->p_shmid ) \ + { \ + cl_spinlock_acquire( &h_mr->p_shmid->obj.lock ); \ + cl_list_remove_object( &h_mr->p_shmid->mr_list, h_mr ); \ + cl_spinlock_release( &h_mr->p_shmid->obj.lock ); \ + release_shmid( h_mr->p_shmid ); \ + h_mr->p_shmid = NULL; \ + } +#define verbs_query_mr(h_mr, p_mr_attr) \ + h_mr->obj.p_ci_ca->verbs.query_mr( h_mr->h_ci_mr, p_mr_attr ) + +#define verbs_modify_mr(h_mr, mr_modify_mask, p_mr_create, \ + p_lkey, p_rkey, h_pd ) \ + h_mr->obj.p_ci_ca->verbs.modify_mr( h_mr->h_ci_mr, mr_modify_mask, \ + p_mr_create, p_lkey, p_rkey, h_pd ? h_pd->h_ci_pd : NULL, \ + um_call ) + +#define verbs_modify_pmr(h_mr, mr_modify_mask, p_pmr_create, \ + p_vaddr, p_lkey, p_rkey, h_pd ) \ + h_mr->obj.p_ci_ca->verbs.modify_pmr( h_mr->h_ci_mr, mr_modify_mask, \ + p_pmr_create, p_vaddr, p_lkey, p_rkey, \ + h_pd ? h_pd->h_ci_pd : NULL, FALSE ) + +#define verbs_register_smr(h_mr, h_pd, access_ctrl, p_vaddr, p_lkey, \ + p_rkey, ph_mr ) \ + h_mr->obj.p_ci_ca->verbs.register_smr( h_mr->h_ci_mr, h_pd->h_ci_pd,\ + access_ctrl, p_vaddr, p_lkey, p_rkey, &(ph_mr->h_ci_mr), \ + um_call ) + +#define verbs_create_mlnx_fmr(h_pd, p_fmr_create, h_fmr ) \ + h_fmr->obj.p_ci_ca->verbs.alloc_mlnx_fmr( h_pd->h_ci_pd,\ + p_fmr_create, &h_fmr->h_ci_fmr ) + +#define verbs_map_phys_mlnx_fmr( h_fmr, plist_addr, list_len, p_vaddr, p_lkey, p_rkey) \ + h_fmr->obj.p_ci_ca->verbs.map_phys_mlnx_fmr( h_fmr->h_ci_fmr,\ + plist_addr, list_len, p_vaddr, p_lkey, p_rkey ) + +#define verbs_unmap_mlnx_fmr( h_fmr, p_fmr_array ) \ + h_fmr->obj.p_ci_ca->verbs.unmap_mlnx_fmr( p_fmr_array) + +#define verbs_destroy_mlnx_fmr( h_fmr ) \ + h_fmr->obj.p_ci_ca->verbs.dealloc_mlnx_fmr( h_fmr->h_ci_fmr ) + + +#define verbs_create_mw(h_pd, p_rkey, h_mw) \ + h_mw->obj.p_ci_ca->verbs.create_mw( h_pd->h_ci_pd,\ + p_rkey, &h_mw->h_ci_mw, p_umv_buf ) + +#define verbs_check_mw(h_mw) ((h_mw)->h_ci_mw) +#define verbs_destroy_mw(h_mw) \ + h_mw->obj.p_ci_ca->verbs.destroy_mw( h_mw->h_ci_mw ) + +#define verbs_query_mw(h_mw, ph_pd, p_rkey) \ + h_mw->obj.p_ci_ca->verbs.query_mw(\ + h_mw->h_ci_mw, ph_pd, p_rkey, p_umv_buf ) + +#define convert_mr_handle(h_mr) ((h_mr)->h_ci_mr) + +#define verbs_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey) \ + h_qp->obj.p_ci_ca->verbs.bind_mw( h_mw->h_ci_mw,\ + h_qp->h_ci_qp, p_mw_bind, p_rkey ) + +static inline ib_api_status_t +verbs_allocate_pd( + IN const ib_ca_handle_t h_ca, + IN ib_pd_handle_t h_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + return h_ca->obj.p_ci_ca->verbs.allocate_pd( + (p_umv_buf) ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca, + h_pd->type, &h_pd->h_ci_pd, p_umv_buf ); +} + +/* + * Reference the hardware PD. 
+ */ +static inline ib_api_status_t +allocate_pd_alias( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_handle_t h_pd ) +{ + UNUSED_PARAM( h_ca ); + h_pd->h_ci_pd = h_pd->obj.p_ci_ca->h_pd->h_ci_pd; + ref_al_obj( &h_pd->obj.p_ci_ca->h_pd->obj ); + return IB_SUCCESS; +} + +static inline void +deallocate_pd_alias( + IN const ib_pd_handle_t h_pd ) +{ + deref_al_obj( &h_pd->obj.p_ci_ca->h_pd->obj ); +} + + + +#define verbs_check_pd(h_pd) ((h_pd)->h_ci_pd) +#define verbs_deallocate_pd(h_pd) \ + h_pd->obj.p_ci_ca->verbs.deallocate_pd( h_pd->h_ci_pd ) + +static inline ib_api_status_t +verbs_create_srq( + IN const ib_pd_handle_t h_pd, + IN ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + status = h_srq->obj.p_ci_ca->verbs.create_srq( + h_pd->h_ci_pd, h_srq, p_srq_attr, + &h_srq->h_ci_srq, p_umv_buf ); + + h_srq->h_recv_srq = h_srq->h_ci_srq; + h_srq->pfn_post_srq_recv = h_srq->obj.p_ci_ca->verbs.post_srq_recv; + return status; +} + +#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq) + +#define verbs_destroy_srq(h_srq) \ + h_srq->obj.p_ci_ca->verbs.destroy_srq( h_srq->h_ci_srq ) + +#define verbs_query_srq(h_srq, p_srq_attr) \ + h_srq->obj.p_ci_ca->verbs.query_srq( h_srq->h_ci_srq,\ + p_srq_attr, p_umv_buf ) + +#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \ + h_srq->obj.p_ci_ca->verbs.modify_srq( h_srq->h_ci_srq,\ + p_srq_attr, srq_attr_mask, p_umv_buf ) + +#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \ + h_srq->obj.p_ci_ca->verbs.post_srq_recv( h_srq->h_ci_srq,\ + p_recv_wr, pp_recv_failure ) + +#define convert_qp_handle( qp_create ) {\ + CL_ASSERT( qp_create.h_rq_cq ); \ + qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; \ + CL_ASSERT( qp_create.h_sq_cq ); \ + qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; \ + if (qp_create.h_srq) \ + qp_create.h_srq = qp_create.h_srq->h_ci_srq; \ +} + +static inline ib_api_status_t +verbs_get_spl_qp( + IN ib_pd_handle_t h_pd, + IN uint8_t port_num, + IN ib_qp_handle_t h_qp, + IN ib_qp_create_t *p_qp_create, + IN ib_qp_attr_t *p_qp_attr ) +{ + ib_api_status_t status; + + status = h_qp->obj.p_ci_ca->verbs.create_spl_qp( + h_pd->h_ci_pd, port_num, h_qp, p_qp_create, + p_qp_attr, &h_qp->h_ci_qp ); + + h_qp->h_recv_qp = h_qp->h_ci_qp; + h_qp->h_send_qp = h_qp->h_ci_qp; + + h_qp->pfn_post_send = h_qp->obj.p_ci_ca->verbs.post_send; + h_qp->pfn_post_recv = h_qp->obj.p_ci_ca->verbs.post_recv; + return status; +} + + +static inline ib_api_status_t +verbs_create_qp( + IN ib_pd_handle_t h_pd, + IN ib_qp_handle_t h_qp, + IN ib_qp_create_t *p_qp_create, + IN ib_qp_attr_t *p_qp_attr, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + ib_api_status_t status; + + status = h_qp->obj.p_ci_ca->verbs.create_qp( + h_pd->h_ci_pd, h_qp, p_qp_create, p_qp_attr, + &h_qp->h_ci_qp, p_umv_buf ); + + h_qp->h_recv_qp = h_qp->h_ci_qp; + h_qp->h_send_qp = h_qp->h_ci_qp; + + h_qp->pfn_post_send = h_qp->obj.p_ci_ca->verbs.post_send; + h_qp->pfn_post_recv = h_qp->obj.p_ci_ca->verbs.post_recv; + return status; +} + +#define verbs_check_qp(h_qp) ((h_qp)->h_ci_qp) +#define verbs_destroy_qp(h_qp) \ + h_qp->obj.p_ci_ca->verbs.destroy_qp( h_qp->h_ci_qp, h_qp->timewait ) + +#define verbs_query_qp(h_qp, p_qp_attr) \ + h_qp->obj.p_ci_ca->verbs.query_qp( h_qp->h_ci_qp,\ + p_qp_attr, p_umv_buf ) + +#define verbs_modify_qp(h_qp, p_qp_mod, qp_attr) \ + h_qp->obj.p_ci_ca->verbs.modify_qp( h_qp->h_ci_qp,\ + p_qp_mod, &qp_attr, p_umv_buf ) + +#define verbs_post_send(h_qp, p_send_wr, 
pp_send_failure) \ + h_qp->obj.p_ci_ca->verbs.post_send( h_qp->h_ci_qp,\ + p_send_wr, pp_send_failure ) + +#define verbs_post_recv(h_qp, p_recv_wr, pp_recv_failure) \ + h_qp->obj.p_ci_ca->verbs.post_recv( h_qp->h_ci_qp,\ + p_recv_wr, pp_recv_failure ) + +#define verbs_local_mad(h_ca, port_num, p_src_av_attr, p_mad_in, p_mad_out) \ + h_ca->obj.p_ci_ca->verbs.local_mad( h_ca->obj.p_ci_ca->h_ci_ca,\ + port_num, p_src_av_attr, p_mad_in, p_mad_out) + +#define check_local_mad(h_qp) \ + (h_qp->obj.p_ci_ca->verbs.local_mad) + +#define init_alias_qp( h_qp, h_pd, port_guid, p_qp_create ) \ + init_qp_alias( h_qp, h_pd, port_guid, p_qp_create ) + +#define spl_qp_mad_send( h_mad_svc, p_mad_element_list, pp_mad_failure ) \ + IB_ERROR + +#define spl_qp_cancel_mad( h_mad_svc, h_mad_send ) \ + IB_ERROR + +#define create_reg_mad_pool( h_pool, h_pd, p_pool_key ) \ + IB_SUCCESS + +#define dereg_destroy_mad_pool( pool_key ) + +#define verbs_attach_mcast(h_mcast) \ + h_mcast->obj.p_ci_ca->verbs.attach_mcast( \ + ((ib_qp_handle_t)h_mcast->obj.p_parent_obj)->h_ci_qp, &h_mcast->member_rec.mgid, \ + h_mcast->member_rec.mlid, &h_mcast->h_ci_mcast, \ + NULL) + +#define verbs_detach_mcast(h_mcast) \ + h_mcast->obj.p_ci_ca->verbs.detach_mcast( \ + h_mcast->h_ci_mcast ) + +static inline ib_api_status_t +verbs_ci_call( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN ci_umv_buf_t* const p_umv_buf OPTIONAL ) +{ + return h_ca->obj.p_ci_ca->verbs.vendor_call( + p_umv_buf ? h_ca->h_um_ca : h_ca->obj.p_ci_ca->h_ci_ca, + handle_array, num_handles, p_ci_op, p_umv_buf ); +} + + +#else + + + + /* Macros for user-mode only */ +#define verbs_create_av(h_pd, p_av_attr, h_av) \ + (h_pd->type == IB_PDT_ALIAS) ?\ + ual_pd_alias_create_av(h_pd, p_av_attr, h_av):\ + ual_create_av(h_pd, p_av_attr, h_av); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_check_av(h_av) ((h_av)->h_ci_av || (h_av)->obj.hdl) +#define convert_av_handle(h_qp, h_av) \ + ((h_qp)->h_ci_qp?(h_av)->h_ci_av:(ib_av_handle_t)(h_av)->obj.hdl) +#define verbs_destroy_av(h_av) \ + ual_destroy_av(h_av) + +#define verbs_query_av(h_av, p_av_attr, ph_pd) \ + ual_query_av(h_av, p_av_attr, ph_pd); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_modify_av(h_av, p_av_mod) \ + ual_modify_av(h_av, p_av_mod); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_query_ca(h_ca, p_ca_attr, p_size) \ + ual_query_ca(h_ca, p_ca_attr, p_size); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod) \ + ual_modify_ca(h_ca, port_num, ca_mod, p_port_attr_mod) + +static inline ib_api_status_t +verbs_create_cq( + IN const ib_ca_handle_t h_ca, + IN OUT ib_cq_create_t* const p_cq_create, + IN ib_cq_handle_t h_cq, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + UNUSED_PARAM( p_umv_buf ); + return ual_create_cq( h_ca->obj.p_ci_ca, p_cq_create, h_cq ); +} + + +#define verbs_check_cq(h_cq) ((h_cq)->h_ci_cq || (h_cq)->obj.hdl) +#define verbs_destroy_cq(h_cq) \ + ual_destroy_cq(h_cq) + +#define verbs_modify_cq(h_cq, p_size) \ + ual_modify_cq(h_cq, p_size); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_query_cq(h_cq, p_size) \ + ual_query_cq(h_cq, p_size); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_peek_cq(h_cq, p_n_cqes) \ + h_cq->pfn_peek( h_cq->h_peek_cq, p_n_cqes ) + +#define verbs_poll_cq(h_cq, pp_free_wclist, pp_done_wclist) \ + h_cq->pfn_poll( h_cq->h_poll_cq, pp_free_wclist, pp_done_wclist ) + +#define verbs_rearm_cq(h_cq, solicited) \ + h_cq->pfn_rearm( 
h_cq->h_rearm_cq, solicited ) + +#define verbs_rearm_n_cq(h_cq, n_cqes) \ + h_cq->pfn_rearm_n( h_cq->h_rearm_n_cq, n_cqes ) + +#define verbs_register_mr(h_pd, p_mr_create, p_lkey, p_rkey, h_mr) \ + ual_reg_mem(h_pd, p_mr_create, p_lkey, p_rkey, h_mr); \ + UNUSED_PARAM( um_call ) + +#define verbs_register_pmr(h_pd, p_phys_create, p_vaddr, p_lkey, p_rkey, h_mr) \ + IB_UNSUPPORTED; \ + UNUSED_PARAM( h_pd ); \ + UNUSED_PARAM( p_phys_create ); \ + UNUSED_PARAM( p_vaddr ); \ + UNUSED_PARAM( p_lkey ); \ + UNUSED_PARAM( p_rkey ); \ + UNUSED_PARAM( h_mr ) + +#define verbs_check_mr(h_mr) ((h_mr)->h_ci_mr || (h_mr)->obj.hdl) +#define verbs_deregister_mr(h_mr) \ + ual_dereg_mr(h_mr) + +/* For user-mode, this is nop */ +#define verbs_release_shmid(h_mr) + +#define verbs_query_mr(h_mr, p_mr_attr) \ + ual_query_mr(h_mr, p_mr_attr) + +#define verbs_modify_mr(h_mr, mr_modify_mask, p_mr_create, \ + p_lkey, p_rkey, h_pd ) \ + ual_modify_mr( h_mr, mr_modify_mask, p_mr_create, \ + p_lkey, p_rkey, h_pd ); \ + UNUSED_PARAM( um_call ) + +#define verbs_modify_pmr( h_mr, mr_mod_mask, p_phys_create, \ + p_vaddr, p_lkey, p_rkey, h_pd ) \ + IB_UNSUPPORTED; \ + UNUSED_PARAM( h_mr ); \ + UNUSED_PARAM( mr_mod_mask ); \ + UNUSED_PARAM( p_phys_create ); \ + UNUSED_PARAM( p_vaddr ); \ + UNUSED_PARAM( p_lkey ); \ + UNUSED_PARAM( p_rkey ); \ + UNUSED_PARAM( h_pd ) + +#define verbs_register_smr(h_mr, h_pd, access_ctrl, p_vaddr, p_lkey, \ + p_rkey, ph_mr ) \ + ual_reg_shared( h_mr, h_pd, access_ctrl, p_vaddr, p_lkey, \ + p_rkey, ph_mr ); \ + UNUSED_PARAM( um_call ) + +#define verbs_create_mw(h_pd, p_rkey, h_mw) \ + ual_create_mw(h_pd, p_rkey, h_mw); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_check_mw(h_mw) ((h_mw)->h_ci_mw || (h_mw)->obj.hdl) +#define verbs_destroy_mw(h_mw) \ + ual_destroy_mw(h_mw) + +#define verbs_query_mw(h_mw, ph_pd, p_rkey) \ + ual_query_mw(h_mw, ph_pd, p_rkey); \ + UNUSED_PARAM( p_umv_buf ) + +#define convert_mr_handle(h_mr) (h_mr) + +#define verbs_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey) \ + ual_bind_mw(h_mw, h_qp, p_mw_bind, p_rkey) + +static inline ib_api_status_t +verbs_allocate_pd( + IN const ib_ca_handle_t h_ca, + IN ib_pd_handle_t h_pd, + IN OUT ci_umv_buf_t* const p_umv_buf ) +{ + UNUSED_PARAM( p_umv_buf ); + return ual_allocate_pd( h_ca, h_pd->type, h_pd ); +} + +/* + * Get an alias to the kernel's hardware PD. + */ +static inline ib_api_status_t +allocate_pd_alias( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_handle_t h_pd ) +{ + return ual_allocate_pd( h_ca, h_pd->type, h_pd ); +} + +#define deallocate_pd_alias( h_pd ) /* no action to take */ + +#define verbs_check_pd(h_pd) ((h_pd)->h_ci_pd || (h_pd)->obj.hdl) +#define verbs_deallocate_pd(h_pd) \ + ual_deallocate_pd(h_pd) + +#define verbs_create_srq(h_pd, h_srq, p_srq_attr, p_umv_buf) \ + ual_create_srq (h_pd, h_srq, p_srq_attr); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_check_srq(h_srq) ((h_srq)->h_ci_srq || (h_srq)->obj.hdl) + +#define verbs_destroy_srq(h_srq) \ + ual_destroy_srq(h_srq) + +#define verbs_query_srq(h_srq, p_srq_attr) \ + ual_query_srq(h_srq, p_srq_attr); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_modify_srq(h_srq, p_srq_attr, srq_attr_mask) \ + ual_modify_srq(h_srq, p_srq_attr, srq_attr_mask); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) \ + ual_post_srq_recv(h_srq, p_recv_wr, pp_recv_failure) + + +/* For user-mode, handle conversion is done in ual files */ + +#define convert_qp_handle( qp_create ) + +/* TBD: Do we need to support this in user-mode? 
 */ +#define verbs_get_spl_qp(h_pd, port_num, h_qp, p_qp_create, p_qp_attr) \ + IB_UNSUPPORTED + +#define verbs_create_qp(h_pd, h_qp, p_qp_create, p_qp_attr, p_umv_buf) \ + ual_create_qp (h_pd, h_qp, p_qp_create, p_qp_attr); \ + UNUSED_PARAM( p_umv_buf ) + +#define verbs_check_qp(h_qp) ((h_qp)->h_ci_qp || (h_qp)->obj.hdl) +#define verbs_destroy_qp(h_qp) \ + ual_destroy_qp(h_qp) + +#define verbs_query_qp(h_qp, p_qp_attr) \ + ual_query_qp(h_qp, p_qp_attr); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_modify_qp(h_qp, p_qp_mod, qp_attr) \ + ual_modify_qp(h_qp, p_qp_mod, &qp_attr); \ + UNUSED_PARAM( p_umv_buf ); + +#define verbs_post_send(h_qp, p_send_wr, pp_send_failure) \ + ual_post_send(h_qp, p_send_wr, pp_send_failure) + +#define verbs_post_recv(h_qp, p_recv_wr, pp_recv_failure) \ + ual_post_recv(h_qp, p_recv_wr, pp_recv_failure) + +static inline ib_api_status_t +verbs_local_mad( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_av_attr_t* p_src_av_attr, + IN const void* const p_mad_in, + IN void* p_mad_out ) +{ + UNUSED_PARAM( p_src_av_attr ); + return ual_local_mad( h_ca, port_num, p_mad_in, p_mad_out ); +} + +#define check_local_mad(h_qp) \ + (!h_qp) + +#define init_alias_qp( h_qp, h_pd, port_guid, p_qp_create ) \ + ual_init_qp_alias( h_qp, h_pd, port_guid, p_qp_create ) + +#define spl_qp_mad_send( h_mad_svc, p_mad_element_list, pp_mad_failure ) \ + ual_spl_qp_mad_send( h_mad_svc, p_mad_element_list, pp_mad_failure ) + +#define spl_qp_cancel_mad( h_mad_svc, p_mad_element ) \ + ual_spl_qp_cancel_mad( h_mad_svc, p_mad_element ) + +#define create_reg_mad_pool( h_pool, h_pd, p_pool_key ) \ + ual_create_reg_mad_pool( h_pool, h_pd, p_pool_key ) + +#define dereg_destroy_mad_pool( pool_key ) \ + ual_dereg_destroy_mad_pool( pool_key ) + +#define verbs_attach_mcast(h_mcast) \ + ual_attach_mcast( h_mcast ) + +#define verbs_detach_mcast(h_mcast) \ + ual_detach_mcast( h_mcast ) + +#endif /* CL_KERNEL */ + + +#endif /* __AL_VERBS_H__ */ diff --git a/branches/Ndi/core/al/dirs b/branches/Ndi/core/al/dirs new file mode 100644 index 00000000..ddf0ed7d --- /dev/null +++ b/branches/Ndi/core/al/dirs @@ -0,0 +1,3 @@ +DIRS=\ + user \ + kernel
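The verbs_poll_cq/verbs_peek_cq plumbing above terminates in ib_poll_cq()'s two-list idiom: the caller donates a chain of free ib_wc_t entries and receives back the chain of entries that completed. A minimal sketch of that calling pattern, with a hypothetical helper name and a stack-allocated four-entry chain:

	static void
	example_poll_once(
		IN	const	ib_cq_handle_t	h_cq )
	{
		ib_wc_t		wc[4];
		ib_wc_t		*p_free, *p_done, *p_wc;
		uintn_t		i;

		/* Chain the stack entries into the free list the verbs expect. */
		for( i = 0; i < 3; i++ )
			wc[i].p_next = &wc[i + 1];
		wc[3].p_next = NULL;
		p_free = &wc[0];
		p_done = NULL;

		/* On IB_SUCCESS, p_done holds the completed entries; p_free the rest. */
		if( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
		{
			for( p_wc = p_done; p_wc != NULL; p_wc = p_wc->p_next )
			{
				/* Consume p_wc->status, p_wc->wr_id, p_wc->length, ... */
			}
		}
	}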
diff --git a/branches/Ndi/core/al/ib_common.c b/branches/Ndi/core/al/ib_common.c new file mode 100644 index 00000000..bb3fbb2a --- /dev/null +++ b/branches/Ndi/core/al/ib_common.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include "ib_common.h" + + + +ib_api_status_t +ib_convert_cl_status( + IN const cl_status_t cl_status ) +{ + switch( cl_status ) + { + case CL_SUCCESS: + return IB_SUCCESS; + case CL_INVALID_STATE: + return IB_INVALID_STATE; + case CL_INVALID_SETTING: + return IB_INVALID_SETTING; + case CL_INVALID_PARAMETER: + return IB_INVALID_PARAMETER; + case CL_INSUFFICIENT_RESOURCES: + return IB_INSUFFICIENT_RESOURCES; + case CL_INSUFFICIENT_MEMORY: + return IB_INSUFFICIENT_MEMORY; + case CL_INVALID_PERMISSION: + return IB_INVALID_PERMISSION; + case CL_COMPLETED: + return IB_SUCCESS; + case CL_INVALID_OPERATION: + return IB_UNSUPPORTED; + case CL_TIMEOUT: + return IB_TIMEOUT; + case CL_NOT_DONE: + return IB_NOT_DONE; + case CL_CANCELED: + return IB_CANCELED; + case CL_NOT_FOUND: + return IB_NOT_FOUND; + case CL_BUSY: + return IB_RESOURCE_BUSY; + case CL_PENDING: + return IB_PENDING; + case CL_OVERRUN: + return IB_OVERFLOW; + case CL_ERROR: + case CL_REJECT: + case CL_UNAVAILABLE: + case CL_DISCONNECT: + case CL_DUPLICATE: + default: + return IB_ERROR; + } +} + + +void +ib_fixup_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ) +{ + uint8_t i; + uintn_t offset = (uintn_t)p_dest - (uintn_t)p_src; + ib_port_attr_t *p_tmp_port_attr = NULL; + + CL_ASSERT( p_dest ); + CL_ASSERT( p_src ); + + /* Fix up the pointers to point within the destination buffer. */ + p_dest->p_page_size = + (uint32_t* __ptr64)(((uint8_t* __ptr64)p_dest->p_page_size) + offset); + + p_tmp_port_attr = + (ib_port_attr_t* __ptr64)(((uint8_t* __ptr64)p_dest->p_port_attr) + offset); + + /* Fix up each port attribute's gid and pkey table pointers. */ + for( i = 0; i < p_dest->num_ports; i++ ) + { + p_tmp_port_attr[i].p_gid_table = (ib_gid_t* __ptr64) + (((uint8_t* __ptr64)p_tmp_port_attr[i].p_gid_table) + offset); + + p_tmp_port_attr[i].p_pkey_table = (ib_net16_t* __ptr64) + (((uint8_t* __ptr64)p_tmp_port_attr[i].p_pkey_table) + offset); + } + p_dest->p_port_attr = p_tmp_port_attr; +} + + +ib_ca_attr_t* +ib_copy_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ) +{ + CL_ASSERT( p_dest ); + CL_ASSERT( p_src ); + + /* Copy the attributes buffer. */ + cl_memcpy( p_dest, p_src, p_src->size ); + + ib_fixup_ca_attr( p_dest, p_src ); + + return p_dest; +} diff --git a/branches/Ndi/core/al/ib_common.h b/branches/Ndi/core/al/ib_common.h new file mode 100644 index 00000000..456217b1 --- /dev/null +++ b/branches/Ndi/core/al/ib_common.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_COMMON_H__) +#define __IB_COMMON_H__ + + +#include +#include + + +AL_EXPORT ib_api_status_t AL_API +ib_convert_cl_status( + IN const cl_status_t cl_status ); + +AL_EXPORT void AL_API +ib_fixup_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ); + +#endif /* __IB_COMMON_H__ */ diff --git a/branches/Ndi/core/al/ib_statustext.c b/branches/Ndi/core/al/ib_statustext.c new file mode 100644 index 00000000..5740dffa --- /dev/null +++ b/branches/Ndi/core/al/ib_statustext.c @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Defines string to decode ib_api_status_t return values. + * + * Environment: + * All + */ + + +#include + + +/* ib_api_status_t values above converted to text for easier printing. 
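+ * The table below must track the ib_api_status_t enum order exactly;
+ * ib_get_err_str() clamps any value past IB_UNKNOWN_ERROR, so an
+ * out-of-range status cannot index beyond the end of the array.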
 */ +static const char* const __ib_error_str[] = +{ + "IB_SUCCESS", + "IB_INSUFFICIENT_RESOURCES", + "IB_INSUFFICIENT_MEMORY", + "IB_INVALID_PARAMETER", + "IB_INVALID_SETTING", + "IB_NOT_FOUND", + "IB_TIMEOUT", + "IB_CANCELED", + "IB_INTERRUPTED", + "IB_INVALID_PERMISSION", + "IB_UNSUPPORTED", + "IB_OVERFLOW", + "IB_MAX_MCAST_QPS_REACHED", + "IB_INVALID_QP_STATE", + "IB_INVALID_APM_STATE", + "IB_INVALID_PORT_STATE", + "IB_INVALID_STATE", + "IB_RESOURCE_BUSY", + "IB_INVALID_PKEY", + "IB_INVALID_LKEY", + "IB_INVALID_RKEY", + "IB_INVALID_MAX_WRS", + "IB_INVALID_MAX_SGE", + "IB_INVALID_CQ_SIZE", + "IB_INVALID_SRQ_SIZE", + "IB_INVALID_SERVICE_TYPE", + "IB_INVALID_GID", + "IB_INVALID_LID", + "IB_INVALID_GUID", + "IB_INVALID_CA_HANDLE", + "IB_INVALID_AV_HANDLE", + "IB_INVALID_CQ_HANDLE", + "IB_INVALID_QP_HANDLE", + "IB_INVALID_SRQ_HANDLE", + "IB_INVALID_PD_HANDLE", + "IB_INVALID_MR_HANDLE", + "IB_INVALID_FMR_HANDLE", + "IB_INVALID_MW_HANDLE", + "IB_INVALID_MCAST_HANDLE", + "IB_INVALID_CALLBACK", + "IB_INVALID_AL_HANDLE", + "IB_INVALID_HANDLE", + "IB_ERROR", + "IB_REMOTE_ERROR", + "IB_VERBS_PROCESSING_DONE", + "IB_INVALID_WR_TYPE", + "IB_QP_IN_TIMEWAIT", + "IB_EE_IN_TIMEWAIT", + "IB_INVALID_PORT", + "IB_NOT_DONE", + "IB_INVALID_INDEX", + "IB_NO_MATCH", + "IB_PENDING", + "IB_UNKNOWN_ERROR" +}; + + +const char* +ib_get_err_str( + IN ib_api_status_t status ) +{ + if( status > IB_UNKNOWN_ERROR ) + status = IB_UNKNOWN_ERROR; + return( __ib_error_str[status] ); +} + + +/* ib_async_event_t values above converted to text for easier printing. */ +static const char* const __ib_async_event_str[] = +{ + "IB_AE_DUMMY", /*place holder*/ + "IB_AE_SQ_ERROR", + "IB_AE_SQ_DRAINED", + "IB_AE_RQ_ERROR", + "IB_AE_CQ_ERROR", + "IB_AE_QP_FATAL", + "IB_AE_QP_COMM", + "IB_AE_QP_APM", + "IB_AE_LOCAL_FATAL", + "IB_AE_PKEY_TRAP", + "IB_AE_QKEY_TRAP", + "IB_AE_MKEY_TRAP", + "IB_AE_PORT_TRAP", + "IB_AE_SYSIMG_GUID_TRAP", + "IB_AE_BUF_OVERRUN", + "IB_AE_LINK_INTEGRITY", + "IB_AE_FLOW_CTRL_ERROR", + "IB_AE_BKEY_TRAP", + "IB_AE_QP_APM_ERROR", + "IB_AE_WQ_REQ_ERROR", + "IB_AE_WQ_ACCESS_ERROR", + "IB_AE_PORT_ACTIVE", /* ACTIVE STATE */ + "IB_AE_PORT_DOWN", /* INIT, ARMED, DOWN */ + "IB_AE_UNKNOWN" +}; + + +const char* +ib_get_async_event_str( + IN ib_async_event_t event ) +{ + if( event > IB_AE_UNKNOWN ) + event = IB_AE_UNKNOWN; + return( __ib_async_event_str[event] ); +} + + +static const char* const __ib_wc_status_str[] = +{ + "IB_WCS_SUCCESS", + "IB_WCS_LOCAL_LEN_ERR", + "IB_WCS_LOCAL_OP_ERR", + "IB_WCS_LOCAL_PROTECTION_ERR", + "IB_WCS_WR_FLUSHED_ERR", + "IB_WCS_MEM_WINDOW_BIND_ERR", + "IB_WCS_REM_ACCESS_ERR", + "IB_WCS_REM_OP_ERR", + "IB_WCS_RNR_RETRY_ERR", + "IB_WCS_TIMEOUT_RETRY_ERR", + "IB_WCS_REM_INVALID_REQ_ERR", + "IB_WCS_BAD_RESP_ERR", + "IB_WCS_LOCAL_ACCESS_ERR", + "IB_WCS_GENERAL_ERR", + "IB_WCS_UNMATCHED_RESPONSE", /* InfiniBand Access Layer */ + "IB_WCS_CANCELED", /* InfiniBand Access Layer */ + "IB_WCS_UNKNOWN" +}; + + +const char* +ib_get_wc_status_str( + IN ib_wc_status_t wc_status ) +{ + if( wc_status > IB_WCS_UNKNOWN ) + wc_status = IB_WCS_UNKNOWN; + return( __ib_wc_status_str[wc_status] ); +} + + +static const char* const __ib_wc_type_str[] = +{ + "IB_WC_SEND", + "IB_WC_RDMA_WRITE", + "IB_WC_RECV", + "IB_WC_RDMA_READ", + "IB_WC_MW_BIND", + "IB_WC_FETCH_ADD", + "IB_WC_COMPARE_SWAP", + "IB_WC_RECV_RDMA_WRITE", + "IB_WC_UNKNOWN" +}; + + +const char* +ib_get_wc_type_str( + IN ib_wc_type_t wc_type ) +{ + if( wc_type > IB_WC_UNKNOWN ) + wc_type = IB_WC_UNKNOWN; + return( __ib_wc_type_str[wc_type] ); +} + +
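+
+/*
+ * Illustrative sketch (not part of the original file): the helpers above are
+ * typically combined when logging failed work completions. The helper name
+ * is hypothetical, and cl_dbg_out is assumed to be available from complib's
+ * debug header.
+ */
+static void
+example_log_bad_wc(
+	IN const ib_wc_t* const p_wc )
+{
+	if( p_wc->status != IB_WCS_SUCCESS )
+	{
+		cl_dbg_out( "%s failed: %s\n",
+			ib_get_wc_type_str( p_wc->wc_type ),
+			ib_get_wc_status_str( p_wc->status ) );
+	}
+}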
+static const char* const __ib_wr_type_str[] = +{ + "WR_DUMMY", /*place holder*/ + "WR_SEND", + "WR_RDMA_WRITE", + "WR_RDMA_READ", + "WR_COMPARE_SWAP", + "WR_FETCH_ADD", + "WR_UNKNOWN" +}; + + +const char* +ib_get_wr_type_str( + IN uint8_t wr_type ) +{ + if( wr_type > WR_UNKNOWN ) + wr_type = WR_UNKNOWN; + return( __ib_wr_type_str[wr_type] ); +} + +static const char* const __ib_qp_type_str[] = +{ + "IB_QPT_RELIABLE_CONN", + "IB_QPT_UNRELIABLE_CONN", + "IB_QPT_UNKNOWN", + "IB_QPT_UNRELIABLE_DGRM", + "IB_QPT_QP0", + "IB_QPT_QP1", + "IB_QPT_RAW_IPV6", + "IB_QPT_RAW_ETHER", + "IB_QPT_MAD", + "IB_QPT_QP0_ALIAS", + "IB_QPT_QP1_ALIAS", + "IB_QPT_UNKNOWN" + +}; + + +const char* +ib_get_qp_type_str( + IN uint8_t qp_type ) +{ + if( qp_type > IB_QPT_UNKNOWN ) + qp_type = IB_QPT_UNKNOWN; + return( __ib_qp_type_str[qp_type] ); +} + + diff --git a/branches/Ndi/core/al/kernel/SOURCES b/branches/Ndi/core/al/kernel/SOURCES new file mode 100644 index 00000000..b45085bb --- /dev/null +++ b/branches/Ndi/core/al/kernel/SOURCES @@ -0,0 +1,85 @@ +TARGETNAME=ibal +TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER_LIBRARY + + + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + + +DLLDEF=al_exports.def + +SOURCES= ibal.rc \ + al_ca_pnp.c \ + al_ci_ca.c \ + al_cm_cep.c \ + al_dev.c \ + al_ioc_pnp.c \ + al_mad_pool.c \ + al_fmr_pool.c \ + al_mgr.c \ + al_mr.c \ + al_pnp.c \ + al_proxy.c \ + al_proxy_cep.c \ + al_proxy_ioc.c \ + al_proxy_subnet.c \ + al_proxy_verbs.c \ + al_sa_req.c \ + al_smi.c \ + ..\al.c \ + ..\al_av.c \ + ..\al_ca.c \ + ..\al_ci_ca_shared.c \ + ..\al_cm_qp.c \ + ..\al_common.c \ + ..\al_cq.c \ + ..\al_dm.c \ + ..\al_init.c \ + ..\al_mad.c \ + ..\al_mcast.c \ + ..\al_mgr_shared.c \ + ..\al_mr_shared.c \ + ..\al_mw.c \ + ..\al_pd.c \ + ..\al_qp.c \ + ..\al_query.c \ + ..\al_reg_svc.c \ + ..\al_res_mgr.c \ + ..\al_srq.c \ + ..\al_sub.c \ + ..\ib_common.c \ + ..\ib_statustext.c + +INCLUDES=..;..\..\..\inc;..\..\..\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS \ + -DEXPORT_AL_SYMBOLS + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib + +!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K" +# +# The driver is built in the Win2K build environment +# - use the library version of safe strings +# +TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib +!endif + + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ + -scan:..\al_debug.h \ + -func:AL_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:AL_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) +!ENDIF + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/core/al/kernel/al_ca_pnp.c b/branches/Ndi/core/al/kernel/al_ca_pnp.c new file mode 100644 index 00000000..649a63e7 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_ca_pnp.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + diff --git a/branches/Ndi/core/al/kernel/al_ca_pnp.h b/branches/Ndi/core/al/kernel/al_ca_pnp.h new file mode 100644 index 00000000..e17ec4bc --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_ca_pnp.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_AL_CA_PNP_H__) +#define __IB_AL_CA_PNP_H__ + +#endif /* __IB_AL_CA_PNP_H__ */ diff --git a/branches/Ndi/core/al/kernel/al_ci_ca.c b/branches/Ndi/core/al/kernel/al_ci_ca.c new file mode 100644 index 00000000..0ecc686d --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_ci_ca.c @@ -0,0 +1,520 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "al_ci_ca.h" +#include "al_verbs.h" +#include "al_cq.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_ci_ca.tmh" +#endif +#include "al_mad_pool.h" +#include "al_mgr.h" +#include "al_mr.h" +#include "al_pnp.h" +#include "al_mad_pool.h" + +#include "ib_common.h" + + +#define EVENT_POOL_MIN 4 +#define EVENT_POOL_MAX 0 +#define EVENT_POOL_GROW 1 + + +void +destroying_ci_ca( + IN al_obj_t* p_obj ); + +void +cleanup_ci_ca( + IN al_obj_t* p_obj ); + +void +free_ci_ca( + IN al_obj_t* p_obj ); + +void +ci_ca_comp_cb( + IN void *cq_context ); + +void +ci_ca_async_proc_cb( + IN struct _cl_async_proc_item *p_item ); + +void +ci_ca_async_event_cb( + IN const ib_event_rec_t* const p_event_record ); + + + +ib_api_status_t +create_ci_ca( + IN al_obj_t *p_parent_obj, + IN const ci_interface_t* p_ci ) +{ + ib_api_status_t status; + cl_status_t cl_status; + al_ci_ca_t *p_ci_ca; + + AL_ENTER( AL_DBG_CA ); + + CL_ASSERT( p_ci ); + + /* Allocate the CI CA. */ + p_ci_ca = (al_ci_ca_t*)cl_zalloc( sizeof( al_ci_ca_t ) ); + if( !p_ci_ca ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_zalloc failed\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the CI CA. */ + construct_al_obj( &p_ci_ca->obj, AL_OBJ_TYPE_CI_CA ); + cl_spinlock_construct( &p_ci_ca->attr_lock ); + cl_qlist_init( &p_ci_ca->ca_list ); + cl_qlist_init( &p_ci_ca->shmid_list ); + cl_qpool_construct( &p_ci_ca->event_pool ); + p_ci_ca->verbs = *p_ci; + + cl_status = cl_spinlock_init( &p_ci_ca->attr_lock ); + if( cl_status != CL_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_spinlock_init failed, status = 0x%x.\n", + ib_convert_cl_status( cl_status ) ) ); + return ib_convert_cl_status( cl_status ); + } + + /* Create a pool of items to report asynchronous events. */ + cl_status = cl_qpool_init( &p_ci_ca->event_pool, EVENT_POOL_MIN, + EVENT_POOL_MAX, EVENT_POOL_GROW, sizeof( event_item_t ), NULL, + NULL, p_ci_ca ); + if( cl_status != CL_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_qpool_init failed, status = 0x%x.\n", + ib_convert_cl_status( cl_status ) ) ); + return ib_convert_cl_status( cl_status ); + } + + status = init_al_obj( &p_ci_ca->obj, p_ci_ca, FALSE, + destroying_ci_ca, cleanup_ci_ca, free_ci_ca ); + if( status != IB_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed, status = 0x%x.\n", status) ); + return status; + } + status = attach_al_obj( p_parent_obj, &p_ci_ca->obj ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb; + + /* Open the CI CA. 
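+ * The completion and asynchronous event callbacks registered here
+ * (ci_ca_comp_cb and ci_ca_async_event_cb) funnel every CQ completion and
+ * CA async event from the channel interface back through this CI CA object.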
 */ + status = p_ci_ca->verbs.open_ca( p_ci_ca->verbs.guid, ci_ca_comp_cb, + ci_ca_async_event_cb, p_ci_ca, &p_ci_ca->h_ci_ca ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("open_ca failed, status = 0x%x.\n", status) ); + return status; + } + + /* Increase the max timeout for the CI CA to handle driver unload. */ + set_al_obj_timeout( &p_ci_ca->obj, AL_MAX_TIMEOUT_MS ); + + /* + * Register ourselves with the AL manager, so that the open call below + * will succeed. + */ + add_ci_ca( p_ci_ca ); + + /* Open the AL CA. */ + status = ib_open_ca( gh_al, p_ci_ca->verbs.guid, ca_event_cb, p_ci_ca, + &p_ci_ca->h_ca ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_open_ca failed, status = 0x%x.\n", status) ); + return status; + } + + /* Get a list of the port GUIDs on this CI CA. */ + status = get_port_info( p_ci_ca ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("get_port_info failed, status = 0x%x.\n", status) ); + return status; + } + + /* Allocate a PD for use by AL itself. */ + status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_SQP, p_ci_ca, + &p_ci_ca->h_pd ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_alloc_pd failed, status = 0x%x.\n", status) ); + return status; + } + + /* Allocate a PD alias for use by AL itself. */ + status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca, + &p_ci_ca->h_pd_alias ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_alloc_pd alias failed, status = 0x%x.\n", status) ); + return status; + } + + /* Register the global MAD pool on this CA. */ + status = ib_reg_mad_pool( gh_mad_pool, p_ci_ca->h_pd, &p_ci_ca->pool_key ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_mad_pool failed, status = 0x%x.\n", status) ); + return status; + } + + /* + * Notify the PnP manager that a CA has been added. + * NOTE: PnP Manager must increment the CA reference count. + */ + status = pnp_ca_event( p_ci_ca, IB_PNP_CA_ADD ); + if( status != IB_SUCCESS ) + { + /* Destroy the CA */ + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("pnp_ca_event failed, status = 0x%x.\n", status) ); + return status; + } + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_ci_ca->obj ); + + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + + +void +destroying_ci_ca( + IN al_obj_t* p_obj ) +{ + al_ci_ca_t *p_ci_ca; + + CL_ASSERT( p_obj ); + p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj ); + + /* + * Notify the PnP manager that this CA is being removed. + * NOTE: PnP Manager must decrement the CA reference count. + */ + pnp_ca_event( p_ci_ca, IB_PNP_CA_REMOVE ); + + /* + * We queue a request to the asynchronous processing manager to close + * the CA after the PNP remove CA event has been delivered. This prevents + * the ib_close_ca() call from immediately removing resources (PDs, QPs) + * that are in use by clients waiting on the remove CA event.
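+ * Deferring the close also keeps the CA handle valid while those clients
+ * are still unwinding their remove-event callbacks.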
+ */ + if( p_ci_ca->h_ca ) + cl_async_proc_queue( gp_async_pnp_mgr, &p_ci_ca->dereg_async_item ); +} + + + +void +ci_ca_async_proc_cb( + IN struct _cl_async_proc_item *p_item ) +{ + al_ci_ca_t *p_ci_ca; + + p_ci_ca = PARENT_STRUCT( p_item, al_ci_ca_t, dereg_async_item ); + + /* Release all AL resources acquired by the CI CA. */ + ib_close_ca( p_ci_ca->h_ca, NULL ); +} + + + +void +cleanup_ci_ca( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + al_ci_ca_t *p_ci_ca; + + AL_ENTER( AL_DBG_CA ); + + CL_ASSERT( p_obj ); + p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj ); + + CL_ASSERT( cl_is_qlist_empty( &p_ci_ca->shmid_list ) ); + + if( p_ci_ca->h_ci_ca ) + { + remove_ci_ca( p_ci_ca ); + status = p_ci_ca->verbs.close_ca( p_ci_ca->h_ci_ca ); + CL_ASSERT( status == IB_SUCCESS ); + } + + AL_EXIT( AL_DBG_CA ); +} + + + +void +ci_ca_comp_cb( + IN void *cq_context ) +{ + ib_cq_handle_t h_cq = (ib_cq_handle_t)cq_context; + + if( h_cq->h_wait_obj ) + KeSetEvent( h_cq->h_wait_obj, IO_NETWORK_INCREMENT, FALSE ); + else + h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context ); +} + + + +/* + * CI CA asynchronous event callback. + */ +void +ci_ca_async_event_cb( + IN const ib_event_rec_t* const p_event_record ) +{ + ib_async_event_rec_t event_rec; + + AL_ENTER( AL_DBG_CA ); + + CL_ASSERT( p_event_record ); + + event_rec.code = p_event_record->type; + event_rec.context = p_event_record->context; + event_rec.vendor_specific = p_event_record->vendor_specific; + + ci_ca_async_event( &event_rec ); + + AL_EXIT( AL_DBG_CA ); +} + + + +/* + * Insert a new shmid tracking structure into the CI CA's list. + */ +void +add_shmid( + IN al_ci_ca_t* const p_ci_ca, + IN struct _al_shmid *p_shmid ) +{ + CL_ASSERT( p_ci_ca && p_shmid ); + + p_shmid->obj.p_ci_ca = p_ci_ca; + + /* Insert the shmid structure into the shmid list. */ + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + cl_qlist_insert_head( &p_ci_ca->shmid_list, &p_shmid->list_item ); + cl_spinlock_release( &p_ci_ca->obj.lock ); +} + + + +ib_api_status_t +acquire_shmid( + IN al_ci_ca_t* const p_ci_ca, + IN int shmid, + OUT struct _al_shmid **pp_shmid ) +{ + al_shmid_t *p_shmid; + cl_list_item_t *p_list_item; + + /* Try to find the shmid. */ + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + for( p_list_item = cl_qlist_head( &p_ci_ca->shmid_list ); + p_list_item != cl_qlist_end( &p_ci_ca->shmid_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_shmid = PARENT_STRUCT( p_list_item, al_shmid_t, list_item ); + if( p_shmid->id == shmid ) + { + ref_al_obj( &p_shmid->obj ); + *pp_shmid = p_shmid; + break; + } + } + cl_spinlock_release( &p_ci_ca->obj.lock ); + + if( p_list_item == cl_qlist_end( &p_ci_ca->shmid_list ) ) + return IB_NOT_FOUND; + else + return IB_SUCCESS; +} + + + +void +release_shmid( + IN struct _al_shmid *p_shmid ) +{ + al_ci_ca_t *p_ci_ca; + int32_t ref_cnt; + + CL_ASSERT( p_shmid ); + + p_ci_ca = p_shmid->obj.p_ci_ca; + + cl_spinlock_acquire( &p_ci_ca->obj.lock ); + + /* Dereference the shmid. */ + ref_cnt = deref_al_obj( &p_shmid->obj ); + + /* If the shmid is no longer in active use, remove it. */ + if( ref_cnt == 1 ) + cl_qlist_remove_item( &p_ci_ca->shmid_list, &p_shmid->list_item ); + + cl_spinlock_release( &p_ci_ca->obj.lock ); + + /* Destroy the shmid if it is not needed. 
 */ + if( ref_cnt == 1 ) + { + ref_al_obj( &p_shmid->obj ); + p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL ); + } +} + + + +ib_api_status_t +ib_ci_call( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op ) +{ + return ci_call( h_ca, handle_array, num_handles, p_ci_op, NULL ); +} + + + +ib_api_status_t +ci_call( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN ci_umv_buf_t* const p_umv_buf OPTIONAL ) +{ + void* __ptr64 * p_handle_array; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + if( !p_ci_op ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + p_handle_array = NULL; + if ( num_handles ) + { + p_handle_array = cl_zalloc( sizeof(void* __ptr64) * num_handles ); + if( !p_handle_array ) + return IB_INSUFFICIENT_MEMORY; + + status = al_convert_to_ci_handles( p_handle_array, handle_array, + num_handles ); + + if( status != IB_SUCCESS ) + { + cl_free( p_handle_array ); + return status; + } + } + + if( h_ca->obj.p_ci_ca->verbs.vendor_call ) + { + status = verbs_ci_call( + h_ca, p_handle_array, num_handles, p_ci_op, p_umv_buf ); + } + else + { + status = IB_UNSUPPORTED; + } + + if ( num_handles ) + cl_free( p_handle_array ); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + +DEVICE_OBJECT* +get_ca_dev( + IN const ib_ca_handle_t h_ca ) +{ + ASSERT( h_ca ); + + ObReferenceObject( h_ca->obj.p_ci_ca->verbs.p_hca_dev ); + return h_ca->obj.p_ci_ca->verbs.p_hca_dev; +} \ No newline at end of file diff --git a/branches/Ndi/core/al/kernel/al_cm_cep.c b/branches/Ndi/core/al/kernel/al_cm_cep.c new file mode 100644 index 00000000..51ea1c31 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_cm_cep.c @@ -0,0 +1,6082 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + + +#include +#include +#include +#include +#include +#include "al_common.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_cm_cep.tmh" +#endif +#include "ib_common.h" +#include "al_mgr.h" +#include "al_ca.h" +#include "al.h" +#include "al_mad.h" +#include "al_qp.h" + + +/* + * The vector object uses a list item at the front of the buffers + * it allocates. Take the list item into account so that allocations + * are for full page sizes. + */ +#define CEP_CID_MIN \ + ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t)) +#define CEP_CID_GROW \ + ((PAGE_SIZE - sizeof(cl_list_item_t)) / sizeof(cep_cid_t)) + +/* + * We reserve the upper byte of the connection ID as a revolving counter so + * that connections that are retried by the client change connection ID. + * This counter is never zero, so it is OK to use all CIDs since we will never + * have a full CID (base + counter) that is zero. + * See the IB spec, section 12.9.8.7 for details about REJ retry. + */ +#define CEP_MAX_CID (0x00FFFFFF) +#define CEP_MAX_CID_MASK (0x00FFFFFF) + +#define CEP_MAD_SQ_DEPTH (128) +#define CEP_MAD_RQ_DEPTH (1) /* ignored. */ +#define CEP_MAD_SQ_SGE (1) +#define CEP_MAD_RQ_SGE (1) /* ignored. */ + + +/* Global connection manager object. */ +typedef struct _al_cep_mgr +{ + al_obj_t obj; + + cl_qmap_t port_map; + + KSPIN_LOCK lock; + + /* Bitmap of CEPs, indexed by CID. */ + cl_vector_t cid_vector; + uint32_t free_cid; + + /* List of active listens. */ + cl_rbmap_t listen_map; + + /* Map of CEP by remote CID and CA GUID. */ + cl_rbmap_t conn_id_map; + /* Map of CEP by remote QPN, used for stale connection matching. */ + cl_rbmap_t conn_qp_map; + + NPAGED_LOOKASIDE_LIST cep_pool; + NPAGED_LOOKASIDE_LIST req_pool; + + /* + * Periodically walk the list of connections in the time wait state + * and flush them as appropriate. + */ + cl_timer_t timewait_timer; + cl_qlist_t timewait_list; + + ib_pnp_handle_t h_pnp; + +} al_cep_mgr_t; + + +/* Per-port CM object. */ +typedef struct _cep_port_agent +{ + al_obj_t obj; + + cl_map_item_t item; + + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ib_qp_handle_t h_qp; + ib_pool_key_t pool_key; + ib_mad_svc_handle_t h_mad_svc; + + net64_t port_guid; + uint8_t port_num; + net16_t base_lid; + +} cep_agent_t; + + +/* + * Note: the REQ, REP, and LAP values must be 1, 2, and 4 respectively. + * This allows shifting 1 << msg_mraed from an MRA to figure out for what + * message the MRA was sent for. + */ +#define CEP_STATE_RCVD 0x10000000 +#define CEP_STATE_SENT 0x20000000 +#define CEP_STATE_MRA 0x01000000 +#define CEP_STATE_REQ 0x00000001 +#define CEP_STATE_REP 0x00000002 +#define CEP_STATE_LAP 0x00000004 +#define CEP_STATE_RTU 0x00000008 +#define CEP_STATE_DREQ 0x00000010 +#define CEP_STATE_DREP 0x00000020 +#define CEP_STATE_DESTROYING 0x00010000 +#define CEP_STATE_USER 0x00020000 + +#define CEP_MSG_MASK 0x000000FF +#define CEP_OP_MASK 0xF0000000 + +#define CEP_STATE_PREP 0x00100000 + +/* States match CM state transition diagrams from spec. 
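+ * Composite states are formed by OR-ing the message and operation bits
+ * defined above, e.g. CEP_STATE_REQ_SENT == (CEP_STATE_REQ | CEP_STATE_SENT),
+ * which is what allows (1 << msg_mraed) from a received MRA to be tested
+ * directly against the current state.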
*/ +typedef enum _cep_state +{ + CEP_STATE_IDLE, + CEP_STATE_LISTEN, + CEP_STATE_ESTABLISHED, + CEP_STATE_TIMEWAIT, + CEP_STATE_SREQ_SENT, + CEP_STATE_SREQ_RCVD, + CEP_STATE_ERROR, + CEP_STATE_DESTROY = CEP_STATE_DESTROYING, + CEP_STATE_PRE_REQ = CEP_STATE_IDLE | CEP_STATE_PREP, + CEP_STATE_REQ_RCVD = CEP_STATE_REQ | CEP_STATE_RCVD, + CEP_STATE_PRE_REP = CEP_STATE_REQ_RCVD | CEP_STATE_PREP, + CEP_STATE_REQ_SENT = CEP_STATE_REQ | CEP_STATE_SENT, + CEP_STATE_REQ_MRA_RCVD = CEP_STATE_REQ_SENT | CEP_STATE_MRA, + CEP_STATE_REQ_MRA_SENT = CEP_STATE_REQ_RCVD | CEP_STATE_MRA, + CEP_STATE_PRE_REP_MRA_SENT = CEP_STATE_REQ_MRA_SENT | CEP_STATE_PREP, + CEP_STATE_REP_RCVD = CEP_STATE_REP | CEP_STATE_RCVD, + CEP_STATE_REP_SENT = CEP_STATE_REP | CEP_STATE_SENT, + CEP_STATE_REP_MRA_RCVD = CEP_STATE_REP_SENT | CEP_STATE_MRA, + CEP_STATE_REP_MRA_SENT = CEP_STATE_REP_RCVD | CEP_STATE_MRA, + CEP_STATE_LAP_RCVD = CEP_STATE_LAP | CEP_STATE_RCVD, + CEP_STATE_PRE_APR = CEP_STATE_LAP_RCVD | CEP_STATE_PREP, + CEP_STATE_LAP_SENT = CEP_STATE_LAP | CEP_STATE_SENT, + CEP_STATE_LAP_MRA_RCVD = CEP_STATE_LAP_SENT | CEP_STATE_MRA, + CEP_STATE_LAP_MRA_SENT = CEP_STATE_LAP_RCVD | CEP_STATE_MRA, + CEP_STATE_PRE_APR_MRA_SENT = CEP_STATE_LAP_MRA_SENT | CEP_STATE_PREP, + CEP_STATE_DREQ_SENT = CEP_STATE_DREQ | CEP_STATE_SENT, + CEP_STATE_DREQ_RCVD = CEP_STATE_DREQ | CEP_STATE_RCVD, + CEP_STATE_DREQ_DESTROY = CEP_STATE_DREQ_SENT | CEP_STATE_DESTROYING + +} cep_state_t; + + +/* Active side CEP state transitions: +* al_create_cep -> IDLE +* al_cep_pre_req -> PRE_REQ +* al_cep_send_req -> REQ_SENT +* Recv REQ MRA -> REQ_MRA_RCVD +* Recv REP -> REP_RCVD +* al_cep_mra -> REP_MRA_SENT +* al_cep_rtu -> ESTABLISHED +* +* Passive side CEP state transitions: +* al_create_cep -> IDLE +* Recv REQ -> REQ_RCVD +* al_cep_mra* -> REQ_MRA_SENT +* al_cep_pre_rep -> PRE_REP +* al_cep_mra* -> PRE_REP_MRA_SENT +* al_cep_send_rep -> REP_SENT +* Recv RTU -> ESTABLISHED +* +* *al_cep_mra can only be called once - either before or after PRE_REP. +*/ + +typedef struct _al_kcep_av +{ + ib_av_attr_t attr; + net64_t port_guid; + uint16_t pkey_index; + +} kcep_av_t; + + +typedef struct _al_kcep +{ + net32_t cid; + void* context; + + struct _cep_cid *p_cid; + + net64_t sid; + + /* Port guid for filtering incoming requests. */ + net64_t port_guid; + + uint8_t* __ptr64 p_cmp_buf; + uint8_t cmp_offset; + uint8_t cmp_len; + + boolean_t p2p; + + /* Used to store connection structure with owning AL instance. */ + cl_list_item_t al_item; + + /* Flag to indicate whether a user is processing events. */ + boolean_t signalled; + + /* Destroy callback. */ + ib_pfn_destroy_cb_t pfn_destroy_cb; + + ib_mad_element_t *p_mad_head; + ib_mad_element_t *p_mad_tail; + al_pfn_cep_cb_t pfn_cb; + + IRP *p_irp; + + /* MAP item for finding listen CEPs. */ + cl_rbmap_item_t listen_item; + + /* Map item for finding CEPs based on remote comm ID & CA GUID. */ + cl_rbmap_item_t rem_id_item; + + /* Map item for finding CEPs based on remote QP number. */ + cl_rbmap_item_t rem_qp_item; + + /* Communication ID's for the connection. */ + net32_t local_comm_id; + net32_t remote_comm_id; + + net64_t local_ca_guid; + net64_t remote_ca_guid; + + /* Remote QP, used for stale connection checking. */ + net32_t remote_qpn; + + /* Parameters to format QP modification structure. */ + net32_t sq_psn; + net32_t rq_psn; + uint8_t resp_res; + uint8_t init_depth; + uint8_t rnr_nak_timeout; + + /* + * Local QP number, used for the "additional check" required + * of the DREQ. 
*/ + net32_t local_qpn; + + /* PKEY to make sure a LAP is on the same partition. */ + net16_t pkey; + + /* Initiator depth as received in the REQ. */ + uint8_t req_init_depth; + + /* + * Primary and alternate path info, used to create the address vectors for + * sending MADs, to locate the port CM agent to use for outgoing sends, + * and for creating the address vectors for transitioning QPs. + */ + kcep_av_t av[2]; + uint8_t idx_primary; + + /* Temporary AV and CEP port GUID used when processing LAP. */ + kcep_av_t alt_av; + uint8_t alt_2pkt_life; + + /* Maximum packet lifetime * 2 of any path used on a connection. */ + uint8_t max_2pkt_life; + /* Given by the REP, used for alternate path setup. */ + uint8_t target_ack_delay; + /* Stored to help calculate the local ACK delay in the LAP. */ + uint8_t local_ack_delay; + + /* Volatile to allow using atomic operations for state checks. */ + cep_state_t state; + + /* + * Flag that indicates whether a connection took the active role during + * establishment. + */ + boolean_t was_active; + + /* + * Handle to the sent MAD, used for cancelling. We store the handle to + * the mad service so that we can properly cancel. This should not be a + * problem since all outstanding sends should be completed before the + * mad service completes its destruction and the handle becomes invalid. + */ + ib_mad_svc_handle_t h_mad_svc; + ib_mad_element_t *p_send_mad; + + /* Number of outstanding MADs. Delays destruction of the CEP. */ + atomic32_t ref_cnt; + + /* MAD transaction ID to use when sending MADs. */ + uint64_t tid; + + /* Maximum retries per MAD. Set at REQ time, stored to retry LAP. */ + uint8_t max_cm_retries; + /* Timeout value, in milliseconds. Set at REQ time, stored to retry LAP. */ + uint32_t retry_timeout; + + /* Timer that will be signalled when the CEP exits timewait. */ + KTIMER timewait_timer; + LARGE_INTEGER timewait_time; + cl_list_item_t timewait_item; + + /* + * Pointer to a formatted MAD. The pre_req, pre_rep and pre_apr calls + * allocate and format the MAD, and the send_req, send_rep and send_apr + * calls send it. + */ + ib_mad_element_t *p_mad; + + /* Cache the last MAD sent for retransmission. */ + union _mads + { + ib_mad_t hdr; + mad_cm_mra_t mra; + mad_cm_rtu_t rtu; + mad_cm_drep_t drep; + + } mads; + +} kcep_t; + + +/* Structures stored in the CID vector. */ +typedef struct _cep_cid +{ + /* Owning AL handle. NULL if invalid. */ + ib_al_handle_t h_al; + /* Pointer to CEP, or index of next free entry if h_al is NULL. */ + kcep_t *p_cep; + /* For REJ retry support. */ + uint8_t modifier; + +} cep_cid_t; + + +/* Global instance of the CM agent.
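+ *
+ * Its cid_vector doubles as the CID free list: a free cep_cid_t has a
+ * NULL h_al, and its p_cep field holds the index of the next free slot
+ * (see __get_lcid below).  A sketch of how a full comm ID relates to a
+ * vector slot, assuming the modifier occupies the reserved upper byte:
+ *
+ *   cid   = (modifier << 24) | (index & CEP_MAX_CID_MASK);
+ *   index = cid & CEP_MAX_CID_MASK;   -- as done in __lookup_cep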
*/ +al_cep_mgr_t *gp_cep_mgr = NULL; + + +static ib_api_status_t +__format_drep( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT mad_cm_drep_t* const p_drep ); + +static ib_api_status_t +__cep_queue_mad( + IN kcep_t* const p_cep, + IN ib_mad_element_t* p_mad ); + +static inline void +__process_cep( + IN kcep_t* const p_cep ); + +static inline uint32_t +__calc_mad_timeout( + IN const uint8_t pkt_life ); + +static inline void +__calc_timewait( + IN kcep_t* const p_cep ); + +static kcep_t* +__create_cep( void ); + +static int32_t +__cleanup_cep( + IN kcep_t* const p_cep ); + +static void +__destroy_cep( + IN kcep_t* const p_cep ); + +static inline void +__bind_cep( + IN kcep_t* const p_cep, + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void* __ptr64 context ); + +static inline void +__unbind_cep( + IN kcep_t* const p_cep ); + +static void +__pre_destroy_cep( + IN kcep_t* const p_cep ); + +static kcep_t* +__lookup_by_id( + IN net32_t remote_comm_id, + IN net64_t remote_ca_guid ); + +static kcep_t* +__lookup_listen( + IN net64_t sid, + IN net64_t port_guid, + IN void *p_pdata ); + +static inline kcep_t* +__lookup_cep( + IN ib_al_handle_t h_al OPTIONAL, + IN net32_t cid ); + +static inline kcep_t* +__insert_cep( + IN kcep_t* const p_new_cep ); + +static inline void +__remove_cep( + IN kcep_t* const p_cep ); + +static inline void +__insert_timewait( + IN kcep_t* const p_cep ); + +static ib_api_status_t +__cep_get_mad( + IN kcep_t* const p_cep, + IN net16_t attr_id, + OUT cep_agent_t** const pp_port_cep, + OUT ib_mad_element_t** const pp_mad ); + +static ib_api_status_t +__cep_send_mad( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ); + +/* Returns the CEP agent for the port matching the specified GID, LID, and PKEY. */ +static cep_agent_t* +__find_port_cep( + IN const ib_gid_t* const p_gid, + IN const net16_t lid, + IN const net16_t pkey, + OUT uint16_t* const p_pkey_index ); + +static cep_cid_t* +__get_lcid( + OUT net32_t* const p_cid ); + +static void +__process_cep_send_comp( + IN cl_async_proc_item_t *p_item ); + + +/****************************************************************************** +* Per-port CEP agent +******************************************************************************/ + + +static inline void +__format_mad_hdr( + IN ib_mad_t* const p_mad, + IN const kcep_t* const p_cep, + IN net16_t attr_id ) +{ + p_mad->base_ver = 1; + p_mad->mgmt_class = IB_MCLASS_COMM_MGMT; + p_mad->class_ver = IB_MCLASS_CM_VER_2; + p_mad->method = IB_MAD_METHOD_SEND; + p_mad->status = 0; + p_mad->class_spec = 0; + p_mad->trans_id = p_cep->tid; + p_mad->attr_id = attr_id; + p_mad->resv = 0; + p_mad->attr_mod = 0; +} + + +/* Consumes the input MAD.
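+ *
+ * The REJ's msg_rejected field records which message is being rejected;
+ * the state switch below reduces to this mapping:
+ *
+ *   REQ outstanding (REQ_RCVD, REQ_MRA_SENT, PRE_REP...)  -> 0 (REQ)
+ *   REP outstanding (REP_RCVD, REP_MRA_SENT)              -> 1 (REP)
+ *   anything else (only valid for IB_REJ_TIMEOUT)         -> 2 (other)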
*/ +static void +__reject_mad( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad, + IN ib_rej_status_t reason ) +{ + mad_cm_rej_t *p_rej; + + AL_ENTER( AL_DBG_CM ); + + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_REJ_ATTR_ID ); + + p_rej->local_comm_id = p_cep->local_comm_id; + p_rej->remote_comm_id = p_cep->remote_comm_id; + p_rej->reason = reason; + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + conn_rej_set_msg_rejected( 0, p_rej ); + break; + + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + conn_rej_set_msg_rejected( 1, p_rej ); + break; + + default: + CL_ASSERT( reason == IB_REJ_TIMEOUT ); + conn_rej_set_msg_rejected( 2, p_rej ); + break; + } + + conn_rej_clr_rsvd_fields( p_rej ); + __cep_send_mad( p_port_cep, p_mad ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__reject_timeout( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN const ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + ib_mad_element_t *p_rej_mad; + ib_mad_t *p_mad_buf; + ib_grh_t *p_grh; + + AL_ENTER( AL_DBG_CM ); + + status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_rej_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_mad returned %s\n", ib_get_err_str( status )) ); + return; + } + + /* Save the buffer pointers from the new element. */ + p_mad_buf = p_rej_mad->p_mad_buf; + p_grh = p_rej_mad->p_grh; + + /* + * Copy the input MAD element to the reject - this gives us + * all appropriate addressing information. + */ + cl_memcpy( p_rej_mad, p_mad, sizeof(ib_mad_element_t) ); + cl_memcpy( p_grh, p_mad->p_grh, sizeof(ib_grh_t) ); + + /* Restore the buffer pointers now that the copy is complete. */ + p_rej_mad->p_mad_buf = p_mad_buf; + p_rej_mad->p_grh = p_grh; + + status = conn_rej_set_pdata( NULL, 0, (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Copy the local CA GUID into the ARI. */ + switch( p_mad->p_mad_buf->attr_id ) + { + case CM_REQ_ATTR_ID: + status = conn_rej_set_ari( + (uint8_t*)&p_cep->local_ca_guid, + sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT ); + break; + + case CM_REP_ATTR_ID: + status = conn_rej_set_ari( + (uint8_t*)&p_cep->local_ca_guid, + sizeof(p_cep->local_ca_guid), (mad_cm_rej_t*)p_mad_buf ); + CL_ASSERT( status == IB_SUCCESS ); + __reject_mad( p_port_cep, p_cep, p_rej_mad, IB_REJ_TIMEOUT ); + break; + + default: + CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID ); + ib_put_mad( p_rej_mad ); + return; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__reject_req( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad, + IN const ib_rej_status_t reason ) +{ + mad_cm_req_t *p_req; + mad_cm_rej_t *p_rej; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_mad ); + CL_ASSERT( reason != 0 ); + + p_req = (mad_cm_req_t*)p_mad->p_mad_buf; + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + /* + * Format the reject information, overwriting the REQ data and send + * the response. 
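+ * Note that p_req and p_rej alias the same MAD buffer, so the only REQ
+ * field still needed - local_comm_id - is read before the REJ stores
+ * below overwrite it.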
+ */ + p_rej->hdr.attr_id = CM_REJ_ATTR_ID; + p_rej->remote_comm_id = p_req->local_comm_id; + p_rej->local_comm_id = 0; + conn_rej_set_msg_rejected( 0, p_rej ); + p_rej->reason = reason; + conn_rej_set_ari( NULL, 0, p_rej ); + conn_rej_set_pdata( NULL, 0, p_rej ); + conn_rej_clr_rsvd_fields( p_rej ); + + p_mad->retry_cnt = 0; + p_mad->send_opt = 0; + p_mad->timeout_ms = 0; + p_mad->resp_expected = FALSE; + + __cep_send_mad( p_port_cep, p_mad ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__format_req_av( + IN kcep_t* const p_cep, + IN const mad_cm_req_t* const p_req, + IN const uint8_t idx ) +{ + cep_agent_t *p_port_cep; + const req_path_info_t *p_path; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_req ); + + cl_memclr( &p_cep->av[idx], sizeof(kcep_av_t) ); + + p_path = &((&p_req->primary_path)[idx]); + + p_port_cep = __find_port_cep( &p_path->remote_gid, + p_path->remote_lid, p_req->pkey, &p_cep->av[idx].pkey_index ); + if( !p_port_cep ) + { + if( !idx ) + p_cep->local_ca_guid = 0; + AL_EXIT( AL_DBG_CM ); + return; + } + + if( !idx ) + p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid; + + /* Check that CA GUIDs match if formatting the alternate path. */ + if( idx && + p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + + /* + * Pkey indeces must match if formating the alternat path - the QP + * modify structure only allows for a single PKEY index to be specified. + */ + if( idx && + p_cep->av[0].pkey_index != p_cep->av[1].pkey_index ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + + p_cep->av[idx].port_guid = p_port_cep->port_guid; + p_cep->av[idx].attr.port_num = p_port_cep->port_num; + + p_cep->av[idx].attr.sl = conn_req_path_get_svc_lvl( p_path ); + p_cep->av[idx].attr.dlid = p_path->local_lid; + + if( !conn_req_path_get_subn_lcl( p_path ) ) + { + p_cep->av[idx].attr.grh_valid = TRUE; + p_cep->av[idx].attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, p_path->traffic_class, conn_req_path_get_flow_lbl( p_path ) ); + p_cep->av[idx].attr.grh.hop_limit = p_path->hop_limit; + p_cep->av[idx].attr.grh.dest_gid = p_path->local_gid; + p_cep->av[idx].attr.grh.src_gid = p_path->remote_gid; + } + else + { + p_cep->av[idx].attr.grh_valid = FALSE; + } + p_cep->av[idx].attr.static_rate = conn_req_path_get_pkt_rate( p_path ); + p_cep->av[idx].attr.path_bits = + (uint8_t)(p_path->remote_lid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. + */ + p_cep->av[idx].attr.conn.path_mtu = conn_req_get_mtu( p_req ); + p_cep->av[idx].attr.conn.local_ack_timeout = + conn_req_path_get_lcl_ack_timeout( p_path ); + p_cep->av[idx].attr.conn.seq_err_retry_cnt = + conn_req_get_retry_cnt( p_req ); + p_cep->av[idx].attr.conn.rnr_retry_cnt = + conn_req_get_rnr_retry_cnt( p_req ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * + Validates the path information provided in the REQ and stores the + * associated CA attributes and port indeces. + * + Transitions a connection object from active to passive in the peer case. + * + Sets the path information in the connection and sets the CA GUID + * in the REQ callback record. + */ +static void +__save_wire_req( + IN OUT kcep_t* const p_cep, + IN OUT mad_cm_req_t* const p_req ) +{ + AL_ENTER( AL_DBG_CM ); + + p_cep->state = CEP_STATE_REQ_RCVD; + p_cep->was_active = FALSE; + + p_cep->sid = p_req->sid; + + /* Store pertinent information in the connection. 
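+ *
+ * The response-timeout fields on the wire use IBA's exponential
+ * encoding (4.096 us * 2^n).  __calc_mad_timeout, defined elsewhere in
+ * this file, converts such a code to milliseconds for the MAD service;
+ * a minimal sketch of that conversion, assuming round-up to >= 1 ms:
+ *
+ *   uint64_t ns = 4096ULL << pkt_life;   -- 4.096 us * 2^pkt_life
+ *   return (uint32_t)(ns / 1000000) + 1;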
*/ + p_cep->remote_comm_id = p_req->local_comm_id; + p_cep->remote_ca_guid = p_req->local_ca_guid; + + p_cep->remote_qpn = conn_req_get_lcl_qpn( p_req ); + p_cep->local_qpn = 0; + + p_cep->retry_timeout = + __calc_mad_timeout( conn_req_get_lcl_resp_timeout( p_req ) ); + + /* Store the retry count. */ + p_cep->max_cm_retries = conn_req_get_max_cm_retries( p_req ); + + /* + * Copy the paths from the req_rec into the connection for + * future use. Note that if the primary path is invalid, + * the REP will fail. + */ + __format_req_av( p_cep, p_req, 0 ); + + if( p_req->alternate_path.local_lid ) + __format_req_av( p_cep, p_req, 1 ); + else + cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) ); + + p_cep->idx_primary = 0; + + /* Store the maximum packet lifetime, used to calculate timewait. */ + p_cep->max_2pkt_life = conn_req_path_get_lcl_ack_timeout( &p_req->primary_path ); + p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, + conn_req_path_get_lcl_ack_timeout( &p_req->alternate_path ) ); + + /* + * Make sure the target ack delay is cleared - the above + * "packet life" includes it. + */ + p_cep->target_ack_delay = 0; + + /* Store the requested initiator depth. */ + p_cep->req_init_depth = conn_req_get_init_depth( p_req ); + + /* + * Store the provided responder resources. These turn into the local + * QP's initiator depth. + */ + p_cep->init_depth = conn_req_get_resp_res( p_req ); + + p_cep->sq_psn = conn_req_get_starting_psn( p_req ); + + p_cep->tid = p_req->hdr.trans_id; + /* copy mad info for cm handoff */ + /* TODO: Do need to support CM handoff? */ + //p_cep->mads.req = *p_req; + + AL_EXIT( AL_DBG_CM ); +} + + +/* Must be called with the CEP lock held. */ +static void +__repeat_mad( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_cep ); + CL_ASSERT( p_mad ); + + /* Repeat the last mad sent for the connection. */ + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_SENT: /* resend MRA(REQ) */ + case CEP_STATE_REP_MRA_SENT: /* resend MRA(REP) */ + case CEP_STATE_LAP_MRA_SENT: /* resend MRA(LAP) */ + case CEP_STATE_ESTABLISHED: /* resend RTU */ + case CEP_STATE_TIMEWAIT: /* resend the DREP */ + cl_memcpy( p_mad->p_mad_buf, &p_cep->mads, MAD_BLOCK_SIZE ); + p_mad->send_context1 = NULL; + p_mad->send_context2 = NULL; + __cep_send_mad( p_port_cep, p_mad ); + break; + + default: + /* Return the MAD to the mad pool */ + ib_put_mad( p_mad ); + break; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__process_rej( + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rej_t *p_rej; + + AL_ENTER( AL_DBG_CM ); + + ASSERT( p_cep ); + ASSERT( p_mad ); + ASSERT( p_mad->p_mad_buf ); + + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("Request rejected reason - %d.\n", p_rej->reason) ); + + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + /* + * Ignore rejects with the status set to IB_REJ_INVALID_SID. We will + * continue to retry (up to max_cm_retries) to connect to the remote + * side. This is required to support peer-to-peer connections. + */ + if( p_cep->p2p && p_rej->reason == IB_REJ_INVALID_SID ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("Request rejected (invalid SID) - retrying.\n") ); + goto err1; + } + + /* Fall through */ + case CEP_STATE_REP_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_MRA_RCVD: + /* Cancel any outstanding MAD. 
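+ *
+ * How far establishment got decides the disposition below:
+ *
+ *   REQ/REP still in flight  -> __remove_cep(), state = IDLE (no timewait)
+ *   ESTABLISHED or LAP_xxx   -> state = TIMEWAIT, __insert_timewait()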
*/ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Fall through */ + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + if( p_cep->state & CEP_STATE_PREP ) + { + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + } + /* Abort connection establishment. No transition to timewait. */ + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + if( p_cep->state & CEP_STATE_PREP ) + { + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + } + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + /* Ignore the REJ. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REJ received in invalid state.\n") ); +err1: + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return IB_NO_MATCH; + } + + status = __cep_queue_mad( p_cep, p_mad ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__process_stale( + IN kcep_t* const p_cep ) +{ + ib_api_status_t status; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + mad_cm_rej_t *p_rej; + + status = __cep_get_mad( p_cep, CM_REJ_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + return status; + + p_rej = ib_get_mad_buf( p_mad ); + + conn_rej_set_ari( NULL, 0, p_rej ); + conn_rej_set_pdata( NULL, 0, p_rej ); + + p_rej->local_comm_id = p_cep->remote_comm_id; + p_rej->remote_comm_id = p_cep->local_comm_id; + p_rej->reason = IB_REJ_STALE_CONN; + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + conn_rej_set_msg_rejected( 0, p_rej ); + break; + + case CEP_STATE_REQ_SENT: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + conn_rej_set_msg_rejected( 1, p_rej ); + break; + + default: + conn_rej_set_msg_rejected( 2, p_rej ); + break; + } + conn_rej_clr_rsvd_fields( p_rej ); + + return __process_rej( p_cep, p_mad ); +} + + +static void +__req_handler( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status = IB_SUCCESS; + mad_cm_req_t *p_req; + kcep_t *p_cep, *p_new_cep, *p_stale_cep = NULL; + KLOCK_QUEUE_HANDLE hdl; + ib_rej_status_t reason; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_req = (mad_cm_req_t*)p_mad->p_mad_buf; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("REQ: comm_id (x%x) qpn (x%x) received\n", + p_req->local_comm_id, conn_req_get_lcl_qpn( p_req )) ); + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + + if( conn_req_get_qp_type( p_req ) > IB_QPT_UNRELIABLE_CONN ) + { + /* Reserved value. Reject. */ + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid transport type received.\n") ); + reason = IB_REJ_INVALID_XPORT; + goto reject; + } + + /* Match against pending connections using remote comm ID and CA GUID. */ + p_cep = __lookup_by_id( p_req->local_comm_id, p_req->local_ca_guid ); + if( p_cep ) + { + /* Already received the REQ. 
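+ *
+ * Disposition of the repeated REQ, per the switch below:
+ *
+ *   REQ_MRA_SENT        -> replay the cached MRA
+ *   TIMEWAIT / DESTROY  -> REJ with IB_REJ_STALE_CONN
+ *   any other state     -> drop it; normal retries cover the loss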
*/ + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_SENT: + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + case CEP_STATE_TIMEWAIT: + case CEP_STATE_DESTROY: + /* Send a reject. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("REQ received for connection in TIME_WAIT state.\n") ); + __reject_req( p_port_cep, p_mad, IB_REJ_STALE_CONN ); + break; + + default: + /* + * Let regular retries repeat the MAD. If our last message was + * dropped, resending only adds to the congestion. If it wasn't + * dropped, then the remote CM will eventually process it, and + * we'd just be adding traffic. + */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("Duplicate REQ received.\n") ); + ib_put_mad( p_mad ); + } + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); + return; + } + + /* + * Allocate a new CEP for the new request. This will + * prevent multiple identical REQs from queueing up for processing. + */ + p_new_cep = __create_cep(); + if( !p_new_cep ) + { + /* Reject the request for insufficient resources. */ + reason = IB_REJ_INSUF_RESOURCES; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_create_cep failed\nREJ sent for insufficient resources.\n") ); + goto reject; + } + + __save_wire_req( p_new_cep, p_req ); + + /* + * Match against listens using SID and compare data, also provide the receiving + * MAD service's port GUID so we can properly filter. + */ + p_cep = __lookup_listen( p_req->sid, p_port_cep->port_guid, p_req->pdata ); + if( p_cep ) + { + __bind_cep( p_new_cep, p_cep->p_cid->h_al, p_cep->pfn_cb, NULL ); + + /* Add the new CEP to the map so that repeated REQs match up. */ + p_stale_cep = __insert_cep( p_new_cep ); + if( p_stale_cep != p_new_cep ) + { + /* Duplicate - must be a stale connection. */ + reason = IB_REJ_STALE_CONN; + /* Fail the local stale CEP. */ + status = __process_stale( p_stale_cep ); + goto unbind; + } + + /* + * Queue the mad - the return value indicates whether we should + * invoke the callback. + */ + status = __cep_queue_mad( p_cep, p_mad ); + switch( status ) + { + case IB_SUCCESS: + case IB_PENDING: + p_mad->send_context1 = p_new_cep; + break; + + default: + reason = IB_REJ_INSUF_RESOURCES; + goto unbind; + } + } + else + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("No listens active!\n") ); + + /* Match against peer-to-peer requests using SID and compare data. */ + //p_cep = __lookup_peer(); + //if( p_cep ) + //{ + // p_mad->send_context2 = NULL; + // p_list_item = cl_qlist_find_from_head( &gp_cep_mgr->pending_list, + // __match_peer, p_req ); + // if( p_list_item != cl_qlist_end( &gp_cep_mgr->pending_list ) ) + // { + // KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + // p_conn = PARENT_STRUCT( p_list_item, kcep_t, map_item ); + // __peer_req( p_port_cep, p_conn, p_async_mad->p_mad ); + // cl_free( p_async_mad ); + // AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + // ("REQ matched a peer-to-peer request.\n") ); + // return; + // } + // reason = IB_REJ_INVALID_SID; + // goto free; + //} + //else + { + /* No match found. Reject. */ + reason = IB_REJ_INVALID_SID; + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REQ received but no match found.\n") ); + goto cleanup; + } + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + /* Process any queued MADs for the CEP. 
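+ *
+ * __cep_queue_mad returns IB_SUCCESS when the caller should drive event
+ * delivery and IB_PENDING when the MAD was queued behind processing
+ * already in progress - hence only IB_SUCCESS triggers __process_cep
+ * below.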
*/ + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + +unbind: + __unbind_cep( p_new_cep ); + +cleanup: + /* + * Move the CEP in the idle state so that we don't send a reject + * for it when cleaning up. Also clear the RQPN and RCID so that + * we don't try to remove it from our maps (since it isn't inserted). + */ + p_new_cep->state = CEP_STATE_IDLE; + p_new_cep->remote_comm_id = 0; + p_new_cep->remote_qpn = 0; + __cleanup_cep( p_new_cep ); + +reject: + __reject_req( p_port_cep, p_mad, reason ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( reason == IB_REJ_STALE_CONN && status == IB_SUCCESS ) + __process_cep( p_stale_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__save_wire_rep( + IN OUT kcep_t* const p_cep, + IN const mad_cm_rep_t* const p_rep ) +{ + AL_ENTER( AL_DBG_CM ); + + /* The send should have been cancelled during MRA processing. */ + p_cep->state = CEP_STATE_REP_RCVD; + + /* Store pertinent information in the connection. */ + p_cep->remote_comm_id = p_rep->local_comm_id; + p_cep->remote_ca_guid = p_rep->local_ca_guid; + + p_cep->remote_qpn = conn_rep_get_lcl_qpn( p_rep ); + + /* Store the remote endpoint's target ACK delay. */ + p_cep->target_ack_delay = conn_rep_get_target_ack_delay( p_rep ); + + /* Update the local ACK delay stored in the AV's. */ + p_cep->av[0].attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + p_cep->av[0].attr.conn.local_ack_timeout, p_cep->target_ack_delay ); + p_cep->av[0].attr.conn.rnr_retry_cnt = conn_rep_get_rnr_retry_cnt( p_rep ); + + if( p_cep->av[1].port_guid ) + { + p_cep->av[1].attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + p_cep->av[1].attr.conn.local_ack_timeout, + p_cep->target_ack_delay ); + p_cep->av[1].attr.conn.rnr_retry_cnt = + p_cep->av[0].attr.conn.rnr_retry_cnt; + } + + p_cep->init_depth = p_rep->resp_resources; + p_cep->resp_res = p_rep->initiator_depth; + + p_cep->sq_psn = conn_rep_get_starting_psn( p_rep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__mra_handler( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_mra_t *p_mra; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_mra->remote_comm_id ); + if( !p_cep ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("MRA received that could not be matched.\n") ); + goto err; + } + + if( p_cep->remote_comm_id ) + { + if( p_cep->remote_comm_id != p_mra->local_comm_id ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("MRA received that could not be matched.\n") ); + goto err; + } + } + + /* + * Note that we don't update the CEP's remote comm ID - it messes up REP + * processing since a non-zero RCID implies the connection is in the RCID + * map. Adding it here requires checking there and conditionally adding + * it. Ignoring it is a valid thing to do. + */ + if( !(p_cep->state & CEP_STATE_SENT) || + (1 << conn_mra_get_msg_mraed( p_mra ) != + (p_cep->state & CEP_MSG_MASK)) ) + { + /* Invalid state. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("MRA received in invalid state.\n") ); + goto err; + } + + /* Delay the current send. 
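+ *
+ * The extension combines the service timeout carried in the MRA with
+ * the remote's round-trip packet lifetime; the call below amounts to:
+ *
+ *   delay_ms = __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) )
+ *            + __calc_mad_timeout( p_cep->max_2pkt_life - 1 );
+ *   ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad, delay_ms );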
*/ + CL_ASSERT( p_cep->p_send_mad ); + ib_delay_mad( p_cep->h_mad_svc, p_cep->p_send_mad, + __calc_mad_timeout( conn_mra_get_svc_timeout( p_mra ) ) + + __calc_mad_timeout( p_cep->max_2pkt_life - 1 ) ); + + /* We only invoke a single callback for MRA. */ + if( p_cep->state & CEP_STATE_MRA ) + { + /* Invalid state. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("Already received MRA.\n") ); + goto err; + } + + p_cep->state |= CEP_STATE_MRA; + + status = __cep_queue_mad( p_cep, p_mad ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + +err: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__rej_handler( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rej_t *p_rej; + kcep_t *p_cep = NULL; + KLOCK_QUEUE_HANDLE hdl; + net64_t ca_guid; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rej = (mad_cm_rej_t*)p_mad->p_mad_buf; + + /* Either one of the communication IDs must be set. */ + if( !p_rej->remote_comm_id && !p_rej->local_comm_id ) + goto err1; + + /* Check the pending list by the remote CA GUID and connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + if( p_rej->remote_comm_id ) + { + p_cep = __lookup_cep( NULL, p_rej->remote_comm_id ); + } + else if( p_rej->reason == IB_REJ_TIMEOUT && + conn_rej_get_ari_len( p_rej ) == sizeof(net64_t) ) + { + cl_memcpy( &ca_guid, p_rej->ari, sizeof(net64_t) ); + p_cep = __lookup_by_id( p_rej->local_comm_id, ca_guid ); + } + + if( !p_cep ) + { + goto err2; + } + + if( p_cep->remote_comm_id && + p_cep->remote_comm_id != p_rej->local_comm_id ) + { + err2: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + err1: + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + status = __process_rej( p_cep, p_mad ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__rep_handler( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rep_t *p_rep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_state_t old_state; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rep = (mad_cm_rep_t*)p_mad->p_mad_buf; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("REP: comm_id (x%x) received\n", p_rep->local_comm_id ) ); + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_rep->remote_comm_id ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("REP received that could not be matched.\n") ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REQ_SENT: + old_state = p_cep->state; + /* Save pertinent information and change state. */ + __save_wire_rep( p_cep, p_rep ); + + if( __insert_cep( p_cep ) != p_cep ) + { + /* Roll back the state change. */ + __reject_mad( p_port_cep, p_cep, p_mad, IB_REJ_STALE_CONN ); + p_cep->state = old_state; + status = __process_stale( p_cep ); + } + else + { + /* + * Cancel any outstanding send. Note that we do this only after + * inserting the CEP - if we failed, then the send will timeout + * and we'll finish our way through the state machine. 
+ */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + status = __cep_queue_mad( p_cep, p_mad ); + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_REP_MRA_SENT: + /* Repeate the MRA or RTU. */ + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + ib_put_mad( p_mad ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("REP received in invalid state.\n") ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__rtu_handler( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rtu_t *p_rtu; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("RTU: comm_id (x%x) received\n", p_rtu->local_comm_id) ); + + /* Find the connection by local connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_rtu->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_rtu->local_comm_id ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("RTU received that could not be matched.\n") ); + goto done; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* Cancel any outstanding send. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + p_cep->state = CEP_STATE_ESTABLISHED; + + status = __cep_queue_mad( p_cep, p_mad ); + + /* Update timewait time. */ + __calc_timewait( p_cep ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + default: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("RTU received in invalid state.\n") ); + break; + } + +done: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__dreq_handler( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_dreq_t *p_dreq; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, + ("DREQ: comm_id (x%x) qpn (x%x) received\n", + p_dreq->local_comm_id, conn_dreq_get_remote_qpn( p_dreq )) ); + + /* Find the connection by connection IDs. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_dreq->remote_comm_id ); + if( !p_cep || + p_cep->remote_comm_id != p_dreq->local_comm_id || + p_cep->local_qpn != conn_dreq_get_remote_qpn( p_dreq ) ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREQ received that could not be matched.\n") ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_DREQ_SENT: + /* Cancel the outstanding MAD. 
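+ *
+ * The lookup above also enforced the "additional check" stored with
+ * local_qpn - all three of these must hold before we get here:
+ *
+ *   p_cep->local_comm_id matches p_dreq->remote_comm_id (CID lookup)
+ *   p_cep->remote_comm_id == p_dreq->local_comm_id
+ *   p_cep->local_qpn == conn_dreq_get_remote_qpn( p_dreq )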
*/ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Fall through and process as DREQ received case. */ + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + case CEP_STATE_LAP_MRA_SENT: + p_cep->state = CEP_STATE_DREQ_RCVD; + + status = __cep_queue_mad( p_cep, p_mad ); + + /* Store the TID for use in the reply DREP. */ + p_cep->tid = p_dreq->hdr.trans_id; + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_TIMEWAIT: + case CEP_STATE_DESTROY: + /* Repeat the DREP. */ + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREQ received in invalid state.\n") ); + case CEP_STATE_DREQ_RCVD: + ib_put_mad( p_mad ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__drep_handler( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_drep_t *p_drep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_drep = (mad_cm_drep_t*)p_mad->p_mad_buf; + + /* Find the connection by local connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_drep->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_drep->local_comm_id ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREP received that could not be matched.\n") ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + if( p_cep->state != CEP_STATE_DREQ_SENT && + p_cep->state != CEP_STATE_DREQ_DESTROY ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("DREP received in invalid state.\n") ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + /* Cancel the DREQ. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + if( p_cep->state == CEP_STATE_DREQ_SENT ) + { + p_cep->state = CEP_STATE_TIMEWAIT; + + status = __cep_queue_mad( p_cep, p_mad ); + } + else + { + /* State is DREQ_DESTROY - move to DESTROY to allow cleanup. 
*/ + CL_ASSERT( p_cep->state == CEP_STATE_DREQ_DESTROY ); + p_cep->state = CEP_STATE_DESTROY; + + ib_put_mad( p_mad ); + status = IB_INVALID_STATE; + } + + __insert_timewait( p_cep ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +static boolean_t +__format_lap_av( + IN kcep_t* const p_cep, + IN const lap_path_info_t* const p_path ) +{ + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_path ); + + cl_memclr( &p_cep->alt_av, sizeof(kcep_av_t) ); + + p_port_cep = __find_port_cep( &p_path->remote_gid, p_path->remote_lid, + p_cep->pkey, &p_cep->alt_av.pkey_index ); + if( !p_port_cep ) + { + AL_EXIT( AL_DBG_CM ); + return FALSE; + } + + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_EXIT( AL_DBG_CM ); + return FALSE; + } + + p_cep->alt_av.port_guid = p_port_cep->port_guid; + p_cep->alt_av.attr.port_num = p_port_cep->port_num; + + p_cep->alt_av.attr.sl = conn_lap_path_get_svc_lvl( p_path ); + p_cep->alt_av.attr.dlid = p_path->local_lid; + + if( !conn_lap_path_get_subn_lcl( p_path ) ) + { + p_cep->alt_av.attr.grh_valid = TRUE; + p_cep->alt_av.attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, conn_lap_path_get_tclass( p_path ), + conn_lap_path_get_flow_lbl( p_path ) ); + p_cep->alt_av.attr.grh.hop_limit = p_path->hop_limit; + p_cep->alt_av.attr.grh.dest_gid = p_path->local_gid; + p_cep->alt_av.attr.grh.src_gid = p_path->remote_gid; + } + else + { + p_cep->alt_av.attr.grh_valid = FALSE; + } + p_cep->alt_av.attr.static_rate = conn_lap_path_get_pkt_rate( p_path ); + p_cep->alt_av.attr.path_bits = + (uint8_t)(p_path->remote_lid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. For the LAP, we + * first copy the settings from the current primary - MTU and retry + * counts are only specified in the REQ. + */ + p_cep->alt_av.attr.conn = p_cep->av[p_cep->idx_primary].attr.conn; + p_cep->alt_av.attr.conn.local_ack_timeout = + conn_lap_path_get_lcl_ack_timeout( p_path ); + + AL_EXIT( AL_DBG_CM ); + return TRUE; +} + + +static void +__lap_handler( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_lap_t *p_lap; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf; + + /* Find the connection by local connection ID. */ + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_lap->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_lap->local_comm_id ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("LAP received that could not be matched.\n") ); + return; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* + * These two cases handle the RTU being dropped. Receipt of + * a LAP indicates that the connection is established. + */ + case CEP_STATE_ESTABLISHED: + /* + * We don't check for other "established" states related to + * alternate path management (CEP_STATE_LAP_RCVD, etc) + */ + + /* We only support receiving LAP if we took the passive role. 
*/ + if( p_cep->was_active ) + { + ib_put_mad( p_mad ); + break; + } + + /* Store the transaction ID for use during the LAP exchange. */ + p_cep->tid = p_lap->hdr.trans_id; + + /* + * Copy the path record into the connection for use when + * sending the APR and loading the path. + */ + if( !__format_lap_av( p_cep, &p_lap->alternate_path ) ) + { + /* Trap an invalid path. */ + ib_put_mad( p_mad ); + break; + } + + p_cep->state = CEP_STATE_LAP_RCVD; + + status = __cep_queue_mad( p_cep, p_mad ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + case CEP_STATE_LAP_MRA_SENT: + __repeat_mad( p_port_cep, p_cep, p_mad ); + break; + + default: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("LAP received in invalid state.\n") ); + ib_put_mad( p_mad ); + break; + } + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__apr_handler( + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_apr_t *p_apr; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( NULL, p_apr->remote_comm_id ); + if( !p_cep || p_cep->remote_comm_id != p_apr->local_comm_id ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("APR received that could not be matched.\n") ); + goto done; + } + + switch( p_cep->state ) + { + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_MRA_RCVD: + /* Cancel sending the LAP. */ + if( p_cep->p_send_mad ) + { + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + } + + /* Copy the temporary alternate AV. */ + p_cep->av[(p_cep->idx_primary + 1) & 0x1] = p_cep->alt_av; + + /* Update the maximum packet lifetime. */ + p_cep->max_2pkt_life = max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life ); + + /* Update the timewait time. */ + __calc_timewait( p_cep ); + + p_cep->state = CEP_STATE_ESTABLISHED; + + status = __cep_queue_mad( p_cep, p_mad ); + + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return; + + default: + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM, ("APR received in invalid state.\n") ); + break; + } + +done: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__cep_mad_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *context, + IN ib_mad_element_t *p_mad ) +{ + cep_agent_t *p_port_cep; + ib_mad_t *p_hdr; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + UNUSED_PARAM( h_mad_svc ); + p_port_cep = (cep_agent_t*)context; + + CL_ASSERT( p_mad->p_next == NULL ); + + p_hdr = (ib_mad_t*)p_mad->p_mad_buf; + + /* + * TODO: Add filtering in all the handlers for unsupported class version. + * See 12.6.7.2 Rejection Reason, code 31. 
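+ * A minimal sketch of what that filter might look like at the top of
+ * this dispatch (hypothetical - not implemented here):
+ *
+ *   if( p_hdr->class_ver != IB_MCLASS_CM_VER_2 )
+ *   {
+ *       ib_put_mad( p_mad );   -- or REJ with reason 31 where defined
+ *       return;
+ *   }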
+ */ + + switch( p_hdr->attr_id ) + { + case CM_REQ_ATTR_ID: + __req_handler( p_port_cep, p_mad ); + break; + + case CM_MRA_ATTR_ID: + __mra_handler( p_mad ); + break; + + case CM_REJ_ATTR_ID: + __rej_handler( p_mad ); + break; + + case CM_REP_ATTR_ID: + __rep_handler( p_port_cep, p_mad ); + break; + + case CM_RTU_ATTR_ID: + __rtu_handler( p_mad ); + break; + + case CM_DREQ_ATTR_ID: + __dreq_handler( p_port_cep, p_mad ); + break; + + case CM_DREP_ATTR_ID: + __drep_handler( p_mad ); + break; + + case CM_LAP_ATTR_ID: + __lap_handler( p_port_cep, p_mad ); + break; + + case CM_APR_ATTR_ID: + __apr_handler( p_mad ); + break; + + case CM_SIDR_REQ_ATTR_ID: +// p_async_mad->item.pfn_callback = __process_cm_sidr_req; +// break; +// + case CM_SIDR_REP_ATTR_ID: +// p_async_mad->item.pfn_callback = __process_cm_sidr_rep; +// break; +// + default: + ib_put_mad( p_mad ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid CM MAD attribute ID.\n") ); + return; + } + + AL_EXIT( AL_DBG_CM ); +} + + +static inline cep_agent_t* +__get_cep_agent( + IN kcep_t* const p_cep ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_cep ); + + /* Look up the primary CEP port agent */ + p_item = cl_qmap_get( &gp_cep_mgr->port_map, + p_cep->av[p_cep->idx_primary].port_guid ); + if( p_item == cl_qmap_end( &gp_cep_mgr->port_map ) ) + return NULL; + + return PARENT_STRUCT( p_item, cep_agent_t, item ); +} + + +static inline void +__format_mad_av( + OUT ib_mad_element_t* const p_mad, + IN kcep_av_t* const p_av ) +{ + /* Set the addressing information in the MAD. */ + p_mad->grh_valid = p_av->attr.grh_valid; + if( p_av->attr.grh_valid ) + cl_memcpy( p_mad->p_grh, &p_av->attr.grh, sizeof(ib_grh_t) ); + + p_mad->remote_sl = p_av->attr.sl; + p_mad->remote_lid = p_av->attr.dlid; + p_mad->path_bits = p_av->attr.path_bits; + p_mad->pkey_index = p_av->pkey_index; + p_mad->remote_qp = IB_QP1; + p_mad->send_opt = IB_SEND_OPT_SIGNALED; + p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + /* Let the MAD service manage the AV for us. */ + p_mad->h_av = NULL; +} + + +static ib_api_status_t +__cep_send_mad( + IN cep_agent_t* const p_port_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_port_cep ); + CL_ASSERT( p_mad ); + + /* Use the mad's attributes already present */ + p_mad->resp_expected = FALSE; + p_mad->retry_cnt = 0; + p_mad->timeout_ms = 0; + + /* Clear the contexts since the send isn't associated with a CEP. */ + p_mad->context1 = NULL; + p_mad->context2 = NULL; + + status = ib_send_mad( p_port_cep->h_mad_svc, p_mad, NULL ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__cep_send_retry( + IN cep_agent_t* const p_port_cep, + IN kcep_t* const p_cep, + IN ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_mad ); + CL_ASSERT( p_mad->p_mad_buf->attr_id == CM_REQ_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_REP_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_LAP_ATTR_ID || + p_mad->p_mad_buf->attr_id == CM_DREQ_ATTR_ID ); + + /* + * REQ, REP, and DREQ are retried until either a response is + * received or the operation times out. 
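+ * (The assert above admits LAP as well; it is retried the same way.)
+ * Given the arming below, the worst-case wait is roughly
+ * retry_timeout * (max_cm_retries + 1) ms, unless an MRA stretches a
+ * given retry via ib_delay_mad.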
+ */ + p_mad->resp_expected = TRUE; + p_mad->retry_cnt = p_cep->max_cm_retries; + p_mad->timeout_ms = p_cep->retry_timeout; + + CL_ASSERT( !p_cep->p_send_mad ); + + /* Store the mad & mad service handle in the CEP for cancelling. */ + p_cep->h_mad_svc = p_port_cep->h_mad_svc; + p_cep->p_send_mad = p_mad; + + /* reference the connection for which we are sending the MAD. */ + cl_atomic_inc( &p_cep->ref_cnt ); + + /* Set the context. */ + p_mad->context1 = p_cep; + p_mad->context2 = NULL; + + /* Fire in the hole! */ + status = ib_send_mad( p_cep->h_mad_svc, p_mad, NULL ); + if( status != IB_SUCCESS ) + { + /* + * Note that we don't need to check for destruction here since + * we're holding the global lock. + */ + cl_atomic_dec( &p_cep->ref_cnt ); + p_cep->p_send_mad = NULL; + ib_put_mad( p_mad ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_send_mad failed with status %s.\n", ib_get_err_str(status)) ); + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__cep_mad_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *context, + IN ib_mad_element_t *p_mad ) +{ + ib_api_status_t status; + cep_agent_t *p_port_cep; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + ib_pfn_destroy_cb_t pfn_destroy_cb; + void *cep_context; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( h_mad_svc ); + CL_ASSERT( p_mad->p_next == NULL ); + CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL ); + + p_port_cep = (cep_agent_t*)context; + + p_cep = (kcep_t* __ptr64)p_mad->context1; + + /* + * The connection context is not set when performing immediate responses, + * such as repeating MADS. + */ + if( !p_cep ) + { + ib_put_mad( p_mad ); + AL_EXIT( AL_DBG_CM ); + return; + } + + p_mad->context1 = NULL; + + KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl ); + /* Clear the sent MAD pointer so that we don't try cancelling again. */ + if( p_cep->p_send_mad == p_mad ) + p_cep->p_send_mad = NULL; + + switch( p_mad->status ) + { + case IB_WCS_SUCCESS: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + break; + + case IB_WCS_CANCELED: + if( p_cep->state != CEP_STATE_REQ_SENT && + p_cep->state != CEP_STATE_REQ_MRA_RCVD && + p_cep->state != CEP_STATE_REP_SENT && + p_cep->state != CEP_STATE_REP_MRA_RCVD && + p_cep->state != CEP_STATE_LAP_SENT && + p_cep->state != CEP_STATE_LAP_MRA_RCVD && + p_cep->state != CEP_STATE_DREQ_SENT && + p_cep->state != CEP_STATE_SREQ_SENT ) + { + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + break; + } + /* Treat as a timeout so we don't stall the state machine. */ + p_mad->status = IB_WCS_TIMEOUT_RETRY_ERR; + + /* Fall through. */ + case IB_WCS_TIMEOUT_RETRY_ERR: + default: + /* Timeout. Reject the connection. */ + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + /* Send the REJ. */ + __reject_timeout( p_port_cep, p_cep, p_mad ); + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_DREQ_DESTROY: + p_cep->state = CEP_STATE_DESTROY; + __insert_timewait( p_cep ); + /* Fall through. */ + + case CEP_STATE_DESTROY: + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + ib_put_mad( p_mad ); + goto done; + + case CEP_STATE_DREQ_SENT: + /* + * Make up a DREP mad so we can respond if we receive + * a DREQ while in timewait. 
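+ * The copy lands in p_cep->mads.drep, which is exactly what
+ * __repeat_mad replays if a duplicate DREQ arrives while this CEP sits
+ * in TIMEWAIT.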
+ */ + __format_mad_hdr( &p_cep->mads.drep.hdr, p_cep, CM_DREP_ATTR_ID ); + __format_drep( p_cep, NULL, 0, &p_cep->mads.drep ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + + default: + break; + } + + status = __cep_queue_mad( p_cep, p_mad ); + CL_ASSERT( status != IB_INVALID_STATE ); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl ); + + if( status == IB_SUCCESS ) + __process_cep( p_cep ); + break; + } + +done: + pfn_destroy_cb = p_cep->pfn_destroy_cb; + cep_context = p_cep->context; + + if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb ) + pfn_destroy_cb( cep_context ); + AL_EXIT( AL_DBG_CM ); +} + + +static void +__cep_qp_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); + + /* + * Most of the QP events are trapped by the real owner of the QP. + * For real events, the CM may not be able to do much anyways! + */ +} + + +static ib_api_status_t +__init_data_svc( + IN cep_agent_t* const p_port_cep, + IN const ib_port_attr_t* const p_port_attr ) +{ + ib_api_status_t status; + ib_qp_create_t qp_create; + ib_mad_svc_t mad_svc; + + AL_ENTER( AL_DBG_CM ); + + /* + * Create the PD alias. We use the port CM's al_obj_t as the context + * to allow using deref_al_obj as the destroy callback. + */ + status = ib_alloc_pd( p_port_cep->h_ca, IB_PDT_ALIAS, &p_port_cep->obj, + &p_port_cep->h_pd ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_alloc_pd failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + /* Reference the port object on behalf of the PD. */ + ref_al_obj( &p_port_cep->obj ); + + /* Create the MAD QP. */ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = IB_QPT_QP1_ALIAS; + qp_create.rq_depth = CEP_MAD_RQ_DEPTH; + qp_create.sq_depth = CEP_MAD_SQ_DEPTH; + qp_create.rq_sge = CEP_MAD_RQ_SGE; + qp_create.sq_sge = CEP_MAD_SQ_SGE; + qp_create.sq_signaled = TRUE; + /* + * We use the port CM's al_obj_t as the context to allow using + * deref_al_obj as the destroy callback. + */ + status = ib_get_spl_qp( p_port_cep->h_pd, p_port_attr->port_guid, + &qp_create, &p_port_cep->obj, __cep_qp_event_cb, &p_port_cep->pool_key, + &p_port_cep->h_qp ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_spl_qp failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + /* Reference the port object on behalf of the QP. */ + ref_al_obj( &p_port_cep->obj ); + + /* Create the MAD service. */ + cl_memclr( &mad_svc, sizeof(mad_svc) ); + mad_svc.mad_svc_context = p_port_cep; + mad_svc.pfn_mad_recv_cb = __cep_mad_recv_cb; + mad_svc.pfn_mad_send_cb = __cep_mad_send_cb; + mad_svc.support_unsol = TRUE; + mad_svc.mgmt_class = IB_MCLASS_COMM_MGMT; + mad_svc.mgmt_version = IB_MCLASS_CM_VER_2; + mad_svc.method_array[IB_MAD_METHOD_SEND] = TRUE; + status = + ib_reg_mad_svc( p_port_cep->h_qp, &mad_svc, &p_port_cep->h_mad_svc ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_mad_svc failed with status %s\n", ib_get_err_str(status)) ); + return status; + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +/* + * Performs immediate cleanup of resources. 
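+ *
+ * Port agents follow the usual two-stage al_obj teardown: this routine
+ * runs synchronously when destruction starts, and __free_port_cep runs
+ * once the last reference drops.  The pairing is established in
+ * __create_port_cep:
+ *
+ *   init_al_obj( &p_port_cep->obj, p_port_cep, TRUE,
+ *       __destroying_port_cep, NULL, __free_port_cep );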
+ */ +static void +__destroying_port_cep( + IN al_obj_t *p_obj ) +{ + cep_agent_t *p_port_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj ); + + if( p_port_cep->port_guid ) + { + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + cl_qmap_remove_item( &gp_cep_mgr->port_map, &p_port_cep->item ); + KeReleaseInStackQueuedSpinLock( &hdl ); + } + + if( p_port_cep->h_qp ) + { + ib_destroy_qp( p_port_cep->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj ); + p_port_cep->h_qp = NULL; + } + + if( p_port_cep->h_pd ) + { + ib_dealloc_pd( p_port_cep->h_pd, (ib_pfn_destroy_cb_t)deref_al_obj ); + p_port_cep->h_pd = NULL; + } + + AL_EXIT( AL_DBG_CM ); +} + + + +/* + * Release all resources allocated by a port CM agent. Finishes any cleanup + * for a port agent. + */ +static void +__free_port_cep( + IN al_obj_t *p_obj ) +{ + cep_agent_t *p_port_cep; + ib_port_attr_mod_t port_attr_mod; + + AL_ENTER( AL_DBG_CM ); + + p_port_cep = PARENT_STRUCT( p_obj, cep_agent_t, obj ); + + if( p_port_cep->h_ca ) + { + /* Update local port attributes */ + port_attr_mod.cap.cm = FALSE; + ib_modify_ca( p_port_cep->h_ca, p_port_cep->port_num, + IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod ); + + deref_al_obj( &p_port_cep->h_ca->obj ); + } + + destroy_al_obj( &p_port_cep->obj ); + cl_free( p_port_cep ); + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Create a port agent for a given port. + */ +static ib_api_status_t +__create_port_cep( + IN ib_pnp_port_rec_t *p_pnp_rec ) +{ + cep_agent_t *p_port_cep; + ib_api_status_t status; + ib_port_attr_mod_t port_attr_mod; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + /* calculate size of port_cm struct */ + p_port_cep = (cep_agent_t*)cl_zalloc( sizeof(cep_agent_t) ); + if( !p_port_cep ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to cl_zalloc port CM agent.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &p_port_cep->obj, AL_OBJ_TYPE_CM ); + + status = init_al_obj( &p_port_cep->obj, p_port_cep, TRUE, + __destroying_port_cep, NULL, __free_port_cep ); + if( status != IB_SUCCESS ) + { + __free_port_cep( &p_port_cep->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Attach to the global CM object. */ + status = attach_al_obj( &gp_cep_mgr->obj, &p_port_cep->obj ); + if( status != IB_SUCCESS ) + { + p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + p_port_cep->port_guid = p_pnp_rec->p_port_attr->port_guid; + p_port_cep->port_num = p_pnp_rec->p_port_attr->port_num; + p_port_cep->base_lid = p_pnp_rec->p_port_attr->lid; + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + cl_qmap_insert( + &gp_cep_mgr->port_map, p_port_cep->port_guid, &p_port_cep->item ); + KeReleaseInStackQueuedSpinLock( &hdl ); + + /* Get a reference to the CA on which we are loading. 
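+ *
+ * Once the CA is acquired and the data service is up, the port
+ * advertises CM support; __free_port_cep clears the same bit on
+ * teardown:
+ *
+ *   port_attr_mod.cap.cm = TRUE;
+ *   ib_modify_ca( p_port_cep->h_ca, port_num,
+ *       IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );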
+	 */
+	p_port_cep->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
+	if( !p_port_cep->h_ca )
+	{
+		p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") );
+		return IB_INVALID_GUID;
+	}
+
+	status = __init_data_svc( p_port_cep, p_pnp_rec->p_port_attr );
+	if( status != IB_SUCCESS )
+	{
+		p_port_cep->obj.pfn_destroy( &p_port_cep->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("__init_data_svc failed with status %s.\n",
+			ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Update local port attributes */
+	cl_memclr( &port_attr_mod, sizeof(ib_port_attr_mod_t) );
+	port_attr_mod.cap.cm = TRUE;
+	status = ib_modify_ca( p_port_cep->h_ca, p_pnp_rec->p_port_attr->port_num,
+		IB_CA_MOD_IS_CM_SUPPORTED, &port_attr_mod );
+
+	/* Update the PNP context to reference this port. */
+	p_pnp_rec->pnp_rec.context = p_port_cep;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &p_port_cep->obj );
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+
+
+/******************************************************************************
+* Global CEP manager
+******************************************************************************/
+
+static cep_cid_t*
+__get_lcid(
+	OUT	net32_t* const	p_cid )
+{
+	cl_status_t	status;
+	uint32_t	size, cid;
+	cep_cid_t	*p_cep_cid;
+
+	AL_ENTER( AL_DBG_CM );
+
+	size = (uint32_t)cl_vector_get_size( &gp_cep_mgr->cid_vector );
+	cid = gp_cep_mgr->free_cid;
+	if( gp_cep_mgr->free_cid == size )
+	{
+		/* Grow the vector pool. */
+		status =
+			cl_vector_set_size( &gp_cep_mgr->cid_vector, size + CEP_CID_GROW );
+		if( status != CL_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_CM );
+			return NULL;
+		}
+		/*
+		 * Return the start of the free list since the
+		 * entry initializer incremented it.
+		 */
+		gp_cep_mgr->free_cid = size;
+	}
+
+	/* Get the next free entry. */
+	p_cep_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, cid );
+
+	/* Update the next entry index. */
+	gp_cep_mgr->free_cid = (uint32_t)(uintn_t)p_cep_cid->p_cep;
+
+	*p_cid = cid;
+
+	AL_EXIT( AL_DBG_CM );
+	return p_cep_cid;
+}
+
+
+static inline kcep_t*
+__lookup_cep(
+	IN	ib_al_handle_t	h_al OPTIONAL,
+	IN	net32_t		cid )
+{
+	size_t		idx;
+	cep_cid_t	*p_cid;
+
+	/* Mask off the counter bits so we get the index in our vector. */
+	idx = cid & CEP_MAX_CID_MASK;
+
+	if( idx >= cl_vector_get_size( &gp_cep_mgr->cid_vector ) )
+		return NULL;
+
+	p_cid = (cep_cid_t*)cl_vector_get_ptr( &gp_cep_mgr->cid_vector, idx );
+	if( !p_cid->h_al )
+		return NULL;
+
+	/*
+	 * h_al is NULL when processing MADs, so we need to match on
+	 * the actual local communication ID. If h_al is non-NULL, we
+	 * are doing a lookup from a call to our API, and only need to match
+	 * on the index in the vector (without the modifier).
+	 */
+	if( h_al )
+	{
+		if( p_cid->h_al != h_al )
+			return NULL;
+	}
+	else if( p_cid->p_cep->local_comm_id != cid )
+	{
+		return NULL;
+	}
+
+	return p_cid->p_cep;
+}
+
+
+/*
+ * Lookup a CEP by remote comm ID and CA GUID.
+ */
+static kcep_t*
+__lookup_by_id(
+	IN	net32_t		remote_comm_id,
+	IN	net64_t		remote_ca_guid )
+{
+	cl_rbmap_item_t	*p_item;
+	kcep_t		*p_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Match against pending connections using remote comm ID and CA GUID.
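+	 * The conn_id_map is keyed by remote comm ID first and remote CA
+	 * GUID second, so the walk below compares the IDs in that order.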
+	 */
+	p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
+	{
+		p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );
+
+		if( remote_comm_id < p_cep->remote_comm_id )
+			p_item = cl_rbmap_left( p_item );
+		else if( remote_comm_id > p_cep->remote_comm_id )
+			p_item = cl_rbmap_right( p_item );
+		else if( remote_ca_guid < p_cep->remote_ca_guid )
+			p_item = cl_rbmap_left( p_item );
+		else if( remote_ca_guid > p_cep->remote_ca_guid )
+			p_item = cl_rbmap_right( p_item );
+		else
+			return p_cep;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return NULL;
+}
+
+
+/*
+ * Lookup a CEP by Service ID and private data.
+ */
+static kcep_t*
+__lookup_listen(
+	IN	net64_t		sid,
+	IN	net64_t		port_guid,
+	IN	uint8_t		*p_pdata )
+{
+	cl_rbmap_item_t	*p_item;
+	kcep_t		*p_cep;
+	intn_t		cmp;
+
+	AL_ENTER( AL_DBG_CM );
+
+	/* Match against listens using service ID, port GUID, and private data. */
+	p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+	{
+		p_cep = PARENT_STRUCT( p_item, kcep_t, listen_item );
+
+		if( sid == p_cep->sid )
+			goto port_cmp;
+		else if( sid < p_cep->sid )
+			p_item = cl_rbmap_left( p_item );
+		else
+			p_item = cl_rbmap_right( p_item );
+
+		continue;
+
+port_cmp:
+		if( p_cep->port_guid != IB_ALL_PORTS )
+		{
+			if( port_guid == p_cep->port_guid )
+				goto pdata_cmp;
+			else if( port_guid < p_cep->port_guid )
+				p_item = cl_rbmap_left( p_item );
+			else
+				p_item = cl_rbmap_right( p_item );
+
+			continue;
+		}
+
+pdata_cmp:
+		if( p_cep->p_cmp_buf && p_pdata )
+		{
+			cmp = cl_memcmp( &p_pdata[p_cep->cmp_offset],
+				p_cep->p_cmp_buf, p_cep->cmp_len );
+
+			if( !cmp )
+				goto match;
+			else if( cmp < 0 )
+				p_item = cl_rbmap_left( p_item );
+			else
+				p_item = cl_rbmap_right( p_item );
+
+			AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,
+				("Svc ID match but compare buffer mismatch.\n") );
+			continue;
+		}
+
+match:
+		/* Everything matched.
+		 */
+		AL_EXIT( AL_DBG_CM );
+		return p_cep;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return NULL;
+}
+
+
+static kcep_t*
+__insert_by_id(
+	IN	kcep_t* const	p_new_cep )
+{
+	kcep_t		*p_cep;
+	cl_rbmap_item_t	*p_item, *p_insert_at;
+	boolean_t	left = TRUE;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_item = cl_rbmap_root( &gp_cep_mgr->conn_id_map );
+	p_insert_at = p_item;
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_id_map ) )
+	{
+		p_insert_at = p_item;
+		p_cep = PARENT_STRUCT( p_item, kcep_t, rem_id_item );
+
+		if( p_new_cep->remote_comm_id < p_cep->remote_comm_id )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else if( p_new_cep->remote_comm_id > p_cep->remote_comm_id )
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+		else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+		else
+		{
+			AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,
+				("WARNING: Duplicate remote CID and CA GUID.\n") );
+			goto done;
+		}
+	}
+
+	cl_rbmap_insert(
+		&gp_cep_mgr->conn_id_map, p_insert_at, &p_new_cep->rem_id_item, left );
+	p_cep = p_new_cep;
+
+done:
+	AL_EXIT( AL_DBG_CM );
+	return p_cep;
+}
+
+
+static kcep_t*
+__insert_by_qpn(
+	IN	kcep_t* const	p_new_cep )
+{
+	kcep_t		*p_cep;
+	cl_rbmap_item_t	*p_item, *p_insert_at;
+	boolean_t	left = TRUE;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_item = cl_rbmap_root( &gp_cep_mgr->conn_qp_map );
+	p_insert_at = p_item;
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->conn_qp_map ) )
+	{
+		p_insert_at = p_item;
+		/* This map links CEPs by their rem_qp_item, not rem_id_item. */
+		p_cep = PARENT_STRUCT( p_item, kcep_t, rem_qp_item );
+
+		if( p_new_cep->remote_qpn < p_cep->remote_qpn )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else if( p_new_cep->remote_qpn > p_cep->remote_qpn )
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+		else if( p_new_cep->remote_ca_guid < p_cep->remote_ca_guid )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else if( p_new_cep->remote_ca_guid > p_cep->remote_ca_guid )
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+		else
+		{
+			AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,
+				("WARNING: Duplicate remote QPN and CA GUID.\n") );
+			goto done;
+		}
+	}
+
+	cl_rbmap_insert(
+		&gp_cep_mgr->conn_qp_map, p_insert_at, &p_new_cep->rem_qp_item, left );
+	p_cep = p_new_cep;
+
+done:
+	AL_EXIT( AL_DBG_CM );
+	return p_cep;
+}
+
+
+static inline kcep_t*
+__insert_cep(
+	IN	kcep_t* const	p_new_cep )
+{
+	kcep_t	*p_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_cep = __insert_by_qpn( p_new_cep );
+	if( p_cep != p_new_cep )
+		goto err;
+
+	p_cep = __insert_by_id( p_new_cep );
+	if( p_cep != p_new_cep )
+	{
+		cl_rbmap_remove_item(
+			&gp_cep_mgr->conn_qp_map, &p_new_cep->rem_qp_item );
+err:
+		/*
+		 * Clear the remote QPN and comm ID so that we don't try
+		 * to remove the CEP from those maps.
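+		 * __remove_cep only acts on non-zero values, so a failed
+		 * insert cannot corrupt the trees later.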
+		 */
+		p_new_cep->remote_qpn = 0;
+		p_new_cep->remote_comm_id = 0;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return p_cep;
+}
+
+
+static inline void
+__remove_cep(
+	IN	kcep_t* const	p_cep )
+{
+	AL_ENTER( AL_DBG_CM );
+
+	if( p_cep->remote_comm_id )
+	{
+		cl_rbmap_remove_item(
+			&gp_cep_mgr->conn_id_map, &p_cep->rem_id_item );
+		p_cep->remote_comm_id = 0;
+	}
+	if( p_cep->remote_qpn )
+	{
+		cl_rbmap_remove_item(
+			&gp_cep_mgr->conn_qp_map, &p_cep->rem_qp_item );
+		p_cep->remote_qpn = 0;
+	}
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+static boolean_t
+__is_lid_valid(
+	IN	ib_net16_t	lid,
+	IN	ib_net16_t	port_lid,
+	IN	uint8_t		lmc )
+{
+	uint16_t	lid1;
+	uint16_t	lid2;
+	uint16_t	path_bits;
+
+	if(lmc)
+	{
+		lid1 = CL_NTOH16(lid);
+		lid2 = CL_NTOH16(port_lid);
+		path_bits = 0;
+
+		if( lid1 < lid2 )
+			return FALSE;
+
+		while( lmc-- )
+			path_bits = (uint16_t)( (path_bits << 1) | 1 );
+
+		lid2 |= path_bits;
+
+		if( lid1 > lid2)
+			return FALSE;
+	}
+	else
+	{
+		if (lid != port_lid)
+			return FALSE;
+	}
+
+	return TRUE;
+}
+
+
+static inline boolean_t
+__is_gid_valid(
+	IN	const ib_port_attr_t* const	p_port_attr,
+	IN	const ib_gid_t* const		p_gid )
+{
+	uint16_t	idx;
+
+	for( idx = 0; idx < p_port_attr->num_gids; idx++ )
+	{
+		if( !cl_memcmp(
+			p_gid, &p_port_attr->p_gid_table[idx], sizeof(ib_gid_t) ) )
+		{
+			return TRUE;
+		}
+	}
+	return FALSE;
+}
+
+
+static inline boolean_t
+__get_pkey_index(
+	IN	const ib_port_attr_t* const	p_port_attr,
+	IN	const net16_t			pkey,
+	OUT	uint16_t* const			p_pkey_index )
+{
+	uint16_t	idx;
+
+	for( idx = 0; idx < p_port_attr->num_pkeys; idx++ )
+	{
+		if( p_port_attr->p_pkey_table[idx] == pkey )
+		{
+			*p_pkey_index = idx;
+			return TRUE;
+		}
+	}
+
+	return FALSE;
+}
+
+
+/*
+ * Returns the CEP agent for the port matching the given GID, LID, and PKEY,
+ * or NULL if no port matches.  On success, the index of the matching PKEY
+ * is returned through p_pkey_index.
+ */
+static cep_agent_t*
+__find_port_cep(
+	IN	const ib_gid_t* const	p_gid,
+	IN	const net16_t		lid,
+	IN	const net16_t		pkey,
+	OUT	uint16_t* const		p_pkey_index )
+{
+	cep_agent_t		*p_port_cep;
+	cl_list_item_t		*p_item;
+	const ib_port_attr_t	*p_port_attr;
+
+	AL_ENTER( AL_DBG_CM );
+
+	cl_spinlock_acquire( &gp_cep_mgr->obj.lock );
+	for( p_item = cl_qlist_head( &gp_cep_mgr->obj.obj_list );
+		p_item != cl_qlist_end( &gp_cep_mgr->obj.obj_list );
+		p_item = cl_qlist_next( p_item ) )
+	{
+		p_port_cep = PARENT_STRUCT( p_item, cep_agent_t, obj.pool_item );
+
+		CL_ASSERT( p_port_cep->port_num );
+
+		ci_ca_lock_attr( p_port_cep->h_ca->obj.p_ci_ca );
+
+		p_port_attr = p_port_cep->h_ca->obj.p_ci_ca->p_pnp_attr->p_port_attr;
+		p_port_attr += (p_port_cep->port_num - 1);
+
+		if( __is_lid_valid( lid, p_port_attr->lid, p_port_attr->lmc ) &&
+			__is_gid_valid( p_port_attr, p_gid ) &&
+			__get_pkey_index( p_port_attr, pkey, p_pkey_index ) )
+		{
+			ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
+			cl_spinlock_release( &gp_cep_mgr->obj.lock );
+			AL_EXIT( AL_DBG_CM );
+			return p_port_cep;
+		}
+
+		ci_ca_unlock_attr( p_port_cep->h_ca->obj.p_ci_ca );
+	}
+	cl_spinlock_release( &gp_cep_mgr->obj.lock );
+	AL_EXIT( AL_DBG_CM );
+	return NULL;
+}
+
+
+/*
+ * PnP callback for port event notifications.
+ */
+static ib_api_status_t
+__cep_pnp_cb(
+	IN	ib_pnp_rec_t	*p_pnp_rec )
+{
+	ib_api_status_t	status = IB_SUCCESS;
+
+	AL_ENTER( AL_DBG_CM );
+
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_PORT_ADD:
+		/* Create the port agent. */
+		CL_ASSERT( !p_pnp_rec->context );
+		status = __create_port_cep( (ib_pnp_port_rec_t*)p_pnp_rec );
+		break;
+
+	case IB_PNP_PORT_REMOVE:
+		CL_ASSERT( p_pnp_rec->context );
+
+		/* Destroy the port agent.
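+		 * pfn_destroy releases a reference during teardown, so one is
+		 * taken here on behalf of the PnP path, which does not
+		 * otherwise hold one.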
+		 */
+		ref_al_obj( &((cep_agent_t* __ptr64)p_pnp_rec->context)->obj );
+		((cep_agent_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy(
+			&((cep_agent_t* __ptr64)p_pnp_rec->context)->obj, NULL );
+		break;
+
+	default:
+		break;	/* Ignore other PNP events. */
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+static inline int64_t
+__min_timewait(
+	IN	int64_t		current_min,
+	IN	kcep_t* const	p_cep )
+{
+	/*
+	 * The minimum timer interval is 50 milliseconds. This means
+	 * 500000 100ns increments. Since __process_timewait divides the
+	 * result in half (so that the worst case timewait interval is 150%)
+	 * we compensate for this here. Note that relative time values are
+	 * expressed as negative.
+	 */
+#define MIN_TIMEWAIT_100NS	-1000000
+
+	/* No minimum yet - use this CEP's timewait, clamped to the minimum. */
+	if( !current_min )
+	{
+		return min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS );
+	}
+	else
+	{
+		return max( current_min,
+			min( p_cep->timewait_time.QuadPart, MIN_TIMEWAIT_100NS ) );
+	}
+}
+
+
+/*
+ * Processes CEPs in the timewait state, reaping those whose timers have
+ * expired. Returns half of the shortest remaining timewait, in milliseconds.
+ */
+static uint32_t
+__process_timewait()
+{
+	cl_list_item_t	*p_item;
+	kcep_t		*p_cep;
+	LARGE_INTEGER	timeout;
+	int64_t		min_timewait = 0;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+	timeout.QuadPart = 0;
+
+	p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
+	while( p_item != cl_qlist_end( &gp_cep_mgr->timewait_list ) )
+	{
+		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
+		p_item = cl_qlist_next( p_item );
+
+		CL_ASSERT( p_cep->state == CEP_STATE_DESTROY ||
+			p_cep->state == CEP_STATE_TIMEWAIT );
+
+		CL_ASSERT( !p_cep->p_mad );
+
+		if( KeWaitForSingleObject( &p_cep->timewait_timer, Executive,
+			KernelMode, FALSE, &timeout ) != STATUS_SUCCESS )
+		{
+			/* Still in timewait - try again next time. */
+			min_timewait = __min_timewait( min_timewait, p_cep );
+			continue;
+		}
+
+		if( p_cep->ref_cnt )
+		{
+			/* Send outstanding or destruction in progress. */
+			min_timewait = __min_timewait( min_timewait, p_cep );
+			continue;
+		}
+
+		/* Remove from the timewait list. */
+		cl_qlist_remove_item( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );
+
+		/*
+		 * Not in timewait. Remove the CEP from the maps - it should
+		 * no longer be matched against.
+		 */
+		__remove_cep( p_cep );
+
+		if( p_cep->state == CEP_STATE_DESTROY )
+		{
+			__destroy_cep( p_cep );
+		}
+		else
+		{
+			/* Move the CEP to the IDLE state so that it can be used again. */
+			p_cep->state = CEP_STATE_IDLE;
+		}
+	}
+
+	AL_EXIT( AL_DBG_CM );
+	return (uint32_t)(min_timewait / -20000);
+}
+
+
+/*
+ * Timer callback to process CEPs in timewait state.
+ */
+static void
+__cep_timewait_cb(
+	IN	void	*context )
+{
+	KLOCK_QUEUE_HANDLE	hdl;
+	uint32_t		min_timewait;
+
+	AL_ENTER( AL_DBG_CM );
+
+	UNUSED_PARAM( context );
+
+	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+	KeAcquireInStackQueuedSpinLockAtDpcLevel( &gp_cep_mgr->lock, &hdl );
+
+	min_timewait = __process_timewait();
+
+	if( cl_qlist_count( &gp_cep_mgr->timewait_list ) )
+	{
+		/*
+		 * Reset the timer for half of the shortest timeout - this results
+		 * in a worst case timeout of 150% of timewait.
+		 */
+		cl_timer_trim( &gp_cep_mgr->timewait_timer, min_timewait );
+	}
+
+	KeReleaseInStackQueuedSpinLockFromDpcLevel( &hdl );
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+/*
+ * Starts immediate cleanup of the CM. Invoked during al_obj destruction.
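+ *
+ * (For exposition, a small helper follows that is not part of the
+ * surrounding code; it only restates the conversion performed at the
+ * end of __process_timewait above.)
+ */
+
+/*
+ * Illustrative only: relative kernel due times are negative and expressed
+ * in 100ns units, so dividing by -20000 rather than -10000 yields half the
+ * interval in milliseconds - the value fed to cl_timer_trim for the 150%
+ * worst case described above.
+ */
+static uint32_t
+__timewait_trim_ms(
+	IN	int64_t		min_timewait )
+{
+	/* Example: -1000000 (a relative 100 ms) / -20000 == 50 ms. */
+	return (uint32_t)(min_timewait / -20000);
+}
+
+/*
+ * Starts immediate cleanup of the CM. Invoked during al_obj destruction.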
+ */
+static void
+__destroying_cep_mgr(
+	IN	al_obj_t*	p_obj )
+{
+	ib_api_status_t		status;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cl_list_item_t		*p_item;
+	kcep_t			*p_cep;
+	LARGE_INTEGER		timeout;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
+	UNUSED_PARAM( p_obj );
+
+	/* Deregister from PnP notifications. */
+	if( gp_cep_mgr->h_pnp )
+	{
+		status = ib_dereg_pnp(
+			gp_cep_mgr->h_pnp, (ib_pfn_destroy_cb_t)deref_al_obj );
+		if( status != IB_SUCCESS )
+		{
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("ib_dereg_pnp failed with status %s.\n",
+				ib_get_err_str(status)) );
+			deref_al_obj( &gp_cep_mgr->obj );
+		}
+	}
+
+	/* Cancel all timewait timers. */
+	timeout.QuadPart = 0;
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	for( p_item = cl_qlist_head( &gp_cep_mgr->timewait_list );
+		p_item != cl_qlist_end( &gp_cep_mgr->timewait_list );
+		p_item = cl_qlist_next( p_item ) )
+	{
+		p_cep = PARENT_STRUCT( p_item, kcep_t, timewait_item );
+		KeSetTimer( &p_cep->timewait_timer, timeout, NULL );
+	}
+	__process_timewait();
+	KeReleaseInStackQueuedSpinLock( &hdl );
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+/*
+ * Frees the global CEP agent. Invoked during al_obj destruction.
+ */
+static void
+__free_cep_mgr(
+	IN	al_obj_t*	p_obj )
+{
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( &gp_cep_mgr->obj == p_obj );
+	/* All listen requests should have been cleaned up by this point. */
+	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->listen_map ) );
+	/* All connections should have been cancelled/disconnected by now. */
+	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_id_map ) );
+	CL_ASSERT( cl_is_rbmap_empty( &gp_cep_mgr->conn_qp_map ) );
+
+	cl_vector_destroy( &gp_cep_mgr->cid_vector );
+
+	cl_timer_destroy( &gp_cep_mgr->timewait_timer );
+
+	/*
+	 * All CM port agents should have been destroyed by now via the
+	 * standard child object destruction provided by the al_obj.
+	 */
+	ExDeleteNPagedLookasideList( &gp_cep_mgr->cep_pool );
+	destroy_al_obj( p_obj );
+
+	cl_free( gp_cep_mgr );
+	gp_cep_mgr = NULL;
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+static cl_status_t
+__cid_init(
+	IN	void* const	p_element,
+	IN	void*		context )
+{
+	cep_cid_t	*p_cid;
+
+	UNUSED_PARAM( context );
+
+	p_cid = (cep_cid_t*)p_element;
+
+	p_cid->h_al = NULL;
+	p_cid->p_cep = (kcep_t*)(uintn_t)++gp_cep_mgr->free_cid;
+	p_cid->modifier = 0;
+
+	return CL_SUCCESS;
+}
+
+
+/*
+ * Allocates and initializes the global CM agent.
+ */
+ib_api_status_t
+create_cep_mgr(
+	IN	al_obj_t* const	p_parent_obj )
+{
+	ib_api_status_t	status;
+	cl_status_t	cl_status;
+	ib_pnp_req_t	pnp_req;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( gp_cep_mgr == NULL );
+
+	/* Allocate the global CM agent. */
+	gp_cep_mgr = (al_cep_mgr_t*)cl_zalloc( sizeof(al_cep_mgr_t) );
+	if( !gp_cep_mgr )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Failed allocation of global CM agent.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM );
+	ExInitializeNPagedLookasideList( &gp_cep_mgr->cep_pool, NULL, NULL,
+		0, sizeof(kcep_t), 'PECK', 0 );
+	cl_qmap_init( &gp_cep_mgr->port_map );
+	cl_rbmap_init( &gp_cep_mgr->listen_map );
+	cl_rbmap_init( &gp_cep_mgr->conn_id_map );
+	cl_rbmap_init( &gp_cep_mgr->conn_qp_map );
+	cl_qlist_init( &gp_cep_mgr->timewait_list );
+	/* Timer initialization can't fail in kernel-mode.
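+	 * (cl_timer_init below returns CL_SUCCESS unconditionally in kernel
+	 * builds, so its status is intentionally ignored.)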
*/ + cl_timer_init( &gp_cep_mgr->timewait_timer, __cep_timewait_cb, NULL ); + cl_vector_construct( &gp_cep_mgr->cid_vector ); + + status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE, + __destroying_cep_mgr, NULL, __free_cep_mgr ); + if( status != IB_SUCCESS ) + { + __free_cep_mgr( &gp_cep_mgr->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + /* Attach to the parent object. */ + status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + cl_status = cl_vector_init( &gp_cep_mgr->cid_vector, + CEP_CID_MIN, CEP_CID_GROW, sizeof(cep_cid_t), __cid_init, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_vector_init failed with status %s.\n", + CL_STATUS_MSG(cl_status)) ); + return ib_convert_cl_status( cl_status ); + } + + gp_cep_mgr->free_cid = 0; + + /* Register for port PnP notifications. */ + cl_memclr( &pnp_req, sizeof(pnp_req) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &gp_cep_mgr->obj; + pnp_req.pfn_pnp_cb = __cep_pnp_cb; + status = ib_reg_pnp( gh_al, &pnp_req, &gp_cep_mgr->h_pnp ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_pnp failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* + * Leave the reference taken in init_al_obj oustanding since PnP + * deregistration is asynchronous. This replaces a call to ref and + * deref the object. + */ + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + +/****************************************************************************** +* CEP manager API +******************************************************************************/ + +/* Called with the CEP and CEP manager locks held */ +static ib_api_status_t +__cep_queue_mad( + IN kcep_t* const p_cep, + IN ib_mad_element_t* p_mad ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( !p_mad->p_next ); + + if( p_cep->state == CEP_STATE_DESTROY ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_STATE; + } + + /* Queue this MAD for processing. */ + if( p_cep->p_mad_head ) + { + CL_ASSERT( p_cep->signalled ); + /* + * If there's already a MAD at the head of the list, we will not + * invoke the callback. Just queue and exit. + */ + CL_ASSERT( p_cep->p_mad_tail ); + p_cep->p_mad_tail->p_next = p_mad; + p_cep->p_mad_tail = p_mad; + AL_EXIT( AL_DBG_CM ); + return IB_PENDING; + } + + p_cep->p_mad_head = p_mad; + p_cep->p_mad_tail = p_mad; + + if( p_cep->signalled ) + { + /* signalled was already non-zero. Don't invoke the callback again. */ + AL_EXIT( AL_DBG_CM ); + return IB_PENDING; + } + + p_cep->signalled = TRUE; + + /* Take a reference since we're about to invoke the callback. */ + cl_atomic_inc( &p_cep->ref_cnt ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +static inline void +__cep_complete_irp( + IN kcep_t* const p_cep, + IN NTSTATUS status, + IN CCHAR increment ) +{ + IRP *p_irp; + + AL_ENTER( AL_DBG_CM ); + + p_irp = InterlockedExchangePointer( &p_cep->p_irp, NULL ); + + if( p_irp ) + { +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + + /* Complete the IRP. 
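+		 * The pointer was exchanged out of the CEP above, so the IRP
+		 * is completed exactly once even when racing with cancellation.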
+		 */
+		p_irp->IoStatus.Status = status;
+		p_irp->IoStatus.Information = 0;
+		IoCompleteRequest( p_irp, increment );
+	}
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+static inline void
+__process_cep(
+	IN	kcep_t* const	p_cep )
+{
+	ib_pfn_destroy_cb_t	pfn_destroy_cb;
+	void			*context;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+	/* Signal to the user there are callbacks waiting. */
+	if( p_cep->pfn_cb )
+		p_cep->pfn_cb( p_cep->p_cid->h_al, p_cep->cid );
+	else
+		__cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT );
+
+	pfn_destroy_cb = p_cep->pfn_destroy_cb;
+	context = p_cep->context;
+
+	/*
+	 * Release the reference for the callback and invoke the destroy
+	 * callback if necessary.
+	 */
+	if( !cl_atomic_dec( &p_cep->ref_cnt ) && pfn_destroy_cb )
+		pfn_destroy_cb( context );
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+static uint32_t
+__calc_mad_timeout(
+	IN	const uint8_t	pkt_life )
+{
+	/*
+	 * Calculate the retry timeout.
+	 * All timeout values in micro seconds are expressed as 4.096 * 2^x,
+	 * where x is the timeout. The formula that approximates this in
+	 * milliseconds using just shifts and subtraction is:
+	 *	timeout_ms = 67 << (x - 14)
+	 * The results are off by 0.162%.
+	 *
+	 * Note that we will never return less than 1 millisecond. We also
+	 * trap exceedingly large values to prevent wrapping.
+	 */
+	if( pkt_life > 39 )
+		return ~0UL;
+	if( pkt_life > 14 )
+		return 67 << (pkt_life - 14);
+	else if( pkt_life > 8 )
+		return 67 >> (14 - pkt_life);
+	else
+		return 1;
+}
+
+
+/* CEP manager lock is held when calling this function. */
+static kcep_t*
+__create_cep()
+{
+	kcep_t	*p_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_cep = ExAllocateFromNPagedLookasideList( &gp_cep_mgr->cep_pool );
+	if( !p_cep )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate CEP.\n") );
+		return NULL;
+	}
+
+	cl_memclr( p_cep, sizeof(kcep_t) );
+
+	KeInitializeTimer( &p_cep->timewait_timer );
+
+	p_cep->state = CEP_STATE_IDLE;
+
+	/*
+	 * Pre-charge the reference count to 1. The code will invoke the
+	 * destroy callback once the ref count reaches zero.
+	 */
+	p_cep->ref_cnt = 1;
+	p_cep->signalled = FALSE;
+
+	/* Find a free entry in the CID vector. */
+	p_cep->p_cid = __get_lcid( &p_cep->cid );
+
+	if( !p_cep->p_cid )
+	{
+		ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to get CID.\n") );
+		return NULL;
+	}
+
+	p_cep->p_cid->modifier++;
+	/*
+	 * We don't ever want a modifier of zero for the CID at index zero
+	 * since it would result in a total CID of zero.
+	 */
+	if( !p_cep->cid && !p_cep->p_cid->modifier )
+		p_cep->p_cid->modifier++;
+
+	p_cep->local_comm_id = p_cep->cid | (p_cep->p_cid->modifier << 24);
+	p_cep->tid = p_cep->local_comm_id;
+
+	p_cep->p_cid->p_cep = p_cep;
+
+	ref_al_obj( &gp_cep_mgr->obj );
+
+	AL_EXIT( AL_DBG_CM );
+	return p_cep;
+}
+
+
+static inline void
+__bind_cep(
+	IN	kcep_t* const		p_cep,
+	IN	ib_al_handle_t		h_al,
+	IN	al_pfn_cep_cb_t		pfn_cb,
+	IN	void* __ptr64		context )
+{
+	CL_ASSERT( p_cep );
+	CL_ASSERT( p_cep->p_cid );
+	CL_ASSERT( h_al );
+
+	p_cep->p_cid->h_al = h_al;
+	p_cep->pfn_cb = pfn_cb;
+	p_cep->context = context;
+
+	/* Track the CEP in its owning AL instance.
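+	 * The list lets the owning AL instance find and clean up any CEPs
+	 * still bound to it when the instance itself is destroyed.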
+	 */
+	cl_spinlock_acquire( &h_al->obj.lock );
+	cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item );
+	cl_spinlock_release( &h_al->obj.lock );
+}
+
+
+static inline void
+__unbind_cep(
+	IN	kcep_t* const	p_cep )
+{
+	CL_ASSERT( p_cep );
+	CL_ASSERT( p_cep->p_cid );
+	CL_ASSERT( p_cep->p_cid->h_al );
+
+	/* Remove the CEP from its owning AL instance. */
+	cl_spinlock_acquire( &p_cep->p_cid->h_al->obj.lock );
+	cl_qlist_remove_item( &p_cep->p_cid->h_al->cep_list, &p_cep->al_item );
+	cl_spinlock_release( &p_cep->p_cid->h_al->obj.lock );
+
+	/*
+	 * Set to the internal AL handle - it needs to be non-NULL to indicate it's
+	 * a valid entry, and it can't be a user's AL instance to prevent using a
+	 * destroyed CEP.
+	 */
+	p_cep->p_cid->h_al = gh_al;
+#ifdef _DEBUG_
+	p_cep->pfn_cb = NULL;
+#endif	/* _DEBUG_ */
+}
+
+
+static inline void
+__calc_timewait(
+	IN	kcep_t* const	p_cep )
+{
+
+	/*
+	 * Use the CEP's stored packet lifetime to calculate the time at which
+	 * the CEP exits timewait. Packet lifetime is expressed as
+	 * 4.096 * 2^pkt_life microseconds, and we need a timeout in 100ns
+	 * increments. The formula using just shifts and subtraction is this:
+	 *	timeout = (41943 << (pkt_life - 10));
+	 * The results are off by .0001%, which should be more than adequate.
+	 */
+	if( p_cep->max_2pkt_life > 10 )
+	{
+		p_cep->timewait_time.QuadPart =
+			-(41943i64 << (p_cep->max_2pkt_life - 10));
+	}
+	else
+	{
+		p_cep->timewait_time.QuadPart =
+			-(41943i64 >> (10 - p_cep->max_2pkt_life));
+	}
+	if( p_cep->target_ack_delay > 10 )
+	{
+		p_cep->timewait_time.QuadPart -=
+			(41943i64 << (p_cep->target_ack_delay - 10));
+	}
+	else
+	{
+		p_cep->timewait_time.QuadPart -=
+			(41943i64 >> (10 - p_cep->target_ack_delay));
+	}
+}
+
+
+/* Called with CEP manager and CEP locks held. */
+static inline void
+__insert_timewait(
+	IN	kcep_t* const	p_cep )
+{
+	cl_qlist_insert_tail( &gp_cep_mgr->timewait_list, &p_cep->timewait_item );
+
+	KeSetTimer( &p_cep->timewait_timer, p_cep->timewait_time, NULL );
+
+	/*
+	 * Reset the timer for half of the shortest timeout - this results
+	 * in a worst case timeout of 150% of timewait.
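+	 * For example, a CEP entering timewait with a 400 ms interval trims
+	 * the shared timer to 200 ms; even in the worst case the CEP is
+	 * reaped roughly 600 ms after entry.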
+	 */
+	cl_timer_trim( &gp_cep_mgr->timewait_timer,
+		(uint32_t)(-p_cep->timewait_time.QuadPart / 20000) );
+}
+
+
+static inline ib_api_status_t
+__do_cep_rej(
+	IN	kcep_t* const		p_cep,
+	IN	ib_rej_status_t		rej_status,
+	IN	const uint8_t* const	p_ari,
+	IN	uint8_t			ari_len,
+	IN	const uint8_t* const	p_pdata,
+	IN	uint8_t			pdata_len )
+{
+	ib_api_status_t		status;
+	cep_agent_t		*p_port_cep;
+	ib_mad_element_t	*p_mad;
+
+	p_port_cep = __get_cep_agent( p_cep );
+	if( !p_port_cep )
+		return IB_INSUFFICIENT_RESOURCES;
+
+	status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad );
+	if( status != IB_SUCCESS )
+		return status;
+
+	__format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] );
+
+	status = conn_rej_set_ari(
+		p_ari, ari_len, (mad_cm_rej_t*)p_mad->p_mad_buf );
+	if( status != IB_SUCCESS )
+	{
+		/* Return the MAD to its pool rather than leaking it. */
+		ib_put_mad( p_mad );
+		return status;
+	}
+
+	status = conn_rej_set_pdata(
+		p_pdata, pdata_len, (mad_cm_rej_t*)p_mad->p_mad_buf );
+	if( status != IB_SUCCESS )
+	{
+		ib_put_mad( p_mad );
+		return status;
+	}
+
+	__reject_mad( p_port_cep, p_cep, p_mad, rej_status );
+	return IB_SUCCESS;
+}
+
+
+static ib_api_status_t
+__cep_get_mad(
+	IN	kcep_t* const			p_cep,
+	IN	net16_t				attr_id,
+	OUT	cep_agent_t** const		pp_port_cep,
+	OUT	ib_mad_element_t** const	pp_mad )
+{
+	cep_agent_t	*p_port_cep;
+	ib_api_status_t	status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_port_cep = __get_cep_agent( p_cep );
+	if( !p_port_cep )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("__get_cep_agent failed.\n") );
+		return IB_INSUFFICIENT_RESOURCES;
+	}
+
+	status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, pp_mad );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("ib_get_mad returned %s.\n", ib_get_err_str( status )) );
+		return status;
+	}
+
+	__format_mad_av( *pp_mad, &p_cep->av[p_cep->idx_primary] );
+
+	__format_mad_hdr( (*pp_mad)->p_mad_buf, p_cep, attr_id );
+
+	*pp_port_cep = p_port_cep;
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+static ib_api_status_t
+__format_dreq(
+	IN	kcep_t* const		p_cep,
+	IN	const uint8_t*		p_pdata OPTIONAL,
+	IN	uint8_t			pdata_len,
+	IN OUT	ib_mad_element_t* const	p_mad )
+{
+	ib_api_status_t	status;
+	mad_cm_dreq_t	*p_dreq;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_dreq = (mad_cm_dreq_t*)p_mad->p_mad_buf;
+
+	p_dreq->local_comm_id = p_cep->local_comm_id;
+	p_dreq->remote_comm_id = p_cep->remote_comm_id;
+
+	conn_dreq_set_remote_qpn( p_cep->remote_qpn, p_dreq );
+
+	/* copy optional data */
+	status = conn_dreq_set_pdata( p_pdata, pdata_len, p_dreq );
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+static ib_api_status_t
+__dreq_cep(
+	IN	kcep_t* const	p_cep )
+{
+	ib_api_status_t		status;
+	cep_agent_t		*p_agt;
+	ib_mad_element_t	*p_mad;
+
+	status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_agt, &p_mad );
+	if( status != IB_SUCCESS )
+		return status;
+
+	status = __format_dreq( p_cep, NULL, 0, p_mad );
+	if( status != IB_SUCCESS )
+	{
+		/* Return the MAD to its pool rather than leaking it. */
+		ib_put_mad( p_mad );
+		return status;
+	}
+
+	return __cep_send_retry( p_agt, p_cep, p_mad );
+}
+
+
+static ib_api_status_t
+__format_drep(
+	IN	kcep_t* const		p_cep,
+	IN	const uint8_t*		p_pdata OPTIONAL,
+	IN	uint8_t			pdata_len,
+	IN OUT	mad_cm_drep_t* const	p_drep )
+{
+	ib_api_status_t	status;
+
+	AL_ENTER( AL_DBG_CM );
+
+	p_drep->local_comm_id = p_cep->local_comm_id;
+	p_drep->remote_comm_id = p_cep->remote_comm_id;
+
+	/* copy optional data */
+	status = conn_drep_set_pdata( p_pdata, pdata_len, p_drep );
+
+	/* Store the DREP MAD so we can repeat it if we get a repeated DREQ.
+	 */
+	if( status == IB_SUCCESS && p_drep != &p_cep->mads.drep )
+		p_cep->mads.drep = *p_drep;
+
+	AL_EXIT( AL_DBG_CM );
+	return status;
+}
+
+
+static void
+__drep_cep(
+	IN	kcep_t* const	p_cep )
+{
+	cep_agent_t		*p_agt;
+	ib_mad_element_t	*p_mad;
+
+	AL_ENTER( AL_DBG_CM );
+
+	if( __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_agt, &p_mad ) != IB_SUCCESS )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return;
+	}
+
+	if( __format_drep( p_cep, NULL, 0, (mad_cm_drep_t*)p_mad->p_mad_buf )
+		!= IB_SUCCESS )
+	{
+		/* Return the MAD to its pool rather than leaking it. */
+		ib_put_mad( p_mad );
+		AL_EXIT( AL_DBG_CM );
+		return;
+	}
+
+	__cep_send_mad( p_agt, p_mad );
+
+	AL_EXIT( AL_DBG_CM );
+}
+
+
+/* Called with CEP manager lock held. */
+static int32_t
+__cleanup_cep(
+	IN	kcep_t* const	p_cep )
+{
+	ib_mad_element_t	*p_mad;
+	kcep_t			*p_new_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( p_cep );
+	CL_ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+	/* If we've already come through here, we're done. */
+	if( p_cep->state == CEP_STATE_DESTROY ||
+		p_cep->state == CEP_STATE_DREQ_DESTROY )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return -1;
+	}
+
+	/* Cleanup the pending MAD list. */
+	while( p_cep->p_mad_head )
+	{
+		p_mad = p_cep->p_mad_head;
+		p_cep->p_mad_head = p_mad->p_next;
+		p_mad->p_next = NULL;
+		if( p_mad->send_context1 )
+		{
+			p_new_cep = (kcep_t* __ptr64)p_mad->send_context1;
+
+			__unbind_cep( p_new_cep );
+			__cleanup_cep( p_new_cep );
+		}
+		ib_put_mad( p_mad );
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REP:
+	case CEP_STATE_PRE_REP_MRA_SENT:
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through. */
+	case CEP_STATE_REQ_RCVD:
+	case CEP_STATE_REP_RCVD:
+	case CEP_STATE_REQ_MRA_SENT:
+	case CEP_STATE_REP_MRA_SENT:
+		/* Reject the connection. */
+		__do_cep_rej( p_cep, IB_REJ_USER_DEFINED, NULL, 0, NULL, 0 );
+		break;
+
+	case CEP_STATE_REQ_SENT:
+	case CEP_STATE_REQ_MRA_RCVD:
+	case CEP_STATE_REP_SENT:
+	case CEP_STATE_REP_MRA_RCVD:
+		/* Cancel the send. */
+		CL_ASSERT( p_cep->h_mad_svc );
+		CL_ASSERT( p_cep->p_send_mad );
+		ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad );
+		/* Reject the connection. */
+		__do_cep_rej( p_cep, IB_REJ_TIMEOUT, (uint8_t*)&p_cep->local_ca_guid,
+			sizeof(p_cep->local_ca_guid), NULL, 0 );
+		break;
+
+	case CEP_STATE_ESTABLISHED:
+	case CEP_STATE_LAP_RCVD:
+	case CEP_STATE_LAP_SENT:
+	case CEP_STATE_LAP_MRA_RCVD:
+	case CEP_STATE_LAP_MRA_SENT:
+	case CEP_STATE_PRE_APR:
+	case CEP_STATE_PRE_APR_MRA_SENT:
+		/* Disconnect the connection. */
+		if( __dreq_cep( p_cep ) != IB_SUCCESS )
+			break;
+		/* Fall through. */
+
+	case CEP_STATE_DREQ_SENT:
+		p_cep->state = CEP_STATE_DREQ_DESTROY;
+		AL_EXIT( AL_DBG_CM );
+		return cl_atomic_dec( &p_cep->ref_cnt );
+
+	case CEP_STATE_DREQ_RCVD:
+		/* Send the DREP. */
+		__drep_cep( p_cep );
+		break;
+
+	case CEP_STATE_SREQ_RCVD:
+		/* TODO: Reject the SIDR request. */
+		break;
+
+	case CEP_STATE_LISTEN:
+		/* Remove from listen map. */
+		cl_rbmap_remove_item( &gp_cep_mgr->listen_map, &p_cep->listen_item );
+		break;
+
+	case CEP_STATE_PRE_REQ:
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through. */
+	case CEP_STATE_IDLE:
+		break;
+
+	default:
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("CEP in state %d.\n", p_cep->state) );
+		/* Fall through. */
+	case CEP_STATE_TIMEWAIT:
+		/* Already in timewait - so all is good.
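+		 * Just mark the CEP for destruction and let the timewait
+		 * timer reap it.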
*/ + p_cep->state = CEP_STATE_DESTROY; + AL_EXIT( AL_DBG_CM ); + return cl_atomic_dec( &p_cep->ref_cnt ); + } + + p_cep->state = CEP_STATE_DESTROY; + __insert_timewait( p_cep ); + + AL_EXIT( AL_DBG_CM ); + return cl_atomic_dec( &p_cep->ref_cnt ); +} + + +static void +__destroy_cep( + IN kcep_t* const p_cep ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( + p_cep->cid < cl_vector_get_size( &gp_cep_mgr->cid_vector ) ); + + CL_ASSERT( p_cep->p_cid == (cep_cid_t*)cl_vector_get_ptr( + &gp_cep_mgr->cid_vector, p_cep->cid ) ); + + /* Free the CID. */ + p_cep->p_cid->p_cep = (kcep_t*)(uintn_t)gp_cep_mgr->free_cid; + p_cep->p_cid->h_al = NULL; + gp_cep_mgr->free_cid = p_cep->cid; + + KeCancelTimer( &p_cep->timewait_timer ); + + ExFreeToNPagedLookasideList( &gp_cep_mgr->cep_pool, p_cep ); + + deref_al_obj( &gp_cep_mgr->obj ); + + AL_EXIT( AL_DBG_CM ); +} + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void* __ptr64 context, + OUT net32_t* const p_cid ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cid ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __create_cep(); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate CEP.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + + __bind_cep( p_cep, h_al, pfn_cb, context ); + + *p_cid = p_cep->cid; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + void *context; + int32_t ref_cnt; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + /* + * Remove the CEP from the CID vector - no further API calls + * will succeed for it. + */ + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + /* Invalid handle. */ + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + context = p_cep->context; + p_cep->pfn_destroy_cb = pfn_destroy_cb; + + /* Cancel any queued IRP */ + __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT ); + + __unbind_cep( p_cep ); + ref_cnt = __cleanup_cep( p_cep ); + KeReleaseInStackQueuedSpinLock( &hdl ); + + /* + * Done waiting. Release the reference so the timewait timer callback + * can finish cleaning up. + */ + if( !ref_cnt && pfn_destroy_cb ) + pfn_destroy_cb( context ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ) +{ + ib_api_status_t status; + kcep_t *p_cep, *p_listen; + cl_rbmap_item_t *p_item, *p_insert_at; + boolean_t left = TRUE; + intn_t cmp; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_listen_info ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Must change state here in case listen fails */ + p_cep->state = CEP_STATE_IDLE; + /* Fall through. 
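+		 * to the idle case, which performs the listen setup.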
+		 */
+	case CEP_STATE_IDLE:
+		break;
+	default:
+		AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM,
+			("Invalid state: %d\n", p_cep->state) );
+		status = IB_INVALID_STATE;
+		goto done;
+	}
+
+	/* Insert the CEP into the listen map. */
+	p_item = cl_rbmap_root( &gp_cep_mgr->listen_map );
+	p_insert_at = p_item;
+	while( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+	{
+		p_insert_at = p_item;
+
+		p_listen = PARENT_STRUCT( p_item, kcep_t, listen_item );
+
+		if( p_listen_info->svc_id == p_listen->sid )
+			goto port_cmp;
+
+		if( p_listen_info->svc_id < p_listen->sid )
+			p_item = cl_rbmap_left( p_item ), left = TRUE;
+		else
+			p_item = cl_rbmap_right( p_item ), left = FALSE;
+
+		continue;
+
+port_cmp:
+		if( p_listen_info->port_guid != IB_ALL_PORTS )
+		{
+			if( p_listen_info->port_guid == p_listen->port_guid )
+				goto pdata_cmp;
+
+			if( p_listen_info->port_guid < p_listen->port_guid )
+				p_item = cl_rbmap_left( p_item ), left = TRUE;
+			else
+				p_item = cl_rbmap_right( p_item ), left = FALSE;
+
+			continue;
+		}
+
+pdata_cmp:
+		/*
+		 * If an existing listen doesn't have a compare buffer,
+		 * then we found a duplicate.
+		 */
+		if( !p_listen->p_cmp_buf || !p_listen_info->p_cmp_buf )
+			break;
+
+		if( p_listen_info->p_cmp_buf )
+		{
+			/* Compare length must match. */
+			if( p_listen_info->cmp_len != p_listen->cmp_len )
+				break;
+
+			/* Compare offset must match. */
+			if( p_listen_info->cmp_offset != p_listen->cmp_offset )
+				break;
+
+			cmp = cl_memcmp( p_listen_info->p_cmp_buf,
+				p_listen->p_cmp_buf, p_listen->cmp_len );
+
+			if( cmp < 0 )
+				p_item = cl_rbmap_left( p_item ), left = TRUE;
+			else if( cmp > 0 )
+				p_item = cl_rbmap_right( p_item ), left = FALSE;
+			else
+				break;
+
+			AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CM,
+				("Svc ID match but compare buffer mismatch.\n") );
+			continue;
+		}
+	}
+
+	if( p_item != cl_rbmap_end( &gp_cep_mgr->listen_map ) )
+	{
+		/* Duplicate!!! */
+		status = IB_INVALID_SETTING;
+		goto done;
+	}
+
+	/* Set up the CEP.
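+	 * The compare buffer, if any, is copied below so the listen owns
+	 * its own copy.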
*/ + if( p_listen_info->p_cmp_buf ) + { + p_cep->p_cmp_buf = cl_malloc( p_listen_info->cmp_len ); + if( !p_cep->p_cmp_buf ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to allocate compare buffer.\n") ); + status = IB_INSUFFICIENT_MEMORY; + goto done; + } + + cl_memcpy( p_cep->p_cmp_buf, + p_listen_info->p_cmp_buf, p_listen_info->cmp_len ); + } + p_cep->cmp_len = p_listen_info->cmp_len; + p_cep->cmp_offset = p_listen_info->cmp_offset; + p_cep->sid = p_listen_info->svc_id; + p_cep->port_guid = p_listen_info->port_guid; + p_cep->state = CEP_STATE_LISTEN; + + cl_rbmap_insert( &gp_cep_mgr->listen_map, p_insert_at, + &p_cep->listen_item, left ); + + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static cep_agent_t* +__format_path_av( + IN const ib_path_rec_t* const p_path, + OUT kcep_av_t* const p_av ) +{ + cep_agent_t* p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_path ); + CL_ASSERT( p_av ); + + cl_memclr( p_av, sizeof(kcep_av_t) ); + + p_port_cep = __find_port_cep( &p_path->sgid, p_path->slid, + p_path->pkey, &p_av->pkey_index ); + if( !p_port_cep ) + { + AL_EXIT( AL_DBG_CM ); + return NULL; + } + + p_av->port_guid = p_port_cep->port_guid; + + p_av->attr.port_num = p_port_cep->port_num; + + p_av->attr.sl = ib_path_rec_sl( p_path ); + p_av->attr.dlid = p_path->dlid; + + p_av->attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 1, p_path->tclass, ib_path_rec_flow_lbl( p_path ) ); + p_av->attr.grh.hop_limit = ib_path_rec_hop_limit( p_path ); + p_av->attr.grh.src_gid = p_path->sgid; + p_av->attr.grh.dest_gid = p_path->dgid; + + p_av->attr.grh_valid = !ib_gid_is_link_local( &p_path->dgid ); + + p_av->attr.static_rate = ib_path_rec_rate( p_path ); + p_av->attr.path_bits = (uint8_t)(p_path->slid - p_port_cep->base_lid); + + /* + * Note that while we never use the connected AV attributes internally, + * we store them so we can pass them back to users. + */ + p_av->attr.conn.path_mtu = ib_path_rec_mtu( p_path ); + p_av->attr.conn.local_ack_timeout = calc_lcl_ack_timeout( + ib_path_rec_pkt_life( p_path ) + 1, 0 ); + + AL_EXIT( AL_DBG_CM ); + return p_port_cep; +} + + +/* + * Formats a REQ mad's path information given a path record. 
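+ * Used for both the primary and alternate path sections of the REQ.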
+ */ +static void +__format_req_path( + IN const ib_path_rec_t* const p_path, + IN const uint8_t ack_delay, + OUT req_path_info_t* const p_req_path ) +{ + AL_ENTER( AL_DBG_CM ); + + p_req_path->local_lid = p_path->slid; + p_req_path->remote_lid = p_path->dlid; + p_req_path->local_gid = p_path->sgid; + p_req_path->remote_gid = p_path->dgid; + + conn_req_path_set_flow_lbl( ib_path_rec_flow_lbl( p_path ), + p_req_path ); + conn_req_path_set_pkt_rate( ib_path_rec_rate( p_path ), + p_req_path ); + + /* Traffic class & hop limit */ + p_req_path->traffic_class = p_path->tclass; + p_req_path->hop_limit = ib_path_rec_hop_limit( p_path ); + + /* SL & Subnet Local fields */ + conn_req_path_set_svc_lvl( ib_path_rec_sl( p_path ), + p_req_path ); + conn_req_path_set_subn_lcl( + ib_gid_is_link_local( &p_path->dgid ), p_req_path ); + + conn_req_path_set_lcl_ack_timeout( + calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_path ) + 1, + ack_delay ), p_req_path ); + + conn_req_path_clr_rsvd_fields( p_req_path ); + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__format_req( + IN kcep_t* const p_cep, + IN const ib_cm_req_t* const p_cm_req ) +{ + ib_api_status_t status; + mad_cm_req_t* p_req; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_cm_req ); + CL_ASSERT( p_cep->p_mad ); + + /* Format the MAD header. */ + __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REQ_ATTR_ID ); + + /* Set the addressing information in the MAD. */ + __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_req = (mad_cm_req_t*)p_cep->p_mad->p_mad_buf; + + ci_ca_lock_attr( p_cm_req->h_qp->obj.p_ci_ca ); + /* + * Store the local CA's ack timeout for use when computing + * the local ACK timeout. + */ + p_cep->local_ack_delay = + p_cm_req->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay; + ci_ca_unlock_attr( p_cm_req->h_qp->obj.p_ci_ca ); + + /* Format the primary path. */ + __format_req_path( p_cm_req->p_primary_path, + p_cep->local_ack_delay, &p_req->primary_path ); + + if( p_cm_req->p_alt_path ) + { + /* Format the alternate path. */ + __format_req_path( p_cm_req->p_alt_path, + p_cep->local_ack_delay, &p_req->alternate_path ); + } + else + { + cl_memclr( &p_req->alternate_path, sizeof(req_path_info_t) ); + } + + /* Set the local communication in the REQ. 
*/ + p_req->local_comm_id = p_cep->local_comm_id; + p_req->sid = p_cm_req->svc_id; + p_req->local_ca_guid = p_cm_req->h_qp->obj.p_ci_ca->verbs.guid; + + conn_req_set_lcl_qpn( p_cep->local_qpn, p_req ); + conn_req_set_resp_res( p_cm_req->resp_res, p_req ); + conn_req_set_init_depth( p_cm_req->init_depth, p_req ); + conn_req_set_remote_resp_timeout( p_cm_req->remote_resp_timeout, p_req ); + conn_req_set_qp_type( p_cm_req->h_qp->type, p_req ); + conn_req_set_flow_ctrl( p_cm_req->flow_ctrl, p_req ); + conn_req_set_starting_psn( p_cep->rq_psn, p_req ); + + conn_req_set_lcl_resp_timeout( p_cm_req->local_resp_timeout, p_req ); + conn_req_set_retry_cnt( p_cm_req->retry_cnt, p_req ); + + p_req->pkey = p_cm_req->p_primary_path->pkey; + + conn_req_set_mtu( ib_path_rec_mtu( p_cm_req->p_primary_path ), p_req ); + conn_req_set_rnr_retry_cnt( p_cm_req->rnr_retry_cnt, p_req ); + + conn_req_set_max_cm_retries( p_cm_req->max_cm_retries, p_req ); + status = conn_req_set_pdata( + p_cm_req->p_req_pdata, p_cm_req->req_length, p_req ); + + conn_req_clr_rsvd_fields( p_req ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__save_user_req( + IN kcep_t* const p_cep, + IN const ib_cm_req_t* const p_cm_req, + OUT cep_agent_t** const pp_port_cep ) +{ + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + if( !p_cm_req->p_primary_path ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid primary path record.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->sid = p_cm_req->svc_id; + + p_cep->idx_primary = 0; + + p_cep->p2p = (p_cm_req->pfn_cm_req_cb != NULL); + + if( p_cm_req->p_compare_buffer ) + { + if( !p_cm_req->compare_length || + (p_cm_req->compare_offset + p_cm_req->compare_length) > + IB_REQ_PDATA_SIZE ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_SETTING; + } + p_cep->p_cmp_buf = cl_malloc( p_cm_req->compare_length ); + if( !p_cep->p_cmp_buf ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INSUFFICIENT_MEMORY; + } + + cl_memcpy( p_cep->p_cmp_buf, + p_cm_req->p_compare_buffer, p_cm_req->compare_length ); + + p_cep->cmp_len = p_cm_req->compare_length; + p_cep->cmp_offset = p_cm_req->compare_offset; + } + else + { + p_cep->p_cmp_buf = NULL; + p_cep->cmp_len = 0; + p_cep->cmp_offset = 0; + } + p_cep->was_active = TRUE; + + /* Validate the primary path. */ + p_port_cep = __format_path_av( p_cm_req->p_primary_path, &p_cep->av[0] ); + if( !p_port_cep ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Primary path unrealizable.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->av[0].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt; + + /* Make sure the paths will work on the desired QP. */ + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != + p_cm_req->h_qp->obj.p_ci_ca->verbs.guid ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Primary path not realizable on given QP.\n") ); + return IB_INVALID_SETTING; + } + + p_cep->local_ca_guid = p_port_cep->h_ca->obj.p_ci_ca->verbs.guid; + + *pp_port_cep = p_port_cep; + + /* + * Store the PKEY so we can ensure that alternate paths are + * on the same partition. + */ + p_cep->pkey = p_cm_req->p_primary_path->pkey; + + p_cep->max_2pkt_life = ib_path_rec_pkt_life( p_cm_req->p_primary_path ) + 1; + + if( p_cm_req->p_alt_path ) + { + /* MTUs must match since they are specified only once. 
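+		 * (the REQ carries a single MTU field that is used for both
+		 * paths)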
+		 */
+		if( ib_path_rec_mtu( p_cm_req->p_primary_path ) !=
+			ib_path_rec_mtu( p_cm_req->p_alt_path ) )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("Mismatched primary and alternate path MTUs.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		/* The PKEY must match too. */
+		if( p_cm_req->p_alt_path->pkey != p_cm_req->p_primary_path->pkey )
+		{
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("Mismatched primary and alternate PKEYs.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		p_port_cep =
+			__format_path_av( p_cm_req->p_alt_path, &p_cep->av[1] );
+		if( p_port_cep &&
+			p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid )
+		{
+			/* Alternate path is not on same CA. */
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Alternate path unrealizable.\n") );
+			return IB_INVALID_SETTING;
+		}
+
+		p_cep->av[1].attr.conn.seq_err_retry_cnt = p_cm_req->retry_cnt;
+
+		p_cep->max_2pkt_life = max( p_cep->max_2pkt_life,
+			(ib_path_rec_pkt_life( p_cm_req->p_alt_path ) + 1) );
+	}
+	else
+	{
+		cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) );
+	}
+
+	p_cep->p_cid->modifier++;
+	/*
+	 * We don't ever want a modifier of zero for the CID at index zero
+	 * since it would result in a total CID of zero.
+	 */
+	if( !p_cep->cid && !p_cep->p_cid->modifier )
+		p_cep->p_cid->modifier++;
+
+	/* Store pertinent information in the connection. */
+	p_cep->local_comm_id = p_cep->cid | (p_cep->p_cid->modifier << 24);
+	p_cep->remote_comm_id = 0;
+
+	/* Cache the local QPN. */
+	p_cep->local_qpn = p_cm_req->h_qp->num;
+	p_cep->remote_ca_guid = 0;
+	p_cep->remote_qpn = 0;
+
+	/* Retry timeout is remote CM response timeout plus 2 * packet life. */
+	p_cep->retry_timeout = __calc_mad_timeout( p_cep->max_2pkt_life ) +
+		__calc_mad_timeout( p_cm_req->remote_resp_timeout );
+
+	/* Store the retry count. */
+	p_cep->max_cm_retries = p_cm_req->max_cm_retries;
+
+	/*
+	 * Clear the maximum packet lifetime, used to calculate timewait.
+	 * It will be set when we transition into the established state.
+	 */
+	p_cep->timewait_time.QuadPart = 0;
+
+	p_cep->rq_psn = p_cep->local_qpn;
+
+	p_cep->rnr_nak_timeout = p_cm_req->rnr_nak_timeout;
+
+	AL_EXIT( AL_DBG_CM );
+	return IB_SUCCESS;
+}
+
+
+ib_api_status_t
+al_cep_pre_req(
+	IN	ib_al_handle_t			h_al,
+	IN	net32_t				cid,
+	IN	const ib_cm_req_t* const	p_cm_req,
+	OUT	ib_qp_mod_t* const		p_init )
+{
+	ib_api_status_t		status;
+	kcep_t			*p_cep;
+	KLOCK_QUEUE_HANDLE	hdl;
+	cep_agent_t		*p_port_cep;
+
+	AL_ENTER( AL_DBG_CM );
+
+	CL_ASSERT( h_al );
+	CL_ASSERT( p_cm_req );
+	CL_ASSERT( p_init );
+
+	/* TODO: Code P2P support. */
+	if( p_cm_req->pfn_cm_req_cb )
+	{
+		AL_EXIT( AL_DBG_CM );
+		return IB_UNSUPPORTED;
+	}
+
+	KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl );
+	p_cep = __lookup_cep( h_al, cid );
+	if( !p_cep )
+	{
+		KeReleaseInStackQueuedSpinLock( &hdl );
+		AL_EXIT( AL_DBG_CM );
+		return IB_INVALID_HANDLE;
+	}
+
+	switch( p_cep->state )
+	{
+	case CEP_STATE_PRE_REQ:
+		CL_ASSERT( p_cep->p_mad );
+		ib_put_mad( p_cep->p_mad );
+		p_cep->p_mad = NULL;
+		/* Fall through.
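+		 * and rebuild the REQ from the idle state.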
*/ + case CEP_STATE_IDLE: + status = __save_user_req( p_cep, p_cm_req, &p_port_cep ); + if( status != IB_SUCCESS ) + break; + + status = + ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_req( p_cep, p_cm_req ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid pdata length.\n") ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + /* Format the INIT qp modify attributes. */ + p_init->req_state = IB_QPS_INIT; + p_init->state.init.primary_port = + p_cep->av[p_cep->idx_primary].attr.port_num; + p_init->state.init.qkey = 0; + p_init->state.init.pkey_index = + p_cep->av[p_cep->idx_primary].pkey_index; + p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE; + + p_cep->state = CEP_STATE_PRE_REQ; + break; + + case CEP_STATE_TIMEWAIT: + status = IB_QP_IN_TIMEWAIT; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REQ: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->state = CEP_STATE_IDLE; + status = IB_INVALID_SETTING; + } + else + { + status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad ); + + if( status == IB_SUCCESS ) + p_cep->state = CEP_STATE_REQ_SENT; + else + p_cep->state = CEP_STATE_IDLE; + } + p_cep->p_mad = NULL; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__save_user_rep( + IN kcep_t* const p_cep, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + AL_ENTER( AL_DBG_CM ); + + /* Cache the local QPN. */ + p_cep->local_qpn = p_cm_rep->h_qp->num; + p_cep->rq_psn = p_cep->local_qpn; + p_cep->init_depth = p_cm_rep->init_depth; + + ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + /* Check the CA's responder resource max and trim if necessary. */ + if( (p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res < + p_cep->req_init_depth) ) + { + /* + * The CA cannot handle the requested responder resources. + * Set the response to the CA's maximum. + */ + p_cep->resp_res = + p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->max_qp_resp_res; + } + else + { + /* The CA supports the requested responder resources. */ + p_cep->resp_res = p_cep->req_init_depth; + } + ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + + p_cep->rnr_nak_timeout = p_cm_rep->rnr_nak_timeout; + + AL_EXIT( AL_DBG_CM ); +} + + +static ib_api_status_t +__format_rep( + IN kcep_t* const p_cep, + IN const ib_cm_rep_t* const p_cm_rep ) +{ + ib_api_status_t status; + mad_cm_rep_t *p_rep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( p_cep ); + CL_ASSERT( p_cm_rep ); + CL_ASSERT( p_cep->p_mad ); + + /* Format the MAD header. 
*/ + __format_mad_hdr( p_cep->p_mad->p_mad_buf, p_cep, CM_REP_ATTR_ID ); + + /* Set the addressing information in the MAD. */ + __format_mad_av( p_cep->p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_rep = (mad_cm_rep_t*)p_cep->p_mad->p_mad_buf; + + p_rep->local_comm_id = p_cep->local_comm_id; + p_rep->remote_comm_id = p_cep->remote_comm_id; + conn_rep_set_lcl_qpn( p_cep->local_qpn, p_rep ); + conn_rep_set_starting_psn( p_cep->rq_psn, p_rep ); + + if( p_cm_rep->failover_accepted != IB_FAILOVER_ACCEPT_SUCCESS ) + { + /* + * Failover rejected - clear the alternate AV information. + * Note that at this point, the alternate is always at index 1. + */ + cl_memclr( &p_cep->av[1], sizeof(kcep_av_t) ); + } + else if( !p_cep->av[1].port_guid ) + { + /* + * Always reject alternate path if it's zero. We might + * have cleared the AV because it was unrealizable when + * processing the REQ. + */ + conn_rep_set_failover( IB_FAILOVER_ACCEPT_ERROR, p_rep ); + } + else + { + conn_rep_set_failover( p_cm_rep->failover_accepted, p_rep ); + } + + p_rep->resp_resources = p_cep->resp_res; + + ci_ca_lock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + conn_rep_set_target_ack_delay( + p_cm_rep->h_qp->obj.p_ci_ca->p_pnp_attr->local_ack_delay, p_rep ); + ci_ca_unlock_attr( p_cm_rep->h_qp->obj.p_ci_ca ); + + p_rep->initiator_depth = p_cep->init_depth; + + conn_rep_set_e2e_flow_ctl( p_cm_rep->flow_ctrl, p_rep ); + + conn_rep_set_rnr_retry_cnt( + (uint8_t)(p_cm_rep->rnr_retry_cnt & 0x07), p_rep ); + + /* Local CA guid should have been set when processing the received REQ. */ + CL_ASSERT( p_cep->local_ca_guid ); + p_rep->local_ca_guid = p_cep->local_ca_guid; + + status = conn_rep_set_pdata( + p_cm_rep->p_rep_pdata, p_cm_rep->rep_length, p_rep ); + + conn_rep_clr_rsvd_fields( p_rep ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void* __ptr64 context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_rep ); + CL_ASSERT( p_init ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + CL_ASSERT( !p_cep->p_mad ); + status = + __cep_get_mad( p_cep, CM_REP_ATTR_ID, &p_port_cep, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + __save_user_rep( p_cep, p_cm_rep ); + + status = __format_rep( p_cep, p_cm_rep ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + /* Format the INIT qp modify attributes. */ + p_init->req_state = IB_QPS_INIT; + p_init->state.init.primary_port = + p_cep->av[p_cep->idx_primary].attr.port_num; + p_init->state.init.qkey = 0; + p_init->state.init.pkey_index = + p_cep->av[p_cep->idx_primary].pkey_index; + p_init->state.init.access_ctrl = IB_AC_LOCAL_WRITE; + + p_cep->context = context; + + /* Just OR in the PREP bit into the state. 
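+		 * This preserves whether an MRA has already been sent for
+		 * the REQ.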
*/ + p_cep->state |= CEP_STATE_PREP; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->state = CEP_STATE_IDLE; + status = IB_INSUFFICIENT_RESOURCES; + } + else + { + status = __cep_send_retry( p_port_cep, p_cep, p_cep->p_mad ); + if( status == IB_SUCCESS ) + { + p_cep->state = CEP_STATE_REP_SENT; + } + else + { + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + } + } + p_cep->p_mad = NULL; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static inline ib_api_status_t +__format_rtu( + IN kcep_t* const p_cep, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_rtu_t *p_rtu; + + AL_ENTER( AL_DBG_CM ); + + p_rtu = (mad_cm_rtu_t*)p_mad->p_mad_buf; + + p_rtu->local_comm_id = p_cep->local_comm_id; + p_rtu->remote_comm_id = p_cep->remote_comm_id; + + /* copy optional data */ + status = conn_rtu_set_pdata( p_pdata, pdata_len, p_rtu ); + + /* Store the RTU MAD so we can repeat it if we get a repeated REP. */ + if( status == IB_SUCCESS ) + p_cep->mads.rtu = *p_rtu; + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + status = __cep_get_mad( p_cep, CM_RTU_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_rtu( p_cep, p_pdata, pdata_len, p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + break; + } + + /* Update the timewait time. */ + __calc_timewait( p_cep ); + + p_cep->state = CEP_STATE_ESTABLISHED; + + __cep_send_mad( p_port_cep, p_mad ); + /* Send failures will get another chance if we receive a repeated REP. 
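+ * Unlike the REQ/REP sends, the RTU goes out via __cep_send_mad() with
+ * no retry timer: if it is lost, the passive side retransmits its REP,
+ * and the copy cached by __format_rtu() in p_cep->mads.rtu can be
+ * resent verbatim.  Sketch of that recovery path (handler wording
+ * illustrative):
+ *
+ *   on repeated REP in CEP_STATE_ESTABLISHED:
+ *       resend p_cep->mads.rtu through the port agent, drop the REP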
*/ + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + status = __do_cep_rej( + p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len ); + __remove_cep( p_cep ); + p_cep->state = CEP_STATE_IDLE; + break; + + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + status = __do_cep_rej( + p_cep, rej_status, p_ari, ari_len, p_pdata, pdata_len ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + } + + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__format_mra( + IN kcep_t* const p_cep, + IN const uint8_t msg_mraed, + IN const ib_cm_mra_t* const p_cm_mra, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_mra_t *p_mra; + + AL_ENTER( AL_DBG_CM ); + + p_mra = (mad_cm_mra_t*)p_mad->p_mad_buf; + + conn_mra_set_msg_mraed( msg_mraed, p_mra ); + + p_mra->local_comm_id = p_cep->local_comm_id; + p_mra->remote_comm_id = p_cep->remote_comm_id; + + conn_mra_set_svc_timeout( p_cm_mra->svc_timeout, p_mra ); + status = conn_mra_set_pdata( + p_cm_mra->p_mra_pdata, p_cm_mra->mra_length, p_mra ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CM ); + return status; + } + conn_mra_clr_rsvd_fields( p_mra ); + + /* Save the MRA so we can repeat it if we get a repeated message. 
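+ * The struct copy below keeps a private copy of the payload that
+ * remains valid after the MAD element is returned to its pool, so a
+ * duplicate REQ/REP/LAP can be answered without reformatting, e.g.
+ * (illustrative):
+ *
+ *   on duplicate message while the CEP_STATE_MRA flag is set:
+ *       copy p_cep->mads.mra into a fresh MAD element and send it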
*/ + p_cep->mads.mra = *p_mra; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + uint8_t msg_mraed; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_mra ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_RCVD: + case CEP_STATE_PRE_REP: + msg_mraed = 0; + break; + + case CEP_STATE_REP_RCVD: + msg_mraed = 1; + break; + + case CEP_STATE_PRE_APR: + case CEP_STATE_LAP_RCVD: + msg_mraed = 2; + break; + + default: + status = IB_INVALID_STATE; + goto done; + } + + status = __cep_get_mad( p_cep, CM_MRA_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + goto done; + + status = __format_mra( p_cep, msg_mraed, p_cm_mra, p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + goto done; + } + + p_cep->state |= CEP_STATE_MRA; + + __cep_send_mad( p_port_cep, p_mad ); + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + + +static ib_api_status_t +__format_lap( + IN kcep_t* const p_cep, + IN const ib_cm_lap_t* const p_cm_lap, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_lap_t *p_lap; + + AL_ENTER( AL_DBG_CM ); + + __format_mad_hdr( p_mad->p_mad_buf, p_cep, CM_LAP_ATTR_ID ); + + __format_mad_av( p_mad, &p_cep->av[p_cep->idx_primary] ); + + p_lap = (mad_cm_lap_t*)p_mad->p_mad_buf; + + p_lap->alternate_path.local_lid = p_cm_lap->p_alt_path->slid; + p_lap->alternate_path.remote_lid = p_cm_lap->p_alt_path->dlid; + p_lap->alternate_path.local_gid = p_cm_lap->p_alt_path->sgid; + p_lap->alternate_path.remote_gid = p_cm_lap->p_alt_path->dgid; + + /* Set Flow Label and Packet Rate */ + conn_lap_path_set_flow_lbl( + ib_path_rec_flow_lbl( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + conn_lap_path_set_tclass( + p_cm_lap->p_alt_path->tclass, &p_lap->alternate_path ); + + p_lap->alternate_path.hop_limit = + ib_path_rec_hop_limit( p_cm_lap->p_alt_path ); + conn_lap_path_set_pkt_rate( + ib_path_rec_rate( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + + /* Set SL and Subnet Local */ + conn_lap_path_set_svc_lvl( + ib_path_rec_sl( p_cm_lap->p_alt_path ), &p_lap->alternate_path ); + conn_lap_path_set_subn_lcl( + ib_gid_is_link_local( &p_cm_lap->p_alt_path->dgid ), + &p_lap->alternate_path ); + + conn_lap_path_set_lcl_ack_timeout( + calc_lcl_ack_timeout( ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1, + p_cep->local_ack_delay), &p_lap->alternate_path ); + + conn_lap_path_clr_rsvd_fields( &p_lap->alternate_path ); + + p_lap->local_comm_id = p_cep->local_comm_id; + p_lap->remote_comm_id = p_cep->remote_comm_id; + conn_lap_set_remote_qpn( p_cep->remote_qpn, p_lap ); + conn_lap_set_resp_timeout( p_cm_lap->remote_resp_timeout, p_lap ); + + status = conn_lap_set_pdata( + p_cm_lap->p_lap_pdata, p_cm_lap->lap_length, p_lap ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("lap pdata invalid.\n") ); + return status; + } + + conn_lap_clr_rsvd_fields( p_lap ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + 
IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_lap ); + CL_ASSERT( p_cm_lap->p_alt_path ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + if( !p_cep->was_active ) + { + /* Only the side that took the active role can initiate a LAP. */ + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Only the active side of a connection can initiate a LAP.\n") ); + status = IB_INVALID_STATE; + break; + } + + /* + * Format the AV information - store in the temporary location until we + * get the APR indicating acceptance. + */ + p_port_cep = __format_path_av( p_cm_lap->p_alt_path, &p_cep->alt_av ); + if( !p_port_cep ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Alternate path invalid!\n") ); + status = IB_INVALID_SETTING; + break; + } + + p_cep->alt_av.attr.conn.seq_err_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt; + p_cep->alt_av.attr.conn.rnr_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt; + + if( p_port_cep->h_ca->obj.p_ci_ca->verbs.guid != p_cep->local_ca_guid ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Alternate CA GUID different from current!\n") ); + status = IB_INVALID_SETTING; + break; + } + + /* Store the alternate path info temporarily. */ + p_cep->alt_2pkt_life = ib_path_rec_pkt_life( p_cm_lap->p_alt_path ) + 1; + + status = ib_get_mad( p_port_cep->pool_key, MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_lap( p_cep, p_cm_lap, p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + break; + } + + status = __cep_send_retry( p_port_cep, p_cep, p_mad ); + if( status == IB_SUCCESS ) + p_cep->state = CEP_STATE_LAP_SENT; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static ib_api_status_t +__format_apr( + IN kcep_t* const p_cep, + IN const ib_cm_apr_t* const p_cm_apr, + IN OUT ib_mad_element_t* const p_mad ) +{ + ib_api_status_t status; + mad_cm_apr_t *p_apr; + + AL_ENTER( AL_DBG_CM ); + + p_apr = (mad_cm_apr_t*)p_mad->p_mad_buf; + + p_apr->local_comm_id = p_cep->local_comm_id; + p_apr->remote_comm_id = p_cep->remote_comm_id; + p_apr->status = p_cm_apr->apr_status; + + status = conn_apr_set_apr_info( p_cm_apr->p_info->data, + p_cm_apr->info_length, p_apr ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("apr_info invalid\n") ); + return status; + } + + status = conn_apr_set_pdata( p_cm_apr->p_apr_pdata, + p_cm_apr->apr_length, p_apr ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("apr pdata invalid\n") ); + return status; + } + + conn_apr_clr_rsvd_fields( p_apr ); + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_apr ); + 
CL_ASSERT( p_apr ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + /* Fall through. */ + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + CL_ASSERT( !p_cep->p_mad ); + status = __cep_get_mad( p_cep, CM_APR_ATTR_ID, &p_port_cep, &p_cep->p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_apr( p_cep, p_cm_apr, p_cep->p_mad ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_cep->p_mad ); + p_cep->p_mad = NULL; + break; + } + + if( !p_cm_apr->apr_status ) + { + /* + * Copy the temporary AV and port GUID information into + * the alternate path. + */ + p_cep->av[((p_cep->idx_primary + 1) & 0x1)] = p_cep->alt_av; + + /* Update our maximum packet lifetime. */ + p_cep->max_2pkt_life = + max( p_cep->max_2pkt_life, p_cep->alt_2pkt_life ); + + /* Update our timewait time. */ + __calc_timewait( p_cep ); + + /* Fill in the QP attributes. */ + cl_memclr( p_apr, sizeof(ib_qp_mod_t) ); + p_apr->req_state = IB_QPS_RTS; + p_apr->state.rts.opts = + IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE; + p_apr->state.rts.alternate_av = p_cep->alt_av.attr; + p_apr->state.rts.apm_state = IB_APM_REARM; + } + + p_cep->state |= CEP_STATE_PREP; + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_send_apr( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_APR: + case CEP_STATE_PRE_APR_MRA_SENT: + CL_ASSERT( p_cep->p_mad ); + p_port_cep = __get_cep_agent( p_cep ); + if( !p_port_cep ) + { + ib_put_mad( p_cep->p_mad ); + status = IB_INSUFFICIENT_RESOURCES; + } + else + { + p_cep->state = CEP_STATE_ESTABLISHED; + + __cep_send_mad( p_port_cep, p_cep->p_mad ); + status = IB_SUCCESS; + } + p_cep->p_mad = NULL; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata, + IN const uint8_t pdata_len ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case 
CEP_STATE_LAP_MRA_RCVD: + status = __cep_get_mad( p_cep, CM_DREQ_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_dreq( p_cep, p_pdata, pdata_len, p_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__format_dreq returned %s.\n", ib_get_err_str( status )) ); + ib_put_mad( p_mad ); + break; + } + + if( __cep_send_retry( p_port_cep, p_cep, p_mad ) == IB_SUCCESS ) + { + p_cep->state = CEP_STATE_DREQ_SENT; + } + else + { + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + } + + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + cep_agent_t *p_port_cep; + ib_mad_element_t *p_mad; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_cm_drep ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_DREQ_RCVD: + status = __cep_get_mad( p_cep, CM_DREP_ATTR_ID, &p_port_cep, &p_mad ); + if( status != IB_SUCCESS ) + break; + + status = __format_drep( p_cep, p_cm_drep->p_drep_pdata, + p_cm_drep->drep_length, (mad_cm_drep_t*)p_mad->p_mad_buf ); + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + break; + } + + __cep_send_mad( p_port_cep, p_mad ); + p_cep->state = CEP_STATE_TIMEWAIT; + __insert_timewait( p_cep ); + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_migrate( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_ESTABLISHED: + case CEP_STATE_LAP_SENT: + case CEP_STATE_LAP_RCVD: + case CEP_STATE_LAP_MRA_SENT: + case CEP_STATE_LAP_MRA_RCVD: + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_cep->idx_primary++; + p_cep->idx_primary &= 0x1; + status = IB_SUCCESS; + break; + } + + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("No alternate path available.\n") ); + + /* Fall through. 
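+ * Without an armed alternate AV the migrate request is answered with
+ * the same IB_INVALID_STATE as any other disallowed state.  When an
+ * alternate does exist, the successful path above is just a mod-2
+ * toggle of which AV is considered primary:
+ *
+ *   p_cep->idx_primary = (p_cep->idx_primary + 1) & 0x1;   toggles 0 and 1
+ *
+ * the HCA's APM logic performs the actual path migration; this routine
+ * only updates the CM's bookkeeping.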
*/ + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_established( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + CL_ASSERT( p_cep->p_send_mad ); + ib_cancel_mad( p_cep->h_mad_svc, p_cep->p_send_mad ); + p_cep->p_send_mad = NULL; + p_cep->state = CEP_STATE_ESTABLISHED; + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rtr ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_rtr ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_ESTABLISHED: + cl_memclr( p_rtr, sizeof(ib_qp_mod_t) ); + p_rtr->req_state = IB_QPS_RTR; + + /* Required params. */ + p_rtr->state.rtr.rq_psn = p_cep->rq_psn; + p_rtr->state.rtr.dest_qp = p_cep->remote_qpn; + p_rtr->state.rtr.primary_av = p_cep->av[p_cep->idx_primary].attr; + p_rtr->state.rtr.resp_res = p_cep->resp_res; + p_rtr->state.rtr.rnr_nak_timeout = p_cep->rnr_nak_timeout; + + /* Optional params. 
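+ * The alternate AV is returned only if one was negotiated (non-zero
+ * port_guid at the non-primary index).  A caller would typically feed
+ * the result straight into a QP modify; a minimal sketch (error
+ * handling elided):
+ *
+ *   ib_qp_mod_t rtr;
+ *   if( al_cep_get_rtr_attr( h_al, cid, &rtr ) == IB_SUCCESS )
+ *       ib_modify_qp( h_qp, &rtr );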
*/ + p_rtr->state.rtr.opts = 0; + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_rtr->state.rtr.opts |= IB_MOD_QP_ALTERNATE_AV; + p_rtr->state.rtr.alternate_av = + p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr; + } + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_rts ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + switch( p_cep->state ) + { + case CEP_STATE_REQ_SENT: + case CEP_STATE_REQ_RCVD: + case CEP_STATE_REQ_MRA_SENT: + case CEP_STATE_REQ_MRA_RCVD: + case CEP_STATE_REP_SENT: + case CEP_STATE_REP_RCVD: + case CEP_STATE_REP_MRA_SENT: + case CEP_STATE_REP_MRA_RCVD: + case CEP_STATE_PRE_REP: + case CEP_STATE_PRE_REP_MRA_SENT: + case CEP_STATE_ESTABLISHED: + cl_memclr( p_rts, sizeof(ib_qp_mod_t) ); + p_rts->req_state = IB_QPS_RTS; + + /* Required params. */ + p_rts->state.rts.sq_psn = p_cep->sq_psn; + p_rts->state.rts.retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.seq_err_retry_cnt; + p_rts->state.rts.rnr_retry_cnt = + p_cep->av[p_cep->idx_primary].attr.conn.rnr_retry_cnt; + p_rts->state.rts.local_ack_timeout = + p_cep->av[p_cep->idx_primary].attr.conn.local_ack_timeout; + p_rts->state.rts.init_depth = p_cep->init_depth; + + /* Optional params. 
*/ + p_rts->state.rts.opts = 0; + if( p_cep->av[(p_cep->idx_primary + 1) & 0x1].port_guid ) + { + p_rts->state.rts.opts = + IB_MOD_QP_ALTERNATE_AV | IB_MOD_QP_APM_STATE; + p_rts->state.rts.apm_state = IB_APM_REARM; + p_rts->state.rts.alternate_av = + p_cep->av[(p_cep->idx_primary + 1) & 0x1].attr; + } + status = IB_SUCCESS; + break; + + default: + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_CM, + ("Invalid state: %d\n", p_cep->state) ); + status = IB_INVALID_STATE; + break; + } + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + *p_timewait_us = p_cep->timewait_time.QuadPart / 10; + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT void* __ptr64 * p_context, + OUT net32_t* const p_new_cid, + OUT ib_mad_element_t** const pp_mad ) +{ + ib_api_status_t status; + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_new_cid ); + CL_ASSERT( pp_mad ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + *p_context = p_cep->context; + + if( !p_cep->p_mad_head ) + { + p_cep->signalled = FALSE; + status = IB_NOT_DONE; + goto done; + } + + /* Set the MAD. */ + *pp_mad = p_cep->p_mad_head; + p_cep->p_mad_head = p_cep->p_mad_head->p_next; + (*pp_mad)->p_next = NULL; + + /* We're done with the input CEP. Reuse the variable. */ + p_cep = (kcep_t* __ptr64)(*pp_mad)->send_context1; + if( p_cep ) + *p_new_cid = p_cep->cid; + else + *p_new_cid = AL_INVALID_CID; + + status = IB_SUCCESS; + +done: + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return status; +} + + +static void +__cep_cancel_irp( + IN DEVICE_OBJECT* p_dev_obj, + IN IRP* p_irp ) +{ + net32_t cid; + ib_al_handle_t h_al; + KLOCK_QUEUE_HANDLE hdl; + kcep_t *p_cep; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_dev_obj ); + CL_ASSERT( p_irp ); + + cid = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0]; + h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1]; + CL_ASSERT( h_al ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( p_cep ) + __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT ); + + KeReleaseInStackQueuedSpinLock( &hdl ); + + IoReleaseCancelSpinLock( p_irp->CancelIrql ); + + AL_EXIT( AL_DBG_CM ); +} + + +NTSTATUS +al_cep_queue_irp( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN IRP* const p_irp ) +{ + kcep_t *p_cep; + KLOCK_QUEUE_HANDLE hdl; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + CL_ASSERT( p_irp ); + + KeAcquireInStackQueuedSpinLock( &gp_cep_mgr->lock, &hdl ); + p_cep = __lookup_cep( h_al, cid ); + if( !p_cep ) + { + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return STATUS_INVALID_PARAMETER; + } + + /* + * Store the CID and AL handle in the IRP's driver context + * so we can cancel it. 
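+ * The cancel routine receives only the IRP, so it recovers the CEP
+ * exactly as __cep_cancel_irp() does above:
+ *
+ *   cid  = (net32_t)(size_t)p_irp->Tail.Overlay.DriverContext[0];
+ *   h_al = (ib_al_handle_t)p_irp->Tail.Overlay.DriverContext[1];
+ *
+ * hence both values must be stashed before IoSetCancelRoutine() makes
+ * the IRP cancelable.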
+ */ + p_irp->Tail.Overlay.DriverContext[0] = (void*)(size_t)cid; + p_irp->Tail.Overlay.DriverContext[1] = (void*)h_al; +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, __cep_cancel_irp ); +#pragma warning(pop) + IoMarkIrpPending( p_irp ); + + /* Always dequeue and complete whatever IRP is there. */ + __cep_complete_irp( p_cep, STATUS_CANCELLED, IO_NO_INCREMENT ); + + InterlockedExchangePointer( &p_cep->p_irp, p_irp ); + + /* Complete the IRP if there are MADs to be reaped. */ + if( p_cep->p_mad_head ) + __cep_complete_irp( p_cep, STATUS_SUCCESS, IO_NETWORK_INCREMENT ); + + KeReleaseInStackQueuedSpinLock( &hdl ); + AL_EXIT( AL_DBG_CM ); + return STATUS_PENDING; +} + + +void +al_cep_cleanup_al( + IN const ib_al_handle_t h_al ) +{ + cl_list_item_t *p_item; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + /* Destroy all CEPs associated with the input instance of AL. */ + cl_spinlock_acquire( &h_al->obj.lock ); + for( p_item = cl_qlist_head( &h_al->cep_list ); + p_item != cl_qlist_end( &h_al->cep_list ); + p_item = cl_qlist_head( &h_al->cep_list ) ) + { + /* + * Note that we don't walk the list - we can't hold the AL + * lock when cleaning up its CEPs because the cleanup path + * takes the CEP's lock. We always want to take the CEP + * before the AL lock to prevent any possibilities of deadlock. + * + * So we just get the CID, and then release the AL lock and try to + * destroy. This should unbind the CEP from the AL instance and + * remove it from the list, allowing the next CEP to be cleaned up + * in the next pass through. + */ + cid = PARENT_STRUCT( p_item, kcep_t, al_item )->cid; + cl_spinlock_release( &h_al->obj.lock ); + al_destroy_cep( h_al, cid, NULL ); + cl_spinlock_acquire( &h_al->obj.lock ); + } + cl_spinlock_release( &h_al->obj.lock ); + + AL_EXIT( AL_DBG_CM ); +} diff --git a/branches/Ndi/core/al/kernel/al_dev.c b/branches/Ndi/core/al/kernel/al_dev.c new file mode 100644 index 00000000..1969830b --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_dev.c @@ -0,0 +1,566 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_common.h" +#include "al_cq.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_dev.tmh" +#endif +#include "al_dev.h" +#include "al_qp.h" +#include "al_mgr.h" +#include "al_proxy.h" + + + +static cl_status_t +__proxy_reg_pnp( + IN al_dev_open_context_t *p_context ); + +static void +__proxy_cancel_cblists( + IN al_dev_open_context_t *p_context ); + + + + +static void +__construct_open_context( + IN al_dev_open_context_t *p_context ) +{ + cl_event_construct( &p_context->close_event ); + + cl_qpool_construct( &p_context->cb_pool ); + cl_spinlock_construct( &p_context->cb_pool_lock ); + + cl_qlist_init( &p_context->cm_cb_list ); + cl_qlist_init( &p_context->comp_cb_list ); + cl_qlist_init( &p_context->misc_cb_list ); + cl_spinlock_construct( &p_context->cb_lock ); + cl_mutex_construct( &p_context->pnp_mutex ); +} + + + +/* + * Initialize all objects used by the per client open context. + */ +static cl_status_t +__init_open_context( + IN al_dev_open_context_t *p_context ) +{ + cl_status_t cl_status; + + cl_status = cl_event_init( &p_context->close_event, FALSE ); + if( cl_status != CL_SUCCESS ) + return cl_status; + + /* Allocate pool for storing callback info or requests. */ + cl_status = cl_qpool_init( &p_context->cb_pool, + AL_CB_POOL_START_SIZE, 0, AL_CB_POOL_GROW_SIZE, + sizeof(al_proxy_cb_info_t), NULL, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + return cl_status; + + cl_status = cl_spinlock_init( &p_context->cb_pool_lock ); + if( cl_status != CL_SUCCESS ) + return cl_status; + + cl_status = cl_spinlock_init( &p_context->cb_lock ); + if( cl_status != CL_SUCCESS ) + return cl_status; + + cl_status = cl_mutex_init( &p_context->pnp_mutex ); + if( cl_status != CL_SUCCESS ) + return cl_status; + + return CL_SUCCESS; +} + + + +static void +__destroy_open_context( + IN al_dev_open_context_t *p_context ) +{ + cl_event_destroy( &p_context->close_event ); + + cl_qpool_destroy( &p_context->cb_pool ); + cl_spinlock_destroy( &p_context->cb_pool_lock ); + cl_spinlock_destroy( &p_context->cb_lock ); + cl_mutex_destroy( &p_context->pnp_mutex ); +} + + + +cl_status_t +al_dev_open( + IN cl_ioctl_handle_t h_ioctl ) +{ + al_dev_open_context_t *p_context; + ib_api_status_t status; + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + ULONG *p_ver; + + AL_ENTER( AL_DBG_DEV ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + + p_ver = cl_ioctl_in_buf( h_ioctl ); + + if( p_io_stack->FileObject->FsContext || + cl_ioctl_in_size( h_ioctl ) != sizeof(ULONG) || + !p_ver || + cl_ioctl_out_size( h_ioctl ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("context already exists or bad parameters.\n") ); + return CL_INVALID_PARAMETER; + } + + if( *p_ver != AL_IOCTL_VERSION ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Unsupported client version: %d\n", *p_ver) ); + return CL_INVALID_PARAMETER; + } + + /* Allocate the client's context structure. */ + p_context = (al_dev_open_context_t*) + cl_zalloc( sizeof(al_dev_open_context_t) ); + if( !p_context ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_malloc( %d ) failed.\n", sizeof(al_dev_open_context_t)) ); + return CL_INSUFFICIENT_MEMORY; + } + + /* Construct the open context to allow destruction. */ + __construct_open_context( p_context ); + + /* Initialize the open context elements. 
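+ * __construct_open_context() above only puts the members into a state
+ * that is safe to destroy, so __destroy_open_context() may be called no
+ * matter how far initialization gets.  This is the usual complib
+ * two-phase idiom, e.g.:
+ *
+ *   cl_spinlock_construct( &lock );                  destroyable from here
+ *   if( cl_spinlock_init( &lock ) != CL_SUCCESS )
+ *       cl_spinlock_destroy( &lock );                still well-defined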
*/ + cl_status = __init_open_context( p_context ); + if( cl_status != CL_SUCCESS ) + { + __destroy_open_context( p_context ); + cl_free( p_context ); + return cl_status; + } + + /* Open an internal AL instance for this process. */ + status = ib_open_al( &p_context->h_al ); + if( status == IB_SUCCESS ) + { + /* Register for PnP events. */ + status = __proxy_reg_pnp( p_context ); + } + + /* Make sure that we were able to open AL and register for PnP. */ + if( status == IB_SUCCESS ) + { + /* + * Store the reference from the AL instance back to this + * open context. This allows using the user-mode context + * for resource creation. + */ + p_context->h_al->p_context = p_context; + /* We successfully opened the device. */ + p_io_stack->FileObject->FsContext = p_context; + } + else + { + if( p_context->h_al ) + ib_close_al( p_context->h_al ); + __destroy_open_context( p_context ); + cl_free( p_context ); + cl_status = CL_INSUFFICIENT_RESOURCES; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} + + + +/* + * Called by al_dev_open(). This registers for PnP events + * on behalf of the user process (UAL). It uses the implicit global + * AL instance created by the AL manager. PnP events are propagated + * to the UAL automatically from the time the AL device is opened + * until the process exits. + */ +static ib_api_status_t +__proxy_reg_pnp( + IN al_dev_open_context_t *p_context ) +{ + ib_pnp_req_t pnp_req; + ib_pnp_handle_t h_pnp; + ib_api_status_t status; + + /* Register for PnP events. */ + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_CA | IB_PNP_FLAG_REG_COMPLETE; + pnp_req.pnp_context = p_context; + pnp_req.pfn_pnp_cb = proxy_pnp_ca_cb; + + /* No need to track the registration. We'll deregister when closing AL. */ + status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp ); + if( status != IB_SUCCESS ) + return status; + + /* Register for port events. */ + pnp_req.pfn_pnp_cb = proxy_pnp_port_cb; + pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_COMPLETE; + status = ib_reg_pnp( p_context->h_al, &pnp_req, &h_pnp ); + + return status; +} + + + +/* + * Clean up the handle map, removing all mappings and performing the + * per-type cleanup each mapping requires. + */ +static void +__proxy_cleanup_map( + IN al_dev_open_context_t *p_context ) +{ + al_handle_t *p_h; + size_t i; + + AL_ENTER( AL_DBG_DEV ); + + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + for( i = 0; i < cl_vector_get_size( &p_context->h_al->hdl_vector ); i++ ) + { + p_h = (al_handle_t*) + cl_vector_get_ptr( &p_context->h_al->hdl_vector, i ); + + switch( AL_BASE_TYPE( p_h->type ) ) + { + /* Return any MADs not reported to the user. */ + case AL_OBJ_TYPE_H_MAD: + ib_put_mad( (ib_mad_element_t*)p_h->p_obj ); + al_hdl_free( p_context->h_al, i ); + break; + + case AL_OBJ_TYPE_H_CA_ATTR: + /* Release a saved CA attribute. */ + cl_free( p_h->p_obj ); + al_hdl_free( p_context->h_al, i ); + break; + + case AL_OBJ_TYPE_H_SA_REQ: + al_cancel_sa_req( (al_sa_req_t*)p_h->p_obj ); + break; + + case AL_OBJ_TYPE_H_PNP_EVENT: + cl_event_signal( &((proxy_pnp_evt_t*)p_h->p_obj)->event ); + break; + + default: + /* Nothing else to do for other handle types. */ + break; + } + } + cl_spinlock_release( &p_context->h_al->obj.lock ); + + AL_EXIT( AL_DBG_DEV ); +} + + +cl_status_t +al_dev_close( + IN cl_ioctl_handle_t h_ioctl ) +{ + al_dev_open_context_t *p_context; + IO_STACK_LOCATION *p_io_stack; + + AL_ENTER( AL_DBG_DEV ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + + /* Determine if the client closed the al_handle. 
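+ * Teardown below is ordered deliberately: set p_context->closing so no
+ * new work is accepted, cancel any parked callback IOCTLs so their
+ * threads unwind, then block on close_event until ref_cnt drains before
+ * tearing anything down.  In outline:
+ *
+ *   p_context->closing = TRUE;                    gate new requests
+ *   al_dev_cancel_ioctl( ... );                   flush parked IOCTLs
+ *   while( ref_cnt ) wait( close_event );         drain active users
+ *   cleanup callbacks, close AL, free the context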
*/ + p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext; + if( !p_context ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Client closed with a null open context.\n") ); + return CL_SUCCESS; + } + if( p_io_stack->FileObject->FsContext2 ) + { + /* Not the main file object - ignore. */ + AL_EXIT( AL_DBG_DEV ); + return CL_SUCCESS; + } + + /* Mark that we're closing this device. */ + p_context->closing = TRUE; + + /* Flush any pending IOCTLs in case user-mode threads died on us. */ + if( p_context->h_cm_ioctl ) + al_dev_cancel_ioctl( p_context->h_cm_ioctl ); + if( p_context->h_comp_ioctl ) + al_dev_cancel_ioctl( p_context->h_comp_ioctl ); + if( p_context->h_misc_ioctl ) + al_dev_cancel_ioctl( p_context->h_misc_ioctl ); + + while( p_context->ref_cnt ) + { +#ifdef _DEBUG_ + cl_status_t cl_status; + + cl_status = cl_event_wait_on( &p_context->close_event, 1000, FALSE ); + ASSERT( cl_status == CL_SUCCESS ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Waiting on ref_cnt timed out!\n") ); + break; + } +#else + cl_event_wait_on( &p_context->close_event, EVENT_NO_TIMEOUT, FALSE ); +#endif + } + + /* Cleanup any leftover callback resources. */ + __proxy_cancel_cblists( p_context ); + + /* Close the AL instance for this process. */ + if( p_context->h_al ) + { + /* Cleanup all user to kernel handle mappings. */ + __proxy_cleanup_map( p_context ); + + ib_close_al( p_context->h_al ); + p_context->h_al = NULL; + } + + /* Destroy the open context now. */ + __destroy_open_context( p_context ); + cl_free( p_context ); + + AL_EXIT( AL_DBG_DEV ); + return CL_SUCCESS; +} + + + +/* + * Remove all callbacks on the given callback queue and return them to + * the callback pool. + */ +static void +__proxy_dq_cblist( + IN al_dev_open_context_t *p_context, + IN cl_qlist_t *p_cblist ) +{ + cl_list_item_t *p_list_item; + al_proxy_cb_info_t *p_cb_info; + + cl_spinlock_acquire( &p_context->cb_lock ); + for( p_list_item = cl_qlist_remove_head( p_cblist ); + p_list_item != cl_qlist_end( p_cblist ); + p_list_item = cl_qlist_remove_head( p_cblist ) ) + { + p_cb_info = (al_proxy_cb_info_t*)p_list_item; + if( p_cb_info->p_al_obj ) + deref_al_obj( p_cb_info->p_al_obj ); + proxy_cb_put( p_cb_info ); + } + cl_spinlock_release( &p_context->cb_lock ); +} + + + +/* + * Remove all queued callbacks from all callback lists. + */ +static void +__proxy_cancel_cblists( + IN al_dev_open_context_t *p_context ) +{ + __proxy_dq_cblist( p_context, &p_context->cm_cb_list ); + __proxy_dq_cblist( p_context, &p_context->comp_cb_list ); + __proxy_dq_cblist( p_context, &p_context->misc_cb_list ); +} + + +cl_status_t +al_dev_ioctl( + IN cl_ioctl_handle_t h_ioctl ) +{ + cl_status_t cl_status; + size_t ret_bytes = 0; + void *p_open_context; + IO_STACK_LOCATION *p_io_stack; + + AL_ENTER( AL_DBG_DEV ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_open_context = p_io_stack->FileObject->FsContext; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_DEV, + ("al_dev_ioctl: buf_size (%d) p_buf (%016I64x).\n", + cl_ioctl_in_size( h_ioctl ), (LONG_PTR)cl_ioctl_in_buf( h_ioctl )) ); + + /* Process the ioctl command. 
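+ * Dispatch below is by IOCTL control-code class: each IS_*_IOCTL macro
+ * tests which function-code range the control code falls in, so adding
+ * a new class means reserving a range and one more else-if.  The macros
+ * are assumed to reduce to simple range checks along the lines of
+ * (illustrative, not the actual definitions):
+ *
+ *   IS_CEP_IOCTL(c):  AL_CEP_OPS_FIRST <= func(c) < AL_CEP_OPS_LAST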
*/ + if( IS_AL_PROXY_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = proxy_ioctl( h_ioctl, &ret_bytes ); + else if( IS_VERBS_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = verbs_ioctl( h_ioctl, &ret_bytes ); + //else if( IS_CM_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + // cl_status = cm_ioctl( h_ioctl, &ret_bytes ); + else if( IS_CEP_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = cep_ioctl( h_ioctl, &ret_bytes ); + else if( IS_AL_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = al_ioctl( h_ioctl, &ret_bytes ); + else if( IS_SUBNET_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = subnet_ioctl( h_ioctl, &ret_bytes ); + else if( IS_IOC_IOCTL(cl_ioctl_ctl_code( h_ioctl )) ) + cl_status = ioc_ioctl( h_ioctl, &ret_bytes ); + else + cl_status = CL_INVALID_REQUEST; + + switch( cl_status ) + { + case CL_COMPLETED: + /* Flip the status since the IOCTL was completed. */ + cl_status = CL_SUCCESS; + /* Fall through */ + case CL_PENDING: + break; + case CL_INVALID_REQUEST: + /* + * In Windows, Driver Verifier sends bogus IOCTLs to the device. + * These must be passed down the device stack, and so cannot be + * completed in the IOCTL handler. They are properly cleaned up, + * though no data is returned to the user. + */ + break; + default: + cl_ioctl_complete( h_ioctl, cl_status, ret_bytes ); + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} + + + +/* + * Cancel any pending IOCTL calls for the specified type. + * This routine is also called when closing the device. + */ +void +al_dev_cancel_ioctl( + IN cl_ioctl_handle_t h_ioctl ) +{ + al_dev_open_context_t *p_context; + cl_ioctl_handle_t *ph_ioctl; + PIO_STACK_LOCATION p_io_stack; + + /* + * Search for the IOCTL in the process-specific queue and dequeue it, + * if found. + */ + AL_ENTER( AL_DBG_DEV ); + + /* Get the stack location. */ + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + + p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext; + ASSERT( p_context ); + + /* Clear the IOCTL. */ + cl_spinlock_acquire( &p_context->cb_lock ); + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_GET_CM_CB_INFO: + ph_ioctl = &p_context->h_cm_ioctl; + break; + case UAL_GET_COMP_CB_INFO: + ph_ioctl = &p_context->h_comp_ioctl; + break; + case UAL_GET_MISC_CB_INFO: + ph_ioctl = &p_context->h_misc_ioctl; + break; + default: + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid CB type\n") ); + ph_ioctl = NULL; + break; + } + + if( ph_ioctl && *ph_ioctl == h_ioctl ) + { + *ph_ioctl = NULL; +#pragma warning(push, 3) + IoSetCancelRoutine( h_ioctl, NULL ); +#pragma warning(pop) + + /* Complete the IOCTL. */ + cl_ioctl_complete( h_ioctl, CL_CANCELED, 0 ); + proxy_context_deref( p_context ); + } + cl_spinlock_release( &p_context->cb_lock ); + + AL_EXIT( AL_DBG_DEV ); +} + + +void +al_dev_cancel_io( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + AL_ENTER( AL_DBG_DEV ); + + UNUSED_PARAM( p_dev_obj ); + + al_dev_cancel_ioctl( p_irp ); + + IoReleaseCancelSpinLock( p_irp->CancelIrql ); + + AL_EXIT( AL_DBG_DEV ); +} diff --git a/branches/Ndi/core/al/kernel/al_driver.h b/branches/Ndi/core/al/kernel/al_driver.h new file mode 100644 index 00000000..d69d436f --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_driver.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined _AL_DRIVER_H_ +#define _AL_DRIVER_H_ + +#include "al_debug.h" +#include "iba/ib_al_ifc.h" +#include "complib/cl_types.h" +#include "complib/cl_atomic.h" +#include "complib/cl_debug.h" +#include "complib/cl_mutex.h" +#include "complib/cl_qlist.h" +#include "complib/cl_ptr_vector.h" +#include "complib/cl_pnp_po.h" +/* Safe string functions. */ +#if WINVER == 0x500 +/* + * Windows 2000 doesn't support the inline version of safe strings. + * Force the use of the library version of safe strings. + */ +#define NTSTRSAFE_LIB +#endif +#include + + +/* + * Main header for Access Layer driver. + */ + +/* + * ALLOC_PRAGMA sections: + * PAGE + * Default pagable code. Won't be locked in memory. + * + * PAGE_PNP + * Code that needs to be locked in memory when the device is + * in the paging, crash dump, or hibernation path. + */ + + +/* + * Device extension for the device object that serves as entry point for + * the interface and IOCTL requests. + */ +typedef struct _al_fdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + /* + * Device power map returned by the bus driver for the device, used + * when sending IRP_MN_SET_POWER for device state in response to + * IRP_MN_SET_POWER for system state. + */ + DEVICE_POWER_STATE po_state[PowerSystemMaximum]; + + /* Mutex to protect the CA list. */ +// FAST_MUTEX caMutex; + + /* List of CAs. */ +// cl_qlist_t caList; + + /* + * Interface names are generated by IoRegisterDeviceInterface. + * Interface name for the upper edge. + */ + UNICODE_STRING al_ifc_name; + /* Interface name for the bottom edge. */ + UNICODE_STRING ci_ifc_name; + + /* Number of references on the upper interface. */ + atomic32_t n_al_ifc_ref; + /* Number of references on the lower interface. */ + atomic32_t n_ci_ifc_ref; + +} al_fdo_ext_t; +// +// +//typedef struct _AlCaContext +//{ +// cl_list_item_t listItem; +// void *ibtContext; +// const IB_VERBS_INTERFACE_STANDARD2 *pCi; +// +// /* Number of ports, used to size the DEVICE_RELATIONS structure. */ +// uint32_t nPorts; +// +// DEVICE_OBJECT *pHcaPdo; +// +//} AlCaInfo_t; + + +/* + * Device extension for bus driver PDOs. + */ +typedef struct _al_pdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + cl_list_item_t list_item; + + POWER_STATE dev_po_state; + + /* + * Pointer to the bus root device extension. Used to manage access to + * child PDO pointer vector when a child is removed politely. 
+ */ + al_fdo_ext_t *p_parent_ext; + + /* + * The following two flags are exclusively set, but can both be FALSE. + * Flag that indicates whether the device is present in the system or not. + * This affects how a IRP_MN_REMOVE_DEVICE IRP is handled for a child PDO. + * This flag is cleared when: + * - an HCA (for IPoIB devices) is removed from the system for all port + * devices loaded for that HCA + * - an IOU is reported as removed by the CIA. + */ + boolean_t b_present; + + /* + * Flag that indicates whether the device has been reported to the PnP + * manager as having been removed. That is, the device was reported + * in a previous BusRelations query and not in a subsequent one. + * This flag is set when + * - the device is in the surprise remove state when the parent bus + * device is removed + * - the device is found to be not present during a BusRelations query + * and thus not reported. + */ + boolean_t b_reported_missing; + +} al_pdo_ext_t; + + +/* + * Device extension for IPoIB port PDOs. + */ +typedef struct _al_port_ext +{ + al_pdo_ext_t pdo; + + net64_t port_guid; + uint32_t n_port; + + /* Number of references on the upper interface. */ + atomic32_t n_ifc_ref; + + ib_ca_handle_t h_ca; + +} al_port_ext_t; + + +/* + * Global Driver parameters. + */ +typedef struct _al_globals +{ + /* Flag to control loading of Ip Over Ib driver for each HCA port. */ + uint32_t b_report_port_nic; + + /* Driver object. Used for registering of Plug and Play notifications. */ + DRIVER_OBJECT *p_driver_obj; + + /* + * Mutex protecting the bus extension pointer. This allows getting a local + * copy of the pointer and taking a reference on the device in one atomic + * operation. + */ + cl_mutex_t ext_mutex; + /* Pointer to the one and only bus root. */ + al_fdo_ext_t *p_bus_ext; + +} al_globals_t; + + +extern al_globals_t al_globals; + + +#endif /* !defined _AL_DRIVER_H_ */ diff --git a/branches/Ndi/core/al/kernel/al_exports.def b/branches/Ndi/core/al/kernel/al_exports.def new file mode 100644 index 00000000..217e2fde --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_exports.def @@ -0,0 +1,7 @@ +LIBRARY ibal.sys + +EXPORTS +; DllInitialize and DllUnload must be exported for the OS reference counting to +; work, and must be private for the compiler to accept them. +DllInitialize private +DllUnload private diff --git a/branches/Ndi/core/al/kernel/al_fmr_pool.c b/branches/Ndi/core/al/kernel/al_fmr_pool.c new file mode 100644 index 00000000..cf927684 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_fmr_pool.c @@ -0,0 +1,749 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $ + */ + + + + +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_fmr_pool.tmh" +#endif + +#include "al_fmr_pool.h" +#include "al_mr.h" +#include "al_pd.h" + +#define hash_mix(a, b, c) \ + { \ + a -= b; a -= c; a ^= (c>>13); \ + b -= c; b -= a; b ^= (a<<8); \ + c -= a; c -= b; c ^= (b>>13); \ + a -= b; a -= c; a ^= (c>>12); \ + b -= c; b -= a; b ^= (a<<16); \ + c -= a; c -= b; c ^= (b>>5); \ + a -= b; a -= c; a ^= (c>>3); \ + b -= c; b -= a; b ^= (a<<10); \ + c -= a; c -= b; c ^= (b>>15); \ +} + +static inline uint32_t hash_2words(uint32_t a, uint32_t b, uint32_t c) +{ + a += 0x9e3779b9; + b += 0x9e3779b9; + hash_mix(a, b, c); + return c; +} + +enum { + IB_FMR_MAX_REMAPS = 32, + + IB_FMR_HASH_BITS = 8, + IB_FMR_HASH_SIZE = 1 << IB_FMR_HASH_BITS, + IB_FMR_HASH_MASK = IB_FMR_HASH_SIZE - 1 +}; + + +static inline uint32_t __fmr_hash(uint64_t first_page) +{ + return hash_2words((uint32_t) first_page, (uint32_t) (first_page >> 32), 0) & + (IB_FMR_HASH_SIZE - 1); +} + +/* Caller must hold pool_lock */ +static inline mlnx_fmr_pool_element_t *__fmr_cache_lookup( + mlnx_fmr_pool_t *p_pool, + const uint64_t* const page_list, + int page_list_len, + uint64_t io_virtual_address) +{ + cl_qlist_t *bucket; + cl_list_item_t *p_list_item; + mlnx_fmr_pool_element_t *p_fmr_el; + + if (!p_pool->cache_bucket) + return NULL; + + bucket = p_pool->cache_bucket + __fmr_hash(*page_list); + + for( p_list_item = cl_qlist_head( bucket ); + p_list_item != cl_qlist_end( bucket); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, cache_node ); + if (io_virtual_address == p_fmr_el->io_virtual_address && + page_list_len == p_fmr_el->page_list_len && + !memcmp(page_list, p_fmr_el->page_list, page_list_len * sizeof *page_list)) + return p_fmr_el; + } + + return NULL; +} + + +static void +__fmr_pool_batch_release(mlnx_fmr_pool_t *p_pool) +{ + ib_api_status_t status; + mlnx_fmr_pool_element_t *p_fmr_el; + mlnx_fmr_handle_t h_fmr = NULL; + cl_qlist_t unmap_list; + cl_list_item_t *p_list_item; + cl_qlist_t *bucket; + + cl_qlist_init(&unmap_list); + + cl_spinlock_acquire(&p_pool->pool_lock); + + for( p_list_item = cl_qlist_head( &p_pool->dirty_list ); + p_list_item != cl_qlist_end( &p_pool->dirty_list); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item ); + if (p_fmr_el->in_cash) + { + p_fmr_el->in_cash = FALSE; + bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]); + cl_qlist_remove_item( bucket, &p_fmr_el->cache_node ); + } + p_fmr_el->remap_count = 0; + p_fmr_el->h_fmr->p_next = h_fmr; + h_fmr = p_fmr_el->h_fmr; + if (p_fmr_el->ref_count !=0) + { + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("Unmapping FMR 0x%p with ref count %d", + p_fmr_el, p_fmr_el->ref_count)); + } + } + + cl_qlist_insert_list_head(&unmap_list, &p_pool->dirty_list ); + cl_qlist_init(&p_pool->dirty_list); + p_pool->dirty_len = 0; + + cl_spinlock_release( &p_pool->pool_lock ); + + if (cl_is_qlist_empty(&unmap_list)) { + return; + } + + status = mlnx_unmap_fmr(h_fmr); + if (status != IB_SUCCESS) + 
AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s", ib_get_err_str(status))); + + + cl_spinlock_acquire( &p_pool->pool_lock ); + cl_qlist_insert_list_head(&p_pool->free_list,&unmap_list); + cl_spinlock_release( &p_pool->pool_lock ); +} + + + +static int +__fmr_cleanup_thread(void * p_pool_ptr) +{ + mlnx_fmr_pool_t *p_pool = p_pool_ptr; + atomic32_t flush_req; + int forever = 1; + + do { + flush_req = 0; + if (p_pool->flush_req || p_pool->dirty_len >= p_pool->dirty_watermark) + { + __fmr_pool_batch_release(p_pool); + + if (p_pool->flush_req) + { + cl_event_signal(&p_pool->flush_done_event); + flush_req = cl_atomic_dec( &p_pool->flush_req ); + } + + if (p_pool->flush_function) + p_pool->flush_function( (mlnx_fmr_pool_handle_t)p_pool, p_pool->flush_arg); + } + + if (!flush_req) + { + if (p_pool->should_stop) + break; + cl_event_wait_on(&p_pool->do_flush_event, EVENT_NO_TIMEOUT, TRUE); + } + } while (forever); + + return 0; +} + +/* + * Destroying the pool. + */ +static void +__destroying_fmr_pool( + IN al_obj_t* p_obj ) +{ + mlnx_fmr_pool_t* p_pool; + + CL_ASSERT( p_obj ); + p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj ); + AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool)); + + // notify cleaning thread to exit + cl_atomic_inc( &p_pool->should_stop ); + cl_event_signal(&p_pool->do_flush_event); + cl_thread_destroy(&p_pool->thread); +} + +/* + * Cleanup the pool. + */ +static void +__cleanup_fmr_pool( + IN al_obj_t* p_obj ) +{ + int i=0; + ib_api_status_t status = IB_SUCCESS; + mlnx_fmr_pool_t* p_pool; + mlnx_fmr_pool_element_t *p_fmr_el; + cl_list_item_t *p_list_item; + cl_qlist_t *bucket; + + CL_ASSERT( p_obj ); + p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj ); + AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool %p\n", p_pool)); + + // cleanup the dirty list stuff + __fmr_pool_batch_release(p_pool); + + cl_spinlock_acquire(&p_pool->pool_lock); + + // merge the rest with free list + for( p_list_item = cl_qlist_head( &p_pool->rest_list ); + p_list_item != cl_qlist_end( &p_pool->rest_list ); + p_list_item = cl_qlist_head( &p_pool->rest_list ) ) + { + p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item ); + if (p_fmr_el->in_cash) + { + p_fmr_el->in_cash = FALSE; + bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]); + cl_qlist_remove_item( bucket, &p_fmr_el->cache_node ); + } + cl_qlist_remove_item(&p_pool->rest_list, p_list_item); + cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->free_list; + } + + // cleanup the free list + for( p_list_item = cl_qlist_head( &p_pool->free_list ); + p_list_item != cl_qlist_end( &p_pool->free_list ); + p_list_item = cl_qlist_head( &p_pool->free_list ) ) + { + p_fmr_el = PARENT_STRUCT( p_list_item, mlnx_fmr_pool_element_t, list_item); + cl_spinlock_release( &p_pool->pool_lock ); + if (p_fmr_el->remap_count) + { + p_fmr_el->h_fmr->p_next = NULL; + status = mlnx_unmap_fmr(p_fmr_el->h_fmr); + if (status != IB_SUCCESS) + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_unmap_fmr returned %s\n", ib_get_err_str(status))); + + } + status = mlnx_destroy_fmr(p_fmr_el->h_fmr); + if (status != IB_SUCCESS) + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("mlnx_destroy_fmr returned %s\n", ib_get_err_str(status))); + + cl_spinlock_acquire(&p_pool->pool_lock); + cl_qlist_remove_item(&p_pool->free_list, p_list_item); + cl_free(p_fmr_el); + ++i; + } + + cl_spinlock_release( &p_pool->pool_lock ); + + if (i < p_pool->pool_size) + 
AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("pool still has %d regions registered\n", + p_pool->pool_size - i)); +} + + +/* + * Free the pool. + */ +static void +__free_fmr_pool( + IN al_obj_t* p_obj ) +{ + mlnx_fmr_pool_t* p_pool; + + CL_ASSERT( p_obj ); + p_pool = PARENT_STRUCT( p_obj, mlnx_fmr_pool_t, obj ); + + cl_spinlock_destroy(&p_pool->pool_lock); + destroy_al_obj( &p_pool->obj ); + if (p_pool->cache_bucket) + cl_free( p_pool->cache_bucket ); + cl_free( p_pool ); + AL_PRINT(TRACE_LEVEL_ERROR, AL_DBG_FMR_POOL, ("__free_pool: pool %p\n", p_pool)); +} + + + +ib_api_status_t +mlnx_create_fmr_pool( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_pool_create_t *p_fmr_pool_attr, + OUT mlnx_fmr_pool_handle_t* const ph_pool ) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_fmr_pool_t *p_pool; + int i; + int max_remaps; + cl_status_t cl_status; + mlnx_fmr_pool_element_t *p_fmr_el; + + + AL_ENTER( AL_DBG_FMR_POOL ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + status = IB_INVALID_AL_HANDLE; + goto end; + } + + if( !ph_pool ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + status = IB_INVALID_PARAMETER; + goto end; + } + + if( !p_fmr_pool_attr || !p_fmr_pool_attr->dirty_watermark) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + status = IB_INVALID_PARAMETER; + goto end; + } + + if (!h_pd->obj.p_ci_ca || !h_pd->obj.p_ci_ca->p_pnp_attr) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_STATE\n") ); + status = IB_INVALID_STATE; + goto end; + } + + // check whether the device support FMR + if (!h_pd->obj.p_ci_ca->verbs.alloc_mlnx_fmr|| !h_pd->obj.p_ci_ca->verbs.dealloc_mlnx_fmr || + !h_pd->obj.p_ci_ca->verbs.map_phys_mlnx_fmr || !h_pd->obj.p_ci_ca->verbs.unmap_mlnx_fmr) { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Device does not support fast memory regions")); + status = IB_UNSUPPORTED; + goto end; + } + + if (!h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr) + { + max_remaps = IB_FMR_MAX_REMAPS; + } + else + { + max_remaps = h_pd->obj.p_ci_ca->p_pnp_attr->max_map_per_fmr; + } + + // allocate pool object + p_pool = cl_zalloc( sizeof( mlnx_fmr_pool_t ) ); + if( !p_pool ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Couldn't allocate pool struct")); + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_pool_obj; + } + + // construct pool objects + cl_spinlock_construct( &p_pool->pool_lock); + cl_thread_construct(&p_pool->thread); + cl_event_construct(&p_pool->do_flush_event); + cl_event_construct(&p_pool->flush_done_event); + + + // init pool objects + p_pool->pool_size = 0; + p_pool->max_pages = p_fmr_pool_attr->max_pages_per_fmr; + p_pool->max_remaps = max_remaps; + p_pool->dirty_watermark = p_fmr_pool_attr->dirty_watermark; + p_pool->dirty_len = 0; + p_pool->cache_bucket = NULL; + p_pool->flush_function = p_fmr_pool_attr->flush_function; + p_pool->flush_arg = p_fmr_pool_attr->flush_arg; + cl_qlist_init(&p_pool->dirty_list); + cl_qlist_init(&p_pool->free_list); + cl_qlist_init(&p_pool->rest_list); + + if (p_fmr_pool_attr->cache) { + p_pool->cache_bucket = + cl_zalloc(IB_FMR_HASH_SIZE * sizeof *p_pool->cache_bucket); + if (!p_pool->cache_bucket) { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate cache in pool")); + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_cache; + } + + for (i = 0; i < IB_FMR_HASH_SIZE; ++i) + cl_qlist_init(p_pool->cache_bucket + i); + } + + cl_status = cl_spinlock_init( 
&p_pool->pool_lock );
+	if( cl_status != CL_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_spinlock_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	cl_status = cl_event_init( &p_pool->do_flush_event, FALSE );
+	if( cl_status != CL_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	cl_status = cl_event_init( &p_pool->flush_done_event, FALSE );
+	if( cl_status != CL_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_event_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	cl_status = cl_thread_init( &p_pool->thread, __fmr_cleanup_thread, p_pool, "fmr_cleanup" );
+	if( cl_status != CL_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed cl_thread_init"));
+		status = IB_ERROR;
+		goto err_pool_init;
+	}
+
+	{
+		mlnx_fmr_create_t fmr_attr;
+
+		fmr_attr.max_pages = p_fmr_pool_attr->max_pages_per_fmr;
+		fmr_attr.max_maps = p_pool->max_remaps;
+		fmr_attr.page_size = p_fmr_pool_attr->page_size;
+		fmr_attr.access_ctrl = p_fmr_pool_attr->access_ctrl;
+
+		for (i = 0; i < p_fmr_pool_attr->pool_size; ++i)
+		{
+			p_fmr_el = cl_zalloc(sizeof(mlnx_fmr_pool_element_t) +
+				p_fmr_pool_attr->max_pages_per_fmr * sizeof(uint64_t));
+			if (!p_fmr_el)
+			{
+				AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+					("Failed to allocate struct for FMR %d\n", i));
+				status = IB_INSUFFICIENT_MEMORY;
+				goto err_alloc_cache_el;
+			}
+
+			p_fmr_el->h_pool = (mlnx_fmr_pool_handle_t)p_pool;
+			p_fmr_el->remap_count = 0;
+			p_fmr_el->ref_count = 0;
+
+			status = mlnx_create_fmr(h_pd, &fmr_attr, &p_fmr_el->h_fmr);
+			if (status != IB_SUCCESS)
+			{
+				AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+					("mlnx_create_fmr failed for FMR %d with status %s.\n", i, ib_get_err_str(status)));
+				cl_free(p_fmr_el);
+				goto err_alloc_cache_el;
+			}
+
+			cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item);
+			p_fmr_el->p_cur_list = &p_pool->free_list;
+			++p_pool->pool_size;
+		}
+	}
+
+	/* Do IBAL stuff for creating and initializing the object */
+	construct_al_obj( &p_pool->obj, AL_OBJ_TYPE_H_FMR_POOL);
+
+	status = init_al_obj( &p_pool->obj, p_pool, FALSE, __destroying_fmr_pool, __cleanup_fmr_pool, __free_fmr_pool );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("init_al_obj failed with status %s.\n", ib_get_err_str(status)) );
+		goto err_init_al_obj;
+	}
+
+	/* Attach the pool to the AL object. */
+	status = attach_al_obj( &h_pd->obj, &p_pool->obj );
+	if( status != IB_SUCCESS )
+	{
+		ref_al_obj( &p_pool->obj );
+		p_pool->obj.pfn_destroy( &p_pool->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		goto end;
+	}
+
+	/* Release the reference taken in init_al_obj */
+	deref_al_obj( &p_pool->obj );
+
+	*ph_pool = p_pool;
+	status = IB_SUCCESS;
+	goto end;
+
+err_init_al_obj:
+	destroy_al_obj( &p_pool->obj );
+
+err_alloc_cache_el:
+	__destroying_fmr_pool( &p_pool->obj );
+	__cleanup_fmr_pool( &p_pool->obj );
+
+err_pool_init:
+	if (p_pool->cache_bucket)
+		cl_free( p_pool->cache_bucket );
+
+err_alloc_cache:
+	cl_free( p_pool );
+
+err_alloc_pool_obj:
+end:
+	AL_EXIT( AL_DBG_FMR_POOL );
+	return status;
+}
+
+/**
+ * mlnx_destroy_fmr_pool - Free FMR pool
+ * @pool: FMR pool to free
+ *
+ * Destroy an FMR pool and free all associated resources.
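+ *
+ * A minimal usage sketch (h_pd is the caller's protection domain handle;
+ * the attribute values are illustrative only):
+ *
+ *	mlnx_fmr_pool_create_t	attr;
+ *	mlnx_fmr_pool_handle_t	h_pool;
+ *
+ *	cl_memclr( &attr, sizeof(attr) );
+ *	attr.pool_size = 32;
+ *	attr.max_pages_per_fmr = 64;
+ *	attr.dirty_watermark = 8;
+ *	attr.access_ctrl = IB_AC_LOCAL_WRITE;
+ *	attr.cache = TRUE;
+ *	if( mlnx_create_fmr_pool( h_pd, &attr, &h_pool ) == IB_SUCCESS )
+ *		mlnx_destroy_fmr_pool( h_pool );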
+ */ +ib_api_status_t +mlnx_destroy_fmr_pool( + IN const mlnx_fmr_pool_handle_t h_pool) +{ + mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool; + + AL_ENTER( AL_DBG_FMR_POOL ); + + if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &p_pool->obj ); + p_pool->obj.pfn_destroy( &p_pool->obj, NULL ); + + AL_EXIT( AL_DBG_FMR_POOL ); + return IB_SUCCESS; +} + + + +ib_api_status_t +mlnx_flush_fmr_pool(mlnx_fmr_pool_handle_t h_pool) +{ + + ib_api_status_t status = IB_SUCCESS; + mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool; + + if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &p_pool->obj ); + + cl_atomic_inc( &p_pool->flush_req ); + cl_event_signal(&p_pool->do_flush_event); + if (cl_event_wait_on(&p_pool->flush_done_event, EVENT_NO_TIMEOUT, TRUE)) + status = IB_ERROR; + + deref_al_obj( &p_pool->obj ); + + return status; +} + +ib_api_status_t +mlnx_map_phys_fmr_pool( + IN const mlnx_fmr_pool_handle_t h_pool , + IN const uint64_t* const page_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT mlnx_fmr_pool_el_t *pp_fmr_el) +{ + + ib_api_status_t status = IB_SUCCESS; + mlnx_fmr_pool_t *p_pool = (mlnx_fmr_pool_t*)h_pool; + mlnx_fmr_pool_element_t *p_fmr_el; + cl_qlist_t *bucket; + + if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_FMR_POOL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + if (list_len < 1 || list_len > p_pool->max_pages) + return IB_INVALID_PARAMETER; + + ref_al_obj( &p_pool->obj ); + + cl_spinlock_acquire(&p_pool->pool_lock); + + p_fmr_el = __fmr_cache_lookup( p_pool, page_list, list_len, *p_vaddr ); + if (p_fmr_el) { + /* found in cache */ + ++p_fmr_el->ref_count; + if (p_fmr_el->ref_count == 1) { + cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item ); + cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->rest_list; + } + + cl_spinlock_release(&p_pool->pool_lock); + goto end; + } + + if (cl_is_qlist_empty(&p_pool->free_list)) { + cl_spinlock_release(&p_pool->pool_lock); + status = IB_RESOURCE_BUSY; + goto exit; + } + + p_fmr_el = PARENT_STRUCT(cl_qlist_remove_head(&p_pool->free_list),mlnx_fmr_pool_element_t,list_item); + if (p_fmr_el->in_cash) + { + p_fmr_el->in_cash = FALSE; + bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]); + cl_qlist_remove_item( bucket, &p_fmr_el->cache_node ); + } + cl_spinlock_release(&p_pool->pool_lock); + + status = mlnx_map_phys_fmr(p_fmr_el->h_fmr, page_list, + list_len, p_vaddr, p_lkey, p_rkey); + + if (status != IB_SUCCESS) { + cl_spinlock_acquire(&p_pool->pool_lock); + cl_qlist_insert_tail(&p_pool->free_list, &p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->free_list; + cl_spinlock_release(&p_pool->pool_lock); + goto exit; + } + + ++p_fmr_el->remap_count; + p_fmr_el->ref_count = 1; + p_fmr_el->lkey = *p_lkey; + p_fmr_el->rkey = *p_rkey; + p_fmr_el->io_virtual_address = *p_vaddr; + cl_spinlock_acquire(&p_pool->pool_lock); + cl_qlist_insert_tail(&p_pool->rest_list, &p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->rest_list; + cl_spinlock_release(&p_pool->pool_lock); + + if (p_pool->cache_bucket) { + p_fmr_el->io_virtual_address = *p_vaddr; + 
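+		/* Record the page list and I/O virtual address with the element so
+		 * that a later mapping of the same region hits in
+		 * __fmr_cache_lookup() instead of consuming another FMR. */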
p_fmr_el->page_list_len = list_len; + memcpy(p_fmr_el->page_list, page_list, list_len * sizeof(*page_list)); + + cl_spinlock_acquire(&p_pool->pool_lock); + bucket = p_pool->cache_bucket + __fmr_hash(p_fmr_el->page_list[0]); + cl_qlist_insert_head( bucket, &p_fmr_el->cache_node ); + p_fmr_el->in_cash = TRUE; + cl_spinlock_release(&p_pool->pool_lock); + } + +end: + *pp_fmr_el = (mlnx_fmr_pool_el_t)p_fmr_el; + *p_lkey = p_fmr_el->lkey; + *p_rkey = p_fmr_el->rkey; + *p_vaddr = p_fmr_el->io_virtual_address; + +exit: + deref_al_obj( &p_pool->obj ); + return status; +} + + + +ib_api_status_t +mlnx_unmap_fmr_pool( + IN mlnx_fmr_pool_el_t p_fmr_el ) +{ + mlnx_fmr_pool_t *p_pool; + + p_pool = (mlnx_fmr_pool_t*)p_fmr_el->h_pool; + + if( AL_OBJ_INVALID_HANDLE( (mlnx_fmr_pool_handle_t)p_pool, AL_OBJ_TYPE_H_FMR_POOL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &p_pool->obj ); + + cl_spinlock_acquire(&p_pool->pool_lock); + + --p_fmr_el->ref_count; + if (!p_fmr_el->ref_count) + { + if (p_fmr_el->p_cur_list == &p_pool->rest_list) + cl_qlist_remove_item( p_fmr_el->p_cur_list, &p_fmr_el->list_item ); + + if (p_fmr_el->remap_count < p_pool->max_remaps) + { + cl_qlist_insert_tail(&p_pool->free_list,&p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->free_list; + } + else + { + cl_qlist_insert_tail(&p_pool->dirty_list, &p_fmr_el->list_item); + p_fmr_el->p_cur_list = &p_pool->dirty_list; + ++p_pool->dirty_len; + cl_event_signal(&p_pool->do_flush_event); + } + } + + if (p_fmr_el->ref_count < 0) + { + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_FMR_POOL, ("FMR %p has ref count %d < 0\n",p_fmr_el, p_fmr_el->ref_count)); + } + cl_spinlock_release( &p_pool->pool_lock ); + + deref_al_obj( &p_pool->obj ); + return IB_SUCCESS; +} diff --git a/branches/Ndi/core/al/kernel/al_fmr_pool.h b/branches/Ndi/core/al/kernel/al_fmr_pool.h new file mode 100644 index 00000000..f68fad63 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_fmr_pool.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: al_fmr_pool.h 1611 2006-08-20 14:48:55Z sleybo $ + */ + + +#if !defined(__AL_FMR_POOL_H__) +#define __AL_FMR_POOL_H__ + +#include +#include +#include "al_common.h" + + +/* + * If an FMR is not in use, then the list member will point to either + * its pool's free_list (if the FMR can be mapped again; that is, + * remap_count < pool->max_remaps) or its pool's dirty_list (if the + * FMR needs to be unmapped before being remapped). In either of + * these cases it is a bug if the ref_count is not 0. In other words, + * if ref_count is > 0, then the list member must not be linked into + * either free_list or dirty_list. + * + * The cache_node member is used to link the FMR into a cache bucket + * (if caching is enabled). This is independent of the reference + * count of the FMR. When a valid FMR is released, its ref_count is + * decremented, and if ref_count reaches 0, the FMR is placed in + * either free_list or dirty_list as appropriate. However, it is not + * removed from the cache and may be "revived" if a call to + * ib_fmr_register_physical() occurs before the FMR is remapped. In + * this case we just increment the ref_count and remove the FMR from + * free_list/dirty_list. + * + * Before we remap an FMR from free_list, we remove it from the cache + * (to prevent another user from obtaining a stale FMR). When an FMR + * is released, we add it to the tail of the free list, so that our + * cache eviction policy is "least recently used." + * + * All manipulation of ref_count, list and cache_node is protected by + * pool_lock to maintain consistency. + */ + +#pragma warning( disable : 4200) +typedef struct _mlnx_fmr_pool_element { + mlnx_fmr_handle_t h_fmr; + mlnx_fmr_pool_handle_t h_pool; + cl_list_item_t list_item; + cl_qlist_t *p_cur_list; + cl_list_item_t cache_node; + boolean_t in_cash; + int ref_count; + int remap_count; + uint64_t io_virtual_address; + net32_t lkey; + net32_t rkey; + int page_list_len; + uint64_t page_list[0]; +} mlnx_fmr_pool_element_t; +#pragma warning( default : 4200) + + +typedef struct _mlnx_fmr_pool { + + al_obj_t obj; /* Child of ib_al_handle_t */ + cl_spinlock_t pool_lock; + + int pool_size; + int max_pages; + int max_remaps; + int dirty_watermark; + int dirty_len; + cl_qlist_t free_list; + cl_qlist_t dirty_list; + cl_qlist_t rest_list; /* those, that not in free and not in dirty */ + cl_qlist_t *cache_bucket; + + void (*flush_function) (mlnx_fmr_pool_handle_t h_pool,void* arg); + void *flush_arg; + + cl_thread_t thread; + cl_event_t do_flush_event; + cl_event_t flush_done_event; + atomic32_t flush_req; + atomic32_t should_stop; +} mlnx_fmr_pool_t; + + +#endif /* IB_FMR_POOL_H */ + diff --git a/branches/Ndi/core/al/kernel/al_ioc_pnp.c b/branches/Ndi/core/al/kernel/al_ioc_pnp.c new file mode 100644 index 00000000..0970e61a --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_ioc_pnp.c @@ -0,0 +1,3320 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include "al_pnp.h" +#include "al_ioc_pnp.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_ioc_pnp.tmh" +#endif +#include "ib_common.h" +#include "al_mgr.h" +#include "al_ca.h" +#include +#include +#include +#include +#include + + +/* Basic sweep operation flow: + * + * NOTE: Empty lines indicate asynchronous decoupling. + * 1. Timer expires + * 2. Issue SA query for all CA nodes + * 3. Issue SA query for all paths + * + * 4. Query callback for first query - store results. + * 5. Query callback for second query - process results. + * 6. Associate paths to nodes. + * 7. For each node, use the first path to send a IOU Info query. + * + * 8a. Recv callback (success) - record IOU info, decrement ref count. + * 8b. Recv callback (failure) - decrement ref count. + * 8c. Send failure - decrement ref count. + * 8d. Send timeout - pick next path and repeate IOU info query. + * 9. Queue results to async proc thread once ref count hits zero + * + * 10. Discard any nodes that failed IOU info query, or reported no IOCs. + * 11. For each node scanned that is already known, compare change ID + * 12a. Change ID identical - report any path changes. + * 12b. Change ID different - for each active IOC slot, query IOC profile. + * + * 13a. Recv callback (success) - associate IOC with IOU, decrement ref count. + * 13b. Recv callback (failure) - decrement ref count. + * 13c. Send failure - decrement ref count. + * 14. Queue results to async proc thread once ref count hits zero. + * + * 15. Discard any nodes that have no IOCs. + * 16. For each IOC of each node, query all service entries. + * + * 17a. Recv callback (success) - copy service entries, decrement ref count. + * 17b. Recv callback (failure) - Remove IOC from IOU, decrement ref count. + * 17c. Send failure - Remove IOC from IOU, decrement ref count. + * 18. Queue results to async proc thread once ref count hits zero. + * + * 19. Discard any nodes that have no IOCs. + * 20. Compare new node map to known nodes and report changes. + * 21. Compare IOCs for any duplicates and report changes. + * 22. Compare paths for any duplicates and report changes. + * 23. Reset sweep timer. + * + * Note: the sweep timer is reset at any point where there can be no further + * progress towards. + */ + + +/* Number of entries in the various pools to grow by. */ +#define IOC_PNP_POOL_GROW (10) + + +/* IOC PnP Manager structure. 
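+ * A single instance of this structure (gp_ioc_pnp below) drives the sweep
+ * flow described above: iou_map holds the IOUs known from earlier sweeps,
+ * sweep_map collects the results of the sweep in progress, and query_cnt
+ * counts the per-port query results still outstanding.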
*/ +typedef struct _ioc_pnp_mgr +{ + al_obj_t obj; + + cl_qlist_t iou_reg_list; + cl_qlist_t ioc_reg_list; + + ib_pnp_handle_t h_pnp; + + cl_async_proc_item_t async_item; + boolean_t async_item_is_busy; + + cl_spinlock_t iou_pool_lock; + cl_qpool_t iou_pool; + cl_spinlock_t ioc_pool_lock; + cl_qpool_t ioc_pool; + cl_spinlock_t path_pool_lock; + cl_qpool_t path_pool; + + cl_fmap_t iou_map; /* Map of currently known IOUs */ + cl_fmap_t sweep_map; /* Map of IOUs from sweep results. */ + cl_timer_t sweep_timer;/* Timer to trigger sweep. */ + atomic32_t query_cnt; /* Number of sweep results outstanding. */ + +} ioc_pnp_mgr_t; + + +/* Per-port IOC PnP agent. */ +typedef struct _ioc_pnp_svc +{ + al_obj_t obj; + + net64_t ca_guid; + net64_t port_guid; + + ib_qp_handle_t h_qp; + ib_pool_key_t pool_key; + ib_mad_svc_handle_t h_mad_svc; + + atomic32_t query_cnt; + ib_query_handle_t h_node_query; + ib_query_handle_t h_path_query; + ib_mad_element_t *p_node_element; + ib_mad_element_t *p_path_element; + uint32_t num_nodes; + uint32_t num_paths; + +} ioc_pnp_svc_t; + + +/****d* Access Layer:IOC PnP/iou_path_t +* NAME +* iou_path_t +* +* DESCRIPTION +* Describes a path to an IOU node. +* +* SYNOPSIS +*/ +typedef struct _iou_path +{ + cl_fmap_item_t map_item; + net64_t ca_guid; + net64_t port_guid; + ib_path_rec_t rec; + +} iou_path_t; +/* +* FIELDS +* map_item +* Map item for storing paths in a map. +* +* path_rec +* Path record. +* +* SEE ALSO +* IOC PnP +*********/ + + +/****d* Access Layer:IOC PnP/iou_node_t +* NAME +* iou_node_t +* +* DESCRIPTION +* Describes an IOU node on the fabric. +* +* SYNOPSIS +*/ +typedef struct _iou_node +{ + cl_fmap_item_t map_item; + cl_fmap_t path_map; + cl_qmap_t ioc_map; + cl_spinlock_t lock; + + iou_path_t *p_config_path; + + net64_t ca_guid; + net64_t guid; + net64_t chassis_guid; + uint8_t slot; + net32_t vend_id; + net16_t dev_id; + net32_t revision; + ib_iou_info_t info; + + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + +} iou_node_t; +/* +* FIELDS +* map_item +* Map item for storing IOUs in a map. +* +* path_map +* Map of paths to the IOU. +* +* ioc_map +* Map of IOCs on the IOU. +* +* p_config_path +* Path used to get configuration information from the IOU. +* +* ca_guid +* CA GUID through which the IOU is accessible. +* +* guid +* Node GUID used as key when storing IOUs in the map. +* +* chassis_guid +* GUID of the chassis in which the IOU is installed. +* +* slot +* Slot number in the chassis in which the IOU is installed. +* +* vend_id +* Vendor ID of the IOU. +* +* dev_id +* Device ID of the IOU. +* +* revision +* Device revision of the IOU. +* +* info +* I/O unit info structure. +* +* desc +* Node description as provided in ib_node_record_t, along with space for +* terminating NULL. +* +* NOTES +* The guid member must follow the ca_guid member to allow both guids to +* be compared in single call to cl_memcmp. 
+* +* SEE ALSO +* IOC PnP +*********/ + + +#pragma warning(disable:4324) +typedef struct _iou_ioc +{ + cl_map_item_t map_item; + iou_node_t *p_iou; + uint8_t slot; + ib_ioc_profile_t profile; + uint8_t num_valid_entries; + ib_svc_entry_t *p_svc_entries; + atomic32_t ref_cnt; + +} iou_ioc_t; +#pragma warning(default:4324) + + +typedef enum _sweep_state +{ + SWEEP_IOU_INFO, + SWEEP_IOC_PROFILE, + SWEEP_SVC_ENTRIES, + SWEEP_COMPLETE + +} sweep_state_t; + + +typedef struct _ioc_sweep_results +{ + cl_async_proc_item_t async_item; + sweep_state_t state; + ioc_pnp_svc_t *p_svc; + atomic32_t query_cnt; + cl_fmap_t iou_map; + +} ioc_sweep_results_t; + + +typedef struct _al_pnp_ioc_event +{ + size_t rec_size; + ib_pnp_rec_t *p_rec; + ib_pnp_rec_t *p_user_rec; + +} al_pnp_ioc_event_t; + + +/* Global instance of the IOC PnP manager. */ +ioc_pnp_mgr_t *gp_ioc_pnp = NULL; +uint32_t g_ioc_query_timeout = 250; +uint32_t g_ioc_query_retries = 4; +uint32_t g_ioc_poll_interval = 30000; + + + +/****************************************************************************** +* +* IOC PnP Manager functions - global object. +* +******************************************************************************/ +static void +__construct_ioc_pnp( + IN ioc_pnp_mgr_t* const p_ioc_mgr ); + +static ib_api_status_t +__init_ioc_pnp( + IN ioc_pnp_mgr_t* const p_ioc_mgr ); + +static void +__destroying_ioc_pnp( + IN al_obj_t *p_obj ); + +static void +__free_ioc_pnp( + IN al_obj_t *p_obj ); + +static ib_api_status_t +__ioc_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + +static cl_status_t +__init_iou( + IN void* const p_obj, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + +/****************************************************************************** +* +* IOC PnP manager sweep-related functions. +* +******************************************************************************/ +static iou_node_t* +__get_iou( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const net64_t ca_guid, + IN const ib_node_record_t* const p_node_rec ); + +static void +__put_iou( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_node_t* const p_iou ); + +static void +__put_iou_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_fmap_t* const p_iou_map ); + +static iou_path_t* +__get_path( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const net64_t ca_guid, + IN const net64_t port_guid, + IN const ib_path_rec_t* const p_path_rec ); + +static void +__put_path( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_path_t* const p_path ); + +static void +__put_path_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_fmap_t* const p_path_map ); + +static iou_ioc_t* +__get_ioc( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const uint32_t ioc_slot, + IN const ib_ioc_profile_t* const p_profile ); + +static void +__put_ioc( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_ioc_t* const p_ioc ); + +static void +__put_ioc_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_qmap_t* const p_ioc_map ); + +static intn_t +__iou_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ); + +static intn_t +__path_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ); + +static void +__ioc_pnp_timer_cb( + IN void *context ); + +static void +__ioc_async_cb( + IN cl_async_proc_item_t *p_async_item ); + +/****************************************************************************** +* +* IOC PnP service - per local port child of IOC PnP manager. 
+* +******************************************************************************/ +static ib_api_status_t +__create_ioc_pnp_svc( + IN ib_pnp_rec_t *p_pnp_rec ); + +static ib_api_status_t +__init_ioc_pnp_svc( + IN ioc_pnp_svc_t* const p_ioc_pnp_svc, + IN const ib_pnp_rec_t* const p_pnp_rec ); + +static void +__destroying_ioc_pnp_svc( + IN al_obj_t *p_obj ); + +static void +__free_ioc_pnp_svc( + IN al_obj_t *p_obj ); + +/****************************************************************************** +* +* IOC PnP service sweep functions. +* +******************************************************************************/ +static void +__ioc_pnp_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_request_mad ); + +static void +__ioc_pnp_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_response ); + +static void +__node_rec_cb( + IN ib_query_rec_t *p_query_rec ); + +static void +__path_rec_cb( + IN ib_query_rec_t *p_query_rec ); + +static void +__process_sweep( + IN cl_async_proc_item_t *p_async_item ); + +static void +__process_query( + IN ioc_pnp_svc_t* const p_svc ); + +static void +__process_nodes( + IN ioc_pnp_svc_t* const p_svc, + IN cl_qmap_t* const p_iou_map ); + +static void +__process_paths( + IN ioc_pnp_svc_t* const p_svc, + IN cl_qmap_t* const p_iou_map ); + +static void +__build_iou_map( + IN cl_qmap_t* const p_port_map, + IN OUT cl_fmap_t* const p_iou_map ); + +static void +__format_dm_get( + IN const void* const context1, + IN const void* const context2, + IN const iou_path_t* const p_path, + IN const net16_t attr_id, + IN const net32_t attr_mod, + IN OUT ib_mad_element_t* const p_mad_element ); + +static ib_api_status_t +__ioc_query_sa( + IN ioc_pnp_svc_t* const p_svc ); + +static ib_api_status_t +__query_ious( + IN ioc_sweep_results_t* const p_results ); + +static ib_api_status_t +__query_ioc_profiles( + IN ioc_sweep_results_t* const p_results ); + +static ib_api_status_t +__query_svc_entries( + IN ioc_sweep_results_t* const p_results ); + +static void +__update_results( + IN ioc_sweep_results_t* const p_results ); + +static void +__iou_info_resp( + IN OUT iou_node_t* const p_iou, + IN const ib_dm_mad_t* const p_mad ); + +static void +__ioc_profile_resp( + IN OUT iou_node_t* const p_iou, + IN const ib_dm_mad_t* const p_mad ); + +static void +__svc_entry_resp( + IN OUT iou_ioc_t* const p_ioc, + IN const ib_dm_mad_t* const p_mad ); + +/****************************************************************************** +* +* Client registration and notification management +* +******************************************************************************/ +static void +__change_ious( + IN cl_fmap_t* const p_cur_ious, + IN cl_fmap_t* const p_dup_ious ); + +static void +__add_ious( + IN cl_fmap_t* const p_cur_ious, + IN cl_fmap_t* const p_new_ious, + IN al_pnp_t* const p_reg OPTIONAL ); + +static void +__remove_ious( + IN cl_fmap_t* const p_old_ious ); + +static void +__add_iocs( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_new_iocs, + IN al_pnp_t* const p_reg OPTIONAL ); + +static void +__remove_iocs( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_old_iocs ); + +static void +__add_paths( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_ioc_map, + IN cl_fmap_t* const p_new_paths, + IN al_pnp_t* const p_reg OPTIONAL ); + +static void +__add_ioc_paths( + IN iou_ioc_t* const p_ioc, + IN cl_fmap_t* const p_new_paths, + IN al_pnp_t* const p_reg OPTIONAL ); + 
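+/*
+ * The __change_/__add_/__remove_/__report_ helpers declared here implement
+ * the final phase of the sweep (steps 20-22 of the flow described at the
+ * top of this file): the new sweep results are diffed against the known
+ * IOUs, IOCs and paths, and the differences are reported to registered
+ * PnP clients.
+ */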
+static void +__remove_paths( + IN cl_qmap_t* const p_ioc_map, + IN cl_fmap_t* const p_old_paths ); + +static void +__report_iou_add( + IN iou_node_t* const p_iou, + IN al_pnp_t* const p_reg OPTIONAL ); + +static void +__report_iou_remove( + IN iou_node_t* const p_iou ); + +static void +__report_ioc_add( + IN iou_node_t* const p_iou, + IN iou_ioc_t* const p_ioc, + IN al_pnp_t* const p_reg OPTIONAL ); + +static void +__report_ioc_remove( + IN iou_node_t* const p_iou, + IN iou_ioc_t* const p_ioc ); + +static void +__report_path( + IN iou_ioc_t* const p_ioc, + IN iou_path_t* const p_path, + IN ib_pnp_event_t pnp_event, + IN al_pnp_t* const p_reg OPTIONAL ); + + +/****************************************************************************** +* +* Implementation +* +******************************************************************************/ +ib_api_status_t +create_ioc_pnp( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + ib_pnp_req_t pnp_req; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( !gp_ioc_pnp ); + + gp_ioc_pnp = (ioc_pnp_mgr_t*)cl_zalloc( sizeof(ioc_pnp_mgr_t) ); + if( !gp_ioc_pnp ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to allocate IOC PnP manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + __construct_ioc_pnp( gp_ioc_pnp ); + + status = __init_ioc_pnp( gp_ioc_pnp ); + if( status != IB_SUCCESS ) + { + __free_ioc_pnp( &gp_ioc_pnp->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__construct_ioc_pnp returned %s\n", ib_get_err_str( status )) ); + return status; + } + + /* Attach to the parent object. */ + status = attach_al_obj( p_parent_obj, &gp_ioc_pnp->obj ); + if( status != IB_SUCCESS ) + { + gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register for port PnP notifications. */ + cl_memclr( &pnp_req, sizeof(pnp_req) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = gp_ioc_pnp; + pnp_req.pfn_pnp_cb = __ioc_pnp_cb; + status = ib_reg_pnp( gh_al, &pnp_req, &gp_ioc_pnp->h_pnp ); + if( status != IB_SUCCESS ) + { + gp_ioc_pnp->obj.pfn_destroy( &gp_ioc_pnp->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_pnp failed with status %s.\n", + ib_get_err_str( status )) ); + return status; + } + /* + * We don't release the reference taken in init_al_obj + * since PnP deregistration is asynchronous. + */ + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + +static void +__construct_ioc_pnp( + IN ioc_pnp_mgr_t* const p_ioc_mgr ) +{ + AL_ENTER( AL_DBG_PNP ); + + cl_qlist_init( &p_ioc_mgr->iou_reg_list ); + cl_qlist_init( &p_ioc_mgr->ioc_reg_list ); + cl_fmap_init( &p_ioc_mgr->iou_map, __iou_cmp ); + construct_al_obj( &p_ioc_mgr->obj, AL_OBJ_TYPE_IOC_PNP_MGR ); + cl_spinlock_construct( &p_ioc_mgr->iou_pool_lock ); + cl_spinlock_construct( &p_ioc_mgr->path_pool_lock ); + cl_spinlock_construct( &p_ioc_mgr->ioc_pool_lock ); + cl_qpool_construct( &p_ioc_mgr->iou_pool ); + cl_qpool_construct( &p_ioc_mgr->path_pool ); + cl_qpool_construct( &p_ioc_mgr->ioc_pool ); + cl_fmap_init( &p_ioc_mgr->sweep_map, __iou_cmp ); + cl_timer_construct( &p_ioc_mgr->sweep_timer ); + p_ioc_mgr->async_item.pfn_callback = __ioc_async_cb; + + AL_EXIT( AL_DBG_PNP ); +} + + +static ib_api_status_t +__init_ioc_pnp( + IN ioc_pnp_mgr_t* const p_ioc_mgr ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_PNP ); + + /* Initialize the pool locks. 
*/ + cl_status = cl_spinlock_init( &p_ioc_mgr->iou_pool_lock ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_spinlock_init( &p_ioc_mgr->path_pool_lock ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_spinlock_init( &p_ioc_mgr->ioc_pool_lock ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + /* Initialize the pools */ + cl_status = cl_qpool_init( &p_ioc_mgr->iou_pool, 0, 0, IOC_PNP_POOL_GROW, + sizeof(iou_node_t), __init_iou, NULL, p_ioc_mgr ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_qpool_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_qpool_init( &p_ioc_mgr->path_pool, 0, 0, IOC_PNP_POOL_GROW, + sizeof(iou_path_t), NULL, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_qpool_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + cl_status = cl_qpool_init( &p_ioc_mgr->ioc_pool, 0, 0, IOC_PNP_POOL_GROW, + sizeof(iou_ioc_t), NULL, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_qpool_init returned %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + /* Initialize the sweep timer. */ + cl_status = cl_timer_init( &p_ioc_mgr->sweep_timer, + __ioc_pnp_timer_cb, p_ioc_mgr ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_timer_init failed with %s\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + status = init_al_obj( &p_ioc_mgr->obj, p_ioc_mgr, TRUE, + __destroying_ioc_pnp, NULL, __free_ioc_pnp ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj returned %s\n", ib_get_err_str( status )) ); + return status; + } + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + +static void +__destroying_ioc_pnp( + IN al_obj_t *p_obj ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_PNP ); + + UNUSED_PARAM( p_obj ); + CL_ASSERT( &gp_ioc_pnp->obj == p_obj ); + + /* Stop the timer. 
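+	 * Once the timer is stopped no new sweep can start; ib_dereg_pnp below
+	 * then releases, via deref_al_obj, the reference that create_ioc_pnp
+	 * intentionally left held.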
*/ + cl_timer_stop( &gp_ioc_pnp->sweep_timer ); + + if( gp_ioc_pnp->h_pnp ) + { + status = ib_dereg_pnp( gp_ioc_pnp->h_pnp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__free_ioc_pnp( + IN al_obj_t *p_obj ) +{ + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( &gp_ioc_pnp->obj == p_obj ); + + /* + * Return all items from the maps to their pools before + * destroying the pools + */ + __put_iou_map( gp_ioc_pnp, &gp_ioc_pnp->iou_map ); + cl_timer_destroy( &gp_ioc_pnp->sweep_timer ); + cl_qpool_destroy( &gp_ioc_pnp->ioc_pool ); + cl_qpool_destroy( &gp_ioc_pnp->path_pool ); + cl_qpool_destroy( &gp_ioc_pnp->iou_pool ); + cl_spinlock_destroy( &gp_ioc_pnp->ioc_pool_lock ); + cl_spinlock_destroy( &gp_ioc_pnp->path_pool_lock ); + cl_spinlock_destroy( &gp_ioc_pnp->iou_pool_lock ); + destroy_al_obj( p_obj ); + cl_free( gp_ioc_pnp ); + gp_ioc_pnp = NULL; + + AL_EXIT( AL_DBG_PNP ); +} + + +static cl_status_t +__init_iou( + IN void* const p_obj, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + iou_node_t *p_iou; + + UNUSED_PARAM( context ); + + p_iou = (iou_node_t*)p_obj; + + cl_spinlock_construct( &p_iou->lock ); + cl_qmap_init( &p_iou->ioc_map ); + cl_fmap_init( &p_iou->path_map, __path_cmp ); + + *pp_pool_item = &p_iou->map_item.pool_item; + return cl_spinlock_init( &p_iou->lock ); +} + + +static iou_node_t* +__get_iou( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const net64_t ca_guid, + IN const ib_node_record_t* const p_node_rec ) +{ + iou_node_t *p_iou; + cl_pool_item_t *p_item; + + cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock ); + p_item = cl_qpool_get( &p_ioc_mgr->iou_pool ); + cl_spinlock_release( &p_ioc_mgr->iou_pool_lock ); + if( !p_item ) + return NULL; + + p_iou = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ), + iou_node_t, map_item ); + + p_iou->ca_guid = ca_guid; + p_iou->guid = p_node_rec->node_info.node_guid; + p_iou->chassis_guid = p_node_rec->node_info.sys_guid; + p_iou->vend_id = ib_node_info_get_vendor_id( &p_node_rec->node_info ); + p_iou->dev_id = p_node_rec->node_info.device_id; + p_iou->revision = p_node_rec->node_info.revision; + + cl_memclr( &p_iou->info, sizeof(ib_iou_info_t) ); + + cl_memcpy( p_iou->desc, p_node_rec->node_desc.description, + IB_NODE_DESCRIPTION_SIZE ); + + /* The terminating NULL should never get overwritten. 
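+	 * desc holds IB_NODE_DESCRIPTION_SIZE + 1 bytes, while the cl_memcpy
+	 * above writes only the first IB_NODE_DESCRIPTION_SIZE of them.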
*/ + CL_ASSERT( p_iou->desc[IB_NODE_DESCRIPTION_SIZE] == '\0' ); + + return p_iou; +} + + +static void +__put_iou( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_node_t* const p_iou ) +{ + __put_path_map( p_ioc_mgr, &p_iou->path_map ); + __put_ioc_map( p_ioc_mgr, &p_iou->ioc_map ); + + cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock ); + cl_qpool_put( &p_ioc_mgr->iou_pool, &p_iou->map_item.pool_item ); + cl_spinlock_release( &p_ioc_mgr->iou_pool_lock ); +} + + +static void +__put_iou_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_fmap_t* const p_iou_map ) +{ + cl_qlist_t list; + cl_fmap_item_t *p_item; + iou_node_t *p_iou; + + cl_qlist_init( &list ); + + p_item = cl_fmap_head( p_iou_map ); + while( p_item != cl_fmap_end( p_iou_map ) ) + { + cl_fmap_remove_item( p_iou_map, p_item ); + + p_iou = PARENT_STRUCT( + PARENT_STRUCT( p_item, cl_map_item_t, pool_item ), + iou_node_t, map_item ); + + __put_path_map( p_ioc_mgr, &p_iou->path_map ); + __put_ioc_map( p_ioc_mgr, &p_iou->ioc_map ); + cl_qlist_insert_head( &list, &p_item->pool_item.list_item ); + p_item = cl_fmap_head( p_iou_map ); + } + cl_spinlock_acquire( &p_ioc_mgr->iou_pool_lock ); + cl_qpool_put_list( &p_ioc_mgr->iou_pool, &list ); + cl_spinlock_release( &p_ioc_mgr->iou_pool_lock ); +} + + +static iou_path_t* +__get_path( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const net64_t ca_guid, + IN const net64_t port_guid, + IN const ib_path_rec_t* const p_path_rec ) +{ + cl_pool_item_t *p_item; + iou_path_t *p_path; + + cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock ); + p_item = cl_qpool_get( &p_ioc_mgr->path_pool ); + cl_spinlock_release( &p_ioc_mgr->path_pool_lock ); + if( !p_item ) + return NULL; + + p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ), + iou_path_t, map_item ); + + /* + * Store the local CA and port GUID for this path to let recipients + * of a PATH_ADD event avoid a CA lookup based on GID. + */ + p_path->ca_guid = ca_guid; + p_path->port_guid = port_guid; + + p_path->rec = *p_path_rec; + /* Clear the num_path field since it is just "undefined". */ + p_path->rec.num_path = 0; + /* + * Clear reserved fields in case they were set to prevent undue path + * thrashing. 
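+	 * The path map is keyed by the entire ib_path_rec_t (see __path_cmp),
+	 * so stray reserved bits would make otherwise identical paths compare
+	 * as different.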
+ */ + p_path->rec.resv0 = 0; + p_path->rec.resv1 = 0; + p_path->rec.resv2 = 0; + + return p_path; +} + + +static void +__put_path( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_path_t* const p_path ) +{ + cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock ); + cl_qpool_put( &p_ioc_mgr->path_pool, &p_path->map_item.pool_item ); + cl_spinlock_release( &p_ioc_mgr->path_pool_lock ); +} + + +static void +__put_path_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_fmap_t* const p_path_map ) +{ + cl_qlist_t list; + cl_fmap_item_t *p_item; + iou_path_t *p_path; + + cl_qlist_init( &list ); + + p_item = cl_fmap_head( p_path_map ); + while( p_item != cl_fmap_end( p_path_map ) ) + { + cl_fmap_remove_item( p_path_map, p_item ); + + p_path = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_fmap_item_t, pool_item ), + iou_path_t, map_item ); + + cl_qlist_insert_head( &list, &p_item->pool_item.list_item ); + p_item = cl_fmap_head( p_path_map ); + } + cl_spinlock_acquire( &p_ioc_mgr->path_pool_lock ); + cl_qpool_put_list( &p_ioc_mgr->path_pool, &list ); + cl_spinlock_release( &p_ioc_mgr->path_pool_lock ); +} + + +static iou_ioc_t* +__get_ioc( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN const uint32_t ioc_slot, + IN const ib_ioc_profile_t* const p_profile ) +{ + cl_pool_item_t *p_item; + iou_ioc_t *p_ioc; + ib_svc_entry_t *p_svc_entries; + + if( !p_profile->num_svc_entries ) + return NULL; + + p_svc_entries = + cl_zalloc( sizeof(ib_svc_entry_t) * p_profile->num_svc_entries ); + if( !p_svc_entries ) + return NULL; + + cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock ); + p_item = cl_qpool_get( &p_ioc_mgr->ioc_pool ); + cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock ); + if( !p_item ) + { + cl_free( p_svc_entries ); + return NULL; + } + + p_ioc = PARENT_STRUCT( PARENT_STRUCT( p_item, cl_map_item_t, pool_item ), + iou_ioc_t, map_item ); + + CL_ASSERT( !p_ioc->ref_cnt ); + + CL_ASSERT( !(ioc_slot >> 8) ); + p_ioc->slot = (uint8_t)ioc_slot; + p_ioc->profile = *p_profile; + p_ioc->num_valid_entries = 0; + p_ioc->p_svc_entries = p_svc_entries; + cl_atomic_inc( &p_ioc->ref_cnt ); + return p_ioc; +} + + +static void +__put_ioc( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN iou_ioc_t* const p_ioc ) +{ + if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 ) + { + cl_free( p_ioc->p_svc_entries ); + + cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock ); + cl_qpool_put( &p_ioc_mgr->ioc_pool, &p_ioc->map_item.pool_item ); + cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock ); + } +} + + +static void +__put_ioc_map( + IN ioc_pnp_mgr_t* const p_ioc_mgr, + IN cl_qmap_t* const p_ioc_map ) +{ + cl_qlist_t list; + cl_map_item_t *p_item; + iou_ioc_t *p_ioc; + + cl_qlist_init( &list ); + + p_item = cl_qmap_head( p_ioc_map ); + while( p_item != cl_qmap_end( p_ioc_map ) ) + { + cl_qmap_remove_item( p_ioc_map, p_item ); + + p_ioc = PARENT_STRUCT( + PARENT_STRUCT( p_item, cl_map_item_t, pool_item ), + iou_ioc_t, map_item ); + + if( cl_atomic_dec( &p_ioc->ref_cnt ) == 0 ) + { + cl_free( p_ioc->p_svc_entries ); + cl_qlist_insert_head( &list, &p_item->pool_item.list_item ); + } + p_item = cl_qmap_head( p_ioc_map ); + } + cl_spinlock_acquire( &p_ioc_mgr->ioc_pool_lock ); + cl_qpool_put_list( &p_ioc_mgr->ioc_pool, &list ); + cl_spinlock_release( &p_ioc_mgr->ioc_pool_lock ); +} + + +/* + * Compares two IOUs for inserts/lookups in a flexi map. Keys are the + * address of the ca_guid, which is adjacent to the node GUID of the IOU. + * This allows for a single call to cl_memcmp. 
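+ * For example, cl_fmap_insert( p_iou_map, &p_iou->ca_guid,
+ * &p_iou->map_item ) keys the node by the 16 bytes spanning ca_guid
+ * and guid.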
+ */ +static intn_t +__iou_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ) +{ + return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) * 2 ); +} + + +/* + * Compares two paths for inserts/lookups in a flexi map. + */ +static intn_t +__path_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ) +{ + return cl_memcmp( p_key1, p_key2, sizeof(ib_path_rec_t) ); +} + + +/* + * Removes all paths and orphaned IOC/IOUs upon a port DOWN event. + */ +static void +__process_port_down( + IN const net64_t port_guid ) +{ + cl_fmap_item_t *p_path_item; + cl_fmap_item_t *p_iou_item; + iou_node_t *p_iou; + iou_path_t *p_path; + cl_fmap_t old_paths; + cl_fmap_t old_ious; + + AL_ENTER( AL_DBG_PNP ); + + cl_fmap_init( &old_paths, __path_cmp ); + cl_fmap_init( &old_ious, __iou_cmp ); + + p_iou_item = cl_fmap_head( &gp_ioc_pnp->iou_map ); + while( p_iou_item != cl_fmap_end( &gp_ioc_pnp->iou_map ) ) + { + p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item ); + /* + * Note that it is safe to move to the next item even if we remove + * the IOU from the map since the map effectively maintains an ordered + * list of its contents. + */ + p_iou_item = cl_fmap_next( p_iou_item ); + + p_path_item = cl_fmap_head( &p_iou->path_map ); + while( p_path_item != cl_fmap_end( &p_iou->path_map ) ) + { + p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item ); + p_path_item = cl_fmap_next( p_path_item ); + if( p_path->rec.sgid.unicast.interface_id == port_guid ) + { + cl_fmap_remove_item( &p_iou->path_map, &p_path->map_item ); + cl_fmap_insert( &old_paths, &p_path->rec, &p_path->map_item ); + } + } + + if( !cl_fmap_count( &p_iou->path_map ) ) + { + /* Move the paths back to the IOU so that they get freed. */ + cl_fmap_merge( &p_iou->path_map, &old_paths ); + cl_fmap_remove_item( &gp_ioc_pnp->iou_map, &p_iou->map_item ); + cl_fmap_insert( &old_ious, &p_iou->ca_guid, &p_iou->map_item ); + } + else + { + /* Report the removed paths. */ + __remove_paths( &p_iou->ioc_map, &old_paths ); + } + } + + /* Report any removed IOUs. */ + __remove_ious( &old_ious ); + + AL_EXIT( AL_DBG_PNP ); +} + + +/* + * PnP callback for port event notifications. + */ +static ib_api_status_t +__ioc_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_api_status_t status = IB_SUCCESS; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_PNP ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("p_pnp_rec->pnp_event = 0x%x (%s)\n", + p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) ); + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_PORT_ADD: + /* Create the port service. */ + CL_ASSERT( !p_pnp_rec->context ); + status = __create_ioc_pnp_svc( p_pnp_rec ); + break; + + case IB_PNP_SM_CHANGE: + case IB_PNP_PORT_ACTIVE: + /* Initiate a sweep - delay a bit to allow the ports to come up. */ + if( g_ioc_poll_interval && !gp_ioc_pnp->query_cnt) + { + cl_status = cl_timer_start( &gp_ioc_pnp->sweep_timer, 250 ); + CL_ASSERT( cl_status == CL_SUCCESS ); + } + break; + + case IB_PNP_PORT_DOWN: + case IB_PNP_PORT_INIT: + case IB_PNP_PORT_ARMED: + CL_ASSERT( p_pnp_rec->context ); + + /* + * Report IOC and IOU remove events for any IOU/IOCs that only have + * paths through this port. Note, no need to synchronize with a + * sweep since synchronization is provided by the PnP thread. + */ + __process_port_down( p_pnp_rec->guid ); + break; + + case IB_PNP_PORT_REMOVE: + /* Destroy the port service. 
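+		 * A reference is taken on behalf of the destroy call below, and the
+		 * PnP context is cleared so later events cannot touch the freed
+		 * service.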
*/ + ref_al_obj( &((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj ); + ((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj.pfn_destroy( + &((ioc_pnp_svc_t* __ptr64)p_pnp_rec->context)->obj, NULL ); + p_pnp_rec->context = NULL; + + default: + break; /* Ignore other PNP events. */ + } + + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +static ib_api_status_t +__init_ioc_pnp_svc( + IN ioc_pnp_svc_t* const p_ioc_pnp_svc, + IN const ib_pnp_rec_t* const p_pnp_rec ) +{ + ib_api_status_t status; + ib_ca_handle_t h_ca; + ib_qp_create_t qp_create; + ib_mad_svc_t mad_svc; + ib_pnp_port_rec_t *p_pnp_port_rec; + + AL_ENTER( AL_DBG_PNP ); + + p_pnp_port_rec = PARENT_STRUCT( p_pnp_rec, ib_pnp_port_rec_t, pnp_rec ); + + /* Store the CA and port GUID so we can issue SA queries. */ + p_ioc_pnp_svc->ca_guid = p_pnp_port_rec->p_ca_attr->ca_guid; + p_ioc_pnp_svc->port_guid = p_pnp_rec->guid; + + /* Acquire the correct CI CA for this port. */ + h_ca = acquire_ca( p_pnp_port_rec->p_ca_attr->ca_guid ); + if( !h_ca ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") ); + return IB_INVALID_GUID; + } + p_ioc_pnp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca; + + /* Create the MAD QP. */ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = IB_QPT_QP1_ALIAS; + qp_create.sq_depth = p_pnp_port_rec->p_ca_attr->max_wrs; + qp_create.sq_sge = 1; + qp_create.sq_signaled = TRUE; + /* + * We use the IOC PnP service's al_obj_t as the context to allow using + * deref_al_obj as the destroy callback. + */ + status = ib_get_spl_qp( h_ca->obj.p_ci_ca->h_pd_alias, + p_pnp_port_rec->p_port_attr->port_guid, &qp_create, + &p_ioc_pnp_svc->obj, NULL, &p_ioc_pnp_svc->pool_key, + &p_ioc_pnp_svc->h_qp ); + + /* + * Release the CI CA once we've allocated the QP. The CI CA will not + * go away while we hold the QP. + */ + deref_al_obj( &h_ca->obj ); + + /* Check for failure allocating the QP. */ + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_spl_qp failed with status %s\n", + ib_get_err_str( status )) ); + return status; + } + /* Reference the port object on behalf of the QP. */ + ref_al_obj( &p_ioc_pnp_svc->obj ); + + /* Create the MAD service. */ + cl_memclr( &mad_svc, sizeof(ib_mad_svc_t) ); + mad_svc.mad_svc_context = p_ioc_pnp_svc; + mad_svc.pfn_mad_recv_cb = __ioc_pnp_recv_cb; + mad_svc.pfn_mad_send_cb = __ioc_pnp_send_cb; + status = + ib_reg_mad_svc( p_ioc_pnp_svc->h_qp, &mad_svc, + &p_ioc_pnp_svc->h_mad_svc ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_mad_svc failed with status %s\n", + ib_get_err_str( status )) ); + return status; + } + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + +/* + * Create a port agent for a given port. 
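+ * Called from __ioc_pnp_cb when a port reports IB_PNP_PORT_ADD.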
+ */ +static ib_api_status_t +__create_ioc_pnp_svc( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ioc_pnp_svc_t *p_ioc_pnp_svc; + ib_api_status_t status; + + AL_ENTER( AL_DBG_PNP ); + + /* calculate size of port_cm struct */ + p_ioc_pnp_svc = (ioc_pnp_svc_t*)cl_zalloc( sizeof(ioc_pnp_svc_t) ); + if( !p_ioc_pnp_svc ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to cl_zalloc port CM agent.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &p_ioc_pnp_svc->obj, AL_OBJ_TYPE_IOC_PNP_SVC ); + + status = init_al_obj( &p_ioc_pnp_svc->obj, p_ioc_pnp_svc, TRUE, + __destroying_ioc_pnp_svc, NULL, __free_ioc_pnp_svc ); + if( status != IB_SUCCESS ) + { + __free_ioc_pnp_svc( &p_ioc_pnp_svc->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", + ib_get_err_str( status )) ); + return status; + } + + /* Attach to the global CM object. */ + status = attach_al_obj( &gp_ioc_pnp->obj, &p_ioc_pnp_svc->obj ); + if( status != IB_SUCCESS ) + { + p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + status = __init_ioc_pnp_svc( p_ioc_pnp_svc, p_pnp_rec ); + if( status != IB_SUCCESS ) + { + p_ioc_pnp_svc->obj.pfn_destroy( &p_ioc_pnp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("__init_data_svc failed with status %s.\n", + ib_get_err_str( status )) ); + return status; + } + + /* Set the PnP context to reference this service. */ + p_pnp_rec->context = p_ioc_pnp_svc; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_ioc_pnp_svc->obj ); + + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +static void +__destroying_ioc_pnp_svc( + IN al_obj_t *p_obj ) +{ + ib_api_status_t status; + ioc_pnp_svc_t *p_svc; + + CL_ASSERT( p_obj ); + p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj ); + + if( p_svc->h_node_query ) + ib_cancel_query( gh_al, p_svc->h_node_query ); + + if( p_svc->h_path_query ) + ib_cancel_query( gh_al, p_svc->h_path_query ); + + /* Destroy the QP. */ + if( p_svc->h_qp ) + { + status = + ib_destroy_qp( p_svc->h_qp, (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + +static void +__free_ioc_pnp_svc( + IN al_obj_t *p_obj ) +{ + ioc_pnp_svc_t* p_svc; + + CL_ASSERT( p_obj ); + p_svc = PARENT_STRUCT( p_obj, ioc_pnp_svc_t, obj ); + + CL_ASSERT( !p_svc->query_cnt ); + + destroy_al_obj( p_obj ); + cl_free( p_svc ); +} + + +static void +__ioc_pnp_timer_cb( + IN void *context ) +{ + ib_api_status_t status; + ioc_pnp_mgr_t *p_mgr; + cl_list_item_t *p_item; + ioc_pnp_svc_t *p_svc; + + AL_ENTER( AL_DBG_PNP ); + + p_mgr = (ioc_pnp_mgr_t*)context; + + cl_spinlock_acquire( &p_mgr->obj.lock ); + if( p_mgr->obj.state == CL_DESTROYING ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("Destroying - not resetting timer.\n") ); + cl_spinlock_release( &p_mgr->obj.lock ); + return; + } + + CL_ASSERT( !cl_fmap_count( &p_mgr->sweep_map ) ); + + /* Pre-charge the ref count so that we don't toggle between 0 and 1. */ + cl_atomic_inc( &p_mgr->query_cnt ); + /* Take a reference on the object for the duration of the sweep process. 
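+	 * query_cnt was pre-charged above, so the per-port query completions
+	 * cannot drop it to zero (and queue the async item) before the loop
+	 * below has started a query on every port service.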
*/ + ref_al_obj( &p_mgr->obj ); + for( p_item = cl_qlist_head( &p_mgr->obj.obj_list ); + p_item != cl_qlist_end( &p_mgr->obj.obj_list ); + p_item = cl_qlist_next( p_item ) ) + { + p_svc = PARENT_STRUCT( PARENT_STRUCT( p_item, al_obj_t, pool_item ), + ioc_pnp_svc_t, obj ); + cl_atomic_inc( &p_mgr->query_cnt ); + status = __ioc_query_sa( p_svc ); + if( status != IB_SUCCESS ) + cl_atomic_dec( &p_mgr->query_cnt ); + } + /* Release the reference we took and see if we're done sweeping. */ + if( !cl_atomic_dec( &p_mgr->query_cnt ) ) + cl_async_proc_queue( gp_async_pnp_mgr, &p_mgr->async_item ); + + cl_spinlock_release( &p_mgr->obj.lock ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static ib_api_status_t +__ioc_query_sa( + IN ioc_pnp_svc_t* const p_svc ) +{ + ib_api_status_t status = IB_NOT_DONE; + ib_query_req_t query; + ib_user_query_t info; + union _ioc_pnp_timer_cb_u + { + ib_node_record_t node_rec; + ib_path_rec_t path_rec; + + } u; + + AL_ENTER( AL_DBG_PNP ); + + if( p_svc->h_node_query ) + return IB_NOT_DONE; + if( p_svc->h_path_query ) + return IB_NOT_DONE; + + if( p_svc->obj.state == CL_DESTROYING ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("Destroying - not resetting timer.\n") ); + return IB_NOT_DONE; + } + + info.method = IB_MAD_METHOD_GETTABLE; + info.attr_id = IB_MAD_ATTR_NODE_RECORD; + info.attr_size = sizeof(ib_node_record_t); + info.comp_mask = IB_NR_COMPMASK_NODETYPE; + info.p_attr = &u.node_rec; + + cl_memclr( &u.node_rec, sizeof(ib_node_record_t) ); + u.node_rec.node_info.node_type = IB_NODE_TYPE_CA; + + cl_memclr( &query, sizeof(ib_query_req_t) ); + query.query_type = IB_QUERY_USER_DEFINED; + query.p_query_input = &info; + query.port_guid = p_svc->port_guid; + query.timeout_ms = g_ioc_query_timeout; + query.retry_cnt = g_ioc_query_retries; + query.query_context = p_svc; + query.pfn_query_cb = __node_rec_cb; + + /* Reference the service for the node record query. */ + ref_al_obj( &p_svc->obj ); + cl_atomic_inc( &p_svc->query_cnt ); + + status = ib_query( gh_al, &query, &p_svc->h_node_query ); + if( status != IB_SUCCESS ) + { + cl_atomic_dec( &p_svc->query_cnt ); + deref_al_obj( &p_svc->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP, + ("ib_query returned %s\n", ib_get_err_str( status )) ); + return status; + } + + /* Setup the path query. */ + info.method = IB_MAD_METHOD_GETTABLE; + info.attr_id = IB_MAD_ATTR_PATH_RECORD; + info.attr_size = sizeof(ib_path_rec_t); + info.comp_mask = IB_PR_COMPMASK_SGID | IB_PR_COMPMASK_NUM_PATH; + info.p_attr = &u.path_rec; + + cl_memclr( &u.path_rec, sizeof(ib_path_rec_t) ); + ib_gid_set_default( &u.path_rec.sgid, p_svc->port_guid ); + /* Request all the paths available, setting the reversible bit. */ + u.path_rec.num_path = 0xFF; + + query.pfn_query_cb = __path_rec_cb; + + /* Reference the service for the node record query. 
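+	 * (This second reference and query_cnt increment are for the path
+	 * record query set up above; the node record query took its own
+	 * reference earlier in this function.)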
*/ + ref_al_obj( &p_svc->obj ); + cl_atomic_inc( &p_svc->query_cnt ); + + status = ib_query( gh_al, &query, &p_svc->h_path_query ); + if( status != IB_SUCCESS ) + { + cl_atomic_dec( &p_svc->query_cnt ); + deref_al_obj( &p_svc->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP, + ("ib_query returned %s\n", ib_get_err_str( status )) ); + } + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + +static void +__node_rec_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ioc_pnp_svc_t *p_svc; + + AL_ENTER( AL_DBG_PNP ); + + p_svc = (ioc_pnp_svc_t* __ptr64)p_query_rec->query_context; + + if( p_svc->obj.state != CL_DESTROYING && + p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt ) + { + CL_ASSERT( p_query_rec->p_result_mad ); + CL_ASSERT( !p_svc->p_node_element ); + CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL ); + p_svc->p_node_element = p_query_rec->p_result_mad; + p_svc->num_nodes = p_query_rec->result_cnt; + } + else if( p_query_rec->p_result_mad ) + { + ib_put_mad( p_query_rec->p_result_mad ); + } + + p_svc->h_node_query = NULL; + if( !cl_atomic_dec( &p_svc->query_cnt ) ) + { + /* The path query has already completed. Process the results. */ + __process_query( p_svc ); + } + + /* Release the reference taken for the query. */ + deref_al_obj( &p_svc->obj ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__path_rec_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ioc_pnp_svc_t *p_svc; + + AL_ENTER( AL_DBG_PNP ); + + p_svc = (ioc_pnp_svc_t* __ptr64)p_query_rec->query_context; + + if( p_svc->obj.state != CL_DESTROYING && + p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt ) + { + CL_ASSERT( p_query_rec->p_result_mad ); + CL_ASSERT( !p_svc->p_path_element ); + CL_ASSERT( p_query_rec->p_result_mad->p_next == NULL ); + p_svc->p_path_element = p_query_rec->p_result_mad; + p_svc->num_paths = p_query_rec->result_cnt; + } + else if( p_query_rec->p_result_mad ) + { + ib_put_mad( p_query_rec->p_result_mad ); + } + + p_svc->h_path_query = NULL; + if( !cl_atomic_dec( &p_svc->query_cnt ) ) + { + /* The node query has already completed. Process the results. */ + __process_query( p_svc ); + } + + /* Release the reference taken for the query. */ + deref_al_obj( &p_svc->obj ); + + AL_EXIT( AL_DBG_PNP ); +} + +static void +__process_query( + IN ioc_pnp_svc_t* const p_svc ) +{ + ib_api_status_t status; + ioc_sweep_results_t *p_results; + cl_qmap_t port_map; + + AL_ENTER( AL_DBG_PNP ); + + cl_qmap_init( &port_map ); + + if( !p_svc->p_node_element || !p_svc->p_path_element ) + { + /* One of the queries failed. Release the MADs and reset the timer. */ + if( p_svc->p_node_element ) + { + ib_put_mad( p_svc->p_node_element ); + p_svc->p_node_element = NULL; + } + + if( p_svc->p_path_element ) + { + ib_put_mad( p_svc->p_path_element ); + p_svc->p_path_element = NULL; + } + + /* Decrement the IOC PnP manager's query count. */ + if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) ) + cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item ); + AL_EXIT( AL_DBG_PNP ); + return; + } + + /* + * Allocate the sweep results structure to allow processing + * asynchronously. + */ + p_results = cl_zalloc( sizeof(ioc_sweep_results_t) ); + if( p_results ) + { + p_results->async_item.pfn_callback = __process_sweep; + p_results->p_svc = p_svc; + cl_fmap_init( &p_results->iou_map, __iou_cmp ); + + /* Build the map of nodes by port GUID. */ + __process_nodes( p_svc, &port_map ); + + /* Build the map of paths for each node. 
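+	 * Each path record is matched to its node through the DGID's
+	 * interface ID, which is the port GUID used to key port_map in
+	 * __process_nodes.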
*/ + __process_paths( p_svc, &port_map ); + + /* Collapse the map of nodes to be keyed by node GUID. */ + __build_iou_map( &port_map, &p_results->iou_map ); + + /* Send the IOU Info queries to the nodes. */ + status = __query_ious( p_results ); + } + else + { + status = IB_INSUFFICIENT_MEMORY; + } + + /* Release the query result MADs now that we're done with them. */ + ib_put_mad( p_svc->p_node_element ); + ib_put_mad( p_svc->p_path_element ); + p_svc->p_node_element = NULL; + p_svc->p_path_element = NULL; + + switch( status ) + { + case IB_SUCCESS: + break; + default: + CL_ASSERT( p_results ); + cl_free( p_results ); + /* Fall through */ + case IB_INSUFFICIENT_MEMORY: + /* Decrement the IOC PnP manager's query count. */ + if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) ) + cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item ); + } + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__process_nodes( + IN ioc_pnp_svc_t* const p_svc, + IN cl_qmap_t* const p_port_map ) +{ + iou_node_t *p_iou; + ib_node_record_t *p_node_rec; + uint32_t i; + void *p_item; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_svc ); + CL_ASSERT( p_svc->p_node_element ); + CL_ASSERT( p_port_map ); + + for( i = 0; i < p_svc->num_nodes; i++ ) + { + p_node_rec = ib_get_query_node_rec( p_svc->p_node_element, i ); + + p_iou = __get_iou( gp_ioc_pnp, p_svc->ca_guid, p_node_rec ); + if( !p_iou ) + break; + + /* + * We insert by port GUID, not node GUID so that we can match + * to paths using DGID. Note that it is safe to cast between + * a flexi-map item and a map item since the pointer to the key + * in a flexi-map item is always a 64-bit pointer. + */ + p_item = cl_qmap_insert( + p_port_map, p_node_rec->node_info.port_guid, + (cl_map_item_t*)&p_iou->map_item ); + if( p_item != &p_iou->map_item ) + { + /* Duplicate node - discard. */ + __put_iou( gp_ioc_pnp, p_iou ); + } + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__process_paths( + IN ioc_pnp_svc_t* const p_svc, + IN cl_qmap_t* const p_port_map ) +{ + iou_node_t *p_iou; + iou_path_t *p_path; + ib_path_rec_t *p_path_rec; + uint32_t i; + cl_map_item_t *p_iou_item; + cl_fmap_item_t *p_item; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_svc ); + CL_ASSERT( p_svc->p_node_element ); + CL_ASSERT( p_port_map ); + + for( i = 0; i < p_svc->num_paths; i++ ) + { + p_path_rec = ib_get_query_path_rec( p_svc->p_path_element, i ); + + p_iou_item = + cl_qmap_get( p_port_map, p_path_rec->dgid.unicast.interface_id ); + if( p_iou_item == cl_qmap_end( p_port_map ) ) + continue; + + p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item ); + + p_path = __get_path( gp_ioc_pnp, p_svc->ca_guid, + p_svc->port_guid, p_path_rec ); + if( !p_path ) + break; + + p_item = cl_fmap_insert( &p_iou->path_map, &p_path->rec, + &p_path->map_item ); + if( p_item != &p_path->map_item ) + { + /* Duplicate path - discard. */ + __put_path( gp_ioc_pnp, p_path ); + } + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__build_iou_map( + IN cl_qmap_t* const p_port_map, + IN OUT cl_fmap_t* const p_iou_map ) +{ + cl_fmap_t map1, map2; + void *p_item; + iou_node_t *p_iou, *p_dup; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( !cl_fmap_count( p_iou_map ) ); + + cl_fmap_init( &map1, __path_cmp ); + cl_fmap_init( &map2, __path_cmp ); + + /* + * Now collapse the map so that IOUs aren't repeated. + * This is needed because the IOU map is keyed by port GUID, and thus + * a multi-port IOU could be listed twice. + */ + /* Merge the port map into a map of IOUs. 
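A multi-port IOU appears once per port GUID at this point; after the
collapse below it appears exactly once, keyed by node GUID, with the
path maps of its duplicates merged together.
 */

/*
 * Aside: a tiny standalone model of the insert-or-merge step (array based
 * for brevity; illustrative names only, not the cl_fmap API).
 */
#include <stdint.h>

typedef struct example_iou
{
	uint64_t	node_guid;
	unsigned	num_paths;
} example_iou_t;

/* Returns nonzero if inserted, zero if merged into an existing entry. */
static int example_insert_or_merge( example_iou_t *map, unsigned *p_count,
	const example_iou_t *p_add )
{
	for( unsigned i = 0; i < *p_count; i++ )
	{
		if( map[i].node_guid == p_add->node_guid )
		{
			/* Keep the unique paths; the caller frees the duplicate. */
			map[i].num_paths += p_add->num_paths;
			return 0;
		}
	}
	map[(*p_count)++] = *p_add;
	return 1;
}

/* Merge the port map into the IOU map, collapsing duplicates: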
*/ + for( p_item = cl_qmap_head( p_port_map ); + p_item != cl_qmap_end( p_port_map ); + p_item = cl_qmap_head( p_port_map ) ) + { + cl_qmap_remove_item( p_port_map, (cl_map_item_t*)p_item ); + p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item ); + + p_item = cl_fmap_insert( p_iou_map, &p_iou->ca_guid, p_item ); + if( p_item != &p_iou->map_item ) + { + /* Duplicate IOU information - merge the paths. */ + p_dup = PARENT_STRUCT( p_item, iou_node_t, map_item ); + CL_ASSERT( p_dup != p_iou ); + cl_fmap_delta( &p_dup->path_map, &p_iou->path_map, &map1, &map2 ); + /* + * The path map in p_iou->path_map is duplicate paths. + * map1 contains paths unique to p_iou->path_map, map2 contains + * paths unique to p_dup->path_map. Add the unique paths back to + * p_dup->path_map since that IOU is already in the IOU map. + * Note that we are keeping the p_dup IOU node. + */ + cl_fmap_merge( &p_dup->path_map, &map1 ); + cl_fmap_merge( &p_dup->path_map, &map2 ); + /* All unique items should have merged without duplicates. */ + CL_ASSERT( !cl_fmap_count( &map1 ) ); + CL_ASSERT( !cl_fmap_count( &map2 ) ); + + __put_iou( gp_ioc_pnp, p_iou ); + } + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__format_dm_get( + IN const void* const context1, + IN const void* const context2, + IN const iou_path_t* const p_path, + IN const net16_t attr_id, + IN const net32_t attr_mod, + IN OUT ib_mad_element_t* const p_mad_element ) +{ + static uint64_t tid = 0; + + AL_ENTER( AL_DBG_PNP ); + + /* + * Context information so that we can continue processing when + * the query completes. + */ + p_mad_element->context1 = context1; + p_mad_element->context2 = context2; + + /* + * Set the addressing bits necessary for the mad service to + * create the address vector + */ + p_mad_element->h_av = NULL; + p_mad_element->remote_sl = ib_path_rec_sl( &p_path->rec ); + p_mad_element->remote_lid = p_path->rec.dlid; + p_mad_element->grh_valid = FALSE; + p_mad_element->path_bits = p_path->rec.num_path; + + /* Request response processing. */ + p_mad_element->resp_expected = TRUE; + p_mad_element->retry_cnt = g_ioc_query_retries; + p_mad_element->timeout_ms = g_ioc_query_timeout; + + /* Set the destination information for the send. */ + p_mad_element->remote_qp = IB_QP1; + p_mad_element->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + + /* Format the MAD payload. */ + cl_memclr( p_mad_element->p_mad_buf, sizeof(ib_dm_mad_t) ); + ib_mad_init_new( p_mad_element->p_mad_buf, IB_MCLASS_DEV_MGMT, 1, + IB_MAD_METHOD_GET, cl_ntoh64( tid++ ), attr_id, attr_mod ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static ib_api_status_t +__query_ious( + IN ioc_sweep_results_t* const p_results ) +{ + ib_api_status_t status; + iou_node_t *p_iou; + iou_path_t *p_path; + cl_fmap_item_t *p_iou_item; + cl_fmap_item_t *p_path_item; + ib_mad_element_t *p_mad, *p_mad_list = NULL; + + AL_ENTER( AL_DBG_PNP ); + + p_results->state = SWEEP_IOU_INFO; + + /* Send a IOU Info query on the first path to every IOU. */ + p_iou_item = cl_fmap_head( &p_results->iou_map ); + while( p_iou_item != cl_fmap_end( &p_results->iou_map ) ) + { + p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item ); + p_iou_item = cl_fmap_next( p_iou_item ); + if( !cl_fmap_count( &p_iou->path_map ) ) + { + /* No paths for this node. Discard it. 
*/ + cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item ); + __put_iou( gp_ioc_pnp, p_iou ); + continue; + } + + p_path_item = cl_fmap_head( &p_iou->path_map ); + + p_path = PARENT_STRUCT( p_path_item, iou_path_t, map_item ); + + status = ib_get_mad( p_results->p_svc->pool_key, + MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_mad for IOU Info query returned %s.\n", + ib_get_err_str( status )) ); + break; + } + + p_iou->p_config_path = p_path; + __format_dm_get( p_results, p_iou, p_path, + IB_MAD_ATTR_IO_UNIT_INFO, 0, p_mad ); + + /* Link the elements together. */ + p_mad->p_next = p_mad_list; + p_mad_list = p_mad; + + cl_atomic_inc( &p_results->p_svc->query_cnt ); + } + + if( !p_mad_list ) + { + AL_EXIT( AL_DBG_PNP ); + return IB_ERROR; + } + + status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_send_mad returned %s\n", ib_get_err_str( status )) ); + + /* If some sends succeeded, change the status. */ + if( p_mad_list != p_mad ) + status = IB_SUCCESS; + + while( p_mad ) + { + p_mad_list = p_mad->p_next; + p_mad->p_next = NULL; + ib_put_mad( p_mad ); + if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) && + status == IB_SUCCESS ) + { + cl_async_proc_queue( gp_async_pnp_mgr, + &p_results->async_item ); + } + p_mad = p_mad_list; + } + } + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +static void +__ioc_pnp_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_response ) +{ + ioc_sweep_results_t *p_results; + iou_node_t *p_iou; + iou_ioc_t *p_ioc; + + AL_ENTER( AL_DBG_PNP ); + + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + CL_ASSERT( !p_mad_response->p_next ); + + p_results = (ioc_sweep_results_t* __ptr64)p_mad_response->send_context1; + if( !p_mad_response->p_mad_buf->status ) + { + /* Query was successful */ + switch( p_mad_response->p_mad_buf->attr_id ) + { + case IB_MAD_ATTR_IO_UNIT_INFO: + p_iou = (iou_node_t* __ptr64)p_mad_response->send_context2; + __iou_info_resp( p_iou, + (ib_dm_mad_t*)p_mad_response->p_mad_buf ); + break; + + case IB_MAD_ATTR_IO_CONTROLLER_PROFILE: + p_iou = (iou_node_t* __ptr64)p_mad_response->send_context2; + __ioc_profile_resp( p_iou, + (ib_dm_mad_t*)p_mad_response->p_mad_buf ); + break; + + case IB_MAD_ATTR_SERVICE_ENTRIES: + p_ioc = (iou_ioc_t* __ptr64)p_mad_response->send_context2; + __svc_entry_resp( p_ioc, + (ib_dm_mad_t*)p_mad_response->p_mad_buf ); + break; + + default: + break; + } + } + + ib_put_mad( p_mad_response ); + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__iou_info_resp( + IN OUT iou_node_t* const p_iou, + IN const ib_dm_mad_t* const p_mad ) +{ + AL_ENTER( AL_DBG_PNP ); + /* Copy the IOU info for post-processing. */ + p_iou->info = *((ib_iou_info_t*)p_mad->data); + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__ioc_profile_resp( + IN OUT iou_node_t* const p_iou, + IN const ib_dm_mad_t* const p_mad ) +{ + iou_ioc_t *p_ioc; + cl_map_item_t *p_item; + + AL_ENTER( AL_DBG_PNP ); + p_ioc = __get_ioc( gp_ioc_pnp, cl_ntoh32(p_mad->hdr.attr_mod), + (ib_ioc_profile_t*)p_mad->data ); + if( p_ioc ) + { + /* Need back link to process service entry failures. */ + p_ioc->p_iou = p_iou; + cl_spinlock_acquire( &p_iou->lock ); + p_item = cl_qmap_insert( &p_iou->ioc_map, + p_ioc->profile.ioc_guid, &p_ioc->map_item ); + cl_spinlock_release( &p_iou->lock ); + /* Return the IOC if it's a duplicate. 
*/ + if( p_item != &p_ioc->map_item ) + __put_ioc( gp_ioc_pnp, p_ioc ); + } + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__svc_entry_resp( + IN OUT iou_ioc_t* const p_ioc, + IN const ib_dm_mad_t* const p_mad ) +{ + uint16_t idx; + uint8_t lo, hi; + ib_svc_entries_t *p_svc_entries; + + AL_ENTER( AL_DBG_PNP ); + + ib_dm_get_slot_lo_hi( p_mad->hdr.attr_mod, NULL, &lo, &hi ); + CL_ASSERT( (hi - lo) < SVC_ENTRY_COUNT ); + p_svc_entries = (ib_svc_entries_t*)p_mad->data; + + /* Copy the entries. */ + for( idx = lo; idx <= hi; idx++ ) + p_ioc->p_svc_entries[idx] = p_svc_entries->service_entry[idx - lo]; + + /* Update the number of entries received so far. */ + p_ioc->num_valid_entries += (hi - lo) + 1; + cl_atomic_dec(&p_ioc->ref_cnt); + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__ioc_pnp_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_request_mad ) +{ + ib_api_status_t status; + ioc_sweep_results_t *p_results; + iou_node_t *p_iou; + iou_ioc_t *p_ioc; + cl_fmap_item_t *p_item; + + AL_ENTER( AL_DBG_PNP ); + + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + + CL_ASSERT( p_request_mad->p_next == NULL ); + + p_results = (ioc_sweep_results_t* __ptr64)p_request_mad->context1; + + if( p_request_mad->status != IB_WCS_SUCCESS ) + { + switch( p_request_mad->p_mad_buf->attr_id ) + { + case IB_MAD_ATTR_IO_UNIT_INFO: + p_iou = (iou_node_t* __ptr64)p_request_mad->context2; + if( p_request_mad->status == IB_WCS_TIMEOUT_RETRY_ERR ) + { + /* Move to the next path for the node and try the query again. */ + p_item = cl_fmap_next( &p_iou->p_config_path->map_item ); + if( p_item != cl_fmap_end( &p_iou->path_map ) ) + { + p_iou->p_config_path = + PARENT_STRUCT( p_item, iou_path_t, map_item ); + __format_dm_get( p_results, p_iou, p_iou->p_config_path, + IB_MAD_ATTR_IO_UNIT_INFO, 0, p_request_mad ); + + status = ib_send_mad( p_results->p_svc->h_mad_svc, + p_request_mad, &p_request_mad ); + if( status == IB_SUCCESS ) + { + AL_EXIT( AL_DBG_PNP ); + return; + } + } + } + break; + + case IB_MAD_ATTR_SERVICE_ENTRIES: + p_ioc = (iou_ioc_t* __ptr64)p_request_mad->context2; + cl_spinlock_acquire( &p_ioc->p_iou->lock ); + cl_qmap_remove_item( &p_ioc->p_iou->ioc_map, &p_ioc->map_item ); + cl_spinlock_release( &p_ioc->p_iou->lock ); + __put_ioc( gp_ioc_pnp, p_ioc ); + break; + + default: + break; + } + } + + /* Cleanup. */ + ib_put_mad( p_request_mad ); + + /* + * If this is the last MAD, finish processing the IOU queries + * in the PnP thread. + */ + if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) ) + cl_async_proc_queue( gp_async_pnp_mgr, &p_results->async_item ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__flush_duds( + IN OUT ioc_sweep_results_t *p_results ) +{ + cl_fmap_item_t *p_item; + cl_map_item_t *p_ioc_item; + iou_node_t *p_iou; + iou_ioc_t *p_ioc; + + AL_ENTER( AL_DBG_PNP ); + + /* Walk the map of IOUs and discard any that didn't respond to IOU info. */ + p_item = cl_fmap_head( &p_results->iou_map ); + /* + * No locking required since we're protected by the serialization of the + * PnP thread. 
+ */ + while( p_item != cl_fmap_end( &p_results->iou_map ) ) + { + p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item ); + + p_item = cl_fmap_next( p_item ); + switch( p_results->state ) + { + case SWEEP_IOU_INFO: + if( p_iou->info.max_controllers ) + continue; + break; + + case SWEEP_SVC_ENTRIES: + CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) ); + p_ioc_item = cl_qmap_head( &p_iou->ioc_map ); + while( p_ioc_item != cl_qmap_end( &p_iou->ioc_map ) ) + { + p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item ); + p_ioc_item = cl_qmap_next( p_ioc_item ); + + if( !p_ioc->num_valid_entries || + p_ioc->num_valid_entries != p_ioc->profile.num_svc_entries ) + { + cl_qmap_remove_item( &p_iou->ioc_map, &p_ioc->map_item ); + __put_ioc( gp_ioc_pnp, p_ioc ); + } + } + /* Fall through. */ + case SWEEP_IOC_PROFILE: + if( cl_qmap_count( &p_iou->ioc_map ) ) + continue; + break; + + default: + CL_ASSERT( p_results->state != SWEEP_COMPLETE ); + break; + } + + cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item ); + __put_iou( gp_ioc_pnp, p_iou ); + } + + AL_EXIT( AL_DBG_PNP ); +} + +static void +__process_sweep( + IN cl_async_proc_item_t *p_async_item ) +{ + ib_api_status_t status; + ioc_sweep_results_t *p_results; + + AL_ENTER( AL_DBG_PNP ); + + p_results = PARENT_STRUCT( p_async_item, ioc_sweep_results_t, async_item ); + CL_ASSERT( !p_results->p_svc->query_cnt ); + + if( p_results->p_svc->obj.state == CL_DESTROYING ) + { + __put_iou_map( gp_ioc_pnp, &p_results->iou_map ); + goto err; + } + + /* Walk the map of IOUs and discard any that didn't respond to IOU info. */ + __flush_duds( p_results ); + switch( p_results->state ) + { + case SWEEP_IOU_INFO: + /* Next step, query IOC profiles for all IOUs. */ + p_results->state = SWEEP_IOC_PROFILE; + status = __query_ioc_profiles( p_results ); + break; + + case SWEEP_IOC_PROFILE: + /* Next step: query service entries for all IOCs. */ + p_results->state = SWEEP_SVC_ENTRIES; + status = __query_svc_entries( p_results ); + break; + + case SWEEP_SVC_ENTRIES: + /* Filter results and report changes. 
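This is the terminal state of the sweep.
 */

/*
 * Aside: the sweep is a three-stage state machine; each stage's MAD
 * completions flush unresponsive entries and then advance it.  Sketched
 * with illustrative names (the real states are the SWEEP_* values used
 * above):
 */
typedef enum example_sweep_state
{
	EX_SWEEP_IOU_INFO,		/* IOU Info queried for every IOU */
	EX_SWEEP_IOC_PROFILE,	/* IOC profile queried per installed slot */
	EX_SWEEP_SVC_ENTRIES,	/* service entries queried per IOC */
	EX_SWEEP_COMPLETE		/* results filtered and reported */
} example_sweep_state_t;

static example_sweep_state_t
example_next_state( example_sweep_state_t state )
{
	return (state < EX_SWEEP_COMPLETE) ?
		(example_sweep_state_t)(state + 1) : state;
}

/* Final stage - filter the results and report the changes: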
*/ + p_results->state = SWEEP_COMPLETE; + __update_results( p_results ); + status = IB_SUCCESS; + break; + + default: + CL_ASSERT( p_results->state == SWEEP_IOU_INFO || + p_results->state == SWEEP_IOC_PROFILE || + p_results->state == SWEEP_SVC_ENTRIES ); + status = IB_ERROR; + } + + if( p_results->state == SWEEP_COMPLETE || status != IB_SUCCESS ) + { +err: + if( !cl_atomic_dec( &gp_ioc_pnp->query_cnt ) ) + cl_async_proc_queue( gp_async_pnp_mgr, &gp_ioc_pnp->async_item ); + cl_free( p_results ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static ib_api_status_t +__query_ioc_profiles( + IN ioc_sweep_results_t* const p_results ) +{ + ib_api_status_t status; + cl_fmap_item_t *p_item; + iou_node_t *p_iou; + uint8_t slot; + ib_mad_element_t *p_mad, *p_mad_list = NULL; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_head( &p_results->iou_map ); + while( p_item != cl_fmap_end( &p_results->iou_map ) ) + { + p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item ); + CL_ASSERT( p_iou->info.max_controllers ); + CL_ASSERT( cl_fmap_count( &p_iou->path_map ) ); + CL_ASSERT( p_iou->p_config_path ); + p_item = cl_fmap_next( p_item ); + + p_mad = NULL; + for( slot = 1; slot <= p_iou->info.max_controllers; slot++ ) + { + if( ioc_at_slot( &p_iou->info, slot ) == IOC_INSTALLED ) + { + status = ib_get_mad( p_results->p_svc->pool_key, + MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + break; + + __format_dm_get( p_results, p_iou, p_iou->p_config_path, + IB_MAD_ATTR_IO_CONTROLLER_PROFILE, cl_hton32( slot ), p_mad ); + + /* Chain the MAD up. */ + p_mad->p_next = p_mad_list; + p_mad_list = p_mad; + + cl_atomic_inc( &p_results->p_svc->query_cnt ); + } + } + if( !p_mad ) + { + /* No IOCs installed in this IOU, or failed to get MAD. */ + cl_fmap_remove_item( &p_results->iou_map, &p_iou->map_item ); + __put_iou( gp_ioc_pnp, p_iou ); + } + } + + /* Trap the case where there are no queries to send. */ + if( !p_mad_list ) + { + AL_EXIT( AL_DBG_PNP ); + return IB_NOT_DONE; + } + + status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad ); + if( status != IB_SUCCESS ) + { + /* If some of the MADs were sent wait for their completion. 
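ib_send_mad returns the unsent remainder of the chain through its last
argument, so the loop below releases exactly the elements that never
went on the wire, dropping one query count per element.
 */

/*
 * Aside: unwinding the unsent tail of a singly linked chain, as a
 * standalone sketch (standard C; illustrative types and helpers).
 */
#include <stddef.h>

typedef struct example_mad
{
	struct example_mad	*p_next;
} example_mad_t;

extern void example_put_mad( example_mad_t *p_mad );
extern int example_dec_query_cnt( void );		/* returns the new count */

static void example_unwind_unsent( example_mad_t *p_failed )
{
	while( p_failed )
	{
		example_mad_t *p_next = p_failed->p_next;
		p_failed->p_next = NULL;			/* unchain before release */
		example_put_mad( p_failed );
		(void)example_dec_query_cnt();
		p_failed = p_next;
	}
}

/* If any MADs were sent, downgrade to success and wait for them: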
*/ + if( p_mad_list != p_mad ) + status = IB_SUCCESS; + + while( p_mad ) + { + p_mad_list = p_mad->p_next; + p_mad->p_next = NULL; + ib_put_mad( p_mad ); + if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) && + status == IB_SUCCESS ) + { + cl_async_proc_queue( gp_async_pnp_mgr, + &p_results->async_item ); + } + p_mad = p_mad_list; + } + } + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +static ib_api_status_t +__query_svc_entries( + IN ioc_sweep_results_t* const p_results ) +{ + ib_api_status_t status; + cl_fmap_item_t *p_iou_item; + cl_map_item_t *p_ioc_item; + iou_node_t *p_iou; + iou_ioc_t *p_ioc; + uint8_t i; + uint32_t attr_mod; + ib_mad_element_t *p_mad, *p_mad_list = NULL; + + AL_ENTER( AL_DBG_PNP ); + + for( p_iou_item = cl_fmap_head( &p_results->iou_map ); + p_iou_item != cl_fmap_end( &p_results->iou_map ); + p_iou_item = cl_fmap_next( p_iou_item ) ) + { + p_iou = PARENT_STRUCT( p_iou_item, iou_node_t, map_item ); + CL_ASSERT( cl_qmap_count( &p_iou->ioc_map ) ); + CL_ASSERT( cl_fmap_count( &p_iou->path_map ) ); + CL_ASSERT( p_iou->p_config_path ); + + for( p_ioc_item = cl_qmap_head( &p_iou->ioc_map ); + p_ioc_item != cl_qmap_end( &p_iou->ioc_map ); + p_ioc_item = cl_qmap_next( p_ioc_item ) ) + { + p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item ); + CL_ASSERT( p_ioc->p_iou == p_iou ); + + for( i = 0; i < p_ioc->profile.num_svc_entries; i += 4 ) + { + status = ib_get_mad( p_results->p_svc->pool_key, + MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + break; + + attr_mod = (((uint32_t)p_ioc->slot) << 16) | i; + if( (i + 3) > p_ioc->profile.num_svc_entries ) + attr_mod |= ((p_ioc->profile.num_svc_entries - 1) << 8); + else + attr_mod |= ((i + 3) << 8); + + __format_dm_get( p_results, p_ioc, p_iou->p_config_path, + IB_MAD_ATTR_SERVICE_ENTRIES, cl_hton32( attr_mod ), + p_mad ); + + /* Chain the MAD up. */ + p_mad->p_next = p_mad_list; + p_mad_list = p_mad; + + cl_atomic_inc( &p_ioc->ref_cnt ); + cl_atomic_inc( &p_results->p_svc->query_cnt ); + } + } + } + + /* Trap the case where there are no queries to send. */ + if( !p_mad_list ) + { + AL_EXIT( AL_DBG_PNP ); + return IB_NOT_DONE; + } + + status = ib_send_mad( p_results->p_svc->h_mad_svc, p_mad_list, &p_mad ); + if( status != IB_SUCCESS ) + { + /* If some of the MADs were sent wait for their completion. */ + if( p_mad_list != p_mad ) + status = IB_SUCCESS; + + while( p_mad ) + { + p_mad_list = p_mad->p_next; + p_mad->p_next = NULL; + p_ioc = (iou_ioc_t* __ptr64)p_mad->context2; + cl_atomic_dec( &p_ioc->ref_cnt ); + ib_put_mad( p_mad ); + if( !cl_atomic_dec( &p_results->p_svc->query_cnt ) && + status == IB_SUCCESS ) + { + cl_async_proc_queue( gp_async_pnp_mgr, + &p_results->async_item ); + } + p_mad = p_mad_list; + } + } + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +static void +__update_results( + IN ioc_sweep_results_t* const p_results ) +{ + cl_fmap_t iou_map1, iou_map2; + cl_fmap_item_t *p_item1, *p_item2; + iou_node_t *p_iou1, *p_iou2; + + AL_ENTER( AL_DBG_PNP ); + + cl_fmap_init( &iou_map1, __iou_cmp ); + cl_fmap_init( &iou_map2, __iou_cmp ); + + /* + * No need to lock on the sweep map since all accesses are serialized + * by the PnP thread. + */ + cl_fmap_delta( &gp_ioc_pnp->sweep_map, &p_results->iou_map, + &iou_map1, &iou_map2 ); + /* sweep_map and iou_map now contain exactly the same items. 
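cl_fmap_delta has moved every key unique to one side out into iou_map1
or iou_map2, leaving only the common keys behind in both maps.
 */

/*
 * Aside, as a worked example on bare keys:
 *
 *	before: sweep_map = { A, B, C }    iou_map = { B, C, D }
 *	after:  sweep_map = { B, C }       iou_map = { B, C }
 *	        with A and D split out into the two side maps
 *
 * The loop below then walks the two maps pairwise, merging the per-IOU
 * contents of each common item; finally the side maps are merged back,
 * so sweep_map ends up holding the union with refreshed data.
 */

/* Walk the common items pairwise: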
*/ + p_item1 = cl_fmap_head( &gp_ioc_pnp->sweep_map ); + p_item2 = cl_fmap_head( &p_results->iou_map ); + while( p_item1 != cl_fmap_end( &gp_ioc_pnp->sweep_map ) ) + { + CL_ASSERT( p_item2 != cl_fmap_end( &p_results->iou_map ) ); + p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item ); + p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item ); + CL_ASSERT( p_iou1->guid == p_iou2->guid ); + + /* + * Merge the IOC maps - this leaves all duplicates in + * p_iou2->ioc_map. + */ + cl_qmap_merge( &p_iou1->ioc_map, &p_iou2->ioc_map ); + + /* + * Merge the path maps - this leaves all duplicates in + * p_iou2->path_map + */ + cl_fmap_merge( &p_iou1->path_map, &p_iou2->path_map ); + + /* Return the duplicate IOU (and whatever duplicate paths and IOCs) */ + cl_fmap_remove_item( &p_results->iou_map, p_item2 ); + __put_iou( gp_ioc_pnp, p_iou2 ); + + p_item1 = cl_fmap_next( p_item1 ); + p_item2 = cl_fmap_head( &p_results->iou_map ); + } + CL_ASSERT( !cl_fmap_count( &p_results->iou_map ) ); + + /* Merge in the unique items. */ + cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map1 ); + CL_ASSERT( !cl_fmap_count( &iou_map1 ) ); + cl_fmap_merge( &gp_ioc_pnp->sweep_map, &iou_map2 ); + CL_ASSERT( !cl_fmap_count( &iou_map2 ) ); + + AL_EXIT( AL_DBG_PNP ); + return; +} + + +static void +__ioc_async_cb( + IN cl_async_proc_item_t *p_item ) +{ + cl_status_t status; + cl_fmap_t old_ious, new_ious; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_item == &gp_ioc_pnp->async_item ); + UNUSED_PARAM( p_item ); + + CL_ASSERT( !gp_ioc_pnp->query_cnt ); + + cl_fmap_init( &old_ious, __iou_cmp ); + cl_fmap_init( &new_ious, __iou_cmp ); + cl_fmap_delta( + &gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map, &new_ious, &old_ious ); + + /* For each duplicate IOU, report changes in IOCs or paths. */ + __change_ious( &gp_ioc_pnp->iou_map, &gp_ioc_pnp->sweep_map ); + + /* Report all new IOUs. */ + __add_ious( &gp_ioc_pnp->iou_map, &new_ious, NULL ); + CL_ASSERT( !cl_fmap_count( &new_ious ) ); + + /* Report all removed IOUs. */ + __remove_ious( &old_ious ); + CL_ASSERT( !cl_fmap_count( &old_ious ) ); + + /* Reset the sweep timer. */ + if( g_ioc_poll_interval ) + { + status = cl_timer_start( + &gp_ioc_pnp->sweep_timer, g_ioc_poll_interval ); + CL_ASSERT( status == CL_SUCCESS ); + } + + /* Release the reference we took in the timer callback. */ + deref_al_obj( &gp_ioc_pnp->obj ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__change_ious( + IN cl_fmap_t* const p_cur_ious, + IN cl_fmap_t* const p_dup_ious ) +{ + cl_fmap_t new_paths, old_paths; + cl_qmap_t new_iocs, old_iocs; + cl_fmap_item_t *p_item1, *p_item2; + iou_node_t *p_iou1, *p_iou2; + + AL_ENTER( AL_DBG_PNP ); + + cl_fmap_init( &new_paths, __path_cmp ); + cl_fmap_init( &old_paths, __path_cmp ); + cl_qmap_init( &new_iocs ); + cl_qmap_init( &old_iocs ); + + p_item1 = cl_fmap_head( p_cur_ious ); + p_item2 = cl_fmap_head( p_dup_ious ); + while( p_item1 != cl_fmap_end( p_cur_ious ) ) + { + p_iou1 = PARENT_STRUCT( p_item1, iou_node_t, map_item ); + p_iou2 = PARENT_STRUCT( p_item2, iou_node_t, map_item ); + CL_ASSERT( p_iou1->guid == p_iou2->guid ); + + /* Figure out what changed. */ + cl_fmap_delta( + &p_iou1->path_map, &p_iou2->path_map, &new_paths, &old_paths ); + cl_qmap_delta( + &p_iou1->ioc_map, &p_iou2->ioc_map, &new_iocs, &old_iocs ); + + /* + * Report path changes before IOC changes so that new IOCs + * report up-to-date paths. Report new paths before removing + * old ones to minimize the chance of disruption of service - + * i.e. 
the last path being removed before an alternate is available. + */ + __add_paths( p_iou1, &p_iou1->ioc_map, &new_paths, NULL ); + CL_ASSERT( !cl_fmap_count( &new_paths ) ); + + __remove_paths( &p_iou1->ioc_map, &old_paths ); + CL_ASSERT( !cl_fmap_count( &old_paths ) ); + + /* Report IOCs. */ + __add_iocs( p_iou1, &new_iocs, NULL ); + CL_ASSERT( !cl_qmap_count( &new_iocs ) ); + + __remove_iocs( p_iou1, &old_iocs ); + CL_ASSERT( !cl_qmap_count( &old_iocs ) ); + + /* Done with the duplicate IOU. Return it to the pool */ + cl_fmap_remove_item( p_dup_ious, p_item2 ); + __put_iou( gp_ioc_pnp, p_iou2 ); + + p_item1 = cl_fmap_next( p_item1 ); + p_item2 = cl_fmap_head( p_dup_ious ); + } + CL_ASSERT( !cl_fmap_count( p_dup_ious ) ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__add_ious( + IN cl_fmap_t* const p_cur_ious, + IN cl_fmap_t* const p_new_ious, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + cl_fmap_item_t *p_item; + iou_node_t *p_iou; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_head( p_new_ious ); + while( p_item != cl_fmap_end( p_new_ious ) ) + { + p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item ); + + /* Report the IOU addition. */ + __report_iou_add( p_iou, p_reg ); + + p_item = cl_fmap_next( p_item ); + } + + if( p_cur_ious != p_new_ious ) + { + cl_fmap_merge( p_cur_ious, p_new_ious ); + CL_ASSERT( !cl_fmap_count( p_new_ious ) ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__remove_ious( + IN cl_fmap_t* const p_old_ious ) +{ + cl_fmap_item_t *p_item; + iou_node_t *p_iou; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_head( p_old_ious ); + while( p_item != cl_fmap_end( p_old_ious ) ) + { + p_iou = PARENT_STRUCT( p_item, iou_node_t, map_item ); + + /* Report the IOU removal. */ + __report_iou_remove( p_iou ); + + cl_fmap_remove_item( p_old_ious, p_item ); + __put_iou( gp_ioc_pnp, p_iou ); + p_item = cl_fmap_head( p_old_ious ); + } + CL_ASSERT( !cl_fmap_count( p_old_ious ) ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__add_iocs( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_new_iocs, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + cl_map_item_t *p_item; + iou_ioc_t *p_ioc; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_qmap_head( p_new_iocs ); + while( p_item != cl_qmap_end( p_new_iocs ) ) + { + p_ioc = PARENT_STRUCT( p_item, iou_ioc_t, map_item ); + + /* Report the IOU addition. */ + __report_ioc_add( p_iou, p_ioc, p_reg ); + + p_item = cl_qmap_next( p_item ); + } + + if( p_new_iocs != &p_iou->ioc_map ) + { + cl_qmap_merge( &p_iou->ioc_map, p_new_iocs ); + CL_ASSERT( !cl_qmap_count( p_new_iocs ) ); + } + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__remove_iocs( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_old_iocs ) +{ + cl_map_item_t *p_item; + iou_ioc_t *p_ioc; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_qmap_tail( p_old_iocs ); + while( p_item != cl_qmap_end( p_old_iocs ) ) + { + p_ioc = PARENT_STRUCT( p_item, iou_ioc_t, map_item ); + + /* Report the IOC removal. 
*/ + __report_ioc_remove( p_iou, p_ioc ); + + cl_qmap_remove_item( p_old_iocs, p_item ); + __put_ioc( gp_ioc_pnp, p_ioc ); + p_item = cl_qmap_tail( p_old_iocs ); + } + CL_ASSERT( !cl_qmap_count( p_old_iocs ) ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__add_paths( + IN iou_node_t* const p_iou, + IN cl_qmap_t* const p_ioc_map, + IN cl_fmap_t* const p_new_paths, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + cl_map_item_t *p_ioc_item; + cl_fmap_item_t *p_item; + iou_ioc_t *p_ioc; + iou_path_t *p_path; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_head( p_new_paths ); + while( p_item != cl_fmap_end( p_new_paths ) ) + { + p_path = PARENT_STRUCT( p_item, iou_path_t, map_item ); + + /* Report the path to all IOCs. */ + for( p_ioc_item = cl_qmap_head( p_ioc_map ); + p_ioc_item != cl_qmap_end( p_ioc_map ); + p_ioc_item = cl_qmap_next( p_ioc_item ) ) + { + p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item ); + __report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg ); + } + + p_item = cl_fmap_next( p_item ); + } + + ASSERT( &p_iou->path_map != p_new_paths ); + + cl_fmap_merge( &p_iou->path_map, p_new_paths ); + CL_ASSERT( !cl_fmap_count( p_new_paths ) ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__add_ioc_paths( + IN iou_ioc_t* const p_ioc, + IN cl_fmap_t* const p_new_paths, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + cl_fmap_item_t *p_item; + iou_path_t *p_path; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_head( p_new_paths ); + while( p_item != cl_fmap_end( p_new_paths ) ) + { + p_path = PARENT_STRUCT( p_item, iou_path_t, map_item ); + + __report_path( p_ioc, p_path, IB_PNP_IOC_PATH_ADD, p_reg ); + + p_item = cl_fmap_next( p_item ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__remove_paths( + IN cl_qmap_t* const p_ioc_map, + IN cl_fmap_t* const p_old_paths ) +{ + cl_map_item_t *p_ioc_item; + cl_fmap_item_t *p_item; + iou_ioc_t *p_ioc; + iou_path_t *p_path; + + AL_ENTER( AL_DBG_PNP ); + + p_item = cl_fmap_tail( p_old_paths ); + while( p_item != cl_fmap_end( p_old_paths ) ) + { + p_path = PARENT_STRUCT( p_item, iou_path_t, map_item ); + + for( p_ioc_item = cl_qmap_tail( p_ioc_map ); + p_ioc_item != cl_qmap_end( p_ioc_map ); + p_ioc_item = cl_qmap_prev( p_ioc_item ) ) + { + p_ioc = PARENT_STRUCT( p_ioc_item, iou_ioc_t, map_item ); + __report_path( p_ioc, p_path, IB_PNP_IOC_PATH_REMOVE, NULL ); + } + + cl_fmap_remove_item( p_old_paths, p_item ); + __put_path( gp_ioc_pnp, p_path ); + p_item = cl_fmap_tail( p_old_paths ); + } + CL_ASSERT( !cl_fmap_count( p_old_paths ) ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static cl_status_t +__notify_users( + IN const cl_list_item_t* const p_item, + IN al_pnp_ioc_event_t* const p_event ) +{ + ib_api_status_t status; + al_pnp_t *p_reg; + al_pnp_context_t *p_context; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_item, al_pnp_t, list_item ); + + /* Copy the source record into the user's record. */ + cl_memcpy( p_event->p_user_rec, p_event->p_rec, p_event->rec_size ); + p_event->p_user_rec->h_pnp = p_reg; + p_event->p_user_rec->pnp_context = (void*)p_reg->obj.context; + + switch( p_event->p_rec->pnp_event ) + { + case IB_PNP_IOU_ADD: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ); + p_context = pnp_create_context( p_reg, &p_event->p_rec->guid); + break; + + case IB_PNP_IOU_REMOVE: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ); + /* Lookup the context for this IOU. 
*/ + p_context = pnp_get_context( p_reg, &p_event->p_rec->guid ); + break; + + case IB_PNP_IOC_ADD: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC ); + p_context = pnp_create_context( p_reg, &p_event->p_rec->guid); + break; + case IB_PNP_IOC_REMOVE: + case IB_PNP_IOC_PATH_ADD: + case IB_PNP_IOC_PATH_REMOVE: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC ); + p_context = pnp_get_context( p_reg, &p_event->p_rec->guid ); + break; + default: + AL_PRINT_EXIT(TRACE_LEVEL_WARNING, AL_DBG_PNP,("Invalid PnP event %#x\n", + p_event->p_rec->pnp_event)); + return CL_NOT_DONE; + break; + } + if( !p_context ) + return CL_NOT_FOUND; + + p_event->p_user_rec->context = (void*)p_context->context; + + /* Notify user. */ + status = p_reg->pfn_pnp_cb( p_event->p_user_rec ); + + /* Update contexts */ + if( status != IB_SUCCESS || + p_event->p_rec->pnp_event == IB_PNP_IOU_REMOVE || + p_event->p_rec->pnp_event == IB_PNP_IOC_REMOVE ) + { + cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item ); + cl_free( p_context ); + } + else + { + p_context->context = p_event->p_user_rec->context; + } + + AL_EXIT( AL_DBG_PNP ); + return CL_NOT_FOUND; +} + + +static void +__report_iou_add( + IN iou_node_t* const p_iou, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + al_pnp_ioc_event_t event; + ib_pnp_iou_rec_t *p_rec, *p_user_rec; + + AL_ENTER( AL_DBG_PNP ); + + event.rec_size = sizeof(ib_pnp_iou_rec_t); + event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) ); + + p_rec = cl_zalloc( event.rec_size * 2 ); + if( !p_rec ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to allocate user record.\n") ); + return; + } + p_rec->pnp_rec.pnp_event = IB_PNP_IOU_ADD; + p_rec->pnp_rec.guid = p_iou->guid; + p_rec->pnp_rec.ca_guid = p_iou->ca_guid; + + p_rec->ca_guid = p_iou->ca_guid; + p_rec->guid = p_iou->guid; + p_rec->chassis_guid = p_iou->chassis_guid; + p_rec->vend_id = p_iou->vend_id; + p_rec->dev_id = p_iou->dev_id; + p_rec->revision = p_iou->revision; + cl_memcpy( p_rec->desc, p_iou->desc, sizeof(p_rec->desc) ); + p_user_rec = (ib_pnp_iou_rec_t*)(((uint8_t*)p_rec) + event.rec_size); + + event.p_rec = (ib_pnp_rec_t*)p_rec; + event.p_user_rec = (ib_pnp_rec_t*)p_user_rec; + + if( p_reg ) + { + if( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ) + __notify_users( &p_reg->list_item, &event ); + else + __add_iocs( p_iou, &p_iou->ioc_map, p_reg ); + } + else + { + /* Report the IOU to all clients registered for IOU events. */ + cl_qlist_find_from_head( &gp_ioc_pnp->iou_reg_list, + __notify_users, &event ); + + /* Report IOCs - this will in turn report the paths. */ + __add_iocs( p_iou, &p_iou->ioc_map, NULL ); + } + + cl_free( p_rec ); + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__report_iou_remove( + IN iou_node_t* const p_iou ) +{ + al_pnp_ioc_event_t event; + ib_pnp_iou_rec_t rec, user_rec; + + AL_ENTER( AL_DBG_PNP ); + + /* Report IOCs - this will in turn report the paths. */ + __remove_iocs( p_iou, &p_iou->ioc_map ); + + cl_memclr( &rec, sizeof(ib_pnp_iou_rec_t) ); + rec.pnp_rec.pnp_event = IB_PNP_IOU_REMOVE; + rec.pnp_rec.guid = p_iou->guid; + rec.pnp_rec.ca_guid = p_iou->ca_guid; + + event.rec_size = sizeof(ib_pnp_iou_rec_t); + event.p_rec = (ib_pnp_rec_t*)&rec; + event.p_user_rec = (ib_pnp_rec_t*)&user_rec; + + /* + * Report the IOU to all clients registered for IOU events in + * reverse order than ADD notifications. 
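 * That is, the registrant list is walked from the tail, so the last
 * client to learn of the ADD is the first to learn of the REMOVE,
 * giving LIFO teardown ordering.
 */

/*
 * Aside: the head/tail pairing in miniature (illustrative pseudo-C,
 * not the cl_qlist API):
 *
 *	add:    for( c = first; c; c = c->next )  notify( c, ADD );
 *	remove: for( c = last;  c; c = c->prev )  notify( c, REMOVE );
 */

/* Walk the IOU registrants from the tail: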
+ */ + cl_qlist_find_from_tail( &gp_ioc_pnp->iou_reg_list, + __notify_users, &event ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__report_ioc_add( + IN iou_node_t* const p_iou, + IN iou_ioc_t* const p_ioc, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + al_pnp_ioc_event_t event; + ib_pnp_ioc_rec_t *p_rec; + + AL_ENTER( AL_DBG_PNP ); + + event.rec_size = sizeof(ib_pnp_ioc_rec_t) + + (sizeof(ib_svc_entry_t) * (p_ioc->profile.num_svc_entries - 1)); + event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) ); + + /* + * The layout of the pnp record is as follows: + * ib_pnp_rec_t + * ib_svc_entry_t + * ib_ioc_info_t + * + * This is needed to keep the service entries contiguous to the first + * entry in the pnp record. + */ + p_rec = (ib_pnp_ioc_rec_t*)cl_zalloc( event.rec_size * 2 ); + if( !p_rec ) + return; + + p_rec->pnp_rec.pnp_event = IB_PNP_IOC_ADD; + p_rec->pnp_rec.guid = p_ioc->profile.ioc_guid; + p_rec->pnp_rec.ca_guid = p_ioc->p_iou->ca_guid; + + p_rec->ca_guid = p_ioc->p_iou->ca_guid; + cl_memcpy( p_rec->svc_entry_array, p_ioc->p_svc_entries, + p_ioc->profile.num_svc_entries * sizeof(ib_svc_entry_t) ); + p_rec->info.chassis_guid = p_iou->chassis_guid; + p_rec->info.chassis_slot = p_iou->slot; + p_rec->info.iou_guid = p_iou->guid; + p_rec->info.iou_slot = p_ioc->slot; + p_rec->info.profile = p_ioc->profile; + + event.p_rec = (ib_pnp_rec_t*)p_rec; + event.p_user_rec = (ib_pnp_rec_t*)(((uint8_t*)p_rec) + event.rec_size); + + if( p_reg ) + { + __notify_users( &p_reg->list_item, &event ); + } + else + { + /* Report the IOC to all clients registered for IOC events. */ + cl_qlist_find_from_head( &gp_ioc_pnp->ioc_reg_list, + __notify_users, &event ); + } + cl_free( p_rec ); + + /* Report the paths for this IOC only. */ + __add_ioc_paths( p_ioc, &p_iou->path_map, p_reg ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__report_ioc_remove( + IN iou_node_t* const p_iou, + IN iou_ioc_t* const p_ioc ) +{ + al_pnp_ioc_event_t event; + ib_pnp_ioc_rec_t rec, user_rec; + + AL_ENTER( AL_DBG_PNP ); + + UNUSED_PARAM( p_iou ); + + cl_memclr( &rec, sizeof(ib_pnp_ioc_rec_t) ); + rec.pnp_rec.pnp_event = IB_PNP_IOC_REMOVE; + rec.pnp_rec.guid = p_ioc->profile.ioc_guid; + rec.pnp_rec.ca_guid = p_ioc->p_iou->ca_guid; + + event.rec_size = sizeof(ib_pnp_ioc_rec_t); + event.p_rec = (ib_pnp_rec_t*)&rec; + event.p_user_rec = (ib_pnp_rec_t*)&user_rec; + + /* + * Report the IOC removal to all clients registered for IOC events in + * reverse order than ADD notifications. + */ + cl_qlist_find_from_tail( &gp_ioc_pnp->ioc_reg_list, + __notify_users, &event ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__report_path( + IN iou_ioc_t* const p_ioc, + IN iou_path_t* const p_path, + IN ib_pnp_event_t pnp_event, + IN al_pnp_t* const p_reg OPTIONAL ) +{ + al_pnp_ioc_event_t event; + ib_pnp_ioc_path_rec_t *p_rec; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( pnp_event == IB_PNP_IOC_PATH_ADD || + pnp_event == IB_PNP_IOC_PATH_REMOVE ); + + event.rec_size = sizeof(ib_pnp_ioc_path_rec_t); + event.rec_size = ROUNDUP( event.rec_size, sizeof(void*) ); + + /* + * The layout of the pnp record is as follows: + * ib_pnp_rec_t + * ib_svc_entry_t + * ib_ioc_info_t + * + * This is needed to keep the service entries contiguous to the first + * entry in the pnp record. 
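 * (The structure list above is carried over from the IOC ADD record; a
 * path event's payload is the ib_path_rec_t copied in below.)  The "* 2"
 * sizing allocates the master record plus the per-client scratch copy
 * that __notify_users copies into, back to back at a pointer-aligned
 * offset.
 */

/*
 * Aside: the double-record layout in plain C (standard headers only;
 * 'struct example_rec' stands in for the pnp record types).
 */
#include <stdlib.h>

#define EX_ROUNDUP( val, align )	( ((val) + (align) - 1) / (align) * (align) )

struct example_rec
{
	int		payload;
};

static struct example_rec *
example_alloc_event_recs( struct example_rec **pp_user_copy )
{
	size_t rec_size = EX_ROUNDUP( sizeof(struct example_rec), sizeof(void*) );
	struct example_rec *p_master = calloc( 1, rec_size * 2 );

	if( p_master )
		*pp_user_copy = (struct example_rec*)((char*)p_master + rec_size);
	return p_master;	/* a single free() releases both copies */
}

/* Allocate the event record pair: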
+ */
+	p_rec = (ib_pnp_ioc_path_rec_t*)cl_zalloc( event.rec_size * 2 );
+	if( !p_rec )
+		return;
+	p_rec->pnp_rec.pnp_event = pnp_event;
+	p_rec->pnp_rec.guid = p_ioc->profile.ioc_guid;
+	p_rec->pnp_rec.ca_guid = p_path->ca_guid;
+
+	p_rec->ca_guid = p_path->ca_guid;
+	p_rec->port_guid = p_path->port_guid;
+	p_rec->path = p_path->rec;
+
+	event.p_rec = (ib_pnp_rec_t*)p_rec;
+	event.p_user_rec = (ib_pnp_rec_t*)(((uint8_t*)p_rec) + event.rec_size);
+
+	/* Report the IOC to all clients registered for IOC events. */
+	if( p_reg )
+	{
+		__notify_users( &p_reg->list_item, &event );
+	}
+	else
+	{
+		if( pnp_event == IB_PNP_IOC_PATH_ADD )
+		{
+			cl_qlist_find_from_head( &gp_ioc_pnp->ioc_reg_list,
+				__notify_users, &event );
+		}
+		else
+		{
+			cl_qlist_find_from_tail( &gp_ioc_pnp->ioc_reg_list,
+				__notify_users, &event );
+		}
+	}
+
+	cl_free( p_rec );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+void
+ioc_pnp_process_reg(
+	IN				cl_async_proc_item_t		*p_item )
+{
+	al_pnp_t		*p_reg;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item );
+
+	/* Add the registrant to the list. */
+	switch( pnp_get_class( p_reg->pnp_class ) )
+	{
+	case IB_PNP_IOU:
+		cl_qlist_insert_tail( &gp_ioc_pnp->iou_reg_list, &p_reg->list_item );
+		break;
+
+	case IB_PNP_IOC:
+		cl_qlist_insert_tail( &gp_ioc_pnp->ioc_reg_list, &p_reg->list_item );
+		break;
+
+	default:
+		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ||
+			pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );
+	}
+
+	/* Generate all relevant events for the registration. */
+	__add_ious( &gp_ioc_pnp->iou_map, &gp_ioc_pnp->iou_map, p_reg );
+
+	/* Notify the user that the registration is complete. */
+	pnp_reg_complete( p_reg );
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &p_reg->obj );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+void
+ioc_pnp_process_dereg(
+	IN				cl_async_proc_item_t		*p_item )
+{
+	al_pnp_t		*p_reg;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_reg = PARENT_STRUCT( p_item, al_pnp_t, dereg_item );
+
+	/* Remove the registration information from the list. */
+	switch( pnp_get_class( p_reg->pnp_class ) )
+	{
+	case IB_PNP_IOU:
+		cl_qlist_remove_item( &gp_ioc_pnp->iou_reg_list, &p_reg->list_item );
+		break;
+
+	case IB_PNP_IOC:
+		cl_qlist_remove_item( &gp_ioc_pnp->ioc_reg_list, &p_reg->list_item );
+		break;
+
+	default:
+		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOU ||
+			pnp_get_class( p_reg->pnp_class ) == IB_PNP_IOC );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid PnP registration type.\n") );
+	}
+
+	/* Release the reference we took for processing the deregistration. */
+	deref_al_obj( &p_reg->obj );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+
+
+
diff --git a/branches/Ndi/core/al/kernel/al_mad_pool.c b/branches/Ndi/core/al/kernel/al_mad_pool.c
new file mode 100644
index 00000000..036cfa5f
--- /dev/null
+++ b/branches/Ndi/core/al/kernel/al_mad_pool.c
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "al.h" +#include "al_ci_ca.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mad_pool.tmh" +#endif + +#include "al_mad_pool.h" +#include "al_pd.h" +#include "al_verbs.h" +#include "ib_common.h" + + +typedef struct _mad_send +{ + al_mad_send_t mad_send; + ib_pool_handle_t h_pool; + +} mad_send_t; + + + + +typedef struct _mad_rmpp +{ + al_mad_rmpp_t mad_rmpp; + ib_pool_handle_t h_pool; + +} mad_rmpp_t; + + + +/* + * Function prototypes. + */ +static void +__destroying_pool( + IN al_obj_t* p_obj ); + +static void +__free_pool( + IN al_obj_t* p_obj ); + +static void +__destroying_pool_key( + IN al_obj_t* p_obj ); + +static void +__cleanup_pool_key( + IN al_obj_t* p_obj ); + +static void +__free_pool_key( + IN al_obj_t* p_obj ); + +static cl_status_t +__mad_send_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + +static cl_status_t +__mad_rmpp_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + + + +/* + * Create a MAD pool. + */ +ib_api_status_t +ib_create_mad_pool( + IN const ib_al_handle_t h_al, + IN const size_t min, + IN const size_t max, + IN const size_t grow_size, + OUT ib_pool_handle_t* const ph_pool ) +{ + ib_pool_handle_t h_pool; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD_POOL ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !ph_pool ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Validate the min and max parameters. */ + if( (min > 0) && (max > 0) && (min > max) ) + return IB_INVALID_SETTING; + + h_pool = cl_zalloc( sizeof( al_pool_t ) ); + if( !h_pool ) + return IB_INSUFFICIENT_MEMORY; + + /* Initialize the pool lists. */ + cl_qlist_init( &h_pool->key_list ); + ExInitializeNPagedLookasideList( &h_pool->mad_stack, NULL, NULL, + 0, sizeof(mad_item_t), 'dmla', 0 ); + ExInitializeNPagedLookasideList( &h_pool->mad_send_pool, NULL, NULL, + 0, sizeof(mad_send_t), 'dmla', 0 ); + ExInitializeNPagedLookasideList( &h_pool->mad_rmpp_pool, NULL, NULL, + 0, sizeof(mad_rmpp_t), 'dmla', 0 ); + + /* Initialize the pool object. */ + construct_al_obj( &h_pool->obj, AL_OBJ_TYPE_H_MAD_POOL ); + status = init_al_obj( &h_pool->obj, h_pool, TRUE, + __destroying_pool, NULL, __free_pool ); + if( status != IB_SUCCESS ) + { + __free_pool( &h_pool->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Attach the pool to the AL object. 
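Attaching links the pool into the AL object hierarchy, so destroying
the owning AL instance tears the pool down with it.
 */

/*
 * Aside: the three NPAGED_LOOKASIDE_LISTs initialized above give the pool
 * cheap constant-time recycling of MAD elements, send contexts and RMPP
 * contexts.  A plain free-list is the portable moral equivalent (sketch
 * only; the kernel lists add per-processor caching and a depth limit):
 */
typedef struct example_free_node
{
	struct example_free_node	*p_next;
} example_free_node_t;

typedef struct example_lookaside
{
	example_free_node_t		*p_head;		/* stack of recycled items */
} example_lookaside_t;

static void *example_la_alloc( example_lookaside_t *p_la,
	void *(*pfn_fallback)( void ) )
{
	if( p_la->p_head )
	{
		void *p_item = p_la->p_head;
		p_la->p_head = p_la->p_head->p_next;
		return p_item;
	}
	return pfn_fallback();		/* empty: fall back to the allocator */
}

static void example_la_free( example_lookaside_t *p_la, void *p_item )
{
	((example_free_node_t*)p_item)->p_next = p_la->p_head;
	p_la->p_head = (example_free_node_t*)p_item;
}

/* Attach the pool to its parent AL instance: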
 */
+	status = attach_al_obj( &h_al->obj, &h_pool->obj );
+	if( status != IB_SUCCESS )
+	{
+		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Save the pool parameters.  Set grow_size to min for initialization. */
+	h_pool->max = max;
+	h_pool->grow_size = min;
+
+	/* Save the grow_size for subsequent allocations. */
+	h_pool->grow_size = grow_size;
+
+	/* Return the pool handle. */
+	*ph_pool = h_pool;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_pool->obj );
+
+	AL_EXIT( AL_DBG_MAD_POOL );
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Pre-destroy the pool.
+ */
+static void
+__destroying_pool(
+	IN				al_obj_t					*p_obj )
+{
+	ib_pool_handle_t		h_pool;
+	ib_al_handle_t			h_al;
+
+	AL_ENTER( AL_DBG_MAD_POOL );
+
+	CL_ASSERT( p_obj );
+	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
+
+	/* Get the AL instance of this MAD pool. */
+	p_obj = h_pool->obj.p_parent_obj;
+	h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );
+
+	/* Deregister this MAD pool from all protection domains. */
+	al_dereg_pool( h_al, h_pool );
+
+	AL_EXIT( AL_DBG_MAD_POOL );
+}
+
+
+
+/*
+ * Free the pool.
+ */
+static void
+__free_pool(
+	IN				al_obj_t					*p_obj )
+{
+	ib_pool_handle_t		h_pool;
+
+	CL_ASSERT( p_obj );
+	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
+
+	ExDeleteNPagedLookasideList( &h_pool->mad_send_pool );
+	ExDeleteNPagedLookasideList( &h_pool->mad_rmpp_pool );
+	ExDeleteNPagedLookasideList( &h_pool->mad_stack );
+	destroy_al_obj( &h_pool->obj );
+	cl_free( h_pool );
+}
+
+
+
+/*
+ * Destroy a MAD pool.
+ */
+ib_api_status_t
+ib_destroy_mad_pool(
+	IN		const	ib_pool_handle_t			h_pool )
+{
+	cl_list_item_t		*p_array_item;
+	al_obj_t			*p_obj;
+	boolean_t			busy;
+
+	AL_ENTER( AL_DBG_MAD_POOL );
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* Verify that all send handles and MAD elements are in the pool. */
+	cl_spinlock_acquire( &h_pool->obj.lock );
+	busy = ( h_pool->obj.ref_cnt > 1 );
+	for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );
+		 p_array_item != cl_qlist_end( &h_pool->obj.obj_list ) && !busy;
+		 p_array_item = cl_qlist_next( p_array_item ) )
+	{
+		p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );
+		busy = ( p_obj->ref_cnt > 1 );
+	}
+	cl_spinlock_release( &h_pool->obj.lock );
+
+	/* Return an error if the pool is busy. */
+	if( busy )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("h_pool (0x%016I64x) is busy!\n", (LONG64)h_pool) );
+		return IB_RESOURCE_BUSY;
+	}
+
+	ref_al_obj( &h_pool->obj );
+	h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
+
+	AL_EXIT( AL_DBG_MAD_POOL );
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Register a MAD pool with a protection domain.
+ */
+ib_api_status_t
+ib_reg_mad_pool(
+	IN		const	ib_pool_handle_t			h_pool,
+	IN		const	ib_pd_handle_t				h_pd,
+		OUT			ib_pool_key_t* const		pp_pool_key )
+{
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_MAD_POOL );
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+	/* Alias keys require an alias PD.
*/ + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + status = reg_mad_pool( h_pool, h_pd, pp_pool_key ); + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + deref_al_obj( &(*pp_pool_key)->obj ); + + AL_EXIT( AL_DBG_MAD_POOL ); + return status; +} + + +ib_api_status_t +reg_mad_pool( + IN const ib_pool_handle_t h_pool, + IN const ib_pd_handle_t h_pd, + OUT ib_pool_key_t* const pp_pool_key ) +{ + al_pool_key_t* p_pool_key; + ib_al_handle_t h_al; + ib_api_status_t status; + al_key_type_t key_type; + + AL_ENTER( AL_DBG_MAD_POOL ); + + if( !pp_pool_key ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Set the type of key to create. */ + if( h_pd->type != IB_PDT_ALIAS ) + key_type = AL_KEY_NORMAL; + else + key_type = AL_KEY_ALIAS; + + /* Allocate a pool key structure. */ + p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) ); + if( !p_pool_key ) + return IB_INSUFFICIENT_MEMORY; + + /* Initialize the pool key. */ + construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY ); + p_pool_key->type = key_type; + p_pool_key->h_pool = h_pool; + + /* Initialize the pool key object. */ + status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE, + __destroying_pool_key, __cleanup_pool_key, __free_pool_key ); + if( status != IB_SUCCESS ) + { + __free_pool_key( &p_pool_key->obj ); + + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the pool on the protection domain. */ + if( key_type == AL_KEY_NORMAL ) + { + ib_phys_create_t phys_create; + ib_phys_range_t phys_range; + uint64_t vaddr; + net32_t rkey; + + /* Register all of physical memory. */ + phys_create.length = 0xFFFFFFFFFFFFFFFF; + phys_create.num_ranges = 1; + phys_create.range_array = &phys_range; + phys_create.buf_offset = 0; + phys_create.hca_page_size = PAGE_SIZE; + phys_create.access_ctrl = IB_AC_LOCAL_WRITE; + phys_range.base_addr = 0; + phys_range.size = 0xFFFFFFFFFFFFFFFF; + vaddr = 0; + status = ib_reg_phys( h_pd, &phys_create, &vaddr, + &p_pool_key->lkey, &rkey, &p_pool_key->h_mr ); + if( status != IB_SUCCESS ) + { + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_phys returned %s\n", ib_get_err_str( status )) ); + return status; + } + + /* Chain the pool key onto the pool. */ + cl_spinlock_acquire( &h_pool->obj.lock ); + cl_qlist_insert_tail( &h_pool->key_list, &p_pool_key->pool_item ); + cl_spinlock_release( &h_pool->obj.lock ); + } + + /* + * Attach to the pool after we register the memory so that PD destruction + * will cleanup the pool key before its memory region. + */ + status = attach_al_obj( &h_pd->obj, &p_pool_key->obj ); + if( status != IB_SUCCESS ) + { + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s\n", ib_get_err_str(status)) ); + return status; + } + + /* From the PD, get the AL handle of the pool_key. */ + h_al = h_pd->obj.h_al; + + /* Add this pool_key to the AL instance. */ + al_insert_key( h_al, p_pool_key ); + + ref_al_obj( &h_pool->obj ); + + /* + * Take a reference on the global pool_key for this CA, if it exists. + * Note that the pool_key does not exist for the global MAD pool in + * user-mode, as that MAD pool never registers memory on a PD. 
+ */ + /* TODO: Is the pool_key check here needed since this is a kernel-only implementation? */ + if( key_type == AL_KEY_ALIAS && h_pd->obj.p_ci_ca->pool_key ) + { + ref_al_obj( &h_pd->obj.p_ci_ca->pool_key->obj ); + p_pool_key->pool_key = h_pd->obj.p_ci_ca->pool_key; + } + + /* Return the pool key. */ + *pp_pool_key = (ib_pool_key_t)p_pool_key; + + AL_EXIT( AL_DBG_MAD_POOL ); + return IB_SUCCESS; +} + + +/* + * The destroying callback releases the memory registration. This is needed + * to maintain the destroy semantics, where the pool key's destruction is + * async, but the MAD registrations are sync. This means that all memory + * registered on a pool key is deregistered before the pool key leaves the + * destroy call. + */ +static void +__destroying_pool_key( + IN al_obj_t* p_obj ) +{ + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + /* Remove this pool_key from the AL instance. */ + al_remove_key( p_pool_key ); + + p_pool_key->lkey = 0; +} + + +/* + * Release all references on objects that were needed by the pool key. + */ +static void +__cleanup_pool_key( + IN al_obj_t* p_obj ) +{ + cl_list_item_t *p_list_item, *p_next_item; + ib_mad_element_t *p_mad_element_list, *p_last_mad_element; + al_mad_element_t *p_mad; + ib_api_status_t status; + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + CL_ASSERT( !p_pool_key->mad_cnt ); + + if( p_pool_key->h_mr ) + ib_dereg_mr( p_pool_key->h_mr ); + + /* Search for any outstanding MADs associated with the given pool key. */ + if( p_pool_key->mad_cnt ) + { + p_mad_element_list = p_last_mad_element = NULL; + + cl_spinlock_acquire( &p_pool_key->obj.h_al->mad_lock ); + for( p_list_item = cl_qlist_head( &p_pool_key->obj.h_al->mad_list ); + p_list_item != cl_qlist_end( &p_pool_key->obj.h_al->mad_list ); + p_list_item = p_next_item ) + { + p_next_item = cl_qlist_next( p_list_item ); + p_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item ); + + if( p_mad->pool_key != p_pool_key ) continue; + + /* Build the list of MADs to be returned to pool. */ + if( p_last_mad_element ) + p_last_mad_element->p_next = &p_mad->element; + else + p_mad_element_list = &p_mad->element; + + p_last_mad_element = &p_mad->element; + p_last_mad_element->p_next = NULL; + } + cl_spinlock_release( &p_pool_key->obj.h_al->mad_lock ); + + /* Return any outstanding MADs to the pool. */ + if( p_mad_element_list ) + { + status = ib_put_mad( p_mad_element_list ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_put_mad failed with status %s, continuing.\n", + ib_get_err_str(status)) ); + } + } + } + + /* + * Remove the pool key from the pool to prevent further registrations + * against this pool. + */ + if( p_pool_key->type == AL_KEY_NORMAL ) + { + cl_spinlock_acquire( &p_pool_key->h_pool->obj.lock ); + cl_qlist_remove_item( &p_pool_key->h_pool->key_list, + &p_pool_key->pool_item ); + cl_spinlock_release( &p_pool_key->h_pool->obj.lock ); + } + + deref_al_obj( &p_pool_key->h_pool->obj ); + p_pool_key->h_pool = NULL; + if( p_pool_key->pool_key ) + deref_al_obj( &p_pool_key->pool_key->obj ); +} + + + +/* + * Free a pool key. + */ +static void +__free_pool_key( + IN al_obj_t* p_obj ) +{ + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + destroy_al_obj( &p_pool_key->obj ); + cl_free( p_pool_key ); +} + + +/* + * Deregister a MAD pool from a protection domain. 
Only normal pool_keys + * can be destroyed using this routine. + */ +ib_api_status_t +ib_dereg_mad_pool( + IN const ib_pool_key_t pool_key ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD_POOL ); + + if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + ref_al_obj( &pool_key->obj ); + status = dereg_mad_pool( pool_key, AL_KEY_NORMAL ); + + if( status != IB_SUCCESS ) + deref_al_obj( &pool_key->obj ); + + AL_EXIT( AL_DBG_MAD_POOL ); + return status; +} + + + +/* + * Deregister a MAD pool from a protection domain. + */ +ib_api_status_t +dereg_mad_pool( + IN const ib_pool_key_t pool_key, + IN const al_key_type_t expected_type ) +{ + AL_ENTER( AL_DBG_MAD_POOL ); + + if( pool_key->type != expected_type ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Check mad_cnt to see if MADs are still outstanding. */ + //if( pool_key->mad_cnt ) + //{ + // AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_MAD_POOL, ("IB_RESOURCE_BUSY\n") ); + // return IB_RESOURCE_BUSY; + //} + + pool_key->obj.pfn_destroy( &pool_key->obj, NULL ); + + AL_EXIT( AL_DBG_MAD_POOL ); + return IB_SUCCESS; +} + + + +/* + * Obtain a MAD element from the pool. + */ +static ib_api_status_t +__get_mad_element( + IN const ib_pool_key_t pool_key, + OUT al_mad_element_t** pp_mad_element ) +{ + mad_item_t* p_mad_item; + net32_t lkey; + + AL_ENTER( AL_DBG_MAD_POOL ); + + CL_ASSERT( pool_key ); + CL_ASSERT( pp_mad_element ); + + /* Obtain a MAD item from the stack. */ + p_mad_item = (mad_item_t*)ExAllocateFromNPagedLookasideList( + &pool_key->h_pool->mad_stack ); + if( !p_mad_item ) + return IB_INSUFFICIENT_RESOURCES; + + p_mad_item->pool_key = pool_key; + + if( pool_key->type == AL_KEY_NORMAL ) + lkey = pool_key->lkey; + else + lkey = pool_key->pool_key->lkey; + + CL_ASSERT( ADDRESS_AND_SIZE_TO_SPAN_PAGES( + p_mad_item->al_mad_element.mad_buf, MAD_BLOCK_GRH_SIZE ) == 1 ); + + /* Clear the element. */ + cl_memclr( &p_mad_item->al_mad_element, sizeof(al_mad_element_t) ); + + /* Initialize the receive data segment information. */ + p_mad_item->al_mad_element.grh_ds.vaddr = + cl_get_physaddr( p_mad_item->al_mad_element.mad_buf ); + p_mad_item->al_mad_element.grh_ds.length = MAD_BLOCK_GRH_SIZE; + p_mad_item->al_mad_element.grh_ds.lkey = lkey; + + /* Initialize the send data segment information. */ + p_mad_item->al_mad_element.mad_ds.vaddr = + p_mad_item->al_mad_element.grh_ds.vaddr + sizeof(ib_grh_t); + p_mad_item->al_mad_element.mad_ds.length = MAD_BLOCK_SIZE; + p_mad_item->al_mad_element.mad_ds.lkey = lkey; + + /* Initialize grh */ + p_mad_item->al_mad_element.element.p_grh = + (ib_grh_t*)p_mad_item->al_mad_element.mad_buf; + + /* Hold a reference on the pool key while a MAD element is removed. */ + ref_al_obj( &pool_key->obj ); + cl_atomic_inc( &pool_key->mad_cnt ); + + p_mad_item->al_mad_element.pool_key = (ib_pool_key_t)pool_key; + /* Return the MAD element. */ + *pp_mad_element = &p_mad_item->al_mad_element; + + AL_EXIT( AL_DBG_MAD_POOL ); + return IB_SUCCESS; +} + + + +/* + * Return a MAD element to the pool. 
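 * This is the free half of the element lifecycle begun in
 * __get_mad_element above.
 */

/*
 * Aside: the receive layout that __get_mad_element sets up, as plain
 * arithmetic.  A UD receive may be preceded by a 40-byte GRH, so the
 * receive segment spans GRH plus MAD while the send segment skips the
 * GRH (sizes per the IBA spec; names illustrative):
 */
#include <stdint.h>

enum
{
	EX_GRH_SIZE				= 40,	/* sizeof(ib_grh_t) */
	EX_MAD_BLOCK_SIZE		= 256,	/* one MAD datagram */
	EX_MAD_BLOCK_GRH_SIZE	= EX_GRH_SIZE + EX_MAD_BLOCK_SIZE	/* 296 */
};

/* grh_ds covers [buf, buf+296); mad_ds covers [buf+40, buf+296). */
static uint64_t example_mad_ds_vaddr( uint64_t grh_ds_vaddr )
{
	return grh_ds_vaddr + EX_GRH_SIZE;
}

/* __put_mad_element - return the element and drop its pool-key reference: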
+ */ +static void +__put_mad_element( + IN al_mad_element_t* p_mad_element ) +{ + mad_item_t* p_mad_item; + ib_pool_key_t pool_key; + + CL_ASSERT( p_mad_element ); + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + pool_key = p_mad_item->pool_key; + CL_ASSERT( pool_key ); + CL_ASSERT( pool_key->h_pool ); + + /* Clear the MAD buffer. */ + cl_memclr( p_mad_element->mad_buf, MAD_BLOCK_GRH_SIZE ); + p_mad_element->element.p_next = NULL; + + /* Return the MAD element to the pool. */ + ExFreeToNPagedLookasideList( &pool_key->h_pool->mad_stack, p_mad_item ); + + cl_atomic_dec( &pool_key->mad_cnt ); + deref_al_obj( &pool_key->obj ); +} + + + +ib_mad_send_handle_t +get_mad_send( + IN const al_mad_element_t *p_mad_element ) +{ + mad_item_t* p_mad_item; + mad_send_t *p_mad_send; + + CL_ASSERT( p_mad_element ); + + /* Get a handle to the pool. */ + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + CL_ASSERT( p_mad_item->pool_key ); + CL_ASSERT( p_mad_item->pool_key->h_pool ); + + p_mad_send = ExAllocateFromNPagedLookasideList( + &p_mad_item->pool_key->h_pool->mad_send_pool ); + if( !p_mad_send ) + return NULL; + + p_mad_send->mad_send.canceled = FALSE; + p_mad_send->mad_send.p_send_mad = NULL; + p_mad_send->mad_send.p_resp_mad = NULL; + p_mad_send->mad_send.h_av = NULL; + p_mad_send->mad_send.retry_cnt = 0; + p_mad_send->mad_send.retry_time = 0; + p_mad_send->mad_send.delay = 0; + p_mad_send->h_pool = p_mad_item->pool_key->h_pool; + + ref_al_obj( &p_mad_item->pool_key->h_pool->obj ); + return &p_mad_send->mad_send; +} + + + +void +put_mad_send( + IN ib_mad_send_handle_t h_mad_send ) +{ + mad_send_t *p_mad_send; + ib_pool_handle_t h_pool; + + p_mad_send = PARENT_STRUCT( h_mad_send, mad_send_t, mad_send ); + h_pool = p_mad_send->h_pool; + + ExFreeToNPagedLookasideList( &h_pool->mad_send_pool, p_mad_send ); + deref_al_obj( &h_pool->obj ); +} + + + +al_mad_rmpp_t* +get_mad_rmpp( + IN const al_mad_element_t *p_mad_element ) +{ + mad_item_t *p_mad_item; + mad_rmpp_t *p_mad_rmpp; + + CL_ASSERT( p_mad_element ); + + /* Get a handle to the pool. 
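+	 * Note the symmetry with get_mad_send/put_mad_send above: each get_*
+	 * routine takes a reference on the owning pool object that the matching
+	 * put_* routine releases, so the pool cannot be destroyed while send or
+	 * RMPP tracking structures are still outstanding.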
*/ + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + CL_ASSERT( p_mad_item->pool_key ); + CL_ASSERT( p_mad_item->pool_key->h_pool ); + + p_mad_rmpp = ExAllocateFromNPagedLookasideList( + &p_mad_item->pool_key->h_pool->mad_rmpp_pool ); + if( !p_mad_rmpp ) + return NULL; + + p_mad_rmpp->h_pool = p_mad_item->pool_key->h_pool; + + ref_al_obj( &p_mad_item->pool_key->h_pool->obj ); + return &p_mad_rmpp->mad_rmpp; +} + + + +void +put_mad_rmpp( + IN al_mad_rmpp_t* h_mad_rmpp ) +{ + mad_rmpp_t *p_mad_rmpp; + ib_pool_handle_t h_pool; + + p_mad_rmpp = PARENT_STRUCT( h_mad_rmpp, mad_rmpp_t, mad_rmpp ); + + h_pool = p_mad_rmpp->h_pool; + + ExFreeToNPagedLookasideList( &h_pool->mad_rmpp_pool, p_mad_rmpp ); + deref_al_obj( &h_pool->obj ); +} + + + +ib_api_status_t +ib_get_mad( + IN const ib_pool_key_t pool_key, + IN const size_t buf_size, + OUT ib_mad_element_t **pp_mad_element ) +{ + al_mad_element_t* p_mad; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD_POOL ); + + if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + if( !buf_size || !pp_mad_element ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + status = __get_mad_element( pool_key, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_MAD_POOL ); + return status; + } + + /* Set the user accessible buffer. */ + if( buf_size <= MAD_BLOCK_SIZE ) + { + /* Use the send buffer for 256 byte MADs. */ + p_mad->element.p_mad_buf = (ib_mad_t*)(p_mad->mad_buf + sizeof(ib_grh_t)); + } + else if( buf_size >= 0xFFFFFFFF ) + { + __put_mad_element( p_mad ); + return IB_INVALID_SETTING; + } + else + { + /* Allocate a new buffer for the MAD. */ + p_mad->p_al_mad_buf = cl_zalloc( buf_size ); + if( !p_mad->p_al_mad_buf ) + { + __put_mad_element( p_mad ); + AL_EXIT( AL_DBG_MAD_POOL ); + return IB_INSUFFICIENT_MEMORY; + } + p_mad->element.p_mad_buf = p_mad->p_al_mad_buf; + } + p_mad->element.size = (uint32_t)buf_size; + + /* Track the MAD element with the requesting AL instance. */ + al_insert_mad( pool_key->h_al, p_mad ); + + /* Return the MAD element to the client. */ + *pp_mad_element = &p_mad->element; + + AL_EXIT( AL_DBG_MAD_POOL ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_put_mad( + IN const ib_mad_element_t* p_mad_element_list ) +{ + al_mad_element_t* p_mad; + + if( !p_mad_element_list ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + while( p_mad_element_list ) + { + p_mad = PARENT_STRUCT( p_mad_element_list, al_mad_element_t, element ); + p_mad_element_list = p_mad_element_list->p_next; + + /* Deallocate any buffers allocated for the user. */ + if( p_mad->p_al_mad_buf ) + { + cl_free( p_mad->p_al_mad_buf ); + p_mad->p_al_mad_buf = NULL; + } + + /* See if the MAD has already been returned to the MAD pool. */ + CL_ASSERT( p_mad->h_al ); + + /* Remove the MAD element from the owning AL instance. */ + al_remove_mad( p_mad ); + + /* Return the MAD element to the pool. */ + __put_mad_element( p_mad ); + } + + return IB_SUCCESS; +} + + + +/* + * Resize the data buffer associated with a MAD element. + */ +ib_api_status_t +al_resize_mad( + OUT ib_mad_element_t *p_mad_element, + IN const size_t buf_size ) +{ + al_mad_element_t *p_al_element; + ib_mad_t *p_new_buf; + + CL_ASSERT( p_mad_element ); + + /* We only support growing the buffer for now. 
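+	 * An illustrative call (hypothetical caller): on success the old
+	 * contents are copied over, the added tail is zeroed, and p_mad_buf
+	 * points at the larger buffer.
+	 *
+	 *	// Grow a 256-byte element to hold a 1024-byte payload.
+	 *	status = al_resize_mad( p_mad_element, 1024 );
+	 *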
*/ + CL_ASSERT( buf_size > p_mad_element->size ); + + /* Cap the size. */ + if( buf_size >= 0xFFFFFFFF ) + return IB_INVALID_SETTING; + + p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* Allocate a new buffer. */ + p_new_buf = cl_malloc( buf_size ); + if( !p_new_buf ) + return IB_INSUFFICIENT_MEMORY; + + /* Copy the existing buffer's data into the new buffer. */ + cl_memcpy( p_new_buf, p_mad_element->p_mad_buf, p_mad_element->size ); + cl_memclr( (uint8_t*)p_new_buf + p_mad_element->size, + buf_size - p_mad_element->size ); + + /* Update the MAD element to use the new buffer. */ + p_mad_element->p_mad_buf = p_new_buf; + p_mad_element->size = (uint32_t)buf_size; + + /* Free any old buffer. */ + if( p_al_element->p_al_mad_buf ) + cl_free( p_al_element->p_al_mad_buf ); + p_al_element->p_al_mad_buf = p_new_buf; + + return IB_SUCCESS; +} + diff --git a/branches/Ndi/core/al/kernel/al_mgr.c b/branches/Ndi/core/al/kernel/al_mgr.c new file mode 100644 index 00000000..6babc91e --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_mgr.c @@ -0,0 +1,622 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include "al.h" +#include "al_cm_cep.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mgr.tmh" +#endif + +#include "al_dm.h" +#include "al_mad_pool.h" +#include "al_mcast.h" +#include "al_mgr.h" +#include "al_pnp.h" +#include "al_ioc_pnp.h" +#include "al_query.h" +#include "al_res_mgr.h" +#include "al_smi.h" +#include "ib_common.h" + +#ifndef CL_KERNEL +#include "ual_mgr.h" +#endif + + +#define AL_HDL_VECTOR_MIN 64 +#define AL_HDL_VECTOR_GROW 64 + + +static void +__free_al_mgr( + IN al_obj_t *p_obj ); + +void +free_al( + IN al_obj_t *p_obj ); + + + +ib_api_status_t +create_al_mgr() +{ + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MGR ); + + CL_ASSERT( !gp_al_mgr ); + + gp_al_mgr = cl_zalloc( sizeof( al_mgr_t ) ); + if( !gp_al_mgr ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("cl_zalloc failed.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the AL manager components. 
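+	 *
+	 * The construction below follows the al_obj lifecycle used throughout
+	 * this file; as a sketch (sequence inferred from the calls that follow):
+	 *
+	 *	construct_al_obj( &obj, TYPE );		// prepare the object
+	 *	init_al_obj( &obj, ctx, FALSE,
+	 *		destroying_cb, cleanup_cb, free_cb );	// takes one reference
+	 *	attach_al_obj( &p_parent->obj, &obj );	// link into the tree
+	 *	deref_al_obj( &obj );			// drop the init reference
+	 *	// Teardown later runs through obj.pfn_destroy( &obj, NULL ).
+	 *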
*/ + cl_qlist_init( &gp_al_mgr->ci_ca_list ); + cl_qlist_init( &gp_al_mgr->al_obj_list ); + cl_spinlock_construct( &gp_al_mgr->lock ); + + /* Initialize the AL management components. */ + construct_al_obj( &gp_al_mgr->obj, AL_OBJ_TYPE_AL_MGR ); + status = init_al_obj( &gp_al_mgr->obj, gp_al_mgr, FALSE, + NULL, NULL, __free_al_mgr ); + if( status != IB_SUCCESS ) + { + __free_al_mgr( &gp_al_mgr->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed, status = 0x%x.\n", status) ); + return status; + } + + cl_status = cl_spinlock_init( &gp_al_mgr->lock ); + if( cl_status != CL_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_spinlock_init failed\n") ); + return ib_convert_cl_status( cl_status ); + } + + /* We should be able to open AL now. */ + status = ib_open_al( &gh_al ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_open_al failed, status = 0x%x.\n", status) ); + return status; + } + + /* + * Initialize the AL management services. + * Create the PnP manager first - the other services depend on PnP. + */ + status = create_pnp( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_pnp_create failed with %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Create the global AL MAD pool. */ + status = ib_create_mad_pool( gh_al, 0, 0, 64, &gh_mad_pool ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_create_mad_pool failed with %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Initialize the AL resource manager. */ + status = create_res_mgr( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_res_mgr failed with %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Initialize the AL special QP manager. */ + status = create_spl_qp_mgr( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_spl_qp_mgr failed with %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Initialize the AL SA request manager. */ + status = create_sa_req_mgr( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_sa_req_mgr failed, status = 0x%x.\n", status) ); + return status; + } + + /* Initialize CM */ + status = create_cep_mgr( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_cm_mgr failed, status = 0x%x.\n", status) ); + return status; + } + + /* Initialize the AL device management agent. */ + +/* + Disable support of DM agent. 
+ + status = create_dm_agent( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_dm_agent failed, status = 0x%x.\n", status) ); + return status; + } +*/ + status = create_ioc_pnp( &gp_al_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_ioc_pnp failed, status = 0x%x.\n", status) ); + return status; + } + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &gp_al_mgr->obj ); + + AL_EXIT( AL_DBG_MGR ); + return IB_SUCCESS; +} + + + +static void +__free_al_mgr( + IN al_obj_t *p_obj ) +{ + CL_ASSERT( p_obj == &gp_al_mgr->obj ); + + /* + * We need to destroy the AL object before the spinlock, since + * destroying the AL object will try to acquire the spinlock. + */ + destroy_al_obj( p_obj ); + + /* Verify that the object list is empty. */ + print_al_objs( NULL ); + + cl_spinlock_destroy( &gp_al_mgr->lock ); + cl_free( gp_al_mgr ); + gp_al_mgr = NULL; +} + + + +/* + * Register a new CI CA with the access layer. + */ +ib_api_status_t +ib_register_ca( + IN const ci_interface_t* p_ci ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MGR ); + + if( !p_ci ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + CL_ASSERT( !find_ci_ca( p_ci->guid ) ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MGR, + ("CA guid %I64x.\n", p_ci->guid) ); + + /* Check the channel interface verbs version. */ + if( p_ci->version != VERBS_VERSION ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Unsupported channel interface version, " + "expected = 0x%x, actual = 0x%x.\n", + VERBS_VERSION, p_ci->version) ); + return IB_UNSUPPORTED; + } + + /* Construct and initialize the CA structure. */ + status = create_ci_ca( &gp_al_mgr->obj, p_ci ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("al_mgr_ca_init failed.\n") ); + return status; + } + + AL_EXIT( AL_DBG_MGR ); + return status; +} + + + +/* + * Process the removal of a CI CA from the system. + */ +ib_api_status_t +ib_deregister_ca( + IN const net64_t ca_guid ) +{ + al_ci_ca_t *p_ci_ca; + + AL_ENTER( AL_DBG_MGR ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_MGR, + ("Deregistering CA guid %I64x.\n", ca_guid) ); + + /* Locate the CA. */ + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( ca_guid ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + if( !p_ci_ca ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("CA not found.\n") ); + return IB_NOT_FOUND; + } + + /* + * TODO: Before destroying, do a query PnP call and return IB_BUSY + * as needed. + */ + /* Destroy the CI CA. */ + ref_al_obj( &p_ci_ca->obj ); + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + + AL_EXIT( AL_DBG_MGR ); + return IB_SUCCESS; +} + + +/* + * Initialize a proxy entry used to map user-mode to kernel-mode resources. + */ +static cl_status_t +__init_hdl( + IN void* const p_element, + IN void* context ) +{ + al_handle_t *p_h; + + p_h = (al_handle_t*)p_element; + + /* Chain free entries one after another. */ + p_h->p_obj = (al_obj_t*)(uintn_t)++(((ib_al_handle_t)context)->free_hdl); + p_h->type = AL_OBJ_TYPE_UNKNOWN; + + return CL_SUCCESS; +} + + +/* + * Create a new instance of the access layer. This function is placed here + * to prevent sharing the implementation with user-mode. 
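+ *
+ * A kernel client sketch (illustrative; it assumes the matching
+ * ib_close_al entry point, which is implemented elsewhere in AL):
+ *
+ *	ib_al_handle_t	h_al;
+ *
+ *	if( ib_open_al( &h_al ) == IB_SUCCESS )
+ *	{
+ *		// ... allocate PDs, register for PnP events, etc. ...
+ *		ib_close_al( h_al );
+ *	}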
+ */
+ib_api_status_t
+ib_open_al(
+	OUT ib_al_handle_t* const ph_al )
+{
+	ib_al_handle_t h_al;
+	ib_api_status_t status;
+	cl_status_t cl_status;
+
+	AL_ENTER( AL_DBG_MGR );
+
+	if( !ph_al )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Allocate an access layer instance. */
+	h_al = cl_zalloc( sizeof( ib_al_t ) );
+	if( !h_al )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("cl_zalloc failed\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Construct the instance. */
+	construct_al_obj( &h_al->obj, AL_OBJ_TYPE_H_AL );
+	cl_spinlock_construct( &h_al->mad_lock );
+	cl_qlist_init( &h_al->mad_list );
+	cl_qlist_init( &h_al->key_list );
+	cl_qlist_init( &h_al->query_list );
+	cl_qlist_init( &h_al->cep_list );
+
+	cl_vector_construct( &h_al->hdl_vector );
+
+	cl_status = cl_spinlock_init( &h_al->mad_lock );
+	if( cl_status != CL_SUCCESS )
+	{
+		free_al( &h_al->obj );
+		AL_EXIT( AL_DBG_MGR );
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Initialize the handle vector. */
+	cl_status = cl_vector_init( &h_al->hdl_vector, AL_HDL_VECTOR_MIN,
+		AL_HDL_VECTOR_GROW, sizeof(al_handle_t), __init_hdl, NULL, h_al );
+	if( cl_status != CL_SUCCESS )
+	{
+		free_al( &h_al->obj );
+		AL_EXIT( AL_DBG_MGR );
+		return ib_convert_cl_status( cl_status );
+	}
+	h_al->free_hdl = 1;
+
+	/* Initialize the base object. */
+	status = init_al_obj( &h_al->obj, NULL, FALSE,
+		destroying_al, NULL, free_al );
+	if( status != IB_SUCCESS )
+	{
+		free_al( &h_al->obj );
+		AL_EXIT( AL_DBG_MGR );
+		return status;
+	}
+	status = attach_al_obj( &gp_al_mgr->obj, &h_al->obj );
+	if( status != IB_SUCCESS )
+	{
+		h_al->obj.pfn_destroy( &h_al->obj, NULL );
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/*
+	 * Self-reference the AL instance so that all attached objects
+	 * insert themselves in the instance's handle manager automatically.
+	 */
+	h_al->obj.h_al = h_al;
+
+	*ph_al = h_al;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_al->obj );
+
+	AL_EXIT( AL_DBG_MGR );
+	return IB_SUCCESS;
+}
+
+
+uint64_t
+al_hdl_insert(
+	IN const ib_al_handle_t h_al,
+	IN void* const p_obj,
+	IN const uint32_t type )
+{
+	cl_status_t status;
+	size_t size;
+	uint64_t hdl;
+	al_handle_t *p_h;
+
+	AL_ENTER( AL_DBG_HDL );
+
+	size = cl_vector_get_size( &h_al->hdl_vector );
+	hdl = h_al->free_hdl;
+	if( h_al->free_hdl == size )
+	{
+		/* Grow the vector pool. */
+		status =
+			cl_vector_set_size( &h_al->hdl_vector, size + AL_HDL_VECTOR_GROW );
+		if( status != CL_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_HDL );
+			return AL_INVALID_HANDLE;
+		}
+		/*
+		 * Return the start of the free list since the
+		 * entry initializer incremented it.
+		 */
+		h_al->free_hdl = size;
+	}
+
+	/* Get the next free entry. */
+	p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl );
+
+	/* Update the next entry index. */
+	h_al->free_hdl = (size_t)p_h->p_obj;
+
+	/* Update the entry.
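+	 * Free entries chain through p_obj by storing the index of the next
+	 * free slot cast to a pointer. Worked example: with free_hdl == 3 and
+	 * slot 3 holding (al_obj_t*)7, al_hdl_insert returns handle 3 and
+	 * leaves free_hdl == 7; al_hdl_free( h_al, 3 ) then pushes slot 3
+	 * back, making free_hdl == 3 again.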
*/ + p_h->type = type; + p_h->p_obj = (al_obj_t*)p_obj; + + return hdl; +} + + +void +al_hdl_free( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl ) +{ + al_handle_t *p_h; + + CL_ASSERT( hdl < cl_vector_get_size( &h_al->hdl_vector ) ); + + p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); + p_h->type = AL_OBJ_TYPE_UNKNOWN; + p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl; + h_al->free_hdl = hdl; +} + + +al_obj_t* +al_hdl_ref( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ) +{ + al_handle_t *p_h; + al_obj_t *p_obj; + + CL_ASSERT( type != AL_OBJ_TYPE_H_MAD && type != AL_OBJ_TYPE_H_CONN ); + + cl_spinlock_acquire( &h_al->obj.lock ); + + /* Validate index. */ + if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + + /* Get the specified entry. */ + p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); + + /* Make sure that the handle is valid and the correct type. */ + if( type == AL_OBJ_TYPE_UNKNOWN && + p_h->type != AL_OBJ_TYPE_H_PD && p_h->type != AL_OBJ_TYPE_H_CQ && + p_h->type != AL_OBJ_TYPE_H_AV && p_h->type != AL_OBJ_TYPE_H_QP && + p_h->type != AL_OBJ_TYPE_H_MR && p_h->type != AL_OBJ_TYPE_H_MW && + p_h->type != AL_OBJ_TYPE_H_SRQ ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + else if( p_h->type != type ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + + p_obj = p_h->p_obj; + if( !p_obj->hdl_valid ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + ref_al_obj( p_obj ); + cl_spinlock_release( &h_al->obj.lock ); + return p_obj; +} + + +void* +al_hdl_chk( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ) +{ + al_handle_t *p_h; + + /* Validate index. */ + if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) ) + return NULL; + + /* Get the specified entry. */ + p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); + + /* Make sure that the handle is valid and the correct type. */ + if( (p_h->type != type) ) + return NULL; + + return p_h->p_obj; +} + + +void* +al_hdl_get( + IN const ib_al_handle_t h_al, + IN const uint64_t hdl, + IN const uint32_t type ) +{ + al_handle_t *p_h; + void *p_obj; + + cl_spinlock_acquire( &h_al->obj.lock ); + + /* Validate index. */ + if( hdl >= cl_vector_get_size( &h_al->hdl_vector ) ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + + /* Get the specified entry. */ + p_h = (al_handle_t*)cl_vector_get_ptr( &h_al->hdl_vector, (size_t)hdl ); + + /* Make sure that the handle is valid and the correct type. */ + if( (p_h->type != type) ) + { + cl_spinlock_release( &h_al->obj.lock ); + return NULL; + } + + p_obj = (void*)p_h->p_obj; + + /* Clear the entry. */ + p_h->type = AL_OBJ_TYPE_UNKNOWN; + p_h->p_obj = (al_obj_t*)(uintn_t)h_al->free_hdl; + h_al->free_hdl = hdl; + + cl_spinlock_release( &h_al->obj.lock ); + return p_obj; +} + diff --git a/branches/Ndi/core/al/kernel/al_mr.c b/branches/Ndi/core/al/kernel/al_mr.c new file mode 100644 index 00000000..bf2f27ed --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_mr.c @@ -0,0 +1,614 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mr.tmh" +#endif +#include "al_mr.h" +#include "al_pd.h" +#include "al_res_mgr.h" +#include "al_verbs.h" + +#include "ib_common.h" + + +static void +__cleanup_mlnx_fmr( + IN struct _al_obj *p_obj ); + +static void +__return_mlnx_fmr( + IN al_obj_t *p_obj ); + + +static al_shmid_t* +__create_shmid( + IN const int shmid ); + +static void +__free_shmid( + IN struct _al_obj *p_obj ); + + +cl_status_t +mlnx_fmr_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + ib_api_status_t status; + mlnx_fmr_handle_t h_fmr; + + UNUSED_PARAM( context ); + + h_fmr = (mlnx_fmr_handle_t)p_object; + cl_memclr( h_fmr, sizeof(mlnx_fmr_t) ); + + construct_al_obj( &h_fmr->obj, AL_OBJ_TYPE_H_FMR ); + status = init_al_obj( &h_fmr->obj, NULL, FALSE, NULL, + __cleanup_mlnx_fmr, __return_mlnx_fmr ); + if( status != IB_SUCCESS ) + { + return CL_ERROR; + } + + *pp_pool_item = &((mlnx_fmr_handle_t)p_object)->obj.pool_item; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &h_fmr->obj ); + + return CL_SUCCESS; +} + + + +void +mlnx_fmr_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void* context ) +{ + al_obj_t *p_obj; + + UNUSED_PARAM( context ); + + p_obj = PARENT_STRUCT( p_pool_item, al_obj_t, pool_item ); + + /* + * The FMR is being totally destroyed. Modify the free_cb to destroy the + * AL object. + */ + p_obj->pfn_free = (al_pfn_free_t)destroy_al_obj; + ref_al_obj( p_obj ); + p_obj->pfn_destroy( p_obj, NULL ); +} + + + +static void +__cleanup_mlnx_fmr( + IN struct _al_obj *p_obj ) +{ + ib_api_status_t status; + mlnx_fmr_handle_t h_fmr; + + CL_ASSERT( p_obj ); + h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj ); + + /* Deregister the memory. 
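+	 *
+	 * For orientation, the FMR entry points defined in this file compose
+	 * as follows (illustrative only; fmr_create, paddr_list and list_len
+	 * are caller-supplied, and error handling is elided):
+	 *
+	 *	mlnx_fmr_handle_t	h_fmr;
+	 *	uint64_t		vaddr;
+	 *	net32_t			lkey, rkey;
+	 *
+	 *	mlnx_create_fmr( h_pd, &fmr_create, &h_fmr );
+	 *	mlnx_map_phys_fmr( h_fmr, paddr_list, list_len,
+	 *		&vaddr, &lkey, &rkey );
+	 *	// ... fast remaps as the registered buffers change ...
+	 *	mlnx_unmap_fmr( h_fmr );
+	 *	mlnx_destroy_fmr( h_fmr );
+	 *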
*/ + if( verbs_check_mlnx_fmr( h_fmr ) ) + { + status = verbs_destroy_mlnx_fmr( h_fmr ); + CL_ASSERT( status == IB_SUCCESS ); + + h_fmr->h_ci_fmr = NULL; + h_fmr->p_next = NULL; + } +} + + + +static void +__return_mlnx_fmr( + IN al_obj_t *p_obj ) +{ + mlnx_fmr_handle_t h_fmr; + + h_fmr = PARENT_STRUCT( p_obj, mlnx_fmr_t, obj ); + reset_al_obj( p_obj ); + put_mlnx_fmr( h_fmr ); +} + + + +ib_api_status_t +mlnx_create_fmr( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_create_t* const p_fmr_create, + OUT mlnx_fmr_handle_t* const ph_fmr ) +{ + mlnx_fmr_handle_t h_fmr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + + if( !p_fmr_create || !ph_fmr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Get a FMR tracking structure. */ + h_fmr = alloc_mlnx_fmr(); + if( !h_fmr ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to allocate memory handle\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + status = attach_al_obj( &h_pd->obj, &h_fmr->obj ); + if( status != IB_SUCCESS ) + { + h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the memory region. */ + status = verbs_create_mlnx_fmr( h_pd, p_fmr_create, h_fmr ); + if( status != IB_SUCCESS ) + { + h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to register memory: %s\n", ib_get_err_str(status)) ); + return status; + } + + *ph_fmr = h_fmr; + /* Release the reference taken in alloc_mlnx_fmr for initialization. */ + deref_al_obj( &(*ph_fmr )->obj ); + + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +} + + +ib_api_status_t +mlnx_map_phys_fmr( + IN const mlnx_fmr_handle_t h_fmr, + IN const uint64_t* const paddr_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") ); + return IB_INVALID_FMR_HANDLE; + } + + if( !paddr_list || !p_vaddr || !p_lkey || !p_rkey ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + ref_al_obj( &h_fmr->obj ); + + /* Register the memory region. */ + status = verbs_map_phys_mlnx_fmr( h_fmr, paddr_list, list_len, p_vaddr, p_lkey, p_rkey); + if( status != IB_SUCCESS ) + { + //TODO: do we need to do something more about the error ? 
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("unable to map FMR: %s\n", ib_get_err_str(status)) );
+	}
+
+	deref_al_obj( &h_fmr->obj );
+
+	AL_EXIT( AL_DBG_MR );
+	return status;
+}
+
+
+ib_api_status_t
+mlnx_unmap_fmr(
+	IN const mlnx_fmr_handle_t h_fmr )
+{
+	ib_api_status_t status;
+	mlnx_fmr_t *p_fmr = (mlnx_fmr_t*)h_fmr;
+	mlnx_fmr_t *p_cur_fmr;
+	mlnx_fmr_handle_t *p_fmr_array;
+	int i;
+
+	AL_ENTER( AL_DBG_MR );
+
+	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
+		return IB_INVALID_FMR_HANDLE;
+	}
+
+	// Calculate the list size.
+	for( i = 0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next )
+		i++;
+
+	// Allocate the array.
+	p_fmr_array = cl_zalloc( (i+1) * sizeof(mlnx_fmr_handle_t) );
+	if( !p_fmr_array )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INSUFFICIENT_MEMORY\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	// Fill the array.
+	for( i = 0, p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next )
+	{
+		p_fmr_array[i++] = p_cur_fmr->h_ci_fmr;
+		ref_al_obj( &p_cur_fmr->obj );
+	}
+	p_fmr_array[i] = NULL;
+
+	// Unmap the array of FMRs.
+	status = verbs_unmap_mlnx_fmr( h_fmr, p_fmr_array );
+
+	// Deref the objects.
+	for( p_cur_fmr = p_fmr; p_cur_fmr; p_cur_fmr = p_cur_fmr->p_next )
+		deref_al_obj( &p_cur_fmr->obj );
+
+	cl_free( p_fmr_array );
+
+	AL_EXIT( AL_DBG_MR );
+	return status;
+}
+
+
+ib_api_status_t
+mlnx_destroy_fmr(
+	IN const mlnx_fmr_handle_t h_fmr )
+{
+	ib_api_status_t status;
+
+	AL_ENTER( AL_DBG_MR );
+
+	if( AL_OBJ_INVALID_HANDLE( h_fmr, AL_OBJ_TYPE_H_FMR ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
+		return IB_INVALID_FMR_HANDLE;
+	}
+
+	if( !verbs_check_mlnx_fmr( h_fmr ) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_FMR_HANDLE\n") );
+		return IB_INVALID_FMR_HANDLE;
+	}
+
+	ref_al_obj( &h_fmr->obj );
+
+	/* FMRs are destroyed synchronously. */
+	status = verbs_destroy_mlnx_fmr( h_fmr );
+
+	if( status == IB_SUCCESS )
+	{
+		h_fmr->h_ci_fmr = NULL;
+		/*
+		 * We're good to destroy the object.
+		 * NOTE: There is no need to deref the AL object; the FMR object is
+		 * reset before being inserted back into the pool.
+		 */
+		h_fmr->obj.pfn_destroy( &h_fmr->obj, NULL );
+	}
+	else
+	{
+		deref_al_obj( &h_fmr->obj );
+	}
+	AL_EXIT( AL_DBG_MR );
+	return status;
+}
+
+
+
+ib_api_status_t
+ib_create_shmid(
+	IN const ib_pd_handle_t h_pd,
+	IN const int shmid,
+	IN const ib_mr_create_t* const p_mr_create,
+	OUT net32_t* const p_lkey,
+	OUT net32_t* const p_rkey,
+	OUT ib_mr_handle_t* const ph_mr )
+{
+	ib_api_status_t status;
+	cl_status_t cl_status;
+	net32_t lkey;
+	net32_t rkey;
+	ib_mr_handle_t h_mr;
+
+	AL_ENTER( AL_DBG_MR );
+
+	if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") );
+		return IB_INVALID_PD_HANDLE;
+	}
+	if( !p_mr_create || !p_lkey || !p_rkey || !ph_mr )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Register the memory region. */
+	status = ib_reg_mem( h_pd, p_mr_create, &lkey, &rkey, &h_mr );
+	if( status != IB_SUCCESS )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("unable to register memory: %s\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Create the shmid tracking structure.
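+	 * Each distinct shmid is tracked by a single al_shmid_t whose mr_list
+	 * holds every registration sharing that ID: the first ib_create_shmid
+	 * call allocates the structure here, and reg_shmid (below) is where
+	 * later sharers would attach additional registrations to it.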
*/ + h_mr->p_shmid = __create_shmid( shmid ); + if( !h_mr->p_shmid ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to allocate shmid\n") ); + ib_dereg_mr( h_mr ); + return IB_INSUFFICIENT_MEMORY; + } + + /* + * Record that the memory region is associated with this shmid. The + * insertion should automatically succeed since the list has a minimum + * size of 1. + */ + ref_al_obj( &h_mr->p_shmid->obj ); + cl_status = cl_list_insert_head( &h_mr->p_shmid->mr_list, h_mr ); + CL_ASSERT( cl_status == CL_SUCCESS ); + + /* Add the shmid to the CI CA for tracking. */ + add_shmid( h_pd->obj.p_ci_ca, h_mr->p_shmid ); + + /* Return the results. */ + *p_lkey = lkey; + *p_rkey = rkey; + *ph_mr = h_mr; + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +} + + + +/* + * Allocate a new structure to track memory registrations shared across + * processes. + */ +static al_shmid_t* +__create_shmid( + IN const int shmid ) +{ + al_shmid_t *p_shmid; + ib_api_status_t status; + cl_status_t cl_status; + + /* Allocate the shmid structure. */ + p_shmid = cl_zalloc( sizeof( al_shmid_t ) ); + if( !p_shmid ) + { + return NULL; + } + + /* Construct the shmid structure. */ + construct_al_obj( &p_shmid->obj, AL_OBJ_TYPE_H_MR ); + cl_list_construct( &p_shmid->mr_list ); + + /* Initialize the shmid structure. */ + status = init_al_obj( &p_shmid->obj, p_shmid, TRUE, + NULL, NULL, __free_shmid ); + if( status != IB_SUCCESS ) + { + __free_shmid( &p_shmid->obj ); + return NULL; + } + + cl_status = cl_list_init( &p_shmid->mr_list, 1 ); + if( cl_status != CL_SUCCESS ) + { + p_shmid->obj.pfn_destroy( &p_shmid->obj, NULL ); + return NULL; + } + + p_shmid->id = shmid; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_shmid->obj ); + + return p_shmid; +} + + + +static void +__free_shmid( + IN struct _al_obj *p_obj ) +{ + al_shmid_t *p_shmid; + + p_shmid = PARENT_STRUCT( p_obj, al_shmid_t, obj ); + + CL_ASSERT( cl_is_list_empty( &p_shmid->mr_list ) ); + + cl_list_destroy( &p_shmid->mr_list ); + destroy_al_obj( p_obj ); + cl_free( p_shmid ); +} + + + +ib_api_status_t +ib_reg_shmid( + IN const ib_pd_handle_t h_pd, + IN const ib_shmid_t shmid, + IN const ib_mr_create_t* const p_mr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + return reg_shmid( h_pd, shmid, p_mr_create, p_vaddr, p_lkey, p_rkey, ph_mr ); +} + + +ib_api_status_t +reg_shmid( + IN const ib_pd_handle_t h_pd, + IN const ib_shmid_t shmid, + IN const ib_mr_create_t* const p_mr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + UNUSED_PARAM( h_pd ); + UNUSED_PARAM( shmid ); + UNUSED_PARAM( p_mr_create ); + UNUSED_PARAM( p_vaddr ); + UNUSED_PARAM( p_lkey ); + UNUSED_PARAM( p_rkey ); + UNUSED_PARAM( ph_mr ); + return IB_ERROR; +#if 0 + ib_api_status_t status; + cl_status_t cl_status; + al_shmid_t *p_shmid; + uint64_t vaddr; + net32_t lkey; + net32_t rkey; + ib_mr_handle_t h_mr, h_reg_mr; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( !p_vaddr || !p_lkey || !p_rkey || !ph_mr ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Let's see if we can acquire the registered memory region. 
*/ + status = acquire_shmid( h_pd->obj.p_ci_ca, shmid, &p_shmid ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("shmid not found: %s\n", ib_get_err_str(status)) ); + return IB_NOT_FOUND; + } + + /* Lock down the shmid to prevent deregistrations while we register. */ + cl_spinlock_acquire( &p_shmid->obj.lock ); + + /* + * There's a chance after we acquired the shmid, all current + * registrations were deregistered. + */ + if( cl_is_list_empty( &p_shmid->mr_list ) ) + { + /* There are no registrations left to share. */ + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("shmid not found\n") ); + cl_spinlock_release( &p_shmid->obj.lock ); + release_shmid( p_shmid ); + return IB_NOT_FOUND; + } + + /* Get a handle to an existing registered memory region. */ + h_reg_mr = cl_list_obj( cl_list_head( &p_shmid->mr_list ) ); + +// BUGBUG: This release is not safe since the h_reg_mr can be deregistered. + cl_spinlock_release( &p_shmid->obj.lock ); + + /* Register the memory region. */ + vaddr = *p_vaddr; + status = ib_reg_shared( h_reg_mr, h_pd, access_ctrl, &vaddr, + &lkey, &rkey, &h_mr ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unable to register shared memory: 0x%0I64x %s\n", + vaddr, ib_get_err_str(status)) ); + release_shmid( p_shmid ); + return status; + } + + cl_spinlock_acquire( &p_shmid->obj.lock ); + + /* Track the registration with the shmid structure. */ + cl_status = cl_list_insert_head( &p_shmid->mr_list, h_mr ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("insertion into shmid list failed\n") ); + cl_spinlock_release( &p_shmid->obj.lock ); + release_shmid( p_shmid ); + return ib_convert_cl_status( cl_status ); + } + + cl_spinlock_release( &p_shmid->obj.lock ); + + /* Return the results. */ + h_mr->p_shmid = p_shmid; + *p_vaddr = vaddr; + *p_lkey = lkey; + *p_rkey = rkey; + *ph_mr = h_mr; + AL_EXIT( AL_DBG_MR ); + return IB_SUCCESS; +#endif +} diff --git a/branches/Ndi/core/al/kernel/al_pnp.c b/branches/Ndi/core/al/kernel/al_pnp.c new file mode 100644 index 00000000..df7dffa5 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_pnp.c @@ -0,0 +1,1735 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include
+
+#include "al.h"
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "al_pnp.tmh"
+#endif
+#include "al_mgr.h"
+#include "al_pnp.h"
+#include "ib_common.h"
+#include "al_ioc_pnp.h"
+
+
+#define PNP_CA_VECTOR_MIN 0
+#define PNP_CA_VECTOR_GROW 10
+
+
+/* ib_pnp_event_t values converted to text strings. */
+char* ib_pnp_event_str[] =
+{
+	"IB_PNP_CA_ADD",
+	"IB_PNP_CA_REMOVE",
+	"IB_PNP_PORT_ADD",
+	"IB_PNP_PORT_REMOVE",
+	"IB_PNP_PORT_INIT",
+	"IB_PNP_PORT_ARMED",
+	"IB_PNP_PORT_ACTIVE",
+	"IB_PNP_PORT_DOWN",
+	"IB_PNP_PKEY_CHANGE",
+	"IB_PNP_SM_CHANGE",
+	"IB_PNP_GID_CHANGE",
+	"IB_PNP_LID_CHANGE",
+	"IB_PNP_SUBNET_TIMEOUT_CHANGE",
+	"IB_PNP_IOU_ADD",
+	"IB_PNP_IOU_REMOVE",
+	"IB_PNP_IOC_ADD",
+	"IB_PNP_IOC_REMOVE",
+	"IB_PNP_IOC_PATH_ADD",
+	"IB_PNP_IOC_PATH_REMOVE"
+};
+
+
+/*
+ * Declarations.
+ */
+static void
+__pnp_free(
+	IN al_obj_t *p_obj );
+
+
+/*
+ * Compares two contexts for inserts/lookups in a flexi map. Keys are the
+ * address of the registration's guid1, which is adjacent to the context's
+ * guid2 (if present). This allows for a single call to cl_memcmp.
+ */
+static intn_t
+__context_cmp128(
+	IN const void* const p_key1,
+	IN const void* const p_key2 )
+{
+	return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) * 2 );
+}
+
+/*
+ * Compares two contexts for inserts/lookups in a flexi map. Keys are the
+ * address of the registration's guid1, which is adjacent to the context's
+ * guid2 (if present). This allows for a single call to cl_memcmp.
+ */
+static intn_t
+__context_cmp64(
+	IN const void* const p_key1,
+	IN const void* const p_key2 )
+{
+	return cl_memcmp( p_key1, p_key2, sizeof(uint64_t) );
+}
+
+
+/*
+ * Event structures for queuing to the async proc manager.
+ */
+typedef struct _al_pnp_ca_change
+{
+	cl_async_proc_item_t async_item;
+	al_ci_ca_t *p_ci_ca;
+	ib_ca_attr_t *p_new_ca_attr;
+
+} al_pnp_ca_change_t;
+
+
+typedef struct _al_pnp_ca_event
+{
+	cl_async_proc_item_t async_item;
+	ib_pnp_event_t pnp_event;
+	al_ci_ca_t *p_ci_ca;
+	uint8_t port_index;
+
+} al_pnp_ca_event_t;
+
+
+typedef struct _al_pnp_reg_event
+{
+	cl_async_proc_item_t async_item;
+	al_pnp_t *p_reg;
+
+} al_pnp_reg_event_t;
+
+
+/* PnP Manager structure. */
+typedef struct _al_pnp_mgr
+{
+	al_obj_t obj;
+
+	cl_qlist_t ca_reg_list;
+	cl_qlist_t port_reg_list;
+
+	cl_ptr_vector_t ca_vector;
+
+	cl_async_proc_item_t async_item;
+	boolean_t async_item_is_busy;
+
+} al_pnp_mgr_t;
+
+
+/*
+ * PnP Manager instance, creation, destruction.
+ */
+
+/* Global instance of the PnP manager.
*/ +al_pnp_mgr_t *gp_pnp = NULL; + + +static void +__pnp_check_events( + IN cl_async_proc_item_t* p_item ); + +static void +__al_pnp_process_dereg( + IN cl_async_proc_item_t* p_item ); + + +ib_api_status_t +create_pnp( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( gp_pnp == NULL ); + + gp_pnp = (al_pnp_mgr_t*)cl_zalloc( sizeof(al_pnp_mgr_t) ); + if( !gp_pnp ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to allocate PnP manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + cl_qlist_init( &gp_pnp->ca_reg_list ); + cl_qlist_init( &gp_pnp->port_reg_list ); + construct_al_obj( &gp_pnp->obj, AL_OBJ_TYPE_PNP_MGR ); + cl_ptr_vector_construct( &gp_pnp->ca_vector ); + + cl_status = cl_ptr_vector_init( &gp_pnp->ca_vector, PNP_CA_VECTOR_MIN, + PNP_CA_VECTOR_GROW ); + if( cl_status != CL_SUCCESS ) + { + __pnp_free( &gp_pnp->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_ptr_vector_init failed with status %s.\n", + CL_STATUS_MSG( cl_status )) ); + return IB_ERROR; + } + + gp_pnp->async_item.pfn_callback = __pnp_check_events; + + status = init_al_obj( &gp_pnp->obj, NULL, TRUE, NULL, NULL, __pnp_free ); + if( status != IB_SUCCESS ) + { + __pnp_free( &gp_pnp->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + status = attach_al_obj( p_parent_obj, &gp_pnp->obj ); + if( status != IB_SUCCESS ) + { + gp_pnp->obj.pfn_destroy( &gp_pnp->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &gp_pnp->obj ); + + AL_EXIT( AL_DBG_PNP ); + return( IB_SUCCESS ); +} + + +static void +__pnp_free( + IN al_obj_t *p_obj ) +{ + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( PARENT_STRUCT( p_obj, al_pnp_mgr_t, obj ) == gp_pnp ); + CL_ASSERT( cl_is_qlist_empty( &gp_pnp->ca_reg_list ) ); + CL_ASSERT( cl_is_qlist_empty( &gp_pnp->port_reg_list ) ); + UNUSED_PARAM( p_obj ); + + /* All CA's should have been removed by now. */ + CL_ASSERT( !cl_ptr_vector_get_size( &gp_pnp->ca_vector ) ); + cl_ptr_vector_destroy( &gp_pnp->ca_vector ); + + destroy_al_obj( &gp_pnp->obj ); + cl_free( gp_pnp ); + gp_pnp = NULL; + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__pnp_reg_destroying( + IN al_obj_t *p_obj ) +{ + al_pnp_t *p_reg; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj ); + + /* Reference the registration entry while we queue it to our PnP thread. */ + ref_al_obj( &p_reg->obj ); + + /* Queue the registration for removal from the list. */ + cl_async_proc_queue( gp_async_pnp_mgr, &p_reg->dereg_item ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__al_pnp_process_dereg( + IN cl_async_proc_item_t* p_item ) +{ + al_pnp_t* p_reg; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_item, al_pnp_t, dereg_item ); + + /* Remove the registration information from the list. 
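+	 * (No lock is taken around these list operations: ca_reg_list and
+	 * port_reg_list appear to be touched only from work items queued to
+	 * gp_async_pnp_mgr, so manipulation is serialized by the PnP async
+	 * thread itself.)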
+	 */
+	switch( pnp_get_class( p_reg->pnp_class ) )
+	{
+	case IB_PNP_CA:
+		cl_qlist_remove_item( &gp_pnp->ca_reg_list, &p_reg->list_item );
+		break;
+
+	case IB_PNP_PORT:
+		cl_qlist_remove_item( &gp_pnp->port_reg_list, &p_reg->list_item );
+		break;
+
+	default:
+		CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ||
+			pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT );
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid PnP registration type.\n") );
+	}
+
+	/* Release the reference we took for processing the deregistration. */
+	deref_al_obj( &p_reg->obj );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+static void
+__pnp_reg_cleanup(
+	IN al_obj_t *p_obj )
+{
+	al_pnp_t *p_reg;
+	cl_fmap_item_t *p_item;
+	IRP *p_irp;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj );
+
+	/* Clean up the context list. */
+	while( cl_fmap_count( &p_reg->context_map ) )
+	{
+		p_item = cl_fmap_tail( &p_reg->context_map );
+		cl_fmap_remove_item( &p_reg->context_map, p_item );
+		cl_free( p_item );
+	}
+
+	p_irp = InterlockedExchangePointer( &p_reg->p_rearm_irp, NULL );
+	if( p_irp )
+	{
+#pragma warning(push, 3)
+		IoSetCancelRoutine( p_irp, NULL );
+#pragma warning(pop)
+		/* Complete the IRP. */
+		p_irp->IoStatus.Status = STATUS_CANCELLED;
+		p_irp->IoStatus.Information = 0;
+		IoCompleteRequest( p_irp, IO_NO_INCREMENT );
+	}
+
+	if( p_reg->p_dereg_irp )
+	{
+		p_reg->p_dereg_irp->IoStatus.Status = STATUS_SUCCESS;
+		p_reg->p_dereg_irp->IoStatus.Information = 0;
+		IoCompleteRequest( p_reg->p_dereg_irp, IO_NO_INCREMENT );
+		p_reg->p_dereg_irp = NULL;
+	}
+
+	/* Dereference the PnP manager. */
+	deref_al_obj( &gp_pnp->obj );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+static void
+__pnp_reg_free(
+	IN al_obj_t *p_obj )
+{
+	al_pnp_t *p_reg;
+	cl_fmap_item_t *p_item;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj );
+
+	/* Clean up the context list. */
+	while( cl_fmap_count( &p_reg->context_map ) )
+	{
+		p_item = cl_fmap_tail( &p_reg->context_map );
+		cl_fmap_remove_item( &p_reg->context_map, p_item );
+		cl_free( p_item );
+	}
+
+	/* Free the registration structure. */
+	destroy_al_obj( &p_reg->obj );
+	cl_free( p_reg );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+/*
+ * Helper functions.
+ */
+
+
+
+/*
+ * Returns the context structure stored in a registration for
+ * a given CA or port GUID.
+ */
+al_pnp_context_t*
+pnp_get_context(
+	IN const al_pnp_t* const p_reg,
+	IN const void* const p_key )
+{
+	cl_fmap_item_t *p_context_item;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	/* Search the context list for this CA. */
+	p_context_item = cl_fmap_get( &p_reg->context_map, p_key );
+	if( p_context_item != cl_fmap_end( &p_reg->context_map ) )
+	{
+		AL_EXIT( AL_DBG_PNP );
+		return PARENT_STRUCT( p_context_item, al_pnp_context_t, map_item );
+	}
+
+	AL_EXIT( AL_DBG_PNP );
+	return NULL;
+}
+
+
+void
+pnp_reg_complete(
+	IN al_pnp_t* const p_reg )
+{
+	ib_pnp_rec_t user_rec;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	/* Notify the user that the registration is complete. */
+	if( (pnp_get_flag( p_reg->pnp_class ) & IB_PNP_FLAG_REG_COMPLETE) )
+	{
+		/* Set up the PnP record for the callback. */
+		cl_memclr( &user_rec, sizeof(user_rec) );
+		user_rec.h_pnp = p_reg;
+		user_rec.pnp_event = IB_PNP_REG_COMPLETE;
+		user_rec.pnp_context = (void*)p_reg->obj.context;
+
+		/* Invoke the user callback.
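+		 * A minimal client callback shape, for reference (hypothetical
+		 * example, not part of the original code):
+		 *
+		 *	static ib_api_status_t
+		 *	my_pnp_cb(
+		 *		IN ib_pnp_rec_t *p_pnp_rec )
+		 *	{
+		 *		switch( p_pnp_rec->pnp_event )
+		 *		{
+		 *		case IB_PNP_PORT_ACTIVE:	// Start I/O.
+		 *			break;
+		 *		case IB_PNP_PORT_REMOVE:	// Quiesce.
+		 *			break;
+		 *		default:
+		 *			break;
+		 *		}
+		 *		// A non-success return discards the per-GUID context
+		 *		// (see __pnp_notify_user below).
+		 *		return IB_SUCCESS;
+		 *	}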
*/ + p_reg->pfn_pnp_cb( &user_rec ); + } + + if( pnp_get_flag( p_reg->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + { + KeSetEvent( p_reg->p_sync_event, 0, FALSE ); + /* + * Proxy synchronizes PnP callbacks with registration, and thus can + * safely set the UM_EXPORT subtype after al_reg_pnp returns. + */ + if( p_reg->obj.type & AL_OBJ_SUBTYPE_UM_EXPORT ) + ObDereferenceObject( p_reg->p_sync_event ); + p_reg->p_sync_event = NULL; + } + + AL_EXIT( AL_DBG_PNP ); +} + +/* + * User notification. Formats the PnP record delivered by the + * callback, invokes the callback, and updates the contexts. + */ +static ib_api_status_t +__pnp_notify_user( + IN al_pnp_t* const p_reg, + IN al_pnp_context_t* const p_context, + IN const al_pnp_ca_event_t* const p_event_rec ) +{ + ib_api_status_t status; + union + { + ib_pnp_rec_t user_rec; + ib_pnp_ca_rec_t ca_rec; + ib_pnp_port_rec_t port_rec; + } u; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_reg ); + CL_ASSERT( p_context ); + CL_ASSERT( p_event_rec ); + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("p_event_rec->pnp_event = 0x%x (%s)\n", + p_event_rec->pnp_event, ib_get_pnp_event_str( p_event_rec->pnp_event )) ); + + /* Setup the PnP record for the callback. */ + cl_memclr( &u, sizeof(u) ); + u.user_rec.h_pnp = p_reg; + u.user_rec.pnp_event = p_event_rec->pnp_event; + u.user_rec.pnp_context = (void*)p_reg->obj.context; + u.user_rec.context = (void*)p_context->context; + + switch( p_event_rec->pnp_event ) + { + case IB_PNP_CA_ADD: + /* Copy the attributes for use in calling users back. */ + u.ca_rec.p_ca_attr = ib_copy_ca_attr( + p_event_rec->p_ci_ca->p_user_attr, + p_event_rec->p_ci_ca->p_pnp_attr ); + + /* Fall through */ + case IB_PNP_CA_REMOVE: + u.user_rec.guid = p_event_rec->p_ci_ca->p_pnp_attr->ca_guid; + break; + + case IB_PNP_PORT_ADD: + case IB_PNP_PORT_INIT: + case IB_PNP_PORT_ARMED: + case IB_PNP_PORT_ACTIVE: + case IB_PNP_PORT_DOWN: + case IB_PNP_PKEY_CHANGE: + case IB_PNP_SM_CHANGE: + case IB_PNP_GID_CHANGE: + case IB_PNP_LID_CHANGE: + case IB_PNP_SUBNET_TIMEOUT_CHANGE: + /* Copy the attributes for use in calling users back. */ + u.port_rec.p_ca_attr = ib_copy_ca_attr( + p_event_rec->p_ci_ca->p_user_attr, + p_event_rec->p_ci_ca->p_pnp_attr ); + + /* Setup the port attribute pointer. */ + u.port_rec.p_port_attr = + &u.port_rec.p_ca_attr->p_port_attr[p_event_rec->port_index]; + + /* Fall through */ + case IB_PNP_PORT_REMOVE: + u.user_rec.guid = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr[ + p_event_rec->port_index].port_guid; + break; + + case IB_PNP_REG_COMPLETE: + break; + + default: + /* Invalid event type. */ + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid event type (%d).\n", p_event_rec->pnp_event) ); + CL_ASSERT( p_event_rec->pnp_event == IB_PNP_CA_ADD || + p_event_rec->pnp_event == IB_PNP_PORT_ADD || + p_event_rec->pnp_event == IB_PNP_PORT_INIT || + p_event_rec->pnp_event == IB_PNP_PORT_ACTIVE || + p_event_rec->pnp_event == IB_PNP_PORT_DOWN || + p_event_rec->pnp_event == IB_PNP_PKEY_CHANGE || + p_event_rec->pnp_event == IB_PNP_SM_CHANGE || + p_event_rec->pnp_event == IB_PNP_GID_CHANGE || + p_event_rec->pnp_event == IB_PNP_LID_CHANGE || + p_event_rec->pnp_event == IB_PNP_SUBNET_TIMEOUT_CHANGE || + p_event_rec->pnp_event == IB_PNP_CA_REMOVE || + p_event_rec->pnp_event == IB_PNP_PORT_REMOVE ); + return IB_SUCCESS; + } + + /* Invoke the user callback. */ + status = p_reg->pfn_pnp_cb( &u.user_rec ); + + if( status == IB_SUCCESS ) + { + /* Store the user's event context in the context block. 
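+		 * Subsequent events for the same GUID then hand this value back in
+		 * ib_pnp_rec_t.context; a non-successful return instead opts the
+		 * registrant out of further events for that GUID, since the context
+		 * is removed and freed below.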
*/ + p_context->context = u.user_rec.context; + } + else + { + cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item ); + cl_free( p_context ); + } + + AL_EXIT( AL_DBG_PNP ); + return status; +} + + + +/* + * Context creation. + */ +al_pnp_context_t* +pnp_create_context( + IN al_pnp_t* const p_reg, + IN const void* const p_key ) +{ + al_pnp_context_t *p_context; + cl_fmap_item_t *p_item; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_reg ); + + /* No context exists for this port. Create one. */ + p_context = (al_pnp_context_t*)cl_pzalloc( sizeof(al_pnp_context_t) ); + if( !p_context ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to cl_zalloc al_pnp_context_t (%I64d bytes).\n", + sizeof(al_pnp_context_t)) ); + return NULL; + } + /* Store the GUID in the context record. */ + cl_memcpy(&p_context->guid, p_key, sizeof(ib_net64_t) * 2); + + /* Add the context to the context list. */ + p_item = cl_fmap_insert( &p_reg->context_map, &p_context->guid, + &p_context->map_item ); + if( p_item != &p_context->map_item ) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("p_context is already in context map %I64x \n",p_context->guid)); + p_context = NULL; + } + + + AL_EXIT( AL_DBG_PNP ); + return p_context; +} + + + +/* + * Report all local port information. This notifies the user of PORT_ADD + * events, along with port state events (PORT_INIT, PORT_ACTIVE). + */ +static void +__pnp_port_notify( + IN al_pnp_t *p_reg, + IN al_ci_ca_t *p_ci_ca ) +{ + ib_api_status_t status; + al_pnp_context_t *p_context; + ib_port_attr_t *p_port_attr; + al_pnp_ca_event_t event_rec; + + AL_ENTER( AL_DBG_PNP ); + + event_rec.p_ci_ca = p_ci_ca; + + for( event_rec.port_index = 0; + event_rec.port_index < p_ci_ca->num_ports; + event_rec.port_index++ ) + { + p_port_attr = p_ci_ca->p_pnp_attr->p_port_attr; + p_port_attr += event_rec.port_index; + + /* Create a new context for user port information. */ + p_context = pnp_create_context( p_reg, &p_port_attr->port_guid); + if( !p_context ) + continue; + + /* Notify the user of the port's existence. */ + event_rec.pnp_event = IB_PNP_PORT_ADD; + status = __pnp_notify_user( p_reg, p_context, &event_rec ); + if( status != IB_SUCCESS ) + continue; + + /* Generate a port down event if the port is currently down. */ + if( p_port_attr->link_state == IB_LINK_DOWN ) + { + event_rec.pnp_event = IB_PNP_PORT_DOWN; + __pnp_notify_user( p_reg, p_context, &event_rec ); + } + else + { + /* Generate port init event. */ + if( p_port_attr->link_state >= IB_LINK_INIT ) + { + event_rec.pnp_event = IB_PNP_PORT_INIT; + status = __pnp_notify_user( p_reg, p_context, &event_rec ); + if( status != IB_SUCCESS ) + continue; + } + /* Generate port armed event. */ + if( p_port_attr->link_state >= IB_LINK_ARMED ) + { + event_rec.pnp_event = IB_PNP_PORT_ARMED; + status = __pnp_notify_user( p_reg, p_context, &event_rec ); + if( status != IB_SUCCESS ) + continue; + } + /* Generate port up event. */ + if( p_port_attr->link_state >= IB_LINK_ACTIVE ) + { + event_rec.pnp_event = IB_PNP_PORT_ACTIVE; + __pnp_notify_user( p_reg, p_context, &event_rec ); + } + } + } + AL_EXIT( AL_DBG_PNP ); +} + + +/* + * Registration and deregistration. 
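+ *
+ * Flow sketch: ib_reg_pnp/al_reg_pnp queue the registrant to the PnP
+ * thread; __al_pnp_process_reg inserts it into its class list and replays
+ * the current state (IB_PNP_CA_ADD or IB_PNP_PORT_ADD plus port state
+ * events) before pnp_reg_complete signals IB_PNP_REG_COMPLETE, so a
+ * client never misses devices that arrived before it registered.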
+ */ +static void +__pnp_reg_notify( + IN al_pnp_t* const p_reg ) +{ + al_pnp_ca_event_t event_rec; + size_t i; + al_pnp_context_t *p_context; + + AL_ENTER( AL_DBG_PNP ); + + for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ ) + { + event_rec.p_ci_ca = (al_ci_ca_t*) + cl_ptr_vector_get( &gp_pnp->ca_vector, i ); + if( !event_rec.p_ci_ca ) + continue; + + switch( pnp_get_class( p_reg->pnp_class ) ) + { + case IB_PNP_CA: + event_rec.pnp_event = IB_PNP_CA_ADD; + p_context = pnp_create_context( p_reg, + &event_rec.p_ci_ca->p_pnp_attr->ca_guid); + if( !p_context ) + break; + + __pnp_notify_user( p_reg, p_context, &event_rec ); + break; + + case IB_PNP_PORT: + __pnp_port_notify( p_reg, event_rec.p_ci_ca ); + break; + + default: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA || + pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + continue; + } + } + + /* Notify the user that the registration is complete. */ + pnp_reg_complete( p_reg ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__al_pnp_process_reg( + IN cl_async_proc_item_t* p_item ) +{ + al_pnp_t* p_reg; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item ); + + /* Add the registrant to the list. */ + switch( pnp_get_class( p_reg->pnp_class ) ) + { + case IB_PNP_CA: + cl_qlist_insert_tail( &gp_pnp->ca_reg_list, &p_reg->list_item ); + break; + + case IB_PNP_PORT: + cl_qlist_insert_tail( &gp_pnp->port_reg_list, &p_reg->list_item ); + break; + + default: + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA || + pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + } + + /* Generate all relevant events for the registration. */ + __pnp_reg_notify( p_reg ); + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_reg->obj ); + + AL_EXIT( AL_DBG_PNP ); +} + + +ib_api_status_t +ib_reg_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_req_t* const p_pnp_req, + OUT ib_pnp_handle_t* const ph_pnp ) +{ + ib_api_status_t status; + KEVENT event; + + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_pnp_req || !ph_pnp ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + KeInitializeEvent( &event, SynchronizationEvent, FALSE ); + + status = al_reg_pnp( h_al, p_pnp_req, &event, ph_pnp ); + /* Release the reference taken in init_al_obj. */ + if( status == IB_SUCCESS ) + { + deref_al_obj( &(*ph_pnp)->obj ); + + /* Wait for registration to complete if synchronous. */ + if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + { + KeWaitForSingleObject( + &event, Executive, KernelMode, TRUE, NULL ); + } + } + + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +ib_api_status_t +al_reg_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_req_t* const p_pnp_req, + IN KEVENT *p_sync_event, + OUT ib_pnp_handle_t* const ph_pnp ) +{ + ib_api_status_t status; + al_pnp_t* p_reg; + + AL_ENTER( AL_DBG_PNP ); + + /* Allocate a new registration info structure. */ + p_reg = (al_pnp_t*)cl_zalloc( sizeof(al_pnp_t) ); + if( !p_reg ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to cl_zalloc al_pnp_t (%I64d bytes).\n", + sizeof(al_pnp_t)) ); + return( IB_INSUFFICIENT_MEMORY ); + } + + /* Initialize the registration info. 
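+	 * A caller-side sketch of a synchronous port registration through
+	 * ib_reg_pnp above (illustrative; my_pnp_cb and my_context are
+	 * placeholders):
+	 *
+	 *	ib_pnp_req_t	req;
+	 *	ib_pnp_handle_t	h_pnp;
+	 *
+	 *	cl_memclr( &req, sizeof(req) );
+	 *	req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_SYNC;
+	 *	req.pnp_context = my_context;
+	 *	req.pfn_pnp_cb = my_pnp_cb;
+	 *	if( ib_reg_pnp( h_al, &req, &h_pnp ) == IB_SUCCESS )
+	 *	{
+	 *		// Returns only after the initial replay completed.
+	 *		ib_dereg_pnp( h_pnp, NULL );
+	 *	}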
*/ + construct_al_obj( &p_reg->obj, AL_OBJ_TYPE_H_PNP ); + switch(pnp_get_class(p_pnp_req->pnp_class)){ + case IB_PNP_IOU: + case IB_PNP_IOC: + cl_fmap_init( &p_reg->context_map, __context_cmp128 ); + break; + case IB_PNP_PORT: + case IB_PNP_CA: + cl_fmap_init( &p_reg->context_map, __context_cmp64 ); + break; + default: + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("unknown pnp_class 0x%x.\n", pnp_get_class(p_pnp_req->pnp_class))); + } + status = init_al_obj( &p_reg->obj, p_pnp_req->pnp_context, TRUE, + __pnp_reg_destroying, __pnp_reg_cleanup, __pnp_reg_free ); + if( status != IB_SUCCESS ) + { + __pnp_reg_free( &p_reg->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) ); + return( status ); + } + status = attach_al_obj( &h_al->obj, &p_reg->obj ); + if( status != IB_SUCCESS ) + { + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Reference the PnP Manager. */ + ref_al_obj( &gp_pnp->obj ); + + /* Copy the request information. */ + p_reg->pnp_class = p_pnp_req->pnp_class; + p_reg->pfn_pnp_cb = p_pnp_req->pfn_pnp_cb; + + p_reg->p_sync_event = p_sync_event; + + /* Send IOU/IOC registration to the IOC PnP manager. */ + if( pnp_get_class(p_pnp_req->pnp_class) == IB_PNP_IOU || + pnp_get_class(p_pnp_req->pnp_class) == IB_PNP_IOC ) + { + p_reg->async_item.pfn_callback = ioc_pnp_process_reg; + p_reg->dereg_item.pfn_callback = ioc_pnp_process_dereg; + } + else + { + p_reg->async_item.pfn_callback = __al_pnp_process_reg; + p_reg->dereg_item.pfn_callback = __al_pnp_process_dereg; + } + + /* Queue the registrant for addition to the list. */ + ref_al_obj( &p_reg->obj ); + cl_async_proc_queue( gp_async_pnp_mgr, &p_reg->async_item ); + + /* Set the user handle. */ + *ph_pnp = p_reg; + + AL_EXIT( AL_DBG_PNP ); + return( IB_SUCCESS ); +} + + +ib_api_status_t +ib_dereg_pnp( + IN const ib_pnp_handle_t h_pnp, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_pnp, AL_OBJ_TYPE_H_PNP ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &h_pnp->obj ); + h_pnp->obj.pfn_destroy( &h_pnp->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_PNP ); + return( IB_SUCCESS ); +} + + +/* + * CA event handling. + */ +static void +__pnp_process_add_ca( + IN cl_async_proc_item_t *p_item ) +{ + al_pnp_t *p_reg; + al_pnp_ca_event_t *p_event_rec; + cl_list_item_t *p_reg_item; + al_pnp_context_t *p_context; + cl_status_t cl_status; + size_t i; + + AL_ENTER( AL_DBG_PNP ); + + p_event_rec = PARENT_STRUCT( p_item, al_pnp_ca_event_t, async_item ); + + cl_spinlock_acquire( &gp_pnp->obj.lock ); + /* Add the CA to the CA vector. */ + for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ ) + { + if( !cl_ptr_vector_get( &gp_pnp->ca_vector, i ) ) + { + cl_status = cl_ptr_vector_set( &gp_pnp->ca_vector, i, + p_event_rec->p_ci_ca ); + CL_ASSERT( cl_status == CL_SUCCESS ); + break; + } + } + cl_spinlock_release( &gp_pnp->obj.lock ); + CL_ASSERT( i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ) ); + + /* Walk the list of registrants for notification. 
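+	 * CA registrants are walked head-to-tail, so notification order matches
+	 * registration order; each registrant gets its own context record keyed
+	 * by the CA GUID.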
*/ + for( p_reg_item = cl_qlist_head( &gp_pnp->ca_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->ca_reg_list ); + p_reg_item = cl_qlist_next( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ); + + /* Allocate the user's context. */ + /* + * Moving this allocation to the pnp_ca_event call is left as an + * exercise to the open source community. + */ + p_context = pnp_create_context( p_reg, + &p_event_rec->p_ci_ca->p_pnp_attr->ca_guid); + if( !p_context ) + continue; + + /* Notify the user. */ + __pnp_notify_user( p_reg, p_context, p_event_rec ); + } + + /* Generate port add and state events. */ + for( p_reg_item = cl_qlist_head( &gp_pnp->port_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list ); + p_reg_item = cl_qlist_next( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + __pnp_port_notify( p_reg, p_event_rec->p_ci_ca ); + } + + /* Cleanup the event record. */ + deref_al_obj( &gp_pnp->obj ); + cl_free( p_event_rec ); + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__pnp_process_remove_port( + IN const al_pnp_ca_event_t* const p_event_rec ) +{ + ib_api_status_t status; + al_pnp_t *p_reg; + cl_list_item_t *p_reg_item; + uint8_t port_index; + al_pnp_context_t *p_context; + al_pnp_ca_event_t event_rec; + ib_port_attr_t *p_port_attr; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_event_rec->p_ci_ca->p_pnp_attr ); + CL_ASSERT( p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr ); + + /* Notify the IOC PnP manager of the port down event. */ + //***TODO: Make some call to the IOC PnP manager here, such as + //***TODO: al_ioc_pnp_process_port_down( p_event_rec->p_ci_ca, + //***TODO: p_event_rec->port_index ); + + cl_memclr( &event_rec, sizeof( al_pnp_ca_event_t ) ); + event_rec = *p_event_rec; + + /* Walk the list of registrants for notification. */ + for( p_reg_item = cl_qlist_tail( &gp_pnp->port_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list ); + p_reg_item = cl_qlist_prev( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + + for( port_index = 0; + port_index < p_event_rec->p_ci_ca->num_ports; + port_index++ ) + { + p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr; + p_port_attr += port_index; + p_context = pnp_get_context( p_reg, &p_port_attr->port_guid ); + if( !p_context ) + continue; + + event_rec.port_index = port_index; + + if( p_port_attr->link_state >= IB_LINK_INIT ) + { + /* Notify the user of the port down. */ + event_rec.pnp_event = IB_PNP_PORT_DOWN; + status = __pnp_notify_user( p_reg, p_context, &event_rec ); + if( status != IB_SUCCESS ) + continue; + } + + /* Notify the user of the port remove. */ + event_rec.pnp_event = IB_PNP_PORT_REMOVE; + status = __pnp_notify_user( p_reg, p_context, &event_rec ); + if( status == IB_SUCCESS ) + { + /* + * Remove the port context from the registrant's + * context list. + */ + cl_fmap_remove_item( &p_reg->context_map, + &p_context->map_item ); + /* Free the context. 
*/ + cl_free( p_context ); + } + } + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__pnp_process_remove_ca( + IN cl_async_proc_item_t *p_item ) +{ + al_pnp_t *p_reg; + al_pnp_ca_event_t *p_event_rec; + cl_list_item_t *p_reg_item; + al_pnp_context_t *p_context = NULL; + size_t i; + + AL_ENTER( AL_DBG_PNP ); + + p_event_rec = PARENT_STRUCT( p_item, al_pnp_ca_event_t, async_item ); + + /* Generate port remove events. */ + __pnp_process_remove_port( p_event_rec ); + + /* Walk the list of registrants for notification. */ + for( p_reg_item = cl_qlist_tail( &gp_pnp->ca_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->ca_reg_list ); + p_reg_item = cl_qlist_prev( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_CA ); + + /* Search the context list for this CA. */ + p_context = + pnp_get_context( p_reg, &p_event_rec->p_ci_ca->p_pnp_attr->ca_guid); + + /* Make sure we found a context. */ + if( !p_context ) + continue; + + /* Notify the user. */ + if( __pnp_notify_user( p_reg, p_context, p_event_rec ) == IB_SUCCESS ) + { + /* Remove the context from the context list. */ + cl_fmap_remove_item( &p_reg->context_map, &p_context->map_item ); + + /* Deallocate the context block. */ + cl_free( p_context ); + } + } + + /* Remove the CA from the CA vector. */ + for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ ) + { + if( cl_ptr_vector_get( &gp_pnp->ca_vector, i ) == + p_event_rec->p_ci_ca ) + { + cl_ptr_vector_remove( &gp_pnp->ca_vector, i ); + break; + } + } + + /* Release the reference to the CA. */ + deref_al_obj( &p_event_rec->p_ci_ca->obj ); + + /* Cleanup the event record. */ + deref_al_obj( &gp_pnp->obj ); + cl_free( p_event_rec ); + + AL_EXIT( AL_DBG_PNP ); +} + + +ib_api_status_t +pnp_ca_event( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_pnp_event_t event ) +{ + ib_api_status_t status; + cl_status_t cl_status; + al_pnp_ca_event_t *p_event_rec; + ib_ca_attr_t *p_old_ca_attr; + + AL_ENTER( AL_DBG_PNP ); + + /* Allocate an event record. */ + p_event_rec = (al_pnp_ca_event_t*)cl_zalloc( sizeof(al_pnp_ca_event_t) ); + if( !p_event_rec ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to cl_zalloc al_pnp_ca_event_t (%I64d bytes).\n", + sizeof(al_pnp_ca_event_t)) ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Store the event type. */ + p_event_rec->pnp_event = event; + /* Store a pointer to the ca. */ + p_event_rec->p_ci_ca = p_ci_ca; + + switch( event ) + { + case IB_PNP_CA_ADD: + p_event_rec->async_item.pfn_callback = __pnp_process_add_ca; + + /* Reserve space for the CA in the CA vector. */ + cl_spinlock_acquire( &gp_pnp->obj.lock ); + cl_status = cl_ptr_vector_set_size( &gp_pnp->ca_vector, + cl_ptr_vector_get_size( &gp_pnp->ca_vector ) + 1 ); + cl_spinlock_release( &gp_pnp->obj.lock ); + + if( cl_status != CL_SUCCESS ) + { + cl_free( p_event_rec ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_ptr_vector_set_size failed with status %s.\n", + CL_STATUS_MSG( cl_status )) ); + return ib_convert_cl_status( cl_status ); + } + + /* Read the CA attributes required to process the event. 
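+		 * ci_ca_update_attr swaps in fresh attributes; for an add event
+		 * there is no earlier state to diff against, so the previous buffer
+		 * returned through p_old_ca_attr is not examined here.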
*/ + status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr ); + if( status != IB_SUCCESS ) + { + cl_spinlock_acquire( &gp_pnp->obj.lock ); + cl_status = cl_ptr_vector_set_size( &gp_pnp->ca_vector, + cl_ptr_vector_get_size( &gp_pnp->ca_vector ) - 1 ); + CL_ASSERT( cl_status == CL_SUCCESS ); + cl_spinlock_release( &gp_pnp->obj.lock ); + cl_free( p_event_rec ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ci_ca_update_attr failed.\n") ); + return status; + } + + /* Take out a reference to the CA until it is removed. */ + ref_al_obj( &p_ci_ca->obj ); + break; + + case IB_PNP_CA_REMOVE: + if( !p_event_rec->p_ci_ca->p_pnp_attr ) + { + /* The CA was never added by the PNP manager. */ + cl_free( p_event_rec ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Ignoring removal request for unknown CA.\n") ); + return IB_NOT_FOUND; + } + + p_event_rec->async_item.pfn_callback = __pnp_process_remove_ca; + break; + + default: + /* Invalid event for this function. */ + CL_ASSERT( event == IB_PNP_CA_ADD || event == IB_PNP_CA_REMOVE ); + cl_free( p_event_rec ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid event type.\n") ); + return IB_ERROR; + } + + /* Queue the event to the async processing manager. */ + ref_al_obj( &gp_pnp->obj ); + cl_async_proc_queue( gp_async_pnp_mgr, &p_event_rec->async_item ); + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + +/* + * Port event handling. + */ + +/* + * Processes a port event, reporting it to clients from the first + * registrant to the last. + */ +static void +__pnp_process_port_forward( + IN al_pnp_ca_event_t* p_event_rec ) +{ + al_pnp_t *p_reg; + cl_list_item_t *p_reg_item; + al_pnp_context_t *p_context; + ib_port_attr_t *p_port_attr; + + AL_ENTER( AL_DBG_PNP ); + + /* Walk the list of registrants for notification. */ + for( p_reg_item = cl_qlist_head( &gp_pnp->port_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list ); + p_reg_item = cl_qlist_next( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + + p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr; + p_port_attr += p_event_rec->port_index; + + p_context = pnp_get_context( p_reg, &p_port_attr->port_guid ); + if( !p_context ) + continue; + + /* Notify the user. */ + __pnp_notify_user( p_reg, p_context, p_event_rec ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +/* + * Processes a port event, reporting it to clients from the last + * registrant to the first. + */ +static void +__pnp_process_port_backward( + IN al_pnp_ca_event_t* p_event_rec ) +{ + al_pnp_t *p_reg; + cl_list_item_t *p_reg_item; + al_pnp_context_t *p_context; + ib_port_attr_t *p_port_attr; + + AL_ENTER( AL_DBG_PNP ); + + /* Walk the list of registrants for notification. */ + for( p_reg_item = cl_qlist_tail( &gp_pnp->port_reg_list ); + p_reg_item != cl_qlist_end( &gp_pnp->port_reg_list ); + p_reg_item = cl_qlist_prev( p_reg_item ) ) + { + p_reg = PARENT_STRUCT( p_reg_item, al_pnp_t, list_item ); + + CL_ASSERT( pnp_get_class( p_reg->pnp_class ) == IB_PNP_PORT ); + + p_port_attr = p_event_rec->p_ci_ca->p_pnp_attr->p_port_attr; + p_port_attr += p_event_rec->port_index; + + p_context = pnp_get_context( p_reg, &p_port_attr->port_guid ); + if( !p_context ) + continue; + + /* Notify the user. */ + __pnp_notify_user( p_reg, p_context, p_event_rec ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + + +/* + * Check for port attribute changes. 
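+ *
+ * Link-state changes are reported as the full sequence of intermediate
+ * transitions, so registrants never see a state skipped. For example, if
+ * the old state was IB_LINK_DOWN and the new state is IB_LINK_ACTIVE,
+ * three events are generated in order:
+ *
+ *	IB_PNP_PORT_INIT -> IB_PNP_PORT_ARMED -> IB_PNP_PORT_ACTIVE
+ *
+ * Down transitions are reported in reverse registration order
+ * (__pnp_process_port_backward); up transitions in forward order.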
+ */ +static void +__pnp_check_ports( + IN al_ci_ca_t* const p_ci_ca, + IN const ib_ca_attr_t* const p_old_ca_attr ) +{ + uint16_t index; + al_pnp_ca_event_t event_rec; + ib_port_attr_t *p_old_port_attr, *p_new_port_attr; + + AL_ENTER( AL_DBG_PNP ); + + /* Store the event information. */ + event_rec.p_ci_ca = p_ci_ca; + + for( event_rec.port_index = 0; + event_rec.port_index < p_ci_ca->p_pnp_attr->num_ports; + event_rec.port_index++ ) + { + p_old_port_attr = p_old_ca_attr->p_port_attr; + p_old_port_attr += event_rec.port_index; + p_new_port_attr = p_ci_ca->p_pnp_attr->p_port_attr; + p_new_port_attr += event_rec.port_index; + + /* Check the link state. */ + if( p_old_port_attr->link_state != p_new_port_attr->link_state ) + { + switch( p_new_port_attr->link_state ) + { + case IB_LINK_DOWN: + event_rec.pnp_event = IB_PNP_PORT_DOWN; + __pnp_process_port_backward( &event_rec ); + break; + + case IB_LINK_INIT: + if( p_old_port_attr->link_state > IB_LINK_INIT ) + { + /* Missed the down event. */ + event_rec.pnp_event = IB_PNP_PORT_DOWN; + __pnp_process_port_backward( &event_rec ); + } + event_rec.pnp_event = IB_PNP_PORT_INIT; + __pnp_process_port_forward( &event_rec ); + break; + + case IB_LINK_ARMED: + if( p_old_port_attr->link_state > IB_LINK_ARMED ) + { + /* Missed the down and init events. */ + event_rec.pnp_event = IB_PNP_PORT_DOWN; + __pnp_process_port_backward( &event_rec ); + event_rec.pnp_event = IB_PNP_PORT_INIT; + __pnp_process_port_forward( &event_rec ); + } + event_rec.pnp_event = IB_PNP_PORT_ARMED; + __pnp_process_port_forward( &event_rec ); + break; + + case IB_LINK_ACTIVE: + case IB_LINK_ACT_DEFER: + if( p_old_port_attr->link_state == IB_LINK_DOWN ) + { + /* Missed the init and armed event. */ + event_rec.pnp_event = IB_PNP_PORT_INIT; + __pnp_process_port_forward( &event_rec ); + event_rec.pnp_event = IB_PNP_PORT_ARMED; + __pnp_process_port_forward( &event_rec ); + } + if( p_old_port_attr->link_state < IB_LINK_ACTIVE ) + { + event_rec.pnp_event = IB_PNP_PORT_ACTIVE; + __pnp_process_port_forward( &event_rec ); + } + break; + + default: + break; + } + } + + /* + * Check for P_Key and GID table changes. + * The tables are only valid in the armed or active states. + */ + if( ( (p_old_port_attr->link_state == IB_LINK_ARMED) || + (p_old_port_attr->link_state == IB_LINK_ACTIVE) ) + && + ( (p_new_port_attr->link_state == IB_LINK_ARMED) || + (p_new_port_attr->link_state == IB_LINK_ACTIVE) ) ) + { + /* A different number of P_Keys indicates a change.*/ + if( p_old_port_attr->num_pkeys != p_new_port_attr->num_pkeys ) + { + event_rec.pnp_event = IB_PNP_PKEY_CHANGE; + __pnp_process_port_forward( &event_rec ); + } + else + { + /* Same number of P_Keys - compare the table contents. */ + for( index = 0; index < p_old_port_attr->num_pkeys; index++ ) + { + if( p_old_port_attr->p_pkey_table[index] != + p_new_port_attr->p_pkey_table[index] ) + { + event_rec.pnp_event = IB_PNP_PKEY_CHANGE; + __pnp_process_port_forward( &event_rec ); + break; + } + } + } + + /* A different number of GIDs indicates a change.*/ + if( p_old_port_attr->num_gids != p_new_port_attr->num_gids ) + { + event_rec.pnp_event = IB_PNP_GID_CHANGE; + __pnp_process_port_forward( &event_rec ); + } + else + { + /* Same number of GIDs - compare the table contents. 
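+			 * The raw GID bytes are compared entry by entry; a single
+			 * IB_PNP_GID_CHANGE is reported on the first mismatch and the
+			 * scan stops.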
*/ + for( index = 0; index < p_old_port_attr->num_gids; index++ ) + { + if( cl_memcmp( p_old_port_attr->p_gid_table[index].raw, + p_new_port_attr->p_gid_table[index].raw, + sizeof( ib_gid_t ) ) ) + { + event_rec.pnp_event = IB_PNP_GID_CHANGE; + __pnp_process_port_forward( &event_rec ); + break; + } + } + } + } + + /* Check for LID change. */ + if( (p_old_port_attr->lid != p_new_port_attr->lid) || + (p_old_port_attr->lmc != p_new_port_attr->lmc) ) + { + event_rec.pnp_event = IB_PNP_LID_CHANGE; + __pnp_process_port_forward( &event_rec ); + } + /* Check for SM related changes. */ + if( (p_old_port_attr->sm_lid != p_new_port_attr->sm_lid) || + (p_old_port_attr->sm_sl != p_new_port_attr->sm_sl) ) + { + event_rec.pnp_event = IB_PNP_SM_CHANGE; + __pnp_process_port_forward( &event_rec ); + } + /* Check for subnet timeout change. */ + if( p_old_port_attr->subnet_timeout != + p_new_port_attr->subnet_timeout ) + { + event_rec.pnp_event = IB_PNP_SUBNET_TIMEOUT_CHANGE; + __pnp_process_port_forward( &event_rec ); + } + } +} + + + +static boolean_t +__pnp_cmp_attr( + IN ib_ca_attr_t *p_attr_1, + IN ib_ca_attr_t *p_attr_2 + ) +{ + uint8_t port_index; + ib_port_attr_t* p_port_attr_1; + ib_port_attr_t* p_port_attr_2; + + CL_ASSERT( p_attr_1 && p_attr_2 ); + + for( port_index = 0; + port_index < p_attr_1->num_ports; + port_index++ ) + { + /* Initialize pointers to the port attributes. */ + p_port_attr_1 = &p_attr_1->p_port_attr[port_index]; + p_port_attr_2 = &p_attr_2->p_port_attr[port_index]; + + CL_ASSERT( p_port_attr_1->port_guid == p_port_attr_2->port_guid ); + + if( cl_memcmp( p_port_attr_1, p_port_attr_2, + offsetof( ib_port_attr_t, p_gid_table ) ) != 0 ) + { + return FALSE; + } + } + + return TRUE; +} + + + +static void +__pnp_check_events( + IN cl_async_proc_item_t* p_item ) +{ + al_ci_ca_t *p_ci_ca; + size_t i; + uint32_t attr_size; + ib_ca_attr_t *p_old_ca_attr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_PNP ); + + UNUSED_PARAM( p_item ); + CL_ASSERT( gp_pnp ); + + /* Walk all known CAs. */ + for( i = 0; i < cl_ptr_vector_get_size( &gp_pnp->ca_vector ); i++ ) + { + p_ci_ca = (al_ci_ca_t*)cl_ptr_vector_get( &gp_pnp->ca_vector, i ); + + /* Check if the CA was just added to our list but is not ready. */ + if( !p_ci_ca ) + continue; + + attr_size = p_ci_ca->p_pnp_attr->size; + status = ib_query_ca( p_ci_ca->h_ca, p_ci_ca->p_user_attr, &attr_size ); + + /* Report changes if there is an attribute size difference. */ + if( ( attr_size != p_ci_ca->p_pnp_attr->size ) || + !__pnp_cmp_attr( p_ci_ca->p_pnp_attr, p_ci_ca->p_user_attr ) ) + { + status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr ); + if( status == IB_SUCCESS ) + { + /* Check port attributes and report changes. */ + __pnp_check_ports( p_ci_ca, p_old_ca_attr ); + + /* Free the old CA attributes. */ + cl_free( p_old_ca_attr ); + } + else + { + /* + * Could not get new attribute buffers. + * Skip this event - it should be picked up on the next check. + */ + continue; + } + } + } + + /* Dereference the PnP Manager. */ + deref_al_obj( &gp_pnp->obj ); + gp_pnp->async_item_is_busy = FALSE; + + AL_EXIT( AL_DBG_PNP ); +} + + + +/* + * Check and report PnP events. + */ +void +pnp_poll( + void ) +{ + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( gp_pnp ); + + /* Determine if the PnP manager asynchronous processing item is busy. 
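+	 * The async_item_is_busy flag, checked under the object lock, ensures
+	 * at most one poll request is queued to gp_async_pnp_mgr at a time;
+	 * __pnp_check_events clears the flag when it finishes.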
+	 */
+	cl_spinlock_acquire( &gp_pnp->obj.lock );
+
+	if( gp_pnp->async_item_is_busy )
+	{
+		cl_spinlock_release( &gp_pnp->obj.lock );
+		return;
+	}
+
+	gp_pnp->async_item_is_busy = TRUE;
+
+	cl_spinlock_release( &gp_pnp->obj.lock );
+
+	/* Reference the PnP Manager. */
+	ref_al_obj( &gp_pnp->obj );
+
+	/* Queue the request to check for PnP events. */
+	cl_async_proc_queue( gp_async_pnp_mgr, &gp_pnp->async_item );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+
+static void
+__pnp_process_ca_change(
+	IN	cl_async_proc_item_t*	p_item )
+{
+	al_pnp_ca_change_t	*p_pnp_ca_change;
+	ib_ca_attr_t		*p_old_ca_attr;
+	al_ci_ca_t			*p_ci_ca;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	CL_ASSERT( p_item );
+	CL_ASSERT( gp_pnp );
+
+	p_pnp_ca_change = PARENT_STRUCT( p_item, al_pnp_ca_change_t, async_item );
+
+	p_ci_ca = p_pnp_ca_change->p_ci_ca;
+
+	/*
+	 * Prevent readers of the CA attributes from accessing them while
+	 * we are updating the pointers.
+	 */
+	ci_ca_excl_lock_attr( p_ci_ca );
+
+	/* Swap the old and new CA attributes. */
+	p_old_ca_attr = p_ci_ca->p_pnp_attr;
+	p_ci_ca->p_pnp_attr = p_pnp_ca_change->p_new_ca_attr;
+	p_ci_ca->p_user_attr = (ib_ca_attr_t*)(((uint8_t*)p_ci_ca->p_pnp_attr) +
+		p_ci_ca->p_pnp_attr->size);
+	ci_ca_unlock_attr( p_ci_ca );
+
+	/* Report changes. */
+	__pnp_check_ports( p_ci_ca, p_old_ca_attr );
+
+	/* Free the old CA attributes. */
+	cl_free( p_old_ca_attr );
+
+	/* Dereference the PnP Manager. */
+	deref_al_obj( &gp_pnp->obj );
+
+	AL_EXIT( AL_DBG_PNP );
+}
+
+
+
+/*
+ * Called by user mode AL to report a CA attribute change.
+ */
+ib_api_status_t
+pnp_ca_change(
+	IN	al_ci_ca_t* const	p_ci_ca,
+	IN	const ib_ca_attr_t*	p_ca_attr )
+{
+	ib_ca_attr_t*		p_new_ca_attr;
+	al_pnp_ca_change_t*	p_pnp_ca_change;
+	size_t				size;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	CL_ASSERT( p_ci_ca );
+	CL_ASSERT( p_ca_attr );
+
+	/*
+	 * Allocate the new CA attributes buffer.
+	 * Double the buffer size for PnP and user reporting halves.
+	 * Also include the CA change event structure in the allocation.
+	 */
+	size = ( p_ca_attr->size * 2 ) + sizeof( al_pnp_ca_change_t );
+	p_new_ca_attr = (ib_ca_attr_t*)cl_zalloc( size );
+	if( !p_new_ca_attr )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_PNP,
+			("Unable to allocate buffer for changed CA attributes\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Copy the attributes. */
+	ib_copy_ca_attr( p_new_ca_attr, p_ca_attr );
+
+	/* Initialize a pointer to the CA change event structure. */
+	p_pnp_ca_change = (al_pnp_ca_change_t*)
+		(((uint8_t*)p_new_ca_attr) + ( p_ca_attr->size * 2 ));
+
+	/* Initialize the CA change event structure. */
+	p_pnp_ca_change->async_item.pfn_callback = __pnp_process_ca_change;
+	p_pnp_ca_change->p_ci_ca = p_ci_ca;
+	p_pnp_ca_change->p_new_ca_attr = p_new_ca_attr;
+
+	/* Reference the PnP Manager. */
+	ref_al_obj( &gp_pnp->obj );
+
+	/* Queue the CA change event.
*/ + cl_async_proc_queue( gp_async_pnp_mgr, &p_pnp_ca_change->async_item ); + + AL_EXIT( AL_DBG_PNP ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_reject_ioc( + IN const ib_al_handle_t h_al, + IN const ib_pnp_handle_t h_event ) +{ + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !h_event ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + AL_EXIT( AL_DBG_PNP ); + return IB_UNSUPPORTED; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy.c b/branches/Ndi/core/al/kernel/al_proxy.c new file mode 100644 index 00000000..d6ef240e --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy.c @@ -0,0 +1,1288 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include "al.h" +#include "al_mr.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_proxy.tmh" +#endif +#include "al_dev.h" +#include "al_ci_ca.h" +#include "al_mgr.h" +#include "al_pnp.h" +#include "al_proxy.h" +#include "ib_common.h" + + + +/* + * Acquire an object used to queue callbacks. + */ +al_proxy_cb_info_t* +proxy_cb_get( + IN al_dev_open_context_t *p_context ) +{ + al_proxy_cb_info_t *p_cb_info; + + if( !p_context ) + return NULL; + + cl_spinlock_acquire( &p_context->cb_pool_lock ); + p_cb_info = (al_proxy_cb_info_t*)cl_qpool_get( &p_context->cb_pool ); + cl_spinlock_release( &p_context->cb_pool_lock ); + + if( p_cb_info ) + p_cb_info->p_context = p_context; + + return p_cb_info; +} + + + +/* + * Release an object used to report callbacks. 
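+ *
+ * proxy_cb_get and proxy_cb_put bracket every queued callback. A typical
+ * pairing in a callback handler (illustrative only):
+ *
+ *	p_cb_info = proxy_cb_get( p_context );
+ *	if( p_cb_info )
+ *	{
+ *		... fill in p_cb_info->cb_type and queue the record ...
+ *	}
+ *	... later, once reported to user mode: proxy_cb_put( p_cb_info );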
+ */
+void
+proxy_cb_put(
+	IN	al_proxy_cb_info_t	*p_cb_info )
+{
+	al_dev_open_context_t	*p_context;
+
+	if( !p_cb_info )
+		return;
+
+	p_context = p_cb_info->p_context;
+
+	p_cb_info->reported = FALSE;
+	p_cb_info->p_al_obj = NULL;
+
+	cl_spinlock_acquire( &p_context->cb_pool_lock );
+	cl_qpool_put( &p_context->cb_pool, &p_cb_info->pool_item );
+	cl_spinlock_release( &p_context->cb_pool_lock );
+}
+
+
+
+/*
+ * Process the ioctl UAL_REG_SHMID:
+ */
+static
+cl_status_t
+proxy_reg_shmid(
+	IN	void				*p_open_context,
+	IN	cl_ioctl_handle_t	h_ioctl,
+	OUT	size_t				*p_ret_bytes )
+{
+	ual_reg_shmid_ioctl_t	*p_ioctl =
+		(ual_reg_shmid_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_pd_handle_t			h_pd;
+	ib_mr_handle_t			h_mr;
+	uint64_t				vaddr;
+	net32_t					lkey, rkey;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) < sizeof(ual_reg_shmid_ioctl_t) ||
+		cl_ioctl_out_size( h_ioctl ) < sizeof(ual_reg_shmid_ioctl_t) )
+	{
+		AL_EXIT( AL_DBG_DEV );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate PD handle */
+	h_pd = (ib_pd_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
+	if( !h_pd )
+	{
+		cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );
+		p_ioctl->out.status = IB_INVALID_PD_HANDLE;
+		goto done;
+	}
+
+	/* Validate input region size. */
+	if( p_ioctl->in.mr_create.length > ~((size_t)0) )
+	{
+		cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) );
+		p_ioctl->out.status = IB_INVALID_SETTING;
+		goto done;
+	}
+
+	p_ioctl->out.status = reg_shmid(
+		h_pd,
+		p_ioctl->in.shmid,
+		&p_ioctl->in.mr_create,
+		&vaddr,
+		&lkey,
+		&rkey,
+		&h_mr );
+
+	if( p_ioctl->out.status == IB_SUCCESS )
+	{
+		/* We put the kernel al handle itself in the al_list for the process */
+		p_ioctl->out.vaddr = vaddr;
+		p_ioctl->out.lkey = lkey;
+		p_ioctl->out.rkey = rkey;
+		p_ioctl->out.h_mr = h_mr->obj.hdl;
+		h_mr->obj.hdl_valid = TRUE;
+		deref_al_obj( &h_mr->obj );
+	}
+	else
+	{
+		/* release the memory handle allocated */
+		p_ioctl->out.vaddr = 0;
+		p_ioctl->out.lkey = 0;
+		p_ioctl->out.rkey = 0;
+		p_ioctl->out.h_mr = AL_INVALID_HANDLE;
+	}
+
+done:
+	*p_ret_bytes = sizeof(p_ioctl->out);
+	AL_EXIT( AL_DBG_DEV );
+	return CL_SUCCESS;
+}
+
+
+/*
+ * Retrieve a callback record from the appropriate callback list
+ * and fill the ioctl buffer.
+ *
+ * If no callback record is available, queue the ioctl buffer. A queued
+ * ioctl buffer puts the calling process to sleep and completes when a
+ * callback record becomes available.
+ */
+static cl_status_t
+proxy_queue_ioctl_buf(
+	IN	uintn_t					cb_type,
+	IN	al_dev_open_context_t	*p_context,
+	IN	cl_ioctl_handle_t		h_ioctl )
+{
+	cl_qlist_t			*p_cb_list;
+	al_proxy_cb_info_t	*p_cb_info;
+	cl_ioctl_handle_t	*ph_ioctl;
+	uintn_t				ioctl_size;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	/* Set up the appropriate callback list. */
+	switch( cb_type )
+	{
+	case UAL_GET_CM_CB_INFO:
+		p_cb_list = &p_context->cm_cb_list;
+		ph_ioctl = &p_context->h_cm_ioctl;
+		/* TODO: Use output size only. */
+		ioctl_size = sizeof( cm_cb_ioctl_info_t );
+		break;
+
+	case UAL_GET_COMP_CB_INFO:
+		p_cb_list = &p_context->comp_cb_list;
+		ph_ioctl = &p_context->h_comp_ioctl;
+		/* TODO: Use output size only. */
+		ioctl_size = sizeof( comp_cb_ioctl_info_t );
+		break;
+
+	case UAL_GET_MISC_CB_INFO:
+		p_cb_list = &p_context->misc_cb_list;
+		ph_ioctl = &p_context->h_misc_ioctl;
+		/* TODO: Use output size only.
+		 */
+		ioctl_size = sizeof( misc_cb_ioctl_info_t );
+		break;
+
+	default:
+		AL_EXIT( AL_DBG_DEV );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Process queued callbacks. */
+	cl_spinlock_acquire( &p_context->cb_lock );
+	while( !cl_is_qlist_empty( p_cb_list ) )
+	{
+		p_cb_info = (al_proxy_cb_info_t*)cl_qlist_head( p_cb_list );
+
+		/* Check to see if we've already reported the callback. */
+		if( !p_cb_info->reported )
+		{
+			p_cb_info->reported = TRUE;
+
+			/* Return the callback to the user. */
+			CL_ASSERT( cl_ioctl_out_size( h_ioctl ) >= ioctl_size );
+			cl_memcpy(
+				cl_ioctl_out_buf( h_ioctl ), &p_cb_info->cb_type, ioctl_size );
+			cl_ioctl_complete( h_ioctl, CL_SUCCESS, ioctl_size );
+			cl_spinlock_release( &p_context->cb_lock );
+			AL_EXIT( AL_DBG_DEV );
+			return CL_COMPLETED;
+		}
+		if( p_cb_info->p_al_obj )
+			deref_al_obj( p_cb_info->p_al_obj );
+
+		cl_qlist_remove_head( p_cb_list );
+		proxy_cb_put( p_cb_info );
+	}
+
+	/* There are no callbacks to report. Mark this IOCTL as pending. */
+	CL_ASSERT( !(*ph_ioctl) );
+
+	/* If we're closing down, complete the IOCTL with a canceled status. */
+	if( p_context->closing )
+	{
+		cl_spinlock_release( &p_context->cb_lock );
+		AL_EXIT( AL_DBG_DEV );
+		return CL_CANCELED;
+	}
+
+	*ph_ioctl = h_ioctl;
+	/* Set the cancel routine for this IRP so the app can abort. */
+#pragma warning(push, 3)
+	IoSetCancelRoutine( h_ioctl, al_dev_cancel_io );
+#pragma warning(pop)
+	/* If returning pending, the IRP must be marked as such. */
+	IoMarkIrpPending( h_ioctl );
+
+	/* Ref the context until the IOCTL is either completed or cancelled. */
+	proxy_context_ref( p_context );
+	cl_spinlock_release( &p_context->cb_lock );
+
+	AL_EXIT( AL_DBG_DEV );
+	return CL_PENDING;
+}
+
+
+
+/*
+ * Process the ioctl UAL_GET_CM_CB_INFO:
+ * Get a CM callback record from the queue of CM callback records
+ */
+static cl_status_t
+proxy_get_cm_cb(
+	IN	cl_ioctl_handle_t	h_ioctl )
+{
+	cl_status_t				cl_status;
+	IO_STACK_LOCATION		*p_io_stack;
+	al_dev_open_context_t	*p_context;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;
+	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid file object type for request: %016I64x\n",
+			(LONG_PTR)p_io_stack->FileObject->FsContext2) );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Check the size of the ioctl */
+	if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(cm_cb_ioctl_info_t) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("No output buffer, or buffer too small.\n") );
+		return CL_INVALID_PARAMETER;
+	}
+
+	cl_status = proxy_queue_ioctl_buf( UAL_GET_CM_CB_INFO,
+		p_context, h_ioctl );
+
+	AL_EXIT( AL_DBG_DEV );
+	return cl_status;
+}
+
+
+
+/*
+ * Process the ioctl UAL_GET_COMP_CB_INFO:
+ * Get a completion callback record from the queue of completion callback
+ * records.
+ */
+static cl_status_t
+proxy_get_comp_cb(
+	IN	cl_ioctl_handle_t	h_ioctl )
+{
+	cl_status_t				cl_status;
+	IO_STACK_LOCATION		*p_io_stack;
+	al_dev_open_context_t	*p_context;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;
+	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_H_CQ )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid file object type for request: %016I64x\n",
+			(LONG_PTR)p_io_stack->FileObject->FsContext2) );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Check the size of the ioctl */
+	if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(comp_cb_ioctl_info_t) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("No output buffer, or buffer too small.\n") );
+		return CL_INVALID_PARAMETER;
+	}
+
+	cl_status = proxy_queue_ioctl_buf( UAL_GET_COMP_CB_INFO,
+		p_context, h_ioctl );
+
+	AL_EXIT( AL_DBG_DEV );
+	return cl_status;
+}
+
+
+
+/*
+ * Process the ioctl UAL_GET_MISC_CB_INFO:
+ * Get a miscellaneous callback record from the queue of miscellaneous
+ * callback records.
+ */
+static cl_status_t
+proxy_get_misc_cb(
+	IN	cl_ioctl_handle_t	h_ioctl )
+{
+	cl_status_t				cl_status;
+	IO_STACK_LOCATION		*p_io_stack;
+	al_dev_open_context_t	*p_context;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	p_context = (al_dev_open_context_t*)p_io_stack->FileObject->FsContext;
+	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_AL_MGR )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid file object type for request: %016I64x\n",
+			(LONG_PTR)p_io_stack->FileObject->FsContext2) );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Check the size of the ioctl */
+	if( !p_context || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(misc_cb_ioctl_info_t) )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("No output buffer, or buffer too small.\n") );
+		return CL_INVALID_PARAMETER;
+	}
+
+	cl_status = proxy_queue_ioctl_buf( UAL_GET_MISC_CB_INFO,
+		p_context, h_ioctl );
+
+	AL_EXIT( AL_DBG_DEV );
+	return cl_status;
+}
+
+
+
+/*
+ * Process a PnP callback for a CA.
+ */
+ib_api_status_t
+proxy_pnp_ca_cb(
+	IN	ib_pnp_rec_t	*p_pnp_rec )
+{
+	misc_cb_ioctl_info_t	misc_cb_info;
+	misc_cb_ioctl_rec_t		*p_misc_rec = &misc_cb_info.ioctl_rec;
+	al_dev_open_context_t	*p_context;
+
+	AL_ENTER( AL_DBG_PROXY_CB );
+
+	p_context = p_pnp_rec->pnp_context;
+
+	/*
+	 * If we're already closing the device - do not queue a callback, since
+	 * we're cleaning up the callback lists.
+	 */
+	if( !proxy_context_ref( p_context ) )
+	{
+		proxy_context_deref( p_context );
+		return IB_ERROR;
+	}
+
+	/* Initialize the PnP callback information to return to user-mode. */
+	cl_memclr( &misc_cb_info, sizeof(misc_cb_info) );
+	misc_cb_info.rec_type = PNP_REC;
+	p_misc_rec->pnp_cb_ioctl_rec.pnp_event = p_pnp_rec->pnp_event;
+
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_CA_ADD:
+	case IB_PNP_CA_REMOVE:
+		/* Queue the add/remove pnp record */
+		p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.ca_guid = p_pnp_rec->guid;
+		proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,
+			NULL );
+		break;
+
+	default:
+		/* We only handle CA adds and removals. */
+		break;
+	}
+
+	proxy_context_deref( p_context );
+	AL_EXIT( AL_DBG_PROXY_CB );
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Process a PnP callback for a port.
+ */
+ib_api_status_t
+proxy_pnp_port_cb(
+	IN	ib_pnp_rec_t	*p_pnp_rec )
+{
+	ib_pnp_port_rec_t		*p_port_rec;
+	misc_cb_ioctl_info_t	misc_cb_info;
+	misc_cb_ioctl_rec_t		*p_misc_rec = &misc_cb_info.ioctl_rec;
+	al_dev_open_context_t	*p_context;
+	ib_ca_attr_t			*p_ca_attr;
+	uint64_t				hdl;
+
+	AL_ENTER( AL_DBG_PROXY_CB );
+
+	p_context = p_pnp_rec->pnp_context;
+
+	/*
+	 * If we're already closing the device - do not queue a callback, since
+	 * we're cleaning up the callback lists.
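+	 * proxy_context_ref fails once the device context is being closed;
+	 * note that the failure path still issues a proxy_context_deref to
+	 * keep the reference count balanced.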
+	 */
+	if( !proxy_context_ref( p_context ) )
+	{
+		proxy_context_deref( p_context );
+		return IB_ERROR;
+	}
+
+	p_port_rec = (ib_pnp_port_rec_t*)p_pnp_rec;
+
+	/* Initialize the PnP callback information to return to user-mode. */
+	cl_memclr( &misc_cb_info, sizeof(misc_cb_info) );
+	misc_cb_info.rec_type = PNP_REC;
+	p_misc_rec->pnp_cb_ioctl_rec.pnp_event = p_pnp_rec->pnp_event;
+
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_PORT_ADD:
+	case IB_PNP_PORT_REMOVE:
+		/* Port add/remove will be generated automatically by uAL. */
+		break;
+
+	case IB_PNP_REG_COMPLETE:
+		/*
+		 * Once our registration for ports is complete, report this to the
+		 * user-mode library. This indicates to the library that the current
+		 * system state has been reported.
+		 */
+		proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,
+			NULL );
+		break;
+
+	default:
+		/* Allocate space for the CA attributes. */
+		p_ca_attr = cl_zalloc( p_port_rec->p_ca_attr->size );
+		if( !p_ca_attr )
+		{
+			AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+				("cl_zalloc( %d ) failed.\n", p_port_rec->p_ca_attr->size) );
+			break;
+		}
+
+		ib_copy_ca_attr( p_ca_attr, p_port_rec->p_ca_attr );
+
+		hdl = al_hdl_lock_insert(
+			p_context->h_al, p_ca_attr, AL_OBJ_TYPE_H_CA_ATTR );
+
+		if( hdl == AL_INVALID_HANDLE )
+		{
+			cl_free( p_ca_attr );
+			break;
+		}
+
+		p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.ca_guid =
+			p_port_rec->p_ca_attr->ca_guid;
+		p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.size =
+			p_port_rec->p_ca_attr->size;
+		p_misc_rec->pnp_cb_ioctl_rec.pnp_info.ca.h_ca_attr = hdl;
+
+		proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &misc_cb_info,
+			NULL );
+		break;
+	}
+
+	proxy_context_deref( p_context );
+	AL_EXIT( AL_DBG_PROXY_CB );
+	return IB_SUCCESS;
+}
+
+
+
+cl_status_t
+proxy_get_ca_attr(
+	IN	void				*p_open_context,
+	IN	cl_ioctl_handle_t	h_ioctl,
+	OUT	size_t				*p_ret_bytes )
+{
+	al_dev_open_context_t		*p_context;
+	ual_ca_attr_info_ioctl_t	*p_ioctl;
+	ib_ca_attr_t				*p_src;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	/* Check the size of the ioctl */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) < sizeof(p_ioctl->out) )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("invalid buffer size\n") );
+		return CL_INVALID_PARAMETER;
+	}
+	p_context = (al_dev_open_context_t*)p_open_context;
+	p_ioctl = (ual_ca_attr_info_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+
+	p_src = (ib_ca_attr_t*)al_hdl_get(
+		p_context->h_al, p_ioctl->in.h_ca_attr, AL_OBJ_TYPE_H_CA_ATTR );
+	if( !p_src )
+	{
+		AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("invalid attr handle\n") );
+		return CL_INVALID_PARAMETER;
+	}
+
+	__try
+	{
+		ProbeForWrite( p_ioctl->in.p_ca_attr, p_src->size, sizeof(void*) );
+		ib_copy_ca_attr( p_ioctl->in.p_ca_attr, p_src );
+		p_ioctl->out.status = IB_SUCCESS;
+	}
+	__except(EXCEPTION_EXECUTE_HANDLER)
+	{
+		p_ioctl->out.status = IB_INVALID_PERMISSION;
+	}
+
+	cl_free(p_src);
+
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_DEV );
+	return CL_SUCCESS;
+}
+
+
+/*
+ * Process the UAL_BIND_* ioctls (UAL_BIND_SA, UAL_BIND_CM, UAL_BIND_CQ, etc.):
+ * Associate a file object with the requested AL object type so that later
+ * requests arriving on that file can be routed correctly.
+ */
+static cl_status_t
+proxy_bind_file(
+	IN	cl_ioctl_handle_t	h_ioctl,
+	IN	const uint32_t		type )
+{
+	NTSTATUS				status;
+	IO_STACK_LOCATION		*p_io_stack;
+	al_dev_open_context_t	*p_context;
+	ual_bind_file_ioctl_t	*p_ioctl;
+	FILE_OBJECT				*p_file_obj;
+
+	AL_ENTER( AL_DBG_DEV );
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	p_context =
(al_dev_open_context_t*)p_io_stack->FileObject->FsContext; + + /* Check the size of the ioctl */ + if( !p_context || + !cl_ioctl_in_buf( h_ioctl ) || cl_ioctl_out_size( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(ual_bind_file_ioctl_t) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("No input buffer, or buffer too small.\n") ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = cl_ioctl_in_buf( h_ioctl ); + + status = ObReferenceObjectByHandle( p_ioctl->h_file, + READ_CONTROL, *IoFileObjectType, h_ioctl->RequestorMode, + &p_file_obj, NULL ); + if( !NT_SUCCESS(status) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ObReferenceObjectByHandle returned 0x%08X\n", status) ); + return CL_INVALID_PARAMETER; + } + + p_file_obj->FsContext = p_context; + p_file_obj->FsContext2 = (void*)(ULONG_PTR)type; + + ObDereferenceObject( p_file_obj ); + + AL_EXIT( AL_DBG_DEV ); + return CL_SUCCESS; +} + + + +cl_status_t +proxy_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + + AL_ENTER( AL_DBG_DEV ); + + UNUSED_PARAM( p_ret_bytes ); + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_GET_CM_CB_INFO: + cl_status = proxy_get_cm_cb( h_ioctl ); + break; + case UAL_GET_MISC_CB_INFO: + cl_status = proxy_get_misc_cb( h_ioctl ); + break; + case UAL_GET_COMP_CB_INFO: + cl_status = proxy_get_comp_cb( h_ioctl ); + break; + case UAL_BIND: + cl_status = al_dev_open( h_ioctl ); + break; + case UAL_BIND_SA: + cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_SA_REQ_SVC ); + break; + case UAL_BIND_DESTROY: + case UAL_BIND_PNP: + cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_PNP_MGR ); + break; + case UAL_BIND_CM: + cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_CM ); + break; + case UAL_BIND_CQ: + cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_H_CQ ); + break; + case UAL_BIND_MISC: + cl_status = proxy_bind_file( h_ioctl, AL_OBJ_TYPE_AL_MGR ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} + + +static ib_api_status_t +__proxy_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + proxy_pnp_evt_t *p_evt; + uint32_t rec_size; + proxy_pnp_recs_t *p_evt_rec, *p_rec; + IRP *p_irp; + IO_STACK_LOCATION *p_io_stack; + ual_rearm_pnp_ioctl_out_t *p_ioctl; + al_dev_open_context_t *p_context; + uint64_t hdl; + cl_status_t cl_status; + ib_api_status_t ret_status; + + AL_ENTER( AL_DBG_PNP ); + + p_rec = (proxy_pnp_recs_t*)p_pnp_rec; + + /* + * If an add event, return error to suppress all further + * events for this target. + */ + if( p_pnp_rec->pnp_event & IB_PNP_EVENT_ADD ) + ret_status = IB_ERROR; + else + ret_status = IB_SUCCESS; + + p_context = p_pnp_rec->pnp_context; + ASSERT( p_context ); + + /* Must take and release mutex to synchronize with registration. 
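+	 * Acquiring and immediately releasing pnp_mutex is a barrier: it
+	 * guarantees that proxy_reg_pnp has finished publishing the
+	 * registration (handle copied to user mode, rearm IRP installed)
+	 * before this callback goes looking for the rearm IRP.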
*/ + cl_mutex_acquire( &p_context->pnp_mutex ); + cl_mutex_release( &p_context->pnp_mutex ); + + p_irp = InterlockedExchangePointer( &p_pnp_rec->h_pnp->p_rearm_irp, NULL ); + if( !p_irp ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("No rearm IRP queued for PnP event.\n") ); + return ret_status; + } + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + p_context = p_io_stack->FileObject->FsContext; + ASSERT( p_context ); +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + switch( pnp_get_class( p_pnp_rec->pnp_event ) ) + { + case IB_PNP_CA: + if( p_pnp_rec->pnp_event == IB_PNP_CA_REMOVE ) + rec_size = sizeof(ib_pnp_ca_rec_t); + else + rec_size = sizeof(ib_pnp_ca_rec_t) + p_rec->ca.p_ca_attr->size; + break; + case IB_PNP_PORT: + if( p_pnp_rec->pnp_event == IB_PNP_PORT_REMOVE ) + rec_size = sizeof(ib_pnp_port_rec_t); + else + rec_size = sizeof(ib_pnp_port_rec_t) + p_rec->port.p_ca_attr->size; + break; + case IB_PNP_IOU: + rec_size = sizeof(ib_pnp_iou_rec_t); + break; + case IB_PNP_IOC: + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_IOC_PATH_ADD: + case IB_PNP_IOC_PATH_REMOVE: + rec_size = sizeof( ib_pnp_ioc_path_rec_t); + break; + default: + rec_size = sizeof( ib_pnp_ioc_rec_t ) + (sizeof(ib_svc_entry_t) * + (p_rec->ioc.info.profile.num_svc_entries - 1)); + } + break; + default: + /* The REG_COMPLETE event is not associated with any class. */ + rec_size = sizeof( ib_pnp_rec_t ); + break; + } + + p_evt = cl_zalloc( rec_size + sizeof(proxy_pnp_evt_t) ); + if( !p_evt ) + return ret_status; + + /* Note that cl_event_init cannot fail in kernel-mode. */ + cl_event_init( &p_evt->event, FALSE ); + + p_evt->rec_size = rec_size; + + p_evt_rec = (proxy_pnp_recs_t*)(p_evt + 1); + + /* Copy the PnP event data. 
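+	 * The fixed-size record header is copied first; any class-specific
+	 * attribute blocks (CA or port attributes) are then deep-copied behind
+	 * it so user mode receives one self-contained, contiguous record.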
*/ + switch( pnp_get_class( p_pnp_rec->pnp_event ) ) + { + case IB_PNP_CA: + cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ca_rec_t) ); + if( p_pnp_rec->pnp_event == IB_PNP_CA_REMOVE ) + { + p_evt_rec->ca.p_ca_attr = NULL; + } + else + { + p_evt_rec->ca.p_ca_attr = (ib_ca_attr_t*)(&p_evt_rec->ca + 1); + ib_copy_ca_attr( p_evt_rec->ca.p_ca_attr, p_rec->ca.p_ca_attr ); + } + break; + case IB_PNP_PORT: + cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_port_rec_t) ); + if( p_pnp_rec->pnp_event == IB_PNP_PORT_REMOVE ) + { + p_evt_rec->port.p_ca_attr = NULL; + p_evt_rec->port.p_port_attr = NULL; + } + else + { + p_evt_rec->port.p_ca_attr = (ib_ca_attr_t*)(&p_evt_rec->port + 1); + ib_copy_ca_attr( + p_evt_rec->port.p_ca_attr, p_rec->port.p_ca_attr ); + p_evt_rec->port.p_port_attr = &p_evt_rec->port.p_ca_attr-> + p_port_attr[p_rec->port.p_port_attr->port_num - 1]; + } + break; + case IB_PNP_IOU: + cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_iou_rec_t) ); + break; + case IB_PNP_IOC: + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_IOC_PATH_ADD: + case IB_PNP_IOC_PATH_REMOVE: + cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ioc_path_rec_t) ); + break; + default: + cl_memcpy( p_evt_rec, p_pnp_rec, sizeof(ib_pnp_ioc_rec_t) ); + } + break; + default: + p_evt_rec->pnp = *p_pnp_rec; + break; + } + + p_evt_rec->pnp.h_pnp = (ib_pnp_handle_t)p_pnp_rec->h_pnp->obj.hdl; + p_pnp_rec->h_pnp->obj.hdl_valid = TRUE; + + hdl = + al_hdl_lock_insert( p_context->h_al, p_evt, AL_OBJ_TYPE_H_PNP_EVENT ); + if( hdl == AL_INVALID_HANDLE ) + { + cl_free( p_evt ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to insert PnP event in handle map.\n") ); + return ret_status; + } + + p_ioctl = cl_ioctl_out_buf( p_irp ); + p_ioctl->evt_hdl = hdl; + p_ioctl->evt_size = rec_size; + + /* Hold callback lock to synchronize with registration. */ + cl_spinlock_acquire( &p_context->cb_lock ); + p_irp->IoStatus.Status = STATUS_SUCCESS; + p_irp->IoStatus.Information = sizeof(ual_rearm_pnp_ioctl_out_t); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + cl_spinlock_release( &p_context->cb_lock ); + + /* Now wait on the event. */ + cl_status = cl_event_wait_on( &p_evt->event, PROXY_PNP_TIMEOUT_US, FALSE ); + if( cl_status == CL_SUCCESS ) + { + /* Update the event context with the user's requested value. */ + p_pnp_rec->context = p_evt->evt_context; + /* Forward the user's status. */ + ret_status = p_evt->evt_status; + } + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + al_hdl_free( p_context->h_al, hdl ); + cl_spinlock_release( &p_context->h_al->obj.lock ); + cl_event_destroy( &p_evt->event ); + cl_free( p_evt ); + + AL_EXIT( AL_DBG_PNP ); + return ret_status; +} + + +static void +__cancel_rearm_pnp( + IN DEVICE_OBJECT* p_dev_obj, + IN IRP* p_irp ) +{ + al_dev_open_context_t *p_context; + PIO_STACK_LOCATION p_io_stack; + uint64_t hdl; + al_pnp_t *h_pnp; + + AL_ENTER( AL_DBG_DEV ); + + UNUSED_PARAM( p_dev_obj ); + + /* Get the stack location. */ + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext; + ASSERT( p_context ); + + hdl = (size_t)InterlockedExchangePointer( + &p_irp->Tail.Overlay.DriverContext[0], NULL ); + if( hdl != AL_INVALID_HANDLE ) + { + h_pnp = (al_pnp_t*) + al_hdl_ref( p_context->h_al, hdl, AL_OBJ_TYPE_H_PNP ); + if( h_pnp ) + { + if( InterlockedExchangePointer( &h_pnp->p_rearm_irp, NULL ) == + p_irp ) + { +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + /* Complete the IRP. 
*/ + p_irp->IoStatus.Status = STATUS_CANCELLED; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + } + deref_al_obj( &h_pnp->obj ); + } + } + + IoReleaseCancelSpinLock( p_irp->CancelIrql ); +} + + +/* + * Process the ioctl UAL_REG_PNP: + */ +static cl_status_t +proxy_reg_pnp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl ) +{ + ual_reg_pnp_ioctl_in_t *p_ioctl; + al_dev_open_context_t *p_context; + IO_STACK_LOCATION *p_io_stack; + ib_pnp_req_t pnp_req; + ib_api_status_t status, *p_user_status; + uint64_t *p_user_hdl; + ib_pnp_handle_t h_pnp; + cl_status_t cl_status; + KEVENT *p_sync_event; + NTSTATUS nt_status; + + AL_ENTER( AL_DBG_PNP ); + + p_context = p_open_context; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid file object type for request: %016I64x\n", + (LONG_PTR)p_io_stack->FileObject->FsContext2) ); + return CL_INVALID_PARAMETER; + } + + if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_reg_pnp_ioctl_in_t) || + cl_ioctl_out_size( h_ioctl ) < sizeof(ual_rearm_pnp_ioctl_out_t) ) + { + AL_EXIT( AL_DBG_PNP ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = cl_ioctl_in_buf( h_ioctl ); + + pnp_req.pnp_class = p_ioctl->pnp_class; + pnp_req.pnp_context = p_open_context; + pnp_req.pfn_pnp_cb = __proxy_pnp_cb; + + p_user_status = p_ioctl->p_status; + p_user_hdl = p_ioctl->p_hdl; + + if( pnp_get_flag( p_ioctl->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + { + nt_status = ObReferenceObjectByHandle( p_ioctl->sync_event, + STANDARD_RIGHTS_ALL, *ExEventObjectType, h_ioctl->RequestorMode, + (PVOID*)&p_sync_event, NULL ); + if( !NT_SUCCESS( nt_status ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid sync event handle\n") ); + return CL_INVALID_PARAMETER; + } + } + else + { + p_sync_event = NULL; + } + + cl_mutex_acquire( &p_context->pnp_mutex ); + status = al_reg_pnp( p_context->h_al, &pnp_req, p_sync_event, &h_pnp ); + if( status == IB_SUCCESS ) + { + CL_ASSERT( h_pnp ); + h_pnp->p_rearm_irp = h_ioctl; + + h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)h_pnp->obj.hdl; +#pragma warning(push, 3) + IoSetCancelRoutine( h_ioctl, __cancel_rearm_pnp ); +#pragma warning(pop) + IoMarkIrpPending( h_ioctl ); + + cl_copy_to_user( p_user_hdl, &h_pnp->obj.hdl, sizeof(uint64_t) ); + + /* Mark the registration as a user-mode one. 
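+		 * Tagging the object with AL_OBJ_SUBTYPE_UM_EXPORT lets teardown
+		 * paths recognize registrations created on behalf of a user-mode
+		 * process (an inference from the flag name; the flag's consumers
+		 * live outside this file).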
+		 */
+		h_pnp->obj.type |= AL_OBJ_SUBTYPE_UM_EXPORT;
+		h_pnp->obj.hdl_valid = TRUE;
+		deref_al_obj( &h_pnp->obj );
+
+		cl_status = CL_PENDING;
+	}
+	else
+	{
+		cl_status = CL_INVALID_PARAMETER;
+	}
+
+	cl_copy_to_user( p_user_status, &status, sizeof(ib_api_status_t) );
+	cl_mutex_release( &p_context->pnp_mutex );
+
+	AL_EXIT( AL_DBG_PNP );
+	return cl_status;
+}
+
+
+/*
+ * Process the ioctl UAL_POLL_PNP:
+ */
+static cl_status_t
+proxy_poll_pnp(
+	IN	void				*p_open_context,
+	IN	cl_ioctl_handle_t	h_ioctl,
+	OUT	size_t				*p_ret_bytes )
+{
+	ual_poll_pnp_ioctl_t	*p_ioctl;
+	al_dev_open_context_t	*p_context;
+	proxy_pnp_evt_t			*p_evt;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_context = p_open_context;
+
+	if( cl_ioctl_in_size( h_ioctl ) < sizeof(uint64_t) ||
+		cl_ioctl_out_size( h_ioctl ) < sizeof(ib_pnp_rec_t) )
+	{
+		AL_EXIT( AL_DBG_PNP );
+		return CL_INVALID_PARAMETER;
+	}
+
+	p_ioctl = cl_ioctl_in_buf( h_ioctl );
+	CL_ASSERT( cl_ioctl_in_buf( h_ioctl ) == cl_ioctl_out_buf( h_ioctl ) );
+
+	cl_spinlock_acquire( &p_context->h_al->obj.lock );
+	p_evt = al_hdl_chk(
+		p_context->h_al, p_ioctl->in.evt_hdl, AL_OBJ_TYPE_H_PNP_EVENT );
+	if( p_evt )
+	{
+		if( cl_ioctl_out_size( h_ioctl ) < p_evt->rec_size )
+		{
+			cl_spinlock_release( &p_context->h_al->obj.lock );
+			AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Buffer too small!\n") );
+			return CL_INVALID_PARAMETER;
+		}
+
+		cl_memcpy( &p_ioctl->out.pnp_rec, p_evt + 1, p_evt->rec_size );
+		*p_ret_bytes = p_evt->rec_size;
+	}
+	cl_spinlock_release( &p_context->h_al->obj.lock );
+
+	AL_EXIT( AL_DBG_PNP );
+	return CL_SUCCESS;
+}
+
+
+/*
+ * Process the ioctl UAL_REARM_PNP:
+ */
+static cl_status_t
+proxy_rearm_pnp(
+	IN	void				*p_open_context,
+	IN	cl_ioctl_handle_t	h_ioctl )
+{
+	ual_rearm_pnp_ioctl_in_t	*p_ioctl;
+	al_dev_open_context_t		*p_context;
+	IO_STACK_LOCATION			*p_io_stack;
+	proxy_pnp_evt_t				*p_evt;
+	ib_pnp_handle_t				h_pnp;
+	IRP							*p_old_irp;
+
+	AL_ENTER( AL_DBG_PNP );
+
+	p_context = p_open_context;
+
+	p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl );
+	if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR,
+			("Invalid file object type for request: %016I64x\n",
+			(LONG_PTR)p_io_stack->FileObject->FsContext2) );
+		return CL_INVALID_PARAMETER;
+	}
+
+	if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_in_t) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(ual_rearm_pnp_ioctl_out_t) )
+	{
+		AL_EXIT( AL_DBG_PNP );
+		return CL_INVALID_PARAMETER;
+	}
+
+	p_ioctl = cl_ioctl_in_buf( h_ioctl );
+
+	h_pnp = (al_pnp_t*)
+		al_hdl_ref( p_context->h_al, p_ioctl->pnp_hdl, AL_OBJ_TYPE_H_PNP );
+	if( !h_pnp )
+	{
+		AL_PRINT_EXIT( TRACE_LEVEL_WARNING, AL_DBG_PNP,
+			("Invalid PNP handle.\n") );
+		return CL_INVALID_PARAMETER;
+	}
+#pragma warning(push, 3)
+	IoSetCancelRoutine( h_ioctl, __cancel_rearm_pnp );
+#pragma warning(pop)
+	IoMarkIrpPending( h_ioctl );
+	h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)h_pnp->obj.hdl;
+
+	/*
+	 * Update the object context before signalling the event since that value
+	 * is returned by the PnP callback.
+	 */
+	p_old_irp = InterlockedExchangePointer( &h_pnp->p_rearm_irp, h_ioctl );
+	if( p_old_irp )
+	{
+#pragma warning(push, 3)
+		IoSetCancelRoutine( p_old_irp, NULL );
+#pragma warning(pop)
+		/* Complete the IRP.
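+		 * Only one rearm IRP may be outstanding per registration; a
+		 * superseded IRP is failed with STATUS_CANCELLED so its user-mode
+		 * waiter unblocks.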
*/ + p_old_irp->IoStatus.Status = STATUS_CANCELLED; + p_old_irp->IoStatus.Information = 0; + IoCompleteRequest( p_old_irp, IO_NO_INCREMENT ); + } + + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + p_evt = al_hdl_chk( + p_context->h_al, p_ioctl->last_evt_hdl, AL_OBJ_TYPE_H_PNP_EVENT ); + if( p_evt ) + { + p_evt->evt_context = p_ioctl->last_evt_context; + p_evt->evt_status = p_ioctl->last_evt_status; + cl_event_signal( &p_evt->event ); + } + cl_spinlock_release( &p_context->h_al->obj.lock ); + + deref_al_obj( &h_pnp->obj ); + + AL_EXIT( AL_DBG_PNP ); + return CL_PENDING; +} + + +/* + * Process the ioctl UAL_DEREG_PNP: + */ +static cl_status_t +proxy_dereg_pnp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl ) +{ + uint64_t *p_hdl; + al_dev_open_context_t *p_context; + IO_STACK_LOCATION *p_io_stack; + ib_pnp_handle_t h_pnp; + + AL_ENTER( AL_DBG_PNP ); + p_context = p_open_context; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_PNP_MGR ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid file object type for request: %016I64x\n", + (LONG_PTR)p_io_stack->FileObject->FsContext2) ); + return CL_INVALID_PARAMETER; + } + + if( cl_ioctl_in_size( h_ioctl ) < sizeof(ual_dereg_pnp_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + p_hdl = cl_ioctl_in_buf( h_ioctl ); + + h_pnp = (ib_pnp_handle_t) + al_hdl_ref( p_context->h_al, *p_hdl, AL_OBJ_TYPE_H_PNP ); + if( !h_pnp ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + h_pnp->p_dereg_irp = h_ioctl; + + IoMarkIrpPending( h_ioctl ); + + h_pnp->obj.pfn_destroy( &h_pnp->obj, NULL ); + + AL_EXIT( AL_DBG_PNP ); + return CL_PENDING; +} + + + +cl_status_t +al_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( h_ioctl && p_ret_bytes ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_REG_SHMID: + cl_status = proxy_reg_shmid( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_GET_CA_ATTR_INFO: + cl_status = proxy_get_ca_attr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REG_PNP: + cl_status = proxy_reg_pnp( p_context, h_ioctl ); + break; + case UAL_POLL_PNP: + cl_status = proxy_poll_pnp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REARM_PNP: + cl_status = proxy_rearm_pnp( p_context, h_ioctl ); + break; + case UAL_DEREG_PNP: + cl_status = proxy_dereg_pnp( p_context, h_ioctl ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy_cep.c b/branches/Ndi/core/al/kernel/al_proxy_cep.c new file mode 100644 index 00000000..6b1ccdd0 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy_cep.c @@ -0,0 +1,951 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_proxy_cep.tmh" +#endif +#include "al_cm_cep.h" +#include "al_dev.h" +#include +#include "al_proxy.h" +#include "al.h" +#include "al_qp.h" + + +static cl_status_t +proxy_create_cep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + void* __ptr64 * p_user_context; + ual_create_cep_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_create_cep_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(void* __ptr64) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_create_cep_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_user_context = cl_ioctl_in_buf( h_ioctl ); + + /* We use IRPs as notification mechanism so the callback is NULL. */ + p_ioctl->status = al_create_cep( p_context->h_al, NULL, + *p_user_context, &p_ioctl->cid ); + + *p_ret_bytes = sizeof(ual_create_cep_ioctl_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static inline void +__complete_get_event_ioctl( + IN ib_al_handle_t h_al, + IN IRP* const p_irp, + IN NTSTATUS status ) +{ +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + + /* Complete the IRP. */ + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NETWORK_INCREMENT ); + + deref_al_obj( &h_al->obj ); +} + + +static cl_status_t +proxy_destroy_cep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. 
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + al_destroy_cep( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), NULL ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_listen( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_listen_ioctl_t *p_ioctl; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_listen_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_listen_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the private data compare buffer to our kernel copy. */ + if( p_ioctl->cep_listen.p_cmp_buf ) + p_ioctl->cep_listen.p_cmp_buf = p_ioctl->compare; + + status = + al_cep_listen( p_context->h_al, p_ioctl->cid, &p_ioctl->cep_listen ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_req_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_req_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_req_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_req_ioctl_out); + + p_ioctl->in.cm_req.h_al = p_context->h_al; + p_ioctl->in.cm_req.p_primary_path = &p_ioctl->in.paths[0]; + if( p_ioctl->in.cm_req.p_alt_path ) + p_ioctl->in.cm_req.p_alt_path = &p_ioctl->in.paths[1]; + if( p_ioctl->in.cm_req.p_compare_buffer ) + p_ioctl->in.cm_req.p_compare_buffer = p_ioctl->in.compare; + if( p_ioctl->in.cm_req.p_req_pdata ) + p_ioctl->in.cm_req.p_req_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. */ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->in.cm_req.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->in.cm_req.h_qp = h_qp; + + p_ioctl->out.status = al_cep_pre_req( p_context->h_al, p_ioctl->in.cid, + &p_ioctl->in.cm_req, &p_ioctl->out.init ); + + deref_al_obj( &h_qp->obj ); + + if( p_ioctl->out.status != IB_SUCCESS ) + { +done: + cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_send_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. 
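+ * SEND_REQ consumes only the CID and returns just the API status; the + * REQ parameters themselves were staged earlier by the PRE_REQ ioctl.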
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_req( + p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_rep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rep_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rep_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_rep_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_rep_ioctl_out); + + if( p_ioctl->in.cm_rep.p_rep_pdata ) + p_ioctl->in.cm_rep.p_rep_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. */ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->in.cm_rep.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->in.cm_rep.h_qp = h_qp; + + p_ioctl->out.status = al_cep_pre_rep( p_context->h_al, p_ioctl->in.cid, + p_ioctl->in.context, &p_ioctl->in.cm_rep, &p_ioctl->out.init ); + + deref_al_obj( &h_qp->obj ); + + if( p_ioctl->out.status != IB_SUCCESS ) + { +done: + cl_memclr( &p_ioctl->out.init, sizeof(ib_qp_mod_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_send_rep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_rep( + p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_rtr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_rtr_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_rtr_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. 
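+ * The CID comes in and the RTR transition attributes (an ib_qp_mod_t) go + * back out, so that user-mode can perform the QP modify itself.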
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rtr_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_get_rtr_ioctl_t); + + p_ioctl->status = al_cep_get_rtr_attr( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rtr ); + + if( p_ioctl->status != IB_SUCCESS ) + cl_memclr( &p_ioctl->rtr, sizeof(ib_qp_mod_t) ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_rts( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_rts_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_rts_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_rts_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_get_rts_ioctl_t); + + p_ioctl->status = al_cep_get_rts_attr( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->rts ); + + if( p_ioctl->status != IB_SUCCESS ) + cl_memclr( &p_ioctl->rts, sizeof(ib_qp_mod_t) ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_rtu( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rtu_ioctl_t *p_ioctl; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rtu_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rtu_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + status = al_cep_rtu( p_context->h_al, + p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_rej( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_rej_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_rej_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_rej_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_rej( + p_context->h_al, p_ioctl->cid, p_ioctl->rej_status, p_ioctl->ari, + p_ioctl->ari_len, p_ioctl->pdata, p_ioctl->pdata_len ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_mra( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_mra_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_mra_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. 
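+ * The MRA private data travels inline in the ioctl buffer; the pdata + * pointer is redirected to that kernel copy below before calling al_cep_mra.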
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_mra_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_ioctl->cm_mra.p_mra_pdata = p_ioctl->pdata; + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_mra( + p_context->h_al, p_ioctl->cid, &p_ioctl->cm_mra ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_lap( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_lap_ioctl_t *p_ioctl; + ib_api_status_t status; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_lap_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_lap_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ib_api_status_t); + + p_ioctl->cm_lap.p_alt_path = &p_ioctl->alt_path; + if( p_ioctl->cm_lap.p_lap_pdata ) + p_ioctl->cm_lap.p_lap_pdata = p_ioctl->pdata; + + /* Get the kernel QP handle. */ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->cm_lap.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->cm_lap.h_qp = h_qp; + + status = al_cep_lap( p_context->h_al, p_ioctl->cid, &p_ioctl->cm_lap ); + + deref_al_obj( &h_qp->obj ); + +done: + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_pre_apr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_apr_ioctl_t *p_ioctl; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_apr_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(struct _ual_cep_apr_ioctl_out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(struct _ual_cep_apr_ioctl_out); + + if( p_ioctl->in.cm_apr.p_info ) + p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_ioctl->in.apr_info; + if( p_ioctl->in.cm_apr.p_apr_pdata ) + p_ioctl->in.cm_apr.p_apr_pdata = p_ioctl->in.pdata; + + /* Get the kernel QP handle. */ + h_qp = (ib_qp_handle_t)al_hdl_ref( + p_context->h_al, (uint64_t)p_ioctl->in.cm_apr.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + goto done; + } + + p_ioctl->in.cm_apr.h_qp = h_qp; + + p_ioctl->out.status = al_cep_pre_apr( p_context->h_al, p_ioctl->in.cid, + &p_ioctl->in.cm_apr, &p_ioctl->out.apr ); + + deref_al_obj( &h_qp->obj ); + + if( p_ioctl->out.status != IB_SUCCESS ) + { +done: + cl_memclr( &p_ioctl->out.apr, sizeof(ib_qp_mod_t) ); + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_send_apr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate user parameters.
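+ * Like SEND_REQ, this consumes only the CID and writes back the status + * returned by al_cep_send_apr.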
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_send_apr( + p_context->h_al, *(net32_t*)cl_ioctl_in_buf( h_ioctl ) ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_dreq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_dreq_ioctl_t *p_ioctl; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_dreq_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_dreq_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* The private data is carried inline in the ioctl buffer. */ + status = al_cep_dreq( p_context->h_al, + p_ioctl->cid, p_ioctl->pdata, p_ioctl->pdata_len ); + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = status; + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_drep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_drep_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_drep_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cep_drep_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ib_api_status_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_ioctl->cm_drep.p_drep_pdata = p_ioctl->pdata; + + (*(ib_api_status_t*)cl_ioctl_out_buf( h_ioctl )) = al_cep_drep( + p_context->h_al, p_ioctl->cid, &p_ioctl->cm_drep ); + + *p_ret_bytes = sizeof(ib_api_status_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_timewait( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_get_timewait_ioctl_t *p_ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_get_timewait_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters. */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_get_timewait_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + p_ioctl->status = al_cep_get_timewait( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->timewait_us ); + + *p_ret_bytes = sizeof(ual_cep_get_timewait_ioctl_t); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_poll( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + al_dev_open_context_t *p_context; + ual_cep_poll_ioctl_t *p_ioctl; + ib_mad_element_t *p_mad = NULL; + + AL_ENTER( AL_DBG_CM ); + + p_context = (al_dev_open_context_t*)p_open_context; + p_ioctl = (ual_cep_poll_ioctl_t*)cl_ioctl_out_buf( h_ioctl ); + + /* Validate user parameters.
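+ * POLL is non-blocking: any completed MAD is copied into the ioctl output + * buffer and returned to its pool rather than being handed to user-mode.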
*/ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) || + cl_ioctl_out_size( h_ioctl ) != sizeof(ual_cep_poll_ioctl_t) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + *p_ret_bytes = sizeof(ual_cep_poll_ioctl_t); + + p_ioctl->status = al_cep_poll( p_context->h_al, + *(net32_t*)cl_ioctl_in_buf( h_ioctl ), &p_ioctl->context, + &p_ioctl->new_cid, &p_mad ); + + if( p_ioctl->status == IB_SUCCESS ) + { + /* Copy the MAD for user consumption and free it. */ + CL_ASSERT( p_mad ); + p_ioctl->element = *p_mad; + if( p_mad->grh_valid ) + p_ioctl->grh = *p_mad->p_grh; + else + cl_memclr( &p_ioctl->grh, sizeof(ib_grh_t) ); + cl_memcpy( p_ioctl->mad_buf, p_mad->p_mad_buf, MAD_BLOCK_SIZE ); + ib_put_mad( p_mad ); + } + else + { + cl_memclr( &p_ioctl->mad_buf, MAD_BLOCK_SIZE ); + p_ioctl->new_cid = AL_INVALID_CID; + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_cep_get_event( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + al_dev_open_context_t *p_context; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = p_open_context; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + if( (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_CM ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid file object type for request: %016I64x\n", + (LONG_PTR)p_io_stack->FileObject->FsContext2) ); + return CL_INVALID_PARAMETER; + } + + /* Check the size of the ioctl */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(net32_t) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid IOCTL input buffer.\n") ); + return CL_INVALID_PARAMETER; + } + + cid = *(net32_t*)cl_ioctl_in_buf( h_ioctl ); + + status = al_cep_queue_irp( p_context->h_al, cid, h_ioctl ); + if( status != STATUS_PENDING ) + { + /* Invalid CID. Complete the request.
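+ * Returning a status other than CL_PENDING lets the caller complete the + * IRP immediately instead of leaving it queued on the CEP.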
*/ + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + AL_EXIT( AL_DBG_CM ); + return CL_PENDING; +} + + +cl_status_t cep_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( h_ioctl && p_ret_bytes ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_CREATE_CEP: + cl_status = proxy_create_cep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_CEP: + cl_status = proxy_destroy_cep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_LISTEN: + cl_status = proxy_cep_listen( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_REQ: + cl_status = proxy_cep_pre_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_REQ: + cl_status = proxy_cep_send_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_REP: + cl_status = proxy_cep_pre_rep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_REP: + cl_status = proxy_cep_send_rep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_RTR: + cl_status = proxy_cep_get_rtr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_RTS: + cl_status = proxy_cep_get_rts( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_RTU: + cl_status = proxy_cep_rtu( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_REJ: + cl_status = proxy_cep_rej( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_MRA: + cl_status = proxy_cep_mra( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_LAP: + cl_status = proxy_cep_lap( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_PRE_APR: + cl_status = proxy_cep_pre_apr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_SEND_APR: + cl_status = proxy_cep_send_apr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_DREQ: + cl_status = proxy_cep_dreq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_DREP: + cl_status = proxy_cep_drep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_TIMEWAIT: + cl_status = proxy_cep_get_timewait( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_GET_EVENT: + cl_status = proxy_cep_get_event( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CEP_POLL: + cl_status = proxy_cep_poll( p_context, h_ioctl, p_ret_bytes ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy_cm.c b/branches/Ndi/core/al/kernel/al_proxy_cm.c new file mode 100644 index 00000000..876d0804 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy_cm.c @@ -0,0 +1,1459 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include "al.h" +#include "al_debug.h" +#include "al_dev.h" +#include "al_cm_cep.h" +#include "al_qp.h" +#include "al_proxy.h" + + + +/* + * Process a received CM REQ message. + */ +void proxy_cm_req_cb( + IN ib_cm_req_rec_t *p_cm_req_rec ) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_req_cb_ioctl_rec *p_ioctl_rec; /* short-cut ptr to CM req */ + al_dev_open_context_t *p_context; + uint8_t *dest; + uint64_t hdl; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr(&cb_info, sizeof(cb_info)); + + cb_info.rec_type = CM_REQ_REC; + p_ioctl_rec = &cb_info.ioctl_rec.cm_req_cb_ioctl_rec; + cl_memcpy(&p_ioctl_rec->req_rec, p_cm_req_rec, sizeof(ib_cm_req_rec_t)); + + /* set up the context to be returned to user */ + if( p_cm_req_rec->h_cm_listen ) + { + p_cm_req_rec->h_cm_listen->obj.hdl_valid = TRUE; + p_ioctl_rec->req_rec.h_cm_listen = + (ib_listen_handle_t)p_cm_req_rec->h_cm_listen->obj.hdl; + } + + p_context = p_cm_req_rec->h_cm_req->h_al->p_context; + + CL_ASSERT(p_context); + + /* Copy the necessary QP attributes to the user. */ + cl_memcpy( &p_ioctl_rec->qp_mod_rtr, + &p_cm_req_rec->h_cm_req->p_req_info->qp_mod_rtr, + sizeof( ib_qp_mod_t ) ); + cl_memcpy( &p_ioctl_rec->qp_mod_rts, + &p_cm_req_rec->h_cm_req->p_req_info->qp_mod_rts, + sizeof( ib_qp_mod_t ) ); + p_ioctl_rec->timeout_ms = p_cm_req_rec->h_cm_req->retry_timeout * + p_cm_req_rec->h_cm_req->max_cm_retries + 2000; + + if( p_cm_req_rec->qp_type == IB_QPT_UNRELIABLE_DGRM) + { + dest = (uint8_t*)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata; + cl_memcpy( dest,p_cm_req_rec->p_req_pdata, IB_SIDR_REQ_PDATA_SIZE ); + } + else + { + dest = (uint8_t*)&p_ioctl_rec->cm_req_pdata_rec.req_pdata; + cl_memcpy( dest,p_cm_req_rec->p_req_pdata, IB_REQ_PDATA_SIZE ); + } + + p_ioctl_rec->req_rec.p_req_pdata = NULL; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + hdl = al_hdl_lock_insert( p_context->h_al, p_cm_req_rec->h_cm_req, + AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_REQ ); + } + else + { + hdl = AL_INVALID_HANDLE; + } + + if( hdl == AL_INVALID_HANDLE ) + { + ib_cm_rej_t cm_rej; + + /* Reject the request. */ + cl_memclr( &cm_rej, sizeof( ib_cm_rej_t ) ); + cm_rej.rej_status = IB_REJ_TIMEOUT; + ib_cm_rej( p_cm_req_rec->h_cm_req, &cm_rej ); + } + else + { + p_ioctl_rec->req_rec.h_cm_req = (ib_cm_handle_t)hdl; + /* TODO: handle failure. 
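+ * A queueing failure should presumably be handled like the invalid-handle + * case above, i.e. by rejecting the REQ with IB_REJ_TIMEOUT.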
*/ + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, &cb_info, NULL ); + } + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void proxy_cm_dreq_cb( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec; + al_dev_open_context_t *p_context; + uint64_t hdl; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_DREQ_REC; + p_ioctl_rec = &cb_info.ioctl_rec.cm_dreq_cb_ioctl_rec; + cl_memcpy(&p_ioctl_rec->dreq_rec, p_cm_dreq_rec, sizeof(ib_cm_dreq_rec_t)); + + p_context = p_cm_dreq_rec->h_cm_dreq->h_al->p_context; + + cl_memcpy( &p_ioctl_rec->dreq_pdata, p_cm_dreq_rec->p_dreq_pdata, + IB_DREQ_PDATA_SIZE ); + p_ioctl_rec->dreq_rec.p_dreq_pdata = NULL; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + hdl = al_hdl_lock_insert( p_context->h_al, p_cm_dreq_rec->h_cm_dreq, + AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_DREQ ); + } + else + { + hdl = AL_INVALID_HANDLE; + } + + if( hdl != AL_INVALID_HANDLE ) + { + p_ioctl_rec->dreq_rec.h_cm_dreq = (ib_cm_handle_t)hdl; + if( !proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_dreq_rec->h_cm_dreq->h_qp->obj ) ) + { + /* Remove handle from map. */ + al_hdl_get_conn( p_context->h_al, hdl, AL_OBJ_SUBTYPE_DREQ ); + + goto err; + } + } + else + { + ib_cm_drep_t cm_drep; +err: + + /* Send a drep. */ + cl_memclr( &cm_drep, sizeof( ib_cm_drep_t ) ); + ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &cm_drep ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void +proxy_cm_rtu_cb( + IN ib_cm_rtu_rec_t *p_cm_rtu_rec ) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec; + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_RTU_REC; + p_ioctl_rec = &cb_info.ioctl_rec.cm_rtu_cb_ioctl_rec; + cl_memcpy(&p_ioctl_rec->rtu_rec, p_cm_rtu_rec, sizeof(ib_cm_rtu_rec_t)); + + CL_ASSERT( p_cm_rtu_rec->h_qp ); + p_context = p_cm_rtu_rec->h_qp->obj.h_al->p_context; + p_ioctl_rec->rtu_rec.h_qp = (ib_qp_handle_t)p_cm_rtu_rec->h_qp->obj.hdl; + + cl_memcpy( &p_ioctl_rec->rtu_pdata, + p_cm_rtu_rec->p_rtu_pdata, IB_RTU_PDATA_SIZE ); + p_ioctl_rec->rtu_rec.p_rtu_pdata = NULL; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. 
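+ * The reference taken by proxy_context_ref is dropped unconditionally + * below, which also covers the case where taking the reference failed.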
+ */ + if( proxy_context_ref( p_context ) ) + { + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_rtu_rec->h_qp->obj ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void proxy_cm_rep_cb( + IN ib_cm_rep_rec_t *p_cm_rep_rec ) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_rep_cb_ioctl_rec *p_ioctl_rec; + al_dev_open_context_t *p_context; + uint8_t *p_dest; + uint64_t hdl; + al_obj_t *p_al_obj; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_REP_REC; + p_ioctl_rec = &cb_info.ioctl_rec.cm_rep_cb_ioctl_rec; + p_ioctl_rec->rep_rec = *p_cm_rep_rec; + + if( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM ) + { + p_dest = (uint8_t*)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata; + cl_memcpy( p_dest, p_cm_rep_rec->p_rep_pdata, + IB_SIDR_REP_PDATA_SIZE ); + } + else + { + p_dest = (uint8_t*)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata; + cl_memcpy( p_dest, p_cm_rep_rec->p_rep_pdata, + IB_REP_PDATA_SIZE ); + } + + p_ioctl_rec->rep_rec.p_rep_pdata = NULL; + + CL_ASSERT( p_cm_rep_rec->h_cm_rep ); + p_context = p_cm_rep_rec->h_cm_rep->h_al->p_context; + + if( p_cm_rep_rec->qp_type == IB_QPT_RELIABLE_CONN || + p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_CONN ) + { + CL_ASSERT( p_cm_rep_rec->h_cm_rep->h_qp ); + p_al_obj = &p_cm_rep_rec->h_cm_rep->h_qp->obj; + + /* Copy the necessary QP attributes to the user. */ + cl_memcpy( &p_ioctl_rec->qp_mod_rtr, + &p_cm_rep_rec->h_cm_rep->p_req_info->qp_mod_rtr, + sizeof( ib_qp_mod_t ) ); + cl_memcpy( &p_ioctl_rec->qp_mod_rts, + &p_cm_rep_rec->h_cm_rep->p_req_info->qp_mod_rts, + sizeof( ib_qp_mod_t ) ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + hdl = al_hdl_lock_insert( p_context->h_al, p_cm_rep_rec->h_cm_rep, + AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_REP ); + } + else + { + hdl = AL_INVALID_HANDLE; + } + + if( hdl == AL_INVALID_HANDLE ) + { + ib_cm_rej_t cm_rej; + + /* Reject the connection. */ + proxy_context_deref( p_context ); + cl_memclr( &cm_rej, sizeof( ib_cm_rej_t ) ); + cm_rej.rej_status = IB_REJ_TIMEOUT; + ib_cm_rej( p_cm_rep_rec->h_cm_rep, &cm_rej ); + return; + } + p_ioctl_rec->rep_rec.h_cm_rep = (ib_cm_handle_t)hdl; + } + else + { + CL_ASSERT( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM ); + + p_al_obj = NULL; + + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + p_ioctl_rec->rep_rec.h_cm_rep = NULL; + } + + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, &cb_info, p_al_obj ); + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void proxy_cm_drep_cb( + IN ib_cm_drep_rec_t *p_cm_drep_rec) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_drep_cb_ioctl_rec *p_ioctl_rec = + &cb_info.ioctl_rec.cm_drep_cb_ioctl_rec; + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_DREP_REC; + p_ioctl_rec->drep_rec = *p_cm_drep_rec; + + if( p_cm_drep_rec->p_drep_pdata ) + { + cl_memcpy( &p_ioctl_rec->drep_pdata, p_cm_drep_rec->p_drep_pdata, + IB_DREP_PDATA_SIZE ); + p_ioctl_rec->drep_rec.p_drep_pdata = NULL; + } + + CL_ASSERT( p_cm_drep_rec->h_qp ); + p_context = p_cm_drep_rec->h_qp->obj.h_al->p_context; + p_ioctl_rec->drep_rec.h_qp = (ib_qp_handle_t)p_cm_drep_rec->h_qp->obj.hdl; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. 
+ */ + if( proxy_context_ref( p_context ) ) + { + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_drep_rec->h_qp->obj ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void proxy_cm_mra_cb( + IN ib_cm_mra_rec_t *p_cm_mra_rec) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_mra_cb_ioctl_rec *p_ioctl_rec = + &cb_info.ioctl_rec.cm_mra_cb_ioctl_rec; + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr( &cb_info, sizeof(cb_info) ); + cb_info.rec_type = CM_MRA_REC; + p_ioctl_rec->mra_rec = *p_cm_mra_rec; + + cl_memcpy( &p_ioctl_rec->mra_pdata, p_cm_mra_rec->p_mra_pdata, + IB_MRA_PDATA_SIZE ); + p_ioctl_rec->mra_rec.p_mra_pdata = NULL; + + CL_ASSERT( p_cm_mra_rec->h_qp ); + p_ioctl_rec->mra_rec.h_qp = (ib_qp_handle_t)p_cm_mra_rec->h_qp->obj.hdl; + p_context = p_cm_mra_rec->h_qp->obj.h_al->p_context; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_mra_rec->h_qp->obj ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void proxy_cm_rej_cb( + IN ib_cm_rej_rec_t *p_cm_rej_rec) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_rej_cb_ioctl_rec *p_ioctl_rec = + &cb_info.ioctl_rec.cm_rej_cb_ioctl_rec; + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_REJ_REC; + p_ioctl_rec->rej_rec = *p_cm_rej_rec; + + if( p_cm_rej_rec->p_rej_pdata ) + { + cl_memcpy( &p_ioctl_rec->rej_pdata, p_cm_rej_rec->p_rej_pdata, + IB_REJ_PDATA_SIZE ); + p_ioctl_rec->rej_rec.p_rej_pdata = (uint8_t*)&p_ioctl_rec->rej_pdata; + } + + if( p_cm_rej_rec->ari_length > 0 ) + { + cl_memcpy( &p_ioctl_rec->ari_pdata, p_cm_rej_rec->p_ari, + p_cm_rej_rec->ari_length ); + p_ioctl_rec->rej_rec.p_ari = (uint8_t*)&p_ioctl_rec->ari_pdata; + } + else + { + p_ioctl_rec->rej_rec.p_ari = NULL; + } + + CL_ASSERT( p_cm_rej_rec->h_qp ); + p_ioctl_rec->rej_rec.h_qp = (ib_qp_handle_t)p_cm_rej_rec->h_qp->obj.hdl; + p_context = p_cm_rej_rec->h_qp->obj.h_al->p_context; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_rej_rec->h_qp->obj ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void +proxy_cm_lap_cb( + IN ib_cm_lap_rec_t *p_cm_lap_rec) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_lap_cb_ioctl_rec *p_ioctl_rec = + &cb_info.ioctl_rec.cm_lap_cb_ioctl_rec; + al_dev_open_context_t *p_context; + uint64_t hdl; + + AL_ENTER( AL_DBG_CM ); + cl_memclr(&cb_info, sizeof(cb_info)); + cb_info.rec_type = CM_LAP_REC; + p_ioctl_rec->lap_rec = *p_cm_lap_rec; + + if( p_cm_lap_rec->p_lap_pdata ) + { + cl_memcpy( &p_ioctl_rec->lap_pdata, p_cm_lap_rec->p_lap_pdata, + IB_LAP_PDATA_SIZE ); + p_ioctl_rec->lap_rec.p_lap_pdata = NULL; + } + + p_context = p_cm_lap_rec->h_cm_lap->h_al->p_context; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. 
+ */ + if( proxy_context_ref( p_context ) ) + { + hdl = al_hdl_lock_insert( p_context->h_al, p_cm_lap_rec->h_cm_lap, + AL_OBJ_TYPE_H_CONN | AL_OBJ_SUBTYPE_LAP ); + } + else + { + hdl = AL_INVALID_HANDLE; + } + + if( hdl != AL_INVALID_HANDLE ) + { + p_ioctl_rec->lap_rec.h_cm_lap = (ib_cm_handle_t)hdl; + + if( !proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_lap_rec->h_cm_lap->h_qp->obj ) ) + { + al_hdl_get_conn( p_context->h_al, hdl, AL_OBJ_SUBTYPE_LAP ); + goto err; + } + } + else + { + ib_cm_apr_t cm_apr; + +err: + /* Reject the LAP. */ + cl_memclr( &cm_apr, sizeof( ib_cm_apr_t ) ); + cm_apr.apr_status = IB_AP_REJECT; + ib_cm_apr( p_cm_lap_rec->h_cm_lap, &cm_apr ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void +proxy_cm_apr_cb( + IN ib_cm_apr_rec_t *p_cm_apr_rec) +{ + cm_cb_ioctl_info_t cb_info; + struct _cm_apr_cb_ioctl_rec *p_ioctl_rec = + &cb_info.ioctl_rec.cm_apr_cb_ioctl_rec; + al_dev_open_context_t *p_context; + + AL_ENTER( AL_DBG_CM ); + cl_memclr( &cb_info, sizeof(cb_info) ); + cb_info.rec_type = CM_APR_REC; + p_ioctl_rec->apr_rec = *p_cm_apr_rec; + + if( p_cm_apr_rec->info_length > 0 ) + { + cl_memcpy( &p_ioctl_rec->apr_info, p_cm_apr_rec->p_info, + p_cm_apr_rec->info_length ); + } + + p_ioctl_rec->apr_rec.p_info = NULL; + + cl_memcpy( &p_ioctl_rec->apr_pdata, p_cm_apr_rec->p_apr_pdata, + IB_APR_PDATA_SIZE ); + p_ioctl_rec->apr_rec.p_apr_pdata = NULL; + + p_context = p_cm_apr_rec->h_qp->obj.h_al->p_context; + p_ioctl_rec->apr_rec.h_qp = (ib_qp_handle_t)p_cm_apr_rec->h_qp->obj.hdl; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + proxy_queue_cb_buf( UAL_GET_CM_CB_INFO, p_context, + &cb_info, &p_cm_apr_rec->h_qp->obj ); + } + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); +} + + + +void +proxy_listen_err_cb( + IN ib_listen_err_rec_t *p_err_rec) +{ + al_dev_open_context_t *p_context; + misc_cb_ioctl_info_t cb_info; + + AL_ENTER( AL_DBG_CM ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + p_context = p_err_rec->h_cm_listen->obj.h_al->p_context; + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_CM ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = LISTEN_ERROR_REC; + cb_info.ioctl_rec.listen_err = *p_err_rec; + cb_info.ioctl_rec.listen_err.h_cm_listen = + (ib_listen_handle_t)p_err_rec->h_cm_listen->obj.hdl; + + /* Proxy handle must be valid now. */ + if( !p_err_rec->h_cm_listen->obj.hdl_valid ) + p_err_rec->h_cm_listen->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info, + &p_err_rec->h_cm_listen->obj ); + proxy_context_deref( p_context ); + + AL_EXIT( AL_DBG_CM ); +} + + +cl_status_t +proxy_cm_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_req_ioctl_t *p_ioctl = + (ual_cm_req_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + size_t in_buf_sz; + uint8_t *p_buf; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. 
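+ * The request payload is variable length: the fixed input structure may be + * followed by an alternate path record, a compare buffer and private data, + * which the exact-size check below accounts for.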
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Extra validation on the input buffer length. */ + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_req.p_alt_path ) + in_buf_sz += sizeof(ib_path_rec_t); + if( p_ioctl->in.cm_req.p_compare_buffer ) + in_buf_sz += p_ioctl->in.cm_req.compare_length; + if( p_ioctl->in.cm_req.p_req_pdata ) + in_buf_sz += p_ioctl->in.cm_req.req_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + p_ioctl->in.cm_req.h_al = p_context->h_al; + + /* Validate qp handle */ + if( p_ioctl->in.cm_req.qp_type == IB_QPT_RELIABLE_CONN || + p_ioctl->in.cm_req.qp_type == IB_QPT_UNRELIABLE_CONN ) + { + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return CL_SUCCESS; + } + p_ioctl->in.cm_req.h_qp = h_qp; + } + else + { + h_qp = NULL; + } + + /* Fixup the primary path pointer. */ + p_ioctl->in.cm_req.p_primary_path = p_ioctl->in.paths; + + p_buf = (uint8_t*)&p_ioctl->in.paths[1]; + /* Fixup the alternate path pointer if needed. */ + if( p_ioctl->in.cm_req.p_alt_path ) + { + p_ioctl->in.cm_req.p_alt_path = (ib_path_rec_t*)p_buf; + p_buf += sizeof(ib_path_rec_t); + } + /* Fixup the private data buffer pointer as needed. */ + if( p_ioctl->in.cm_req.p_req_pdata ) + { + p_ioctl->in.cm_req.p_req_pdata = p_buf; + p_buf += p_ioctl->in.cm_req.req_length; + } + /* Fixup the compare buffer pointer as needed. */ + if( p_ioctl->in.cm_req.p_compare_buffer ) + p_ioctl->in.cm_req.p_compare_buffer = p_buf; + + /* Override the user's callbacks with our own. */ + /* Do not change user's request from client/server to peer-to-peer. */ + if( p_ioctl->in.cm_req.pfn_cm_req_cb ) + p_ioctl->in.cm_req.pfn_cm_req_cb = proxy_cm_req_cb; + p_ioctl->in.cm_req.pfn_cm_rep_cb = proxy_cm_rep_cb; + p_ioctl->in.cm_req.pfn_cm_mra_cb = proxy_cm_mra_cb; + p_ioctl->in.cm_req.pfn_cm_rej_cb = proxy_cm_rej_cb; + + p_ioctl->out.status = ib_cm_req( &p_ioctl->in.cm_req ); + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_rep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_rep_ioctl_t *p_ioctl = + (ual_cm_rep_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm_req; + ib_qp_handle_t h_qp; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_rep.p_rep_pdata ) + in_buf_sz += p_ioctl->in.cm_rep.rep_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate QP handle.
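+ * al_hdl_ref translates the user-mode QP handle into the kernel object and + * takes a reference that is dropped once ib_cm_rep returns.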
*/ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + p_ioctl->in.cm_rep.h_qp = h_qp; + + /* Validate CM REQ handle */ + h_cm_req = al_hdl_get_conn( + p_context->h_al, p_ioctl->in.h_cm_req, AL_OBJ_SUBTYPE_REQ ); + if( !h_cm_req ) + { + deref_al_obj( &h_qp->obj ); + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + + if( p_ioctl->in.cm_rep.p_rep_pdata ) + p_ioctl->in.cm_rep.p_rep_pdata = (uint8_t*)((&p_ioctl->in.cm_rep) + 1); + + /* All work requests are posted in UM. */ + p_ioctl->in.cm_rep.p_recv_wr = NULL; + + p_ioctl->in.cm_rep.pfn_cm_rtu_cb = proxy_cm_rtu_cb; + p_ioctl->in.cm_rep.pfn_cm_lap_cb = proxy_cm_lap_cb; + p_ioctl->in.cm_rep.pfn_cm_dreq_cb = proxy_cm_dreq_cb; + + p_ioctl->out.status = ib_cm_rep( h_cm_req, &p_ioctl->in.cm_rep ); + + __deref_conn( h_cm_req ); + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_dreq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_dreq_ioctl_t *p_ioctl = + (ual_cm_dreq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_dreq.p_dreq_pdata ) + in_buf_sz += p_ioctl->in.cm_dreq.dreq_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate qp handle */ + if( p_ioctl->in.cm_dreq.qp_type == IB_QPT_RELIABLE_CONN || + p_ioctl->in.cm_dreq.qp_type == IB_QPT_UNRELIABLE_CONN ) + { + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + p_ioctl->in.cm_dreq.h_qp = h_qp; + } + else + { + h_qp = NULL; + } + + if( p_ioctl->in.cm_dreq.p_dreq_pdata ) + p_ioctl->in.cm_dreq.p_dreq_pdata = (uint8_t*)((&p_ioctl->in.cm_dreq) + 1); + + p_ioctl->in.cm_dreq.pfn_cm_drep_cb = proxy_cm_drep_cb; + + p_ioctl->out.status = ib_cm_dreq( &p_ioctl->in.cm_dreq ); + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_drep( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_drep_ioctl_t *p_ioctl = + (ual_cm_drep_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm_dreq; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_drep.p_drep_pdata ) + in_buf_sz += p_ioctl->in.cm_drep.drep_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CM REQ handle. */ + h_cm_dreq = al_hdl_get_conn( + p_context->h_al, p_ioctl->in.h_cm_dreq, AL_OBJ_SUBTYPE_DREQ ); + if( !h_cm_dreq ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + return CL_SUCCESS; + } + + if( p_ioctl->in.cm_drep.p_drep_pdata ) + { + p_ioctl->in.cm_drep.p_drep_pdata = + (uint8_t*)((&p_ioctl->in.cm_drep) + 1); + } + + p_ioctl->out.status = ib_cm_drep( h_cm_dreq, &p_ioctl->in.cm_drep ); + + __deref_conn( h_cm_dreq ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_listen( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_listen_ioctl_t *p_ioctl = + (ual_cm_listen_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + ib_listen_handle_t h_listen; + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_listen.p_compare_buffer ) + in_buf_sz += p_ioctl->in.cm_listen.compare_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + if( p_ioctl->in.cm_listen.p_compare_buffer ) + { + p_ioctl->in.cm_listen.p_compare_buffer = + (uint8_t*)((&p_ioctl->in.cm_listen) + 1); + } + + if( p_ioctl->in.cm_listen.qp_type == IB_QPT_RELIABLE_CONN || + p_ioctl->in.cm_listen.qp_type == IB_QPT_UNRELIABLE_CONN ) + { + p_ioctl->in.cm_listen.pfn_cm_mra_cb = proxy_cm_mra_cb; + p_ioctl->in.cm_listen.pfn_cm_rej_cb = proxy_cm_rej_cb; + } + p_ioctl->in.cm_listen.pfn_cm_req_cb = proxy_cm_req_cb; + + p_ioctl->out.status = cm_listen( p_context->h_al, + &p_ioctl->in.cm_listen, proxy_listen_err_cb, p_ioctl->in.context, + &h_listen ); + if( p_ioctl->out.status == IB_SUCCESS ) + { + p_ioctl->out.h_cm_listen = h_listen->obj.hdl; + h_listen->obj.hdl_valid = TRUE; + deref_al_obj( &h_listen->obj ); + } + else + { + p_ioctl->out.h_cm_listen = AL_INVALID_HANDLE; + } + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_cancel( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_cancel_ioctl_t *p_ioctl = + (ual_cm_cancel_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + ib_listen_handle_t h_cm_listen; + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate listen handle. */ + h_cm_listen = (ib_listen_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_cm_listen, AL_OBJ_TYPE_H_LISTEN ); + if( !h_cm_listen ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + return CL_SUCCESS; + } + + h_cm_listen->obj.pfn_destroy( &h_cm_listen->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_rtu( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_rtu_ioctl_t *p_ioctl = + (ual_cm_rtu_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm_rep; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_rtu.p_rtu_pdata ) + in_buf_sz += p_ioctl->in.cm_rtu.rtu_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CM REP handle. */ + h_cm_rep = al_hdl_get_conn( + p_context->h_al, p_ioctl->in.h_cm_rep, AL_OBJ_SUBTYPE_REP ); + if( !h_cm_rep ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + + if( p_ioctl->in.cm_rtu.p_rtu_pdata ) + p_ioctl->in.cm_rtu.p_rtu_pdata = (uint8_t*)((&p_ioctl->in.cm_rtu) + 1); + + p_ioctl->in.cm_rtu.pfn_cm_dreq_cb = proxy_cm_dreq_cb; + p_ioctl->in.cm_rtu.pfn_cm_apr_cb = proxy_cm_apr_cb; + + p_ioctl->out.status = ib_cm_rtu( h_cm_rep, &p_ioctl->in.cm_rtu ); + + __deref_conn( h_cm_rep ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_rej( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_rej_ioctl_t *p_ioctl = + (ual_cm_rej_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm; + size_t in_buf_sz; + uint8_t *p_buf; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_rej.p_ari ) + in_buf_sz += p_ioctl->in.cm_rej.ari_length; + if( p_ioctl->in.cm_rej.p_rej_pdata ) + in_buf_sz += p_ioctl->in.cm_rej.rej_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CM handle. We could reject a request or reply. 
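+ * al_hdl_get_conn (unlike the al_hdl_ref_conn used for MRA below) also + * removes the handle from the map, since a REJ ends the exchange for + * either subtype.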
*/ + h_cm = al_hdl_get_conn( p_context->h_al, p_ioctl->in.h_cm, + AL_OBJ_SUBTYPE_REQ | AL_OBJ_SUBTYPE_REP ); + if( !h_cm ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + + p_buf = (uint8_t*)((&p_ioctl->in.cm_rej) + 1); + if( p_ioctl->in.cm_rej.p_ari ) + { + p_ioctl->in.cm_rej.p_ari = (ib_ari_t*)p_buf; + p_buf += p_ioctl->in.cm_rej.ari_length; + } + if( p_ioctl->in.cm_rej.p_rej_pdata ) + p_ioctl->in.cm_rej.p_rej_pdata = p_buf; + + p_ioctl->out.status = ib_cm_rej( h_cm, &p_ioctl->in.cm_rej ); + + __deref_conn( h_cm ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_mra( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_mra_ioctl_t *p_ioctl = + (ual_cm_mra_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_mra.p_mra_pdata ) + in_buf_sz += p_ioctl->in.cm_mra.mra_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CM handle. MRA could be for a REQ, REP, or LAP. */ + h_cm = al_hdl_ref_conn( p_context->h_al, p_ioctl->in.h_cm, + AL_OBJ_SUBTYPE_REQ | AL_OBJ_SUBTYPE_REP | AL_OBJ_SUBTYPE_LAP ); + if( !h_cm ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + + if( p_ioctl->in.cm_mra.p_mra_pdata ) + p_ioctl->in.cm_mra.p_mra_pdata = (uint8_t*)((&p_ioctl->in.cm_mra) + 1); + + p_ioctl->out.status = ib_cm_mra( h_cm, &p_ioctl->in.cm_mra ); + + __deref_conn( h_cm ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_lap( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_lap_ioctl_t *p_ioctl = + (ual_cm_lap_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_lap.p_lap_pdata ) + in_buf_sz += p_ioctl->in.cm_lap.lap_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate qp handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + p_ioctl->in.cm_lap.h_qp = h_qp; + + if( p_ioctl->in.cm_lap.p_alt_path ) + p_ioctl->in.cm_lap.p_alt_path = &p_ioctl->in.alt_path; + + /* The appended private data follows the fixed input structure. */ + if( p_ioctl->in.cm_lap.p_lap_pdata ) + { + p_ioctl->in.cm_lap.p_lap_pdata = + (uint8_t* __ptr64)((&p_ioctl->in) + 1); + } + + p_ioctl->out.status = ib_cm_lap( &p_ioctl->in.cm_lap ); + + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_apr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cm_apr_ioctl_t *p_ioctl = + (ual_cm_apr_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cm_handle_t h_cm_lap; + ib_qp_handle_t h_qp; + size_t in_buf_sz; + uint8_t *p_buf; + + AL_ENTER( AL_DBG_CM ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + in_buf_sz = sizeof(p_ioctl->in); + if( p_ioctl->in.cm_apr.p_apr_pdata ) + in_buf_sz += p_ioctl->in.cm_apr.apr_length; + + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_CM ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + h_cm_lap = al_hdl_get_conn( + p_context->h_al, p_ioctl->in.h_cm_lap, AL_OBJ_SUBTYPE_LAP ); + if( !h_cm_lap ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + + /* Validate qp handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + __deref_conn( h_cm_lap ); + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; + } + p_ioctl->in.cm_apr.h_qp = h_qp; + + p_buf = (uint8_t*)((&p_ioctl->in.cm_apr) + 1); + if( p_ioctl->in.cm_apr.p_info ) + { + p_ioctl->in.cm_apr.p_info = (ib_apr_info_t*)p_buf; + p_buf += p_ioctl->in.cm_apr.info_length; + } + if( p_ioctl->in.cm_apr.p_apr_pdata ) + p_ioctl->in.cm_apr.p_apr_pdata = p_buf; + + p_ioctl->out.status = ib_cm_apr( h_cm_lap, &p_ioctl->in.cm_apr ); + + __deref_conn( h_cm_lap ); + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_CM ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_cm_force_apm( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + /* + * The force APM path needs to just update the primary path index. + * The actual QP modify needs to happen in UM.
+ */ + UNUSED_PARAM( p_open_context ); + UNUSED_PARAM( h_ioctl ); + UNUSED_PARAM( p_ret_bytes ); + return IB_ERROR; +} + + +cl_status_t +cm_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( h_ioctl && p_ret_bytes ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_CM_LISTEN: + cl_status = proxy_cm_listen( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_REQ: + cl_status = proxy_cm_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_REP: + cl_status = proxy_cm_rep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_RTU: + cl_status = proxy_cm_rtu( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_REJ: + cl_status = proxy_cm_rej( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_DREQ: + cl_status = proxy_cm_dreq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_DREP: + cl_status = proxy_cm_drep( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_MRA: + cl_status = proxy_cm_mra( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_LAP: + cl_status = proxy_cm_lap( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_APR: + cl_status = proxy_cm_apr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_FORCE_APM: + cl_status = proxy_cm_force_apm( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CM_CANCEL: + cl_status = proxy_cm_cancel( p_context, h_ioctl, p_ret_bytes ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy_ioc.c b/branches/Ndi/core/al/kernel/al_proxy_ioc.c new file mode 100644 index 00000000..b218b9c6 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy_ioc.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
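
The cm_ioctl dispatcher above is the kernel end of a buffered DeviceIoControl round trip. A hypothetical user-mode caller might look like the sketch below; the control codes are the UAL_CM_* values dispatched above, but the helper and its naming are illustrative, not part of the UAL library:

#include <windows.h>

static BOOL
demo_cm_ioctl(
	HANDLE	h_al_dev,		/* handle to the AL proxy device */
	DWORD	ioctl_code,		/* e.g. one of the UAL_CM_* codes */
	void	*p_in, DWORD in_size,
	void	*p_out, DWORD out_size )
{
	DWORD bytes_ret = 0;

	if( !DeviceIoControl( h_al_dev, ioctl_code, p_in, in_size,
		p_out, out_size, &bytes_ret, NULL ) )
	{
		return FALSE;
	}
	/* The handlers set *p_ret_bytes = sizeof(out) in all cases, so
	 * anything shorter means the request never reached its handler. */
	return bytes_ret == out_size;
}
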
+ * + * $Id$ + */ + + +#include +#include +#include +#include "al_debug.h" +#include "al_dev.h" +#include "al_proxy.h" + + +cl_status_t +ioc_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + + UNUSED_PARAM( p_ret_bytes ); + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy_subnet.c b/branches/Ndi/core/al/kernel/al_proxy_subnet.c new file mode 100644 index 00000000..cad58974 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy_subnet.c @@ -0,0 +1,1158 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include + +#include "al.h" +#include "al_av.h" +#include "al_ca.h" +#include "al_cq.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_proxy_subnet.tmh" +#endif +#include "al_dev.h" +#include "al_mad_pool.h" +#include "al_mr.h" +#include "al_mw.h" +#include "al_pd.h" +#include "al_qp.h" +#include "ib_common.h" +#include "al_proxy.h" + + +extern ib_pool_handle_t gh_mad_pool; + + + +static +cl_status_t +proxy_reg_svc( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + UNUSED_PARAM( p_open_context ); + UNUSED_PARAM( h_ioctl ); + UNUSED_PARAM( p_ret_bytes ); + return CL_ERROR; +} +static +cl_status_t +proxy_dereg_svc( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + UNUSED_PARAM( p_open_context ); + UNUSED_PARAM( h_ioctl ); + UNUSED_PARAM( p_ret_bytes ); + return CL_ERROR; +} + + +static void +__proxy_sa_req_cb( + IN al_sa_req_t *p_sa_req, + IN ib_mad_element_t *p_mad_response ) +{ + IRP *p_irp; + IO_STACK_LOCATION *p_io_stack; + ual_send_sa_req_ioctl_t *p_ioctl; + al_dev_open_context_t *p_context; + uint64_t hdl; + + AL_ENTER( AL_DBG_QUERY ); + + p_irp = (IRP*)p_sa_req->user_context; + CL_ASSERT( p_irp ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + p_ioctl = cl_ioctl_out_buf( p_irp ); + + p_context = p_io_stack->FileObject->FsContext; + ASSERT( p_context ); +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + /* Clear the pointer to the query to prevent cancelation. 
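
The exchange below is the heart of the cancel-safety scheme: the completion callback and the cancel routine both swap DriverContext[0] atomically, so exactly one of them observes the live handle and cleans it up. Reduced to a standalone model (demo_* names are illustrative):

#include <windows.h>
#include <stdio.h>

static void * volatile demo_slot;	/* models DriverContext[0] */

static void
demo_completion_side( void )
{
	void *p_req = InterlockedExchangePointer( &demo_slot, NULL );
	if( p_req )
		printf( "completion claimed %p\n", p_req );	/* frees the handle */
}

static void
demo_cancel_side( void )
{
	void *p_req = InterlockedExchangePointer( &demo_slot, NULL );
	if( p_req )
		printf( "cancel claimed %p\n", p_req );		/* cancels, then frees */
}
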
*/ + hdl = (size_t)InterlockedExchangePointer( + &p_irp->Tail.Overlay.DriverContext[0], AL_INVALID_HANDLE ); + + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + if( hdl != AL_INVALID_HANDLE ) + { + CL_ASSERT( p_sa_req == + al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ ) ); + al_hdl_free( p_context->h_al, hdl ); + } + + p_ioctl->out.status = p_sa_req->status; + if( p_mad_response ) + { + /* Insert an item to track the MAD until the user fetches it. */ + hdl = al_hdl_insert( p_context->h_al, + p_mad_response, AL_OBJ_TYPE_H_MAD ); + if( hdl != AL_INVALID_HANDLE ) + { + p_ioctl->out.h_resp = hdl; + p_ioctl->out.resp_size = p_mad_response->size; + } + else + { + p_ioctl->out.h_resp = AL_INVALID_HANDLE; + p_ioctl->out.resp_size = 0; + p_ioctl->out.status = IB_TIMEOUT; + ib_put_mad( p_sa_req->p_mad_response ); + } + } + else + { + p_ioctl->out.h_resp = AL_INVALID_HANDLE; + p_ioctl->out.resp_size = 0; + } + cl_spinlock_release( &p_context->h_al->obj.lock ); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + p_irp->IoStatus.Information = sizeof(p_ioctl->out); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + /* Release the reference taken when the query was initiated. */ + proxy_context_deref( p_context ); + + cl_free( p_sa_req ); + + AL_EXIT( AL_DBG_QUERY ); +} + + +static void +__proxy_cancel_sa_req( + IN DEVICE_OBJECT* p_dev_obj, + IN IRP* p_irp ) +{ + al_dev_open_context_t *p_context; + PIO_STACK_LOCATION p_io_stack; + uint64_t hdl; + al_sa_req_t *p_sa_req; + + AL_ENTER( AL_DBG_DEV ); + + UNUSED_PARAM( p_dev_obj ); + + /* Get the stack location. */ + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + p_context = (al_dev_open_context_t *)p_io_stack->FileObject->FsContext; + ASSERT( p_context ); + + hdl = (size_t)InterlockedExchangePointer( + &p_irp->Tail.Overlay.DriverContext[0], NULL ); + if( hdl != AL_INVALID_HANDLE ) + { +#pragma warning(push, 3) + IoSetCancelRoutine( p_irp, NULL ); +#pragma warning(pop) + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + p_sa_req = al_hdl_chk( p_context->h_al, hdl, AL_OBJ_TYPE_H_SA_REQ ); + CL_ASSERT( p_sa_req ); + al_cancel_sa_req( p_sa_req ); + al_hdl_free( p_context->h_al, hdl ); + cl_spinlock_release( &p_context->h_al->obj.lock ); + } + + IoReleaseCancelSpinLock( p_irp->CancelIrql ); +} + + +static cl_status_t +proxy_send_sa_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_send_sa_req_ioctl_t *p_ioctl; + cl_status_t status; + ib_api_status_t ib_status, *p_usr_status; + IO_STACK_LOCATION *p_io_stack; + al_dev_open_context_t *p_context; + al_sa_req_t *p_sa_req; + uint64_t hdl, *p_usr_hdl; + + AL_ENTER( AL_DBG_QUERY ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = p_open_context; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + /* + * We support SA requests coming in either through the main file object + * or the async file handle. 
+ */ + if( p_io_stack->FileObject->FsContext2 && + (uintn_t)p_io_stack->FileObject->FsContext2 != AL_OBJ_TYPE_SA_REQ_SVC ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Invalid file object type for request: %016I64x\n", + (LONG_PTR)p_io_stack->FileObject->FsContext2) ); + return CL_INVALID_PARAMETER; + } + + /* Check the size of the ioctl */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid IOCTL buffers.\n") ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = cl_ioctl_in_buf( h_ioctl ); + CL_ASSERT( p_ioctl ); + + /* Must save user's pointers in case req completes before call returns. */ + p_usr_status = p_ioctl->in.p_status; + p_usr_hdl = p_ioctl->in.ph_sa_req; + + if( p_ioctl->in.sa_req.attr_size > IB_SA_DATA_SIZE ) + { + ib_status = IB_INVALID_SETTING; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid SA data size: %d\n", + p_ioctl->in.sa_req.attr_size) ); + goto proxy_send_sa_req_err1; + } + + p_sa_req = (al_sa_req_t*)cl_zalloc( sizeof(al_sa_req_t) ); + if( !p_sa_req ) + { + ib_status = IB_INSUFFICIENT_MEMORY; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate SA req.\n") ); + goto proxy_send_sa_req_err1; + } + + /* Synchronize with callbacks. */ + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + + /* Track the request. */ + hdl = al_hdl_insert( p_context->h_al, p_sa_req, AL_OBJ_TYPE_H_SA_REQ ); + if( hdl == AL_INVALID_HANDLE ) + { + ib_status = IB_INSUFFICIENT_MEMORY; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to create handle.\n") ); + goto proxy_send_sa_req_err2; + } + + /* + * Store the handle in the IRP's driver context so we can cancel it. + * Note that the handle is really a size_t variable, but is cast to a + * uint64_t to provide constant size in mixed 32- and 64-bit environments. + */ + h_ioctl->Tail.Overlay.DriverContext[0] = (void*)(size_t)hdl; + + /* Format the SA request */ + p_sa_req->user_context = h_ioctl; + p_sa_req->pfn_sa_req_cb = __proxy_sa_req_cb; + + p_ioctl->in.sa_req.p_attr = p_ioctl->in.attr; + + /* + * We never pass the user-mode flag when sending SA requests - the + * I/O manager will perform all synchronization to make this IRP sync + * if it needs to. + */ + ib_status = al_send_sa_req( p_sa_req, p_ioctl->in.port_guid, + p_ioctl->in.timeout_ms, p_ioctl->in.retry_cnt, + &p_ioctl->in.sa_req, 0 ); + if( ib_status == IB_SUCCESS ) + { + /* Hold a reference on the proxy context until the request completes. 
*/ + proxy_context_ref( p_context ); +#pragma warning(push, 3) + IoSetCancelRoutine( h_ioctl, __proxy_cancel_sa_req ); +#pragma warning(pop) + IoMarkIrpPending( h_ioctl ); + + cl_spinlock_release( &p_context->h_al->obj.lock ); + + cl_copy_to_user( p_usr_hdl, &hdl, sizeof(hdl) ); + status = CL_PENDING; + } + else + { + al_hdl_free( p_context->h_al, hdl ); + +proxy_send_sa_req_err2: + cl_spinlock_release( &p_context->h_al->obj.lock ); + cl_free( p_sa_req ); + +proxy_send_sa_req_err1: + status = CL_INVALID_PARAMETER; + } + + cl_copy_to_user( p_usr_status, &ib_status, sizeof(ib_api_status_t) ); + + AL_EXIT( AL_DBG_QUERY ); + return status; +} + + +static cl_status_t +proxy_cancel_sa_req( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cancel_sa_req_ioctl_t *p_ioctl; + al_dev_open_context_t *p_context; + al_sa_req_t *p_sa_req; + + AL_ENTER( AL_DBG_QUERY ); + + UNUSED_PARAM( p_ret_bytes ); + + p_context = p_open_context; + + /* Check the size of the ioctl */ + if( cl_ioctl_in_size( h_ioctl ) != sizeof(ual_cancel_sa_req_ioctl_t) || + cl_ioctl_out_size( h_ioctl ) ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Invalid input buffer.\n") ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = cl_ioctl_in_buf( h_ioctl ); + CL_ASSERT( p_ioctl ); + + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + p_sa_req = + al_hdl_chk( p_context->h_al, p_ioctl->h_sa_req, AL_OBJ_TYPE_H_SA_REQ ); + if( p_sa_req ) + al_cancel_sa_req( p_sa_req ); + cl_spinlock_release( &p_context->h_al->obj.lock ); + + AL_EXIT( AL_DBG_QUERY ); + return CL_SUCCESS; +} + + +static cl_status_t +proxy_send_mad( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_send_mad_ioctl_t *p_ioctl = + (ual_send_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mad_svc_handle_t h_mad_svc; + ib_pool_key_t pool_key = NULL; + ib_av_handle_t h_av = NULL; + ib_mad_element_t *p_mad_el; + al_mad_element_t *p_al_el; + ib_mad_t *p_mad_buf, *p_usr_buf; + ib_grh_t *p_grh, *p_usr_grh; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD ); + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + /* Validate mad svc handle. */ + h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ); + if( !h_mad_svc ) + { + status = IB_INVALID_HANDLE; + goto proxy_send_mad_err1; + } + + /* Validate the pool key */ + pool_key = (ib_pool_key_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY ); + if( !pool_key ) + { + status = IB_INVALID_HANDLE; + goto proxy_send_mad_err1; + } + + /* Validate the AV handle in the mad element if it is not NULL. */ + if( p_ioctl->in.h_av ) + { + h_av = (ib_av_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV ); + if( !h_av ) + { + status = IB_INVALID_AV_HANDLE; + goto proxy_send_mad_err1; + } + } + + /* + * Get a mad element from kernel MAD pool + * This should not fail since the pool is set to grow + * dynamically + */ + status = ib_get_mad( pool_key, p_ioctl->in.size, &p_mad_el ); + if( status != IB_SUCCESS ) + goto proxy_send_mad_err1; + + /* Store the MAD and GRH buffers pointers. 
*/ + p_mad_buf = p_mad_el->p_mad_buf; + p_grh = p_mad_el->p_grh; + + /* Now copy the mad element with all info */ + status = ib_convert_cl_status( cl_copy_from_user( p_mad_el, + p_ioctl->in.p_mad_element, sizeof(ib_mad_element_t) ) ); + if( status != IB_SUCCESS ) + goto proxy_send_mad_err2; + + /* Store the UM pointers. */ + p_usr_buf = p_mad_el->p_mad_buf; + p_usr_grh = p_mad_el->p_grh; + /* Restore the MAD and GRH buffer pointers. */ + p_mad_el->p_mad_buf = p_mad_buf; + p_mad_el->p_grh = p_grh; + /* Clear the next pointer. */ + p_mad_el->p_next = NULL; + /* + * Override the send context so that a response's MAD has a way + * of getting back to the associated send. This is needed because a + * MAD receive completion could fail to be delivered to the app even though + * the response was properly received in the kernel. + */ + p_mad_el->context1 = p_ioctl->in.p_mad_element; + + /* Set the kernel AV handle. This is either NULL or a valid KM handle. */ + p_mad_el->h_av = h_av; + + /* Copy the GRH, if valid. */ + if( p_mad_el->grh_valid ) + { + status = ib_convert_cl_status( + cl_copy_from_user( p_grh, p_usr_grh, sizeof(ib_grh_t) ) ); + if( status != IB_SUCCESS ) + goto proxy_send_mad_err2; + } + + /* Copy the mad payload. */ + status = ib_convert_cl_status( + cl_copy_from_user( p_mad_buf, p_usr_buf, p_ioctl->in.size ) ); + if( status != IB_SUCCESS ) + goto proxy_send_mad_err2; + + /* Copy the handle to UM to allow cancelling. */ + status = ib_convert_cl_status( cl_copy_to_user( + p_ioctl->in.ph_proxy, p_mad_el, sizeof(ib_mad_element_t*) ) ); + if( status != IB_SUCCESS ) + goto proxy_send_mad_err2; + + /* + * Copy the UM element pointer to the kernel's AL element + * for use in completion generation. + */ + p_al_el = PARENT_STRUCT( p_mad_el, al_mad_element_t, element ); + p_al_el->h_proxy_element = p_ioctl->in.p_mad_element; + + /* Post the element. */ + status = ib_send_mad( h_mad_svc, p_mad_el, NULL ); + + if( status != IB_SUCCESS ) + { +proxy_send_mad_err2: + ib_put_mad( p_mad_el ); + } +proxy_send_mad_err1: + + if( h_av ) + deref_al_obj( &h_av->obj ); + if( pool_key ) + deref_al_obj( &pool_key->obj ); + if( h_mad_svc ) + deref_al_obj( &h_mad_svc->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl to retrieve a received MAD. + */ +static cl_status_t +proxy_mad_comp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_mad_recv_ioctl_t *p_ioctl; + al_dev_open_context_t *p_context; + ib_mad_element_t *p_mad; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = (ual_mad_recv_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + p_context = (al_dev_open_context_t*)p_open_context; + + /* Validate the MAD handle and remove it from the handle manager. */ + p_mad = al_hdl_get_mad( p_context->h_al, p_ioctl->in.h_mad ); + if( !p_mad ) + { + status = IB_INVALID_HANDLE; + goto proxy_mad_comp_err1; + } + + /* + * Return the MAD to the user. The user-mode library is responsible + * for correcting all pointers. 
+ */ + status = ib_convert_cl_status( cl_copy_to_user( + p_ioctl->in.p_user_mad, p_mad, sizeof(ib_mad_element_t) ) ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Unable to copy element to user's MAD\n") ); + goto proxy_mad_comp_err2; + } + + /* Copy the MAD buffer. */ + status = ib_convert_cl_status( cl_copy_to_user( + p_ioctl->in.p_mad_buf, p_mad->p_mad_buf, p_mad->size ) ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Unable to copy buffer to user's MAD\n") ); + goto proxy_mad_comp_err2; + } + + /* Copy the GRH if it is valid. */ + if( p_mad->grh_valid ) + { + status = ib_convert_cl_status( cl_copy_to_user( + p_ioctl->in.p_grh, p_mad->p_grh, sizeof(ib_grh_t) ) ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Unable to copy GRH to user's MAD\n") ); + goto proxy_mad_comp_err2; + } + } + + if( status == IB_SUCCESS ) + { + ib_put_mad( p_mad ); + } + else + { +proxy_mad_comp_err2: + ib_put_mad( p_mad ); +proxy_mad_comp_err1: + cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) ); + } + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +static cl_status_t +proxy_init_dgrm( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + UNUSED_PARAM( p_open_context ); + UNUSED_PARAM( h_ioctl ); + UNUSED_PARAM( p_ret_bytes ); + return CL_ERROR; +} + + + +static void +__proxy_mad_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + misc_cb_ioctl_info_t cb_info; + al_dev_open_context_t *p_context; + al_mad_element_t *p_al_el; + + AL_ENTER( AL_DBG_MAD ); + + CL_ASSERT( p_mad_element ); + CL_ASSERT( !p_mad_element->p_next ); + p_context = h_mad_svc->obj.h_al->p_context; + p_al_el = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( proxy_context_ref( p_context ) ) + { + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = MAD_SEND_REC; + cb_info.ioctl_rec.mad_send_cb_ioctl_rec.wc_status = + p_mad_element->status; + cb_info.ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad = + p_al_el->h_proxy_element; + cb_info.ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context = + mad_svc_context; + + /* Queue this mad completion notification for the user. */ + proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info, + &h_mad_svc->obj ); + } + + /* Return the MAD. 
*/ + ib_put_mad( p_mad_element ); + + proxy_context_deref( p_context ); + AL_EXIT( AL_DBG_MAD ); +} + + + +static void +__proxy_mad_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + misc_cb_ioctl_info_t cb_info; + al_dev_open_context_t *p_context; + al_mad_element_t *p_al_mad; + uint64_t hdl; + + AL_ENTER( AL_DBG_MAD ); + + p_context = h_mad_svc->obj.h_al->p_context; + + p_al_mad = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = MAD_RECV_REC; + cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context = mad_svc_context; + cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.elem_size = p_mad_element->size; + cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad = + (ib_mad_element_t* __ptr64)p_mad_element->send_context1; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + ib_put_mad( p_mad_element ); + AL_EXIT( AL_DBG_MAD ); + return; + } + + /* Insert an item to track the MAD until the user fetches it. */ + cl_spinlock_acquire( &p_context->h_al->obj.lock ); + hdl = al_hdl_insert( p_context->h_al, p_mad_element, AL_OBJ_TYPE_H_MAD ); + if( hdl == AL_INVALID_HANDLE ) + goto proxy_mad_recv_cb_err; + + cb_info.ioctl_rec.mad_recv_cb_ioctl_rec.h_mad = hdl; + + /* Queue this mad completion notification for the user. */ + if( !proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info, + &h_mad_svc->obj ) ) + { + al_hdl_free( p_context->h_al, hdl ); +proxy_mad_recv_cb_err: + ib_put_mad( p_mad_element ); + } + cl_spinlock_release( &p_context->h_al->obj.lock ); + + proxy_context_deref( p_context ); + + AL_EXIT( AL_DBG_MAD ); +} + + + +static cl_status_t +proxy_reg_mad_svc( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_reg_mad_svc_ioctl_t *p_ioctl = + (ual_reg_mad_svc_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + ib_mad_svc_handle_t h_mad_svc; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + return CL_SUCCESS; + } + + /* Now proxy's mad_svc overrides */ + p_ioctl->in.mad_svc.pfn_mad_send_cb = __proxy_mad_send_cb; + p_ioctl->in.mad_svc.pfn_mad_recv_cb = __proxy_mad_recv_cb; + + p_ioctl->out.status = reg_mad_svc( h_qp, + &p_ioctl->in.mad_svc, &h_mad_svc ); + if( p_ioctl->out.status == IB_SUCCESS ) + { + p_ioctl->out.h_mad_svc = h_mad_svc->obj.hdl; + h_mad_svc->obj.hdl_valid = TRUE; + deref_al_obj( &h_mad_svc->obj ); + } + else + { + p_ioctl->out.h_mad_svc = AL_INVALID_HANDLE; + } + + deref_al_obj( &h_qp->obj ); + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +/* + * Deregister the MAD service. 
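
The al_hdl_insert/al_hdl_free calls in the receive callback above follow the same handle-table discipline used throughout the proxy: user mode only ever sees small opaque handles, never kernel pointers. A toy, single-threaded version (the real table lives in h_al and is guarded by its object lock):

#include <stdint.h>

#define DEMO_INVALID_HDL	((uint64_t)~0ULL)
#define DEMO_TBL_SIZE		64

static void *demo_tbl[DEMO_TBL_SIZE];

static uint64_t
demo_hdl_insert(
	void	*p_obj )
{
	uint64_t i;

	for( i = 0; i < DEMO_TBL_SIZE; i++ )
	{
		if( !demo_tbl[i] )
		{
			demo_tbl[i] = p_obj;
			return i;
		}
	}
	return DEMO_INVALID_HDL;	/* full: caller unwinds, as the cb does */
}

static void*
demo_hdl_get(
	uint64_t	hdl )
{
	return (hdl < DEMO_TBL_SIZE) ? demo_tbl[hdl] : NULL;
}

static void
demo_hdl_free(
	uint64_t	hdl )
{
	if( hdl < DEMO_TBL_SIZE )
		demo_tbl[hdl] = NULL;
}
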
+ */ +static cl_status_t +proxy_dereg_mad_svc( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_dereg_mad_svc_ioctl_t *p_ioctl; + al_dev_open_context_t *p_context; + ib_mad_svc_handle_t h_mad_svc; + + AL_ENTER( AL_DBG_MAD ); + + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IOCTL buffer is invalid\n") ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = (ual_dereg_mad_svc_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + p_context = (al_dev_open_context_t*)p_open_context; + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate MAD service. */ + h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ); + if( !h_mad_svc ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return CL_SUCCESS; + } + + /* Destroy the MAD service. */ + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +/* + * UAL only uses reg_mad_pool/dereg_mad_pool ioctls + * create/destroy mad pool is implicit in these ioctls + */ +static +cl_status_t +proxy_reg_mad_pool( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_reg_mad_pool_ioctl_t *p_ioctl = + (ual_reg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_pool_key_t pool_key; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + p_ioctl->out.status = IB_INVALID_PD_HANDLE; + p_ioctl->out.pool_key = AL_INVALID_HANDLE; + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PD_HANDLE\n") ); + return CL_SUCCESS; + } + + /* + * If we're in the kernel, we are using the global MAD pool. Other + * MAD pools remain entirely in user-mode. + */ + + /* Register the PD with the MAD pool to obtain a pool_key. */ + p_ioctl->out.status = reg_mad_pool( gh_mad_pool, h_pd, &pool_key ); + if( p_ioctl->out.status == IB_SUCCESS ) + { + /* Track the pool info with the process context. */ + p_ioctl->out.pool_key = pool_key->obj.hdl; + pool_key->obj.hdl_valid = TRUE; + deref_al_obj( &pool_key->obj ); + } + else + { + p_ioctl->out.pool_key = AL_INVALID_HANDLE; + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("reg_mad_pool returned %s.\n", + ib_get_err_str(p_ioctl->out.status)) ); + } + + deref_al_obj( &h_pd->obj ); + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +/* + * Deregister the pool_key with the MAD pool. Destroy the MAD pool if we + * created one. 
+ */ +static +cl_status_t +proxy_dereg_mad_pool( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_dereg_mad_pool_ioctl_t *p_ioctl = + (ual_dereg_mad_pool_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pool_key_t pool_key; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IOCTL buffer is invalid\n") ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate pool key */ + pool_key = (ib_pool_key_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.pool_key, AL_OBJ_TYPE_H_POOL_KEY ); + if( !pool_key ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("User-mode provided pool key is invalid\n") ); + return CL_SUCCESS; + } + + /* We should only have alias pool keys exported to user-mode. */ + p_ioctl->out.status = dereg_mad_pool( pool_key, AL_KEY_ALIAS ); + if( p_ioctl->out.status != IB_SUCCESS ) + { + deref_al_obj( &pool_key->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("dereg_mad_pool failed: %s\n", + ib_get_err_str( p_ioctl->out.status )) ); + } + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + + +cl_status_t +proxy_cancel_mad( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_cancel_mad_ioctl_t *p_ioctl; + al_dev_open_context_t *p_context; + ib_mad_svc_handle_t h_mad_svc; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + p_ioctl = (ual_cancel_mad_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + p_context = (al_dev_open_context_t*)p_open_context; + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate MAD service handle. */ + h_mad_svc = (ib_mad_svc_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_mad_svc, AL_OBJ_TYPE_H_MAD_SVC ); + if( !h_mad_svc ) + { + p_ioctl->out.status = IB_INVALID_HANDLE; + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; + } + + p_ioctl->out.status = + ib_cancel_mad( h_mad_svc, p_ioctl->in.h_proxy_element ); + + /* + * The clean up of resources allocated for the sent mad will + * be handled in the send completion callback + */ + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_LOCAL_MAD: + */ +static cl_status_t +proxy_local_mad( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_local_mad_ioctl_t *p_ioctl = + (ual_local_mad_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MAD ); + + /* Validate input buffers. 
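
proxy_local_mad below starts by peeking at the MAD header's method field. For reference, the common MAD base header begins as sketched here (per the InfiniBand common MAD header layout; only the leading fields are shown, and IB_MAD_METHOD_GET corresponds to method 0x01):

#include <stdint.h>

#define DEMO_MAD_METHOD_GET	0x01

typedef struct _demo_mad_hdr
{
	uint8_t		base_ver;	/* MAD base version */
	uint8_t		mgmt_class;	/* management class */
	uint8_t		class_ver;	/* class version */
	uint8_t		method;		/* Get/Set/... plus the response bit */
} demo_mad_hdr_t;

static int
demo_mad_is_get(
	const void	*p_mad_in )
{
	return ((const demo_mad_hdr_t*)p_mad_in)->method ==
		DEMO_MAD_METHOD_GET;
}
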
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MAD ); + return CL_INVALID_PARAMETER; + } + + if( ((ib_mad_t*)p_ioctl->in.mad_in)->method != IB_MAD_METHOD_GET ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("invalid method %d\n", ((ib_mad_t*)p_ioctl->in.mad_in)->method) ); + status = IB_UNSUPPORTED; + goto proxy_local_mad_err; + } + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + status = IB_INVALID_CA_HANDLE; + goto proxy_local_mad_err; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + status = ib_local_mad( + h_ca, p_ioctl->in.port_num, p_ioctl->in.mad_in, p_ioctl->out.mad_out ); + + deref_al_obj( &h_ca->obj ); + +proxy_local_mad_err: + p_ioctl->out.status = status; + + AL_EXIT( AL_DBG_MAD ); + return CL_SUCCESS; +} + + +cl_status_t +subnet_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( h_ioctl && p_ret_bytes ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_REG_SVC: + cl_status = proxy_reg_svc( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_SEND_SA_REQ: + cl_status = proxy_send_sa_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CANCEL_SA_REQ: + cl_status = proxy_cancel_sa_req( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MAD_SEND: + cl_status = proxy_send_mad( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_INIT_DGRM_SVC: + cl_status = proxy_init_dgrm( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REG_MAD_SVC: + cl_status = proxy_reg_mad_svc( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REG_MAD_POOL: + cl_status = proxy_reg_mad_pool( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CANCEL_MAD: + cl_status = proxy_cancel_mad( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MAD_RECV_COMP: + cl_status = proxy_mad_comp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DEREG_SVC: + cl_status = proxy_dereg_svc( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DEREG_MAD_SVC: + cl_status = proxy_dereg_mad_svc( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DEREG_MAD_POOL: + cl_status = proxy_dereg_mad_pool( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_LOCAL_MAD: + cl_status = proxy_local_mad( p_context, h_ioctl, p_ret_bytes ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_proxy_verbs.c b/branches/Ndi/core/al/kernel/al_proxy_verbs.c new file mode 100644 index 00000000..5bf7db8f --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_proxy_verbs.c @@ -0,0 +1,3947 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include "al.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_proxy_verbs.tmh" +#endif + +#include "al_dev.h" +/* Get the internal definitions of apis for the proxy */ +#include "al_ca.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_srq.h" +#include "al_cq.h" +#include "al_mr.h" +#include "al_mw.h" +#include "al_av.h" +#include "al_ci_ca.h" +#include "al_mgr.h" +#include "ib_common.h" +#include "al_proxy.h" + + +extern al_mgr_t *gp_al_mgr; + + +/* + * + * Utility function to: + * a. allocate an umv_buf and p_buf in kernel space + * b. copy umv_buf and the contents of p_buf from user-mode + * + * It is assumed that the p_buf does not have any embedded user-mode pointers + */ + +static +ib_api_status_t +cpyin_umvbuf( + IN ci_umv_buf_t *p_src, + OUT ci_umv_buf_t **pp_dst ) +{ + size_t umv_buf_size; + ci_umv_buf_t *p_dest; + + /* Allocate space for umv_buf */ + CL_ASSERT( pp_dst ); + + umv_buf_size = sizeof(ci_umv_buf_t); + umv_buf_size += MAX(p_src->input_size, p_src->output_size); + + if( p_src->p_inout_buf ) + { + if( p_src->input_size && + cl_check_for_read( p_src->p_inout_buf, (size_t)p_src->input_size ) + != CL_SUCCESS ) + { + /* user-supplied memory area not readable */ + return IB_INVALID_PERMISSION; + } + if( p_src->output_size && + cl_check_for_write( p_src->p_inout_buf, (size_t)p_src->output_size ) + != CL_SUCCESS ) + { + /* user-supplied memory area not writeable */ + return IB_INVALID_PERMISSION; + } + } + p_dest = (ci_umv_buf_t*)cl_zalloc( (size_t)umv_buf_size ); + if( !p_dest ) + return IB_INSUFFICIENT_MEMORY; + + /* Copy the umv_buf structure. */ + *p_dest = *p_src; + if( p_src->p_inout_buf ) + p_dest->p_inout_buf = (void*)(p_dest + 1); + + /* Setup the buffer - either we have an input or output buffer */ + if( p_src->input_size ) + { + if( cl_copy_from_user( p_dest->p_inout_buf, p_src->p_inout_buf, + (size_t)p_src->input_size ) != CL_SUCCESS ) + { + cl_free( p_dest ); + return IB_INVALID_PERMISSION; + } + } + *pp_dst = p_dest; + return IB_SUCCESS; +} + + + +/* + * + * Utility function to copy the results of umv_buf and the contents + * of p_buf to umv_buf in user-space. 
+ * + * It is assumed that the p_buf does not have any embedded user-mode pointers + * + * This function can NOT be called from asynchronous callbacks where + * user process context may not be valid + * + */ +static +ib_api_status_t +cpyout_umvbuf( + IN ci_umv_buf_t *p_dest, + IN ci_umv_buf_t *p_src) +{ + CL_ASSERT( p_dest ); + + if( p_src ) + { + CL_ASSERT( p_dest->command == p_src->command ); + CL_ASSERT( p_dest->input_size == p_src->input_size ); + /* Copy output buf only on success. */ + if( p_src->status == IB_SUCCESS ) + { + uint32_t out_size; + + out_size = MIN( p_dest->output_size, p_src->output_size ); + + if( cl_copy_to_user( p_dest->p_inout_buf, p_src->p_inout_buf, + out_size ) != CL_SUCCESS ) + { + p_dest->output_size = 0; + return IB_INVALID_PERMISSION; + } + p_dest->status = p_src->status; + p_dest->output_size = out_size; + } + } + return IB_SUCCESS; +} + + +static void +free_umvbuf( + IN ci_umv_buf_t *p_umv_buf ) +{ + if( p_umv_buf ) + cl_free( p_umv_buf ); +} + + + +/* + * Process the ioctl UAL_GET_VENDOR_LIBCFG: + */ +static cl_status_t +proxy_get_vendor_libcfg( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_get_uvp_name_ioctl_t *p_ioctl = + (ual_get_uvp_name_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + al_ci_ca_t *p_ci_ca; + + AL_ENTER( AL_DBG_CA ); + + UNUSED_PARAM( p_open_context ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + /* Find the CAguid */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CA, + ("CA guid %I64x.\n", p_ioctl->in.ca_guid) ); + + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( p_ioctl->in.ca_guid ); + + if( !p_ci_ca ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + p_ioctl->out.status = IB_NOT_FOUND; + } + else + { + /* found the ca guid, copy the user-mode verbs provider libname */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_CA, + ("CA guid %I64x. libname (%s)\n", + p_ioctl->in.ca_guid, p_ci_ca->verbs.libname) ); + cl_memcpy( p_ioctl->out.uvp_lib_name, p_ci_ca->verbs.libname, + sizeof(p_ci_ca->verbs.libname)); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + p_ioctl->out.status = IB_SUCCESS; + } + *p_ret_bytes = sizeof(p_ioctl->out); + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + +/* + * Allocate an ioctl buffer of appropriate size + * Copy the given ioctl buffer + * Queue the ioctl buffer as needed + */ +boolean_t +proxy_queue_cb_buf( + IN uintn_t cb_type, + IN al_dev_open_context_t *p_context, + IN void *p_cb_data, + IN al_obj_t *p_al_obj OPTIONAL ) +{ + cl_qlist_t *p_cb_list; + al_proxy_cb_info_t *p_cb_info; + cl_ioctl_handle_t *ph_ioctl, h_ioctl; + uintn_t ioctl_size; + + AL_ENTER( AL_DBG_DEV ); + + /* Set up the appropriate callback list. */ + switch( cb_type ) + { + case UAL_GET_CM_CB_INFO: + p_cb_list = &p_context->cm_cb_list; + ph_ioctl = &p_context->h_cm_ioctl; + ioctl_size = sizeof( cm_cb_ioctl_info_t ); + break; + + case UAL_GET_COMP_CB_INFO: + p_cb_list = &p_context->comp_cb_list; + ph_ioctl = &p_context->h_comp_ioctl; + ioctl_size = sizeof( comp_cb_ioctl_info_t ); + break; + + case UAL_GET_MISC_CB_INFO: + p_cb_list = &p_context->misc_cb_list; + ph_ioctl = &p_context->h_misc_ioctl; + ioctl_size = sizeof( misc_cb_ioctl_info_t ); + break; + + default: + return FALSE; + } + + /* Get a callback record to queue the callback. 
*/ + p_cb_info = proxy_cb_get( p_context ); + if( !p_cb_info ) + return FALSE; + + cl_memcpy( &p_cb_info->cb_type, p_cb_data, ioctl_size ); + + /* + * If an AL object was specified, we need to reference it to prevent its + * destruction until the callback has been fully specified. + */ + if( p_al_obj ) + { + p_cb_info->p_al_obj = p_al_obj; + ref_al_obj( p_al_obj ); + } + + /* Insert the callback record into the callback list */ + cl_spinlock_acquire( &p_context->cb_lock ); + cl_qlist_insert_tail( p_cb_list, &p_cb_info->pool_item.list_item ); + + /* See if there is a pending IOCTL ready to receive the callback. */ + if( *ph_ioctl ) + { + h_ioctl = *ph_ioctl; + *ph_ioctl = NULL; +#pragma warning(push, 3) + IoSetCancelRoutine( h_ioctl, NULL ); +#pragma warning(pop) + + p_cb_info->reported = TRUE; + + /* Complete the IOCTL to return the callback information. */ + CL_ASSERT( cl_ioctl_out_size( h_ioctl ) >= ioctl_size ); + cl_memcpy( cl_ioctl_out_buf( h_ioctl ), p_cb_data, ioctl_size ); + cl_ioctl_complete( h_ioctl, CL_SUCCESS, ioctl_size ); + proxy_context_deref( p_context ); + } + cl_spinlock_release( &p_context->cb_lock ); + + AL_EXIT( AL_DBG_DEV ); + return TRUE; +} + + +/* + * Proxy's ca error callback + * The context in the error record is proxy's ca context + * Context is the a list object in the CA list + */ +static void +proxy_ca_err_cb( + IN ib_async_event_rec_t *p_err_rec) +{ + ib_ca_handle_t h_ca = p_err_rec->handle.h_ca; + al_dev_open_context_t *p_context = h_ca->obj.h_al->p_context; + misc_cb_ioctl_info_t cb_info; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = CA_ERROR_REC; + /* Return the Proxy's open_ca handle and the user's context */ + cb_info.ioctl_rec.event_rec = *p_err_rec; + cb_info.ioctl_rec.event_rec.handle.h_ca = (ib_ca_handle_t)h_ca->obj.hdl; + + /* The proxy handle must be valid now. */ + if( !h_ca->obj.hdl_valid ) + h_ca->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info, + &h_ca->obj ); + + proxy_context_deref( p_context ); +} + + +/* + * Process the ioctl UAL_OPEN_CA: + * + * Returns the ca_list_obj as the handle to UAL + */ +static cl_status_t +proxy_open_ca( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_open_ca_ioctl_t *p_ioctl = + (ual_open_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CA ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases. 
*/ + *p_ret_bytes = sizeof(p_ioctl->out); + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_open_ca_err; + + status = open_ca( p_context->h_al, p_ioctl->in.guid, proxy_ca_err_cb, + p_ioctl->in.context, &h_ca, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_open_ca_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_ca = h_ca->obj.hdl; + h_ca->obj.hdl_valid = TRUE; + /* Release the reference taken in init_al_obj */ + deref_al_obj( &h_ca->obj ); + } + else + { + h_ca->obj.pfn_destroy( &h_ca->obj, NULL ); + +proxy_open_ca_err: /* getting a handle failed. */ + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_ca = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + p_ioctl->out.status = status; + + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_QUERY_CA: + */ +static cl_status_t +proxy_query_ca( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_ca_ioctl_t *p_ioctl = + (ual_query_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ib_ca_attr_t *p_ca_attr = NULL; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + uint32_t byte_cnt = 0; + + AL_ENTER( AL_DBG_CA ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + status = IB_INVALID_CA_HANDLE; + goto proxy_query_ca_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_ca_err; + + byte_cnt = p_ioctl->in.byte_cnt; + if( p_ioctl->in.p_ca_attr && byte_cnt ) + { + p_ca_attr = (ib_ca_attr_t*)cl_zalloc( byte_cnt ); + if( !p_ca_attr ) + { + status = IB_INSUFFICIENT_MEMORY; + goto proxy_query_ca_err; + } + } + status = query_ca( h_ca, p_ca_attr, &byte_cnt, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_query_ca_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_query_ca_err; + + /* copy CA attribute back to user */ + if( p_ca_attr ) + { + __try + { + ProbeForWrite( p_ioctl->in.p_ca_attr, byte_cnt, sizeof(void*) ); + ib_copy_ca_attr( p_ioctl->in.p_ca_attr, p_ca_attr ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to copy CA attributes to user buffer %016I64x\n", + (LONG64)p_ioctl->in.p_ca_attr) ); + status = IB_INVALID_PERMISSION; + } + } + + /* Free the ca_attr buffer allocated locally */ + if( status != IB_SUCCESS ) + { +proxy_query_ca_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + } + if( p_ca_attr ) + cl_free( p_ca_attr ); + + free_umvbuf( p_umv_buf ); + + if( h_ca ) + deref_al_obj( &h_ca->obj ); + + p_ioctl->out.status = status; + p_ioctl->out.byte_cnt = byte_cnt; + + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_MODIFY_CA: + */ +static +cl_status_t +proxy_modify_ca( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t 
*p_ret_bytes ) +{ + ual_modify_ca_ioctl_t *p_ioctl = + (ual_modify_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + + AL_ENTER( AL_DBG_CA ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + p_ioctl->out.status = IB_INVALID_CA_HANDLE; + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; + } + + p_ioctl->out.status = ib_modify_ca( h_ca, p_ioctl->in.port_num, + p_ioctl->in.ca_mod, &p_ioctl->in.port_attr_mod ); + + deref_al_obj( &h_ca->obj ); + + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_CLOSE_CA: + */ +static +cl_status_t +proxy_close_ca( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_close_ca_ioctl_t *p_ioctl = + (ual_close_ca_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + + AL_ENTER( AL_DBG_CA ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + p_ioctl->out.status = IB_INVALID_CA_HANDLE; + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; + } + + /* + * Note that we hold a reference on the CA, so we need to + * call close_ca, not ib_close_ca. We also don't release the reference + * since close_ca will do so (by destroying the object). + */ + h_ca->obj.pfn_destroy( &h_ca->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + +/* + * Validates the proxy handles and converts them to AL handles + */ +static ib_api_status_t +__convert_to_al_handles( + IN al_dev_open_context_t* const p_context, + IN uint64_t* const um_handle_array, + IN uint32_t num_handles, + OUT void* __ptr64 * const p_handle_array ) +{ + uint32_t i; + + for( i = 0; i < num_handles; i++ ) + { + /* Validate the handle in the resource map */ + p_handle_array[i] = al_hdl_ref( + p_context->h_al, um_handle_array[i], AL_OBJ_TYPE_UNKNOWN ); + if( !p_handle_array[i] ) + { + /* Release references taken so far. 
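
The "while( i-- )" loop that follows is the standard partial-failure unwind: release exactly the references already taken, newest first. In isolation:

#include <stddef.h>

static int	demo_acquire( size_t i ) { return i != 3; }	/* fails at #3 */
static void	demo_release( size_t i ) { (void)i; }

static int
demo_acquire_all(
	size_t	n )
{
	size_t i;

	for( i = 0; i < n; i++ )
	{
		if( !demo_acquire( i ) )
		{
			/* Unwind i-1 .. 0: only what was actually acquired. */
			while( i-- )
				demo_release( i );
			return -1;
		}
	}
	return 0;
}
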
*/ + while( i-- ) + deref_al_obj( p_handle_array[i] ); + + /* Could not find the handle in the map */ + return IB_INVALID_HANDLE; + } + } + + return IB_SUCCESS; +} + + + +static +cl_status_t +proxy_ci_call( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_ci_call_ioctl_t *p_ioctl = + (ual_ci_call_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ci_umv_buf_t *p_umv_buf = NULL; + void* p_ci_op_buf = NULL; + void* p_ci_op_user_buf = NULL; + void* __ptr64 * p_handle_array = NULL; + size_t ci_op_buf_size; + ib_api_status_t status; + uint32_t num_handles; + + AL_ENTER( AL_DBG_CA ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + num_handles = p_ioctl->in.num_handles; + if( num_handles > 1 && + cl_ioctl_in_size( h_ioctl ) != (sizeof(uint64_t) * (num_handles - 1)) ) + { + AL_EXIT( AL_DBG_CA ); + return CL_INVALID_PARAMETER; + } + + ci_op_buf_size = (size_t)p_ioctl->in.ci_op.buf_size; + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + p_ioctl->out.status = IB_INVALID_CA_HANDLE; + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; + } + + /* Save the user buffer address */ + p_ci_op_user_buf = p_ioctl->in.ci_op.p_buf; + + /* Validate the handle array */ + if( num_handles ) + { + p_handle_array = cl_malloc( sizeof(void* __ptr64) * num_handles ); + if( !p_handle_array ) + { + p_ioctl->out.status = IB_INSUFFICIENT_MEMORY; + deref_al_obj( &h_ca->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to allocate handle array.\n") ); + return CL_SUCCESS; + } + + /* + * Now we have the handle array in kernel space. Replace + * the handles with the correct AL handles based on the + * type + */ + status = __convert_to_al_handles( p_context, p_ioctl->in.handle_array, + num_handles, p_handle_array ); + if( status != IB_SUCCESS ) + { + cl_free( p_handle_array ); + p_ioctl->out.status = status; + deref_al_obj( &h_ca->obj ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("Failed to convert handles.\n") ); + return CL_SUCCESS; + } + } + + /* Copy in the UMV buffer */ + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_ci_call_err; + + if( p_ioctl->in.ci_op.buf_size && p_ioctl->in.ci_op.p_buf ) + { + p_ci_op_buf = cl_zalloc( ci_op_buf_size ); + if( !p_ci_op_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto proxy_ci_call_err; + } + + /* Copy from user the buffer */ + if( cl_copy_from_user( p_ci_op_buf, p_ioctl->in.ci_op.p_buf, + ci_op_buf_size ) != CL_SUCCESS ) + { + status = IB_INVALID_PERMISSION; + goto proxy_ci_call_err; + } + /* Update the buffer pointer to reference the kernel copy. */ + p_ioctl->in.ci_op.p_buf = p_ci_op_buf; + } + + status = ci_call( h_ca, p_handle_array, + num_handles, &p_ioctl->in.ci_op, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_ci_call_err; + + /* Copy the umv_buf back to user space */ + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { + status = IB_INVALID_PERMISSION; + goto proxy_ci_call_err; + } + + /* + * Copy the data buffer. 
Copy the buf size so that if the + * num_bytes_ret is greater than the buffer size, we copy + * only what the buffer can hold + */ + if( cl_copy_to_user( p_ci_op_user_buf, p_ioctl->in.ci_op.p_buf, + ci_op_buf_size ) != CL_SUCCESS ) + { + status = IB_INVALID_PERMISSION; + } + +proxy_ci_call_err: + + /* Restore the data buffer */ + p_ioctl->out.ci_op.p_buf = p_ci_op_user_buf; + p_ioctl->out.status = status; + + /* Release the resources allocated */ + if( p_handle_array ) + { + while( num_handles-- ) + deref_al_obj( (al_obj_t* __ptr64)p_handle_array[num_handles] ); + cl_free( p_handle_array ); + } + if( p_ci_op_buf ) + cl_free( p_ci_op_buf ); + + free_umvbuf( p_umv_buf ); + + deref_al_obj( &h_ca->obj ); + + AL_EXIT( AL_DBG_CA ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_ALLOC_PD: + * + * Returns the pd_list_obj as the handle to UAL + */ +static +cl_status_t +proxy_alloc_pd( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_alloc_pd_ioctl_t *p_ioctl = + (ual_alloc_pd_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_PD ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_PD ); + return CL_INVALID_PARAMETER; + } + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + status = IB_INVALID_CA_HANDLE; + goto proxy_alloc_pd_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_alloc_pd_err; + + status = alloc_pd( h_ca, p_ioctl->in.type, p_ioctl->in.context, + &h_pd, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_alloc_pd_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_pd = h_pd->obj.hdl; + h_pd->obj.hdl_valid = TRUE; + deref_al_obj( &h_pd->obj ); + } + else + { + h_pd->obj.pfn_destroy( &h_pd->obj, NULL ); + +proxy_alloc_pd_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_pd = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + if( h_ca ) + deref_al_obj( &h_ca->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_PD ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_DEALLOC_PD: + */ +static cl_status_t +proxy_dealloc_pd( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_dealloc_pd_ioctl_t *p_ioctl = + (ual_dealloc_pd_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + + AL_ENTER( AL_DBG_PD ); + + /* Validate input buffers. 
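
proxy_dealloc_pd below repeats the destroy-consumes-the-reference convention noted at proxy_close_ca: the handler already holds a reference from al_hdl_ref, so it invokes the object's destroy entry point directly and never derefs; destruction releases that final reference. Schematically (a simplified stand-in for the al_obj_t machinery):

typedef struct _demo_al_obj
{
	long	ref_cnt;
	void	(*pfn_destroy)( struct _demo_al_obj *p_obj );
} demo_al_obj_t;

static void
demo_obj_destroy(
	demo_al_obj_t	*p_obj )
{
	/* Consumes the caller's reference rather than pairing it with a
	 * deref; resources are freed once the count reaches zero. */
	if( --p_obj->ref_cnt == 0 )
	{
		/* free the object's resources here */
	}
}
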
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_PD ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate PD handle */ + h_pd = (ib_pd_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + p_ioctl->out.status = IB_INVALID_PD_HANDLE; + AL_EXIT( AL_DBG_PD ); + return CL_SUCCESS; + } + + h_pd->obj.pfn_destroy( &h_pd->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_PD ); + return CL_SUCCESS; +} + + +/* + * Proxy's SRQ error handler + */ +static void +proxy_srq_err_cb( + IN ib_async_event_rec_t *p_err_rec ) +{ + ib_srq_handle_t h_srq = p_err_rec->handle.h_srq; + al_dev_open_context_t *p_context = h_srq->obj.h_al->p_context; + misc_cb_ioctl_info_t cb_info; + + AL_ENTER( AL_DBG_QP ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = SRQ_ERROR_REC; + /* Return the Proxy's SRQ handle and the user's context */ + cb_info.ioctl_rec.event_rec = *p_err_rec; + cb_info.ioctl_rec.event_rec.handle.h_srq = (ib_srq_handle_t)h_srq->obj.hdl; + + /* The proxy handle must be valid now. */ + if( !h_srq->obj.hdl_valid ) + h_srq->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( + UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_srq->obj ); + + proxy_context_deref( p_context ); + + AL_EXIT( AL_DBG_QP ); +} + +/* + * Process the ioctl UAL_CREATE_SRQ + * + * Returns the srq_list_obj as the handle to UAL + */ +static cl_status_t +proxy_create_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_create_srq_ioctl_t *p_ioctl = + (ual_create_srq_ioctl_t*)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + ib_pfn_event_cb_t pfn_ev; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate handles. 
*/ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_create_srq_err1; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_srq_err1; + + if( p_ioctl->in.ev_notify ) + pfn_ev = proxy_srq_err_cb; + else + pfn_ev = NULL; + + status = create_srq( h_pd, &p_ioctl->in.srq_attr, p_ioctl->in.context, + pfn_ev, &h_srq, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_srq_err1; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_srq = h_srq->obj.hdl; + h_srq->obj.hdl_valid = TRUE; + /* Release the reference taken in create_srq (by init_al_obj) */ + deref_al_obj( &h_srq->obj ); + } + else + { +proxy_create_srq_err1: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_srq = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_QUERY_SRQ: + */ +static +cl_status_t +proxy_query_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_srq_ioctl_t *p_ioctl = + (ual_query_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_query_srq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_srq_err; + + status = query_srq( h_srq, &p_ioctl->out.srq_attr, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_srq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_query_srq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + cl_memclr( &p_ioctl->out.srq_attr, sizeof(ib_srq_attr_t) ); + } + free_umvbuf( p_umv_buf ); + + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_MODIFY_SRQ: + */ +static +cl_status_t +proxy_modify_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_modify_srq_ioctl_t *p_ioctl = + (ual_modify_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_modify_srq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_modify_srq_err; + + status = modify_srq( h_srq, &p_ioctl->in.srq_attr, p_ioctl->in.srq_attr_mask, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_modify_srq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_modify_srq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + } + free_umvbuf( p_umv_buf ); + + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_DESTROY_SRQ + */ +static cl_status_t +proxy_destroy_srq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_destroy_srq_ioctl_t *p_ioctl = + (ual_destroy_srq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + + AL_ENTER( AL_DBG_SRQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_SRQ ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_SRQ ); + if( !h_srq ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_SRQ_HANDLE\n") ); + p_ioctl->out.status = IB_INVALID_SRQ_HANDLE; + } + else + { + h_srq->obj.pfn_destroy( &h_srq->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + } + + AL_EXIT( AL_DBG_SRQ ); + return CL_SUCCESS; +} + + +/* + * Proxy's QP error handler + */ +static void +proxy_qp_err_cb( + IN ib_async_event_rec_t *p_err_rec ) +{ + ib_qp_handle_t h_qp = p_err_rec->handle.h_qp; + al_dev_open_context_t *p_context = h_qp->obj.h_al->p_context; + misc_cb_ioctl_info_t cb_info; + + AL_ENTER( AL_DBG_QP ); + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = QP_ERROR_REC; + /* Return the Proxy's QP handle and the user's context */ + cb_info.ioctl_rec.event_rec = *p_err_rec; + cb_info.ioctl_rec.event_rec.handle.h_qp = (ib_qp_handle_t)h_qp->obj.hdl; + + /* The proxy handle must be valid now. 
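+ * The handle is returned to user-mode in the event record, so mark
+ * it valid before queuing the callback.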
*/
+	if( !h_qp->obj.hdl_valid )
+		h_qp->obj.hdl_valid = TRUE;
+
+	proxy_queue_cb_buf(
+		UAL_GET_MISC_CB_INFO, p_context, &cb_info, &h_qp->obj );
+
+	proxy_context_deref( p_context );
+
+	AL_EXIT( AL_DBG_QP );
+}
+
+
+
+/*
+ * Process the ioctl UAL_CREATE_QP
+ *
+ * Returns the qp_list_obj as the handle to UAL
+ */
+static cl_status_t
+proxy_create_qp(
+	IN		void				*p_open_context,
+	IN		cl_ioctl_handle_t	h_ioctl,
+	OUT		size_t				*p_ret_bytes )
+{
+	ual_create_qp_ioctl_t	*p_ioctl =
+		(ual_create_qp_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_pd_handle_t			h_pd;
+	ib_qp_handle_t			h_qp;
+	ib_srq_handle_t			h_srq = NULL;
+	ib_cq_handle_t			h_sq_cq, h_rq_cq;
+	ci_umv_buf_t			*p_umv_buf = NULL;
+	ib_api_status_t			status;
+	ib_pfn_event_cb_t		pfn_ev;
+
+	AL_ENTER( AL_DBG_QP );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_QP );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate handles. */
+	h_pd = (ib_pd_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD );
+	h_sq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,
+		(uint64_t)p_ioctl->in.qp_create.h_sq_cq, AL_OBJ_TYPE_H_CQ );
+	h_rq_cq = (ib_cq_handle_t)al_hdl_ref( p_context->h_al,
+		(uint64_t)p_ioctl->in.qp_create.h_rq_cq, AL_OBJ_TYPE_H_CQ );
+	if( p_ioctl->in.qp_create.h_srq )
+	{
+		h_srq = (ib_srq_handle_t)al_hdl_ref( p_context->h_al,
+			(uint64_t)p_ioctl->in.qp_create.h_srq, AL_OBJ_TYPE_H_SRQ );
+		if( !h_srq )
+		{
+			status = IB_INVALID_SRQ_HANDLE;
+			goto proxy_create_qp_err1;
+		}
+	}
+	if( !h_pd )
+	{
+		status = IB_INVALID_PD_HANDLE;
+		goto proxy_create_qp_err1;
+	}
+	if( !h_sq_cq || !h_rq_cq )
+	{
+		status = IB_INVALID_CQ_HANDLE;
+		goto proxy_create_qp_err1;
+	}
+
+	/* Substitute sq_cq handle with AL's cq handle */
+	p_ioctl->in.qp_create.h_sq_cq = h_sq_cq;
+	/* Substitute rq_cq handle with AL's cq handle */
+	p_ioctl->in.qp_create.h_rq_cq = h_rq_cq;
+	/* Substitute srq handle with AL's srq handle */
+	p_ioctl->in.qp_create.h_srq = h_srq;
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_create_qp_err1;
+
+	if( p_ioctl->in.ev_notify )
+		pfn_ev = proxy_qp_err_cb;
+	else
+		pfn_ev = NULL;
+
+	status = create_qp( h_pd, &p_ioctl->in.qp_create, p_ioctl->in.context,
+		pfn_ev, &h_qp, p_umv_buf );
+	/* TODO: The create_qp call should return the attributes...
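+	 * Until it does, the query_qp call below retrieves them into
+	 * p_ioctl->out.attr.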
*/ + if( status != IB_SUCCESS ) + goto proxy_create_qp_err1; + + status = query_qp( h_qp, &p_ioctl->out.attr, NULL ); + if( status != IB_SUCCESS ) + goto proxy_create_qp_err2; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_qp = h_qp->obj.hdl; + h_qp->obj.hdl_valid = TRUE; + /* Release the reference taken in create_qp (by init_al_obj) */ + deref_al_obj( &h_qp->obj ); + } + else + { +proxy_create_qp_err2: + /* + * Note that we hold the reference taken in create_qp (by init_al_obj) + */ + h_qp->obj.pfn_destroy( &h_qp->obj, NULL ); + +proxy_create_qp_err1: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_qp = AL_INVALID_HANDLE; + cl_memclr( &p_ioctl->out.attr, sizeof(ib_qp_attr_t) ); + } + free_umvbuf( p_umv_buf ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + if( h_rq_cq ) + deref_al_obj( &h_rq_cq->obj ); + if( h_sq_cq ) + deref_al_obj( &h_sq_cq->obj ); + if( h_srq ) + deref_al_obj( &h_srq->obj ); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_QUERY_QP: + */ +static +cl_status_t +proxy_query_qp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_qp_ioctl_t *p_ioctl = + (ual_query_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_query_qp_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_qp_err; + + status = query_qp( h_qp, &p_ioctl->out.attr, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_qp_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + if( p_ioctl->out.attr.h_pd ) + { + p_ioctl->out.attr.h_pd = + (ib_pd_handle_t)p_ioctl->out.attr.h_pd->obj.hdl; + } + else + { + p_ioctl->out.attr.h_pd = NULL; + } + if( p_ioctl->out.attr.h_sq_cq ) + { + p_ioctl->out.attr.h_sq_cq = + (ib_cq_handle_t)p_ioctl->out.attr.h_sq_cq->obj.hdl; + } + else + { + p_ioctl->out.attr.h_sq_cq = NULL; + } + if( p_ioctl->out.attr.h_rq_cq ) + { + p_ioctl->out.attr.h_rq_cq = + (ib_cq_handle_t)p_ioctl->out.attr.h_rq_cq->obj.hdl; + } + else + { + p_ioctl->out.attr.h_rq_cq = NULL; + } + if( p_ioctl->out.attr.h_srq ) + { + p_ioctl->out.attr.h_srq = + (ib_srq_handle_t)p_ioctl->out.attr.h_srq->obj.hdl; + } + else + { + p_ioctl->out.attr.h_srq = NULL; + } + } + else + { +proxy_query_qp_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + cl_memclr( &p_ioctl->out.attr, sizeof(ib_qp_attr_t) ); + } + free_umvbuf( p_umv_buf ); + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_MODIFY_QP: + */ +static +cl_status_t +proxy_modify_qp( + IN void *p_open_context, + IN 
cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_modify_qp_ioctl_t *p_ioctl = + (ual_modify_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_modify_qp_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_modify_qp_err; + + status = modify_qp( h_qp, &p_ioctl->in.modify_attr, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_modify_qp_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_modify_qp_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + } + free_umvbuf( p_umv_buf ); + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_DESTROY_QP + */ +static cl_status_t +proxy_destroy_qp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_destroy_qp_ioctl_t *p_ioctl = + (ual_destroy_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_QP_HANDLE\n") ); + p_ioctl->out.status = IB_INVALID_QP_HANDLE; + } + else + { + h_qp->obj.pfn_destroy( &h_qp->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + } + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_CREATE_AV: + */ +static +cl_status_t +proxy_create_av( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_create_av_ioctl_t *p_ioctl = + (ual_create_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_av_handle_t h_av; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_AV ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_AV ); + return CL_INVALID_PARAMETER; + } + + /* Validate PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_create_av_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_av_err; + + status = create_av( h_pd, &p_ioctl->in.attr, &h_av, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_create_av_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_av = h_av->obj.hdl; + h_av->obj.hdl_valid = TRUE; + /* Release the reference taken in create_av. */ + deref_al_obj( &h_av->obj ); + } + else + { + h_av->obj.pfn_destroy( &h_av->obj, NULL ); + +proxy_create_av_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_av = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_AV ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_QUERY_AV: + */ +static +cl_status_t +proxy_query_av( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_av_ioctl_t *p_ioctl = + (ual_query_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_av_handle_t h_av; + ib_pd_handle_t h_pd; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_AV ); + + /* Validate input buffers. 
*/
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_AV );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate AV handle */
+	h_av = (ib_av_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
+	if( !h_av )
+	{
+		status = IB_INVALID_AV_HANDLE;
+		goto proxy_query_av_err;
+	}
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_query_av_err;
+
+	status = query_av( h_av, &p_ioctl->out.attr, &h_pd, p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_query_av_err;
+
+	status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
+	if( status == IB_SUCCESS )
+	{
+		/*
+		 * Return the user-mode context of the PD with which the AV
+		 * was created.
+		 */
+		p_ioctl->out.pd_context = (void*)h_pd->obj.context;
+	}
+	else
+	{
+proxy_query_av_err:
+		p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
+		cl_memclr( &p_ioctl->out.attr, sizeof(ib_av_attr_t) );
+		p_ioctl->out.pd_context = NULL;
+	}
+	free_umvbuf( p_umv_buf );
+
+	if( h_av )
+		deref_al_obj( &h_av->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_AV );
+	return CL_SUCCESS;
+}
+
+
+
+/*
+ * Process the ioctl UAL_MODIFY_AV:
+ */
+static
+cl_status_t
+proxy_modify_av(
+	IN		void				*p_open_context,
+	IN		cl_ioctl_handle_t	h_ioctl,
+	OUT		size_t				*p_ret_bytes )
+{
+	ual_modify_av_ioctl_t	*p_ioctl =
+		(ual_modify_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_av_handle_t			h_av;
+	ci_umv_buf_t			*p_umv_buf = NULL;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_AV );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_AV );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Validate AV handle */
+	h_av = (ib_av_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV );
+	if( !h_av )
+	{
+		status = IB_INVALID_AV_HANDLE;
+		goto proxy_modify_av_err;
+	}
+
+	status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_modify_av_err;
+
+	status = modify_av( h_av, &p_ioctl->in.attr, p_umv_buf );
+	if( status != IB_SUCCESS )
+		goto proxy_modify_av_err;
+
+	status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf );
+	if( status != IB_SUCCESS )
+	{
+proxy_modify_av_err:
+		p_ioctl->out.umv_buf = p_ioctl->in.umv_buf;
+	}
+	free_umvbuf( p_umv_buf );
+
+	if( h_av )
+		deref_al_obj( &h_av->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_AV );
+	return CL_SUCCESS;
+}
+
+
+
+/*
+ * Process the ioctl UAL_DESTROY_AV:
+ */
+static
+cl_status_t
+proxy_destroy_av(
+	IN		void				*p_open_context,
+	IN		cl_ioctl_handle_t	h_ioctl,
+	OUT		size_t				*p_ret_bytes )
+{
+	ual_destroy_av_ioctl_t	*p_ioctl =
+		(ual_destroy_av_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_av_handle_t			h_av;
+
+	AL_ENTER( AL_DBG_AV );
+
+	/* Validate input buffers.
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_AV ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate AV handle */ + h_av = (ib_av_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_av, AL_OBJ_TYPE_H_AV ); + if( !h_av ) + { + p_ioctl->out.status = IB_INVALID_AV_HANDLE; + AL_EXIT( AL_DBG_AV ); + return CL_SUCCESS; + } + + h_av->obj.pfn_destroy( &h_av->obj, NULL ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_AV ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_MODIFY_CQ: + */ +static +cl_status_t +proxy_modify_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_modify_cq_ioctl_t *p_ioctl = + (ual_modify_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cq_handle_t h_cq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + uint32_t size; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate CQ handle */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + status = IB_INVALID_CQ_HANDLE; + goto proxy_modify_cq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_modify_cq_err; + + size = p_ioctl->in.size; + status = modify_cq( h_cq, &size, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_modify_cq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.size = size; + } + else + { +proxy_modify_cq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.size = 0; + } + free_umvbuf( p_umv_buf ); + + if( h_cq ) + deref_al_obj( &h_cq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + + +/* + * Proxy's CQ completion callback + */ +static void +proxy_cq_comp_cb( + IN ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + comp_cb_ioctl_info_t cb_info; + al_dev_open_context_t *p_context = h_cq->obj.h_al->p_context; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. + */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.cq_context = cq_context; + + /* The proxy handle must be valid now. */ + if( !h_cq->obj.hdl_valid ) + h_cq->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( UAL_GET_COMP_CB_INFO, p_context, &cb_info, + &h_cq->obj ); + + proxy_context_deref( p_context ); +} + + + +/* + * Proxy's CQ error callback + */ +static void +proxy_cq_err_cb( + IN ib_async_event_rec_t *p_err_rec) +{ + ib_cq_handle_t h_cq = p_err_rec->handle.h_cq; + al_dev_open_context_t *p_context = h_cq->obj.h_al->p_context; + misc_cb_ioctl_info_t cb_info; + + /* + * If we're already closing the device - do not queue a callback, since + * we're cleaning up the callback lists. 
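+ * Note that proxy_context_ref() takes a reference even when it
+ * fails, which is why the failure path below still calls
+ * proxy_context_deref().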
+ */ + if( !proxy_context_ref( p_context ) ) + { + proxy_context_deref( p_context ); + return; + } + + /* Set up context and callback record type appropriate for UAL */ + cb_info.rec_type = CQ_ERROR_REC; + /* Return the Proxy's cq handle and the user's context */ + cb_info.ioctl_rec.event_rec = *p_err_rec; + cb_info.ioctl_rec.event_rec.handle.h_cq = (ib_cq_handle_t)h_cq->obj.hdl; + + /* The proxy handle must be valid now. */ + if( !h_cq->obj.hdl_valid ) + h_cq->obj.hdl_valid = TRUE; + + proxy_queue_cb_buf( UAL_GET_MISC_CB_INFO, p_context, &cb_info, + &h_cq->obj ); + proxy_context_deref( p_context ); +} + + + +/* + * Process the ioctl UAL_CREATE_CQ: + */ +static cl_status_t +proxy_create_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_create_cq_ioctl_t *p_ioctl = + (ual_create_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_ca_handle_t h_ca; + ib_cq_handle_t h_cq; + ib_cq_create_t cq_create; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + ib_pfn_event_cb_t pfn_ev; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate CA handle */ + h_ca = (ib_ca_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_ca, AL_OBJ_TYPE_H_CA ); + if( !h_ca ) + { + status = IB_INVALID_CA_HANDLE; + goto proxy_create_cq_err1; + } + + cq_create.size = p_ioctl->in.size; + + if( p_ioctl->in.h_wait_obj ) + { + cq_create.pfn_comp_cb = NULL; + cq_create.h_wait_obj = cl_waitobj_ref( p_ioctl->in.h_wait_obj ); + if( !cq_create.h_wait_obj ) + { + status = IB_INVALID_PARAMETER; + goto proxy_create_cq_err1; + } + } + else + { + /* Override with proxy's cq callback */ + cq_create.pfn_comp_cb = proxy_cq_comp_cb; + cq_create.h_wait_obj = NULL; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_cq_err2; + + if( p_ioctl->in.ev_notify ) + pfn_ev = proxy_cq_err_cb; + else + pfn_ev = NULL; + + status = create_cq( h_ca, &cq_create, p_ioctl->in.context, + pfn_ev, &h_cq, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_create_cq_err2; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.size = cq_create.size; + p_ioctl->out.h_cq = h_cq->obj.hdl; + h_cq->obj.hdl_valid = TRUE; + deref_al_obj( &h_cq->obj ); + } + else + { + h_cq->obj.pfn_destroy( &h_cq->obj, NULL ); + +proxy_create_cq_err2: + if( cq_create.h_wait_obj ) + cl_waitobj_deref( cq_create.h_wait_obj ); + +proxy_create_cq_err1: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_cq = AL_INVALID_HANDLE; + p_ioctl->out.size = 0; + } + free_umvbuf( p_umv_buf ); + + if( h_ca ) + deref_al_obj( &h_ca->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_QUERY_CQ: + */ +static +cl_status_t +proxy_query_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_cq_ioctl_t *p_ioctl = + (ual_query_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cq_handle_t h_cq; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t 
status; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate CQ handle */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + status = IB_INVALID_CQ_HANDLE; + goto proxy_query_cq_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_cq_err; + + status = query_cq( h_cq, &p_ioctl->out.size, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_query_cq_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status != IB_SUCCESS ) + { +proxy_query_cq_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.size = 0; + } + free_umvbuf( p_umv_buf ); + + if( h_cq ) + deref_al_obj( &h_cq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_DESTROY_CQ + */ +static +cl_status_t +proxy_destroy_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_destroy_cq_ioctl_t *p_ioctl = + (ual_destroy_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cq_handle_t h_cq; + cl_waitobj_handle_t h_wait_obj; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CQ handle */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + p_ioctl->out.status = IB_INVALID_CQ_HANDLE; + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; + } + + h_wait_obj = h_cq->h_wait_obj; + + h_cq->obj.pfn_destroy( &h_cq->obj, ib_sync_destroy ); + + /* Deref the wait object, if any. */ + if( h_wait_obj ) + cl_waitobj_deref( h_wait_obj ); + + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_POST_SEND + */ +static +cl_status_t +proxy_post_send( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_post_send_ioctl_t *p_ioctl = + (ual_post_send_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + ib_av_handle_t h_av; + ib_send_wr_t *p_wr; + ib_send_wr_t *p_send_failure; + uintn_t i = 0; + ib_local_ds_t *p_ds; + uintn_t num_ds = 0; + ib_api_status_t status; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_QP ); + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* + * Additional input buffer validation based on actual settings. + * Note that this validates that work requests are actually + * being passed in. 
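+ * For example (illustrative values only): num_wr = 2 with num_ds = 3
+ * yields in_buf_sz = sizeof(p_ioctl->in) + 1 * sizeof(ib_send_wr_t) +
+ * 3 * sizeof(ib_local_ds_t).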
+ */ + in_buf_sz = sizeof(p_ioctl->in); + in_buf_sz += sizeof(ib_send_wr_t) * (p_ioctl->in.num_wr - 1); + in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds; + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Setup p_send_failure to head of list. */ + p_send_failure = p_wr = p_ioctl->in.send_wr; + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_post_send_done; + } + + /* Setup the base data segment pointer. */ + p_ds = (ib_local_ds_t*)&p_ioctl->in.send_wr[p_ioctl->in.num_wr]; + + /* Setup the user's work requests and data segments and translate. */ + for( i = 0; i < p_ioctl->in.num_wr; i++ ) + { + if( h_qp->type == IB_QPT_UNRELIABLE_DGRM ) + { + /* Validate the AV handle for UD */ + h_av = (ib_av_handle_t)al_hdl_ref( p_context->h_al, + (uint64_t)p_wr[i].dgrm.ud.h_av, AL_OBJ_TYPE_H_AV ); + if( !h_av ) + { + status = IB_INVALID_AV_HANDLE; + goto proxy_post_send_done; + } + /* substitute with KAL AV handle */ + p_wr[i].dgrm.ud.h_av = h_av; + } + + /* Setup the data segments, if any. */ + if( p_wr[i].num_ds ) + { + num_ds += p_wr[i].num_ds; + if( num_ds > p_ioctl->in.num_ds ) + { + /* + * The work request submitted exceed the number of data + * segments specified in the IOCTL. + */ + status = IB_INVALID_PARAMETER; + goto proxy_post_send_done; + } + p_wr[i].ds_array = p_ds; + p_ds += p_wr->num_ds; + } + else + { + p_wr[i].ds_array = NULL; + } + + p_wr[i].p_next = &p_wr[i + 1]; + } + + /* Mark the end of list. */ + p_wr[i - 1].p_next = NULL; + + /* so much for the set up, let's roll! */ + status = ib_post_send( h_qp, p_wr, &p_send_failure ); + + if( status == IB_SUCCESS ) + { + p_ioctl->out.failed_cnt = 0; + } + else + { +proxy_post_send_done: + /* First set up as if all failed. */ + p_ioctl->out.failed_cnt = p_ioctl->in.num_wr; + /* Now subtract successful ones. */ + p_ioctl->out.failed_cnt -= (uint32_t)( + (((uintn_t)p_send_failure) - ((uintn_t)p_wr)) + / sizeof(ib_send_wr_t)); + } + + /* releases the references on address vectors. */ + if( h_qp ) + { + if( h_qp->type == IB_QPT_UNRELIABLE_DGRM ) + { + while( i-- ) + deref_al_obj( &p_wr[i].dgrm.ud.h_av->obj ); + } + deref_al_obj( &h_qp->obj ); + } + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_POST_RECV + */ +static +cl_status_t +proxy_post_recv( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_post_recv_ioctl_t *p_ioctl = + (ual_post_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + ib_recv_wr_t *p_wr; + ib_recv_wr_t *p_recv_failure; + uintn_t i; + ib_local_ds_t *p_ds; + uintn_t num_ds = 0; + ib_api_status_t status; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* + * Additional input buffer validation based on actual settings. + * Note that this validates that work requests are actually + * being passed in. 
+ */ + in_buf_sz = sizeof(p_ioctl->in); + in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1); + in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds; + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Setup p_send_failure to head of list. */ + p_recv_failure = p_wr = p_ioctl->in.recv_wr; + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_post_recv_done; + } + + /* Setup the base data segment pointer. */ + p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr]; + + /* Setup the user's work requests and data segments and translate. */ + for( i = 0; i < p_ioctl->in.num_wr; i++ ) + { + /* Setup the data segments, if any. */ + if( p_wr[i].num_ds ) + { + num_ds += p_wr[i].num_ds; + if( num_ds > p_ioctl->in.num_ds ) + { + /* + * The work request submitted exceed the number of data + * segments specified in the IOCTL. + */ + status = IB_INVALID_PARAMETER; + goto proxy_post_recv_done; + } + p_wr[i].ds_array = p_ds; + p_ds += p_wr->num_ds; + } + else + { + p_wr[i].ds_array = NULL; + } + + p_wr[i].p_next = &p_wr[i + 1]; + } + + /* Mark the end of list. */ + p_wr[i-1].p_next = NULL; + + status = ib_post_recv( h_qp, p_wr, &p_recv_failure ); + + if( status == IB_SUCCESS ) + { + p_ioctl->out.failed_cnt = 0; + } + else + { +proxy_post_recv_done: + /* First set up as if all failed. */ + p_ioctl->out.failed_cnt = p_ioctl->in.num_wr; + /* Now subtract successful ones. */ + p_ioctl->out.failed_cnt -= (uint32_t)( + (((uintn_t)p_recv_failure) - ((uintn_t)p_wr)) + / sizeof(ib_recv_wr_t)); + } + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + +/* + * Process the ioctl UAL_POST_SRQ_RECV + */ +static +cl_status_t +proxy_post_srq_recv( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_post_srq_recv_ioctl_t *p_ioctl = + (ual_post_srq_recv_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_srq_handle_t h_srq; + ib_recv_wr_t *p_wr; + ib_recv_wr_t *p_recv_failure; + uintn_t i; + ib_local_ds_t *p_ds; + uintn_t num_ds = 0; + ib_api_status_t status; + size_t in_buf_sz; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) < sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* + * Additional input buffer validation based on actual settings. + * Note that this validates that work requests are actually + * being passed in. + */ + in_buf_sz = sizeof(p_ioctl->in); + in_buf_sz += sizeof(ib_recv_wr_t) * (p_ioctl->in.num_wr - 1); + in_buf_sz += sizeof(ib_local_ds_t) * p_ioctl->in.num_ds; + if( cl_ioctl_in_size( h_ioctl ) != in_buf_sz ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Setup p_send_failure to head of list. */ + p_recv_failure = p_wr = p_ioctl->in.recv_wr; + + /* Validate SRQ handle */ + h_srq = (ib_srq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_srq, AL_OBJ_TYPE_H_QP ); + if( !h_srq ) + { + status = IB_INVALID_SRQ_HANDLE; + goto proxy_post_recv_done; + } + + /* Setup the base data segment pointer. 
*/
+	p_ds = (ib_local_ds_t*)&p_ioctl->in.recv_wr[p_ioctl->in.num_wr];
+
+	/* Setup the user's work requests and data segments and translate. */
+	for( i = 0; i < p_ioctl->in.num_wr; i++ )
+	{
+		/* Setup the data segments, if any. */
+		if( p_wr[i].num_ds )
+		{
+			num_ds += p_wr[i].num_ds;
+			if( num_ds > p_ioctl->in.num_ds )
+			{
+				/*
+				 * The work requests submitted exceed the number of
+				 * data segments specified in the IOCTL.
+				 */
+				status = IB_INVALID_PARAMETER;
+				goto proxy_post_srq_recv_done;
+			}
+			p_wr[i].ds_array = p_ds;
+			p_ds += p_wr[i].num_ds;
+		}
+		else
+		{
+			p_wr[i].ds_array = NULL;
+		}
+
+		p_wr[i].p_next = &p_wr[i + 1];
+	}
+
+	/* Mark the end of list. */
+	p_wr[i-1].p_next = NULL;
+
+	status = ib_post_srq_recv( h_srq, p_wr, &p_recv_failure );
+
+	if( status == IB_SUCCESS )
+	{
+		p_ioctl->out.failed_cnt = 0;
+	}
+	else
+	{
+proxy_post_srq_recv_done:
+		/* First set up as if all failed. */
+		p_ioctl->out.failed_cnt = p_ioctl->in.num_wr;
+		/* Now subtract successful ones. */
+		p_ioctl->out.failed_cnt -= (uint32_t)(
+			(((uintn_t)p_recv_failure) - ((uintn_t)p_wr))
+			/ sizeof(ib_recv_wr_t));
+	}
+
+	if( h_srq )
+		deref_al_obj( &h_srq->obj );
+
+	p_ioctl->out.status = status;
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	AL_EXIT( AL_DBG_QP );
+	return CL_SUCCESS;
+}
+
+
+/*
+ * Process the ioctl UAL_PEEK_CQ
+ */
+static cl_status_t
+proxy_peek_cq(
+	IN		void				*p_open_context,
+	IN		cl_ioctl_handle_t	h_ioctl,
+	OUT		size_t				*p_ret_bytes )
+{
+	ual_peek_cq_ioctl_t	*p_ioctl =
+		(ual_peek_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl );
+	al_dev_open_context_t	*p_context =
+		(al_dev_open_context_t *)p_open_context;
+	ib_cq_handle_t			h_cq;
+
+	AL_ENTER( AL_DBG_CQ );
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_CQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/* Set the return bytes in all cases */
+	*p_ret_bytes = sizeof(p_ioctl->out);
+
+	/* Validate CQ handle */
+	h_cq = (ib_cq_handle_t)
+		al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ );
+	if( !h_cq )
+	{
+		p_ioctl->out.status = IB_INVALID_CQ_HANDLE;
+		p_ioctl->out.n_cqes = 0;
+		AL_EXIT( AL_DBG_CQ );
+		return CL_SUCCESS;
+	}
+
+	p_ioctl->out.status = ib_peek_cq( h_cq, &p_ioctl->out.n_cqes );
+
+	deref_al_obj( &h_cq->obj );
+
+	AL_EXIT( AL_DBG_CQ );
+	return CL_SUCCESS;
+}
+
+
+
+/*
+ * Process the ioctl UAL_POLL_CQ
+ */
+static cl_status_t
+proxy_poll_cq(
+	IN		void				*p_open_context,
+	IN		cl_ioctl_handle_t	h_ioctl,
+	OUT		size_t				*p_ret_bytes )
+{
+	ual_poll_cq_ioctl_t		*p_ioctl;
+	al_dev_open_context_t	*p_context;
+	ib_cq_handle_t			h_cq;
+	ib_wc_t					*p_free_wc;
+	ib_wc_t					*p_done_wc = NULL;
+	uint32_t				i, num_wc;
+	size_t					out_buf_sz;
+	ib_api_status_t			status;
+
+	AL_ENTER( AL_DBG_CQ );
+
+	p_ioctl = (ual_poll_cq_ioctl_t*)cl_ioctl_in_buf( h_ioctl );
+	p_context = (al_dev_open_context_t*)p_open_context;
+
+	/* Validate input buffers. */
+	if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) ||
+		cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) ||
+		cl_ioctl_out_size( h_ioctl ) < sizeof(p_ioctl->out) )
+	{
+		AL_EXIT( AL_DBG_CQ );
+		return CL_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Additional validation of input and output sizes.
+	 * Note that this also checks that work completions are actually
+	 * being passed in.
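+	 * For example (illustrative values only): num_wc = 4 yields
+	 * out_buf_sz = sizeof(p_ioctl->out) + 3 * sizeof(ib_wc_t).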
+ */ + out_buf_sz = sizeof(p_ioctl->out); + out_buf_sz += sizeof(ib_wc_t) * (p_ioctl->in.num_wc - 1); + if( cl_ioctl_out_size( h_ioctl ) != out_buf_sz ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Validate CQ handle. */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + status = IB_INVALID_CQ_HANDLE; + goto proxy_poll_cq_err; + } + + p_free_wc = p_ioctl->out.wc; + num_wc = p_ioctl->in.num_wc; + for( i = 0; i < num_wc; i++ ) + p_free_wc[i].p_next = &p_free_wc[i+1]; + p_free_wc[i - 1].p_next = NULL; + + status = ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ); + + /* + * If any of the completions are done, copy to user + * otherwise, just return + */ + if( status == IB_SUCCESS ) + { + CL_ASSERT( p_done_wc ); + /* Calculate the number of WCs. */ + if( !p_free_wc ) + { + p_ioctl->out.num_wc = num_wc; + } + else + { + p_ioctl->out.num_wc = (uint32_t) + (((uintn_t)p_free_wc) - ((uintn_t)p_done_wc)) / + sizeof(ib_wc_t); + } + } + else + { +proxy_poll_cq_err: + p_ioctl->out.num_wc = 0; + } + + if( h_cq ) + deref_al_obj( &h_cq->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out) - sizeof(ib_wc_t); + if( p_ioctl->out.num_wc ) + *p_ret_bytes += (sizeof(ib_wc_t) * (p_ioctl->out.num_wc)); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_REARM_CQ + */ +static cl_status_t +proxy_rearm_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_rearm_cq_ioctl_t *p_ioctl = + (ual_rearm_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cq_handle_t h_cq; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CQ handle */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + p_ioctl->out.status = IB_INVALID_CQ_HANDLE; + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; + } + + p_ioctl->out.status = ib_rearm_cq( h_cq, p_ioctl->in.solicited ); + + deref_al_obj( &h_cq->obj ); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_REARM_N_CQ + */ +static +cl_status_t +proxy_rearm_n_cq( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_rearm_n_cq_ioctl_t *p_ioctl = + (ual_rearm_n_cq_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_cq_handle_t h_cq; + + AL_ENTER( AL_DBG_CQ ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_CQ ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate CQ handle */ + h_cq = (ib_cq_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_cq, AL_OBJ_TYPE_H_CQ ); + if( !h_cq ) + { + p_ioctl->out.status = IB_INVALID_CQ_HANDLE; + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; + } + + p_ioctl->out.status = ib_rearm_n_cq( h_cq, p_ioctl->in.n_cqes ); + + deref_al_obj( &h_cq->obj ); + + AL_EXIT( AL_DBG_CQ ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_REGISTER_MEM: + */ +static cl_status_t +proxy_register_mr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_reg_mem_ioctl_t *p_ioctl = + (ual_reg_mem_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MR ); + return CL_INVALID_PARAMETER; + } + + /* Validate PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_register_mr_err; + } + + /* Validate input region size. */ + if( p_ioctl->in.mem_create.length > ~((size_t)0) ) + { + status = IB_INVALID_SETTING; + goto proxy_register_mr_err; + } + + status = reg_mem( h_pd, &p_ioctl->in.mem_create, &p_ioctl->out.lkey, + &p_ioctl->out.rkey, &h_mr, TRUE ); + + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_mr = h_mr->obj.hdl; + h_mr->obj.hdl_valid = TRUE; + deref_al_obj( &h_mr->obj ); + } + else + { +proxy_register_mr_err: + p_ioctl->out.h_mr = AL_INVALID_HANDLE; + p_ioctl->out.lkey = 0; + p_ioctl->out.rkey = 0; + } + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_QUERY_MEM: + */ +static cl_status_t +proxy_query_mr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_mr_ioctl_t *p_ioctl = + (ual_query_mr_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MR ); + return CL_INVALID_PARAMETER; + } + + /* Validate MR handle */ + h_mr = (ib_mr_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR ); + if( !h_mr ) + { + status = IB_INVALID_MR_HANDLE; + goto proxy_query_mr_err; + } + + status = ib_query_mr( h_mr, &p_ioctl->out.attr ); + + if( status == IB_SUCCESS ) + { + /* Replace the pd handle with proxy's handle */ + p_ioctl->out.attr.h_pd = + (ib_pd_handle_t)p_ioctl->out.attr.h_pd->obj.hdl; + } + else + { +proxy_query_mr_err: + cl_memclr( &p_ioctl->out.attr, sizeof(ib_mr_attr_t) ); + } + + if( h_mr ) + deref_al_obj( &h_mr->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_MODIFY_MEM: + */ +static cl_status_t +proxy_modify_mr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_rereg_mem_ioctl_t *p_ioctl = + (ual_rereg_mem_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mr_handle_t h_mr; + ib_pd_handle_t h_pd = NULL; + ib_mr_create_t *p_mr_create; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MR ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MR ); + return CL_INVALID_PARAMETER; + } + + /* Validate MR handle */ + h_mr = (ib_mr_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR ); + if( !h_mr ) + { + status = IB_INVALID_MR_HANDLE; + goto proxy_modify_mr_err; + } + + /* Validate input region size. 
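+ * The 64-bit region length must be representable as a size_t, so
+ * this check can only fail on 32-bit kernels.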
*/ + if( p_ioctl->in.mem_create.length > ~((size_t)0) ) + { + status = IB_INVALID_SETTING; + goto proxy_modify_mr_err; + } + + if( p_ioctl->in.mem_mod_mask & IB_MR_MOD_PD ) + { + if( !p_ioctl->in.h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_modify_mr_err; + } + /* This is a modify PD request, validate the PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_modify_mr_err; + } + } + else + { + h_pd = NULL; + } + + if( p_ioctl->in.mem_mod_mask != IB_MR_MOD_PD ) + p_mr_create = &p_ioctl->in.mem_create; + else + p_mr_create = NULL; + + status = rereg_mem( h_mr, p_ioctl->in.mem_mod_mask, + p_mr_create, &p_ioctl->out.lkey, &p_ioctl->out.rkey, h_pd, TRUE ); + + if( status != IB_SUCCESS ) + { +proxy_modify_mr_err: + p_ioctl->out.lkey = 0; + p_ioctl->out.rkey = 0; + } + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + if( h_mr ) + deref_al_obj( &h_mr->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_REG_SHARED_MEM: + */ +static cl_status_t +proxy_shared_mr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_reg_shared_ioctl_t *p_ioctl = + (ual_reg_shared_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_mr_handle_t h_mr, h_cur_mr; + ib_api_status_t status; + uint64_t vaddr; + + AL_ENTER( AL_DBG_MR ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MR ); + return CL_INVALID_PARAMETER; + } + + /* + * TODO: Must support taking an input handle that isn't + * in this process's context. + */ + /* Validate MR handle */ + h_cur_mr = (ib_mr_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR ); + if( !h_cur_mr ) + { + h_pd = NULL; + status = IB_INVALID_MR_HANDLE; + goto proxy_shared_mr_err; + } + + /* Validate the PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_shared_mr_err; + } + + vaddr = p_ioctl->in.vaddr; + status = reg_shared( h_cur_mr, h_pd, + p_ioctl->in.access_ctrl, &vaddr, &p_ioctl->out.lkey, + &p_ioctl->out.rkey, &h_mr, TRUE ); + + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_new_mr = h_mr->obj.hdl; + p_ioctl->out.vaddr = vaddr; + h_mr->obj.hdl_valid = TRUE; + deref_al_obj( &h_mr->obj ); + } + else + { +proxy_shared_mr_err: + cl_memclr( &p_ioctl->out, sizeof(p_ioctl->out) ); + } + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + if( h_cur_mr ) + deref_al_obj( &h_cur_mr->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_DEREGISTER_MEM: + */ +static cl_status_t +proxy_deregister_mr( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_dereg_mr_ioctl_t *p_ioctl = + (ual_dereg_mr_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mr_handle_t h_mr; + + AL_ENTER( AL_DBG_MR ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MR ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate MR handle */ + h_mr = (ib_mr_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mr, AL_OBJ_TYPE_H_MR ); + if( !h_mr ) + { + p_ioctl->out.status = IB_INVALID_MR_HANDLE; + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; + } + + p_ioctl->out.status = dereg_mr( h_mr ); + + if( p_ioctl->out.status != IB_SUCCESS ) + deref_al_obj( &h_mr->obj ); + + AL_EXIT( AL_DBG_MR ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_CREATE_MW: + */ +static cl_status_t +proxy_create_mw( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_create_mw_ioctl_t *p_ioctl = + (ual_create_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_mw_handle_t h_mw; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MW ); + return CL_INVALID_PARAMETER; + } + + /* Validate PD handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_create_mw_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_mw_err; + + status = create_mw( h_pd, &p_ioctl->out.rkey, &h_mw, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_create_mw_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_mw = h_mw->obj.hdl; + h_mw->obj.hdl_valid = TRUE; + deref_al_obj( &h_mw->obj ); + } + else + { + h_mw->obj.pfn_destroy( &h_mw->obj, NULL ); + +proxy_create_mw_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.rkey = 0; + p_ioctl->out.h_mw = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MW ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_QUERY_MW: + */ +static cl_status_t +proxy_query_mw( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_query_mw_ioctl_t *p_ioctl = + (ual_query_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mw_handle_t h_mw; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + ib_pd_handle_t h_pd; + + AL_ENTER( AL_DBG_MW ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MW ); + return CL_INVALID_PARAMETER; + } + + /* Validate MW handle */ + h_mw = (ib_mw_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW ); + if( !h_mw ) + { + status = IB_INVALID_MW_HANDLE; + goto proxy_query_mw_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_query_mw_err; + + status = query_mw( h_mw, &h_pd, &p_ioctl->out.rkey, p_umv_buf ); + + if( status != IB_SUCCESS ) + goto proxy_query_mw_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + /* + * replace the pd handle with user's pd context for the proxy's PD. + */ + p_ioctl->out.pd_context = (void*)h_pd->obj.context; + } + else + { +proxy_query_mw_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.pd_context = NULL; + p_ioctl->out.rkey = 0; + } + free_umvbuf( p_umv_buf ); + + if( h_mw ) + deref_al_obj( &h_mw->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MW ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_BIND_MW: + */ +static cl_status_t +proxy_bind_mw( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_bind_mw_ioctl_t *p_ioctl = + (ual_bind_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mw_handle_t h_mw; + ib_qp_handle_t h_qp; + ib_mr_handle_t h_mr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MW ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MW ); + return CL_INVALID_PARAMETER; + } + + /* Validate MW handle */ + h_mw = (ib_mw_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW ); + if( !h_mw ) + { + status = IB_INVALID_MW_HANDLE; + goto proxy_bind_mw_err1; + } + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_bind_mw_err2; + } + + /* Validate MR handle */ + h_mr = (ib_mr_handle_t)al_hdl_ref( p_context->h_al, + (uint64_t)p_ioctl->in.mw_bind.h_mr, AL_OBJ_TYPE_H_MR ); + if( !h_mr ) + { + status = IB_INVALID_MR_HANDLE; + goto proxy_bind_mw_err3; + } + + /* Update bind attribute with the kernel space handles */ + p_ioctl->in.mw_bind.h_mr = h_mr; + + status = ib_bind_mw( h_mw, h_qp, + &p_ioctl->in.mw_bind, &p_ioctl->out.r_key ); + + deref_al_obj( &h_mr->obj ); +proxy_bind_mw_err3: + deref_al_obj( &h_qp->obj ); +proxy_bind_mw_err2: + deref_al_obj( &h_mw->obj ); +proxy_bind_mw_err1: + + if( status != IB_SUCCESS ) + p_ioctl->out.r_key = 0; + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MW ); + return CL_SUCCESS; +} + + + +/* + * Process the ioctl UAL_DESTROY_MW: + */ +static cl_status_t +proxy_destroy_mw( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_destroy_mw_ioctl_t *p_ioctl = + (ual_destroy_mw_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_mw_handle_t h_mw; + + 
AL_ENTER( AL_DBG_MW ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MW ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate MW handle */ + h_mw = (ib_mw_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_mw, AL_OBJ_TYPE_H_MW ); + if( !h_mw ) + { + p_ioctl->out.status = IB_INVALID_MW_HANDLE; + AL_EXIT( AL_DBG_MW ); + return CL_SUCCESS; + } + p_ioctl->out.status = destroy_mw( h_mw ); + + if( p_ioctl->out.status != IB_SUCCESS ) + deref_al_obj( &h_mw->obj ); + + AL_EXIT( AL_DBG_MW ); + return CL_SUCCESS; +} + + +cl_status_t +proxy_get_spl_qp( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_spl_qp_ioctl_t *p_ioctl = + (ual_spl_qp_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_pd_handle_t h_pd; + ib_qp_handle_t h_qp; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_QP ); + return CL_INVALID_PARAMETER; + } + + /* Validate pd handle */ + h_pd = (ib_pd_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_pd, AL_OBJ_TYPE_H_PD ); + if( !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto proxy_get_spl_qp_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_get_spl_qp_err; + + /* We obtain the pool_key separately from the special QP. */ + status = get_spl_qp( h_pd, p_ioctl->in.port_guid, + &p_ioctl->in.qp_create, p_ioctl->in.context, proxy_qp_err_cb, NULL, &h_qp, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_get_spl_qp_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_qp = h_qp->obj.hdl; + h_qp->obj.hdl_valid = TRUE; + deref_al_obj( &h_qp->obj ); + } + else + { + h_qp->obj.pfn_destroy( &h_qp->obj, NULL ); + +proxy_get_spl_qp_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_qp = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + if( h_pd ) + deref_al_obj( &h_pd->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_QP ); + return CL_SUCCESS; +} + + + +static cl_status_t +proxy_attach_mcast( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_attach_mcast_ioctl_t *p_ioctl = + (ual_attach_mcast_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + ib_qp_handle_t h_qp; + al_attach_handle_t h_attach; + ci_umv_buf_t *p_umv_buf = NULL; + ib_api_status_t status; + + AL_ENTER( AL_DBG_MCAST ); + + /* Validate input buffers. 
*/ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MCAST ); + return CL_INVALID_PARAMETER; + } + + /* Validate QP handle */ + h_qp = (ib_qp_handle_t) + al_hdl_ref( p_context->h_al, p_ioctl->in.h_qp, AL_OBJ_TYPE_H_QP ); + if( !h_qp ) + { + status = IB_INVALID_QP_HANDLE; + goto proxy_attach_mcast_err; + } + + status = cpyin_umvbuf( &p_ioctl->in.umv_buf, &p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_attach_mcast_err; + + status = al_attach_mcast( h_qp, &p_ioctl->in.mgid, + p_ioctl->in.mlid, &h_attach, p_umv_buf ); + if( status != IB_SUCCESS ) + goto proxy_attach_mcast_err; + + status = cpyout_umvbuf( &p_ioctl->out.umv_buf, p_umv_buf ); + if( status == IB_SUCCESS ) + { + p_ioctl->out.h_attach = h_attach->obj.hdl; + h_attach->obj.hdl_valid = TRUE; + deref_al_obj( &h_attach->obj ); + } + else + { + h_attach->obj.pfn_destroy( &h_attach->obj, NULL ); + +proxy_attach_mcast_err: + p_ioctl->out.umv_buf = p_ioctl->in.umv_buf; + p_ioctl->out.h_attach = AL_INVALID_HANDLE; + } + free_umvbuf( p_umv_buf ); + + if( h_qp ) + deref_al_obj( &h_qp->obj ); + + p_ioctl->out.status = status; + *p_ret_bytes = sizeof(p_ioctl->out); + + AL_EXIT( AL_DBG_MCAST ); + return CL_SUCCESS; +} + + + +static cl_status_t +proxy_detach_mcast( + IN void *p_open_context, + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + ual_detach_mcast_ioctl_t *p_ioctl = + (ual_detach_mcast_ioctl_t *)cl_ioctl_in_buf( h_ioctl ); + al_dev_open_context_t *p_context = + (al_dev_open_context_t *)p_open_context; + al_attach_handle_t h_attach; + + AL_ENTER( AL_DBG_MCAST ); + + /* Validate input buffers. */ + if( !cl_ioctl_in_buf( h_ioctl ) || !cl_ioctl_out_buf( h_ioctl ) || + cl_ioctl_in_size( h_ioctl ) != sizeof(p_ioctl->in) || + cl_ioctl_out_size( h_ioctl ) != sizeof(p_ioctl->out) ) + { + AL_EXIT( AL_DBG_MCAST ); + return CL_INVALID_PARAMETER; + } + + /* Set the return bytes in all cases */ + *p_ret_bytes = sizeof(p_ioctl->out); + + /* Validate mcast handle */ + h_attach = (al_attach_handle_t)al_hdl_ref( + p_context->h_al, p_ioctl->in.h_attach, AL_OBJ_TYPE_H_ATTACH ); + if( !h_attach ) + { + p_ioctl->out.status = IB_INVALID_MCAST_HANDLE; + AL_EXIT( AL_DBG_MCAST ); + return CL_SUCCESS; + } + + h_attach->obj.pfn_destroy( &h_attach->obj, ib_sync_destroy ); + p_ioctl->out.status = IB_SUCCESS; + + AL_EXIT( AL_DBG_MCAST ); + return CL_SUCCESS; +} + + + +cl_status_t +verbs_ioctl( + IN cl_ioctl_handle_t h_ioctl, + OUT size_t *p_ret_bytes ) +{ + cl_status_t cl_status; + IO_STACK_LOCATION *p_io_stack; + void *p_context; + + AL_ENTER( AL_DBG_DEV ); + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + p_context = p_io_stack->FileObject->FsContext; + + if( !p_context ) + { + AL_EXIT( AL_DBG_DEV ); + return CL_INVALID_PARAMETER; + } + + switch( cl_ioctl_ctl_code( h_ioctl ) ) + { + case UAL_OPEN_CA: + cl_status = proxy_open_ca( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_CA: + cl_status = proxy_query_ca( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_CA: + cl_status = proxy_modify_ca( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CI_CALL: + cl_status = proxy_ci_call( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_ALLOC_PD: + cl_status = proxy_alloc_pd( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CREATE_AV: + cl_status = proxy_create_av( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_AV: + cl_status = 
proxy_query_av( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_AV: + cl_status = proxy_modify_av( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CREATE_SRQ: + cl_status = proxy_create_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_SRQ: + cl_status = proxy_query_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_SRQ: + cl_status = proxy_modify_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_SRQ: + cl_status = proxy_destroy_srq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_POST_SRQ_RECV: + cl_status = proxy_post_srq_recv( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CREATE_QP: + cl_status = proxy_create_qp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_QP: + cl_status = proxy_query_qp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_QP: + cl_status = proxy_modify_qp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CREATE_CQ: + cl_status = proxy_create_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_CQ: + cl_status = proxy_query_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_CQ: + cl_status = proxy_modify_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REG_MR: + cl_status = proxy_register_mr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_MR: + cl_status = proxy_query_mr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_MODIFY_MR: + cl_status = proxy_modify_mr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REG_SHARED: + cl_status = proxy_shared_mr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CREATE_MW: + cl_status = proxy_create_mw( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_QUERY_MW: + cl_status = proxy_query_mw( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_BIND_MW: + cl_status = proxy_bind_mw( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_POST_SEND: + cl_status = proxy_post_send( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_POST_RECV: + cl_status = proxy_post_recv( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_PEEK_CQ: + cl_status = proxy_peek_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_POLL_CQ: + cl_status = proxy_poll_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REARM_CQ: + cl_status = proxy_rearm_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_REARM_N_CQ: + cl_status = proxy_rearm_n_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_ATTACH_MCAST: + cl_status = proxy_attach_mcast( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_GET_SPL_QP_ALIAS: + cl_status = proxy_get_spl_qp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_CLOSE_CA: + cl_status = proxy_close_ca( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DEALLOC_PD: + cl_status = proxy_dealloc_pd( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_AV: + cl_status = proxy_destroy_av( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_QP: + cl_status = proxy_destroy_qp( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_CQ: + cl_status = proxy_destroy_cq( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DEREG_MR: + cl_status = proxy_deregister_mr( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DESTROY_MW: + cl_status = proxy_destroy_mw( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_DETACH_MCAST: + cl_status = proxy_detach_mcast( p_context, h_ioctl, p_ret_bytes ); + break; + case UAL_GET_VENDOR_LIBCFG: + cl_status = + proxy_get_vendor_libcfg( 
p_context, h_ioctl, p_ret_bytes ); + break; + default: + cl_status = CL_INVALID_PARAMETER; + break; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/branches/Ndi/core/al/kernel/al_sa_req.c b/branches/Ndi/core/al/kernel/al_sa_req.c new file mode 100644 index 00000000..f5dfe73a --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_sa_req.c @@ -0,0 +1,811 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_common.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_sa_req.tmh" +#endif +#include "al_mgr.h" +#include "al_query.h" +#include "ib_common.h" + + +/* Global SA request manager */ +typedef struct _sa_req_mgr +{ + al_obj_t obj; /* Child of gp_al_mgr */ + ib_pnp_handle_t h_pnp; /* Handle for CA PnP events */ + +} sa_req_mgr_t; + + +static sa_req_mgr_t *gp_sa_req_mgr = NULL; + + + +/* + * Function prototypes. + */ +void +destroying_sa_req_mgr( + IN al_obj_t* p_obj ); + +void +free_sa_req_mgr( + IN al_obj_t* p_obj ); + +ib_api_status_t +sa_req_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +create_sa_req_svc( + IN ib_pnp_port_rec_t* p_pnp_rec ); + +void +destroying_sa_req_svc( + IN al_obj_t* p_obj ); + +void +free_sa_req_svc( + IN al_obj_t* p_obj ); + +ib_api_status_t +init_sa_req_svc( + IN sa_req_svc_t* p_sa_req_svc, + IN const ib_pnp_port_rec_t *p_pnp_rec ); + +void +sa_req_send_comp_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad ); + +void +sa_req_recv_comp_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad ); + +void +sa_req_svc_event_cb( + IN ib_async_event_rec_t *p_event_rec ); + + +/* + * Create the sa_req manager. + */ +ib_api_status_t +create_sa_req_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_pnp_req_t pnp_req; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SA_REQ ); + CL_ASSERT( p_parent_obj ); + CL_ASSERT( gp_sa_req_mgr == NULL ); + + gp_sa_req_mgr = cl_zalloc( sizeof( sa_req_mgr_t ) ); + if( gp_sa_req_mgr == NULL ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_zalloc failed\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the sa_req manager. 
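+ * Like the other AL objects in this file, the manager follows the usual
+ * lifecycle: construct_al_obj, init_al_obj with destroy/free callbacks,
+ * then attach_al_obj to hook the object under its parent.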
*/ + construct_al_obj( &gp_sa_req_mgr->obj, AL_OBJ_TYPE_SA_REQ_SVC ); + + /* Initialize the global sa_req manager object. */ + status = init_al_obj( &gp_sa_req_mgr->obj, gp_sa_req_mgr, TRUE, + destroying_sa_req_mgr, NULL, free_sa_req_mgr ); + if( status != IB_SUCCESS ) + { + free_sa_req_mgr( &gp_sa_req_mgr->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed\n") ); + return status; + } + status = attach_al_obj( p_parent_obj, &gp_sa_req_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register for CA PnP events. */ + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &gp_sa_req_mgr->obj; + pnp_req.pfn_pnp_cb = sa_req_mgr_pnp_cb; + + status = ib_reg_pnp( gh_al, &pnp_req, &gp_sa_req_mgr->h_pnp ); + if (status != IB_SUCCESS) + { + gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_pnp failed: %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* + * Note that we don't release the reference from init_al_obj because + * we need a reference on behalf of the ib_reg_pnp call. This avoids + * a call to ref_al_obj and deref_al_obj. + */ + + AL_EXIT( AL_DBG_SA_REQ ); + return IB_SUCCESS; +} + + + +/* + * Pre-destroy the sa_req manager. + */ +void +destroying_sa_req_mgr( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + + CL_ASSERT( p_obj ); + CL_ASSERT( gp_sa_req_mgr == PARENT_STRUCT( p_obj, sa_req_mgr_t, obj ) ); + UNUSED_PARAM( p_obj ); + + /* Deregister for PnP events. */ + if( gp_sa_req_mgr->h_pnp ) + { + status = ib_dereg_pnp( gp_sa_req_mgr->h_pnp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + + +/* + * Free the sa_req manager. + */ +void +free_sa_req_mgr( + IN al_obj_t* p_obj ) +{ + CL_ASSERT( p_obj ); + CL_ASSERT( gp_sa_req_mgr == PARENT_STRUCT( p_obj, sa_req_mgr_t, obj ) ); + UNUSED_PARAM( p_obj ); + + destroy_al_obj( &gp_sa_req_mgr->obj ); + cl_free( gp_sa_req_mgr ); + gp_sa_req_mgr = NULL; +} + + + +/* + * SA request manager PnP event callback. + */ +ib_api_status_t +sa_req_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ) +{ + sa_req_svc_t *p_sa_req_svc; + ib_av_attr_t av_attr; + ib_pd_handle_t h_pd; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SA_REQ ); + CL_ASSERT( p_pnp_rec ); + CL_ASSERT( p_pnp_rec->pnp_context == &gp_sa_req_mgr->obj ); + + /* Dispatch based on the PnP event type. 
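+ * PORT_ADD creates a per-port sa_req service and PORT_REMOVE destroys it.
+ * PORT_ACTIVE and SM_CHANGE refresh the cached SM LID/SL and update the
+ * address vector, while PORT_INIT, PORT_ARMED, and PORT_DOWN clear the SM
+ * address so acquire_sa_req_svc skips the port until it is active again.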
*/ + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_PORT_ADD: + status = create_sa_req_svc( (ib_pnp_port_rec_t*)p_pnp_rec ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_sa_req_svc failed: %s\n", ib_get_err_str(status)) ); + } + break; + + case IB_PNP_PORT_REMOVE: + CL_ASSERT( p_pnp_rec->context ); + p_sa_req_svc = p_pnp_rec->context; + ref_al_obj( &p_sa_req_svc->obj ); + p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL ); + p_pnp_rec->context = NULL; + status = IB_SUCCESS; + break; + + case IB_PNP_PORT_ACTIVE: + case IB_PNP_SM_CHANGE: + CL_ASSERT( p_pnp_rec->context ); + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, + ("updating SM information\n") ); + + p_sa_req_svc = p_pnp_rec->context; + p_sa_req_svc->sm_lid = + ((ib_pnp_port_rec_t*)p_pnp_rec)->p_port_attr->sm_lid; + p_sa_req_svc->sm_sl = + ((ib_pnp_port_rec_t*)p_pnp_rec)->p_port_attr->sm_sl; + + /* Update the address vector. */ + status = ib_query_av( p_sa_req_svc->h_av, &av_attr, &h_pd ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("AV query failed: %s\n", ib_get_err_str(status)) ); + status = IB_SUCCESS; + break; + } + + av_attr.dlid = p_sa_req_svc->sm_lid; + av_attr.sl = p_sa_req_svc->sm_sl; + status = ib_modify_av( p_sa_req_svc->h_av, &av_attr ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("modify AV failed: %s\n", ib_get_err_str(status) ) ); + status = IB_SUCCESS; + break; + } + break; + + case IB_PNP_PORT_INIT: + case IB_PNP_PORT_ARMED: + case IB_PNP_PORT_DOWN: + CL_ASSERT( p_pnp_rec->context ); + p_sa_req_svc = p_pnp_rec->context; + p_sa_req_svc->sm_lid = 0; + p_sa_req_svc->sm_sl = 0; + + /* Fall through to set the return status. */ + default: + status = IB_SUCCESS; + break; + } + AL_EXIT( AL_DBG_SA_REQ ); + return status; +} + + + +/* + * Create an sa_req service. + */ +ib_api_status_t +create_sa_req_svc( + IN ib_pnp_port_rec_t* p_pnp_rec ) +{ + sa_req_svc_t* p_sa_req_svc; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SA_REQ ); + CL_ASSERT( p_pnp_rec ); + CL_ASSERT( p_pnp_rec->p_ca_attr ); + CL_ASSERT( p_pnp_rec->p_port_attr ); + + p_sa_req_svc = cl_zalloc( sizeof( sa_req_svc_t ) ); + if( p_sa_req_svc == NULL ) + return IB_INSUFFICIENT_MEMORY; + + /* Construct the sa_req service object. */ + construct_al_obj( &p_sa_req_svc->obj, AL_OBJ_TYPE_SA_REQ_SVC ); + + /* Initialize the sa_req service object. */ + status = init_al_obj( &p_sa_req_svc->obj, p_sa_req_svc, TRUE, + destroying_sa_req_svc, NULL, free_sa_req_svc ); + if( status != IB_SUCCESS ) + { + free_sa_req_svc( &p_sa_req_svc->obj ); + return status; + } + + /* Attach to the sa_req_mgr. */ + status = attach_al_obj( &gp_sa_req_mgr->obj, &p_sa_req_svc->obj ); + if( status != IB_SUCCESS ) + { + p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Allocate a QP alias and MAD service to send queries on. */ + status = init_sa_req_svc( p_sa_req_svc, p_pnp_rec ); + if( status != IB_SUCCESS ) + { + p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_sa_req_svc failed: %s\n", ib_get_err_str(status) ) ); + return status; + } + + /* Set the context of the PnP event to this child object. */ + p_pnp_rec->pnp_rec.context = p_sa_req_svc; + + /* Release the reference taken in init_al_obj. 
*/ + deref_al_obj( &p_sa_req_svc->obj ); + + AL_EXIT( AL_DBG_SA_REQ ); + return IB_SUCCESS; +} + + + +/* + * Pre-destroy a sa_req service. + */ +void +destroying_sa_req_svc( + IN al_obj_t* p_obj ) +{ + sa_req_svc_t* p_sa_req_svc; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj ); + + /* Destroy the AV. */ + if( p_sa_req_svc->h_av ) + ib_destroy_av( p_sa_req_svc->h_av ); + + /* Destroy the QP. */ + if( p_sa_req_svc->h_qp ) + { + status = ib_destroy_qp( p_sa_req_svc->h_qp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } +} + + + +/* + * Free a sa_req service. + */ +void +free_sa_req_svc( + IN al_obj_t* p_obj ) +{ + sa_req_svc_t* p_sa_req_svc; + + CL_ASSERT( p_obj ); + p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj ); + + destroy_al_obj( p_obj ); + cl_free( p_sa_req_svc ); +} + + + +/* + * Initialize an sa_req service. + */ +ib_api_status_t +init_sa_req_svc( + IN sa_req_svc_t *p_sa_req_svc, + IN const ib_pnp_port_rec_t *p_pnp_rec ) +{ + ib_qp_create_t qp_create; + ib_mad_svc_t mad_svc; + ib_api_status_t status; + ib_ca_handle_t h_ca; + ib_av_attr_t av_attr; + + AL_ENTER( AL_DBG_SA_REQ ); + CL_ASSERT( p_sa_req_svc && p_pnp_rec ); + + /* Acquire the correct CI CA for this port. */ + h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid ); + if( !h_ca ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, ("Failed to acquire CA\n") ); + return IB_INVALID_GUID; + } + p_sa_req_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca; + + /* Record which port this service operates on. */ + p_sa_req_svc->port_guid = p_pnp_rec->p_port_attr->port_guid; + p_sa_req_svc->port_num = p_pnp_rec->p_port_attr->port_num; + p_sa_req_svc->sm_lid = p_pnp_rec->p_port_attr->sm_lid; + p_sa_req_svc->sm_sl = p_pnp_rec->p_port_attr->sm_sl; + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, + ("using port: 0x%x\tsm lid: 0x%x\tsm sl: 0x%x\n", + p_sa_req_svc->port_num, p_sa_req_svc->sm_lid, p_sa_req_svc->sm_sl) ); + + /* Create the QP. */ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = IB_QPT_QP1_ALIAS; + qp_create.sq_depth = p_pnp_rec->p_ca_attr->max_wrs; + qp_create.rq_depth = 0; + qp_create.sq_sge = 1; + qp_create.rq_sge = 0; + qp_create.h_sq_cq = NULL; + qp_create.h_rq_cq = NULL; + qp_create.sq_signaled = TRUE; + + status = ib_get_spl_qp( h_ca->obj.p_ci_ca->h_pd_alias, + p_sa_req_svc->port_guid, &qp_create, p_sa_req_svc, + sa_req_svc_event_cb, &p_sa_req_svc->pool_key, &p_sa_req_svc->h_qp ); + + /* + * Release the CI CA once we've allocated the QP. The CI CA will not + * go away while we hold the QP. + */ + deref_al_obj( &h_ca->obj ); + + /* Check for failure allocating the QP. */ + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("failed to create QP1 alias: %s\n", ib_get_err_str(status) ) ); + return status; + } + + /* Reference the sa_req service on behalf of QP alias. */ + ref_al_obj( &p_sa_req_svc->obj ); + + /* Create a MAD service. */ + cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) ); + mad_svc.mad_svc_context = p_sa_req_svc; + mad_svc.pfn_mad_send_cb = sa_req_send_comp_cb; + mad_svc.pfn_mad_recv_cb = sa_req_recv_comp_cb; + + status = ib_reg_mad_svc( p_sa_req_svc->h_qp, &mad_svc, + &p_sa_req_svc->h_mad_svc ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("failed to register MAD service: %s\n", ib_get_err_str(status) ) ); + return status; + } + + /* Create an address vector for the SA. 
*/ + av_attr.port_num = p_sa_req_svc->port_num; + av_attr.sl = p_sa_req_svc->sm_sl; + av_attr.dlid = 1; + av_attr.grh_valid = FALSE; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + av_attr.path_bits = 0; + + status = ib_create_av( p_sa_req_svc->obj.p_ci_ca->h_pd_alias, + &av_attr, &p_sa_req_svc->h_av ); + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("failed to create AV: %s\n", ib_get_err_str(status) ) ); + return status; + } + + AL_EXIT( AL_DBG_SA_REQ ); + return IB_SUCCESS; +} + + + +/* + * SA request service asynchronous event callback. Our QP is an alias, + * so if we've received an error, the QP is unusable. + */ +void +sa_req_svc_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + sa_req_svc_t *p_sa_req_svc; + + CL_ASSERT( p_event_rec ); + CL_ASSERT( p_event_rec->context ); + + p_sa_req_svc = p_event_rec->context; + ref_al_obj( &p_sa_req_svc->obj ); + p_sa_req_svc->obj.pfn_destroy( &p_sa_req_svc->obj, NULL ); +} + + + +/* + * Acquire the sa_req service for the given port. + */ +static sa_req_svc_t* +acquire_sa_req_svc( + IN const ib_net64_t port_guid ) +{ + cl_list_item_t *p_list_item; + sa_req_svc_t *p_sa_req_svc; + al_obj_t *p_obj; + + CL_ASSERT( gp_sa_req_mgr ); + + /* Search for the sa_req service for the given port. */ + cl_spinlock_acquire( &gp_sa_req_mgr->obj.lock ); + for( p_list_item = cl_qlist_head( &gp_sa_req_mgr->obj.obj_list ); + p_list_item != cl_qlist_end( &gp_sa_req_mgr->obj.obj_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item ); + p_sa_req_svc = PARENT_STRUCT( p_obj, sa_req_svc_t, obj ); + + /* Make sure that the REQ service isn't being destroyed. */ + if( p_sa_req_svc->obj.state != CL_INITIALIZED || !p_sa_req_svc->sm_lid ) + continue; + + /* Check for a port match. */ + if( p_sa_req_svc->port_guid == port_guid ) + { + /* Reference the service on behalf of the client. */ + ref_al_obj( &p_sa_req_svc->obj ); + cl_spinlock_release( &gp_sa_req_mgr->obj.lock ); + return p_sa_req_svc; + } + } + cl_spinlock_release( &gp_sa_req_mgr->obj.lock ); + + return NULL; +} + + + +ib_api_status_t +al_send_sa_req( + IN al_sa_req_t *p_sa_req, + IN const net64_t port_guid, + IN const uint32_t timeout_ms, + IN const uint32_t retry_cnt, + IN const ib_user_query_t* const p_sa_req_data, + IN const ib_al_flags_t flags ) +{ + ib_api_status_t status; + sa_req_svc_t *p_sa_req_svc; + ib_mad_element_t *p_mad_request; + ib_mad_t *p_mad_hdr; + ib_sa_mad_t *p_sa_mad; + KEVENT event; + + AL_ENTER( AL_DBG_SA_REQ ); + + if( flags & IB_FLAGS_SYNC ) + { + if( !cl_is_blockable() ) + { + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Thread context not blockable\n") ); + return IB_INVALID_SETTING; + } + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + p_sa_req->p_sync_event = &event; + } + else + { + p_sa_req->p_sync_event = NULL; + } + + /* Locate the sa_req service to issue the sa_req on. */ + p_sa_req->p_sa_req_svc = acquire_sa_req_svc( port_guid ); + if( !p_sa_req->p_sa_req_svc ) + { + AL_PRINT_EXIT( TRACE_LEVEL_INFORMATION, AL_DBG_QUERY, ("invalid port GUID\n") ); + return IB_INVALID_GUID; + } + + /* Get a MAD element for the send request. */ + p_sa_req_svc = p_sa_req->p_sa_req_svc; + status = ib_get_mad( p_sa_req_svc->pool_key, MAD_BLOCK_SIZE, + &p_mad_request ); + if( status != IB_SUCCESS ) + { + deref_al_obj( &p_sa_req_svc->obj ); + AL_EXIT( AL_DBG_SA_REQ ); + return status; + } + + /* Store the MAD request so it can be cancelled. 
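+ * For IB_FLAGS_SYNC callers, the send and receive completion callbacks
+ * signal p_sync_event, and this routine blocks on that event once the
+ * MAD has been posted.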
*/ + p_sa_req->p_mad_request = p_mad_request; + + /* Initialize the MAD buffer for the send operation. */ + p_mad_hdr = p_sa_req->p_mad_request->p_mad_buf; + p_sa_mad = (ib_sa_mad_t*)p_mad_hdr; + + /* Initialize the standard MAD header. */ + ib_mad_init_new( p_mad_hdr, IB_MCLASS_SUBN_ADM, (uint8_t)2, + p_sa_req_data->method, + cl_hton64( (uint64_t)cl_atomic_inc( &p_sa_req_svc->trans_id ) ), + 0, 0 ); + + /* Set the query information. */ + p_sa_mad->attr_id = p_sa_req_data->attr_id; + p_sa_mad->attr_offset = ib_get_attr_offset( p_sa_req_data->attr_size ); + p_sa_mad->comp_mask = p_sa_req_data->comp_mask; + /* + * Most set operations don't use the component mask. + * Check the method and copy the attributes if it's a set too. + */ + if( p_sa_mad->comp_mask || p_sa_mad->method == IB_MAD_METHOD_SET ) + { + cl_memcpy( p_sa_mad->data, p_sa_req_data->p_attr, + p_sa_req_data->attr_size ); + } + + /* Set the MAD element information. */ + p_mad_request->context1 = p_sa_req; + p_mad_request->send_context1 = p_sa_req; + p_mad_request->remote_qp = IB_QP1; + p_mad_request->h_av = p_sa_req_svc->h_av; + p_mad_request->send_opt = 0; + p_mad_request->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_mad_request->resp_expected = TRUE; + p_mad_request->timeout_ms = timeout_ms; + p_mad_request->retry_cnt = retry_cnt; + + status = ib_send_mad( p_sa_req_svc->h_mad_svc, p_mad_request, NULL ); + if( status != IB_SUCCESS ) + { + p_sa_req->p_mad_request = NULL; + ib_put_mad( p_mad_request ); + deref_al_obj( &p_sa_req->p_sa_req_svc->obj ); + } + else if( flags & IB_FLAGS_SYNC ) + { + /* Wait for the MAD completion. */ + KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL ); + } + + AL_EXIT( AL_DBG_SA_REQ ); + return status; +} + + + +/* + * SA request send completion callback. + */ +void +sa_req_send_comp_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_request_mad ) +{ + al_sa_req_t *p_sa_req; + sa_req_svc_t *p_sa_req_svc; + KEVENT *p_sync_event; + + AL_ENTER( AL_DBG_SA_REQ ); + + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + + /* + * Check the result of the send operation. If the send was successful, + * we will be getting a receive callback with the response. + */ + if( p_request_mad->status != IB_WCS_SUCCESS ) + { + /* Notify the requestor of the result. */ + AL_PRINT( TRACE_LEVEL_WARNING, AL_DBG_QUERY, + ("request failed - notifying user\n") ); + + p_sa_req = p_request_mad->send_context1; + p_sa_req_svc = p_sa_req->p_sa_req_svc; + p_sync_event = p_sa_req->p_sync_event; + + p_sa_req->status = convert_wc_status( p_request_mad->status ); + p_sa_req->pfn_sa_req_cb( p_sa_req, NULL ); + if( p_sync_event ) + KeSetEvent( p_sync_event, 0, FALSE ); + deref_al_obj( &p_sa_req_svc->obj ); + } + + /* Return the MAD. */ + ib_put_mad( p_request_mad ); + + AL_EXIT( AL_DBG_SA_REQ ); +} + + + +/* + * SA request receive completion callback. + */ +void +sa_req_recv_comp_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_response ) +{ + al_sa_req_t *p_sa_req; + sa_req_svc_t *p_sa_req_svc; + ib_sa_mad_t *p_sa_mad; + KEVENT *p_sync_event; + + AL_ENTER( AL_DBG_SA_REQ ); + + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + + p_sa_req = p_mad_response->send_context1; + p_sa_req_svc = p_sa_req->p_sa_req_svc; + p_sync_event = p_sa_req->p_sync_event; + + //*** check for SA redirection... + + /* Record the results of the request. 
*/ + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_mad_response ); + if( p_sa_mad->status == IB_SA_MAD_STATUS_SUCCESS ) + p_sa_req->status = IB_SUCCESS; + else + p_sa_req->status = IB_REMOTE_ERROR; + + /* Notify the requestor of the result. */ + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SA_REQ, ("notifying user\n") ); + p_sa_req->pfn_sa_req_cb( p_sa_req, p_mad_response ); + if( p_sync_event ) + KeSetEvent( p_sync_event, 0, FALSE ); + deref_al_obj( &p_sa_req_svc->obj ); + + AL_EXIT( AL_DBG_SA_REQ ); +} + + + +ib_api_status_t +convert_wc_status( + IN const ib_wc_status_t wc_status ) +{ + switch( wc_status ) + { + case IB_WCS_SUCCESS: + return IB_SUCCESS; + + case IB_WCS_TIMEOUT_RETRY_ERR: + return IB_TIMEOUT; + + case IB_WCS_CANCELED: + return IB_CANCELED; + + default: + return IB_ERROR; + } +} diff --git a/branches/Ndi/core/al/kernel/al_smi.c b/branches/Ndi/core/al/kernel/al_smi.c new file mode 100644 index 00000000..4461730c --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_smi.c @@ -0,0 +1,3682 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * Copyright (c) 2006 Voltaire Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include + +#include "ib_common.h" +#include "al_common.h" +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_smi.tmh" +#endif +#include "al_verbs.h" +#include "al_mgr.h" +#include "al_pnp.h" +#include "al_qp.h" +#include "al_smi.h" +#include "al_av.h" + + +extern char node_desc[IB_NODE_DESCRIPTION_SIZE]; + +#define SMI_POLL_INTERVAL 20000 /* Milliseconds */ +#define LOCAL_MAD_TIMEOUT 50 /* Milliseconds */ +#define DEFAULT_QP0_DEPTH 256 +#define DEFAULT_QP1_DEPTH 1024 + +uint32_t g_smi_poll_interval = SMI_POLL_INTERVAL; +spl_qp_mgr_t* gp_spl_qp_mgr = NULL; + + +/* + * Function prototypes. 
+ */ +void +destroying_spl_qp_mgr( + IN al_obj_t* p_obj ); + +void +free_spl_qp_mgr( + IN al_obj_t* p_obj ); + +ib_api_status_t +spl_qp0_agent_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +spl_qp1_agent_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +spl_qp_agent_pnp( + IN ib_pnp_rec_t* p_pnp_rec, + IN ib_qp_type_t qp_type ); + +ib_api_status_t +create_spl_qp_svc( + IN ib_pnp_port_rec_t* p_pnp_rec, + IN const ib_qp_type_t qp_type ); + +void +destroying_spl_qp_svc( + IN al_obj_t* p_obj ); + +void +free_spl_qp_svc( + IN al_obj_t* p_obj ); + +void +spl_qp_svc_lid_change( + IN al_obj_t* p_obj, + IN ib_pnp_port_rec_t* p_pnp_rec ); + +ib_api_status_t +remote_mad_send( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ); + +static ib_api_status_t +local_mad_send( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ); + +static ib_api_status_t +loopback_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ); + +static ib_api_status_t +__process_subn_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ); + +static ib_api_status_t +fwd_local_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ); + +void +send_local_mad_cb( + IN cl_async_proc_item_t* p_item ); + +void +spl_qp_send_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +spl_qp_recv_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +spl_qp_comp( + IN spl_qp_svc_t* p_spl_qp_svc, + IN const ib_cq_handle_t h_cq, + IN ib_wc_type_t wc_type ); + +ib_api_status_t +process_mad_recv( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_element ); + +mad_route_t +route_recv_smp( + IN ib_mad_element_t* p_mad_element ); + +mad_route_t +route_recv_smp_attr( + IN ib_mad_element_t* p_mad_element ); + +mad_route_t +route_recv_dm_mad( + IN ib_mad_element_t* p_mad_element ); + +mad_route_t +route_recv_gmp( + IN ib_mad_element_t* p_mad_element ); + +mad_route_t +route_recv_gmp_attr( + IN ib_mad_element_t* p_mad_element ); + +ib_api_status_t +forward_sm_trap( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_element ); + +ib_api_status_t +recv_local_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_request ); + +void +spl_qp_alias_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ); + +void +spl_qp_alias_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_response ); + +static ib_api_status_t +spl_qp_svc_post_recvs( + IN spl_qp_svc_t* const p_spl_qp_svc ); + +void +spl_qp_svc_event_cb( + IN ib_async_event_rec_t *p_event_rec ); + +void +spl_qp_alias_event_cb( + IN ib_async_event_rec_t *p_event_rec ); + +void +spl_qp_svc_reset( + IN spl_qp_svc_t* p_spl_qp_svc ); + +void +spl_qp_svc_reset_cb( + IN cl_async_proc_item_t* p_item ); + +ib_api_status_t +acquire_svc_disp( + IN const cl_qmap_t* const p_svc_map, + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t *ph_mad_disp ); + +void +smi_poll_timer_cb( + IN void* context ); + +void +smi_post_recvs( + IN cl_list_item_t* const p_list_item, + IN void* context ); + +#if defined( CL_USE_MUTEX ) +void +spl_qp_send_async_cb( + IN cl_async_proc_item_t* p_item ); + +void +spl_qp_recv_async_cb( + IN cl_async_proc_item_t* p_item ); +#endif + +/* + * Create the special QP manager. 
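+ * One manager exists per AL instance. It registers two port PnP sinks,
+ * one for QP0 and one for QP1, so that each port receives both an SMI
+ * and a GSI special QP service, and it drives the SMI polling timer.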
*/ +ib_api_status_t +create_spl_qp_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_pnp_req_t pnp_req; + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_parent_obj ); + CL_ASSERT( !gp_spl_qp_mgr ); + + gp_spl_qp_mgr = cl_zalloc( sizeof( spl_qp_mgr_t ) ); + if( !gp_spl_qp_mgr ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IB_INSUFFICIENT_MEMORY\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the special QP manager. */ + construct_al_obj( &gp_spl_qp_mgr->obj, AL_OBJ_TYPE_SMI ); + cl_timer_construct( &gp_spl_qp_mgr->poll_timer ); + + /* Initialize the lists. */ + cl_qmap_init( &gp_spl_qp_mgr->smi_map ); + cl_qmap_init( &gp_spl_qp_mgr->gsi_map ); + + /* Initialize the global SMI/GSI manager object. */ + status = init_al_obj( &gp_spl_qp_mgr->obj, gp_spl_qp_mgr, TRUE, + destroying_spl_qp_mgr, NULL, free_spl_qp_mgr ); + if( status != IB_SUCCESS ) + { + free_spl_qp_mgr( &gp_spl_qp_mgr->obj ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("init_al_obj failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Attach the special QP manager to the parent object. */ + status = attach_al_obj( p_parent_obj, &gp_spl_qp_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Initialize the SMI polling timer. */ + cl_status = cl_timer_init( &gp_spl_qp_mgr->poll_timer, smi_poll_timer_cb, + gp_spl_qp_mgr ); + if( cl_status != CL_SUCCESS ) + { + gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_timer_init failed, status 0x%x\n", cl_status ) ); + return ib_convert_cl_status( cl_status ); + } + + /* + * Note: PnP registrations for port events must be done + * when the special QP manager is created. This ensures that + * the registrations are listed sequentially and the reporting + * of PnP events occurs in the proper order. + */ + + /* + * Separate context is needed for each special QP. Therefore, a + * separate PnP event registration is performed for QP0 and QP1. + */ + + /* Register for port PnP events for QP0. */ + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &gp_spl_qp_mgr->obj; + pnp_req.pfn_pnp_cb = spl_qp0_agent_pnp_cb; + + status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp0_pnp ); + + if( status != IB_SUCCESS ) + { + gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_pnp QP0 failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Reference the special QP manager on behalf of the ib_reg_pnp call. */ + ref_al_obj( &gp_spl_qp_mgr->obj ); + + /* Register for port PnP events for QP1. */ + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &gp_spl_qp_mgr->obj; + pnp_req.pfn_pnp_cb = spl_qp1_agent_pnp_cb; + + status = ib_reg_pnp( gh_al, &pnp_req, &gp_spl_qp_mgr->h_qp1_pnp ); + + if( status != IB_SUCCESS ) + { + gp_spl_qp_mgr->obj.pfn_destroy( &gp_spl_qp_mgr->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_pnp QP1 failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* + * Note that we don't release the reference taken in init_al_obj + * because we need one on behalf of the ib_reg_pnp call. 
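+ * The init_al_obj reference covers the QP0 registration, and the explicit
+ * ref_al_obj above covers the QP1 registration; both are released when
+ * destroying_spl_qp_mgr passes deref_al_obj to ib_dereg_pnp as the
+ * destroy callback.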
+ */ + + AL_EXIT( AL_DBG_SMI ); + return IB_SUCCESS; +} + + + +/* + * Pre-destroy the special QP manager. + */ +void +destroying_spl_qp_mgr( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + + CL_ASSERT( p_obj ); + CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) ); + UNUSED_PARAM( p_obj ); + + /* Deregister for port PnP events for QP0. */ + if( gp_spl_qp_mgr->h_qp0_pnp ) + { + status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp0_pnp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + /* Deregister for port PnP events for QP1. */ + if( gp_spl_qp_mgr->h_qp1_pnp ) + { + status = ib_dereg_pnp( gp_spl_qp_mgr->h_qp1_pnp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + /* Destroy the SMI polling timer. */ + cl_timer_destroy( &gp_spl_qp_mgr->poll_timer ); +} + + + +/* + * Free the special QP manager. + */ +void +free_spl_qp_mgr( + IN al_obj_t* p_obj ) +{ + CL_ASSERT( p_obj ); + CL_ASSERT( gp_spl_qp_mgr == PARENT_STRUCT( p_obj, spl_qp_mgr_t, obj ) ); + UNUSED_PARAM( p_obj ); + + destroy_al_obj( &gp_spl_qp_mgr->obj ); + cl_free( gp_spl_qp_mgr ); + gp_spl_qp_mgr = NULL; +} + + + +/* + * Special QP0 agent PnP event callback. + */ +ib_api_status_t +spl_qp0_agent_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ) +{ + ib_api_status_t status; + AL_ENTER( AL_DBG_SMI ); + + status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP0 ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Special QP1 agent PnP event callback. + */ +ib_api_status_t +spl_qp1_agent_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ) +{ + ib_api_status_t status; + AL_ENTER( AL_DBG_SMI ); + + status = spl_qp_agent_pnp( p_pnp_rec, IB_QPT_QP1 ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Special QP agent PnP event callback. + */ +ib_api_status_t +spl_qp_agent_pnp( + IN ib_pnp_rec_t* p_pnp_rec, + IN ib_qp_type_t qp_type ) +{ + ib_api_status_t status; + al_obj_t* p_obj; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_pnp_rec ); + p_obj = p_pnp_rec->context; + + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_SMI, + ("p_pnp_rec->pnp_event = 0x%x (%s)\n", + p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) ); + /* Dispatch based on the PnP event type. */ + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_PORT_ADD: + CL_ASSERT( !p_obj ); + status = create_spl_qp_svc( (ib_pnp_port_rec_t*)p_pnp_rec, qp_type ); + break; + + case IB_PNP_PORT_REMOVE: + CL_ASSERT( p_obj ); + ref_al_obj( p_obj ); + p_obj->pfn_destroy( p_obj, NULL ); + status = IB_SUCCESS; + break; + + case IB_PNP_LID_CHANGE: + CL_ASSERT( p_obj ); + spl_qp_svc_lid_change( p_obj, (ib_pnp_port_rec_t*)p_pnp_rec ); + status = IB_SUCCESS; + break; + + default: + /* All other events are ignored. */ + status = IB_SUCCESS; + break; + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Create a special QP service. 
+ */ +ib_api_status_t +create_spl_qp_svc( + IN ib_pnp_port_rec_t* p_pnp_rec, + IN const ib_qp_type_t qp_type ) +{ + cl_status_t cl_status; + spl_qp_svc_t* p_spl_qp_svc; + ib_ca_handle_t h_ca; + ib_cq_create_t cq_create; + ib_qp_create_t qp_create; + ib_qp_attr_t qp_attr; + ib_mad_svc_t mad_svc; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_pnp_rec ); + + if( ( qp_type != IB_QPT_QP0 ) && ( qp_type != IB_QPT_QP1 ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + CL_ASSERT( p_pnp_rec->pnp_rec.pnp_context ); + CL_ASSERT( p_pnp_rec->p_ca_attr ); + CL_ASSERT( p_pnp_rec->p_port_attr ); + + p_spl_qp_svc = cl_zalloc( sizeof( spl_qp_svc_t ) ); + if( !p_spl_qp_svc ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("IB_INSUFFICIENT_MEMORY\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Tie the special QP service to the port by setting the port number. */ + p_spl_qp_svc->port_num = p_pnp_rec->p_port_attr->port_num; + /* Store the port GUID to allow faster lookups of the dispatchers. */ + p_spl_qp_svc->port_guid = p_pnp_rec->p_port_attr->port_guid; + + /* Initialize the send and receive queues. */ + cl_qlist_init( &p_spl_qp_svc->send_queue ); + cl_qlist_init( &p_spl_qp_svc->recv_queue ); + cl_spinlock_init(&p_spl_qp_svc->cache_lock); + +#if defined( CL_USE_MUTEX ) + /* Initialize async callbacks and flags for send/receive processing. */ + p_spl_qp_svc->send_async_queued = FALSE; + p_spl_qp_svc->send_async_cb.pfn_callback = spl_qp_send_async_cb; + p_spl_qp_svc->recv_async_queued = FALSE; + p_spl_qp_svc->recv_async_cb.pfn_callback = spl_qp_recv_async_cb; +#endif + + /* Initialize the async callback function to process local sends. */ + p_spl_qp_svc->send_async.pfn_callback = send_local_mad_cb; + + /* Initialize the async callback function to reset the QP on error. */ + p_spl_qp_svc->reset_async.pfn_callback = spl_qp_svc_reset_cb; + + /* Construct the special QP service object. */ + construct_al_obj( &p_spl_qp_svc->obj, AL_OBJ_TYPE_SMI ); + + /* Initialize the special QP service object. */ + status = init_al_obj( &p_spl_qp_svc->obj, p_spl_qp_svc, TRUE, + destroying_spl_qp_svc, NULL, free_spl_qp_svc ); + if( status != IB_SUCCESS ) + { + free_spl_qp_svc( &p_spl_qp_svc->obj ); + return status; + } + + /* Attach the special QP service to the parent object. */ + status = attach_al_obj( + (al_obj_t* __ptr64)p_pnp_rec->pnp_rec.pnp_context, &p_spl_qp_svc->obj ); + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid ); + CL_ASSERT( h_ca ); + if( !h_ca ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, ("acquire_ca failed.\n") ); + return IB_INVALID_GUID; + } + + p_spl_qp_svc->obj.p_ci_ca = h_ca->obj.p_ci_ca; + + /* Determine the maximum queue depth of the QP and CQs. */ + p_spl_qp_svc->max_qp_depth = + ( p_pnp_rec->p_ca_attr->max_wrs < + p_pnp_rec->p_ca_attr->max_cqes ) ? + p_pnp_rec->p_ca_attr->max_wrs : + p_pnp_rec->p_ca_attr->max_cqes; + + /* Compare this maximum to the default special queue depth. 
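+ * The depth is capped at DEFAULT_QP0_DEPTH for QP0 and DEFAULT_QP1_DEPTH
+ * for QP1 so that a CA with very large limits does not inflate the CQ
+ * and receive-buffer footprint of the special QP service.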
*/ + if( ( qp_type == IB_QPT_QP0 ) && + ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP0_DEPTH ) ) + p_spl_qp_svc->max_qp_depth = DEFAULT_QP0_DEPTH; + if( ( qp_type == IB_QPT_QP1 ) && + ( p_spl_qp_svc->max_qp_depth > DEFAULT_QP1_DEPTH ) ) + p_spl_qp_svc->max_qp_depth = DEFAULT_QP1_DEPTH; + + /* Create the send CQ. */ + cl_memclr( &cq_create, sizeof( ib_cq_create_t ) ); + cq_create.size = p_spl_qp_svc->max_qp_depth; + cq_create.pfn_comp_cb = spl_qp_send_comp_cb; + + status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create, + p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_send_cq ); + + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_create_cq send CQ failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Reference the special QP service on behalf of ib_create_cq. */ + ref_al_obj( &p_spl_qp_svc->obj ); + + /* Check the result of the creation request. */ + if( cq_create.size < p_spl_qp_svc->max_qp_depth ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_create_cq allocated insufficient send CQ size\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Create the receive CQ. */ + cl_memclr( &cq_create, sizeof( ib_cq_create_t ) ); + cq_create.size = p_spl_qp_svc->max_qp_depth; + cq_create.pfn_comp_cb = spl_qp_recv_comp_cb; + + status = ib_create_cq( p_spl_qp_svc->obj.p_ci_ca->h_ca, &cq_create, + p_spl_qp_svc, spl_qp_svc_event_cb, &p_spl_qp_svc->h_recv_cq ); + + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_create_cq recv CQ failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Reference the special QP service on behalf of ib_create_cq. */ + ref_al_obj( &p_spl_qp_svc->obj ); + + /* Check the result of the creation request. */ + if( cq_create.size < p_spl_qp_svc->max_qp_depth ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_create_cq allocated insufficient recv CQ size\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Create the special QP. */ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = qp_type; + qp_create.sq_depth = p_spl_qp_svc->max_qp_depth; + qp_create.rq_depth = p_spl_qp_svc->max_qp_depth; + qp_create.sq_sge = 3; /* Three entries are required for segmentation. */ + qp_create.rq_sge = 1; + qp_create.h_sq_cq = p_spl_qp_svc->h_send_cq; + qp_create.h_rq_cq = p_spl_qp_svc->h_recv_cq; + qp_create.sq_signaled = TRUE; + + status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd, + p_pnp_rec->p_port_attr->port_guid, &qp_create, + p_spl_qp_svc, spl_qp_svc_event_cb, NULL, &p_spl_qp_svc->h_qp ); + + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_spl_qp failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Reference the special QP service on behalf of ib_get_spl_qp. */ + ref_al_obj( &p_spl_qp_svc->obj ); + + /* Check the result of the creation request. 
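+ * Verbs providers may round the requested sizes, so the actual QP
+ * attributes are queried back and verified before the QP is used.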
*/ + status = ib_query_qp( p_spl_qp_svc->h_qp, &qp_attr ); + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_query_qp failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + if( ( qp_attr.rq_depth < p_spl_qp_svc->max_qp_depth ) || + ( qp_attr.sq_depth < p_spl_qp_svc->max_qp_depth ) || + ( qp_attr.sq_sge < 3 ) || ( qp_attr.rq_sge < 1 ) ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_spl_qp allocated attributes are insufficient\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Initialize the QP for use. */ + status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL ); + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_init_dgrm_svc failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* Post receive buffers. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + status = spl_qp_svc_post_recvs( p_spl_qp_svc ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("spl_qp_svc_post_recvs failed, %s\n", + ib_get_err_str( status ) ) ); + return status; + } + + /* Create the MAD dispatcher. */ + status = create_mad_disp( &p_spl_qp_svc->obj, p_spl_qp_svc->h_qp, + &p_spl_qp_svc->h_mad_disp ); + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("create_mad_disp failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + + /* + * Add this service to the special QP manager lookup lists. + * The service must be added to allow the creation of a QP alias. + */ + cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock ); + if( qp_type == IB_QPT_QP0 ) + { + cl_qmap_insert( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid, + &p_spl_qp_svc->map_item ); + } + else + { + cl_qmap_insert( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid, + &p_spl_qp_svc->map_item ); + } + cl_spinlock_release( &gp_spl_qp_mgr->obj.lock ); + + /* + * If the CA does not support HW agents, create a QP alias and register + * a MAD service for sending responses from the local MAD interface. + */ + if( check_local_mad( p_spl_qp_svc->h_qp ) ) + { + /* Create a QP alias. */ + cl_memclr( &qp_create, sizeof( ib_qp_create_t ) ); + qp_create.qp_type = + ( qp_type == IB_QPT_QP0 ) ? IB_QPT_QP0_ALIAS : IB_QPT_QP1_ALIAS; + qp_create.sq_depth = p_spl_qp_svc->max_qp_depth; + qp_create.sq_sge = 1; + qp_create.sq_signaled = TRUE; + + status = ib_get_spl_qp( p_spl_qp_svc->obj.p_ci_ca->h_pd_alias, + p_pnp_rec->p_port_attr->port_guid, &qp_create, + p_spl_qp_svc, spl_qp_alias_event_cb, &p_spl_qp_svc->pool_key, + &p_spl_qp_svc->h_qp_alias ); + + if (status != IB_SUCCESS) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_get_spl_qp alias failed, %s\n", + ib_get_err_str( status ) ) ); + return status; + } + + /* Reference the special QP service on behalf of ib_get_spl_qp. */ + ref_al_obj( &p_spl_qp_svc->obj ); + + /* Register a MAD service for sends. 
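+ * Responses generated through the local MAD interface are completed on
+ * this QP alias via spl_qp_alias_send_cb and spl_qp_alias_recv_cb.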
*/ + cl_memclr( &mad_svc, sizeof( ib_mad_svc_t ) ); + mad_svc.mad_svc_context = p_spl_qp_svc; + mad_svc.pfn_mad_send_cb = spl_qp_alias_send_cb; + mad_svc.pfn_mad_recv_cb = spl_qp_alias_recv_cb; + + status = ib_reg_mad_svc( p_spl_qp_svc->h_qp_alias, &mad_svc, + &p_spl_qp_svc->h_mad_svc ); + + if( status != IB_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("ib_reg_mad_svc failed, %s\n", ib_get_err_str( status ) ) ); + return status; + } + } + + /* Set the context of the PnP event to this child object. */ + p_pnp_rec->pnp_rec.context = &p_spl_qp_svc->obj; + + /* The QP is ready. Change the state. */ + p_spl_qp_svc->state = SPL_QP_ACTIVE; + + /* Force a completion callback to rearm the CQs. */ + spl_qp_recv_comp_cb( p_spl_qp_svc->h_recv_cq, p_spl_qp_svc ); + spl_qp_send_comp_cb( p_spl_qp_svc->h_send_cq, p_spl_qp_svc ); + + /* Start the polling thread timer. */ + if( g_smi_poll_interval ) + { + cl_status = + cl_timer_trim( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval ); + + if( cl_status != CL_SUCCESS ) + { + p_spl_qp_svc->obj.pfn_destroy( &p_spl_qp_svc->obj, NULL ); + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("cl_timer_trim failed, status 0x%x\n", cl_status ) ); + return ib_convert_cl_status( cl_status ); + } + } + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_spl_qp_svc->obj ); + + AL_EXIT( AL_DBG_SMI ); + return IB_SUCCESS; +} + + + +/* + * Return a work completion to the MAD dispatcher for the specified MAD. + */ +static void +__complete_send_mad( + IN const al_mad_disp_handle_t h_mad_disp, + IN al_mad_wr_t* const p_mad_wr, + IN const ib_wc_status_t wc_status ) +{ + ib_wc_t wc; + + /* Construct a send work completion. */ + cl_memclr( &wc, sizeof( ib_wc_t ) ); + wc.wr_id = p_mad_wr->send_wr.wr_id; + wc.wc_type = IB_WC_SEND; + wc.status = wc_status; + + /* Set the send size if we were successful with the send. */ + if( wc_status == IB_WCS_SUCCESS ) + wc.length = MAD_BLOCK_SIZE; + + mad_disp_send_done( h_mad_disp, p_mad_wr, &wc ); +} + + + +/* + * Pre-destroy a special QP service. + */ +void +destroying_spl_qp_svc( + IN al_obj_t* p_obj ) +{ + spl_qp_svc_t* p_spl_qp_svc; + cl_list_item_t* p_list_item; + al_mad_wr_t* p_mad_wr; + + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_obj ); + p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj ); + + /* Change the state to prevent processing new send requests. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + p_spl_qp_svc->state = SPL_QP_DESTROYING; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Wait here until the special QP service is no longer in use. */ + while( p_spl_qp_svc->in_use_cnt ) + { + cl_thread_suspend( 0 ); + } + + /* Destroy the special QP. */ + if( p_spl_qp_svc->h_qp ) + { + /* If present, remove the special QP service from the tracking map. */ + cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock ); + if( p_spl_qp_svc->h_qp->type == IB_QPT_QP0 ) + { + cl_qmap_remove( &gp_spl_qp_mgr->smi_map, p_spl_qp_svc->port_guid ); + } + else + { + cl_qmap_remove( &gp_spl_qp_mgr->gsi_map, p_spl_qp_svc->port_guid ); + } + cl_spinlock_release( &gp_spl_qp_mgr->obj.lock ); + + status = ib_destroy_qp( p_spl_qp_svc->h_qp, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + + /* Complete any outstanding MAD send operations as "flushed". 
*/ + for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ); + p_list_item != cl_qlist_end( &p_spl_qp_svc->send_queue ); + p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->send_queue ) ) + { + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item ); + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_WR_FLUSHED_ERR ); + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + } + + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + /* Receive MAD elements are returned to the pool by the free routine. */ + } + + /* Destroy the special QP alias and CQs. */ + if( p_spl_qp_svc->h_qp_alias ) + { + status = ib_destroy_qp( p_spl_qp_svc->h_qp_alias, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + if( p_spl_qp_svc->h_send_cq ) + { + status = ib_destroy_cq( p_spl_qp_svc->h_send_cq, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + if( p_spl_qp_svc->h_recv_cq ) + { + status = ib_destroy_cq( p_spl_qp_svc->h_recv_cq, + (ib_pfn_destroy_cb_t)deref_al_obj ); + CL_ASSERT( status == IB_SUCCESS ); + } + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Free a special QP service. + */ +void +free_spl_qp_svc( + IN al_obj_t* p_obj ) +{ + spl_qp_svc_t* p_spl_qp_svc; + cl_list_item_t* p_list_item; + al_mad_element_t* p_al_mad; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_obj ); + p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj ); + + /* Dereference the CA. */ + if( p_spl_qp_svc->obj.p_ci_ca ) + deref_al_obj( &p_spl_qp_svc->obj.p_ci_ca->h_ca->obj ); + + /* Return receive MAD elements to the pool. */ + for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ); + p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue ); + p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) ) + { + p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item ); + + status = ib_put_mad( &p_al_mad->element ); + CL_ASSERT( status == IB_SUCCESS ); + } + + CL_ASSERT( cl_is_qlist_empty( &p_spl_qp_svc->send_queue ) ); + + destroy_al_obj( &p_spl_qp_svc->obj ); + cl_free( p_spl_qp_svc ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Update the base LID of a special QP service. + */ +void +spl_qp_svc_lid_change( + IN al_obj_t* p_obj, + IN ib_pnp_port_rec_t* p_pnp_rec ) +{ + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_obj ); + CL_ASSERT( p_pnp_rec ); + CL_ASSERT( p_pnp_rec->p_port_attr ); + + p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj ); + + p_spl_qp_svc->base_lid = p_pnp_rec->p_port_attr->lid; + p_spl_qp_svc->lmc = p_pnp_rec->p_port_attr->lmc; + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Route a send work request. + */ +mad_route_t +route_mad_send( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_send_wr_t* const p_send_wr ) +{ + al_mad_wr_t* p_mad_wr; + al_mad_send_t* p_mad_send; + ib_mad_t* p_mad; + ib_smp_t* p_smp; + ib_av_handle_t h_av; + mad_route_t route; + boolean_t local, loopback, discard; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_send_wr ); + + /* Initialize pointers to the MAD work request and the MAD. */ + p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr ); + p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr ); + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + p_smp = (ib_smp_t*)p_mad; + + /* Check if the CA has a local MAD interface. 
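+ * If check_local_mad returns FALSE, the CA's hardware agent consumes + * these MADs below verbs and the send is simply routed remotely; the + * checks below apply only when this host-side agent does the processing.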
*/ + local = loopback = discard = FALSE; + if( check_local_mad( p_spl_qp_svc->h_qp ) ) + { + /* + * If the MAD is a locally addressed Subnet Management, Performance + * Management, or Connection Management datagram, process the work + * request locally. + */ + h_av = p_send_wr->dgrm.ud.h_av; + switch( p_mad->mgmt_class ) + { + case IB_MCLASS_SUBN_DIR: + /* Perform special checks on directed route SMPs. */ + if( ib_smp_is_response( p_smp ) ) + { + /* + * This node is the originator of the response. Discard + * if the hop count or pointer is zero, an intermediate hop, + * out of bounds hop, or if the first port of the directed + * route return path is not this port. + */ + if( ( p_smp->hop_count == 0 ) || ( p_smp->hop_ptr == 0 ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("hop cnt or hop ptr set to 0...discarding\n") ); + discard = TRUE; + } + else if( p_smp->hop_count != ( p_smp->hop_ptr - 1 ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("hop cnt != (hop ptr - 1)...discarding\n") ); + discard = TRUE; + } + else if( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("hop cnt >= max hops...discarding\n") ); + discard = TRUE; + } + else if( ( p_smp->dr_dlid == IB_LID_PERMISSIVE ) && + ( p_smp->return_path[ p_smp->hop_ptr - 1 ] != + p_spl_qp_svc->port_num ) ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("return path[hop ptr - 1] != port num...discarding\n") ); + discard = TRUE; + } + } + else + { + /* The SMP is a request. */ + if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) || + ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) ) + { + discard = TRUE; + } + else if( ( p_smp->hop_count == 0 ) && ( p_smp->hop_ptr == 0 ) ) + { + /* Self Addressed: Sent locally, routed locally. */ + local = TRUE; + discard = ( p_smp->dr_slid != IB_LID_PERMISSIVE ) || + ( p_smp->dr_dlid != IB_LID_PERMISSIVE ); + } + else if( ( p_smp->hop_count != 0 ) && + ( p_smp->hop_count == ( p_smp->hop_ptr - 1 ) ) ) + { + /* End of Path: Sent remotely, routed locally. */ + local = TRUE; + } + else if( ( p_smp->hop_count != 0 ) && + ( p_smp->hop_ptr == 0 ) ) + { + /* Beginning of Path: Sent locally, routed remotely. */ + if( p_smp->dr_slid == IB_LID_PERMISSIVE ) + { + discard = + ( p_smp->initial_path[ p_smp->hop_ptr + 1 ] != + p_spl_qp_svc->port_num ); + } + } + else + { + /* Intermediate hop. */ + discard = TRUE; + } + } + /* Loopback locally addressed SM to SM "heartbeat" messages. */ + loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO); + break; + + case IB_MCLASS_SUBN_LID: + /* Loopback locally addressed SM to SM "heartbeat" messages. */ + loopback = (p_mad->attr_id == IB_MAD_ATTR_SM_INFO); + + /* Fall through to check for a local MAD. */ + + case IB_MCLASS_PERF: + case IB_MCLASS_BM: + local = ( h_av && + ( h_av->av_attr.dlid == + ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) ); + break; + + default: + /* Route vendor specific MADs to the HCA provider. */ + if( ib_class_is_vendor_specific( p_mad->mgmt_class ) ) + { + local = ( h_av && + ( h_av->av_attr.dlid == + ( h_av->av_attr.path_bits | p_spl_qp_svc->base_lid ) ) ); + } + break; + } + } + + route = ( p_mad_send->p_send_mad->send_opt & IB_SEND_OPT_LOCAL ) ? + ROUTE_LOCAL : ROUTE_REMOTE; + if( local ) route = ROUTE_LOCAL; + if( loopback && local ) route = ROUTE_LOOPBACK; + if( discard ) route = ROUTE_DISCARD; + + AL_EXIT( AL_DBG_SMI ); + return route; +} + + + +/* + * Send a work request on the special QP. 
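+ * Routing precedence in route_mad_send above: DISCARD overrides LOOPBACK, + * which overrides LOCAL, which overrides REMOTE. For example, a locally + * addressed SubnGet(SMInfo) sets both local and loopback and is therefore + * looped back between local managers rather than posted to the wire.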
+ */ +ib_api_status_t +spl_qp_svc_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr ) +{ + spl_qp_svc_t* p_spl_qp_svc; + al_mad_wr_t* p_mad_wr; + mad_route_t route; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( h_qp ); + CL_ASSERT( p_send_wr ); + + /* Get the special QP service. */ + p_spl_qp_svc = (spl_qp_svc_t*)h_qp->obj.context; + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_spl_qp_svc->h_qp == h_qp ); + + /* Determine how to route the MAD. */ + route = route_mad_send( p_spl_qp_svc, p_send_wr ); + + /* + * Check the QP state and guard against error handling. Also, + * to maintain proper order of work completions, delay processing + * a local MAD until any remote MAD work requests have completed, + * and delay processing a remote MAD until local MAD work requests + * have completed. + */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( (p_spl_qp_svc->state != SPL_QP_ACTIVE) || p_spl_qp_svc->local_mad_wr || + (is_local(route) && !cl_is_qlist_empty( &p_spl_qp_svc->send_queue )) || + ( cl_qlist_count( &p_spl_qp_svc->send_queue ) >= + p_spl_qp_svc->max_qp_depth ) ) + { + /* + * Return busy status. + * The special QP will resume sends at this point. + */ + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + AL_EXIT( AL_DBG_SMI ); + return IB_RESOURCE_BUSY; + } + + p_mad_wr = PARENT_STRUCT( p_send_wr, al_mad_wr_t, send_wr ); + + if( is_local( route ) ) + { + /* Save the local MAD work request for processing. */ + p_spl_qp_svc->local_mad_wr = p_mad_wr; + + /* Flag the service as in use by the asynchronous processing thread. */ + cl_atomic_inc( &p_spl_qp_svc->in_use_cnt ); + + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + status = local_mad_send( p_spl_qp_svc, p_mad_wr ); + } + else + { + /* Process a remote MAD send work request. */ + status = remote_mad_send( p_spl_qp_svc, p_mad_wr ); + + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Process a remote MAD send work request. Called holding the spl_qp_svc lock. + */ +ib_api_status_t +remote_mad_send( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_smp_t* p_smp; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr ); + + /* Perform outbound MAD processing. */ + + /* Adjust directed route SMPs as required by IBA. */ + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + if( ib_smp_is_response( p_smp ) ) + { + if( p_smp->dr_dlid == IB_LID_PERMISSIVE ) + p_smp->hop_ptr--; + } + else if( p_smp->dr_slid == IB_LID_PERMISSIVE ) + { + /* + * Only update the pointer if the hw_agent is not implemented. + * Fujitsu implements SMI in hardware, so the following has to + * be passed down to the hardware SMI. + */ + ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca ); + if( !p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents ) + p_smp->hop_ptr++; + ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca ); + } + } + + /* Always generate send completions. */ + p_mad_wr->send_wr.send_opt |= IB_SEND_OPT_SIGNALED; + + /* Queue the MAD work request on the service tracking queue. 
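+ * The request is queued before ib_post_send so a completion arriving on + * another thread can always find it; the error path below removes it and + * undoes the directed route hop pointer adjustment.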
*/ + cl_qlist_insert_tail( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item ); + + status = ib_post_send( p_spl_qp_svc->h_qp, &p_mad_wr->send_wr, NULL ); + + if( status != IB_SUCCESS ) + { + cl_qlist_remove_item( &p_spl_qp_svc->send_queue, &p_mad_wr->list_item ); + + /* Reset directed route SMPs as required by IBA. */ + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + if( ib_smp_is_response( p_smp ) ) + { + if( p_smp->dr_dlid == IB_LID_PERMISSIVE ) + p_smp->hop_ptr++; + } + else if( p_smp->dr_slid == IB_LID_PERMISSIVE ) + { + /* Only update if the hw_agent is not implemented. */ + ci_ca_lock_attr( p_spl_qp_svc->obj.p_ci_ca ); + if( p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr->hw_agents == FALSE ) + p_smp->hop_ptr--; + ci_ca_unlock_attr( p_spl_qp_svc->obj.p_ci_ca ); + } + } + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +/* + * Handle a MAD destined for the local CA, using cached data + * as much as possible. + */ +static ib_api_status_t +local_mad_send( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + mad_route_t route; + ib_api_status_t status = IB_SUCCESS; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Determine how to route the MAD. */ + route = route_mad_send( p_spl_qp_svc, &p_mad_wr->send_wr ); + + /* Check if this MAD should be discarded. */ + if( is_discard( route ) ) + { + /* Deliver a "work completion" to the dispatcher. */ + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + status = IB_INVALID_SETTING; + } + else if( is_loopback( route ) ) + { + /* Loopback local SM to SM "heartbeat" messages. */ + status = loopback_mad( p_spl_qp_svc, p_mad_wr ); + } + else + { + switch( get_mad_hdr_from_wr( p_mad_wr )->mgmt_class ) + { + case IB_MCLASS_SUBN_DIR: + case IB_MCLASS_SUBN_LID: + //DO not use the cache in order to force Mkey check + status = __process_subn_mad( p_spl_qp_svc, p_mad_wr ); + //status = IB_NOT_DONE; + break; + + default: + status = IB_NOT_DONE; + } + } + + if( status == IB_NOT_DONE ) + { + /* Queue an asynchronous processing item to process the local MAD. */ + cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async ); + } + else + { + /* + * Clear the local MAD pointer to allow processing of other MADs. + * This is done after polling for attribute changes to ensure that + * subsequent MADs pick up any changes performed by this one. + */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + p_spl_qp_svc->local_mad_wr = NULL; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* No longer in use by the asynchronous processing thread. */ + cl_atomic_dec( &p_spl_qp_svc->in_use_cnt ); + + /* Special QP operations will resume by unwinding. */ + } + + AL_EXIT( AL_DBG_SMI ); + return IB_SUCCESS; +} + + +static ib_api_status_t +get_resp_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr, + OUT ib_mad_element_t** const pp_mad_resp ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + CL_ASSERT( pp_mad_resp ); + + /* Get a MAD element from the pool for the response. 
*/ + status = ib_get_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->pool_key, + MAD_BLOCK_SIZE, pp_mad_resp ); + if( status != IB_SUCCESS ) + { + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static ib_api_status_t +complete_local_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr, + IN ib_mad_element_t* const p_mad_resp ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + CL_ASSERT( p_mad_resp ); + + /* Construct the receive MAD element. */ + p_mad_resp->status = IB_WCS_SUCCESS; + p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp; + p_mad_resp->remote_lid = p_spl_qp_svc->base_lid; + if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE ) + { + p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data; + p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE; + } + + /* + * Hand the receive MAD element to the dispatcher before completing + * the send. This guarantees that the send request cannot time out. + */ + status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp ); + + /* Forward the send work completion to the dispatcher. */ + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static ib_api_status_t +loopback_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + + /* Simulate a send/receive between local managers. */ + cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE ); + + /* Construct the receive MAD element. */ + p_mad_resp->status = IB_WCS_SUCCESS; + p_mad_resp->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp; + p_mad_resp->remote_lid = p_spl_qp_svc->base_lid; + if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE ) + { + p_mad_resp->immediate_data = p_mad_wr->send_wr.immediate_data; + p_mad_resp->recv_opt |= IB_RECV_OPT_IMMEDIATE; + } + + /* + * Hand the receive MAD element to the dispatcher before completing + * the send. This guarantees that the send request cannot time out. + */ + status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_resp ); + + /* Forward the send work completion to the dispatcher. */ + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS ); + + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static void +__update_guid_info( + IN spl_qp_cache_t* const p_cache, + IN const ib_smp_t* const p_mad ) +{ + uint32_t idx; + + /* Get the table selector from the attribute */ + idx = cl_ntoh32( p_mad->attr_mod ); + + /* + * We only get successful MADs here, so invalid settings + * shouldn't happen. 
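+ * The attribute modifier selects one GuidInfo block of eight GUIDs, so + * block indices 0..31 cover up to 256 GUIDs (e.g. idx 2 caches GUIDs + * 16..23 of the port's GUID table).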
+ */ + CL_ASSERT( idx <= 31 ); + + cl_memcpy( &p_cache->guid_block[idx].tbl, + ib_smp_get_payload_ptr( p_mad ), + sizeof(ib_guid_info_t) ); + p_cache->guid_block[idx].valid = TRUE; +} + + +static void +__update_pkey_table( + IN spl_qp_cache_t* const p_cache, + IN const ib_smp_t* const p_mad ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod )); + + CL_ASSERT( idx <= 2047 ); + + cl_memcpy( &p_cache->pkey_tbl[idx].tbl, + ib_smp_get_payload_ptr( p_mad ), + sizeof(ib_pkey_table_t) ); + p_cache->pkey_tbl[idx].valid = TRUE; +} + + +static void +__update_sl_vl_table( + IN spl_qp_cache_t* const p_cache, + IN const ib_smp_t* const p_mad ) +{ + cl_memcpy( &p_cache->sl_vl.tbl, + ib_smp_get_payload_ptr( p_mad ), + sizeof(ib_slvl_table_t) ); + p_cache->sl_vl.valid = TRUE; +} + + +static void +__update_vl_arb_table( + IN spl_qp_cache_t* const p_cache, + IN const ib_smp_t* const p_mad ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1; + + CL_ASSERT( idx <= 3 ); + + cl_memcpy( &p_cache->vl_arb[idx].tbl, + ib_smp_get_payload_ptr( p_mad ), + sizeof(ib_vl_arb_table_t) ); + p_cache->vl_arb[idx].valid = TRUE; +} + + + +void +spl_qp_svc_update_cache( + IN spl_qp_svc_t *p_spl_qp_svc, + IN ib_smp_t *p_mad ) +{ + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad ); + CL_ASSERT( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR || + p_mad->mgmt_class == IB_MCLASS_SUBN_LID); + CL_ASSERT(!p_mad->status); + + cl_spinlock_acquire(&p_spl_qp_svc->cache_lock); + + switch( p_mad->attr_id ) + { + case IB_MAD_ATTR_GUID_INFO: + __update_guid_info( + &p_spl_qp_svc->cache, p_mad ); + break; + + case IB_MAD_ATTR_P_KEY_TABLE: + __update_pkey_table( + &p_spl_qp_svc->cache, p_mad ); + break; + + case IB_MAD_ATTR_SLVL_TABLE: + __update_sl_vl_table( + &p_spl_qp_svc->cache, p_mad ); + break; + + case IB_MAD_ATTR_VL_ARBITRATION: + __update_vl_arb_table( + &p_spl_qp_svc->cache, p_mad ); + break; + + default: + break; + } + + cl_spinlock_release(&p_spl_qp_svc->cache_lock); +} + + + +static ib_api_status_t +__process_node_info( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_smp_t *p_smp; + ib_node_info_t *p_node_info; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_port_attr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + if( p_mad->method != IB_MAD_METHOD_GET ) + { + /* Node info is a GET-only attribute. */ + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + AL_EXIT( AL_DBG_SMI ); + return IB_INVALID_SETTING; + } + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf; + cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE ); + p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_smp->status = IB_SMP_DIRECTION; + else + p_smp->status = 0; + + p_node_info = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + + /* + * Fill in the node info, protecting against the + * attributes being changed by PnP. 
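+ * NodeInfo combines the 24-bit vendor OUI and the local port number in a + * single 32-bit field, which is why vend_id is masked with 0x00FFFFFF and + * OR'd with port_num below.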
+ */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.p_ci_ca->obj.lock ); + + p_ca_attr = p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr; + p_port_attr = &p_ca_attr->p_port_attr[p_spl_qp_svc->port_num - 1]; + + p_node_info->base_version = 1; + p_node_info->class_version = 1; + p_node_info->node_type = IB_NODE_TYPE_CA; + p_node_info->num_ports = p_ca_attr->num_ports; + p_node_info->sys_guid = p_ca_attr->system_image_guid; + p_node_info->node_guid = p_ca_attr->ca_guid; + p_node_info->port_guid = p_port_attr->port_guid; + p_node_info->partition_cap = cl_hton16( p_port_attr->num_pkeys ); + p_node_info->device_id = cl_hton16( p_ca_attr->dev_id ); + p_node_info->revision = cl_hton32( p_ca_attr->revision ); + p_node_info->port_num_vendor_id = + cl_hton32( p_ca_attr->vend_id & 0x00FFFFFF ) | p_port_attr->port_num; + cl_spinlock_release( &p_spl_qp_svc->obj.p_ci_ca->obj.lock ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static ib_api_status_t +__process_node_desc( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + if( p_mad->method != IB_MAD_METHOD_GET ) + { + /* Node description is a GET-only attribute. */ + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + AL_EXIT( AL_DBG_SMI ); + return IB_INVALID_SETTING; + } + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + cl_memcpy( p_mad_resp->p_mad_buf, p_mad, MAD_BLOCK_SIZE ); + p_mad_resp->p_mad_buf->method = + (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_mad_resp->p_mad_buf->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_resp->p_mad_buf->status = IB_SMP_DIRECTION; + else + p_mad_resp->p_mad_buf->status = 0; + /* Set the node description to the machine name. */ + cl_memcpy( ((ib_smp_t*)p_mad_resp->p_mad_buf)->data, + node_desc, sizeof(node_desc) ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + +static ib_api_status_t +__process_guid_info( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_smp_t *p_smp; + ib_guid_info_t *p_guid_info; + uint16_t idx; + ib_api_status_t status; + + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + + /* Get the table selector from the attribute */ + idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod )); + + /* + * TODO: Set up the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 31 ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + if( !p_spl_qp_svc->cache.guid_block[idx].valid ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ), + &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ) ) + { + /* The set is requesting a change. */ + return IB_NOT_DONE; + } + } + + /* Get a MAD element from the pool for the response. 
*/ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf; + + /* Set up the response MAD. */ + cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE ); + p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_smp->status = IB_SMP_DIRECTION; + else + p_smp->status = 0; + + p_guid_info = (ib_guid_info_t*)ib_smp_get_payload_ptr( p_smp ); + + // TODO: do we need a lock on the cache? + + /* Copy the cached data. */ + cl_memcpy( p_guid_info, + &p_spl_qp_svc->cache.guid_block[idx].tbl, sizeof(ib_guid_info_t) ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static ib_api_status_t +__process_pkey_table( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_smp_t *p_smp; + ib_pkey_table_t *p_pkey_table; + uint16_t idx; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + + /* Get the table selector from the attribute */ + idx = ((uint16_t)cl_ntoh32( p_mad->attr_mod )); + + /* + * TODO: Set up the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 2047 ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + + if( !p_spl_qp_svc->cache.pkey_tbl[idx].valid ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ), + &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) ) + { + /* The set is requesting a change. */ + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + } + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf; + + /* Set up the response MAD. */ + cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE ); + p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_smp->status = IB_SMP_DIRECTION; + else + p_smp->status = 0; + + p_pkey_table = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp ); + + // TODO: do we need a lock on the cache? + + /* Copy the cached data. */ + cl_memcpy( p_pkey_table, + &p_spl_qp_svc->cache.pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +static ib_api_status_t +__process_slvl_table( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + + + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_smp_t *p_smp; + ib_slvl_table_t *p_slvl_table; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + + if( !p_spl_qp_svc->cache.sl_vl.valid ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. 
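+ * Answering an identical Set from the cache avoids a round trip to the + * HCA; a Set that actually changes the table returns IB_NOT_DONE and is + * forwarded to the hardware, whose successful response then refreshes the + * cache via spl_qp_svc_update_cache.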
+ */ + if( p_mad->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ), + &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ) ) + { + /* The set is requesting a change. */ + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + } + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf; + + /* Set up the response MAD. */ + cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE ); + p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_smp->status = IB_SMP_DIRECTION; + else + p_smp->status = 0; + + p_slvl_table = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp ); + + // TODO: do we need a lock on the cache? + + /* Copy the cached data. */ + cl_memcpy( p_slvl_table, + &p_spl_qp_svc->cache.sl_vl.tbl, sizeof(ib_slvl_table_t) ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +static ib_api_status_t +__process_vl_arb_table( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + + ib_mad_t *p_mad; + ib_mad_element_t *p_mad_resp; + ib_smp_t *p_smp; + ib_vl_arb_table_t *p_vl_arb_table; + uint16_t idx; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + + /* Get the table selector from the attribute */ + idx = ((uint16_t)(cl_ntoh32( p_mad->attr_mod ) >> 16)) - 1; + + /* + * TODO: Set up the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 3 ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + + if( !p_spl_qp_svc->cache.vl_arb[idx].valid ) + { + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad ), + &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) ) + { + /* The set is requesting a change. */ + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + } + + /* Get a MAD element from the pool for the response. */ + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_resp ); + if( status == IB_SUCCESS ) + { + p_smp = (ib_smp_t*)p_mad_resp->p_mad_buf; + + /* Set up the response MAD. */ + cl_memcpy( p_smp, p_mad, MAD_BLOCK_SIZE ); + p_smp->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_smp->status = IB_SMP_DIRECTION; + else + p_smp->status = 0; + + p_vl_arb_table = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp ); + + // TODO: do we need a lock on the cache? + + /* Copy the cached data. */ + cl_memcpy( p_vl_arb_table, + &p_spl_qp_svc->cache.vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ); + + status = complete_local_mad( p_spl_qp_svc, p_mad_wr, p_mad_resp ); + } + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + + +/* + * Process subnet management MADs using cached data if possible. 
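+ * The cache may answer only after a successful M-Key match; a mismatch + * disables it (cache_en = FALSE) and IB_NOT_DONE pushes the MAD down to + * the HCA so the full M-Key protection logic runs there.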
+ */ +static ib_api_status_t +__process_subn_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_api_status_t status; + ib_smp_t *p_smp; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr ); + + CL_ASSERT( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR || + p_smp->mgmt_class == IB_MCLASS_SUBN_LID ); + + /* Simple M-Key check. */ + if( p_spl_qp_svc->m_key && p_smp->m_key == p_spl_qp_svc->m_key ) + { + if(!p_spl_qp_svc->cache_en ) + { + p_spl_qp_svc->cache_en = TRUE; + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + } + else + { + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check failed \n")); + AL_PRINT(TRACE_LEVEL_WARNING, AL_DBG_SMI, ("Mkey check SMP= 0x%08x:%08x SVC = 0x%08x:%08x \n", + ((uint32_t*)&p_smp->m_key)[0],((uint32_t*)&p_smp->m_key)[1], + ((uint32_t*)&p_spl_qp_svc->m_key)[0],((uint32_t*)&p_spl_qp_svc->m_key)[1])); + + p_spl_qp_svc->cache_en = FALSE; + AL_EXIT( AL_DBG_SMI ); + return IB_NOT_DONE; + } + + cl_spinlock_acquire(&p_spl_qp_svc->cache_lock); + + switch( p_smp->attr_id ) + { + case IB_MAD_ATTR_NODE_INFO: + status = __process_node_info( p_spl_qp_svc, p_mad_wr ); + break; + + case IB_MAD_ATTR_NODE_DESC: + status = __process_node_desc( p_spl_qp_svc, p_mad_wr ); + break; + + case IB_MAD_ATTR_GUID_INFO: + status = __process_guid_info( p_spl_qp_svc, p_mad_wr ); + break; + + case IB_MAD_ATTR_P_KEY_TABLE: + status = __process_pkey_table( p_spl_qp_svc, p_mad_wr ); + break; + + case IB_MAD_ATTR_SLVL_TABLE: + status = __process_slvl_table( p_spl_qp_svc, p_mad_wr ); + break; + + case IB_MAD_ATTR_VL_ARBITRATION: + status = __process_vl_arb_table( p_spl_qp_svc, p_mad_wr ); + break; + + default: + status = IB_NOT_DONE; + break; + } + + cl_spinlock_release(&p_spl_qp_svc->cache_lock); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +/* + * Process a local MAD send work request. + */ +static ib_api_status_t +fwd_local_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN al_mad_wr_t* const p_mad_wr ) +{ + ib_mad_t* p_mad; + ib_smp_t* p_smp; + al_mad_send_t* p_mad_send; + ib_mad_element_t* p_mad_response = NULL; + ib_mad_t* p_mad_response_buf; + ib_api_status_t status = IB_SUCCESS; + boolean_t smp_is_set; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_wr ); + + /* Initialize pointers to the MAD work request and outbound MAD. */ + p_mad = get_mad_hdr_from_wr( p_mad_wr ); + p_smp = (ib_smp_t*)p_mad; + + smp_is_set = (p_smp->method == IB_MAD_METHOD_SET); + + /* Get a MAD element from the pool for the response. */ + p_mad_send = PARENT_STRUCT( p_mad_wr, al_mad_send_t, mad_wr ); + if( p_mad_send->p_send_mad->resp_expected ) + { + status = get_resp_mad( p_spl_qp_svc, p_mad_wr, &p_mad_response ); + if( status != IB_SUCCESS ) + { + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + AL_EXIT( AL_DBG_SMI ); + return status; + } + p_mad_response_buf = p_mad_response->p_mad_buf; + } + else + { + p_mad_response_buf = NULL; + } + + /* Adjust directed route SMPs as required by IBA. */ + if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + CL_ASSERT( !ib_smp_is_response( p_smp ) ); + + /* + * If this was a self addressed, directed route SMP, increment + * the hop pointer in the request before delivery as required + * by IBA. Otherwise, adjustment for remote requests occurs + * during inbound processing. + */ + if( p_smp->hop_count == 0 ) + p_smp->hop_ptr++; + } + + /* Forward the locally addressed MAD to the CA interface. 
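+ * al_local_mad appears to execute the MAD against this CA synchronously; + * when a response is expected, the reply is written into + * p_mad_response_buf before the call returns.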
*/ + status = al_local_mad( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_ca, + p_spl_qp_svc->port_num, &p_mad_wr->send_wr.dgrm.ud.h_av->av_attr, p_mad, p_mad_response_buf ); + + /* Reset directed route SMPs as required by IBA. */ + if( p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + /* + * If this was a self addressed, directed route SMP, decrement + * the hop pointer in the response before delivery as required + * by IBA. Otherwise, adjustment for remote responses occurs + * during outbound processing. + */ + if( p_smp->hop_count == 0 ) + { + /* Adjust the request SMP. */ + p_smp->hop_ptr--; + + /* Adjust the response SMP. */ + if( p_mad_response_buf ) + { + p_smp = (ib_smp_t*)p_mad_response_buf; + p_smp->hop_ptr--; + } + } + } + + if( status != IB_SUCCESS ) + { + if( p_mad_response ) + ib_put_mad( p_mad_response ); + + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, + IB_WCS_LOCAL_OP_ERR ); + AL_EXIT( AL_DBG_SMI ); + return status; + } + + /* Check the completion status of this simulated send. */ + if( p_mad_send->p_send_mad->resp_expected ) + { + /* + * The SMI uses PnP polling to refresh the base_lid and lmc. + * Polling takes time, so we update the values here to prevent + * the failure of LID routed MADs sent immediately following this + * assignment. Check the response to see if the port info was set. + */ + if( smp_is_set ) + { + ib_smp_t* p_smp_response = NULL; + + switch( p_mad_response_buf->mgmt_class ) + { + case IB_MCLASS_SUBN_DIR: + if( ib_smp_get_status( p_smp ) == IB_SA_MAD_STATUS_SUCCESS ) + { + p_smp_response = p_smp; + //p_port_info = + // (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp ); + } + break; + + case IB_MCLASS_SUBN_LID: + if( p_mad_response_buf->status == IB_SA_MAD_STATUS_SUCCESS ) + { + p_smp_response = (ib_smp_t*)p_mad_response_buf; + //p_port_info = + // (ib_port_info_t*)ib_smp_get_payload_ptr((ib_smp_t*)p_mad_response_buf); + } + break; + + default: + break; + } + + if( p_smp_response ) + { + switch( p_smp_response->attr_id ) + { + case IB_MAD_ATTR_PORT_INFO: + { + ib_port_info_t *p_port_info = + (ib_port_info_t*)ib_smp_get_payload_ptr(p_smp_response); + p_spl_qp_svc->base_lid = p_port_info->base_lid; + p_spl_qp_svc->lmc = ib_port_info_get_lmc( p_port_info ); + p_spl_qp_svc->sm_lid = p_port_info->master_sm_base_lid; + p_spl_qp_svc->sm_sl = ib_port_info_get_sm_sl( p_port_info ); + + if(p_port_info->m_key) + p_spl_qp_svc->m_key = p_port_info->m_key; + if (p_port_info->subnet_timeout & 0x80) + { + AL_PRINT( TRACE_LEVEL_INFORMATION, AL_DBG_PNP, + ("Client reregister event, setting sm_lid to 0.\n")); + ci_ca_lock_attr(p_spl_qp_svc->obj.p_ci_ca); + p_spl_qp_svc->obj.p_ci_ca->p_pnp_attr-> + p_port_attr[p_port_info->local_port_num - 1].sm_lid = 0; + ci_ca_unlock_attr(p_spl_qp_svc->obj.p_ci_ca); + } + } + break; + case IB_MAD_ATTR_P_KEY_TABLE: + case IB_MAD_ATTR_GUID_INFO: + case IB_MAD_ATTR_SLVL_TABLE: + case IB_MAD_ATTR_VL_ARBITRATION: + spl_qp_svc_update_cache( p_spl_qp_svc, p_smp_response); + break; + default: + break; + } + } + } + + + /* Construct the receive MAD element. */ + p_mad_response->status = IB_WCS_SUCCESS; + p_mad_response->remote_qp = p_mad_wr->send_wr.dgrm.ud.remote_qp; + p_mad_response->remote_lid = p_spl_qp_svc->base_lid; + if( p_mad_wr->send_wr.send_opt & IB_RECV_OPT_IMMEDIATE ) + { + p_mad_response->immediate_data = p_mad_wr->send_wr.immediate_data; + p_mad_response->recv_opt |= IB_RECV_OPT_IMMEDIATE; + } + + /* + * Hand the receive MAD element to the dispatcher before completing + * the send. 
This guarantees that the send request cannot time out. + */ + status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_response ); + } + + __complete_send_mad( p_spl_qp_svc->h_mad_disp, p_mad_wr, IB_WCS_SUCCESS ); + + + + /* If the SMP was a Get, no need to trigger a PnP poll. */ + if( status == IB_SUCCESS && !smp_is_set ) + status = IB_NOT_DONE; + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Asynchronous processing thread callback to send a local MAD. + */ +void +send_local_mad_cb( + IN cl_async_proc_item_t* p_item ) +{ + spl_qp_svc_t* p_spl_qp_svc; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_item ); + p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async ); + + /* Process a local MAD send work request. */ + CL_ASSERT( p_spl_qp_svc->local_mad_wr ); + status = fwd_local_mad( p_spl_qp_svc, p_spl_qp_svc->local_mad_wr ); + + /* + * A successfully processed local MAD could have changed something + * (e.g. the LID) on the HCA, so scan for changes. + */ + if( status == IB_SUCCESS ) + pnp_poll(); + + /* + * Clear the local MAD pointer to allow processing of other MADs. + * This is done after polling for attribute changes to ensure that + * subsequent MADs pick up any changes performed by this one. + */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + p_spl_qp_svc->local_mad_wr = NULL; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Continue processing any queued MADs on the QP. */ + special_qp_resume_sends( p_spl_qp_svc->h_qp ); + + /* No longer in use by the asynchronous processing thread. */ + cl_atomic_dec( &p_spl_qp_svc->in_use_cnt ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Special QP send completion callback. + */ +void +spl_qp_send_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void* cq_context ) +{ + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( cq_context ); + p_spl_qp_svc = cq_context; + +#if defined( CL_USE_MUTEX ) + + /* Queue an asynchronous processing item to process sends. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( !p_spl_qp_svc->send_async_queued ) + { + p_spl_qp_svc->send_async_queued = TRUE; + ref_al_obj( &p_spl_qp_svc->obj ); + cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->send_async_cb ); + } + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + +#else + + /* Invoke the callback directly. */ + CL_ASSERT( h_cq == p_spl_qp_svc->h_send_cq ); + spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_SEND ); + + /* Continue processing any queued MADs on the QP. */ + special_qp_resume_sends( p_spl_qp_svc->h_qp ); + +#endif + + AL_EXIT( AL_DBG_SMI ); +} + + + +#if defined( CL_USE_MUTEX ) +void +spl_qp_send_async_cb( + IN cl_async_proc_item_t* p_item ) +{ + spl_qp_svc_t* p_spl_qp_svc; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_item ); + p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, send_async_cb ); + + /* Reset asynchronous queue flag. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + p_spl_qp_svc->send_async_queued = FALSE; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_send_cq, IB_WC_SEND ); + + /* Continue processing any queued MADs on the QP. */ + status = special_qp_resume_sends( p_spl_qp_svc->h_qp ); + CL_ASSERT( status == IB_SUCCESS ); + + deref_al_obj( &p_spl_qp_svc->obj ); + + AL_EXIT( AL_DBG_SMI ); +} +#endif + + + +/* + * Special QP receive completion callback. 
+ */ +void +spl_qp_recv_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void* cq_context ) +{ + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( cq_context ); + p_spl_qp_svc = cq_context; + +#if defined( CL_USE_MUTEX ) + + /* Queue an asynchronous processing item to process receives. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( !p_spl_qp_svc->recv_async_queued ) + { + p_spl_qp_svc->recv_async_queued = TRUE; + ref_al_obj( &p_spl_qp_svc->obj ); + cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->recv_async_cb ); + } + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + +#else + + CL_ASSERT( h_cq == p_spl_qp_svc->h_recv_cq ); + spl_qp_comp( p_spl_qp_svc, h_cq, IB_WC_RECV ); + +#endif + + AL_EXIT( AL_DBG_SMI ); +} + + + +#if defined( CL_USE_MUTEX ) +void +spl_qp_recv_async_cb( + IN cl_async_proc_item_t* p_item ) +{ + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_item ); + p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, recv_async_cb ); + + /* Reset asynchronous queue flag. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + p_spl_qp_svc->recv_async_queued = FALSE; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + spl_qp_comp( p_spl_qp_svc, p_spl_qp_svc->h_recv_cq, IB_WC_RECV ); + + deref_al_obj( &p_spl_qp_svc->obj ); + + AL_EXIT( AL_DBG_SMI ); +} +#endif + + + +/* + * Special QP completion handler. + */ +void +spl_qp_comp( + IN spl_qp_svc_t* p_spl_qp_svc, + IN const ib_cq_handle_t h_cq, + IN ib_wc_type_t wc_type ) +{ + ib_wc_t wc; + ib_wc_t* p_free_wc = &wc; + ib_wc_t* p_done_wc; + al_mad_wr_t* p_mad_wr; + al_mad_element_t* p_al_mad; + ib_mad_element_t* p_mad_element; + ib_smp_t* p_smp; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI_CB ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( h_cq ); + + /* Check the QP state and guard against error handling. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( p_spl_qp_svc->state != SPL_QP_ACTIVE ) + { + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + return; + } + cl_atomic_inc( &p_spl_qp_svc->in_use_cnt ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + wc.p_next = NULL; + /* Process work completions. */ + while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS ) + { + /* Process completions one at a time. */ + CL_ASSERT( p_done_wc ); + + /* Flushed completions are handled elsewhere. */ + if( wc.status == IB_WCS_WR_FLUSHED_ERR ) + { + p_free_wc = &wc; + continue; + } + + /* + * Process the work completion. Per IBA specification, the + * wc.wc_type is undefined if wc.status is not IB_WCS_SUCCESS. + * Use the wc_type parameter. + */ + switch( wc_type ) + { + case IB_WC_SEND: + /* Get a pointer to the MAD work request. */ + p_mad_wr = (al_mad_wr_t*)((uintn_t)wc.wr_id); + + /* Remove the MAD work request from the service tracking queue. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + cl_qlist_remove_item( &p_spl_qp_svc->send_queue, + &p_mad_wr->list_item ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Reset directed route SMPs as required by IBA. */ + p_smp = (ib_smp_t*)get_mad_hdr_from_wr( p_mad_wr ); + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + if( ib_smp_is_response( p_smp ) ) + p_smp->hop_ptr++; + else + p_smp->hop_ptr--; + } + + /* Report the send completion to the dispatcher. */ + mad_disp_send_done( p_spl_qp_svc->h_mad_disp, p_mad_wr, &wc ); + break; + + case IB_WC_RECV: + + /* Initialize pointers to the MAD element. 
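+ * wc.wr_id carries the al_mad_element_t pointer that spl_qp_svc_post_recvs + * stamped into recv_wr.wr_id, so the element is recovered directly from + * the completion.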
*/ + p_al_mad = (al_mad_element_t*)((uintn_t)wc.wr_id); + p_mad_element = &p_al_mad->element; + + /* Remove the AL MAD element from the service tracking list. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + + cl_qlist_remove_item( &p_spl_qp_svc->recv_queue, + &p_al_mad->list_item ); + + /* Replenish the receive buffer. */ + spl_qp_svc_post_recvs( p_spl_qp_svc ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Construct the MAD element from the receive work completion. */ + build_mad_recv( p_mad_element, &wc ); + + /* Process the received MAD. */ + status = process_mad_recv( p_spl_qp_svc, p_mad_element ); + + /* Discard this MAD on error. */ + if( status != IB_SUCCESS ) + { + status = ib_put_mad( p_mad_element ); + CL_ASSERT( status == IB_SUCCESS ); + } + break; + + default: + CL_ASSERT( wc_type == IB_WC_SEND || wc_type == IB_WC_RECV ); + break; + } + + if( wc.status != IB_WCS_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("special QP completion error: %s! internal syndrome 0x%I64x\n", + ib_get_wc_status_str( wc.status ), wc.vendor_specific) ); + + /* Reset the special QP service and return. */ + spl_qp_svc_reset( p_spl_qp_svc ); + } + p_free_wc = &wc; + } + + /* Rearm the CQ. */ + status = ib_rearm_cq( h_cq, FALSE ); + CL_ASSERT( status == IB_SUCCESS ); + + cl_atomic_dec( &p_spl_qp_svc->in_use_cnt ); + AL_EXIT( AL_DBG_SMI_CB ); +} + + + +/* + * Process a received MAD. + */ +ib_api_status_t +process_mad_recv( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_element ) +{ + ib_smp_t* p_smp; + mad_route_t route; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_element ); + + /* + * If the CA has a HW agent then this MAD should have been + * consumed below verbs. The fact that it was received here + * indicates that it should be forwarded to the dispatcher + * for delivery to a class manager. Otherwise, determine how + * the MAD should be routed. + */ + route = ROUTE_DISPATCHER; + if( check_local_mad( p_spl_qp_svc->h_qp ) ) + { + /* + * SMP and GMP processing is branched here to handle overlaps + * between class methods and attributes. + */ + switch( p_mad_element->p_mad_buf->mgmt_class ) + { + case IB_MCLASS_SUBN_DIR: + /* Perform special checks on directed route SMPs. */ + p_smp = (ib_smp_t*)p_mad_element->p_mad_buf; + + if( ( p_smp->hop_count >= IB_SUBNET_PATH_HOPS_MAX ) || + ( p_smp->hop_ptr >= IB_SUBNET_PATH_HOPS_MAX ) ) + { + route = ROUTE_DISCARD; + } + else if( ib_smp_is_response( p_smp ) ) + { + /* + * This node is the destination of the response. Discard + * if the source LID or hop pointer is incorrect. + */ + if( p_smp->dr_slid == IB_LID_PERMISSIVE ) + { + if( p_smp->hop_ptr == 1 ) + { + p_smp->hop_ptr--; /* Adjust ptr per IBA spec. */ + } + else + { + route = ROUTE_DISCARD; + } + } + else if( ( p_smp->dr_slid < p_spl_qp_svc->base_lid ) || + ( p_smp->dr_slid >= p_spl_qp_svc->base_lid + + ( 1 << p_spl_qp_svc->lmc ) ) ) + { + route = ROUTE_DISCARD; + } + } + else + { + /* + * This node is the destination of the request. Discard + * if the destination LID or hop pointer is incorrect. + */ + if( p_smp->dr_dlid == IB_LID_PERMISSIVE ) + { + if( p_smp->hop_count == p_smp->hop_ptr ) + { + p_smp->return_path[ p_smp->hop_ptr++ ] = + p_spl_qp_svc->port_num; /* Set path per IBA spec. 
*/ + } + else + { + route = ROUTE_DISCARD; + } + } + else if( ( p_smp->dr_dlid < p_spl_qp_svc->base_lid ) || + ( p_smp->dr_dlid >= p_spl_qp_svc->base_lid + + ( 1 << p_spl_qp_svc->lmc ) ) ) + { + route = ROUTE_DISCARD; + } + } + + if( route == ROUTE_DISCARD ) break; + /* else fall through next case */ + + case IB_MCLASS_SUBN_LID: + route = route_recv_smp( p_mad_element ); + break; + + case IB_MCLASS_PERF: + /* Process the received GMP. */ + switch( p_mad_element->p_mad_buf->method ) + { + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_SET: + route = ROUTE_LOCAL; + break; + default: + break; + } + break; + + case IB_MCLASS_BM: + route = route_recv_gmp( p_mad_element ); + break; + + case IB_MCLASS_SUBN_ADM: + case IB_MCLASS_DEV_MGMT: + case IB_MCLASS_COMM_MGMT: + case IB_MCLASS_SNMP: + break; + + default: + /* Route vendor specific MADs to the HCA provider. */ + if( ib_class_is_vendor_specific( + p_mad_element->p_mad_buf->mgmt_class ) ) + { + route = route_recv_gmp( p_mad_element ); + } + break; + } + } + + /* Route the MAD. */ + if( is_discard( route ) ) + status = IB_ERROR; + else if( is_dispatcher( route ) ) + status = mad_disp_recv_done( p_spl_qp_svc->h_mad_disp, p_mad_element ); + else if( is_remote( route ) ) + status = forward_sm_trap( p_spl_qp_svc, p_mad_element ); + else + status = recv_local_mad( p_spl_qp_svc, p_mad_element ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Route a received SMP. + */ +mad_route_t +route_recv_smp( + IN ib_mad_element_t* p_mad_element ) +{ + mad_route_t route; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_mad_element ); + + /* Process the received SMP. */ + switch( p_mad_element->p_mad_buf->method ) + { + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_SET: + route = route_recv_smp_attr( p_mad_element ); + break; + + case IB_MAD_METHOD_TRAP: + /* + * Special check to route locally generated traps to the remote SM. + * Distinguished from other receives by the p_wc->recv.ud.recv_opt + * IB_RECV_OPT_FORWARD flag. + * + * Note that because forwarded traps use AL MAD services, the upper + * 32-bits of the TID are reserved by the access layer. When matching + * a Trap Repress MAD, the SMA must only use the lower 32-bits of the + * TID. + */ + AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("Trap TID = 0x%08x:%08x \n", + ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0], + ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1])); + + route = ( p_mad_element->recv_opt & IB_RECV_OPT_FORWARD ) ? + ROUTE_REMOTE : ROUTE_DISPATCHER; + break; + + case IB_MAD_METHOD_TRAP_REPRESS: + /* + * Note that because forwarded traps use AL MAD services, the upper + * 32-bits of the TID are reserved by the access layer. When matching + * a Trap Repress MAD, the SMA must only use the lower 32-bits of the + * TID. + */ + AL_PRINT(TRACE_LEVEL_INFORMATION, AL_DBG_SMI, ("TrapRepress TID = 0x%08x:%08x \n", + ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[0], + ((uint32_t*)&p_mad_element->p_mad_buf->trans_id)[1])); + + route = ROUTE_LOCAL; + break; + + default: + route = ROUTE_DISPATCHER; + break; + } + + AL_EXIT( AL_DBG_SMI ); + return route; +} + + + +/* + * Route received SMP attributes. + */ +mad_route_t +route_recv_smp_attr( + IN ib_mad_element_t* p_mad_element ) +{ + mad_route_t route; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_mad_element ); + + /* Process the received SMP attributes. 
*/ + switch( p_mad_element->p_mad_buf->attr_id ) + { + case IB_MAD_ATTR_NODE_DESC: + case IB_MAD_ATTR_NODE_INFO: + case IB_MAD_ATTR_GUID_INFO: + case IB_MAD_ATTR_PORT_INFO: + case IB_MAD_ATTR_P_KEY_TABLE: + case IB_MAD_ATTR_SLVL_TABLE: + case IB_MAD_ATTR_VL_ARBITRATION: + case IB_MAD_ATTR_VENDOR_DIAG: + case IB_MAD_ATTR_LED_INFO: + case IB_MAD_ATTR_SWITCH_INFO: + route = ROUTE_LOCAL; + break; + + default: + route = ROUTE_DISPATCHER; + break; + } + + AL_EXIT( AL_DBG_SMI ); + return route; +} + + +/* + * Route a received GMP. + */ +mad_route_t +route_recv_gmp( + IN ib_mad_element_t* p_mad_element ) +{ + mad_route_t route; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_mad_element ); + + /* Process the received GMP. */ + switch( p_mad_element->p_mad_buf->method ) + { + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_SET: + /* Route vendor specific MADs to the HCA provider. */ + if( ib_class_is_vendor_specific( + p_mad_element->p_mad_buf->mgmt_class ) ) + { + route = ROUTE_LOCAL; + } + else + { + route = route_recv_gmp_attr( p_mad_element ); + } + break; + + default: + route = ROUTE_DISPATCHER; + break; + } + + AL_EXIT( AL_DBG_SMI ); + return route; +} + + + +/* + * Route received GMP attributes. + */ +mad_route_t +route_recv_gmp_attr( + IN ib_mad_element_t* p_mad_element ) +{ + mad_route_t route; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_mad_element ); + + /* Process the received GMP attributes. */ + if( p_mad_element->p_mad_buf->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO ) + route = ROUTE_LOCAL; + else + route = ROUTE_DISPATCHER; + + AL_EXIT( AL_DBG_SMI ); + return route; +} + + + +/* + * Forward a locally generated Subnet Management trap. + */ +ib_api_status_t +forward_sm_trap( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_element ) +{ + ib_av_attr_t av_attr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_element ); + + /* Check the SMP class. */ + if( p_mad_element->p_mad_buf->mgmt_class != IB_MCLASS_SUBN_LID ) + { + /* + * Per IBA Specification Release 1.1 Section 14.2.2.1, + * "C14-5: Only a SM shall originate a directed route SMP." + * Therefore all traps should be LID routed; drop this one. + */ + AL_EXIT( AL_DBG_SMI ); + return IB_ERROR; + } + + if(p_spl_qp_svc->sm_lid == p_spl_qp_svc->base_lid) + return mad_disp_recv_done(p_spl_qp_svc->h_mad_disp,p_mad_element); + + /* Create an address vector for the SM. */ + cl_memclr( &av_attr, sizeof( ib_av_attr_t ) ); + av_attr.port_num = p_spl_qp_svc->port_num; + av_attr.sl = p_spl_qp_svc->sm_sl; + av_attr.dlid = p_spl_qp_svc->sm_lid; + av_attr.grh_valid = FALSE; + + status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias, + &av_attr, &p_mad_element->h_av ); + + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_SMI ); + return status; + } + + /* Complete the initialization of the MAD element. */ + p_mad_element->p_next = NULL; + p_mad_element->remote_qkey = IB_QP_PRIVILEGED_Q_KEY; + p_mad_element->resp_expected = FALSE; + + /* Clear context1 for proper send completion callback processing. */ + p_mad_element->context1 = NULL; + + /* + * Forward the trap. Note that because forwarded traps use AL MAD + * services, the upper 32-bits of the TID are reserved by the access + * layer. When matching a Trap Repress MAD, the SMA must only use + * the lower 32-bits of the TID. 
+ */ + status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_element, NULL ); + + if( status != IB_SUCCESS ) + ib_destroy_av( p_mad_element->h_av ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + +/* + * Process a locally routed MAD received from the special QP. + */ +ib_api_status_t +recv_local_mad( + IN spl_qp_svc_t* p_spl_qp_svc, + IN ib_mad_element_t* p_mad_request ) +{ + ib_mad_t* p_mad_hdr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_spl_qp_svc ); + CL_ASSERT( p_mad_request ); + + /* Initialize the MAD element. */ + p_mad_hdr = ib_get_mad_buf( p_mad_request ); + p_mad_request->context1 = p_mad_request; + + /* Save the TID. */ + p_mad_request->context2 = + (void*)(uintn_t)al_get_al_tid( p_mad_hdr->trans_id ); +/* + * Disable warning about passing unaligned 64-bit value. + * The value is always aligned given how buffers are allocated + * and given the layout of a MAD. + */ +#pragma warning( push, 3 ) + al_set_al_tid( &p_mad_hdr->trans_id, 0 ); +#pragma warning( pop ) + + /* + * We need to get a response from the local HCA to this MAD only if this + * MAD is not itself a response. + */ + p_mad_request->resp_expected = !( ib_mad_is_response( p_mad_hdr ) || + ( p_mad_hdr->method == IB_MAD_METHOD_TRAP_REPRESS ) ); + p_mad_request->timeout_ms = LOCAL_MAD_TIMEOUT; + p_mad_request->send_opt = IB_SEND_OPT_LOCAL; + + /* Send the locally addressed MAD request to the CA for processing. */ + status = ib_send_mad( p_spl_qp_svc->h_mad_svc, p_mad_request, NULL ); + + AL_EXIT( AL_DBG_SMI ); + return status; +} + + + +/* + * Special QP alias send completion callback. + */ +void +spl_qp_alias_send_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void* mad_svc_context, + IN ib_mad_element_t* p_mad_element ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + CL_ASSERT( p_mad_element ); + + if( p_mad_element->h_av ) + { + status = ib_destroy_av( p_mad_element->h_av ); + CL_ASSERT( status == IB_SUCCESS ); + } + + status = ib_put_mad( p_mad_element ); + CL_ASSERT( status == IB_SUCCESS ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Special QP alias receive completion callback. + */ +void +spl_qp_alias_recv_cb( + IN ib_mad_svc_handle_t h_mad_svc, + IN void* mad_svc_context, + IN ib_mad_element_t* p_mad_response ) +{ + spl_qp_svc_t* p_spl_qp_svc; + ib_mad_element_t* p_mad_request; + ib_mad_t* p_mad_hdr; + ib_av_attr_t av_attr; + ib_api_status_t status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( mad_svc_context ); + CL_ASSERT( p_mad_response ); + CL_ASSERT( p_mad_response->send_context1 ); + + /* Initialize pointers. */ + p_spl_qp_svc = mad_svc_context; + p_mad_request = p_mad_response->send_context1; + p_mad_hdr = ib_get_mad_buf( p_mad_response ); + + /* Restore the TID, so it will match on the remote side. */ +#pragma warning( push, 3 ) + al_set_al_tid( &p_mad_hdr->trans_id, + (uint32_t)(uintn_t)p_mad_response->send_context2 ); +#pragma warning( pop ) + + /* Set the remote QP. */ + p_mad_response->remote_qp = p_mad_request->remote_qp; + p_mad_response->remote_qkey = p_mad_request->remote_qkey; + + /* Prepare to create an address vector. 
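+ * The response AV reverses the request's path: source and destination + * GIDs are swapped, and a directed route request addressed with the + * permissive LID is answered with the permissive LID as well.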
*/ + cl_memclr( &av_attr, sizeof( ib_av_attr_t ) ); + av_attr.port_num = p_spl_qp_svc->port_num; + av_attr.sl = p_mad_request->remote_sl; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + av_attr.path_bits = p_mad_request->path_bits; + if( p_mad_request->grh_valid ) + { + cl_memcpy( &av_attr.grh, p_mad_request->p_grh, sizeof( ib_grh_t ) ); + av_attr.grh.src_gid = p_mad_request->p_grh->dest_gid; + av_attr.grh.dest_gid = p_mad_request->p_grh->src_gid; + av_attr.grh_valid = TRUE; + } + if( ( p_mad_hdr->mgmt_class == IB_MCLASS_SUBN_DIR ) && + ( ((ib_smp_t *)p_mad_hdr)->dr_dlid == IB_LID_PERMISSIVE ) ) + av_attr.dlid = IB_LID_PERMISSIVE; + else + av_attr.dlid = p_mad_request->remote_lid; + + /* Create an address vector. */ + status = ib_create_av( p_spl_qp_svc->h_qp->obj.p_ci_ca->h_pd_alias, + &av_attr, &p_mad_response->h_av ); + + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad_response ); + + AL_EXIT( AL_DBG_SMI ); + return; + } + + /* Send the response. */ + status = ib_send_mad( h_mad_svc, p_mad_response, NULL ); + + if( status != IB_SUCCESS ) + { + ib_destroy_av( p_mad_response->h_av ); + ib_put_mad( p_mad_response ); + } + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Post receive buffers to a special QP. + */ +static ib_api_status_t +spl_qp_svc_post_recvs( + IN spl_qp_svc_t* const p_spl_qp_svc ) +{ + ib_mad_element_t* p_mad_element; + al_mad_element_t* p_al_element; + ib_recv_wr_t recv_wr; + ib_api_status_t status = IB_SUCCESS; + + /* Attempt to post receive buffers up to the max_qp_depth limit. */ + while( cl_qlist_count( &p_spl_qp_svc->recv_queue ) < + (int32_t)p_spl_qp_svc->max_qp_depth ) + { + /* Get a MAD element from the pool. */ + status = ib_get_mad( p_spl_qp_svc->obj.p_ci_ca->pool_key, + MAD_BLOCK_SIZE, &p_mad_element ); + + if( status != IB_SUCCESS ) break; + + p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, + element ); + + /* Build the receive work request. */ + recv_wr.p_next = NULL; + recv_wr.wr_id = (uintn_t)p_al_element; + recv_wr.num_ds = 1; + recv_wr.ds_array = &p_al_element->grh_ds; + + /* Queue the receive on the service tracking list. */ + cl_qlist_insert_tail( &p_spl_qp_svc->recv_queue, + &p_al_element->list_item ); + + /* Post the receive. */ + status = ib_post_recv( p_spl_qp_svc->h_qp, &recv_wr, NULL ); + + if( status != IB_SUCCESS ) + { + AL_PRINT( TRACE_LEVEL_ERROR, AL_DBG_ERROR, + ("Failed to post receive %016I64x\n", + (LONG_PTR)p_al_element) ); + cl_qlist_remove_item( &p_spl_qp_svc->recv_queue, + &p_al_element->list_item ); + + ib_put_mad( p_mad_element ); + break; + } + } + + return status; +} + + + +/* + * Special QP service asynchronous event callback. + */ +void +spl_qp_svc_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_event_rec ); + CL_ASSERT( p_event_rec->context ); + + if( p_event_rec->code == IB_AE_SQ_DRAINED ) + { + AL_EXIT( AL_DBG_SMI ); + return; + } + + p_spl_qp_svc = p_event_rec->context; + + spl_qp_svc_reset( p_spl_qp_svc ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Special QP service reset. + */ +void +spl_qp_svc_reset( + IN spl_qp_svc_t* p_spl_qp_svc ) +{ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + + if( p_spl_qp_svc->state != SPL_QP_ACTIVE ) + { + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + return; + } + + /* Change the special QP service to the error state. */ + p_spl_qp_svc->state = SPL_QP_ERROR; + + /* Flag the service as in use by the asynchronous processing thread. 
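+	 * The matching cl_atomic_dec is performed by spl_qp_svc_reset_cb
+	 * once the QP has been returned to the active state.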
*/ + cl_atomic_inc( &p_spl_qp_svc->in_use_cnt ); + + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Queue an asynchronous processing item to reset the special QP. */ + cl_async_proc_queue( gp_async_proc_mgr, &p_spl_qp_svc->reset_async ); +} + + + +/* + * Asynchronous processing thread callback to reset the special QP service. + */ +void +spl_qp_svc_reset_cb( + IN cl_async_proc_item_t* p_item ) +{ + spl_qp_svc_t* p_spl_qp_svc; + cl_list_item_t* p_list_item; + ib_wc_t wc; + ib_wc_t* p_free_wc; + ib_wc_t* p_done_wc; + al_mad_wr_t* p_mad_wr; + al_mad_element_t* p_al_mad; + ib_qp_mod_t qp_mod; + ib_api_status_t status; + cl_qlist_t mad_wr_list; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_item ); + p_spl_qp_svc = PARENT_STRUCT( p_item, spl_qp_svc_t, reset_async ); + + /* Wait here until the special QP service is only in use by this thread. */ + while( p_spl_qp_svc->in_use_cnt != 1 ) + { + cl_thread_suspend( 0 ); + } + + /* Change the QP to the RESET state. */ + cl_memclr( &qp_mod, sizeof( ib_qp_mod_t ) ); + qp_mod.req_state = IB_QPS_RESET; + + status = ib_modify_qp( p_spl_qp_svc->h_qp, &qp_mod ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Return receive MAD elements to the pool. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + for( p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ); + p_list_item != cl_qlist_end( &p_spl_qp_svc->recv_queue ); + p_list_item = cl_qlist_remove_head( &p_spl_qp_svc->recv_queue ) ) + { + p_al_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, list_item ); + + status = ib_put_mad( &p_al_mad->element ); + CL_ASSERT( status == IB_SUCCESS ); + } + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Re-initialize the QP. */ + status = ib_init_dgrm_svc( p_spl_qp_svc->h_qp, NULL ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Poll to remove any remaining send completions from the CQ. */ + do + { + cl_memclr( &wc, sizeof( ib_wc_t ) ); + p_free_wc = &wc; + status = ib_poll_cq( p_spl_qp_svc->h_send_cq, &p_free_wc, &p_done_wc ); + + } while( status == IB_SUCCESS ); + + /* Post receive buffers. */ + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + spl_qp_svc_post_recvs( p_spl_qp_svc ); + + /* Re-queue any outstanding MAD send operations. */ + cl_qlist_init( &mad_wr_list ); + cl_qlist_insert_list_tail( &mad_wr_list, &p_spl_qp_svc->send_queue ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + for( p_list_item = cl_qlist_remove_head( &mad_wr_list ); + p_list_item != cl_qlist_end( &mad_wr_list ); + p_list_item = cl_qlist_remove_head( &mad_wr_list ) ) + { + p_mad_wr = PARENT_STRUCT( p_list_item, al_mad_wr_t, list_item ); + special_qp_queue_mad( p_spl_qp_svc->h_qp, p_mad_wr ); + } + + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( p_spl_qp_svc->state == SPL_QP_ERROR ) + { + /* The QP is ready. Change the state. */ + p_spl_qp_svc->state = SPL_QP_ACTIVE; + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + /* Re-arm the CQs. */ + status = ib_rearm_cq( p_spl_qp_svc->h_recv_cq, FALSE ); + CL_ASSERT( status == IB_SUCCESS ); + status = ib_rearm_cq( p_spl_qp_svc->h_send_cq, FALSE ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Resume send processing. */ + special_qp_resume_sends( p_spl_qp_svc->h_qp ); + } + else + { + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + } + + /* No longer in use by the asynchronous processing thread. */ + cl_atomic_dec( &p_spl_qp_svc->in_use_cnt ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Special QP alias asynchronous event callback. 
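+ * No recovery is required for alias QP events; the callback simply
+ * ignores the event record.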
+ */ +void +spl_qp_alias_event_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); +} + + + +/* + * Acquire the SMI dispatcher for the given port. + */ +ib_api_status_t +acquire_smi_disp( + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t* const ph_mad_disp ) +{ + CL_ASSERT( gp_spl_qp_mgr ); + return acquire_svc_disp( &gp_spl_qp_mgr->smi_map, port_guid, ph_mad_disp ); +} + + + +/* + * Acquire the GSI dispatcher for the given port. + */ +ib_api_status_t +acquire_gsi_disp( + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t* const ph_mad_disp ) +{ + CL_ASSERT( gp_spl_qp_mgr ); + return acquire_svc_disp( &gp_spl_qp_mgr->gsi_map, port_guid, ph_mad_disp ); +} + + + +/* + * Acquire the service dispatcher for the given port. + */ +ib_api_status_t +acquire_svc_disp( + IN const cl_qmap_t* const p_svc_map, + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t *ph_mad_disp ) +{ + cl_map_item_t* p_svc_item; + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_svc_map ); + CL_ASSERT( gp_spl_qp_mgr ); + + /* Search for the SMI or GSI service for the given port. */ + cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock ); + p_svc_item = cl_qmap_get( p_svc_map, port_guid ); + cl_spinlock_release( &gp_spl_qp_mgr->obj.lock ); + if( p_svc_item == cl_qmap_end( p_svc_map ) ) + { + /* The port does not have an active agent. */ + AL_EXIT( AL_DBG_SMI ); + return IB_INVALID_GUID; + } + + p_spl_qp_svc = PARENT_STRUCT( p_svc_item, spl_qp_svc_t, map_item ); + + /* Found a match. Get MAD dispatcher handle. */ + *ph_mad_disp = p_spl_qp_svc->h_mad_disp; + + /* Reference the MAD dispatcher on behalf of the client. */ + ref_al_obj( &p_spl_qp_svc->h_mad_disp->obj ); + + AL_EXIT( AL_DBG_SMI ); + return IB_SUCCESS; +} + + + +/* + * Force a poll for CA attribute changes. + */ +void +force_smi_poll( + void ) +{ + AL_ENTER( AL_DBG_SMI ); + + /* + * Stop the poll timer. Just invoke the timer callback directly to + * save the thread context switching. + */ + smi_poll_timer_cb( gp_spl_qp_mgr ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Poll for CA port attribute changes. + */ +void +smi_poll_timer_cb( + IN void* context ) +{ + cl_status_t cl_status; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( context ); + CL_ASSERT( gp_spl_qp_mgr == context ); + UNUSED_PARAM( context ); + + /* + * Scan for changes on the local HCAs. Since the PnP manager has its + * own thread for processing changes, we kick off that thread in parallel + * reposting receive buffers to the SQP agents. + */ + pnp_poll(); + + /* + * To handle the case where force_smi_poll is called at the same time + * the timer expires, check if the asynchronous processing item is in + * use. If it is already in use, it means that we're about to poll + * anyway, so just ignore this call. + */ + cl_spinlock_acquire( &gp_spl_qp_mgr->obj.lock ); + + /* Perform port processing on the special QP agents. */ + cl_qlist_apply_func( &gp_spl_qp_mgr->obj.obj_list, smi_post_recvs, + gp_spl_qp_mgr ); + + /* Determine if there are any special QP agents to poll. */ + if( !cl_is_qlist_empty( &gp_spl_qp_mgr->obj.obj_list ) && g_smi_poll_interval ) + { + /* Restart the polling timer. */ + cl_status = + cl_timer_start( &gp_spl_qp_mgr->poll_timer, g_smi_poll_interval ); + CL_ASSERT( cl_status == CL_SUCCESS ); + } + cl_spinlock_release( &gp_spl_qp_mgr->obj.lock ); + + AL_EXIT( AL_DBG_SMI ); +} + + + +/* + * Post receive buffers to a special QP. 
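+ * This is the cl_qlist_apply_func callback invoked from
+ * smi_poll_timer_cb for every registered special QP service; services
+ * that are not in the active state are skipped.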
+ */ +void +smi_post_recvs( + IN cl_list_item_t* const p_list_item, + IN void* context ) +{ + al_obj_t* p_obj; + spl_qp_svc_t* p_spl_qp_svc; + + AL_ENTER( AL_DBG_SMI ); + + CL_ASSERT( p_list_item ); + UNUSED_PARAM( context ); + + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item ); + p_spl_qp_svc = PARENT_STRUCT( p_obj, spl_qp_svc_t, obj ); + + cl_spinlock_acquire( &p_spl_qp_svc->obj.lock ); + if( p_spl_qp_svc->state != SPL_QP_ACTIVE ) + { + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + return; + } + + spl_qp_svc_post_recvs( p_spl_qp_svc ); + cl_spinlock_release( &p_spl_qp_svc->obj.lock ); + + AL_EXIT( AL_DBG_SMI ); +} diff --git a/branches/Ndi/core/al/kernel/al_smi.h b/branches/Ndi/core/al/kernel/al_smi.h new file mode 100644 index 00000000..51180287 --- /dev/null +++ b/branches/Ndi/core/al/kernel/al_smi.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( __AL_SMI_H__ ) +#define __AL_SMI_H__ + + +#include +#include +#include "al_common.h" +#include "al_mad.h" + + +/* Global special QP manager */ +typedef struct _spl_qp_mgr +{ + al_obj_t obj; /* Child of gp_al_mgr */ + ib_pnp_handle_t h_qp0_pnp; /* Handle for QP0 port PnP events */ + ib_pnp_handle_t h_qp1_pnp; /* Handle for QP1 port PnP events */ + + cl_timer_t poll_timer; /* Timer for polling HW SMIs */ + + cl_qmap_t smi_map; /* List of SMI services */ + cl_qmap_t gsi_map; /* List of GSI services */ + +} spl_qp_mgr_t; + + + +typedef enum _spl_qp_svc_state +{ + SPL_QP_INIT = 0, + SPL_QP_ACTIVE, + SPL_QP_ERROR, + SPL_QP_DESTROYING + +} spl_qp_svc_state_t; + +/* + * Attribute cache for port info saved to expedite local MAD processing. + * Note that the cache accounts for the worst case GID and PKEY table size + * but is allocated from paged pool, so it's nothing to worry about. 
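+ * Concretely, spl_qp_cache_t below reserves 32 GUID blocks and 2048
+ * P_Key blocks (the architectural maximums, assuming the usual 8 GUIDs
+ * and 32 P_Keys per block), so most of the structure normally goes
+ * unused.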
+ */ + +typedef struct _guid_block +{ + boolean_t valid; + ib_guid_info_t tbl; + +} guid_block_t; + + +typedef struct _pkey_block +{ + boolean_t valid; + ib_pkey_table_t tbl; + +} pkey_block_t; + +typedef struct _sl_vl_cache +{ + boolean_t valid; + ib_slvl_table_t tbl; + +} sl_vl_cache_t; + +typedef struct _vl_arb_block +{ + boolean_t valid; + ib_vl_arb_table_t tbl; + +} vl_arb_block_t; + +typedef struct _attr_cache +{ + guid_block_t guid_block[32]; + pkey_block_t pkey_tbl[2048]; + sl_vl_cache_t sl_vl; + vl_arb_block_t vl_arb[4]; + +} spl_qp_cache_t; + + +/* Per port special QP service */ +typedef struct _spl_qp_svc +{ + al_obj_t obj; /* Child of spl_qp_agent_t */ + cl_map_item_t map_item; /* Item on SMI/GSI list */ + + net64_t port_guid; + uint8_t port_num; + ib_net16_t base_lid; + uint8_t lmc; + + ib_net16_t sm_lid; + uint8_t sm_sl; + ib_net64_t m_key; + + spl_qp_cache_t cache; + cl_spinlock_t cache_lock; + boolean_t cache_en; + + al_mad_disp_handle_t h_mad_disp; + ib_cq_handle_t h_send_cq; + ib_cq_handle_t h_recv_cq; + ib_qp_handle_t h_qp; + +#if defined( CL_USE_MUTEX ) + boolean_t send_async_queued; + cl_async_proc_item_t send_async_cb; + boolean_t recv_async_queued; + cl_async_proc_item_t recv_async_cb; +#endif + + spl_qp_svc_state_t state; + atomic32_t in_use_cnt; + cl_async_proc_item_t reset_async; + + uint32_t max_qp_depth; + al_mad_wr_t* local_mad_wr; + cl_qlist_t send_queue; + cl_qlist_t recv_queue; + cl_async_proc_item_t send_async; + + ib_qp_handle_t h_qp_alias; + ib_pool_key_t pool_key; + ib_mad_svc_handle_t h_mad_svc; + +} spl_qp_svc_t; + + +typedef enum _mad_route +{ + ROUTE_DISPATCHER = 0, + ROUTE_REMOTE, + ROUTE_LOCAL, + ROUTE_LOOPBACK, + ROUTE_DISCARD + +} mad_route_t; + + +static inline boolean_t +is_dispatcher( + IN const mad_route_t route ) +{ + return( route == ROUTE_DISPATCHER ); +} + + +static inline boolean_t +is_remote( + IN const mad_route_t route ) +{ + return( route == ROUTE_REMOTE ); +} + + +static inline boolean_t +is_discard( + IN const mad_route_t route ) +{ + return( route == ROUTE_DISCARD ); +} + + +static inline boolean_t +is_loopback( + IN const mad_route_t route ) +{ + return( route == ROUTE_LOOPBACK ); +} + + +static inline boolean_t +is_local( + IN const mad_route_t route ) +{ + /* + * Loopback implies a locally routed MAD. Discarded MADs are always + * handled locally to maintain proper order of work completions. + */ + return( ( route == ROUTE_LOCAL ) || + is_loopback( route ) || is_discard( route ) ); +} + + +ib_api_status_t +create_spl_qp_mgr( + IN al_obj_t* const p_parent_obj ); + + +ib_api_status_t +acquire_smi_disp( + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t* const ph_mad_disp ); + + +ib_api_status_t +acquire_gsi_disp( + IN const ib_net64_t port_guid, + OUT al_mad_disp_handle_t* const ph_mad_disp ); + + +ib_api_status_t +spl_qp_svc_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr ); + + +void +force_smi_poll( + void ); + + +#endif diff --git a/branches/Ndi/core/al/kernel/ibal.rc b/branches/Ndi/core/al/kernel/ibal.rc new file mode 100644 index 00000000..608d6f38 --- /dev/null +++ b/branches/Ndi/core/al/kernel/ibal.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Kernel Mode InfiniBand Access Layer (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Kernel Mode InfiniBand Access Layer" +#endif + +#define VER_INTERNALNAME_STR "ibal.sys" +#define VER_ORIGINALFILENAME_STR "ibal.sys" + +#include diff --git a/branches/Ndi/core/al/kernel/makefile b/branches/Ndi/core/al/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/core/al/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. 
+# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/core/al/user/SOURCES b/branches/Ndi/core/al/user/SOURCES new file mode 100644 index 00000000..bd1cfbf8 --- /dev/null +++ b/branches/Ndi/core/al/user/SOURCES @@ -0,0 +1,97 @@ +!if $(FREEBUILD) +TARGETNAME=ibal +!else +TARGETNAME=ibald +!endif + +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=DYNLINK +DLLENTRY=DllMain + + + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +#!else +#ENABLE_EVENT_TRACING=1 +!endif + + + +DLLDEF=$O\al_exports.def +USE_NTDLL=1 + +SOURCES=\ + ibal.rc \ + al_dll.c \ + al_mad_pool.c \ + ual_av.c \ + ual_ca.c \ + ual_ci_ca.c \ + ual_cm_cep.c \ + ual_cq.c \ + ual_dm.c \ + ual_mad.c \ + ual_mad_pool.c \ + ual_mcast.c \ + ual_mgr.c \ + ual_mr.c \ + ual_mw.c \ + ual_pd.c \ + ual_pnp.c \ + ual_qp.c \ + ual_query.c \ + ual_reg_svc.c \ + ual_sa_req.c \ + ual_srq.c \ + ual_sub.c \ + ..\al.c \ + ..\al_av.c \ + ..\al_ca.c \ + ..\al_ci_ca_shared.c \ + ..\al_cm_qp.c \ + ..\al_common.c \ + ..\al_cq.c \ + ..\al_dm.c \ + ..\al_init.c \ + ..\al_mad.c \ + ..\al_mcast.c \ + ..\al_mgr_shared.c \ + ..\al_mr_shared.c \ + ..\al_mw.c \ + ..\al_pd.c \ + ..\al_qp.c \ + ..\al_query.c \ + ..\al_reg_svc.c \ + ..\al_res_mgr.c \ + ..\al_srq.c \ + ..\al_sub.c \ + ..\ib_common.c \ + ..\ib_statustext.c + +INCLUDES=..;..\..\..\inc;..\..\..\inc\user; + +USER_C_FLAGS=$(USER_C_FLAGS) -DEXPORT_AL_SYMBOLS -DCL_NO_TRACK_MEM -DWPP_OLDCC + +TARGETLIBS= \ + $(SDK_LIB_PATH)\kernel32.lib \ + $(SDK_LIB_PATH)\Advapi32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib +!else + $(TARGETPATH)\*\complibd.lib +!endif + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -ext: .c .h .C .H \ + -scan:..\al_debug.h \ + -func:AL_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:AL_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \ + -dll +!ENDIF + + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/core/al/user/al_dll.c b/branches/Ndi/core/al/user/al_dll.c new file mode 100644 index 00000000..85aef032 --- /dev/null +++ b/branches/Ndi/core/al/user/al_dll.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include "ual_support.h" +#include "al_mgr.h" +#include "ib_common.h" +#include "al_init.h" + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_dll.tmh" +#endif + + + +HANDLE g_al_device = INVALID_HANDLE_VALUE; +cl_mutex_t g_open_close_mutex; +cl_perf_t g_perf; + +extern al_mgr_t* gp_al_mgr; +extern cl_async_proc_t *gp_async_proc_mgr; + + +static BOOL +_DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + +#if !defined(EVENT_TRACING) +#if DBG + +#define ENV_BUFSIZE 16 + + TCHAR dbg_lvl_str[ENV_BUFSIZE]; + DWORD i; +#endif +#endif + + UNUSED_PARAM( lp_reserved ); + + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: + +#if defined(EVENT_TRACING) +#if DBG + WPP_INIT_TRACING(L"ibald.dll"); +#else + WPP_INIT_TRACING(L"ibal.dll"); +#endif +#endif + cl_perf_init( &g_perf, AlMaxPerf ); + DisableThreadLibraryCalls( h_module ); + + cl_mutex_construct( &g_open_close_mutex ); + if( cl_mutex_init( &g_open_close_mutex ) != CL_SUCCESS ) + return FALSE; + +#if !defined(EVENT_TRACING) +#if DBG + + i = GetEnvironmentVariable( "IBAL_UAL_DBG_LEVEL", dbg_lvl_str, ENV_BUFSIZE ); + if( i && i <= 16 ) + { + g_al_dbg_level = _tcstoul( dbg_lvl_str, NULL, ENV_BUFSIZE ); + } + + i = GetEnvironmentVariable( "IBAL_UAL_DBG_FLAGS", dbg_lvl_str, ENV_BUFSIZE ); + if( i && i <= 16 ) + { + g_al_dbg_flags = _tcstoul( dbg_lvl_str, NULL, ENV_BUFSIZE ); + } + + if( g_al_dbg_flags & AL_DBG_ERR ) + g_al_dbg_flags |= CL_DBG_ERROR; + + AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_DEV , + ("Given IBAL_UAL_DBG debug level:%d debug flags 0x%x\n", + g_al_dbg_level ,g_al_dbg_flags) ); + +#endif +#endif + + break; + + case DLL_PROCESS_DETACH: + + cl_mutex_destroy( &g_open_close_mutex ); + cl_perf_destroy( &g_perf, TRUE ); + +#if defined(EVENT_TRACING) + WPP_CLEANUP(); +#endif + break; + } + return TRUE; +} + + +extern BOOL APIENTRY +_DllMainCRTStartupForGS( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ); + + +BOOL APIENTRY +DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: + if( !_DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ) ) + { + return FALSE; + } + + return _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + case DLL_PROCESS_DETACH: + _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + return _DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ); + } + return TRUE; +} + + +cl_status_t +do_al_dev_ioctl( + IN uint32_t command, + IN void *p_in_buf, + IN uintn_t in_buf_size, + IN void *p_out_buf, + IN uintn_t out_buf_size, + OUT uintn_t *p_bytes_ret ) +{ + cl_status_t cl_status; + + AL_ENTER( AL_DBG_DEV ); + + CL_ASSERT( g_al_device != INVALID_HANDLE_VALUE ); + + cl_status = cl_ioctl_request( g_al_device, + command, + p_in_buf, + in_buf_size, + p_out_buf, + out_buf_size, + p_bytes_ret, + NULL ); + + if( cl_status != CL_SUCCESS ) + { + CL_ASSERT( cl_status != CL_PENDING ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("Error performing IOCTL 0x%08x to AL driver (%s)\n", + command, CL_STATUS_MSG(cl_status)) ); + return CL_ERROR; + } + + AL_EXIT( AL_DBG_DEV ); + return cl_status; +} diff --git a/branches/Ndi/core/al/user/al_exports.src b/branches/Ndi/core/al/user/al_exports.src new file mode 100644 index 00000000..87d428bd --- /dev/null +++ b/branches/Ndi/core/al/user/al_exports.src @@ -0,0 +1,219 
@@ +#if DBG +LIBRARY ibald.dll +#else +LIBRARY ibal.dll +#endif + +#ifndef _WIN64 +EXPORTS +acquire_ca +deref_al_obj +ib_add_svc_entry +ib_alloc_pd +ib_bind_mw +ib_cancel_mad +ib_cancel_query +ib_ci_call +ib_close_al +ib_close_ca +ib_cm_apr +ib_cm_cancel +ib_cm_drep +ib_cm_dreq +ib_cm_handoff +ib_cm_lap +ib_cm_listen +ib_cm_mra +ib_cm_rej +ib_cm_rep +ib_cm_req +ib_cm_rtu +ib_convert_cl_status +ib_copy_ca_attr +ib_create_av +ib_create_cq +ib_create_ioc +ib_create_mad_pool +ib_create_mw +ib_create_qp +ib_dealloc_pd +ib_dereg_mad_pool +ib_dereg_mr +ib_dereg_pnp +ib_dereg_svc +ib_destroy_av +ib_destroy_cq +ib_destroy_ioc +ib_destroy_mad_pool +ib_destroy_mw +ib_destroy_qp +ib_dm_get_slot_lo_hi +ib_dm_set_slot_lo_hi +ib_force_apm +ib_get_async_event_str +ib_get_attr_offset +ib_get_attr_size +ib_get_ca_by_gid +ib_get_ca_guids +ib_get_err_str +ib_get_guid +ib_get_mad +ib_get_mad_buf +ib_get_node_type_str +ib_get_port_by_gid +ib_get_port_state_from_str +ib_get_port_state_str +ib_get_query_node_rec +ib_get_query_path_rec +ib_get_query_portinfo_rec +ib_get_query_result +ib_get_query_svc_rec +ib_get_spl_qp +ib_get_wc_status_str +ib_get_wc_type_str +ib_gid_get_guid +ib_gid_get_subnet_prefix +ib_gid_is_multicast +ib_gid_set_default +ib_grh_get_ver_class_flow +ib_grh_set_ver_class_flow +ib_inform_get_dev_id +ib_inform_get_prod_type +ib_inform_get_qpn +ib_inform_get_resp_time_val +ib_inform_get_trap_num +ib_inform_get_vend_id +ib_inform_set_dev_id +ib_inform_set_prod_type +ib_inform_set_qpn +ib_inform_set_resp_time_val +ib_inform_set_trap_num +ib_inform_set_vend_id +ib_init_dgrm_svc +ib_ioc_profile_get_subsys_vend_id +ib_ioc_profile_get_vend_id +ib_ioc_profile_set_subsys_vend_id +ib_ioc_profile_set_vend_id +ib_iou_info_diag_dev_id +ib_iou_info_option_rom +ib_join_mcast +ib_leave_mcast +ib_local_mad +ib_mad_init_new +ib_mad_init_response +ib_mad_is_response +ib_member_get_scope +ib_member_get_scope_state +ib_member_get_sl_flow_hop +ib_member_get_state +ib_member_set_join_state +ib_member_set_scope +ib_member_set_scope_state +ib_member_set_sl_flow_hop +ib_member_set_state +ib_modify_av +ib_modify_ca +ib_modify_cq +ib_modify_qp +ib_node_info_get_local_port_num +ib_node_info_get_vendor_id +ib_notice_get_count +ib_notice_get_dev_id +ib_notice_get_generic +ib_notice_get_prod_type +ib_notice_get_toggle +ib_notice_get_trap_num +ib_notice_get_type +ib_notice_get_vend_id +ib_notice_set_count +ib_notice_set_dev_id +ib_notice_set_generic +ib_notice_set_prod_type +ib_notice_set_toggle +ib_notice_set_trap_num +ib_notice_set_type +ib_notice_set_vend_id +ib_open_al +ib_open_ca +ib_path_get_ipd +ib_path_rec_flow_lbl +ib_path_rec_hop_limit +ib_path_rec_init_local +ib_path_rec_mtu +ib_path_rec_mtu_sel +ib_path_rec_pkt_life +ib_path_rec_pkt_life_sel +ib_path_rec_rate +ib_path_rec_rate_sel +ib_path_rec_set_hop_flow_raw +ib_path_rec_sl +ib_peek_cq +ib_pkey_get_base +ib_pkey_is_full_member +ib_poll_cq +ib_port_info_compute_rate +ib_port_info_get_link_speed_sup +ib_port_info_get_lmc +ib_port_info_get_mpb +ib_port_info_get_mtu_cap +ib_port_info_get_neighbor_mtu +ib_port_info_get_op_vls +ib_port_info_get_port_state +ib_port_info_get_vl_cap +ib_port_info_set_link_speed_sup +ib_port_info_set_lmc +ib_port_info_set_mpb +ib_port_info_set_neighbor_mtu +ib_port_info_set_op_vls +ib_port_info_set_port_state +ib_port_info_set_state_no_change +ib_port_info_set_timeout +ib_post_recv +ib_post_send +ib_put_mad +ib_query +ib_query_av +ib_query_ca +ib_query_ca_by_guid +ib_query_cq +ib_query_mr +ib_query_mw +ib_query_qp +ib_rearm_cq +ib_rearm_n_cq 
+ib_reg_ioc +ib_reg_mad_pool +ib_reg_mad_svc +ib_reg_mem +ib_reg_phys +ib_reg_pnp +ib_reg_shared +ib_reg_shmid +ib_reg_svc +ib_reject_ioc +ib_remove_svc_entry +ib_rereg_mem +ib_rereg_phys +ib_rmpp_get_resp_time +ib_rmpp_is_flag_set +ib_rmpp_set_resp_time +ib_sa_mad_get_payload_ptr +ib_send_mad +ib_slvl_table_set +ib_slvl_table_get +ib_sminfo_get_priority +ib_sminfo_get_state +ib_smp_get_payload_ptr +ib_smp_get_status +ib_smp_init_new +ib_smp_is_d +ib_smp_is_response +ib_subscribe +ib_switch_info_clear_state_change +ib_switch_info_get_state_change +ib_unsubscribe +ib_vl_arb_element_get_vl +ib_vl_arb_element_set_vl +ioc_at_slot +ual_close_ca +#endif diff --git a/branches/Ndi/core/al/user/al_mad_pool.c b/branches/Ndi/core/al/user/al_mad_pool.c new file mode 100644 index 00000000..02ad282b --- /dev/null +++ b/branches/Ndi/core/al/user/al_mad_pool.c @@ -0,0 +1,1507 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "al.h" +#include "al_ci_ca.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "al_mad_pool.tmh" +#endif + +#include "al_mad_pool.h" +#include "al_pd.h" +#include "al_verbs.h" +#include "ib_common.h" + + +typedef struct _mad_reg +{ + al_obj_t obj; /* Child of al_pool_key_t */ + ib_mr_handle_t h_mr; + net32_t lkey; + net32_t rkey; + mad_array_t* p_mad_array; + +} mad_reg_t; + + + +typedef struct _mad_send +{ + al_mad_send_t mad_send; + ib_pool_handle_t h_pool; + +} mad_send_t; + + + + +typedef struct _mad_rmpp +{ + al_mad_rmpp_t mad_rmpp; + ib_pool_handle_t h_pool; + +} mad_rmpp_t; + + + +/* + * Function prototypes. 
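+ * All of the following are file-local helpers for pool creation,
+ * growth, registration, and teardown.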
+ */ +static void +__destroying_pool( + IN al_obj_t* p_obj ); + +static void +__free_pool( + IN al_obj_t* p_obj ); + +static void +__cleanup_pool_key( + IN al_obj_t* p_obj ); + +static void +__free_pool_key( + IN al_obj_t* p_obj ); + +static ib_api_status_t +__reg_mad_array( + IN al_pool_key_t* const p_pool_key, + IN mad_array_t* const p_mad_array ); + +static void +__free_mad_reg( + IN al_obj_t* p_obj ); + +static ib_api_status_t +__init_mad_element( + IN const al_pool_key_t* p_pool_key, + IN OUT mad_item_t* p_mad_item ); + +static cl_status_t +__locate_reg_cb( + IN const cl_list_item_t* const p_list_item, + IN void* context ); + +static ib_api_status_t +__grow_mad_pool( + IN const ib_pool_handle_t h_pool, + OUT mad_item_t** pp_mad_item OPTIONAL ); + +static void +__free_mad_array( + IN al_obj_t* p_obj ); + +static cl_status_t +__mad_send_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + +static cl_status_t +__mad_rmpp_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + + + +/* + * Create a MAD pool. + */ +ib_api_status_t +ib_create_mad_pool( + IN const ib_al_handle_t h_al, + IN const size_t min, + IN const size_t max, + IN const size_t grow_size, + OUT ib_pool_handle_t* const ph_pool ) +{ + ib_pool_handle_t h_pool; + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER(AL_DBG_MAD_POOL); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !ph_pool ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Validate the min and max parameters. */ + if( (min > 0) && (max > 0) && (min > max) ) + return IB_INVALID_SETTING; + + h_pool = cl_zalloc( sizeof( al_pool_t ) ); + if( !h_pool ) + return IB_INSUFFICIENT_MEMORY; + + /* Initialize the pool lists. */ + cl_qlist_init( &h_pool->mad_stack ); + cl_qlist_init( &h_pool->key_list ); + cl_qpool_construct( &h_pool->mad_send_pool ); + cl_qpool_construct( &h_pool->mad_rmpp_pool ); + + /* Initialize the pool object. */ + construct_al_obj( &h_pool->obj, AL_OBJ_TYPE_H_MAD_POOL ); + status = init_al_obj( &h_pool->obj, h_pool, TRUE, + __destroying_pool, NULL, __free_pool ); + if( status != IB_SUCCESS ) + { + __free_pool( &h_pool->obj ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Attach the pool to the AL object. */ + attach_al_obj( &h_al->obj, &h_pool->obj ); + + /* Save the pool parameters. Set grow_size to min for initialization. */ + h_pool->max = max; + if( min ) + { + h_pool->grow_size = min; + + /* Grow the pool to the minimum size. */ + status = __grow_mad_pool( h_pool, NULL ); + if( status != IB_SUCCESS ) + { + h_pool->obj.pfn_destroy( &h_pool->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("grow_mad_pool failed with status %s.\n", + ib_get_err_str(status)) ); + return status; + } + } + + /* Save the grow_size for subsequent allocations. */ + h_pool->grow_size = grow_size; + + /* Initialize the pool of mad send tracking structures. 
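+	 * Each mad_send_t is a cl_qpool item whose constructor,
+	 * __mad_send_init, records the owning pool handle so the tracking
+	 * structure can be returned to the right pool on completion.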
 */
+	cl_status = cl_qpool_init( &h_pool->mad_send_pool,
+		min, max, grow_size, sizeof( mad_send_t ),
+		__mad_send_init, NULL, h_pool );
+	if( cl_status != CL_SUCCESS )
+	{
+		status = ib_convert_cl_status( cl_status );
+		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
+
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("cl_qpool_init failed with status %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Initialize the pool of MAD RMPP tracking structures. */
+	cl_status = cl_qpool_init( &h_pool->mad_rmpp_pool,
+		min, max, grow_size, sizeof( mad_rmpp_t ),
+		__mad_rmpp_init, NULL, h_pool );
+	if( cl_status != CL_SUCCESS )
+	{
+		status = ib_convert_cl_status( cl_status );
+		h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
+
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("cl_qpool_init failed with status %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Return the pool handle. */
+	*ph_pool = h_pool;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_pool->obj );
+
+	AL_EXIT(AL_DBG_MAD_POOL);
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Pre-destroy the pool.
+ */
+static void
+__destroying_pool(
+	IN				al_obj_t*					p_obj )
+{
+	ib_pool_handle_t		h_pool;
+	ib_al_handle_t			h_al;
+
+	AL_ENTER(AL_DBG_MAD_POOL);
+
+	CL_ASSERT( p_obj );
+	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
+
+	/* Get the AL instance of this MAD pool. */
+	p_obj = h_pool->obj.p_parent_obj;
+	h_al = PARENT_STRUCT( p_obj, ib_al_t, obj );
+
+	/* Deregister this MAD pool from all protection domains. */
+	al_dereg_pool( h_al, h_pool );
+
+	AL_EXIT(AL_DBG_MAD_POOL);
+}
+
+
+
+/*
+ * Free the pool.
+ */
+static void
+__free_pool(
+	IN				al_obj_t*					p_obj )
+{
+	ib_pool_handle_t		h_pool;
+
+	CL_ASSERT( p_obj );
+	h_pool = PARENT_STRUCT( p_obj, al_pool_t, obj );
+
+	cl_qpool_destroy( &h_pool->mad_send_pool );
+	cl_qpool_destroy( &h_pool->mad_rmpp_pool );
+	destroy_al_obj( &h_pool->obj );
+	cl_free( h_pool );
+}
+
+
+
+/*
+ * Destroy a MAD pool.
+ */
+ib_api_status_t
+ib_destroy_mad_pool(
+	IN		const	ib_pool_handle_t			h_pool )
+{
+	cl_list_item_t*			p_array_item;
+	al_obj_t*				p_obj;
+	boolean_t				busy;
+
+	AL_ENTER(AL_DBG_MAD_POOL);
+
+	if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_HANDLE\n") );
+		return IB_INVALID_HANDLE;
+	}
+
+	/* Verify that all send handles and MAD elements are in the pool. */
+	cl_spinlock_acquire( &h_pool->obj.lock );
+	busy = ( h_pool->obj.ref_cnt > 1 );
+	for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list );
+		 p_array_item != cl_qlist_end( &h_pool->obj.obj_list ) && !busy;
+		 p_array_item = cl_qlist_next( p_array_item ) )
+	{
+		p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item );
+		busy = ( p_obj->ref_cnt > 1 );
+	}
+	cl_spinlock_release( &h_pool->obj.lock );
+
+	/* Return an error if the pool is busy. */
+	if( busy )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("h_pool (0x%p) is busy!\n", h_pool) );
+		return IB_RESOURCE_BUSY;
+	}
+
+	ref_al_obj( &h_pool->obj );
+	h_pool->obj.pfn_destroy( &h_pool->obj, NULL );
+
+	AL_EXIT(AL_DBG_MAD_POOL);
+	return IB_SUCCESS;
+}
+
+
+
+/*
+ * Register a MAD pool with a protection domain.
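+ *
+ * Typical call sequence (illustrative only; error handling omitted):
+ *
+ *	ib_pool_key_t		pool_key;
+ *	ib_mad_element_t	*p_elem;
+ *
+ *	ib_reg_mad_pool( h_pool, h_pd, &pool_key );
+ *	ib_get_mad( pool_key, MAD_BLOCK_SIZE, &p_elem );
+ *	... build and send the MAD ...
+ *	ib_put_mad( p_elem );
+ *	ib_dereg_mad_pool( pool_key );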
+ */ +ib_api_status_t +ib_reg_mad_pool( + IN const ib_pool_handle_t h_pool, + IN const ib_pd_handle_t h_pd, + OUT ib_pool_key_t* const pp_pool_key ) +{ + al_pool_key_t* p_pool_key; + cl_list_item_t* p_array_item; + al_obj_t* p_obj; + ib_al_handle_t h_al; + mad_array_t* p_mad_array; + ib_api_status_t status; + al_key_type_t key_type; + + AL_ENTER(AL_DBG_MAD_POOL); + + if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + /* Alias keys require an alias PD. */ + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( !pp_pool_key ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Set the type of key to create. */ + if( h_pd->type != IB_PDT_ALIAS ) + key_type = AL_KEY_NORMAL; + else + key_type = AL_KEY_ALIAS; + + /* Allocate a pool key structure. */ + p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) ); + if( !p_pool_key ) + return IB_INSUFFICIENT_MEMORY; + + /* Initialize the pool key. */ + construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY ); + p_pool_key->type = key_type; + p_pool_key->h_pool = h_pool; + p_pool_key->h_pd = h_pd; + + /* Initialize the pool key object. */ + status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE, + NULL, __cleanup_pool_key, __free_pool_key ); + if( status != IB_SUCCESS ) + { + __free_pool_key( &p_pool_key->obj ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + status = attach_al_obj( &h_pd->obj, &p_pool_key->obj ); + if( status != IB_SUCCESS ) + { + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* From the PD, get the AL handle of the pool_key. */ + p_obj = h_pd->obj.p_parent_obj->p_parent_obj; + h_al = PARENT_STRUCT( p_obj, ib_al_t, obj ); + + /* Add this pool_key to the AL instance. */ + al_insert_key( h_al, p_pool_key ); + + ref_al_obj( &h_pd->obj ); + ref_al_obj( &h_pool->obj ); + + /* + * Take a reference on the global pool_key for this CA, if it exists. + * Note that the pool_key does not exist for the global MAD pool in + * user-mode, as that MAD pool never registers memory on a PD. + */ + if( key_type == AL_KEY_ALIAS && h_pd->obj.p_ci_ca->pool_key ) + { + ref_al_obj( &h_pd->obj.p_ci_ca->pool_key->obj ); + p_pool_key->pool_key = h_pd->obj.p_ci_ca->pool_key; + } + + /* Register the pool on the protection domain. */ + if( key_type == AL_KEY_NORMAL ) + { + /* Chain the pool key onto the pool. */ + cl_spinlock_acquire( &h_pool->obj.lock ); + cl_qlist_insert_tail( &h_pool->key_list, &p_pool_key->pool_item ); + + /* Synchronize with growing the MAD pool. 
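+		 * The key was chained onto key_list above under this same lock
+		 * hold, so each MAD array is registered with the key exactly
+		 * once: either by this loop or by __grow_mad_pool's walk of
+		 * key_list, never by both.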
*/ + for( p_array_item = cl_qlist_head( &h_pool->obj.obj_list ); + p_array_item != cl_qlist_end( &h_pool->obj.obj_list ); + p_array_item = cl_qlist_next( p_array_item ) ) + { + p_obj = PARENT_STRUCT( p_array_item, al_obj_t, pool_item ); + p_mad_array = PARENT_STRUCT( p_obj, mad_array_t, obj ); + + status = __reg_mad_array( p_pool_key, p_mad_array ); + + if( status != IB_SUCCESS ) + { + cl_spinlock_release( &h_pool->obj.lock ); + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("reg_mad_array failed with status %s.\n", + ib_get_err_str(status)) ); + return status; + } + } + cl_spinlock_release( &h_pool->obj.lock ); + } + + /* + * If the PD is of alias type, then we need to create/register an + * equivalent pool key in the kernel. + */ + if( h_pd->type == IB_PDT_ALIAS ) + { + status = create_reg_mad_pool( h_pool, h_pd, p_pool_key ); + if( status != IB_SUCCESS ) + { + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + return status; + } + } + + /* Return the pool key. */ + *pp_pool_key = (ib_pool_key_t)p_pool_key; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_pool_key->obj ); + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +/* + * Release all references on objects that were needed by the pool key. + */ +static void +__cleanup_pool_key( + IN al_obj_t* p_obj ) +{ + cl_list_item_t *p_list_item, *p_next_item; + ib_mad_element_t *p_mad_element_list, *p_last_mad_element; + al_mad_element_t *p_mad; + ib_api_status_t status; + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + /* Search for any outstanding MADs associated with the given pool key. */ + if( p_pool_key->mad_cnt ) + { + p_mad_element_list = p_last_mad_element = NULL; + + cl_spinlock_acquire( &p_pool_key->h_al->mad_lock ); + for( p_list_item = cl_qlist_head( &p_pool_key->h_al->mad_list ); + p_list_item != cl_qlist_end( &p_pool_key->h_al->mad_list ); + p_list_item = p_next_item ) + { + p_next_item = cl_qlist_next( p_list_item ); + p_mad = PARENT_STRUCT( p_list_item, al_mad_element_t, al_item ); + + if( p_mad->pool_key != p_pool_key ) continue; + + /* Build the list of MADs to be returned to pool. */ + if( p_last_mad_element ) + p_last_mad_element->p_next = &p_mad->element; + else + p_mad_element_list = &p_mad->element; + + p_last_mad_element = &p_mad->element; + p_last_mad_element->p_next = NULL; + } + cl_spinlock_release( &p_pool_key->h_al->mad_lock ); + + /* Return any outstanding MADs to the pool. */ + if( p_mad_element_list ) + { + status = ib_put_mad( p_mad_element_list ); + if( status != IB_SUCCESS ) + { + AL_PRINT(TRACE_LEVEL_ERROR , AL_DBG_ERROR , + ("ib_put_mad failed with status %s, continuing.\n", + ib_get_err_str(status)) ); + } + } + } + + /* + * Remove the pool key from the pool to prevent further registrations + * against this pool. + * + * Warning: There is a small window where a pool key can be destroyed + * while its associated pool is growing. In this case, the pool key + * will receive a new registration after it has been destroyed. This + * is a result of having to register memory with the HCA without holding + * a lock, making correct synchronization impossible. One solution to + * this problem is to register all of physical memory, which avoids + * having to register more memory as a MAD pool grows. 
+ */ + if( p_pool_key->type == AL_KEY_NORMAL ) + { + cl_spinlock_acquire( &p_pool_key->h_pool->obj.lock ); + cl_qlist_remove_item( &p_pool_key->h_pool->key_list, + &p_pool_key->pool_item ); + cl_spinlock_release( &p_pool_key->h_pool->obj.lock ); + } + + /* Remove this pool_key from the AL instance. */ + al_remove_key( p_pool_key ); + + /* User-mode only: cleanup kernel resources. */ + dereg_destroy_mad_pool( p_pool_key ); + + deref_al_obj( &p_pool_key->h_pool->obj ); + p_pool_key->h_pool = NULL; + deref_al_obj( &p_pool_key->h_pd->obj ); + p_pool_key->h_pd = NULL; + if( p_pool_key->pool_key ) + deref_al_obj( &p_pool_key->pool_key->obj ); +} + + + +/* + * Free a pool key. + */ +static void +__free_pool_key( + IN al_obj_t* p_obj ) +{ + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + destroy_al_obj( &p_pool_key->obj ); + cl_free( p_pool_key ); +} + + + +/* + * Register a MAD array with a protection domain. + */ +static ib_api_status_t +__reg_mad_array( + IN al_pool_key_t* const p_pool_key, + IN mad_array_t* const p_mad_array ) +{ + mad_reg_t* p_reg; + ib_mr_create_t mr_create; + ib_api_status_t status; + + CL_ASSERT( p_pool_key ); + CL_ASSERT( p_mad_array ); + + /* Determine if there is memory to register. */ + if( p_mad_array->sizeof_array == 0 ) + return IB_SUCCESS; + + p_reg = cl_zalloc( sizeof( mad_reg_t ) ); + if( p_reg == NULL ) + return IB_INSUFFICIENT_MEMORY; + + /* + * Initialize the registration object. We use synchronous + * destruction to deregister memory immediately. Otherwise, the + * memory will be automatically deregistered when destroying the + * PD, which can lead to trying to deregister the memory twice. + */ + construct_al_obj( &p_reg->obj, AL_OBJ_TYPE_MAD_POOL ); + status = init_al_obj( &p_reg->obj, p_reg, FALSE, + NULL, NULL, __free_mad_reg ); + if( status != IB_SUCCESS ) + { + __free_mad_reg( &p_reg->obj ); + return status; + } + + /* Attach the registration to the pool key. */ + attach_al_obj( &p_pool_key->obj, &p_reg->obj ); + + if( p_pool_key->h_pd->type != IB_PDT_ALIAS ) + { + /* Register the MAD array on the protection domain. */ + cl_memclr( &mr_create, sizeof( ib_mr_create_t ) ); + mr_create.vaddr = p_mad_array->p_data; + mr_create.length = p_mad_array->sizeof_array; + mr_create.access_ctrl = IB_AC_LOCAL_WRITE; + + status = ib_reg_mem( p_pool_key->h_pd, &mr_create, &p_reg->lkey, + &p_reg->rkey, &p_reg->h_mr ); + } + + if( status != IB_SUCCESS ) + { + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + return status; + } + + /* Save p_mad_array to match the registration with the array. */ + p_reg->p_mad_array = p_mad_array; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_reg->obj ); + + return IB_SUCCESS; +} + + + +/* + * Free a MAD registration. + */ +static void +__free_mad_reg( + IN al_obj_t* p_obj ) +{ + mad_reg_t* p_reg; + ib_api_status_t status; + + CL_ASSERT( p_obj ); + p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj ); + + /* Deregister the MAD array if it was registered. */ + if( p_reg->h_mr ) + { + status = ib_dereg_mr( p_reg->h_mr ); + CL_ASSERT( status == IB_SUCCESS ); + } + + destroy_al_obj( &p_reg->obj ); + cl_free( p_reg ); +} + + + +/* + * Deregister a MAD pool from a protection domain. Only normal pool_keys + * can be destroyed using this routine. 
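+ * An alias key passed here fails the expected_type check in
+ * dereg_mad_pool; alias keys are torn down through their own internal
+ * cleanup path.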
+ */ +ib_api_status_t +ib_dereg_mad_pool( + IN const ib_pool_key_t pool_key ) +{ + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD_POOL); + + if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + ref_al_obj( &pool_key->obj ); + status = dereg_mad_pool( pool_key, AL_KEY_NORMAL ); + + if( status != IB_SUCCESS ) + deref_al_obj( &pool_key->obj ); + + AL_EXIT(AL_DBG_MAD_POOL); + return status; +} + + + +/* + * Deregister a MAD pool from a protection domain. + */ +ib_api_status_t +dereg_mad_pool( + IN const ib_pool_key_t pool_key , + IN const al_key_type_t expected_type ) +{ + AL_ENTER(AL_DBG_MAD_POOL); + + if( pool_key->type != expected_type ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + ///* Check mad_cnt to see if MADs are still outstanding. */ + //if( pool_key->mad_cnt ) + //{ + // AL_PRINT_EXIT( TRACE_LEVEL_ERROR, AL_DBG_MAD_POOL, ("IB_RESOURCE_BUSY\n") ); + // return IB_RESOURCE_BUSY; + //} + + pool_key->obj.pfn_destroy( &pool_key->obj, NULL ); + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +/* + * Obtain a MAD element from the pool. + */ +static ib_api_status_t +__get_mad_element( + IN const ib_pool_key_t pool_key, + OUT al_mad_element_t** pp_mad_element ) +{ + al_pool_key_t* p_pool_key; + ib_pool_handle_t h_pool; + cl_list_item_t* p_item; + mad_item_t* p_mad_item; + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD_POOL); + + CL_ASSERT( pool_key ); + CL_ASSERT( pp_mad_element ); + + p_pool_key = (al_pool_key_t*)pool_key; + h_pool = p_pool_key->h_pool; + + /* Obtain a MAD item from the stack. */ + cl_spinlock_acquire( &h_pool->obj.lock ); + p_item = cl_qlist_remove_head( &h_pool->mad_stack ); + p_mad_item = PARENT_STRUCT( p_item, mad_item_t, al_mad_element.list_item ); + if( p_item == cl_qlist_end( &h_pool->mad_stack ) ) + { + /* The stack was empty. Grow the pool and obtain a new item. */ + cl_spinlock_release( &h_pool->obj.lock ); + status = __grow_mad_pool( h_pool, &p_mad_item ); + if( status != IB_SUCCESS ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("grow_mad_pool failed with status %s.\n", + ib_get_err_str(status)) ); + return status; + } + } + else + { + cl_spinlock_release( &h_pool->obj.lock ); + } + + /* Get the local data segment information for this pool key. */ + status = __init_mad_element( p_pool_key, p_mad_item ); + if( status != IB_SUCCESS ) + { + cl_spinlock_acquire( &h_pool->obj.lock ); + cl_qlist_insert_head( &h_pool->mad_stack, + &p_mad_item->al_mad_element.list_item ); + cl_spinlock_release( &h_pool->obj.lock ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_mad_element failed with status %s.\n", + ib_get_err_str(status)) ); + return status; + } + + /* Hold a reference on the array while a MAD element is removed. */ + ref_al_obj( &p_mad_item->p_mad_array->obj ); + + p_mad_item->al_mad_element.pool_key = (ib_pool_key_t)pool_key; + /* Return the MAD element. */ + *pp_mad_element = &p_mad_item->al_mad_element; + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +static void +__setup_mad_element( + IN OUT al_mad_element_t* const p_al_mad_element, + IN const uint32_t lkey ) +{ + /* Clear the MAD element. */ + cl_memclr( &p_al_mad_element->element, sizeof( ib_mad_element_t ) ); + + /* Initialize the receive data segment information. */ + p_al_mad_element->grh_ds.lkey = lkey; + + /* Initialize the send data segment information. 
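+	 * Both data segments share the lkey of the registration covering
+	 * this MAD array; the GRH segment simply precedes the MAD buffer
+	 * within the same block of memory.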
*/ + p_al_mad_element->mad_ds.lkey = lkey; + + /* Initialize grh */ + p_al_mad_element->element.p_grh = + (ib_grh_t*)(uintn_t)p_al_mad_element->grh_ds.vaddr; +} + + + +/* + * Initialize the MAD element local data segment for this pool key. + */ +static ib_api_status_t +__init_mad_element( + IN const al_pool_key_t* p_pool_key, + IN OUT mad_item_t* p_mad_item ) +{ + cl_list_item_t *p_item; + cl_qlist_t *p_list; + al_obj_t *p_obj; + mad_reg_t *p_reg; + ib_pool_handle_t h_pool; + + CL_ASSERT( p_pool_key ); + CL_ASSERT( p_mad_item != NULL ); + + /* Find the MAD array registration entry. */ + if( p_pool_key->type == AL_KEY_NORMAL ) + { + p_list = (cl_qlist_t*)&p_pool_key->obj.obj_list; + } + else + { +#if defined( CL_KERNEL ) + /* Search the registrations on the actual pool key, not the alias. */ + p_list = (cl_qlist_t*)&p_pool_key->pool_key->obj.obj_list; +#else + /* + * Note that MAD elements used by user-mode clients on special QPs + * are not registered on a user-mode PD. The user-mode MAD elements + * must be copied into a kernel-mode MAD element before being sent. + */ + __setup_mad_element( &p_mad_item->al_mad_element, 0 ); + return IB_SUCCESS; +#endif + } + + /* Prevent MAD array registrations. */ + h_pool = p_pool_key->h_pool; + cl_spinlock_acquire( &h_pool->obj.lock ); + + /* Search for the registration entry. */ + p_item = cl_qlist_find_from_head( p_list, __locate_reg_cb, + p_mad_item->p_mad_array ); + if( p_item == cl_qlist_end( p_list ) ) + { + cl_spinlock_release( &h_pool->obj.lock ); + return IB_NOT_FOUND; + } + + /* Allow MAD array registrations. */ + cl_spinlock_release( &h_pool->obj.lock ); + + /* Get a pointer to the registration. */ + p_obj = PARENT_STRUCT( p_item, al_obj_t, pool_item ); + p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj ); + __setup_mad_element( &p_mad_item->al_mad_element, p_reg->lkey ); + + return IB_SUCCESS; +} + + + +/* + * Determine if a registration is for a given array. + */ +static cl_status_t +__locate_reg_cb( + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + al_obj_t* p_obj; + mad_reg_t* p_reg; + mad_array_t* p_mad_array; + + CL_ASSERT( p_list_item ); + CL_ASSERT( context ); + + p_obj = PARENT_STRUCT( p_list_item, al_obj_t, pool_item ); + p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj ); + p_mad_array = context; + + return ( p_reg->p_mad_array == p_mad_array ) ? CL_SUCCESS : CL_NOT_FOUND; +} + + + +/* + * Return a MAD element to the pool. + */ +static void +__put_mad_element( + IN al_mad_element_t* p_mad_element ) +{ + mad_item_t* p_mad_item; + ib_pool_handle_t h_pool; + + CL_ASSERT( p_mad_element ); + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + + /* Get a handle to the pool. */ + h_pool = p_mad_item->p_mad_array->h_pool; + + /* Clear the MAD buffer. */ + cl_memclr( + (uint8_t*)(uintn_t)p_mad_element->grh_ds.vaddr, MAD_BLOCK_GRH_SIZE ); + p_mad_element->element.p_next = NULL; + + /* Return the MAD element to the pool. */ + cl_spinlock_acquire( &h_pool->obj.lock ); + cl_qlist_insert_head( &h_pool->mad_stack, + &p_mad_item->al_mad_element.list_item ); + cl_spinlock_release( &h_pool->obj.lock ); + + /* Dereference the array when a MAD element is returned. */ + deref_al_obj( &p_mad_item->p_mad_array->obj ); +} + + + +/* + * Grow a MAD pool. 
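+ *
+ * Each growth is a single allocation laid out, low to high, as
+ *
+ *	[ grow_size x (GRH + MAD) buffers ][ grow_size x mad_item_t ][ mad_array_t ]
+ *
+ * so one cl_zalloc/cl_free pair covers the MAD data, the item headers,
+ * and the array descriptor.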
+ */ +static ib_api_status_t +__grow_mad_pool( + IN const ib_pool_handle_t h_pool, + OUT mad_item_t** pp_mad_item OPTIONAL ) +{ + size_t i; + size_t alloc_size; + uint8_t* p_data; + mad_array_t* p_mad_array; + mad_item_t* p_mad_item; + mad_item_t* p_mad_items; + cl_list_item_t* p_key_item; + al_pool_key_t* p_pool_key; + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD_POOL); + + CL_ASSERT( h_pool ); + + /* Determine if the pool is allowed to grow. */ + if( h_pool->grow_size == 0 ) + return IB_INSUFFICIENT_RESOURCES; + + /* Lock the pool. */ + cl_spinlock_acquire( &h_pool->obj.lock ); + + /* Determine if the pool has a maximum. */ + if( h_pool->max != 0 ) + { + /* Determine if the pool maximum has been reached. */ + if( h_pool->actual >= h_pool->max ) + { + cl_spinlock_release( &h_pool->obj.lock ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("h_pool's (0x%p) maximum has been reached.\n", h_pool) ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Determine if growing the pool will exceed the maximum. */ + if( (h_pool->actual + h_pool->grow_size) > h_pool->max ) + { + cl_spinlock_release( &h_pool->obj.lock ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("h_pool's (0x%p) will exceed maximum on grow.\n", h_pool) ); + return IB_INSUFFICIENT_RESOURCES; + } + } + + /* Calculate the allocation size. */ + alloc_size = sizeof( mad_item_t ); + alloc_size += MAD_BLOCK_GRH_SIZE; + alloc_size *= h_pool->grow_size; + alloc_size += sizeof( mad_array_t ); + + /* Allocate a MAD data array and item structures. */ + p_data = cl_zalloc( alloc_size ); + if( p_data == NULL ) + { + cl_spinlock_release( &h_pool->obj.lock ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Offset to the MAD array structure. */ + alloc_size -= sizeof( mad_array_t ); + p_mad_array = (mad_array_t*)(p_data + alloc_size); + + /* Offset to the array of MAD item structures. */ + alloc_size -= sizeof( mad_item_t ) * h_pool->grow_size; + p_mad_items = (mad_item_t*)(p_data + alloc_size); + + /* Initialize the MAD array structure. */ + p_mad_array->h_pool = h_pool; + p_mad_array->p_data = p_data; + p_mad_array->sizeof_array = alloc_size; + + /* Initialize the MAD array object. */ + construct_al_obj( &p_mad_array->obj, AL_OBJ_TYPE_MAD_POOL ); + status = init_al_obj( &p_mad_array->obj, p_mad_array, TRUE, + NULL, NULL, __free_mad_array ); + if( status != IB_SUCCESS ) + { + cl_spinlock_release( &h_pool->obj.lock ); + __free_mad_array( &p_mad_array->obj ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Register the MAD array on the existing pool protection domains. */ + for( p_key_item = cl_qlist_head( &h_pool->key_list ); + p_key_item != cl_qlist_end( &h_pool->key_list ); + p_key_item = cl_qlist_next( p_key_item ) ) + { + p_pool_key = PARENT_STRUCT( p_key_item, al_pool_key_t, pool_item ); + ref_al_obj( &p_pool_key->obj ); + status = __reg_mad_array( p_pool_key, p_mad_array ); + deref_al_obj( &p_pool_key->obj ); + if( status != IB_SUCCESS ) + break; + } + + if( status != IB_SUCCESS ) + { + cl_spinlock_release( &h_pool->obj.lock ); + p_mad_array->obj.pfn_destroy( &p_mad_array->obj, NULL ); + return status; + } + + /* The pool has been successfully grown. Update the actual size. */ + h_pool->actual += h_pool->grow_size; + + /* Intialize the MAD stack item structures. 
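+	 * Each item is aimed at its slice of the data block: the GRH
+	 * scatter entry covers GRH plus MAD (MAD_BLOCK_GRH_SIZE), while the
+	 * send entry starts sizeof( ib_grh_t ) bytes in and covers only
+	 * MAD_BLOCK_SIZE.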
*/ + p_mad_item = p_mad_items; + for( i = 0; i < h_pool->grow_size; i++ ) + { + p_mad_item->p_mad_array = p_mad_array; + + p_mad_item->al_mad_element.grh_ds.vaddr = (uintn_t)p_data; + p_mad_item->al_mad_element.grh_ds.length = MAD_BLOCK_GRH_SIZE; + + p_mad_item->al_mad_element.mad_ds.vaddr = + (uintn_t)(p_data + sizeof( ib_grh_t )); + p_mad_item->al_mad_element.mad_ds.length = MAD_BLOCK_SIZE; + p_data += MAD_BLOCK_GRH_SIZE; + p_mad_item++; + } + + /* Return a MAD item to the caller if one was requested. */ + if( pp_mad_item != NULL ) + { + *pp_mad_item = p_mad_items; + p_mad_items++; + i--; + } + + /* Append the remaining MAD items to the existing stack. */ + cl_qlist_insert_array_tail( &h_pool->mad_stack, + &p_mad_items->al_mad_element.list_item, i, sizeof( mad_item_t ) ); + + /* Unlock the pool. */ + cl_spinlock_release( &h_pool->obj.lock ); + + /* Attach the array object to the pool. */ + attach_al_obj( &h_pool->obj, &p_mad_array->obj ); + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_mad_array->obj ); + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +/* + * Free the MAD array structure. + */ +static void +__free_mad_array( + IN al_obj_t* p_obj ) +{ + mad_array_t* p_mad_array; + ib_pool_handle_t h_pool; + cl_list_item_t* p_key_item; + al_pool_key_t* p_pool_key; + cl_list_item_t* p_reg_item; + cl_list_item_t* p_next_item; + mad_reg_t* p_reg; + + AL_ENTER(AL_DBG_MAD_POOL); + + CL_ASSERT( p_obj ); + p_mad_array = PARENT_STRUCT( p_obj, mad_array_t, obj ); + + /* Destroy any registrations for this MAD array. */ + h_pool = p_mad_array->h_pool; + cl_spinlock_acquire( &h_pool->obj.lock ); + + /* Walk the pool key list. */ + p_key_item = cl_qlist_head( &h_pool->key_list ); + while( p_key_item != cl_qlist_end( &h_pool->key_list ) ) + { + p_pool_key = PARENT_STRUCT( p_key_item, al_pool_key_t, pool_item ); + + /* Walk the pool key registrations. */ + for( p_reg_item = cl_qlist_head( &p_pool_key->obj.obj_list ); + p_reg_item != cl_qlist_end( &p_pool_key->obj.obj_list ); + p_reg_item = p_next_item ) + { + p_next_item = cl_qlist_next( p_reg_item ); + + p_obj = PARENT_STRUCT( p_reg_item, al_obj_t, pool_item ); + p_reg = PARENT_STRUCT( p_obj, mad_reg_t, obj ); + + /* Destroy registrations for this MAD array. */ + if( p_reg->p_mad_array == p_mad_array ) + { + ref_al_obj( &p_reg->obj ); + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + } + } + + p_key_item = cl_qlist_next( p_key_item ); + } + cl_spinlock_release( &h_pool->obj.lock ); + + destroy_al_obj( &p_mad_array->obj ); + cl_free( p_mad_array->p_data ); + + AL_EXIT(AL_DBG_MAD_POOL); +} + + + +/* + * Initialize a MAD send tracking structure to reference the pool from + * whence it came. + */ +static cl_status_t +__mad_send_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + mad_send_t *p_mad_send; + + p_mad_send = (mad_send_t*)p_object; + p_mad_send->h_pool = context; + *pp_pool_item = &p_mad_send->mad_send.pool_item; + return CL_SUCCESS; +} + + + +ib_mad_send_handle_t +get_mad_send( + IN const al_mad_element_t *p_mad_element ) +{ + mad_item_t* p_mad_item; + ib_pool_handle_t h_pool; + cl_pool_item_t *p_pool_item; + ib_mad_send_handle_t h_send; + + CL_ASSERT( p_mad_element ); + + /* Get a handle to the pool. 
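+ * The send tracking structures come from a separate cl_qpool hanging off
+ * the same MAD pool object; the pool lock serializes the qpool get/put
+ * calls, and the ref_al_obj() below pins the pool for as long as the
+ * tracking structure is checked out (put_mad_send drops that reference).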
*/ + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + h_pool = p_mad_item->p_mad_array->h_pool; + + cl_spinlock_acquire( &h_pool->obj.lock ); + p_pool_item = cl_qpool_get( &h_pool->mad_send_pool ); + cl_spinlock_release( &h_pool->obj.lock ); + + if( !p_pool_item ) + return NULL; + + ref_al_obj( &h_pool->obj ); + h_send = PARENT_STRUCT( p_pool_item, al_mad_send_t, pool_item ); + h_send->canceled = FALSE; + h_send->p_send_mad = NULL; + h_send->p_resp_mad = NULL; + h_send->h_av = NULL; + h_send->retry_cnt = 0; + h_send->retry_time = 0; + + return h_send; +} + + + +void +put_mad_send( + IN ib_mad_send_handle_t h_mad_send ) +{ + mad_send_t *p_mad_send; + + p_mad_send = PARENT_STRUCT( h_mad_send, mad_send_t, mad_send ); + + cl_spinlock_acquire( &p_mad_send->h_pool->obj.lock ); + cl_qpool_put( &p_mad_send->h_pool->mad_send_pool, &h_mad_send->pool_item ); + cl_spinlock_release( &p_mad_send->h_pool->obj.lock ); + deref_al_obj( &p_mad_send->h_pool->obj ); +} + + + +/* + * Initialize a MAD RMPP tracking structure to reference the pool from + * whence it came. + */ +static cl_status_t +__mad_rmpp_init( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + mad_rmpp_t *p_mad_rmpp; + + p_mad_rmpp = (mad_rmpp_t*)p_object; + p_mad_rmpp->h_pool = context; + *pp_pool_item = &p_mad_rmpp->mad_rmpp.pool_item; + return CL_SUCCESS; +} + + + +al_mad_rmpp_t* +get_mad_rmpp( + IN const al_mad_element_t *p_mad_element ) +{ + mad_item_t* p_mad_item; + ib_pool_handle_t h_pool; + cl_pool_item_t *p_pool_item; + + CL_ASSERT( p_mad_element ); + + /* Get a handle to the pool. */ + p_mad_item = PARENT_STRUCT( p_mad_element, mad_item_t, al_mad_element ); + h_pool = p_mad_item->p_mad_array->h_pool; + + cl_spinlock_acquire( &h_pool->obj.lock ); + p_pool_item = cl_qpool_get( &h_pool->mad_rmpp_pool ); + cl_spinlock_release( &h_pool->obj.lock ); + + if( !p_pool_item ) + return NULL; + + ref_al_obj( &h_pool->obj ); + return PARENT_STRUCT( p_pool_item, al_mad_rmpp_t, pool_item ); +} + + + +void +put_mad_rmpp( + IN al_mad_rmpp_t* h_mad_rmpp ) +{ + mad_rmpp_t *p_mad_rmpp; + + p_mad_rmpp = PARENT_STRUCT( h_mad_rmpp, mad_rmpp_t, mad_rmpp ); + + cl_spinlock_acquire( &p_mad_rmpp->h_pool->obj.lock ); + cl_qpool_put( &p_mad_rmpp->h_pool->mad_rmpp_pool, &h_mad_rmpp->pool_item ); + cl_spinlock_release( &p_mad_rmpp->h_pool->obj.lock ); + deref_al_obj( &p_mad_rmpp->h_pool->obj ); +} + + + +ib_api_status_t +ib_get_mad( + IN const ib_pool_key_t pool_key, + IN const size_t buf_size, + OUT ib_mad_element_t **pp_mad_element ) +{ + al_pool_key_t* p_pool_key; + al_mad_element_t* p_mad; + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD_POOL); + + if( AL_OBJ_INVALID_HANDLE( pool_key, AL_OBJ_TYPE_H_POOL_KEY ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + if( !buf_size || !pp_mad_element ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + p_pool_key = (al_pool_key_t*)pool_key; + + status = __get_mad_element( pool_key, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_EXIT(AL_DBG_MAD_POOL); + return status; + } + + /* Set the user accessible buffer. */ + if( buf_size <= MAD_BLOCK_SIZE ) + { + /* Use the send buffer for 256 byte MADs. 
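+ * Three cases follow: a request that fits in MAD_BLOCK_SIZE reuses the
+ * pre-registered send buffer; a size of 0xFFFFFFFF or more is rejected,
+ * since element.size is carried as a uint32_t; anything in between gets
+ * its own zeroed allocation, which ib_put_mad() frees later.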
*/ + p_mad->element.p_mad_buf = (ib_mad_t*)(uintn_t)p_mad->mad_ds.vaddr; + } + else if( buf_size >= 0xFFFFFFFF ) + { + __put_mad_element( p_mad ); + return IB_INVALID_SETTING; + } + else + { + /* Allocate a new buffer for the MAD. */ + p_mad->p_al_mad_buf = cl_zalloc( buf_size ); + if( !p_mad->p_al_mad_buf ) + { + __put_mad_element( p_mad ); + AL_EXIT(AL_DBG_MAD_POOL); + return IB_INSUFFICIENT_MEMORY; + } + p_mad->element.p_mad_buf = p_mad->p_al_mad_buf; + } + p_mad->element.size = (uint32_t)buf_size; + + /* Track the MAD element with the requesting AL instance. */ + al_insert_mad( p_pool_key->h_al, p_mad ); + + ref_al_obj( &p_pool_key->obj ); + cl_atomic_inc( &p_pool_key->mad_cnt ); + + /* Return the MAD element to the client. */ + *pp_mad_element = &p_mad->element; + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_put_mad( + IN const ib_mad_element_t* p_mad_element_list ) +{ + al_mad_element_t* p_mad; + + if( !p_mad_element_list ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + while( p_mad_element_list ) + { + p_mad = PARENT_STRUCT( p_mad_element_list, al_mad_element_t, element ); + p_mad_element_list = p_mad_element_list->p_next; + + /* Deallocate any buffers allocated for the user. */ + if( p_mad->p_al_mad_buf ) + { + cl_free( p_mad->p_al_mad_buf ); + p_mad->p_al_mad_buf = NULL; + } + + /* See if the MAD has already been returned to the MAD pool. */ + if( p_mad->h_al ) + { + /* Remove the MAD element from the owning AL instance. */ + al_remove_mad( p_mad ); + + /* Return the MAD element to the pool. */ + cl_atomic_dec( &p_mad->pool_key->mad_cnt ); + deref_al_obj( &p_mad->pool_key->obj ); + __put_mad_element( p_mad ); + } + else + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("MAD has already been returned to MAD pool.\n") ); + } + } + + return IB_SUCCESS; +} + + + +/* + * Resize the data buffer associated with a MAD element. + */ +ib_api_status_t +al_resize_mad( + OUT ib_mad_element_t *p_mad_element, + IN const size_t buf_size ) +{ + al_mad_element_t *p_al_element; + ib_mad_t *p_new_buf; + + CL_ASSERT( p_mad_element ); + + /* We only support growing the buffer for now. */ + CL_ASSERT( buf_size > p_mad_element->size ); + + /* Cap the size. */ + if( buf_size >= 0xFFFFFFFF ) + return IB_INVALID_SETTING; + + p_al_element = PARENT_STRUCT( p_mad_element, al_mad_element_t, element ); + + /* Allocate a new buffer. */ + p_new_buf = cl_malloc( buf_size ); + if( !p_new_buf ) + return IB_INSUFFICIENT_MEMORY; + + /* Copy the existing buffer's data into the new buffer. */ + cl_memcpy( p_new_buf, p_mad_element->p_mad_buf, p_mad_element->size ); + cl_memclr( (uint8_t*)p_new_buf + p_mad_element->size, + buf_size - p_mad_element->size ); + + /* Update the MAD element to use the new buffer. */ + p_mad_element->p_mad_buf = p_new_buf; + p_mad_element->size = (uint32_t)buf_size; + + /* Free any old buffer. */ + if( p_al_element->p_al_mad_buf ) + cl_free( p_al_element->p_al_mad_buf ); + p_al_element->p_al_mad_buf = p_new_buf; + + return IB_SUCCESS; +} + diff --git a/branches/Ndi/core/al/user/ibal.rc b/branches/Ndi/core/al/user/ibal.rc new file mode 100644 index 00000000..e2d86f5f --- /dev/null +++ b/branches/Ndi/core/al/user/ibal.rc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include <oib_ver.h> + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer (Debug)" +#define VER_INTERNALNAME_STR "ibald.dll" +#define VER_ORIGINALFILENAME_STR "ibald.dll" +#else +#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer" +#define VER_INTERNALNAME_STR "ibal.dll" +#define VER_ORIGINALFILENAME_STR "ibal.dll" +#endif + +#include <common.ver> diff --git a/branches/Ndi/core/al/user/makefile b/branches/Ndi/core/al/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/core/al/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/core/al/user/ual_av.c b/branches/Ndi/core/al/user/ual_av.c new file mode 100644 index 00000000..1a6729cd --- /dev/null +++ b/branches/Ndi/core/al/user/ual_av.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + + +#include "ual_support.h" +#include "al_pd.h" +#include "al_av.h" + +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_av.tmh" +#endif + + + +ib_api_status_t +ual_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + IN OUT ib_av_handle_t h_av ) +{ + ual_create_av_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + uvp_interface_t uvp_intf = h_pd->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_AV ); + /* Clear the ioctl_buf */ + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + /* Pre call to the UVP library */ + if( h_pd->h_ci_pd && uvp_intf.pre_create_av ) + { + status = uvp_intf.pre_create_av( h_pd->h_ci_pd, + p_av_attr, &ioctl_buf.in.umv_buf ); + if( status == IB_VERBS_PROCESSING_DONE ) + { + /* Creation is done entirely in user mode. Issue the post call */ + if( uvp_intf.post_create_av ) + { + uvp_intf.post_create_av( h_pd->h_ci_pd, + IB_SUCCESS, &h_av->h_ci_av, &ioctl_buf.in.umv_buf ); + } + AL_EXIT( AL_DBG_AV ); + return IB_SUCCESS; + } + else if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_AV ); + return status; + } + } + + ioctl_buf.in.h_pd = h_pd->obj.hdl; + ioctl_buf.in.attr = *p_av_attr; + + cl_status = do_al_dev_ioctl( UAL_CREATE_AV, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + status = IB_ERROR; + else + { + status = ioctl_buf.out.status; + if(status == IB_SUCCESS) + h_av->obj.hdl = ioctl_buf.out.h_av; + } + + /* Post uvp call */ + if( h_pd->h_ci_pd && uvp_intf.post_create_av ) + { + uvp_intf.post_create_av( h_pd->h_ci_pd, + status, &h_av->h_ci_av, &ioctl_buf.out.umv_buf); + } + + AL_EXIT( AL_DBG_AV ); + return status; +} + + +/* + * This call does not go to the uvp library. The handle should be + * always created in the kernel if it is an alias pd. + */ +ib_api_status_t +ual_pd_alias_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + IN OUT ib_av_handle_t h_av ) +{ + ual_create_av_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + + AL_ENTER( AL_DBG_AV ); + /* Clear the ioctl_buf */ + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.h_pd = h_pd->obj.hdl; + ioctl_buf.in.attr = *p_av_attr; + + cl_status = do_al_dev_ioctl( UAL_CREATE_AV, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CREATE_AV IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + }else + { + status = ioctl_buf.out.status; + if(status == IB_SUCCESS) + h_av->obj.hdl = ioctl_buf.out.h_av; + } + + + AL_EXIT( AL_DBG_AV ); + return status; +} + + +/* + * The functions below can be used by both the alias_pd as well as real pd. + * For alias_pd, there won't be a uvp_av_handle, as the create call doesn't + * go to uvp. Since there is no uvp_av_handle, the query, modify and + * destroy calls also will not call uvp library. So the rest of the + * functions can be shared by both the real pd, and alias pd. 
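*/

Every wrapper in this file repeats the same three-step shape: an optional vendor (UVP) pre-call that may complete the verb entirely in user mode, a kernel IOCTL through the AL proxy, and an optional vendor post-call that observes the result. A condensed sketch of that flow, using hypothetical stand-in names (generic_verb, pre_fn_t, and friends are illustrative only, not part of this patch):

/* Sketch of the recurring UVP pre-call / kernel IOCTL / UVP post-call flow. */
typedef int status_t;
#define STATUS_SUCCESS          0
#define STATUS_PROCESSING_DONE  1  /* stand-in for IB_VERBS_PROCESSING_DONE */

typedef status_t (*pre_fn_t)( void *h_ci, void *umv_buf );
typedef void     (*post_fn_t)( void *h_ci, status_t status, void *umv_buf );
typedef status_t (*ioctl_fn_t)( void *umv_buf );

static status_t
generic_verb( void *h_ci, pre_fn_t pre, post_fn_t post,
    ioctl_fn_t do_ioctl, void *umv_buf )
{
    status_t status;

    /* 1. Vendor pre-call: may finish the request without a kernel trip. */
    if( h_ci && pre )
    {
        status = pre( h_ci, umv_buf );
        if( status == STATUS_PROCESSING_DONE )
        {
            if( post )
                post( h_ci, STATUS_SUCCESS, umv_buf );
            return STATUS_SUCCESS;
        }
        if( status != STATUS_SUCCESS )
            return status;
    }

    /* 2. Forward the request to the kernel proxy. */
    status = do_ioctl( umv_buf );

    /* 3. Vendor post-call sees the final status either way. */
    if( h_ci && post )
        post( h_ci, status, umv_buf );

    return status;
}

/*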
+ */ +ib_api_status_t +ual_destroy_av( + IN ib_av_handle_t h_av ) +{ + ual_destroy_av_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + uvp_interface_t uvp_intf = h_av->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_AV ); + /* Clear the ioctl_buf */ + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + if( h_av->h_ci_av && uvp_intf.pre_destroy_av ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_destroy_av( h_av->h_ci_av ); + CL_ASSERT( (status == IB_SUCCESS) || + (status == IB_VERBS_PROCESSING_DONE) ); + if( status == IB_VERBS_PROCESSING_DONE ) + { + /* Destruction is done in user mode. Issue the post call */ + if( uvp_intf.post_destroy_av ) + uvp_intf.post_destroy_av( h_av->h_ci_av, IB_SUCCESS ); + + AL_EXIT( AL_DBG_AV ); + return IB_SUCCESS; + } + else if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_AV ); + return status; + } + } + + ioctl_buf.in.h_av = h_av->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DESTROY_AV, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DESTROY_AV IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else if( ioctl_buf.out.status != IB_SUCCESS ) + { + CL_ASSERT( status == IB_SUCCESS ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DESTROY_AV IOCTL status %s\n", + ib_get_err_str(ioctl_buf.out.status)) ); + status = ioctl_buf.out.status; + } + else + { + status = ioctl_buf.out.status; + } + + /* Call vendor's post call */ + if( h_av->h_ci_av && uvp_intf.post_destroy_av ) + uvp_intf.post_destroy_av( h_av->h_ci_av, status ); + + AL_EXIT( AL_DBG_AV ); + return status; +} + + + +ib_api_status_t +ual_modify_av( + IN ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_attr) +{ + ual_modify_av_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + ib_av_t* p_av = (ib_av_t*) h_av; + uvp_interface_t uvp_intf = p_av->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_AV ); + /* Clear the ioctl_buf */ + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( p_av->h_ci_av && uvp_intf.pre_modify_av ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_modify_av( p_av->h_ci_av, + p_av_attr, &ioctl_buf.in.umv_buf ); + if( status == IB_VERBS_PROCESSING_DONE ) + { + /* Modification is done in user mode. 
Issue the post call */ + if( uvp_intf.post_modify_av ) + { + uvp_intf.post_modify_av( + p_av->h_ci_av, IB_SUCCESS, &ioctl_buf.in.umv_buf ); + } + + AL_EXIT( AL_DBG_AV ); + return IB_SUCCESS; + } + else if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_AV ); + return status; + } + } + + ioctl_buf.in.h_av = p_av->obj.hdl; + ioctl_buf.in.attr = *p_av_attr; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_AV, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MODIFY_AV IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + } + + /* Post uvp call */ + if( h_av->h_ci_av && uvp_intf.post_modify_av ) + { + uvp_intf.post_modify_av( + p_av->h_ci_av, status, &ioctl_buf.out.umv_buf ); + } + + AL_EXIT( AL_DBG_AV ); + return status; +} + + +ib_api_status_t +ual_query_av( + IN ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd ) +{ + ual_query_av_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + uvp_interface_t uvp_intf = h_av->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_AV ); + /* Clear the ioctl_buf */ + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( h_av->h_ci_av && uvp_intf.pre_query_av ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_query_av( + h_av->h_ci_av, &ioctl_buf.in.umv_buf ); + if( status == IB_VERBS_PROCESSING_DONE ) + { + /* Query is done in user mode. Issue the post call */ + if( uvp_intf.post_query_av ) + { + uvp_intf.post_query_av( h_av->h_ci_av, + IB_SUCCESS, p_av_attr, ph_pd, &ioctl_buf.in.umv_buf ); + } + AL_EXIT( AL_DBG_AV ); + return IB_SUCCESS; + } + else if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_AV ); + return status; + } + } + + ioctl_buf.in.h_av = h_av->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_QUERY_AV, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_QUERY_AV IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + } + + /* Post uvp call */ + if( h_av->h_ci_av && uvp_intf.post_query_av ) + { + uvp_intf.post_query_av( h_av->h_ci_av, + status, &ioctl_buf.out.attr, ph_pd, + &ioctl_buf.out.umv_buf ); + } + + if( status == IB_SUCCESS ) + *p_av_attr = ioctl_buf.out.attr; + + AL_EXIT( AL_DBG_AV ); + return status; +} + diff --git a/branches/Ndi/core/al/user/ual_ca.c b/branches/Ndi/core/al/user/ual_ca.c new file mode 100644 index 00000000..a2691a03 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_ca.c @@ -0,0 +1,533 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ual_support.h" +#include "al.h" +#include "al_ca.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_ca.tmh" +#endif + +#include "ual_ca.h" +#include "ual_ci_ca.h" + + +void +close_vendor_lib( + IN verbs_interface_t *p_vca_intf ) +{ + if( p_vca_intf->h_uvp_lib ) + al_unload_uvp( p_vca_intf->h_uvp_lib ); +} + + + +ib_api_status_t +open_vendor_lib( + IN const ib_net64_t ca_guid, + IN OUT verbs_interface_t *p_vca_intf ) +{ + ual_get_uvp_name_ioctl_t al_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + void *h_lib; + uvp_get_interface_t pfn_uvp_ifc; + + AL_ENTER( AL_DBG_CA ); + + /* Initialize assuming no user-mode support */ + cl_memclr( &al_ioctl, sizeof(al_ioctl) ); + cl_memclr( p_vca_intf, sizeof(verbs_interface_t) ); + + /* init with the guid */ + p_vca_intf->guid = ca_guid; + + al_ioctl.in.ca_guid = ca_guid; + + cl_status = do_al_dev_ioctl( UAL_GET_VENDOR_LIBCFG, + &al_ioctl.in, sizeof(al_ioctl.in), + &al_ioctl.out, sizeof(al_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(al_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + if( !strlen( al_ioctl.out.uvp_lib_name ) ) + { + /* Vendor does not implement user-mode library */ + AL_PRINT_EXIT(TRACE_LEVEL_WARNING ,AL_DBG_CA , + ("No vendor lib for CA guid %I64x.\n", ca_guid) ); + return IB_UNSUPPORTED; + } + + /* + * The vendor supports a user-mode library + * open the library and get the interfaces supported + */ + AL_PRINT(TRACE_LEVEL_INFORMATION ,AL_DBG_CA , + ("Loading vendor lib (%s)\n", al_ioctl.out.uvp_lib_name) ); + h_lib = al_load_uvp( al_ioctl.out.uvp_lib_name ); + if (h_lib == NULL) + { +#if defined( _DEBUG_ ) + al_uvp_lib_err( TRACE_LEVEL_WARNING, + "!vendor lib (%s) not found for CA guid %"PRIx64".", + al_ioctl.out.uvp_lib_name, ca_guid ); +#endif + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; + } + + pfn_uvp_ifc = al_get_uvp_ifc_pfn( h_lib ); + if( !pfn_uvp_ifc ) + { +#if defined( _DEBUG_ ) + al_uvp_lib_err( TRACE_LEVEL_ERROR, + "failed to get vendor lib interface (%s) " + "for CA guid %"PRIx64" returned ", + al_ioctl.out.uvp_lib_name, ca_guid ); +#endif + al_unload_uvp( h_lib ); + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; + } + + /* Query the vendor-supported user-mode functions */ + pfn_uvp_ifc( &p_vca_intf->user_verbs ); + p_vca_intf->h_uvp_lib = h_lib; + AL_EXIT( AL_DBG_CA ); + return IB_SUCCESS; +} + + + +ib_api_status_t +ual_open_ca( + IN const ib_net64_t ca_guid, + IN OUT al_ci_ca_t* const p_ci_ca ) +{ + ual_open_ca_ioctl_t ca_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + ib_api_status_t uvp_status = IB_SUCCESS; + uvp_interface_t 
uvp_intf = p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CA ); + + cl_memclr( &ca_ioctl, sizeof(ca_ioctl) ); + + /* Pre call to the UVP library */ + if( uvp_intf.pre_open_ca ) + { + status = uvp_intf.pre_open_ca( ca_guid, &ca_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + CL_ASSERT( status != IB_VERBS_PROCESSING_DONE ); + AL_EXIT(AL_DBG_CA); + return status; + } + } + + ca_ioctl.in.guid = ca_guid; + ca_ioctl.in.context = p_ci_ca; + + cl_status = do_al_dev_ioctl( UAL_OPEN_CA, + &ca_ioctl.in, sizeof(ca_ioctl.in), &ca_ioctl.out, sizeof(ca_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ca_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("UAL_OPEN_CA IOCTL returned %s\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ca_ioctl.out.status; + p_ci_ca->obj.hdl = ca_ioctl.out.h_ca; + } + + /* Post uvp call */ + if( uvp_intf.post_open_ca ) + { + uvp_status = uvp_intf.post_open_ca( ca_guid, status, + &p_ci_ca->h_ci_ca, &ca_ioctl.out.umv_buf ); + } + + if( (status == IB_SUCCESS) && (uvp_status != IB_SUCCESS) ) + status = uvp_status; + + AL_EXIT( AL_DBG_CA ); + return status; +} + + +ib_api_status_t +ual_close_ca( + IN al_ci_ca_t *p_ci_ca ) +{ + ual_close_ca_ioctl_t ca_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status = CL_ERROR; + ib_api_status_t status = IB_ERROR; + uvp_interface_t uvp_intf = p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CA ); + + cl_memclr( &ca_ioctl, sizeof(ca_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( p_ci_ca->h_ci_ca && uvp_intf.pre_close_ca ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_close_ca( p_ci_ca->h_ci_ca ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CA ); + return status; + } + } + + ca_ioctl.in.h_ca = p_ci_ca->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_CLOSE_CA, + &ca_ioctl.in, sizeof(ca_ioctl.in), &ca_ioctl.out, sizeof(ca_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ca_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CLOSE_CA IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ca_ioctl.out.status; + } + + if( p_ci_ca->h_ci_ca && uvp_intf.post_close_ca ) + uvp_intf.post_close_ca( p_ci_ca->h_ci_ca, status ); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +ib_api_status_t +ual_query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ) +{ + /* Do we need to do any special checking here ?? 
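+ * (As written, no extra validation is done here; the kernel proxy
+ * validates the request. Callers follow the usual two-stage query
+ * convention: pass a NULL p_ca_attr to learn the required byte count,
+ * then call again with a buffer of at least that size, which the proxy
+ * fills in directly.)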
*/ + + ual_query_ca_ioctl_t ca_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status = CL_SUCCESS; + ib_api_status_t status = IB_SUCCESS; + uvp_interface_t uvp_intf = h_ca->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CA ); + + cl_memclr( &ca_ioctl, sizeof(ca_ioctl) ); + + ca_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl; + ca_ioctl.in.p_ca_attr = p_ca_attr; + ca_ioctl.in.byte_cnt = *p_size; + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.pre_query_ca ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_query_ca( h_ca->obj.p_ci_ca->h_ci_ca, + p_ca_attr, *p_size, &ca_ioctl.in.umv_buf ); + + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CA ); + return status; + } + } + + cl_status = do_al_dev_ioctl( UAL_QUERY_CA, + &ca_ioctl.in, sizeof(ca_ioctl.in), &ca_ioctl.out, sizeof(ca_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ca_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_QUERY_CA IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + *p_size = ca_ioctl.out.byte_cnt; + status = ca_ioctl.out.status; + } + + /* The attributes, if any, will be directly copied by proxy */ + /* Post uvp call */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.post_query_ca ) + { + uvp_intf.post_query_ca( h_ca->obj.p_ci_ca->h_ci_ca, + status, p_ca_attr, ca_ioctl.out.byte_cnt, &ca_ioctl.out.umv_buf ); + } + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +ib_api_status_t +ual_modify_ca( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* const p_port_attr_mod ) +{ + /* Do we need to do any special checking here ?? */ + + ual_modify_ca_ioctl_t ca_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status = CL_SUCCESS; + ib_api_status_t status = IB_SUCCESS; + uvp_interface_t uvp_intf = h_ca->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CA ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.pre_modify_ca ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_modify_ca( + h_ca->obj.p_ci_ca->h_ci_ca, port_num, ca_mod, p_port_attr_mod ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CA ); + return status; + } + } + + ca_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl; + ca_ioctl.in.port_num = port_num; + ca_ioctl.in.ca_mod = ca_mod; + ca_ioctl.in.port_attr_mod = *p_port_attr_mod; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_CA, + &ca_ioctl.in, sizeof(ca_ioctl.in), &ca_ioctl.out, sizeof(ca_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ca_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MODIFY_CA IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ca_ioctl.out.status; + } + + /* Post uvp call */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.post_modify_ca ) + uvp_intf.post_modify_ca( h_ca->obj.p_ci_ca->h_ci_ca, status ); + + AL_EXIT( AL_DBG_CA ); + return status; +} + + + +static ib_api_status_t +__convert_to_proxy_handles( + IN uint64_t* const dst_handle_array, + IN const void* __ptr64 * const src_handle_array, + IN uint32_t num_handles ) +{ + uint32_t i; + al_obj_t *p_al_obj; + + for( i = 0; i < num_handles; i++ ) + { + p_al_obj = (al_obj_t* __ptr64)src_handle_array[i]; + if( (p_al_obj->type != AL_OBJ_TYPE_H_PD) && + (p_al_obj->type != AL_OBJ_TYPE_H_CQ) && + (p_al_obj->type != 
AL_OBJ_TYPE_H_AV) && + (p_al_obj->type != AL_OBJ_TYPE_H_QP) && + (p_al_obj->type != AL_OBJ_TYPE_H_MR) && + (p_al_obj->type != AL_OBJ_TYPE_H_MW) ) + { + return IB_INVALID_HANDLE; + } + + dst_handle_array[i] = p_al_obj->hdl; + } + return IB_SUCCESS; +} + + + +ib_api_status_t +ib_ci_call( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op ) +{ + ual_ci_call_ioctl_t *p_ca_ioctl; + size_t in_sz; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf; + void* __ptr64 * p_uvp_handle_array = NULL; + + AL_ENTER( AL_DBG_CA ); + + if( AL_OBJ_INVALID_HANDLE( h_ca, AL_OBJ_TYPE_H_CA ) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_CA_HANDLE\n") ); + return IB_INVALID_CA_HANDLE; + } + if( !p_ci_op ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + uvp_intf = h_ca->obj.p_ci_ca->verbs.user_verbs; + + in_sz = sizeof(ual_ci_call_ioctl_t); + if( num_handles > 1 ) + in_sz += (sizeof(uint64_t) * (num_handles - 1)); + + p_ca_ioctl = cl_zalloc( in_sz ); + if( !p_ca_ioctl ) + { + AL_EXIT( AL_DBG_CA ); + return IB_INSUFFICIENT_MEMORY; + } + + if( num_handles > 0 ) + { + status = __convert_to_proxy_handles( + p_ca_ioctl->in.handle_array, handle_array, num_handles ); + if( status != IB_SUCCESS ) + { + cl_free( p_ca_ioctl ); + AL_EXIT( AL_DBG_CA ); + return status; + } + + p_uvp_handle_array = cl_zalloc( sizeof(void* __ptr64) * num_handles ); + if( !p_uvp_handle_array ) + { + cl_free( p_ca_ioctl ); + AL_EXIT( AL_DBG_CA ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Convert the handle array */ + status = al_convert_to_ci_handles( + p_uvp_handle_array, handle_array, num_handles ); + if( status != IB_SUCCESS ) + { + cl_free( p_uvp_handle_array ); + cl_free( p_ca_ioctl ); + AL_EXIT( AL_DBG_CA ); + return status; + } + } + + /* Pre call to the UVP library */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.pre_ci_call ) + { + status = uvp_intf.pre_ci_call( + h_ca->obj.p_ci_ca->h_ci_ca, p_uvp_handle_array, + num_handles, p_ci_op, &p_ca_ioctl->in.umv_buf ); + if( status != IB_SUCCESS ) + { + cl_free( p_uvp_handle_array ); + cl_free( p_ca_ioctl ); + AL_EXIT( AL_DBG_CA ); + return status; + } + } + + p_ca_ioctl->in.h_ca = h_ca->obj.p_ci_ca->obj.hdl; + p_ca_ioctl->in.num_handles = num_handles; + p_ca_ioctl->in.ci_op = *p_ci_op; + + cl_status = do_al_dev_ioctl( UAL_CI_CALL, + &p_ca_ioctl->in, sizeof(p_ca_ioctl->in), + &p_ca_ioctl->out, sizeof(p_ca_ioctl->out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_ca_ioctl->out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CI_CALL IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = p_ca_ioctl->out.status; + } + + /* Post uvp call */ + if( h_ca->obj.p_ci_ca->h_ci_ca && uvp_intf.post_ci_call ) + { + uvp_intf.post_ci_call( + h_ca->obj.p_ci_ca->h_ci_ca, + status, p_uvp_handle_array, num_handles, p_ci_op, + &p_ca_ioctl->out.umv_buf ); + } + + if( num_handles > 0 ) + cl_free( p_uvp_handle_array ); + cl_free( p_ca_ioctl ); + + AL_EXIT( AL_DBG_CA ); + return status; +} diff --git a/branches/Ndi/core/al/user/ual_ca.h b/branches/Ndi/core/al/user/ual_ca.h new file mode 100644 index 00000000..671eaad1 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_ca.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. 
All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_UAL_CA_H__) +#define __IB_UAL_CA_H__ + +#include +#include "al_ci_ca.h" + +void +close_vendor_lib( + IN verbs_interface_t *p_vca_intf ); + +ib_api_status_t +open_vendor_lib( + IN const ib_net64_t ca_guid, + IN OUT verbs_interface_t *p_vca_intf); + + +void +ual_ca_completion_cb( + IN void* cq_context ); + + +void +ual_ca_async_event_cb( + IN ib_event_rec_t* event_record ); + + +#endif /* __IB_UAL_CA_H__ */ diff --git a/branches/Ndi/core/al/user/ual_ci_ca.c b/branches/Ndi/core/al/user/ual_ci_ca.c new file mode 100644 index 00000000..cf0de4b2 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_ci_ca.c @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "al.h" +#include "al_mgr.h" +#include "al_ci_ca.h" +#include "ual_ca.h" +#include "al_pnp.h" +#include "al_pd.h" +#include "ib_common.h" + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_ci_ca.tmh" +#endif + + +extern ib_pool_handle_t gh_mad_pool; +extern ib_al_handle_t gh_al; +extern cl_async_proc_t *gp_async_proc_mgr; + +#define EVENT_POOL_MIN 4 +#define EVENT_POOL_MAX 0 +#define EVENT_POOL_GROW 1 + + +static void +ci_ca_async_proc_cb( + IN struct _cl_async_proc_item *p_item ); + +void +destroying_ci_ca( + IN al_obj_t* p_obj ); + +void +cleanup_ci_ca( + IN al_obj_t* p_obj ); + +/* To be called only if a CI is not opened yet by UAL */ + +/* This gets called by ual_mgr when a CA is opened for the first time. + * The CA remains open for the process life-time. + * ib_open_ca will not go through this code. + */ + +ib_api_status_t +create_ci_ca( + IN ib_al_handle_t h_al, + IN al_obj_t *p_parent_obj, + IN ib_net64_t ca_guid ) +{ + ib_api_status_t status; + cl_status_t cl_status; + al_ci_ca_t *p_ci_ca; + + AL_ENTER(AL_DBG_CA); + + /* Allocate a new CA structure. */ + p_ci_ca = (al_ci_ca_t *)cl_zalloc( sizeof( al_ci_ca_t ) ); + if( p_ci_ca == NULL ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("Failed to cl_malloc al_ci_ca_t\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the CI CA */ + construct_al_obj( &p_ci_ca->obj, AL_OBJ_TYPE_CI_CA ); + cl_qlist_init( &p_ci_ca->ca_list ); + cl_qpool_construct( &p_ci_ca->event_pool ); + cl_spinlock_construct( &p_ci_ca->attr_lock ); + + cl_status = cl_spinlock_init( &p_ci_ca->attr_lock ); + if( cl_status != CL_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("cl_spinlock_init failed, status = 0x%x.\n", + ib_convert_cl_status(cl_status) ) ); + return ib_convert_cl_status( cl_status ); + } + + /* Create a pool of items to report asynchronous events. */ + cl_status = cl_qpool_init( &p_ci_ca->event_pool, EVENT_POOL_MIN, + EVENT_POOL_MAX, EVENT_POOL_GROW, sizeof( event_item_t ), NULL, + NULL, p_ci_ca ); + if( cl_status != CL_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("cl_qpool_init failed, status = 0x%x.\n", + ib_convert_cl_status(cl_status) ) ); + return ib_convert_cl_status( cl_status ); + } + + /* Initialize the al object and attach it to the parent so that the + * cleanups will work fine on all cases including error conditions + * encountered here. We use synchronous destruction to ensure that + * the internal CA handle is destroyed before the global AL instance + * is destroyed during the shutdown procedure. + */ + status = init_al_obj( &p_ci_ca->obj, p_ci_ca, FALSE, + destroying_ci_ca, cleanup_ci_ca, free_ci_ca ); + if( status != IB_SUCCESS ) + { + free_ci_ca( &p_ci_ca->obj ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed, status = 0x%x.\n", status) ); + return status; + } + + attach_al_obj( p_parent_obj, &p_ci_ca->obj ); + + p_ci_ca->dereg_async_item.pfn_callback = ci_ca_async_proc_cb; + + /* We need to open a CA and allocate a PD for internal use. Doing this + * will result in the creation of user-mode and kernel-mode objects that + * will be children under our global AL instance and associated kernel AL + * instance. We need to take a reference on our user-mode AL instance + * while the CI CA exists, to ensure that our kernel-mode counterpart + * does not go away during application exit. 
+ */ + ref_al_obj( &gh_al->obj ); + + /* Register ourselves with the AL manager, so that the open call below + * will succeed. + */ + add_ci_ca( p_ci_ca ); + open_vendor_lib( ca_guid, &p_ci_ca->verbs ); + + /* Now open the UAL CA to be assigned to p_ci_ca */ + status = ib_open_ca( h_al, ca_guid, ca_event_cb, p_ci_ca, + &p_ci_ca->h_ca ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("ib_open_ca failed, status = 0x%x.\n", status) ); + return status; + } + + /* Now open the CA by sending the ioctl down to kernel */ + status = ual_open_ca( ca_guid, p_ci_ca ); + if (status != IB_SUCCESS) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + + /* Note that we don't release it here. + * It is done through async queuing and the callback + * and the associated destroy/cleanup in the AL's + * object model + */ + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("ual_open_ca failed, status = 0x%x.\n", status) ); + return IB_ERROR; + } + + /* Increase the max timeout for the CI CA to handle driver unload. */ + set_al_obj_timeout( &p_ci_ca->obj, AL_MAX_TIMEOUT_MS ); + + /* + * Allocate a PD for use by AL itself. Note that we need to use the + * PD in the kernel, so we create an alias PD for the global PD. + */ + status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca, + &p_ci_ca->h_pd ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("ib_alloc_pd failed, status = 0x%x.\n", status) ); + return status; + } + + /* Allocate an alias in the kernel for this global PD alias. */ + status = ual_allocate_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca->h_pd ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ual_allocate_pd returned %s\n", ib_get_err_str( status )) ); + return status; + } + + /* Now create an alias PD in user-mode for AL services. */ + status = ib_alloc_pd( p_ci_ca->h_ca, IB_PDT_ALIAS, p_ci_ca, + &p_ci_ca->h_pd_alias ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("ib_alloc_pd failed, status = 0x%x.\n", status) ); + return status; + } + + status = get_port_info( p_ci_ca ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("get_port_info failed, status = 0x%x.\n", status) ); + return status; + } + + /* Register the global MAD pool on this CA. */ + status = ib_reg_mad_pool( gh_mad_pool, p_ci_ca->h_pd, &p_ci_ca->pool_key ); + if( status != IB_SUCCESS ) + { + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("ib_reg_mad_pool failed, status = 0x%x.\n", status) ); + return status; + } + + /* Update the PnP attributes buffer. */ + ci_ca_update_attr( p_ci_ca, NULL ); + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_ci_ca->obj ); + + AL_EXIT(AL_DBG_CA); + return IB_SUCCESS; +} + + + +static void +ci_ca_async_proc_cb( + IN struct _cl_async_proc_item *p_item ) +{ + al_ci_ca_t *p_ci_ca; + + p_ci_ca = PARENT_STRUCT( p_item, al_ci_ca_t, dereg_async_item ); + + /* Release all AL resources acquired by the CI CA. 
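+ * This callback runs on the async processing thread: destroying_ci_ca()
+ * below queues dereg_async_item so that the blocking ib_close_ca() call
+ * happens only after the PnP remove event has been delivered, rather
+ * than in the destroy caller's context.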
*/ + ib_close_ca( p_ci_ca->h_ca, NULL ); +} + + + +/* + * This overrides the implementation in shared AL + * UAL-specific destroy_ci_ca + */ +void +destroying_ci_ca( + IN al_obj_t* p_obj ) +{ + al_ci_ca_t *p_ci_ca; + + AL_ENTER(AL_DBG_CA); + CL_ASSERT( p_obj ); + p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj ); + + /* + * We queue a request to the asynchronous processing manager to close + * the CA after the PNP remove CA event has been delivered. This prevents + * the ib_close_ca() call from immediately removing resources (PDs, QPs) + * that are in use by clients waiting on the remove CA event. + */ + if( p_ci_ca->h_ca ) + cl_async_proc_queue( gp_async_pnp_mgr, &p_ci_ca->dereg_async_item ); + + AL_EXIT(AL_DBG_CA); +} + + + +/* + * This overrides the implementation in shared AL + * + * Remove the H/W resources used. From the UAL perspective, that means the UVP lib. + * UAL-specific + */ +void +cleanup_ci_ca( + IN al_obj_t* p_obj ) +{ + ib_api_status_t status; + al_ci_ca_t *p_ci_ca; + + AL_ENTER(AL_DBG_CA); + + CL_ASSERT( p_obj ); + p_ci_ca = PARENT_STRUCT( p_obj, al_ci_ca_t, obj ); + + if( p_ci_ca->h_ca ) + { + /* Remove the associated kernel CA object. */ + status = ual_close_ca( p_ci_ca ); + CL_ASSERT( status == IB_SUCCESS ); + } + + remove_ci_ca( p_ci_ca ); + + /* We have finished cleaning up all associated kernel resources. We can + * now safely dereference the global AL instance. + */ + deref_al_obj( &gh_al->obj ); + + close_vendor_lib( &p_ci_ca->verbs ); + + AL_EXIT(AL_DBG_CA); +} diff --git a/branches/Ndi/core/al/user/ual_ci_ca.h b/branches/Ndi/core/al/user/ual_ci_ca.h new file mode 100644 index 00000000..87dc7e4d --- /dev/null +++ b/branches/Ndi/core/al/user/ual_ci_ca.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + +#if !defined(__UAL_CI_CA_H__) +#define __UAL_CI_CA_H__ + +#include +#include "al_common.h" +/* #include "al_ci_ca.h" */ +/* Dummy function declarations */ +/* The arguments must be defined later */ + + + +ib_api_status_t +create_ci_ca( + IN ib_al_handle_t h_al, + IN al_obj_t *p_parent_obj, + IN ib_net64_t ca_guid ); + +ib_api_status_t +ual_open_ca( + IN const ib_net64_t ca_guid, + IN OUT struct _al_ci_ca* const p_ci_ca ); + +#if 0 +ib_api_status_t +ual_close_ca( + IN ib_ca_handle_t h_ca); +#else +ib_api_status_t +ual_close_ca( + IN struct _al_ci_ca *p_ci_ca ); +#endif + +ib_api_status_t +ual_modify_ca( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* const p_port_attr_mod ); + +ib_api_status_t +ual_query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ); + +ib_api_status_t +ual_allocate_pd( + IN ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN OUT ib_pd_handle_t h_pd ); + +ib_api_status_t +ual_deallocate_pd( + IN ib_pd_handle_t h_pd ); + +ib_api_status_t +ual_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + IN OUT ib_av_handle_t h_av ); + +ib_api_status_t +ual_pd_alias_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + IN OUT ib_av_handle_t h_av ); + +ib_api_status_t +ual_destroy_av( + IN ib_av_handle_t h_av ); + +ib_api_status_t +ual_modify_av( + IN ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_attr); + +ib_api_status_t +ual_query_av( + IN ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd ); + +ib_api_status_t +ual_create_srq( + IN const ib_pd_handle_t h_pd, + IN OUT ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr); + +ib_api_status_t +ual_modify_srq( + IN ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ); + +ib_api_status_t +ual_query_srq( + IN ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* p_srq_attr ); + +ib_api_status_t +ual_destroy_srq( + IN ib_srq_handle_t h_srq ); + +ib_api_status_t +ual_create_qp( + IN const ib_pd_handle_t h_pd, + IN OUT ib_qp_handle_t h_qp, + IN const ib_qp_create_t* const p_qp_create, + IN ib_qp_attr_t* p_qp_attr ); + +ib_api_status_t +ual_modify_qp( + IN ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod, + IN ib_qp_attr_t* p_qp_attr ); + +ib_api_status_t +ual_query_qp( + IN ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* p_qp_attr ); + +ib_api_status_t +ual_destroy_qp( + IN ib_qp_handle_t h_qp ); + +ib_api_status_t +ual_create_cq( + IN struct _al_ci_ca* const p_ci_ca, + IN ib_cq_create_t* const p_cq_create, + IN OUT ib_cq_handle_t h_cq ); + +ib_api_status_t +ual_modify_cq( + IN ib_cq_handle_t h_cq, + IN OUT uint32_t* p_size ); + +ib_api_status_t +ual_query_cq( + IN ib_cq_handle_t h_cq, + OUT uint32_t* p_size ); + +ib_api_status_t +ual_destroy_cq( + IN ib_cq_handle_t h_cq ); + +ib_api_status_t +ual_reg_mem( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN OUT ib_mr_handle_t h_mr ); + +ib_api_status_t +ual_dereg_mr( + IN ib_mr_handle_t h_mr ); + +ib_api_status_t +ual_modify_mr( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ); +
+ib_api_status_t +ual_query_mr( + IN ib_mr_handle_t h_mr, + OUT ib_mr_attr_t* p_mr_attr ); + +ib_api_status_t +ual_reg_shared( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN OUT ib_mr_handle_t h_new_mr ); + +ib_api_status_t +ual_create_mw( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + IN OUT ib_mw_handle_t h_mw ); + +ib_api_status_t +ual_destroy_mw( + IN ib_mw_handle_t h_mw ); + +ib_api_status_t +ual_query_mw( + IN ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* ph_pd, + OUT net32_t* const p_rkey ); + +ib_api_status_t +ual_bind_mw( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* p_mw_bind, + OUT net32_t* const p_rkey ); + +ib_api_status_t +ual_post_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t **pp_send_failure ); + +ib_api_status_t +ual_post_recv( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure ); + +ib_api_status_t +ual_post_srq_recv( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure ); + +ib_api_status_t +ual_peek_cq( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_n_cqes ); + +ib_api_status_t +ual_poll_cq( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + +ib_api_status_t +ual_rearm_cq( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ); + +ib_api_status_t +ual_rearm_n_cq( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ); + +typedef struct _ual_ci_interface +{ + uvp_interface_t user_verbs; + void *h_uvp_lib; /* UVP Library Handle */ + ib_net64_t guid; + +} ual_ci_interface_t; + +typedef ual_ci_interface_t verbs_interface_t; + +#endif /* (__UAL_CI_CA_H__) */ diff --git a/branches/Ndi/core/al/user/ual_cm_cep.c b/branches/Ndi/core/al/user/ual_cm_cep.c new file mode 100644 index 00000000..ce398677 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_cm_cep.c @@ -0,0 +1,1426 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include +#include "al_common.h" +#include "al_cm_cep.h" +#include "al_cm_conn.h" +#include "al_cm_sidr.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_cm_cep.tmh" +#endif + +#include "ib_common.h" +#include "al_mgr.h" +//#include "al_ca.h" +#include "al.h" +//#include "al_mad.h" +#include "al_qp.h" + + +#define UAL_CEP_MIN (512) +#define UAL_CEP_GROW (256) + + +/* Global connection manager object. */ +typedef struct _ual_cep_mgr +{ + al_obj_t obj; + + cl_ptr_vector_t cep_vector; + + /* File handle on which to issue query IOCTLs. */ + HANDLE h_file; + +} ual_cep_mgr_t; + + +typedef struct _al_ucep +{ + al_pfn_cep_cb_t pfn_cb; + ib_al_handle_t h_al; + cl_list_item_t al_item; + + ib_pfn_destroy_cb_t pfn_destroy_cb; + void* destroy_context; + net32_t cid; + + OVERLAPPED ov; + atomic32_t ref_cnt; + +} ucep_t; + + +/* Global instance of the CM agent. */ +ual_cep_mgr_t *gp_cep_mgr = NULL; + + +/* + * Frees the global CEP manager. Invoked during al_obj destruction. + */ +static void +__free_cep_mgr( + IN al_obj_t* p_obj ) +{ + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( &gp_cep_mgr->obj == p_obj ); + + if( gp_cep_mgr->h_file != INVALID_HANDLE_VALUE ) + CloseHandle( gp_cep_mgr->h_file ); + + cl_ptr_vector_destroy( &gp_cep_mgr->cep_vector ); + + destroy_al_obj( p_obj ); + + cl_free( gp_cep_mgr ); + gp_cep_mgr = NULL; + + AL_EXIT( AL_DBG_CM ); +} + + +/* + * Allocates and initializes the global user-mode CM agent. + */ +ib_api_status_t +create_cep_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( gp_cep_mgr == NULL ); + + /* Allocate the global CM agent. */ + gp_cep_mgr = (ual_cep_mgr_t*)cl_zalloc( sizeof(ual_cep_mgr_t) ); + if( !gp_cep_mgr ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed allocation of global CEP manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &gp_cep_mgr->obj, AL_OBJ_TYPE_CM ); + cl_ptr_vector_construct( &gp_cep_mgr->cep_vector ); + gp_cep_mgr->h_file = INVALID_HANDLE_VALUE; + + status = init_al_obj( &gp_cep_mgr->obj, NULL, FALSE, + NULL, NULL, __free_cep_mgr ); + if( status != IB_SUCCESS ) + { + __free_cep_mgr( &gp_cep_mgr->obj ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + /* Attach to the parent object. */ + status = attach_al_obj( p_parent_obj, &gp_cep_mgr->obj ); + if( status != IB_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + cl_status = cl_ptr_vector_init( + &gp_cep_mgr->cep_vector, UAL_CEP_MIN, UAL_CEP_GROW ); + if( cl_status != CL_SUCCESS ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("cl_vector_init failed with status %s.\n", + CL_STATUS_MSG(cl_status)) ); + return ib_convert_cl_status( cl_status ); + } + + /* Create a file object on which to issue all CM requests.
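+ * The file handle is opened for overlapped (asynchronous) I/O so that
+ * each CEP can keep a UAL_CEP_GET_EVENT IOCTL pending on it (see
+ * __create_ucep below); event completions are then signaled through the
+ * per-CEP OVERLAPPED structure rather than by blocking a thread.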
*/ + gp_cep_mgr->h_file = ual_create_async_file( UAL_BIND_CM ); + if( gp_cep_mgr->h_file == INVALID_HANDLE_VALUE ) + { + gp_cep_mgr->obj.pfn_destroy( &gp_cep_mgr->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ual_create_async_file for UAL_BIND_CM returned %d.\n", + GetLastError()) ); + return IB_ERROR; + } + + /* Release the reference from init_al_obj */ + deref_al_obj( &gp_cep_mgr->obj ); + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +void +al_cep_cleanup_al( + IN const ib_al_handle_t h_al ) +{ + cl_list_item_t *p_item; + net32_t cid; + + AL_ENTER( AL_DBG_CM ); + + /* Destroy all CEPs associated with the input instance of AL. */ + cl_spinlock_acquire( &h_al->obj.lock ); + for( p_item = cl_qlist_head( &h_al->cep_list ); + p_item != cl_qlist_end( &h_al->cep_list ); + p_item = cl_qlist_head( &h_al->cep_list ) ) + { + /* + * Note that we don't walk the list - we can't hold the AL + * lock when cleaning up its CEPs because the cleanup path + * takes the CEP's lock. We always want to take the CEP + * before the AL lock to prevent any possibilities of deadlock. + * + * So we just get the CID, and then release the AL lock and try to + * destroy. This should unbind the CEP from the AL instance and + * remove it from the list, allowing the next CEP to be cleaned up + * in the next pass through. + */ + cid = PARENT_STRUCT( p_item, ucep_t, al_item )->cid; + cl_spinlock_release( &h_al->obj.lock ); + al_destroy_cep( h_al, cid, NULL ); + cl_spinlock_acquire( &h_al->obj.lock ); + } + cl_spinlock_release( &h_al->obj.lock ); + + AL_EXIT( AL_DBG_CM ); +} + + +static void +__destroy_ucep( + IN ucep_t* const p_cep ) +{ + if( p_cep->pfn_destroy_cb ) + p_cep->pfn_destroy_cb( p_cep->destroy_context ); + cl_free( p_cep ); +} + + +ib_api_status_t +__create_ucep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN al_pfn_cep_cb_t pfn_cb, + IN void* __ptr64 context, + OUT net32_t* const p_cid ) +{ + ucep_t *p_cep; + DWORD bytes_ret; + ual_create_cep_ioctl_t ioctl; + + AL_ENTER( AL_DBG_CM ); + + p_cep = cl_zalloc( sizeof(ucep_t) ); + if( !p_cep ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("Failed to allocate ucep_t\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Initialize to two - one for the CEP, and one for the IOCTL. */ + p_cep->ref_cnt = 2; + + /* Store user parameters. */ + p_cep->pfn_cb = pfn_cb; + p_cep->destroy_context = context; + + /* Create a kernel CEP only if we don't already have a CID. */ + if( cid == AL_INVALID_CID ) + { + if( !DeviceIoControl( g_al_device, UAL_CREATE_CEP, &context, + sizeof(context), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + __destroy_ucep( p_cep ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CREATE_CEP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status != IB_SUCCESS ) + { + __destroy_ucep( p_cep ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("UAL_CREATE_CEP IOCTL returned %s\n", + ib_get_err_str( ioctl.status )) ); + return ioctl.status; + } + + p_cep->cid = ioctl.cid; + } + else + { + p_cep->cid = cid; + } + + /* Track the CEP before we issue any further IOCTLs on it. */ + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + cl_ptr_vector_set_min_size( &gp_cep_mgr->cep_vector, p_cep->cid + 1 ); + CL_ASSERT( !cl_ptr_vector_get( &gp_cep_mgr->cep_vector, p_cep->cid ) ); + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cid, p_cep ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + /* Now issue a poll request. This request is async. 
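+ * The OVERLAPPED structure embedded in the ucep_t routes the completion
+ * back through the async file created in create_cep_mgr(). The expected
+ * flow, as an illustrative sketch (not part of the original source):
+ *
+ *	DeviceIoControl( ..., &p_cep->ov );	// FALSE + ERROR_IO_PENDING
+ *	... the kernel CM signals an event ...
+ *	cm_cb( 0, 0, &p_cep->ov );		// fires p_cep->pfn_cb, re-arms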
*/ + if( DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT, + &p_cep->cid, sizeof(p_cep->cid), + NULL, 0, NULL, &p_cep->ov ) || + GetLastError() != ERROR_IO_PENDING ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("Failed to issue CEP poll IOCTL.\n") ); + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, p_cep->cid, NULL ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cid, + sizeof(p_cep->cid), NULL, 0, &bytes_ret, NULL ); + + __destroy_ucep( p_cep ); + AL_EXIT( AL_DBG_CM ); + return IB_ERROR; + } + + p_cep->h_al = h_al; + + /* Track the CEP in its owning AL instance. */ + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_insert_tail( &h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &h_al->obj.lock ); + + if( p_cid ) + *p_cid = p_cep->cid; + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_create_cep( + IN ib_al_handle_t h_al, + IN al_pfn_cep_cb_t pfn_cb, + IN void* __ptr64 context, + OUT net32_t* const p_cid ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_CM ); + + status = __create_ucep( h_al, AL_INVALID_CID, pfn_cb, context, p_cid ); + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +/* + * Note that destroy_cep is synchronous. It does however handle the case + * where a user calls it from a callback context. + */ +ib_api_status_t +al_destroy_cep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + ucep_t *p_cep; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + CL_ASSERT( h_al ); + + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + if( cid < cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) ) + { + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + if( p_cep && p_cep->h_al == h_al ) + cl_ptr_vector_set( &gp_cep_mgr->cep_vector, cid, NULL ); + else + goto invalid; + } + else + { +invalid: + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + /* + * Destroy the kernel CEP right away. We must synchronize with issuing + * the next GET_EVENT IOCTL. + */ + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, &p_cep->cid, + sizeof(p_cep->cid), NULL, 0, &bytes_ret, NULL ); + p_cep->cid = AL_INVALID_CID; + + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + p_cep->pfn_destroy_cb = pfn_destroy_cb; + + /* + * Remove from the AL instance. Note that once removed, all + * callbacks for an item will stop. + */ + cl_spinlock_acquire( &h_al->obj.lock ); + cl_qlist_remove_item( &h_al->cep_list, &p_cep->al_item ); + cl_spinlock_release( &h_al->obj.lock ); + + if( !cl_atomic_dec( &p_cep->ref_cnt ) ) + { + /* We have no remaining references.
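+ * One reference was held for the CEP itself and one for the outstanding
+ * UAL_CEP_GET_EVENT IOCTL; cm_cb drops the IOCTL's reference when the
+ * request completes or is cancelled, so whichever path decrements the
+ * count to zero frees the ucep_t.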
*/ + __destroy_ucep( p_cep ); + } + + AL_EXIT( AL_DBG_CM ); + return IB_SUCCESS; +} + + +ib_api_status_t +al_cep_listen( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_cep_listen_t* const p_listen_info ) +{ + ual_cep_listen_ioctl_t ioctl; + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_listen_info ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + ioctl.cid = cid; + ioctl.cep_listen = *p_listen_info; + if( p_listen_info->p_cmp_buf ) + { + if( p_listen_info->cmp_len > IB_REQ_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Listen compare data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.compare, p_listen_info->p_cmp_buf, + p_listen_info->cmp_len ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_LISTEN, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ual_cep_listen IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_req( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_req_t* const p_cm_req, + OUT ib_qp_mod_t* const p_init ) +{ + ual_cep_req_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_req ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + if( !p_init ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + ioctl.in.cid = cid; + ioctl.in.cm_req = *p_cm_req; + ioctl.in.cm_req.h_qp = (ib_qp_handle_t)p_cm_req->h_qp->obj.hdl; + ioctl.in.paths[0] = *(p_cm_req->p_primary_path); + if( p_cm_req->p_alt_path ) + ioctl.in.paths[1] = *(p_cm_req->p_alt_path); + /* Copy private data, if any. */ + if( p_cm_req->p_req_pdata ) + { + if( p_cm_req->req_length > IB_REQ_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.pdata, p_cm_req->p_req_pdata, + p_cm_req->req_length ); + } + + /* Copy compare data, if any. 
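+ * The compare buffer participates in listen matching: the kernel CM
+ * checks it against the compare data a passive listener supplied through
+ * al_cep_listen (ib_cep_listen_t.p_cmp_buf).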
*/ + if( p_cm_req->p_compare_buffer ) + { + if( p_cm_req->compare_length > IB_REQ_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("REQ compare data larger than REQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.compare, p_cm_req->p_compare_buffer, + p_cm_req->compare_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REQ, &ioctl, + sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_PRE_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_init = ioctl.out.init; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_req( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REQ, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_SEND_REQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_rep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN void* __ptr64 context, + IN const ib_cm_rep_t* const p_cm_rep, + OUT ib_qp_mod_t* const p_init ) +{ + ucep_t *p_cep; + ual_cep_rep_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_rep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + if( !p_init ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + /* Store the context for the CEP. */ + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + if( !p_cep ) + { + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + p_cep->destroy_context = context; + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + + ioctl.in.context = context; + ioctl.in.cid = cid; + ioctl.in.cm_rep = *p_cm_rep; + ioctl.in.cm_rep.h_qp = (ib_qp_handle_t)p_cm_rep->h_qp->obj.hdl; + /* Copy private data, if any. 
*/ + if( p_cm_rep->p_rep_pdata ) + { + if( p_cm_rep->rep_length > IB_REP_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than REP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.in.pdata, p_cm_rep->p_rep_pdata, + p_cm_rep->rep_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_REP, &ioctl, + sizeof(ioctl.in), &ioctl, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_PRE_REP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_init = ioctl.out.init; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_rep( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_REP, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_SEND_REP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_rtr_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rtr ) +{ + ual_cep_get_rtr_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_rtr ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTR, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_GET_RTR IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_rtr = ioctl.rtr; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +ib_api_status_t +al_cep_get_rts_attr( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT ib_qp_mod_t* const p_rts ) +{ + ual_cep_get_rts_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_rts ) + { + AL_EXIT( AL_DBG_ERROR ); + return IB_INVALID_PARAMETER; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_RTS, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_GET_RTS IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_rts = ioctl.rts; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +ib_api_status_t +al_cep_rtu( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* p_pdata OPTIONAL, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_rtu_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + /* Copy private data, if any.
*/ + if( p_pdata ) + { + if( pdata_len > IB_RTU_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than RTU private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + } + ioctl.pdata_len = pdata_len; + + if( !DeviceIoControl( g_al_device, UAL_CEP_RTU, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_RTU IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_rej( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN ib_rej_status_t rej_status, + IN const uint8_t* const p_ari, + IN uint8_t ari_len, + IN const uint8_t* const p_pdata, + IN uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_rej_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.rej_status = rej_status; + if( p_ari ) + { + if( ari_len > IB_ARI_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ARI larger than REJ ARI data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.ari, p_ari, ari_len ); + ioctl.ari_len = ari_len; + } + else + { + ioctl.ari_len = 0; + } + /* Copy private data, if any. */ + if( p_pdata ) + { + if( pdata_len > IB_REJ_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than REJ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + ioctl.pdata_len = pdata_len; + } + else + { + ioctl.pdata_len = 0; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_REJ, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_REJ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_mra( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_mra_t* const p_cm_mra ) +{ + ib_api_status_t status; + ual_cep_mra_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_mra ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_mra = *p_cm_mra; + /* Copy private data, if any.
*/ + if( p_cm_mra->p_mra_pdata ) + { + if( p_cm_mra->mra_length > IB_MRA_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than MRA private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_mra->p_mra_pdata, p_cm_mra->mra_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_MRA, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_MRA IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_lap( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_lap_t* const p_cm_lap ) +{ + ib_api_status_t status; + ual_cep_lap_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_lap ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_lap->p_alt_path ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_lap = *p_cm_lap; + ioctl.cm_lap.h_qp = (ib_qp_handle_t)p_cm_lap->h_qp->obj.hdl; + ioctl.alt_path = *(p_cm_lap->p_alt_path); + /* Copy private data, if any. */ + if( p_cm_lap->p_lap_pdata ) + { + if( p_cm_lap->lap_length > IB_LAP_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than LAP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_lap->p_lap_pdata, p_cm_lap->lap_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_LAP, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_LAP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_pre_apr( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_apr_t* const p_cm_apr, + OUT ib_qp_mod_t* const p_apr ) +{ + ual_cep_apr_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_apr || !p_apr ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + ioctl.in.cid = cid; + ioctl.in.cm_apr = *p_cm_apr; + ioctl.in.cm_apr.h_qp = (ib_qp_handle_t)p_cm_apr->h_qp->obj.hdl; + if( p_cm_apr->p_info ) + { + if( p_cm_apr->info_length > IB_APR_INFO_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than APR info data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.in.apr_info, p_cm_apr->p_info, p_cm_apr->info_length ); + } + /* Copy private data, if any. 
*/ + if( p_cm_apr->p_apr_pdata ) + { + if( p_cm_apr->apr_length > IB_APR_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than APR private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.in.pdata, p_cm_apr->p_apr_pdata, p_cm_apr->apr_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_PRE_APR, &ioctl.in, + sizeof(ioctl.in), &ioctl.out, sizeof(ioctl.out), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_PRE_APR IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.out.status == IB_SUCCESS ) + *p_apr = ioctl.out.apr; + + AL_EXIT( AL_DBG_CM ); + return ioctl.out.status; +} + + +ib_api_status_t +al_cep_send_apr( + IN ib_al_handle_t h_al, + IN net32_t cid ) +{ + ib_api_status_t status; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_SEND_APR, &cid, + sizeof(cid), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_SEND_APR IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_dreq( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const uint8_t* const p_pdata OPTIONAL, + IN const uint8_t pdata_len ) +{ + ib_api_status_t status; + ual_cep_dreq_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + /* Copy private data, if any. */ + if( p_pdata ) + { + if( pdata_len > IB_DREQ_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than DREQ private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( ioctl.pdata, p_pdata, pdata_len ); + ioctl.pdata_len = pdata_len; + } + else + { + ioctl.pdata_len = 0; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_DREQ, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_DREQ IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_drep( + IN ib_al_handle_t h_al, + IN net32_t cid, + IN const ib_cm_drep_t* const p_cm_drep ) +{ + ib_api_status_t status; + ual_cep_drep_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_cm_drep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + ioctl.cid = cid; + ioctl.cm_drep = *p_cm_drep; + /* Copy private data, if any.
*/ + if( p_cm_drep->p_drep_pdata ) + { + if( p_cm_drep->drep_length > IB_DREP_PDATA_SIZE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("private data larger than DREP private data.\n") ); + return IB_INVALID_SETTING; + } + + cl_memcpy( + ioctl.pdata, p_cm_drep->p_drep_pdata, p_cm_drep->drep_length ); + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_DREP, &ioctl, + sizeof(ioctl), &status, sizeof(status), &bytes_ret, NULL ) || + bytes_ret != sizeof(status) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_DREP IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CM ); + return status; +} + + +ib_api_status_t +al_cep_get_timewait( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT uint64_t* const p_timewait_us ) +{ + ual_cep_get_timewait_ioctl_t ioctl; + DWORD bytes_ret; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_timewait_us ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !DeviceIoControl( g_al_device, UAL_CEP_GET_TIMEWAIT, &cid, sizeof(cid), + &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_GET_TIMEWAIT IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + *p_timewait_us = ioctl.timewait_us; + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} +// +// +//ib_api_status_t +//al_cep_migrate( +// IN ib_al_handle_t h_al, +// IN net32_t cid ); +// +// +//ib_api_status_t +//al_cep_established( +// IN ib_al_handle_t h_al, +// IN net32_t cid ); + + +ib_api_status_t +al_cep_poll( + IN ib_al_handle_t h_al, + IN net32_t cid, + OUT void* __ptr64 * p_context, + OUT net32_t* const p_new_cid, + OUT ib_mad_element_t** const pp_mad ) +{ + ucep_t *p_cep; + ib_api_status_t status; + ual_cep_poll_ioctl_t ioctl; + DWORD bytes_ret; + ib_mad_element_t *p_mad; + ib_grh_t *p_grh; + ib_mad_t *p_mad_buf; + + AL_ENTER( AL_DBG_CM ); + + if( !h_al ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_HANDLE; + } + + if( !p_new_cid || !pp_mad ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + if( cid >= cl_ptr_vector_get_size( &gp_cep_mgr->cep_vector ) ) + p_cep = NULL; + else + p_cep = cl_ptr_vector_get( &gp_cep_mgr->cep_vector, cid ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + if( !p_cep ) + { + AL_EXIT( AL_DBG_CM ); + return IB_INVALID_PARAMETER; + } + + status = ib_get_mad( g_pool_key, MAD_BLOCK_SIZE, &p_mad ); + if( status != IB_SUCCESS ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ib_get_mad returned %s.\n", ib_get_err_str( status )) ); + return status; + } + + p_mad_buf = p_mad->p_mad_buf; + p_grh = p_mad->p_grh; + + if( !DeviceIoControl( g_al_device, UAL_CEP_POLL, &cid, + sizeof(cid), &ioctl, sizeof(ioctl), &bytes_ret, NULL ) || + bytes_ret != sizeof(ioctl) ) + { + ib_put_mad( p_mad ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CEP_POLL IOCTL failed with %d.\n", GetLastError()) ); + return IB_ERROR; + } + + if( ioctl.status == IB_SUCCESS ) + { + if( ioctl.new_cid != AL_INVALID_CID ) + { + /* Need to create a new CEP for user-mode.
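+ * A REQ that matched a listen produces a brand new kernel CID, so it is
+ * mirrored here with a ucep_t of its own (inheriting the listening CEP's
+ * callback) before the event is reported to the user.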
*/ + status = __create_ucep( p_cep->h_al, ioctl.new_cid, + p_cep->pfn_cb, NULL, NULL ); + if( status != IB_SUCCESS ) + { + DeviceIoControl( g_al_device, UAL_DESTROY_CEP, + &ioctl.new_cid, sizeof(ioctl.new_cid), + NULL, 0, &bytes_ret, NULL ); + goto err; + } + } + + /* Copy the MAD payload as it's all that's used. */ + *p_mad = ioctl.element; + p_mad->p_grh = p_grh; + if( p_mad->grh_valid ) + cl_memcpy( p_mad->p_grh, &ioctl.grh, sizeof(ib_grh_t) ); + p_mad->p_mad_buf = p_mad_buf; + + cl_memcpy( p_mad->p_mad_buf, ioctl.mad_buf, MAD_BLOCK_SIZE ); + + *p_context = ioctl.context; + *p_new_cid = ioctl.new_cid; + *pp_mad = p_mad; + } + else + { +err: + ib_put_mad( p_mad ); + } + + AL_EXIT( AL_DBG_CM ); + return ioctl.status; +} + + +/* Callback to process CM events */ +void +cm_cb( + IN DWORD error_code, + IN DWORD ret_bytes, + IN LPOVERLAPPED p_ov ) +{ + ucep_t *p_cep; + BOOL ret; + + AL_ENTER( AL_DBG_CM ); + + /* The UAL_CEP_GET_EVENT IOCTL does not have any output data. */ + UNUSED_PARAM( ret_bytes ); + + p_cep = PARENT_STRUCT( p_ov, ucep_t, ov ); + + if( !error_code ) + { + p_cep->pfn_cb( p_cep->h_al, p_cep->cid ); + + /* Synchronize with destruction. */ + cl_spinlock_acquire( &gp_cep_mgr->obj.lock ); + ret = DeviceIoControl( gp_cep_mgr->h_file, UAL_CEP_GET_EVENT, + &p_cep->cid, sizeof(p_cep->cid), NULL, 0, + NULL, &p_cep->ov ); + cl_spinlock_release( &gp_cep_mgr->obj.lock ); + if( !ret && GetLastError() == ERROR_IO_PENDING ) + { + AL_EXIT( AL_DBG_CM ); + return; + } + else if( GetLastError() != ERROR_INVALID_PARAMETER ) + { + /* We can get ERROR_INVALID_PARAMETER if the CEP was destroyed. */ + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("DeviceIoControl for CEP callback request returned %d.\n", + GetLastError()) ); + } + } + else + { + AL_PRINT(TRACE_LEVEL_WARNING ,AL_DBG_CM , + ("UAL_CEP_GET_EVENT IOCTL returned %d.\n", error_code) ); + } + + /* + * We failed to issue the next request or the previous request was + * cancelled. Release the reference held by the previous IOCTL and exit. + */ + if( !cl_atomic_dec( &p_cep->ref_cnt ) ) + __destroy_ucep( p_cep ); + + AL_EXIT( AL_DBG_CM ); +} diff --git a/branches/Ndi/core/al/user/ual_cq.c b/branches/Ndi/core/al/user/ual_cq.c new file mode 100644 index 00000000..c21ddeaf --- /dev/null +++ b/branches/Ndi/core/al/user/ual_cq.c @@ -0,0 +1,562 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "ual_support.h" +#include "al.h" +#include "al_ca.h" +#include "al_ci_ca.h" +#include "al_cq.h" + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_cq.tmh" +#endif + + +ib_api_status_t +ual_create_cq( + IN al_ci_ca_t* const p_ci_ca, + IN ib_cq_create_t* const p_cq_create, + IN OUT ib_cq_handle_t h_cq ) +{ + ual_create_cq_ioctl_t cq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr( &cq_ioctl, sizeof(cq_ioctl) ); + + /* Pre call to the UVP library */ + if( p_ci_ca->h_ci_ca && uvp_intf.pre_create_cq ) + { + status = uvp_intf.pre_create_cq( p_ci_ca->h_ci_ca, + &p_cq_create->size, &cq_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CQ ); + return status; + } + } + + cq_ioctl.in.h_ca = p_ci_ca->obj.hdl; + cq_ioctl.in.size = p_cq_create->size; + cq_ioctl.in.h_wait_obj = p_cq_create->h_wait_obj; + cq_ioctl.in.context = h_cq; + cq_ioctl.in.ev_notify = (h_cq->pfn_event_cb != NULL) ? TRUE : FALSE; + + cl_status = do_al_dev_ioctl( UAL_CREATE_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CREATE_CQ IOCTL returned %s.\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = cq_ioctl.out.status; + } + + h_cq->obj.hdl = cq_ioctl.out.h_cq; + + /* Post uvp call */ + if( p_ci_ca->h_ci_ca && uvp_intf.post_create_cq ) + { + uvp_intf.post_create_cq( p_ci_ca->h_ci_ca, + status, cq_ioctl.out.size, &h_cq->h_ci_cq, + &cq_ioctl.out.umv_buf ); + + if( uvp_intf.peek_cq ) + { + h_cq->pfn_peek = uvp_intf.peek_cq; + h_cq->h_peek_cq = h_cq->h_ci_cq; + } + else + { + h_cq->pfn_peek = ual_peek_cq; + h_cq->h_peek_cq = h_cq; + } + + if( uvp_intf.poll_cq ) + { + h_cq->pfn_poll = uvp_intf.poll_cq; + h_cq->h_poll_cq = h_cq->h_ci_cq; + } + else + { + h_cq->pfn_poll = ual_poll_cq; + h_cq->h_poll_cq = h_cq; + } + + if( uvp_intf.rearm_cq ) + { + h_cq->pfn_rearm = uvp_intf.rearm_cq; + h_cq->h_rearm_cq = h_cq->h_ci_cq; + } + else + { + h_cq->pfn_rearm = ual_rearm_cq; + h_cq->h_rearm_cq = h_cq; + } + + if( uvp_intf.rearm_n_cq ) + { + h_cq->pfn_rearm_n = uvp_intf.rearm_n_cq; + h_cq->h_rearm_n_cq = h_cq->h_ci_cq; + } + else + { + h_cq->pfn_rearm_n = ual_rearm_n_cq; + h_cq->h_rearm_n_cq = h_cq; + } + } + else + { + h_cq->pfn_peek = ual_peek_cq; + h_cq->pfn_poll = ual_poll_cq; + h_cq->pfn_rearm = ual_rearm_cq; + h_cq->pfn_rearm_n = ual_rearm_n_cq; + h_cq->h_peek_cq = h_cq; + h_cq->h_poll_cq = h_cq; + h_cq->h_rearm_cq = h_cq; + h_cq->h_rearm_n_cq = h_cq; + } + + if( status != IB_SUCCESS ) + { + CL_ASSERT( !h_cq->h_ci_cq ); + /* + * If we failed to create the CQ in the kernel, clear the h_ci_cq + * pointer to prevent trying to clean it up later. This is needed + * to work-around the uVPD incorrectly setting the handle in the + * post_create_cq call even in the case of a failure. + */ + /* TODO: Fix the UVP. 
*/ + h_cq->h_ci_cq = NULL; + } + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +ual_destroy_cq( + IN ib_cq_handle_t h_cq ) +{ + ual_destroy_cq_ioctl_t cq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_cq->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr( &cq_ioctl, sizeof(cq_ioctl) ); + + if( h_cq->h_ci_cq && uvp_intf.pre_destroy_cq ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_destroy_cq( h_cq->h_ci_cq ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CQ ); + return status; + } + } + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DESTROY_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DESTROY_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = cq_ioctl.out.status; + } + + if( h_cq->h_ci_cq && uvp_intf.post_destroy_cq ) + uvp_intf.post_destroy_cq( h_cq->h_ci_cq, status ); + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +ual_modify_cq( + IN ib_cq_handle_t h_cq, + IN OUT uint32_t* p_size ) +{ + ual_modify_cq_ioctl_t cq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_cq->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr( &cq_ioctl, sizeof(cq_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( h_cq->h_ci_cq && uvp_intf.pre_resize_cq ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_resize_cq( h_cq->h_ci_cq, + p_size, &cq_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CQ ); + return status; + } + } + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + cq_ioctl.in.size = *p_size; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MODIFY_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = cq_ioctl.out.status; + if( status == IB_SUCCESS ) + *p_size = cq_ioctl.out.size; + } + + /* Post uvp call */ + if( h_cq->h_ci_cq && uvp_intf.post_resize_cq ) + { + uvp_intf.post_resize_cq( h_cq->h_ci_cq, + status, cq_ioctl.out.size, &cq_ioctl.out.umv_buf ); + } + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +ual_query_cq( + IN ib_cq_handle_t h_cq, + OUT uint32_t* p_size ) +{ + ual_query_cq_ioctl_t cq_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_cq->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr( &cq_ioctl, sizeof(cq_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid cq handle */ + if( h_cq->h_ci_cq && uvp_intf.pre_query_cq ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_query_cq( + h_cq->h_ci_cq, p_size, &cq_ioctl.in.umv_buf ); + if( status == IB_VERBS_PROCESSING_DONE ) + { + AL_EXIT( AL_DBG_CQ ); + return IB_SUCCESS; + } + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_CQ ); + return status; + } + } + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + + cl_status = 
do_al_dev_ioctl( UAL_QUERY_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_QUERY_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = cq_ioctl.out.status; + if( status == IB_SUCCESS ) + *p_size = cq_ioctl.out.size; + } + + /* Post uvp call */ + if( h_cq->h_ci_cq && uvp_intf.post_query_cq ) + { + uvp_intf.post_query_cq( h_cq->h_ci_cq, + status, cq_ioctl.out.size, &cq_ioctl.out.umv_buf ); + } + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +ual_peek_cq( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_n_cqes ) +{ + ual_peek_cq_ioctl_t cq_ioctl; + cl_status_t cl_status; + uintn_t bytes_ret; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr(&cq_ioctl, sizeof(cq_ioctl)); + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_PEEK_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_PEEK_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + if( cq_ioctl.out.status == IB_SUCCESS ) + *p_n_cqes = cq_ioctl.out.n_cqes; + + AL_EXIT( AL_DBG_CQ ); + return cq_ioctl.out.status; +} + + +ib_api_status_t +ual_poll_cq( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + uintn_t bytes_ret; + ual_poll_cq_ioctl_t *p_cq_ioctl; + size_t ioctl_buf_sz; + uint32_t num_wc; + ib_wc_t* p_wc_start; + ib_wc_t* p_next_wc; + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER( AL_DBG_CQ ); + + /* + * The caller's free list is a linked list, but the kernel expects an + * array of work completions. So first walk through the list and find + * out how much memory we need to allocate. + */ + p_next_wc = *pp_free_wclist; + num_wc = 0; + while( p_next_wc ) + { + num_wc++; + + /* Check for overflow */ + if( !num_wc ) + break; + + p_next_wc = p_next_wc->p_next; + } + if( !num_wc ) + { + AL_EXIT( AL_DBG_CQ ); + return IB_INVALID_PARAMETER; + } + + ioctl_buf_sz = sizeof(p_cq_ioctl->out); + ioctl_buf_sz += sizeof(ib_wc_t) * (num_wc - 1); + + p_cq_ioctl = (ual_poll_cq_ioctl_t*)cl_zalloc( ioctl_buf_sz ); + if( !p_cq_ioctl ) + { + AL_EXIT( AL_DBG_CQ ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Now populate the ioctl buffer and send down the ioctl */ + p_cq_ioctl->in.h_cq = h_cq->obj.hdl; + p_cq_ioctl->in.num_wc = num_wc; + + cl_status = do_al_dev_ioctl( UAL_POLL_CQ, + p_cq_ioctl, sizeof(p_cq_ioctl->in), p_cq_ioctl, ioctl_buf_sz, + &bytes_ret ); + + /* Make sure we got the right amount of data returned. */ + if( cl_status != CL_SUCCESS || + bytes_ret < (sizeof(p_cq_ioctl->out) - sizeof(ib_wc_t)) || + (cl_status == CL_SUCCESS && bytes_ret < (sizeof(p_cq_ioctl->out) - + sizeof(ib_wc_t) + (sizeof(ib_wc_t) * p_cq_ioctl->out.num_wc))) ) + { + cl_free( p_cq_ioctl ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_POLL_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + status = p_cq_ioctl->out.status; + if( status == IB_SUCCESS ) + { + CL_ASSERT( p_cq_ioctl->out.num_wc ); + /* Fix up the free and done lists. */ + p_next_wc = *pp_free_wclist; + num_wc = 0; + p_wc_start = p_next_wc; + do + { + p_wc_start = p_next_wc; + CL_ASSERT( p_wc_start ); + /* Save next pointer.
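+ * The copy from the kernel below overwrites every field of the ib_wc_t,
+ * including p_next, so the link is saved here and restored right after
+ * the cl_memcpy to keep the caller's list intact.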
*/ + p_next_wc = p_wc_start->p_next; + /* Copy WC contents back to user. */ + cl_memcpy( + p_wc_start, &p_cq_ioctl->out.wc[num_wc], sizeof(ib_wc_t) ); + /* Restore next pointer. */ + p_wc_start->p_next = p_next_wc; + } while( ++num_wc < p_cq_ioctl->out.num_wc ); + + p_wc_start->p_next = NULL; + *pp_done_wclist = *pp_free_wclist; + *pp_free_wclist = p_next_wc; + } + + cl_free( p_cq_ioctl ); + + AL_EXIT( AL_DBG_CQ ); + return status; +} + + +ib_api_status_t +ual_rearm_cq( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ) +{ + ual_rearm_cq_ioctl_t cq_ioctl; + cl_status_t cl_status; + uintn_t bytes_ret; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr(&cq_ioctl, sizeof(cq_ioctl)); + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + cq_ioctl.in.solicited = solicited; + + cl_status = do_al_dev_ioctl( UAL_REARM_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_REARM_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CQ ); + return cq_ioctl.out.status; +} + + +ib_api_status_t +ual_rearm_n_cq( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ) +{ + ual_rearm_n_cq_ioctl_t cq_ioctl; + cl_status_t cl_status; + uintn_t bytes_ret; + + AL_ENTER( AL_DBG_CQ ); + + /* Clear the IOCTL buffer */ + cl_memclr(&cq_ioctl, sizeof(cq_ioctl)); + + cq_ioctl.in.h_cq = h_cq->obj.hdl; + cq_ioctl.in.n_cqes = n_cqes; + + cl_status = do_al_dev_ioctl( UAL_REARM_N_CQ, + &cq_ioctl.in, sizeof(cq_ioctl.in), &cq_ioctl.out, sizeof(cq_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(cq_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_REARM_N_CQ IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_CQ ); + return cq_ioctl.out.status; +} diff --git a/branches/Ndi/core/al/user/ual_dm.c b/branches/Ndi/core/al/user/ual_dm.c new file mode 100644 index 00000000..37ca67e7 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_dm.c @@ -0,0 +1,35 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "ual_support.h" +#include "al.h" + diff --git a/branches/Ndi/core/al/user/ual_dm.h b/branches/Ndi/core/al/user/ual_dm.h new file mode 100644 index 00000000..7bb023b4 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_dm.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__IB_UAL_DM_H__) +#define __IB_UAL_DM_H__ + +#endif /* __IB_UAL_DM_H__ */ diff --git a/branches/Ndi/core/al/user/ual_mad.c b/branches/Ndi/core/al/user/ual_mad.c new file mode 100644 index 00000000..a59df935 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mad.c @@ -0,0 +1,530 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "al.h" +#include "al_av.h" +#include "al_common.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_mad.tmh" +#endif + +#include "al_dev.h" +#include "al_qp.h" +#include "al_pd.h" +#include "ual_mad.h" +#include "ual_support.h" + + +static void +__destroying_ual_mad_svc( + IN struct _al_obj *p_obj ) +{ + ib_mad_svc_handle_t h_mad_svc; + + AL_ENTER(AL_DBG_MAD_SVC); + CL_ASSERT( p_obj ); + h_mad_svc = PARENT_STRUCT( p_obj, al_mad_svc_t, obj ); + + /* Deregister the MAD service. */ + ual_dereg_mad_svc( h_mad_svc ); + + AL_EXIT(AL_DBG_MAD_SVC); +} + + +ib_api_status_t +ual_reg_mad_svc( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ) +{ + ib_api_status_t status; + cl_status_t cl_status; + uintn_t bytes_ret; + ib_mad_svc_handle_t h_mad_svc; + al_qp_alias_t *p_qp_alias; + ual_reg_mad_svc_ioctl_t ioctl_buf; + + AL_ENTER( AL_DBG_MAD ); + + CL_ASSERT( h_qp && p_mad_svc && ph_mad_svc ); + + h_mad_svc = cl_zalloc( sizeof( al_mad_svc_t) ); + if( !h_mad_svc ) + { + AL_EXIT( AL_DBG_MAD ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the MAD service. */ + construct_al_obj( &h_mad_svc->obj, AL_OBJ_TYPE_H_MAD_SVC ); + + p_qp_alias = PARENT_STRUCT( h_qp, al_qp_alias_t, qp ); + h_mad_svc->obj.context = p_mad_svc->mad_svc_context; + h_mad_svc->pfn_user_recv_cb = p_mad_svc->pfn_mad_recv_cb; + h_mad_svc->pfn_user_send_cb = p_mad_svc->pfn_mad_send_cb; + + /* Initialize the MAD service. */ + status = init_al_obj( &h_mad_svc->obj, p_mad_svc->mad_svc_context, + TRUE, NULL, __destroying_ual_mad_svc, free_mad_svc ); + if( status != IB_SUCCESS ) + { + cl_free( h_mad_svc ); + AL_EXIT( AL_DBG_MAD ); + return status; + } + attach_al_obj( &h_qp->obj, &h_mad_svc->obj ); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.h_qp = h_qp->obj.hdl; + ioctl_buf.in.mad_svc = *p_mad_svc; + + /* Replace the context in mad_svc */ + ioctl_buf.in.mad_svc.mad_svc_context = h_mad_svc; + + cl_status = do_al_dev_ioctl( UAL_REG_MAD_SVC, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_REG_MAD_SVC IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + if(status == IB_SUCCESS) + h_mad_svc->obj.hdl = ioctl_buf.out.h_mad_svc; + + } + + if( status != IB_SUCCESS ) + { + h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL ); + AL_EXIT( AL_DBG_MAD ); + return status; + } + + *ph_mad_svc = h_mad_svc; + + AL_EXIT( AL_DBG_MAD ); + return status; +} + + + +ib_api_status_t +ual_dereg_mad_svc( + IN const ib_mad_svc_handle_t h_mad_svc ) +{ + ual_dereg_mad_svc_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.h_mad_svc = h_mad_svc->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DEREG_MAD_SVC, &ioctl_buf, + sizeof(ioctl_buf.in), &ioctl_buf, sizeof(ioctl_buf.out), &bytes_ret ); + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + status = IB_ERROR; + else + status = ioctl_buf.out.status; + + if( status != IB_SUCCESS ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("Error deregistering MAD svc: %s\n", ib_get_err_str( status ) ) ); + } + + AL_EXIT(AL_DBG_MAD); + return status; +} + + + 
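+/*
+ * Illustrative usage sketch (not part of the original source): a MAD
+ * service is registered on a QP and torn down through the AL object
+ * destroy path, which runs __destroying_ual_mad_svc above:
+ *
+ *	ib_mad_svc_handle_t	h_mad_svc;
+ *	if( ual_reg_mad_svc( h_qp, &mad_svc, &h_mad_svc ) == IB_SUCCESS )
+ *	{
+ *		/-* ... send MADs via ual_spl_qp_mad_send ... *-/
+ *		h_mad_svc->obj.pfn_destroy( &h_mad_svc->obj, NULL );
+ *	}
+ */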
+ib_api_status_t +ual_send_one_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element ) +{ + ual_send_mad_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + al_mad_element_t* p_al_element; + + AL_ENTER( AL_DBG_MAD ); + + CL_ASSERT( p_mad_element ); + + p_al_element = PARENT_STRUCT( + p_mad_element, al_mad_element_t, element ); + + p_mad_element->status = IB_WCS_UNKNOWN; + + ioctl_buf.in.h_mad_svc = h_mad_svc->obj.hdl; + + /* Update the pool key to the proxy's handle. */ + ioctl_buf.in.pool_key = p_al_element->pool_key->obj.hdl; + + /* + * Convert user-mode AV handles to kernel AV handles. Note that + * the completion handler will convert the handles back before + * returning the MAD to the user. + */ + if( p_mad_element->h_av ) + ioctl_buf.in.h_av = p_mad_element->h_av->obj.hdl; + else + ioctl_buf.in.h_av = AL_INVALID_HANDLE; + + ioctl_buf.in.p_mad_element = p_mad_element; + ioctl_buf.in.size = p_mad_element->size; + ioctl_buf.in.ph_proxy = &p_al_element->h_proxy_element; + + cl_status = do_al_dev_ioctl( UAL_MAD_SEND, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MAD_SEND IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + } + + AL_EXIT( AL_DBG_MAD ); + return status; +} + + + +ib_api_status_t +ual_spl_qp_mad_send( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element_list, + OUT ib_mad_element_t **pp_mad_failure OPTIONAL ) +{ + ib_api_status_t status; + ib_mad_element_t* p_next_element; + + AL_ENTER( AL_DBG_MAD ); + + /* Send each MAD in the list, stopping at the first failure. */ + p_next_element = p_mad_element_list; + do + { + status = ual_send_one_mad( h_mad_svc, p_next_element ); + if( status != IB_SUCCESS ) + break; + + p_next_element = p_next_element->p_next; + + } while( p_next_element ); + + if( status != IB_SUCCESS && pp_mad_failure ) + *pp_mad_failure = p_next_element; + + AL_EXIT( AL_DBG_MAD ); + return status; +} + + + +ib_api_status_t +ual_spl_qp_cancel_mad( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element ) +{ + ual_cancel_mad_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + al_mad_element_t* p_al_mad; + + AL_ENTER( AL_DBG_MAD ); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.h_mad_svc = h_mad_svc->obj.hdl; + /* + * Pass the corresponding kernel mode mad_element as KAL knows + * only about kernel mads. This gets set when we send the mad.
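+ * (ual_send_one_mad records it by passing &p_al_element->h_proxy_element
+ * as ioctl_buf.in.ph_proxy, which the proxy fills in during the send.)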
+ */ + p_al_mad = PARENT_STRUCT(p_mad_element, al_mad_element_t, element); + ioctl_buf.in.h_proxy_element = p_al_mad->h_proxy_element; + + cl_status = do_al_dev_ioctl( UAL_CANCEL_MAD, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_CANCEL_MAD IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_MAD ); + return ioctl_buf.out.status; +} + + +ib_api_status_t +ual_create_reg_mad_pool( + IN const ib_pool_handle_t h_pool, + IN const ib_pd_handle_t h_pd, + IN OUT ib_pool_key_t p_pool_key ) +{ + ual_reg_mad_pool_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status = IB_ERROR; + + AL_ENTER( AL_DBG_MAD ); + + /*TODO: Can h_pool be removed as a param? */ + UNUSED_PARAM( h_pool ); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.h_pd = h_pd->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_REG_MAD_POOL, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_REG_MAD_POOL IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + if( status == IB_SUCCESS ) + p_pool_key->obj.hdl = ioctl_buf.out.pool_key; + } + + AL_EXIT( AL_DBG_MAD ); + return status; +} + + +void +ual_dereg_destroy_mad_pool( + IN const ib_pool_key_t pool_key ) +{ + ual_dereg_mad_pool_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MAD ); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + ioctl_buf.in.pool_key = pool_key->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DEREG_MAD_POOL, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DEREG_MAD_POOL IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status))); + } + else if( ioctl_buf.out.status != IB_SUCCESS ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("Error deregistering MAD pool: %s\n", + ib_get_err_str( ioctl_buf.out.status )) ); + } + AL_EXIT( AL_DBG_MAD ); +} + + +/* + * We've received a MAD. We need to get a user-mode MAD of the + * correct size, then send it down to retrieve the received MAD + * from the kernel. + */ +ib_api_status_t +ual_get_recv_mad( + IN ib_pool_key_t p_pool_key, + IN const uint64_t h_mad, + IN const size_t buf_size, + OUT ib_mad_element_t** const pp_mad_element ) +{ + ual_mad_recv_ioctl_t ioctl_buf; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + ib_mad_element_t *p_mad = NULL; + ib_mad_t *p_mad_buf = NULL; + ib_grh_t *p_grh = NULL; + + AL_ENTER( AL_DBG_MAD ); + + cl_memclr( &ioctl_buf, sizeof(ioctl_buf) ); + + /* + * Get a MAD large enough to receive the MAD. If we can't get a + * MAD, we still perform the IOCTL so that the kernel will return + * the MAD to its pool, resulting in a dropped MAD. + */ + status = ib_get_mad( p_pool_key, buf_size, &p_mad ); + + /* + * Note that we issue the IOCTL regardless of failure of ib_get_mad. + * This is done in order to release the kernel-mode MAD.
+ */ + ioctl_buf.in.p_user_mad = p_mad; + + if( p_mad ) + { + /* Save off the pointers since the proxy overwrites the element. */ + p_mad_buf = p_mad->p_mad_buf; + p_grh = p_mad->p_grh; + + ioctl_buf.in.p_mad_buf = p_mad_buf; + ioctl_buf.in.p_grh = p_grh; + } + ioctl_buf.in.h_mad = h_mad; + + cl_status = do_al_dev_ioctl( UAL_MAD_RECV_COMP, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MAD_RECV_COMP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + } + + if( p_mad ) + { + /* We need to reset MAD data pointers. */ + p_mad->p_mad_buf = p_mad_buf; + p_mad->p_grh = p_grh; + if( status != IB_SUCCESS ) + { + ib_put_mad( p_mad ); + p_mad = NULL; + } + } + + *pp_mad_element = p_mad; + + AL_EXIT( AL_DBG_MAD ); + return status; +} + + +ib_api_status_t +ual_local_mad( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN ib_mad_t* const p_mad_in, + IN ib_mad_t* p_mad_out ) +{ + ual_local_mad_ioctl_t local_mad_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status = CL_SUCCESS; + ib_api_status_t status = IB_SUCCESS; + + AL_ENTER( AL_DBG_CA ); + + local_mad_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl; + local_mad_ioctl.in.port_num = port_num; + cl_memcpy( local_mad_ioctl.in.mad_in, p_mad_in, + sizeof(local_mad_ioctl.in.mad_in) ); + + cl_status = do_al_dev_ioctl( UAL_LOCAL_MAD, + &local_mad_ioctl.in, sizeof(local_mad_ioctl.in), + &local_mad_ioctl.out, sizeof(local_mad_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(local_mad_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_LOCAL_MAD IOCTL returned %s\n", CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = local_mad_ioctl.out.status; + cl_memcpy( p_mad_out, local_mad_ioctl.out.mad_out, + sizeof(local_mad_ioctl.out.mad_out) ); + } + + AL_EXIT( AL_DBG_CA ); + return status; +} + + diff --git a/branches/Ndi/core/al/user/ual_mad.h b/branches/Ndi/core/al/user/ual_mad.h new file mode 100644 index 00000000..a1956469 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mad.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#if !defined(__IB_UAL_MAD_H__)
+#define __IB_UAL_MAD_H__
+
+#include
+#include "al_mad_pool.h"
+
+
+/*
+ * Create a pool key for the internal mad pool. This pool key will never
+ * be registered on anything.
+ */
+ib_api_status_t
+ual_reg_global_mad_pool(
+	IN		const	ib_pool_handle_t			h_pool,
+		OUT			ib_pool_key_t* const		pp_pool_key );
+
+ib_api_status_t
+ual_reg_mad_svc(
+	IN		const	ib_qp_handle_t				h_qp,
+	IN		const	ib_mad_svc_t* const			p_mad_svc,
+		OUT			ib_mad_svc_handle_t* const	ph_mad_svc );
+
+ib_api_status_t
+ual_dereg_mad_svc(
+	IN		const	ib_mad_svc_handle_t			h_mad_svc );
+
+
+ib_api_status_t
+ual_spl_qp_mad_send(
+	IN		const	ib_mad_svc_handle_t			h_mad_svc,
+	IN				ib_mad_element_t* const		p_mad_element_list,
+		OUT			ib_mad_element_t			**pp_mad_failure OPTIONAL );
+
+ib_api_status_t
+ual_spl_qp_cancel_mad(
+	IN		const	ib_mad_svc_handle_t			h_mad_svc,
+	IN				ib_mad_element_t* const		p_mad_element );
+
+void
+ual_dereg_destroy_mad_pool(
+	IN		const	ib_pool_key_t				pool_key );
+
+ib_api_status_t
+ual_create_reg_mad_pool(
+	IN		const	ib_pool_handle_t			h_pool,
+	IN		const	ib_pd_handle_t				h_pd,
+	IN	OUT			ib_pool_key_t				p_pool_key );
+
+
+ib_api_status_t
+ual_get_recv_mad(
+	IN				ib_pool_key_t				p_pool_key,
+	IN		const	uint64_t					h_mad,
+	IN		const	size_t						buf_size,
+		OUT			ib_mad_element_t** const	pp_mad_element );
+
+ib_api_status_t
+ual_local_mad(
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	uint8_t						port_num,
+	IN				ib_mad_t* const				p_mad_in,
+	IN				ib_mad_t*					p_mad_out );
+
+#endif /* __IB_UAL_MAD_H__ */
diff --git a/branches/Ndi/core/al/user/ual_mad_pool.c b/branches/Ndi/core/al/user/ual_mad_pool.c
new file mode 100644
index 00000000..71277e61
--- /dev/null
+++ b/branches/Ndi/core/al/user/ual_mad_pool.c
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include "al_mad_pool.h"
+#include "al_debug.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "ual_mad_pool.tmh"
+#endif
+
+
+
+
+/*
+ * Function prototypes.
+ */
+static void
+__ual_free_pool_key(
+	IN				al_obj_t*					p_obj );
+
+
+/*
+ * Register the global MAD pool. No protection domain is involved; the
+ * key created here is an internal alias attached directly to the pool.
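+ *
+ * (See the body below: the key is constructed as AL_KEY_ALIAS with
+ * h_pd left NULL, attached to the pool object itself, and later handed
+ * back through g_pool_key in create_al_mgr for internal MADs only.)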
+ */ +ib_api_status_t +ual_reg_global_mad_pool( + IN const ib_pool_handle_t h_pool, + OUT ib_pool_key_t* const pp_pool_key ) +{ + al_pool_key_t* p_pool_key; + ib_api_status_t status; + + AL_ENTER(AL_DBG_MAD_POOL); + + if( AL_OBJ_INVALID_HANDLE( h_pool, AL_OBJ_TYPE_H_MAD_POOL ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + if( !pp_pool_key ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a pool key structure. */ + p_pool_key = cl_zalloc( sizeof( al_pool_key_t ) ); + if( !p_pool_key ) + return IB_INSUFFICIENT_MEMORY; + + /* Initialize the pool key. */ + construct_al_obj( &p_pool_key->obj, AL_OBJ_TYPE_H_POOL_KEY ); + p_pool_key->type = AL_KEY_ALIAS; + p_pool_key->h_pool = h_pool; + p_pool_key->h_pd = NULL; + + /* Initialize the pool key object. */ + status = init_al_obj( &p_pool_key->obj, p_pool_key, TRUE, + NULL, NULL, __ual_free_pool_key ); + if( status != IB_SUCCESS ) + { + __ual_free_pool_key( &p_pool_key->obj ); + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + + status = attach_al_obj( &h_pool->obj, &p_pool_key->obj ); + if( status != IB_SUCCESS ) + { + p_pool_key->obj.pfn_destroy( &p_pool_key->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + p_pool_key->h_al = p_pool_key->obj.h_al; + + /* Return the pool key. */ + *pp_pool_key = (ib_pool_key_t)p_pool_key; + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &p_pool_key->obj ); + + AL_EXIT(AL_DBG_MAD_POOL); + return IB_SUCCESS; +} + + + +/* + * Free a pool key. + */ +static void +__ual_free_pool_key( + IN al_obj_t* p_obj ) +{ + al_pool_key_t* p_pool_key; + + CL_ASSERT( p_obj ); + p_pool_key = PARENT_STRUCT( p_obj, al_pool_key_t, obj ); + + CL_ASSERT( !p_pool_key->mad_cnt ); + destroy_al_obj( &p_pool_key->obj ); + cl_free( p_pool_key ); +} + diff --git a/branches/Ndi/core/al/user/ual_mcast.c b/branches/Ndi/core/al/user/ual_mcast.c new file mode 100644 index 00000000..6f05f3db --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mcast.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+#include
+#include "al_qp.h"
+#include "ual_support.h"
+#include "ual_mcast.h"
+
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "ual_mcast.tmh"
+#endif
+
+ib_api_status_t
+ual_attach_mcast(
+	IN				ib_mcast_handle_t			h_mcast )
+{
+	ual_attach_mcast_ioctl_t	ioctl_buf;
+	uintn_t						bytes_ret;
+	cl_status_t					cl_status;
+	ib_api_status_t				status = IB_ERROR;
+	ib_qp_handle_t				h_qp;
+	uvp_interface_t				uvp_intf;
+
+	AL_ENTER( AL_DBG_MCAST );
+
+	h_qp = PARENT_STRUCT( h_mcast->obj.p_parent_obj,
+		al_dgrm_qp_t, obj );
+	uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs;
+
+	/* Clear the ioctl_buf */
+	cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+
+	/* Pre call to the UVP library */
+	if( h_qp->h_ci_qp && uvp_intf.pre_attach_mcast )
+	{
+		status = uvp_intf.pre_attach_mcast( h_qp->h_ci_qp,
+			&h_mcast->member_rec.mgid, h_mcast->member_rec.mlid,
+			&ioctl_buf.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_MCAST );
+			return status;
+		}
+	}
+
+	ioctl_buf.in.h_qp = h_qp->obj.hdl;
+	ioctl_buf.in.mgid = h_mcast->member_rec.mgid;
+	ioctl_buf.in.mlid = h_mcast->member_rec.mlid;
+
+	cl_status = do_al_dev_ioctl( UAL_ATTACH_MCAST,
+		&ioctl_buf.in, sizeof(ioctl_buf.in),
+		&ioctl_buf.out, sizeof(ioctl_buf.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_ATTACH_MCAST IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = ioctl_buf.out.status;
+		if( status == IB_SUCCESS )
+		{
+			h_mcast->obj.hdl = ioctl_buf.out.h_attach;
+			h_mcast->h_ci_mcast = (ib_mcast_handle_t)ioctl_buf.out.h_attach;
+		}
+	}
+
+	/* Post uvp call */
+	if( h_qp->h_ci_qp && uvp_intf.post_attach_mcast )
+	{
+		uvp_intf.post_attach_mcast( h_qp->h_ci_qp,
+			status, &h_mcast->h_ci_mcast, &ioctl_buf.out.umv_buf );
+	}
+
+	AL_EXIT( AL_DBG_MCAST );
+	return status;
+}
+
+
+ib_api_status_t
+ual_detach_mcast(
+	IN				ib_mcast_handle_t			h_mcast )
+{
+	ual_detach_mcast_ioctl_t	ioctl_buf;
+	uintn_t						bytes_ret;
+	cl_status_t					cl_status;
+	ib_api_status_t				status;
+	ib_qp_handle_t				h_qp;
+	uvp_interface_t				uvp_intf;
+
+	AL_ENTER( AL_DBG_MCAST );
+
+	h_qp = PARENT_STRUCT( h_mcast->obj.p_parent_obj,
+		al_dgrm_qp_t, obj );
+	uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs;
+
+	/* Clear the ioctl_buf */
+	cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+
+	/* Pre call to the UVP library */
+	if( h_qp->h_ci_qp && uvp_intf.pre_detach_mcast )
+	{
+		status = uvp_intf.pre_detach_mcast( h_mcast->h_ci_mcast );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_MCAST );
+			return status;
+		}
+	}
+
+	ioctl_buf.in.h_attach = h_mcast->obj.hdl;
+
+	cl_status = do_al_dev_ioctl( UAL_DETACH_MCAST,
+		&ioctl_buf.in, sizeof(ioctl_buf.in),
+		&ioctl_buf.out, sizeof(ioctl_buf.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_DETACH_MCAST IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = ioctl_buf.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_mcast->h_ci_mcast && uvp_intf.post_detach_mcast )
+		uvp_intf.post_detach_mcast( h_mcast->h_ci_mcast, status );
+
+	AL_EXIT( AL_DBG_MCAST );
+	return status;
+}
diff --git a/branches/Ndi/core/al/user/ual_mcast.h b/branches/Ndi/core/al/user/ual_mcast.h
new file mode 100644
index 00000000..2c249cb1
--- /dev/null
+++ b/branches/Ndi/core/al/user/ual_mcast.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (c) 2005
SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__UAL_MCAST_H__) +#define __UAL_MCAST_H__ + +ib_api_status_t +ual_attach_mcast( + IN ib_mcast_handle_t h_mcast ); + +ib_api_status_t +ual_detach_mcast( + IN ib_mcast_handle_t h_mcast ); + +#endif /* __UAL_MCAST_H__ */ diff --git a/branches/Ndi/core/al/user/ual_mgr.c b/branches/Ndi/core/al/user/ual_mgr.c new file mode 100644 index 00000000..09365d05 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mgr.c @@ -0,0 +1,1293 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "ual_support.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_mgr.tmh" +#endif + +#include "al_mgr.h" +#include "al_init.h" +#include "al_res_mgr.h" +#include "al_proxy_ioctl.h" +#include "al.h" +#include "al_ci_ca.h" +#include "al_pnp.h" +#include "al_ioc_pnp.h" +#include "al_cq.h" +#include "ual_ca.h" +#include "ual_qp.h" +#include "ual_mad.h" +#include "ib_common.h" +#include "al_cm_cep.h" + + +/* Global AL manager handle is defined in al_mgr_shared.c */ +extern ib_al_handle_t gh_al; +extern al_mgr_t* gp_al_mgr; +extern ib_pool_handle_t gh_mad_pool; + + +atomic32_t g_open_cnt = 0; + +/* Define the thread names to handle various notifications */ +#define CM_THREAD_NAME "CM_Thread" +#define COMP_THREAD_NAME "Comp_Thread" +#define MISC_THREAD_NAME "Misc_Thread" + +static DWORD WINAPI +__cb_thread_routine( + IN void *context ); + +//static void +//__process_cm_cb( +// IN cm_cb_ioctl_info_t* p_cm_cb_info); + +static void +__process_misc_cb( + IN misc_cb_ioctl_info_t* p_misc_cb_info ); + + +static void +__cleanup_ual_mgr( + IN al_obj_t *p_obj ) +{ + AL_ENTER(AL_DBG_MGR); + + UNUSED_PARAM( p_obj ); + + /* Set the callback thread state to exit. */ + gp_al_mgr->ual_mgr.exit_thread = TRUE; + + /* Closing the file handles cancels any pending I/O requests. */ + //CloseHandle( gp_al_mgr->ual_mgr.h_cm_file ); + CloseHandle( gp_al_mgr->ual_mgr.h_cq_file ); + CloseHandle( gp_al_mgr->ual_mgr.h_misc_file ); + CloseHandle( g_al_device ); + g_al_device = INVALID_HANDLE_VALUE; +} + + +static void +__free_ual_mgr( + IN al_obj_t *p_obj ) +{ + size_t i; + HANDLE h_thread; + + UNUSED_PARAM( p_obj ); + + /* + * We need to destroy the AL object before the spinlock, since + * destroying the AL object will try to acquire the spinlock. + */ + destroy_al_obj( &gp_al_mgr->obj ); + + /* Verify that the object list is empty. */ + print_al_objs( NULL ); + + if( gp_al_mgr->ual_mgr.h_cb_port ) + { + /* Post a notification to the completion port to make threads exit. */ + for( i = 0; + i < cl_ptr_vector_get_size( &gp_al_mgr->ual_mgr.cb_threads ); + i++ ) + { + if( !PostQueuedCompletionStatus( gp_al_mgr->ual_mgr.h_cb_port, + 0, 0, NULL ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("PostQueuedCompletionStatus returned %d\n", + GetLastError()) ); + } + } + + while( cl_ptr_vector_get_size( &gp_al_mgr->ual_mgr.cb_threads ) ) + { + h_thread = cl_ptr_vector_get( &gp_al_mgr->ual_mgr.cb_threads, 0 ); + WaitForSingleObject( h_thread, INFINITE ); + CloseHandle( h_thread ); + cl_ptr_vector_remove( &gp_al_mgr->ual_mgr.cb_threads, 0 ); + } + + CloseHandle( gp_al_mgr->ual_mgr.h_cb_port ); + } + + cl_ptr_vector_destroy( &gp_al_mgr->ual_mgr.cb_threads ); + cl_spinlock_destroy( &gp_al_mgr->lock ); + + cl_free( gp_al_mgr ); + gp_al_mgr = NULL; +} + + +HANDLE +ual_create_async_file( + IN uint32_t type ) +{ + cl_status_t cl_status; + ual_bind_file_ioctl_t ioctl; + uintn_t bytes_ret; + + AL_ENTER( AL_DBG_MGR ); + + /* Create a file object on which to issue all SA requests. */ + ioctl.h_file = CreateFileW( L"\\\\.\\ibal", + GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, OPEN_EXISTING, FILE_FLAG_OVERLAPPED, NULL ); + if( ioctl.h_file == INVALID_HANDLE_VALUE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("CreateFile returned %d.\n", GetLastError()) ); + return INVALID_HANDLE_VALUE; + } + + /* Bind this file object to the completion port. 
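+	 * The bind type doubles as the I/O completion key: each async file
+	 * is associated with h_cb_port using its UAL_BIND_* value, and
+	 * __cb_thread_routine (at the end of this file) switches on that
+	 * key to dispatch cq_cb, misc_cb, etc. Roughly:
+	 *
+	 *	GetQueuedCompletionStatus( h_cb_port, &bytes, &key, &p_ov, INFINITE );
+	 *	switch( key ) { case UAL_BIND_CQ: cq_cb( err, bytes, p_ov ); ... }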
*/ + if( !CreateIoCompletionPort( + ioctl.h_file, gp_al_mgr->ual_mgr.h_cb_port, type, 0 ) ) + { + CloseHandle( ioctl.h_file ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("CreateIoCompletionPort for file of type %d returned %d.\n", + type, GetLastError()) ); + return INVALID_HANDLE_VALUE; + } + + /* + * Send an IOCTL down on the main file handle to bind this file + * handle with our proxy context. + */ + cl_status = do_al_dev_ioctl( + type, &ioctl, sizeof(ioctl), NULL, 0, &bytes_ret ); + if( cl_status != CL_SUCCESS ) + { + CloseHandle( ioctl.h_file ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Bind IOCTL for type %d returned %s.\n", + type,CL_STATUS_MSG(cl_status)) ); + return INVALID_HANDLE_VALUE; + } + + AL_EXIT( AL_DBG_MGR ); + return ioctl.h_file; +} + + +ib_api_status_t +ual_create_cb_threads( void ) +{ + cl_status_t cl_status; + uint32_t i; + HANDLE h_thread; + + AL_ENTER( AL_DBG_MGR ); + + cl_status = cl_ptr_vector_init( + &gp_al_mgr->ual_mgr.cb_threads, cl_proc_count(), 0 ); + if( cl_status != CL_SUCCESS ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("cl_ptr_vector_init returned %s.\n", CL_STATUS_MSG( cl_status )) ); + return IB_ERROR; + } + + for( i = 0; i < cl_proc_count(); i++ ) + { + h_thread = CreateThread( NULL, 0, __cb_thread_routine, NULL, 0, NULL ); + if( !h_thread ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("CreateThread returned %d.\n", GetLastError()) ); + return IB_ERROR; + } + + /* We already sized the vector, so insertion should work. */ + cl_status = cl_ptr_vector_insert( &gp_al_mgr->ual_mgr.cb_threads, + h_thread, NULL ); + CL_ASSERT( cl_status == CL_SUCCESS ); + } + + AL_EXIT( AL_DBG_MGR ); + return IB_SUCCESS; +} + + +/* + * Create the ual manager for the process + */ +ib_api_status_t +create_al_mgr() +{ + ib_api_status_t ib_status; + cl_status_t cl_status; + uintn_t bytes_ret; + ULONG ver; + + AL_ENTER(AL_DBG_MGR); + + CL_ASSERT( !gp_al_mgr ); + + /* First open the kernel device. */ + CL_ASSERT( g_al_device == INVALID_HANDLE_VALUE ); + g_al_device = CreateFileW( L"\\\\.\\ibal", + GENERIC_READ | GENERIC_WRITE, FILE_SHARE_READ | FILE_SHARE_WRITE, + NULL, OPEN_EXISTING, 0, NULL ); + if( g_al_device == INVALID_HANDLE_VALUE ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("CreateFile returned %d.\n", GetLastError()) ); + return IB_ERROR; + } + + + ver = AL_IOCTL_VERSION; + + cl_status = + do_al_dev_ioctl( UAL_BIND, &ver, sizeof(ver), NULL, 0, &bytes_ret ); + if( cl_status != CL_SUCCESS ) + return IB_ERROR; + + gp_al_mgr = cl_zalloc( sizeof( al_mgr_t ) ); + if( !gp_al_mgr ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("Failed to cl_zalloc ual_mgr_t.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the AL manager. */ + cl_event_construct( &gp_al_mgr->ual_mgr.sync_event ); + cl_ptr_vector_construct( &gp_al_mgr->ual_mgr.cb_threads ); + cl_qlist_init( &gp_al_mgr->al_obj_list ); + cl_qlist_init( &gp_al_mgr->ci_ca_list ); + cl_spinlock_construct( &gp_al_mgr->lock ); + gp_al_mgr->ual_mgr.h_cb_port = NULL; + + /* Init the al object in the ual manager */ + construct_al_obj(&gp_al_mgr->obj, AL_OBJ_TYPE_AL_MGR); + ib_status = init_al_obj( &gp_al_mgr->obj, gp_al_mgr, FALSE, + NULL, __cleanup_ual_mgr, __free_ual_mgr ); + if( ib_status != IB_SUCCESS ) + { + __free_ual_mgr( &gp_al_mgr->obj ); + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("init_al_obj failed, status = 0x%x.\n", ib_status) ); + return ib_status; + } + + /* Allocate the I/O completion port for async operations. 
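+	 * One port serves all callback traffic: the worker threads created
+	 * just below (one per processor, see ual_create_cb_threads) block
+	 * on this port in GetQueuedCompletionStatus, so callback processing
+	 * scales with the processor count.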
 */
+	gp_al_mgr->ual_mgr.h_cb_port = CreateIoCompletionPort(
+		INVALID_HANDLE_VALUE, NULL, 0, 0 );
+	if( !gp_al_mgr->ual_mgr.h_cb_port )
+	{
+		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("Failed to create I/O completion port.\n") );
+		return IB_ERROR;
+	}
+
+	/* Create the threads to process completion callbacks. */
+	ib_status = ual_create_cb_threads();
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("ual_create_cb_threads failed.\n") );
+		return ib_status;
+	}
+
+	/* Create CM callback file handle. */
+	//gp_al_mgr->ual_mgr.h_cm_file = ual_create_async_file( UAL_BIND_CM );
+	//if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )
+	//{
+	//	gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+	//	AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+	//		("ual_create_async_file for UAL_BIND_CM returned %d.\n",
+	//		GetLastError()) );
+	//	return IB_ERROR;
+	//}
+
+	/* Create the CQ completion callback file handle. */
+	gp_al_mgr->ual_mgr.h_cq_file = ual_create_async_file( UAL_BIND_CQ );
+	if( gp_al_mgr->ual_mgr.h_cq_file == INVALID_HANDLE_VALUE )
+	{
+		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("ual_create_async_file for UAL_BIND_CQ returned %d.\n",
+			GetLastError()) );
+		return IB_ERROR;
+	}
+
+	/* Create the miscellaneous callback file handle. */
+	gp_al_mgr->ual_mgr.h_misc_file = ual_create_async_file( UAL_BIND_MISC );
+	if( gp_al_mgr->ual_mgr.h_misc_file == INVALID_HANDLE_VALUE )
+	{
+		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("ual_create_async_file for UAL_BIND_MISC returned %d.\n",
+			GetLastError()) );
+		return IB_ERROR;
+	}
+
+	cl_status = cl_spinlock_init( &gp_al_mgr->lock );
+	if( cl_status != CL_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* With PnP support, open the AL instance before the threads
+	 * get a chance to process async events
+	 */
+
+	/* Open an implicit al instance for UAL's internal usage. This call will
+	 * automatically create the gh_al.
+	 */
+	gh_al = NULL;
+	if ((ib_status = do_open_al(&gp_al_mgr->ual_mgr.h_al)) != IB_SUCCESS)
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("do_open_al() failed, status = 0x%x.\n", ib_status) );
+		return ( ib_status );
+	}
+
+	/* Create the global AL MAD pool. */
+	ib_status = ib_create_mad_pool( gh_al, 0, 0, 64, &gh_mad_pool );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("ib_create_mad_pool failed with %s.\n", ib_get_err_str(ib_status)) );
+		return ib_status;
+	}
+
+	/*
+	 * Create a global pool key for internal MADs - they are never
+	 * registered on any CA.
+	 */
+	ib_status = ual_reg_global_mad_pool( gh_mad_pool, &g_pool_key );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("ual_reg_global_mad_pool failed with %s.\n", ib_get_err_str(ib_status)) );
+		return ib_status;
+	}
+
+	/* Create the pnp manager before the threads initialize. This makes
+	 * sure that the pnp manager is ready to process pnp callbacks as
+	 * soon as the callback threads start running
+	 */
+	ib_status = create_pnp( &gp_al_mgr->obj );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("create_pnp failed with %s.\n", ib_get_err_str(ib_status)) );
+		return ib_status;
+	}
+
+	/* Initialize the AL resource manager. */
+	ib_status = create_res_mgr( &gp_al_mgr->obj );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("create_res_mgr failed with %s.\n", ib_get_err_str(ib_status)) );
+		return ib_status;
+	}
+
+	/* Initialize the AL SA request manager. */
+	ib_status = create_sa_req_mgr( &gp_al_mgr->obj );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("create_sa_req_mgr failed with %s.\n", ib_get_err_str(ib_status)) );
+		return ib_status;
+	}
+
+	/* Initialize CM */
+	ib_status = create_cep_mgr( &gp_al_mgr->obj );
+	if( ib_status != IB_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy( &gp_al_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("create_cep_mgr failed, status = 0x%x.\n", ib_status) );
+		return ib_status;
+	}
+
+	cl_status = cl_event_init( &gp_al_mgr->ual_mgr.sync_event, FALSE );
+	if( cl_status != CL_SUCCESS )
+	{
+		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Everything is ready now. Issue the first callback requests. */
+	if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_misc_file, UAL_GET_MISC_CB_INFO,
+		NULL, 0,
+		&gp_al_mgr->ual_mgr.misc_cb_info, sizeof(misc_cb_ioctl_info_t),
+		NULL, &gp_al_mgr->ual_mgr.misc_ov ) )
+	{
+		if( GetLastError() != ERROR_IO_PENDING )
+		{
+			AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+				("DeviceIoControl for misc callback request returned %d.\n",
+				GetLastError()) );
+			gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+			return IB_ERROR;
+		}
+	}
+
+	//if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO,
+	//	NULL, 0,
+	//	&gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t),
+	//	NULL, &gp_al_mgr->ual_mgr.cm_ov ) )
+	//{
+	//	if( GetLastError() != ERROR_IO_PENDING )
+	//	{
+	//		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+	//			("DeviceIoControl for CM callback request returned %d.\n",
+	//			GetLastError()) );
+	//		gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+	//		return IB_ERROR;
+	//	}
+	//}
+
+	if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,
+		NULL, 0,
+		&gp_al_mgr->ual_mgr.comp_cb_info, sizeof(comp_cb_ioctl_info_t),
+		NULL, &gp_al_mgr->ual_mgr.cq_ov ) )
+	{
+		if( GetLastError() != ERROR_IO_PENDING )
+		{
+			AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+				("DeviceIoControl for CQ callback request returned %d.\n",
+				GetLastError()) );
+			gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL);
+			return IB_ERROR;
+		}
+	}
+
+	/*
+	 * Wait until the associated kernel PnP registration completes. This
+	 * indicates that all known CAs have been reported to user-space
+	 * and are being processed by the PnP manager.
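+	 *
+	 * The wake-up path is the PNP_REC handler in __process_misc_cb
+	 * below, which ends this wait via:
+	 *
+	 *	case IB_PNP_REG_COMPLETE:
+	 *		cl_event_signal( &gp_al_mgr->ual_mgr.sync_event );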
+ */ +#ifdef _DEBUG_ + cl_status = cl_event_wait_on( &gp_al_mgr->ual_mgr.sync_event, + EVENT_NO_TIMEOUT, TRUE ); + CL_ASSERT ( cl_status == CL_SUCCESS); +#else + cl_status = cl_event_wait_on( &gp_al_mgr->ual_mgr.sync_event, + EVENT_NO_TIMEOUT, TRUE ); +#endif + + if( cl_status != CL_SUCCESS ) + { + gp_al_mgr->obj.pfn_destroy(&gp_al_mgr->obj, NULL); + return ib_convert_cl_status( cl_status ); + } + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &gp_al_mgr->obj ); + + AL_EXIT(AL_DBG_MGR); + return IB_SUCCESS; +} + + + +/* + * UAL thread start routines. + */ +// +// +///* Thread to process the asynchronous CM notifications */ +//void +//cm_cb( +// IN DWORD error_code, +// IN DWORD ret_bytes, +// IN LPOVERLAPPED p_ov ) +//{ +// AL_ENTER( AL_DBG_CM ); +// +// UNUSED_PARAM( p_ov ); +// +// if( !error_code && ret_bytes ) +// { +// /* Check the record type and adjust the pointers */ +// /* TBD */ +// __process_cm_cb( &gp_al_mgr->ual_mgr.cm_cb_info ); +// } +// +// if( error_code != ERROR_OPERATION_ABORTED ) +// { +// if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cm_file, UAL_GET_CM_CB_INFO, +// NULL, 0, +// &gp_al_mgr->ual_mgr.cm_cb_info, sizeof(cm_cb_ioctl_info_t), +// NULL, &gp_al_mgr->ual_mgr.cm_ov ) ) +// { +// if( GetLastError() != ERROR_IO_PENDING ) +// { +// AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, +// ("DeviceIoControl for CM callback request returned %d.\n", +// GetLastError()) ); +// } +// } +// } +// +// AL_EXIT( AL_DBG_CM ); +//} + + + +//static void +//__process_cm_cb( +// IN cm_cb_ioctl_info_t* p_cm_cb_info) +//{ +// switch( p_cm_cb_info->rec_type) +// { +// case CM_REQ_REC: +// { +// struct _cm_req_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_req_cb_ioctl_rec; +// +// if (p_ioctl_rec->req_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) +// { +// p_ioctl_rec->req_rec.p_req_pdata = +// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.sidr_req_pdata; +// } +// else +// { +// p_ioctl_rec->req_rec.p_req_pdata = +// (uint8_t *)&p_ioctl_rec->cm_req_pdata_rec.req_pdata; +// } +// ual_cm_req_cb( &p_ioctl_rec->req_rec, &p_ioctl_rec->qp_mod_rtr, +// &p_ioctl_rec->qp_mod_rts, p_ioctl_rec->timeout_ms ); +// break; +// } +// case CM_REP_REC: +// { +// struct _cm_rep_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rep_cb_ioctl_rec; +// +// if (p_ioctl_rec->rep_rec.qp_type == IB_QPT_UNRELIABLE_DGRM) +// { +// p_ioctl_rec->rep_rec.p_rep_pdata = +// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.sidr_rep_pdata; +// } +// else +// { +// p_ioctl_rec->rep_rec.p_rep_pdata = +// (uint8_t *)&p_ioctl_rec->cm_rep_pdata_rec.rep_pdata; +// } +// ual_cm_rep_cb( &p_ioctl_rec->rep_rec, &p_ioctl_rec->qp_mod_rtr, +// &p_ioctl_rec->qp_mod_rts ); +// break; +// } +// case CM_RTU_REC: +// { +// struct _cm_rtu_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rtu_cb_ioctl_rec; +// +// p_ioctl_rec->rtu_rec.p_rtu_pdata = (uint8_t *)&p_ioctl_rec->rtu_pdata; +// ual_cm_rtu_cb( &p_ioctl_rec->rtu_rec ); +// break; +// } +// case CM_REJ_REC: +// { +// struct _cm_rej_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_rej_cb_ioctl_rec; +// +// p_ioctl_rec->rej_rec.p_rej_pdata = +// (uint8_t*)&p_ioctl_rec->rej_pdata; +// p_ioctl_rec->rej_rec.p_ari = +// (uint8_t*)&p_ioctl_rec->ari_pdata; +// ual_cm_rej_cb( &p_ioctl_rec->rej_rec ); +// break; +// } +// case CM_MRA_REC: +// { +// struct _cm_mra_cb_ioctl_rec *p_ioctl_rec = +// &p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec; +// +// p_ioctl_rec->mra_rec.p_mra_pdata = +// (uint8_t*)&p_ioctl_rec->mra_pdata; +// ual_cm_mra_cb( 
&p_cm_cb_info->ioctl_rec.cm_mra_cb_ioctl_rec.mra_rec );
+//		break;
+//	}
+//	case CM_LAP_REC:
+//	{
+//		struct _cm_lap_cb_ioctl_rec *p_ioctl_rec =
+//			&p_cm_cb_info->ioctl_rec.cm_lap_cb_ioctl_rec;
+//
+//		p_ioctl_rec->lap_rec.p_lap_pdata =
+//			(uint8_t *)&p_ioctl_rec->lap_pdata;
+//		ual_cm_lap_cb( &p_ioctl_rec->lap_rec );
+//		break;
+//	}
+//	case CM_APR_REC:
+//	{
+//		struct _cm_apr_cb_ioctl_rec *p_ioctl_rec =
+//			&p_cm_cb_info->ioctl_rec.cm_apr_cb_ioctl_rec;
+//
+//		p_ioctl_rec->apr_rec.p_apr_pdata =
+//			(uint8_t*)&p_ioctl_rec->apr_pdata;
+//		p_ioctl_rec->apr_rec.p_info =
+//			(uint8_t*)&p_ioctl_rec->apr_info;
+//		ual_cm_apr_cb( &p_ioctl_rec->apr_rec );
+//		break;
+//	}
+//	case CM_DREQ_REC:
+//	{
+//		struct _cm_dreq_cb_ioctl_rec *p_ioctl_rec =
+//			&p_cm_cb_info->ioctl_rec.cm_dreq_cb_ioctl_rec;
+//
+//		p_ioctl_rec->dreq_rec.p_dreq_pdata =
+//			(uint8_t*)&p_ioctl_rec->dreq_pdata;
+//		ual_cm_dreq_cb( &p_ioctl_rec->dreq_rec );
+//		break;
+//	}
+//	case CM_DREP_REC:
+//	{
+//		struct _cm_drep_cb_ioctl_rec *p_ioctl_rec =
+//			&p_cm_cb_info->ioctl_rec.cm_drep_cb_ioctl_rec;
+//
+//		p_ioctl_rec->drep_rec.p_drep_pdata =
+//			(uint8_t*)&p_ioctl_rec->drep_pdata;
+//		ual_cm_drep_cb( &p_ioctl_rec->drep_rec );
+//		break;
+//	}
+//	default:
+//		/* Unknown record type - just return */
+//		break;
+//	}
+//}
+//
+//
+//
+static void
+__process_comp_cb(
+	IN				comp_cb_ioctl_info_t*		p_comp_cb_info )
+{
+	ib_cq_handle_t	h_cq;
+	CL_ASSERT( p_comp_cb_info->cq_context );
+	h_cq = (ib_cq_handle_t)(p_comp_cb_info->cq_context);
+
+	if( ref_al_obj( &h_cq->obj ) > 1 )
+	{
+		CL_ASSERT( h_cq->pfn_user_comp_cb );
+		h_cq->pfn_user_comp_cb( h_cq, (void*)h_cq->obj.context );
+	}
+	deref_al_obj( &h_cq->obj );
+}
+
+
+
+/* Callback to process asynchronous completion notifications */
+void
+cq_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov )
+{
+	AL_ENTER( AL_DBG_CQ );
+
+	UNUSED_PARAM( p_ov );
+
+	if( !error_code && ret_bytes )
+	{
+		/* Check the record type and adjust the pointers */
+		/* TBD */
+		__process_comp_cb( &gp_al_mgr->ual_mgr.comp_cb_info );
+	}
+
+	if( error_code != ERROR_OPERATION_ABORTED )
+	{
+		if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_cq_file, UAL_GET_COMP_CB_INFO,
+			NULL, 0,
+			&gp_al_mgr->ual_mgr.comp_cb_info, sizeof(comp_cb_ioctl_info_t),
+			NULL, &gp_al_mgr->ual_mgr.cq_ov ) )
+		{
+			if( GetLastError() != ERROR_IO_PENDING )
+			{
+				AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+					("DeviceIoControl for CQ callback request returned %d.\n",
+					GetLastError()) );
+			}
+		}
+	}
+
+	AL_EXIT( AL_DBG_CQ );
+}
+
+
+
+/* Callback to process miscellaneous asynchronous events */
+void
+misc_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov )
+{
+	AL_ENTER( AL_DBG_MGR );
+
+	UNUSED_PARAM( p_ov );
+
+	if( !error_code && ret_bytes )
+	{
+		/* Check the record type and adjust the pointers */
+		/* TBD */
+		__process_misc_cb( &gp_al_mgr->ual_mgr.misc_cb_info );
+	}
+
+	if( error_code != ERROR_OPERATION_ABORTED )
+	{
+		/* Issue the next request.
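+		 * Each callback re-arms itself this way: unless the last
+		 * completion was ERROR_OPERATION_ABORTED (the file handle was
+		 * closed), another overlapped UAL_GET_MISC_CB_INFO request is
+		 * queued so the kernel always has a buffer to complete into.
+		 * cq_cb above follows the same pattern with UAL_GET_COMP_CB_INFO.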
 */
+		if( !DeviceIoControl( gp_al_mgr->ual_mgr.h_misc_file, UAL_GET_MISC_CB_INFO,
+			NULL, 0,
+			&gp_al_mgr->ual_mgr.misc_cb_info, sizeof(misc_cb_ioctl_info_t),
+			NULL, &gp_al_mgr->ual_mgr.misc_ov ) )
+		{
+			if( GetLastError() != ERROR_IO_PENDING )
+			{
+				AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+					("DeviceIoControl for misc callback request returned %d.\n",
+					GetLastError()) );
+			}
+		}
+	}
+
+	AL_EXIT( AL_DBG_MGR );
+}
+
+
+
+void
+__process_misc_cb(
+	IN				misc_cb_ioctl_info_t*		p_misc_cb_info )
+{
+	switch( p_misc_cb_info->rec_type )
+	{
+	case CA_ERROR_REC:
+	case QP_ERROR_REC:
+	case SRQ_ERROR_REC:
+	case CQ_ERROR_REC:
+	{
+		/* Initiate user-mode asynchronous event processing. */
+		ci_ca_async_event( &p_misc_cb_info->ioctl_rec.event_rec );
+		break;
+	}
+	case MCAST_REC:
+	{
+		ib_mcast_rec_t	mcast_rec;
+		cl_memcpy((void *)&mcast_rec,
+			(void*)&p_misc_cb_info->ioctl_rec.mcast_cb_ioctl_rec,
+			sizeof(ib_mcast_rec_t));
+		mcast_rec.p_member_rec =
+			&p_misc_cb_info->ioctl_rec.mcast_cb_ioctl_rec.member_rec;
+		/******* Call the cb function for app callback *****/
+		break;
+	}
+	case MAD_SEND_REC:
+	{
+		/* We got a send completion. */
+		ib_mad_element_t	*p_element;
+
+		ib_mad_svc_handle_t	h_mad_svc = (ib_mad_svc_handle_t)
+			p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.mad_svc_context;
+
+		/* Copy the data to the user's element. */
+		p_element = p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.p_um_mad;
+		/* Only update the status if a receive failure hasn't already set it. */
+		if( p_element->status != IB_WCS_TIMEOUT_RETRY_ERR )
+		{
+			p_element->status =
+				p_misc_cb_info->ioctl_rec.mad_send_cb_ioctl_rec.wc_status;
+		}
+		p_element->p_next = NULL;
+
+		/* The user's mad_element now has the right data; make the
+		 * client callback.
+		 */
+		h_mad_svc->pfn_user_send_cb( h_mad_svc,
+			(void*)h_mad_svc->obj.context, p_element );
+		break;
+	}
+	case MAD_RECV_REC:
+	{
+		/*
+		 * We've received a MAD. We need to get a user-mode MAD of the
+		 * correct size, then send it down to retrieve the received MAD.
+		 */
+		ual_mad_recv_ioctl_t	ioctl_buf;
+		uintn_t					bytes_ret;
+		cl_status_t				cl_status;
+		ib_api_status_t			status;
+		ib_mad_svc_handle_t		h_mad_svc;
+		ib_mad_element_t		*p_mad = NULL;
+		ib_mad_element_t		*p_send_mad;
+		ib_mad_t				*p_mad_buf = NULL;
+		ib_grh_t				*p_grh = NULL;
+
+		h_mad_svc = (ib_mad_svc_handle_t)
+			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.mad_svc_context;
+
+		p_send_mad =
+			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.p_send_mad;
+
+		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+
+		/*
+		 * Get a MAD large enough to receive the MAD. If we can't get a
+		 * MAD, we still perform the IOCTL so that the kernel will return
+		 * the MAD to its pool, resulting in a dropped MAD.
+		 */
+		status = ib_get_mad( h_mad_svc->obj.p_ci_ca->pool_key,
+			p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.elem_size,
+			&p_mad );
+
+		/*
+		 * Note that we set any associated send MAD's status here
+		 * in case of failure.
+		 */
+		if( status == IB_SUCCESS )
+			al_handoff_mad( (ib_al_handle_t)h_mad_svc->obj.h_al, p_mad );
+		else if( p_send_mad )
+			p_send_mad->status = IB_WCS_TIMEOUT_RETRY_ERR;
+
+		ioctl_buf.in.p_user_mad = p_mad;
+
+		if( p_mad )
+		{
+			/* Save off the pointers since the proxy overwrites the element.
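+			 * The proxy writes the received data back through
+			 * ioctl_buf.in.p_user_mad and clobbers every field of the
+			 * element, so p_mad_buf and p_grh must be restored once the
+			 * IOCTL returns (the same dance as in ual_get_recv_mad in
+			 * ual_mad.c).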
*/ + p_mad_buf = p_mad->p_mad_buf; + p_grh = p_mad->p_grh; + + ioctl_buf.in.p_mad_buf = p_mad_buf; + ioctl_buf.in.p_grh = p_grh; + } + ioctl_buf.in.h_mad = p_misc_cb_info->ioctl_rec.mad_recv_cb_ioctl_rec.h_mad; + + cl_status = do_al_dev_ioctl( UAL_MAD_RECV_COMP, + &ioctl_buf.in, sizeof(ioctl_buf.in), + &ioctl_buf.out, sizeof(ioctl_buf.out), + &bytes_ret ); + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(ioctl_buf.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MAD_RECV_COMP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = ioctl_buf.out.status; + } + if( p_mad ) + { + if( status == IB_SUCCESS ) + { + /* We need to reset MAD data pointers. */ + p_mad->p_mad_buf = p_mad_buf; + p_mad->p_grh = p_grh; + /* Restore the client's send context1 */ + if( p_send_mad ) + p_mad->send_context1 = (void* __ptr64)p_send_mad->context1; + + h_mad_svc->pfn_user_recv_cb( h_mad_svc, + (void*)h_mad_svc->obj.context, p_mad ); + } + else + { + ib_put_mad( p_mad ); + } + } + break; + } + case SVC_REG_REC: + { + break; + } + case QUERY_REC: + { + break; + } + case PNP_REC: + { + ib_pnp_event_t pnp_event; + ib_net64_t ca_guid; + al_ci_ca_t *p_ci_ca; + ual_ca_attr_info_ioctl_t attr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_ca_attr_t *p_old_ca_attr; + ib_api_status_t status; + + pnp_event = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_event; + ca_guid = p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid; + + switch( pnp_event ) + { + case IB_PNP_CA_ADD: + /* Create a new CI CA. */ + create_ci_ca( gh_al, &gp_al_mgr->obj, + p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.ca_guid ); + break; + + case IB_PNP_CA_REMOVE: + /* Destroy the CI CA. */ + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( ca_guid ); + if( !p_ci_ca ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + break; + } + ref_al_obj( &p_ci_ca->obj ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + p_ci_ca->obj.pfn_destroy( &p_ci_ca->obj, NULL ); + break; + + case IB_PNP_PORT_ADD: + case IB_PNP_PORT_REMOVE: + /* Should never get these. */ + break; + + case IB_PNP_REG_COMPLETE: + /* + * Signal that the kernel PnP registration is done, indicating + * that the current system state has been reported to the user. + */ + cl_event_signal( &gp_al_mgr->ual_mgr.sync_event ); + break; + + default: + /* Process the PnP event - most likely a port change event. */ + cl_spinlock_acquire( &gp_al_mgr->obj.lock ); + p_ci_ca = find_ci_ca( ca_guid ); + if( !p_ci_ca ) + { + cl_spinlock_release( &gp_al_mgr->obj.lock ); + break; + } + ref_al_obj( &p_ci_ca->obj ); + cl_spinlock_release( &gp_al_mgr->obj.lock ); + + status = ci_ca_update_attr( p_ci_ca, &p_old_ca_attr ); + if( status != IB_SUCCESS) { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("update CA attributes returned %#x.\n", status) ); + + /* Dereference the CA now. */ + deref_al_obj( &p_ci_ca->obj ); + break; + } + if ( p_old_ca_attr ) + cl_free( p_old_ca_attr ); + + /* + * We need to fetch the cached CA attributes from the proxy. We + * always send down the IOCTL to free the cached attributes. + */ + //p_ca_attr = (ib_ca_attr_t*)cl_zalloc( + // p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.size ); + attr_ioctl.in.p_ca_attr = NULL; + + /* Get the cached attributes from the kernel. 
 */
+			attr_ioctl.in.h_ca_attr =
+				p_misc_cb_info->ioctl_rec.pnp_cb_ioctl_rec.pnp_info.ca.h_ca_attr;
+			cl_status = do_al_dev_ioctl( UAL_GET_CA_ATTR_INFO,
+				&attr_ioctl.in, sizeof(attr_ioctl.in),
+				&attr_ioctl.out, sizeof(attr_ioctl.out),
+				&bytes_ret );
+
+			///* Notify PnP manager of the changes if we have them. */
+			//if( p_ca_attr )
+			//{
+			//	if( cl_status == CL_SUCCESS &&
+			//		attr_ioctl.out.status == IB_SUCCESS )
+			//	{
+			//		pnp_ca_change( p_ci_ca, p_ca_attr );
+			//	}
+			//	else
+			//	{
+			//	}
+
+			//	cl_free( p_ca_attr );
+			//}
+			/* Dereference the CA now. */
+			deref_al_obj( &p_ci_ca->obj );
+			break;
+		}
+
+		break;	/* For PNP_EVENT_REC */
+	}
+	case SUB_REC:
+	{
+		/******* TBD *******/
+		/* No adjustment needed */
+		break;
+	}
+	case REPORT_REC:
+	{
+		ib_report_rec_t	report_rec;
+		cl_memcpy((void *)&report_rec,
+			(void*)&p_misc_cb_info->ioctl_rec.report_cb_ioctl_rec,
+			sizeof(ib_report_rec_t));
+		report_rec.p_notice = &p_misc_cb_info->ioctl_rec.report_cb_ioctl_rec.notice;
+		/******* Call the cb function for app callback *****/
+		break;
+	}
+	default:
+		CL_ASSERT( 0 );
+		break;
+	}
+}
+
+
+
+/*
+ * Create a new instance of the access layer.
+ */
+ib_api_status_t
+ib_open_al(
+		OUT			ib_al_handle_t* const		ph_al )
+{
+	ib_api_status_t		status;
+
+	cl_mutex_acquire( &g_open_close_mutex );
+	status = do_open_al( ph_al );
+	if( status == IB_SUCCESS )
+	{
+		/*
+		 * Bump the open count. Note that we only do this for external
+		 * calls, not the internal ib_open_al call.
+		 */
+		cl_atomic_inc( &g_open_cnt );
+	}
+	cl_mutex_release( &g_open_close_mutex );
+	return status;
+}
+
+
+ib_api_status_t
+ib_close_al(
+	IN		const	ib_al_handle_t				h_al )
+{
+	ib_api_status_t		status;
+
+	cl_mutex_acquire( &g_open_close_mutex );
+	status = do_close_al( h_al );
+	if( status == IB_SUCCESS && !cl_atomic_dec( &g_open_cnt ) )
+		al_cleanup();
+	cl_mutex_release( &g_open_close_mutex );
+	return status;
+}
+
+
+ib_api_status_t
+do_open_al(
+		OUT			ib_al_handle_t* const		ph_al )
+{
+	ib_al_handle_t		h_al;
+	ib_api_status_t		status;
+
+	AL_ENTER(AL_DBG_MGR);
+
+	if( !ph_al )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/*
+	 * Initialize AL if needed.
+	 * This should only occur on the first ib_open_al call.
+	 */
+	if( !gp_al_mgr )
+	{
+		status = al_initialize();
+		if( status != IB_SUCCESS )
+		{
+			al_cleanup();
+			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+				("al_initialize failed, status = %s\n", ib_get_err_str(status) ) );
+			return status;
+		}
+		/*
+		 * Wait for 50ms before returning. This ensures the pnp events are
+		 * delivered before any special qp services are invoked.
+		 */
+		cl_thread_suspend( 50 );
+	}
+
+	/* Allocate an access layer instance. */
+	h_al = (ib_al_handle_t)cl_zalloc( sizeof( ib_al_t ) );
+	if( !h_al )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("cl_zalloc failed\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Construct the instance. */
+	construct_al_obj( &h_al->obj, AL_OBJ_TYPE_H_AL );
+	cl_spinlock_construct( &h_al->mad_lock );
+	cl_qlist_init( &h_al->mad_list );
+	cl_qlist_init( &h_al->key_list );
+	cl_qlist_init( &h_al->query_list );
+	cl_qlist_init( &h_al->cep_list );
+
+	if( cl_spinlock_init( &h_al->mad_lock ) != CL_SUCCESS )
+	{
+		free_al( &h_al->obj );
+		AL_EXIT( AL_DBG_ERROR );
+		return IB_ERROR;
+	}
+
+	/* Initialize the base object.
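+	 * init_al_obj takes an initial reference on the object; every
+	 * success path therefore ends with deref_al_obj once the handle has
+	 * been published (see the end of this function, and create_al_mgr
+	 * above for the same pattern).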
 */
+	status = init_al_obj( &h_al->obj, NULL, FALSE,
+		destroying_al, NULL, free_al );
+	if( status != IB_SUCCESS )
+	{
+		free_al( &h_al->obj );
+		AL_EXIT(AL_DBG_MGR);
+		return status;
+	}
+	attach_al_obj( &gp_al_mgr->obj, &h_al->obj );
+
+	/*
+	 * Self reference the AL instance so that all attached objects
+	 * insert themselves in the instance's handle manager automatically.
+	 */
+	h_al->obj.h_al = h_al;
+
+	/*
+	 * We only maintain a single AL instance in the kernel. It is created
+	 * automatically when the device is opened.
+	 */
+	if( !gh_al )
+	{
+		/* Save a copy of the implicit al handle in a global */
+		gh_al = h_al;
+	}
+
+	/* Return UAL's handle to caller */
+	*ph_al = (ib_al_handle_t)h_al;
+
+	/* Release the reference taken in init_al_obj. */
+	deref_al_obj( &h_al->obj );
+
+	AL_EXIT(AL_DBG_MGR);
+	return IB_SUCCESS;
+}
+
+
+static DWORD WINAPI
+__cb_thread_routine(
+	IN				void						*context )
+{
+	DWORD		ret_bytes, err;
+	OVERLAPPED	*p_ov;
+	ULONG_PTR	key;
+	BOOL		ret;
+
+	AL_ENTER( AL_DBG_MGR );
+
+	UNUSED_PARAM( context );
+
+	do
+	{
+		ret = GetQueuedCompletionStatus( gp_al_mgr->ual_mgr.h_cb_port,
+			&ret_bytes, &key, &p_ov, INFINITE );
+
+		if( ret && !p_ov )
+			break;
+
+		if( !ret )
+			err = GetLastError();
+		else
+			err = 0;
+
+		CL_ASSERT( p_ov );
+		switch( key )
+		{
+		case UAL_BIND_CM:
+			//DebugBreak();
+			/* CM callback. */
+			cm_cb( err, ret_bytes, p_ov );
+			break;
+
+		case UAL_BIND_CQ:
+			/* CQ completion callback. */
+			cq_cb( err, ret_bytes, p_ov );
+			break;
+
+		case UAL_BIND_MISC:
+			/* Misc callback. */
+			misc_cb( err, ret_bytes, p_ov );
+			break;
+
+		case UAL_BIND_PNP:
+			/* PnP callback. */
+			pnp_cb( err, ret_bytes, p_ov );
+			break;
+
+		case UAL_BIND_SA:
+			/* SA callback. */
+			sa_req_cb( err, ret_bytes, p_ov );
+			break;
+
+		case UAL_BIND_DESTROY:
+			if( p_ov )
+				deref_al_obj( (al_obj_t*)p_ov->Pointer );
+			break;
+
+		default:
+			CL_ASSERT( key == UAL_BIND_CM || key == UAL_BIND_CQ ||
+				key == UAL_BIND_MISC || key == UAL_BIND_PNP ||
+				key == UAL_BIND_SA || key == UAL_BIND_DESTROY );
+			break;
+		}
+	} while( !ret || p_ov );
+
+	AL_EXIT( AL_DBG_MGR );
+	ExitThread( 0 );
+}
diff --git a/branches/Ndi/core/al/user/ual_mgr.h b/branches/Ndi/core/al/user/ual_mgr.h
new file mode 100644
index 00000000..dd818670
--- /dev/null
+++ b/branches/Ndi/core/al/user/ual_mgr.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(__UAL_MGR_H__)
+#define __UAL_MGR_H__
+
+#include "ual_support.h"
+#include "al_ci_ca.h"
+#include
+#include
+
+
+typedef struct _ual_mgr
+{
+	ib_al_handle_t		h_al;		/* UAL's internal implicit open_al */
+
+	cl_event_t			sync_event;
+
+	boolean_t			exit_thread;
+
+	/* Pointer vector of threads used to handle async IOCTL completions. */
+	cl_ptr_vector_t		cb_threads;
+	/* Completion port handle that cb threads use to get I/O completions. */
+	HANDLE				h_cb_port;
+
+	/* File to handle CM related notifications */
+	//HANDLE				h_cm_file;
+	//cm_cb_ioctl_info_t	cm_cb_info;
+	//OVERLAPPED			cm_ov;
+
+	/* File to handle work request completions */
+	HANDLE					h_cq_file;
+	comp_cb_ioctl_info_t	comp_cb_info;
+	OVERLAPPED				cq_ov;
+
+	/* File to handle miscellaneous notifications such as:
+	 * cq/qp error
+	 * cq/qp destroyed notification
+	 * MAD notifications, SM, Query
+	 * mcast_join completion
+	 * pnp event
+	 * completion of subscription
+	 * Fabric event
+	 * ca destroyed
+	 * cancellation of subscription, notify req
+	 */
+	HANDLE					h_misc_file;
+	misc_cb_ioctl_info_t	misc_cb_info;
+	OVERLAPPED				misc_ov;
+
+} ual_mgr_t;
+
+
+/* Global mad pool key for internal MADs. */
+ib_pool_key_t g_pool_key;
+
+
+ib_api_status_t
+do_open_al(
+		OUT			ib_al_handle_t* const		ph_al );
+
+ib_api_status_t
+do_close_al(
+	IN		const	ib_al_handle_t				h_al );
+
+
+/*
+ * Prototypes for asynchronous event processing threads
+ */
+void
+ual_cm_thread_start(
+	IN				void						*context);
+
+void
+ual_comp_thread_start(
+	IN				void						*context);
+
+void
+ual_misc_thread_start(
+	IN				void						*context);
+
+
+/* Prototype for creating a file and binding it to the internal thread pool */
+HANDLE
+ual_create_async_file(
+	IN				uint32_t					type );
+
+void
+sa_req_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov );
+
+void
+pnp_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov );
+
+void
+cm_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov );
+
+void
+cq_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov );
+
+void
+misc_cb(
+	IN				DWORD						error_code,
+	IN				DWORD						ret_bytes,
+	IN				LPOVERLAPPED				p_ov );
+
+#endif	// __UAL_MGR_H__
diff --git a/branches/Ndi/core/al/user/ual_mr.c b/branches/Ndi/core/al/user/ual_mr.c
new file mode 100644
index 00000000..c137c7a7
--- /dev/null
+++ b/branches/Ndi/core/al/user/ual_mr.c
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include +#include "al.h" +#include "ual_support.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_mr.tmh" +#endif + +#include "al_mr.h" +#include "al_pd.h" +#include "al_res_mgr.h" + + +ib_api_status_t +ual_reg_mem( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN OUT ib_mr_handle_t h_mr ) +{ + ual_reg_mem_ioctl_t mr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status = IB_ERROR; + + AL_ENTER( AL_DBG_MR ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + mr_ioctl.in.h_pd = h_pd->obj.hdl; + mr_ioctl.in.mem_create = *p_mr_create; + + cl_status = do_al_dev_ioctl( UAL_REG_MR, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_REG_MR IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = mr_ioctl.out.status; + if( status == IB_SUCCESS ) + { + h_mr->obj.hdl = mr_ioctl.out.h_mr; + *p_lkey = mr_ioctl.out.lkey; + *p_rkey = mr_ioctl.out.rkey; + } + } + + AL_EXIT( AL_DBG_MR ); + return status; +} + + +ib_api_status_t +ual_dereg_mr( + IN ib_mr_handle_t h_mr ) +{ + ual_dereg_mr_ioctl_t mr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MR ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + mr_ioctl.in.h_mr = h_mr->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DEREG_MR, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_DEREG_MR IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + AL_EXIT( AL_DBG_MR ); + return mr_ioctl.out.status; +} + + +ib_api_status_t +ual_modify_mr( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ) +{ + ual_rereg_mem_ioctl_t mr_ioctl; + uintn_t bytes_ret; + uint64_t h_al_pd = AL_INVALID_HANDLE; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MR ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + if( h_pd ) + h_al_pd = h_pd->obj.hdl; + + mr_ioctl.in.h_mr = h_mr->obj.hdl; + mr_ioctl.in.mem_mod_mask = mr_mod_mask; + if( p_mr_create ) + mr_ioctl.in.mem_create = *p_mr_create; + + mr_ioctl.in.h_pd = h_al_pd; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_MR, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_MODIFY_MR IOCTL returned %s.\n", + 
CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + else if( mr_ioctl.out.status == IB_SUCCESS ) + { + *p_lkey = mr_ioctl.out.lkey; + *p_rkey = mr_ioctl.out.rkey; + } + + AL_EXIT( AL_DBG_MR ); + return mr_ioctl.out.status; +} + + +ib_api_status_t +ual_query_mr( + IN ib_mr_handle_t h_mr, + OUT ib_mr_attr_t* p_mr_attr ) +{ + ual_query_mr_ioctl_t mr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MR ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + mr_ioctl.in.h_mr = h_mr->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_QUERY_MR, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_QUERY_MR IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + else if( mr_ioctl.out.status == IB_SUCCESS ) + { + *p_mr_attr = mr_ioctl.out.attr; + } + + AL_EXIT( AL_DBG_MR ); + return mr_ioctl.out.status; +} + + +ib_api_status_t +ual_reg_shared( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + IN OUT ib_mr_handle_t h_new_mr ) +{ + ual_reg_shared_ioctl_t mr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_MR ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + mr_ioctl.in.h_mr = h_mr->obj.hdl; + mr_ioctl.in.h_pd = h_pd->obj.hdl; + mr_ioctl.in.access_ctrl = access_ctrl; + mr_ioctl.in.vaddr = *p_vaddr; + + cl_status = do_al_dev_ioctl( UAL_REG_SHARED, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_REG_SHARED IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + else if( mr_ioctl.out.status == IB_SUCCESS ) + { + h_new_mr->obj.hdl = mr_ioctl.out.h_new_mr; + *p_lkey = mr_ioctl.out.lkey; + *p_rkey = mr_ioctl.out.rkey; + *p_vaddr = mr_ioctl.out.vaddr; + } + + AL_EXIT( AL_DBG_MR ); + return mr_ioctl.out.status; +} + + +ib_api_status_t +ib_reg_shmid( + IN const ib_pd_handle_t h_pd, + IN const ib_shmid_t shmid, + IN const ib_mr_create_t* const p_mr_create, + OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ) +{ + ual_reg_shmid_ioctl_t mr_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + ib_mr_handle_t h_mr; + + AL_ENTER( AL_DBG_MR ); + + if( AL_OBJ_INVALID_HANDLE( h_pd, AL_OBJ_TYPE_H_PD ) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PD_HANDLE\n") ); + return IB_INVALID_PD_HANDLE; + } + if( !p_mr_create || !p_vaddr || !p_lkey || !p_rkey || !ph_mr ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a user mode memory handle */ + h_mr = alloc_mr(); + if( !h_mr ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("unable to allocate memory handle\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Attach this under the pd */ + attach_al_obj( &h_pd->obj, &h_mr->obj ); + + /* Clear the mr_ioctl */ + cl_memclr( &mr_ioctl, sizeof(mr_ioctl) ); + + cl_memcpy( mr_ioctl.in.shmid, shmid, sizeof(ib_shmid_t) ); + mr_ioctl.in.mr_create = *p_mr_create; + mr_ioctl.in.h_pd = h_pd->obj.hdl; + + cl_status = 
do_al_dev_ioctl( UAL_REG_SHMID, + &mr_ioctl.in, sizeof(mr_ioctl.in), &mr_ioctl.out, sizeof(mr_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mr_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_REG_SHMID IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = mr_ioctl.out.status; + } + + if( IB_SUCCESS == status ) + { + *p_vaddr = mr_ioctl.out.vaddr; + *p_lkey = mr_ioctl.out.lkey; + *p_rkey = mr_ioctl.out.rkey; + + /* Store the kernel handle and return the user handle */ + h_mr->obj.hdl = mr_ioctl.out.h_mr; + *ph_mr = h_mr; + + /* Release the reference taken in alloc_mr. */ + deref_al_obj( &h_mr->obj ); + } + else + { + h_mr->obj.pfn_destroy( &h_mr->obj, NULL ); + } + + AL_EXIT( AL_DBG_MR ); + return status; +} + diff --git a/branches/Ndi/core/al/user/ual_mr.h b/branches/Ndi/core/al/user/ual_mr.h new file mode 100644 index 00000000..fc7ccd2c --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mr.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__UAL_MR_H__) +#define __UAL_MR_H__ + +#endif /* __UAL_MR_H__ */ diff --git a/branches/Ndi/core/al/user/ual_mw.c b/branches/Ndi/core/al/user/ual_mw.c new file mode 100644 index 00000000..3fefe0e0 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mw.c @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include "ual_support.h" +#include "al.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_mw.h" +#include "al_mr.h" + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_mw.tmh" +#endif + +ib_api_status_t +ual_create_mw( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + IN OUT ib_mw_handle_t h_mw ) +{ + ual_create_mw_ioctl_t mw_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_pd->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_MW ); + + /* Clear the mw_ioctl */ + cl_memclr( &mw_ioctl, sizeof(mw_ioctl) ); + + /* Pre call to the UVP library */ + if( h_pd->h_ci_pd && uvp_intf.pre_create_mw ) + { + status = uvp_intf.pre_create_mw( + h_pd->h_ci_pd, &mw_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_MW ); + return status; + } + } + + mw_ioctl.in.h_pd = h_pd->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_CREATE_MW, + &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mw_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CREATE_MW IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = mw_ioctl.out.status; + if( status == IB_SUCCESS ) + { + h_mw->obj.hdl = mw_ioctl.out.h_mw; + *p_rkey = mw_ioctl.out.rkey; + } + } + + /* Post uvp call */ + if( h_pd->h_ci_pd && uvp_intf.post_create_mw ) + { + uvp_intf.post_create_mw( h_pd->h_ci_pd, status, + mw_ioctl.out.rkey, &h_mw->h_ci_mw, + &mw_ioctl.out.umv_buf ); + } + + + + AL_EXIT( AL_DBG_MW ); + return status; +} + + +ib_api_status_t +ual_destroy_mw( + IN ib_mw_handle_t h_mw ) +{ + ual_destroy_mw_ioctl_t mw_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_mw->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_MW ); + + /* Clear the mw_ioctl */ + cl_memclr( &mw_ioctl, sizeof(mw_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid handle */ + if( h_mw->h_ci_mw && uvp_intf.pre_destroy_mw ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_destroy_mw( h_mw->h_ci_mw ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_MW ); + return status; + } + } + + mw_ioctl.in.h_mw = h_mw->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DESTROY_MW, + &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mw_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DESTROY_MW IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = mw_ioctl.out.status; + } + + /* Post uvp call */ + if( h_mw->h_ci_mw && uvp_intf.post_destroy_mw ) + uvp_intf.post_destroy_mw( h_mw->h_ci_mw, status ); + + if( status == IB_SUCCESS ) + { + h_mw->obj.hdl = AL_INVALID_HANDLE; + h_mw->h_ci_mw = NULL; + } + + AL_EXIT( AL_DBG_MW 
); + return status; +} + + +ib_api_status_t +ual_query_mw( + IN ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* ph_pd, + OUT net32_t* const p_rkey ) +{ + ual_query_mw_ioctl_t mw_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_mw->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_MW ); + /* Clear the mw_ioctl */ + cl_memclr( &mw_ioctl, sizeof(mw_ioctl) ); + + /* Pre call to the UVP library */ + if( h_mw->h_ci_mw && uvp_intf.pre_query_mw ) + { + status = uvp_intf.pre_query_mw( + h_mw->h_ci_mw, &mw_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_MW ); + return status; + } + } + + mw_ioctl.in.h_mw = h_mw->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_QUERY_MW, + &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mw_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_QUERY_MW IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = mw_ioctl.out.status; + } + + if( IB_SUCCESS == status ) + { + *p_rkey = mw_ioctl.out.rkey; + *ph_pd = ((ib_pd_handle_t)mw_ioctl.out.pd_context)->h_ci_pd; + } + + /* Post uvp call */ + if( h_mw->h_ci_mw && uvp_intf.post_query_mw ) + { + uvp_intf.post_query_mw( h_mw->h_ci_mw, status, + mw_ioctl.out.rkey, ph_pd, &mw_ioctl.out.umv_buf ); + } + + AL_EXIT( AL_DBG_MW ); + return status; +} + + +ib_api_status_t +ual_bind_mw( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* p_mw_bind, + OUT net32_t* const p_rkey ) +{ + ual_bind_mw_ioctl_t mw_ioctl; + cl_status_t cl_status; + ib_api_status_t status; + uintn_t bytes_ret; + ib_mr_handle_t h_user_mr; + /* + * Check whether a vendor library is available and the + * bind_mw call is implemented. If so, the call terminates + * at the UVP library. If not, pass this to kernel. + */ + uvp_interface_t uvp_intf = h_mw->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_MW ); + + /* Clear the mw_ioctl */ + cl_memclr( &mw_ioctl, sizeof(mw_ioctl) ); + + /* Call to the UVP library */ + if( h_mw->h_ci_mw && h_qp->h_ci_qp && uvp_intf.bind_mw ) + { + h_user_mr = p_mw_bind->h_mr; + p_mw_bind->h_mr = p_mw_bind->h_mr->h_ci_mr; + status = uvp_intf.bind_mw( h_mw->h_ci_mw, + h_qp->h_ci_qp, p_mw_bind, p_rkey); + p_mw_bind->h_mr = h_user_mr; + AL_EXIT( AL_DBG_MW ); + return status; + } + + mw_ioctl.in.h_mw = h_mw->obj.hdl; + mw_ioctl.in.h_qp = h_qp->obj.hdl; + mw_ioctl.in.mw_bind = *p_mw_bind; + mw_ioctl.in.mw_bind.h_mr = (ib_mr_handle_t)p_mw_bind->h_mr->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_BIND_MW, + &mw_ioctl.in, sizeof(mw_ioctl.in), &mw_ioctl.out, sizeof(mw_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(mw_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_BIND_MW IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else if( mw_ioctl.out.status == IB_SUCCESS ) + { + *p_rkey = mw_ioctl.out.r_key; + } + + AL_EXIT( AL_DBG_MW ); + return mw_ioctl.out.status; +} diff --git a/branches/Ndi/core/al/user/ual_mw.h b/branches/Ndi/core/al/user/ual_mw.h new file mode 100644 index 00000000..78a047d9 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_mw.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__UAL_MW_H__) +#define __UAL_MW_H__ + +#endif /* __UAL_MW_H__ */ diff --git a/branches/Ndi/core/al/user/ual_pd.c b/branches/Ndi/core/al/user/ual_pd.c new file mode 100644 index 00000000..b069591b --- /dev/null +++ b/branches/Ndi/core/al/user/ual_pd.c @@ -0,0 +1,169 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "ual_support.h" +#include "al.h" +#include "al_ca.h" +#include "al_pd.h" + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_pd.tmh" +#endif + +ib_api_status_t +ual_allocate_pd( + IN ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN OUT ib_pd_handle_t h_pd ) +{ + /* The first two arguments are probably not needed */ + ual_alloc_pd_ioctl_t pd_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + ib_ca_handle_t h_uvp_ca; + uvp_interface_t uvp_intf = h_ca->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_PD ); + + /* Clear the pd_ioctl */ + cl_memclr( &pd_ioctl, sizeof(pd_ioctl) ); + + h_uvp_ca = h_ca->obj.p_ci_ca->h_ci_ca; + + /* Pre call to the UVP library */ + if( pd_type != IB_PDT_ALIAS && h_uvp_ca && uvp_intf.pre_allocate_pd ) + { + status = uvp_intf.pre_allocate_pd( h_uvp_ca, &pd_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_PD ); + return status; + } + } + + pd_ioctl.in.h_ca = h_ca->obj.p_ci_ca->obj.hdl; + pd_ioctl.in.type = pd_type; + pd_ioctl.in.context = h_pd; + + cl_status = do_al_dev_ioctl( UAL_ALLOC_PD, + &pd_ioctl.in, sizeof(pd_ioctl.in), &pd_ioctl.out, sizeof(pd_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(pd_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_ALLOC_PD IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = pd_ioctl.out.status; + if( status == IB_SUCCESS ) + h_pd->obj.hdl = pd_ioctl.out.h_pd; + + } + + + + /* Post uvp call */ + if( pd_type != IB_PDT_ALIAS && h_uvp_ca && uvp_intf.post_allocate_pd ) + { + uvp_intf.post_allocate_pd( h_uvp_ca, status, + &h_pd->h_ci_pd, &pd_ioctl.out.umv_buf ); + } + + AL_EXIT( AL_DBG_PD ); + return status; +} + + +ib_api_status_t +ual_deallocate_pd( + IN ib_pd_handle_t h_pd ) +{ + ual_dealloc_pd_ioctl_t pd_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_pd->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_PD ); + + /* Clear the pd_ioctl */ + cl_memclr( &pd_ioctl, sizeof(pd_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid PD handle */ + if( h_pd->h_ci_pd && uvp_intf.pre_deallocate_pd ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_deallocate_pd( h_pd->h_ci_pd ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_PD ); + return status; + } + } + + pd_ioctl.in.h_pd = h_pd->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_DEALLOC_PD, + &pd_ioctl.in, sizeof(pd_ioctl.in), &pd_ioctl.out, sizeof(pd_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(pd_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DEALLOC_PD IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = pd_ioctl.out.status; + } + + /* Call the vendor's post_deallocate_pd */ + if( h_pd->h_ci_pd && uvp_intf.post_deallocate_pd ) + uvp_intf.post_deallocate_pd( h_pd->h_ci_pd, status ); + + + AL_EXIT( AL_DBG_PD ); + return status; +} diff --git a/branches/Ndi/core/al/user/ual_pnp.c b/branches/Ndi/core/al/user/ual_pnp.c new file mode 100644 index 00000000..624c3e60 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_pnp.c @@ -0,0 +1,568 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Handles all PnP-related interaction for user-mode: + */ +#include + +#include "al.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_pnp.tmh" +#endif + +#include "al_mgr.h" +#include "al_pnp.h" +#include "ib_common.h" +#include "al_ioc_pnp.h" + + +#define PNP_CA_VECTOR_MIN 0 +#define PNP_CA_VECTOR_GROW 10 + + +/* ib_pnp_event_t values converted to text strings. */ +char* ib_pnp_event_str[] = +{ + "IB_PNP_CA_ADD", + "IB_PNP_CA_REMOVE", + "IB_PNP_PORT_ADD", + "IB_PNP_PORT_REMOVE", + "IB_PNP_PORT_INIT", + "IB_PNP_PORT_ARMED", + "IB_PNP_PORT_ACTIVE", + "IB_PNP_PORT_DOWN", + "IB_PNP_PKEY_CHANGE", + "IB_PNP_SM_CHANGE", + "IB_PNP_GID_CHANGE", + "IB_PNP_LID_CHANGE", + "IB_PNP_SUBNET_TIMEOUT_CHANGE", + "IB_PNP_IOU_ADD", + "IB_PNP_IOU_REMOVE", + "IB_PNP_IOC_ADD", + "IB_PNP_IOC_REMOVE", + "IB_PNP_IOC_PATH_ADD", + "IB_PNP_IOC_PATH_REMOVE" +}; + + +/* PnP Manager structure. */ +typedef struct _ual_pnp_mgr +{ + al_obj_t obj; + + /* File handle on which to issue asynchronous PnP IOCTLs. */ + HANDLE h_file; + HANDLE h_destroy_file; + +} ual_pnp_mgr_t; + + +/* + * PnP Manager instance, creation, destruction. + */ + +/* Global instance of the PnP manager. */ +ual_pnp_mgr_t *gp_pnp = NULL; + + +/* + * Declarations. 
+ */ +static void +__pnp_free( + IN al_obj_t *p_obj ); + +static void +__pnp_async_cb( + IN cl_async_proc_item_t *p_item ); + + +ib_api_status_t +create_pnp( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( gp_pnp == NULL ); + + gp_pnp = (ual_pnp_mgr_t*)cl_zalloc( sizeof(ual_pnp_mgr_t) ); + if( !gp_pnp ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed to allocate PnP manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + construct_al_obj( &gp_pnp->obj, AL_OBJ_TYPE_PNP_MGR ); + gp_pnp->h_file = INVALID_HANDLE_VALUE; + gp_pnp->h_destroy_file = INVALID_HANDLE_VALUE; + + status = init_al_obj( &gp_pnp->obj, NULL, TRUE, NULL, NULL, __pnp_free ); + if( status != IB_SUCCESS ) + { + __pnp_free( &gp_pnp->obj ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) ); + return status; + } + status = attach_al_obj( p_parent_obj, &gp_pnp->obj ); + if( status != IB_SUCCESS ) + { + gp_pnp->obj.pfn_destroy( &gp_pnp->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Create a file object on which to issue all PNP requests. */ + gp_pnp->h_file = ual_create_async_file( UAL_BIND_PNP ); + if( gp_pnp->h_file == INVALID_HANDLE_VALUE ) + { + gp_pnp->obj.pfn_destroy( &gp_pnp->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ual_create_async_file for UAL_BIND_PNP returned %d.\n", + GetLastError()) ); + return IB_ERROR; + } + + /* Create a file object on which to issue all dereg request. */ + gp_pnp->h_destroy_file = ual_create_async_file( UAL_BIND_DESTROY ); + if( gp_pnp->h_destroy_file == INVALID_HANDLE_VALUE ) + { + gp_pnp->obj.pfn_destroy( &gp_pnp->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("ual_create_async_file for UAL_BIND_DESTROY returned %d.\n", + GetLastError()) ); + return IB_ERROR; + } + + /* Release the reference taken in init_al_obj. */ + deref_al_obj( &gp_pnp->obj ); + + AL_EXIT( AL_DBG_PNP ); + return( IB_SUCCESS ); +} + + +static void +__pnp_free( + IN al_obj_t *p_obj ) +{ + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( PARENT_STRUCT( p_obj, ual_pnp_mgr_t, obj ) == gp_pnp ); + UNUSED_PARAM( p_obj ); + + if( gp_pnp->h_file != INVALID_HANDLE_VALUE ) + CloseHandle( gp_pnp->h_file ); + if( gp_pnp->h_destroy_file != INVALID_HANDLE_VALUE ) + CloseHandle( gp_pnp->h_destroy_file ); + + destroy_al_obj( &gp_pnp->obj ); + cl_free( gp_pnp ); + gp_pnp = NULL; + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__pnp_reg_destroying( + IN al_obj_t *p_obj ) +{ + al_pnp_t *p_reg; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj ); + + /* Reference the registration entry while we queue it to our PnP thread. */ + ref_al_obj( &p_reg->obj ); + + /* + * Store the pointer to the object so we can dereference it + * in the completion callback. + */ + p_reg->destroy_ov.Pointer = &p_reg->obj; + + if( !DeviceIoControl( gp_pnp->h_destroy_file, UAL_DEREG_PNP, + &p_reg->obj.hdl, sizeof(uint64_t), NULL, 0, + NULL, &p_reg->destroy_ov ) ) + { + if( GetLastError() != ERROR_IO_PENDING ) + deref_al_obj( &p_reg->obj ); + } + else + { + CL_ASSERT( GetLastError() == ERROR_IO_PENDING ); + deref_al_obj( &p_reg->obj ); + } + + AL_EXIT( AL_DBG_PNP ); +} + + +static void +__pnp_reg_free( + IN al_obj_t *p_obj ) +{ + al_pnp_t *p_reg; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_obj, al_pnp_t, obj ); + + /* Dereference the PnP manager. 
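+ * (Each registration took a reference on gp_pnp in ib_reg_pnp via ref_al_obj; dropping it here is what finally allows the manager to be destroyed after its last registration is freed.)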
*/ + deref_al_obj( &gp_pnp->obj ); + + /* Free the registration structure. */ + destroy_al_obj( &p_reg->obj ); + cl_free( p_reg ); + + AL_EXIT( AL_DBG_PNP ); +} + + +ib_api_status_t +ib_reg_pnp( + IN const ib_al_handle_t h_al, + IN const ib_pnp_req_t* const p_pnp_req, + OUT ib_pnp_handle_t* const ph_pnp ) +{ + ib_api_status_t status; + al_pnp_t* p_reg; + ual_reg_pnp_ioctl_in_t in; + + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !p_pnp_req || !ph_pnp ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_PARAMETER\n") ); + return IB_INVALID_PARAMETER; + } + + /* Allocate a new registration info structure. */ + p_reg = (al_pnp_t*)cl_zalloc( sizeof(al_pnp_t) ); + if( !p_reg ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed to cl_zalloc al_pnp_t (%I64d bytes).\n", + sizeof(al_pnp_t)) ); + return( IB_INSUFFICIENT_MEMORY ); + } + + /* Initialize the registration info. */ + construct_al_obj( &p_reg->obj, AL_OBJ_TYPE_H_PNP ); + p_reg->async_item.pfn_callback = __pnp_async_cb; + + status = init_al_obj( &p_reg->obj, p_pnp_req->pnp_context, TRUE, + __pnp_reg_destroying, NULL, __pnp_reg_free ); + if( status != IB_SUCCESS ) + { + __pnp_reg_free( &p_reg->obj ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("init_al_obj() failed with status %s.\n", ib_get_err_str(status)) ); + return( status ); + } + status = attach_al_obj( &h_al->obj, &p_reg->obj ); + if( status != IB_SUCCESS ) + { + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("attach_al_obj returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + /* Reference the PnP Manager. */ + ref_al_obj( &gp_pnp->obj ); + + /* Copy the request information. */ + p_reg->pfn_pnp_cb = p_pnp_req->pfn_pnp_cb; + + in.pnp_class = p_pnp_req->pnp_class; + in.p_status = &status; + in.p_hdl = &p_reg->obj.hdl; + + if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + { + in.sync_event = CreateEvent( NULL, FALSE, FALSE, NULL ); + if( !in.sync_event ) + { + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("CreateEvent returned %d\n", GetLastError()) ); + return IB_ERROR; + } + } + + status = IB_ERROR; + + /* The IOCTL handler will update status as needed. */ + DeviceIoControl( gp_pnp->h_file, UAL_REG_PNP, + &in, sizeof(in), &p_reg->rearm, sizeof(p_reg->rearm), + NULL, &p_reg->ov ); + + if( status == IB_SUCCESS ) + { + /* Set the user handle. */ + *ph_pnp = p_reg; + + /* + * Note that we don't release the reference taken by init_al_obj while + * any IOCTLs are in progress. 
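+ * (It appears to be released on the completion path; see the deref_al_obj calls in __pnp_async_cb and pnp_cb when the rearm IOCTL fails or is cancelled.)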
+ */ + + if( pnp_get_flag( p_pnp_req->pnp_class ) & IB_PNP_FLAG_REG_SYNC ) + { + WaitForSingleObject( in.sync_event, INFINITE ); + CloseHandle( in.sync_event ); + } + } + else + { + p_reg->obj.pfn_destroy( &p_reg->obj, NULL ); + } + + AL_EXIT( AL_DBG_PNP ); + return status; +} + + +ib_api_status_t +ib_dereg_pnp( + IN const ib_pnp_handle_t h_pnp, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ) +{ + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_pnp, AL_OBJ_TYPE_H_PNP ) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + ref_al_obj( &h_pnp->obj ); + h_pnp->obj.pfn_destroy( &h_pnp->obj, pfn_destroy_cb ); + + AL_EXIT( AL_DBG_PNP ); + return( IB_SUCCESS ); +} + + +ib_api_status_t +ib_reject_ioc( + IN const ib_al_handle_t h_al, + IN const ib_pnp_handle_t h_event ) +{ + AL_ENTER( AL_DBG_PNP ); + + if( AL_OBJ_INVALID_HANDLE( h_al, AL_OBJ_TYPE_H_AL ) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_AL_HANDLE\n") ); + return IB_INVALID_AL_HANDLE; + } + if( !h_event ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("IB_INVALID_HANDLE\n") ); + return IB_INVALID_HANDLE; + } + + AL_EXIT( AL_DBG_PNP ); + return IB_UNSUPPORTED; +} + + +static void +__fix_port_attr( + IN OUT ib_pnp_port_rec_t* const p_port_rec ) +{ + uintn_t offset; + + if( !p_port_rec->p_ca_attr ) + return; + + offset = (uintn_t)(p_port_rec + 1) - + (uintn_t)(ib_ca_attr_t*)p_port_rec->p_ca_attr; + ib_fixup_ca_attr( (ib_ca_attr_t*)(p_port_rec + 1), p_port_rec->p_ca_attr ); + p_port_rec->p_ca_attr = (ib_ca_attr_t*)(size_t)(p_port_rec + 1); + p_port_rec->p_port_attr = (ib_port_attr_t* __ptr64) + (((uint8_t* __ptr64)p_port_rec->p_port_attr) + offset); +} + + +static void +__fix_ca_attr( + IN OUT ib_pnp_ca_rec_t* const p_ca_rec ) +{ + if( !p_ca_rec->p_ca_attr ) + return; + + ib_fixup_ca_attr( (ib_ca_attr_t*)(p_ca_rec + 1), p_ca_rec->p_ca_attr ); + p_ca_rec->p_ca_attr = (ib_ca_attr_t*)(size_t)(p_ca_rec + 1); +} + + +static void +__pnp_async_cb( + IN cl_async_proc_item_t *p_item ) +{ + al_pnp_t *p_reg; + ual_rearm_pnp_ioctl_in_t in; + cl_status_t status; + size_t bytes_ret; + ib_pnp_rec_t *p_pnp_rec; + + AL_ENTER( AL_DBG_PNP ); + + p_reg = PARENT_STRUCT( p_item, al_pnp_t, async_item ); + in.pnp_hdl = p_reg->obj.hdl; + in.last_evt_hdl = p_reg->rearm.evt_hdl; + + if( p_reg->rearm.evt_size ) + { + /* Retrieve the PnP event and report it to the client. */ + CL_ASSERT( p_reg->rearm.evt_size >= sizeof(ib_pnp_rec_t) ); + + p_pnp_rec = (ib_pnp_rec_t*)cl_malloc( p_reg->rearm.evt_size ); + if( p_pnp_rec ) + { + status = do_al_dev_ioctl( UAL_POLL_PNP, + &p_reg->rearm.evt_hdl, sizeof(uint64_t), + p_pnp_rec, p_reg->rearm.evt_size, &bytes_ret ); + + if( status == CL_SUCCESS ) + { + CL_ASSERT( bytes_ret == p_reg->rearm.evt_size ); + /* Fixup pointers. 
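+ * The event buffer carries a flattened copy of the CA attributes immediately after the pnp record (at p_port_rec + 1 / p_ca_rec + 1), so the embedded pointers still hold addresses from the original buffer. The helpers called just below rebase them, roughly: offset = (new base) - (old p_ca_attr); p_port_attr += offset (a sketch of the arithmetic in __fix_port_attr and __fix_ca_attr above).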
*/ + switch( pnp_get_class( p_pnp_rec->pnp_event ) ) + { + case IB_PNP_PORT: + __fix_port_attr( (ib_pnp_port_rec_t*)p_pnp_rec ); + break; + case IB_PNP_CA: + __fix_ca_attr( (ib_pnp_ca_rec_t*)p_pnp_rec ); + break; + default: + break; + } + p_pnp_rec->pnp_context = (void*)p_reg->obj.context; + in.last_evt_status = p_reg->pfn_pnp_cb( p_pnp_rec ); + in.last_evt_context = p_pnp_rec->context; + } + else + { + in.last_evt_status = IB_SUCCESS; + in.last_evt_context = NULL; + } + + if( p_pnp_rec ) + cl_free( p_pnp_rec ); + } + else + { + in.last_evt_status = IB_SUCCESS; + in.last_evt_context = NULL; + } + } + else + { + in.last_evt_status = IB_SUCCESS; + in.last_evt_context = NULL; + } + + /* Request the next PnP event. */ + DeviceIoControl( gp_pnp->h_file, UAL_REARM_PNP, + &in, sizeof(in), &p_reg->rearm, sizeof(p_reg->rearm), + NULL, &p_reg->ov ); + + if( GetLastError() != ERROR_IO_PENDING ) + { + /* Release the reference taken for the IOCTL. */ + deref_al_obj( &p_reg->obj ); + } + + CL_ASSERT( GetLastError() == ERROR_IO_PENDING || + GetLastError() == ERROR_CANCELLED || + GetLastError() == ERROR_OPERATION_ABORTED ); + + AL_EXIT( AL_DBG_PNP ); +} + + +void CALLBACK +pnp_cb( + IN DWORD error_code, + IN DWORD ret_bytes, + IN LPOVERLAPPED p_ov ) +{ + al_pnp_t *p_reg; + + AL_ENTER( AL_DBG_PNP ); + + CL_ASSERT( p_ov ); + + p_reg = PARENT_STRUCT( p_ov, al_pnp_t, ov ); + if( error_code || ret_bytes != sizeof(p_reg->rearm) ) + { + if( error_code == ERROR_CANCELLED || + error_code == ERROR_OPERATION_ABORTED || + p_reg->obj.state != CL_INITIALIZED ) + { + /* Release the reference taken for the IOCTL. */ + deref_al_obj( &p_reg->obj ); + AL_EXIT( AL_DBG_PNP ); + return; + } + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("IOCTL failed with error code %d\n", + error_code) ); + p_reg->rearm.evt_hdl = AL_INVALID_HANDLE; + p_reg->rearm.evt_size = 0; + } + + cl_async_proc_queue( gp_async_pnp_mgr, &p_reg->async_item ); + + AL_EXIT( AL_DBG_PNP ); +} diff --git a/branches/Ndi/core/al/user/ual_qp.c b/branches/Ndi/core/al/user/ual_qp.c new file mode 100644 index 00000000..0ff64866 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_qp.c @@ -0,0 +1,662 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "al.h" +#include "al_av.h" +#include "al_ci_ca.h" +#include "al_cq.h" +#include "al_pd.h" +#include "al_qp.h" +#include "al_srq.h" +#include "ual_mad.h" +#include "ual_support.h" + + +#include "al_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_qp.tmh" +#endif +ib_api_status_t +ual_post_send( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t **pp_send_failure OPTIONAL ) +{ + uintn_t failed_index; + uintn_t bytes_ret; + uint32_t num_wr = 0; + uint32_t num_ds = 0; + ib_send_wr_t *p_wr; + ib_local_ds_t *p_ds; + ual_post_send_ioctl_t *p_qp_ioctl; + size_t ioctl_buf_sz; + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + /* + * The work requests are supplied as a linked list, but the kernel + * expects a flat array, so first walk the list to determine how much + * memory we need to allocate. + */ + for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next ) + { + num_wr++; + + /* Check for overflow */ + if( !num_wr ) + break; + if( num_ds > num_ds + p_wr->num_ds ) + { + num_wr = 0; + break; + } + + num_ds += p_wr->num_ds; + } + if( !num_wr ) + { + AL_EXIT( AL_DBG_QP ); + return IB_INVALID_PARAMETER; + } + + ioctl_buf_sz = sizeof(ual_post_send_ioctl_t); + ioctl_buf_sz += sizeof(ib_send_wr_t) * (num_wr - 1); + ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds; + + p_qp_ioctl = (ual_post_send_ioctl_t*)cl_zalloc( ioctl_buf_sz ); + if( !p_qp_ioctl ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed to allocate IOCTL buffer.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + p_ds = (ib_local_ds_t*)&p_qp_ioctl->in.send_wr[num_wr]; + + /* Now populate the ioctl buffer and send down the ioctl */ + p_qp_ioctl->in.h_qp = h_qp->obj.hdl; + p_qp_ioctl->in.num_wr = num_wr; + p_qp_ioctl->in.num_ds = num_ds; + num_wr = 0; + for( p_wr = p_send_wr; p_wr; p_wr = p_wr->p_next ) + { + p_qp_ioctl->in.send_wr[num_wr++] = *p_wr; + cl_memcpy( + p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds ); + p_ds += p_wr->num_ds; + } + + cl_status = do_al_dev_ioctl( UAL_POST_SEND, + &p_qp_ioctl->in, ioctl_buf_sz, + &p_qp_ioctl->out, sizeof(p_qp_ioctl->out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_qp_ioctl->out) ) + { + if( pp_send_failure ) + *pp_send_failure = p_send_wr; + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_POST_SEND IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = p_qp_ioctl->out.status; + + if( status != IB_SUCCESS && pp_send_failure ) + { + /* Get the failed index */ + failed_index = num_wr - p_qp_ioctl->out.failed_cnt; + p_wr = p_send_wr; + while( failed_index-- ) + p_wr = p_wr->p_next; + + *pp_send_failure = p_wr; + } + } + + cl_free( p_qp_ioctl ); + AL_EXIT( AL_DBG_QP ); + return status; +} + + +ib_api_status_t +ual_post_recv( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ) +{ + uintn_t failed_index; + uintn_t bytes_ret; + uint32_t num_wr = 0; + uint32_t num_ds = 0; + ib_recv_wr_t* p_wr; + ib_local_ds_t* p_ds; + ual_post_recv_ioctl_t *p_qp_ioctl; + size_t ioctl_buf_sz; + cl_status_t cl_status; + ib_api_status_t status; + + AL_ENTER( AL_DBG_QP ); + + /* + * The work requests are supplied as a linked list, but the kernel + * expects a flat array, so first walk the list to determine how much + * memory we need to allocate.
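+ * Layout of the variable-length buffer (a sketch implied by the ioctl_buf_sz arithmetic below): the ual_post_recv_ioctl_t header embeds recv_wr[0], the remaining (num_wr - 1) ib_recv_wr_t entries follow it, and all num_ds ib_local_ds_t scatter entries are packed after the last work request, which is where p_ds is pointed.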
+ */ + for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next ) + { + num_wr++; + + /* Check for overflow */ + if( !num_wr ) + break; + if( num_ds > num_ds + p_wr->num_ds ) + { + num_wr = 0; + break; + } + + num_ds += p_wr->num_ds; + } + if( !num_wr ) + { + AL_EXIT( AL_DBG_QP ); + return IB_INVALID_PARAMETER; + } + + ioctl_buf_sz = sizeof(ual_post_recv_ioctl_t); + ioctl_buf_sz += sizeof(ib_recv_wr_t) * (num_wr - 1); + ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds; + + p_qp_ioctl = (ual_post_recv_ioctl_t*)cl_zalloc( ioctl_buf_sz ); + if( !p_qp_ioctl ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("Failed to allocate IOCTL buffer.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + p_ds = (ib_local_ds_t*)&p_qp_ioctl->in.recv_wr[num_wr]; + + /* Now populate the ioctl buffer and send down the ioctl */ + p_qp_ioctl->in.h_qp = h_qp->obj.hdl; + p_qp_ioctl->in.num_wr = num_wr; + p_qp_ioctl->in.num_ds = num_ds; + num_wr = 0; + for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next ) + { + p_qp_ioctl->in.recv_wr[num_wr++] = *p_wr; + cl_memcpy( + p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds ); + p_ds += p_wr->num_ds; + } + + cl_status = do_al_dev_ioctl( UAL_POST_RECV, + &p_qp_ioctl->in, ioctl_buf_sz, + &p_qp_ioctl->out, sizeof(p_qp_ioctl->out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_qp_ioctl->out) ) + { + if( pp_recv_failure ) + *pp_recv_failure = p_recv_wr; + + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_POST_RECV IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = p_qp_ioctl->out.status; + + if( status != IB_SUCCESS && pp_recv_failure ) + { + /* Get the failed index */ + failed_index = num_wr - p_qp_ioctl->out.failed_cnt; + p_wr = p_recv_wr; + while( failed_index-- ) + p_wr = p_wr->p_next; + + *pp_recv_failure = p_wr; + } + } + + cl_free( p_qp_ioctl ); + AL_EXIT( AL_DBG_QP ); + return status; +} + + + +ib_api_status_t +ual_create_qp( + IN const ib_pd_handle_t h_pd, + IN OUT ib_qp_handle_t h_qp, + IN const ib_qp_create_t* const p_qp_create, + IN ib_qp_attr_t* p_qp_attr ) +{ + /* The first argument is probably not needed */ + ual_create_qp_ioctl_t qp_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs; + ib_qp_create_t qp_create; + + AL_ENTER( AL_DBG_QP ); + UNUSED_PARAM( p_qp_attr ); + + /* Clear the qp_ioctl */ + cl_memclr( &qp_ioctl, sizeof(qp_ioctl) ); + + /* Pre call to the UVP library */ + if( h_pd->h_ci_pd && uvp_intf.pre_create_qp ) + { + /* The post call MUST exist as it sets the UVP QP handle. 
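+ * (This is the same pre/post UVP bracketing used for the other verbs in these files: the pre_ handler fills umv_buf for the kernel, and the post_ handler consumes the returned umv_buf to produce the h_ci_* handle used on the fast path.)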
*/ + CL_ASSERT( uvp_intf.post_create_qp ); + /* Convert the handles to UVP handles */ + qp_create = *p_qp_create; + qp_create.h_rq_cq = qp_create.h_rq_cq->h_ci_cq; + qp_create.h_sq_cq = qp_create.h_sq_cq->h_ci_cq; + if (qp_create.h_srq) + qp_create.h_srq = qp_create.h_srq->h_ci_srq; + status = uvp_intf.pre_create_qp( h_pd->h_ci_pd, + &qp_create, &qp_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_QP ); + return status; + } + } + /* + * Convert the handles to KAL handles once again starting + * from the input qp attribute + */ + qp_ioctl.in.h_pd = h_pd->obj.hdl; + qp_ioctl.in.qp_create = *p_qp_create; + qp_ioctl.in.qp_create.h_rq_cq = + (ib_cq_handle_t)p_qp_create->h_rq_cq->obj.hdl; + qp_ioctl.in.qp_create.h_sq_cq = + (ib_cq_handle_t)p_qp_create->h_sq_cq->obj.hdl; + if (p_qp_create->h_srq) + qp_ioctl.in.qp_create.h_srq = + (ib_srq_handle_t)p_qp_create->h_srq->obj.hdl; + qp_ioctl.in.context = h_qp; + qp_ioctl.in.ev_notify = (h_qp->pfn_event_cb != NULL) ? TRUE : FALSE; + + cl_status = do_al_dev_ioctl( UAL_CREATE_QP, + &qp_ioctl.in, sizeof(qp_ioctl.in), &qp_ioctl.out, sizeof(qp_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(qp_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_CREATE_QP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = qp_ioctl.out.status; + + if( status == IB_SUCCESS ) + { + h_qp->obj.hdl = qp_ioctl.out.h_qp; + *p_qp_attr = qp_ioctl.out.attr; + } + } + + /* Post uvp call */ + if( h_pd->h_ci_pd && uvp_intf.post_create_qp ) + { + uvp_intf.post_create_qp( h_pd->h_ci_pd, + status, &h_qp->h_ci_qp, &qp_ioctl.out.umv_buf ); + + if( uvp_intf.post_recv ) + { + h_qp->h_recv_qp = h_qp->h_ci_qp; + h_qp->pfn_post_recv = uvp_intf.post_recv; + } + else + { + h_qp->h_recv_qp = h_qp; + h_qp->pfn_post_recv = ual_post_recv; + } + + if( uvp_intf.post_send ) + { + h_qp->h_send_qp = h_qp->h_ci_qp; + h_qp->pfn_post_send = uvp_intf.post_send; + } + else + { + h_qp->h_send_qp = h_qp; + h_qp->pfn_post_send = ual_post_send; + } + } + else + { + h_qp->h_recv_qp = h_qp; + h_qp->pfn_post_recv = ual_post_recv; + h_qp->h_send_qp = h_qp; + h_qp->pfn_post_send = ual_post_send; + } + + + AL_EXIT( AL_DBG_QP ); + return status; +} + + + +ib_api_status_t +ual_destroy_qp( + IN ib_qp_handle_t h_qp ) +{ + ual_destroy_qp_ioctl_t qp_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_QP ); + + /* Call the uvp pre call if the vendor library provided a valid QP handle */ + if( h_qp->h_ci_qp && uvp_intf.pre_destroy_qp ) + { + status = uvp_intf.pre_destroy_qp( h_qp->h_ci_qp ); + if (status != IB_SUCCESS) + { + AL_EXIT( AL_DBG_QP ); + return status; + } + } + + cl_memclr( &qp_ioctl, sizeof(qp_ioctl) ); + qp_ioctl.in.h_qp = h_qp->obj.hdl; + cl_status = do_al_dev_ioctl( UAL_DESTROY_QP, + &qp_ioctl.in, sizeof(qp_ioctl.in), &qp_ioctl.out, sizeof(qp_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(qp_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_DESTROY_QP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = qp_ioctl.out.status; + } + + /* Call vendor's post_destroy_qp */ + if( h_qp->h_ci_qp && uvp_intf.post_destroy_qp ) + uvp_intf.post_destroy_qp( h_qp->h_ci_qp, status ); + + AL_EXIT( AL_DBG_QP ); + return status; +} + + +ib_api_status_t +ual_modify_qp( + IN ib_qp_handle_t h_qp, + IN 
const ib_qp_mod_t* const p_qp_mod, + IN ib_qp_attr_t* p_qp_attr) +{ + ual_modify_qp_ioctl_t qp_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs; + + AL_ENTER( AL_DBG_QP ); + + /* Clear the qp_ioctl */ + cl_memclr( &qp_ioctl, sizeof(qp_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid QP handle */ + if( h_qp->h_ci_qp && uvp_intf.pre_modify_qp ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_modify_qp( h_qp->h_ci_qp, + p_qp_mod, &qp_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_QP ); + return status; + } + } + + qp_ioctl.in.h_qp = h_qp->obj.hdl; + qp_ioctl.in.modify_attr = *p_qp_mod; + + cl_status = do_al_dev_ioctl( UAL_MODIFY_QP, + &qp_ioctl.in, sizeof(qp_ioctl.in), &qp_ioctl.out, sizeof(qp_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(qp_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_MODIFY_QP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = qp_ioctl.out.status; + } + + /* Post uvp call */ + if( h_qp->h_ci_qp && uvp_intf.post_modify_qp ) + { + uvp_intf.post_modify_qp( h_qp->h_ci_qp, status, + &qp_ioctl.out.umv_buf ); + } + + UNUSED_PARAM( p_qp_attr ); + //if( status == IB_SUCCESS ) + //{ + // *p_qp_attr = qp_ioctl.out.qp_attr; + //} + + AL_EXIT( AL_DBG_QP ); + return status; +} + + +ib_api_status_t +ual_query_qp( + IN ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* p_qp_attr ) +{ + ual_query_qp_ioctl_t qp_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + ib_api_status_t status; + uvp_interface_t uvp_intf = h_qp->obj.p_ci_ca->verbs.user_verbs; + ib_qp_attr_t* p_attr; + ib_pd_handle_t h_ual_pd; + + AL_ENTER( AL_DBG_QP ); + + /* Clear the qp_ioctl */ + cl_memclr( &qp_ioctl, sizeof(qp_ioctl) ); + + /* Call the uvp pre call if the vendor library provided a valid ca handle */ + if( h_qp->h_ci_qp && uvp_intf.pre_query_qp ) + { + /* Pre call to the UVP library */ + status = uvp_intf.pre_query_qp( h_qp->h_ci_qp, &qp_ioctl.in.umv_buf ); + if( status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_QP ); + return status; + } + } + + qp_ioctl.in.h_qp = h_qp->obj.hdl; + + cl_status = do_al_dev_ioctl( UAL_QUERY_QP, + &qp_ioctl.in, sizeof(qp_ioctl.in), &qp_ioctl.out, sizeof(qp_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(qp_ioctl.out) ) + { + AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , + ("UAL_QUERY_QP IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + status = IB_ERROR; + } + else + { + status = qp_ioctl.out.status; + } + + p_attr = &qp_ioctl.out.attr; + /* + * Convert the handles in qp_attr to UVP handles + */ + h_ual_pd = PARENT_STRUCT( h_qp->obj.p_parent_obj, ib_pd_t, obj ); + p_attr->h_pd = h_ual_pd->h_ci_pd; + if( h_qp->h_recv_cq ) + p_attr->h_rq_cq = h_qp->h_recv_cq->h_ci_cq; + if( h_qp->h_send_cq ) + p_attr->h_sq_cq = h_qp->h_send_cq->h_ci_cq; + if( h_qp->h_srq ) + p_attr->h_srq = h_qp->h_srq->h_ci_srq; + + /* Post uvp call */ + if( h_qp->h_ci_qp && uvp_intf.post_query_qp ) + { + uvp_intf.post_query_qp( h_qp->h_ci_qp, status, + p_attr, &qp_ioctl.out.umv_buf ); + } + + if( IB_SUCCESS == status ) + { + /* UVP handles in qp_attr will be converted to UAL's handles + * by the common code + */ + *p_qp_attr = *p_attr; + } + + AL_EXIT( AL_DBG_QP ); + return status; +} + + +ib_api_status_t +ual_init_qp_alias( + IN al_qp_alias_t* const p_qp_alias, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t 
port_guid, + IN const ib_qp_create_t* const p_qp_create ) +{ + ual_spl_qp_ioctl_t qp_ioctl; + uintn_t bytes_ret; + cl_status_t cl_status; + + AL_ENTER( AL_DBG_QP ); + + CL_ASSERT( p_qp_alias ); + + if( h_pd->type != IB_PDT_ALIAS ) + { + AL_EXIT( AL_DBG_QP ); + return IB_INVALID_PD_HANDLE; + } + + attach_al_obj( &h_pd->obj, &p_qp_alias->qp.obj ); + + switch( p_qp_alias->qp.type ) + { + case IB_QPT_QP0_ALIAS: + case IB_QPT_QP1_ALIAS: + /* Send an ioctl to kernel to get the alias qp */ + cl_memclr( &qp_ioctl, sizeof(qp_ioctl) ); + qp_ioctl.in.h_pd = h_pd->obj.hdl; + qp_ioctl.in.port_guid = port_guid; + qp_ioctl.in.qp_create = *p_qp_create; + qp_ioctl.in.context = &p_qp_alias->qp; + + cl_status = do_al_dev_ioctl( UAL_GET_SPL_QP_ALIAS, + &qp_ioctl.in, sizeof(qp_ioctl.in), + &qp_ioctl.out, sizeof(qp_ioctl.out), + &bytes_ret ); + + if( cl_status != CL_SUCCESS || bytes_ret != sizeof(qp_ioctl.out) ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, + ("UAL_GET_SPL_QP_ALIAS IOCTL returned %s.\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + else if( qp_ioctl.out.status != IB_SUCCESS ) + { + AL_EXIT( AL_DBG_QP ); + return qp_ioctl.out.status; + } + p_qp_alias->qp.obj.hdl = qp_ioctl.out.h_qp; + p_qp_alias->h_mad_disp = NULL; + break; + + case IB_QPT_MAD: + /* The MAD QP should have created the MAD dispatcher. */ + CL_ASSERT( p_qp_alias->h_mad_disp ); + break; + + default: + CL_ASSERT( p_qp_alias->qp.type == IB_QPT_QP0_ALIAS || + p_qp_alias->qp.type == IB_QPT_QP1_ALIAS || + p_qp_alias->qp.type == IB_QPT_MAD ); + AL_EXIT( AL_DBG_QP ); + return IB_ERROR; + } + + + /* Override function pointers. */ + p_qp_alias->qp.pfn_reg_mad_svc = ual_reg_mad_svc; + p_qp_alias->qp.pfn_dereg_mad_svc = ual_dereg_mad_svc; + + AL_EXIT( AL_DBG_QP ); + return IB_SUCCESS; +} diff --git a/branches/Ndi/core/al/user/ual_qp.h b/branches/Ndi/core/al/user/ual_qp.h new file mode 100644 index 00000000..7eb8780c --- /dev/null +++ b/branches/Ndi/core/al/user/ual_qp.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined(__UAL_QP_H__) +#define __UAL_QP_H__ + +#include "al_qp.h" + + +ib_api_status_t +ual_init_qp_alias( + IN al_qp_alias_t* const p_qp_alias, + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create ); + +#endif // __UAL_QP_H__ diff --git a/branches/Ndi/core/al/user/ual_query.c b/branches/Ndi/core/al/user/ual_query.c new file mode 100644 index 00000000..63dd3b2f --- /dev/null +++ b/branches/Ndi/core/al/user/ual_query.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ual_support.h" +#include "al.h" diff --git a/branches/Ndi/core/al/user/ual_query.h b/branches/Ndi/core/al/user/ual_query.h new file mode 100644 index 00000000..ed61ea64 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_query.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined(__UAL_QUERY_H__) +#define __UAL_QUERY_H__ + +#endif /* __UAL_QUERY_H__ */ diff --git a/branches/Ndi/core/al/user/ual_reg_svc.c b/branches/Ndi/core/al/user/ual_reg_svc.c new file mode 100644 index 00000000..63dd3b2f --- /dev/null +++ b/branches/Ndi/core/al/user/ual_reg_svc.c @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ual_support.h" +#include "al.h" diff --git a/branches/Ndi/core/al/user/ual_res_mgr.h b/branches/Ndi/core/al/user/ual_res_mgr.h new file mode 100644 index 00000000..c10bc464 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_res_mgr.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(__UAL_RES_MGR_H__) +#define __UAL_RES_MGR_H__ + +/* Commented out until we define them */ +/* +#include "ual_pd.h" +#include "ual_mr.h" +#include "ual_mw.h" +#include "ual_qp.h" +#include "ual_cq.h" +#include "ual_av.h" +#include "ual_mcast.h" +*/ + + + +/* + * Global handle to the access layer. This is used by internal components + * when calling the external API. 
This handle is initialized by the access + * layer manager. + */ +extern ib_al_handle_t gh_al; + + +/* + * + * + * Resource list structure with a lock + * + */ +typedef struct _ual_res +{ + cl_qlist_t list; + cl_spinlock_t lock; + +} ual_res_t; + +#endif // __UAL_RES_MGR_H__ diff --git a/branches/Ndi/core/al/user/ual_sa_req.c b/branches/Ndi/core/al/user/ual_sa_req.c new file mode 100644 index 00000000..f1dba3b6 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_sa_req.c @@ -0,0 +1,308 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Handles all SA-related interaction for user-mode: + * queries + * service registration + * multicast + */ + + +#include +#include + +#include "al.h" +#include "al_ca.h" +#include "al_common.h" +#include "al_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ual_sa_req.tmh" +#endif + +#include "al_mgr.h" +#include "al_query.h" +#include "ib_common.h" +#include "ual_mad.h" + + +typedef struct _sa_req_mgr +{ + al_obj_t obj; /* Child of gp_al_mgr */ + + /* File handle on which to issue query IOCTLs. */ + HANDLE h_sa_dev; + +} sa_req_mgr_t; + + +/* Global SA request manager */ +sa_req_mgr_t *gp_sa_req_mgr = NULL; + + + +/* + * Function prototypes. + */ +static void +free_sa_req_mgr( + IN al_obj_t* p_obj ); + +void +destroying_sa_req_svc( + IN al_obj_t* p_obj ); + +void +free_sa_req_svc( + IN al_obj_t* p_obj ); + + +/* + * Create the sa_req manager. + */ +ib_api_status_t +create_sa_req_mgr( + IN al_obj_t* const p_parent_obj ) +{ + ib_api_status_t status; + + AL_ENTER( AL_DBG_SA_REQ ); + CL_ASSERT( p_parent_obj ); + CL_ASSERT( gp_sa_req_mgr == NULL ); + + gp_sa_req_mgr = cl_zalloc( sizeof( sa_req_mgr_t ) ); + if( gp_sa_req_mgr == NULL ) + { + AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("cl_zalloc failed\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the sa_req manager. */ + construct_al_obj( &gp_sa_req_mgr->obj, AL_OBJ_TYPE_SA_REQ_SVC ); + gp_sa_req_mgr->h_sa_dev = INVALID_HANDLE_VALUE; + + /* Initialize the global sa_req manager object. 
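+ * (init_al_obj takes an initial reference on the object; it is released by the deref_al_obj call at the end of this function once setup has succeeded.)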
+	 */
+	status = init_al_obj( &gp_sa_req_mgr->obj, gp_sa_req_mgr, TRUE,
+		NULL, NULL, free_sa_req_mgr );
+	if( status != IB_SUCCESS )
+	{
+		free_sa_req_mgr( &gp_sa_req_mgr->obj );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("init_al_obj failed\n") );
+		return status;
+	}
+	status = attach_al_obj( p_parent_obj, &gp_sa_req_mgr->obj );
+	if( status != IB_SUCCESS )
+	{
+		gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("attach_al_obj returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	/* Create a file object on which to issue all SA requests. */
+	gp_sa_req_mgr->h_sa_dev = ual_create_async_file( UAL_BIND_SA );
+	if( gp_sa_req_mgr->h_sa_dev == INVALID_HANDLE_VALUE )
+	{
+		gp_sa_req_mgr->obj.pfn_destroy( &gp_sa_req_mgr->obj, NULL );
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("ual_create_async_file returned %d.\n", GetLastError()) );
+		return IB_ERROR;
+	}
+
+	/* Release the reference from init_al_obj */
+	deref_al_obj( &gp_sa_req_mgr->obj );
+
+	AL_EXIT( AL_DBG_SA_REQ );
+	return IB_SUCCESS;
+}
+
+
+/*
+ * Free the sa_req manager.
+ */
+static void
+free_sa_req_mgr(
+	IN				al_obj_t*					p_obj )
+{
+	CL_ASSERT( p_obj );
+	CL_ASSERT( gp_sa_req_mgr == PARENT_STRUCT( p_obj, sa_req_mgr_t, obj ) );
+	UNUSED_PARAM( p_obj );
+
+	if( gp_sa_req_mgr->h_sa_dev != INVALID_HANDLE_VALUE )
+		CloseHandle( gp_sa_req_mgr->h_sa_dev );
+
+	destroy_al_obj( &gp_sa_req_mgr->obj );
+	cl_free( gp_sa_req_mgr );
+	gp_sa_req_mgr = NULL;
+}
+
+
+ib_api_status_t
+al_send_sa_req(
+	IN				al_sa_req_t					*p_sa_req,
+	IN		const	net64_t						port_guid,
+	IN		const	uint32_t					timeout_ms,
+	IN		const	uint32_t					retry_cnt,
+	IN		const	ib_user_query_t* const		p_sa_req_data,
+	IN		const	ib_al_flags_t				flags )
+{
+	ib_api_status_t		status;
+	HANDLE				h_dev;
+	DWORD				ret_bytes;
+
+	AL_ENTER( AL_DBG_QUERY );
+
+	CL_ASSERT( p_sa_req );
+	CL_ASSERT( p_sa_req_data );
+
+	/* Copy the query context information. */
+	p_sa_req->status = IB_ERROR;
+
+	/* Issue the query IOCTL */
+	p_sa_req->ioctl.in.port_guid = port_guid;
+	p_sa_req->ioctl.in.timeout_ms = timeout_ms;
+	p_sa_req->ioctl.in.retry_cnt = retry_cnt;
+	p_sa_req->ioctl.in.sa_req = *p_sa_req_data;
+	cl_memcpy( p_sa_req->ioctl.in.attr,
+		p_sa_req_data->p_attr, p_sa_req_data->attr_size );
+	p_sa_req->ioctl.in.ph_sa_req = &p_sa_req->hdl;
+	p_sa_req->ioctl.in.p_status = &p_sa_req->status;
+
+	if( flags & IB_FLAGS_SYNC )
+		h_dev = g_al_device;
+	else
+		h_dev = gp_sa_req_mgr->h_sa_dev;
+
+	if( !DeviceIoControl( h_dev, UAL_SEND_SA_REQ,
+		&p_sa_req->ioctl.in, sizeof(p_sa_req->ioctl.in),
+		&p_sa_req->ioctl.out, sizeof(p_sa_req->ioctl.out),
+		NULL, &p_sa_req->ov ) )
+	{
+		if( GetLastError() != ERROR_IO_PENDING )
+		{
+			status = p_sa_req->status;
+			AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR , ("UAL_SEND_SA_REQ IOCTL returned %s\n",
+				ib_get_err_str(status)) );
+		}
+		else
+		{
+			status = IB_SUCCESS;
+		}
+	}
+	else
+	{
+		/* Completed synchronously. */
+		if( GetOverlappedResult( h_dev, &p_sa_req->ov, &ret_bytes, FALSE ) )
+		{
+			status = IB_SUCCESS;
+			/* Process the completion.
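+			 * GetOverlappedResult reported success, so the request
+			 * finished synchronously; run the completion callback
+			 * inline with a zero error code.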
*/ + sa_req_cb( 0, ret_bytes, &p_sa_req->ov ); + } + else + { + sa_req_cb( GetLastError(), 0, &p_sa_req->ov ); + status = IB_ERROR; + } + } + + AL_EXIT( AL_DBG_QUERY ); + return status; +} + + +void CALLBACK +sa_req_cb( + IN DWORD error_code, + IN DWORD ret_bytes, + IN LPOVERLAPPED p_ov ) +{ + al_sa_req_t *p_sa_req; + ib_mad_element_t *p_mad_response = NULL; + + AL_ENTER( AL_DBG_QUERY ); + + CL_ASSERT( p_ov ); + + p_sa_req = PARENT_STRUCT( p_ov, al_sa_req_t, ov ); + + if( error_code ) + { + /* Some sort of failure. :( */ + p_sa_req->status = IB_ERROR; + goto sa_req_cb_err; + } + else if( ret_bytes != sizeof(p_sa_req->ioctl.out) ) + { + /* Check for expected returned data. */ + p_sa_req->status = IB_ERROR; + goto sa_req_cb_err; + } + + /* Retrieve the response */ + if( p_sa_req->ioctl.out.h_resp != AL_INVALID_HANDLE ) + { + p_sa_req->status = + ual_get_recv_mad( g_pool_key, p_sa_req->ioctl.out.h_resp, + p_sa_req->ioctl.out.resp_size, &p_mad_response ); + + if( p_sa_req->status != IB_SUCCESS ) + goto sa_req_cb_err; + } + + p_sa_req->status = p_sa_req->ioctl.out.status; + +sa_req_cb_err: + p_sa_req->pfn_sa_req_cb( p_sa_req, p_mad_response ); + + AL_EXIT( AL_DBG_QUERY ); +} + + +void +al_cancel_sa_req( + IN const al_sa_req_t *p_sa_req ) +{ + ual_cancel_sa_req_ioctl_t ioctl; + size_t bytes_ret; + + AL_ENTER( AL_DBG_SA_REQ ); + + ioctl.h_sa_req = p_sa_req->hdl; + + do_al_dev_ioctl( + UAL_CANCEL_SA_REQ, &ioctl, sizeof(ioctl), NULL, 0, &bytes_ret ); + + AL_EXIT( AL_DBG_SA_REQ ); +} diff --git a/branches/Ndi/core/al/user/ual_srq.c b/branches/Ndi/core/al/user/ual_srq.c new file mode 100644 index 00000000..a575b7e9 --- /dev/null +++ b/branches/Ndi/core/al/user/ual_srq.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id: ual_qp.c 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+
+#include "al.h"
+#include "al_av.h"
+#include "al_ci_ca.h"
+#include "al_cq.h"
+#include "al_pd.h"
+#include "al_srq.h"
+#include "ual_mad.h"
+#include "ual_support.h"
+
+
+#include "al_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "ual_srq.tmh"
+#endif
+
+
+ib_api_status_t
+ual_post_srq_recv(
+	IN		const	ib_srq_handle_t				h_srq,
+	IN				ib_recv_wr_t*	const		p_recv_wr,
+		OUT			ib_recv_wr_t				**pp_recv_failure OPTIONAL )
+{
+	uintn_t						failed_index;
+	uintn_t						bytes_ret;
+	uint32_t					num_wr = 0;
+	uint32_t					num_ds = 0;
+	ib_recv_wr_t*				p_wr;
+	ib_local_ds_t*				p_ds;
+	ual_post_srq_recv_ioctl_t	*p_srq_ioctl;
+	size_t						ioctl_buf_sz;
+	cl_status_t					cl_status;
+	ib_api_status_t				status;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/*
+	 * The work requests arrive as a linked list, but the kernel expects
+	 * them as an array.  First walk the list to find out how much memory
+	 * we need to allocate.
+	 */
+	for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+	{
+		num_wr++;
+
+		/* Check for overflow */
+		if( !num_wr )
+			break;
+		if( num_ds > num_ds + p_wr->num_ds )
+		{
+			num_wr = 0;
+			break;
+		}
+
+		num_ds += p_wr->num_ds;
+	}
+	if( !num_wr )
+	{
+		AL_EXIT( AL_DBG_SRQ );
+		return IB_INVALID_PARAMETER;
+	}
+
+	ioctl_buf_sz = sizeof(ual_post_srq_recv_ioctl_t);
+	ioctl_buf_sz += sizeof(ib_recv_wr_t) * (num_wr - 1);
+	ioctl_buf_sz += sizeof(ib_local_ds_t) * num_ds;
+
+	p_srq_ioctl = (ual_post_srq_recv_ioctl_t*)cl_zalloc( ioctl_buf_sz );
+	if( !p_srq_ioctl )
+	{
+		AL_PRINT_EXIT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR,
+			("Failed to allocate IOCTL buffer.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+	p_ds = (ib_local_ds_t*)&p_srq_ioctl->in.recv_wr[num_wr];
+
+	/* Now populate the ioctl buffer and send down the ioctl */
+	p_srq_ioctl->in.h_srq = h_srq->obj.hdl;
+	p_srq_ioctl->in.num_wr = num_wr;
+	p_srq_ioctl->in.num_ds = num_ds;
+	num_wr = 0;
+	for( p_wr = p_recv_wr; p_wr; p_wr = p_wr->p_next )
+	{
+		p_srq_ioctl->in.recv_wr[num_wr++] = *p_wr;
+		cl_memcpy(
+			p_ds, p_wr->ds_array, sizeof(ib_local_ds_t) * p_wr->num_ds );
+		p_ds += p_wr->num_ds;
+	}
+
+	cl_status = do_al_dev_ioctl( UAL_POST_SRQ_RECV,
+		&p_srq_ioctl->in, ioctl_buf_sz,
+		&p_srq_ioctl->out, sizeof(p_srq_ioctl->out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(p_srq_ioctl->out) )
+	{
+		if( pp_recv_failure )
+			*pp_recv_failure = p_recv_wr;
+
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_POST_SRQ_RECV IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = p_srq_ioctl->out.status;
+
+		if( status != IB_SUCCESS && pp_recv_failure )
+		{
+			/* Get the failed index */
+			failed_index = num_wr - p_srq_ioctl->out.failed_cnt;
+			p_wr = p_recv_wr;
+			while( failed_index-- )
+				p_wr = p_wr->p_next;
+
+			*pp_recv_failure = p_wr;
+		}
+	}
+
+	cl_free( p_srq_ioctl );
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+
+ib_api_status_t
+ual_create_srq(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN	OUT			ib_srq_handle_t				h_srq,
+	IN		const	ib_srq_attr_t* const		p_srq_attr)
+{
+	/* The first argument is probably not needed */
+	ual_create_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+	ib_srq_attr_t			srq_attr;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Pre call to the UVP library */
+	if( h_pd->h_ci_pd && uvp_intf.pre_create_srq )
+	{
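+		/*
+		 * The pre/post UVP calls bracket the kernel IOCTL: the vendor
+		 * library may place private data in umv_buf here, the kernel
+		 * passes it through, and post_create_srq consumes the output
+		 * half to produce the UVP SRQ handle.
+		 */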
+		/* The post call MUST exist as it sets the UVP srq handle. */
+		CL_ASSERT( uvp_intf.post_create_srq );
+		/* Convert the handles to UVP handles */
+		srq_attr = *p_srq_attr;
+		status = uvp_intf.pre_create_srq( h_pd->h_ci_pd,
+			&srq_attr, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+	/*
+	 * Convert the handles to KAL handles once again starting
+	 * from the input srq attribute
+	 */
+	srq_ioctl.in.h_pd = h_pd->obj.hdl;
+	srq_ioctl.in.srq_attr = *p_srq_attr;
+	srq_ioctl.in.context = h_srq;
+	srq_ioctl.in.ev_notify = (h_srq->pfn_event_cb != NULL) ? TRUE : FALSE;
+
+	cl_status = do_al_dev_ioctl( UAL_CREATE_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_CREATE_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_pd->h_ci_pd && uvp_intf.post_create_srq )
+	{
+		uvp_intf.post_create_srq( h_pd->h_ci_pd,
+			status, &h_srq->h_ci_srq, &srq_ioctl.out.umv_buf );
+
+		if( uvp_intf.post_srq_recv )
+		{
+			h_srq->h_recv_srq = h_srq->h_ci_srq;
+			h_srq->pfn_post_srq_recv = uvp_intf.post_srq_recv;
+		}
+		else
+		{
+			h_srq->h_recv_srq = h_srq;
+			h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+		}
+	}
+	else
+	{
+		h_srq->h_recv_srq = h_srq;
+		h_srq->pfn_post_srq_recv = ual_post_srq_recv;
+	}
+
+	if( status == IB_SUCCESS )
+	{
+		h_srq->obj.hdl = srq_ioctl.out.h_srq;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+ib_api_status_t
+ual_modify_srq(
+	IN				ib_srq_handle_t				h_srq,
+	IN		const	ib_srq_attr_t* const		p_srq_attr,
+	IN		const	ib_srq_attr_mask_t			srq_attr_mask)
+{
+	ual_modify_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_modify_srq )
+	{
+		/* Pre call to the UVP library */
+		status = uvp_intf.pre_modify_srq( h_srq->h_ci_srq,
+			p_srq_attr, srq_attr_mask, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+	srq_ioctl.in.srq_attr = *p_srq_attr;
+	srq_ioctl.in.srq_attr_mask = srq_attr_mask;
+
+	cl_status = do_al_dev_ioctl( UAL_MODIFY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_MODIFY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Post uvp call */
+	if( h_srq->h_ci_srq && uvp_intf.post_modify_srq )
+	{
+		uvp_intf.post_modify_srq( h_srq->h_ci_srq, status,
+			&srq_ioctl.out.umv_buf );
+	}
+
+	//if( status == IB_SUCCESS )
+	//{
+	//	*p_srq_attr = srq_ioctl.out.srq_attr;
+	//}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+ib_api_status_t
+ual_query_srq(
+	IN				ib_srq_handle_t				h_srq,
+		OUT			ib_srq_attr_t*				p_srq_attr )
+{
+	ual_query_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+	ib_srq_attr_t*			p_attr;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Clear the srq_ioctl */
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_query_srq )
+	{
+		/* Pre call to the UVP library */
+		status = uvp_intf.pre_query_srq( h_srq->h_ci_srq, &srq_ioctl.in.umv_buf );
+		if( status != IB_SUCCESS )
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+
+	cl_status = do_al_dev_ioctl( UAL_QUERY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_QUERY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	p_attr = &srq_ioctl.out.srq_attr;
+
+	/* Post uvp call */
+	if( h_srq->h_ci_srq && uvp_intf.post_query_srq )
+	{
+		uvp_intf.post_query_srq( h_srq->h_ci_srq, status,
+			p_attr, &srq_ioctl.out.umv_buf );
+	}
+
+	if( IB_SUCCESS == status )
+	{
+		/* UVP handles in srq_attr will be converted to UAL's handles
+		 * by the common code
+		 */
+		*p_srq_attr = *p_attr;
+	}
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
+
+ib_api_status_t
+ual_destroy_srq(
+	IN				ib_srq_handle_t				h_srq )
+{
+	ual_destroy_srq_ioctl_t	srq_ioctl;
+	uintn_t					bytes_ret;
+	cl_status_t				cl_status;
+	ib_api_status_t			status;
+	uvp_interface_t			uvp_intf = h_srq->obj.p_ci_ca->verbs.user_verbs;
+
+	AL_ENTER( AL_DBG_SRQ );
+
+	/* Call the uvp pre call if the vendor library provided a valid srq handle */
+	if( h_srq->h_ci_srq && uvp_intf.pre_destroy_srq )
+	{
+		status = uvp_intf.pre_destroy_srq( h_srq->h_ci_srq );
+		if (status != IB_SUCCESS)
+		{
+			AL_EXIT( AL_DBG_SRQ );
+			return status;
+		}
+	}
+
+	cl_memclr( &srq_ioctl, sizeof(srq_ioctl) );
+	srq_ioctl.in.h_srq = h_srq->obj.hdl;
+	cl_status = do_al_dev_ioctl( UAL_DESTROY_SRQ,
+		&srq_ioctl.in, sizeof(srq_ioctl.in), &srq_ioctl.out, sizeof(srq_ioctl.out),
+		&bytes_ret );
+
+	if( cl_status != CL_SUCCESS || bytes_ret != sizeof(srq_ioctl.out) )
+	{
+		AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR ,
+			("UAL_DESTROY_SRQ IOCTL returned %s.\n",
+			CL_STATUS_MSG(cl_status)) );
+		status = IB_ERROR;
+	}
+	else
+	{
+		status = srq_ioctl.out.status;
+	}
+
+	/* Call vendor's post_destroy_srq */
+	if( h_srq->h_ci_srq && uvp_intf.post_destroy_srq )
+		uvp_intf.post_destroy_srq( h_srq->h_ci_srq, status );
+
+	AL_EXIT( AL_DBG_SRQ );
+	return status;
+}
+
diff --git a/branches/Ndi/core/al/user/ual_sub.c b/branches/Ndi/core/al/user/ual_sub.c
new file mode 100644
index 00000000..63dd3b2f
--- /dev/null
+++ b/branches/Ndi/core/al/user/ual_sub.c
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ual_support.h" +#include "al.h" diff --git a/branches/Ndi/core/al/user/ual_support.h b/branches/Ndi/core/al/user/ual_support.h new file mode 100644 index 00000000..9dcd784b --- /dev/null +++ b/branches/Ndi/core/al/user/ual_support.h @@ -0,0 +1,131 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Device and complib support for UAL + */ + +#if !defined(__UAL_SUPPORT_H__) +#define __UAL_SUPPORT_H__ + +#include +#include +#include +#include +#include "al_dev.h" +#include "al_debug.h" + + + +#include + + +/* File handle to kernel transport. */ +extern HANDLE g_al_device; + +/* + * Mutex to serialize open/close AL calls so that we can properly synchronize + * initializing and cleaning up AL internally. + */ +extern cl_mutex_t g_open_close_mutex; + + +/* Helper functions for UVP library access. */ +static inline void* +al_load_uvp( + IN char* const uvp_lib_name ) +{ +#ifdef _DEBUG_ + StringCbCatA( uvp_lib_name, 32, "d.dll" ); +#else /* _DEBUG_ */ + StringCbCatA( uvp_lib_name, 32, ".dll" ); +#endif /* _DEBUG_ */ + return LoadLibrary( uvp_lib_name ); +} + +static inline void +al_unload_uvp( + IN void *h_uvp_lib ) +{ + FreeLibrary( h_uvp_lib ); +} + +static inline uvp_get_interface_t +al_get_uvp_ifc_pfn( + IN void *h_uvp_lib ) +{ + return (uvp_get_interface_t) + GetProcAddress( h_uvp_lib, "uvp_get_interface" ); +} + +static inline void +al_uvp_lib_err( + IN uint32_t dbg_lvl, + IN char* const msg, + IN ... ) +{ + char buffer[256]; + char* error; + va_list args; + + /* Make free build warning go away. 
*/ + UNUSED_PARAM( dbg_lvl ); + + va_start( args, msg ); + + if( StringCbVPrintfA( buffer, 256, msg, args ) == + STRSAFE_E_INSUFFICIENT_BUFFER ) + { + /* Overflow... */ + buffer[252] = '.'; + buffer[253] = '.'; + buffer[254] = '.'; + } + va_end(args); + + if( !FormatMessageA( FORMAT_MESSAGE_ALLOCATE_BUFFER | + FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, GetLastError(), + MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + (LPSTR)&error, 0, NULL ) ) + { + //AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("%s (%d)\n", buffer, GetLastError()) ); + } + else + { + //AL_PRINT(TRACE_LEVEL_ERROR ,AL_DBG_ERROR, ("%s (%s)\n", buffer, error) ); + LocalFree( error ); + } +} + +#endif // __UAL_SUPPORT_H__ diff --git a/branches/Ndi/core/bus/dirs b/branches/Ndi/core/bus/dirs new file mode 100644 index 00000000..ed41dcf4 --- /dev/null +++ b/branches/Ndi/core/bus/dirs @@ -0,0 +1,2 @@ +DIRS=\ + kernel diff --git a/branches/Ndi/core/bus/kernel/SOURCES b/branches/Ndi/core/bus/kernel/SOURCES new file mode 100644 index 00000000..f8ca870f --- /dev/null +++ b/branches/Ndi/core/bus/kernel/SOURCES @@ -0,0 +1,44 @@ +TARGETNAME=ibbus +TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER + + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + + +SOURCES= ibbus.rc \ + bus_driver.c \ + bus_pnp.c \ + bus_port_mgr.c \ + bus_iou_mgr.c + +INCLUDES=..\..\..\inc;..\..\..\inc\kernel;..\..\al;..\..\al\kernel; + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -DNEED_CL_OBJ + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib + +!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K" +# +# The driver is built in the Win2K build environment +# - use the library version of safe strings +# +TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib +!endif + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H + +!ENDIF + + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/core/bus/kernel/bus_driver.c b/branches/Ndi/core/bus/kernel/bus_driver.c new file mode 100644 index 00000000..c1f8e789 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_driver.c @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+/*
+ * Provides the driver entry points for the InfiniBand Bus Driver.
+ */
+
+#include
+#include "bus_driver.h"
+#include "bus_pnp.h"
+#include "al_mgr.h"
+#include "al_dev.h"
+#include "al_debug.h"
+#include
+
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "bus_driver.tmh"
+#endif
+
+
+#define DEFAULT_NODE_DESC	"OpenIB Windows® Host"
+
+
+char	node_desc[IB_NODE_DESCRIPTION_SIZE];
+
+bus_globals_t	bus_globals = {
+	BUS_DBG_ERROR,
+	TRUE,
+	NULL,
+	NULL
+};
+
+static void
+__read_machine_name( void );
+
+static NTSTATUS
+__read_registry(
+	IN				UNICODE_STRING* const		p_registry_path );
+
+static NTSTATUS
+bus_drv_open(
+	IN				DEVICE_OBJECT				*p_dev_obj,
+	IN				IRP							*p_irp );
+
+static NTSTATUS
+bus_drv_cleanup(
+	IN				DEVICE_OBJECT				*p_dev_obj,
+	IN				IRP							*p_irp );
+
+static NTSTATUS
+bus_drv_close(
+	IN				DEVICE_OBJECT				*p_dev_obj,
+	IN				IRP							*p_irp );
+
+static NTSTATUS
+bus_drv_ioctl(
+	IN				DEVICE_OBJECT				*p_dev_obj,
+	IN				IRP							*p_irp );
+
+/***f* InfiniBand Bus Driver/bus_drv_sysctl
+* NAME
+*	bus_drv_sysctl
+*
+* DESCRIPTION
+*	Entry point for handling WMI IRPs.
+*
+* SYNOPSIS
+*/
+static NTSTATUS
+bus_drv_sysctl(
+	IN				DEVICE_OBJECT				*p_dev_obj,
+	IN				IRP							*p_irp );
+/**********/
+
+static void
+bus_drv_unload(
+	IN				DRIVER_OBJECT				*p_driver_obj );
+
+NTSTATUS
+DriverEntry(
+	IN				DRIVER_OBJECT				*p_driver_obj,
+	IN				UNICODE_STRING				*p_registry_path );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (INIT, DriverEntry)
+#pragma alloc_text (INIT, __read_machine_name)
+#pragma alloc_text (INIT, __read_registry)
+#pragma alloc_text (PAGE, bus_drv_unload)
+#pragma alloc_text (PAGE, bus_drv_open)
+#pragma alloc_text (PAGE, bus_drv_close)
+#pragma alloc_text (PAGE, bus_drv_ioctl)
+#pragma alloc_text (PAGE_PNP, bus_drv_sysctl)
+#endif
+
+
+static void
+__read_machine_name( void )
+{
+	NTSTATUS					status;
+	/* Remember the terminating entry in the table below. */
+	RTL_QUERY_REGISTRY_TABLE	table[2];
+	UNICODE_STRING				hostNamePath;
+	UNICODE_STRING				hostNameW;
+	ANSI_STRING					hostName;
+
+	BUS_ENTER( BUS_DBG_DRV );
+
+	/* Get the host name. */
+	RtlInitUnicodeString( &hostNamePath, L"ComputerName\\ComputerName" );
+	RtlInitUnicodeString( &hostNameW, NULL );
+
+	/*
+	 * Clear the table.  This clears all the query callback pointers,
+	 * and sets up the terminating table entry.
+	 */
+	cl_memclr( table, sizeof(table) );
+	cl_memclr( node_desc, sizeof(node_desc) );
+
+	/* Setup the table entries. */
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT | RTL_QUERY_REGISTRY_REQUIRED;
+	table[0].Name = L"ComputerName";
+	table[0].EntryContext = &hostNameW;
+	table[0].DefaultType = REG_SZ;
+	table[0].DefaultData = &hostNameW;
+	table[0].DefaultLength = 0;
+
+	/* Have at it! */
+	status = RtlQueryRegistryValues( RTL_REGISTRY_CONTROL,
+		hostNamePath.Buffer, table, NULL, NULL );
+	if( NT_SUCCESS( status ) )
+	{
+		/* Convert the UNICODE host name to ANSI. */
+		hostName.Length = 0;
+		hostName.MaximumLength = sizeof(node_desc);
+		hostName.Buffer = node_desc;
+		status = RtlUnicodeStringToAnsiString( &hostName, &hostNameW, FALSE );
+		RtlFreeUnicodeString( &hostNameW );
+	}
+	else
+	{
+		BUS_TRACE(BUS_DBG_ERROR , ("Failed to get host name.\n") );
+		/* Use the default name... */
+		RtlStringCbCopyNA( node_desc, sizeof(node_desc),
+			DEFAULT_NODE_DESC, sizeof(DEFAULT_NODE_DESC) );
+	}
+
+	BUS_EXIT( BUS_DBG_DRV );
+}
+
+
+static NTSTATUS
+__read_registry(
+	IN				UNICODE_STRING* const		p_registry_path )
+{
+	NTSTATUS					status;
+	/* Remember the terminating entry in the table below.
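+	 * RtlQueryRegistryValues treats the first all-zero entry as the
+	 * end of the list, so the cl_memclr below leaves table[8] clear
+	 * to terminate it.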
*/ + RTL_QUERY_REGISTRY_TABLE table[9]; + UNICODE_STRING param_path; + + BUS_ENTER( BUS_DBG_DRV ); + + __read_machine_name(); + + RtlInitUnicodeString( ¶m_path, NULL ); + param_path.MaximumLength = p_registry_path->Length + + sizeof(L"\\Parameters"); + param_path.Buffer = cl_zalloc( param_path.MaximumLength ); + if( !param_path.Buffer ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate parameters path buffer.\n") ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + RtlAppendUnicodeStringToString( ¶m_path, p_registry_path ); + RtlAppendUnicodeToString( ¶m_path, L"\\Parameters" ); + + /* + * Clear the table. This clears all the query callback pointers, + * and sets up the terminating table entry. + */ + cl_memclr( table, sizeof(table) ); + + /* Setup the table entries. */ + table[0].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[0].Name = L"ReportPortNIC"; + table[0].EntryContext = &bus_globals.b_report_port_nic; + table[0].DefaultType = REG_DWORD; + table[0].DefaultData = &bus_globals.b_report_port_nic; + table[0].DefaultLength = sizeof(ULONG); + + table[1].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[1].Name = L"DebugFlags"; + table[1].EntryContext = &bus_globals.dbg_lvl; + table[1].DefaultType = REG_DWORD; + table[1].DefaultData = &bus_globals.dbg_lvl; + table[1].DefaultLength = sizeof(ULONG); + + table[2].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[2].Name = L"IbalDebugLevel"; + table[2].EntryContext = &g_al_dbg_level; + table[2].DefaultType = REG_DWORD; + table[2].DefaultData = &g_al_dbg_level; + table[2].DefaultLength = sizeof(ULONG); + + table[3].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[3].Name = L"IbalDebugFlags"; + table[3].EntryContext = &g_al_dbg_flags; + table[3].DefaultType = REG_DWORD; + table[3].DefaultData = &g_al_dbg_flags; + table[3].DefaultLength = sizeof(ULONG); + + + table[4].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[4].Name = L"SmiPollInterval"; + table[4].EntryContext = &g_smi_poll_interval; + table[4].DefaultType = REG_DWORD; + table[4].DefaultData = &g_smi_poll_interval; + table[4].DefaultLength = sizeof(ULONG); + + table[5].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[5].Name = L"IocQueryTimeout"; + table[5].EntryContext = &g_ioc_query_timeout; + table[5].DefaultType = REG_DWORD; + table[5].DefaultData = &g_ioc_query_timeout; + table[5].DefaultLength = sizeof(ULONG); + + table[6].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[6].Name = L"IocQueryRetries"; + table[6].EntryContext = &g_ioc_query_retries; + table[6].DefaultType = REG_DWORD; + table[6].DefaultData = &g_ioc_query_retries; + table[6].DefaultLength = sizeof(ULONG); + + table[7].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[7].Name = L"IocPollInterval"; + table[7].EntryContext = &g_ioc_poll_interval; + table[7].DefaultType = REG_DWORD; + table[7].DefaultData = &g_ioc_poll_interval; + table[7].DefaultLength = sizeof(ULONG); + + /* Have at it! */ + status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, + param_path.Buffer, table, NULL, NULL ); + +#if DBG + if( g_al_dbg_flags & AL_DBG_ERR ) + g_al_dbg_flags |= CL_DBG_ERROR; +#endif + + BUS_TRACE(BUS_DBG_DRV , + ("debug level %d debug flags 0x%.8x\n", + g_al_dbg_level, + g_al_dbg_flags)); + + cl_free( param_path.Buffer ); + BUS_EXIT( BUS_DBG_DRV ); + return status; +} + + +static NTSTATUS +bus_drv_open( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + BUS_ENTER( BUS_DBG_DRV ); + + UNUSED_PARAM( p_dev_obj ); + + CL_ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL ); + + /* We always succeed file handles creation. 
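+	 * Nothing is allocated at open time; per-handle AL state is torn
+	 * down in bus_drv_cleanup via al_dev_close when the handle is
+	 * released.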
*/ + p_irp->IoStatus.Status = STATUS_SUCCESS; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + BUS_EXIT( BUS_DBG_DRV ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +bus_drv_cleanup( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + NTSTATUS status; + + BUS_ENTER( BUS_DBG_DRV ); + + UNUSED_PARAM( p_dev_obj ); + + CL_ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL ); + + /* + * Note that we don't acquire the remove and stop lock on close to allow + * applications to close the device when the locks are already held. + */ + status = cl_to_ntstatus( al_dev_close( p_irp ) ); + + /* Complete the IRP. */ + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + BUS_EXIT( BUS_DBG_DRV ); + return status; +} + + +static NTSTATUS +bus_drv_close( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + UNUSED_PARAM( p_dev_obj ); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + return STATUS_SUCCESS; +} + + +static NTSTATUS +bus_drv_ioctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + NTSTATUS status; + bus_fdo_ext_t *p_ext; + PIO_STACK_LOCATION p_io_stack; + + BUS_ENTER( BUS_DBG_DRV ); + + /* Get the extension. */ + p_ext = p_dev_obj->DeviceExtension; + + /* Get the stack location. */ + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* Acquire the stop lock. */ + status = IoAcquireRemoveLock( &p_ext->cl_ext.stop_lock, p_irp ); + if( !NT_SUCCESS( status ) ) + { + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + BUS_EXIT( BUS_DBG_DRV ); + return status; + } + + /* Acquire the remove lock. */ + status = IoAcquireRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + if( !NT_SUCCESS( status ) ) + { + IoReleaseRemoveLock( &p_ext->cl_ext.stop_lock, p_irp ); + p_irp->IoStatus.Status = status; + p_irp->IoStatus.Information = 0; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + BUS_EXIT( BUS_DBG_DRV ); + return status; + } + + status = cl_to_ntstatus( al_dev_ioctl( p_irp ) ); + + /* Only pass down if not handled and not PDO device. */ + if( status == STATUS_INVALID_DEVICE_REQUEST && p_ext->cl_ext.p_next_do ) + { + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + } + + /* Release the remove and stop locks. 
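+	 * They were acquired in stop-then-remove order above, so release
+	 * them in the reverse order here.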
*/ + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + IoReleaseRemoveLock( &p_ext->cl_ext.stop_lock, p_irp ); + + BUS_EXIT( BUS_DBG_DRV ); + return status; +} + + +static NTSTATUS +bus_drv_sysctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + NTSTATUS status; + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_DRV ); + + CL_ASSERT( p_dev_obj ); + CL_ASSERT( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->cl_ext.p_next_do ) + { + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + } + else + { + status = p_irp->IoStatus.Status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + } + + BUS_EXIT( BUS_DBG_DRV ); + return status; +} + + +static void +bus_drv_unload( + IN DRIVER_OBJECT *p_driver_obj ) +{ + BUS_ENTER( BUS_DBG_DRV ); + + UNUSED_PARAM( p_driver_obj ); + + CL_DEINIT; + +#if defined(EVENT_TRACING) + WPP_CLEANUP(p_driver_obj); +#endif + + BUS_EXIT( BUS_DBG_DRV ); + +} + + +NTSTATUS +DriverEntry( + IN DRIVER_OBJECT *p_driver_obj, + IN UNICODE_STRING *p_registry_path ) +{ + NTSTATUS status; + + BUS_ENTER( BUS_DBG_DRV ); + +#if defined(EVENT_TRACING) + WPP_INIT_TRACING(p_driver_obj ,p_registry_path); +#endif + + status = CL_INIT; + if( !NT_SUCCESS(status) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_init returned %08X.\n", status) ); + return status; + } + + /* Store the driver object pointer in the global parameters. */ + bus_globals.p_driver_obj = p_driver_obj; + + /* Get the registry values. */ + status = __read_registry( p_registry_path ); + if( !NT_SUCCESS(status) ) + { + CL_DEINIT; + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("__read_registry returned %08x.\n", status) ); + return status; + } + + /* Setup the entry points. */ + p_driver_obj->MajorFunction[IRP_MJ_CREATE] = bus_drv_open; + p_driver_obj->MajorFunction[IRP_MJ_CLEANUP] = bus_drv_cleanup; + p_driver_obj->MajorFunction[IRP_MJ_CLOSE] = bus_drv_close; + p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp; + p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power; + p_driver_obj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = bus_drv_ioctl; + p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = bus_drv_sysctl; + p_driver_obj->DriverUnload = bus_drv_unload; + p_driver_obj->DriverExtension->AddDevice = bus_add_device; + + BUS_EXIT( BUS_DBG_DRV ); + return STATUS_SUCCESS; +} diff --git a/branches/Ndi/core/bus/kernel/bus_driver.h b/branches/Ndi/core/bus/kernel/bus_driver.h new file mode 100644 index 00000000..98c00bee --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_driver.h @@ -0,0 +1,207 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined _BUS_DRIVER_H_ +#define _BUS_DRIVER_H_ + +#include "complib/cl_types.h" +#include "complib/cl_atomic.h" +#include "complib/cl_debug.h" +#include "complib/cl_mutex.h" +#include "complib/cl_qlist.h" +#include "complib/cl_ptr_vector.h" +#include "complib/cl_pnp_po.h" +#include "iba/ib_al.h" +#include "bus_port_mgr.h" +#include "bus_iou_mgr.h" + +/* Safe string functions. */ +#if WINVER == 0x500 +/* + * Windows 2000 doesn't support the inline version of safe strings. + * Force the use of the library version of safe strings. + */ +#define NTSTRSAFE_LIB +#endif +#include + + +/* + * Main header for IB Bus driver. + */ + +#define BUS_ENTER( lvl ) \ + CL_ENTER( lvl, bus_globals.dbg_lvl ) + +#define BUS_EXIT( lvl ) \ + CL_EXIT( lvl, bus_globals.dbg_lvl ) + +#define BUS_TRACE( lvl, msg ) \ + CL_TRACE( lvl, bus_globals.dbg_lvl, msg ) + +#define BUS_TRACE_EXIT( lvl, msg ) \ + CL_TRACE_EXIT( lvl, bus_globals.dbg_lvl, msg ) + +#define BUS_PRINT( lvl, msg ) \ + CL_PRINT( lvl, bus_globals.dbl_lvl, msg ) + +#define BUS_DBG_ERROR CL_DBG_ERROR +#define BUS_DBG_DRV (1 << 0) +#define BUS_DBG_PNP (1 << 1) +#define BUS_DBG_POWER (1 << 2) +#define BUS_DBG_PORT (1 << 3) +#define BUS_DBG_IOU (1 << 4) + +/* + * ALLOC_PRAGMA sections: + * PAGE + * Default pagable code. Won't be locked in memory. + * + * PAGE_PNP + * Code that needs to be locked in memory when the device is + * in the paging, crash dump, or hibernation path. + */ + + +/* + * Device extension for the device object that serves as entry point for + * the interface and IOCTL requests. + */ +typedef struct _bus_fdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + /* + * Device power map returned by the bus driver for the device, used + * when sending IRP_MN_SET_POWER for device state in response to + * IRP_MN_SET_POWER for system state. + */ + DEVICE_POWER_STATE po_state[PowerSystemMaximum]; + + port_mgr_t *p_port_mgr; + iou_mgr_t *p_iou_mgr; + + /* Interface names are generated by IoRegisterDeviceInterface. */ + UNICODE_STRING al_ifc_name; + UNICODE_STRING ci_ifc_name; + + /* Number of references on the upper interface. */ + atomic32_t n_al_ifc_ref; + /* Number of references on the CI interface. */ + atomic32_t n_ci_ifc_ref; + +} bus_fdo_ext_t; + + +/* + * Device extension for bus driver PDOs. + */ +typedef struct _bus_pdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + cl_list_item_t list_item; + + /* All reported PDOs are children of an HCA. */ + ib_ca_handle_t h_ca; + + /* + * CA GUID copy - in case we get IRPs after the CA + * handle has been released. + */ + net64_t ca_guid; + + POWER_STATE dev_po_state; + + /* + * Pointer to the bus root device extension. Used to manage access to + * child PDO pointer vector when a child is removed politely. + */ + bus_fdo_ext_t *p_parent_ext; + + /* + * The following two flags are exclusively set, but can both be FALSE. + * Flag that indicates whether the device is present in the system or not. + * This affects how a IRP_MN_REMOVE_DEVICE IRP is handled for a child PDO. + * This flag is cleared when: + * - an HCA (for IPoIB devices) is removed from the system for all port + * devices loaded for that HCA + * - an IOU is reported as removed by the CIA. 
+ */ + boolean_t b_present; + + /* + * Flag that indicates whether the device has been reported to the PnP + * manager as having been removed. That is, the device was reported + * in a previous BusRelations query and not in a subsequent one. + * This flag is set when + * - the device is in the surprise remove state when the parent bus + * device is removed + * - the device is found to be not present during a BusRelations query + * and thus not reported. + */ + boolean_t b_reported_missing; + + /* Flag to control the behaviour of the driver during hibernation */ + uint32_t b_hibernating; + + /* work item for handling Power Management request */ + PIO_WORKITEM p_po_work_item; + +} bus_pdo_ext_t; + + +/* + * Global Driver parameters. + */ +typedef struct _bus_globals +{ + /* Debug level. */ + uint32_t dbg_lvl; + + /* Flag to control loading of Ip Over Ib driver for each HCA port. */ + uint32_t b_report_port_nic; + + /* Driver object. Used for registering of Plug and Play notifications. */ + DRIVER_OBJECT *p_driver_obj; + + /* Pointer to the one and only bus root. */ + bus_fdo_ext_t *p_bus_ext; + +} bus_globals_t; + + +extern bus_globals_t bus_globals; + + +#endif /* !defined _BUS_DRIVER_H_ */ diff --git a/branches/Ndi/core/bus/kernel/bus_iou_mgr.c b/branches/Ndi/core/bus/kernel/bus_iou_mgr.c new file mode 100644 index 00000000..81ce2cdd --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_iou_mgr.c @@ -0,0 +1,1501 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include +#include "ib_common.h" +#include "al_ca.h" +#include "al_mgr.h" +#include "bus_pnp.h" +#include "bus_iou_mgr.h" +#include +#include +#include "iba/iou_ifc.h" + + +/* {5A9649F4-0101-4a7c-8337-796C48082DA2} */ +DEFINE_GUID(GUID_BUS_TYPE_IBA, +0x5a9649f4, 0x101, 0x4a7c, 0x83, 0x37, 0x79, 0x6c, 0x48, 0x8, 0x2d, 0xa2); + + +/* + * Size of device descriptions, in the format: + * IBA\VxxxxxxPxxxxxxxxvxxxxxxxx + */ +#define IOU_DEV_ID_SIZE sizeof(L"IBA\\VxxxxxxPxxxxvxxxxxxxx") +#define IOU_DEV_ID_STRING1 L"IBA\\V%06xP%04hxv%08x" +#define IOU_DEV_ID_STRING2 L"IBA\\V%06xP%04hx" +#define IOU_HW_ID_SIZE \ + sizeof(L"IBA\\VxxxxxxPxxxxvxxxxxxxx\0IBA\\VxxxxxxPxxxx\0\0") +#define IOU_COMPAT_ID L"IBA\\IB_IOU\0\0" +#define IOU_LOCATION_SIZE \ + sizeof(L"Chassis 0xxxxxxxxxxxxxxxxx, Slot xx") + +/* + * Device extension for IOU PDOs. + */ +typedef struct _bus_iou_ext +{ + bus_pdo_ext_t pdo; + + net64_t chassis_guid; + uint8_t slot; + net64_t guid; + net32_t vend_id; + net16_t dev_id; + net32_t revision; + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + + /* Number of references on the upper interface. */ + atomic32_t n_ifc_ref; + +} bus_iou_ext_t; + + +iou_mgr_t* gp_iou_mgr = NULL; + + +/* + * Function prototypes. + */ +void +destroying_iou_mgr( + IN cl_obj_t* p_obj ); + +void +free_iou_mgr( + IN cl_obj_t* p_obj ); + +ib_api_status_t +bus_reg_iou_pnp( void ); + +ib_api_status_t +iou_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +iou_mgr_iou_add( + IN ib_pnp_iou_rec_t* p_pnp_rec ); + +void +iou_mgr_iou_remove( + IN ib_pnp_iou_rec_t* p_pnp_rec ); + +static NTSTATUS +iou_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +iou_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +iou_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_query_device_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_hardware_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_compatible_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_unique_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_description( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_location( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +iou_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +iou_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT 
cl_irp_action_t* const	p_action );
+
+
+/* All PnP code is called at passive, so it can all be paged out. */
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, iou_start)
+#pragma alloc_text (PAGE, iou_query_remove)
+#pragma alloc_text (PAGE, iou_release_resources)
+#pragma alloc_text (PAGE, iou_remove)
+#pragma alloc_text (PAGE, iou_surprise_remove)
+#pragma alloc_text (PAGE, iou_query_capabilities)
+#pragma alloc_text (PAGE, iou_query_target_relations)
+#pragma alloc_text (PAGE, iou_query_device_id)
+#pragma alloc_text (PAGE, iou_query_hardware_ids)
+#pragma alloc_text (PAGE, iou_query_compatible_ids)
+#pragma alloc_text (PAGE, iou_query_unique_id)
+#pragma alloc_text (PAGE, iou_query_description)
+#pragma alloc_text (PAGE, iou_query_location)
+#pragma alloc_text (PAGE, iou_query_bus_info)
+#pragma alloc_text (PAGE, iou_query_interface)
+#pragma alloc_text (PAGE_PNP, iou_set_power)
+#pragma alloc_text (PAGE, iou_mgr_iou_add)
+#pragma alloc_text (PAGE, iou_mgr_iou_remove)
+#endif
+
+
+/*
+ * Global virtual function pointer tables shared between all
+ * instances of IOU PDOs.
+ */
+static const cl_vfptr_pnp_po_t		vfptr_iou_pnp = {
+	"IB IOU",
+	iou_start,
+	cl_irp_succeed,
+	cl_irp_succeed,
+	cl_irp_succeed,
+	iou_query_remove,
+	iou_release_resources,
+	iou_remove,
+	cl_irp_succeed,
+	iou_surprise_remove,
+	iou_query_capabilities,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_succeed,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_complete,
+	iou_query_target_relations,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_complete,
+	iou_query_bus_info,
+	iou_query_interface,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_complete,
+	cl_irp_succeed,				// QueryPower
+	iou_set_power,				// SetPower
+	cl_irp_unsupported,			// PowerSequence
+	cl_irp_unsupported			// WaitWake
+};
+
+
+static const cl_vfptr_query_txt_t		vfptr_iou_query_txt = {
+	iou_query_device_id,
+	iou_query_hardware_ids,
+	iou_query_compatible_ids,
+	iou_query_unique_id,
+	iou_query_description,
+	iou_query_location
+};
+
+
+/*
+ * Create the AL load service.
+ */
+ib_api_status_t
+create_iou_mgr(
+		OUT			iou_mgr_t** const			pp_iou_mgr )
+{
+	ib_api_status_t		status;
+	cl_status_t			cl_status;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	CL_ASSERT( !gp_iou_mgr );
+
+	gp_iou_mgr = cl_zalloc( sizeof(iou_mgr_t) );
+	if( !gp_iou_mgr )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate IOU manager.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	/* Construct the load service. */
+	cl_obj_construct( &gp_iou_mgr->obj, AL_OBJ_TYPE_LOADER );
+	cl_mutex_construct( &gp_iou_mgr->pdo_mutex );
+	cl_qlist_init( &gp_iou_mgr->iou_list );
+
+	cl_status = cl_mutex_init( &gp_iou_mgr->pdo_mutex );
+	if( cl_status != CL_SUCCESS )
+	{
+		free_iou_mgr( &gp_iou_mgr->obj );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("cl_mutex_init returned %s.\n", cl_status_text[cl_status]) );
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Initialize the load service object. */
+	cl_status = cl_obj_init( &gp_iou_mgr->obj, CL_DESTROY_SYNC,
+		destroying_iou_mgr, NULL, free_iou_mgr );
+	if( cl_status != CL_SUCCESS )
+	{
+		free_iou_mgr( &gp_iou_mgr->obj );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("cl_obj_init returned %s.\n", cl_status_text[cl_status]) );
+		return ib_convert_cl_status( cl_status );
+	}
+
+	/* Register for IOU PnP events.
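+	 * IB_PNP_FLAG_REG_SYNC asks ib_reg_pnp to report all IOUs that
+	 * already exist before returning, so no add events are missed.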
+	 */
+	status = bus_reg_iou_pnp();
+	if( status != IB_SUCCESS )
+	{
+		cl_obj_destroy( &gp_iou_mgr->obj );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("bus_reg_iou_pnp returned %s.\n", ib_get_err_str(status)) );
+		return status;
+	}
+
+	*pp_iou_mgr = gp_iou_mgr;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return IB_SUCCESS;
+}
+
+
+/*
+ * Pre-destroy the load service.
+ */
+void
+destroying_iou_mgr(
+	IN				cl_obj_t*					p_obj )
+{
+	ib_api_status_t			status;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	CL_ASSERT( p_obj );
+	CL_ASSERT( gp_iou_mgr == PARENT_STRUCT( p_obj, iou_mgr_t, obj ) );
+	UNUSED_PARAM( p_obj );
+
+	/* Deregister for IOU PnP events. */
+	if( gp_iou_mgr->h_pnp )
+	{
+		status = ib_dereg_pnp( gp_iou_mgr->h_pnp,
+			(ib_pfn_destroy_cb_t)cl_obj_deref );
+		CL_ASSERT( status == IB_SUCCESS );
+	}
+	BUS_EXIT( BUS_DBG_PNP );
+}
+
+
+/*
+ * Free the load service.
+ */
+void
+free_iou_mgr(
+	IN				cl_obj_t*					p_obj )
+{
+	bus_pdo_ext_t	*p_ext;
+	cl_list_item_t	*p_list_item;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	CL_ASSERT( p_obj );
+	CL_ASSERT( gp_iou_mgr == PARENT_STRUCT( p_obj, iou_mgr_t, obj ) );
+
+	/*
+	 * Mark all IOU PDOs as no longer present.  This will cause them
+	 * to be removed when they process the IRP_MN_REMOVE_DEVICE.
+	 */
+	p_list_item = cl_qlist_remove_head( &gp_iou_mgr->iou_list );
+	while( p_list_item != cl_qlist_end( &gp_iou_mgr->iou_list ) )
+	{
+		p_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item );
+		p_list_item = cl_qlist_remove_head( &gp_iou_mgr->iou_list );
+		if( p_ext->cl_ext.pnp_state == SurpriseRemoved )
+		{
+			CL_ASSERT( !p_ext->b_present );
+			p_ext->b_reported_missing = TRUE;
+			BUS_TRACE( BUS_DBG_PNP, ("%s: ext %p, present %d, missing %d .\n",
+				p_ext->cl_ext.vfptr_pnp_po->identity, p_ext, p_ext->b_present, p_ext->b_reported_missing ) );
+			continue;
+		}
+		if( p_ext->h_ca )
+		{
+			/* Invalidate bus relations for the HCA. */
+			IoInvalidateDeviceRelations(
+				p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations );
+
+			/* Release the reference on the CA object. */
+			deref_al_obj( &p_ext->h_ca->obj );
+		}
+		IoDeleteDevice( p_ext->cl_ext.p_self_do );
+	}
+
+	cl_mutex_destroy( &gp_iou_mgr->pdo_mutex );
+	cl_obj_deinit( p_obj );
+	cl_free( gp_iou_mgr );
+	gp_iou_mgr = NULL;
+	BUS_EXIT( BUS_DBG_PNP );
+}
+
+
+/*
+ * Register the load service for the given PnP class events.
+ */
+ib_api_status_t
+bus_reg_iou_pnp( void )
+{
+	ib_pnp_req_t			pnp_req;
+	ib_api_status_t			status;
+
+	cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) );
+	pnp_req.pnp_class	= IB_PNP_IOU | IB_PNP_FLAG_REG_SYNC;
+	pnp_req.pnp_context = gp_iou_mgr;
+	pnp_req.pfn_pnp_cb	= iou_mgr_pnp_cb;
+
+	status = ib_reg_pnp( gh_al, &pnp_req, &gp_iou_mgr->h_pnp );
+
+	if( status == IB_SUCCESS )
+	{
+		/* Reference the load service on behalf of the ib_reg_pnp call. */
+		cl_obj_ref( &gp_iou_mgr->obj );
+	}
+
+	return status;
+}
+
+
+/*
+ * Load service PnP event callback.
+ */
+ib_api_status_t
+iou_mgr_pnp_cb(
+	IN				ib_pnp_rec_t*				p_pnp_rec )
+{
+	ib_api_status_t			status;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	CL_ASSERT( p_pnp_rec );
+	CL_ASSERT( gp_iou_mgr == p_pnp_rec->pnp_context );
+
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_IOU_ADD:
+		status = iou_mgr_iou_add( (ib_pnp_iou_rec_t*)p_pnp_rec );
+		break;
+
+	case IB_PNP_IOU_REMOVE:
+		iou_mgr_iou_remove( (ib_pnp_iou_rec_t*)p_pnp_rec );
+		/* Fall through to report success. */
+
+	default:
+		status = IB_SUCCESS;
+		break;
+	}
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+
+/*
+ * Called to get child relations for the bus root.
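+ * The pdo_mutex serializes this against iou_mgr_iou_add and
+ * iou_mgr_iou_remove while the relations list is built.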
+ */ +NTSTATUS +iou_mgr_get_bus_relations( + IN const net64_t ca_guid, + IN IRP* const p_irp ) +{ + NTSTATUS status; + + BUS_ENTER( BUS_DBG_PNP ); + + cl_mutex_acquire( &gp_iou_mgr->pdo_mutex ); + status = bus_get_relations( &gp_iou_mgr->iou_list, ca_guid, p_irp ); + cl_mutex_release( &gp_iou_mgr->pdo_mutex ); + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static ib_api_status_t +__iou_was_hibernated( + IN ib_pnp_iou_rec_t* p_pnp_rec ) +{ + NTSTATUS status; + cl_list_item_t *p_list_item; + bus_iou_ext_t *p_iou_ext; + bus_pdo_ext_t *p_pdo_ext = NULL; + size_t n_devs = 0; + cl_qlist_t* p_pdo_list = &gp_iou_mgr->iou_list; + + BUS_ENTER( BUS_DBG_PNP ); + + cl_mutex_acquire( &gp_iou_mgr->pdo_mutex ); + + /* Count the number of child devices. */ + for( p_list_item = cl_qlist_head( p_pdo_list ); + p_list_item != cl_qlist_end( p_pdo_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_pdo_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item ); + p_iou_ext = (bus_iou_ext_t*)p_pdo_ext; + + /* TODO: maybe we need more search patterns like vend_id, dev_id ... */ + if( p_pdo_ext->b_present && p_pdo_ext->b_hibernating && + (p_iou_ext->guid == p_pnp_rec->pnp_rec.guid) ) + { + n_devs++; + break; + } + + BUS_TRACE( BUS_DBG_PNP, + ("Skipped PDO for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d, port_guid %I64x.\n", + p_pdo_ext->cl_ext.vfptr_pnp_po->identity, p_pdo_ext->cl_ext.p_self_do, + p_pdo_ext, p_pdo_ext->b_present, p_pdo_ext->b_reported_missing, + p_pdo_ext->b_hibernating, p_iou_ext->guid ) ); + } + + if (n_devs) + { + /* Take a reference on the parent HCA. */ + p_pdo_ext->h_ca = acquire_ca( p_pnp_rec->ca_guid ); + if( !p_pdo_ext->h_ca ) + { + BUS_TRACE( BUS_DBG_ERROR, ("acquire_ca failed to find CA by guid %I64x\n", + p_pnp_rec->ca_guid ) ); + status = IB_INVALID_GUID; + } + else + { + p_pdo_ext->b_hibernating = FALSE; + p_pnp_rec->pnp_rec.context = p_pdo_ext; + status = IB_SUCCESS; + p_iou_ext = (bus_iou_ext_t*)p_pdo_ext; + BUS_TRACE( BUS_DBG_PNP, + ("Found PDO for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d, port_guid %I64x.\n", + p_pdo_ext->cl_ext.vfptr_pnp_po->identity, p_pdo_ext->cl_ext.p_self_do, + p_pdo_ext, p_pdo_ext->b_present, p_pdo_ext->b_reported_missing, + p_pdo_ext->b_hibernating, p_iou_ext->guid ) ); + } + } + else + { + BUS_TRACE( BUS_DBG_PNP, ("Failed to find PDO for guid %I64x .\n", + p_pnp_rec->pnp_rec.guid ) ); + status = IB_NOT_FOUND; + } + + cl_mutex_release( &gp_iou_mgr->pdo_mutex ); + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + +ib_api_status_t +iou_mgr_iou_add( + IN ib_pnp_iou_rec_t* p_pnp_rec ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_pdo; + bus_iou_ext_t *p_iou_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + /* Upon hibernating of the computer IB_BUS driver doesn't remove PDO, but + marks with a flag. So we first try to find an existing PDO for this port, + marked with this flag. If it was found, we turn off the flag and use this PDO */ + status = __iou_was_hibernated(p_pnp_rec); + if( status != IB_NOT_FOUND ) + { + BUS_EXIT( BUS_DBG_PNP ); + return status; + } + + /* Create the PDO for the new port device. */ + status = IoCreateDevice( bus_globals.p_driver_obj, sizeof(bus_iou_ext_t), + NULL, FILE_DEVICE_CONTROLLER, + FILE_DEVICE_SECURE_OPEN | FILE_AUTOGENERATED_DEVICE_NAME, + FALSE, &p_pdo ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("IoCreateDevice returned %08x.\n", status) ); + return IB_ERROR; + } + + /* Initialize the device extension. 
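+	 * cl_init_pnp_po_ext wires the PnP/PO dispatch tables
+	 * (vfptr_iou_pnp and vfptr_iou_query_txt) into the new PDO.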
*/ + cl_init_pnp_po_ext( p_pdo, NULL, p_pdo, bus_globals.dbg_lvl, + &vfptr_iou_pnp, &vfptr_iou_query_txt ); + + /* Set the DO_BUS_ENUMERATED_DEVICE flag to mark it as a PDO. */ + p_pdo->Flags |= DO_BUS_ENUMERATED_DEVICE; + + p_iou_ext = p_pdo->DeviceExtension; + p_iou_ext->pdo.dev_po_state.DeviceState = PowerDeviceD0; + p_iou_ext->pdo.p_parent_ext = bus_globals.p_bus_ext; + p_iou_ext->pdo.b_present = TRUE; + p_iou_ext->pdo.b_reported_missing = FALSE; + BUS_TRACE( BUS_DBG_PNP, ("%s: ext %p, present %d, missing %d .\n", + p_iou_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_iou_ext, p_iou_ext->pdo.b_present, p_iou_ext->pdo.b_reported_missing ) ); + + p_iou_ext->guid = p_pnp_rec->guid; + p_iou_ext->chassis_guid = p_pnp_rec->chassis_guid; + p_iou_ext->slot = p_pnp_rec->slot; + p_iou_ext->vend_id = cl_ntoh32( p_pnp_rec->vend_id ); + if( p_iou_ext->vend_id == 0x00066a ) + p_iou_ext->dev_id = (net16_t)(p_pnp_rec->pnp_rec.guid >> 32) & 0x00FF; + else + p_iou_ext->dev_id = cl_ntoh16( p_pnp_rec->dev_id ); + p_iou_ext->revision = cl_ntoh32( p_pnp_rec->revision ); + cl_memcpy( p_iou_ext->desc, p_pnp_rec->desc, + IB_NODE_DESCRIPTION_SIZE + 1 ); + + /* Cache the CA GUID. */ + p_iou_ext->pdo.ca_guid = p_pnp_rec->ca_guid; + + /* Take a reference on the parent HCA. */ + p_iou_ext->pdo.h_ca = acquire_ca( p_pnp_rec->ca_guid ); + if( !p_iou_ext->pdo.h_ca ) + { + IoDeleteDevice( p_pdo ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("acquire_ca failed to find CA.\n") ); + return IB_INVALID_GUID; + } + + /* Store the device extension in the PDO list for future queries. */ + cl_mutex_acquire( &gp_iou_mgr->pdo_mutex ); + cl_qlist_insert_tail( &gp_iou_mgr->iou_list, + &p_iou_ext->pdo.list_item ); + cl_mutex_release( &gp_iou_mgr->pdo_mutex ); + + /* + * Set the context of the PNP event. The context is passed in for future + * events on the same port. + */ + p_pnp_rec->pnp_rec.context = p_iou_ext; + + /* Tell the PnP Manager to rescan for the HCA's bus relations. */ + IoInvalidateDeviceRelations( + p_iou_ext->pdo.h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations ); + + /* Invalidate removal relations for the bus driver. */ + IoInvalidateDeviceRelations( + bus_globals.p_bus_ext->cl_ext.p_pdo, RemovalRelations ); + + BUS_EXIT( BUS_DBG_PNP ); + + return IB_SUCCESS; +} + + +void +iou_mgr_iou_remove( + IN ib_pnp_iou_rec_t* p_pnp_rec ) +{ + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + /* The PNP record's context is the port extension. */ + p_ext = p_pnp_rec->pnp_rec.context; + CL_ASSERT( p_ext ); + + /* + * Flag the port PDO as no longer being present. We have to wait until + * the PnP manager removes it to clean up. However, we do release the + * reference on the CA object in order to allow the removal of the HCA + * to proceed should it occur before the port's PDO is cleaned up. + */ + cl_mutex_acquire( &gp_iou_mgr->pdo_mutex ); + CL_ASSERT( p_ext->h_ca ); + + if( p_ext->b_hibernating ) + { + BUS_TRACE( BUS_DBG_PNP, ("Skip port removing for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d .\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext->cl_ext.p_self_do, p_ext, p_ext->b_present, + p_ext->b_reported_missing, p_ext->b_hibernating ) ); + goto hca_deref; + } + + p_ext->b_present = FALSE; + BUS_TRACE( BUS_DBG_PNP, ("%s: ext %p, present %d, missing %d .\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext, p_ext->b_present, p_ext->b_reported_missing ) ); + + /* Invalidate removal relations for the bus driver. 
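+	 * This prompts the PnP manager to re-query the bus driver's removal
+	 * relations, which no longer include this IOU.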
*/ + IoInvalidateDeviceRelations( bus_globals.p_bus_ext->cl_ext.p_pdo, + RemovalRelations ); + + /* Invalidate bus relations for the HCA. */ + IoInvalidateDeviceRelations( + p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations ); + +hca_deref: + deref_al_obj( &p_ext->h_ca->obj ); + p_ext->h_ca = NULL; + cl_mutex_release( &gp_iou_mgr->pdo_mutex ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +iou_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Notify the Power Manager that the device is started. */ + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_iou_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + *p_action = IrpComplete; + if( p_ext->n_ifc_ref ) + { + /* + * Our interface is still being held by someone. + * Rollback the PnP state that was changed in the complib handler. + */ + cl_rollback_pnp_state( &p_ext->pdo.cl_ext ); + + /* Fail the query. */ + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Failing IRP_MN_QUERY_REMOVE_DEVICE:\n" + "\tInterface has %d reference\n", p_ext->n_ifc_ref ) ); + return STATUS_UNSUCCESSFUL; + } + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +iou_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + bus_iou_ext_t *p_ext; + POWER_STATE po_state; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Remove this PDO from its list. */ + cl_mutex_acquire( &gp_iou_mgr->pdo_mutex ); + BUS_TRACE( BUS_DBG_PNP, ("Removing IOU from list.\n") ); + cl_qlist_remove_item( &gp_iou_mgr->iou_list, &p_ext->pdo.list_item ); + cl_mutex_release( &gp_iou_mgr->pdo_mutex ); + po_state.DeviceState = PowerDeviceD3; + PoSetPowerState( p_ext->pdo.cl_ext.p_pdo, DevicePowerState, po_state ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +iou_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_iou_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->pdo.b_present ) + { + CL_ASSERT( p_ext->pdo.cl_ext.pnp_state != NotStarted ); + CL_ASSERT( !p_ext->pdo.b_reported_missing ); + /* Reset the state to NotStarted. CompLib set it to Deleted. */ + cl_set_pnp_state( &p_ext->pdo.cl_ext, NotStarted ); + /* Don't delete the device. It may simply be disabled. */ + *p_action = IrpComplete; + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Device still present.\n") ); + return STATUS_SUCCESS; + } + + if( !p_ext->pdo.b_reported_missing ) + { + /* Reset the state to RemovePending. Complib set it to Deleted. */ + cl_rollback_pnp_state( &p_ext->pdo.cl_ext ); + *p_action = IrpComplete; + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Device not reported missing yet.\n") ); + return STATUS_SUCCESS; + } + + /* Wait for all I/O operations to complete. */ + IoReleaseRemoveLockAndWait( &p_ext->pdo.cl_ext.remove_lock, p_irp ); + + /* Release resources if it was not done yet. 
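*/

/*
 * The remove handler above distinguishes three cases before it will delete
 * the PDO. A compact sketch of that decision, with the two extension flags
 * reduced to plain ints (illustrative only):
 */
#include <stdio.h>

typedef enum { KEEP_NOT_STARTED, KEEP_REMOVE_PENDING, DELETE_NOW } remove_action_t;

static remove_action_t
pdo_remove_action( int b_present, int b_reported_missing )
{
    if( b_present )
        return KEEP_NOT_STARTED;    /* device only disabled: reset to NotStarted */
    if( !b_reported_missing )
        return KEEP_REMOVE_PENDING; /* wait until PnP has seen it missing */
    return DELETE_NOW;              /* safe to release resources and delete */
}

int main( void )
{
    printf( "%d %d %d\n",
        pdo_remove_action( 1, 0 ),
        pdo_remove_action( 0, 0 ),
        pdo_remove_action( 0, 1 ) );
    return 0;
}

/*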
*/ + if( p_ext->pdo.cl_ext.last_pnp_state != SurpriseRemoved ) + p_ext->pdo.cl_ext.vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + IoDeleteDevice( p_dev_obj ); + + *p_action = IrpDoNothing; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_iou_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + p_ext->pdo.b_present = FALSE; + p_ext->pdo.b_reported_missing = TRUE; + BUS_TRACE( BUS_DBG_PNP, ("%s: ext %p, present %d, missing %d .\n", + p_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_ext, p_ext->pdo.b_present, p_ext->pdo.b_reported_missing ) ); + + *p_action = IrpComplete; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + DEVICE_CAPABILITIES *p_caps; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + p_caps = p_io_stack->Parameters.DeviceCapabilities.Capabilities; + + p_caps->DeviceD1 = FALSE; + p_caps->DeviceD2 = FALSE; + p_caps->LockSupported = FALSE; + p_caps->EjectSupported = FALSE; + p_caps->Removable = TRUE; + p_caps->DockDevice = FALSE; + p_caps->UniqueID = TRUE; + p_caps->SilentInstall = TRUE; + p_caps->RawDeviceOK = FALSE; + p_caps->SurpriseRemovalOK = FALSE; + p_caps->WakeFromD0 = FALSE; + p_caps->WakeFromD1 = FALSE; + p_caps->WakeFromD2 = FALSE; + p_caps->WakeFromD3 = FALSE; + p_caps->HardwareDisabled = FALSE; + p_caps->DeviceState[PowerSystemWorking] = PowerDeviceD0; + p_caps->DeviceState[PowerSystemSleeping1] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping2] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping3] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemHibernate] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemShutdown] = PowerDeviceD3; + p_caps->SystemWake = PowerSystemUnspecified; + p_caps->DeviceWake = PowerDeviceUnspecified; + p_caps->D1Latency = 0; + p_caps->D2Latency = 0; + p_caps->D3Latency = 0; + + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + + BUS_ENTER( BUS_DBG_PNP ); + + *p_action = IrpComplete; + + status = cl_alloc_relations( p_irp, 1 ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_alloc_relations returned 0x%08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + p_rel->Count = 1; + p_rel->Objects[0] = p_dev_obj; + + ObReferenceObject( p_dev_obj ); + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static NTSTATUS +iou_query_device_id( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + bus_iou_ext_t *p_ext; + WCHAR *p_string; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = (bus_iou_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + /* Device ID is "IBA\SID_<sid>" where <sid> is the IPoIB Service ID.
*/ + p_string = ExAllocatePool( PagedPool, IOU_DEV_ID_SIZE ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate device ID buffer (%d bytes).\n", + IOU_DEV_ID_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + status = + RtlStringCbPrintfW( p_string, IOU_DEV_ID_SIZE, IOU_DEV_ID_STRING1, + p_ext->vend_id, p_ext->dev_id, p_ext->revision ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to format device ID string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_hardware_ids( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + bus_iou_ext_t *p_ext; + WCHAR *p_string, *p_start; + size_t size; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = (bus_iou_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, IOU_HW_ID_SIZE ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate hardware ID buffer (%d bytes).\n", + IOU_HW_ID_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + p_start = p_string; + size = IOU_HW_ID_SIZE; + /* Fill in the first HW ID. */ + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL | STRSAFE_NO_TRUNCATION, IOU_DEV_ID_STRING1, + p_ext->vend_id, p_ext->dev_id, p_ext->revision ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to format device ID string.\n") ); + return status; + } + /* Fill in the second HW ID. */ + CL_ASSERT( *p_start == L'\0' ); + p_start++; + size -= sizeof(WCHAR); + status = RtlStringCbPrintfExW( p_start, size, NULL, NULL, + STRSAFE_FILL_BEHIND_NULL | STRSAFE_NO_TRUNCATION, IOU_DEV_ID_STRING2, + p_ext->vend_id, p_ext->dev_id ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to format device ID string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_compatible_ids( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + WCHAR *p_string; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + p_string = ExAllocatePool( PagedPool, sizeof(IOU_COMPAT_ID) ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate compatible ID buffer (%d bytes).\n", + IOU_HW_ID_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + cl_memcpy( p_string, IOU_COMPAT_ID, sizeof(IOU_COMPAT_ID) ); + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_unique_id( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + WCHAR *p_string; + bus_iou_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + /* The instance ID is the port GUID. 
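*/

/*
 * The hardware-ID code above returns a REG_MULTI_SZ: each string is
 * NUL-terminated and the list ends with an extra NUL. A user-mode sketch of
 * building and walking such a buffer (the ID formats are placeholders, not
 * the real IOU_DEV_ID_STRING* values):
 */
#include <stdio.h>
#include <wchar.h>

int main( void )
{
    wchar_t buf[128];
    wchar_t *p = buf;
    size_t left = sizeof(buf) / sizeof(buf[0]);
    int n;

    /* First ID, then step past its terminator (mirrors RtlStringCbPrintfExW). */
    n = swprintf( p, left, L"IBA\\V%06xP%04x", 0x066a, 0x0030 );
    p += n + 1;  left -= (size_t)n + 1;

    /* Second, less-specific ID. */
    n = swprintf( p, left, L"IBA\\V%06x", 0x066a );
    p += n + 1;  left -= (size_t)n + 1;

    *p = L'\0';  /* the extra NUL that terminates the MULTI_SZ */

    /* Walk the list until the empty string. */
    for( p = buf; *p; p += wcslen( p ) + 1 )
        wprintf( L"%ls\n", p );
    return 0;
}

/*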
*/ + p_string = ExAllocatePool( PagedPool, sizeof(WCHAR) * 33 ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate instance ID buffer (%d bytes).\n", + sizeof(WCHAR) * 33) ); + return STATUS_NO_MEMORY; + } + + status = RtlStringCchPrintfW( p_string, 33, L"%016I64x%016I64x", + p_ext->guid, p_ext->pdo.ca_guid ); + if( !NT_SUCCESS( status ) ) + { + CL_ASSERT( NT_SUCCESS( status ) ); + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("RtlStringCchPrintfW returned %08x.\n", status) ); + return status; + } + + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_description( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + WCHAR *p_string; + bus_iou_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + /* The description is the IOU's node description string. */ + p_string = + ExAllocatePool( PagedPool, sizeof(WCHAR) * sizeof(p_ext->desc) ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate device description buffer (%d bytes).\n", + sizeof(WCHAR) * sizeof(p_ext->desc)) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + status = RtlStringCchPrintfW( p_string, sizeof(p_ext->desc), + L"%S", p_ext->desc ); + if( !NT_SUCCESS( status ) ) + { + CL_ASSERT( NT_SUCCESS( status ) ); + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("RtlStringCchPrintfW returned %08x.\n", status) ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_location( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + bus_iou_ext_t *p_ext; + WCHAR *p_string; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = (bus_iou_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, IOU_LOCATION_SIZE ); + if( !p_string ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate location buffer (%d bytes).\n", + IOU_LOCATION_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + status = RtlStringCbPrintfW( p_string, IOU_LOCATION_SIZE, + L"Chassis 0x%016I64x, Slot %d", p_ext->chassis_guid, p_ext->slot ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to format location string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + PNP_BUS_INFORMATION *p_bus_info; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + *p_action = IrpComplete; + + p_bus_info = ExAllocatePool( PagedPool, sizeof(PNP_BUS_INFORMATION) ); + if( !p_bus_info ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate PNP_BUS_INFORMATION (%d bytes).\n", + sizeof(PNP_BUS_INFORMATION)) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + p_bus_info->BusTypeGuid = GUID_BUS_TYPE_IBA; + //TODO: Memory from Intel - storage miniport would not stay loaded unless + //TODO: bus type was PCI. Look here if SRP is having problems staying + //TODO: loaded.
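
/*
 * Sketch of the instance-ID formatting above: two 64-bit GUIDs rendered as
 * 32 fixed-width hex digits, hence the 33-WCHAR buffer (32 digits plus the
 * NUL). A standard-C equivalent of L"%016I64x%016I64x", with hypothetical
 * GUID values:
 */
#include <inttypes.h>
#include <stdio.h>

int main( void )
{
    uint64_t iou_guid = 0x0002c90200212345ULL; /* hypothetical IOU GUID */
    uint64_t ca_guid  = 0x0002c90200210001ULL; /* hypothetical CA GUID */
    char id[33];

    snprintf( id, sizeof(id), "%016" PRIx64 "%016" PRIx64, iou_guid, ca_guid );
    printf( "%s (len=%zu)\n", id, sizeof(id) - 1 ); /* 32 hex digits */
    return 0;
}
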
+ p_bus_info->LegacyBusType = PNPBus; + p_bus_info->BusNumber = 0; + + p_irp->IoStatus.Information = (ULONG_PTR)p_bus_info; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_iou_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ) +{ + NTSTATUS status; + ib_al_ifc_t *p_ifc; + ib_al_ifc_data_t *p_ifc_data; + iou_ifc_data_t *p_iou_data; + bus_iou_ext_t *p_ext; + const GUID *p_guid; + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Get the interface. */ + status = cl_fwd_query_ifc( + p_ext->pdo.p_parent_ext->cl_ext.p_self_do, p_io_stack ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to forward interface query: %08X\n", status) ); + return status; + } + + if( !p_io_stack->Parameters.QueryInterface.InterfaceSpecificData ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("No interface specific data!\n") ); + return status; + } + + p_ifc = (ib_al_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface; + + p_ifc_data = (ib_al_ifc_data_t*) + p_io_stack->Parameters.QueryInterface.InterfaceSpecificData; + p_guid = p_ifc_data->type; + if( !IsEqualGUID( p_guid, &GUID_IOU_INTERFACE_DATA ) ) + { + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Unsupported interface data: \n\t" + "0x%08x, 0x%04x, 0x%04x, 0x%02x, 0x%02x, 0x%02x," + "0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x.\n", + p_guid->Data1, p_guid->Data2, p_guid->Data3, + p_guid->Data4[0], p_guid->Data4[1], p_guid->Data4[2], + p_guid->Data4[3], p_guid->Data4[4], p_guid->Data4[5], + p_guid->Data4[6], p_guid->Data4[7]) ); + return status; + } + + if( p_ifc_data->version != IOU_INTERFACE_DATA_VERSION ) + { + p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Unsupported version %d, expected %d\n", + p_ifc_data->version, IOU_INTERFACE_DATA_VERSION) ); + return STATUS_NOT_SUPPORTED; + } + + ASSERT( p_ifc_data->p_data ); + + if( p_ifc_data->size != sizeof(iou_ifc_data_t) ) + { + p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context ); + BUS_TRACE_EXIT( BUS_DBG_PNP, + ("Buffer too small (%d given, %d required).\n", + p_ifc_data->size, + sizeof(iou_ifc_data_t)) ); + return STATUS_BUFFER_TOO_SMALL; + } + + /* Set the interface data. */ + p_iou_data = (iou_ifc_data_t*)p_ifc_data->p_data; + + p_iou_data->ca_guid = p_ext->pdo.ca_guid; + p_iou_data->chassis_guid = p_ext->chassis_guid; + p_iou_data->slot = p_ext->slot; + p_iou_data->guid = p_ext->guid; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +iou_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_pdo_ext_t *p_ext; + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* Bottom of the stack - IRP must be completed. */ + *p_action = IrpComplete; + + /* Compare requested GUID with our supported interface GUIDs. 
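*/

/*
 * iou_query_iou_ifc() above enforces a version/size contract on the
 * caller-supplied interface data before filling it, and drops the reference
 * it took when the contract fails. A minimal sketch of that pattern, with
 * simplified stand-in types:
 */
#include <stdio.h>

#define IFC_DATA_VERSION  1

typedef struct _ifc_data
{
    int    version;  /* revision the consumer was built against */
    size_t size;     /* size of the payload the consumer provided */
    void  *p_data;   /* payload filled in only if the checks pass */
} ifc_data_t;

static int
check_ifc_data( const ifc_data_t *p, size_t required )
{
    if( p->version != IFC_DATA_VERSION )
        return -1;              /* consumer built against another revision */
    if( p->size != required )
        return -2;              /* buffer too small, or the wrong struct */
    return 0;                   /* safe to fill p->p_data */
}

int main( void )
{
    long payload;
    ifc_data_t d = { IFC_DATA_VERSION, sizeof(payload), &payload };
    printf( "check=%d\n", check_ifc_data( &d, sizeof(payload) ) );
    return 0;
}

/*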
*/ + if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType, + &GUID_IB_AL_INTERFACE ) ) + { + status = iou_query_iou_ifc( p_dev_obj, p_io_stack ); + } + else if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType, + &GUID_BUS_INTERFACE_STANDARD ) ) + { + p_ext = p_dev_obj->DeviceExtension; + if( !p_ext->h_ca || + !p_ext->b_present || + p_ext->b_reported_missing ) + { + return STATUS_NO_SUCH_DEVICE; + } + + + status = cl_fwd_query_ifc( + p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, p_io_stack ); + } + else + { + status = p_irp->IoStatus.Status; + } + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + + +/* Work item callback to handle DevicePowerD0 IRPs at passive level. */ +static void +__HibernateUpWorkItem( + IN DEVICE_OBJECT* p_dev_obj, + IN void* context ) +{ + IO_STACK_LOCATION *p_io_stack; + bus_pdo_ext_t *p_ext; + IRP *p_irp; + POWER_STATE powerState; + + BUS_ENTER( BUS_DBG_POWER ); + + p_ext = (bus_pdo_ext_t*)p_dev_obj->DeviceExtension; + p_irp = (IRP*)context; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + IoFreeWorkItem( p_ext->p_po_work_item ); + p_ext->p_po_work_item = NULL; + + while (!p_ext->h_ca) { + BUS_TRACE( BUS_DBG_PNP, ("Waiting for the end of HCA registration ... \n")); + cl_thread_suspend( 200 ); /* suspend for 200 ms */ + } + + p_ext->dev_po_state = p_io_stack->Parameters.Power.State; + powerState = PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + + BUS_TRACE( BUS_DBG_POWER, + ("PoSetPowerState: old state %d, new state to %d\n", + powerState.DeviceState, p_ext->dev_po_state )); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + + BUS_EXIT( BUS_DBG_POWER ); +} + +/* + * The PDOs created by the IB Bus driver are software devices. As such, + * all power states are supported. It is left to the HCA power policy + * owner to handle which states can be supported by the HCA. + */ +static NTSTATUS +iou_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status = STATUS_SUCCESS; + IO_STACK_LOCATION *p_io_stack; + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_POWER ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + BUS_TRACE( BUS_DBG_POWER, + ("SET_POWER for PDO %p (ext %p): type %s, state %d, action %d \n", + p_dev_obj, p_ext, + (p_io_stack->Parameters.Power.Type) ? "DevicePowerState" : "SystemPowerState", + p_io_stack->Parameters.Power.State.DeviceState, + p_io_stack->Parameters.Power.ShutdownType )); + + if ((p_io_stack->Parameters.Power.Type == SystemPowerState) && + (p_io_stack->Parameters.Power.State.SystemState == PowerSystemHibernate || + p_io_stack->Parameters.Power.State.SystemState == PowerSystemSleeping1 )) + { + BUS_TRACE( BUS_DBG_POWER, ("Setting b_hibernating flag for PDO %p \n", p_dev_obj)); + p_ext->b_hibernating = TRUE; + } + + if( p_io_stack->Parameters.Power.Type == DevicePowerState ) + { + /* After hibernation the PDO is not ready for work; we must wait for the HCA registration to finish. */ + if( p_io_stack->Parameters.Power.State.DeviceState == PowerDeviceD0 && p_ext->b_hibernating) + { + /* Process in a work item - deregister_ca and HcaDeinit block.
*/ + ASSERT( !p_ext->p_po_work_item ); + p_ext->p_po_work_item = IoAllocateWorkItem( p_dev_obj ); + if( !p_ext->p_po_work_item ) + status = STATUS_INSUFFICIENT_RESOURCES; + else { + /* Process in work item callback. */ + IoMarkIrpPending( p_irp ); + IoQueueWorkItem( + p_ext->p_po_work_item, __HibernateUpWorkItem, DelayedWorkQueue, p_irp ); + *p_action = IrpDoNothing; + BUS_EXIT( BUS_DBG_POWER ); + return STATUS_PENDING; + } + } + + /* Notify the power manager. */ + p_ext->dev_po_state = p_io_stack->Parameters.Power.State; + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + } + + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_POWER ); + return STATUS_SUCCESS; +} diff --git a/branches/Ndi/core/bus/kernel/bus_iou_mgr.h b/branches/Ndi/core/bus/kernel/bus_iou_mgr.h new file mode 100644 index 00000000..7353fcaa --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_iou_mgr.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( __BUS_IOU_MGR_H__ ) +#define __BUS_IOU_MGR_H__ + +#include +#include +#include + + +/* Global load service */ +typedef struct _iou_mgr +{ + cl_obj_t obj; + ib_pnp_handle_t h_pnp; /* Handle for iou PnP events */ + + /* Mutex protects both pointer vectors. */ + cl_mutex_t pdo_mutex; + + /* Pointer vector of child IOU PDOs. */ + cl_qlist_t iou_list; + +} iou_mgr_t; + + +ib_api_status_t +create_iou_mgr( + OUT iou_mgr_t** const pp_iou_mgr ); + + +NTSTATUS +iou_mgr_get_bus_relations( + IN const net64_t ca_guid, + IN IRP* const p_irp ); + +#endif diff --git a/branches/Ndi/core/bus/kernel/bus_pnp.c b/branches/Ndi/core/bus/kernel/bus_pnp.c new file mode 100644 index 00000000..e0188c53 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_pnp.c @@ -0,0 +1,1094 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Implementation of all PnP functionality for the FDO (power policy owner). + */ + + +#include "bus_pnp.h" +#include "al_ca.h" +#include "al_init.h" +#include "al_dev.h" +#include "bus_port_mgr.h" +#include "bus_iou_mgr.h" +#include "complib/cl_memory.h" +#include +#include "iba/ib_ci_ifc.h" + + +static NTSTATUS +fdo_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +fdo_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +fdo_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +fdo_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +fdo_query_remove_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_al_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ); + +static NTSTATUS +__get_relations( + IN const net64_t ca_guid, + IN IRP* const p_irp ); + +static NTSTATUS +__query_ci_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ); + +static NTSTATUS +fdo_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__fdo_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__fdo_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + + +/* All PnP code is called at passive, so it can all be paged out. */ +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, bus_add_device) +#pragma alloc_text (PAGE, fdo_start) +#pragma alloc_text (PAGE, fdo_query_remove) +#pragma alloc_text (PAGE, fdo_release_resources) +#pragma alloc_text (PAGE, fdo_query_capabilities) +#pragma alloc_text (PAGE, fdo_query_remove_relations) +#pragma alloc_text (PAGE, __query_al_ifc) +#pragma alloc_text (PAGE, __query_ci_ifc) +#pragma alloc_text (PAGE, __get_relations) +#pragma alloc_text (PAGE, fdo_query_interface) +#pragma alloc_text (PAGE_PNP, __fdo_query_power) +#pragma alloc_text (PAGE_PNP, __fdo_set_power) +#endif + + +/* Global virtual function pointer tables shared between all instances of FDO.
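*/

/*
 * The table that follows routes each PnP/power minor code to one handler;
 * common code dispatches through it instead of repeating switch statements.
 * A stripped-down sketch of the same dispatch, with three minors and
 * illustrative handler names:
 */
#include <stdio.h>

typedef int (*pnp_handler_t)( const char *dev );

static int on_start( const char *d )  { printf( "%s: start\n", d );  return 0; }
static int on_remove( const char *d ) { printf( "%s: remove\n", d ); return 0; }
static int on_ignore( const char *d ) { (void)d; return 0; }

typedef struct _vfptr_pnp
{
    const char    *identity;     /* name used by the trace macros */
    pnp_handler_t  pfn_start;
    pnp_handler_t  pfn_query_remove;
    pnp_handler_t  pfn_surprise_remove;
} vfptr_pnp_t;

/* Shared, const table: every device instance dispatches through it. */
static const vfptr_pnp_t vfptr_demo = { "IB Bus", on_start, on_remove, on_ignore };

int main( void )
{
    vfptr_demo.pfn_start( vfptr_demo.identity );
    vfptr_demo.pfn_query_remove( vfptr_demo.identity );
    return 0;
}

/*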
*/ +static const cl_vfptr_pnp_po_t vfptr_fdo_pnp = { + "IB Bus", + fdo_start, + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + fdo_query_remove, + fdo_release_resources, + cl_do_remove, + cl_do_sync_pnp, + cl_irp_skip, + fdo_query_capabilities, + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + cl_irp_ignore, + cl_irp_ignore, + fdo_query_remove_relations, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + fdo_query_interface, /* QueryInterface */ + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + __fdo_query_power, /* QueryPower */ + __fdo_set_power, /* SetPower */ + cl_irp_ignore, /* PowerSequence */ + cl_irp_ignore /* WaitWake */ +}; + + +NTSTATUS +bus_add_device( + IN DRIVER_OBJECT *p_driver_obj, + IN DEVICE_OBJECT *p_pdo ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_dev_obj, *p_next_do; + bus_fdo_ext_t *p_ext; + UNICODE_STRING dev_name, dos_name; + + BUS_ENTER( BUS_DBG_PNP ); + + if( bus_globals.p_bus_ext ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Bus root already exists. Only one bus root allowed.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + RtlInitUnicodeString( &dev_name, AL_DEVICE_NAME ); + RtlInitUnicodeString( &dos_name, L"\\DosDevices\\Global\\ibal" ); + + /* Create the FDO device object to attach to the stack. */ + status = IoCreateDevice( p_driver_obj, sizeof(bus_fdo_ext_t), + &dev_name, FILE_DEVICE_BUS_EXTENDER, + FILE_DEVICE_SECURE_OPEN, FALSE, &p_dev_obj ); + if( !NT_SUCCESS(status) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to create bus root FDO device.\n") ); + return status; + } + + IoDeleteSymbolicLink( &dos_name ); + status = IoCreateSymbolicLink( &dos_name, &dev_name ); + if( !NT_SUCCESS(status) ) + { + IoDeleteDevice( p_dev_obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to create symlink for dos name.\n") ); + return status; + } + + p_ext = p_dev_obj->DeviceExtension; + + p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo ); + if( !p_next_do ) + { + IoDeleteDevice( p_dev_obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("IoAttachToDeviceStack failed.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, bus_globals.dbg_lvl, + &vfptr_fdo_pnp, NULL ); + + /* Register the upper interface (the one used by clients). */ + status = IoRegisterDeviceInterface( p_pdo, + &GUID_IB_AL_INTERFACE, NULL, &p_ext->al_ifc_name ); + if( !NT_SUCCESS( status ) ) + { + IoDetachDevice( p_ext->cl_ext.p_next_do ); + IoDeleteDevice( p_dev_obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("IoRegisterDeviceInterface for upper interface returned %08x\n", + status) ); + return STATUS_NO_SUCH_DEVICE; + } + + /* Register the lower interface (the one used by HCA VPDs). */ + status = IoRegisterDeviceInterface( p_pdo, + &GUID_IB_CI_INTERFACE, NULL, &p_ext->ci_ifc_name ); + if( !NT_SUCCESS( status ) ) + { + RtlFreeUnicodeString( &p_ext->al_ifc_name ); + IoDetachDevice( p_ext->cl_ext.p_next_do ); + IoDeleteDevice( p_dev_obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("IoRegisterDeviceInterface for lower interface returned %08x\n", + status) ); + return STATUS_NO_SUCH_DEVICE; + } + + bus_globals.p_bus_ext = p_ext; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +fdo_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + bus_fdo_ext_t *p_ext; + ib_api_status_t ib_status; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Handled on the way up. 
*/ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Lower drivers failed IRP_MN_START_DEVICE.\n") ); + return status; + } + + /* Initialize AL */ + ib_status = al_initialize(); + if( ib_status != IB_SUCCESS ) + { + al_cleanup(); + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("al_initialize returned %s.\n", + ib_get_err_str(ib_status)) ); + return STATUS_UNSUCCESSFUL; + } + + /* Initialize the port manager. */ + ib_status = create_port_mgr( &p_ext->p_port_mgr ); + if( ib_status != IB_SUCCESS ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("create_port_mgr returned %s.\n", + ib_get_err_str(ib_status)) ); + return STATUS_UNSUCCESSFUL; + } + + /* Initialize the IOU manager. */ + ib_status = create_iou_mgr( &p_ext->p_iou_mgr ); + if( ib_status != IB_SUCCESS ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("create_iou_mgr returned %s.\n", + ib_get_err_str(ib_status)) ); + return STATUS_UNSUCCESSFUL; + } + + status = IoSetDeviceInterfaceState( &p_ext->al_ifc_name, TRUE ); + ASSERT( NT_SUCCESS( status ) ); + + status = IoSetDeviceInterfaceState( &p_ext->ci_ifc_name, TRUE ); + ASSERT( NT_SUCCESS( status ) ); + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static NTSTATUS +fdo_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->n_ci_ifc_ref ) + { + /* + * Our interface is still being held by someone. + * Rollback the PnP state that was changed in the cl_ext handler. + */ + cl_rollback_pnp_state( &p_ext->cl_ext ); + + /* Fail the query. */ + *p_action = IrpComplete; + BUS_TRACE_EXIT( BUS_DBG_PNP, + ("Failing IRP_MN_QUERY_REMOVE_DEVICE:\n" + "\tLowerInterface has %d references\n", + p_ext->n_ci_ifc_ref ) ); + return STATUS_UNSUCCESSFUL; + } + + *p_action = IrpSkip; + /* The FDO driver must set the status even when passing down. */ + p_irp->IoStatus.Status = STATUS_SUCCESS; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +/* + * This function gets called after releasing the remove lock and waiting + * for all other threads to release the lock. No more modifications will + * occur to the PDO pointer vectors. + */ +static void +fdo_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + bus_fdo_ext_t *p_ext; + NTSTATUS status; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + //TODO: Fail outstanding I/O operations. + + /* Disable any exported interfaces. */ + status = IoSetDeviceInterfaceState( &p_ext->al_ifc_name, FALSE ); + ASSERT( NT_SUCCESS( status ) ); + status = IoSetDeviceInterfaceState( &p_ext->ci_ifc_name, FALSE ); + ASSERT( NT_SUCCESS( status ) ); + + /* Release the memory allocated for the interface symbolic names. */ + RtlFreeUnicodeString( &p_ext->ci_ifc_name ); + RtlFreeUnicodeString( &p_ext->al_ifc_name ); + + cl_obj_destroy( &p_ext->p_port_mgr->obj ); + cl_obj_destroy( &p_ext->p_iou_mgr->obj ); + + al_cleanup(); + + bus_globals.p_bus_ext = NULL; + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +fdo_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + bus_fdo_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Process on the way up. 
*/ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_do_sync_pnp returned %08x.\n", status) ); + return status; + } + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* + * Store the device power maping into our extension since we're + * the power policy owner. The mapping is used when handling + * IRP_MN_SET_POWER IRPs. + */ + cl_memcpy( p_ext->po_state, + p_io_stack->Parameters.DeviceCapabilities.Capabilities->DeviceState, + sizeof( p_ext->po_state ) ); + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static NTSTATUS +fdo_query_remove_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + status = port_mgr_get_bus_relations( 0, p_irp ); + if( status == STATUS_SUCCESS || + status == STATUS_NO_SUCH_DEVICE ) + { + status = iou_mgr_get_bus_relations( 0, p_irp ); + } + if( status == STATUS_NO_SUCH_DEVICE ) + status = STATUS_SUCCESS; + + switch( status ) + { + case STATUS_NO_SUCH_DEVICE: + *p_action = IrpSkip; + status = STATUS_SUCCESS; + break; + + case STATUS_SUCCESS: + *p_action = IrpPassDown; + break; + + default: + *p_action = IrpComplete; + break; + } + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +void +al_ref_ifc( + IN DEVICE_OBJECT* p_dev_obj ) +{ + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + cl_atomic_inc( &p_ext->n_al_ifc_ref ); + ObReferenceObject( p_dev_obj ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +void +al_deref_ifc( + IN DEVICE_OBJECT* p_dev_obj ) +{ + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + cl_atomic_dec( &p_ext->n_al_ifc_ref ); + ObDereferenceObject( p_dev_obj ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +void +al_ref_ci_ifc( + IN DEVICE_OBJECT* p_dev_obj ) +{ + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + cl_atomic_inc( &p_ext->n_ci_ifc_ref ); + ObReferenceObject( p_dev_obj ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +void +al_deref_ci_ifc( + IN DEVICE_OBJECT* p_dev_obj ) +{ + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + cl_atomic_dec( &p_ext->n_ci_ifc_ref ); + ObDereferenceObject( p_dev_obj ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +__query_al_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ) +{ + ib_al_ifc_t *p_ifc; + + BUS_ENTER( BUS_DBG_PNP ); + + if( p_io_stack->Parameters.QueryInterface.Version != + AL_INTERFACE_VERSION ) + { + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Incorrect interface version (%d)\n", + p_io_stack->Parameters.QueryInterface.Version ) ); + return STATUS_NOT_SUPPORTED; + } + + if( p_io_stack->Parameters.QueryInterface.Size < sizeof(ib_al_ifc_t) ) + { + BUS_TRACE_EXIT( BUS_DBG_PNP, + ("Buffer too small (%d given, %d required).\n", + p_io_stack->Parameters.QueryInterface.Size, sizeof(ib_al_ifc_t)) ); + return STATUS_BUFFER_TOO_SMALL; + } + + // Copy the interface. + p_ifc = (ib_al_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface; + + p_ifc->wdm.Size = sizeof(ib_al_ifc_t); + p_ifc->wdm.Version = AL_INTERFACE_VERSION; + p_ifc->wdm.Context = p_dev_obj; + p_ifc->wdm.InterfaceReference = al_ref_ifc; + p_ifc->wdm.InterfaceDereference = al_deref_ifc; + + al_set_ifc( p_ifc ); + + // take the reference before returning. 
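
/*
 * __query_al_ifc() above fills in paired reference/dereference callbacks
 * and takes one reference on the consumer's behalf (the al_ref_ifc call
 * just below). Minimal sketch of that ownership protocol, with a plain
 * counter standing in for the interlocked count and object reference:
 */
#include <stdio.h>

typedef struct _ifc
{
    void *context;
    void (*reference)( void *context );
    void (*dereference)( void *context );
} ifc_t;

static int g_refs = 0;
static void demo_ref( void *ctx )   { (void)ctx; ++g_refs; }
static void demo_deref( void *ctx ) { (void)ctx; --g_refs; }

static void query_ifc( ifc_t *p_ifc )
{
    p_ifc->context = NULL;
    p_ifc->reference = demo_ref;
    p_ifc->dereference = demo_deref;
    p_ifc->reference( p_ifc->context );  /* ref taken for the caller */
}

int main( void )
{
    ifc_t ifc;
    query_ifc( &ifc );              /* consumer now holds one reference */
    printf( "refs=%d\n", g_refs );  /* 1 */
    ifc.dereference( ifc.context ); /* consumer balances it when done */
    printf( "refs=%d\n", g_refs );  /* 0 */
    return 0;
}
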
+ al_ref_ifc( p_dev_obj ); + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +void +al_set_ifc( + OUT ib_al_ifc_t* const p_ifc ) +{ + BUS_ENTER( BUS_DBG_PNP ); + + p_ifc->wdm.Size = sizeof(ib_al_ifc_t); + p_ifc->wdm.InterfaceReference = al_ref_ifc; + p_ifc->wdm.InterfaceDereference = al_deref_ifc; + + p_ifc->sync_destroy = ib_sync_destroy; + p_ifc->open_ca = ib_open_ca; + p_ifc->query_ca = ib_query_ca; + p_ifc->get_dev = get_ca_dev; + p_ifc->close_ca = ib_close_ca; + p_ifc->alloc_pd = ib_alloc_pd; + p_ifc->dealloc_pd = ib_dealloc_pd; + p_ifc->create_av = ib_create_av; + p_ifc->query_av = ib_query_av; + p_ifc->modify_av = ib_modify_av; + p_ifc->destroy_av = ib_destroy_av; + p_ifc->create_qp = ib_create_qp; + p_ifc->get_spl_qp = ib_get_spl_qp; + p_ifc->query_qp = ib_query_qp; + p_ifc->modify_qp = ib_modify_qp; + p_ifc->destroy_qp = ib_destroy_qp; + p_ifc->create_cq = ib_create_cq; + p_ifc->modify_cq = ib_modify_cq; + p_ifc->query_cq = ib_query_cq; + p_ifc->destroy_cq = ib_destroy_cq; + p_ifc->reg_mem = ib_reg_mem; + p_ifc->reg_phys = ib_reg_phys; + p_ifc->query_mr = ib_query_mr; + p_ifc->rereg_mem = ib_rereg_mem; + p_ifc->reg_shmid = ib_reg_shmid; + p_ifc->dereg_mr = ib_dereg_mr; + p_ifc->create_mw = ib_create_mw; + p_ifc->query_mw = ib_query_mw; + p_ifc->bind_mw = ib_bind_mw; + p_ifc->destroy_mw = ib_destroy_mw; + p_ifc->post_send = ib_post_send; + p_ifc->post_recv = ib_post_recv; + p_ifc->send_mad = ib_send_mad; + p_ifc->cancel_mad = ib_cancel_mad; + p_ifc->poll_cq = ib_poll_cq; + p_ifc->rearm_cq = ib_rearm_cq; + p_ifc->join_mcast = ib_join_mcast; + p_ifc->leave_mcast = ib_leave_mcast; + p_ifc->local_mad = ib_local_mad; + p_ifc->cm_listen = ib_cm_listen; + p_ifc->cm_cancel = ib_cm_cancel; + p_ifc->cm_req = ib_cm_req; + p_ifc->cm_rep = ib_cm_rep; + p_ifc->cm_rtu = ib_cm_rtu; + p_ifc->cm_rej = ib_cm_rej; + p_ifc->cm_mra = ib_cm_mra; + p_ifc->cm_lap = ib_cm_lap; + p_ifc->cm_apr = ib_cm_apr; + p_ifc->force_apm = ib_force_apm; + p_ifc->cm_dreq = ib_cm_dreq; + p_ifc->cm_drep = ib_cm_drep; + p_ifc->cm_handoff = ib_cm_handoff; + p_ifc->create_ioc = ib_create_ioc; + p_ifc->destroy_ioc = ib_destroy_ioc; + p_ifc->reg_ioc = ib_reg_ioc; + p_ifc->add_svc_entry = ib_add_svc_entry; + p_ifc->remove_svc_entry = ib_remove_svc_entry; + p_ifc->get_ca_guids = ib_get_ca_guids; + p_ifc->get_ca_by_gid = ib_get_ca_by_gid; + p_ifc->get_port_by_gid = ib_get_port_by_gid; + p_ifc->create_mad_pool = ib_create_mad_pool; + p_ifc->destroy_mad_pool = ib_destroy_mad_pool; + p_ifc->reg_mad_pool = ib_reg_mad_pool; + p_ifc->dereg_mad_pool = ib_dereg_mad_pool; + p_ifc->get_mad = ib_get_mad; + p_ifc->put_mad = ib_put_mad; + p_ifc->init_dgrm_svc = ib_init_dgrm_svc; + p_ifc->reg_mad_svc = ib_reg_mad_svc; + p_ifc->reg_svc = ib_reg_svc; + p_ifc->dereg_svc = ib_dereg_svc; + p_ifc->query = ib_query; + p_ifc->cancel_query = ib_cancel_query; + p_ifc->reg_pnp = ib_reg_pnp; + p_ifc->dereg_pnp = ib_dereg_pnp; + p_ifc->subscribe = ib_subscribe; + p_ifc->unsubscribe = ib_unsubscribe; + p_ifc->reject_ioc = ib_reject_ioc; + p_ifc->ci_call = ib_ci_call; + p_ifc->open_al = ib_open_al; + p_ifc->close_al = ib_close_al; + p_ifc->get_err_str = ib_get_err_str; + p_ifc->get_wc_status_str = ib_get_wc_status_str; + p_ifc->create_mlnx_fmr = mlnx_create_fmr; + p_ifc->map_phys_mlnx_fmr = mlnx_map_phys_fmr; + p_ifc->unmap_mlnx_fmr = mlnx_unmap_fmr; + p_ifc->destroy_mlnx_fmr = mlnx_destroy_fmr; + p_ifc->create_mlnx_fmr_pool = mlnx_create_fmr_pool; + p_ifc->destroy_mlnx_fmr_pool = mlnx_destroy_fmr_pool; + p_ifc->map_phys_mlnx_fmr_pool = 
mlnx_map_phys_fmr_pool; + p_ifc->unmap_mlnx_fmr_pool = mlnx_unmap_fmr_pool; + p_ifc->flush_mlnx_fmr_pool = mlnx_flush_fmr_pool; + p_ifc->create_srq = ib_create_srq; + p_ifc->modify_srq = ib_modify_srq; + p_ifc->query_srq = ib_query_srq; + p_ifc->destroy_srq = ib_destroy_srq; + p_ifc->post_srq_recv = ib_post_srq_recv; + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +__get_relations( + IN const net64_t ca_guid, + IN IRP* const p_irp ) +{ + NTSTATUS status; + + BUS_ENTER( BUS_DBG_PNP ); + + /* TODO: For IOUs, filter relations based on multi-HCA support. */ + status = port_mgr_get_bus_relations( ca_guid, p_irp ); + if( status == STATUS_SUCCESS || + status == STATUS_NO_SUCH_DEVICE ) + { + status = iou_mgr_get_bus_relations( ca_guid, p_irp ); + } + if( status == STATUS_NO_SUCH_DEVICE ) + status = STATUS_SUCCESS; + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static NTSTATUS +__query_ci_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ) +{ + ib_ci_ifc_t *p_ifc; + + BUS_ENTER( BUS_DBG_PNP ); + + if( p_io_stack->Parameters.QueryInterface.Version != + IB_CI_INTERFACE_VERSION ) + { + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Incorrect interface version (%d)\n", + p_io_stack->Parameters.QueryInterface.Version ) ); + return STATUS_NOT_SUPPORTED; + } + + if( p_io_stack->Parameters.QueryInterface.Size < sizeof(ib_ci_ifc_t) ) + { + BUS_TRACE_EXIT( BUS_DBG_PNP, + ("Buffer too small (%d given, %d required).\n", + p_io_stack->Parameters.QueryInterface.Size, sizeof(ib_ci_ifc_t)) ); + return STATUS_BUFFER_TOO_SMALL; + } + + /* Copy the interface. */ + p_ifc = (ib_ci_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface; + + p_ifc->wdm.Size = sizeof(ib_ci_ifc_t); + p_ifc->wdm.Version = IB_CI_INTERFACE_VERSION; + p_ifc->wdm.Context = p_dev_obj; + p_ifc->wdm.InterfaceReference = al_ref_ci_ifc; + p_ifc->wdm.InterfaceDereference = al_deref_ci_ifc; + + /* Set the entry points. */ + p_ifc->register_ca = ib_register_ca; + p_ifc->deregister_ca = ib_deregister_ca; + p_ifc->get_relations = __get_relations; + p_ifc->get_err_str = ib_get_err_str; + + /* take the reference before returning. */ + al_ref_ci_ifc( p_dev_obj ); + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +fdo_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + +#pragma warning( push, 3 ) + PAGED_CODE(); +#pragma warning( pop ) + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* Compare requested GUID with our supported interface GUIDs. 
*/ + if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType, + &GUID_IB_AL_INTERFACE ) ) + { + status = __query_al_ifc( p_dev_obj, p_io_stack ); + } + else if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType, + &GUID_IB_CI_INTERFACE ) ) + { + status = __query_ci_ifc( p_dev_obj, p_io_stack ); + } + else + { + status = p_irp->IoStatus.Status; + } + + if( NT_SUCCESS( status ) ) + *p_action = IrpSkip; + else if( status == STATUS_BUFFER_TOO_SMALL ) + *p_action = IrpComplete; + else + *p_action = IrpIgnore; + + BUS_EXIT( BUS_DBG_PNP ); + return status; +} + + +static NTSTATUS +__fdo_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status = STATUS_SUCCESS; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_POWER ); + + UNUSED_PARAM( p_dev_obj ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + switch( p_io_stack->Parameters.Power.Type ) + { + case SystemPowerState: + /* Fail any requests to hibernate or sleep the system. */ + switch( p_io_stack->Parameters.Power.State.SystemState ) + { + case PowerSystemHibernate: + case PowerSystemSleeping1: // STANDBY support + case PowerSystemWorking: + case PowerSystemShutdown: + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + + case DevicePowerState: + /* Fail any query for low power states. */ + switch( p_io_stack->Parameters.Power.State.DeviceState ) + { + case PowerDeviceD0: + case PowerDeviceD3: + /* We only support fully powered or off power states. */ + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + } + + if( status == STATUS_NOT_SUPPORTED ) + *p_action = IrpComplete; + else + *p_action = IrpSkip; + + BUS_EXIT( BUS_DBG_POWER ); + return status; +} + + +static void +__request_power_completion( + IN DEVICE_OBJECT *p_dev_obj, + IN UCHAR minor_function, + IN POWER_STATE power_state, + IN void *context, + IN IO_STATUS_BLOCK *p_io_status ) +{ + IRP *p_irp; + cl_pnp_po_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( minor_function ); + UNUSED_PARAM( power_state ); + + p_irp = (IRP*)context; + p_ext = p_dev_obj->DeviceExtension; + + /* Propagate the device IRP status to the system IRP status. */ + p_irp->IoStatus.Status = p_io_status->Status; + + /* Continue Power IRP processing. */ + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->remove_lock, p_irp ); + BUS_EXIT( BUS_DBG_PNP ); +} + + +/*NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__set_power_completion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + NTSTATUS status; + POWER_STATE state; + bus_fdo_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( context ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + if( !NT_SUCCESS( p_irp->IoStatus.Status ) ) + { + PoStartNextPowerIrp( p_irp ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n", + p_irp->IoStatus.Status) ); + return STATUS_SUCCESS; + } + + state.DeviceState = + p_ext->po_state[p_io_stack->Parameters.Power.State.SystemState]; + + /* + * Send a device power IRP to our devnode. Using our device object will + * only work on win2k and other NT based systems. 
+ */ + status = PoRequestPowerIrp( p_dev_obj, IRP_MN_SET_POWER, state, + __request_power_completion, p_irp, NULL ); + + if( status != STATUS_PENDING ) + { + PoStartNextPowerIrp( p_irp ); + /* Propagate the failure. */ + p_irp->IoStatus.Status = status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + BUS_TRACE( BUS_DBG_ERROR, + ("PoRequestPowerIrp returned %08x.\n", status) ); + } + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_MORE_PROCESSING_REQUIRED; +} + + +static NTSTATUS +__fdo_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + bus_fdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_POWER ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + BUS_TRACE( BUS_DBG_POWER, + ("SET_POWER for FDO %p (ext %p): type %s, state %d, action %d \n", + p_dev_obj, p_ext, + (p_io_stack->Parameters.Power.Type) ? "DevicePowerState" : "SystemPowerState", + p_io_stack->Parameters.Power.State.DeviceState, + p_io_stack->Parameters.Power.ShutdownType )); + + switch( p_io_stack->Parameters.Power.Type ) + { + case SystemPowerState: + /* + * Process on the way up the stack. We cannot block since the + * power dispatch function can be called at elevated IRQL if the + * device is in a paging/hibernation/crash dump path. + */ + IoMarkIrpPending( p_irp ); + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __set_power_completion, NULL, + TRUE, TRUE, TRUE ); +#pragma warning( pop ) + PoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + + *p_action = IrpDoNothing; + status = STATUS_PENDING; + break; + + case DevicePowerState: + default: + /* Pass down and let the PDO driver handle it. */ + *p_action = IrpIgnore; + status = STATUS_SUCCESS; + break; + } + + BUS_EXIT( BUS_DBG_POWER ); + return status; +} + + +/* + * A CA GUID of zero means that all devices should be reported. + */ +NTSTATUS +bus_get_relations( + IN cl_qlist_t* const p_pdo_list, + IN const net64_t ca_guid, + IN IRP* const p_irp ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + cl_list_item_t *p_list_item; + bus_pdo_ext_t *p_pdo_ext; + size_t n_devs = 0; + + BUS_ENTER( BUS_DBG_PNP ); + + /* Count the number of child devices. */ + for( p_list_item = cl_qlist_head( p_pdo_list ); + p_list_item != cl_qlist_end( p_pdo_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_pdo_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item ); + + if( !p_pdo_ext->b_present ) + continue; + + if( ca_guid && p_pdo_ext->ca_guid != ca_guid ) + continue; + + n_devs++; + } + + if( !n_devs ) + { + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_NO_SUCH_DEVICE; + } + + BUS_TRACE( BUS_DBG_PNP, ("Found %d PDOs .\n", n_devs )); + + /* Add space for our child IOUs. */ + status = cl_alloc_relations( p_irp, n_devs ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_alloc_relations returned %08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + + for( p_list_item = cl_qlist_head( p_pdo_list ); + p_list_item != cl_qlist_end( p_pdo_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_pdo_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item ); + + if( !p_pdo_ext->b_present ) + { + /* + * We don't report a PDO that is no longer present. This is how + * the PDO will get cleaned up. 
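*/

/*
 * bus_get_relations() here uses the classic two-pass scheme: count the
 * qualifying children, size the relations array, then fill it (taking a
 * reference per reported object). A compact user-mode sketch with a plain
 * array standing in for the PDO list:
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct { int b_present; unsigned long long ca_guid; } pdo_t;

int main( void )
{
    pdo_t pdos[] = { {1, 0xA}, {0, 0xA}, {1, 0xB}, {1, 0xA} };
    const size_t n_pdos = sizeof(pdos) / sizeof(pdos[0]);
    unsigned long long filter = 0xA;   /* 0 would mean "all CAs" */
    size_t i, n_devs = 0, count = 0;
    pdo_t **p_rel;

    for( i = 0; i < n_pdos; i++ )      /* pass 1: count */
        if( pdos[i].b_present && (!filter || pdos[i].ca_guid == filter) )
            n_devs++;

    p_rel = malloc( n_devs * sizeof(*p_rel) );
    if( !p_rel )
        return 1;

    for( i = 0; i < n_pdos; i++ )      /* pass 2: fill the array */
        if( pdos[i].b_present && (!filter || pdos[i].ca_guid == filter) )
            p_rel[count++] = &pdos[i];

    printf( "reported %zu of %zu PDOs\n", count, n_pdos );
    free( p_rel );
    return 0;
}

/*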
*/ + p_pdo_ext->b_reported_missing = TRUE; + BUS_TRACE( BUS_DBG_PNP, ("Don't report PDO! %s: PDO %p, ext %p, present %d, missing %d .\n", + p_pdo_ext->cl_ext.vfptr_pnp_po->identity, p_pdo_ext->cl_ext.p_self_do, + p_pdo_ext, p_pdo_ext->b_present, p_pdo_ext->b_reported_missing ) ); + continue; + } + + if( ca_guid && p_pdo_ext->ca_guid != ca_guid ) + continue; + + BUS_TRACE( BUS_DBG_PNP, ("Reported PDO %p(=%p), ext %p.\n", + p_pdo_ext->cl_ext.p_self_do, p_pdo_ext->cl_ext.p_pdo, p_pdo_ext )); + + p_rel->Objects[p_rel->Count] = p_pdo_ext->cl_ext.p_pdo; + ObReferenceObject( p_rel->Objects[p_rel->Count++] ); + } + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} diff --git a/branches/Ndi/core/bus/kernel/bus_pnp.h b/branches/Ndi/core/bus/kernel/bus_pnp.h new file mode 100644 index 00000000..a5c2041b --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_pnp.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined _BUS_DRV_PNP_H_ +#define _BUS_DRV_PNP_H_ + + +#include "bus_driver.h" +#include "iba/ib_al_ifc.h" + + +/****f* InfiniBand Bus Driver: Plug and Play/bus_add_device +* NAME +* bus_add_device +* +* DESCRIPTION +* Main AddDevice entrypoint for the IB Bus driver. +* Adds the bus root functional device object to the device node. The +* bus root FDO performs all PnP operations for fabric attached devices. +* +* SYNOPSIS +*/ +NTSTATUS +bus_add_device( + IN PDRIVER_OBJECT p_driver_obj, + IN PDEVICE_OBJECT p_pdo ); +/* +* PARAMETERS +* p_driver_obj +* Driver object for the BUS driver. +* +* p_pdo +* Pointer to the device object representing the PDO for the device on +* which we are loading. +* +* RETURN VALUES +* STATUS_SUCCESS if the device was successfully added. +* +* Other NTSTATUS error values if errors are encountered. +* +* SEE ALSO +*********/ + + +void +al_set_ifc( + OUT ib_al_ifc_t* const p_ifc ); + +void +al_ref_ifc( + IN DEVICE_OBJECT* p_dev_obj ); + +NTSTATUS +bus_get_relations( + IN cl_qlist_t* const p_pdo_list, + IN const net64_t ca_guid, + IN IRP* const p_irp ); + +#endif // !defined _BUS_DRV_PNP_H_ diff --git a/branches/Ndi/core/bus/kernel/bus_port_mgr.c b/branches/Ndi/core/bus/kernel/bus_port_mgr.c new file mode 100644 index 00000000..704e2eb0 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_port_mgr.c @@ -0,0 +1,1494 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include "ib_common.h" +#include "bus_pnp.h" +#include "bus_port_mgr.h" +#include "al_ca.h" +#include "al_mgr.h" +#include +#include +#include "iba/ipoib_ifc.h" + + +#define IPOIB_DEVICE_ID L"IBA\\IPoIB" +#define IPOIB_COMPAT_ID L"IBA\\SID_1000066a00020000\0\0" +/* Hardware ID is a MULTI_SZ, so is terminated with a double NULL. */ +#define IPOIB_HARDWARE_ID IPOIB_DEVICE_ID L"\0" +#define IPOIB_DESCRIPTION L"OpenIB IPoIB Adapter" + +/* {5A9649F4-0101-4a7c-8337-796C48082DA2} */ +DEFINE_GUID(GUID_BUS_TYPE_IBA, +0x5a9649f4, 0x101, 0x4a7c, 0x83, 0x37, 0x79, 0x6c, 0x48, 0x8, 0x2d, 0xa2); + + +/* + * Device extension for IPoIB port PDOs. + */ +typedef struct _bus_port_ext +{ + bus_pdo_ext_t pdo; + + net64_t port_guid; + uint32_t n_port; + + /* Number of references on the upper interface. */ + atomic32_t n_ifc_ref; + +} bus_port_ext_t; + + +port_mgr_t* gp_port_mgr = NULL; + + +/* + * Function prototypes. 
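*/

/*
 * The ID macros above rely on adjacent wide-literal concatenation:
 * IPOIB_HARDWARE_ID appends an explicit L"\0" so that, together with the
 * literal's own implicit terminator, the buffer ends in the double NUL a
 * MULTI_SZ needs. A sketch that checks the layout (demo macro names):
 */
#include <stdio.h>
#include <wchar.h>

#define DEMO_DEVICE_ID    L"IBA\\IPoIB"
#define DEMO_HARDWARE_ID  DEMO_DEVICE_ID L"\0"

int main( void )
{
    /* sizeof counts the implicit terminator too, so the buffer ends \0\0. */
    size_t cch = sizeof(DEMO_HARDWARE_ID) / sizeof(wchar_t);
    const wchar_t *p = DEMO_HARDWARE_ID;

    printf( "%zu wchars; last two: %d %d\n",
        cch, (int)p[cch - 2], (int)p[cch - 1] );  /* ... 0 0 */
    return 0;
}

/*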
+ */ +void +destroying_port_mgr( + IN cl_obj_t* p_obj ); + +void +free_port_mgr( + IN cl_obj_t* p_obj ); + +ib_api_status_t +bus_reg_port_pnp( void ); + +ib_api_status_t +port_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +port_mgr_port_add( + IN ib_pnp_port_rec_t* p_pnp_rec ); + +void +port_mgr_port_remove( + IN ib_pnp_port_rec_t* p_pnp_rec ); + +static NTSTATUS +port_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +port_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +port_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_query_device_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_hardware_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_compatible_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_unique_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_description( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_location( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +port_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_query_ipoib_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ); + +static NTSTATUS +port_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +port_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + + +/* All PnP code is called at passive, so it can all be paged out. 
*/ +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, port_start) +#pragma alloc_text (PAGE, port_query_remove) +#pragma alloc_text (PAGE, port_release_resources) +#pragma alloc_text (PAGE, port_remove) +#pragma alloc_text (PAGE, port_surprise_remove) +#pragma alloc_text (PAGE, port_query_capabilities) +#pragma alloc_text (PAGE, port_query_target_relations) +#pragma alloc_text (PAGE, port_query_device_id) +#pragma alloc_text (PAGE, port_query_hardware_ids) +#pragma alloc_text (PAGE, port_query_compatible_ids) +#pragma alloc_text (PAGE, port_query_unique_id) +#pragma alloc_text (PAGE, port_query_description) +#pragma alloc_text (PAGE, port_query_location) +#pragma alloc_text (PAGE, port_query_bus_info) +#pragma alloc_text (PAGE, port_query_ipoib_ifc) +#pragma alloc_text (PAGE, port_query_interface) +#pragma alloc_text (PAGE_PNP, port_set_power) +#pragma alloc_text (PAGE, port_mgr_port_add) +#pragma alloc_text (PAGE, port_mgr_port_remove) +#endif + + +/* + * Global virtual function pointer tables shared between all + * instances of Port PDOs. + */ +static const cl_vfptr_pnp_po_t vfptr_port_pnp = { + "IPoIB", + port_start, + cl_irp_succeed, + cl_irp_succeed, + cl_irp_succeed, + port_query_remove, + port_release_resources, + port_remove, + cl_irp_succeed, + port_surprise_remove, + port_query_capabilities, + cl_irp_complete, + cl_irp_complete, + cl_irp_succeed, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + port_query_target_relations, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + port_query_bus_info, + port_query_interface, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + cl_irp_succeed, // QueryPower + port_set_power, // SetPower + cl_irp_unsupported, // PowerSequence + cl_irp_unsupported // WaitWake +}; + + +static const cl_vfptr_query_txt_t vfptr_port_query_txt = { + port_query_device_id, + port_query_hardware_ids, + port_query_compatible_ids, + port_query_unique_id, + port_query_description, + port_query_location +}; + + +/* + * Create the AL load service. + */ +ib_api_status_t +create_port_mgr( + OUT port_mgr_t** const pp_port_mgr ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( !gp_port_mgr ); + + gp_port_mgr = cl_zalloc( sizeof( port_mgr_t ) ); + if( !gp_port_mgr ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate port manager.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Construct the load service. */ + cl_obj_construct( &gp_port_mgr->obj, AL_OBJ_TYPE_LOADER ); + cl_mutex_construct( &gp_port_mgr->pdo_mutex ); + cl_qlist_init( &gp_port_mgr->port_list ); + + cl_status = cl_mutex_init( &gp_port_mgr->pdo_mutex ); + if( cl_status != CL_SUCCESS ) + { + free_port_mgr( &gp_port_mgr->obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_mutex_init returned %s.\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + /* Initialize the load service object. */ + cl_status = cl_obj_init( &gp_port_mgr->obj, CL_DESTROY_SYNC, + destroying_port_mgr, NULL, free_port_mgr ); + if( cl_status != CL_SUCCESS ) + { + free_port_mgr( &gp_port_mgr->obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("cl_obj_init returned %s.\n", cl_status_text[cl_status]) ); + return ib_convert_cl_status( cl_status ); + } + + /* Register for port PnP events. 
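+	 * The registration below is synchronous (IB_PNP_FLAG_REG_SYNC), so
+	 * all existing ports are reported before bus_reg_port_pnp returns.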
*/ + status = bus_reg_port_pnp(); + if( status != IB_SUCCESS ) + { + cl_obj_destroy( &gp_port_mgr->obj ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("bus_reg_port_pnp returned %s.\n", ib_get_err_str(status)) ); + return status; + } + + *pp_port_mgr = gp_port_mgr; + + BUS_EXIT( BUS_DBG_PNP ); + return IB_SUCCESS; +} + + +/* + * Pre-destroy the load service. + */ +void +destroying_port_mgr( + IN cl_obj_t* p_obj ) +{ + ib_api_status_t status; + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( p_obj ); + CL_ASSERT( gp_port_mgr == PARENT_STRUCT( p_obj, port_mgr_t, obj ) ); + UNUSED_PARAM( p_obj ); + + /* Deregister for port PnP events. */ + if( gp_port_mgr->h_pnp ) + { + status = ib_dereg_pnp( gp_port_mgr->h_pnp, + (ib_pfn_destroy_cb_t)cl_obj_deref ); + CL_ASSERT( status == IB_SUCCESS ); + } + BUS_EXIT( BUS_DBG_PNP ); +} + + +/* + * Free the load service. + */ +void +free_port_mgr( + IN cl_obj_t* p_obj ) +{ + bus_pdo_ext_t *p_ext; + cl_list_item_t *p_list_item; + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( p_obj ); + CL_ASSERT( gp_port_mgr == PARENT_STRUCT( p_obj, port_mgr_t, obj ) ); + + /* + * Mark all IPoIB PDOs as no longer present. This will cause them + * to be removed when they process the IRP_MN_REMOVE_DEVICE. + */ + p_list_item = cl_qlist_remove_head( &gp_port_mgr->port_list ); + while( p_list_item != cl_qlist_end( &gp_port_mgr->port_list ) ) + { + p_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item ); + p_list_item = cl_qlist_remove_head( &gp_port_mgr->port_list ); + if( p_ext->cl_ext.pnp_state == SurpriseRemoved ) + { + CL_ASSERT( !p_ext->b_present ); + p_ext->b_reported_missing = TRUE; + BUS_TRACE( BUS_DBG_PNP, ("%s: PDO %p, ext %p, present %d, missing %d .\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext->cl_ext.p_self_do, p_ext, p_ext->b_present, p_ext->b_reported_missing ) ); + continue; + } + if( p_ext->h_ca ) + { + /* Invalidate bus relations for the HCA. */ + IoInvalidateDeviceRelations( + p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations ); + + /* Release the reference on the CA object. */ + deref_al_obj( &p_ext->h_ca->obj ); + } + BUS_TRACE( BUS_DBG_PNP, ("Deleted device %s: PDO %p, ext %p\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext->cl_ext.p_self_do, p_ext ) ); + IoDeleteDevice( p_ext->cl_ext.p_self_do ); + } + + cl_mutex_destroy( &gp_port_mgr->pdo_mutex ); + cl_obj_deinit( p_obj ); + cl_free( gp_port_mgr ); + gp_port_mgr = NULL; + BUS_EXIT( BUS_DBG_PNP ); +} + + +/* + * Register the load service for the given PnP class events. + */ +ib_api_status_t +bus_reg_port_pnp( void ) +{ + ib_pnp_req_t pnp_req; + ib_api_status_t status; + + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_SYNC; + pnp_req.pnp_context = gp_port_mgr; + pnp_req.pfn_pnp_cb = port_mgr_pnp_cb; + + status = ib_reg_pnp( gh_al, &pnp_req, &gp_port_mgr->h_pnp ); + + if( status == IB_SUCCESS ) + { + /* Reference the load service on behalf of the ib_reg_pnp call. */ + cl_obj_ref( &gp_port_mgr->obj ); + } + + return status; +} + + +/* + * Load service PnP event callback. 
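+ * IB_PNP_PORT_ADD creates a PDO for the new port; IB_PNP_PORT_REMOVE
+ * marks the existing PDO as no longer present.  All other events are
+ * simply acknowledged with IB_SUCCESS.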
+ */
+ib_api_status_t
+port_mgr_pnp_cb(
+	IN				ib_pnp_rec_t*				p_pnp_rec )
+{
+	ib_api_status_t		status;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	CL_ASSERT( p_pnp_rec );
+	CL_ASSERT( gp_port_mgr == p_pnp_rec->pnp_context );
+
+	switch( p_pnp_rec->pnp_event )
+	{
+	case IB_PNP_PORT_ADD:
+		status = port_mgr_port_add( (ib_pnp_port_rec_t*)p_pnp_rec );
+		break;
+
+	case IB_PNP_PORT_REMOVE:
+		port_mgr_port_remove( (ib_pnp_port_rec_t*)p_pnp_rec );
+		/* Fall through to report success. */
+
+	default:
+		status = IB_SUCCESS;
+		break;
+	}
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+
+/*
+ * Called to get bus relations for an HCA.
+ */
+NTSTATUS
+port_mgr_get_bus_relations(
+	IN		const	net64_t						ca_guid,
+	IN				IRP* const					p_irp )
+{
+	NTSTATUS			status;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	cl_mutex_acquire( &gp_port_mgr->pdo_mutex );
+	status = bus_get_relations( &gp_port_mgr->port_list, ca_guid, p_irp );
+	cl_mutex_release( &gp_port_mgr->pdo_mutex );
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+static ib_api_status_t
+__port_was_hibernated(
+	IN				ib_pnp_port_rec_t*			p_pnp_rec )
+{
+	ib_api_status_t		status;
+	cl_list_item_t		*p_list_item;
+	bus_port_ext_t		*p_port_ext;
+	bus_pdo_ext_t		*p_pdo_ext = NULL;
+	size_t				n_devs = 0;
+	cl_qlist_t*			p_pdo_list = &gp_port_mgr->port_list;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	cl_mutex_acquire( &gp_port_mgr->pdo_mutex );
+
+	/* Count the number of child devices. */
+	for( p_list_item = cl_qlist_head( p_pdo_list );
+		p_list_item != cl_qlist_end( p_pdo_list );
+		p_list_item = cl_qlist_next( p_list_item ) )
+	{
+		p_pdo_ext = PARENT_STRUCT( p_list_item, bus_pdo_ext_t, list_item );
+		p_port_ext = (bus_port_ext_t*)p_pdo_ext;
+
+		if( p_pdo_ext->b_present && p_pdo_ext->b_hibernating &&
+			(p_port_ext->port_guid == p_pnp_rec->p_port_attr->port_guid) )
+		{
+			n_devs++;
+			break;
+		}
+
+		BUS_TRACE( BUS_DBG_PNP,
+			("Skipped PDO for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d, port_guid %I64x.\n",
+			p_pdo_ext->cl_ext.vfptr_pnp_po->identity, p_pdo_ext->cl_ext.p_self_do,
+			p_pdo_ext, p_pdo_ext->b_present, p_pdo_ext->b_reported_missing,
+			p_pdo_ext->b_hibernating, p_port_ext->port_guid ) );
+	}
+
+	if (n_devs)
+	{
+		/* Take a reference on the parent HCA. */
+		p_pdo_ext->h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid );
+		if( !p_pdo_ext->h_ca )
+		{
+			BUS_TRACE( BUS_DBG_ERROR, ("acquire_ca failed to find CA by guid %I64x\n",
+				p_pnp_rec->p_ca_attr->ca_guid ) );
+			status = IB_INVALID_GUID;
+		}
+		else
+		{
+			p_pdo_ext->b_hibernating = FALSE;
+			p_pnp_rec->pnp_rec.context = p_pdo_ext;
+			status = IB_SUCCESS;
+			p_port_ext = (bus_port_ext_t*)p_pdo_ext;
+			BUS_TRACE( BUS_DBG_PNP,
+				("Found PDO for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d, port_guid %I64x.\n",
+				p_pdo_ext->cl_ext.vfptr_pnp_po->identity, p_pdo_ext->cl_ext.p_self_do,
+				p_pdo_ext, p_pdo_ext->b_present, p_pdo_ext->b_reported_missing,
+				p_pdo_ext->b_hibernating, p_port_ext->port_guid ) );
+		}
+	}
+	else
+	{
+		BUS_TRACE( BUS_DBG_PNP, ("Failed to find PDO for guid %I64x .\n",
+			p_pnp_rec->p_ca_attr->ca_guid ) );
+		status = IB_NOT_FOUND;
+	}
+
+	cl_mutex_release( &gp_port_mgr->pdo_mutex );
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+ib_api_status_t
+port_mgr_port_add(
+	IN				ib_pnp_port_rec_t*			p_pnp_rec )
+{
+	NTSTATUS		status;
+	DEVICE_OBJECT	*p_pdo;
+	bus_port_ext_t	*p_port_ext;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	if( !bus_globals.b_report_port_nic )
+	{
+		BUS_EXIT( BUS_DBG_PNP );
+		return IB_NOT_DONE;
+	}
+
+	/* Upon hibernation the IB_BUS driver does not remove the PDO, but
+	   marks it with a flag.
So we first try to find an existing PDO for this port, + marked with this flag. If it was found, we turn off the flag and use this PDO */ + status = __port_was_hibernated(p_pnp_rec); + if( status != IB_NOT_FOUND ) + { + BUS_EXIT( BUS_DBG_PNP ); + return status; + } + + /* Create the PDO for the new port device. */ + status = IoCreateDevice( bus_globals.p_driver_obj, sizeof(bus_port_ext_t), + NULL, FILE_DEVICE_CONTROLLER, + FILE_DEVICE_SECURE_OPEN | FILE_AUTOGENERATED_DEVICE_NAME, + FALSE, &p_pdo ); + if( !NT_SUCCESS( status ) ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("IoCreateDevice returned %08x.\n", status) ); + return IB_ERROR; + } + + /* Initialize the device extension. */ + cl_init_pnp_po_ext( p_pdo, NULL, p_pdo, bus_globals.dbg_lvl, + &vfptr_port_pnp, &vfptr_port_query_txt ); + + /* Set the DO_BUS_ENUMERATED_DEVICE flag to mark it as a PDO. */ + p_pdo->Flags |= DO_BUS_ENUMERATED_DEVICE; + + p_port_ext = p_pdo->DeviceExtension; + p_port_ext->pdo.dev_po_state.DeviceState = PowerDeviceD0; + p_port_ext->pdo.p_parent_ext = bus_globals.p_bus_ext; + p_port_ext->pdo.b_present = TRUE; + p_port_ext->pdo.b_reported_missing = FALSE; + p_port_ext->pdo.b_hibernating = FALSE; + p_port_ext->pdo.p_po_work_item = NULL; + BUS_TRACE( BUS_DBG_PNP, ("Created device for %s: PDO %p,ext %p, present %d, missing %d .\n", + p_port_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_pdo, p_port_ext, p_port_ext->pdo.b_present, + p_port_ext->pdo.b_reported_missing ) ); + + /* Cache the CA GUID. */ + p_port_ext->pdo.ca_guid = p_pnp_rec->p_ca_attr->ca_guid; + + /* Take a reference on the parent HCA. */ + p_port_ext->pdo.h_ca = acquire_ca( p_pnp_rec->p_ca_attr->ca_guid ); + if( !p_port_ext->pdo.h_ca ) + { + BUS_TRACE( BUS_DBG_PNP, ("Deleted device: PDO %p\n", p_pdo )); + IoDeleteDevice( p_pdo ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, ("acquire_ca failed to find CA.\n") ); + return IB_INVALID_GUID; + } + p_port_ext->port_guid = p_pnp_rec->p_port_attr->port_guid; + p_port_ext->n_port = p_pnp_rec->p_port_attr->port_num; + + /* Store the device extension in the port vector for future queries. */ + cl_mutex_acquire( &gp_port_mgr->pdo_mutex ); + cl_qlist_insert_tail( &gp_port_mgr->port_list, + &p_port_ext->pdo.list_item ); + cl_mutex_release( &gp_port_mgr->pdo_mutex ); + + /* + * Set the context of the PNP event. The context is passed in for future + * events on the same port. + */ + p_pnp_rec->pnp_rec.context = p_port_ext; + + /* Tell the PnP Manager to rescan for the HCA's bus relations. */ + IoInvalidateDeviceRelations( + p_port_ext->pdo.h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations ); + + /* Invalidate removal relations for the bus driver. */ + IoInvalidateDeviceRelations( + bus_globals.p_bus_ext->cl_ext.p_pdo, RemovalRelations ); + + BUS_EXIT( BUS_DBG_PNP ); + return IB_SUCCESS; +} + + +void +port_mgr_port_remove( + IN ib_pnp_port_rec_t* p_pnp_rec ) +{ + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + /* The PNP record's context is the port extension. */ + p_ext = p_pnp_rec->pnp_rec.context; + CL_ASSERT( p_ext ); + + /* + * Flag the port PDO as no longer being present. We have to wait until + * the PnP manager removes it to clean up. However, we do release the + * reference on the CA object in order to allow the removal of the HCA + * to proceed should it occur before the port's PDO is cleaned up. 
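+	 * If the port went away because the system is hibernating, the PDO
+	 * stays intact and only the CA reference is dropped here; it is
+	 * re-acquired in __port_was_hibernated when the port comes back.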
+ */ + cl_mutex_acquire( &gp_port_mgr->pdo_mutex ); + CL_ASSERT( p_ext->h_ca ); + + if( p_ext->b_hibernating ) + { + BUS_TRACE( BUS_DBG_PNP, ("Skip port removing for %s: PDO %p, ext %p, present %d, missing %d, hibernating %d .\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext->cl_ext.p_self_do, p_ext, p_ext->b_present, + p_ext->b_reported_missing, p_ext->b_hibernating ) ); + goto hca_deref; + } + + p_ext->b_present = FALSE; + BUS_TRACE( BUS_DBG_PNP, ("Mark removing %s: PDO %p, ext %p, present %d, missing %d .\n", + p_ext->cl_ext.vfptr_pnp_po->identity, p_ext->cl_ext.p_self_do, p_ext, p_ext->b_present, p_ext->b_reported_missing ) ); + + /* Invalidate removal relations for the bus driver. */ + IoInvalidateDeviceRelations( bus_globals.p_bus_ext->cl_ext.p_pdo, + RemovalRelations ); + + /* Invalidate bus relations for the HCA. */ + IoInvalidateDeviceRelations( + p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, BusRelations ); + +hca_deref: + deref_al_obj( &p_ext->h_ca->obj ); + p_ext->h_ca = NULL; + cl_mutex_release( &gp_port_mgr->pdo_mutex ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +port_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Notify the Power Manager that the device is started. */ + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_port_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + *p_action = IrpComplete; + if( p_ext->n_ifc_ref ) + { + /* + * Our interface is still being held by someone. + * Rollback the PnP state that was changed in the complib handler. + */ + cl_rollback_pnp_state( &p_ext->pdo.cl_ext ); + + /* Fail the query. */ + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Failing IRP_MN_QUERY_REMOVE_DEVICE:\n" + "\tInterface has %d reference\n", p_ext->n_ifc_ref ) ); + return STATUS_UNSUCCESSFUL; + } + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +port_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + bus_port_ext_t *p_ext; + POWER_STATE po_state; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Remove this PDO from its list. */ + cl_mutex_acquire( &gp_port_mgr->pdo_mutex ); + BUS_TRACE( BUS_DBG_PNP, ("Removing port from vector: PDO %p, ext %p\n", p_dev_obj, p_ext) ); + cl_qlist_remove_item( &gp_port_mgr->port_list, &p_ext->pdo.list_item ); + cl_mutex_release( &gp_port_mgr->pdo_mutex ); + po_state.DeviceState = PowerDeviceD3; + PoSetPowerState( p_ext->pdo.cl_ext.p_pdo, DevicePowerState, po_state ); + + BUS_EXIT( BUS_DBG_PNP ); +} + + +static NTSTATUS +port_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_port_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->pdo.b_present ) + { + CL_ASSERT( p_ext->pdo.cl_ext.pnp_state != NotStarted ); + CL_ASSERT( !p_ext->pdo.b_reported_missing ); + /* Reset the state to NotStarted. CompLib set it to Deleted. */ + cl_set_pnp_state( &p_ext->pdo.cl_ext, NotStarted ); + /* Don't delete the device. It may simply be disabled. 
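+		 * A disabled device still receives IRP_MN_REMOVE_DEVICE, so the
+		 * PDO must survive until it has been reported missing.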
*/ + *p_action = IrpComplete; + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Device still present: PDO %p, ext %p\n", p_dev_obj, p_ext) ); + return STATUS_SUCCESS; + } + + if( !p_ext->pdo.b_reported_missing ) + { + /* Reset the state to RemovePending. Complib set it to Deleted. */ + cl_rollback_pnp_state( &p_ext->pdo.cl_ext ); + *p_action = IrpComplete; + BUS_TRACE_EXIT( BUS_DBG_PNP, ("Device not reported missing yet: PDO %p, ext %p\n", p_dev_obj, p_ext) ); + return STATUS_SUCCESS; + } + + /* Wait for all I/O operations to complete. */ + IoReleaseRemoveLockAndWait( &p_ext->pdo.cl_ext.remove_lock, p_irp ); + + /* Release resources if it was not done yet. */ + if( p_ext->pdo.cl_ext.last_pnp_state != SurpriseRemoved ) + p_ext->pdo.cl_ext.vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + BUS_TRACE( BUS_DBG_PNP, ("Deleted device %s: PDO %p(=%p), ext %p\n", + p_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_ext->pdo.cl_ext.p_self_do, p_dev_obj, p_ext ) ); + IoDeleteDevice( p_dev_obj ); + + *p_action = IrpDoNothing; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + bus_port_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + p_ext->pdo.b_present = FALSE; + p_ext->pdo.b_reported_missing = TRUE; + BUS_TRACE( BUS_DBG_PNP, ("%s: PDO %p, ext %p, present %d, missing %d .\n", + p_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_ext->pdo.cl_ext.p_self_do, + p_ext, p_ext->pdo.b_present, p_ext->pdo.b_reported_missing ) ); + + *p_action = IrpComplete; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + DEVICE_CAPABILITIES *p_caps; + IO_STACK_LOCATION *p_io_stack; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + p_caps = p_io_stack->Parameters.DeviceCapabilities.Capabilities; + + p_caps->DeviceD1 = FALSE; + p_caps->DeviceD2 = FALSE; + p_caps->LockSupported = FALSE; + p_caps->EjectSupported = FALSE; + p_caps->Removable = FALSE; + p_caps->DockDevice = FALSE; + p_caps->UniqueID = TRUE; + p_caps->SilentInstall = TRUE; + p_caps->RawDeviceOK = FALSE; + p_caps->SurpriseRemovalOK = FALSE; + p_caps->WakeFromD0 = FALSE; + p_caps->WakeFromD1 = FALSE; + p_caps->WakeFromD2 = FALSE; + p_caps->WakeFromD3 = FALSE; + p_caps->HardwareDisabled = FALSE; + p_caps->DeviceState[PowerSystemWorking] = PowerDeviceD0; + p_caps->DeviceState[PowerSystemSleeping1] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping2] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping3] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemHibernate] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemShutdown] = PowerDeviceD3; + p_caps->SystemWake = PowerSystemUnspecified; + p_caps->DeviceWake = PowerDeviceUnspecified; + p_caps->D1Latency = 0; + p_caps->D2Latency = 0; + p_caps->D3Latency = 0; + + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + + BUS_ENTER( BUS_DBG_PNP ); + + *p_action = IrpComplete; + + status = 
cl_alloc_relations( p_irp, 1 );
+	if( !NT_SUCCESS( status ) )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("cl_alloc_relations returned 0x%08x.\n", status) );
+		return status;
+	}
+
+	p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information;
+	p_rel->Count = 1;
+	p_rel->Objects[0] = p_dev_obj;
+
+	ObReferenceObject( p_dev_obj );
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+
+static NTSTATUS
+port_query_device_id(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	WCHAR				*p_string;
+	bus_port_ext_t		*p_ext;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+
+	p_ext = (bus_port_ext_t*)p_dev_obj->DeviceExtension;
+	if( !p_ext->pdo.b_present )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	/* Device ID is "IBA\SID_<sid>", where <sid> is the IPoIB Service ID. */
+	p_string = ExAllocatePool( PagedPool, sizeof(IPOIB_DEVICE_ID) );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate device ID buffer (%d bytes).\n",
+			sizeof(IPOIB_DEVICE_ID)) );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+	cl_memcpy( p_string, IPOIB_DEVICE_ID, sizeof(IPOIB_DEVICE_ID) );
+	p_irp->IoStatus.Information = (ULONG_PTR)p_string;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_hardware_ids(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	WCHAR				*p_string;
+	bus_port_ext_t		*p_ext;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+
+	p_ext = (bus_port_ext_t*)p_dev_obj->DeviceExtension;
+	if( !p_ext->pdo.b_present )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	p_string = ExAllocatePool( PagedPool, sizeof(IPOIB_HARDWARE_ID) );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate hardware ID buffer (%d bytes).\n",
+			sizeof(IPOIB_HARDWARE_ID)) );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+	cl_memcpy( p_string, IPOIB_HARDWARE_ID, sizeof(IPOIB_HARDWARE_ID) );
+	p_irp->IoStatus.Information = (ULONG_PTR)p_string;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_compatible_ids(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	WCHAR				*p_string;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	UNUSED_PARAM( p_dev_obj );
+
+	p_string = ExAllocatePool( PagedPool, sizeof(IPOIB_COMPAT_ID) );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate compatible ID buffer (%d bytes).\n",
+			sizeof(IPOIB_COMPAT_ID)) );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+	cl_memcpy( p_string, IPOIB_COMPAT_ID, sizeof(IPOIB_COMPAT_ID) );
+	p_irp->IoStatus.Information = (ULONG_PTR)p_string;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_unique_id(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	NTSTATUS			status;
+	WCHAR				*p_string;
+	bus_port_ext_t		*p_ext;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	p_ext = p_dev_obj->DeviceExtension;
+
+	if( !p_ext->pdo.b_present )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	/* The instance ID is the port GUID.
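+	 * It is formatted as 16 hex digits, so the buffer below holds
+	 * 17 WCHARs: the digits plus a terminating NULL.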
 */
+	p_string = ExAllocatePool( PagedPool, sizeof(WCHAR) * 17 );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate instance ID buffer (%d bytes).\n",
+			sizeof(WCHAR) * 17) );
+		return STATUS_NO_MEMORY;
+	}
+
+	status = RtlStringCchPrintfW( p_string, 17, L"%016I64x", p_ext->port_guid );
+	if( !NT_SUCCESS( status ) )
+	{
+		CL_ASSERT( NT_SUCCESS( status ) );
+		ExFreePool( p_string );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("RtlStringCchPrintfW returned %08x.\n", status) );
+		return status;
+	}
+
+	p_irp->IoStatus.Information = (ULONG_PTR)p_string;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_description(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	WCHAR				*p_string;
+	bus_port_ext_t		*p_ext;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	p_ext = p_dev_obj->DeviceExtension;
+	if( !p_ext->pdo.b_present )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+
+	/* Return a copy of the device description string. */
+	p_string = ExAllocatePool( PagedPool, sizeof(IPOIB_DESCRIPTION) );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate device description buffer (%d bytes).\n",
+			sizeof(IPOIB_DESCRIPTION)) );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+	cl_memcpy( p_string, IPOIB_DESCRIPTION, sizeof(IPOIB_DESCRIPTION) );
+	p_irp->IoStatus.Information = (ULONG_PTR)p_string;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_location(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+		OUT			IRP* const					p_irp )
+{
+	WCHAR				*p_string;
+	bus_port_ext_t		*p_ext;
+	size_t				size;
+	ULONG				len;
+	NTSTATUS			status;
+	DEVICE_OBJECT		*p_hca_dev;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+	p_ext = p_dev_obj->DeviceExtension;
+	if( !p_ext->pdo.b_present )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("Device not present.\n") );
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	p_hca_dev = p_ext->pdo.h_ca->obj.p_ci_ca->verbs.p_hca_dev;
+
+	/* Get the length of the HCA's location. */
+	status = IoGetDeviceProperty( p_hca_dev,
+		DevicePropertyLocationInformation, 0, NULL, &len );
+	if( status != STATUS_BUFFER_TOO_SMALL )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("IoGetDeviceProperty for device location size returned %08x.\n",
+			status) );
+		return status;
+	}
+
+	/*
+	 * Allocate the string buffer to hold the HCA's location along with the
+	 * port number.  The port number is 32-bits, so in decimal it can be at
+	 * most 10 characters.
+	 */
+	size = len + sizeof(L", port ") + (sizeof(WCHAR) * 10);
+	if( size > (USHORT)-1 )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Length beyond limits.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	p_string = ExAllocatePool( PagedPool, size );
+	if( !p_string )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to allocate device location buffer (%d bytes).\n", size) );
+		return STATUS_NO_MEMORY;
+	}
+
+	/* Get the HCA's location information. */
+	status = IoGetDeviceProperty( p_hca_dev,
+		DevicePropertyLocationInformation, len, p_string, &len );
+	if( !NT_SUCCESS( status ) )
+	{
+		ExFreePool( p_string );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("IoGetDeviceProperty for device location returned %08x.\n",
+			status) );
+		return status;
+	}
+
+	/* Append the port number to the HCA's location.
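+	 * len is a byte count that includes the terminating NULL, so the
+	 * append starts at WCHAR index (len/2) - 1 to overwrite that NULL.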
*/ + status = RtlStringCbPrintfW( p_string + (len/2) - 1, size - len + 1, + L", port %d", p_ext->n_port ); + if( !NT_SUCCESS( status ) ) + { + CL_ASSERT( NT_SUCCESS( status ) ); + ExFreePool( p_string ); + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("RtlStringCbPrintfW returned %08x.\n", status) ); + return status; + } + + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + PNP_BUS_INFORMATION *p_bus_info; + + BUS_ENTER( BUS_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + *p_action = IrpComplete; + + p_bus_info = ExAllocatePool( PagedPool, sizeof(PNP_BUS_INFORMATION) ); + if( !p_bus_info ) + { + BUS_TRACE_EXIT( BUS_DBG_ERROR, + ("Failed to allocate PNP_BUS_INFORMATION (%d bytes).\n", + sizeof(PNP_BUS_INFORMATION)) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + p_bus_info->BusTypeGuid = GUID_BUS_TYPE_IBA; + //TODO: Memory from Intel - storage miniport would not stay loaded unless + //TODO: bus type was PCI. Look here if SRP is having problems staying + //TODO: loaded. + p_bus_info->LegacyBusType = PNPBus; + p_bus_info->BusNumber = 0; + + p_irp->IoStatus.Information = (ULONG_PTR)p_bus_info; + BUS_EXIT( BUS_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +port_query_ipoib_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ) +{ + NTSTATUS status; + ib_al_ifc_t *p_ifc; + ib_al_ifc_data_t *p_ifc_data; + ipoib_ifc_data_t *p_ipoib_data; + bus_port_ext_t *p_ext; + const GUID *p_guid; + + + BUS_ENTER( BUS_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + p_ext = p_dev_obj->DeviceExtension; + + BUS_TRACE( BUS_DBG_PNP, ("Query i/f for %s: PDO %p (=%p),ext %p, present %d, missing %d .\n", + p_ext->pdo.cl_ext.vfptr_pnp_po->identity, p_ext->pdo.cl_ext.p_self_do, + p_dev_obj, p_ext, p_ext->pdo.b_present, p_ext->pdo.b_reported_missing ) ); + + /* Get the interface. 
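+	 * The query is forwarded to the parent bus FDO, which exposes the
+	 * generic AL interface; the IPoIB-specific data is filled in below.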
 */
+	status = cl_fwd_query_ifc(
+		p_ext->pdo.p_parent_ext->cl_ext.p_self_do, p_io_stack );
+	if( !NT_SUCCESS( status ) )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Failed to forward interface query: %08X\n", status) );
+		return status;
+	}
+
+	if( !p_io_stack->Parameters.QueryInterface.InterfaceSpecificData )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_ERROR, ("No interface specific data!\n") );
+		return status;
+	}
+
+	p_ifc = (ib_al_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface;
+
+	p_ifc_data = (ib_al_ifc_data_t*)
+		p_io_stack->Parameters.QueryInterface.InterfaceSpecificData;
+	p_guid = p_ifc_data->type;
+	if( !IsEqualGUID( p_guid, &GUID_IPOIB_INTERFACE_DATA ) )
+	{
+		BUS_TRACE_EXIT( BUS_DBG_PNP, ("Unsupported interface data: \n\t"
+			"0x%08x, 0x%04x, 0x%04x, 0x%02x, 0x%02x, 0x%02x,"
+			"0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x.\n",
+			p_guid->Data1, p_guid->Data2, p_guid->Data3,
+			p_guid->Data4[0], p_guid->Data4[1], p_guid->Data4[2],
+			p_guid->Data4[3], p_guid->Data4[4], p_guid->Data4[5],
+			p_guid->Data4[6], p_guid->Data4[7]) );
+		return status;
+	}
+
+	if( p_ifc_data->version != IPOIB_INTERFACE_DATA_VERSION )
+	{
+		p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context );
+		BUS_TRACE_EXIT( BUS_DBG_ERROR,
+			("Unsupported version %d, expected %d\n",
+			p_ifc_data->version, IPOIB_INTERFACE_DATA_VERSION) );
+		return STATUS_NOT_SUPPORTED;
+	}
+
+	ASSERT( p_ifc_data->p_data );
+
+	if( p_ifc_data->size != sizeof(ipoib_ifc_data_t) )
+	{
+		p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context );
+		BUS_TRACE_EXIT( BUS_DBG_PNP,
+			("Buffer too small (%d given, %d required).\n",
+			p_ifc_data->size,
+			sizeof(ipoib_ifc_data_t)) );
+		return STATUS_BUFFER_TOO_SMALL;
+	}
+
+	/* Set the interface data. */
+	p_ipoib_data = (ipoib_ifc_data_t*)p_ifc_data->p_data;
+
+	p_ipoib_data->ca_guid = p_ext->pdo.h_ca->obj.p_ci_ca->verbs.guid;
+	p_ipoib_data->port_guid = p_ext->port_guid;
+	p_ipoib_data->port_num = (uint8_t)p_ext->n_port;
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+port_query_interface(
+	IN				DEVICE_OBJECT* const		p_dev_obj,
+	IN				IRP* const					p_irp,
+		OUT			cl_irp_action_t* const		p_action )
+{
+	bus_pdo_ext_t		*p_ext;
+	NTSTATUS			status;
+	IO_STACK_LOCATION	*p_io_stack;
+
+	BUS_ENTER( BUS_DBG_PNP );
+
+#pragma warning( push, 3 )
+	PAGED_CODE();
+#pragma warning( pop )
+
+	p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
+
+	/* Bottom of the stack - IRP must be completed. */
+	*p_action = IrpComplete;
+
+	/* Compare requested GUID with our supported interface GUIDs. */
+	if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType,
+		&GUID_IB_AL_INTERFACE ) )
+	{
+		status = port_query_ipoib_ifc( p_dev_obj, p_io_stack );
+	}
+	else if( IsEqualGUID( p_io_stack->Parameters.QueryInterface.InterfaceType,
+		&GUID_BUS_INTERFACE_STANDARD ) )
+	{
+		p_ext = p_dev_obj->DeviceExtension;
+		if( !p_ext->h_ca ||
+			!p_ext->b_present ||
+			p_ext->b_reported_missing )
+		{
+			return STATUS_NO_SUCH_DEVICE;
+		}
+
+		status = cl_fwd_query_ifc(
+			p_ext->h_ca->obj.p_ci_ca->verbs.p_hca_dev, p_io_stack );
+	}
+	else
+	{
+		status = p_irp->IoStatus.Status;
+	}
+
+	BUS_EXIT( BUS_DBG_PNP );
+	return status;
+}
+
+
+/* Work item callback to handle DevicePowerD0 IRPs at passive level.
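+ * The callback waits, 200 ms at a time, until the HCA has finished
+ * re-registering after hibernation before completing the IRP.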
*/ +static void +__HibernateUpWorkItem( + IN DEVICE_OBJECT* p_dev_obj, + IN void* context ) +{ + IO_STACK_LOCATION *p_io_stack; + bus_pdo_ext_t *p_ext; + IRP *p_irp; + POWER_STATE powerState; + + BUS_ENTER( BUS_DBG_POWER ); + + p_ext = (bus_pdo_ext_t*)p_dev_obj->DeviceExtension; + p_irp = (IRP*)context; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + IoFreeWorkItem( p_ext->p_po_work_item ); + p_ext->p_po_work_item = NULL; + + while (!p_ext->h_ca) { + BUS_TRACE( BUS_DBG_PNP, ("Waiting for the end of HCA registration ... \n")); + cl_thread_suspend( 200 ); /* suspend for 200 ms */ + } + + p_ext->dev_po_state = p_io_stack->Parameters.Power.State; + powerState = PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + + BUS_TRACE( BUS_DBG_POWER, + ("PoSetPowerState: old state %d, new state to %d\n", + powerState.DeviceState, p_ext->dev_po_state )); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + + BUS_EXIT( BUS_DBG_POWER ); +} + + +/* + * The PDOs created by the IB Bus driver are software devices. As such, + * all power states are supported. It is left to the HCA power policy + * owner to handle which states can be supported by the HCA. + */ +static NTSTATUS +port_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status = STATUS_SUCCESS; + IO_STACK_LOCATION *p_io_stack; + bus_pdo_ext_t *p_ext; + + BUS_ENTER( BUS_DBG_POWER ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + BUS_TRACE( BUS_DBG_POWER, + ("SET_POWER for PDO %p (ext %p): type %s, state %d, action %d \n", + p_dev_obj, p_ext, + (p_io_stack->Parameters.Power.Type) ? "DevicePowerState" : "SystemPowerState", + p_io_stack->Parameters.Power.State.DeviceState, + p_io_stack->Parameters.Power.ShutdownType )); + + if ((p_io_stack->Parameters.Power.Type == SystemPowerState) && + (p_io_stack->Parameters.Power.State.SystemState ==PowerSystemHibernate || + p_io_stack->Parameters.Power.State.SystemState ==PowerSystemSleeping1 )) + { + BUS_TRACE( BUS_DBG_POWER, ("Setting b_hibernating flag for PDO %p \n", p_dev_obj)); + p_ext->b_hibernating = TRUE; + } + + if( p_io_stack->Parameters.Power.Type == DevicePowerState ) + { + /* after hibernation PDO is not ready for work. we need to wait for finishing of the HCA registration */ + if( p_io_stack->Parameters.Power.State.DeviceState == PowerDeviceD0 && p_ext->b_hibernating) + { + /* Process in a work item - deregister_ca and HcaDeinit block. */ + ASSERT( !p_ext->p_po_work_item ); + p_ext->p_po_work_item = IoAllocateWorkItem( p_dev_obj ); + if( !p_ext->p_po_work_item ) + status = STATUS_INSUFFICIENT_RESOURCES; + else { + /* Process in work item callback. */ + IoMarkIrpPending( p_irp ); + IoQueueWorkItem( + p_ext->p_po_work_item, __HibernateUpWorkItem, DelayedWorkQueue, p_irp ); + *p_action = IrpDoNothing; + BUS_EXIT( BUS_DBG_POWER ); + return STATUS_PENDING; + } + } + + /* Notify the power manager. 
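+		 * For all other device power transitions the new state is simply
+		 * recorded and reported to the power manager inline.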
*/ + p_ext->dev_po_state = p_io_stack->Parameters.Power.State; + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + } + + *p_action = IrpComplete; + BUS_EXIT( BUS_DBG_POWER ); + return status; +} diff --git a/branches/Ndi/core/bus/kernel/bus_port_mgr.h b/branches/Ndi/core/bus/kernel/bus_port_mgr.h new file mode 100644 index 00000000..1529f2b0 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/bus_port_mgr.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( __BUS_PORT_MGR_H__ ) +#define __BUS_PORT_MGR_H__ + +#include +#include +#include + + +/* Global load service */ +typedef struct _port_mgr +{ + cl_obj_t obj; + ib_pnp_handle_t h_pnp; /* Handle for port PnP events */ + + /* Mutex protects both pointer vectors. */ + cl_mutex_t pdo_mutex; + + /* Pointer vector of child IPoIB port PDOs. */ + cl_qlist_t port_list; + +} port_mgr_t; + + +ib_api_status_t +create_port_mgr( + OUT port_mgr_t** const pp_port_mgr ); + + +NTSTATUS +port_mgr_get_bus_relations( + IN const net64_t ca_guid, + IN IRP* const p_irp ); + +#endif diff --git a/branches/Ndi/core/bus/kernel/ib_bus.inf b/branches/Ndi/core/bus/kernel/ib_bus.inf new file mode 100644 index 00000000..c3ae6c32 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/ib_bus.inf @@ -0,0 +1,207 @@ +; OpenIB InfiniBand Bus Driver. +; Copyright 2005 SilverStorm Technologies all Rights Reserved. +; Copyright 2006 Mellanox Technologies all Rights Reserved. + +[Version] +Signature="$Windows NT$" +Class=System +ClassGuid={4D36E97D-E325-11CE-BFC1-08002BE10318} +Provider=%OPENIB% +DriverVer=03/08/2006,1.0.0000.614 + + +; ================= Device Install section ===================== + +; 64-bit platforms also copy 32-bit user-mode binaries. 
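+; The WOW64 copies are directed to the 32-bit system directory
+; (DIRID_SYSTEM_X86, defined below as 16425).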
+[DestinationDirs] +DefaultDestDir=%DIRID_DRIVERS% +Ibbus.UMCopyFiles=%DIRID_SYSTEM% +Ibbus.WOW64CopyFiles=%DIRID_SYSTEM_X86% + +[SourceDisksNames.x86] +1=%DiskId%,,,"" + +[SourceDisksNames.amd64] +1=%DiskId%,,,"" + +[SourceDisksNames.ia64] +1=%DiskId%,,,"" + +[SourceDisksFiles.x86] +ibbus.sys=1 +ibiou.sys=1 +ibal.dll=1 +complib.dll=1 +ibald.dll=1 +complibd.dll=1 + +[SourceDisksFiles.amd64] +ibbus.sys=1 +ibiou.sys=1 +ibal.dll=1 +complib.dll=1 +ibald.dll=1 +complibd.dll=1 +cl32d.dll=1 +cl32.dll=1 +ibal32d.dll=1 +ibal32.dll=1 + +[SourceDisksFiles.ia64] +ibbus.sys=1 +ibiou.sys=1 +ibal.dll=1 +complib.dll=1 +ibald.dll=1 +complibd.dll=1 +cl32d.dll=1 +cl32.dll=1 +ibal32d.dll=1 +ibal32.dll=1 + +[Manufacturer] +%OPENIB% = Ibbus.DeviceSection,ntx86,ntamd64,ntia64 +%SST% = SST.DeviceSection,ntx86,ntamd64,ntia64 + +[Ibbus.DeviceSection] +; empty since we don't support W9x/Me + +[Ibbus.DeviceSection.ntx86] +%Ibbus.DeviceDesc% = Ibbus.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8} +%Iou.DeviceDesc% = Iou.DDInstall,IBA\IB_IOU + +[Ibbus.DeviceSection.ntamd64] +%Ibbus.DeviceDesc% = Ibbus.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8} +%Iou.DeviceDesc% = Iou.DDInstall,IBA\IB_IOU + +[Ibbus.DeviceSection.ntia64] +%Ibbus.DeviceDesc% = Ibbus.DDInstall,{94f41ced-78eb-407c-b5df-958040af0fd8} +%Iou.DeviceDesc% = Iou.DDInstall,IBA\IB_IOU + +[SST.DeviceSection] +; empty since we don't support W9x/Me + +[SST.DeviceSection.ntx86] +%VFx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0060,IBA\V00066aP0010 +%VEx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0058 +%FVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00dd +%EVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00de + +[SST.DeviceSection.ntamd64] +%VFx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0060,IBA\V00066aP0010 +%VEx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0058 +%FVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00dd +%EVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00de + +[SST.DeviceSection.ntia64] +%VFx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0060,IBA\V00066aP0010 +%VEx.DeviceDesc% = Iou.DDInstall,IBA\V00066aP0058 +%FVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00dd +%EVIC.DeviceDesc% = Iou.DDInstall,IBA\V00066aP00de + +[Ibbus.DDInstall.ntx86] +CopyFiles = Ibbus.CopyFiles +CopyFiles = Ibbus.UMCopyFiles + +[Ibbus.DDInstall.ntamd64] +CopyFiles = Ibbus.CopyFiles +CopyFiles = Ibbus.UMCopyFiles +CopyFiles = Ibbus.WOW64CopyFiles + +[Ibbus.DDInstall.ntia64] +CopyFiles = Ibbus.CopyFiles +CopyFiles = Ibbus.UMCopyFiles +CopyFiles = Ibbus.WOW64CopyFiles + +[Ibbus.DDInstall.ntx86.Services] +AddService = ibbus,%SPSVCINST_ASSOCSERVICE%,Ibbus.ServiceInstall + +[Ibbus.DDInstall.ntamd64.Services] +AddService = ibbus,%SPSVCINST_ASSOCSERVICE%,Ibbus.ServiceInstall + +[Ibbus.DDInstall.ntia64.Services] +AddService = ibbus,%SPSVCINST_ASSOCSERVICE%,Ibbus.ServiceInstall + +[Iou.DDInstall.nt] +CopyFiles = Iou.CopyFiles + +[Iou.DDInstall.nt.Services] +AddService = ibiou,%SPSVCINST_ASSOCSERVICE%,Iou.ServiceInstall + +[Ibbus.CopyFiles] +ibbus.sys + +[Ibbus.UMCopyFiles] +complib.dll,,,2 +ibal.dll,,,2 +complibd.dll,,,2 +ibald.dll,,,2 + +[Ibbus.WOW64CopyFiles] +complib.dll,cl32.dll,,2 +ibal.dll,ibal32.dll,,2 +complibd.dll,cl32d.dll,,2 +ibald.dll,ibal32d.dll,,2 + +[Iou.CopyFiles] +ibiou.sys + +; +; ============= Service Install section ============== +; + +[Ibbus.ServiceInstall] +DisplayName = %Ibbus.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ibbus.sys +LoadOrderGroup = extended base +AddReg = Ibbus.ParamsReg 
+Dependencies = mthca + +[Iou.ServiceInstall] +DisplayName = %Iou.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ibiou.sys +AddReg = Iou.ParamsReg + +[Ibbus.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x80000000 +HKR,"Parameters","ReportPortNIC",%REG_DWORD%,1 +HKR,"Parameters","IbalDebugLevel",%REG_DWORD%,2 +HKR,"Parameters","IbalDebugFlags",%REG_DWORD%,0x00ffffff +HKR,"Parameters","SmiPollInterval",%REG_DWORD_NO_CLOBBER%,20000 +HKR,"Parameters","IocQueryTimeout",%REG_DWORD_NO_CLOBBER%,250 +HKR,"Parameters","IocQueryRetries",%REG_DWORD_NO_CLOBBER%,4 +HKR,"Parameters","IocPollInterval",%REG_DWORD_NO_CLOBBER%,30000 + +[Iou.ParamsReg] +HKR,"Parameters","DebugLevel",%REG_DWORD%,2 +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x00ffffff + +[Strings] +OPENIB = "OpenIB Alliance" +SST = "SilverStorm Technologies" +Ibbus.DeviceDesc = "InfiniBand Fabric" +VFx.DeviceDesc = "SilverStorm VFx" +VEx.DeviceDesc = "SilverStorm VEx" +FVIC.DeviceDesc = "SilverStorm FVIC" +EVIC.DeviceDesc = "SilverStorm EVIC" +Iou.DeviceDesc = "InfiniBand I/O Unit" +Ibbus.ServiceDesc = "OpenIB InfiniBand Bus Driver" +Ibal.ServiceDesc = "OpenIB InfiniBand Access Layer" +Iou.ServiceDesc = "OpenIB InfiniBand I/O Unit Driver" +DiskId = "OpenIB InfiniBand Access Layer installation disk" +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_DWORD_NO_CLOBBER = 0x00010003 +DIRID_SYSTEM = 11 +DIRID_DRIVERS = 12 +DIRID_SYSTEM_X86 = 16425 diff --git a/branches/Ndi/core/bus/kernel/ibbus.rc b/branches/Ndi/core/bus/kernel/ibbus.rc new file mode 100644 index 00000000..a6b3ffd0 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/ibbus.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniBand Fabric Bus Driver (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "InfiniBand Fabric Bus Driver" +#endif + +#define VER_INTERNALNAME_STR "ibbus.sys" +#define VER_ORIGINALFILENAME_STR "ibbus.sys" + +#include diff --git a/branches/Ndi/core/bus/kernel/makefile b/branches/Ndi/core/bus/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/core/bus/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/core/complib/cl_async_proc.c b/branches/Ndi/core/complib/cl_async_proc.c new file mode 100644 index 00000000..c8dbbe12 --- /dev/null +++ b/branches/Ndi/core/complib/cl_async_proc.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include + +#define CL_ASYNC_PROC_MIN 16 +#define CL_ASYNC_PROC_GROWSIZE 16 + + +/* Worker function declaration. 
*/ +static void +__cl_async_proc_worker( + IN void* const context ); + + +void +cl_async_proc_construct( + IN cl_async_proc_t* const p_async_proc ) +{ + CL_ASSERT( p_async_proc ); + + cl_qlist_init( &p_async_proc->item_queue ); + cl_spinlock_construct( &p_async_proc->lock ); + cl_thread_pool_construct( &p_async_proc->thread_pool ); + p_async_proc->state = CL_UNINITIALIZED; +} + + +cl_status_t +cl_async_proc_init( + IN cl_async_proc_t* const p_async_proc, + IN const uint32_t thread_count, + IN const char* const name ) +{ + cl_status_t status; + + CL_ASSERT( p_async_proc ); + + cl_async_proc_construct( p_async_proc ); + + status = cl_spinlock_init( &p_async_proc->lock ); + if( status != CL_SUCCESS ) + { + cl_async_proc_destroy( p_async_proc ); + return( status ); + } + + status = cl_thread_pool_init( &p_async_proc->thread_pool, thread_count, + __cl_async_proc_worker, p_async_proc, name ); + if( status != CL_SUCCESS ) + { + cl_async_proc_destroy( p_async_proc ); + return (status); + } + + p_async_proc->state = CL_INITIALIZED; + return( status ); +} + + +void +cl_async_proc_destroy( + IN cl_async_proc_t* const p_async_proc ) +{ + /* Destroy the thread pool first so that the threads stop. */ + cl_thread_pool_destroy( &p_async_proc->thread_pool ); + + /* Flush all queued callbacks. */ + if( p_async_proc->state == CL_INITIALIZED ) + __cl_async_proc_worker( p_async_proc ); + + /* Destroy the spinlock. */ + cl_spinlock_destroy( &p_async_proc->lock ); + + p_async_proc->state = CL_DESTROYED; +} + + +void +cl_async_proc_queue( + IN cl_async_proc_t* const p_async_proc, + IN cl_async_proc_item_t* const p_item ) +{ + CL_ASSERT( p_async_proc ); + CL_ASSERT( p_item->pfn_callback ); + CL_ASSERT( p_async_proc->state == CL_INITIALIZED ); + + /* Enqueue this item for processing. */ + cl_spinlock_acquire( &p_async_proc->lock ); + cl_qlist_insert_tail( &p_async_proc->item_queue, + &p_item->pool_item.list_item ); + cl_spinlock_release( &p_async_proc->lock ); + + /* Signal the thread pool to wake up. */ + cl_thread_pool_signal( &p_async_proc->thread_pool ); +} + + +static void +__cl_async_proc_worker( + IN void* const context) +{ + cl_async_proc_t *p_async_proc = (cl_async_proc_t*)context; + cl_list_item_t *p_list_item; + cl_async_proc_item_t *p_item; + + CL_ASSERT( p_async_proc->state == CL_INITIALIZED ); + + /* Process items from the head of the queue until it is empty. */ + cl_spinlock_acquire( &p_async_proc->lock ); + p_list_item = cl_qlist_remove_head( &p_async_proc->item_queue ); + while( p_list_item != cl_qlist_end( &p_async_proc->item_queue ) ) + { + /* Release the lock during the user's callback. */ + cl_spinlock_release( &p_async_proc->lock ); + + /* Invoke the user callback. */ + p_item = (cl_async_proc_item_t*)p_list_item; + p_item->pfn_callback( p_item ); + + /* Acquire the lock again to continue processing. */ + cl_spinlock_acquire( &p_async_proc->lock ); + /* Get the next item in the queue. */ + p_list_item = cl_qlist_remove_head( &p_async_proc->item_queue ); + } + + /* The queue is empty. Release the lock and return. */ + cl_spinlock_release( &p_async_proc->lock ); +} diff --git a/branches/Ndi/core/complib/cl_list.c b/branches/Ndi/core/complib/cl_list.c new file mode 100644 index 00000000..4866b125 --- /dev/null +++ b/branches/Ndi/core/complib/cl_list.c @@ -0,0 +1,650 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of quick list, and list. + * + * Environment: + * All + */ + + +#include +#include + + +#define FREE_ITEM_GROW_SIZE 10 + + +/****************************************************************************** +******************************************************************************* +************** ************ +************** IMPLEMENTATION OF QUICK LIST ************ +************** ************ +******************************************************************************* +******************************************************************************/ + +void +cl_qlist_insert_array_head( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_array, + IN size_t item_count, + IN const size_t item_size ) +{ + cl_list_item_t *p_item; + + CL_ASSERT( p_list ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + CL_ASSERT( p_array ); + CL_ASSERT( item_size >= sizeof(cl_list_item_t) ); + CL_ASSERT( item_count ); + + /* + * To add items from the array to the list in the same order as + * the elements appear in the array, we add them starting with + * the last one first. Locate the last item. + */ + p_item = (cl_list_item_t*)( + (uint8_t*)p_array + (item_size * (item_count - 1))); + + /* Continue to add all items to the list. */ + while( item_count-- ) + { + cl_qlist_insert_head( p_list, p_item ); + + /* Get the next object to add to the list. */ + p_item = (cl_list_item_t*)((uint8_t*)p_item - item_size); + } +} + + +void +cl_qlist_insert_array_tail( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_array, + IN size_t item_count, + IN const size_t item_size ) +{ + cl_list_item_t *p_item; + + CL_ASSERT( p_list ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + CL_ASSERT( p_array ); + CL_ASSERT( item_size >= sizeof(cl_list_item_t) ); + CL_ASSERT( item_count ); + + /* Set the first item to add to the list. */ + p_item = p_array; + + /* Continue to add all items to the list. */ + while( item_count-- ) + { + cl_qlist_insert_tail( p_list, p_item ); + + /* Get the next object to add to the list. 
*/ + p_item = (cl_list_item_t*)((uint8_t*)p_item + item_size); + } +} + + +void +cl_qlist_insert_list_head( + IN cl_qlist_t* const p_dest_list, + IN cl_qlist_t* const p_src_list ) +{ +#if defined( _DEBUG_ ) + cl_list_item_t *p_item; +#endif + + CL_ASSERT( p_dest_list ); + CL_ASSERT( p_src_list ); + CL_ASSERT( p_dest_list->state == CL_INITIALIZED ); + CL_ASSERT( p_src_list->state == CL_INITIALIZED ); + + /* + * Is the src list empty? + * We must have this check here for code below to work. + */ + if( cl_is_qlist_empty( p_src_list ) ) + return; + +#if defined( _DEBUG_ ) + /* Check that all items in the source list belong there. */ + p_item = cl_qlist_head( p_src_list ); + while( p_item != cl_qlist_end( p_src_list ) ) + { + /* All list items in the source list must point to it. */ + CL_ASSERT( p_item->p_list == p_src_list ); + /* Point them all to the destination list. */ + p_item->p_list = p_dest_list; + p_item = cl_qlist_next( p_item ); + } +#endif + + /* Chain the destination list to the tail of the source list. */ + cl_qlist_tail( p_src_list )->p_next = cl_qlist_head( p_dest_list ); + cl_qlist_head( p_dest_list )->p_prev = cl_qlist_tail( p_src_list ); + + /* + * Update the head of the destination list to the head of + * the source list. + */ + p_dest_list->end.p_next = cl_qlist_head( p_src_list ); + cl_qlist_head( p_src_list )->p_prev = &p_dest_list->end; + + /* + * Update the count of the destination to reflect the source items having + * been added. + */ + p_dest_list->count += p_src_list->count; + + /* Update source list to reflect being empty. */ + __cl_qlist_reset( p_src_list ); +} + + +void +cl_qlist_insert_list_tail( + IN cl_qlist_t* const p_dest_list, + IN cl_qlist_t* const p_src_list ) +{ +#if defined( _DEBUG_ ) + cl_list_item_t *p_item; +#endif + + CL_ASSERT( p_dest_list ); + CL_ASSERT( p_src_list ); + CL_ASSERT( p_dest_list->state == CL_INITIALIZED ); + CL_ASSERT( p_src_list->state == CL_INITIALIZED ); + + /* + * Is the src list empty? + * We must have this check here for code below to work. + */ + if( cl_is_qlist_empty( p_src_list ) ) + return; + +#if defined( _DEBUG_ ) + /* Check that all items in the source list belong there. */ + p_item = cl_qlist_head( p_src_list ); + while( p_item != cl_qlist_end( p_src_list ) ) + { + /* All list items in the source list must point to it. */ + CL_ASSERT( p_item->p_list == p_src_list ); + /* Point them all to the destination list. */ + p_item->p_list = p_dest_list; + p_item = cl_qlist_next( p_item ); + } +#endif + + /* Chain the source list to the tail of the destination list. */ + cl_qlist_tail( p_dest_list )->p_next = cl_qlist_head( p_src_list ); + cl_qlist_head( p_src_list )->p_prev = cl_qlist_tail( p_dest_list ); + + /* + * Update the tail of the destination list to the tail of + * the source list. + */ + p_dest_list->end.p_prev = cl_qlist_tail( p_src_list ); + cl_qlist_tail( p_src_list )->p_next = &p_dest_list->end; + + /* + * Update the count of the destination to reflect the source items having + * been added. + */ + p_dest_list->count += p_src_list->count; + + /* Update source list to reflect being empty. 
*/ + __cl_qlist_reset( p_src_list ); +} + + +boolean_t +cl_is_item_in_qlist( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item ) +{ + const cl_list_item_t* p_temp; + + CL_ASSERT( p_list ); + CL_ASSERT( p_list_item ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + /* Traverse looking for a match */ + p_temp = cl_qlist_head( p_list ); + while( p_temp != cl_qlist_end( p_list ) ) + { + if( p_temp == p_list_item ) + { + CL_ASSERT( p_list_item->p_list == p_list ); + return( TRUE ); + } + + p_temp = cl_qlist_next( p_temp ); + } + + return( FALSE ); +} + + +cl_list_item_t* +cl_qlist_find_next( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ) +{ + cl_list_item_t *p_found_item; + + CL_ASSERT( p_list ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + CL_ASSERT( p_list_item ); + CL_ASSERT( p_list_item->p_list == p_list ); + CL_ASSERT( pfn_func ); + + p_found_item = cl_qlist_next( p_list_item ); + + /* The user provided a compare function */ + while( p_found_item != cl_qlist_end( p_list ) ) + { + CL_ASSERT( p_found_item->p_list == p_list ); + + if( pfn_func( p_found_item, (void*)context ) == CL_SUCCESS ) + break; + + p_found_item = cl_qlist_next( p_found_item ); + } + + /* No match */ + return( p_found_item ); +} + + +cl_list_item_t* +cl_qlist_find_prev( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ) +{ + cl_list_item_t *p_found_item; + + CL_ASSERT( p_list ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + CL_ASSERT( p_list_item ); + CL_ASSERT( p_list_item->p_list == p_list ); + CL_ASSERT( pfn_func ); + + p_found_item = cl_qlist_prev( p_list_item ); + + /* The user provided a compare function */ + while( p_found_item != cl_qlist_end( p_list ) ) + { + CL_ASSERT( p_found_item->p_list == p_list ); + + if( pfn_func( p_found_item, (void*)context ) == CL_SUCCESS ) + break; + + p_found_item = cl_qlist_prev( p_found_item ); + } + + /* No match */ + return( p_found_item ); +} + + +void +cl_qlist_apply_func( + IN const cl_qlist_t* const p_list, + IN cl_pfn_qlist_apply_t pfn_func, + IN const void* const context ) +{ + cl_list_item_t* p_list_item; + + /* Note that context can have any arbitrary value. */ + CL_ASSERT( p_list ); + CL_ASSERT( p_list->state == CL_INITIALIZED ); + CL_ASSERT( pfn_func ); + + p_list_item = cl_qlist_head( p_list ); + while( p_list_item != cl_qlist_end( p_list ) ) + { + pfn_func( p_list_item, (void*)context ); + p_list_item = cl_qlist_next( p_list_item ); + } +} + + +void +cl_qlist_move_items( + IN cl_qlist_t* const p_src_list, + IN cl_qlist_t* const p_dest_list, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ) +{ + cl_list_item_t *p_current_item, *p_next; + + CL_ASSERT( p_src_list ); + CL_ASSERT( p_dest_list ); + CL_ASSERT( p_src_list->state == CL_INITIALIZED ); + CL_ASSERT( p_dest_list->state == CL_INITIALIZED ); + CL_ASSERT( pfn_func ); + + p_current_item = cl_qlist_head( p_src_list ); + + while( p_current_item != cl_qlist_end( p_src_list ) ) + { + /* Before we do anything, get a pointer to the next item. */ + p_next = cl_qlist_next( p_current_item ); + + if( pfn_func( p_current_item, (void*)context ) == CL_SUCCESS ) + { + /* Move the item from one list to the other. 
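+			 * The item must be removed first: a list item can be linked
+			 * into only one quick list at a time.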
*/
+			cl_qlist_remove_item( p_src_list, p_current_item );
+			cl_qlist_insert_tail( p_dest_list, p_current_item );
+		}
+		p_current_item = p_next;
+	}
+}
+
+
+/******************************************************************************
+*******************************************************************************
+**************                                                     ************
+**************             IMPLEMENTATION OF LIST                  ************
+**************                                                     ************
+*******************************************************************************
+******************************************************************************/
+
+
+void
+cl_list_construct(
+	IN	cl_list_t* const	p_list )
+{
+	CL_ASSERT( p_list );
+
+	cl_qpool_construct( &p_list->list_item_pool );
+}
+
+
+cl_status_t
+cl_list_init(
+	IN	cl_list_t* const	p_list,
+	IN	const size_t		min_items )
+{
+	size_t	grow_size;
+
+	CL_ASSERT( p_list );
+	cl_qlist_init( &p_list->list );
+
+	/*
+	 * We will grow by min_items/8 items at a time, with a minimum of
+	 * FREE_ITEM_GROW_SIZE.
+	 */
+	grow_size = min_items >> 3;
+	if( grow_size < FREE_ITEM_GROW_SIZE )
+		grow_size = FREE_ITEM_GROW_SIZE;
+
+	/* Initialize the pool of list items. */
+	return( cl_qpool_init( &p_list->list_item_pool, min_items, 0, grow_size,
+		sizeof(cl_pool_obj_t), NULL, NULL, NULL ) );
+}
+
+
+void
+cl_list_destroy(
+	IN	cl_list_t* const	p_list )
+{
+	CL_ASSERT( p_list );
+
+	cl_qpool_destroy( &p_list->list_item_pool );
+}
+
+
+static cl_status_t
+cl_list_find_cb(
+	IN	const cl_list_item_t* const	p_list_item,
+	IN	void* const					context )
+{
+	CL_ASSERT( p_list_item );
+
+	if( cl_list_obj( p_list_item ) == context )
+		return( CL_SUCCESS );
+
+	return( CL_NOT_FOUND );
+}
+
+
+cl_status_t
+cl_list_remove_object(
+	IN	cl_list_t* const	p_list,
+	IN	const void* const	p_object )
+{
+	cl_list_item_t	*p_list_item;
+
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+	/* Find the item in question. */
+	p_list_item =
+		cl_qlist_find_from_head( &p_list->list, cl_list_find_cb, p_object );
+	if( p_list_item != cl_qlist_end( &p_list->list ) )
+	{
+		/* Remove this item. */
+		cl_qlist_remove_item( &p_list->list, p_list_item );
+		cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)p_list_item );
+		return( CL_SUCCESS );
+	}
+	return( CL_NOT_FOUND );
+}
+
+
+boolean_t
+cl_is_object_in_list(
+	IN	const cl_list_t* const	p_list,
+	IN	const void* const		p_object )
+{
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+	return( cl_qlist_find_from_head( &p_list->list, cl_list_find_cb, p_object )
+		!= cl_qlist_end( &p_list->list ) );
+}
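+
+
+/*
+ * A minimal usage sketch of the quick-list splice implemented earlier in
+ * this file (cl_qlist_insert_list_tail): the source items are moved to the
+ * destination in one relinking step, after which the source list is valid
+ * and empty.  Illustrative only; example_splice is a hypothetical name and
+ * the block is compiled out.
+ */
+#if 0
+static void
+example_splice(
+	IN	cl_qlist_t* const	p_dest,
+	IN	cl_qlist_t* const	p_src )
+{
+	const size_t	total = cl_qlist_count( p_dest ) + cl_qlist_count( p_src );
+
+	/* Append every item of p_src to p_dest, preserving order. */
+	cl_qlist_insert_list_tail( p_dest, p_src );
+
+	CL_ASSERT( cl_qlist_count( p_dest ) == total );
+	CL_ASSERT( cl_is_qlist_empty( p_src ) );
+}
+#endif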
+
+
+cl_status_t
+cl_list_insert_array_head(
+	IN	cl_list_t* const	p_list,
+	IN	const void* const	p_array,
+	IN	uint32_t			item_count,
+	IN	const uint32_t		item_size )
+{
+	cl_status_t	status;
+	void		*p_object;
+	uint32_t	count;
+
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+	CL_ASSERT( p_array );
+	CL_ASSERT( item_size );
+	CL_ASSERT( item_count );
+
+	/* Remember the original count so that a failure can be unwound. */
+	count = item_count;
+
+	/*
+	 * To add items from the array to the list in the same order as
+	 * the elements appear in the array, we add them starting with
+	 * the last one first.  Locate the last item.
+	 */
+	p_object = ((uint8_t*)p_array + (item_size * (item_count - 1)));
+
+	/* Continue to add all items to the list. */
+	while( item_count-- )
+	{
+		status = cl_list_insert_head( p_list, p_object );
+		if( status != CL_SUCCESS )
+		{
+			/* Remove exactly as many items as were inserted. */
+			while( ++item_count < count )
+				cl_list_remove_head( p_list );
+			return( status );
+		}
+
+		/* Get the next object to add to the list. */
+		p_object = ((uint8_t*)p_object - item_size);
+	}
+
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_list_insert_array_tail(
+	IN	cl_list_t* const	p_list,
+	IN	const void* const	p_array,
+	IN	uint32_t			item_count,
+	IN	const uint32_t		item_size )
+{
+	cl_status_t	status;
+	void		*p_object;
+	uint32_t	count;
+
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+	CL_ASSERT( p_array );
+	CL_ASSERT( item_size );
+	CL_ASSERT( item_count );
+
+	/* Remember the original count so that a failure can be unwound. */
+	count = item_count;
+
+	/* Set the first item to add to the list. */
+	p_object = (void*)p_array;
+
+	/* Continue to add all items to the list. */
+	while( item_count-- )
+	{
+		status = cl_list_insert_tail( p_list, p_object );
+		if( status != CL_SUCCESS )
+		{
+			/* Remove exactly as many items as were inserted. */
+			while( ++item_count < count )
+				cl_list_remove_tail( p_list );
+			return( status );
+		}
+
+		/* Get the next object to add to the list. */
+		p_object = ((uint8_t*)p_object + item_size);
+	}
+
+	return( CL_SUCCESS );
+}
+
+
+const cl_list_iterator_t
+cl_list_find_from_head(
+	IN	const cl_list_t* const	p_list,
+	IN	cl_pfn_list_find_t		pfn_func,
+	IN	const void* const		context )
+{
+	cl_status_t			status;
+	cl_list_iterator_t	itor;
+
+	/* Note that context can have any arbitrary value. */
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+	CL_ASSERT( pfn_func );
+
+	itor = cl_list_head( p_list );
+
+	while( itor != cl_list_end( p_list ) )
+	{
+		status = pfn_func( cl_list_obj( itor ), (void*)context );
+		if( status == CL_SUCCESS )
+			break;
+
+		itor = cl_list_next( itor );
+	}
+
+	/* No match. */
+	return( itor );
+}
+
+
+const cl_list_iterator_t
+cl_list_find_from_tail(
+	IN	const cl_list_t* const	p_list,
+	IN	cl_pfn_list_find_t		pfn_func,
+	IN	const void* const		context )
+{
+	cl_status_t			status;
+	cl_list_iterator_t	itor;
+
+	/* Note that context can have any arbitrary value. */
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+	CL_ASSERT( pfn_func );
+
+	itor = cl_list_tail( p_list );
+
+	while( itor != cl_list_end( p_list ) )
+	{
+		status = pfn_func( cl_list_obj( itor ), (void*)context );
+		if( status == CL_SUCCESS )
+			break;
+
+		itor = cl_list_prev( itor );
+	}
+
+	/* No match. */
+	return( itor );
+}
+
+
+void
+cl_list_apply_func(
+	IN	const cl_list_t* const	p_list,
+	IN	cl_pfn_list_apply_t		pfn_func,
+	IN	const void* const		context )
+{
+	cl_list_iterator_t	itor;
+
+	/* Note that context can have any arbitrary value. */
+	CL_ASSERT( p_list );
+	CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+	CL_ASSERT( pfn_func );
+
+	itor = cl_list_head( p_list );
+
+	while( itor != cl_list_end( p_list ) )
+	{
+		pfn_func( cl_list_obj( itor ), (void*)context );
+
+		itor = cl_list_next( itor );
+	}
+}
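+
+
+/*
+ * A usage sketch for the array-insertion helpers above: on success the list
+ * gains every array element in array order; on failure the partial work is
+ * unwound and the list is left unchanged.  Illustrative only;
+ * example_insert_array is a hypothetical name and the block is compiled out.
+ */
+#if 0
+static cl_status_t
+example_insert_array(
+	IN	cl_list_t* const	p_list )
+{
+	int	values[4] = { 1, 2, 3, 4 };
+
+	/* On CL_SUCCESS the list tail now reads 1, 2, 3, 4. */
+	return( cl_list_insert_array_tail( p_list, values, 4, sizeof(int) ) );
+}
+#endif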
diff --git a/branches/Ndi/core/complib/cl_map.c b/branches/Ndi/core/complib/cl_map.c
new file mode 100644
index 00000000..9af2471c
--- /dev/null
+++ b/branches/Ndi/core/complib/cl_map.c
@@ -0,0 +1,2218 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ *	Implementation of quick map, a binary tree where the caller always
+ *	provides all necessary storage.
+ *
+ * Environment:
+ *	All
+ */
+
+
+/*****************************************************************************
+*
+* Map
+*
+* Map is an associative array.  By providing a key, the caller can retrieve
+* an object from the map.  All objects in the map have an associated key,
+* as specified by the caller when the object was inserted into the map.
+* In addition to random access, the caller can traverse the map much like
+* a linked list, either forwards from the first object or backwards from
+* the last object.  The objects in the map are always traversed in
+* order since the nodes are stored sorted.
+*
+* This implementation of Map uses a red-black tree verified against the
+* Cormen-Leiserson-Rivest text, McGraw-Hill Edition, fourteenth
+* printing, 1994.
+*
+*****************************************************************************/
+
+
+#include <complib/cl_qmap.h>
+#include <complib/cl_rbmap.h>
+#include <complib/cl_fleximap.h>
+#include <complib/cl_map.h>
+#include <complib/cl_qpool.h>
+
+
+/******************************************************************************
+*******************************************************************************
+**************                                                     ************
+**************           IMPLEMENTATION OF RB MAP                  ************
+**************                                                     ************
+*******************************************************************************
+******************************************************************************/
+
+
+/*
+ * Returns whether a given item is on the left of its parent.
+ */
+static boolean_t
+__cl_rbmap_is_left_child(
+	IN	const cl_rbmap_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+	CL_ASSERT( p_item->p_up );
+	CL_ASSERT( p_item->p_up != p_item );
+
+	return( p_item->p_up->p_left == p_item );
+}
+
+
+/*
+ * Retrieve the pointer to the parent's pointer to an item.
+ */
+static cl_rbmap_item_t**
+__cl_rbmap_get_parent_ptr_to_item(
+	IN	cl_rbmap_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+	CL_ASSERT( p_item->p_up );
+	CL_ASSERT( p_item->p_up != p_item );
+
+	if( __cl_rbmap_is_left_child( p_item ) )
+		return( &p_item->p_up->p_left );
+
+	CL_ASSERT( p_item->p_up->p_right == p_item );
+	return( &p_item->p_up->p_right );
+}
+
+
+/*
+ * Rotate a node to the left.  This rotation affects the least number of links
+ * between nodes and brings the level of C up by one while increasing the depth
+ * of A one.
Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * A C + * / \ / \ + * W C A Z + * / \ / \ + * B Z W B + * / \ / \ + * X Y X Y + */ +static void +__cl_rbmap_rot_left( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ) +{ + cl_rbmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_right != &p_map->nil ); + + pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item ); + + /* Point R to C instead of A. */ + *pp_root = p_item->p_right; + /* Set C's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set A's right to B */ + p_item->p_right = (*pp_root)->p_left; + /* + * Set B's parent to A. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_left != &p_map->nil ) + (*pp_root)->p_left->p_up = p_item; + + /* Set C's left to A. */ + (*pp_root)->p_left = p_item; + /* Set A's parent to C. */ + p_item->p_up = *pp_root; +} + + +/* + * Rotate a node to the right. This rotation affects the least number of links + * between nodes and brings the level of A up by one while increasing the depth + * of C one. Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * C A + * / \ / \ + * A Z W C + * / \ / \ + * W B B Z + * / \ / \ + * X Y X Y + */ +static void +__cl_rbmap_rot_right( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_item ) +{ + cl_rbmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_left != &p_map->nil ); + + /* Point R to A instead of C. */ + pp_root = __cl_rbmap_get_parent_ptr_to_item( p_item ); + (*pp_root) = p_item->p_left; + /* Set A's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set C's left to B */ + p_item->p_left = (*pp_root)->p_right; + /* + * Set B's parent to C. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_right != &p_map->nil ) + (*pp_root)->p_right->p_up = p_item; + + /* Set A's right to C. */ + (*pp_root)->p_right = p_item; + /* Set C's parent to A. */ + p_item->p_up = *pp_root; +} + + +/* + * Balance a tree starting at a given item back to the root. 
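*/
+
+
+/*
+ * A sketch of how a caller drives cl_rbmap_insert (defined below): rbmap
+ * items carry no key, so the caller locates the insertion parent and side
+ * itself.  This example reads the nil sentinel and child links directly, as
+ * the code in this file does; my_node_t and example_rbmap_insert are
+ * hypothetical names, duplicate keys are not handled, and the block is
+ * compiled out.
+ */
+#if 0
+typedef struct _my_node
+{
+	cl_rbmap_item_t	item;
+	uint64_t		key;
+
+}	my_node_t;
+
+static void
+example_rbmap_insert(
+	IN	cl_rbmap_t* const	p_map,
+	IN	my_node_t* const	p_new )
+{
+	cl_rbmap_item_t	*p_at = cl_rbmap_end( p_map );
+	cl_rbmap_item_t	*p_cur = cl_rbmap_root( p_map );
+	boolean_t		left = TRUE;
+
+	/* Descend to the leaf under which the new node belongs. */
+	while( p_cur != &p_map->nil )
+	{
+		p_at = p_cur;
+		left = (boolean_t)
+			(p_new->key < PARENT_STRUCT( p_cur, my_node_t, item )->key);
+		p_cur = left ? p_cur->p_left : p_cur->p_right;
+	}
+
+	cl_rbmap_insert( p_map, p_at, &p_new->item, left );
+}
+#endif
+
+
+/*
+ * Balance a tree starting at a given item back to the root.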
+ */ +static void +__cl_rbmap_ins_bal( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* p_item ) +{ + cl_rbmap_item_t* p_grand_uncle; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item != &p_map->root ); + + while( p_item->p_up->color == CL_MAP_RED ) + { + if( __cl_rbmap_is_left_child( p_item->p_up ) ) + { + p_grand_uncle = p_item->p_up->p_up->p_right; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( !__cl_rbmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_rbmap_rot_left( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_right( p_map, p_item->p_up->p_up ); + } + else + { + p_grand_uncle = p_item->p_up->p_up->p_left; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( __cl_rbmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_rbmap_rot_right( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_rbmap_rot_left( p_map, p_item->p_up->p_up ); + } + } +} + + +void +cl_rbmap_insert( + IN cl_rbmap_t* const p_map, + IN cl_rbmap_item_t* const p_insert_at, + IN cl_rbmap_item_t* const p_item, + IN boolean_t left ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_insert_at ); + CL_ASSERT( p_item ); + CL_ASSERT( p_map->root.p_up == &p_map->root ); + CL_ASSERT( p_map->root.color != CL_MAP_RED ); + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + + p_item->p_left = &p_map->nil; + p_item->p_right = &p_map->nil; + p_item->color = CL_MAP_RED; + + if( p_insert_at == cl_rbmap_end( p_map ) ) + { + p_map->root.p_left = p_item; + p_item->p_up = &p_map->root; + } + else + { + if( left ) + p_insert_at->p_left = p_item; + else + p_insert_at->p_right = p_item; + + p_item->p_up = p_insert_at; + } + + /* Increase the count. */ + p_map->count++; + + /* + * We have added depth to this section of the tree. + * Rebalance as necessary as we retrace our path through the tree + * and update colors. + */ + __cl_rbmap_ins_bal( p_map, p_item ); + + cl_rbmap_root( p_map )->color = CL_MAP_BLACK; + + /* + * Note that it is not necessary to re-color the nil node black because all + * red color assignments are made via the p_up pointer, and nil is never + * set as the value of a p_up pointer. + */ + +#ifdef _DEBUG_ + /* Set the pointer to the map in the map item for consistency checking. 
*/
+	p_item->p_map = p_map;
+#endif
+}
+
+
+static void
+__cl_rbmap_del_bal(
+	IN	cl_rbmap_t* const	p_map,
+	IN	cl_rbmap_item_t*	p_item )
+{
+	cl_rbmap_item_t		*p_uncle;
+
+	while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) )
+	{
+		if( __cl_rbmap_is_left_child( p_item ) )
+		{
+			p_uncle = p_item->p_up->p_right;
+
+			if( p_uncle->color == CL_MAP_RED )
+			{
+				p_uncle->color = CL_MAP_BLACK;
+				p_item->p_up->color = CL_MAP_RED;
+				__cl_rbmap_rot_left( p_map, p_item->p_up );
+				p_uncle = p_item->p_up->p_right;
+			}
+
+			if( p_uncle->p_right->color != CL_MAP_RED )
+			{
+				if( p_uncle->p_left->color != CL_MAP_RED )
+				{
+					p_uncle->color = CL_MAP_RED;
+					p_item = p_item->p_up;
+					continue;
+				}
+
+				p_uncle->p_left->color = CL_MAP_BLACK;
+				p_uncle->color = CL_MAP_RED;
+				__cl_rbmap_rot_right( p_map, p_uncle );
+				p_uncle = p_item->p_up->p_right;
+			}
+			p_uncle->color = p_item->p_up->color;
+			p_item->p_up->color = CL_MAP_BLACK;
+			p_uncle->p_right->color = CL_MAP_BLACK;
+			__cl_rbmap_rot_left( p_map, p_item->p_up );
+			break;
+		}
+		else
+		{
+			p_uncle = p_item->p_up->p_left;
+
+			if( p_uncle->color == CL_MAP_RED )
+			{
+				p_uncle->color = CL_MAP_BLACK;
+				p_item->p_up->color = CL_MAP_RED;
+				__cl_rbmap_rot_right( p_map, p_item->p_up );
+				p_uncle = p_item->p_up->p_left;
+			}
+
+			if( p_uncle->p_left->color != CL_MAP_RED )
+			{
+				if( p_uncle->p_right->color != CL_MAP_RED )
+				{
+					p_uncle->color = CL_MAP_RED;
+					p_item = p_item->p_up;
+					continue;
+				}
+
+				p_uncle->p_right->color = CL_MAP_BLACK;
+				p_uncle->color = CL_MAP_RED;
+				__cl_rbmap_rot_left( p_map, p_uncle );
+				p_uncle = p_item->p_up->p_left;
+			}
+			p_uncle->color = p_item->p_up->color;
+			p_item->p_up->color = CL_MAP_BLACK;
+			p_uncle->p_left->color = CL_MAP_BLACK;
+			__cl_rbmap_rot_right( p_map, p_item->p_up );
+			break;
+		}
+	}
+	p_item->color = CL_MAP_BLACK;
+}
+
+
+void
+cl_rbmap_remove_item(
+	IN	cl_rbmap_t* const		p_map,
+	IN	cl_rbmap_item_t* const	p_item )
+{
+	cl_rbmap_item_t	*p_child, *p_del_item;
+
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	CL_ASSERT( p_item );
+	CL_ASSERT( p_item->p_map == p_map );
+
+	if( p_item == cl_rbmap_end( p_map ) )
+		return;
+
+	if( p_item->p_right == &p_map->nil )
+	{
+		/* The item being removed has children on at most its left. */
+		p_del_item = p_item;
+		p_child = p_del_item->p_left;
+	}
+	else if( p_item->p_left == &p_map->nil )
+	{
+		/* The item being removed has children on at most its right. */
+		p_del_item = p_item;
+		p_child = p_del_item->p_right;
+	}
+	else
+	{
+		/*
+		 * The item being removed has children on both sides.
+		 * We select the item that will replace it.  After removing
+		 * the substitute item and rebalancing, the tree will have the
+		 * correct topology.  Exchanging the substitute for the item
+		 * will finalize the removal.
+		 */
+		p_del_item = p_item->p_right;
+		CL_ASSERT( p_del_item != &p_map->nil );
+		while( p_del_item->p_left != &p_map->nil )
+			p_del_item = p_del_item->p_left;
+		p_child = p_del_item->p_right;
+	}
+
+	/* Decrement the item count. */
+	p_map->count--;
+
+	/*
+	 * This assignment may modify the parent pointer of the nil node.
+	 * This is inconsequential.
+	 */
+	p_child->p_up = p_del_item->p_up;
+	(*__cl_rbmap_get_parent_ptr_to_item( p_del_item )) = p_child;
+
+	if( p_del_item->color != CL_MAP_RED )
+		__cl_rbmap_del_bal( p_map, p_child );
+
+	/*
+	 * Note that the splicing done below does not need to occur before
+	 * the tree is balanced, since the actual topology changes are made by the
+	 * preceding code.
The topology is preserved by the color assignment made + * below (reader should be reminded that p_del_item == p_item in some cases). + */ + if( p_del_item != p_item ) + { + /* + * Finalize the removal of the specified item by exchanging it with + * the substitute which we removed above. + */ + p_del_item->p_up = p_item->p_up; + p_del_item->p_left = p_item->p_left; + p_del_item->p_right = p_item->p_right; + (*__cl_rbmap_get_parent_ptr_to_item( p_item )) = p_del_item; + p_item->p_right->p_up = p_del_item; + p_item->p_left->p_up = p_del_item; + p_del_item->color = p_item->color; + } + + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + +#ifdef _DEBUG_ + /* Clear the pointer to the map since the item has been removed. */ + p_item->p_map = NULL; +#endif +} + + +/****************************************************************************** +******************************************************************************* +************** ************ +************** IMPLEMENTATION OF QUICK MAP ************ +************** ************ +******************************************************************************* +******************************************************************************/ + +/* + * Get the root. + */ +static inline cl_map_item_t* +__cl_map_root( + IN const cl_qmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + return( p_map->root.p_left ); +} + + +/* + * Returns whether a given item is on the left of its parent. + */ +static boolean_t +__cl_map_is_left_child( + IN const cl_map_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + return( p_item->p_up->p_left == p_item ); +} + + +/* + * Retrieve the pointer to the parent's pointer to an item. + */ +static cl_map_item_t** +__cl_map_get_parent_ptr_to_item( + IN cl_map_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + if( __cl_map_is_left_child( p_item ) ) + return( &p_item->p_up->p_left ); + + CL_ASSERT( p_item->p_up->p_right == p_item ); + return( &p_item->p_up->p_right ); +} + + +/* + * Rotate a node to the left. This rotation affects the least number of links + * between nodes and brings the level of C up by one while increasing the depth + * of A one. Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * A C + * / \ / \ + * W C A Z + * / \ / \ + * B Z W B + * / \ / \ + * X Y X Y + */ +static void +__cl_map_rot_left( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* const p_item ) +{ + cl_map_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_right != &p_map->nil ); + + pp_root = __cl_map_get_parent_ptr_to_item( p_item ); + + /* Point R to C instead of A. */ + *pp_root = p_item->p_right; + /* Set C's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set A's right to B */ + p_item->p_right = (*pp_root)->p_left; + /* + * Set B's parent to A. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_left != &p_map->nil ) + (*pp_root)->p_left->p_up = p_item; + + /* Set C's left to A. */ + (*pp_root)->p_left = p_item; + /* Set A's parent to C. */ + p_item->p_up = *pp_root; +} + + +/* + * Rotate a node to the right. This rotation affects the least number of links + * between nodes and brings the level of A up by one while increasing the depth + * of C one. Note that the links to/from W, X, Y, and Z are not affected. 
+ * + * R R + * | | + * C A + * / \ / \ + * A Z W C + * / \ / \ + * W B B Z + * / \ / \ + * X Y X Y + */ +static void +__cl_map_rot_right( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* const p_item ) +{ + cl_map_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_left != &p_map->nil ); + + /* Point R to A instead of C. */ + pp_root = __cl_map_get_parent_ptr_to_item( p_item ); + (*pp_root) = p_item->p_left; + /* Set A's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set C's left to B */ + p_item->p_left = (*pp_root)->p_right; + /* + * Set B's parent to C. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_right != &p_map->nil ) + (*pp_root)->p_right->p_up = p_item; + + /* Set A's right to C. */ + (*pp_root)->p_right = p_item; + /* Set C's parent to A. */ + p_item->p_up = *pp_root; +} + + +void +cl_qmap_init( + IN cl_qmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + + cl_memclr( p_map, sizeof(cl_qmap_t) ); + + /* special setup for the root node */ + p_map->root.p_up = &p_map->root; + p_map->root.p_left = &p_map->nil; + p_map->root.p_right = &p_map->nil; + p_map->root.color = CL_MAP_BLACK; + + /* Setup the node used as terminator for all leaves. */ + p_map->nil.p_up = &p_map->nil; + p_map->nil.p_left = &p_map->nil; + p_map->nil.p_right = &p_map->nil; + p_map->nil.color = CL_MAP_BLACK; + +#ifdef _DEBUG_ + p_map->root.p_map = p_map; + p_map->nil.p_map = p_map; +#endif + + p_map->state = CL_INITIALIZED; + + cl_qmap_remove_all( p_map ); +} + + +cl_map_item_t* +cl_qmap_get( + IN const cl_qmap_t* const p_map, + IN const uint64_t key ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + p_item = __cl_map_root( p_map ); + + while( p_item != &p_map->nil ) + { + if( key == p_item->key ) + break; /* just right */ + + if( key < p_item->key ) + p_item = p_item->p_left; /* too small */ + else + p_item = p_item->p_right; /* too big */ + } + + return( p_item ); +} + + +void +cl_qmap_apply_func( + IN const cl_qmap_t* const p_map, + IN cl_pfn_qmap_apply_t pfn_func, + IN const void* const context ) +{ + cl_map_item_t* p_map_item; + + /* Note that context can have any arbitrary value. */ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( pfn_func ); + + p_map_item = cl_qmap_head( p_map ); + while( p_map_item != cl_qmap_end( p_map ) ) + { + pfn_func( p_map_item, (void*)context ); + p_map_item = cl_qmap_next( p_map_item ); + } +} + + +/* + * Balance a tree starting at a given item back to the root. 
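*/
+
+
+/*
+ * A usage sketch for the quick map: the caller embeds cl_map_item_t in its
+ * own structure and owns all storage.  Illustrative only; my_entry_t and
+ * example_qmap_usage are hypothetical names, and the block is compiled out.
+ */
+#if 0
+typedef struct _my_entry
+{
+	cl_map_item_t	item;
+	int				payload;
+
+}	my_entry_t;
+
+static void
+example_qmap_usage( void )
+{
+	cl_qmap_t		map;
+	my_entry_t		entry;
+	cl_map_item_t	*p_item;
+
+	cl_qmap_init( &map );
+
+	entry.payload = 7;
+	/* On a duplicate key, the existing item would be returned instead. */
+	p_item = cl_qmap_insert( &map, 0x1234, &entry.item );
+	CL_ASSERT( p_item == &entry.item );
+
+	p_item = cl_qmap_get( &map, 0x1234 );
+	CL_ASSERT( p_item != cl_qmap_end( &map ) );
+
+	cl_qmap_remove_item( &map, p_item );
+}
+#endif
+
+
+/*
+ * Balance a tree starting at a given item back to the root.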
+ */ +static void +__cl_map_ins_bal( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* p_item ) +{ + cl_map_item_t* p_grand_uncle; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item != &p_map->root ); + + while( p_item->p_up->color == CL_MAP_RED ) + { + if( __cl_map_is_left_child( p_item->p_up ) ) + { + p_grand_uncle = p_item->p_up->p_up->p_right; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( !__cl_map_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_map_rot_left( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_map_rot_right( p_map, p_item->p_up->p_up ); + } + else + { + p_grand_uncle = p_item->p_up->p_up->p_left; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( __cl_map_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_map_rot_right( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_map_rot_left( p_map, p_item->p_up->p_up ); + } + } +} + + +cl_map_item_t* +cl_qmap_insert( + IN cl_qmap_t* const p_map, + IN const uint64_t key, + IN cl_map_item_t* const p_item ) +{ + cl_map_item_t *p_insert_at, *p_comp_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_item ); + CL_ASSERT( p_map->root.p_up == &p_map->root ); + CL_ASSERT( p_map->root.color != CL_MAP_RED ); + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + + p_item->p_left = &p_map->nil; + p_item->p_right = &p_map->nil; + p_item->key = key; + p_item->color = CL_MAP_RED; + + /* Find the insertion location. */ + p_insert_at = &p_map->root; + p_comp_item = __cl_map_root( p_map ); + + while( p_comp_item != &p_map->nil ) + { + p_insert_at = p_comp_item; + + if( key == p_insert_at->key ) + return( p_insert_at ); + + /* Traverse the tree until the correct insertion point is found. */ + if( key < p_insert_at->key ) + p_comp_item = p_insert_at->p_left; + else + p_comp_item = p_insert_at->p_right; + } + + CL_ASSERT( p_insert_at != &p_map->nil ); + CL_ASSERT( p_comp_item == &p_map->nil ); + /* Insert the item. */ + if( p_insert_at == &p_map->root ) + { + p_insert_at->p_left = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( &p_map->nil.pool_item.list_item, + &p_item->pool_item.list_item ); + } + else if( key < p_insert_at->key ) + { + p_insert_at->p_left = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( &p_insert_at->pool_item.list_item, + &p_item->pool_item.list_item ); + } + else + { + p_insert_at->p_right = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( p_insert_at->pool_item.list_item.p_next, + &p_item->pool_item.list_item ); + } + /* Increase the count. */ + p_map->count++; + + p_item->p_up = p_insert_at; + + /* + * We have added depth to this section of the tree. + * Rebalance as necessary as we retrace our path through the tree + * and update colors. 
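+	 * Each pass of the loop either recolors and climbs two levels toward
+	 * the root, or applies at most two rotations and terminates, so the
+	 * fix-up costs O(lg n).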
+ */ + __cl_map_ins_bal( p_map, p_item ); + + __cl_map_root( p_map )->color = CL_MAP_BLACK; + + /* + * Note that it is not necessary to re-color the nil node black because all + * red color assignments are made via the p_up pointer, and nil is never + * set as the value of a p_up pointer. + */ + +#ifdef _DEBUG_ + /* Set the pointer to the map in the map item for consistency checking. */ + p_item->p_map = p_map; +#endif + + return( p_item ); +} + + +static void +__cl_map_del_bal( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* p_item ) +{ + cl_map_item_t *p_uncle; + + while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) ) + { + if( __cl_map_is_left_child( p_item ) ) + { + p_uncle = p_item->p_up->p_right; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_map_rot_left( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_right; + } + + if( p_uncle->p_right->color != CL_MAP_RED ) + { + if( p_uncle->p_left->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_left->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_map_rot_right( p_map, p_uncle ); + p_uncle = p_item->p_up->p_right; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_right->color = CL_MAP_BLACK; + __cl_map_rot_left( p_map, p_item->p_up ); + break; + } + else + { + p_uncle = p_item->p_up->p_left; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_map_rot_right( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_left; + } + + if( p_uncle->p_left->color != CL_MAP_RED ) + { + if( p_uncle->p_right->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_right->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_map_rot_left( p_map, p_uncle ); + p_uncle = p_item->p_up->p_left; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_left->color = CL_MAP_BLACK; + __cl_map_rot_right( p_map, p_item->p_up ); + break; + } + } + p_item->color = CL_MAP_BLACK; +} + + +void +cl_qmap_remove_item( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* const p_item ) +{ + cl_map_item_t *p_child, *p_del_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_map == p_map ); + + if( p_item == cl_qmap_end( p_map ) ) + return; + + if( (p_item->p_right == &p_map->nil) || (p_item->p_left == &p_map->nil ) ) + { + /* The item being removed has children on at most on side. */ + p_del_item = p_item; + } + else + { + /* + * The item being removed has children on both side. + * We select the item that will replace it. After removing + * the substitute item and rebalancing, the tree will have the + * correct topology. Exchanging the substitute for the item + * will finalize the removal. + */ + p_del_item = cl_qmap_next( p_item ); + CL_ASSERT( p_del_item != &p_map->nil ); + } + + /* Remove the item from the list. */ + __cl_primitive_remove( &p_item->pool_item.list_item ); + /* Decrement the item count. */ + p_map->count--; + + /* Get the pointer to the new root's child, if any. */ + if( p_del_item->p_left != &p_map->nil ) + p_child = p_del_item->p_left; + else + p_child = p_del_item->p_right; + + /* + * This assignment may modify the parent pointer of the nil node. + * This is inconsequential. 
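+	 * When p_child is the nil node the assignment is in fact required:
+	 * the delete-balancing code climbs through p_up, so nil must
+	 * temporarily point at the spliced-out node's parent.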
+ */ + p_child->p_up = p_del_item->p_up; + (*__cl_map_get_parent_ptr_to_item( p_del_item )) = p_child; + + if( p_del_item->color != CL_MAP_RED ) + __cl_map_del_bal( p_map, p_child ); + + /* + * Note that the splicing done below does not need to occur before + * the tree is balanced, since the actual topology changes are made by the + * preceding code. The topology is preserved by the color assignment made + * below (reader should be reminded that p_del_item == p_item in some cases). + */ + if( p_del_item != p_item ) + { + /* + * Finalize the removal of the specified item by exchanging it with + * the substitute which we removed above. + */ + p_del_item->p_up = p_item->p_up; + p_del_item->p_left = p_item->p_left; + p_del_item->p_right = p_item->p_right; + (*__cl_map_get_parent_ptr_to_item( p_item )) = p_del_item; + p_item->p_right->p_up = p_del_item; + p_item->p_left->p_up = p_del_item; + p_del_item->color = p_item->color; + } + + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + +#ifdef _DEBUG_ + /* Clear the pointer to the map since the item has been removed. */ + p_item->p_map = NULL; +#endif +} + + +cl_map_item_t* +cl_qmap_remove( + IN cl_qmap_t* const p_map, + IN const uint64_t key ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + /* Seek the node with the specified key */ + p_item = cl_qmap_get( p_map, key ); + + cl_qmap_remove_item( p_map, p_item ); + + return( p_item ); +} + + +void +cl_qmap_merge( + OUT cl_qmap_t* const p_dest_map, + IN OUT cl_qmap_t* const p_src_map ) +{ + cl_map_item_t *p_item, *p_item2, *p_next; + + CL_ASSERT( p_dest_map ); + CL_ASSERT( p_src_map ); + + p_item = cl_qmap_head( p_src_map ); + + while( p_item != cl_qmap_end( p_src_map ) ) + { + p_next = cl_qmap_next( p_item ); + + /* Remove the item from its current map. */ + cl_qmap_remove_item( p_src_map, p_item ); + /* Insert the item into the destination map. */ + p_item2 = cl_qmap_insert( p_dest_map, cl_qmap_key( p_item ), p_item ); + /* Check that the item was successfully inserted. */ + if( p_item2 != p_item ) + { + /* Put the item in back in the source map. */ + p_item2 = + cl_qmap_insert( p_src_map, cl_qmap_key( p_item ), p_item ); + CL_ASSERT( p_item2 == p_item ); + } + p_item = p_next; + } +} + + +static void +__cl_qmap_delta_move( + IN OUT cl_qmap_t* const p_dest, + IN OUT cl_qmap_t* const p_src, + IN OUT cl_map_item_t** const pp_item ) +{ + cl_map_item_t *p_temp, *p_next; + + /* + * Get the next item so that we can ensure that pp_item points to + * a valid item upon return from the function. + */ + p_next = cl_qmap_next( *pp_item ); + /* Move the old item from its current map the the old map. */ + cl_qmap_remove_item( p_src, *pp_item ); + p_temp = cl_qmap_insert( p_dest, cl_qmap_key( *pp_item ), *pp_item ); + /* We should never have duplicates. */ + CL_ASSERT( p_temp == *pp_item ); + /* Point pp_item to a valid item in the source map. 
*/ + (*pp_item) = p_next; +} + + +void +cl_qmap_delta( + IN OUT cl_qmap_t* const p_map1, + IN OUT cl_qmap_t* const p_map2, + OUT cl_qmap_t* const p_new, + OUT cl_qmap_t* const p_old ) +{ + cl_map_item_t *p_item1, *p_item2; + uint64_t key1, key2; + + CL_ASSERT( p_map1 ); + CL_ASSERT( p_map2 ); + CL_ASSERT( p_new ); + CL_ASSERT( p_old ); + CL_ASSERT( cl_is_qmap_empty( p_new ) ); + CL_ASSERT( cl_is_qmap_empty( p_old ) ); + + p_item1 = cl_qmap_head( p_map1 ); + p_item2 = cl_qmap_head( p_map2 ); + + while( p_item1 != cl_qmap_end( p_map1 ) && + p_item2 != cl_qmap_end( p_map2 ) ) + { + key1 = cl_qmap_key( p_item1 ); + key2 = cl_qmap_key( p_item2 ); + if( key1 < key2 ) + { + /* We found an old item. */ + __cl_qmap_delta_move( p_old, p_map1, &p_item1 ); + } + else if( key1 > key2 ) + { + /* We found a new item. */ + __cl_qmap_delta_move( p_new, p_map2, &p_item2 ); + } + else + { + /* Move both forward since they have the same key. */ + p_item1 = cl_qmap_next( p_item1 ); + p_item2 = cl_qmap_next( p_item2 ); + } + } + + /* Process the remainder if the end of either source map was reached. */ + while( p_item2 != cl_qmap_end( p_map2 ) ) + __cl_qmap_delta_move( p_new, p_map2, &p_item2 ); + + while( p_item1 != cl_qmap_end( p_map1 ) ) + __cl_qmap_delta_move( p_old, p_map1, &p_item1 ); +} + + +/****************************************************************************** +******************************************************************************* +************** ************ +************** IMPLEMENTATION OF MAP ************ +************** ************ +******************************************************************************* +******************************************************************************/ + + +#define MAP_GROW_SIZE 32 + + +void +cl_map_construct( + IN cl_map_t* const p_map ) +{ + CL_ASSERT( p_map ); + + cl_qpool_construct( &p_map->pool ); +} + + +cl_status_t +cl_map_init( + IN cl_map_t* const p_map, + IN const size_t min_items ) +{ + size_t grow_size; + + CL_ASSERT( p_map ); + + cl_qmap_init( &p_map->qmap ); + + /* + * We will grow by min_items/8 items at a time, with a minimum of + * MAP_GROW_SIZE. + */ + grow_size = min_items >> 3; + if( grow_size < MAP_GROW_SIZE ) + grow_size = MAP_GROW_SIZE; + + return( cl_qpool_init( &p_map->pool, min_items, 0, grow_size, + sizeof(cl_map_obj_t), NULL, NULL, NULL ) ); +} + + +void +cl_map_destroy( + IN cl_map_t* const p_map ) +{ + CL_ASSERT( p_map ); + + cl_qpool_destroy( &p_map->pool ); +} + + +void* +cl_map_insert( + IN cl_map_t* const p_map, + IN const uint64_t key, + IN const void* const p_object ) +{ + cl_map_obj_t *p_map_obj, *p_obj_at_key; + + CL_ASSERT( p_map ); + + p_map_obj = (cl_map_obj_t*)cl_qpool_get( &p_map->pool ); + + if( !p_map_obj ) + return( NULL ); + + cl_qmap_set_obj( p_map_obj, p_object ); + + p_obj_at_key = + (cl_map_obj_t*)cl_qmap_insert( &p_map->qmap, key, &p_map_obj->item ); + + /* Return the item to the pool if insertion failed. 
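Insertion fails here only when the key is already present, in which case the existing object, not the new one, is handed back to the caller.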
*/ + if( p_obj_at_key != p_map_obj ) + cl_qpool_put( &p_map->pool, &p_map_obj->item.pool_item ); + + return( cl_qmap_obj( p_obj_at_key ) ); +} + + +void* +cl_map_get( + IN const cl_map_t* const p_map, + IN const uint64_t key ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_map ); + + p_item = cl_qmap_get( &p_map->qmap, key ); + + if( p_item == cl_qmap_end( &p_map->qmap ) ) + return( NULL ); + + return( cl_qmap_obj( PARENT_STRUCT( p_item, cl_map_obj_t, item ) ) ); +} + + +void +cl_map_remove_item( + IN cl_map_t* const p_map, + IN const cl_map_iterator_t itor ) +{ + CL_ASSERT( itor->p_map == &p_map->qmap ); + + if( itor == cl_map_end( p_map ) ) + return; + + cl_qmap_remove_item( &p_map->qmap, (cl_map_item_t*)itor ); + cl_qpool_put( &p_map->pool, &((cl_map_item_t*)itor)->pool_item ); +} + + +void* +cl_map_remove( + IN cl_map_t* const p_map, + IN const uint64_t key ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_map ); + + p_item = cl_qmap_remove( &p_map->qmap, key ); + + if( p_item == cl_qmap_end( &p_map->qmap ) ) + return( NULL ); + + cl_qpool_put( &p_map->pool, &p_item->pool_item ); + + return( cl_qmap_obj( (cl_map_obj_t*)p_item ) ); +} + + +void +cl_map_remove_all( + IN cl_map_t* const p_map ) +{ + cl_map_item_t *p_item; + + CL_ASSERT( p_map ); + + /* Return all map items to the pool. */ + while( !cl_is_qmap_empty( &p_map->qmap ) ) + { + p_item = cl_qmap_head( &p_map->qmap ); + cl_qmap_remove_item( &p_map->qmap, p_item ); + cl_qpool_put( &p_map->pool, &p_item->pool_item ); + + if( !cl_is_qmap_empty( &p_map->qmap ) ) + { + p_item = cl_qmap_tail( &p_map->qmap ); + cl_qmap_remove_item( &p_map->qmap, p_item ); + cl_qpool_put( &p_map->pool, &p_item->pool_item ); + } + } +} + + +cl_status_t +cl_map_merge( + OUT cl_map_t* const p_dest_map, + IN OUT cl_map_t* const p_src_map ) +{ + cl_status_t status = CL_SUCCESS; + cl_map_iterator_t itor, next; + uint64_t key; + void *p_obj, *p_obj2; + + CL_ASSERT( p_dest_map ); + CL_ASSERT( p_src_map ); + + itor = cl_map_head( p_src_map ); + while( itor != cl_map_end( p_src_map ) ) + { + next = cl_map_next( itor ); + + p_obj = cl_map_obj( itor ); + key = cl_map_key( itor ); + + cl_map_remove_item( p_src_map, itor ); + + /* Insert the object into the destination map. */ + p_obj2 = cl_map_insert( p_dest_map, key, p_obj ); + /* Trap for failure. */ + if( p_obj != p_obj2 ) + { + if( !p_obj2 ) + status = CL_INSUFFICIENT_MEMORY; + /* Put the object back in the source map. This must succeed. */ + p_obj2 = cl_map_insert( p_src_map, key, p_obj ); + CL_ASSERT( p_obj == p_obj2 ); + /* If the failure was due to insufficient memory, return. */ + if( status != CL_SUCCESS ) + return( status ); + } + itor = next; + } + + return( CL_SUCCESS ); +} + + +static void +__cl_map_revert( + IN OUT cl_map_t* const p_map1, + IN OUT cl_map_t* const p_map2, + IN OUT cl_map_t* const p_new, + IN OUT cl_map_t* const p_old ) +{ + cl_status_t status; + + /* Restore the initial state. */ + status = cl_map_merge( p_map1, p_old ); + CL_ASSERT( status == CL_SUCCESS ); + status = cl_map_merge( p_map2, p_new ); + CL_ASSERT( status == CL_SUCCESS ); +} + + +static cl_status_t +__cl_map_delta_move( + OUT cl_map_t* const p_dest, + IN OUT cl_map_t* const p_src, + IN OUT cl_map_iterator_t* const p_itor ) +{ + cl_map_iterator_t next; + void *p_obj, *p_obj2; + uint64_t key; + + /* Get a valid iterator so we can continue the loop. */ + next = cl_map_next( *p_itor ); + /* Get the pointer to the object for insertion. */ + p_obj = cl_map_obj( *p_itor ); + /* Get the key for the object. 
*/ + key = cl_map_key( *p_itor ); + /* Move the object. */ + cl_map_remove_item( p_src, *p_itor ); + p_obj2 = cl_map_insert( p_dest, key, p_obj ); + /* Check for failure. We should never get a duplicate. */ + if( !p_obj2 ) + { + p_obj2 = cl_map_insert( p_src, key, p_obj ); + CL_ASSERT( p_obj2 == p_obj ); + return( CL_INSUFFICIENT_MEMORY ); + } + + /* We should never get a duplicate */ + CL_ASSERT( p_obj == p_obj2 ); + /* Update the iterator so that it is valid. */ + (*p_itor) = next; + + return( CL_SUCCESS ); +} + + +cl_status_t +cl_map_delta( + IN OUT cl_map_t* const p_map1, + IN OUT cl_map_t* const p_map2, + OUT cl_map_t* const p_new, + OUT cl_map_t* const p_old ) +{ + cl_map_iterator_t itor1, itor2; + uint64_t key1, key2; + cl_status_t status; + + CL_ASSERT( p_map1 ); + CL_ASSERT( p_map2 ); + CL_ASSERT( p_new ); + CL_ASSERT( p_old ); + CL_ASSERT( cl_is_map_empty( p_new ) ); + CL_ASSERT( cl_is_map_empty( p_old ) ); + + itor1 = cl_map_head( p_map1 ); + itor2 = cl_map_head( p_map2 ); + + /* + * Note that the check is for the end, since duplicate items will remain + * in their respective maps. + */ + while( itor1 != cl_map_end( p_map1 ) && + itor2 != cl_map_end( p_map2 ) ) + { + key1 = cl_map_key( itor1 ); + key2 = cl_map_key( itor2 ); + if( key1 < key2 ) + { + status = __cl_map_delta_move( p_old, p_map1, &itor1 ); + /* Check for failure. */ + if( status != CL_SUCCESS ) + { + /* Restore the initial state. */ + __cl_map_revert( p_map1, p_map2, p_new, p_old ); + /* Return the failure status. */ + return( status ); + } + } + else if( key1 > key2 ) + { + status = __cl_map_delta_move( p_new, p_map2, &itor2 ); + if( status != CL_SUCCESS ) + { + /* Restore the initial state. */ + __cl_map_revert( p_map1, p_map2, p_new, p_old ); + /* Return the failure status. */ + return( status ); + } + } + else + { + /* Move both forward since they have the same key. */ + itor1 = cl_map_next( itor1 ); + itor2 = cl_map_next( itor2 ); + } + } + + /* Process the remainder if either source map is empty. */ + while( itor2 != cl_map_end( p_map2 ) ) + { + status = __cl_map_delta_move( p_new, p_map2, &itor2 ); + if( status != CL_SUCCESS ) + { + /* Restore the initial state. */ + __cl_map_revert( p_map1, p_map2, p_new, p_old ); + /* Return the failure status. */ + return( status ); + } + } + + while( itor1 != cl_map_end( p_map1 ) ) + { + status = __cl_map_delta_move( p_old, p_map1, &itor1 ); + if( status != CL_SUCCESS ) + { + /* Restore the initial state. */ + __cl_map_revert( p_map1, p_map2, p_new, p_old ); + /* Return the failure status. */ + return( status ); + } + } + + return( CL_SUCCESS ); +} + + +/****************************************************************************** +******************************************************************************* +************** ************ +************** IMPLEMENTATION OF FLEXI MAP ************ +************** ************ +******************************************************************************* +******************************************************************************/ + +/* + * Get the root. + */ +static inline cl_fmap_item_t* +__cl_fmap_root( + IN const cl_fmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + return( p_map->root.p_left ); +} + + +/* + * Returns whether a given item is on the left of its parent. 
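+ */
+
+
+/*
+ * A usage sketch for the flexi map, which differs from the quick map by
+ * taking a caller-supplied compare callback instead of a 64-bit key.
+ * Illustrative only; the example_* names are hypothetical, strcmp is
+ * assumed to be available in the build environment, and the block is
+ * compiled out.
+ */
+#if 0
+static intn_t
+example_fmap_cmp(
+	IN	const void* const	p_key1,
+	IN	const void* const	p_key2 )
+{
+	/* The sign of the return value orders the keys, as with memcmp. */
+	return( (intn_t)strcmp( (const char*)p_key1, (const char*)p_key2 ) );
+}
+
+static void
+example_fmap_usage(
+	IN	cl_fmap_t* const		p_map,
+	IN	cl_fmap_item_t* const	p_item )
+{
+	cl_fmap_init( p_map, example_fmap_cmp );
+
+	/* The key must stay valid for as long as the item is in the map. */
+	cl_fmap_insert( p_map, "some key", p_item );
+	CL_ASSERT( cl_fmap_get( p_map, "some key" ) == p_item );
+}
+#endif
+
+
+/*
+ * Returns whether a given item is on the left of its parent.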
+ */ +static boolean_t +__cl_fmap_is_left_child( + IN const cl_fmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + return( p_item->p_up->p_left == p_item ); +} + + +/* + * Retrieve the pointer to the parent's pointer to an item. + */ +static cl_fmap_item_t** +__cl_fmap_get_parent_ptr_to_item( + IN cl_fmap_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_up ); + CL_ASSERT( p_item->p_up != p_item ); + + if( __cl_fmap_is_left_child( p_item ) ) + return( &p_item->p_up->p_left ); + + CL_ASSERT( p_item->p_up->p_right == p_item ); + return( &p_item->p_up->p_right ); +} + + +/* + * Rotate a node to the left. This rotation affects the least number of links + * between nodes and brings the level of C up by one while increasing the depth + * of A one. Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * A C + * / \ / \ + * W C A Z + * / \ / \ + * B Z W B + * / \ / \ + * X Y X Y + */ +static void +__cl_fmap_rot_left( + IN cl_fmap_t* const p_map, + IN cl_fmap_item_t* const p_item ) +{ + cl_fmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_right != &p_map->nil ); + + pp_root = __cl_fmap_get_parent_ptr_to_item( p_item ); + + /* Point R to C instead of A. */ + *pp_root = p_item->p_right; + /* Set C's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set A's right to B */ + p_item->p_right = (*pp_root)->p_left; + /* + * Set B's parent to A. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_left != &p_map->nil ) + (*pp_root)->p_left->p_up = p_item; + + /* Set C's left to A. */ + (*pp_root)->p_left = p_item; + /* Set A's parent to C. */ + p_item->p_up = *pp_root; +} + + +/* + * Rotate a node to the right. This rotation affects the least number of links + * between nodes and brings the level of A up by one while increasing the depth + * of C one. Note that the links to/from W, X, Y, and Z are not affected. + * + * R R + * | | + * C A + * / \ / \ + * A Z W C + * / \ / \ + * W B B Z + * / \ / \ + * X Y X Y + */ +static void +__cl_fmap_rot_right( + IN cl_fmap_t* const p_map, + IN cl_fmap_item_t* const p_item ) +{ + cl_fmap_item_t **pp_root; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_left != &p_map->nil ); + + /* Point R to A instead of C. */ + pp_root = __cl_fmap_get_parent_ptr_to_item( p_item ); + (*pp_root) = p_item->p_left; + /* Set A's parent to R. */ + (*pp_root)->p_up = p_item->p_up; + + /* Set C's left to B */ + p_item->p_left = (*pp_root)->p_right; + /* + * Set B's parent to C. We trap for B being NIL since the + * caller may depend on NIL not changing. + */ + if( (*pp_root)->p_right != &p_map->nil ) + (*pp_root)->p_right->p_up = p_item; + + /* Set A's right to C. */ + (*pp_root)->p_right = p_item; + /* Set C's parent to A. */ + p_item->p_up = *pp_root; +} + + +void +cl_fmap_init( + IN cl_fmap_t* const p_map, + IN cl_pfn_fmap_cmp_t pfn_compare ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( pfn_compare ); + + cl_memclr( p_map, sizeof(cl_fmap_t) ); + + /* special setup for the root node */ + p_map->root.p_up = &p_map->root; + p_map->root.p_left = &p_map->nil; + p_map->root.p_right = &p_map->nil; + p_map->root.color = CL_MAP_BLACK; + + /* Setup the node used as terminator for all leaves. 
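Using a sentinel instead of NULL lets the rotation and balancing code dereference child links unconditionally.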
*/ + p_map->nil.p_up = &p_map->nil; + p_map->nil.p_left = &p_map->nil; + p_map->nil.p_right = &p_map->nil; + p_map->nil.color = CL_MAP_BLACK; + + /* Store the compare function pointer. */ + p_map->pfn_compare = pfn_compare; + + p_map->state = CL_INITIALIZED; + + cl_fmap_remove_all( p_map ); +} + + +cl_fmap_item_t* +cl_fmap_get( + IN const cl_fmap_t* const p_map, + IN const void* const p_key ) +{ + cl_fmap_item_t *p_item; + intn_t cmp; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + p_item = __cl_fmap_root( p_map ); + + while( p_item != &p_map->nil ) + { + cmp = p_map->pfn_compare( p_key, p_item->p_key ); + + if( !cmp ) + break; /* just right */ + + if( cmp < 0 ) + p_item = p_item->p_left; /* too small */ + else + p_item = p_item->p_right; /* too big */ + } + + return( p_item ); +} + + +void +cl_fmap_apply_func( + IN const cl_fmap_t* const p_map, + IN cl_pfn_fmap_apply_t pfn_func, + IN const void* const context ) +{ + cl_fmap_item_t* p_fmap_item; + + /* Note that context can have any arbitrary value. */ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( pfn_func ); + + p_fmap_item = cl_fmap_head( p_map ); + while( p_fmap_item != cl_fmap_end( p_map ) ) + { + pfn_func( p_fmap_item, (void*)context ); + p_fmap_item = cl_fmap_next( p_fmap_item ); + } +} + + +/* + * Balance a tree starting at a given item back to the root. + */ +static void +__cl_fmap_ins_bal( + IN cl_fmap_t* const p_map, + IN cl_fmap_item_t* p_item ) +{ + cl_fmap_item_t* p_grand_uncle; + + CL_ASSERT( p_map ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item != &p_map->root ); + + while( p_item->p_up->color == CL_MAP_RED ) + { + if( __cl_fmap_is_left_child( p_item->p_up ) ) + { + p_grand_uncle = p_item->p_up->p_up->p_right; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( !__cl_fmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_fmap_rot_left( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_fmap_rot_right( p_map, p_item->p_up->p_up ); + } + else + { + p_grand_uncle = p_item->p_up->p_up->p_left; + CL_ASSERT( p_grand_uncle ); + if( p_grand_uncle->color == CL_MAP_RED ) + { + p_grand_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + p_item = p_item->p_up->p_up; + continue; + } + + if( __cl_fmap_is_left_child( p_item ) ) + { + p_item = p_item->p_up; + __cl_fmap_rot_right( p_map, p_item ); + } + p_item->p_up->color = CL_MAP_BLACK; + p_item->p_up->p_up->color = CL_MAP_RED; + __cl_fmap_rot_left( p_map, p_item->p_up->p_up ); + } + } +} + + +cl_fmap_item_t* +cl_fmap_insert( + IN cl_fmap_t* const p_map, + IN const void* const p_key, + IN cl_fmap_item_t* const p_item ) +{ + cl_fmap_item_t *p_insert_at, *p_comp_item; + intn_t cmp = 0; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_item ); + CL_ASSERT( p_map->root.p_up == &p_map->root ); + CL_ASSERT( p_map->root.color != CL_MAP_RED ); + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + + p_item->p_left = &p_map->nil; + p_item->p_right = &p_map->nil; + p_item->p_key = p_key; + p_item->color = CL_MAP_RED; + + /* Find the insertion location. 
*/ + p_insert_at = &p_map->root; + p_comp_item = __cl_fmap_root( p_map ); + + while( p_comp_item != &p_map->nil ) + { + p_insert_at = p_comp_item; + + cmp = p_map->pfn_compare( p_key, p_insert_at->p_key ); + + if( !cmp ) + return( p_insert_at ); + + /* Traverse the tree until the correct insertion point is found. */ + if( cmp < 0 ) + p_comp_item = p_insert_at->p_left; + else + p_comp_item = p_insert_at->p_right; + } + + CL_ASSERT( p_insert_at != &p_map->nil ); + CL_ASSERT( p_comp_item == &p_map->nil ); + /* Insert the item. */ + if( p_insert_at == &p_map->root ) + { + p_insert_at->p_left = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( &p_map->nil.pool_item.list_item, + &p_item->pool_item.list_item ); + } + else if( cmp < 0 ) + { + p_insert_at->p_left = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( &p_insert_at->pool_item.list_item, + &p_item->pool_item.list_item ); + } + else + { + p_insert_at->p_right = p_item; + /* + * Primitive insert places the new item in front of + * the existing item. + */ + __cl_primitive_insert( p_insert_at->pool_item.list_item.p_next, + &p_item->pool_item.list_item ); + } + /* Increase the count. */ + p_map->count++; + + p_item->p_up = p_insert_at; + + /* + * We have added depth to this section of the tree. + * Rebalance as necessary as we retrace our path through the tree + * and update colors. + */ + __cl_fmap_ins_bal( p_map, p_item ); + + __cl_fmap_root( p_map )->color = CL_MAP_BLACK; + + /* + * Note that it is not necessary to re-color the nil node black because all + * red color assignments are made via the p_up pointer, and nil is never + * set as the value of a p_up pointer. + */ + +#ifdef _DEBUG_ + /* Set the pointer to the map in the map item for consistency checking. 
*/ + p_item->p_map = p_map; +#endif + + return( p_item ); +} + + +static void +__cl_fmap_del_bal( + IN cl_fmap_t* const p_map, + IN cl_fmap_item_t* p_item ) +{ + cl_fmap_item_t *p_uncle; + + while( (p_item->color != CL_MAP_RED) && (p_item->p_up != &p_map->root) ) + { + if( __cl_fmap_is_left_child( p_item ) ) + { + p_uncle = p_item->p_up->p_right; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_fmap_rot_left( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_right; + } + + if( p_uncle->p_right->color != CL_MAP_RED ) + { + if( p_uncle->p_left->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_left->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_fmap_rot_right( p_map, p_uncle ); + p_uncle = p_item->p_up->p_right; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_right->color = CL_MAP_BLACK; + __cl_fmap_rot_left( p_map, p_item->p_up ); + break; + } + else + { + p_uncle = p_item->p_up->p_left; + + if( p_uncle->color == CL_MAP_RED ) + { + p_uncle->color = CL_MAP_BLACK; + p_item->p_up->color = CL_MAP_RED; + __cl_fmap_rot_right( p_map, p_item->p_up ); + p_uncle = p_item->p_up->p_left; + } + + if( p_uncle->p_left->color != CL_MAP_RED ) + { + if( p_uncle->p_right->color != CL_MAP_RED ) + { + p_uncle->color = CL_MAP_RED; + p_item = p_item->p_up; + continue; + } + + p_uncle->p_right->color = CL_MAP_BLACK; + p_uncle->color = CL_MAP_RED; + __cl_fmap_rot_left( p_map, p_uncle ); + p_uncle = p_item->p_up->p_left; + } + p_uncle->color = p_item->p_up->color; + p_item->p_up->color = CL_MAP_BLACK; + p_uncle->p_left->color = CL_MAP_BLACK; + __cl_fmap_rot_right( p_map, p_item->p_up ); + break; + } + } + p_item->color = CL_MAP_BLACK; +} + + +void +cl_fmap_remove_item( + IN cl_fmap_t* const p_map, + IN cl_fmap_item_t* const p_item ) +{ + cl_fmap_item_t *p_child, *p_del_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + CL_ASSERT( p_item ); + CL_ASSERT( p_item->p_map == p_map ); + + if( p_item == cl_fmap_end( p_map ) ) + return; + + if( (p_item->p_right == &p_map->nil) || (p_item->p_left == &p_map->nil ) ) + { + /* The item being removed has children on at most on side. */ + p_del_item = p_item; + } + else + { + /* + * The item being removed has children on both side. + * We select the item that will replace it. After removing + * the substitute item and rebalancing, the tree will have the + * correct topology. Exchanging the substitute for the item + * will finalize the removal. + */ + p_del_item = cl_fmap_next( p_item ); + CL_ASSERT( p_del_item != &p_map->nil ); + } + + /* Remove the item from the list. */ + __cl_primitive_remove( &p_item->pool_item.list_item ); + /* Decrement the item count. */ + p_map->count--; + + /* Get the pointer to the new root's child, if any. */ + if( p_del_item->p_left != &p_map->nil ) + p_child = p_del_item->p_left; + else + p_child = p_del_item->p_right; + + /* + * This assignment may modify the parent pointer of the nil node. + * This is inconsequential. + */ + p_child->p_up = p_del_item->p_up; + (*__cl_fmap_get_parent_ptr_to_item( p_del_item )) = p_child; + + if( p_del_item->color != CL_MAP_RED ) + __cl_fmap_del_bal( p_map, p_child ); + + /* + * Note that the splicing done below does not need to occur before + * the tree is balanced, since the actual topology changes are made by the + * preceding code. 
The topology is preserved by the color assignment made + * below (reader should be reminded that p_del_item == p_item in some cases). + */ + if( p_del_item != p_item ) + { + /* + * Finalize the removal of the specified item by exchanging it with + * the substitute which we removed above. + */ + p_del_item->p_up = p_item->p_up; + p_del_item->p_left = p_item->p_left; + p_del_item->p_right = p_item->p_right; + (*__cl_fmap_get_parent_ptr_to_item( p_item )) = p_del_item; + p_item->p_right->p_up = p_del_item; + p_item->p_left->p_up = p_del_item; + p_del_item->color = p_item->color; + } + + CL_ASSERT( p_map->nil.color != CL_MAP_RED ); + +#ifdef _DEBUG_ + /* Clear the pointer to the map since the item has been removed. */ + p_item->p_map = NULL; +#endif +} + + +cl_fmap_item_t* +cl_fmap_remove( + IN cl_fmap_t* const p_map, + IN const void* const p_key ) +{ + cl_fmap_item_t *p_item; + + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + /* Seek the node with the specified key */ + p_item = cl_fmap_get( p_map, p_key ); + + cl_fmap_remove_item( p_map, p_item ); + + return( p_item ); +} + + +void +cl_fmap_merge( + OUT cl_fmap_t* const p_dest_map, + IN OUT cl_fmap_t* const p_src_map ) +{ + cl_fmap_item_t *p_item, *p_item2, *p_next; + + CL_ASSERT( p_dest_map ); + CL_ASSERT( p_src_map ); + + p_item = cl_fmap_head( p_src_map ); + + while( p_item != cl_fmap_end( p_src_map ) ) + { + p_next = cl_fmap_next( p_item ); + + /* Remove the item from its current map. */ + cl_fmap_remove_item( p_src_map, p_item ); + /* Insert the item into the destination map. */ + p_item2 = cl_fmap_insert( p_dest_map, cl_fmap_key( p_item ), p_item ); + /* Check that the item was successfully inserted. */ + if( p_item2 != p_item ) + { + /* Put the item in back in the source map. */ + p_item2 = + cl_fmap_insert( p_src_map, cl_fmap_key( p_item ), p_item ); + CL_ASSERT( p_item2 == p_item ); + } + p_item = p_next; + } +} + + +static void +__cl_fmap_delta_move( + IN OUT cl_fmap_t* const p_dest, + IN OUT cl_fmap_t* const p_src, + IN OUT cl_fmap_item_t** const pp_item ) +{ + cl_fmap_item_t *p_temp, *p_next; + + /* + * Get the next item so that we can ensure that pp_item points to + * a valid item upon return from the function. + */ + p_next = cl_fmap_next( *pp_item ); + /* Move the old item from its current map the the old map. */ + cl_fmap_remove_item( p_src, *pp_item ); + p_temp = cl_fmap_insert( p_dest, cl_fmap_key( *pp_item ), *pp_item ); + /* We should never have duplicates. */ + CL_ASSERT( p_temp == *pp_item ); + /* Point pp_item to a valid item in the source map. */ + (*pp_item) = p_next; +} + + +void +cl_fmap_delta( + IN OUT cl_fmap_t* const p_map1, + IN OUT cl_fmap_t* const p_map2, + OUT cl_fmap_t* const p_new, + OUT cl_fmap_t* const p_old ) +{ + cl_fmap_item_t *p_item1, *p_item2; + intn_t cmp; + + CL_ASSERT( p_map1 ); + CL_ASSERT( p_map2 ); + CL_ASSERT( p_new ); + CL_ASSERT( p_old ); + CL_ASSERT( cl_is_fmap_empty( p_new ) ); + CL_ASSERT( cl_is_fmap_empty( p_old ) ); + + p_item1 = cl_fmap_head( p_map1 ); + p_item2 = cl_fmap_head( p_map2 ); + + while( p_item1 != cl_fmap_end( p_map1 ) && + p_item2 != cl_fmap_end( p_map2 ) ) + { + cmp = p_map1->pfn_compare( cl_fmap_key( p_item1 ), + cl_fmap_key( p_item2 ) ); + if( cmp < 0 ) + { + /* We found an old item. */ + __cl_fmap_delta_move( p_old, p_map1, &p_item1 ); + } + else if( cmp > 0 ) + { + /* We found a new item. */ + __cl_fmap_delta_move( p_new, p_map2, &p_item2 ); + } + else + { + /* Move both forward since they have the same key. 
*/ + p_item1 = cl_fmap_next( p_item1 ); + p_item2 = cl_fmap_next( p_item2 ); + } + } + + /* Process the remainder if the end of either source map was reached. */ + while( p_item2 != cl_fmap_end( p_map2 ) ) + __cl_fmap_delta_move( p_new, p_map2, &p_item2 ); + + while( p_item1 != cl_fmap_end( p_map1 ) ) + __cl_fmap_delta_move( p_old, p_map1, &p_item1 ); +} diff --git a/branches/Ndi/core/complib/cl_memory.c b/branches/Ndi/core/complib/cl_memory.c new file mode 100644 index 00000000..eec7a235 --- /dev/null +++ b/branches/Ndi/core/complib/cl_memory.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of memory allocation tracking functions. + * + * Environment: + * All + */ + + +#include "cl_memtrack.h" + + +cl_mem_tracker_t *gp_mem_tracker = NULL; + + +/* + * Allocates memory. + */ +void* +__cl_malloc_priv( + IN const size_t size, + IN const boolean_t pageable ); + + +/* + * Deallocates memory. + */ +void +__cl_free_priv( + IN void* const p_memory ); + + +/* + * Allocate and initialize the memory tracker object. + */ +static inline void +__cl_mem_track_start( void ) +{ + cl_status_t status; + + if( gp_mem_tracker ) + return; + + /* Allocate the memory tracker object. */ + gp_mem_tracker = (cl_mem_tracker_t*) + __cl_malloc_priv( sizeof(cl_mem_tracker_t), FALSE ); + + if( !gp_mem_tracker ) + return; + + /* Initialize the free list. */ + cl_qlist_init( &gp_mem_tracker->free_hdr_list ); + /* Initialize the allocation list. */ + cl_qmap_init( &gp_mem_tracker->alloc_map ); + + /* Initialize the spin lock to protect list operations. */ + status = cl_spinlock_init( &gp_mem_tracker->lock ); + if( status != CL_SUCCESS ) + { + /* Free the tracker and clear the global pointer so it cannot dangle. */ + __cl_free_priv( gp_mem_tracker ); + gp_mem_tracker = NULL; + return; + } +} + + +/* + * Clean up memory tracking. + */ +static inline void +__cl_mem_track_stop( void ) +{ + cl_map_item_t *p_map_item; + cl_list_item_t *p_list_item; + + if( !gp_mem_tracker ) + return; + + if( cl_qmap_count( &gp_mem_tracker->alloc_map ) ) + { + /* There are still items in the list. Print them out. */ + cl_mem_display(); + } + + /* Free all allocated headers.
*/ + cl_spinlock_acquire( &gp_mem_tracker->lock ); + while( cl_qmap_count( &gp_mem_tracker->alloc_map ) ) + { + p_map_item = cl_qmap_head( &gp_mem_tracker->alloc_map ); + cl_qmap_remove_item( &gp_mem_tracker->alloc_map, p_map_item ); + __cl_free_priv( + PARENT_STRUCT( p_map_item, cl_malloc_hdr_t, map_item ) ); + } + + while( cl_qlist_count( &gp_mem_tracker->free_hdr_list ) ) + { + p_list_item = cl_qlist_remove_head( &gp_mem_tracker->free_hdr_list ); + __cl_free_priv( PARENT_STRUCT( + p_list_item, cl_malloc_hdr_t, map_item.pool_item.list_item ) ); + } + cl_spinlock_release( &gp_mem_tracker->lock ); + + /* Destroy all objects in the memory tracker object. */ + cl_spinlock_destroy( &gp_mem_tracker->lock ); + + /* Free the memory allocated for the memory tracker object. */ + __cl_free_priv( gp_mem_tracker ); + gp_mem_tracker = NULL; +} + + +/* + * Enables memory allocation tracking. + */ +void +__cl_mem_track( + IN const boolean_t start ) +{ + if( start ) + __cl_mem_track_start(); + else + __cl_mem_track_stop(); +} + + +/* + * Display memory usage. + */ +void +cl_mem_display( void ) +{ + cl_map_item_t *p_map_item; + cl_malloc_hdr_t *p_hdr; + + if( !gp_mem_tracker ) + return; + + cl_spinlock_acquire( &gp_mem_tracker->lock ); + cl_msg_out( "\n\n\n*** Memory Usage ***\n" ); + p_map_item = cl_qmap_head( &gp_mem_tracker->alloc_map ); + while( p_map_item != cl_qmap_end( &gp_mem_tracker->alloc_map ) ) + { + /* + * Get the pointer to the header. Note that the object member of the + * list item will be used to store the pointer to the user's memory. + */ + p_hdr = PARENT_STRUCT( p_map_item, cl_malloc_hdr_t, map_item ); + + cl_msg_out( "\tMemory block at %p allocated in file %s line %d\n", + p_hdr->p_mem, p_hdr->file_name, p_hdr->line_num ); + + p_map_item = cl_qmap_next( p_map_item ); + } + cl_msg_out( "*** End of Memory Usage ***\n\n" ); + cl_spinlock_release( &gp_mem_tracker->lock ); +} + + +/* + * Allocates memory and stores information about the allocation in a list. + * The contents of the list can be printed out by calling cl_mem_display(). + * Memory allocation will succeed even if the list cannot be created. + */ +void* +__cl_malloc_trk( + IN const char* const p_file_name, + IN const int32_t line_num, + IN const size_t size, + IN const boolean_t pageable ) +{ + cl_malloc_hdr_t *p_hdr; + cl_list_item_t *p_list_item; + void *p_mem; + uint64_t temp_buf[FILE_NAME_LENGTH/sizeof(uint64_t)]; + int32_t temp_line; + + /* + * Allocate the memory first, so that we give the user's allocation + * priority over the header allocation. + */ + p_mem = __cl_malloc_priv( size, pageable ); + + if( !p_mem ) + return( NULL ); + + if( !gp_mem_tracker ) + return( p_mem ); + + /* + * Make copies of the file name and line number in case those + * parameters are in paged pool. + */ + temp_line = line_num; + strncpy( (char*)temp_buf, p_file_name, FILE_NAME_LENGTH ); + /* Make sure the string is null terminated. */ + ((char*)temp_buf)[FILE_NAME_LENGTH - 1] = '\0'; + + cl_spinlock_acquire( &gp_mem_tracker->lock ); + + /* Get a header from the free header list. */ + p_list_item = cl_qlist_remove_head( &gp_mem_tracker->free_hdr_list ); + if( p_list_item != cl_qlist_end( &gp_mem_tracker->free_hdr_list ) ) + { + /* Set the header pointer to the header retrieved from the list. */ + p_hdr = PARENT_STRUCT( p_list_item, cl_malloc_hdr_t, + map_item.pool_item.list_item ); + } + else + { + /* We failed to get a free header. Allocate one.
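+ *
+ * As background, a hedged sketch of how callers normally reach
+ * __cl_malloc_trk: the complib debug allocation macros forward the
+ * call site, roughly along these lines (illustrative form only, not
+ * the verbatim header definition):
+ *
+ *	#define cl_malloc( size ) \
+ *		__cl_malloc_trk( __FILE__, __LINE__, (size), FALSE )
+ *
+ * Each tracked block thus records the allocating file and line, which
+ * is exactly what cl_mem_display() prints when hunting leaks.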
*/ + p_hdr = __cl_malloc_priv( sizeof(cl_malloc_hdr_t), FALSE ); + if( !p_hdr ) + { + /* We failed to allocate the header. Return the user's memory. */ + cl_spinlock_release( &gp_mem_tracker->lock ); + return( p_mem ); + } + } + cl_memcpy( p_hdr->file_name, temp_buf, FILE_NAME_LENGTH ); + p_hdr->line_num = temp_line; + /* + * We store the pointer to the memory returned to the user. This allows + * searching the list of allocated memory even if the buffer allocated is + * not in the list without dereferencing memory we do not own. + */ + p_hdr->p_mem = p_mem; + + /* Insert the header structure into our allocation list. */ + cl_qmap_insert( &gp_mem_tracker->alloc_map, (uintn_t)p_mem, &p_hdr->map_item ); + cl_spinlock_release( &gp_mem_tracker->lock ); + + return( p_mem ); +} + + +/* + * Allocate non-tracked memory. + */ +void* +__cl_malloc_ntrk( + IN const size_t size, + IN const boolean_t pageable ) +{ + return( __cl_malloc_priv( size, pageable ) ); +} + + +void* +__cl_zalloc_trk( + IN const char* const p_file_name, + IN const int32_t line_num, + IN const size_t size, + IN const boolean_t pageable ) +{ + void *p_buffer; + + p_buffer = __cl_malloc_trk( p_file_name, line_num, size, pageable ); + if( p_buffer ) + cl_memclr( p_buffer, size ); + + return( p_buffer ); +} + + +void* +__cl_zalloc_ntrk( + IN const size_t size, + IN const boolean_t pageable ) +{ + void *p_buffer; + + p_buffer = __cl_malloc_priv( size, pageable ); + if( p_buffer ) + cl_memclr( p_buffer, size ); + + return( p_buffer ); +} + + +void +__cl_free_trk( + IN void* const p_memory ) +{ + cl_malloc_hdr_t *p_hdr; + cl_map_item_t *p_map_item; + + if( gp_mem_tracker ) + { + cl_spinlock_acquire( &gp_mem_tracker->lock ); + + /* + * Removes an item from the allocation tracking list given a pointer + * To the user's data and returns the pointer to header referencing the + * allocated memory block. + */ + p_map_item = cl_qmap_get( &gp_mem_tracker->alloc_map, (uintn_t)p_memory ); + if( p_map_item != cl_qmap_end( &gp_mem_tracker->alloc_map ) ) + { + /* Get the pointer to the header. */ + p_hdr = PARENT_STRUCT( p_map_item, cl_malloc_hdr_t, map_item ); + /* Remove the item from the list. */ + cl_qmap_remove_item( &gp_mem_tracker->alloc_map, p_map_item ); + + /* Return the header to the free header list. */ + cl_qlist_insert_head( &gp_mem_tracker->free_hdr_list, + &p_hdr->map_item.pool_item.list_item ); + } + cl_spinlock_release( &gp_mem_tracker->lock ); + } + __cl_free_priv( p_memory ); +} + + +void +__cl_free_ntrk( + IN void* const p_memory ) +{ + __cl_free_priv( p_memory ); +} diff --git a/branches/Ndi/core/complib/cl_memtrack.h b/branches/Ndi/core/complib/cl_memtrack.h new file mode 100644 index 00000000..43e8deab --- /dev/null +++ b/branches/Ndi/core/complib/cl_memtrack.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Definitions of Data-Structures for memory allocation tracking functions. + * + * Environment: + * All + */ + + +#ifndef _CL_MEMTRACK_H_ +#define _CL_MEMTRACK_H_ + + +#include +#include +#include +#include +#include + + +/* Structure to track memory allocations. */ +typedef struct _cl_mem_tracker +{ + /* List for tracking memory allocations. */ + cl_qmap_t alloc_map; + + /* Lock for synchronization. */ + cl_spinlock_t lock; + + /* List to manage free headers. */ + cl_qlist_t free_hdr_list; + +} cl_mem_tracker_t; + + +#define FILE_NAME_LENGTH 64 + + +/* Header for all memory allocations. */ +typedef struct _cl_malloc_hdr +{ + cl_map_item_t map_item; + void *p_mem; + char file_name[FILE_NAME_LENGTH]; + int32_t line_num; + +} cl_malloc_hdr_t; + + +extern cl_mem_tracker_t *gp_mem_tracker; + +#endif /* _CL_MEMTRACK_H_ */ + diff --git a/branches/Ndi/core/complib/cl_obj.c b/branches/Ndi/core/complib/cl_obj.c new file mode 100644 index 00000000..259a5f53 --- /dev/null +++ b/branches/Ndi/core/complib/cl_obj.c @@ -0,0 +1,781 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include + + +/* Number of relation objects to add to the global pool when growing. */ +#define CL_REL_POOL_SIZE ( 4096 / sizeof( cl_obj_rel_t ) ) + + + +/* The global object manager. */ +cl_obj_mgr_t *gp_obj_mgr = NULL; + + + +/******************************************************************** + * Global Object Manager + *******************************************************************/ + +cl_status_t +cl_obj_mgr_create() +{ + cl_status_t status; + + /* See if the object manager has already been created. */ + if( gp_obj_mgr ) + return CL_SUCCESS; + + /* Allocate the object manager. 
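+ *
+ * As a usage note: the early return above makes this call idempotent,
+ * so a driver can simply pair it with cl_obj_mgr_destroy() at
+ * load/unload time, e.g. (hypothetical sketch):
+ *
+ *	if( cl_obj_mgr_create() != CL_SUCCESS )
+ *		return CL_ERROR;
+ *	... create and use cl_obj_t based objects ...
+ *	cl_obj_mgr_destroy();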
*/ + gp_obj_mgr = cl_zalloc( sizeof( cl_obj_mgr_t ) ); + if( !gp_obj_mgr ) + return CL_INSUFFICIENT_MEMORY; + + /* Construct the object manager. */ + cl_qlist_init( &gp_obj_mgr->obj_list ); + cl_spinlock_construct( &gp_obj_mgr->lock ); + cl_async_proc_construct( &gp_obj_mgr->async_proc_mgr ); + cl_qpool_construct( &gp_obj_mgr->rel_pool ); + + /* Initialize the spinlock. */ + status = cl_spinlock_init( &gp_obj_mgr->lock ); + if( status != CL_SUCCESS ) + { + cl_obj_mgr_destroy(); + return status; + } + + /* Initialize the asynchronous processing manager. */ + status = cl_async_proc_init( &gp_obj_mgr->async_proc_mgr, 0, "obj_mgr" ); + if( status != CL_SUCCESS ) + { + cl_obj_mgr_destroy(); + return status; + } + + /* Initialize the relationship pool. */ + status = cl_qpool_init( &gp_obj_mgr->rel_pool, 0, 0, CL_REL_POOL_SIZE, + sizeof( cl_obj_rel_t ), NULL, NULL, gp_obj_mgr ); + if( status != CL_SUCCESS ) + { + cl_obj_mgr_destroy(); + return status; + } + + return CL_SUCCESS; +} + + + +void +cl_obj_mgr_destroy() +{ + cl_list_item_t *p_list_item; + cl_obj_t *p_obj; + + /* See if the object manager had been created. */ + if( !gp_obj_mgr ) + return; + + /* Verify that all object's have been destroyed. */ + for( p_list_item = cl_qlist_head( &gp_obj_mgr->obj_list ); + p_list_item != cl_qlist_end( &gp_obj_mgr->obj_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_obj = PARENT_STRUCT( p_list_item, cl_obj_t, pool_item ); +#if defined( _DEBUG_ ) + cl_dbg_out( "object not destroyed %p(%i), ref_cnt: %d\n", + p_obj, p_obj->type, p_obj->ref_cnt ); +#endif + } + + /* Destroy all object manager resources. */ + cl_spinlock_destroy( &gp_obj_mgr->lock ); + cl_async_proc_destroy( &gp_obj_mgr->async_proc_mgr ); + cl_qpool_destroy( &gp_obj_mgr->rel_pool ); + + /* Free the object manager and clear the global pointer. */ + cl_free( gp_obj_mgr ); + gp_obj_mgr = NULL; +} + + + +/* + * Get an item to track object relationships. + */ +cl_obj_rel_t* +cl_rel_alloc() +{ + cl_obj_rel_t *p_rel; + + CL_ASSERT( gp_obj_mgr ); + + cl_spinlock_acquire( &gp_obj_mgr->lock ); + p_rel = (cl_obj_rel_t*)cl_qpool_get( &gp_obj_mgr->rel_pool ); + cl_spinlock_release( &gp_obj_mgr->lock ); + + return p_rel; +} + + + +/* + * Return an item used to track relationships back to the pool. + */ +void +cl_rel_free( + IN cl_obj_rel_t * const p_rel ) +{ + CL_ASSERT( gp_obj_mgr && p_rel ); + + cl_spinlock_acquire( &gp_obj_mgr->lock ); + cl_qpool_put( &gp_obj_mgr->rel_pool, &p_rel->pool_item ); + cl_spinlock_release( &gp_obj_mgr->lock ); +} + + + +/* + * Insert an object into the global object manager's list. + */ +static void +__track_obj( + IN cl_obj_t *p_obj ) +{ + CL_ASSERT( gp_obj_mgr && p_obj ); + + cl_spinlock_acquire( &gp_obj_mgr->lock ); + cl_qlist_insert_tail( &gp_obj_mgr->obj_list, + (cl_list_item_t*)&p_obj->pool_item ); + cl_spinlock_release( &gp_obj_mgr->lock ); +} + + + +/* + * Remove an object from the global object manager's list. + */ +static void +__remove_obj( + IN cl_obj_t *p_obj ) +{ + CL_ASSERT( gp_obj_mgr && p_obj ); + + cl_spinlock_acquire( &gp_obj_mgr->lock ); + cl_qlist_remove_item( &gp_obj_mgr->obj_list, + (cl_list_item_t*)&p_obj->pool_item ); + cl_spinlock_release( &gp_obj_mgr->lock ); +} + + + +/******************************************************************** + * Generic Object Class + *******************************************************************/ + +/* Function prototypes. 
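+ *
+ * For orientation, a hedged sketch of the embedding pattern this
+ * class expects (my_obj_t, MY_OBJ_TYPE and my_free are hypothetical;
+ * my_free itself is sketched later, near cl_obj_deinit):
+ *
+ *	typedef struct _my_obj
+ *	{
+ *		cl_obj_t	obj;
+ *		...
+ *	} my_obj_t;
+ *
+ *	p_my = cl_zalloc( sizeof(my_obj_t) );
+ *	cl_obj_construct( &p_my->obj, MY_OBJ_TYPE );
+ *	status = cl_obj_init( &p_my->obj, CL_DESTROY_SYNC,
+ *		NULL, NULL, my_free );
+ *	...
+ *	cl_obj_destroy( &p_my->obj );
+ *
+ * Destruction drives the optional destroying/cleanup callbacks and
+ * finally the mandatory free callback once all references are gone.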
*/ +static void +__destroy_obj( + IN cl_obj_t *p_obj ); + +static void +__destroy_cb( + IN cl_async_proc_item_t *p_item ); + +/* Sets the state of an object and returns the old state. */ +static cl_state_t +__obj_set_state( + IN cl_obj_t * const p_obj, + IN const cl_state_t new_state ); + + + + +void +cl_obj_construct( + IN cl_obj_t * const p_obj, + IN const uint32_t obj_type ) +{ + CL_ASSERT( p_obj ); + cl_memclr( p_obj, sizeof( cl_obj_t ) ); + + cl_spinlock_construct( &p_obj->lock ); + p_obj->state = CL_UNINITIALIZED; + p_obj->type = obj_type; + cl_event_construct( &p_obj->event ); + + cl_qlist_init( &p_obj->parent_list ); + cl_qlist_init( &p_obj->child_list ); + + /* Insert the object into the global tracking list. */ + __track_obj( p_obj ); +} + + + +cl_status_t +cl_obj_init( + IN cl_obj_t * const p_obj, + IN cl_destroy_type_t destroy_type, + IN const cl_pfn_obj_call_t pfn_destroying OPTIONAL, + IN const cl_pfn_obj_call_t pfn_cleanup OPTIONAL, + IN const cl_pfn_obj_call_t pfn_free ) +{ + cl_status_t status; + + CL_ASSERT( p_obj && pfn_free ); + CL_ASSERT( p_obj->state == CL_UNINITIALIZED ); + + /* The object references itself until it is destroyed. */ + p_obj->ref_cnt = 1; + + /* Record destruction callbacks. */ + p_obj->pfn_destroying = pfn_destroying; + p_obj->pfn_cleanup = pfn_cleanup; + p_obj->pfn_free = pfn_free; + + /* Set the destroy function pointer based on the destruction type. */ + p_obj->destroy_type = destroy_type; + p_obj->async_item.pfn_callback = __destroy_cb; + + /* Initialize the spinlock. */ + status = cl_spinlock_init( &p_obj->lock ); + if( status != CL_SUCCESS ) + return status; + + /* Initialize the synchronous cleanup event. */ + status = cl_event_init( &p_obj->event, FALSE ); + if( status != CL_SUCCESS ) + return status; + + p_obj->state = CL_INITIALIZED; + + return CL_SUCCESS; +} + + + +void +cl_obj_destroy( + IN cl_obj_t * p_obj ) +{ + cl_state_t old_state; + + CL_ASSERT( p_obj ); + + /* Mark that we're destroying the object. */ + old_state = __obj_set_state( p_obj, CL_DESTROYING ); + + /* + * Only a single thread can actually destroy the object. Multiple + * threads can initiate destruction as long as the callers can ensure + * their object reference is valid. + */ + if( old_state == CL_DESTROYING ) + return; + + /* Destroy the object. */ + __destroy_obj( p_obj ); +} + + + +void +cl_obj_reset( + IN cl_obj_t * const p_obj ) +{ + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->ref_cnt == 0 ); + CL_ASSERT( p_obj->state == CL_DESTROYING ); + + p_obj->ref_cnt = 1; + p_obj->state = CL_INITIALIZED; + + cl_qlist_remove_all( &p_obj->parent_list ); + cl_qlist_remove_all( &p_obj->child_list ); +} + + + +static cl_state_t +__obj_set_state( + IN cl_obj_t * const p_obj, + IN const cl_state_t new_state ) +{ + cl_state_t old_state; + + cl_spinlock_acquire( &p_obj->lock ); + old_state = p_obj->state; + p_obj->state = new_state; + cl_spinlock_release( &p_obj->lock ); + + return old_state; +} + + + +/* + * Add a dependent relationship between two objects. + */ +cl_status_t +cl_obj_insert_rel( + IN cl_obj_rel_t * const p_rel, + IN cl_obj_t * const p_parent_obj, + IN cl_obj_t * const p_child_obj ) +{ + cl_status_t status; + CL_ASSERT( p_rel && p_parent_obj && p_child_obj ); + + cl_spinlock_acquire( &p_parent_obj->lock ); + status = cl_obj_insert_rel_parent_locked( p_rel, p_parent_obj, p_child_obj ); + cl_spinlock_release( &p_parent_obj->lock ); + return status; +} + + + +/* + * Add a dependent relationship between two objects. 
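+ *
+ * A relationship item typically comes from the global pool, so a
+ * caller might do the following (sketch; error handling elided, and
+ * p_parent/p_child are assumed cl_obj_t pointers):
+ *
+ *	cl_obj_rel_t *p_rel = cl_rel_alloc();
+ *
+ *	if( p_rel )
+ *		status = cl_obj_insert_rel( p_rel, p_parent, p_child );
+ *
+ * The relationship holds a reference on both objects until
+ * cl_obj_remove_rel() releases them.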
+ */ +cl_status_t +cl_obj_insert_rel_parent_locked( + IN cl_obj_rel_t * const p_rel, + IN cl_obj_t * const p_parent_obj, + IN cl_obj_t * const p_child_obj ) +{ + CL_ASSERT( p_rel && p_parent_obj && p_child_obj ); + + if(p_parent_obj->state != CL_INITIALIZED) + return CL_INVALID_STATE; + /* The child object needs to maintain a reference on the parent. */ + cl_obj_ref( p_parent_obj ); + cl_obj_ref( p_child_obj ); + + /* Save the relationship details. */ + p_rel->p_child_obj = p_child_obj; + p_rel->p_parent_obj = p_parent_obj; + + /* + * Track the object - hold both locks to ensure that the relationship is + * viewable in the child and parent lists at the same time. + */ + cl_spinlock_acquire( &p_child_obj->lock ); + + cl_qlist_insert_tail( &p_child_obj->parent_list, &p_rel->list_item ); + cl_qlist_insert_tail( &p_parent_obj->child_list, + (cl_list_item_t*)&p_rel->pool_item ); + + cl_spinlock_release( &p_child_obj->lock ); + return CL_SUCCESS; +} + + + +/* + * Remove an existing relationship. + */ +void +cl_obj_remove_rel( + IN cl_obj_rel_t * const p_rel ) +{ + cl_obj_t *p_child_obj; + cl_obj_t *p_parent_obj; + + CL_ASSERT( p_rel ); + CL_ASSERT( p_rel->p_child_obj && p_rel->p_parent_obj ); + + p_child_obj = p_rel->p_child_obj; + p_parent_obj = p_rel->p_parent_obj; + + /* + * Release the objects - hold both locks to ensure that the relationship is + * removed from the child and parent lists at the same time. + */ + cl_spinlock_acquire( &p_parent_obj->lock ); + cl_spinlock_acquire( &p_child_obj->lock ); + + cl_qlist_remove_item( &p_child_obj->parent_list, &p_rel->list_item ); + cl_qlist_remove_item( &p_parent_obj->child_list, + (cl_list_item_t*)&p_rel->pool_item ); + + cl_spinlock_release( &p_child_obj->lock ); + cl_spinlock_release( &p_parent_obj->lock ); + + /* Dereference the objects. */ + cl_obj_deref( p_parent_obj ); + cl_obj_deref( p_child_obj ); + + p_rel->p_child_obj = NULL; + p_rel->p_parent_obj = NULL; +} + + + +/* + * Increment a reference count on an object. + */ +int32_t +cl_obj_ref( + IN cl_obj_t * const p_obj ) +{ + CL_ASSERT( p_obj ); + + /* + * We need to allow referencing the object during destruction in order + * to properly synchronize destruction between parent and child objects. + */ + CL_ASSERT( p_obj->state == CL_INITIALIZED || + p_obj->state == CL_DESTROYING ); + + return cl_atomic_inc( &p_obj->ref_cnt ); +} + + + +/* + * Decrement the reference count on an AL object. Destroy the object if + * it is no longer referenced. This object should not be an object's parent. + */ +int32_t +cl_obj_deref( + IN cl_obj_t * const p_obj ) +{ + int32_t ref_cnt; + + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_INITIALIZED || + p_obj->state == CL_DESTROYING ); + + ref_cnt = cl_atomic_dec( &p_obj->ref_cnt ); + + /* If the reference count went to 0, the object should be destroyed. */ + if( ref_cnt == 0 ) + { + if( p_obj->destroy_type == CL_DESTROY_ASYNC ) + { + /* Queue the object for asynchronous destruction. */ + CL_ASSERT( gp_obj_mgr ); + cl_async_proc_queue( &gp_obj_mgr->async_proc_mgr, + &p_obj->async_item ); + } + else + { + /* Signal an event for synchronous destruction. */ + cl_event_signal( &p_obj->event ); + } + } + + return ref_cnt; +} + + + +/* + * Called to cleanup all resources allocated by an object. 
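+ *
+ * This is normally invoked from the user's free callback, along these
+ * lines (hypothetical sketch; my_obj_t as in the earlier note):
+ *
+ *	static void my_free( cl_obj_t *p_obj )
+ *	{
+ *		my_obj_t *p_my = PARENT_STRUCT( p_obj, my_obj_t, obj );
+ *
+ *		cl_obj_deinit( p_obj );
+ *		cl_free( p_my );
+ *	}
+ *
+ * The deinit must precede freeing the memory because it removes the
+ * object from the global tracking list and destroys its lock and
+ * event.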
+ */ +void +cl_obj_deinit( + IN cl_obj_t * const p_obj ) +{ + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_UNINITIALIZED || + p_obj->state == CL_DESTROYING ); +#if defined( _DEBUG_ ) + { + cl_list_item_t *p_list_item; + cl_obj_rel_t *p_rel; + + /* + * Check that we didn't leave any list items in the parent list + * that came from the global pool. Ignore list items allocated by + * the user to simplify their usage model. + */ + for( p_list_item = cl_qlist_head( &p_obj->parent_list ); + p_list_item != cl_qlist_end( &p_obj->parent_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_rel = (cl_obj_rel_t*)PARENT_STRUCT( p_list_item, + cl_obj_rel_t, list_item ); + CL_ASSERT( p_rel->pool_item.p_pool != + &gp_obj_mgr->rel_pool.qcpool ); + } + } +#endif + CL_ASSERT( cl_is_qlist_empty( &p_obj->child_list ) ); + + /* Remove the object from the global tracking list. */ + __remove_obj( p_obj ); + + cl_event_destroy( &p_obj->event ); + cl_spinlock_destroy( &p_obj->lock ); + + /* Mark the object as destroyed for debugging purposes. */ + p_obj->state = CL_DESTROYED; +} + + + +/* + * Remove the given object from its relationships with all its parents. + * This call requires synchronization to the given object. + */ +static void +__remove_parent_rel( + IN cl_obj_t * const p_obj ) +{ + cl_list_item_t *p_list_item; + cl_obj_rel_t *p_rel; + + /* Remove this child object from all its parents. */ + for( p_list_item = cl_qlist_tail( &p_obj->parent_list ); + p_list_item != cl_qlist_end( &p_obj->parent_list ); + p_list_item = cl_qlist_prev( p_list_item ) ) + { + p_rel = (cl_obj_rel_t*)PARENT_STRUCT( p_list_item, + cl_obj_rel_t, list_item ); + + /* + * Remove the child from the parent's list, but do not dereference + * the parent. This lets the user access the parent in the callback + * routines, but allows destruction to proceed. + */ + cl_spinlock_acquire( &p_rel->p_parent_obj->lock ); + cl_qlist_remove_item( &p_rel->p_parent_obj->child_list, + (cl_list_item_t*)&p_rel->pool_item ); + + /* + * Remove the relationship's reference to the child. Use an atomic + * decrement rather than cl_obj_deref, since we're already holding the + * child object's lock. + */ + cl_atomic_dec( &p_obj->ref_cnt ); + CL_ASSERT( p_obj->ref_cnt > 0 ); + + cl_spinlock_release( &p_rel->p_parent_obj->lock ); + + /* + * Mark that the child is no longer related to the parent. We still + * hold a reference on the parent object, so we don't clear the parent + * pointer until that reference is released. + */ + p_rel->p_child_obj = NULL; + } +} + + + +static void +__destroy_child_obj( + IN cl_obj_t * p_obj ) +{ + cl_list_item_t *p_list_item; + cl_obj_rel_t *p_rel; + cl_obj_t *p_child_obj; + cl_state_t old_state; + + /* Destroy all child objects. */ + cl_spinlock_acquire( &p_obj->lock ); + for( p_list_item = cl_qlist_tail( &p_obj->child_list ); + p_list_item != cl_qlist_end( &p_obj->child_list ); + p_list_item = cl_qlist_tail( &p_obj->child_list ) ) + { + p_rel = (cl_obj_rel_t*)PARENT_STRUCT( p_list_item, + cl_obj_rel_t, pool_item ); + + /* + * Take a reference on the child to protect against another parent + * of the object destroying it while we are trying to access it. + * If the child object is being destroyed, it will try to remove + * this relationship from this parent. + */ + p_child_obj = p_rel->p_child_obj; + cl_obj_ref( p_child_obj ); + + /* + * We cannot hold the parent lock when acquiring the child's lock, or + * a deadlock can occur if the child is in the process of destroying + * itself and its parent relationships. 
+ */ + cl_spinlock_release( &p_obj->lock ); + + /* + * Mark that we wish to destroy the object. If the old state indicates + * that we should destroy the object, continue with the destruction. + * Note that there is a reference held on the child object from its + * creation. We no longer need the prior reference taken above. + */ + old_state = __obj_set_state( p_child_obj, CL_DESTROYING ); + cl_obj_deref( p_child_obj ); + + if( old_state != CL_DESTROYING ) + __destroy_obj( p_child_obj ); + + /* Continue processing the relationship list. */ + cl_spinlock_acquire( &p_obj->lock ); + } + cl_spinlock_release( &p_obj->lock ); +} + + + +/* + * Destroys an object. This call returns TRUE if the destruction process + * should proceed, or FALSE if destruction is already in progress. + */ +static void +__destroy_obj( + IN cl_obj_t *p_obj ) +{ + uint32_t ref_cnt; + cl_destroy_type_t destroy_type; + + CL_ASSERT( p_obj ); + CL_ASSERT( p_obj->state == CL_DESTROYING ); + + /* Remove this child object from all its parents. */ + __remove_parent_rel( p_obj ); + + /* Notify the user that the object is being destroyed. */ + if( p_obj->pfn_destroying ) + p_obj->pfn_destroying( p_obj ); + + /* Destroy all child objects. */ + __destroy_child_obj( p_obj ); + + /* + * Cache the destroy_type because the object could be freed by the time + * cl_obj_deref below returns. + */ + destroy_type = p_obj->destroy_type; + + /* Dereference this object as it is being destroyed. */ + ref_cnt = cl_obj_deref( p_obj ); + + if( destroy_type == CL_DESTROY_SYNC ) + { + if( ref_cnt ) + { + /* Wait for all other references to go away. */ +#if DBG + /* + * In debug builds, we assert every 10 seconds - a synchronous + * destruction should not take that long. + */ + while( cl_event_wait_on( &p_obj->event, 10000000, FALSE ) == + CL_TIMEOUT ) + { + CL_ASSERT( !p_obj->ref_cnt ); + } +#else /* DBG */ + cl_event_wait_on( &p_obj->event, EVENT_NO_TIMEOUT, FALSE ); +#endif /* DBG */ + } + __destroy_cb( &p_obj->async_item ); + } +} + + + +/* + * Dereference all parents the object was related to. + */ +static cl_obj_t* +__deref_parents( + IN cl_obj_t * const p_obj ) +{ + cl_list_item_t *p_list_item; + cl_obj_rel_t *p_rel; + cl_obj_t *p_parent_obj; + + /* Destruction of the object is already serialized - no need to lock. */ + + /* + * Dereference all parents. Keep the relationship items in the child's + * list, so that they can be returned to the user through the free callback. + */ + for( p_list_item = cl_qlist_head( &p_obj->parent_list ); + p_list_item != cl_qlist_end( &p_obj->parent_list ); + p_list_item = cl_qlist_next( p_list_item ) ) + { + p_rel = (cl_obj_rel_t*)PARENT_STRUCT( p_list_item, + cl_obj_rel_t, list_item ); + + p_parent_obj = p_rel->p_parent_obj; + p_rel->p_parent_obj = NULL; + CL_ASSERT( !p_rel->p_child_obj ); + if( cl_qlist_next( p_list_item ) == + cl_qlist_end( &p_obj->parent_list ) ) + { + /* Last parent - don't dereference it until after the "free" cb. */ + return p_parent_obj; + } + else + { + cl_obj_deref( p_parent_obj ); + } + } + return NULL; +} + + + +static void +__destroy_cb( + IN cl_async_proc_item_t *p_item ) +{ + cl_obj_t *p_obj, *p_last_parent; + + CL_ASSERT( p_item ); + + p_obj = PARENT_STRUCT( p_item, cl_obj_t, async_item ); + CL_ASSERT( !p_obj->ref_cnt ); + CL_ASSERT( p_obj->state == CL_DESTROYING ); + + /* Cleanup any hardware related resources. */ + if( p_obj->pfn_cleanup ) + p_obj->pfn_cleanup( p_obj ); + + /* We can now safely dereference all but the last parent. 
*/ + p_last_parent = __deref_parents( p_obj ); + + /* Free the resources associated with the object. */ + CL_ASSERT( p_obj->pfn_free ); + p_obj->pfn_free( p_obj ); + + if( p_last_parent ) + cl_obj_deref( p_last_parent ); +} diff --git a/branches/Ndi/core/complib/cl_perf.c b/branches/Ndi/core/complib/cl_perf.c new file mode 100644 index 00000000..49675786 --- /dev/null +++ b/branches/Ndi/core/complib/cl_perf.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of performance tracking. + * + * Environment: + * All supported environments. + */ + + +/* + * Always turn on performance tracking when building this file to allow the + * performance counter functions to be built into the component library. + * Users control their use of the functions by defining the PERF_TRACK_ON + * keyword themselves before including cl_perf.h to enable the macros to + * resolve to the internal functions. + */ +#define PERF_TRACK_ON + +#include +#include +#include + + + +uint64_t +__cl_perf_run_calibration( + IN cl_perf_t* const p_perf ); + + +/* + * Initialize the state of the performance tracker. + */ +void +__cl_perf_construct( + IN cl_perf_t* const p_perf ) +{ + cl_memclr( p_perf, sizeof(cl_perf_t) ); + p_perf->state = CL_UNINITIALIZED; +} + + +/* + * Initialize the performance tracker. + */ +cl_status_t +__cl_perf_init( + IN cl_perf_t* const p_perf, + IN const uintn_t num_counters ) +{ + cl_status_t status; + cl_spinlock_t lock; + uintn_t i; + static uint64_t locked_calibration_time = 0; + static uint64_t normal_calibration_time; + + CL_ASSERT( p_perf ); + CL_ASSERT( !p_perf->size && num_counters ); + + /* Construct the performance tracker. */ + __cl_perf_construct( p_perf ); + + /* Allocate an array of counters. */ + p_perf->size = num_counters; + p_perf->data_array = (cl_perf_data_t*) + cl_zalloc( sizeof(cl_perf_data_t) * num_counters ); + + if( !p_perf->data_array ) + return( CL_INSUFFICIENT_MEMORY ); + + /* Initialize the user's counters. 
*/ + for( i = 0; i < num_counters; i++ ) + { + p_perf->data_array[i].min_time = ((uint64_t)~0); + cl_spinlock_construct( &p_perf->data_array[i].lock ); + } + + for( i = 0; i < num_counters; i++ ) + { + status = cl_spinlock_init( &p_perf->data_array[i].lock ); + if( status != CL_SUCCESS ) + { + __cl_perf_destroy( p_perf, FALSE ); + return( status ); + } + } + + /* + * Run the calibration only if it has not been run yet. Subsequent + * calls will use the results from the first calibration. + */ + if( !locked_calibration_time ) + { + /* + * Perform the calibration under lock to prevent thread context + * switches. + */ + cl_spinlock_construct( &lock ); + status = cl_spinlock_init( &lock ); + if( status != CL_SUCCESS ) + { + __cl_perf_destroy( p_perf, FALSE ); + return( status ); + } + + /* Measure the impact when running at elevated thread priority. */ + cl_spinlock_acquire( &lock ); + locked_calibration_time = __cl_perf_run_calibration( p_perf ); + cl_spinlock_release( &lock ); + cl_spinlock_destroy( &lock ); + + /* Measure the impact when running at normal thread priority. */ + normal_calibration_time = __cl_perf_run_calibration( p_perf ); + } + + /* Store the calibration results and reset the test counter. */ + p_perf->normal_calibration_time = normal_calibration_time; + p_perf->locked_calibration_time = locked_calibration_time; + p_perf->data_array[0].count = 0; + p_perf->data_array[0].total_time = 0; + p_perf->data_array[0].min_time = ((uint64_t)~0); + + p_perf->state = CL_INITIALIZED; + + return( CL_SUCCESS ); +} + + +/* + * Measure the time to take performance counters. + */ +uint64_t +__cl_perf_run_calibration( + IN cl_perf_t* const p_perf ) +{ + uint64_t start_time; + uintn_t i; + PERF_DECLARE( 0 ); + + /* Start timing. */ + start_time = cl_get_time_stamp(); + + /* + * Get the performance counter repeatedly in a loop. Use the first + * user counter as our test counter. + */ + for( i = 0; i < PERF_CALIBRATION_TESTS; i++ ) + { + cl_perf_start( 0 ); + cl_perf_stop( p_perf, 0 ); + } + + /* Calculate the total time for the calibration. */ + return( cl_get_time_stamp() - start_time ); +} + + +/* + * Destroy the performance tracker. + */ +void +__cl_perf_destroy( + IN cl_perf_t* const p_perf, + IN const boolean_t display ) +{ + uintn_t i; + + CL_ASSERT( cl_is_state_valid( p_perf->state ) ); + + if( !p_perf->data_array ) + return; + + /* Display the performance data as requested. */ + if( display && p_perf->state == CL_INITIALIZED ) + __cl_perf_display( p_perf ); + + /* Destroy the user's counters. */ + for( i = 0; i < p_perf->size; i++ ) + cl_spinlock_destroy( &p_perf->data_array[i].lock ); + + cl_free( p_perf->data_array ); + p_perf->data_array = NULL; + + p_perf->state = CL_UNINITIALIZED; +} + + +/* + * Reset the performance counters. + */ +void +__cl_perf_reset( + IN cl_perf_t* const p_perf ) +{ + uintn_t i; + + for( i = 0; i < p_perf->size; i++ ) + { + cl_spinlock_acquire( &p_perf->data_array[i].lock ); + p_perf->data_array[i].min_time = ((uint64_t)~0); + p_perf->data_array[i].total_time = 0; + p_perf->data_array[i].count = 0; + cl_spinlock_release( &p_perf->data_array[i].lock ); + } +} + + +/* + * Display the captured performance data.
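+ *
+ * The counters rendered here are normally driven through the cl_perf
+ * macros, as the calibration loop above already hints. A hedged usage
+ * sketch (MY_IDX is a hypothetical counter index below the
+ * num_counters given at init):
+ *
+ *	PERF_DECLARE( MY_IDX );
+ *	...
+ *	cl_perf_start( MY_IDX );
+ *	do_work();
+ *	cl_perf_stop( p_perf, MY_IDX );
+ *
+ * Each stop updates the slot's total_time, min_time and count, which
+ * the loop below prints one line per counter.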
+ */ +void +__cl_perf_display( + IN const cl_perf_t* const p_perf ) +{ + uintn_t i; + + CL_ASSERT( p_perf ); + CL_ASSERT( p_perf->state == CL_INITIALIZED ); + + cl_msg_out( "\n\n\nCL Perf:\tPerformance Data\n" ); + + cl_msg_out( "CL Perf:\tCounter Calibration Time\n" ); + cl_msg_out( "CL Perf:\tLocked TotalTime\tNormal TotalTime\tTest Count\n" ); + cl_msg_out( "CL Perf:\t%"PRIu64"\t%"PRIu64"\t%u\n", + p_perf->locked_calibration_time, p_perf->normal_calibration_time, + PERF_CALIBRATION_TESTS ); + + cl_msg_out( "CL Perf:\tUser Performance Counters\n" ); + cl_msg_out( "CL Perf:\tIndex\tTotalTime\tMinTime\tCount\n" ); + for( i = 0; i < p_perf->size; i++ ) + { + cl_msg_out( "CL Perf:\t%lu\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\n", + i, p_perf->data_array[i].total_time, + p_perf->data_array[i].min_time, p_perf->data_array[i].count ); + } + cl_msg_out( "CL Perf:\tEnd of User Performance Counters\n" ); +} diff --git a/branches/Ndi/core/complib/cl_pool.c b/branches/Ndi/core/complib/cl_pool.c new file mode 100644 index 00000000..a4a88c9d --- /dev/null +++ b/branches/Ndi/core/complib/cl_pool.c @@ -0,0 +1,706 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of the grow pools. The grow pools manage a pool of objects. + * The pools can grow to meet demand, limited only by system memory. + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + + +/* + * IMPLEMENTATION OF QUICK COMPOSITE POOL + */ + +void +cl_qcpool_construct( + IN cl_qcpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + cl_memclr( p_pool, sizeof(cl_qcpool_t) ); + + p_pool->state = CL_UNINITIALIZED; +} + + +cl_status_t +cl_qcpool_init( + IN cl_qcpool_t* const p_pool, + IN const size_t min_size, + IN const size_t max_size, + IN const size_t grow_size, + IN const size_t* const component_sizes, + IN const uint32_t num_components, + IN cl_pfn_qcpool_init_t pfn_initializer OPTIONAL, + IN cl_pfn_qcpool_dtor_t pfn_destructor OPTIONAL, + IN const void* const context ) +{ + cl_status_t status; + uint32_t i; + + CL_ASSERT( p_pool ); + /* Must have a minimum of 1 component. */ + CL_ASSERT( num_components ); + /* A component size array is required. 
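+ *
+ * "Composite" means each pool object is assembled from several
+ * contiguous components. A hypothetical two-component initialization
+ * might look like:
+ *
+ *	size_t sizes[2] = { sizeof(my_hdr_t), sizeof(my_data_t) };
+ *
+ *	status = cl_qcpool_init( &pool, 8, 0, 4, sizes, 2,
+ *		my_init_cb, my_dtor_cb, &my_context );
+ *
+ * With more than one component the initializer callback is mandatory
+ * (enforced just below), since only the caller knows where the
+ * cl_pool_item_t lives within the object.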
*/ + CL_ASSERT( component_sizes ); + /* + * If no initializer is provided, the first component must be large + * enough to hold a pool item. + */ + CL_ASSERT( pfn_initializer || + (component_sizes[0] >= sizeof(cl_pool_item_t)) ); + + cl_qcpool_construct( p_pool ); + + if( num_components > 1 && !pfn_initializer ) + return( CL_INVALID_SETTING ); + + if( max_size && max_size < min_size ) + return( CL_INVALID_SETTING ); + + /* + * Allocate the array of component sizes and component pointers all + * in one allocation. + */ + p_pool->component_sizes = (size_t*)cl_zalloc( + (sizeof(size_t) + sizeof(void*)) * num_components ); + + if( !p_pool->component_sizes ) + return( CL_INSUFFICIENT_MEMORY ); + + /* Calculate the pointer to the array of pointers, used for callbacks. */ + p_pool->p_components = + (void**)(p_pool->component_sizes + num_components); + + /* Copy the user's sizes into our array for future use. */ + cl_memcpy( p_pool->component_sizes, component_sizes, + sizeof(size_t) * num_components ); + + /* Store the number of components per object. */ + p_pool->num_components = num_components; + + /* Round up and store the size of the components. */ + for( i = 0; i < num_components; i++ ) + { + /* + * We roundup each component size so that all components + * are aligned on a natural boundary. + */ + p_pool->component_sizes[i] = + ROUNDUP( p_pool->component_sizes[i], sizeof(uintn_t) ); + } + + p_pool->max_objects = max_size? max_size : ~(size_t)0; + p_pool->grow_size = grow_size; + + /* Store callback function pointers. */ + p_pool->pfn_init = pfn_initializer; /* may be NULL */ + p_pool->pfn_dtor = pfn_destructor; /* may be NULL */ + p_pool->context = context; + + cl_qlist_init( &p_pool->alloc_list ); + + cl_qlist_init( &p_pool->free_list ); + + /* + * We are now initialized. We change the initialized flag before + * growing since the grow function asserts that we are initialized. + */ + p_pool->state = CL_INITIALIZED; + + /* Allocate the minimum number of objects as requested. */ + if( !min_size ) + return( CL_SUCCESS ); + + status = cl_qcpool_grow( p_pool, min_size ); + /* Trap for error and cleanup if necessary. */ + if( status != CL_SUCCESS ) + cl_qcpool_destroy( p_pool ); + + return( status ); +} + + +void +cl_qcpool_destroy( + IN cl_qcpool_t* const p_pool ) +{ + /* CL_ASSERT that a non-NULL pointer was provided. */ + CL_ASSERT( p_pool ); + /* CL_ASSERT that we are in a valid state (not uninitialized memory). */ + CL_ASSERT( cl_is_state_valid( p_pool->state ) ); + + if( p_pool->state == CL_INITIALIZED ) + { + /* + * Assert if the user hasn't put everything back in the pool + * before destroying it. + * If they haven't, then most likely they are still using memory + * that will be freed, and the destructor will not be called! + */ + CL_ASSERT( cl_qcpool_count( p_pool ) == p_pool->num_objects ); + + /* call the user's destructor for each object in the pool */ + if( p_pool->pfn_dtor ) + { + while( !cl_is_qlist_empty( &p_pool->free_list ) ) + { + p_pool->pfn_dtor( (cl_pool_item_t*) + cl_qlist_remove_head( &p_pool->free_list ), + (void*)p_pool->context ); + } + } + else + { + cl_qlist_remove_all( &p_pool->free_list ); + } + + /* Free all allocated memory blocks.
*/ + while( !cl_is_qlist_empty( &p_pool->alloc_list ) ) + cl_free( cl_qlist_remove_head( &p_pool->alloc_list ) ); + + if( p_pool->component_sizes ) + { + cl_free( p_pool->component_sizes ); + p_pool->component_sizes = NULL; + } + } + + p_pool->state = CL_UNINITIALIZED; +} + + +cl_status_t +cl_qcpool_grow( + IN cl_qcpool_t* const p_pool, + IN size_t obj_count ) +{ + cl_status_t status = CL_SUCCESS; + uint8_t *p_objects; + cl_pool_item_t *p_pool_item; + uint32_t i; + size_t obj_size; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->state == CL_INITIALIZED ); + CL_ASSERT( obj_count ); + + /* Validate that growth is possible. */ + if( p_pool->num_objects == p_pool->max_objects ) + return( CL_INSUFFICIENT_MEMORY ); + + /* Cap the growth to the desired maximum. */ + if( obj_count > (p_pool->max_objects - p_pool->num_objects) ) + obj_count = p_pool->max_objects - p_pool->num_objects; + + /* Calculate the size of an object. */ + obj_size = 0; + for( i = 0; i < p_pool->num_components; i++ ) + obj_size += p_pool->component_sizes[i]; + + /* Allocate the buffer for the new objects. */ + p_objects = (uint8_t*) + cl_zalloc( sizeof(cl_list_item_t) + (obj_size * obj_count) ); + + /* Make sure the allocation succeeded. */ + if( !p_objects ) + return( CL_INSUFFICIENT_MEMORY ); + + /* Insert the allocation in our list. */ + cl_qlist_insert_tail( &p_pool->alloc_list, (cl_list_item_t*)p_objects ); + p_objects += sizeof(cl_list_item_t); + + /* initialize the new elements and add them to the free list */ + while( obj_count-- ) + { + /* Setup the array of components for the current object. */ + p_pool->p_components[0] = p_objects; + for( i = 1; i < p_pool->num_components; i++ ) + { + /* Calculate the pointer to the next component. */ + p_pool->p_components[i] = (uint8_t*)p_pool->p_components[i-1] + + p_pool->component_sizes[i-1]; + } + + /* + * call the user's initializer + * this can fail! + */ + if( p_pool->pfn_init ) + { + p_pool_item = NULL; + status = p_pool->pfn_init( p_pool->p_components, + p_pool->num_components, (void*)p_pool->context, &p_pool_item ); + if( status != CL_SUCCESS ) + { + /* + * User initialization failed + * we may have only grown the pool by some partial amount + * Invoke the destructor for the object that failed + * initialization. + */ + if( p_pool->pfn_dtor ) + p_pool->pfn_dtor( p_pool_item, (void*)p_pool->context ); + + /* Return the user's status. */ + return( status ); + } + CL_ASSERT( p_pool_item ); + } + else + { + /* + * If no initializer is provided, assume that the pool item + * is stored at the beginning of the first component. + */ + p_pool_item = (cl_pool_item_t*)p_pool->p_components[0]; + } + +#ifdef _DEBUG_ + /* + * Set the pool item's pool pointer to this pool so that we can + * check that items get returned to the correct pool. + */ + p_pool_item->p_pool = p_pool; +#endif + + /* Insert the new item in the free list, traping for failure. */ + cl_qlist_insert_head( &p_pool->free_list, &p_pool_item->list_item ); + + p_pool->num_objects++; + + /* move the pointer to the next item */ + p_objects += obj_size; + } + + return( status ); +} + + +cl_pool_item_t* +cl_qcpool_get( + IN cl_qcpool_t* const p_pool ) +{ + cl_list_item_t *p_list_item; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->state == CL_INITIALIZED ); + + if( cl_is_qlist_empty( &p_pool->free_list ) ) + { + /* + * No object is available. + * Return NULL if the user does not want automatic growth. + */ + if( !p_pool->grow_size ) + return( NULL ); + + /* We ran out of elements. 
Get more */ + cl_qcpool_grow( p_pool, p_pool->grow_size ); + /* + * We may not have gotten everything we wanted but we might have + * gotten something. + */ + if( cl_is_qlist_empty( &p_pool->free_list ) ) + return( NULL ); + } + + p_list_item = cl_qlist_remove_head( &p_pool->free_list ); + /* OK, at this point we have an object */ + CL_ASSERT( p_list_item != cl_qlist_end( &p_pool->free_list ) ); + return( (cl_pool_item_t*)p_list_item ); +} + + +cl_pool_item_t* +cl_qcpool_get_tail( + IN cl_qcpool_t* const p_pool ) +{ + cl_list_item_t *p_list_item; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->state == CL_INITIALIZED ); + + if( cl_is_qlist_empty( &p_pool->free_list ) ) + { + /* + * No object is available. + * Return NULL if the user does not want automatic growth. + */ + if( !p_pool->grow_size ) + return( NULL ); + + /* We ran out of elements. Get more */ + cl_qcpool_grow( p_pool, p_pool->grow_size ); + /* + * We may not have gotten everything we wanted but we might have + * gotten something. + */ + if( cl_is_qlist_empty( &p_pool->free_list ) ) + return( NULL ); + } + + p_list_item = cl_qlist_remove_tail( &p_pool->free_list ); + /* OK, at this point we have an object */ + CL_ASSERT( p_list_item != cl_qlist_end( &p_pool->free_list ) ); + return( (cl_pool_item_t*)p_list_item ); +} + + +/* + * IMPLEMENTATION OF QUICK GROW POOL + */ + +/* + * Callback to translate quick composite to quick grow pool + * initializer callback. + */ +static cl_status_t +__cl_qpool_init_cb( + IN void** const p_comp_array, + IN const uint32_t num_components, + IN void* const context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + cl_qpool_t *p_pool = (cl_qpool_t*)context; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->pfn_init ); + CL_ASSERT( num_components == 1 ); + + UNUSED_PARAM( num_components ); + + return( p_pool->pfn_init( p_comp_array[0], (void*)p_pool->context, + pp_pool_item ) ); +} + + +/* + * Callback to translate quick composite to quick grow pool + * destructor callback. + */ +static void +__cl_qpool_dtor_cb( + IN const cl_pool_item_t* const p_pool_item, + IN void* const context ) +{ + cl_qpool_t *p_pool = (cl_qpool_t*)context; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->pfn_dtor ); + + p_pool->pfn_dtor( p_pool_item, (void*)p_pool->context ); +} + + +void +cl_qpool_construct( + IN cl_qpool_t* const p_pool ) +{ + cl_memclr( p_pool, sizeof(cl_qpool_t) ); + + cl_qcpool_construct( &p_pool->qcpool ); +} + + +cl_status_t +cl_qpool_init( + IN cl_qpool_t* const p_pool, + IN const size_t min_size, + IN const size_t max_size, + IN const size_t grow_size, + IN const size_t object_size, + IN cl_pfn_qpool_init_t pfn_initializer OPTIONAL, + IN cl_pfn_qpool_dtor_t pfn_destructor OPTIONAL, + IN const void* const context ) +{ + cl_status_t status; + CL_ASSERT( p_pool ); + + p_pool->pfn_init = pfn_initializer; /* may be NULL */ + p_pool->pfn_dtor = pfn_destructor; /* may be NULL */ + p_pool->context = context; + + status = cl_qcpool_init( &p_pool->qcpool, min_size, max_size, grow_size, + &object_size, 1, pfn_initializer ? __cl_qpool_init_cb : NULL, + pfn_destructor ? __cl_qpool_dtor_cb : NULL, p_pool ); + + return( status ); +} + + +/* + * IMPLEMENTATION OF COMPOSITE POOL + */ + + +/* + * Callback to translate quick composite to compsite pool + * initializer callback. 
+ */ +static cl_status_t +__cl_cpool_init_cb( + IN void** const p_comp_array, + IN const uint32_t num_components, + IN void* const context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + cl_cpool_t *p_pool = (cl_cpool_t*)context; + cl_pool_obj_t *p_pool_obj; + cl_status_t status = CL_SUCCESS; + + CL_ASSERT( p_pool ); + + /* + * Set our pointer to the list item, which is stored at the beginning of + * the first component. + */ + p_pool_obj = (cl_pool_obj_t*)p_comp_array[0]; + /* Set the pool item pointer for the caller. */ + *pp_pool_item = (cl_pool_item_t*)p_pool_obj; + + /* Calculate the pointer to the user's first component. */ + p_comp_array[0] = ((uint8_t*)p_comp_array[0]) + sizeof(cl_pool_obj_t); + + /* + * Set the object pointer in the pool object to point to the first of the + * user's components. + */ + p_pool_obj->list_obj.p_object = p_comp_array[0]; + + /* Invoke the user's constructor callback. */ + if( p_pool->pfn_init ) + { + status = p_pool->pfn_init( p_comp_array, num_components, + (void*)p_pool->context ); + } + + return( status ); +} + + +/* + * Callback to translate quick composite to composite pool + * destructor callback. + */ +static void +__cl_cpool_dtor_cb( + IN const cl_pool_item_t* const p_pool_item, + IN void* const context ) +{ + cl_cpool_t *p_pool = (cl_cpool_t*)context; + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->pfn_dtor ); + CL_ASSERT( ((cl_pool_obj_t*)p_pool_item)->list_obj.p_object ); + + /* Invoke the user's destructor callback. */ + p_pool->pfn_dtor( (void*)((cl_pool_obj_t*)p_pool_item)->list_obj.p_object, + (void*)p_pool->context ); +} + + +void +cl_cpool_construct( + IN cl_cpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + cl_memclr( p_pool, sizeof(cl_cpool_t) ); + + cl_qcpool_construct( &p_pool->qcpool ); +} + + +cl_status_t +cl_cpool_init( + IN cl_cpool_t* const p_pool, + IN const size_t min_size, + IN const size_t max_size, + IN const size_t grow_size, + IN size_t* const component_sizes, + IN const uint32_t num_components, + IN cl_pfn_cpool_init_t pfn_initializer OPTIONAL, + IN cl_pfn_cpool_dtor_t pfn_destructor OPTIONAL, + IN const void* const context ) +{ + cl_status_t status; + + CL_ASSERT( p_pool ); + CL_ASSERT( num_components ); + CL_ASSERT( component_sizes ); + + /* Add the size of the pool object to the first component. */ + component_sizes[0] += sizeof(cl_pool_obj_t); + + /* Store callback function pointers. */ + p_pool->pfn_init = pfn_initializer; /* may be NULL */ + p_pool->pfn_dtor = pfn_destructor; /* may be NULL */ + p_pool->context = context; + + status = cl_qcpool_init( &p_pool->qcpool, min_size, max_size, grow_size, + component_sizes, num_components, __cl_cpool_init_cb, + pfn_destructor ? __cl_cpool_dtor_cb : NULL, + p_pool ); + + /* Restore the original value of the first component. */ + component_sizes[0] -= sizeof(cl_pool_obj_t); + + return( status ); +} + + +/* + * IMPLEMENTATION OF GROW POOL + */ + +/* + * Callback to translate quick composite to grow pool constructor callback. + */ +static cl_status_t +__cl_pool_init_cb( + IN void** const pp_obj, + IN const uint32_t count, + IN void* const context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + cl_pool_t *p_pool = (cl_pool_t*)context; + cl_pool_obj_t *p_pool_obj; + cl_status_t status = CL_SUCCESS; + + CL_ASSERT( p_pool ); + CL_ASSERT( pp_obj ); + CL_ASSERT( count == 1 ); + + UNUSED_PARAM( count ); + + /* + * Set our pointer to the list item, which is stored at the beginning of + * the first component. 
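+ *
+ * The memory layout this relies on is simply:
+ *
+ *	[ cl_pool_obj_t ][ user component 0 ][ user component 1 ]...
+ *
+ * The pool hands the user a pointer just past the cl_pool_obj_t
+ * prefix, and this callback and the destructor callback below convert
+ * between the two views.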
+ */ + p_pool_obj = (cl_pool_obj_t*)*pp_obj; + *pp_pool_item = (cl_pool_item_t*)p_pool_obj; + + /* Calculate the pointer to the user's first component. */ + *pp_obj = ((uint8_t*)*pp_obj) + sizeof(cl_pool_obj_t); + + /* + * Set the object pointer in the pool item to point to the first of the + * user's components. + */ + p_pool_obj->list_obj.p_object = *pp_obj; + + /* Invoke the user's constructor callback. */ + if( p_pool->pfn_init ) + status = p_pool->pfn_init( *pp_obj, (void*)p_pool->context ); + + return( status ); +} + + +/* + * Callback to translate quick composite to grow pool destructor callback. + */ +static void +__cl_pool_dtor_cb( + IN const cl_pool_item_t* const p_pool_item, + IN void* const context ) +{ + cl_pool_t *p_pool = (cl_pool_t*)context; + CL_ASSERT( p_pool ); + CL_ASSERT( p_pool->pfn_dtor ); + CL_ASSERT( ((cl_pool_obj_t*)p_pool_item)->list_obj.p_object ); + + /* Invoke the user's destructor callback. */ + p_pool->pfn_dtor( (void*)((cl_pool_obj_t*)p_pool_item)->list_obj.p_object, + (void*)p_pool->context ); +} + + +void +cl_pool_construct( + IN cl_pool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + cl_memclr( p_pool, sizeof(cl_pool_t) ); + + cl_qcpool_construct( &p_pool->qcpool ); +} + + +cl_status_t +cl_pool_init( + IN cl_pool_t* const p_pool, + IN const size_t min_size, + IN const size_t max_size, + IN const size_t grow_size, + IN const size_t object_size, + IN cl_pfn_pool_init_t pfn_initializer OPTIONAL, + IN cl_pfn_pool_dtor_t pfn_destructor OPTIONAL, + IN const void* const context ) +{ + cl_status_t status; + size_t total_size; + + CL_ASSERT( p_pool ); + + /* Add the size of the list item to the first component. */ + total_size = object_size + sizeof(cl_pool_obj_t); + + /* Store callback function pointers. */ + p_pool->pfn_init = pfn_initializer; /* may be NULL */ + p_pool->pfn_dtor = pfn_destructor; /* may be NULL */ + p_pool->context = context; + + /* + * We need an initializer in all cases for quick composite pool, since + * the user pointer must be manipulated to hide the prefixed cl_pool_obj_t. + */ + status = cl_qcpool_init( &p_pool->qcpool, min_size, max_size, grow_size, + &total_size, 1, __cl_pool_init_cb, + pfn_destructor ? __cl_pool_dtor_cb : NULL, p_pool ); + + return( status ); +} diff --git a/branches/Ndi/core/complib/cl_ptr_vector.c b/branches/Ndi/core/complib/cl_ptr_vector.c new file mode 100644 index 00000000..dbb3d926 --- /dev/null +++ b/branches/Ndi/core/complib/cl_ptr_vector.c @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	This file contains the cl_ptr_vector implementation.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#include <complib/cl_ptr_vector.h>
+#include <complib/cl_memory.h>
+
+
+void
+cl_ptr_vector_construct(
+	IN	cl_ptr_vector_t* const	p_vector )
+{
+	CL_ASSERT( p_vector );
+
+	cl_memclr( p_vector, sizeof(cl_ptr_vector_t) );
+
+	p_vector->state = CL_UNINITIALIZED;
+}
+
+
+cl_status_t
+cl_ptr_vector_init(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			min_cap,
+	IN	const size_t			grow_size )
+{
+	cl_status_t	status = CL_SUCCESS;
+
+	CL_ASSERT( p_vector );
+
+	cl_ptr_vector_construct( p_vector );
+
+	p_vector->grow_size = grow_size;
+
+	/*
+	 * Set the state to initialized so that the call to set_size
+	 * doesn't assert.
+	 */
+	p_vector->state = CL_INITIALIZED;
+
+	/* get the storage needed by the user */
+	if( min_cap )
+	{
+		status = cl_ptr_vector_set_capacity( p_vector, min_cap );
+		if( status != CL_SUCCESS )
+			cl_ptr_vector_destroy( p_vector );
+	}
+
+	return( status );
+}
+
+
+void
+cl_ptr_vector_destroy(
+	IN	cl_ptr_vector_t* const	p_vector )
+{
+	CL_ASSERT( p_vector );
+	CL_ASSERT( cl_is_state_valid( p_vector->state ) );
+
+	/* Free the internal storage if the vector was initialized. */
+	if( p_vector->state == CL_INITIALIZED )
+	{
+		/* Destroy the pointer array. */
+		if( p_vector->p_ptr_array )
+		{
+			cl_free( (void*)p_vector->p_ptr_array );
+			p_vector->p_ptr_array = NULL;
+		}
+	}
+
+	p_vector->state = CL_UNINITIALIZED;
+}
+
+
+cl_status_t
+cl_ptr_vector_at(
+	IN	const cl_ptr_vector_t* const	p_vector,
+	IN	const size_t					index,
+	OUT	void** const					p_element )
+{
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Range check */
+	if( index >= p_vector->size )
+		return( CL_INVALID_PARAMETER );
+
+	*p_element = cl_ptr_vector_get( p_vector, index );
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_ptr_vector_set(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			index,
+	IN	const void* const		element )
+{
+	cl_status_t	status;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Determine if the vector has room for this element. */
+	if( index >= p_vector->size )
+	{
+		/* Resize to accommodate the given index. */
+		status = cl_ptr_vector_set_size( p_vector, index + 1 );
+
+		/* Check for failure on or before the given index. */
+		if( (status != CL_SUCCESS) && (p_vector->size < index) )
+			return( status );
+	}
+
+	/* At this point, the array is guaranteed to be big enough */
+	p_vector->p_ptr_array[index] = element;
+
+	return( CL_SUCCESS );
+}
+
+
+void*
+cl_ptr_vector_remove(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			index )
+{
+	size_t		src;
+	const void	*element;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+	CL_ASSERT( p_vector->size > index );
+
+	/* Store a copy of the element to return. */
+	element = p_vector->p_ptr_array[index];
+	/* Shift all items above the removed item down. */
+	if( index < --p_vector->size )
+	{
+		for( src = index; src < p_vector->size; src++ )
+			p_vector->p_ptr_array[src] = p_vector->p_ptr_array[src + 1];
+	}
+	/* Clear the entry for the element just outside of the new upper bound.
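+	 * This keeps the unused tail of the array NULL, so a later grow via
+	 * cl_ptr_vector_set_size can never hand a stale pointer back to the
+	 * caller.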
 */
+	p_vector->p_ptr_array[p_vector->size] = NULL;
+
+	return( (void*)element );
+}
+
+
+cl_status_t
+cl_ptr_vector_set_capacity(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			new_capacity )
+{
+	void	*p_new_ptr_array;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Do we have to do anything here? */
+	if( new_capacity <= p_vector->capacity )
+	{
+		/* Nope */
+		return( CL_SUCCESS );
+	}
+
+	/* Allocate our pointer array. */
+	p_new_ptr_array = cl_zalloc( new_capacity * sizeof(void*) );
+	if( !p_new_ptr_array )
+		return( CL_INSUFFICIENT_MEMORY );
+
+	if( p_vector->p_ptr_array )
+	{
+		/* Copy the old pointer array into the new. */
+		cl_memcpy( p_new_ptr_array, p_vector->p_ptr_array,
+			p_vector->capacity * sizeof(void*) );
+
+		/* Free the old pointer array. */
+		cl_free( (void*)p_vector->p_ptr_array );
+	}
+
+	/* Set the new array. */
+	p_vector->p_ptr_array = p_new_ptr_array;
+
+	/* Update the vector with the new capacity. */
+	p_vector->capacity = new_capacity;
+
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_ptr_vector_set_size(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			size )
+{
+	cl_status_t	status;
+	size_t		new_capacity;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Check to see if the requested size is the same as the existing size. */
+	if( size == p_vector->size )
+		return( CL_SUCCESS );
+
+	/* Determine if the vector has room for this element. */
+	if( size > p_vector->capacity )
+	{
+		if( !p_vector->grow_size )
+			return( CL_INSUFFICIENT_MEMORY );
+
+		/* Calculate the new capacity, taking into account the grow size. */
+		new_capacity = size;
+		if( size % p_vector->grow_size )
+		{
+			/* Round up to nearest grow_size boundary. */
+			new_capacity += p_vector->grow_size -
+				(size % p_vector->grow_size);
+		}
+
+		status = cl_ptr_vector_set_capacity( p_vector, new_capacity );
+		if( status != CL_SUCCESS )
+			return( status );
+	}
+
+	p_vector->size = size;
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_ptr_vector_set_min_size(
+	IN	cl_ptr_vector_t* const	p_vector,
+	IN	const size_t			min_size )
+{
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	if( min_size > p_vector->size )
+	{
+		/* We have to resize the array */
+		return( cl_ptr_vector_set_size( p_vector, min_size ) );
+	}
+
+	/* We didn't have to do anything */
+	return( CL_SUCCESS );
+}
+
+
+void
+cl_ptr_vector_apply_func(
+	IN	const cl_ptr_vector_t* const	p_vector,
+	IN	cl_pfn_ptr_vec_apply_t			pfn_callback,
+	IN	const void* const				context )
+{
+	size_t	i;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+	CL_ASSERT( pfn_callback );
+
+	for( i = 0; i < p_vector->size; i++ )
+		pfn_callback( i, (void*)p_vector->p_ptr_array[i], (void*)context );
+}
+
+
+size_t
+cl_ptr_vector_find_from_start(
+	IN	const cl_ptr_vector_t* const	p_vector,
+	IN	cl_pfn_ptr_vec_find_t			pfn_callback,
+	IN	const void* const				context )
+{
+	size_t	i;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+	CL_ASSERT( pfn_callback );
+
+	for( i = 0; i < p_vector->size; i++ )
+	{
+		/* Invoke the callback */
+		if( pfn_callback( i, (void*)p_vector->p_ptr_array[i],
+			(void*)context ) == CL_SUCCESS )
+		{
+			break;
+		}
+	}
+	return( i );
+}
+
+
+size_t
+cl_ptr_vector_find_from_end(
+	IN	const cl_ptr_vector_t* const	p_vector,
+	IN	cl_pfn_ptr_vec_find_t			pfn_callback,
+	IN	const void* const				context )
+{
+	size_t	i;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+	CL_ASSERT( pfn_callback );
+
+	i = p_vector->size;
+
+	while( i )
+	{
+		/* Step to the previous element, then invoke the callback with its index. */
+		i--;
+		if( pfn_callback( i, (void*)p_vector->p_ptr_array[i],
+			(void*)context ) == CL_SUCCESS )
+		{
+			return( i );
+		}
+	}
+
+	return( p_vector->size );
+}
+
diff --git a/branches/Ndi/core/complib/cl_reqmgr.c b/branches/Ndi/core/complib/cl_reqmgr.c
new file mode 100644
index 00000000..9ea1f08d
--- /dev/null
+++ b/branches/Ndi/core/complib/cl_reqmgr.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Implementation of asynchronous request manager.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#include <complib/cl_reqmgr.h>
+#include <complib/cl_memory.h>
+
+
+/* minimum number of objects to allocate */
+#define REQ_MGR_START_SIZE	10
+/* minimum number of objects to grow */
+#define REQ_MGR_GROW_SIZE	10
+
+
+/****i* Component Library: Request Manager/cl_request_object_t
+* NAME
+*	cl_request_object_t
+*
+* DESCRIPTION
+*	Structure representing a queued request within the request manager.
+*
+*	The cl_request_object_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_request_object
+{
+	cl_pool_item_t	pool_item;
+	size_t			count;
+	boolean_t		partial_ok;
+	cl_pfn_req_cb_t	pfn_callback;
+	const void		*context1;
+	const void		*context2;
+
+} cl_request_object_t;
+/*
+* FIELDS
+*	pool_item
+*		Pool item to store request in a pool or list.
+*
+*	count
+*		Number of items requested.
+*
+*	partial_ok
+*		Indicates whether the request may be fulfilled in multiple pieces.
+*
+*	pfn_callback
+*		Notification routine when completed.
+*
+*	context1
+*		Callback context information.
+*
+*	context2
+*		Callback context information.
+*
+* SEE ALSO
+*	Overview
+*********/
+
+
+void
+cl_req_mgr_construct(
+	IN	cl_req_mgr_t* const	p_req_mgr )
+{
+	CL_ASSERT( p_req_mgr );
+
+	/* Clear the structure. */
+	cl_memclr( p_req_mgr, sizeof(cl_req_mgr_t) );
+
+	/* Initialize the state of the free request stack.
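+	 * Constructing here (rather than initializing) keeps the pool in a
+	 * state where cl_req_mgr_destroy may be called safely even if
+	 * cl_req_mgr_init is never called or fails.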
*/ + cl_qpool_construct( &p_req_mgr->request_pool ); +} + + +cl_status_t +cl_req_mgr_init( + IN cl_req_mgr_t* const p_req_mgr, + IN cl_pfn_reqmgr_get_count_t pfn_get_count, + IN const void* const get_context ) +{ + cl_status_t status; + + CL_ASSERT( p_req_mgr ); + CL_ASSERT( pfn_get_count ); + + cl_qlist_init( &p_req_mgr->request_queue ); + + status = cl_qpool_init( &p_req_mgr->request_pool, REQ_MGR_START_SIZE, 0, + REQ_MGR_GROW_SIZE, sizeof(cl_request_object_t), NULL, NULL, NULL ); + + if( status != CL_SUCCESS ) + return( status ); + + /* Store callback information for the count function. */ + p_req_mgr->pfn_get_count = pfn_get_count; + p_req_mgr->get_context = get_context; + + return( CL_SUCCESS ); +} + + +void +cl_req_mgr_destroy( + IN cl_req_mgr_t* const p_req_mgr ) +{ + CL_ASSERT( p_req_mgr ); + + /* Return all requests to the grow pool. */ + if( cl_is_qpool_inited( &p_req_mgr->request_pool ) ) + { + cl_qpool_put_list( &p_req_mgr->request_pool, + &p_req_mgr->request_queue ); + } + + cl_qpool_destroy( &p_req_mgr->request_pool ); +} + + +cl_status_t +cl_req_mgr_get( + IN cl_req_mgr_t* const p_req_mgr, + IN OUT size_t* const p_count, + IN const cl_req_type_t req_type, + IN cl_pfn_req_cb_t pfn_callback, + IN const void* const context1, + IN const void* const context2 ) +{ + size_t available_count; + size_t count; + cl_request_object_t *p_request; + + CL_ASSERT( p_req_mgr ); + CL_ASSERT( cl_is_qpool_inited( &p_req_mgr->request_pool ) ); + CL_ASSERT( p_count ); + CL_ASSERT( *p_count ); + + /* Get the number of available objects in the grow pool. */ + available_count = + p_req_mgr->pfn_get_count( (void*)p_req_mgr->get_context ); + + /* + * Check to see if there is nothing on the queue, and there are + * enough items to satisfy the whole request. + */ + if( cl_is_qlist_empty( &p_req_mgr->request_queue ) && + *p_count <= available_count ) + { + return( CL_SUCCESS ); + } + + if( req_type == REQ_GET_SYNC ) + return( CL_INSUFFICIENT_RESOURCES ); + + /* We need a request object to place on the request queue. */ + p_request = (cl_request_object_t*) + cl_qpool_get( &p_req_mgr->request_pool ); + + if( !p_request ) + return( CL_INSUFFICIENT_MEMORY ); + + /* + * We can return the available number of objects but we still need + * to queue a request for the remainder. + */ + if( req_type == REQ_GET_PARTIAL_OK && + cl_is_qlist_empty( &p_req_mgr->request_queue ) ) + { + count = *p_count - available_count; + *p_count = available_count; + p_request->partial_ok = TRUE; + } + else + { + /* + * We cannot return any objects. We queue a request for + * all of them. + */ + count = *p_count; + *p_count = 0; + p_request->partial_ok = FALSE; + } + + /* Set the request fields and enqueue it. */ + p_request->pfn_callback = pfn_callback; + p_request->context1 = context1; + p_request->context2 = context2; + p_request->count = count; + + cl_qlist_insert_tail( &p_req_mgr->request_queue, + &p_request->pool_item.list_item ); + + return( CL_PENDING ); +} + + +cl_status_t +cl_req_mgr_resume( + IN cl_req_mgr_t* const p_req_mgr, + OUT size_t* const p_count, + OUT cl_pfn_req_cb_t* const ppfn_callback, + OUT const void** const p_context1, + OUT const void** const p_context2 ) +{ + size_t available_count; + cl_request_object_t *p_queued_request; + + CL_ASSERT( p_req_mgr ); + CL_ASSERT( cl_is_qpool_inited( &p_req_mgr->request_pool ) ); + + /* If no requests are pending, there's nothing to return. 
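+	 * CL_NOT_DONE simply means the queue is empty; no callback information
+	 * has been written to the output parameters.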
 */
+	if( cl_is_qlist_empty( &p_req_mgr->request_queue ) )
+		return( CL_NOT_DONE );
+
+	/*
+	 * Get the item at the head of the request queue,
+	 * but do not remove it yet.
+	 */
+	p_queued_request = (cl_request_object_t*)
+		cl_qlist_head( &p_req_mgr->request_queue );
+
+	*ppfn_callback = p_queued_request->pfn_callback;
+	*p_context1 = p_queued_request->context1;
+	*p_context2 = p_queued_request->context2;
+
+	available_count =
+		p_req_mgr->pfn_get_count( (void*)p_req_mgr->get_context );
+
+	/* See if the request can be fulfilled. */
+	if( p_queued_request->count > available_count )
+	{
+		if( !p_queued_request->partial_ok )
+			return( CL_INSUFFICIENT_RESOURCES );
+
+		p_queued_request->count -= available_count;
+
+		*p_count = available_count;
+		return( CL_PENDING );
+	}
+
+	*p_count = p_queued_request->count;
+
+	/* The entire request can be met.  Remove it from the request queue. */
+	cl_qlist_remove_head( &p_req_mgr->request_queue );
+
+	/* Return the internal request object to the free stack. */
+	cl_qpool_put( &p_req_mgr->request_pool,
+		&p_queued_request->pool_item );
+	return( CL_SUCCESS );
+}
diff --git a/branches/Ndi/core/complib/cl_statustext.c b/branches/Ndi/core/complib/cl_statustext.c
new file mode 100644
index 00000000..76441704
--- /dev/null
+++ b/branches/Ndi/core/complib/cl_statustext.c
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Defines strings to decode cl_status_t return values.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#include <complib/cl_types.h>
+
+
+/* The cl_status_t values, converted to text for easier printing.
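+ *
+ * The array is indexed directly by a cl_status_t value, so its entries
+ * must stay in the exact order of the status enum.  Illustrative use
+ * (the variable name is hypothetical):
+ *
+ *	printf( "op failed: %s\n", cl_status_text[status] );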
 */
+const char* cl_status_text[] =
+{
+	"CL_SUCCESS",
+	"CL_ERROR",
+	"CL_INVALID_STATE",
+	"CL_INVALID_OPERATION",
+	"CL_INVALID_SETTING",
+	"CL_INVALID_PARAMETER",
+	"CL_INSUFFICIENT_RESOURCES",
+	"CL_INSUFFICIENT_MEMORY",
+	"CL_INVALID_PERMISSION",
+	"CL_COMPLETED",
+	"CL_NOT_DONE",
+	"CL_PENDING",
+	"CL_TIMEOUT",
+	"CL_CANCELED",
+	"CL_REJECT",
+	"CL_OVERRUN",
+	"CL_NOT_FOUND",
+	"CL_UNAVAILABLE",
+	"CL_BUSY",
+	"CL_DISCONNECT",
+	"CL_DUPLICATE",
+	"CL_INVALID_REQUEST"
+};
diff --git a/branches/Ndi/core/complib/cl_threadpool.c b/branches/Ndi/core/complib/cl_threadpool.c
new file mode 100644
index 00000000..cba8f2c7
--- /dev/null
+++ b/branches/Ndi/core/complib/cl_threadpool.c
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Implementation of thread pool.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#include <complib/cl_threadpool.h>
+#include <complib/cl_atomic.h>
+#include <complib/cl_memory.h>
+
+void
+__cl_thread_pool_routine(
+	IN	void* const	context )
+{
+	cl_status_t			status = CL_SUCCESS;
+	cl_thread_pool_t	*p_thread_pool = (cl_thread_pool_t*)context;
+
+	/* Continue looping until signalled to end. */
+	while( !p_thread_pool->exit )
+	{
+		/* Wait for the specified event to occur. */
+		status = cl_event_wait_on( &p_thread_pool->wakeup_event,
+			EVENT_NO_TIMEOUT, TRUE );
+
+		/* See if we've been signalled to end execution. */
+		if( (p_thread_pool->exit) || (status == CL_NOT_DONE) )
+			break;
+
+		/* The event has been signalled.  Invoke the callback. */
+		(*p_thread_pool->pfn_callback)( (void*)p_thread_pool->context );
+	}
+
+	/*
+	 * Decrement the running count to notify the destroying thread
+	 * that the event was received and processed.
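+	 * This decrement-then-signal handshake is what cl_thread_pool_destroy
+	 * waits on: it keeps signalling the wakeup event until running_count
+	 * reaches zero.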
+	 */
+	cl_atomic_dec( &p_thread_pool->running_count );
+	cl_event_signal( &p_thread_pool->destroy_event );
+}
+
+
+void
+cl_thread_pool_construct(
+	IN	cl_thread_pool_t* const	p_thread_pool )
+{
+	CL_ASSERT( p_thread_pool );
+
+	cl_memclr( p_thread_pool, sizeof(cl_thread_pool_t) );
+	cl_event_construct( &p_thread_pool->wakeup_event );
+	cl_event_construct( &p_thread_pool->destroy_event );
+	cl_list_construct( &p_thread_pool->thread_list );
+	p_thread_pool->state = CL_UNINITIALIZED;
+}
+
+
+cl_status_t
+cl_thread_pool_init(
+	IN	cl_thread_pool_t* const		p_thread_pool,
+	IN	uint32_t					count,
+	IN	cl_pfn_thread_callback_t	pfn_callback,
+	IN	const void* const			context,
+	IN	const char* const			name )
+{
+	cl_status_t	status;
+	cl_thread_t	*p_thread;
+	uint32_t	i;
+
+	CL_ASSERT( p_thread_pool );
+	CL_ASSERT( pfn_callback );
+
+	cl_thread_pool_construct( p_thread_pool );
+
+	if( !count )
+		count = cl_proc_count();
+
+	status = cl_list_init( &p_thread_pool->thread_list, count );
+	if( status != CL_SUCCESS )
+	{
+		cl_thread_pool_destroy( p_thread_pool );
+		return( status );
+	}
+
+	/* Initialize the event that the threads wait on. */
+	status = cl_event_init( &p_thread_pool->wakeup_event, FALSE );
+	if( status != CL_SUCCESS )
+	{
+		cl_thread_pool_destroy( p_thread_pool );
+		return( status );
+	}
+
+	/* Initialize the event used to destroy the threadpool. */
+	status = cl_event_init( &p_thread_pool->destroy_event, FALSE );
+	if( status != CL_SUCCESS )
+	{
+		cl_thread_pool_destroy( p_thread_pool );
+		return( status );
+	}
+
+	p_thread_pool->pfn_callback = pfn_callback;
+	p_thread_pool->context = context;
+
+	for( i = 0; i < count; i++ )
+	{
+		/* Create a new thread. */
+		p_thread = (cl_thread_t*)cl_malloc( sizeof(cl_thread_t) );
+		if( !p_thread )
+		{
+			cl_thread_pool_destroy( p_thread_pool );
+			return( CL_INSUFFICIENT_MEMORY );
+		}
+
+		cl_thread_construct( p_thread );
+
+		/*
+		 * Add it to the list.  This is guaranteed to work since we
+		 * initialized the list to hold at least the number of threads we want
+		 * to store there.
+		 */
+		status = cl_list_insert_head( &p_thread_pool->thread_list, p_thread );
+		CL_ASSERT( status == CL_SUCCESS );
+
+		/* Start the thread. */
+		status = cl_thread_init( p_thread, __cl_thread_pool_routine,
+			p_thread_pool, name );
+		if( status != CL_SUCCESS )
+		{
+			cl_thread_pool_destroy( p_thread_pool );
+			return( status );
+		}
+
+		/*
+		 * Increment the running count to ensure that a destroying thread
+		 * will signal all the threads.
+		 */
+		cl_atomic_inc( &p_thread_pool->running_count );
+	}
+	p_thread_pool->state = CL_INITIALIZED;
+	return( CL_SUCCESS );
+}
+
+
+void
+cl_thread_pool_destroy(
+	IN	cl_thread_pool_t* const	p_thread_pool )
+{
+	cl_thread_t	*p_thread;
+
+	CL_ASSERT( p_thread_pool );
+	CL_ASSERT( cl_is_state_valid( p_thread_pool->state ) );
+
+	/* Indicate to all threads that they need to exit. */
+	p_thread_pool->exit = TRUE;
+
+	/*
+	 * Signal the threads until they have all exited.  Signalling
+	 * once for each thread is not guaranteed to work since two events
+	 * could release only a single thread, depending on the rate at which
+	 * the events are set and how the thread scheduler processes notifications.
+	 */
+	while( p_thread_pool->running_count )
+	{
+		cl_event_signal( &p_thread_pool->wakeup_event );
+		/*
+		 * Wait for the destroy event to occur, indicating that the thread
+		 * has exited.
+		 */
+		cl_event_wait_on( &p_thread_pool->destroy_event,
+			2000000, TRUE );
+	}
+
+	/*
+	 * Stop each thread one at a time.
	 * Note that this cannot be done in the above for loop because signal
+	 * will wake up an unknown thread.
+	 */
+	if( cl_is_list_inited( &p_thread_pool->thread_list ) )
+	{
+		while( !cl_is_list_empty( &p_thread_pool->thread_list ) )
+		{
+			p_thread =
+				(cl_thread_t*)cl_list_remove_head( &p_thread_pool->thread_list );
+			cl_thread_destroy( p_thread );
+			cl_free( p_thread );
+		}
+	}
+
+	cl_event_destroy( &p_thread_pool->destroy_event );
+	cl_event_destroy( &p_thread_pool->wakeup_event );
+	cl_list_destroy( &p_thread_pool->thread_list );
+	p_thread_pool->state = CL_UNINITIALIZED;
+}
+
+
+cl_status_t
+cl_thread_pool_signal(
+	IN	cl_thread_pool_t* const	p_thread_pool )
+{
+	CL_ASSERT( p_thread_pool );
+	CL_ASSERT( p_thread_pool->state == CL_INITIALIZED );
+
+	return( cl_event_signal( &p_thread_pool->wakeup_event ) );
+}
diff --git a/branches/Ndi/core/complib/cl_vector.c b/branches/Ndi/core/complib/cl_vector.c
new file mode 100644
index 00000000..843d9f4e
--- /dev/null
+++ b/branches/Ndi/core/complib/cl_vector.c
@@ -0,0 +1,617 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	This file contains the cl_vector implementation.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#include <complib/cl_vector.h>
+#include <complib/cl_memory.h>
+
+
+/*
+ * Define the maximum size for array pages in a cl_vector_t.
+ * This size is in objects, not bytes.
+ */
+#define SVEC_MAX_PAGE_SIZE 0x1000
+
+
+
+/*
+ * cl_vector_copy_general
+ *
+ * Description:
+ *	copy operator used when the size of the user object doesn't fit one of
+ *	the other optimized copy functions.
+ *
+ * Inputs:
+ *	p_src - source for copy
+ *
+ * Outputs:
+ *	p_dest - destination for copy
+ *
+ * Returns:
+ *	None
+ *
+ */
+static void
+cl_vector_copy_general(
+	OUT	void* const			p_dest,
+	IN	const void* const	p_src,
+	IN	const size_t		size )
+{
+	cl_memcpy( p_dest, p_src, size );
+}
+
+
+/*
+ * cl_vector_copy8
+ *
+ * Description:
+ *	copy operator used when the user structure is only 8 bits long.
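+ *	(A plain assignment lets the compiler avoid a general memory copy.)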
+ * + * Inputs: + * p_src - source for copy + * + * Outputs: + * p_dest - destination for copy + * + * Returns: + * None + * + */ +static void +cl_vector_copy8( + OUT void* const p_dest, + IN const void* const p_src, + IN const size_t size ) +{ + CL_ASSERT( size == sizeof(uint8_t) ); + UNUSED_PARAM( size ); + + *(uint8_t*)p_dest = *(uint8_t*)p_src; +} + + +/* + * cl_vector_copy16 + * + * Description: + * copy operator used when the user structure is only 16 bits long. + * + * Inputs: + * p_src - source for copy + * + * Outputs: + * p_dest - destination for copy + * + * Returns: + * None + * + */ +void +cl_vector_copy16( + OUT void* const p_dest, + IN const void* const p_src, + IN const size_t size ) +{ + CL_ASSERT( size == sizeof(uint16_t) ); + UNUSED_PARAM( size ); + + *(uint16_t*)p_dest = *(uint16_t*)p_src; +} + + +/* + * cl_vector_copy32 + * + * Description: + * copy operator used when the user structure is only 32 bits long. + * + * Inputs: + * p_src - source for copy + * + * Outputs: + * p_dest - destination for copy + * + * Returns: + * None + * + */ +void +cl_vector_copy32( + OUT void* const p_dest, + IN const void* const p_src, + IN const size_t size ) +{ + CL_ASSERT( size == sizeof(uint32_t) ); + UNUSED_PARAM( size ); + + *(uint32_t*)p_dest = *(uint32_t*)p_src; +} + + +/* + * cl_vector_copy64 + * + * Description: + * copy operator used when the user structure is only 64 bits long. + * + * Inputs: + * p_src - source for copy + * + * Outputs: + * p_dest - destination for copy + * + * Returns: + * None + * + */ +void +cl_vector_copy64( + OUT void* const p_dest, + IN const void* const p_src, + IN const size_t size ) +{ + CL_ASSERT( size == sizeof(uint64_t) ); + UNUSED_PARAM( size ); + + *(uint64_t*)p_dest = *(uint64_t*)p_src; +} + + +void +cl_vector_construct( + IN cl_vector_t* const p_vector ) +{ + CL_ASSERT( p_vector ); + + cl_memclr( p_vector, sizeof(cl_vector_t) ); + + p_vector->state = CL_UNINITIALIZED; +} + + +cl_status_t +cl_vector_init( + IN cl_vector_t* const p_vector, + IN const size_t min_size, + IN const size_t grow_size, + IN const size_t element_size, + IN cl_pfn_vec_init_t pfn_init OPTIONAL, + IN cl_pfn_vec_dtor_t pfn_dtor OPTIONAL, + IN const void* const context ) +{ + cl_status_t status = CL_SUCCESS; + + CL_ASSERT( p_vector ); + CL_ASSERT( element_size ); + + cl_vector_construct( p_vector ); + + p_vector->grow_size = grow_size; + p_vector->element_size = element_size; + p_vector->pfn_init = pfn_init; + p_vector->pfn_dtor = pfn_dtor; + p_vector->context = context; + + /* + * Try to choose a smart copy operator + * someday, we could simply let the users pass one in + */ + switch( element_size ) + { + case sizeof(uint8_t): + p_vector->pfn_copy = cl_vector_copy8; + break; + + case sizeof(uint16_t): + p_vector->pfn_copy = cl_vector_copy16; + break; + + case sizeof(uint32_t): + p_vector->pfn_copy = cl_vector_copy32; + break; + + case sizeof(uint64_t): + p_vector->pfn_copy = cl_vector_copy64; + break; + + default: + p_vector->pfn_copy = cl_vector_copy_general; + break; + } + + /* + * Set the state to initialized so that the call to set_size + * doesn't assert. 
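+	 * (Both cl_vector_set_size and cl_vector_set_capacity assert that the
+	 * vector is in the CL_INITIALIZED state before doing any work.)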
+	 */
+	p_vector->state = CL_INITIALIZED;
+
+	/* Initialize the allocation list */
+	cl_qlist_init( &p_vector->alloc_list );
+
+	/* get the storage needed by the user */
+	if( min_size )
+	{
+		status = cl_vector_set_size( p_vector, min_size );
+		if( status != CL_SUCCESS )
+			cl_vector_destroy( p_vector );
+	}
+
+	return( status );
+}
+
+
+void
+cl_vector_destroy(
+	IN	cl_vector_t* const	p_vector )
+{
+	size_t	i;
+	void	*p_element;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( cl_is_state_valid( p_vector->state ) );
+
+	/* Call the user's destructor for each element in the array. */
+	if( p_vector->state == CL_INITIALIZED )
+	{
+		if( p_vector->pfn_dtor )
+		{
+			for( i = 0; i < p_vector->size; i++ )
+			{
+				p_element = p_vector->p_ptr_array[i];
+				/* Sanity check! */
+				CL_ASSERT( p_element );
+				p_vector->pfn_dtor( p_element, (void*)p_vector->context );
+			}
+		}
+
+		/* Deallocate the pages */
+		while( !cl_is_qlist_empty( &p_vector->alloc_list ) )
+			cl_free( cl_qlist_remove_head( &p_vector->alloc_list ) );
+
+		/* Destroy the page vector. */
+		if( p_vector->p_ptr_array )
+		{
+			cl_free( p_vector->p_ptr_array );
+			p_vector->p_ptr_array = NULL;
+		}
+	}
+
+	p_vector->state = CL_UNINITIALIZED;
+}
+
+
+cl_status_t
+cl_vector_at(
+	IN	const cl_vector_t* const	p_vector,
+	IN	const size_t				index,
+	OUT	void* const					p_element )
+{
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Range check */
+	if( index >= p_vector->size )
+		return( CL_INVALID_PARAMETER );
+
+	cl_vector_get( p_vector, index, p_element );
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_vector_set(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		index,
+	IN	void* const			p_element )
+{
+	cl_status_t	status;
+	void		*p_dest;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+	CL_ASSERT( p_element );
+
+	/* Determine if the vector has room for this element. */
+	if( index >= p_vector->size )
+	{
+		/* Resize to accommodate the given index. */
+		status = cl_vector_set_size( p_vector, index + 1 );
+
+		/* Check for failure on or before the given index. */
+		if( (status != CL_SUCCESS) && (p_vector->size < index) )
+			return( status );
+	}
+
+	/* At this point, the array is guaranteed to be big enough */
+	p_dest = cl_vector_get_ptr( p_vector, index );
+	/* Sanity check! */
+	CL_ASSERT( p_dest );
+
+	/* Copy the data into the array */
+	p_vector->pfn_copy( p_dest, p_element, p_vector->element_size );
+
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_vector_set_capacity(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		new_capacity )
+{
+	size_t			new_elements;
+	size_t			alloc_size;
+	size_t			i;
+	cl_list_item_t	*p_buf;
+	void			*p_new_ptr_array;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Do we have to do anything here? */
+	if( new_capacity <= p_vector->capacity )
+	{
+		/* Nope */
+		return( CL_SUCCESS );
+	}
+
+	/* Allocate our pointer array. */
+	p_new_ptr_array = cl_zalloc( new_capacity * sizeof(void*) );
+	if( !p_new_ptr_array )
+		return( CL_INSUFFICIENT_MEMORY );
+
+	if( p_vector->p_ptr_array )
+	{
+		/* Copy the old pointer array into the new. */
+		cl_memcpy( p_new_ptr_array, p_vector->p_ptr_array,
+			p_vector->capacity * sizeof(void*) );
+
+		/* Free the old pointer array. */
+		cl_free( p_vector->p_ptr_array );
+	}
+
+	/* Set the new array. */
+	p_vector->p_ptr_array = p_new_ptr_array;
+
+	/*
+	 * We have to add capacity to the array.  Determine how many
+	 * elements to add.
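+	 * Each growth allocates a single contiguous buffer that holds all of
+	 * the new elements, prefixed by a cl_list_item_t linking the buffer
+	 * into alloc_list so that cl_vector_destroy can free it later.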
+	 */
+	new_elements = new_capacity - p_vector->capacity;
+	/* Determine the allocation size for the new array elements. */
+	alloc_size = new_elements * p_vector->element_size;
+
+	p_buf = (cl_list_item_t*)cl_zalloc( alloc_size + sizeof(cl_list_item_t) );
+	if( !p_buf )
+		return( CL_INSUFFICIENT_MEMORY );
+
+	cl_qlist_insert_tail( &p_vector->alloc_list, p_buf );
+	/* Advance the buffer pointer past the list item. */
+	p_buf++;
+
+	for( i = p_vector->capacity; i < new_capacity; i++ )
+	{
+		p_vector->p_ptr_array[i] = p_buf;
+		/* Move the buffer pointer to the next element. */
+		p_buf = (void*)(((uint8_t*)p_buf) + p_vector->element_size);
+	}
+
+	/* Update the vector with the new capacity. */
+	p_vector->capacity = new_capacity;
+
+	return( CL_SUCCESS );
+}
+
+
+cl_status_t
+cl_vector_set_size(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		size )
+{
+	cl_status_t	status;
+	size_t		new_capacity;
+	size_t		index;
+	void		*p_element;
+
+	CL_ASSERT( p_vector );
+	CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+	/* Check to see if the requested size is the same as the existing size. */
+	if( size == p_vector->size )
+		return( CL_SUCCESS );
+
+	/* Determine if the vector has room for this element. */
+	if( size > p_vector->capacity )
+	{
+		if( !p_vector->grow_size )
+			return( CL_INSUFFICIENT_MEMORY );
+
+		/* Calculate the new capacity, taking into account the grow size. */
+		new_capacity = size;
+		if( size % p_vector->grow_size )
+		{
+			/* Round up to nearest grow_size boundary. */
+			new_capacity += p_vector->grow_size -
+				(size % p_vector->grow_size);
+		}
+
+		status = cl_vector_set_capacity( p_vector, new_capacity );
+		if( status != CL_SUCCESS )
+			return( status );
+	}
+
+	/* Are we growing the array and need to invoke an initializer callback? */
+	if( size > p_vector->size && p_vector->pfn_init )
+	{
+		for( index = p_vector->size; index < size; index++ )
+		{
+			/* Get a pointer to this element */
+			p_element = cl_vector_get_ptr( p_vector, index );
+
+			/* Call the user's initializer and trap failures. */
+			status = p_vector->pfn_init( p_element, (void*)p_vector->context );
+			if( status != CL_SUCCESS )
+			{
+				/* Call the destructor for this object */
+				if( p_vector->pfn_dtor )
+					p_vector->pfn_dtor( p_element, (void*)p_vector->context );
+
+				/* Return the failure status to the caller. */
+				return( status );
+			}
+
+			/* The array just grew by one element */
+			p_vector->size++;
+		}
+	}
+	else if( p_vector->pfn_dtor )
+	{
+		/* The array is shrinking and there is a destructor to invoke.
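+		 * Note that shrinking only reduces the logical size; the capacity
+		 * and its memory are retained for reuse and released only by
+		 * cl_vector_destroy.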
*/ + for( index = size; index < p_vector->size; index++ ) + { + /* compute the address of the new elements */ + p_element = cl_vector_get_ptr( p_vector, index ); + /* call the user's destructor */ + p_vector->pfn_dtor( p_element, (void*)p_vector->context ); + } + } + + p_vector->size = size; + return( CL_SUCCESS ); +} + + +cl_status_t +cl_vector_set_min_size( + IN cl_vector_t* const p_vector, + IN const size_t min_size ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + if( min_size > p_vector->size ) + { + /* We have to resize the array */ + return( cl_vector_set_size( p_vector, min_size ) ); + } + + /* We didn't have to do anything */ + return( CL_SUCCESS ); +} + + +void +cl_vector_apply_func( + IN const cl_vector_t* const p_vector, + IN cl_pfn_vec_apply_t pfn_callback, + IN const void* const context ) +{ + size_t i; + void *p_element; + + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + CL_ASSERT( pfn_callback ); + + for( i = 0; i < p_vector->size; i++ ) + { + p_element = cl_vector_get_ptr( p_vector, i ); + pfn_callback( i, p_element, (void*)context ); + } +} + + +size_t +cl_vector_find_from_start( + IN const cl_vector_t* const p_vector, + IN cl_pfn_vec_find_t pfn_callback, + IN const void* const context ) +{ + size_t i; + void *p_element; + + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + CL_ASSERT( pfn_callback ); + + for( i = 0; i < p_vector->size; i++ ) + { + p_element = cl_vector_get_ptr( p_vector, i ); + /* Invoke the callback */ + if( pfn_callback( i, p_element, (void*)context ) == CL_SUCCESS ) + break; + } + return( i ); +} + + +size_t +cl_vector_find_from_end( + IN const cl_vector_t* const p_vector, + IN cl_pfn_vec_find_t pfn_callback, + IN const void* const context ) +{ + size_t i; + void *p_element; + + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + CL_ASSERT( pfn_callback ); + + i = p_vector->size; + + while( i ) + { + /* Get a pointer to the element in the array. */ + p_element = cl_vector_get_ptr( p_vector, --i ); + CL_ASSERT( p_element ); + + /* Invoke the callback for the current element. 
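+		 * A CL_SUCCESS return from the callback means "found"; the index
+		 * of the matching element is reported back to the caller.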
*/ + if( pfn_callback( i, p_element, (void*)context ) == CL_SUCCESS ) + return( i ); + } + + return( p_vector->size ); +} + + + diff --git a/branches/Ndi/core/complib/dirs b/branches/Ndi/core/complib/dirs new file mode 100644 index 00000000..ddf0ed7d --- /dev/null +++ b/branches/Ndi/core/complib/dirs @@ -0,0 +1,3 @@ +DIRS=\ + user \ + kernel diff --git a/branches/Ndi/core/complib/kernel/SOURCES b/branches/Ndi/core/complib/kernel/SOURCES new file mode 100644 index 00000000..dd385750 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/SOURCES @@ -0,0 +1,32 @@ +TARGETNAME=complib +TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER_LIBRARY + +SOURCES= \ + cl_driver.c \ + cl_event.c \ + cl_log.c \ + cl_memory_osd.c \ + cl_syscallback.c \ + cl_thread.c \ + cl_timer.c \ + cl_pnp_po.c \ + cl_bus_ifc.c \ + ..\cl_async_proc.c \ + ..\cl_list.c \ + ..\cl_map.c \ + ..\cl_memory.c \ + ..\cl_obj.c \ + ..\cl_perf.c \ + ..\cl_pool.c \ + ..\cl_ptr_vector.c \ + ..\cl_reqmgr.c \ + ..\cl_statustext.c \ + ..\cl_threadpool.c \ + ..\cl_vector.c + +INCLUDES=..\..\..\inc;..\..\..\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/core/complib/kernel/cl_bus_ifc.c b/branches/Ndi/core/complib/kernel/cl_bus_ifc.c new file mode 100644 index 00000000..6c0810dd --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_bus_ifc.c @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#include + + +/* Forwards the request to the HCA's PDO. */ +NTSTATUS +cl_fwd_query_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ) +{ + NTSTATUS status; + IRP *p_irp; + IO_STATUS_BLOCK io_status; + IO_STACK_LOCATION *p_fwd_io_stack; + DEVICE_OBJECT *p_target; + KEVENT event; + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + CL_ASSERT( p_io_stack->MinorFunction == IRP_MN_QUERY_INTERFACE ); + + p_target = IoGetAttachedDeviceReference( p_dev_obj ); + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Build the IRP for the HCA. */ + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_target, + NULL, 0, NULL, &event, &io_status ); + if( !p_irp ) + { + ObDereferenceObject( p_target ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Copy the request query parameters. 
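+	 * The IRP status is primed to STATUS_NOT_SUPPORTED so that the query
+	 * fails cleanly, per PnP rules, if no driver in the stack handles
+	 * IRP_MN_QUERY_INTERFACE.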
*/ + p_fwd_io_stack = IoGetNextIrpStackLocation( p_irp ); + p_fwd_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE; + p_fwd_io_stack->Parameters.QueryInterface = + p_io_stack->Parameters.QueryInterface; + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( p_target, p_irp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = io_status.Status; + } + + ObDereferenceObject( p_target ); + return status; +} diff --git a/branches/Ndi/core/complib/kernel/cl_driver.c b/branches/Ndi/core/complib/kernel/cl_driver.c new file mode 100644 index 00000000..2efa9876 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_driver.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "complib/comp_lib.h" + + +CL_EXPORT NTSTATUS +cl_to_ntstatus( + IN cl_status_t status ) +{ + switch( status ) + { + case CL_SUCCESS: + return STATUS_SUCCESS; + case CL_ERROR: + return STATUS_DRIVER_INTERNAL_ERROR; + case CL_INVALID_STATE: + return STATUS_INVALID_DEVICE_STATE; + case CL_INVALID_OPERATION: + return STATUS_NOT_SUPPORTED; + case CL_INVALID_SETTING: + return STATUS_INVALID_PARAMETER_1; + case CL_INVALID_PARAMETER: + return STATUS_INVALID_PARAMETER; + case CL_INSUFFICIENT_RESOURCES: + return STATUS_INSUFFICIENT_RESOURCES; + case CL_INSUFFICIENT_MEMORY: + return STATUS_NO_MEMORY; + case CL_INVALID_PERMISSION: + return STATUS_ACCESS_DENIED; + case CL_COMPLETED: + return STATUS_EVENT_DONE; + case CL_NOT_DONE: + return STATUS_ABANDONED; + case CL_PENDING: + return STATUS_PENDING; + case CL_TIMEOUT: + return STATUS_TIMEOUT; + case CL_CANCELED: + return STATUS_CANCELLED; + case CL_REJECT: + return STATUS_REQUEST_NOT_ACCEPTED; + case CL_OVERRUN: + return STATUS_DATA_OVERRUN; + case CL_NOT_FOUND: + return STATUS_NOT_FOUND; + case CL_UNAVAILABLE: + return STATUS_DEVICE_NOT_READY; + case CL_BUSY: + return STATUS_DEVICE_BUSY; + case CL_DISCONNECT: + return STATUS_LOCAL_DISCONNECT; + case CL_DUPLICATE: + return STATUS_DUPLICATE_NAME; + case CL_INVALID_REQUEST: + return STATUS_INVALID_DEVICE_REQUEST; + default: + return STATUS_UNSUCCESSFUL; + } +} + + +CL_EXPORT cl_status_t +cl_from_ntstatus( + IN NTSTATUS status ) +{ + switch( status ) + { + case STATUS_SUCCESS: + return CL_SUCCESS; + case STATUS_DRIVER_INTERNAL_ERROR: + return CL_ERROR; + case STATUS_INVALID_DEVICE_STATE: + return CL_INVALID_STATE; + case STATUS_NOT_SUPPORTED: + return CL_INVALID_OPERATION; + case STATUS_INVALID_PARAMETER_1: + return CL_INVALID_SETTING; + case STATUS_INVALID_PARAMETER: + return CL_INVALID_PARAMETER; + case STATUS_INSUFFICIENT_RESOURCES: + return CL_INSUFFICIENT_RESOURCES; + case STATUS_NO_MEMORY: + return CL_INSUFFICIENT_MEMORY; + case STATUS_ACCESS_DENIED: + return CL_INVALID_PERMISSION; + case STATUS_EVENT_DONE: + return CL_COMPLETED; + case STATUS_ABANDONED: + return CL_NOT_DONE; + case STATUS_PENDING: + return CL_PENDING; + case STATUS_TIMEOUT: + return CL_TIMEOUT; + case STATUS_CANCELLED: + return CL_CANCELED; + case STATUS_REQUEST_NOT_ACCEPTED: + return CL_REJECT; + case STATUS_DATA_OVERRUN: + return CL_OVERRUN; + case STATUS_NOT_FOUND: + return CL_NOT_FOUND; + case STATUS_DEVICE_NOT_READY: + return CL_UNAVAILABLE; + case STATUS_DEVICE_BUSY: + return CL_BUSY; + case STATUS_LOCAL_DISCONNECT: + return CL_DISCONNECT; + case STATUS_DUPLICATE_NAME: + return CL_DUPLICATE; + case STATUS_INVALID_DEVICE_REQUEST: + return CL_INVALID_REQUEST; + default: + return CL_ERROR; + } +} diff --git a/branches/Ndi/core/complib/kernel/cl_event.c b/branches/Ndi/core/complib/kernel/cl_event.c new file mode 100644 index 00000000..07f9790d --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_event.c @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "complib/cl_event.h" + + +cl_status_t +cl_event_wait_on( + IN cl_event_t* const p_event, + IN const uint32_t wait_us, + IN const boolean_t interruptible ) +{ + NTSTATUS status; + LARGE_INTEGER wait; + + CL_ASSERT( p_event ); + + if( wait_us == EVENT_NO_TIMEOUT ) + { + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + status = KeWaitForSingleObject( p_event, Executive, KernelMode, + (BOOLEAN)interruptible, NULL ); + } + else + { + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + /* Timeout is in 100 ns increments. */ + wait.QuadPart = -(int64_t)(((uint64_t)wait_us) * 10); + status = KeWaitForSingleObject( p_event, Executive, KernelMode, + (BOOLEAN)interruptible, &wait ); + } + + switch( status ) + { + case STATUS_SUCCESS: + return( CL_SUCCESS ); + case STATUS_USER_APC: + return( CL_NOT_DONE ); + case STATUS_TIMEOUT: + return( CL_TIMEOUT ); + case STATUS_ALERTED: + default: + return( CL_ERROR ); + } +} diff --git a/branches/Ndi/core/complib/kernel/cl_exports.def b/branches/Ndi/core/complib/kernel/cl_exports.def new file mode 100644 index 00000000..7d05000b --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_exports.def @@ -0,0 +1,7 @@ +LIBRARY complib.sys + +EXPORTS +; DllInitialize and DllUnload must be exported for the OS reference counting to +; work, and must be private for the compiler to accept them. +DllInitialize private +DllUnload private diff --git a/branches/Ndi/core/complib/kernel/cl_log.c b/branches/Ndi/core/complib/kernel/cl_log.c new file mode 100644 index 00000000..399f2d72 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_log.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "complib/cl_log.h" + + +/* + * The IO Object required to allocate an event log entry is passed in + * via the "name" parameter. + */ +void +cl_log_event( + IN const char* const name, + IN const cl_log_type_t type, + IN const char* const p_message, + IN const void* const p_data, + IN const uint32_t data_len ) +{ + UNUSED_PARAM( name ); + UNUSED_PARAM( type ); + UNUSED_PARAM( p_message ); + UNUSED_PARAM( p_data ); + UNUSED_PARAM( data_len ); + /* + * To log errors requires setting up custom error strings and registering + * them with the system. Do this later. + */ + //IO_ERROR_LOG_PACKET *p_entry; + //size_t size = sizeof(IO_ERROR_LOG_PACKET); + //UCHAR *p_dump_data; + //WCHAR *p_str; + + //if( p_message ) + // size += strlen( p_message ); + + //size += data_len; + + //if( size > ERROR_LOG_MAXIMUM_SIZE ) + // return; + + //p_entry = IoAllocateErrorLogEntry( name, (UCHAR)size ); + //if( !p_entry ) + // return; + + //cl_memclr( p_entry, size ); + + //p_dump_data = p_entry->DumpData; + + ///* Copy the string to the dump data. */ + //if( p_message ) + //{ + // cl_memcpy( p_dump_data, p_message, strlen( p_message ) + 1 ); + // p_dump_data += strlen( p_message ) + 1; + //} + + //if( data_len ) + // cl_memcpy( p_dump_data, p_data, data_len ); + + //switch( type ) + //{ + //case CL_LOG_ERROR: + // p_entry->ErrorCode = STATUS_UNSUCCESSFUL; + // break; + + //case CL_LOG_WARN: + // p_entry->ErrorCode = STATUS_UNSUCCESSFUL; + // break; + + //default: + //case CL_LOG_INFO: + // p_entry->ErrorCode = STATUS_SERVICE_NOTIFICATION; + // break; + //} +} diff --git a/branches/Ndi/core/complib/kernel/cl_memory_osd.c b/branches/Ndi/core/complib/kernel/cl_memory_osd.c new file mode 100644 index 00000000..a206cbf9 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_memory_osd.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "complib/cl_memory.h" + + +void* +__cl_malloc_priv( + IN const size_t size, + IN const boolean_t pageable ) +{ + if( pageable ) + { + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + return( ExAllocatePool( PagedPool, size ) ); + } + else + { + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + return( ExAllocatePool( NonPagedPool, size ) ); + } +} + + +void +__cl_free_priv( + IN void* const p_memory ) +{ + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + ExFreePool( p_memory ); +} diff --git a/branches/Ndi/core/complib/kernel/cl_pnp_po.c b/branches/Ndi/core/complib/kernel/cl_pnp_po.c new file mode 100644 index 00000000..74098023 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_pnp_po.c @@ -0,0 +1,1417 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "complib/cl_pnp_po.h" +#include "complib/cl_debug.h" +#include "complib/cl_atomic.h" + + +static NTSTATUS +__start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__cancel_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__cancel_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_pnp_state( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__device_usage_notification( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_device_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__query_device_text( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + + +/* All PnP code is called at passive, so it can all be paged out. */ +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, cl_init_pnp_po_ext) +#pragma alloc_text (PAGE, cl_pnp) +#pragma alloc_text (PAGE, __start) +#pragma alloc_text (PAGE, __query_stop) +#pragma alloc_text (PAGE, __stop) +#pragma alloc_text (PAGE, __cancel_stop) +#pragma alloc_text (PAGE, __query_remove) +#pragma alloc_text (PAGE, __remove) +#pragma alloc_text (PAGE, __cancel_remove) +#pragma alloc_text (PAGE, __surprise_remove) +#pragma alloc_text (PAGE, __query_pnp_state) +#pragma alloc_text (PAGE, __device_usage_notification) +#pragma alloc_text (PAGE, __query_device_relations) +#pragma alloc_text (PAGE, __query_id) +#pragma alloc_text (PAGE, __query_device_text) +#pragma alloc_text (PAGE, cl_do_sync_pnp) +#pragma alloc_text (PAGE_PNP, cl_irp_skip) +#pragma alloc_text (PAGE_PNP, cl_irp_complete) +#pragma alloc_text (PAGE_PNP, cl_irp_succeed) +#pragma alloc_text (PAGE_PNP, cl_irp_unsupported) +#endif + + +void +cl_init_pnp_po_ext( + IN OUT DEVICE_OBJECT* const p_dev_obj, + IN DEVICE_OBJECT* const p_next_do, + IN DEVICE_OBJECT* const p_pdo, + IN const uint32_t pnp_po_dbg_lvl, + IN const cl_vfptr_pnp_po_t* const vfptr_pnp_po, + IN const cl_vfptr_query_txt_t* const vfptr_query_txt OPTIONAL ) +{ + cl_pnp_po_ext_t *p_ext; + + CL_ENTER( CL_DBG_PNP, pnp_po_dbg_lvl ); + + p_ext = p_dev_obj->DeviceExtension; + + p_ext->dbg_lvl = pnp_po_dbg_lvl; + + /* Store the pointer to our own device. */ + p_ext->p_self_do = p_dev_obj; + IoInitializeRemoveLock( &p_ext->remove_lock, 'bilc', 15, 1000 ); + + /* Initialize the PnP states. 
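+	 * Tracking last_pnp_state allows the cancel handlers
+	 * (IRP_MN_CANCEL_STOP_DEVICE and IRP_MN_CANCEL_REMOVE_DEVICE) to roll
+	 * the device back to the state it held before the query.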
+	 */
+	p_ext->pnp_state = NotStarted;
+	p_ext->last_pnp_state = NotStarted;
+
+	/* Store the pointer to the next device in the stack. */
+	p_ext->p_next_do = p_next_do;
+
+	/* Store the pointer to the underlying PDO. */
+	p_ext->p_pdo = p_pdo;
+
+	/* Store the PnP virtual function pointer table. */
+	p_ext->vfptr_pnp_po = vfptr_pnp_po;
+
+	/* Store the query text virtual function pointer table. */
+	p_ext->vfptr_query_txt = vfptr_query_txt;
+
+	/*
+	 * Mark power routines as pageable.  This changes when the device is
+	 * notified of being in the paging path.
+	 */
+	p_dev_obj->Flags |= DO_POWER_PAGABLE;
+
+	/* Clear the initializing flag before returning. */
+	p_dev_obj->Flags &= ~DO_DEVICE_INITIALIZING;
+
+	CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl );
+}
+
+
+NTSTATUS
+cl_pnp(
+	IN	PDEVICE_OBJECT	p_dev_obj,
+	IN	PIRP			p_irp )
+{
+	NTSTATUS			status;
+	IO_STACK_LOCATION	*p_io_stack;
+	cl_pnp_po_ext_t		*p_ext;
+	cl_irp_action_t		action;
+
+	p_ext = p_dev_obj->DeviceExtension;
+
+	CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl );
+
+	CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+		("PDO %p, ext %p\n", p_dev_obj, p_ext) );
+
+	CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL );
+
+	status = IoAcquireRemoveLock( &p_ext->remove_lock, p_irp );
+	if( !NT_SUCCESS( status ) )
+	{
+		CL_TRACE_EXIT( CL_DBG_ERROR, p_ext->dbg_lvl,
+			("IoAcquireRemoveLock returned %08x.\n", status) );
+		p_irp->IoStatus.Status = status;
+		IoCompleteRequest( p_irp, IO_NO_INCREMENT );
+		return status;
+	}
+
+	p_io_stack = IoGetCurrentIrpStackLocation( p_irp );
+	ASSERT( p_io_stack->MajorFunction == IRP_MJ_PNP );
+
+	switch( p_io_stack->MinorFunction )
+	{
+	case IRP_MN_START_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_START_DEVICE for %s\n", p_ext->vfptr_pnp_po->identity) );
+		status = __start( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_QUERY_STOP_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_QUERY_STOP_DEVICE for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = __query_stop( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_STOP_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_STOP_DEVICE for %s\n", p_ext->vfptr_pnp_po->identity) );
+		status = __stop( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_CANCEL_STOP_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_CANCEL_STOP_DEVICE for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = __cancel_stop( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_QUERY_REMOVE_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_QUERY_REMOVE_DEVICE for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = __query_remove( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_REMOVE_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_REMOVE_DEVICE for %s\n", p_ext->vfptr_pnp_po->identity) );
+		status = __remove( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_CANCEL_REMOVE_DEVICE:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_CANCEL_REMOVE_DEVICE for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = __cancel_remove( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_SURPRISE_REMOVAL:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_SURPRISE_REMOVAL for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = __surprise_remove( p_dev_obj, p_irp, &action );
+		break;
+
+	case IRP_MN_QUERY_CAPABILITIES:
+		CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl,
+			("IRP_MN_QUERY_CAPABILITIES for %s\n",
+			p_ext->vfptr_pnp_po->identity) );
+		status = p_ext->vfptr_pnp_po->pfn_query_capabilities(
+			p_dev_obj, p_irp, &action );
+		break;
+
+	case
IRP_MN_QUERY_PNP_DEVICE_STATE: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_PNP_DEVICE_STATE for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = __query_pnp_state( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_FILTER_RESOURCE_REQUIREMENTS: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_FILTER_RESOURCE_REQUIREMENTS for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_filter_res_req( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_DEVICE_USAGE_NOTIFICATION: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_DEVICE_USAGE_NOTIFICATION for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = __device_usage_notification( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_DEVICE_RELATIONS: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_DEVICE_RELATIONS for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = __query_device_relations( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_RESOURCES: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_QUERY_RESOURCES for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_resources( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_RESOURCE_REQUIREMENTS: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_RESOURCE_REQUIREMENTS for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_res_req( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_ID: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_ID for %s\n", p_ext->vfptr_pnp_po->identity) ); + status = __query_id( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_DEVICE_TEXT: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_DEVICE_TEXT for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = __query_device_text( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_BUS_INFORMATION: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_QUERY_BUS_INFORMATION for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_bus_info( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_QUERY_INTERFACE: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_QUERY_INTERFACE for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_interface( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_READ_CONFIG: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_READ_CONFIG for %s\n", p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_read_config( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_WRITE_CONFIG: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_WRITE_CONFIG for %s\n", p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_write_config( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_EJECT: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_EJECT for %s\n", p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_eject( + p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_SET_LOCK: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_SET_LOCK for %s\n", p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_set_lock( + p_dev_obj, p_irp, &action ); + break; + + default: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, + ("Unknown IRP minor function 0x%x for %s\n", + p_io_stack->MinorFunction, p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_unknown( + p_dev_obj, p_irp, &action ); + break; + } + + switch( action ) + { + case IrpPassDown: + 
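+		/*
+		 * The minor-function handler returned one of five dispositions
+		 * in action:  IrpPassDown copies a fresh stack location for the
+		 * next driver; IrpSkip sets IoStatus and then falls through to
+		 * IrpIgnore, which reuses this driver's stack location without
+		 * touching IoStatus; IrpComplete completes the IRP here with
+		 * the handler's status; IrpDoNothing means the handler kept
+		 * ownership of the IRP (it was pended or completed elsewhere).
+		 */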
p_irp->IoStatus.Status = status; + IoCopyCurrentIrpStackLocationToNext( p_irp ); + status = IoCallDriver( p_ext->p_next_do, p_irp ); + break; + + case IrpSkip: + p_irp->IoStatus.Status = status; + + case IrpIgnore: + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->p_next_do, p_irp ); + break; + + case IrpComplete: + p_irp->IoStatus.Status = status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + break; + + case IrpDoNothing: + break; + } + + if( action != IrpDoNothing ) + IoReleaseRemoveLock( &p_ext->remove_lock, p_irp ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + status = p_ext->vfptr_pnp_po->pfn_start( p_dev_obj, p_irp, p_action ); + if( NT_SUCCESS( status ) ) + cl_set_pnp_state( p_ext, Started ); + + /* + * If we get the start request when we're already started, don't + * re-initialize the stop lock. + */ + if( p_ext->last_pnp_state != Started ) + IoInitializeRemoveLock( &p_ext->stop_lock, 'dtci', 0, 1000 ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + /* + * We must fail the query if there are any paging, dump, or hibernation + * files on the device. + */ + if( p_ext->n_crash_files || + p_ext->n_hibernate_files || + p_ext->n_paging_files ) + { + *p_action = IrpComplete; + /* Fail the request. */ + CL_TRACE_EXIT( CL_DBG_PNP, p_ext->dbg_lvl, + ("Failing IRP_MN_QUERY_STOP_DEVICE - device %s has:\n" + "\t\t%d paging files\n\t\t%d crash files\n" + "\t\t%d hibernate files\n", p_ext->vfptr_pnp_po->identity, + p_ext->n_paging_files, p_ext->n_crash_files, + p_ext->n_hibernate_files) ); + return STATUS_UNSUCCESSFUL; + } + + /* + * Mark the device as stop pending so that all new non-_PnP and non-_Power + * IRPs get queued or failed. + */ + cl_set_pnp_state( p_ext, StopPending ); + + if( p_ext->last_pnp_state == Started ) + { + /* Acquire the lock so we can release and wait. */ + IoAcquireRemoveLock( &p_ext->stop_lock, p_irp ); + /* Wait for all IO operations to complete. */ + IoReleaseRemoveLockAndWait( &p_ext->stop_lock, p_irp ); + } + + status = p_ext->vfptr_pnp_po->pfn_query_stop( p_dev_obj, p_irp, p_action ); + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + ASSERT( p_ext->pnp_state == StopPending ); + + cl_set_pnp_state( p_ext, Stopped ); + + status = p_ext->vfptr_pnp_po->pfn_stop( p_dev_obj, p_irp, p_action ); + + /* Release resources. 
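+	 * The hook is supplied by the client driver through the vfptr table.
+	 * A minimal sketch of such a handler - my_dev_ext_t and its p_io_buf
+	 * member are illustrative names, not part of complib:
+	 *
+	 *	static void
+	 *	my_release_resources(
+	 *		IN	DEVICE_OBJECT* const	p_dev_obj )
+	 *	{
+	 *		my_dev_ext_t	*p_ext = p_dev_obj->DeviceExtension;
+	 *
+	 *		if( p_ext->p_io_buf )
+	 *		{
+	 *			cl_free( p_ext->p_io_buf );
+	 *			p_ext->p_io_buf = NULL;
+	 *		}
+	 *	}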
*/ + if( p_ext->vfptr_pnp_po->pfn_release_resources ) + p_ext->vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__cancel_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t* p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + /* Call the device specific handler. */ + status = p_ext->vfptr_pnp_po->pfn_cancel_stop( p_dev_obj, p_irp, p_action ); + ASSERT( NT_SUCCESS(status) ); + + /* + * If we were never stopped (a higher level driver failed the + * IRP_MN_QUERY_STOP but passed down the cancel), just return. + */ + if( p_ext->pnp_state != StopPending ) + { + CL_TRACE_EXIT( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_CANCEL_STOP_DEVICE received in invalid state.\n") ); + return status; + } + + if( p_ext->last_pnp_state == Started ) + { + /* + * Re-initialize the remove lock before rolling back the PnP + * state so that there's no contention while it's uninitialized. + */ + IoInitializeRemoveLock( &p_ext->stop_lock, 'dtci', 0, 1000 ); + /* + * Acquire the stop lock to allow releasing and waiting when stopping. + */ + IoAcquireRemoveLock( &p_ext->stop_lock, NULL ); + } + + /* Return to the previous PnP state. */ + cl_rollback_pnp_state( p_ext ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + /* + * We must fail the query if there are any paging, dump, or hibernation + * files on the device. + */ + if( p_ext->n_crash_files || + p_ext->n_hibernate_files || + p_ext->n_paging_files ) + { + *p_action = IrpComplete; + /* Fail the request. */ + CL_TRACE_EXIT( CL_DBG_PNP, p_ext->dbg_lvl, + ("Failing IRP_MN_QUERY_REMOVE_DEVICE - device has:\n" + "\t\t%d paging files\n\t\t%d crash files\n" + "\t\t%d hibernate files\n", p_ext->n_paging_files, + p_ext->n_crash_files, p_ext->n_hibernate_files) ); + return STATUS_UNSUCCESSFUL; + } + + /* We fail the query if we have any interface outstanding. */ + if( p_ext->n_ifc_ref ) + { + *p_action = IrpComplete; + CL_TRACE_EXIT( CL_DBG_PNP, p_ext->dbg_lvl, + ("Failing IRP_MN_QUERY_REMOVE_DEVICE - interface ref count: %d\n", + p_ext->n_ifc_ref) ); + return STATUS_UNSUCCESSFUL; + } + + /* + * Mark the device as remove pending so that all new non-PnP and + * non-Power IRPs get queued or failed. + */ + cl_set_pnp_state( p_ext, RemovePending ); + + /* Call type specific handler. */ + status = + p_ext->vfptr_pnp_po->pfn_query_remove( p_dev_obj, p_irp, p_action ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + ASSERT( p_ext->pnp_state == NotStarted || + p_ext->pnp_state == Started || + p_ext->pnp_state == RemovePending || + p_ext->pnp_state == SurpriseRemoved ); + + /* Set the device state. 
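+	 * The type-specific pfn_remove handler does the heavy lifting.  For
+	 * an FDO it can usually forward straight to cl_do_remove, defined
+	 * below - a sketch of the assumed wiring:
+	 *
+	 *	static NTSTATUS
+	 *	my_fdo_remove(
+	 *		IN	DEVICE_OBJECT* const	p_dev_obj,
+	 *		IN	IRP* const				p_irp,
+	 *		OUT	cl_irp_action_t* const	p_action )
+	 *	{
+	 *		return cl_do_remove( p_dev_obj, p_irp, p_action );
+	 *	}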
*/ + cl_set_pnp_state( p_ext, Deleted ); + + status = p_ext->vfptr_pnp_po->pfn_remove( p_dev_obj, p_irp, p_action ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +NTSTATUS +cl_do_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + /* Wait for all I/O operations to complete. */ + IoReleaseRemoveLockAndWait( &p_ext->remove_lock, p_irp ); + + /* Release resources if it was not done yet. */ + if( p_ext->last_pnp_state != SurpriseRemoved && + p_ext->last_pnp_state != Stopped && + p_ext->vfptr_pnp_po->pfn_release_resources ) + { + p_ext->vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + } + + /* Set the IRP status. */ + p_irp->IoStatus.Status = STATUS_SUCCESS; + + /* Pass the IRP down. */ + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->p_next_do, p_irp ); + *p_action = IrpDoNothing; + + /* Detach and destroy the device. */ + IoDetachDevice( p_ext->p_next_do ); + IoDeleteDevice( p_dev_obj ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__cancel_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + status = + p_ext->vfptr_pnp_po->pfn_cancel_remove( p_dev_obj, p_irp, p_action ); + ASSERT( NT_SUCCESS(status) ); + + if( p_ext->pnp_state != RemovePending ) + { + CL_TRACE_EXIT( CL_DBG_PNP, p_ext->dbg_lvl, + ("IRP_MN_CANCEL_REMOVE_DEVICE received in invalid state.\n") ); + return status; + } + + cl_rollback_pnp_state( p_ext ); + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + cl_set_pnp_state( p_ext, SurpriseRemoved ); + + /* Call handler before releasing resources. */ + status = + p_ext->vfptr_pnp_po->pfn_surprise_remove( p_dev_obj, p_irp, p_action ); + + /* Release resources. */ + if( p_ext->last_pnp_state != Stopped && + p_ext->vfptr_pnp_po->pfn_release_resources ) + { + p_ext->vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + } + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_pnp_state( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + /* + * Flag the device as not removable if there are any special files on it. + */ + if( p_ext->n_paging_files || + p_ext->n_crash_files || + p_ext->n_hibernate_files ) + { + p_irp->IoStatus.Information |= PNP_DEVICE_NOT_DISABLEABLE; + } + + status = + p_ext->vfptr_pnp_po->pfn_query_pnp_state( p_dev_obj, p_irp, p_action ); + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static inline void +__lock_po_code( + IN OUT cl_pnp_po_ext_t* const p_ext ) +{ + if( !p_ext->h_cl_locked_section ) + { + /* + * No handle exists. This is the first lock. Once locked, the + * handle is valid as long as the driver is loaded. Lock any + * function in the PAGE_PNP section. 
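+	 * The functions locked here were placed in pageable sections by the
+	 * alloc_text pragmas at the top of the file; MmLockPagableCodeSection
+	 * makes the entire containing section resident, which is why a single
+	 * representative address suffices.  The general idiom:
+	 *
+	 *	#pragma alloc_text (PAGE_PNP, cl_irp_skip)
+	 *	...
+	 *	h = MmLockPagableCodeSection( cl_irp_skip );
+	 *	...section may now be called at elevated IRQL...
+	 *	MmUnlockPagableImageSection( h );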
+ */ +#pragma warning( push, 3 ) + p_ext->h_cl_locked_section = MmLockPagableCodeSection( cl_power ); + /* TODO: Pick first non-CL function */ + p_ext->h_user_locked_section = MmLockPagableCodeSection( + p_ext->vfptr_pnp_po->pfn_set_power ); +#pragma warning( pop ) + } + else + { + /* Handle already exists. Locking by handle is faster. */ + MmLockPagableSectionByHandle( p_ext->h_cl_locked_section ); + if( p_ext->h_user_locked_section ) + MmLockPagableSectionByHandle( p_ext->h_user_locked_section ); + } +} + + +static inline void +__unlock_po_code( + IN OUT cl_pnp_po_ext_t* const p_ext ) +{ + ASSERT( p_ext->h_cl_locked_section ); + MmUnlockPagableImageSection( p_ext->h_cl_locked_section ); + if( p_ext->h_user_locked_section ) + MmUnlockPagableImageSection( p_ext->h_user_locked_section ); +} + + +static NTSTATUS +__device_usage_notification( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + cl_pnp_po_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + atomic32_t *p_val; + NTSTATUS status; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + switch( p_io_stack->Parameters.UsageNotification.Type ) + { + case DeviceUsageTypePaging: + p_val = &p_ext->n_paging_files; + break; + + case DeviceUsageTypeDumpFile: + p_val = &p_ext->n_crash_files; + break; + + case DeviceUsageTypeHibernation: + p_val = &p_ext->n_hibernate_files; + break; + + default: + CL_TRACE_EXIT( CL_DBG_ERROR, p_ext->dbg_lvl, + ("Invalid notification type.\n") ); + return STATUS_INVALID_PARAMETER; + } + + if( p_io_stack->Parameters.UsageNotification.InPath ) + { + cl_atomic_inc( p_val ); + __lock_po_code( p_ext ); + } + else + { + __unlock_po_code( p_ext ); + cl_atomic_dec( p_val ); + } + + /* + * Set the flag in the device extension to indicate that power management + * can happen at elevated IRQL. + */ + if( p_ext->n_paging_files || + p_ext->n_crash_files || + p_ext->n_hibernate_files ) + { + p_dev_obj->Flags &= ~DO_POWER_PAGABLE; + } + else + { + p_dev_obj->Flags |= DO_POWER_PAGABLE; + } + + /* Call type specific (FDO, PDO) function for propagating the IRP. */ + status = p_ext->vfptr_pnp_po->pfn_dev_usage_notification( + p_dev_obj, p_irp, p_action ); + + if( NT_SUCCESS( status ) ) + { + /* Notify the PnP manager that the device state may have changed. */ + IoInvalidateDeviceState( p_ext->p_pdo ); + } + else + { + /* Propagation failed. Undo. */ + if( p_io_stack->Parameters.UsageNotification.InPath ) + { + /* Someone does not support the type of special file requested. */ + __unlock_po_code( p_ext ); + cl_atomic_dec( p_val ); + } + else + { + /* + * Someone failed the notification for the removal of a special + * file. This is unlikely to happen, but handle it anyway. + */ + cl_atomic_inc( p_val ); + __lock_po_code( p_ext ); + } + + /* + * Set the flag in the device extension to indicate that power + * management can happen at elevated IRQL. 
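+	 * (Strictly, the flag lives on the device object rather than in the
+	 * extension: DO_POWER_PAGABLE is cleared whenever any special file
+	 * is present.)  The same recomputation is repeated in the failure
+	 * path below, so it could be factored into a helper along these
+	 * lines (a sketch; __set_po_pageable is an illustrative name):
+	 *
+	 *	static void
+	 *	__set_po_pageable(
+	 *		IN	DEVICE_OBJECT* const	p_dev_obj,
+	 *		IN	cl_pnp_po_ext_t* const	p_ext )
+	 *	{
+	 *		if( p_ext->n_paging_files || p_ext->n_crash_files ||
+	 *			p_ext->n_hibernate_files )
+	 *		{
+	 *			p_dev_obj->Flags &= ~DO_POWER_PAGABLE;
+	 *		}
+	 *		else
+	 *		{
+	 *			p_dev_obj->Flags |= DO_POWER_PAGABLE;
+	 *		}
+	 *	}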
+ */ + if( p_ext->n_paging_files || + p_ext->n_crash_files || + p_ext->n_hibernate_files ) + { + p_dev_obj->Flags &= ~DO_POWER_PAGABLE; + } + else + { + p_dev_obj->Flags |= DO_POWER_PAGABLE; + } + } + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_device_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + switch( p_io_stack->Parameters.QueryDeviceRelations.Type ) + { + case BusRelations: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("BusRelations for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_bus_relations( + p_dev_obj, p_irp, p_action ); + break; + + case EjectionRelations: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("EjectionRelations for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_ejection_relations( + p_dev_obj, p_irp, p_action ); + break; + + case RemovalRelations: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("RemovalRelations for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_removal_relations( + p_dev_obj, p_irp, p_action ); + break; + + case TargetDeviceRelation: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("TargetDeviceRelation for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_query_target_relations( + p_dev_obj, p_irp, p_action ); + break; + + default: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("Unknown Relation for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_unknown( + p_dev_obj, p_irp, p_action ); + break; + } + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* Only PDOs handle query ID and query text IRPs */ + if( p_ext->p_next_do ) + { + status = cl_irp_ignore( p_dev_obj, p_irp, p_action ); + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; + } + + *p_action = IrpComplete; + + switch( p_io_stack->Parameters.QueryId.IdType ) + { + case BusQueryDeviceID: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("BusQueryDeviceID for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_query_txt->pfn_query_device_id( p_dev_obj, p_irp ); + break; + + case BusQueryHardwareIDs: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("BusQueryHardwareIDs for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_query_txt->pfn_query_hardware_id( p_dev_obj, p_irp ); + break; + + case BusQueryCompatibleIDs: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("BusQueryCompatibleIDs for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_query_txt->pfn_query_compatible_id( + p_dev_obj, p_irp ); + break; + + case BusQueryInstanceID: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("BusQueryInstanceID for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_query_txt->pfn_query_unique_id( p_dev_obj, p_irp ); + break; + + default: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("Unsupported ID type for %s\n", + 
p_ext->vfptr_pnp_po->identity) ); + status = p_irp->IoStatus.Status; + break; + } + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__query_device_text( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* Only PDOs handle query ID and query text IRPs */ + if( p_ext->p_next_do ) + { + status = cl_irp_ignore( p_dev_obj, p_irp, p_action ); + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; + } + + *p_action = IrpComplete; + + switch( p_io_stack->Parameters.QueryDeviceText.DeviceTextType ) + { + case DeviceTextDescription: + status = + p_ext->vfptr_query_txt->pfn_query_description( p_dev_obj, p_irp ); + break; + + case DeviceTextLocationInformation: + status = + p_ext->vfptr_query_txt->pfn_query_location( p_dev_obj, p_irp ); + break; + + default: + status = p_irp->IoStatus.Status; + break; + } + + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +static NTSTATUS +__sync_completion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + UNUSED_PARAM( p_dev_obj ); + + ASSERT( p_irp ); + ASSERT( context ); + + /* + * We only wait if IoCallDriver returned STATUS_PENDING. Only set + * the event if the IRP returned pending, so that we don't needlessly + * signal it. + */ + if( p_irp->PendingReturned ) + KeSetEvent( (KEVENT*)context, IO_NO_INCREMENT, FALSE ); + + /* We need to process the IRP further. */ + return STATUS_MORE_PROCESSING_REQUIRED; +} + + +NTSTATUS +cl_do_sync_pnp( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + KEVENT event; + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Setup the IRP. */ + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __sync_completion, &event, + TRUE, TRUE, TRUE ); +#pragma warning( pop ) + + status = IoCallDriver( p_ext->p_next_do, p_irp ); + if( status == STATUS_PENDING ) + { + /* Wait for the completion. */ + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = p_irp->IoStatus.Status; + } + *p_action = IrpComplete; + CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl ); + return status; +} + + +NTSTATUS +cl_alloc_relations( + IN IRP* const p_irp, + IN const size_t n_devs ) +{ + DEVICE_RELATIONS *p_rel, *p_old_rel; + size_t alloc_size; +#ifdef _DEBUG_ + /* Debug variable to prevent warnings when using CL_TRACE macros. */ + uint32_t dbg_error = CL_DBG_ERROR; +#endif + + ASSERT( n_devs ); + + alloc_size = sizeof(DEVICE_RELATIONS) + + (sizeof(PDEVICE_OBJECT) * (n_devs - 1)); + + /* If there are already relations, copy them. */ + p_old_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + if( p_old_rel ) + alloc_size += (sizeof(PDEVICE_OBJECT) * p_old_rel->Count); + + /* Allocate the new relations structure. */ + p_rel = ExAllocatePool( PagedPool, alloc_size ); + p_irp->IoStatus.Information = (ULONG_PTR)p_rel; + if( !p_rel ) + { + /* + * Allocation failed. Release the existing relations and fail the IRP. 
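+	 * (The driver that fails IRP_MN_QUERY_DEVICE_RELATIONS owns any
+	 * relations buffer already attached to the IRP, hence the ExFreePool
+	 * below.)  A bus driver's BusRelations handler is the typical caller;
+	 * a sketch with n_port_devs and p_port_do[] as illustrative names:
+	 *
+	 *	status = cl_alloc_relations( p_irp, n_port_devs );
+	 *	if( NT_SUCCESS( status ) )
+	 *	{
+	 *		p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information;
+	 *		for( i = 0; i < n_port_devs; i++ )
+	 *		{
+	 *			ObReferenceObject( p_port_do[i] );
+	 *			p_rel->Objects[p_rel->Count++] = p_port_do[i];
+	 *		}
+	 *	}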
+ */ + if( p_old_rel ) + ExFreePool( p_old_rel ); + CL_TRACE( CL_DBG_ERROR, dbg_error, + ("Failed to allocate DEVICE_RELATIONS (%d bytes).\n", alloc_size) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* + * Since the structure doesn't contain the callers target devices, + * the count is only set to what existing relations specify. + */ + if( p_old_rel ) + { + /* Copy the existing relations. */ + RtlCopyMemory( p_rel->Objects, p_old_rel->Objects, + sizeof(PDEVICE_OBJECT) * p_old_rel->Count ); + p_rel->Count = p_old_rel->Count; + /* Done with the copy, free the old relations structure. */ + ExFreePool( p_old_rel ); + } + else + { + p_rel->Count = 0; + } + + return STATUS_SUCCESS; +} + + +NTSTATUS +cl_power( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + cl_pnp_po_ext_t *p_ext; + cl_irp_action_t action; + + p_ext = p_dev_obj->DeviceExtension; + + CL_ENTER( CL_DBG_PNP, p_ext->dbg_lvl ); + + status = IoAcquireRemoveLock( &p_ext->remove_lock, p_irp ); + if( !NT_SUCCESS( status ) ) + { + CL_TRACE_EXIT( CL_DBG_ERROR, p_ext->dbg_lvl, + ("IoAcquireRemoveLock returned %08x for %s.\n", + status, p_ext->vfptr_pnp_po->identity) ); + PoStartNextPowerIrp( p_irp ); + p_irp->IoStatus.Status = status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + return status; + } + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + ASSERT( p_io_stack->MajorFunction == IRP_MJ_POWER ); + + switch( p_io_stack->MinorFunction ) + { + case IRP_MN_QUERY_POWER: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_QUERY_POWER for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_pnp_po->pfn_query_power( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_SET_POWER: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_SET_POWER for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_pnp_po->pfn_set_power( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_WAIT_WAKE: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_WAIT_WAKE for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_pnp_po->pfn_wait_wake( p_dev_obj, p_irp, &action ); + break; + + case IRP_MN_POWER_SEQUENCE: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("IRP_MN_POWER_SEQUENCE for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = + p_ext->vfptr_pnp_po->pfn_power_sequence( p_dev_obj, p_irp, &action ); + break; + + default: + CL_TRACE( CL_DBG_PNP, p_ext->dbg_lvl, ("Unknown IRP minor function for %s\n", + p_ext->vfptr_pnp_po->identity) ); + status = p_ext->vfptr_pnp_po->pfn_unknown( + p_dev_obj, p_irp, &action ); + } + + switch( action ) + { + case IrpPassDown: + /* + * A completion routine has already been set. + * PoStartNextPowerIrp should be called in the completion routine. + */ + status = PoCallDriver( p_ext->p_next_do, p_irp ); + break; + + case IrpSkip: + p_irp->IoStatus.Status = status; + + case IrpIgnore: + PoStartNextPowerIrp( p_irp ); + IoSkipCurrentIrpStackLocation( p_irp ); + /* TODO: Documentation says to return STATUS_PENDING. Seems odd. */ + status = PoCallDriver( p_ext->p_next_do, p_irp ); + break; + + case IrpComplete: + p_irp->IoStatus.Status = status; + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + break; + + case IrpDoNothing: + /* + * Returned when sending a device IRP_MN_SET_POWER IRP so that + * processing can continue in the completion routine without releasing + * the remove lock. 
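+	 * When that completion routine eventually finishes the IRP it must
+	 * still observe the power-IRP ordering rules; the assumed epilogue
+	 * there looks like:
+	 *
+	 *	PoStartNextPowerIrp( p_irp );	(always before completion)
+	 *	IoCompleteRequest( p_irp, IO_NO_INCREMENT );
+	 *	IoReleaseRemoveLock( &p_ext->remove_lock, p_irp );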
+	 */
+		break;
+	}
+
+	if( action != IrpDoNothing )
+		IoReleaseRemoveLock( &p_ext->remove_lock, p_irp );
+
+	CL_EXIT( CL_DBG_PNP, p_ext->dbg_lvl );
+	return status;
+}
+
+
+NTSTATUS
+cl_irp_skip(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const				p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	UNUSED_PARAM( p_dev_obj );
+	UNUSED_PARAM( p_irp );
+	*p_action = IrpSkip;
+	return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+cl_irp_ignore(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const				p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	UNUSED_PARAM( p_dev_obj );
+	UNUSED_PARAM( p_irp );
+	*p_action = IrpIgnore;
+	return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+cl_irp_complete(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const				p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	UNUSED_PARAM( p_dev_obj );
+	*p_action = IrpComplete;
+	return p_irp->IoStatus.Status;
+}
+
+
+NTSTATUS
+cl_irp_succeed(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const				p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	UNUSED_PARAM( p_dev_obj );
+	UNUSED_PARAM( p_irp );
+	*p_action = IrpComplete;
+	return STATUS_SUCCESS;
+}
+
+
+NTSTATUS
+cl_irp_unsupported(
+	IN	DEVICE_OBJECT* const	p_dev_obj,
+	IN	IRP* const				p_irp,
+	OUT	cl_irp_action_t* const	p_action )
+{
+	UNUSED_PARAM( p_dev_obj );
+	UNUSED_PARAM( p_irp );
+	*p_action = IrpComplete;
+	return STATUS_NOT_SUPPORTED;
+}
diff --git a/branches/Ndi/core/complib/kernel/cl_syscallback.c b/branches/Ndi/core/complib/kernel/cl_syscallback.c
new file mode 100644
index 00000000..98246763
--- /dev/null
+++ b/branches/Ndi/core/complib/kernel/cl_syscallback.c
@@ -0,0 +1,88 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include "complib/cl_syscallback.h"
+#include "complib/cl_memory.h"
+
+
+/* The OS provides the thread pool, so this is a no-op. */
+cl_status_t
+cl_sys_callback_init( void )
+{
+	return( CL_SUCCESS );
+}
+
+
+/* The OS provides the thread pool, so this is a no-op. */
+void
+cl_sys_callback_destroy( void )
+{
+}
+
+
+/* Note: This could be improved to pull items out of a pool.
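+ * In this kernel implementation the get_context must be the DEVICE_OBJECT
+ * backing the work item, since cl_sys_callback_get maps directly onto
+ * IoAllocateWorkItem.  A usage sketch (my_cb and my_ctx are illustrative):
+ *
+ *	static void my_cb( void *get_context, void *queue_context );
+ *
+ *	p_item = cl_sys_callback_get( p_dev_obj );
+ *	if( p_item &&
+ *		cl_sys_callback_queue( p_item, my_cb, my_ctx, FALSE ) != CL_SUCCESS )
+ *	{
+ *		cl_sys_callback_put( p_item );
+ *	}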
*/ +cl_sys_callback_item_t* +cl_sys_callback_get( + IN const void* const get_context ) +{ + return IoAllocateWorkItem( (PDEVICE_OBJECT)get_context ); +} + + +void +cl_sys_callback_put( + IN cl_sys_callback_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + IoFreeWorkItem( p_item ); +} + + +cl_status_t +cl_sys_callback_queue( + IN cl_sys_callback_item_t* const p_item, + IN cl_pfn_sys_callback_t pfn_callback, + IN const void* const queue_context, + IN const boolean_t high_priority ) +{ + WORK_QUEUE_TYPE type; + + /* Execute as an APC if the requested priority is high. */ + if( high_priority ) + type = CriticalWorkQueue; + else + type = DelayedWorkQueue; + + IoQueueWorkItem( p_item, pfn_callback, type, (void*)queue_context ); + + return( CL_SUCCESS ); +} diff --git a/branches/Ndi/core/complib/kernel/cl_thread.c b/branches/Ndi/core/complib/kernel/cl_thread.c new file mode 100644 index 00000000..f68eb167 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_thread.c @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "complib/cl_thread.h" + + +static void +__thread_callback( + IN cl_thread_t* p_thread ) +{ + /* Store the thread pointer so that destroy and is_current_thread work. */ + p_thread->osd.p_thread = KeGetCurrentThread(); + + /* Bump the thread's priority. */ + KeSetPriorityThread( p_thread->osd.p_thread, LOW_REALTIME_PRIORITY ); + + /* Call the user's thread function. */ + (*p_thread->pfn_callback)( (void*)p_thread->context ); + + PsTerminateSystemThread( STATUS_SUCCESS ); +} + + +void +cl_thread_construct( + IN cl_thread_t* const p_thread ) +{ + p_thread->osd.h_thread = NULL; + p_thread->osd.p_thread = NULL; +} + + +cl_status_t +cl_thread_init( + IN cl_thread_t* const p_thread, + IN cl_pfn_thread_callback_t pfn_callback, + IN const void* const context, + IN const char* const name ) +{ + NTSTATUS status; + OBJECT_ATTRIBUTES attr; + + CL_ASSERT( p_thread && pfn_callback ); + CL_ASSERT( KeGetCurrentIrql() == PASSIVE_LEVEL ); + + UNUSED_PARAM( name ); + + cl_thread_construct( p_thread ); + + p_thread->pfn_callback = pfn_callback; + p_thread->context = context; + + /* Create a new thread, storing both the handle and thread id. 
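+	 * (Only the handle is stored here; the KTHREAD pointer used by
+	 * cl_thread_destroy and cl_is_current_thread is captured by
+	 * __thread_callback above once the thread runs.)  Typical caller
+	 * usage - a sketch, with my_worker and p_ctx as illustrative names:
+	 *
+	 *	cl_thread_t	thread;
+	 *
+	 *	cl_thread_construct( &thread );
+	 *	if( cl_thread_init( &thread, my_worker, p_ctx, "my_worker" )
+	 *		== CL_SUCCESS )
+	 *	{
+	 *		...signal my_worker to return...
+	 *		cl_thread_destroy( &thread );	(blocks until it exits)
+	 *	}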
*/ + InitializeObjectAttributes( &attr, NULL, OBJ_KERNEL_HANDLE, NULL, NULL ); + status = PsCreateSystemThread( &p_thread->osd.h_thread, THREAD_ALL_ACCESS, + &attr, NULL, NULL, __thread_callback, p_thread ); + + if( !NT_SUCCESS( status ) ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} + + +void +cl_thread_destroy( + IN cl_thread_t* const p_thread ) +{ + CL_ASSERT( p_thread ); + + if( !p_thread->osd.h_thread ) + return; + + /* Wait until the kernel thread pointer is stored in the thread object. */ + while( !p_thread->osd.p_thread ) + cl_thread_suspend( 0 ); + + /* Wait for the thread to exit. */ + KeWaitForSingleObject( p_thread->osd.p_thread, Executive, KernelMode, + FALSE, NULL ); + + /* Close the handle to the thread. */ + ZwClose( p_thread->osd.h_thread ); + + /* + * Reset the handle in case the user calls destroy and the thread is + * no longer active. + */ + cl_thread_construct( p_thread ); +} + + +uint32_t +cl_proc_count( void ) +{ +#if WINVER > 0x500 + return KeNumberProcessors; +#else + return *KeNumberProcessors; +#endif +} + + +boolean_t +cl_is_current_thread( + IN const cl_thread_t* const p_thread ) +{ + return( p_thread->osd.p_thread == KeGetCurrentThread() ); +} \ No newline at end of file diff --git a/branches/Ndi/core/complib/kernel/cl_timer.c b/branches/Ndi/core/complib/kernel/cl_timer.c new file mode 100644 index 00000000..34cae8e3 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/cl_timer.c @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "complib/cl_timer.h" +#include "complib/cl_memory.h" + + +static void +__timer_callback( + IN PRKDPC p_dpc, + IN cl_timer_t* p_timer, + IN void* arg1, + IN void* arg2 ) +{ + UNUSED_PARAM( p_dpc ); + UNUSED_PARAM( arg1 ); + UNUSED_PARAM( arg2 ); + + p_timer->timeout_time = 0; + + (p_timer->pfn_callback)( (void*)p_timer->context ); +} + + +void +cl_timer_construct( + IN cl_timer_t* const p_timer ) +{ + cl_memclr( p_timer, sizeof(cl_timer_t) ); +} + + + +cl_status_t +cl_timer_init( + IN cl_timer_t* const p_timer, + IN cl_pfn_timer_callback_t pfn_callback, + IN const void* const context ) +{ + CL_ASSERT( p_timer && pfn_callback ); + + cl_timer_construct( p_timer ); + + p_timer->pfn_callback = pfn_callback; + p_timer->context = context; + + KeInitializeTimer( &p_timer->timer ); + KeInitializeDpc( &p_timer->dpc, __timer_callback, p_timer ); + + return( CL_SUCCESS ); +} + + +void +cl_timer_destroy( + IN cl_timer_t* const p_timer ) +{ + CL_ASSERT( p_timer ); + + if( !p_timer->pfn_callback ) + return; + + /* Ensure that the timer is stopped. */ + cl_timer_stop( p_timer ); + + /* + * Flush the DPCs to ensure that no callbacks occur after the timer is + * destroyed. + */ + KeFlushQueuedDpcs(); + + p_timer->pfn_callback = NULL; +} + + +cl_status_t +cl_timer_start( + IN cl_timer_t* const p_timer, + IN const uint32_t time_ms ) +{ + LARGE_INTEGER due_time; + + CL_ASSERT( p_timer ); + CL_ASSERT( p_timer->pfn_callback ); + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + /* Due time is in 100 ns increments. Negative for relative time. */ + due_time.QuadPart = -(int64_t)(((uint64_t)time_ms) * 10000); + + /* Store the timeout time in the timer object. */ + p_timer->timeout_time = cl_get_time_stamp() + (((uint64_t)time_ms) * 1000); + + KeSetTimer( &p_timer->timer, due_time, &p_timer->dpc ); + return( CL_SUCCESS ); +} + + +cl_status_t +cl_timer_trim( + IN cl_timer_t* const p_timer, + IN const uint32_t time_ms ) +{ + uint64_t timeout_time; + + CL_ASSERT( p_timer ); + CL_ASSERT( p_timer->pfn_callback ); + + /* Calculate the timeout time in the timer object. */ + timeout_time = cl_get_time_stamp() + (((uint64_t)time_ms) * 1000); + + /* Only pull in the timeout time. */ + if( p_timer->timeout_time && p_timer->timeout_time < timeout_time ) + return( CL_SUCCESS ); + + return cl_timer_start( p_timer, time_ms ); +} + + +void +cl_timer_stop( + IN cl_timer_t* const p_timer ) +{ + CL_ASSERT( p_timer ); + CL_ASSERT( p_timer->pfn_callback ); + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + /* Cancel the timer. This also cancels any queued DPCs for the timer. */ + KeCancelTimer( &p_timer->timer ); + + p_timer->timeout_time = 0; +} diff --git a/branches/Ndi/core/complib/kernel/makefile b/branches/Ndi/core/complib/kernel/makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/core/complib/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/branches/Ndi/core/complib/user/SOURCES b/branches/Ndi/core/complib/user/SOURCES
new file mode 100644
index 00000000..64725a8d
--- /dev/null
+++ b/branches/Ndi/core/complib/user/SOURCES
@@ -0,0 +1,43 @@
+!if $(FREEBUILD)
+TARGETNAME=complib
+!else
+TARGETNAME=complibd
+!endif
+TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DYNLINK
+DLLENTRY=DllMain
+DLLDEF=$O\complib.def
+USE_NTDLL=1
+
+SOURCES=\
+	complib.rc \
+	cl_debug.c \
+	cl_dll.c \
+	cl_event.c \
+	cl_log.c \
+	cl_memory_osd.c \
+	cl_syscallback.c \
+	cl_thread.c \
+	cl_timer.c \
+	..\cl_async_proc.c \
+	..\cl_list.c \
+	..\cl_map.c \
+	..\cl_memory.c \
+	..\cl_obj.c \
+	..\cl_perf.c \
+	..\cl_pool.c \
+	..\cl_ptr_vector.c \
+	..\cl_reqmgr.c \
+	..\cl_statustext.c \
+	..\cl_threadpool.c \
+	..\cl_vector.c \
+
+INCLUDES=..\..\..\inc;..\..\..\inc\user;
+
+USER_C_FLAGS=$(USER_C_FLAGS) -DEXPORT_CL_SYMBOLS
+
+TARGETLIBS=\
+	$(SDK_LIB_PATH)\advapi32.lib \
+	$(SDK_LIB_PATH)\kernel32.lib
+
+MSC_WARNING_LEVEL= /W4
diff --git a/branches/Ndi/core/complib/user/cl_debug.c b/branches/Ndi/core/complib/user/cl_debug.c
new file mode 100644
index 00000000..5ee2a650
--- /dev/null
+++ b/branches/Ndi/core/complib/user/cl_debug.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "complib/cl_debug.h"
+#include <windows.h>
+#include <stdio.h>
+
+
+void
+cl_msg_out(
+	IN	const char* const	message,
+	IN	... )
+{
+	char	buffer[256];
+
+	va_list	args;
+	va_start( args, message );
+	if( _vsnprintf( buffer, 256, message, args ) == -1 )
+	{
+		/* Overflow - terminate the buffer and warn that it was cut short. */
+		buffer[252] = '.';
+		buffer[253] = '.';
+		buffer[254] = '.';
+		buffer[255] = '\0';
+		OutputDebugStringA( buffer );
+		OutputDebugStringA( "WARNING: Buffer truncated." );
+	}
+	else
+	{
+		OutputDebugStringA( buffer );
+	}
+	va_end(args);
+}
+
diff --git a/branches/Ndi/core/complib/user/cl_dll.c b/branches/Ndi/core/complib/user/cl_dll.c
new file mode 100644
index 00000000..dbc6405e
--- /dev/null
+++ b/branches/Ndi/core/complib/user/cl_dll.c
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + + +static BOOL +_DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + UNUSED_PARAM( lp_reserved ); + + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: + DisableThreadLibraryCalls( h_module ); + __cl_mem_track( TRUE ); + break; + + case DLL_PROCESS_DETACH: + __cl_mem_track( FALSE ); + } + return TRUE; +} + + +extern BOOL APIENTRY +_DllMainCRTStartupForGS( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ); + + +BOOL APIENTRY +DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: + if( !_DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ) ) + { + return FALSE; + } + + return _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + case DLL_PROCESS_DETACH: + _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + return _DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ); + } + return TRUE; +} + diff --git a/branches/Ndi/core/complib/user/cl_event.c b/branches/Ndi/core/complib/user/cl_event.c new file mode 100644 index 00000000..2ec5b1cd --- /dev/null +++ b/branches/Ndi/core/complib/user/cl_event.c @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "complib/cl_event.h" + + +cl_status_t +cl_event_wait_on( + IN cl_event_t* const p_event, + IN const uint32_t wait_us, + IN const boolean_t interruptible ) +{ + DWORD wait_ms; + + CL_ASSERT( p_event ); + CL_ASSERT( *p_event ); + + if( wait_us == EVENT_NO_TIMEOUT ) + wait_ms = INFINITE; + else + wait_ms = (DWORD)(wait_us / 1000); + + switch( WaitForSingleObjectEx( *p_event, wait_ms, interruptible ) ) + { + case WAIT_OBJECT_0: + return( CL_SUCCESS ); + case WAIT_IO_COMPLETION: + return( CL_NOT_DONE ); + case WAIT_TIMEOUT: + return( CL_TIMEOUT ); + default: + return( CL_ERROR ); + } +} diff --git a/branches/Ndi/core/complib/user/cl_log.c b/branches/Ndi/core/complib/user/cl_log.c new file mode 100644 index 00000000..a5821814 --- /dev/null +++ b/branches/Ndi/core/complib/user/cl_log.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "complib/cl_log.h" + + +void +cl_log_event( + IN const char* const name, + IN const cl_log_type_t type, + IN const char* const p_message, + IN const void* const p_data, + IN const uint32_t data_len ) +{ + HANDLE h; + WORD log_type; + WORD num_str = 0; + + h = RegisterEventSource( NULL, name ); + + if( !h ) + return; + + switch( type ) + { + case CL_LOG_ERROR: + log_type = EVENTLOG_ERROR_TYPE; + break; + + case CL_LOG_WARN: + log_type = EVENTLOG_WARNING_TYPE; + break; + + default: + case CL_LOG_INFO: + log_type = EVENTLOG_INFORMATION_TYPE; + break; + } + + if( p_message ) + num_str = 1; + + /* User the ASCII version of ReportEvent. */ + ReportEventA( h, log_type, 0, 0, NULL, num_str, data_len, + (LPCTSTR*)&p_message, (LPVOID)p_data ); + + DeregisterEventSource( h ); +} diff --git a/branches/Ndi/core/complib/user/cl_memory_osd.c b/branches/Ndi/core/complib/user/cl_memory_osd.c new file mode 100644 index 00000000..643e1b26 --- /dev/null +++ b/branches/Ndi/core/complib/user/cl_memory_osd.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "complib/cl_memory.h"
+#include "stdlib.h"
+
+
+void*
+__cl_malloc_priv(
+	IN	const size_t		size,
+	IN	const boolean_t		pageable )
+{
+	UNUSED_PARAM( pageable );
+	return( HeapAlloc( GetProcessHeap(), 0, size ) );
+}
+
+
+void
+__cl_free_priv(
+	IN	void* const	p_memory )
+{
+	HeapFree( GetProcessHeap(), 0, p_memory );
+}
+
diff --git a/branches/Ndi/core/complib/user/cl_syscallback.c b/branches/Ndi/core/complib/user/cl_syscallback.c
new file mode 100644
index 00000000..feec61ce
--- /dev/null
+++ b/branches/Ndi/core/complib/user/cl_syscallback.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "complib/cl_syscallback.h"
+#include "complib/cl_memory.h"
+
+
+/*
+ * Callback function invoked by the thread pool in response to a
+ * call to QueueUserWorkItem.
+ */
+static DWORD WINAPI
+cl_sys_callback_wrapper(
+	IN	cl_sys_callback_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+
+	p_item->pfn_callback( (void*)p_item->get_context,
+		(void*)p_item->queue_context );
+
+	return( 0 );
+}
+
+
+/* The OS provides the thread pool, so this is a no-op. */
+cl_status_t
+cl_sys_callback_init( void )
+{
+	return( CL_SUCCESS );
+}
+
+
+/* The OS provides the thread pool, so this is a no-op.
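+ * Unlike the kernel version, this user-mode implementation heap-allocates
+ * its items and runs callbacks on the Win32 thread pool through
+ * QueueUserWorkItem; the same get/queue/put pattern applies (a sketch,
+ * with my_cb and the contexts as illustrative names):
+ *
+ *	p_item = cl_sys_callback_get( my_get_ctx );
+ *	if( p_item &&
+ *		cl_sys_callback_queue( p_item, my_cb, my_queue_ctx,
+ *		FALSE ) != CL_SUCCESS )
+ *	{
+ *		cl_sys_callback_put( p_item );
+ *	}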
*/ +void +cl_sys_callback_destroy( void ) +{ +} + + +/* Note: This could be improved to pull items out of a pool. */ +cl_sys_callback_item_t* +cl_sys_callback_get( + IN const void* const get_context ) +{ + cl_sys_callback_item_t *p_item; + + p_item = (cl_sys_callback_item_t*) + cl_zalloc( sizeof(cl_sys_callback_item_t) ); + + if( !p_item ) + return( NULL ); + + p_item->get_context = get_context; + + return p_item; +} + + +void +cl_sys_callback_put( + IN cl_sys_callback_item_t* const p_item ) +{ + CL_ASSERT( p_item ); + cl_free( p_item ); +} + + +cl_status_t +cl_sys_callback_queue( + IN cl_sys_callback_item_t* const p_item, + IN cl_pfn_sys_callback_t pfn_callback, + IN const void* const queue_context, + IN const boolean_t high_priority ) +{ + ULONG flags; + + /* Store the callback and context in the item */ + p_item->pfn_callback = pfn_callback; + p_item->queue_context = queue_context; + + /* Execute as an APC if the requested priority is high. */ + if( high_priority ) + flags = WT_EXECUTEINIOTHREAD; + else + flags = WT_EXECUTEDEFAULT; + + /* Queue the internal callback with a pointer to the callback item. */ + if( !QueueUserWorkItem( cl_sys_callback_wrapper, p_item, flags ) ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} diff --git a/branches/Ndi/core/complib/user/cl_thread.c b/branches/Ndi/core/complib/user/cl_thread.c new file mode 100644 index 00000000..2edaee95 --- /dev/null +++ b/branches/Ndi/core/complib/user/cl_thread.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "complib/cl_thread.h" +#include + + +static DWORD WINAPI +cl_thread_callback( + IN cl_thread_t* p_thread ) +{ + /* Call the user's thread function. */ + (*p_thread->pfn_callback)( (void*)p_thread->context ); + + /* + * Use endthreadex so that the thread handle is not closed. It will + * be closed in the cl_thread_destroy. 
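+	 * (The call below is actually ExitThread: since the thread was
+	 * created with CreateThread rather than _beginthreadex, ExitThread
+	 * is the matching exit call, and either way the handle remains
+	 * valid for cl_thread_destroy to wait on and close.)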
+ */ + ExitThread( 0 ); +} + + +void +cl_thread_construct( + IN cl_thread_t* const p_thread ) +{ + p_thread->osd.h_thread = NULL; + p_thread->osd.thread_id = 0; +} + + +cl_status_t +cl_thread_init( + IN cl_thread_t* const p_thread, + IN cl_pfn_thread_callback_t pfn_callback, + IN const void* const context, + IN const char* const name ) +{ + CL_ASSERT( p_thread && pfn_callback ); + + UNUSED_PARAM( name ); + + cl_thread_construct( p_thread ); + + p_thread->pfn_callback = pfn_callback; + p_thread->context = context; + + /* Create a new thread, storing both the handle and thread id. */ + p_thread->osd.h_thread = (HANDLE)CreateThread( NULL, 0, + cl_thread_callback, p_thread, 0, &p_thread->osd.thread_id ); + + if( !p_thread->osd.h_thread ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} + + +void +cl_thread_destroy( + IN cl_thread_t* const p_thread ) +{ + CL_ASSERT( p_thread ); + + if( !p_thread->osd.h_thread ) + return; + + /* Wait for the thread to exit. */ + WaitForSingleObject( p_thread->osd.h_thread, INFINITE ); + + /* Close the handle to the thread. */ + CloseHandle( p_thread->osd.h_thread ); + + /* + * Reset the handle so that a subsequent destroy call is harmless once + * the thread is no longer active. + */ + p_thread->osd.h_thread = NULL; +} + + +uint32_t +cl_proc_count( void ) +{ + SYSTEM_INFO system_info; + + GetSystemInfo( &system_info ); + return system_info.dwNumberOfProcessors; +} + + +boolean_t +cl_is_current_thread( + IN const cl_thread_t* const p_thread ) +{ + return( p_thread->osd.thread_id == GetCurrentThreadId() ); +} \ No newline at end of file
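A matching lifecycle sketch for the cl_thread interface above (not part of the patch; my_thread_fn is a hypothetical name):

    /* Sketch only; assumes the cl_thread API as declared above. */
    static void
    my_thread_fn(
        IN void* context )
    {
        /* Runs on the new thread; returning ends the thread. */
        UNUSED_PARAM( context );
    }

    cl_thread_t thread;

    cl_thread_construct( &thread );
    if( cl_thread_init( &thread, my_thread_fn, NULL, "worker" ) == CL_SUCCESS )
    {
        /* ... do other work ... */
        /* Blocks until my_thread_fn returns, then closes the handle. */
        cl_thread_destroy( &thread );
    }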
diff --git a/branches/Ndi/core/complib/user/cl_timer.c b/branches/Ndi/core/complib/user/cl_timer.c new file mode 100644 index 00000000..649a7951 --- /dev/null +++ b/branches/Ndi/core/complib/user/cl_timer.c @@ -0,0 +1,195 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "complib/cl_timer.h" + + +static void CALLBACK +__timer_callback( + IN cl_timer_t* const p_timer, + IN BOOLEAN timer_signalled ) +{ + /* timer_signalled is always TRUE and carries no information. */ + CL_ASSERT( timer_signalled ); + UNUSED_PARAM( timer_signalled ); + + p_timer->timeout_time = 0; + p_timer->thread_id = GetCurrentThreadId(); + + (p_timer->pfn_callback)( (void*)p_timer->context ); + + p_timer->thread_id = 0; +} + + +void +cl_timer_construct( + IN cl_timer_t* const p_timer ) +{ + p_timer->h_timer = NULL; + p_timer->timeout_time = 0; + p_timer->thread_id = 0; +} + + + +cl_status_t +cl_timer_init( + IN cl_timer_t* const p_timer, + IN cl_pfn_timer_callback_t pfn_callback, + IN const void* const context ) +{ + CL_ASSERT( p_timer ); + CL_ASSERT( pfn_callback ); + + cl_timer_construct( p_timer ); + + p_timer->pfn_callback = pfn_callback; + p_timer->context = context; + return( CL_SUCCESS ); +} + + +void +cl_timer_destroy( + IN cl_timer_t* const p_timer ) +{ + CL_ASSERT( p_timer ); + + cl_timer_stop( p_timer ); +} + + +cl_status_t +cl_timer_start( + IN cl_timer_t* const p_timer, + IN const uint32_t time_ms ) +{ + CL_ASSERT( p_timer ); + + cl_timer_stop( p_timer ); + + p_timer->timeout_time = cl_get_time_stamp() + (((uint64_t)time_ms) * 1000); + + if( !CreateTimerQueueTimer( &p_timer->h_timer, NULL, __timer_callback, + p_timer, time_ms, 0, WT_EXECUTEINIOTHREAD ) ) + { + return( CL_ERROR ); + } + + return( CL_SUCCESS ); +} + + +cl_status_t +cl_timer_trim( + IN cl_timer_t* const p_timer, + IN const uint32_t time_ms ) +{ + uint64_t timeout_time; + + CL_ASSERT( p_timer ); + CL_ASSERT( p_timer->pfn_callback ); + + /* Calculate the timeout time in the timer object. */ + timeout_time = cl_get_time_stamp() + (((uint64_t)time_ms) * 1000); + + /* Only allow the timeout to be pulled in, never pushed out. */ + if( p_timer->timeout_time && p_timer->timeout_time < timeout_time ) + return( CL_SUCCESS ); + + return cl_timer_start( p_timer, time_ms ); +} + + +void +cl_timer_stop( + IN cl_timer_t* const p_timer ) +{ + CL_ASSERT( p_timer ); + + if( p_timer->h_timer && p_timer->thread_id != GetCurrentThreadId() ) + { + /* Make sure we block until the timer is cancelled. */ + DeleteTimerQueueTimer( NULL, p_timer->h_timer, INVALID_HANDLE_VALUE ); + p_timer->h_timer = NULL; + } + p_timer->timeout_time = 0; +} + + +#define SEC_TO_MICRO CL_CONST64(1000000) // s to µs conversion + +uint64_t +cl_get_time_stamp( void ) +{ + LARGE_INTEGER tick_count, frequency; + + if( !QueryPerformanceFrequency( &frequency ) ) + return( 0 ); + + if( !QueryPerformanceCounter( &tick_count ) ) + return( 0 ); + + return( tick_count.QuadPart / (frequency.QuadPart / SEC_TO_MICRO) ); +} + +uint32_t +cl_get_time_stamp_sec( void ) +{ + return( (uint32_t)(cl_get_time_stamp() / SEC_TO_MICRO) ); +} + + +uint64_t +cl_get_tick_count( void ) +{ + LARGE_INTEGER tick_count; + + if( !QueryPerformanceCounter( &tick_count ) ) + return( 0 ); + + return tick_count.QuadPart; +} + + +uint64_t +cl_get_tick_freq( void ) +{ + LARGE_INTEGER frequency; + + if( !QueryPerformanceFrequency( &frequency ) ) + return( 0 ); + + return frequency.QuadPart; +} diff --git a/branches/Ndi/core/complib/user/complib.rc b/branches/Ndi/core/complib/user/complib.rc new file mode 100644 index 00000000..6237874c --- /dev/null +++ b/branches/Ndi/core/complib/user/complib.rc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Component Library (Debug)" +#define VER_INTERNALNAME_STR "complibd.dll" +#define VER_ORIGINALFILENAME_STR "complibd.dll" +#else +#define VER_FILEDESCRIPTION_STR "Component Library" +#define VER_INTERNALNAME_STR "complib.dll" +#define VER_ORIGINALFILENAME_STR "complib.dll" +#endif + +#include diff --git a/branches/Ndi/core/complib/user/complib.src b/branches/Ndi/core/complib/user/complib.src new file mode 100644 index 00000000..7c9d297c --- /dev/null +++ b/branches/Ndi/core/complib/user/complib.src @@ -0,0 +1,293 @@ +#if DBG +LIBRARY complibd.dll +#else +LIBRARY complib.dll +#endif + +#ifndef _WIN64 +EXPORTS +__cl_free_ntrk +__cl_free_trk +__cl_malloc_ntrk +__cl_malloc_trk +__cl_mem_track +__cl_perf_construct +__cl_perf_destroy +__cl_perf_display +__cl_perf_init +__cl_perf_reset +__cl_primitive_insert +__cl_primitive_remove +__cl_qlist_reset +__cl_zalloc_ntrk +__cl_zalloc_trk +cl_async_proc_construct +cl_async_proc_init +cl_async_proc_destroy +cl_async_proc_queue +cl_atomic_inc +cl_atomic_dec +cl_atomic_add +cl_atomic_sub +cl_atomic_xchg +cl_atomic_comp_xchg +cl_ntoh +cl_cpool_construct +cl_is_cpool_inited +cl_cpool_init +cl_cpool_destroy +cl_cpool_count +cl_cpool_get +cl_cpool_put +cl_cpool_grow +cl_msg_out +cl_event_construct +cl_event_init +cl_event_destroy +cl_event_signal +cl_event_reset +cl_event_wait_on +cl_fmap_count +cl_is_fmap_empty +cl_fmap_key +cl_fmap_init +cl_fmap_end +cl_fmap_head +cl_fmap_tail +cl_fmap_next +cl_fmap_prev +cl_fmap_insert +cl_fmap_get +cl_fmap_remove_item +cl_fmap_remove +cl_fmap_remove_all +cl_fmap_merge +cl_fmap_delta +cl_fmap_apply_func +cl_ioctl_request +cl_ioctl_result +cl_list_construct +cl_is_list_inited +cl_list_init +cl_list_destroy +cl_is_list_empty +cl_list_insert_head +cl_list_insert_tail +cl_list_insert_array_head +cl_list_insert_array_tail +cl_list_insert_next +cl_list_insert_prev +cl_list_remove_head +cl_list_remove_tail +cl_list_remove_all +cl_list_remove_object +cl_list_remove_item +cl_is_object_in_list +cl_list_end +cl_list_head +cl_list_tail +cl_list_next +cl_list_prev +cl_list_obj +cl_list_find_from_head +cl_list_find_from_tail +cl_list_apply_func +cl_list_count +cl_log_event +cl_map_count +cl_is_map_empty +cl_map_key +cl_map_construct +cl_is_map_inited +cl_map_init +cl_map_destroy +cl_map_end 
+cl_map_head +cl_map_tail +cl_map_next +cl_map_prev +cl_map_insert +cl_map_get +cl_map_remove_item +cl_map_remove +cl_map_remove_all +cl_map_obj +cl_map_merge +cl_map_delta +cl_mem_display +cl_memset +cl_memclr +cl_memcpy +cl_memcmp +cl_mutex_construct +cl_mutex_init +cl_mutex_destroy +cl_mutex_acquire +cl_mutex_release +cl_obj_mgr_create +cl_obj_mgr_destroy +cl_obj_construct +cl_obj_init +cl_obj_destroy +cl_obj_deinit +cl_obj_reset +cl_obj_ref +cl_obj_deref +cl_obj_type +cl_obj_lock +cl_obj_unlock +cl_rel_alloc +cl_rel_free +cl_obj_insert_rel +cl_obj_remove_rel +cl_plock_construct +cl_plock_destroy +cl_plock_init +cl_plock_acquire +cl_plock_excl_acquire +cl_plock_release +cl_pool_construct +cl_is_pool_inited +cl_pool_init +cl_pool_destroy +cl_pool_count +cl_pool_get +cl_pool_put +cl_pool_grow +cl_ptr_vector_construct +cl_ptr_vector_init +cl_ptr_vector_destroy +cl_ptr_vector_get_capacity +cl_ptr_vector_get_size +cl_ptr_vector_get +cl_ptr_vector_at +cl_ptr_vector_set +cl_ptr_vector_insert +cl_ptr_vector_remove +cl_ptr_vector_set_capacity +cl_ptr_vector_set_size +cl_ptr_vector_set_min_size +cl_ptr_vector_apply_func +cl_ptr_vector_find_from_start +cl_ptr_vector_find_from_end +cl_qcpool_construct +cl_is_qcpool_inited +cl_qcpool_init +cl_qcpool_destroy +cl_qcpool_count +cl_qcpool_get +cl_qcpool_put +cl_qcpool_put_list +cl_qcpool_grow +cl_qlist_set_obj +cl_qlist_obj +cl_qlist_init +cl_qlist_count +cl_is_qlist_empty +cl_qlist_next +cl_qlist_prev +cl_qlist_head +cl_qlist_tail +cl_qlist_end +cl_qlist_insert_head +cl_qlist_insert_tail +cl_qlist_insert_list_head +cl_qlist_insert_list_tail +cl_qlist_insert_array_head +cl_qlist_insert_array_tail +cl_qlist_insert_prev +cl_qlist_insert_next +cl_qlist_remove_head +cl_qlist_remove_tail +cl_qlist_remove_item +cl_qlist_remove_all +cl_is_item_in_qlist +cl_qlist_find_next +cl_qlist_find_prev +cl_qlist_find_from_head +cl_qlist_find_from_tail +cl_qlist_apply_func +cl_qlist_move_items +cl_qmap_count +cl_is_qmap_empty +cl_qmap_set_obj +cl_qmap_obj +cl_qmap_key +cl_qmap_init +cl_qmap_end +cl_qmap_head +cl_qmap_tail +cl_qmap_next +cl_qmap_prev +cl_qmap_insert +cl_qmap_get +cl_qmap_remove_item +cl_qmap_remove +cl_qmap_remove_all +cl_qmap_merge +cl_qmap_delta +cl_qmap_apply_func +cl_qpool_construct +cl_is_qpool_inited +cl_qpool_init +cl_qpool_destroy +cl_qpool_count +cl_qpool_get +cl_qpool_put +cl_qpool_put_list +cl_qpool_grow +cl_req_mgr_construct +cl_req_mgr_init +cl_req_mgr_destroy +cl_req_mgr_get +cl_req_mgr_resume +cl_spinlock_construct +cl_spinlock_init +cl_spinlock_destroy +cl_spinlock_acquire +cl_spinlock_release +cl_sys_callback_get +cl_sys_callback_put +cl_sys_callback_queue +cl_thread_construct +cl_thread_init +cl_thread_destroy +cl_thread_suspend +cl_thread_stall +cl_proc_count +cl_is_current_thread +cl_is_blockable +cl_thread_pool_construct +cl_thread_pool_init +cl_thread_pool_destroy +cl_thread_pool_signal +cl_timer_construct +cl_timer_init +cl_timer_destroy +cl_timer_start +cl_timer_stop +cl_timer_trim +cl_get_time_stamp +cl_get_time_stamp_sec +cl_is_state_valid +cl_vector_construct +cl_vector_init +cl_vector_destroy +cl_vector_get_capacity +cl_vector_get_size +cl_vector_get_ptr +cl_vector_get +cl_vector_at +cl_vector_set +cl_vector_set_capacity +cl_vector_set_size +cl_vector_set_min_size +cl_vector_apply_func +cl_vector_find_from_start +cl_vector_find_from_end +cl_waitobj_create +cl_waitobj_destroy +cl_waitobj_signal +cl_waitobj_reset +cl_waitobj_wait_on +#endif diff --git a/branches/Ndi/core/complib/user/makefile 
b/branches/Ndi/core/complib/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/core/complib/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/core/dirs b/branches/Ndi/core/dirs new file mode 100644 index 00000000..ec1177bf --- /dev/null +++ b/branches/Ndi/core/dirs @@ -0,0 +1,5 @@ +DIRS=\ + complib \ + al \ + bus \ + iou diff --git a/branches/Ndi/core/iou/dirs b/branches/Ndi/core/iou/dirs new file mode 100644 index 00000000..ed41dcf4 --- /dev/null +++ b/branches/Ndi/core/iou/dirs @@ -0,0 +1,2 @@ +DIRS=\ + kernel diff --git a/branches/Ndi/core/iou/kernel/SOURCES b/branches/Ndi/core/iou/kernel/SOURCES new file mode 100644 index 00000000..141da4c2 --- /dev/null +++ b/branches/Ndi/core/iou/kernel/SOURCES @@ -0,0 +1,44 @@ +TARGETNAME=ibiou +TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + + +SOURCES= ibiou.rc \ + iou_driver.c \ + iou_pnp.c \ + iou_ioc_mgr.c + +INCLUDES=..\..\..\inc;..\..\..\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -DNEED_CL_OBJ -DWPP_OLDCC + + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib + +!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K" +# +# The driver is built in the Win2K build environment +# - use the library version of safe strings +# +TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib +!endif + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ + -scan:iou_driver.h \ + -func:IOU_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:IOU_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) + +!ENDIF + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/core/iou/kernel/ibiou.rc b/branches/Ndi/core/iou/kernel/ibiou.rc new file mode 100644 index 00000000..56b995af --- /dev/null +++ b/branches/Ndi/core/iou/kernel/ibiou.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniBand I/O Unit Driver (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "InfiniBand I/O Unit Driver" +#endif + +#define VER_INTERNALNAME_STR "ibiou.sys" +#define VER_ORIGINALFILENAME_STR "ibiou.sys" + +#include diff --git a/branches/Ndi/core/iou/kernel/iou_driver.c b/branches/Ndi/core/iou/kernel/iou_driver.c new file mode 100644 index 00000000..7b33741e --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_driver.c @@ -0,0 +1,266 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Provides the driver entry points for the InfiniBand I/O Unit Bus Driver. + */ + +#include +#include "iou_driver.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "iou_driver.tmh" +#endif +#include "iou_pnp.h" +#include + + +iou_globals_t iou_globals = { + NULL +}; + +uint32_t g_iou_dbg_level = TRACE_LEVEL_ERROR; +uint32_t g_iou_dbg_flags = 0x00000fff; + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_Param_Path ); + +static NTSTATUS +iou_drv_open( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ); + +static NTSTATUS +iou_drv_close( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ); + +static NTSTATUS +iou_drv_ioctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ); + +/***f* InfiniBand Bus Driver/iou_sysctl +* NAME +* iou_sysctl +* +* DESCRIPTION +* Entry point for handling WMI IRPs. +* +* SYNOPSIS +*/ +static NTSTATUS +iou_sysctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ); +/**********/ + +static void +iou_unload( + IN DRIVER_OBJECT *p_driver_obj ); + +NTSTATUS +DriverEntry( + IN DRIVER_OBJECT *p_driver_obj, + IN UNICODE_STRING *p_registry_path ); + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (INIT, DriverEntry) +#pragma alloc_text (INIT, __read_registry) +#pragma alloc_text (PAGE, iou_unload) +#pragma alloc_text (PAGE, iou_drv_open) +#pragma alloc_text (PAGE, iou_drv_close) +#pragma alloc_text (PAGE, iou_drv_ioctl) +#pragma alloc_text (PAGE_PNP, iou_sysctl) +#endif + + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_registry_path ) +{ + NTSTATUS status; + /* Remember the terminating entry in the table below. 
*/ + RTL_QUERY_REGISTRY_TABLE table[3]; + UNICODE_STRING param_path; + + IOU_ENTER( IOU_DBG_DRV ); + + RtlInitUnicodeString( &param_path, NULL ); + param_path.MaximumLength = p_registry_path->Length + + sizeof(L"\\Parameters"); + param_path.Buffer = cl_zalloc( param_path.MaximumLength ); + if( !param_path.Buffer ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate parameters path buffer.\n") ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + RtlAppendUnicodeStringToString( &param_path, p_registry_path ); + RtlAppendUnicodeToString( &param_path, L"\\Parameters" ); + + /* + * Clear the table. This clears all the query callback pointers, + * and sets up the terminating table entry. + */ + cl_memclr( table, sizeof(table) ); + + /* Setup the table entries. */ + table[0].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[0].Name = L"DebugLevel"; + table[0].EntryContext = &g_iou_dbg_level; + table[0].DefaultType = REG_DWORD; + table[0].DefaultData = &g_iou_dbg_level; + table[0].DefaultLength = sizeof(ULONG); + + table[1].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[1].Name = L"DebugFlags"; + table[1].EntryContext = &g_iou_dbg_flags; + table[1].DefaultType = REG_DWORD; + table[1].DefaultData = &g_iou_dbg_flags; + table[1].DefaultLength = sizeof(ULONG); + /* Have at it! */ + status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, + param_path.Buffer, table, NULL, NULL ); + +#ifndef EVENT_TRACING + if( g_iou_dbg_flags & IOU_DBG_ERR ) + g_iou_dbg_flags |= CL_DBG_ERROR; +#endif + + IOU_PRINT( TRACE_LEVEL_INFORMATION, IOU_DBG_DRV, + ("debug level %d debug flags 0x%.8x\n", + g_iou_dbg_level, + g_iou_dbg_flags) ); + + cl_free( param_path.Buffer ); + IOU_EXIT( IOU_DBG_DRV ); + return status; +} + + +static NTSTATUS +iou_sysctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) +{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_DRV ); + + CL_ASSERT( p_dev_obj ); + CL_ASSERT( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->p_next_do ) + { + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->p_next_do, p_irp ); + } + else + { + status = p_irp->IoStatus.Status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + } + + IOU_EXIT( IOU_DBG_DRV ); + return status; +} + + +static void +iou_unload( + IN DRIVER_OBJECT *p_driver_obj ) +{ + IOU_ENTER( IOU_DBG_DRV ); +#if defined(EVENT_TRACING) + WPP_CLEANUP( p_driver_obj ); +#endif + + UNUSED_PARAM( p_driver_obj ); + + CL_DEINIT; + + IOU_EXIT( IOU_DBG_DRV ); +} + + +NTSTATUS +DriverEntry( + IN DRIVER_OBJECT *p_driver_obj, + IN UNICODE_STRING *p_registry_path ) +{ + NTSTATUS status; + + IOU_ENTER( IOU_DBG_DRV ); + +#if defined(EVENT_TRACING) + WPP_INIT_TRACING( p_driver_obj, p_registry_path ); +#endif + + status = CL_INIT; + if( !NT_SUCCESS(status) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_init returned %08X.\n", status) ); + return status; + } + + /* Store the driver object pointer in the global parameters. */ + iou_globals.p_driver_obj = p_driver_obj; + + /* Get the registry values. */ + status = __read_registry( p_registry_path ); + if( !NT_SUCCESS(status) ) + { + CL_DEINIT; + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("__read_registry returned %08x.\n", status) ); + return status; + } + + /* Setup the entry points.
*/ + p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp; + p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power; + p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = iou_sysctl; + p_driver_obj->DriverUnload = iou_unload; + p_driver_obj->DriverExtension->AddDevice = iou_add_device; + + IOU_EXIT( IOU_DBG_DRV ); + return STATUS_SUCCESS; +} diff --git a/branches/Ndi/core/iou/kernel/iou_driver.h b/branches/Ndi/core/iou/kernel/iou_driver.h new file mode 100644 index 00000000..381630ee --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_driver.h @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined _IOU_DRIVER_H_ +#define _IOU_DRIVER_H_ + +#include "complib/cl_types.h" +#include "complib/cl_atomic.h" +#include "complib/cl_debug.h" +#include "complib/cl_mutex.h" +#include "complib/cl_qlist.h" +#include "complib/cl_ptr_vector.h" +#include "complib/cl_pnp_po.h" +#include "iba/ib_al.h" +#include "iou_ioc_mgr.h" + +/* Safe string functions. */ +#if WINVER == 0x500 +/* + * Windows 2000 doesn't support the inline version of safe strings. + * Force the use of the library version of safe strings. + */ +#define NTSTRSAFE_LIB +#endif +#include <ntstrsafe.h> + +extern uint32_t g_iou_dbg_level; +extern uint32_t g_iou_dbg_flags; + +#if defined(EVENT_TRACING) +// +// Software Tracing Definitions +// + +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID(IOUCtlGuid,(A0090FEF,01BB,4617,AF1E,FD02FD5B24ED), \ + WPP_DEFINE_BIT( IOU_DBG_ERROR) \ + WPP_DEFINE_BIT( IOU_DBG_DRV) \ + WPP_DEFINE_BIT( IOU_DBG_PNP) \ + WPP_DEFINE_BIT( IOU_DBG_POWER) \ + WPP_DEFINE_BIT( IOU_DBG_PORT) \ + WPP_DEFINE_BIT( IOU_DBG_IOU)) + + + +#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl) +#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags) +#define WPP_FLAG_ENABLED(flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE) +#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
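The begin_wpp/end_wpp comment block below is read by the WPP trace preprocessor; the doubly parenthesized message argument is what lets a single macro accept a printf-style argument list in both the WPP build and the plain-debug build. A typical call site, in the style used throughout this driver:

    IOU_PRINT( TRACE_LEVEL_INFORMATION, IOU_DBG_DRV,
        ("debug level %d debug flags 0x%.8x\n",
        g_iou_dbg_level, g_iou_dbg_flags) );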
// begin_wpp config +// IOU_ENTER(FLAG); +// IOU_EXIT(FLAG); +// USEPREFIX(IOU_PRINT, "%!STDPREFIX! [IOU] :%!FUNC!() :"); +// USESUFFIX(IOU_ENTER, " [IOU] :%!FUNC!():["); +// USESUFFIX(IOU_EXIT, " [IOU] :%!FUNC!():]"); +// end_wpp + + +#else + + +#include <evntrace.h> + +/* + * Debug macros + */ + + +#define IOU_DBG_ERR (1 << 0) +#define IOU_DBG_DRV (1 << 1) +#define IOU_DBG_PNP (1 << 2) +#define IOU_DBG_POWER (1 << 3) +#define IOU_DBG_PORT (1 << 4) +#define IOU_DBG_IOU (1 << 5) + +#define IOU_DBG_ERROR (CL_DBG_ERROR | IOU_DBG_ERR) +#define IOU_DBG_ALL CL_DBG_ALL + +#if DBG + +// assignment of _level_ is needed to overcome warning C4127 +#define IOU_PRINT(_level_,_flag_,_msg_) \ + { \ + if( g_iou_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_iou_dbg_flags, _msg_ ); \ + } + +#define IOU_PRINT_EXIT(_level_,_flag_,_msg_) \ + { \ + if( g_iou_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_iou_dbg_flags, _msg_ );\ + IOU_EXIT(_flag_);\ + } + +#define IOU_ENTER(_flag_) \ + { \ + if( g_iou_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_ENTER( _flag_, g_iou_dbg_flags ); \ + } + +#define IOU_EXIT(_flag_)\ + { \ + if( g_iou_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_EXIT( _flag_, g_iou_dbg_flags ); \ + } + + +#else + +#define IOU_PRINT(lvl, flags, msg) + +#define IOU_PRINT_EXIT(_level_,_flag_,_msg_) + +#define IOU_ENTER(_flag_) + +#define IOU_EXIT(_flag_) + + +#endif + + +#endif //EVENT_TRACING + +/* + * Main header for the IB I/O unit (IOU) driver. + */ + + + +/* + * ALLOC_PRAGMA sections: + * PAGE + * Default pagable code. Won't be locked in memory. + * + * PAGE_PNP + * Code that needs to be locked in memory when the device is + * in the paging, crash dump, or hibernation path. + */ + + +/* + * Device extension for the device object that serves as entry point for + * the interface and IOCTL requests. + */ +typedef struct _iou_fdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + /* + * Device power map returned by the bus driver for the device, used + * when sending IRP_MN_SET_POWER for device state in response to + * IRP_MN_SET_POWER for system state. + */ + DEVICE_POWER_STATE po_state[PowerSystemMaximum]; + + ioc_mgr_t ioc_mgr; + +} iou_fdo_ext_t; + + +/* + * Device extension for bus driver PDOs. + */ +typedef struct _iou_pdo_ext +{ + cl_pnp_po_ext_t cl_ext; + + cl_list_item_t list_item; + + /* All reported PDOs are children of an HCA. */ + ib_ca_handle_t h_ca; + + /* + * CA GUID copy - in case we get IRPs after the CA + * handle has been released. + */ + net64_t ca_guid; + POWER_STATE dev_po_state; + + /* + * Pointer to the bus root device extension. Used to manage access to + * child PDO pointer vector when a child is removed politely. + */ + iou_fdo_ext_t *p_parent_ext; + + /* + * The following two flags are exclusively set, but can both be FALSE. + * Flag that indicates whether the device is present in the system or not. + * This affects how a IRP_MN_REMOVE_DEVICE IRP is handled for a child PDO. + * This flag is cleared when: + * - an HCA (for IPoIB devices) is removed from the system for all port + * devices loaded for that HCA + * - an IOU is reported as removed by the CIA. + */ + boolean_t b_present; + + /* + * Flag that indicates whether the device has been reported to the PnP + * manager as having been removed. That is, the device was reported + * in a previous BusRelations query and not in a subsequent one. + * This flag is set when + * - the device is in the surprise remove state when the parent bus + * device is removed + * - the device is found to be not present during a BusRelations query + * and thus not reported. + */ + boolean_t b_reported_missing; + +} iou_pdo_ext_t; + + +/* + * Global Driver parameters.
+ */ +typedef struct _iou_globals +{ + /* Driver object. Used for creating child devices. */ + DRIVER_OBJECT *p_driver_obj; + +} iou_globals_t; + + +extern iou_globals_t iou_globals; + + +#endif /* !defined _IOU_DRIVER_H_ */ diff --git a/branches/Ndi/core/iou/kernel/iou_ioc_mgr.c b/branches/Ndi/core/iou/kernel/iou_ioc_mgr.c new file mode 100644 index 00000000..eb313641 --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_ioc_mgr.c @@ -0,0 +1,1389 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include +#include +#include +#include "iou_driver.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "iou_ioc_mgr.tmh" +#endif +#include "iou_pnp.h" +#include "iou_ioc_mgr.h" +#include +#include +#include "iba/ioc_ifc.h" + + +/* {5A9649F4-0101-4a7c-8337-796C48082DA2} */ +DEFINE_GUID(GUID_BUS_TYPE_IBA, +0x5a9649f4, 0x101, 0x4a7c, 0x83, 0x37, 0x79, 0x6c, 0x48, 0x8, 0x2d, 0xa2); + + +/* + * Size of device descriptions, as defined in + * A1.2.3.1.1 - Creating Compatibility Strings for an I/O Controller + */ +#define IOC_DEV_ID_SIZE \ + sizeof(L"IBA\\VxxxxxxPxxxxxxxxSxxxxxxsxxxxxxxxvxxxx") +#define IOC_HW_ID_SIZE \ + sizeof(L"IBA\\VxxxxxxPxxxxxxxxSxxxxxxsxxxxxxxxvxxxx") + \ + sizeof(L"IBA\\VxxxxxxPxxxxxxxxSxxxxxxsxxxxxxxx") + \ + sizeof(L"IBA\\VxxxxxxPxxxxxxxxvxxxx") + \ + sizeof(L"IBA\\VxxxxxxPxxxxxxxx\0\0") +#define IOC_COMPAT_ID_SIZE \ + sizeof(L"IBA\\Cxxxxcxxxxpxxxxrxxxx") + \ + sizeof(L"IBA\\Cxxxxcxxxxpxxxx\0\0") +#define IOC_LOCATION_SIZE \ + sizeof(L"Chassis 0xxxxxxxxxxxxxxxxx, Slot xx, IOC xx") + +/* + * Device extension for IOU PDOs. + */ +typedef struct _ioc_ext +{ + iou_pdo_ext_t pdo; + + ib_ioc_info_t info; + ib_svc_entry_t svc_entries[1]; + +} ioc_ext_t;
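As a worked illustration of the placeholder widths above (hypothetical profile values, not from the patch): vendor ID 0x00066A, device ID 0x00000030, subsystem vendor ID 0x00066A, subsystem ID 0x00000038 and device version 0x0001 would produce the device ID

    IBA\V00066aP00000030S00066as00000038v0001

with progressively less specific hardware IDs down to IBA\V00066aP00000030, which is what ioc_query_device_id and ioc_query_hardware_ids below format with RtlStringCbPrintfW.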
/* + * Function prototypes. + */ +void +destroying_ioc_mgr( + IN cl_obj_t* p_obj ); + +void +free_ioc_mgr( + IN cl_obj_t* p_obj ); + +ib_api_status_t +ioc_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ); + +ib_api_status_t +ioc_mgr_ioc_add( + IN ib_pnp_ioc_rec_t* p_pnp_rec ); + +void +ioc_mgr_ioc_remove( + IN ib_pnp_ioc_rec_t* p_pnp_rec ); + +static NTSTATUS +ioc_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +ioc_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +ioc_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_query_device_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_hardware_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_compatible_ids( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_unique_id( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_description( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_location( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp ); + +static NTSTATUS +ioc_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +ioc_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + + +/* All PnP code is called at passive, so it can all be paged out. */ +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, ioc_start) +#pragma alloc_text (PAGE, ioc_release_resources) +#pragma alloc_text (PAGE, ioc_remove) +#pragma alloc_text (PAGE, ioc_surprise_remove) +#pragma alloc_text (PAGE, ioc_query_capabilities) +#pragma alloc_text (PAGE, ioc_query_target_relations) +#pragma alloc_text (PAGE, ioc_query_device_id) +#pragma alloc_text (PAGE, ioc_query_hardware_ids) +#pragma alloc_text (PAGE, ioc_query_compatible_ids) +#pragma alloc_text (PAGE, ioc_query_unique_id) +#pragma alloc_text (PAGE, ioc_query_description) +#pragma alloc_text (PAGE, ioc_query_location) +#pragma alloc_text (PAGE, ioc_query_bus_info) +#pragma alloc_text (PAGE, ioc_query_interface) +#pragma alloc_text (PAGE_PNP, ioc_set_power) +#pragma alloc_text (PAGE, ioc_mgr_ioc_add) +#pragma alloc_text (PAGE, ioc_mgr_ioc_remove) +#endif + + +/* + * Global virtual function pointer tables shared between all + * instances of IOC PDOs.
+ */ +static const cl_vfptr_pnp_po_t vfptr_ioc_pnp = { + "IB IOC", + ioc_start, + cl_irp_succeed, + cl_irp_succeed, + cl_irp_succeed, + cl_irp_succeed, + ioc_release_resources, + ioc_remove, + cl_irp_succeed, + ioc_surprise_remove, + ioc_query_capabilities, + cl_irp_complete, + cl_irp_complete, + cl_irp_succeed, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + ioc_query_target_relations, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + ioc_query_bus_info, + ioc_query_interface, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + cl_irp_complete, + cl_irp_succeed, // QueryPower + ioc_set_power, // SetPower + cl_irp_unsupported, // PowerSequence + cl_irp_unsupported // WaitWake +}; + + +static const cl_vfptr_query_txt_t vfptr_iou_query_txt = { + ioc_query_device_id, + ioc_query_hardware_ids, + ioc_query_compatible_ids, + ioc_query_unique_id, + ioc_query_description, + ioc_query_location +}; + + +void +ioc_mgr_construct( + IN OUT ioc_mgr_t* const p_ioc_mgr ) +{ + IOU_ENTER( IOU_DBG_PNP ); + + /* Construct the IOC manager. */ + cl_obj_construct( &p_ioc_mgr->obj, 0 ); + cl_mutex_construct( &p_ioc_mgr->pdo_mutex ); + cl_qlist_init( &p_ioc_mgr->ioc_list ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + +ib_api_status_t +ioc_mgr_init( + IN OUT ioc_mgr_t* const p_ioc_mgr ) +{ + ib_pnp_req_t pnp_req; + ib_api_status_t status; + cl_status_t cl_status; + + IOU_ENTER( IOU_DBG_PNP ); + + cl_status = cl_mutex_init( &p_ioc_mgr->pdo_mutex ); + if( cl_status != CL_SUCCESS ) + { + free_ioc_mgr( &p_ioc_mgr->obj ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_mutex_init returned %s.\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + /* Initialize the IOC manager object. */ + cl_status = cl_obj_init( &p_ioc_mgr->obj, CL_DESTROY_SYNC, + destroying_ioc_mgr, NULL, free_ioc_mgr ); + if( cl_status != CL_SUCCESS ) + { + free_ioc_mgr( &p_ioc_mgr->obj ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_obj_init returned %s.\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + status = p_ioc_mgr->ifc.open_al( &p_ioc_mgr->h_al ); + if( status != IB_SUCCESS ) + { + cl_obj_destroy( &p_ioc_mgr->obj ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("open_al returned %s.\n", + p_ioc_mgr->ifc.get_err_str(status)) ); + return status; + } + + /* Register for IOC PnP events. */ + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_IOC; + pnp_req.pnp_context = p_ioc_mgr; + pnp_req.pfn_pnp_cb = ioc_mgr_pnp_cb; + + status = p_ioc_mgr->ifc.reg_pnp( + p_ioc_mgr->h_al, &pnp_req, &p_ioc_mgr->h_pnp ); + if( status != IB_SUCCESS ) + { + cl_obj_destroy( &p_ioc_mgr->obj ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("ib_reg_pnp returned %s.\n", + p_ioc_mgr->ifc.get_err_str(status)) ); + return status; + } + + /* Reference the IOC manager on behalf of the ib_reg_pnp call. */ + cl_obj_ref( &p_ioc_mgr->obj ); + + IOU_EXIT( IOU_DBG_PNP ); + return IB_SUCCESS; +} + + +/* + * Pre-destroy the IOC manager. + */ +void +destroying_ioc_mgr( + IN cl_obj_t* p_obj ) +{ + ioc_mgr_t *p_ioc_mgr; + ib_api_status_t status; + + IOU_ENTER( IOU_DBG_PNP ); + + CL_ASSERT( p_obj ); + + p_ioc_mgr = PARENT_STRUCT( p_obj, ioc_mgr_t, obj ); + + /* Deregister for IOC PnP events. */ + if( p_ioc_mgr->h_pnp ) + { + status = p_ioc_mgr->ifc.dereg_pnp( + p_ioc_mgr->h_pnp, (ib_pfn_destroy_cb_t)cl_obj_deref ); + CL_ASSERT( status == IB_SUCCESS ); + } + IOU_EXIT( IOU_DBG_PNP ); +}
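The calls above follow the complib object model used throughout this patch; a condensed sketch of that lifecycle under the signatures used here (p_mgr, destroying_cb and free_cb are placeholder names):

    /* Sketch only; mirrors the cl_obj calls made above. */
    cl_obj_construct( &p_mgr->obj, 0 );           /* make teardown safe early */
    cl_obj_init( &p_mgr->obj, CL_DESTROY_SYNC,
        destroying_cb, NULL, free_cb );           /* wire up teardown callbacks */
    cl_obj_ref( &p_mgr->obj );                    /* e.g. one ref per PnP registration */
    /* ... */
    cl_obj_deref( &p_mgr->obj );                  /* released from async callbacks */
    cl_obj_destroy( &p_mgr->obj );                /* destroying_cb runs first; free_cb
                                                     runs once all references are gone */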
/* + * Free the IOC manager. + */ +void +free_ioc_mgr( + IN cl_obj_t* p_obj ) +{ + ioc_mgr_t *p_ioc_mgr; + ioc_ext_t *p_iou_ext; + cl_list_item_t *p_list_item; + + IOU_ENTER( IOU_DBG_PNP ); + + CL_ASSERT( p_obj ); + p_ioc_mgr = PARENT_STRUCT( p_obj, ioc_mgr_t, obj ); + + /* + * Mark all IOCs as no longer present. This will cause them + * to be removed when they process the IRP_MN_REMOVE_DEVICE. + */ + p_list_item = cl_qlist_remove_head( &p_ioc_mgr->ioc_list ); + while( p_list_item != cl_qlist_end( &p_ioc_mgr->ioc_list ) ) + { + p_iou_ext = PARENT_STRUCT( + PARENT_STRUCT( p_list_item, iou_pdo_ext_t, list_item ), + ioc_ext_t, pdo ); + p_list_item = cl_qlist_remove_head( &p_ioc_mgr->ioc_list ); + if( p_iou_ext->pdo.cl_ext.pnp_state == SurpriseRemoved ) + { + CL_ASSERT( !p_iou_ext->pdo.b_present ); + p_iou_ext->pdo.b_reported_missing = TRUE; + continue; + } + IoDeleteDevice( p_iou_ext->pdo.cl_ext.p_self_do ); + } + + cl_mutex_destroy( &p_ioc_mgr->pdo_mutex ); + cl_obj_deinit( p_obj ); + IOU_EXIT( IOU_DBG_PNP ); +} + + +/* + * IOC manager PnP event callback. + */ +ib_api_status_t +ioc_mgr_pnp_cb( + IN ib_pnp_rec_t* p_pnp_rec ) +{ + ib_api_status_t status; + ioc_mgr_t *p_ioc_mgr; + + IOU_ENTER( IOU_DBG_PNP ); + + CL_ASSERT( p_pnp_rec ); + p_ioc_mgr = (ioc_mgr_t* __ptr64)p_pnp_rec->pnp_context; + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_IOC_ADD: + status = ioc_mgr_ioc_add( (ib_pnp_ioc_rec_t*)p_pnp_rec ); + break; + + case IB_PNP_IOC_REMOVE: + ioc_mgr_ioc_remove( (ib_pnp_ioc_rec_t*)p_pnp_rec ); + /* Fall through to return IB_SUCCESS. */ + + default: + status = IB_SUCCESS; + break; + } + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +/* + * Called to get child relations for the bus root. + */ +NTSTATUS +ioc_mgr_get_iou_relations( + IN ioc_mgr_t* const p_ioc_mgr, + IN IRP* const p_irp ) +{ + NTSTATUS status; + size_t n_devs; + DEVICE_RELATIONS *p_rel; + + IOU_ENTER( IOU_DBG_PNP ); + + /* If there are already relations, copy them. */ + cl_mutex_acquire( &p_ioc_mgr->pdo_mutex ); + n_devs = cl_qlist_count( &p_ioc_mgr->ioc_list ); + if( !n_devs ) + { + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("No child PDOs.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + /* Add space for our child IOCs.
*/ + status = cl_alloc_relations( p_irp, n_devs ); + if( !NT_SUCCESS( status ) ) + { + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_alloc_relations returned %08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + update_relations( &p_ioc_mgr->ioc_list, p_rel ); + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +ib_api_status_t +ioc_mgr_ioc_add( + IN ib_pnp_ioc_rec_t* p_pnp_rec ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_pdo; + iou_fdo_ext_t *p_ext; + ioc_mgr_t *p_ioc_mgr; + ioc_ext_t *p_ioc_ext; + uint32_t ext_size; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ioc_mgr = PARENT_STRUCT( p_pnp_rec->pnp_rec.pnp_context, ioc_mgr_t, obj ); + p_ext = PARENT_STRUCT( p_ioc_mgr, iou_fdo_ext_t, ioc_mgr ); + + if( p_pnp_rec->ca_guid != p_ioc_mgr->info.ca_guid || + p_pnp_rec->info.chassis_guid != p_ioc_mgr->info.chassis_guid || + p_pnp_rec->info.chassis_slot != p_ioc_mgr->info.slot || + p_pnp_rec->info.iou_guid != p_ioc_mgr->info.guid ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("IOC not in this IOU.\n") ); + return IB_NOT_DONE; + } + + ext_size = sizeof(ioc_ext_t) + + (sizeof(ib_svc_entry_t) * p_pnp_rec->info.profile.num_svc_entries); + + /* Create the PDO for the new IOC device. */ + status = IoCreateDevice( iou_globals.p_driver_obj, ext_size, + NULL, FILE_DEVICE_CONTROLLER, + FILE_DEVICE_SECURE_OPEN | FILE_AUTOGENERATED_DEVICE_NAME, + FALSE, &p_pdo ); + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("IoCreateDevice returned %08x.\n", status) ); + return IB_ERROR; + } + + /* Initialize the device extension. */ + cl_init_pnp_po_ext( p_pdo, NULL, p_pdo, g_iou_dbg_flags, + &vfptr_ioc_pnp, &vfptr_iou_query_txt ); + + /* Set the DO_BUS_ENUMERATED_DEVICE flag to mark it as a PDO. */ + p_pdo->Flags |= DO_BUS_ENUMERATED_DEVICE; + + p_ioc_ext = p_pdo->DeviceExtension; + p_ioc_ext->pdo.dev_po_state.DeviceState = PowerDeviceD0; + p_ioc_ext->pdo.p_parent_ext = p_ext; + p_ioc_ext->pdo.b_present = TRUE; + p_ioc_ext->pdo.b_reported_missing = FALSE; + p_ioc_ext->pdo.ca_guid = p_pnp_rec->ca_guid; + + /* Copy the IOC profile and service entries (whole entries, not bytes). */ + p_ioc_ext->info = p_pnp_rec->info; + cl_memcpy( p_ioc_ext->svc_entries, p_pnp_rec->svc_entry_array, + p_pnp_rec->info.profile.num_svc_entries * sizeof(ib_svc_entry_t) ); + /* Make sure the IOC string is null terminated. */ + p_ioc_ext->info.profile.id_string[CTRL_ID_STRING_LEN-1] = '\0'; + + /* Store the device extension in the PDO list for future queries. */ + cl_mutex_acquire( &p_ioc_mgr->pdo_mutex ); + cl_qlist_insert_tail( &p_ioc_mgr->ioc_list, + &p_ioc_ext->pdo.list_item ); + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + + /* + * Set the context of the PNP event. The context is passed in for future + * events on the same IOC. + */ + p_pnp_rec->pnp_rec.context = p_ioc_ext; + + /* Tell the PnP Manager to rescan for bus relations. */ + IoInvalidateDeviceRelations( p_ext->cl_ext.p_pdo, BusRelations ); + + IOU_EXIT( IOU_DBG_PNP ); + return IB_SUCCESS; +} + + +void +ioc_mgr_ioc_remove( + IN ib_pnp_ioc_rec_t* p_pnp_rec ) +{ + ioc_mgr_t *p_ioc_mgr; + ioc_ext_t *p_ioc_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + /* The PNP record's context is the IOC's device extension. */ + p_ioc_ext = p_pnp_rec->pnp_rec.context; + CL_ASSERT( p_ioc_ext ); + + p_ioc_mgr = &p_ioc_ext->pdo.p_parent_ext->ioc_mgr; + /* + * Flag the IOC as no longer being present.
We have to wait until + * the PnP manager removes it to clean up. + */ + cl_mutex_acquire( &p_ioc_mgr->pdo_mutex ); + p_ioc_ext->pdo.b_present = FALSE; + + /* Invalidate bus relations for the bus root. */ + IoInvalidateDeviceRelations( + p_ioc_ext->pdo.p_parent_ext->cl_ext.p_pdo, BusRelations ); + + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + +static NTSTATUS +ioc_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + iou_pdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Notify the Power Manager that the device is started. */ + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + + *p_action = IrpComplete; + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +ioc_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + ioc_mgr_t *p_ioc_mgr; + ioc_ext_t *p_ext; + POWER_STATE po_state; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + p_ioc_mgr = &p_ext->pdo.p_parent_ext->ioc_mgr; + + /* Remove this PDO from its list. */ + cl_mutex_acquire( &p_ioc_mgr->pdo_mutex ); + IOU_PRINT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("Removing IOC from list.\n") ); + cl_qlist_remove_item( &p_ioc_mgr->ioc_list, &p_ext->pdo.list_item ); + cl_mutex_release( &p_ioc_mgr->pdo_mutex ); + po_state.DeviceState = PowerDeviceD3; + PoSetPowerState( p_ext->pdo.cl_ext.p_pdo, DevicePowerState, po_state ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + +static NTSTATUS +ioc_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + ioc_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->pdo.b_present ) + { + CL_ASSERT( p_ext->pdo.cl_ext.pnp_state != NotStarted ); + CL_ASSERT( !p_ext->pdo.b_reported_missing ); + /* Reset the state to NotStarted. CompLib set it to Deleted. */ + cl_set_pnp_state( &p_ext->pdo.cl_ext, NotStarted ); + /* Don't delete the device. It may simply be disabled. */ + *p_action = IrpComplete; + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("Device still present.\n") ); + return STATUS_SUCCESS; + } + + if( !p_ext->pdo.b_reported_missing ) + { + /* Reset the state to RemovePending. Complib set it to Deleted. */ + cl_rollback_pnp_state( &p_ext->pdo.cl_ext ); + *p_action = IrpComplete; + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("Device not reported missing yet.\n") ); + return STATUS_SUCCESS; + } + + /* Wait for all I/O operations to complete. */ + IoReleaseRemoveLockAndWait( &p_ext->pdo.cl_ext.remove_lock, p_irp ); + + /* Release resources if it was not done yet. 
*/ + if( p_ext->pdo.cl_ext.last_pnp_state != SurpriseRemoved ) + p_ext->pdo.cl_ext.vfptr_pnp_po->pfn_release_resources( p_dev_obj ); + + p_irp->IoStatus.Status = STATUS_SUCCESS; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + + IoDeleteDevice( p_dev_obj ); + + *p_action = IrpDoNothing; + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + ioc_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + UNUSED_PARAM( p_irp ); + + p_ext = p_dev_obj->DeviceExtension; + p_ext->pdo.b_present = FALSE; + p_ext->pdo.b_reported_missing = TRUE; + + *p_action = IrpComplete; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + DEVICE_CAPABILITIES *p_caps; + IO_STACK_LOCATION *p_io_stack; + + IOU_ENTER( IOU_DBG_PNP ); + + UNUSED_PARAM( p_dev_obj ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + p_caps = p_io_stack->Parameters.DeviceCapabilities.Capabilities; + + p_caps->DeviceD1 = FALSE; + p_caps->DeviceD2 = FALSE; + p_caps->LockSupported = FALSE; + p_caps->EjectSupported = FALSE; + p_caps->Removable = TRUE; + p_caps->DockDevice = FALSE; + p_caps->UniqueID = TRUE; + p_caps->SilentInstall = TRUE; + p_caps->RawDeviceOK = FALSE; + p_caps->SurpriseRemovalOK = FALSE; + p_caps->WakeFromD0 = FALSE; + p_caps->WakeFromD1 = FALSE; + p_caps->WakeFromD2 = FALSE; + p_caps->WakeFromD3 = FALSE; + p_caps->HardwareDisabled = FALSE; + p_caps->DeviceState[PowerSystemWorking] = PowerDeviceD0; + p_caps->DeviceState[PowerSystemSleeping1] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping2] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemSleeping3] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemHibernate] = PowerDeviceD3; + p_caps->DeviceState[PowerSystemShutdown] = PowerDeviceD3; + p_caps->SystemWake = PowerSystemUnspecified; + p_caps->DeviceWake = PowerDeviceUnspecified; + p_caps->D1Latency = 0; + p_caps->D2Latency = 0; + p_caps->D3Latency = 0; + + *p_action = IrpComplete; + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_target_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + + IOU_ENTER( IOU_DBG_PNP ); + + *p_action = IrpComplete; + + status = cl_alloc_relations( p_irp, 1 ); + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_alloc_relations returned 0x%08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + p_rel->Count = 1; + p_rel->Objects[0] = p_dev_obj; + + ObReferenceObject( p_dev_obj ); + + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +static NTSTATUS +ioc_query_device_id( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + ioc_ext_t *p_ext; + WCHAR *p_string; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = (ioc_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, IOC_DEV_ID_SIZE ); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate device ID buffer (%d bytes).\n", + IOC_DEV_ID_SIZE) ); + return 
STATUS_INSUFFICIENT_RESOURCES; + } + + status = RtlStringCbPrintfW( p_string, IOC_DEV_ID_SIZE, + L"IBA\\V%06xP%08xS%06xs%08xv%04x", + ib_ioc_profile_get_vend_id( &p_ext->info.profile ), + cl_ntoh32( p_ext->info.profile.dev_id ), + ib_ioc_profile_get_subsys_vend_id( &p_ext->info.profile ), + cl_ntoh32( p_ext->info.profile.subsys_id ), + cl_ntoh16( p_ext->info.profile.dev_ver ) ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format device ID string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_hardware_ids( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + ioc_ext_t *p_ext; + WCHAR *p_string, *p_start; + size_t size; + uint32_t V,P,S,s; + uint16_t v; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = (ioc_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, IOC_HW_ID_SIZE ); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate hardware ID buffer (%d bytes).\n", + IOC_HW_ID_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + V = ib_ioc_profile_get_vend_id( &p_ext->info.profile ); + P = cl_ntoh32( p_ext->info.profile.dev_id ); + S = ib_ioc_profile_get_subsys_vend_id( &p_ext->info.profile ); + s = cl_ntoh32( p_ext->info.profile.subsys_id ); + v = cl_ntoh16( p_ext->info.profile.dev_ver ); + + /* Fill in the first hardware ID. */ + p_start = p_string; + size = IOC_HW_ID_SIZE; + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\V%06xP%08xS%06xs%08xv%04x", + V, P, S, s, v ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format hardware ID string.\n") ); + return status; + } + /* Fill in the second hardware ID. */ + p_start++; + size -= sizeof(WCHAR); + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\V%06xP%08xS%06xs%08x", V, P, S, s ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format hardware ID string.\n") ); + return status; + } + /* Fill in the third hardware ID. */ + p_start++; + size -= sizeof(WCHAR); + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\V%06xP%08xv%04x", V, P, v ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format hardware ID string.\n") ); + return status; + } + /* Fill in the fourth hardware ID. 
+ */ + p_start++; + size -= sizeof(WCHAR); + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\V%06xP%08x", V, P ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format hardware ID string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_compatible_ids( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + ioc_ext_t *p_ext; + WCHAR *p_string, *p_start; + size_t size; + uint16_t C, c, p, r; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = (ioc_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, IOC_COMPAT_ID_SIZE ); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate compatible ID buffer (%d bytes).\n", + IOC_COMPAT_ID_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + C = cl_ntoh16( p_ext->info.profile.io_class ); + c = cl_ntoh16( p_ext->info.profile.io_subclass ); + p = cl_ntoh16( p_ext->info.profile.protocol ); + r = cl_ntoh16( p_ext->info.profile.protocol_ver ); + + p_start = p_string; + size = IOC_COMPAT_ID_SIZE; + /* Fill in the first compatible ID. */ + status = RtlStringCbPrintfExW( p_start, size, &p_start, &size, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\C%04xc%04xp%04xr%04x", C, c, p, r ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format compatible ID string.\n") ); + return status; + } + /* Fill in the second compatible ID. */ + p_start++; + size -= sizeof(WCHAR); + status = RtlStringCbPrintfExW( p_start, size, NULL, NULL, + STRSAFE_FILL_BEHIND_NULL, L"IBA\\C%04xc%04xp%04x", C, c, p ); + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format compatible ID string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +}
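For illustration (hypothetical GUID values, not from the patch): the format string in ioc_query_unique_id below concatenates the IOC GUID and the CA GUID as two zero-padded 16-digit hex fields, so 0x0002C9010A0B0C0D and 0x0002C90200000001 would yield the unique ID

    0002c9010a0b0c0d0002c90200000001

which is why the buffer is sized for 32 characters plus a terminator (33 WCHARs).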
*/ + p_string = ExAllocatePool( PagedPool, sizeof(WCHAR) * 33 ); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate instance ID buffer (%d bytes).\n", + sizeof(WCHAR) * 33) ); + return STATUS_NO_MEMORY; + } + + status = RtlStringCchPrintfW(p_string, 33, L"%016I64x%016I64x", + p_ext->info.profile.ioc_guid, p_ext->pdo.ca_guid); + if( !NT_SUCCESS( status ) ) + { + CL_ASSERT( NT_SUCCESS( status ) ); + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("RtlStringCchPrintfW returned %08x.\n", status) ); + return status; + } + + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_description( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + WCHAR *p_string; + ioc_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( + PagedPool, sizeof(WCHAR) * sizeof(p_ext->info.profile.id_string) ); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate device description buffer (%d bytes).\n", + sizeof(WCHAR) * sizeof(p_ext->info.profile.id_string)) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + if( ib_ioc_profile_get_vend_id( &p_ext->info.profile ) == 0x00066a && + p_ext->info.profile.dev_id == CL_HTON32(0x00000030) ) + { + status = RtlStringCchPrintfW( + p_string, sizeof(p_ext->info.profile.id_string), + L"SilverStorm Technologies VEx I/O Controller" ); + } + else if( ib_ioc_profile_get_vend_id( &p_ext->info.profile ) == 0x00066a && + p_ext->info.profile.dev_id == CL_HTON32(0x00000038) ) + { + status = RtlStringCchPrintfW( + p_string, sizeof(p_ext->info.profile.id_string), + L"SilverStorm Technologies VFx I/O Controller" ); + } + else + { + status = RtlStringCchPrintfW( + p_string, sizeof(p_ext->info.profile.id_string), + L"%S", p_ext->info.profile.id_string ); + } + if( !NT_SUCCESS( status ) ) + { + CL_ASSERT( NT_SUCCESS( status ) ); + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("RtlStringCchPrintfW returned %08x.\n", status) ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_location( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ) +{ + NTSTATUS status; + ioc_ext_t *p_ext; + WCHAR *p_string; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = (ioc_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + p_string = ExAllocatePool( PagedPool, + max( IOC_LOCATION_SIZE, + sizeof( WCHAR ) * ( sizeof( p_ext->info.profile.id_string ) + 1 ))); + if( !p_string ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate location buffer (%d bytes).\n", + IOC_LOCATION_SIZE) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + if( ib_ioc_profile_get_vend_id( &p_ext->info.profile ) == 0x00066a ) + { + status = RtlStringCchPrintfW( + p_string, sizeof(p_ext->info.profile.id_string), + L"%S", p_ext->info.profile.id_string ); + } + else + { + status = RtlStringCbPrintfW( p_string, IOC_LOCATION_SIZE, + L"Chassis 0x%016I64x, Slot %d, IOC %d", + cl_ntoh64(
p_ext->info.chassis_guid ), + p_ext->info.chassis_slot, p_ext->info.iou_slot ); + } + if( !NT_SUCCESS( status ) ) + { + ExFreePool( p_string ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to format location string.\n") ); + return status; + } + p_irp->IoStatus.Information = (ULONG_PTR)p_string; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +ioc_query_bus_info( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + ioc_ext_t *p_ext; + PNP_BUS_INFORMATION *p_iou_info; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = (ioc_ext_t*)p_dev_obj->DeviceExtension; + if( !p_ext->pdo.b_present ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Device not present.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + *p_action = IrpComplete; + + p_iou_info = ExAllocatePool( PagedPool, sizeof(PNP_BUS_INFORMATION) ); + if( !p_iou_info ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to allocate PNP_BUS_INFORMATION (%d bytes).\n", + sizeof(PNP_BUS_INFORMATION)) ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + + p_iou_info->BusTypeGuid = GUID_BUS_TYPE_IBA; + //TODO: Memory from Intel - storage miniport would not stay loaded unless + //TODO: bus type was PCI. Look here if SRP is having problems staying + //TODO: loaded. + p_iou_info->LegacyBusType = PNPBus; + p_iou_info->BusNumber = p_ext->info.iou_slot; + + p_irp->IoStatus.Information = (ULONG_PTR)p_iou_info; + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +__ref_ioc_ifc( + IN ioc_ext_t* p_ext ) +{ + IOU_ENTER( IOU_DBG_PNP ); + + cl_atomic_inc( &p_ext->pdo.p_parent_ext->cl_ext.n_ifc_ref ); + ObReferenceObject( p_ext->pdo.p_parent_ext->cl_ext.p_self_do ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + +static void +__deref_ioc_ifc( + IN ioc_ext_t* p_ext ) +{ + IOU_ENTER( IOU_DBG_PNP ); + + cl_atomic_dec( &p_ext->pdo.p_parent_ext->cl_ext.n_ifc_ref ); + ObDereferenceObject( p_ext->pdo.p_parent_ext->cl_ext.p_self_do ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + + + +static NTSTATUS +ioc_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + ib_al_ifc_t *p_ifc; + ib_al_ifc_data_t *p_ifc_data; + ioc_ifc_data_t *p_ioc_data; + ioc_ext_t *p_ext; + const GUID *p_guid; + + IOU_ENTER( IOU_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + p_guid = p_io_stack->Parameters.QueryInterface.InterfaceType; + /* Bottom of the stack - IRP must be completed. */ + *p_action = IrpComplete; + + /* Compare requested GUID with our supported interface GUIDs. */ + if( IsEqualGUID( p_guid, &GUID_BUS_INTERFACE_STANDARD ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("BUS_INTERFACE_STANDARD\n") ); + return cl_fwd_query_ifc( + p_ext->pdo.p_parent_ext->cl_ext.p_self_do, p_io_stack ); + } + + if( !IsEqualGUID( p_guid, &GUID_IB_AL_INTERFACE ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("Unsupported interface: \n\t" + "0x%08x, 0x%04x, 0x%04x, 0x%02x, 0x%02x, 0x%02x," + "0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x.\n", + p_guid->Data1, p_guid->Data2, p_guid->Data3, + p_guid->Data4[0], p_guid->Data4[1], p_guid->Data4[2], + p_guid->Data4[3], p_guid->Data4[4], p_guid->Data4[5], + p_guid->Data4[6], p_guid->Data4[7]) ); + return p_irp->IoStatus.Status; + } + + /* Get the interface.
*/ + status = cl_fwd_query_ifc( + p_ext->pdo.p_parent_ext->cl_ext.p_self_do, p_io_stack ); + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to forward interface query: %08X\n", status) ); + return status; + } + + if( !p_io_stack->Parameters.QueryInterface.InterfaceSpecificData ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("No interface specific data!\n") ); + return status; + } + + p_ifc = (ib_al_ifc_t*)p_io_stack->Parameters.QueryInterface.Interface; + + p_ifc_data = (ib_al_ifc_data_t*) + p_io_stack->Parameters.QueryInterface.InterfaceSpecificData; + p_guid = p_ifc_data->type; + if( !IsEqualGUID( p_guid, &GUID_IOC_INTERFACE_DATA ) || + p_ifc_data->version != IOC_INTERFACE_DATA_VERSION ) + { + p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Unsupported interface data: \n\t" + "0x%08x, 0x%04x, 0x%04x, 0x%02x, 0x%02x, 0x%02x," + "0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x.\n", + p_guid->Data1, p_guid->Data2, p_guid->Data3, + p_guid->Data4[0], p_guid->Data4[1], p_guid->Data4[2], + p_guid->Data4[3], p_guid->Data4[4], p_guid->Data4[5], + p_guid->Data4[6], p_guid->Data4[7]) ); + return STATUS_INVALID_PARAMETER; + } + + ASSERT( p_ifc_data->p_data ); + + if( p_ifc_data->size != sizeof(ioc_ifc_data_t) ) + { + p_ifc->wdm.InterfaceDereference( p_ifc->wdm.Context ); + IOU_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IOU_DBG_PNP, + ("Buffer too small (%d given, %d required).\n", + p_ifc_data->size, + sizeof(ioc_ifc_data_t)) ); + return STATUS_BUFFER_TOO_SMALL; + } + + /* Set the interface data. */ + p_ioc_data = (ioc_ifc_data_t*)p_ifc_data->p_data; + + p_ioc_data->ca_guid = p_ext->pdo.p_parent_ext->ioc_mgr.info.ca_guid; + p_ioc_data->guid = p_ext->info.profile.ioc_guid; + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +/* + * The PDOs created by the IB Bus driver are software devices. As such, + * all power states are supported. It is left to the HCA power policy + * owner to handle which states can be supported by the HCA. + */ +static NTSTATUS +ioc_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + IO_STACK_LOCATION *p_io_stack; + iou_pdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_POWER ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + if( p_io_stack->Parameters.Power.Type == DevicePowerState ) + { + /* Notify the power manager. */ + p_ext->dev_po_state = p_io_stack->Parameters.Power.State; + PoSetPowerState( p_dev_obj, DevicePowerState, p_ext->dev_po_state ); + } + + *p_action = IrpComplete; + IOU_EXIT( IOU_DBG_POWER ); + return STATUS_SUCCESS; +} diff --git a/branches/Ndi/core/iou/kernel/iou_ioc_mgr.h b/branches/Ndi/core/iou/kernel/iou_ioc_mgr.h new file mode 100644 index 00000000..96c09a45 --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_ioc_mgr.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined( __IOU_IOU_MGR_H__ ) +#define __IOU_IOU_MGR_H__ + +#include +#include +#include +#include + +/* Global load service */ +typedef struct _ioc_mgr +{ + cl_obj_t obj; + + ib_al_ifc_t ifc; + + ib_al_handle_t h_al; + ib_pnp_handle_t h_pnp; /* Handle for iou PnP events */ + + /* Attributes for this IOU. */ + iou_ifc_data_t info; + + /* Mutex protects the list of child IOC PDOs. */ + cl_mutex_t pdo_mutex; + + /* List of child IOC PDOs. */ + cl_qlist_t ioc_list; + +} ioc_mgr_t; + + +void +ioc_mgr_construct( + IN OUT ioc_mgr_t* const p_ioc_mgr ); + +ib_api_status_t +ioc_mgr_init( + IN OUT ioc_mgr_t* const p_ioc_mgr ); + +NTSTATUS +ioc_mgr_get_iou_relations( + IN ioc_mgr_t* const p_ioc_mgr, + IN IRP* const p_irp ); + +#endif diff --git a/branches/Ndi/core/iou/kernel/iou_pnp.c b/branches/Ndi/core/iou/kernel/iou_pnp.c new file mode 100644 index 00000000..3652c2ce --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_pnp.c @@ -0,0 +1,623 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Implementation of all PnP functionality for FDO (power policy owners).
+ */ + + +#include "iou_driver.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "iou_pnp.tmh" +#endif +#include "iou_pnp.h" +#include "iou_ioc_mgr.h" +#include +#include +#include +#include + + +static NTSTATUS +fdo_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +fdo_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +fdo_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +fdo_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +fdo_query_iou_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +fdo_query_interface( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__fdo_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +__fdo_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + + +/* All PnP code is called at passive, so it can all be paged out. */ +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, iou_add_device) +#pragma alloc_text (PAGE, fdo_start) +#pragma alloc_text (PAGE, fdo_query_remove) +#pragma alloc_text (PAGE, fdo_release_resources) +#pragma alloc_text (PAGE, fdo_query_capabilities) +#pragma alloc_text (PAGE, fdo_query_iou_relations) +#pragma alloc_text (PAGE_PNP, __fdo_query_power) +#pragma alloc_text (PAGE_PNP, __fdo_set_power) +#endif + + +/* Global virtual function pointer tables shared between all instances of FDO. */ +static const cl_vfptr_pnp_po_t vfptr_fdo_pnp = { + "IB IOU", + fdo_start, + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + fdo_query_remove, + fdo_release_resources, + cl_do_remove, + cl_do_sync_pnp, + cl_irp_skip, + fdo_query_capabilities, + cl_irp_skip, + cl_irp_skip, + cl_do_sync_pnp, + fdo_query_iou_relations, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, /* QueryInterface */ + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + __fdo_query_power, /* QueryPower */ + __fdo_set_power, /* SetPower */ + cl_irp_ignore, /* PowerSequence */ + cl_irp_ignore /* WaitWake */ +}; +/* + * NOTE: The QueryInterface entry point is not used because we only enable/disable + * our interface so that user-mode AL can find a device to perform IOCTLs to. + */ + + +NTSTATUS +iou_add_device( + IN DRIVER_OBJECT *p_driver_obj, + IN DEVICE_OBJECT *p_pdo ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_dev_obj, *p_next_do; + iou_fdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + /* Create the FDO device object to attach to the stack. 
*/ + status = IoCreateDevice( p_driver_obj, sizeof(iou_fdo_ext_t), + NULL, FILE_DEVICE_BUS_EXTENDER, + FILE_DEVICE_SECURE_OPEN, FALSE, &p_dev_obj ); + if( !NT_SUCCESS(status) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to create bus root FDO device.\n") ); + return status; + } + + p_ext = p_dev_obj->DeviceExtension; + + ioc_mgr_construct( &p_ext->ioc_mgr ); + + p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo ); + if( !p_next_do ) + { + IoDeleteDevice( p_dev_obj ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("IoAttachDeviceToDeviceStack failed.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, g_iou_dbg_flags, + &vfptr_fdo_pnp, NULL ); + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__get_iou_ifc( + IN iou_fdo_ext_t* const p_ext ) +{ + NTSTATUS status; + IO_STACK_LOCATION io_stack; + ib_al_ifc_data_t data; + + IOU_ENTER( IOU_DBG_PNP ); + + data.type = &GUID_IOU_INTERFACE_DATA; + data.version = IOU_INTERFACE_DATA_VERSION; + data.size = sizeof(iou_ifc_data_t); + data.p_data = &p_ext->ioc_mgr.info; + + io_stack.MinorFunction = IRP_MN_QUERY_INTERFACE; + io_stack.Parameters.QueryInterface.Version = AL_INTERFACE_VERSION; + io_stack.Parameters.QueryInterface.Size = sizeof(ib_al_ifc_t); + io_stack.Parameters.QueryInterface.Interface = + (INTERFACE*)&p_ext->ioc_mgr.ifc; + io_stack.Parameters.QueryInterface.InterfaceSpecificData = + &data; + io_stack.Parameters.QueryInterface.InterfaceType = &GUID_IB_AL_INTERFACE; + + status = cl_fwd_query_ifc( p_ext->cl_ext.p_next_do, &io_stack ); + + /* + * Dereference the interface now so that the bus driver doesn't fail a + * query remove IRP. We will always get unloaded before the bus driver + * since we're a child device. + */ + if( NT_SUCCESS( status ) ) + { + p_ext->ioc_mgr.ifc.wdm.InterfaceDereference( + p_ext->ioc_mgr.ifc.wdm.Context ); + } + + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +static NTSTATUS +fdo_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + iou_fdo_ext_t *p_ext; + ib_api_status_t ib_status; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Handled on the way up. */ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Lower drivers failed IRP_MN_START_DEVICE.\n") ); + return status; + } + + status = __get_iou_ifc( p_ext ); + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("Failed to get IOU interface.\n") ); + return status; + } + + /* Initialize the IOC manager. */ + ib_status = ioc_mgr_init( &p_ext->ioc_mgr ); + if( ib_status != IB_SUCCESS ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("ioc_mgr_init returned %s.\n", + p_ext->ioc_mgr.ifc.get_err_str(ib_status)) ); + return STATUS_UNSUCCESSFUL; + } + + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +static NTSTATUS +fdo_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + iou_fdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + *p_action = IrpSkip; + /* The FDO driver must set the status even when passing down.
*/ + p_irp->IoStatus.Status = STATUS_SUCCESS; + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_SUCCESS; +} + + +/* + * This function gets called after releasing the remove lock and waiting + * for all other threads to release the lock. No more modifications will + * occur to the PDO pointer vectors. + */ +static void +fdo_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + iou_fdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + //TODO: Fail outstanding I/O operations. + cl_obj_destroy( &p_ext->ioc_mgr.obj ); + + IOU_EXIT( IOU_DBG_PNP ); +} + + +static NTSTATUS +fdo_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + iou_fdo_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + /* Process on the way up. */ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + + if( !NT_SUCCESS( status ) ) + { + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("cl_do_sync_pnp returned %08x.\n", status) ); + return status; + } + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + /* + * Store the device power mapping into our extension since we're + * the power policy owner. The mapping is used when handling + * IRP_MN_SET_POWER IRPs. + */ + cl_memcpy( p_ext->po_state, + p_io_stack->Parameters.DeviceCapabilities.Capabilities->DeviceState, + sizeof( p_ext->po_state ) ); + + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +static NTSTATUS +fdo_query_iou_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + iou_fdo_ext_t *p_ext; + NTSTATUS status; + + IOU_ENTER( IOU_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + status = ioc_mgr_get_iou_relations( &p_ext->ioc_mgr, p_irp ); + switch( status ) + { + case STATUS_NO_SUCH_DEVICE: + *p_action = IrpSkip; + status = STATUS_SUCCESS; + break; + + case STATUS_SUCCESS: + *p_action = IrpPassDown; + break; + + default: + *p_action = IrpComplete; + break; + } + + IOU_EXIT( IOU_DBG_PNP ); + return status; +} + + +static NTSTATUS +__fdo_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status = STATUS_SUCCESS; + IO_STACK_LOCATION *p_io_stack; + + IOU_ENTER( IOU_DBG_POWER ); + + UNUSED_PARAM( p_dev_obj ); + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + switch( p_io_stack->Parameters.Power.Type ) + { + case SystemPowerState: + /* Fail any requests to hibernate or sleep the system. */ + switch( p_io_stack->Parameters.Power.State.SystemState ) + { + case PowerSystemWorking: + case PowerSystemShutdown: + /* We only support fully working and shutdown system states. */ + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + + case DevicePowerState: + /* Fail any query for low power states. */ + switch( p_io_stack->Parameters.Power.State.DeviceState ) + { + case PowerDeviceD0: + case PowerDeviceD3: + /* We only support fully powered or off power states.
*/ + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + } + + if( status == STATUS_NOT_SUPPORTED ) + *p_action = IrpComplete; + else + *p_action = IrpSkip; + + IOU_EXIT( IOU_DBG_POWER ); + return status; +} + + +static void +__request_power_completion( + IN DEVICE_OBJECT *p_dev_obj, + IN UCHAR minor_function, + IN POWER_STATE power_state, + IN void *context, + IN IO_STATUS_BLOCK *p_io_status ) +{ + IRP *p_irp; + cl_pnp_po_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + UNUSED_PARAM( minor_function ); + UNUSED_PARAM( power_state ); + + p_irp = (IRP*)context; + p_ext = p_dev_obj->DeviceExtension; + + /* Propagate the device IRP status to the system IRP status. */ + p_irp->IoStatus.Status = p_io_status->Status; + + /* Continue Power IRP processing. */ + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->remove_lock, p_irp ); + IOU_EXIT( IOU_DBG_PNP ); +} + + +/* NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__set_power_completion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + NTSTATUS status; + POWER_STATE state; + iou_fdo_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + IOU_ENTER( IOU_DBG_PNP ); + + UNUSED_PARAM( context ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + if( !NT_SUCCESS( p_irp->IoStatus.Status ) ) + { + PoStartNextPowerIrp( p_irp ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + IOU_PRINT_EXIT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n", + p_irp->IoStatus.Status) ); + return STATUS_SUCCESS; + } + + state.DeviceState = + p_ext->po_state[p_io_stack->Parameters.Power.State.SystemState]; + + /* + * Send a device power IRP to our devnode. Using our device object will + * only work on win2k and other NT based systems. + */ + status = PoRequestPowerIrp( p_dev_obj, IRP_MN_SET_POWER, state, + __request_power_completion, p_irp, NULL ); + + if( !NT_SUCCESS( status ) ) + { + PoStartNextPowerIrp( p_irp ); + /* Propagate the failure. */ + p_irp->IoStatus.Status = status; + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + IOU_PRINT( TRACE_LEVEL_ERROR, IOU_DBG_ERROR, + ("PoRequestPowerIrp returned %08x.\n", status) ); + } + + IOU_EXIT( IOU_DBG_PNP ); + return STATUS_MORE_PROCESSING_REQUIRED; +} + + +static NTSTATUS +__fdo_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *p_io_stack; + iou_fdo_ext_t *p_ext; + + IOU_ENTER( IOU_DBG_POWER ); + + p_ext = p_dev_obj->DeviceExtension; + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + switch( p_io_stack->Parameters.Power.Type ) + { + case SystemPowerState: + /* + * Process on the way up the stack. We cannot block since the + * power dispatch function can be called at elevated IRQL if the + * device is in a paging/hibernation/crash dump path. + */ + IoMarkIrpPending( p_irp ); + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __set_power_completion, NULL, + TRUE, TRUE, TRUE ); +#pragma warning( pop ) + PoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + + *p_action = IrpDoNothing; + status = STATUS_PENDING; + break; + + case DevicePowerState: + default: + /* Pass down and let the PDO driver handle it.
*/ + *p_action = IrpIgnore; + status = STATUS_SUCCESS; + break; + } + + IOU_EXIT( IOU_DBG_POWER ); + return status; +} + + +void +update_relations( + IN cl_qlist_t* const p_pdo_list, + IN OUT DEVICE_RELATIONS* const p_rel ) +{ + cl_list_item_t *p_list_item; + iou_pdo_ext_t *p_pdo_ext; + + IOU_ENTER( IOU_DBG_PNP ); + + p_list_item = cl_qlist_head( p_pdo_list ); + while( p_list_item != cl_qlist_end( p_pdo_list ) ) + { + p_pdo_ext = PARENT_STRUCT( p_list_item, iou_pdo_ext_t, list_item ); + + /* Move the list item to the next object. */ + p_list_item = cl_qlist_next( p_list_item ); + + if( !p_pdo_ext->b_present ) + { + /* + * We don't report a PDO that is no longer present. This is how + * the PDO will get cleaned up. + */ + p_pdo_ext->b_reported_missing = TRUE; + continue; + } + p_rel->Objects[p_rel->Count] = p_pdo_ext->cl_ext.p_pdo; + ObReferenceObject( p_rel->Objects[p_rel->Count++] ); + } + + IOU_EXIT( IOU_DBG_PNP ); +} diff --git a/branches/Ndi/core/iou/kernel/iou_pnp.h b/branches/Ndi/core/iou/kernel/iou_pnp.h new file mode 100644 index 00000000..80a53a54 --- /dev/null +++ b/branches/Ndi/core/iou/kernel/iou_pnp.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined _IOU_DRV_PNP_H_ +#define _IOU_DRV_PNP_H_ + + +#include "iou_driver.h" + + +/****f* InfiniBand Bus Driver: Plug and Play/iou_add_device +* NAME +* iou_add_device +* +* DESCRIPTION +* Main AddDevice entrypoint for the IB IOU driver. +* Adds the bus root functional device object to the device node. The +* bus root FDO performs all PnP operations for fabric attached devices. +* +* SYNOPSIS +*/ +NTSTATUS +iou_add_device( + IN PDRIVER_OBJECT p_driver_obj, + IN PDEVICE_OBJECT p_pdo ); +/* +* PARAMETERS +* p_driver_obj +* Driver object for the IOU driver. +* +* p_pdo +* Pointer to the device object representing the PDO for the device on +* which we are loading. +* +* RETURN VALUES +* STATUS_SUCCESS if the device was successfully added. +* +* Other NTSTATUS error values if errors are encountered.
+* +* SEE ALSO +*********/ + + +void +update_relations( + IN cl_qlist_t* const p_pdo_list, + IN OUT DEVICE_RELATIONS* const p_rel ); + + +#endif // !defined _IOU_DRV_PNP_H_ diff --git a/branches/Ndi/core/iou/kernel/makefile b/branches/Ndi/core/iou/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/core/iou/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/dirs b/branches/Ndi/dirs new file mode 100644 index 00000000..23326682 --- /dev/null +++ b/branches/Ndi/dirs @@ -0,0 +1,6 @@ +DIRS=\ + core \ + ulp \ + hw \ + tools \ + tests diff --git a/branches/Ndi/docs/Manual.htm b/branches/Ndi/docs/Manual.htm new file mode 100644 index 00000000..b310d8a8 --- /dev/null +++ b/branches/Ndi/docs/Manual.htm @@ -0,0 +1,1608 @@ + + + + + +
Windows OpenFabrics

User's Manual

Release 1.0
08/14/2007

Overview

The Windows OpenFabrics (WinOF) package is composed of software modules intended for use on Microsoft Windows based computer systems connected via an InfiniBand fabric.

The Windows OpenFabrics software package contains the following:

OpenFabrics InfiniBand core drivers and Upper Level Protocols (ULPs):
  • HCA (Host Channel Adapter) driver
  • MTHCA - Mellanox HCA low level driver. See Release_notes.htm for a list of supported devices.
  • InfiniBand Core modules: IB verbs and IB access layer
  • Upper Layer Protocols: IPoIB, WSD, VNIC, SRP Initiator and uDAPL
OpenFabrics utilities:

  • OpenSM: InfiniBand Subnet Manager
  • Performance tests
  • Diagnostic tools
Documentation:

  • User's manual
  • Release Notes
Features

Tools

The OpenFabrics Alliance Windows release contains a set of user mode tools which are designed to facilitate the smooth operation of a Windows OpenFabrics installation. These tools are available from a command window (cmd.exe), as the installation path '%SystemDrive%\Program Files\WinOF' is appended to the system wide search path registry entry. A start menu short-cut 'WinOF Cmd Window' is provided to facilitate correct tool operation.
InfiniBand Subnet Management

  • opensm        Open Subnet Management - configure and manage an InfiniBand subnet
  • osmtest       Subnet management tests
  • ib_trapgen    Generate InfiniBand Subnet Management Traps for testing purposes
Performance

  • ib_send_lat   InfiniBand send latency measurement
  • ib_send_bw    InfiniBand send bandwidth measurement
  • ib_write_lat  InfiniBand RDMA write latency measurement
  • ib_write_bw   InfiniBand RDMA write bandwidth measurement
  • ttcp          TCP performance measurements
Diagnostics

  • ib_limits     InfiniBand verb tests
  • cmtest        Connection Manager tests
  • PrintIP       Display an Internet Protocol address associated with an IB GUID.
  • vstat         Display HCA attributes, statistics and error counters.
User mode micro-benchmarks

The following user-mode test programs are intended as useful micro-benchmarks for HW or SW tuning and/or functional testing.

  • Tests use CPU cycle counters to get time stamps without context switch.
  • Tests measure round-trip time but report half of that as one-way latency (i.e., they may not be sufficiently accurate for asymmetrical configurations).
  • Min/Median/Max results are reported. The median (vs. the average) is less sensitive to extreme scores. Typically the "Max" value is the first value measured.
  • Larger samples only marginally help. The default (1000) is pretty good. Note that an array of cycles_t (typically unsigned long) is allocated once to collect samples and again to store the difference between them. Really big sample sizes (e.g. 1 million) might expose other problems with the program.
  • The "-H" option will dump the histogram for additional statistical analysis. See xgraph, ygraph, r-base (http://www.r-project.org/), pspp, or other statistical math programs.

Architectures tested: x86, x86_64, ia64
ib_send_lat.exe - latency test with send transactions

Usage:

   ib_send_lat            start a server and wait for connection
   ib_send_lat <host>     connect to server at <host>

Options:

   -p, --port=<port>          listen on/connect to port <port> (default 18515)
   -c, --connection=<RC/UC>   connection type RC/UC (default RC)
   -m, --mtu=<mtu>            mtu size (default 2048)
   -d, --ib-dev=<dev>         use IB device <dev> (default first device found)
   -i, --ib-port=<port>       use port <port> of IB device (default 1)
   -s, --size=<size>          size of message to exchange (default 1)
   -t, --tx-depth=<dep>       size of tx queue (default 50)
   -l, --signal               signal completion on each msg
   -a, --all                  run sizes from 2 till 2^23
   -n, --iters=<iters>        number of exchanges (at least 2, default 1000)
   -C, --report-cycles        report times in cpu cycle units (default microseconds)
   -H, --report-histogram     print out all results (default print summary only)
   -U, --report-unsorted      (implies -H) print out unsorted results (default sorted)
   -V, --version              display version number
   -e, --events               sleep on CQ events (default poll)
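For example (an illustrative pairing of the options above; 'host1' is a placeholder for the server's host name), measure send latency for all message sizes from 2 till 2^23:

   host1:  ib_send_lat -a
   host2:  ib_send_lat -a host1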
ib_send_bw.exe - BW (BandWidth) test with send transactions

Usage:

   ib_send_bw             start a server and wait for connection
   ib_send_bw <host>      connect to server at <host>

Options:

   -p, --port=<port>          listen on/connect to port <port> (default 18515)
   -d, --ib-dev=<dev>         use IB device <dev> (default first device found)
   -i, --ib-port=<port>       use port <port> of IB device (default 1)
   -c, --connection=<RC/UC>   connection type RC/UC/UD (default RC)
   -m, --mtu=<mtu>            mtu size (default 1024)
   -s, --size=<size>          size of message to exchange (default 65536)
   -a, --all                  run sizes from 2 till 2^23
   -t, --tx-depth=<dep>       size of tx queue (default 300)
   -n, --iters=<iters>        number of exchanges (at least 2, default 1000)
   -b, --bidirectional        measure bidirectional bandwidth (default unidirectional)
   -V, --version              display version number
   -e, --events               sleep on CQ events (default poll)
ib_write_lat.exe - latency test with RDMA write transactions

Usage:

   ib_write_lat           start a server and wait for connection
   ib_write_lat <host>    connect to server at <host>

Options:

   -p, --port=<port>          listen on/connect to port <port> (default 18515)
   -c, --connection=<RC/UC>   connection type RC/UC (default RC)
   -m, --mtu=<mtu>            mtu size (default 1024)
   -d, --ib-dev=<dev>         use IB device <dev> (default first device found)
   -i, --ib-port=<port>       use port <port> of IB device (default 1)
   -s, --size=<size>          size of message to exchange (default 1)
   -a, --all                  run sizes from 2 till 2^23
   -t, --tx-depth=<dep>       size of tx queue (default 50)
   -n, --iters=<iters>        number of exchanges (at least 2, default 1000)
   -C, --report-cycles        report times in cpu cycle units (default microseconds)
   -H, --report-histogram     print out all results (default print summary only)
   -U, --report-unsorted      (implies -H) print out unsorted results (default sorted)
   -V, --version              display version number
ib_write_bw.exe - BW test with RDMA write transactions

Usage:

   ib_write_bw            # start a server and wait for connection
   ib_write_bw <host>     # connect to server at <host>

Options:

   -p, --port=<port>            listen on/connect to port <port> (default 18515)
   -d, --ib-dev=<dev>           use IB device <dev> (default first device found)
   -i, --ib-port=<port>         use port <port> of IB device (default 1)
   -c, --connection=<RC/UC>     connection type RC/UC (default RC)
   -m, --mtu=<mtu>              mtu size (default 1024)
   -g, --post=<num of posts>    number of posts for each qp in the chain (default tx_depth)
   -q, --qp=<num of qp's>       number of qp's (default 1)
   -s, --size=<size>            size of message to exchange (default 65536)
   -a, --all                    run sizes from 2 till 2^23
   -t, --tx-depth=<dep>         size of tx queue (default 100)
   -n, --iters=<iters>          number of exchanges (at least 2, default 5000)
   -b, --bidirectional          measure bidirectional bandwidth (default unidirectional)
   -V, --version                display version number
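For example (an illustrative pairing of the options above; 'host1' is a placeholder for the server's host name), measure bidirectional RDMA write bandwidth:

   host1:  ib_write_bw -b
   host2:  ib_write_bw -b host1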
ttcp - Test TCP performance

TTCP accesses the Windows socket layer, hence it does not access IB verbs directly. IPoIB or WSD layers are invoked beneath the socket layer depending on configuration. TTCP is included as a quick baseline performance check.

Usage: ttcp -t [-options] host
       ttcp -r [-options]
Common options:
   -l ##   length of bufs read from or written to network (default 8192)
   -u      use UDP instead of TCP
   -p ##   port number to send to or listen at (default 5001)
   -A      align the start of buffers to this modulus (default 16384)
   -O      start buffers at this offset from the modulus (default 0)
   -d      set SO_DEBUG socket option
   -b ##   set socket buffer size (if supported)
   -f X    format for rate: k,K = kilo{bit,byte}; m,M = mega; g,G = giga
Options specific to -t:
   -n##    number of source bufs written to network (default 2048)
   -D      don't buffer TCP writes (sets TCP_NODELAY socket option)
Options specific to -r:
   -B      for -s, only output full blocks as specified by -l (for TAR)
   -T      "touch": access each byte as it's read

ttcp requires a receiver (server) side and a transmitter (client) side; host1 and host2 are IPoIB connected hosts.

   at host1 (receiver)       ttcp -r -f M -l 4096

   at host2 (transmitter)    ttcp -t -f M -l 4096 -n1000 host1
Diagnostics

ib_limits - InfiniBand verbs tests

Usage: ib_limits [options]

Options:
   -m or --memory       direct ib_limits to test memory registration
   -c or --cq           direct ib_limits to test CQ creation
   -r or --resize_cq    direct ib_limits to test CQ resize
   -q or --qp           direct ib_limits to test QP creation
   -v or --verbose      enable verbosity level to debug console
   -h or --help         display this usage info then exit
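For example (illustrative), to run only the QP creation test with verbose output:

   ib_limits --qp --verbose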
cmtest - Connection Manager Tests

Usage: cmtest [options]

Options:
   -s --server     This option directs cmtest to act as a Server.
   -l --local      This option specifies the local endpoint.
   -r --remote     This option specifies the remote endpoint.
   -c --connect    This option specifies the number of connections to open. Default of 1.
   -m --msize      This option specifies the byte size of each message. Default is 100 bytes.
   -n --nmsgs      This option specifies the number of messages to send at a time.
   -p --permsg     This option indicates if a separate buffer should be used per message. Default is one buffer for all messages.
   -i --iterate    This option specifies the number of times to loop through 'nmsgs'. Default of 1.
   -v --verbose    This option enables verbosity level to debug console.
   -h --help       Display this usage info then exit.
PrintIP - print IP adapters and their addresses

PrintIP is used to print IP adapters and their addresses, or ARP (Address Resolution Protocol) and IP address.

Usage:
   printip <print_ips>
   printip <remoteip> <ip>        (example: printip remoteip 10.10.2.20)
vstat - HCA Stats and Counters

Display HCA (Host Channel Adapter) attributes.

Usage: vstat [-v] [-c]
   -v - verbose mode
   -c - HCA error/statistic counters
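For example, to display the HCA attributes in verbose mode together with the error/statistic counters:

   vstat -v -c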
Subnet Management with OpenSM Rev: openib-1.2.0

A single running process (opensm.exe) is required to configure and thus make an InfiniBand subnet usable. In most cases, running InfiniBand Subnet Management as a Windows service is sufficient to correctly configure an InfiniBand fabric.

The InfiniBand subnet management process (opensm) may exist on a Windows (WinOF) node or a Linux (OFED) node.

Limit the number of OpenSM processes per IB fabric; one SM is sufficient, although redundant SMs are supported. You do not need a Subnet Manager per node/system.

OpenIB Subnet Management as a Windows Service

InfiniBand subnet management (OpenSM), as a Windows service, is installed by default, although it is NOT started by default. There are two ways to enable the InfiniBand Subnet Management service:

  1. Reset the installed OpenSM service "InfiniBand Subnet Management" to start automatically; see My Computer->Manage->Services and Applications->Services->InfiniBand Subnet Management->Start.
  2. Install OpenSM as a 'running' Windows service: request a 'Custom' install, selecting the OpenSM_service install feature. Once the installation has completed, check the running InfiniBand Subnet Management service status via the Windows service manager (see #1).

Consult the OpenSM log file @ %SystemRoot%\temp\osm.log to see what OpenSM thinks is happening.
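For example (illustrative; assumes the service display name shown above), the installed service may also be started from a command window:

   net start "InfiniBand Subnet Management"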
Manual InfiniBand Subnet Management from a command window

Usage: opensm.exe [options]

Options:

-c
--cache-options
        Cache the given command line options into the file /var/cache/osm/opensm.opts for use at the next invocation. The cache directory can be changed by the environment variable OSM_CACHE_DIR.

-g[=]<GUID in hex>
--guid[=]<GUID in hex>
        This option specifies the local port GUID value with which OpenSM should bind. OpenSM may be bound to 1 port at a time. If the GUID given is 0, OpenSM displays a list of possible port GUIDs and waits for user input. Without -g, OpenSM tries to use the default port.

-l <LMC>
--lmc <LMC>
        This option specifies the subnet's LMC value. The number of LIDs assigned to each port is 2^LMC. The LMC value must be in the range 0-7. LMC values > 0 allow multiple paths between ports. LMC values > 0 should only be used if the subnet topology actually provides multiple paths between ports, i.e. multiple interconnects between switches. Without -l, OpenSM defaults to LMC = 0, which allows one path between any two ports.

-p <PRIORITY>
--priority <PRIORITY>
        This option specifies the SM's PRIORITY. This will affect the handover cases, where the master is chosen by priority and GUID.

-smkey <SM_Key>
        This option specifies the SM's SM_Key (64 bits). This will affect SM authentication.

-r
--reassign_lids
        This option causes OpenSM to reassign LIDs to all end nodes. Specifying -r on a running subnet may disrupt subnet traffic. Without -r, OpenSM attempts to preserve existing LID assignments, resolving multiple uses of the same LID.

-u
--updn
        This option activates the UPDN algorithm instead of the Min Hop algorithm (default).

-a
--add_guid_file <path to file>
        Set the root nodes for the Up/Down routing algorithm to the GUIDs provided in the given file (one per line).

-o
--once
        This option causes OpenSM to configure the subnet once, then exit. Ports remain in the ACTIVE state.

-s <interval>
--sweep <interval>
        This option specifies the number of seconds between subnet sweeps. Specifying -s 0 disables sweeping. Without -s, OpenSM defaults to a sweep interval of 10 seconds.

-t <milliseconds>
--timeout <milliseconds>
        This option specifies the time in milliseconds used for transaction timeouts. Specifying -t 0 disables timeouts. Without -t, OpenSM defaults to a timeout value of 200 milliseconds.

-maxsmps <number>
        This option specifies the number of VL15 SMP MADs allowed on the wire at any one time. Specifying -maxsmps 0 allows unlimited outstanding SMPs. Without -maxsmps, OpenSM defaults to a maximum of one outstanding SMP.

-i <equalize-ignore-guids-file>
-ignore-guids <equalize-ignore-guids-file>
        This option provides the means to define a set of ports (by GUIDs) that will be ignored by the link load equalization algorithm.

-x
--honor_guid2lid
        This option forces OpenSM to honor the guid2lid file when it comes out of Standby state, if such a file exists under OSM_CACHE_DIR and is valid. By default this is FALSE.

-f
--log_file
        This option defines the log to be the given file. By default the log goes to %SystemRoot%\temp\osm.log. For the log to go to standard output, use -f stdout.

-e
--erase_log_file
        This option will cause deletion of the log file (if it previously exists). By default, the log file is accumulative.

-y
--stay_on_fatal
        This option will cause the SM not to exit on fatal initialization issues: if the SM discovers duplicated GUIDs or a 12x link with lane reversal badly configured. By default, the SM will exit on these errors.

-v
--verbose
        This option increases the log verbosity level. The -v option may be specified multiple times to further increase the verbosity level. See the -vf option for more information about log verbosity.

-V
        This option sets the maximum verbosity level and forces log flushing. The -V is equivalent to '-vf 0xFF -d 2'. See the -vf option for more information about log verbosity.

-D <flags>
        This option sets the log verbosity level. A flags field must follow the -D option. A bit set/clear in the flags enables/disables a specific log level as follows:
        BIT  LOG LEVEL ENABLED
        ---- -----------------
        0x01 - ERROR (error messages)
        0x02 - INFO (basic messages, low volume)
        0x04 - VERBOSE (interesting stuff, moderate volume)
        0x08 - DEBUG (diagnostic, high volume)
        0x10 - FUNCS (function entry/exit, very high volume)
        0x20 - FRAMES (dumps all SMP and GMP frames)
        0x40 - ROUTING (dump FDB routing information)
        0x80 - currently unused.
        Without -D, OpenSM defaults to ERROR + INFO (0x3). Specifying -D 0 disables all messages. Specifying -D 0xFF enables all messages (see -V). High verbosity levels may require increasing the transaction timeout with the -t option.

-d <number>
--debug <number>
        This option specifies a debug option. These options are not normally needed. The number following -d selects the debug option to enable as follows:
        OPT  Description
        ---  -----------------
        -d0  - Ignore other SM nodes
        -d1  - Force single threaded dispatching
        -d2  - Force log flushing after each log message
        -d3  - Disable multicast support
        -d4  - Put OpenSM in memory tracking mode
        -d10 - Put OpenSM in testability mode
        Without -d, no debug options are enabled.

-h
--help
        Display this usage info then exit.

-?
        Display this usage info then exit.
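For example (an illustrative invocation combining the options above; the GUID is a placeholder for a local port GUID), bind to a specific port, configure the subnet once, then exit, logging to the console:

   opensm -g 0x0002c9010a1bfe01 -o -f stdout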
Osmtest - Subnet Management Tests

Invoke open subnet management tests. osmtest currently cannot run on the same HCA port which OpenSM is currently using.

Usage: osmtest [options]

Options:

-f <c|a|v|s|e|f|m|q|t>
--flow <c|a|v|s|e|f|m|q|t>
        This option directs osmtest to run a specific flow:
        FLOW DESCRIPTIONS
        c = create an inventory file with all nodes, ports & paths.
        a = run all validation tests (expecting an input inventory).
        v = only validate the given inventory file.
        s = run service registration, un-registration and lease.
        e = run event forwarding test.
        f = flood the SA with queries according to the stress mode.
        m = multicast flow.
        q = QoS info - VLArb and SLtoVL tables.
        t = run trap 64/65 flow; requires running an external tool.
        (default is all but QoS)

-w <trap_wait_time>
--wait <trap_wait_time>
        This option specifies the wait time for trap 64/65 in seconds. It is used only when running -f t - the trap 64/65 flow (default to 10 sec).

-d <number>
--debug <number>
        This option specifies a debug option. These options are not normally needed. The number following -d selects the debug option to enable as follows:
        OPT  Description
        ---  -----------------
        -d0  - Unused.
        -d1  - Do not scan/compare path records.
        -d2  - Force log flushing after each log message.
        -d3  - Use mem tracking.
        Without -d, no debug options are enabled.

-m <LID in hex>
--max_lid <LID in hex>
        This option specifies the maximal LID number to be searched for during inventory file build (default to 100).

-g <GUID in hex>
--guid <GUID in hex>
        This option specifies the local port GUID value with which osmtest should bind. osmtest may be bound to 1 port at a time. Without -g, osmtest displays a menu of possible port GUIDs and waits for user input.

-h
--help
        Display this usage info then exit.

-i <filename>
--inventory <filename>
        This option specifies the name of the inventory file. Normally, osmtest expects to find an inventory file, which osmtest uses to validate real-time information received from the SA during testing. If -i is not specified, osmtest defaults to the file 'osmtest.dat'. See the -c option for related information.

-s
--stress
        This option runs the specified stress test instead of the normal test suite. Stress test options are as follows:
        OPT  Description
        ---  -----------------
        -s1  - Single-MAD response SA queries.
        -s2  - Multi-MAD (RMPP) response SA queries.
        -s3  - Multi-MAD (RMPP) Path Record SA queries.
        Without -s, stress testing is not performed.

-M
--Multicast_Mode
        This option specifies the length of the multicast test:
        OPT  Description
        ---  -----------------
        -M1  - Short Multicast Flow (default) - single mode.
        -M2  - Short Multicast Flow - multiple mode.
        -M3  - Long Multicast Flow - single mode.
        -M4  - Long Multicast Flow - multiple mode.
        Single mode - osmtest is tested alone, with no other apps that interact with OpenSM MC. Multiple mode - can be run with other apps using MC with OpenSM. Without -M, default flow testing is performed.

-t <milliseconds>
        This option specifies the time in milliseconds used for transaction timeouts. Specifying -t 0 disables timeouts. Without -t, osmtest defaults to a timeout value of 1 second.

-l
--log_file
        This option defines the log to be the given file. By default the log goes to stdout.

-v
        This option increases the log verbosity level. The -v option may be specified multiple times to further increase the verbosity level. See the -vf option for more information about log verbosity.

-V
        This option sets the maximum verbosity level and forces log flushing. The -V is equivalent to '-vf 0xFF -d 2'. See the -vf option for more information about log verbosity.

-vf <flags>
        This option sets the log verbosity level. A flags field must follow the -vf option. A bit set/clear in the flags enables/disables a specific log level as follows:
        BIT  LOG LEVEL ENABLED
        ---- -----------------
        0x01 - ERROR (error messages)
        0x02 - INFO (basic messages, low volume)
        0x04 - VERBOSE (interesting stuff, moderate volume)
        0x08 - DEBUG (diagnostic, high volume)
        0x10 - FUNCS (function entry/exit, very high volume)
        0x20 - FRAMES (dumps all SMP and GMP frames)
        0x40 - currently unused.
        0x80 - currently unused.
        Without -vf, osmtest defaults to ERROR + INFO (0x3). Specifying -vf 0 disables all messages. Specifying -vf 0xFF enables all messages (see -V). High verbosity levels may require increasing the transaction timeout with the -t option.
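For example (illustrative), a typical two-step osmtest run first creates a fabric inventory and then validates against it:

   osmtest -f c -i osmtest.dat
   osmtest -f a -i osmtest.dat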
ibtrapgen - Generate InfiniBand subnet management traps

Usage: ibtrapgen -t|--trap_num <TRAP_NUM> -n|--number <NUM_TRAP_CREATIONS>
                 -r|--rate <TRAP_RATE> -l|--lid <LIDADDR>
                 -s|--src_port <SOURCE_PORT> -p|--port_num <PORT_NUM>

Options: one of the following optional flows:

-t <TRAP_NUM>
--trap_num <TRAP_NUM>
        This option specifies the number of the trap to generate. Valid values are 128-131.
-n <NUM_TRAP_CREATIONS>
--number <NUM_TRAP_CREATIONS>
        This option specifies the number of times to generate this trap. If not specified - defaults to 1.
-r <TRAP_RATE>
--rate <TRAP_RATE>
        This option specifies the rate of the trap generation: the time period between one generation and another. The value is given in milliseconds. If the number of trap creations is 1 - this value is ignored.
-l <LIDADDR>
--lid <LIDADDR>
        This option specifies the LID address from where the trap should be generated.
-s <SOURCE_PORT>
--src_port <SOURCE_PORT>
        This option specifies the port number from which the trap should be generated. If the trap number is 128 - this value is ignored (since trap 128 is not sent with a specific port number).
-p <port num>
--port_num <port num>
        This is the port number used for communicating with the SA.
-h
--help
        Display this usage info then exit.
-o
--out_log_file
        This option defines the log to be the given file. By default the log goes to stdout.
-v
        This option increases the log verbosity level. The -v option may be specified multiple times to further increase the verbosity level. See the -x option for more information about log verbosity.
-V
        This option sets the maximum verbosity level and forces log flushing. The -V is equivalent to '-x 0xFF -d 2'. See the -x option for more information about log verbosity.
-x <flags>
        This option sets the log verbosity level. A flags field must follow the -x option. A bit set/clear in the flags enables/disables a specific log level as follows:
        BIT  LOG LEVEL ENABLED
        ---- -----------------
        0x01 - ERROR (error messages)
        0x02 - INFO (basic messages, low volume)
        0x04 - VERBOSE (interesting stuff, moderate volume)
        0x08 - DEBUG (diagnostic, high volume)
        0x10 - FUNCS (function entry/exit, very high volume)
        0x20 - FRAMES (dumps all SMP and GMP frames)
        0x40 - currently unused.
        0x80 - currently unused.
        Without -x, ibtrapgen defaults to ERROR + INFO (0x3). Specifying -x 0 disables all messages. Specifying -x 0xFF enables all messages (see -V).
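For example (illustrative; the LID value is a placeholder for a port on your fabric), generate trap 128 ten times at one-second intervals, communicating with the SA through port 1:

   ibtrapgen -t 128 -n 10 -r 1000 -l 0x0010 -p 1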
IPoIB - Internet Protocols over InfiniBand

+
+

IPoIB enables the use of Internet Protocol utilities (e.g., ftp, +telnet) to function correctly over an Infiniband fabric. IPoIB is implemented as +an NDIS Miniport driver with a WDM lower edge.

+

The IPoIB Network adapters are located via 'My Computer->Manage->Device Manager->Network adapters->IPoIB'.
+'My Network Places->Properties' will display IPoIB Local Area Connection instances and should be used to configure IP addresses for the IPoIB interfaces; one Local Area Connection instance per HCA port. The IP (Internet Protocol) address bound to an IPoIB adapter instance can be assigned by DHCP or as a static IP address via
+'My Network Places->Properties->Local Area Connection X->Properties->(General Tab)Internet Protocol(TCP/IP)->Properties'.

+

When the subnet manager (opensm) configures/sweeps the local InfiniBand HCA, the Local Area Connection will become enabled. If you discover the Local Area Connection to be disabled, then likely your subnet manager (opensm) is not running or functioning correctly.

+

<return-to-top>

+

 

+

 

+

Winsock Direct Service Provider

+
+

Winsock Direct (WSD) is Microsoft's proprietary protocol that predates SDP (Sockets Direct Protocol) for accelerating TCP/IP applications by using RDMA hardware. Microsoft had a significant role in defining the SDP protocol, hence SDP and WSD are remarkably similar, though unfortunately incompatible.
+
+WSD is made up of two parts: the Winsock Direct switch and the Winsock Direct provider. The WSD switch is in the winsock DLL that ships in all editions of Windows Server 2003, and is responsible for routing socket traffic over either the regular TCP/IP stack or offloading it to a WSD provider. The WSD provider is a hardware-specific DLL that implements connection management and data transfers over particular RDMA hardware.

+

WinOF WSD is not supported in the Windows XP environment.

+

The WSD protocol seamlessly transports TCP data using InfiniBand data packets in 'buffered' mode or InfiniBand RDMA in 'direct' mode. Either way the user-mode socket application sees no behavioral difference in the standard Internet Protocol socket it created, other than reduced data transfer times and increased bandwidth.
+
+The Windows OpenFabrics release includes a WSD provider library that has been extensively tested with Microsoft Windows Server 2003.
+During testing, bugs were found in the WSD switch that could lead to hangs, crashes, data corruption, and other unwanted behavior. Microsoft released a hotfix to address these issues which should be installed if using WSD; the Microsoft Windows Server 2003 hotfix can be found here.

+
+
+
+ Environment variables can be used to change the behavior of the WSD provider:
+
+ IBWSD_NO_READ - Disables RDMA Read operations when set to any value.
+ Note that this variable must be used consistently throughout the cluster
+ or communication will fail.
+
+ IBWSD_POLL - Sets the number of times to poll the completion queue after
+ processing completions in response to a CQ event. Reduces latency at the
+ cost of CPU utilization. Default is 500.
+
+ IBWSD_SA_RETRY - Sets the number of times to retry SA query requests.
+ Default is 4; can be increased if connection establishment fails.
+
+ IBWSD_SA_TIMEOUT - Sets the number of milliseconds to wait before
+ retrying SA query requests. Default is 4; can be increased if connection
+ establishment fails.
+
+ IBWSD_NO_IPOIB - SA query timeouts by default allow the connection to be
+ established over IPoIB. Setting this environment variable to any value
+ prevents falling back to IPoIB if SA queries time out.
+
+ IBWSD_DBG - Controls debug output when using a debug version of the WSD
+ provider. Takes a hex value with a leading '0x'; the default value is
+ '0x80000000'. The debug flag bits are:
+
0x00000001   DLL
0x00000002   socket info
0x00000004   initialization code
0x00000008   WQ related functions
0x00000010   Endpoints related functions
0x00000020   memory registration
0x00000040   CM (Connection Manager)
0x00000080   connections
0x00000200   socket options
0x00000400   network events
0x00000800   Hardware
0x00001000   Overlapped I/O request
0x00002000   Socket Duplication
0x00004000   Performance Monitoring
0x01000000   More verbose than IBSP_DBG_LEVEL3
0x02000000   More verbose than IBSP_DBG_LEVEL2
0x04000000   More verbose than IBSP_DBG_LEVEL1
0x08000000   Verbose output
0x20000000   Function enter/exit
0x40000000   Warnings
0x80000000   Errors
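+
+ For example, to disable RDMA reads throughout a cluster and enable both
+ error and warning debug output in a debug build (illustrative values):
+
+     set IBWSD_NO_READ=1
+     set IBWSD_DBG=0xC0000000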
+
+
+
+
+
+


+See https://wiki.openfabrics.org/tiki-index.php?page=Winsock+Direct for the latest WSD status.

+

Winsock Direct Service Provider Installation

+

When the custom install option 'WSD' is selected, the WSD service is automatically installed and started as part of the installation.
+Manual control is performed via the \Program Files\WinOF\installsp.exe utility.

+
+

usage: installsp [-i | -r | -l]
+
+ -i    Install the Winsock Direct (WSD) service provider
+ -r    Remove the WSD service provider
+ -r <name>    Remove the specified service provider
+ -l    List service providers

+
+

<return-to-top>

+

 

+

 

+

Direct Access Transport and usermode Direct Access Programming Libraries

+
+

The DAT (Direct Access Transport) API is a C programming interface developed by the DAT Collaborative in order to provide a set of transport-independent, platform-independent Application Programming Interfaces that exploit the RDMA (remote direct memory access) capabilities of next-generation interconnect technologies such as InfiniBand and iWARP.

+

WinOF DAT and uDAPL are based on the 1.1 DAT specification. The DAPL (Direct Access Provider Library) now fully supports InfiniBand RDMA and IPoIB.

+
+
+
+ How DAT objects map to equivalent InfiniBand objects:
+
Interface Adapter (IA)          HCA (Host Channel Adapter)
Protection Zone (PZ)            PD (Protection Domain)
Local Memory Region (LMR)       MR (Memory Region)
Remote Memory Region (RMR)      MW (Memory Window)
Event Dispatcher (EVD)          CQ (Completion Queue)
Endpoint (EP)                   QP (Queue Pair)
Public Service Point (PSP)      connection identifier
Reserved Service Point (RSP)    connection identifier
Connection Request (CR)         connection manager event
+
+
+
+
+
+

 

+
+

DAT EXECUTION ENVIRONMENT:

+
+
+

In order for DAT/uDAPL programs to execute correctly, the 'dat.dll' file must be present in the current directory, %SystemRoot%\system32, %SystemRoot%, or in the library search path.

+

The default WinOF installation places the file dat.dll in the '%SystemRoot%' folder.

+

The DAPL configuration file by default is defined as '%SystemDrive%\DAT\dat.conf'. This default specification can be overridden by use of the environment variable DAT_OVERRIDE; see the following environment variable discussion.

+

Within the dat.conf file, the DAPL library specification is the 5th whitespace-separated field on the line. By default the DAPL library file is installed as
+'%SystemRoot%\dapl.dll'.

+

Should you choose to relocate the DAPL library file to a path where whitespace appears in the full library path specification, then the full library file specification must be contained within double-quotes. A side effect of the double-quotes is that the library specification is treated as a Windows string, which implies the '\' (backslash character) is treated as an 'escape' character. Hence all backslashes in the library path must be duplicated when enclosed in double-quotes (e.g., "C:\\Program Files\\WinOF\\dapl.dll").
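+
+ As an illustration of the field layout (the adapter name and trailing
+ fields here are placeholders, not authoritative values; consult the
+ installed sample dat.conf for the exact format), an entry whose 5th field
+ is a quoted library path might look like:
+
+     ibnic0 u1.1 nonthreadsafe default "C:\\Program Files\\WinOF\\dapl.dll" ri.1.1 " " " "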

+

A sample dat.conf file is installed as '\Program Files\WinOF\dat.conf'.
+ After the WinOF installation, move the \Program Files\WinOF\dat.conf file to the default DAT configuration file location '%SystemDrive%\DAT\dat.conf'.

+

In order to preserve existing installations, the dat.conf file is not automatically installed in its default location.

+

 

+

+DAT library environment variables:

+
+DAT_OVERRIDE
+------------
+Value used as the static registry configuration file, overriding the
+default location, 'C:\DAT\dat.conf'.
+
+Example: set DAT_OVERRIDE=%SystemDrive%\path\to\my\private.conf
+
+
+DAT_DBG_TYPE
+------------
+
+Value specifies which parts of the registry will print debugging
+information, valid values are 
+
+DAT_OS_DBG_TYPE_ERROR        = 0x1
+DAT_OS_DBG_TYPE_GENERIC      = 0x2
+DAT_OS_DBG_TYPE_SR           = 0x4
+DAT_OS_DBG_TYPE_DR           = 0x8
+DAT_OS_DBG_TYPE_PROVIDER_API = 0x10
+DAT_OS_DBG_TYPE_CONSUMER_API = 0x20
+DAT_OS_DBG_TYPE_ALL          = 0xff
+
+or any combination of these. For example you can use 0xC to get both 
+static and dynamic registry output.
+
+Example: set DAT_DBG_TYPE=0xC
+
+DAT_DBG_DEST
+------------ 
+
+Value sets the output destination, valid values are 
+
+DAT_OS_DBG_DEST_STDOUT = 0x1
+DAT_OS_DBG_DEST_SYSLOG = 0x2 
+DAT_OS_DBG_DEST_ALL    = 0x3 
+
+For example, 0x3 will output to both stdout and the syslog. 
+
+

+DAPL Provider library environment variables

+

+
+DAPL_DBG_TYPE
+-------------
+
+Value specifies which parts of the DAPL provider will print debugging information; valid values are
+
+
DAPL_DBG_TYPE_ERR          = 0x0001
+DAPL_DBG_TYPE_WARN         = 0x0002
+DAPL_DBG_TYPE_EVD          = 0x0004
+DAPL_DBG_TYPE_CM           = 0x0008
+DAPL_DBG_TYPE_EP           = 0x0010
+DAPL_DBG_TYPE_UTIL         = 0x0020
+DAPL_DBG_TYPE_CALLBACK     = 0x0040
+DAPL_DBG_TYPE_DTO_COMP_ERR = 0x0080
+DAPL_DBG_TYPE_API          = 0x0100
+DAPL_DBG_TYPE_RTN          = 0x0200
+DAPL_DBG_TYPE_EXCEPTION    = 0x0400
+
+or any combination of these. For example you can use 0xC to get both
+EVD and CM output.
+
+Example: set DAPL_DBG_TYPE=0xC
+
+
+DAPL_DBG_DEST
+-------------
+
+Value sets the output destination, valid values are
+
+DAPL_DBG_DEST_STDOUT = 0x1
+DAPL_DBG_DEST_SYSLOG = 0x2
+DAPL_DBG_DEST_ALL    = 0x3
+
+For example, 0x3 will output to both stdout and the syslog.

+
+
+ +

+

+

DAPLTEST

+
+
+    dapltest - test for the Direct Access Provider Library (DAPL)
+
+DESCRIPTION
+
+    Dapltest is a set of tests developed to exercise, characterize,
+    and verify the DAPL interfaces during development and porting.
+    At least two instantiations of the test must be run.  One acts
+    as the server, fielding requests and spawning server-side test
+    threads as needed.  Other client invocations connect to the
+    server and issue test requests.
+
+    The server side of the test, once invoked, listens continuously
+    for client connection requests, until quit or killed.  Upon
+    receipt of a connection request, the connection is established,
+    the server and client sides swap version numbers to verify that
+    they are able to communicate, and the client sends the test
+    request to the server.  If the version numbers match, and the
+    test request is well-formed, the server spawns the threads
+    needed to run the test before awaiting further connections.
+
+USAGE
+
+    dapltest [ -f script_file_name ]
+             [ -T S|Q|T|P|L ] [ -D device_name ] [ -d ] [ -R HT|LL|EC|PM|BE ]
+
+    With no arguments, dapltest runs as a server using default values,
+    and loops accepting requests from clients.  The -f option allows
+    all arguments to be placed in a file, to ease test automation.
+    The following arguments are common to all tests:
+
+    [ -T S|Q|T|P|L ]    Test function to be performed:
+                            S   - server loop
+                            Q   - quit, client requests that server
+                                  wait for any outstanding tests to
+                                  complete, then clean up and exit
+                            T   - transaction test, transfers data between 
+                                  client and server
+                            P   - performance test, times DTO operations
+                            L   - limit test, exhausts various resources,
+                                  runs in client w/o server interaction
+                        Default: S
+
+    [ -D device_name ]  Specifies the name of the device (interface adapter).
+                        Default: host-specific, look for DT_MdepDeviceName
+                                 in dapl_mdep.h
+
+    [ -d ]              Enables extra debug verbosity, primarily tracing
+			of the various DAPL operations as they progress.
+			Repeating this parameter increases debug spew.
+			Errors encountered result in the test spewing some
+			explanatory text and stopping; this flag provides
+			more detail about what led up to the error.
+                        Default: zero
+
+    [ -R BE ]           Indicate the quality of service (QoS) desired.
+                        Choices are:
+                            HT  - high throughput
+                            LL  - low latency
+                            EC  - economy (neither HT nor LL)
+                            PM  - premium
+                            BE  - best effort
+                        Default: BE
+
+USAGE - Quit test client
+
+    dapltest [Common_Args] [ -s server_name ]
+
+    Quit testing (-T Q) connects to the server to ask it to clean up and
+    exit (after it waits for any outstanding test runs to complete).
+    In addition to being more polite than simply killing the server,
+    this test exercises the DAPL object teardown code paths.
+    There is only one argument other than those supported by all tests:
+
+    -s server_name      Specifies the name of the server interface.
+                        No default.
+
+
+USAGE - Transaction test client
+
+    dapltest [Common_Args] [ -s server_name ]
+             [ -t threads ] [ -w endpoints ] [ -i iterations ] [ -Q ] 
+             [ -V ] [ -P ] OPclient OPserver [ OP3, ... ]
+
+    Transaction testing (-T T) transfers a variable amount of data between 
+    client and server.  The data transfer can be described as a sequence of 
+    individual operations; that entire sequence is transferred 'iterations' 
+    times by each thread over all of its endpoint(s).
+
+    The following parameters determine the behavior of the transaction test:
+
+    -s server_name      Specifies the hostname of the dapltest server.
+                        No default.
+
+    [ -t threads ]      Specify the number of threads to be used.
+                        Default: 1
+
+    [ -w endpoints ]    Specify the number of connected endpoints per thread.
+                        Default: 1
+
+    [ -i iterations ]   Specify the number of times the entire sequence
+                        of data transfers will be made over each endpoint.
+                        Default: 1000
+
+    [ -Q ]              Funnel completion events into a CNO.
+			Default: use EVDs
+
+    [ -V ]              Validate the data being transferred.
+			Default: ignore the data
+
+    [ -P ]		Turn on DTO completion polling
+			Default: off
+
+    OP1 OP2 [ OP3, ... ]
+                        A single transaction (OPx) consists of:
+
+                        server|client   Indicates who initiates the
+                                        data transfer.
+
+                        SR|RR|RW        Indicates the type of transfer:
+                                        SR  send/recv
+                                        RR  RDMA read
+                                        RW  RDMA write
+                        Defaults: none
+
+                        [ seg_size [ num_segs ] ]
+                                        Indicates the amount and format
+                                        of the data to be transferred.
+                                        Default:  4096  1
+                                                  (i.e., 1 4KB buffer)
+
+                        [ -f ]          For SR transfers only, indicates
+                                        that a client's send transfer
+                                        completion should be reaped when
+                                        the next recv completion is reaped.
+					Sends and receives must be paired
+					(one client, one server, and in that
+					order) for this option to be used.
+
+    Restrictions:  
+    
+    Due to the flow control algorithm used by the transaction test, there 
+    must be at least one SR OP for both the client and the server.  
+
+    Requesting data validation (-V) causes the test to automatically append 
+    three OPs to those specified. These additional operations provide 
+    synchronization points during each iteration, at which all user-specified 
+    transaction buffers are checked. These three appended operations satisfy 
+    the "one SR in each direction" requirement.
+
+    The transaction OP list is printed out if -d is supplied.
+
+USAGE - Performance test client
+
+    dapltest [Common_Args] -s server_name [ -m p|b ]
+             [ -i iterations ] [ -p pipeline ] OP
+
+    Performance testing (-T P) times the transfer of an operation.
+    The operation is posted 'iterations' times.
+
+    The following parameters determine the behavior of the performance test:
+
+    -s server_name      Specifies the hostname of the dapltest server.
+                        No default.
+
+    -m b|p		Used to choose either blocking (b) or polling (p)
+                        Default: blocking (b)
+
+    [ -i iterations ]   Specify the number of times the entire sequence
+                        of data transfers will be made over each endpoint.
+                        Default: 1000
+
+    [ -p pipeline ]     Specify the pipeline length; valid arguments are in 
+                        the range [0,MAX_SEND_DTOS]. If a value greater than 
+                        MAX_SEND_DTOS is requested the value will be
+                        adjusted down to MAX_SEND_DTOS.
+                        Default: MAX_SEND_DTOS
+
+    OP
+                        An operation consists of:
+
+                        RR|RW           Indicates the type of transfer:
+                                        RR  RDMA read
+                                        RW  RDMA write
+                        Default: none
+
+                        [ seg_size [ num_segs ] ]
+                                        Indicates the amount and format
+                                        of the data to be transferred.
+                                        Default:  4096  1
+                                                  (i.e., 1 4KB buffer)
+
+USAGE - Limit test client
+
+    Limit testing (-T L) neither requires nor connects to any server
+    instance.  The client runs one or more tests which attempt to
+    exhaust various resources to determine DAPL limits and exercise
+    DAPL error paths.  If no arguments are given, all tests are run.
+
+    Limit testing creates the sequence of DAT objects needed to
+    move data back and forth, attempting to find the limits supported
+    for the DAPL object requested.  For example, if the LMR creation
+    limit is being examined, the test will create a set of
+    {IA, PZ, CNO, EVD, EP} before trying to run dat_lmr_create() to
+    failure using that set of DAPL objects.  The 'width' parameter
+    can be used to control how many of these parallel DAPL object
+    sets are created before beating upon the requested constructor.
+    Use of -m limits the number of dat_*_create() calls that will
+    be attempted, which can be helpful if the DAPL in use supports
+    essentially unlimited numbers of some objects.
+
+    The limit test arguments are:
+
+    [ -m maximum ]      Specify the maximum number of dapl_*_create()
+                        attempts.
+                        Default: run to object creation failure
+
+    [ -w width ]        Specify the number of DAPL object sets to
+                        create while initializing.
+                        Default: 1
+
+    [ limit_ia ]        Attempt to exhaust dat_ia_open()
+
+    [ limit_pz ]        Attempt to exhaust dat_pz_create()
+
+    [ limit_cno ]       Attempt to exhaust dat_cno_create()
+
+    [ limit_evd ]       Attempt to exhaust dat_evd_create()
+
+    [ limit_ep ]        Attempt to exhaust dat_ep_create()
+
+    [ limit_rsp ]       Attempt to exhaust dat_rsp_create()
+
+    [ limit_psp ]       Attempt to exhaust dat_psp_create()
+
+    [ limit_lmr ]       Attempt to exhaust dat_lmr_create(4KB)
+
+    [ limit_rpost ]     Attempt to exhaust dat_ep_post_recv(4KB)
+
+    [ limit_size_lmr ]  Probe maximum size dat_lmr_create()
+
+                        Default: run all tests
+
+
+EXAMPLES
+
+    dapltest -T S -d -D ibnic0
+
+                        Starts a local dapltest server process with debug verbosity.
+                        Server loops (listen for dapltest request, process request).
+    
+    dapltest -T T -d -s winIB -D ibnic0 -i 100 client SR 4096 2 server SR 4096 2
+
+                        Runs a transaction test, with both sides
+                        sending one buffer with two 4KB segments,
+                        one hundred times; dapltest server is on host winIB.
+
+    dapltest -T P -d -s winIB -D ibnic0 -i 100 RW 4096 2
+
+                        Runs a performance test, with the client 
+                        RDMA writing one buffer with two 4KB segments,
+                        one hundred times.
+
+    dapltest -T Q -s winIB -D ibnic0
+
+                        Asks the dapltest server at host 'winIB' to clean up and exit.
+
+    dapltest -T L -D ibnic0 -d -w 16 -m 1000
+
+                        Runs all of the limit tests, setting up
+                        16 complete sets of DAPL objects, and
+                        creating at most a thousand instances
+                        when trying to exhaust resources.
+
+    dapltest -T T -V -d -t 2 -w 4 -i 55555 -s winIB -D ibnic0 \
+       client RW  4096 1    server RW  2048 4    \
+       client SR  1024 4    server SR  4096 2    \
+       client SR  1024 3 -f server SR  2048 1 -f
+
+                        Runs a more complicated transaction test,
+                        with two threads using four EPs each,
+                        sending a more complicated buffer pattern
+                        for a larger number of iterations,
+                        validating the data received.
+
+
+BUGS  (and  To Do List)
+
+    Use of CNOs (-Q) is not yet supported.
+
+    Further limit tests could be added.
+

<return-to-top>

+

 

+

 

+

SRP - SCSI RDMA Protocol

+

+The SCSI RDMA Protocol (SRP) is an emerging industry-standard protocol for utilizing block storage devices over an InfiniBand™ fabric. SRP is being defined in the ANSI T10 committee.

+

WinOF SRP is a storage driver implementation that enables the SCSI RDMA Protocol over an InfiniBand fabric.
+The implementation conforms to the T10 Working Group draft http://www.t10.org/ftp/t10/drafts/srp/srp-r16a.pdf.

+

Software Dependencies

+

The SRP driver depends on the installation of the WinOF stack with a Subnet
+Manager running somewhere on the IB fabric.
+
+- Supported Operating Systems and Service Packs:
+   o Windows XP SP2 x64
+   o Windows Server 2003 SP1 (x86, x64)
+   o Windows Server 2003 CCS (x64)

+


+Testing Level

+

The SRP driver has undergone basic testing against Mellanox Technologies'
+SRP Targets MTD1000 and MTD2000. Testing included format, read, and write
+operations.

+

Installation

+

The WinOF stack does not install the SRP driver by default. If SRP is selected in the custom installation window, the driver is only copied during WinOF installation. To complete the SRP driver installation, an SRP target must be detected; a Subnet Manager must be running somewhere in the IB subnet.
+
+Upon the detection of an SRP target, the "New Hardware Found" Wizard pops up.
+- Select Install Automatically and click Next. This installs the I/O unit device.
+
+Once completed, the "New Hardware Found" Wizard pops up again.
+- Select Install Automatically and click Next. This installs the SRP driver.

+

New Features and Changes

+

- SRP supports WPP tracing tools by using the GUID: '5AF07B3C-D119-4233-9C81-C07EF481CBE6'.
+  The flags and level of debug can be controlled at load-time or run-time.

+

<return-to-top>

diff --git a/branches/Ndi/docs/complib/cl_async_proc_h.html b/branches/Ndi/docs/complib/cl_async_proc_h.html
new file mode 100644
index 00000000..a7926d51
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_async_proc_h.html
@@ -0,0 +1,309 @@
+Generated from ./inc/complib/cl_async_proc.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
[Modules] Component Library/Asynchronous Processor

+

NAME

+
       Asynchronous Processor
+
+

DESCRIPTION

+
       The asynchronous processor provides threads for executing queued callbacks.
+
+       The threads in the asynchronous processor wait for callbacks to be queued.
+
+       The asynchronous processor functions operate on a cl_async_proc_t structure
+       which should be treated as opaque and manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_async_proc_t, cl_async_proc_item_t
+
+       Initialization:
+               cl_async_proc_construct, cl_async_proc_init, cl_async_proc_destroy
+
+       Manipulation:
+               cl_async_proc_queue
+
+
+
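EXAMPLE
+
+       A minimal usage sketch (a sketch only: error handling is elided, and
+       the callback and variable names are illustrative rather than part of
+       the library):
+
+ #include <complib/cl_async_proc.h>
+
+ static cl_async_proc_t       async_proc;
+ static cl_async_proc_item_t  item;
+
+ static void CL_API
+ my_cb(
+         IN      cl_async_proc_item_t    *p_item )
+ {
+         /* Executed on one of the asynchronous processor's threads. */
+ }
+
+ void example( void )
+ {
+         cl_async_proc_construct( &async_proc );
+         /* A thread_count of zero creates one thread per processor. */
+         cl_async_proc_init( &async_proc, 0, "example" );
+
+         item.pfn_callback = my_cb;
+         cl_async_proc_queue( &async_proc, &item );
+
+         /* Blocks until all threads exit; must not be called from a callback. */
+         cl_async_proc_destroy( &async_proc );
+ }
+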
[Functions] Component Library: Asynchronous Processor/cl_async_proc_construct

+

NAME

+
       cl_async_proc_construct
+
+

DESCRIPTION

+
       The cl_async_proc_construct function initializes the state of an
+       asynchronous processor.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_async_proc_construct(
+        IN      cl_async_proc_t* const  p_async_proc );
+
+

PARAMETERS

+
       p_async_proc
+               [in] Pointer to an asynchronous processor structure.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_async_proc_destroy without first calling
+       cl_async_proc_init.
+
+       Calling cl_async_proc_construct is a prerequisite to calling any other
+       asynchronous processor function except cl_async_proc_init.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_init, cl_async_proc_destroy
+
+
+
[Functions] Component Library: Asynchronous Processor/cl_async_proc_destroy

+

NAME

+
       cl_async_proc_destroy
+
+

DESCRIPTION

+
       The cl_async_proc_destroy function performs any necessary cleanup
+       for an asynchronous processor.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_async_proc_destroy(
+        IN      cl_async_proc_t* const  p_async_proc );
+
+

PARAMETERS

+
       p_async_proc
+               [in] Pointer to an asynchronous processor structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function blocks until all threads exit, and must therefore not
+       be called from any of the asynchronous processor's threads. Because of
+       its blocking nature, callers of cl_async_proc_destroy must ensure that
+       entering a wait state is valid from the calling thread context.
+
+       This function should only be called after a call to
+       cl_async_proc_construct or cl_async_proc_init.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_construct, cl_async_proc_init
+
+
+
[Functions] Component Library: Asynchronous Processor/cl_async_proc_init

+

NAME

+
       cl_async_proc_init
+
+

DESCRIPTION

+
       The cl_async_proc_init function initializes an asynchronous processor
+       for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_async_proc_init(
+        IN      cl_async_proc_t* const  p_async_proc,
+        IN      const uint32_t                  thread_count,
+        IN      const char* const               name );
+
+

PARAMETERS

+
       p_async_proc
+               [in] Pointer to an asynchronous processor structure to initialize.
+
+       thread_count
+               [in] Number of threads to be managed by the asynchronous processor.
+
+       name
+               [in] Name to associate with the threads.  The name may be up to 16
+               characters, including a terminating null character.  All threads
+               created in the asynchronous processor have the same name.
+
+ RETURN VALUES
+       CL_SUCCESS if the asynchronous processor creation succeeded.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+       the asynchronous processor.
+
+       CL_ERROR if the threads could not be created.
+
+

NOTES

+
       cl_async_proc_init creates and starts the specified number of threads.
+       If thread_count is zero, the asynchronous processor creates as many
+       threads as there are processors in the system.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_construct, cl_async_proc_destroy,
+       cl_async_proc_queue
+
+
+
[Structures] Component Library: Asynchronous Processor/cl_async_proc_item_t

+

NAME

+
       cl_async_proc_item_t
+
+

DESCRIPTION

+
       Asynchronous processor item structure passed to the cl_async_proc_queue
+       function to queue a callback for execution.
+
+

SYNOPSIS

+
typedef struct _cl_async_proc_item
+{
+        cl_pool_item_t                  pool_item;
+        cl_pfn_async_proc_cb_t  pfn_callback;
+
+} cl_async_proc_item_t;
+
+

FIELDS

+
       pool_item
+               Pool item for queuing the item to be invoked by the asynchronous
+               processor's threads.  This field is defined as a pool item to
+               allow items to be managed by a pool.
+
+       pfn_callback
+               Pointer to a callback function to invoke when the item is dequeued.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_queue, cl_pfn_async_proc_cb_t
+
+
+
[Functions] Component Library: Asynchronous Processor/cl_async_proc_queue

+

NAME

+
       cl_async_proc_queue
+
+

DESCRIPTION

+
       The cl_async_proc_queue function queues a callback to an asynchronous
+       processor.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_async_proc_queue(
+        IN      cl_async_proc_t* const          p_async_proc,
+        IN      cl_async_proc_item_t* const     p_item );
+
+

PARAMETERS

+
       p_async_proc
+               [in] Pointer to an asynchronous processor structure to which to queue the item.
+
+       p_item
+               [in] Pointer to an asynchronous processor item to queue for execution.
+               The callback and context fields of the item must be valid.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_init, cl_pfn_async_proc_cb_t
+
+
+
[Structures] Component Library: Asynchronous Processor/cl_async_proc_t

+

NAME

+
       cl_async_proc_t
+
+

DESCRIPTION

+
       Asynchronous processor structure.
+
+       The cl_async_proc_t structure should be treated as opaque, and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_async_proc
+{
+        cl_thread_pool_t        thread_pool;
+        cl_qlist_t                      item_queue;
+        cl_spinlock_t           lock;
+        cl_state_t                      state;
+
+} cl_async_proc_t;
+
+

FIELDS

+
       thread_pool
+               Thread pool that will invoke the callbacks.
+
+       item_queue
+               Queue of items that the threads should process.
+
+       lock
+               Lock used to synchronize access to the item queue.
+
+       state
+               State of the asynchronous processor.
+
+

SEE ALSO

+
       Asynchronous Processor
+
+
+
[Definitions] Component Library: Asynchronous Processor/cl_pfn_async_proc_cb_t

+

NAME

+
       cl_pfn_async_proc_cb_t
+
+

DESCRIPTION

+
       The cl_pfn_async_proc_cb_t function type defines the prototype for
+       callbacks queued to and invoked by the asynchronous processor.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_async_proc_cb_t)(
+        IN      struct _cl_async_proc_item      *p_item );
+
+

PARAMETERS

+
       p_item
+               Pointer to the cl_async_proc_item_t structure that was queued in
+               a call to cl_async_proc_queue.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_async_proc_queue
+       function.
+
+

SEE ALSO

+
       Asynchronous Processor, cl_async_proc_item_t
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_atomic_h.html b/branches/Ndi/docs/complib/cl_atomic_h.html
new file mode 100644
index 00000000..1d78041a
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_atomic_h.html
@@ -0,0 +1,272 @@
+Generated from ./inc/complib/cl_atomic.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
[Modules] Component Library/Atomic Operations

+

NAME

+
       Atomic Operations
+
+

DESCRIPTION

+
       The Atomic Operations functions allow callers to operate on
+       32-bit signed integers in an atomic fashion.
+
+
+
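EXAMPLE
+
+       A short sketch of typical use for a shared reference count (a sketch
+       only; the variable and function names are illustrative):
+
+ #include <complib/cl_atomic.h>
+
+ static atomic32_t      ref_count;
+
+ void take_ref( void )
+ {
+         /* Atomic increment; no external lock is required. */
+         cl_atomic_inc( &ref_count );
+ }
+
+ void release_ref( void )
+ {
+         /* cl_atomic_dec returns the decremented value in one atomic operation. */
+         if( cl_atomic_dec( &ref_count ) == 0 )
+         {
+                 /* Last reference released; tear down the object here. */
+         }
+ }
+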
[Functions] Component Library: Atomic Operations/cl_atomic_add

+

NAME

+
       cl_atomic_add
+
+

DESCRIPTION

+
       The cl_atomic_add function atomically adds a value to a
+       32-bit signed integer and returns the resulting value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_add(
+        IN      atomic32_t* const       p_value,
+        IN      const int32_t           increment );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer that will be added to.
+
+       increment
+               [in] Value by which to increment the integer pointed to by p_value.
+
+

RETURN VALUE

+
       Returns the value pointed to by p_value after the addition.
+
+

NOTES

+
       The provided increment is added to the value and the result returned in
+       one atomic operation.
+
+       cl_atomic_add maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_sub,
+       cl_atomic_xchg, cl_atomic_comp_xchg
+
+
+
[Functions] Component Library: Atomic Operations/cl_atomic_comp_xchg

+

NAME

+
       cl_atomic_comp_xchg
+
+

DESCRIPTION

+
       The cl_atomic_comp_xchg function atomically compares a 32-bit signed
+       integer to a desired value, sets that integer to the
+       specified value if equal, and returns the initial value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_comp_xchg(
+        IN      atomic32_t* const       p_value,
+        IN      const int32_t           compare,
+        IN      const int32_t           new_value );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer to exchange with new_value.
+
+       compare
+               [in] Value to compare to the value pointed to by p_value.
+
+       new_value
+               [in] Value to assign if the value pointed to by p_value is equal to
+               the value specified by the compare parameter.
+
+

RETURN VALUE

+
       Returns the initial value of the variable pointed to by p_value.
+
+

NOTES

+
       The value pointed to by p_value is compared to the value specified by the
+       compare parameter.  If the two values are equal, the p_value variable is
+       set to new_value.  The initial value pointed to by p_value is returned.
+
+       cl_atomic_comp_xchg maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add,
+       cl_atomic_sub, cl_atomic_xchg
+
+
+
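EXAMPLE
+
+       For instance, a lock-free saturating increment can be built on
+       cl_atomic_comp_xchg (a sketch only; the names are illustrative):
+
+ static atomic32_t      counter;
+
+ void saturating_inc( const int32_t max )
+ {
+         int32_t old_val, new_val;
+
+         do
+         {
+                 old_val = counter;
+                 if( old_val == max )
+                         return;         /* Already saturated. */
+                 new_val = old_val + 1;
+                 /* Retry if another thread changed the value in the meantime. */
+         } while( cl_atomic_comp_xchg( &counter, old_val, new_val ) != old_val );
+ }
+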
[Functions] Component Library: Atomic Operations/cl_atomic_dec

+

NAME

+
       cl_atomic_dec
+
+

DESCRIPTION

+
       The cl_atomic_dec function atomically decrements a 32-bit signed
+       integer and returns the decremented value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_dec(
+        IN      atomic32_t* const       p_value );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer to decrement.
+
+

RETURN VALUE

+
       Returns the decremented value pointed to by p_value.
+
+

NOTES

+
       The provided value is decremented and its value returned in one atomic
+       operation.
+
+       cl_atomic_dec maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_inc, cl_atomic_add, cl_atomic_sub,
+       cl_atomic_xchg, cl_atomic_comp_xchg
+
+
+
[Functions] Component Library: Atomic Operations/cl_atomic_inc

+

NAME

+
       cl_atomic_inc
+
+

DESCRIPTION

+
       The cl_atomic_inc function atomically increments a 32-bit signed
+       integer and returns the incremented value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_inc(
+        IN      atomic32_t* const       p_value );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer to increment.
+
+

RETURN VALUE

+
       Returns the incremented value pointed to by p_value.
+
+

NOTES

+
       The provided value is incremented and its value returned in one atomic
+       operation.
+
+       cl_atomic_inc maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_dec, cl_atomic_add, cl_atomic_sub,
+       cl_atomic_xchg, cl_atomic_comp_xchg
+
+
+
[Functions] Component Library: Atomic Operations/cl_atomic_sub

+

NAME

+
       cl_atomic_sub
+
+

DESCRIPTION

+
       The cl_atomic_sub function atomically subtracts a value from a
+       32-bit signed integer and returns the resulting value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_sub(
+        IN      atomic32_t* const       p_value,
+        IN      const int32_t           decrement );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer that will be subtracted from.
+
+       decrement
+               [in] Value by which to decrement the integer pointed to by p_value.
+
+

RETURN VALUE

+
       Returns the value pointed to by p_value after the subtraction.
+
+

NOTES

+
       The provided decrement is subtracted from the value and the result
+       returned in one atomic operation.
+
+       cl_atomic_sub maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add,
+       cl_atomic_xchg, cl_atomic_comp_xchg
+
+
+
[Functions] Component Library: Atomic Operations/cl_atomic_xchg

+

NAME

+
       cl_atomic_xchg
+
+

DESCRIPTION

+
       The cl_atomic_xchg function atomically sets a value of a
+       32-bit signed integer and returns the initial value.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_atomic_xchg(
+        IN      atomic32_t* const       p_value,
+        IN      const int32_t           new_value );
+
+

PARAMETERS

+
       p_value
+               [in] Pointer to a 32-bit integer to exchange with new_value.
+
+       new_value
+               [in] Value to assign.
+
+

RETURN VALUE

+
       Returns the initial value pointed to by p_value.
+
+

NOTES

+
       The provided value is exchanged with new_value and its initial value
+       returned in one atomic operation.
+
+       cl_atomic_xchg maintains data consistency without requiring additional
+       synchronization mechanisms in multi-threaded environments.
+
+

SEE ALSO

+
       Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add,
+       cl_atomic_sub, cl_atomic_comp_xchg
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_byteswap_h.html b/branches/Ndi/docs/complib/cl_byteswap_h.html
new file mode 100644
index 00000000..b9572798
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_byteswap_h.html
@@ -0,0 +1,500 @@
+Generated from ./inc/complib/cl_byteswap.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
[Modules] Component Library/Byte Swapping

+

NAME

+
       Byte Swapping
+
+

DESCRIPTION

+
       The byte swapping functions and macros allow swapping bytes from network
+       byte order to host byte order.
+
+       All data transmitted between systems should be in network byte order.
+       In order to utilize such data, it must be converted to host byte order
+       before use.
+
+

SEE ALSO

+
       Functions:
+               cl_ntoh16, cl_hton16, cl_ntoh32, cl_hton32, cl_ntoh64, cl_hton64,
+               cl_ntoh
+
+       Macros:
+               CL_NTOH16, CL_HTON16, CL_NTOH32, CL_HTON32, CL_NTOH64, CL_HTON64
+
+
+
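EXAMPLE
+
+       A brief sketch of converting a value to and from network byte order
+       (a sketch only; the values are illustrative):
+
+ #include <complib/cl_byteswap.h>
+
+ void example( void )
+ {
+         const uint32_t  host_val = 0x12345678;
+         uint32_t        net_val, round_trip;
+
+         /* Convert to network byte order before transmission. */
+         net_val = cl_hton32( host_val );
+
+         /* Convert received data back to host byte order before use. */
+         round_trip = cl_ntoh32( net_val );
+
+         /* round_trip == host_val on both big- and little-endian hosts. */
+ }
+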
[Definitions] Component Library: Byte Swapping/CL_HTON16

+

NAME

+
       CL_HTON16
+
+

DESCRIPTION

+
       The CL_HTON16 macro converts a 16-bit value from host byte order to
+       network byte order.  The CL_HTON16 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_HTON16 is less efficient
+       than the cl_hton16 function.
+
+

SYNOPSIS

+
*       CL_HTON16( val );
+
+

PARAMETERS

+
       val
+               [in] 16-bit value to swap from host byte order to network byte order.
+
+

RESULT

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This macro is analogous to CL_NTOH16.
+
+

SEE ALSO

+
       Byte Swapping, CL_NTOH16, CL_HTON32, CL_HTON64,
+       cl_hton16, cl_hton32, cl_hton64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_hton16

+

NAME

+
       cl_hton16
+
+

DESCRIPTION

+
       The cl_hton16 function converts a 16-bit value from host byte order to
+       network byte order.
+
+

SYNOPSIS

+
*       uint16_t
+*       cl_hton16(
+*               IN      const uint16_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from host byte order to network byte order.
+
+

RETURN VALUE

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This function is analogous to cl_ntoh16.
+
+

SEE ALSO

+
       Byte Swapping, cl_ntoh16, cl_hton32, cl_hton64, cl_ntoh
+
+
+
[Definitions] Component Library: Byte Swapping/CL_HTON32

+

NAME

+
       CL_HTON32
+
+

DESCRIPTION

+
       The CL_HTON32 macro converts a 32-bit value from host byte order to
+       network byte order.  The CL_HTON32 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_HTON32 is less efficient
+       than the cl_hton32 function.
+
+

SYNOPSIS

+
*       CL_HTON32( val );
+
+

PARAMETERS

+
       val
+               [in] 32-bit value to swap from host byte order to network byte order.
+
+

RESULT

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This macro is analogous to CL_NTOH32.
+
+

SEE ALSO

+
       Byte Swapping, CL_NTOH32, CL_HTON16, CL_HTON64,
+       cl_hton16, cl_hton32, cl_hton64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_hton32

+

NAME

+
       cl_hton32
+
+

DESCRIPTION

+
       The cl_hton32 function converts a 32-bit value from host byte order to
+       network byte order.
+
+

SYNOPSIS

+
*       uint32_t
+*       cl_hton32(
+*               IN      const uint32_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from host byte order to network byte order.
+
+

RETURN VALUE

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This function is analogous to cl_ntoh32.
+
+

SEE ALSO

+
       Byte Swapping, cl_ntoh32, cl_hton16, cl_hton64, cl_ntoh
+
+
+
[Definitions] Component Library: Byte Swapping/CL_HTON64

+

NAME

+
       CL_HTON64
+
+

DESCRIPTION

+
       The CL_HTON64 macro converts a 64-bit value from host byte order to
+       network byte order.  The CL_HTON64 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_HTON64 is less efficient
+       than the cl_hton64 function.
+
+

SYNOPSIS

+
*       CL_HTON64( val );
+
+

PARAMETERS

+
       val
+               [in] 64-bit value to swap from host byte order to network byte order.
+
+

RESULT

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This macro is analogous to CL_NTOH64.
+
+

SEE ALSO

+
       Byte Swapping, CL_NTOH64, CL_HTON16, CL_HTON32,
+       cl_hton16, cl_hton32, cl_hton64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_hton64

+

NAME

+
       cl_hton64
+
+

DESCRIPTION

+
       The cl_hton64 function converts a 64-bit value from host byte order to
+       network byte order.
+
+

SYNOPSIS

+
*       uint64_t
+*       cl_hton64(
+*               IN      const uint64_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from host byte order to network byte order.
+
+

RETURN VALUE

+
       Value of val converted to network byte order.
+
+

NOTES

+
       This function is analogous to cl_ntoh64.
+
+

SEE ALSO

+
       Byte Swapping, cl_ntoh64, cl_hton16, cl_hton32, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_ntoh

+

NAME

+
       cl_ntoh
+
+

DESCRIPTION

+
       The cl_ntoh function converts a value from network byte order to
+       host byte order.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_ntoh(
+        OUT     char* const                     p_dest,
+        IN      const char* const       p_src,
+        IN      const uint8_t           size )
+{
+#if CPU_LE
+        uint8_t i;
+        char    temp;
+
+        if( p_src == p_dest )
+        {
+                /* Swap in place if source and destination are the same. */
+                for( i = 0; i < size / 2; i++ )
+                {
+                        temp = p_dest[i];
+                        p_dest[i] = p_src[size - 1 - i];
+                        p_dest[size - 1 - i] = temp;
+                }
+        }
+        else
+        {
+                for( i = 0; i < size; i++ )
+                        p_dest[i] = p_src[size - 1 - i];
+        }
+#else
+        /*
+         * If the source and destination are not the same, copy the source to
+         * the destination.
+         */
+        if( p_src != p_dest )
+                cl_memcpy( p_dest, p_src, size );
+#endif
+}
+
+

PARAMETERS

+
       p_dest
+               [out] Pointer to a byte array to contain the converted value of p_src.
+
+       p_src
+               [in] Pointer to a byte array to be converted from network byte
+               ordering.
+
+       size
+               [in] Number of bytes to swap.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_ntoh can perform in place swapping if both p_src and p_dest point to
+       the same buffer.
+
+

SEE ALSO

+
       Byte Swapping, cl_ntoh16, cl_ntoh32, cl_ntoh64
+
+
+
[Definitions] Component Library: Byte Swapping/CL_NTOH16

+

NAME

+
       CL_NTOH16
+
+

DESCRIPTION

+
       The CL_NTOH16 macro converts a 16-bit value from network byte order to
+       host byte order.  The CL_NTOH16 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_NTOH16 is less efficient
+       than the cl_ntoh16 function.
+
+

SYNOPSIS

+
*       CL_NTOH16( val );
+
+

PARAMETERS

+
       val
+               [in] 16-bit value to swap from network byte order to host byte order.
+
+

RESULT

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This macro is analogous to CL_HTON16.
+
+

SEE ALSO

+
       Byte Swapping, CL_HTON16, CL_NTOH32, CL_NTOH64,
+       cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_ntoh16

+

NAME

+
       cl_ntoh16
+
+

DESCRIPTION

+
       The cl_ntoh16 function converts a 16-bit value from network byte order to
+       host byte order.
+
+

SYNOPSIS

+
*       uint16_t
+*       cl_ntoh16(
+*               IN      const uint16_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from network byte order to host byte order.
+
+

RETURN VALUE

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This function is analogous to cl_hton16.
+
+

SEE ALSO

+
       Byte Swapping, cl_hton16, cl_ntoh32, cl_ntoh64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_ntoh32

+

NAME

+
       cl_ntoh32
+
+

DESCRIPTION

+
       The cl_ntoh32 function converts a 32-bit value from network byte order to
+       host byte order.
+
+

SYNOPSIS

+
*       uint32_t
+*       cl_ntoh32(
+*               IN      const uint32_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from network byte order to host byte order.
+
+

RETURN VALUE

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This function is analogous to cl_hton32.
+
+

SEE ALSO

+
       Byte Swapping, cl_hton32, cl_ntoh16, cl_ntoh64, cl_ntoh
+
+
+
[Definitions] Component Library: Byte Swapping/CL_NTOH32

+

NAME

+
       CL_NTOH32
+
+

DESCRIPTION

+
       The CL_NTOH32 macro converts a 32-bit value from network byte order to
+       host byte order.  The CL_NTOH32 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_NTOH32 is less efficient
+       than the cl_ntoh32 function.
+
+

SYNOPSIS

+
*       CL_NTOH32( val );
+
+

PARAMETERS

+
       val
+               [in] 32-bit value to swap from network byte order to host byte order.
+
+

RESULT

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This macro is analogous to CL_HTON32.
+
+

SEE ALSO

+
       Byte Swapping, CL_HTON32, CL_NTOH16, CL_NTOH64,
+       cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh
+
+
+
[Definitions] Component Library: Byte Swapping/CL_NTOH64

+

NAME

+
       CL_NTOH64
+
+

DESCRIPTION

+
       The CL_NTOH64 macro converts a 64-bit value from network byte order to
+       host byte order.  The CL_NTOH64 macro will cause constant values to be
+       swapped by the pre-processor.  For variables, CL_NTOH64 is less efficient
+       than the cl_ntoh64 function.
+
+

SYNOPSIS

+
*       CL_NTOH64( val );
+
+

PARAMETERS

+
       val
+               [in] 64-bit value to swap from network byte order to host byte order.
+
+

RESULT

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This macro is analogous to CL_HTON64.
+
+

SEE ALSO

+
       Byte Swapping, CL_HTON64, CL_NTOH16, CL_NTOH32,
+       cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh
+
+
+
[Functions] Component Library: Byte Swapping/cl_ntoh64

+

NAME

+
       cl_ntoh64
+
+

DESCRIPTION

+
       The cl_ntoh64 function converts a 64-bit value from network byte order to
+       host byte order.
+
+

SYNOPSIS

+
*       uint64_t
+*       cl_ntoh64(
+*               IN      const uint64_t  val );
+
+

PARAMETERS

+
       val
+               [in] Value to swap from network byte order to host byte order.
+
+

RETURN VALUE

+
       Value of val converted to host byte order.
+
+

NOTES

+
       This function is analogous to cl_hton64.
+
+

SEE ALSO

+
       Byte Swapping, cl_hton64, cl_ntoh16, cl_ntoh32, cl_ntoh
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_comppool_h.html b/branches/Ndi/docs/complib/cl_comppool_h.html
new file mode 100644
index 00000000..fbecd283
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_comppool_h.html
@@ -0,0 +1,604 @@
+Generated from ./inc/complib/cl_comppool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
[Modules] Component Library/Composite Pool

+

NAME

+
       Composite Pool
+
+

DESCRIPTION

+
       The Composite Pool provides a self-contained and self-sustaining pool of
+       user defined composite objects.
+
+       A composite object is an object that is composed of one or more
+       sub-objects, each of which needs to be treated separately for
+       initialization. Objects can be retrieved from the pool as long as there
+       is memory in the system.
+
+       To aid in object oriented design, the composite pool provides the user
+       the ability to specify callbacks that are invoked for each object for
+       construction, initialization, and destruction. Constructor and destructor
+       callback functions may not fail.
+
+       A composite pool does not return memory to the system as the user returns
+       objects to the pool. The only method of returning memory to the system is
+       to destroy the pool.
+
+       The composite pool functions operates on a cl_cpool_t structure which
+       should be treated as opaque and should be manipulated only through the
+       provided functions.
+
+

SEE ALSO

+
       Structures:
+               cl_cpool_t
+
+       Callbacks:
+               cl_pfn_cpool_init_t, cl_pfn_cpool_dtor_t
+
+       Initialization/Destruction:
+               cl_cpool_construct, cl_cpool_init, cl_cpool_destroy
+
+       Manipulation:
+               cl_cpool_get, cl_cpool_put, cl_cpool_grow
+
+       Attributes:
+               cl_is_cpool_inited, cl_cpool_count
+
+
+
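EXAMPLE
+
+       A minimal sketch of composite pool creation and use (a sketch only;
+       the component types and counts are illustrative, and error handling
+       is elided):
+
+ #include <complib/cl_comppool.h>
+
+ typedef struct _msg_hdr  { uint32_t id; }        msg_hdr_t;
+ typedef struct _msg_body { uint8_t data[64]; }   msg_body_t;
+
+ void example( void )
+ {
+         cl_cpool_t    pool;
+         size_t        sizes[2] = { sizeof(msg_hdr_t), sizeof(msg_body_t) };
+         void          *p_obj;
+
+         cl_cpool_construct( &pool );
+         /* 16 objects up front, no maximum, grow by 4 objects on demand,
+          * no initializer/destructor callbacks, no context. */
+         cl_cpool_init( &pool, 16, 0, 4, sizes, 2, NULL, NULL, NULL );
+
+         p_obj = cl_cpool_get( &pool );  /* first component of an object */
+         /* ... use the object ... */
+         cl_cpool_put( &pool, p_obj );
+
+         cl_cpool_destroy( &pool );      /* returns all memory to the system */
+ }
+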
[Functions] Component Library: Composite Pool/cl_cpool_construct

+

NAME

+
       cl_cpool_construct
+
+

DESCRIPTION

+
       The cl_cpool_construct function constructs a composite pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_cpool_construct(
+        IN      cl_cpool_t* const       p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_cpool_init, cl_cpool_destroy, cl_is_cpool_inited.
+
+       Calling cl_cpool_construct is a prerequisite to calling any other
+       composite pool function except cl_cpool_init.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_init, cl_cpool_destroy, cl_is_cpool_inited
+
+
+
[Functions] Component Library: Composite Pool/cl_cpool_count

+

NAME

+
       cl_cpool_count
+
+

DESCRIPTION

+
       The cl_cpool_count function returns the number of available objects
+       in a composite pool.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_cpool_count(
+        IN      cl_cpool_t* const       p_pool )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_count( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure for which the number of
+               available objects is requested.
+
+

RETURN VALUE

+
       Returns the number of objects available in the specified
+       composite pool.
+
+

SEE ALSO

+
       Composite Pool
+
+
+
[Functions] Component Library: Composite Pool/cl_cpool_destroy

+

NAME

+
       cl_cpool_destroy
+
+

DESCRIPTION

+
       The cl_cpool_destroy function destroys a composite pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_cpool_destroy(
+        IN      cl_cpool_t* const       p_pool )
+{
+        CL_ASSERT( p_pool );
+
+        cl_qcpool_destroy( &p_pool->qcpool );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       All memory allocated for composite objects is freed. The destructor
+       callback, if any, will be invoked for every allocated object. Further
+       operations on the composite pool should not be attempted after
+       cl_cpool_destroy is invoked.
+
+       This function should only be called after a call to cl_cpool_construct.
+
+       In a debug build, cl_cpool_destroy asserts that all objects are in
+       the pool.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_construct, cl_cpool_init
+
+
+
[Functions] Component Library: Composite Pool/cl_cpool_get

+

NAME

+
       cl_cpool_get
+
+

DESCRIPTION

+
       The cl_cpool_get function retrieves an object from a
+       composite pool.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_cpool_get(
+        IN      cl_cpool_t* const       p_pool )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_pool );
+
+        p_pool_obj = (cl_pool_obj_t*)cl_qcpool_get( &p_pool->qcpool );
+        if( !p_pool_obj )
+                return( NULL );
+
+        CL_ASSERT( p_pool_obj->list_obj.p_object );
+        return( (void*)p_pool_obj->list_obj.p_object );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure from which to retrieve
+               an object.
+
+ RETURN VALUES
+       Returns a pointer to the first component of a composite object.
+
+       Returns NULL if the pool is empty and cannot be grown automatically.
+
+

NOTES

+
       cl_cpool_get returns the object at the head of the pool. If the pool is
+       empty, it is automatically grown to accommodate this request unless the
+       grow_size parameter passed to the cl_cpool_init function was zero.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_get_tail, cl_cpool_put, cl_cpool_grow,
+       cl_cpool_count
+
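+
+       A minimal usage sketch, not from the original header: my_hdr_t is an
+       illustrative first-component type and p_pool an already initialized
+       composite pool.
+
+       void
+       my_func(
+               IN      cl_cpool_t* const       p_pool )
+       {
+               my_hdr_t        *p_hdr;
+
+               p_hdr = (my_hdr_t*)cl_cpool_get( p_pool );
+               if( !p_hdr )
+                       return; /* Pool is empty and could not be grown. */
+
+               /* ... use the composite object ... */
+
+               cl_cpool_put( p_pool, p_hdr );
+       }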
+
+
+ +

[Functions] +Component Library: Composite Pool/cl_cpool_grow

+ +

[top][index]

+

NAME

+
       cl_cpool_grow
+
+

DESCRIPTION

+
       The cl_cpool_grow function grows a composite pool by
+       the specified number of objects.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_cpool_grow(
+        IN      cl_cpool_t* const       p_pool,
+        IN      const size_t            obj_count )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure whose capacity to grow.
+
+       obj_count
+               [in] Number of objects by which to grow the pool.
+
+ RETURN VALUES
+       CL_SUCCESS if the composite pool grew successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+       composite pool.
+
+       Any cl_status_t value returned by the optional initialization callback
+       specified by the pfn_initializer parameter passed to the
+       cl_cpool_init function.
+
+

NOTES

+
       It is not necessary to call cl_cpool_grow if the pool is
+       configured to grow automatically.
+
+

SEE ALSO

+
       Composite Pool
+
+
+
+ +

[Functions] +Component Library: Composite Pool/cl_cpool_init

+ +

[top][index]

+

NAME

+
       cl_cpool_init
+
+

DESCRIPTION

+
       The cl_cpool_init function initializes a composite pool for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_cpool_init(
+        IN      cl_cpool_t* const               p_pool,
+        IN      const size_t                    min_size,
+        IN      const size_t                    max_size,
+        IN      const size_t                    grow_size,
+        IN      size_t* const                   component_sizes,
+        IN      const uint32_t                  num_components,
+        IN      cl_pfn_cpool_init_t             pfn_initializer OPTIONAL,
+        IN      cl_pfn_cpool_dtor_t             pfn_destructor OPTIONAL,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure to initialize.
+
+       min_size
+               [in] Minimum number of objects that the pool should support. All
+               necessary allocations to allow storing the minimum number of items
+               are performed at initialization time, and all necessary callbacks
+               are successfully invoked.
+
+       max_size
+               [in] Maximum number of objects to which the pool is allowed to grow.
+               A value of zero specifies no maximum.
+
+       grow_size
+               [in] Number of objects to allocate when incrementally growing the pool.
+               A value of zero disables automatic growth.
+
+       component_sizes
+               [in] Pointer to the first entry in an array of sizes describing,
+               in order, the sizes of the components that make up a composite object.
+
+       num_components
+               [in] Number of components that make up a composite object.
+
+       pfn_initializer
+               [in] Initialization callback to invoke for every new object when
+               growing the pool. This parameter may be NULL only if the objects
+               stored in the composite pool consist of only one component.
+               See the cl_pfn_cpool_init function type declaration for details
+               about the callback function.
+
+       pfn_destructor
+               [in] Destructor callback to invoke for every object before memory for
+               that object is freed. This parameter is optional and may be NULL.
+               See the cl_pfn_cpool_dtor function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the composite pool was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+       composite pool.
+
+       CL_INVALID_SETTING if a NULL initializer was provided for composite objects
+       consisting of more than one component.  Also returns CL_INVALID_SETTING if
+       the maximum size is non-zero and less than the minimum size.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function
+       specified by the pfn_initializer parameter.
+
+

NOTES

+
       cl_cpool_init initializes the pool and, if necessary, grows it to
+       the desired capacity.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_construct, cl_cpool_destroy,
+       cl_cpool_get, cl_cpool_put, cl_cpool_grow,
+       cl_cpool_count, cl_pfn_cpool_init_t,
+       cl_pfn_cpool_dtor_t
+
+
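+
+       A minimal setup sketch, not from the original header: my_hdr_t,
+       my_data_t, and the initializer my_cpool_init are illustrative names
+       for a two-component composite object.
+
+       cl_status_t
+       my_create_pool(
+               IN      cl_cpool_t* const       p_pool )
+       {
+               size_t          sizes[2];
+
+               /* The first component is what cl_cpool_get returns; the
+                * second is chained to it by my_cpool_init (see
+                * cl_pfn_cpool_init_t). */
+               sizes[0] = sizeof(my_hdr_t);
+               sizes[1] = sizeof(my_data_t);
+
+               cl_cpool_construct( p_pool );
+               /* 16 objects up front, no maximum, grow by 4 when empty. */
+               return cl_cpool_init( p_pool, 16, 0, 4, sizes, 2,
+                               my_cpool_init, NULL, NULL );
+       }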
+
+ +

[Functions] +Component Library: Composite Pool/cl_cpool_put

+ +

[top][index]

+

NAME

+
       cl_cpool_put
+
+

DESCRIPTION

+
       The cl_cpool_put function returns an object to a composite pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_cpool_put(
+        IN      cl_cpool_t* const       p_pool,
+        IN      void* const                     p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_pool );
+        CL_ASSERT( p_object );
+
+        /* Calculate the offset to the list object representing this object. */
+        p_pool_obj = (cl_pool_obj_t*)
+                (((uint8_t*)p_object) - sizeof(cl_pool_obj_t));
+
+        /* good sanity check */
+        CL_ASSERT( p_pool_obj->list_obj.p_object == p_object );
+
+        cl_qcpool_put( &p_pool->qcpool, (cl_pool_item_t*)p_pool_obj );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure to which to return
+               an object.
+
+       p_object
+               [in] Pointer to the first component of an object to return to the pool.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_cpool_put places the returned object at the head of the pool.
+
+       The object specified by the p_object parameter must have been
+       retrieved from the pool by a previous call to cl_cpool_get.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_put_tail, cl_cpool_get
+
+
+
+ +

[Structures] +Component Library: Composite Pool/cl_cpool_t

+ +

[top][index]

+

NAME

+
       cl_cpool_t
+
+

DESCRIPTION

+
       Composite pool structure.
+
+       The cl_cpool_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_cpool
+{
+        cl_qcpool_t                             qcpool;
+        cl_pfn_cpool_init_t             pfn_init;
+        cl_pfn_cpool_dtor_t             pfn_dtor;
+        const void                              *context;
+
+} cl_cpool_t;
+
+

FIELDS

+
       qcpool
+               Quick composite pool that manages all objects.
+
+       pfn_init
+               Pointer to the user's initializer callback, used by the pool
+               to translate the quick composite pool's initializer callback to
+               a composite pool initializer callback.
+
+       pfn_dtor
+               Pointer to the user's destructor callback, used by the pool
+               to translate the quick composite pool's destructor callback to
+               a composite pool destructor callback.
+
+       context
+               User-provided context for the callback functions, passed by the
+               pool when invoking callbacks.
+
+

SEE ALSO

+
       Composite Pool
+
+
+
+ +

[Functions] +Component Library: Composite Pool/cl_is_cpool_inited

+ +

[top][index]

+

NAME

+
       cl_is_cpool_inited
+
+

DESCRIPTION

+
       The cl_is_cpool_inited function returns whether a composite pool was
+       successfully initialized.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_cpool_inited(
+        IN      const cl_cpool_t* const p_pool )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_pool );
+        return( cl_is_qcpool_inited( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_cpool_t structure whose initialization state
+               to check.
+
+ RETURN VALUES
+       TRUE if the composite pool was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a composite pool to determine if invoking
+       member functions is appropriate.
+
+

SEE ALSO

+
       Composite Pool
+
+
+
+ +

[Definitions] +Component Library: Composite Pool/cl_pfn_cpool_dtor_t

+ +

[top][index]

+

NAME

+
       cl_pfn_cpool_dtor_t
+
+

DESCRIPTION

+
       The cl_pfn_cpool_dtor_t function type defines the prototype for
+       functions used as destructor for objects being deallocated by a
+       composite pool.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_cpool_dtor_t)(
+        IN      void* const                     p_object,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object to destruct.
+
+       context
+               [in] Context provided in the call to cl_cpool_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_cpool_init function.
+
+       The destructor is invoked once per allocated object, allowing the user
+       to perform any necessary cleanup. Users should not attempt to deallocate
+       the memory for the composite object, as the composite pool manages
+       object allocation and deallocation.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_init
+
+
+
+ +

[Definitions] +Component Library: Composite Pool/cl_pfn_cpool_init_t

+ +

[top][index]

+

NAME

+
       cl_pfn_cpool_init_t
+
+

DESCRIPTION

+
       The cl_pfn_cpool_init_t function type defines the prototype for
+       functions used as initializers for objects being allocated by a
+       composite pool.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_cpool_init_t)(
+        IN      void** const            p_comp_array,
+        IN      const uint32_t          num_components,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_comp_array
+               [in] Pointer to the first entry in an array of pointers, each
+               pointing to a component of the object being initialized.
+
+       num_components
+               [in] Number of pointers in the array at p_comp_array.
+
+       context
+               [in] Context provided in a call to cl_cpool_init.
+
+ RETURN VALUES
+       Return CL_SUCCESS to indicate that initialization of the object
+       was successful and that initialization of further objects may continue.
+
+       Any other cl_status_t value returned by the callback causes the grow
+       operation to fail and is returned by cl_cpool_init or cl_cpool_grow.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_cpool_init function.
+
+       The initializer is invoked once per allocated object, allowing the user
+       to chain components to form a composite object and perform any necessary
+       initialization.  Returning a status other than CL_SUCCESS aborts a grow
+       operation, initiated either through cl_cpool_init or cl_cpool_grow, and
+       causes the initiating function to fail.  Any non-CL_SUCCESS status will
+       be returned by the function that initiated the grow operation.
+
+       All memory for the requested number of components is pre-allocated.
+
+       When later performing a cl_cpool_get call, the return value is a pointer
+       to the first component.
+
+

SEE ALSO

+
       Composite Pool, cl_cpool_init, cl_cpool_grow
+
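+
+       An illustrative initializer for the two-component object sketched
+       under cl_cpool_init; my_hdr_t and my_data_t are assumed user types,
+       not part of the library.
+
+       static cl_status_t CL_API
+       my_cpool_init(
+               IN      void** const            p_comp_array,
+               IN      const uint32_t          num_components,
+               IN      void*                           context )
+       {
+               my_hdr_t        *p_hdr = (my_hdr_t*)p_comp_array[0];
+
+               /* context is unused in this sketch. */
+               CL_ASSERT( num_components == 2 );
+
+               /* Chain the second component to the first so that the
+                * pointer returned by cl_cpool_get reaches both. */
+               p_hdr->p_data = (my_data_t*)p_comp_array[1];
+               return CL_SUCCESS;
+       }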
+
diff --git a/branches/Ndi/docs/complib/cl_debug_h.html b/branches/Ndi/docs/complib/cl_debug_h.html
new file mode 100644
index 00000000..6b946692
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_debug_h.html
@@ -0,0 +1,534 @@
+Generated from ./inc/complib/cl_debug.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+ +

[Modules] +Component Library/Debug Output

+ +

[top][parent][index]

+

NAME

+
       Debug Output
+
+

DESCRIPTION

+
       The debug output functions and macros send debug messages to the current
+       debug target.
+
+
+
+ +

[Definitions] +Component Library: Debug Output/64-bit Print Format

+ +

[top][index]

+

NAME

+
       64-bit Print Format
+
+

DESCRIPTION

+
       The 64-bit print keywords allow users to use 64-bit values in debug or
+       console output.
+
+       Different platforms define 64-bit print formats differently. The 64-bit
+       print formats exposed by the component library are supported in all
+       platforms.
+
+

VALUES

+
       PRId64
+               Print a 64-bit integer in signed decimal format.
+       PRIx64
+               Print a 64-bit integer in hexadecimal format.
+       PRIo64
+               Print a 64-bit integer in octal format.
+       PRIu64
+               Print a 64-bit integer in unsigned decimal format.
+
+

EXAMPLE

+
       uint64_t MyVal = 2;
+       // Print a 64-bit integer in hexadecimal format.
+       cl_dbg_out( "MyVal: 0x%" PRIx64 "\n", MyVal );
+
+

NOTES

+
       Standard print flags to specify padding and precision can still be used
+       following the '%' sign in the string preceding the 64-bit print keyword.
+
+       The above keywords are strings and make use of compilers' string
+       concatenation ability.
+
+
+
+ +

[Functions] +Component Library: Debug Output/cl_break

+ +

[top][index]

+

NAME

+
       cl_break
+
+

DESCRIPTION

+
       The cl_break function halts execution.
+
+

SYNOPSIS

+
*       void
+*       cl_break();
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       In a release build, cl_break has no effect.
+
+
+
+ +

[Functions] +Component Library: Debug Output/cl_dbg_out

+ +

[top][index]

+

NAME

+
       cl_dbg_out
+
+

DESCRIPTION

+
       The cl_dbg_out function sends a debug message to the debug target in
+       debug builds only.
+
+

SYNOPSIS

+
CL_EXPORT void
+cl_dbg_out(
+        IN      const char* const       debug_message,
+        IN      ... );
+
+

PARAMETERS

+
       debug_message
+               [in] ANSI string formatted identically as for a call to the standard C
+               function printf.
+
+       ...
+               [in] Extra parameters for string formatting, as defined for the
+               standard C function printf.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       In a release build, cl_dbg_out has no effect.
+
+       The formatting of the debug_message string is the same as for printf.
+
+       cl_dbg_out sends the debug message to the current debug target.
+
+

SEE ALSO

+
       Debug Output, cl_msg_out
+
+
+
+ +

[Definitions] +Component Library: Debug Output/CL_ENTER

+ +

[top][index]

+

NAME

+
       CL_ENTER
+
+

DESCRIPTION

+
       The CL_ENTER macro marks the entrance into a function by sending a
+       string to the current debug target if the requested debug level matches
+       the current debug level.
+
+

SYNOPSIS

+
*       CL_ENTER( DBG_LVL, CHK_LVL );
+
+

PARAMETERS

+
       DBG_LVL
+               [in] Debug level for the string to output
+
+       CHK_LVL
+               [in] Current debug level against which to check DBG_LVL
+
+

RETURN VALUE

+
       This macro does not return a value.
+
+

EXAMPLE

+
       #define __MODULE__      "my_module"
+       #define MY_FUNC_DBG_LVL 1
+
+       uint32_t        my_dbg_lvl = CL_DBG_ALL;
+
+       void
+       my_func()
+       {
+               CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+               CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+       }
+
+

RESULT

+
       my_module:my_func() [
+       my_module:my_func() ]
+
+

NOTES

+
       The function entrance notification is printed only if all bits set
+       in DBG_LVL are also set in CHK_LVL.  CHK_LVL may have additional bits set.
+
+       If the __MODULE__ preprocessor keyword is defined, that keyword will be
+       prepended to the function name, separated with a colon.
+
+       In multi-processor environments where the current processor can be
+       determined, the zero-based number of the processor on which the output
+       is generated is prepended to the output.
+
+

SEE ALSO

+
       Debug Output, Debug Levels, CL_PRINT, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+
+
+
+ +

[Definitions] +Component Library: Debug Output/CL_EXIT

+ +

[top][index]

+

NAME

+
       CL_EXIT
+
+

DESCRIPTION

+
       The CL_EXIT macro marks the exit from a function by sending a string
+       to the current debug target if the requested debug level matches the
+       current debug level.
+
+

SYNOPSIS

+
*       CL_EXIT( DBG_LVL, CHK_LVL );
+
+

PARAMETERS

+
       DBG_LVL
+               [in] Debug level for the string to output
+
+       CHK_LVL
+               [in] Current debug level against which to check DBG_LVL
+
+

RETURN VALUE

+
       This macro does not return a value.
+
+

EXAMPLE

+
       #define __MODULE__      "my_module"
+       #define MY_FUNC_DBG_LVL 1
+
+       uint32_t        my_dbg_lvl = CL_DBG_ALL;
+
+       void
+       my_func()
+       {
+               CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+               CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+       }
+
+

RESULT

+
       my_module:my_func() [
+       my_module:my_func() ]
+
+

NOTES

+
       The exit notification is printed only if all bits set in DBG_LVL are also
+       set in CHK_LVL.  CHK_LVL may have additional bits set.
+
+       The CL_EXIT macro must only be used after the CL_ENTER macro, as it
+       depends on that macro's implementation.
+
+       If the __MODULE__ preprocessor keyword is defined, that keyword will be
+       prepended to the function name, separated with a colon.
+
+       In multi-processor environments where the current processor can be
+       determined, the zero-based number of the processor on which the output
+       is generated is prepended to the output.
+
+

SEE ALSO

+
       Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_TRACE, CL_TRACE_EXIT
+
+
+
+ +

[Functions] +Component Library: Debug Output/cl_msg_out

+ +

[top][index]

+

NAME

+
       cl_msg_out
+
+

DESCRIPTION

+
       The cl_msg_out function sends a debug message to the message log target.
+
+

SYNOPSIS

+
CL_EXPORT void
+cl_msg_out(
+        IN      const char* const       message,
+        IN      ... );
+
+

PARAMETERS

+
       message
+               [in] ANSI string formatted identically as for a call to the standard C
+               function printf.
+
+       ...
+               [in] Extra parameters for string formatting, as defined for the
+               standard C function printf.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_msg_out is available in both debug and release builds.
+
+       The formatting of the message string is the same as for printf.
+
+       cl_msg_out sends the message to the current message logging target.
+
+

SEE ALSO

+
       Debug Output, cl_dbg_out
+
+
+
+ +

[Definitions] +Component Library: Debug Output/CL_PRINT

+ +

[top][index]

+

NAME

+
       CL_PRINT
+
+

DESCRIPTION

+
       The CL_PRINT macro sends a string to the current debug target if
+       the requested debug level matches the current debug level.
+
+

SYNOPSIS

+
*       CL_PRINT( DBG_LVL, CHK_LVL, STRING );
+
+

PARAMETERS

+
       DBG_LVL
+               [in] Debug level for the string to output
+
+       CHK_LVL
+               [in] Current debug level against which to check DBG_LVL
+
+       STRING
+               [in] String to send to the current debug target.  The string includes
+               parentheses in order to allow additional parameters.
+
+

RETURN VALUE

+
       This macro does not return a value.
+
+

EXAMPLE

+
       #define MY_FUNC_DBG_LVL 1
+
+       uint32_t        my_dbg_lvl = CL_DBG_ALL;
+
+       void
+       my_func()
+       {
+               CL_PRINT( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+       }
+
+

RESULT

+
       Hello world!
+
+

NOTES

+
       The requested string is printed only if all bits set in DBG_LVL are also
+       set in CHK_LVL unless the most significant bit is set (indicating an
+       error), in which case the lower bits are ignored.  CHK_LVL may have
+       additional bits set.
+
+       In multi-processor environments where the current processor can be
+       determined, the zero-based number of the processor on which the output
+       is generated is prepended to the output.
+
+

SEE ALSO

+
       Debug Output, Debug Levels, CL_ENTER, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+
+
+
+ +

[Definitions] +Component Library: Debug Output/CL_TRACE

+ +

[top][index]

+

NAME

+
       CL_TRACE
+
+

DESCRIPTION

+
       The CL_TRACE macro sends a string to the current debug target if
+       the requested debug level matches the current debug level.  The
+       output is prepended with the function name and, depending on the
+       debug level requested, an indication of the severity of the message.
+
+

SYNOPSIS

+
*       CL_TRACE( DBG_LVL, CHK_LVL, STRING );
+
+

PARAMETERS

+
       DBG_LVL
+               [in] Debug level for the string to output
+
+       CHK_LVL
+               [in] Current debug level against which to check DBG_LVL
+
+       STRING
+               [in] String to send to the current debug target.  The string includes
+               parentheses in order to allow additional parameters.
+
+

RETURN VALUE

+
       This macro does not return a value.
+
+

EXAMPLE

+
       #define __MODULE__      "my_module"
+       #define MY_FUNC_DBG_LVL 1
+
+       uint32_t        my_dbg_lvl = CL_DBG_ALL;
+
+       void
+       my_func()
+       {
+               CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+               CL_TRACE( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+               CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+       }
+
+

RESULT

+
       my_module:my_func() [
+       my_module:my_func(): Hello world!
+       my_module:my_func() ]
+
+

NOTES

+
       The requested string is printed only if all bits set in DBG_LVL are also
+       set in CHK_LVL.  CHK_LVL may have additional bits set.
+
+       The CL_TRACE macro must only be used after the CL_ENTER macro, as it
+       depends on that macro's implementation.
+
+       If the DBG_LVL has the upper bit set, the output will contain
+       an "!ERROR!" statement between the function name and STRING.
+
+       If the __MODULE__ preprocessor keyword is defined, that keyword will be
+       prepended to the function name, separated with a colon.
+
+       In multi-processor environments where the current processor can be
+       determined, the zero-based number of the processor on which the output
+       is generated is prepended to the output.
+
+

SEE ALSO

+
       Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE_EXIT
+
+
+
+ +

[Definitions] +Component Library: Debug Output/CL_TRACE_EXIT

+ +

[top][index]

+

NAME

+
       CL_TRACE_EXIT
+
+

DESCRIPTION

+
       The CL_TRACE_EXIT macro combines the functionality of the CL_TRACE and
+       CL_EXIT macros, in that order.
+
+

SYNOPSIS

+
*       CL_TRACE_EXIT(  DBG_LVL, CHK_LVL, STRING );
+
+

PARAMETERS

+
       DBG_LVL
+               [in] Debug level for the string to output
+
+       CHK_LVL
+               [in] Current debug level against which to check DBG_LVL
+
+       STRING
+               [in] String to send to the current debug target.  The string includes
+               parentheses in order to allow additional parameters.
+
+

RETURN VALUE

+
       This macro does not return a value.
+
+

EXAMPLE

+
       #define __MODULE__      "my_module"
+       #define MY_FUNC_DBG_LVL 1
+
+       uint32_t        my_dbg_lvl = CL_DBG_ALL;
+
+       void
+       my_func()
+       {
+               CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+               CL_TRACE_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+       }
+
+

RESULT

+
       my_module:my_func() [
+       my_module:my_func(): Hello world!
+       my_module:my_func() ]
+
+

NOTES

+
       The requested string is printed only if all bits set in DBG_LVL are also
+       set in CHK_LVL.  CHK_LVL may have additional bits set.
+
+       The CL_TRACE_EXIT macro must only be used after the CL_ENTER macro, as it
+       depends on that macro's implementation.
+
+       If the DBG_LVL has the upper bit set, the output will contain
+       an "!ERROR!" statement between the function name and STRING.
+
+       If the __MODULE__ preprocessor keyword is defined, that keyword will be
+       prepended to the function name, separated with a colon.
+
+       In multi-processor environments where the current processor can be
+       determined, the zero-based number of the processor on which the output
+       is generated is prepended to the output.
+
+

SEE ALSO

+
       Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE
+
+
+
+ +

[Definitions] +Component Library: Debug Output/Debug Levels

+ +

[top][index]

+

NAME

+
       Debug Levels
+
+

DESCRIPTION

+
       The debug output macros reserve the upper bit of the debug level to
+       convey an error.
+
+

SYNOPSIS

+
#define CL_DBG_DISABLE          0
+#define CL_DBG_ERROR            0x80000000
+#define CL_DBG_ALL                      0xFFFFFFFF
+
+

VALUES

+
       CL_DBG_DISABLE
+               Disable all debug output, including errors.
+
+       CL_DBG_ERROR
+               Enable error debug output.
+
+       CL_DBG_ALL
+               Enable all debug output.
+
+

NOTES

+
       Users can define custom debug levels using the lower 31 bits of their
+       debug level to control non-error debug output.  Error messages are
+       always displayed, regardless of the lower bit definition.
+
+       When specifying the debug output desired for non-error messages
+       (the CHK_LVL parameter in the debug output macros), users must define
+       all bits whose output they are interested in.
+
+

SEE ALSO

+
       Debug Output, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+
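+
+       An illustrative set of custom levels using the lower 31 bits; the
+       MY_DBG_* names are examples, not library definitions.
+
+       #define MY_DBG_INIT             0x00000001
+       #define MY_DBG_IO               0x00000002
+       #define MY_DBG_VERBOSE  0x00000004
+
+       uint32_t        my_dbg_lvl = CL_DBG_ERROR | MY_DBG_INIT | MY_DBG_IO;
+
+       /* Printed: all bits of MY_DBG_IO are set in my_dbg_lvl. */
+       CL_PRINT( MY_DBG_IO, my_dbg_lvl, ("sending %d bytes\n", 16) );
+       /* Not printed: MY_DBG_VERBOSE is not set in my_dbg_lvl. */
+       CL_PRINT( MY_DBG_VERBOSE, my_dbg_lvl, ("verbose detail\n") );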
+
diff --git a/branches/Ndi/docs/complib/cl_event_h.html b/branches/Ndi/docs/complib/cl_event_h.html
new file mode 100644
index 00000000..001da4f3
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_event_h.html
@@ -0,0 +1,274 @@
+Generated from ./inc/complib/cl_event.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+ +

[Modules] +Component Library/Event

+ +

[top][parent][index]

+

NAME

+
       Event
+
+

DESCRIPTION

+
       The Event object provides the ability to suspend and wake up a thread.
+
+       The event functions operate on a cl_event_t structure, which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_event_t
+
+       Initialization/Destruction:
+               cl_event_construct, cl_event_init, cl_event_destroy
+
+       Manipulation:
+               cl_event_signal, cl_event_reset, cl_event_wait_on
+
+
+
+ +

[Functions] +Component Library: Event/cl_event_construct

+ +

[top][index]

+

NAME

+
       cl_event_construct
+
+

DESCRIPTION

+
       The cl_event_construct function constructs an event.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_event_construct(
+        IN      cl_event_t* const       p_event );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_event_destroy without first calling cl_event_init.
+
+       Calling cl_event_construct is a prerequisite to calling any other event
+       function except cl_event_init.
+
+

SEE ALSO

+
       Event, cl_event_init, cl_event_destroy
+
+
+
+ +

[Functions] +Component Library: Event/cl_event_destroy

+ +

[top][index]

+

NAME

+
       cl_event_destroy
+
+

DESCRIPTION

+
       The cl_event_destroy function performs any necessary cleanup of an event.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_event_destroy(
+        IN      cl_event_t* const       p_event );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function should only be called after a call to cl_event_construct
+       or cl_event_init.
+
+

SEE ALSO

+
       Event, cl_event_construct, cl_event_init
+
+
+
+ +

[Functions] +Component Library: Event/cl_event_init

+ +

[top][index]

+

NAME

+
       cl_event_init
+
+

DESCRIPTION

+
       The cl_event_init function initializes an event for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_event_init(
+        IN      cl_event_t* const       p_event,
+        IN      const boolean_t         manual_reset );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure to initialize.
+
+       manual_reset
+               [in] If FALSE, indicates that the event resets itself after releasing
+               a single waiter.  If TRUE, the event remains in the signalled state
+               until explicitly reset by a call to cl_event_reset.
+
+ RETURN VALUES
+       CL_SUCCESS if event initialization succeeded.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       Allows calling event manipulation functions, such as cl_event_signal,
+       cl_event_reset, and cl_event_wait_on.
+
+       The event is initially in a reset state.
+
+

SEE ALSO

+
       Event, cl_event_construct, cl_event_destroy, cl_event_signal,
+       cl_event_reset, cl_event_wait_on
+
+
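+
+       A minimal lifetime sketch for an auto-reset event, assuming some
+       other thread signals it:
+
+       cl_status_t
+       my_wait(
+               IN      cl_event_t* const       p_event )
+       {
+               cl_status_t     status;
+
+               cl_event_construct( p_event );
+               /* FALSE: auto-reset - release one waiter, then reset. */
+               status = cl_event_init( p_event, FALSE );
+               if( status != CL_SUCCESS )
+                       return status;
+
+               /* Another thread calls cl_event_signal( p_event ). */
+               status = cl_event_wait_on( p_event, EVENT_NO_TIMEOUT, FALSE );
+
+               cl_event_destroy( p_event );
+               return status;
+       }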
+
+ +

[Functions] +Component Library: Event/cl_event_reset

+ +

[top][index]

+

NAME

+
       cl_event_reset
+
+

DESCRIPTION

+
       The cl_event_reset function sets an event to the non-signalled state.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_event_reset(
+        IN      cl_event_t* const       p_event );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure to reset.
+
+ RETURN VALUES
+       CL_SUCCESS if the event was successfully reset.
+
+       CL_ERROR otherwise.
+
+

SEE ALSO

+
       Event, cl_event_signal, cl_event_wait_on
+
+
+
+ +

[Functions] +Component Library: Event/cl_event_signal

+ +

[top][index]

+

NAME

+
       cl_event_signal
+
+

DESCRIPTION

+
       The cl_event_signal function sets an event to the signalled state and
+       releases one or more waiting threads.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_event_signal(
+        IN      cl_event_t* const       p_event );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure to set.
+
+ RETURN VALUES
+       CL_SUCCESS if the event was successfully signalled.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       For auto-reset events, the event is reset automatically once a wait
+       operation is satisfied.
+
+       Triggering the event multiple times does not guarantee that the same
+       number of wait operations are satisfied. This is because events are
+       either in a signalled or non-signalled state, and triggering an event
+       that is already in the signalled state has no effect.
+
+

SEE ALSO

+
       Event, cl_event_reset, cl_event_wait_on
+
+
+
+ +

[Functions] +Component Library: Event/cl_event_wait_on

+ +

[top][index]

+

NAME

+
       cl_event_wait_on
+
+

DESCRIPTION

+
       The cl_event_wait_on function waits for the specified event to be
+       triggered for a minimum amount of time.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_event_wait_on(
+        IN      cl_event_t* const       p_event,
+        IN      const uint32_t          wait_us,
+        IN      const boolean_t         interruptible );
+
+

PARAMETERS

+
       p_event
+               [in] Pointer to a cl_event_t structure on which to wait.
+
+       wait_us
+               [in] Number of microseconds to wait.
+
+       interruptible
+               [in] Indicates whether the wait operation can be interrupted
+               by external signals.
+
+ RETURN VALUES
+       CL_SUCCESS if the wait operation succeeded in response to the event
+       being set.
+
+       CL_TIMEOUT if the specified time period elapses.
+
+       CL_NOT_DONE if the wait was interrupted by an external signal.
+
+       CL_ERROR if the wait operation failed.
+
+

NOTES

+
       If wait_us is set to EVENT_NO_TIMEOUT, the function will wait until the
+       event is triggered and never time out.
+
+       If the timeout value is zero, this function simply tests the state of
+       the event.
+
+       If the event is already in the signalled state at the time of the call
+       to cl_event_wait_on, the call completes immediately with CL_SUCCESS.
+
+

SEE ALSO

+
       Event, cl_event_signal, cl_event_reset
+
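+
+       A sketch of handling the documented return codes when waiting with
+       a 1000 microsecond timeout:
+
+       status = cl_event_wait_on( &event, 1000, TRUE );
+       switch( status )
+       {
+       case CL_SUCCESS:        /* The event was signalled. */
+               break;
+       case CL_TIMEOUT:        /* 1000us elapsed without a signal. */
+               break;
+       case CL_NOT_DONE:       /* Interrupted by an external signal. */
+               break;
+       default:                        /* CL_ERROR: the wait itself failed. */
+               break;
+       }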
+
diff --git a/branches/Ndi/docs/complib/cl_fleximap_h.html b/branches/Ndi/docs/complib/cl_fleximap_h.html
new file mode 100644
index 00000000..ec2bbb53
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_fleximap_h.html
@@ -0,0 +1,948 @@
+Generated from ./inc/complib/cl_fleximap.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+ +

[Modules] +Component Library/Flexi Map

+ +

[top][parent][index]

+

NAME

+
       Flexi Map
+
+

DESCRIPTION

+
       Flexi map implements a binary tree that stores user provided cl_fmap_item_t
+       structures.  Each item stored in a flexi map has a unique user defined key
+       (duplicates are not allowed).  Flexi map provides the ability to
+       efficiently search for an item given a key.  Flexi map allows user defined
+       keys of any size.  Storage for keys and a comparison function are provided
+       by users to allow flexi map to store items with arbitrary key values.
+
+       Flexi map does not allocate any memory, and therefore cannot fail
+       any operations due to insufficient memory.  Flexi map can thus be useful
+       in minimizing the error paths in code.
+
+       Flexi map is not thread safe, and users must provide serialization when
+       adding and removing items from the map.
+
+       The flexi map functions operate on a cl_fmap_t structure which should
+       be treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_fmap_t, cl_fmap_item_t
+
+       Callbacks:
+               cl_pfn_fmap_apply_t, cl_pfn_fmap_cmp_t
+
+       Item Manipulation:
+               cl_fmap_key
+
+       Initialization:
+               cl_fmap_init
+
+       Iteration:
+               cl_fmap_end, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_prev
+
+       Manipulation:
+               cl_fmap_insert, cl_fmap_get, cl_fmap_remove_item, cl_fmap_remove,
+               cl_fmap_remove_all, cl_fmap_merge, cl_fmap_delta
+
+       Search:
+               cl_fmap_apply_func
+
+       Attributes:
+               cl_fmap_count, cl_is_fmap_empty
+
+
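+
+       A typical usage sketch; my_rec_t and my_key_cmp are illustrative
+       names, not part of the library.  The pattern is to embed a
+       cl_fmap_item_t in the user object and key the map on a field of
+       that object.
+
+       typedef struct _my_rec
+       {
+               cl_fmap_item_t  map_item;       /* First, to allow casting. */
+               uint64_t                key;
+       } my_rec_t;
+
+       static intn_t CL_API
+       my_key_cmp(
+               IN      const void* const       p_key1,
+               IN      const void* const       p_key2 )
+       {
+               const uint64_t k1 = *(const uint64_t*)p_key1;
+               const uint64_t k2 = *(const uint64_t*)p_key2;
+               return( k1 < k2 ? -1 : (k1 > k2 ? 1 : 0) );
+       }
+
+       cl_fmap_t       map;
+       my_rec_t        rec;
+
+       cl_fmap_init( &map, my_key_cmp );
+       rec.key = 42;
+       cl_fmap_insert( &map, &rec.key, &rec.map_item );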
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_apply_func

+ +

[top][index]

+

NAME

+
       cl_fmap_apply_func
+
+

DESCRIPTION

+
       The cl_fmap_apply_func function executes a specified function
+       for every item stored in a flexi map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_fmap_apply_func(
+        IN      const cl_fmap_t* const  p_map,
+        IN      cl_pfn_fmap_apply_t             pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure.
+
+       pfn_func
+               [in] Function invoked for every item in the flexi map.
+               See the cl_pfn_fmap_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The function provided must not perform any map operations, as these
+       would corrupt the flexi map.
+
+

SEE ALSO

+
       Flexi Map, cl_pfn_fmap_apply_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_count

+ +

[top][index]

+

NAME

+
       cl_fmap_count
+
+

DESCRIPTION

+
       The cl_fmap_count function returns the number of items stored
+       in a flexi map.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_fmap_count(
+        IN      const cl_fmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( p_map->count );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure whose item count to return.
+
+

RETURN VALUE

+
       Returns the number of items stored in the map.
+
+

SEE ALSO

+
       Flexi Map, cl_is_fmap_empty
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_delta

+ +

[top][index]

+

NAME

+
       cl_fmap_delta
+
+

DESCRIPTION

+
       The cl_fmap_delta function computes the differences between two maps.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_fmap_delta(
+        IN OUT  cl_fmap_t* const        p_map1,
+        IN OUT  cl_fmap_t* const        p_map2,
+        OUT             cl_fmap_t* const        p_new,
+        OUT             cl_fmap_t* const        p_old );
+
+

PARAMETERS

+
       p_map1
+               [in/out] Pointer to the first of two cl_fmap_t structures whose
+               differences to compute.
+
+       p_map2
+               [in/out] Pointer to the second of two cl_fmap_t structures whose
+               differences to compute.
+
+       p_new
+               [out] Pointer to an empty cl_fmap_t structure that contains the items
+               unique to p_map2 upon return from the function.
+
+       p_old
+               [out] Pointer to an empty cl_fmap_t structure that contains the items
+               unique to p_map1 upon return from the function.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Items are evaluated based on their keys.  Items that exist in both
+       p_map1 and p_map2 remain in their respective maps.  Items that
+       exist only in p_map1 are moved to p_old.  Likewise, items that exist only
+       in p_map2 are moved to p_new.  This function can be useful in evaluating
+       changes between two maps.
+
+       Both maps pointed to by p_new and p_old must be empty on input.  This
+       requirement removes the possibility of failures.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_merge
+
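+
+       A usage sketch, with my_key_cmp as in the module overview and map1
+       and map2 as illustrative, already populated maps; both output maps
+       must be initialized and empty:
+
+       cl_fmap_t       new_items, old_items;
+
+       cl_fmap_init( &new_items, my_key_cmp );
+       cl_fmap_init( &old_items, my_key_cmp );
+
+       cl_fmap_delta( &map1, &map2, &new_items, &old_items );
+       /* new_items now holds items unique to map2, old_items holds items
+        * unique to map1; common items remain in map1 and map2. */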
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_end

+ +

[top][index]

+

NAME

+
       cl_fmap_end
+
+

DESCRIPTION

+
       The cl_fmap_end function returns the end of a flexi map.
+
+

SYNOPSIS

+
CL_INLINE const cl_fmap_item_t* const CL_API
+cl_fmap_end(
+        IN      const cl_fmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        /* Nil is the end of the map. */
+        return( &p_map->nil );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure whose end to return.
+
+

RETURN VALUE

+
       Pointer to the end of the map.
+
+

NOTES

+
       cl_fmap_end is useful for determining the validity of map items returned
+       by cl_fmap_head, cl_fmap_tail, cl_fmap_next, or cl_fmap_prev.  If the map
+       item pointer returned by any of these functions compares equal to the
+       end, the end of the map was encountered.
+       When using cl_fmap_head or cl_fmap_tail, this condition indicates that
+       the map is empty.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_prev
+
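+
+       The typical traversal idiom, from lowest to highest key; my_rec_t is
+       the illustrative type from the module overview, with its
+       cl_fmap_item_t as first member so the cast below is valid:
+
+       cl_fmap_item_t  *p_item;
+
+       for( p_item = cl_fmap_head( &map );
+                p_item != cl_fmap_end( &map );
+                p_item = cl_fmap_next( p_item ) )
+       {
+               my_rec_t *p_rec = (my_rec_t*)p_item;
+               /* ... process p_rec without modifying the map ... */
+       }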
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_get

+ +

[top][index]

+

NAME

+
       cl_fmap_get
+
+

DESCRIPTION

+
       The cl_fmap_get function returns the map item associated with a key.
+
+

SYNOPSIS

+
CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_get(
+        IN      const cl_fmap_t* const  p_map,
+        IN      const void* const               p_key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure from which to retrieve the
+               item with the specified key.
+
+       p_key
+               [in] Pointer to a key value used to search for the desired map item.
+
+ RETURN VALUES
+       Pointer to the map item with the desired key value.
+
+       Pointer to the map end if there was no item with the desired key value
+       stored in the flexi map.
+
+

NOTES

+
       cl_fmap_get does not remove the item from the flexi map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_remove
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_head

+ +

[top][index]

+

NAME

+
       cl_fmap_head
+
+

DESCRIPTION

+
       The cl_fmap_head function returns the map item with the lowest key
+       value stored in a flexi map.
+
+

SYNOPSIS

+
CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_head(
+        IN      const cl_fmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( (cl_fmap_item_t*)p_map->nil.pool_item.list_item.p_next );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure whose item with the lowest key
+               is returned.
+
+ RETURN VALUES
+       Pointer to the map item with the lowest key in the flexi map.
+
+       Pointer to the map end if the flexi map was empty.
+
+

NOTES

+
       cl_fmap_head does not remove the item from the map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_tail, cl_fmap_next, cl_fmap_prev, cl_fmap_end,
+       cl_fmap_item_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_init

+ +

[top][index]

+

NAME

+
       cl_fmap_init
+
+

DESCRIPTION

+
       The cl_fmap_init function initializes a flexi map for use.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_fmap_init(
+        IN      cl_fmap_t* const        p_map,
+        IN      cl_pfn_fmap_cmp_t       pfn_compare );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure to initialize.
+
+       pfn_compare
+               [in] Pointer to the compare function used to compare keys.
+               See the cl_pfn_fmap_cmp_t function type declaration for details
+               about the callback function.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Allows calling flexi map manipulation functions.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_insert, cl_fmap_remove
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_insert

+ +

[top][index]

+

NAME

+
       cl_fmap_insert
+
+

DESCRIPTION

+
       The cl_fmap_insert function inserts a map item into a flexi map.
+
+

SYNOPSIS

+
CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_insert(
+        IN      cl_fmap_t* const                p_map,
+        IN      const void* const               p_key,
+        IN      cl_fmap_item_t* const   p_item );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure into which to add the item.
+
+       p_key
+               [in] Pointer to the key value to assign to the item.  Storage for
+               the key must be persistent, as only the pointer is stored.  Users
+               are responsible for maintaining the validity of key pointers while
+               they are in use.
+
+       p_item
+               [in] Pointer to a cl_fmap_item_t structure to insert into the flexi map.
+
+

RETURN VALUE

+
       Pointer to the item in the map with the specified key.  If insertion
+       was successful, this is the pointer to the item.  If an item with the
+       specified key already exists in the map, the pointer to that item is
+       returned.
+
+

NOTES

+
       Insertion operations may cause the flexi map to rebalance.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_remove, cl_fmap_item_t
+
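+
+       Because the return value is the existing item when the key is
+       already present, duplicates can be detected as follows (map and rec
+       as in the module overview sketch):
+
+       cl_fmap_item_t  *p_existing;
+
+       p_existing = cl_fmap_insert( &map, &rec.key, &rec.map_item );
+       if( p_existing != &rec.map_item )
+       {
+               /* An item with this key already exists; rec was not
+                * inserted into the map. */
+       }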
+
+
+ +

[Structures] +Component Library: Flexi Map/cl_fmap_item_t

+ +

[top][index]

+

NAME

+
       cl_fmap_item_t
+
+

DESCRIPTION

+
       The cl_fmap_item_t structure is used by maps to store objects.
+
+       The cl_fmap_item_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_fmap_item
+{
+        /* Must be first to allow casting. */
+        cl_pool_item_t                  pool_item;
+        struct _cl_fmap_item    *p_left;
+        struct _cl_fmap_item    *p_right;
+        struct _cl_fmap_item    *p_up;
+        cl_map_color_t                  color;
+        const void* __ptr64             p_key;
+#ifdef _DEBUG_
+        struct _cl_fmap                 *p_map;
+#endif
+
+} cl_fmap_item_t;
+
+

FIELDS

+
       pool_item
+               Used to store the item in a doubly linked list, allowing more
+               efficient map traversal.
+
+       p_left
+               Pointer to the map item that is a child to the left of the node.
+
+       p_right
+               Pointer to the map item that is a child to the right of the node.
+
+       p_up
+               Pointer to the map item that is the parent of the node.
+
+       color
+               Indicates whether a node is red or black in the map.
+
+       p_key
+               Pointer to the value that uniquely represents a node in a map.  This
+               pointer is set by calling cl_fmap_insert and can be retrieved by
+               calling cl_fmap_key.
+
+

NOTES

+
       None of the fields of this structure should be manipulated by users, as
+       they are crititcal to the proper operation of the map in which they
+       are stored.
+
+       To allow storing items in either a quick list, a quick pool, or a flexi
+       map, the map implementation guarantees that the map item can be safely
+       cast to a pool item used for storing an object in a quick pool, or cast to
+       a list item used for storing an object in a quick list.  This removes the
+       need to embed a flexi map item, a list item, and a pool item in objects
+       that need to be stored in a quick list, a quick pool, and a flexi map.
+
+       The flexi map item is defined to be identical in layout to a map item.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_insert, cl_fmap_key, cl_pool_item_t, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_key

+ +

[top][index]

+

NAME

+
       cl_fmap_key
+
+

DESCRIPTION

+
       The cl_fmap_key function retrieves the key value of a map item.
+
+

SYNOPSIS

+
#pragma warning (push)
+#pragma warning (disable :4244)
+CL_INLINE const void* CL_API
+cl_fmap_key(
+        IN      const cl_fmap_item_t* const     p_item )
+{
+        CL_ASSERT( p_item );
+        return( p_item->p_key );
+}
+#pragma warning (pop )
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose key value to return.
+
+

RETURN VALUE

+
       Returns a pointer to the key value for the specified map item.
+       The key value must not be modified in order to ensure proper flexi
+       map operation.
+
+

NOTES

+
       The key value is set in a call to cl_fmap_insert.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_insert
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_merge

+ +

[top][index]

+

NAME

+
       cl_fmap_merge
+
+

DESCRIPTION

+
       The cl_fmap_merge function moves all items from one map to another,
+       excluding duplicates.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_fmap_merge(
+        OUT             cl_fmap_t* const        p_dest_map,
+        IN OUT  cl_fmap_t* const        p_src_map );
+
+

PARAMETERS

+
       p_dest_map
+               [out] Pointer to a cl_fmap_t structure to which items should be added.
+
+       p_src_map
+               [in/out] Pointer to a cl_fmap_t structure whose items to add
+               to p_dest_map.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Items are evaluated based on their keys only.
+
+       Upon return from cl_fmap_merge, the flexi map referenced by p_src_map
+       contains all duplicate items.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_delta
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_next

+ +

[top][index]

+

NAME

+
       cl_fmap_next
+
+

DESCRIPTION

+
       The cl_fmap_next function returns the map item with the next higher
+       key value than a specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_next(
+        IN      const cl_fmap_item_t* const     p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_fmap_item_t*)p_item->pool_item.list_item.p_next );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose successor to return.
+
+ RETURN VALUES
+       Pointer to the map item with the next higher key value in a flexi map.
+
+       Pointer to the map end if the specified item was the last item in
+       the flexi map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_prev, cl_fmap_end,
+       cl_fmap_item_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_prev

+ +

[top][index]

+

NAME

+
       cl_fmap_prev
+
+

DESCRIPTION

+
       The cl_fmap_prev function returns the map item with the next lower
+       key value than a specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_prev(
+        IN      const cl_fmap_item_t* const     p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_fmap_item_t*)p_item->pool_item.list_item.p_prev );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose predecessor to return.
+
+ RETURN VALUES
+       Pointer to the map item with the next lower key value in a flexi map.
+
+       Pointer to the map end if the specified item was the first item in
+       the flexi map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_end,
+       cl_fmap_item_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_remove

+ +

[top][index]

+

NAME

+
       cl_fmap_remove
+
+

DESCRIPTION

+
       The cl_fmap_remove function removes the map item with the specified key
+       from a flexi map.
+
+

SYNOPSIS

+
CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_remove(
+        IN      cl_fmap_t* const        p_map,
+        IN      const void* const       p_key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure from which to remove the item
+               with the specified key.
+
+       p_key
+               [in] Pointer to the key value used to search for the map item
+               to remove.
+
+ RETURN VALUES
+       Pointer to the removed map item if it was found.
+
+       Pointer to the map end if no item with the specified key exists in the
+       flexi map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_remove_item, cl_fmap_remove_all, cl_fmap_insert
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_remove_all

+ +

[top][index]

+

NAME

+
       cl_fmap_remove_all
+
+

DESCRIPTION

+
       The cl_fmap_remove_all function removes all items in a flexi map,
+       leaving it empty.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_fmap_remove_all(
+        IN      cl_fmap_t* const        p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        p_map->root.p_left = &p_map->nil;
+        p_map->nil.pool_item.list_item.p_next = &p_map->nil.pool_item.list_item;
+        p_map->nil.pool_item.list_item.p_prev = &p_map->nil.pool_item.list_item;
+        p_map->count = 0;
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure to empty.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_remove, cl_fmap_remove_item
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_remove_item

+ +

[top][index]

+

NAME

+
       cl_fmap_remove_item
+
+

DESCRIPTION

+
       The cl_fmap_remove_item function removes the specified map item
+       from a flexi map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_fmap_remove_item(
+        IN      cl_fmap_t* const                p_map,
+        IN      cl_fmap_item_t* const   p_item );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure from which to remove p_item.
+
+       p_item
+               [in] Pointer to a map item to remove from its flexi map.
+
+ RETURN VALUES
+       This function does not return a value.
+
+
+

NOTES

+
       Removes the map item pointed to by p_item from its flexi map.
+
+       In a debug build, cl_fmap_remove_item asserts that the item being
+       removed is in the specified map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_remove, cl_fmap_remove_all, cl_fmap_insert
+
+
+
+ +

[Structures] +Component Library: Flexi Map/cl_fmap_t

+ +

[top][index]

+

NAME

+
       cl_fmap_t
+
+

DESCRIPTION

+
       Flexi map structure.
+
+       The cl_fmap_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_fmap
+{
+        cl_fmap_item_t          root;
+        cl_fmap_item_t          nil;
+        cl_state_t                      state;
+        size_t                          count;
+        cl_pfn_fmap_cmp_t       pfn_compare;
+
+} cl_fmap_t;
+
+

FIELDS

+
       root
+               Map item that serves as root of the map.  The root is set up to
+               always have itself as parent.  The left pointer is set to point to
+               the item at the root.
+
+       nil
+               Map item that serves as terminator for all leaves, as well as providing
+               the list item used as quick list for storing map items in a list for
+               faster traversal.
+
+       state
+               State of the map, used to verify that operations are permitted.
+
+       count
+               Number of items in the map.
+
+       pfn_compare
+               Pointer to a compare function to invoke to compare the keys of
+               items in the map.
+
+

SEE ALSO

+
       Flexi Map, cl_pfn_fmap_cmp_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_fmap_tail

+ +

[top][index]

+

NAME

+
       cl_fmap_tail
+
+

DESCRIPTION

+
       The cl_fmap_tail function returns the map item with the highest key
+       value stored in a flexi map.
+
+

SYNOPSIS

+
CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_tail(
+        IN      const cl_fmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( (cl_fmap_item_t*)p_map->nil.pool_item.list_item.p_prev );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure whose item with the highest key
+               is returned.
+
+ RETURN VALUES
+       Pointer to the map item with the highest key in the flexi map.
+
+       Pointer to the map end if the flexi map was empty.
+
+

NOTES

+
       cl_fmap_tail does not remove the item from the map.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_head, cl_fmap_next, cl_fmap_prev, cl_fmap_end,
+       cl_fmap_item_t
+
+
+
+ +

[Functions] +Component Library: Flexi Map/cl_is_fmap_empty

+ +

[top][index]

+

NAME

+
       cl_is_fmap_empty
+
+

DESCRIPTION

+
       The cl_is_fmap_empty function returns whether a flexi map is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_fmap_empty(
+        IN      const cl_fmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        return( p_map->count == 0 );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_fmap_t structure to test for emptiness.
+
+ RETURN VALUES
+       TRUE if the flexi map is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_count, cl_fmap_remove_all
+
+
+
+ +

[Definitions] +Component Library: Flexi Map/cl_pfn_fmap_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_fmap_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_fmap_apply_t function type defines the prototype for functions
+       used to iterate items in a flexi map.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_fmap_apply_t)(
+        IN      cl_fmap_item_t* const   p_map_item,
+        IN      void*                                   context );
+
+

PARAMETERS

+
       p_map_item
+               [in] Pointer to a cl_fmap_item_t structure.
+
+       context
+               [in] Value passed to the callback function.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_fmap_apply_func
+       function.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_apply_func
+
+
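+
+       For example, a callback matching this prototype can count the items
+       in a map through its context pointer (a minimal sketch; the exact
+       synopsis of cl_fmap_apply_func is assumed from the reference above):
+
+               static void CL_API
+               count_item(
+                       IN      cl_fmap_item_t* const   p_map_item,
+                       IN      void*                   context )
+               {
+                       UNUSED_PARAM( p_map_item );
+                       /* context points at a caller-owned counter. */
+                       (*(size_t*)context)++;
+               }
+
+               size_t  count = 0;
+               cl_fmap_apply_func( &my_map, count_item, &count );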
+
+ +

[Definitions] +Component Library: Flexi Map/cl_pfn_fmap_cmp_t

+ +

[top][index]

+

NAME

+
       cl_pfn_fmap_cmp_t
+
+

DESCRIPTION

+
       The cl_pfn_fmap_cmp_t function type defines the prototype for functions
+       used to compare item keys in a flexi map.
+
+

SYNOPSIS

+
typedef intn_t
+(CL_API *cl_pfn_fmap_cmp_t)(
+        IN      const void* const               p_key1,
+        IN      const void* const               p_key2 );
+
+

PARAMETERS

+
       p_key1
+               [in] Pointer to the first of two keys to compare.
+
+       p_key2
+               [in] Pointer to the second of two keys to compare.
+
+

RETURN VALUE

+
       Returns 0 if the keys match.
+       Returns less than 0 if p_key1 is less than p_key2.
+       Returns greater than 0 if p_key1 is greater than p_key2.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_fmap_init function.
+
+

SEE ALSO

+
       Flexi Map, cl_fmap_init
+
+
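+
+       A minimal sketch of a compare function suitable for cl_fmap_init,
+       assuming 64-bit keys (the key type is an illustration only):
+
+               static intn_t CL_API
+               key64_cmp(
+                       IN      const void* const       p_key1,
+                       IN      const void* const       p_key2 )
+               {
+                       const uint64_t  key1 = *(const uint64_t*)p_key1;
+                       const uint64_t  key2 = *(const uint64_t*)p_key2;
+
+                       /* Compare rather than subtract to avoid truncating the
+                        * 64-bit difference to intn_t on 32-bit platforms.
+                        */
+                       if( key1 < key2 )
+                               return( -1 );
+                       if( key1 > key2 )
+                               return( 1 );
+                       return( 0 );
+               }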
+
diff --git a/branches/Ndi/docs/complib/cl_ioctl_h.html b/branches/Ndi/docs/complib/cl_ioctl_h.html
new file mode 100644
index 00000000..86ea18a0
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_ioctl_h.html
@@ -0,0 +1,609 @@
+./inc_docs/complib/cl_ioctl_h.html
+Generated from ./inc/complib/cl_ioctl.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+
+ +

[Modules] +Component Library/IOCTL Object

+ +

[top][parent][index]

+

NAME

+
       IOCTL Object
+
+

DESCRIPTION

+
       The IOCTL object provides functionality for handling IOCTL requests.
+
+       The IOCTL object is only available in kernel mode and provides
+       functionality for accessing information about IO requests initiated
+       by a user-mode application.  The IOCTL_CODE macro is used in user
+       mode to initiate IOCTL requests and in kernel mode to dispatch them.
+
+       In Linux, in order for the IOCTL object to be used, requests must be
+       initiated and handled using the Device Framework abstraction.
+
+

SEE ALSO

+
       Structures:
+               cl_ioctl_handle_t
+
+       Callbacks:
+               cl_pfn_ioctl_handler_t
+
+       Control Code Generation:
+               IOCTL_CODE
+
+       Kernel Mode Access:
+               cl_ioctl_process
+               cl_ioctl_complete
+               cl_ioctl_type
+               cl_ioctl_cmd
+               cl_ioctl_ctl_code
+               cl_ioctl_in_buf
+               cl_ioctl_in_size
+               cl_ioctl_out_buf
+               cl_ioctl_out_size
+
+       User Mode Access:
+               cl_ioctl_request
+               cl_ioctl_result
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_cmd

+ +

[top][index]

+

NAME

+
       cl_ioctl_cmd
+
+

DESCRIPTION

+
       Returns the command of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT uint16_t CL_API
+cl_ioctl_cmd(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the command of the specified IOCTL request, as defined using
+       the IOCTL_CODE macro.
+
+

NOTES

+
       The cl_ioctl_cmd function is only available in the kernel.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_type, cl_ioctl_ctl_code
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_complete

+ +

[top][index]

+

NAME

+
       cl_ioctl_complete
+
+

DESCRIPTION

+
       Fills in completion information for an IOCTL and releases the IOCTL request
+       for completion.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_ioctl_complete(
+        IN      cl_ioctl_handle_t       h_ioctl,
+        IN      cl_status_t                     io_status,
+        IN      size_t                          ret_bytes );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to the IOCTL being completed.  This handle was
+               provided to the IOCTL handler.
+
+       io_status
+               [in] Status of the IOCTL request.
+
+       ret_bytes
+               [in] Number of bytes written to the output buffer.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_process
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_ctl_code

+ +

[top][index]

+

NAME

+
       cl_ioctl_ctl_code
+
+

DESCRIPTION

+
       Returns the 32-bit control code of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT uint32_t CL_API
+cl_ioctl_ctl_code(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the 32-bit control code of the specified IOCTL request,
+       as defined using the IOCTL_CODE macro.
+
+

NOTES

+
       The cl_ioctl_ctl_code function is only available in the kernel.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_type, cl_ioctl_cmd
+
+
+
+ +

[Definitions] +Component Library: IOCTL Object/cl_ioctl_handle_t

+ +

[top][index]

+

NAME

+
       cl_ioctl_handle_t
+
+

DESCRIPTION

+
       Opaque handle representing an IO request.
+
+

NOTES

+
       The cl_ioctl_handle_t type is only available in the kernel.
+       The cl_ioctl_handle_t type should be treated as opaque, as it
+       varies from environment to environment.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_type, cl_ioctl_cmd, cl_ioctl_in_buf,
+       cl_ioctl_in_size, cl_ioctl_out_buf, cl_ioctl_out_size,
+       cl_ioctl_set_status, cl_ioctl_set_ret_bytes
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_in_buf

+ +

[top][index]

+

NAME

+
       cl_ioctl_in_buf
+
+

DESCRIPTION

+
       Returns a pointer to the input buffer of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_ioctl_in_buf(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the input buffer of the specified IOCTL request.
+
+

NOTES

+
       The cl_ioctl_in_buf function is only available in the kernel.
+
+       In Windows, for IOCTL operations defined as METHOD_IN_DIRECT, the
+       returned pointer points to the MDL describing the input buffer.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_in_size,
+       cl_ioctl_out_buf, cl_ioctl_out_size
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_in_size

+ +

[top][index]

+

NAME

+
       cl_ioctl_in_size
+
+

DESCRIPTION

+
       Returns the size of the input buffer of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT ULONG CL_API
+cl_ioctl_in_size(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the size, in bytes, of the input buffer of the specified
+       IOCTL request.
+
+

NOTES

+
       The cl_ioctl_in_size function is only available in the kernel.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_in_buf,
+       cl_ioctl_out_buf, cl_ioctl_out_size
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_out_buf

+ +

[top][index]

+

NAME

+
       cl_ioctl_out_buf
+
+

DESCRIPTION

+
       Returns a pointer to the output buffer of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_ioctl_out_buf(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns a pointer to the output buffer of the specified IOCTL request.
+
+

NOTES

+
       The cl_ioctl_out_buf function is only available in the kernel.
+
+       In Windows, for IOCTL operations defined as METHOD_IN_DIRECT or
+       METHOD_OUT_DIRECT, the returned pointer points to the MDL describing
+       the output buffer.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_out_size,
+       cl_ioctl_in_buf, cl_ioctl_in_size
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_out_size

+ +

[top][index]

+

NAME

+
       cl_ioctl_out_size
+
+

DESCRIPTION

+
       Returns the size of the output buffer of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT ULONG CL_API
+cl_ioctl_out_size(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the size, in bytes, of the output buffer of the specified
+       IOCTL request.
+
+

NOTES

+
       The cl_ioctl_out_size function is only available in the kernel.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_out_buf,
+       cl_ioctl_in_buf, cl_ioctl_in_size
+
+
+
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_process

+ +

[top][index]

+

NAME

+
       cl_ioctl_process
+
+

DESCRIPTION

+
       The cl_ioctl_process function unpacks an IOCTL request initiated by a
+       call to the cl_ioctl_request function and invokes a user-supplied callback.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ioctl_process(
+        IN      void                                    *p_ioctl,
+        IN      cl_pfn_ioctl_handler_t  pfn_ioctl_handler,
+        IN      void                                    *context_1,
+        IN      void                                    *context_2 );
+
+

PARAMETERS

+
       p_ioctl
+               [in] Pointer to OS-specific IOCTL information.  In Linux,
+               this parameter depends on whether the IOCTL is handled synchronously
+               or asynchronously.  See the notes for further detail.
+               In Windows, this is a pointer to an IRP.
+
+       pfn_ioctl_handler
+               [in] Pointer to the callback function to invoke for handling the IOCTL.
+               This callback is independent of the IOCTL command.
+
+       context_1
+               [in] First of two context parameters to pass to the handler.
+
+       context_2
+               [in] Second of two context parameters to pass to the handler.
+
+ RETURN VALUES
+       CL_SUCCESS if the IOCTL was processed successfully.
+
+       Other values to indicate various failures.
+
+

NOTES

+
       Users must call cl_ioctl_complete from within the handler if completing
+       the IOCTL request synchronously.  If the IOCTL request's control code is
+       invalid, the handler should return CL_INVALID_REQUEST.
+
+       In Linux, the p_ioctl parameter is a copy of the argp parameter on input,
+       and on output points to the IOCTL request object passed to the IOCTL
+       handler if and only if the IOCTL handler returned CL_PENDING.
+       This allows the user to cancel the request by passing the same
+       handle to the cancel routine that was passed to the IOCTL handler.
+       If all IOCTLs are handled synchronously, it is acceptable to pass the argp
+       parameter of the IOCTL entry point instead of a copy.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_pfn_ioctl_handler_t, cl_ioctl_complete
+
+
+
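+
+       For example, a Windows driver can dispatch IRP_MJ_DEVICE_CONTROL
+       requests through cl_ioctl_process (a minimal sketch; my_ioctl_handler
+       is a hypothetical handler of type cl_pfn_ioctl_handler_t and p_dev is
+       a hypothetical device context):
+
+               cl_status_t     status;
+
+               /* p_irp is the IRP received by the dispatch routine. */
+               status = cl_ioctl_process( p_irp, my_ioctl_handler, p_dev, NULL );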
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_request

+ +

[top][index]

+

NAME

+
       cl_ioctl_request
+
+

DESCRIPTION

+
       The cl_ioctl_request function is used by user-mode clients to initiate IOCTL
+       requests to a device.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ioctl_request(
+        IN              void                    *h_dev,
+        IN              uint32_t                ioctl_code,
+        IN              void                    *p_in_buf,
+        IN              size_t                  in_size,
+                OUT     void                    *p_out_buf,
+        IN              size_t                  out_size,
+                OUT     size_t                  *p_ret_bytes OPTIONAL,
+        IN              void                    *p_async_info OPTIONAL );
+
+

PARAMETERS

+
       h_dev
+               [in] Handle to the device to which the IOCTL request is targeted.
+               In Linux, this is a file descriptor.  In Windows, this is a file
+               handle.
+
+       ioctl_code
+               [in] Control code for the IOCTL request.
+
+       p_in_buf
+               [in] Pointer to the input buffer.
+
+       in_size
+               [in] Size, in bytes, of the input buffer.
+
+       p_out_buf
+               [out] Pointer to the output buffer.
+
+       out_size
+               [in] Size, in bytes, of the output buffer.
+
+       p_ret_bytes
+               [out] Number of bytes written to the output buffer.  This parameter is
+               mutually exclusive with the p_async_info parameter.
+
+       p_async_info
+               [in] For platforms that support asynchronous I/O, supplies a pointer
+               to that platform's async I/O structure, if any.  For Windows, this
+               is a pointer to an OVERLAPPED structure.  This parameter is mutually
+               exclusive with the p_ret_bytes parameter.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_result
+
+
+
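+
+       A minimal synchronous user-mode sketch (IOCTL_MY_GET_STATS and
+       my_stats_t are hypothetical, and h_dev was opened beforehand):
+
+               my_stats_t      stats;
+               size_t          ret_bytes;
+               cl_status_t     status;
+
+               status = cl_ioctl_request( h_dev, IOCTL_MY_GET_STATS,
+                       NULL, 0,                /* no input buffer */
+                       &stats, sizeof(stats),  /* output buffer */
+                       &ret_bytes,             /* synchronous completion */
+                       NULL );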
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_result

+ +

[top][index]

+

NAME

+
       cl_ioctl_result
+
+

DESCRIPTION

+
       Checks the status of an asynchronous IOCTL request.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ioctl_result(
+        IN      void            *h_dev,
+        IN      void            *p_async_info,
+        OUT     size_t          *p_ret_bytes,
+        IN      boolean_t       blocking );
+
+

PARAMETERS

+
       h_dev
+               [in] Handle to the device to which the IOCTL request is targeted.
+               In Linux, this is a file descriptor.  In Windows, this is a file
+               handle.
+
+       p_async_info
+               [in] For platforms that support asynchronous I/O, supplies a pointer
+               to that platform's async I/O structure, if any.  For Windows, this
+               is a pointer to an OVERLAPPED structure.  This must be the same
+               as that provided in the cl_ioctl_request function.
+
+       p_ret_bytes
+               [out] Number of bytes written to the output buffer.
+
+       blocking
+               [in] If TRUE, indicates that the call should wait until the
+               specified IOCTL request is complete.
+
+ RETURN VALUES
+       CL_SUCCESS if the IOCTL request was successful.  p_ret_bytes contains
+       the number of bytes written to the output buffer.
+
+       CL_PENDING if the IOCTL request is not yet complete.
+
+       Other status values to indicate errors.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_request
+
+
+
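+
+       For example, the result of a request issued with an OVERLAPPED
+       structure can be collected as follows (a sketch; the structure must
+       be the same one passed to cl_ioctl_request):
+
+               size_t          ret_bytes;
+               cl_status_t     status;
+
+               /* Block until the outstanding request completes. */
+               status = cl_ioctl_result( h_dev, &overlapped, &ret_bytes, TRUE );
+               if( status == CL_SUCCESS )
+               {
+                       /* The first ret_bytes bytes of the output buffer are valid. */
+               }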
+ +

[Functions] +Component Library: IOCTL Object/cl_ioctl_type

+ +

[top][index]

+

NAME

+
       cl_ioctl_type
+
+

DESCRIPTION

+
       Returns the type of an IOCTL.
+
+

SYNOPSIS

+
CL_EXPORT uint16_t CL_API
+cl_ioctl_type(
+        IN      cl_ioctl_handle_t       h_ioctl );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to an IOCTL
+
+

RETURN VALUE

+
       Returns the type of the specified IOCTL request, as defined using
+       the IOCTL_CODE macro.
+
+

NOTES

+
       The cl_ioctl_type function is only available in the kernel.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_cmd, cl_ioctl_ctl_code
+
+
+
+ +

[Definitions] +Component Library: IOCTL Object/cl_pfn_ioctl_handler_t

+ +

[top][index]

+

NAME

+
       cl_pfn_ioctl_handler_t
+
+

DESCRIPTION

+
       The cl_pfn_ioctl_handler_t function type defines the prototype for
+       IOCTL handlers used when handling IOCTL requests initiated by
+       cl_ioctl_request.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_ioctl_handler_t)(
+        IN      cl_ioctl_handle_t       h_ioctl,
+        IN      void                            *context_1,
+        IN      void                            *context_2 );
+
+

PARAMETERS

+
       h_ioctl
+               [in] Handle to the IOCTL request.
+
+       context_1
+               [in] First of two context parameters, as provided to cl_ioctl_process.
+
+       context_2
+               [in] Second of two context parameters, as provided to cl_ioctl_process.
+
+ RETURN VALUES
+       CL_SUCCESS if the IOCTL was completed successfully.
+
+       CL_PENDING if the IOCTL is being processed asynchronously.
+
+       Other return values in case of errors.
+
+

NOTES

+
       It is acceptable to complete the IOCTL successfully and report an error
+       status in the output buffer.
+
+

SEE ALSO

+
       IOCTL Object, cl_ioctl_handle_t, cl_ioctl_process
+
+
+
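+
+       A minimal handler sketch that completes requests synchronously
+       (IOCTL_MY_GET_STATS, my_dev_t and my_stats_t are hypothetical):
+
+               static cl_status_t CL_API
+               my_ioctl_handler(
+                       IN      cl_ioctl_handle_t       h_ioctl,
+                       IN      void                    *context_1,
+                       IN      void                    *context_2 )
+               {
+                       my_dev_t        *p_dev = (my_dev_t*)context_1;
+
+                       UNUSED_PARAM( context_2 );
+
+                       /* Reject control codes this handler does not implement. */
+                       if( cl_ioctl_ctl_code( h_ioctl ) != IOCTL_MY_GET_STATS ||
+                               cl_ioctl_out_size( h_ioctl ) < sizeof(my_stats_t) )
+                               return( CL_INVALID_REQUEST );
+
+                       /* Copy the statistics into the IOCTL's output buffer. */
+                       *(my_stats_t*)cl_ioctl_out_buf( h_ioctl ) = p_dev->stats;
+
+                       /* Complete synchronously from within the handler. */
+                       cl_ioctl_complete( h_ioctl, CL_SUCCESS, sizeof(my_stats_t) );
+                       return( CL_SUCCESS );
+               }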
+ +

[Definitions] +Component Library: IOCTL Object/IOCTL_CODE

+ +

[top][index]

+

NAME

+
       IOCTL_CODE
+
+

DESCRIPTION

+
       Macro for defining IO control command codes.
+
+

SYNOPSIS

+
       uint32_t IOCTL_CODE( uint16_t type, uint16_t cmd )
+
+

PARAMETERS

+
       type
+               [in] User-defined type representing the type of command.  For Linux,
+               the type is truncated to 8 bits.  For Windows, the type is a 16-bit
+               value, as described in "Specifying Device Types" in the DDK docs.
+
+       cmd
+               [in] User-defined command.  For Linux, the command field is truncated
+               to 8 bits.  For Windows, the command can be 12 bits, with values
+               below 0x800 reserved by Microsoft for system-defined commands.
+
+

RETURN VALUE

+
       A 32-bit control code.  User-mode clients use the control code to initiate
+       requests.  Kernel-mode clients use the control code to distinguish between
+       different requests.
+
+

NOTE

+
       In Windows, all IOCTL command codes defined with the IOCTL_CODE macro
+       result in FILE_ANY_ACCESS and METHOD_BUFFERED being specified.
+
+

SEE ALSO

+
       IOCTL Object, cl_dev_ioctl, cl_ioctl_type, cl_ioctl_cmd
+
+
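+
+       For example, a control code shared by the user-mode client and the
+       kernel-mode handler might be defined as follows (the type and command
+       values are arbitrary illustrations; custom Windows device types start
+       at 0x8000 and custom commands at 0x800):
+
+               #define MY_DEV_TYPE             0x8000
+               #define MY_CMD_GET_STATS        0x800
+
+               #define IOCTL_MY_GET_STATS      \
+                       IOCTL_CODE( MY_DEV_TYPE, MY_CMD_GET_STATS )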
+
diff --git a/branches/Ndi/docs/complib/cl_irqlock_h.html b/branches/Ndi/docs/complib/cl_irqlock_h.html
new file mode 100644
index 00000000..527b6ec7
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_irqlock_h.html
@@ -0,0 +1,221 @@
+./inc_docs/complib/cl_irqlock_h.html
+Generated from ./inc/complib/cl_irqlock.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+
+ +

[Modules] +Component Library/Irqlock

+ +

[top][parent][index]

+

NAME

+
       Irqlock
+
+

DESCRIPTION

+
       Irqlock provides synchronization at interrupt level between threads for 
+       exclusive access to a resource.
+
+       The irqlock functions manipulate a cl_irqlock_t structure which should 
+       be treated as opaque and should be manipulated only through the provided 
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_irqlock_t
+
+       Initialization:
+               cl_irqlock_construct, cl_irqlock_init, cl_irqlock_destroy
+
+       Manipulation:
+               cl_irqlock_acquire, cl_irqlock_release
+
+
+
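+
+       A minimal usage sketch (p_interrupt is the platform-specific value
+       described under cl_irqlock_init):
+
+               cl_irqlock_t    lock;
+
+               cl_irqlock_construct( &lock );
+               if( cl_irqlock_init( &lock, p_interrupt ) == CL_SUCCESS )
+               {
+                       cl_irqlock_acquire( &lock );
+                       /* Access the protected resource here. */
+                       cl_irqlock_release( &lock );
+               }
+               cl_irqlock_destroy( &lock );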
+ +

[Functions] +Component Library: Irqlock/cl_irqlock_acquire

+ +

[top][index]

+

NAME

+
       cl_irqlock_acquire
+
+

DESCRIPTION

+
       The cl_irqlock_acquire function acquires an IRQ lock.
+       This version of the lock does not prevent an interrupt from
+       occurring on the processor on which the code is being
+       executed. To protect a resource accessed at interrupt level,
+       use the cl_irqlock_acquire_irq function.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_irqlock_acquire( 
+        IN      cl_irqlock_t* const             p_irqlock );
+
+

PARAMETERS

+
       p_irqlock 
+               [in] Pointer to an IRQ lock structure to acquire.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Irqlock, cl_irqlock_release
+
+
+
+ +

[Functions] +Component Library: Irqlock/cl_irqlock_construct

+ +

[top][index]

+

NAME

+
       cl_irqlock_construct
+
+

DESCRIPTION

+
       The cl_irqlock_construct function initializes the state of an
+       IRQ lock.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_irqlock_construct( 
+        IN      cl_irqlock_t* const             p_irqlock );
+
+

PARAMETERS

+
       p_irqlock 
+               [in] Pointer to an IRQ lock structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_irqlock_destroy without first calling 
+       cl_irqlock_init.
+
+       Calling cl_irqlock_construct is a prerequisite to calling any other
+       IRQ lock function except cl_irqlock_init.
+
+

SEE ALSO

+
       Irqlock, cl_irqlock_init, cl_irqlock_destroy
+
+
+
+ +

[Functions] +Component Library: Irqlock/cl_irqlock_destroy

+ +

[top][index]

+

NAME

+
       cl_irqlock_destroy
+
+

DESCRIPTION

+
       The cl_irqlock_destroy function performs all necessary cleanup of an
+       IRQ lock.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_irqlock_destroy( 
+        IN      cl_irqlock_t* const             p_irqlock );
+
+

PARAMETERS

+
       p_irqlock 
+               [in] Pointer to an IRQ lock structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Performs any necessary cleanup of an IRQ lock. This function must only
+       be called if either cl_irqlock_construct or cl_irqlock_init has been 
+       called.
+
+

SEE ALSO

+
       Irqlock, cl_irqlock_construct, cl_irqlock_init
+
+
+
+ +

[Functions] +Component Library: Irqlock/cl_irqlock_init

+ +

[top][index]

+

NAME

+
       cl_irqlock_init
+
+

DESCRIPTION

+
       The cl_irqlock_init function initializes an IRQ lock for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_irqlock_init( 
+        IN      cl_irqlock_t* const             p_irqlock,
+        IN      cl_interrupt_t* const   p_interrupt );
+
+

PARAMETERS

+
       p_irqlock 
+               [in] Pointer to an IRQ lock structure to initialize.
+
+       p_interrupt
+               [in] Platform specific pointer conveying information about the
+               interrupt vector and level with which to synchronize.
+
+ RETURN VALUES
+       CL_SUCCESS if initialization succeeded.
+
+       CL_ERROR if initialization failed. Callers should call 
+       cl_irqlock_destroy to clean up any resources allocated during 
+       initialization.
+
+

NOTES

+
       Initializes the IRQ lock structure. Allows calling cl_irqlock_acquire
+       and cl_irqlock_release.
+
+       In Linux, the p_interrupt parameter is currently ignored.
+
+       In Windows, the p_interrupt parameter is a pointer to a KINTERRUPT object,
+       the value of which is supplied by a call to IoConnectInterrupt.
+
+

SEE ALSO

+
       Irqlock, cl_irqlock_construct, cl_irqlock_destroy, 
+       cl_irqlock_acquire, cl_irqlock_release
+
+
+
+ +

[Functions] +Component Library: Irqlock/cl_irqlock_release

+ +

[top][index]

+

NAME

+
       cl_irqlock_release
+
+

DESCRIPTION

+
       The cl_irqlock_release function releases an IRQ lock object.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_irqlock_release(
+        IN      cl_irqlock_t* const             p_irqlock );
+
+

PARAMETERS

+
       p_irqlock 
+               [in] Pointer to an IRQ lock structure to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Releases an IRQ lock after a call to cl_irqlock_acquire.
+
+

SEE ALSO

+
       Irqlock, cl_irqlock_acquire
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_list_h.html b/branches/Ndi/docs/complib/cl_list_h.html
new file mode 100644
index 00000000..33a5f936
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_list_h.html
@@ -0,0 +1,1412 @@
+./inc_docs/complib/cl_list_h.html
+Generated from ./inc/complib/cl_list.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+
+
+ +

[Modules] +Component Library/List

+ +

[top][parent][index]

+

NAME

+
       List
+
+

DESCRIPTION

+
       List stores objects in a doubly linked list.
+
+       Unlike quick list, users pass pointers to the object being stored, rather
+       than to a cl_list_item_t structure.  Insertion operations on a list can
+       fail, and callers should trap for such failures.
+
+       Use quick list in situations where insertion failures cannot be tolerated.
+
+       List is not thread safe, and users must provide serialization.
+
+       The list functions operate on a cl_list_t structure which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Types:
+               cl_list_iterator_t
+
+       Structures:
+               cl_list_t
+
+       Callbacks:
+               cl_pfn_list_apply_t, cl_pfn_list_find_t
+
+       Initialization/Destruction:
+               cl_list_construct, cl_list_init, cl_list_destroy
+
+       Iteration:
+               cl_list_next, cl_list_prev, cl_list_head, cl_list_tail,
+               cl_list_end
+
+       Manipulation:
+               cl_list_insert_head, cl_list_insert_tail,
+               cl_list_insert_array_head, cl_list_insert_array_tail,
+               cl_list_insert_prev, cl_list_insert_next,
+               cl_list_remove_head, cl_list_remove_tail,
+               cl_list_remove_object, cl_list_remove_item, cl_list_remove_all
+
+       Search:
+               cl_is_object_in_list, cl_list_find_from_head, cl_list_find_from_tail,
+               cl_list_apply_func
+
+       Attributes:
+               cl_list_count, cl_is_list_empty, cl_is_list_inited
+
+
+
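+
+       A minimal usage sketch (my_obj_t is a hypothetical user type):
+
+               cl_list_t       my_list;
+               my_obj_t        obj;
+               cl_status_t     status;
+
+               cl_list_construct( &my_list );
+               if( cl_list_init( &my_list, 8 ) == CL_SUCCESS )
+               {
+                       /* Insertion can fail, so the status must be checked. */
+                       status = cl_list_insert_tail( &my_list, &obj );
+
+                       cl_list_remove_all( &my_list );
+               }
+               cl_list_destroy( &my_list );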
+ +

[Functions] +Component Library: List/cl_is_list_empty

+ +

[top][index]

+

NAME

+
       cl_is_list_empty
+
+

DESCRIPTION

+
       The cl_is_list_empty function returns whether a list is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_list_empty(
+        IN      const cl_list_t* const  p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+        return( cl_is_qlist_empty( &p_list->list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure.
+
+ RETURN VALUES
+       TRUE if the specified list is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       List, cl_list_count, cl_list_remove_all
+
+
+
+ +

[Functions] +Component Library: List/cl_is_list_inited

+ +

[top][index]

+

NAME

+
       cl_is_list_inited
+
+

DESCRIPTION

+
       The cl_is_list_inited function returns whether a list was
+       initialized successfully.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_list_inited(
+        IN      const cl_list_t* const  p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /*
+         * The pool is the last thing initialized.  If it is initialized, the
+         * list is initialized too.
+         */
+        return( cl_is_qpool_inited( &p_list->list_item_pool ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure whose initialization state
+               to check.
+
+ RETURN VALUES
+       TRUE if the list was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a list to determine if invoking
+       member functions is appropriate.
+
+

SEE ALSO

+
       List
+
+
+
+ +

[Functions] +Component Library: List/cl_is_object_in_list

+ +

[top][index]

+

NAME

+
       cl_is_object_in_list
+
+

DESCRIPTION

+
       The cl_is_object_in_list function returns whether an object
+       is stored in a list.
+
+

SYNOPSIS

+
CL_EXPORT boolean_t CL_API
+cl_is_object_in_list(
+        IN      const cl_list_t* const  p_list,
+        IN      const void* const               p_object );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure in which to look for the object.
+
+       p_object
+               [in] Pointer to an object stored in a list.
+
+ RETURN VALUES
+       TRUE if p_object was found in the list.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       List
+
+
+
+ +

[Functions] +Component Library: List/cl_list_apply_func

+ +

[top][index]

+

NAME

+
       cl_list_apply_func
+
+

DESCRIPTION

+
       The cl_list_apply_func function executes a specified function for every
+       object stored in a list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_list_apply_func(
+        IN      const cl_list_t* const  p_list,
+        IN      cl_pfn_list_apply_t             pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure to iterate.
+
+       pfn_func
+               [in] Function invoked for every item in a list.
+               See the cl_pfn_list_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_list_apply_func invokes the specified callback function for every
+       object stored in the list, starting from the head.  The function specified
+       by the pfn_func parameter must not perform any list operations as these
+       would corrupt the list.
+
+

SEE ALSO

+
       List, cl_list_find_from_head, cl_list_find_from_tail,
+       cl_pfn_list_apply_t
+
+
+
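+
+       For example, every stored object can be passed to a dump routine
+       (a sketch; my_obj_t and my_obj_dump are hypothetical, and the
+       cl_pfn_list_apply_t parameter list is assumed):
+
+               static void CL_API
+               dump_obj(
+                       IN      void* const     p_object,
+                       IN      void*           context )
+               {
+                       UNUSED_PARAM( context );
+                       /* p_object is the pointer the user stored in the list. */
+                       my_obj_dump( (my_obj_t*)p_object );
+               }
+
+               cl_list_apply_func( &my_list, dump_obj, NULL );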
+ +

[Functions] +Component Library: List/cl_list_construct

+ +

[top][index]

+

NAME

+
       cl_list_construct
+
+

DESCRIPTION

+
       The cl_list_construct function constructs a list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_list_construct(
+        IN      cl_list_t* const        p_list );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to cl_list_t object whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_list_init, cl_list_destroy and cl_is_list_inited.
+
+       Calling cl_list_construct is a prerequisite to calling any other
+       list function except cl_list_init.
+
+

SEE ALSO

+
       List, cl_list_init, cl_list_destroy, cl_is_list_inited
+
+
+
+ +

[Functions] +Component Library: List/cl_list_count

+ +

[top][index]

+

NAME

+
       cl_list_count
+
+

DESCRIPTION

+
       The cl_list_count function returns the number of objects stored in a list.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_list_count(
+        IN      const cl_list_t* const  p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        return( cl_qlist_count( &p_list->list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure whose objects to count.
+
+ RETURN VALUES
+       Number of objects stored in the specified list.
+
+

SEE ALSO

+
       List
+
+
+
+ +

[Functions] +Component Library: List/cl_list_destroy

+ +

[top][index]

+

NAME

+
       cl_list_destroy
+
+

DESCRIPTION

+
       The cl_list_destroy function destroys a list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_list_destroy(
+        IN      cl_list_t* const        p_list );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to cl_list_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_list_destroy does not affect any of the objects stored in the list,
+       but does release all memory allocated internally.  Further operations
+       should not be attempted on the list after cl_list_destroy is invoked.
+
+       This function should only be called after a call to cl_list_construct
+       or cl_list_init.
+
+       In debug builds, cl_list_destroy asserts if the list is not empty.
+
+

SEE ALSO

+
       List, cl_list_construct, cl_list_init
+
+
+
+ +

[Functions] +Component Library: List/cl_list_end

+ +

[top][index]

+

NAME

+
       cl_list_end
+
+

DESCRIPTION

+
       The cl_list_end function returns the list iterator for
+       the end of a list.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_iterator_t CL_API
+cl_list_end(
+        IN      const cl_list_t* const  p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        return( cl_qlist_end( &p_list->list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure for which the end
+               iterator is to be returned.
+
+

RETURN VALUE

+
       cl_list_iterator_t for the end of the list.
+
+

NOTES

+
       Use cl_list_obj to retrieve the object associated with the
+       returned cl_list_iterator_t.
+
+

SEE ALSO

+
       List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev,
+       cl_list_obj
+
+
+
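+
+       Together with cl_list_head and cl_list_next, cl_list_end forms the
+       usual traversal idiom (a sketch; my_obj_t is hypothetical):
+
+               cl_list_iterator_t      itor;
+
+               for( itor = cl_list_head( &my_list );
+                        itor != cl_list_end( &my_list );
+                        itor = cl_list_next( itor ) )
+               {
+                       my_obj_t        *p_obj = (my_obj_t*)cl_list_obj( itor );
+                       /* Use p_obj here. */
+               }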
+ +

[Functions] +Component Library: List/cl_list_find_from_head

+ +

[top][index]

+

NAME

+
       cl_list_find_from_head
+
+

DESCRIPTION

+
       The cl_list_find_from_head function uses a specified function
+       to search for an object starting from the head of a list.
+
+

SYNOPSIS

+
CL_EXPORT const cl_list_iterator_t CL_API
+cl_list_find_from_head(
+        IN      const cl_list_t* const  p_list,
+        IN      cl_pfn_list_find_t              pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure to search.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_list_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       Returns the iterator for the object if found.
+
+       Returns the iterator for the list end otherwise.
+
+

NOTES

+
       cl_list_find_from_head does not remove the found object from
+       the list.  The iterator for the object is returned when the function
+       provided by the pfn_func parameter returns CL_SUCCESS.  The function
+       specified by the pfn_func parameter must not perform any list
+       operations as these would corrupt the list.
+
+

SEE ALSO

+
       List, cl_list_find_from_tail, cl_list_apply_func,
+       cl_pfn_list_find_t
+
+
+
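+
+       A minimal sketch of a matching callback (my_obj_t is hypothetical and
+       the cl_pfn_list_find_t parameter list is assumed; returning CL_SUCCESS
+       stops the search):
+
+               static cl_status_t CL_API
+               match_id(
+                       IN      const void* const       p_object,
+                       IN      void*                   context )
+               {
+                       if( ((const my_obj_t*)p_object)->id == *(uint32_t*)context )
+                               return( CL_SUCCESS );
+
+                       return( CL_NOT_FOUND );
+               }
+
+               itor = cl_list_find_from_head( &my_list, match_id, &wanted_id );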
+ +

[Functions] +Component Library: List/cl_list_find_from_tail

+ +

[top][index]

+

NAME

+
       cl_list_find_from_tail
+
+

DESCRIPTION

+
       The cl_list_find_from_tail function uses a specified function
+       to search for an object starting from the tail of a list.
+
+

SYNOPSIS

+
CL_EXPORT const cl_list_iterator_t CL_API
+cl_list_find_from_tail(
+        IN      const cl_list_t* const  p_list,
+        IN      cl_pfn_list_find_t              pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure to search.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_list_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       Returns the iterator for the object if found.
+
+       Returns the iterator for the list end otherwise.
+
+

NOTES

+
       cl_list_find_from_tail does not remove the found object from
+       the list.  The iterator for the object is returned when the function
+       provided by the pfn_func parameter returns CL_SUCCESS.  The function
+       specified by the pfn_func parameter must not perform any list
+       operations as these would corrupt the list.
+
+

SEE ALSO

+
       List, cl_list_find_from_head, cl_list_apply_func,
+       cl_pfn_list_find_t
+
+
+
+ +

[Functions] +Component Library: List/cl_list_head

+ +

[top][index]

+

NAME

+
       cl_list_head
+
+

DESCRIPTION

+
       The cl_list_head function returns a list iterator for
+       the head of a list.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_iterator_t CL_API
+cl_list_head(
+        IN      const cl_list_t* const  p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        return( cl_qlist_head( &p_list->list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure for which the iterator for the
+               object at the head is to be returned.
+
+ RETURN VALUES
+       cl_list_iterator_t for the head of the list.
+
+       cl_list_iterator_t for the end of the list if the list is empty.
+
+

NOTES

+
       Use cl_list_obj to retrieve the object associated with the
+       returned cl_list_iterator_t.
+
+

SEE ALSO

+
       List, cl_list_tail, cl_list_next, cl_list_prev, cl_list_end,
+       cl_list_obj
+
+
+
+ +

[Functions] +Component Library: List/cl_list_init

+ +

[top][index]

+

NAME

+
       cl_list_init
+
+

DESCRIPTION

+
       The cl_list_init function initializes a list for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_list_init(
+        IN      cl_list_t* const        p_list,
+        IN      const size_t            min_items );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to cl_list_t structure to initialize.
+
+       min_items
+               [in] Minimum number of items that can be stored.  All necessary
+               allocations to allow storing the minimum number of items are performed
+               at initialization time.
+
+ RETURN VALUES
+       CL_SUCCESS if the list was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for initialization.
+
+

NOTES

+
       The list will always be able to store at least as many items as specified
+       by the min_items parameter.
+
+

SEE ALSO

+
       List, cl_list_construct, cl_list_destroy, cl_list_insert_head,
+       cl_list_insert_tail, cl_list_remove_head, cl_list_remove_tail
+
+
+
+ +

[Functions] +Component Library: List/cl_list_insert_array_head

+ +

[top][index]

+

NAME

+
       cl_list_insert_array_head
+
+

DESCRIPTION

+
+       The cl_list_insert_array_head function inserts an array of objects
+       at the head of a list.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_list_insert_array_head(
+        IN      cl_list_t* const        p_list,
+        IN      const void* const       p_array,
+        IN      uint32_t                        item_count,
+        IN      const uint32_t          item_size );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the objects.
+
+       p_array
+               [in] Pointer to the first object in an array.
+
+       item_count
+               [in] Number of objects in the array.
+
+       item_size
+               [in] Size of the objects added to the list.  This is the stride in the
+               array from one object to the next.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

NOTES

+
       Inserts all objects in the array to the head of the list, preserving the
+       ordering of the objects.  If not successful, no items are added.
+       List insertion operations are guaranteed to work for the minimum number
+       of items as specified in cl_list_init by the min_items parameter.
+
+

SEE ALSO

+
       List, cl_list_insert_array_tail, cl_list_insert_head, cl_list_insert_tail,
+       cl_list_insert_prev, cl_list_insert_next
+
+
+
+ +

[Functions] +Component Library: List/cl_list_insert_array_tail

+ +

[top][index]

+

NAME

+
       cl_list_insert_array_tail
+
+

DESCRIPTION

+
       The cl_list_insert_array_tail function inserts an array of objects
+       at the tail of a list.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_list_insert_array_tail(
+        IN      cl_list_t* const        p_list,
+        IN      const void* const       p_array,
+        IN      uint32_t                        item_count,
+        IN      const uint32_t          item_size);
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the objects.
+
+       p_array
+               [in] Pointer to the first object in an array.
+
+       item_count
+               [in] Number of objects in the array.
+
+       item_size
+               [in] Size of the objects added to the list.  This is the stride in the
+               array from one object to the next.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

NOTES

+
       Inserts all objects in the array to the tail of the list, preserving the
+       ordering of the objects.  If not successful, no items are added.
+       List insertion operations are guaranteed to work for the minimum number
+       of items as specified in cl_list_init by the min_items parameter.
+
+

SEE ALSO

+
       List, cl_list_insert_array_head, cl_list_insert_head, cl_list_insert_tail,
+       cl_list_insert_prev, cl_list_insert_next
+
+
+
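+
+       For example, the addresses of four contiguous objects can be stored
+       in order with a single call (a sketch; my_obj_t is hypothetical):
+
+               my_obj_t        objs[4];
+               cl_status_t     status;
+
+               /* The address of each array element is stored in the list. */
+               status = cl_list_insert_array_tail( &my_list, objs,
+                       4, sizeof(my_obj_t) );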
+ +

[Functions] +Component Library: List/cl_list_insert_head

+ +

[top][index]

+

NAME

+
       cl_list_insert_head
+
+

DESCRIPTION

+
       The cl_list_insert_head function inserts an object at the head of a list.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_list_insert_head(
+        IN      cl_list_t* const        p_list,
+        IN      const void* const       p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* Get a list item to add to the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+        if( !p_pool_obj )
+                return( CL_INSUFFICIENT_MEMORY );
+
+        p_pool_obj->list_obj.p_object = p_object;
+        cl_qlist_insert_head( &p_list->list, &p_pool_obj->list_obj.list_item );
+        return( CL_SUCCESS );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the object.
+
+       p_object
+               [in] Pointer to an object to insert into the list.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

NOTES

+
       Inserts the specified object at the head of the list.  List insertion
+       operations are guaranteed to work for the minimum number of items as
+       specified in cl_list_init by the min_items parameter.
+
+

SEE ALSO

+
       List, cl_list_insert_tail, cl_list_insert_array_head,
+       cl_list_insert_array_tail, cl_list_insert_prev, cl_list_insert_next,
+       cl_list_remove_head
+
+
+
+ +

[Functions] +Component Library: List/cl_list_insert_next

+ +

[top][index]

+

NAME

+
       cl_list_insert_next
+
+

DESCRIPTION

+
       The cl_list_insert_next function inserts an object in a list after
+       the object associated with a given iterator.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_list_insert_next(
+        IN      cl_list_t* const                        p_list,
+        IN      const cl_list_iterator_t        iterator,
+        IN      const void* const                       p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* Get a list item to add to the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+        if( !p_pool_obj )
+                return( CL_INSUFFICIENT_MEMORY );
+
+        p_pool_obj->list_obj.p_object = p_object;
+        cl_qlist_insert_next( &p_list->list, (cl_list_item_t*)iterator,
+                &p_pool_obj->list_obj.list_item );
+        return( CL_SUCCESS );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the object.
+
+       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev.
+
+       p_object
+               [in] Pointer to an object to insert into the list.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

SEE ALSO

+
       List, cl_list_insert_prev, cl_list_insert_head, cl_list_insert_tail,
+       cl_list_insert_array_head, cl_list_insert_array_tail
+
+
+
+ +

[Functions] +Component Library: List/cl_list_insert_prev

+ +

[top][index]

+

NAME

+
       cl_list_insert_prev
+
+

DESCRIPTION

+
       The cl_list_insert_prev function inserts an object in a list before
+       the object associated with a given iterator.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_list_insert_prev(
+        IN      cl_list_t* const                        p_list,
+        IN      const cl_list_iterator_t        iterator,
+        IN      const void* const                       p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* Get a list item to add to the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+        if( !p_pool_obj )
+                return( CL_INSUFFICIENT_MEMORY );
+
+        p_pool_obj->list_obj.p_object = p_object;
+        cl_qlist_insert_prev( &p_list->list, (cl_list_item_t*)iterator,
+                &p_pool_obj->list_obj.list_item );
+        return( CL_SUCCESS );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the object.
+
+       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev.
+
+       p_object
+               [in] Pointer to an object to insert into the list.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

SEE ALSO

+
       List, cl_list_insert_next, cl_list_insert_head, cl_list_insert_tail,
+       cl_list_insert_array_head, cl_list_insert_array_tail
+
+
+
+ +

[Functions] +Component Library: List/cl_list_insert_tail

+ +

[top][index]

+

NAME

+
       cl_list_insert_tail
+
+

DESCRIPTION

+
       The cl_list_insert_tail function inserts an object at the tail of a list.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_list_insert_tail(
+        IN      cl_list_t* const        p_list,
+        IN      const void* const       p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* Get a list item to add to the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+        if( !p_pool_obj )
+                return( CL_INSUFFICIENT_MEMORY );
+
+        p_pool_obj->list_obj.p_object = p_object;
+        cl_qlist_insert_tail( &p_list->list, &p_pool_obj->list_obj.list_item );
+        return( CL_SUCCESS );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure into which to insert the object.
+
+       p_object
+               [in] Pointer to an object to insert into the list.
+
+ RETURN VALUES
+       CL_SUCCESS if the insertion was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+
+

NOTES

+
       Inserts the specified object at the tail of the list.  List insertion
+       operations are guaranteed to work for the minimum number of items as
+       specified in cl_list_init by the min_items parameter.
+
+

SEE ALSO

+
       List, cl_list_insert_head, cl_list_insert_array_head,
+       cl_list_insert_array_tail, cl_list_insert_prev, cl_list_insert_next,
+       cl_list_remove_tail
+
+
+
+ +

[Definitions] +Component Library: List/cl_list_iterator_t

+ +

[top][index]

+

NAME

+
       cl_list_iterator_t
+
+

DESCRIPTION

+
       Iterator type used to walk a list.
+
+

SYNOPSIS

+
typedef const cl_list_item_t *cl_list_iterator_t;
+
+

NOTES

+
       The iterator should be treated as opaque to prevent corrupting the list.
+
+

SEE ALSO

+
       List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev,
+       cl_list_obj
+
+
+
+ +

[Functions] +Component Library: List/cl_list_next

+ +

[top][index]

+

NAME

+
       cl_list_next
+
+

DESCRIPTION

+
       The cl_list_next function returns a list iterator for the object stored
+       in a list after the object associated with a given list iterator.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_iterator_t CL_API
+cl_list_next(
+        IN      const cl_list_iterator_t        iterator )
+{
+        CL_ASSERT( iterator );
+
+        return( cl_qlist_next( iterator ) );
+}
+
+

PARAMETERS

+
       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev.
+
+ RETURN VALUES
+       cl_list_iterator_t for the object following the object associated with
+       the list iterator specified by the iterator parameter.
+
+       cl_list_iterator_t for the end of the list if no object follows it.
+
+

NOTES

+
       Use cl_list_obj to retrieve the object associated with the
+       returned cl_list_iterator_t.
+
+

SEE ALSO

+
       List, cl_list_prev, cl_list_head, cl_list_tail, cl_list_end,
+       cl_list_obj
+
+
+
+ +

[Functions] +Component Library: List/cl_list_obj

+ +

[top][index]

+

NAME

+
       cl_list_obj
+
+

DESCRIPTION

+
       The cl_list_obj function returns the object associated
+       with a list iterator.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_list_obj(
+        IN      const cl_list_iterator_t        iterator )
+{
+        CL_ASSERT( iterator );
+
+        return( (void*)((cl_pool_obj_t*)iterator)->list_obj.p_object );
+}
+
+

PARAMETERS

+
       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev whose object is requested.
+
+

RETURN VALUE

+
       Pointer to the object associated with the list iterator specified
+       by the iterator parameter.
+
+

SEE ALSO

+
       List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev
+
+
+
+ +

[Functions] +Component Library: List/cl_list_prev

+ +

[top][index]

+

NAME

+
       cl_list_prev
+
+

DESCRIPTION

+
       The cl_list_prev function returns a list iterator for the object stored
+       in a list before the object associated with a given list iterator.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_iterator_t CL_API
+cl_list_prev(
+        IN      const cl_list_iterator_t        iterator )
+{
+        CL_ASSERT( iterator );
+
+        return( cl_qlist_prev( iterator ) );
+}
+
+

PARAMETERS

+
       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev.
+
+ RETURN VALUES
+       cl_list_iterator_t for the object preceding the object associated with
+       the list iterator specified by the iterator parameter.
+
+       cl_list_iterator_t for the end of the list if no object precedes it.
+
+

NOTES

+
       Use cl_list_obj to retrieve the object associated with the
+       returned cl_list_iterator_t.
+
+

SEE ALSO

+
       List, cl_list_next, cl_list_head, cl_list_tail, cl_list_end,
+       cl_list_obj
+
+
+
+ +

[Functions] +Component Library: List/cl_list_remove_all

+ +

[top][index]

+

NAME

+
       cl_list_remove_all
+
+

DESCRIPTION

+
       The cl_list_remove_all function removes all objects from a list,
+       leaving it empty.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_list_remove_all(
+        IN      cl_list_t* const        p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* Return all the list items to the pool. */
+        cl_qpool_put_list( &p_list->list_item_pool, &p_list->list );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure from which to remove all objects.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       List, cl_list_remove_head, cl_list_remove_tail, cl_list_remove_object,
+       cl_list_remove_item
+
+
+
+ +

[Functions] +Component Library: List/cl_list_remove_head

+ +

[top][index]

+

NAME

+
       cl_list_remove_head
+
+

DESCRIPTION

+
       The cl_list_remove_head function removes an object from the head of a list.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_list_remove_head(
+        IN      cl_list_t* const        p_list )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* See if the list is empty. */
+        if( cl_is_qlist_empty( &p_list->list ) )
+                return( NULL );
+
+        /* Get the item at the head of the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qlist_remove_head( &p_list->list );
+
+        /* Place the pool item back into the pool. */
+        cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)p_pool_obj );
+
+        return( (void*)p_pool_obj->list_obj.p_object );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure from which to remove an object.
+
+ RETURN VALUES
+       Returns the pointer to the object formerly at the head of the list.
+
+       NULL if the list was empty.
+
+

SEE ALSO

+
       List, cl_list_remove_tail, cl_list_remove_all, cl_list_remove_object,
+       cl_list_remove_item, cl_list_insert_head
+
+
+
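+
+       For example, a list can be drained from the head (a sketch; my_obj_t
+       and my_obj_free are hypothetical):
+
+               my_obj_t        *p_obj;
+
+               while( (p_obj = (my_obj_t*)cl_list_remove_head( &my_list )) != NULL )
+                       my_obj_free( p_obj );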
+ +

[Functions] +Component Library: List/cl_list_remove_item

+ +

[top][index]

+

NAME

+
       cl_list_remove_item
+
+

DESCRIPTION

+
       The cl_list_remove_item function removes from a list the object
+       associated with a given iterator.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_list_remove_item(
+        IN      cl_list_t* const                        p_list,
+        IN      const cl_list_iterator_t        iterator )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        cl_qlist_remove_item( &p_list->list, (cl_list_item_t*)iterator );
+
+        /* Place the list item back into the pool. */
+        cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)iterator );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure from which to remove the item.
+
+       iterator
+               [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+               cl_list_tail, cl_list_next, or cl_list_prev.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       List, cl_list_remove_object, cl_list_remove_head, cl_list_remove_tail,
+       cl_list_remove_all
+
+
+
+ +

[Functions] +Component Library: List/cl_list_remove_object

+ +

[top][index]

+

NAME

+
       cl_list_remove_object
+
+

DESCRIPTION

+
       The cl_list_remove_object function removes a specific object from a list.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_list_remove_object(
+        IN      cl_list_t* const        p_list,
+        IN      const void* const       p_object );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure from which to remove the object.
+
+       p_object
+               [in] Pointer to an object to remove from the list.
+
+ RETURN VALUES
+       CL_SUCCESS if the object was removed.
+
+       CL_NOT_FOUND if the object was not found in the list.
+
+

NOTES

+
       Removes the first occurrence of an object from a list.
+
+

SEE ALSO

+
       List, cl_list_remove_item, cl_list_remove_head, cl_list_remove_tail,
+       cl_list_remove_all
+
+
+
+ +

[Functions] +Component Library: List/cl_list_remove_tail

+ +

[top][index]

+

NAME

+
       cl_list_remove_tail
+
+

DESCRIPTION

+
       The cl_list_remove_tail function removes an object from the tail of a list.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_list_remove_tail(
+        IN      cl_list_t* const        p_list )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        /* See if the list is empty. */
+        if( cl_is_qlist_empty( &p_list->list ) )
+                return( NULL );
+
+        /* Get the item at the tail of the list. */
+        p_pool_obj = (cl_pool_obj_t*)cl_qlist_remove_tail( &p_list->list );
+
+        /* Place the list item back into the pool. */
+        cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)p_pool_obj );
+
+        return( (void*)p_pool_obj->list_obj.p_object );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure from which to remove an object.
+
+ RETURN VALUES
+       Returns the pointer to the object formerly at the tail of the list.
+
+       NULL if the list was empty.
+
+

SEE ALSO

+
       List, cl_list_remove_head, cl_list_remove_all, cl_list_remove_object,
+       cl_list_remove_item, cl_list_insert_head
+
+
+
+ +

[Structures] +Component Library: List/cl_list_t

+ +

[top][index]

+

NAME

+
       cl_list_t
+
+

DESCRIPTION

+
       List structure.
+
+       The cl_list_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_list
+{
+        cl_qlist_t                      list;
+        cl_qpool_t                      list_item_pool;
+
+} cl_list_t;
+
+

FIELDS

+
       list
+               Quick list of items stored in the list.
+
+       list_item_pool
+               Quick pool of list objects for storing objects in the quick list.
+
+

SEE ALSO

+
       List
+
+
+
+ +

[Functions] +Component Library: List/cl_list_tail

+ +

[top][index]

+

NAME

+
       cl_list_tail
+
+

DESCRIPTION

+
       The cl_list_tail function returns a list iterator for
+       the tail of a list.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_iterator_t CL_API
+cl_list_tail(
+        IN      const cl_list_t* const  p_list )
+{
+        CL_ASSERT( p_list );
+        CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+        return( cl_qlist_tail( &p_list->list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_list_t structure for which the iterator for the
+               object at the tail is to be returned.
+
+ RETURN VALUES
+       cl_list_iterator_t for the tail of the list.
+
+       cl_list_iterator_t for the end of the list if the list is empty.
+
+

NOTES

+
       Use cl_list_obj to retrieve the object associated with the
+       returned cl_list_iterator_t.
+
+

SEE ALSO

+
       List, cl_list_head, cl_list_next, cl_list_prev, cl_list_end,
+       cl_list_obj
+
+
+
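
EXAMPLE

+
       A sketch (editorial, not from the header) of walking a list backward
+       from the tail.  cl_list_end detects the end of the iteration and
+       cl_list_obj recovers the stored object pointer; use_object is a
+       hypothetical consumer.
+
+static void
+walk_backward(
+        IN      const cl_list_t* const  p_list )
+{
+        cl_list_iterator_t      itor;
+
+        for( itor = cl_list_tail( p_list );
+                 itor != cl_list_end( p_list );
+                 itor = cl_list_prev( itor ) )
+        {
+                use_object( cl_list_obj( itor ) );
+        }
+}
+
+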
+ +

[Definitions] +Component Library: List/cl_pfn_list_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_list_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_list_apply_t function type defines the prototype for functions
+       used to iterate objects in a list.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_list_apply_t)(
+        IN      void* const                     p_object,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object stored in a list.
+
+       context
+               [in] Context provided in a call to cl_list_apply_func.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_list_apply_func
+       function.
+
+

SEE ALSO

+
       List, cl_list_apply_func
+
+
+
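
EXAMPLE

+
       A sketch (editorial) of an apply callback matching the
+       cl_pfn_list_apply_t prototype.  The context is assumed to point to a
+       counter that is incremented once per object visited; the callback is
+       then passed to cl_list_apply_func.
+
+static void CL_API
+count_object(
+        IN      void* const     p_object,
+        IN      void*           context )
+{
+        (void)p_object;         /* this callback only counts objects */
+        (*(size_t*)context)++;
+}
+
+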
+ +

[Definitions] +Component Library: List/cl_pfn_list_find_t

+ +

[top][index]

+

NAME

+
       cl_pfn_list_find_t
+
+

DESCRIPTION

+
       The cl_pfn_list_find_t function type defines the prototype for functions
+       used to find objects in a list.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_list_find_t)(
+        IN      const void* const       p_object,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object stored in a list.
+
+       context
+               [in] Context provided in a call to cl_list_find_from_head or
+               cl_list_find_from_tail.
+
+ RETURN VALUES
+       Return CL_SUCCESS if the desired item was found.  This stops list iteration.
+
+       Return CL_NOT_FOUND to continue the list iteration.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_list_find_from_head
+       and cl_list_find_from_tail functions.
+
+

SEE ALSO

+
       List, cl_list_find_from_head, cl_list_find_from_tail
+
+
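
EXAMPLE

+
       A sketch (editorial) of a find callback matching the cl_pfn_list_find_t
+       prototype.  The my_obj_t type and its id field are hypothetical; the
+       context is assumed to carry the id being sought, as passed to
+       cl_list_find_from_head or cl_list_find_from_tail.
+
+static cl_status_t CL_API
+match_by_id(
+        IN      const void* const       p_object,
+        IN      void*                   context )
+{
+        const my_obj_t  *p_obj = (const my_obj_t*)p_object;
+
+        /* Returning CL_SUCCESS stops the iteration at this object. */
+        if( p_obj->id == *(uint32_t*)context )
+                return( CL_SUCCESS );
+
+        return( CL_NOT_FOUND );
+}
+
+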
+ + diff --git a/branches/Ndi/docs/complib/cl_log_h.html b/branches/Ndi/docs/complib/cl_log_h.html new file mode 100644 index 00000000..7c3dffaf --- /dev/null +++ b/branches/Ndi/docs/complib/cl_log_h.html @@ -0,0 +1,117 @@ + + + + +./inc_docs/complib/cl_log_h.html + + + + +Generated from ./inc/complib/cl_log.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52 +
+
+ +

[Modules] +Component Library/Log Provider

+ +

[top][parent][index]

+

NAME

+
       Log Provider
+
+

DESCRIPTION

+
       The log provider allows users to log information in a system log instead of
+       the console or debugger target.
+
+
+
+ +

[Functions] +Component Library: Log Provider/cl_log_event

+ +

[top][index]

+

NAME

+
       cl_log_event
+
+

DESCRIPTION

+
       The cl_log_event function adds a new entry to the system log.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_log_event(
+        IN      const char* const       name,
+        IN      const cl_log_type_t     type,
+        IN      const char* const       message,
+        IN      const void* const       p_data OPTIONAL,
+        IN      const uint32_t          data_len );
+
+

PARAMETERS

+
       name
+               [in] Pointer to an ANSI string containing the name of the source for
+               the log entry.
+
+       type
+               [in] Defines the type of log entry to add to the system log.
+               See the definition of cl_log_type_t for acceptable values.
+
+       message
+               [in] Pointer to an ANSI string containing the text for the log entry.
+               The message should not be terminated with a new line, as the log
+               provider appends a new line to all log entries.
+
+       p_data
+               [in] Optional pointer to data providing context for the log entry.
+               At most 256 bytes of data can be successfully logged.
+
+       data_len
+               [in] Length of the buffer pointed to by the p_data parameter.  Ignored
+               if p_data is NULL.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       If the data length exceeds the maximum supported, the event is logged
+       without its accompanying data.
+
+

SEE ALSO

+
       Log Provider, cl_log_type_t
+
+
+
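
EXAMPLE

+
       A sketch (editorial) of logging a warning with a small block of context
+       data.  The source name and the payload are illustrative only.
+
+        uint32_t        retry_count = 5;
+
+        cl_log_event( "my_driver", CL_LOG_WARN,
+                "Retry threshold exceeded", &retry_count, sizeof(retry_count) );
+
+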
+ +

[Definitions] +Component Library: Log Provider/cl_log_type_t

+ +

[top][index]

+

NAME

+
       cl_log_type_t
+
+

DESCRIPTION

+
       The cl_log_type_t enumerated type is used to differentiate between
+       different types of log entries.
+
+

SYNOPSIS

+
typedef enum _cl_log_type
+{
+        CL_LOG_INFO,
+        CL_LOG_WARN,
+        CL_LOG_ERROR
+
+} cl_log_type_t;
+
+

VALUES

+
       CL_LOG_INFO
+               Indicates a log entry is purely informational.
+
+       CL_LOG_WARN
+               Indicates a log entry is a warning but non-fatal.
+
+       CL_LOG_ERROR
+               Indicates a log entry is a fatal error.
+
+

SEE ALSO

+
       Log Provider, cl_log_event
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_map_h.html b/branches/Ndi/docs/complib/cl_map_h.html new file mode 100644 index 00000000..2845187a --- /dev/null +++ b/branches/Ndi/docs/complib/cl_map_h.html @@ -0,0 +1,898 @@ + + + + +./inc_docs/complib/cl_map_h.html + + + + +Generated from ./inc/complib/cl_map.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Map

+ +

[top][parent][index]

+

NAME

+
       Map
+
+

DESCRIPTION

+
       Map implements a binary tree that stores user objects.  Each item stored
+       in a map has a unique 64-bit key (duplicates are not allowed).  Map
+       provides the ability to efficiently search for an item given a key.
+
+       Map may allocate memory when inserting objects, and can therefore fail
+       operations due to insufficient memory.  Use quick map in situations where
+       such insertion failures cannot be tolerated.
+
+       Map is not thread safe, and users must provide serialization when adding
+       and removing items from the map.
+
+       The map functions operate on a cl_map_t structure, which should be treated
+       as opaque and should be manipulated only through the provided functions.
+
+

SEE ALSO

+
       Types:
+               cl_map_iterator_t
+
+       Structures:
+               cl_map_t, cl_map_item_t, cl_map_obj_t
+
+       Item Manipulation:
+               cl_map_obj, cl_map_key
+
+       Initialization:
+               cl_map_construct, cl_map_init, cl_map_destroy
+
+       Iteration:
+               cl_map_end, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+
+       Manipulation
+               cl_map_insert, cl_map_get, cl_map_remove_item, cl_map_remove,
+               cl_map_remove_all, cl_map_merge, cl_map_delta
+
+       Attributes:
+               cl_map_count, cl_is_map_empty, cl_is_map_inited
+
+
+
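
EXAMPLE

+
       A minimal sketch (editorial, not from the header) of the typical map
+       life cycle: construct, initialize, insert, look up, and destroy.  The
+       my_obj_t type is hypothetical and error handling is abbreviated.
+
+static void
+map_example( void )
+{
+        cl_map_t        map;
+        my_obj_t        obj;
+        void            *p_obj;
+
+        cl_map_construct( &map );
+        if( cl_map_init( &map, 32 ) != CL_SUCCESS )
+                return;
+
+        /* Associate the object with key 5. */
+        if( cl_map_insert( &map, 5, &obj ) == &obj )
+        {
+                /* Look the object up again without removing it. */
+                p_obj = cl_map_get( &map, 5 );
+                CL_ASSERT( p_obj == &obj );
+        }
+
+        cl_map_remove_all( &map );
+        cl_map_destroy( &map );
+}
+
+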
+ +

[Functions] +Component Library: Map/cl_is_map_inited

+ +

[top][index]

+

NAME

+
       cl_is_map_inited
+
+

DESCRIPTION

+
       The cl_is_map_inited function returns whether a map was
+       successfully initialized.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_map_inited(
+        IN      const cl_map_t* const   p_map )
+{
+        /*
+         * The map's pool of map items is the last thing initialized.
+         * We can therefore use it to test for initialization.
+         */
+        return( cl_is_qpool_inited( &p_map->pool ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_map_t structure whose initialization state
+               to check.
+
+ RETURN VALUES
+       TRUE if the map was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a map to determine if invoking
+       member functions is appropriate.
+
+

SEE ALSO

+
       Map
+
+
+
+ +

[Functions] +Component Library: Map/cl_is_map_empty

+ +

[top][index]

+

NAME

+
       cl_is_map_empty
+
+

DESCRIPTION

+
       The cl_is_map_empty function returns whether a map is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_map_empty(
+        IN      const cl_map_t* const   p_map )
+{
+        CL_ASSERT( p_map );
+        return( cl_is_qmap_empty( &p_map->qmap ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map to test for emptiness.
+
+ RETURN VALUES
+       TRUE if the map is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Map, cl_map_count, cl_map_remove_all
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_construct

+ +

[top][index]

+

NAME

+
       cl_map_construct
+
+

DESCRIPTION

+
       The cl_map_construct function constructs a map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_map_construct(
+        IN      cl_map_t* const p_map );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_map_t structure to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_map_init, cl_map_destroy, and cl_is_map_inited.
+
+       Calling cl_map_construct is a prerequisite to calling any other
+       map function except cl_map_init.
+
+

SEE ALSO

+
       Map, cl_map_init, cl_map_destroy, cl_is_map_inited
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_count

+ +

[top][index]

+

NAME

+
       cl_map_count
+
+

DESCRIPTION

+
       The cl_map_count function returns the number of items stored
+       in a map.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_map_count(
+        IN      const cl_map_t* const   p_map )
+{
+        CL_ASSERT( p_map );
+        return( cl_qmap_count( &p_map->qmap ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map whose item count to return.
+
+

RETURN VALUE

+
       Returns the number of items stored in the map.
+
+

SEE ALSO

+
       Map, cl_is_map_empty
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_delta

+ +

[top][index]

+

NAME

+
       cl_map_delta
+
+

DESCRIPTION

+
       The cl_map_delta function computes the differences between two maps.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_map_delta(
+        IN OUT  cl_map_t* const p_map1,
+        IN OUT  cl_map_t* const p_map2,
+        OUT             cl_map_t* const p_new,
+        OUT             cl_map_t* const p_old );
+
+

PARAMETERS

+
       p_map1
+               [in/out] Pointer to the first of two cl_map_t structures whose
+               differences to compute.
+
+       p_map2
+               [in/out] Pointer to the second of two cl_map_t structures whose
+               differences to compute.
+
+       p_new
+               [out] Pointer to an empty cl_map_t structure that contains the items
+               unique to p_map2 upon return from the function.
+
+       p_old
+               [out] Pointer to an empty cl_map_t structure that contains the items
+               unique to p_map1 upon return from the function.
+
+ RETURN VALUES
+       CL_SUCCESS if the operation succeeded.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the operation
+       to succeed.
+
+

NOTES

+
       Items are evaluated based on their keys.  Items that exist in both
+       p_map1 and p_map2 remain in their respective maps.  Items that
+       exist only in p_map1 are moved to p_old.  Likewise, items that exist only
+       in p_map2 are moved to p_new.  This function can be useful in evaluating
+       changes between two maps.
+
+       Both maps pointed to by p_new and p_old must be empty on input.
+
+       Upon failure, all input maps are restored to their original state.
+
+

SEE ALSO

+
       Map, cl_map_merge
+
+
+
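
EXAMPLE

+
       A sketch (editorial) of detecting changes between an old and a new
+       snapshot of keyed items.  Both snapshot maps are assumed to be
+       initialized elsewhere; new_items and old_items must be initialized
+       and empty.
+
+        if( cl_map_delta( &old_map, &new_map,
+                &new_items, &old_items ) == CL_SUCCESS )
+        {
+                /* new_items now holds objects only in new_map (additions),
+                 * old_items holds objects only in old_map (removals). */
+        }
+
+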
+ +

[Functions] +Component Library: Map/cl_map_destroy

+ +

[top][index]

+

NAME

+
       cl_map_destroy
+
+

DESCRIPTION

+
       The cl_map_destroy function destroys a map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_map_destroy(
+        IN      cl_map_t* const p_map );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Performs any necessary cleanup of the specified map. Further
+       operations should not be attempted on the map. cl_map_destroy does
+       not affect any of the objects stored in the map.
+       This function should only be called after a call to cl_map_construct.
+
+       In debug builds, cl_map_destroy asserts that the map is empty.
+
+

SEE ALSO

+
       Map, cl_map_construct, cl_map_init
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_end

+ +

[top][index]

+

NAME

+
       cl_map_end
+
+

DESCRIPTION

+
       The cl_map_end function returns the iterator for the end of a map.
+
+

SYNOPSIS

+
CL_INLINE const cl_map_iterator_t CL_API
+cl_map_end(
+        IN      const cl_map_t* const   p_map )
+{
+        CL_ASSERT( p_map );
+        return( cl_qmap_end( &p_map->qmap ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_map_t structure whose end to return.
+
+

RETURN VALUE

+
       Iterator for the end of the map.
+
+

NOTES

+
       cl_map_end is useful for determining the validity of map items returned
+       by cl_map_head, cl_map_tail, cl_map_next, and cl_map_prev.  If the iterator
+       returned by any of these functions compares equal to the end, the end of the
+       map was encountered.
+       When using cl_map_head or cl_map_tail, this condition indicates that
+       the map is empty.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+
+
+
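
EXAMPLE

+
       A sketch (editorial) of walking a map in ascending key order, using
+       cl_map_end to detect the end of the iteration.  The process function
+       is a hypothetical consumer of each key/object pair.
+
+        cl_map_iterator_t       itor;
+
+        for( itor = cl_map_head( &map ); itor != cl_map_end( &map );
+                 itor = cl_map_next( itor ) )
+        {
+                process( cl_map_key( itor ), cl_map_obj( itor ) );
+        }
+
+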
+ +

[Functions] +Component Library: Map/cl_map_get

+ +

[top][index]

+

NAME

+
       cl_map_get
+
+

DESCRIPTION

+
       The cl_map_get function returns the object associated with a key.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_map_get(
+        IN      const cl_map_t* const   p_map,
+        IN      const uint64_t                  key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map from which to retrieve the object with
+               the specified key.
+
+       key
+               [in] Key value used to search for the desired object.
+
+ RETURN VALUES
+       Pointer to the object with the desired key value.
+
+       NULL if there was no item with the desired key value stored in
+       the map.
+
+

NOTES

+
       cl_map_get does not remove the item from the map.
+
+

SEE ALSO

+
       Map, cl_map_remove
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_head

+ +

[top][index]

+

NAME

+
       cl_map_head
+
+

DESCRIPTION

+
       The cl_map_head function returns the map item with the lowest key
+       value stored in a map.
+
+

SYNOPSIS

+
CL_INLINE cl_map_iterator_t CL_API
+cl_map_head(
+        IN      const cl_map_t* const   p_map )
+{
+        CL_ASSERT( p_map );
+        return( cl_qmap_head( &p_map->qmap ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map whose item with the lowest key is returned.
+
+ RETURN VALUES
+       Iterator for the object with the lowest key in the map.
+
+       Iterator for the map end if the map was empty.
+
+

NOTES

+
       cl_map_head does not remove the object from the map.
+
+

SEE ALSO

+
       Map, cl_map_tail, cl_map_next, cl_map_prev, cl_map_end
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_init

+ +

[top][index]

+

NAME

+
       cl_map_init
+
+

DESCRIPTION

+
       The cl_map_init function initializes a map for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_map_init(
+        IN      cl_map_t* const p_map,
+        IN      const size_t    min_items );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_map_t structure to initialize.
+
+       min_items
+               [in] Minimum number of items that can be stored.  All necessary
+               allocations to allow storing the minimum number of items are performed
+               at initialization time.
+
+ RETURN VALUES
+       CL_SUCCESS if the map was initialized successfully.
+
+

NOTES

+
       Allows calling map manipulation functions.
+
+

SEE ALSO

+
       Map, cl_map_destroy, cl_map_insert, cl_map_remove
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_insert

+ +

[top][index]

+

NAME

+
       cl_map_insert
+
+

DESCRIPTION

+
       The cl_map_insert function inserts a map item into a map.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_map_insert(
+        IN      cl_map_t* const         p_map,
+        IN      const uint64_t          key,
+        IN      const void* const       p_object );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map into which to add the item.
+
+       key
+               [in] Value to associate with the object.
+
+       p_object
+               [in] Pointer to an object to insert into the map.
+
+ RETURN VALUES
+       Pointer to the object in the map with the specified key after the call
+       completes.
+
+       NULL if there was not enough memory to insert the desired item.
+
+

NOTES

+
       Insertion operations may cause the map to rebalance.
+
+       If the map already contains an object with the specified key,
+       that object will not be replaced and the pointer to that object is
+       returned.
+
+

SEE ALSO

+
       Map, cl_map_remove, cl_map_item_t
+
+
+
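
EXAMPLE

+
       A sketch (editorial) distinguishing the three possible outcomes of
+       cl_map_insert, based on the return value described above.  The map,
+       key, and p_new_obj variables are assumed to exist.
+
+        void    *p_ret;
+
+        p_ret = cl_map_insert( &map, key, p_new_obj );
+        if( !p_ret )
+        {
+                /* Out of memory - nothing was inserted. */
+        }
+        else if( p_ret != p_new_obj )
+        {
+                /* An object with this key already existed; p_ret points to it. */
+        }
+        else
+        {
+                /* p_new_obj was inserted under the key. */
+        }
+
+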
+ +

[Definitions] +Component Library: Map/cl_map_iterator_t

+ +

[top][index]

+

NAME

+
       cl_map_iterator_t
+
+

DESCRIPTION

+
       Iterator type used to walk a map.
+
+

SYNOPSIS

+
typedef const cl_map_item_t *cl_map_iterator_t;
+
+

NOTES

+
       The iterator should be treated as opaque to prevent corrupting the map.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev, cl_map_key
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_key

+ +

[top][index]

+

NAME

+
       cl_map_key
+
+

DESCRIPTION

+
       The cl_map_key function retrieves the key value of a map item.
+
+

SYNOPSIS

+
CL_INLINE uint64_t CL_API
+cl_map_key(
+        IN      const cl_map_iterator_t itor )
+{
+        return( cl_qmap_key( itor ) );
+}
+
+

PARAMETERS

+
       itor
+               [in] Iterator for the item whose key to return.
+
+

RETURN VALUE

+
       Returns the 64-bit key value for the specified iterator.
+
+

NOTES

+
       The iterator specified by the itor parameter must have been retrieved by
+       a previous call to cl_map_head, cl_map_tail, cl_map_next, or cl_map_prev.
+
+       The key value is set in a call to cl_map_insert.
+
+

SEE ALSO

+
       Map, cl_map_insert, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_merge

+ +

[top][index]

+

NAME

+
       cl_map_merge
+
+

DESCRIPTION

+
       The cl_map_merge function moves all items from one map to another,
+       excluding duplicates.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_map_merge(
+        OUT             cl_map_t* const p_dest_map,
+        IN OUT  cl_map_t* const p_src_map );
+
+

PARAMETERS

+
       p_dest_map
+               [out] Pointer to a cl_map_t structure to which items should be added.
+
+       p_src_map
+               [in/out] Pointer to a cl_map_t structure whose items to add
+               to p_dest_map.
+
+ RETURN VALUES
+       CL_SUCCESS if the operation succeeded.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory for the operation
+       to succeed.
+
+

NOTES

+
       Items are evaluated based on their keys only.
+
+       Upon return from cl_map_merge, the map referenced by p_src_map contains
+       all duplicate items.
+
+

SEE ALSO

+
       Map, cl_map_delta
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_next

+ +

[top][index]

+

NAME

+
       cl_map_next
+
+

DESCRIPTION

+
       The cl_map_next function returns the map item with the next higher
+       key value than a specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_map_iterator_t CL_API
+cl_map_next(
+        IN      const cl_map_iterator_t itor )
+{
+        CL_ASSERT( itor );
+        return( cl_qmap_next( itor ) );
+}
+
+

PARAMETERS

+
       itor
+               [in] Iterator for an object in a map whose successor to return.
+
+ RETURN VALUES
+       Iterator for the object with the next higher key value in a map.
+
+       Iterator for the map end if the specified object was the last item in
+       the map.
+
+

NOTES

+
       The iterator must have been retrieved by a previous call to cl_map_head,
+       cl_map_tail, cl_map_next, or cl_map_prev.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_tail, cl_map_prev, cl_map_end
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_obj

+ +

[top][index]

+

NAME

+
       cl_map_obj
+
+

DESCRIPTION

+
       The cl_map_obj function returns the object associated with an iterator.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_map_obj(
+        IN      const cl_map_iterator_t itor )
+{
+        return( cl_qmap_obj( PARENT_STRUCT( itor, cl_map_obj_t, item ) ) );
+}
+
+

PARAMETERS

+
       itor
+               [in] Iterator whose object to return.
+
+ RETURN VALUES
+       Returns the value of the object pointer associated with the iterator.
+
+

NOTES

+
       The iterator must have been retrieved by a previous call to cl_map_head,
+       cl_map_tail, cl_map_next, or cl_map_prev.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_prev

+ +

[top][index]

+

NAME

+
       cl_map_prev
+
+

DESCRIPTION

+
       The cl_map_prev function returns the map item with the next lower
+       key value than a precified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_map_iterator_t CL_API
+cl_map_prev(
+        IN      const cl_map_iterator_t itor )
+{
+        CL_ASSERT( itor );
+        return( cl_qmap_prev( itor ) );
+}
+
+

PARAMETERS

+
       itor
+               [in] Iterator for an object in a map whose predecessor to return.
+
+ RETURN VALUES
+       Iterator for the object with the next lower key value in a map.
+
+       Iterator for the map end if the specified object was the first item in
+       the map.
+
+

NOTES

+
       The iterator must have been retrieved by a previous call to cl_map_head,
+       cl_map_tail, cl_map_next, or cl_map_prev.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_end
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_remove

+ +

[top][index]

+

NAME

+
       cl_map_remove
+
+

DESCRIPTION

+
       The cl_map_remove function removes the map item with the specified key
+       from a map.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_map_remove(
+        IN      cl_map_t* const p_map,
+        IN      const uint64_t  key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_map_t structure from which to remove the item
+               with the specified key.
+
+       key
+               [in] Key value used to search for the object to remove.
+
+ RETURN VALUES
+       Pointer to the object associated with the specified key if
+       it was found and removed.
+
+       NULL if no object with the specified key exists in the map.
+
+

SEE ALSO

+
       Map, cl_map_remove_item, cl_map_remove_all, cl_map_insert
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_remove_all

+ +

[top][index]

+

NAME

+
       cl_map_remove_all
+
+

DESCRIPTION

+
       The cl_map_remove_all function removes all objects from a map,
+       leaving it empty.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_map_remove_all(
+        IN      cl_map_t* const p_map );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map to empty.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Map, cl_map_remove, cl_map_remove_item
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_remove_item

+ +

[top][index]

+

NAME

+
       cl_map_remove_item
+
+

DESCRIPTION

+
       The cl_map_remove_item function removes the specified map item
+       from a map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_map_remove_item(
+        IN      cl_map_t* const                 p_map,
+        IN      const cl_map_iterator_t itor );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map from which to remove the object associated with
+               the specified iterator.
+
+       itor
+               [in] Iterator for an object to remove from its map.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Removes the object associated with the specifid iterator from its map.
+
+       The specified iterator is no longer valid after the call completes.
+
+       The iterator must have been retrieved by a previous call to cl_map_head,
+       cl_map_tail, cl_map_next, or cl_map_prev.
+
+

SEE ALSO

+
       Map, cl_map_remove, cl_map_remove_all, cl_map_insert, cl_map_head,
+       cl_map_tail, cl_map_next, cl_map_prev
+
+
+
+ +

[Structures] +Component Library: Map/cl_map_t

+ +

[top][index]

+

NAME

+
       cl_map_t
+
+

DESCRIPTION

+
       Map structure.
+
+       The cl_map_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_map
+{
+        cl_qmap_t       qmap;
+        cl_qpool_t      pool;
+
+} cl_map_t;
+
+

FIELDS

+
       qmap
+               Quick map object that maintains the map.
+
+       pool
+               Pool of cl_map_obj_t structures used to store user objects
+               in the map.
+
+

SEE ALSO

+
       Map, cl_map_obj_t
+
+
+
+ +

[Functions] +Component Library: Map/cl_map_tail

+ +

[top][index]

+

NAME

+
       cl_map_tail
+
+

DESCRIPTION

+
       The cl_map_tail function returns the map item with the highest key
+       value stored in a map.
+
+

SYNOPSIS

+
CL_INLINE cl_map_iterator_t CL_API
+cl_map_tail(
+        IN      const cl_map_t* const   p_map )
+{
+        CL_ASSERT( p_map );
+        return( cl_qmap_tail( &p_map->qmap ) );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a map whose item with the highest key
+               is returned.
+
+ RETURN VALUES
+       Iterator for the object with the highest key in the map.
+
+       Iterator for the map end if the map was empty.
+
+

NOTES

+
       cl_map_tail does not remove the object from the map.
+
+

SEE ALSO

+
       Map, cl_map_head, cl_map_next, cl_map_prev, cl_map_end
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_math_h.html b/branches/Ndi/docs/complib/cl_math_h.html new file mode 100644 index 00000000..7dc206d8 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_math_h.html @@ -0,0 +1,103 @@ + + + + +./inc_docs/complib/cl_math_h.html + + + + +Generated from ./inc/complib/cl_math.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Definitions] +Component Library: Math/MAX

+ +

[top][index]

+

NAME

+
       MAX
+
+

DESCRIPTION

+
       The MAX macro returns the greater of two values.
+
+

SYNOPSIS

+
       MAX( x, y );
+
+

PARAMETERS

+
       x
+               [in] First of two values to compare.
+
+       y
+               [in] Second of two values to compare.
+
+

RETURN VALUE

+
       Returns the greater of the x and y parameters.
+
+

SEE ALSO

+
       MIN, ROUNDUP
+
+
+
+ +

[Definitions] +Component Library: Math/MIN

+ +

[top][index]

+

NAME

+
       MIN
+
+

DESCRIPTION

+
       The MIN macro returns the lesser of two values.
+
+

SYNOPSIS

+
       MIN( x, y );
+
+

PARAMETERS

+
       x
+               [in] First of two values to compare.
+
+       y
+               [in] Second of two values to compare.
+
+

RETURN VALUE

+
       Returns the lesser of the x and y parameters.
+
+

SEE ALSO

+
       MAX, ROUNDUP
+
+
+
+ +

[Definitions] +Component Library: Math/ROUNDUP

+ +

[top][index]

+

NAME

+
       ROUNDUP
+
+

DESCRIPTION

+
       The ROUNDUP macro rounds a value up to a given multiple.
+
+

SYNOPSIS

+
       ROUNDUP( val, align );
+
+

PARAMETERS

+
       val
+               [in] Value that is to be rounded up. The type of the value is
+               indeterminate, but must be at most the size of a natural integer
+               for the platform.
+
+       align
+               [in] Multiple to which the val parameter must be rounded up.
+
+

RETURN VALUE

+
       Returns a value that is the input value specified by val rounded up to
+       the nearest multiple of align.
+
+

NOTES

+
       The value provided must be of a type at most the size of a natural integer.
+
+
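
EXAMPLE

+
       For instance, ROUNDUP( 10, 8 ) evaluates to 16, while ROUNDUP( 16, 8 )
+       evaluates to 16 because 16 is already a multiple of 8.  A common way
+       to implement such a macro (a hypothetical definition - the actual one
+       lives in cl_math.h and may differ) is:
+
+#define ROUNDUP( val, align )   ((((val) + (align) - 1) / (align)) * (align))
+
+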
+ + diff --git a/branches/Ndi/docs/complib/cl_memory_h.html b/branches/Ndi/docs/complib/cl_memory_h.html new file mode 100644 index 00000000..cd6a1ca0 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_memory_h.html @@ -0,0 +1,629 @@ + + + + +./inc_docs/complib/cl_memory_h.html + + + + +Generated from ./inc/complib/cl_memory.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Public/Memory Management

+ +

[top][index]

+

NAME

+
       Memory Management
+
+

DESCRIPTION

+
       The memory management functionality provides memory manipulation
+       functions as well as powerful debugging tools.
+
+       The Allocation Tracking functionality provides a means for tracking memory
+       allocations in order to detect memory leaks.
+
+       Memory allocation tracking stores the file name and line number where
+       allocations occur. Gathering this information does have an adverse impact
+       on performance, and memory tracking should therefore not be enabled in
+       release builds of software.
+
+       Memory tracking is compiled into the debug version of the library,
+       and can be enabled for the release version as well. To enable memory
+       tracking in a release build of the public layer, users should define
+       the MEM_TRACK_ON keyword for compilation.
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_check_for_read

+ +

[top][index]

+

NAME

+
       cl_check_for_read
+
+

DESCRIPTION

+
       Checks a user-mode virtual address for read access.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_check_for_read(
+        IN      const void* const       vaddr,
+        IN      const size_t            count );
+
+

PARAMETERS

+
       vaddr
+               [in] Virtual address to check for read access.
+
+       count
+               [in] Number of bytes of the buffer at the specified address
+               to validate.
+
+ RETURN VALUES
+       CL_SUCCESS if the virtual address is valid for a read of the specified
+       size.
+
+       CL_INVALID_PERMISSION if the virtual address or the size is not valid.
+
+

NOTES

+
       This call is only available in the kernel.  The buffer can only be accessed
+       in the context of the application thread (i.e. in the path of an IOCTL
+       request).  Callers cannot be holding a spinlock when calling this function.
+
+

SEE ALSO

+
       Memory Management, cl_check_for_write, cl_copy_to_user, cl_copy_from_user
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_check_for_write

+ +

[top][index]

+

NAME

+
       cl_check_for_write
+
+

DESCRIPTION

+
       Checks a user-mode virtual address for write access.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_check_for_write(
+        IN      void* const             vaddr,
+        IN      const size_t    count );
+
+

PARAMETERS

+
       vaddr
+               [in] Virtual address to check for write access.
+
+       count
+               [in] Number of bytes of the buffer at the specified
+               address to validate.
+
+ RETURN VALUES
+       CL_SUCCESS if the virtual address is valid for a write of the specified
+       size.
+
+       CL_INVALID_PERMISSION if the virtual address or the size is not valid.
+
+

NOTES

+
       This call is only available in the kernel.  The buffer can only be accessed
+       in the context of the application thread (i.e. in the path of an IOCTL
+       request).  Callers cannot be holding a spinlock when calling this function.
+
+

SEE ALSO

+
       Memory Management, cl_check_for_read, cl_copy_to_user, cl_copy_from_user
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_copy_from_user

+ +

[top][index]

+

NAME

+
       cl_copy_from_user
+
+

DESCRIPTION

+
       Copies data from a user-mode buffer, performing access checks.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_copy_from_user(
+        IN      void* const                     p_dest,
+        IN      const void* const       p_src,
+        IN      const size_t            count );
+
+

PARAMETERS

+
       p_dest
+               [in] Pointer to the buffer being copied to.
+
+       p_src
+               [in] User-mode virtual address from which to copy data.
+
+       count
+               [in] Number of bytes to copy from the source buffer to the
+               destination buffer.
+
+ RETURN VALUES
+       CL_SUCCESS if the user-mode buffer virtual address is valid as the
+       source of the copy.
+
+       CL_INVALID_PERMISSION if the virtual address or the count is not valid.
+
+

NOTES

+
       This call is only available in the kernel.  The buffer can only be accessed
+       in the context of the application thread (i.e. in the path of an IOCTL
+       request).  Callers cannot be holding a spinlock when calling this function.
+
+

SEE ALSO

+
       Memory Management, cl_check_for_read, cl_check_for_write, cl_copy_to_user
+
+
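
EXAMPLE

+
       A kernel-mode sketch (editorial) of capturing an IOCTL input buffer.
+       The ioctl_params_t type and the p_user_buf pointer are hypothetical;
+       the copy fails cleanly if the user address is invalid.
+
+        ioctl_params_t  params;
+
+        if( cl_copy_from_user( &params, p_user_buf,
+                sizeof(params) ) != CL_SUCCESS )
+        {
+                return( CL_INVALID_PERMISSION );
+        }
+
+        /* params now holds a validated private copy of the user's request. */
+
+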
+
+ +

[Functions] +Component Library: Memory Management/cl_copy_to_user

+ +

[top][index]

+

NAME

+
       cl_copy_to_user
+
+

DESCRIPTION

+
       Copies data into a user-mode buffer, performing access checks.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_copy_to_user(
+        IN      void* const                     p_dest,
+        IN      const void* const       p_src,
+        IN      const size_t            count );
+
+

PARAMETERS

+
       p_dest
+               [in] User-mode virtual address to which to copy data.
+
+       p_src
+               [in] Pointer to the buffer being copied from.
+
+       count
+               [in] Number of bytes to copy from the source buffer to the
+               destination buffer.
+
+ RETURN VALUES
+       CL_SUCCESS if the user-mode buffer virtual address is valid as the
+       destination of the copy.
+
+       CL_INVALID_PERMISSION if the virtual address or the count is not valid.
+
+

NOTES

+
       This call is only available in the kernel.  The buffer can only be accessed
+       in the context of the application thread (i.e. in the path of an IOCTL
+       request).  Callers cannot be holding a spinlock when calling this function.
+
+

SEE ALSO

+
       Memory Management, cl_check_for_read, cl_check_for_write, cl_copy_from_user
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_free

+ +

[top][index]

+

NAME

+
       cl_free
+
+

DESCRIPTION

+
       The cl_free function deallocates a block of memory.
+
+

SYNOPSIS

+
void
+cl_free(
+        IN      void* const     p_memory );
+
+

PARAMETERS

+
       p_memory
+               [in] Pointer to a memory block.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The p_memory parameter is the pointer returned by a previous call to
+       cl_malloc or cl_zalloc.
+
+       cl_free has no effect if p_memory is NULL.
+
+

SEE ALSO

+
       Memory Management, cl_malloc, cl_zalloc
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_get_pagesize

+ +

[top][index]

+

NAME

+
       cl_get_pagesize
+
+

DESCRIPTION

+
       Returns the number of bytes in an OS-defined page.
+
+

SYNOPSIS

+
CL_EXPORT uint32_t CL_API
+cl_get_pagesize( void );
+
+

PARAMETERS

+
       NONE
+
+ RETURN VALUES
+       Returns the number of bytes in a page as defined by the Operating
+       System.
+
+

SEE ALSO

+
       Memory Management
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_get_physaddr

+ +

[top][index]

+

NAME

+
       cl_get_physaddr
+
+

DESCRIPTION

+
       Returns the Physical address for a kernel virtual address.
+
+

SYNOPSIS

+
CL_EXPORT uint64_t CL_API
+cl_get_physaddr(
+        IN      void *vaddr );
+
+

PARAMETERS

+
       vaddr
+               [in] Virtual address for which the physical address is required.
+
+ RETURN VALUES
+       Returns the physical address for a virtual address.
+
+

NOTES

+
       This call is only available in kernel mode.
+
+

SEE ALSO

+
       Memory Management
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_malloc

+ +

[top][index]

+

NAME

+
       cl_malloc
+
+

DESCRIPTION

+
       The cl_malloc function allocates a block of memory.
+
+

SYNOPSIS

+
void*
+cl_malloc(
+        IN      const size_t    size );
+
+

PARAMETERS

+
       size
+               [in] Size of the requested allocation.
+
+ RETURN VALUES
+       Pointer to allocated memory if successful.
+
+       NULL otherwise.
+
+

NOTES

+
       Allocated memory follows alignment rules specific to the different
+       environments.
+
+

SEE ALSO

+
       Memory Management, cl_free, cl_zalloc, cl_palloc, cl_pzalloc,
+       cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+
+
+
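
EXAMPLE

+
       A sketch (editorial) of a guarded allocation.  Note that cl_malloc does
+       not zero the memory; cl_zalloc can be used when zero-filled memory is
+       needed.
+
+static cl_status_t
+alloc_example( void )
+{
+        uint8_t *p_buf;
+
+        p_buf = (uint8_t*)cl_malloc( 256 );
+        if( !p_buf )
+                return( CL_INSUFFICIENT_MEMORY );
+
+        cl_memclr( p_buf, 256 );        /* or allocate with cl_zalloc */
+
+        /* ...use the buffer... */
+
+        cl_free( p_buf );
+        return( CL_SUCCESS );
+}
+
+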
+ +

[Functions] +Component Library: Memory Management/cl_mem_display

+ +

[top][index]

+

NAME

+
       cl_mem_display
+
+

DESCRIPTION

+
       The cl_mem_display function displays all tracked memory allocations to
+       the applicable debugger.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_mem_display( void );
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Each tracked memory allocation is displayed along with the file name and
+       line number that allocated it.
+
+       Output is sent to the platform's debugging target, which may be the
+       system log file.
+
+

SEE ALSO

+
       Memory Management
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_memclr

+ +

[top][index]

+

NAME

+
       cl_memclr
+
+

DESCRIPTION

+
       The cl_memclr function sets every byte in a memory range to zero.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_memclr(
+        IN      void* const             p_memory,
+        IN      const size_t    count )
+{
+        cl_memset( p_memory, 0, count );
+}
+
+

PARAMETERS

+
       p_memory
+               [in] Pointer to a memory block.
+
+       count
+               [in] Number of bytes to set.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Memory Management, cl_memset, cl_memcpy, cl_memcmp
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_memcmp

+ +

[top][index]

+

NAME

+
       cl_memcmp
+
+

DESCRIPTION

+
       The cl_memcmp function compares two memory buffers.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_memcmp(
+        IN      const void* const       p_mem,
+        IN      const void* const       p_ref,
+        IN      const size_t            count );
+
+

PARAMETERS

+
       p_mem
+               [in] Pointer to a memory block being compared.
+
+       p_ref
+               [in] Pointer to the reference memory block to compare against.
+
+       count
+               [in] Number of bytes to compare.
+
+ RETURN VALUES
+       Returns less than zero if p_mem is less than p_ref.
+
+       Returns greater than zero if p_mem is greater than p_ref.
+
+       Returns zero if the two memory regions are identical.
+
+

SEE ALSO

+
       Memory Management, cl_memset, cl_memclr, cl_memcpy
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_memcpy

+ +

[top][index]

+

NAME

+
       cl_memcpy
+
+

DESCRIPTION

+
       The cl_memcpy function copies a given number of bytes from
+       one buffer to another.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_memcpy(
+        IN      void* const                     p_dest,
+        IN      const void* const       p_src,
+        IN      const size_t            count );
+
+

PARAMETERS

+
       p_dest
+               [in] Pointer to the buffer being copied to.
+
+       p_src
+               [in] Pointer to the buffer being copied from.
+
+       count
+               [in] Number of bytes to copy from the source buffer to the
+               destination buffer.
+
+

RETURN VALUE

+
       Returns a pointer to the destination buffer.
+
+

SEE ALSO

+
       Memory Management, cl_memset, cl_memclr, cl_memcmp
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_memset

+ +

[top][index]

+

NAME

+
       cl_memset
+
+

DESCRIPTION

+
       The cl_memset function sets every byte in a memory range to a given value.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_memset(
+        IN      void* const             p_memory,
+        IN      const uint8_t   fill,
+        IN      const size_t    count );
+
+

PARAMETERS

+
       p_memory
+               [in] Pointer to a memory block.
+
+       fill
+               [in] Byte value with which to fill the memory.
+
+       count
+               [in] Number of bytes to set.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Memory Management, cl_memclr, cl_memcpy, cl_memcmp
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_palloc

+ +

[top][index]

+

NAME

+
       cl_palloc
+
+

DESCRIPTION

+
       The cl_palloc function allocates a block of memory from paged pool if the
+       operating system supports it.  If the operating system does not distinguish
+       between pool types, cl_palloc is identical to cl_malloc.
+
+

SYNOPSIS

+
void*
+cl_palloc(
+        IN      const size_t    size );
+
+

PARAMETERS

+
       size
+               [in] Size of the requested allocation.
+
+ RETURN VALUES
+       Pointer to allocated memory if successful.
+
+       NULL otherwise.
+
+

NOTES

+
       Allocated memory follows alignment rules specific to the different
+       environments.
+
+

SEE ALSO

+
       Memory Management, cl_free, cl_malloc, cl_zalloc, cl_pzalloc,
+       cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_pzalloc

+ +

[top][index]

+

NAME

+
       cl_pzalloc
+
+

DESCRIPTION

+
       The cl_pzalloc function allocates a block of memory from paged pool if the
+       operating system supports it and initializes it to zero.  If the operating
+       system does not distinguish between pool types, cl_pzalloc is identical
+       to cl_zalloc.
+
+

SYNOPSIS

+
void*
+cl_pzalloc(
+        IN      const size_t    size );
+
+

PARAMETERS

+
       size
+               [in] Size of the requested allocation.
+
+ RETURN VALUES
+       Pointer to allocated memory if successful.
+
+       NULL otherwise.
+
+

NOTES

+
       Allocated memory follows alignment rules specific to the different
+       environments.
+
+

SEE ALSO

+
       Memory Management, cl_free, cl_malloc, cl_zalloc, cl_palloc,
+       cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+
+
+
+ +

[Functions] +Component Library: Memory Management/cl_zalloc

+ +

[top][index]

+

NAME

+
       cl_zalloc
+
+

DESCRIPTION

+
       The cl_zalloc function allocates a block of memory initialized to zero.
+
+

SYNOPSIS

+
void*
+cl_zalloc(
+        IN      const size_t    size );
+
+

PARAMETERS

+
       size
+               [in] Size of the requested allocation.
+
+ RETURN VALUES
+       Pointer to allocated memory if successful.
+
+       NULL otherwise.
+
+

NOTES

+
       Allocated memory follows alignment rules specific to the different
+       environments.
+
+

SEE ALSO

+
       Memory Management, cl_free, cl_malloc, cl_palloc, cl_pzalloc,
+       cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_mutex_h.html b/branches/Ndi/docs/complib/cl_mutex_h.html new file mode 100644 index 00000000..b9bbfdb4 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_mutex_h.html @@ -0,0 +1,207 @@ + + + + +./inc_docs/complib/cl_mutex_h.html + + + + +Generated from ./inc/complib/cl_mutex.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +complib/Mutex

+ +

[top][index]

+

NAME

+
       Mutex
+
+

DESCRIPTION

+
       Mutex provides synchronization between threads for exclusive access to
+       a resource.
+
+       The Mutex functions manipulate a cl_mutex_t structure which should
+       be treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_mutex_t
+
+       Initialization:
+               cl_mutex_construct, cl_mutex_init, cl_mutex_destroy
+
+       Manipulation
+               cl_mutex_acquire, cl_mutex_release
+
+
+
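
EXAMPLE

+
       A sketch (editorial, not from the header) of the full mutex life cycle
+       guarding a critical section.  The shared_counter variable stands in
+       for any state needing exclusive access.
+
+static int      shared_counter;
+
+static void
+mutex_example( void )
+{
+        cl_mutex_t      mutex;
+
+        cl_mutex_construct( &mutex );
+        if( cl_mutex_init( &mutex ) != CL_SUCCESS )
+                return;
+
+        cl_mutex_acquire( &mutex );
+        shared_counter++;       /* exclusive access inside the lock */
+        cl_mutex_release( &mutex );
+
+        cl_mutex_destroy( &mutex );
+}
+
+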
+ +

[Functions] +Component Library: Mutex/cl_mutex_acquire

+ +

[top][index]

+

NAME

+
       cl_mutex_acquire
+
+

DESCRIPTION

+
       The cl_mutex_acquire function acquires a mutex.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_mutex_acquire(
+        IN      cl_mutex_t* const       p_mutex );
+
+

PARAMETERS

+
       p_mutex
+               [in] Pointer to a mutex structure to acquire.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Mutex, cl_mutex_release
+
+
+
+ +

[Functions] +Component Library: Mutex/cl_mutex_construct

+ +

[top][index]

+

NAME

+
       cl_mutex_construct
+
+

DESCRIPTION

+
       The cl_mutex_construct function initializes the state of a
+       mutex.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_mutex_construct(
+        IN      cl_mutex_t* const       p_mutex );
+
+

PARAMETERS

+
       p_mutex
+               [in] Pointer to a mutex structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_mutex_destroy without first calling
+       cl_mutex_init.
+
+       Calling cl_mutex_construct is a prerequisite to calling any other
+       mutex function except cl_mutex_init.
+
+

SEE ALSO

+
       Mutex, cl_mutex_init, cl_mutex_destroy
+
+
+
+ +

[Functions] +Component Library: Mutex/cl_mutex_destroy

+ +

[top][index]

+

NAME

+
       cl_mutex_destroy
+
+

DESCRIPTION

+
       The cl_mutex_destroy function performs all necessary cleanup of a
+       mutex.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_mutex_destroy(
+        IN      cl_mutex_t* const       p_mutex );
+
+

PARAMETERS

+
       p_mutex
+               [in] Pointer to a mutex structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Performs any necessary cleanup of a mutex. This function must only
+       be called if either cl_mutex_construct or cl_mutex_init has been
+       called.
+
+

SEE ALSO

+
       Mutex, cl_mutex_construct, cl_mutex_init
+
+
+
+ +

[Functions] +Component Library: Mutex/cl_mutex_init

+ +

[top][index]

+

NAME

+
       cl_mutex_init
+
+

DESCRIPTION

+
       The cl_mutex_init function initializes a mutex for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_mutex_init(
+        IN      cl_mutex_t* const       p_mutex );
+
+

PARAMETERS

+
       p_mutex
+               [in] Pointer to a mutex structure to initialize.
+
+ RETURN VALUES
+       CL_SUCCESS if initialization succeeded.
+
+       CL_ERROR if initialization failed. Callers should call
+       cl_mutex_destroy to clean up any resources allocated during
+       initialization.
+
+

NOTES

+
       Initializes the mutex structure. Allows calling cl_mutex_acquire
+       and cl_mutex_release. The mutex is always created in the unlocked state.
+
+

SEE ALSO

+
       Mutex, cl_mutex_construct, cl_mutex_destroy,
+       cl_mutex_acquire, cl_mutex_release
+
+
+
+ +

[Functions] +Component Library: Mutex/cl_mutex_release

+ +

[top][index]

+

NAME

+
       cl_mutex_release
+
+

DESCRIPTION

+
       The cl_mutex_release function releases a mutex object.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_mutex_release(
+        IN      cl_mutex_t* const       p_mutex );
+
+

PARAMETERS

+
       p_mutex
+               [in] Pointer to a mutex structure to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Releases a mutex after a call to cl_mutex_acquire.
+
+

SEE ALSO

+
       Mutex, cl_mutex_acquire
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_obj_h.html b/branches/Ndi/docs/complib/cl_obj_h.html new file mode 100644 index 00000000..9dcd6e3c --- /dev/null +++ b/branches/Ndi/docs/complib/cl_obj_h.html @@ -0,0 +1,997 @@ + + + + +./inc_docs/complib/cl_obj_h.html + + + + +Generated from ./inc/complib/cl_obj.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Object

+ +

[top][parent][index]

+

NAME

+
       Object
+
+

DESCRIPTION

+
       Object describes a basic class that can be used to track accesses to an
+       object and provides automatic cleanup of an object that is dependent
+       on another object.
+
+       Dependencies between objects are described using a relationship.  A
+       child object is considered dependent on a parent object.  Destruction of
+       a parent object automatically results in the destruction of any child
+       objects associated with the parent.
+
+       The relationship between parent and child objects is many to many.
+       Parents can have multiple child objects, and a child can be dependent on
+       multiple parent objects.  In the latter case, destruction of any parent
+       object results in the destruction of the child object.
+
+       Other relationships between objects are described using references.  An
+       object that takes a reference on a second object prevents the second object
+       from being deallocated as long as the reference is held.
+
+

SEE ALSO

+
       Types
+               cl_destroy_type_t
+
+       Structures:
+               cl_obj_t, cl_obj_rel_t
+
+       Callbacks:
+               cl_pfn_obj_call_t
+
+       Initialization/Destruction:
+               cl_obj_mgr_create, cl_obj_mgr_destroy,
+               cl_obj_construct, cl_obj_init, cl_obj_destroy, cl_obj_deinit
+
+       Object Relationships:
+               cl_obj_ref, cl_obj_deref,
+               cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel
+
+       Object Manipulation:
+               cl_obj_reset
+
+
+
+ +

[Definitions] +Component Library: Object/cl_destroy_type_t

+ +

[top][index]

+

NAME

+
       cl_destroy_type_t
+
+

DESCRIPTION

+
       Indicates the type of destruction to perform on an object.
+
+

SYNOPSIS

+
typedef enum _cl_destroy_type
+{
+        CL_DESTROY_ASYNC,
+        CL_DESTROY_SYNC
+
+}       cl_destroy_type_t;
+
+

VALUES

+
       CL_DESTROY_ASYNC
+               Indicates that the object should be destroyed asynchronously.  Objects
+               destroyed asynchronously complete initial destruction processing, then
+               return control to the calling thread.  Once their reference count goes
+               to zero, they are queued onto an asynchronous thread to complete destruction
+               processing.
+
+       CL_DESTROY_SYNC
+               Indicates that the object should be destroyed synchronously.  Objects
+               destroyed synchronously wait (block) until their reference count goes
+               to zero.  Once their reference count goes to zero, destruction
+               processing is completed by the calling thread.
+
+

SEE ALSO

+
       Object, cl_obj_init, cl_obj_destroy, cl_obj_deinit, cl_obj_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_construct

+ +

[top][index]

+

NAME

+
       cl_obj_construct
+
+

DESCRIPTION

+
       This routine prepares an object for use.  The object must be successfully
+       initialized before being used.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_construct(
+        IN                              cl_obj_t * const                        p_obj,
+        IN              const   uint32_t                                        obj_type );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to construct.
+
+       obj_type
+               [in] A user-specified type associated with the object.  This type
+               is recorded by the object for debugging purposes and may be accessed
+               by the user.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       This call must be made before invoking any other function on an object.
+
+

SEE ALSO

+
       Object, cl_obj_init, cl_obj_destroy, cl_obj_deinit.
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_deinit

+ +

[top][index]

+

NAME

+
       cl_obj_deinit
+
+

DESCRIPTION

+
       Release all resources allocated by an object.  This routine should
+       typically be called from a user's pfn_free routine.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_deinit(
+        IN                              cl_obj_t * const                        p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to free.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       This call must be invoked to release the object from the global object
+       manager.
+
+

SEE ALSO

+
       Object, cl_obj_construct, cl_obj_init, cl_obj_destroy, cl_obj_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_deref

+ +

[top][index]

+

NAME

+
       cl_obj_deref
+
+

DESCRIPTION

+
       Decrements the reference count on an object and returns the updated count.
+       This routine is thread safe, but results in locking the object.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_obj_deref(
+        IN                              cl_obj_t * const                        p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to dereference.
+
+

RETURN VALUE

+
       The updated reference count.
+
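+
+       As an illustrative pattern (queue_async_work and p_my_obj are
+       hypothetical), a reference is typically taken before handing an object
+       to another execution context and dropped when that work completes:
+
+               cl_obj_ref( &p_my_obj->obj );   /* keep the object alive */
+               queue_async_work( p_my_obj );
+
+               /* ... later, in the completion path ... */
+               cl_obj_deref( &p_my_obj->obj ); /* may allow destruction */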
+

SEE ALSO

+
       Object, cl_obj_t, cl_obj_ref
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_destroy

+ +

[top][index]

+

NAME

+
       cl_obj_destroy
+
+

DESCRIPTION

+
       This routine destroys the specified object.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_destroy(
+        IN                              cl_obj_t *                                      p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to destroy.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       This routine starts the destruction process for the specified object.  For
+       additional information regarding destruction callbacks, see the following
+       fields in cl_obj_t and parameters in cl_obj_init: pfn_destroying,
+       pfn_cleanup, and pfn_free.
+
+       In most cases, after calling this routine, users should call cl_obj_deinit
+       from within their pfn_free callback routine.
+
+

SEE ALSO

+
       Object, cl_obj_construct, cl_obj_init, cl_obj_deinit,
+       cl_obj_t, cl_destroy_type_t, cl_pfn_obj_call_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_init

+ +

[top][index]

+

NAME

+
       cl_obj_init
+
+

DESCRIPTION

+
       This routine initializes an object for use.  Upon the successful completion
+       of this call, the object is ready for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_obj_init(
+        IN                              cl_obj_t * const                        p_obj,
+        IN                              cl_destroy_type_t                       destroy_type,
+        IN              const   cl_pfn_obj_call_t                       pfn_destroying OPTIONAL,
+        IN              const   cl_pfn_obj_call_t                       pfn_cleanup OPTIONAL,
+        IN              const   cl_pfn_obj_call_t                       pfn_free );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to initialize.
+
+       destroy_type
+               [in] Specifies the destruction model used by this object.
+
+       pfn_destroying
+               [in] User-specified callback invoked to notify a user that an object has
+               been marked for destruction.  This callback is invoked directly from
+               the thread destroying the object and is used to notify a user that
+               a parent object has invoked a child object's destructor.
+
+       pfn_cleanup
+               [in] User-specified callback invoked as an object is undergoing
+               destruction.  For objects destroyed asynchronously, this callback
+               is invoked from the context of the asynchronous destruction thread.
+               Users may block in the context of this thread; however, further
+               destruction processing will not continue until this callback returns.
+
+       pfn_free
+               [in] User-specified callback invoked to notify a user that an object has
+               been destroyed and is ready for deallocation.  Users should either
+               call cl_obj_deinit or cl_obj_reset from within this callback.
+
+

RETURN VALUE

+
       CL_SUCCESS
+               The object was successfully initialized.
+
+       CL_INSUFFICIENT_MEMORY
+               The object could not allocate the necessary memory resources to
+               complete initialization.
+
+

NOTES

+
       The three destruction callbacks are used to notify the user of the progress
+       of the destruction, permitting the user to perform additional processing.
+       The pfn_destroying callback notifies the user that the object is being
+       destroyed.  It is called after an object has removed itself from
+       relationships with its parents, but before it destroys any child objects
+       that it might have.
+
+       The pfn_cleanup callback is invoked after all child objects have been destroyed, and
+       there are no more references on the object itself.  For objects destroyed
+       asynchronously, pfn_cleanup is invoked from an asynchronous destruction
+       thread.
+
+       The pfn_free callback notifies the user that the destruction of the object has
+       completed.  All relationships have been removed, and all child objects have
+       been destroyed.  Relationship items (cl_obj_rel_t) that were used to
+       identify parent objects are returned to the user through the p_parent_list
+       field of the cl_obj_t structure.
+
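+
+       As a hedged sketch of supplying all three callbacks (the callback names
+       are illustrative; my_free_cb is the deallocation callback sketched under
+       cl_obj_deinit):
+
+               static void CL_API my_destroying_cb( IN cl_obj_t *p_obj )
+               {
+                       /* First notification: the object is marked for
+                        * destruction. */
+               }
+
+               static void CL_API my_cleanup_cb( IN cl_obj_t *p_obj )
+               {
+                       /* All children are destroyed and no references
+                        * remain.  Blocking is permitted here for objects
+                        * using CL_DESTROY_ASYNC. */
+               }
+
+               status = cl_obj_init( &p_my_obj->obj, CL_DESTROY_ASYNC,
+                       my_destroying_cb, my_cleanup_cb, my_free_cb );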
+

SEE ALSO

+
       Object, cl_obj_construct, cl_obj_destroy, cl_obj_deinit,
+       cl_obj_t, cl_destroy_type_t, cl_pfn_obj_call_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_insert_rel

+ +

[top][index]

+

NAME

+
       cl_obj_insert_rel
+
+

DESCRIPTION

+
       Forms a relationship between two objects, with the existence of the child
+       object dependent on the parent.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_obj_insert_rel(
+        IN                              cl_obj_rel_t * const            p_rel,
+        IN                              cl_obj_t * const                        p_parent_obj,
+        IN                              cl_obj_t * const                        p_child_obj );
+
+

PARAMETERS

+
       p_rel
+               [in] A reference to an unused relationship item.
+
+       p_parent_obj
+               [in] A reference to the parent object.
+
+       p_child_obj
+               [in] A reference to the child object.
+
+

RETURN VALUE

+
       A cl_status_t value indicating the status of the operation.
+
+

NOTES

+
       This call inserts a relationship between the parent and child object.
+       The relationship allows for the automatic destruction of the child object
+       if the parent is destroyed.
+
+       A given object can have multiple parent and child objects, but the
+       relationships must form an object tree.  That is, there cannot be any
+       cycles formed through the parent-child relationships.  (For example, an
+       object cannot be both the parent and a child of a second object.)
+
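+
+       For instance (p_parent and p_child are hypothetical user objects that
+       embed cl_obj_t, and error handling is elided):
+
+               cl_obj_rel_t    *p_rel;
+
+               /* Obtain a relationship item from the object manager. */
+               p_rel = cl_rel_alloc();
+               if( p_rel )
+               {
+                       /* Destroying the parent now also destroys the child. */
+                       status = cl_obj_insert_rel( p_rel,
+                               &p_parent->obj, &p_child->obj );
+               }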
+

SEE ALSO

+
       Object, cl_rel_alloc, cl_rel_free, cl_obj_remove_rel, cl_obj_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_insert_rel_parent_locked

+ +

[top][index]

+

NAME

+
       cl_obj_insert_rel_parent_locked
+
+

DESCRIPTION

+
       Forms a relationship between two objects, with the existence of the child
+       object dependent on the parent.  The parent's object lock is held.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_obj_insert_rel_parent_locked(
+        IN                              cl_obj_rel_t * const            p_rel,
+        IN                              cl_obj_t * const                        p_parent_obj,
+        IN                              cl_obj_t * const                        p_child_obj );
+
+

PARAMETERS

+
       p_rel
+               [in] A reference to an unused relationship item.
+
+       p_parent_obj
+               [in] A reference to the parent object.
+
+       p_child_obj
+               [in] A reference to the child object.
+
+

RETURN VALUE

+
       A cl_status_t value indicating the status of the operation.
+
+

NOTES

+
       This call inserts a relationship between the parent and child object.
+       The relationship allows for the automatic destruction of the child object
+       if the parent is destroyed.
+
+       A given object can have multiple parent and child objects, but the
+       relationships must form an object tree.  That is, there cannot be any
+       cycles formed through the parent-child relationships.  (For example, an
+       object cannot be both the parent and a child of a second object.)
+
+       This call requires the caller to already hold the parent object's lock.
+
+

SEE ALSO

+
       Object, cl_rel_alloc, cl_rel_free, cl_obj_remove_rel, cl_obj_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_lock

+ +

[top][index]

+

NAME

+
       cl_obj_lock
+
+

DESCRIPTION

+
       Acquires an object's lock.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_obj_lock(
+        IN                              cl_obj_t * const                        p_obj )
+{
+        CL_ASSERT( p_obj->state == CL_INITIALIZED ||
+                p_obj->state == CL_DESTROYING );
+        cl_spinlock_acquire( &p_obj->lock );
+}
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object whose lock to acquire.
+
+

RETURN VALUE

+
       This function does not return a value.
+
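+
+       A typical usage sketch (p_my_obj is a hypothetical wrapper that embeds
+       cl_obj_t) pairs cl_obj_lock with cl_obj_unlock:
+
+               cl_obj_lock( &p_my_obj->obj );
+               /* ... examine or update state protected by the lock ... */
+               cl_obj_unlock( &p_my_obj->obj );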
+

SEE ALSO

+
       Object, cl_obj_t, cl_obj_unlock
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_mgr_create

+ +

[top][index]

+

NAME

+
       cl_obj_mgr_create
+
+

DESCRIPTION

+
       This routine creates an object manager used to track all objects created
+       by the user.  The object manager assists with debugging efforts by
+       identifying objects that are not destroyed properly.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_obj_mgr_create(void);
+
+

PARAMETERS

+
       None.
+
+

RETURN VALUE

+
       CL_SUCCESS
+               The object manager was successfully created.
+
+       CL_INSUFFICIENT_MEMORY
+               The object manager could not be allocated.
+
+

NOTES

+
       This call must succeed before invoking any other object-related function.
+
+

SEE ALSO

+
       Object, cl_obj_mgr_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_mgr_destroy

+ +

[top][index]

+

NAME

+
       cl_obj_mgr_destroy
+
+

DESCRIPTION

+
       This routine destroys the object manager created through cl_obj_mgr_create.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_mgr_destroy(void);
+
+

PARAMETERS

+
       None.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       When the object manager is destroyed, it will display information about all
+       objects that have not yet been destroyed.
+
+

SEE ALSO

+
       Object, cl_obj_mgr_create
+
+
+
+ +

[Structures] +Component Library: Object/cl_obj_mgr_t

+ +

[top][index]

+

NAME

+
       cl_obj_mgr_t
+
+

DESCRIPTION

+
       The global object manager.
+
+       The manager must be created before constructing any other objects, and all
+       objects must be destroyed before the object manager is destroyed.
+
+       The manager is used to maintain the list of all objects currently active
+       in the system.  It provides a pool of relationship items used to
+       describe parent-child, or dependent, relationships between two objects.
+       The manager contains an asynchronous processing thread that is used to
+       support asynchronous object destruction.
+
+

SYNOPSIS

+
typedef struct _cl_obj_mgr
+{
+        cl_qlist_t                                      obj_list;
+        cl_spinlock_t                           lock;
+
+        cl_async_proc_t                         async_proc_mgr;
+
+        cl_qpool_t                                      rel_pool;
+
+}       cl_obj_mgr_t;
+
+

FIELDS

+
       obj_list
+               List of all objects in the system.  Objects are inserted into this
+               list when constructed and removed when freed.
+
+       lock
+               A lock used by the object manager to synchronize access to the obj_list.
+
+       async_proc_mgr
+               An asynchronous processing manager used to process asynchronous
+               destruction requests.  Users wishing to synchronize the execution of
+               specific routines with object destruction may queue work requests to
+               this processing manager.
+
+       rel_pool
+               Pool of items used to describe dependent relationships.  Users may
+               obtain relationship objects from this pool when forming relationships,
+               but are not required to do so.
+
+

SEE ALSO

+
       Object, cl_obj_mgr_create, cl_obj_mgr_destroy,
+       cl_obj_construct, cl_obj_deinit,
+       cl_qlist_t, cl_spinlock_t, cl_async_proc_t, cl_qpool_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_ref

+ +

[top][index]

+

NAME

+
       cl_obj_ref
+
+

DESCRIPTION

+
       Increments the reference count on an object and returns the updated count.
+       This routine is thread safe, but does not result in locking the object.
+
+

SYNOPSIS

+
CL_EXPORT int32_t CL_API
+cl_obj_ref(
+        IN                              cl_obj_t * const                        p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to reference.
+
+

RETURN VALUE

+
       The updated reference count.
+
+

SEE ALSO

+
       Object, cl_obj_t, cl_obj_deref
+
+
+
+ +

[Structures] +Component Library: Object/cl_obj_rel_t

+ +

[top][index]

+

NAME

+
       cl_obj_rel_t
+
+

DESCRIPTION

+
       Identifies a dependent relationship between two objects.
+
+

SYNOPSIS

+
typedef struct _cl_obj_rel
+{
+        cl_pool_item_t                          pool_item;              /* Must be first. */
+        struct _cl_obj                          *p_parent_obj;
+
+        cl_list_item_t                          list_item;
+        struct _cl_obj                          *p_child_obj;
+
+}       cl_obj_rel_t;
+
+

FIELDS

+
       pool_item
+               An item used to store the relationship in a free pool maintained
+               by the object manager.  This field is also used by the parent object
+               to store the relationship in its child_list.
+
+       p_parent_obj
+               A reference to the parent object for the relationship.
+
+       list_item
+               This field is used by the child object to store the relationship in
+               its parent_list.
+
+       p_child_obj
+               A reference to the child object for the relationship.
+
+

NOTES

+
       This structure is used to define all dependent relationships.  Dependent
+       relationships are those where the destruction of a parent object results in
+       the destruction of child objects.  For other types of relationships, simple
+       references between objects may be used.
+
+       Relationship items are stored in lists maintained by both the parent
+       and child objects.  References to both objects exist while the
+       relationship is maintained.  Typically, relationships are defined by
+       the user by calling cl_obj_insert_rel, but are destroyed automatically
+       via an object's destruction process.
+
+

SEE ALSO

+
       Object, cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel,
+       cl_obj_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_remove_rel

+ +

[top][index]

+

NAME

+
       cl_obj_remove_rel
+
+

DESCRIPTION

+
       Manually removes a relationship between two objects.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_remove_rel(
+        IN                              cl_obj_rel_t * const            p_rel );
+
+

PARAMETERS

+
       p_rel
+               [in] A reference to the relationship to remove.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       This routine permits a user to manually remove a dependent relationship
+       between two objects.  When removing a relationship using this call, the
+       user must ensure that objects referenced by the relationship are not
+       destroyed, either directly or indirectly via a parent.
+
+

SEE ALSO

+
       Object, cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_reset

+ +

[top][index]

+

NAME

+
       cl_obj_reset
+
+

DESCRIPTION

+
       Reset an object's state.  This is called after cl_obj_destroy has
+       been called on an object, but before cl_obj_deinit has been invoked.
+       After an object has been reset, it is ready for re-use.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_obj_reset(
+        IN                              cl_obj_t * const                        p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object to reset.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       This routine allows an object to be initialized once, then destroyed
+       and re-used multiple times.  This permits the user to allocate and
+       maintain a pool of objects.  The objects may be reset and returned to
+       the pool, rather than freed, after being destroyed.  The objects would
+       not be freed until the pool itself was destroyed.
+
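+
+       A hedged sketch of this reuse pattern from a pfn_free callback
+       (return_to_free_list is a hypothetical user-managed detail):
+
+               static void CL_API my_pooled_free_cb( IN cl_obj_t *p_obj )
+               {
+                       /* Make the object initializable again rather than
+                        * freeing its memory. */
+                       cl_obj_reset( p_obj );
+
+                       /* Return it to a user-managed free list. */
+                       return_to_free_list( p_obj );
+               }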
+

SEE ALSO

+
       Object, cl_obj_destroy, cl_obj_deinit, cl_obj_t
+
+
+
+ +

[Structures] +Component Library: Object/cl_obj_t

+ +

[top][index]

+

NAME

+
       cl_obj_t
+
+

DESCRIPTION

+
       Object structure.
+
+

SYNOPSIS

+
typedef struct _cl_obj
+{
+        cl_pool_item_t                          pool_item;      /* Must be first. */
+        uint32_t                                        type;
+        cl_state_t                                      state;
+        cl_destroy_type_t                       destroy_type;
+
+        cl_async_proc_item_t            async_item;
+        cl_event_t                                      event;
+
+        cl_pfn_obj_call_t                       pfn_destroying;
+        cl_pfn_obj_call_t                       pfn_cleanup;
+        cl_pfn_obj_call_t                       pfn_free;
+
+        cl_spinlock_t                           lock;
+
+        cl_qlist_t                                      parent_list;
+        cl_qlist_t                                      child_list;
+
+        atomic32_t                                      ref_cnt;
+
+}       cl_obj_t;
+
+

FIELDS

+
       pool_item
+               Used to track the object with the global object manager.  We use
+               a pool item, rather than a list item, to let users store the object
+               in a pool.
+
+       type
+               Stores a user-specified object type.
+
+       state
+               Records the current state of the object, such as initialized,
+               destroying, etc.
+
+       destroy_type
+               Specifies the type of destruction, synchronous or asynchronous, to
+               perform on this object.
+
+       async_item
+               Asynchronous item used when destroying the object asynchronously.
+               This item is queued to an asynchronous thread to complete destruction
+               processing.
+
+       event
+               Event used when destroying the object synchronously.  A call to destroy
+               the object will wait on this event until the destruction has completed.
+
+       pfn_destroying
+               User-specified callback invoked to notify a user that an object has
+               been marked for destruction.  This callback is invoked directly from
+               the thread destroying the object and is used to notify a user that
+               a parent object has invoked a child object's destructor.
+
+       pfn_cleanup
+               User-specified callback invoked as an object is undergoing destruction.
+               For objects destroyed asynchronously, this callback is invoked from
+               the context of the asynchronous destruction thread.  Users may block
+               in the context of this thread; however, further destruction processing
+               will not continue until this callback returns.
+
+       pfn_free
+               User-specified callback invoked to notify a user that an object has
+               been destroyed and is ready for deallocation.  Users should either
+               call cl_obj_deinit or cl_obj_reset from within this callback.
+
+       lock
+               A lock provided by the object.
+
+       parent_list
+               A list of relationships to parent objects that an object is dependent
+               on.
+
+       child_list
+               A list of all child objects that are dependent on this object.
+               Destroying this object will result in all related objects maintained
+               in the child list also being destroyed.
+
+       ref_cnt
+               A count of the number of objects still referencing this object.
+
+

SEE ALSO

+
       Object, cl_obj_construct, cl_obj_init, cl_obj_destroy,
+       cl_obj_deinit, cl_pfn_obj_call_t, cl_destroy_type_t,
+       cl_pool_item_t, cl_state_t, cl_async_proc_item_t,
+       cl_event_t, cl_spinlock_t, cl_qlist_t, atomic32_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_type

+ +

[top][index]

+

NAME

+
       cl_obj_type
+
+

DESCRIPTION

+
       Returns the type of an object.
+
+

SYNOPSIS

+
CL_INLINE uint32_t CL_API
+cl_obj_type(
+        IN                              cl_obj_t * const                        p_obj )
+{
+        return p_obj->type;
+}
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object whose type to return.
+
+

RETURN VALUE

+
       The type of the object, as specified in the call to cl_obj_construct.
+
+

SEE ALSO

+
       Object, cl_obj_t, cl_obj_construct
+
+
+
+ +

[Functions] +Component Library: Object/cl_obj_unlock

+ +

[top][index]

+

NAME

+
       cl_obj_unlock
+
+

DESCRIPTION

+
       Releases an object's lock previously acquired by a call to cl_obj_lock.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_obj_unlock(
+        IN                              cl_obj_t * const                        p_obj )
+{
+        CL_ASSERT( p_obj->state == CL_INITIALIZED ||
+                p_obj->state == CL_DESTROYING );
+        cl_spinlock_release( &p_obj->lock );
+}
+
+

PARAMETERS

+
       p_obj
+               [in] A pointer to the object whose lock to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Object, cl_obj_t, cl_obj_lock
+
+
+
+ +

[Definitions] +Component Library: Object/cl_pfn_obj_call_t

+ +

[top][index]

+

NAME

+
       cl_pfn_obj_call_t
+
+

DESCRIPTION

+
       The cl_pfn_obj_call_t function type defines the prototype for functions
+       used to notify the user of progress in an object's destruction.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_obj_call_t)(
+        IN                              struct _cl_obj                          *p_obj );
+
+

PARAMETERS

+
       p_obj
+               [in] Pointer to a cl_obj_t.  This is the object being returned to
+               the user.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
       This function type is provided as a prototype for functions provided
+       by users as parameters to the cl_obj_init function.
+
+

SEE ALSO

+
       Object, cl_obj_init, cl_obj_t
+
+
+
+ +

[Functions] +Component Library: Object/cl_rel_alloc

+ +

[top][index]

+

NAME

+
       cl_rel_alloc
+
+

DESCRIPTION

+
       Retrieves an object relationship item from the object manager.
+
+

SYNOPSIS

+
CL_EXPORT cl_obj_rel_t* CL_API
+cl_rel_alloc(void);
+
+

PARAMETERS

+
       None.
+
+

RETURN VALUE

+
       A reference to an allocated relationship object, or NULL if no relationship
+       object could be allocated.
+
+

NOTES

+
       This routine retrieves a cl_obj_rel_t structure from a pool maintained
+       by the object manager.  The pool automatically grows as needed.
+
+       Relationship items are used to describe a dependent relationship between
+       a parent and child object.  In cases where a child has a fixed number of
+       relationships, the user may be able to allocate and manage the cl_obj_rel_t
+       structures more efficiently than obtaining the structures through this call.
+
+

SEE ALSO

+
       Object, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel, cl_obj_destroy
+
+
+
+ +

[Functions] +Component Library: Object/cl_rel_free

+ +

[top][index]

+

NAME

+
       cl_rel_free
+
+

DESCRIPTION

+
       Return a relationship object to the global object manager.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_rel_free(
+        IN                              cl_obj_rel_t * const            p_rel );
+
+

PARAMETERS

+
       p_rel
+               [in] A reference to the relationship item to free.
+
+

RETURN VALUE

+
       None.
+
+

NOTES

+
       Relationship items must not be freed until both the parent and child
+       object have removed their references to one another.  Relationship items
+       may be freed after calling cl_obj_remove_rel or after the associated
+       child object's free callback has been invoked.  In the latter case, the
+       invalid relationship items are referenced by the child object's parent_list.
+
+

SEE ALSO

+
       Object, cl_rel_alloc, cl_obj_insert_rel, cl_obj_remove_rel, cl_obj_destroy
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_passivelock_h.html b/branches/Ndi/docs/complib/cl_passivelock_h.html new file mode 100644 index 00000000..50cdf150 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_passivelock_h.html @@ -0,0 +1,417 @@ + + + + +./inc_docs/complib/cl_passivelock_h.html + + + + +Generated from ./inc/complib/cl_passivelock.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Passive Lock

+ +

[top][parent][index]

+

NAME

+
       Passive Lock
+
+

DESCRIPTION

+
       The Passive Lock provides synchronization between multiple threads sharing
+       the lock and a single thread holding the lock exclusively.
+
+       The passive lock works only between threads and cannot be used in
+       situations where the caller cannot be put into a waiting state.
+
+       The passive lock functions operate on a cl_plock_t structure which should
+       be treated as opaque and should be manipulated only through the provided
+       functions.
+
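+
+       A minimal usage sketch (status checking abbreviated):
+
+               cl_plock_t      lock;
+
+               cl_plock_construct( &lock );
+               if( cl_plock_init( &lock ) == CL_SUCCESS )
+               {
+                       /* Shared access: many threads may hold the lock. */
+                       cl_plock_acquire( &lock );
+                       /* ... read shared state ... */
+                       cl_plock_release( &lock );
+
+                       /* Exclusive access: one thread, no readers. */
+                       cl_plock_excl_acquire( &lock );
+                       /* ... modify shared state ... */
+                       cl_plock_release( &lock );
+
+                       cl_plock_destroy( &lock );
+               }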
+

SEE ALSO

+
       Structures:
+               cl_plock_t
+
+       Initialization:
+               cl_plock_construct, cl_plock_init, cl_plock_destroy
+
+       Manipulation
+               cl_plock_acquire, cl_plock_excl_acquire, cl_plock_release
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_acquire

+ +

[top][index]

+

NAME

+
       cl_plock_acquire
+
+

DESCRIPTION

+
       The cl_plock_acquire function acquires a passive lock for
+       shared access.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_plock_acquire(
+        IN      cl_plock_t* const       p_lock )
+{
+        cl_status_t     status;
+
+        CL_ASSERT( p_lock );
+
+        status =
+                cl_event_wait_on( &p_lock->reader_event, EVENT_NO_TIMEOUT, FALSE );
+        CL_ASSERT( status == CL_SUCCESS );
+
+        /*
+         * Increment the reader count to block a thread trying for exclusive
+         * access.
+         */
+        cl_atomic_inc( &p_lock->reader_count );
+#ifdef DBG_PASSIVE_LOCKS
+        cl_dbg_out( "cl_plock_acquire: ReaderCount = %u\n",
+                p_lock->reader_count );
+#endif
+        /*
+         * Release the reader event to satisfy the wait of another reader
+         * or a writer.
+         */
+        cl_event_signal( &p_lock->reader_event );
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure to acquire.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_release, cl_plock_excl_acquire
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_construct

+ +

[top][index]

+

NAME

+
       cl_plock_construct
+
+

DESCRIPTION

+
       The cl_plock_construct function initializes the state of a
+       passive lock.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_plock_construct(
+        IN      cl_plock_t* const       p_lock )
+{
+        CL_ASSERT( p_lock );
+
+        p_lock->reader_count = 0;
+        cl_event_construct( &p_lock->reader_event );
+        cl_event_construct( &p_lock->writer_event );
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_plock_destroy without first calling cl_plock_init.
+
+       Calling cl_plock_construct is a prerequisite to calling any other
+       passive lock function except cl_plock_init.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_init, cl_plock_destroy
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_destroy

+ +

[top][index]

+

NAME

+
       cl_plock_destroy
+
+

DESCRIPTION

+
       The cl_plock_destroy function performs any necessary cleanup
+       of a passive lock.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_plock_destroy(
+        IN      cl_plock_t* const       p_lock )
+{
+        CL_ASSERT( p_lock );
+        CL_ASSERT( p_lock->reader_count == 0 );
+
+        cl_event_destroy( &p_lock->writer_event );
+        cl_event_destroy( &p_lock->reader_event );
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_plock_destroy performs any necessary cleanup of the specified
+       passive lock.
+
+       This function must only be called if cl_plock_construct or
+       cl_plock_init has been called. The passive lock must not be held
+       when calling this function.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_construct, cl_plock_init
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_excl_acquire

+ +

[top][index]

+

NAME

+
       cl_plock_excl_acquire
+
+

DESCRIPTION

+
       The cl_plock_excl_acquire function acquires exclusive access
+       to a passive lock.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_plock_excl_acquire(
+        IN      cl_plock_t* const       p_lock )
+{
+        cl_status_t     status;
+
+        CL_ASSERT( p_lock );
+
+        /* Acquire the reader event.  This will block new readers. */
+        status =
+                cl_event_wait_on( &p_lock->reader_event, EVENT_NO_TIMEOUT, FALSE );
+        CL_ASSERT( status == CL_SUCCESS );
+
+        /* Wait for the writer event until all readers have exited. */
+        while( p_lock->reader_count )
+        {
+#ifdef DBG_PASSIVE_LOCKS
+                cl_dbg_out( "cl_plock_excl_acquire: ReaderCount = %u\n",
+                        p_lock->reader_count );
+#endif
+                status =
+                        cl_event_wait_on( &p_lock->writer_event, EVENT_NO_TIMEOUT, FALSE );
+                CL_ASSERT( status == CL_SUCCESS );
+        }
+
+#ifdef DBG_PASSIVE_LOCKS
+        cl_dbg_out( "cl_plock_excl_acquire: Exit\n" );
+#endif
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure to acquire exclusively.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_release, cl_plock_acquire
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_init

+ +

[top][index]

+

NAME

+
       cl_plock_init
+
+

DESCRIPTION

+
       The cl_plock_init function initializes a passive lock.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_plock_init(
+        IN      cl_plock_t* const       p_lock )
+{
+        cl_status_t     status;
+
+        CL_ASSERT( p_lock );
+
+        cl_plock_construct( p_lock );
+
+        status = cl_event_init( &p_lock->writer_event, FALSE );
+        if( status != CL_SUCCESS )
+        {
+                cl_plock_destroy( p_lock );
+                return( status );
+        }
+
+        status = cl_event_init( &p_lock->reader_event, FALSE );
+        if( status != CL_SUCCESS )
+        {
+                cl_plock_destroy( p_lock );
+                return( status );
+        }
+
+        /*
+         * Set the writer event to signalled so that the first
+         * wait operation succeeds.
+         */
+        status = cl_event_signal( &p_lock->writer_event );
+        if( status != CL_SUCCESS )
+        {
+                cl_plock_destroy( p_lock );
+                return( status );
+        }
+
+        /*
+         * Set the reader event to signalled so that the first
+         * wait operation succeeds.
+         */
+        status = cl_event_signal( &p_lock->reader_event );
+        if( status != CL_SUCCESS )
+        {
+                cl_plock_destroy( p_lock );
+                return( status );
+        }
+
+        return( CL_SUCCESS );
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure to initialize.
+
+ RETURN VALUES
+       CL_SUCCESS if the passive lock was initialized successfully.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       Allows calling cl_plock_acquire, cl_plock_release,
+       cl_plock_excl_acquire, and cl_plock_excl_release.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_construct, cl_plock_destroy,
+       cl_plock_excl_acquire, cl_plock_excl_release,
+       cl_plock_acquire, cl_plock_release
+
+
+
+ +

[Functions] +Component Library: Passive Lock/cl_plock_release

+ +

[top][index]

+

NAME

+
       cl_plock_release
+
+

DESCRIPTION

+
       The cl_plock_release function releases a passive lock from
+       shared or exclusive access.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_plock_release(
+        IN      cl_plock_t* const       p_lock )
+{
+        CL_ASSERT( p_lock );
+
+        if( p_lock->reader_count )
+        {
+
+                /*
+                 * Decrement the count to allow a thread waiting for exclusive
+                 * access to continue.
+                 */
+                cl_atomic_dec( &p_lock->reader_count );
+
+                #ifdef DBG_PASSIVE_LOCKS
+                        cl_dbg_out( "cl_plock_release: ReaderCount = %u\n",
+                                p_lock->reader_count );
+                #endif
+
+                /* Release a writer, if any. */
+                cl_event_signal( &p_lock->writer_event );
+        }
+        else
+        {
+                /* Release threads waiting to acquire the lock. */
+                cl_event_signal( &p_lock->reader_event );
+                cl_event_signal( &p_lock->writer_event );
+
+                #ifdef DBG_PASSIVE_LOCKS
+                        cl_dbg_out( "cl_plock_release: Exit\n" );
+                #endif
+        }
+}
+
+

PARAMETERS

+
       p_lock
+               [in] Pointer to a cl_plock_t structure to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Passive Lock, cl_plock_acquire, cl_plock_excl_acquire
+
+
+
+ +

[Structures] +Component Library: Passive Lock/cl_plock_t

+ +

[top][index]

+

NAME

+
       cl_plock_t
+
+

DESCRIPTION

+
       Passive Lock structure.
+
+       The cl_plock_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_plock
+{
+        cl_event_t              reader_event;
+        cl_event_t              writer_event;
+        atomic32_t              reader_count;
+
+} cl_plock_t;
+
+

FIELDS

+
       reader_event
+               Event used to synchronize shared access to the lock.
+
+       writer_event
+               Event used to synchronize exclusive access to the lock.
+
+       reader_count
+               Number of threads holding the lock for shared access.
+
+

SEE ALSO

+
       Passive Lock
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_perf_h.html b/branches/Ndi/docs/complib/cl_perf_h.html new file mode 100644 index 00000000..df5184f7 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_perf_h.html @@ -0,0 +1,583 @@ + + + + +./inc_docs/complib/cl_perf_h.html + + + + +Generated from ./inc/complib/cl_perf.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Performance Counters

+ +

[top][parent][index]

+

NAME

+
       Performance Counters
+
+

DESCRIPTION

+
       The performance counters allow timing operations to benchmark
+       software performance and help identify potential bottlenecks.
+
+       All performance counters are NULL macros when disabled, preventing them
+       from adversely affecting performance in builds where the counters are not
+       used.
+
+       Each counter records the elapsed time in microseconds, the minimum time
+       elapsed, and the total number of samples.
+
+       Each counter is independently protected by a spinlock, allowing use of
+       the counters in multi-processor environments.
+
+       The impact of serializing access to performance counters is measured,
+       allowing measurements to be corrected as necessary.
+
+

NOTES

+
       Performance counters do impact performance, and should only be enabled
+       when gathering data.  Counters can be enabled or disabled on a per-user
+       basis at compile time.  To enable the counters, users should define
+       the PERF_TRACK_ON keyword before including the cl_perf.h file.
+       Undefining the PERF_TRACK_ON keyword disables the performance counters.
+       When disabled, all performance tracking calls resolve to no-ops.
+
+       When using performance counters, it is the user's responsibility to
+       maintain the counter indexes.  It is recommended that users define an
+       enumerated type to use for counter indexes.  It improves readability
+       and simplifies maintenance by reducing the work necessary in managing
+       the counter indexes.
+
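+
+       As an illustrative sketch (the counter names and the g_perf container
+       are assumptions, not part of the library), an enumerated type keeps
+       the counter indexes manageable:
+
+               #define PERF_TRACK_ON   /* enable counters for this file */
+               #include <complib/cl_perf.h>
+
+               typedef enum _my_perf_index
+               {
+                       MY_PERF_SEND,   /* times the send path */
+                       MY_PERF_RECV,   /* times the receive path */
+                       MY_PERF_MAX     /* number of counters to allocate */
+               }       my_perf_index_t;
+
+               cl_perf_t       g_perf;
+
+               /* After cl_perf_construct( &g_perf ): */
+               status = cl_perf_init( &g_perf, MY_PERF_MAX );
+
+               /* In the timed path: */
+               PERF_DECLARE_START( MY_PERF_SEND );
+               /* ... operation being timed ... */
+               cl_perf_stop( &g_perf, MY_PERF_SEND );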
+

SEE ALSO

+
       Structures:
+               cl_perf_t
+
+       Initialization:
+               cl_perf_construct, cl_perf_init, cl_perf_destroy
+
+       Manipulation
+               cl_perf_reset, cl_perf_display, cl_perf_start, cl_perf_update,
+               cl_perf_log, cl_perf_stop
+
+       Macros:
+               PERF_DECLARE, PERF_DECLARE_START
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_clr

+ +

[top][index]

+

NAME

+
       cl_perf_clr
+
+

DESCRIPTION

+
       The cl_perf_clr macro clears a counter variable.
+
+

SYNOPSIS

+
void
+cl_perf_clr(
+        IN      const uintn_t index );
+
+

PARAMETERS

+
       index
+               [in] Index of the performance counter to clear.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+       cl_perf_update, cl_perf_stop
+
+
+
+ +

[Functions] +Component Library: Performance Counters/cl_perf_construct

+ +

[top][index]

+

NAME

+
       cl_perf_construct
+
+

DESCRIPTION

+
       The cl_perf_construct macro constructs a performance
+       tracking container.
+
+

SYNOPSIS

+
void
+cl_perf_construct(
+        IN      cl_perf_t* const        p_perf );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_perf_construct allows calling cl_perf_destroy without first calling
+       cl_perf_init.
+
+       Calling cl_perf_construct is a prerequisite to calling any other
+       performance counter function except cl_perf_init.
+
+       This function is implemented as a macro and has no effect when
+       performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, cl_perf_init, cl_perf_destroy
+
+
+
+ +

[Functions] +Component Library: Performance Counters/cl_perf_destroy

+ +

[top][index]

+

NAME

+
       cl_perf_destroy
+
+

DESCRIPTION

+
       The cl_perf_destroy function destroys a performance tracking container.
+
+

SYNOPSIS

+
void
+cl_perf_destroy(
+        IN      cl_perf_t* const        p_perf,
+        IN      const boolean_t         display );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to destroy.
+
+       display
+               [in] If TRUE, causes the performance counters to be displayed.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_perf_destroy frees all resources allocated in a call to cl_perf_init.
+       If the display parameter is set to TRUE, displays all counter values
+       before deallocating resources.
+
+       This function should only be called after a call to cl_perf_construct
+       or cl_perf_init.
+
+       This function is implemented as a macro and has no effect when
+       performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, cl_perf_construct, cl_perf_init
+
+
+
+ +

[Functions] +Component Library: Performance Counters/cl_perf_display

+ +

[top][index]

+

NAME

+
       cl_perf_display
+
+

DESCRIPTION

+
       The cl_perf_display function displays the current performance
+       counter values.
+
+

SYNOPSIS

+
void
+cl_perf_display(
+        IN      const cl_perf_t* const  p_perf );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container whose counter
+               values to display.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function is implemented as a macro and has no effect when
+       performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, cl_perf_init
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_inc

+ +

[top][index]

+

NAME

+
       cl_perf_inc
+
+

DESCRIPTION

+
       The cl_perf_inc macro increments a counter variable by one.
+
+

SYNOPSIS

+
void
+cl_perf_inc(
+        IN      const uintn_t index );
+
+

PARAMETERS

+
       index
+               [in] Index of the performance counter to increment.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+       cl_perf_update, cl_perf_stop
+
+
+
+ +

[Functions] +Component Library: Performance Counters/cl_perf_init

+ +

[top][index]

+

NAME

+
       cl_perf_init
+
+

DESCRIPTION

+
       The cl_perf_init function initializes a performance counter container
+       for use.
+
+

SYNOPSIS

+
cl_status_t
+cl_perf_init(
+        IN      cl_perf_t* const        p_perf,
+        IN      const uintn_t           num_counters );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to initialize.
+
+       num_counters
+               [in] Number of counters to allocate in the container.
+
+ RETURN VALUES
+       CL_SUCCESS if initialization was successful.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+       the container.
+
+       CL_ERROR if an error was encountered initializing the locks for the
+       performance counters.
+
+

NOTES

+
       This function allocates all memory required for the requested number of
+       counters and initializes all locks protecting those counters.  After a
+       successful initialization, cl_perf_init calibrates the counters and
+       resets their value.
+
+       This function is implemented as a macro and has no effect when
+       performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, cl_perf_construct, cl_perf_destroy, cl_perf_display
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_log

+ +

[top][index]

+

NAME

+
       cl_perf_log
+
+

DESCRIPTION

+
       The cl_perf_log macro adds a given timing sample to a
+       counter in a performance counter container.
+
+

SYNOPSIS

+
void
+cl_perf_log(
+        IN      cl_perf_t* const        p_perf,
+        IN      const uintn_t           index,
+        IN      const uint64_t          pc_total_time );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to whose counter
+               the sample should be added.
+
+       index
+               [in] Number of the performance counter to update with a new sample.
+
+       pc_total_time
+               [in] Total elapsed time for the sample being added.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+       cl_perf_update, cl_perf_stop
+
+
+
+ +

[Functions] +Component Library: Performance Counters/cl_perf_reset

+ +

[top][index]

+

NAME

+
       cl_perf_reset
+
+

DESCRIPTION

+
       The cl_perf_reset function resets the counters contained in
+       a performance tracking container.
+
+

SYNOPSIS

+
void
+cl_perf_reset(
+        IN      cl_perf_t* const        p_perf );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container whose counters
+               to reset.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function is implemented as a macro and has no effect when
+       performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_start

+ +

[top][index]

+

NAME

+
       cl_perf_start
+
+

DESCRIPTION

+
       The cl_perf_start macro sets the starting value of a timed sequence.
+
+

SYNOPSIS

+
void
+cl_perf_start(
+        IN      const uintn_t index );
+
+

PARAMETERS

+
       index
+               [in] Index of the performance counter to set.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+       cl_perf_update, cl_perf_stop
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_stop

+ +

[top][index]

+

NAME

+
       cl_perf_stop
+
+

DESCRIPTION

+
       The cl_perf_stop macro updates a counter in a performance counter
+       container with a new timing sample.
+
+

SYNOPSIS

+
void
+cl_perf_stop(
+        IN      cl_perf_t* const        p_perf,
+        IN      const uintn_t           index );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to whose counter
+               a sample should be added.
+
+       index
+               [in] Number of the performance counter to update with a new sample.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The ending time stamp is taken and elapsed time calculated before updating
+       the specified counter.
+
+       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+       cl_perf_log
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_update

+ +

[top][index]

+

NAME

+
       cl_perf_update
+
+

DESCRIPTION

+
       The cl_perf_update macro adds a timing sample based on a provided start
+       time to a counter in a performance counter container.
+
+

SYNOPSIS

+
void
+cl_perf_update(
+        IN      cl_perf_t* const        p_perf,
+        IN      const uintn_t           index,
+        IN      const uint64_t          start_time );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to whose counter
+               the sample should be added.
+
+       index
+               [in] Number of the performance counter to update with a new sample.
+
+       start_time
+               [in] Timestamp to use as the start time for the timing sample.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+       cl_perf_log, cl_perf_stop
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/cl_perf_update_ctr

+ +

[top][index]

+

NAME

+
       cl_perf_update_ctr
+
+

DESCRIPTION

+
       The cl_perf_update_ctr macro updates a counter in a performance
+       counter container.
+
+

SYNOPSIS

+
void
+cl_perf_update_ctr(
+        IN      cl_perf_t* const        p_perf,
+        IN      const uintn_t           index );
+
+

PARAMETERS

+
       p_perf
+               [in] Pointer to a performance counter container to whose counter
+               the sample should be added.
+
+       index
+               [in] Number of the performance counter to update with a new sample.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+       cl_perf_log, cl_perf_stop
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/PERF_DECLARE

+ +

[top][index]

+

NAME

+
       PERF_DECLARE
+
+

DESCRIPTION

+
       The PERF_DECLARE macro declares a performance counter variable used
+       to store the starting time of a timing sequence.
+
+

SYNOPSIS

+
*       PERF_DECLARE( index )
+
+

PARAMETERS

+
       index
+               [in] Index of the performance counter for which to use this
+               variable.
+
+

NOTES

+
       Variables should generally be declared on the stack to support
+       multi-threading.  In cases where a counter needs to be used to
+       time operations across multiple functions, care must be taken to
+       ensure that the start time stored in this variable is not overwritten
+       before the related performance counter has been updated.
+
+       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE_START, cl_perf_start, cl_perf_log,
+       cl_perf_stop
+
+
+
+ +

[Definitions] +Component Library: Performance Counters/PERF_DECLARE_START

+ +

[top][index]

+

NAME

+
       PERF_DECLARE_START
+
+

DESCRIPTION

+
       The PERF_DECLARE_START macro declares a performance counter variable
+       and sets it to the starting time of a timed sequence.
+
+

SYNOPSIS

+
*       PERF_DECLARE_START( index )
+
+

PARAMETERS

+
       index
+               [in] Index of the performance counter for which to use this
+               variable.
+
+

NOTES

+
       Variables should generally be declared on the stack to support
+       multi-threading.
+
+       This macro has no effect when performance counters are disabled.
+
+

SEE ALSO

+
       Performance Counters, PERF_DECLARE, cl_perf_start, cl_perf_log,
+       cl_perf_stop
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_pool_h.html b/branches/Ndi/docs/complib/cl_pool_h.html new file mode 100644 index 00000000..8502b831 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_pool_h.html @@ -0,0 +1,581 @@ + + + + +./inc_docs/complib/cl_pool_h.html + + + + +Generated from ./inc/complib/cl_pool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Pool

+ +

[top][parent][index]

+

NAME

+
       Pool
+
+

DESCRIPTION

+
       The pool provides a self-contained and self-sustaining pool
+       of user-defined objects.
+
+       To aid in object oriented design, the pool provides the user
+       the ability to specify callbacks that are invoked for each object for
+       construction, initialization, and destruction. Constructor and destructor
+       callback functions may not fail.
+
+       A pool does not return memory to the system as the user returns
+       objects to the pool. The only method of returning memory to the system is
+       to destroy the pool.
+
+       The Pool functions operate on a cl_pool_t structure which should be treated
+       as opaque and should be manipulated only through the provided functions.
+
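+
+       A minimal usage sketch (my_item_t is a hypothetical user type; the
+       full cl_pool_init parameter list is documented below and is elided
+       here):
+
+               cl_pool_t       pool;
+               my_item_t       *p_item;
+
+               cl_pool_construct( &pool );
+               /* ... cl_pool_init( &pool, ... ) with the desired counts,
+                * object size, and optional callbacks ... */
+
+               p_item = (my_item_t*)cl_pool_get( &pool );
+               if( p_item )
+               {
+                       /* ... use the object ... */
+                       cl_pool_put( &pool, p_item );
+               }
+
+               cl_pool_destroy( &pool );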
+

SEE ALSO

+
       Structures:
+               cl_pool_t
+
+       Callbacks:
+               cl_pfn_pool_init_t, cl_pfn_pool_dtor_t
+
+       Initialization/Destruction:
+               cl_pool_construct, cl_pool_init, cl_pool_destroy
+
+       Manipulation:
+               cl_pool_get, cl_pool_put, cl_pool_grow
+
+       Attributes:
+               cl_is_pool_inited, cl_pool_count
+
+
+
+ +

[Functions] +Component Library: Pool/cl_is_pool_inited

+ +

[top][index]

+

NAME

+
       cl_is_pool_inited
+
+

DESCRIPTION

+
       The cl_is_pool_inited function returns whether a pool was successfully
+       initialized.
+
+

SYNOPSIS

+
CL_INLINE uint32_t CL_API
+cl_is_pool_inited(
+        IN      const cl_pool_t* const  p_pool )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_pool );
+        return( cl_is_qcpool_inited( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure whose initialization state
+               to check.
+
+ RETURN VALUES
+       TRUE if the pool was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a pool to determine if invoking member
+       functions is appropriate.
+
+

SEE ALSO

+
       Pool
+
+
+
+ +

[Definitions] +Component Library: Pool/cl_pfn_pool_dtor_t

+ +

[top][index]

+

NAME

+
       cl_pfn_pool_dtor_t
+
+

DESCRIPTION

+
       The cl_pfn_pool_dtor_t function type defines the prototype for
+       functions used as destructor for objects being deallocated by a
+       pool.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_pool_dtor_t)(
+        IN      void* const                     p_object,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object to destruct.
+
+       context
+               [in] Context provided in the call to cl_pool_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_pool_init function.
+
+       The destructor is invoked once per allocated object, allowing the user
+       to perform any necessary cleanup. Users should not attempt to deallocate
+       the memory for the object, as the pool manages object
+       allocation and deallocation.
+
+

SEE ALSO

+
       Pool, cl_pool_init
+
+
+
+ +

[Definitions] +Component Library: Pool/cl_pfn_pool_init_t

+ +

[top][index]

+

NAME

+
       cl_pfn_pool_init_t
+
+

DESCRIPTION

+
       The cl_pfn_pool_init_t function type defines the prototype for
+       functions used as initializers for objects being allocated by a
+       pool.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_pool_init_t)(
+        IN      void* const                     p_object,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object to initialize.
+
+       context
+               [in] Context provided in a call to cl_pool_init.
+
+ RETURN VALUES
+       Return CL_SUCCESS to indicate that initialization of the object
+       was successful and initialization of further objects may continue.
+
+       Other cl_status_t values will be returned by cl_pool_init
+       and cl_pool_grow.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_pool_init function.
+
+       The initializer is invoked once per allocated object, allowing the user
+       to trap initialization failures. Returning a status other than CL_SUCCESS
+       aborts a grow operation, initiated either through cl_pool_init or
+       cl_pool_grow, and causes the initiating function to fail.
+       Any non-CL_SUCCESS status will be returned by the function that initiated
+       the grow operation.
+
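+
+       An illustrative initializer (my_item_t and its magic field are
+       assumptions used only for this example):
+
+               static cl_status_t CL_API
+               my_item_init(
+                       IN      void* const     p_object,
+                       IN      void*           context )
+               {
+                       my_item_t       *p_item = (my_item_t*)p_object;
+
+                       UNUSED_PARAM( context );
+
+                       /* Any status other than CL_SUCCESS aborts the grow. */
+                       p_item->magic = 0x12345678;
+                       return( CL_SUCCESS );
+               }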
+

SEE ALSO

+
       Pool, cl_pool_init, cl_pool_grow
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_construct

+ +

[top][index]

+

NAME

+
       cl_pool_construct
+
+

DESCRIPTION

+
       The cl_pool_construct function constructs a pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_pool_construct(
+        IN      cl_pool_t* const        p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_pool_init, cl_pool_destroy, and cl_is_pool_inited.
+
+       Calling cl_pool_construct is a prerequisite to calling any other
+       pool function except cl_pool_init.
+
+

SEE ALSO

+
       Pool, cl_pool_init, cl_pool_destroy, cl_is_pool_inited
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_count

+ +

[top][index]

+

NAME

+
       cl_pool_count
+
+

DESCRIPTION

+
       The cl_pool_count function returns the number of available objects
+       in a pool.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_pool_count(
+        IN      cl_pool_t* const        p_pool )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_count( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure for which the number of
+               available objects is requested.
+
+

RETURN VALUE

+
       Returns the number of objects available in the specified pool.
+
+

SEE ALSO

+
       Pool
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_destroy

+ +

[top][index]

+

NAME

+
       cl_pool_destroy
+
+

DESCRIPTION

+
       The cl_pool_destroy function destroys a pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_pool_destroy(
+        IN      cl_pool_t* const        p_pool )
+{
+        CL_ASSERT( p_pool );
+        cl_qcpool_destroy( &p_pool->qcpool );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       All memory allocated for objects is freed. The destructor callback,
+       if any, will be invoked for every allocated object. Further operations
+       on the pool should not be attempted after cl_pool_destroy
+       is invoked.
+
+       This function should only be called after a call to
+       cl_pool_construct or cl_pool_init.
+
+       In a debug build, cl_pool_destroy asserts that all objects are in
+       the pool.
+
+

SEE ALSO

+
       Pool, cl_pool_construct, cl_pool_init
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_get

+ +

[top][index]

+

NAME

+
       cl_pool_get
+
+

DESCRIPTION

+
       The cl_pool_get function retrieves an object from a pool.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_pool_get(
+        IN      cl_pool_t* const        p_pool )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_pool );
+
+        p_pool_obj = (cl_pool_obj_t*)cl_qcpool_get( &p_pool->qcpool );
+        if( !p_pool_obj )
+                return( NULL );
+
+        CL_ASSERT( p_pool_obj->list_obj.p_object );
+        return( (void*)p_pool_obj->list_obj.p_object );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure from which to retrieve
+               an object.
+
+ RETURN VALUES
+       Returns a pointer to an object.
+
+       Returns NULL if the pool is empty and can not be grown automatically.
+
+

NOTES

+
       cl_pool_get returns the object at the head of the pool. If the pool is
+       empty, it is automatically grown to accommodate this request unless the
+       grow_size parameter passed to the cl_pool_init function was zero.
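+
+       A typical usage sketch, assuming an initialized pool named my_pool and
+       the hypothetical my_obj_t object type:
+
+               my_obj_t        *p_obj;
+
+               /* NULL means the pool is empty and could not grow. */
+               p_obj = (my_obj_t*)cl_pool_get( &my_pool );
+               if( !p_obj )
+                       return( CL_INSUFFICIENT_MEMORY );
+
+               /* ... use the object ... */
+
+               /* Return the object to the head of the pool. */
+               cl_pool_put( &my_pool, p_obj );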
+
+

SEE ALSO

+
       Pool, cl_pool_get_tail, cl_pool_put, cl_pool_grow, cl_pool_count
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_grow

+ +

[top][index]

+

NAME

+
       cl_pool_grow
+
+

DESCRIPTION

+
       The cl_pool_grow function grows a pool by
+       the specified number of objects.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_pool_grow(
+        IN      cl_pool_t* const        p_pool,
+        IN      const size_t            obj_count )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure whose capacity to grow.
+
+       obj_count
+               [in] Number of objects by which to grow the pool.
+
+ RETURN VALUES
+       CL_SUCCESS if the pool grew successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+       pool.
+
+       Any cl_status_t value returned by the optional initialization callback
+       function specified by the pfn_initializer parameter passed to the
+       cl_pool_init function.
+
+

NOTES

+
       It is not necessary to call cl_pool_grow if the pool is
+       configured to grow automatically.
+
+

SEE ALSO

+
       Pool
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_init

+ +

[top][index]

+

NAME

+
       cl_pool_init
+
+

DESCRIPTION

+
       The cl_pool_init function initializes a pool for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_pool_init(
+        IN      cl_pool_t* const                p_pool,
+        IN      const size_t                    min_count,
+        IN      const size_t                    max_count,
+        IN      const size_t                    grow_size,
+        IN      const size_t                    object_size,
+        IN      cl_pfn_pool_init_t              pfn_initializer OPTIONAL,
+        IN      cl_pfn_pool_dtor_t              pfn_destructor OPTIONAL,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure to initialize.
+
+       min_count
+               [in] Minimum number of objects that the pool should support. All
+               necessary allocations to allow storing the minimum number of items
+               are performed at initialization time, and all necessary callbacks
+               invoked.
+
+       max_count
+               [in] Maximum number of objects to which the pool is allowed to grow.
+               A value of zero specifies no maximum.
+
+       grow_size
+               [in] Number of objects to allocate when incrementally growing the pool.
+               A value of zero disables automatic growth.
+
+       object_size
+               [in] Size, in bytes, of each object.
+
+       pfn_initializer
+               [in] Initialization callback to invoke for every new object when
+               growing the pool. This parameter is optional and may be NULL.
+               See the cl_pfn_pool_init_t function type declaration for details
+               about the callback function.
+
+       pfn_destructor
+               [in] Destructor callback to invoke for every object before memory for
+               that object is freed. This parameter is optional and may be NULL.
+               See the cl_pfn_pool_dtor_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the pool was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+       pool.
+
+       CL_INVALID_SETTING if the maximum size is non-zero and less than the
+       minimum size.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function specified by the pfn_initializer parameter.
+
+

NOTES

+
       cl_pool_init initializes, and if necessary, grows the pool to
+       the capacity desired.
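+
+       An illustrative initialization sketch, using the hypothetical my_obj_t
+       type and my_obj_init callback sketched under cl_pfn_pool_init_t:
+
+               cl_pool_t       my_pool;
+               cl_status_t     status;
+
+               cl_pool_construct( &my_pool );
+
+               /* 16 objects up front, no maximum, grow 8 at a time. */
+               status = cl_pool_init( &my_pool, 16, 0, 8,
+                       sizeof(my_obj_t), my_obj_init, NULL, NULL );
+               if( status != CL_SUCCESS )
+                       return( status );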
+
+

SEE ALSO

+
       Pool, cl_pool_construct, cl_pool_destroy,
+       cl_pool_get, cl_pool_put, cl_pool_grow,
+       cl_pool_count, cl_pfn_pool_init_t, cl_pfn_pool_dtor_t
+
+
+
+ +

[Functions] +Component Library: Pool/cl_pool_put

+ +

[top][index]

+

NAME

+
       cl_pool_put
+
+

DESCRIPTION

+
       The cl_pool_put function returns an object to a pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_pool_put(
+        IN      cl_pool_t* const        p_pool,
+        IN      void* const                     p_object )
+{
+        cl_pool_obj_t   *p_pool_obj;
+
+        CL_ASSERT( p_pool );
+        CL_ASSERT( p_object );
+
+        /* Calculate the offset to the list object representing this object. */
+        p_pool_obj = (cl_pool_obj_t*)
+                (((uint8_t*)p_object) - sizeof(cl_pool_obj_t));
+
+        /* good sanity check */
+        CL_ASSERT( p_pool_obj->list_obj.p_object == p_object );
+
+        cl_qcpool_put( &p_pool->qcpool, (cl_pool_item_t*)p_pool_obj );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_pool_t structure to which to return
+               an object.
+
+       p_object
+               [in] Pointer to an object to return to the pool.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_pool_put places the returned object at the head of the pool.
+
+       The object specified by the p_object parameter must have been
+       retrieved from the pool by a previous call to cl_pool_get.
+
+

SEE ALSO

+
       Pool, cl_pool_put_tail, cl_pool_get
+
+
+
+ +

[Structures] +Component Library: Pool/cl_pool_t

+ +

[top][index]

+

NAME

+
       cl_pool_t
+
+

DESCRIPTION

+
       Pool structure.
+
+       The cl_pool_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_pool
+{
+        cl_qcpool_t                             qcpool;
+        cl_pfn_pool_init_t              pfn_init;
+        cl_pfn_pool_dtor_t              pfn_dtor;
+        const void                              *context;
+
+} cl_pool_t;
+
+

FIELDS

+
       qcpool
+               Quick composite pool that manages all objects.
+
+       pfn_init
+               Pointer to the user's initializer callback, used by the pool
+               to translate the quick composite pool's initializer callback to
+               a pool initializer callback.
+
+       pfn_dtor
+               Pointer to the user's destructor callback, used by the pool
+               to translate the quick composite pool's destructor callback to
+               a pool destructor callback.
+
+       context
+               User-provided context for callback functions, used by the pool
+               when invoking callbacks.
+
+

SEE ALSO

+
       Pool
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_ptr_vector_h.html b/branches/Ndi/docs/complib/cl_ptr_vector_h.html new file mode 100644 index 00000000..f9419dc2 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_ptr_vector_h.html @@ -0,0 +1,890 @@ + + + + +./inc_docs/complib/cl_ptr_vector_h.html + + + + +Generated from ./inc/complib/cl_ptr_vector.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Pointer Vector

+ +

[top][parent][index]

+

NAME

+
       Pointer Vector
+
+

DESCRIPTION

+
       The Pointer Vector is a self-sizing array of pointers. Like a traditional
+       array, a pointer vector allows efficient constant time access to elements
+       with a specified index.  A pointer vector grows transparently as the
+       user adds elements to the array.
+
+       The cl_ptr_vector_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
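+
+       A brief usage sketch; my_vector is a caller variable and my_ptr stands
+       in for whatever pointer the caller wants to store:
+
+               cl_ptr_vector_t my_vector;
+               void            *my_ptr = NULL; /* caller's pointer */
+               cl_status_t     status;
+
+               cl_ptr_vector_construct( &my_vector );
+
+               /* Room for 8 pointers initially, grow 4 at a time. */
+               status = cl_ptr_vector_init( &my_vector, 8, 4 );
+               if( status == CL_SUCCESS )
+               {
+                       status = cl_ptr_vector_insert( &my_vector, my_ptr, NULL );
+                       /* ... */
+                       cl_ptr_vector_destroy( &my_vector );
+               }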
+
+

SEE ALSO

+
       Structures:
+               cl_ptr_vector_t
+
+       Callbacks:
+               cl_pfn_ptr_vec_apply_t, cl_pfn_ptr_vec_find_t
+
+       Item Manipulation:
+               cl_ptr_vector_set_obj, cl_ptr_vector_obj
+
+       Initialization:
+               cl_ptr_vector_construct, cl_ptr_vector_init, cl_ptr_vector_destroy
+
+       Manipulation:
+               cl_ptr_vector_get_capacity, cl_ptr_vector_set_capacity,
+               cl_ptr_vector_get_size, cl_ptr_vector_set_size, cl_ptr_vector_set_min_size,
+               cl_ptr_vector_get_ptr, cl_ptr_vector_get, cl_ptr_vector_at, cl_ptr_vector_set
+
+       Search:
+               cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end,
+               cl_ptr_vector_apply_func
+
+
+
+ +

[Definitions] +Component Library: Pointer Vector/cl_pfn_ptr_vec_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_ptr_vec_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_ptr_vec_apply_t function type defines the prototype for
+       functions used to iterate elements in a pointer vector.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_ptr_vec_apply_t)(
+        IN      const size_t            index,
+        IN      void* const                     element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       index
+               [in] Index of the element.
+
+       element
+               [in] Pointer stored at the specified index in the pointer vector.
+
+       context
+               [in] Context provided in a call to cl_ptr_vector_apply_func.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function passed by users as a parameter to the cl_ptr_vector_apply_func
+       function.
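+
+       A minimal callback sketch for a user-mode build; my_print_element is a
+       hypothetical name:
+
+               #include <stdio.h>
+
+               static void CL_API
+               my_print_element(
+                       IN      const size_t    index,
+                       IN      void* const     element,
+                       IN      void*           context )
+               {
+                       (void)context;  /* unused in this sketch */
+
+                       /* Report each stored pointer and its index. */
+                       printf( "[%lu] %p\n", (unsigned long)index, element );
+               }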
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_apply_func
+
+
+
+ +

[Definitions] +Component Library: Pointer Vector/cl_pfn_ptr_vec_find_t

+ +

[top][index]

+

NAME

+
       cl_pfn_ptr_vec_find_t
+
+

DESCRIPTION

+
       The cl_pfn_ptr_vec_find_t function type defines the prototype for
+       functions used to find elements in a pointer vector.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_ptr_vec_find_t)(
+        IN      const size_t            index,
+        IN      const void* const       element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       index
+               [in] Index of the element.
+
+       element
+               [in] Pointer stored at the specified index in the
+               pointer vector.
+
+       context
+               [in] Context provided in a call to cl_ptr_vector_find_from_start or
+               cl_ptr_vector_find_from_end.
+
+ RETURN VALUES
+       Return CL_SUCCESS if the element was found. This stops pointer vector
+       iteration.
+
+       Return CL_NOT_FOUND to continue the pointer vector iteration.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the
+       cl_ptr_vector_find_from_start and cl_ptr_vector_find_from_end functions.
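+
+       A minimal match callback sketch (my_match is a hypothetical name); in
+       this sketch the context carries the pointer being searched for:
+
+               static cl_status_t CL_API
+               my_match(
+                       IN      const size_t            index,
+                       IN      const void* const       element,
+                       IN      void*                   context )
+               {
+                       (void)index;
+
+                       /* Stop iterating when the stored pointer matches. */
+                       if( element == context )
+                               return( CL_SUCCESS );
+
+                       return( CL_NOT_FOUND );
+               }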
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_apply_func

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_apply_func
+
+

DESCRIPTION

+
       The cl_ptr_vector_apply_func function invokes a specified function for
+       every element in a pointer vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_ptr_vector_apply_func(
+        IN      const cl_ptr_vector_t* const    p_vector,
+        IN      cl_pfn_ptr_vec_apply_t                  pfn_callback,
+        IN      const void* const                               context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose elements to iterate.
+
+       pfn_callback
+               [in] Function invoked for every element in the array.
+               See the cl_pfn_ptr_vec_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_ptr_vector_apply_func invokes the specified function for every element
+       in the pointer vector, starting from the beginning of the pointer vector.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end,
+       cl_pfn_ptr_vec_apply_t
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_at

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_at
+
+

DESCRIPTION

+
       The cl_ptr_vector_at function copies an element stored in a pointer
+       vector at a specified index, performing boundary checks.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_at(
+        IN      const cl_ptr_vector_t* const    p_vector,
+        IN      const size_t                                    index,
+        OUT     void** const                                    p_element );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure from which to get a copy of
+               an element.
+
+       index
+               [in] Index of the element.
+
+       p_element
+               [out] Pointer to storage for the pointer element. Contains a copy of
+               the desired pointer upon successful completion of the call.
+
+ RETURN VALUES
+       CL_SUCCESS if an element was found at the specified index.
+
+       CL_INVALID_SETTING if the index was out of range.
+
+

NOTES

+
       cl_ptr_vector_at provides constant time access regardless of
+       the index, and performs boundary checking on the pointer vector.
+
+       Upon success, the p_element parameter contains a copy of the
+       desired element.
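+
+       A short usage sketch; my_vector is an initialized pointer vector and
+       use_element is a hypothetical helper:
+
+               void            *p_elem;
+               cl_status_t     status;
+
+               /* Bounds-checked access; fails rather than asserting. */
+               status = cl_ptr_vector_at( &my_vector, 5, &p_elem );
+               if( status == CL_SUCCESS )
+                       use_element( p_elem );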
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_get
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_construct

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_construct
+
+

DESCRIPTION

+
       The cl_ptr_vector_construct function constructs a pointer vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_ptr_vector_construct(
+        IN      cl_ptr_vector_t* const  p_vector );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_ptr_vector_destroy without first calling
+       cl_ptr_vector_init.
+
+       Calling cl_ptr_vector_construct is a prerequisite to calling any other
+       pointer vector function except cl_ptr_vector_init.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_init, cl_ptr_vector_destroy
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_destroy

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_destroy
+
+

DESCRIPTION

+
       The cl_ptr_vector_destroy function destroys a pointer vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_ptr_vector_destroy(
+        IN      cl_ptr_vector_t* const  p_vector );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_ptr_vector_destroy frees all memory allocated for the pointer vector.
+
+       This function should only be called after a call to cl_ptr_vector_construct
+       or cl_ptr_vector_init.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_construct, cl_ptr_vector_init
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_find_from_end

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_find_from_end
+
+

DESCRIPTION

+
       The cl_ptr_vector_find_from_end function uses a specified function to
+       search for elements in a pointer vector starting from the highest index.
+
+

SYNOPSIS

+
CL_EXPORT size_t CL_API
+cl_ptr_vector_find_from_end(
+        IN      const cl_ptr_vector_t* const    p_vector,
+        IN      cl_pfn_ptr_vec_find_t                   pfn_callback,
+        IN      const void* const                               context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure to search.
+
+       pfn_callback
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_ptr_vec_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+ RETURN VALUES
+       Index of the element, if found.
+
+       Size of the pointer vector if the element was not found.
+
+

NOTES

+
       cl_ptr_vector_find_from_end does not remove the found element from
+       the pointer vector. The index of the element is returned when the function
+       provided by the pfn_callback parameter returns CL_SUCCESS.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_apply_func,
+       cl_pfn_ptr_vec_find_t
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_find_from_start

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_find_from_start
+
+

DESCRIPTION

+
       The cl_ptr_vector_find_from_start function uses a specified function to
+       search for elements in a pointer vector starting from the lowest index.
+
+

SYNOPSIS

+
CL_EXPORT size_t CL_API
+cl_ptr_vector_find_from_start(
+        IN      const cl_ptr_vector_t* const    p_vector,
+        IN      cl_pfn_ptr_vec_find_t                   pfn_callback,
+        IN      const void* const                               context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure to search.
+
+       pfn_callback
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_ptr_vec_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+ RETURN VALUES
+       Index of the element, if found.
+
+       Size of the pointer vector if the element was not found.
+
+

NOTES

+
       cl_ptr_vector_find_from_start does not remove the found element from
+       the pointer vector. The index of the element is returned when the function
+       provided by the pfn_callback parameter returns CL_SUCCESS.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_find_from_end, cl_ptr_vector_apply_func,
+       cl_pfn_ptr_vec_find_t
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_get

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_get
+
+

DESCRIPTION

+
       The cl_ptr_vector_get function returns the pointer stored in a
+       pointer vector at a specified index.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_ptr_vector_get(
+        IN      const cl_ptr_vector_t* const    p_vector,
+        IN      const size_t                                    index )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+        CL_ASSERT( p_vector->size > index );
+
+        return( (void*)p_vector->p_ptr_array[index] );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure from which to get an
+               element.
+
+       index
+               [in] Index of the element.
+
+

RETURN VALUE

+
       Value of the pointer stored at the specified index.
+
+

NOTES

+
       cl_ptr_vector_get provides constant access times regardless of the index.
+
+       cl_ptr_vector_get does not perform boundary checking. Callers are
+       responsible for providing an index that is within the range of the pointer
+       vector.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_at, cl_ptr_vector_set, cl_ptr_vector_get_size
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_get_capacity

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_get_capacity
+
+

DESCRIPTION

+
       The cl_ptr_vector_get_capacity function returns the capacity of
+       a pointer vector.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_ptr_vector_get_capacity(
+        IN      const cl_ptr_vector_t* const    p_vector )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+        return( p_vector->capacity );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose capacity to return.
+
+

RETURN VALUE

+
       Capacity, in elements, of the pointer vector.
+
+

NOTES

+
       The capacity is the number of elements that the pointer vector can store,
+       and can be greater than the number of elements stored. To get the number
+       of elements stored in the pointer vector, use cl_ptr_vector_get_size.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_set_capacity, cl_ptr_vector_get_size
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_get_size

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_get_size
+
+

DESCRIPTION

+
       The cl_ptr_vector_get_size function returns the size of a pointer vector.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_ptr_vector_get_size(
+        IN      const cl_ptr_vector_t* const    p_vector )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_UNINITIALIZED ||
+                p_vector->state == CL_INITIALIZED );
+
+        return( p_vector->size );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose size to return.
+
+

RETURN VALUE

+
       Size, in elements, of the pointer vector.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_set_size, cl_ptr_vector_get_capacity
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_init

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_init
+
+

DESCRIPTION

+
       The cl_ptr_vector_init function initializes a pointer vector for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_init(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    min_cap,
+        IN      const size_t                    grow_size );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure to initialize.
+
+       min_cap
+               [in] Initial number of elements the vector will support.
+               The vector is always initialized with a size of zero.
+
+       grow_size
+               [in] Number of elements to allocate when incrementally growing
+               the pointer vector.  A value of zero disables automatic growth.
+
+ RETURN VALUES
+       CL_SUCCESS if the pointer vector was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if the initialization failed.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_construct, cl_ptr_vector_destroy,
+       cl_ptr_vector_set, cl_ptr_vector_get, cl_ptr_vector_at
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_insert

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_insert
+
+

DESCRIPTION

+
       The cl_ptr_vector_insert function inserts an element into a pointer vector.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_ptr_vector_insert(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const void* const               element,
+        OUT     size_t* const                   p_index OPTIONAL )
+{
+        cl_status_t             status;
+
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+        status = cl_ptr_vector_set( p_vector, p_vector->size, element );
+        if( status == CL_SUCCESS && p_index )
+                *p_index = p_vector->size - 1;
+
+        return( status );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure into which to store
+               an element.
+
+       element
+               [in] Pointer to store in the pointer vector.
+
+       p_index
+               [out] Pointer to the index of the element.  Valid only if
+               insertion was successful.  This parameter is optional and
+               may be NULL.
+
+ RETURN VALUES
+       CL_SUCCESS if the element was successfully inserted.
+
+       CL_INSUFFICIENT_MEMORY if the pointer vector could not be resized to
+       accommodate the new element.
+
+

NOTES

+
       cl_ptr_vector_insert places the new element at the end of
+       the pointer vector.
+
+       cl_ptr_vector_insert grows the pointer vector as needed to accommodate
+       the new element, unless the grow_size parameter passed into the
+       cl_ptr_vector_init function was zero.
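+
+       For example (my_vector is an initialized vector and p_item is a
+       hypothetical pointer to store):
+
+               size_t          index;
+               cl_status_t     status;
+
+               /* Append at the end; index receives the new position. */
+               status = cl_ptr_vector_insert( &my_vector, p_item, &index );
+               if( status != CL_SUCCESS )
+                       return( status );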
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_remove, cl_ptr_vector_set
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_remove

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_remove
+
+

DESCRIPTION

+
       The cl_ptr_vector_remove function removes and returns the pointer stored
+       in a pointer vector at a specified index.  Items beyond the removed item
+       are shifted down and the size of the pointer vector is decremented.
+
+

SYNOPSIS

+
CL_EXPORT void* CL_API
+cl_ptr_vector_remove(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    index );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure from which to remove
+               an element.
+
+       index
+               [in] Index of the element.
+
+

RETURN VALUE

+
       Value of the pointer stored at the specified index.
+
+

NOTES

+
       cl_ptr_vector_remove does not perform boundary checking. Callers are
+       responsible for providing an index that is within the range of the pointer
+       vector.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_insert, cl_ptr_vector_get_size
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_set

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_set
+
+

DESCRIPTION

+
       The cl_ptr_vector_set function sets the element at the specified index.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    index,
+        IN      const void* const               element );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure into which to store
+               an element.
+
+       index
+               [in] Index of the element.
+
+       element
+               [in] Pointer to store in the pointer vector.
+
+ RETURN VALUES
+       CL_SUCCESS if the element was successfully set.
+
+       CL_INSUFFICIENT_MEMORY if the pointer vector could not be resized to
+       accommodate the new element.
+
+

NOTES

+
       cl_ptr_vector_set grows the pointer vector as needed to accommodate
+       the new element, unless the grow_size parameter passed into the
+       cl_ptr_vector_init function was zero.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_get
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_set_capacity

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_set_capacity
+
+

DESCRIPTION

+
       The cl_ptr_vector_set_capacity function reserves memory in a
+       pointer vector for a specified number of pointers.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_capacity(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    new_capacity );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose capacity to set.
+
+       new_capacity
+               [in] Total number of elements for which the pointer vector should
+               allocate memory.
+
+ RETURN VALUES
+       CL_SUCCESS if the capacity was successfully set.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to satisfy the
+       operation. The pointer vector is left unchanged.
+
+

NOTES

+
       cl_ptr_vector_set_capacity increases the capacity of the pointer vector.
+       It does not change the size of the pointer vector. If the requested
+       capacity is less than the current capacity, the pointer vector is left
+       unchanged.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_get_capacity, cl_ptr_vector_set_size,
+       cl_ptr_vector_set_min_size
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_set_min_size

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_set_min_size
+
+

DESCRIPTION

+
       The cl_ptr_vector_set_min_size function resizes a pointer vector to a
+       specified size if the pointer vector is smaller than the specified size.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_min_size(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    min_size );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose minimum size to set.
+
+       min_size
+               [in] Minimum number of elements that the pointer vector should contain.
+
+ RETURN VALUES
+       CL_SUCCESS if the pointer vector size is greater than or equal to min_size.
+       This could indicate that the pointer vector's size was increased to
+       min_size or that the pointer vector was already of sufficient size.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to resize the
+       pointer vector.  The pointer vector is left unchanged.
+
+

NOTES

+
       If min_size is smaller than the current size of the pointer vector,
+       the pointer vector is unchanged. The pointer vector is unchanged if the
+       size could not be changed due to insufficient memory being available to
+       perform the operation.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_get_size, cl_ptr_vector_set_size,
+       cl_ptr_vector_set_capacity
+
+
+
+ +

[Functions] +Component Library: Pointer Vector/cl_ptr_vector_set_size

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_set_size
+
+

DESCRIPTION

+
       The cl_ptr_vector_set_size function resizes a pointer vector, either
+       increasing or decreasing its size.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_size(
+        IN      cl_ptr_vector_t* const  p_vector,
+        IN      const size_t                    size );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_ptr_vector_t structure whose size to set.
+
+       size
+               [in] Number of elements desired in the pointer vector.
+
+ RETURN VALUES
+       CL_SUCCESS if the size of the pointer vector was set successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to complete the
+       operation. The pointer vector is left unchanged.
+
+

NOTES

+
       cl_ptr_vector_set_size sets the pointer vector to the specified size.
+       If size is smaller than the current size of the pointer vector, the size
+       is reduced.
+
+       This function can only fail if size is larger than the current capacity.
+
+

SEE ALSO

+
       Pointer Vector, cl_ptr_vector_get_size, cl_ptr_vector_set_min_size,
+       cl_ptr_vector_set_capacity
+
+
+
+ +

[Structures] +Component Library: Pointer Vector/cl_ptr_vector_t

+ +

[top][index]

+

NAME

+
       cl_ptr_vector_t
+
+

DESCRIPTION

+
       Pointer Vector structure.
+
+       The cl_ptr_vector_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_ptr_vector
+{
+        size_t                          size;
+        size_t                          grow_size;
+        size_t                          capacity;
+        const void                      **p_ptr_array;
+        cl_state_t                      state;
+
+} cl_ptr_vector_t;
+
+

FIELDS

+
       size
+                Number of elements successfully initialized in the pointer vector.
+
+       grow_size
+                Number of elements to allocate when growing.
+
+       capacity
+                Total number of elements allocated.
+
+       p_ptr_array
+                Internal array of pointers to elements.
+
+       state
+               State of the pointer vector.
+
+

SEE ALSO

+
       Pointer Vector
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_qcomppool_h.html b/branches/Ndi/docs/complib/cl_qcomppool_h.html new file mode 100644 index 00000000..a89206ab --- /dev/null +++ b/branches/Ndi/docs/complib/cl_qcomppool_h.html @@ -0,0 +1,740 @@ + + + + +./inc_docs/complib/cl_qcomppool_h.html + + + + +Generated from ./inc/complib/cl_qcomppool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Quick Composite Pool

+ +

[top][parent][index]

+

NAME

+
       Quick Composite Pool
+
+

DESCRIPTION

+
       The Quick Composite Pool provides a self-contained and self-sustaining
+       pool of user defined composite objects.
+
+       A composite object is an object that is composed of one or more
+       sub-objects, each of which needs to be treated separately for
+       initialization. Objects can be retrieved from the pool as long as there
+       is memory in the system.
+
+       To aid in object oriented design, the Quick Composite Pool provides users
+       the ability to specify callbacks that are invoked for each object for
+       construction, initialization, and destruction. Constructor and destructor
+       callback functions may not fail.
+
+       A Quick Composite Pool does not return memory to the system as the user
+       returns objects to the pool. The only method of returning memory to the
+       system is to destroy the pool.
+
+       The Quick Composite Pool operates on cl_pool_item_t structures that
+       describe composite objects. This provides for more efficient memory use.
+       If using a cl_pool_item_t is not desired, the Composite Pool provides
+       similar functionality but operates on opaque objects.
+
+       The Quick Composite Pool functions operate on a cl_qcpool_t structure
+       which should be treated as opaque and should be manipulated only through
+       the provided functions.
+
+

SEE ALSO

+
       Structures:
+               cl_qcpool_t, cl_pool_item_t
+
+       Callbacks:
+               cl_pfn_qcpool_init_t, cl_pfn_qcpool_dtor_t
+
+       Initialization/Destruction:
+               cl_qcpool_construct, cl_qcpool_init, cl_qcpool_destroy
+
+       Manipulation:
+               cl_qcpool_get, cl_qcpool_put, cl_qcpool_put_list, cl_qcpool_grow
+
+       Attributes:
+               cl_is_qcpool_inited, cl_qcpool_count
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_is_qcpool_inited

+ +

[top][index]

+

NAME

+
       cl_is_qcpool_inited
+
+

DESCRIPTION

+
       The cl_is_qcpool_inited function returns whether a quick composite pool was
+       successfully initialized.
+
+

SYNOPSIS

+
CL_INLINE uint32_t CL_API
+cl_is_qcpool_inited(
+        IN      const cl_qcpool_t* const        p_pool )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_pool );
+        /* CL_ASSERT that the pool is not in some invalid state. */
+        CL_ASSERT( cl_is_state_valid( p_pool->state ) );
+
+        return( p_pool->state == CL_INITIALIZED );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure to check.
+
+ RETURN VALUES
+       TRUE if the quick composite pool was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a quick composite pool to determine if
+       invoking member functions is appropriate.
+
+

SEE ALSO

+
       Quick Composite Pool
+
+
+
+ +

[Definitions] +Component Library: Quick Composite Pool/cl_pfn_qcpool_dtor_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qcpool_dtor_t
+
+

DESCRIPTION

+
       The cl_pfn_qcpool_dtor_t function type defines the prototype for
+       functions used as destructor for objects being deallocated by a
+       quick composite pool.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_qcpool_dtor_t)(
+        IN      const cl_pool_item_t* const     p_pool_item,
+        IN      void*                                           context );
+
+

PARAMETERS

+
       p_pool_item
+               [in] Pointer to a cl_pool_item_t structure representing an object.
+
+       context
+               [in] Context provided in a call to cl_qcpool_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_qcpool_init function.
+
+       The destructor is invoked once per allocated object, allowing the user
+       to perform any necessary cleanup. Users should not attempt to deallocate
+       the memory for the composite object, as the quick composite pool manages
+       object allocation and deallocation.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_init
+
+
+
+ +

[Definitions] +Component Library: Quick Composite Pool/cl_pfn_qcpool_init_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qcpool_init_t
+
+

DESCRIPTION

+
       The cl_pfn_qcpool_init_t function type defines the prototype for
+       functions used as initializer for objects being allocated by a
+       quick composite pool.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_qcpool_init_t)(
+        IN      void** const                    p_comp_array,
+        IN      const uint32_t                  num_components,
+        IN      void*                                   context,
+        OUT     cl_pool_item_t** const  pp_pool_item );
+
+

PARAMETERS

+
       p_comp_array
+               [in] Pointer to the first entry in an array of pointers, each of
+               which points to a component that makes up a composite object.
+
+       num_components
+               [in] Number of components in the component array.
+
+       context
+               [in] Context provided in a call to cl_qcpool_init.
+
+       pp_pool_item
+               [out] Users should set this pointer to reference the cl_pool_item_t
+               structure that represents the composite object.  This pointer must
+               not be NULL if the function returns CL_SUCCESS.
+
+

RETURN VALUE

+
       Return CL_SUCCESS to indicate that initialization of the object
+       was successful and that initialization of further objects may continue.
+
+       Other cl_status_t values will be returned by cl_qcpool_init
+       and cl_qcpool_grow.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as a parameter to the
+       cl_qcpool_init function.
+
+       The initializer is invoked once per allocated object, allowing the user
+       to chain components to form a composite object and perform any necessary
+       initialization.  Returning a status other than CL_SUCCESS aborts a grow
+       operation, initiated either through cl_qcpool_init or cl_qcpool_grow,
+       and causes the initiating function to fail.  Any non-CL_SUCCESS status
+       will be returned by the function that initiated the grow operation.
+
+       All memory for the requested number of components is pre-allocated.  Users
+       should include space in one of their components for the cl_pool_item_t
+       structure that will represent the composite object to avoid having to
+       allocate that structure in the initialization callback.  Alternatively,
+       users may specify an additional component for the cl_pool_item_t structure.
+
+       When later performing a cl_qcpool_get call, the return value is a pointer
+       to the cl_pool_item_t returned by this function in the pp_pool_item
+       parameter. Users must set pp_pool_item to a valid pointer to the
+       cl_pool_item_t representing the object if they return CL_SUCCESS.
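+
+       The following sketch shows an initializer for a two-component object
+       whose first component reserves space for the cl_pool_item_t, as
+       suggested above.  All my_* names are hypothetical:
+
+               typedef struct _my_hdr
+               {
+                       cl_pool_item_t  pool_item;      /* returned to pool */
+                       void            *p_payload;     /* second component */
+               } my_hdr_t;
+
+               static cl_status_t CL_API
+               my_composite_init(
+                       IN      void** const            p_comp_array,
+                       IN      const uint32_t          num_components,
+                       IN      void*                   context,
+                       OUT     cl_pool_item_t** const  pp_pool_item )
+               {
+                       my_hdr_t *p_hdr = (my_hdr_t*)p_comp_array[0];
+
+                       (void)context;
+                       CL_ASSERT( num_components == 2 );
+
+                       /* Chain the second component off the header. */
+                       p_hdr->p_payload = p_comp_array[1];
+
+                       /* Hand the pool its item for this object. */
+                       *pp_pool_item = &p_hdr->pool_item;
+                       return( CL_SUCCESS );
+               }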
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_init
+
+
+
+ +

[Structures] +Component Library: Quick Composite Pool/cl_pool_item_t

+ +

[top][index]

+

NAME

+
       cl_pool_item_t
+
+

DESCRIPTION

+
       The cl_pool_item_t structure is used by pools to store objects.
+
+

SYNOPSIS

+
typedef struct _cl_pool_item
+{
+        cl_list_item_t          list_item;
+#ifdef _DEBUG_
+        /* Pad to make the cl_pool_obj structure line up properly */
+        void                            *pad;
+        /* Pointer to the owner pool used for sanity checks. */
+        struct _cl_qcpool       *p_pool;
+#endif
+
+} cl_pool_item_t;
+
+

FIELDS

+
       list_item
+               Used internally by the pool. Users should not use this field.
+
+       p_pool
+               Used internally by the pool in debug builds to check for consistency.
+
+

NOTES

+
       The pool item structure is defined in such a way as to safely allow
+       users to cast from a pool item to a list item for storing items
+       retrieved from a quick pool in a quick list.
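+
+       For example, an object retrieved from a quick composite pool may be
+       queued on a quick list directly (my_pool and my_list are hypothetical,
+       initialized objects):
+
+               cl_pool_item_t  *p_item;
+
+               p_item = cl_qcpool_get( &my_pool );
+               if( p_item )
+               {
+                       /* Safe cast: list_item is first in cl_pool_item_t. */
+                       cl_qlist_insert_tail( &my_list,
+                               (cl_list_item_t*)p_item );
+               }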
+
+

SEE ALSO

+
       Quick Composite Pool, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_construct

+ +

[top][index]

+

NAME

+
       cl_qcpool_construct
+
+

DESCRIPTION

+
       The cl_qcpool_construct function constructs a quick composite pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qcpool_construct(
+        IN      cl_qcpool_t* const      p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_qcpool_init, cl_qcpool_destroy, and cl_is_qcpool_inited.
+
+       Calling cl_qcpool_construct is a prerequisite to calling any other
+       quick composite pool function except cl_qcpool_init.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_init, cl_qcpool_destroy,
+       cl_is_qcpool_inited
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_count

+ +

[top][index]

+

NAME

+
       cl_qcpool_count
+
+

DESCRIPTION

+
       The cl_qcpool_count function returns the number of available objects
+       in a quick composite pool.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_qcpool_count(
+        IN      cl_qcpool_t* const      p_pool )
+{
+        CL_ASSERT( p_pool );
+        CL_ASSERT( p_pool->state == CL_INITIALIZED );
+
+        return( cl_qlist_count( &p_pool->free_list ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure for which the number of
+               available objects is requested.
+
+

RETURN VALUE

+
       Returns the number of objects available in the specified
+       quick composite pool.
+
+

SEE ALSO

+
       Quick Composite Pool
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_destroy

+ +

[top][index]

+

NAME

+
       cl_qcpool_destroy
+
+

DESCRIPTION

+
       The cl_qcpool_destroy function destroys a quick composite pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qcpool_destroy(
+        IN      cl_qcpool_t* const      p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       All memory allocated for composite objects is freed. The destructor
+       callback, if any, will be invoked for every allocated object. Further
+       operations on the composite pool should not be attempted after
+       cl_qcpool_destroy is invoked.
+
+       This function should only be called after a call to
+       cl_qcpool_construct or cl_qcpool_init.
+
+       In a debug build, cl_qcpool_destroy asserts that all objects are in
+       the pool.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_construct, cl_qcpool_init
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_get

+ +

[top][index]

+

NAME

+
       cl_qcpool_get
+
+

DESCRIPTION

+
       The cl_qcpool_get function retrieves an object from a
+       quick composite pool.
+
+

SYNOPSIS

+
CL_EXPORT cl_pool_item_t* CL_API
+cl_qcpool_get(
+        IN      cl_qcpool_t* const      p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure from which to retrieve
+               an object.
+
+ RETURN VALUES
+       Returns a pointer to a cl_pool_item_t for a composite object.
+
+       Returns NULL if the pool is empty and can not be grown automatically.
+
+

NOTES

+
       cl_qcpool_get returns the object at the head of the pool. If the pool is
+       empty, it is automatically grown to accommodate this request unless the
+       grow_size parameter passed to the cl_qcpool_init function was zero.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_get_tail, cl_qcpool_put,
+       cl_qcpool_grow, cl_qcpool_count
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_grow

+ +

[top][index]

+

NAME

+
       cl_qcpool_grow
+
+

DESCRIPTION

+
       The cl_qcpool_grow function grows a quick composite pool by
+       the specified number of objects.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_qcpool_grow(
+        IN      cl_qcpool_t* const              p_pool,
+        IN      size_t                                  obj_count );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure whose capacity to grow.
+
+       obj_count
+               [in] Number of objects by which to grow the pool.
+
+ RETURN VALUES
+       CL_SUCCESS if the quick composite pool grew successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+       quick composite pool.
+
+       Any cl_status_t value returned by the optional initialization callback
+       function specified by the pfn_initializer parameter passed to the
+       cl_qcpool_init function.
+
+

NOTES

+
       It is not necessary to call cl_qcpool_grow if the pool is
+       configured to grow automatically.
+
+

SEE ALSO

+
       Quick Composite Pool
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_init

+ +

[top][index]

+

NAME

+
       cl_qcpool_init
+
+

DESCRIPTION

+
       The cl_qcpool_init function initializes a quick composite pool for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_qcpool_init(
+        IN      cl_qcpool_t* const              p_pool,
+        IN      const size_t                    min_size,
+        IN      const size_t                    max_size,
+        IN      const size_t                    grow_size,
+        IN      const size_t* const             component_sizes,
+        IN      const uint32_t                  num_components,
+        IN      cl_pfn_qcpool_init_t    pfn_initializer OPTIONAL,
+        IN      cl_pfn_qcpool_dtor_t    pfn_destructor OPTIONAL,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure to initialize.
+
+       min_size
+               [in] Minimum number of objects that the pool should support. All
+               necessary allocations to allow storing the minimum number of items
+               are performed at initialization time, and all necessary callbacks
+               successfully invoked.
+
+       max_size
+               [in] Maximum number of objects to which the pool is allowed to grow.
+               A value of zero specifies no maximum.
+
+       grow_size
+               [in] Number of objects to allocate when incrementally growing the pool.
+               A value of zero disables automatic growth.
+
+       component_sizes
+               [in] Pointer to the first entry in an array of sizes describing,
+               in order, the sizes of the components that make up a composite object.
+
+       num_components
+               [in] Number of components that make up a composite object.
+
+       pfn_initializer
+               [in] Initializer callback to invoke for every new object when growing
+               the pool. This parameter may be NULL only if the objects stored in
+               the quick composite pool consist of only one component. If NULL, the
+               pool assumes the cl_pool_item_t structure describing objects is
+               located at the head of each object. See the cl_pfn_qcpool_init_t
+               function type declaration for details about the callback function.
+
+       pfn_destructor
+               [in] Destructor callback to invoke for every object before memory for
+               that object is freed. This parameter is optional and may be NULL.
+               See the cl_pfn_qcpool_dtor_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the quick composite pool was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+       quick composite pool.
+
+       CL_INVALID_SETTING if a NULL initializer was provided for composite objects
+       consisting of more than one component.  Also returns CL_INVALID_SETTING if
+       the maximum size is non-zero and less than the minimum size.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function specified by the pfn_initializer parameter.
+
+       If initialization fails, the pool is left in a destroyed state.  Callers
+       may still safely call cl_qcpool_destroy.
+
+

NOTES

+
       cl_qcpool_init initializes, and if necessary, grows the pool to
+       the capacity desired.
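+
+       An illustrative sketch of a pool of two-component objects, using the
+       hypothetical my_hdr_t and my_payload_t types and the my_composite_init
+       callback sketched under cl_pfn_qcpool_init_t:
+
+               cl_qcpool_t     my_pool;
+               size_t          sizes[2];
+               cl_status_t     status;
+
+               sizes[0] = sizeof(my_hdr_t);
+               sizes[1] = sizeof(my_payload_t);
+
+               cl_qcpool_construct( &my_pool );
+
+               /* 4 objects up front, no maximum, grow 2 at a time. */
+               status = cl_qcpool_init( &my_pool, 4, 0, 2, sizes, 2,
+                       my_composite_init, NULL, NULL );
+               if( status != CL_SUCCESS )
+                       return( status );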
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_construct, cl_qcpool_destroy,
+       cl_qcpool_get, cl_qcpool_put, cl_qcpool_grow,
+       cl_qcpool_count, cl_pfn_qcpool_init_t, cl_pfn_qcpool_dtor_t
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_put

+ +

[top][index]

+

NAME

+
       cl_qcpool_put
+
+

DESCRIPTION

+
       The cl_qcpool_put function returns an object to a quick composite pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qcpool_put(
+        IN      cl_qcpool_t* const              p_pool,
+        IN      cl_pool_item_t* const   p_pool_item )
+{
+        CL_ASSERT( p_pool );
+        CL_ASSERT( p_pool->state == CL_INITIALIZED );
+        CL_ASSERT( p_pool_item );
+        /* Make sure items being returned came from the specified pool. */
+        CL_ASSERT( p_pool_item->p_pool == p_pool );
+
+        /* return this lil' doggy to the pool */
+        cl_qlist_insert_head( &p_pool->free_list, &p_pool_item->list_item );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure to which to return
+               an object.
+
+       p_pool_item
+               [in] Pointer to a cl_pool_item_t structure for the object
+               being returned.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_qcpool_put places the returned object at the head of the pool.
+
+       The object specified by the p_pool_item parameter must have been
+       retrieved from the pool by a previous call to cl_qcpool_get.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_put_tail, cl_qcpool_get
+
+
+
+ +

[Functions] +Component Library: Quick Composite Pool/cl_qcpool_put_list

+ +

[top][index]

+

NAME

+
       cl_qcpool_put_list
+
+

DESCRIPTION

+
       The cl_qcpool_put_list function returns a list of objects to the head of
+       a quick composite pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qcpool_put_list(
+        IN      cl_qcpool_t* const      p_pool,
+        IN      cl_qlist_t* const       p_list )
+{
+#ifdef _DEBUG_
+        cl_list_item_t  *p_item;
+#endif
+
+        CL_ASSERT( p_pool );
+        CL_ASSERT( p_pool->state == CL_INITIALIZED );
+        CL_ASSERT( p_list );
+
+#ifdef _DEBUG_
+        /* Check that all items in the list came from this pool. */
+        p_item = cl_qlist_head( p_list );
+        while( p_item != cl_qlist_end( p_list ) )
+        {
+                CL_ASSERT( ((cl_pool_item_t*)p_item)->p_pool == p_pool );
+                p_item = cl_qlist_next( p_item );
+        }
+#endif
+
+        /* return these lil' doggies to the pool */
+        cl_qlist_insert_list_head( &p_pool->free_list, p_list );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qcpool_t structure to which to return
+               a list of objects.
+
+       p_list
+               [in] Pointer to a cl_qlist_t structure for the list of objects
+               being returned.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_qcpool_put_list places the returned objects at the head of the pool.
+
+       The objects in the list specified by the p_list parameter must have been
+       retrieved from the pool by a previous call to cl_qcpool_get.
+
+

SEE ALSO

+
       Quick Composite Pool, cl_qcpool_put, cl_qcpool_put_tail, cl_qcpool_get
+
+
+
+ +

[Structures] +Component Library: Quick Composite Pool/cl_qcpool_t

+ +

[top][index]

+

NAME

+
       cl_qcpool_t
+
+

DESCRIPTION

+
       Quick composite pool structure.
+
+       The cl_qcpool_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_qcpool
+{
+        uint32_t                                num_components;
+        size_t                                  *component_sizes;
+        void                                    **p_components;
+        size_t                                  num_objects;
+        size_t                                  max_objects;
+        size_t                                  grow_size;
+        cl_pfn_qcpool_init_t    pfn_init;
+        cl_pfn_qcpool_dtor_t    pfn_dtor;
+        const void                              *context;
+        cl_qlist_t                              free_list;
+        cl_qlist_t                              alloc_list;
+        cl_state_t                              state;
+
+} cl_qcpool_t;
+
+

FIELDS

+
       num_components
+               Number of components per object.
+
+       component_sizes
+               Array of sizes, one for each component.
+
+       p_components
+               Array of pointers to components, used for the constructor callback.
+
+       num_objects
+               Number of objects managed by the pool.
+
+       max_objects
+               Maximum number of objects to which the pool may grow.
+
+       grow_size
+               Number of objects to add when automatically growing the pool.
+
+       pfn_init
+               Pointer to the user's initializer callback to invoke when initializing
+               new objects.
+
+       pfn_dtor
+               Pointer to the user's destructor callback to invoke before deallocating
+               memory allocated for objects.
+
+       context
+               User-provided context for callback functions, used by the pool
+               when invoking callbacks.
+
+       free_list
+               Quick list of objects available.
+
+       alloc_list
+               Quick list used to store information about allocations.
+
+       state
+               State of the pool.
+
+

SEE ALSO

+
       Quick Composite Pool
+
+
+ +
diff --git a/branches/Ndi/docs/complib/cl_qlist_h.html b/branches/Ndi/docs/complib/cl_qlist_h.html
new file mode 100644
index 00000000..e023b863
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_qlist_h.html
@@ -0,0 +1,1728 @@
+./inc_docs/complib/cl_qlist_h.html
+Generated from ./inc/complib/cl_qlist.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51
+
+ +

[Modules] +Component Library/Quick List

+ +

[top][parent][index]

+

NAME

+
       Quick List
+
+

DESCRIPTION

+
       Quick list implements a doubly linked list that stores user-provided
+       cl_list_item_t structures.
+       Quick list does not allocate any memory, and can therefore not fail any
+       operations.  This makes quick list useful for minimizing the error
+       paths in code.
+
+       Quick list is not thread safe, and users must provide serialization when
+       adding and removing items from the list. Note that it is possible to
+       walk a quick list while simultaneously adding to it.
+
+       The Quick List functions operate on a cl_qlist_t structure which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
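+
+       As a minimal sketch (structure and variable names are illustrative,
+       and the include path is assumed), a list item is embedded in a user
+       structure and inserted without any allocation:
+
+               #include <complib/cl_qlist.h>
+
+               typedef struct _my_obj
+               {
+                       cl_list_item_t  list_item;      /* linkage used by the list */
+                       int                             value;
+               } my_obj_t;
+
+               cl_qlist_t      list;
+               my_obj_t        obj;
+
+               cl_qlist_init( &list );
+               obj.value = 42;
+               cl_qlist_insert_tail( &list, &obj.list_item );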
+

SEE ALSO

+
       Structures:
+               cl_qlist_t, cl_list_item_t, cl_list_obj_t
+
+       Callbacks:
+               cl_pfn_qlist_apply_t, cl_pfn_qlist_find_t
+
+       Item Manipulation:
+               cl_qlist_set_obj, cl_qlist_obj
+
+       Initialization:
+               cl_qlist_init
+
+       Iteration:
+               cl_qlist_next, cl_qlist_prev, cl_qlist_head, cl_qlist_tail,
+               cl_qlist_end
+
+       Manipulation:
+               cl_qlist_insert_head, cl_qlist_insert_tail,
+               cl_qlist_insert_list_head, cl_qlist_insert_list_tail,
+               cl_qlist_insert_array_head, cl_qlist_insert_array_tail,
+               cl_qlist_insert_prev, cl_qlist_insert_next,
+               cl_qlist_remove_head, cl_qlist_remove_tail,
+               cl_qlist_remove_item, cl_qlist_remove_all
+
+       Search:
+               cl_is_item_in_qlist, cl_qlist_find_next, cl_qlist_find_prev,
+               cl_qlist_find_from_head, cl_qlist_find_from_tail,
+               cl_qlist_apply_func, cl_qlist_move_items
+
+       Attributes:
+               cl_qlist_count, cl_is_qlist_empty
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_is_item_in_qlist

+ +

[top][index]

+

NAME

+
       cl_is_item_in_qlist
+
+

DESCRIPTION

+
       The cl_is_item_in_qlist function checks for the presence of a
+       list item in a quick list.
+
+

SYNOPSIS

+
CL_EXPORT boolean_t CL_API
+cl_is_item_in_qlist(
+        IN      const cl_qlist_t* const         p_list,
+        IN      const cl_list_item_t* const     p_list_item );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+       p_list_item
+               [in] Pointer to the cl_list_item_t to find.
+
+ RETURN VALUES
+       TRUE if the list item was found in the quick list.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Quick List, cl_qlist_remove_item, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_is_qlist_empty

+ +

[top][index]

+

NAME

+
       cl_is_qlist_empty
+
+

DESCRIPTION

+
       The cl_is_qlist_empty function returns whether a quick list is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_qlist_empty(
+        IN      const cl_qlist_t* const p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        return( !cl_qlist_count( p_list ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+ RETURN VALUES
+       TRUE if the specified quick list is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Quick List, cl_qlist_count, cl_qlist_remove_all
+
+
+
+ +

[Structures] +Component Library: Quick List/cl_list_item_t

+ +

[top][index]

+

NAME

+
       cl_list_item_t
+
+

DESCRIPTION

+
       The cl_list_item_t structure is used by lists to store objects.
+
+

SYNOPSIS

+
typedef struct _cl_list_item
+{
+        struct _cl_list_item    *p_next;
+        struct _cl_list_item    *p_prev;
+#ifdef _DEBUG_
+        struct _cl_qlist                *p_list;
+#endif
+
+} cl_list_item_t;
+
+

FIELDS

+
       p_next
+               Used internally by the list. Users should not use this field.
+
+       p_prev
+               Used internally by the list. Users should not use this field.
+
+

SEE ALSO

+
       Quick List
+
+
+
+ +

[Structures] +Component Library: Quick List/cl_list_obj_t

+ +

[top][index]

+

NAME

+
       cl_list_obj_t
+
+

DESCRIPTION

+
       The cl_list_obj_t structure is used by lists to store a pointer to
+       a user object.
+
+

SYNOPSIS

+
typedef struct _cl_list_obj
+{
+        cl_list_item_t          list_item;
+        const void                      *p_object;              /* User's context */
+
+} cl_list_obj_t;
+
+

FIELDS

+
       list_item
+               Used internally by the list. Users should not use this field.
+
+       p_object
+               User defined context. Users should not access this field directly.
+               Use cl_qlist_set_obj and cl_qlist_obj to set and retrieve the value
+               of this field.
+
+

NOTES

+
       Users can use the cl_qlist_set_obj and cl_qlist_obj functions to store
+       and retrieve context information in the list item.
+
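+
+       A small sketch (my_struct_t is hypothetical; the cast relies on
+       list_item being the first member, as the synopsis above shows):
+
+               cl_list_obj_t   wrapper;
+               my_struct_t             obj;    /* object with no embedded list item */
+               my_struct_t             *p_obj;
+
+               cl_qlist_set_obj( &wrapper, &obj );
+               cl_qlist_insert_tail( &list, (cl_list_item_t*)&wrapper );
+
+               /* Later, recover the stored pointer from the wrapper. */
+               p_obj = (my_struct_t*)cl_qlist_obj( &wrapper );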
+

SEE ALSO

+
       Quick List, cl_qlist_set_obj, cl_qlist_obj, cl_list_item_t
+
+
+
+ +

[Definitions] +Component Library: Quick List/cl_pfn_qlist_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qlist_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_qlist_apply_t function type defines the prototype for functions
+       used to iterate items in a quick list.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_qlist_apply_t)(
+        IN      cl_list_item_t* const   p_list_item,
+        IN      void*                                   context );
+
+

PARAMETERS

+
       p_list_item
+               [in] Pointer to a cl_list_item_t structure.
+
+       context
+               [in] Value passed to the callback function.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_qlist_apply_func
+       function.
+
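+
+       For illustration, a callback matching this prototype (the name is
+       hypothetical) that counts items through the context pointer:
+
+               static void CL_API
+               count_item(
+                       IN      cl_list_item_t* const   p_list_item,
+                       IN      void*                                   context )
+               {
+                       (void)p_list_item;              /* item itself unused */
+                       (*(size_t*)context)++;  /* count every visit */
+               }
+
+       With a size_t counter initialized to zero, it is invoked as
+       cl_qlist_apply_func( &list, count_item, &count ).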
+

SEE ALSO

+
       Quick List, cl_qlist_apply_func
+
+
+
+ +

[Definitions] +Component Library: Quick List/cl_pfn_qlist_find_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qlist_find_t
+
+

DESCRIPTION

+
       The cl_pfn_qlist_find_t function type defines the prototype for functions
+       used to find items in a quick list.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_qlist_find_t)(
+        IN      const cl_list_item_t* const     p_list_item,
+        IN      void*                                           context );
+
+

PARAMETERS

+
       p_list_item
+               [in] Pointer to a cl_list_item_t.
+
+       context
+               [in] Value passed to the callback function.
+
+ RETURN VALUES
+       Return CL_SUCCESS if the desired item was found. This stops list iteration.
+
+       Return CL_NOT_FOUND to continue list iteration.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_qlist_find_from_head,
+       cl_qlist_find_from_tail, cl_qlist_find_next, and cl_qlist_find_prev
+       functions.
+
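+
+       For illustration, a callback matching this prototype (my_obj_t and its
+       value field are carried over from the hypothetical embedding sketch in
+       the module description; PARENT_STRUCT is assumed available to recover
+       the containing structure):
+
+               static cl_status_t CL_API
+               match_value(
+                       IN      const cl_list_item_t* const     p_list_item,
+                       IN      void*                                           context )
+               {
+                       my_obj_t        *p_obj =
+                               PARENT_STRUCT( p_list_item, my_obj_t, list_item );
+
+                       if( p_obj->value == *(int*)context )
+                               return( CL_SUCCESS );   /* stop: item found */
+
+                       return( CL_NOT_FOUND );         /* keep searching */
+               }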
+

SEE ALSO

+
       Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail,
+       cl_qlist_find_next, cl_qlist_find_prev
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_apply_func

+ +

[top][index]

+

NAME

+
       cl_qlist_apply_func
+
+

DESCRIPTION

+
       The cl_qlist_apply_func function executes a specified function
+       for every list item stored in a quick list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_apply_func(
+        IN      const cl_qlist_t* const p_list,
+        IN      cl_pfn_qlist_apply_t    pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+       pfn_func
+               [in] Function invoked for every item in the quick list.
+               See the cl_pfn_qlist_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The function provided must not perform any list operations, as these
+       would corrupt the quick list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail,
+       cl_qlist_move_items, cl_pfn_qlist_apply_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_count

+ +

[top][index]

+

NAME

+
       cl_qlist_count
+
+

DESCRIPTION

+
       The cl_qlist_count function returns the number of list items stored
+       in a quick list.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_qlist_count(
+        IN      const cl_qlist_t* const p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        return( p_list->count );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+

RETURN VALUE

+
       Number of items in the list.  The count is maintained by the list, so
+       this function does not iterate through the items.
+
+

SEE ALSO

+
       Quick List, cl_is_qlist_empty
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_end

+ +

[top][index]

+

NAME

+
       cl_qlist_end
+
+

DESCRIPTION

+
       The cl_qlist_end function returns the end of a quick list.
+
+

SYNOPSIS

+
CL_INLINE const cl_list_item_t* const CL_API
+cl_qlist_end(
+        IN      const cl_qlist_t* const p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        return( &p_list->end );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+

RETURN VALUE

+
       Pointer to the end of the list.
+
+

NOTES

+
       cl_qlist_end is useful for determining the validity of list items returned
+       by cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_prev, as well as
+       the cl_qlist_find functions.  If the list item pointer returned by any of
+       these functions is equal to the end, the end of the list was encountered.
+       When using cl_qlist_head or cl_qlist_tail, this condition indicates that
+       the list is empty.
+
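+
+       The usual traversal idiom, shown as a sketch:
+
+               cl_list_item_t  *p_item;
+
+               for( p_item = cl_qlist_head( &list );
+                        p_item != cl_qlist_end( &list );
+                        p_item = cl_qlist_next( p_item ) )
+               {
+                       /* Process p_item, e.g. recover the parent structure. */
+               }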
+

SEE ALSO

+
       Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_prev,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_find_from_head

+ +

[top][index]

+

NAME

+
       cl_qlist_find_from_head
+
+

DESCRIPTION

+
       The cl_qlist_find_from_head function invokes a specified function to
+       search for an item, starting at the head of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_find_from_head(
+        IN      const cl_qlist_t* const p_list,
+        IN      cl_pfn_qlist_find_t             pfn_func,
+        IN      const void* const               context )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+        /* CL_ASSERT that a find function is provided. */
+        CL_ASSERT( pfn_func );
+
+        return( cl_qlist_find_next( p_list, cl_qlist_end( p_list ), pfn_func,
+                context ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_qlist_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function to provide context.
+
+ RETURN VALUES
+       Pointer to the list item, if found.
+
+       Pointer to the list end otherwise.
+
+

NOTES

+
       cl_qlist_find_from_head does not remove list items from the list.
+       The list item is returned when the function specified by the pfn_func
+       parameter returns CL_SUCCESS.
+
+       The function provided by the pfn_func parameter must not perform any list
+       operations, as these would corrupt the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_find_from_tail, cl_qlist_find_next, cl_qlist_find_prev,
+       cl_qlist_end, cl_qlist_apply_func, cl_qlist_move_items, cl_list_item_t,
+       cl_pfn_qlist_find_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_find_from_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_find_from_tail
+
+

DESCRIPTION

+
       The cl_qlist_find_from_tail function invokes a specified function to
+       search for an item, starting at the tail of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_find_from_tail(
+        IN      const cl_qlist_t* const p_list,
+        IN      cl_pfn_qlist_find_t             pfn_func,
+        IN      const void* const               context )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+        /* CL_ASSERT that a find function is provided. */
+        CL_ASSERT( pfn_func );
+
+        return( cl_qlist_find_prev( p_list, cl_qlist_end( p_list ), pfn_func,
+                context ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_qlist_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function to provide context.
+
+ RETURN VALUES
+       Pointer to the list item, if found.
+
+       Pointer to the list end otherwise.
+
+

NOTES

+
       cl_qlist_find_from_tail does not remove list items from the list.
+       The list item is returned when the function specified by the pfn_func
+       parameter returns CL_SUCCESS.
+
+       The function provided by the pfn_func parameter must not perform any list
+       operations, as these would corrupt the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_find_from_head, cl_qlist_find_next, cl_qlist_find_prev,
+       cl_qlist_apply_func, cl_qlist_end, cl_qlist_move_items, cl_list_item_t,
+       cl_pfn_qlist_find_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_find_next

+ +

[top][index]

+

NAME

+
       cl_qlist_find_next
+
+

DESCRIPTION

+
       The cl_qlist_find_next function invokes a specified function to
+       search for an item, starting from a given list item.
+
+

SYNOPSIS

+
CL_EXPORT cl_list_item_t* CL_API
+cl_qlist_find_next(
+        IN      const cl_qlist_t* const         p_list,
+        IN      const cl_list_item_t* const     p_list_item,
+        IN      cl_pfn_qlist_find_t                     pfn_func,
+        IN      const void* const                       context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure in which to search.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure from which to start the search.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_qlist_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function to provide context.
+
+ RETURN VALUES
+       Pointer to the list item, if found.
+
+       p_list_item if not found.
+
+

NOTES

+
       cl_qlist_find_next does not remove list items from the list.
+       The list item is returned when the function specified by the pfn_func
+       parameter returns CL_SUCCESS.  The list item from which the search starts is
+       excluded from the search.
+
+       The function provided by the pfn_func must not perform any list operations,
+       as these would corrupt the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_find_prev, cl_qlist_find_from_head,
+       cl_qlist_find_from_tail, cl_qlist_end, cl_qlist_apply_func,
+       cl_qlist_move_items, cl_list_item_t, cl_pfn_qlist_find_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_find_prev

+ +

[top][index]

+

NAME

+
       cl_qlist_find_prev
+
+

DESCRIPTION

+
       The cl_qlist_find_prev function invokes a specified function to
+       search backward for an item, starting from a given list item.
+
+

SYNOPSIS

+
CL_EXPORT cl_list_item_t* CL_API
+cl_qlist_find_prev(
+        IN      const cl_qlist_t* const         p_list,
+        IN      const cl_list_item_t* const     p_list_item,
+        IN      cl_pfn_qlist_find_t                     pfn_func,
+        IN      const void* const                       context );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure in which to search.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure from which to start the search.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_qlist_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function to provide context.
+
+ RETURN VALUES
+       Pointer to the list item, if found.
+
+       p_list_item if not found.
+
+

NOTES

+
       cl_qlist_find_prev does not remove list items from the list.
+       The list item is returned when the function specified by the pfn_func
+       parameter returns CL_SUCCESS.  The list item from which the search starts is
+       excluded from the search.
+
+       The function provided by the pfn_func must not perform any list operations,
+       as these would corrupt the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_find_next, cl_qlist_find_from_head,
+       cl_qlist_find_from_tail, cl_qlist_end, cl_qlist_apply_func,
+       cl_qlist_move_items, cl_list_item_t, cl_pfn_qlist_find_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_head

+ +

[top][index]

+

NAME

+
       cl_qlist_head
+
+

DESCRIPTION

+
       The cl_qlist_head function returns the list item at
+       the head of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_head(
+        IN      const cl_qlist_t* const p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        return( cl_qlist_next( &p_list->end ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+ RETURN VALUES
+       Pointer to the list item at the head of the quick list.
+
+       Pointer to the list end if the list was empty.
+
+

NOTES

+
       cl_qlist_head does not remove the item from the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_tail, cl_qlist_next, cl_qlist_prev, cl_qlist_end,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_init

+ +

[top][index]

+

NAME

+
       cl_qlist_init
+
+

DESCRIPTION

+
       The cl_qlist_init function initializes a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_init(
+        IN      cl_qlist_t* const       p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+
+        p_list->state = CL_INITIALIZED;
+
+        /* Reset the quick list data structure. */
+        __cl_qlist_reset( p_list );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure to initialize.
+
+ RETURN VALUE
+       This function does not return a value.
+
+

NOTES

+
       Allows calling quick list manipulation functions.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_head, cl_qlist_insert_tail,
+       cl_qlist_remove_head, cl_qlist_remove_tail
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_array_head

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_array_head
+
+

DESCRIPTION

+
       The cl_qlist_insert_array_head function inserts an array of list items
+       at the head of a quick list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_insert_array_head(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_array,
+        IN      size_t                                  item_count,
+        IN      const size_t                    item_size );
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to insert
+               the objects.
+
+       p_array
+               [in] Pointer to the first list item in an array of cl_list_item_t
+               structures.
+
+       item_count
+               [in] Number of cl_list_item_t structures in the array.
+
+       item_size
+               [in] Size of the items added to the list. This is the stride in the
+               array from one cl_list_item_t structure to the next.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts all the list items in the array specified by the p_array parameter
+       to the head of the quick list specified by the p_list parameter,
+       preserving ordering of the list items.
+
+       The array pointer passed into the function points to the cl_list_item_t
+       in the first element of the caller's element array.  There is no
+       restriction on where the element is stored in the parent structure.
+
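+
+       A sketch of the stride usage (my_obj_t is the hypothetical structure
+       from the module description, with its list item as first member):
+
+               my_obj_t        objs[8];
+
+               /* item_size is the distance between successive list items,
+                * i.e. the size of the containing structure. */
+               cl_qlist_insert_array_head( &list, &objs[0].list_item,
+                       8, sizeof(my_obj_t) );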
+

SEE ALSO

+
       Quick List, cl_qlist_insert_array_tail, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail,
+       cl_qlist_insert_prev, cl_qlist_insert_next, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_array_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_array_tail
+
+

DESCRIPTION

+
       The cl_qlist_insert_array_tail function inserts an array of list items
+       at the tail of a quick list.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_insert_array_tail(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_array,
+        IN      size_t                                  item_count,
+        IN      const size_t                    item_size);
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to insert
+               the objects.
+
+       p_array
+               [in] Pointer to the first list item in an array of cl_list_item_t
+               structures.
+
+       item_count
+               [in] Number of cl_list_item_t structures in the array.
+
+       item_size
+               [in] Size of the items added to the list. This is the stride in the
+               array from one cl_list_item_t structure to the next.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts all the list items in the array specified by the p_array parameter
+       to the tail of the quick list specified by the p_list parameter,
+       preserving ordering of the list items.
+
+       The array pointer passed into the function points to the cl_list_item_t
+       in the first element of the caller's element array.  There is no
+       restriction on where the element is stored in the parent structure.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_array_head, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail,
+       cl_qlist_insert_prev, cl_qlist_insert_next, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_head

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_head
+
+

DESCRIPTION

+
       The cl_qlist_insert_head function inserts a list item at the
+       head of a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_insert_head(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_list_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        /*
+         * The list item must not already be part of the list.  Note that this
+         * assertion may fail if an uninitialized list item happens to have its
+         * list pointer equal to the specified list.  The chances of this
+         * happening are acceptable in light of the value of this check.
+         */
+        CL_ASSERT( p_list_item->p_list != p_list );
+
+#if defined( _DEBUG_ )
+        p_list_item->p_list = p_list;
+#endif
+
+        /* Insert before the head. */
+        __cl_primitive_insert( cl_qlist_head( p_list ), p_list_item );
+
+        p_list->count++;
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to insert the object.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure to add.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       In debug builds, cl_qlist_insert_head asserts that the specified list item
+       is not already in the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_tail, cl_qlist_insert_list_head,
+       cl_qlist_insert_list_tail, cl_qlist_insert_array_head,
+       cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next,
+       cl_qlist_remove_head, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_list_head

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_list_head
+
+

DESCRIPTION

+
       The cl_qlist_insert_list_head function merges two quick lists by
+       inserting one at the head of the other.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_insert_list_head(
+        IN      cl_qlist_t* const       p_dest_list,
+        IN      cl_qlist_t* const       p_src_list );
+
+

PARAMETERS

+
       p_dest_list
+               [in] Pointer to destination quicklist object.
+
+       p_src_list
+               [in] Pointer to quicklist to add.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts all list items in the source list to the head of the
+       destination list. The ordering of the list items is preserved.
+
+       The list pointed to by the p_src_list parameter is empty when
+       the call returns.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_list_tail, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_array_head,
+       cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_list_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_list_tail
+
+

DESCRIPTION

+
       The cl_qlist_insert_list_tail function merges two quick lists by
+       inserting one at the tail of the other.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_insert_list_tail(
+        IN      cl_qlist_t* const       p_dest_list,
+        IN      cl_qlist_t* const       p_src_list );
+
+

PARAMETERS

+
       p_dest_list
+               [in] Pointer to destination quicklist object
+
+       p_src_list
+               [in] Pointer to quicklist to add
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts all list items in the source list to the tail of the
+       destination list. The ordering of the list items is preserved.
+
+       The list pointed to by the p_src_list parameter is empty when
+       the call returns.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_list_head, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_array_head,
+       cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_next

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_next
+
+

DESCRIPTION

+
       The cl_qlist_insert_next function inserts a list item after a specified
+       list item in a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_insert_next(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_list_item,
+        IN      cl_list_item_t* const   p_new_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_new_item );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        /*
+         * The list item must not already be part of the list.  Note that this
+         * assertion may fail if an uninitialized list item happens to have its
+         * list pointer equal to the specified list.  The chances of this
+         * happening are acceptable in light of the value of this check.
+         */
+        CL_ASSERT( p_new_item->p_list != p_list );
+
+#if defined( _DEBUG_ )
+        p_new_item->p_list = p_list;
+#endif
+
+        __cl_primitive_insert( cl_qlist_next( p_list_item ), p_new_item );
+
+        p_list->count++;
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to add the new item.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure.
+
+       p_new_item
+               [in] Pointer to a cl_list_item_t structure to add to the quick list.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts the new list item after the list item specified by p_list_item.
+       The list item specified by p_list_item must be in the quick list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_prev, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail,
+       cl_qlist_insert_array_head, cl_qlist_insert_array_tail, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_prev

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_prev
+
+

DESCRIPTION

+
       The cl_qlist_insert_prev function inserts a list item before a
+       specified list item in a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_insert_prev(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_list_item,
+        IN      cl_list_item_t* const   p_new_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_new_item );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        /*
+         * The list item must not already be part of the list.  Note that this
+         * assertion may fail if an uninitialized list item happens to have its
+         * list pointer equal to the specified list.  The chances of this
+         * happening are acceptable in light of the value of this check.
+         */
+        CL_ASSERT( p_new_item->p_list != p_list );
+
+#if defined( _DEBUG_ )
+        p_new_item->p_list = p_list;
+#endif
+
+        __cl_primitive_insert( p_list_item, p_new_item );
+
+        p_list->count++;
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to add the new item.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure.
+
+       p_new_item
+               [in] Pointer to a cl_list_item_t structure to add to the quick list.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Inserts the new list item before the list item specified by p_list_item.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_next, cl_qlist_insert_head,
+       cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail,
+       cl_qlist_insert_array_head, cl_qlist_insert_array_tail, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_insert_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_insert_tail
+
+

DESCRIPTION

+
       The cl_qlist_insert_tail function inserts a list item at the tail
+       of a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_insert_tail(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_list_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        /*
+         * The list item must not already be part of the list.  Note that this
+         * assertion may fail if an uninitialized list item happens to have its
+         * list pointer equal to the specified list.  The chances of this
+         * happening are acceptable in light of the value of this check.
+         */
+        CL_ASSERT( p_list_item->p_list != p_list );
+
+#if defined( _DEBUG_ )
+        p_list_item->p_list = p_list;
+#endif
+
+        /*
+         * Put the new element in front of the end which is the same
+         * as being at the tail
+         */
+        __cl_primitive_insert( &p_list->end, p_list_item );
+
+        p_list->count++;
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure into which to insert the object.
+
+       p_list_item
+               [in] Pointer to cl_list_item_t structure to add.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       In debug builds, cl_qlist_insert_tail asserts that the specified list item
+       is not already in the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_insert_head, cl_qlist_insert_list_head,
+       cl_qlist_insert_list_tail, cl_qlist_insert_array_head,
+       cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next,
+       cl_qlist_remove_tail, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_move_items

+ +

[top][index]

+

NAME

+
       cl_qlist_move_items
+
+

DESCRIPTION

+
       The cl_qlist_move_items function moves list items from one list to
+       another based on the return value of a user supplied function.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qlist_move_items(
+        IN      cl_qlist_t* const       p_src_list,
+        IN      cl_qlist_t* const       p_dest_list,
+        IN      cl_pfn_qlist_find_t     pfn_func,
+        IN      const void* const       context );
+
+

PARAMETERS

+
       p_src_list
+               [in] Pointer to a cl_qlist_t structure from which
+               list items are removed.
+
+       p_dest_list
+               [in] Pointer to a cl_qlist_t structure to which the source
+               list items are added.
+
+       pfn_func
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_qlist_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       If the function specified by the pfn_func parameter returns CL_SUCCESS,
+       the related list item is removed from p_src_list and inserted at the tail
+       of the p_dest_list.
+
+       The cl_qlist_move_items function continues iterating through p_src_list
+       from the last item moved, allowing multiple items to be located and moved
+       in a single list iteration.
+
+       The function specified by pfn_func must not perform any list operations,
+       as these would corrupt the list.
+
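+
+       A sketch reusing the hypothetical match_value callback shown for
+       cl_pfn_qlist_find_t:
+
+               int     wanted = 42;
+
+               /* Each item for which match_value returns CL_SUCCESS is moved
+                * from src_list to the tail of dest_list. */
+               cl_qlist_move_items( &src_list, &dest_list,
+                       match_value, &wanted );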
+

SEE ALSO

+
       Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail,
+       cl_qlist_apply_func, cl_pfn_qlist_find_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_next

+ +

[top][index]

+

NAME

+
       cl_qlist_next
+
+

DESCRIPTION

+
       The cl_qlist_next function returns a pointer to the list item following
+       a given list item in a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_next(
+        IN      const cl_list_item_t* const     p_list_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+
+        /* Return the next item. */
+        return( p_list_item->p_next );
+}
+
+

PARAMETERS

+
       p_list_item
+               [in] Pointer to the cl_list_item_t whose successor to return.
+
+ RETURN VALUES
+       Pointer to the list item following the list item specified by
+       the p_list_item parameter in the quick list.
+
+       Pointer to the list end if p_list_item was at the tail of the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_prev, cl_qlist_end,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_obj

+ +

[top][index]

+

NAME

+
       cl_qlist_obj
+
+

DESCRIPTION

+
       The cl_qlist_obj function returns the object stored in a list object.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_qlist_obj(
+        IN      const cl_list_obj_t* const      p_list_obj )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_obj );
+
+        return( (void*)p_list_obj->p_object );
+}
+
+

PARAMETERS

+
       p_list_obj
+               [in] Pointer to a cl_list_obj_t structure.
+
+

RETURN VALUE

+
       Returns the value of the object pointer stored in the list object.
+
+

SEE ALSO

+
       Quick List, cl_qlist_set_obj
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_prev

+ +

[top][index]

+

NAME

+
       cl_qlist_prev
+
+

DESCRIPTION

+
       The cl_qlist_prev function returns a pointer to the list item preceding
+       a given list item in a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_prev(
+        IN      const cl_list_item_t* const     p_list_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item );
+
+        /* Return the previous item. */
+        return( p_list_item->p_prev );
+}
+
+

PARAMETERS

+
       p_list_item
+               [in] Pointer to the cl_list_item_t whose predecessor to return.
+
+ RETURN VALUES
+       Pointer to the list item preceding the list item specified by
+       the p_list_item parameter in the quick list.
+
+       Pointer to the list end if p_list_item was at the head of the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_end,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_remove_all

+ +

[top][index]

+

NAME

+
       cl_qlist_remove_all
+
+

DESCRIPTION

+
       The cl_qlist_remove_all function removes all items from a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_remove_all(
+        IN      cl_qlist_t* const       p_list )
+{
+#if defined( _DEBUG_ )
+        cl_list_item_t  *p_list_item;
+
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+        p_list_item = cl_qlist_head( p_list );
+        while( p_list_item != cl_qlist_end( p_list ) )
+        {
+                p_list_item = cl_qlist_next( p_list_item );
+                cl_qlist_prev( p_list_item )->p_list = NULL;
+        }
+#endif
+
+        __cl_qlist_reset( p_list );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Quick List, cl_qlist_remove_head, cl_qlist_remove_tail,
+       cl_qlist_remove_item, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_remove_head

+ +

[top][index]

+

NAME

+
       cl_qlist_remove_head
+
+

DESCRIPTION

+
       The cl_qlist_remove_head function removes and returns the list item
+       at the head of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_remove_head(
+        IN      cl_qlist_t* const       p_list )
+{
+        cl_list_item_t  *p_item;
+
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        p_item = cl_qlist_head( p_list );
+        /* CL_ASSERT that the list item is part of the list. */
+        CL_ASSERT( p_item->p_list == p_list );
+
+        if( p_item == cl_qlist_end( p_list ) )
+                return( p_item );
+
+#if defined( _DEBUG_ )
+        /* Clear the item's link to the list. */
+        p_item->p_list = NULL;
+#endif
+
+        __cl_primitive_remove( p_item );
+
+        p_list->count--;
+
+        return( p_item );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+ RETURN VALUES
+       Returns a pointer to the list item formerly at the head of the quick list.
+
+       Pointer to the list end if the list was empty.
+
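+
+       A common drain idiom, shown as a sketch:
+
+               cl_list_item_t  *p_item;
+
+               while( (p_item = cl_qlist_remove_head( &list )) !=
+                       cl_qlist_end( &list ) )
+               {
+                       /* Dispose of the removed item's containing object. */
+               }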
+

SEE ALSO

+
       Quick List, cl_qlist_remove_tail, cl_qlist_remove_all, cl_qlist_remove_item,
+       cl_qlist_end, cl_qlist_head, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_remove_item

+ +

[top][index]

+

NAME

+
       cl_qlist_remove_item
+
+

DESCRIPTION

+
       The cl_qlist_remove_item function removes a specific list item from a quick list.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_remove_item(
+        IN      cl_qlist_t* const               p_list,
+        IN      cl_list_item_t* const   p_list_item )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_item  );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+        /* CL_ASSERT that the list item is part of the list. */
+        CL_ASSERT( p_list_item->p_list == p_list );
+
+        if( p_list_item == cl_qlist_end( p_list ) )
+                return;
+
+#if defined( _DEBUG_ )
+        /* Clear the item's link to the list. */
+        p_list_item->p_list = NULL;
+#endif
+
+        __cl_primitive_remove( p_list_item );
+
+        p_list->count--;
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure from which to remove the item.
+
+       p_list_item
+               [in] Pointer to a cl_list_item_t structure to remove.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Removes the list item pointed to by the p_list_item parameter from
+       its list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_remove_head, cl_qlist_remove_tail, cl_qlist_remove_all,
+       cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_remove_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_remove_tail
+
+

DESCRIPTION

+
       The cl_qlist_remove_tail function removes and returns the list item
+       at the tail of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_remove_tail(
+        IN      cl_qlist_t* const       p_list )
+{
+        cl_list_item_t  *p_item;
+
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        p_item = cl_qlist_tail( p_list );
+        /* CL_ASSERT that the list item is part of the list. */
+        CL_ASSERT( p_item->p_list == p_list );
+
+        if( p_item == cl_qlist_end( p_list ) )
+                return( p_item );
+
+#if defined( _DEBUG_ )
+        /* Clear the item's link to the list. */
+        p_item->p_list = NULL;
+#endif
+
+        __cl_primitive_remove( p_item );
+
+        p_list->count--;
+
+        return( p_item );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+ RETURN VALUES
+       Returns a pointer to the list item formerly at the tail of the quick list.
+
+       Pointer to the list end if the list was empty.
+
+

SEE ALSO

+
       Quick List, cl_qlist_remove_head, cl_qlist_remove_all, cl_qlist_remove_item,
+       cl_qlist_end, cl_qlist_tail, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_set_obj

+ +

[top][index]

+

NAME

+
       cl_qlist_set_obj
+
+

DESCRIPTION

+
       The cl_qlist_set_obj function sets the object stored in a list object.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qlist_set_obj(
+        IN      cl_list_obj_t* const    p_list_obj,
+        IN      const void* const               p_object )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list_obj );
+        p_list_obj->p_object = p_object;
+}
+
+

PARAMETERS

+
       p_list_obj
+               [in] Pointer to a cl_list_obj_t structure.
+
+       p_object
+               [in] User defined context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Quick List, cl_qlist_obj
+
+
+
+ +

[Structures] +Component Library: Quick List/cl_qlist_t

+ +

[top][index]

+

NAME

+
       cl_qlist_t
+
+

DESCRIPTION

+
       Quick list structure.
+
+       The cl_qlist_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_qlist
+{
+        cl_list_item_t  end;
+        size_t                  count;
+        cl_state_t              state;
+
+} cl_qlist_t;
+
+

FIELDS

+
       end
+               List item used to mark the end of the list.
+
+       count
+               Number of items in the list.
+
+       state
+               State of the quick list.
+
+

SEE ALSO

+
       Quick List
+
+
+
+ +

[Functions] +Component Library: Quick List/cl_qlist_tail

+ +

[top][index]

+

NAME

+
       cl_qlist_tail
+
+

DESCRIPTION

+
       The cl_qlist_tail function returns the list item at
+       the tail of a quick list.
+
+

SYNOPSIS

+
CL_INLINE cl_list_item_t* CL_API
+cl_qlist_tail(
+        IN      const cl_qlist_t* const p_list )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_list );
+        /* CL_ASSERT that the list was initialized. */
+        CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+        return( cl_qlist_prev( &p_list->end ) );
+}
+
+

PARAMETERS

+
       p_list
+               [in] Pointer to a cl_qlist_t structure.
+
+ RETURN VALUES
+       Pointer to the list item at the tail of the quick list.
+
+       Pointer to the list end if the list was empty.
+
+

NOTES

+
       cl_qlist_tail does not remove the item from the list.
+
+

SEE ALSO

+
       Quick List, cl_qlist_head, cl_qlist_next, cl_qlist_prev, cl_qlist_end,
+       cl_list_item_t
+
+
+ +
diff --git a/branches/Ndi/docs/complib/cl_qlockpool_h.html b/branches/Ndi/docs/complib/cl_qlockpool_h.html
new file mode 100644
index 00000000..f06edb61
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_qlockpool_h.html
@@ -0,0 +1,340 @@
+./inc_docs/complib/cl_qlockpool_h.html
+Generated from ./inc/complib/cl_qlockpool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51
+
+ +

[Modules] +Component Library/Quick Locking Pool

+ +

[top][parent][index]

+

NAME

+
       Quick Locking Pool
+
+

DESCRIPTION

+
       The Quick Locking Pool represents a thread-safe quick pool.
+
+       This object should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SEE ALSO

+
       Structures:
+               cl_qlock_pool_t
+
+       Initialization:
+               cl_qlock_pool_construct, cl_qlock_pool_init, cl_qlock_pool_destroy
+
+       Manipulation
+               cl_qlock_pool_get, cl_qlock_pool_put
+
+
+
+ +

[Functions] +Component Library: Quick Locking Pool/cl_qlock_pool_construct

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_construct
+
+

DESCRIPTION

+
       This function constructs a Quick Locking Pool.
+
+

SYNOPSIS

+
static inline void
+cl_qlock_pool_construct(
+        IN cl_qlock_pool_t* const p_pool )
+{
+        cl_qpool_construct( &p_pool->pool );
+        cl_spinlock_construct( &p_pool->lock );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a Quick Locking Pool to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_qlock_pool_init and cl_qlock_pool_destroy.
+
+       Calling cl_qlock_pool_construct is a prerequisite to calling any other
+       method except cl_qlock_pool_init.
+
+

SEE ALSO

+
       Quick Locking Pool, cl_qlock_pool_init, cl_qlock_pool_destroy
+
+
+
+ +

[Functions] +Component Library: Quick Locking Pool/cl_qlock_pool_destroy

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_destroy
+
+

DESCRIPTION

+
       The cl_qlock_pool_destroy function destroys a Quick Locking Pool,
+       releasing all resources.
+
+

SYNOPSIS

+
static inline void
+cl_qlock_pool_destroy(
+        IN cl_qlock_pool_t* const p_pool )
+{
+        /*
+                If the pool has already been put into use, grab the lock
+                to sync with other threads before we blow everything away.
+        */
+        if( cl_is_qpool_inited( &p_pool->pool ) )
+        {
+                cl_spinlock_acquire( &p_pool->lock );
+                cl_qpool_destroy( &p_pool->pool );
+                cl_spinlock_release( &p_pool->lock );
+        }
+        else
+                cl_qpool_destroy( &p_pool->pool );
+
+        cl_spinlock_destroy( &p_pool->lock );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a Quick Locking Pool to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Performs any necessary cleanup of the specified Quick Locking Pool.
+       Further operations should not be attempted on the destroyed object.
+       This function should only be called after a call to
+       cl_qlock_pool_construct or cl_qlock_pool_init.
+
+

SEE ALSO

+
       Quick Locking Pool, cl_qlock_pool_construct, cl_qlock_pool_init
+
+
+
+ +

[Functions] +Component Library: Quick Locking Pool/cl_qlock_pool_get

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_get
+
+

DESCRIPTION

+
       Gets an object from the pool.
+
+

SYNOPSIS

+
static inline cl_pool_item_t*
+cl_qlock_pool_get(
+        IN cl_qlock_pool_t* const p_pool )
+{
+        cl_pool_item_t* p_item;
+        cl_spinlock_acquire( &p_pool->lock );
+        p_item = cl_qpool_get( &p_pool->pool );
+        cl_spinlock_release( &p_pool->lock );
+        return( p_item );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to an cl_qlock_pool_t object.
+
+ RETURN VALUES
+       Returns a pointer to a cl_pool_item_t contained in the user object.
+
+

NOTES

+
       The object must eventually be returned to the pool with a call to
+       cl_qlock_pool_put.
+
+       cl_qlock_pool_init must be called before using this function.
+
+

SEE ALSO

+
       Quick Locking Pool, cl_qlock_pool_put
+
+
+
+ +

[Functions] +Component Library: Quick Locking Pool/cl_qlock_pool_init

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_init
+
+

DESCRIPTION

+
       The cl_qlock_pool_init function initializes a Quick Locking Pool for use.
+
+

SYNOPSIS

+
static inline cl_status_t
+cl_qlock_pool_init(
+        IN cl_qlock_pool_t*                     const p_pool,
+        IN      const size_t                    min_size,
+        IN      const size_t                    max_size,
+        IN      const size_t                    grow_size,
+        IN      const size_t                    object_size,
+        IN      cl_pfn_qpool_init_t             pfn_initializer OPTIONAL,
+        IN      cl_pfn_qpool_dtor_t             pfn_destructor OPTIONAL,
+        IN      const void* const               context )
+{
+        cl_status_t status;
+
+        cl_qlock_pool_construct( p_pool );
+
+        status = cl_spinlock_init( &p_pool->lock );
+        if( status )
+                return( status );
+
+        status = cl_qpool_init( &p_pool->pool, min_size, max_size, grow_size,
+                        object_size, pfn_initializer, pfn_destructor, context );
+
+        return( status );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to an cl_qlock_pool_t object to initialize.
+
+       min_size
+               [in] Minimum number of objects that the pool should support. All
+               necessary allocations to allow storing the minimum number of items
+               are performed at initialization time, and all necessary callbacks
+               are successfully invoked.
+
+       max_size
+               [in] Maximum number of objects to which the pool is allowed to grow.
+               A value of zero specifies no maximum.
+
+       grow_size
+               [in] Number of objects to allocate when incrementally growing the pool.
+               A value of zero disables automatic growth.
+
+       object_size
+               [in] Size, in bytes, of each object.
+
+       pfn_initializer
+               [in] Initialization callback to invoke for every new object when
+               growing the pool. This parameter is optional and may be NULL. If NULL,
+               the pool assumes the cl_pool_item_t structure describing objects is
+               located at the head of each object. See the cl_pfn_qpool_init_t
+               function type declaration for details about the callback function.
+
+       pfn_destructor
+               [in] Destructor callback to invoke for every object before memory for
+               that object is freed. This parameter is optional and may be NULL.
+               See the cl_pfn_qpool_dtor_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the quick pool was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+       quick pool.
+
+       CL_INVALID_SETTING if the maximum size is non-zero and less than the
+       minimum size.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function specified by the pfn_initializer parameter.
+
+

NOTES

+
       Allows calling other Quick Locking Pool methods.
+
+

SEE ALSO

+
       Quick Locking Pool, cl_qlock_pool_construct, cl_qlock_pool_destroy
+
+
+
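+       For example, a minimal setup sketch (my_pool and the sizes are
+       illustrative; NULL callbacks assume the cl_pool_item_t is at the head
+       of each object, as with the my_obj_t sketched above):
+
+static cl_qlock_pool_t  my_pool;
+
+static cl_status_t
+create_my_pool( void )
+{
+        /* 16 objects up front, at most 256, grown 16 at a time. */
+        return cl_qlock_pool_init( &my_pool, 16, 256, 16,
+                sizeof(my_obj_t), NULL, NULL, NULL );
+}
+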
+ +

[Functions] +Component Library: Quick Locking Pool/cl_qlock_pool_put

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_put
+
+

DESCRIPTION

+
       Returns an object to the pool.
+
+

SYNOPSIS

+
static inline void
+cl_qlock_pool_put(
+        IN cl_qlock_pool_t* const p_pool,
+        IN cl_pool_item_t* const p_item )
+{
+        cl_spinlock_acquire( &p_pool->lock );
+        cl_qpool_put( &p_pool->pool, p_item );
+        cl_spinlock_release( &p_pool->lock );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qlock_pool_t object.
+
+       p_item
+               [in] Pointer to the cl_pool_item_t in an object that was previously
+               retrieved from the pool.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
+       The pool must be successfully initialized with cl_qlock_pool_init before
+       using this function.
+
+

SEE ALSO

+
       Quick Locking Pool, cl_qlock_pool_get
+
+
+
+ +

[Structures] +Component Library: Quick Locking Pool/cl_qlock_pool_t

+ +

[top][index]

+

NAME

+
       cl_qlock_pool_t
+
+

DESCRIPTION

+
       Quick Locking Pool structure.
+
+       This object should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_qlock_pool
+{
+        cl_spinlock_t                           lock;
+        cl_qpool_t                                      pool;
+
+} cl_qlock_pool_t;
+
+

FIELDS

+
       lock
+               Spinlock guarding the pool.
+
+       pool
+               Quick pool (cl_qpool_t) of user objects.
+
+

SEE ALSO

+
       Quick Locking Pool
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_qmap_h.html b/branches/Ndi/docs/complib/cl_qmap_h.html
new file mode 100644
index 00000000..b80ebc54
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_qmap_h.html
@@ -0,0 +1,998 @@
+./inc_docs/complib/cl_qmap_h.html
+Generated from ./inc/complib/cl_qmap.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51
+
+
+ +

[Modules] +Component Library/Quick Map

+ +

[top][parent][index]

+

NAME

+
       Quick Map
+
+

DESCRIPTION

+
       Quick map implements a binary tree that stores user provided cl_map_item_t
+       structures.  Each item stored in a quick map has a unique 64-bit key
+       (duplicates are not allowed).  Quick map provides the ability to
+       efficiently search for an item given a key.
+
+       Quick map does not allocate any memory, and can therefore not fail
+       any operations due to insufficient memory.  Quick map can thus be useful
+       in minimizing the error paths in code.
+
+       Quick map is not thread safe, and users must provide serialization when
+       adding and removing items from the map.
+
+       The quick map functions operate on a cl_qmap_t structure which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_qmap_t, cl_map_item_t, cl_map_obj_t
+
+       Callbacks:
+               cl_pfn_qmap_apply_t
+
+       Item Manipulation:
+               cl_qmap_set_obj, cl_qmap_obj, cl_qmap_key
+
+       Initialization:
+               cl_qmap_init
+
+       Iteration:
+               cl_qmap_end, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+
+       Manipulation:
+               cl_qmap_insert, cl_qmap_get, cl_qmap_remove_item, cl_qmap_remove,
+               cl_qmap_remove_all, cl_qmap_merge, cl_qmap_delta
+
+       Search:
+               cl_qmap_apply_func
+
+       Attributes:
+               cl_qmap_count, cl_is_qmap_empty,
+
+
+
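+       As a brief illustration, a sketch (not from the header: my_rec_t and
+       the key value are hypothetical; the include path follows the
+       "Generated from ./inc/complib/cl_qmap.h" note above):
+
+#include <complib/cl_qmap.h>
+
+typedef struct _my_rec
+{
+        cl_map_item_t   map_item;       /* storage used by the map */
+        int             payload;
+} my_rec_t;
+
+static void
+qmap_example( void )
+{
+        cl_qmap_t       map;
+        my_rec_t        rec;
+        cl_map_item_t   *p_item;
+
+        cl_qmap_init( &map );
+
+        rec.payload = 7;
+        /* Keys must be unique; duplicates are rejected by cl_qmap_insert. */
+        cl_qmap_insert( &map, 0x1234, &rec.map_item );
+
+        p_item = cl_qmap_get( &map, 0x1234 );
+        if( p_item != cl_qmap_end( &map ) )
+        {
+                /* Found: recover the containing record. */
+                my_rec_t *p_rec = (my_rec_t*)p_item;
+                p_rec->payload++;
+        }
+}
+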
+ +

[Functions] +Component Library: Quick Map/cl_is_qmap_empty

+ +

[top][index]

+

NAME

+
       cl_is_qmap_empty
+
+

DESCRIPTION

+
       The cl_is_qmap_empty function returns whether a quick map is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_qmap_empty(
+        IN      const cl_qmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        return( p_map->count == 0 );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure to test for emptiness.
+
+ RETURN VALUES
+       TRUE if the quick map is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_count, cl_qmap_remove_all
+
+
+
+ +

[Structures] +Component Library: Quick Map/cl_map_item_t

+ +

[top][index]

+

NAME

+
       cl_map_item_t
+
+

DESCRIPTION

+
       The cl_map_item_t structure is used by maps to store objects.
+
+       The cl_map_item_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_map_item
+{
+        /* Must be first to allow casting. */
+        cl_pool_item_t                  pool_item;
+        struct _cl_map_item             *p_left;
+        struct _cl_map_item             *p_right;
+        struct _cl_map_item             *p_up;
+        cl_map_color_t                  color;
+        uint64_t                                key;
+#ifdef _DEBUG_
+        struct _cl_qmap                 *p_map;
+#endif
+
+} cl_map_item_t;
+
+

FIELDS

+
       pool_item
+               Used to store the item in a doubly linked list, allowing more
+               efficient map traversal.
+
+       p_left
+               Pointer to the map item that is a child to the left of the node.
+
+       p_right
+               Pointer to the map item that is a child to the right of the node.
+
+       p_up
+               Pointer to the map item that is the parent of the node.
+
+       color
+               Indicates whether a node is red or black in the map.
+
+       key
+               Value that uniquely represents a node in a map.  This value is set by
+               calling cl_qmap_insert and can be retrieved by calling cl_qmap_key.
+
+

NOTES

+
       None of the fields of this structure should be manipulated by users, as
+       they are critical to the proper operation of the map in which they
+       are stored.
+
+       To allow storing items in either a quick list, a quick pool, or a quick
+       map, the map implementation guarantees that the map item can be safely
+       cast to a pool item used for storing an object in a quick pool, or cast to
+       a list item used for storing an object in a quick list.  This removes the
+       need to embed a map item, a list item, and a pool item in objects that need
+       to be stored in a quick list, a quick pool, and a quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_insert, cl_qmap_key, cl_pool_item_t, cl_list_item_t
+
+
+
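+       For example, when the map item is not the first member of the object,
+       the containing object can be recovered by walking back from the
+       embedded member (a sketch; my_node_t is hypothetical, and PARENT_STRUCT
+       is assumed to be the offsetof-based macro from complib's cl_types.h):
+
+typedef struct _my_node
+{
+        uint64_t        some_state;
+        cl_map_item_t   map_item;       /* not at the head this time */
+} my_node_t;
+
+static my_node_t*
+node_from_item(
+        IN      cl_map_item_t* const    p_item )
+{
+        /* Subtracts offsetof(my_node_t, map_item) from p_item. */
+        return PARENT_STRUCT( p_item, my_node_t, map_item );
+}
+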
+ +

[Structures] +Component Library: Quick Map/cl_map_obj_t

+ +

[top][index]

+

NAME

+
       cl_map_obj_t
+
+

DESCRIPTION

+
       The cl_map_obj_t structure is used to store objects in maps.
+
+       The cl_map_obj_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_map_obj
+{
+        cl_map_item_t                   item;
+        const void                              *p_object;
+
+} cl_map_obj_t;
+
+

FIELDS

+
       item
+               Map item used internally by the map to store an object.
+
+       p_object
+               User defined context. Users should not access this field directly.
+               Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the value
+               of this field.
+
+

NOTES

+
       None of the fields of this structure should be manipulated by users, as
+       they are critical to the proper operation of the map in which they
+       are stored.
+
+       Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the object
+       stored in a map item, respectively.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_set_obj, cl_qmap_obj, cl_map_item_t
+
+
+
+ +

[Definitions] +Component Library: Quick Map/cl_pfn_qmap_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qmap_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_qmap_apply_t function type defines the prototype for functions
+       used to iterate items in a quick map.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_qmap_apply_t)(
+        IN      cl_map_item_t* const    p_map_item,
+        IN      void*                                   context );
+
+

PARAMETERS

+
       p_map_item
+               [in] Pointer to a cl_map_item_t structure.
+
+       context
+               [in] Value passed to the callback function.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_qmap_apply_func
+       function.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_apply_func
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_apply_func

+ +

[top][index]

+

NAME

+
       cl_qmap_apply_func
+
+

DESCRIPTION

+
       The cl_qmap_apply_func function executes a specified function
+       for every item stored in a quick map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qmap_apply_func(
+        IN      const cl_qmap_t* const  p_map,
+        IN      cl_pfn_qmap_apply_t             pfn_func,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure.
+
+       pfn_func
+               [in] Function invoked for every item in the quick map.
+               See the cl_pfn_qmap_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The function provided must not perform any map operations, as these
+       would corrupt the quick map.
+
+

SEE ALSO

+
       Quick Map, cl_pfn_qmap_apply_t
+
+
+
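+       A sketch of a conforming callback and its use (illustrative names;
+       the callback only inspects items and performs no map operations):
+
+static void CL_API
+count_high_keys(
+        IN      cl_map_item_t* const    p_map_item,
+        IN      void*                   context )
+{
+        size_t  *p_count = (size_t*)context;
+
+        if( cl_qmap_key( p_map_item ) > 1000 )
+                (*p_count)++;
+}
+
+static size_t
+count_high_keys_in(
+        IN      const cl_qmap_t* const  p_map )
+{
+        size_t  count = 0;
+
+        cl_qmap_apply_func( p_map, count_high_keys, &count );
+        return count;
+}
+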
+ +

[Functions] +Component Library: Quick Map/cl_qmap_count

+ +

[top][index]

+

NAME

+
       cl_qmap_count
+
+

DESCRIPTION

+
       The cl_qmap_count function returns the number of items stored
+       in a quick map.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_qmap_count(
+        IN      const cl_qmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( p_map->count );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure whose item count to return.
+
+

RETURN VALUE

+
       Returns the number of items stored in the map.
+
+

SEE ALSO

+
       Quick Map, cl_is_qmap_empty
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_delta

+ +

[top][index]

+

NAME

+
       cl_qmap_delta
+
+

DESCRIPTION

+
       The cl_qmap_delta function computes the differences between two maps.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qmap_delta(
+        IN OUT  cl_qmap_t* const        p_map1,
+        IN OUT  cl_qmap_t* const        p_map2,
+        OUT             cl_qmap_t* const        p_new,
+        OUT             cl_qmap_t* const        p_old );
+
+

PARAMETERS

+
       p_map1
+               [in/out] Pointer to the first of two cl_qmap_t structures whose
+               differences to compute.
+
+       p_map2
+               [in/out] Pointer to the second of two cl_qmap_t structures whose
+               differences to compute.
+
+       p_new
+               [out] Pointer to an empty cl_qmap_t structure that contains the items
+               unique to p_map2 upon return from the function.
+
+       p_old
+               [out] Pointer to an empty cl_qmap_t structure that contains the items
+               unique to p_map1 upon return from the function.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Items are evaluated based on their keys.  Items that exist in both
+       p_map1 and p_map2 remain in their respective maps.  Items that
+       exist only in p_map1 are moved to p_old.  Likewise, items that exist only
+       in p_map2 are moved to p_new.  This function can be useful in evaluating
+       changes between two maps.
+
+       Both maps pointed to by p_new and p_old must be empty on input.  This
+       requirement removes the possibility of failures.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_merge
+
+
+
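+       For example, to evaluate the changes between an old and a new view of
+       some set of records (an illustrative sketch; the two output maps start
+       out empty, as required):
+
+static void
+diff_views(
+        IN OUT  cl_qmap_t* const        p_old_view,
+        IN OUT  cl_qmap_t* const        p_new_view )
+{
+        cl_qmap_t       added;
+        cl_qmap_t       removed;
+
+        cl_qmap_init( &added );
+        cl_qmap_init( &removed );
+
+        /* Items only in p_new_view move to 'added'; items only in
+         * p_old_view move to 'removed'; common items stay where they are. */
+        cl_qmap_delta( p_old_view, p_new_view, &added, &removed );
+
+        /* ... process 'added' and 'removed' here ... */
+}
+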
+ +

[Functions] +Component Library: Quick Map/cl_qmap_end

+ +

[top][index]

+

NAME

+
       cl_qmap_end
+
+

DESCRIPTION

+
       The cl_qmap_end function returns the end of a quick map.
+
+

SYNOPSIS

+
CL_INLINE const cl_map_item_t* const CL_API
+cl_qmap_end(
+        IN      const cl_qmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        /* Nil is the end of the map. */
+        return( &p_map->nil );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure whose end to return.
+
+

RETURN VALUE

+
       Pointer to the end of the map.
+
+

NOTES

+
       cl_qmap_end is useful for determining the validity of map items returned
+       by cl_qmap_head, cl_qmap_tail, cl_qmap_next, or cl_qmap_prev.  If the map
+       item pointer returned by any of these functions compares equal to the
+       end, the end of the map was encountered.
+       When using cl_qmap_head or cl_qmap_tail, this condition indicates that
+       the map is empty.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+
+
+
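+       For example, a full ascending-key traversal (a sketch; processing of
+       each item is left as a comment):
+
+static void
+walk_map(
+        IN      const cl_qmap_t* const  p_map )
+{
+        cl_map_item_t   *p_item;
+
+        /* cl_qmap_end marks both "not found" and "past the last item". */
+        for( p_item = cl_qmap_head( p_map );
+                 p_item != cl_qmap_end( p_map );
+                 p_item = cl_qmap_next( p_item ) )
+        {
+                /* ... use cl_qmap_key( p_item ) or the containing object ... */
+        }
+}
+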
+ +

[Functions] +Component Library: Quick Map/cl_qmap_get

+ +

[top][index]

+

NAME

+
       cl_qmap_get
+
+

DESCRIPTION

+
       The cl_qmap_get function returns the map item associated with a key.
+
+

SYNOPSIS

+
CL_EXPORT cl_map_item_t* CL_API
+cl_qmap_get(
+        IN      const cl_qmap_t* const  p_map,
+        IN      const uint64_t                  key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure from which to retrieve the
+               item with the specified key.
+
+       key
+               [in] Key value used to search for the desired map item.
+
+ RETURN VALUES
+       Pointer to the map item with the desired key value.
+
+       Pointer to the map end if there was no item with the desired key value
+       stored in the quick map.
+
+

NOTES

+
       cl_qmap_get does not remove the item from the quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_remove
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_head

+ +

[top][index]

+

NAME

+
       cl_qmap_head
+
+

DESCRIPTION

+
       The cl_qmap_head function returns the map item with the lowest key
+       value stored in a quick map.
+
+

SYNOPSIS

+
CL_INLINE cl_map_item_t* CL_API
+cl_qmap_head(
+        IN      const cl_qmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( (cl_map_item_t*)p_map->nil.pool_item.list_item.p_next );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure whose item with the lowest key
+               is returned.
+
+ RETURN VALUES
+       Pointer to the map item with the lowest key in the quick map.
+
+       Pointer to the map end if the quick map was empty.
+
+

NOTES

+
       cl_qmap_head does not remove the item from the map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_tail, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+       cl_map_item_t
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_init

+ +

[top][index]

+

NAME

+
       cl_qmap_init
+
+

DESCRIPTION

+
       The cl_qmap_init function initializes a quick map for use.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qmap_init(
+        IN      cl_qmap_t* const        p_map );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure to initialize.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Allows calling quick map manipulation functions.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_insert, cl_qmap_remove
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_insert

+ +

[top][index]

+

NAME

+
       cl_qmap_insert
+
+

DESCRIPTION

+
       The cl_qmap_insert function inserts a map item into a quick map.
+
+

SYNOPSIS

+
CL_EXPORT cl_map_item_t* CL_API
+cl_qmap_insert(
+        IN      cl_qmap_t* const                p_map,
+        IN      const uint64_t                  key,
+        IN      cl_map_item_t* const    p_item );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure into which to add the item.
+
+       key
+               [in] Value to assign to the item.
+
+       p_item
+               [in] Pointer to a cl_map_item_t structure to insert into the quick map.
+
+

RETURN VALUE

+
       Pointer to the item in the map with the specified key.  If insertion
+       was successful, this is the pointer to the item.  If an item with the
+       specified key already exists in the map, the pointer to that item is
+       returned.
+
+

NOTES

+
       Insertion operations may cause the quick map to rebalance.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_remove, cl_map_item_t
+
+
+
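+       For example, detecting a duplicate key at insertion time (a sketch;
+       try_insert is a hypothetical helper):
+
+static boolean_t
+try_insert(
+        IN      cl_qmap_t* const        p_map,
+        IN      const uint64_t          key,
+        IN      cl_map_item_t* const    p_item )
+{
+        /* cl_qmap_insert returns the existing item, not p_item, when
+         * another item already owns the key; p_item is then not inserted. */
+        return (boolean_t)( cl_qmap_insert( p_map, key, p_item ) == p_item );
+}
+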
+ +

[Functions] +Component Library: Quick Map/cl_qmap_key

+ +

[top][index]

+

NAME

+
       cl_qmap_key
+
+

DESCRIPTION

+
       The cl_qmap_key function retrieves the key value of a map item.
+
+

SYNOPSIS

+
CL_INLINE uint64_t CL_API
+cl_qmap_key(
+        IN      const cl_map_item_t* const      p_item )
+{
+        CL_ASSERT( p_item );
+        return( p_item->key );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose key value to return.
+
+

RETURN VALUE

+
       Returns the 64-bit key value for the specified map item.
+
+

NOTES

+
       The key value is set in a call to cl_qmap_insert.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_insert
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_merge

+ +

[top][index]

+

NAME

+
       cl_qmap_merge
+
+

DESCRIPTION

+
       The cl_qmap_merge function moves all items from one map to another,
+       excluding duplicates.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qmap_merge(
+        OUT             cl_qmap_t* const        p_dest_map,
+        IN OUT  cl_qmap_t* const        p_src_map );
+
+

PARAMETERS

+
       p_dest_map
+               [out] Pointer to a cl_qmap_t structure to which items should be added.
+
+       p_src_map
+               [in/out] Pointer to a cl_qmap_t structure whose items to add
+               to p_dest_map.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Items are evaluated based on their keys only.
+
+       Upon return from cl_qmap_merge, the quick map referenced by p_src_map
+       contains all duplicate items.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_delta
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_next

+ +

[top][index]

+

NAME

+
       cl_qmap_next
+
+

DESCRIPTION

+
       The cl_qmap_next function returns the map item with the next higher
+       key value than a specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_map_item_t* CL_API
+cl_qmap_next(
+        IN      const cl_map_item_t* const      p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_map_item_t*)p_item->pool_item.list_item.p_next );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose successor to return.
+
+ RETURN VALUES
+       Pointer to the map item with the next higher key value in a quick map.
+
+       Pointer to the map end if the specified item was the last item in
+       the quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_prev, cl_qmap_end,
+       cl_map_item_t
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_obj

+ +

[top][index]

+

NAME

+
       cl_qmap_obj
+
+

DESCRIPTION

+
       The cl_qmap_obj function returns the object stored in a map object.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_qmap_obj(
+        IN      const cl_map_obj_t* const       p_map_obj )
+{
+        CL_ASSERT( p_map_obj );
+        return( (void*)p_map_obj->p_object );
+}
+
+

PARAMETERS

+
       p_map_obj
+               [in] Pointer to a map object structure whose object pointer to return.
+
+

RETURN VALUE

+
       Returns the value of the object pointer stored in the map object.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_set_obj
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_prev

+ +

[top][index]

+

NAME

+
       cl_qmap_prev
+
+

DESCRIPTION

+
       The cl_qmap_prev function returns the map item with the next lower
+       key value than a specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_map_item_t* CL_API
+cl_qmap_prev(
+        IN      const cl_map_item_t* const      p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_map_item_t*)p_item->pool_item.list_item.p_prev );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose predecessor to return.
+
+ RETURN VALUES
+       Pointer to the map item with the next lower key value in a quick map.
+
+       Pointer to the map end if the specified item was the first item in
+       the quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_end,
+       cl_map_item_t
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_remove

+ +

[top][index]

+

NAME

+
       cl_qmap_remove
+
+

DESCRIPTION

+
       The cl_qmap_remove function removes the map item with the specified key
+       from a quick map.
+
+

SYNOPSIS

+
CL_EXPORT cl_map_item_t* CL_API
+cl_qmap_remove(
+        IN      cl_qmap_t* const        p_map,
+        IN      const uint64_t          key );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure from which to remove the item
+               with the specified key.
+
+       key
+               [in] Key value used to search for the map item to remove.
+
+ RETURN VALUES
+       Pointer to the removed map item if it was found.
+
+       Pointer to the map end if no item with the specified key exists in the
+       quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_remove_item, cl_qmap_remove_all, cl_qmap_insert
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_remove_all

+ +

[top][index]

+

NAME

+
       cl_qmap_remove_all
+
+

DESCRIPTION

+
       The cl_qmap_remove_all function removes all items in a quick map,
+       leaving it empty.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qmap_remove_all(
+        IN      cl_qmap_t* const        p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        p_map->root.p_left = &p_map->nil;
+        p_map->nil.pool_item.list_item.p_next = &p_map->nil.pool_item.list_item;
+        p_map->nil.pool_item.list_item.p_prev = &p_map->nil.pool_item.list_item;
+        p_map->count = 0;
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure to empty.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_remove, cl_qmap_remove_item
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_remove_item

+ +

[top][index]

+

NAME

+
       cl_qmap_remove_item
+
+

DESCRIPTION

+
       The cl_qmap_remove_item function removes the specified map item
+       from a quick map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qmap_remove_item(
+        IN      cl_qmap_t* const                p_map,
+        IN      cl_map_item_t* const    p_item );
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item to remove from its quick map.
+
+ RETURN VALUES
+       This function does not return a value.
+
+       In a debug build, cl_qmap_remove_item asserts that the item being removed
+       is in the specified map.
+
+

NOTES

+
       Removes the map item pointed to by p_item from its quick map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_remove, cl_qmap_remove_all, cl_qmap_insert
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_set_obj

+ +

[top][index]

+

NAME

+
       cl_qmap_set_obj
+
+

DESCRIPTION

+
       The cl_qmap_set_obj function sets the object stored in a map object.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qmap_set_obj(
+        IN      cl_map_obj_t* const     p_map_obj,
+        IN      const void* const       p_object )
+{
+        CL_ASSERT( p_map_obj );
+        p_map_obj->p_object = p_object;
+}
+
+

PARAMETERS

+
       p_map_obj
+               [in] Pointer to a map object structure whose object pointer
+               is to be set.
+
+       p_object
+               [in] User defined context.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_obj
+
+
+
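+       For example, using cl_map_obj_t to index an object that cannot embed
+       a map item of its own (a sketch; the wrapper's lifetime management is
+       the caller's concern and is omitted here):
+
+static void
+store_by_handle(
+        IN      cl_qmap_t* const        p_map,
+        IN      const uint64_t          handle,
+        IN      cl_map_obj_t* const     p_wrapper,
+        IN      const void* const       p_object )
+{
+        cl_qmap_set_obj( p_wrapper, p_object );
+        cl_qmap_insert( p_map, handle, &p_wrapper->item );
+}
+
+static void*
+lookup_by_handle(
+        IN      const cl_qmap_t* const  p_map,
+        IN      const uint64_t          handle )
+{
+        cl_map_item_t   *p_item;
+
+        p_item = cl_qmap_get( p_map, handle );
+        if( p_item == cl_qmap_end( p_map ) )
+                return NULL;
+        /* The map item is the first member of cl_map_obj_t. */
+        return cl_qmap_obj( (cl_map_obj_t*)p_item );
+}
+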
+ +

[Structures] +Component Library: Quick Map/cl_qmap_t

+ +

[top][index]

+

NAME

+
       cl_qmap_t
+
+

DESCRIPTION

+
       Quick map structure.
+
+       The cl_qmap_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_qmap
+{
+        cl_map_item_t   root;
+        cl_map_item_t   nil;
+        cl_state_t              state;
+        size_t                  count;
+
+} cl_qmap_t;
+
+

FIELDS

+
       root
+               Map item that serves as root of the map.  The root is set up to
+               always have itself as parent.  The left pointer is set to point to
+               the item at the root.
+
+       nil
+               Map item that serves as terminator for all leaves, as well as providing
+               the list item used as quick list for storing map items in a list for
+               faster traversal.
+
+       state
+               State of the map, used to verify that operations are permitted.
+
+       count
+               Number of items in the map.
+
+

SEE ALSO

+
       Quick Map
+
+
+
+ +

[Functions] +Component Library: Quick Map/cl_qmap_tail

+ +

[top][index]

+

NAME

+
       cl_qmap_tail
+
+

DESCRIPTION

+
       The cl_qmap_tail function returns the map item with the highest key
+       value stored in a quick map.
+
+

SYNOPSIS

+
CL_INLINE cl_map_item_t* CL_API
+cl_qmap_tail(
+        IN      const cl_qmap_t* const  p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( (cl_map_item_t*)p_map->nil.pool_item.list_item.p_prev );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_qmap_t structure whose item with the highest key
+               is returned.
+
+ RETURN VALUES
+       Pointer to the map item with the highest key in the quick map.
+
+       Pointer to the map end if the quick map was empty.
+
+

NOTES

+
       cl_qmap_tail does not remove the item from the map.
+
+

SEE ALSO

+
       Quick Map, cl_qmap_head, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+       cl_map_item_t
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_qpool_h.html b/branches/Ndi/docs/complib/cl_qpool_h.html
new file mode 100644
index 00000000..69575803
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_qpool_h.html
@@ -0,0 +1,628 @@
+./inc_docs/complib/cl_qpool_h.html
+Generated from ./inc/complib/cl_qpool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51
+
+
+ +

[Modules] +Component Library/Quick Pool

+ +

[top][parent][index]

+

NAME

+
       Quick Pool
+
+

DESCRIPTION

+
       The quick pool provides a self-contained and self-sustaining pool
+       of user defined objects.
+
+       To aid in object oriented design, the quick pool provides the user
+       the ability to specify callbacks that are invoked for each object for
+       construction, initialization, and destruction. Constructor and destructor
+       callback functions may not fail.
+
+       A quick pool does not return memory to the system as the user returns
+       objects to the pool. The only method of returning memory to the system is
+       to destroy the pool.
+
+       The quick pool operates on cl_pool_item_t structures that describe
+       objects. This provides for more efficient memory use and operation.
+       If using a cl_pool_item_t is not desired, the Pool provides similar
+       functionality but operates on opaque objects.
+
+       The quick pool functions operate on a cl_qpool_t structure which should
+       be treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_qpool_t, cl_pool_item_t
+
+       Callbacks:
+               cl_pfn_qpool_init_t, cl_pfn_qpool_dtor_t
+
+       Initialization/Destruction:
+               cl_qpool_construct, cl_qpool_init, cl_qpool_destroy
+
+       Manipulation:
+               cl_qpool_get, cl_qpool_put, cl_qpool_put_list, cl_qpool_grow
+
+       Attributes:
+               cl_is_qpool_inited, cl_qpool_count
+
+
+
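+       A compact end-to-end sketch (illustrative: my_buf_t and the pool
+       sizes are invented; NULL callbacks assume the pool item is the first
+       member of each object):
+
+#include <complib/cl_qpool.h>
+
+typedef struct _my_buf
+{
+        cl_pool_item_t  pool_item;      /* first, so NULL initializer works */
+        uint8_t         data[512];
+} my_buf_t;
+
+static cl_status_t
+qpool_example( void )
+{
+        cl_qpool_t      pool;
+        cl_pool_item_t  *p_item;
+        cl_status_t     status;
+
+        cl_qpool_construct( &pool );
+        status = cl_qpool_init( &pool, 8, 0, 4, sizeof(my_buf_t),
+                NULL, NULL, NULL );
+        if( status != CL_SUCCESS )
+                return status;
+
+        p_item = cl_qpool_get( &pool );
+        if( p_item )
+                cl_qpool_put( &pool, p_item );
+
+        /* Destroying the pool releases all object memory. */
+        cl_qpool_destroy( &pool );
+        return CL_SUCCESS;
+}
+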
+ +

[Functions] +Component Library: Quick Pool/cl_is_qpool_inited

+ +

[top][index]

+

NAME

+
       cl_is_qpool_inited
+
+

DESCRIPTION

+
       The cl_is_qpool_inited function returns whether a quick pool was
+       successfully initialized.
+
+

SYNOPSIS

+
CL_INLINE uint32_t CL_API
+cl_is_qpool_inited(
+        IN      const cl_qpool_t* const p_pool )
+{
+        /* CL_ASSERT that a non-null pointer is provided. */
+        CL_ASSERT( p_pool );
+        return( cl_is_qcpool_inited( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure whose initialization state
+               to check.
+
+ RETURN VALUES
+       TRUE if the quick pool was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of a quick pool to determine if
+       invoking member functions is appropriate.
+
+

SEE ALSO

+
       Quick Pool
+
+
+
+ +

[Definitions] +Component Library: Quick Pool/cl_pfn_qpool_dtor_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qpool_dtor_t
+
+

DESCRIPTION

+
       The cl_pfn_qpool_dtor_t function type defines the prototype for
+       functions used as destructor for objects being deallocated by a
+       quick pool.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_qpool_dtor_t)(
+        IN      const cl_pool_item_t* const     p_pool_item,
+        IN      void*                                           context );
+
+

PARAMETERS

+
       p_pool_item
+               [in] Pointer to a cl_pool_item_t structure representing an object.
+
+       context
+               [in] Context provided in a call to cl_qpool_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_qpool_init function.
+
+       The destructor is invoked once per allocated object, allowing the user
+       to perform any necessary cleanup. Users should not attempt to deallocate
+       the memory for the object, as the quick pool manages object
+       allocation and deallocation.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_init
+
+
+
+ +

[Definitions] +Component Library: Quick Pool/cl_pfn_qpool_init_t

+ +

[top][index]

+

NAME

+
       cl_pfn_qpool_init_t
+
+

DESCRIPTION

+
       The cl_pfn_qpool_init_t function type defines the prototype for
+       functions used as constructor for objects being allocated by a
+       quick pool.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_qpool_init_t)(
+        IN      void* const                             p_object,
+        IN      void*                                   context,
+        OUT     cl_pool_item_t** const  pp_pool_item );
+
+

PARAMETERS

+
       p_object
+               [in] Pointer to an object to initialize.
+
+       context
+               [in] Context provided in a call to cl_qpool_init.
+
+       pp_pool_item
+               [out] On success, set by the callback to point to the
+               cl_pool_item_t structure that represents the object.
+
+ RETURN VALUES
+       Return CL_SUCCESS to indicate that initialization of the object
+       was successful and that initialization of further objects may continue.
+
+       Any other cl_status_t value is returned to the caller of cl_qpool_init
+       or cl_qpool_grow.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by the user as an optional parameter to the
+       cl_qpool_init function.
+
+       The initializer is invoked once per allocated object, allowing the user
+       to perform any necessary initialization.  Returning a status other than
+       CL_SUCCESS aborts a grow operation, initiated either through cl_qpool_init
+       or cl_qpool_grow, causing the initiating function to fail.
+       Any non-CL_SUCCESS status will be returned by the function that initiated
+       the grow operation.
+
+       All memory for the object is pre-allocated.  Users should include space in
+       their objects for the cl_pool_item_t structure that will represent the
+       object to avoid having to allocate that structure in the initialization
+       callback.
+
+       When later performing a cl_qpool_get call, the return value is a pointer
+       to the cl_pool_item_t returned by this function in the pp_pool_item
+       parameter.  Users must set pp_pool_item to a valid pointer to the
+       cl_pool_item_t representing the object if they return CL_SUCCESS.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_init
+
+
+
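+       A sketch of a conforming initializer (illustrative names; the pool
+       item is embedded in the object, so nothing is allocated here):
+
+typedef struct _my_ctx_obj
+{
+        cl_pool_item_t  pool_item;
+        void            *p_resource;
+} my_ctx_obj_t;
+
+static cl_status_t CL_API
+my_obj_init(
+        IN      void* const             p_object,
+        IN      void*                   context,
+        OUT     cl_pool_item_t** const  pp_pool_item )
+{
+        my_ctx_obj_t    *p_obj = (my_ctx_obj_t*)p_object;
+
+        (void)context;  /* unused in this sketch */
+
+        p_obj->p_resource = NULL;       /* per-object setup goes here */
+
+        /* Hand the embedded pool item back to the pool. */
+        *pp_pool_item = &p_obj->pool_item;
+        return CL_SUCCESS;
+}
+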
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_construct

+ +

[top][index]

+

NAME

+
       cl_qpool_construct
+
+

DESCRIPTION

+
       The cl_qpool_construct function constructs a quick pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_qpool_construct(
+        IN      cl_qpool_t* const       p_pool );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_qpool_init, cl_qpool_destroy, cl_is_qpool_inited.
+
+       Calling cl_qpool_construct is a prerequisite to calling any other
+       quick pool function except cl_qpool_init.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_init, cl_qpool_destroy, cl_is_qpool_inited.
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_count

+ +

[top][index]

+

NAME

+
       cl_qpool_count
+
+

DESCRIPTION

+
       The cl_qpool_count function returns the number of available objects
+       in a quick pool.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_qpool_count(
+        IN      cl_qpool_t* const       p_pool )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_count( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure for which the number of
+               available objects is requested.
+
+

RETURN VALUE

+
       Returns the number of objects available in the specified quick pool.
+
+

SEE ALSO

+
       Quick Pool
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_destroy

+ +

[top][index]

+

NAME

+
       cl_qpool_destroy
+
+

DESCRIPTION

+
       The cl_qpool_destroy function destroys a quick pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qpool_destroy(
+        IN      cl_qpool_t* const       p_pool )
+{
+        CL_ASSERT( p_pool );
+        cl_qcpool_destroy( &p_pool->qcpool );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       All memory allocated for objects is freed. The destructor callback,
+       if any, will be invoked for every allocated object. Further operations
+       on the pool should not be attempted after cl_qpool_destroy
+       is invoked.
+
+       This function should only be called after a call to
+       cl_qpool_construct or cl_qpool_init.
+
+       In a debug build, cl_qpool_destroy asserts that all objects are in
+       the pool.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_construct, cl_qpool_init
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_get

+ +

[top][index]

+

NAME

+
       cl_qpool_get
+
+

DESCRIPTION

+
       The cl_qpool_get function retrieves an object from a
+       quick pool.
+
+

SYNOPSIS

+
CL_INLINE cl_pool_item_t* CL_API
+cl_qpool_get(
+        IN      cl_qpool_t* const       p_pool )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_get( &p_pool->qcpool ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure from which to retrieve
+               an object.
+
+ RETURN VALUES
+       Returns a pointer to a cl_pool_item_t for an object.
+
+       Returns NULL if the pool is empty and can not be grown automatically.
+
+

NOTES

+
       cl_qpool_get returns the object at the head of the pool. If the pool is
+       empty, it is automatically grown to accommodate this request unless the
+       grow_size parameter passed to the cl_qpool_init function was zero.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_get_tail, cl_qpool_put, cl_qpool_grow, cl_qpool_count
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_grow

+ +

[top][index]

+

NAME

+
       cl_qpool_grow
+
+

DESCRIPTION

+
       The cl_qpool_grow function grows a quick pool by
+       the specified number of objects.
+
+

SYNOPSIS

+
CL_INLINE cl_status_t CL_API
+cl_qpool_grow(
+        IN      cl_qpool_t* const       p_pool,
+        IN      const size_t            obj_count )
+{
+        CL_ASSERT( p_pool );
+        return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure whose capacity to grow.
+
+       obj_count
+               [in] Number of objects by which to grow the pool.
+
+ RETURN VALUES
+       CL_SUCCESS if the quick pool grew successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+       quick pool.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function specified by the pfn_initializer parameter passed to the
+       cl_qpool_init function.
+
+

NOTES

+
       It is not necessary to call cl_qpool_grow if the pool is
+       configured to grow automatically.
+
+

SEE ALSO

+
       Quick Pool
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_init

+ +

[top][index]

+

NAME

+
       cl_qpool_init
+
+

DESCRIPTION

+
       The cl_qpool_init function initializes a quick pool for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_qpool_init(
+        IN      cl_qpool_t* const               p_pool,
+        IN      const size_t                    min_size,
+        IN      const size_t                    max_size,
+        IN      const size_t                    grow_size,
+        IN      const size_t                    object_size,
+        IN      cl_pfn_qpool_init_t             pfn_initializer OPTIONAL,
+        IN      cl_pfn_qpool_dtor_t             pfn_destructor OPTIONAL,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure to initialize.
+
+       min_size
+               [in] Minimum number of objects that the pool should support. All
+               necessary allocations to allow storing the minimum number of items
+               are performed at initialization time, and all necessary callbacks
+               are successfully invoked.
+
+       max_size
+               [in] Maximum number of objects to which the pool is allowed to grow.
+               A value of zero specifies no maximum.
+
+       grow_size
+               [in] Number of objects to allocate when incrementally growing the pool.
+               A value of zero disables automatic growth.
+
+       object_size
+               [in] Size, in bytes, of each object.
+
+       pfn_initializer
+               [in] Initialization callback to invoke for every new object when
+               growing the pool. This parameter is optional and may be NULL. If NULL,
+               the pool assumes the cl_pool_item_t structure describing objects is
+               located at the head of each object. See the cl_pfn_qpool_init_t
+               function type declaration for details about the callback function.
+
+       pfn_destructor
+               [in] Destructor callback to invoke for every object before memory for
+               that object is freed. This parameter is optional and may be NULL.
+               See the cl_pfn_qpool_dtor_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the quick pool was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+       quick pool.
+
+       CL_INVALID_SETTING if the maximum size is non-zero and less than the
+       minimum size.
+
+       Any other cl_status_t value returned by the optional initialization
+       callback function specified by the pfn_initializer parameter.
+
+

NOTES

+
       cl_qpool_init initializes, and if necessary, grows the pool to
+       the capacity desired.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_construct, cl_qpool_destroy,
+       cl_qpool_get, cl_qpool_put, cl_qpool_grow,
+       cl_qpool_count, cl_pfn_qpool_init_t, cl_pfn_qpool_dtor_t
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_put

+ +

[top][index]

+

NAME

+
       cl_qpool_put
+
+

DESCRIPTION

+
       The cl_qpool_put function returns an object to the head of a quick pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qpool_put(
+        IN      cl_qpool_t* const               p_pool,
+        IN      cl_pool_item_t* const   p_pool_item )
+{
+        CL_ASSERT( p_pool );
+        cl_qcpool_put( &p_pool->qcpool, p_pool_item );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure to which to return
+               an object.
+
+       p_pool_item
+               [in] Pointer to a cl_pool_item_t structure for the object
+               being returned.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_qpool_put places the returned object at the head of the pool.
+
+       The object specified by the p_pool_item parameter must have been
+       retrieved from the pool by a previous call to cl_qpool_get.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_put_tail, cl_qpool_get
+
+
+
+ +

[Functions] +Component Library: Quick Pool/cl_qpool_put_list

+ +

[top][index]

+

NAME

+
       cl_qpool_put_list
+
+

DESCRIPTION

+
       The cl_qpool_put_list function returns a list of objects to the head
+       of a quick pool.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_qpool_put_list(
+        IN      cl_qpool_t* const       p_pool,
+        IN      cl_qlist_t* const       p_list )
+{
+        CL_ASSERT( p_pool );
+        cl_qcpool_put_list( &p_pool->qcpool, p_list );
+}
+
+

PARAMETERS

+
       p_pool
+               [in] Pointer to a cl_qpool_t structure to which to return
+               a list of objects.
+
+       p_list
+               [in] Pointer to a cl_qlist_t structure for the list of objects
+               being returned.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_qpool_put_list places the returned objects at the head of the pool.
+
+       The objects in the list specified by the p_list parameter must have been
+       retrieved from the pool by a previous call to cl_qpool_get.
+
+

SEE ALSO

+
       Quick Pool, cl_qpool_put, cl_qpool_put_tail, cl_qpool_get
+
+
+
+ +

[Structures] +Component Library: Quick Pool/cl_qpool_t

+ +

[top][index]

+

NAME

+
       cl_qpool_t
+
+

DESCRIPTION

+
       Quick pool structure.
+
+       The cl_qpool_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_qpool
+{
+        cl_qcpool_t                             qcpool;
+        cl_pfn_qpool_init_t             pfn_init;
+        cl_pfn_qpool_dtor_t             pfn_dtor;
+        const void                              *context;
+
+} cl_qpool_t;
+
+

FIELDS

+
       qcpool
+               Quick composite pool that manages all objects.
+
+       pfn_init
+               Pointer to the user's initializer callback, used by the pool
+               to translate the quick composite pool's initializer callback to
+               a quick pool initializer callback.
+
+       pfn_dtor
+               Pointer to the user's destructor callback, used by the pool
+               to translate the quick composite pool's destructor callback to
+               a quick pool destructor callback.
+
+       context
+               User-provided context for the callback functions, passed by the
+               pool when invoking the callbacks.
+
+

SEE ALSO

+
       Quick Pool
+
+
+
diff --git a/branches/Ndi/docs/complib/cl_rbmap_h.html b/branches/Ndi/docs/complib/cl_rbmap_h.html
new file mode 100644
index 00000000..f2c76df2
--- /dev/null
+++ b/branches/Ndi/docs/complib/cl_rbmap_h.html
@@ -0,0 +1,563 @@
+./inc_docs/complib/cl_rbmap_h.html
+Generated from ./inc/complib/cl_rbmap.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51
+
+
+ +

[Modules] +Component Library/RB Map

+ +

[top][parent][index]

+

NAME

+
       RB Map
+
+

DESCRIPTION

+
       RB map implements a binary tree that stores user provided cl_rbmap_item_t
+       structures.  Each item stored in a RB map has a unique key
+       (duplicates are not allowed).  RB map provides the ability to
+       efficiently search for an item given a key.
+
+       RB map does not allocate any memory, and can therefore not fail
+       any operations due to insufficient memory.  RB map can thus be useful
+       in minimizing the error paths in code.
+
+       RB map is not thread safe, and users must provide serialization when
+       adding and removing items from the map.
+
+       The RB map functions operate on a cl_rbmap_t structure which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_rbmap_t, cl_rbmap_item_t
+
+       Initialization:
+               cl_rbmap_init
+
+       Iteration:
+               cl_rbmap_root, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up
+
+       Manipulation:
+               cl_rbmap_insert, cl_rbmap_get, cl_rbmap_remove_item, cl_rbmap_remove,
+               cl_rbmap_reset, cl_rbmap_merge, cl_rbmap_delta
+
+       Search:
+               cl_rbmap_apply_func
+
+       Attributes:
+               cl_rbmap_count, cl_is_rbmap_empty,
+
+
+
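+       Because cl_rbmap_item_t carries no key of its own, callers keep the
+       key in the containing object and perform the descent themselves.  A
+       lookup sketch (my_rb_node_t and its key ordering are illustrative):
+
+#include <complib/cl_rbmap.h>
+
+typedef struct _my_rb_node
+{
+        cl_rbmap_item_t rb_item;        /* at the head to allow casting */
+        uint64_t        key;
+} my_rb_node_t;
+
+static my_rb_node_t*
+rb_find(
+        IN      const cl_rbmap_t* const p_map,
+        IN      const uint64_t          key )
+{
+        cl_rbmap_item_t *p_item = cl_rbmap_root( p_map );
+
+        while( p_item != cl_rbmap_end( p_map ) )
+        {
+                my_rb_node_t    *p_node = (my_rb_node_t*)p_item;
+
+                if( key == p_node->key )
+                        return p_node;
+
+                /* Smaller keys to the left, larger to the right. */
+                p_item = ( key < p_node->key ) ?
+                        cl_rbmap_left( p_item ) : cl_rbmap_right( p_item );
+        }
+        return NULL;    /* Not found; the last node visited would serve
+                         * as p_insert_at in a call to cl_rbmap_insert. */
+}
+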
+ +

[Functions] +Component Library: RB Map/cl_is_rbmap_empty

+ +

[top][index]

+

NAME

+
       cl_is_rbmap_empty
+
+

DESCRIPTION

+
       The cl_is_rbmap_empty function returns whether a RB map is empty.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_rbmap_empty(
+        IN      const cl_rbmap_t* const p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        return( p_map->count == 0 );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure to test for emptiness.
+
+ RETURN VALUES
+       TRUE if the RB map is empty.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_count, cl_rbmap_reset
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_count

+ +

[top][index]

+

NAME

+
       cl_rbmap_count
+
+

DESCRIPTION

+
       The cl_rbmap_count function returns the number of items stored
+       in a RB map.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_rbmap_count(
+        IN      const cl_rbmap_t* const p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        return( p_map->count );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure whose item count to return.
+
+

RETURN VALUE

+
       Returns the number of items stored in the map.
+
+

SEE ALSO

+
       RB Map, cl_is_rbmap_empty
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_end

+ +

[top][index]

+

NAME

+
       cl_rbmap_end
+
+

DESCRIPTION

+
       The cl_rbmap_end function returns the end of a RB map.
+
+

SYNOPSIS

+
CL_INLINE const cl_rbmap_item_t* const CL_API
+cl_rbmap_end(
+        IN      const cl_rbmap_t* const p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+        /* Nil is the end of the map. */
+        return( &p_map->nil );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure whose end to return.
+
+

RETURN VALUE

+
       Pointer to the end of the map.
+
+

NOTES

+
       cl_rbmap_end is useful for determining the validity of map items returned
+       by cl_rbmap_root, cl_rbmap_left, or cl_rbmap_right.  If the map item
+       pointer returned by any of these functions compares equal to the end,
+       the end of the map was encountered.
+       When using cl_rbmap_root, this condition indicates that the map is empty.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_root, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_init

+ +

[top][index]

+

NAME

+
       cl_rbmap_init
+
+

DESCRIPTION

+
       The cl_rbmap_init function initializes an RB map for use.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_rbmap_init(
+        IN      cl_rbmap_t* const       p_map )
+{
+        CL_ASSERT( p_map );
+
+        /* special setup for the root node */
+        p_map->root.p_left = &p_map->nil;
+        p_map->root.p_right = &p_map->nil;
+        p_map->root.p_up = &p_map->root;
+        p_map->root.color = CL_MAP_BLACK;
+
+        /* Setup the node used as terminator for all leaves. */
+        p_map->nil.p_left = &p_map->nil;
+        p_map->nil.p_right = &p_map->nil;
+        p_map->nil.p_up = &p_map->nil;
+        p_map->nil.color = CL_MAP_BLACK;
+
+#ifdef _DEBUG_
+        p_map->root.p_map = p_map;
+        p_map->nil.p_map = p_map;
+#endif
+
+        p_map->state = CL_INITIALIZED;
+
+        p_map->count = 0;
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure to initialize.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       Allows calling RB map manipulation functions.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_insert, cl_rbmap_remove
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_insert

+ +

[top][index]

+

NAME

+
       cl_rbmap_insert
+
+

DESCRIPTION

+
       The cl_rbmap_insert function inserts a map item into a RB map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_rbmap_insert(
+        IN      cl_rbmap_t* const               p_map,
+        IN      cl_rbmap_item_t* const  p_insert_at,
+        IN      cl_rbmap_item_t* const  p_item,
+        IN      boolean_t                               left );
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure into which to add the item.
+
+       p_insert_at
+               [in] Pointer to a cl_rbmap_item_t structure to serve as parent
+               to p_item.
+
+       p_item
+               [in] Pointer to a cl_rbmap_item_t structure to insert into the RB map.
+
+       left
+               [in] Indicates that p_item should be inserted to the left of p_insert_at.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Insertion operations may cause the RB map to rebalance.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_remove, cl_rbmap_item_t
+
+
+
+ +

[Structures] +Component Library: RB Map/cl_rbmap_item_t

+ +

[top][index]

+

NAME

+
       cl_rbmap_item_t
+
+

DESCRIPTION

+
       The cl_rbmap_item_t structure is used by maps to store objects.
+
+       The cl_rbmap_item_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_rbmap_item
+{
+        struct _cl_rbmap_item           *p_left;
+        struct _cl_rbmap_item           *p_right;
+        struct _cl_rbmap_item           *p_up;
+        cl_map_color_t                          color;
+#ifdef _DEBUG_
+        struct _cl_rbmap                        *p_map;
+#endif
+
+} cl_rbmap_item_t;
+
+

FIELDS

+
       p_left
+               Pointer to the map item that is a child to the left of the node.
+
+       p_right
+               Pointer to the map item that is a child to the right of the node.
+
+       p_up
+               Pointer to the map item that is the parent of the node.
+
+       color
+               Indicates whether a node is red or black in the map.
+
+

NOTES

+
       None of the fields of this structure should be manipulated by users, as
+       they are critical to the proper operation of the map in which they
+       are stored.
+
+       Unlike cl_map_item_t, the cl_rbmap_item_t structure does not embed a
+       pool item, so it cannot be cast to a cl_pool_item_t or cl_list_item_t
+       for storage in a quick pool or quick list.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_insert, cl_rbmap_key, cl_pool_item_t, cl_list_item_t
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_left

+ +

[top][index]

+

NAME

+
       cl_rbmap_left
+
+

DESCRIPTION

+
       The cl_rbmap_left function returns the map item to the left
+       of the specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_rbmap_item_t* CL_API
+cl_rbmap_left(
+        IN      const cl_rbmap_item_t* const    p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_rbmap_item_t*)p_item->p_left );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose left child to return.
+
+ RETURN VALUES
+       Pointer to the map item to the left in a RB map.
+
+       Pointer to the map end if no item is to the left.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_root, cl_rbmap_right, cl_rbmap_end,
+       cl_rbmap_item_t
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_remove_item

+ +

[top][index]

+

NAME

+
       cl_rbmap_remove_item
+
+

DESCRIPTION

+
       The cl_rbmap_remove_item function removes the specified map item
+       from a RB map.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_rbmap_remove_item(
+        IN      cl_rbmap_t* const               p_map,
+        IN      cl_rbmap_item_t* const  p_item );
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item to remove from its RB map.
+
+ RETURN VALUES
+       This function does not return a value.
+
+       In a debug build, cl_rbmap_remove_item asserts that the item being removed
+       is in the specified map.
+
+

NOTES

+
       Removes the map item pointed to by p_item from its RB map.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_remove, cl_rbmap_reset, cl_rbmap_insert
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_reset

+ +

[top][index]

+

NAME

+
       cl_rbmap_reset
+
+

DESCRIPTION

+
       The cl_rbmap_reset function removes all items in a RB map,
+       leaving it empty.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_rbmap_reset(
+        IN      cl_rbmap_t* const       p_map )
+{
+        CL_ASSERT( p_map );
+        CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+        p_map->root.p_left = &p_map->nil;
+        p_map->count = 0;
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure to empty.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_remove, cl_rbmap_remove_item
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_right

+ +

[top][index]

+

NAME

+
       cl_rbmap_right
+
+

DESCRIPTION

+
       The cl_rbmap_right function returns the map item to the right
+       of the specified map item.
+
+

SYNOPSIS

+
CL_INLINE cl_rbmap_item_t* CL_API
+cl_rbmap_right(
+        IN      const cl_rbmap_item_t* const    p_item )
+{
+        CL_ASSERT( p_item );
+        return( (cl_rbmap_item_t*)p_item->p_right );
+}
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a map item whose right child to return.
+
+ RETURN VALUES
+       Pointer to the map item to the right in an RB map.
+
+       Pointer to the map end if no item is to the right.
+
+

SEE ALSO

+
       RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_end,
+       cl_rbmap_item_t
+
+
+
+ +

[Functions] +Component Library: RB Map/cl_rbmap_root

+ +

[top][index]

+

NAME

+
       cl_rbmap_root
+
+

DESCRIPTION

+
       The cl_rbmap_root function returns the root of an RB map.
+
+

SYNOPSIS

+
CL_INLINE cl_rbmap_item_t* const CL_API
+cl_rbmap_root(
+        IN      const cl_rbmap_t* const p_map )
+{
+        CL_ASSERT( p_map );
+        return( p_map->root.p_left );
+}
+
+

PARAMETERS

+
       p_map
+               [in] Pointer to a cl_rbmap_t structure whose root to return.
+
+

RETURN VALUE

+
       Pointer to the map item at the root of the map.
+
+       Pointer to the map end if the map is empty.
+
+

NOTES

+
       cl_rbmap_root returns the item at the root of the map.  If the returned
+       item pointer compares equal to the map end (see cl_rbmap_end), the map
+       is empty.
+
+
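+       As an illustrative sketch (assuming an initialized map), the root and
+       child accessors support manual traversal; comparing an item against the
+       map end detects the nil sentinel:
+
+               cl_rbmap_item_t         *p_item = cl_rbmap_root( &map );
+
+               while( p_item != cl_rbmap_end( &map ) )
+               {
+                       /* Visit p_item, then descend to the left as an example. */
+                       p_item = cl_rbmap_left( p_item );
+               }
+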

SEE ALSO

+
       RB Map, cl_rbmap_head, cl_rbmap_tail, cl_rbmap_next, cl_rbmap_prev,
+       cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up
+
+
+
+ +

[Structures] +Component Library: RB Map/cl_rbmap_t

+ +

[top][index]

+

NAME

+
       cl_rbmap_t
+
+

DESCRIPTION

+
       RB map structure.
+
+       The cl_rbmap_t structure should be treated as opaque and should
+       be manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_rbmap
+{
+        cl_rbmap_item_t root;
+        cl_rbmap_item_t nil;
+        cl_state_t              state;
+        size_t                  count;
+
+} cl_rbmap_t;
+
+

FIELDS

+
       root
+               Map item that serves as root of the map.  The root is set up to
+               always have itself as parent.  The left pointer is set to point to
+               the item at the root.
+
+       nil
+               Map item that serves as the terminator (sentinel) for all leaves
+               of the map.
+
+       state
+               State of the map, used to verify that operations are permitted.
+
+       count
+               Number of items in the map.
+
+

SEE ALSO

+
       RB Map
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_reqmgr_h.html b/branches/Ndi/docs/complib/cl_reqmgr_h.html new file mode 100644 index 00000000..50b7d70d --- /dev/null +++ b/branches/Ndi/docs/complib/cl_reqmgr_h.html @@ -0,0 +1,463 @@ + + + + +./inc_docs/complib/cl_reqmgr_h.html + + + + +Generated from ./inc/complib/cl_reqmgr.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Request Manager

+ +

[top][parent][index]

+

NAME

+
       Request Manager
+
+

DESCRIPTION

+
       The Request Manager manages synchronous as well as asynchronous
+       requests for objects.
+
+       The request manager does not supply the objects, but merely reports
+       whether objects are available to satisfy requests. This allows users to
+       draw objects from various sources.
+
+       While the request manager manages synchronous and asynchronous requests
+       for objects, it does not itself operate asynchronously. Instead, the
+       cl_req_mgr_resume function returns information for resuming asynchronous
+       requests. If a call to cl_req_mgr_resume returns CL_SUCCESS, additional
+       requests may be able to resume. It is recommended that users flush
+       pending requests by calling cl_req_mgr_resume while CL_SUCCESS is returned.
+
+       The request manager functions operate on a cl_req_mgr_t structure which
+       should be treated as opaque and should be manipulated only through the
+       provided functions.
+
+
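+       A sketch of the recommended flush pattern (variable names are
+       illustrative; p_req_mgr is assumed to be initialized):
+
+               size_t                  count;
+               cl_pfn_req_cb_t pfn_cb;
+               const void              *ctx1, *ctx2;
+
+               while( cl_req_mgr_resume( p_req_mgr, &count, &pfn_cb,
+                       &ctx1, &ctx2 ) == CL_SUCCESS )
+               {
+                       /* Dispatch the resumed request using pfn_cb and contexts. */
+               }
+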

SEE ALSO

+
       Types:
+               cl_req_type_t
+
+       Structures:
+               cl_req_mgr_t
+
+       Callbacks:
+               cl_pfn_req_cb_t, cl_pfn_reqmgr_get_count_t
+
+       Initialization/Destruction:
+               cl_req_mgr_construct, cl_req_mgr_init, cl_req_mgr_destroy
+
+       Manipulation:
+               cl_req_mgr_get, cl_req_mgr_resume
+
+       Attributes:
+               cl_is_req_mgr_inited, cl_req_mgr_count
+
+
+
+ +

[Definitions] +Component Library: Request Manager/cl_pfn_req_cb_t

+ +

[top][index]

+

NAME

+
       cl_pfn_req_cb_t
+
+

DESCRIPTION

+
       The cl_pfn_req_cb_t function type defines a generic prototype used to
+       store a pointer to a user-defined callback function.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_req_cb_t)( void );
+
+

PARAMETERS

+
       This function does not take parameters.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Function pointers specified by this type do not have to match the
+       defined prototype, as these callbacks are never invoked directly by the
+       request manager.  When specifying a function with a different prototype,
+       cast the function pointer to this type.
+
+

SEE ALSO

+
       Request Manager, cl_req_mgr_get, cl_req_mgr_resume
+
+
+
+ +

[Definitions] +Component Library: Request Manager/cl_pfn_reqmgr_get_count_t

+ +

[top][index]

+

NAME

+
       cl_pfn_reqmgr_get_count_t
+
+

DESCRIPTION

+
       The cl_pfn_reqmgr_get_count_t function type defines the prototype for
+       functions used to retrieve the number of available objects in a pool.
+
+

SYNOPSIS

+
typedef size_t
+(CL_API *cl_pfn_reqmgr_get_count_t)(
+        IN      void*   context );
+
+

PARAMETERS

+
       context
+               [in] Context provided to cl_req_mgr_init through the
+               get_context parameter.
+
+

RETURN VALUE

+
       Returns the number of objects available in an object pool for which
+       requests are managed by a request manager.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function passed into cl_req_mgr_init. This function is invoked by the
+       request manager when trying to fulfill requests for resources, either
+       through a call to cl_req_mgr_get or cl_req_mgr_resume.
+
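+       A minimal sketch of such a callback (assuming the available objects live
+       in a cl_qpool_t whose address was passed as get_context):
+
+               static size_t CL_API
+               my_get_count(
+                       IN      void*   context )
+               {
+                       /* Report how many objects the pool can currently supply. */
+                       return cl_qpool_count( (cl_qpool_t*)context );
+               }
+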
+

SEE ALSO

+
       Request Manager, cl_req_mgr_init, cl_req_mgr_get, cl_req_mgr_resume
+
+
+
+ +

[Functions] +Component Library: Request Manager/cl_req_mgr_construct

+ +

[top][index]

+

NAME

+
       cl_req_mgr_construct
+
+

DESCRIPTION

+
       The cl_req_mgr_construct function constructs a request manager.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_req_mgr_construct(
+        IN      cl_req_mgr_t* const     p_req_mgr );
+
+

PARAMETERS

+
       p_req_mgr
+               [in] Pointer to a cl_req_mgr_t structure to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_req_mgr_construct allows calling cl_req_mgr_destroy without first
+       calling cl_req_mgr_init.
+
+       Calling cl_req_mgr_construct is a prerequisite to calling any other
+       request manager function except cl_req_mgr_init.
+
+

SEE ALSO

+
       Request Manager, cl_req_mgr_init, cl_req_mgr_destroy
+
+
+
+ +

[Functions] +Component Library: Request Manager/cl_req_mgr_destroy

+ +

[top][index]

+

NAME

+
       cl_req_mgr_destroy
+
+

DESCRIPTION

+
       The cl_req_mgr_destroy function destroys a request manager.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_req_mgr_destroy(
+        IN      cl_req_mgr_t* const     p_req_mgr );
+
+

PARAMETERS

+
       p_req_mgr
+               [in] Pointer to a cl_req_mgr_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_req_mgr_destroy frees all memory allocated by the request manager.
+       Further operations on the request manager should not be attempted.
+
+       This function should only be called after a call to cl_req_mgr_construct
+       or cl_req_mgr_init.
+
+

SEE ALSO

+
       Request Manager, cl_req_mgr_construct, cl_req_mgr_init
+
+
+
+ +

[Functions] +Component Library: Request Manager/cl_req_mgr_get

+ +

[top][index]

+

NAME

+
       cl_req_mgr_get
+
+

DESCRIPTION

+
       The cl_req_mgr_get function handles synchronous and asynchronous
+       requests for objects.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_req_mgr_get(
+        IN              cl_req_mgr_t* const     p_req_mgr,
+        IN OUT  size_t* const           p_count,
+        IN              const cl_req_type_t     req_type,
+        IN              cl_pfn_req_cb_t         pfn_callback,
+        IN              const void* const       context1,
+        IN              const void* const       context2 );
+
+

PARAMETERS

+
       p_req_mgr
+               [in] Pointer to a cl_req_mgr_t structure from which to check
+               for resources.
+
+       p_count
+               [in/out] On input, contains the number of objects requested.
+               On output, contains the number of objects available.
+
+       req_type
+               [in] Enumerated type describing the type of request. Valid values are:
+                       REQ_GET_SYNC
+                               Synchronous request.
+                       REQ_GET_ASYNC
+                               Asynchronous requests for which all objects are required at
+                               once.
+                       REQ_GET_PARTIAL_OK
+                               Asynchronous requests that may be broken into multiple smaller
+                               requests.
+
+       pfn_callback
+               [in] Pointer to a callback function for use by the caller. This
+               callback function is never invoked by the request manager.
+
+       context1
+               [in] First of two contexts for a resource request.
+
+       context2
+               [in] Second of two contexts for a resource request.
+
+ RETURN VALUES
+       CL_SUCCESS if all objects requested are available.
+
+       CL_PENDING if the request could not be completed in its entirety.
+       The p_count parameter contains the number of objects immediately available.
+
+       CL_INSUFFICIENT_RESOURCES if the request could not be completed due to
+       insufficient objects being available.
+
+       CL_INSUFFICIENT_MEMORY if the request failed due to a lack of system memory.
+
+

NOTES

+
       Upon successful completion of this function, the p_count parameter contains
+       the number of objects available.
+
+       Synchronous requests fail if there are any asynchronous requests pending,
+       or if there are not enough resources to immediately satisfy the request in
+       its entirety.
+
+       Asynchronous requests fail if there is insufficient system memory to
+       queue them.
+
+       Once an asynchronous request is queued, use cl_req_mgr_resume to retrieve
+       information for resuming queued requests.
+
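+       For illustration (assuming an initialized request manager), a
+       synchronous request for four objects might look like:
+
+               size_t          count = 4;
+
+               if( cl_req_mgr_get( p_req_mgr, &count, REQ_GET_SYNC,
+                       NULL, NULL, NULL ) == CL_SUCCESS )
+               {
+                       /* count objects may now be taken from the object source. */
+               }
+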
+

SEE ALSO

+
       Request Manager, cl_req_mgr_resume
+
+
+
+ +

[Functions] +Component Library: Request Manager/cl_req_mgr_init

+ +

[top][index]

+

NAME

+
       cl_req_mgr_init
+
+

DESCRIPTION

+
       The cl_req_mgr_init function initializes a request manager for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_req_mgr_init(
+        IN      cl_req_mgr_t* const                     p_req_mgr,
+        IN      cl_pfn_reqmgr_get_count_t       pfn_get_count,
+        IN      const void* const                       get_context );
+
+

PARAMETERS

+
       p_req_mgr
+               [in] Pointer to a cl_req_mgr_t structure to initialize.
+
+       pfn_get_count
+               [in] Callback function invoked by the request manager to get the
+               number of objects available in a pool of objects for which the
+               request manager is managing requests.
+               See the cl_pfn_reqmgr_get_count_t function type declaration for
+               details about the callback function.
+
+       get_context
+               [in] Context to pass into the function specified by the
+               pfn_get_count parameter.
+
+ RETURN VALUES
+       CL_SUCCESS if the request manager was successfully initialized.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+       the request manager.
+
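+       A minimal initialization sketch (my_get_count and obj_pool are
+       illustrative names, not part of the header):
+
+               cl_req_mgr_t    req_mgr;
+
+               cl_req_mgr_construct( &req_mgr );
+               if( cl_req_mgr_init( &req_mgr, my_get_count,
+                       &obj_pool ) != CL_SUCCESS )
+               {
+                       cl_req_mgr_destroy( &req_mgr );
+               }
+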
+

SEE ALSO

+
       Request Manager, cl_req_mgr_construct, cl_req_mgr_destroy, cl_req_mgr_get,
+       cl_req_mgr_resume, cl_pfn_reqmgr_get_count_t
+
+
+
+ +

[Functions] +Component Library: Request Manager/cl_req_mgr_resume

+ +

[top][index]

+

NAME

+
       cl_req_mgr_resume
+
+

DESCRIPTION

+
       The cl_req_mgr_resume function attempts to resume queued requests.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_req_mgr_resume(
+        IN      cl_req_mgr_t* const             p_req_mgr,
+        OUT     size_t* const                   p_count,
+        OUT     cl_pfn_req_cb_t* const  ppfn_callback,
+        OUT     const void** const              p_context1,
+        OUT     const void** const              p_context2 );
+
+

PARAMETERS

+
       p_req_mgr
+               [in] Pointer to a cl_req_mgr_t structure from which to resume requests.
+
+       p_count
+               [out] Contains the number of objects available for a resuming request.
+
+       ppfn_callback
+               [out] Contains the pfn_callback value for the resuming request, as
+               provided to the call to the cl_req_mgr_get function.
+
+       p_context1
+               [out] Contains the context1 value for the resuming request, as provided
+               to the call to the cl_req_mgr_get function.
+
+       p_context2
+               [out] Contains the context2 value for the resuming request, as provided
+               to the call to the cl_req_mgr_get function.
+
+ RETURN VALUES
+       CL_SUCCESS if a request was completed.
+
+       CL_PENDING if a request was continued, but not completed.
+
+       CL_INSUFFICIENT_RESOURCES if a request could not be continued due to
+       a lack of resources.
+
+       CL_NOT_DONE if there were no pending requests.
+
+

NOTES

+
       cl_req_mgr_resume resumes at most one request. Further requests may be
+       able to be resumed if this call returns CL_SUCCESS.
+
+

SEE ALSO

+
       Request Manager, cl_req_mgr_get
+
+
+
+ +

[Structures] +Component Library: Request Manager/cl_req_mgr_t

+ +

[top][index]

+

NAME

+
       cl_req_mgr_t
+
+

DESCRIPTION

+
       Request manager structure.
+
+       The cl_req_mgr_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_req_mgr
+{
+        cl_pfn_reqmgr_get_count_t       pfn_get_count;
+        const void                                      *get_context;
+        cl_qlist_t                                      request_queue;
+        cl_qpool_t                                      request_pool;
+
+} cl_req_mgr_t;
+
+

FIELDS

+
       pfn_get_count
+               Pointer to the count callback function.
+
+       get_context
+               Context to pass as single parameter to count callback.
+
+       request_queue
+               Pending requests for elements.
+
+       request_pool
+               Pool of request structures for storing requests in the request queue.
+
+

SEE ALSO

+
       Request Manager
+
+
+
+ +

[Definitions] +Component Library: Request Manager/cl_req_type_t

+ +

[top][index]

+

NAME

+
       cl_req_type_t
+
+

DESCRIPTION

+
       The cl_req_type_t enumerated type describes the type of request.
+
+

SYNOPSIS

+
typedef enum _cl_req_type
+{
+        REQ_GET_SYNC,
+        REQ_GET_ASYNC,
+        REQ_GET_PARTIAL_OK
+
+} cl_req_type_t;
+
+

VALUES

+
       REQ_GET_SYNC
+               Synchronous request.
+
+       REQ_GET_ASYNC
+               Asynchronous requests for which all objects are required at once.
+
+       REQ_GET_PARTIAL_OK
+               Asynchronous requests that may be broken into multiple smaller requests.
+
+

SEE ALSO

+
       Request Manager, cl_req_mgr_get
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_spinlock_h.html b/branches/Ndi/docs/complib/cl_spinlock_h.html new file mode 100644 index 00000000..960e2e2b --- /dev/null +++ b/branches/Ndi/docs/complib/cl_spinlock_h.html @@ -0,0 +1,210 @@ + + + + +./inc_docs/complib/cl_spinlock_h.html + + + + +Generated from ./inc/complib/cl_spinlock.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Spinlock

+ +

[top][parent][index]

+

NAME

+
       Spinlock
+
+

DESCRIPTION

+
       Spinlock provides synchronization between threads for exclusive access to
+       a resource.
+
+       The spinlock functions manipulate a cl_spinlock_t structure which should
+       be treated as opaque and should be manipulated only through the provided
+       functions.
+
+
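+       A typical lifecycle, sketched for illustration:
+
+               cl_spinlock_t   lock;
+
+               cl_spinlock_construct( &lock );
+               if( cl_spinlock_init( &lock ) == CL_SUCCESS )
+               {
+                       cl_spinlock_acquire( &lock );
+                       /* Exclusive access to the shared resource here. */
+                       cl_spinlock_release( &lock );
+               }
+               cl_spinlock_destroy( &lock );
+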

SEE ALSO

+
       Structures:
+               cl_spinlock_t
+
+       Initialization:
+               cl_spinlock_construct, cl_spinlock_init, cl_spinlock_destroy
+
+       Manipulation
+               cl_spinlock_acquire, cl_spinlock_release
+
+
+
+ +

[Functions] +Component Library: Spinlock/cl_spinlock_acquire

+ +

[top][index]

+

NAME

+
       cl_spinlock_acquire
+
+

DESCRIPTION

+
       The cl_spinlock_acquire function acquires a spin lock.
+       This version of lock does not prevent an interrupt from
+       occurring on the processor on which the code is being
+       executed.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_spinlock_acquire(
+        IN      cl_spinlock_t* const    p_spinlock );
+
+

PARAMETERS

+
       p_spinlock
+               [in] Pointer to a spin lock structure to acquire.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Spinlock, cl_spinlock_release
+
+
+
+ +

[Functions] +Component Library: Spinlock/cl_spinlock_construct

+ +

[top][index]

+

NAME

+
       cl_spinlock_construct
+
+

DESCRIPTION

+
       The cl_spinlock_construct function initializes the state of a
+       spin lock.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_spinlock_construct(
+        IN      cl_spinlock_t* const    p_spinlock );
+
+

PARAMETERS

+
       p_spinlock
+               [in] Pointer to a spin lock structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_spinlock_destroy without first calling
+       cl_spinlock_init.
+
+       Calling cl_spinlock_construct is a prerequisite to calling any other
+       spin lock function except cl_spinlock_init.
+
+

SEE ALSO

+
       Spinlock, cl_spinlock_init, cl_spinlock_destroy
+
+
+
+ +

[Functions] +Component Library: Spinlock/cl_spinlock_destroy

+ +

[top][index]

+

NAME

+
       cl_spinlock_destroy
+
+

DESCRIPTION

+
       The cl_spinlock_destroy function performs all necessary cleanup of a
+       spin lock.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_spinlock_destroy(
+        IN      cl_spinlock_t* const    p_spinlock );
+
+

PARAMETERS

+
       p_spinlock
+               [in] Pointer to a spin lock structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Performs any necessary cleanup of a spin lock. This function must only
+       be called if either cl_spinlock_construct or cl_spinlock_init has been
+       called.
+
+

SEE ALSO

+
       Spinlock, cl_spinlock_construct, cl_spinlock_init
+
+
+
+ +

[Functions] +Component Library: Spinlock/cl_spinlock_init

+ +

[top][index]

+

NAME

+
       cl_spinlock_init
+
+

DESCRIPTION

+
       The cl_spinlock_init function initializes a spin lock for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_spinlock_init(
+        IN      cl_spinlock_t* const    p_spinlock );
+
+

PARAMETERS

+
       p_spinlock
+               [in] Pointer to a spin lock structure to initialize.
+
+ RETURN VALUES
+       CL_SUCCESS if initialization succeeded.
+
+       CL_ERROR if initialization failed. Callers should call
+       cl_spinlock_destroy to clean up any resources allocated during
+       initialization.
+
+

NOTES

+
       Initializes the spin lock structure.  Allows calling cl_spinlock_acquire
+       and cl_spinlock_release.
+
+

SEE ALSO

+
       Spinlock, cl_spinlock_construct, cl_spinlock_destroy,
+       cl_spinlock_acquire, cl_spinlock_release
+
+
+
+ +

[Functions] +Component Library: Spinlock/cl_spinlock_release

+ +

[top][index]

+

NAME

+
       cl_spinlock_release
+
+

DESCRIPTION

+
       The cl_spinlock_release function releases a spin lock object.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_spinlock_release(
+        IN      cl_spinlock_t* const    p_spinlock );
+
+

PARAMETERS

+
       p_spinlock
+               [in] Pointer to a spin lock structure to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Releases a spin lock after a call to cl_spinlock_acquire.
+
+

SEE ALSO

+
       Spinlock, cl_spinlock_acquire
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_syscallback_h.html b/branches/Ndi/docs/complib/cl_syscallback_h.html new file mode 100644 index 00000000..e02cbcd0 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_syscallback_h.html @@ -0,0 +1,243 @@ + + + + +./inc_docs/complib/cl_syscallback_h.html + + + + +Generated from ./inc/complib/cl_syscallback.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/System Callback

+ +

[top][parent][index]

+

NAME

+
       System Callback
+
+

DESCRIPTION

+
       The System Callback provider uses threads from a system thread-pool to
+       invoke specified callback functions.
+
+       Callbacks can be queued in a low- or high-priority queue for processing.
+
+       cl_thread_suspend and cl_thread_stall can be used to delay or stall the
+       callback thread.
+
+       Environments that do not have a native system thread-pool emulate this
+       functionality to provide cross-environment support.
+
+       The cl_sys_callback_item_t structure should be treated as opaque and be
+       manipulated only through the provided functions.
+
+
+
+ +

[Functions] +Component Library: System Callback/cl_is_sys_callback_inited

+ +

[top][index]

+

NAME

+
       cl_is_sys_callback_inited
+
+

DESCRIPTION

+
       The cl_is_sys_callback_inited function returns whether the system
+       callback provider was initialized successfully.
+
+

SYNOPSIS

+
boolean_t
+__cl_is_sys_callback_inited( void );
+
+ RETURN VALUES
+       TRUE if the system callback provider was initialized successfully.
+
+       FALSE otherwise.
+
+

NOTES

+
       Allows checking the state of the system callback provider to determine
+       if invoking member functions is appropriate.
+
+

SEE ALSO

+
       System Callback
+
+
+
+ +

[Definitions] +Component Library: System Callback/cl_pfn_sys_callback_t

+ +

[top][index]

+

NAME

+
       cl_pfn_sys_callback_t
+
+

DESCRIPTION

+
       The cl_pfn_sys_callback_t function type defines the prototype for
+       functions invoked by the system callback provider.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_sys_callback_t)(
+        IN      void*   get_context,
+        IN      void*   queue_context );
+
+

PARAMETERS

+
       get_context
+               [in] Value of the get_context parameter specified in a call
+               to cl_sys_callback_get.
+
+       queue_context
+               [in] Value of the queue_context parameter specified in a call
+               to cl_sys_callback_queue.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by users as a parameter to the
+       cl_sys_callback_queue function.
+
+

SEE ALSO

+
       System Callback, cl_sys_callback_queue
+
+
+
+ +

[Functions] +Component Library: System Callback/cl_sys_callback_get

+ +

[top][index]

+

NAME

+
       cl_sys_callback_get
+
+

DESCRIPTION

+
       The cl_sys_callback_get function retrieves a system callback item.
+
+

SYNOPSIS

+
CL_EXPORT cl_sys_callback_item_t* CL_API
+cl_sys_callback_get(
+        IN      const void* const get_context );
+
+

PARAMETERS

+
       get_context
+               [in] Context value to pass into the callback function.
+
+ RETURN VALUES
+       Returns a pointer to a system callback item if successful.
+
+       Returns NULL if the call fails.
+
+

NOTES

+
       A system callback item must be released with a call to cl_sys_callback_put.
+
+       Care must be taken to prevent a system callback item from being returned
+       to the pool while it is queued. Callers of cl_sys_callback_queue must not
+       return the system callback item to the pool until their callback has been
+       invoked.
+
+       In Windows 2000 Kernel Mode, the get_context is a pointer to the device
+       object for which the system callback is being used.
+
+

SEE ALSO

+
       System Callback, cl_sys_callback_put, cl_sys_callback_queue
+
+
+
+ +

[Functions] +Component Library: System Callback/cl_sys_callback_put

+ +

[top][index]

+

NAME

+
       cl_sys_callback_put
+
+

DESCRIPTION

+
       The cl_sys_callback_put function releases the specified
+       system callback item.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_sys_callback_put(
+        IN      cl_sys_callback_item_t* const   p_item );
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a system callback item to release.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The p_item parameter points to a system callback item returned by
+       a previous call to cl_sys_callback_get.
+
+       The specified system callback item must not be queued when making
+       a call to this function.  This function can, however, be called
+       from the callback function.
+
+

SEE ALSO

+
       System Callback, cl_sys_callback_get, cl_sys_callback_queue
+
+
+
+ +

[Functions] +Component Library: System Callback/cl_sys_callback_queue

+ +

[top][index]

+

NAME

+
       cl_sys_callback_queue
+
+

DESCRIPTION

+
       The cl_sys_callback_queue function queues the specified system callback item
+       for execution.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_sys_callback_queue(
+        IN      cl_sys_callback_item_t* const   p_item,
+        IN      cl_pfn_sys_callback_t                   pfn_callback,
+        IN      const void* const                               queue_context,
+        IN      const boolean_t                                 high_priority );
+
+

PARAMETERS

+
       p_item
+               [in] Pointer to a system callback item.
+
+       pfn_callback
+               [in] Pointer to a function to be invoked by the system callback module.
+               See the cl_pfn_sys_callback_t function type definition for details
+               about the callback function.
+
+       queue_context
+               [in] Value passed to the system callback function.
+
+       high_priority
+               [in] Specifies whether the request should be queued in the high- or
+               low-priority queue.
+
+ RETURN VALUES
+       CL_SUCCESS if the system callback item was successfully queued.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       A thread from the system thread pool will invoke the specified callback
+       function with the get_context value specified in the call to
+       cl_sys_callback_get and the specified context as parameters.
+
+       The high priority queue is processed before the low priority queue. There
+       is no fairness algorithm implemented for removing items from the queues.
+
+       Care should be taken not to queue a given system callback item more
+       than once at a time.
+
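+       An illustrative sketch of the get/queue/put flow (my_callback and
+       my_obj are hypothetical names):
+
+               static void CL_API
+               my_callback(
+                       IN      void*   get_context,
+                       IN      void*   queue_context )
+               {
+                       /* Runs on a thread from the system thread-pool. */
+               }
+
+               cl_sys_callback_item_t  *p_item;
+
+               p_item = cl_sys_callback_get( my_obj );
+               if( p_item && cl_sys_callback_queue( p_item, my_callback,
+                       my_obj, FALSE ) != CL_SUCCESS )
+               {
+                       cl_sys_callback_put( p_item );
+               }
+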
+

SEE ALSO

+
       System Callback, cl_sys_callback_get, cl_pfn_sys_callback_t
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_thread_h.html b/branches/Ndi/docs/complib/cl_thread_h.html new file mode 100644 index 00000000..aeebc824 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_thread_h.html @@ -0,0 +1,164 @@ + + + + +./inc_docs/complib/cl_thread_h.html + + + + +Generated from ./inc/complib/cl_thread.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Functions] +Component Library: Thread/cl_is_blockable

+ +

[top][index]

+

NAME

+
       cl_is_blockable
+
+

DESCRIPTION

+
       The cl_is_blockable function indicates whether the current caller context is
+       blockable.
+
+

SYNOPSIS

+
CL_EXPORT boolean_t CL_API
+cl_is_blockable( void );
+
+

RETURN VALUE

+
       TRUE if the caller's thread context can be blocked, i.e. it is safe
+       to perform a sleep or to call a down operation on a semaphore.
+
+       FALSE otherwise.
+
+

SEE ALSO

+
       Thread
+
+
+
+ +

[Definitions] +Component Library: Thread/cl_pfn_thread_callback_t

+ +

[top][index]

+

NAME

+
       cl_pfn_thread_callback_t
+
+

DESCRIPTION

+
       The cl_pfn_thread_callback_t function type defines the prototype
+       for functions invoked by thread objects.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_thread_callback_t)(
+        IN      void*   context );
+
+

PARAMETERS

+
       context
+               [in] Value specified in a call to cl_thread_init or
+               cl_thread_pool_create.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function provided by users as a parameter to the cl_thread_init
+       and cl_thread_pool_create functions.
+
+

SEE ALSO

+
       Thread Pool
+
+
+
+ +

[Functions] +Component Library: Thread/cl_proc_count

+ +

[top][index]

+

NAME

+
       cl_proc_count
+
+

DESCRIPTION

+
       The cl_proc_count function returns the number of processors in the system.
+
+

SYNOPSIS

+
CL_EXPORT uint32_t CL_API
+cl_proc_count( void );
+
+

RETURN VALUE

+
       Returns the number of processors in the system.
+
+
+
+ +

[Functions] +Component Library: Thread/cl_thread_stall

+ +

[top][index]

+

NAME

+
       cl_thread_stall
+
+

DESCRIPTION

+
       The cl_thread_stall function stalls the calling thread for a minimum of
+       the specified number of microseconds.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_thread_stall(
+        IN      const uint32_t  pause_us );
+
+

PARAMETERS

+
       pause_us
+               [in] Number of microseconds to stall the calling thread.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       The cl_thread_stall function performs a busy wait for the specified
+       number of microseconds. Care should be taken when using this function as
+       it does not relinquish its quantum of operation. For longer wait
+       operations, users should call cl_thread_suspend if possible.
+
+

SEE ALSO

+
       Thread, cl_thread_suspend
+
+
+
+ +

[Functions] +Component Library: Thread/cl_thread_suspend

+ +

[top][index]

+

NAME

+
       cl_thread_suspend
+
+

DESCRIPTION

+
       The cl_thread_suspend function suspends the calling thread for a minimum
+       of the specified number of milliseconds.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_thread_suspend(
+        IN      const uint32_t  pause_ms );
+
+

PARAMETERS

+
       pause_ms
+               [in] Number of milliseconds to suspend the calling thread.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function should only be called if it is valid for the caller's thread
+       to enter a wait state. For stalling a thread that cannot enter a wait
+       state, callers should use cl_thread_stall.
+
+

SEE ALSO

+
       Thread, cl_thread_stall
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_threadpool_h.html b/branches/Ndi/docs/complib/cl_threadpool_h.html new file mode 100644 index 00000000..e6ca4418 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_threadpool_h.html @@ -0,0 +1,273 @@ + + + + +./inc_docs/complib/cl_threadpool_h.html + + + + +Generated from ./inc/complib/cl_threadpool.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Thread Pool

+ +

[top][parent][index]

+

NAME

+
       Thread Pool
+
+

DESCRIPTION

+
       The Thread Pool manages a user-specified number of threads.
+
+       Each thread in the thread pool waits for a user initiated signal before
+       invoking a user specified callback function. All threads in the thread
+       pool invoke the same callback function.
+
+       The thread pool functions operate on a cl_thread_pool_t structure which
+       should be treated as opaque, and should be manipulated only through the
+       provided functions.
+
+

SEE ALSO

+
       Structures:
+               cl_thread_pool_t
+
+       Initialization:
+               cl_thread_pool_construct, cl_thread_pool_init, cl_thread_pool_destroy
+
+       Manipulation
+               cl_thread_pool_signal
+
+
+
+ +

[Functions] +Component Library: Thread Pool/cl_thread_pool_construct

+ +

[top][index]

+

NAME

+
       cl_thread_pool_construct
+
+

DESCRIPTION

+
       The cl_thread_pool_construct function initializes the state of a
+       thread pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_thread_pool_construct(
+        IN      cl_thread_pool_t* const p_thread_pool );
+
+

PARAMETERS

+
       p_thread_pool
+               [in] Pointer to a thread pool structure.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_thread_pool_destroy without first calling
+       cl_thread_pool_init.
+
+       Calling cl_thread_pool_construct is a prerequisite to calling any other
+       thread pool function except cl_thread_pool_init.
+
+

SEE ALSO

+
       Thread Pool, cl_thread_pool_init, cl_thread_pool_destroy
+
+
+
+ +

[Functions] +Component Library: Thread Pool/cl_thread_pool_destroy

+ +

[top][index]

+

NAME

+
       cl_thread_pool_destroy
+
+

DESCRIPTION

+
       The cl_thread_pool_destroy function performs any necessary cleanup
+       for a thread pool.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_thread_pool_destroy(
+        IN      cl_thread_pool_t* const p_thread_pool );
+
+

PARAMETERS

+
       p_thread_pool
+               [in] Pointer to a thread pool structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function blocks until all threads exit, and must therefore not
+       be called from any of the thread pool's threads. Because of its blocking
+       nature, callers of cl_thread_pool_destroy must ensure that entering a wait
+       state is valid from the calling thread context.
+
+       This function should only be called after a call to
+       cl_thread_pool_construct or cl_thread_pool_init.
+
+

SEE ALSO

+
       Thread Pool, cl_thread_pool_construct, cl_thread_pool_init
+
+
+
+ +

[Functions] +Component Library: Thread Pool/cl_thread_pool_init

+ +

[top][index]

+

NAME

+
       cl_thread_pool_init
+
+

DESCRIPTION

+
       The cl_thread_pool_init function creates the threads to be
+       managed by a thread pool.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_thread_pool_init(
+        IN      cl_thread_pool_t* const         p_thread_pool,
+        IN      uint32_t                                        thread_count,
+        IN      cl_pfn_thread_callback_t        pfn_callback,
+        IN      const void* const                       context,
+        IN      const char* const                       name );
+
+

PARAMETERS

+
       p_thread_pool
+               [in] Pointer to a thread pool structure to initialize.
+
+       thread_count
+               [in] Number of threads to be managed by the thread pool.
+
+       pfn_callback
+               [in] Address of a function to be invoked by a thread.
+               See the cl_pfn_thread_callback_t function type definition for
+               details about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+       name
+               [in] Name to associate with the threads.  The name may be up to 16
+               characters, including a terminating null character.  All threads
+               created in the pool have the same name.
+
+ RETURN VALUES
+       CL_SUCCESS if the thread pool creation succeeded.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+       the thread pool.
+
+       CL_ERROR if the threads could not be created.
+
+

NOTES

+
       cl_thread_pool_init creates and starts the specified number of threads.
+       If thread_count is zero, the thread pool creates as many threads as there
+       are processors in the system.
+
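+       A minimal setup sketch (worker is an illustrative callback name):
+
+               static void CL_API
+               worker(
+                       IN      void*   context )
+               {
+                       /* Runs on a pool thread once per signal. */
+               }
+
+               cl_thread_pool_t        pool;
+
+               cl_thread_pool_construct( &pool );
+               if( cl_thread_pool_init( &pool, 0, worker, NULL,
+                       "my_pool" ) == CL_SUCCESS )
+               {
+                       cl_thread_pool_signal( &pool );
+               }
+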
+

SEE ALSO

+
       Thread Pool, cl_thread_pool_construct, cl_thread_pool_destroy,
+       cl_thread_pool_signal, cl_pfn_thread_callback_t
+
+
+
+ +

[Functions] +Component Library: Thread Pool/cl_thread_pool_signal

+ +

[top][index]

+

NAME

+
       cl_thread_pool_signal
+
+

DESCRIPTION

+
       The cl_thread_pool_signal function signals a single thread of
+       the thread pool's callback function.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_thread_pool_signal(
+        IN      cl_thread_pool_t* const p_thread_pool );
+
+

PARAMETERS

+
       p_thread_pool
+               [in] Pointer to a thread pool structure to signal.
+
+ RETURN VALUES
+       CL_SUCCESS if the thread pool was successfully signalled.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       Each call to this function wakes up at most one waiting thread in
+       the thread pool.
+
+       If all threads are running, cl_thread_pool_signal has no effect.
+
+

SEE ALSO

+
       Thread Pool
+
+
+
+ +

[Structures] +Component Library: Thread Pool/cl_thread_pool_t

+ +

[top][index]

+

NAME

+
       cl_thread_pool_t
+
+

DESCRIPTION

+
       Thread pool structure.
+
+       The cl_thread_pool_t structure should be treated as opaque, and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_thread_pool
+{
+        cl_pfn_thread_callback_t        pfn_callback;
+        const void                                      *context;
+        cl_list_t                                       thread_list;
+        cl_event_t                                      wakeup_event;
+        cl_event_t                                      destroy_event;
+        boolean_t                                       exit;
+        cl_state_t                                      state;
+        atomic32_t                                      running_count;
+
+} cl_thread_pool_t;
+
+

FIELDS

+
       pfn_callback
+               Callback function for the thread to invoke.
+
+       context
+               Context to pass to the thread callback function.
+
+       thread_list
+               List of threads managed by the thread pool.
+
+       wakeup_event
+               Event used to signal threads to wake up and do work.
+
+       destroy_event
+               Event used to signal threads to exit.
+
+       exit
+               Flag used to indicate to threads that they should exit.
+
+       state
+               State of the thread pool.
+
+       running_count
+               Number of threads running.
+
+

SEE ALSO

+
       Thread Pool
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_timer_h.html b/branches/Ndi/docs/complib/cl_timer_h.html new file mode 100644 index 00000000..86882a04 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_timer_h.html @@ -0,0 +1,432 @@ + + + + +./inc_docs/complib/cl_timer_h.html + + + + +Generated from ./inc/complib/cl_timer.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Timer

+ +

[top][parent][index]

+

NAME

+
       Timer
+
+

DESCRIPTION

+
       The Timer provides the ability to schedule a function to be invoked at
+       a given time in the future.
+
+       The timer callback function must not perform any blocking operations.
+
+       The timer functions operate on a cl_timer_t structure which should be
+       treated as opaque and should be manipulated only through the provided
+       functions.
+
+

SEE ALSO

+
       Structures:
+               cl_timer_t
+
+       Callbacks:
+               cl_pfn_timer_callback_t
+
+       Initialization:
+               cl_timer_construct, cl_timer_init, cl_timer_destroy
+
+       Manipulation:
+               cl_timer_start, cl_timer_stop
+
+
+
+ +

[Functions] +Component Library: Time Stamp/cl_get_tick_count

+ +

[top][index]

+

NAME

+
       cl_get_tick_count
+
+

DESCRIPTION

+
       The cl_get_tick_count function returns the raw high-resolution
+       performance counter value.
+
+

SYNOPSIS

+
CL_EXPORT uint64_t CL_API
+cl_get_tick_count( void );
+
+

RETURN VALUE

+
       Value of the high-resolution performance counter.
+
+

SEE ALSO

+
       Timer, cl_get_time_stamp, cl_get_tick_freq
+
+
+
+ +

[Functions] +Component Library: Time Stamp/cl_get_tick_freq

+ +

[top][index]

+

NAME

+
       cl_get_tick_freq
+
+

DESCRIPTION

+
       The cl_get_tick_freq function returns the frequency of the
+       high-resolution performance counter.
+
+

SYNOPSIS

+
CL_EXPORT uint64_t CL_API
+cl_get_tick_freq( void );
+
+

RETURN VALUE

+
       The frequency of the high-resolution performance counter.
+
+

SEE ALSO

+
       Timer, cl_get_time_stamp, cl_get_tick_count
+
+
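+       Together with cl_get_tick_count, the tick frequency allows converting
+       an elapsed tick delta to time units; a sketch (assuming the counter
+       does not wrap during the measurement):
+
+               uint64_t        start, elapsed_us;
+
+               start = cl_get_tick_count();
+               /* ... work being measured ... */
+               elapsed_us = ((cl_get_tick_count() - start) * 1000000) /
+                       cl_get_tick_freq();
+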
+
+ +

[Functions] +Component Library: Time Stamp/cl_get_time_stamp

+ +

[top][index]

+

NAME

+
       cl_get_time_stamp
+
+

DESCRIPTION

+
       The cl_get_time_stamp function returns the current time stamp in
+       microseconds since the system was booted.
+
+

SYNOPSIS

+
CL_EXPORT uint64_t CL_API
+cl_get_time_stamp( void );
+
+

RETURN VALUE

+
       Time elapsed, in microseconds, since the system was booted.
+
+

SEE ALSO

+
       Timer, cl_get_time_stamp_usec, cl_get_time_stamp_sec
+
+
+
+ +

[Functions] +Component Library: Time Stamp/cl_get_time_stamp_sec

+ +

[top][index]

+

NAME

+
       cl_get_time_stamp_sec
+
+

DESCRIPTION

+
       The cl_get_time_stamp_sec function returns the current time stamp in
+       seconds since the system was booted.
+
+

SYNOPSIS

+
CL_EXPORT uint32_t CL_API
+cl_get_time_stamp_sec( void );
+
+

RETURN VALUE

+
       Time elapsed, in seconds, since the system was booted.
+
+

SEE ALSO

+
       Timer, cl_get_time_stamp
+
+
+
+ +

[Functions] +Component Library: Time Stamp/cl_get_time_stamp_usec

+ +

[top][index]

+

NAME

+
       cl_get_time_stamp_usec
+
+

DESCRIPTION

+
       The cl_get_time_stamp_usec function returns the current time stamp in
+       microseconds since the system was booted.
+
+

SYNOPSIS

+
CL_INLINE uint64_t CL_API
+cl_get_time_stamp_usec( void )
+{
+        return cl_get_time_stamp();
+}
+
+

RETURN VALUE

+
       Time elapsed, in microseconds, since the system was booted.
+
+

SEE ALSO

+
       Timer, cl_get_time_stamp, cl_get_time_stamp_sec
+
+
+
+ +

[Definitions] +Component Library: Timer/cl_pfn_timer_callback_t

+ +

[top][index]

+

NAME

+
       cl_pfn_timer_callback_t
+
+

DESCRIPTION

+
       The cl_pfn_timer_callback_t function type defines the prototype for
+       functions used to notify users of a timer expiration.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_timer_callback_t)(
+        IN void*        context );
+
+

PARAMETERS

+
       context
+               [in] Value specified in a previous call to cl_timer_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_timer_init function.
+
+

SEE ALSO

+
       Timer, cl_timer_init
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_construct

+ +

[top][index]

+

NAME

+
       cl_timer_construct
+
+

DESCRIPTION

+
       The cl_timer_construct function initializes the state of a timer.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_timer_construct(
+        IN      cl_timer_t* const       p_timer );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure whose state to initialize.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_timer_destroy without first calling cl_timer_init.
+
+       Calling cl_timer_construct is a prerequisite to calling any other
+       timer function except cl_timer_init.
+
+

SEE ALSO

+
       Timer, cl_timer_init, cl_timer_destroy
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_destroy

+ +

[top][index]

+

NAME

+
       cl_timer_destroy
+
+

DESCRIPTION

+
       The cl_timer_destroy function performs any necessary cleanup of a timer.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_timer_destroy(
+        IN      cl_timer_t* const       p_timer );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_timer_destroy cancels any pending callbacks.
+
+       This function should only be called after a call to cl_timer_construct
+       or cl_timer_init.
+
+

SEE ALSO

+
       Timer, cl_timer_construct, cl_timer_init
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_init

+ +

[top][index]

+

NAME

+
       cl_timer_init
+
+

DESCRIPTION

+
       The cl_timer_init function initializes a timer for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_timer_init(
+        IN      cl_timer_t* const               p_timer,
+        IN      cl_pfn_timer_callback_t pfn_callback,
+        IN      const void* const               context );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure to initialize.
+
+       pfn_callback
+               [in] Address of a callback function to be invoked when a timer expires.
+               See the cl_pfn_timer_callback_t function type definition for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+ RETURN VALUES
+       CL_SUCCESS if the timer was successfully initialized.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       Allows calling cl_timer_start and cl_timer_stop.
+
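+       A minimal sketch (my_timer_cb is an illustrative name; the callback
+       must not block):
+
+               static void CL_API
+               my_timer_cb(
+                       IN      void*   context )
+               {
+                       /* Invoked when the timer expires. */
+               }
+
+               cl_timer_t      timer;
+
+               cl_timer_construct( &timer );
+               if( cl_timer_init( &timer, my_timer_cb, NULL ) == CL_SUCCESS )
+                       cl_timer_start( &timer, 1000 ); /* at least one second */
+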
+

SEE ALSO

+
       Timer, cl_timer_construct, cl_timer_destroy, cl_timer_start,
+       cl_timer_stop, cl_pfn_timer_callback_t
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_start

+ +

[top][index]

+

NAME

+
       cl_timer_start
+
+

DESCRIPTION

+
       The cl_timer_start function sets a timer to expire after a given interval.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_timer_start(
+        IN      cl_timer_t* const       p_timer,
+        IN      const uint32_t          time_ms );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure to schedule.
+
+       time_ms
+               [in] Time, in milliseconds, before the timer should expire.
+
+ RETURN VALUES
+       CL_SUCCESS if the timer was successfully scheduled.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       cl_timer_start implicitly stops the timer before scheduling it.
+
+       The interval specified by the time_ms parameter is a minimum interval.
+       The timer is guaranteed to expire no sooner than the desired interval, but
+       may take longer to expire.
+
+

SEE ALSO

+
       Timer, cl_timer_stop, cl_timer_trim
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_stop

+ +

[top][index]

+

NAME

+
       cl_timer_stop
+
+

DESCRIPTION

+
       The cl_timer_stop function stops a pending timer from expiring.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_timer_stop(
+        IN      cl_timer_t* const       p_timer );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

SEE ALSO

+
       Timer, cl_timer_start, cl_timer_trim
+
+
+
+ +

[Functions] +Component Library: Timer/cl_timer_trim

+ +

[top][index]

+

NAME

+
       cl_timer_trim
+
+

DESCRIPTION

+
       The cl_timer_trim function pulls in the absolute expiration
+       time of a timer if the current expiration time exceeds the specified
+       interval.
+
+       In other words, it sets a timer to expire after the given interval if
+       that interval is less than the time remaining before the current
+       expiration.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_timer_trim(
+        IN      cl_timer_t* const       p_timer,
+        IN      const uint32_t          time_ms );
+
+

PARAMETERS

+
       p_timer
+               [in] Pointer to a cl_timer_t structure to schedule.
+
+       time_ms
+               [in] Maximum time, in milliseconds, before the timer should expire.
+
+ RETURN VALUES
+       CL_SUCCESS if the timer was successfully scheduled.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       cl_timer_trim has no effect if the time interval is greater than the
+       remaining time when the timer is set.
+
+       If the new interval time is less than the remaining time, cl_timer_trim
+       implicitly stops the timer before resetting it.
+
+       If the timer is reset, it is guaranteed to expire no sooner than the
+       new interval, but may take longer to expire.
+
+

SEE ALSO

+
       Timer, cl_timer_start, cl_timer_stop
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_types_h.html b/branches/Ndi/docs/complib/cl_types_h.html new file mode 100644 index 00000000..fc26528f --- /dev/null +++ b/branches/Ndi/docs/complib/cl_types_h.html @@ -0,0 +1,410 @@ + + + + +./inc_docs/complib/cl_types_h.html + + + + +Generated from ./inc/complib/cl_types.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Definitions] +Component Library/Data Types

+ +

[top][parent][index]

+

NAME

+
       Data Types
+
+

DESCRIPTION

+
       The component library provides and uses explicitly sized types.
+
+

VALUES

+
       char
+               8-bit, defined by compiler.
+
+       void
+               0-bit, defined by compiler.
+
+       int8_t
+               8-bit signed integer.
+
+       uint8_t
+               8-bit unsigned integer.
+
+       int16_t
+               16-bit signed integer.
+
+       uint16_t
+               16-bit unsigned integer.
+
+       net16_t
+               16-bit network byte order value.
+
+       int32_t
+               32-bit signed integer.
+
+       uint32_t
+               32-bit unsigned integer.
+
+       net32_t
+               32-bit network byte order value.
+
+       int64_t
+               64-bit signed integer.
+
+       uint64_t
+               64-bit unsigned integer.
+
+       net64_t
+               64-bit network byte order value.
+
+       intn_t
+               Signed natural sized integer.  32-bit on a 32-bit platform, 64-bit on
+               a 64-bit platform.
+
+       uintn_t
+               Unsigned natural sized integer.  32-bit on a 32-bit platform, 64-bit on
+               a 64-bit platform.
+
+       boolean_t
+               Integral sized.  Set to TRUE or FALSE and used in logical expressions.
+
+

NOTES

+
       Pointer types are not defined as these provide no value and can potentially
+       lead to naming confusion.
+
+
+
+ +

[Definitions] +Component Library/Object States

+ +

[top][parent][index]

+

NAME

+
       Object States
+
+

DESCRIPTION

+
       The object states enumerated type defines the valid states of components.
+
+

SYNOPSIS

+
typedef enum _cl_state
+{
+        CL_UNINITIALIZED = 1,
+        CL_INITIALIZED,
+        CL_DESTROYING,
+        CL_DESTROYED
+
+} cl_state_t;
+
+

VALUES

+
       CL_UNINITIALIZED
+               Indicates that initialization was not invoked successfully.
+
+       CL_INITIALIZED
+               Indicates initialization was successful.
+
+       CL_DESTROYING
+               Indicates that the object is undergoing destruction.
+
+       CL_DESTROYED
+               Indicates that the object's destructor has already been called.  Most
+               objects set their final state to CL_DESTROYED before freeing the
+               memory associated with the object.
+
+
+
+ +

[Definitions] +Component Library/Parameter Keywords

+ +

[top][parent][index]

+

NAME

+
       Parameter Keywords
+
+

DESCRIPTION

+
       The Parameter Keywords can be used to clarify the usage of function
+       parameters to users.
+
+

VALUES

+
       IN
+               Designates that the parameter is used as input to a function.
+
+       OUT
+               Designates that the parameter's value will be set by the function.
+
+       OPTIONAL
+               Designates that the parameter is optional, and may be NULL.
+               The OPTIONAL keyword, if used, follows the parameter name.
+
+

EXAMPLE

+
       // Function declaration.
+       void*
+       my_func(
+           IN void* const p_param1,
+           OUT void** const p_handle OPTIONAL );
+
+

NOTES

+
       Multiple keywords can apply to a single parameter. The IN and OUT
+       keywords precede the parameter type. The OPTIONAL
+       keyword, if used, follows the parameter name.
+
+
+
+ +

[Definitions] +Component Library: Data Types/CL_STATUS_MSG

+ +

[top][index]

+

NAME

+
       CL_STATUS_MSG
+
+

DESCRIPTION

+
       The CL_STATUS_MSG macro returns a textual representation of
+       a cl_status_t code.
+
+

SYNOPSIS

+
*       const char*
+*       CL_STATUS_MSG(
+*               IN cl_status_t errcode );
+
+

PARAMETERS

+
       errcode
+               [in] cl_status_t code for which to return a text representation.
+
+

RETURN VALUE

+
       Pointer to a string containing a textual representation of the errcode
+       parameter.
+
+

NOTES

+
       This macro performs boundary checking on the cl_status_t value,
+       masking off the upper 24-bits. If the value is out of bounds, the string
+       "invalid status code" is returned.
+
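+       A usage sketch (printf stands in for whatever logging the caller uses):
+
+               cl_status_t     status = cl_spinlock_init( &lock );
+
+               if( status != CL_SUCCESS )
+                       printf( "init failed: %s\n", CL_STATUS_MSG( status ) );
+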
+

SEE ALSO

+
       cl_status_t
+
+
+
+ +

[Definitions] +Component Library: Data Types/cl_status_t

+ +

[top][index]

+

NAME

+
       cl_status_t
+
+

DESCRIPTION

+
       The cl_status_t return types are used by the component library to
+       provide detailed function return values.
+
+

SYNOPSIS

+
typedef enum _cl_status
+{
+        CL_SUCCESS = 0,
+        CL_ERROR,
+        CL_INVALID_STATE,
+        CL_INVALID_OPERATION,
+        CL_INVALID_SETTING,
+        CL_INVALID_PARAMETER,
+        CL_INSUFFICIENT_RESOURCES,
+        CL_INSUFFICIENT_MEMORY,
+        CL_INVALID_PERMISSION,
+        CL_COMPLETED,
+        CL_NOT_DONE,
+        CL_PENDING,
+        CL_TIMEOUT,
+        CL_CANCELED,
+        CL_REJECT,
+        CL_OVERRUN,
+        CL_NOT_FOUND,
+        CL_UNAVAILABLE,
+        CL_BUSY,
+        CL_DISCONNECT,
+        CL_DUPLICATE,
+        CL_INVALID_REQUEST,
+
+        CL_STATUS_COUNT                 /* should be the last value */
+
+} cl_status_t;
+
+

SEE ALSO

+
       Data Types, CL_STATUS_MSG
+
+
+
+ +

[Functions] +Component Library: Error Trapping/cl_panic

+ +

[top][index]

+

NAME

+
       cl_panic
+
+

DESCRIPTION

+
       Halts execution of the current process.  Halts the system if called
+       from the kernel.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_panic(
+        IN      const char* const       message,
+        IN      ... );
+
+

PARAMETERS

+
       message
+               [in] ANSI string formatted identically as for a call to the standard C
+               function printf describing the cause for the panic.
+
+       ...
+               [in] Extra parameters for string formatting, as defined for the
+               standard C function printf.
+
+

RETURN VALUE

+
       This function does not return.
+
+

NOTES

+
       The formatting of the message string is the same as for printf.
+
+       cl_panic sends the message to the current message logging target.
+
+
+
+ +

[Definitions] +Component Library: Object States/cl_is_state_valid

+ +

[top][index]

+

NAME

+
       cl_is_state_valid
+
+

DESCRIPTION

+
       The cl_is_state_valid function returns whether a state has a valid value.
+
+

SYNOPSIS

+
CL_INLINE boolean_t CL_API
+cl_is_state_valid(
+        IN      const cl_state_t        state )
+{
+        return( (state == CL_UNINITIALIZED) || (state == CL_INITIALIZED) ||
+                (state == CL_DESTROYING) || (state == CL_DESTROYED) );
+}
+
+

PARAMETERS

+
       state
+               State whose value to validate.
+
+ RETURN VALUES
+       TRUE if the specified state has a valid value.
+
+       FALSE otherwise.
+
+

NOTES

+
       This function is used in debug builds to check for valid states.  If an
+       uninitialized object is passed, the memory backing the state may contain
+       an invalid value.
+
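+       A typical debug-build usage sketch (p_obj is a hypothetical object
+       carrying a cl_state_t field):
+
+       CL_ASSERT( cl_is_state_valid( p_obj->state ) );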
+

SEE ALSO

+
       Object States
+
+
+
+ +

[Definitions] +Component Library: Pointer Manipulation/offsetof

+ +

[top][index]

+

NAME

+
       offsetof
+
+

DESCRIPTION

+
       The offsetof macro returns the offset of a member within a structure.
+
+

SYNOPSIS

+
*       uintn_t
+*       offsetof(
+*               IN TYPE,
+*               IN MEMBER );
+
+

PARAMETERS

+
       TYPE
+               [in] Name of the structure containing the specified member.
+
+       MEMBER
+               [in] Name of the member whose offset in the specified structure
+               is to be returned.
+
+

RETURN VALUE

+
       Number of bytes from the beginning of the structure to the
+       specified member.
+
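+       A short sketch (my_struct_t is a hypothetical structure):
+
+       typedef struct _my_struct
+       {
+               uint32_t        count;
+               uint32_t        flags;
+       } my_struct_t;
+
+       // Byte offset of the flags member from the start of the structure.
+       uintn_t flags_offset = offsetof( my_struct_t, flags );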
+

SEE ALSO

+
       PARENT_STRUCT
+
+
+
+ +

[Definitions] +Component Library: Pointer Manipulation/PARENT_STRUCT

+ +

[top][index]

+

NAME

+
       PARENT_STRUCT
+
+

DESCRIPTION

+
       The PARENT_STRUCT macro returns a pointer to a structure
+       given a name and pointer to one of its members.
+
+

SYNOPSIS

+
*       PARENT_TYPE*
+*       PARENT_STRUCT(
+*               IN void* const p_member,
+*               IN PARENT_TYPE,
+*               IN MEMBER_NAME );
+
+

PARAMETERS

+
       p_member
+               [in] Pointer to the MEMBER_NAME member of a PARENT_TYPE structure.
+
+       PARENT_TYPE
+               [in] Name of the structure containing the specified member.
+
+       MEMBER_NAME
+               [in] Name of the member whose address is passed in the p_member
+               parameter.
+
+

RETURN VALUE

+
       Pointer to a structure of type PARENT_TYPE whose MEMBER_NAME member is
+       located at p_member.
+
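+       A common usage sketch, recovering a containing structure from a pointer
+       to one of its members (the object type is hypothetical):
+
+       typedef struct _my_obj
+       {
+               cl_list_item_t  list_item;
+               uint32_t        value;
+       } my_obj_t;
+
+       // p_list_item points at the list_item member of some my_obj_t.
+       my_obj_t *p_obj = PARENT_STRUCT( p_list_item, my_obj_t, list_item );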
+

SEE ALSO

+
       offsetof
+
+
+
+ +

[Definitions] +Component Library: Unreferenced Parameters/UNUSED_PARAM

+ +

[top][index]

+

NAME

+
       UNUSED_PARAM
+
+

DESCRIPTION

+
       The UNUSED_PARAM macro can be used to eliminate compiler warnings related
+       to intentionally unused formal parameters in function implementations.
+
+

SYNOPSIS

+
*       UNUSED_PARAM( P )
+
+

EXAMPLE

+
       void my_func( int32_t value )
+       {
+               UNUSED_PARAM( value );
+       }
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_vector_h.html b/branches/Ndi/docs/complib/cl_vector_h.html new file mode 100644 index 00000000..e316bdcd --- /dev/null +++ b/branches/Ndi/docs/complib/cl_vector_h.html @@ -0,0 +1,984 @@ + + + + +./inc_docs/complib/cl_vector_h.html + + + + +Generated from ./inc/complib/cl_vector.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Vector

+ +

[top][parent][index]

+

NAME

+
       Vector
+
+

DESCRIPTION

+
       The Vector is a self-sizing array. Like a traditional array, a vector
+       allows efficient constant time access to elements with a specified index.
+       A vector grows transparently as the user adds elements to the array.
+
+       As the vector grows in size, it does not relocate existing elements in
+       memory. This allows using pointers to elements stored in a vector.
+
+       Users can supply an initializer function that allows a vector to ensure
+       that new items added to the vector are properly initialized. A vector
+       calls the initializer function on a per object basis when growing the
+       array. The initializer is optional.
+
+       The initializer function can fail, and returns a cl_status_t. The vector
+       will call the destructor function, if provided, for an element that
+       failed initialization. If an initializer fails, a vector does not call
+       the initializer for objects in the remainder of the new memory allocation.
+
+       The cl_vector_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SEE ALSO

+
       Structures:
+               cl_vector_t
+
+       Callbacks:
+               cl_pfn_vec_init_t, cl_pfn_vec_dtor_t, cl_pfn_vec_apply_t,
+               cl_pfn_vec_find_t
+
+       Item Manipulation:
+               cl_vector_set_obj, cl_vector_obj
+
+       Initialization:
+               cl_vector_construct, cl_vector_init, cl_vector_destroy
+
+       Manipulation:
+               cl_vector_get_capacity, cl_vector_set_capacity,
+               cl_vector_get_size, cl_vector_set_size, cl_vector_set_min_size
+               cl_vector_get_ptr, cl_vector_get, cl_vector_at, cl_vector_set
+
+       Search:
+               cl_vector_find_from_start, cl_vector_find_from_end
+               cl_vector_apply_func
+
+
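+       A minimal lifecycle sketch (sizes chosen arbitrarily for illustration):
+
+       cl_vector_t     vec;
+       uint32_t        val = 42;
+
+       cl_vector_construct( &vec );
+       if( cl_vector_init( &vec, 8, 4, sizeof(uint32_t),
+               NULL, NULL, NULL ) == CL_SUCCESS )
+       {
+               cl_vector_set( &vec, 0, &val );         // store a copy
+               cl_vector_get( &vec, 0, &val );         // read it back
+       }
+       cl_vector_destroy( &vec );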
+
+ +

[Definitions] +Component Library: Vector/cl_pfn_vec_apply_t

+ +

[top][index]

+

NAME

+
       cl_pfn_vec_apply_t
+
+

DESCRIPTION

+
       The cl_pfn_vec_apply_t function type defines the prototype for functions
+       used to iterate elements in a vector.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_vec_apply_t)(
+        IN      const size_t            index,
+        IN      void* const                     p_element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       index
+               [in] Index of the element.
+
+       p_element
+               [in] Pointer to an element at the specified index in the vector.
+
+       context
+               [in] Context provided in a call to cl_vector_apply_func.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the function passed by users as a parameter to the cl_vector_apply_func
+       function.
+
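+       A sketch of a conforming callback that sums uint32_t elements (the
+       element type is an assumption for illustration):
+
+       static void CL_API
+       sum_cb(
+               IN      const size_t    index,
+               IN      void* const     p_element,
+               IN      void*           context )
+       {
+               UNUSED_PARAM( index );
+               *(uint32_t*)context += *(uint32_t*)p_element;
+       }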
+

SEE ALSO

+
       Vector, cl_vector_apply_func
+
+
+
+ +

[Definitions] +Component Library: Vector/cl_pfn_vec_dtor_t

+ +

[top][index]

+

NAME

+
       cl_pfn_vec_dtor_t
+
+

DESCRIPTION

+
       The cl_pfn_vec_dtor_t function type defines the prototype for functions
+       used as destructor for elements being deallocated from a vector.
+
+

SYNOPSIS

+
typedef void
+(CL_API *cl_pfn_vec_dtor_t)(
+        IN      void* const                     p_element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_element
+               [in] Pointer to an element being deallocated from a vector.
+
+       context
+               [in] Context provided in a call to cl_vector_init.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the destructor function provided by users as an optional parameter to
+       the cl_vector_init function.
+
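+       A sketch of a conforming destructor (my_elem_t and my_elem_cleanup are
+       hypothetical, standing in for whatever resource the element owns):
+
+       static void CL_API
+       elem_dtor_cb(
+               IN      void* const     p_element,
+               IN      void*           context )
+       {
+               UNUSED_PARAM( context );
+               // Release the resource held by the element being deallocated.
+               my_elem_cleanup( (my_elem_t*)p_element );
+       }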
+

SEE ALSO

+
       Vector, cl_vector_init
+
+
+
+ +

[Definitions] +Component Library: Vector/cl_pfn_vec_find_t

+ +

[top][index]

+

NAME

+
       cl_pfn_vec_find_t
+
+

DESCRIPTION

+
       The cl_pfn_vec_find_t function type defines the prototype for functions
+       used to find elements in a vector.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_vec_find_t)(
+        IN      const size_t            index,
+        IN      const void* const       p_element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       index
+               [in] Index of the element.
+
+       p_element
+               [in] Pointer to an element at the specified index in the vector.
+
+       context
+               [in] Context provided in a call to cl_vector_find_from_start or
+               cl_vector_find_from_end.
+
+ RETURN VALUES
+       Return CL_SUCCESS if the element was found. This stops vector iteration.
+
+       CL_NOT_FOUND to continue the vector iteration.
+
+

NOTES

+
       This function type is provided as function prototype reference for the
+       function provided by users as a parameter to the cl_vector_find_from_start
+       and cl_vector_find_from_end functions.
+
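+       A sketch of a conforming match callback (uint32_t elements assumed for
+       illustration):
+
+       static cl_status_t CL_API
+       match_cb(
+               IN      const size_t            index,
+               IN      const void* const       p_element,
+               IN      void*                   context )
+       {
+               UNUSED_PARAM( index );
+               // Stop the iteration when the element equals the sought value.
+               if( *(const uint32_t*)p_element == *(uint32_t*)context )
+                       return CL_SUCCESS;
+               return CL_NOT_FOUND;
+       }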
+

SEE ALSO

+
       Vector, cl_vector_find_from_start, cl_vector_find_from_end
+
+
+
+ +

[Definitions] +Component Library: Vector/cl_pfn_vec_init_t

+ +

[top][index]

+

NAME

+
       cl_pfn_vec_init_t
+
+

DESCRIPTION

+
       The cl_pfn_vec_init_t function type defines the prototype for functions
+       used as initializer for elements being allocated by a vector.
+
+

SYNOPSIS

+
typedef cl_status_t
+(CL_API *cl_pfn_vec_init_t)(
+        IN      void* const                     p_element,
+        IN      void*                           context );
+
+

PARAMETERS

+
       p_element
+               [in] Pointer to an element being added to a vector.
+
+       context
+               [in] Context provided in a call to cl_vector_init.
+
+ RETURN VALUES
+       Return CL_SUCCESS to indicate that the element was initialized successfully.
+
+       Any other cl_status_t value returned by the initializer is passed
+       through and returned by the cl_vector_init, cl_vector_set_size, and
+       cl_vector_set_min_size functions.
+
+       In situations where the vector's size needs to grow in order to satisfy
+       a call to cl_vector_set, a non-successful status returned by the
+       initializer callback causes the growth to stop.
+
+

NOTES

+
       This function type is provided as function prototype reference for
+       the initializer function provided by users as an optional parameter to
+       the cl_vector_init function.
+
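+       A sketch of a conforming initializer that zeroes each new element
+       (the element size is an assumption for illustration):
+
+       static cl_status_t CL_API
+       elem_init_cb(
+               IN      void* const     p_element,
+               IN      void*           context )
+       {
+               UNUSED_PARAM( context );
+               cl_memclr( p_element, sizeof(uint32_t) );
+               return CL_SUCCESS;
+       }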
+

SEE ALSO

+
       Vector, cl_vector_init
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_apply_func

+ +

[top][index]

+

NAME

+
       cl_vector_apply_func
+
+

DESCRIPTION

+
       The cl_vector_apply_func function invokes a specified function for every
+       element in a vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_vector_apply_func(
+        IN      const cl_vector_t* const        p_vector,
+        IN      cl_pfn_vec_apply_t                      pfn_callback,
+        IN      const void* const                       context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose elements to iterate.
+
+       pfn_callback
+               [in] Function invoked for every element in the array.
+               See the cl_pfn_vec_apply_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_vector_apply_func invokes the specified function for every element
+       in the vector, starting from the beginning of the vector.
+
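+       A usage sketch, pairing the call with the sum_cb callback sketched
+       under cl_pfn_vec_apply_t (vec is an initialized vector of uint32_t):
+
+       uint32_t total = 0;
+       cl_vector_apply_func( &vec, sum_cb, &total );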
+

SEE ALSO

+
       Vector, cl_vector_find_from_start, cl_vector_find_from_end,
+       cl_pfn_vec_apply_t
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_at

+ +

[top][index]

+

NAME

+
       cl_vector_at
+
+

DESCRIPTION

+
       The cl_vector_at function copies an element stored in a vector at a
+       specified index, performing boundary checks.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_at(
+        IN      const cl_vector_t* const        p_vector,
+        IN      const size_t                            index,
+        OUT     void* const                                     p_element );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure from which to get a copy of
+               an element.
+
+       index
+               [in] Index of the element.
+
+       p_element
+               [out] Pointer to storage for the element. Contains a copy of the
+               desired element upon successful completion of the call.
+
+ RETURN VALUES
+       CL_SUCCESS if an element was found at the specified index.
+
+       CL_INVALID_SETTING if the index was out of range.
+
+

NOTES

+
       cl_vector_at provides constant time access regardless of the index, and
+       performs boundary checking on the vector.
+
+       Upon success, the p_element parameter contains a copy of the desired element.
+
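+       A bounds-checked read sketch (vec is an initialized vector of uint32_t):
+
+       uint32_t val;
+       cl_status_t status = cl_vector_at( &vec, 5, &val );
+       if( status != CL_SUCCESS )
+               return status;          // index 5 was out of range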
+

SEE ALSO

+
       Vector, cl_vector_get, cl_vector_get_ptr
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_construct

+ +

[top][index]

+

NAME

+
       cl_vector_construct
+
+

DESCRIPTION

+
       The cl_vector_construct function constructs a vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_vector_construct(
+        IN      cl_vector_t* const      p_vector );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure to construct.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       Allows calling cl_vector_destroy without first calling cl_vector_init.
+
+       Calling cl_vector_construct is a prerequisite to calling any other
+       vector function except cl_vector_init.
+
+

SEE ALSO

+
       Vector, cl_vector_init, cl_vector_destroy
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_destroy

+ +

[top][index]

+

NAME

+
       cl_vector_destroy
+
+

DESCRIPTION

+
       The cl_vector_destroy function destroys a vector.
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_vector_destroy(
+        IN      cl_vector_t* const      p_vector );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure to destroy.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_vector_destroy frees all memory allocated for the vector. The vector
+       is left initialized to a zero capacity and size.
+
+       This function should only be called after a call to cl_vector_construct
+       or cl_vector_init.
+
+

SEE ALSO

+
       Vector, cl_vector_construct, cl_vector_init
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_find_from_end

+ +

[top][index]

+

NAME

+
       cl_vector_find_from_end
+
+

DESCRIPTION

+
       The cl_vector_find_from_end function uses a specified function to search
+       for elements in a vector starting from the highest index.
+
+

SYNOPSIS

+
CL_EXPORT size_t CL_API
+cl_vector_find_from_end(
+        IN      const cl_vector_t* const        p_vector,
+        IN      cl_pfn_vec_find_t                       pfn_callback,
+        IN      const void* const                       context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure to search.
+
+       pfn_callback
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_vec_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+ RETURN VALUES
+       Index of the element, if found.
+
+       Size of the vector if the element was not found.
+
+

NOTES

+
       cl_vector_find_from_end does not remove the found element from
+       the vector. The index of the element is returned when the function
+       provided by the pfn_callback parameter returns CL_SUCCESS.
+
+

SEE ALSO

+
       Vector, cl_vector_find_from_start, cl_vector_apply_func,
+       cl_pfn_vec_find_t
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_find_from_start

+ +

[top][index]

+

NAME

+
       cl_vector_find_from_start
+
+

DESCRIPTION

+
       The cl_vector_find_from_start function uses a specified function to
+       search for elements in a vector starting from the lowest index.
+
+

SYNOPSIS

+
CL_EXPORT size_t CL_API
+cl_vector_find_from_start(
+        IN      const cl_vector_t* const        p_vector,
+        IN      cl_pfn_vec_find_t                       pfn_callback,
+        IN      const void* const                       context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure to search.
+
+       pfn_callback
+               [in] Function invoked to determine if a match was found.
+               See the cl_pfn_vec_find_t function type declaration for details
+               about the callback function.
+
+       context
+               [in] Value to pass to the callback function.
+
+ RETURN VALUES
+       Index of the element, if found.
+
+       Size of the vector if the element was not found.
+
+

NOTES

+
       cl_vector_find_from_start does not remove the found element from
+       the vector. The index of the element is returned when the function
+       provided by the pfn_callback parameter returns CL_SUCCESS.
+
+

SEE ALSO

+
       Vector, cl_vector_find_from_end, cl_vector_apply_func, cl_pfn_vec_find_t
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_get

+ +

[top][index]

+

NAME

+
       cl_vector_get
+
+

DESCRIPTION

+
       The cl_vector_get function copies an element stored in a vector at a
+       specified index.
+
+

SYNOPSIS

+
CL_INLINE void CL_API
+cl_vector_get(
+        IN      const cl_vector_t* const        p_vector,
+        IN      const size_t                            index,
+        OUT     void* const                                     p_element )
+{
+        void *p_src;
+
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+        CL_ASSERT( p_element );
+
+        /* Get a pointer to the element. */
+        p_src = cl_vector_get_ptr( p_vector, index );
+        p_vector->pfn_copy( p_element, p_src, p_vector->element_size );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure from which to get a copy of
+               an element.
+
+       index
+               [in] Index of the element.
+
+       p_element
+               [out] Pointer to storage for the element. Contains a copy of the
+               desired element upon successful completion of the call.
+
+

RETURN VALUE

+
       This function does not return a value.
+
+

NOTES

+
       cl_vector_get provides constant time access regardless of the index.
+
+       cl_vector_get does not perform boundary checking on the vector, and
+       callers are responsible for providing an index that is within the range
+       of the vector. To access elements after performing boundary checks,
+       use cl_vector_at.
+
+       The p_element parameter contains a copy of the desired element upon
+       return from this function.
+
+

SEE ALSO

+
       Vector, cl_vector_get_ptr, cl_vector_at
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_get_capacity

+ +

[top][index]

+

NAME

+
       cl_vector_get_capacity
+
+

DESCRIPTION

+
       The cl_vector_get_capacity function returns the capacity of a vector.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_vector_get_capacity(
+        IN      const cl_vector_t* const        p_vector )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+        return( p_vector->capacity );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose capacity to return.
+
+

RETURN VALUE

+
       Capacity, in elements, of the vector.
+
+

NOTES

+
       The capacity is the number of elements that the vector can store, and
+       can be greater than the number of elements stored. To get the number of
+       elements stored in the vector, use cl_vector_get_size.
+
+

SEE ALSO

+
       Vector, cl_vector_set_capacity, cl_vector_get_size
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_get_ptr

+ +

[top][index]

+

NAME

+
       cl_vector_get_ptr
+
+

DESCRIPTION

+
       The cl_vector_get_ptr function returns a pointer to an element
+       stored in a vector at a specified index.
+
+

SYNOPSIS

+
CL_INLINE void* CL_API
+cl_vector_get_ptr(
+        IN      const cl_vector_t* const        p_vector,
+        IN      const size_t                            index )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+        return( p_vector->p_ptr_array[index] );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure from which to get a
+               pointer to an element.
+
+       index
+               [in] Index of the element.
+
+

RETURN VALUE

+
       Pointer to the element stored at the specified index.
+
+

NOTES

+
       cl_vector_get_ptr provides constant access times regardless of the index.
+
+       cl_vector_get_ptr does not perform boundary checking. Callers are
+       responsible for providing an index that is within the range of the vector.
+
+

SEE ALSO

+
       Vector, cl_vector_get, cl_vector_at, cl_vector_set, cl_vector_get_size
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_get_size

+ +

[top][index]

+

NAME

+
       cl_vector_get_size
+
+

DESCRIPTION

+
       The cl_vector_get_size function returns the size of a vector.
+
+

SYNOPSIS

+
CL_INLINE size_t CL_API
+cl_vector_get_size(
+        IN      const cl_vector_t* const        p_vector )
+{
+        CL_ASSERT( p_vector );
+        CL_ASSERT( p_vector->state == CL_INITIALIZED );
+
+        return( p_vector->size );
+}
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose size to return.
+
+

RETURN VALUE

+
       Size, in elements, of the vector.
+
+

SEE ALSO

+
       Vector, cl_vector_set_size, cl_vector_get_capacity
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_init

+ +

[top][index]

+

NAME

+
       cl_vector_init
+
+

DESCRIPTION

+
       The cl_vector_init function initializes a vector for use.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_init(
+        IN      cl_vector_t* const      p_vector,
+        IN      const size_t            min_size,
+        IN      const size_t            grow_size,
+        IN      const size_t            element_size,
+        IN      cl_pfn_vec_init_t       pfn_init OPTIONAL,
+        IN      cl_pfn_vec_dtor_t       pfn_dtor OPTIONAL,
+        IN      const void* const       context );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure to initialize.
+
+       min_size
+               [in] Initial number of elements.
+
+       grow_size
+               [in] Number of elements to allocate when incrementally growing
+               the vector.  A value of zero disables automatic growth.
+
+       element_size
+               [in] Size of each element.
+
+       pfn_init
+               [in] Initializer callback to invoke for every new element.
+               See the cl_pfn_vec_init_t function type declaration for details about
+               the callback function.
+
+       pfn_dtor
+               [in] Destructor callback to invoke for elements being deallocated.
+               See the cl_pfn_vec_dtor_t function type declaration for details about
+               the callback function.
+
+       context
+               [in] Value to pass to the callback functions to provide context.
+
+ RETURN VALUES
+       CL_SUCCESS if the vector was initialized successfully.
+
+       CL_INSUFFICIENT_MEMORY if the initialization failed.
+
+       cl_status_t value returned by optional initializer function specified by
+       the pfn_init parameter.
+
+

NOTES

+
       The constructor and initializer functions, if any, are invoked for every
+       new element in the array.
+
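+       An initialization sketch (elem_init_cb is the hypothetical initializer
+       sketched under cl_pfn_vec_init_t; sizes are arbitrary):
+
+       cl_vector_t     vec;
+       cl_status_t     status;
+
+       cl_vector_construct( &vec );
+       status = cl_vector_init( &vec, 8, 4, sizeof(uint32_t),
+               elem_init_cb, NULL, NULL );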
+

SEE ALSO

+
       Vector, cl_vector_construct, cl_vector_destroy, cl_vector_set,
+       cl_vector_get, cl_vector_get_ptr, cl_vector_at
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_set

+ +

[top][index]

+

NAME

+
       cl_vector_set
+
+

DESCRIPTION

+
       The cl_vector_set function sets the element at the specified index.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_set(
+        IN      cl_vector_t* const      p_vector,
+        IN      const size_t            index,
+        IN      void* const                     p_element );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure into which to store
+               an element.
+
+       index
+               [in] Index of the element.
+
+       p_element
+               [in] Pointer to an element to store in the vector.
+
+ RETURN VALUES
+       CL_SUCCESS if the element was successfully set.
+
+       CL_INSUFFICIENT_MEMORY if the vector could not be resized to accommodate
+       the new element.
+
+

NOTES

+
       cl_vector_set grows the vector as needed to accommodate the new element,
+       unless the grow_size parameter passed into the cl_vector_init function
+       was zero.
+
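+       A sketch that stores an element past the current size, relying on
+       automatic growth (vec was initialized with a non-zero grow_size):
+
+       uint32_t val = 7;
+       cl_status_t status = cl_vector_set( &vec, 100, &val );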
+

SEE ALSO

+
       Vector, cl_vector_get
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_set_capacity

+ +

[top][index]

+

NAME

+
       cl_vector_set_capacity
+
+

DESCRIPTION

+
       The cl_vector_set_capacity function reserves memory in a vector for a
+       specified number of elements.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_set_capacity(
+        IN      cl_vector_t* const      p_vector,
+        IN      const size_t            new_capacity );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose capacity to set.
+
+       new_capacity
+               [in] Total number of elements for which the vector should
+               allocate memory.
+
+ RETURN VALUES
+       CL_SUCCESS if the capacity was successfully set.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to satisfy the
+       operation. The vector is left unchanged.
+
+

NOTES

+
       cl_vector_set_capacity increases the capacity of the vector. It does
+       not change the size of the vector. If the requested capacity is less
+       than the current capacity, the vector is left unchanged.
+
+

SEE ALSO

+
       Vector, cl_vector_get_capacity, cl_vector_set_size,
+       cl_vector_set_min_size
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_set_min_size

+ +

[top][index]

+

NAME

+
       cl_vector_set_min_size
+
+

DESCRIPTION

+
       The cl_vector_set_min_size function resizes a vector to a specified size
+       if the vector is smaller than the specified size.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_set_min_size(
+        IN      cl_vector_t* const      p_vector,
+        IN      const size_t            min_size );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose minimum size to set.
+
+       min_size
+               [in] Minimum number of elements that the vector should contain.
+
+ RETURN VALUES
+       CL_SUCCESS if the vector size is greater than or equal to min_size.  This
+       could indicate that the vector's size was increased to min_size or
+       that the vector was already of sufficient size.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to resize the vector.
+       The vector is left unchanged.
+
+

NOTES

+
       If min_size is smaller than the current size of the vector, the vector is
+       unchanged. The vector is unchanged if the size could not be changed due
+       to insufficient memory being available to perform the operation.
+
+

SEE ALSO

+
       Vector, cl_vector_get_size, cl_vector_set_size, cl_vector_set_capacity
+
+
+
+ +

[Functions] +Component Library: Vector/cl_vector_set_size

+ +

[top][index]

+

NAME

+
       cl_vector_set_size
+
+

DESCRIPTION

+
       The cl_vector_set_size function resizes a vector, either increasing or
+       decreasing its size.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_vector_set_size(
+        IN      cl_vector_t* const      p_vector,
+        IN      const size_t            size );
+
+

PARAMETERS

+
       p_vector
+               [in] Pointer to a cl_vector_t structure whose size to set.
+
+       size
+               [in] Number of elements desired in the vector.
+
+ RETURN VALUES
+       CL_SUCCESS if the size of the vector was set successfully.
+
+       CL_INSUFFICIENT_MEMORY if there was not enough memory to complete the
+       operation. The vector is left unchanged.
+
+

NOTES

+
       cl_vector_set_size sets the vector to the specified size. If size is
+       smaller than the current size of the vector, the size is reduced.
+       The destructor function, if any, will be invoked for all elements that
+       are above size. Likewise, the constructor and initializer, if any, will
+       be invoked for all new elements.
+
+       This function can only fail if size is larger than the current capacity.
+
+

SEE ALSO

+
       Vector, cl_vector_get_size, cl_vector_set_min_size,
+       cl_vector_set_capacity
+
+
+
+ +

[Structures] +Component Library: Vector/cl_vector_t

+ +

[top][index]

+

NAME

+
       cl_vector_t
+
+

DESCRIPTION

+
       Vector structure.
+
+       The cl_vector_t structure should be treated as opaque and should be
+       manipulated only through the provided functions.
+
+

SYNOPSIS

+
typedef struct _cl_vector
+{
+        size_t                          size;
+        size_t                          grow_size;
+        size_t                          capacity;
+        size_t                          element_size;
+        cl_pfn_vec_init_t       pfn_init;
+        cl_pfn_vec_dtor_t       pfn_dtor;
+        cl_pfn_vec_copy_t       pfn_copy;
+        const void                      *context;
+        cl_qlist_t                      alloc_list;
+        void                            **p_ptr_array;
+        cl_state_t                      state;
+
+} cl_vector_t;
+
+

FIELDS

+
       size
+                Number of elements successfully initialized in the vector.
+
+       grow_size
+                Number of elements to allocate when growing.
+
+       capacity
+                Total number of elements allocated.
+
+       element_size
+                Size of each element.
+
+       pfn_init
+                User supplied element initializer.
+
+       pfn_dtor
+                User supplied element destructor.
+
+       pfn_copy
+                Copy operator.
+
+       context
+                User context for callbacks.
+
+       alloc_list
+                List of allocations.
+
+       p_ptr_array
+                Internal array of pointers to elements.
+
+       state
+               State of the vector.
+
+

SEE ALSO

+
       Vector
+
+
+ + diff --git a/branches/Ndi/docs/complib/cl_waitobj_h.html b/branches/Ndi/docs/complib/cl_waitobj_h.html new file mode 100644 index 00000000..d09adb96 --- /dev/null +++ b/branches/Ndi/docs/complib/cl_waitobj_h.html @@ -0,0 +1,356 @@ + + + + +./inc_docs/complib/cl_waitobj_h.html + + + + +Generated from ./inc/complib/cl_waitobj.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Wait Object

+ +

[top][parent][index]

+

NAME

+
       Wait Object
+
+

DESCRIPTION

+
       The Wait Object provides the capability for a user mode process to
+       create and manipulate a wait object that can also be manipulated from
+       kernel mode.
+
+

SEE ALSO

+
       Structures:
+               cl_waitobj_handle_t
+
+       User Mode Initialization/Destruction:
+               cl_waitobj_create
+               cl_waitobj_destroy
+
+       Kernel Mode Access:
+               cl_waitobj_ref
+               cl_waitobj_deref
+
+       Manipulation:
+               cl_waitobj_signal
+               cl_waitobj_reset
+               cl_waitobj_wait_on
+
+
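+       A minimal user mode sketch (the wait time is arbitrary):
+
+       cl_waitobj_handle_t     h_wait_obj;
+
+       if( cl_waitobj_create( FALSE, &h_wait_obj ) == CL_SUCCESS )
+       {
+               cl_waitobj_signal( h_wait_obj );
+               cl_waitobj_wait_on( h_wait_obj, 1000, TRUE );
+               cl_waitobj_destroy( h_wait_obj );
+       }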
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_create

+ +

[top][index]

+

NAME

+
       cl_waitobj_create
+
+

DESCRIPTION

+
       The cl_waitobj_create function creates a wait object.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_waitobj_create(
+        IN      const boolean_t                         manual_reset, 
+        OUT     cl_waitobj_handle_t* const      ph_wait_obj );
+
+

PARAMETERS

+
       manual_reset
+               [in] If FALSE, indicates that the event resets itself after releasing 
+               a single waiter.  If TRUE, the event remains in the signalled state 
+               until explicitly reset by a call to cl_waitobj_reset.
+
+       ph_wait_obj
+               [out] Pointer to a wait object handle set upon successful creation.
+
+ RETURN VALUES
+       CL_SUCCESS if the wait object was created successfully.
+
+       CL_ERROR if the wait object creation failed.
+
+

NOTES

+
       This function is only available in user mode.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_handle_t, cl_waitobj_destroy, 
+       cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_deref

+ +

[top][index]

+

NAME

+
       cl_waitobj_deref
+
+

DESCRIPTION

+
       The cl_waitobj_deref function releases a reference on a kernel mode 
+       wait object handle and allows the wait object to be destroyed.
+                                                               
+
+

SYNOPSIS

+
CL_EXPORT void CL_API
+cl_waitobj_deref(
+        IN      cl_waitobj_handle_t     h_kernel_wait_obj );
+
+

PARAMETERS

+
       h_kernel_wait_obj
+               [in] A wait object handle returned by a previous call to cl_waitobj_ref. 
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       This function is only available in kernel mode.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_handle_t, cl_waitobj_ref, 
+       cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_destroy

+ +

[top][index]

+

NAME

+
       cl_waitobj_destroy
+
+

DESCRIPTION

+
       The cl_waitobj_destroy function destroys a wait object.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_waitobj_destroy(
+        IN      cl_waitobj_handle_t     h_wait_obj );
+
+

PARAMETERS

+
       h_wait_obj
+               [in] A handle to the wait object to destroy, obtained by a previous
+               call to cl_waitobj_create.
+
+ RETURN VALUES
+       CL_SUCCESS if the wait object handle is destroyed.
+
+       CL_INVALID_PARAMETER if the wait object handle is invalid.
+
+

NOTES

+
       This function is only available in user mode.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_handle_t, cl_waitobj_create, 
+       cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Definitions] +Component Library: Wait Object/cl_waitobj_handle_t

+ +

[top][index]

+

NAME

+
       cl_waitobj_handle_t
+
+

DESCRIPTION

+
       Defines the handle for an OS wait object.
+
+

NOTES

+
       The wait object handle should be treated as opaque and is defined
+       differently depending on the target environment.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+       cl_waitobj_ref, cl_waitobj_deref, cl_waitobj_signal,
+       cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_ref

+ +

[top][index]

+

NAME

+
       cl_waitobj_ref
+
+

DESCRIPTION

+
       The cl_waitobj_ref function validates a user mode wait object handle 
+       and returns a kernel mode wait object handle.  A reference is taken
+       on the object to prevent its destruction even if the user mode 
+       application destroys it.
+                                                               
+
+

SYNOPSIS

+
CL_EXPORT cl_waitobj_handle_t CL_API
+cl_waitobj_ref(
+        IN      void                                    *h_user_wait_obj );
+
+

PARAMETERS

+
       h_user_wait_obj
+               [in] A wait object handle passed from user mode. 
+
+ RETURN VALUES
+       Returns a kernel wait object handle upon success.  The returned handle 
+       should only be used as a parameter to kernel mode calls.
+
+       Returns NULL in case of failure.
+
+

NOTES

+
       This function is only available in kernel mode.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_handle_t, cl_waitobj_deref,
+       cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_reset

+ +

[top][index]

+

NAME

+
       cl_waitobj_reset
+
+

DESCRIPTION

+
       The cl_waitobj_reset function sets a wait object to the non-signalled state.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_waitobj_reset(
+        IN      cl_waitobj_handle_t     h_wait_obj );
+
+

PARAMETERS

+
       h_wait_obj
+               [in] A handle to the wait object that needs to be reset.
+
+ RETURN VALUES
+       CL_SUCCESS if the wait object was successfully reset.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       In kernel mode, a pointer to a cl_event_t can safely be used instead of
+       a wait object handle.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+       cl_waitobj_ref, cl_waitobj_deref,
+       cl_waitobj_signal, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_signal

+ +

[top][index]

+

NAME

+
       cl_waitobj_signal
+
+

DESCRIPTION

+
       The cl_waitobj_signal function sets a wait object to the signalled 
+       state and releases one or more waiting threads.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_waitobj_signal(
+        IN      cl_waitobj_handle_t     h_wait_obj );
+
+

PARAMETERS

+
       h_wait_obj
+               [in] A handle to the wait object that needs to be signaled.
+ 
+ RETURN VALUES
+       CL_SUCCESS if the event was successfully signalled.
+
+       CL_ERROR otherwise.
+
+

NOTES

+
       For auto-reset wait objects, the wait object is reset automatically once 
+       a wait operation is satisfied. 
+
+       Triggering the wait object multiple times does not guarantee that the same 
+       number of wait operations are satisfied. This is because wait objects are 
+       either in a signalled or non-signalled state, and triggering a wait object 
+       that is already in the signalled state has no effect.
+
+       In kernel mode, a pointer to a cl_event_t can safely be used instead of
+       a wait object handle.
+
+

SEE ALSO

+
       Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+       cl_waitobj_ref, cl_waitobj_deref,
+       cl_waitobj_reset, cl_waitobj_wait_on
+
+
+
+ +

[Functions] +Component Library: Wait Object/cl_waitobj_wait_on

+ +

[top][index]

+

NAME

+
       cl_waitobj_wait_on
+
+

DESCRIPTION

+
       The cl_waitobj_wait_on function waits for the specified wait object to be
+       triggered, blocking for at least the specified time before timing out.
+
+

SYNOPSIS

+
CL_EXPORT cl_status_t CL_API
+cl_waitobj_wait_on(
+        IN      cl_waitobj_handle_t             h_wait_obj,
+        IN      const uint32_t                  wait_us,
+        IN      const boolean_t                 interruptible );
+
+

PARAMETERS

+
       h_wait_obj
+               [in] A handle to the wait object on which to wait.
+
+       wait_us 
+               [in] Number of microseconds to wait.
+
+       interruptible
+               [in] Indicates whether the wait operation can be interrupted
+               by external signals.
+
+ RETURN VALUES
+       CL_SUCCESS if the wait operation succeeded in response to the wait object 
+       being set.
+
+       CL_TIMEOUT if the specified time period elapses.
+
+       CL_NOT_DONE if the wait was interrupted by an external signal.
+
+       CL_ERROR if the wait operation failed.
+
+

NOTES

+
       If wait_us is set to EVENT_NO_TIMEOUT, the function will wait until the 
+       wait object is triggered and never time out.
+
+       If the timeout value is zero, this function simply tests the state of 
+       the wait object.
+
+       If the wait object is already in the signalled state at the time of the call
+       to cl_waitobj_wait_on, the call completes immediately with CL_SUCCESS.
+
+       In kernel mode, a pointer to a cl_event_t can safely be used instead of
+       a wait object handle.
+
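+       A polling sketch (a zero timeout only tests the object's state, per the
+       note above):
+
+       if( cl_waitobj_wait_on( h_wait_obj, 0, FALSE ) == CL_TIMEOUT )
+       {
+               // Not signalled yet; continue with other work.
+       }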
+

SEE ALSO

+
       Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+       cl_waitobj_ref, cl_waitobj_deref, 
+       cl_waitobj_signal, cl_waitobj_reset
+
+
+ + diff --git a/branches/Ndi/docs/complib/comp_lib_h.html b/branches/Ndi/docs/complib/comp_lib_h.html new file mode 100644 index 00000000..36e5f2d8 --- /dev/null +++ b/branches/Ndi/docs/complib/comp_lib_h.html @@ -0,0 +1,50 @@ + + + + +./inc_docs/complib/comp_lib_h.html + + + + +Generated from ./inc/complib/comp_lib.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +Component Library/Component Library

+ +

[top][index]

+

NAME

+
       component library 
+
+

DESCRIPTION

+
       The component library is a collection of components that can be used to
+       create complex projects quickly and reliably.
+
+       The component library simplifies development by eliminating the need to
+       re-implement existing functionality. This contributes to shorter
+       development cycles as well as smaller code bases, helping reduce the
+       number of bugs by leveraging tried and tested code.
+
+       The component library also provides the same interface in multiple
+       environments, such as kernel mode and user mode, allowing code to be used
+       in both, again reducing code duplication and development life cycles.
+
+       Components of the library all follow the same usage model, as follows:
+               - The constructor for all components should be called before any other
+               function for that component.  It is acceptable to call the initializer
+               without first calling the constructor.
+
+               - The initializer for all components must be called successfully
+               before any function manipulating that component is called.
+
+               - The destructor for all components must be called if the initializer
+               was called.
+
+       In a debug build, the components assert that the proper sequence is
+       followed.
+
+
+ + diff --git a/branches/Ndi/docs/iba/ib_al_h.html b/branches/Ndi/docs/iba/ib_al_h.html new file mode 100644 index 00000000..dc82f88b --- /dev/null +++ b/branches/Ndi/docs/iba/ib_al_h.html @@ -0,0 +1,10482 @@ + + + + +./inc_docs/iba/ib_al_h.html + + + + +Generated from ./inc/iba/ib_al.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:51 +
+
+ +

[Modules] +IB Access Layer API/Access Layer

+ +

[top][index]

+

NAME

+
       InfiniBand Access Layer
+
+

COPYRIGHT

+
       Copyright (c) 2003 Intel Corporation - All Rights Reserved.
+
+

DESCRIPTION

+
       The access layer provides transport level access to an InfiniBand fabric.
+       It supplies a foundation upon which a channel driver may be built.  The
+       access layer exposes the capabilities of the InfiniBand architecture and
+       adds support for higher-level functionality required by most users of an
+       InfiniBand fabric.  Users define the protocols and policies used by the
+       access layer, and the access layer implements them under the direction
+       of a user.
+
+
+
+ +

[Definitions] +Access Layer/ATS

+ +

[top][parent][index]

+

NAME

+
       DAPL Address Translation Service
+
+

DESCRIPTION

+
       ATS service ID, service name, and IPv4 offset for DAPL-compliant
+       ATS service records.
+
+
+
+ +

[Functions] +Access Layer/ib_add_svc_entry

+ +

[top][parent][index]

+

NAME

+
       ib_add_svc_entry
+
+

DESCRIPTION

+
       Adds a new service entry to an existing I/O controller.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_add_svc_entry(
+        IN              const   ib_ioc_handle_t                         h_ioc,
+        IN              const   ib_svc_entry_t* const           p_svc_entry,
+                OUT                     ib_svc_handle_t* const          ph_svc );
+
+

PARAMETERS

+
       h_ioc
+               [in] A handle to an existing I/O controller that will support the
+               added service.
+
+       p_svc_entry
+               [in] Service entry information that will be reported as part of the
+               controller's service profile.
+
+       ph_svc
+               [out] Upon successful completion of this call, this references a handle
+               to the added service.  This handle may be used to remove the service
+               entry.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The service entry was successfully added.
+
+       IB_INVALID_HANDLE
+               The I/O controller handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the service entry information or handle was not
+               provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the service entry.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the I/O
+               controller to register the service entry.
+
+

NOTES

+
       This routine adds a new service to an I/O controller.  Once added, the
+       service will be reported with the controller profile, provided that the
+       controller is registered with the local device manager.
+
+

SEE ALSO

+
       ib_create_ioc, ib_remove_svc_entry, ib_reg_ioc, ib_svc_entry_t
+
+
+
+ +

[Definitions] +Access Layer/ib_al_flags_t

+ +

[top][parent][index]

+

NAME

+
       ib_al_flags_t
+
+

DESCRIPTION

+
       Access layer flags used to direct the operation of various calls.
+
+

SYNOPSIS

+
typedef uint32_t                                                        ib_al_flags_t;
+#define IB_FLAGS_SYNC                                           0x00000001
+
+

VALUES

+
       IB_FLAGS_SYNC
+               Indicates that the given operation should be performed synchronously.
+               The call will block until it completes.  Callbacks will still be
+               invoked.
+
+

SEE ALSO

+
       ib_cm_req_t, ib_cm_rep_t, ib_cm_dreq_t, ib_cm_lap_t,
+       ib_reg_svc_req_t, ib_mcast_req_t, ib_query_req_t, ib_sub_req_t
+
+
+
+ +

[Functions] +Access Layer/ib_alloc_pd

+ +

[top][parent][index]

+

NAME

+
       ib_alloc_pd
+
+

DESCRIPTION

+
       Allocates a protection domain on the specified channel adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_alloc_pd(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN              const   ib_pd_type_t                            pd_type,
+        IN              const   void* const                                     pd_context,
+                OUT                     ib_pd_handle_t* const           ph_pd );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to an opened channel adapter.
+
+       pd_type
+               [in] Indicates the type of protection domain being created.
+
+       pd_context
+               [in] A client-specified context to associate with this allocated
+               protection domain.  This context is returned to the user when
+               invoking asynchronous callbacks referencing this protection domain.
+
+       ph_pd
+               [out] Upon successful completion of this call, this references a
+               handle to the allocated protection domain.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INVALID_PARAMETER
+               The supplied pd_type value is invalid or a reference to the protection
+               domain handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to allocate the protection domain.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the protection domain.
+
+

NOTES

+
       When successful, this routine returns a handle to a newly allocated
+       protection domain.
+
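+       An allocation sketch (h_ca comes from a prior ib_open_ca call;
+       IB_PDT_NORMAL is assumed to be the ordinary protection domain type):
+
+       ib_pd_handle_t  h_pd;
+       ib_api_status_t status;
+
+       status = ib_alloc_pd( h_ca, IB_PDT_NORMAL, NULL, &h_pd );
+       if( status != IB_SUCCESS )
+               return status;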
+

SEE ALSO

+
       ib_dealloc_pd, ib_pd_type_t
+
+
+
+ +

[Structures] +Access Layer/ib_apr_info_t

+ +

[top][parent][index]

+

NAME

+
       ib_apr_info_t
+
+

DESCRIPTION

+
       InfiniBand-defined additional rejection information.
+
+

SYNOPSIS

+
typedef struct _ib_apr_info
+{
+        uint8_t                                         data[IB_APR_INFO_SIZE];
+
+}       ib_apr_info_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_apr_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_apr_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of an alternate path response.
+
+

SYNOPSIS

+
typedef union _ib_apr_pdata
+{
+        uint8_t                                         data[IB_APR_PDATA_SIZE];
+
+}       ib_apr_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_ari_t

+ +

[top][parent][index]

+

NAME

+
       ib_ari_t
+
+

DESCRIPTION

+
       InfiniBand-defined additional rejection information.
+
+

SYNOPSIS

+
typedef struct _ib_ari
+{
+        uint8_t                                         data[IB_ARI_SIZE];
+
+}       ib_ari_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_async_event_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_async_event_rec_t
+
+

DESCRIPTION

+
       Information returned when an asynchronous event occurs on an allocated
+       resource.
+
+

SYNOPSIS

+
typedef struct _ib_async_event_rec
+{
+        ib_async_event_t                                                        code;
+        uint64_t                                                                        vendor_specific;
+
+        void* __ptr64                                                           context;
+        union _handle_t
+        {
+                ib_ca_handle_t                                                  h_ca;
+                ib_cq_handle_t                                                  h_cq;
+                ib_qp_handle_t                                                  h_qp;
+                ib_srq_handle_t                                                 h_srq;
+
+        } handle;
+
+}       ib_async_event_rec_t;
+
+

FIELDS

+
       code
+               A code that identifies the type of event being reported.
+
+       vendor_specific
+               A field containing optional vendor specific information.
+
+       context
+               User-defined context information associated with the resource on
+               which the error occurred.
+
+       handle
+               A handle to the resource for which this event record was generated.
+               This handle will match the handle returned during the creation of
+               the resource.  It is provided in case an event occurs before a client's
+               call to create a resource can return.
+
+

SEE ALSO

+
       ib_async_event_t, ib_pfn_event_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_bind_mw

+ +

[top][parent][index]

+

NAME

+
       ib_bind_mw
+
+

DESCRIPTION

+
       Binds a memory window to a registered memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_bind_mw(
+        IN              const   ib_mw_handle_t                          h_mw,
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN                              ib_bind_wr_t* const                     p_mw_bind,
+                OUT                     net32_t* const                          p_rkey );
+
+

PARAMETERS

+
       h_mw
+               [in] A handle to an existing memory window.
+
+       h_qp
+               [in] A handle to a queue pair that the bind request will be posted to.
+
+       p_mw_bind
+               [in] Describes the memory window bind request.
+
+       p_rkey
+               [out] The new rkey for the memory window that may be used by a remote
+               end-point when performing RDMA or atomic operations to this memory
+               region.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory window bind operation was successfully posted.
+
+       IB_INVALID_MW_HANDLE
+               The memory window handle was invalid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the memory window bind work request or rkey was not
+               provided.
+
+       IB_INVALID_SERVICE_TYPE
+               The queue pair configuration does not support this type of service.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_RKEY
+               The rkey is invalid for the memory region being bound.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to bind the memory window.
+
+

NOTES

+
       This routine posts a request to bind a memory window to a registered
+       memory region.  The bind operation occurs on the specified queue pair,
+       but the bound region is usable across all queue pairs within the same
+       protection domain.
+
+

SEE ALSO

+
       ib_create_mw, ib_bind_wr_t
+
+
+
+ +

[Functions] +Access Layer/ib_cancel_mad

+ +

[top][parent][index]

+

NAME

+
       ib_cancel_mad
+
+

DESCRIPTION

+
       This routine cancels a pending send transaction to a MAD service.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cancel_mad(
+        IN              const   ib_mad_svc_handle_t                     h_mad_svc,
+        IN                              ib_mad_element_t* const         p_mad_element );
+
+

PARAMETERS

+
       h_mad_svc
+               [in] The MAD service to which the send operation was directed.
+
+       p_mad_element
+               [in] A handle to a sent MAD element.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The requested MAD transaction was located and canceled.
+
+       IB_INVALID_PARAMETER
+               A reference to the MAD element list was not provided.
+
+       IB_NOT_FOUND
+               The requested transaction was not located or had already completed.
+
+

NOTES

+
       This routine cancels a pending send transaction to a MAD service.  If
+       the request is successfully located and has not yet completed, it will
+       be completed with its status set to IB_CANCELED.  The canceled operation
+       will be returned to the user through the normal MAD completion callback.
+       If the send transaction has already completed, this call will return
+       IB_NOT_FOUND.
+
+

SEE ALSO

+
       ib_send_mad
+
+
+
+ +

[Functions] +Access Layer/ib_cancel_query

+ +

[top][parent][index]

+

NAME

+
       ib_cancel_query
+
+

DESCRIPTION

+
       Routine used to cancel a query of the subnet administrator.
+
+

SYNOPSIS

+
AL_EXPORT void AL_API
+ib_cancel_query(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_query_handle_t                       h_query );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       h_query
+               [in] Query handle returned by a previous call to ib_query().
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
       This routine directs the access layer to cancel a query to the subnet
+       administrator.  The access layer will notify the user with the
+       final status of the query through the query callback specified in the
+       call to ib_query().
+
+

SEE ALSO

+
       ib_query
+
+
+
+ +

[Structures] +Access Layer/ib_cep_listen_t

+ +

[top][parent][index]

+

NAME

+
       ib_cep_listen_t
+
+

DESCRIPTION

+
       Request to listen for incoming connection attempts.
+
+

SYNOPSIS

+
typedef struct _ib_cep_listen
+{
+        net64_t                                         svc_id;
+
+        net64_t                                         port_guid;
+
+        uint8_t* __ptr64                        p_cmp_buf;
+        uint8_t                                         cmp_len;
+        uint8_t                                         cmp_offset;
+
+}       ib_cep_listen_t;
+
+

FIELDS

+
       svc_id
+               The identifier of the service to register for incoming connection
+               requests.
+
+       port_guid
+               Directs the communication manager to register the listen only
+               with the specified port.  This should be set to IB_ALL_PORTS
+               if the listen is not directed to a particular port.
+
+       p_cmp_buf
+               An optionally provided buffer that will be used to match incoming
+               connection requests with a registered service.  Use of this buffer
+               permits multiple services to listen on the same service ID as long as
+               they provide different compare buffers.  Incoming requests will
+               be matched against the compare buffer.
+
+       cmp_len
+               Specifies the size of the compare buffer in bytes.  The length must
+               be the same for all requests using the same service ID.
+
+       cmp_offset
+               An offset into the user-defined data area of a connection request
+               which contains the start of the data that will be compared against.
+               The offset must be the same for all requests using the same service ID.
+
+

NOTES

+
       Users fill out this structure when listening on a service ID with the
+       local communication manager.  The communication manager will use the given
+       service ID and compare buffer to route connection requests to the
+       appropriate client.  Users may direct listen requests to a particular
+       channel adapter, port, or LID.
+
+
+
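       As an illustration, the sketch below prepares a CEP listen that
       shares one service ID among several services by matching a
       private-data prefix; the tag value is purely illustrative, and
       cl_memclr is assumed to come from complib.

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static uint8_t  listen_tag[4] = { 'v', '1', 0, 0 };

            /* Sketch: requests whose private data starts with listen_tag
             * will be routed to this listener. */
            static void
            init_cep_listen(
                    IN      net64_t                 svc_id,
                    OUT     ib_cep_listen_t*        p_listen )
            {
                    cl_memclr( p_listen, sizeof(*p_listen) );
                    p_listen->svc_id     = svc_id;
                    p_listen->port_guid  = IB_ALL_PORTS;  /* any port */
                    p_listen->p_cmp_buf  = listen_tag;
                    p_listen->cmp_len    = sizeof(listen_tag);
                    p_listen->cmp_offset = 0;
            }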
[Functions] Access Layer/ib_ci_call

NAME

+
       ib_ci_call
+
+

DESCRIPTION

+
       Performs a vendor specific CA interface function call.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_ci_call(
+        IN                              ib_ca_handle_t                          h_ca,
+        IN              const   void* __ptr64 *         const   handle_array    OPTIONAL,
+        IN                              uint32_t                                        num_handles,
+        IN                              ib_ci_op_t*                     const   p_ci_op );
+
+

PARAMETERS

+
       h_ca
+               [in] An opened instance of a channel adapter.
+
+       handle_array
+               [in] This parameter references an array containing handles of
+               existing CA resources.  This array should contain all of the
+               handles specified in the vendor specific data provided with this
+               call.  All handles specified through this array are validated by
+               the access layer as existing and belonging to the calling process.
+               The verbs provider driver is responsible for verifying that the
+               number and type of handles are correct for the requested operation.
+
+       num_handles
+               [in] The number of the handles in handle array.  This count is
+               verified by the access layer.
+
+       p_ci_op
+               [in] A reference to the vendor specific CA interface data
+               structure containing the operation parameters.
+
RETURN VALUES
+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_CA_HANDLE
+               The specified CA handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the vendor specific data was not provided.
+
+       IB_INVALID_HANDLE
+               A handle specified in the handle array was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_ERROR
+               An error occurred while processing the command.  Additional
+               error information is provided in the p_ci_op status field.
+
+

NOTES

+
       This routine performs a vendor specific CA interface function call.
+       The optional p_ci_op structure provides a means to pass vendor
+       specific parameters and data to the verbs provider driver.  If the
+       vendor specific data contains handles, the client should provide the
+       optional handle array that lists all of the handles specified in the
+       vendor specific data.  The handles in the handle array are restricted
+       to the following types:  ib_pd_handle_t, ib_cq_handle_t,
+       ib_av_handle_t, ib_qp_handle_t, ib_mr_handle_t, or ib_mw_handle_t.
+       The contents of the handle array are verified by the access layer and
+       the verbs provider driver.  This call cannot be used to allocate private
+       handles that are passed as parameters in access layer calls.
+
+
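       As an illustration, a sketch of a vendor-specific operation that
       references a single protection domain follows; the contents of
       p_ci_op are vendor defined and hypothetical here.

            #include <iba/ib_al.h>

            /* Sketch: the handle array must name every handle that the
             * vendor data refers to, so the access layer can validate it. */
            static ib_api_status_t
            vendor_op(
                    IN      ib_ca_handle_t  h_ca,
                    IN      ib_pd_handle_t  h_pd,
                    IN      ib_ci_op_t*     p_ci_op )
            {
                    const void*     handles[1];

                    handles[0] = h_pd;
                    return ib_ci_call( h_ca, handles, 1, p_ci_op );
            }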

SEE ALSO

+
       ib_open_ca, ib_alloc_pd, ib_create_av, ib_create_cq,
+       ib_create_qp, ib_reg_mr, ib_reg_phys, ib_reg_shared,
+       ib_create_mw, ib_ci_op_t
+
+
+
[Functions] Access Layer/ib_close_al

NAME

+
       ib_close_al
+
+

DESCRIPTION

+
       Deregisters a channel driver with the access layer and releases all
+       associated resources, including queue pairs, connection requests,
+       and completion queues.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_close_al(
+        IN              const   ib_al_handle_t                          h_al );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an instance of the access layer.
+
RETURN VALUES
+       IB_SUCCESS
+               The access layer was closed successfully.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+

NOTES

+
       This call destroys an existing instance of the access layer.  Since
+       callbacks may be outstanding against the resources managed by this
+       access layer instance when the destroy operation is invoked, this
+       call may block until all outstanding callbacks complete.  This
+       routine must not be called from a callback invoked by the access layer.
+
+

SEE ALSO

+
       ib_open_al
+
+
+
[Functions] Access Layer/ib_close_ca

NAME

+
       ib_close_ca
+
+

DESCRIPTION

+
       Closes an opened channel adapter.  Once closed, no further access to this
+       channel adapter is possible.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_close_ca(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to an opened channel adapter.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the channel
+               adapter has been successfully destroyed.
+
RETURN VALUES
+       IB_SUCCESS
+               The close request was registered.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+

NOTES

+
       This call closes the opened channel adapter and frees all associated
+       resources, such as queue pairs, protection domains, and completion
+       queues.  Since callbacks may be outstanding against the channel adapter
+       or one of its resources at the time the close operation is invoked, this
+       call operates asynchronously.  The user will be notified through a callback
+       once the close operation completes, indicating that no additional callbacks
+       will be invoked for the specified channel adapter or a related resource.
+
+
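       For example (a sketch; the destroy callback is assumed to follow the
       ib_pfn_destroy_cb_t convention of receiving the object's context):

            #include <iba/ib_al.h>

            /* Sketch: asynchronous close with completion notification. */
            static void AL_API
            ca_destroyed_cb(
                    IN      void*   context )
            {
                    /* No further callbacks will occur for this CA or its
                     * resources; per-CA state may now be released. */
                    UNUSED_PARAM( context );
            }

            static void
            shutdown_ca(
                    IN      ib_ca_handle_t  h_ca )
            {
                    ib_close_ca( h_ca, ca_destroyed_cb );
            }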

SEE ALSO

+
       ib_open_ca
+
+
+
[Functions] Access Layer/ib_cm_apr

NAME

+
       ib_cm_apr
+
+

DESCRIPTION

+
       Responds to a load alternate path request, to accept or reject the
+       proposed alternate path.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_apr(
+        IN              const   ib_cm_handle_t                          h_cm_lap,
+        IN              const   ib_cm_apr_t* const                      p_cm_apr );
+
+

PARAMETERS

+
       h_cm_lap
+               [in] A handle to a load alternate path request corresponding to the
+               response.  This handle is provided through the ib_pfn_cm_lap_cb_t.
+
+       p_cm_apr
+               [in] Information describing the alternate path response.  The response
+               will accept or reject the load request.  If the request is rejected
+               this parameter will reference additional rejection information.
+
RETURN VALUES
+       IB_SUCCESS
+               The load alternate path response was sent successfully.
+
+       IB_INVALID_HANDLE
+               The connection manager load alternate path handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the alternate path information was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The private data length specified in alternate path information is
+               invalid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle specified in the alternate path information
+               was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the alternate path response.
+
+

NOTES

+
       This routine responds to a load alternate path request.
+
+
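       As an illustration, the sketch below accepts the proposed path from
       within the LAP callback; the per-connection context type and the
       exact callback signature are assumptions.

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            /* Hypothetical per-connection state stored in qp_context. */
            typedef struct _my_conn
            {
                    ib_qp_handle_t  h_qp;
            }       my_conn_t;

            static void AL_API
            my_lap_cb(
                    IN      ib_cm_lap_rec_t*        p_lap_rec )
            {
                    my_conn_t*      p_conn = (my_conn_t*)p_lap_rec->qp_context;
                    ib_cm_apr_t     cm_apr;

                    cl_memclr( &cm_apr, sizeof(cm_apr) );
                    cm_apr.qp_type    = IB_QPT_RELIABLE_CONN;
                    cm_apr.h_qp       = p_conn->h_qp;
                    cm_apr.apr_status = IB_AP_SUCCESS;  /* accept the path */

                    ib_cm_apr( p_lap_rec->h_cm_lap, &cm_apr );
            }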

SEE ALSO

+
       ib_cm_lap, ib_cm_apr_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t
+
+
+
[Structures] Access Layer/ib_cm_apr_rec_t

NAME

+
       ib_cm_apr_rec_t
+
+

DESCRIPTION

+
       Load alternate path response information returned to the user through
+       a callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_apr_rec
+{
+        ib_api_status_t                         cm_status;
+        ib_apr_status_t                         apr_status;
+
+        const uint8_t* __ptr64          p_info;
+        uint8_t                                         info_length;
+
+        const uint8_t* __ptr64          p_apr_pdata;
+
+        ib_qp_handle_t                          h_qp;
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_apr_rec_t;
+
+

FIELDS

+
       cm_status
+               The status of the alternate path response.  IB_SUCCESS indicates that
+               the alternate path was loaded successfully.  IB_TIMEOUT indicates that
+               a reply was not received within the specified timeout and retry count.
+               Other error values indicate that the alternate path was not loaded;
+               if apr_status is IB_AP_SUCCESS in that case, the QP itself failed
+               to load the path.  Other apr_status values indicate why the
+               request was rejected.
+
+       apr_status
+               The alternate path response status.  This indicates additional failure
+               information to a load alternate path request and is defined by the
+               InfiniBand specification.
+
+       info_length
+               Length of valid data in the APR additional information buffer.
+
+       p_info
+               APR additional information.
+
+       p_apr_pdata
+               A reference to user-defined private data sent as part of the alternate
+               path response.
+
+       h_qp
+               The queue pair handle associated with the alternate path response.
+
+       qp_context
+               The queue pair context associated with the alternate path response.
+
+

SEE ALSO

+
       ib_cm_lap, ib_pfn_cm_apr_cb_t, ib_apr_status_t, ib_apr_info_t
+       ib_apr_pdata_t, ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_apr_t

NAME

+
       ib_cm_apr_t
+
+

DESCRIPTION

+
       Load alternate path information used to configure a queue pair with an
+       alternate path.
+
+

SYNOPSIS

+
typedef struct _ib_cm_apr
+{
+        const uint8_t* __ptr64                  p_apr_pdata;
+        uint8_t                                                 apr_length;
+
+        ib_qp_type_t                                    qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        ib_qp_handle_t                                  h_qp;
+
+        ib_apr_status_t                                 apr_status;
+        uint8_t                                                 info_length;
+        const ib_apr_info_t* __ptr64    p_info;
+
+}       ib_cm_apr_t;
+
+

FIELDS

+
       p_apr_pdata
+               Optional user-defined private data sent as part of the alternate
+               path response message.
+
+       apr_length
+               Defines the size of the user-defined private data.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_qp
+               A handle to the queue pair that should receive the alternate path.
+
+       apr_status
+               The alternate path response status.  This indicates additional failure
+               information to a load alternate path request and is defined by the
+               InfiniBand specification.
+
+       info_length
+               Length of valid data in the APR additional information buffer.
+
+       p_info
+               APR additional information.
+
+

SEE ALSO

+
       ib_cm_apr, ib_pfn_cm_apr_cb_t, ib_lap_pdata_t, ib_qp_type_t
+
+
+
[Functions] Access Layer/ib_cm_cancel

NAME

+
       ib_cm_cancel
+
+

DESCRIPTION

+
       Routine used to cancel listening for connection requests.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_cancel(
+        IN              const   ib_listen_handle_t                      h_cm_listen,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_cm_listen
+               [in] A handle to an existing listen request.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the listen
+               request has been successfully canceled.
+
RETURN VALUES
+       IB_SUCCESS
+               The cancel listen operation was initiated.
+
+       IB_INVALID_HANDLE
+               The connection manager handle was invalid.
+
+

NOTES

+
       This routine cancels a listen request.  To avoid a race between
+       canceling a request and a connection callback that is already in
+       progress, the cancel operation completes asynchronously.  For
+       additional details see ib_pfn_destroy_cb_t.
+
+
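       For example (a sketch; freeing the listen context is only safe once
       the destroy callback has run):

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static void AL_API
            listen_destroyed_cb(
                    IN      void*   context )
            {
                    /* Request callbacks can no longer occur. */
                    cl_free( context );
            }

            static void
            stop_listen(
                    IN      ib_listen_handle_t      h_listen )
            {
                    ib_cm_cancel( h_listen, listen_destroyed_cb );
            }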

SEE ALSO

+
       ib_cm_listen, ib_pfn_destroy_cb_t
+
+
+
[Functions] Access Layer/ib_cm_drep

NAME

+
       ib_cm_drep
+
+

DESCRIPTION

+
       This routine replies to a disconnection request and disconnects
+       a queue pair or end-to-end context.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_drep(
+        IN              const   ib_cm_handle_t                          h_cm_dreq,
+        IN              const   ib_cm_drep_t* const                     p_cm_drep );
+
+

PARAMETERS

+
       h_cm_dreq
+               [in] A handle to a disconnection request being replied to.  This
+               handle is provided through the ib_pfn_cm_dreq_cb_t callback.
+
+       p_cm_drep
+               [in] Reply information used to respond to the disconnection request.
+
RETURN VALUES
+       IB_SUCCESS
+               The disconnect request was sent successfully.
+
+       IB_INVALID_HANDLE
+               The connection manager disconnect request handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the disconnect reply information was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The private data length specified in disconnect reply information is
+               invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the disconnect reply.
+
+

NOTES

+
       This function will disconnect a queue pair or end-to-end context.  It
+       results in sending a disconnection reply message to the remote end-point.
+       After calling this routine, data transfers on the specified queue pair or
+       end-to-end context will fail.
+
+
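       For example, replying from within the disconnect request callback
       (a sketch; the ib_pfn_cm_dreq_cb_t signature is assumed to deliver
       the request record):

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static void AL_API
            my_dreq_cb(
                    IN      ib_cm_dreq_rec_t*       p_dreq_rec )
            {
                    ib_cm_drep_t    cm_drep;

                    /* Reply without private data. */
                    cl_memclr( &cm_drep, sizeof(cm_drep) );

                    ib_cm_drep( p_dreq_rec->h_cm_dreq, &cm_drep );
            }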

SEE ALSO

+
       ib_cm_dreq, ib_pfn_cm_dreq_cb_t, ib_cm_drep_t
+
+
+
[Structures] Access Layer/ib_cm_drep_rec_t

NAME

+
       ib_cm_drep_rec_t
+
+

DESCRIPTION

+
       Disconnection reply information returned to the user through their
+       disconnect reply callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_drep_rec
+{
+        ib_api_status_t                         cm_status;
+
+        const uint8_t* __ptr64          p_drep_pdata;
+
+        ib_qp_handle_t                          h_qp;
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_drep_rec_t;
+
+

FIELDS

+
       cm_status
+               The status of the disconnect request.  Valid values are IB_SUCCESS
+               and IB_TIMEOUT.  IB_TIMEOUT indicates that a reply was not received
+               within the specified timeout and retry count.
+
+       p_drep_pdata
+               A reference to user-defined private data sent as part of the
+               disconnect reply.
+
+       h_qp
+               The queue pair handle associated with the disconnect reply.
+
+       qp_context
+               The queue pair context associated with the disconnect reply.
+
+

SEE ALSO

+
       ib_cm_drep, ib_pfn_cm_drep_cb_t, ib_drep_pdata_t, ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_drep_t

NAME

+
       ib_cm_drep_t
+
+

DESCRIPTION

+
       Disconnection reply information used when tearing down a connection.
+
+

SYNOPSIS

+
typedef struct _ib_cm_drep
+{
+        uint8_t* __ptr64                        p_drep_pdata;
+        uint8_t                                         drep_length;
+
+}       ib_cm_drep_t;
+
+

FIELDS

+
       p_drep_pdata
+               A reference to user-defined private data sent as part of the
+               disconnection reply.
+
+       drep_length
+               Defines the size of the user-defined private data.
+
+

SEE ALSO

+
       ib_cm_drep, ib_drep_pdata_t
+
+
+
[Functions] Access Layer/ib_cm_dreq

NAME

+
       ib_cm_dreq
+
+

DESCRIPTION

+
       This routine disconnects a queue pair or end-to-end context.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_dreq(
+        IN              const   ib_cm_dreq_t* const                     p_cm_dreq );
+
+

PARAMETERS

+
       p_cm_dreq
+               [in] Information that describes the connection being disconnected.
+
RETURN VALUES
+       IB_SUCCESS
+               The disconnect request was sent successfully.
+
+       IB_INVALID_PARAMETER
+               A reference to the disconnect request information was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The private data length specified in disconnect request information is
+               invalid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle specified in the disconnect request information
+               was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the disconnect request.
+
+

NOTES

+
       This function will disconnect a queue pair or end-to-end context.
+       It results in sending a disconnection request message to the remote
+       end-point.  After calling this routine, data transfers on the specified
+       queue pair or end-to-end context will fail.
+
+
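       For example, actively disconnecting a reliable connected queue pair
       (a sketch; the disconnect reply callback is supplied by the caller):

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static ib_api_status_t
            disconnect_qp(
                    IN      ib_qp_handle_t          h_qp,
                    IN      ib_pfn_cm_drep_cb_t     pfn_drep_cb )
            {
                    ib_cm_dreq_t    cm_dreq;

                    cl_memclr( &cm_dreq, sizeof(cm_dreq) );
                    cm_dreq.qp_type        = IB_QPT_RELIABLE_CONN;
                    cm_dreq.h_qp           = h_qp;
                    cm_dreq.pfn_cm_drep_cb = pfn_drep_cb;

                    return ib_cm_dreq( &cm_dreq );
            }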

SEE ALSO

+
       ib_cm_drep, ib_pfn_cm_dreq_cb_t, ib_cm_dreq_t
+
+
+
[Structures] Access Layer/ib_cm_dreq_rec_t

NAME

+
       ib_cm_dreq_rec_t
+
+

DESCRIPTION

+
       Disconnection request information returned to the user through their
+       disconnection callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_dreq_rec
+{
+        ib_cm_handle_t                          h_cm_dreq;
+
+        const uint8_t* __ptr64          p_dreq_pdata;
+
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_dreq_rec_t;
+
+

FIELDS

+
       h_cm_dreq
+               A handle to the disconnection request.  This handle is used to reply
+               to the disconnection request.
+
+       p_dreq_pdata
+               A reference to user-defined private data sent as part of the
+               disconnect request.
+
+       qp_context
+               The queue pair context associated with the disconnect request.
+
+

SEE ALSO

+
       ib_cm_dreq, ib_pfn_cm_dreq_cb_t, ib_dreq_pdata_t, ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_dreq_t

NAME

+
       ib_cm_dreq_t
+
+

DESCRIPTION

+
       Disconnection request information used to tear down a connection.
+
+

SYNOPSIS

+
typedef struct _ib_cm_dreq
+{
+        ib_al_flags_t                           flags;
+
+        uint8_t* __ptr64                        p_dreq_pdata;
+        uint8_t                                         dreq_length;
+
+        ib_qp_type_t                            qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        ib_qp_handle_t                          h_qp;
+        ib_pfn_cm_drep_cb_t                     pfn_cm_drep_cb;
+
+}       ib_cm_dreq_t;
+
+

FIELDS

+
       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       p_dreq_pdata
+               A reference to user-defined private data sent as part of the
+               disconnection request.
+
+       dreq_length
+               Defines the size of the user-defined private data.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_qp
+               A handle to the queue pair to disconnect.
+
+       pfn_cm_drep_cb
+               References a user-defined callback that will be invoked when
+               the reply to the disconnect is received.
+
+

NOTES

+
       Users submit this structure to disconnect a queue pair or end-to-end
+       context.  A single disconnect call disconnects either a queue pair or
+       an end-to-end context, but not both.
+
+

SEE ALSO

+
       ib_cm_dreq, ib_cm_drep, ib_dreq_pdata_t, ib_al_flags_t,
+       ib_qp_type_t
+
+
+
[Definitions] Access Layer/ib_cm_failover_t

NAME

+
       ib_cm_failover_t
+
+

DESCRIPTION

+
       Fail over acceptance status returned as part of a connection reply.
+
+

SYNOPSIS

+
typedef uint8_t                                                         ib_cm_failover_t;
+#define IB_FAILOVER_ACCEPT_SUCCESS                      0
+#define IB_FAILOVER_ACCEPT_UNSUPPORTED          1
+#define IB_FAILOVER_ACCEPT_ERROR                        2
+
+

NOTES

+
       These values and their use are defined by the InfiniBand specification.
+
+

SEE ALSO

+
       ib_cm_rep, ib_cm_rep_t
+
+
+
[Functions] Access Layer/ib_cm_handoff

NAME

+
       ib_cm_handoff
+
+

DESCRIPTION

+
       Hands off the received REQ information to svc_id.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_handoff(
+        IN              const   ib_cm_handle_t                          h_cm_req,
+        IN              const   ib_net64_t                                      svc_id );
+
+

PARAMETERS

+
       h_cm_req
+               [in] A handle to the connection request being handed off.
+               This is the h_cm_req handle provided through the ib_pfn_cm_req_cb_t
+               callback.
+
+       svc_id
+               [in] The service id to which this connection request is handed off.
+
RETURN VALUES
+       IB_SUCCESS
+               The handoff was initiated.
+
+       IB_INVALID_HANDLE
+               The connection manager handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A valid service id was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow this transfer.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to complete the request.
+
+

NOTES

+
       This routine results in the access layer handing off the connection
+       to the service id as a new incoming connection.
+
+
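       For example, from within a request callback (a sketch; the backup
       service ID is hypothetical):

            #include <iba/ib_al.h>

            /* Sketch: pass the REQ to another registered service. */
            static void
            handoff_req(
                    IN      ib_cm_handle_t  h_cm_req,
                    IN      ib_net64_t      backup_svc_id )
            {
                    if( ib_cm_handoff( h_cm_req, backup_svc_id ) != IB_SUCCESS )
                    {
                            /* Fall back to rejecting the request. */
                    }
            }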

SEE ALSO

+
       ib_pfn_cm_req_cb_t, ib_cm_rej_t, ib_cm_listen
+
+
+
[Functions] Access Layer/ib_cm_lap

NAME

+
       ib_cm_lap
+
+

DESCRIPTION

+
       Issues a load alternate path request to a specified end-point.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_lap(
+        IN              const   ib_cm_lap_t* const                      p_cm_lap );
+
+

PARAMETERS

+
       p_cm_lap
+               [in] Information describing the alternate path to load and the remote
+               endpoint for the connection.
+
RETURN VALUES
+       IB_SUCCESS
+               The load alternate path request was sent successfully.
+
+       IB_INVALID_PARAMETER
+               A reference to the load alternate path information was not provided.
+
+       IB_UNSUPPORTED
+               The passive side of the connection attempted to load an alternate path.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The load alternate path information contains one or more of the
+               following errors:
+                 - The class version, queue pair type, or path is not supported by
+                       connection manager.
+                 - The primary path is not on the same channel adapter as the queue
+                       pair.
+                 - The primary and alternate paths are on different channel adapters.
+                 - The primary and alternate paths specify different MTUs.
+                 - The alternate path record packet lifetime is out of range.
+                 - The alternate path record pkey is out of range.
+                 - The specified private data length is invalid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle specified in the load alternate path information
+               was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the load alternate path request.
+
+

NOTES

+
       This routine initiates loading an alternate path on an existing
+       connected queue pair or end-to-end context.  If the request is successful,
+       the alternate path will be loaded and armed for path migration.
+
+       The p_cm_lap parameter describes the alternate path to load and indicates
+       the remote endpoint of an existing connection that will receive the load
+       request.
+
+
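       As an illustration, the sketch below loads an alternate path on a
       connected RC queue pair; the path record must reside on the same
       channel adapter as the queue pair, and the APR callback is supplied
       by the caller.

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static ib_api_status_t
            load_alt_path(
                    IN      ib_qp_handle_t          h_qp,
                    IN      ib_path_rec_t*          p_alt_path,
                    IN      ib_pfn_cm_apr_cb_t      pfn_apr_cb )
            {
                    ib_cm_lap_t     cm_lap;

                    cl_memclr( &cm_lap, sizeof(cm_lap) );
                    cm_lap.qp_type             = IB_QPT_RELIABLE_CONN;
                    cm_lap.h_qp                = h_qp;
                    cm_lap.remote_resp_timeout = 16; /* 4.096us * 2^16 */
                    cm_lap.p_alt_path          = p_alt_path;
                    cm_lap.pfn_cm_apr_cb       = pfn_apr_cb;

                    return ib_cm_lap( &cm_lap );
            }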

SEE ALSO

+
       ib_cm_apr, ib_cm_lap_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t
+
+
+
[Structures] Access Layer/ib_cm_lap_rec_t

NAME

+
       ib_cm_lap_rec_t
+
+

DESCRIPTION

+
       Load alternate path request information returned to the user through
+       a callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_lap_rec
+{
+        ib_cm_handle_t                          h_cm_lap;
+        ib_path_rec_t                           alt_path;
+
+        const uint8_t* __ptr64          p_lap_pdata;
+
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_lap_rec_t;
+
+

FIELDS

+
       p_lap_pdata
+               A reference to user-defined private data sent as part of the load
+               alternate path request.
+
+       qp_context
+               The queue pair context associated with a connection request.
+
+       h_cm_lap
+               A handle to the load alternate path request.  This handle is used
+               to reply to the load request.
+
+       alt_path
+               Requested alternate path.  Users must accept or reject the path by
+               calling ib_cm_apr.
+
+

SEE ALSO

+
       ib_cm_lap, ib_pfn_cm_lap_cb_t, ib_lap_pdata_t, ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_lap_t

NAME

+
       ib_cm_lap_t
+
+

DESCRIPTION

+
       Load alternate path information used to configure a queue pair with an
+       alternate path.
+
+

SYNOPSIS

+
typedef struct _ib_cm_lap
+{
+        ib_al_flags_t                           flags;
+
+        const uint8_t* __ptr64          p_lap_pdata;
+        uint8_t                                         lap_length;
+
+        ib_qp_type_t                            qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        ib_qp_handle_t                          h_qp;
+
+        uint8_t                                         remote_resp_timeout;
+        ib_path_rec_t* __ptr64          p_alt_path;
+        ib_pfn_cm_apr_cb_t                      pfn_cm_apr_cb;
+
+}       ib_cm_lap_t;
+
+

FIELDS

+
       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       p_lap_pdata
+               Optional user-defined private data sent as part of the load alternate
+               path message.
+
+       lap_length
+               Defines the size of the user-defined private data.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_qp
+               A handle to the queue pair that should receive the alternate path.
+
+       remote_resp_timeout
+               The time within which the remote CM should transmit a response to
+               the sender.  This value is expressed as
+               4.096 * (2 ^ remote_resp_timeout) microseconds.
+
+       p_alt_path
+               The path record to use for the alternate connection.
+
+       pfn_cm_apr_cb
+               References a user-defined callback that will be invoked when the
+               response to the load request is received.
+
+

SEE ALSO

+
       ib_cm_lap, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t, ib_path_rec_t,
+       ib_pfn_lap_pdata_t, ib_qp_type_t
+
+
+
[Functions] Access Layer/ib_cm_listen

NAME

+
       ib_cm_listen
+
+

DESCRIPTION

+
       Issues a request to the local communication manager to listen for
+       incoming connection requests.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_listen(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_cm_listen_t* const           p_cm_listen,
+        IN              const   ib_pfn_listen_err_cb_t          pfn_listen_err_cb,
+        IN              const   void* const                                     listen_context,
+                OUT                     ib_listen_handle_t* const       ph_cm_listen );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an opened instance of the access layer.
+
+       p_cm_listen
+               [in] Information used to direct the listen request to match incoming
+               connection requests.
+
+       pfn_listen_err_cb
+               [in] User-specified error callback routine to invoke if an error
+               occurs while listening.
+
+       listen_context
+               User-specified context information that is returned as a part of all
+               connection requests through the pfn_cm_req_cb routine.  The context is
+               also returned through the error and destroy callbacks.
+
+       ph_cm_listen
+               [out] Upon successful completion of this call, this references a handle
+               to the listen request.  This handle may be used to cancel the listen
+               operation.
+
RETURN VALUES
+       IB_SUCCESS
+               The listen request was successfully registered with the connection
+               manager.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the listen request information, error callback function,
+               or listen handle was not provided.
+
+       IB_INVALID_SETTING
+               The class version specified in the listen request is not supported by
+               connection manager or the listen request is not unique.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the listen request.
+
+       IB_INVALID_GUID
+               A channel adapter or port GUID is not wildcarded and no channel adapter
+               or port in the system was found for the specified GUID.
+
+       IB_INVALID_LID
+               The lid is not wildcarded and is not within the lid range for the port
+               specified in the listen request information.
+
+       IB_INVALID_PKEY
+               The pkey is not wildcarded and is not a valid pkey for the port
+               specified in the listen request information.
+
+

NOTES

+
       This routine directs the access layer to route connection requests
+       matching the specified connection parameters to the client.  Clients
+       listen for connections matching a particular service ID, and may optionally
+       direct their listen request towards a specific channel adapter, port, or
+       LID.
+
+       If local configuration changes occur that invalidate a listen request, the
+       specified error callback will be invoked.  Invalidated listen requests
+       should be canceled by the user.  An example of a configuration change that
+       invalidates listen requests is a LID change for directed listens.  The
+       listen error callback will be invoked within the context of a system
+       thread.
+
+
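       As an illustration, the sketch below registers a fully wildcarded
       listen for reliable connected QPs; the request and error callbacks
       are supplied by the caller.

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static ib_api_status_t
            start_listen(
                    IN      ib_al_handle_t          h_al,
                    IN      ib_net64_t              svc_id,
                    IN      ib_pfn_cm_req_cb_t      pfn_req_cb,
                    IN      ib_pfn_listen_err_cb_t  pfn_err_cb,
                    IN      const void*             context,
                    OUT     ib_listen_handle_t*     ph_listen )
            {
                    ib_cm_listen_t  cm_listen;

                    cl_memclr( &cm_listen, sizeof(cm_listen) );
                    cm_listen.svc_id        = svc_id;
                    cm_listen.ca_guid       = IB_ALL_CAS;
                    cm_listen.port_guid     = IB_ALL_PORTS;
                    cm_listen.lid           = IB_ALL_LIDS;
                    cm_listen.pkey          = IB_ALL_PKEYS;
                    cm_listen.pfn_cm_req_cb = pfn_req_cb;
                    cm_listen.qp_type       = IB_QPT_RELIABLE_CONN;

                    return ib_cm_listen( h_al, &cm_listen, pfn_err_cb,
                            context, ph_listen );
            }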

SEE ALSO

+
       ib_cm_listen_t, ib_pfn_listen_err_cb_t
+
+
+
[Structures] Access Layer/ib_cm_listen_t

NAME

+
       ib_cm_listen_t
+
+

DESCRIPTION

+
       Request to listen for incoming connection attempts.
+
+

SYNOPSIS

+
typedef struct _ib_cm_listen
+{
+        ib_net64_t                                      svc_id;
+
+        ib_net64_t                                      ca_guid;
+        ib_net64_t                                      port_guid;
+        ib_net16_t                                      lid;
+        ib_net16_t                                      pkey;
+
+        uint8_t* __ptr64                        p_compare_buffer;
+        uint8_t                                         compare_offset;
+        uint8_t                                         compare_length;
+
+        ib_pfn_cm_req_cb_t                      pfn_cm_req_cb;
+
+        ib_qp_type_t                            qp_type;
+
+        /* valid for ud qp_type only */
+        const void* __ptr64                     sidr_context;
+
+}       ib_cm_listen_t;
+
+

FIELDS

+
       svc_id
+               The identifier of the service to register for incoming connection
+               requests.
+
+       ca_guid
+               Directs the communication manager to register the listen only
+               with the specified channel adapter.  This should be set to IB_ALL_CAS
+               if the listen is not directed to a particular channel adapter.
+
+       port_guid
+               Directs the communication manager to register the listen only
+               with the specified port.  This should be set to IB_ALL_PORTS
+               if the listen is not directed to a particular port.
+
+       lid
+               Directs the communication manager to register the listen only
+               with the specified LID.  This should be set to IB_ALL_LIDS
+               if the listen is not directed to a particular LID.
+
+       pkey
+               Directs the communication manager to register the listen only with
+               the specified pkey value.  This should be set to IB_ALL_PKEYS
+               if the listen is not directed to a particular partition.
+
+       p_compare_buffer
+               An optionally provided buffer that will be used to match incoming
+               connection requests with a registered service.  Use of this buffer
+               permits multiple services to listen on the same service ID as long as
+               they provide different compare buffers.  Incoming requests will
+               be matched against the compare buffer.
+
+       compare_offset
+               An offset into the user-defined data area of a connection request
+               which contains the start of the data that will be compared against.
+               The offset must be the same for all requests using the same service ID.
+
+       compare_length
+               Specifies the size of the compare buffer in bytes.  The length must
+               be the same for all requests using the same service ID.
+
+       pfn_cm_req_cb
+               References a user-provided callback that will be invoked whenever a
+               connection request is received.
+
+       qp_type
+               Indicates the CM service type.
+
+       pfn_cm_mra_cb
+               References a user-provided callback that will be invoked when
+               a message received acknowledgement is received.
+
+       pfn_cm_rej_cb
+               References a user-provided callback that will be invoked if the
+               connection is rejected by the remote end-point.
+
+       sidr_context
+               SIDR-specific context for listens.  This context is passed back in
+               the ib_pfn_cm_req_cb_t callback.
+
+

NOTES

+
       Users fill out this structure when listening on a service ID with the
+       local communication manager.  The communication manager will use the given
+       service ID and compare buffer to route connection requests to the
+       appropriate client.  Users may direct listen requests to a particular
+       channel adapter, port, or LID.
+
+       Message received acknowledgement (MRA) callbacks will not be invoked
+       until a connection request has been replied to.
+
+

SEE ALSO

+
       ib_listen_info_t, ib_pfn_cm_req_cb_t, ib_pfn_cm_mra_cb_t,
+       ib_qp_type_t
+
+
+
[Functions] Access Layer/ib_cm_mra

NAME

+
       ib_cm_mra
+
+

DESCRIPTION

+
       Notifies the remote end-point of a connection or load alternate path
+       request that the request message has been received, but additional
+       processing is required.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_mra(
+        IN              const   ib_cm_handle_t                          h_cm,
+        IN              const   ib_cm_mra_t* const                      p_cm_mra );
+
+

PARAMETERS

+
       h_cm
+               [in] A handle to the connection request, connection reply, or load
+               alternate path request that should receive the message received
+               acknowledgement message.  This is the h_cm_req, h_cm_rep, or
+               h_cm_lap handle provided through the ib_pfn_cm_req_cb_t,
+               ib_pfn_cm_rep_cb_t, or ib_pfn_cm_lap_cb_t callback, respectively.
+
+       p_cm_mra
+               [in] Contains the message received acknowledgement data to return to
+               the requesting end-point.
+
RETURN VALUES
+       IB_SUCCESS
+               The message receive acknowledge was sent successfully.
+
+       IB_INVALID_HANDLE
+               The connection manager reply handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the message receive acknowledge information was not
+               provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The class version is not supported by connection manager or the
+               specified private data length is invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the message receive acknowledge.
+
+

NOTES

+
       This routine results in the access layer acknowledging a connection or
+       load alternate path message.  It should be invoked by a client if the
+       client is unable to respond to a request within a specified timeout,
+       in order to prevent the remote end-point from timing out.
+
+
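       For example, from within a request callback that cannot reply
       promptly (a sketch; the timeout value is illustrative only):

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static void
            delay_req_processing(
                    IN      ib_cm_handle_t  h_cm_req )
            {
                    ib_cm_mra_t     cm_mra;

                    cl_memclr( &cm_mra, sizeof(cm_mra) );
                    cm_mra.svc_timeout = 21;    /* illustrative value */

                    ib_cm_mra( h_cm_req, &cm_mra );
            }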

SEE ALSO

+
       ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_lap_cb_t, ib_cm_mra_t
+
+
+
[Structures] Access Layer/ib_cm_mra_rec_t

NAME

+
       ib_cm_mra_rec_t
+
+

DESCRIPTION

+
       Message received acknowledgement information returned to the user through
+       a callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_mra_rec
+{
+        const uint8_t* __ptr64          p_mra_pdata;
+
+        ib_qp_handle_t                          h_qp;
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_mra_rec_t;
+
+

FIELDS

+
       p_mra_pdata
+               A reference to user-defined private data sent as part of the MRA.
+
+       h_qp
+               The queue pair handle associated with a connection request.
+
+       qp_context
+               The queue pair context associated with a connection request.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_mra, ib_pfn_cm_mra_cb_t, ib_mra_pdata_t, ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_mra_t

NAME

+
       ib_cm_mra_t
+
+

DESCRIPTION

+
       Connection message received acknowledgement information used to
+       indicate that a connection request, reply, or load alternate path
+       has been received.
+
+

SYNOPSIS

+
typedef struct _ib_cm_mra
+{
+        uint8_t                                         svc_timeout;
+
+        const uint8_t* __ptr64          p_mra_pdata;
+        uint8_t                                         mra_length;
+
+}       ib_cm_mra_t;
+
+

FIELDS

+
       svc_timeout
+               Indicates the amount of time that the local service requires to
+               complete processing of the previously received message.
+
+       p_mra_pdata
+               Optional user-defined private data sent as part of the message
+               received acknowledgement.
+
+       mra_length
+               Defines the size of the user-defined private data.
+
+

SEE ALSO

+
       ib_cm_mra, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_lap_cb_t,
+       ib_mra_pdata_t
+
+
+
[Functions] Access Layer/ib_cm_rej

NAME

+
       ib_cm_rej
+
+

DESCRIPTION

+
       Rejects a connection request from a remote end-point.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_rej(
+        IN              const   ib_cm_handle_t                          h_cm,
+        IN              const   ib_cm_rej_t* const                      p_cm_rej );
+
+

PARAMETERS

+
       h_cm
+               [in] A handle to the connection request or reply being rejected.
+               This is the h_cm_req or h_cm_rep handle provided through the
+               ib_pfn_cm_req_cb_t or ib_pfn_cm_rep_cb_t callback, respectively.
+
+       p_cm_rej
+               [in] Contains the connection rejection information to return to the
+               connecting end-point.
+
RETURN VALUES
+       IB_SUCCESS
+               The connection reject was initiated.
+
+       IB_INVALID_HANDLE
+               The connection manager handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the reject information was not provided.
+
+

NOTES

+
       This routine results in the access layer rejecting a connection
+       and notifying the remote end-point.
+
+
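       For example, rejecting from within the request callback (a sketch;
       IB_REJ_USER_DEFINED is assumed to be among the ib_rej_status_t
       values defined in ib_types.h):

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static void
            reject_req(
                    IN      ib_cm_handle_t  h_cm_req )
            {
                    ib_cm_rej_t     cm_rej;

                    cl_memclr( &cm_rej, sizeof(cm_rej) );
                    cm_rej.rej_status = IB_REJ_USER_DEFINED;

                    ib_cm_rej( h_cm_req, &cm_rej );
            }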

SEE ALSO

+
       ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_cm_rej_t
+
+
+
[Structures] Access Layer/ib_cm_rej_rec_t

NAME

+
       ib_cm_rej_rec_t
+
+

DESCRIPTION

+
       Connection rejection information returned to the user through their
+       rejection callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rej_rec
+{
+        ib_rej_status_t                         rej_status;
+        const uint8_t* __ptr64          p_ari;
+        uint8_t                                         ari_length;
+
+        const uint8_t* __ptr64          p_rej_pdata;
+
+        ib_qp_handle_t                          h_qp;
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_rej_rec_t;
+
+

FIELDS

+
       rej_status
+               The reason for the connection rejection.
+
+       p_ari
+               Additional rejection information.  The data referenced by this field
+               is dependent on the rej_status and is defined by the InfiniBand
+               specification.
+
+       ari_length
+               Length of valid data provided in the p_ari buffer.
+
+       p_rej_pdata
+               A reference to user-defined private data sent as part of the connection
+               request reply.
+
+       h_qp
+               The queue pair handle associated with a connection request.
+
+       qp_context
+               The queue pair context associated with a connection request.
+
+

SEE ALSO

+
       ib_cm_rej, ib_pfn_cm_rej_cb_t, ib_rej_status_t, ib_ari_t, ib_rej_pdata_t,
+       ib_qp_type_t
+
+
+
[Structures] Access Layer/ib_cm_rej_t

NAME

+
       ib_cm_rej_t
+
+

DESCRIPTION

+
       Information used to reject a connection request.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rej
+{
+        ib_rej_status_t                         rej_status;
+
+        ib_ari_t* __ptr64                       p_ari;
+        uint8_t                                         ari_length;
+        const uint8_t* __ptr64          p_rej_pdata;
+        uint8_t                                         rej_length;
+
+}       ib_cm_rej_t;
+
+

FIELDS

+
       rej_status
+               The reason for the connection rejection.
+
+       p_ari
+               Additional rejection information.  The data referenced by this field
+               is dependent on the rej_status and is defined by the InfiniBand
+               specification.
+
+       ari_length
+               Length of valid data provided in the p_ari buffer.
+
+       p_rej_pdata
+               A reference to user-defined private data sent as part of the
+               reject message.
+
+       rej_length
+               Defines the size of the user-defined private data.
+
+

SEE ALSO

+
       ib_cm_rej, ib_pfn_cm_rej_cb_t, ib_rej_status_t, ib_ari_t, ib_rej_pdata_t
+
+
+
[Functions] Access Layer/ib_cm_rep

NAME

+
       ib_cm_rep
+
+

DESCRIPTION

+
       Sends a reply to a connection request, indicating that the connection
+       has been accepted.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_rep(
+        IN              const   ib_cm_handle_t                          h_cm_req,
+        IN              const   ib_cm_rep_t* const                      p_cm_rep );
+
+

PARAMETERS

+
       h_cm_req
+               [in] A handle to the connection request being replied to.  This handle
+               is provided by the access layer through the ib_pfn_cm_req_cb_t
+               callback.
+
+       p_cm_rep
+               [in] Contains reply information to return to the initiator of the
+               connection request.
+
RETURN VALUES
+       IB_SUCCESS
+               The connection reply was initiated.
+
+       IB_INVALID_HANDLE
+               The connection manager request handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the reply information was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The connect reply information contains one or more of the following
+               errors:
+                 - The class version, queue pair type, or path is not supported by
+                       connection manager.
+                 - The private data length exceeds the value allowed by the connection
+                       class version.
+                 - The primary path is not on the same channel adapter as the queue
+                       pair.
+                 - The primary and alternate paths are on different channel adapters.
+                 - The primary and alternate paths specify different MTUs.
+                 - A primary or alternate path record packet lifetime is out of range.
+                 - A primary or alternate path record pkey is out of range.
+                 - The specified private data length is invalid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle specified in the reply was invalid.
+
+       IB_INVALID_QP_STATE
+               The queue pair was in an invalid state to perform the operation.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the connect reply.
+
+

NOTES

+
       This routine results in the access layer replying to a connection
+       request from a remote node, sending a response to the requesting
+       node indicating that the request has been accepted.
+
+
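       As an illustration, the sketch below accepts a request from within
       an ib_pfn_cm_req_cb_t callback; the access flags and timeout values
       are illustrative, and the RTU/DREQ callbacks are supplied by the
       caller.

            #include <iba/ib_al.h>
            #include <complib/cl_memory.h>

            static ib_api_status_t
            accept_req(
                    IN      ib_cm_handle_t          h_cm_req,
                    IN      ib_qp_handle_t          h_qp,
                    IN      ib_pfn_cm_rtu_cb_t      pfn_rtu_cb,
                    IN      ib_pfn_cm_dreq_cb_t     pfn_dreq_cb )
            {
                    ib_cm_rep_t     cm_rep;

                    cl_memclr( &cm_rep, sizeof(cm_rep) );
                    cm_rep.qp_type         = IB_QPT_RELIABLE_CONN;
                    cm_rep.h_qp            = h_qp;
                    cm_rep.access_ctrl     = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
                    cm_rep.init_depth      = 1;
                    cm_rep.flow_ctrl       = TRUE;
                    cm_rep.rnr_nak_timeout = 7;     /* illustrative */
                    cm_rep.rnr_retry_cnt   = 6;     /* illustrative */
                    cm_rep.pfn_cm_rtu_cb   = pfn_rtu_cb;
                    cm_rep.pfn_cm_dreq_cb  = pfn_dreq_cb;

                    return ib_cm_rep( h_cm_req, &cm_rep );
            }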

SEE ALSO

+
       ib_cm_rep_t, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t
+
+
+
[Structures] Access Layer/ib_cm_rep_rec_t

NAME

+
       ib_cm_rep_rec_t
+
+

DESCRIPTION

+
       Connection request reply information returned to the user through their
+       connection reply callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rep_rec
+{
+        const uint8_t* __ptr64          p_rep_pdata;
+
+        ib_qp_type_t                            qp_type;
+
+        ib_cm_handle_t                          h_cm_rep;
+        /* valid for rc, uc & rd qp_type only */
+        const void* __ptr64                     qp_context;
+        uint8_t                                         resp_res;
+        boolean_t                                       flow_ctrl;
+        ib_apr_status_t                         apr_status;
+
+        /* valid for ud qp_type only */
+        const void* __ptr64                     sidr_context;
+        ib_sidr_status_t                        status;
+        ib_net32_t                                      remote_qp;
+        ib_net32_t                                      remote_qkey;
+        ib_class_port_info_t            class_info;
+
+}       ib_cm_rep_rec_t;
+
+

FIELDS

+
       p_rep_pdata
+               A reference to user-defined private data sent as part of the connection
+               request reply.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_cm_rep
+               The handle to the communication manager reply.  This handle is used
+               to issue a ready to use message or to reject the connection.
+
+       h_qp
+               The handle to the queue pair associated with a connection request.
+
+       qp_context
+               The queue pair context associated with a connection request.
+
+       resp_res
+               The maximum number of RDMA read/atomic operations from the recipient
+               that the requestor supports on the connection.  This may be less than
+               the init_depth specified in the call to ib_cm_req.  The local queue
+               pair will be configured with this value unless the connection is
+               rejected.
+
+       flow_ctrl
+               Indicates if the remote CA implements hardware end-to-end flow control.
+
+       apr_status
+               Indicates whether the alternate path information was accepted.
+
+       h_al
+               The AL handle on which the SIDR request was issued.
+
+       sidr_context
+               The sidr_context used in ib_cm_req.
+
+       status
+               Status of the request made previously using ib_cm_req.
+
+       remote_qp
+               Identifies the destination queue pair number.
+
+       remote_qkey
+               Identifies the destination qkey.
+
+       class_info
+               Identifies the class_port_info returned if status was not successful.
+               This field has no value if status is successful.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_rep, ib_pfn_cm_rep_cb_t, ib_cm_status_t, ib_rep_pdata_t
+       ib_qp_type_t, ib_sidr_status_t
+
+
+
[Structures] Access Layer/ib_cm_rep_t

NAME

+
       ib_cm_rep_t
+
+

DESCRIPTION

+
       Connection reply information used when establishing a connection.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rep
+{
+        ib_al_flags_t                           flags;
+
+        const uint8_t* __ptr64          p_rep_pdata;
+        uint8_t                                         rep_length;
+
+        ib_qp_handle_t                          h_qp;
+        ib_qp_type_t                            qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        ib_access_t                                     access_ctrl;
+        uint32_t                                        sq_depth;
+        uint32_t                                        rq_depth;
+
+        uint8_t                                         init_depth;
+        uint8_t                                         target_ack_delay;
+        ib_cm_failover_t                        failover_accepted;
+        boolean_t                                       flow_ctrl;
+        uint8_t                                         rnr_nak_timeout;
+        uint8_t                                         rnr_retry_cnt;
+
+        ib_pfn_cm_rej_cb_t                      pfn_cm_rej_cb;
+        ib_pfn_cm_mra_cb_t                      pfn_cm_mra_cb;
+        ib_pfn_cm_rtu_cb_t                      pfn_cm_rtu_cb;
+        ib_pfn_cm_lap_cb_t                      pfn_cm_lap_cb;
+        ib_pfn_cm_dreq_cb_t                     pfn_cm_dreq_cb;
+
+        ib_recv_wr_t* __ptr64                   p_recv_wr;
+        ib_recv_wr_t* __ptr64 *__ptr64  pp_recv_failure;
+
+        /* valid for ud qp_type only */
+        ib_sidr_status_t                        status;
+        ib_class_port_info_t            class_info;
+
+}       ib_cm_rep_t;
+
+

FIELDS

+
       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       p_rep_pdata
+               Optional user-defined private data sent as part of the connection
+               reply.
+
+       rep_length
+               Defines the size of the user-defined private data.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_qp
+               A handle to the queue pair to use in the connection. For SIDR, h_qp
+               is valid only if sidr status is IB_SIDR_SUCCESS.
+
+       access_ctrl
+               Indicates the type of access permitted on the local QP.
+
+       sq_depth
+               The maximum number of outstanding send operations that the local
+               QP needs to support.
+
+       rq_depth
+               The maximum number of outstanding receive operations that the local
+               QP needs to support.
+
+       init_depth
+               The maximum number of outstanding RDMA read/atomic operations the
+               sender of the reply will have outstanding to the remote QP.
+
+       target_ack_delay
+               The time that the remote QP should wait to receive an ACK from the
+               local QP.
+
+       failover_accepted
+               Status indicating if the fail over path was accepted by the sender
+               of the reply.
+
+       flow_ctrl
+               Indicates whether the local CA supports end-to-end flow control.
+
+       rnr_nak_timeout
+               The time to wait before retrying a packet after receiving a RNR NAK.
+
+       rnr_retry_cnt
+               The number of times that the local QP should retry a send operation
+               after receiving an RNR NAK before reporting an error.
+
+       pfn_cm_rtu_cb
+               References a user-defined callback that will be invoked when
+               a connection is ready to use for send operations.
+
+       pfn_cm_lap_cb
+               References a user-defined callback that will be invoked when
+               a load alternate path request is received for the connecting
+               queue pair or end-to-end context.
+
+       pfn_cm_dreq_cb
+               References a user-defined callback that will be invoked when
+               a disconnect request is received for the connecting
+               queue pair or end-to-end context.
+
+       p_recv_wr
+               A reference to the head of the work request list to be initially
+               posted to the receive queue.  Providing this list closes a potential
+               race condition between sending a CM REP message and posting receives.
+               Use of this field is optional.
+
+       pp_recv_failure
+               If the post receive operation failed, this references the work
+               request in the p_recv_wr list where the first failure occurred.
+               This field is required only if p_recv_wr is used.
+
+       status
+               The SIDR status value returned in response to a previously received REQ.
+
+       class_info
+               The contents of this field are valid only if status is IB_SIDR_REDIRECT.
+
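+       As an illustration, the sketch below replies to a REQ with receives
+       posted atomically with the REP, using p_recv_wr and pp_recv_failure to
+       close the race described above.  This is a minimal sketch, not part of
+       the formal interface: it assumes ib_cm_rep takes the REQ handle plus
+       this structure, and that the callback routines and receive chain are
+       defined elsewhere.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+/* Hypothetical callbacks with the matching ib_pfn_cm_*_cb_t prototypes. */
+extern ib_pfn_cm_rtu_cb_t       my_rtu_cb;
+extern ib_pfn_cm_rej_cb_t       my_rej_cb;
+extern ib_pfn_cm_dreq_cb_t      my_dreq_cb;
+
+static ib_api_status_t
+accept_with_recvs(
+        IN      ib_cm_handle_t          h_cm_req,
+        IN      ib_qp_handle_t          h_qp,
+        IN      ib_recv_wr_t*           p_recv_wr )     /* prepared WR chain */
+{
+        ib_cm_rep_t             cm_rep;
+        ib_recv_wr_t*           p_failed_wr = NULL;
+        ib_api_status_t         status;
+
+        cl_memclr( &cm_rep, sizeof(cm_rep) );
+        cm_rep.qp_type = IB_QPT_RELIABLE_CONN;
+        cm_rep.h_qp = h_qp;
+        cm_rep.pfn_cm_rej_cb = my_rej_cb;
+        cm_rep.pfn_cm_rtu_cb = my_rtu_cb;
+        cm_rep.pfn_cm_dreq_cb = my_dreq_cb;
+        /* Post the initial receives as part of the REP to close the race. */
+        cm_rep.p_recv_wr = p_recv_wr;
+        cm_rep.pp_recv_failure = &p_failed_wr;
+
+        status = ib_cm_rep( h_cm_req, &cm_rep );        /* signature assumed */
+        if( status != IB_SUCCESS && p_failed_wr != NULL )
+        {
+                /* p_failed_wr references the first receive not posted. */
+        }
+        return status;
+}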
+

SEE ALSO

+
       ib_cm_rep, ib_access_t, ib_cm_failover_t, ib_rep_pdata_t,
+       ib_pfn_cm_rtu_cb_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_dreq_cb_t,
+       ib_qp_type_t
+
+
+
+ +

[Functions] +Access Layer/ib_cm_req

+ +

[top][parent][index]

+

NAME

+
       ib_cm_req
+
+

DESCRIPTION

+
       Issues a connection request to a specified end-point.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_req(
+        IN              const   ib_cm_req_t* const                      p_cm_req );
+
+

PARAMETERS

+
       p_cm_req
+               [in] Information describing the type of connection and the remote
+               endpoint for the connection.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The connection request was initiated.
+
+       IB_INVALID_PARAMETER
+               A reference to the connect request information was not provided.
+
+       IB_INVALID_SETTING
+               The connect request information contains one or more of the following
+               errors:
+                 - The class version, queue pair type, or path is not supported by
+                       connection manager.
+                 - The private data length exceeds the value allowed by the specified
+                       connection class version.
+                 - The primary path is not on the same channel adapter as the queue
+                       pair.
+                 - The primary and alternate paths are on different channel adapters.
+                 - The primary and alternate paths specify different MTUs.
+                 - A primary or alternate path record packet lifetime is out of range.
+                 - A primary or alternate path record pkey is out of range.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle specified in the connect request was invalid.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_STATE
+               The queue pair or end-to-end context is already connected.
+
+       IB_INVALID_QP_STATE
+               The queue pair was in an invalid state to perform the operation.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to initiate the connect request.
+
+

NOTES

+
       This routine issues a connection request through the communication
+       manager to a specified end-point.  The p_cm_req parameter contains
+       details needed to form the connection.  The connection request will
+       match with a remote ib_cm_listen or ib_cm_req connection request.
+
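+       For illustration only, a minimal active-side request might be issued
+       as in the following sketch.  The path record, QP handle, and reply
+       callback are assumed to exist, and the numeric values are placeholders
+       rather than recommendations.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+extern ib_pfn_cm_rep_cb_t       my_rep_cb;      /* hypothetical callback */
+
+static ib_api_status_t
+connect_to_service(
+        IN      ib_net64_t              svc_id,
+        IN      ib_path_rec_t*          p_path,         /* from an SA query */
+        IN      ib_qp_handle_t          h_qp )
+{
+        ib_cm_req_t             cm_req;
+
+        cl_memclr( &cm_req, sizeof(cm_req) );
+        cm_req.svc_id = svc_id;
+        cm_req.max_cm_retries = 4;
+        cm_req.p_primary_path = p_path;
+        cm_req.pfn_cm_rep_cb = my_rep_cb;
+        cm_req.qp_type = IB_QPT_RELIABLE_CONN;
+        cm_req.h_qp = h_qp;
+        cm_req.resp_res = 1;
+        cm_req.init_depth = 1;
+        cm_req.remote_resp_timeout = 16;        /* 4.096 * 2^16 us, ~268 ms */
+        cm_req.local_resp_timeout = 16;
+        cm_req.rnr_nak_timeout = 7;             /* encoded per IB spec table 45 */
+        cm_req.rnr_retry_cnt = 6;
+        cm_req.retry_cnt = 6;
+
+        return ib_cm_req( &cm_req );
+}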
+

SEE ALSO

+
       ib_cm_req_t, ib_cm_listen, ib_pfn_cm_req_cb_t
+
+
+
+ +

[Structures] +Access Layer/ib_cm_req_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_cm_req_rec_t
+
+

DESCRIPTION

+
       Connection request information returned to the user through their
+       connection request callback.
+
+

SYNOPSIS

+
#pragma warning(disable:4324)
+typedef struct _ib_cm_req_rec
+{
+        const void* __ptr64                     context;
+        ib_cm_handle_t                          h_cm_req;
+        ib_listen_handle_t                      h_cm_listen;
+
+        const uint8_t* __ptr64          p_req_pdata;
+
+        ib_qp_type_t                            qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        uint8_t                                         resp_res;
+        boolean_t                                       flow_ctrl;
+        uint8_t                                         rnr_retry_cnt;
+        ib_path_rec_t                           primary_path;
+        ib_path_rec_t                           alt_path;
+
+        /* valid for ud qp_type only */
+        ib_net16_t                                      pkey;
+        const void* __ptr64                     sidr_context;
+
+}       ib_cm_req_rec_t;
+#pragma warning(default:4324)
+
+

FIELDS

+
       context
+               For peer-to-peer connections, this is the queue pair context associated
+               with a connection request.  For listens, this is the listen context
+               specified through the ib_cm_listen routine.
+
+       h_cm_req
+               The handle to the communication manager request.  This handle is used
+               to reply to or reject the connection.
+
+       h_cm_listen
+               For connection request callbacks initiated in response to an
+               ib_cm_listen call, this is a handle to the listen request.  This
+               handle is provided to the user to avoid a race condition between
+               the return of the ib_cm_listen routine and the notification of a
+               connection request.
+
+       p_req_pdata
+               A reference to user-defined private data sent as part of the connection
+               request.
+
+       qp_type
+               Indicates the CM service type.
+
+       resp_res
+               The maximum number of RDMA read/atomic operations from the recipient
+               that the requestor supports on the connection.  The init_depth
+               specified in the call to ib_cm_rep must be less than or equal to
+               this value.
+
+       flow_ctrl
+               Indicates if the remote CA implements hardware end-to-end flow control.
+
+       rnr_retry_cnt
+               Requested number of RNR NAK retries to perform before generating a
+               local error.
+
+       primary_path
+               The path record to use for the primary connection.
+
+       alt_path
+               The path record to use for the alternate connection.
+
+       pkey
+               The pkey used in the user's request.
+
+       sidr_context
+               The sidr_context used in ib_cm_listen.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_listen, ib_pfn_cm_req_cb_t,
+       ib_access_t, ib_path_rec_t, ib_req_pdata_t, ib_qp_type_t
+
+
+
+ +

[Structures] +Access Layer/ib_cm_req_t

+ +

[top][parent][index]

+

NAME

+
       ib_cm_req_t
+
+

DESCRIPTION

+
       Connection request information used to establish a new connection.
+
+

SYNOPSIS

+
typedef struct _ib_cm_req
+{
+        ib_net64_t                                      svc_id;
+
+        ib_al_flags_t                           flags;
+        uint8_t                                         max_cm_retries;
+
+        ib_path_rec_t* __ptr64          p_primary_path;
+
+        ib_pfn_cm_rep_cb_t                      pfn_cm_rep_cb;
+
+        const uint8_t* __ptr64          p_req_pdata;
+        uint8_t                                         req_length;
+
+        ib_qp_type_t                            qp_type;
+
+        /* valid for rc, uc & rd qp_type only */
+        ib_qp_handle_t                          h_qp;
+
+        uint8_t* __ptr64                        p_compare_buffer;
+        uint8_t                                         compare_offset;
+        uint8_t                                         compare_length;
+
+        uint8_t                                         resp_res;
+        uint8_t                                         init_depth;
+        uint8_t                                         remote_resp_timeout;
+        boolean_t                                       flow_ctrl;
+        uint8_t                                         local_resp_timeout;
+        uint8_t                                         rnr_nak_timeout;
+        uint8_t                                         rnr_retry_cnt;
+        uint8_t                                         retry_cnt;
+
+        ib_path_rec_t* __ptr64          p_alt_path OPTIONAL;
+
+        ib_pfn_cm_req_cb_t                      pfn_cm_req_cb;
+        ib_pfn_cm_mra_cb_t                      pfn_cm_mra_cb;
+        ib_pfn_cm_rej_cb_t                      pfn_cm_rej_cb;
+
+        /* valid for ud qp_type only */
+        ib_al_handle_t                          h_al;
+        const void* __ptr64                     sidr_context;
+        uint32_t                                        timeout_ms;
+        ib_net16_t                                      pkey;
+
+}       ib_cm_req_t;
+
+

FIELDS

+
       svc_id
+               The ID of the remote service to which the connection request is
+               being made.
+
+       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       max_cm_retries
+               The maximum number of times that either CM should resend a connection
+               establishment message.
+
+       p_primary_path
+               Path information over which to establish the primary connection.
+
+       pfn_cm_rep_cb
+               References a user-provided callback that will be invoked when
+               a reply to the connection request is received.
+
+       p_req_pdata
+               Optional user-defined private data sent as part of the connection
+               request.
+
+       req_length
+               Defines the size of the user-defined private data.
+
+       qp_type
+               Indicates the CM service type.
+
+       h_qp
+               A handle to the queue pair to use in the connection.
+
+       p_compare_buffer
+               An optionally provided buffer that will be used to match incoming
+               connection requests with a registered service.  Use of this buffer
+               permits multiple services to connect using the same service ID as
+               long as they provide different compare buffers.  Incoming requests
+               will be matched against the compare buffer.  Valid for peer-to-peer
+               connection requests only.
+
+       compare_offset
+               An offset into the user-defined data area of a connection request
+               which contains the start of the data that will be compared against.
+               The offset must be the same for all requests using the same service ID.
+               Valid for peer-to-peer connection requests only.
+
+       compare_length
+               Specifies the size of the compare buffer in bytes.  The length must
+               be the same for all requests using the same service ID.  Valid for
+               peer-to-peer connection requests only.
+
+       resp_res
+               The maximum number of outstanding RDMA read/atomic operations the
+               requestor supports from the remote QP.
+
+       init_depth
+               The maximum number of outstanding RDMA read/atomic operations the
+               requestor will have outstanding to the remote QP.
+
+       remote_resp_timeout
+               The time within which the remote CM should transmit a response to
+               the sender.  This value is expressed as
+               4.096 * (2 ^ remote_resp_timeout) microseconds.
+
+       flow_ctrl
+               Indicates whether the local CA supports end-to-end flow control.
+
+       local_resp_timeout
+               The time that the remote CM should wait to receive a response from
+               the local CM.  This value is expressed as
+               4.096 * (2 ^ local_resp_timeout) microseconds.
+
+       rnr_nak_timeout
+               The time to wait before retrying a packet after receiving a RNR NAK.
+               This value is defined in section 9.7.5.2.8 of the IB Spec, table 45.
+
+       rnr_retry_cnt
+               The number of times that the local QP should retry a send operation
+               after receiving an RNR NACK before reporting an error.
+
+       retry_cnt
+               The number of times that a QP should retry a send operation before
+               reporting an error.
+
+       p_alt_path
+               Optional path information that will be used as the alternate
+               connection path in the case of failure.
+
+       pfn_cm_req_cb
+               References a user-provided callback that will be invoked when
+               a request for a connection is received.  This is required for
+               peer-to-peer connection requests, and must be NULL for client/server
+               connection requests.
+
+       pfn_cm_mra_cb
+               References a user-provided callback that will be invoked when
+               a message receipt acknowledgement (MRA) is received.
+
+       pfn_cm_rej_cb
+               References a user-provided callback that will be invoked if the
+               connection is rejected by the remote end-point.
+
+       sidr_context
+               The user-defined SIDR context information that will be passed back
+               in an ib_cm_req callback.
+
+       timeout_ms
+               Timeout value in milliseconds for the REQ to expire.  The CM will add
+               twice the packet lifetime to this value to determine the actual timeout
+               value used.
+
+       pkey
+               pkey to be used as part of the request. This field is only valid for
+               IB_MCLASS_CM_VER_2 clients.
+
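+       The compare buffer is easiest to see in use.  In the sketch below, two
+       peers sharing a service ID rendezvous only when the leading bytes of
+       their private data carry the same tag; the tag, sizes, and callback
+       name are illustrative assumptions, not part of the interface.
+
+static uint8_t  peer_tag[8] = { 'E','X','A','M','P','L','E', 0 };
+
+extern ib_pfn_cm_req_cb_t       my_peer_req_cb; /* hypothetical callback */
+
+static void
+set_peer_matching(
+        IN OUT  ib_cm_req_t*            p_cm_req,
+        IN      const uint8_t*          p_pdata,        /* starts with peer_tag */
+        IN      uint8_t                 pdata_len )
+{
+        p_cm_req->p_req_pdata = p_pdata;
+        p_cm_req->req_length = pdata_len;
+        p_cm_req->p_compare_buffer = peer_tag;
+        p_cm_req->compare_offset = 0;           /* tag at start of the pdata */
+        p_cm_req->compare_length = (uint8_t)sizeof(peer_tag);
+        /* Peer-to-peer requests also require a REQ callback. */
+        p_cm_req->pfn_cm_req_cb = my_peer_req_cb;
+}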
+

SEE ALSO

+
       ib_cm_req, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_mra_cb_t,
+       ib_pfn_cm_rej_cb_t, ib_path_rec_t, ib_req_pdata_t, ib_qp_type_t
+
+
+
+ +

[Functions] +Access Layer/ib_cm_rtu

+ +

[top][parent][index]

+

NAME

+
       ib_cm_rtu
+
+

DESCRIPTION

+
       Sends a ready to use message for a connection request, indicating that
+       the connection has been accepted and is ready for use.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_cm_rtu(
+        IN              const   ib_cm_handle_t                          h_cm_rep,
+        IN              const   ib_cm_rtu_t* const                      p_cm_rtu );
+
+

PARAMETERS

+
       h_cm_rep
+               [in] A handle to the connection reply being responded to.  This handle
+               is provided by the access layer through the ib_pfn_cm_rep_cb_t
+               callback.
+
+       p_cm_rtu
+               [in] Contains ready to use information to return to the sender of the
+               connection reply.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The connection ready to use was initiated.
+
+       IB_INVALID_HANDLE
+               The connection manager reply handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the ready to use information was not provided.
+
+       IB_INVALID_STATE
+               The current connection state does not allow sending this message.
+
+       IB_INVALID_SETTING
+               The specified queue pair attributes were invalid or the private data
+               length exceeds the value allowed by the specified connection class
+               version.
+
+       IB_UNSUPPORTED
+               The specified queue pair access control was not supported.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to send the ready to use response.
+
+

NOTES

+
       This routine results in the access layer marking a connection as ready
+       to use and notifying the remote end-point.
+
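+       For example, a reply callback that accepts the connection
+       unconditionally can send the RTU directly from the callback.  This is
+       a sketch only: the ib_cm_rep_rec_t parameter type, its h_cm_rep field,
+       and the apr/dreq callbacks are assumed here.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+extern ib_pfn_cm_apr_cb_t       my_apr_cb;      /* hypothetical callbacks */
+extern ib_pfn_cm_dreq_cb_t      my_dreq_cb;
+
+static void AL_API
+accept_rep_cb(
+        IN      ib_cm_rep_rec_t*        p_rep_rec )     /* signature assumed */
+{
+        ib_cm_rtu_t             cm_rtu;
+
+        cl_memclr( &cm_rtu, sizeof(cm_rtu) );
+        cm_rtu.access_ctrl = IB_AC_LOCAL_WRITE; /* placeholder access rights */
+        cm_rtu.sq_depth = 0;            /* 0: leave WR depths unchanged */
+        cm_rtu.rq_depth = 0;
+        cm_rtu.pfn_cm_apr_cb = my_apr_cb;
+        cm_rtu.pfn_cm_dreq_cb = my_dreq_cb;
+
+        ib_cm_rtu( p_rep_rec->h_cm_rep, &cm_rtu );
+}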
+

SEE ALSO

+
       ib_cm_rep_t, ib_pfn_cm_rep_cb_t, ib_cm_rtu_t
+
+
+
+ +

[Structures] +Access Layer/ib_cm_rtu_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_cm_rtu_rec_t
+
+

DESCRIPTION

+
       Connection ready to use message information returned to the user through
+       their ready to use callback.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rtu_rec
+{
+        const uint8_t* __ptr64          p_rtu_pdata;
+
+        ib_qp_handle_t                          h_qp;
+        const void* __ptr64                     qp_context;
+
+}       ib_cm_rtu_rec_t;
+
+

FIELDS

+
       p_rtu_pdata
+               A reference to user-defined private data sent as part of the ready
+               to use message.
+
+       h_qp
+               The queue pair handle associated with the connection request.
+
+       qp_context
+               The queue pair context associated with the connection request.
+
+

SEE ALSO

+
       ib_cm_rtu, ib_pfn_cm_rtu_cb_t, ib_cm_status_t, ib_rtu_pdata_t,
+       ib_qp_type_t
+
+
+
+ +

[Structures] +Access Layer/ib_cm_rtu_t

+ +

[top][parent][index]

+

NAME

+
       ib_cm_rtu_t
+
+

DESCRIPTION

+
       Connection ready to use information used when establishing a connection.
+
+

SYNOPSIS

+
typedef struct _ib_cm_rtu
+{
+        ib_access_t                                     access_ctrl;
+        uint32_t                                        sq_depth;
+        uint32_t                                        rq_depth;
+
+        const uint8_t* __ptr64          p_rtu_pdata;
+        uint8_t                                         rtu_length;
+
+        ib_pfn_cm_apr_cb_t                      pfn_cm_apr_cb;
+        ib_pfn_cm_dreq_cb_t                     pfn_cm_dreq_cb;
+
+}       ib_cm_rtu_t;
+
+

FIELDS

+
       access_ctrl
+               Indicates the type of access permitted on the local QP.
+
+       sq_depth
+               The maximum number of outstanding send operations that the local
+               QP needs to support.  This field should be set to zero if the CA
+               does not support changing the work request depth after the QP is
+               created.
+
+       rq_depth
+               The maximum number of outstanding receive operations that the local
+               QP needs to support.  This field should be set to zero if the CA
+               does not support changing the work request depth after the QP is
+               created.
+
+       p_rtu_pdata
+               Optional user-defined private data sent as part of the connection
+               ready to use message.
+
+       rtu_length
+               Defines the size of the user-defined private data.
+
+       pfn_cm_apr_cb
+               References a user-defined callback that will be invoked when an
+               alternate path response is received for the connecting queue pair
+               or end-to-end context.
+
+       pfn_cm_dreq_cb
+               References a user-defined callback that will be invoked when a
+               disconnect request is received is for the connecting queue pair
+               or end-to-end context.
+
+

SEE ALSO

+
       ib_cm_rtu, ib_access_t, ib_rtu_pdata_t
+
+
+
+ +

[Structures] +Access Layer/ib_cq_create_t

+ +

[top][parent][index]

+

NAME

+
       ib_cq_create_t
+
+

DESCRIPTION

+
       Attributes used to initialize a completion queue at creation time.
+
+

SYNOPSIS

+
typedef struct _ib_cq_create
+{
+        uint32_t                                                                size;
+        ib_pfn_comp_cb_t                                                pfn_comp_cb;
+        cl_waitobj_handle_t                                             h_wait_obj;
+
+}       ib_cq_create_t;
+
+

FIELDS

+
       size
+               Specifies the maximum number of work completions that may be on the
+               completion queue.  If the creation call is successful, the actual
+               size of the completion queue will be returned.  The actual size of
+               the CQ will be greater than or equal to the requested size.
+
+       pfn_comp_cb
+               A callback that is invoked whenever a signaled completion occurs on
+               the completion queue.  This field is mutually exclusive with the
+               p_event field.
+
+       h_wait_obj
+               A wait object that is triggered whenever a signaled completion occurs
+               on the completion queue.  This field is mutually exclusive with the
+               pfn_comp_cb field and is only valid for user-mode clients.  The wait
+               object must be ready for use when the call to ib_create_cq is invoked.
+
+

NOTES

+
       Clients must specify either an event or a callback when creating a
+       completion queue.  When a signaled completion occurs on the completion
+       queue, the client will be notified through the callback or by
+       signaling the specified event.
+
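+       The mutual exclusivity is illustrated by the sketch below: one
+       attribute block is set up for callback notification and one for a wait
+       object, and only one of the two fields is non-NULL in each block.  The
+       callback name and size are placeholders.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+extern ib_pfn_comp_cb_t         my_comp_cb;     /* hypothetical callback */
+
+static void
+init_cq_attrs(
+        OUT     ib_cq_create_t*         p_cb_style,
+        OUT     ib_cq_create_t*         p_wait_style,
+        IN      cl_waitobj_handle_t     h_wait_obj )    /* ready for use */
+{
+        cl_memclr( p_cb_style, sizeof(*p_cb_style) );
+        p_cb_style->size = 256;
+        p_cb_style->pfn_comp_cb = my_comp_cb;   /* h_wait_obj stays NULL */
+
+        cl_memclr( p_wait_style, sizeof(*p_wait_style) );
+        p_wait_style->size = 256;
+        p_wait_style->h_wait_obj = h_wait_obj;  /* pfn_comp_cb stays NULL */
+}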
+

SEE ALSO

+
       ib_create_cq, ib_pfn_comp_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_create_av

+ +

[top][parent][index]

+

NAME

+
       ib_create_av
+
+

DESCRIPTION

+
       Creates an address vector.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_av(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_av_attr_t* const                     p_av_attr,
+                OUT                     ib_av_handle_t* const           ph_av );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an allocated protection domain that the address
+               vector will be associated with.
+
+       p_av_attr
+               [in] Attributes for the newly created address vector.
+
+       ph_av
+               [out] Upon successful completion of this call, this references a
+               handle to the newly created address vector.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the address vector attributes or handle was not
+               provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the address vector.
+
+       IB_INVALID_PORT
+               The port number supplied, through the address vector attributes,
+               was invalid for the given channel adapter.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the address vector.
+
+

NOTES

+
       This routine creates an address vector.  Clients specify the attributes
+       of the address vector through the p_av_attr parameter.
+
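+       A minimal sketch for a LID-routed destination follows.  The
+       ib_av_attr_t field names used here (port_num, dlid, sl, path_bits,
+       grh_valid) are assumptions, and the values are placeholders.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+make_av(
+        IN      ib_pd_handle_t          h_pd,
+        IN      ib_net16_t              dlid,
+        OUT     ib_av_handle_t*         ph_av )
+{
+        ib_av_attr_t            av_attr;
+
+        cl_memclr( &av_attr, sizeof(av_attr) );
+        av_attr.port_num = 1;                   /* local port to send from */
+        av_attr.dlid = dlid;
+        av_attr.sl = 0;
+        av_attr.path_bits = 0;
+        av_attr.grh_valid = FALSE;              /* LID-routed, no GRH */
+
+        return ib_create_av( h_pd, &av_attr, ph_av );
+}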
+

SEE ALSO

+
       ib_query_av, ib_modify_av, ib_destroy_av
+
+
+
+ +

[Functions] +Access Layer/ib_create_cq

+ +

[top][parent][index]

+

NAME

+
       ib_create_cq
+
+

DESCRIPTION

+
       Creates a completion queue and returns its handle to the user.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_cq(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN      OUT                     ib_cq_create_t* const           p_cq_create,
+        IN              const   void* const                                     cq_context,
+        IN              const   ib_pfn_event_cb_t                       pfn_cq_event_cb OPTIONAL,
+                OUT                     ib_cq_handle_t* const           ph_cq );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to an open channel adapter.
+
+       p_cq_create
+               [in] Attributes necessary to allocate and initialize the
+               completion queue.
+
+       cq_context
+               [in] A user-specified context associated with the completion queue.
+
+       pfn_cq_event_cb
+               [in] User-specified error callback routine invoked after an
+               asynchronous event has occurred on the completion queue.
+
+       ph_cq
+               [out] Upon successful completion of this call, this references a
+               handle to the newly created completion queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The completion queue was successfully created.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the completion queue attributes or handle was not
+               provided.
+
+       IB_INVALID_SETTING
+               The specified completion queue creation attributes are invalid:
+               either both a completion callback and a wait object were
+               supplied, or neither was.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the completion queue.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the completion queue.
+
+       IB_INVALID_CQ_SIZE
+               The requested size of the completion queue was larger than the
+               maximum supported by the associated channel adapter.
+
+

NOTES

+
       This routine allocates a completion queue on the specified channel
+       adapter.  If the completion queue cannot be allocated, an error is
+       returned.  When creating the completion queue, users associate a context
+       with the completion queue.  This context is returned to the user through
+       the completion and asynchronous event callbacks.
+
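+       A minimal creation sequence, assuming an opened CA and a hypothetical
+       completion callback; note that p_cq_create is IN OUT, so the actual
+       (possibly larger) depth is returned in cq_create.size.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+extern ib_pfn_comp_cb_t         my_comp_cb;     /* hypothetical callback */
+
+static ib_api_status_t
+make_cq(
+        IN      ib_ca_handle_t          h_ca,
+        IN      void*                   context,
+        OUT     ib_cq_handle_t*         ph_cq )
+{
+        ib_cq_create_t          cq_create;
+
+        cl_memclr( &cq_create, sizeof(cq_create) );
+        cq_create.size = 128;                   /* requested depth */
+        cq_create.pfn_comp_cb = my_comp_cb;
+
+        /* On success, cq_create.size holds the actual CQ depth. */
+        return ib_create_cq( h_ca, &cq_create, context, NULL, ph_cq );
+}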
+

SEE ALSO

+
       ib_query_cq, ib_modify_cq, ib_destroy_cq, ib_cq_create_t, ib_pfn_event_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_create_ioc

+ +

[top][parent][index]

+

NAME

+
       ib_create_ioc
+
+

DESCRIPTION

+
       Creates an instance of an I/O controller.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_ioc(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN              const   ib_ioc_profile_t* const         p_ioc_profile,
+                OUT                     ib_ioc_handle_t* const          ph_ioc );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to an opened channel adapter.  The controller will be
+               created to be exposed through the given adapter.
+
+       p_ioc_profile
+               [in] I/O controller profile information.
+
+       ph_ioc
+               [out] Upon successful completion of this call, this references a
+               handle to the created I/O controller.  This handle may be used to
+               add service entries to the controller and register it.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The I/O controller was successfully created.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the I/O controller profile information or handle
+               was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the I/O controller.
+
+

NOTES

+
       This routine creates an I/O controller.  Once created, services may be
+       added to the controller before being registered with the local device
+       manager.
+
+

SEE ALSO

+
       ib_destroy_ioc, ib_add_svc_entry, ib_reg_ioc, ib_ioc_profile_t
+
+
+
+ +

[Functions] +Access Layer/ib_create_mad_pool

+ +

[top][parent][index]

+

NAME

+
       ib_create_mad_pool
+
+

DESCRIPTION

+
       Creates a pool of MAD elements for use in sending and receiving
+       management datagrams.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_mad_pool(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   size_t                                          min,
+        IN              const   size_t                                          max,
+        IN              const   size_t                                          grow_size,
+                OUT                     ib_pool_handle_t* const         ph_pool );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       min
+               [in] The minimum number of MAD elements to create in the pool.
+
+       max
+               [in] The maximum number of MAD elements that will be created by the
+               pool.  If max is set to 0, the pool will continue to grow as long
+               as system resources are available.
+
+       grow_size
+               [in] The number of MAD elements to add to the pool when growing it.
+               If set to 0, the pool will not grow beyond the number specified
+               at creation.  This value must be greater than 0 if min is set to 0.
+
+       ph_pool
+               [out] On successful completion of this call, this returns a handle to
+               the newly created pool.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The MAD pool was created successfully.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the pool handle was not provided.
+
+       IB_INVALID_SETTING
+               The maximum number of MAD elements was non-zero and less than the
+               minimum number of MAD elements.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the MAD pool.
+
+

NOTES

+
       This routine creates a pool of MAD elements.  The elements may be used
+       to send and receive MADs on alias and MAD type QPs.
+
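+       For example, the sketch below creates a pool that starts empty and
+       grows on demand, using only values permitted by the parameter rules
+       above; the sizes are placeholders.
+
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+make_mad_pool(
+        IN      ib_al_handle_t          h_al,
+        OUT     ib_pool_handle_t*       ph_pool )
+{
+        /* Start empty, grow 64 elements at a time, cap at 1024.
+         * Because min is 0, grow_size must be non-zero. */
+        return ib_create_mad_pool( h_al, 0, 1024, 64, ph_pool );
+}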
+

SEE ALSO

+
       ib_destroy_mad_pool, ib_get_mad, ib_put_mad, ib_reg_mad_pool,
+       ib_dereg_mad_pool
+
+
+
+ +

[Functions] +Access Layer/ib_create_mw

+ +

[top][parent][index]

+

NAME

+
       ib_create_mw
+
+

DESCRIPTION

+
       Creates a memory window associated with the specified protection domain.
+       Newly created windows are not bound to any specific memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_mw(
+        IN              const   ib_pd_handle_t                          h_pd,
+                OUT                     net32_t* const                          p_rkey,
+                OUT                     ib_mw_handle_t* const           ph_mw );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an existing protection domain that the memory window
+               should be created within.
+
+       p_rkey
+               [out] The current rkey associated with the memory window.  This key is
+               used to bind the window to a registered memory region.
+
+       ph_mw
+               [out] Upon successful completion of this call, this references a handle
+               to the memory window.  This handle is used to bind and destroy
+               the window.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory window was successfully created.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the memory window rkey or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the memory window.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the memory window.
+
+

NOTES

+
       This routine creates an unbound memory window associated with a specified
+       protection domain.  The memory window cannot be used for data transfer
+       operations until being bound to a registered memory region.
+
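+       A minimal sketch of window creation follows; the handles are assumed
+       to exist, and ib_bind_mw is documented separately.
+
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+make_mw(
+        IN      ib_pd_handle_t          h_pd,
+        OUT     net32_t*                p_rkey,
+        OUT     ib_mw_handle_t*         ph_mw )
+{
+        /* The returned rkey is the one later supplied when binding the
+         * window; the window is unusable until bound to a registered
+         * memory region. */
+        return ib_create_mw( h_pd, p_rkey, ph_mw );
+}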
+

SEE ALSO

+
       ib_destroy_mw, ib_query_mw, ib_bind_mw
+
+
+
+ +

[Functions] +Access Layer/ib_create_qp

+ +

[top][parent][index]

+

NAME

+
       ib_create_qp
+
+

DESCRIPTION

+
       Creates a queue pair and returns its handle to the user.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_qp(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_qp_create_t* const           p_qp_create,
+        IN              const   void* const                                     qp_context,
+        IN              const   ib_pfn_event_cb_t                       pfn_qp_event_cb OPTIONAL,
+                OUT                     ib_qp_handle_t* const           ph_qp );
+
+

PARAMETERS

+
       h_pd
+               [in] This is a handle to a protection domain associated with the queue
+               pair.
+
+       p_qp_create
+               [in] Attributes necessary to allocate and initialize the queue pair.
+
+       qp_context
+               [in] User-specified context information associated with the
+               queue pair.
+
+       pfn_qp_event_cb
+               [in] User-specified error callback routine invoked after an
+               asynchronous event has occurred on the queue pair.
+
+       ph_qp
+               [out] Upon successful completion of this call, this references a
+               handle to the newly created queue pair.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The queue pair was successfully created.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain to associate with the queue pair was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the queue pair attributes or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the queue pair.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the queue pair.
+
+       IB_INVALID_CQ_HANDLE
+               The send or receive completion queue to associate with the queue pair
+               was invalid.
+
+       IB_INVALID_SRQ_HANDLE
+               The shared receive queue to be associated with the queue pair
+               was invalid.
+
+       IB_INVALID_SETTING
+               The specified queue pair creation attributes are invalid.
+
+       IB_UNSUPPORTED
+               The specified queue pair type was not supported by the channel adapter.
+
+       IB_INVALID_MAX_WRS
+               The requested maximum send or receive work request depth could not be
+               supported.
+
+       IB_INVALID_MAX_SGE
+               The requested maximum number of scatter-gather entries for the send or
+               receive queue could not be supported.
+
+

NOTES

+
       1. This routine allocates a queue pair with the specified attributes.  If
+       the queue pair cannot be allocated, an error is returned.  When creating
+       the queue pair, users associate a context with the queue pair.  This
+       context is returned to the user through the asynchronous event callback
+       if an event occurs.
+
+       2. For QPs that are associated with an SRQ, the Consumer should take
+       the QP through the Error State before invoking a Destroy QP or a Modify
+       QP to the Reset State. The Consumer may invoke the Destroy QP without
+       first performing a Modify QP to the Error State and waiting for the Affiliated 
+       Asynchronous Last WQE Reached Event. However, if the Consumer
+       does not wait for the Affiliated Asynchronous Last WQE Reached Event,
+       then WQE and Data Segment leakage may occur.
+
+       3. This routine is used to create queue pairs of type:
+               IB_QPT_RELIABLE_CONN
+               IB_QPT_UNRELIABLE_CONN
+               IB_QPT_UNRELIABLE_DGRM
+               IB_QPT_MAD
+
+       4. Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair
+       is of type IB_QPT_UNRELIABLE_DGRM or IB_QPT_MAD before sending or
+       receiving data.  IB_QPT_RELIABLE_CONN, IB_QPT_UNRELIABLE_CONN type
+       queue pairs should be used by the connection establishment process
+       before data may be sent or received on the QP.
+
+       This call does not return the QP attributes as MAD QPs do not support
+       such an operation.  This is a minor specification deviation.
+
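+       5. For illustration, an RC queue pair might be created as in the
+       sketch below.  The ib_qp_create_t field names used here (qp_type,
+       sq_depth, rq_depth, sq_sge, rq_sge, h_sq_cq, h_rq_cq, sq_signaled)
+       are assumptions, and the depths and SGE counts are placeholders.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+make_rc_qp(
+        IN      ib_pd_handle_t          h_pd,
+        IN      ib_cq_handle_t          h_cq,   /* shared by send and recv */
+        IN      void*                   context,
+        OUT     ib_qp_handle_t*         ph_qp )
+{
+        ib_qp_create_t          qp_create;
+
+        cl_memclr( &qp_create, sizeof(qp_create) );
+        qp_create.qp_type = IB_QPT_RELIABLE_CONN;
+        qp_create.sq_depth = 64;
+        qp_create.rq_depth = 64;
+        qp_create.sq_sge = 1;
+        qp_create.rq_sge = 1;
+        qp_create.h_sq_cq = h_cq;
+        qp_create.h_rq_cq = h_cq;
+        qp_create.sq_signaled = TRUE;   /* every send generates a completion */
+
+        return ib_create_qp( h_pd, &qp_create, context, NULL, ph_qp );
+}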
+

SEE ALSO

+
       ib_query_qp, ib_modify_qp, ib_destroy_qp, ib_cm_req, ib_cm_rep, ib_cm_rtu,
+       ib_init_dgrm_svc, ib_qp_create_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
+ +

[Functions] +Access Layer/ib_create_srq

+ +

[top][parent][index]

+

NAME

+
       ib_create_srq
+
+

DESCRIPTION

+
       Creates a shared receive queue and returns its handle to the user.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_create_srq(
+        IN              const   ib_pd_handle_t                  h_pd,
+        IN              const   ib_srq_attr_t* const            p_srq_attr,
+        IN              const   void* const                             srq_context,
+        IN              const   ib_pfn_event_cb_t                       pfn_srq_event_cb OPTIONAL,
+                OUT             ib_srq_handle_t* const          ph_srq );
+
+

PARAMETERS

+
       h_pd
+               [in] This is a handle to a protection domain associated with the
+               shared receive queue.
+
+       p_srq_attr
+               [in] Attributes necessary to allocate and initialize a shared receive queue.
+
+       srq_context
+               [in] User-specified context information associated with the shared
+               receive queue.
+
+       pfn_srq_event_cb
+               [in] User-specified error callback routine invoked after an
+               asynchronous event has occurred on the shared receive queue.
+
+       ph_srq
+               [out] Upon successful completion of this call, this references a
+               handle to the newly created shared receive queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The receive queue was successfully created.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain to associate with the shared receive queue was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the shared receive queue attributes or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the shared receive queue.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the shared receive queue.
+
+       IB_INVALID_SETTING
+               The specified shared receive queue creation attributes are invalid.
+
+       IB_INVALID_MAX_WRS
+               The requested maximum send or receive work request depth could not be
+               supported.
+
+       IB_INVALID_MAX_SGE
+               The requested maximum number of scatter-gather entries for the send or
+               receive queue could not be supported.
+
+

NOTES

+
       This routine allocates a shared receive queue with the specified attributes.  If
+       the shared receive queue cannot be allocated, an error is returned.  When creating
+       the shared receive queue, users associate a context with the shared receive queue.  This
+       context is returned to the user through the asynchronous event callback
+       if an event occurs.
+
+       This routine is used to create receive queues, which work with QPs of type:
+
+       IB_QPT_RELIABLE_CONN
+       IB_QPT_UNRELIABLE_CONN
+       IB_QPT_UNRELIABLE_DGRM
+
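+       As a sketch, an SRQ might be created as shown below.  The
+       ib_srq_attr_t field names used here (max_wr, max_sge, srq_limit) are
+       assumptions, and the sizes are placeholders.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+make_srq(
+        IN      ib_pd_handle_t          h_pd,
+        IN      void*                   context,
+        OUT     ib_srq_handle_t*        ph_srq )
+{
+        ib_srq_attr_t           srq_attr;
+
+        cl_memclr( &srq_attr, sizeof(srq_attr) );
+        srq_attr.max_wr = 256;          /* receive WR capacity */
+        srq_attr.max_sge = 1;
+        srq_attr.srq_limit = 0;         /* no limit event armed */
+
+        return ib_create_srq( h_pd, &srq_attr, context, NULL, ph_srq );
+}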
+

SEE ALSO

+
       ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+       ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
+ +

[Functions] +Access Layer/ib_dealloc_pd

+ +

[top][parent][index]

+

NAME

+
       ib_dealloc_pd
+
+

DESCRIPTION

+
       Deallocates a protection domain.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_dealloc_pd(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an allocated protection domain.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the protection
+               domain has been successfully destroyed.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+

NOTES

+
       This call deallocates a protection domain and releases all associated
+       resources, including queue pairs and registered memory regions.  Since
+       callbacks may be outstanding against one of the protection domain's related
+       resources at the time the deallocation call is invoked, this call operates
+       asynchronously.  The user will be notified through a callback once the
+       deallocation call completes, indicating that no additional callbacks
+       will be invoked for a related resource.
+
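+       A common way to serialize teardown is to signal an event from the
+       destroy callback, as in the sketch below.  It assumes
+       ib_pfn_destroy_cb_t receives the context of the destroyed object and
+       uses the complib event primitives.
+
+#include <complib/cl_event.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static cl_event_t       destroy_event;
+
+static void AL_API
+pd_destroy_cb(
+        IN      void*                   context )       /* signature assumed */
+{
+        UNUSED_PARAM( context );
+        cl_event_signal( &destroy_event );      /* no further callbacks follow */
+}
+
+static void
+teardown_pd(
+        IN      ib_pd_handle_t          h_pd )
+{
+        if( cl_event_init( &destroy_event, FALSE ) != CL_SUCCESS )
+                return;
+        if( ib_dealloc_pd( h_pd, pd_destroy_cb ) == IB_SUCCESS )
+                cl_event_wait_on( &destroy_event, EVENT_NO_TIMEOUT, FALSE );
+        cl_event_destroy( &destroy_event );
+}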
+

SEE ALSO

+
       ib_alloc_pd
+
+
+
+ +

[Functions] +Access Layer/ib_dereg_mad_pool

+ +

[top][parent][index]

+

NAME

+
       ib_dereg_mad_pool
+
+

DESCRIPTION

+
       Deregisters a MAD pool from a protection domain.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_dereg_mad_pool(
+        IN              const   ib_pool_key_t                           pool_key );
+
+

PARAMETERS

+
       pool_key
+               [in] Key to the MAD pool to deregister.  The specified pool must
+               have been registered with a protection domain through a call to
+               ib_reg_mad_pool.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The MAD pool was successfully deregistered from the protection domain.
+
+       IB_INVALID_PARAMETER
+               The MAD pool key was invalid.
+
+       IB_RESOURCE_BUSY
+               One or more MAD elements were removed from the MAD pool using the
+               specified pool key, and were not returned.
+
+

NOTES

+
       This function deregisters a MAD pool with a protection domain.  After
+       successful completion of this call, the MAD elements of the associated
+       pool are no longer usable on the protection domain.
+
+

SEE ALSO

+
       ib_create_mad_pool, ib_destroy_mad_pool, ib_reg_mad_pool
+
+
+
+ +

[Functions] +Access Layer/ib_dereg_mr

+ +

[top][parent][index]

+

NAME

+
       ib_dereg_mr
+
+

DESCRIPTION

+
       Deregisters a registered memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_dereg_mr(
+        IN              const   ib_mr_handle_t                          h_mr );
+
+

PARAMETERS

+
       h_mr
+               [in] A handle to a registered memory region that will be unregistered.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory region was successfully deregistered.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has memory windows bound to it.
+
+

NOTES

+
       This routine deregisters a memory region with a channel adapter.  The
+       region may be deregistered only if there are no memory windows or
+       existing shared memory regions currently bound to the region.  Work
+       requests referencing this region when it is deregistered will fail
+       with a WRS_LOCAL_PROTECTION_ERR error.
+
+

SEE ALSO

+
       ib_reg_mem, ib_reg_phys, ib_reg_shared
+
+
+
+ +

[Functions] +Access Layer/ib_dereg_pnp

+ +

[top][parent][index]

+

NAME

+
       ib_dereg_pnp
+
+

DESCRIPTION

+
       Routine used to cancel notification of local events or I/O controller
+       assignments.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_dereg_pnp(
+        IN              const   ib_pnp_handle_t                         h_pnp,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_pnp
+               [in] A handle returned as a result of an ib_reg_pnp operation.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the PnP
+               registration has been successfully deregistered.
+
+

NOTES

+
       This routine cancels a pending PnP operation.  To avoid a race condition
+       canceling a request at the same time a notification callback is in
+       progress, the cancel operation operates asynchronously.  For additional
+       details see ib_pfn_destroy_cb_t.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The PnP deregistration was initiated.
+
+       IB_INVALID_HANDLE
+               The PnP handle was invalid.
+
+

SEE ALSO

+
       ib_reg_pnp, ib_pfn_destroy_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_dereg_svc

+ +

[top][parent][index]

+

NAME

+
       ib_dereg_svc
+
+

DESCRIPTION

+
       Removes a service registered with the subnet administrator.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_dereg_svc(
+        IN              const   ib_reg_svc_handle_t                     h_reg_svc,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_reg_svc
+               [in] A handle to a registered service.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the service
+               has been deregistered.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The service deregistration was initiated.
+
+       IB_INVALID_HANDLE
+               The registered service handle was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine deregisters a service with the subnet administrator.
+       To avoid a race condition deregistering a service at the same time
+       the registration completion callback is in progress, the deregister
+       operation operates asynchronously.  For additional details see
+       ib_pfn_destroy_cb_t.
+
+

SEE ALSO

+
       ib_reg_svc, ib_pfn_destroy_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_av

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_av
+
+

DESCRIPTION

+
       Destroys an existing address vector.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_av(
+        IN              const   ib_av_handle_t                          h_av );
+
+

PARAMETERS

+
       h_av
+               [in] A handle to an existing address vector.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The address vector was successfully destroyed.
+
+       IB_INVALID_AV_HANDLE
+               The address vector handle was invalid.
+
+

NOTES

+
       This routine destroys an existing address vector.
+
+

SEE ALSO

+
       ib_create_av
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_cq

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_cq
+
+

DESCRIPTION

+
       Destroys a completion queue.  Once destroyed, no further access to the
+       completion queue is possible.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_cq
+               [in] A handle to an existing completion queue.
+
+       pfn_destroy_cb
+               [in] A user-provided callback that is invoked after the
+               completion queue has been successfully destroyed.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The destroy request was registered.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+

NOTES

+
       This call destroys an existing completion queue.  Since callbacks may be
+       outstanding against the completion queue at the time the destroy operation
+       is invoked, this call operates asynchronously.  The user will be
+       notified through a callback once the destroy operation completes,
+       indicating that no additional callbacks will be invoked for the specified
+       completion queue.
+
+       If there are still queue pairs associated with the completion queue when
+       this function is invoked, the destroy operation will fail with status
+       IB_RESOURCE_BUSY.
+
+

SEE ALSO

+
       ib_create_cq, ib_pfn_destroy_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_ioc

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_ioc
+
+

DESCRIPTION

+
       Destroys an instance of an I/O controller.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_ioc(
+        IN              const   ib_ioc_handle_t                         h_ioc );
+
+

PARAMETERS

+
       h_ioc
+               [in] A handle to an existing I/O controller.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The I/O controller was successfully destroyed.
+
+       IB_INVALID_HANDLE
+               The I/O controller handle was invalid.
+
+

NOTES

+
       Once an I/O controller is destroyed, it is no longer reported by the
+       local device manager as an exported device.  This routine automatically
+       removes all services associated with the controller.
+
+

SEE ALSO

+
       ib_create_ioc
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_mad_pool

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_mad_pool
+
+

DESCRIPTION

+
       Destroys a MAD pool and all associated resources.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_mad_pool(
+        IN              const   ib_pool_handle_t                        h_pool );
+
+

PARAMETERS

+
       h_pool
+               [in] A handle to a MAD pool allocated through the ib_create_mad_pool
+               routine.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The MAD pool was successfully destroyed.
+
+       IB_INVALID_HANDLE
+               The MAD pool handle was invalid.
+
+       IB_RESOURCE_BUSY
+               One or more MAD elements have not been returned to the MAD pool.
+
+

NOTES

+
       This call destroys a MAD pool and all resources allocated by the pool.
+
+

SEE ALSO

+
       ib_create_mad_pool, ib_get_mad, ib_put_mad
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_mw

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_mw
+
+

DESCRIPTION

+
       Destroys a memory window.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_mw(
+        IN              const   ib_mw_handle_t                          h_mw );
+
+

PARAMETERS

+
       h_mw
+               [in] A handle to an existing memory window.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory window was successfully destroyed.
+
+       IB_INVALID_MW_HANDLE
+               The memory window handle was invalid.
+
+

NOTES

+
       This routine deallocates a window entry created via a call to ib_create_mw.
+       Once this operation is complete, future accesses to the window will fail.
+
+

SEE ALSO

+
       ib_create_mw
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_qp

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_qp
+
+

DESCRIPTION

+
       Release a queue pair.  Once destroyed, no further access to this
+       queue pair is possible.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_qp(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to an existing queue pair.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the queue pair
+               has been successfully destroyed.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The destroy request was registered.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+

NOTES

+
       This call destroys an existing queue pair.  Since callbacks may be
+       outstanding against the queue pair at the time the destroy operation is
+       invoked, this call operates asynchronously.  The user will be notified
+       through a callback once the destroy operation completes, indicating that
+       no additional callbacks will be invoked for the specified queue pair.
+
+

SEE ALSO

+
       ib_create_qp
+
+
+
+ +

[Functions] +Access Layer/ib_destroy_srq

+ +

[top][parent][index]

+

NAME

+
       ib_destroy_srq
+
+

DESCRIPTION

+
       Release a shared receive queue.  Once destroyed, no further access to this
+       shared receive queue is possible.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_destroy_srq(
+        IN              const   ib_srq_handle_t                         h_srq,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_srq
+               [in] A handle to an existing shared receive queue.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the shared receive queue
+               has been successfully destroyed.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The destroy request was registered.
+
+       IB_INVALID_SRQ_HANDLE
+               The shared receive queue handle was invalid.
+
+       IB_RESOURCE_BUSY
+               There are queue pairs bound to the shared receive queue.
+
+

NOTES

+
       This call destroys an existing shared receive queue.  Since callbacks may be
+       outstanding against the shared receive queue at the time the destroy operation is
+       invoked, this call operates asynchronously.  The user will be notified
+       through a callback once the destroy operation completes, indicating that
+       no additional callbacks will be invoked for the specified shared receive queue.
+
+

SEE ALSO

+
       ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+       ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
+ +

[Definitions] +Access Layer/ib_device_attr_mask_t

+ +

[top][parent][index]

+

NAME

+
       ib_device_attr_mask_t
+
+

DESCRIPTION

+
       Used to specify desired attributes of a device or port.
+
+

SYNOPSIS

+
#define         IB_DEV_PORT_ACTIVE              0x1
+
+

VALUES

+
       IB_DEV_PORT_ACTIVE
+               Specifies that a port state should be active.  Applies only to port
+               GUIDs.
+
+

SEE ALSO

+
       ib_get_guid
+
+
+
+ +

[Structures] +Access Layer/ib_dgrm_info_t

+ +

[top][parent][index]

+

NAME

+
       ib_dgrm_info_t
+
+

DESCRIPTION

+
       Information specified when initializing a datagram queue pair before its
+       first use.
+
+

SYNOPSIS

+
typedef struct _ib_dgrm_info
+{
+        ib_net64_t                                      port_guid;
+        uint32_t                                        qkey;
+        uint16_t                                        pkey_index;
+
+}       ib_dgrm_info_t;
+
+

FIELDS

+
       port_guid
+               Specifies the port that the datagram service will use.  This field
+               applies only to IB_QPT_UNRELIABLE_DGRM and IB_QPT_MAD QP types.
+
+       qkey
+               Specifies the qkey that the queue pair will use.  Incoming messages
+               must have a matching qkey for the message to be accepted by the
+               receiving QP.
+
+       pkey_index
+               Specifies the pkey associated with this queue pair.
+
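+       As a sketch, a UD queue pair might be initialized as shown below.
+       ib_init_dgrm_svc is assumed to take the QP handle and this structure,
+       and the qkey is an illustrative placeholder that peers must match.
+
+#include <complib/cl_memory.h>
+#include <iba/ib_al.h>          /* assumed public IBAL header */
+
+static ib_api_status_t
+start_dgrm_qp(
+        IN      ib_qp_handle_t          h_qp,
+        IN      ib_net64_t              port_guid )
+{
+        ib_dgrm_info_t          dgrm_info;
+
+        cl_memclr( &dgrm_info, sizeof(dgrm_info) );
+        dgrm_info.port_guid = port_guid;
+        dgrm_info.qkey = 0x12345678;    /* placeholder; peers must match it */
+        dgrm_info.pkey_index = 0;       /* default partition */
+
+        return ib_init_dgrm_svc( h_qp, &dgrm_info );    /* signature assumed */
+}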
+

SEE ALSO

+
       ib_init_dgrm_svc
+
+
+
+ +

[Structures] +Access Layer/ib_drep_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_drep_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a reply to a disconnection request.
+
+

SYNOPSIS

+
typedef union _ib_drep_pdata
+{
+        uint8_t                                         data[IB_DREP_PDATA_SIZE];
+
+}       ib_drep_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_dreq_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_dreq_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a disconnection request.
+
+

SYNOPSIS

+
typedef union _ib_dreq_pdata
+{
+        uint8_t                                         data[IB_DREQ_PDATA_SIZE];
+
+}       ib_dreq_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Functions] +Access Layer/ib_force_apm

+ +

[top][parent][index]

+

NAME

+
       ib_force_apm
+
+

DESCRIPTION

+
       This routine indicates that a queue pair should immediately migrate to its
+       alternate path.  All future data transfers will occur over the new path.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_force_apm(
+        IN              const   ib_qp_handle_t                          h_qp );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to the queue pair to migrate.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The queue pair or end-to-end context was successfully modified.
+
+       IB_INVALID_PARAMETER
+               Neither or both of the queue pair and end-to-end context handles
+               were valid.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the queue pair or end-to-end context.
+
+       IB_UNSUPPORTED
+               The requested modification was not supported.
+
+       IB_INVALID_QP_STATE
+               The queue pair was in an invalid state for the requested operation.
+
+

NOTES

+
       For this routine to operate correctly, the specified queue pair must have
+       an existing alternate path loaded.  If an alternate path is not loaded, or
+       has not yet been armed, this call will fail.
+
+       After this call is invoked, data transfers on the given queue pair will
+       use the alternate path.  Once this call completes, a new alternate path
+       may be loaded using the ib_cm_lap call.
+
+

SEE ALSO

+
       ib_cm_lap
+
+
+
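
EXAMPLE

+
+       A minimal sketch, assuming h_qp is a connected queue pair whose
+       alternate path has already been loaded and armed through the
+       connection manager (see ib_cm_lap):
+
+               ib_api_status_t         status;
+
+               /* Request an immediate migration to the alternate path. */
+               status = ib_force_apm( h_qp );
+               if( status != IB_SUCCESS )
+               {
+                       /* No alternate path is loaded or armed, or the QP
+                        * state does not permit migration.
+                        */
+               }
+
+       After a successful return, a replacement alternate path may be loaded
+       with ib_cm_lap to allow another migration.
+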

[Functions] Access Layer/ib_get_ca_by_gid

+

NAME

+
       ib_get_ca_by_gid
+
+

DESCRIPTION

+
       Returns the GUID of the channel adapter containing the given port GID.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_ca_by_gid(
+        IN                              ib_al_handle_t                          h_al,
+        IN              const   ib_gid_t* const                         p_gid,
+                OUT                     ib_net64_t* const                       p_ca_guid );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an opened instance of the access layer.
+
+       p_gid
+               [in] A port GID.
+
+       p_ca_guid
+               [out] The GUID of the CA containing the port that matches the
+               user-specified GID.
+

RETURN VALUES

+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the port GID or CA GUID was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_NOT_FOUND
+               No channel adapters in the system contain the specified port GID.
+
+

NOTES

+
       This routine returns the GUID of the CA that contains the user-specified port GID.
+       If no channel adapters in the system contain the port GID, the call will
+       return IB_NOT_FOUND.
+
+

SEE ALSO

+
       ib_open_al, ib_open_ca, ib_get_ca_guids
+
+
+

[Functions] Access Layer/ib_get_ca_guids

+

NAME

+
       ib_get_ca_guids
+
+

DESCRIPTION

+
       Returns a list of GUIDs for all channel adapters currently available in
+       the system.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_ca_guids(
+        IN                              ib_al_handle_t                          h_al,
+                OUT                     ib_net64_t* const                       p_guid_array OPTIONAL,
+        IN      OUT                     size_t* const                           p_guid_cnt );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an opened instance of the access layer.
+
+       p_guid_array
+               [out] An array of GUIDs provided by the user and filled out by the
+               access layer.  If this parameter is NULL, the access layer will return
+               the number of entries in the array necessary to retrieve the GUID list.
+
+       p_guid_cnt
+               [in/out] On input, this specifies the number of entries in the
+               GUID array.
+
+               On output, the access layer will set this to the number of valid
+               entries in the p_guid_array or the minimum number of entries needed
+               in the GUID array in order to return all channel adapter GUIDs.
+

RETURN VALUES

+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the GUID count was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+

NOTES

+
       This routine returns a list of GUIDs for all available channel adapters.
+       When called, the access layer will examine p_guid_cnt to determine the
+       number of entries available in the p_guid_array.  If the count is too
+       small, the function will return IB_INSUFFICIENT_MEMORY, and set p_guid_cnt
+       to the number of needed entries.
+
+

SEE ALSO

+
       ib_open_al, ib_open_ca
+
+
+
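
EXAMPLE

+
+       A minimal sketch of the two-call sizing pattern described above,
+       assuming h_al is a valid access layer handle returned by ib_open_al:
+
+               size_t                          guid_cnt = 0;
+               ib_net64_t*                     p_guids;
+               ib_api_status_t         status;
+
+               /* A NULL array returns the required entry count. */
+               status = ib_get_ca_guids( h_al, NULL, &guid_cnt );
+               if( status == IB_SUCCESS || status == IB_INSUFFICIENT_MEMORY )
+               {
+                       p_guids = (ib_net64_t*)
+                               cl_malloc( sizeof(ib_net64_t) * guid_cnt );
+                       if( p_guids )
+                       {
+                               /* Retrieve the GUIDs of all channel adapters. */
+                               status = ib_get_ca_guids( h_al, p_guids, &guid_cnt );
+                       }
+               }
+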

[Functions] Access Layer/ib_get_guid

+

NAME

+
       ib_get_guid
+
+

DESCRIPTION

+
       Returns a GUID for a device or port that matches the user-specified
+       attributes.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_guid(
+        IN                              ib_al_handle_t                          h_al,
+        IN              const   uint32_t                                        index,
+        IN              const   ib_pnp_class_t                          device_type,
+        IN              const   uint64_t                                        attr_mask,
+                OUT                     ib_net64_t* const                       p_guid );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an opened instance of the access layer.
+
+       index
+               [in] Specifies the location of the device or port.  Users specify this
+               value to iterate through all devices or ports on the system.  If set
+               to IB_ANY_INDEX, then the first device or port matching the given
+               attributes will be returned.
+
+       device_type
+               [in] Indicates the type of device to retrieve the GUID for.
+
+       attr_mask
+               [in] Specifies a set of attributes that the given device or port
+               must have for a successful match to occur.
+
+       p_guid
+               [out] On successful return, this parameter will reference the GUID
+               of the device or port that contains the specified attributes.
+

RETURN VALUES

+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_SETTING
+               The specified device type is invalid.
+
+       IB_INVALID_PARAMETER
+               No p_guid parameter was specified.
+
+       IB_NO_MATCH
+               The device or port at the specified index does not have the given
+               attributes.
+
+       IB_INVALID_INDEX
+               No device or port exists for the specified index.
+
+

NOTES

+
       This routine returns a GUID for a device or port that matches the
+       user-specified attributes.  If index is IB_ANY_INDEX, then the first
+       device or port matching the given attributes is returned if a match is
+       found.  If no match is found, the call will return IB_NO_MATCH.  If a
+       valid index is specified, then the device or port located at that index
+       will be examined to see if it has the given attributes.  If the device
+       or port with those attributes is found, its GUID is returned.
+
+       This routine may be used to locate a device or port with a given set
+       of attributes, or iterate through all devices or ports on the system.
+       The specified index values are set by the access layer, but the index
+       associated with a GUID may change if devices are removed from the system.
+
+

SEE ALSO

+
       ib_open_al, ib_pnp_class_t, ib_get_ca_guids, ib_query_ca_by_guid
+
+
+
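
EXAMPLE

+
+       A minimal sketch that locates the first active port, assuming h_al is
+       valid and that IB_PNP_PORT is the desired ib_pnp_class_t device type:
+
+               ib_net64_t                      port_guid;
+               ib_api_status_t         status;
+
+               /* Return the first port whose state is active. */
+               status = ib_get_guid( h_al, IB_ANY_INDEX, IB_PNP_PORT,
+                       IB_DEV_PORT_ACTIVE, &port_guid );
+               if( status == IB_SUCCESS )
+               {
+                       /* port_guid identifies an active port. */
+               }
+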

[Functions] Access Layer/ib_get_mad

+

NAME

+
       ib_get_mad
+
+

DESCRIPTION

+
       Obtains a MAD element from the pool.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_mad(
+        IN              const   ib_pool_key_t                           pool_key,
+        IN              const   size_t                                          buf_size,
+                OUT                     ib_mad_element_t                        **pp_mad_element );
+
+

PARAMETERS

+
       pool_key
+               [in] Key for the pool to obtain a MAD element for the desired
+               protection domain.
+
+       buf_size
+               [in] The size of the buffer referenced by the MAD element.
+
+       pp_mad_element
+               [out] Upon successful completion of this call, this references
+               the returned MAD element.
+

RETURN VALUES

+       IB_SUCCESS
+               The MAD element was successfully retrieved from the MAD pool.
+
+       IB_INVALID_PARAMETER
+               The MAD pool key was invalid or a reference to the MAD element
+               pointer was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to obtain the MAD element.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to grow and register the MAD pool.
+
+

NOTES

+
       This function obtains a MAD element containing a data segment
+       that references a data buffer for the given pool key.  The data buffer
+       referenced by the MAD element is zeroed before being returned to the
+       user.
+
+       It is recommended that elements retrieved from a MAD pool for use on
+       the receive queue of a MAD QP have a buffer size of 256 bytes.
+
+       For MADs being sent, buf_size should be set to the size of the relevant
+       data sent as part of the MAD, and should not include any padding needed
+       to make the MAD size a multiple of 256 bytes.  For most MADs, buf_size
+       may be set equal to the size of the MAD header plus the amount of user
+       data transferred as part of the MAD.
+
+

SEE ALSO

+
       ib_put_mad, ib_send_mad, ib_mad_element_t
+
+
+
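
EXAMPLE

+
+       A minimal sketch of obtaining a send MAD, assuming pool_key was
+       returned by a prior MAD pool registration against the target
+       protection domain:
+
+               ib_mad_element_t        *p_mad = NULL;
+               ib_api_status_t         status;
+
+               /* Request a zeroed 256-byte MAD buffer from the pool. */
+               status = ib_get_mad( pool_key, 256, &p_mad );
+               if( status == IB_SUCCESS )
+               {
+                       /* Build the request in p_mad->p_mad_buf, then send it
+                        * with ib_send_mad or release it with ib_put_mad.
+                        */
+               }
+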

[Functions] Access Layer/ib_get_mad_buf

+

NAME

+
       ib_get_mad_buf
+
+

DESCRIPTION

+
       Returns a pointer to the MAD buffer associated with a MAD element.
+
+

SYNOPSIS

+
#pragma warning(push)
+#pragma warning(disable: 4244 ) 
+AL_INLINE void* AL_API
+ib_get_mad_buf(
+        IN              const   ib_mad_element_t* const         p_mad_element )
+{
+        CL_ASSERT( p_mad_element );
+        return( p_mad_element->p_mad_buf );
+}
+#pragma warning (pop)
+
+

PARAMETERS

+
       p_mad_element
+               [in] A pointer to a MAD element.
+
+

NOTES

+
       Returns a pointer to the MAD buffer associated with a MAD element.
+
+

SEE ALSO

+
       ib_mad_element_t
+
+
+

[Functions] Access Layer/ib_get_port_by_gid

+

NAME

+
       ib_get_port_by_gid
+
+

DESCRIPTION

+
       Returns the GUID of a port that contains the given port GID.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_port_by_gid(
+        IN                              ib_al_handle_t                          h_al,
+        IN              const   ib_gid_t* const                         p_gid,
+                OUT                     ib_net64_t* const                       p_port_guid );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an opened instance of the access layer.
+
+       p_gid
+               [in] A port GID.
+
+       p_port_guid
+               [out] The GUID of the port that matches the user-specified GID.
+

RETURN VALUES

+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the port GID or port GUID was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_NOT_FOUND
+               No channel adapters in the system contain the specified port GID.
+
+

NOTES

+
       This routine returns the GUID of the port matching the user-specified port GID.
+       If no channel adapters in the system contain the port GID, the call will
+       return IB_NOT_FOUND.
+
+

SEE ALSO

+
       ib_open_al, ib_open_ca, ib_get_ca_guids
+
+
+

[Functions] Access Layer/ib_get_query_node_rec

+

NAME

+
       ib_get_query_node_rec
+
+

DESCRIPTION

+
       Retrieves a node record result from a MAD returned by a call to
+       ib_query().
+
+

SYNOPSIS

+
AL_INLINE ib_node_record_t* AL_API
+ib_get_query_node_rec(
+        IN                              ib_mad_element_t                        *p_result_mad,
+        IN                              uint32_t                                        result_index )
+{
+        ib_sa_mad_t             *p_sa_mad;
+
+        CL_ASSERT( p_result_mad );
+        p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad );
+        CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_NODE_RECORD );
+
+        return( (ib_node_record_t*)ib_get_query_result( p_result_mad,
+                result_index ) );
+}
+
+

PARAMETERS

+
       p_result_mad
+               [in] This is a reference to the MAD returned as a result of the
+               query.
+
+       result_index
+               [in] A zero-based index indicating which result to return.
+
+

NOTES

+
       This call returns a pointer to the start of a node record result from
+       a call to ib_query().
+
+

SEE ALSO

+
       ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_node_record_t
+
+
+

[Functions] Access Layer/ib_get_query_path_rec

+

NAME

+
       ib_get_query_path_rec
+
+

DESCRIPTION

+
       Retrieves a path record result from a MAD returned by a call to
+       ib_query().
+
+

SYNOPSIS

+
AL_INLINE ib_path_rec_t* AL_API
+ib_get_query_path_rec(
+        IN                              ib_mad_element_t                        *p_result_mad,
+        IN                              uint32_t                                        result_index )
+{
+        ib_sa_mad_t             *p_sa_mad;
+
+        CL_ASSERT( p_result_mad );
+        p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad );
+        CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PATH_RECORD );
+
+        return( (ib_path_rec_t*)ib_get_query_result( p_result_mad, result_index ) );
+}
+
+

PARAMETERS

+
       p_result_mad
+               [in] This is a reference to the MAD returned as a result of the
+               query.
+
+       result_index
+               [in] A zero-based index indicating which result to return.
+
+

NOTES

+
       This call returns a pointer to the start of a path record result from
+       a call to ib_query().
+
+

SEE ALSO

+
       ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_path_rec_t
+
+
+

[Functions] Access Layer/ib_get_query_portinfo_rec

+

NAME

+
       ib_get_query_portinfo_rec
+
+

DESCRIPTION

+
       Retrieves a port info record result from a MAD returned by a call to
+       ib_query().
+
+

SYNOPSIS

+
AL_INLINE ib_portinfo_record_t* AL_API
+ib_get_query_portinfo_rec(
+        IN                              ib_mad_element_t                        *p_result_mad,
+        IN                              uint32_t                                        result_index )
+{
+        ib_sa_mad_t             *p_sa_mad;
+
+        CL_ASSERT( p_result_mad );
+        p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad );
+        CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PORTINFO_RECORD );
+
+        return( (ib_portinfo_record_t*)ib_get_query_result( p_result_mad,
+                result_index ) );
+}
+
+

PARAMETERS

+
       p_result_mad
+               [in] This is a reference to the MAD returned as a result of the
+               query.
+
+       result_index
+               [in] A zero-based index indicating which result to return.
+
+

NOTES

+
       This call returns a pointer to the start of a port info record result from
+       a call to ib_query().
+
+

SEE ALSO

+
       ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_portinfo_record_t
+
+
+

[Functions] Access Layer/ib_get_query_result

+

NAME

+
       ib_get_query_result
+
+

DESCRIPTION

+
       Retrieves a result structure from a MAD returned by a call to ib_query().
+
+

SYNOPSIS

+
AL_INLINE void* AL_API
+ib_get_query_result(
+        IN                              ib_mad_element_t                        *p_result_mad,
+        IN                              uint32_t                                        result_index )
+{
+        ib_sa_mad_t             *p_sa_mad;
+
+        CL_ASSERT( p_result_mad );
+        p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad );
+        CL_ASSERT( p_sa_mad );
+        CL_ASSERT( ib_get_attr_size( p_sa_mad->attr_offset ) * (result_index + 1) +
+                IB_SA_MAD_HDR_SIZE <= p_result_mad->size );
+
+        return( p_sa_mad->data +
+                (ib_get_attr_size( p_sa_mad->attr_offset ) * result_index) );
+}
+
+

PARAMETERS

+
       p_result_mad
+               [in] This is a reference to the MAD returned as a result of the
+               query.
+
+       result_index
+               [in] A zero-based index indicating which result to return.
+
+

NOTES

+
       This call returns a pointer to the start of a result structure from a call
+       to ib_query().  The type of result structure must be known to the user
+       either through the user's context or the query_type returned as part of
+       the ib_query_rec_t structure.
+
+

SEE ALSO

+
       ib_query_rec_t, ib_mad_element_t
+
+
+
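
EXAMPLE

+
+       A minimal sketch of walking query results, assuming p_result_mad and
+       result_cnt were delivered to the user's query callback (see
+       ib_query_rec_t):
+
+               uint32_t                        i;
+               ib_node_record_t        *p_node_rec;
+
+               for( i = 0; i < result_cnt; i++ )
+               {
+                       /* Typed wrappers such as ib_get_query_node_rec call
+                        * ib_get_query_result and assert on the attribute ID.
+                        */
+                       p_node_rec = ib_get_query_node_rec( p_result_mad, i );
+                       /* ... examine p_node_rec ... */
+               }
+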

[Functions] Access Layer/ib_get_query_svc_rec

+

NAME

+
       ib_get_query_svc_rec
+
+

DESCRIPTION

+
       Retrieves a service record result from a MAD returned by a call to
+       ib_query().
+
+

SYNOPSIS

+
AL_INLINE ib_service_record_t* AL_API
+ib_get_query_svc_rec(
+        IN                              ib_mad_element_t                        *p_result_mad,
+        IN                              uint32_t                                        result_index )
+{
+        ib_sa_mad_t             *p_sa_mad;
+
+        CL_ASSERT( p_result_mad );
+        p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad );
+        CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_SERVICE_RECORD );
+
+        return( (ib_service_record_t*)ib_get_query_result( p_result_mad,
+                result_index ) );
+}
+
+

PARAMETERS

+
       p_result_mad
+               [in] This is a reference to the MAD returned as a result of the
+               query.
+
+       result_index
+               [in] A zero-based index indicating which result to return.
+
+

NOTES

+
       This call returns a pointer to the start of a service record result from
+       a call to ib_query().
+
+

SEE ALSO

+
       ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_service_record_t
+
+
+

[Functions] Access Layer/ib_get_spl_qp

+

NAME

+
       ib_get_spl_qp
+
+

DESCRIPTION

+
       Creates a special QP or QP alias.  This call provides access to queue
+       pairs 0 and 1, and the raw queue pair types.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_get_spl_qp(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_net64_t                                      port_guid,
+        IN              const   ib_qp_create_t* const           p_qp_create,
+        IN              const   void* const                                     qp_context,
+        IN              const   ib_pfn_event_cb_t                       pfn_qp_event_cb OPTIONAL,
+                OUT                     ib_pool_key_t* const            p_pool_key OPTIONAL,
+                OUT                     ib_qp_handle_t* const           ph_qp );
+
+

PARAMETERS

+
       h_pd
+               [in] This is a handle to a protection domain associated with the queue
+               pair.  This must be a protection domain alias for aliased QP types.
+
+       port_guid
+               [in] The port GUID that the special QP will be associated with.
+
+       p_qp_create
+               [in] Attributes necessary to allocate and initialize the queue pair.
+
+       qp_context
+               [in] User-specified context information associated with the
+               queue pair.
+
+       pfn_qp_event_cb
+               [in] User-specified error callback routine invoked after an
+               asynchronous event has occurred on the queue pair.
+
+       p_pool_key
+               [out] A key to a pool of MAD elements that are used to send MADs.
+               This key is only valid for aliased QP types.
+
+       ph_qp
+               [out] Upon successful completion of this call, this references a
+               handle to the newly created queue pair.
+

RETURN VALUES

+       IB_SUCCESS
+               The queue pair was successfully created.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain to associate with the queue pair was invalid.
+
+       IB_INVALID_PORT
+               The port number supplied was invalid for the given channel adapter.
+
+       IB_INVALID_PARAMETER
+               A reference to the queue pair attributes or handle was not provided.
+
+       IB_INVALID_PERMISSION
+               The calling process does not have sufficient privilege to create the
+               requested queue pair type.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to create the queue pair.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to create the queue pair.
+
+       IB_INVALID_CQ_HANDLE
+               The send or receive completion queue to associate with the queue pair
+               was invalid.
+
+       IB_INVALID_SETTING
+               The specified queue pair type was invalid.
+
+       IB_UNSUPPORTED
+               The specified queue pair type was not supported by the channel adapter.
+
+       IB_INVALID_MAX_WRS
+               The requested maximum send or receive work request depth could not be
+               supported.
+
+       IB_INVALID_MAX_SGE
+               The requested maximum number of scatter-gather entries for the send or
+               receive queue could not be supported.
+
+

NOTES

+
       This routine allocates a queue pair with the specified attributes.  If
+       the queue pair cannot be allocated, an error is returned.  When creating
+       the queue pair, users associate a context with the queue pair.  This
+       context is returned to the user through the asynchronous event callback
+       if an event occurs.
+
+       This routine is used to create queue pairs of type:
+
+       IB_QPT_QP0
+       IB_QPT_QP1
+       IB_QPT_RAW_IPV6
+       IB_QPT_RAW_ETHER
+       IB_QPT_QP0_ALIAS
+       IB_QPT_QP1_ALIAS
+
+       Callers of ib_get_spl_qp should call ib_init_dgrm_svc if the queue pair
+       is of type IB_QPT_QP0, IB_QPT_QP1, IB_QPT_RAW_IPV6, or IB_QPT_RAW_ETHER
+       before sending or receiving data.  MADs may be sent on aliased QPs as
+       soon as this routine returns successfully.
+
+

SEE ALSO

+
       ib_query_qp, ib_modify_qp, ib_destroy_qp, ib_get_mad,
+       ib_init_dgrm_svc, ib_qp_create_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
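
EXAMPLE

+
+       A minimal sketch of creating a QP1 alias for sending MADs, assuming
+       h_pd is a protection domain alias and port_guid names an existing
+       port:
+
+               ib_qp_create_t          qp_create;
+               ib_pool_key_t           pool_key;
+               ib_qp_handle_t          h_qp;
+               ib_api_status_t         status;
+
+               cl_memclr( &qp_create, sizeof(qp_create) );
+               qp_create.qp_type = IB_QPT_QP1_ALIAS;
+               qp_create.sq_depth = 16;
+               qp_create.sq_sge = 1;
+               qp_create.sq_signaled = TRUE;
+
+               status = ib_get_spl_qp( h_pd, port_guid, &qp_create, NULL,
+                       NULL, &pool_key, &h_qp );
+
+       On success, MADs may be sent immediately using pool_key, as noted
+       above.
+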

[Structures] Access Layer/ib_gid_pair_t

+

NAME

+
       ib_gid_pair_t
+
+

DESCRIPTION

+
       Source and destination GIDs.
+
+

SYNOPSIS

+
typedef struct _ib_gid_pair
+{
+        ib_gid_t                                        src_gid;
+        ib_gid_t                                        dest_gid;
+
+}       ib_gid_pair_t;
+
+

FIELDS

+
       src_gid
+               Source GID of a path.
+
+       dest_gid
+               Destination GID of a path.
+
+

NOTES

+
       This structure is used to describe the endpoints of a path.
+
+

SEE ALSO

+
       ib_gid_t
+
+
+

[Structures] Access Layer/ib_guid_pair_t

+

NAME

+
       ib_guid_pair_t
+
+

DESCRIPTION

+
       Source and destination GUIDs.  These may be port or channel adapter
+       GUIDs, depending on the context in which this structure is used.
+
+

SYNOPSIS

+
typedef struct _ib_guid_pair
+{
+        ib_net64_t                                      src_guid;
+        ib_net64_t                                      dest_guid;
+
+}       ib_guid_pair_t;
+
+

FIELDS

+
       src_guid
+               Source GUID of a path.
+
+       dest_guid
+               Destination GUID of a path.
+
+

NOTES

+
       This structure is used to describe the endpoints of a path.  The given
+       GUID pair may belong to either ports or channel adapters.
+
+

SEE ALSO

+
       ib_guid_t
+
+
+

[Functions] Access Layer/ib_init_dgrm_svc

+

NAME

+
       ib_init_dgrm_svc
+
+

DESCRIPTION

+
       Initializes a datagram queue pair for use.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_init_dgrm_svc(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN              const   ib_dgrm_info_t* const           p_dgrm_info OPTIONAL );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to an existing queue pair.
+
+       p_dgrm_info
+               [in] References information needed to configure the queue pair for
+               use in sending and receiving datagrams.  This field is optional for
+               the IB_QPT_QP0 and IB_QPT_QP1 queue pair types, and is not used for
+               the IB_QPT_RAW_IPV6 and IB_QPT_RAW_ETHER queue pair types.
+

RETURN VALUES

+       IB_SUCCESS
+               The datagram queue pair was initialized successfully.
+
+       IB_INVALID_QP_HANDLE
+               The datagram queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               The queue pair handle was not created as a datagram queue pair type
+               or a reference to the datagram service information was not provided.
+
+       IB_INVALID_QP_STATE
+               The queue pair was in an invalid state for the requested operation.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to initialize the datagram queue pair.
+
+

NOTES

+
       This call binds the queue pair to a given port and transitions its state
+       to ready to send and receive data.  A queue pair must be initialized
+       before it can be used to send and receive datagrams.
+
+       This routine is used to initialize queue pairs of type:
+
+       IB_QPT_QP0
+       IB_QPT_QP1
+       IB_QPT_MAD
+       IB_QPT_RAW_IPV6
+       IB_QPT_RAW_ETHER
+       IB_QPT_UNRELIABLE_DGRM
+
+       For IB_QPT_MAD type queue pairs, receive buffers are automatically posted
+       by the access layer; however, users must call ib_reg_mad_svc to receive
+       MADs.  Received MAD elements must be returned to the access layer through
+       the ib_put_mad() call.
+
+

SEE ALSO

+
       ib_create_qp, ib_get_spl_qp, ib_dgrm_info_t, ib_reg_mad_svc
+
+
+
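
EXAMPLE

+
+       A minimal sketch of readying an IB_QPT_UNRELIABLE_DGRM queue pair,
+       assuming h_qp came from ib_create_qp and port_guid names the local
+       port; the qkey value is a placeholder:
+
+               ib_dgrm_info_t          dgrm_info;
+               ib_api_status_t         status;
+
+               dgrm_info.port_guid = port_guid;
+               dgrm_info.qkey = 0x12345678;    /* must match the remote QPs */
+               dgrm_info.pkey_index = 0;
+
+               /* Bind the QP to the port and make it ready to send/receive. */
+               status = ib_init_dgrm_svc( h_qp, &dgrm_info );
+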

[Functions] Access Layer/ib_join_mcast

+

NAME

+
       ib_join_mcast
+
+

DESCRIPTION

+
       Attaches a queue pair to a multicast group.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_join_mcast(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN              const   ib_mcast_req_t* const           p_mcast_req );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to an unreliable datagram queue pair that will join the
+               multicast group.
+
+       p_mcast_req
+               [in] Specifies the multicast group to join.
+

RETURN VALUES

+       IB_SUCCESS
+               The join multicast group request has been initiated.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the multicast group request information was not
+               provided.
+
+       IB_INVALID_SERVICE_TYPE
+               The queue pair configuration does not support this type of service.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to join the multicast group.
+
+       IB_INVALID_GUID
+               No port was found for the port_guid specified in the request.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+       IB_INVALID_PKEY
+               The pkey specified in the multicast join request does not match the
+               pkey of the queue pair.
+
+       IB_INVALID_PORT
+               The port GUID specified in the multicast join request does not match
+               the port of the queue pair.
+
+       IB_ERROR
+               An error occurred while performing the multicast group join operation.
+
+

NOTES

+
       This routine results in the specified queue pair joining a multicast
+       group.  If the multicast group does not already exist, it will be created
+       at the user's option.  Information about the multicast group is returned
+       to the user through a callback specified through the p_mcast_req
+       parameter.
+
+       If the specified queue pair is already a member of a multicast group when
+       this call is invoked, an error will occur if there are conflicting
+       membership requirements.  The QP is restricted to being bound to a single
+       port_guid and using a single pkey.
+
+

SEE ALSO

+
       ib_leave_mcast, ib_mcast_req_t, ib_create_qp, ib_init_dgrm_svc
+
+
+
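
EXAMPLE

+
+       A minimal sketch of joining (creating if necessary) a multicast group
+       on an unreliable datagram QP; h_qp, mgid, pkey, qkey, and port_guid
+       are assumed to exist, and my_mcast_cb is a caller-supplied
+       ib_pfn_mcast_cb_t (a sample callback appears under ib_mcast_rec_t
+       below):
+
+               ib_mcast_req_t          mcast_req;
+               ib_api_status_t         status;
+
+               cl_memclr( &mcast_req, sizeof(mcast_req) );
+               mcast_req.create = TRUE;
+               mcast_req.member_rec.mgid = mgid;
+               mcast_req.member_rec.scope_state = IB_MC_REC_STATE_FULL_MEMBER;
+               mcast_req.member_rec.pkey = pkey;
+               mcast_req.member_rec.qkey = qkey;
+               /* tclass and sl_flow_hop are left zero here; set as needed. */
+               mcast_req.mcast_context = my_context;
+               mcast_req.pfn_mcast_cb = my_mcast_cb;
+               mcast_req.timeout_ms = 1000;
+               mcast_req.retry_cnt = 3;
+               mcast_req.port_guid = port_guid;
+               mcast_req.pkey_index = 0;
+
+               status = ib_join_mcast( h_qp, &mcast_req );
+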

[Structures] Access Layer/ib_lap_pdata_t

+

NAME

+
       ib_lap_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a load alternate path message.
+
+

SYNOPSIS

+
typedef union _ib_lap_pdata
+{
+        uint8_t                                         data[IB_LAP_PDATA_SIZE];
+
+}       ib_lap_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+

[Functions] Access Layer/ib_leave_mcast

+

NAME

+
       ib_leave_mcast
+
+

DESCRIPTION

+
       Removes a queue pair from a multicast group.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_leave_mcast(
+        IN              const   ib_mcast_handle_t                       h_mcast,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_mcast
+               [in] A handle to a joined multicast group.
+
+       pfn_destroy_cb
+               [in] An optional user-specified callback that is invoked after the
+               leave request has completed.
+

RETURN VALUES

+       IB_SUCCESS
+               The leave multicast group request has been initiated.
+
+       IB_INVALID_MCAST_HANDLE
+               The multicast group handle was invalid.
+
+       IB_ERROR
+               An error occurred while performing the multicast group leave operation.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine detaches a queue pair from a multicast group and removes
+       it as a member of the group with the subnet administrator.
+
+

SEE ALSO

+
       ib_join_mcast, ib_pfn_destroy_cb_t
+
+
+

[Structures] Access Layer/ib_lid_pair_t

+

NAME

+
       ib_lid_pair_t
+
+

DESCRIPTION

+
       Source and destination LIDs.
+
+

SYNOPSIS

+
typedef struct _ib_lid_pair
+{
+        ib_net16_t                                      src_lid;
+        ib_net16_t                                      dest_lid;
+
+}       ib_lid_pair_t;
+
+

FIELDS

+
       src_lid
+               Source LID of a path.
+
+       dest_lid
+               Destination LID of a path.
+
+

NOTES

+
       This structure is used to describe the endpoints of a path.
+
+
+

[Structures] Access Layer/ib_listen_err_rec_t

+

NAME

+
       ib_listen_err_rec_t
+
+

DESCRIPTION

+
       Information returned to the user when an error occurs on a listen request.
+
+

SYNOPSIS

+
typedef struct _ib_listen_err_rec
+{
+        void* __ptr64                                                           listen_context;
+        ib_api_status_t                                                         reason;
+        ib_listen_handle_t                                                      h_cm_listen;
+
+}       ib_listen_err_rec_t;
+
+

FIELDS

+
       listen_context
+               User-defined context information associated with the listen request
+               through the ib_cm_listen call.
+
+       reason
+               A status that identifies the reason for the error being reported.
+
+       h_cm_listen
+               The handle for the listen request.  This handle will match the handle
+               returned by the ib_cm_listen call.  It is provided in case an error event
+               occurs before a client's call to ib_cm_listen can return.
+
+

SEE ALSO

+
       ib_pfn_listen_err_cb_t, ib_api_status_t
+
+
+

[Definitions] Access Layer/ib_listen_info_t

+

NAME

+
       ib_listen_info_t
+
+

DESCRIPTION

+
       Constants used to specify directed listen requests.
+
+

SYNOPSIS

+
#define IB_ALL_CAS                                              0
+#define IB_ALL_PORTS                                    0
+#define IB_ALL_LIDS                                             0
+#define IB_ALL_PKEYS                                    0
+
+

SEE ALSO

+
       ib_cm_listen, ib_cm_listen_t
+
+
+

[Functions] Access Layer/ib_local_mad

+

NAME

+
       ib_local_mad
+
+

DESCRIPTION

+
       Requests that a locally received MAD be processed by the channel adapter
+       on which it was received.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_local_mad(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN              const   uint8_t                                         port_num,
+        IN              const   void* const                                     p_mad_in,
+                OUT                     void*                                           p_mad_out );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to the channel adapter that should process the MAD.
+               This must be the same adapter that the MAD was received on.
+
+       port_num
+               [in] The port number to which this request is directed.
+
+       p_mad_in
+               [in] Pointer to a management datagram (MAD) structure containing
+               the command to be processed.
+
+       p_mad_out
+               [out] References a MAD that should contain the response to the
+               received input MAD specified through the p_mad_in parameter.
+

RETURN VALUES

+       IB_SUCCESS
+               The local MAD was processed successfully.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INVALID_PORT
+               The port number was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the MAD input or MAD output buffer was not provided.
+
+

NOTES

+
       This call is provided to support SMA and GSA implementations above the
+       verbs interface on ports that the access layer has disabled.  This routine
+       is used to perform local operations by the channel adapter.  On successful
+       return, the provided output MAD should be used when sending a response.
+
+

SEE ALSO

+
       ib_query_ca, ib_ca_attr_t
+
+
+

[Structures] Access Layer/ib_mad_element_t

+

NAME

+
       ib_mad_element_t
+
+

DESCRIPTION

+
       Information used to submit a work request to a management datagram (MAD)
+       queue pair.
+
+

SYNOPSIS

+
typedef struct _ib_mad_element
+{
+        struct _ib_mad_element* __ptr64 p_next;
+        const void* __ptr64                     context1;
+        const void* __ptr64                     context2;
+
+        /* Request/completion data. */
+        ib_mad_t* __ptr64                       p_mad_buf;
+        uint32_t                                        size;
+        uint32_t                                        immediate_data;
+        ib_net32_t                                      remote_qp;
+
+        /* Send request information. */
+        ib_av_handle_t                          h_av;
+        ib_send_opt_t                           send_opt;
+        ib_net32_t                                      remote_qkey;
+        boolean_t                                       resp_expected;
+        uint32_t                                        timeout_ms;
+        uint32_t                                        retry_cnt;
+        uint8_t                                         rmpp_version;
+
+        /* Completion information. */
+        ib_wc_status_t                          status;
+        boolean_t                                       grh_valid;
+        ib_grh_t* __ptr64                       p_grh;
+
+        /* Completed receive data or send request information if h_av is NULL. */
+        uint32_t                                        recv_opt;
+        ib_net16_t                                      remote_lid;
+        uint8_t                                         remote_sl;
+        uint16_t                                        pkey_index;
+        uint8_t                                         path_bits;
+
+        /* Transaction completion data. */
+        void* __ptr64                           send_context1;
+        void* __ptr64                           send_context2;
+
+}       ib_mad_element_t;
+
+

FIELDS

+
       p_next
+               A pointer used to chain MAD elements together.  This value is
+               set to NULL to mark the end of the chain.
+
+       context1
+               User-defined context information associated with the datagram.
+
+       context2
+               User-defined context information associated with the datagram.
+
+       p_mad_buf
+               The local data buffer containing the MAD.
+
+       size
+               The size of the MAD referenced by p_mad_buf.
+
+       immediate_data
+               32-bit field sent or received as part of a datagram message.
+               This field is valid for send operations if the send_opt
+               IB_SEND_OPT_IMMEDIATE flag has been set.  This field is valid
+               on received datagram completions if the recv_opt
+               IB_RECV_OPT_IMMEDIATE flag is set.
+
+       remote_qp
+               Identifies the destination queue pair of a datagram send operation or
+               the source queue pair of a received datagram.
+
+       h_av
+               An address vector that specifies the path information used to route
+               the outbound datagram to the destination queue pair.  This handle may
+               be NULL when sending a directed route SMP or if the access layer
+               should create the address vector for the user.
+
+       send_opt
+               Optional send control parameters.  The following options are valid:
+               IB_SEND_OPT_IMMEDIATE and IB_SEND_OPT_SOLICITED.  IB_SEND_OPT_FENCE
+               is only valid on MAD QPs.
+
+       remote_qkey
+               The qkey for the destination queue pair.
+
+       resp_expected
+               This field is used to indicate that the submitted operation expects
+               a response.  When set, the access layer will retry this send operation
+               until the corresponding response is successfully received, or the
+               request times out.  Send operations for which a response is expected
+               will always be completed by the access layer before the corresponding
+               received response.
+
+       timeout_ms
+               Specifies the number of milliseconds to wait for a response to
+               a request until retrying or timing out the request.  This field is
+               ignored if resp_expected is set to FALSE.
+
+       retry_cnt
+               Specifies the number of times that the request will be retried
+               before failing the request.  This field is ignored if resp_expected
+               is set to FALSE.
+
+       rmpp_version
+               Indicates the version of the RMPP protocol to use when sending this
+               MAD.  For MADs posted to MAD services of type IB_MAD_SVC_DEFAULT,
+               setting this field to 0 disables RMPP on user-defined management
+               classes or invokes the default RMPP version for well-defined management
+               classes, if appropriate.  For MADs posted to MAD services of type
+               IB_MAD_SVC_RMPP, setting this field to 0 disables RMPP on the sent
+               MAD.  Note that if the RMPP header exists, but the RMPP protocol is
+               not activated for this MAD, the user must ensure that the RMPP header
+               has been zeroed.  This field is intended to help support backwards
+               compatibility.
+
+       status
+               The result of the MAD work request.
+
+       grh_valid
+               A flag indicating whether the p_grh reference is valid.
+
+       p_grh
+               A reference to the global route header information.
+
+       recv_opt
+               Indicates optional fields valid as part of a work request that
+               completed on an unreliable datagram queue pair.
+
+       remote_lid
+               The source LID of the received datagram.
+
+       remote_sl
+               The service level used by the source of the received datagram.
+
+       pkey_index
+               This is valid only for IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.
+               For received datagrams, this field contains the pkey index for
+               the source queue pair.  For send operations, this field contains
+               the pkey index to use when posting the send work request. 
+
+       path_bits
+               The portion of the remote_lid that may be changed to vary the path
+               through the subnet to the remote port.
+
+       send_context1
+               If this datagram was received as a response to a sent datagram, this
+               field contains the context1 value of the send operation.  If this is
+               an unsolicited receive, this field will be 0.
+
+       send_context2
+               If this datagram was received as a response to a sent datagram, this
+               field contains the context2 value of the send operation.  If this is
+               an unsolicited receive, this field will be 0.
+
+

NOTES

+
       The format of data sent over the fabric is expected to be in the form
+       of a MAD.  MADs are expected to match the format defined by the
+       InfiniBand specification and must be in network-byte order when posted
+       to a MAD service.
+
+       This structure is returned to notify a user that a datagram has been
+       received for a registered management class.  Information about the
+       source of the data is provided, along with the data buffer.
+
+       The MAD element structure is defined such that a received MAD element
+       may be re-used as a sent response.  In such cases, the h_av field may be
+       NULL.  The address vector will be created and destroyed by the access
+       layer.
+
+

SEE ALSO

+
       ib_get_mad, ib_put_mad, ib_send_mad, ib_local_ds_t, ib_send_opt_t,
+       ib_pfn_mad_recv_cb_t, ib_get_mad_buf
+
+
+
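
EXAMPLE

+
+       A minimal sketch of filling the send-related fields of a MAD element,
+       assuming p_mad was obtained from ib_get_mad, h_mad_svc from
+       ib_reg_mad_svc, and h_av may be NULL when the access layer should
+       build the address vector, as described above:
+
+               ib_api_status_t         status;
+
+               p_mad->p_next = NULL;
+               p_mad->context1 = my_context;   /* caller-defined */
+               p_mad->remote_qp = IB_QP1;
+               p_mad->remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+               p_mad->h_av = h_av;
+               p_mad->send_opt = 0;
+               p_mad->resp_expected = TRUE;
+               p_mad->timeout_ms = 1000;
+               p_mad->retry_cnt = 3;
+               p_mad->rmpp_version = 0;        /* default RMPP behavior */
+
+               status = ib_send_mad( h_mad_svc, p_mad, NULL );
+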

[Structures] Access Layer/ib_mad_svc_t

+

NAME

+
       ib_mad_svc_t
+
+

DESCRIPTION

+
       Information used to request management datagram support with a queue pair.
+
+

SYNOPSIS

+
typedef struct _ib_mad_svc
+{
+        void                                            *mad_svc_context;
+        ib_pfn_mad_comp_cb_t            pfn_mad_send_cb;
+        ib_pfn_mad_comp_cb_t            pfn_mad_recv_cb;
+
+        boolean_t                                       support_unsol;
+        uint8_t                                         mgmt_class;
+        uint8_t                                         mgmt_version;
+        boolean_t                                       method_array[IB_MAX_METHODS];
+
+        ib_mad_svc_type_t                       svc_type;
+
+}       ib_mad_svc_t;
+
+

FIELDS

+
       mad_svc_context
+               User-defined context that is returned by the access layer through
+               the pfn_mad_send_cb and pfn_mad_recv_cb.
+
+       pfn_mad_send_cb
+               A send callback that is invoked to notify the user that a send
+               operation has completed for a sent MAD.
+
+       pfn_mad_recv_cb
+               A receive callback that is invoked to notify the user that a MAD
+               has been received.
+
+       support_unsol
+               If set to TRUE, this field indicates that the registering client
+               supports processing unsolicited MADs.  Unsolicited MADs are
+               received MADs that do not have the response bit set.  If set to TRUE,
+               the following fields are required (must be non-zero): mgmt_class,
+               mgmt_version, and method_array.
+
+       mgmt_version
+               Indicates which version of a management class the client requires
+               support for.  The access layer distinguishes between clients
+               requiring different versions of the same management class.
+               This field is ignored if the support_unsol field is set to FALSE.
+
+       mgmt_class
+               Indicates the management class that should be supported by the
+               access layer.  This field is ignored if the support_unsol field is
+               set to FALSE.
+
+       method_array
+               An array of IB_MAX_METHODS entries specifying which methods are supported by
+               a client when receiving unsolicited MADs.  Each index corresponds to
+               a single method, and each entry in the array indicates if the method
+               is supported by the client.  This field is ignored if the
+               support_unsol field is set to FALSE.
+
+       svc_type
+               Indicates the type of services that should be provided by the MAD
+               service.
+
+

NOTES

+
       Clients use this structure to define which management datagram methods
+       they support, and the type of support required for each.  A received MAD
+       is distinguished by the access layer based on the following three fields:
+       management class, management version, and method.
+
+       A specific combination of class, version, and method may be registered
+       for unsolicited MADs only once.  The access layer supports multiple
+       clients registering for unsolicited MADs as long as they do not register
+       for the same class, version, and method combination.
+
+       The svc_type field can be set by a client to indicate that the access
+       layer should invoke RMPP for the specified management class of MADs.  If
+       set to IB_MAD_SVC_DEFAULT, the access layer will automatically invoke RMPP
+       for well known MAD classes (those defined by the 1.1 version of the
+       InfiniBand specification).  The svc_type field is intended to be used by
+       clients sending and receiving vendor-specific management classes requiring
+       RMPP, and by clients providing their own MAD services.
+
+

SEE ALSO

+
       ib_reg_mad_svc, ib_pfn_mad_send_cb_t, ib_pfn_mad_recv_cb_t,
+       ib_mad_svc_type_t
+
+
+
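
EXAMPLE

+
+       A minimal sketch of a registration that accepts unsolicited Get and
+       Set requests of a vendor-defined class; the class value and callback
+       names are placeholders:
+
+               ib_mad_svc_t            mad_svc;
+
+               cl_memclr( &mad_svc, sizeof(mad_svc) );
+               mad_svc.mad_svc_context = my_context;
+               mad_svc.pfn_mad_send_cb = my_send_cb;
+               mad_svc.pfn_mad_recv_cb = my_recv_cb;
+               mad_svc.support_unsol = TRUE;
+               mad_svc.mgmt_class = 0x56;              /* vendor-defined */
+               mad_svc.mgmt_version = 1;
+               mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE;
+               mad_svc.method_array[IB_MAD_METHOD_SET] = TRUE;
+               mad_svc.svc_type = IB_MAD_SVC_RMPP;
+
+               /* Pass mad_svc to ib_reg_mad_svc on a MAD-type QP. */
+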

[Definitions] Access Layer/ib_mad_svc_type_t

+

NAME

+
       ib_mad_svc_type_t
+
+

DESCRIPTION

+
       Indicates the type of services provided by a MAD service.
+
+

SYNOPSIS

+
typedef enum _ib_mad_svc_type
+{
+        IB_MAD_SVC_DEFAULT = 0,
+        IB_MAD_SVC_RMPP,
+        IB_MAD_SVC_RAW
+
+}       ib_mad_svc_type_t;
+
+

VALUES

+
       IB_MAD_SVC_DEFAULT
+               Indicates that the access layer will provide all necessary services,
+               including retransmissions and RMPP for well-defined management classes.
+
+       IB_MAD_SVC_RMPP
+               Indicates that the MAD service requires retransmissions and the RMPP
+               header is available on all MADs.  (The RMPP protocol will be activated
+               on a per send basis.)  This service type should be used for
+               user-defined management classes requiring RMPP.
+
+       IB_MAD_SVC_RAW
+               Specifies that the MAD service will not perform retransmissions or
+               invoke RMPP.  All MADs received or sent on a MAD service of this
+               type are passed through without additional processing by the
+               access layer.
+
+

NOTES

+
       This enum is used to define the types of MAD services available to users.
+
+

SEE ALSO

+
       ib_mad_svc_t, ib_reg_mad_svc
+
+
+

[Structures] Access Layer/ib_mcast_rec_t

+

NAME

+
       ib_mcast_rec_t
+
+

DESCRIPTION

+
       Information returned as a result of joining a multicast group.
+
+

SYNOPSIS

+
typedef struct _ib_mcast_rec
+{
+        const void* __ptr64                     mcast_context;
+        ib_api_status_t                         status;
+        ib_net16_t                                      error_status;
+
+        ib_mcast_handle_t                       h_mcast;
+        ib_member_rec_t* __ptr64        p_member_rec;
+
+}       ib_mcast_rec_t;
+
+

FIELDS

+
       mcast_context
+               User-defined context information associated with the multicast join
+               request.
+
+       status
+               Indicates the success of the multicast group join operation.
+
+       error_status
+               Provides additional error information returned by the SA.
+               This field is only valid if status is set to IB_REMOTE_ERROR.
+
+       h_mcast
+               Upon successful completion of a multicast join, this references a
+               handle to the multicast group.  This handle is used to leave the
+               multicast group.
+
+       p_member_rec
+               References a member record that provides information about the
+               multicast group.
+
+

NOTES

+
       This structure is returned to a client through a callback to notify it
+       of the result of a multicast join operation.
+
+

SEE ALSO

+
       ib_join_mcast, ib_pfn_mcast_cb_t, ib_leave_mcast
+
+
+
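
EXAMPLE

+
+       A minimal sketch of an ib_pfn_mcast_cb_t implementation that records
+       the group handle delivered through this structure; my_group_t is a
+       placeholder caller-defined type passed as mcast_context:
+
+               static void AL_API
+               my_mcast_cb(
+                       IN                              ib_mcast_rec_t          *p_mcast_rec )
+               {
+                       my_group_t      *p_group;
+
+                       p_group = (my_group_t*)p_mcast_rec->mcast_context;
+                       if( p_mcast_rec->status == IB_SUCCESS )
+                       {
+                               /* Save the handle for a later ib_leave_mcast. */
+                               p_group->h_mcast = p_mcast_rec->h_mcast;
+                       }
+                       /* On IB_REMOTE_ERROR, error_status holds SA detail. */
+               }
+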

[Structures] Access Layer/ib_mcast_req_t

+

NAME

+
       ib_mcast_req_t
+
+

DESCRIPTION

+
       Information used to join a multicast group.
+
+

SYNOPSIS

+
typedef struct _ib_mcast_req
+{
+        boolean_t                                       create;
+        ib_member_rec_t                         member_rec;
+
+        const void* __ptr64                     mcast_context;
+        ib_pfn_mcast_cb_t                       pfn_mcast_cb;
+
+        uint32_t                                        timeout_ms;
+        uint32_t                                        retry_cnt;
+        ib_al_flags_t                           flags;
+
+        ib_net64_t                                      port_guid;
+        uint16_t                                        pkey_index;
+
+}       ib_mcast_req_t;
+
+

FIELDS

+
       create
+               Indicates that the multicast group should be created if it does not
+               already exist.
+
+       member_rec
+               Specifies the membership information of the multicast group to join
+               or create.  The mgid and join state (scope_state) fields of the
+               member record must be set.  In addition, if create is set to TRUE,
+               the following fields must also be set: qkey, tclass, service level
+               and flow label (sl_flow_hop), and pkey.  All other fields are ignored
+               by the access layer.
+
+       mcast_context
+               User-defined context information associated with the join request.
+               This context is returned to the user through the function specified
+               by the pfn_mcast_cb field.
+
+       pfn_mcast_cb
+               A user-defined callback that is invoked upon completion of the
+               join request.
+
+       timeout_ms
+               Specifies the number of milliseconds to wait for a response for
+               the join request until retrying or timing out the request.
+
+       retry_cnt
+               Specifies the number of times that the join request will be retried
+               before failing the request.
+
+       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       port_guid
+               Indicates the port that will join the multicast group.  The QP
+               specified as part of the ib_join_mcast call will bind to this port.
+
+       pkey_index
+               Specifies the pkey associated with this queue pair.
+
+

NOTES

+
       This structure is used when joining an existing multicast group or
+       creating a new multicast group.
+
+

SEE ALSO

+
       ib_join_mcast, ib_pfn_mcast_cb_t, ib_gid_t
+
+
+

[Functions] Access Layer/ib_modify_av

+

NAME

+
       ib_modify_av
+
+

DESCRIPTION

+
       Modifies the attributes of an existing address vector.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_modify_av(
+        IN              const   ib_av_handle_t                          h_av,
+        IN              const   ib_av_attr_t* const                     p_av_attr );
+
+

PARAMETERS

+
       h_av
+               [in] A handle to an existing address vector.
+
+       p_av_attr
+               [in] The new attributes to use when modifying the address vector.
+

RETURN VALUES

+       IB_SUCCESS
+               The address vector was successfully modified.
+
+       IB_INVALID_AV_HANDLE
+               The address vector handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the address vector attributes structure was not
+               provided.
+
+       IB_INVALID_PORT
+               The port number supplied, through the address vector attributes,
+               was invalid for the given channel adapter.
+
+

NOTES

+
       This routine modifies the attributes of an existing address vector.
+       The new attributes are specified through the p_av_attr parameter.
+
+

SEE ALSO

+
       ib_create_av, ib_destroy_av
+
+
+

[Functions] Access Layer/ib_modify_ca

+

NAME

+
       ib_modify_ca
+
+

DESCRIPTION

+
       Modifies the attributes and violation counters associated with a port.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_modify_ca(
+        IN              const   ib_ca_handle_t                          h_ca,
+        IN              const   uint8_t                                         port_num,
+        IN              const   ib_ca_mod_t                                     ca_mod,
+        IN              const   ib_port_attr_mod_t* const       p_port_attr_mod );
+
+

PARAMETERS

+
       h_ca
+               [in] A handle to an opened channel adapter.
+
+       port_num
+               [in] An index to the port that is being modified.  The port_num matches
+               the index of the port as returned through the ib_query_ca call.
+
+       ca_mod
+               [in] A mask of the attributes and counters to modify.
+
+       p_port_attr_mod
+               [in] A list of the specific port attribute information to modify.  For
+               the access layer to modify an attribute, its corresponding bit must be
+               set in the ca_mod parameter.
+

RETURN VALUES

+       IB_SUCCESS
+               The attributes were successfully modified.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INVALID_PORT
+               The port number supplied was invalid for the given channel adapter.
+
+       IB_INVALID_PARAMETER
+               The supplied ca_mod mask is invalid or a reference to the port
+               attribute information was not provided.
+
+       IB_UNSUPPORTED
+               The optional qkey and pkey violation counters are not supported by
+               this channel adapter, but an attempt was made to modify them.
+
+

NOTES

+
       This call sets the attributes for a port in its associated PORT_INFO
+       structure.  It will also reset pkey and qkey violation counters.
+
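+       A usage sketch that resets the violation counters on port 1.  The
+       IB_CA_MOD_PKEY_CTR and IB_CA_MOD_QKEY_CTR flag names and the counter
+       fields of ib_port_attr_mod_t are assumptions based on the description
+       above; consult ib_ca_mod_t and ib_port_attr_mod_t for the exact names.
+
+               ib_port_attr_mod_t      attr_mod;
+               ib_api_status_t         status;
+
+               attr_mod.pkey_ctr = 0;          /* assumed field: clear pkey violations */
+               attr_mod.qkey_ctr = 0;          /* assumed field: clear qkey violations */
+               status = ib_modify_ca( h_ca, 1,
+                       IB_CA_MOD_PKEY_CTR | IB_CA_MOD_QKEY_CTR, &attr_mod );
+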
+

SEE ALSO

+
       ib_open_ca, ib_query_ca, ib_close_ca, ib_ca_mod_t, ib_port_attr_mod_t
+
+
+
+ +

[Functions] +Access Layer/ib_modify_cq

+ +


+

NAME

+
       ib_modify_cq
+
+

DESCRIPTION

+
       Modifies the attributes associated with a completion queue, allowing the
+       completion queue to be resized.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_modify_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN      OUT                     uint32_t* const                         p_size );
+
+

PARAMETERS

+
       h_cq
+               [in] A handle to an existing completion queue.
+
+       p_size
+               [in/out] Specifies the new size of the completion queue.  If the
+               modify call is successful, the actual size of the completion queue
+               will be returned.  The actual size of the CQ will be greater than or
+               equal to the requested size.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The completion queue was successfully modified.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the completion queue size was not provided.
+
+       IB_INVALID_CQ_SIZE
+               The requested size of the completion queue was larger than the
+               maximum supported by the associated channel adapter.
+
+       IB_OVERFLOW
+               The specified size of the completion queue is smaller than the number
+               of work completions currently on the completion queue.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to modify the completion queue.
+
+

NOTES

+
       This routine allows a client to modify the size of a completion queue.
+       If the new size is larger than what the associated channel adapter can
+       support, an error is returned.  If the completion queue has valid
+       completion entries on it and the requested size is smaller than the
+       number of entries, an overflow error is returned and the modify
+       operation is aborted.
+
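+       A minimal usage sketch, assuming h_cq was returned by ib_create_cq:
+
+               uint32_t                size = 1024;    /* requested minimum size */
+               ib_api_status_t status;
+
+               status = ib_modify_cq( h_cq, &size );
+               if( status == IB_SUCCESS )
+               {
+                       /* size now holds the actual CQ depth, which is greater
+                        * than or equal to the requested value. */
+               }
+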
+

SEE ALSO

+
       ib_create_cq
+
+
+
+ +

[Functions] +Access Layer/ib_modify_qp

+ +


+

NAME

+
       ib_modify_qp
+
+

DESCRIPTION

+
       Modifies the attributes of an existing queue pair.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_modify_qp(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN              const   ib_qp_mod_t* const                      p_qp_mod );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to an existing queue pair.
+
+       p_qp_mod
+               [in] The new attributes to use when modifying the queue pair.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The queue pair was successfully modified.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the queue pair attributes was not provided.
+
+       IB_INVALID_SETTING
+               The specified queue pair attributes were invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the queue pair.
+
+       IB_UNSUPPORTED
+               The requested modification was not supported.
+
+       IB_INVALID_QP_STATE
+               The queue pair was in an invalid state for the requested operation.
+
+       IB_INVALID_PKEY
+               The specified pkey was not valid.
+
+       IB_INVALID_APM_STATE
+               The specified automatic path migration state was not valid.
+
+

NOTES

+
       This routine modifies the attributes of an existing queue pair and
+       transitions it to a new state.  The new state and attributes are
+       specified through the p_qp_mod parameter.  Upon successful completion,
+       the queue pair is in the requested state.
+
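+       A minimal usage sketch that flushes a queue pair by moving it to the
+       error state.  The req_state field name is assumed from the ib_qp_mod_t
+       reference entry; cl_memclr is the complib memory-clearing helper.
+
+               ib_qp_mod_t             qp_mod;
+               ib_api_status_t status;
+
+               cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );
+               qp_mod.req_state = IB_QPS_ERROR;        /* flush outstanding work requests */
+               status = ib_modify_qp( h_qp, &qp_mod );
+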
+

SEE ALSO

+
       ib_create_qp, ib_destroy_qp, ib_qp_mod_t
+
+
+
+ +

[Functions] +Access Layer/ib_modify_srq

+ +


+

NAME

+
       ib_modify_srq
+
+

DESCRIPTION

+
       Modifies the attributes of an existing shared receive queue.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_modify_srq(
+        IN              const   ib_srq_handle_t                 h_srq,
+        IN              const   ib_srq_attr_t* const            p_srq_attr,
+        IN              const   ib_srq_attr_mask_t                      srq_attr_mask );
+
+

PARAMETERS

+
       h_srq
+               [in] A handle to an existing shared receive queue.
+
+       p_srq_attr
+               [in] Attributes necessary to allocate and initialize a shared receive queue.
+
+       srq_attr_mask
+               [in] Flags, indicating which fields in the previous structure are valid.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The shared receive queue was successfully modified.
+
+       IB_INVALID_SRQ_HANDLE
+               The shared receive queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the shared receive queue attributes was not provided.
+
+       IB_INVALID_SETTING
+               The specified shared receive queue attributes were invalid.
+
+       IB_UNSUPPORTED
+               The requested modification is not yet supported.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the shared receive queue.
+
+

NOTES

+
       This routine modifies the attributes of an existing shared receive
+       queue.  The new attributes are specified through the p_srq_attr and
+       srq_attr_mask parameters.  Upon successful completion, the shared
+       receive queue reflects the requested attributes.
+
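+       A minimal usage sketch.  The max_wr field and the IB_SRQ_MAX_WR mask
+       value are assumptions; see ib_srq_attr_t and ib_srq_attr_mask_t for
+       the exact names.
+
+               ib_srq_attr_t   srq_attr;
+               ib_api_status_t status;
+
+               cl_memclr( &srq_attr, sizeof(ib_srq_attr_t) );
+               srq_attr.max_wr = 256;          /* assumed field: grow the SRQ */
+               status = ib_modify_srq( h_srq, &srq_attr, IB_SRQ_MAX_WR );
+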
+

SEE ALSO

+
       ib_create_srq, ib_query_srq, ib_destroy_srq, ib_srq_attr_t,
+       ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
+ +

[Structures] +Access Layer/ib_mra_pdata_t

+ +


+

NAME

+
       ib_mra_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a message receipt acknowledgement.
+
+

SYNOPSIS

+
typedef union _ib_mra_pdata
+{
+        uint8_t                                         data[IB_MRA_PDATA_SIZE];
+
+}       ib_mra_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Functions] +Access Layer/ib_open_al

+ +


+

NAME

+
       ib_open_al
+
+

DESCRIPTION

+
       This routine opens an instance of the access layer for the user and
+       returns its handle.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_open_al(
+                OUT                     ib_al_handle_t* const           ph_al );
+
+

PARAMETERS

+
       ph_al
+               [out] Upon successful completion of this call, this parameter will
+               reference a handle to the access layer.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The access layer was opened successfully.
+
+       IB_INVALID_PARAMETER
+               A reference to the access layer handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+

NOTES

+
       This function opens an instance of the access layer.  An instance of the
+       access layer is required before allocating additional resources from the
+       access layer or a channel adapter.  If successful, a handle to the access
+       layer is returned.  User-mode clients should not call ib_open_al from the
+       module initialization routine.
+
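+       A minimal usage sketch showing the open/close pairing:
+
+               ib_al_handle_t  h_al;
+               ib_api_status_t status;
+
+               status = ib_open_al( &h_al );
+               if( status != IB_SUCCESS )
+                       return status;
+
+               /* ... open CAs and allocate resources using h_al ... */
+
+               ib_close_al( h_al );
+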
+

SEE ALSO

+
       ib_close_al
+
+
+
+ +

[Functions] +Access Layer/ib_open_ca

+ +


+

NAME

+
       ib_open_ca
+
+

DESCRIPTION

+
       Opens a channel adapter for additional access.  A channel adapter must
+       be opened before consuming resources on that adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_open_ca(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_net64_t                                      ca_guid,
+        IN              const   ib_pfn_event_cb_t                       pfn_ca_event_cb OPTIONAL,
+        IN              const   void* const                                     ca_context,
+                OUT                     ib_ca_handle_t* const           ph_ca );
+
+

PARAMETERS

+
       h_al
+               [in] The handle to an open instance of AL.
+
+       ca_guid
+               [in] The GUID of the channel adapter to open.
+
+       pfn_ca_event_cb
+               [in] A user-specified callback that is invoked after an
+               asynchronous event has occurred on the channel adapter.
+
+       ca_context
+               [in] A client-specified context to associate with this opened instance
+               of the channel adapter.  This context is returned to the user when
+               invoking asynchronous callbacks referencing this channel adapter.
+
+       ph_ca
+               [out] Upon successful completion of this call, this references a
+               handle to the opened channel adapter.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The operation was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_GUID
+               No channel adapter in the system was found for the specified ca_guid.
+
+       IB_INVALID_PARAMETER
+               A reference to the CA handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to open the channel adapter.
+
+

NOTES

+
       When successful, this routine returns a handle to an open instance of a CA.
+
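+       A minimal usage sketch.  The ca_guid would normally be discovered
+       through an AL GUID enumeration call; here it is assumed to be known.
+
+               static void AL_API
+               ca_event_cb(
+                       IN                              ib_async_event_rec_t            *p_event_rec )
+               {
+                       /* Inspect the event record and react as needed. */
+                       UNUSED_PARAM( p_event_rec );
+               }
+
+               /* Elsewhere, after ib_open_al has returned h_al: */
+               ib_ca_handle_t  h_ca;
+               ib_api_status_t status;
+
+               status = ib_open_ca( h_al, ca_guid, ca_event_cb, my_context, &h_ca );
+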
+

SEE ALSO

+
       ib_query_ca, ib_modify_ca, ib_close_ca, ib_pfn_event_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_peek_cq

+ +


+

NAME

+
       ib_peek_cq
+
+

DESCRIPTION

+
       Returns the number of entries currently on the completion queue.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_peek_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        OUT                             uint32_t* const                         p_n_cqes );
+
+

PARAMETERS

+
       h_cq
+               [in] Handle to the completion queue to peek.
+
+       p_n_cqes
+               [out] Upon successful completion of this call, contains the number
+               of completion queue entries currently on the completion queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The peek operation completed successfully.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the completion queue entry count was not provided.
+
+       IB_UNSUPPORTED
+               This operation is not supported by the channel adapter.
+
+

NOTES

+
       The value returned is a snapshot of the number of completion queue
+       entries currently on the completion queue.  Support for this operation
+       is optional for a channel adapter vendor.
+
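+       A minimal usage sketch:
+
+               uint32_t                n_cqes;
+               ib_api_status_t status;
+
+               status = ib_peek_cq( h_cq, &n_cqes );
+               if( status == IB_SUCCESS )
+               {
+                       /* n_cqes is a snapshot of the current number of CQ entries. */
+               }
+               else if( status == IB_UNSUPPORTED )
+               {
+                       /* This CA vendor does not implement peek support. */
+               }
+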
+

SEE ALSO

+
       ib_create_cq, ib_poll_cq, ib_rearm_cq, ib_rearm_n_cq
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_apr_cb_t

+ +


+

NAME

+
       ib_pfn_cm_apr_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a load
+       alternate path response message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_apr_cb_t)(
+        IN                              ib_cm_apr_rec_t                         *p_cm_apr_rec );
+
+

PARAMETERS

+
       p_cm_apr_rec
+               [in] Load alternate path response information sent by the remote side.
+
+

NOTES

+
       This callback is invoked to notify the user of a load alternate path
+       response.  If a response is not received within the specified timeout
+       period, this callback will be invoked with the status set to IB_CM_TIMEOUT.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_lap, ib_cm_apr, ib_cm_apr_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_drep_cb_t

+ +


+

NAME

+
       ib_pfn_cm_drep_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a disconnect
+       reply message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_drep_cb_t)(
+        IN                              ib_cm_drep_rec_t                        *p_cm_drep_rec );
+
+

PARAMETERS

+
       p_cm_drep_rec
+               [in] Disconnect reply information returned to the user.
+
+

NOTES

+
       This callback is invoked to notify the user of a disconnect reply.  If
+       no reply was received within the specified timeout period, this callback
+       will be invoked with the status set to IB_CM_TIMEOUT.
+
+       In the kernel, this callback is typically invoked from within a
+       tasklet, depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_dreq, ib_cm_drep, ib_cm_drep_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_dreq_cb_t

+ +


+

NAME

+
       ib_pfn_cm_dreq_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a disconnect
+       request message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_dreq_cb_t)(
+        IN                              ib_cm_dreq_rec_t                        *p_cm_dreq_rec );
+
+

PARAMETERS

+
       p_cm_dreq_rec
+               [in] Disconnect request information returned to the user.
+
+

NOTES

+
       This callback is invoked to notify the user of a disconnect request.
+       Users must call ib_cm_drep to respond to the disconnect request.  After
+       this callback returns, the queue pair associated with the connection is
+       transitioned to the time-wait state and is no longer usable for sending
+       and receiving data.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_listen, ib_cm_drep, ib_cm_dreq_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_lap_cb_t

+ +


+

NAME

+
       ib_pfn_cm_lap_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a load
+       alternate path message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_lap_cb_t)(
+        IN                              ib_cm_lap_rec_t                         *p_cm_lap_rec );
+
+

PARAMETERS

+
       p_cm_lap_rec
+               [in] Load alternate path information sent by the remote side.
+
+

NOTES

+
       This callback is invoked to notify the user of a load alternate path
+       request.  Users must call ib_cm_apr to respond to the load alternate
+       path request from within this callback.  The ib_cm_apr call is used
+       to accept or reject the load alternate path request.
+
+       In the kernel, this callback is typically invoked from within a
+       tasklet, depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_lap, ib_cm_apr, ib_cm_lap_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_mra_cb_t

+ +


+

NAME

+
       ib_pfn_cm_mra_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a message
+       received acknowledgement.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_mra_cb_t)(
+        IN                              ib_cm_mra_rec_t                         *p_cm_mra_rec );
+
+

PARAMETERS

+
       p_cm_mra_rec
+               [in] Message received acknowledgement information received from the
+               remote side.
+
+

NOTES

+
       This callback is invoked to notify the user that their request was
+       successfully received, but additional processing is required.  This
+       callback may be invoked after calling ib_cm_req or ib_cm_rep.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_rep, ib_cm_mra_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_rej_cb_t

+ +


+

NAME

+
       ib_pfn_cm_rej_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a connection
+       rejection message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_rej_cb_t)(
+        IN                              ib_cm_rej_rec_t                         *p_cm_rej_rec );
+
+

PARAMETERS

+
       p_cm_rej_rec
+               [in] Connection rejection information returned to the user.
+
+

NOTES

+
       This callback is invoked to notify the user that a connection has been
+       rejected.  This routine may be invoked after calling ib_cm_req or
+       ib_cm_rep.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_rep, ib_cm_rtu, ib_cm_rej, ib_cm_rej_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_rep_cb_t

+ +


+

NAME

+
       ib_pfn_cm_rep_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a connection
+       request reply message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_rep_cb_t)(
+        IN                              ib_cm_rep_rec_t                         *p_cm_rep_rec );
+
+

PARAMETERS

+
       p_cm_rep_rec
+               [in] Connection request reply information returned to the user,
+               indicating the remote connection data.
+
+

NOTES

+
       This callback is invoked to notify the user of a connection request reply.
+       This routine is invoked after calling ib_cm_req.  Users must call
+       ib_cm_rtu to accept the connection or ib_cm_rej to reject the connection
+       from the callback.
+
+       Users may also call ib_cm_mra to acknowledge the connection request reply
+       and prevent the remote side from timing out the connection request.  The
+       ib_cm_mra routine should be invoked if the user requires substantial
+       processing time to process the connection request reply.
+
+       If a reply is not received within the specified timeout period,
+       this callback will be invoked with the status set to IB_CM_TIMEOUT.  Users
+       may call ib_cm_rej to notify the remote side that the connection request
+       is being rejected due to a timeout.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_req, ib_cm_listen, ib_cm_rep, ib_cm_rej, ib_cm_mra,
+       ib_cm_rep_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_req_cb_t

+ +


+

NAME

+
       ib_pfn_cm_req_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a connection
+       request message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_req_cb_t)(
+        IN                              ib_cm_req_rec_t                         *p_cm_req_rec );
+
+

PARAMETERS

+
       p_cm_req_rec
+               [in] Connection request information returned to the user, indicating
+               the parameters for the connection.
+
+

NOTES

+
       This callback is invoked to notify the user of a connection request.  This
+       routine is invoked for peer to peer connection request calls to ib_cm_req
+       and for calls to ib_cm_listen.  Users must call ib_cm_rep to accept the
+       connection or ib_cm_rej to reject the connection from the callback.
+
+       Users may also call ib_cm_mra to acknowledge the connection request and
+       prevent the remote side from timing out the connection request.  The
+       ib_cm_mra routine should be invoked if the user requires substantial
+       processing time to process the connection request.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
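+       A sketch of a request callback that accepts the connection.  The
+       h_cm_req handle field of ib_cm_req_rec_t and the ib_cm_rep signature
+       are assumptions; see their reference entries for the exact forms.
+
+               static void AL_API
+               my_cm_req_cb(
+                       IN                              ib_cm_req_rec_t                         *p_cm_req_rec )
+               {
+                       ib_cm_rep_t             cm_rep;
+
+                       cl_memclr( &cm_rep, sizeof(cm_rep) );
+                       /* ... fill cm_rep with the local QP and reply settings ... */
+
+                       ib_cm_rep( p_cm_req_rec->h_cm_req, &cm_rep );
+               }
+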
+

SEE ALSO

+
       ib_cm_req, ib_cm_listen, ib_cm_rep, ib_cm_mra, ib_cm_rej, ib_cm_req_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_cm_rtu_cb_t

+ +


+

NAME

+
       ib_pfn_cm_rtu_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after receiving a connection
+       ready to use message.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_cm_rtu_cb_t)(
+        IN                              ib_cm_rtu_rec_t                         *p_cm_rtu_rec );
+
+

PARAMETERS

+
       p_cm_rtu_rec
+               [in] Connection ready to use information returned to the user.
+
+

NOTES

+
       This callback is invoked to notify the user that a connection is ready
+       to use.  This routine is invoked after calling ib_cm_rep.  If a ready to
+       use message is not received within the specified timeout period, this
+       callback will be invoked with the status set to IB_CM_TIMEOUT.
+
+       This callback will be invoked before a user is notified of any completions
+       that have occurred on the associated queue pair.
+
+       In the kernel, this callback is typically invoked from within a tasklet,
+       depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_cm_rep, ib_cm_rtu_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_comp_cb_t

+ +


+

NAME

+
       ib_pfn_comp_cb_t
+
+

DESCRIPTION

+
       Completion callback provided by a client.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_comp_cb_t)(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN                              void                                            *cq_context );
+
+

PARAMETERS

+
       h_cq
+               [in] Handle for the completion queue on which the completion occurred.
+
+       cq_context
+               [in] User-specified context for the completion queue on which the
+               completion occurred.
+
+

NOTES

+
       This function is invoked upon completion of a work request on a queue pair
+       associated with the completion queue.  The context associated with the
+       completion queue on which the completion occurred is returned to the client
+       through the callback.
+
+       In the kernel, this callback is usually invoked using a tasklet, dependent
+       on the implementation of the underlying verbs provider driver.
+
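+       A sketch of a typical completion callback: rearm first so that no new
+       completion is missed, then drain the queue.  The single-element work
+       completion list and the p_next chaining field are assumptions; see
+       ib_wc_t, ib_poll_cq, and ib_rearm_cq for the exact details.
+
+               static void AL_API
+               my_comp_cb(
+                       IN              const   ib_cq_handle_t                          h_cq,
+                       IN                              void                                            *cq_context )
+               {
+                       ib_wc_t         wc, *p_free, *p_done;
+
+                       UNUSED_PARAM( cq_context );
+
+                       ib_rearm_cq( h_cq, FALSE );     /* request the next notification */
+
+                       wc.p_next = NULL;
+                       p_free = &wc;
+                       while( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
+                       {
+                               /* ... process the completions on p_done ... */
+                               p_free = p_done;        /* recycle the structures */
+                       }
+               }
+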
+
+
+ +

[Functions] +Access Layer/ib_pfn_destroy_cb_t

+ +


+

NAME

+
       ib_pfn_destroy_cb_t
+
+

DESCRIPTION

+
       Asynchronous callback invoked after a resource has been successfully
+       destroyed.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_destroy_cb_t)(
+        IN                              void                                            *context );
+
+

PARAMETERS

+
       context
+               [in] User-specified context associated with the resource being
+               destroyed.  The context for the resource is usually set during the
+               object's creation.
+
+

NOTES

+
       This callback notifies a client that a resource has been successfully
+       destroyed.  It is used to indicate that all pending callbacks associated
+       with the resource have completed, and no additional events will be
+       generated for that resource.
+
+       This callback is invoked within a system thread context in the kernel.
+
+       If the user specifies ib_sync_destroy as the asynchronous callback, then
+       the object being destroyed will be destroyed synchronously.  This may 
+       result in the calling thread blocking while outstanding callbacks complete.
+
+

SEE ALSO

+
       ib_sync_destroy
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_event_cb_t

+ +


+

NAME

+
       ib_pfn_event_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after an asynchronous event
+       has occurred on an allocated resource.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_event_cb_t)(
+        IN                              ib_async_event_rec_t            *p_event_rec );
+
+

PARAMETERS

+
       p_event_rec
+               [in] Information returned to the user, indicating the type of
+               event and the associated user context.
+
+

NOTES

+
       This callback is invoked within a system thread context in the kernel.
+
+

SEE ALSO

+
       ib_async_event_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_listen_err_cb_t

+ +


+

NAME

+
       ib_pfn_listen_err_cb_t
+
+

DESCRIPTION

+
       A user-specified callback that is invoked after an error has occurred on
+       a listen request.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_listen_err_cb_t)(
+        IN                              ib_listen_err_rec_t                     *p_listen_err_rec );
+
+

PARAMETERS

+
       p_listen_err_rec
+               [in] Error information returned to the user, indicating the reason
+               for the error and associated context information.
+
+

NOTES

+
       This callback is invoked within a system thread context in the kernel.
+
+

SEE ALSO

+
       ib_listen_err_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_mad_comp_cb_t

+ +


+

NAME

+
       ib_pfn_mad_comp_cb_t
+
+

DESCRIPTION

+
       User-defined callback used to notify the user of a completion for a
+       sent or received datagram.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_mad_comp_cb_t)(
+        IN              const   ib_mad_svc_handle_t                     h_mad_svc,
+        IN                              void                                            *mad_svc_context,
+        IN                              ib_mad_element_t                        *p_mad_element );
+
+

PARAMETERS

+
       h_mad_svc
+               [in] Handle to the MAD service on which the completion occurred.
+
+       mad_svc_context
+               [in] User-defined context information associated with the MAD service
+               on which the completion occurred.
+
+       p_mad_element
+               [in] References information on the completed MAD request.
+
+

NOTES

+
       This function is invoked upon completion of a sent or received MAD.
+       It is separate from the normal completion callbacks in order to allow
+       the access layer to perform post processing on the MAD, such as
+       segmentation and reassembly, and retransmissions if a response was
+       expected.
+
+       The mad element returned through this call should be returned to its MAD
+       pool after completion processing on the MAD has concluded.  Completed
+       receive MAD elements should not be reposted to the receive queue of a
+       MAD QP.
+
+       In the kernel, this callback is typically invoked from within a
+       tasklet, depending on the implementation of the verbs provider driver.
+
+

SEE ALSO

+
       ib_send_mad, ib_reg_mad_svc
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_mcast_cb_t

+ +


+

NAME

+
       ib_pfn_mcast_cb_t
+
+

DESCRIPTION

+
       User-defined callback invoked on completion of a multicast join request.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_mcast_cb_t)(
+        IN                              ib_mcast_rec_t                          *p_mcast_rec );
+
+

PARAMETERS

+
       p_mcast_rec
+               [in] References the result of the join operation.
+
+

NOTES

+
       The callback is used to notify a client of the result of a multicast
+       join request.
+
+       This callback is invoked within a system thread context in the kernel.
+
+

SEE ALSO

+
       ib_join_mcast, ib_mcast_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_pnp_cb_t

+ +


+

NAME

+
       ib_pfn_pnp_cb_t
+
+

DESCRIPTION

+
       User-defined callback that is invoked to notify a client of the addition
+       or removal of a channel adapter, a port up or down event, port changes,
+       and the assignment of an I/O controller to a local port.
+
+

SYNOPSIS

+
typedef ib_api_status_t
+(AL_API * __ptr64 ib_pfn_pnp_cb_t)(
+        IN                              ib_pnp_rec_t                            *p_pnp_rec );
+
+

PARAMETERS

+
       p_pnp_rec
+               [in] A reference to a plug and play record.  The plug and play
+               record contains details about the type of local event that has
+               occurred, along with the relevant device information.
+
+ RETURN VALUES
+       IB_SUCCESS
+               Indicates to the PnP manager that the callback client requires it
+               to maintain a context for this event.
+
+       Other
+               Indicates to the PnP manager that the callback client does not need
+               a context for this event.
+
+

NOTES

+
       The callback is used to notify users of local events that have occurred
+       on a given channel adapter.  Information about the type of event that
+       occurred along with the associated device is returned to the user through
+       the p_pnp_rec parameter.
+
+       Users register for plug and play changes by requesting notification from
+       the access layer.  Users may register for notifications either by directly
+       invoking the appropriate function in the access layer, or indirectly by
+       adding the necessary registration data to the access layer device file.
+
+       This callback is invoked from within a system thread context.
+
+       If the callback returns a status other than IB_SUCCESS, no further
+       callback for related events will be delivered.
+
+

SEE ALSO

+
       ib_pnp_rec_t, ib_reg_pnp, ib_dereg_pnp, ib_reject_ioc
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_query_cb_t

+ +


+

NAME

+
       ib_pfn_query_cb_t
+
+

DESCRIPTION

+
       User-defined callback invoked on completion of a subnet administrator
+       query.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_query_cb_t)(
+        IN                              ib_query_rec_t                          *p_query_rec );
+
+

PARAMETERS

+
       p_query_rec
+               [in] This is a reference to a structure containing the result of the
+               query.
+
+

NOTES

+
       This routine is invoked to notify a client of the result of a subnet
+       administration query.  The p_query_rec parameter references the result
+       of the query and, in the case of a successful query, any information
+       returned by the subnet administrator.
+
+       In the kernel, this callback is usually invoked using a tasklet, dependent
+       on the implementation of the underlying verbs provider driver.
+
+

SEE ALSO

+
       ib_query_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_reg_svc_cb_t

+ +


+

NAME

+
       ib_pfn_reg_svc_cb_t
+
+

DESCRIPTION

+
       User-defined callback that is invoked to notify a client of the result
+       of a service registration attempt.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_reg_svc_cb_t)(
+        IN                              ib_reg_svc_rec_t                        *p_reg_svc_rec );
+
+

PARAMETERS

+
       p_reg_svc_rec
+               [in] References the result of the service registration attempt.
+
+

NOTES

+
       The callback is used to notify a client of the result of a service
+       registration attempt with the subnet administrator.
+
+       In the kernel, this callback is usually invoked using a tasklet, dependent
+       on the implementation of the underlying verbs provider driver.
+
+

SEE ALSO

+
       ib_reg_svc, ib_reg_svc_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_report_cb_t

+ +


+

NAME

+
       ib_pfn_report_cb_t
+
+

DESCRIPTION

+
       User-defined callback that is invoked to notify a client of an event
+       that has occurred on the fabric.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_report_cb_t)(
+        IN                              ib_report_rec_t                         *p_report_rec );
+
+

PARAMETERS

+
       p_report_rec
+               [in] A reference to an event report.  The report contains
+               details about the type of event that has occurred, along with the
+               relevant device information.
+
+

NOTES

+
       The callback is used to notify users of remote events that have been seen
+       by a specified class manager.  Information about the type of event that
+       occurred along with the associated device is returned to the user through
+       the p_report_rec parameter.
+
+       Users register for device changes by subscribing with a class manager.
+       Users may subscribe for events either by directly invoking the
+       appropriate function in the access layer, or indirectly by adding the
+       necessary registration data to the access layer device file.
+
+       This callback is invoked from within a system thread context.
+
+

SEE ALSO

+
       ib_report_rec_t, ib_subscribe, ib_unsubscribe
+
+
+
+ +

[Functions] +Access Layer/ib_pfn_sub_cb_t

+ +


+

NAME

+
       ib_pfn_sub_cb_t
+
+

DESCRIPTION

+
       User-defined callback invoked on completion of a subscription request.
+
+

SYNOPSIS

+
typedef void
+(AL_API * __ptr64 ib_pfn_sub_cb_t)(
+        IN                              ib_sub_rec_t                            *p_sub_rec );
+
+

PARAMETERS

+
       p_sub_rec
+               [in] This is a reference to a structure containing the result of the
+               subscription request.
+
+

NOTES

+
       This routine is invoked to notify a client of the result of a
+       subscription request with a class manager.  If the subscription request
+       was successful, the client will receive future notifications of the
+       subscribed event from the class manager.
+
+       This callback will always be invoked before a client receives information
+       reported on a subscribed event that has occurred.
+
+       In the kernel, this callback is usually invoked using a tasklet, dependent
+       on the implementation of the underlying verbs provider driver.
+
+

SEE ALSO

+
       ib_subscribe, ib_sub_rec_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_ca_rec_t

+ +


+

NAME

+
       ib_pnp_ca_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local channel adapter events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_ca_rec
+{
+        ib_pnp_rec_t                            pnp_rec;
+        ib_ca_attr_t* __ptr64           p_ca_attr;
+
+}       ib_pnp_ca_rec_t;
+
+

FIELDS

+
       pnp_rec
+               Structure describing the plug and play event being reported.
+
+       p_ca_attr
+               Attributes of the channel adapter that has experienced the event.
+               NULL for IB_PNP_CA_REMOVE, IB_PNP_PORT_REMOVE, and IB_PNP_IOC_REMOVE
+               events.
+
+

NOTES

+
       This structure is returned to the user to notify them of the addition
+       or the removal of a channel adapter.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       Context values can be changed by updating the appropriate field
+       and will take effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.
+
+       Recipients of CA-related PnP events should cast the ib_pnp_rec_t structure
+       returned in the PnP callback to this type to access CA-specific information.
+
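+       A sketch of the cast described above, from within a PnP callback
+       registered for the IB_PNP_CA class:
+
+               static ib_api_status_t AL_API
+               my_pnp_cb(
+                       IN                              ib_pnp_rec_t                            *p_pnp_rec )
+               {
+                       ib_pnp_ca_rec_t *p_ca_rec;
+
+                       p_ca_rec = (ib_pnp_ca_rec_t*)p_pnp_rec;
+                       if( p_pnp_rec->pnp_event == IB_PNP_CA_ADD )
+                       {
+                               /* p_ca_rec->p_ca_attr describes the new CA. */
+                       }
+                       return IB_SUCCESS;      /* keep a context for this device */
+               }
+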
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
+ +

[Definitions] +Access Layer/ib_pnp_class_t

+ +


+

NAME

+
       ib_pnp_class_t
+
+

DESCRIPTION

+
       Specifies the class of plug and play events that are being subscribed for.
+
+

SYNOPSIS

+
#define IB_PNP_CA                                               0x00000001
+#define IB_PNP_PORT                                             0x00000002
+#define IB_PNP_IOU                                              0x00000004
+#define IB_PNP_IOC                                              0x00000008
+
+#define IB_PNP_FLAG_REG_SYNC                    0x40000000
+#define IB_PNP_FLAG_REG_COMPLETE                0x80000000
+#define IB_PNP_FLAG_MASK                                0xF0000000
+#define IB_PNP_CLASS_MASK                               0x000000FF
+
+typedef uint32_t        ib_pnp_class_t;
+
+

VALUES

+
       IB_PNP_CA
+               Value used to register for local channel adapter events.  These
+               events include the addition or removal of a local channel adapter.
+
+       IB_PNP_PORT
+               Value used to register for local port events.  These events include
+               local port up or down events and port LID or Pkey changes.
+
+       IB_PNP_IOU
+               Value used to register for I/O unit PnP events.  I/O unit events
+               include notifications of I/O unit assignment to and dissociation from
+               the local host.
+
+       IB_PNP_IOC
+               Value used to register for an I/O controller PnP event.  I/O controller
+               events include notification of an I/O controller assignment to a local
+               port and indication that an I/O controller dissociation has occurred.
+
+       IB_PNP_FLAG_REG_SYNC
+               Flag that is ORed with the PnP Class to control behavior of the
+               ib_reg_pnp call.  When set, ib_reg_pnp returns after the client has
+               received all events for the current state of the system.
+
+       IB_PNP_FLAG_REG_COMPLETE
+               Flag that is ORed with the PnP Class to control whether an event
+               is generated to indicate that a client has received all events for the
+               current state of the system.
+
+

NOTES

+
       When registering for PnP notification, a client specifies the class of
+       local events that the client wishes to be notified of.  For example, to
+       request notification of events on a port, a client would use IB_PNP_PORT.
+       To be notified of the assignment of an I/O controller, a client would use
+       IB_PNP_IOC.
+
+       The PnP APIs do not support registration for multiple event classes at
+       a time.
+
+

SEE ALSO

+
       ib_pfn_pnp_cb_t, ib_pfn_report_cb_t, ib_pnp_rec_t, ib_pnp_event_t
+
+
+
+ +

[Definitions] +Access Layer/ib_pnp_event_t

+ +


+

NAME

+
       ib_pnp_event_t
+
+

DESCRIPTION

+
       Indicates the type of plug and play event that has occurred.
+
+

SYNOPSIS

+
#define IB_PNP_EVENT_PATH                               0x00000800
+#define IB_PNP_EVENT_ADD                                0x00001000
+#define IB_PNP_EVENT_REMOVE                             0x00002000
+#define IB_PNP_EVENT_CHANGE                             0x00004000
+#define IB_PNP_EVENT_INIT                               0x00008000
+#define IB_PNP_EVENT_ARMED                              0x00010000
+#define IB_PNP_EVENT_ACTIVE                             0x00020000
+#define IB_PNP_EVENT_DOWN                               0x00040000
+#define IB_PNP_EVENT_PKEY                               0x00080000
+#define IB_PNP_EVENT_SM                                 0x00100000
+#define IB_PNP_EVENT_GID                                0x00200000
+#define IB_PNP_EVENT_LID                                0x00400000
+#define IB_PNP_EVENT_SUBNET                             0x00800000
+
+#define IB_PNP_CA_ADD                                   (IB_PNP_CA | IB_PNP_EVENT_ADD)
+#define IB_PNP_CA_REMOVE                                (IB_PNP_CA | IB_PNP_EVENT_REMOVE)
+
+#define IB_PNP_PORT_ADD                                 (IB_PNP_PORT | IB_PNP_EVENT_ADD)
+#define IB_PNP_PORT_REMOVE                              (IB_PNP_PORT | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_PORT_INIT                                (IB_PNP_PORT | IB_PNP_EVENT_INIT)
+#define IB_PNP_PORT_ARMED                               (IB_PNP_PORT | IB_PNP_EVENT_ARMED)
+#define IB_PNP_PORT_ACTIVE                              (IB_PNP_PORT | IB_PNP_EVENT_ACTIVE)
+#define IB_PNP_PORT_DOWN                                (IB_PNP_PORT | IB_PNP_EVENT_DOWN)
+#define IB_PNP_PKEY_CHANGE                              (IB_PNP_PORT | IB_PNP_EVENT_PKEY)
+#define IB_PNP_SM_CHANGE                                (IB_PNP_PORT | IB_PNP_EVENT_SM)
+#define IB_PNP_GID_CHANGE                               (IB_PNP_PORT | IB_PNP_EVENT_GID)
+#define IB_PNP_LID_CHANGE                               (IB_PNP_PORT | IB_PNP_EVENT_LID)
+#define IB_PNP_SUBNET_TIMEOUT_CHANGE    (IB_PNP_PORT | IB_PNP_EVENT_SUBNET)
+
+#define IB_PNP_IOU_ADD                                  (IB_PNP_IOU | IB_PNP_EVENT_ADD)
+#define IB_PNP_IOU_REMOVE                               (IB_PNP_IOU | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_IOC_ADD                                  (IB_PNP_IOC | IB_PNP_EVENT_ADD)
+#define IB_PNP_IOC_REMOVE                               (IB_PNP_IOC | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_IOC_PATH_ADD                             (IB_PNP_IOC | IB_PNP_EVENT_PATH | \
+                                                                                IB_PNP_EVENT_ADD)
+#define IB_PNP_IOC_PATH_REMOVE                  (IB_PNP_IOC | IB_PNP_EVENT_PATH | \
+                                                                                IB_PNP_EVENT_REMOVE)
+
+#define IB_PNP_REG_COMPLETE                             IB_PNP_FLAG_REG_COMPLETE
+
+typedef uint32_t        ib_pnp_event_t;
+
+

VALUES

+
       IB_PNP_CA_ADD
+               Indicates that a new channel adapter has been added.
+
+       IB_PNP_CA_REMOVE
+               Indicates that a channel adapter has been removed.
+
+       IB_PNP_PORT_ADD
+               Indicates that a new port has been added.  This callback will always
+               be followed by a callback to indicate the actual port state to allow
+               clients to use the PnP callbacks to drive their state machine.
+
+       IB_PNP_PORT_REMOVE
+               Indicates that a port has been removed.
+               A CA remove event will trigger this event first.
+
+       IB_PNP_PORT_INIT
+               Indicates that a port is in the IB_LINK_INIT state.
+
+       IB_PNP_PORT_ARMED
+               Indicates that a port is in the IB_LINK_ARMED state.
+
+       IB_PNP_PORT_ACTIVE
+               Indicates that a port is in the IB_LINK_ACTIVE state.
+
+       IB_PNP_PORT_DOWN
+               Indicates that a port down event has occurred.
+
+       IB_PNP_PKEY_CHANGE
+               Indicates that a port Pkey change has occurred.
+
+       IB_PNP_SM_CHANGE
+               Indicates that the SM assignment for a port has changed.
+
+       IB_PNP_GID_CHANGE
+               Indicates that the GID assignment for a port has changed.
+
+       IB_PNP_LID_CHANGE
+               Indicates that the LID or LMC assignment for a port has changed.
+
+       IB_PNP_SUBNET_TIMEOUT_CHANGE
+               Indicates that the subnet timeout assignment for a port has changed.
+
+       IB_PNP_IOU_ADD
+               Indicates that an I/O unit assignment has occurred.
+
+       IB_PNP_IOU_REMOVE
+               Indicates that an I/O unit dissociation has occurred.
+
+       IB_PNP_IOC_ADD
+               Indicates that an I/O controller assignment has occurred.
+
+       IB_PNP_IOC_REMOVE
+               Indicates that an I/O controller dissociation has occurred.
+               A port down event will trigger this event first.
+
+       IB_PNP_IOC_PATH_ADD
+               Indicates that a new path to an I/O controller is available.
+
+       IB_PNP_IOC_PATH_REMOVE
+               Indicates that a path to an I/O controller is no longer available.
+
+       IB_PNP_REG_COMPLETE
+               Indicates that all events associated with an ib_reg_pnp call have
+               been reported to the user.  The user's view of the system is now
+               in sync with that of the access layer.
+
+

NOTES

+
               The Access Layer maintains a queue of client PnP registrations.
+               Using this queue, PnP events are reported to clients in a specific
+               order.  CA add, port add, and IOC add events are reported from the
+               head of the queue, while CA remove, port remove, and IOC remove events
+               are reported from the tail.  Clients are responsible for performing
+               registrations in the proper sequence to ensure that PnP event
+               notifications occur in the desired order.
+
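+               A sketch of dispatching on the event type from within a PnP
+               callback (types and values as documented above):
+
+                       switch( p_pnp_rec->pnp_event )
+                       {
+                       case IB_PNP_PORT_ACTIVE:
+                               /* the port reached IB_LINK_ACTIVE; start traffic */
+                               break;
+                       case IB_PNP_PORT_DOWN:
+                               /* quiesce traffic on this port */
+                               break;
+                       case IB_PNP_REG_COMPLETE:
+                               /* initial sweep finished; state is now in sync */
+                               break;
+                       default:
+                               break;
+                       }
+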
+

SEE ALSO

+
       ib_pfn_pnp_cb_t, ib_pfn_report_cb_t, ib_pnp_rec_t, ib_pnp_class_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_ioc_path_rec_t

+ +


+

NAME

+
       ib_pnp_ioc_path_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local channel adapter, port,
+       and I/O controller events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_ioc_path_rec
+{
+        ib_pnp_rec_t                            pnp_rec;
+        net64_t                                         ca_guid;
+        net64_t                                         port_guid;
+        ib_path_rec_t                           path;
+
+}       ib_pnp_ioc_path_rec_t;
+
+

FIELDS

+
       pnp_rec
+               Structure describing the plug and play event being reported.
+
+       ca_guid
+               GUID of the local HCA through which the I/O controller is accessible.
+               Valid only for IB_PNP_IOC_PATH_ADD and IB_PNP_IOC_PATH_REMOVE events.
+
+       port_guid
+               GUID of the local HCA port through which the I/O controller is
+               accessible.  Valid only for IB_PNP_IOC_PATH_ADD and
+               IB_PNP_IOC_PATH_REMOVE events.
+
+       path
+               Path record that provides connectivity with a given I/O controller.
+               Valid only for IB_PNP_IOC_PATH_ADD and IB_PNP_IOC_PATH_REMOVE events.
+
+

NOTES

+
       This structure is returned to the user to notify them of the addition
+       and removal of a path to an I/O controller.  I/O controller path
+       notifications are only delivered with respect to a previously reported
+       I/O controller.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       Context values can be changed by updating the appropriate field
+       and will take effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.  Context is maintained between port down and subsequent port up
+       events provided that the channel adapter is not removed.
+
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_ioc_rec_t

+ +


+

NAME

+
       ib_pnp_ioc_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local channel adapter, port,
+       and I/O controller events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_ioc_rec
+{
+        ib_pnp_rec_t                            pnp_rec;
+        net64_t                                         ca_guid;
+        ib_ioc_info_t                           info;
+        ib_svc_entry_t                          svc_entry_array[1];
+
+}       ib_pnp_ioc_rec_t;
+
+

FIELDS

+
       pnp_rec
+               Structure describing the plug and play event being reported.
+
+       ca_guid
+               GUID of the local HCA through which the I/O controller is accessible.
+               Valid only for IB_PNP_IOC_ADD events.
+
+       info
+               The I/O controller information for an assigned controller, including
+               information for the I/O unit.  Valid only for IB_PNP_IOC_ADD events.
+
+       svc_entry_array
+               If an I/O controller is being reported, this will reference an array
+               of service entries associated with the I/O controller.  The actual
+               number of entries in the array may be determined by examining the
+               svc_entries field in the I/O controller profile.  Valid only for
+               IB_PNP_IOC_ADD events.
+
+

NOTES

+
       This structure is returned to the user to notify them of the addition
+       and removal of an I/O controller.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       Context values can be changed by updating the appropriate field
+       and will take effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.  Context is maintained between port down and subsequent port up
+       events provided that the channel adapter is not removed.
+
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_iou_rec_t

+ +


+

NAME

+
       ib_pnp_iou_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local I/O unit events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_iou_rec
+{
+        ib_pnp_rec_t                            pnp_rec;
+        net64_t                                         guid;
+        net64_t                                         ca_guid;
+        net64_t                                         chassis_guid;
+        uint8_t                                         slot;
+        net32_t                                         vend_id;
+        net16_t                                         dev_id;
+        net32_t                                         revision;
+        char                                            desc[IB_NODE_DESCRIPTION_SIZE + 1];
+
+}       ib_pnp_iou_rec_t;
+
+

FIELDS

+
       pnp_rec
+               Structure describing the plug and play event being reported.
+
+       ca_guid
+               GUID of the local HCA through which the I/O unit is accessible.  Valid
+               only for IB_PNP_IOU_ADD events.
+
+       chassis_guid
+               GUID of the chassis in which an I/O unit is installed.  Valid only for
+               IB_PNP_IOU_ADD events.
+
+       slot
+               Chassis slot number in which an I/O unit is installed.  Valid only for
+               IB_PNP_IOU_ADD events.
+
+       guid
+               GUID of an I/O unit from which one or more I/O controllers are assigned
+               to this host.  Valid only for IB_PNP_IOU_ADD events.
+
+       vend_id
+               Vendor ID of an I/O unit from which one or more I/O controllers are
+               assigned to this host.  Valid only for IB_PNP_IOU_ADD events.
+
+       dev_id
+               Device ID of an I/O unit from which one or more I/O controllers are
+               assigned to this host.  Valid only for IB_PNP_IOU_ADD events.
+
+       revision
+               Revision of an I/O unit from which one or more I/O controllers are
+               assigned to this host.  Valid only for IB_PNP_IOU_ADD events.
+
+       desc
+               Node description string for an I/O unit from which one or more I/O
+               controllers are assigned to this host.  Valid only for IB_PNP_IOU_ADD
+               events.
+
+

NOTES

+
       This structure is returned to the user to notify them of the addition
+       and removal of an I/O Unit.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       Context values can be changed by updating the appropriate field
+       and will take effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.  Context is maintained between port down and subsequent port up
+       events provided that the channel adapter is not removed.
+
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_port_rec_t

+ +


+

NAME

+
       ib_pnp_port_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local port events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_port_rec
+{
+        ib_pnp_rec_t                            pnp_rec;
+        ib_ca_attr_t* __ptr64           p_ca_attr;
+        ib_port_attr_t* __ptr64         p_port_attr;
+
+}       ib_pnp_port_rec_t;
+
+

FIELDS

+
       pnp_rec
+               Structure describing the plug and play event being reported.
+
+       p_ca_attr
+               Attributes of the channel adapter that has experienced the event.
+               NULL for IB_PNP_CA_REMOVE, IB_PNP_PORT_REMOVE, and IB_PNP_IOC_REMOVE
+               events.
+
+       p_port_attr
+               Attributes of the port that has experienced the event.  Valid only
+               for IB_PNP_PORT_UP, IB_PNP_PORT_DOWN, IB_PNP_PKEY_CHANGE, and
+               IB_PNP_IOC_ADD events.
+
+

NOTES

+
       This structure is returned to the user to notify them of port events.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       Context values can be changed by updating the appropriate field
+       and will take effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.  Context is maintained between port down and subsequent port up
+       events provided that the channel adapter is not removed.
+
+       Recipients of port related PnP events should cast the ib_pnp_rec_t structure
+       returned in the PnP callback to this type to access port specific information.
+
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
+ +

[Structures] +Access Layer/ib_pnp_rec_t

+ +


+

NAME

+
       ib_pnp_rec_t
+
+

DESCRIPTION

+
       Notification information used to describe local channel adapter, port,
+       and I/O controller events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_rec
+{
+        ib_pnp_event_t                          pnp_event;
+
+        ib_pnp_handle_t                         h_pnp;
+        ib_pnp_handle_t                         h_ioc_event;
+
+        void* __ptr64                           pnp_context;
+        void* __ptr64                           context;
+        //NOTE: guid and ca_guid are used as the key for a flexi map and
+        //must be kept together.
+        ib_net64_t                                      guid;
+        ib_net64_t                                      ca_guid;
+
+}       ib_pnp_rec_t;
+
+

FIELDS

+
       pnp_event
+               Describes the type of plug and play event that is being reported.
+
+       h_pnp
+               A handle to the notification registration for which this PnP record
+               was generated.  This handle will match the handle returned through
+               an ib_reg_pnp call.  It is provided in case a PnP notification event
+               occurs before a client's call to ib_reg_pnp can return.  This handle
+               may be used to cancel further notification of PnP events.
+
+       h_ioc_event
+               A handle that is unique to an I/O controller assignment event.
+               This handle is used to reject the assignment of an I/O controller
+               from within the ib_pfn_pnp_cb_t callback.  Valid for IB_PNP_IOC_ADD
+               events only.
+
+       pnp_context
+               User-defined context information specified when registering for
+               notification of the event.  See the notes section below for
+               more details.
+
+       context
+               This field references a user-specified context on which the event
+               occurred.  See the notes section below for more details.
+
+       guid
+               The GUID of the adapter, port, IOU, or IOC for which
+               the PnP event occurred.
+
+       ca_guid
+               The GUID of the HCA on which the event occurred.
+
+

NOTES

+
       This structure is returned to the user to notify them of the addition
+       or removal of a channel adapter, a port up or down event, a port pkey
+       change, and I/O controller addition or removal.
+
+       The context field is NULL unless a context value has already been set
+       by the user.
+
+       The context value can be changed by updating the context field;
+       the change takes effect once the notification callback returns.
+
+       Once a device has been removed, all context associated with that device
+       is lost.  Context is maintained between port down and subsequent port up
+       events provided that the channel adapter is not removed.
+
+       I/O controller path notifications are only delivered with respect to a
+       previously reported I/O controller.
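+
+       As a minimal sketch (assuming the client registered for port events),
+       a PnP callback might manage its per-device context as follows;
+       create_my_port_ctx and free_my_port_ctx are hypothetical client
+       helpers, not access layer calls:
+
+               static ib_api_status_t AL_API
+               my_pnp_cb(
+                       IN                              ib_pnp_rec_t                            *p_pnp_rec )
+               {
+                       switch( p_pnp_rec->pnp_event )
+                       {
+                       case IB_PNP_PORT_ADD:
+                               /* Context set here is returned in later events for this port. */
+                               p_pnp_rec->context = create_my_port_ctx( p_pnp_rec->guid );
+                               break;
+
+                       case IB_PNP_PORT_REMOVE:
+                               /* All context for the device is lost after removal. */
+                               free_my_port_ctx( p_pnp_rec->context );
+                               break;
+
+                       default:
+                               break;
+                       }
+                       return IB_SUCCESS;
+               }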
+
+

SEE ALSO

+
       ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+       ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+
+
+
[Structures] Access Layer/ib_pnp_req_t

+

NAME

+
       ib_pnp_req_t
+
+

DESCRIPTION

+
       Information used to register for notification of local and I/O
+       controller assignment events.
+
+

SYNOPSIS

+
typedef struct _ib_pnp_req
+{
+        ib_pnp_class_t                          pnp_class;
+        const void                                      *pnp_context;
+        ib_pfn_pnp_cb_t                         pfn_pnp_cb;
+
+}       ib_pnp_req_t;
+
+

FIELDS

+
       pnp_class
+               Specifies the class of PnP events that the client wishes to be
+               notified of.
+
+       pnp_context
+               User-defined context information associated with this notification.
+               The context data is returned to the user as a part of their PnP
+               notification callback.
+
+       pfn_pnp_cb
+               User-defined callback function that is invoked to notify the user of
+               the occurrence of a plug and play event.
+
+

NOTES

+
       This structure is used when requesting notification of local events from
+       the access layer.  The class of PnP events to be notified of is
+       specified through the pnp_class field.
+
+

SEE ALSO

+
       ib_pnp_class_t, ib_pfn_pnp_cb_t, ib_reg_pnp, ib_pnp_rec_t
+
+
+
[Functions] Access Layer/ib_poll_cq

+

NAME

+
       ib_poll_cq
+
+

DESCRIPTION

+
       Checks a completion queue for completed work requests.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_poll_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN      OUT                     ib_wc_t** const                         pp_free_wclist,
+                OUT                     ib_wc_t** const                         pp_done_wclist );
+
+

PARAMETERS

+
       h_cq
+               [in] A handle to a completion queue to check for completions on.
+
+       pp_free_wclist
+               [in/out] On input, a list of work completion structures provided by
+               the client.  These are used to report completed work requests through
+               the pp_done_wclist.
+
+               On output, this contains the list of work completions structures for
+               which no work completion was found.
+
+       pp_done_wclist
+               [out] A list of work completions retrieved from the completion queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The poll operation completed successfully.  If the free list
+               referenced by pp_free_wclist is empty on output, there are
+               potentially more completions available to retrieve.
+
+       IB_INVALID_PARAMETER
+               A reference to the free or done work completion list was not provided.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+       IB_NOT_FOUND
+               No completed work requests were removed from the completion queue.
+
+

NOTES

+
       This routine retrieves completed work requests from the specified
+       completion queue.  This call will retrieve all completed requests,
+       up to the number of work completion structures referenced by the
+       pp_free_wclist.  Completed requests will be returned through the
+       pp_done_wclist parameter.
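+
+       A minimal polling sketch, assuming h_cq is a valid completion queue
+       handle and using an illustrative array of eight work completion
+       structures:
+
+               ib_wc_t                 wc[8], *p_free, *p_done, *p_wc;
+               ib_api_status_t status;
+               int                             i;
+
+               do
+               {
+                       /* Chain the structures into a free list before each poll. */
+                       for( i = 0; i < 7; i++ )
+                               wc[i].p_next = &wc[i + 1];
+                       wc[7].p_next = NULL;
+                       p_free = wc;
+                       p_done = NULL;
+
+                       status = ib_poll_cq( h_cq, &p_free, &p_done );
+
+                       /* Walk the returned completions. */
+                       for( p_wc = p_done; p_wc != NULL; p_wc = p_wc->p_next )
+                       {
+                               /* Examine p_wc->status, p_wc->wr_id, etc. */
+                       }
+
+               } while( status == IB_SUCCESS );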
+
+

SEE ALSO

+
       ib_create_cq, ib_post_send, ib_post_recv, ib_bind_mw, ib_wc_t
+
+
+
[Functions] Access Layer/ib_post_recv

+

NAME

+
       ib_post_recv
+
+

DESCRIPTION

+
       This routine posts a work request to the receive queue of a queue pair.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_post_recv(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN                              ib_recv_wr_t* const                     p_recv_wr,
+                OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL );
+
+

PARAMETERS

+
       h_qp
+               [in] The queue pair to which this work request is being submitted.
+
+       p_recv_wr
+               [in] A reference to the head of the work request list.
+
+       pp_recv_failure
+               [out] If the post receive operation failed, this references the work
+               request in the p_recv_wr list where the first failure occurred.
+               This parameter may be NULL if only a single work request is being
+               posted to the QP.
+
+ RETURN VALUES
+       IB_SUCCESS
+               All work requests were successfully posted.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the receive work request list was not provided.
+
+       IB_INSUFFICIENT_RESOURCES
+               The number of posted work requests exceeds the current depth available
+               on the receive queue.
+
+       IB_INVALID_WR_TYPE
+               The work request type was invalid.
+
+       IB_INVALID_QP_STATE
+               The current queue pair state does not allow posting receives.
+
+

NOTES

+
       This routine posts a work request to the receive queue of a queue pair.
+       The type of work to perform is defined by the p_recv_wr parameter.  This
+       call is used to post data buffers to receive incoming message sends.
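+
+       A minimal sketch posting a single receive buffer; buf, buf_len, and
+       lkey are assumed to come from an earlier ib_reg_mem registration:
+
+               ib_local_ds_t   ds;
+               ib_recv_wr_t    wr;
+               ib_api_status_t status;
+
+               ds.vaddr  = (uintn_t)buf;       /* start of the registered buffer */
+               ds.length = buf_len;            /* bytes available for the receive */
+               ds.lkey   = lkey;               /* local key from the registration */
+
+               wr.p_next   = NULL;             /* single request: end of list */
+               wr.wr_id    = (uintn_t)buf;     /* returned in the work completion */
+               wr.num_ds   = 1;
+               wr.ds_array = &ds;
+
+               status = ib_post_recv( h_qp, &wr, NULL );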
+
+

SEE ALSO

+
       ib_recv_wr_t
+
+
+
[Functions] Access Layer/ib_post_send

+

NAME

+
       ib_post_send
+
+

DESCRIPTION

+
       This routine posts a work request to the send queue of a queue pair.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_post_send(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN                              ib_send_wr_t* const                     p_send_wr,
+                OUT                     ib_send_wr_t                            **pp_send_failure OPTIONAL );
+
+

PARAMETERS

+
       h_qp
+               [in] The queue pair to which this work request is being submitted.
+
+       p_send_wr
+               [in] A reference to the head of the work request list.
+
+       pp_send_failure
+               [out] If the post send operation failed, this references the work
+               request in the p_send_wr list where the first failure occurred.
+               This parameter may be NULL if only a single work request is being
+               posted to the QP.
+
+ RETURN VALUES
+       IB_SUCCESS
+               All work requests were successfully posted.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the send work request list was not provided.
+
+       IB_INSUFFICIENT_RESOURCES
+               The number of posted work requests exceeds the current depth available
+               on the send queue.
+
+       IB_INVALID_WR_TYPE
+               The work request type was invalid.
+
+       IB_INVALID_QP_STATE
+               The current queue pair state does not allow posting sends.
+
+       IB_INVALID_MAX_SGE
+               The number of work request scatter gather elements exceeds the queue
+               pair configuration.
+
+       IB_UNSUPPORTED
+               The requested operation is not supported by the channel adapter.
+
+

NOTES

+
       This routine posts a work request to the send queue of a queue pair.
+       The type of work to perform is defined by the p_send_wr parameter.
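+
+       A minimal sketch posting one signaled send; buf, msg_len, and lkey
+       are assumed to come from an earlier ib_reg_mem registration:
+
+               ib_local_ds_t   ds;
+               ib_send_wr_t    wr;
+               ib_api_status_t status;
+
+               ds.vaddr  = (uintn_t)buf;
+               ds.length = msg_len;
+               ds.lkey   = lkey;
+
+               cl_memclr( &wr, sizeof(wr) );
+               wr.wr_id    = 1;                        /* tag reported in the completion */
+               wr.wr_type  = WR_SEND;
+               wr.send_opt = IB_SEND_OPT_SIGNALED;     /* request a work completion */
+               wr.num_ds   = 1;
+               wr.ds_array = &ds;
+
+               status = ib_post_send( h_qp, &wr, NULL );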
+
+

SEE ALSO

+
       ib_send_wr_t
+
+
+
[Functions] Access Layer/ib_post_srq_recv

+

NAME

+
       ib_post_srq_recv
+
+

DESCRIPTION

+
       This routine posts a work request to a shared receive queue.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_post_srq_recv(
+        IN              const   ib_srq_handle_t                         h_srq,
+        IN                              ib_recv_wr_t* const                     p_recv_wr,
+                OUT                     ib_recv_wr_t                            **pp_recv_failure OPTIONAL );
+
+

PARAMETERS

+
       h_srq
+               [in] The shared receive queue to which this work request is being submitted.
+
+       p_recv_wr
+               [in] A reference to the head of the work request list.
+
+       pp_recv_failure
+               [out] If the post receive operation failed, this references the work
+               request in the p_recv_wr list where the first failure occurred.
+               This parameter may be NULL if only a single work request is being
+               posted to the SRQ.
+
+ RETURN VALUES
+       IB_SUCCESS
+               All work requests were successfully posted.
+
+       IB_INVALID_SRQ_HANDLE
+               The shared receive queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the receive work request list was not provided.
+
+       IB_INSUFFICIENT_RESOURCES
+               The number of posted work requests exceeds the current depth available
+               on the receive queue.
+
+       IB_INVALID_WR_TYPE
+               The work request type was invalid.
+
+       IB_INVALID_QP_STATE
+               The current shared receive queue state does not allow posting receives.
+
+

NOTES

+
       This routine posts a work request to the shared receive queue.
+       The type of work to perform is defined by the p_recv_wr parameter.  This
+       call is used to post data buffers to receive incoming message sends.
+
+

SEE ALSO

+
       ib_recv_wr_t
+
+
+
[Functions] Access Layer/ib_put_mad

+

NAME

+
       ib_put_mad
+
+

DESCRIPTION

+
       Returns a list of MAD elements to the pool.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_put_mad(
+        IN              const   ib_mad_element_t*                       p_mad_element_list );
+
+

PARAMETERS

+
       p_mad_element_list
+               [in] A pointer to a list of MAD elements.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The list of MAD elements was successfully returned to the MAD pool.
+
+       IB_INVALID_PARAMETER
+               A reference to the MAD element list was not provided.
+
+

NOTES

+
       This function returns a list of MAD elements to the pool.
+
+

SEE ALSO

+
       ib_get_mad, ib_mad_element_t
+
+
+
[Functions] Access Layer/ib_query

+

NAME

+
       ib_query
+
+

DESCRIPTION

+
       Routine used to request an access layer provided query of the subnet
+       administrator.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_query_req_t* const           p_query_req,
+                OUT                     ib_query_handle_t* const        ph_query OPTIONAL );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       p_query_req
+               [in] Specifies the type of query that the access layer should perform,
+               along with information needed to process the completed query.
+
+       ph_query
+               [out] Pointer to a query handle that can be used to cancel the query.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The subnet administrator query was initiated.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the query request was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_INVALID_GUID
+               No port was found for the port_guid specified in the request.
+
+       IB_ERROR
+               An invalid query_type was specified in the request.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine directs the access layer to initiate a query to the subnet
+       administrator for desired information.  The access layer will issue the
+       query, collect the results, and report them to the client through a user-
+       specified callback.  The access layer is responsible for retrying the
+       operation as directed by the client.
+
+

SEE ALSO

+
       ib_cancel_query, ib_query_req_t
+
+
+
[Functions] Access Layer/ib_query_av

+

NAME

+
       ib_query_av
+
+

DESCRIPTION

+
       Returns the attributes of an address vector.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_av(
+        IN              const   ib_av_handle_t                          h_av,
+                OUT                     ib_av_attr_t* const                     p_av_attr,
+                OUT                     ib_pd_handle_t* const           ph_pd );
+
+

PARAMETERS

+
       h_av
+               [in] A handle to an existing address vector.
+
+       p_av_attr
+               [out] Upon successful completion, the structure referenced by this
+               parameter contains the attributes of the specified address vector.
+
+       ph_pd
+               [out] Upon successful completion, this references a handle to the
+               protection domain associated with the address vector.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The attributes were returned successfully.
+
+       IB_INVALID_AV_HANDLE
+               The address vector handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the address vector attributes structure or protection
+               domain handle was not provided.
+
+

SEE ALSO

+
       ib_create_av, ib_modify_av, ib_destroy_av, ib_av_attr_t
+
+
+
[Functions] Access Layer/ib_query_ca

+

NAME

+
       ib_query_ca
+
+

DESCRIPTION

+
       Queries the attributes of an opened channel adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_ca(
+        IN              const   ib_ca_handle_t                          h_ca,
+                OUT                     ib_ca_attr_t* const                     p_ca_attr OPTIONAL,
+        IN      OUT                     uint32_t* const                         p_size );
+
+

PARAMETERS

+
       h_ca
+               [in] The handle to an open channel adapter.
+
+       p_ca_attr
+               [out] A reference to a buffer where the channel adapter attributes,
+               including port attribute information will be copied.  If this parameter
+               is NULL, then the required buffer size needed to return all of the CA
+               attribute information is returned through the p_size parameter.  The
+               ib_ca_attr_t structure for the specified channel adapter is stored
+               at the top of the buffer.
+
+       p_size
+               [in/out] On input, this references the size of the data buffer
+               referenced by the p_ca_attr parameter.
+
+               On output, the number of bytes used or needed to copy all CA
+               attribute information.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The attributes were returned successfully.
+
+       IB_INVALID_CA_HANDLE
+               The channel adapter handle was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               The size of the p_ca_attr buffer, specified through p_size, is
+               insufficient to store all of the CA attribute information.
+
+       IB_INVALID_PARAMETER
+               A reference to the size was not provided.
+
+

NOTES

+
       This routine returns information about the specified channel adapter,
+       including port attributes.  The amount of information returned through
+       this call is variable sized.  Users may obtain the size of the data
+       buffer required to obtain the CA attributes by calling this function
+       with p_ca_attr set to NULL.  The access layer will then return the
+       necessary size in the variable referenced by the p_size parameter.
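+
+       The two-call sizing pattern described above might look as follows
+       (cl_malloc and cl_free are the complib allocators; error handling
+       is abbreviated):
+
+               uint32_t                size = 0;
+               ib_ca_attr_t    *p_ca_attr;
+               ib_api_status_t status;
+
+               /* First call with a NULL buffer returns the required size. */
+               ib_query_ca( h_ca, NULL, &size );
+
+               p_ca_attr = (ib_ca_attr_t*)cl_malloc( size );
+               if( p_ca_attr )
+               {
+                       status = ib_query_ca( h_ca, p_ca_attr, &size );
+                       /* ... use p_ca_attr->num_ports, etc. ... */
+                       cl_free( p_ca_attr );
+               }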
+
+

SEE ALSO

+
       ib_open_ca, ib_query_ca_by_guid, ib_modify_ca, ib_close_ca, ib_ca_attr_t
+
+
+
[Functions] Access Layer/ib_query_ca_by_guid

+

NAME

+
       ib_query_ca_by_guid
+
+

DESCRIPTION

+
       Queries the attributes of an opened channel adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_ca_by_guid(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_net64_t                                      ca_guid,
+                OUT                     ib_ca_attr_t* const                     p_ca_attr OPTIONAL,
+        IN      OUT                     uint32_t* const                         p_size );
+
+

PARAMETERS

+
       h_al
+               [in] The handle to an open instance of AL.
+
+       ca_guid
+               [in] The GUID of the channel adapter to query.
+
+       p_ca_attr
+               [out] A reference to a buffer where the channel adapter attributes,
+               including port attribute information will be copied.  If this parameter
+               is NULL, then the required buffer size needed to return all of the CA
+               attribute information is returned through the p_size parameter.  The
+               ib_ca_attr_t structure for the specified channel adapter is stored
+               at the top of the buffer.
+
+       p_size
+               [in/out] On input, this references the size of the data buffer
+               referenced by the p_ca_attr parameter.
+
+               On output, the number of bytes used or needed to copy all CA
+               attribute information.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The attributes were returned successfully.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_GUID
+               No channel adapter in the system was found for the specified ca_guid.
+
+       IB_INSUFFICIENT_MEMORY
+               The size of the p_ca_attr buffer, specified through p_size, is
+               insufficient to store all of the CA attribute information.
+
+       IB_INVALID_PARAMETER
+               A reference to the size was not provided.
+
+

NOTES

+
       This routine returns information about the specified channel adapter,
+       including port attributes.  The amount of information returned through
+       this call is variable sized.  Users may obtain the size of the data
+       buffer required to obtain the CA attributes by calling this function
+       with p_ca_attr set to NULL.  The access layer will then return the
+       necessary size in the variable referenced by the p_size parameter.
+
+

SEE ALSO

+
       ib_open_ca, ib_query_ca, ib_modify_ca, ib_close_ca, ib_ca_attr_t
+
+
+
[Functions] Access Layer/ib_query_cq

+

NAME

+
       ib_query_cq
+
+

DESCRIPTION

+
       Returns information about the specified completion queue.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_cq(
+        IN              const   ib_cq_handle_t          h_cq,
+        OUT             uint32_t* const                         p_size );
+
+

PARAMETERS

+
       h_cq
+               [in] A handle to an existing completion queue.
+
+       p_size
+               [out] Upon successful completion of this call, contains the actual
+               size of the completion queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The completion queue was successfully queried.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the completion queue size was not provided.
+
+

SEE ALSO

+
       ib_create_cq
+
+
+
[Functions] Access Layer/ib_query_mr

+

NAME

+
       ib_query_mr
+
+

DESCRIPTION

+
       Query the current attributes of a memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_mr(
+        IN              const   ib_mr_handle_t                          h_mr,
+                OUT                     ib_mr_attr_t* const                     p_mr_attr );
+
+

PARAMETERS

+
       h_mr
+               [in] A handle to a registered memory region.
+
+       p_mr_attr
+               [out] A reference to a structure where the registered memory attributes
+               will be copied.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory region attributes were returned successfully.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the memory region attributes was not provided.
+
+

NOTES

+
       This routine returns information about the specified registered memory
+       region.
+
+

SEE ALSO

+
       ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_mr_attr_t
+
+
+
[Functions] Access Layer/ib_query_mw

+

NAME

+
       ib_query_mw
+
+

DESCRIPTION

+
       Query the current attributes of a memory window.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_mw(
+        IN              const   ib_mw_handle_t                          h_mw,
+                OUT                     ib_pd_handle_t* const           ph_pd,
+                OUT                     net32_t* const                          p_rkey );
+
+

PARAMETERS

+
       h_mw
+               [in] A handle to an existing memory window.
+
+       ph_pd
+               [out] Upon successful completion of this call, this will reference
+               the protection domain associated with this memory window.
+
+       p_rkey
+               [out] Upon successful completion of this call, this will reference
+               the current rkey associated with this memory window.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory window attributes were returned successfully.
+
+       IB_INVALID_MW_HANDLE
+               The memory window handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the protection domain handle or rkey was not provided.
+
+

NOTES

+
       This routine returns information about the specified memory window.
+
+

SEE ALSO

+
       ib_create_mw
+
+
+
[Functions] Access Layer/ib_query_qp

+

NAME

+
       ib_query_qp
+
+

DESCRIPTION

+
       Query the current attributes of the queue pair.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_qp(
+        IN              const   ib_qp_handle_t                          h_qp,
+                OUT                     ib_qp_attr_t* const                     p_qp_attr );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to an existing queue pair.
+
+       p_qp_attr
+               [out] Upon successful completion of this call, the structure
+               referenced by this parameter contains the attributes of the specified
+               queue pair.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The queue pair attributes were returned successfully.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the queue pair attributes structure was not provided.
+
+

NOTES

+
       This routine returns information about the specified queue pair.
+
+

SEE ALSO

+
       ib_create_qp, ib_modify_qp, ib_qp_attr_t
+
+
+
[Structures] Access Layer/ib_query_rec_t

+

NAME

+
       ib_query_rec_t
+
+

DESCRIPTION

+
       Contains the results of a subnet administration query.
+
+

SYNOPSIS

+
typedef struct _ib_query_rec
+{
+        const void* __ptr64                     query_context;
+        ib_api_status_t                         status;
+
+        ib_query_type_t                         query_type;
+        uint32_t                                        result_cnt;
+        ib_mad_element_t* __ptr64       p_result_mad;
+
+}       ib_query_rec_t;
+
+

FIELDS

+
       query_context
+               User-defined context information associated with the query through
+               the ib_query call.
+
+       status
+               Indicates the success of the query operation.
+
+       query_type
+               Indicates the type of query for which the results are being returned.
+               This matches the query_type specified through the ib_query call.
+
+       result_cnt
+               The number of result structures that were returned by the query.
+
+       p_result_mad
+               For queries returning IB_SUCCESS or IB_REMOTE_ERROR, this references
+               the MAD returned by the subnet administrator containing the list
+               of results or the returned error code.
+
+

NOTES

+
       A query result structure is returned to a client through their
+       ib_pfn_query_cb_t routine to notify them of the results of a subnet
+       administration query.  If the query was successful or received an error
+       from the subnet administrator, p_result_mad will reference a MAD element
+       containing the results.  The MAD referenced by p_result_mad is owned by
+       the user and remains available even after their callback returns.  Users
+       must call ib_put_mad() to return the MAD element back to the access layer
+       when they are done accessing the results.
+
+       To retrieve individual result structures from the p_result_mad, users
+       may call ib_get_query_result().
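+
+       As a sketch, a query callback for an IB_QUERY_PATH_REC_BY_GIDS query
+       might process its results as follows (the cast reflects the assumed
+       result type for this query):
+
+               static void AL_API
+               my_query_cb(
+                       IN                              ib_query_rec_t                          *p_query_rec )
+               {
+                       uint32_t                i;
+                       ib_path_rec_t   *p_path;
+
+                       if( p_query_rec->status == IB_SUCCESS )
+                       {
+                               for( i = 0; i < p_query_rec->result_cnt; i++ )
+                               {
+                                       p_path = (ib_path_rec_t*)ib_get_query_result(
+                                               p_query_rec->p_result_mad, i );
+                                       /* ... use p_path->slid, p_path->dlid, ... */
+                               }
+                       }
+
+                       /* The result MAD is owned by the user and must be returned. */
+                       if( p_query_rec->p_result_mad )
+                               ib_put_mad( p_query_rec->p_result_mad );
+               }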
+
+

SEE ALSO

+
       ib_query, ib_pfn_query_cb_t, ib_api_status_t, ib_put_mad, ib_mad_element_t,
+       ib_query_status_t, ib_query_type_t, ib_get_query_result
+
+
+
[Structures] Access Layer/ib_query_req_t

+

NAME

+
       ib_query_req_t
+
+

DESCRIPTION

+
       Information used to request an access layer provided query of the subnet
+       administrator.
+
+

SYNOPSIS

+
typedef struct _ib_query_req
+{
+        ib_query_type_t                         query_type;
+        const void* __ptr64                     p_query_input;
+        ib_net64_t                                      port_guid;
+
+        uint32_t                                        timeout_ms;
+        uint32_t                                        retry_cnt;
+        ib_al_flags_t                           flags;
+
+        const void* __ptr64                     query_context;
+        ib_pfn_query_cb_t                       pfn_query_cb;
+
+}       ib_query_req_t;
+
+

FIELDS

+
       query_type
+               Indicates the type of query that the access layer should perform.
+
+       p_query_input
+               A pointer to the input for the query.  The data referenced by this
+               structure is dependent on the type of query being requested and is
+               determined by the specified query_type.
+
+       port_guid
+               Directs the query to use the specified port.  The request will
+               contact the management entity reachable through the given port.
+
+       timeout_ms
+               Specifies the number of milliseconds to wait for a response for
+               this query until retrying or timing out the request.
+
+       retry_cnt
+               Specifies the number of times that the query will be retried before
+               failing the request.
+
+       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       query_context
+               User-defined context information associated with this query.  The
+               context data is returned to the user as a part of their query
+               callback.
+
+       pfn_query_cb
+               A user-defined callback that is invoked upon completion of the query.
+
+

NOTES

+
       This structure is used when requesting an access layer provided query
+       of the subnet administrator.  Clients specify the type of query through
+       the query_type field.  Based on the type of query, the p_query_input
+       field is set to reference the appropriate data structure.
+
+       The information referenced by the p_query_input field is one of the
+       following:
+
+               -- a NULL terminated service name
+               -- a service id
+               -- a single GUID
+               -- a pair of GUIDs specified through an ib_guid_pair_t structure
+               -- a pair of GIDs specified through an ib_gid_pair_t structure
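+
+       For example, a synchronous path record query between two GIDs might
+       be requested as sketched below (the GID values, context, and callback
+       are illustrative):
+
+               ib_gid_pair_t           gids;
+               ib_query_req_t          query_req;
+               ib_query_handle_t       h_query;
+               ib_api_status_t         status;
+
+               gids.src_gid  = local_gid;              /* assumed ib_gid_t values */
+               gids.dest_gid = remote_gid;
+
+               cl_memclr( &query_req, sizeof(query_req) );
+               query_req.query_type    = IB_QUERY_PATH_REC_BY_GIDS;
+               query_req.p_query_input = &gids;
+               query_req.port_guid     = local_port_guid;
+               query_req.timeout_ms    = 1000;
+               query_req.retry_cnt     = 3;
+               query_req.flags         = IB_FLAGS_SYNC;
+               query_req.query_context = my_context;
+               query_req.pfn_query_cb  = my_query_cb;
+
+               status = ib_query( h_al, &query_req, &h_query );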
+
+

SEE ALSO

+
       ib_query_type_t, ib_pfn_query_cb_t, ib_guid_pair_t,
+       ib_gid_pair_t
+
+
+
[Functions] Access Layer/ib_query_srq

+

NAME

+
       ib_query_srq
+
+

DESCRIPTION

+
       Query the current attributes of the shared receive queue.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_query_srq(
+        IN              const   ib_srq_handle_t                         h_srq,
+                OUT                     ib_srq_attr_t* const                    p_srq_attr );
+
+

PARAMETERS

+
       h_srq
+               [in] A handle to an existing shared receive queue.
+
+       p_srq_attr
+               [out] Upon successful completion of this call, the structure
+               referenced by this parameter contains the attributes of the specified
+               shared receive queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The shared receive queue attributes were returned successfully.
+
+       IB_INVALID_SRQ_HANDLE
+               The shared receive queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the shared receive queue attributes structure was not provided.
+
+

NOTES

+
       This routine returns information about the specified shared receive queue.
+
+

SEE ALSO

+
       ib_create_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+       ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+
+
+
[Definitions] Access Layer/ib_query_type_t

+

NAME

+
       ib_query_type_t
+
+

DESCRIPTION

+
       Abstracted queries supported by the access layer.
+
+

SYNOPSIS

+
typedef enum _ib_query_type
+{
+        IB_QUERY_USER_DEFINED,
+
+        IB_QUERY_ALL_SVC_RECS,
+        IB_QUERY_SVC_REC_BY_NAME,
+        IB_QUERY_SVC_REC_BY_ID,
+
+        IB_QUERY_CLASS_PORT_INFO,
+
+        IB_QUERY_NODE_REC_BY_NODE_GUID,
+        IB_QUERY_PORT_REC_BY_LID,
+
+        IB_QUERY_VLARB_BY_LID_PORT_BLOCK,
+        IB_QUERY_SLVL_BY_LID_AND_PORTS,
+
+        IB_QUERY_PATH_REC_BY_PORT_GUIDS,
+        IB_QUERY_PATH_REC_BY_GIDS,
+        IB_QUERY_PATH_REC_BY_LIDS,
+
+}       ib_query_type_t;
+
+

VALUES

+
       IB_QUERY_USER_DEFINED
+               Query the SA based on user-defined input.  Queries of this type
+               should reference an ib_user_query_t structure as input into the
+               query.
+
+       IB_QUERY_SVC_REC_BY_NAME
+               Query for service records based on the service name.  Queries of
+               this type should reference an ib_svc_name_t structure as input
+               into the query.
+
+       IB_QUERY_SVC_REC_BY_ID
+               Query for service records based on the service ID.  Queries of
+               this type should reference an ib_net64_t value that indicates the
+               ID of the service being requested.
+
+       IB_QUERY_NODE_REC_BY_NODE_GUID
+               Query for node information based on the node's GUID.  Queries of
+               this type should reference an ib_net64_t value that indicates the
+               GUID of the node being requested.
+
+       IB_QUERY_PORT_REC_BY_LID
+               Query for port information based on the port's base LID.  Queries of
+               this type should reference an ib_net16_t value that indicates the
+               base LID of the port being requested.
+
+       IB_QUERY_PATH_REC_BY_PORT_GUIDS
+               Query for path records between the specified pair of port GUIDs.
+               Queries of this type should reference an ib_guid_pair_t structure
+               that indicates the GUIDs of the path being requested.
+
+       IB_QUERY_PATH_REC_BY_GIDS
+               Query for path records between the specified pair of port GIDs.
+               Queries of this type should reference an ib_gid_pair_t structure
+               that indicates the GIDs of the path being requested.
+
+       IB_QUERY_PATH_REC_BY_LIDS
+               Query for path records between the specified pair of port LIDs.
+               Queries of this type should reference an ib_lid_pair_t structure
+               that indicates the LIDs of the path being requested.
+
+

NOTES

+
       This enum is used to define abstracted queries provided by the access
+       layer.  Users may issue queries not listed here by sending MADs directly
+       to the subnet administrator or a class manager.  These queries are
+       intended to represent those most often used by clients.
+
+

SEE ALSO

+
       ib_query, ib_query_req_t, ib_user_query_t, ib_gid_pair_t, ib_lid_pair_t,
+       ib_guid_pair_t
+
+
+
[Functions] Access Layer/ib_rearm_cq

+

NAME

+
       ib_rearm_cq
+
+

DESCRIPTION

+
       This indicates that the completion queue should notify the client when
+       the next completion is added.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_rearm_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN              const   boolean_t                                       solicited );
+
+

PARAMETERS

+
       h_cq
+               [in] Handle to the completion queue to rearm.
+
+       solicited
+               [in] A flag indicating when to generate a notification: on the next
+               entry added to the completion queue if set to FALSE, or only on the
+               next solicited entry if set to TRUE.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The completion queue rearm request was registered successfully.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+

NOTES

+
       This routine instructs the channel interface to invoke the completion
+       handler when the next completion queue entry is added to this CQ.
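+
+       A common pattern is to drain the CQ, rearm it, then drain once more to
+       close the race with completions that arrive while rearming; drain_cq
+       below is a hypothetical helper built around ib_poll_cq:
+
+               static void AL_API
+               cq_handler(
+                       IN              const   ib_cq_handle_t                          h_cq,
+                       IN                              void                                            *cq_context )
+               {
+                       UNUSED_PARAM( cq_context );
+
+                       drain_cq( h_cq );
+                       if( ib_rearm_cq( h_cq, FALSE ) == IB_SUCCESS )
+                               drain_cq( h_cq );       /* catch completions that raced the rearm */
+               }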
+
+

SEE ALSO

+
       ib_create_cq, ib_peek_cq, ib_poll_cq, ib_rearm_n_cq
+
+
+
[Functions] Access Layer/ib_rearm_n_cq

+

NAME

+
       ib_rearm_n_cq
+
+

DESCRIPTION

+
       This indicates that the completion queue should notify the client when
+       the next N completions have been added to this CQ.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_rearm_n_cq(
+        IN              const   ib_cq_handle_t                          h_cq,
+        IN              const   uint32_t                                        n_cqes );
+
+

PARAMETERS

+
       h_cq
+               [in] Handle to the completion queue to rearm.
+
+       n_cqes
+               [in] The number of completion queue entries to be added to the
+               completion queue before notifying the client.  This value must be
+               greater than or equal to one and less than or equal to the size
+               of the completion queue.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The completion queue rearm request was registered successfully.
+
+       IB_INVALID_CQ_HANDLE
+               The completion queue handle was invalid.
+
+       IB_INVALID_PARAMETER
+               The requested number of completion queue entries was invalid.
+
+       IB_UNSUPPORTED
+               This operation is not supported by the channel adapter.
+
+

NOTES

+
       This routine instructs the channel interface to invoke the completion
+       handler when the next N completions have been added to this CQ regardless
+       of the completion type (solicited or unsolicited).  Any CQ entries that
+       existed before the rearm will not result in a call to the handler.
+       Support for this operation by a channel adapter is optional.
+
+

SEE ALSO

+
       ib_create_cq, ib_peek_cq, ib_poll_cq, ib_rearm_cq
+
+
+
[Functions] Access Layer/ib_reg_ioc

+

NAME

+
       ib_reg_ioc
+
+

DESCRIPTION

+
       Registers an I/O controller with the local device manager, which will
+       export the controller to the fabric.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_ioc(
+        IN              const   ib_ioc_handle_t                         h_ioc );
+
+

PARAMETERS

+
       h_ioc
+               [in] A handle to the controller being registered.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The I/O controller was successfully registered.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the I/O controller.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the I/O
+               unit to register the I/O controller.
+
+       IB_INVALID_HANDLE
+               The I/O controller handle was invalid.
+
+

NOTES

+
       This routine registers an I/O controller with the local device manager.
+       The device manager exports the controller to the fabric as part of an
+       I/O unit.  Typically, clients will call ib_add_svc_entry to add services
+       to the controller before registering it with the device manager.
+
+

SEE ALSO

+
       ib_create_ioc, ib_destroy_ioc, ib_add_svc_entry
+
+
+
[Functions] Access Layer/ib_reg_mad_pool

+

NAME

+
       ib_reg_mad_pool
+
+

DESCRIPTION

+
       Registers a MAD pool for use with a protection domain.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_mad_pool(
+        IN              const   ib_pool_handle_t                        h_pool,
+        IN              const   ib_pd_handle_t                          h_pd,
+                OUT                     ib_pool_key_t* const            p_pool_key );
+
+

PARAMETERS

+
       h_pool
+               [in] A handle to a MAD pool.
+
+       h_pd
+               [in] A handle to a protection domain.
+
+       p_pool_key
+               [out] A key associated with registering the MAD pool with the
+               protection domain.  This key is returned to the user and is used
+               when retrieving MADs from the pool.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The MAD pool was successfully registered with the protection domain.
+
+       IB_INVALID_HANDLE
+               The MAD pool handle was invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the pool key was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the MAD pool.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to register the MAD pool.
+
+

NOTES

+
       This function registers a MAD pool with a protection domain.  After
+       successful completion of this call, the MAD elements of the associated
+       pool are usable on any queue pairs associated with the given protection
+       domain.
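+
+       A minimal sketch, assuming h_pool and h_pd were created earlier via
+       ib_create_mad_pool and ib_alloc_pd:
+
+               ib_pool_key_t   pool_key;
+               ib_api_status_t status;
+
+               status = ib_reg_mad_pool( h_pool, h_pd, &pool_key );
+               /* pool_key is passed to ib_get_mad when taking MADs from the pool. */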
+
+

SEE ALSO

+
       ib_create_mad_pool, ib_destroy_mad_pool, ib_dereg_mad_pool, ib_get_mad
+
+
+
[Functions] Access Layer/ib_reg_mad_svc

+

NAME

+
       ib_reg_mad_svc
+
+

DESCRIPTION

+
       Requests management datagram support for a specified class with a
+       queue pair.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_mad_svc(
+        IN              const   ib_qp_handle_t                          h_qp,
+        IN              const   ib_mad_svc_t* const                     p_mad_svc,
+                OUT                     ib_mad_svc_handle_t* const      ph_mad_svc );
+
+

PARAMETERS

+
       h_qp
+               [in] A handle to a queue pair.  The queue pair must have been created
+               as one of the following types: IB_QPT_QP0, IB_QPT_QP0_ALIAS,
+               IB_QPT_QP1, IB_QPT_QP1_ALIAS, or IB_QPT_MAD.
+
+       p_mad_svc
+               [in] A reference to the management class and methods supported by
+               this queue pair.
+
+       ph_mad_svc
+               [out] On successful completion of this call, this references a
+               handle to the newly created MAD service.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The queue pair was registered successfully.
+
+       IB_INVALID_QP_HANDLE
+               The queue pair handle was invalid.
+
+       IB_INVALID_PARAMETER
+               The queue pair handle was not created with the proper queue pair
+               type or a reference to the MAD service information or handle was
+               not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the queue pair.
+
+

NOTES

+
       This routine registers a queue pair as using a particular management
+       class.  This indicates that the access layer should perform additional
+       processing on MADs sent and received by this queue pair.  Queue pairs
+       registered for MAD support receive access layer segmentation and
+       reassembly (SAR) and retransmission services.  A queue pair may be
+       registered for multiple management classes.
+
+

SEE ALSO

+
       ib_create_qp, ib_mad_svc_t
+
+
+
[Functions] Access Layer/ib_reg_mem

+

NAME

+
       ib_reg_mem
+
+

DESCRIPTION

+
       Registers a virtual memory region with a channel adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_mem(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_mr_create_t* const           p_mr_create,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+                OUT                     ib_mr_handle_t* const           ph_mr );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an existing protection domain that the memory
+               should be registered with.
+
+       p_mr_create
+               [in] Information describing the memory region to register.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing
+               RDMA or atomic operations to this registered memory region.
+
+       ph_mr
+               [out] Upon successful completion of this call, this references a
+               handle to the registered memory region.  This handle is used when
+               performing data transfers and to deregister the memory.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory region was successfully registered.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain to associate with the memory region was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the memory region information, lkey, rkey, or handle
+               was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the memory region.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to register the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+

NOTES

+
       This routine registers a virtual memory region with a channel adapter.
+       Memory must be registered before being used in a data transfer operation.
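+
+       A minimal sketch registering a buffer for local access and remote RDMA
+       writes (the access flags shown are illustrative):
+
+               ib_mr_create_t  mr_create;
+               net32_t                 lkey, rkey;
+               ib_mr_handle_t  h_mr;
+               ib_api_status_t status;
+
+               mr_create.vaddr       = buf;            /* registered start address */
+               mr_create.length      = buf_len;        /* region length in bytes */
+               mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
+
+               status = ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &h_mr );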
+
+

SEE ALSO

+
       ib_dereg_mr, ib_reg_phys, ib_reg_shared, ib_mr_create_t
+
+
+
[Functions] Access Layer/ib_reg_phys

+

NAME

+
       ib_reg_phys
+
+

DESCRIPTION

+
       Registers a physical memory region with a channel adapter.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_phys(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_phys_create_t* const         p_phys_create,
+        IN      OUT                     uint64_t* const                         p_vaddr,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+                OUT                     ib_mr_handle_t* const           ph_mr );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an existing protection domain that the memory
+               should be registered with.
+
+       p_phys_create
+               [in] Information describing the memory region to register.
+
+       p_vaddr
+               [in/out] On input, references the requested virtual address for the
+               start of the physical region.  On output, references the actual
+               virtual address assigned to the registered region.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing
+               RDMA or atomic operations to this registered memory region.
+
+       ph_mr
+               [out] Upon successful completion of this call, this references a
+               handle to the registered memory region.  This handle is used when
+               performing data transfers and to deregister the memory.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The physical memory region was successfully registered.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain to associate with the physical memory region
+               was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the physical memory region information, virtual address,
+               lkey, rkey, or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the physical memory region.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to register the physical memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+

NOTES

+
       This routine registers an array of physical pages as a single virtually
+       contiguous region with a channel adapter.  Memory must be registered
+       before being used in a data transfer operation.
+
+

SEE ALSO

+
       ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_phys_create_t
+
+
+
[Functions] Access Layer/ib_reg_pnp

+

NAME

+
       ib_reg_pnp
+
+

DESCRIPTION

+
       Routine used to register for notification of local and I/O controller
+       assignment events.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_pnp(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_pnp_req_t* const                     p_pnp_req,
+                OUT                     ib_pnp_handle_t* const          ph_pnp );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       p_pnp_req
+               [in] Specifies the type of events that the user wishes to be notified
+               of, along with information needed to process the completed query.
+
+       ph_pnp
+               [out] Upon successful completion of this call, this references a handle
+               to the PnP notification request.  This handle may be used to cancel the
+               notification registration.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The PnP registration was successful.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the PnP request information or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register for PnP notification.
+
+

NOTES

+
       This routine registers the calling client with the access layer for
+       notification of locally occurring events, or the assignment of I/O
+       controllers to a local device.  Once registered, a client will receive
+       notification, via a callback, that a given event has occurred on a
+       local device.  Clients may restrict the types of events and devices
+       that are reported.  The p_pnp_req parameter is used to indicate which
+       device events to report to the user.
+
+       Upon invoking this routine, the client may receive a callback through
+       the ib_pfn_pnp_cb_t routine to notify them of the current system state.
+       For example, if a client registers for notification of port up events,
+       then the access layer will notify the client of all available ports when
+       this routine is first invoked.
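+
+       A minimal registration sketch; my_pnp_cb is a client-supplied
+       ib_pfn_pnp_cb_t implementation such as the one sketched earlier, and
+       my_context is an arbitrary client value:
+
+               ib_pnp_req_t    pnp_req;
+               ib_pnp_handle_t h_pnp;
+               ib_api_status_t status;
+
+               cl_memclr( &pnp_req, sizeof(pnp_req) );
+               pnp_req.pnp_class   = IB_PNP_PORT;      /* report port events */
+               pnp_req.pnp_context = my_context;
+               pnp_req.pfn_pnp_cb  = my_pnp_cb;
+
+               status = ib_reg_pnp( h_al, &pnp_req, &h_pnp );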
+
+

SEE ALSO

+
       ib_dereg_pnp, ib_pnp_req_t, ib_pnp_rec_t, ib_pfn_pnp_cb_t
+
+
+
[Functions] Access Layer/ib_reg_shared

+

NAME

+
       ib_reg_shared
+
+

DESCRIPTION

+
       Registers a memory region that has the same physical pages as an
+       existing registered memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_shared(
+        IN              const   ib_mr_handle_t                          h_mr,
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_access_t                                     access_ctrl,
+        IN      OUT                     uint64_t* const                         p_vaddr,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+                OUT                     ib_mr_handle_t* const           ph_mr );
+
+

PARAMETERS

+
       h_mr
+               [in] A handle to an existing registered memory region that this
+               registration should share physical pages with.
+
+       h_pd
+               [in] A handle to the protection domain on which the memory is
+               being registered.
+
+       access_ctrl
+               [in] Access rights of the registered region.
+
+       p_vaddr
+               [in/out] On input, this specifies the requested virtual address for the
+               start of the physical region.  On output, this references the actual
+               virtual address assigned to the registered region.  This is always a
+               64-bit quantity to support registering more than 4GB of memory on
+               32-bit systems with PAE.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing RDMA
+               or atomic operations to this registered memory region.
+
+       ph_mr
+               [out] Upon successful completion of this call, this references a handle
+               to the registered memory region.  This handle is used when performing
+               data transfers and to deregister the memory.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The shared memory region was successfully registered.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the virtual address, lkey, rkey, or handle was not
+               provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the shared memory region.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to register the shared memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+

NOTES

+
       This routine registers a memory region that shares the same set of
+       physical pages associated with an existing registered memory region.
+
+

SEE ALSO

+
       ib_dereg_mr, ib_reg_mem, ib_reg_phys, ib_mr_create_t
+
+
+
[Functions] Access Layer/ib_reg_shmid

+

NAME

+
       ib_reg_shmid
+
+

DESCRIPTION

+
       Registers a memory region to be shared across multiple processes.
+       The memory is referenced by a shared memory identifier.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_shmid(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   ib_shmid_t                                      shmid,
+        IN              const   ib_mr_create_t* const           p_mr_create,
+                OUT                     uint64_t* const                         p_vaddr,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+                OUT                     ib_mr_handle_t* const           ph_mr );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an existing protection domain that the memory
+               should be registered with.
+
+       shmid
+               [in] An identifier for the shared memory region.
+
+       p_mr_create
+               [in] Information describing the attributes of the memory region to
+               register.
+
+       p_vaddr
+               [out] The HCA assigned, HCA relative virtual address for the
+               memory region.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing RDMA
+               or atomic operations to this registered memory region.
+
+       ph_mr
+               [out] Upon successful completion of this call, this references a handle
+               to the registered memory region.  This handle is used when performing
+               data transfers and to deregister the memory.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The shared memory region was successfully registered.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the memory region information, lkey, rkey, or handle
+               was not provided.
+
+       IB_INVALID_SETTING
+               The length and page mapping for the memory region do not match those
+               of the region identified by the provided SHMID.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to register the shared memory region.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to register the shared memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+

NOTES

+
       This routine registers a memory region that is shared between processes.
+       The region being registered is identified through a shared memory
+       identifier.  The registered region shares hardware resources as much
+       as possible.
+
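+       As a minimal sketch (not part of the generated reference), a shared
+       region might be registered as follows; h_pd, the buffer, and the SHMID
+       are caller-supplied, and the ib_mr_create_t field names (vaddr, length,
+       access_ctrl) are assumed from its definition elsewhere in this header:

static ib_api_status_t
reg_my_shared_region(
        IN              const   ib_pd_handle_t                          h_pd,
        IN              const   ib_shmid_t                                      shmid,
        IN                              void*                                           p_buf,
        IN              const   uint64_t                                        buf_len,
                OUT                     ib_mr_handle_t* const           ph_mr )
{
        ib_mr_create_t  mr_create;
        uint64_t                vaddr;
        net32_t                 lkey, rkey;

        /* Field names assumed: vaddr, length, access_ctrl. */
        mr_create.vaddr       = p_buf;
        mr_create.length      = buf_len;
        mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;

        return ib_reg_shmid( h_pd, shmid, &mr_create,
                &vaddr, &lkey, &rkey, ph_mr );
}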
+

SEE ALSO

+
       ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_mr_create_t
+
+
+
+ +

[Functions] +Access Layer/ib_reg_svc

+ +

[top][parent][index]

+

NAME

+
       ib_reg_svc
+
+

DESCRIPTION

+
       Routine used to register a service with the subnet administrator.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reg_svc(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_reg_svc_req_t* const         p_reg_svc_req,
+                OUT                     ib_reg_svc_handle_t* const      ph_reg_svc );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       p_reg_svc_req
+               [in] Describes the service to register with the subnet administrator.
+
+       ph_reg_svc
+               [out] Pointer to a service registration handle, used to deregister
+               the service.  Set upon successful completion of the function.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The service registration was initiated.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the service registration request was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_NOT_FOUND
+               No channel adapters in the system contain the GID specified in the
+               service record.
+
+       IB_INVALID_GID
+               No port was found matching the GID specified in the service record.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine registers a service with the subnet administrator.  Registered
+       services are reported by the subnet administrator to clients querying the
+       subnet administrator for service information.
+
+       Once registered, a client will receive notification, via a callback,
+       that a service has been successfully registered.
+
+

SEE ALSO

+
       ib_dereg_svc, ib_reg_svc_req_t
+
+
+
+ +

[Structures] +Access Layer/ib_reg_svc_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_reg_svc_rec_t
+
+

DESCRIPTION

+
       Information returned as a result of registering a service with the subnet
+       administrator.  This includes name service registration.
+
+

SYNOPSIS

+
typedef struct _ib_reg_svc_rec
+{
+        const void* __ptr64                     svc_context;
+        ib_reg_svc_handle_t                     h_reg_svc;
+        ib_api_status_t                         req_status;
+        ib_net16_t                                      resp_status;
+        ib_service_record_t                     svc_rec;
+
+}       ib_reg_svc_rec_t;
+
+

FIELDS

+
       svc_context
+               User-defined context information associated with the registration
+               through the ib_reg_svc call.
+
+       req_status
+               Indicates the success of the registration operation.
+
+       resp_status
+               Indicates the status of the response from the SA.
+
+       h_reg_svc
+               The handle to the service registration, as returned through the
+               ib_reg_svc call.  This handle may be used to deregister the
+               service.
+
+       svc_rec
+               The service record returned by the SA for the registered service.
+
+

NOTES

+
       A registration result structure is returned to a client through their
+       ib_pfn_reg_svc_cb_t routine to notify them of the results of a service
+       registration request.
+
+

SEE ALSO

+
       ib_reg_svc, ib_pfn_reg_svc_cb_t, ib_reg_svc_status_t
+
+
+
+ +

[Structures] +Access Layer/ib_reg_svc_req_t

+ +

[top][parent][index]

+

NAME

+
       ib_reg_svc_req_t
+
+

DESCRIPTION

+
       Information used to request that a service be registered with the subnet
+       administrator.
+
+

SYNOPSIS

+
typedef struct _ib_reg_svc_req
+{
+        ib_service_record_t                     svc_rec;
+        ib_net64_t                                      port_guid;
+
+        uint32_t                                        timeout_ms;
+        uint32_t                                        retry_cnt;
+        ib_al_flags_t                           flags;
+
+        const void                                      *svc_context;
+        ib_net64_t                                      svc_data_mask;
+
+        ib_pfn_reg_svc_cb_t                     pfn_reg_svc_cb;
+
+}       ib_reg_svc_req_t;
+
+

FIELDS

+
       svc_rec
+               Service record that describes the service being registered.
+
+       port_guid
+               Directs the registration to use the specified port.  The request will
+               contact the management entity reachable through the given port.
+
+       timeout_ms
+               Specifies the number of milliseconds to wait for a response for
+               the registration until retrying or timing out the request.
+
+       retry_cnt
+               Specifies the number of times that the registration will be retried
+               before failing the request.
+
+       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       svc_context
+               User-defined context information associated with this registration
+               request.  This context is returned to the user through the function
+               specified by the pfn_reg_svc_cb field.
+
+       svc_data_mask
+               User-defined component mask indicating which parts of the private
+               data are populated.  This is used as an extension to the svc_id
+               when comparing data, and also provides a lightweight way to
+               communicate data to all clients of this service.
+
+       pfn_reg_svc_cb
+               A user-defined callback that is invoked upon completion of the
+               registration request.
+
+

NOTES

+
       This structure is used to register a service with the subnet administrator.
+       The registration call operates asynchronously unless the flags field is
+       set to IB_FLAGS_SYNC.  If synchronous operation is indicated, the client
+       will receive a callback with the results of the registration attempt
+       before the ib_reg_svc call returns.  Synchronous operation results in
+       the calling thread blocking.
+
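+       For illustration, a synchronous registration might be issued as in the
+       sketch below; the ib_pfn_reg_svc_cb_t signature is assumed, and
+       cl_memclr and UNUSED_PARAM are the usual complib helpers:

static void AL_API
my_reg_svc_cb(
        IN                              ib_reg_svc_rec_t                        *p_reg_svc_rec )
{
        /* req_status reports the request outcome; resp_status carries the
         * status returned by the SA itself. */
        UNUSED_PARAM( p_reg_svc_rec );
}

static ib_api_status_t
register_my_service(
        IN              const   ib_al_handle_t                          h_al,
        IN              const   ib_service_record_t* const      p_svc_rec,
        IN              const   ib_net64_t                                      port_guid,
                OUT                     ib_reg_svc_handle_t* const      ph_reg_svc )
{
        ib_reg_svc_req_t        req;

        cl_memclr( &req, sizeof(req) );
        req.svc_rec        = *p_svc_rec;
        req.port_guid      = port_guid;
        req.timeout_ms     = 1000;
        req.retry_cnt      = 3;
        req.flags          = IB_FLAGS_SYNC;    /* callback fires before return */
        req.pfn_reg_svc_cb = my_reg_svc_cb;

        return ib_reg_svc( h_al, &req, ph_reg_svc );
}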
+

SEE ALSO

+
       ib_reg_svc, ib_svc_rec_t, ib_pfn_reg_svc_cb_t
+
+
+
+ +

[Structures] +Access Layer/ib_rej_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_rej_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a connection reject message.
+
+

SYNOPSIS

+
typedef union _ib_rej_pdata
+{
+        uint8_t                                         data[IB_REJ_PDATA_SIZE];
+
+}       ib_rej_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Functions] +Access Layer/ib_reject_ioc

+ +

[top][parent][index]

+

NAME

+
       ib_reject_ioc
+
+

DESCRIPTION

+
       Rejects an I/O controller assignment to a host.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_reject_ioc(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_pnp_handle_t                         h_ioc_event );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       h_ioc_event
+               [in] A handle provided as part of the notification of an I/O controller
+               being assigned.  This handle is obtained through the ib_pnp_rec_t
+               structure given to a client through their ib_pfn_pnp_cb_t callback.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The I/O controller reject request was initiated.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_HANDLE
+               The I/O controller handle was invalid.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine rejects an I/O controller assigned by the configuration
+       manager to the local host.  The access layer sends a rejection notification
+       to the configuration manager and disables access to the controller from
+       the local host.  This routine must be called from a client's
+       ib_pfn_pnp_cb_t callback to reject a newly assigned I/O controller.
+
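+       As an illustrative sketch only, a PnP callback might reject a newly
+       assigned controller as shown below; the ib_pfn_pnp_cb_t signature and
+       the h_ioc_event field name are assumptions, and g_h_al is a
+       hypothetical global holding the client's access layer handle:

static ib_api_status_t AL_API
my_pnp_cb(
        IN                              ib_pnp_rec_t                            *p_pnp_rec )
{
        /* Reject the assignment from within the callback, as required.
         * The event handle field name is assumed here. */
        return ib_reject_ioc( g_h_al, p_pnp_rec->h_ioc_event );
}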
+

SEE ALSO

+
       ib_pfn_pnp_cb_t, ib_pnp_rec_t
+
+
+
+ +

[Functions] +Access Layer/ib_remove_svc_entry

+ +

[top][parent][index]

+

NAME

+
       ib_remove_svc_entry
+
+

DESCRIPTION

+
       This removes a service entry from an I/O controller.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_remove_svc_entry(
+        IN              const   ib_svc_handle_t                         h_svc );
+
+

PARAMETERS

+
       h_svc
+               [in] A handle to an existing service entry.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The service entry was successfully removed.
+
+       IB_INVALID_HANDLE
+               The service entry handle was invalid.
+
+

NOTES

+
       This routine removes the specified service from its associated I/O
+       controller.  Once removed, the service information will no longer be
+       exported along with the controller.
+
+

SEE ALSO

+
       ib_add_svc_entry
+
+
+
+ +

[Structures] +Access Layer/ib_rep_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_rep_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a reply to a request for communication.
+
+

SYNOPSIS

+
typedef union _ib_rep_pdata
+{
+        uint8_t                                         data[IB_REP_PDATA_SIZE];
+
+}       ib_rep_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_report_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_report_rec_t
+
+

DESCRIPTION

+
       Reported event information returned to the user when a subscribed-for
+       event occurs.
+
+

SYNOPSIS

+
typedef struct _ib_report_rec
+{
+        const void* __ptr64                             report_context;
+        ib_mad_notice_attr_t* __ptr64   p_notice;
+
+}       ib_report_rec_t;
+
+

FIELDS

+
       report_context
+               Client-defined context information specified when registering for
+               the report.
+
+       p_notice
+               Reported information that describes the event that has occurred.
+
+

NOTES

+
       Subscription for reported events is done through a class manager.  When
+       a class manager detects such an event, it will generate a
+       report to the subscribed client.  The reported information is referenced
+       through the p_notice field.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t
+
+
+
+ +

[Structures] +Access Layer/ib_req_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_req_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a request for communication.
+
+

SYNOPSIS

+
typedef union _ib_req_pdata
+{
+        uint8_t                                         data[IB_REQ_PDATA_SIZE];
+
+}       ib_req_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Functions] +Access Layer/ib_rereg_mem

+ +

[top][parent][index]

+

NAME

+
       ib_rereg_mem
+
+

DESCRIPTION

+
       Modifies the attributes of an existing memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_rereg_mem(
+        IN              const   ib_mr_handle_t                          h_mr,
+        IN              const   ib_mr_mod_t                                     mr_mod_mask,
+        IN              const   ib_mr_create_t* const           p_mr_create OPTIONAL,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+        IN              const   ib_pd_handle_t                          h_pd OPTIONAL );
+
+

PARAMETERS

+
       h_mr
+               [in] A handle to the registered memory region being modified.
+
+       mr_mod_mask
+               [in] A mask used to specify which attributes of the memory region are
+               being modified.
+
+       p_mr_create
+               [in] This references information needed to perform the modification on
+               the registered memory region.  This parameter may be NULL if only the
+               protection domain will be modified.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing RDMA
+               or atomic operations to this registered memory region.
+
+       h_pd
+               [in] An optionally provided parameter used to modify the protection
+               domain of a registered region.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory region attributes were modified successfully.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the lkey or rkey was not provided or the specified
+               modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This routine modifies the attributes of the specified memory region.
+       The memory being modified may have been registered using either virtual
+       or physical registration.  Conceptually, this routine is equivalent to
+       calling ib_dereg_mr followed by ib_reg_mem, but may perform better.
+
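+       As a brief sketch, moving an existing region to a different protection
+       domain could look as follows; the IB_MR_MOD_PD mask value is an
+       assumption taken from ib_mr_mod_t:

static ib_api_status_t
move_mr_to_pd(
        IN              const   ib_mr_handle_t                          h_mr,
        IN              const   ib_pd_handle_t                          h_new_pd,
                OUT                     net32_t* const                          p_lkey,
                OUT                     net32_t* const                          p_rkey )
{
        /* Only the PD changes, so p_mr_create may be NULL.  The keys may
         * change as a result, so callers must use the returned values. */
        return ib_rereg_mem( h_mr, IB_MR_MOD_PD, NULL,
                p_lkey, p_rkey, h_new_pd );
}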
+

SEE ALSO

+
       ib_reg_mem, ib_reg_phys, ib_dereg_mr, ib_mr_mod_t, ib_mr_create_t
+
+
+
+ +

[Functions] +Access Layer/ib_rereg_phys

+ +

[top][parent][index]

+

NAME

+
       ib_rereg_phys
+
+

DESCRIPTION

+
       Modifies the attributes of an existing memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_rereg_phys(
+        IN              const   ib_mr_handle_t                          h_mr,
+        IN              const   ib_mr_mod_t                                     mr_mod_mask,
+        IN              const   ib_phys_create_t* const         p_phys_create OPTIONAL,
+        IN      OUT                     uint64_t* const                         p_vaddr,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey,
+        IN              const   ib_pd_handle_t                          h_pd OPTIONAL );
+
+

PARAMETERS

+
       h_mr
+               [in] A handle to the registered memory region being modified.
+
+       mr_mod_mask
+               [in] A mask used to specify which attributes of the memory region are
+               being modified.
+
+       p_phys_create
+               [in] This references information needed to perform the modification on
+               the registered memory region.  This parameter may be NULL if
+               only the protection domain will be modified.
+
+       p_vaddr
+               [in/out] On input, this specifies the requested virtual address for the
+               start of the physical region.  On output, this references the actual
+               virtual address assigned to the registered region.
+
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing RDMA
+               or atomic operations to this registered memory region.
+
+       h_pd
+               [in] An optionally provided parameter used to modify the protection
+               domain of a registered region.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The memory region attributes were modified successfully.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the virtual address, lkey, or rkey was not provided,
+               or the specified modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This routine modifies the attributes of the specified memory region.
+       The memory being modified may have been registered using either virtual
+       or physical registration.  Conceptually, this routine is equivalent to
+       calling ib_dereg_mr followed by ib_reg_phys, but may perform better.
+
+

SEE ALSO

+
       ib_reg_mem, ib_reg_phys, ib_dereg_mr, ib_mr_mod_t, ib_mr_create_t
+
+
+
+ +

[Structures] +Access Layer/ib_rtu_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_rtu_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a ready to use message.
+
+

SYNOPSIS

+
typedef union _ib_rtu_pdata
+{
+        uint8_t                                         data[IB_RTU_PDATA_SIZE];
+
+}       ib_rtu_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Functions] +Access Layer/ib_send_mad

+ +

[top][parent][index]

+

NAME

+
       ib_send_mad
+
+

DESCRIPTION

+
       This routine posts MAD work requests to a MAD service for transmission.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_send_mad(
+        IN              const   ib_mad_svc_handle_t                     h_mad_svc,
+        IN                              ib_mad_element_t* const         p_mad_element_list,
+                OUT                     ib_mad_element_t                        **pp_mad_failure OPTIONAL );
+
+

PARAMETERS

+
       h_mad_svc
+               [in] The MAD service to which this work request is being submitted.
+
+       p_mad_element_list
+               [in] A list of MAD elements that will be posted to the send queue.
+
+       pp_mad_failure
+               [out] If the send MAD operation failed, this references the MAD
+               element in the p_mad_element_list where the first failure occurred.
+               This parameter is optional if p_mad_element_list contains a single
+               MAD.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The MAD element list was successfully posted.
+
+       IB_INVALID_HANDLE
+               The MAD service handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the MAD element list was not provided.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available to complete
+               the request.
+
+       IB_INVALID_SETTING
+               The MAD element RMPP version is not supported by the access layer.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to complete the request.
+
+

NOTES

+
       This routine posts a work request to send a MAD on a MAD service.  All
+       MAD elements successfully posted by this call are under the control of
+       the access layer and should not be accessed until the send operation
+       completes.
+
+       In order to guarantee that MADs sent by separate clients do not use the
+       same transaction ID, the access layer reserves the upper 32-bits of the
+       TID on all unsolicited MADs.  MADs sent with the response bit set will
+       not have their transaction IDs modified.  Unsolicited MADs will have the
+       upper 32-bits of their TID set to an access layer generated client ID.
+
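+       A minimal sketch of posting a single MAD element follows; the p_next
+       list field of ib_mad_element_t is an assumption from its definition
+       elsewhere in this reference:

static ib_api_status_t
send_one_mad(
        IN              const   ib_mad_svc_handle_t                     h_mad_svc,
        IN                              ib_mad_element_t* const         p_mad )
{
        p_mad->p_next = NULL;   /* single-element list; field name assumed */

        /* pp_mad_failure may be omitted for a single-MAD list. */
        return ib_send_mad( h_mad_svc, p_mad, NULL );
}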
+

SEE ALSO

+
       ib_mad_element_t, ib_cancel_mad
+
+
+
+ +

[Structures] +Access Layer/ib_shmid_t

+ +

[top][parent][index]

+

NAME

+
       ib_shmid_t
+
+

DESCRIPTION

+
       Shared Memory Identifier, used to uniquely identify a shared memory region.
+
+

SYNOPSIS

+
typedef uint8_t         ib_shmid_t[64];
+
+

SEE ALSO

+
       ib_reg_shmid
+
+
+
+ +

[Structures] +Access Layer/ib_sidr_rep_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_sidr_rep_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a service ID resolution reply.
+
+

SYNOPSIS

+
typedef union _ib_sidr_rep_pdata
+{
+        uint8_t                                         data[IB_SIDR_REP_PDATA_SIZE];
+
+}       ib_sidr_rep_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_sidr_req_pdata_t

+ +

[top][parent][index]

+

NAME

+
       ib_sidr_req_pdata_t
+
+

DESCRIPTION

+
       User data sent as part of a service ID resolution request.
+
+

SYNOPSIS

+
typedef union _ib_sidr_req_pdata
+{
+        uint8_t                                         data[IB_SIDR_REQ_PDATA_SIZE];
+
+}       ib_sidr_req_pdata_t;
+
+

SEE ALSO

+
       ib_cm_data_sizes_t
+
+
+
+ +

[Structures] +Access Layer/ib_sub_rec_t

+ +

[top][parent][index]

+

NAME

+
       ib_sub_rec_t
+
+

DESCRIPTION

+
       Information returned to a user that indicates the result of a subscription
+       request.
+
+

SYNOPSIS

+
typedef struct _ib_sub_rec
+{
+        const void* __ptr64                     sub_context;
+        ib_api_status_t                         status;
+        ib_sub_handle_t                         h_sub;
+
+}       ib_sub_rec_t;
+
+

FIELDS

+
       sub_context
+               References user-defined context information associated with the
+               subscription request.  This field is set by the user through the
+               ib_subscribe routine.
+
+       status
+               Indicates the success of the subscription request.
+
+       h_sub
+               The handle to the subscription request that was returned to the user
+               from the ib_subscribe call.  This handle is provided to the user to
+               avoid a race condition between the return of the ib_subscribe routine
+               and the notification of an event.
+
+

NOTES

+
       This structure is returned to the user to notify them of the results
+       of a subscription request.  After successfully subscribing with a
+       class manager for an event, this structure will be returned to the user
+       with the status set to IB_SUCCESS.  The sub_context field will be set
+       to the context specified through the p_sub_req parameter in the
+       ib_subscribe routine.
+
+

SEE ALSO

+
       ib_subscribe
+
+
+
+ +

[Structures] +Access Layer/ib_sub_req_t

+ +

[top][parent][index]

+

NAME

+
       ib_sub_req_t
+
+

DESCRIPTION

+
       Information used to subscribe for event notification from a class
+       manager.
+
+

SYNOPSIS

+
typedef struct _ib_sub_req
+{
+        ib_svc_name_t* __ptr64          p_class_mgr_name;
+        ib_inform_info_t* __ptr64       p_inform_info;
+        ib_net64_t                                      port_guid;
+
+        uint32_t                                        timeout_ms;
+        uint32_t                                        retry_cnt;
+        ib_al_flags_t                           flags;
+
+        const void* __ptr64                     sub_context;
+        ib_pfn_sub_cb_t                         pfn_sub_cb;
+
+        const void* __ptr64                     report_context;
+        ib_pfn_report_cb_t                      pfn_report_cb;
+
+}       ib_sub_req_t;
+
+

FIELDS

+
       p_class_mgr_name
+               The service name of the class manager to subscribe for events with.
+
+       p_inform_info
+               Information describing the type of event being subscribed to.
+
+       port_guid
+               Directs the subscription request to use the specified port.  The
+               request will contact the subnet administrator reachable through the
+               given port.
+
+       timeout_ms
+               Specifies the number of milliseconds to wait for a response for
+               this subscription until retrying or timing out the request.
+
+       retry_cnt
+               Specifies the number of times that the query will be retried before
+               failing the request.
+
+       flags
+               Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+               process the called routine synchronously.
+
+       sub_context
+               User-defined context information associated with this subscription
+               request.  This context is returned to the user through the function
+               specified by the pfn_sub_cb field.
+
+       pfn_sub_cb
+               A user-defined callback that is invoked upon completion of the
+               subscription request.  This is used to notify a client of the
+               result of their subscription request.
+
+       report_context
+               User-defined context information associated with this subscription.
+               This context is returned to the user through the client's
+               ib_pfn_report_cb_t callback routine specified in ib_open_al.
+
+       pfn_report_cb
+               A user-defined callback that is invoked to notify the user that an
+               event report has been received.
+
+

NOTES

+
       This structure is used to subscribe for events with a class manager.  Both
+       the subscription request and any corresponding event notifications operate
+       asynchronously.  Clients will be notified of the result of their
+       subscription request before receiving notification of associated events.
+
+

SEE ALSO

+
       ib_subscribe, ib_svc_name_t, ib_inform_info_t, ib_pfn_sub_cb_t,
+       ib_pfn_report_cb_t, ib_open_al
+
+
+
+ +

[Functions] +Access Layer/ib_subscribe

+ +

[top][parent][index]

+

NAME

+
       ib_subscribe
+
+

DESCRIPTION

+
       Subscribe with a class manager for event notification.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_subscribe(
+        IN              const   ib_al_handle_t                          h_al,
+        IN              const   ib_sub_req_t* const                     p_sub_req,
+                OUT                     ib_sub_handle_t* const          ph_sub );
+
+

PARAMETERS

+
       h_al
+               [in] A handle to an open instance of the access layer.
+
+       p_sub_req
+               [in] Specifies the type of events that the user wishes to be
+               notified of, along with information needed to process the completed
+               subscription.
+
+       ph_sub
+               [out] Upon successful completion of this call, this references a handle
+               to the subscription request.  This handle may be used to unsubscribe
+               from the events.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The subscription request was initiated.
+
+       IB_INVALID_AL_HANDLE
+               The access layer handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the subscription request or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_INVALID_GUID
+               No port was found for the port_guid specified in the request.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine registers the calling client with a class manager for
+       notification of events.  Once registered, a client will receive
+       notification, via a callback, that a given event has occurred on
+       a device managed by the class manager.
+
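+       For illustration only, a subscription request might be built as in the
+       sketch below; the callback signatures are assumed from ib_pfn_sub_cb_t
+       and ib_pfn_report_cb_t, and cl_memclr/UNUSED_PARAM come from complib:

static void AL_API
my_sub_cb(
        IN                              ib_sub_rec_t                            *p_sub_rec )
{
        /* p_sub_rec->status reports whether the subscription succeeded. */
        UNUSED_PARAM( p_sub_rec );
}

static void AL_API
my_report_cb(
        IN                              ib_report_rec_t                         *p_report_rec )
{
        /* p_report_rec->p_notice describes the event that occurred. */
        UNUSED_PARAM( p_report_rec );
}

static ib_api_status_t
subscribe_for_events(
        IN              const   ib_al_handle_t                          h_al,
        IN                              ib_inform_info_t* const         p_inform_info,
        IN              const   ib_net64_t                                      port_guid,
                OUT                     ib_sub_handle_t* const          ph_sub )
{
        ib_sub_req_t    req;

        cl_memclr( &req, sizeof(req) );
        req.p_inform_info = p_inform_info;
        req.port_guid     = port_guid;
        req.timeout_ms    = 1000;
        req.retry_cnt     = 3;
        req.pfn_sub_cb    = my_sub_cb;
        req.pfn_report_cb = my_report_cb;

        return ib_subscribe( h_al, &req, ph_sub );
}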
+

SEE ALSO

+
       ib_unsubscribe, ib_sub_req_t, ib_pfn_sub_cb_t, ib_pfn_report_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_sync_destroy

+ +

[top][parent][index]

+

NAME

+
       ib_sync_destroy
+
+

DESCRIPTION

+
       Access layer routine used to indicate synchronous destruction of an
+       object.
+
+

SYNOPSIS

+
static const ib_pfn_destroy_cb_t ib_sync_destroy = (ib_pfn_destroy_cb_t)-1i64;
+
+

PARAMETERS

+
       Not Applicable.
+
+

NOTES

+
       Users specify ib_sync_destroy as the ib_pfn_destroy_cb_t callback in order
+       to force synchronous object destruction.  This may result in the calling
+       thread blocking while outstanding callbacks complete.
+
+

SEE ALSO

+
       ib_pfn_destroy_cb_t
+
+
+
+ +

[Functions] +Access Layer/ib_unsubscribe

+ +

[top][parent][index]

+

NAME

+
       ib_unsubscribe
+
+

DESCRIPTION

+
       Unsubscribe with a class manager for event notification.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+ib_unsubscribe(
+        IN              const   ib_sub_handle_t                         h_sub,
+        IN              const   ib_pfn_destroy_cb_t                     pfn_destroy_cb OPTIONAL );
+
+

PARAMETERS

+
       h_sub
+               [in] A handle to a subscribed event.
+
+       pfn_destroy_cb
+               [in] A user-specified callback that is invoked after the subscription
+               request has been successfully canceled.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The unsubscribe request was initiated.
+
+       IB_INVALID_HANDLE
+               The subscription handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the subscription request or handle was not provided.
+
+       IB_INSUFFICIENT_MEMORY
+               There was insufficient memory to perform the operation.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to perform the operation.
+
+

NOTES

+
       This routine cancels an active or pending event subscription with a class
+       manager.  To avoid a race condition canceling a subscription at the same
+       time an event notification callback is in progress, the unsubscribe
+       operation operates asynchronously.  For additional details see
+       ib_pfn_destroy_cb_t.
+
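+       As a sketch, a client that must guarantee no further callbacks after
+       cancellation can pass ib_sync_destroy, at the cost of blocking the
+       calling thread (and so must not do this from a callback context):

static ib_api_status_t
cancel_subscription(
        IN              const   ib_sub_handle_t                         h_sub )
{
        /* Blocks until outstanding event callbacks have completed. */
        return ib_unsubscribe( h_sub, ib_sync_destroy );
}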
+

SEE ALSO

+
       ib_subscribe, ib_pfn_destroy_cb_t
+
+
+
+ +

[Structures] +Access Layer/ib_user_query_t

+ +

[top][parent][index]

+

NAME

+
       ib_user_query_t
+
+

DESCRIPTION

+
       User-defined query information.
+
+

SYNOPSIS

+
typedef struct _ib_user_query
+{
+        uint8_t                                 method;
+        ib_net16_t                              attr_id;
+        uint32_t                                attr_size;
+        ib_net64_t                              comp_mask;
+        void* __ptr64                   p_attr;
+
+}       ib_user_query_t;
+
+

FIELDS

+
       method
+               Method to be run.
+
+       attr_id
+               Attribute identifier of query data.
+
+       attr_size
+               Size of the query attribute in bytes.  This is translated into the
+               attr_offset field of the SA MAD by the ib_query call.
+
+       comp_mask
+               Indicates the attribute components that are specified for the query.
+
+       p_attr
+               References the attribute structure used as input into the query.
+               This field is ignored if comp_mask is set to 0.
+
+

NOTES

+
       This structure is used to describe a user-defined query.  The attribute
+       ID, attribute offset, component mask, and attribute structure must match
+       those defined by the IBA specification.  Users should refer to chapter 15
+       of the IBA specification for additional details.
+
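+       For illustration, a user-defined query for a service record might be
+       built as below; the method, attribute, and component mask constants
+       (IB_MAD_METHOD_GET, IB_MAD_ATTR_SERVICE_RECORD, IB_SR_COMPMASK_SID)
+       and the service_id field name are assumptions from ib_types.h:

static void
build_svc_query(
        IN              const   ib_net64_t                                      service_id,
        IN      OUT                     ib_service_record_t* const      p_svc_rec,
                OUT                     ib_user_query_t* const          p_user_query )
{
        cl_memclr( p_svc_rec, sizeof(*p_svc_rec) );
        p_svc_rec->service_id = service_id;             /* field name assumed */

        p_user_query->method    = IB_MAD_METHOD_GET;
        p_user_query->attr_id   = IB_MAD_ATTR_SERVICE_RECORD;
        p_user_query->attr_size = sizeof(ib_service_record_t);
        p_user_query->comp_mask = IB_SR_COMPMASK_SID;   /* match on ID only */
        p_user_query->p_attr    = p_svc_rec;
}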
+

SEE ALSO

+
       ib_query_type_t, ib_query, ib_get_attr_offset, ib_get_attr_size
+
+
+
+ +

[Functions] +Access Layer/mlnx_create_fmr

+ +

[top][parent][index]

+

NAME

+
       mlnx_create_fmr
+
+

DESCRIPTION

+
       Creates a Mellanox fast memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+mlnx_create_fmr(
+        IN              const   ib_pd_handle_t                          h_pd,
+        IN              const   mlnx_fmr_create_t* const        p_fmr_create,
+                OUT                     mlnx_fmr_handle_t* const        ph_fmr );
+
+

PARAMETERS

+
       h_pd
+               [in] A handle to an existing protection domain that the fast
+               memory region should be associated with.
+       p_fmr_create
+               [in] Information describing the attributes of the fast memory
+               region to create.
+       ph_fmr
+               [out] Upon successful completion of this call, this references a
+               handle to the created fast memory region.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The fast memory region was successfully created.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the lkey or rkey was not provided or the specified
+               modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This is a Mellanox specific extension to verbs.
+
+

SEE ALSO

+
       mlnx_destroy_fmr, mlnx_fmr_create_t
+
+
+
+ +

[Functions] +Access Layer/mlnx_destroy_fmr

+ +

[top][parent][index]

+

NAME

+
       mlnx_destroy_fmr
+
+

DESCRIPTION

+
       Destroys an existing Mellanox fast memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+mlnx_destroy_fmr(
+        IN              const   mlnx_fmr_handle_t                       h_fmr );
+
+

PARAMETERS

+
       h_fmr
+               [in] A handle to an existing fast memory region to destroy.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The fast memory region was successfully destroyed.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the lkey or rkey was not provided or the specified
+               modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This is a Mellanox specific extension to verbs.
+
+

SEE ALSO

+
       mlnx_destroy_fmr, mlnx_fmr_create_t
+
+
+
+ +

[Functions] +Access Layer/mlnx_map_fmr

+ +

[top][parent][index]

+

NAME

+
       mlnx_map_fmr
+
+

DESCRIPTION

+
       Maps a list of physical pages to an existing fast memory region.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+mlnx_map_phys_fmr(
+        IN              const   mlnx_fmr_handle_t                       h_fmr,
+        IN              const   uint64_t* const                         paddr_list,
+        IN              const   int                                                     list_len,
+        IN      OUT                     uint64_t* const                         p_vaddr,
+                OUT                     net32_t* const                          p_lkey,
+                OUT                     net32_t* const                          p_rkey );
+
+

PARAMETERS

+
       h_fmr
+               [in] A handle to the fast memory region that these pages map to.
+       paddr_list
+               [in] An array of physical addresses to map.
+       list_len
+               [in] The number of pages in the list.
+       p_vaddr
+               [in/out] On input, references the requested virtual address for the
+               start of the FMR.  On output, references the actual
+               virtual address assigned to the FMR.
+       p_lkey
+               [out] The local access key associated with this registered memory
+               region.
+       p_rkey
+               [out] A key that may be used by a remote end-point when performing
+               RDMA or atomic operations to this registered memory region.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The page list was successfully mapped to the fast memory region.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the lkey or rkey was not provided or the specified
+               modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This is a Mellanox specific extension to verbs.
+
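+       A sketch of the expected life cycle follows: map a page list into an
+       existing fast memory region, use the returned keys for data transfer,
+       then unmap.  The page-alignment requirement on paddr_list entries is
+       an assumption:

static ib_api_status_t
fmr_map_use_unmap(
        IN              const   mlnx_fmr_handle_t                       h_fmr,
        IN              const   uint64_t* const                         paddr_list,
        IN              const   int                                                     list_len )
{
        uint64_t                vaddr = 0;
        net32_t                 lkey, rkey;
        ib_api_status_t status;

        status = mlnx_map_phys_fmr( h_fmr, paddr_list, list_len,
                &vaddr, &lkey, &rkey );
        if( status != IB_SUCCESS )
                return status;

        /* ... post work requests that reference lkey/rkey ... */

        return mlnx_unmap_fmr( h_fmr );
}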
+

SEE ALSO

+
       mlnx_destroy_fmr, mlnx_fmr_create_t
+
+
+
+ +

[Functions] +Access Layer/mlnx_unmap_fmr

+ +

[top][parent][index]

+

NAME

+
       mlnx_unmap_fmr
+
+

DESCRIPTION

+
       Unmaps a fast memory region, invalidating its current page mapping.
+
+

SYNOPSIS

+
AL_EXPORT ib_api_status_t AL_API
+mlnx_unmap_fmr(
+        IN              const   mlnx_fmr_handle_t                       h_fmr );
+
+

PARAMETERS

+
       h_fmr
+               [in] A handle to the fast memory region to unmap.
+
+ RETURN VALUES
+       IB_SUCCESS
+               The fast memory region was successfully unmapped.
+
+       IB_INVALID_MR_HANDLE
+               The memory region handle was invalid.
+
+       IB_INVALID_PARAMETER
+               A reference to the lkey or rkey was not provided or the specified
+               modify mask is invalid.
+
+       IB_INVALID_SETTING
+               The specified memory region attributes are invalid.
+
+       IB_INVALID_PD_HANDLE
+               The protection domain handle was invalid.
+
+       IB_INSUFFICIENT_RESOURCES
+               There were insufficient resources currently available on the channel
+               adapter to modify the memory region.
+
+       IB_UNSUPPORTED
+               The requested access rights are not supported by the channel adapter.
+
+       IB_INVALID_PERMISSION
+               The requested access rights are invalid.
+
+       IB_RESOURCE_BUSY
+               The memory region has windows bound to it.
+
+

NOTES

+
       This is a Mellanox specific extension to verbs.
+
+

SEE ALSO

+
       mlnx_destroy_fmr, mlnx_fmr_create_t
+
+
+
+
diff --git a/branches/Ndi/docs/iba/ib_types_h.html b/branches/Ndi/docs/iba/ib_types_h.html
new file mode 100644
index 00000000..3fb64687
--- /dev/null
+++ b/branches/Ndi/docs/iba/ib_types_h.html
@@ -0,0 +1,10744 @@
+./inc_docs/iba/ib_types_h.html
+
+Generated from ./inc/iba/ib_types.h with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:50
+
+ +

[Modules] +IBA Base/Constants

+ +

[top][index]

+

NAME

+
       Constants
+
+

DESCRIPTION

+
       The following constants are used throughout the IBA code base.
+
+       Definitions are from the InfiniBand Architecture Specification v1.2
+
+
+
+ +

[Modules] +IBA Base/Type Definitions

+ +

[top][index]

+

NAME

+
       Type Definitions
+
+

DESCRIPTION

+
       Definitions are from the InfiniBand Architecture Specification v1.2
+
+
+
+ +

[Definitions] +Access Layer/ib_access_t

+ +

[top][parent][index]

+

NAME

+
       ib_access_t
+
+

DESCRIPTION

+
       Indicates the type of access that is permitted on resources such as QPs,
+       memory regions and memory windows.
+
+

SYNOPSIS

+
typedef uint32_t                                ib_access_t;
+#define IB_AC_RDMA_READ                 0x00000001
+#define IB_AC_RDMA_WRITE                0x00000002
+#define IB_AC_ATOMIC                    0x00000004
+#define IB_AC_LOCAL_WRITE               0x00000008
+#define IB_AC_MW_BIND                   0x00000010
+
+

NOTES

+
       Users may combine access rights using a bit-wise or operation to specify
+       additional access.  For example: IB_AC_RDMA_READ | IB_AC_RDMA_WRITE grants
+       RDMA read and write access.
+
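+       For example, a region intended as a remote RDMA write target could be
+       granted rights as in this small sketch:

        ib_access_t     access_ctrl;

        /* Local write is required for the HCA to place incoming data. */
        access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;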
+
+
+ +

[Definitions] +Access Layer/ib_api_status_t

+ +

[top][parent][index]

+

NAME

+
       ib_api_status_t
+
+

DESCRIPTION

+
       Function return codes indicating the success or failure of an API call.
+       Note that success is indicated by the return value IB_SUCCESS, which
+       is always zero.
+
+

NOTES

+
       IB_VERBS_PROCESSING_DONE is used by the UVP library to terminate a verbs
+       call in the pre-ioctl step itself.
+
+

SYNOPSIS

+
typedef enum _ib_api_status_t
+{
+        IB_SUCCESS,
+        IB_INSUFFICIENT_RESOURCES,
+        IB_INSUFFICIENT_MEMORY,
+        IB_INVALID_PARAMETER,
+        IB_INVALID_SETTING,
+        IB_NOT_FOUND,
+        IB_TIMEOUT,
+        IB_CANCELED,
+        IB_INTERRUPTED,
+        IB_INVALID_PERMISSION,
+        IB_UNSUPPORTED,
+        IB_OVERFLOW,
+        IB_MAX_MCAST_QPS_REACHED,
+        IB_INVALID_QP_STATE,
+        IB_INVALID_APM_STATE,
+        IB_INVALID_PORT_STATE,
+        IB_INVALID_STATE,
+        IB_RESOURCE_BUSY,
+        IB_INVALID_PKEY,
+        IB_INVALID_LKEY,
+        IB_INVALID_RKEY,
+        IB_INVALID_MAX_WRS,
+        IB_INVALID_MAX_SGE,
+        IB_INVALID_CQ_SIZE,
+        IB_INVALID_SRQ_SIZE,
+        IB_INVALID_SERVICE_TYPE,
+        IB_INVALID_GID,
+        IB_INVALID_LID,
+        IB_INVALID_GUID,
+        IB_INVALID_CA_HANDLE,
+        IB_INVALID_AV_HANDLE,
+        IB_INVALID_CQ_HANDLE,
+        IB_INVALID_QP_HANDLE,
+        IB_INVALID_SRQ_HANDLE,
+        IB_INVALID_PD_HANDLE,
+        IB_INVALID_MR_HANDLE,
+        IB_INVALID_FMR_HANDLE,
+        IB_INVALID_MW_HANDLE,
+        IB_INVALID_MCAST_HANDLE,
+        IB_INVALID_CALLBACK,
+        IB_INVALID_AL_HANDLE,                                   /* InfiniBand Access Layer */
+        IB_INVALID_HANDLE,                                              /* InfiniBand Access Layer */
+        IB_ERROR,                                                               /* InfiniBand Access Layer */
+        IB_REMOTE_ERROR,                                                /* Infiniband Access Layer */
+        IB_VERBS_PROCESSING_DONE,                               /* See Notes above                 */
+        IB_INVALID_WR_TYPE,
+        IB_QP_IN_TIMEWAIT,
+        IB_EE_IN_TIMEWAIT,
+        IB_INVALID_PORT,
+        IB_NOT_DONE,
+        IB_INVALID_INDEX,
+        IB_NO_MATCH,
+        IB_PENDING,
+        IB_UNKNOWN_ERROR                                                /* ALWAYS LAST ENUM VALUE! */
+
+}       ib_api_status_t;
+
+
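+       Because IB_SUCCESS is guaranteed to be zero, callers may treat any
+       non-zero status as a failure, as in this small sketch:

static boolean_t
al_call_failed(
        IN              const   ib_api_status_t                         status )
{
        /* Any non-zero ib_api_status_t value indicates an error. */
        return( status != IB_SUCCESS );
}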
+
+ +

[Definitions] +Access Layer/ib_apm_state_t

+ +

[top][parent][index]

+

NAME

+
       ib_apm_state_t
+
+

DESCRIPTION

+
       The current automatic path migration state of a queue pair.
+
+

SYNOPSIS

+
typedef enum _ib_apm_state
+{
+        IB_APM_MIGRATED = 1,
+        IB_APM_REARM,
+        IB_APM_ARMED
+
+}       ib_apm_state_t;
+
+
+
+ +

[Definitions] +Access Layer/ib_apr_status_t

+ +

[top][parent][index]

+

NAME

+
       ib_apr_status_t
+
+

DESCRIPTION

+
       Automatic path migration status information.
+
+

SYNOPSIS

+
typedef uint8_t                                                         ib_apr_status_t;
+
+

SEE ALSO

+
       ib_cm_apr, ib_cm_apr_rec_t
+
+

SOURCE

+
#define IB_AP_SUCCESS                                           0
+#define IB_AP_INVALID_COMM_ID                           1
+#define IB_AP_UNSUPPORTED                                       2
+#define IB_AP_REJECT                                            3
+#define IB_AP_REDIRECT                                          4
+#define IB_AP_IS_CURRENT                                        5
+#define IB_AP_INVALID_QPN                                       6
+#define IB_AP_INVALID_LID                                       7
+#define IB_AP_INVALID_GID                                       8
+#define IB_AP_INVALID_FLOW_LBL                          9
+#define IB_AP_INVALID_TCLASS                            10
+#define IB_AP_INVALID_HOP_LIMIT                         11
+#define IB_AP_INVALID_PKT_RATE                          12
+#define IB_AP_INVALID_SL                                        13
+
+
+
+ +

[Definitions] +Access Layer/ib_atomic_t

+ +

[top][parent][index]

+

NAME

+
       ib_atomic_t
+
+

DESCRIPTION

+
       Indicates atomicity levels supported by an adapter.
+
+

SYNOPSIS

+
typedef enum _ib_atomic_t
+{
+        IB_ATOMIC_NONE,
+        IB_ATOMIC_LOCAL,
+        IB_ATOMIC_GLOBAL
+
+}       ib_atomic_t;
+
+

VALUES

+
       IB_ATOMIC_NONE
+               Atomic operations not supported.
+
+       IB_ATOMIC_LOCAL
+               Atomic operations guaranteed between QPs of a single CA.
+
+       IB_ATOMIC_GLOBAL
+               Atomic operations are guaranteed between CA and any other entity
+               in the system.
+
+
+
+ +

[Structures] +Access Layer/ib_av_attr_t

+ +

[top][parent][index]

+

NAME

+
       ib_av_attr_t
+
+

DESCRIPTION

+
       IBA address vector.
+
+

SYNOPSIS

+
typedef struct _ib_av_attr
+{
+        uint8_t                                 port_num;
+
+        uint8_t                                 sl;
+        ib_net16_t                              dlid;
+
+        boolean_t                               grh_valid;
+        ib_grh_t                                grh;
+        uint8_t                                 static_rate;
+        uint8_t                                 path_bits;
+
+        struct _av_conn
+        {
+                uint8_t                         path_mtu;
+                uint8_t                         local_ack_timeout;
+                uint8_t                         seq_err_retry_cnt;
+                uint8_t                         rnr_retry_cnt;
+
+        }       conn;
+
+}       ib_av_attr_t;
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +

[Structures] +Access Layer/ib_bind_wr_t

+ +

[top][parent][index]

+

NAME

+
       ib_bind_wr_t
+
+

DESCRIPTION

+
       Information used to submit a memory window bind work request to the send
+       queue of a queue pair.
+
+

SYNOPSIS

+
typedef struct _ib_bind_wr
+{
+        uint64_t                                wr_id;
+        ib_send_opt_t                   send_opt;
+
+        ib_mr_handle_t                  h_mr;
+        ib_access_t                             access_ctrl;
+        net32_t                                 current_rkey;
+
+        ib_local_ds_t                   local_ds;
+
+}       ib_bind_wr_t;
+
+

FIELDS

+
       wr_id
+               A 64-bit work request identifier that is returned to the consumer
+               as part of the work completion.
+
+       send_opt
+               Optional send control parameters.
+
+       h_mr
+               Handle to the memory region to which this window is being bound.
+
+       access_ctrl
+               Access rights for this memory window.
+
+       current_rkey
+               The current rkey assigned to this window for remote access.
+
+       local_ds
+               A reference to a local data segment used by the bind operation.
+
+

SEE ALSO

+
       ib_send_opt_t, ib_access_t, ib_local_ds_t
+
+
+
+ +

[Structures] +Access Layer/ib_ca_attr_t

+ +

[top][parent][index]

+

NAME

+
       ib_ca_attr_t
+
+

DESCRIPTION

+
       Information about a channel adapter.
+
+

SYNOPSIS

+
typedef struct _ib_ca_attr
+{
+        ib_net64_t                              ca_guid;
+
+        uint32_t                                vend_id;
+        uint16_t                                dev_id;
+        uint16_t                                revision;
+        uint64_t                                fw_ver;
+
+        /*
+         * Total size of the ca attributes in bytes
+         */
+        uint32_t                                size;
+        uint32_t                                max_qps;
+        uint32_t                                max_wrs;
+
+        uint32_t                                max_sges;
+        uint32_t                                max_rd_sges;
+
+        uint32_t                                max_cqs;
+        uint32_t                                max_cqes;
+
+        uint32_t                                max_pds;
+
+        uint32_t                                init_regions;
+        uint64_t                                init_region_size;
+
+        uint32_t                                init_windows;
+        uint32_t                                max_addr_handles;
+
+        uint32_t                                max_partitions;
+
+        ib_atomic_t                             atomicity;
+
+        uint8_t                                 max_qp_resp_res;
+        uint8_t                                 max_resp_res;
+
+        uint8_t                                 max_qp_init_depth;
+
+        uint32_t                                max_ipv6_qps;
+        uint32_t                                max_ether_qps;
+
+        uint32_t                                max_mcast_grps;
+        uint32_t                                max_mcast_qps;
+        uint32_t                                max_qps_per_mcast_grp;
+        uint32_t                                max_fmr;
+        uint32_t                                max_map_per_fmr;
+        uint32_t                                max_srq;
+        uint32_t                                max_srq_wrs;
+        uint32_t                                max_srq_sges;
+
+        /*
+         * local_ack_delay:
+         * Specifies the maximum time interval between the local CA receiving
+         * a message and the transmission of the associated ACK or NAK.
+         *
+         * timeout = 4.096 microseconds * 2^local_ack_delay
+         */
+        uint8_t                                 local_ack_delay;
+
+        boolean_t                               bad_pkey_ctr_support;
+        boolean_t                               bad_qkey_ctr_support;
+        boolean_t                               raw_mcast_support;
+        boolean_t                               apm_support;
+        boolean_t                               av_port_check;
+        boolean_t                               change_primary_port;
+        boolean_t                               modify_wr_depth;
+        boolean_t                               modify_srq_depth;
+        boolean_t                               current_qp_state_support;
+        boolean_t                               shutdown_port_capability;
+        boolean_t                               init_type_support;
+        boolean_t                               port_active_event_support;
+        boolean_t                               system_image_guid_support;
+        boolean_t                               hw_agents;
+
+        ib_net64_t                              system_image_guid;
+
+        uint32_t                                num_page_sizes;
+        uint8_t                                 num_ports;
+
+        uint32_t* __ptr64               p_page_size;
+        ib_port_attr_t* __ptr64 p_port_attr;
+
+}       ib_ca_attr_t;
+
+

FIELDS

+
       ca_guid
+               GUID for this adapter.
+
+       vend_id
+               IEEE vendor ID for this adapter
+
+       dev_id
+               Device ID of this adapter. (typically from PCI device ID)
+
+       revision
+               Revision ID of this adapter
+
+       fw_ver
+               Device Firmware version.
+
+       size
+               Total size in bytes for the HCA attributes.  This size includes total
+               size required for all the variable members of the structure.  If a
+               vendor needs to pass vendor-specific fields beyond this structure,
+               the HCA vendor can choose to report a larger size.  If a vendor is
+               reporting extended vendor specific features, they should also provide
+               appropriate access functions to aid with the required interpretation.
+
+       max_qps
+               Maximum number of QPs supported by this HCA.
+
+       max_wrs
+               Maximum number of work requests supported by this HCA.
+
+       max_sges
+               Maximum number of scatter gather elements supported per work request.
+
+       max_rd_sges
+               Maximum number of scatter gather elements supported for READ work
+               requests for a Reliable Datagram QP.  This value must be zero if RD
+               service is not supported.
+
+       max_cqs
+               Maximum number of Completion Queues supported.
+
+       max_cqes
+               Maximum number of CQ elements supported per CQ.
+
+       max_pds
+               Maximum number of protection domains supported.
+
+       init_regions
+               Initial number of memory regions supported.  These are only informative
+               values.  HCA vendors can extend and grow these limits on demand.
+
+       init_region_size
+               Initial limit on the size of the registered memory region.
+
+       init_windows
+               Initial number of window entries supported.
+
+       max_addr_handles
+               Maximum number of address handles supported.
+
+       max_partitions
+               Maximum number of partitions supported.
+
+       atomicity
+               Indicates level of atomic operations supported by this HCA.
+
+       max_qp_resp_res
+               Maximum limit on number of responder resources for incoming RDMA
+               operations on QPs.
+
+       max_fmr
+               Maximum number of Fast Memory Regions supported.
+
+       max_map_per_fmr
+               Maximum number of mappings supported by a Fast Memory Region.
+
+       max_srq
+               Maximum number of Shared Receive Queues supported.
+
+       max_srq_wrs
+               Maximum number of work requests supported per SRQ.
+
+       max_srq_sges
+               Maximum number of scatter gather elements supported per work request
+               on an SRQ.
+
+       max_resp_res
+               Maximum number of responder resources per HCA, with this HCA used as
+               the target.
+
+       max_qp_init_depth
+               Maximum initiator depth per QP for initiating RDMA reads and
+               atomic operations.
+
+       max_ipv6_qps
+       max_ether_qps
+               Maximum number of raw IPv6 and raw Ethernet QPs supported by this HCA.
+
+       max_mcast_grps
+               Maximum number of multicast groups supported.
+
+       max_mcast_qps
+               Maximum number of QPs that can support multicast operations.
+
+       max_qps_per_mcast_grp
+               Maximum number of multicast QPs per multicast group.
+
+       local_ack_delay
+               Specifies the maximum time interval between the local CA receiving
+               a message and the transmission of the associated ACK or NAK.
+               timeout = 4.096 microseconds * 2^local_ack_delay
+
+       bad_pkey_ctr_support
+       bad_qkey_ctr_support
+               Indicates support for the bad pkey and qkey counters.
+
+       raw_mcast_support
+               Indicates support for raw packet multicast.
+
+       apm_support
+               Indicates support for Automatic Path Migration.
+
+       av_port_check
+               Indicates ability to check port number in address handles.
+
+       change_primary_port
+               Indicates ability to change the primary port for a QP during an
+               SQD->RTS transition.
+
+       modify_wr_depth
+               Indicates ability to modify QP depth during a modify QP operation.
+               Check the verb specification for permitted states.
+
+       modify_srq_depth
+               Indicates ability to modify SRQ depth during a modify SRQ operation.
+               Check the verb specification for permitted states.
+
+       current_qp_state_support
+               Indicates ability of the HCA to support the current QP state modifier
+               during a modify QP operation.
+
+       shutdown_port_capability
+               Shutdown port capability support indicator.
+
+       init_type_support
+               Indicates whether init_type_reply and the ability to set init_type
+               are supported.
+
+       port_active_event_support
+               Port active event support indicator.
+
+       system_image_guid_support
+               System image GUID support indicator.
+
+       hw_agents
+               Indicates SMA is implemented in HW.
+
+       system_image_guid
+               Optional system image GUID.  This field is valid only if the
+               system_image_guid_support flag is set.
+
+       num_page_sizes
+               Number of different page sizes supported by the HCA.  The
+               variable-size array of page sizes can be obtained from p_page_size.
+
+       num_ports
+               Number of physical ports supported on this HCA.
+
+       p_page_size
+               Array holding the different page sizes supported.
+
+       p_port_attr
+               Array holding port attributes.
+
+

NOTES

+
       This structure contains the attributes of a channel adapter.  Users must
+       call ib_copy_ca_attr, rather than a simple memory copy, to copy the
+       contents of this structure to a new memory region, since the structure
+       ends in pointers to variable-length arrays.
+
+

SEE ALSO

+
       ib_port_attr_t, ib_atomic_t, ib_copy_ca_attr
+
+
+

[Definitions] +Access Layer/ib_ca_mod_t

+

NAME

+
       ib_ca_mod_t -- Modify port attributes and error counters
+
+

DESCRIPTION

+
       Specifies modifications to the port attributes of a channel adapter.
+
+

SYNOPSIS

+
typedef uint32_t                                                        ib_ca_mod_t;
+#define IB_CA_MOD_IS_CM_SUPPORTED                       0x00000001
+#define IB_CA_MOD_IS_SNMP_SUPPORTED                     0x00000002
+#define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED         0x00000004
+#define IB_CA_MOD_IS_VEND_SUPPORTED                     0x00000008
+#define IB_CA_MOD_IS_SM                                         0x00000010
+#define IB_CA_MOD_IS_SM_DISABLED                        0x00000020
+#define IB_CA_MOD_QKEY_CTR                                      0x00000040
+#define IB_CA_MOD_PKEY_CTR                                      0x00000080
+#define IB_CA_MOD_IS_NOTICE_SUPPORTED           0x00000100
+#define IB_CA_MOD_IS_TRAP_SUPPORTED                     0x00000200
+#define IB_CA_MOD_IS_APM_SUPPORTED                      0x00000400
+#define IB_CA_MOD_IS_SLMAP_SUPPORTED            0x00000800
+#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED       0x00001000
+#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED       0x00002000
+#define IB_CA_MOD_IS_SYSGUID_SUPPORTED          0x00004000
+#define IB_CA_MOD_IS_DR_NOTICE_SUPPORTED        0x00008000
+#define IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED        0x00010000
+#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED      0x00020000
+#define IB_CA_MOD_IS_REINIT_SUPORTED            0x00040000
+#define IB_CA_MOD_IS_LEDINFO_SUPPORTED          0x00080000
+#define IB_CA_MOD_SHUTDOWN_PORT                         0x00100000
+#define IB_CA_MOD_INIT_TYPE_VALUE                       0x00200000
+#define IB_CA_MOD_SYSTEM_IMAGE_GUID                     0x00400000
+#define IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED        0x00800000
+#define IB_CA_MOD_RESERVED_MASK                         0xFF000000
+
+

VALUES

+
       IB_CA_MOD_IS_CM_SUPPORTED
+               Indicates if there is a communication manager accessible through
+               the port.
+
+       IB_CA_MOD_IS_SNMP_SUPPORTED
+               Indicates if there is an SNMP agent accessible through the port.
+
+       IB_CA_MOD_IS_DEV_MGMT_SUPPORTED
+               Indicates if there is a device management agent accessible through
+               the port.
+
+       IB_CA_MOD_IS_VEND_SUPPORTED
+               Indicates if there is a vendor supported agent accessible through
+               the port.
+
+       IB_CA_MOD_IS_SM
+               Indicates if there is a subnet manager accessible through
+               the port.
+
+       IB_CA_MOD_IS_SM_DISABLED
+               Indicates if the port has been disabled for configuration by the subnet
+               manager.
+
+       IB_CA_MOD_QKEY_CTR
+               Used to reset the qkey violation counter associated with the port.
+
+       IB_CA_MOD_PKEY_CTR
+               Used to reset the pkey violation counter associated with the port.
+
+       IB_CA_MOD_IS_NOTICE_SUPPORTED
+               Indicates that this CA supports the ability to generate notices for
+               port state changes (only applicable to switches).
+
+       IB_CA_MOD_IS_TRAP_SUPPORTED
+               Indicates that this management port supports the ability to generate
+               trap messages (only applicable to switches).
+
+       IB_CA_MOD_IS_APM_SUPPORTED
+               Indicates that this port is capable of performing Automatic Path
+               Migration.
+
+       IB_CA_MOD_IS_SLMAP_SUPPORTED
+               Indicates that this port supports the SL mapping capability.
+
+       IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED
+               Indicates that the PKEY is supported in NVRAM.
+
+       IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED
+               Indicates that the MKEY is supported in NVRAM.
+
+       IB_CA_MOD_IS_SYSGUID_SUPPORTED
+               Indicates System Image GUID support.
+
+       IB_CA_MOD_IS_DR_NOTICE_SUPPORTED
+               Indicates support for generating direct-routed notices.
+
+       IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED
+               Indicates support for boot management.
+
+       IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED
+               Indicates the capability to generate notices for changes to the
+               capability mask.
+
+       IB_CA_MOD_IS_REINIT_SUPORTED
+               Indicates the type of node initialization supported.  Refer to
+               Chapter 14 of the InfiniBand specification for initialization actions.
+
+       IB_CA_MOD_IS_LEDINFO_SUPPORTED
+               Indicates support for LED info.
+
+       IB_CA_MOD_SHUTDOWN_PORT
+               Used to modify the port active indicator.
+
+       IB_CA_MOD_INIT_TYPE_VALUE
+               Used to modify the init_type value for the port.
+
+       IB_CA_MOD_SYSTEM_IMAGE_GUID
+               Used to modify the system image GUID for the port.
+
+       IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED
+               Indicates support for client reregistration.
+
+       IB_CA_MOD_RESERVED_MASK
+               Mask of all the reserved bits.  If any of these bits are set
+               ib_modify_ca will return IB_INVALID_PARAMETER.
+
+
+
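EXAMPLE

       A minimal usage sketch; it assumes the IBAL entry point ib_modify_ca()
       taking (CA handle, port number, ib_ca_mod_t, ib_port_attr_mod_t*), an
       opened CA handle h_ca, and an illustrative GUID value.  Error handling
       is elided.

        ib_port_attr_mod_t      port_attr_mod;

        cl_memclr( &port_attr_mod, sizeof(port_attr_mod) );
        port_attr_mod.system_image_guid = sys_image_guid;       /* illustrative */

        /* Reset both violation counters on port 1 and set the system
         * image GUID in a single call. */
        ib_modify_ca( h_ca, 1,
                IB_CA_MOD_QKEY_CTR | IB_CA_MOD_PKEY_CTR | IB_CA_MOD_SYSTEM_IMAGE_GUID,
                &port_attr_mod );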

[Structures] +Access Layer/ib_ci_op_t

+

NAME

+
       ib_ci_op_t
+
+

DESCRIPTION

+
       A structure used for vendor specific CA interface communication.
+
+

SYNOPSIS

+
typedef struct _ib_ci_op
+{
+        IN                              uint32_t                                        command;
+        IN                              uint32_t                                        buf_size;
+        IN                              uint32_t                                        buf_info;
+        IN      OUT                     int32_t                                         status;
+                OUT                     uint32_t                                        num_bytes_ret;
+        IN      OUT                     void* __ptr64                           p_buf OPTIONAL;
+
+}       ib_ci_op_t;
+
+

FIELDS

+
       command
+               A command code that is understood by the verbs provider.
+
+       status
+               The completion status from the verbs provider.  This field should be
+               initialized to indicate an error to allow detection and cleanup in
+               case a communication error occurs between user-mode and kernel-mode.
+
+       buf_size
+               The size of the buffer in bytes.
+
+       buf_info
+               Additional buffer information.
+
+       p_buf
+               A reference to a buffer containing vendor specific data.  The verbs
+               provider must not access pointers in the p_buf between user-mode and
+               kernel-mode.  Any pointers embedded in the p_buf are invalidated by
+               the user-mode/kernel-mode transition.
+
+       num_bytes_ret
+               The size in bytes of the vendor specific data returned in the buffer.
+               This field is set by the verbs provider.  The verbs provider should
+               verify that the buffer size is sufficient to hold the data being
+               returned.
+
+

NOTES

+
       This structure is provided to allow the exchange of vendor specific
+       data between the originator and the verbs provider.  Users of this
+       structure are expected to know the format of the data in p_buf based
+       on the command field or the usage context.
+
+
+
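EXAMPLE

       A minimal sketch of preparing a CI operation.  The command code and
       buffer layout are defined by the verbs provider; MY_VENDOR_CMD and
       vendor_buf below are placeholders, not real identifiers.

        ib_ci_op_t      ci_op;

        cl_memclr( &ci_op, sizeof(ci_op) );
        ci_op.command   = MY_VENDOR_CMD;        /* provider-defined command */
        ci_op.buf_size  = sizeof(vendor_buf);
        ci_op.p_buf     = &vendor_buf;
        /* Pre-set the status to an error so that a request lost between
         * user-mode and kernel-mode is detectable. */
        ci_op.status    = -1;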

[Definitions] +Access Layer/ib_cm_cap_mask_t

+

NAME

+
       ib_cm_cap_mask_t
+
+

DESCRIPTION

+
       Capability mask values in ClassPortInfo.
+
+

SYNOPSIS

+
#define IB_CM_RELIABLE_CONN_CAPABLE                     CL_HTON16(9)
+#define IB_CM_RELIABLE_DGRM_CAPABLE                     CL_HTON16(10)
+#define IB_CM_RDGRM_CAPABLE                                     CL_HTON16(11)
+#define IB_CM_UNRELIABLE_CONN_CAPABLE           CL_HTON16(12)
+#define IB_CM_SIDR_CAPABLE                                      CL_HTON16(13)
+
+

SEE ALSO

+
       ib_cm_rep, ib_class_port_info_t
+
+

SOURCE

+
+
+
+

[Functions] +Access layer/ib_copy_ca_attr

+

NAME

+
       ib_copy_ca_attr
+
+

DESCRIPTION

+
       Copies CA attributes.
+
+

SYNOPSIS

+
AL_EXPORT ib_ca_attr_t* AL_API
+ib_copy_ca_attr(
+        IN                              ib_ca_attr_t* const             p_dest,
+        IN              const   ib_ca_attr_t* const             p_src );
+
+

PARAMETERS

+
       p_dest
+               Pointer to the buffer that is the destination of the copy.
+
+       p_src
+               Pointer to the CA attributes to copy.
+
+

RETURN VALUE

+
       Pointer to the copied CA attributes.
+
+

NOTES

+
       The buffer pointed to by the p_dest parameter must be at least the size
+       specified in the size field of the buffer pointed to by p_src.
+
+

SEE ALSO

+
       ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr
+
+
+
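EXAMPLE

       A minimal sketch of the query-then-copy pattern; it assumes the
       two-call form of ib_query_ca() (the first call reports the required
       buffer size) and the complib allocator cl_zalloc().  Error handling
       is elided.

        uint32_t                size = 0;
        ib_ca_attr_t*   p_ca_attr;
        ib_ca_attr_t*   p_copy;

        ib_query_ca( h_ca, NULL, &size );       /* get the required size */
        p_ca_attr = (ib_ca_attr_t*)cl_zalloc( size );
        ib_query_ca( h_ca, p_ca_attr, &size );

        /* The destination must be at least p_ca_attr->size bytes. */
        p_copy = (ib_ca_attr_t*)cl_zalloc( p_ca_attr->size );
        ib_copy_ca_attr( p_copy, p_ca_attr );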

[Definitions] +Access Layer/ib_init_type_t

+

NAME

+
       ib_init_type_t
+
+

DESCRIPTION

+
       If supported by the HCA, the type of initialization requested by
+       this port before the SM moves it to the active or armed state.  If the
+       SM implements reinitialization, it shall set these bits to indicate
+       the type of initialization performed prior to activating the port.
+       Otherwise, these bits shall be set to 0.
+
+

SYNOPSIS

+
typedef uint8_t                                 ib_init_type_t;
+#define IB_INIT_TYPE_NO_LOAD                            0x01
+#define IB_INIT_TYPE_PRESERVE_CONTENT           0x02
+#define IB_INIT_TYPE_PRESERVE_PRESENCE          0x04
+#define IB_INIT_TYPE_DO_NOT_RESUSCITATE         0x08
+
+
+

[Structures] +Access Layer/ib_local_ds_t

+

NAME

+
       ib_local_ds_t
+
+

DESCRIPTION

+
       Local data segment information referenced by send and receive work
+       requests.  This is used to specify local data buffers used as part of a
+       work request.
+
+

SYNOPSIS

+
typedef struct _ib_local_ds
+{
+        uint64_t                                vaddr;
+        uint32_t                                length;
+        uint32_t                                lkey;
+
+}       ib_local_ds_t;
+
+
+

[Structures] +Access Layer/ib_mr_attr_t

+

NAME

+
       ib_mr_attr_t
+
+

DESCRIPTION

+
       Attributes of a registered memory region.
+
+

SYNOPSIS

+
typedef struct _ib_mr_attr
+{
+        ib_pd_handle_t                  h_pd;
+        uint64_t                                local_lb;
+        uint64_t                                local_ub;
+        uint64_t                                remote_lb;
+        uint64_t                                remote_ub;
+        ib_access_t                             access_ctrl;
+        net32_t                                 lkey;
+        net32_t                                 rkey;
+
+}       ib_mr_attr_t;
+
+

FIELDS

+
       h_pd
+               Handle to the protection domain for this memory region.
+
+       local_lb
+               The virtual address of the lower bound of protection for local
+               memory access.  This is always a 64-bit quantity to support registering
+               more than 4GB of memory on 32-bit systems with PAE.
+
+       local_ub
+               The virtual address of the upper bound of protection for local
+               memory access.  This is always a 64-bit quantity to support registering
+               more than 4GB of memory on 32-bit systems with PAE.
+
+       remote_lb
+               The virtual address of the lower bound of protection for remote
+               memory access.  This is always a 64-bit quantity to support registering
+               more than 4GB of memory on 32-bit systems with PAE.
+
+       remote_ub
+               The virtual address of the upper bound of protection for remote
+               memory access.  This is always a 64-bit quantity to support registering
+               more than 4GB of memory on 32-bit systems with PAE.
+
+       access_ctrl
+               Access rights for the specified memory region.
+
+       lkey
+               The lkey associated with this memory region.
+
+       rkey
+               The rkey associated with this memory region.
+
+

NOTES

+
       The remote_lb, remote_ub, and rkey are only valid if remote memory access
+       is enabled for this memory region.
+
+

SEE ALSO

+
       ib_access_t
+
+
+

[Structures] +Access Layer/ib_mr_create_t

+

NAME

+
       ib_mr_create_t
+
+

DESCRIPTION

+
       Information required to create a registered memory region.
+
+

SYNOPSIS

+
typedef struct _ib_mr_create
+{
+        void* __ptr64                   vaddr;
+        uint64_t                                length;
+        ib_access_t                             access_ctrl;
+
+}       ib_mr_create_t;
+
+

FIELDS

+
       vaddr
+               Starting virtual address of the region being registered.
+
+       length
+               Length of the buffer to register.
+
+       access_ctrl
+               Access rights of the registered region.
+
+

SEE ALSO

+
       ib_access_t
+
+
+
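EXAMPLE

       A minimal sketch of registering a buffer; it assumes the IBAL entry
       point ib_reg_mem(), an allocated PD handle h_pd, and an illustrative
       buffer buf of buf_len bytes.

        ib_mr_create_t  mr_create;
        net32_t                 lkey, rkey;
        ib_mr_handle_t  h_mr;

        mr_create.vaddr                 = buf;                  /* buffer to register */
        mr_create.length                = buf_len;
        mr_create.access_ctrl   = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;

        ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &h_mr );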

[Definitions] +Access Layer/ib_mr_mod_t

+

NAME

+
       ib_mr_mod_t
+
+

DESCRIPTION

+
       Mask used to specify which attributes of a registered memory region are
+       being modified.
+
+

SYNOPSIS

+
typedef uint32_t                                                ib_mr_mod_t;
+#define IB_MR_MOD_ADDR                                  0x00000001
+#define IB_MR_MOD_PD                                    0x00000002
+#define IB_MR_MOD_ACCESS                                0x00000004
+
+

VALUES

+
       IB_MR_MOD_ADDR
+               The address of the memory region is being modified.
+
+       IB_MR_MOD_PD
+               The protection domain associated with the memory region is being
+               modified.
+
+       IB_MR_MOD_ACCESS
+               The access rights of the memory region are being modified.
+
+
+

[Definitions] +Access Layer/ib_pd_type_t

+

NAME

+
       ib_pd_type_t
+
+

DESCRIPTION

+
       Indicates the type of protection domain being allocated.
+
+

SYNOPSIS

+
typedef enum _ib_pd_type
+{
+        IB_PDT_NORMAL,
+        IB_PDT_ALIAS,
+        IB_PDT_SQP,
+        IB_PDT_UD
+
+}       ib_pd_type_t;
+
+

VALUES

+
       IB_PDT_NORMAL
+               Protection domain for all non-aliased QPs.
+
+       IB_PDT_ALIAS
+               Protection domain for IB_QPT_QP0_ALIAS and IB_QPT_QP1_ALIAS QPs.
+
+       IB_PDT_SQP
+               Protection domain for special queue pair usage.
+
+       IB_PDT_UD
+               Protection domain for UD queue pair usage.
+
+
+
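EXAMPLE

       A minimal sketch; it assumes the IBAL entry point ib_alloc_pd() and
       an opened CA handle h_ca.

        ib_pd_handle_t  h_pd;

        /* An ordinary PD; IB_PDT_ALIAS is reserved for QP0/QP1 alias QPs. */
        ib_alloc_pd( h_ca, IB_PDT_NORMAL, NULL, &h_pd );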

[Structures] +Access Layer/ib_phys_create_t

+

NAME

+
       ib_phys_create_t
+
+

DESCRIPTION

+
       Information required to create a physical memory region.
+
+

SYNOPSIS

+
typedef struct _ib_phys_create
+{
+        uint64_t                                        length;
+        uint32_t                                        num_ranges;
+        ib_phys_range_t* __ptr64        range_array;
+        uint32_t                                        buf_offset;
+        uint32_t                                        hca_page_size;
+        ib_access_t                                     access_ctrl;
+
+}       ib_phys_create_t;
+
+

FIELDS

+
       length
+               The length of the memory region in bytes.
+
+       num_ranges
+               Number of ib_phys_range structures listed in the specified range array.
+
+       range_array
+               An array of ib_phys_range structures to be registered as a single memory
+               region.
+
+       buf_offset
+               The offset into the first physical memory range at which the
+               virtual address of the region starts.
+
+       hca_page_size
+               The HCA page size to use when registering the memory.
+
+       access_ctrl
+               Access rights of the registered region.
+
+

SEE ALSO

+
       ib_access_t
+
+
+

[Structures] +Access Layer/ib_phys_range_t

+

NAME

+
       ib_phys_range_t
+
+

DESCRIPTION

+
       Information describing a physical memory range.
+
+

SYNOPSIS

+
typedef struct _ib_phys_range
+{
+        uint64_t                                base_addr;
+        uint64_t                                size;
+
+}       ib_phys_range_t;
+
+

FIELDS

+
       base_addr
+               Physical address of the base of the memory range.
+
+       size
+               Size, in bytes, of the memory range.
+
+

NOTES

+
       The memory range must start and end on an HCA-supported page boundary.
+
+

SEE ALSO

+
       ib_phys_create_t
+
+
+
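EXAMPLE

       A minimal sketch of describing two physically contiguous pages as a
       single region.  The physical addresses are illustrative; the actual
       registration call is the kernel-mode physical registration verb and
       is not shown.

        ib_phys_range_t         range[2];
        ib_phys_create_t        phys_create;

        range[0].base_addr      = page0_pa;     /* illustrative addresses */
        range[0].size           = 4096;
        range[1].base_addr      = page1_pa;
        range[1].size           = 4096;

        phys_create.length                      = 8192;
        phys_create.num_ranges          = 2;
        phys_create.range_array         = range;
        phys_create.buf_offset          = 0;    /* region starts at the first byte */
        phys_create.hca_page_size       = 4096;
        phys_create.access_ctrl         = IB_AC_LOCAL_WRITE;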

[Structures] +Access Layer/ib_port_attr_mod_t

+

NAME

+
       ib_port_attr_mod_t
+
+

DESCRIPTION

+
       Port attributes that may be modified.
+
+

SYNOPSIS

+
typedef struct _ib_port_attr_mod
+{
+        ib_port_cap_t                   cap;
+        uint16_t                                pkey_ctr;
+        uint16_t                                qkey_ctr;
+
+        ib_init_type_t                  init_type;
+        ib_net64_t                              system_image_guid;
+
+}       ib_port_attr_mod_t;
+
+

SEE ALSO

+
       ib_port_cap_t
+
+
+

[Structures] +Access Layer/ib_port_attr_t

+

NAME

+
       ib_port_attr_t
+
+

DESCRIPTION

+
       Information about a port on a given channel adapter.
+
+

SYNOPSIS

+
typedef struct _ib_port_attr
+{
+        ib_net64_t                              port_guid;
+        uint8_t                                 port_num;
+        uint8_t                                 mtu;
+        uint64_t                                max_msg_size;
+        ib_net16_t                              lid;
+        uint8_t                                 lmc;
+
+        /*
+         * LinkWidthSupported as defined in PortInfo.  Required to calculate
+         * inter-packet delay (a.k.a. static rate).
+         */
+        uint8_t                                 link_width_supported;
+
+        uint16_t                                max_vls;
+
+        ib_net16_t                              sm_lid;
+        uint8_t                                 sm_sl;
+        uint8_t                                 link_state;
+
+        ib_init_type_t                  init_type_reply;        /* Optional */
+
+        /*
+         * subnet_timeout:
+         * The maximum expected subnet propagation delay to reach any port on
+         * the subnet.  This value also determines the rate at which traps can
+         * be generated from this node.
+         *
+         * timeout = 4.096 microseconds * 2^subnet_timeout
+         */
+        uint8_t                                 subnet_timeout;
+
+        ib_port_cap_t                   cap;
+        uint16_t                                pkey_ctr;
+        uint16_t                                qkey_ctr;
+
+        uint16_t                                num_gids;
+        uint16_t                                num_pkeys;
+        /*
+         * Pointers at the end of the structure to allow doing a simple
+         * memory comparison of contents up to the first pointer.
+         */
+        ib_gid_t* __ptr64               p_gid_table;
+        ib_net16_t* __ptr64             p_pkey_table;
+
+}       ib_port_attr_t;
+
+

SEE ALSO

+
       uint8_t, ib_port_cap_t, ib_link_states_t
+
+
+

[Structures] +Access Layer/ib_port_cap_t

+

NAME

+
       ib_port_cap_t
+
+

DESCRIPTION

+
       Indicates which management agents are currently available on the specified
+       port.
+
+

SYNOPSIS

+
typedef struct _ib_port_cap
+{
+        boolean_t               cm;
+        boolean_t               snmp;
+        boolean_t               dev_mgmt;
+        boolean_t               vend;
+        boolean_t               sm;
+        boolean_t               sm_disable;
+        boolean_t               qkey_ctr;
+        boolean_t               pkey_ctr;
+        boolean_t               notice;
+        boolean_t               trap;
+        boolean_t               apm;
+        boolean_t               slmap;
+        boolean_t               pkey_nvram;
+        boolean_t               mkey_nvram;
+        boolean_t               sysguid;
+        boolean_t               dr_notice;
+        boolean_t               boot_mgmt;
+        boolean_t               capm_notice;
+        boolean_t               reinit;
+        boolean_t               ledinfo;
+        boolean_t               port_active;
+        boolean_t               ipd;
+        boolean_t               pkey_switch_ext_port;
+        boolean_t               bm;
+        boolean_t               link_rtl;
+        boolean_t               client_reregister;
+
+}       ib_port_cap_t;
+
+
+

[Structures] +Access Layer/ib_qp_attr_t

+

NAME

+
       ib_qp_attr_t
+
+

DESCRIPTION

+
       Queue pair attributes returned through ib_query_qp.
+
+

SYNOPSIS

+
typedef struct _ib_qp_attr
+{
+        ib_pd_handle_t                  h_pd;
+        ib_qp_type_t                    qp_type;
+        ib_access_t                             access_ctrl;
+        uint16_t                                pkey_index;
+
+        uint32_t                                sq_max_inline;
+        uint32_t                                sq_depth;
+        uint32_t                                rq_depth;
+        uint32_t                                sq_sge;
+        uint32_t                                rq_sge;
+        uint8_t                                 init_depth;
+        uint8_t                                 resp_res;
+
+        ib_cq_handle_t                  h_sq_cq;
+        ib_cq_handle_t                  h_rq_cq;
+        ib_srq_handle_t                 h_srq;
+
+        boolean_t                               sq_signaled;
+
+        ib_qp_state_t                   state;
+        ib_net32_t                              num;
+        ib_net32_t                              dest_num;
+        ib_net32_t                              qkey;
+
+        ib_net32_t                              sq_psn;
+        ib_net32_t                              rq_psn;
+
+        uint8_t                                 primary_port;
+        uint8_t                                 alternate_port;
+        ib_av_attr_t                    primary_av;
+        ib_av_attr_t                    alternate_av;
+        ib_apm_state_t                  apm_state;
+
+}       ib_qp_attr_t;
+
+

FIELDS

+
       h_pd
+               A handle to the protection domain associated with the QP.
+
+       sq_max_inline
+               Maximum payload that can be inlined directly in a WQE, eliminating
+               protection checks and additional DMA operations.
+
+

NOTES

+
       Other fields are defined by the InfiniBand specification.
+
+

SEE ALSO

+
       ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t
+
+
+

[Structures] +Access Layer/ib_qp_create_t

+

NAME

+
       ib_qp_create_t
+
+

DESCRIPTION

+
       Attributes used to initialize a queue pair at creation time.
+
+

SYNOPSIS

+
typedef struct _ib_qp_create
+{
+        ib_qp_type_t                    qp_type;
+
+        uint32_t                                sq_depth;
+        uint32_t                                rq_depth;
+        uint32_t                                sq_sge;
+        uint32_t                                rq_sge;
+
+        ib_cq_handle_t                  h_sq_cq;
+        ib_cq_handle_t                  h_rq_cq;
+        ib_srq_handle_t                 h_srq;
+
+        boolean_t                               sq_signaled;
+
+}       ib_qp_create_t;
+
+

FIELDS

+
       qp_type
+               Specifies the type of queue pair to create.
+
+       sq_depth
+               Indicates the requested maximum number of work requests that may be
+               outstanding on the queue pair's send queue.  This value must be less
+               than or equal to the maximum reported by the channel adapter associated
+               with the queue pair.
+
+       rq_depth
+               Indicates the requested maximum number of work requests that may be
+               outstanding on the queue pair's receive queue.  This value must be less
+               than or equal to the maximum reported by the channel adapter associated
+               with the queue pair.
+
+       sq_sge
+               Indicates the maximum number of scatter-gather elements that may be
+               given in a send work request.  This value must be less than or equal
+               to the maximum reported by the channel adapter associated with the
+               queue pair.
+
+       rq_sge
+               Indicates the maximum number of scatter-gather elements that may be
+               given in a receive work request.  This value must be less than or
+               equal to the maximum reported by the channel adapter associated with
+               the queue pair.
+
+       h_sq_cq
+               A handle to the completion queue that will be used to report send work
+               request completions.  This handle must be NULL if the type is
+               IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
+
+       h_rq_cq
+               A handle to the completion queue that will be used to report receive
+               work request completions.  This handle must be NULL if the type is
+               IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
+
+       h_srq
+               A handle to the SRQ through which receive completions will be
+               delivered.  This handle must be NULL when the QP is not associated
+               with an SRQ.
+
+       sq_signaled
+               A flag that is used to indicate whether the queue pair will signal
+               an event upon completion of a send work request.  If set to
+               TRUE, send work requests will always generate a completion
+               event.  If set to FALSE, a completion event will only be
+               generated if the send_opt field of the send work request has the
+               IB_SEND_OPT_SIGNALED flag set.
+
+

SEE ALSO

+
       ib_qp_type_t, ib_qp_attr_t
+
+
+
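EXAMPLE

       A minimal sketch of creating an RC QP; it assumes the IBAL entry
       point ib_create_qp(), existing CQ handles h_send_cq/h_recv_cq, and
       depths within the limits reported in ib_ca_attr_t.  The context and
       event callback names are illustrative.

        ib_qp_create_t  qp_create;
        ib_qp_handle_t  h_qp;

        cl_memclr( &qp_create, sizeof(qp_create) );
        qp_create.qp_type               = IB_QPT_RELIABLE_CONN;
        qp_create.sq_depth              = 128;
        qp_create.rq_depth              = 128;
        qp_create.sq_sge                = 4;
        qp_create.rq_sge                = 4;
        qp_create.h_sq_cq               = h_send_cq;
        qp_create.h_rq_cq               = h_recv_cq;
        qp_create.h_srq                 = NULL;         /* no SRQ */
        qp_create.sq_signaled   = FALSE;        /* signal per-WR instead */

        ib_create_qp( h_pd, &qp_create, qp_context, qp_event_cb, &h_qp );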

[Structures] +Access Layer/ib_qp_mod_t

+

NAME

+
       ib_qp_mod_t
+
+

DESCRIPTION

+
       Information needed to change the state of a queue pair through the
+       ib_modify_qp call.
+
+

SYNOPSIS

+
typedef struct _ib_qp_mod
+{
+        ib_qp_state_t                           req_state;
+
+        union _qp_state
+        {
+                struct _qp_init
+                {
+                        uint8_t                         primary_port;
+                        ib_net32_t                      qkey;
+                        uint16_t                        pkey_index;
+                        ib_access_t                     access_ctrl;
+
+                }       init;
+
+                struct _qp_rtr
+                {
+                        ib_net32_t                      rq_psn;
+                        ib_net32_t                      dest_qp;
+                        ib_av_attr_t            primary_av;
+                        uint8_t                         resp_res;
+                        uint8_t                         rnr_nak_timeout;
+
+                        ib_qp_opts_t            opts;
+                        ib_av_attr_t            alternate_av;
+                        ib_net32_t                      qkey;
+                        uint16_t                        pkey_index;
+                        ib_access_t                     access_ctrl;
+                        uint32_t                        sq_depth;
+                        uint32_t                        rq_depth;
+
+                }       rtr;
+
+                struct _qp_rts
+                {
+                        ib_net32_t                      sq_psn;
+                        uint8_t                         retry_cnt;
+                        uint8_t                         rnr_retry_cnt;
+                        uint8_t                         local_ack_timeout;
+                        uint8_t                         init_depth;
+
+                        ib_qp_opts_t            opts;
+                        uint8_t                         rnr_nak_timeout;
+                        ib_qp_state_t           current_state;
+                        ib_net32_t                      qkey;
+                        ib_access_t                     access_ctrl;
+                        uint8_t                         resp_res;
+
+                        ib_av_attr_t            primary_av;
+                        ib_av_attr_t            alternate_av;
+
+                        uint32_t                        sq_depth;
+                        uint32_t                        rq_depth;
+
+                        ib_apm_state_t          apm_state;
+                        uint8_t                         primary_port;
+                        uint16_t                        pkey_index;
+
+                }       rts;
+
+                struct _qp_sqd
+                {
+                        boolean_t                       sqd_event;
+
+                }       sqd;
+
+        }       state;
+
+}       ib_qp_mod_t;
+
+

SEE ALSO

+
       ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t
+
+
+
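EXAMPLE

       A minimal sketch of the RESET->INIT transition; it assumes the IBAL
       entry point ib_modify_qp() and an RC QP handle h_qp.  Values are
       illustrative.

        ib_qp_mod_t     qp_mod;

        cl_memclr( &qp_mod, sizeof(qp_mod) );
        qp_mod.req_state                                = IB_QPS_INIT;
        qp_mod.state.init.primary_port  = 1;
        qp_mod.state.init.pkey_index    = 0;
        qp_mod.state.init.access_ctrl   = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
        qp_mod.state.init.qkey                  = 0;    /* unused for RC QPs */

        ib_modify_qp( h_qp, &qp_mod );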

[Definitions] +Access Layer/ib_qp_opts_t

+

NAME

+
       ib_qp_opts_t
+
+

DESCRIPTION

+
       Optional fields supplied in the modify QP operation.
+
+

SYNOPSIS

+
typedef uint32_t                                ib_qp_opts_t;
+#define IB_MOD_QP_ALTERNATE_AV          0x00000001
+#define IB_MOD_QP_PKEY                          0x00000002
+#define IB_MOD_QP_APM_STATE                     0x00000004
+#define IB_MOD_QP_PRIMARY_AV            0x00000008
+#define IB_MOD_QP_RNR_NAK_TIMEOUT       0x00000010
+#define IB_MOD_QP_RESP_RES                      0x00000020
+#define IB_MOD_QP_INIT_DEPTH            0x00000040
+#define IB_MOD_QP_PRIMARY_PORT          0x00000080
+#define IB_MOD_QP_ACCESS_CTRL           0x00000100
+#define IB_MOD_QP_QKEY                          0x00000200
+#define IB_MOD_QP_SQ_DEPTH                      0x00000400
+#define IB_MOD_QP_RQ_DEPTH                      0x00000800
+#define IB_MOD_QP_CURRENT_STATE         0x00001000
+#define IB_MOD_QP_RETRY_CNT                     0x00002000
+#define IB_MOD_QP_LOCAL_ACK_TIMEOUT     0x00004000
+#define IB_MOD_QP_RNR_RETRY_CNT         0x00008000
+
+

SEE ALSO

+
       ib_qp_mod_t
+
+
+

[Definitions] +Access Layer/ib_qp_state_t

+

NAME

+
       ib_qp_state_t
+
+

DESCRIPTION

+
       Indicates or sets the state of a queue pair.  The current state of a queue
+       pair is returned through the ib_query_qp call and set via the
+       ib_modify_qp call.
+
+

SYNOPSIS

+
typedef uint32_t                                ib_qp_state_t;
+#define IB_QPS_RESET                    0x00000001
+#define IB_QPS_INIT                             0x00000002
+#define IB_QPS_RTR                              0x00000004
+#define IB_QPS_RTS                              0x00000008
+#define IB_QPS_SQD                              0x00000010
+#define IB_QPS_SQD_DRAINING             0x00000030
+#define IB_QPS_SQD_DRAINED              0x00000050
+#define IB_QPS_SQERR                    0x00000080
+#define IB_QPS_ERROR                    0x00000100
+#define IB_QPS_TIME_WAIT                0xDEAD0000      /* InfiniBand Access Layer */
+
+
+

[Definitions] +Access Layer/ib_qp_type_t

+

NAME

+
       ib_qp_type_t
+
+

DESCRIPTION

+
       Indicates the type of queue pair being created.
+
+

SYNOPSIS

+
typedef enum _ib_qp_type
+{
+        IB_QPT_RELIABLE_CONN    = 0,            /* Matches CM REQ transport type */
+        IB_QPT_UNRELIABLE_CONN  = 1,            /* Matches CM REQ transport type */
+        IB_QPT_UNRELIABLE_DGRM  = 3,            /* Purposefully skip RDD type. */
+        IB_QPT_QP0,
+        IB_QPT_QP1,
+        IB_QPT_RAW_IPV6,
+        IB_QPT_RAW_ETHER,
+        IB_QPT_MAD,                                                             /* InfiniBand Access Layer */
+        IB_QPT_QP0_ALIAS,                                               /* InfiniBand Access Layer */
+        IB_QPT_QP1_ALIAS,                                               /* InfiniBand Access Layer */
+        IB_QPT_UNKNOWN
+}       ib_qp_type_t;
+
+

VALUES

+
       IB_QPT_RELIABLE_CONN
+               Reliable, connected queue pair.
+
+       IB_QPT_UNRELIABLE_CONN
+               Unreliable, connected queue pair.
+
+       IB_QPT_UNRELIABLE_DGRM
+               Unreliable, datagram queue pair.
+
+       IB_QPT_QP0
+               Queue pair 0.
+
+       IB_QPT_QP1
+               Queue pair 1.
+
+       IB_QPT_RAW_IPV6
+               Raw IP version 6 queue pair.
+
+       IB_QPT_RAW_ETHER
+               Raw Ethernet queue pair.
+
+       IB_QPT_MAD
+               Unreliable, datagram queue pair that will send and receive management
+               datagrams with assistance from the access layer.
+
+       IB_QPT_QP0_ALIAS
+               Alias to queue pair 0.  Aliased QPs can only be created on an aliased
+               protection domain.
+
+       IB_QPT_QP1_ALIAS
+               Alias to queue pair 1.  Aliased QPs can only be created on an aliased
+               protection domain.
+
+
+

[Definitions] +Access Layer/ib_recv_opt_t

+

NAME

+
       ib_recv_opt_t
+
+

DESCRIPTION

+
       Indicates optional fields valid in a receive work completion.
+
+

SYNOPSIS

+
typedef uint32_t                                        ib_recv_opt_t;
+#define IB_RECV_OPT_IMMEDIATE           0x00000001
+#define IB_RECV_OPT_FORWARD                     0x00000002
+#define IB_RECV_OPT_GRH_VALID           0x00000004
+#define IB_RECV_OPT_VEND_MASK           0xFFFF0000
+
+

VALUES

+
       IB_RECV_OPT_IMMEDIATE
+               Indicates that immediate data is valid for this work completion.
+
+       IB_RECV_OPT_FORWARD
+               Indicates that the received trap should be forwarded to the SM.
+
+       IB_RECV_OPT_GRH_VALID
+               Indicates presence of the global route header. When set, the first
+               40 bytes received are the GRH.
+
+       IB_RECV_OPT_VEND_MASK
+               This mask indicates bits reserved in the receive options that may be
+               used by the verbs provider to indicate vendor specific options.  Bits
+               set in this area of the receive options are ignored by the Access Layer,
+               but may have specific meaning to the underlying VPD.
+
+
+

[Structures] +Access Layer/ib_recv_wr_t

+

NAME

+
       ib_recv_wr_t
+
+

DESCRIPTION

+
       Information used to submit a work request to the receive queue of a queue
+       pair.
+
+

SYNOPSIS

+
typedef struct _ib_recv_wr
+{
+        struct _ib_recv_wr* __ptr64     p_next;
+        uint64_t                                        wr_id;
+        uint32_t                                        num_ds;
+        ib_local_ds_t* __ptr64          ds_array;
+
+}       ib_recv_wr_t;
+
+

FIELDS

+
       p_next
+               A pointer used to chain work requests together.  This permits multiple
+               work requests to be posted to a queue pair through a single function
+               call.  This value is set to NULL to mark the end of the chain.
+
+       wr_id
+               A 64-bit work request identifier that is returned to the consumer
+               as part of the work completion.
+
+       num_ds
+               Number of local data segments specified by this work request.
+
+       ds_array
+               A reference to an array of local data segments used by the receive
+               operation.
+
+

SEE ALSO

+
       ib_local_ds_t
+
+
+
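EXAMPLE

       A minimal sketch of chaining two receive work requests through p_next
       and posting them with one call; it assumes the IBAL entry point
       ib_post_recv(), buffers already registered under lkey, and a QP
       handle h_qp.

        ib_local_ds_t   ds[2];
        ib_recv_wr_t    wr[2];
        ib_recv_wr_t*   p_failed;

        ds[0].vaddr = (uint64_t)(uintn_t)buf0;  ds[0].length = 2048;  ds[0].lkey = lkey;
        ds[1].vaddr = (uint64_t)(uintn_t)buf1;  ds[1].length = 2048;  ds[1].lkey = lkey;

        wr[0].p_next = &wr[1];          /* chain the requests */
        wr[0].wr_id = 0;  wr[0].num_ds = 1;  wr[0].ds_array = &ds[0];
        wr[1].p_next = NULL;            /* NULL marks the end of the chain */
        wr[1].wr_id = 1;  wr[1].num_ds = 1;  wr[1].ds_array = &ds[1];

        ib_post_recv( h_qp, &wr[0], &p_failed );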

[Definitions] +Access Layer/ib_rej_status_t

+

NAME

+
       ib_rej_status_t
+
+

DESCRIPTION

+
       Rejection reasons.
+
+

SYNOPSIS

+
typedef ib_net16_t                                                      ib_rej_status_t;
+
+

SEE ALSO

+
       ib_cm_rej, ib_cm_rej_rec_t
+
+

SOURCE

+
#define IB_REJ_INSUF_QP                                         CL_HTON16(1)
+#define IB_REJ_INSUF_EEC                                        CL_HTON16(2)
+#define IB_REJ_INSUF_RESOURCES                          CL_HTON16(3)
+#define IB_REJ_TIMEOUT                                          CL_HTON16(4)
+#define IB_REJ_UNSUPPORTED                                      CL_HTON16(5)
+#define IB_REJ_INVALID_COMM_ID                          CL_HTON16(6)
+#define IB_REJ_INVALID_COMM_INSTANCE            CL_HTON16(7)
+#define IB_REJ_INVALID_SID                                      CL_HTON16(8)
+#define IB_REJ_INVALID_XPORT                            CL_HTON16(9)
+#define IB_REJ_STALE_CONN                                       CL_HTON16(10)
+#define IB_REJ_RDC_NOT_EXIST                            CL_HTON16(11)
+#define IB_REJ_INVALID_GID                                      CL_HTON16(12)
+#define IB_REJ_INVALID_LID                                      CL_HTON16(13)
+#define IB_REJ_INVALID_SL                                       CL_HTON16(14)
+#define IB_REJ_INVALID_TRAFFIC_CLASS            CL_HTON16(15)
+#define IB_REJ_INVALID_HOP_LIMIT                        CL_HTON16(16)
+#define IB_REJ_INVALID_PKT_RATE                         CL_HTON16(17)
+#define IB_REJ_INVALID_ALT_GID                          CL_HTON16(18)
+#define IB_REJ_INVALID_ALT_LID                          CL_HTON16(19)
+#define IB_REJ_INVALID_ALT_SL                           CL_HTON16(20)
+#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS        CL_HTON16(21)
+#define IB_REJ_INVALID_ALT_HOP_LIMIT            CL_HTON16(22)
+#define IB_REJ_INVALID_ALT_PKT_RATE                     CL_HTON16(23)
+#define IB_REJ_PORT_REDIRECT                            CL_HTON16(24)
+#define IB_REJ_INVALID_MTU                                      CL_HTON16(26)
+#define IB_REJ_INSUFFICIENT_RESP_RES            CL_HTON16(27)
+#define IB_REJ_USER_DEFINED                                     CL_HTON16(28)
+#define IB_REJ_INVALID_RNR_RETRY                        CL_HTON16(29)
+#define IB_REJ_DUPLICATE_LOCAL_COMM_ID          CL_HTON16(30)
+#define IB_REJ_INVALID_CLASS_VER                        CL_HTON16(31)
+#define IB_REJ_INVALID_FLOW_LBL                         CL_HTON16(32)
+#define IB_REJ_INVALID_ALT_FLOW_LBL                     CL_HTON16(33)
+
+
+

[Definitions] +Access Layer/ib_send_opt_t

+

NAME

+
       ib_send_opt_t
+
+

DESCRIPTION

+
       Optional flags used when posting send work requests.  These flags
+       indicate specific processing for the send operation.
+
+

SYNOPSIS

+
typedef uint32_t                                        ib_send_opt_t;
+#define IB_SEND_OPT_IMMEDIATE           0x00000001
+#define IB_SEND_OPT_FENCE                       0x00000002
+#define IB_SEND_OPT_SIGNALED            0x00000004
+#define IB_SEND_OPT_SOLICITED           0x00000008
+#define IB_SEND_OPT_INLINE                      0x00000010
+#define IB_SEND_OPT_LOCAL                       0x00000020
+#define IB_SEND_OPT_VEND_MASK           0xFFFF0000
+
+

VALUES

+
       The following flags determine the behavior of a work request when
+       posted to the send side.
+
+       IB_SEND_OPT_IMMEDIATE
+               Send immediate data with the given request.
+
+       IB_SEND_OPT_FENCE
+               The operation is fenced.  Complete all pending send operations before
+               processing this request.
+
+       IB_SEND_OPT_SIGNALED
+               If the queue pair is configured for signaled completion, then
+               generate a completion queue entry when this request completes.
+
+       IB_SEND_OPT_SOLICITED
+               Set the solicited bit on the last packet of this request.
+
+       IB_SEND_OPT_INLINE
+               Indicates that the requested send data should be copied into a VPD
+               owned data buffer.  This flag permits the user to issue send operations
+               without first needing to register the buffer(s) associated with the
+               send operation.  Verb providers that support this operation may place
+               vendor specific restrictions on the size of send operation that may
+               be performed as inline.
+
+       IB_SEND_OPT_LOCAL
+               Indicates that a sent MAD request should be given to the local VPD for
+               processing.  MADs sent using this option are not placed on the wire.
+               This send option is only valid for MAD send operations.
+
+       IB_SEND_OPT_VEND_MASK
+               This mask indicates bits reserved in the send options that may be used
+               by the verbs provider to indicate vendor specific options.  Bits set
+               in this area of the send options are ignored by the Access Layer, but
+               may have specific meaning to the underlying VPD.
+
+
+

[Structures] +Access Layer/ib_send_wr_t

+

NAME

+
       ib_send_wr_t
+
+

DESCRIPTION

+
       Information used to submit a work request to the send queue of a queue
+       pair.
+
+

SYNOPSIS

+
typedef struct _ib_send_wr
+{
+        struct _ib_send_wr* __ptr64     p_next;
+        uint64_t                                        wr_id;
+        ib_wr_type_t                            wr_type;
+        ib_send_opt_t                           send_opt;
+        uint32_t                                        num_ds;
+        ib_local_ds_t* __ptr64          ds_array;
+        ib_net32_t                                      immediate_data;
+
+        union _send_dgrm
+        {
+                struct _send_ud
+                {
+                        ib_net32_t              remote_qp;
+                        ib_net32_t              remote_qkey;
+                        ib_av_handle_t  h_av;
+                        uint16_t                pkey_index;
+                        void* __ptr64   rsvd;
+
+                }       ud;
+
+                struct _send_raw_ether
+                {
+                        ib_net16_t              dest_lid;
+                        uint8_t                 path_bits;
+                        uint8_t                 sl;
+                        uint8_t                 max_static_rate;
+                        ib_net16_t              ether_type;
+
+                }       raw_ether;
+
+                struct _send_raw_ipv6
+                {
+                        ib_net16_t              dest_lid;
+                        uint8_t                 path_bits;
+                        uint8_t                 sl;
+                        uint8_t                 max_static_rate;
+
+                }       raw_ipv6;
+
+        }       dgrm;
+
+        struct _send_remote_ops
+        {
+                uint64_t                        vaddr;
+                net32_t                         rkey;
+
+                ib_net64_t                      atomic1;
+                ib_net64_t                      atomic2;
+
+        }       remote_ops;
+
+}       ib_send_wr_t;
+
+

FIELDS

+
       p_next
+               A pointer used to chain work requests together.  This permits multiple
+               work requests to be posted to a queue pair through a single function
+               call.  This value is set to NULL to mark the end of the chain.
+
+       wr_id
+               A 64-bit work request identifier that is returned to the consumer
+               as part of the work completion.
+
+       wr_type
+               The type of work request being submitted to the send queue.
+
+       send_opt
+               Optional send control parameters.
+
+       num_ds
+               Number of local data segments specified by this work request.
+
+       ds_array
+               A reference to an array of local data segments used by the send
+               operation.
+
+       immediate_data
+               32-bit field sent as part of a message send or RDMA write operation.
+               This field is only valid if the send_opt flag IB_SEND_OPT_IMMEDIATE
+               has been set.
+
+       dgrm.ud.remote_qp
+               Identifies the destination queue pair of an unreliable datagram send
+               operation.
+
+       dgrm.ud.remote_qkey
+               The qkey for the destination queue pair.
+
+       dgrm.ud.h_av
+               An address vector that specifies the path information used to route
+               the outbound datagram to the destination queue pair.
+
+       dgrm.ud.pkey_index
+               The pkey index for this send work request.  This is valid only
+               for IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.  The work request
+               is posted using this pkey index to build the GMP's BTH instead
+               of the QP's pkey.
+
+       dgrm.ud.rsvd
+               Reserved for use by the Access Layer.
+
+       dgrm.raw_ether.dest_lid
+               The destination LID that will receive this raw Ethernet send.
+
+       dgrm.raw_ether.path_bits
+               The path bits used to vary the source LID of the send.
+
+       dgrm.raw_ether.sl
+               The service level for the send.
+
+       dgrm.raw_ether.max_static_rate
+               The maximum static rate for the send.
+
+       dgrm.raw_ether.ether_type
+               The EtherType of the raw Ethernet packet.
+
+       dgrm.raw_ipv6.dest_lid
+               The destination LID that will receive this raw IPv6 send.
+
+       dgrm.raw_ipv6.path_bits
+               The path bits used to vary the source LID of the send.
+
+       dgrm.raw_ipv6.sl
+               The service level for the send.
+
+       dgrm.raw_ipv6.max_static_rate
+               The maximum static rate for the send.
+
+       remote_ops.vaddr
+               The registered virtual memory address of the remote memory to access
+               with an RDMA or atomic operation.
+
+       remote_ops.rkey
+               The rkey associated with the specified remote vaddr. This data must
+               be presented exactly as obtained from the remote node. No swapping
+               of data must be performed.
+
+       remote_ops.atomic1
+               The first operand for an atomic operation.
+
+       remote_ops.atomic2
+               The second operand for an atomic operation.
+
+

NOTES

+
       The format of data sent over the fabric is user-defined and is considered
+       opaque to the access layer.  The sole exception to this is MADs posted
+       to a MAD QP service.  MADs are expected to match the format defined by
+       the InfiniBand specification and must be in network-byte order when
+       posted to the MAD QP service.
+
+

SEE ALSO

+
       ib_wr_type_t, ib_local_ds_t, ib_send_opt_t
+
+
+
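EXAMPLE

       A minimal sketch of an RDMA write with immediate data; it assumes the
       IBAL entry point ib_post_send() and the WR_RDMA_WRITE work request
       type.  The remote vaddr/rkey must be used exactly as advertised by
       the peer; all other values are illustrative.

        ib_local_ds_t   ds;
        ib_send_wr_t    wr;

        cl_memclr( &wr, sizeof(wr) );
        ds.vaddr        = (uint64_t)(uintn_t)local_buf;
        ds.length       = 4096;
        ds.lkey         = lkey;

        wr.p_next               = NULL;
        wr.wr_id                = 1;
        wr.wr_type              = WR_RDMA_WRITE;
        wr.send_opt             = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_IMMEDIATE;
        wr.num_ds               = 1;
        wr.ds_array             = &ds;
        wr.immediate_data       = cl_hton32( 0x1234 );
        wr.remote_ops.vaddr     = remote_vaddr; /* exactly as received from the peer */
        wr.remote_ops.rkey      = remote_rkey;  /* no byte swapping */

        ib_post_send( h_qp, &wr, NULL );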

[Definitions] +Access Layer/ib_srq_attr_mask_t

+

NAME

+
       ib_srq_attr_mask_t
+
+

DESCRIPTION

+
       Indicates valid fields in the ib_srq_attr_t structure.
+
+

SYNOPSIS

+
typedef enum _ib_srq_attr_mask {
+        IB_SRQ_MAX_WR   = 1 << 0,
+        IB_SRQ_LIMIT    = 1 << 1,
+} ib_srq_attr_mask_t;
+
+
+

[Structures] +Access Layer/ib_srq_attr_t

+

NAME

+
       ib_srq_attr_t
+
+

DESCRIPTION

+
       Attributes used to initialize a shared receive queue at creation time.
+
+

SYNOPSIS

+
typedef struct _ib_srq_attr {
+        uint32_t                                max_wr;
+        uint32_t                                max_sge;
+        uint32_t                                srq_limit;
+} ib_srq_attr_t;
+
+

FIELDS

+
       max_wr
+               Specifies the maximum number of outstanding work requests on the SRQ.
+
+       max_sge
+               Specifies the maximum number of scatter/gather elements per work
+               request.
+
+       srq_limit
+               Specifies the low-water mark for the SRQ.
+
+

SEE ALSO

+
       ib_qp_type_t, ib_srq_attr_mask_t
+
+
+
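EXAMPLE

       A minimal sketch of filling the attributes for an SRQ with a
       low-water-mark event; the values are illustrative.

        ib_srq_attr_t   srq_attr;

        srq_attr.max_wr         = 512;  /* up to 512 outstanding receives */
        srq_attr.max_sge        = 4;
        srq_attr.srq_limit      = 16;   /* arm the limit event at 16 WRs */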

[Definitions] +Access Layer/ib_wc_status_t

+

NAME

+
       ib_wc_status_t
+
+

DESCRIPTION

+
       Indicates the status of a completed work request.  These values are
+       returned to the user when retrieving completions.  Note that success is
+       identified as IB_WCS_SUCCESS, which is always zero.
+
+

SYNOPSIS

+
typedef enum _ib_wc_status_t
+{
+        IB_WCS_SUCCESS,
+        IB_WCS_LOCAL_LEN_ERR,
+        IB_WCS_LOCAL_OP_ERR,
+        IB_WCS_LOCAL_PROTECTION_ERR,
+        IB_WCS_WR_FLUSHED_ERR,
+        IB_WCS_MEM_WINDOW_BIND_ERR,
+        IB_WCS_REM_ACCESS_ERR,
+        IB_WCS_REM_OP_ERR,
+        IB_WCS_RNR_RETRY_ERR,
+        IB_WCS_TIMEOUT_RETRY_ERR,
+        IB_WCS_REM_INVALID_REQ_ERR,
+        IB_WCS_BAD_RESP_ERR,
+        IB_WCS_LOCAL_ACCESS_ERR,
+        IB_WCS_GENERAL_ERR,
+        IB_WCS_UNMATCHED_RESPONSE,                      /* InfiniBand Access Layer */
+        IB_WCS_CANCELED,                                        /* InfiniBand Access Layer */
+        IB_WCS_UNKNOWN                                          /* Must be last. */
+
+}       ib_wc_status_t;
+
+

VALUES

+
       IB_WCS_SUCCESS
+               Work request completed successfully.
+
+       IB_WCS_LOCAL_LEN_ERR
+               Generated for a work request posted to the send queue when the
+               total of the data segment lengths exceeds the message length of the
+               channel.  Generated for a work request posted to the receive queue when
+               the total of the data segment lengths is too small for a
+               valid incoming message.
+
+       IB_WCS_LOCAL_OP_ERR
+               An internal QP consistency error was generated while processing this
+               work request.  This may indicate that the QP was in an incorrect state
+               for the requested operation.
+
+       IB_WCS_LOCAL_PROTECTION_ERR
+               The data segments of the locally posted work request did not refer to
+               a valid memory region.  The memory may not have been properly
+               registered for the requested operation.
+
+       IB_WCS_WR_FLUSHED_ERR
+               The work request was flushed from the QP before being completed.
+
+       IB_WCS_MEM_WINDOW_BIND_ERR
+               A memory window bind operation failed due to insufficient access
+               rights.
+
+       IB_WCS_REM_ACCESS_ERR
+               A protection error was detected at the remote node for an RDMA or
+               atomic operation.
+
+       IB_WCS_REM_OP_ERR
+               The operation could not be successfully completed at the remote node.
+               This may indicate that the remote QP was in an invalid state or
+               contained an invalid work request.
+
+       IB_WCS_RNR_RETRY_ERR
+               The RNR retry count was exceeded while trying to send this message.
+
+       IB_WCS_TIMEOUT_RETRY_ERR
+               The local transport timeout counter expired while trying to send this
+               message.
+
+       IB_WCS_REM_INVALID_REQ_ERR
+               The remote node detected an invalid message on the channel.  This
+               error is usually a result of one of the following:
+                       - The operation was not supported on the receive queue.
+                       - There were insufficient buffers to receive a new RDMA request.
+                       - There were insufficient buffers to receive a new atomic operation.
+                       - An RDMA request was larger than 2^31 bytes.
+
+       IB_WCS_BAD_RESP_ERR
+               An unexpected transport layer opcode was returned by the responder.
+
+       IB_WCS_LOCAL_ACCESS_ERR
+               A protection error occurred on a local data buffer during the
+               processing of an RDMA Write with Immediate Data operation sent
+               from the remote node.
+
+       IB_WCS_UNMATCHED_RESPONSE
+               A response MAD was received for which there was no matching send.  The
+               send operation may have been canceled by the user or may have timed
+               out.
+
+       IB_WCS_CANCELED
+               The completed work request was canceled by the user.
+
+       IB_WCS_GENERAL_ERR
+               Any other error.
+
+
+
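EXAMPLE

       A minimal sketch of checking the status of polled completions; it
       assumes the chained-completion form of ib_poll_cq(), where the free
       and done lists are threaded through the p_next member of ib_wc_t
       (described below).  handle_error() is an illustrative placeholder.

        ib_wc_t         wc[8];
        ib_wc_t*        p_free;
        ib_wc_t*        p_done;
        int             i;

        for( i = 0; i < 7; i++ )
                wc[i].p_next = &wc[i + 1];
        wc[7].p_next = NULL;
        p_free = &wc[0];

        if( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
        {
                for( ; p_done != NULL; p_done = p_done->p_next )
                {
                        /* IB_WCS_SUCCESS is always zero. */
                        if( p_done->status != IB_WCS_SUCCESS )
                                handle_error( p_done );
                }
        }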

[Structures] +Access Layer/ib_wc_t

+

NAME

+
       ib_wc_t
+
+

DESCRIPTION

+
       Work completion information.
+
+

SYNOPSIS

+
typedef struct _ib_wc
+{
+        struct _ib_wc* __ptr64  p_next;
+        uint64_t                                wr_id;
+        ib_wc_type_t                    wc_type;
+
+        uint32_t                                length;
+        ib_wc_status_t                  status;
+        uint64_t                                vendor_specific;
+
+        union _wc_recv
+        {
+                struct _wc_conn
+                {
+                        ib_recv_opt_t   recv_opt;
+                        ib_net32_t              immediate_data;
+
+                }       conn;
+
+                struct _wc_ud
+                {
+                        ib_recv_opt_t   recv_opt;
+                        ib_net32_t              immediate_data;
+                        ib_net32_t              remote_qp;
+                        uint16_t                pkey_index;
+                        ib_net16_t              remote_lid;
+                        uint8_t                 remote_sl;
+                        uint8_t                 path_bits;
+
+                }       ud;
+
+                struct _wc_raw_ipv6
+                {
+                        ib_net16_t              remote_lid;
+                        uint8_t                 remote_sl;
+                        uint8_t                 path_bits;
+
+                }       raw_ipv6;
+
+                struct _wc_raw_ether
+                {
+                        ib_net16_t              remote_lid;
+                        uint8_t                 remote_sl;
+                        uint8_t                 path_bits;
+                        ib_net16_t              ether_type;
+
+                }       raw_ether;
+
+        }       recv;
+
+}       ib_wc_t;
+
+

FIELDS

+
       p_next
+               A pointer used to chain work completions.  This permits multiple
+               work completions to be retrieved from a completion queue through a
+               single function call.  This value is set to NULL to mark the end of
+               the chain.
+
+       wr_id
+               The 64-bit work request identifier that was specified when posting the
+               work request.
+
+       wc_type
+               Indicates the type of work completion.
+
+       length
+               The total length of the data sent or received with the work request.
+
+       status
+               The result of the work request.
+
+       vendor_specific
+               HCA vendor specific information returned as part of the completion.
+
+       recv.conn.recv_opt
+               Indicates optional fields valid as part of a work request that
+               completed on a connected (reliable or unreliable) queue pair.
+
+       recv.conn.immediate_data
+               32-bit field received as part of an inbound message on a connected
+               queue pair.  This field is only valid if the recv_opt flag
+               IB_RECV_OPT_IMMEDIATE has been set.
+
+       recv.ud.recv_opt
+               Indicates optional fields valid as part of a work request that
+               completed on an unreliable datagram queue pair.
+
+       recv.ud.immediate_data
+               32-bit field received as part of an inbound message on an unreliable
+               datagram queue pair.  This field is only valid if the recv_opt flag
+               IB_RECV_OPT_IMMEDIATE has been set.
+
+       recv.ud.remote_qp
+               Identifies the source queue pair of a received datagram.
+
+       recv.ud.pkey_index
+               The pkey index of the source queue pair. This is valid only for
+               IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.
+
+       recv.ud.remote_lid
+               The source LID of the received datagram.
+
+       recv.ud.remote_sl
+               The service level used by the source of the received datagram.
+
+       recv.ud.path_bits
+               The path bits portion of the source LID of the received datagram.
+
+       recv.raw_ipv6.remote_lid
+               The source LID of the received message.
+
+       recv.raw_ipv6.remote_sl
+               The service level used by the source of the received message.
+
+       recv.raw_ipv6.path_bits
+               The path bits portion of the source LID of the received message.
+
+       recv.raw_ether.remote_lid
+               The source LID of the received message.
+
+       recv.raw_ether.remote_sl
+               The service level used by the source of the received message.
+
+       recv.raw_ether.path_bits
+               The path bits portion of the source LID of the received message.
+
+       recv.raw_ether.ether_type
+               The EtherType field of the received raw Ethernet message.
+
+

NOTES

+
       When the work request completes with error, the only values that the
+       consumer can depend on are the wr_id field, and the status of the
+       operation.
+
+       If the consumer is using the same CQ for completions from more than
+       one type of QP (i.e., Reliable Connected, Datagram, etc.), then the
+       consumer must have additional information to decide which fields of
+       the union are valid.
+
+
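+       The sketch below illustrates one way a consumer might drain a CQ and
+       walk the p_next chain, assuming the ib_poll_cq free-list/done-list
+       calling convention; handle_error() is a hypothetical consumer routine.
+
+/* Minimal sketch: exchange a free list for a done list and walk the chain. */
+static void
+drain_cq(
+        IN              const   ib_cq_handle_t                          h_cq )
+{
+        ib_wc_t         wc[8];
+        ib_wc_t         *p_free, *p_done;
+        uintn_t         i;
+
+        for( ;; )
+        {
+                /* Chain the local array into a free list via p_next. */
+                for( i = 0; i < 7; i++ )
+                        wc[i].p_next = &wc[i + 1];
+                wc[7].p_next = NULL;
+                p_free = wc;
+                p_done = NULL;
+
+                /* A non-success status (e.g. IB_NOT_FOUND) ends the loop. */
+                if( ib_poll_cq( h_cq, &p_free, &p_done ) != IB_SUCCESS )
+                        break;
+
+                for( ; p_done; p_done = p_done->p_next )
+                {
+                        /* On error, only wr_id and status are meaningful. */
+                        if( p_done->status != IB_WCS_SUCCESS )
+                                handle_error( p_done->wr_id, p_done->status );
+                }
+        }
+}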

SEE ALSO

+
       ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t
+
+
+
+ +

[Definitions] +Access Layer/ib_wc_type_t

+ +

[top][parent][index]

+

NAME

+
       ib_wc_type_t
+
+

DESCRIPTION

+
       Indicates the type of work completion.
+
+

SYNOPSIS

+
typedef enum _ib_wc_type_t
+{
+        IB_WC_SEND,
+        IB_WC_RDMA_WRITE,
+        IB_WC_RECV,
+        IB_WC_RDMA_READ,
+        IB_WC_MW_BIND,
+        IB_WC_FETCH_ADD,
+        IB_WC_COMPARE_SWAP,
+        IB_WC_RECV_RDMA_WRITE,
+        IB_WC_UNKNOWN
+
+}       ib_wc_type_t;
+
+
+
+ +

[Definitions] +Access Layer/ib_wr_type_t

+ +

[top][parent][index]

+

NAME

+
       ib_wr_type_t
+
+

DESCRIPTION

+
       Identifies the type of work request posted to a queue pair.
+
+

SYNOPSIS

+
typedef enum _ib_wr_type_t
+{
+        WR_SEND = 1,
+        WR_RDMA_WRITE,
+        WR_RDMA_READ,
+        WR_COMPARE_SWAP,
+        WR_FETCH_ADD,
+        WR_UNKNOWN
+
+}       ib_wr_type_t;
+
+
+
+ +

[Structures] +Access Layer/mlnx_fmr_create_t

+ +

[top][parent][index]

+

NAME

+
       mlnx_fmr_create_t
+
+

DESCRIPTION

+
       Information required to create a Mellanox fast memory region.
+
+

SYNOPSIS

+
typedef struct _mlnx_fmr_create
+{
+        int                                     max_pages;
+        int                                     max_maps;
+        uint8_t                         page_size;
+        ib_access_t                     access_ctrl;
+
+}       mlnx_fmr_create_t;
+
+

FIELDS

+
       max_pages
+               Maximum number of pages in the region.
+
+       max_maps
+               Maximum number of times the region can be mapped before
+               remapping is required.
+
+       page_size
+               log2 of the page size (e.g. 12 for 4KB pages).
+
+       access_ctrl
+               Access rights of the registered region.
+
+

NOTES

+
       This is a Mellanox specific extension to verbs.
+
+
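+       A minimal sketch of filling this structure for 4KB pages (values are
+       illustrative only; the creation call itself is not shown here, and
+       IB_AC_LOCAL_WRITE is one of the ib_access_t flags):
+
+mlnx_fmr_create_t       fmr_create;
+
+fmr_create.max_pages   = 64;    /* up to 64 pages per mapping */
+fmr_create.max_maps    = 32;    /* remap limit before a flush is needed */
+fmr_create.page_size   = 12;    /* log2(4096) = 12 -> 4KB pages */
+fmr_create.access_ctrl = IB_AC_LOCAL_WRITE;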

SEE ALSO

+
       ib_access_t
+
+
+
+ +

[Structures] +IBA Base: Constants/IB_CLASS_CAP_GETSET

+ +

[top][index]

+

NAME

+
       IB_CLASS_CAP_GETSET
+
+

DESCRIPTION

+
       ClassPortInfo CapabilityMask bits.  This bit will be set
+       if the class supports Get(Notice) and Set(Notice) MADs (13.4.8.1).
+
+

SEE ALSO

+
       ib_class_port_info_t, IB_CLASS_CAP_TRAP
+
+

SOURCE

+
#define IB_CLASS_CAP_GETSET                                     0x0002
+
+
+
+ +

[Structures] +IBA Base: Constants/IB_CLASS_CAP_TRAP

+ +

[top][index]

+

NAME

+
       IB_CLASS_CAP_TRAP
+
+

DESCRIPTION

+
       ClassPortInfo CapabilityMask bits.  This bit will be set
+       if the class supports Trap() MADs (13.4.8.1).
+
+

SEE ALSO

+
       ib_class_port_info_t, IB_CLASS_CAP_GETSET
+
+

SOURCE

+
#define IB_CLASS_CAP_TRAP                                       0x0001
+
+
+
+ +

[Structures] +IBA Base: Constants/IB_CLASS_RESP_TIME_MASK

+ +

[top][index]

+

NAME

+
       IB_CLASS_RESP_TIME_MASK
+
+

DESCRIPTION

+
       Mask bits to extract the response time value from the
+       resp_time_val field of ib_class_port_info_t.
+
+

SEE ALSO

+
       ib_class_port_info_t
+
+

SOURCE

+
#define IB_CLASS_RESP_TIME_MASK                         0x1F
+
+
+
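+       For example, a sketch of extracting the response time value from a
+       received ClassPortInfo attribute (p_cpi is a hypothetical pointer to
+       an ib_class_port_info_t):
+
+uint8_t resp_time;
+
+/* Byte-swap the field, then keep the low 5 bits. */
+resp_time = (uint8_t)(CL_NTOH32( p_cpi->resp_time_val ) &
+                IB_CLASS_RESP_TIME_MASK);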
+ +

[Definitions] +IBA Base: Constants/IB_DEFAULT_PARTIAL_PKEY

+ +

[top][index]

+

NAME

+
       IB_DEFAULT_PARTIAL_PKEY 
+
+

DESCRIPTION

+
       The default P_Key with limited (partial) membership: 0x7FFF in network order.
+
+

SOURCE

+
#define IB_DEFAULT_PARTIAL_PKEY                                (CL_HTON16(0x7FFF))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_DEFAULT_PKEY

+ +

[top][index]

+

NAME

+
       IB_DEFAULT_PKEY
+
+

DESCRIPTION

+
       P_Key value for the default partition.
+
+

SOURCE

+
#define IB_DEFAULT_PKEY                                         0xFFFF
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_DEFAULT_SUBNET_PREFIX

+ +

[top][index]

+

NAME

+
       IB_DEFAULT_SUBNET_PREFIX
+
+

DESCRIPTION

+
       Default subnet GID prefix.
+
+

SOURCE

+
#define IB_DEFAULT_SUBNET_PREFIX                        (CL_HTON64(0xFE80000000000000ULL))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_INVALID_PORT_NUM

+ +

[top][index]

+

NAME

+
       IB_INVALID_PORT_NUM
+
+

DESCRIPTION

+
       Value used to indicate an invalid port number (14.2.5.10).
+
+

SOURCE

+
#define IB_INVALID_PORT_NUM                                     0xFF
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_LID_MCAST_END

+ +

[top][index]

+

NAME

+
       IB_LID_MCAST_END
+
+

DESCRIPTION

+
       Highest valid multicast LID value.
+
+

SOURCE

+
#define IB_LID_MCAST_END_HO                                     0xFFFE
+#define IB_LID_MCAST_END                                        (CL_HTON16(IB_LID_MCAST_END_HO))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_LID_MCAST_START

+ +

[top][index]

+

NAME

+
       IB_LID_MCAST_START
+
+

DESCRIPTION

+
       Lowest valid multicast LID value.
+
+

SOURCE

+
#define IB_LID_MCAST_START_HO                           0xC000
+#define IB_LID_MCAST_START                                      (CL_HTON16(IB_LID_MCAST_START_HO))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_LID_PERMISSIVE

+ +

[top][index]

+

NAME

+
       IB_LID_PERMISSIVE
+
+

DESCRIPTION

+
       Permissive LID
+
+

SOURCE

+
#define IB_LID_PERMISSIVE                                       0xFFFF
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_LID_UCAST_END

+ +

[top][index]

+

NAME

+
       IB_LID_UCAST_END
+
+

DESCRIPTION

+
       Highest valid unicast LID value.
+
+

SOURCE

+
#define IB_LID_UCAST_END_HO                                     0xBFFF
+#define IB_LID_UCAST_END                                        (CL_HTON16(IB_LID_UCAST_END_HO))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_LID_UCAST_START

+ +

[top][index]

+

NAME

+
       IB_LID_UCAST_START
+
+

DESCRIPTION

+
       Lowest valid unicast LID value.
+
+

SOURCE

+
#define IB_LID_UCAST_START_HO                           0x0001
+#define IB_LID_UCAST_START                                      (CL_HTON16(IB_LID_UCAST_START_HO))
+
+
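+       Taken together with IB_LID_MCAST_START/END and IB_LID_PERMISSIVE,
+       these bounds allow a simple LID classifier (a sketch; lid_ho is
+       assumed to be already byte-swapped to host order):
+
+static inline boolean_t
+ib_lid_is_unicast_ho(
+        IN              const   uint16_t                                        lid_ho )
+{
+        return( lid_ho >= IB_LID_UCAST_START_HO &&
+                lid_ho <= IB_LID_UCAST_END_HO );
+}
+
+static inline boolean_t
+ib_lid_is_mcast_ho(
+        IN              const   uint16_t                                        lid_ho )
+{
+        return( lid_ho >= IB_LID_MCAST_START_HO &&
+                lid_ho <= IB_LID_MCAST_END_HO );
+}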
+
+ +

[Definitions] +IBA Base: Constants/ib_link_states_t

+ +

[top][index]

+

NAME

+
       ib_link_states_t
+
+

DESCRIPTION

+
       Defines the link states of a port.
+
+

SOURCE

+
#define IB_LINK_NO_CHANGE 0
+#define IB_LINK_DOWN      1
+#define IB_LINK_INIT      2
+#define IB_LINK_ARMED     3
+#define IB_LINK_ACTIVE    4
+#define IB_LINK_ACT_DEFER 5
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_CLASS_PORT_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_CLASS_PORT_INFO
+
+

DESCRIPTION

+
       ClassPortInfo attribute (13.4.8)
+
+

SOURCE

+
#define IB_MAD_ATTR_CLASS_PORT_INFO                     (CL_NTOH16(0x0001))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_DIAG_CODE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_DIAG_CODE
+
+

DESCRIPTION

+
       DiagCode attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_DIAG_CODE                           (CL_NTOH16(0x0024))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT
+
+

DESCRIPTION

+
       DiagnosticTimeout attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT          (CL_NTOH16(0x0020))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_GUID_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_GUID_INFO
+
+

DESCRIPTION

+
       GUIDInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_GUID_INFO                           (CL_NTOH16(0x0014))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_GUIDINFO_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_GUIDINFO_RECORD
+
+

DESCRIPTION

+
       GuidInfoRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_GUIDINFO_RECORD                     (CL_NTOH16(0x0030))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_INFORM_INFO
+
+

DESCRIPTION

+
       InformInfo attribute (13.4.8)
+
+

SOURCE

+
#define IB_MAD_ATTR_INFORM_INFO                         (CL_NTOH16(0x0003))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_INFORM_INFO_RECORD
+
+

DESCRIPTION

+
       InformInfo Record attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_INFORM_INFO_RECORD                  (CL_NTOH16(0x00F3))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_IO_CONTROLLER_PROFILE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_IO_CONTROLLER_PROFILE
+
+

DESCRIPTION

+
       IOControllerProfile attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_IO_CONTROLLER_PROFILE       (CL_NTOH16(0x0011))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_IO_UNIT_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_IO_UNIT_INFO
+
+

DESCRIPTION

+
       IOUnitInfo attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_IO_UNIT_INFO                        (CL_NTOH16(0x0010))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_LED_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_LED_INFO
+
+

DESCRIPTION

+
       LedInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_LED_INFO                            (CL_NTOH16(0x0031))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_LFT_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_LFT_RECORD
+
+

DESCRIPTION

+
       LinearForwardingTableRecord attribute (15.2.5.6)
+
+

SOURCE

+
#define IB_MAD_ATTR_LFT_RECORD                  (CL_NTOH16(0x0015))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_LIN_FWD_TBL
+
+

DESCRIPTION

+
       Switch linear forwarding table
+
+

SOURCE

+
#define IB_MAD_ATTR_LIN_FWD_TBL                         (CL_NTOH16(0x0019))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_LINK_RECORD
+
+

DESCRIPTION

+
       LinkRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_LINK_RECORD                         (CL_NTOH16(0x0020))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_MCAST_FWD_TBL
+
+

DESCRIPTION

+
       Switch multicast forwarding table
+
+

SOURCE

+
#define IB_MAD_ATTR_MCAST_FWD_TBL                       (CL_NTOH16(0x001B))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_MCMEMBER_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_MCMEMBER_RECORD
+
+

DESCRIPTION

+
       MCMemberRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_MCMEMBER_RECORD                     (CL_NTOH16(0x0038))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_MULTIPATH_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_MULTIPATH_RECORD
+
+

DESCRIPTION

+
       MultiPathRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_MULTIPATH_RECORD                    (CL_NTOH16(0x003A))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_NODE_DESC

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_NODE_DESC
+
+

DESCRIPTION

+
       NodeDescription attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_NODE_DESC                           (CL_NTOH16(0x0010))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_NODE_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_NODE_INFO
+
+

DESCRIPTION

+
       NodeInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_NODE_INFO                           (CL_NTOH16(0x0011))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_NODE_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_NODE_RECORD
+
+

DESCRIPTION

+
       NodeRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_NODE_RECORD                         (CL_NTOH16(0x0011))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_NOTICE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_NOTICE
+
+

DESCRIPTION

+
       Notice attribute (13.4.8)
+
+

SOURCE

+
#define IB_MAD_ATTR_NOTICE                                      (CL_NTOH16(0x0002))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_P_KEY_TABLE
+
+

DESCRIPTION

+
       PartitionTable attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_P_KEY_TABLE                         (CL_NTOH16(0x0016))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PATH_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PATH_RECORD
+
+

DESCRIPTION

+
       PathRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_PATH_RECORD                         (CL_NTOH16(0x0035))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PKEY_TBL_RECORD
+
+

DESCRIPTION

+
       PKEY Table Record attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_PKEY_TBL_RECORD                     (CL_NTOH16(0x0033))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PORT_CNTRS
+
+

DESCRIPTION

+
       PortCounters attribute (16.1.2)
+
+

SOURCE

+
#define IB_MAD_ATTR_PORT_CNTRS                          (CL_NTOH16(0x0012))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PORT_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PORT_INFO
+
+

DESCRIPTION

+
       PortInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_PORT_INFO                           (CL_NTOH16(0x0015))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PORT_SMPL_CTRL
+
+

DESCRIPTION

+
       PortSamplesControl attribute (16.1.2)
+
+

SOURCE

+
#define IB_MAD_ATTR_PORT_SMPL_CTRL                      (CL_NTOH16(0x0010))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PORT_SMPL_RSLT
+
+

DESCRIPTION

+
       PortSamplesResult attribute (16.1.2)
+
+

SOURCE

+
#define IB_MAD_ATTR_PORT_SMPL_RSLT                      (CL_NTOH16(0x0011))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PORTINFO_RECORD
+
+

DESCRIPTION

+
       PortInfoRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_PORTINFO_RECORD                     (CL_NTOH16(0x0012))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_PREPARE_TO_TEST

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_PREPARE_TO_TEST
+
+

DESCRIPTION

+
       PrepareToTest attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_PREPARE_TO_TEST                     (CL_NTOH16(0x0021))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_RND_FWD_TBL
+
+

DESCRIPTION

+
       Switch random forwarding table
+
+

SOURCE

+
#define IB_MAD_ATTR_RND_FWD_TBL                         (CL_NTOH16(0x001A))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SERVICE_ENTRIES

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SERVICE_ENTRIES
+
+

DESCRIPTION

+
       ServiceEntries attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_SERVICE_ENTRIES                     (CL_NTOH16(0x0012))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SERVICE_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SERVICE_RECORD
+
+

DESCRIPTION

+
       ServiceRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SERVICE_RECORD                      (CL_NTOH16(0x0031))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SLVL_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SLVL_RECORD
+
+

DESCRIPTION

+
       SLtoVL Mapping Table Record attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SLVL_RECORD                         (CL_NTOH16(0x0013))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SLVL_TABLE
+
+

DESCRIPTION

+
       SL VL Mapping Table attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SLVL_TABLE                          (CL_NTOH16(0x0017))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SM_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SM_INFO
+
+

DESCRIPTION

+
       SMInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SM_INFO                                     (CL_NTOH16(0x0020))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SMINFO_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SMINFO_RECORD
+
+

DESCRIPTION

+
       SmInfoRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SMINFO_RECORD                       (CL_NTOH16(0x0018))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SVC_ASSOCIATION_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SVC_ASSOCIATION_RECORD
+
+

DESCRIPTION

+
       Service Association Record attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SVC_ASSOCIATION_RECORD              (CL_NTOH16(0x003B))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_SWITCH_INFO
+
+

DESCRIPTION

+
       SwitchInfo attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_SWITCH_INFO                         (CL_NTOH16(0x0012))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_LOOP

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_TEST_DEVICE_LOOP
+
+

DESCRIPTION

+
       TestDeviceLoop attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_TEST_DEVICE_LOOP            (CL_NTOH16(0x0023))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_ONCE

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_TEST_DEVICE_ONCE
+
+

DESCRIPTION

+
       TestDeviceOnce attribute (16.3.3)
+
+

SOURCE

+
#define IB_MAD_ATTR_TEST_DEVICE_ONCE            (CL_NTOH16(0x0022))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_TRACE_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_TRACE_RECORD
+
+

DESCRIPTION

+
       TraceRecord attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_TRACE_RECORD                        (CL_NTOH16(0x0039))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_VENDOR_DIAG

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_VENDOR_DIAG
+
+

DESCRIPTION

+
       VendorDiag attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_VENDOR_DIAG                         (CL_NTOH16(0x0030))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_VL_ARBITRATION
+
+

DESCRIPTION

+
       VL Arbitration Table attribute (14.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_VL_ARBITRATION                      (CL_NTOH16(0x0018))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_ATTR_VLARB_RECORD

+ +

[top][index]

+

NAME

+
       IB_MAD_ATTR_VLARB_RECORD
+
+

DESCRIPTION

+
       VL Arbitration Table Record attribute (15.2.5)
+
+

SOURCE

+
#define IB_MAD_ATTR_VLARB_RECORD                        (CL_NTOH16(0x0036))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_GET

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_GET
+
+

DESCRIPTION

+
       Get() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_GET                                       0x01
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_GET_RESP

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_GET_RESP
+
+

DESCRIPTION

+
       GetResp() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_GET_RESP                          0x81
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_GETTABLE

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_GETTABLE
+
+

DESCRIPTION

+
       SubnAdmGetTable() Method (15.2.2)
+
+

SOURCE

+
#define IB_MAD_METHOD_GETTABLE                          0x12
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_GETTABLE_RESP

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_GETTABLE_RESP
+
+

DESCRIPTION

+
       SubnAdmGetTableResp() Method (15.2.2)
+
+

SOURCE

+
#define IB_MAD_METHOD_GETTABLE_RESP                     0x92
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_REPORT

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_REPORT
+
+

DESCRIPTION

+
       Report() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_REPORT                            0x06
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_REPORT_RESP
+
+

DESCRIPTION

+
       ReportResp() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_REPORT_RESP                       0x86
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_RESP_MASK

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_RESP_MASK
+
+

DESCRIPTION

+
       Response mask to extract 'R' bit from the method field. (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_RESP_MASK                         0x80
+
+
+
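+       For example, a sketch of testing whether a received MAD is a response
+       (p_mad is a hypothetical pointer to the common MAD header, ib_mad_t):
+
+if( p_mad->method & IB_MAD_METHOD_RESP_MASK )
+{
+        /* GetResp(), GetTableResp(), ReportResp(), etc. */
+        process_response( p_mad );      /* hypothetical handler */
+}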
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_SEND

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_SEND
+
+

DESCRIPTION

+
       Send() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_SEND                                      0x03
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_SET

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_SET
+
+

DESCRIPTION

+
       Set() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_SET                                       0x02
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_TRAP

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_TRAP
+
+

DESCRIPTION

+
       Trap() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_TRAP                                      0x05
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS

+ +

[top][index]

+

NAME

+
       IB_MAD_METHOD_TRAP_REPRESS
+
+

DESCRIPTION

+
       TrapRepress() Method (13.4.5)
+
+

SOURCE

+
#define IB_MAD_METHOD_TRAP_REPRESS                      0x07
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_BUSY

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_BUSY
+
+

DESCRIPTION

+
       Temporarily busy, MAD discarded (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_BUSY                                      (CL_HTON16(0x0001))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_INVALID_FIELD
+
+

DESCRIPTION

+
       Attribute contains one or more invalid fields (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_INVALID_FIELD                     (CL_HTON16(0x001C))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_REDIRECT

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_REDIRECT
+
+

DESCRIPTION

+
       QP Redirection required (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_REDIRECT                          (CL_HTON16(0x0002))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_UNSUP_CLASS_VER
+
+

DESCRIPTION

+
       Unsupported class version (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_UNSUP_CLASS_VER           (CL_HTON16(0x0004))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_UNSUP_METHOD
+
+

DESCRIPTION

+
       Unsupported method (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_UNSUP_METHOD                      (CL_HTON16(0x0008))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR

+ +

[top][index]

+

NAME

+
       IB_MAD_STATUS_UNSUP_METHOD_ATTR
+
+

DESCRIPTION

+
       Unsupported method/attribute combination (13.4.7)
+
+

SOURCE

+
#define IB_MAD_STATUS_UNSUP_METHOD_ATTR         (CL_HTON16(0x000C))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MAX_METHOD

+ +

[top][index]

+

NAME

+
       IB_MAX_METHODS
+
+

DESCRIPTION

+
       Total number of methods available to a class, not including the R-bit.
+
+

SOURCE

+
#define IB_MAX_METHODS                                          128
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_BLOCK_ID_MASK_HO

+ +

[top][index]

+

NAME

+
       IB_MCAST_BLOCK_ID_MASK_HO
+
+

DESCRIPTION

+
       Mask (host order) to recover the Multicast block ID.
+
+

SOURCE

+
#define IB_MCAST_BLOCK_ID_MASK_HO                       0x000001FF
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_BLOCK_SIZE

+ +

[top][index]

+

NAME

+
       IB_MCAST_BLOCK_SIZE
+
+

DESCRIPTION

+
       Number of port mask entries in a multicast forwarding table block.
+
+

SOURCE

+
#define IB_MCAST_BLOCK_SIZE                                     32
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_MASK_SIZE

+ +

[top][index]

+

NAME

+
       IB_MCAST_MASK_SIZE
+
+

DESCRIPTION

+
       Number of port mask bits in each entry in the multicast forwarding table.
+
+

SOURCE

+
#define IB_MCAST_MASK_SIZE                                      16
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_MAX_BLOCK_ID

+ +

[top][index]

+

NAME

+
       IB_MCAST_MAX_BLOCK_ID
+
+

DESCRIPTION

+
       Maximum number of Multicast port mask blocks
+
+

SOURCE

+
#define IB_MCAST_MAX_BLOCK_ID                           511
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_POSITION_MASK_HO

+ +

[top][index]

+

NAME

+
       IB_MCAST_POSITION_MASK_HO
+
+

DESCRIPTION

+
       Mask (host order) to recover the multicast block position.
+
+

SOURCE

+
#define IB_MCAST_POSITION_MASK_HO                               0xF0000000
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_POSITION_MAX

+ +

[top][index]

+

NAME

+
       IB_MCAST_POSITION_MAX
+
+

DESCRIPTION

+
       Maximum value for the multicast block position.
+
+

SOURCE

+
#define IB_MCAST_POSITION_MAX                           0xF
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCAST_POSITION_SHIFT

+ +

[top][index]

+

NAME

+
       IB_MCAST_POSITION_SHIFT
+
+

DESCRIPTION

+
       Shift value to normalize the multicast block position value.
+
+

SOURCE

+
#define IB_MCAST_POSITION_SHIFT                         28
+
+
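+       Combining the three definitions above, a sketch of decoding the block
+       ID and position from a MulticastForwardingTable attribute modifier
+       (attr_mod_ho is assumed to be already converted to host order):
+
+uint32_t        block_id, position;
+
+block_id = attr_mod_ho & IB_MCAST_BLOCK_ID_MASK_HO;
+position = (attr_mod_ho & IB_MCAST_POSITION_MASK_HO) >>
+                IB_MCAST_POSITION_SHIFT;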
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_BIS

+ +

[top][index]

+

NAME

+
       IB_MCLASS_BIS
+
+

DESCRIPTION

+
       Management Class, BIS
+
+

SOURCE

+
#define IB_MCLASS_BIS 0x12
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_BM

+ +

[top][index]

+

NAME

+
       IB_MCLASS_BM
+
+

DESCRIPTION

+
       Management Class, Baseboard Management (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_BM                                            0x05
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_COMM_MGMT

+ +

[top][index]

+

NAME

+
       IB_MCLASS_COMM_MGMT
+
+

DESCRIPTION

+
       Management Class, Communication Management (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_COMM_MGMT                                     0x07
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_DEV_ADM

+ +

[top][index]

+

NAME

+
       IB_MCLASS_DEV_ADM
+
+

DESCRIPTION

+
       Management Class, Device Administration
+
+

SOURCE

+
#define IB_MCLASS_DEV_ADM 0x10
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_DEV_MGMT

+ +

[top][index]

+

NAME

+
       IB_MCLASS_DEV_MGMT
+
+

DESCRIPTION

+
       Management Class, Device Management (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_DEV_MGMT                                      0x06
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_PERF

+ +

[top][index]

+

NAME

+
       IB_MCLASS_PERF
+
+

DESCRIPTION

+
       Management Class, Performance Management (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_PERF                                          0x04
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_SNMP

+ +

[top][index]

+

NAME

+
       IB_MCLASS_SNMP
+
+

DESCRIPTION

+
       Management Class, SNMP Tunneling (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_SNMP                                          0x08
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_SUBN_ADM

+ +

[top][index]

+

NAME

+
       IB_MCLASS_SUBN_ADM
+
+

DESCRIPTION

+
       Management Class, Subnet Administration (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_SUBN_ADM                                      0x03
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_SUBN_DIR

+ +

[top][index]

+

NAME

+
       IB_MCLASS_SUBN_DIR
+
+

DESCRIPTION

+
       Subnet Management Class, Subnet Manager directed route (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_SUBN_DIR                                      0x81
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_SUBN_LID

+ +

[top][index]

+

NAME

+
       IB_MCLASS_SUBN_LID
+
+

DESCRIPTION

+
       Subnet Management Class, Subnet Manager LID routed (13.4.4)
+
+

SOURCE

+
#define IB_MCLASS_SUBN_LID                                      0x01
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MAX

+ +

[top][index]

+

NAME

+
       IB_MCLASS_VENDOR_HIGH_RANGE_MAX
+
+

DESCRIPTION

+
       Management Class, Vendor Specific High Range End
+
+

SOURCE

+
#define IB_MCLASS_VENDOR_HIGH_RANGE_MAX 0x4f
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MIN

+ +

[top][index]

+

NAME

+
       IB_MCLASS_VENDOR_HIGH_RANGE_MIN
+
+

DESCRIPTION

+
       Management Class, Vendor Specific High Range Start
+
+

SOURCE

+
#define IB_MCLASS_VENDOR_HIGH_RANGE_MIN 0x30
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MAX

+ +

[top][index]

+

NAME

+
       IB_MCLASS_VENDOR_LOW_RANGE_MAX
+
+

DESCRIPTION

+
       Management Class, Vendor Specific Low Range End
+
+

SOURCE

+
#define IB_MCLASS_VENDOR_LOW_RANGE_MAX 0x0f
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MIN

+ +

[top][index]

+

NAME

+
       IB_MCLASS_VENDOR_LOW_RANGE_MIN
+
+

DESCRIPTION

+
       Management Class, Vendor Specific Low Range Start
+
+

SOURCE

+
#define IB_MCLASS_VENDOR_LOW_RANGE_MIN 0x09
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_MTU_LEN_TYPE

+ +

[top][index]

+

NAME

+
       IB_MTU_LEN_TYPE
+
+

DESCRIPTION

+
       Encoded path MTU.
+               1: 256
+               2: 512
+               3: 1024
+               4: 2048
+               5: 4096
+               others: reserved
+
+

SOURCE

+
#define IB_MTU_LEN_256                                                  1
+#define IB_MTU_LEN_512                                                  2
+#define IB_MTU_LEN_1024                                                 3
+#define IB_MTU_LEN_2048                                                 4
+#define IB_MTU_LEN_4096                                                 5
+
+#define IB_MIN_MTU    IB_MTU_LEN_256
+#define IB_MAX_MTU    IB_MTU_LEN_4096
+
+
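+       Since each step doubles the MTU starting at 256 bytes, the encoding
+       can be expanded with a shift (a sketch; returns 0 for reserved codes):
+
+static inline uint32_t
+ib_mtu_to_bytes(
+        IN              const   uint8_t                                         mtu_code )
+{
+        if( mtu_code < IB_MIN_MTU || mtu_code > IB_MAX_MTU )
+                return( 0 );
+        /* 128 << 1 = 256, ..., 128 << 5 = 4096 */
+        return( (uint32_t)(128 << mtu_code) );
+}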
+
+ +

[Definitions] +IBA Base: Constants/IB_MULTIPATH_REC_BASE_MASK

+ +

[top][index]

+

NAME

+
       IB_MULTIPATH_REC_BASE_MASK
+
+

DESCRIPTION

+
       Mask for the base value field for multipath record MTU, rate,
+       and packet lifetime.
+
+

SOURCE

+
#define IB_MULTIPATH_REC_BASE_MASK                      0x3F
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NODE_NUM_PORTS_MAX

+ +

[top][index]

+

NAME

+
       IB_NODE_NUM_PORTS_MAX
+
+

DESCRIPTION

+
       Maximum number of ports in a single node (14.2.5.7).
+
+

SOURCE

+
#define IB_NODE_NUM_PORTS_MAX                           0xFE
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NODE_TYPE_CA

+ +

[top][index]

+

NAME

+
       IB_NODE_TYPE_CA
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NODE_TYPE_CA                                         0x01
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NODE_TYPE_ROUTER

+ +

[top][index]

+

NAME

+
       IB_NODE_TYPE_ROUTER
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NODE_TYPE_ROUTER                                     0x03
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NODE_TYPE_SWITCH

+ +

[top][index]

+

NAME

+
       IB_NODE_TYPE_SWITCH
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NODE_TYPE_SWITCH                                     0x02
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NOTICE_NODE_TYPE_CA

+ +

[top][index]

+

NAME

+
       IB_NOTICE_NODE_TYPE_CA
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NOTICE_NODE_TYPE_CA                          (CL_NTOH32(0x000001))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NOTICE_NODE_TYPE_ROUTER

+ +

[top][index]

+

NAME

+
       IB_NOTICE_NODE_TYPE_ROUTER
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NOTICE_NODE_TYPE_ROUTER                      (CL_NTOH32(0x000003))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NOTICE_NODE_TYPE_SUBN_MGMT

+ +

[top][index]

+

NAME

+
       IB_NOTICE_NODE_TYPE_SUBN_MGMT
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2).
+       Note that this value is not defined for the NodeType field
+       of the NodeInfo attribute (14.2.5.3).
+
+

SOURCE

+
#define IB_NOTICE_NODE_TYPE_SUBN_MGMT           (CL_NTOH32(0x000004))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_NOTICE_NODE_TYPE_SWITCH

+ +

[top][index]

+

NAME

+
       IB_NOTICE_NODE_TYPE_SWITCH
+
+

DESCRIPTION

+
       Encoded generic node type used in MAD attributes (13.4.8.2)
+
+

SOURCE

+
#define IB_NOTICE_NODE_TYPE_SWITCH                      (CL_NTOH32(0x000002))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PATH_REC_BASE_MASK

+ +

[top][index]

+

NAME

+
       IB_PATH_REC_BASE_MASK
+
+

DESCRIPTION

+
       Mask for the base value field for path record MTU, rate,
+       and packet lifetime.
+
+

SOURCE

+
#define IB_PATH_REC_BASE_MASK                           0x3F
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK

+ +

[top][index]

+

NAME

+
       IB_PATH_REC_SELECTOR_MASK
+
+

DESCRIPTION

+
       Mask for the selector field for path record MTU, rate,
+       and packet lifetime.
+
+

SOURCE

+
#define IB_PATH_REC_SELECTOR_MASK                       0xC0
+
+
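+       Together with IB_PATH_REC_BASE_MASK, this splits the packed byte into
+       its selector (upper two bits) and base value (a sketch; 'packed' is a
+       hypothetical raw field such as a path record MTU byte):
+
+uint8_t selector, base;
+
+selector = (uint8_t)((packed & IB_PATH_REC_SELECTOR_MASK) >> 6);
+base     = (uint8_t)(packed & IB_PATH_REC_BASE_MASK);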
+
+ +

[Definitions] +IBA Base: Constants/IB_PATH_SELECTOR_TYPE

+ +

[top][index]

+

NAME

+
       IB_PATH_SELECTOR_TYPE
+
+

DESCRIPTION

+
       Path selector.
+               0: greater than specified
+               1: less than specified
+               2: exactly the specified
+               3: largest available
+
+

SOURCE

+
#define IB_PATH_SELECTOR_GREATER_THAN           0
+#define IB_PATH_SELECTOR_LESS_THAN                      1
+#define IB_PATH_SELECTOR_EXACTLY                        2
+#define IB_PATH_SELECTOR_LARGEST                        3
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PKEY_BASE_MASK

+ +

[top][index]

+

NAME

+
       IB_PKEY_BASE_MASK
+
+

DESCRIPTION

+
       Mask for the base P_Key value in a P_Key entry.
+
+

SOURCE

+
#define IB_PKEY_BASE_MASK                                       (CL_HTON16(0x7FFF))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PKEY_ENTRIES_MAX

+ +

[top][index]

+

NAME

+
       IB_PKEY_ENTRIES_MAX
+
+

DESCRIPTION

+
       Maximum number of PKEY entries per port (14.2.5.7).
+
+

SOURCE

+
#define IB_PKEY_ENTRIES_MAX (IB_PKEY_MAX_BLOCKS * IB_PKEY_BLOCK_SIZE)
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PKEY_MAX_BLOCKS

+ +

[top][index]

+

NAME

+
       IB_PKEY_MAX_BLOCKS
+
+

DESCRIPTION

+
       Maximum number of PKEY blocks (14.2.5.7).
+
+

SOURCE

+
#define IB_PKEY_MAX_BLOCKS                                      2048
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_PKEY_TYPE_MASK

+ +

[top][index]

+

NAME

+
       IB_PKEY_TYPE_MASK
+
+

DESCRIPTION

+
       Mask for the P_Key membership type in a P_Key entry.
+
+

SOURCE

+
#define IB_PKEY_TYPE_MASK                                       (CL_NTOH16(0x8000))
+
+
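+       For example, a sketch of testing full membership for a network-order
+       P_Key entry (the membership bit set indicates a full member):
+
+static inline boolean_t
+ib_pkey_is_full_member(
+        IN              const   ib_net16_t                                      pkey )
+{
+        return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK );
+}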
+
+ +

[Definitions] +IBA Base: Constants/IB_QP1_WELL_KNOWN_Q_KEY

+ +

[top][index]

+

NAME

+
       IB_QP1_WELL_KNOWN_Q_KEY
+
+

DESCRIPTION

+
       Well-known Q_Key for QP1 privileged mode access (15.4.2).
+
+

SOURCE

+
#define IB_QP1_WELL_KNOWN_Q_KEY                         CL_NTOH32(0x80010000)
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_ATTR_MOD_ACKNOWLEDGE

+ +

[top][index]

+

NAME

+
       IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+
+

DESCRIPTION

+
       Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+
+

SOURCE

+
#define IB_SMINFO_ATTR_MOD_ACKNOWLEDGE          (CL_NTOH32(0x000002))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISABLE

+ +

[top][index]

+

NAME

+
       IB_SMINFO_ATTR_MOD_DISABLE
+
+

DESCRIPTION

+
       Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+
+

SOURCE

+
#define IB_SMINFO_ATTR_MOD_DISABLE                      (CL_NTOH32(0x000003))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISCOVER

+ +

[top][index]

+

NAME

+
       IB_SMINFO_ATTR_MOD_DISCOVER
+
+

DESCRIPTION

+
       Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+
+

SOURCE

+
#define IB_SMINFO_ATTR_MOD_DISCOVER                     (CL_NTOH32(0x000005))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_ATTR_MOD_HANDOVER

+ +

[top][index]

+

NAME

+
       IB_SMINFO_ATTR_MOD_HANDOVER
+
+

DESCRIPTION

+
       Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+
+

SOURCE

+
#define IB_SMINFO_ATTR_MOD_HANDOVER             (CL_NTOH32(0x000001))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_ATTR_MOD_STANDBY

+ +

[top][index]

+

NAME

+
       IB_SMINFO_ATTR_MOD_STANDBY
+
+

DESCRIPTION

+
       Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+
+

SOURCE

+
#define IB_SMINFO_ATTR_MOD_STANDBY                      (CL_NTOH32(0x000004))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING

+ +

[top][index]

+

NAME

+
       IB_SMINFO_STATE_DISCOVERING
+
+

DESCRIPTION

+
       Encoded state value used in the SMInfo attribute.
+
+

SOURCE

+
#define IB_SMINFO_STATE_DISCOVERING                     1
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_STATE_INIT

+ +

[top][index]

+

NAME

+
       IB_SMINFO_STATE_INIT
+
+

DESCRIPTION

+
       Encoded state value used in the SMInfo attribute.
+
+

SOURCE

+
#define IB_SMINFO_STATE_INIT                                    4
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_STATE_MASTER

+ +

[top][index]

+

NAME

+
       IB_SMINFO_STATE_MASTER
+
+

DESCRIPTION

+
       Encoded state value used in the SMInfo attribute.
+
+

SOURCE

+
#define IB_SMINFO_STATE_MASTER                          3
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_STATE_NOTACTIVE

+ +

[top][index]

+

NAME

+
       IB_SMINFO_STATE_NOTACTIVE
+
+

DESCRIPTION

+
       Encoded state value used in the SMInfo attribute.
+
+

SOURCE

+
#define IB_SMINFO_STATE_NOTACTIVE                       0
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMINFO_STATE_STANDBY

+ +

[top][index]

+

NAME

+
       IB_SMINFO_STATE_STANDBY
+
+

DESCRIPTION

+
       Encoded state value used in the SMInfo attribute.
+
+

SOURCE

+
#define IB_SMINFO_STATE_STANDBY                         2
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMP_DIRECTION

+ +

[top][index]

+

NAME

+
       IB_SMP_DIRECTION
+
+

DESCRIPTION

+
       The Direction bit for directed route SMPs.
+
+

SOURCE

+
#define IB_SMP_DIRECTION_HO             0x8000
+#define IB_SMP_DIRECTION                (CL_HTON16(IB_SMP_DIRECTION_HO))
+
+
+
+ +

[Definitions] +IBA Base: Constants/IB_SMP_STATUS_MASK

+ +

[top][index]

+

NAME

+
       IB_SMP_STATUS_MASK
+
+

DESCRIPTION

+
       Mask value for extracting status from a directed route SMP.
+
+

SOURCE

+
#define IB_SMP_STATUS_MASK_HO           0x7FFF
+#define IB_SMP_STATUS_MASK              (CL_HTON16(IB_SMP_STATUS_MASK_HO))
+
+
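+       Combined with IB_SMP_DIRECTION, a directed route SMP's network-order
+       status field can be split into its status code and D bit (a sketch;
+       p_smp is a hypothetical pointer to the SMP header):
+
+ib_net16_t      status;
+boolean_t       is_return_path;
+
+status         = p_smp->status & IB_SMP_STATUS_MASK;
+is_return_path = ( (p_smp->status & IB_SMP_DIRECTION) != 0 );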
+
+ +

[Definitions] +IBA Base: Constants/IB_SUBNET_PATH_HOPS_MAX

+ +

[top][index]

+

NAME

+
       IB_SUBNET_PATH_HOPS_MAX
+
+

DESCRIPTION

+
       Maximum number of directed route switch hops in a subnet (14.2.1.2).
+
+

SOURCE

+
#define IB_SUBNET_PATH_HOPS_MAX                         64
+
+
+
+ +

[Definitions] +IBA Base: Constants/Join States

+ +

[top][index]

+

NAME

+
       Join States
+
+

DESCRIPTION

+
       Defines the join state flags for multicast group management.
+
+

SOURCE

+
#define IB_JOIN_STATE_FULL                      1
+#define IB_JOIN_STATE_NON                       2
+#define IB_JOIN_STATE_SEND_ONLY         4
+
+
+
+ +

[Definitions] +IBA Base: Constants/MAD_BLOCK_GRH_SIZE

+ +

[top][index]

+

NAME

+
       MAD_BLOCK_GRH_SIZE
+
+

DESCRIPTION

+
       Size of a MAD datagram, including the GRH.
+
+

SOURCE

+
#define MAD_BLOCK_GRH_SIZE                                      296
+
+
+
+ +

[Definitions] +IBA Base: Constants/MAD_BLOCK_SIZE

+ +

[top][index]

+

NAME

+
       MAD_BLOCK_SIZE
+
+

DESCRIPTION

+
       Size of a non-RMPP MAD datagram.
+
+

SOURCE

+
#define MAD_BLOCK_SIZE                                          256
+
+
+
+ +

[Definitions] +IBA Base: Constants/MAD_RMPP_DATA_SIZE

+ +

[top][index]

+

NAME

+
       MAD_RMPP_DATA_SIZE
+
+

DESCRIPTION

+
       Size of an RMPP transaction data section.
+
+

SOURCE

+
#define MAD_RMPP_DATA_SIZE              (MAD_BLOCK_SIZE - MAD_RMPP_HDR_SIZE)
+
+
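+       For example, a sketch of computing how many RMPP segments a payload
+       of 'len' bytes requires (rounding up; assumes len > 0):
+
+uint32_t        num_segs;
+
+num_segs = (len + MAD_RMPP_DATA_SIZE - 1) / MAD_RMPP_DATA_SIZE;
+/* e.g. a 1000-byte payload needs ceil(1000 / 220) = 5 segments */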
+
+ +

[Definitions] +IBA Base: Constants/MAD_RMPP_HDR_SIZE

+ +

[top][index]

+

NAME

+
       MAD_RMPP_HDR_SIZE
+
+

DESCRIPTION

+
       Size of an RMPP header, including the common MAD header.
+
+

SOURCE

+
#define MAD_RMPP_HDR_SIZE                                       36
+
+
+
+ +

[Definitions] +IBA Base: Types/DM_SVC_NAME

+ +

[top][index]

+

NAME

+
       DM_SVC_NAME
+
+

DESCRIPTION

+
       IBA defined Device Management service name (16.3)
+
+

SYNOPSIS

+
#define DM_SVC_NAME                             "DeviceManager.IBTA"
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_class_is_rmpp

+ +

[top][index]

+

NAME

+
       ib_class_is_rmpp
+
+

DESCRIPTION

+
       Indicates if the Class Code supports RMPP
+
+

SYNOPSIS

+
AL_INLINE boolean_t     AL_API
+ib_class_is_rmpp(
+        IN              const   uint8_t class_code )
+{
+        return( (class_code == IB_MCLASS_SUBN_ADM) ||
+                (class_code == IB_MCLASS_DEV_MGMT) ||
+                (class_code == IB_MCLASS_DEV_ADM) ||
+                (class_code == IB_MCLASS_BIS) ||
+                ib_class_is_vendor_specific_high( class_code ) ); 
+}
+
+

PARAMETERS

+
       class_code
+               [in] The Management Datagram Class Code
+
+

RETURN VALUE

+
       TRUE if the class supports RMPP
+       FALSE otherwise.
+
+

NOTES

+
+
+
+
+ +

[Functions] +IBA Base: Types/ib_class_is_vendor_specific

+ +

[top][index]

+

NAME

+
       ib_class_is_vendor_specific
+
+

DESCRIPTION

+
       Indicates if the Class Code is a vendor specific class
+
+

SYNOPSIS

+
static inline boolean_t
+ib_class_is_vendor_specific(
+        IN              const   uint8_t class_code )
+{
+  return( ib_class_is_vendor_specific_low(class_code) ||
+                         ib_class_is_vendor_specific_high(class_code) );
+}
+
+

PARAMETERS

+
       class_code
+               [in] The Management Datagram Class Code
+
+

RETURN VALUE

+
       TRUE if the class is a Vendor Specific MAD
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
  ib_class_is_vendor_specific_low, ib_class_is_vendor_specific_high
+
+
+
+ +

[Functions] +IBA Base: Types/ib_class_is_vendor_specific_high

+ +

[top][index]

+

NAME

+
       ib_class_is_vendor_specific_high
+
+

DESCRIPTION

+
       Indicates if the Class Code is a vendor specific class from
+       the high range.
+
+

SYNOPSIS

+
static inline boolean_t
+ib_class_is_vendor_specific_high(
+        IN              const   uint8_t class_code )
+{
+        return( (class_code >= IB_MCLASS_VENDOR_HIGH_RANGE_MIN) &&
+           (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX)) ;
+}
+
+

PARAMETERS

+
       class_code
+               [in] The Management Datagram Class Code
+
+

RETURN VALUE

+
       TRUE if the class is in the High range of Vendor Specific MADs 
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
 IB_MCLASS_VENDOR_HIGH_RANGE_MIN, IB_MCLASS_VENDOR_HIGH_RANGE_MAX
+
+
+
+ +

[Functions] +IBA Base: Types/ib_class_is_vendor_specific_low

+ +

[top][index]

+

NAME

+
       ib_class_is_vendor_specific_low
+
+

DESCRIPTION

+
       Indicates if the Class Code is a vendor specific class from
+       the low range.
+
+

SYNOPSIS

+
static inline boolean_t
+ib_class_is_vendor_specific_low(
+        IN              const   uint8_t class_code )
+{
+        return( (class_code >= IB_MCLASS_VENDOR_LOW_RANGE_MIN) &&
+           (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX)) ;
+}
+
+

PARAMETERS

+
       class_code
+               [in] The Management Datagram Class Code
+
+

RETURN VALUE

+
       TRUE if the class is in the Low range of Vendor Specific MADs 
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
 IB_MCLASS_VENDOR_LOW_RANGE_MIN, IB_MCLASS_VENDOR_LOW_RANGE_MAX
+
+
+
+ +

[Structures] +IBA Base: Types/ib_class_port_info_t

+ +

[top][index]

+

NAME

+
       ib_class_port_info_t
+
+

DESCRIPTION

+
       IBA defined ClassPortInfo attribute (13.4.8.1).
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_class_port_info
+{
+        uint8_t                                 base_ver;
+        uint8_t                                 class_ver;
+        ib_net16_t                              cap_mask;
+        ib_net32_t                              resp_time_val;
+        ib_gid_t                                redir_gid;
+        ib_net32_t                              redir_tc_sl_fl;
+        ib_net16_t                              redir_lid;
+        ib_net16_t                              redir_pkey;
+        ib_net32_t                              redir_qp;
+        ib_net32_t                              redir_qkey;
+        ib_gid_t                                trap_gid;
+        ib_net32_t                              trap_tc_sl_fl;
+        ib_net16_t                              trap_lid;
+        ib_net16_t                              trap_pkey;
+        ib_net32_t                              trap_hop_qp;
+        ib_net32_t                              trap_qkey;
+
+}       PACK_SUFFIX ib_class_port_info_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       base_ver
+               Maximum supported MAD Base Version.
+
+       class_ver
+               Maximum supported management class version.
+
+       cap_mask
+               Supported capabilities of this management class.
+
+       resp_time_val
+               Maximum expected response time.
+
+       redir_gid
+               GID to use for redirection, or zero.
+
+       redir_tc_sl_fl
+               Traffic class, service level and flow label the requester
+               should use if the service is redirected.
+
+       redir_lid
+               LID used for redirection, or zero
+
+       redir_pkey
+               P_Key used for redirection
+
+       redir_qp
+               QP number used for redirection
+
+       redir_qkey
+               Q_Key associated with the redirected QP.  This shall be the
+               well known Q_Key value.
+
+       trap_gid
+               GID value used for trap messages from this service.
+
+       trap_tc_sl_fl
+               Traffic class, service level and flow label used for
+               trap messages originated by this service.
+
+       trap_lid
+               LID used for trap messages, or zero
+
+       trap_pkey
+               P_Key used for trap messages
+
+       trap_hop_qp
+               Hop limit (upper 8 bits) and QP number used for trap messages
+
+       trap_qkey
+               Q_Key associated with the trap messages QP.
+
+
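+       Since trap_hop_qp packs the hop limit into the upper 8 bits and the
+       QP number into the lower 24 bits, a sketch of unpacking it (p_cpi is
+       a hypothetical pointer to the attribute):
+
+uint32_t        hop_qp_ho, hop_limit, trap_qp;
+
+hop_qp_ho = CL_NTOH32( p_cpi->trap_hop_qp );
+hop_limit = hop_qp_ho >> 24;
+trap_qp   = hop_qp_ho & 0x00FFFFFF;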

SEE ALSO

+
       IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP
+
+
+
+ +

[Structures] +IBA Base: Types/ib_dm_mad_t

+ +

[top][index]

+

NAME

+
       ib_dm_mad_t
+
+

DESCRIPTION

+
       IBA defined Device Management MAD (16.3.1)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_dm_mad
+{
+        ib_mad_t                hdr;
+        uint8_t                 resv[40];
+
+#define IB_DM_DATA_SIZE                 192
+        uint8_t                 data[IB_DM_DATA_SIZE];
+
+}       PACK_SUFFIX ib_dm_mad_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       hdr
+               Common MAD header.
+
+       resv
+               Reserved.
+
+       data
+               Device Management payload.  The structure and content of this field
+               depend upon the method, attr_id, and attr_mod fields in the header.
+
+

SEE ALSO

+
 ib_mad_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_field32_t

+ +

[top][index]

+

NAME

+
       ib_field32_t
+
+

DESCRIPTION

+
       Represents a 32-bit field, allowing access either as a 32-bit network
+       byte ordered value or as a 4-byte array.
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef union _ib_field32_t
+{
+        net32_t         val;
+        uint8_t         bytes[4];
+
+}       PACK_SUFFIX ib_field32_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       val
+               Full field value.
+
+       bytes
+               Byte array representing the field.  The byte array provides identical
+               access independent of CPU byte ordering.
+
+
+
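+       For example, a sketch showing that byte 0 is the most significant
+       byte of the network-ordered value regardless of host endianness:
+
+ib_field32_t    field;
+
+field.val = CL_HTON32( 0x11223344 );
+/* field.bytes[0] == 0x11 and field.bytes[3] == 0x44 on any CPU */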
+ +

[Functions] +IBA Base: Types/ib_get_async_event_str

+ +

[top][index]

+

NAME

+
       ib_get_async_event_str
+
+

DESCRIPTION

+
       Returns a string for the specified asynchronous event.
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_async_event_str(
+        IN                              ib_async_event_t                        event );
+
+

PARAMETERS

+
       event
+               [in] event value
+
+ RETURN VALUES
+       Pointer to the asynchronous event description string.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_get_err_str

+ +

[top][index]

+

NAME

+
       ib_get_err_str
+
+

DESCRIPTION

+
       Returns a string for the specified status value.
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_err_str(
+        IN                              ib_api_status_t                         status );
+
+

PARAMETERS

+
       status
+               [in] status value
+
+ RETURN VALUES
+       Pointer to the status description string.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_get_node_type_str

+ +

[top][index]

+

NAME

+
       ib_get_node_type_str
+
+

DESCRIPTION

+
       Returns a string for the specified node type.
+
+

SYNOPSIS

+
AL_INLINE const char* AL_API
+ib_get_node_type_str(
+        IN                              uint8_t                                         node_type )
+{
+        if( node_type > IB_NODE_TYPE_ROUTER )
+                node_type = 0;
+        return( __ib_node_type_str[node_type] );
+}
+
+

PARAMETERS

+
       node_type
+               [in] Encoded node type as returned in the NodeInfo attribute.
+ RETURN VALUES
+       Pointer to the node type string.
+
+

NOTES

+
+
+

SEE ALSO

+
 ib_node_info_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_get_port_state_from_str

+ +

[top][index]

+

NAME

+
       ib_get_port_state_from_str
+
+

DESCRIPTION

+
       Returns the encoded port state for the specified port state string.
+
+

SYNOPSIS

+
AL_INLINE const uint8_t AL_API
+ib_get_port_state_from_str(
+        IN                              char*                                           p_port_state_str )
+{
+        if( !strncmp(p_port_state_str,"No State Change (NOP)",12) )
+                return(0);
+        else if( !strncmp(p_port_state_str, "DOWN",4) )
+                return(1);
+        else if( !strncmp(p_port_state_str, "INIT", 4) )
+                return(2);
+        else if( !strncmp(p_port_state_str,"ARMED", 5) )
+                return(3);
+        else if( !strncmp(p_port_state_str, "ACTIVE", 6) )
+                return(4);
+        else if( !strncmp(p_port_state_str, "ACTDEFER", 8) )
+                return(5);
+        return(6);
+}
+
+

PARAMETERS

+
       p_port_state_str
+               [in] A string matching one returned by ib_get_port_state_str
+
+ RETURN VALUES
+       The appropriate code.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_port_info_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_get_port_state_str

+ +

[top][index]

+

NAME

+
       ib_get_port_state_str
+
+

DESCRIPTION

+
       Returns a string for the specified port state.
+
+

SYNOPSIS

+
AL_INLINE const char* AL_API
+ib_get_port_state_str(
+        IN                              uint8_t                                         port_state )
+{
+        if( port_state > IB_LINK_ACTIVE )
+                port_state = IB_LINK_ACTIVE + 1;
+        return( __ib_port_state_str[port_state] );
+}
+
+

PARAMETERS

+
       port_state
+               [in] Encoded port state as returned in the PortInfo attribute.
+ RETURN VALUES
+       Pointer to the port state string.
+
+

NOTES

+
+
+

SEE ALSO

+
 ib_port_info_t
+
+
+
+ +
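
EXAMPLE

+       An illustrative round trip through the two port state helpers; it
+       assumes the string table entry for the active state is "ACTIVE",
+       which is what ib_get_port_state_from_str expects.
+
+        const char      *p_str;
+        uint8_t         state;
+
+        p_str = ib_get_port_state_str( IB_LINK_ACTIVE );
+        state = ib_get_port_state_from_str( (char*)p_str );
+        /* state == 4, the encoding ib_get_port_state_from_str assigns
+         * to "ACTIVE". */
+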

[Functions] +IBA Base: Types/ib_get_qp_type_str

+ +

[top][index]

+

NAME

+
       ib_get_qp_type_str
+
+

DESCRIPTION

+
       Returns a string for the specified QP type
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_qp_type_str(
+        IN                              uint8_t                                         qp_type );
+
+

PARAMETERS

+
       qp_type
+               [in] Encoded QP type as defined in the ib_qp_type_t enumeration.
+ RETURN VALUES
+       Pointer to the QP type string.
+
+

NOTES

+
+
+

SEE ALSO

+
 ib_qp_type_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_get_wc_status_str

+ +

[top][index]

+

NAME

+
       ib_get_wc_status_str
+
+

DESCRIPTION

+
       Returns a string for the specified work completion status.
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_wc_status_str(
+        IN                              ib_wc_status_t                          wc_status );
+
+

PARAMETERS

+
       wc_status
+               [in] work completion status value
+
+ RETURN VALUES
+       Pointer to the work completion status description string.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_get_wc_type_str

+ +

[top][index]

+

NAME

+
       ib_get_wc_type_str
+
+

DESCRIPTION

+
       Returns a string for the specified work completion type.
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_wc_type_str(
+        IN                              ib_wc_type_t                            wc_type );
+
+

PARAMETERS

+
       wc_type
+               [in] work completion type value
+
+ RETURN VALUES
+       Pointer to the work completion type description string.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_get_wr_type_str

+ +

[top][index]

+

NAME

+
       ib_get_wr_type_str
+
+

DESCRIPTION

+
       Returns a string for the specified work request type
+
+

SYNOPSIS

+
AL_EXPORT const char* AL_API
+ib_get_wr_type_str(
+        IN                              uint8_t                                         wr_type );
+
+

PARAMETERS

+
       wr_type
+               [in] Encoded work request type as defined in the
+               ib_wr_type_t enumeration.
+ RETURN VALUES
+       Pointer to the work request type string.
+
+

NOTES

+
+
+

SEE ALSO

+
 ib_wr_type_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_gid_get_guid

+ +

[top][index]

+

NAME

+
       ib_gid_get_guid
+
+

DESCRIPTION

+
       Gets the guid from a GID.
+
+

SYNOPSIS

+
AL_INLINE ib_net64_t AL_API
+ib_gid_get_guid(
+        IN              const   ib_gid_t* const                         p_gid )
+{
+        return( p_gid->unicast.interface_id );
+}
+
+

PARAMETERS

+
       p_gid
+               [in] Pointer to the GID object.
+
+ RETURN VALUES
+       64-bit GUID value.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_gid_get_subnet_prefix

+ +

[top][index]

+

NAME

+
       ib_gid_get_subnet_prefix
+
+

DESCRIPTION

+
       Gets the subnet prefix from a GID.
+
+

SYNOPSIS

+
AL_INLINE ib_net64_t AL_API
+ib_gid_get_subnet_prefix(
+        IN              const   ib_gid_t* const                         p_gid )
+{
+        return( p_gid->unicast.prefix );
+}
+
+

PARAMETERS

+
       p_gid
+               [in] Pointer to the GID object.
+
+ RETURN VALUES
+       64-bit subnet prefix value.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_gid_is_link_local

+ +

[top][index]

+

NAME

+
       ib_gid_is_link_local
+
+

DESCRIPTION

+
       Returns TRUE if the unicast GID scoping indicates link local,
+       FALSE otherwise.
+
+

SYNOPSIS

+
static inline boolean_t
+ib_gid_is_link_local(
+        IN              const   ib_gid_t* const                         p_gid )
+{
+        return( ib_gid_get_subnet_prefix( p_gid ) == IB_DEFAULT_SUBNET_PREFIX );
+}
+
+

PARAMETERS

+
       p_gid
+               [in] Pointer to the GID object.
+
+ RETURN VALUES
+       Returns TRUE if the unicast GID scoping indicates link local,
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_gid_is_site_local

+ +

[top][index]

+

NAME

+
       ib_gid_is_site_local
+
+

DESCRIPTION

+
       Returns TRUE if the unicast GID scoping indicates site local,
+       FALSE otherwise.
+
+

SYNOPSIS

+
static inline boolean_t
+ib_gid_is_site_local(
+        IN              const   ib_gid_t* const                         p_gid )
+{
+        return( ( ib_gid_get_subnet_prefix( p_gid ) &
+                CL_HTON64( 0xFFFFFFFFFFFF0000ULL ) ) == CL_HTON64( 0xFEC0000000000000ULL ) );
+}
+
+

PARAMETERS

+
       p_gid
+               [in] Pointer to the GID object.
+
+ RETURN VALUES
+       Returns TRUE if the unicast GID scoping indicates site local,
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +

[Definitions] +IBA Base: Types/ib_gid_prefix_t

+ +

[top][index]

+

NAME

+
       ib_gid_prefix_t
+
+

DESCRIPTION

+
+
+

SOURCE

+
typedef ib_net64_t              ib_gid_prefix_t;
+
+
+
+ +

[Functions] +IBA Base: Types/ib_gid_set_default

+ +

[top][index]

+

NAME

+
       ib_gid_set_default
+
+

DESCRIPTION

+
       Sets a GID to the default value.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_gid_set_default(
+        IN                              ib_gid_t* const                         p_gid,
+        IN              const   ib_net64_t                                      interface_id )
+{
+        p_gid->unicast.prefix = IB_DEFAULT_SUBNET_PREFIX;
+        p_gid->unicast.interface_id = interface_id;
+}
+
+

PARAMETERS

+
       p_gid
+               [in] Pointer to the GID object.
+
+       interface_id
+               [in] Manufacturer assigned EUI64 value of a port.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +
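
EXAMPLE

+       A minimal sketch combining the GID helpers; the port GUID value is
+       illustrative.
+
+        ib_gid_t        gid;
+        ib_net64_t      port_guid = CL_HTON64( 0x0002C90200001234ULL );
+
+        ib_gid_set_default( &gid, port_guid );
+        CL_ASSERT( ib_gid_get_subnet_prefix( &gid ) == IB_DEFAULT_SUBNET_PREFIX );
+        CL_ASSERT( ib_gid_get_guid( &gid ) == port_guid );
+        CL_ASSERT( ib_gid_is_link_local( &gid ) );
+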

[Definitions] +IBA Base: Types/ib_gid_t

+ +

[top][index]

+

NAME

+
       ib_gid_t
+
+

DESCRIPTION

+
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef union _ib_gid
+{
+        uint8_t                                 raw[16];
+        struct _ib_gid_unicast
+        {
+                ib_gid_prefix_t         prefix;
+                ib_net64_t                      interface_id;
+
+        } PACK_SUFFIX unicast;
+
+        struct _ib_gid_multicast
+        {
+                uint8_t                         header[2];
+                uint8_t                         raw_group_id[14];
+
+        } PACK_SUFFIX multicast;
+
+}       PACK_SUFFIX ib_gid_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       raw
+               GID represented as an unformatted byte array.
+
+       unicast
+               Typical unicast representation with subnet prefix and
+               port GUID.
+
+       multicast
+               Representation for multicast use.
+
+

SEE ALSO

+ +
+ +

[Structures] +IBA Base: Types/ib_gmp_t

+ +

[top][index]

+

NAME

+
       ib_gmp_t
+
+

DESCRIPTION

+
       IBA defined GMP MAD format. (16.1.1)
+
+

SYNOPSIS

+
#define IB_GMP_DATA_SIZE 200
+
+#include <complib/cl_packon.h>
+typedef struct _ib_gmp
+{
+        uint8_t                                 base_ver;
+        uint8_t                                 mgmt_class;
+        uint8_t                                 class_ver;
+        uint8_t                                 method;
+        ib_net16_t                              status;
+        ib_net16_t                              resv;
+        ib_net64_t                              trans_id;
+        ib_net16_t                              attr_id;
+        ib_net16_t                              resv1;
+        ib_net32_t                              attr_mod;
+        uint8_t                                 resv2[40];
+        uint8_t                                 data[IB_GMP_DATA_SIZE];
+
+}       PACK_SUFFIX ib_gmp_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_grh_get_ver_class_flow

+ +

[top][index]

+

NAME

+
       ib_grh_get_ver_class_flow
+
+

DESCRIPTION

+
       Get encoded version, traffic class and flow label in grh
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_grh_get_ver_class_flow(
+        IN              const   ib_net32_t                                      ver_class_flow,
+                OUT                     uint8_t* const                          p_ver OPTIONAL,
+                OUT                     uint8_t* const                          p_tclass OPTIONAL,
+                OUT                     net32_t* const                          p_flow_lbl OPTIONAL )
+{
+        ib_net32_t tmp_ver_class_flow;
+
+        tmp_ver_class_flow = cl_ntoh32( ver_class_flow );
+
+        if (p_ver)
+                *p_ver = (uint8_t)(tmp_ver_class_flow >> 28);
+
+        if (p_tclass)
+                *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20);
+
+        if (p_flow_lbl)
+                *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF ));
+}
+
+

PARAMETERS

+
       ver_class_flow
+               [in] the version, traffic class and flow label info.
+
+ RETURN VALUES
+       p_ver
+               [out] pointer to the version info.
+
+       p_tclass
+               [out] pointer to the traffic class info.
+
+       p_flow_lbl
+               [out] pointer to the flow label info
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_grh_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_grh_set_ver_class_flow

+ +

[top][index]

+

NAME

+
       ib_grh_set_ver_class_flow
+
+

DESCRIPTION

+
       Set encoded version, traffic class and flow label in grh
+
+

SYNOPSIS

+
AL_INLINE ib_net32_t AL_API
+ib_grh_set_ver_class_flow(
+        IN              const   uint8_t                                         ver,
+        IN              const   uint8_t                                         tclass,
+        IN              const   net32_t                                         flow_lbl )
+{
+        ib_net32_t              ver_class_flow;
+
+        ver_class_flow = cl_hton32( (ver << 28) | (tclass << 20) );
+        ver_class_flow |= (flow_lbl & CL_HTON32( 0x000FFFFF ));
+        return (ver_class_flow);
+}
+
+

PARAMETERS

+
       ver
+               [in] the version info.
+
+       tclass
+               [in] the traffic class info.
+
+       flow_lbl
+               [in] the flow label info
+
+ RETURN VALUES
+       ver_class_flow
+               [out] the version, traffic class and flow label info.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_grh_t
+
+
+
+ +
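
EXAMPLE

+       An illustrative round trip through the two GRH helpers; the version,
+       traffic class, and flow label values are arbitrary.
+
+        ib_net32_t      vcf;
+        uint8_t         ver, tclass;
+        net32_t         flow;
+
+        vcf = ib_grh_set_ver_class_flow( 6, 0, CL_HTON32( 0x12345 ) );
+        ib_grh_get_ver_class_flow( vcf, &ver, &tclass, &flow );
+        /* ver == 6, tclass == 0, flow == CL_HTON32( 0x12345 ) */
+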

[Structures] +IBA Base: Types/ib_grh_t

+ +

[top][index]

+

NAME

+
       ib_grh_t
+
+

DESCRIPTION

+
       Global route header information received with unreliable datagram messages
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_grh
+{
+        ib_net32_t                              ver_class_flow;
+        ib_net16_t                              resv1;
+        uint8_t                                 resv2;
+        uint8_t                                 hop_limit;
+        ib_gid_t                                src_gid;
+        ib_gid_t                                dest_gid;
+
+}       PACK_SUFFIX ib_grh_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Structures] +IBA Base: Types/ib_guid_info_t

+ +

[top][index]

+

NAME

+
       ib_guid_info_t
+
+

DESCRIPTION

+
       IBA defined GuidInfo. (14.2.5.5)
+
+

SYNOPSIS

+
#define GUID_TABLE_MAX_ENTRIES          8
+
+#include <complib/cl_packon.h>
+typedef struct _ib_guid_info
+{
+        ib_net64_t                      guid[GUID_TABLE_MAX_ENTRIES];
+
+}       PACK_SUFFIX ib_guid_info_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_dev_id

+ +

[top][index]

+

NAME

+
       ib_inform_get_dev_id
+
+

DESCRIPTION

+
       Retrieves the device ID from a vendor specific inform trap.
+
+

SYNOPSIS

+
AL_INLINE uint16_t AL_API
+ib_inform_get_dev_id(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return ib_inform_get_trap_num( p_inform_info );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               device ID to return.
+
+ RETURN VALUES
+       Returns the device ID of the inform info, in host byte order.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_dev_id
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_prod_type

+ +

[top][index]

+

NAME

+
       ib_inform_get_prod_type
+
+

DESCRIPTION

+
       Retrieves the producer type from an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE uint32_t AL_API
+ib_inform_get_prod_type(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return (cl_ntoh32( p_inform_info->combo3 ) >> 8);
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               producer type to return.
+
+ RETURN VALUES
+       Returns the producer type of the inform info, in host byte order.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_prod_type
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_qpn

+ +

[top][index]

+

NAME

+
       ib_inform_get_qpn
+
+

DESCRIPTION

+
       Retrieves the QPN from an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE net32_t AL_API
+ib_inform_get_qpn(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return (p_inform_info->combo2 & CL_NTOH32( 0x00FFFFFF ));
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               QPN to return.
+
+ RETURN VALUES
+       Returns the QPN of the inform info.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_qpn
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_resp_time_val

+ +

[top][index]

+

NAME

+
       ib_inform_get_resp_time_val
+
+

DESCRIPTION

+
       Retrieves the response time value from an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_inform_get_resp_time_val(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return (uint8_t)(cl_ntoh32( p_inform_info->combo2 ) >> 27);
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               response time value to return.
+
+ RETURN VALUES
+       Returns the response time value of the inform info.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_resp_time_val
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_trap_num

+ +

[top][index]

+

NAME

+
       ib_inform_get_trap_num
+
+

DESCRIPTION

+
       Retrieves the trap number from an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE uint16_t AL_API
+ib_inform_get_trap_num(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return cl_ntoh16( p_inform_info->combo1 );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               trap number to return.
+
+ RETURN VALUES
+       Returns the trap number of the inform info, in host byte order.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_trap_num
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_get_vend_id

+ +

[top][index]

+

NAME

+
       ib_inform_get_vend_id
+
+

DESCRIPTION

+
       Retrieves the vendor ID from an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE uint32_t AL_API
+ib_inform_get_vend_id(
+        IN              const   ib_inform_info_t* const         p_inform_info )
+{
+        return ib_inform_get_prod_type( p_inform_info );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in] Pointer to the inform info structure whose
+               vendor ID to return.
+
+ RETURN VALUES
+       Returns the vendor ID of the inform info, in host byte order.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_set_vend_id
+
+
+
+ +

[Structures] +IBA Base: Types/ib_inform_info_record_t

+ +

[top][index]

+

NAME

+
       ib_inform_info_record_t
+
+

DESCRIPTION

+
       IBA defined InformInfo Record. (15.2.5.12)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_inform_info_record
+{
+        ib_gid_t                                subscriber_gid;
+        net16_t                                 subscriber_enum;
+        uint16_t                                reserved[3];
+        ib_inform_info_t                inform_info;
+
+}       PACK_SUFFIX ib_inform_info_record_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_dev_id

+ +

[top][index]

+

NAME

+
       ib_inform_set_dev_id
+
+

DESCRIPTION

+
       Sets the device ID of a vendor specific inform trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_dev_id(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   uint16_t                                        dev_id )
+{
+        ib_inform_set_trap_num( p_inform_info, dev_id );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose device ID to set.
+
+       dev_id
+               [in] Device ID of inform trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_dev_id
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_prod_type

+ +

[top][index]

+

NAME

+
       ib_inform_set_prod_type
+
+

DESCRIPTION

+
       Sets the producer type of an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_prod_type(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   uint32_t                                        prod_type )
+{
+        p_inform_info->combo3 = cl_hton32( prod_type << 8 );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose producer type to set.
+
+       prod_type
+               [in] Producer type of inform trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_prod_type
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_qpn

+ +

[top][index]

+

NAME

+
       ib_inform_set_qpn
+
+

DESCRIPTION

+
       Sets the QPN of an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_qpn(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   net32_t                                         qpn )
+{
+        p_inform_info->combo2 &= CL_NTOH32( 0xFF000000 );
+        p_inform_info->combo2 |= (qpn & CL_NTOH32( 0x00FFFFFF ));
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose QPN to set.
+
+       qpn
+               [in] QPN of the inform info.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_qpn
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_resp_time_val

+ +

[top][index]

+

NAME

+
       ib_inform_set_resp_time_val
+
+

DESCRIPTION

+
       Sets the response time value of an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_resp_time_val(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   uint8_t                                         resp_time_val )
+{
+        uint32_t        val;
+
+        val = cl_ntoh32( p_inform_info->combo2 );
+        val &= 0x07FFFFFF;
+        val |= (resp_time_val << 27);
+        p_inform_info->combo2 = cl_hton32( val );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose response time value to set.
+
+       resp_time_val
+               [in] Response time value of the inform info.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_resp_time_val
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_trap_num

+ +

[top][index]

+

NAME

+
       ib_inform_set_trap_num
+
+

DESCRIPTION

+
       Sets the trap number of an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_trap_num(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   uint16_t                                        trap_num )
+{
+        p_inform_info->combo1 = cl_hton16( trap_num );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose trap number to set.
+
+       trap_num
+               [in] Trap number to set.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_trap_num
+
+
+
+ +

[Functions] +IBA Base: Types/ib_inform_set_vend_id

+ +

[top][index]

+

NAME

+
       ib_inform_set_vend_id
+
+

DESCRIPTION

+
       Sets the vendor ID of an inform info structure.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_inform_set_vend_id(
+        IN      OUT                     ib_inform_info_t* const         p_inform_info,
+        IN              const   uint32_t                                        vend_id )
+{
+        ib_inform_set_prod_type( p_inform_info, vend_id );
+}
+
+

PARAMETERS

+
       p_inform_info
+               [in/out] Pointer to the inform info structure
+               whose vendor ID to set.
+
+       vend_id
+               [in] Vendor ID of inform trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_inform_info_t, ib_inform_get_vend_id
+
+
+
+ +
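
EXAMPLE

+       An illustrative sketch of packing and reading back the combo fields
+       through the accessors above; the QPN, response time, and trap number
+       are arbitrary.
+
+        ib_inform_info_t        info;
+
+        cl_memclr( &info, sizeof(info) );
+        ib_inform_set_qpn( &info, CL_HTON32( 0x000123 ) );
+        ib_inform_set_resp_time_val( &info, 8 );
+        ib_inform_set_trap_num( &info, 131 );
+        CL_ASSERT( ib_inform_get_qpn( &info ) == CL_HTON32( 0x000123 ) );
+        CL_ASSERT( ib_inform_get_resp_time_val( &info ) == 8 );
+        CL_ASSERT( ib_inform_get_trap_num( &info ) == 131 );
+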

[Structures] +IBA Base: Types/ib_ioc_profile_t

+ +

[top][index]

+

NAME

+
       ib_ioc_profile_t
+
+

DESCRIPTION

+
       IBA defined IO Controller profile structure (16.3.3.4)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef __declspec(align(8)) struct _ib_ioc_profile
+{
+        ib_net64_t                              ioc_guid;
+
+        ib_net32_t                              vend_id;
+
+        ib_net32_t                              dev_id;
+        ib_net16_t                              dev_ver;
+        ib_net16_t                              resv2;
+
+        ib_net32_t                              subsys_vend_id;
+        ib_net32_t                              subsys_id;
+
+        ib_net16_t                              io_class;
+        ib_net16_t                              io_subclass;
+        ib_net16_t                              protocol;
+        ib_net16_t                              protocol_ver;
+
+        ib_net32_t                              resv3;
+        ib_net16_t                              send_msg_depth;
+        uint8_t                                 resv4;
+        uint8_t                                 rdma_read_depth;
+        ib_net32_t                              send_msg_size;
+        ib_net32_t                              rdma_size;
+
+        uint8_t                                 ctrl_ops_cap;
+#define CTRL_OPS_CAP_ST         0x01
+#define CTRL_OPS_CAP_SF         0x02
+#define CTRL_OPS_CAP_RT         0x04
+#define CTRL_OPS_CAP_RF         0x08
+#define CTRL_OPS_CAP_WT         0x10
+#define CTRL_OPS_CAP_WF         0x20
+#define CTRL_OPS_CAP_AT         0x40
+#define CTRL_OPS_CAP_AF         0x80
+
+        uint8_t                                 resv5;
+
+        uint8_t                                 num_svc_entries;
+#define MAX_NUM_SVC_ENTRIES     0xff
+
+        uint8_t                                 resv6[9];
+
+#define CTRL_ID_STRING_LEN      64
+        char                                    id_string[CTRL_ID_STRING_LEN];
+
+}       PACK_SUFFIX ib_ioc_profile_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       ioc_guid
+               An EUI-64 GUID used to uniquely identify the IO controller.
+
+       vend_id
+               IO controller vendor ID, IEEE format.
+
+       dev_id
+               A number assigned by the vendor to identify the type of controller.
+
+       dev_ver
+               A number assigned by the vendor to identify the device version.
+
+       subsys_vend_id
+               ID of the vendor of the enclosure, if any, in which the IO controller
+               resides in IEEE format; otherwise zero.
+
+       subsys_id
+               A number identifying the subsystem where the controller resides.
+
+       io_class
+               0x0000 - 0xfffe = reserved for IO classes encompassed by InfiniBand
+               Architecture.  0xffff = Vendor specific.
+
+       io_subclass
+               0x0000 - 0xfffe = reserved for IO subclasses encompassed by InfiniBand
+               Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+               if the io_class component is 0xffff.
+
+       protocol
+               0x0000 - 0xfffe = reserved for protocols encompassed by InfiniBand
+               Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+               if the io_class component is 0xffff.
+
+       protocol_ver
+               Protocol specific.
+
+       send_msg_depth
+               Maximum depth of the send message queue.
+
+       rdma_read_depth
+               Maximum depth of the per-channel RDMA read queue.
+
+       send_msg_size
+               Maximum size of send messages.
+
+       ctrl_ops_cap
+               Supported operation types of this IO controller.  A bit set to one
+               for affirmation of supported capability.
+
+       num_svc_entries
+               Number of entries in the service entries table.
+
+       id_string
+               UTF-8 encoded string for identifying the controller to an operator.
+
+

SEE ALSO

+
 ib_dm_mad_t
+
+
+
+ +
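
EXAMPLE

+       A minimal sketch of testing the operation capability bits; reading
+       CTRL_OPS_CAP_WT as "RDMA write to the IOC" follows the usual DM
+       naming and is an assumption here.
+
+        AL_INLINE boolean_t AL_API
+        ioc_supports_rdma_write(
+                IN              const   ib_ioc_profile_t* const         p_profile )
+        {
+                /* WT: RDMA write to the IO controller (assumed meaning). */
+                return (boolean_t)
+                        ((p_profile->ctrl_ops_cap & CTRL_OPS_CAP_WT) != 0);
+        }
+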

[Functions] +IBA Base: Types/ib_iou_info_diag_dev_id

+ +

[top][index]

+

NAME

+
       ib_iou_info_diag_dev_id
+
+

DESCRIPTION

+
       Returns the DiagDeviceID.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_iou_info_diag_dev_id(
+        IN              const   ib_iou_info_t* const            p_iou_info )
+{
+        return( (uint8_t)(p_iou_info->diag_rom >> 6 & 1) );
+}
+
+

PARAMETERS

+
       p_iou_info
+               [in] Pointer to the IO Unit information structure.
+
+ RETURN VALUES
+       DiagDeviceID field of the IO Unit information.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_iou_info_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_iou_info_option_rom

+ +

[top][index]

+

NAME

+
       ib_iou_info_option_rom
+
+

DESCRIPTION

+
       Returns the OptionROM.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_iou_info_option_rom(
+        IN              const   ib_iou_info_t*  const   p_iou_info )
+{
+        return( (uint8_t)(p_iou_info->diag_rom >> 7) );
+}
+
+

PARAMETERS

+
       p_iou_info
+               [in] Pointer to the IO Unit information structure.
+
+ RETURN VALUES
+       OptionROM field of the IO Unit information.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_iou_info_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_iou_info_t

+ +

[top][index]

+

NAME

+
       ib_iou_info_t
+
+

DESCRIPTION

+
       IBA defined IO Unit information structure (16.3.3.3)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_iou_info
+{
+        ib_net16_t              change_id;
+        uint8_t                 max_controllers;
+        uint8_t                 diag_rom;
+
+#define IB_DM_CTRL_LIST_SIZE    128
+#define IB_DM_MAX_CTRL                  0xFF
+
+        uint8_t                 controller_list[IB_DM_CTRL_LIST_SIZE];
+#define IOC_NOT_INSTALLED               0x0
+#define IOC_INSTALLED                   0x1
+//              Reserved values                         0x02-0xE
+#define SLOT_DOES_NOT_EXIST             0xF
+
+}       PACK_SUFFIX ib_iou_info_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       change_id
+               Value incremented, with rollover, by any change to the controller_list.
+
+       max_controllers
+               Number of slots in controller_list.
+
+       diag_rom
+               A byte containing two fields: DiagDeviceID and OptionROM.
+               These fields may be read using the ib_iou_info_diag_dev_id
+               and ib_iou_info_option_rom functions.
+
+       controller_list
+               A series of 4-bit nibbles, with each nibble representing a slot
+               in the IO Unit.  Individual nibbles may be read using the
+               ioc_at_slot function.
+
+

SEE ALSO

+
 ib_dm_mad_t, ib_iou_info_diag_dev_id, ib_iou_info_option_rom, ioc_at_slot
+
+
+
+ +
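
EXAMPLE

+       A minimal sketch of scanning the controller list with the ioc_at_slot
+       accessor referenced above, assuming 1-based slot numbering;
+       p_iou_info stands in for a fetched IO Unit information attribute.
+
+        uint8_t         slot;
+
+        for( slot = 1; slot <= p_iou_info->max_controllers; slot++ )
+        {
+                if( ioc_at_slot( p_iou_info, slot ) == IOC_INSTALLED )
+                {
+                        /* An IO controller is present in this slot. */
+                }
+        }
+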

[Structures] +IBA Base: Types/ib_lft_record_t

+ +

[top][index]

+

NAME

+
       ib_lft_record_t
+
+

DESCRIPTION

+
       IBA defined LinearForwardingTable. (14.2.5.6)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_lft_record
+{
+        ib_net16_t              lid;
+        ib_net16_t              block_num;
+        uint32_t                resv0;
+        uint8_t                 lft[64];
+
+}       PACK_SUFFIX ib_lft_record_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_mad_init_new

+ +

[top][index]

+

NAME

+
       ib_mad_init_new
+
+

DESCRIPTION

+
       Initializes a MAD common header.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_mad_init_new(
+        IN                              ib_mad_t* const                         p_mad,
+        IN              const   uint8_t                                         mgmt_class,
+        IN              const   uint8_t                                         class_ver,
+        IN              const   uint8_t                                         method,
+        IN              const   ib_net64_t                                      trans_id,
+        IN              const   ib_net16_t                                      attr_id,
+        IN              const   ib_net32_t                                      attr_mod )
+{
+        CL_ASSERT( p_mad );
+        p_mad->base_ver = 1;
+        p_mad->mgmt_class = mgmt_class;
+        p_mad->class_ver = class_ver;
+        p_mad->method = method;
+        p_mad->status = 0;
+        p_mad->class_spec = 0;
+        p_mad->trans_id = trans_id;
+        p_mad->attr_id = attr_id;
+        p_mad->resv = 0;
+        p_mad->attr_mod = attr_mod;
+}
+
+

PARAMETERS

+
       p_mad
+               [in] Pointer to the MAD common header.
+
+       mgmt_class
+               [in] Class of operation.
+
+       class_ver
+               [in] Version of MAD class-specific format.
+
+       method
+               [in] Method to perform, including 'R' bit.
+
+       trans_id
+               [in] Transaction ID.
+
+       attr_id
+               [in] Attribute ID.
+
+       attr_mod
+               [in] Attribute modifier.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +
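
EXAMPLE

+       A minimal sketch of building a request MAD; the management class,
+       attribute, and transaction ID values are illustrative.
+
+        ib_mad_t        mad;
+
+        ib_mad_init_new( &mad,
+                IB_MCLASS_SUBN_ADM,             /* class (illustrative) */
+                2,                              /* class version */
+                IB_MAD_METHOD_GET,
+                CL_HTON64( 0x1234ULL ),         /* transaction ID */
+                IB_MAD_ATTR_NODE_RECORD,        /* attribute (illustrative) */
+                0 );                            /* attribute modifier */
+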

[Functions] +IBA Base: Types/ib_mad_init_response

+ +

[top][index]

+

NAME

+
       ib_mad_init_response
+
+

DESCRIPTION

+
       Initializes a MAD common header as a response.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_mad_init_response(
+        IN              const   ib_mad_t* const                         p_req_mad,
+        IN                              ib_mad_t* const                         p_mad,
+        IN              const   ib_net16_t                                      status )
+{
+        CL_ASSERT( p_req_mad );
+        CL_ASSERT( p_mad );
+        *p_mad = *p_req_mad;
+        p_mad->status = status;
+        if( p_mad->method == IB_MAD_METHOD_SET )
+                p_mad->method = IB_MAD_METHOD_GET;
+        p_mad->method |= IB_MAD_METHOD_RESP_MASK;
+}
+
+

PARAMETERS

+
       p_req_mad
+               [in] Pointer to the MAD common header in the original request MAD.
+
+       p_mad
+               [in] Pointer to the MAD common header to initialize.
+
+       status
+               [in] MAD Status value to return.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
       p_req_mad and p_mad may point to the same MAD.
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +
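
EXAMPLE

+       A minimal sketch that turns a received request into its response in
+       place, which the NOTES above explicitly allow; p_req_mad stands in
+       for the received request buffer.
+
+        ib_mad_init_response( p_req_mad, p_req_mad, 0 );
+        CL_ASSERT( ib_mad_is_response( p_req_mad ) );
+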

[Functions] +IBA Base: Types/ib_mad_is_response

+ +

[top][index]

+

NAME

+
       ib_mad_is_response
+
+

DESCRIPTION

+
       Returns TRUE if the MAD is a response ('R' bit set),
+       FALSE otherwise.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_mad_is_response(
+        IN              const   ib_mad_t* const                         p_mad )
+{
+        CL_ASSERT( p_mad );
+        return( (p_mad->method & IB_MAD_METHOD_RESP_MASK) ==
+                        IB_MAD_METHOD_RESP_MASK );
+}
+
+

PARAMETERS

+
       p_mad
+               [in] Pointer to the MAD.
+
+ RETURN VALUES
+       Returns TRUE if the MAD is a response ('R' bit set),
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_mad_t

+ +

[top][index]

+

NAME

+
       ib_mad_t
+
+

DESCRIPTION

+
       IBA defined MAD header (13.4.3)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_mad
+{
+        uint8_t                                 base_ver;
+        uint8_t                                 mgmt_class;
+        uint8_t                                 class_ver;
+        uint8_t                                 method;
+        ib_net16_t                              status;
+        ib_net16_t                              class_spec;
+        ib_net64_t                              trans_id;
+        ib_net16_t                              attr_id;
+        ib_net16_t                              resv;
+        ib_net32_t                              attr_mod;
+
+}       PACK_SUFFIX ib_mad_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       base_ver
+               MAD base format.
+
+       mgmt_class
+               Class of operation.
+
+       class_ver
+               Version of MAD class-specific format.
+
+       method
+               Method to perform, including 'R' bit.
+
+       status
+               Status of operation.
+
+       class_spec
+               Reserved for subnet management.
+
+       trans_id
+               Transaction ID.
+
+       attr_id
+               Attribute ID.
+
+       resv
+               Reserved field.
+
+       attr_mod
+               Attribute modifier.
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_member_get_scope

+ +

[top][index]

+

NAME

+
       ib_member_get_scope
+
+

DESCRIPTION

+
       Get encoded MGID scope
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_member_get_scope(
+        IN              const   uint8_t                                         scope_state )
+{
+        return (scope_state >> 4);
+}
+
+

PARAMETERS

+
       scope_state
+               [in] the scope and state
+
+ RETURN VALUES
+       Encoded scope.
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_member_get_scope_state

+ +

[top][index]

+

NAME

+
       ib_member_get_scope_state
+
+

DESCRIPTION

+
       Get encoded MGID scope and JoinState
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_member_get_scope_state(
+        IN              const   uint8_t                                         scope_state,
+                OUT                     uint8_t* const                          p_scope,
+                OUT                     uint8_t* const                          p_state )
+{
+        if (p_scope)
+                *p_scope = ib_member_get_scope( scope_state );
+
+        if (p_state)
+                *p_state = ib_member_get_state( scope_state );
+}
+
+

PARAMETERS

+
       scope_state
+               [in] the scope and state
+
+ RETURN VALUES
+       p_scope
+               [out] pointer to the MGID scope
+
+       p_state
+               [out] pointer to the join state
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_member_get_sl_flow_hop

+ +

[top][index]

+

NAME

+
       ib_member_get_sl_flow_hop
+
+

DESCRIPTION

+
       Get encoded sl flow label and hop limit
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_member_get_sl_flow_hop(
+        IN              const   ib_net32_t                                      sl_flow_hop,
+                OUT                     uint8_t* const                          p_sl OPTIONAL,
+                OUT                     net32_t* const                          p_flow_lbl OPTIONAL,
+                OUT                     uint8_t* const                          p_hop OPTIONAL )
+{
+        uint32_t tmp_sl_flow_hop;
+
+        /* Decode in host order to mirror ib_member_set_sl_flow_hop: the
+         * hop limit occupies the low byte, the flow label the next 20
+         * bits, and the SL the top 4 bits. */
+        tmp_sl_flow_hop = cl_ntoh32( sl_flow_hop );
+
+        if (p_hop)
+                *p_hop = (uint8_t)(tmp_sl_flow_hop & 0xff);
+
+        tmp_sl_flow_hop >>= 8;
+
+        if (p_flow_lbl)
+                *p_flow_lbl = cl_hton32( tmp_sl_flow_hop & 0x000fffff );
+
+        tmp_sl_flow_hop >>= 20;
+
+        if (p_sl)
+                *p_sl = (uint8_t)(tmp_sl_flow_hop & 0x0f);
+}
+
+

PARAMETERS

+
       sl_flow_hop
+               [in] the sl flow label and hop limit of MC Group
+
+ RETURN VALUES
+       p_sl
+               [out] pointer to the service level
+
+       p_flow_lbl
+               [out] pointer to the flow label info
+
+       p_hop
+               [out] pointer to the hop count limit.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_member_get_state

+ +

[top][index]

+

NAME

+
       ib_member_get_state
+
+

DESCRIPTION

+
       Get encoded MGID JoinState
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_member_get_state(
+        IN              const   uint8_t                                         scope_state )
+{
+        return (scope_state & 0x0f);
+}
+
+

PARAMETERS

+
       scope_state
+               [in] the scope and state
+
+ RETURN VALUES
+               Encoded JoinState
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_member_rec_t

+ +

[top][index]

+

NAME

+
       ib_member_rec_t
+
+

DESCRIPTION

+
       Multicast member record, used to create, join, and leave multicast
+       groups.
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_member_rec
+{
+        ib_gid_t                                mgid;
+        ib_gid_t                                port_gid;
+        ib_net32_t                              qkey;
+        ib_net16_t                              mlid;
+        uint8_t                                 mtu;
+        uint8_t                                 tclass;
+        ib_net16_t                              pkey;
+        uint8_t                                 rate;
+        uint8_t                                 pkt_life;
+        ib_net32_t                              sl_flow_hop;
+        uint8_t                                 scope_state;
+        uint8_t                                 proxy_join;
+        uint8_t                                 reserved[2];
+        uint8_t                                 pad[4];
+
+}       PACK_SUFFIX ib_member_rec_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       mgid
+               Multicast GID address for this multicast group.
+
+       port_gid
+               Valid GID of the endpoint joining this multicast group.
+
+       qkey
+               Q_Key to be used by this multicast group.
+
+       mlid
+               Multicast LID for this multicast group.
+
+       mtu
+               MTU and MTU selector fields to use on this path
+
+       tclass
+               Another global routing parameter.
+
+       pkey
+               Partition key (P_Key) to use for this member.
+
+       rate
+               Rate and rate selector fields to use on this path.
+
+       pkt_life
+               Packet lifetime
+
+       sl_flow_hop
+               Global routing parameters: service level, hop count, and flow label.
+
+       scope_state
+               MGID scope and JoinState of multicast request.
+
+       proxy_join
+               Enables others in the partition to proxy add/remove from the group.
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_member_set_join_state

+ +

[top][index]

+

NAME

+
       ib_member_set_join_state
+
+

DESCRIPTION

+
       Set JoinState
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_member_set_join_state(
+        IN      OUT                     ib_member_rec_t                         *p_mc_rec,
+        IN              const   uint8_t                                         state )
+{
+        p_mc_rec->scope_state &= 0xF0;
+        p_mc_rec->scope_state |= (state & 0x0F);
+}
+
+

PARAMETERS

+
       p_mc_rec
+               [in] pointer to the member record
+
+       state
+               [in] the JoinState
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_member_set_scope

+ +

[top][index]

+

NAME

+
       ib_member_set_scope
+
+

DESCRIPTION

+
       Set encoded scope of a MCR.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_member_set_scope(
+        IN      OUT                     uint8_t* const                          p_scope_state,
+        IN              const   uint8_t                                         scope )
+{
+        CL_ASSERT( scope <= 0x0F );
+        /* Scope is MS 4-bits. */
+        *p_scope_state &= 0x0F;
+        *p_scope_state |= (scope << 4);
+}
+
+

PARAMETERS

+
       p_scope_state
+               [in/out] Pointer to the MCR scope_state field.
+
+       scope
+               [in] The desired scope.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_member_set_scope_state

+ +

[top][index]

+

NAME

+
       ib_member_set_scope_state
+
+

DESCRIPTION

+
       Set encoded MGID scope and JoinState
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_member_set_scope_state(
+        IN              const   uint8_t                                         scope,
+        IN              const   uint8_t                                         state )
+{
+        /* Scope is MS 4-bits, state is LS 4-bits */
+        return ((scope << 4) | (state & 0xF));
+}
+
+

PARAMETERS

+
       scope
+               [in] the MGID scope
+
+       state
+               [in] the JoinState
+
+ RETURN VALUES
+       scope_state
+               [out] the encoded scope and JoinState value
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +
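
EXAMPLE

+       An illustrative encode/decode round trip; scope 0x2 (link-local) and
+       JoinState 0x1 (full member) follow the usual IBA assignments and are
+       assumptions here.
+
+        uint8_t scope_state, scope, state;
+
+        scope_state = ib_member_set_scope_state( 0x2, 0x1 );
+        ib_member_get_scope_state( scope_state, &scope, &state );
+        /* scope == 0x2, state == 0x1 */
+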

[Functions] +IBA Base: Types/ib_member_set_sl_flow_hop

+ +

[top][index]

+

NAME

+
       ib_member_set_sl_flow_hop
+
+

DESCRIPTION

+
       Set encoded SL, flow label and hop limit
+
+

SYNOPSIS

+
AL_INLINE ib_net32_t AL_API
+ib_member_set_sl_flow_hop(
+        IN              const   uint8_t                                         sl,
+        IN              const   net32_t                                         flow_lbl,
+        IN              const   uint8_t                                         hop_limit )
+{
+        ib_net32_t              sl_flow_hop;
+
+        sl_flow_hop = sl;
+        sl_flow_hop <<= 20;
+        sl_flow_hop |= (cl_ntoh32( flow_lbl ) & 0x000FFFFF);
+        sl_flow_hop <<= 8;
+        sl_flow_hop |= hop_limit;
+        return cl_hton32(sl_flow_hop);
+}
+
+

PARAMETERS

+
       sl
+               [in] the service level.
+
+       flow_lbl
+               [in] the flow label info
+
+       hop_limit
+               [in] the hop limit.
+
+ RETURN VALUES
+       sl_flow_hop
+               [out] the sl flow label and hop limit
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +
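
EXAMPLE

+       An illustrative round trip through the two helpers above; the SL,
+       flow label, and hop limit values are arbitrary.
+
+        ib_net32_t      sl_flow_hop;
+        uint8_t         sl, hop;
+        net32_t         flow;
+
+        sl_flow_hop = ib_member_set_sl_flow_hop( 1, CL_HTON32( 0xABCDE ), 63 );
+        ib_member_get_sl_flow_hop( sl_flow_hop, &sl, &flow, &hop );
+        /* sl == 1, flow == CL_HTON32( 0xABCDE ), hop == 63 */
+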

[Functions] +IBA Base: Types/ib_member_set_state

+ +

[top][index]

+

NAME

+
       ib_member_set_state
+
+

DESCRIPTION

+
       Set encoded JoinState of a MCR.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_member_set_state(
+        IN      OUT                     uint8_t* const                          p_scope_state,
+        IN              const   uint8_t                                         state )
+{
+        CL_ASSERT( state <= 0x0F );
+        /* State is LS 4-bits. */
+        *p_scope_state &= 0xF0;
+        *p_scope_state |= (state & 0x0F);
+}
+
+

PARAMETERS

+
       p_scope_state
+               [in/out] Pointer to the MCR scope_state field to modify.
+
+       state
+               [in] the JoinState
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_member_rec_t
+
+
+
+ +

[Definitions] +IBA Base: Types/ib_net16_t

+ +

[top][index]

+

NAME

+
       ib_net16_t
+
+

DESCRIPTION

+
       Defines the network ordered type for 16-bit values.
+
+

SOURCE

+
typedef uint16_t        ib_net16_t;
+
+
+
+ +

[Definitions] +IBA Base: Types/ib_net32_t

+ +

[top][index]

+

NAME

+
       ib_net32_t
+
+

DESCRIPTION

+
       Defines the network ordered type for 32-bit values.
+
+

SOURCE

+
typedef uint32_t        ib_net32_t;
+
+
+
+ +

[Definitions] +IBA Base: Types/ib_net64_t

+ +

[top][index]

+

NAME

+
       ib_net64_t
+
+

DESCRIPTION

+
       Defines the network ordered type for 64-bit values.
+
+

SOURCE

+
typedef uint64_t        ib_net64_t;
+
+
+
+ +

[Functions] +IBA Base: Types/ib_node_info_get_local_port_num

+ +

[top][index]

+

NAME

+
       ib_node_info_get_local_port_num
+
+

DESCRIPTION

+
       Gets the local port number from the NodeInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_node_info_get_local_port_num(
+        IN              const   ib_node_info_t* const           p_ni )
+{
+        return( (uint8_t)(( p_ni->port_num_vendor_id &
+                        IB_NODE_INFO_PORT_NUM_MASK )
+                        >> IB_NODE_INFO_PORT_NUM_SHIFT ));
+}
+
+

PARAMETERS

+
       p_ni
+               [in] Pointer to a NodeInfo attribute.
+
+ RETURN VALUES
+       Local port number that returned the attribute.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_node_info_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_node_info_get_vendor_id

+ +

[top][index]

+

NAME

+
       ib_node_info_get_vendor_id
+
+

DESCRIPTION

+
       Gets the VendorID from the NodeInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE ib_net32_t AL_API
+ib_node_info_get_vendor_id(
+        IN              const   ib_node_info_t* const           p_ni )
+{
+        return( (ib_net32_t)( p_ni->port_num_vendor_id &
+                        IB_NODE_INFO_VEND_ID_MASK ) );
+}
+
+

PARAMETERS

+
       p_ni
+               [in] Pointer to a NodeInfo attribute.
+
+ RETURN VALUES
+       VendorID of the node that returned the attribute.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_node_info_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_node_info_t

+ +

[top][index]

+

NAME

+
       ib_node_info_t
+
+

DESCRIPTION

+
       IBA defined NodeInfo. (14.2.5.3)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_node_info
+{
+        uint8_t                         base_version;
+        uint8_t                         class_version;
+        uint8_t                         node_type;
+        uint8_t                         num_ports;
+        ib_net64_t                      sys_guid;
+        ib_net64_t                      node_guid;
+        ib_net64_t                      port_guid;
+        ib_net16_t                      partition_cap;
+        ib_net16_t                      device_id;
+        ib_net32_t                      revision;
+        ib_net32_t                      port_num_vendor_id;
+
+}       PACK_SUFFIX ib_node_info_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +
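
EXAMPLE

+       A minimal sketch of decoding the packed port_num_vendor_id field
+       with the accessors above; p_ni stands in for a NodeInfo attribute
+       returned by a query.
+
+        uint8_t         port_num;
+        ib_net32_t      vend_id;
+
+        port_num = ib_node_info_get_local_port_num( p_ni );
+        vend_id = ib_node_info_get_vendor_id( p_ni );
+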

[Functions] +IBA Base: Types/ib_notice_get_count

+ +

[top][index]

+

NAME

+
       ib_notice_get_count
+
+

DESCRIPTION

+
       Retrieves the notice toggle count from a notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint16_t AL_API
+ib_notice_get_count(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return ((cl_ntoh16( p_notice_attr->combo3 ) & 0xFFFE) >> 1);
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               notice toggle count to return.
+
+ RETURN VALUES
+       Returns the notice toggle count of the notice.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_count
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_dev_id

+ +

[top][index]

+

NAME

+
       ib_notice_get_dev_id
+
+

DESCRIPTION

+
       Retrieves the device ID from a vendor specific notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint16_t AL_API
+ib_notice_get_dev_id(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return ib_notice_get_trap_num( p_notice_attr );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               device ID to return.
+
+ RETURN VALUES
+       Returns the device ID of the notice, in host byte order.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_dev_id
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_generic

+ +

[top][index]

+

NAME

+
       ib_notice_get_generic
+
+

DESCRIPTION

+
       Retrieves whether a notice trap is generic.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_notice_get_generic(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        if( cl_ntoh32( p_notice_attr->combo1 ) & 0x00000001 )
+                return TRUE;
+        return FALSE;
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure for which to return
+               whether it is generic or not.
+
+ RETURN VALUES
+       Returns TRUE if the notice is generic.
+
+       Returns FALSE if the notice is vendor specific.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_generic
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_prod_type

+ +

[top][index]

+

NAME

+
       ib_notice_get_prod_type
+
+

DESCRIPTION

+
       Retrieves the producer type from a generic notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint32_t AL_API
+ib_notice_get_prod_type(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return (cl_ntoh32( p_notice_attr->combo1 ) >> 8);
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               producer type to return.
+
+ RETURN VALUES
+       Returns the producer type of the notice, in host byte order.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_prod_type
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_toggle

+ +

[top][index]

+

NAME

+
       ib_notice_get_toggle
+
+

DESCRIPTION

+
       Retrieves the notice toggle bit from a notice trap.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_notice_get_toggle(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return (cl_ntoh16( p_notice_attr->combo3 ) & 0x0001);
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               notice toggle bit value to return.
+
+ RETURN VALUES
+       Returns TRUE if the notice toggle bit of the notice is set.
+
+       Returns FALSE otherwise.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_toggle
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_trap_num

+ +

[top][index]

+

NAME

+
       ib_notice_get_trap_num
+
+

DESCRIPTION

+
       Retrieves the trap number from a generic notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint16_t AL_API
+ib_notice_get_trap_num(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return cl_ntoh16( p_notice_attr->combo2 );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               trap number to return.
+
+ RETURN VALUES
+       Returns the trap number of the notice, in host byte order.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_trap_num
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_type

+ +

[top][index]

+

NAME

+
       ib_notice_get_type
+
+

DESCRIPTION

+
       Retrieves the type of a notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_notice_get_type(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return (uint8_t)((cl_ntoh32( p_notice_attr->combo1 ) >> 1) & 0x0000007F);
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose type to return.
+
+ RETURN VALUES
+       Returns the type of the notice.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_type
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_get_vend_id

+ +

[top][index]

+

NAME

+
       ib_notice_get_vend_id
+
+

DESCRIPTION

+
       Retrieves the vendor ID from a vendor specific notice trap.
+
+

SYNOPSIS

+
AL_INLINE uint32_t AL_API
+ib_notice_get_vend_id(
+        IN              const   ib_mad_notice_attr_t* const     p_notice_attr )
+{
+        return ib_notice_get_prod_type( p_notice_attr );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in] Pointer to the notice attribute structure whose
+               vendor ID to return.
+
+ RETURN VALUES
+       Returns the vendor ID of the notice, in host byte order.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_set_vend_id
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_count

+ +

[top][index]

+

NAME

+
       ib_notice_set_count
+
+

DESCRIPTION

+
       Sets the toggle count of a notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_count(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint16_t                                        toggle_cnt )
+{
+        uint16_t        val;
+        val = cl_ntoh16( p_notice_attr->combo3 );
+        val &= 0x0001;
+        val |= (toggle_cnt << 1);
+        p_notice_attr->combo3 = cl_hton16( val );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose toggle count to set.
+
+       toggle_cnt
+               [in] Toggle count value of the notice.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_count
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_dev_id

+ +

[top][index]

+

NAME

+
       ib_notice_set_dev_id
+
+

DESCRIPTION

+
       Sets the device ID of a vendor specific notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_dev_id(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint16_t                                        dev_id )
+{
+        ib_notice_set_trap_num( p_notice_attr, dev_id );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose device ID to set.
+
+       dev_id
+               [in] Device ID of notice trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_dev_id
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_generic

+ +

[top][index]

+

NAME

+
       ib_notice_set_generic
+
+

DESCRIPTION

+
       Sets whether a notice trap is generic.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_generic(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   boolean_t                                       is_generic )
+{
+        uint32_t        val;
+
+        val = cl_ntoh32( p_notice_attr->combo1 );
+        if( is_generic )
+                val |= 0x00000001;
+        else
+                val &= 0xFFFFFFFE;
+        p_notice_attr->combo1 = cl_hton32( val );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure for which to set
+               the generic bit.
+
+       is_generic
+               [in] TRUE if the notice is generic, FALSE if vendor specific.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_generic
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_prod_type

+ +

[top][index]

+

NAME

+
       ib_notice_set_prod_type
+
+

DESCRIPTION

+
       Sets the producer type of a generic notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_prod_type(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint32_t                                        prod_type )
+{
+        uint32_t        val;
+
+        val = cl_ntoh32( p_notice_attr->combo1 );
+        /* Clear the producer type; keep the generic bit and type. */
+        val &= 0x000000FF;
+        /* Set new value. */
+        val |= (prod_type << 8);
+        p_notice_attr->combo1 = cl_hton32( val );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose producer type to set.
+
+       prod_type
+               [in] Producer type of notice trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_prod_type
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_toggle

+ +

[top][index]

+

NAME

+
       ib_notice_set_toggle
+
+

DESCRIPTION

+
       Sets the notice toggle bit of a notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_toggle(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   boolean_t                                       toggle_val )
+{
+        uint16_t        val;
+        val = cl_ntoh16( p_notice_attr->combo3 );
+        if( toggle_val )
+                val |= 0x0001;
+        else
+                val &= 0xFFFE;
+        p_notice_attr->combo3 = cl_hton16( val );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose notice toggle bit to set or clear.
+
+       toggle_val
+               [in] Boolean value indicating whether the toggle bit of the notice
+               should be set or cleared.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_toggle
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_trap_num

+ +

[top][index]

+

NAME

+
       ib_notice_set_trap_num
+
+

DESCRIPTION

+
       Sets the trap number of a generic notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_trap_num(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint16_t                                        trap_num )
+{
+        p_notice_attr->combo2 = cl_hton16( trap_num );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose trap number to set.
+
+       trap_num
+               [in] Trap number to set.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_trap_num
+
+
+
+ +

[Functions] +IBA Base: Types/ib_notice_set_type

+ +

[top][index]

+

NAME

+
       ib_notice_set_type
+
+

DESCRIPTION

+
       Sets the type of a notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_type(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint8_t                                         type )
+{
+        uint32_t        val;
+
+        val = cl_ntoh32( p_notice_attr->combo1 );
+        /* Clear the type. */
+        val &= 0xFFFFFF01;
+        /* Set new value. */
+        val |= (((uint32_t)(type & 0x7F)) << 1);
+        p_notice_attr->combo1 = cl_hton32( val );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure whose type to set.
+
+       type
+               [in] Type of notice trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_type
+
+
+
+ +
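
EXAMPLE

       A hedged sketch of describing a generic trap.  The numeric type,
       producer, and trap values below are illustrative placeholders; the
       defined encodings are in the IBA spec (13.4.8.2).

        ib_mad_notice_attr_t    notice;

        cl_memclr( &notice, sizeof(notice) );
        ib_notice_set_generic( &notice, TRUE );
        ib_notice_set_type( &notice, 1 );       /* e.g. fatal */
        ib_notice_set_prod_type( &notice, 1 );  /* e.g. channel adapter */
        ib_notice_set_trap_num( &notice, 128 ); /* e.g. link state change */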

[Functions] +IBA Base: Types/ib_notice_set_vend_id

+ +

[top][index]

+

NAME

+
       ib_notice_set_vend_id
+
+

DESCRIPTION

+
       Sets the vendor ID of a vendor specific notice trap.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_notice_set_vend_id(
+        IN      OUT                     ib_mad_notice_attr_t* const     p_notice_attr,
+        IN              const   uint32_t                                        vend_id )
+{
+        ib_notice_set_prod_type( p_notice_attr, vend_id );
+}
+
+

PARAMETERS

+
       p_notice_attr
+               [in/out] Pointer to the notice attribute structure
+               whose vendor ID to set.
+
+       vend_id
+               [in] Vendor ID of notice trap.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_mad_notice_attr_t, ib_notice_get_vend_id
+
+
+
+ +
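
EXAMPLE

       A hedged sketch of a vendor specific notice.  As the wrappers above
       show, vend_id and dev_id simply alias the generic producer-type and
       trap-number fields; the IDs below are illustrative only.

        ib_mad_notice_attr_t    notice;

        cl_memclr( &notice, sizeof(notice) );
        ib_notice_set_generic( &notice, FALSE );
        ib_notice_set_vend_id( &notice, 0x0002C9 );     /* illustrative OUI */
        ib_notice_set_dev_id( &notice, 0x6274 );        /* illustrative */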

[Functions] +IBA Base: Types/ib_path_get_ipd

+ +

[top][index]

+

NAME

+
       ib_path_get_ipd
+
+

DESCRIPTION

+
       Returns the encoded value for the inter-packet delay (IPD).
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_get_ipd(
+        IN                              uint8_t                                         local_link_width_supported,
+        IN                              uint8_t                                         path_rec_rate )
+{
+        uint8_t ipd = 0;
+
+        switch(local_link_width_supported)
+        {
+                //link_width_supported = 1: 1x
+                case 1:
+                        break;
+
+                //link_width_supported = 3: 1x or 4x
+                case 3:
+                        switch(path_rec_rate & 0x3F)
+                        {
+                                case IB_PATH_RECORD_RATE_2_5_GBS:
+                                        ipd = 3;
+                                        break;
+                                default:
+                                        break;
+                        }
+                        break;
+
+                //link_width_supported = 11: 1x or 4x or 12x
+                case 11:
+                        switch(path_rec_rate & 0x3F)
+                        {
+                                case IB_PATH_RECORD_RATE_2_5_GBS:
+                                        ipd = 11;
+                                        break;
+                                case IB_PATH_RECORD_RATE_10_GBS:
+                                        ipd = 2;
+                                        break;
+                                default:
+                                        break;
+                        }
+                        break;
+
+                default:
+                        break;
+        }
+
+        return ipd;
+}
+
+

PARAMETERS

+
       local_link_width_supported
+               [in] Encoded link width supported for this port.
+
+       path_rec_rate
+               [in] Rate field of the path record.
+
+ RETURN VALUES
+       Returns the encoded inter-packet delay (IPD) value.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
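
EXAMPLE

       A hedged sketch: computing the static-rate IPD for a 4x-capable
       port (link_width_supported = 3) carrying a 2.5 Gb/s path.

        uint8_t ipd;

        ipd = ib_path_get_ipd( 3, IB_PATH_RECORD_RATE_2_5_GBS );
        /* ipd == 3: inject at 1/(ipd + 1) = 1/4 of the 4x link rate,
         * matching the 2.5 Gb/s (1x) path rate. */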

[Functions] +IBA Base: Types/ib_path_rec_flow_lbl

+ +

[top][index]

+

NAME

+
       ib_path_rec_flow_lbl
+
+

DESCRIPTION

+
       Get flow label.
+
+

SYNOPSIS

+
AL_INLINE net32_t AL_API
+ib_path_rec_flow_lbl(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( cl_hton32( (cl_ntoh32(p_rec->hop_flow_raw.val) >> 8) & 0x000FFFFF ) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Flow label of the path record.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_hop_limit

+ +

[top][index]

+

NAME

+
       ib_path_rec_hop_limit
+
+

DESCRIPTION

+
       Get hop limit.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_hop_limit(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( p_rec->hop_flow_raw.bytes[3] );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Hop limit of the path record.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_init_local

+ +

[top][index]

+

NAME

+
       ib_path_rec_init_local
+
+

DESCRIPTION

+
       Initializes a subnet local path record.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_path_rec_init_local(
+        IN                              ib_path_rec_t*  const           p_rec,
+        IN              const   ib_gid_t*               const           p_dgid,
+        IN              const   ib_gid_t*               const           p_sgid,
+        IN              const   ib_net16_t                                      dlid,
+        IN              const   ib_net16_t                                      slid,
+        IN              const   uint8_t                                         num_path,
+        IN              const   ib_net16_t                                      pkey,
+        IN              const   uint8_t                                         sl,
+        IN              const   uint8_t                                         mtu_selector,
+        IN              const   uint8_t                                         mtu,
+        IN              const   uint8_t                                         rate_selector,
+        IN              const   uint8_t                                         rate,
+        IN              const   uint8_t                                         pkt_life_selector,
+        IN              const   uint8_t                                         pkt_life,
+        IN              const   uint8_t                                         preference )
+{
+        p_rec->dgid = *p_dgid;
+        p_rec->sgid = *p_sgid;
+        p_rec->dlid = dlid;
+        p_rec->slid = slid;
+        p_rec->num_path = num_path;
+        p_rec->pkey = pkey;
+        /* Only the lower 4 bits of the SL are valid; the rest are reserved. */
+        p_rec->sl = cl_ntoh16( sl );
+        p_rec->mtu = (uint8_t)((mtu & IB_PATH_REC_BASE_MASK) |
+                        (uint8_t)(mtu_selector << 6));
+        p_rec->rate = (uint8_t)((rate & IB_PATH_REC_BASE_MASK) |
+                        (uint8_t)(rate_selector << 6));
+        p_rec->pkt_life = (uint8_t)((pkt_life & IB_PATH_REC_BASE_MASK) |
+                        (uint8_t)(pkt_life_selector << 6));
+        p_rec->preference = preference;
+
+        /* Clear global routing fields for local path records */
+        p_rec->hop_flow_raw.val = 0;
+        p_rec->tclass = 0;
+
+        p_rec->resv0 = 0;
+        p_rec->resv1 = 0;
+        p_rec->resv2 = 0;
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+       p_dgid
+               [in] Pointer to the GID of the destination port.
+
+       p_sgid
+               [in] Pointer to the GID of the source port.
+
+       dlid
+               [in] LID of destination port.
+
+       slid
+               [in] LID of source port.
+
+       num_path
+               [in] In queries, maximum number of paths to return.
+               In responses, undefined.
+
+       pkey
+               [in] Partition key (P_Key) to use on this path.
+
+       sl
+               [in] Service level to use on this path.  Lower 4-bits are valid.
+
+       mtu_selector
+               [in] Encoded MTU selector value to use on this path
+
+       mtu
+               [in] Encoded MTU to use on this path
+
+       rate_selector
+               [in] Encoded rate selector value to use on this path.
+
+       rate
+               [in] Encoded rate to use on this path.
+
+       pkt_life_selector
+               [in] Encoded packet lifetime selector value for this path.
+
+       pkt_life
+               [in] Encoded Packet lifetime for this path.
+
+       preference
+               [in] Indicates the relative merit of this path versus other path
+               records returned from the SA.  Lower numbers are better.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_gid_t
+
+
+
+ +
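
EXAMPLE

       A hedged sketch of initializing a subnet local path record.  The
       GIDs and LIDs are assumed to come from earlier queries, and the
       encoded selector/value pairs are illustrative.

        ib_path_rec_t   path;

        ib_path_rec_init_local( &path, &dest_gid, &src_gid, dlid, slid,
                1,                              /* one path is enough */
                cl_hton16( 0xFFFF ),            /* default full-member P_Key */
                0,                              /* SL 0 */
                2, 4,                           /* MTU: exactly 2048 */
                2, IB_PATH_RECORD_RATE_10_GBS,  /* rate: exactly 10 Gb/s */
                2, 18,                          /* life: ~1s (4.096us * 2^18) */
                0 );                            /* highest preference */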

[Functions] +IBA Base: Types/ib_path_rec_mtu

+ +

[top][index]

+

NAME

+
       ib_path_rec_mtu
+
+

DESCRIPTION

+
       Get encoded path MTU.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_mtu(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)(p_rec->mtu & IB_PATH_REC_BASE_MASK) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path MTU.
+               1: 256
+               2: 512
+               3: 1024
+               4: 2048
+               5: 4096
+               others: reserved
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_mtu_sel

+ +

[top][index]

+

NAME

+
       ib_path_rec_mtu_sel
+
+

DESCRIPTION

+
       Get encoded path MTU selector.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_mtu_sel(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)((p_rec->mtu & IB_PATH_REC_SELECTOR_MASK) >> 6) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path MTU selector value (for queries).
+               0: greater than MTU specified
+               1: less than MTU specified
+               2: exactly the MTU specified
+               3: largest MTU available
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_num_path

+ +

[top][index]

+

NAME

+
       ib_path_rec_num_path
+
+

DESCRIPTION

+
       Get max number of paths to return.
+
+

SYNOPSIS

+
static inline uint8_t   
+ib_path_rec_num_path(
+        IN      const   ib_path_rec_t* const    p_rec )
+{
+        return( p_rec->num_path & 0x7F );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Maximum number of paths to return for each unique SGID_DGID combination.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_pkt_life

+ +

[top][index]

+

NAME

+
       ib_path_rec_pkt_life
+
+

DESCRIPTION

+
       Get encoded path pkt_life.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_pkt_life(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)(p_rec->pkt_life & IB_PATH_REC_BASE_MASK) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_pkt_life_sel

+ +

[top][index]

+

NAME

+
       ib_path_rec_pkt_life_sel
+
+

DESCRIPTION

+
       Get encoded path pkt_lifetime selector.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_pkt_life_sel(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)((p_rec->pkt_life & IB_PATH_REC_SELECTOR_MASK) >> 6 ));
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path pkt_lifetime selector value (for queries).
+               0: greater than the lifetime specified
+               1: less than the lifetime specified
+               2: exactly the lifetime specified
+               3: smallest packet lifetime available
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_rate

+ +

[top][index]

+

NAME

+
       ib_path_rec_rate
+
+

DESCRIPTION

+
       Get encoded path rate.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_rate(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)(p_rec->rate & IB_PATH_REC_BASE_MASK) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path rate.
+               2: 2.5 Gb/sec.
+               3: 10 Gb/sec.
+               4: 30 Gb/sec.
+               5: 5 Gb/sec.
+               6: 20 Gb/sec.
+               7: 40 Gb/sec.
+               8: 60 Gb/sec.
+               9: 80 Gb/sec.
+               10: 120 Gb/sec.
+               others: reserved
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_path_rec_rate_sel

+ +

[top][index]

+

NAME

+
       ib_path_rec_rate_sel
+
+

DESCRIPTION

+
       Get encoded path rate selector.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_rate_sel(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)((p_rec->rate & IB_PATH_REC_SELECTOR_MASK) >> 6) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       Encoded path rate selector value (for queries).
+               0: greater than rate specified
+               1: less than rate specified
+               2: exactly the rate specified
+               3: largest rate available
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +
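
EXAMPLE

       A hedged sketch of unpacking the packed selector/value fields of a
       path record returned by the SA; p_rec is assumed valid.

        uint8_t mtu     = ib_path_rec_mtu( p_rec );      /* 1..5 */
        uint8_t mtu_sel = ib_path_rec_mtu_sel( p_rec );  /* 0..3 */
        uint8_t rate    = ib_path_rec_rate( p_rec );     /* e.g. 3 = 10 Gb/s */
        uint8_t life    = ib_path_rec_pkt_life( p_rec ); /* 4.096us * 2^life */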

[Functions] +IBA Base: Types/ib_path_rec_set_hop_flow_raw

+ +

[top][index]

+

NAME

+
       ib_path_rec_set_hop_flow_raw
+
+

DESCRIPTION

+
       Sets the hop limit, flow label, and raw traffic bits of a path record.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_path_rec_set_hop_flow_raw(
+                OUT                     ib_path_rec_t* const            p_rec,
+        IN              const   uint8_t                                         hop_limit,
+        IN              const   net32_t                                         flow_lbl,
+        IN              const   boolean_t                                       raw )
+{
+        p_rec->hop_flow_raw.val = (cl_ntoh32( flow_lbl ) & 0x000FFFFF) << 8;
+        if( raw )
+                p_rec->hop_flow_raw.val |= 0x80000000;
+        p_rec->hop_flow_raw.val = cl_hton32( p_rec->hop_flow_raw.val );
+        p_rec->hop_flow_raw.bytes[3] = hop_limit;
+}
+
+

PARAMETERS

+
       p_rec
+               Pointer to the path record whose hop limit, flow label, and
+               raw traffic fields to set.
+
+       hop_limit
+               Hop limit to set in the path record.
+
+       flow_lbl
+               Flow label, in network byte order, to set in the path record.
+
+       raw
+               Boolean flag to indicate whether the path record is for raw traffic.
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +
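
EXAMPLE

       A hedged round-trip sketch using the accessors defined earlier in
       this header; the flow label value is arbitrary.

        ib_path_rec_t   rec;

        cl_memclr( &rec, sizeof(rec) );
        ib_path_rec_set_hop_flow_raw( &rec, 64, cl_hton32( 0x12345 ), FALSE );
        /* ib_path_rec_hop_limit( &rec ) == 64 */
        /* ib_path_rec_flow_lbl( &rec )  == cl_hton32( 0x12345 ) */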

[Functions] +IBA Base: Types/ib_path_rec_sl

+ +

[top][index]

+

NAME

+
       ib_path_rec_sl
+
+

DESCRIPTION

+
       Get path service level.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_path_rec_sl(
+        IN              const   ib_path_rec_t* const            p_rec )
+{
+        return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) );
+}
+
+

PARAMETERS

+
       p_rec
+               [in] Pointer to the path record object.
+
+ RETURN VALUES
+       SL.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_path_rec_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_path_rec_t

+ +

[top][index]

+

NAME

+
       ib_path_rec_t
+
+

DESCRIPTION

+
       Path records encapsulate the properties of a given
+       route between two end-points on a subnet.
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef __declspec(align(8)) struct _ib_path_rec
+{
+        uint64_t                                resv0;
+        ib_gid_t                                dgid;
+        ib_gid_t                                sgid;
+        ib_net16_t                              dlid;
+        ib_net16_t                              slid;
+        ib_field32_t                    hop_flow_raw;
+        uint8_t                                 tclass;
+        uint8_t                                 num_path;
+        ib_net16_t                              pkey;
+        ib_net16_t                              sl;
+        uint8_t                                 mtu;
+        uint8_t                                 rate;
+        uint8_t                                 pkt_life;
+        uint8_t                                 preference;
+        uint16_t                                resv1;
+        uint32_t                                resv2;
+
+}       PACK_SUFFIX ib_path_rec_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       resv0
+               Reserved bytes.
+
+       dgid
+               GID of destination port.
+
+       sgid
+               GID of source port.
+
+       dlid
+               LID of destination port.
+
+       slid
+               LID of source port.
+
+       hop_flow_raw
+               Global routing parameters: hop count, flow label and raw bit.
+
+       tclass
+               Traffic class, another of the global routing parameters.
+
+       num_path
+               In queries, maximum number of paths to return.
+               In responses, undefined.
+
+       pkey
+               Partition key (P_Key) to use on this path.
+
+       sl
+               Service level to use on this path.
+
+       mtu
+               MTU and MTU selector fields to use on this path
+
+       rate
+               Rate and rate selector fields to use on this path.
+
+       pkt_life
+               Packet lifetime
+
+       preference
+               Indicates the relative merit of this path versus other path
+               records returned from the SA.  Lower numbers are better.
+
+       resv1
+               Reserved bytes.
+
+       resv2
+               Reserved bytes.
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_pkey_get_base

+ +

[top][index]

+

NAME

+
       ib_pkey_get_base
+
+

DESCRIPTION

+
       Returns the base P_Key value with the membership bit stripped.
+
+

SYNOPSIS

+
AL_INLINE ib_net16_t AL_API
+ib_pkey_get_base(
+        IN              const   ib_net16_t                                      pkey )
+{
+        return( (ib_net16_t)(pkey & IB_PKEY_BASE_MASK) );
+}
+
+

PARAMETERS

+
       pkey
+               [in] P_Key value
+
+

RETURN VALUE

+
       Returns the base P_Key value with the membership bit stripped.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_pkey_is_full_member

+ +

[top][index]

+

NAME

+
       ib_pkey_is_full_member
+
+

DESCRIPTION

+
       Indicates if the port is a full member of the partition.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_pkey_is_full_member(
+        IN              const   ib_net16_t                                      pkey )
+{
+        return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK );
+}
+
+

PARAMETERS

+
       pkey
+               [in] P_Key value
+
+

RETURN VALUE

+
       TRUE if the port is a full member of the partition.
+       FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
 ib_pkey_get_base, ib_net16_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_pkey_is_invalid

+ +

[top][index]

+

NAME

+
       ib_pkey_is_invalid
+
+

DESCRIPTION

+
       Returns TRUE if the given P_Key is an invalid P_Key.
+       C10-116: the CI shall regard a P_Key as invalid if its low-order
+       15 bits are all zero...
+
+

SYNOPSIS

+
static inline boolean_t 
+ib_pkey_is_invalid(
+        IN      const   ib_net16_t              pkey )
+{
+  if (ib_pkey_get_base(pkey) == 0x0000)
+    return TRUE;
+  return FALSE;
+}
+
+

PARAMETERS

+
       pkey
+               [in] P_Key value
+
+

RETURN VALUE

+
       Returns TRUE if the given P_Key is invalid, FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
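
EXAMPLE

       A hedged sketch of scanning one entry of a P_Key table; tbl and i
       are hypothetical.

        ib_net16_t      pkey = tbl.pkey[i];

        if( !ib_pkey_is_invalid( pkey ) )
        {
                /* Compare partitions without the membership bit... */
                ib_net16_t      base = ib_pkey_get_base( pkey );
                /* ...and distinguish full (TRUE) from limited membership. */
                boolean_t       full = ib_pkey_is_full_member( pkey );
        }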

[Structures] +IBA Base: Types/ib_pkey_table_info_t

+ +

[top][index]

+

NAME

+
       ib_pkey_table_info_t
+
+

DESCRIPTION

+
       IBA defined PKey table. (14.2.5.7)
+
+

SYNOPSIS

+
#define PKEY_TABLE_MAX_ENTRIES          32
+
+#include <complib/cl_packon.h>
+typedef struct _ib_pkey_table_info
+{
+        ib_net16_t                      pkey[PKEY_TABLE_MAX_ENTRIES];
+
+}       PACK_SUFFIX ib_pkey_table_info_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Structures] +IBA Base: Types/ib_port_counters_t

+ +

[top][index]

+

NAME

+
       ib_port_counters_t
+
+

DESCRIPTION

+
       IBA defined PortCounters MAD format. (16.1.3.5)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_port_counters
+{
+        uint8_t                 reserved0;
+        uint8_t                 port_select;
+        ib_net16_t              counter_select;
+        ib_net16_t              symbol_error_counter; 
+        uint8_t                 link_error_recovery_counter;
+        uint8_t                 link_down_counter; 
+        ib_net16_t              port_rcv_errors; 
+        ib_net16_t              port_rcv_remote_physical_errors;
+        ib_net16_t              port_rcv_switch_relay_errors; 
+        ib_net16_t              port_xmit_discard; 
+        uint8_t                 port_xmit_constraint_errors;
+        uint8_t                 port_rcv_constraint_errors;
+        uint8_t                 reserved1;
+        /* uint4_t excessive_buffer_overrun_errors;
+        uint4_t local_link_integrity_errors; */
+        uint8_t                 lli_errors_exc_buf_errors;
+        ib_net16_t              reserved2; 
+        ib_net16_t              vl15_dropped;
+        ib_net32_t              port_xmit_data;
+        ib_net32_t              port_rcv_data;
+        ib_net32_t              port_xmit_pkts;
+        ib_net32_t              port_rcv_pkts;
+
+}       PACK_SUFFIX ib_port_counters_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_port_info_compute_rate

+ +

[top][index]

+

NAME

+
       ib_port_info_compute_rate
+
+

DESCRIPTION

+
       Returns the encoded value for the path rate.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_compute_rate(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        switch( p_pi->link_width_active * p_pi->link_width_active *
+                ib_port_info_get_link_speed_active( p_pi ) )
+        {
+        case 1:
+                return IB_PATH_RECORD_RATE_2_5_GBS;
+
+        case 2:
+                return IB_PATH_RECORD_RATE_5_GBS;
+
+        case 4:
+                return IB_PATH_RECORD_RATE_10_GBS;
+
+        case 8:
+                return IB_PATH_RECORD_RATE_20_GBS;
+
+        case 16:
+                return IB_PATH_RECORD_RATE_40_GBS;
+
+        case 64:
+                return IB_PATH_RECORD_RATE_30_GBS;
+
+        case 128:
+                return IB_PATH_RECORD_RATE_60_GBS;
+
+        case 256:
+                return IB_PATH_RECORD_RATE_120_GBS;
+
+        default:
+                return IB_PATH_RECORD_RATE_2_5_GBS;
+        }
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the encoded value for the path rate.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
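
EXAMPLE

       A hedged sketch.  Note how the width encoding is squared before
       being multiplied by the speed encoding: for link_width_active = 2
       (4x) at 2.5 Gb/s (encoded 1), the switch sees 2 * 2 * 1 = 4 and
       returns the 10 Gb/s rate.

        uint8_t rate = ib_port_info_compute_rate( p_pi );
        /* rate == IB_PATH_RECORD_RATE_10_GBS for a 4x, 2.5 Gb/s port. */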

[Functions] +IBA Base: Types/ib_port_info_get_init_type

+ +

[top][index]

+

NAME

+
       ib_port_info_get_init_type
+
+

DESCRIPTION

+
       Gets the init type of a port.
+
+

SYNOPSIS

+
static inline uint8_t   
+ib_port_info_get_init_type(
+        IN const ib_port_info_t* const p_pi)
+{
+        return (uint8_t) (p_pi->vl_cap & 0x0F);
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       InitType field
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_link_speed_active

+ +

[top][index]

+

NAME

+
       ib_port_info_get_link_speed_active
+
+

DESCRIPTION

+
       Returns the Link Speed Active value assigned to this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_link_speed_active(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)((p_pi->link_speed & IB_PORT_LINK_SPEED_ACTIVE_MASK) >>
+                IB_PORT_LINK_SPEED_SHIFT) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the link speed active value assigned to this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_link_speed_sup

+ +

[top][index]

+

NAME

+
       ib_port_info_get_link_speed_sup
+
+

DESCRIPTION

+
       Returns the encoded value for the link speed supported.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_link_speed_sup(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)((p_pi->state_info1 &
+                        IB_PORT_LINK_SPEED_SUPPORTED_MASK) >>
+                        IB_PORT_LINK_SPEED_SHIFT) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the encoded value for the link speed supported.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_lmc

+ +

[top][index]

+

NAME

+
       ib_port_info_get_lmc
+
+

DESCRIPTION

+
       Returns the LMC value assigned to this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_lmc(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the LMC value assigned to this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
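
EXAMPLE

       A hedged sketch: a port with LMC = l answers to 2^l consecutive
       LIDs starting at base_lid.

        uint8_t         lmc      = ib_port_info_get_lmc( p_pi );
        uint16_t        lid_base = cl_ntoh16( p_pi->base_lid );
        uint16_t        lid_last = (uint16_t)(lid_base + (1 << lmc) - 1);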

[Functions] +IBA Base: Types/ib_port_info_get_mpb

+ +

[top][index]

+

NAME

+
       ib_port_info_get_mpb
+
+

DESCRIPTION

+
       Returns the M_Key protect bits assigned to this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_mpb(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)((p_pi->mkey_lmc & IB_PORT_MPB_MASK) >>
+                        IB_PORT_MPB_SHIFT) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the M_Key protect bits assigned to this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_mtu_cap

+ +

[top][index]

+

NAME

+
       ib_port_info_get_mtu_cap
+
+

DESCRIPTION

+
       Returns the encoded value for the maximum MTU supported by this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_mtu_cap(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)(p_pi->mtu_cap & 0x0F) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the encoded value for the maximum MTU supported by this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_neighbor_mtu

+ +

[top][index]

+

NAME

+
       ib_port_info_get_neighbor_mtu
+
+

DESCRIPTION

+
       Returns the encoded value for the neighbor MTU at this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_neighbor_mtu(
+        IN const ib_port_info_t* const p_pi )
+{
+        return( (uint8_t)((p_pi->mtu_smsl & 0xF0) >> 4) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the encoded value for the neighbor MTU at this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_op_vls

+ +

[top][index]

+

NAME

+
       ib_port_info_get_op_vls
+
+

DESCRIPTION

+
       Gets the operational VLs on a port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_op_vls(
+        IN const ib_port_info_t* const p_pi)
+{
+        return((p_pi->vl_enforce >> 4) & 0x0F);
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       OP_VLS field
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_port_state

+ +

[top][index]

+

NAME

+
       ib_port_info_get_port_state
+
+

DESCRIPTION

+
       Returns the port state.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_port_state(
+        IN              const   ib_port_info_t* const           p_pi )
+{
+        return( (uint8_t)(p_pi->state_info1 & IB_PORT_STATE_MASK) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Port state.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_sm_sl

+ +

[top][index]

+

NAME

+
       ib_port_info_get_sm_sl
+
+

DESCRIPTION

+
       Returns the encoded value for the SM sl at this port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_sm_sl(
+        IN const ib_port_info_t* const p_pi )
+{
+        return( (uint8_t)(p_pi->mtu_smsl & 0x0F) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       Returns the encoded value for the SM SL at this port.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_get_vl_cap

+ +

[top][index]

+

NAME

+
       ib_port_info_get_vl_cap
+
+

DESCRIPTION

+
       Gets the VL Capability of a port.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_port_info_get_vl_cap(
+        IN const ib_port_info_t* const p_pi)
+{
+        return((p_pi->vl_cap >> 4) & 0x0F);
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       VL_CAP field
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_link_speed_sup

+ +

[top][index]

+

NAME

+
       ib_port_info_set_link_speed_sup
+
+

DESCRIPTION

+
       Sets the encoded link speed supported value in the state_info1
+       field of a PortInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_link_speed_sup(
+        IN                              uint8_t const                           speed,
+        IN                              ib_port_info_t*                         p_pi )
+{
+        p_pi->state_info1 =
+                ( ~IB_PORT_LINK_SPEED_SUPPORTED_MASK & p_pi->state_info1 ) |
+                ( IB_PORT_LINK_SPEED_SUPPORTED_MASK &
+                        (speed << IB_PORT_LINK_SPEED_SHIFT) );
+}
+
+

PARAMETERS

+
       speed
+               [in] Supported Speeds Code.
+
+       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_lmc

+ +

[top][index]

+

NAME

+
       ib_port_info_set_lmc
+
+

DESCRIPTION

+
       Sets the LMC value in the PortInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_lmc(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         lmc )
+{
+        CL_ASSERT( lmc <= 0x7 );
+        p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc);
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       lmc
+               [in] LMC value to set; must be 7 or less.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_mpb

+ +

[top][index]

+

NAME

+
       ib_port_info_set_mpb
+
+

DESCRIPTION

+
       Set the M_Key protect bits of this port.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_mpb(
+        IN                              ib_port_info_t*                         p_pi,
+        IN                              uint8_t                                         mpb )
+{
+        p_pi->mkey_lmc =
+                ((p_pi->mkey_lmc & ~IB_PORT_MPB_MASK) |
+                (mpb << IB_PORT_MPB_SHIFT));
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       mpb
+               [in] M_Key protect bits to set.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_neighbor_mtu

+ +

[top][index]

+

NAME

+
       ib_port_info_set_neighbor_mtu
+
+

DESCRIPTION

+
       Sets the Neighbor MTU value in the PortInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_neighbor_mtu(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         mtu )
+{
+        CL_ASSERT( mtu <= 5 );
+        CL_ASSERT( mtu != 0 );
+        p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0x0F) | (mtu << 4));
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       mtu
+               [in] Encoded MTU value to set
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_op_vls

+ +

[top][index]

+

NAME

+
       ib_port_info_set_op_vls
+
+

DESCRIPTION

+
       Sets the operational VLs on a port.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_op_vls(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         op_vls )
+{
+        p_pi->vl_enforce = (uint8_t)((p_pi->vl_enforce & 0x0F) | (op_vls << 4) );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       op_vls
+               [in] Encoded operation VLs value.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_port_state

+ +

[top][index]

+

NAME

+
       ib_port_info_set_port_state
+
+

DESCRIPTION

+
       Sets the port state.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_port_state(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         port_state )
+{
+        p_pi->state_info1 = (uint8_t)((p_pi->state_info1 & 0xF0) | port_state );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       port_state
+               [in] Port state value to set.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_sm_sl

+ +

[top][index]

+

NAME

+
       ib_port_info_set_sm_sl
+
+

DESCRIPTION

+
       Sets the SM sl value in the PortInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_sm_sl(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         sm_sl )
+{
+        CL_ASSERT( sm_sl <= 0x0F );
+        p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0xF0) | sm_sl );
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       sm_sl
+               [in] SM SL value to set.  Only the lower 4 bits are valid.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_port_info_set_state_no_change

+ +

[top][index]

+

NAME

+
       ib_port_info_set_state_no_change
+
+

DESCRIPTION

+
       Sets the port state fields to the value for "no change".
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_state_no_change(
+        IN                              ib_port_info_t* const           p_pi )
+{
+        ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE );
+        p_pi->state_info2 = 0;
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
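
EXAMPLE

       A hedged sketch of preparing a Set(PortInfo) that changes only the
       port state; IB_LINK_ARMED is assumed to be among this header's
       link-state encodings.

        /* Start from the current PortInfo, neutralize the state fields,
         * then request the single transition we want. */
        ib_port_info_set_state_no_change( p_pi );
        ib_port_info_set_port_state( p_pi, IB_LINK_ARMED );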

[Functions] +IBA Base: Types/ib_port_info_set_timeout

+ +

[top][index]

+

NAME

+
       ib_port_info_set_timeout
+
+

DESCRIPTION

+
       Sets the encoded subnet timeout value in the PortInfo attribute.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_port_info_set_timeout(
+        IN                              ib_port_info_t* const           p_pi,
+        IN              const   uint8_t                                         timeout )
+{
+        CL_ASSERT( timeout <= 0x1F );
+        p_pi->subnet_timeout = (uint8_t)(timeout & 0x1F);
+}
+
+

PARAMETERS

+
       p_pi
+               [in] Pointer to a PortInfo attribute.
+
+       timeout
+               [in] Encoded timeout value to set
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Structures] +IBA Base: Types/ib_port_info_t

+ +

[top][index]

+

NAME

+
       ib_port_info_t
+
+

DESCRIPTION

+
       IBA defined PortInfo. (14.2.5.6)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_port_info
+{
+        ib_net64_t                      m_key;
+        ib_net64_t                      subnet_prefix;
+        ib_net16_t                      base_lid;
+        ib_net16_t                      master_sm_base_lid;
+        ib_net32_t                      capability_mask;
+        ib_net16_t                      diag_code;
+        ib_net16_t                      m_key_lease_period;
+        uint8_t                         local_port_num;
+        uint8_t                         link_width_enabled;
+        uint8_t                         link_width_supported;
+        uint8_t                         link_width_active;
+        uint8_t                         state_info1; /* LinkSpeedSupported and PortState */
+        uint8_t                         state_info2; /* PortPhysState and LinkDownDefaultState */
+        uint8_t                         mkey_lmc;
+        uint8_t                         link_speed;      /* LinkSpeedEnabled and LinkSpeedActive */
+        uint8_t                         mtu_smsl;
+        uint8_t                         vl_cap;          /* VLCap and InitType */
+        uint8_t                         vl_high_limit;
+        uint8_t                         vl_arb_high_cap;
+        uint8_t                         vl_arb_low_cap;
+        uint8_t                         mtu_cap;
+        uint8_t                         vl_stall_life;
+        uint8_t                         vl_enforce;
+        ib_net16_t                      m_key_violations;
+        ib_net16_t                      p_key_violations;
+        ib_net16_t                      q_key_violations;
+        uint8_t                         guid_cap;
+        uint8_t                         subnet_timeout; /* cli_rereg(1b), resrv(2b), timeout(5b) */
+        uint8_t                         resp_time_value;
+        uint8_t                         error_threshold;
+
+}       PACK_SUFFIX ib_port_info_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_rmpp_is_flag_set

+ +

[top][index]

+

NAME

+
       ib_rmpp_is_flag_set
+
+

DESCRIPTION

+
       Returns TRUE if the MAD has the given RMPP flag set.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_rmpp_is_flag_set(
+        IN              const   ib_rmpp_mad_t* const            p_rmpp_mad,
+        IN              const   uint8_t                                         flag )
+{
+        CL_ASSERT( p_rmpp_mad );
+        return( (p_rmpp_mad->rmpp_flags & flag) == flag );
+}
+
+

PARAMETERS

+
       p_rmpp_mad
+               [in] Pointer to a MAD with an RMPP header.
+
+       flag
+               [in] The RMPP flag being examined.
+
+ RETURN VALUES
+       Returns TRUE if the MAD has the given RMPP flag set.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_mad_t, ib_rmpp_mad_t
+
+
+
+ +
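
EXAMPLE

       A hedged sketch; IB_RMPP_FLAG_FIRST is assumed to be among this
       header's RMPP flag definitions.

        if( ib_rmpp_is_flag_set( p_rmpp_mad, IB_RMPP_FLAG_FIRST ) )
        {
                /* First segment: paylen_newwin carries the total payload
                 * length, so size the reassembly buffer from it here. */
        }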

[Structures] +IBA Base: Types/ib_rmpp_mad_t

+ +

[top][index]

+

NAME

+
       ib_rmpp_mad_t
+
+

DESCRIPTION

+
       IBA defined MAD RMPP header (13.6.2.1)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_rmpp_mad
+{
+        ib_mad_t                                common_hdr;
+
+        uint8_t                                 rmpp_version;
+        uint8_t                                 rmpp_type;
+        uint8_t                                 rmpp_flags;
+        uint8_t                                 rmpp_status;
+
+        ib_net32_t                              seg_num;
+        ib_net32_t                              paylen_newwin;
+
+}       PACK_SUFFIX ib_rmpp_mad_t;
+#include <complib/cl_packoff.h>
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_sa_mad_get_payload_ptr

+ +

[top][index]

+

NAME

+
       ib_sa_mad_get_payload_ptr
+
+

DESCRIPTION

+
       Gets a pointer to the SA MAD's payload area.
+
+

SYNOPSIS

+
AL_INLINE void* AL_API
+ib_sa_mad_get_payload_ptr(
+        IN              const   ib_sa_mad_t* const                      p_sa_mad )
+{
+        return( (void*)p_sa_mad->data );
+}
+
+

PARAMETERS

+
       p_sa_mad
+               [in] Pointer to the SA MAD packet.
+
+ RETURN VALUES
+       Pointer to SA MAD payload area.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_sa_mad_t

+ +

[top][index]

+

NAME

+
       ib_sa_mad_t
+
+

DESCRIPTION

+
       IBA defined SA MAD format. (15.2.1)
+
+

SYNOPSIS

+
#define IB_SA_DATA_SIZE 200
+
+#include <complib/cl_packon.h>
+typedef struct _ib_sa_mad
+{
+        uint8_t                                 base_ver;
+        uint8_t                                 mgmt_class;
+        uint8_t                                 class_ver;
+        uint8_t                                 method;
+        ib_net16_t                              status;
+        ib_net16_t                              resv;
+        ib_net64_t                              trans_id;
+        ib_net16_t                              attr_id;
+        ib_net16_t                              resv1;
+        ib_net32_t                              attr_mod;
+
+        uint8_t                                 rmpp_version;
+        uint8_t                                 rmpp_type;
+        uint8_t                                 rmpp_flags;
+        uint8_t                                 rmpp_status;
+
+        ib_net32_t                              seg_num;
+        ib_net32_t                              paylen_newwin;
+
+        ib_net64_t                              sm_key;
+
+        ib_net16_t                              attr_offset;
+        ib_net16_t                              resv3;
+
+        ib_net64_t                              comp_mask;
+
+        uint8_t                                 data[IB_SA_DATA_SIZE];
+}       PACK_SUFFIX ib_sa_mad_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_slvl_table_get_vl

+ +

[top][index]

+

NAME

+
       ib_slvl_table_get_vl
+
+

DESCRIPTION

+
       Retrieves the VL for a given SL from an SL to VL mapping table.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_slvl_table_get_vl(
+        IN              const   ib_slvl_table_t* const          p_slvl_tbl,
+        IN              const   uint8_t                                         sl )
+{
+        uint8_t vl;
+
+        /* There are two VL's per byte. */
+        vl = p_slvl_tbl->vl_table[sl/2];
+        /* If odd, shift down 4 bits. */
+        if( sl % 2 )
+                vl >>= 4;
+
+        /* Mask off upper bits and return. */
+        return vl & 0x0F;
+}
+
+

PARAMETERS

+
       p_slvl_tbl
+               [in] Pointer to the SL to VL mapping table from which to return the VL.
+
+       sl
+               [in] SL in the table for which to return the VL.
+
+ RETURN VALUES
+       Returns the VL value for the specified SL in the provided table.
+
+

SEE ALSO

+
       ib_slvl_table_t, ib_slvl_table_set_vl
+
+
+
+ +

[Structures] +IBA Base: Types/ib_slvl_table_record_t

+ +

[top][index]

+

NAME

+
       ib_slvl_table_record_t
+
+

DESCRIPTION

+
       IBA defined Sl to VL Mapping Table Record for SA Query. (15.2.5.4)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_slvl_table_record
+{
       ib_net16_t              lid;            /* for a CA: LID of the port; for a switch: LID of port 0 */
+        uint8_t                 in_port_num;    /* reserved for CAs */
+        uint8_t                 out_port_num;   /* reserved for CAs */
+        uint32_t                resv;
+        ib_slvl_table_t slvl_tbl;
+
+}       PACK_SUFFIX ib_slvl_table_record_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_slvl_table_set_vl

+ +

[top][index]

+

NAME

+
       ib_slvl_table_set_vl
+
+

DESCRIPTION

+
       Sets the VL for a given SL in an SL to VL mapping table.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_slvl_table_set_vl(
+        IN      OUT                     ib_slvl_table_t* const          p_slvl_tbl,
+        IN              const   uint8_t                                         sl,
+        IN              const   uint8_t                                         vl )
+{
+        uint8_t entry;
+
+        /* Get the current value for the byte in which the VL is stored. */
+        entry = p_slvl_tbl->vl_table[sl/2];
+
+        /* Clear the appropriate bits and set the new VL value. */
+        if( sl % 2 )
+        {
+                entry &= 0x0F;
+                entry |= ((vl & 0x0F) << 4);
+        }
+        else
+        {
+                entry &= 0xF0;
+                entry |= (vl & 0x0F);
+        }
+        /* Store the updated entry back into the table. */
+        p_slvl_tbl->vl_table[sl/2] = entry;
+}
+
+

PARAMETERS

+
       p_slvl_tbl
+               [in/out] Pointer to the SL to VL mapping table in which to store the VL.
+
+       sl
+               [in] SL in the table for which to store the VL.
+
+       vl
+               [in] VL to store at the specified SL.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_slvl_table_t, ib_slvl_table_get_vl
+
+
+
+ +
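
EXAMPLE

       A hedged sketch: folding all 16 SLs onto VLs 0-3, then reading one
       mapping back.

        ib_slvl_table_t tbl;
        uint8_t         sl;

        cl_memclr( &tbl, sizeof(tbl) );
        for( sl = 0; sl < 16; sl++ )
                ib_slvl_table_set_vl( &tbl, sl, sl % 4 );
        /* ib_slvl_table_get_vl( &tbl, 5 ) == 1 */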

[Structures] +IBA Base: Types/ib_slvl_table_t

+ +

[top][index]

+

NAME

+
       ib_slvl_table_t
+
+

DESCRIPTION

+
       IBA defined SL2VL Mapping Table Attribute. (14.2.5.8)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_slvl_table
+{
+        uint8_t         vl_table[IB_MAX_NUM_VLS/2];
+
+}       PACK_SUFFIX ib_slvl_table_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Structures] +IBA Base: Types/ib_sm_info_t

+ +

[top][index]

+

NAME

+
       ib_sm_info_t
+
+

DESCRIPTION

+
       SMInfo structure (14.2.5.13).
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_sm_info
+{
+        ib_net64_t                      guid;
+        ib_net64_t                      sm_key;
+        ib_net32_t                      act_count;
+        uint8_t                         pri_state;
+
+}       PACK_SUFFIX ib_sm_info_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       guid
+               Port GUID for this SM.
+
+       sm_key
+               SM_Key of this SM.
+
+       act_count
+               Activity counter used as a heartbeat.
+
+       pri_state
+               Priority and State information
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_sminfo_get_priority

+ +

[top][index]

+

NAME

+
       ib_sminfo_get_priority
+
+

DESCRIPTION

+
       Returns the priority value.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_sminfo_get_priority(
+        IN              const   ib_sm_info_t* const                     p_smi )
+{
+        return( (uint8_t)((p_smi->pri_state & 0xF0)>>4) );
+}
+
+

PARAMETERS

+
       p_smi
+               [in] Pointer to the SMInfo Attribute.
+
+ RETURN VALUES
+       Returns the priority value.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_sminfo_get_state

+ +

[top][index]

+

NAME

+
       ib_sminfo_get_state
+
+

DESCRIPTION

+
       Returns the state value.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_sminfo_get_state(
+        IN              const   ib_sm_info_t* const                     p_smi )
+{
+        return( (uint8_t)(p_smi->pri_state & 0x0F) );
+}
+
+

PARAMETERS

+
       p_smi
+               [in] Pointer to the SMInfo Attribute.
+
+ RETURN VALUES
+       Returns the state value.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +
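
EXAMPLE

       A hedged sketch of inspecting an SMInfo response.  The numeric
       state encodings (0 = not active ... 3 = master) follow 14.4.1 and
       are noted here as an assumption.

        if( ib_sminfo_get_state( p_smi ) == 3 )         /* 3 = master */
        {
                uint8_t pri = ib_sminfo_get_priority( p_smi );
                /* Compare pri against our own SM priority. */
        }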

[Functions] +IBA Base: Types/ib_smp_get_payload_ptr

+ +

[top][index]

+

NAME

+
       ib_smp_get_payload_ptr
+
+

DESCRIPTION

+
       Gets a pointer to the SMP payload area.
+
+

SYNOPSIS

+
AL_INLINE void* AL_API
+ib_smp_get_payload_ptr(
+        IN              const   ib_smp_t* const                         p_smp )
+{
+        return( (void*)p_smp->data );
+}
+
+

PARAMETERS

+
       p_smp
+               [in] Pointer to the SMP packet.
+
+ RETURN VALUES
+       Pointer to SMP payload area.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_smp_get_status

+ +

[top][index]

+

NAME

+
       ib_smp_get_status
+
+

DESCRIPTION

+
       Returns the SMP status value in network order.
+
+

SYNOPSIS

+
AL_INLINE ib_net16_t AL_API
+ib_smp_get_status(
+        IN              const   ib_smp_t* const                         p_smp )
+{
+        return( (ib_net16_t)(p_smp->status & IB_SMP_STATUS_MASK) );
+}
+
+

PARAMETERS

+
       p_smp
+               [in] Pointer to the SMP packet.
+
+ RETURN VALUES
+       Returns the SMP status value in network order.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_smp_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_smp_init_new

+ +

[top][index]

+

NAME

+
       ib_smp_init_new
+
+

DESCRIPTION

+
       Initializes a directed route SMP, including the MAD common header.
+
+

TODO

+
       This is too big for inlining, but leave it here for now
+       since there is not yet another convenient spot.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_smp_init_new(
+        IN                              ib_smp_t* const                         p_smp,
+        IN              const   uint8_t                                         method,
+        IN              const   ib_net64_t                                      trans_id,
+        IN              const   ib_net16_t                                      attr_id,
+        IN              const   ib_net32_t                                      attr_mod,
+        IN              const   uint8_t                                         hop_count,
+        IN              const   ib_net64_t                                      m_key,
+        IN              const   uint8_t*                                        path_out,
+        IN              const   ib_net16_t                                      dr_slid,
+        IN              const   ib_net16_t                                      dr_dlid )
+{
+        CL_ASSERT( p_smp );
+        CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX );
+        p_smp->base_ver = 1;
+        p_smp->mgmt_class = IB_MCLASS_SUBN_DIR;
+        p_smp->class_ver = 1;
+        p_smp->method = method;
+        p_smp->status = 0;
+        p_smp->hop_ptr = 0;
+        p_smp->hop_count = hop_count;
+        p_smp->trans_id = trans_id;
+        p_smp->attr_id = attr_id;
+        p_smp->resv = 0;
+        p_smp->attr_mod = attr_mod;
+        p_smp->m_key = m_key;
+        p_smp->dr_slid = dr_slid;
+        p_smp->dr_dlid = dr_dlid;
+
+        cl_memclr( p_smp->resv1,
+                        sizeof(p_smp->resv1) +
+                        sizeof(p_smp->data) +
+                        sizeof(p_smp->initial_path) +
+                        sizeof(p_smp->return_path) );
+
+        /* copy the path */
+        cl_memcpy( &p_smp->initial_path, path_out,
+                        sizeof( p_smp->initial_path ) );
+}
+
+

PARAMETERS

+
       p_smp
+               [in] Pointer to the SMP packet.
+
+       method
+               [in] Method to perform, including 'R' bit.
+
+       trans_Id
+               [in] Transaction ID.
+
+       attr_id
+               [in] Attribute ID.
+
+       attr_mod
+               [in] Attribute modifier.
+
+       hop_count
+               [in] Number of hops in the path.
+
+       m_key
+               [in] Management key for this SMP.
+
+       path_out
+               [in] Port array for outbound path.
+
+       dr_slid
+               [in] Directed route source LID.
+
+       dr_dlid
+               [in] Directed route destination LID.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
       Payload area is initialized to zero.
+
+

SEE ALSO

+
       ib_mad_t
+
+
+
+ +
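
EXAMPLE

       A hedged sketch of building a directed route SubnGet(PortInfo).
       IB_MAD_METHOD_GET, IB_MAD_ATTR_PORT_INFO and IB_LID_PERMISSIVE are
       assumed from this header; p_smp, trans_id, and the outbound path
       are illustrative.

        uint8_t path[IB_SUBNET_PATH_HOPS_MAX] = { 0, 1, 3 };

        /* Two hops: leave via port 1, then via port 3 (entry 0 unused). */
        ib_smp_init_new( p_smp, IB_MAD_METHOD_GET, trans_id,
                IB_MAD_ATTR_PORT_INFO, 0, 2, 0,
                path, IB_LID_PERMISSIVE, IB_LID_PERMISSIVE );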

[Functions] +IBA Base: Types/ib_smp_is_d

+ +

[top][index]

+

NAME

+
       ib_smp_is_d
+
+

DESCRIPTION

+
       Returns TRUE if the SMP 'D' (direction) bit is set.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_smp_is_d(
+        IN              const   ib_smp_t* const                         p_smp )
+{
+        return( (p_smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION );
+}
+
+

PARAMETERS

+
       p_smp
+               [in] Pointer to the SMP packet.
+
+ RETURN VALUES
+       Returns TRUE if the SMP 'D' (direction) bit is set.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_smp_t
+
+
+
+ +

[Functions] +IBA Base: Types/ib_smp_is_response

+ +

[top][index]

+

NAME

+
       ib_smp_is_response
+
+

DESCRIPTION

+
       Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_smp_is_response(
+        IN              const   ib_smp_t* const                         p_smp )
+{
+        return( ib_mad_is_response( (const ib_mad_t*)p_smp ) );
+}
+
+

PARAMETERS

+
       p_smp
+               [in] Pointer to the SMP packet.
+
+ RETURN VALUES
+       Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+
+

NOTES

+
+
+

SEE ALSO

+
       ib_smp_t
+
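
EXAMPLE

       A hedged sketch (editorial addition): classify a received
       directed-route SMP; process_response() is a hypothetical handler.

        const uint8_t*  p_path;

        /* The R bit in the method marks a response. */
        if( ib_smp_is_response( p_smp ) )
                process_response( p_smp );

        /* The D bit selects which path vector describes the route. */
        if( ib_smp_is_d( p_smp ) )
                p_path = p_smp->return_path;
        else
                p_path = p_smp->initial_path;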
+
+
+ +

[Structures] +IBA Base: Types/ib_smp_t

+ +

[top][index]

+

NAME

+
       ib_smp_t
+
+

DESCRIPTION

+
       IBA defined SMP. (14.2.1.2)
+
+

SYNOPSIS

+
#define IB_SMP_DATA_SIZE 64
+#include <complib/cl_packon.h>
+typedef struct _ib_smp
+{
+        uint8_t                                 base_ver;
+        uint8_t                                 mgmt_class;
+        uint8_t                                 class_ver;
+        uint8_t                                 method;
+        ib_net16_t                              status;
+        uint8_t                                 hop_ptr;
+        uint8_t                                 hop_count;
+        ib_net64_t                              trans_id;
+        ib_net16_t                              attr_id;
+        ib_net16_t                              resv;
+        ib_net32_t                              attr_mod;
+        ib_net64_t                              m_key;
+        ib_net16_t                              dr_slid;
+        ib_net16_t                              dr_dlid;
+        uint32_t                                resv1[7];
+        uint8_t                                 data[IB_SMP_DATA_SIZE];
+        uint8_t                                 initial_path[IB_SUBNET_PATH_HOPS_MAX];
+        uint8_t                                 return_path[IB_SUBNET_PATH_HOPS_MAX];
+
+}       PACK_SUFFIX ib_smp_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       base_ver
+               MAD base format.
+
+       mgmt_class
+               Class of operation.
+
+       class_ver
+               Version of MAD class-specific format.
+
+       method
+               Method to perform, including 'R' bit.
+
+       status
+               Status of operation.
+
+       hop_ptr
+               Hop pointer for directed route MADs.
+
+       hop_count
+               Hop count for directed route MADs.
+
+       trans_id
+               Transaction ID.
+
+       attr_id
+               Attribute ID.
+
+       resv
+               Reserved field.
+
+       attr_mod
+               Attribute modifier.
+
+       m_key
+               Management key value.
+
+       dr_slid
+               Directed route source LID.
+
+       dr_dlid
+               Directed route destination LID.
+
+       resv1
+               Reserved for 64 byte alignment.
+
+       data
+               MAD data payload.
+
+       initial_path
+               Outbound port list.
+
+       return_path
+               Inbound port list.
+
+

SEE ALSO

+ +
+ +

[Structures] +IBA Base: Types/ib_svc_entries_t

+ +

[top][index]

+

NAME

+
       ib_svc_entries_t
+
+

DESCRIPTION

+
       IBA defined IO Controller service entry array (16.3.3.5)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_svc_entries
+{
+#define SVC_ENTRY_COUNT                         4
+        ib_svc_entry_t                  service_entry[SVC_ENTRY_COUNT];
+
+}       PACK_SUFFIX ib_svc_entries_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       service_entry
+               An array of IO controller service entries.
+
+

SEE ALSO

+
 ib_dm_mad_t, ib_svc_entry_t
+
+
+
+ +

[Structures] +IBA Base: Types/ib_svc_entry_t

+ +

[top][index]

+

NAME

+
       ib_svc_entry_t
+
+

DESCRIPTION

+
       IBA defined IO Controller service entry structure (16.3.3.5)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_svc_entry
+{
+#define MAX_SVC_ENTRY_NAME_LEN          40
+        char                                    name[MAX_SVC_ENTRY_NAME_LEN];
+
+        ib_net64_t                              id;
+
+}       PACK_SUFFIX ib_svc_entry_t;
+#include <complib/cl_packoff.h>
+
+

FIELDS

+
       name
+               UTF-8 encoded, null-terminated name of the service.
+
+       id
+               An identifier of the associated Service.
+
+

SEE ALSO

+
 ib_svc_entries_t
+
+
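
EXAMPLE

       A hedged sketch (editorial addition): walk the entries of a
       ServiceEntries response.  p_entries is assumed to point at the
       attribute payload, and report_service() is hypothetical.

        uint8_t i;

        for( i = 0; i < SVC_ENTRY_COUNT; i++ )
        {
                const ib_svc_entry_t* const p_entry =
                        &p_entries->service_entry[i];

                /* name is UTF-8 and null-terminated; id is in network
                 * byte order. */
                report_service( p_entry->name, cl_ntoh64( p_entry->id ) );
        }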
+
+ +

[Functions] +IBA Base: Types/ib_switch_info_clear_state_change

+ +

[top][index]

+

NAME

+
       ib_switch_info_clear_state_change
+
+

DESCRIPTION

+
       Clears the switch's state change bit.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_switch_info_clear_state_change(
+        IN                              ib_switch_info_t* const         p_si )
+{
+        p_si->life_state = (uint8_t)(p_si->life_state & 0xFB);
+}
+
+

PARAMETERS

+
       p_si
+               [in] Pointer to a SwitchInfo attribute.
+
+ RETURN VALUES
+       None.
+
+

NOTES

+
+
+

SEE ALSO

+ +
+ +

[Functions] +IBA Base: Types/ib_switch_info_get_state_change

+ +

[top][index]

+

NAME

+
       ib_switch_info_get_state_change
+
+

DESCRIPTION

+
       Returns the value of the state change flag.
+
+

SYNOPSIS

+
AL_INLINE boolean_t AL_API
+ib_switch_info_get_state_change(
+        IN              const   ib_switch_info_t* const         p_si )
+{
+        return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC );
+}
+
+

PARAMETERS

+
       p_si
+               [in] Pointer to a SwitchInfo attribute.
+
+ RETURN VALUES
+       Returns the value of the state change flag.
+
+

NOTES

+
+
+

SEE ALSO

+ +
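
EXAMPLE

       A hedged sketch (editorial addition): typical SM-side use pairs the
       two accessors: detect a port state change, handle it, then clear the
       bit before writing SwitchInfo back; resweep_ports() is hypothetical.

        if( ib_switch_info_get_state_change( p_si ) )
        {
                resweep_ports();
                ib_switch_info_clear_state_change( p_si );
        }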
+ +

[Structures] +IBA Base: Types/ib_switch_info_t

+ +

[top][index]

+

NAME

+
       ib_switch_info_t
+
+

DESCRIPTION

+
       IBA defined SwitchInfo. (14.2.5.4)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_switch_info
+{
+        ib_net16_t                      lin_cap;
+        ib_net16_t                      rand_cap;
+        ib_net16_t                      mcast_cap;
+        ib_net16_t                      lin_top;
+        uint8_t                         def_port;
+        uint8_t                         def_mcast_pri_port;
+        uint8_t                         def_mcast_not_port;
+        uint8_t                         life_state;
+        ib_net16_t                      lids_per_port;
+        ib_net16_t                      enforce_cap;
+        uint8_t                         flags;
+
+}       PACK_SUFFIX ib_switch_info_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ib_vl_arb_element_get_vl

+ +

[top][index]

+

NAME

+
       ib_vl_arb_element_get_vl
+
+

DESCRIPTION

+
       Retrieves the VL from a VL arbitration table element.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ib_vl_arb_element_get_vl(
+        IN              const   ib_vl_arb_element_t                     vl_arb_element )
+{
+        return (vl_arb_element.res_vl >> 4);
+}
+
+

PARAMETERS

+
       vl_arb_element
+               [in] VL arbitration table element from which to return the VL.
+
+ RETURN VALUES
+       Returns the VL value for the specified VL arbitration table element.
+
+

SEE ALSO

+
       ib_vl_arb_element_t, ib_vl_arb_element_set_vl
+
+
+
+ +

[Functions] +IBA Base: Types/ib_vl_arb_element_set_vl

+ +

[top][index]

+

NAME

+
       ib_vl_arb_element_set_vl
+
+

DESCRIPTION

+
       Stores a VL in a VL arbitration table element.
+
+

SYNOPSIS

+
AL_INLINE void AL_API
+ib_vl_arb_element_set_vl(
+        IN      OUT                     ib_vl_arb_element_t* const      p_vl_arb_element,
+        IN              const   uint8_t                                         vl )
+{
+        p_vl_arb_element->res_vl = vl << 4;
+}
+
+

PARAMETERS

+
       p_vl_arb_element
+               [in/out] VL arbitration table element in which to store the VL.
+
+       vl
+               [in] VL to store in the specified element.
+
+ RETURN VALUES
+       This function does not return a value.
+
+

SEE ALSO

+
       ib_vl_arb_element_t, ib_vl_arb_element_get_vl
+
+
+
+ +

[Structures] +IBA Base: Types/ib_vl_arb_element_t

+ +

[top][index]

+

NAME

+
       ib_vl_arb_element_t
+
+

DESCRIPTION

+
       IBA defined VL Arbitration Table Element. (14.2.5.9)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_vl_arb_element
+{
+        uint8_t res_vl;
+        uint8_t weight;
+
+}       PACK_SUFFIX ib_vl_arb_element_t;
+#include <complib/cl_packoff.h>
+
+
+
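
EXAMPLE

       A round-trip sketch (editorial addition): the VL occupies the high
       nibble of res_vl; the low nibble is reserved.

        ib_vl_arb_element_t     elem;

        elem.weight = 10;
        ib_vl_arb_element_set_vl( &elem, 3 );   /* res_vl becomes 0x30 */
        CL_ASSERT( ib_vl_arb_element_get_vl( elem ) == 3 );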
+ +

[Structures] +IBA Base: Types/ib_vl_arb_table_record_t

+ +

[top][index]

+

NAME

+
       ib_vl_arb_table_record_t
+
+

DESCRIPTION

+
       IBA defined VL Arbitration Table Record for SA Query. (15.2.5.9)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_vl_arb_table_record
+{
+        ib_net16_t                      lid; /* CA: LID of the port; switch: LID of port 0. */
+        uint8_t                         port_num;
+        uint8_t                         block_num;
+        uint32_t                        reserved;
+        ib_vl_arb_table_t       vl_arb_tbl;
+
+}       PACK_SUFFIX ib_vl_arb_table_record_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Structures] +IBA Base: Types/ib_vl_arb_table_t

+ +

[top][index]

+

NAME

+
       ib_vl_arb_table_t
+
+

DESCRIPTION

+
       IBA defined VL Arbitration Table. (14.2.5.9)
+
+

SYNOPSIS

+
#include <complib/cl_packon.h>
+typedef struct _ib_vl_arb_table
+{
+        ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK];
+
+}       PACK_SUFFIX ib_vl_arb_table_t;
+#include <complib/cl_packoff.h>
+
+
+
+ +

[Functions] +IBA Base: Types/ioc_at_slot

+ +

[top][index]

+

NAME

+
       ioc_at_slot
+
+

DESCRIPTION

+
       Returns the encoded status of the IOC at the specified slot.
+
+

SYNOPSIS

+
AL_INLINE uint8_t AL_API
+ioc_at_slot(
+        IN              const   ib_iou_info_t*  const   p_iou_info,
+        IN                              uint8_t                                 slot )
+{
+        if( !slot )
+                return SLOT_DOES_NOT_EXIST;
+        else if( slot-- & 0x01 )
+                return (p_iou_info->controller_list[slot >> 1] >> 4);
+        else
+                return (p_iou_info->controller_list[slot >> 1] & 0x0F);
+}
+
+

PARAMETERS

+
       p_iou_info
+               [in] Pointer to the IO Unit information structure.
+
+       slot
+               [in] 1-based slot number of the IOC slot to check.
+
+ RETURN VALUES
+       Returns the encoded value for the desired slot.  Possible values are
+       SLOT_DOES_NOT_EXIST, IOC_NOT_INSTALLED, and IOC_INSTALLED.
+
+

NOTES

+
       The input slot number is 1-based, not zero-based.
+
+

SEE ALSO

+
       ib_iou_info_t
+
+
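
EXAMPLE

       A worked sketch (editorial addition) of the nibble packing: slots
       are stored four bits apiece in controller_list[], odd slots in the
       high nibble of a byte and even slots in the low nibble, so with
       controller_list[0] == 0x12, slot 1 decodes to 0x1 and slot 2 to 0x2.

        /* query_ioc_profile() is a hypothetical follow-up. */
        if( ioc_at_slot( p_iou_info, 1 ) == IOC_INSTALLED )
                query_ioc_profile( 1 );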
+
+ +

[Functions] +IBA Base: Types/ib_dm_get_slot_lo_hi

+ +

[top][index]

+

DESCRIPTION

+
       Returns the IOC slot number and the lower and upper bounds of the
+       service entries, given the attribute modifier of a ServiceEntries response.
+
+

SEE ALSO

+
 ib_dm_set_slot_lo_hi
+
+
+
+ +

[Functions] +IBA Base: Types/ib_dm_set_slot_lo_hi

+ +

[top][index]

+

DESCRIPTION

+
       Combines the IOC slot number with the lower and upper bounds of the
+       service entries and returns the resulting attribute modifier.
+
+

SEE ALSO

+
 ib_dm_get_slot_lo_hi
+
+
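
EXAMPLE

       A hedged sketch (editorial addition): the prototypes are not
       reproduced in this excerpt, so the sketch assumes
       ib_dm_set_slot_lo_hi( slot, lo, hi ) returns the ib_net32_t
       attribute modifier and ib_dm_get_slot_lo_hi() decomposes it again;
       consult ib_types.h for the authoritative packing.

        ib_net32_t      attr_mod;
        uint8_t         slot, lo, hi;

        /* Request service entries 0..3 of the IOC in slot 2. */
        attr_mod = ib_dm_set_slot_lo_hi( 2, 0, 3 );

        /* On the other side, recover the same three values. */
        ib_dm_get_slot_lo_hi( attr_mod, &slot, &lo, &hi );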
+
+ +

[Definitions] +Verbs/ib_async_event_t

+ +

[top][index]

+

NAME

+
       ib_async_event_t -- Async event types
+
+

DESCRIPTION

+
       This type indicates the reason the async callback was called.
+       The context in the ib_event_rec_t indicates the resource context
+       that is associated with the callback.  For example, for IB_AE_CQ_ERROR
+       the context provided during ib_create_cq is returned in the event.
+
+

SYNOPSIS

+
typedef enum _ib_async_event_t
+{
+        IB_AE_SQ_ERROR = 1,
+        IB_AE_SQ_DRAINED,
+        IB_AE_RQ_ERROR,
+        IB_AE_CQ_ERROR,
+        IB_AE_QP_FATAL,
+        IB_AE_QP_COMM,
+        IB_AE_QP_APM,
+        IB_AE_LOCAL_FATAL,
+        IB_AE_PKEY_TRAP,
+        IB_AE_QKEY_TRAP,
+        IB_AE_MKEY_TRAP,
+        IB_AE_PORT_TRAP,
+        IB_AE_SYSIMG_GUID_TRAP,
+        IB_AE_BUF_OVERRUN,
+        IB_AE_LINK_INTEGRITY,
+        IB_AE_FLOW_CTRL_ERROR,
+        IB_AE_BKEY_TRAP,
+        IB_AE_QP_APM_ERROR,
+        IB_AE_WQ_REQ_ERROR,
+        IB_AE_WQ_ACCESS_ERROR,
+        IB_AE_PORT_ACTIVE,
+        IB_AE_PORT_DOWN,
+        IB_AE_CLIENT_REREGISTER,
+        IB_AE_SRQ_LIMIT_REACHED,
+        IB_AE_SRQ_CATAS_ERROR,
+        IB_AE_SRQ_QP_LAST_WQE_REACHED,
+        IB_AE_UNKNOWN           /* ALWAYS LAST ENUM VALUE */
+
+}       ib_async_event_t;
+
+

VALUES

+
       IB_AE_SQ_ERROR
+               An error occurred when accessing the send queue of the QP.
+               This event is optional.
+
+       IB_AE_SQ_DRAINED
+               The send queue of the specified QP has completed the outstanding
+               messages in progress when the state change was requested and, if
+               applicable, has received all acknowledgements for those messages.
+
+       IB_AE_RQ_ERROR
+               An error occurred when accessing the receive queue of the QP.
+               This event is optional.
+
+       IB_AE_CQ_ERROR
+               An error occurred when writing an entry to the CQ.
+
+       IB_AE_QP_FATAL
+               A catastrophic error occurred while accessing or processing the
+               work queue that prevents reporting of completions.
+
+       IB_AE_QP_COMM
+               The first packet has arrived for the receive work queue where the
+               QP is still in the RTR state.
+
+       IB_AE_QP_APM
+               If alternate path migration is supported, this event indicates that
+               the QP connection has migrated to the alternate path.
+
+       IB_AE_LOCAL_FATAL
+               A catastrophic HCA error occurred which cannot be attributed to any
+               resource; behavior is indeterminate.
+
+       IB_AE_PKEY_TRAP
+               A PKEY violation was detected.  This event is optional.
+
+       IB_AE_QKEY_TRAP
+               A QKEY violation was detected.  This event is optional.
+
+       IB_AE_MKEY_TRAP
+               An MKEY violation was detected.  This event is optional.
+
+       IB_AE_PORT_TRAP
+               A port capability change was detected.  This event is optional.
+
+       IB_AE_SYSIMG_GUID_TRAP
+               If the system image GUID is supported, this event indicates that the
+               system image GUID of this HCA has been changed.  This event is
+               optional.
+
+       IB_AE_BUF_OVERRUN
+               The number of consecutive flow control update periods with at least
+               one overrun error in each period has exceeded the threshold specified
+               in the port info attributes.  This event is optional.
+
+       IB_AE_LINK_INTEGRITY
+               The detection of excessively frequent local physical errors has
+               exceeded the threshold specified in the port info attributes.  This
+               event is optional.
+
+       IB_AE_FLOW_CTRL_ERROR
+               An HCA watchdog timer monitoring the arrival of flow control updates
+               has expired without receiving an update.  This event is optional.
+
+       IB_AE_BKEY_TRAP
+               A BKEY violation was detected.  This event is optional.
+
+       IB_AE_QP_APM_ERROR
+               If alternate path migration is supported, this event indicates that
+               an incoming path migration request to this QP was not accepted.
+
+       IB_AE_WQ_REQ_ERROR
+               An OpCode violation was detected at the responder.
+
+       IB_AE_WQ_ACCESS_ERROR
+               An access violation was detected at the responder.
+
+       IB_AE_PORT_ACTIVE
+               If the port active event is supported, this event is generated
+               when the link becomes active: IB_LINK_ACTIVE.
+
+       IB_AE_PORT_DOWN
+               The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED,
+               IB_LINK_DOWN.
+
+       IB_AE_CLIENT_REREGISTER
+               The SM has requested that the client reregister its SA records.
+
+       IB_AE_SRQ_LIMIT_REACHED
+               The SRQ low watermark has been reached.
+
+       IB_AE_SRQ_CATAS_ERROR
+               An error occurred while processing or accessing the SRQ that prevents
+               dequeuing a WQE from the SRQ and reporting of receive completions.
+
+       IB_AE_SRQ_QP_LAST_WQE_REACHED
+               An event issued for a QP associated with a shared receive queue
+               when a CQE is generated for the last WQE, or when the QP enters
+               the Error state and there are no more WQEs on the RQ.
+
+       IB_AE_UNKNOWN
+               An unknown error occurred which cannot be attributed to any
+               resource; behavior is indeterminate.
+
+
+
+ +

[Structures] +Verbs/ib_event_rec_t

+ +

[top][index]

+

NAME

+
       ib_event_rec_t -- Async event notification record
+
+

DESCRIPTION

+
       When an async event callback is made, this structure is passed to indicate
+       the type of event, the source that caused it, and the context
+       associated with the event.
+
+       context -- Context of the resource that caused the event.
+               -- ca_context if this is a port/adapter event.
+               -- qp_context if the source is a QP event
+               -- cq_context if the source is a CQ event.
+               -- ee_context if the source is an EE event.
+
+

SYNOPSIS

+
typedef struct _ib_event_rec
+{
+        void* __ptr64                   context;
+        ib_async_event_t                type;
+
+        /* HCA vendor specific event information. */
+        uint64_t                                vendor_specific;
+
+        /* The following structures are valid only for trap types. */
+        union _trap
+        {
+                struct
+                {
+                        uint16_t                        lid;
+                        ib_net64_t                      port_guid;
+                        uint8_t                         port_num;
+
+                        /*
+                         * The following structure is valid only for
+                         * P_KEY, Q_KEY, and M_KEY violation traps.
+                         */
+                        struct
+                        {
+                                uint8_t                 sl;
+                                uint16_t                src_lid;
+                                uint16_t                dest_lid;
+                                union _key
+                                {
+                                        uint16_t        pkey;
+                                        uint32_t        qkey;
+                                        uint64_t        mkey;
+                                } key;
+                                uint32_t                src_qp;
+                                uint32_t                dest_qp;
+                                ib_gid_t                src_gid;
+                                ib_gid_t                dest_gid;
+
+                        }       violation;
+
+                } info;
+
+                ib_net64_t      sysimg_guid;
+
+        }       trap;
+
+}       ib_event_rec_t;
+
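
EXAMPLE

       A hedged sketch (editorial addition): dispatch inside an async event
       callback.  recover_cq() and restart_traffic() are hypothetical
       handlers, and the callback registration itself is not shown here.

        static void
        my_event_cb(
                IN                              ib_event_rec_t*                 p_event )
        {
                switch( p_event->type )
                {
                case IB_AE_CQ_ERROR:
                        /* context is the cq_context given to ib_create_cq. */
                        recover_cq( p_event->context );
                        break;

                case IB_AE_PORT_ACTIVE:
                        restart_traffic();
                        break;

                default:
                        break;
                }
        }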
+
+ + diff --git a/branches/Ndi/docs/masterindex.html b/branches/Ndi/docs/masterindex.html new file mode 100644 index 00000000..b0e6139b --- /dev/null +++ b/branches/Ndi/docs/masterindex.html @@ -0,0 +1,2741 @@ + + + + +Index + + + + +Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52 +
+

+[Sourcefiles] +[Index] +[Definitions] +[Functions] +[Modules] +[Structures] +

+

Index

+64-bit Print Format +Access Layer +Asynchronous Processor
+Atomic Operations +ATS +Byte Swapping
+cl_async_proc.h +cl_async_proc_construct +cl_async_proc_destroy
+cl_async_proc_init +cl_async_proc_item_t +cl_async_proc_queue
+cl_async_proc_t +cl_atomic.h +cl_atomic_add
+cl_atomic_comp_xchg +cl_atomic_dec +cl_atomic_inc
+cl_atomic_sub +cl_atomic_xchg +cl_break
+cl_byteswap.h +cl_check_for_read +cl_check_for_write
+cl_comppool.h +cl_copy_from_user +cl_copy_to_user
+cl_cpool_construct +cl_cpool_count +cl_cpool_destroy
+cl_cpool_get +cl_cpool_grow +cl_cpool_init
+cl_cpool_put +cl_cpool_t +cl_dbg_out
+cl_debug.h +cl_destroy_type_t +CL_ENTER
+cl_event.h +cl_event_construct +cl_event_destroy
+cl_event_init +cl_event_reset +cl_event_signal
+cl_event_wait_on +CL_EXIT +cl_fleximap.h
+cl_fmap_apply_func +cl_fmap_count +cl_fmap_delta
+cl_fmap_end +cl_fmap_get +cl_fmap_head
+cl_fmap_init +cl_fmap_insert +cl_fmap_item_t
+cl_fmap_key +cl_fmap_merge +cl_fmap_next
+cl_fmap_prev +cl_fmap_remove +cl_fmap_remove_all
+cl_fmap_remove_item +cl_fmap_t +cl_fmap_tail
+cl_free +cl_get_pagesize +cl_get_physaddr
+cl_get_tick_count +cl_get_tick_freq +cl_get_time_stamp
+cl_get_time_stamp_sec +cl_get_time_stamp_usec +cl_hton16
+CL_HTON16 +CL_HTON32 +cl_hton32
+cl_hton64 +CL_HTON64 +cl_ioctl.h
+cl_ioctl_cmd +cl_ioctl_complete +cl_ioctl_ctl_code
+cl_ioctl_handle_t +cl_ioctl_in_buf +cl_ioctl_in_size
+cl_ioctl_out_buf +cl_ioctl_out_size +cl_ioctl_process
+cl_ioctl_request +cl_ioctl_result +cl_ioctl_type
+cl_irqlock.h +cl_irqlock_acquire +cl_irqlock_construct
+cl_irqlock_destroy +cl_irqlock_init +cl_irqlock_release
+cl_is_blockable +cl_is_cpool_inited +cl_is_fmap_empty
+cl_is_item_in_qlist +cl_is_list_empty +cl_is_list_inited
+cl_is_map_empty +cl_is_map_inited +cl_is_object_in_list
+cl_is_pool_inited +cl_is_qcpool_inited +cl_is_qlist_empty
+cl_is_qmap_empty +cl_is_qpool_inited +cl_is_rbmap_empty
+cl_is_state_valid +cl_is_sys_callback_inited +cl_list.h
+cl_list_apply_func +cl_list_construct +cl_list_count
+cl_list_destroy +cl_list_end +cl_list_find_from_head
+cl_list_find_from_tail +cl_list_head +cl_list_init
+cl_list_insert_array_head +cl_list_insert_array_tail +cl_list_insert_head
+cl_list_insert_next +cl_list_insert_prev +cl_list_insert_tail
+cl_list_item_t +cl_list_iterator_t +cl_list_next
+cl_list_obj +cl_list_obj_t +cl_list_prev
+cl_list_remove_all +cl_list_remove_head +cl_list_remove_item
+cl_list_remove_object +cl_list_remove_tail +cl_list_t
+cl_list_tail +cl_log.h +cl_log_event
+cl_log_type_t +cl_malloc +cl_map.h
+cl_map_construct +cl_map_count +cl_map_delta
+cl_map_destroy +cl_map_end +cl_map_get
+cl_map_head +cl_map_init +cl_map_insert
+cl_map_item_t +cl_map_iterator_t +cl_map_key
+cl_map_merge +cl_map_next +cl_map_obj
+cl_map_obj_t +cl_map_prev +cl_map_remove
+cl_map_remove_all +cl_map_remove_item +cl_map_t
+cl_map_tail +cl_math.h +cl_mem_display
+cl_memclr +cl_memcmp +cl_memcpy
+cl_memory.h +cl_memset +cl_msg_out
+cl_mutex.h +cl_mutex_acquire +cl_mutex_construct
+cl_mutex_destroy +cl_mutex_init +cl_mutex_release
+cl_ntoh +cl_ntoh16 +CL_NTOH16
+CL_NTOH32 +cl_ntoh32 +cl_ntoh64
+CL_NTOH64 +cl_obj.h +cl_obj_construct
+cl_obj_deinit +cl_obj_deref +cl_obj_destroy
+cl_obj_init +cl_obj_insert_rel +cl_obj_insert_rel_parent_locked
+cl_obj_lock +cl_obj_mgr_create +cl_obj_mgr_destroy
+cl_obj_mgr_t +cl_obj_ref +cl_obj_rel_t
+cl_obj_remove_rel +cl_obj_reset +cl_obj_t
+cl_obj_type +cl_obj_unlock +cl_palloc
+cl_panic +cl_passivelock.h +cl_perf.h
+cl_perf_clr +cl_perf_construct +cl_perf_destroy
+cl_perf_display +cl_perf_inc +cl_perf_init
+cl_perf_log +cl_perf_reset +cl_perf_start
+cl_perf_stop +cl_perf_update +cl_perf_update_ctr
+cl_pfn_async_proc_cb_t +cl_pfn_cpool_dtor_t +cl_pfn_cpool_init_t
+cl_pfn_fmap_apply_t +cl_pfn_fmap_cmp_t +cl_pfn_ioctl_handler_t
+cl_pfn_list_apply_t +cl_pfn_list_find_t +cl_pfn_obj_call_t
+cl_pfn_pool_dtor_t +cl_pfn_pool_init_t +cl_pfn_ptr_vec_apply_t
+cl_pfn_ptr_vec_find_t +cl_pfn_qcpool_dtor_t +cl_pfn_qcpool_init_t
+cl_pfn_qlist_apply_t +cl_pfn_qlist_find_t +cl_pfn_qmap_apply_t
+cl_pfn_qpool_dtor_t +cl_pfn_qpool_init_t +cl_pfn_req_cb_t
+cl_pfn_reqmgr_get_count_t +cl_pfn_sys_callback_t +cl_pfn_thread_callback_t
+cl_pfn_timer_callback_t +cl_pfn_vec_apply_t +cl_pfn_vec_dtor_t
+cl_pfn_vec_find_t +cl_pfn_vec_init_t +cl_plock_acquire
+cl_plock_construct +cl_plock_destroy +cl_plock_excl_acquire
+cl_plock_init +cl_plock_release +cl_plock_t
+cl_pool.h +cl_pool_construct +cl_pool_count
+cl_pool_destroy +cl_pool_get +cl_pool_grow
+cl_pool_init +cl_pool_item_t +cl_pool_put
+cl_pool_t +CL_PRINT +cl_proc_count
+cl_ptr_vector.h +cl_ptr_vector_apply_func +cl_ptr_vector_at
+cl_ptr_vector_construct +cl_ptr_vector_destroy +cl_ptr_vector_find_from_end
+cl_ptr_vector_find_from_start +cl_ptr_vector_get +cl_ptr_vector_get_capacity
+cl_ptr_vector_get_size +cl_ptr_vector_init +cl_ptr_vector_insert
+cl_ptr_vector_remove +cl_ptr_vector_set +cl_ptr_vector_set_capacity
+cl_ptr_vector_set_min_size +cl_ptr_vector_set_size +cl_ptr_vector_t
+cl_pzalloc +cl_qcomppool.h +cl_qcpool_construct
+cl_qcpool_count +cl_qcpool_destroy +cl_qcpool_get
+cl_qcpool_grow +cl_qcpool_init +cl_qcpool_put
+cl_qcpool_put_list +cl_qcpool_t +cl_qlist.h
+cl_qlist_apply_func +cl_qlist_count +cl_qlist_end
+cl_qlist_find_from_head +cl_qlist_find_from_tail +cl_qlist_find_next
+cl_qlist_find_prev +cl_qlist_head +cl_qlist_init
+cl_qlist_insert_array_head +cl_qlist_insert_array_tail +cl_qlist_insert_head
+cl_qlist_insert_list_head +cl_qlist_insert_list_tail +cl_qlist_insert_next
+cl_qlist_insert_prev +cl_qlist_insert_tail +cl_qlist_move_items
+cl_qlist_next +cl_qlist_obj +cl_qlist_prev
+cl_qlist_remove_all +cl_qlist_remove_head +cl_qlist_remove_item
+cl_qlist_remove_tail +cl_qlist_set_obj +cl_qlist_t
+cl_qlist_tail +cl_qlock_pool_construct +cl_qlock_pool_destroy
+cl_qlock_pool_get +cl_qlock_pool_init +cl_qlock_pool_put
+cl_qlock_pool_t +cl_qlockpool.h +cl_qmap.h
+cl_qmap_apply_func +cl_qmap_count +cl_qmap_delta
+cl_qmap_end +cl_qmap_get +cl_qmap_head
+cl_qmap_init +cl_qmap_insert +cl_qmap_key
+cl_qmap_merge +cl_qmap_next +cl_qmap_obj
+cl_qmap_prev +cl_qmap_remove +cl_qmap_remove_all
+cl_qmap_remove_item +cl_qmap_set_obj +cl_qmap_t
+cl_qmap_tail +cl_qpool.h +cl_qpool_construct
+cl_qpool_count +cl_qpool_destroy +cl_qpool_get
+cl_qpool_grow +cl_qpool_init +cl_qpool_put
+cl_qpool_put_list +cl_qpool_t +cl_rbmap.h
+cl_rbmap_count +cl_rbmap_end +cl_rbmap_init
+cl_rbmap_insert +cl_rbmap_item_t +cl_rbmap_left
+cl_rbmap_remove_item +cl_rbmap_reset +cl_rbmap_right
+cl_rbmap_root +cl_rbmap_t +cl_rel_alloc
+cl_rel_free +cl_req_mgr_construct +cl_req_mgr_destroy
+cl_req_mgr_get +cl_req_mgr_init +cl_req_mgr_resume
+cl_req_mgr_t +cl_req_type_t +cl_reqmgr.h
+cl_spinlock.h +cl_spinlock_acquire +cl_spinlock_construct
+cl_spinlock_destroy +cl_spinlock_init +cl_spinlock_release
+CL_STATUS_MSG +cl_status_t +cl_sys_callback_get
+cl_sys_callback_put +cl_sys_callback_queue +cl_syscallback.h
+cl_thread.h +cl_thread_pool_construct +cl_thread_pool_destroy
+cl_thread_pool_init +cl_thread_pool_signal +cl_thread_pool_t
+cl_thread_stall +cl_thread_suspend +cl_threadpool.h
+cl_timer.h +cl_timer_construct +cl_timer_destroy
+cl_timer_init +cl_timer_start +cl_timer_stop
+cl_timer_trim +CL_TRACE +CL_TRACE_EXIT
+cl_types.h +cl_vector.h +cl_vector_apply_func
+cl_vector_at +cl_vector_construct +cl_vector_destroy
+cl_vector_find_from_end +cl_vector_find_from_start +cl_vector_get
+cl_vector_get_capacity +cl_vector_get_ptr +cl_vector_get_size
+cl_vector_init +cl_vector_set +cl_vector_set_capacity
+cl_vector_set_min_size +cl_vector_set_size +cl_vector_t
+cl_waitobj.h +cl_waitobj_create +cl_waitobj_deref
+cl_waitobj_destroy +cl_waitobj_handle_t +cl_waitobj_ref
+cl_waitobj_reset +cl_waitobj_signal +cl_waitobj_wait_on
+cl_zalloc +comp_lib.h +Component Library
+Composite Pool +Constants +Data Types
+Debug Levels +Debug Output +DM_SVC_NAME
+Event +Flexi Map +ib_access_t
+ib_add_svc_entry +ib_al.h +ib_al_flags_t
+ib_alloc_pd +ib_api_status_t +ib_apm_state_t
+ib_apr_info_t +ib_apr_pdata_t +ib_apr_status_t
+ib_ari_t +ib_async_event_rec_t +ib_async_event_t
+ib_atomic_t +ib_av_attr_t +ib_bind_mw
+ib_bind_wr_t +ib_ca_attr_t +ib_ca_mod_t
+ib_cancel_mad +ib_cancel_query +ib_cep_listen_t
+ib_ci_call +ib_ci_op_t +IB_CLASS_CAP_GETSET
+IB_CLASS_CAP_TRAP +ib_class_is_rmpp +ib_class_is_vendor_specific
+ib_class_is_vendor_specific_high +ib_class_is_vendor_specific_low +ib_class_port_info_t
+IB_CLASS_RESP_TIME_MASK +ib_close_al +ib_close_ca
+ib_cm_apr +ib_cm_apr_rec_t +ib_cm_apr_t
+ib_cm_cancel +ib_cm_cap_mask_t +ib_cm_drep
+ib_cm_drep_rec_t +ib_cm_drep_t +ib_cm_dreq
+ib_cm_dreq_rec_t +ib_cm_dreq_t +ib_cm_failover_t
+ib_cm_handoff +ib_cm_lap +ib_cm_lap_rec_t
+ib_cm_lap_t +ib_cm_listen +ib_cm_listen_t
+ib_cm_mra +ib_cm_mra_rec_t +ib_cm_mra_t
+ib_cm_rej +ib_cm_rej_rec_t +ib_cm_rej_t
+ib_cm_rep +ib_cm_rep_rec_t +ib_cm_rep_t
+ib_cm_req +ib_cm_req_rec_t +ib_cm_req_t
+ib_cm_rtu +ib_cm_rtu_rec_t +ib_cm_rtu_t
+ib_copy_ca_attr +ib_cq_create_t +ib_create_av
+ib_create_cq +ib_create_ioc +ib_create_mad_pool
+ib_create_mw +ib_create_qp +ib_create_srq
+ib_dealloc_pd +IB_DEFAULT_PARTIAL_PKEY +IB_DEFAULT_PKEY
+IB_DEFAULT_SUBNET_PREFIX +ib_dereg_mad_pool +ib_dereg_mr
+ib_dereg_pnp +ib_dereg_svc +ib_destroy_av
+ib_destroy_cq +ib_destroy_ioc +ib_destroy_mad_pool
+ib_destroy_mw +ib_destroy_qp +ib_destroy_srq
+ib_device_attr_mask_t +ib_dgrm_info_t +ib_dm_get_slot_lo_hi
+ib_dm_mad_t +ib_dm_set_slot_lo_hi +ib_drep_pdata_t
+ib_dreq_pdata_t +ib_event_rec_t +ib_field32_t
+ib_force_apm +ib_get_async_event_str +ib_get_ca_by_gid
+ib_get_ca_guids +ib_get_err_str +ib_get_guid
+ib_get_mad +ib_get_mad_buf +ib_get_node_type_str
+ib_get_port_by_gid +ib_get_port_state_from_str +ib_get_port_state_str
+ib_get_qp_type_str +ib_get_query_node_rec +ib_get_query_path_rec
+ib_get_query_portinfo_rec +ib_get_query_result +ib_get_query_svc_rec
+ib_get_spl_qp +ib_get_wc_status_str +ib_get_wc_type_str
+ib_get_wr_type_str +ib_gid_get_guid +ib_gid_get_subnet_prefix
+ib_gid_is_link_local +ib_gid_is_site_local +ib_gid_pair_t
+ib_gid_prefix_t +ib_gid_set_default +ib_gid_t
+ib_gmp_t +ib_grh_get_ver_class_flow +ib_grh_set_ver_class_flow
+ib_grh_t +ib_guid_info_t +ib_guid_pair_t
+ib_inform_get_dev_id +ib_inform_get_prod_type +ib_inform_get_qpn
+ib_inform_get_resp_time_val +ib_inform_get_trap_num +ib_inform_get_vend_id
+ib_inform_info_record_t +ib_inform_set_dev_id +ib_inform_set_prod_type
+ib_inform_set_qpn +ib_inform_set_resp_time_val +ib_inform_set_trap_num
+ib_inform_set_vend_id +ib_init_dgrm_svc +ib_init_type_t
+IB_INVALID_PORT_NUM +ib_ioc_profile_t +ib_iou_info_diag_dev_id
+ib_iou_info_option_rom +ib_iou_info_t +ib_join_mcast
+ib_lap_pdata_t +ib_leave_mcast +ib_lft_record_t
+IB_LID_MCAST_END +IB_LID_MCAST_START +ib_lid_pair_t
+IB_LID_PERMISSIVE +IB_LID_UCAST_END +IB_LID_UCAST_START
+ib_link_states_t +ib_listen_err_rec_t +ib_listen_info_t
+ib_local_ds_t +ib_local_mad +IB_MAD_ATTR_CLASS_PORT_INFO
+IB_MAD_ATTR_DIAG_CODE +IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT +IB_MAD_ATTR_GUID_INFO
+IB_MAD_ATTR_GUIDINFO_RECORD +IB_MAD_ATTR_INFORM_INFO +IB_MAD_ATTR_INFORM_INFO_RECORD
+IB_MAD_ATTR_IO_CONTROLLER_PROFILE +IB_MAD_ATTR_IO_UNIT_INFO +IB_MAD_ATTR_LED_INFO
+IB_MAD_ATTR_LFT_RECORD +IB_MAD_ATTR_LIN_FWD_TBL +IB_MAD_ATTR_LINK_RECORD
+IB_MAD_ATTR_MCAST_FWD_TBL +IB_MAD_ATTR_MCMEMBER_RECORD +IB_MAD_ATTR_MULTIPATH_RECORD
+IB_MAD_ATTR_NODE_DESC +IB_MAD_ATTR_NODE_INFO +IB_MAD_ATTR_NODE_RECORD
+IB_MAD_ATTR_NOTICE +IB_MAD_ATTR_P_KEY_TABLE +IB_MAD_ATTR_PATH_RECORD
+IB_MAD_ATTR_PKEYTBL_RECORD +IB_MAD_ATTR_PORT_CNTRS +IB_MAD_ATTR_PORT_INFO
+IB_MAD_ATTR_PORT_SMPL_CTRL +IB_MAD_ATTR_PORT_SMPL_RSLT +IB_MAD_ATTR_PORTINFO_RECORD
+IB_MAD_ATTR_PREPARE_TO_TEST +IB_MAD_ATTR_RND_FWD_TBL +IB_MAD_ATTR_SERVICE_ENTRIES
+IB_MAD_ATTR_SERVICE_RECORD +IB_MAD_ATTR_SLVL_RECORD +IB_MAD_ATTR_SLVL_TABLE
+IB_MAD_ATTR_SM_INFO +IB_MAD_ATTR_SMINFO_RECORD +IB_MAD_ATTR_SVC_ASSOCIATION_RECORD
+IB_MAD_ATTR_SWITCH_INFO +IB_MAD_ATTR_TEST_DEVICE_LOOP +IB_MAD_ATTR_TEST_DEVICE_ONCE
+IB_MAD_ATTR_TRACE_RECORD +IB_MAD_ATTR_VENDOR_DIAG +IB_MAD_ATTR_VL_ARBITRATION
+IB_MAD_ATTR_VLARB_RECORD +ib_mad_element_t +ib_mad_init_new
+ib_mad_init_response +ib_mad_is_response +IB_MAD_METHOD_GET
+IB_MAD_METHOD_GET_RESP +IB_MAD_METHOD_GETTABLE +IB_MAD_METHOD_GETTABLE_RESP
+IB_MAD_METHOD_REPORT +IB_MAD_METHOD_REPORT_RESP +IB_MAD_METHOD_RESP_MASK
+IB_MAD_METHOD_SEND +IB_MAD_METHOD_SET +IB_MAD_METHOD_TRAP
+IB_MAD_METHOD_TRAP_REPRESS +IB_MAD_STATUS_BUSY +IB_MAD_STATUS_INVALID_FIELD
+IB_MAD_STATUS_REDIRECT +IB_MAD_STATUS_UNSUP_CLASS_VER +IB_MAD_STATUS_UNSUP_METHOD
+IB_MAD_STATUS_UNSUP_METHOD_ATTR +ib_mad_svc_t +ib_mad_svc_type_t
+ib_mad_t +IB_MAX_METHOD +IB_MCAST_BLOCK_ID_MASK_HO
+IB_MCAST_BLOCK_SIZE +IB_MCAST_MASK_SIZE +IB_MCAST_MAX_BLOCK_ID
+IB_MCAST_POSITION_MASK_HO +IB_MCAST_POSITION_MAX +IB_MCAST_POSITION_SHIFT
+ib_mcast_rec_t +ib_mcast_req_t +IB_MCLASS_BIS
+IB_MCLASS_BM +IB_MCLASS_COMM_MGMT +IB_MCLASS_DEV_ADM
+IB_MCLASS_DEV_MGMT +IB_MCLASS_PERF +IB_MCLASS_SNMP
+IB_MCLASS_SUBN_ADM +IB_MCLASS_SUBN_DIR +IB_MCLASS_SUBN_LID
+IB_MCLASS_VENDOR_HIGH_RANGE_MAX +IB_MCLASS_VENDOR_HIGH_RANGE_MIN +IB_MCLASS_VENDOR_LOW_RANGE_MAX
+IB_MCLASS_VENDOR_LOW_RANGE_MIN +ib_member_get_scope +ib_member_get_scope_state
+ib_member_get_sl_flow_hop +ib_member_get_state +ib_member_rec_t
+ib_member_set_join_state +ib_member_set_scope +ib_member_set_scope_state
+ib_member_set_sl_flow_hop +ib_member_set_state +ib_modify_av
+ib_modify_ca +ib_modify_cq +ib_modify_qp
+ib_modify_srq +ib_mr_attr_t +ib_mr_create_t
+ib_mr_mod_t +ib_mra_pdata_t +IB_MTU_LEN_TYPE
+IB_MULTIPATH_REC_BASE_MASK +ib_net16_t +ib_net32_t
+ib_net64_t +ib_node_info_get_local_port_num +ib_node_info_get_vendor_id
+ib_node_info_t +IB_NODE_NUM_PORTS_MAX +IB_NODE_TYPE_CA
+IB_NODE_TYPE_ROUTER +IB_NODE_TYPE_SWITCH +ib_notice_get_count
+ib_notice_get_dev_id +ib_notice_get_generic +ib_notice_get_prod_type
+ib_notice_get_toggle +ib_notice_get_trap_num +ib_notice_get_type
+ib_notice_get_vend_id +IB_NOTICE_NODE_TYPE_CA +IB_NOTICE_NODE_TYPE_ROUTER
+IB_NOTICE_NODE_TYPE_SUBN_MGMT +IB_NOTICE_NODE_TYPE_SWITCH +ib_notice_set_count
+ib_notice_set_dev_id +ib_notice_set_generic +ib_notice_set_prod_type
+ib_notice_set_toggle +ib_notice_set_trap_num +ib_notice_set_type
+ib_notice_set_vend_id +ib_open_al +ib_open_ca
+ib_path_get_ipd +IB_PATH_REC_BASE_MASK +ib_path_rec_flow_lbl
+ib_path_rec_hop_limit +ib_path_rec_init_local +ib_path_rec_mtu
+ib_path_rec_mtu_sel +ib_path_rec_num_path +ib_path_rec_pkt_life
+ib_path_rec_pkt_life_sel +ib_path_rec_rate +ib_path_rec_rate_sel
+IB_PATH_REC_SELECTOR_MASK +ib_path_rec_set_hop_flow_raw +ib_path_rec_sl
+ib_path_rec_t +IB_PATH_SELECTOR_TYPE +ib_pd_type_t
+ib_peek_cq +ib_pfn_cm_apr_cb_t +ib_pfn_cm_drep_cb_t
+ib_pfn_cm_dreq_cb_t +ib_pfn_cm_lap_cb_t +ib_pfn_cm_mra_cb_t
+ib_pfn_cm_rej_cb_t +ib_pfn_cm_rep_cb_t +ib_pfn_cm_req_cb_t
+ib_pfn_cm_rtu_cb_t +ib_pfn_comp_cb_t +ib_pfn_destroy_cb_t
+ib_pfn_event_cb_t +ib_pfn_listen_err_cb_t +ib_pfn_mad_comp_cb_t
+ib_pfn_mcast_cb_t +ib_pfn_pnp_cb_t +ib_pfn_query_cb_t
+ib_pfn_reg_svc_cb_t +ib_pfn_report_cb_t +ib_pfn_sub_cb_t
+ib_phys_create_t +ib_phys_range_t +IB_PKEY_BASE_MASK
+IB_PKEY_ENTRIES_MAX +ib_pkey_get_base +ib_pkey_is_full_member
+ib_pkey_is_invalid +IB_PKEY_MAX_BLOCKS +ib_pkey_table_info_t
+IB_PKEY_TYPE_MASK +ib_pnp_ca_rec_t +ib_pnp_class_t
+ib_pnp_event_t +ib_pnp_ioc_path_rec_t +ib_pnp_ioc_rec_t
+ib_pnp_iou_rec_t +ib_pnp_port_rec_t +ib_pnp_rec_t
+ib_pnp_req_t +ib_poll_cq +ib_port_attr_mod_t
+ib_port_attr_t +ib_port_cap_t +ib_port_counters_t
+ib_port_info_compute_rate +ib_port_info_get_init_type +ib_port_info_get_link_speed_active
+ib_port_info_get_link_speed_sup +ib_port_info_get_lmc +ib_port_info_get_mpb
+ib_port_info_get_mtu_cap +ib_port_info_get_neighbor_mtu +ib_port_info_get_op_vls
+ib_port_info_get_port_state +ib_port_info_get_sm_sl +ib_port_info_get_vl_cap
+ib_port_info_set_link_speed_sup +ib_port_info_set_lmc +ib_port_info_set_mpb
+ib_port_info_set_neighbor_mtu +ib_port_info_set_op_vls +ib_port_info_set_port_state
+ib_port_info_set_sm_sl +ib_port_info_set_state_no_change +ib_port_info_set_timeout
+ib_port_info_t +ib_post_recv +ib_post_send
+ib_post_srq_recv +ib_put_mad +IB_QP1_WELL_KNOWN_Q_KEY
+ib_qp_attr_t +ib_qp_create_t +ib_qp_mod_t
+ib_qp_opts_t +ib_qp_state_t +ib_qp_type_t
+ib_query +ib_query_av +ib_query_ca
+ib_query_ca_by_guid +ib_query_cq +ib_query_mr
+ib_query_mw +ib_query_qp +ib_query_rec_t
+ib_query_req_t +ib_query_srq +ib_query_type_t
+ib_rearm_cq +ib_rearm_n_cq +ib_recv_opt_t
+ib_recv_wr_t +ib_reg_ioc +ib_reg_mad_pool
+ib_reg_mad_svc +ib_reg_mem +ib_reg_phys
+ib_reg_pnp +ib_reg_shared +ib_reg_shmid
+ib_reg_svc +ib_reg_svc_rec_t +ib_reg_svc_req_t
+ib_rej_pdata_t +ib_rej_status_t +ib_reject_ioc
+ib_remove_svc_entry +ib_rep_pdata_t +ib_report_rec_t
+ib_req_pdata_t +ib_rereg_mem +ib_rereg_phys
+ib_rmpp_is_flag_set +ib_rmpp_mad_t +ib_rtu_pdata_t
+ib_sa_mad_get_payload_ptr +ib_sa_mad_t +ib_send_mad
+ib_send_opt_t +ib_send_wr_t +ib_shmid_t
+ib_sidr_rep_pdata_t +ib_sidr_req_pdata_t +ib_slvl_table_get_vl
+ib_slvl_table_record_t +ib_slvl_table_set_vl +ib_slvl_table_t
+ib_sm_info_t +IB_SMINFO_ATTR_MOD_ACKNOWLEDGE +IB_SMINFO_ATTR_MOD_DISABLE
+IB_SMINFO_ATTR_MOD_DISCOVER +IB_SMINFO_ATTR_MOD_HANDOVER +IB_SMINFO_ATTR_MOD_STANDBY
+ib_sminfo_get_priority +ib_sminfo_get_state +IB_SMINFO_STATE_DISCOVERING
+IB_SMINFO_STATE_INIT +IB_SMINFO_STATE_MASTER +IB_SMINFO_STATE_NOTACTIVE
+IB_SMINFO_STATE_STANDBY +IB_SMP_DIRECTION +ib_smp_get_payload_ptr
+ib_smp_get_status +ib_smp_init_new +ib_smp_is_d
+ib_smp_is_response +IB_SMP_STATUS_MASK +ib_smp_t
+ib_srq_attr_mask_t +ib_srq_attr_t +ib_sub_rec_t
+ib_sub_req_t +IB_SUBNET_PATH_HOPS_MAX +ib_subscribe
+ib_svc_entries_t +ib_svc_entry_t +ib_switch_info_clear_state_change
+ib_switch_info_get_state_change +ib_switch_info_t +ib_sync_destroy
+ib_types.h +ib_unsubscribe +ib_user_query_t
+ib_vl_arb_element_get_vl +ib_vl_arb_element_set_vl +ib_vl_arb_element_t
+ib_vl_arb_table_record_t +ib_vl_arb_table_t +ib_wc_status_t
+ib_wc_t +ib_wc_type_t +ib_wr_type_t
+ioc_at_slot +IOCTL Object +IOCTL_CODE
+Irqlock +Join States +List
+Log Provider +MAD_BLOCK_GRH_SIZE +MAD_BLOCK_SIZE
+MAD_RMPP_DATA_SIZE +MAD_RMPP_HDR_SIZE +Map
+MAX +Memory Management +MIN
+mlnx_create_fmr +mlnx_destroy_fmr +mlnx_fmr_create_t
+mlnx_map_fmr +mlnx_unmap_fmr +Mutex
+Object +Object States +offsetof
+Parameter Keywords +PARENT_STRUCT +Passive Lock
+PERF_DECLARE +PERF_DECLARE_START +Performance Counters
+Pointer Vector +Pool +Quick Composite Pool
+Quick List +Quick Locking Pool +Quick Map
+Quick Pool +RB Map +Request Manager
+ROUNDUP +Spinlock +System Callback
+Thread Pool +Timer +Type Definitions
+UNUSED_PARAM +Vector +Wait Object
+ + diff --git a/branches/Ndi/docs/openfabrics.gif b/branches/Ndi/docs/openfabrics.gif new file mode 100644 index 0000000000000000000000000000000000000000..148d87752e676f4fe97573730f58cc5e0182878c GIT binary patch literal 3660 [binary GIF image data omitted] literal 0 HcmV?d00001 + diff --git a/branches/Ndi/docs/robo_definitions.html b/branches/Ndi/docs/robo_definitions.html new file mode 100644 --- /dev/null +++ b/branches/Ndi/docs/robo_definitions.html + +Definitions + +Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+

+[Sourcefiles] +[Index] +[Definitions] +[Functions] +[Modules] +[Structures] +

+

Definitions

+64-bit Print Format +ATS +cl_destroy_type_t
+CL_ENTER +CL_EXIT +CL_HTON16
+CL_HTON32 +CL_HTON64 +cl_ioctl_handle_t
+cl_is_state_valid +cl_list_iterator_t +cl_log_type_t
+cl_map_iterator_t +CL_NTOH16 +CL_NTOH32
+CL_NTOH64 +cl_perf_clr +cl_perf_inc
+cl_perf_log +cl_perf_start +cl_perf_stop
+cl_perf_update +cl_perf_update_ctr +cl_pfn_async_proc_cb_t
+cl_pfn_cpool_dtor_t +cl_pfn_cpool_init_t +cl_pfn_fmap_apply_t
+cl_pfn_fmap_cmp_t +cl_pfn_ioctl_handler_t +cl_pfn_list_apply_t
+cl_pfn_list_find_t +cl_pfn_obj_call_t +cl_pfn_pool_dtor_t
+cl_pfn_pool_init_t +cl_pfn_ptr_vec_apply_t +cl_pfn_ptr_vec_find_t
+cl_pfn_qcpool_dtor_t +cl_pfn_qcpool_init_t +cl_pfn_qlist_apply_t
+cl_pfn_qlist_find_t +cl_pfn_qmap_apply_t +cl_pfn_qpool_dtor_t
+cl_pfn_qpool_init_t +cl_pfn_req_cb_t +cl_pfn_reqmgr_get_count_t
+cl_pfn_sys_callback_t +cl_pfn_thread_callback_t +cl_pfn_timer_callback_t
+cl_pfn_vec_apply_t +cl_pfn_vec_dtor_t +cl_pfn_vec_find_t
+cl_pfn_vec_init_t +CL_PRINT +cl_req_type_t
+CL_STATUS_MSG +cl_status_t +CL_TRACE
+CL_TRACE_EXIT +cl_waitobj_handle_t +Data Types
+Debug Levels +DM_SVC_NAME +ib_access_t
+ib_al_flags_t +ib_api_status_t +ib_apm_state_t
+ib_apr_status_t +ib_async_event_t +ib_atomic_t
+ib_ca_mod_t +ib_cm_cap_mask_t +ib_cm_failover_t
+IB_DEFAULT_PARTIAL_PKEY +IB_DEFAULT_PKEY +IB_DEFAULT_SUBNET_PREFIX
+ib_device_attr_mask_t +ib_gid_prefix_t +ib_gid_t
+ib_init_type_t +IB_INVALID_PORT_NUM +IB_LID_MCAST_END
+IB_LID_MCAST_START +IB_LID_PERMISSIVE +IB_LID_UCAST_END
+IB_LID_UCAST_START +ib_link_states_t +ib_listen_info_t
+IB_MAD_ATTR_CLASS_PORT_INFO +IB_MAD_ATTR_DIAG_CODE +IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT
+IB_MAD_ATTR_GUID_INFO +IB_MAD_ATTR_GUIDINFO_RECORD +IB_MAD_ATTR_INFORM_INFO
+IB_MAD_ATTR_INFORM_INFO_RECORD +IB_MAD_ATTR_IO_CONTROLLER_PROFILE +IB_MAD_ATTR_IO_UNIT_INFO
+IB_MAD_ATTR_LED_INFO +IB_MAD_ATTR_LFT_RECORD +IB_MAD_ATTR_LIN_FWD_TBL
+IB_MAD_ATTR_LINK_RECORD +IB_MAD_ATTR_MCAST_FWD_TBL +IB_MAD_ATTR_MCMEMBER_RECORD
+IB_MAD_ATTR_MULTIPATH_RECORD +IB_MAD_ATTR_NODE_DESC +IB_MAD_ATTR_NODE_INFO
+IB_MAD_ATTR_NODE_RECORD +IB_MAD_ATTR_NOTICE +IB_MAD_ATTR_P_KEY_TABLE
+IB_MAD_ATTR_PATH_RECORD +IB_MAD_ATTR_PKEYTBL_RECORD +IB_MAD_ATTR_PORT_CNTRS
+IB_MAD_ATTR_PORT_INFO +IB_MAD_ATTR_PORT_SMPL_CTRL +IB_MAD_ATTR_PORT_SMPL_RSLT
+IB_MAD_ATTR_PORTINFO_RECORD +IB_MAD_ATTR_PREPARE_TO_TEST +IB_MAD_ATTR_RND_FWD_TBL
+IB_MAD_ATTR_SERVICE_ENTRIES +IB_MAD_ATTR_SERVICE_RECORD +IB_MAD_ATTR_SLVL_RECORD
+IB_MAD_ATTR_SLVL_TABLE +IB_MAD_ATTR_SM_INFO +IB_MAD_ATTR_SMINFO_RECORD
+IB_MAD_ATTR_SVC_ASSOCIATION_RECORD +IB_MAD_ATTR_SWITCH_INFO +IB_MAD_ATTR_TEST_DEVICE_LOOP
+IB_MAD_ATTR_TEST_DEVICE_ONCE +IB_MAD_ATTR_TRACE_RECORD +IB_MAD_ATTR_VENDOR_DIAG
+IB_MAD_ATTR_VL_ARBITRATION +IB_MAD_ATTR_VLARB_RECORD +IB_MAD_METHOD_GET
+IB_MAD_METHOD_GET_RESP +IB_MAD_METHOD_GETTABLE +IB_MAD_METHOD_GETTABLE_RESP
+IB_MAD_METHOD_REPORT +IB_MAD_METHOD_REPORT_RESP +IB_MAD_METHOD_RESP_MASK
+IB_MAD_METHOD_SEND +IB_MAD_METHOD_SET +IB_MAD_METHOD_TRAP
+IB_MAD_METHOD_TRAP_REPRESS +IB_MAD_STATUS_BUSY +IB_MAD_STATUS_INVALID_FIELD
+IB_MAD_STATUS_REDIRECT +IB_MAD_STATUS_UNSUP_CLASS_VER +IB_MAD_STATUS_UNSUP_METHOD
+IB_MAD_STATUS_UNSUP_METHOD_ATTR +ib_mad_svc_type_t +IB_MAX_METHOD
+IB_MCAST_BLOCK_ID_MASK_HO +IB_MCAST_BLOCK_SIZE +IB_MCAST_MASK_SIZE
+IB_MCAST_MAX_BLOCK_ID +IB_MCAST_POSITION_MASK_HO +IB_MCAST_POSITION_MAX
+IB_MCAST_POSITION_SHIFT +IB_MCLASS_BIS +IB_MCLASS_BM
+IB_MCLASS_COMM_MGMT +IB_MCLASS_DEV_ADM +IB_MCLASS_DEV_MGMT
+IB_MCLASS_PERF +IB_MCLASS_SNMP +IB_MCLASS_SUBN_ADM
+IB_MCLASS_SUBN_DIR +IB_MCLASS_SUBN_LID +IB_MCLASS_VENDOR_HIGH_RANGE_MAX
+IB_MCLASS_VENDOR_HIGH_RANGE_MIN +IB_MCLASS_VENDOR_LOW_RANGE_MAX +IB_MCLASS_VENDOR_LOW_RANGE_MIN
+ib_mr_mod_t +IB_MTU_LEN_TYPE +IB_MULTIPATH_REC_BASE_MASK
+ib_net16_t +ib_net32_t +ib_net64_t
+IB_NODE_NUM_PORTS_MAX +IB_NODE_TYPE_CA +IB_NODE_TYPE_ROUTER
+IB_NODE_TYPE_SWITCH +IB_NOTICE_NODE_TYPE_CA +IB_NOTICE_NODE_TYPE_ROUTER
+IB_NOTICE_NODE_TYPE_SUBN_MGMT +IB_NOTICE_NODE_TYPE_SWITCH +IB_PATH_REC_BASE_MASK
+IB_PATH_REC_SELECTOR_MASK +IB_PATH_SELECTOR_TYPE +ib_pd_type_t
+IB_PKEY_BASE_MASK +IB_PKEY_ENTRIES_MAX +IB_PKEY_MAX_BLOCKS
+IB_PKEY_TYPE_MASK +ib_pnp_class_t +ib_pnp_event_t
+IB_QP1_WELL_KNOWN_Q_KEY +ib_qp_opts_t +ib_qp_state_t
+ib_qp_type_t +ib_query_type_t +ib_recv_opt_t
+ib_rej_status_t +ib_send_opt_t +IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+IB_SMINFO_ATTR_MOD_DISABLE +IB_SMINFO_ATTR_MOD_DISCOVER +IB_SMINFO_ATTR_MOD_HANDOVER
+IB_SMINFO_ATTR_MOD_STANDBY +IB_SMINFO_STATE_DISCOVERING +IB_SMINFO_STATE_INIT
+IB_SMINFO_STATE_MASTER +IB_SMINFO_STATE_NOTACTIVE +IB_SMINFO_STATE_STANDBY
+IB_SMP_DIRECTION +IB_SMP_STATUS_MASK +ib_srq_attr_mask_t
+IB_SUBNET_PATH_HOPS_MAX +ib_wc_status_t +ib_wc_type_t
+ib_wr_type_t +IOCTL_CODE +Join States
+MAD_BLOCK_GRH_SIZE +MAD_BLOCK_SIZE +MAD_RMPP_DATA_SIZE
+MAD_RMPP_HDR_SIZE +MAX +MIN
+Object States +offsetof +Parameter Keywords
+PARENT_STRUCT +PERF_DECLARE +PERF_DECLARE_START
+ROUNDUP +UNUSED_PARAM
+ + diff --git a/branches/Ndi/docs/robo_functions.html b/branches/Ndi/docs/robo_functions.html new file mode 100644 index 00000000..823659ce --- /dev/null +++ b/branches/Ndi/docs/robo_functions.html @@ -0,0 +1,1535 @@ + + + + +Functions + + + + +Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52 +
+

+[Sourcefiles] +[Index] +[Definitions] +[Functions] +[Modules] +[Structures] +

+

Functions

+cl_async_proc_construct +cl_async_proc_destroy +cl_async_proc_init
+cl_async_proc_queue +cl_atomic_add +cl_atomic_comp_xchg
+cl_atomic_dec +cl_atomic_inc +cl_atomic_sub
+cl_atomic_xchg +cl_break +cl_check_for_read
+cl_check_for_write +cl_copy_from_user +cl_copy_to_user
+cl_cpool_construct +cl_cpool_count +cl_cpool_destroy
+cl_cpool_get +cl_cpool_grow +cl_cpool_init
+cl_cpool_put +cl_dbg_out +cl_event_construct
+cl_event_destroy +cl_event_init +cl_event_reset
+cl_event_signal +cl_event_wait_on +cl_fmap_apply_func
+cl_fmap_count +cl_fmap_delta +cl_fmap_end
+cl_fmap_get +cl_fmap_head +cl_fmap_init
+cl_fmap_insert +cl_fmap_key +cl_fmap_merge
+cl_fmap_next +cl_fmap_prev +cl_fmap_remove
+cl_fmap_remove_all +cl_fmap_remove_item +cl_fmap_tail
+cl_free +cl_get_pagesize +cl_get_physaddr
+cl_get_tick_count +cl_get_tick_freq +cl_get_time_stamp
+cl_get_time_stamp_sec +cl_get_time_stamp_usec +cl_hton16
+cl_hton32 +cl_hton64 +cl_ioctl_cmd
+cl_ioctl_complete +cl_ioctl_ctl_code +cl_ioctl_in_buf
+cl_ioctl_in_size +cl_ioctl_out_buf +cl_ioctl_out_size
+cl_ioctl_process +cl_ioctl_request +cl_ioctl_result
+cl_ioctl_type +cl_irqlock_acquire +cl_irqlock_construct
+cl_irqlock_destroy +cl_irqlock_init +cl_irqlock_release
+cl_is_blockable +cl_is_cpool_inited +cl_is_fmap_empty
+cl_is_item_in_qlist +cl_is_list_empty +cl_is_list_inited
+cl_is_map_empty +cl_is_map_inited +cl_is_object_in_list
+cl_is_pool_inited +cl_is_qcpool_inited +cl_is_qlist_empty
+cl_is_qmap_empty +cl_is_qpool_inited +cl_is_rbmap_empty
+cl_is_sys_callback_inited +cl_list_apply_func +cl_list_construct
+cl_list_count +cl_list_destroy +cl_list_end
+cl_list_find_from_head +cl_list_find_from_tail +cl_list_head
+cl_list_init +cl_list_insert_array_head +cl_list_insert_array_tail
+cl_list_insert_head +cl_list_insert_next +cl_list_insert_prev
+cl_list_insert_tail +cl_list_next +cl_list_obj
+cl_list_prev +cl_list_remove_all +cl_list_remove_head
+cl_list_remove_item +cl_list_remove_object +cl_list_remove_tail
+cl_list_tail +cl_log_event +cl_malloc
+cl_map_construct +cl_map_count +cl_map_delta
+cl_map_destroy +cl_map_end +cl_map_get
+cl_map_head +cl_map_init +cl_map_insert
+cl_map_key +cl_map_merge +cl_map_next
+cl_map_obj +cl_map_prev +cl_map_remove
+cl_map_remove_all +cl_map_remove_item +cl_map_tail
+cl_mem_display +cl_memclr +cl_memcmp
+cl_memcpy +cl_memset +cl_msg_out
+cl_mutex_acquire +cl_mutex_construct +cl_mutex_destroy
+cl_mutex_init +cl_mutex_release +cl_ntoh
+cl_ntoh16 +cl_ntoh32 +cl_ntoh64
+cl_obj_construct +cl_obj_deinit +cl_obj_deref
+cl_obj_destroy +cl_obj_init +cl_obj_insert_rel
+cl_obj_insert_rel_parent_locked +cl_obj_lock +cl_obj_mgr_create
+cl_obj_mgr_destroy +cl_obj_ref +cl_obj_remove_rel
+cl_obj_reset +cl_obj_type +cl_obj_unlock
+cl_palloc +cl_panic +cl_perf_construct
+cl_perf_destroy +cl_perf_display +cl_perf_init
+cl_perf_reset +cl_plock_acquire +cl_plock_construct
+cl_plock_destroy +cl_plock_excl_acquire +cl_plock_init
+cl_plock_release +cl_pool_construct +cl_pool_count
+cl_pool_destroy +cl_pool_get +cl_pool_grow
+cl_pool_init +cl_pool_put +cl_proc_count
+cl_ptr_vector_apply_func +cl_ptr_vector_at +cl_ptr_vector_construct
+cl_ptr_vector_destroy +cl_ptr_vector_find_from_end +cl_ptr_vector_find_from_start
+cl_ptr_vector_get +cl_ptr_vector_get_capacity +cl_ptr_vector_get_size
+cl_ptr_vector_init +cl_ptr_vector_insert +cl_ptr_vector_remove
+cl_ptr_vector_set +cl_ptr_vector_set_capacity +cl_ptr_vector_set_min_size
+cl_ptr_vector_set_size +cl_pzalloc +cl_qcpool_construct
+cl_qcpool_count +cl_qcpool_destroy +cl_qcpool_get
+cl_qcpool_grow +cl_qcpool_init +cl_qcpool_put
+cl_qcpool_put_list +cl_qlist_apply_func +cl_qlist_count
+cl_qlist_end +cl_qlist_find_from_head +cl_qlist_find_from_tail
+cl_qlist_find_next +cl_qlist_find_prev +cl_qlist_head
+cl_qlist_init +cl_qlist_insert_array_head +cl_qlist_insert_array_tail
+cl_qlist_insert_head +cl_qlist_insert_list_head +cl_qlist_insert_list_tail
+cl_qlist_insert_next +cl_qlist_insert_prev +cl_qlist_insert_tail
+cl_qlist_move_items +cl_qlist_next +cl_qlist_obj
+cl_qlist_prev +cl_qlist_remove_all +cl_qlist_remove_head
+cl_qlist_remove_item +cl_qlist_remove_tail +cl_qlist_set_obj
+cl_qlist_tail +cl_qlock_pool_construct +cl_qlock_pool_destroy
+cl_qlock_pool_get +cl_qlock_pool_init +cl_qlock_pool_put
+cl_qmap_apply_func +cl_qmap_count +cl_qmap_delta
+cl_qmap_end +cl_qmap_get +cl_qmap_head
+cl_qmap_init +cl_qmap_insert +cl_qmap_key
+cl_qmap_merge +cl_qmap_next +cl_qmap_obj
+cl_qmap_prev +cl_qmap_remove +cl_qmap_remove_all
+cl_qmap_remove_item +cl_qmap_set_obj +cl_qmap_tail
+cl_qpool_construct +cl_qpool_count +cl_qpool_destroy
+cl_qpool_get +cl_qpool_grow +cl_qpool_init
+cl_qpool_put +cl_qpool_put_list +cl_rbmap_count
+cl_rbmap_end +cl_rbmap_init +cl_rbmap_insert
+cl_rbmap_left +cl_rbmap_remove_item +cl_rbmap_reset
+cl_rbmap_right +cl_rbmap_root +cl_rel_alloc
+cl_rel_free +cl_req_mgr_construct +cl_req_mgr_destroy
+cl_req_mgr_get +cl_req_mgr_init +cl_req_mgr_resume
+cl_spinlock_acquire +cl_spinlock_construct +cl_spinlock_destroy
+cl_spinlock_init +cl_spinlock_release +cl_sys_callback_get
+cl_sys_callback_put +cl_sys_callback_queue +cl_thread_pool_construct
+cl_thread_pool_destroy +cl_thread_pool_init +cl_thread_pool_signal
+cl_thread_stall +cl_thread_suspend +cl_timer_construct
+cl_timer_destroy +cl_timer_init +cl_timer_start
+cl_timer_stop +cl_timer_trim +cl_vector_apply_func
+cl_vector_at +cl_vector_construct +cl_vector_destroy
+cl_vector_find_from_end +cl_vector_find_from_start +cl_vector_get
+cl_vector_get_capacity +cl_vector_get_ptr +cl_vector_get_size
+cl_vector_init +cl_vector_set +cl_vector_set_capacity
+cl_vector_set_min_size +cl_vector_set_size +cl_waitobj_create
+cl_waitobj_deref +cl_waitobj_destroy +cl_waitobj_ref
+cl_waitobj_reset +cl_waitobj_signal +cl_waitobj_wait_on
+cl_zalloc +ib_add_svc_entry +ib_alloc_pd
+ib_bind_mw +ib_cancel_mad +ib_cancel_query
+ib_ci_call +ib_class_is_rmpp +ib_class_is_vendor_specific
+ib_class_is_vendor_specific_high +ib_class_is_vendor_specific_low +ib_close_al
+ib_close_ca +ib_cm_apr +ib_cm_cancel
+ib_cm_drep +ib_cm_dreq +ib_cm_handoff
+ib_cm_lap +ib_cm_listen +ib_cm_mra
+ib_cm_rej +ib_cm_rep +ib_cm_req
+ib_cm_rtu +ib_copy_ca_attr +ib_create_av
+ib_create_cq +ib_create_ioc +ib_create_mad_pool
+ib_create_mw +ib_create_qp +ib_create_srq
+ib_dealloc_pd +ib_dereg_mad_pool +ib_dereg_mr
+ib_dereg_pnp +ib_dereg_svc +ib_destroy_av
+ib_destroy_cq +ib_destroy_ioc +ib_destroy_mad_pool
+ib_destroy_mw +ib_destroy_qp +ib_destroy_srq
+ib_dm_get_slot_lo_hi +ib_dm_set_slot_lo_hi +ib_force_apm
+ib_get_async_event_str +ib_get_ca_by_gid +ib_get_ca_guids
+ib_get_err_str +ib_get_guid +ib_get_mad
+ib_get_mad_buf +ib_get_node_type_str +ib_get_port_by_gid
+ib_get_port_state_from_str +ib_get_port_state_str +ib_get_qp_type_str
+ib_get_query_node_rec +ib_get_query_path_rec +ib_get_query_portinfo_rec
+ib_get_query_result +ib_get_query_svc_rec +ib_get_spl_qp
+ib_get_wc_status_str +ib_get_wc_type_str +ib_get_wr_type_str
+ib_gid_get_guid +ib_gid_get_subnet_prefix +ib_gid_is_link_local
+ib_gid_is_site_local +ib_gid_set_default +ib_grh_get_ver_class_flow
+ib_grh_set_ver_class_flow +ib_inform_get_dev_id +ib_inform_get_prod_type
+ib_inform_get_qpn +ib_inform_get_resp_time_val +ib_inform_get_trap_num
+ib_inform_get_vend_id +ib_inform_set_dev_id +ib_inform_set_prod_type
+ib_inform_set_qpn +ib_inform_set_resp_time_val +ib_inform_set_trap_num
+ib_inform_set_vend_id +ib_init_dgrm_svc +ib_iou_info_diag_dev_id
+ib_iou_info_option_rom +ib_join_mcast +ib_leave_mcast
+ib_local_mad +ib_mad_init_new +ib_mad_init_response
+ib_mad_is_response +ib_member_get_scope +ib_member_get_scope_state
+ib_member_get_sl_flow_hop +ib_member_get_state +ib_member_set_join_state
+ib_member_set_scope +ib_member_set_scope_state +ib_member_set_sl_flow_hop
+ib_member_set_state +ib_modify_av +ib_modify_ca
+ib_modify_cq +ib_modify_qp +ib_modify_srq
+ib_node_info_get_local_port_num +ib_node_info_get_vendor_id +ib_notice_get_count
+ib_notice_get_dev_id +ib_notice_get_generic +ib_notice_get_prod_type
+ib_notice_get_toggle +ib_notice_get_trap_num +ib_notice_get_type
+ib_notice_get_vend_id +ib_notice_set_count +ib_notice_set_dev_id
+ib_notice_set_generic +ib_notice_set_prod_type +ib_notice_set_toggle
+ib_notice_set_trap_num +ib_notice_set_type +ib_notice_set_vend_id
+ib_open_al +ib_open_ca +ib_path_get_ipd
+ib_path_rec_flow_lbl +ib_path_rec_hop_limit +ib_path_rec_init_local
+ib_path_rec_mtu +ib_path_rec_mtu_sel +ib_path_rec_num_path
+ib_path_rec_pkt_life +ib_path_rec_pkt_life_sel +ib_path_rec_rate
+ib_path_rec_rate_sel +ib_path_rec_set_hop_flow_raw +ib_path_rec_sl
+ib_peek_cq +ib_pfn_cm_apr_cb_t +ib_pfn_cm_drep_cb_t
+ib_pfn_cm_dreq_cb_t +ib_pfn_cm_lap_cb_t +ib_pfn_cm_mra_cb_t
+ib_pfn_cm_rej_cb_t +ib_pfn_cm_rep_cb_t +ib_pfn_cm_req_cb_t
+ib_pfn_cm_rtu_cb_t +ib_pfn_comp_cb_t +ib_pfn_destroy_cb_t
+ib_pfn_event_cb_t +ib_pfn_listen_err_cb_t +ib_pfn_mad_comp_cb_t
+ib_pfn_mcast_cb_t +ib_pfn_pnp_cb_t +ib_pfn_query_cb_t
+ib_pfn_reg_svc_cb_t +ib_pfn_report_cb_t +ib_pfn_sub_cb_t
+ib_pkey_get_base +ib_pkey_is_full_member +ib_pkey_is_invalid
+ib_poll_cq +ib_port_info_compute_rate +ib_port_info_get_init_type
+ib_port_info_get_link_speed_active +ib_port_info_get_link_speed_sup +ib_port_info_get_lmc
+ib_port_info_get_mpb +ib_port_info_get_mtu_cap +ib_port_info_get_neighbor_mtu
+ib_port_info_get_op_vls +ib_port_info_get_port_state +ib_port_info_get_sm_sl
+ib_port_info_get_vl_cap +ib_port_info_set_link_speed_sup +ib_port_info_set_lmc
+ib_port_info_set_mpb +ib_port_info_set_neighbor_mtu +ib_port_info_set_op_vls
+ib_port_info_set_port_state +ib_port_info_set_sm_sl +ib_port_info_set_state_no_change
+ib_port_info_set_timeout +ib_post_recv +ib_post_send
+ib_post_srq_recv +ib_put_mad +ib_query
+ib_query_av +ib_query_ca +ib_query_ca_by_guid
+ib_query_cq +ib_query_mr +ib_query_mw
+ib_query_qp +ib_query_srq +ib_rearm_cq
+ib_rearm_n_cq +ib_reg_ioc +ib_reg_mad_pool
+ib_reg_mad_svc +ib_reg_mem +ib_reg_phys
+ib_reg_pnp +ib_reg_shared +ib_reg_shmid
+ib_reg_svc +ib_reject_ioc +ib_remove_svc_entry
+ib_rereg_mem +ib_rereg_phys +ib_rmpp_is_flag_set
+ib_sa_mad_get_payload_ptr +ib_send_mad +ib_slvl_table_get_vl
+ib_slvl_table_set_vl +ib_sminfo_get_priority +ib_sminfo_get_state
+ib_smp_get_payload_ptr +ib_smp_get_status +ib_smp_init_new
+ib_smp_is_d +ib_smp_is_response +ib_subscribe
+ib_switch_info_clear_state_change +ib_switch_info_get_state_change +ib_sync_destroy
+ib_unsubscribe +ib_vl_arb_element_get_vl +ib_vl_arb_element_set_vl
+ioc_at_slot +mlnx_create_fmr +mlnx_destroy_fmr
+mlnx_map_fmr +mlnx_unmap_fmr
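The index above enumerates the complib and IBAL entry points documented by ROBODoc. As a reading aid, here is a minimal sketch of how a few of the listed complib exports (cl_qlist_head, cl_qlist_end, cl_qlist_next) compose with PARENT_STRUCT, the same pattern the AL sources in this patch use; the record type and its field are hypothetical, not part of the patch:

    #include <complib/cl_qlist.h>

    /* Hypothetical record embedding the complib list item, as the HCA code
     * below does with mlnx_hca_t::list_item. */
    typedef struct _my_rec {
        cl_list_item_t  list_item;  /* must be embedded for cl_qlist use */
        int             value;
    } my_rec_t;

    static int sum_all( cl_qlist_t *p_list )
    {
        cl_list_item_t  *p_item;
        int             sum = 0;

        /* Walk from head to the end sentinel. */
        for( p_item = cl_qlist_head( p_list );
             p_item != cl_qlist_end( p_list );
             p_item = cl_qlist_next( p_item ) )
        {
            sum += PARENT_STRUCT( p_item, my_rec_t, list_item )->value;
        }
        return sum;
    }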
+
diff --git a/branches/Ndi/docs/robo_modules.html b/branches/Ndi/docs/robo_modules.html
new file mode 100644
index 00000000..64e6c96a
--- /dev/null
+++ b/branches/Ndi/docs/robo_modules.html
@@ -0,0 +1,116 @@
+Modules
+Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52
+[Sourcefiles] [Index] [Definitions] [Functions] [Modules] [Structures]
+
+Modules
+
+Access Layer +Asynchronous Processor +Atomic Operations +Byte Swapping +Component Library
+Composite Pool +Constants +Debug Output +Event +Flexi Map
+IOCTL Object +Irqlock +List +Log Provider +Map
+Memory Management +Mutex +Object +Passive Lock +Performance Counters
+Pointer Vector +Pool +Quick Composite Pool +Quick List +Quick Locking Pool
+Quick Map +Quick Pool +RB Map +Request Manager +Spinlock
+System Callback +Thread Pool +Timer +Type Definitions +Vector
+Wait Object
+ + diff --git a/branches/Ndi/docs/robo_sourcefiles.html b/branches/Ndi/docs/robo_sourcefiles.html new file mode 100644 index 00000000..cee58ddf --- /dev/null +++ b/branches/Ndi/docs/robo_sourcefiles.html @@ -0,0 +1,149 @@ + + + + +Sourcefiles + + + + +Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52 +
+

+[Sourcefiles] +[Index] +[Definitions] +[Functions] +[Modules] +[Structures] +

+ + + diff --git a/branches/Ndi/docs/robo_strutures.html b/branches/Ndi/docs/robo_strutures.html new file mode 100644 index 00000000..19552812 --- /dev/null +++ b/branches/Ndi/docs/robo_strutures.html @@ -0,0 +1,381 @@ + + + + +Structures + + + + +Generated from ./inc/ with ROBODoc v4.99.17 on Sun Mar 04 2007 18:17:52 +
+

+[Sourcefiles] +[Index] +[Definitions] +[Functions] +[Modules] +[Structures] +

+

Structures

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
+cl_async_proc_item_t +cl_async_proc_t +cl_cpool_t +cl_fmap_item_t
+cl_fmap_t +cl_list_item_t +cl_list_obj_t +cl_list_t
+cl_map_item_t +cl_map_obj_t +cl_map_t +cl_obj_mgr_t
+cl_obj_rel_t +cl_obj_t +cl_plock_t +cl_pool_item_t
+cl_pool_t +cl_ptr_vector_t +cl_qcpool_t +cl_qlist_t
+cl_qlock_pool_t +cl_qmap_t +cl_qpool_t +cl_rbmap_item_t
+cl_rbmap_t +cl_req_mgr_t +cl_thread_pool_t +cl_vector_t
+ib_apr_info_t +ib_apr_pdata_t +ib_ari_t +ib_async_event_rec_t
+ib_av_attr_t +ib_bind_wr_t +ib_ca_attr_t +ib_cep_listen_t
+ib_ci_op_t +IB_CLASS_CAP_GETSET +IB_CLASS_CAP_TRAP +ib_class_port_info_t
+IB_CLASS_RESP_TIME_MASK +ib_cm_apr_rec_t +ib_cm_apr_t +ib_cm_drep_rec_t
+ib_cm_drep_t +ib_cm_dreq_rec_t +ib_cm_dreq_t +ib_cm_lap_rec_t
+ib_cm_lap_t +ib_cm_listen_t +ib_cm_mra_rec_t +ib_cm_mra_t
+ib_cm_rej_rec_t +ib_cm_rej_t +ib_cm_rep_rec_t +ib_cm_rep_t
+ib_cm_req_rec_t +ib_cm_req_t +ib_cm_rtu_rec_t +ib_cm_rtu_t
+ib_cq_create_t +ib_dgrm_info_t +ib_dm_mad_t +ib_drep_pdata_t
+ib_dreq_pdata_t +ib_event_rec_t +ib_field32_t +ib_gid_pair_t
+ib_gmp_t +ib_grh_t +ib_guid_info_t +ib_guid_pair_t
+ib_inform_info_record_t +ib_ioc_profile_t +ib_iou_info_t +ib_lap_pdata_t
+ib_lft_record_t +ib_lid_pair_t +ib_listen_err_rec_t +ib_local_ds_t
+ib_mad_element_t +ib_mad_svc_t +ib_mad_t +ib_mcast_rec_t
+ib_mcast_req_t +ib_member_rec_t +ib_mr_attr_t +ib_mr_create_t
+ib_mra_pdata_t +ib_node_info_t +ib_path_rec_t +ib_phys_create_t
+ib_phys_range_t +ib_pkey_table_info_t +ib_pnp_ca_rec_t +ib_pnp_ioc_path_rec_t
+ib_pnp_ioc_rec_t +ib_pnp_iou_rec_t +ib_pnp_port_rec_t +ib_pnp_rec_t
+ib_pnp_req_t +ib_port_attr_mod_t +ib_port_attr_t +ib_port_cap_t
+ib_port_counters_t +ib_port_info_t +ib_qp_attr_t +ib_qp_create_t
+ib_qp_mod_t +ib_query_rec_t +ib_query_req_t +ib_recv_wr_t
+ib_reg_svc_rec_t +ib_reg_svc_req_t +ib_rej_pdata_t +ib_rep_pdata_t
+ib_report_rec_t +ib_req_pdata_t +ib_rmpp_mad_t +ib_rtu_pdata_t
+ib_sa_mad_t +ib_send_wr_t +ib_shmid_t +ib_sidr_rep_pdata_t
+ib_sidr_req_pdata_t +ib_slvl_table_record_t +ib_slvl_table_t +ib_sm_info_t
+ib_smp_t +ib_srq_attr_t +ib_sub_rec_t +ib_sub_req_t
+ib_svc_entries_t +ib_svc_entry_t +ib_switch_info_t +ib_user_query_t
+ib_vl_arb_element_t +ib_vl_arb_table_record_t +ib_vl_arb_table_t +ib_wc_t
+mlnx_fmr_create_t
+
diff --git a/branches/Ndi/docs/robodoc.css b/branches/Ndi/docs/robodoc.css
new file mode 100644
index 00000000..44ae2c54
--- /dev/null
+++ b/branches/Ndi/docs/robodoc.css
@@ -0,0 +1,36 @@
+body
+{
+    background-color: #ffffff;
+    color: #000000;
+    font-family: 'Lucida Grande', Verdana,
+                 Geneva, Lucida, Arial,
+                 Helvetica, sans-serif;
+    font-size: 10pt;
+    margin: 2% 5%;
+}
+h1, h2, h3, h4, h5, h6, h7
+{
+    background-color: #dddddd;
+    color: #000000;
+    text-align: right;
+    font-size: 11pt;
+}
+td.even, td.uneven
+{
+    color: #000000;
+    font-size: 10pt;
+}
+td.even
+{
+    background-color: #eeeeee;
+}
+span.SOURCE
+{
+    white-space: pre;
+}
+pre
+{
+    background-color: #ffffff;
+    color: #000000;
+    font-size: 10pt;
+}
diff --git a/branches/Ndi/etc/makebin.bat b/branches/Ndi/etc/makebin.bat
new file mode 100644
index 00000000..d94bfe95
--- /dev/null
+++ b/branches/Ndi/etc/makebin.bat
@@ -0,0 +1,231 @@
+@echo off
+setlocal
+echo %1 %2
+
+if "%1"=="" goto usage
+
+if "%2"=="" goto usage
+
+if not exist %1 goto usage
+if not exist %2 goto usage
+
+if not exist %1\bin\kernel\objfre_wnet_amd64\amd64 goto error1
+if not exist %1\bin\kernel\objfre_wnet_ia64\ia64 goto error2
+if not exist %1\bin\kernel\objfre_wnet_x86\i386 goto error3
+if not exist %1\bin\user\objfre_wnet_amd64\amd64 goto error4
+if not exist %1\bin\user\objfre_wnet_ia64\ia64 goto error5
+if not exist %1\bin\user\objfre_wnet_x86\i386 goto error6
+if not exist %1\bin\user\objchk_wnet_amd64\amd64 goto error7
+if not exist %1\bin\user\objchk_wnet_ia64\ia64 goto error8
+if not exist %1\bin\user\objchk_wnet_x86\i386 goto error9
+
+setlocal
+
+rem
+rem KERNEL MODE
+rem
+
+rem Copy AMD64 drivers
+set bin_dir=%1\bin\kernel\objfre_wnet_amd64\amd64
+set dest_dir=%2\HCA\amd64\
+for %%i in (%bin_dir%\ibbus.sys, %bin_dir%\ibiou.sys, %bin_dir%\mthca.sys, %bin_dir%\mt23108.sys, %bin_dir%\thca.sys) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\ipoib.sys %2\net\amd64\ /y
+xcopy %bin_dir%\vnic.sys %2\net\amd64\ /y
+xcopy %bin_dir%\ibsrp.sys %2\storage\amd64\ /y
+
+rem Copy IA64 drivers
+set bin_dir=%1\bin\kernel\objfre_wnet_ia64\ia64
+set dest_dir=%2\HCA\ia64\
+for %%i in (%bin_dir%\ibbus.sys, %bin_dir%\ibiou.sys, %bin_dir%\mthca.sys, %bin_dir%\mt23108.sys, %bin_dir%\thca.sys) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\ipoib.sys %2\net\ia64\ /y
+xcopy %bin_dir%\vnic.sys %2\net\ia64\ /y
+xcopy %bin_dir%\ibsrp.sys %2\storage\ia64\ /y
+
+rem Copy x86 drivers
+set bin_dir=%1\bin\kernel\objfre_wnet_x86\i386
+set dest_dir=%2\HCA\x86\
+for %%i in (%bin_dir%\ibbus.sys, %bin_dir%\ibiou.sys, %bin_dir%\mthca.sys, %bin_dir%\mt23108.sys, %bin_dir%\thca.sys) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\ipoib.sys %2\net\x86\ /y
+xcopy %bin_dir%\vnic.sys %2\net\x86\ /y
+xcopy %bin_dir%\ibsrp.sys %2\storage\x86\ /y
+
+rem
+rem USER MODE
+rem
+
+set bin_dir=%1\bin\user\objchk_wnet_amd64\amd64
+set dest_dir=%2\HCA\amd64\
+for %%i in (%bin_dir%\ibald.dll, %bin_dir%\complibd.dll, %bin_dir%\mthcaud.dll, %bin_dir%\mt23108ud.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\*.exe %2\tools\amd64\debug\ /y
+xcopy %bin_dir%\dapld.dll %2\DAPL\amd64\ /y
+xcopy %bin_dir%\datd.dll %2\DAPL\amd64\ /y
+
+rem Copy IA64 drivers
+set bin_dir=%1\bin\user\objchk_wnet_ia64\ia64
+set dest_dir=%2\HCA\ia64\
+for %%i in (%bin_dir%\ibald.dll, %bin_dir%\complibd.dll, %bin_dir%\mthcaud.dll, %bin_dir%\mt23108ud.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\*.exe %2\tools\ia64\debug\ /y
+xcopy %bin_dir%\dapld.dll %2\DAPL\ia64\ /y
+xcopy %bin_dir%\datd.dll %2\DAPL\ia64\ /y
+
+rem Copy x86 drivers
+set bin_dir=%1\bin\user\objchk_wnet_x86\i386
+set dest_dir=%2\HCA\x86\
+for %%i in (%bin_dir%\ibald.dll, %bin_dir%\complibd.dll, %bin_dir%\mthcaud.dll, %bin_dir%\mt23108ud.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\*.exe %2\tools\x86\debug\ /y
+xcopy %bin_dir%\dapld.dll %2\DAPL\x86\ /y
+xcopy %bin_dir%\datd.dll %2\DAPL\x86\ /y
+
+rem WOW64 DLLs
+copy /B %bin_dir%\installsp.exe %2\tools\x86\debug /y
+copy /B %bin_dir%\ibald.dll %2\HCA\amd64\ibal32d.dll /y
+copy /B %bin_dir%\complibd.dll %2\HCA\amd64\cl32d.dll /y
+copy /B %bin_dir%\ibald.dll %2\HCA\ia64\ibal32d.dll /y
+copy /B %bin_dir%\complibd.dll %2\HCA\ia64\cl32d.dll /y
+copy /B %bin_dir%\mthcaud.dll %2\HCA\amd64\mthca32d.dll /y
+copy /B %bin_dir%\mthcaud.dll %2\HCA\ia64\mthca32d.dll /y
+copy /B %bin_dir%\mt23108ud.dll %2\HCA\amd64\mtuvp32d.dll /y
+copy /B %bin_dir%\mt23108ud.dll %2\HCA\ia64\mtuvp32d.dll /y
+copy /B %bin_dir%\dapld.dll %2\DAPL\amd64\dapl32d.dll /y
+copy /B %bin_dir%\datd.dll %2\DAPL\amd64\dat32d.dll /y
+copy /B %bin_dir%\dapld.dll %2\DAPL\ia64\dapl32d.dll /y
+copy /B %bin_dir%\datd.dll %2\DAPL\ia64\dat32d.dll /y
+
+set bin_dir=%1\bin\user\objfre_wnet_amd64\amd64
+set dest_dir=%2\HCA\amd64\
+for %%i in (%bin_dir%\ibal.dll, %bin_dir%\complib.dll, %bin_dir%\mthcau.dll, %bin_dir%\mt23108u.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\ibwsd.dll %2\net\amd64\ /y
+xcopy %bin_dir%\installsp.exe %2\net\amd64\ /y
+xcopy %bin_dir%\*.exe %2\tools\amd64\release\ /y
+xcopy %bin_dir%\dapl.dll %2\DAPL\amd64\ /y
+xcopy %bin_dir%\dat.dll %2\DAPL\amd64\ /y
+xcopy %bin_dir%\mtcr.dll %2\FwTools\amd64\ /y
+xcopy %bin_dir%\flint.exe %2\FwTools\amd64\ /y
+xcopy %bin_dir%\mst.exe %2\FwTools\amd64\ /y
+
+rem Copy IA64 drivers
+set bin_dir=%1\bin\user\objfre_wnet_ia64\ia64
+set dest_dir=%2\HCA\ia64\
+for %%i in (%bin_dir%\ibal.dll, %bin_dir%\complib.dll, %bin_dir%\mthcau.dll, %bin_dir%\mt23108u.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\ibwsd.dll %2\net\ia64\ /y
+xcopy %bin_dir%\installsp.exe %2\net\ia64\ /y
+xcopy %bin_dir%\*.exe %2\tools\ia64\release\ /y
+xcopy %bin_dir%\dapl.dll %2\DAPL\ia64\ /y
+xcopy %bin_dir%\dat.dll %2\DAPL\ia64\ /y
+xcopy %bin_dir%\mtcr.dll %2\FwTools\ia64\ /y
+xcopy %bin_dir%\flint.exe %2\FwTools\ia64\ /y
+xcopy %bin_dir%\mst.exe %2\FwTools\ia64\ /y
+
+rem Copy x86 drivers
+set bin_dir=%1\bin\user\objfre_wnet_x86\i386
+set dest_dir=%2\HCA\x86\
+for %%i in (%bin_dir%\ibal.dll, %bin_dir%\complib.dll, %bin_dir%\mthcau.dll, %bin_dir%\mt23108u.dll, %bin_dir%\IbInstaller.dll) do xcopy %%i %dest_dir% /y
+xcopy %bin_dir%\*.exe %2\tools\x86\release\ /y
+xcopy %bin_dir%\dapl.dll %2\DAPL\x86\ /y
+xcopy %bin_dir%\dat.dll %2\DAPL\x86\ /y
+xcopy %bin_dir%\mtcr.dll %2\FwTools\x86\ /y
+xcopy %bin_dir%\flint.exe %2\FwTools\x86\ /y
+xcopy %bin_dir%\mst.exe %2\FwTools\x86\ /y
+
+rem WOW64 DLLs
+xcopy %bin_dir%\ibwsd.dll %2\net\x86\ /y
+xcopy %bin_dir%\installsp.exe %2\net\x86\ /y
+xcopy %bin_dir%\installsp.exe %2\tools\x86\release /y
+copy /B %bin_dir%\ibwsd.dll %2\net\amd64\ibwsd32.dll /y
+copy /B %bin_dir%\ibwsd.dll %2\net\ia64\ibwsd32.dll /y
+copy /B %bin_dir%\ibal.dll %2\HCA\amd64\ibal32.dll /y
+copy /B %bin_dir%\complib.dll %2\HCA\amd64\cl32.dll /y
+copy /B %bin_dir%\ibal.dll %2\HCA\ia64\ibal32.dll /y
+copy /B %bin_dir%\complib.dll %2\HCA\ia64\cl32.dll /y
+copy /B %bin_dir%\mthcau.dll %2\HCA\amd64\mthca32.dll /y
+copy /B %bin_dir%\mthcau.dll %2\HCA\ia64\mthca32.dll /y
+copy /B %bin_dir%\mt23108u.dll %2\HCA\amd64\mtuvp32.dll /y
+copy /B %bin_dir%\mt23108u.dll %2\HCA\ia64\mtuvp32.dll /y
+copy /B %bin_dir%\dapl.dll %2\DAPL\amd64\dapl32.dll /y
+copy /B %bin_dir%\dat.dll %2\DAPL\amd64\dat32.dll /y
+copy /B %bin_dir%\dapl.dll %2\DAPL\ia64\dapl32.dll /y
+copy /B %bin_dir%\dat.dll %2\DAPL\ia64\dat32.dll /y
+
+rem Copy INF files to arch specific dirs - WHQL reasons
+xcopy %1\hw\mthca\kernel\mthca.inf %2\HCA\amd64 /y
+xcopy %1\hw\mthca\kernel\mthca.inf %2\HCA\x86 /y
+xcopy %1\hw\mthca\kernel\mthca.inf %2\HCA\ia64 /y
+xcopy %1\hw\mt23108\kernel\infinihost.inf %2\HCA\amd64 /y
+xcopy %1\hw\mt23108\kernel\infinihost.inf %2\HCA\x86 /y
+xcopy %1\hw\mt23108\kernel\infinihost.inf %2\HCA\ia64 /y
+xcopy %1\core\bus\kernel\ib_bus.inf %2\HCA\amd64 /y
+xcopy %1\core\bus\kernel\ib_bus.inf %2\HCA\x86 /y
+xcopy %1\core\bus\kernel\ib_bus.inf %2\HCA\ia64 /y
+xcopy %1\ulp\ipoib\kernel\netipoib.inf %2\net\amd64 /y
+xcopy %1\ulp\ipoib\kernel\netipoib.inf %2\net\x86 /y
+xcopy %1\ulp\ipoib\kernel\netipoib.inf %2\net\ia64 /y
+xcopy %1\ulp\inic\kernel\netvnic.inf %2\net\amd64 /y
+xcopy %1\ulp\inic\kernel\netvnic.inf %2\net\x86 /y
+xcopy %1\ulp\inic\kernel\netvnic.inf %2\net\ia64 /y
+rem The SRP .inf file needs mods before an arch-specific dest change.
+xcopy %1\ulp\srp\kernel\ib_srp.inf %2\storage\ /y
+
+xcopy %1\bin %2\bin\ /ey
+xcopy %1\bin\*.pdb %2\symbols\ /ey
+
+pushd %2
+del /s bin\*.pdb
+del /s ibtest*
+del /s pingpong*
+del /s *Sdp*
+del /s Select*
+del /s usrp*
+del /s *.lib
+del /s *.exp
+popd
+
+rem Copy DAT header files
+
+if exist %1\ulp\dapl\dat\include\dat (
+    pushd %1\ulp\dapl\dat\include\dat
+
+    xcopy dat.h %2\DAPL /Y
+    xcopy dat_error.h %2\DAPL /Y
+    xcopy dat_platform_specific.h %2\DAPL /Y
+    xcopy dat_redirection.h %2\DAPL /Y
+    xcopy dat_registry.h %2\DAPL /Y
+    xcopy dat_vendor_specific.h %2\DAPL /Y
+    xcopy udat.h %2\DAPL /Y
+    xcopy udat_config.h %2\DAPL /Y
+    xcopy udat_redirection.h %2\DAPL /Y
+    xcopy udat_vendor_specific.h %2\DAPL /Y
+    popd
+) else (
+    echo Skip copy of DAT header files; files not present.
+)
+goto end
+
+:usage
+echo makebin src dest
+echo   src   base directory.
+echo   dest  directory in which to build the installable binary tree.
+goto end
+
+:error1
+echo %1\bin\kernel\objfre_wnet_amd64\amd64 missing
+goto end
+:error2
+echo %1\bin\kernel\objfre_wnet_ia64\ia64 missing
+goto end
+:error3
+echo %1\bin\kernel\objfre_wnet_x86\i386 missing
+goto end
+:error4
+echo %1\bin\user\objfre_wnet_amd64\amd64 missing
+goto end
+:error5
+echo %1\bin\user\objfre_wnet_ia64\ia64 missing
+goto end
+:error6
+echo %1\bin\user\objfre_wnet_x86\i386 missing
+goto end
+:error7
+echo %1\bin\user\objchk_wnet_amd64\amd64 missing
+goto end
+:error8
+echo %1\bin\user\objchk_wnet_ia64\ia64 missing
+goto end
+:error9
+echo %1\bin\user\objchk_wnet_x86\i386 missing
+
+:end
+endlocal
diff --git a/branches/Ndi/etc/wpp/ALTraceRt.cmd b/branches/Ndi/etc/wpp/ALTraceRt.cmd
new file mode 100644
index 00000000..e3846f2a
--- /dev/null
+++ b/branches/Ndi/etc/wpp/ALTraceRt.cmd
@@ -0,0 +1,14 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracepdb.exe -f ibal.pdb -p tmf
+tracepdb.exe -f ibbus.pdb -p tmf
+
+set TRACE_FORMAT_PREFIX=%%7!08d! %%2!s!: %%8!04x!:
+
+tracelog -stop ALDetailedRt
+
+tracelog -start ALDetailedRt -ls -guid #B199CE55-F8BF-4147-B119-DACD1E5987A6 -flag 0x0f00 -level 5 -rt -ft 1
+tracelog -enable ALDetailedRt -guid #99DC84E3-B106-431e-88A6-4DD20C9BBDE3 -flag 0x0f00 -level 5
+tracefmt.exe -rt ALDetailedRt -Displayonly -p tmf -ods
+
+tracelog -stop ALDetailedRt
diff --git a/branches/Ndi/etc/wpp/CreateTrace.cmd b/branches/Ndi/etc/wpp/CreateTrace.cmd
new file mode 100644
index 00000000..668f36df
--- /dev/null
+++ b/branches/Ndi/etc/wpp/CreateTrace.cmd
@@ -0,0 +1,12 @@
+set DDK_HOME=\\tzachid0\c$\Winddk\3790.1830
+
+if %PROCESSOR_ARCHITECTURE% == x86 (set ARCH_PATH=i386) else set ARCH_PATH=AMD64
+
+mkdir %SystemDrive%\trace
+copy %DDK_HOME%\tools\tracing\%ARCH_PATH%\*.exe %SystemDrive%\trace
+copy %DDK_HOME%\tools\tracing\%ARCH_PATH%\*.dll %SystemDrive%\trace
+
+copy %DDK_HOME%\tools\tracing\i386\tracepdb.exe %SystemDrive%\trace
+
+copy %DDK_HOME%\bin\x86\mspdb70.dll %SystemDrive%\trace
+copy %DDK_HOME%\bin\x86\msvcr70.dll %SystemDrive%\trace
\ No newline at end of file
diff --git a/branches/Ndi/etc/wpp/IPoIBTraceRt.cmd b/branches/Ndi/etc/wpp/IPoIBTraceRt.cmd
new file mode 100644
index 00000000..b8fb78c8
--- /dev/null
+++ b/branches/Ndi/etc/wpp/IPoIBTraceRt.cmd
@@ -0,0 +1,13 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracepdb.exe -f ipoib.pdb -p tmf
+
+set TRACE_FORMAT_PREFIX=%%7!08d! %%2!s!: %%8!04x!:
+
+tracelog -stop IPoIBdRt
+
+tracelog -start IPoIBdRt -ls -guid #3F9BC73D-EB03-453a-B27B-20F9A664211A -flag 0x0fff -level 5 -rt -ft 1
+tracefmt.exe -rt IPoIBdRt -Displayonly -p tmf -ods
+
+tracelog -stop IPoIBdRt
diff --git a/branches/Ndi/etc/wpp/MTHCATraceRt.cmd b/branches/Ndi/etc/wpp/MTHCATraceRt.cmd
new file mode 100644
index 00000000..f2e3fed4
--- /dev/null
+++ b/branches/Ndi/etc/wpp/MTHCATraceRt.cmd
@@ -0,0 +1,14 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracepdb.exe -f mthca.pdb -p tmf
+tracepdb.exe -f mthcau.pdb -p tmf
+
+set TRACE_FORMAT_PREFIX=%%7!08d! %%2!s!: %%8!04x!:
+
+tracelog -stop MTHCALogdRt
+
+tracelog -start MTHCALogdRt -ls -guid #8BF1F640-63FE-4743-B9EF-FA38C695BFDE -flag 0x0f00 -level 5 -rt -ft 1
+tracelog -enable MTHCALogdRt -guid #2C718E52-0D36-4bda-9E58-0FC601818D8F -flag 0x0f00 -level 5
+tracefmt.exe -rt MTHCALogdRt -Displayonly -p tmf -ods
+
+tracelog -stop MTHCALogdRt
diff --git a/branches/Ndi/etc/wpp/SDPTraceRt.cmd b/branches/Ndi/etc/wpp/SDPTraceRt.cmd
new file mode 100644
index 00000000..78249dc9
--- /dev/null
+++ b/branches/Ndi/etc/wpp/SDPTraceRt.cmd
@@ -0,0 +1,10 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracelog -stop SdpDetailedRt
+
+tracelog -start SdpDetailedRt -ls -guid #D6FA8A24-9457-455d-9B49-3C1E5D195558 -flag 0xffff -level 4 -rt -ft 1
+tracelog -enable SdpDetailedRt -guid #2D4C03CC-E071-48e2-BDBD-526A0D69D6C9 -flag 0xffff -level 4
+tracefmt.exe -rt SdpDetailedRt -Displayonly -p tmf -ods
+
+tracelog -stop SdpDetailedRt
+
diff --git a/branches/Ndi/etc/wpp/StartSdpTrace.cmd b/branches/Ndi/etc/wpp/StartSdpTrace.cmd
new file mode 100644
index 00000000..2f193ded
--- /dev/null
+++ b/branches/Ndi/etc/wpp/StartSdpTrace.cmd
@@ -0,0 +1,7 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracelog -stop SdpDetailedRt
+
+tracelog -start SdpDetailedRt -ls -UseCPUCycle -guid #D6FA8A24-9457-455d-9B49-3C1E5D195558 -flag 0xffff -level 5
+tracelog -enable SdpDetailedRt -guid #2D4C03CC-E071-48e2-BDBD-526A0D69D6C9 -flag 0xffff -level 5
+
diff --git a/branches/Ndi/etc/wpp/StartTrace.cmd b/branches/Ndi/etc/wpp/StartTrace.cmd
new file mode 100644
index 00000000..0617de47
--- /dev/null
+++ b/branches/Ndi/etc/wpp/StartTrace.cmd
@@ -0,0 +1,22 @@
+rem MTHCA
+del c:\WinIB1.etl
+del c:\WinIB2.etl
+tracelog -start MTHCALog -ls -guid #8BF1F640-63FE-4743-B9EF-FA38C695BFDE -flag 0x1 -level 2 -UseCPUCycle -f c:\WInIB1.etl
+tracelog -enable MTHCALog -guid #2C718E52-0D36-4bda-9E58-0FC601818D8F -flag 0x1 -level 2
+
+rem IBAL
+tracelog -start IBALLog -ls -guid #B199CE55-F8BF-4147-B119-DACD1E5987A6 -flag 0x1 -level 2 -UseCPUCycle -f c:\WInIB2.etl
+tracelog -enable IBALLog -guid #99DC84E3-B106-431e-88A6-4DD20C9BBDE3 -flag 0x1 -level 2
+
+rem SDP
+rem tracelog -start SDPLog -ls -guid #D6FA8A24-9457-455d-9B49-3C1E5D195558 -flag 0xffff -level 32 -UseCPUCycle
+rem tracelog -enable SDPLOg -guid #2D4C03CC-E071-48e2-BDBD-526A0D69D6C9 -flag 0xffff -level 32
+
+rem SDP
+rem tracelog -start SDPLog -ls -guid #D6FA8A24-9457-455d-9B49-3C1E5D195558 -flag 0xffff -level 32 -UseCPUCycle
+rem tracelog -enable SDPLOg -guid #2D4C03CC-E071-48e2-BDBD-526A0D69D6C9 -flag 0xffff -level 32
+
diff --git a/branches/Ndi/etc/wpp/StopSdpTrace.cmd b/branches/Ndi/etc/wpp/StopSdpTrace.cmd
new file mode 100644
index 00000000..81533e31
--- /dev/null
+++ b/branches/Ndi/etc/wpp/StopSdpTrace.cmd
@@ -0,0 +1,10 @@
+rem level=32 => Highest  4 = information, 3 = warning
+
+tracelog -stop SdpDetailedRt
+
+set TRACE_FORMAT_PREFIX=%%7!07d! %%2!s! %%8!04x!.%%3!04x!: %%4!s!: %%!COMPNAME! %%!FUNC!
+
+tracefmt.exe -seq -p tmf C:\LogFile.Etl -nosummary -hires -o result.txt
+
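These tracelog/tracefmt sessions capture events that the drivers in this patch emit through their debug macros. For orientation, a schematic C call site is sketched below, using the CL_TRACE macro seen throughout hca_data.c later in this patch; the specific flag value and message are illustrative, and the mapping of a provider GUID to a binary is fixed at build time by the tracing instrumentation, not by this snippet:

    #include <complib/cl_debug.h>

    extern u_int32_t g_mlnx_dbg_lvl;    /* module-wide level mask */

    static void example_trace_point( int cqn )
    {
        /* Emitted only when MLNX_DBG_INFO is enabled in g_mlnx_dbg_lvl; the
         * -flag/-level switches passed to tracelog above gate what the
         * provider actually delivers into the ETW session. */
        CL_TRACE( MLNX_DBG_INFO, g_mlnx_dbg_lvl,
            ("completion on cq 0x%x\n", cqn) );
    }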
diff --git a/branches/Ndi/etc/wpp/StopTrace.cmd b/branches/Ndi/etc/wpp/StopTrace.cmd
new file mode 100644
index 00000000..daf7ca5a
--- /dev/null
+++ b/branches/Ndi/etc/wpp/StopTrace.cmd
@@ -0,0 +1,8 @@
+tracelog.exe -stop MTHCALog
+tracelog.exe -stop IBALLog
+rem tracelog.exe -stop SDPLOg
+
+set TRACE_FORMAT_PREFIX=%%7!08d! %%!LEVEL! %%2!s!: %%8!04x!.%%3!04x!: %%4!s!: %%!FUNC!:
+
+tracefmt.exe -p tmf -display -v -displayonly -nosummary | sort > aaa
+start notepad aaa
diff --git a/branches/Ndi/hw/dirs b/branches/Ndi/hw/dirs
new file mode 100644
index 00000000..f65d5562
--- /dev/null
+++ b/branches/Ndi/hw/dirs
@@ -0,0 +1,3 @@
+DIRS=\
+    mt23108 \
+    mthca
diff --git a/branches/Ndi/hw/mt23108/dirs b/branches/Ndi/hw/mt23108/dirs
new file mode 100644
index 00000000..9adac806
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/dirs
@@ -0,0 +1,4 @@
+DIRS=\
+    vapi \
+    kernel \
+    user
diff --git a/branches/Ndi/hw/mt23108/kernel/Makefile b/branches/Ndi/hw/mt23108/kernel/Makefile
new file mode 100644
index 00000000..bffacaa7
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/kernel/Makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source
+# file to this component.  This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
+#
+
+!INCLUDE ..\..\..\inc\openib.def
diff --git a/branches/Ndi/hw/mt23108/kernel/SOURCES b/branches/Ndi/hw/mt23108/kernel/SOURCES
new file mode 100644
index 00000000..cdef4833
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/kernel/SOURCES
@@ -0,0 +1,58 @@
+TARGETNAME=thca
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DRIVER
+
+SOURCES= hca_driver.c \
+    hca_data.c \
+    hca_direct.c \
+    hca_mcast.c \
+    hca_memory.c \
+    hca_verbs.c \
+    hca_smp.c \
+    hca.rc
+
+MT_HOME=..\vapi
+
+INCLUDES=\
+    ..\..\..\inc;..\..\..\inc\kernel; \
+    $(MT_HOME)\tavor_arch_db; \
+    $(MT_HOME)\Hca\verbs; \
+    $(MT_HOME)\Hca\verbs\common; \
+    $(MT_HOME)\mlxsys\mtl_types; \
+    $(MT_HOME)\mlxsys\mtl_types\win; \
+    $(MT_HOME)\mlxsys\mtl_types\win\win; \
+    $(MT_HOME)\mlxsys\mtl_common; \
+    $(MT_HOME)\mlxsys\mtl_common\os_dep\win; \
+    $(MT_HOME)\mlxsys\mosal; \
+    $(MT_HOME)\mlxsys\mosal\os_dep\win; \
+    $(MT_HOME)\Hca\hcahal; \
+    $(MT_HOME)\Hca\hcahal\tavor; \
+    $(MT_HOME)\Hca\hcahal\tavor\os_dep\win; \
+    $(MT_HOME)\Hca\hcahal\tavor\thhul_hob; \
+    $(MT_HOME)\Hca\hcahal\tavor\thhul_pdm; \
+    $(MT_HOME)\Hca\hcahal\tavor\thhul_cqm; \
+    $(MT_HOME)\Hca\hcahal\tavor\thhul_qpm; \
+    $(MT_HOME)\Hca\hcahal\tavor\thhul_mwm; \
+    $(MT_HOME)\Hca\hcahal\tavor\util; \
+    $(MT_HOME)\Hca\hcahal\tavor\thh_hob; \
+    $(MT_HOME)\Hca\hcahal\tavor\cmdif; \
+    $(MT_HOME)\Hca\hcahal\tavor\eventp; \
+    $(MT_HOME)\Hca\hcahal\tavor\uar; \
+    $(MT_HOME)\Hca\hcahal\tavor\mrwm; \
+    $(MT_HOME)\Hca\hcahal\tavor\udavm; \
+    $(MT_HOME)\Hca\hcahal\tavor\mcgm; \
+    $(MT_HOME)\Hca\hcahal\tavor\ddrmm; \
+    $(MT_HOME)\Hca\hcahal\tavor\uldm; \
+    $(MT_HOME)\mlxsys\os_dep\win\tdriver;
+
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__MSC__ \
+    -D__KERNEL__ -D__WIN__ -D__LITTLE_ENDIAN -DMT_LITTLE_ENDIAN \
+    -DUSE_RELAY_MOD_NAME -DMAX_ERROR=4 -DIVAPI_THH \
+    -DMTL_MODULE=HCA
+
+TARGETLIBS= \
+    $(TARGETPATH)\*\complib.lib \
+    $(TARGETPATH)\*\mt23108.lib \
+    $(DDK_LIB_PATH)\wdmguid.lib
+
+MSC_WARNING_LEVEL= /W4
diff --git a/branches/Ndi/hw/mt23108/kernel/hca.rc b/branches/Ndi/hw/mt23108/kernel/hca.rc
new file mode 100644
index 00000000..9dfda9eb
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/kernel/hca.rc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include <oib_ver.h>
+
+#define VER_FILETYPE                VFT_DRV
+#define VER_FILESUBTYPE             VFT2_UNKNOWN
+
+#ifdef _DEBUG_
+#define VER_FILEDESCRIPTION_STR     "Tavor HCA Filter Driver (Debug)"
+#else
+#define VER_FILEDESCRIPTION_STR     "Tavor HCA Filter Driver"
+#endif
+
+#define VER_INTERNALNAME_STR        "thca.sys"
+#define VER_ORIGINALFILENAME_STR    "thca.sys"
+
+#include <common.ver>
diff --git a/branches/Ndi/hw/mt23108/kernel/hca_data.c b/branches/Ndi/hw/mt23108/kernel/hca_data.c
new file mode 100644
index 00000000..0a645663
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/kernel/hca_data.c
@@ -0,0 +1,2200 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include "hca_data.h"
+#include "hca_debug.h"
+
+static cl_spinlock_t hob_lock;
+
+#if 1
+u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR;
+#else
+u_int32_t g_mlnx_dbg_lvl = CL_DBG_ERROR |
+    MLNX_DBG_QPN |
+    MLNX_DBG_MEM |
+    MLNX_DBG_INFO |
+    MLNX_DBG_TRACE |
+    // MLNX_DBG_DIRECT |
+    0;
+#endif
+
+u_int32_t g_mlnx_dpc2thread = 0;
+
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Proprietary");
+#endif
+
+MODULE_PARM(g_mlnx_dbg_lvl, "i");
+MODULE_PARM(g_mlnx_dpc2thread, "i");
+
+cl_qlist_t mlnx_hca_list;
+//mlnx_hca_t mlnx_hca_array[MLNX_MAX_HCA];
+//uint32_t mlnx_num_hca = 0;
+
+mlnx_hob_t mlnx_hob_array[MLNX_NUM_HOBKL];      // kernel HOB - one per HCA (cmdif access)
+
+mlnx_hobul_t *mlnx_hobul_array[MLNX_NUM_HOBUL]; // kernel HOBUL - one per HCA (kar access)
+
+/* User verb library name */
+/* TODO: Move to linux osd file.
+char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"libmlnx_uvp.so"};
+*/
+
+static void
+mlnx_async_dpc(
+    IN  cl_async_proc_item_t    *async_item_p );
+
+#if MLNX_COMP_MODEL
+static void
+mlnx_comp_dpc(
+    IN  PRKDPC  p_dpc,
+    IN  void    *context,
+    IN  void    *pfn_comp_cb,
+    IN  void    *unused );
+#else
+static void
+mlnx_comp_dpc(
+    IN  cl_async_proc_item_t    *async_item_p );
+#endif
+
+// ### Callback Interface
+static void
+mlnx_comp_cb(
+    IN  HH_hca_hndl_t   hh_hndl,
+    IN  HH_cq_hndl_t    hh_cq,
+    IN  void            *private_data);
+
+static void
+mlnx_async_cb(
+    IN  HH_hca_hndl_t       hh_hndl,
+    IN  HH_event_record_t   *hh_er_p,
+    IN  void                *private_data);
+
+/////////////////////////////////////////////////////////
+// ### HCA
+/////////////////////////////////////////////////////////
+void
+mlnx_hca_insert(
+    IN  mlnx_hca_t  *p_hca )
+{
+    cl_spinlock_acquire( &hob_lock );
+    cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item );
+    cl_spinlock_release( &hob_lock );
+}
+
+void
+mlnx_hca_remove(
+    IN  mlnx_hca_t  *p_hca )
+{
+    cl_spinlock_acquire( &hob_lock );
+    cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item );
+    cl_spinlock_release( &hob_lock );
+}
+
+mlnx_hca_t*
+mlnx_hca_from_guid(
+    IN  ib_net64_t  guid )
+{
+    cl_list_item_t  *p_item;
+    mlnx_hca_t      *p_hca = NULL;
+
+    cl_spinlock_acquire( &hob_lock );
+    p_item = cl_qlist_head( &mlnx_hca_list );
+    while( p_item != cl_qlist_end( &mlnx_hca_list ) )
+    {
+        p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
+        if( p_hca->guid == guid )
+            break;
+        p_item = cl_qlist_next( p_item );
+        p_hca = NULL;
+    }
+    cl_spinlock_release( &hob_lock );
+    return p_hca;
+}
+
+mlnx_hca_t*
+mlnx_hca_from_hh_hndl(
+    IN  HH_hca_hndl_t   hh_hndl )
+{
+    cl_list_item_t  *p_item;
+    mlnx_hca_t      *p_hca = NULL;
+
+    cl_spinlock_acquire( &hob_lock );
+    p_item = cl_qlist_head( &mlnx_hca_list );
+    while( p_item != cl_qlist_end( &mlnx_hca_list ) )
+    {
+        p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item );
+        if( p_hca->hh_hndl == hh_hndl )
+            break;
+        p_item = cl_qlist_next( p_item );
+        p_hca = NULL;
+    }
+    cl_spinlock_release( &hob_lock );
+    return p_hca;
+}
+
+
+/*
+void
+mlnx_names_from_guid(
+    IN  ib_net64_t  guid,
+    OUT char        **hca_name_p,
+    OUT char        **dev_name_p)
+{
+    unsigned int idx;
+
+    if (!hca_name_p) return;
+    if (!dev_name_p) return;
+
+    for (idx = 0; idx < mlnx_num_hca; idx++)
+    {
+        if (mlnx_hca_array[idx].ifx.guid == guid)
+        {
+            *hca_name_p = mlnx_hca_array[idx].hca_name_p;
+            *dev_name_p = mlnx_hca_array[idx].dev_name_p;
+        }
+    }
+}
+*/
+
+/////////////////////////////////////////////////////////
+// ### HOB
+/////////////////////////////////////////////////////////
+cl_status_t
+mlnx_hobs_init( void )
+{
+    u_int32_t idx;
+
+    cl_qlist_init( &mlnx_hca_list );
+
+    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
+    {
+        mlnx_hob_array[idx].hh_hndl = NULL;
+        mlnx_hob_array[idx].comp_cb_p = NULL;
+        mlnx_hob_array[idx].async_cb_p = NULL;
+        mlnx_hob_array[idx].ca_context = NULL;
+        mlnx_hob_array[idx].async_proc_mgr_p = NULL;
+        mlnx_hob_array[idx].cl_device_h = NULL;
+        // mlnx_hob_array[idx].port_lmc_p = NULL;
+        mlnx_hob_array[idx].index = idx;
+        mlnx_hob_array[idx].mark = E_MARK_INVALID;
+    }
+    return cl_spinlock_init( &hob_lock );
+}
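The helpers above give linear-time HCA lookups keyed by node GUID or HH handle, all serialized by hob_lock. A small usage sketch follows; the caller and error choice are hypothetical, not taken from this patch:

    /* Hypothetical caller: resolve an HCA by node GUID before opening it. */
    static ib_api_status_t open_by_guid( ib_net64_t guid )
    {
        mlnx_hca_t *p_hca = mlnx_hca_from_guid( guid );

        if( !p_hca )
            return IB_INVALID_CA_HANDLE; /* never added via mlnx_hca_insert */

        /* ... proceed to allocate a kernel HOB slot for this HCA ... */
        return IB_SUCCESS;
    }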
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_hobs_insert(
+    IN  mlnx_hca_t  *p_hca,
+    OUT mlnx_hob_t  **hob_pp)
+{
+    u_int32_t idx;
+    ib_api_status_t status = IB_ERROR;
+    mlnx_cache_t    *p_cache;
+
+    p_cache = (mlnx_cache_t*)cl_pzalloc( sizeof(mlnx_cache_t) * 2 );
+    if( !p_cache )
+        return IB_INSUFFICIENT_MEMORY;
+
+    cl_spinlock_acquire(&hob_lock);
+    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
+    {
+        if (!mlnx_hob_array[idx].hh_hndl)
+        {
+            mlnx_hob_array[idx].hh_hndl = p_hca->hh_hndl;
+            mlnx_hob_array[idx].mark = E_MARK_CA;
+            if (hob_pp) *hob_pp = &mlnx_hob_array[idx];
+            status = IB_SUCCESS;
+            break;
+        }
+    }
+    cl_spinlock_release(&hob_lock);
+
+    if (IB_SUCCESS == status)
+        (*hob_pp)->cache = p_cache;
+    else
+        cl_free( p_cache );
+
+    return status;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_hobs_set_cb(
+    IN  mlnx_hob_t          *hob_p,
+    IN  ci_completion_cb_t  comp_cb_p,
+    IN  ci_async_event_cb_t async_cb_p,
+    IN  const void* const   ib_context)
+{
+    cl_status_t cl_status;
+
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    // Setup the callbacks
+    if (!hob_p->async_proc_mgr_p)
+    {
+        hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) );
+        if( !hob_p->async_proc_mgr_p )
+        {
+            return IB_INSUFFICIENT_MEMORY;
+        }
+        cl_async_proc_construct( hob_p->async_proc_mgr_p );
+        cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" );
+        if( cl_status != CL_SUCCESS )
+        {
+            cl_async_proc_destroy( hob_p->async_proc_mgr_p );
+            cl_free(hob_p->async_proc_mgr_p);
+            hob_p->async_proc_mgr_p = NULL;
+            return IB_INSUFFICIENT_RESOURCES;
+        }
+    }
+
+    if (hob_p->hh_hndl)
+    {
+        THH_hob_set_async_eventh(hob_p->hh_hndl,
+            mlnx_async_cb,
+            &hob_p->index);     // This is the context our CB wants to receive
+        THH_hob_set_comp_eventh( hob_p->hh_hndl,
+            mlnx_comp_cb,
+            &hob_p->index);     // This is the context our CB wants to receive
+        hob_p->comp_cb_p = comp_cb_p;
+        hob_p->async_cb_p = async_cb_p;
+        hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL
+        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hca_idx %d context 0x%p\n", hob_p - mlnx_hob_array, ib_context));
+        return IB_SUCCESS;
+    }
+    return IB_ERROR;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_hobs_get_context(
+    IN  mlnx_hob_t  *hob_p,
+    OUT void        **context_p)
+{
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    if (hob_p->hh_hndl)
+    {
+        if (context_p) *context_p = &hob_p->index;
+        return IB_SUCCESS;
+    }
+    return IB_ERROR;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_hobs_remove(
+    IN  mlnx_hob_t  *hob_p)
+{
+    cl_async_proc_t *p_async_proc;
+    mlnx_cache_t    *p_cache;
+
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    cl_spinlock_acquire( &hob_lock );
+
+    hob_p->mark = E_MARK_INVALID;
+
+    p_async_proc = hob_p->async_proc_mgr_p;
+    hob_p->async_proc_mgr_p = NULL;
+
+    p_cache = hob_p->cache;
+    hob_p->cache = NULL;
+
+    hob_p->hh_hndl = NULL;
+    hob_p->comp_cb_p = NULL;
+    hob_p->async_cb_p = NULL;
+    hob_p->ca_context = NULL;
+    hob_p->cl_device_h = NULL;
+
+    cl_spinlock_release( &hob_lock );
+
+    if( p_async_proc )
+    {
+        cl_async_proc_destroy( p_async_proc );
+        cl_free( p_async_proc );
+    }
+
+    if( p_cache )
+        cl_free( p_cache );
+
+    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: hobs_remove idx %d hh_hndl 0x%p\n", hob_p - mlnx_hob_array, hob_p->hh_hndl));
+}
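Taken together, mlnx_hobs_insert, mlnx_hobs_set_cb and mlnx_hobs_remove form the per-HCA lifecycle: claim a free slot, wire the HH event handlers to IBAL's callbacks, and release the slot (plus its cache and async processor) on teardown. A minimal pairing sketch, where my_comp_cb, my_async_cb and my_ctx stand in for IBAL-supplied values:

    extern ci_completion_cb_t   my_comp_cb;   /* placeholders, not in the patch */
    extern ci_async_event_cb_t  my_async_cb;
    extern void                *my_ctx;

    static ib_api_status_t hob_lifecycle( mlnx_hca_t *p_hca )
    {
        mlnx_hob_t      *hob_p;
        ib_api_status_t status;

        status = mlnx_hobs_insert( p_hca, &hob_p );     /* claim a free slot */
        if( status != IB_SUCCESS )
            return status;

        status = mlnx_hobs_set_cb( hob_p, my_comp_cb,   /* route HH events   */
            my_async_cb, my_ctx );                      /* to IBAL callbacks */
        if( status != IB_SUCCESS )
            mlnx_hobs_remove( hob_p );                  /* frees slot+cache  */

        return status;
    }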
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_hobs_lookup(
+    IN  HH_hca_hndl_t   hndl,
+    OUT mlnx_hob_t      **hca_p)
+{
+    u_int32_t idx;
+
+    if (!hca_p)
+        return IB_ERROR;
+
+    cl_spinlock_acquire( &hob_lock );
+    for (idx = 0; idx < MLNX_NUM_HOBKL; idx++)
+    {
+        if (hndl == mlnx_hob_array[idx].hh_hndl)
+        {
+            *hca_p = &mlnx_hob_array[idx];
+            cl_spinlock_release( &hob_lock );
+            return IB_SUCCESS;
+        }
+    }
+    cl_spinlock_release( &hob_lock );
+    return IB_ERROR;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_hobs_get_handle(
+    IN  mlnx_hob_t      *hob_p,
+    OUT HH_hca_hndl_t   *hndl_p)
+{
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    if (hndl_p)
+        *hndl_p = hob_p->hh_hndl;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+mlnx_hobul_t *
+mlnx_hobs_get_hobul(
+    IN  mlnx_hob_t  *hob_p)
+{
+    // Verify handle
+    if ((hob_p - mlnx_hob_array) >= MLNX_NUM_HOBKL)
+        return NULL;
+
+    return mlnx_hobul_array[hob_p->index];
+}
+
+
+static int priv_ceil_log2(u_int32_t n)
+{
+    int shift;
+
+    for (shift = 31; shift > 0; shift--)
+        if (n & (1 << shift)) break;
+
+    if (((unsigned)1 << shift) < n) shift++;
+
+    return shift;
+}
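priv_ceil_log2 rounds its argument up to the next power-of-two exponent; the HOBUL code below feeds its result into MASK32 to size the CQ/QP lookup tables and derive their index masks. A worked example, assuming MASK32(n) expands to ((1U << n) - 1) as the surrounding code implies:

    /* Assumed expansion of MASK32, for illustration only. */
    #define EXAMPLE_MASK32(n)   ((1U << (n)) - 1)

    static void mask_example( void )
    {
        int       shift = priv_ceil_log2( 40000 );  /* 2^15 < 40000 <= 2^16 -> 16 */
        u_int32_t mask  = EXAMPLE_MASK32( shift );  /* 0xFFFF                     */
        u_int32_t max   = mask + 1;                 /* 0x10000 table entries      */

        CL_ASSERT( shift == 16 && mask == 0xFFFF && max == 0x10000 );
    }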
+
+/////////////////////////////////////////////////////////
+// ### HOBUL
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_hobul_new(
+    IN  mlnx_hob_t      *hob_p,
+    IN  HH_hca_hndl_t   hh_hndl,
+    IN  void            *resources_p)
+{
+    mlnx_hobul_t        *hobul_p;
+    HH_hca_dev_t        *hca_ul_info;
+    ib_api_status_t     status;
+    VAPI_hca_cap_t      hca_caps;
+    u_int32_t           i;
+#if MLNX_COMP_MODEL == 1
+    static uint32_t     proc_num = 0;
+#endif
+
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    if (NULL == (hobul_p = cl_zalloc( sizeof(mlnx_hobul_t))))
+        return IB_INSUFFICIENT_MEMORY;
+
+    // The following will NULL all pointers/sizes (used in cleanup)
+//  cl_memclr(hobul_p, sizeof (mlnx_hobul_t));
+
+    hobul_p->hh_hndl = hh_hndl;
+
+    if (HH_OK != THHUL_hob_create(resources_p, hh_hndl->dev_id, &hobul_p->hhul_hndl))
+    {
+        status = IB_INSUFFICIENT_RESOURCES;
+        goto cleanup;
+    }
+
+    hca_ul_info = (HH_hca_dev_t *)hh_hndl;
+
+    if (hca_ul_info)
+    {
+        hobul_p->vendor_id = hca_ul_info->vendor_id;
+        hobul_p->device_id = hca_ul_info->dev_id;
+        hobul_p->hca_ul_resources_p = resources_p;
+        hobul_p->cq_ul_resources_sz = hca_ul_info->cq_ul_resources_sz;
+        hobul_p->qp_ul_resources_sz = hca_ul_info->qp_ul_resources_sz;
+        hobul_p->pd_ul_resources_sz = hca_ul_info->pd_ul_resources_sz;
+    }
+
+    if (HH_OK != THH_hob_query(hh_hndl, &hca_caps))
+    {
+        status = IB_ERROR;
+        goto cleanup;
+    }
+
+    hobul_p->cq_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_cq));
+    hobul_p->qp_idx_mask = MASK32(priv_ceil_log2(hca_caps.max_num_qp)); // Currently mask = 0xFFFF
+    hobul_p->max_pd = MASK32(priv_ceil_log2(hca_caps.max_pd_num)) + 1;
+    hobul_p->max_cq = hobul_p->cq_idx_mask + 1;
+    hobul_p->max_qp = hobul_p->qp_idx_mask + 1;
+
+    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: sizes cq 0%x qp 0%x pd 0%x\n", hca_caps.max_num_cq, hca_caps.max_num_qp, hca_caps.max_pd_num));
+
+    /* create and initialize the data structure for CQs */
+    hobul_p->cq_info_tbl = cl_zalloc(hobul_p->max_cq * sizeof (cq_info_t));
+
+    /* create and initialize the data structure for QPs */
+    hobul_p->qp_info_tbl = cl_zalloc(hobul_p->max_qp * sizeof (qp_info_t));
+
+    /* create and initialize the data structure for PDs */
+    hobul_p->pd_info_tbl = cl_zalloc(hobul_p->max_pd * sizeof (pd_info_t));
+
+    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CL: alloc failed? cq=%d qp=%d pd=%d\n",
+        !hobul_p->cq_info_tbl, !hobul_p->qp_info_tbl, !hobul_p->pd_info_tbl));
+
+    if (!hobul_p->pd_info_tbl ||
+        !hobul_p->qp_info_tbl ||
+        !hobul_p->cq_info_tbl)
+    {
+        status = IB_INSUFFICIENT_MEMORY;
+        goto cleanup;
+    }
+
+    /* Initialize all mutexes. */
+    for( i = 0; i < hobul_p->max_cq; i++ )
+    {
+        cl_mutex_construct( &hobul_p->cq_info_tbl[i].mutex );
+#if MLNX_COMP_MODEL
+        KeInitializeDpc( &hobul_p->cq_info_tbl[i].dpc,
+            mlnx_comp_dpc, &hobul_p->cq_info_tbl[i] );
+#if MLNX_COMP_MODEL == 1
+        KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[i].dpc,
+            (CCHAR)(proc_num++ % cl_proc_count()) );
+#endif  /* MLNX_COMP_MODEL == 1 */
+#endif  /* MLNX_COMP_MODEL */
+    }
+
+    for( i = 0; i < hobul_p->max_qp; i++ )
+        cl_mutex_construct( &hobul_p->qp_info_tbl[i].mutex );
+
+    for( i = 0; i < hobul_p->max_pd; i++ )
+        cl_mutex_construct( &hobul_p->pd_info_tbl[i].mutex );
+
+    for( i = 0; i < hobul_p->max_cq; i++ )
+    {
+        if( cl_mutex_init( &hobul_p->cq_info_tbl[i].mutex ) != CL_SUCCESS )
+        {
+            status = IB_ERROR;
+            goto cleanup;
+        }
+    }
+
+    for( i = 0; i < hobul_p->max_qp; i++ )
+    {
+        if( cl_mutex_init( &hobul_p->qp_info_tbl[i].mutex ) != CL_SUCCESS )
+        {
+            status = IB_ERROR;
+            goto cleanup;
+        }
+    }
+
+    for( i = 0; i < hobul_p->max_pd; i++ )
+    {
+        if( cl_mutex_init( &hobul_p->pd_info_tbl[i].mutex ) != CL_SUCCESS )
+        {
+            status = IB_ERROR;
+            goto cleanup;
+        }
+    }
+
+    hobul_p->log2_mpt_size = ((THH_hca_ul_resources_t *)resources_p)->log2_mpt_size;
+    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("log2_mpt_size = %d\n", hobul_p->log2_mpt_size));
+
+    cl_spinlock_acquire(&hob_lock);
+    mlnx_hobul_array[hob_p->index] = hobul_p;
+    cl_spinlock_release(&hob_lock);
+
+    return IB_SUCCESS;
+
+cleanup:
+    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
+    if (hobul_p->pd_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_pd; i++ )
+            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
+        cl_free(hobul_p->pd_info_tbl);
+    }
+    if (hobul_p->qp_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_qp; i++ )
+            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
+        cl_free(hobul_p->qp_info_tbl);
+    }
+    if (hobul_p->cq_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_cq; i++ )
+            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
+        cl_free(hobul_p->cq_info_tbl);
+    }
+    if (hobul_p) cl_free( hobul_p);
+    return status;
+}
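Note how mlnx_hobul_new follows complib's two-phase pattern: every table entry is cl_mutex_construct'ed before any is cl_mutex_init'ed, so the single cleanup label may call cl_mutex_destroy across the whole table even when init failed partway. The same shape in miniature, with a hypothetical two-element table:

    /* Minimal sketch of the construct-all / init-all / destroy-all idiom. */
    static cl_status_t two_phase_example( void )
    {
        cl_mutex_t  tbl[2];
        int         i;

        for( i = 0; i < 2; i++ )
            cl_mutex_construct( &tbl[i] );  /* safe to destroy from here on */

        for( i = 0; i < 2; i++ )
        {
            if( cl_mutex_init( &tbl[i] ) != CL_SUCCESS )
                goto cleanup;               /* partial init stays destroyable */
        }
        return CL_SUCCESS;

    cleanup:
        for( i = 0; i < 2; i++ )
            cl_mutex_destroy( &tbl[i] );
        return CL_ERROR;
    }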
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_hobul_get(
+    IN  mlnx_hob_t  *hob_p,
+    OUT void        **resources_p )
+{
+    mlnx_hobul_t    *hobul_p;
+
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    hobul_p = mlnx_hobul_array[hob_p->index];
+
+    if (hobul_p && resources_p)
+    {
+        *resources_p = hobul_p->hca_ul_resources_p;
+    }
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_hobul_delete(
+    IN  mlnx_hob_t  *hob_p)
+{
+    mlnx_hobul_t    *hobul_p;
+    u_int32_t       i;
+
+    // Verify handle
+    CL_ASSERT((hob_p - mlnx_hob_array) < MLNX_NUM_HOBKL);
+
+    cl_spinlock_acquire(&hob_lock);
+    hobul_p = mlnx_hobul_array[hob_p->index];
+    mlnx_hobul_array[hob_p->index] = NULL;
+    cl_spinlock_release(&hob_lock);
+
+    if (!hobul_p) return;
+
+    if (hobul_p->hhul_hndl) THHUL_hob_destroy( hobul_p->hhul_hndl );
+    if (hobul_p->pd_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_pd; i++ )
+            cl_mutex_destroy( &hobul_p->pd_info_tbl[i].mutex );
+        cl_free(hobul_p->pd_info_tbl);
+    }
+    if (hobul_p->qp_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_qp; i++ )
+            cl_mutex_destroy( &hobul_p->qp_info_tbl[i].mutex );
+        cl_free(hobul_p->qp_info_tbl);
+    }
+    if (hobul_p->cq_info_tbl)
+    {
+        for( i = 0; i < hobul_p->max_cq; i++ )
+        {
+            KeRemoveQueueDpc( &hobul_p->cq_info_tbl[i].dpc );
+            cl_mutex_destroy( &hobul_p->cq_info_tbl[i].mutex );
+        }
+        cl_free(hobul_p->cq_info_tbl);
+    }
+    if (hobul_p) cl_free( hobul_p);
+}
+
+/////////////////////////////////////////////////////////
+// ### Callbacks
+/////////////////////////////////////////////////////////
+
+ib_async_event_t
+mlnx_map_vapi_event_type(
+    IN  unsigned            event_id,
+    OUT ENUM_EVENT_CLASS    *event_class_p)
+{
+    switch (event_id)
+    {
+    case VAPI_QP_PATH_MIGRATED:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_QP_APM;
+
+    case VAPI_QP_COMM_ESTABLISHED:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_QP_COMM;
+
+    case VAPI_SEND_QUEUE_DRAINED:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_SQ_DRAINED;
+
+    case VAPI_CQ_ERROR:
+        if (event_class_p) *event_class_p = E_EV_CQ;
+        return IB_AE_CQ_ERROR;
+
+    case VAPI_LOCAL_WQ_INV_REQUEST_ERROR:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_WQ_REQ_ERROR;
+
+    case VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_WQ_ACCESS_ERROR;
+
+    case VAPI_LOCAL_WQ_CATASTROPHIC_ERROR:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_QP_FATAL;
+
+    case VAPI_PATH_MIG_REQ_ERROR:
+        if (event_class_p) *event_class_p = E_EV_QP;
+        return IB_AE_QP_APM_ERROR;
+
+    case VAPI_LOCAL_CATASTROPHIC_ERROR:
+        if (event_class_p) *event_class_p = E_EV_CA;
+        return IB_AE_LOCAL_FATAL;
+
+    case VAPI_PORT_ERROR:
+        /*
+         * In tavor_hca\src\Hca\hcahal\tavor\eventp\event_irqh.c:
+         * TAVOR_IF_EV_TYPE_PORT_ERR maps one of two port events:
+         *  - TAVOR_IF_SUB_EV_PORT_DOWN
+         *  - TAVOR_IF_SUB_EV_PORT_UP
+         *
+         * These map to (respectively)
+         *  - VAPI_PORT_ERROR
+         *  - VAPI_PORT_ACTIVE
+         */
+        if (event_class_p) *event_class_p = E_EV_CA;
+        return IB_AE_PORT_DOWN; /* INIT, ARMED, DOWN */
+
+    case VAPI_PORT_ACTIVE:
+        if (event_class_p) *event_class_p = E_EV_CA;
+        return IB_AE_PORT_ACTIVE; /* ACTIVE STATE */
+
+    case VAPI_CLIENT_REREGISTER:
+        if (event_class_p) *event_class_p = E_EV_CA;
+        return IB_AE_CLIENT_REREGISTER; /* ACTIVE STATE */
+
+    default:
+        CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n",
+            event_id, VAPI_PORT_ACTIVE, IB_AE_LOCAL_FATAL));
+        if (event_class_p) *event_class_p = E_EV_CA;
+        return IB_AE_LOCAL_FATAL;
+    }
+}
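mlnx_map_vapi_event_type returns both the IBAL event code and, via the out parameter, the class (CA/QP/CQ) that tells the caller which context table to consult. A condensed consumer sketch, mirroring the switch in mlnx_async_dpc below; only the QP branch is shown:

    static void route_example( HH_event_record_t *hh_er_p, mlnx_hobul_t *hobul_p )
    {
        ENUM_EVENT_CLASS    event_class;
        ib_event_rec_t      event_r;
        u_int32_t           idx;

        event_r.type = mlnx_map_vapi_event_type( hh_er_p->etype, &event_class );

        if( event_class == E_EV_QP )
        {
            /* QP numbers are folded into the table by the index mask. */
            idx = hh_er_p->event_modifier.qpn & hobul_p->qp_idx_mask;
            if( idx < hobul_p->max_qp )
                event_r.context = (void *)hobul_p->qp_info_tbl[idx].qp_context;
        }
        /* E_EV_CQ and E_EV_CA are handled analogously. */
    }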
+
+void
+mlnx_conv_vapi_event(
+    IN  HH_event_record_t   *hh_event_p,
+    IN  ib_event_rec_t      *ib_event_p,
+    OUT ENUM_EVENT_CLASS    *event_class_p)
+{
+
+    // ib_event_p->context is handled by the caller
+    //
+    ib_event_p->type = mlnx_map_vapi_event_type(hh_event_p->etype, event_class_p);
+
+    // no traps currently generated
+    // ib_event_p->trap_info.lid = ;
+    // ib_event_p->trap_info.port_guid = ;
+    // ib_event_p->trap_info.port_num = hh_er;
+}
+
+void
+mlnx_async_cb(
+    IN  HH_hca_hndl_t       hh_hndl,
+    IN  HH_event_record_t   *hh_er_p,
+    IN  void                *private_data)
+{
+    u_int32_t       obj_idx;
+    mlnx_hob_t      *hob_p;
+
+    mlnx_cb_data_t  cb_data;
+    mlnx_cb_data_t  *cb_data_p;
+
+    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC CB %p (0x%x)\n",
+        private_data, (private_data) ? *(u_int32_t *)private_data : 0xB5));
+
+    if (!private_data || !hh_er_p) return;
+
+    obj_idx = *(u_int32_t *)private_data;
+    if (obj_idx >= MLNX_NUM_HOBKL) return;
+
+    hob_p = mlnx_hob_array + obj_idx;
+
+    // g_mlnx_dpc2thread will be initialized as a module parameter (default - disabled(0))
+    if (g_mlnx_dpc2thread)
+    {
+        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
+        if (!cb_data_p) return;
+
+        cb_data_p->hh_hndl = hh_hndl;
+        cb_data_p->private_data = private_data;
+        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
+        cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
+        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );
+    } else
+    {
+        cb_data_p = &cb_data;
+
+        cb_data_p->hh_hndl = hh_hndl;
+        cb_data_p->private_data = private_data;
+        cl_memcpy(&cb_data_p->hh_er, hh_er_p, sizeof(HH_event_record_t));
+        mlnx_async_dpc( &cb_data_p->async_item );
+    }
+}
+
+static void
+mlnx_async_dpc(
+    IN  cl_async_proc_item_t    *async_item_p )
+{
+    HH_event_record_t   *hh_er_p;
+    u_int32_t           obj_idx;
+    mlnx_hob_t          *hob_p;
+    mlnx_hobul_t        *hobul_p;
+    mlnx_cb_data_t      *cb_data_p;
+
+    ENUM_EVENT_CLASS    event_class;
+    ib_event_rec_t      event_r;
+
+    CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ASYNC DPC %p\n", async_item_p));
+
+    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
+
+    if (!cb_data_p) return;
+
+    hh_er_p = &cb_data_p->hh_er;
+    obj_idx = *(u_int32_t *)cb_data_p->private_data;
+    hob_p = mlnx_hob_array + obj_idx;
+    hobul_p = mlnx_hobul_array[obj_idx];
+
+    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC type %d ca_context %p\n",
+        hh_er_p->etype, hob_p->ca_context));
+
+    if (!hob_p ||
+        !hobul_p ||
+        !hob_p->hh_hndl ||
+        !hob_p->async_cb_p)
+    {
+        goto cleanup;
+    }
+
+    cl_memclr(&event_r, sizeof(event_r));
+    mlnx_conv_vapi_event(hh_er_p, &event_r, &event_class);
+
+    switch(event_class)
+    {
+    case E_EV_CA:
+        event_r.context = (void *)hob_p->ca_context;
+        break;
+
+    case E_EV_QP:
+        {
+            obj_idx = hh_er_p->event_modifier.qpn & hobul_p->qp_idx_mask;
+            if (obj_idx < hobul_p->max_qp)
+                event_r.context = (void *)hobul_p->qp_info_tbl[obj_idx].qp_context;
+            else
+            {
+                CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad qpn 0x%x max 0x%x\n", obj_idx, hobul_p->max_qp));
+                goto cleanup;
+            }
+        }
+        break;
+
+    case E_EV_CQ:
+        {
+            obj_idx = hh_er_p->event_modifier.cq & hobul_p->cq_idx_mask;
+            if (obj_idx < hobul_p->max_cq)
+                event_r.context = (void *)hobul_p->cq_info_tbl[obj_idx].cq_context;
+            else
+            {
+                CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC bad cqn 0x%x max 0x%x\n", obj_idx, hobul_p->max_cq));
+                goto cleanup;
+            }
+        }
+        break;
+
+    case E_EV_LAST:
+    default:
+        // CL_ASSERT(0); // This shouldn't happen
+        CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("ASYNC DPC unknown event_class 0x%x\n", event_class));
+        break;
+    }
+
+    // Call the registered CB
+    (*hob_p->async_cb_p)(&event_r);
+    // Fall Through
+cleanup:
+    if (g_mlnx_dpc2thread)
+    {
+        cl_free(cb_data_p);
+    }
+}
+
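mlnx_async_cb illustrates a dual dispatch strategy: with g_mlnx_dpc2thread set, the payload is heap-allocated and queued to the complib async processor (and freed by the DPC routine); otherwise a stack copy is handed straight to the DPC routine synchronously and nothing is freed. The ownership rule in isolation, as a sketch with the inessential fields trimmed:

    /* Sketch: who frees the payload depends on which path queued it. */
    static void dispatch_example( mlnx_hob_t *hob_p, HH_event_record_t *er_p )
    {
        mlnx_cb_data_t  stack_copy;
        mlnx_cb_data_t  *cb_data_p;

        if( g_mlnx_dpc2thread )
        {
            cb_data_p = cl_malloc( sizeof(mlnx_cb_data_t) );  /* freed by the DPC */
            if( !cb_data_p ) return;
            cb_data_p->private_data = &hob_p->index;
            cl_memcpy( &cb_data_p->hh_er, er_p, sizeof(HH_event_record_t) );
            cb_data_p->async_item.pfn_callback = mlnx_async_dpc;
            cl_async_proc_queue( hob_p->async_proc_mgr_p, &cb_data_p->async_item );
        }
        else
        {
            cb_data_p = &stack_copy;                          /* no free needed   */
            cb_data_p->private_data = &hob_p->index;
            cl_memcpy( &cb_data_p->hh_er, er_p, sizeof(HH_event_record_t) );
            mlnx_async_dpc( &cb_data_p->async_item );         /* runs synchronously */
        }
    }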
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_comp_cb(
+    IN  HH_hca_hndl_t   hh_hndl,
+    IN  HH_cq_hndl_t    hh_cq,
+    IN  void            *private_data)
+{
+#if MLNX_COMP_MODEL
+    u_int32_t       cq_num;
+    u_int32_t       hca_idx;
+    mlnx_hob_t      *hob_p;
+    mlnx_hobul_t    *hobul_p;
+#if MLNX_COMP_MODEL == 2
+    static uint32_t proc_num = 0;
+#endif
+
+    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));
+
+    UNUSED_PARAM( hh_hndl );
+
+    hca_idx = *(u_int32_t *)private_data;
+    hob_p = mlnx_hob_array + hca_idx;
+    hobul_p = mlnx_hobul_array[hca_idx];
+    cq_num = hh_cq & hobul_p->cq_idx_mask;
+
+    if (NULL != hob_p && NULL != hobul_p &&
+        hob_p->hh_hndl && hob_p->comp_cb_p)
+    {
+        if (cq_num < hobul_p->max_cq)
+        {
+#if MLNX_COMP_MODEL == 2
+            KeSetTargetProcessorDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
+                (CCHAR)(proc_num++ % cl_proc_count()) );
+#endif  /* MLNX_COMP_MODEL == 2 */
+            KeInsertQueueDpc( &hobul_p->cq_info_tbl[cq_num].dpc,
+                hob_p, NULL );
+        }
+        else
+        {
+            HCA_TRACE( HCA_DBG_ERROR, ("CQ index out of range!!!\n") );
+        }
+    }
+#else   /* MLNX_COMP_MODEL */
+    u_int32_t       obj_idx;
+    mlnx_hob_t      *hob_p;
+
+    mlnx_cb_data_t  cb_data;
+    mlnx_cb_data_t  *cb_data_p;
+
+    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP CB cq 0x%x %p\n", hh_cq, private_data));
+
+    if (!private_data) return;
+
+    obj_idx = *(u_int32_t *)private_data;
+    hob_p = mlnx_hob_array + obj_idx;
+    if (!hob_p) return;
+
+    if (g_mlnx_dpc2thread)
+    {
+        cb_data_p = cl_malloc(sizeof(mlnx_cb_data_t));
+        if (!cb_data_p) return;
+
+        cb_data_p->hh_hndl = hh_hndl;
+        cb_data_p->hh_cq = hh_cq;
+        cb_data_p->private_data = private_data;
+
+        cb_data_p->async_item.pfn_callback = mlnx_comp_dpc;
+
+        // Report completion through async_proc
+        cl_async_proc_queue(hob_p->async_proc_mgr_p, &cb_data_p->async_item );
+
+    } else
+    {
+        cb_data_p = &cb_data;
+
+        cb_data_p->hh_hndl = hh_hndl;
+        cb_data_p->hh_cq = hh_cq;
+        cb_data_p->private_data = private_data;
+
+        // Report completion directly from DPC (verbs should NOT sleep)
+        mlnx_comp_dpc( &cb_data_p->async_item );
+    }
+#endif  /* MLNX_COMP_MODEL */
+}
+
+#if MLNX_COMP_MODEL
+static void
+mlnx_comp_dpc(
+    IN  PRKDPC  p_dpc,
+    IN  void    *context,
+    IN  void    *arg1,
+    IN  void    *unused )
+{
+    mlnx_hob_t  *hob_p = (mlnx_hob_t*)arg1;
+    UNUSED_PARAM( p_dpc );
+    UNUSED_PARAM( unused );
+
+    hob_p->comp_cb_p( (void*)((cq_info_t*)context)->cq_context );
+}
+#else   /* MLNX_COMP_MODEL */
+static void
+mlnx_comp_dpc(
+    IN  cl_async_proc_item_t    *async_item_p )
+{
+    u_int32_t       cq_num;
+    u_int32_t       hca_idx;
+    mlnx_hob_t      *hob_p;
+    mlnx_hobul_t    *hobul_p;
+    mlnx_cb_data_t  *cb_data_p;
+
+    CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("COMP DPC %p\n", async_item_p));
+
+    cb_data_p = PARENT_STRUCT( async_item_p, mlnx_cb_data_t, async_item );
+    if (!cb_data_p) return;
+
+    hca_idx = *(u_int32_t *)cb_data_p->private_data;
+    hob_p = mlnx_hob_array + hca_idx;
+    hobul_p = mlnx_hobul_array[hca_idx];
+    cq_num = (u_int32_t)cb_data_p->hh_cq & hobul_p->cq_idx_mask;
+
+    if (NULL != hob_p && NULL != hobul_p &&
+        hob_p->hh_hndl && hob_p->comp_cb_p)
+    {
+        if (cq_num < hobul_p->max_cq)
+        {
+            (*hob_p->comp_cb_p)((void *)hobul_p->cq_info_tbl[cq_num].cq_context);
+        }
+    }
+
+    if (g_mlnx_dpc2thread)
+    {
+        cl_free(cb_data_p);
+    }
+}
+#endif  /* MLNX_COMP_MODEL */
+
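The two MLNX_COMP_MODEL variants differ only in when a CQ's DPC is bound to a CPU: model 1 pins it round-robin once, at table setup in mlnx_hobul_new, while model 2 re-targets on every completion in mlnx_comp_cb. The model-2 re-target step in isolation, as a sketch using the same calls (the wrapper function itself is hypothetical):

    /* Sketch of model 2's per-event retargeting (compare mlnx_comp_cb above). */
    static void retarget_example( cq_info_t *p_cq_info, mlnx_hob_t *hob_p )
    {
        static uint32_t proc_num = 0;

        /* Spread completion DPCs across CPUs; cl_proc_count() bounds the index. */
        KeSetTargetProcessorDpc( &p_cq_info->dpc,
            (CCHAR)(proc_num++ % cl_proc_count()) );
        KeInsertQueueDpc( &p_cq_info->dpc, hob_p, NULL );
    }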
VAPI_EN_REMOTE_READ; + if (ibal_acl & IB_AC_RDMA_WRITE) vapi_acl |= VAPI_EN_REMOTE_WRITE; + if (ibal_acl & IB_AC_ATOMIC) vapi_acl |= VAPI_EN_REMOTE_ATOM; + if (ibal_acl & IB_AC_LOCAL_WRITE) vapi_acl |= VAPI_EN_LOCAL_WRITE; + if (ibal_acl & IB_AC_MW_BIND) vapi_acl |= VAPI_EN_MEMREG_BIND; + + return vapi_acl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_access_t +map_vapi_acl( + IN VAPI_mrw_acl_t vapi_acl) +{ + ib_access_t ibal_acl = 0; + + if (vapi_acl & VAPI_EN_REMOTE_READ) ibal_acl |= IB_AC_RDMA_READ; + if (vapi_acl & VAPI_EN_REMOTE_WRITE) ibal_acl |= IB_AC_RDMA_WRITE; + if (vapi_acl & VAPI_EN_REMOTE_ATOM) ibal_acl |= IB_AC_ATOMIC; + if (vapi_acl & VAPI_EN_LOCAL_WRITE) ibal_acl |= IB_AC_LOCAL_WRITE; + if (vapi_acl & VAPI_EN_MEMREG_BIND) ibal_acl |= IB_AC_MW_BIND; + + return ibal_acl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static VAPI_rdma_atom_acl_t +map_ibal_qp_acl( + IN ib_access_t ibal_acl) +{ + VAPI_rdma_atom_acl_t vapi_qp_acl = 0; + + if (ibal_acl & IB_AC_RDMA_WRITE) vapi_qp_acl |= VAPI_EN_REM_WRITE; + if (ibal_acl & IB_AC_RDMA_READ) vapi_qp_acl |= VAPI_EN_REM_READ; + if (ibal_acl & IB_AC_ATOMIC) vapi_qp_acl |= VAPI_EN_REM_ATOMIC_OP; + + return vapi_qp_acl; + +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static ib_access_t +map_vapi_qp_acl( + IN VAPI_rdma_atom_acl_t vapi_qp_acl) +{ + ib_access_t ibal_acl = IB_AC_LOCAL_WRITE; + + if (vapi_qp_acl & VAPI_EN_REM_WRITE) ibal_acl |= IB_AC_RDMA_WRITE; + if (vapi_qp_acl & VAPI_EN_REM_READ) ibal_acl |= IB_AC_RDMA_READ; + if (vapi_qp_acl & VAPI_EN_REM_ATOMIC_OP) ibal_acl |= IB_AC_ATOMIC; + + return ibal_acl; +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_lock_region( + IN mlnx_mro_t *mro_p, + IN boolean_t um_call ) +{ + MOSAL_iobuf_t old_iobuf; + + // Find context + if( um_call ) + mro_p->mr_prot_ctx = MOSAL_get_current_prot_ctx(); + else + mro_p->mr_prot_ctx = MOSAL_get_kernel_prot_ctx(); + + // Save pointer to existing locked region. + old_iobuf = mro_p->mr_iobuf; + + // Lock Region + if (MT_OK != MOSAL_iobuf_register((MT_virt_addr_t)mro_p->mr_start, + (MT_size_t)mro_p->mr_size, + mro_p->mr_prot_ctx, + mro_p->mr_mosal_perm, + &mro_p->mr_iobuf, + 0 )) + { + return IB_ERROR; + } + + if( old_iobuf ) + { + if( MT_OK != MOSAL_iobuf_deregister( old_iobuf ) ) + return IB_ERROR; + } + + return IB_SUCCESS; +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_ibal_mr_create( + IN u_int32_t pd_idx, + IN OUT mlnx_mro_t *mro_p, + IN VAPI_mr_change_t change_flags, + IN ib_mr_create_t const *p_mr_create, + IN boolean_t um_call, + OUT HH_mr_t *mr_props_p ) +{ + ib_api_status_t status; + + /* Set ACL information first since it is used to lock the region. */ + if( change_flags & VAPI_MR_CHANGE_ACL ) + { + mro_p->mr_acl = map_ibal_acl( p_mr_create->access_ctrl ); + // This computation should be externalized by THH + mro_p->mr_mosal_perm = + MOSAL_PERM_READ | + ((mro_p->mr_acl & VAPI_EN_LOCAL_WRITE) ? 
MOSAL_PERM_WRITE : 0);
+	}
+
+	if( change_flags & VAPI_MR_CHANGE_TRANS )
+	{
+		CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("addr 0x%p size %"PRId64"\n", (void *)p_mr_create->vaddr, p_mr_create->length));
+		// Build TPT entries
+		mro_p->mr_start = (IB_virt_addr_t)p_mr_create->vaddr;
+		mro_p->mr_size = p_mr_create->length;
+		if (IB_SUCCESS != (status = mlnx_lock_region(mro_p, um_call)))
+		{
+			return status;
+		}
+	}
+
+	/* Now fill in the MR properties. */
+	mr_props_p->start = mro_p->mr_start;
+	mr_props_p->size = mro_p->mr_size;
+	mr_props_p->acl = mro_p->mr_acl;
+	mr_props_p->pd = pd_idx;
+
+	// Setup MTT info
+	mr_props_p->tpt.tpt_type = HH_TPT_IOBUF;
+	mr_props_p->tpt.tpt.iobuf = mro_p->mr_iobuf;
+
+	return IB_SUCCESS;
+}
+
+/////////////////////////////////////////////////////////
+// On entry mro_p->mr_start holds the pmr address
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_conv_ibal_pmr_create(
+	IN	u_int32_t				pd_idx,
+	IN	mlnx_mro_t				*mro_p,
+	IN	ib_phys_create_t const	*p_pmr_create,
+	OUT	HH_mr_t					*mr_props_p )
+{
+	VAPI_phy_addr_t*	buf_lst = NULL;
+	VAPI_size_t*		sz_lst = NULL;
+	u_int32_t			i;
+	u_int32_t			page_shift = priv_ceil_log2(p_pmr_create->hca_page_size);
+	u_int64_t			page_mask = ((u_int64_t)1 << page_shift) - 1;	// 64-bit shift so a large page_shift cannot truncate the mask
+	u_int64_t			tot_sz = 0;
+
+	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
+		("PRE: addr %p size 0x%"PRIx64" shift %d\n",
+		(void *)(uintn_t)mro_p->mr_start, p_pmr_create->length, page_shift));
+	mro_p->mr_start = (mro_p->mr_start & ~page_mask) | (p_pmr_create->buf_offset & page_mask);
+	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl,
+		("POST: addr %p\n", (void *)(uintn_t)mro_p->mr_start));
+
+	mr_props_p->start = mro_p->mr_start;
+	mr_props_p->size = p_pmr_create->length;
+	mr_props_p->acl = map_ibal_acl(p_pmr_create->access_ctrl);
+	mr_props_p->pd = pd_idx;
+
+#ifdef _DEBUG_
+	mro_p->mr_size = mr_props_p->size;
+//	mro_p->mr_first_page_addr = 0;
+//	mro_p->mr_num_pages = (mro_p->mr_end >> PAGESHIFT) + 1 - (mro_p->mr_start >> PAGESHIFT);
+//	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st pg addr 0x%p pages %d\n",
+//		(void *)mro_p->mr_first_page_addr, p_pmr_create->num_bufs));
+	CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("1st phys addr 0x%"PRIx64" phys pages %d\n",
+		p_pmr_create->range_array[0].base_addr, p_pmr_create->num_ranges));
+#endif
+
+	// Build TPT entries
+	if (!p_pmr_create->range_array)
+	{
+		return IB_INVALID_PARAMETER;
+	}
+
+	if (p_pmr_create->hca_page_size !=
+		MT_DOWN_ALIGNX_PHYS(p_pmr_create->hca_page_size, page_shift))
+	{
+		CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf size is not page aligned\n"));
+		return IB_INVALID_PARAMETER;
+	}
+
+	for (i = 0; i < p_pmr_create->num_ranges; i++)
+	{
+		uint64_t start_addr = p_pmr_create->range_array[i].base_addr;
+		uint64_t end_addr = start_addr + p_pmr_create->range_array[i].size;
+
+		if( end_addr < start_addr ) {
+			CL_TRACE( CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf end < start\n") );
+			return IB_INVALID_PARAMETER;
+		}
+
+		if (start_addr !=
+			MT_DOWN_ALIGNX_PHYS(start_addr, page_shift))
+		{
+			CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("phys buf start address is not page aligned\n"));
+			return IB_INVALID_PARAMETER;
+		}
+
+		tot_sz += p_pmr_create->range_array[i].size;
+	}
+
+	if( tot_sz < p_pmr_create->length + p_pmr_create->buf_offset )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("length(0x%"PRIx64") + buf offset(0x%"PRIx64") larger than sum "
+			"of phys ranges(0x%"PRIx64")\n",
+			p_pmr_create->length, p_pmr_create->buf_offset, tot_sz) );
+		return IB_INVALID_PARAMETER;
+	}
+
+	if( p_pmr_create->buf_offset >
p_pmr_create->range_array[0].size )
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("buf offset(0x%"PRIx64") larger than 1st phys range size(0x%"PRIx64")\n",
+			p_pmr_create->buf_offset, p_pmr_create->range_array[0].size) );
+		return IB_INVALID_PARAMETER;
+	}
+
+	/* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
+	buf_lst = (VAPI_phy_addr_t*)cl_pzalloc( sizeof(VAPI_phy_addr_t)*(p_pmr_create->num_ranges));
+	if (!buf_lst)
+	{
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("Failed to allocate range address list.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+
+	/* Memory registration must be done at PASSIVE_LEVEL, so paged memory here is fine. */
+	sz_lst = (VAPI_size_t*)cl_pzalloc( sizeof(VAPI_size_t)*(p_pmr_create->num_ranges));
+	if (!sz_lst)
+	{
+		cl_free( buf_lst );
+		HCA_TRACE_EXIT( HCA_DBG_ERROR,
+			("Failed to allocate range size list.\n") );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	for (i = 0; i < p_pmr_create->num_ranges; i++)
+	{
+		buf_lst[i] = p_pmr_create->range_array[i].base_addr;
+		sz_lst[i] = p_pmr_create->range_array[i].size;
+	}
+
+	mr_props_p->tpt.tpt_type = HH_TPT_BUF;
+	mr_props_p->tpt.num_entries = p_pmr_create->num_ranges;
+	mr_props_p->tpt.tpt.buf_lst.buf_sz_lst = sz_lst;
+	mr_props_p->tpt.tpt.buf_lst.phys_buf_lst = buf_lst;
+	mr_props_p->tpt.tpt.buf_lst.iova_offset = p_pmr_create->buf_offset;
+
+	return IB_SUCCESS;
+}
+
+
+u_int8_t
+mlnx_gid_to_index(
+	IN	HH_hca_hndl_t	hh_hndl,
+	IN	u_int8_t		port_num,
+	IN	u_int8_t		*raw_gid)
+{
+	ib_gid_t	*gid_table_p = NULL;
+	u_int8_t	index = 0;	// default return value
+	u_int8_t	i;
+
+	gid_table_p = cl_zalloc( 64*sizeof(ib_gid_t));
+	if (!gid_table_p)
+		return index;	// allocation failed - fall back to the default index
+
+	mlnx_get_hca_gid_tbl(hh_hndl, port_num, 64, gid_table_p);
+
+	for (i = 0; i < 64; i++)
+	{
+		if (!cl_memcmp(raw_gid, gid_table_p[i].raw, sizeof(ib_gid_t)))
+		{
+			CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("1: found GID at index %d\n", i));
+			index = i;
+			break;
+		}
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("2: found GID at index %d\n", index));
+
+	cl_free( gid_table_p);
+	return index;
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mlnx_conv_ibal_av(
+	IN	HH_hca_hndl_t		hh_hndl,
+	IN	const ib_av_attr_t	*ibal_av_p,
+	OUT	VAPI_ud_av_t		*vapi_av_p)
+{
+	vapi_av_p->port = ibal_av_p->port_num;
+	vapi_av_p->sl = ibal_av_p->sl;
+	vapi_av_p->dlid = cl_ntoh16 (ibal_av_p->dlid);
+
+	vapi_av_p->static_rate =
+		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS?
0 : 3); + ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL, + &vapi_av_p->traffic_class, &vapi_av_p->flow_label ); + vapi_av_p->src_path_bits = ibal_av_p->path_bits; // PATH: + //vapi_av_p->src_path_bits = 0; + + /* For global destination or Multicast address:*/ + if (ibal_av_p->grh_valid) + { + vapi_av_p->grh_flag = TRUE; + vapi_av_p->hop_limit = ibal_av_p->grh.hop_limit; + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("raw %p, &raw %p\n", ibal_av_p->grh.src_gid.raw, &ibal_av_p->grh.src_gid.raw)); + vapi_av_p->sgid_index = mlnx_gid_to_index(hh_hndl, ibal_av_p->port_num, (u_int8_t *)ibal_av_p->grh.src_gid.raw); + cl_memcpy(vapi_av_p->dgid, ibal_av_p->grh.dest_gid.raw, sizeof(vapi_av_p->dgid)); + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_vapi_av( + IN HH_hca_hndl_t hh_hndl, + IN const VAPI_ud_av_t *vapi_av_p, + OUT ib_av_attr_t *ibal_av_p) +{ + uint8_t ver; + + ibal_av_p->port_num = vapi_av_p->port; + ibal_av_p->sl = vapi_av_p->sl; + ibal_av_p->dlid = cl_ntoh16(vapi_av_p->dlid); + + /* For global destination or Multicast address:*/ + ibal_av_p->grh_valid = vapi_av_p->grh_flag; + + ver = 2; + ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow( ver, + vapi_av_p->traffic_class, + vapi_av_p->flow_label); + ibal_av_p->grh.hop_limit = vapi_av_p->hop_limit; + + THH_hob_get_sgid(hh_hndl, + vapi_av_p->port, + vapi_av_p->sgid_index, + &ibal_av_p->grh.src_gid.raw); + + cl_memcpy(ibal_av_p->grh.dest_gid.raw, vapi_av_p->dgid, sizeof(vapi_av_p->dgid)); + + ibal_av_p->static_rate = (vapi_av_p->static_rate? + IB_PATH_RECORD_RATE_2_5_GBS : IB_PATH_RECORD_RATE_10_GBS); + ibal_av_p->path_bits = vapi_av_p->src_path_bits; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_cqe_status( + IN VAPI_wc_status_t vapi_status) +{ + switch (vapi_status) + { + case IB_COMP_SUCCESS: return IB_WCS_SUCCESS; + case IB_COMP_LOC_LEN_ERR: return IB_WCS_LOCAL_LEN_ERR; + case IB_COMP_LOC_QP_OP_ERR: return IB_WCS_LOCAL_OP_ERR; + case IB_COMP_LOC_PROT_ERR: return IB_WCS_LOCAL_PROTECTION_ERR; + case IB_COMP_WR_FLUSH_ERR: return IB_WCS_WR_FLUSHED_ERR; + case IB_COMP_MW_BIND_ERR: return IB_WCS_MEM_WINDOW_BIND_ERR; + case IB_COMP_REM_INV_REQ_ERR: return IB_WCS_REM_INVALID_REQ_ERR; + case IB_COMP_REM_ACCESS_ERR: return IB_WCS_REM_ACCESS_ERR; + case IB_COMP_REM_OP_ERR: return IB_WCS_REM_OP_ERR; + case IB_COMP_RETRY_EXC_ERR: return IB_WCS_TIMEOUT_RETRY_ERR; + case IB_COMP_RNR_RETRY_EXC_ERR: return IB_WCS_RNR_RETRY_ERR; + case IB_COMP_REM_ABORT_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? + case IB_COMP_FATAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? + case IB_COMP_GENERAL_ERR: return IB_WCS_REM_ACCESS_ERR; // ??? 
+ default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map %d (last known %d) returning %d\n", + vapi_status, IB_COMP_GENERAL_ERR, IB_WCS_REM_ACCESS_ERR)); + return IB_WCS_REM_ACCESS_ERR; + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_cqe_type( + IN VAPI_cqe_opcode_t opcode) +{ + switch (opcode) + { + case VAPI_CQE_SQ_SEND_DATA: return IB_WC_SEND; + case VAPI_CQE_SQ_RDMA_WRITE: return IB_WC_RDMA_WRITE; + case VAPI_CQE_SQ_RDMA_READ: return IB_WC_RDMA_READ; + case VAPI_CQE_SQ_COMP_SWAP: return IB_WC_COMPARE_SWAP; + case VAPI_CQE_SQ_FETCH_ADD: return IB_WC_FETCH_ADD; + case VAPI_CQE_SQ_BIND_MRW: return IB_WC_MW_BIND; + case VAPI_CQE_RQ_SEND_DATA: return IB_WC_RECV; + case VAPI_CQE_RQ_RDMA_WITH_IMM: return IB_WC_RECV_RDMA_WRITE; + default: + return IB_WC_SEND; + } +} + +///////////////////////////////////////////////////////// +// Map Remote Node Addr Type +///////////////////////////////////////////////////////// +int +mlnx_map_vapi_rna_type( + IN VAPI_remote_node_addr_type_t rna) +{ + switch (rna) + { + case VAPI_RNA_UD: return IB_QPT_UNRELIABLE_DGRM; + case VAPI_RNA_RAW_ETY: return IB_QPT_RAW_ETHER; + case VAPI_RNA_RAW_IPV6: return IB_QPT_RAW_IPV6; + default: + return IB_QPT_RELIABLE_CONN; + } +} + +////////////////////////////////////////////////////////////// +// Convert from VAPI memory-region attributes to IBAL +////////////////////////////////////////////////////////////// +void +mlnx_conv_vapi_mr_attr( + IN ib_pd_handle_t pd_h, + IN HH_mr_info_t *mr_info_p, + OUT ib_mr_attr_t *mr_query_p) +{ + mr_query_p->h_pd = pd_h; + mr_query_p->local_lb = mr_info_p->local_start; + mr_query_p->local_ub = mr_info_p->local_start + mr_info_p->local_size; + mr_query_p->remote_lb = mr_info_p->remote_start; + mr_query_p->remote_ub = mr_info_p->remote_start + mr_info_p->remote_size; + + mr_query_p->access_ctrl = map_vapi_acl(mr_info_p->acl); + mr_query_p->lkey = mr_info_p->lkey; + mr_query_p->rkey = cl_hton32(mr_info_p->rkey); +} + +////////////////////////////////////////////////////////////// +// Convert from IBAL memory-window bind request to VAPI +////////////////////////////////////////////////////////////// +void +mlnx_conv_bind_req( + IN HHUL_qp_hndl_t hhul_qp_hndl, + IN ib_bind_wr_t* const p_mw_bind, + OUT HHUL_mw_bind_t *bind_prop_p) +{ + bind_prop_p->qp = hhul_qp_hndl; + bind_prop_p->id = p_mw_bind->wr_id; + bind_prop_p->acl = map_ibal_acl(p_mw_bind->access_ctrl); + bind_prop_p->size = p_mw_bind->local_ds.length; + bind_prop_p->start = (VAPI_virt_addr_t)(MT_virt_addr_t)p_mw_bind->local_ds.vaddr; + bind_prop_p->mr_lkey = p_mw_bind->local_ds.lkey; + bind_prop_p->comp_type = + (p_mw_bind->send_opt & IB_SEND_OPT_SIGNALED) ? 
VAPI_SIGNALED : VAPI_UNSIGNALED; +} + + +///////////////////////////////////////////////////////// +// Map IBAL qp type to VAPI transport and special qp_type +///////////////////////////////////////////////////////// +int +mlnx_map_ibal_qp_type( + IN ib_qp_type_t ibal_qpt, + OUT VAPI_special_qp_t *vapi_qp_type_p) +{ + switch (ibal_qpt) + { + case IB_QPT_RELIABLE_CONN: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_RC; + + case IB_QPT_UNRELIABLE_CONN: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_UC; + + case IB_QPT_UNRELIABLE_DGRM: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_REGULAR_QP; + return IB_TS_UD; + + case IB_QPT_QP0: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP; + return IB_TS_UD; + + case IB_QPT_QP1: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + case IB_QPT_RAW_IPV6: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_IPV6_QP; // TBD: ?? + return IB_TS_RAW; + + case IB_QPT_RAW_ETHER: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; // TBD: ?? + return IB_TS_RAW; + + case IB_QPT_MAD: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + case IB_QPT_QP0_ALIAS: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_SMI_QP; + return IB_TS_UD; + + case IB_QPT_QP1_ALIAS: + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_GSI_QP; + return IB_TS_UD; + + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map ibal_qp_type %d (last known %d) returning %d\n", + ibal_qpt, IB_QPT_QP1_ALIAS, IB_TS_RAW)); + if (vapi_qp_type_p) *vapi_qp_type_p = VAPI_RAW_ETY_QP; + return IB_TS_RAW; + } +} + +///////////////////////////////////////////////////////// +// QP and CQ value must be handled by caller +///////////////////////////////////////////////////////// +void +mlnx_conv_qp_create_attr( + IN const ib_qp_create_t *create_attr_p, + OUT HHUL_qp_init_attr_t *init_attr_p, + OUT VAPI_special_qp_t *vapi_qp_type_p) +{ + init_attr_p->ts_type = mlnx_map_ibal_qp_type(create_attr_p->qp_type, vapi_qp_type_p); + + init_attr_p->qp_cap.max_oust_wr_sq = create_attr_p->sq_depth; + init_attr_p->qp_cap.max_oust_wr_rq = create_attr_p->rq_depth; + init_attr_p->qp_cap.max_sg_size_sq = create_attr_p->sq_sge; + init_attr_p->qp_cap.max_sg_size_rq = create_attr_p->rq_sge; + + init_attr_p->sq_sig_type = (create_attr_p->sq_signaled) ? 
VAPI_SIGNAL_ALL_WR : VAPI_SIGNAL_REQ_WR; + init_attr_p->rq_sig_type = VAPI_SIGNAL_ALL_WR; + + init_attr_p->srq = HHUL_INVAL_SRQ_HNDL; +} + +///////////////////////////////////////////////////////// +// NOTE: ibal_qp_state is non linear - so we cannot use a LUT +///////////////////////////////////////////////////////// +VAPI_qp_state_t +mlnx_map_ibal_qp_state( + IN ib_qp_state_t ibal_qp_state) +{ + VAPI_qp_state_t vapi_qp_state = VAPI_RESET; + + if (ibal_qp_state & IB_QPS_RESET) vapi_qp_state = VAPI_RESET; + else if (ibal_qp_state & IB_QPS_INIT) vapi_qp_state = VAPI_INIT; + else if (ibal_qp_state & IB_QPS_RTR) vapi_qp_state = VAPI_RTR; + else if (ibal_qp_state & IB_QPS_RTS) vapi_qp_state = VAPI_RTS; + else if (ibal_qp_state & IB_QPS_SQD) vapi_qp_state = VAPI_SQD; + else if (ibal_qp_state & IB_QPS_SQERR) vapi_qp_state = VAPI_SQE; + else if (ibal_qp_state & IB_QPS_ERROR) vapi_qp_state = VAPI_ERR; + + return vapi_qp_state; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_qp_state_t +mlnx_map_vapi_qp_state( + IN VAPI_qp_state_t vapi_qp_state) +{ + switch (vapi_qp_state) + { + case VAPI_RESET: return IB_QPS_RESET; + case VAPI_INIT: return IB_QPS_INIT; + case VAPI_RTR: return IB_QPS_RTR; + case VAPI_RTS: return IB_QPS_RTS; + case VAPI_SQD: return IB_QPS_SQD; + case VAPI_SQE: return IB_QPS_SQERR; + case VAPI_ERR: return IB_QPS_ERROR; + // TBD: IB_QPS_SQD_DRAINING + // TBD: IB_QPS_SQD_DRAINED + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_qp_state %d (last known %d) returning %d\n", + vapi_qp_state, VAPI_ERR, IB_QPS_INIT)); + return IB_QPS_INIT; + } +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_apm_state_t +mlnx_map_vapi_apm_state( + IN VAPI_mig_state_t vapi_apm_state) +{ + switch (vapi_apm_state) + { + case VAPI_MIGRATED: return IB_APM_MIGRATED; + case VAPI_REARM: return IB_APM_REARM; + case VAPI_ARMED: return IB_APM_ARMED; + + default: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("FAIL to map vapi_apm_state %d (last known %d) returning %d\n", + vapi_apm_state, VAPI_ARMED, 0)); + return 0; + } +} + +#if 0 +///////////////////////////////////////////////////////// +// UNUSED: IBAL uses same encoding as THH +///////////////////////////////////////////////////////// +static +u_int32_t ibal_mtu_to_vapi(u_int32_t ibal_mtu) +{ + u_int32_t mtu = 0; + + // MTU256=1, MTU512=2, MTU1024=3 + while (ibal_mtu >>= 1) mtu++; + return mtu - 7; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static +u_int32_t vapi_mtu_to_ibal(u_int32_t vapi_mtu) +{ + return (1 << (vapi_mtu + 7)); +} +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_vapi_qp_attr( + IN HH_hca_hndl_t hh_hndl, + IN VAPI_qp_attr_t *hh_qp_attr_p, + OUT ib_qp_attr_t *qp_attr_p) +{ + qp_attr_p->access_ctrl = map_vapi_qp_acl(hh_qp_attr_p->remote_atomic_flags); + qp_attr_p->pkey_index = (uint16_t)hh_qp_attr_p->pkey_ix; + qp_attr_p->sq_depth = hh_qp_attr_p->cap.max_oust_wr_sq; + qp_attr_p->rq_depth = hh_qp_attr_p->cap.max_oust_wr_rq; + qp_attr_p->sq_sge = hh_qp_attr_p->cap.max_sg_size_sq; + qp_attr_p->rq_sge = hh_qp_attr_p->cap.max_sg_size_rq; + qp_attr_p->sq_max_inline = hh_qp_attr_p->cap.max_inline_data_sq; + qp_attr_p->init_depth = hh_qp_attr_p->ous_dst_rd_atom; // outstanding outgoing + qp_attr_p->resp_res = 
hh_qp_attr_p->qp_ous_rd_atom; // outstanding as target (in) + + qp_attr_p->num = cl_ntoh32(hh_qp_attr_p->qp_num); + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_qpn 0x%x = hh_qpn 0x%x\n", + qp_attr_p->num, + hh_qp_attr_p->qp_num)); + + qp_attr_p->dest_num = cl_ntoh32(hh_qp_attr_p->dest_qp_num); + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("ibal_dest 0x%x = hh_dest 0x%x\n", + qp_attr_p->dest_num, + hh_qp_attr_p->dest_qp_num)); + qp_attr_p->qkey = cl_ntoh32 (hh_qp_attr_p->qkey); + + qp_attr_p->sq_psn = cl_ntoh32 (hh_qp_attr_p->sq_psn); + qp_attr_p->rq_psn = cl_ntoh32 (hh_qp_attr_p->rq_psn); + + qp_attr_p->primary_port = hh_qp_attr_p->port; + qp_attr_p->alternate_port = hh_qp_attr_p->alt_port; + + qp_attr_p->state = mlnx_map_vapi_qp_state(hh_qp_attr_p->qp_state); + qp_attr_p->apm_state = mlnx_map_vapi_apm_state(hh_qp_attr_p->path_mig_state); + + mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->av, &qp_attr_p->primary_av); + qp_attr_p->primary_av.conn.path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu; + qp_attr_p->primary_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; + qp_attr_p->primary_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count; + qp_attr_p->primary_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry; + + mlnx_conv_vapi_av(hh_hndl, &hh_qp_attr_p->alt_av, &qp_attr_p->alternate_av); + qp_attr_p->alternate_av.conn. path_mtu = (u_int8_t)hh_qp_attr_p->path_mtu; + qp_attr_p->alternate_av.conn.local_ack_timeout = hh_qp_attr_p->timeout; + qp_attr_p->alternate_av.conn.seq_err_retry_cnt = hh_qp_attr_p->retry_count; + qp_attr_p->alternate_av.conn.rnr_retry_cnt = hh_qp_attr_p->rnr_retry; +} +#if 0 +XXX: +QP_ATTR_QP_STATE +QP_ATTR_EN_SQD_ASYN_NOTIF +QP_ATTR_QP_NUM ++ QP_ATTR_REMOTE_ATOMIC_FLAGS ++ QP_ATTR_PKEY_IX ++ QP_ATTR_PORT ++ QP_ATTR_QKEY ++ QP_ATTR_RQ_PSN ++ QP_ATTR_AV + +QP_ATTR_PATH_MTU ++ QP_ATTR_TIMEOUT ++ QP_ATTR_RETRY_COUNT ++ QP_ATTR_RNR_RETRY +QP_ATTR_QP_OUS_RD_ATOM + +- QP_ATTR_ALT_PATH + ++ QP_ATTR_MIN_RNR_TIMER +QP_ATTR_SQ_PSN +QP_ATTR_OUS_DST_RD_ATOM +QP_ATTR_PATH_MIG_STATE +QP_ATTR_CAP +#endif + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN HH_hca_hndl_t hh_hndl, + IN ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT VAPI_qp_attr_t *qp_attr_p, + OUT VAPI_qp_attr_mask_t *attr_mask_p) +{ + + qp_attr_p->qp_state = mlnx_map_ibal_qp_state(modify_attr_p->req_state); + *attr_mask_p = QP_ATTR_QP_STATE; + + switch(modify_attr_p->req_state) + { + case IB_QPS_RESET: + break; + + case IB_QPS_INIT: + *attr_mask_p |= QP_ATTR_PORT | + QP_ATTR_QKEY | + QP_ATTR_PKEY_IX ; + + qp_attr_p->port = modify_attr_p->state.init.primary_port; + qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey); + qp_attr_p->pkey_ix = modify_attr_p->state.init.pkey_index; + if (IB_QPT_RELIABLE_CONN == qp_type) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.init.access_ctrl); + } else + { + qp_attr_p->remote_atomic_flags = 0; + } + break; + + case IB_QPS_RTR: + /* VAPI doesn't support modifying the WQE depth ever. 
*/ + if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + + *attr_mask_p |= QP_ATTR_RQ_PSN | + QP_ATTR_DEST_QP_NUM | + QP_ATTR_QP_OUS_RD_ATOM | + QP_ATTR_MIN_RNR_TIMER | + QP_ATTR_AV ; + + qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn); + qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp); + qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rtr.resp_res; + + qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout; + +#if 1 + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("modify_qp: hh_dest 0x%x = ibal_dest 0x%x\n", + qp_attr_p->dest_qp_num, modify_attr_p->state.rtr.dest_qp)); +#endif + + // Convert primary RC AV (mandatory) + cl_memclr(&qp_attr_p->av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rtr.primary_av, &qp_attr_p->av); + + if (IB_QPT_RELIABLE_CONN == qp_type) + { + *attr_mask_p |= QP_ATTR_PATH_MTU; + qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU + *attr_mask_p |= QP_ATTR_TIMEOUT; + qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // XXX: conv + *attr_mask_p |= QP_ATTR_RETRY_COUNT; + qp_attr_p->retry_count = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; + *attr_mask_p |= QP_ATTR_RNR_RETRY; + qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; + } + + // Convert Remote Atomic Flags + if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rtr.access_ctrl); + } + + // Convert alternate RC AV + if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) + { + *attr_mask_p |= QP_ATTR_ALT_PATH; + cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_av); + + if (IB_QPT_RELIABLE_CONN == qp_type) + { + qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv +#if 0 + /* Incompliant with spec 1.1! Data already set before */ + qp_attr_p->retry_count = modify_attr_p->state.rtr.alternate_av.conn.seq_err_retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rtr.alternate_av.conn.rnr_retry_cnt; +#endif + } + } + break; + + case IB_QPS_RTS: + /* VAPI doesn't support modifying the WQE depth ever. 
*/ + if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH ) + { + return IB_UNSUPPORTED; + } + + *attr_mask_p |= QP_ATTR_SQ_PSN | + QP_ATTR_RETRY_COUNT | + QP_ATTR_RNR_RETRY | + QP_ATTR_TIMEOUT| + QP_ATTR_OUS_DST_RD_ATOM | + QP_ATTR_MIN_RNR_TIMER; + + qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn); + + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) + { + *attr_mask_p |= QP_ATTR_REMOTE_ATOMIC_FLAGS; + qp_attr_p->remote_atomic_flags = map_ibal_qp_acl(modify_attr_p->state.rts.access_ctrl); + } + + qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv + qp_attr_p->ous_dst_rd_atom = modify_attr_p->state.rts.init_depth; + qp_attr_p->retry_count = modify_attr_p->state.rts.retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt; + qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout; + + // Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS) + if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) { + *attr_mask_p |= QP_ATTR_QP_OUS_RD_ATOM; + qp_attr_p->qp_ous_rd_atom = modify_attr_p->state.rts.resp_res; + } + + // Convert alternate RC AV + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) + { + *attr_mask_p |= QP_ATTR_ALT_PATH; + cl_memclr(&qp_attr_p->alt_av, sizeof(VAPI_ud_av_t)); + mlnx_conv_ibal_av(hh_hndl, + &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_av); + if (IB_QPT_RELIABLE_CONN == qp_type) + { + qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv +#if 0 + /* Incompliant with spec 1.1! Data already set before */ + qp_attr_p->retry_count = modify_attr_p->state.rts.alternate_av.conn.seq_err_retry_cnt; + qp_attr_p->rnr_retry = modify_attr_p->state.rts.alternate_av.conn.rnr_retry_cnt; +#endif + } + } + break; + + // TBD: The following are treated equally (SQ Drain) + case IB_QPS_SQD: + case IB_QPS_SQD_DRAINING: + case IB_QPS_SQD_DRAINED: + *attr_mask_p |= QP_ATTR_EN_SQD_ASYN_NOTIF; + qp_attr_p->en_sqd_asyn_notif = (MT_bool)modify_attr_p->state.sqd.sqd_event; + break; + + case IB_QPS_SQERR: + case IB_QPS_ERROR: + case IB_QPS_TIME_WAIT: + default: + break; + } + CL_TRACE(MLNX_DBG_QPN, g_mlnx_dbg_lvl, ("CL: conv_qp_modify: new state %d attr_mask 0x%x\n", qp_attr_p->qp_state, *attr_mask_p)); + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +static VAPI_wr_opcode_t +map_ibal_send_opcode( + IN ib_wr_type_t ibal_opcode, + IN boolean_t imm) +{ + VAPI_wr_opcode_t vapi_opcode; + + switch (ibal_opcode) + { + case WR_SEND: vapi_opcode = VAPI_SEND; + break; + case WR_RDMA_WRITE: vapi_opcode = VAPI_RDMA_WRITE; + break; + case WR_RDMA_READ: vapi_opcode = VAPI_RDMA_READ; + break; + case WR_COMPARE_SWAP: vapi_opcode = VAPI_ATOMIC_CMP_AND_SWP; + break; + case WR_FETCH_ADD: vapi_opcode = VAPI_ATOMIC_FETCH_AND_ADD; + break; + default: vapi_opcode = VAPI_SEND; + break; + } + if (imm && (VAPI_SEND == vapi_opcode || VAPI_RDMA_WRITE == vapi_opcode)) vapi_opcode++; + return vapi_opcode; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_send_desc( + IN IB_ts_t transport, + IN const ib_send_wr_t *ibal_send_wqe_p, + OUT VAPI_sr_desc_t *vapi_send_desc_p) +{ + boolean_t imm = FALSE; + u_int32_t idx; + register VAPI_sg_lst_entry_t *sg_lst_p; + register ib_local_ds_t *ds_array; + + + switch 
(transport) + { + case IB_TS_UD: + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "UD")); + { + mlnx_avo_t *avo_p = (mlnx_avo_t *)ibal_send_wqe_p->dgrm.ud.h_av; + + vapi_send_desc_p->remote_qp = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qp); + vapi_send_desc_p->remote_qkey = cl_ntoh32 (ibal_send_wqe_p->dgrm.ud.remote_qkey); + + if (!avo_p || avo_p->mark != E_MARK_AV) + return IB_INVALID_AV_HANDLE; + + vapi_send_desc_p->remote_ah = avo_p->h_av; // was ah.hhul + break; + } + + case IB_TS_RC: + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("mapping %s QP\n", "RC")); + // vapi_send_desc_p->remote_qp = 0; + // vapi_send_desc_p->remote_qkey = 0; + vapi_send_desc_p->remote_addr = ibal_send_wqe_p->remote_ops.vaddr; + vapi_send_desc_p->r_key = ibal_send_wqe_p->remote_ops.rkey; + vapi_send_desc_p->compare_add = ibal_send_wqe_p->remote_ops.atomic1; + vapi_send_desc_p->swap = ibal_send_wqe_p->remote_ops.atomic2; + break; + + default: // TBD: RAW, RD + return IB_UNSUPPORTED; + } + + imm = (0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_IMMEDIATE)); + vapi_send_desc_p->fence = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_FENCE)); + vapi_send_desc_p->set_se = (MT_bool)(0 != (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SOLICITED)); + vapi_send_desc_p->comp_type = (ibal_send_wqe_p->send_opt & IB_SEND_OPT_SIGNALED) ? +VAPI_SIGNALED : VAPI_UNSIGNALED; + + vapi_send_desc_p->id = ibal_send_wqe_p->wr_id; + vapi_send_desc_p->opcode = map_ibal_send_opcode(ibal_send_wqe_p->wr_type, imm); + + if (imm) + vapi_send_desc_p->imm_data = cl_ntoh32 (ibal_send_wqe_p->immediate_data); + + vapi_send_desc_p->sg_lst_len = ibal_send_wqe_p->num_ds; + + sg_lst_p = vapi_send_desc_p->sg_lst_p; + ds_array = ibal_send_wqe_p->ds_array; + for (idx = 0; idx < ibal_send_wqe_p->num_ds; idx++) + { + sg_lst_p->addr = ds_array->vaddr; + sg_lst_p->len = ds_array->length; + sg_lst_p->lkey = ds_array->lkey; + // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_send (conv) addr %Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey)); + sg_lst_p++; + ds_array++; + } + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("send: rqpn 0x%x rkey 0x%x\n", + vapi_send_desc_p->remote_qp, + vapi_send_desc_p->remote_qkey)); + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_conv_recv_desc( + IN const ib_recv_wr_t *ibal_recv_wqe_p, + OUT VAPI_rr_desc_t *vapi_recv_desc_p) +{ + u_int32_t idx; + register VAPI_sg_lst_entry_t *sg_lst_p; + register ib_local_ds_t *ds_array; + + vapi_recv_desc_p->id = ibal_recv_wqe_p->wr_id; + vapi_recv_desc_p->sg_lst_len = ibal_recv_wqe_p->num_ds; + vapi_recv_desc_p->opcode = VAPI_RECEIVE; + vapi_recv_desc_p->comp_type = VAPI_SIGNALED; + + sg_lst_p = vapi_recv_desc_p->sg_lst_p; + ds_array = ibal_recv_wqe_p->ds_array; + for (idx = 0; idx < ibal_recv_wqe_p->num_ds; idx++) + { + sg_lst_p->addr = ds_array->vaddr; + sg_lst_p->len = ds_array->length; + sg_lst_p->lkey = ds_array->lkey; + // CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("post_recv (conv) addr 0x%Lx size %d key 0x%x\n", sg_lst_p->addr, sg_lst_p->len, sg_lst_p->lkey)); + sg_lst_p++; + ds_array++; + } + + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +vapi_port_cap_to_ibal( + IN IB_port_cap_mask_t vapi_port_cap, + OUT ib_port_cap_t *ibal_port_cap_p) +{ + if (vapi_port_cap & IB_CAP_MASK_IS_CONN_MGMT_SUP) + ibal_port_cap_p->cm = TRUE; + if 
(vapi_port_cap & IB_CAP_MASK_IS_SNMP_TUNN_SUP) + ibal_port_cap_p->snmp = TRUE; + if (vapi_port_cap & IB_CAP_MASK_IS_DEVICE_MGMT_SUP) + ibal_port_cap_p->dev_mgmt = TRUE; + if (vapi_port_cap & IB_CAP_MASK_IS_VENDOR_CLS_SUP) + ibal_port_cap_p->vend = TRUE; + if (vapi_port_cap & IB_CAP_MASK_IS_SM_DISABLED) + ibal_port_cap_p->sm_disable = TRUE; + if (vapi_port_cap & IB_CAP_MASK_IS_SM) + ibal_port_cap_p->sm = TRUE; + if (vapi_port_cap & IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP) + ibal_port_cap_p->client_reregister= TRUE; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_conv_vapi_hca_cap( + IN HH_hca_dev_t *hca_info_p, + IN VAPI_hca_cap_t *vapi_hca_cap_p, + IN VAPI_hca_port_t *vapi_hca_ports, + OUT ib_ca_attr_t *ca_attr_p) +{ + u_int8_t port_num; + VAPI_hca_port_t *vapi_port_p; + ib_port_attr_t *ibal_port_p; + + ca_attr_p->vend_id = hca_info_p->vendor_id; + ca_attr_p->dev_id = (uint16_t)hca_info_p->dev_id; + ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver; + ca_attr_p->fw_ver = hca_info_p->fw_ver; + + ca_attr_p->ca_guid = *(UNALIGNED64 u_int64_t *)vapi_hca_cap_p->node_guid; + ca_attr_p->num_ports = vapi_hca_cap_p->phys_port_num; + ca_attr_p->max_qps = vapi_hca_cap_p->max_num_qp; + ca_attr_p->max_wrs = vapi_hca_cap_p->max_qp_ous_wr; + ca_attr_p->max_sges = vapi_hca_cap_p->max_num_sg_ent; + ca_attr_p->max_rd_sges = vapi_hca_cap_p->max_num_sg_ent_rd; + ca_attr_p->max_cqs = vapi_hca_cap_p->max_num_cq; + ca_attr_p->max_cqes = vapi_hca_cap_p->max_num_ent_cq; + ca_attr_p->max_pds = vapi_hca_cap_p->max_pd_num; + ca_attr_p->init_regions = vapi_hca_cap_p->max_num_mr; + ca_attr_p->init_windows = vapi_hca_cap_p->max_mw_num; + ca_attr_p->init_region_size = vapi_hca_cap_p->max_mr_size; + ca_attr_p->max_addr_handles = vapi_hca_cap_p->max_ah_num; + ca_attr_p->atomicity = vapi_hca_cap_p->atomic_cap; + ca_attr_p->max_partitions = vapi_hca_cap_p->max_pkeys; + ca_attr_p->max_qp_resp_res = vapi_hca_cap_p->max_qp_ous_rd_atom; + ca_attr_p->max_resp_res = vapi_hca_cap_p->max_res_rd_atom; + ca_attr_p->max_qp_init_depth = vapi_hca_cap_p->max_qp_init_rd_atom; + ca_attr_p->max_ipv6_qps = vapi_hca_cap_p->max_raw_ipv6_qp; + ca_attr_p->max_ether_qps = vapi_hca_cap_p->max_raw_ethy_qp; + ca_attr_p->max_mcast_grps = vapi_hca_cap_p->max_mcast_grp_num; + ca_attr_p->max_mcast_qps = vapi_hca_cap_p->max_total_mcast_qp_attach_num; + ca_attr_p->max_qps_per_mcast_grp = vapi_hca_cap_p->max_mcast_qp_attach_num; + ca_attr_p->local_ack_delay = vapi_hca_cap_p->local_ca_ack_delay; + ca_attr_p->bad_pkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_PKEY_COUNT_CAP; + ca_attr_p->bad_qkey_ctr_support = vapi_hca_cap_p->flags & VAPI_BAD_QKEY_COUNT_CAP; + ca_attr_p->raw_mcast_support = vapi_hca_cap_p->flags & VAPI_RAW_MULTI_CAP; + ca_attr_p->apm_support = vapi_hca_cap_p->flags & VAPI_AUTO_PATH_MIG_CAP; + ca_attr_p->av_port_check = vapi_hca_cap_p->flags & VAPI_UD_AV_PORT_ENFORCE_CAP; + ca_attr_p->change_primary_port = vapi_hca_cap_p->flags & VAPI_CHANGE_PHY_PORT_CAP; + ca_attr_p->modify_wr_depth = vapi_hca_cap_p->flags & VAPI_RESIZE_OUS_WQE_CAP; + ca_attr_p->hw_agents = FALSE; // in the context of IBAL then agent is implemented on the host + + ca_attr_p->num_page_sizes = 1; + ca_attr_p->p_page_size[0] = PAGESIZE; // TBD: extract an array of page sizes from HCA cap + + for (port_num = 0; port_num < vapi_hca_cap_p->phys_port_num; port_num++) + { + // Setup port pointers + ibal_port_p = &ca_attr_p->p_port_attr[port_num]; + vapi_port_p = &vapi_hca_ports[port_num]; + 
+		// Port Capabilities
+		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
+		vapi_port_cap_to_ibal(vapi_port_p->capability_mask, &ibal_port_p->cap);
+
+		// Port Attributes
+		ibal_port_p->port_num = port_num + 1;
+		ibal_port_p->port_guid = ibal_port_p->p_gid_table[0].unicast.interface_id;
+		ibal_port_p->lid = cl_ntoh16(vapi_port_p->lid);
+		ibal_port_p->lmc = vapi_port_p->lmc;
+		ibal_port_p->max_vls = vapi_port_p->max_vl_num;
+		ibal_port_p->sm_lid = cl_ntoh16(vapi_port_p->sm_lid);
+		ibal_port_p->sm_sl = vapi_port_p->sm_sl;
+		ibal_port_p->link_state = (vapi_port_p->state != 0) ? (uint8_t)vapi_port_p->state : IB_LINK_DOWN;
+		ibal_port_p->num_gids = vapi_port_p->gid_tbl_len;
+		ibal_port_p->num_pkeys = vapi_port_p->pkey_tbl_len;
+		ibal_port_p->pkey_ctr = (uint16_t)vapi_port_p->bad_pkey_counter;
+		ibal_port_p->qkey_ctr = (uint16_t)vapi_port_p->qkey_viol_counter;
+		ibal_port_p->max_msg_size = vapi_port_p->max_msg_sz;
+		ibal_port_p->mtu = (u_int8_t)vapi_port_p->max_mtu;
+
+		ibal_port_p->subnet_timeout = 5;	// TBD: currently 128us
+		// ibal_port_p->local_ack_timeout = 3;	// TBD: currently ~32 usec
+#if 0
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Port %d port_guid 0x%"PRIx64"\n",
+			ibal_port_p->port_num, ibal_port_p->port_guid));
+#endif
+	}
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+ib_api_status_t
+mlnx_get_hca_pkey_tbl(
+	IN	HH_hca_hndl_t	hh_hndl,
+	IN	u_int8_t		port_num,
+	IN	u_int16_t		num_entries,
+	OUT	void*			table_p)
+{
+	u_int16_t	size;
+	ib_net16_t	*pkey_p;
+
+	if (HH_OK != THH_hob_get_pkey_tbl( hh_hndl, port_num, num_entries, &size, table_p))
+		return IB_ERROR;
+
+	pkey_p = (ib_net16_t *)table_p;
+#if 0
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d pkey0 0x%x pkey1 0x%x\n", port_num, pkey_p[0], pkey_p[1]));
+#endif
+	return IB_SUCCESS;
+}
+
+ib_api_status_t
+mlnx_get_hca_gid_tbl(
+	IN	HH_hca_hndl_t	hh_hndl,
+	IN	u_int8_t		port_num,
+	IN	u_int16_t		num_entries,
+	OUT	void*			table_p)
+{
+	u_int16_t	size;
+
+	if (HH_OK != THH_hob_get_gid_tbl( hh_hndl, port_num, num_entries, &size, table_p))
+		return IB_ERROR;
+
+	return IB_SUCCESS;
+}
diff --git a/branches/Ndi/hw/mt23108/kernel/hca_data.h b/branches/Ndi/hw/mt23108/kernel/hca_data.h
new file mode 100644
index 00000000..e5248975
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/kernel/hca_data.h
@@ -0,0 +1,608 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef __HCA_DATA_H__ +#define __HCA_DATA_H__ + + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern u_int32_t g_mlnx_dbg_lvl; +extern uint32_t g_sqp_max_avs; +extern char mlnx_uvp_lib_name[]; + +#define MLNX_DBG_INFO (1<<1) +#define MLNX_DBG_TRACE (1<<2) +#define MLNX_DBG_VERBOSE (1<<3) +// for data path debugging +#define MLNX_DBG_DIRECT (1<<4) +#define MLNX_DBG_QPN (1<<5) +#define MLNX_DBG_MEM (1<<6) + +#define MLNX_MAX_HCA 4 +#define MLNX_NUM_HOBKL MLNX_MAX_HCA +#define MLNX_NUM_HOBUL MLNX_MAX_HCA +#define MLNX_NUM_CB_THR 1 +#define MLNX_SIZE_CB_POOL 256 +#define MLNX_UAL_ALLOC_HCA_UL_RES 1 +#define MLNX_UAL_FREE_HCA_UL_RES 2 + + +// Defines for QP ops +#define MLNX_MAX_NUM_SGE 8 +#define MLNX_MAX_WRS_PER_CHAIN 4 + +#define MLNX_NUM_RESERVED_QPS 16 + +/* + * Completion model. + * 0: No DPC processor assignment + * 1: DPCs per-CQ, processor affinity set at CQ initialization time. + * 2: DPCs per-CQ, processor affinity set at runtime. + * 3: DPCs per-CQ, no processor affinity set. + */ +#define MLNX_COMP_MODEL 3 + +#define PD_HCA_FROM_HNDL(hndl) (((pd_info_t *)hndl)->hca_idx) +#define PD_NUM_FROM_HNDL(hndl) (((pd_info_t *)hndl)->pd_num) +#define CQ_HCA_FROM_HNDL(hndl) (((cq_info_t *)hndl)->hca_idx) +#define CQ_NUM_FROM_HNDL(hndl) (((cq_info_t *)hndl)->cq_num) +#define QP_HCA_FROM_HNDL(hndl) (((qp_info_t *)hndl)->hca_idx) +#define QP_NUM_FROM_HNDL(hndl) (((qp_info_t *)hndl)->qp_num) + +#define PD_HNDL_FROM_PD(pd_num) (&hobul_p->pd_info_tbl[pd_num]) +#define CQ_HNDL_FROM_CQ(cq_num) (&hobul_p->cq_info_tbl[cq_num]) +#define QP_HNDL_FROM_QP(qp_num) (&hobul_p->qp_info_tbl[qp_num]) + +#ifdef _DEBUG_ +#define VALIDATE_INDEX(index, limit, error, label) \ + { \ + if (index >= limit) \ + { \ + status = error; \ + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); \ + goto label; \ + } \ + } +#else +#define VALIDATE_INDEX(index, limit, error, label) +#endif + + + +// Typedefs + +typedef enum { + E_EV_CA=1, + E_EV_QP, + E_EV_CQ, + E_EV_LAST +} ENUM_EVENT_CLASS; + +typedef enum { + E_MARK_CA=1, // Channel Adaptor + E_MARK_PD, // Protection Domain + E_MARK_CQ, // Completion Queue + E_MARK_QP, // Queue Pair + E_MARK_AV, // Address Vector (UD) + E_MARK_MG, // Multicast Group + E_MARK_MR, // Memory Region + E_MARK_MW, // Memory Windows + E_MARK_INVALID, +} ENUM_MARK; + +typedef enum { + E_MR_PHYS=1, + E_MR_SHARED, + E_MR_ANY, + E_MR_INVALID +} ENUM_MR_TYPE; + +/* + * Attribute cache for port info saved to expedite local MAD processing. + * Note that the cache accounts for the worst case GID and PKEY table size + * but is allocated from paged pool, so it's nothing to worry about. 
+ */
+
+typedef struct _guid_block
+{
+	boolean_t		valid;
+	ib_guid_info_t	tbl;
+
+} mlnx_guid_block_t;
+
+typedef struct _port_info_cache
+{
+	boolean_t		valid;
+	ib_port_info_t	info;
+
+} mlnx_port_info_cache_t;
+
+typedef struct _pkey_block
+{
+	boolean_t		valid;
+	ib_pkey_table_t	tbl;
+
+} mlnx_pkey_block_t;
+
+typedef struct _sl_vl_cache
+{
+	boolean_t		valid;
+	ib_slvl_table_t	tbl;
+
+} mlnx_sl_vl_cache_t;
+
+typedef struct _vl_arb_block
+{
+	boolean_t			valid;
+	ib_vl_arb_table_t	tbl;
+
+} mlnx_vl_arb_block_t;
+
+typedef struct _attr_cache
+{
+	mlnx_guid_block_t		guid_block[32];
+	mlnx_port_info_cache_t	port_info;
+	mlnx_pkey_block_t		pkey_tbl[2048];
+	mlnx_sl_vl_cache_t		sl_vl;
+	mlnx_vl_arb_block_t		vl_arb[4];
+
+} mlnx_cache_t;
+
+typedef struct _ib_ca {
+	ENUM_MARK			mark;
+	HH_hca_hndl_t		hh_hndl;
+	ci_completion_cb_t	comp_cb_p;
+	ci_async_event_cb_t	async_cb_p;
+	const void			*ca_context;
+	void				*cl_device_h;
+	u_int32_t			index;
+	cl_async_proc_t		*async_proc_mgr_p;
+	mlnx_cache_t		*cache;	// Cached port attributes.
+	const void * __ptr64	p_dev_obj;	// store underlying device object
+} mlnx_hob_t;
+
+typedef struct _ib_um_ca
+{
+	MDL				*p_mdl;
+	void			*p_mapped_addr;
+	HH_hca_hndl_t	hh_hndl;
+	mlnx_hob_t		*hob_p;
+	/* The next two fields must be grouped together as they are mapped to UM. */
+	HH_hca_dev_t	dev_info;
+	uint8_t			ul_hca_res[1];	// Beginning of UL resource buffer.
+} mlnx_um_ca_t;
+
+typedef struct {
+	cl_async_proc_item_t	async_item;
+	HH_hca_hndl_t			hh_hndl;
+	HH_cq_hndl_t			hh_cq;	// for completion
+	HH_event_record_t		hh_er;	// for async events
+	void					*private_data;
+} mlnx_cb_data_t;
+
+typedef struct {
+	cl_list_item_t	list_item;
+	HH_hca_hndl_t	hh_hndl;
+	struct _hca_if {
+		HH_hca_hndl_t	hh_hndl;
+		void *			kernel_crspace_addr;
+		ULONG			kernel_crspace_size;
+	} s;
+//	char			*hca_name_p;
+	net64_t			guid;
+	const void* __ptr64	p_dev_obj;	// hca device object
+//	ci_interface_t	ifx;
+} mlnx_hca_t;
+
+typedef struct _ib_pd {	/* struct of PD related resources */
+	ENUM_MARK		mark;
+	cl_mutex_t		mutex;
+	u_int32_t		kernel_mode;
+	atomic32_t		count;
+	u_int32_t		hca_idx;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HH_pd_hndl_t	pd_num;	/* For HH-UL direct access */
+	HHUL_pd_hndl_t	hhul_pd_hndl;
+	void			*pd_ul_resources_p;
+} pd_info_t;
+
+typedef struct _ib_cq {	/* struct of CQ related resources */
+	ENUM_MARK		mark;
+	cl_mutex_t		mutex;
+	u_int32_t		hca_idx;
+	u_int32_t		kernel_mode;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HH_cq_hndl_t	cq_num;	/* For HH-UL direct access */
+//	HH_pd_hndl_t	pd_num;	/* For HH-UL direct access */
+	HHUL_cq_hndl_t	hhul_cq_hndl;
+	void			*cq_ul_resources_p;
+	const void		*cq_context;
+	KDPC			dpc;
+	atomic32_t		spl_qp_cnt;
+
+} cq_info_t;
+
+typedef struct _ib_qp {
+	ENUM_MARK		mark;
+	cl_mutex_t		mutex;
+	u_int32_t		hca_idx;
+	u_int32_t		kernel_mode;
+	// mlnx_hob_t	*hob_p;
+	HH_hca_hndl_t	hh_hndl;	/* For HH direct access */
+	HHUL_qp_hndl_t	hhul_qp_hndl;
+	IB_wqpn_t		qp_num;	/* For direct HH-UL access */
+	HH_pd_hndl_t	pd_num;	/* For HH-UL direct access */
+	IB_port_t		port;	/* Valid for special QPs only */
+	ib_qp_type_t	qp_type;	// Required for qp_query
+	u_int32_t		sq_signaled;	// Required for qp_query
+	ib_cq_handle_t	h_sq_cq;
+	ib_cq_handle_t	h_rq_cq;
+	u_int32_t		sq_size;
+	u_int32_t		rq_size;
+	VAPI_sr_desc_t	*send_desc_p;
+	VAPI_rr_desc_t	*recv_desc_p;
+	VAPI_sg_lst_entry_t	*send_sge_p;
+	VAPI_sg_lst_entry_t	*recv_sge_p;
+	void			*qp_ul_resources_p;
+	const void		*qp_context;
+} qp_info_t;
+
+typedef struct HOBUL_t
{ + HH_hca_hndl_t hh_hndl; /* For HH direct access */ + HHUL_hca_hndl_t hhul_hndl; /* user level HCA resources handle for HH */ + u_int32_t cq_idx_mask; /* */ + u_int32_t qp_idx_mask; /* */ + u_int32_t vendor_id; /* \ */ + u_int32_t device_id; /* > 3 items needed for initializing user level */ + void *hca_ul_resources_p; /* / */ + MT_size_t cq_ul_resources_sz; /* Needed for allocating user resources for CQs */ + MT_size_t qp_ul_resources_sz; /* Needed for allocating user resources for QPs */ + MT_size_t pd_ul_resources_sz; /* Needed for allocating user resources for PDs */ + u_int32_t max_cq; /* Max num. of CQs - size of following table */ + cq_info_t *cq_info_tbl; + u_int32_t max_qp; /* Max num. of QPs - size of following table */ + qp_info_t *qp_info_tbl; + u_int32_t max_pd; /* Max num. of PDs - size of following table */ + pd_info_t *pd_info_tbl; + u_int32_t log2_mpt_size; + atomic32_t count; +} mlnx_hobul_t, *mlnx_hobul_hndl_t; + +typedef struct _ib_mr { + ENUM_MARK mark; + ENUM_MR_TYPE mr_type; + u_int64_t mr_start; // TBD: IA64 + u_int64_t mr_size; // TBD: IA64 +// u_int64_t mr_first_page_addr; // TBD : IA64 +// u_int32_t mr_num_pages; + ib_pd_handle_t mr_pd_handle; + MOSAL_iobuf_t mr_iobuf; + VAPI_mrw_acl_t mr_acl; + VAPI_lkey_t mr_lkey; + MOSAL_protection_ctx_t mr_prot_ctx; + MOSAL_mem_perm_t mr_mosal_perm; +} mlnx_mro_t; + +typedef struct _ib_mw { + ENUM_MARK mark; + u_int32_t hca_idx; + u_int32_t pd_idx; + u_int32_t mw_rkey; +} mlnx_mwo_t; + +typedef struct _ib_mcast { + ENUM_MARK mark; + IB_gid_t mcast_gid; + u_int32_t hca_idx; + u_int32_t qp_num; + u_int32_t kernel_mode; +} mlnx_mcast_t; + +typedef struct _ib_av { + ENUM_MARK mark; + u_int32_t hca_idx; + u_int32_t pd_idx; + u_int32_t user_mode; + HHUL_ud_av_hndl_t h_av; +} mlnx_avo_t; + +typedef mlnx_hob_t *mlnx_hca_h; + +// Global Variables +//extern mlnx_hca_t mlnx_hca_array[]; +//extern uint32_t mlnx_num_hca; + +extern mlnx_hob_t mlnx_hob_array[]; +extern mlnx_hobul_t *mlnx_hobul_array[]; + +// Functions +void +setup_ci_interface( + IN const ib_net64_t ca_guid, + OUT ci_interface_t *p_interface ); + +void +mlnx_hca_insert( + IN mlnx_hca_t *p_hca ); + +void +mlnx_hca_remove( + IN mlnx_hca_t *p_hca ); + +mlnx_hca_t* +mlnx_hca_from_guid( + IN ib_net64_t guid ); + +mlnx_hca_t* +mlnx_hca_from_hh_hndl( + IN HH_hca_hndl_t hh_hndl ); + +/* +void +mlnx_names_from_guid( + IN ib_net64_t guid, + OUT char **hca_name_p, + OUT char **dev_name_p); +*/ + +cl_status_t +mlnx_hobs_init( void ); + +ib_api_status_t +mlnx_hobs_insert( + IN mlnx_hca_t *p_hca, + OUT mlnx_hob_t **hob_p); + +void +mlnx_hobs_get_handle( + IN mlnx_hob_t *hob_p, + OUT HH_hca_hndl_t *hndl_p); + +ib_api_status_t +mlnx_hobs_set_cb( + IN mlnx_hob_t *hob_p, + IN ci_completion_cb_t comp_cb_p, + IN ci_async_event_cb_t async_cb_p, + IN const void* const ib_context); + +ib_api_status_t +mlnx_hobs_get_context( + IN mlnx_hob_t *hob_p, + OUT void **context_p); + +ib_api_status_t +mlnx_hobs_create_device( + IN mlnx_hob_t *hob_p, + OUT char* dev_name); + +void +mlnx_hobs_remove( + IN mlnx_hob_t *hob_p); + +ib_api_status_t +mlnx_hobs_lookup( + IN HH_hca_hndl_t hndl, + OUT mlnx_hob_t **hca_p); + +mlnx_hobul_t * +mlnx_hobs_get_hobul( + IN mlnx_hob_t *hob_p); + +ib_api_status_t +mlnx_hobul_new( + IN mlnx_hob_t *hob_p, + IN HH_hca_hndl_t hh_hndl, + IN void *resources_p); + +void +mlnx_hobul_get( + IN mlnx_hob_t *hob_p, + OUT void **resources_p ); + +void +mlnx_hobul_delete( + IN mlnx_hob_t *hob_p); + +// Conversion Functions + +VAPI_mrw_acl_t +map_ibal_acl( + IN ib_access_t ibal_acl); + 
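+#if 0
+/*
+ * A minimal sketch (not part of the driver) of how the HOBUL tables above are
+ * indexed throughout the driver: the low bits of a CQ/QP number, masked with
+ * cq_idx_mask/qp_idx_mask, select the info-table slot, which is then
+ * bounds-checked against max_cq/max_qp before use. sample_cq_lookup is a
+ * hypothetical helper name.
+ */
+static cq_info_t*
+sample_cq_lookup(
+	IN	mlnx_hobul_t	*hobul_p,
+	IN	HH_cq_hndl_t	hh_cq )
+{
+	u_int32_t cq_idx = hh_cq & hobul_p->cq_idx_mask;	// low bits select the slot
+	if (cq_idx >= hobul_p->max_cq)
+		return NULL;	// out of range
+	return &hobul_p->cq_info_tbl[cq_idx];
+}
+#endif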
+ib_access_t +map_vapi_acl( + IN VAPI_mrw_acl_t vapi_acl); + +ib_api_status_t +mlnx_lock_region( + IN mlnx_mro_t *mro_p, + IN boolean_t um_call ); + +ib_api_status_t +mlnx_conv_ibal_mr_create( + IN u_int32_t pd_idx, + IN OUT mlnx_mro_t *mro_p, + IN VAPI_mr_change_t change_flags, + IN ib_mr_create_t const *p_mr_create, + IN boolean_t um_call, + OUT HH_mr_t *mr_props_p ); + +ib_api_status_t +mlnx_conv_ibal_pmr_create( + IN u_int32_t pd_idx, + IN mlnx_mro_t *mro_p, + IN ib_phys_create_t const *p_pmr_create, + OUT HH_mr_t *mr_props_p ); + +void +mlnx_conv_ibal_av( + IN HH_hca_hndl_t hh_hndl, + IN const ib_av_attr_t *ibal_av_p, + OUT VAPI_ud_av_t *vapi_av_p); + +void +mlnx_conv_vapi_av( + IN HH_hca_hndl_t hh_hndl, + IN const VAPI_ud_av_t *vapi_av_p, + OUT ib_av_attr_t *ibal_av_p); + +int +mlnx_map_vapi_cqe_status( + IN VAPI_wc_status_t vapi_status); + +int +mlnx_map_vapi_cqe_type( + IN VAPI_cqe_opcode_t opcode); + +int +mlnx_map_vapi_rna_type( + IN VAPI_remote_node_addr_type_t rna); + +void +mlnx_conv_vapi_mr_attr( + IN ib_pd_handle_t pd_h, + IN HH_mr_info_t *mr_info_p, + OUT ib_mr_attr_t *mr_query_p); + +void +mlnx_conv_bind_req( + IN HHUL_qp_hndl_t hhul_qp_hndl, + IN ib_bind_wr_t* const p_mw_bind, + OUT HHUL_mw_bind_t *bind_prop_p); + +int +mlnx_map_ibal_qp_type( + IN ib_qp_type_t ibal_qpt, + OUT VAPI_special_qp_t *vapi_qp_type_p); + +void +mlnx_conv_qp_create_attr( + IN const ib_qp_create_t *create_attr_p, + IN HHUL_qp_init_attr_t *init_attr_p, + OUT VAPI_special_qp_t *vapi_qp_type_p); + +void +mlnx_conv_vapi_qp_attr( + IN HH_hca_hndl_t hh_hndl, + IN VAPI_qp_attr_t *hh_qp_attr_p, + OUT ib_qp_attr_t *qp_attr_p); + +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN HH_hca_hndl_t hh_hndl, + IN ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT VAPI_qp_attr_t *qp_attr_p, + OUT VAPI_qp_attr_mask_t *attr_mask_p); + +ib_api_status_t +mlnx_conv_send_desc( + IN IB_ts_t transport, + IN const ib_send_wr_t *ibal_send_wqe_p, + OUT VAPI_sr_desc_t *vapi_send_desc_p); + +ib_api_status_t +mlnx_conv_recv_desc( + IN const ib_recv_wr_t *ibal_recv_wqe_p, + OUT VAPI_rr_desc_t *vapi_recv_desc_p); + +void +mlnx_conv_vapi_hca_cap( + IN HH_hca_dev_t *hca_info_p, + IN VAPI_hca_cap_t *vapi_hca_cap_p, + IN VAPI_hca_port_t *vapi_hca_ports, + OUT ib_ca_attr_t *ca_attr_p); + +ib_api_status_t +mlnx_get_hca_pkey_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table); + +ib_api_status_t +mlnx_get_hca_gid_tbl( + IN HH_hca_hndl_t hh_hndl, + IN u_int8_t port_num, + IN u_int16_t num_entries, + OUT void* table); + +ib_api_status_t +mlnx_local_mad ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_av_attr_t *p_av_src_attr, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ); + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_ecc_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ); + +ib_api_status_t +fw_access_ctrl( + IN const void* __ptr64 context, + IN const void* __ptr64* const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL); + +#endif diff --git a/branches/Ndi/hw/mt23108/kernel/hca_debug.h b/branches/Ndi/hw/mt23108/kernel/hca_debug.h new file mode 100644 index 00000000..ea46a547 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_debug.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. 
All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( _HCA_DEBUG_H_ ) +#define _HCA_DEBUG_H_ + +#include + + +#define HCA_DBG_DEV (1 << 0) +#define HCA_DBG_PNP (1 << 1) +#define HCA_DBG_PO (1 << 2) + +#define HCA_DBG_ERROR CL_DBG_ERROR +#define HCA_DBG_FULL CL_DBG_ALL + + +extern uint32_t g_mlnx_dbg_lvl; + + +#define HCA_ENTER( msg_lvl ) \ + CL_ENTER( msg_lvl, g_mlnx_dbg_lvl ) + +#define HCA_EXIT( msg_lvl ) \ + CL_EXIT( msg_lvl, g_mlnx_dbg_lvl ) + +#define HCA_TRACE( msg_lvl, msg ) \ + CL_TRACE( msg_lvl, g_mlnx_dbg_lvl, msg ) + +#define HCA_TRACE_EXIT( msg_lvl, msg ) \ + CL_TRACE_EXIT( msg_lvl, g_mlnx_dbg_lvl, msg ) + +#define HCA_PRINT( msg_lvl, msg ) \ + CL_PRINT( msg_lvl, g_mlnx_dbg_lvl, msg ) + +#endif /* !defined( _HCA_DEBUG_H_ ) */ + + diff --git a/branches/Ndi/hw/mt23108/kernel/hca_direct.c b/branches/Ndi/hw/mt23108/kernel/hca_direct.c new file mode 100644 index 00000000..7bbdc3b2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_direct.c @@ -0,0 +1,598 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "hca_data.h" + + +/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. 
*/ +#define MLNX_SEND_NATIVE 1 +#define MLNX_RECV_NATIVE 1 +#define MLNX_POLL_NATIVE 1 + + +/* +* Work Request Processing Verbs. +*/ +ib_api_status_t +mlnx_post_send ( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t *p_send_wr, + OUT ib_send_wr_t **pp_failed ) +{ + ib_api_status_t status = IB_SUCCESS; + qp_info_t *qp_info_p = (qp_info_t *)h_qp; + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; +#if !MLNX_SEND_NATIVE + HH_ret_t ret; + VAPI_sr_desc_t send_desc; + VAPI_special_qp_t vapi_qp_type; + IB_ts_t transport; + ib_send_wr_t *wqe_p; +#endif + + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", qp_info_p->hca_idx, qp_info_p->qp_num)); + if( !p_send_wr ) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!qp_info_p || E_MARK_QP != qp_info_p->mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[qp_info_p->hca_idx]; + + CL_ASSERT(hobul_p); + CL_ASSERT(hobul_p->qp_info_tbl); + + qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#if MLNX_SEND_NATIVE + return THHUL_qpm_post_send_wrs( hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, p_send_wr, pp_failed ); +#else + // Assuming that posting all WQE will succeed. Errors are handled below. + *pp_failed = NULL; + + // Loop and post all descriptors in list, bail out on failure. + transport = mlnx_map_ibal_qp_type(qp_info_p->qp_type, &vapi_qp_type); + + if (VAPI_REGULAR_QP != vapi_qp_type) + { + memset(&send_desc, 0, sizeof(send_desc)); + // send_desc.sg_lst_p = &sg_lst_a[0]; + send_desc.sg_lst_p = hobul_p->qp_info_tbl[qp_idx].send_sge_p; + if (!send_desc.sg_lst_p) { + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", qp_info_p->hca_idx, qp_idx)); + } + CL_ASSERT(send_desc.sg_lst_p); + for (wqe_p = p_send_wr; wqe_p; wqe_p = wqe_p->p_next) + { + // sq_size is a misnomer, it is really max_sge + CL_ASSERT( hobul_p->qp_info_tbl[qp_idx].sq_size >= wqe_p->num_ds); + + status = mlnx_conv_send_desc( transport, wqe_p, &send_desc); + if (IB_SUCCESS != status) break; + + if (HH_OK != (ret = THHUL_qpm_post_send_req(hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + &send_desc))) + { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE : + IB_ERROR; + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } + } + else { + // For regular QP use real send multiple + VAPI_sr_desc_t desc_list[MLNX_MAX_WRS_PER_CHAIN]; + VAPI_sg_lst_entry_t sg_list[MLNX_MAX_WRS_PER_CHAIN][MLNX_MAX_NUM_SGE]; + u_int32_t num_wrs; + + wqe_p = p_send_wr; + while (wqe_p) { + for (num_wrs = 0; (num_wrs < MLNX_MAX_WRS_PER_CHAIN) && wqe_p; + wqe_p = wqe_p->p_next, num_wrs++) + { + desc_list[num_wrs].sg_lst_p = &sg_list[num_wrs][0]; + status = mlnx_conv_send_desc(transport, wqe_p, &desc_list[num_wrs]); + if (status != IB_SUCCESS) { + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, + ("FAILED to map the send_desc %d\n", num_wrs)); + break; + } + } + if (num_wrs > 0) { + if (num_wrs > 1) { + ret = THHUL_qpm_post_send_reqs(hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + num_wrs, desc_list); + } else { + ret = THHUL_qpm_post_send_req(hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + desc_list); + } + if (HH_OK != ret) { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? 
IB_INVALID_MAX_SGE : + IB_ERROR; + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } else { + /* no work requests this round */ + CL_TRACE (MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("NO WRs\n")); + *pp_failed = wqe_p; + break; + } + } + } + + return status; +#endif + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +ib_api_status_t +mlnx_post_recv ( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed OPTIONAL ) +{ + ib_api_status_t status = IB_SUCCESS; + qp_info_t *qp_info_p = (qp_info_t *)h_qp; + + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; +#if !MLNX_RECV_NATIVE + HH_ret_t ret; + ib_recv_wr_t *wqe_p; + IB_ts_t transport; + VAPI_rr_desc_t recv_desc; + VAPI_special_qp_t vapi_qp_type; +#endif + + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %x qp %x\n", + // qp_info_p->hca_idx, qp_info_p->qp_num)); + if( !p_recv_wr ) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!qp_info_p || E_MARK_QP != qp_info_p->mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(qp_info_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[qp_info_p->hca_idx]; + + CL_ASSERT(hobul_p); + CL_ASSERT(hobul_p->qp_info_tbl); + + qp_idx = qp_info_p->qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#if MLNX_RECV_NATIVE + return THHUL_qpm_post_recv_wrs( hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl, + p_recv_wr, pp_failed ); +#else + // Assuming that posting all WQE will succeed. Errors are handled below. + *pp_failed = NULL; + + // Loop and post all descriptors in list, bail out on failure. + transport = mlnx_map_ibal_qp_type(qp_info_p->qp_type, &vapi_qp_type); + + if (VAPI_REGULAR_QP != vapi_qp_type) + { + memset(&recv_desc, 0, sizeof(recv_desc)); + recv_desc.sg_lst_p = hobul_p->qp_info_tbl[qp_idx].recv_sge_p; + for (wqe_p = p_recv_wr; wqe_p; wqe_p = wqe_p->p_next) + { + // rq_size is a misnomer, it is really max_sge + CL_ASSERT( hobul_p->qp_info_tbl[qp_idx].rq_size >= wqe_p->num_ds); + + mlnx_conv_recv_desc(wqe_p, &recv_desc); + if (HH_OK != (ret = THHUL_qpm_post_recv_req(hobul_p->hhul_hndl, qp_info_p->hhul_qp_hndl, &recv_desc))) + { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? IB_INVALID_MAX_SGE : + IB_ERROR; + + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } + } + else { + // For regular QP use real send multiple + VAPI_rr_desc_t desc_list[MLNX_MAX_WRS_PER_CHAIN]; + VAPI_sg_lst_entry_t sg_list[MLNX_MAX_WRS_PER_CHAIN][MLNX_MAX_NUM_SGE]; + u_int32_t num_wrs; + + wqe_p = p_recv_wr; + while (wqe_p) { + for (num_wrs = 0; (num_wrs < MLNX_MAX_WRS_PER_CHAIN) && wqe_p; + wqe_p = wqe_p->p_next, num_wrs++) + { + desc_list [num_wrs].sg_lst_p = &sg_list [num_wrs][0]; + status = mlnx_conv_recv_desc(wqe_p, &desc_list[num_wrs]); + if (status != IB_SUCCESS) { + CL_TRACE(MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, + ("FAILED to map the recv_desc %d\n", num_wrs)); + break; + } + } + // CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("num_wrs %d\n", num_wrs)); + if (num_wrs > 0) { + if (num_wrs > 1) { + ret = THHUL_qpm_post_recv_reqs (hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + num_wrs, desc_list); + } else { + ret = THHUL_qpm_post_recv_req (hobul_p->hhul_hndl, + qp_info_p->hhul_qp_hndl, + desc_list); + } + if (HH_OK != ret) { + status = (HH_EAGAIN == ret) ? IB_INSUFFICIENT_RESOURCES : + (HH_EINVAL_SG_NUM == ret) ? 
IB_INVALID_MAX_SGE : + IB_ERROR; + *pp_failed = wqe_p; + // wqe_p->p_next = NULL; + goto cleanup; + } + } else { + /* no work requests this round */ + CL_TRACE (MLNX_DBG_DIRECT, g_mlnx_dbg_lvl, ("NO WRs\n")); + *pp_failed = wqe_p; + break; + } + } + } + + return status; +#endif + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +/* +* Completion Processing and Completion Notification Request Verbs. +*/ + +ib_api_status_t +mlnx_peek_cq( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_n_cqes ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + status = THHUL_cqm_count_cqe( + hobul_p->hhul_hndl, hhul_cq_hndl, p_n_cqes ); + if( status != IB_SUCCESS ) + goto cleanup; + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +ib_api_status_t +mlnx_poll_cq ( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; +#if !MLNX_POLL_NATIVE + HH_ret_t ret; + VAPI_wc_desc_t comp_desc; + ib_wc_t *wc_p; +#endif + + if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + return THHUL_cqm_poll4wc(hobul_p->hhul_hndl, hhul_cq_hndl, + pp_free_wclist, pp_done_wclist ); + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +ib_api_status_t +mlnx_enable_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + VAPI_cq_notif_type_t hh_request; + + hh_request = (solicited) ? 
+ VAPI_SOLIC_COMP: /* Notify on solicited completion event only */ + VAPI_NEXT_COMP; /* Notify on next completion */ + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if (HH_OK != THHUL_cqm_req_comp_notif(hobul_p->hhul_hndl, hhul_cq_hndl, hh_request)) + { + status = IB_ERROR; + goto cleanup; + } + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->cq_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if (hobul_p->cq_info_tbl[cq_idx].cq_num != cq_num || + E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + + if (HH_OK != THHUL_cqm_req_ncomp_notif( + hobul_p->hhul_hndl, hhul_cq_hndl, n_cqes )) + { + status = IB_ERROR; + goto cleanup; + } + + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + return status; +} + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* const p_mw_bind, + OUT net32_t* const p_rkey ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + + u_int32_t hca_idx = QP_HCA_FROM_HNDL(h_qp); + u_int32_t qp_num = QP_NUM_FROM_HNDL(h_qp); + u_int32_t qp_idx = 0; + u_int32_t new_key; + mlnx_hobul_t *hobul_p; + mlnx_mwo_t *mwo_p; + HHUL_qp_hndl_t hhul_qp_hndl; + HHUL_mw_bind_t bind_props; + + // CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + if (!p_rkey) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_QP_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p || NULL == hobul_p->qp_info_tbl || NULL == hobul_p->pd_info_tbl) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + // CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_idx 0x%x\n", __LINE__, qp_idx)); + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("line %d - qp_num 0x%x valid %d\n", + __LINE__, + hobul_p->qp_info_tbl[qp_idx].qp_num, + E_MARK_QP == 
hobul_p->qp_info_tbl[qp_idx].mark)); +#endif + if (hobul_p->qp_info_tbl[qp_idx].qp_num != qp_num || + E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + /* Trap the RKEY passed in not matching. */ + if ( cl_ntoh32( p_mw_bind->current_rkey ) != mwo_p->mw_rkey ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl; + + mlnx_conv_bind_req(hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl, p_mw_bind, &bind_props); + + // Binding a window to zero length is in fact an unbinding + // IF unbinding, window rkey remains the same. + // IF binding, new r_key tag is the previous tag incremented by 1: + new_key = mwo_p->mw_rkey; + if( bind_props.size > 0 ) { + new_key += (1 << hobul_p->log2_mpt_size); + } + + if (HH_OK != THHUL_qpm_post_bind_req(&bind_props, new_key)) + { + status = IB_ERROR; + goto cleanup; + } + + *p_rkey = cl_hton32( new_key ); + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->post_send = mlnx_post_send; + p_interface->post_recv = mlnx_post_recv; + + p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify; + p_interface->peek_cq = mlnx_peek_cq; + p_interface->poll_cq = mlnx_poll_cq; + p_interface->enable_cq_notify = mlnx_enable_cq_notify; + + p_interface->bind_mw = mlnx_bind_mw; +} diff --git a/branches/Ndi/hw/mt23108/kernel/hca_driver.c b/branches/Ndi/hw/mt23108/kernel/hca_driver.c new file mode 100644 index 00000000..bc2de2f1 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_driver.c @@ -0,0 +1,1897 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Provides the driver entry points for the Tavor VPD. + */ + + +#include "hca_driver.h" +#include +#include +#pragma warning( push, 3 ) +#include "MdCard.h" +#pragma warning( pop ) +#include +#include + + +/* + * UVP name does not include file extension. For debug builds, UAL + * will append "d.dll". 
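+ * (e.g. the "mt23108u" value below loads as mt23108ud.dll)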
For release builds, UAL will append ".dll" + */ +char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mt23108u"}; + + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ); + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_Param_Path ); + +static void +hca_drv_unload( + IN PDRIVER_OBJECT p_driver_obj ); + +static NTSTATUS +hca_sysctl( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ); + +static NTSTATUS +hca_add_device( + IN PDRIVER_OBJECT p_driver_obj, + IN PDEVICE_OBJECT p_pdo ); +// +//static NTSTATUS +//hca_enable( +// IN DEVICE_OBJECT* const p_dev_obj ); +// +//static NTSTATUS +//hca_disable( +// IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +//static NTSTATUS +//hca_deactivate( +// IN DEVICE_OBJECT* const p_dev_obj, +// IN IRP* const p_irp, +// OUT cl_irp_action_t* const p_action ); +// +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__get_ci_interface( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +__get_hca_handle( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *p_dev_obj ); + +//static void +//__work_item_pnp_cb( +// IN DEVICE_OBJECT *p_dev_obj, +// IN hca_work_item_context_t *p_context ); + +static NTSTATUS +__pnp_notify_target( + IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +__pnp_notify_ifc( + IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_get_pci_bus_interface( + IN DEVICE_OBJECT *p_dev_obj, + OUT BUS_INTERFACE_STANDARD *p_BusInterface ); + +static NTSTATUS +fw_flash_write_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_read_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_get_ca_guid( + IN DEVICE_OBJECT *p_dev_obj, + OUT uint64_t *ca_guid ); + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data); + +static NTSTATUS +fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len); +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ); + +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (INIT, DriverEntry) +#pragma alloc_text (INIT, __read_registry) +#pragma alloc_text (PAGE, hca_drv_unload) +#pragma alloc_text (PAGE, hca_sysctl) +#pragma alloc_text (PAGE, hca_add_device) +#pragma alloc_text (PAGE, hca_start) +//#pragma alloc_text 
(PAGE, hca_deactivate)
+//#pragma alloc_text (PAGE, hca_enable)
+//#pragma alloc_text (PAGE, hca_disable)
+#pragma alloc_text (PAGE, hca_release_resources)
+#pragma alloc_text (PAGE, hca_query_bus_relations)
+#pragma alloc_text (PAGE, hca_set_power)
+#pragma alloc_text (PAGE, __alloc_hca_ifc)
+#pragma alloc_text (PAGE, __get_ci_interface)
+#pragma alloc_text (PAGE, __get_hca_handle)
+#pragma alloc_text (PAGE, __hca_register)
+//#pragma alloc_text (PAGE, __work_item_pnp_cb)
+#pragma alloc_text (PAGE, __pnp_notify_target)
+#pragma alloc_text (PAGE, __pnp_notify_ifc)
+#pragma alloc_text (PAGE, fw_flash_get_ca_guid)
+#endif
+
+
+static const cl_vfptr_pnp_po_t hca_vfptr_pnp = {
+ "Tavor HCA VPD",
+ hca_start, /* StartDevice */
+ cl_irp_skip,
+ cl_irp_skip,
+ cl_do_sync_pnp,
+ cl_irp_skip, /* QueryRemove */
+ hca_release_resources,
+ cl_do_remove, /* Remove */
+ cl_irp_skip, /* CancelRemove */
+ cl_irp_skip, /* SurpriseRemove */
+ cl_irp_skip,
+ cl_irp_skip,
+ cl_irp_skip,
+ cl_do_sync_pnp,
+ hca_query_bus_relations,
+ cl_irp_ignore,
+ cl_irp_skip,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore,
+ cl_irp_ignore, /* QueryPower */
+ hca_set_power, /* SetPower */
+ cl_irp_ignore, /* PowerSequence */
+ cl_irp_ignore /* WaitWake */
+};
+
+
+NTSTATUS
+DriverEntry(
+ IN PDRIVER_OBJECT p_driver_obj,
+ IN PUNICODE_STRING p_registry_path )
+{
+ NTSTATUS status;
+ cl_status_t cl_status;
+
+ HCA_ENTER( HCA_DBG_DEV );
+
+ status = CL_INIT;
+ if( !NT_SUCCESS(status) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("cl_init returned %08X.\n", status) );
+ return status;
+ }
+
+ status = __read_registry( p_registry_path );
+ if( !NT_SUCCESS( status ) )
+ {
+ CL_DEINIT;
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("__read_registry returned 0x%X.\n", status) );
+ return status;
+ }
+
+ /* Initialize Adapter DB */
+ cl_status = mlnx_hobs_init();
+ if( cl_status != CL_SUCCESS )
+ {
+ CL_DEINIT;
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("mlnx_hobs_init returned %s.\n", cl_status_text[cl_status]) );
+ return cl_to_ntstatus( cl_status );
+ }
+// cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) );
+
+ p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp;
+ p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power;
+ p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl;
+ p_driver_obj->DriverUnload = hca_drv_unload;
+ p_driver_obj->DriverExtension->AddDevice = hca_add_device;
+
+ HCA_EXIT( HCA_DBG_DEV );
+ return STATUS_SUCCESS;
+}
+
+
+static NTSTATUS
+__read_registry(
+ IN UNICODE_STRING* const p_registry_path )
+{
+ NTSTATUS status;
+ /* Remember the terminating entry in the table below. */
+ RTL_QUERY_REGISTRY_TABLE table[2];
+ UNICODE_STRING param_path;
+
+ HCA_ENTER( HCA_DBG_DEV );
+
+ RtlInitUnicodeString( &param_path, NULL );
+ param_path.MaximumLength = p_registry_path->Length +
+ sizeof(L"\\Parameters");
+ param_path.Buffer = cl_zalloc( param_path.MaximumLength );
+ if( !param_path.Buffer )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Failed to allocate parameters path buffer.\n") );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ RtlAppendUnicodeStringToString( &param_path, p_registry_path );
+ RtlAppendUnicodeToString( &param_path, L"\\Parameters" );
+
+ /*
+ * Clear the table. This clears all the query callback pointers,
+ * and sets up the terminating table entry.
+ */
+ cl_memclr( table, sizeof(table) );
+
+ /* Setup the table entries. 
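+ * A single RTL_QUERY_REGISTRY_DIRECT entry stores the DebugFlags value
+ * straight into g_mlnx_dbg_lvl; the Default* fields below make the current
+ * value the fallback when the key is absent.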
*/
+ table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+ table[0].Name = L"DebugFlags";
+ table[0].EntryContext = &g_mlnx_dbg_lvl;
+ table[0].DefaultType = REG_DWORD;
+ table[0].DefaultData = &g_mlnx_dbg_lvl;
+ table[0].DefaultLength = sizeof(ULONG);
+
+ /* Have at it! */
+ status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE,
+ param_path.Buffer, table, NULL, NULL );
+
+ cl_free( param_path.Buffer );
+ HCA_EXIT( HCA_DBG_DEV );
+ return status;
+}
+
+
+static void
+hca_drv_unload(
+ IN PDRIVER_OBJECT p_driver_obj )
+{
+ HCA_ENTER( HCA_DBG_DEV );
+
+ UNUSED_PARAM( p_driver_obj );
+
+ CL_DEINIT;
+
+ HCA_EXIT( HCA_DBG_DEV );
+}
+
+
+static NTSTATUS
+hca_sysctl(
+ IN PDEVICE_OBJECT p_dev_obj,
+ IN PIRP p_irp )
+{
+ NTSTATUS status;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_DEV );
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ IoSkipCurrentIrpStackLocation( p_irp );
+ status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );
+
+ HCA_EXIT( HCA_DBG_DEV );
+ return status;
+}
+
+
+static NTSTATUS
+hca_add_device(
+ IN PDRIVER_OBJECT p_driver_obj,
+ IN PDEVICE_OBJECT p_pdo )
+{
+ NTSTATUS status;
+ DEVICE_OBJECT *p_dev_obj, *p_next_do;
+ hca_dev_ext_t *p_ext;
+ //cl_status_t cl_status;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ /*
+ * Create the device so that we have a device extension to store stuff in.
+ */
+ status = IoCreateDevice( p_driver_obj, sizeof(hca_dev_ext_t),
+ NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,
+ FALSE, &p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("IoCreateDevice returned 0x%08X.\n", status) );
+ return status;
+ }
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ //cl_status = cl_event_init( &p_ext->mutex, FALSE );
+ //if( cl_status != CL_SUCCESS )
+ //{
+ // IoDeleteDevice( p_dev_obj );
+ // HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ // ("cl_mutex_init returned %s.\n", cl_status_text[status]) );
+ // return cl_to_ntstatus( status );
+ //}
+ //cl_event_signal( &p_ext->mutex );
+
+ /* Attach to the device stack. */
+ p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo );
+ if( !p_next_do )
+ {
+ //cl_event_destroy( &p_ext->mutex );
+ IoDeleteDevice( p_dev_obj );
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("IoAttachDeviceToDeviceStack failed.\n") );
+ return STATUS_NO_SUCH_DEVICE;
+ }
+
+ /* Initialize the complib extension. */
+ cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, g_mlnx_dbg_lvl,
+ &hca_vfptr_pnp, NULL );
+
+ p_ext->state = HCA_ADDED;
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static NTSTATUS
+__get_ci_interface(
+ IN DEVICE_OBJECT* const p_dev_obj )
+{
+ NTSTATUS status;
+ IRP *p_irp;
+ hca_dev_ext_t *p_ext;
+ IO_STATUS_BLOCK io_status;
+ IO_STACK_LOCATION *p_io_stack;
+ KEVENT event;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+ /* Query for the verbs interface. */
+ p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev,
+ NULL, 0, NULL, &event, &io_status );
+ if( !p_irp )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("IoBuildSynchronousFsdRequest failed.\n") );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Format the IRP. 
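+ * The request goes down as IRP_MN_QUERY_INTERFACE for GUID_IB_CI_INTERFACE;
+ * on success AL fills p_ext->ci_ifc with its verbs entry points, which stay
+ * referenced until ci_ifc.wdm.InterfaceDereference is called.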
*/
+ p_io_stack = IoGetNextIrpStackLocation( p_irp );
+ p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;
+ p_io_stack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION;
+ p_io_stack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t);
+ p_io_stack->Parameters.QueryInterface.Interface =
+ (INTERFACE*)&p_ext->ci_ifc;
+ p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = NULL;
+ p_io_stack->Parameters.QueryInterface.InterfaceType =
+ &GUID_IB_CI_INTERFACE;
+ p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+ /* Send the IRP. */
+ status = IoCallDriver( p_ext->p_al_dev, p_irp );
+ if( status == STATUS_PENDING )
+ {
+ KeWaitForSingleObject( &event, Executive, KernelMode,
+ FALSE, NULL );
+
+ status = io_status.Status;
+ }
+
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Query interface for verbs returned %08x.\n", status) );
+ return status;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static NTSTATUS
+__get_hca_handle(
+ IN hca_dev_ext_t* const p_ext )
+{
+ NTSTATUS status;
+ IRP *p_irp;
+ IO_STATUS_BLOCK io_status;
+ IO_STACK_LOCATION *p_io_stack;
+ KEVENT event;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+ /* Query for the HCA handle interface. */
+ p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->cl_ext.p_next_do,
+ NULL, 0, NULL, &event, &io_status );
+ if( !p_irp )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("IoBuildSynchronousFsdRequest failed.\n") );
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ /* Format the IRP. */
+ p_io_stack = IoGetNextIrpStackLocation( p_irp );
+ p_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;
+ p_io_stack->Parameters.QueryInterface.Version = 1;
+ p_io_stack->Parameters.QueryInterface.Size = 0;
+ p_io_stack->Parameters.QueryInterface.Interface = NULL;
+ {
+ void *p = &p_ext->hca.s;
+ memset( p, 0, sizeof(p_ext->hca.s) );
+ p_io_stack->Parameters.QueryInterface.InterfaceSpecificData = p;
+ }
+ p_io_stack->Parameters.QueryInterface.InterfaceType =
+ &GUID_MD_INTERFACE;
+ p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+ /* Send the IRP. */
+ status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp );
+ if( status == STATUS_PENDING )
+ {
+ KeWaitForSingleObject( &event, Executive, KernelMode,
+ FALSE, NULL );
+
+ status = io_status.Status;
+ }
+
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Query interface for HCA handle returned %08x.\n", status) );
+ return status;
+ }
+ p_ext->hca.hh_hndl = p_ext->hca.s.hh_hndl;
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static NTSTATUS
+__pnp_notify_target(
+ IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify,
+ IN void *context )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ DEVICE_OBJECT *p_dev_obj;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_dev_obj = context;
+ p_ext = p_dev_obj->DeviceExtension;
+
+ if( IsEqualGUID( &p_notify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) )
+ {
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ p_ext->state = HCA_STARTED;
+ }
+
+ /* Release AL's file object so that it can unload. 
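+ * Keeping the reference would pin AL's device stack in memory and defeat
+ * the query-remove.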
*/
+ CL_ASSERT( p_ext->p_al_file_obj );
+ CL_ASSERT( p_ext->p_al_file_obj == p_notify->FileObject );
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ }
+ else if( IsEqualGUID( &p_notify->Event,
+ &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) )
+ {
+ if( p_ext->state == HCA_REGISTERED )
+ {
+ /* Release AL's CI interface. */
+ p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context );
+ p_ext->state = HCA_STARTED;
+ }
+
+ /* Release AL's file object so that it can unload. */
+ if( p_ext->p_al_file_obj )
+ {
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ }
+
+ /* Cancel our target device change registration. */
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+ }
+ else if( IsEqualGUID( &p_notify->Event,
+ &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) )
+ {
+ /* Cancel our target device change registration. */
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+
+ /* Get the device object pointer for the AL. */
+ CL_ASSERT( !p_ext->p_al_file_obj );
+ CL_ASSERT( !p_ext->p_al_dev );
+ p_ext->p_al_file_obj = p_notify->FileObject;
+ p_ext->p_al_dev = IoGetRelatedDeviceObject( p_ext->p_al_file_obj );
+
+ status = IoRegisterPlugPlayNotification(
+ EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj,
+ p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj,
+ &p_ext->pnp_target_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status) );
+ return status;
+ }
+
+ __hca_register( p_dev_obj );
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return status;
+}
+
+
+static ci_interface_t*
+__alloc_hca_ifc(
+ IN hca_dev_ext_t* const p_ext )
+{
+ ci_interface_t *p_ifc;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ifc = ExAllocatePool( PagedPool, sizeof(ci_interface_t) );
+ if( !p_ifc )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Failed to allocate ci_interface_t (%d bytes).\n",
+ sizeof(ci_interface_t)) );
+ return NULL;
+ }
+
+ setup_ci_interface( p_ext->hca.guid, p_ifc );
+
+ p_ifc->p_hca_dev = p_ext->cl_ext.p_pdo;
+ p_ifc->vend_id = p_ext->hca.hh_hndl->vendor_id;
+ p_ifc->dev_id = (uint16_t)p_ext->hca.hh_hndl->dev_id;
+ p_ifc->dev_revision = (uint16_t)p_ext->hca.hh_hndl->hw_ver;
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return p_ifc;
+}
+
+
+static NTSTATUS
+__hca_register(
+ IN DEVICE_OBJECT *p_dev_obj )
+{
+ hca_dev_ext_t *p_ext;
+ NTSTATUS status;
+ ib_api_status_t ib_status;
+ ci_interface_t *p_hca_ifc;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ ASSERT( p_ext->state == HCA_STARTED );
+ ASSERT( p_ext->p_al_dev );
+
+ /* Get the AL's lower interface. */
+ status = __get_ci_interface( p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE( HCA_DBG_ERROR,
+ ("__get_ci_interface returned %08x.\n", status) );
+ return status;
+ }
+
+ /* Allocate and populate our HCA interface structure. */
+ p_hca_ifc = __alloc_hca_ifc( p_ext );
+ if( !p_hca_ifc )
+ {
+ HCA_TRACE( HCA_DBG_ERROR, ("__alloc_hca_ifc failed.\n") );
+ return STATUS_NO_MEMORY;
+ }
+
+ /* Notify AL that we're available... 
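+ * register_ca() takes its own copy of the interface structure, which is
+ * why p_hca_ifc can be handed back to ExFreePool immediately after the call.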
*/ + ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc ); + ExFreePool( p_hca_ifc ); + if( ib_status != IB_SUCCESS ) + { + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + return STATUS_INSUFFICIENT_RESOURCES; + } + + p_ext->state = HCA_REGISTERED; + return STATUS_SUCCESS; +} + + +//static void +//__work_item_pnp_cb( +// IN DEVICE_OBJECT *p_dev_obj, +// IN hca_work_item_context_t *p_context ) +//{ +// hca_dev_ext_t *p_ext; +// NTSTATUS status; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); +// do +// { +// /* Check the state under protection of the mutex. */ +// if( p_ext->state != HCA_ADDED && +// p_ext->state != HCA_STARTED ) +// { +// HCA_TRACE( HCA_DBG_ERROR, ("Invalid state.\n") ); +// break; +// } +// +// ASSERT( !p_ext->p_al_dev ); +// +// /* Get the AL device object. */ +// HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") ); +// status = IoGetDeviceObjectPointer( &p_context->sym_link_name, +// FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); +// if( !NT_SUCCESS( status ) ) +// { +// HCA_TRACE( HCA_DBG_ERROR, +// ("IoGetDeviceObjectPointer returned %08x.\n", status) ); +// break; +// } +// +// cl_event_signal( &p_ext->mutex ); +// /* Register for removal notification of the IB Fabric root device. */ +// HCA_TRACE( HCA_DBG_PNP, +// ("Registering for target notifications.\n") ); +// status = IoRegisterPlugPlayNotification( +// EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, +// p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, +// &p_ext->pnp_target_entry ); +// cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); +// if( !NT_SUCCESS( status ) ) +// { +// ObDereferenceObject( p_ext->p_al_file_obj ); +// HCA_TRACE( HCA_DBG_ERROR, +// ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); +// break; +// } +// +// if( p_ext->state == HCA_STARTED ) +// { +// /* Queue the work item again to complete the registration. */ +// IoQueueWorkItem( p_context->p_item, __work_item_started_cb, +// DelayedWorkQueue, p_context->p_item ); +// } +// else +// { +// /* Free the work item. */ +// IoFreeWorkItem( p_context->p_item ); +// } +// } while( !p_ext ); +// +// cl_event_signal( &p_ext->mutex ); +// cl_free( p_context ); +// return; +//} + + +static NTSTATUS +__pnp_notify_ifc( + IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify, + IN void *context ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_dev_obj; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_dev_obj = context; + p_ext = p_dev_obj->DeviceExtension; + + if( !IsEqualGUID( &p_notify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) ) + { + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; + } + + /* + * Sanity check. We should only be getting notifications of the + * CI interface exported by AL. + */ + ASSERT( + IsEqualGUID( &p_notify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) ); + + if( p_ext->state != HCA_STARTED ) + { + HCA_TRACE( HCA_DBG_ERROR, ("Invalid state: %d\n", p_ext->state) ); + return STATUS_SUCCESS; + } + + ASSERT( !p_ext->p_al_dev ); + ASSERT( !p_ext->p_al_file_obj ); + + /* Get the AL device object. 
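+ * IoGetDeviceObjectPointer references AL's file object; the matching
+ * ObDereferenceObject calls are in the failure paths below, in
+ * __pnp_notify_target and in hca_release_resources.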
*/
+ HCA_TRACE( HCA_DBG_PNP, ("Calling IoGetDeviceObjectPointer.\n") );
+ status = IoGetDeviceObjectPointer( p_notify->SymbolicLinkName,
+ FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE( HCA_DBG_ERROR,
+ ("IoGetDeviceObjectPointer returned %08x.\n", status) );
+ return STATUS_SUCCESS;
+ }
+
+ /* Register for removal notification of the IB Fabric root device. */
+ HCA_TRACE( HCA_DBG_PNP,
+ ("Registering for target notifications.\n") );
+ status = IoRegisterPlugPlayNotification(
+ EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj,
+ p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj,
+ &p_ext->pnp_target_entry );
+ if( !NT_SUCCESS( status ) )
+ {
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ HCA_TRACE( HCA_DBG_ERROR,
+ ("IoRegisterPlugPlayNotification returned %08x.\n", status) );
+ return STATUS_SUCCESS;
+ }
+
+ status = __hca_register( p_dev_obj );
+ if( !NT_SUCCESS( status ) )
+ {
+ IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry );
+ p_ext->pnp_target_entry = NULL;
+ ObDereferenceObject( p_ext->p_al_file_obj );
+ p_ext->p_al_file_obj = NULL;
+ p_ext->p_al_dev = NULL;
+ HCA_TRACE( HCA_DBG_ERROR,
+ ("__hca_register returned %08x.\n", status) );
+ return STATUS_SUCCESS;
+ }
+
+ HCA_EXIT( HCA_DBG_PNP );
+ return STATUS_SUCCESS;
+}
+//
+//
+//static NTSTATUS
+//hca_enable(
+// IN DEVICE_OBJECT* const p_dev_obj )
+//{
+// PIO_WORKITEM p_item;
+// hca_dev_ext_t *p_ext;
+//
+// HCA_ENTER( HCA_DBG_PNP );
+//
+// p_ext = p_dev_obj->DeviceExtension;
+//
+// /* Check for the AL device reference. */
+// if( p_ext->p_al_dev )
+// {
+// __hca_register( p_dev_obj );
+// }
+// p_ext->state = HCA_STARTED;
+//
+// HCA_EXIT( HCA_DBG_PNP );
+// return STATUS_SUCCESS;
+//}
+
+
+static NTSTATUS
+hca_start(
+ IN DEVICE_OBJECT* const p_dev_obj,
+ IN IRP* const p_irp,
+ OUT cl_irp_action_t* const p_action )
+{
+ NTSTATUS status;
+ hca_dev_ext_t *p_ext;
+
+ HCA_ENTER( HCA_DBG_PNP );
+
+ /* Handled on the way up. */
+ status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Lower drivers failed IRP_MN_START_DEVICE.\n") );
+ return status;
+ }
+
+ p_ext = p_dev_obj->DeviceExtension;
+
+ /* Get the HH HCA handle for this instance. */
+ status = __get_hca_handle( p_ext );
+ if( !NT_SUCCESS( status ) )
+ {
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("Failed to get HH HCA handle.\n") );
+ return status;
+ }
+
+ {
+ VAPI_hca_cap_t hca_cap;
+ int rc;
+
+ if (HH_OK != THH_hob_open_hca(p_ext->hca.hh_hndl, NULL, NULL)) {
+ status = STATUS_UNSUCCESSFUL;
+ return status;
+ }
+
+ rc = THH_hob_query(p_ext->hca.hh_hndl, &hca_cap);
+ if (rc != HH_OK) {
+ HCA_TRACE( HCA_DBG_ERROR, ("Error on getting guid (%#x).\n", rc) );
+ status = STATUS_UNSUCCESSFUL;
+ return status;
+ }
+ p_ext->hca.guid = *(uint64_t *)hca_cap.node_guid;
+ p_ext->hca.p_dev_obj = p_ext->cl_ext.p_pdo;
+
+ THH_hob_close_hca(p_ext->hca.hh_hndl);
+ }
+
+ mlnx_hca_insert( &p_ext->hca );
+
+ /*
+ * Change the state since the PnP callback can happen
+ * before the callback returns.
+ */
+ p_ext->state = HCA_STARTED;
+ /* Register for interface arrival of the IB_AL device. 
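+ * PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES makes the callback
+ * fire immediately for an interface that is already exported, which is why
+ * the state was moved to HCA_STARTED above rather than after registration.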
*/ + status = IoRegisterPlugPlayNotification( + EventCategoryDeviceInterfaceChange, + PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES, + (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject, + __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry ); + if( !NT_SUCCESS( status ) ) + { + p_ext->state = HCA_ADDED; + HCA_TRACE( HCA_DBG_ERROR, + ("IoRegisterPlugPlayNotification returned %08x.\n", status) ); + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + if( p_ext->state == HCA_REGISTERED ) + { + CL_ASSERT( p_ext->ci_ifc.deregister_ca ); + CL_ASSERT( p_ext->p_al_dev ); + CL_ASSERT( p_ext->p_al_file_obj ); + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + } + + if( p_ext->pnp_target_entry ) + { + ASSERT( p_ext->pnp_ifc_entry ); + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + } + + if( p_ext->pnp_ifc_entry ) + IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry ); + + if( p_ext->p_al_file_obj ) + ObDereferenceObject( p_ext->p_al_file_obj ); + + //cl_event_destroy( &p_ext->mutex ); + + HCA_EXIT( HCA_DBG_PNP ); +} +// +// +//static NTSTATUS +//hca_disable( +// IN DEVICE_OBJECT* const p_dev_obj ) +//{ +// hca_dev_ext_t *p_ext; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// ASSERT( p_ext->state == HCA_STARTED ); +// +// if( p_ext->state = HCA_REGISTERED ) +// { +// /* Notify AL that the CA is being removed. */ +// p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); +// /* Release AL's CI interface. 
*/ +// p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); +// +// p_ext->state = HCA_STARTED; +// } +// +// HCA_EXIT( HCA_DBG_PNP ); +// return STATUS_SUCCESS; +//} +// +// +//static NTSTATUS +//hca_deactivate( +// IN DEVICE_OBJECT* const p_dev_obj, +// IN IRP* const p_irp, +// OUT cl_irp_action_t* const p_action ) +//{ +// NTSTATUS status; +// hca_dev_ext_t *p_ext; +// +// HCA_ENTER( HCA_DBG_PNP ); +// +// UNUSED_PARAM( p_irp ); +// +// p_ext = p_dev_obj->DeviceExtension; +// +// *p_action = IrpSkip; +// +// status = hca_disable( p_dev_obj ); +// +// mlnx_hca_remove( &p_ext->hca ); +// +// p_ext->hca.hh_hndl = NULL; +// +// p_ext->state = HCA_ADDED; +// +// HCA_EXIT( HCA_DBG_PNP ); +// return status; +//} + + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); + if( p_ext->state == HCA_REGISTERED ) + { + status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp ); + if( !NT_SUCCESS( status ) ) + { + //cl_event_signal( &p_ext->mutex ); + *p_action = IrpComplete; + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("AL get_relations returned %08x.\n", status) ); + return status; + } + } + else + { + status = cl_alloc_relations( p_irp, 1 ); + if( !NT_SUCCESS( status ) ) + { + HCA_TRACE_EXIT( HCA_DBG_ERROR, + ("cl_alloc_relations returned %08x.\n", status) ); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + p_rel->Count = 0; + p_rel->Objects[0] = NULL; + } + + //cl_event_signal( &p_ext->mutex ); + + *p_action = IrpPassDown; + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *p_io_stack; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + *p_action = IrpSkip; + + p_io_stack = IoGetCurrentIrpStackLocation( p_irp ); + + if( p_io_stack->Parameters.Power.Type != DevicePowerState ) + return STATUS_SUCCESS; + + switch( p_io_stack->Parameters.Power.State.DeviceState ) + { + case PowerDeviceD0: + if( p_ext->p_al_dev ) + status = __hca_register( p_dev_obj ); + else + status = STATUS_SUCCESS; + break; + + default: + if( p_ext->state == HCA_REGISTERED ) + { + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + /* Release AL's CI interface. 
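+ * Any device power state below D0 is treated as a power-down: the CA is
+ * deregistered here and re-registered from the PowerDeviceD0 arm above.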
*/ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + + p_ext->state = HCA_STARTED; + } + status = STATUS_SUCCESS; + break; + } + + if( !NT_SUCCESS( status ) ) + *p_action = IrpComplete; + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +typedef struct Primary_Sector{ + uint32_t fi_addr; + uint32_t fi_size; + uint32_t signature; + uint32_t fw_reserved[5]; + uint32_t vsd[56]; + uint32_t branch_to; + uint32_t crc016; +} primary_sector_t; + +static uint32_t old_dir; +static uint32_t old_pol; +static uint32_t old_mod; +static uint32_t old_dat; + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + + ULONG bytes; + NTSTATUS status = STATUS_SUCCESS; + + PAGED_CODE(); + + if (p_BusInterface) + { + + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + (PVOID)&offset, + PCI_CONF_ADDR, + sizeof(ULONG) ); + + if( op_flag == 0 ) + { + if ( bytes ) + bytes = p_BusInterface->GetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length ); + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + + else + { + if ( bytes ) + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length); + + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + } + return status; +} + +static NTSTATUS +fw_get_pci_bus_interface( + IN DEVICE_OBJECT *p_dev_obj, + OUT BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + KEVENT event; + NTSTATUS status; + PIRP p_irp; + IO_STATUS_BLOCK ioStatus; + PIO_STACK_LOCATION p_irpStack; + PDEVICE_OBJECT p_target_obj; + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + p_target_obj = IoGetAttachedDeviceReference( p_dev_obj ); + + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, + p_target_obj, + NULL, + 0, + NULL, + &event, + &ioStatus ); + if (p_irp == NULL) + { + HCA_TRACE( HCA_DBG_ERROR, + ("IoBuildSynchronousFsdRequest failed.\n") ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto End; + } + p_irpStack = IoGetNextIrpStackLocation( p_irp ); + p_irpStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + p_irpStack->Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD; + p_irpStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + p_irpStack->Parameters.QueryInterface.Version = 1; + p_irpStack->Parameters.QueryInterface.Interface = (PINTERFACE) p_BusInterface; + p_irpStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + status = IoCallDriver( p_target_obj, p_irp ); + + if ( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, FALSE, NULL ); + status = ioStatus.Status; + } +End: + // Done with reference + ObDereferenceObject( p_target_obj ); + return status; +} + +static NTSTATUS +__map_crspace( + IN mlnx_hob_t * p_hob, + IN PVOID p_buf, + IN ULONG buf_size + ) +{ + NTSTATUS status; + PMDL mdl_p; + mlnx_hca_t *p_hca = mlnx_hca_from_hh_hndl(p_hob->hh_hndl); + PVOID ua, ka; + ULONG sz; + struct _map_crspace *res_p = (struct _map_crspace *)p_buf; + + HCA_ENTER( HCA_DBG_PNP ); + + // sanity checks + if ( buf_size < sizeof *res_p ) { + status = STATUS_INVALID_PARAMETER; + goto out; + } + if (p_hca == NULL) { + status = STATUS_UNSUCCESSFUL; + goto out; + } + ka = p_hca->s.kernel_crspace_addr; + sz = p_hca->s.kernel_crspace_size; + if ( sz == 0 || ka == NULL) { + 
HCA_TRACE( HCA_DBG_ERROR, ("No kernel mapping of CR space.\n") ); + status = STATUS_UNSUCCESSFUL; + goto out; + } + + // prepare for mapping to user space + mdl_p = IoAllocateMdl( ka, sz, FALSE,FALSE,NULL); + if (mdl_p == NULL) { + HCA_TRACE( HCA_DBG_ERROR, ("IoAllocateMdl failed.\n") ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto out; + } + + // fill MDL + MmBuildMdlForNonPagedPool(mdl_p); + + // map the buffer into user space + ua = MmMapLockedPagesSpecifyCache( mdl_p, UserMode, MmNonCached, + NULL, FALSE, NormalPagePriority ); + if (ua == NULL) { + HCA_TRACE( HCA_DBG_ERROR, ("MmMapLockedPagesSpecifyCache failed.\n") ); + IoFreeMdl( mdl_p ); + status = STATUS_UNSUCCESSFUL; + goto out; + } + + // fill the structure + res_p->va = ua; + res_p->size = sz; + res_p->ctx = mdl_p; + status = STATUS_SUCCESS; + +out: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +static NTSTATUS +__unmap_crspace( + IN PVOID p_buf, + IN ULONG buf_size + ) +{ + NTSTATUS status; + PMDL mdl_p; + PVOID ua; + struct _unmap_crspace *parm = (struct _unmap_crspace *)p_buf; + + HCA_ENTER( HCA_DBG_PNP ); + + // sanity checks + if ( buf_size < sizeof *parm ) { + status = STATUS_INVALID_PARAMETER; + goto out; + } + mdl_p = parm->ctx; + ua = parm->va; + if ( mdl_p == NULL || ua == NULL) { + status = STATUS_INVALID_PARAMETER; + goto out; + } + + // do the work + MmUnmapLockedPages(ua, mdl_p); + IoFreeMdl( mdl_p ); + status = STATUS_SUCCESS; + +out: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +ib_api_status_t +fw_access_ctrl( + IN const void* __ptr64 p_context, + IN const void* __ptr64* const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + DEVICE_OBJECT *p_dev_obj; + static BUS_INTERFACE_STANDARD BusInterface; + static uint32_t if_ready; + NTSTATUS status; + PVOID p_data; + ULONG offset; + ULONG POINTER_ALIGNMENT length; + ib_ci_op_t *p_ci; + mlnx_hob_t *p_hob; + + UNREFERENCED_PARAMETER(handle_array); + UNREFERENCED_PARAMETER(num_handles); + UNREFERENCED_PARAMETER(p_umv_buf); + + status = STATUS_SUCCESS; + if( p_umv_buf ) + p_hob = ((mlnx_um_ca_t* __ptr64)p_context)->hob_p; + else + p_hob = (mlnx_hob_t *)(const void *)p_context; + + p_dev_obj = (DEVICE_OBJECT *)(const void *)p_hob->p_dev_obj; + p_ci = p_ci_op; + + if ( !p_ci ) + return STATUS_INVALID_DEVICE_REQUEST; + if ( !p_ci->buf_size ) + return STATUS_INVALID_DEVICE_REQUEST; + + length = p_ci->buf_size; + offset = p_ci->buf_info; + p_data = p_ci->p_buf; + + switch ( p_ci->command ) + { + case FW_MAP_CRSPACE: + status = __map_crspace(p_hob, p_data, length); + break; + + case FW_UNMAP_CRSPACE: + status = __unmap_crspace(p_data, length); + break; + + case FW_READ: // read data from flash + if ( if_ready ) + { + status = fw_flash_read_data(&BusInterface, p_data, offset, length); + } + break; + case FW_WRITE: // write data to flash + if ( if_ready ) + { + + status = fw_flash_write_data(&BusInterface, p_data, offset, length); + } + break; + case FW_READ_CMD: + if ( if_ready ) + { + status = fw_access_pciconf(&BusInterface, 0 , p_data, offset, 4); + } + break; + case FW_WRITE_CMD: + if ( if_ready ) + { + status = fw_access_pciconf(&BusInterface, 1 , p_data, offset, 4); + } + break; + case FW_CLOSE_IF: // close BusInterface + if (if_ready ) + { + if_ready = 0; + BusInterface.InterfaceDereference((PVOID)BusInterface.Context); + } + return status; + case FW_OPEN_IF: // open BusInterface + if ( !if_ready ) + { + status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface); + + if ( NT_SUCCESS( 
status ) )
+ {
+ if_ready = 1;
+ status = STATUS_SUCCESS;
+ }
+ }
+ return status;
+ default:
+ status = STATUS_NOT_SUPPORTED;
+ }
+
+ if ( status != STATUS_SUCCESS )
+ {
+ if ( if_ready )
+ {
+ if_ready = 0;
+ BusInterface.InterfaceDereference((PVOID)BusInterface.Context);
+ }
+ HCA_TRACE_EXIT( HCA_DBG_ERROR,
+ ("fw_access_ctrl failed returns %08x.\n", status) );
+ }
+ return status;
+}
+
+static NTSTATUS
+fw_flash_write_data (
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,
+ IN PVOID p_buffer,
+ IN ULONG offset,
+ IN ULONG POINTER_ALIGNMENT length )
+{
+ NTSTATUS status;
+ uint32_t cnt = 0;
+ uint32_t lcl_data;
+
+ lcl_data = (*((uint32_t*)p_buffer) << 24);
+
+ status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET+4, length );
+ if ( status != STATUS_SUCCESS )
+ return status;
+ lcl_data = ( WRITE_BIT | (offset & ADDR_MSK));
+
+ status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET, 4 );
+ if ( status != STATUS_SUCCESS )
+ return status;
+
+ lcl_data = 0;
+
+ do
+ {
+ if (++cnt > 5000)
+ {
+ return STATUS_DEVICE_NOT_READY;
+ }
+
+ status = fw_access_pciconf(p_BusInterface, FW_READ , &lcl_data, FLASH_OFFSET, 4 );
+ if ( status != STATUS_SUCCESS )
+ return status;
+
+ } while(lcl_data & CMD_MASK);
+
+ return status;
+}
+
+static NTSTATUS
+fw_flash_read_data (
+ IN BUS_INTERFACE_STANDARD *p_BusInterface,
+ IN PVOID p_buffer,
+ IN ULONG offset,
+ IN ULONG POINTER_ALIGNMENT length )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ uint32_t cnt = 0;
+ uint32_t lcl_data = ( READ_BIT | (offset & ADDR_MSK));
+
+ status = fw_access_pciconf(p_BusInterface, FW_WRITE, &lcl_data, FLASH_OFFSET, 4 );
+ if ( status != STATUS_SUCCESS )
+ return status;
+
+ lcl_data = 0;
+ do
+ {
+ // Timeout checks
+ if (++cnt > 5000 )
+ {
+ return STATUS_DEVICE_NOT_READY;
+ }
+
+ status = fw_access_pciconf(p_BusInterface, FW_READ, &lcl_data, FLASH_OFFSET, 4 );
+
+ if ( status != STATUS_SUCCESS )
+ return status;
+
+ } while(lcl_data & CMD_MASK);
+
+ status = fw_access_pciconf(p_BusInterface, FW_READ, p_buffer, FLASH_OFFSET+4, length );
+ return status;
+}
+
+static NTSTATUS
+fw_flash_get_ca_guid(
+ IN DEVICE_OBJECT *p_dev_obj,
+ OUT net64_t *ca_guid )
+{
+ NTSTATUS status = STATUS_SUCCESS;
+ BUS_INTERFACE_STANDARD BusInterface;
+
+ uint32_t NODE_GUIDH, NODE_GUIDL;
+ uint32_t prim_ptr = 0;
+ uint32_t signature, offset, sect_size;
+
+ primary_sector_t ps;
+ cl_memset( &ps, 0, sizeof(primary_sector_t));
+
+ status = fw_get_pci_bus_interface(p_dev_obj, &BusInterface);
+
+ if ( !NT_SUCCESS( status ) )
+ return status;
+
+ status = fw_flash_init (&BusInterface);
+ if (status != STATUS_SUCCESS )
+ goto err1;
+
+ status = fw_flash_read_data(&BusInterface, &signature, FW_SIG_OFFSET, 4);
+ if (status != STATUS_SUCCESS )
+ goto err2;
+
+ if (signature == FW_SIGNATURE)
+ {
+ //Fail Safe image
+
+ /* Find the sector size offset. */
+ status = fw_flash_read_data(
+ &BusInterface, &offset, FW_SECT_PTR_OFFSET, 4 );
+ if( status != STATUS_SUCCESS )
+ goto err2;
+
+ offset &= 0x0000FFFF;
+
+ status = fw_flash_read_data(
+ &BusInterface, &sect_size, FW_SECT_OFFSET + offset, 4 );
+ if( status != STATUS_SUCCESS )
+ goto err2;
+
+ sect_size = 1 << (sect_size & 0x0000FFFF);
+
+ /* Try to read the GUIDs from the primary image. */
+ status = fw_flash_readbuf(&BusInterface, sect_size, &ps, sizeof(ps));
+ if ( status == STATUS_SUCCESS && ps.signature != FW_SIGNATURE )
+ {
+ /* Hmm, that didn't work. Try the secondary image. 
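+ * Failsafe image layout, as walked above: the signature lives at
+ * FW_SIG_OFFSET, FW_SECT_PTR_OFFSET yields the log2 sector size, the
+ * primary pointer sector starts at sect_size and the secondary at
+ * sect_size * 2; ps.fi_addr + 0x24 then locates the node GUIDs.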
*/ + status = fw_flash_readbuf( + &BusInterface, sect_size * 2, &ps, sizeof(ps) ); + } + if( status == STATUS_SUCCESS ) + { + signature = ps.signature; + status = fw_flash_read_data(&BusInterface, &prim_ptr, ps.fi_addr+0x24, 4); + if (status == STATUS_SUCCESS ) + prim_ptr = prim_ptr + ps.fi_addr; + } + else + { + signature = 0; + } + } + else + { + // Short image + HCA_TRACE( HCA_DBG_ERROR, + ("Invalid signature %08x, assuming short image.\n", signature) ); + prim_ptr = signature; + signature = FW_SIGNATURE; + } + + if ( signature == FW_SIGNATURE && prim_ptr < MAX_FLASH_SIZE ) + { + /* now we can read ca guid + * since we read it in host mode fw_flash_read4() + * swaps it back in BE - how it was stored in FW + */ + if (( status = fw_flash_read4(&BusInterface, prim_ptr, &NODE_GUIDL)) == STATUS_SUCCESS ) + if (( status = fw_flash_read4(&BusInterface, prim_ptr+4, &NODE_GUIDH)) == STATUS_SUCCESS ) + { + *ca_guid = NODE_GUIDH; + *ca_guid = (*ca_guid << 32) | NODE_GUIDL; + } + } + else + { + //invalid GUID pointer + status = STATUS_NO_SUCH_DEVICE; + } +err2: + fw_flash_deinit(&BusInterface); +err1: + BusInterface.InterfaceDereference((PVOID)BusInterface.Context); + return status; +} + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t lcl_data = 0; + uint32_t bank; + static uint32_t curr_bank = 0xffffffff; + + if (addr & 0x3) + { + HCA_TRACE( HCA_DBG_ERROR, ("Invalid address %08x\n", addr) ); + return STATUS_INVALID_PARAMETER; + } + + bank = addr & BANK_MASK; + if (bank != curr_bank) + { + curr_bank = bank; + if ((status = fw_set_bank(p_BusInterface, bank)) != STATUS_SUCCESS ) + { + HCA_TRACE( HCA_DBG_ERROR, ("fw_set_bank returned %08x\n", status) ); + return STATUS_INVALID_PARAMETER; + } + } + status = fw_flash_read_data(p_BusInterface, &lcl_data, addr, 4); + *p_data = cl_ntoh32(lcl_data); + return STATUS_SUCCESS; +} + +static NTSTATUS +fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t *p_lcl_data; + uint32_t i; + + if (offset & 0x3) + { + //Address should be 4-bytes aligned + HCA_TRACE( HCA_DBG_ERROR, ("Invalid address %08x\n", offset) ); + return STATUS_INVALID_PARAMETER; + } + if (len & 0x3) + { + //Length should be 4-bytes aligned + HCA_TRACE( HCA_DBG_ERROR, ("Invalid length %d\n", len) ); + return STATUS_INVALID_PARAMETER; + } + p_lcl_data = (uint32_t *)p_data; + + for ( i=0; i < (len >> 2); i++) + { + if ( (status = fw_flash_read_data( p_BusInterface, p_lcl_data, offset, sizeof(uint32_t) )) != STATUS_SUCCESS ) + return status; + offset += 4; + p_lcl_data++; + } + return STATUS_SUCCESS; +} // Flash::flash_read + +static NTSTATUS +fw_flash_writebuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t i; + uint8_t *p_data = (uint8_t *)p_buffer; + + for ( i = 0; i < length; i++ ) + { + status = fw_flash_write_data (p_BusInterface, p_data, offset, 1 ); + if (status != STATUS_SUCCESS ) + return status; + p_data++; + offset++; + } + return status; +} +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t dir; + uint32_t pol; + uint32_t mod; + + uint32_t cnt=0; + uint32_t data; + NTSTATUS status = STATUS_SUCCESS; + uint32_t semaphore = 0; + + while ( !semaphore ) + { + status = 
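+ /* Flash access is bit-banged through PCI config space; grab the GPIO
+ semaphore register (SEMAP63) first - it reads zero when free. */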
fw_access_pciconf(p_BusInterface, FW_READ , &data, SEMAP63, 4); + if ( status != STATUS_SUCCESS ) + break; + if( !data ) + { + semaphore = 1; + break; + } + if (++cnt > 5000 ) + { + break; + } + } + + if ( !semaphore ) + { + return STATUS_NOT_SUPPORTED; + } + + // Save old values + + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dat,GPIO_DAT_L , 4); + + // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits + dir = old_dir | 0x70; + pol = old_pol & ~0x70; + mod = old_mod & ~0x70; + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + // Set CPUMODE + status = fw_access_pciconf(p_BusInterface, FW_READ , &data, CPUMODE, 4); + if ( status == STATUS_SUCCESS ) + { + data &= ~CPUMODE_MSK; + data |= 1 << CPUMODE_SHIFT; + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, CPUMODE, 4); + } + if ( status == STATUS_SUCCESS ) + { + // Reset flash + data = 0xf0; + status = fw_flash_write_data(p_BusInterface, &data, 0x0, 4); + } + return status; +} + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t data = 0; + NTSTATUS status = STATUS_SUCCESS; + + status = fw_set_bank(p_BusInterface, 0); + if ( status == STATUS_SUCCESS ) + // Restore origin values + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dat,GPIO_DAT_L , 4); + if ( status == STATUS_SUCCESS ) + // Free GPIO Semaphore + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, SEMAP63, 4); + return status; +} + +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t data = ( (uint32_t)0x70 << 24 ); + uint32_t mask = ((bank >> (BANK_SHIFT-4)) << 24 ); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATACLEAR_L, 4); + if (status == STATUS_SUCCESS) + { + // A1 + data &= mask; + //data |= mask; // for A0 + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATASET_L, 4); + } + return status; +} diff --git a/branches/Ndi/hw/mt23108/kernel/hca_driver.h b/branches/Ndi/hw/mt23108/kernel/hca_driver.h new file mode 100644 index 00000000..50a56f65 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_driver.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
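fw_flash_read4() above caches the currently selected bank and calls fw_set_bank() only when an address crosses the 512 KB gateway window; fw_set_bank() then drives the bank bits out on three GPIO lines through the data-clear/data-set registers. A small sketch of just the bit arithmetic; the reading of the 0x70 << 24 pattern as GPIO pins 28-30 is an assumption for illustration, since the driver only ever manipulates the mask as a whole.

/* Bank-selection arithmetic from fw_flash_read4()/fw_set_bank() above. */
#include <stdint.h>
#include <stdio.h>

#define BANK_SHIFT 19
#define BANK_MASK  0xfff80000u
#define ADDR_MSK   0x0007ffffu

int main(void)
{
    uint32_t addr = 0x000a1234;           /* flash byte address            */
    uint32_t bank = addr & BANK_MASK;     /* bank bits, as in fw_flash_read4() */
    /* GPIO bits actually raised: fw_set_bank() clears 0x70 << 24, then
     * sets (data & mask), i.e. the intersection below. */
    uint32_t gpio = (0x70u << 24) & ((bank >> (BANK_SHIFT - 4)) << 24);

    printf("addr %08x -> bank %08x, window offset %05x, gpio set bits %08x\n",
           (unsigned)addr, (unsigned)bank,
           (unsigned)(addr & ADDR_MSK), (unsigned)gpio);
    return 0;
}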
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( _HCA_DRIVER_H_ ) +#define _HCA_DRIVER_H_ + + +#include +#include +#include +#include +#include "hca_debug.h" +#include "hca_data.h" + + +#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK +#define FILE_DEVICE_INFINIBAND 0x0000003B +#endif + +/****s* HCA/hca_reg_state_t +* NAME +* hca_reg_state_t +* +* DESCRIPTION +* State for tracking registration with AL. This state is independent of the +* device PnP state, and both are used to properly register with AL. +* +* SYNOPSIS +*/ +typedef enum _hca_reg_state +{ + HCA_SHUTDOWN, + HCA_ADDED, + HCA_STARTED, + HCA_REGISTERED + +} hca_reg_state_t; +/* +* VALUES +* HCA_SHUTDOWN +* Cleaning up. +* +* HCA_ADDED +* AddDevice was called and successfully registered for interface +* notifications. +* +* HCA_STARTED +* IRP_MN_START_DEVICE was called. The HCA is fully functional. +* +* HCA_REGISTERED +* Fully functional and registered with the bus root. +*********/ + + +typedef struct _hca_dev_ext +{ + cl_pnp_po_ext_t cl_ext; + + /* Notification entry for PnP interface events. */ + void *pnp_ifc_entry; + + /* Notification entry for PnP target events. */ + void *pnp_target_entry; + + /* Interface for the lower edge of the IB_AL device. 
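The hca_reg_state_t values documented above advance in lock step with PnP events: AddDevice leaves the extension in HCA_ADDED, IRP_MN_START_DEVICE moves it to HCA_STARTED, and a successful registration with the bus root yields HCA_REGISTERED, with HCA_SHUTDOWN covering teardown. A toy walk through that progression; the enum is copied from the header, everything else is illustrative.

/* Illustrative walk through the registration state machine above. */
#include <stdio.h>

typedef enum _hca_reg_state {
    HCA_SHUTDOWN,
    HCA_ADDED,
    HCA_STARTED,
    HCA_REGISTERED
} hca_reg_state_t;

static const char *name(hca_reg_state_t s)
{
    switch (s) {
    case HCA_SHUTDOWN:   return "HCA_SHUTDOWN";
    case HCA_ADDED:      return "HCA_ADDED";
    case HCA_STARTED:    return "HCA_STARTED";
    case HCA_REGISTERED: return "HCA_REGISTERED";
    }
    return "?";
}

int main(void)
{
    hca_reg_state_t s = HCA_SHUTDOWN;
    /* Normal bring-up path; teardown walks back to HCA_SHUTDOWN. */
    hca_reg_state_t path[] = { HCA_ADDED, HCA_STARTED, HCA_REGISTERED };
    for (unsigned i = 0; i < sizeof(path)/sizeof(path[0]); i++) {
        printf("%s -> %s\n", name(s), name(path[i]));
        s = path[i];
    }
    return 0;
}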
*/ + ib_ci_ifc_t ci_ifc; + + hca_reg_state_t state; + + DEVICE_OBJECT *p_al_dev; + FILE_OBJECT *p_al_file_obj; + + mlnx_hca_t hca; + +} hca_dev_ext_t; + +/*********************************** +Firmware Update definitions +***********************************/ +#define PCI_CONF_ADDR (0x00000058) +#define PCI_CONF_DATA (0x0000005c) +#define FLASH_OFFSET (0x000f01a4) +#define READ_BIT (1<<29) +#define WRITE_BIT (2<<29) +#define ADDR_MSK (0x0007ffff) +#define CMD_MASK (0xe0000000) +#define BANK_SHIFT (19) +#define BANK_MASK (0xfff80000) +#define MAX_FLASH_SIZE (0x80000) // 512K + +#define SEMAP63 (0xf03fc) +#define GPIO_DIR_L (0xf008c) +#define GPIO_POL_L (0xf0094) +#define GPIO_MOD_L (0xf009c) +#define GPIO_DAT_L (0xf0084) +#define GPIO_DATACLEAR_L (0xf00d4) +#define GPIO_DATASET_L (0xf00dc) + +#define CPUMODE (0xf0150) +#define CPUMODE_MSK (0xc0000000UL) +#define CPUMODE_SHIFT (30) + +/* buffer structure for FW_MAP_CRBASE */ +struct _map_crspace { + PVOID va; /* address of CRSPACE, mapped to user space */ + PVOID ctx; /* opaque operation context; to be used in FW_UNMAP_CRBASE */ + ULONG size; /* size of CRSPACE, mapped to user space */ +}; + +struct _unmap_crspace { + PVOID va; /* address of CRSPACE, mapped to user space */ + PVOID ctx; /* operation context, received in FW_MAP_CRBASE */ +}; + +/* Definitions intended to become shared with UM. Later... */ +#define FW_READ 0x00 +#define FW_WRITE 0x01 +#define FW_READ_CMD 0x08 +#define FW_WRITE_CMD 0x09 +#define FW_MAP_CRSPACE 0x0A +#define FW_UNMAP_CRSPACE 0x0B +#define FW_OPEN_IF 0xe7 +#define FW_CLOSE_IF 0x7e + +#define FW_SIGNATURE (0x5a445a44) +#define FW_SIG_OFFSET (0x24) + +#define FW_SECT_PTR_OFFSET (0x14) +/* The real offset is 0x32, but we're reading DWORDS at a time, so we align */ +#define FW_SECT_OFFSET (0x30) + + +#endif /* !defined( _HCA_DRIVER_H_ ) */ diff --git a/branches/Ndi/hw/mt23108/kernel/hca_mcast.c b/branches/Ndi/hw/mt23108/kernel/hca_mcast.c new file mode 100644 index 00000000..49690f2c --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_mcast.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include + +#include +#include +#include +#include +#include + +#include "hca_data.h" + +/* +* Multicast Support Verbs. 
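The comment above FW_SECT_OFFSET explains why it is 0x30 when the field of interest really sits at 0x32: the flash gateway only performs 4-byte cycles, so the driver aligns the address down and masks the 16-bit field out of the DWORD, which is what the offset &= 0x0000FFFF and sect_size & 0x0000FFFF lines in fw_flash_get_ca_guid() do. A standalone sketch of the idiom, with fake flash contents; the "field in the low half" byte-order assumption matches what the driver code relies on.

/* Reading a 16-bit field through a gateway that only does DWORD cycles. */
#include <stdint.h>
#include <stdio.h>

static uint32_t flash[16] = { [0x30 / 4] = 0x5a5a0010 }; /* fake sector-size word */

static uint32_t read4_aligned(uint32_t byte_off)
{
    return flash[byte_off / 4];   /* gateway-style: 4-byte cycles only */
}

int main(void)
{
    uint32_t want  = 0x32;                        /* real field offset       */
    uint32_t dword = read4_aligned(want & ~3u);   /* align down to 0x30      */
    uint16_t field = (uint16_t)(dword & 0xFFFF);  /* low 16 bits, as the driver does */

    printf("sect_size log2 field = 0x%04x -> size %u\n",
           (unsigned)field, 1u << field);
    return 0;
}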
+*/ +ib_api_status_t +mlnx_attach_mcast ( + IN const ib_qp_handle_t h_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + + u_int32_t hca_idx = QP_HCA_FROM_HNDL(h_qp); + u_int32_t qp_num = QP_NUM_FROM_HNDL(h_qp); + u_int32_t qp_idx = 0; + mlnx_mcast_t *mcast_p = NULL; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + UNUSED_PARAM( mcast_lid ); + + if (!p_mcast_gid || !ph_mcast) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mcast_p = cl_zalloc( sizeof(mlnx_mcast_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark ) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + memcpy(&mcast_p->mcast_gid, &p_mcast_gid->raw[0], sizeof(IB_gid_t)); + mcast_p->hca_idx = hca_idx; + mcast_p->qp_num = qp_num; + mcast_p->mark = E_MARK_MG; + + cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex); + + if (HH_OK != THH_hob_attach_to_multicast( hobul_p->hh_hndl, qp_num, mcast_p->mcast_gid)) { + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); + + *ph_mcast = (ib_mcast_handle_t)mcast_p; + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); +cleanup: + if (mcast_p) cl_free( mcast_p); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_detach_mcast ( + IN const ib_mcast_handle_t h_mcast) +{ + ib_api_status_t status; + mlnx_mcast_t *mcast_p = (mlnx_mcast_t *)h_mcast; + + u_int32_t hca_idx; + u_int32_t qp_num; + u_int32_t qp_idx = 0; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (!mcast_p || mcast_p->mark != E_MARK_MG) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + hca_idx = mcast_p->hca_idx; + qp_num = mcast_p->qp_num; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + qp_idx = qp_num & hobul_p->qp_idx_mask; + + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup); + if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark ) { + status = IB_INVALID_QP_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex); + + if (HH_OK != THH_hob_detach_from_multicast( hobul_p->hh_hndl, qp_num, mcast_p->mcast_gid)) { + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); + + mcast_p->mark = E_MARK_INVALID; + cl_free( mcast_p); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex); +cleanup: + if (mcast_p) { + mcast_p->mark = E_MARK_INVALID; + 
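Both multicast verbs above validate their handles the same way: split the handle into an HCA index and a QP number, mask the number down to a table slot, and refuse to proceed unless the slot's mark identifies the expected object type. A reduced sketch of that idiom; the names are stand-ins for the driver's QP_*_FROM_HNDL and VALIDATE_INDEX machinery, not the real macros.

/* Handle-validation idiom used by mlnx_attach_mcast()/mlnx_detach_mcast(). */
#include <stdint.h>
#include <stdio.h>

enum mark { E_MARK_INVALID, E_MARK_QP };

struct qp_info { enum mark mark; };

#define MAX_QP      64
#define QP_IDX_MASK (MAX_QP - 1)

static struct qp_info qp_info_tbl[MAX_QP] = { [5] = { E_MARK_QP } };

static int validate_qp_handle(uint64_t h_qp)
{
    uint32_t qp_num = (uint32_t)h_qp;        /* low bits: object number */
    uint32_t qp_idx = qp_num & QP_IDX_MASK;  /* table slot              */

    if (qp_info_tbl[qp_idx].mark != E_MARK_QP)
        return -1;                           /* IB_INVALID_QP_HANDLE analogue */
    return 0;
}

int main(void)
{
    printf("qp 5: %s\n", validate_qp_handle(5) ? "invalid" : "ok");
    printf("qp 6: %s\n", validate_qp_handle(6) ? "invalid" : "ok");
    return 0;
}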
cl_free( mcast_p); + } + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->attach_mcast = mlnx_attach_mcast; + p_interface->detach_mcast = mlnx_detach_mcast; +} diff --git a/branches/Ndi/hw/mt23108/kernel/hca_memory.c b/branches/Ndi/hw/mt23108/kernel/hca_memory.c new file mode 100644 index 00000000..26d8f50d --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_memory.c @@ -0,0 +1,979 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "hca_data.h" + +/* + * Memory Management Verbs. 
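mlnx_mcast_if() above illustrates the registration pattern every verbs module in this file set follows: at load time the module patches its entry points into the ci_interface_t dispatch table handed down by IBAL, and IBAL thereafter calls through the table. A reduced sketch with a two-slot interface; the real ci_interface_t has many more members.

/* Dispatch-table registration pattern, as in mlnx_mcast_if() above. */
#include <stdio.h>

typedef int (*attach_fn)(int qp_num);
typedef int (*detach_fn)(int qp_num);

typedef struct ci_interface {
    attach_fn attach_mcast;
    detach_fn detach_mcast;
} ci_interface_t;

static int my_attach(int qp_num) { printf("attach qp %d\n", qp_num); return 0; }
static int my_detach(int qp_num) { printf("detach qp %d\n", qp_num); return 0; }

static void mcast_if(ci_interface_t *p_interface)
{
    p_interface->attach_mcast = my_attach;
    p_interface->detach_mcast = my_detach;
}

int main(void)
{
    ci_interface_t ifc;
    mcast_if(&ifc);        /* module plugs in its verbs            */
    ifc.attach_mcast(5);   /* IBAL later calls through the table   */
    ifc.detach_mcast(5);
    return 0;
}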
+ */ + +ib_api_status_t +mlnx_register_mr ( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t *p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t *ph_mr, + IN boolean_t um_call ) +{ + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + HH_mr_t mr_props; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey=0, rkey=0; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_mr_create || 0 == p_mr_create->length) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mro_p = cl_zalloc( sizeof(mlnx_mro_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + // Convert MR properties (LOCKS THE REGION as a side effect) + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_mr_create(pd_idx, mro_p, + VAPI_MR_CHANGE_TRANS | VAPI_MR_CHANGE_PD | VAPI_MR_CHANGE_ACL, + p_mr_create, um_call, &mr_props); + if (status != IB_SUCCESS ) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_mr(hobul_p->hh_hndl, &mr_props, &lkey, &rkey)) { + status = IB_ERROR; + goto cleanup_post_lock; + } + + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = cl_hton32( rkey ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mro_p->mark = E_MARK_MR; + mro_p->mr_type = E_MR_ANY; + mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + mro_p->mr_lkey = lkey; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mro_p; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_post_lock: + MOSAL_iobuf_deregister(mro_p->mr_iobuf); + +cleanup: + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_register_pmr ( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + HH_mr_t mr_props = { 0 }; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + UNUSED_PARAM( um_call ); + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_vaddr || !p_pmr_create || + 0 == p_pmr_create->length ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + mro_p = cl_zalloc( 
sizeof(mlnx_mro_t)); + if ( !mro_p ) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + // Convert PMR properties + mro_p->mr_start = *p_vaddr; + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_pmr_create( pd_idx, mro_p, p_pmr_create, + &mr_props ); + if (status != IB_SUCCESS ) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_mr( hobul_p->hh_hndl, &mr_props, + &lkey, &rkey )) { + status = IB_ERROR; + goto cleanup; + } + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = cl_hton32( rkey ); + + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mro_p->mark = E_MARK_MR; + mro_p->mr_type = E_MR_PHYS; + mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + mro_p->mr_lkey = lkey; + if (ph_mr) *ph_mr = (ib_mr_handle_t)mro_p; + *p_vaddr = mro_p->mr_start; // return the updated address + + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? mro_p->mark : 0xBAD))); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_query_mr ( + IN const ib_mr_handle_t h_mr, + OUT ib_mr_attr_t *p_mr_query ) +{ + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status = IB_SUCCESS; + + HH_mr_info_t mr_info; + mlnx_mro_t *mro_p = NULL; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + + if (HH_OK != THH_hob_query_mr(hobul_p->hh_hndl, mro_p->mr_lkey, &mr_info)) { + status = IB_ERROR; + goto cleanup; + } + + mlnx_conv_vapi_mr_attr((ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx), &mr_info, p_mr_query); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +ib_api_status_t +mlnx_modify_mr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_mr_create_t *p_mr_create, + OUT uint32_t *p_lkey, + OUT uint32_t *p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ + u_int32_t hca_idx; + u_int32_t pd_idx, old_pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + VAPI_mr_change_t change_mask; + HH_mr_t mr_props; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + if ( !p_mr_create || 0 == p_mr_create->length || + !p_lkey || !p_rkey) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if( (mem_modify_req & IB_MR_MOD_PD) && !h_pd ) + { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + if( (mem_modify_req & IB_MR_MOD_PD) && h_pd ) + pd_idx = PD_NUM_FROM_HNDL(h_pd); + else + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + old_pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(MLNX_DBG_MEM, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? 
mro_p->mark : 0xBAD))); + + // change_mask = mem_modify_req; + change_mask = 0; + if (mem_modify_req & IB_MR_MOD_ADDR) change_mask |= VAPI_MR_CHANGE_TRANS; + if (mem_modify_req & IB_MR_MOD_PD) change_mask |= VAPI_MR_CHANGE_PD; + if (mem_modify_req & IB_MR_MOD_ACCESS) change_mask |= VAPI_MR_CHANGE_ACL; + + cl_memclr(&mr_props, sizeof(HH_mr_t)); + status = mlnx_conv_ibal_mr_create(pd_idx, mro_p, change_mask, p_mr_create, + um_call, &mr_props); + if ( status != IB_SUCCESS ) { + goto cleanup; + } + + if (HH_OK != THH_hob_reregister_mr(hobul_p->hh_hndl, + mro_p->mr_lkey, + change_mask, + &mr_props, + &lkey, &rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + if( (mem_modify_req & IB_MR_MOD_PD) && h_pd ) + { + mro_p->mr_pd_handle = PD_HNDL_FROM_PD( pd_idx ); + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + cl_atomic_dec(&hobul_p->pd_info_tbl[old_pd_idx].count); + } + + // Update our "shadow" (TBD: old memory region may need to be unlocked) + mro_p->mr_lkey = lkey; + + // Return new keys to the caller + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = rkey; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +ib_api_status_t +mlnx_modify_pmr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + VAPI_mr_change_t change_mask; + HH_mr_t mr_props = { 0 }; + mlnx_mro_t *mro_p = NULL; + u_int32_t lkey, rkey; + + UNUSED_PARAM( um_call ); + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + if ( !p_pmr_create || 0 == p_pmr_create->length || + !p_lkey || !p_rkey) + { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + if( h_pd ) + pd_idx = PD_NUM_FROM_HNDL( h_pd ); + else + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // change_mask = mem_modify_req; + change_mask = 0; + if (mem_modify_req & IB_MR_MOD_ADDR) change_mask |= VAPI_MR_CHANGE_TRANS; + if (mem_modify_req & IB_MR_MOD_PD) change_mask |= VAPI_MR_CHANGE_PD; + if (mem_modify_req & IB_MR_MOD_ACCESS) change_mask |= VAPI_MR_CHANGE_ACL; + + // Convert PMR properties + mro_p->mr_start = *p_vaddr; + cl_memclr(&mr_props, sizeof(HH_mr_t)); + if (IB_SUCCESS != (status = mlnx_conv_ibal_pmr_create(pd_idx, mro_p, p_pmr_create, &mr_props))) { + goto cleanup; + } + + if (HH_OK != THH_hob_reregister_mr(hobul_p->hh_hndl, + mro_p->mr_lkey, + change_mask, + &mr_props, + &lkey, &rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); 
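mlnx_modify_mr() and mlnx_modify_pmr() above translate IBAL's ib_mr_mod_t bits into VAPI's change mask flag by flag rather than passing the value through; the commented-out change_mask = mem_modify_req line hints that the two encodings are not guaranteed to line up. A sketch of that translation; the bit values here are illustrative, not those of the real headers.

/* IBAL -> VAPI change-mask translation, as done in mlnx_modify_mr(). */
#include <stdint.h>
#include <stdio.h>

#define IB_MR_MOD_ADDR        0x1   /* illustrative values */
#define IB_MR_MOD_PD          0x2
#define IB_MR_MOD_ACCESS      0x4

#define VAPI_MR_CHANGE_TRANS  0x10
#define VAPI_MR_CHANGE_PD     0x20
#define VAPI_MR_CHANGE_ACL    0x40

static uint32_t to_vapi_change_mask(uint32_t mem_modify_req)
{
    uint32_t change_mask = 0;
    if (mem_modify_req & IB_MR_MOD_ADDR)   change_mask |= VAPI_MR_CHANGE_TRANS;
    if (mem_modify_req & IB_MR_MOD_PD)     change_mask |= VAPI_MR_CHANGE_PD;
    if (mem_modify_req & IB_MR_MOD_ACCESS) change_mask |= VAPI_MR_CHANGE_ACL;
    return change_mask;
}

int main(void)
{
    printf("mask 0x%x\n",
           (unsigned)to_vapi_change_mask(IB_MR_MOD_ADDR | IB_MR_MOD_PD));
    return 0;
}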
+ + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + // Update our "shadow" + mro_p->mr_lkey = lkey; + + // Return new keys to the caller + if (p_lkey) *p_lkey = lkey; + if (p_rkey) *p_rkey = rkey; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( mr_props.tpt.tpt.buf_lst.buf_sz_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.buf_sz_lst ); + + if( mr_props.tpt.tpt.buf_lst.phys_buf_lst ) + cl_free( mr_props.tpt.tpt.buf_lst.phys_buf_lst ); + + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_register_smr ( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + HH_smr_t smr_props; + mlnx_mro_t *base_mro_p = NULL; + mlnx_mro_t *new_mro_p = NULL; + u_int32_t lkey, rkey; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!ph_mr || !p_vaddr || !p_lkey || !p_rkey ) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + base_mro_p = (mlnx_mro_t *)h_mr; + if (!base_mro_p || base_mro_p->mark != E_MARK_MR) { + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + // Convert SMR properties + smr_props.lkey = base_mro_p->mr_lkey; // L-Key of the region to share with + // This region start virtual addr + smr_props.start = *p_vaddr; + // PD handle for new memory region + smr_props.pd = PD_NUM_FROM_HNDL(base_mro_p->mr_pd_handle); + smr_props.acl = map_ibal_acl(access_ctrl); // Access control (R/W permission local/remote + + // Allocate new handle for shared region + if (NULL == (new_mro_p = cl_zalloc( sizeof(mlnx_mro_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + new_mro_p->mr_start = *p_vaddr; + new_mro_p->mr_size = base_mro_p->mr_size; + + // This computation should be externalized by THH + new_mro_p->mr_mosal_perm = + MOSAL_PERM_READ | + ((smr_props.acl & VAPI_EN_LOCAL_WRITE) ? 
MOSAL_PERM_WRITE : 0); + + if (IB_SUCCESS != (status = mlnx_lock_region(new_mro_p, um_call ))) { + goto cleanup; + } + + // Register MR + if (HH_OK != THH_hob_register_smr(hobul_p->hh_hndl, &smr_props, &lkey, &rkey)) { + status = IB_ERROR; + goto cleanup; + } + + // Return modified values + *p_vaddr = smr_props.start; + *p_lkey = lkey; + *p_rkey = cl_hton32( rkey ); + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + new_mro_p->mark = E_MARK_MR; + new_mro_p->mr_type = E_MR_SHARED; + new_mro_p->mr_pd_handle = PD_HNDL_FROM_PD(pd_idx); + new_mro_p->mr_lkey = lkey; + + *ph_mr = (ib_mr_handle_t)new_mro_p; + +// CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("new_mro_p 0x%p page 0x%x, %d\n", +// new_mro_p, new_mro_p->mr_first_page_addr, new_mro_p->mr_num_pages)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (new_mro_p) { + new_mro_p->mark = E_MARK_INVALID; + cl_free( new_mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_deregister_mr ( + IN const ib_mr_handle_t h_mr) +{ + mlnx_mro_t *mro_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mro_p = (mlnx_mro_t *)h_mr; + if (!mro_p || mro_p->mark != E_MARK_MR) { + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("mro_p 0x%p mark %d\n", mro_p, (mro_p ? mro_p->mark : 0xBAD))); + status = IB_INVALID_MR_HANDLE; + goto cleanup; + } + + hca_idx = PD_HCA_FROM_HNDL(mro_p->mr_pd_handle); + pd_idx = PD_NUM_FROM_HNDL(mro_p->mr_pd_handle); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MR_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MR_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + THH_hob_deregister_mr(hobul_p->hh_hndl, mro_p->mr_lkey); + + if (mro_p->mr_type != E_MR_PHYS) { + MOSAL_iobuf_deregister(mro_p->mr_iobuf); + } + + // update PD object count + cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count)); + + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mro_p) { + mro_p->mark = E_MARK_INVALID; + cl_free( mro_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + + +ib_api_status_t +mlnx_alloc_fmr( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_create_t* const p_fmr_create, + OUT mlnx_fmr_handle_t* const ph_fmr + ) +{ + UNUSED_PARAM( h_pd ); + UNUSED_PARAM( p_fmr_create ); + UNUSED_PARAM( ph_fmr ); + return IB_UNSUPPORTED; +} + + +ib_api_status_t +mlnx_map_phys_fmr ( + IN const mlnx_fmr_handle_t h_fmr, + IN const uint64_t* const page_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey + ) +{ + UNUSED_PARAM( h_fmr ); + UNUSED_PARAM( page_list ); + UNUSED_PARAM( list_len ); + UNUSED_PARAM( p_vaddr ); + UNUSED_PARAM( 
p_lkey ); + UNUSED_PARAM( p_rkey ); + return IB_UNSUPPORTED; +} + + +ib_api_status_t +mlnx_unmap_fmr ( + IN const mlnx_fmr_handle_t *ph_fmr) +{ + UNUSED_PARAM( ph_fmr ); + return IB_UNSUPPORTED; +} + + +ib_api_status_t +mlnx_dealloc_fmr ( + IN const mlnx_fmr_handle_t h_fmr + ) +{ + UNUSED_PARAM( h_fmr ); + return IB_UNSUPPORTED; +} + + +/* +* Memory Window Verbs. +*/ + +ib_api_status_t +mlnx_create_mw ( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + OUT ib_mw_handle_t *ph_mw, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + mlnx_hobul_t *hobul_p; + mlnx_mwo_t *mwo_p = NULL; + ib_api_status_t status; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_PD_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (!p_rkey || !ph_mw) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (NULL == (mwo_p = cl_zalloc( sizeof(mlnx_mwo_t)))) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THH_hob_alloc_mw(hobul_p->hh_hndl, pd_idx, (IB_rkey_t *)&mwo_p->mw_rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count); + + mwo_p->mark = E_MARK_MW; + mwo_p->hca_idx = hca_idx; + mwo_p->pd_idx = pd_idx; + *p_rkey = cl_hton32( mwo_p->mw_rkey ); + + *ph_mw = (ib_mw_handle_t)mwo_p; + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_query_mw ( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t *ph_pd, + OUT net32_t* const p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + mlnx_mwo_t *mwo_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + hca_idx = mwo_p->hca_idx; + pd_idx = mwo_p->pd_idx; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MW_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MW_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx); + if (p_rkey) *p_rkey = cl_hton32( mwo_p->mw_rkey ); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( p_umv_buf && 
p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_destroy_mw ( + IN const ib_mw_handle_t h_mw) +{ + mlnx_mwo_t *mwo_p = NULL; + u_int32_t hca_idx; + u_int32_t pd_idx; + mlnx_hobul_t *hobul_p; + ib_api_status_t status; + + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( !cl_is_blockable() ) + return IB_UNSUPPORTED; + + mwo_p = (mlnx_mwo_t *)h_mw; + if (!mwo_p || mwo_p->mark != E_MARK_MW) { + status = IB_INVALID_MW_HANDLE; + goto cleanup; + } + + hca_idx = mwo_p->hca_idx; + pd_idx = mwo_p->pd_idx; + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_MW_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_MW_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + if (HH_OK != THH_hob_free_mw(hobul_p->hh_hndl, (IB_rkey_t)mwo_p->mw_rkey)) + { + status = IB_ERROR; + goto cleanup; + } + + // update PD object count + cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count); + + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (mwo_p) { + mwo_p->mark = E_MARK_INVALID; + cl_free( mwo_p); + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->register_mr = mlnx_register_mr; + p_interface->register_pmr = mlnx_register_pmr; + p_interface->query_mr = mlnx_query_mr; + p_interface->modify_mr = mlnx_modify_mr; + p_interface->modify_pmr = mlnx_modify_pmr; + p_interface->register_smr = mlnx_register_smr; + p_interface->deregister_mr = mlnx_deregister_mr; + + p_interface->alloc_mlnx_fmr = mlnx_alloc_fmr; + p_interface->map_phys_mlnx_fmr = mlnx_map_phys_fmr; + p_interface->unmap_mlnx_fmr = mlnx_unmap_fmr; + p_interface->dealloc_mlnx_fmr = mlnx_dealloc_fmr; + + p_interface->create_mw = mlnx_create_mw; + p_interface->query_mw = mlnx_query_mw; + p_interface->destroy_mw = mlnx_destroy_mw; +} + diff --git a/branches/Ndi/hw/mt23108/kernel/hca_smp.c b/branches/Ndi/hw/mt23108/kernel/hca_smp.c new file mode 100644 index 00000000..725634fd --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_smp.c @@ -0,0 +1,581 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Voltaire Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * SMP handling of IB Access Layer VPD for Mellanox MT23108 HCA + */ + + +#include "hca_data.h" +#include "hca_debug.h" + + +boolean_t +mlnx_cachable_guid_info( + IN const mlnx_cache_t* const p_cache, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + uint32_t idx; + + /* Get the table selector from the attribute */ + idx = cl_ntoh32( p_mad_in->attr_mod ); + + /* + * TODO: Setup the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 31 ) + return FALSE; + + if( !p_cache->guid_block[idx].valid ) + return FALSE; + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad_in->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ), + &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) ) ) + { + /* The set is requesting a change. */ + return FALSE; + } + } + + /* Setup the response mad. */ + cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE ); + p_mad_out->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status = IB_SMP_DIRECTION; + else + p_mad_out->status = 0; + + /* Copy the cached data. */ + cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + &p_cache->guid_block[idx].tbl, sizeof(ib_guid_info_t) ); + + return TRUE; +} + + +boolean_t +mlnx_cachable_pkey_table( + IN const mlnx_cache_t* const p_cache, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)cl_ntoh32( p_mad_in->attr_mod )); + + /* + * TODO: Setup the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 2047 ) + return FALSE; + + if( !p_cache->pkey_tbl[idx].valid ) + return FALSE; + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad_in->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ), + &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ) ) + { + /* The set is requesting a change. */ + return FALSE; + } + } + + /* Setup the response mad. */ + cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE ); + p_mad_out->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status = IB_SMP_DIRECTION; + else + p_mad_out->status = 0; + + /* Copy the cached data. */ + cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + &p_cache->pkey_tbl[idx].tbl, sizeof(ib_pkey_table_t) ); + + return TRUE; +} + + +boolean_t +mlnx_cachable_sl_vl_table( + IN const mlnx_cache_t* const p_cache, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + if( !p_cache->sl_vl.valid ) + return FALSE; + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. 
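The mlnx_cachable_*() helpers in this file all apply one template: bail out to hardware if the cache slot is invalid; for a Set, short-circuit only when the payload matches the cache (a no-op); otherwise clone the request, turn the method into a GetResp, and paste in the cached table (plus the direction bit for directed-route SMPs, elided here). A pared-down sketch of that template, with structures trimmed to the fields it touches.

/* Cache-hit template shared by the mlnx_cachable_*() helpers. */
#include <stdio.h>

#define IB_MAD_METHOD_GET       0x01
#define IB_MAD_METHOD_SET       0x02
#define IB_MAD_METHOD_RESP_MASK 0x80

struct mad  { unsigned char method; unsigned payload; };
struct slot { int valid; unsigned tbl; };

static int serve_from_cache(const struct slot *s, const struct mad *in,
                            struct mad *out)
{
    if (!s->valid)
        return 0;                          /* must go to hardware        */
    if (in->method == IB_MAD_METHOD_SET && in->payload != s->tbl)
        return 0;                          /* Set requests a real change */

    *out = *in;                            /* start from the request     */
    out->method  = IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET;
    out->payload = s->tbl;                 /* paste cached data          */
    return 1;
}

int main(void)
{
    struct slot s  = { 1, 42 };
    struct mad  in = { IB_MAD_METHOD_GET, 0 }, out;
    printf("served=%d payload=%u\n", serve_from_cache(&s, &in, &out), out.payload);
    return 0;
}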
+ */ + if( p_mad_in->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ), + &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) ) ) + { + /* The set is requesting a change. */ + return FALSE; + } + } + + /* Setup the response mad. */ + cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE ); + p_mad_out->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status = IB_SMP_DIRECTION; + else + p_mad_out->status = 0; + + /* Copy the cached data. */ + cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + &p_cache->sl_vl.tbl, sizeof(ib_slvl_table_t) ); + + return TRUE; +} + + +boolean_t +mlnx_cachable_vl_arb_table( + IN const mlnx_cache_t* const p_cache, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)(cl_ntoh32( p_mad_in->attr_mod ) >> 16)) - 1; + + /* + * TODO: Setup the response to fail the MAD instead of sending + * it down to the HCA. + */ + if( idx > 3 ) + return FALSE; + + if( !p_cache->vl_arb[idx].valid ) + return FALSE; + + /* + * If a SET, see if the set is identical to the cache, + * in which case it's a no-op. + */ + if( p_mad_in->method == IB_MAD_METHOD_SET ) + { + if( cl_memcmp( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ), + &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ) ) + { + /* The set is requesting a change. */ + return FALSE; + } + } + + /* Setup the response mad. */ + cl_memcpy( p_mad_out, p_mad_in, MAD_BLOCK_SIZE ); + p_mad_out->method = (IB_MAD_METHOD_RESP_MASK | IB_MAD_METHOD_GET); + if( p_mad_out->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status = IB_SMP_DIRECTION; + else + p_mad_out->status = 0; + + /* Copy the cached data. */ + cl_memcpy( ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + &p_cache->vl_arb[idx].tbl, sizeof(ib_vl_arb_table_t) ); + + return TRUE; +} + + +boolean_t +mlnx_cachable_port_info( + IN const mlnx_cache_t* const p_cache, + IN const uint8_t port_num, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + ib_port_info_t *p_port_info; + + UNUSED_PARAM( p_mad_out ); + + if( !p_cache->port_info.valid ) + return FALSE; + + if( p_mad_in->method == IB_MAD_METHOD_GET ) + return FALSE; + + /* + * NOTE: Even though the input MAD is const, we modify it to change + * some parameters to no-ops to compensate for problems in the HCA chip. + */ + p_port_info = + (ib_port_info_t*)ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ); + + /* We can only cache requests for the same port that the SMP came in on. */ + if( p_mad_in->attr_mod != 0 && + cl_ntoh32( p_mad_in->attr_mod ) != port_num ) + { + return FALSE; + } + + /* + * to avoid unnecessary glitches in port state, we translate these + * fields to NOP when there is no change. Note these fields cannot + * change within the hardware without a Set going through here. 
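The table selector is packed into the 32-bit attribute modifier differently per attribute: GuidInfo and P_KeyTable carry the block index in the low bits, while VLArbitration keeps its 1-based block number in the upper 16 bits, hence the (attr_mod >> 16) - 1 conversion in mlnx_cachable_vl_arb_table() above. A small sketch of the three decodings, assuming the values have already been byte-swapped with cl_ntoh32() as in the driver.

/* Decoding SMP attribute modifiers into cache indices. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Three attribute modifiers as they would appear after cl_ntoh32(). */
    uint32_t guid_mod  = 3;         /* GuidInfo: block index in the low bits  */
    uint32_t pkey_mod  = 7;         /* P_KeyTable: block index, low 16 bits   */
    uint32_t vlarb_mod = 2u << 16;  /* VLArbitration: block 1..4 in high word */

    uint32_t guid_idx  = guid_mod;
    uint16_t pkey_idx  = (uint16_t)pkey_mod;
    uint16_t vlarb_idx = (uint16_t)(vlarb_mod >> 16) - 1;  /* to 0-based */

    printf("guid=%u pkey=%u vlarb=%u\n",
           (unsigned)guid_idx, (unsigned)pkey_idx, (unsigned)vlarb_idx);
    return 0;
}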
+ */ + if( p_port_info->link_width_enabled == + p_cache->port_info.info.link_width_enabled ) + { + p_port_info->link_width_enabled = 0; + } + if( (p_port_info->state_info2 & 0x0F) == + (p_cache->port_info.info.state_info2 & 0x0F) ) + { + p_port_info->state_info2 &= 0xF0; + } + if( (p_port_info->link_speed & 0x0F) == + (p_cache->port_info.info.link_speed & 0x0F) ) + { + p_port_info->link_speed &= 0xF0; + } + if( (p_port_info->vl_enforce & 0xF0) == + (p_cache->port_info.info.vl_enforce & 0xF0) ) + { + p_port_info->vl_enforce &= 0x0F; + } + + /* + * We modified the input MAD to change things to no-ops, but + * we can't actually fulfill the MAD with cached data. + */ + return FALSE; +} + + +boolean_t +mlnx_cachable_mad( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + if( p_mad_in->mgmt_class!= IB_MCLASS_SUBN_DIR && + p_mad_in->mgmt_class != IB_MCLASS_SUBN_LID ) + { + return FALSE; + } + + switch( p_mad_in->attr_id ) + { + case IB_MAD_ATTR_GUID_INFO: + return mlnx_cachable_guid_info( + &h_ca->cache[port_num-1], p_mad_in, p_mad_out ); + + case IB_MAD_ATTR_P_KEY_TABLE: + return mlnx_cachable_pkey_table( + &h_ca->cache[port_num-1], p_mad_in, p_mad_out ); + + case IB_MAD_ATTR_SLVL_TABLE: + return mlnx_cachable_sl_vl_table( + &h_ca->cache[port_num-1], p_mad_in, p_mad_out ); + + case IB_MAD_ATTR_VL_ARBITRATION: + return mlnx_cachable_vl_arb_table( + &h_ca->cache[port_num-1], p_mad_in, p_mad_out ); + + case IB_MAD_ATTR_PORT_INFO: + return mlnx_cachable_port_info( + &h_ca->cache[port_num-1], port_num, p_mad_in, p_mad_out ); + + default: + break; + } + return FALSE; +} + + +void +mlnx_update_guid_info( + IN mlnx_cache_t* const p_cache, + IN const ib_mad_t* const p_mad_out ) +{ + uint32_t idx; + + /* Get the table selector from the attribute */ + idx = cl_ntoh32( p_mad_out->attr_mod ); + + /* + * We only get successful MADs here, so invalid settings + * shouldn't happen. + */ + CL_ASSERT( idx <= 31 ); + + cl_memcpy( &p_cache->guid_block[idx].tbl, + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + sizeof(ib_guid_info_t) ); + p_cache->guid_block[idx].valid = TRUE; +} + + +void +mlnx_update_pkey_table( + IN mlnx_cache_t* const p_cache, + IN const ib_mad_t* const p_mad_out ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)cl_ntoh32( p_mad_out->attr_mod )); + + ASSERT( idx <= 2047 ); + + cl_memcpy( &p_cache->pkey_tbl[idx].tbl, + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + sizeof(ib_pkey_table_t) ); + p_cache->pkey_tbl[idx].valid = TRUE; +} + + +void +mlnx_update_sl_vl_table( + IN mlnx_cache_t* const p_cache, + IN const ib_mad_t* const p_mad_out ) +{ + cl_memcpy( &p_cache->sl_vl.tbl, + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + sizeof(ib_slvl_table_t) ); + p_cache->sl_vl.valid = TRUE; +} + + +void +mlnx_update_vl_arb_table( + IN mlnx_cache_t* const p_cache, + IN const ib_mad_t* const p_mad_out ) +{ + uint16_t idx; + + /* Get the table selector from the attribute */ + idx = ((uint16_t)(cl_ntoh32( p_mad_out->attr_mod ) >> 16)) - 1; + + CL_ASSERT( idx <= 3 ); + + cl_memcpy( &p_cache->vl_arb[idx].tbl, + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ), + sizeof(ib_vl_arb_table_t) ); + p_cache->vl_arb[idx].valid = TRUE; +} + + +void +mlnx_update_port_info( + IN const mlnx_cache_t* const p_cache, + IN const uint8_t port_num, + IN const ib_mad_t* const p_mad_out ) +{ + UNUSED_PARAM( p_cache ); + + /* We can only cache requests for the same port that the SMP came in on. 
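Both the cacheability check and the cache update accept a PortInfo SMP only when its attribute modifier is 0 (meaning "the port this SMP arrived on") or explicitly names the ingress port, since each per-port cache holds only the local port's data. The test, isolated:

/* Ingress-port test used by mlnx_cachable_port_info() and
 * mlnx_update_port_info() above. */
#include <stdint.h>
#include <stdio.h>

static int portinfo_is_local(uint32_t attr_mod_host_order, uint8_t port_num)
{
    return attr_mod_host_order == 0 || attr_mod_host_order == port_num;
}

int main(void)
{
    printf("%d %d %d\n",
           portinfo_is_local(0, 1),   /* wildcard: local   */
           portinfo_is_local(1, 1),   /* explicit match    */
           portinfo_is_local(2, 1));  /* other port: defer */
    return 0;
}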
*/ + /* TODO: Add synchronization to support getting data from other ports. */ + if( p_mad_out->attr_mod != 0 && + cl_ntoh32( p_mad_out->attr_mod ) != port_num ) + { + return; + } + + /* TODO: Setup the capabilites mask properly. */ +} + + +void +mlnx_update_cache( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_mad_t *p_mad_out ) +{ + if( p_mad_out->mgmt_class != IB_MCLASS_SUBN_DIR && + p_mad_out->mgmt_class != IB_MCLASS_SUBN_LID ) + { + return; + } + + /* Any successful response updates the cache. */ + if( p_mad_out->status ) + return; + + + switch( p_mad_out->attr_id ) + { + case IB_MAD_ATTR_GUID_INFO: + mlnx_update_guid_info( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_P_KEY_TABLE: + mlnx_update_pkey_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_SLVL_TABLE: + mlnx_update_sl_vl_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_VL_ARBITRATION: + mlnx_update_vl_arb_table( + &h_ca->cache[port_num-1], p_mad_out ); + break; + + case IB_MAD_ATTR_PORT_INFO: + mlnx_update_port_info( + &h_ca->cache[port_num-1], port_num, p_mad_out ); + break; + + default: + break; + } + +} + + +/* + * Local MAD Support Verbs. For CAs that do not support + * agents in HW. + */ +ib_api_status_t +mlnx_local_mad ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_av_attr_t *p_av_src_attr, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ) +{ + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + u_int32_t hca_idx; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + UNUSED_PARAM(*p_av_src_attr); + + if (port_num > 2) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + if (!hob_p || E_MARK_CA != hob_p->mark) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_idx = hob_p->index; + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + if( !mlnx_cachable_mad( h_ca, port_num, p_mad_in, p_mad_out ) ) + { + if( HH_OK != THH_hob_process_local_mad( hobul_p->hh_hndl, port_num, + 0x0, 0, (void *)p_mad_in, p_mad_out ) ) + { + HCA_TRACE( HCA_DBG_ERROR, + ("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x", + p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ) ); + status = IB_ERROR; + goto cleanup; + } + if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR || + p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) && + p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO ) + { + ib_port_info_t *p_pi_in, *p_pi_out; + + if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + p_pi_in = (ib_port_info_t*) + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in ); + p_pi_out = (ib_port_info_t*) + ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out ); + } + else + { + p_pi_in = (ib_port_info_t*)(p_mad_in + 1); + p_pi_out = (ib_port_info_t*)(p_mad_out + 1); + } + + /* Work around FW bug 33958 */ + p_pi_out->subnet_timeout &= 0x7F; + if( p_mad_in->method == IB_MAD_METHOD_SET ) + p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80); + } + + mlnx_update_cache( h_ca, port_num, p_mad_out ); + } + + /* Modify direction for Direct MAD */ + if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ) + p_mad_out->status |= IB_SMP_DIRECTION; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, 
g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} diff --git a/branches/Ndi/hw/mt23108/kernel/hca_verbs.c b/branches/Ndi/hw/mt23108/kernel/hca_verbs.c new file mode 100644 index 00000000..6e95a8f9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/hca_verbs.c @@ -0,0 +1,2348 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "hca_data.h" +#include "hca_debug.h" + + +#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1)) + + +/* Matches definition in IbAccess for MaxSMPsWatermark */ +uint32_t g_sqp_max_avs = ((4096/sizeof(ib_mad_t))*32*5); + + +// Local declarations +ib_api_status_t +mlnx_query_qp ( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t *p_qp_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* CA Access Verbs +*/ +ib_api_status_t +mlnx_open_ca ( + IN const ib_net64_t ca_guid, // IN const char * ca_name, + IN const ci_completion_cb_t pfn_completion_cb, + IN const ci_async_event_cb_t pfn_async_event_cb, + IN const void*const ca_context, + OUT ib_ca_handle_t *ph_ca) +{ +// char * ca_name = NULL; +// char * dev_name = NULL; + mlnx_hca_t *p_hca; + HH_hca_dev_t * hca_ul_info; + void * hca_ul_resources_p = NULL; // (THH_hca_ul_resources_t *) + ib_api_status_t status; + mlnx_hob_t *new_ca = NULL; + MOSAL_protection_ctx_t prot_ctx; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context)); + + p_hca = mlnx_hca_from_guid( ca_guid ); + if( !p_hca ) { + HCA_EXIT( MLNX_DBG_TRACE ); + return IB_NOT_FOUND; + } + + //// Verify that the device has been discovered (it'd better be) + //mlnx_names_from_guid(ca_guid, &ca_name, &dev_name); + //if (!ca_name) { + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + // return IB_NOT_FOUND; + //} + + //// We have name - lookup device + //if (HH_OK != HH_lookup_hca(ca_name, &hh_hndl)) { + // CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + // return IB_NOT_FOUND; + //} + + hca_ul_info = p_hca->hh_hndl; + + { + // We are opening the HCA in kernel mode. + // if a HOBKL exists for this device (i.e. 
it is open) - return E_BUSY + if (IB_SUCCESS == mlnx_hobs_lookup(p_hca->hh_hndl, &new_ca)) { + if (ph_ca) *ph_ca = (ib_ca_handle_t)new_ca; + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_RESOURCE_BUSY; + } + + // Create a mapping from hca index to hh_hndl + status = mlnx_hobs_insert(p_hca, &new_ca); + if (IB_SUCCESS != status) { + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; + } + + /* save copy of HCA device object */ + new_ca->p_dev_obj = p_hca->p_dev_obj; + + // Initialize the device driver + if (HH_OK != THH_hob_open_hca(p_hca->hh_hndl, NULL, NULL)) { + status = IB_ERROR; + goto cleanup; + } + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("context 0x%p\n", ca_context)); + status = mlnx_hobs_set_cb(new_ca, + pfn_completion_cb, + pfn_async_event_cb, + ca_context); + if (IB_SUCCESS != status) { + goto cleanup; + } + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("ul_resource sizes: hca %d pd %d\n", + hca_ul_info->hca_ul_resources_sz, + hca_ul_info->pd_ul_resources_sz)); + + hca_ul_resources_p = cl_zalloc( hca_ul_info->hca_ul_resources_sz); + + /* get the kernel protection context */ + prot_ctx = MOSAL_get_kernel_prot_ctx(); + } + + if (!hca_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THH_hob_alloc_ul_res(p_hca->hh_hndl, prot_ctx, hca_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + + // TBD: !!! in user mode (kernel hobul_idx != hob_idx) + status = mlnx_hobul_new(new_ca, p_hca->hh_hndl, hca_ul_resources_p); + if (IB_SUCCESS != status) { + goto cleanup; + } + + // Return the HOBUL index + if (ph_ca) *ph_ca = new_ca; + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if (hca_ul_resources_p) + cl_free( hca_ul_resources_p); + THH_hob_close_hca(p_hca->hh_hndl); + mlnx_hobs_remove(new_ca); + + // For user mode call - return status to user mode + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_query_ca ( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t *p_ca_attr, + IN OUT uint32_t *p_byte_count, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_hndl_t hh_hndl = NULL; + HH_hca_dev_t *hca_ul_info; + VAPI_hca_cap_t hca_cap; + VAPI_hca_port_t *hca_ports = NULL; + uint32_t size, required_size; + u_int8_t port_num, num_ports; + u_int32_t num_gids, num_pkeys; + u_int32_t num_page_sizes = 1; // TBD: what is actually supported + uint8_t *last_p; + void *hca_ul_resources_p = NULL; + u_int32_t priv_op; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (NULL == p_byte_count) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + mlnx_hobs_get_handle(hob_p, &hh_hndl); + if (NULL == hh_hndl) { + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("returning E_NODEV dev\n")); + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hh_hndl; + + if (HH_OK != THH_hob_query(hh_hndl, &hca_cap)) { + status = IB_ERROR; + goto cleanup; + } + + num_ports = hca_cap.phys_port_num; /* Number of physical ports of the HCA */ + + if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof(VAPI_hca_port_t)))) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed to cl_zalloc ports array\n")); + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + // Loop on ports and get their properties + num_gids = 0; + num_pkeys = 0; + required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) + + PTR_ALIGN(sizeof(u_int32_t) * 
num_page_sizes) + + PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports); + for (port_num = 0; port_num < num_ports; port_num++) { + if (HH_OK != THH_hob_query_port_prop(hh_hndl, port_num+1, &hca_ports[port_num])) { + status = IB_ERROR; + goto cleanup; + } + + num_gids = hca_ports[port_num].gid_tbl_len; + size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids); + required_size += size; + + num_pkeys = hca_ports[port_num].pkey_tbl_len; + size = PTR_ALIGN(sizeof(u_int16_t) * num_pkeys); + required_size += size; + } + + if( p_umv_buf && p_umv_buf->command ) + { + /* + * Prepare the buffer with the size including hca_ul_resources_sz + * NO ALIGNMENT for this size + */ + + if (p_umv_buf->p_inout_buf) + { + cl_memcpy (&priv_op, p_umv_buf->p_inout_buf, sizeof (priv_op)); + CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("priv_op = %d\n", priv_op)); + + /* + * Yes, UVP request for hca_ul_info + */ + if (p_umv_buf->input_size != + (sizeof (HH_hca_dev_t) + sizeof (priv_op) )) + { + *p_byte_count = required_size; + p_umv_buf->output_size = 0; + status = IB_INVALID_PARAMETER; + goto cleanup; + } + cl_memcpy( (uint8_t* __ptr64)p_umv_buf->p_inout_buf + sizeof (priv_op), + hca_ul_info, sizeof (HH_hca_dev_t)); + p_umv_buf->output_size = p_umv_buf->input_size; + } + } + + if (NULL == p_ca_attr || *p_byte_count < required_size) { + *p_byte_count = required_size; + status = IB_INSUFFICIENT_MEMORY; + if ( p_ca_attr != NULL) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed *p_byte_count < required_size\n")); + } + goto cleanup; + } + + // Space is sufficient - setup table pointers + last_p = (uint8_t*)p_ca_attr; + last_p += PTR_ALIGN(sizeof(*p_ca_attr)); + + p_ca_attr->p_page_size = (uint32_t*)last_p; + last_p += PTR_ALIGN(num_page_sizes * sizeof(u_int32_t)); + + p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p; + last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t)); + + for (port_num = 0; port_num < num_ports; port_num++) { + p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p; + size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len); + last_p += size; + + p_ca_attr->p_port_attr[port_num].p_pkey_table = (u_int16_t *)last_p; + size = PTR_ALIGN(sizeof(u_int16_t) * hca_ports[port_num].pkey_tbl_len); + last_p += size; + } + + // Separate the loops to ensure that table pointers are always setup + for (port_num = 0; port_num < num_ports; port_num++) { + status = mlnx_get_hca_pkey_tbl(hh_hndl, port_num+1, + hca_ports[port_num].pkey_tbl_len, + p_ca_attr->p_port_attr[port_num].p_pkey_table); + if (IB_SUCCESS != status) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed to mlnx_get_hca_pkey_tbl for port_num:%d\n",port_num)); + goto cleanup; + } + + status = mlnx_get_hca_gid_tbl(hh_hndl, port_num+1, + hca_ports[port_num].gid_tbl_len, + &p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw); + if (IB_SUCCESS != status) { + CL_TRACE (CL_DBG_ERROR, g_mlnx_dbg_lvl, + ("Failed to mlnx_get_hca_gid_tbl for port_num:%d\n",port_num)); + goto cleanup; + } + +#if 0 + { + int i; + + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("port %d gid0:", port_num)); + for (i = 0; i < 16; i++) + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, (" 0x%x", p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[i])); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("\n")); + } +#endif + } + + // Convert query result into IBAL structure (no cl_memset()) + if( p_umv_buf && p_umv_buf->command ) + { + // p_ca_attr->size = required_size - hca_ul_info->hca_ul_resources_sz; + p_ca_attr->size = required_size; + } + else + { + p_ca_attr->size = required_size; 
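+		// Note: both branches report the full required_size here; the
+		// commented-out line in the user-mode branch above is a variant
+		// that excluded hca_ul_resources_sz from the reported size.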
+ } + + // !!! GID/PKEY tables must be queried before this call !!! + mlnx_conv_vapi_hca_cap(hca_ul_info, &hca_cap, hca_ports, p_ca_attr); + + // verify: required space == used space + CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) ); + +#if 0 + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("Space required %d used %d\n", + required_size, + ((uintn_t)last_p) - ((uintn_t)p_ca_attr)))); +#endif + + if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = IB_SUCCESS; + if (hca_ul_resources_p) cl_free (hca_ul_resources_p); + if (hca_ports) cl_free( hca_ports ); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + if( p_umv_buf && p_umv_buf->command ) p_umv_buf->status = status; + if (hca_ul_resources_p) cl_free (hca_ul_resources_p); + if (hca_ports) cl_free( hca_ports); + if( p_ca_attr != NULL || status != IB_INSUFFICIENT_MEMORY ) + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_modify_ca ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t modca_cmd, + IN const ib_port_attr_mod_t *p_port_attr) +{ + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_hndl_t hh_hndl = NULL; + + VAPI_hca_attr_t hca_attr; + VAPI_hca_attr_mask_t hca_attr_mask = 0; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + mlnx_hobs_get_handle(hob_p, &hh_hndl); + if (NULL == hh_hndl) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + cl_memclr(&hca_attr, sizeof(hca_attr)); + if (modca_cmd & IB_CA_MOD_IS_SM) { + hca_attr_mask |= HCA_ATTR_IS_SM; + hca_attr.is_sm = (MT_bool)p_port_attr->cap.sm; + } + if (modca_cmd & IB_CA_MOD_IS_SNMP_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_SNMP_TUN_SUP; + hca_attr.is_snmp_tun_sup = (MT_bool)p_port_attr->cap.snmp; + } + if (modca_cmd & IB_CA_MOD_IS_DEV_MGMT_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_DEV_MGT_SUP; + hca_attr.is_dev_mgt_sup = (MT_bool)p_port_attr->cap.dev_mgmt; + } + if (modca_cmd & IB_CA_MOD_IS_VEND_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_VENDOR_CLS_SUP; + hca_attr.is_vendor_cls_sup = (MT_bool)p_port_attr->cap.vend; + } + if (modca_cmd & IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED) { + hca_attr_mask |= HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP; + hca_attr.is_client_reregister_sup= (MT_bool)p_port_attr->cap.client_reregister; + } + if (modca_cmd & IB_CA_MOD_QKEY_CTR) { + if (p_port_attr->qkey_ctr == 0) + hca_attr.reset_qkey_counter = TRUE; + } + + if (0 != hca_attr_mask) { + if (HH_OK != THH_hob_modify( hh_hndl, port_num, &hca_attr, &hca_attr_mask)) + { + status = IB_ERROR; + goto cleanup; + } + } + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_close_ca ( + IN ib_ca_handle_t h_ca) +{ + ib_api_status_t status; + + HH_hca_hndl_t hh_hndl = NULL; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_dev_t *hca_ul_info; + void *hca_ul_resources_p = NULL; + mlnx_hobul_t *hobul_p; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + hobul_p = mlnx_hobul_array[hob_p->index]; + if( !hobul_p ) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + if( hobul_p->count ) { + status = IB_RESOURCE_BUSY; + goto cleanup; + } + + mlnx_hobs_get_handle(hob_p, &hh_hndl); + if (NULL == hh_hndl) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = 
(HH_hca_dev_t *)hh_hndl; + mlnx_hobul_get(hob_p, &hca_ul_resources_p); + + if (hca_ul_resources_p) { + THH_hob_free_ul_res(hh_hndl, hca_ul_resources_p); + cl_free( hca_ul_resources_p); + } + mlnx_hobul_delete(hob_p); + THH_hob_close_hca(hh_hndl); + mlnx_hobs_remove(hob_p); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +static ib_api_status_t +mlnx_um_open( + IN const ib_ca_handle_t h_ca, + IN OUT ci_umv_buf_t* const p_umv_buf, + OUT ib_ca_handle_t* const ph_um_ca ) +{ + ib_api_status_t status; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HH_hca_hndl_t hh_hndl = NULL; + HH_hca_dev_t *hca_ul_info; + mlnx_um_ca_t *p_um_ca; + MOSAL_protection_ctx_t prot_ctx; + + HCA_ENTER( MLNX_DBG_TRACE ); + + mlnx_hobs_get_handle( hob_p, &hh_hndl ); + if( !hh_hndl ) + { + HCA_TRACE(MLNX_DBG_INFO, ("returning E_NODEV dev\n")); + status = IB_INVALID_CA_HANDLE; + goto mlnx_um_open_err1; + } + + hca_ul_info = (HH_hca_dev_t *)hh_hndl; + + if( !p_umv_buf->command ) + { + p_um_ca = (mlnx_um_ca_t*)cl_zalloc( sizeof(mlnx_um_ca_t) ); + if( !p_um_ca ) + { + p_umv_buf->status = IB_INSUFFICIENT_MEMORY; + goto mlnx_um_open_err1; + } + /* Copy the dev info. */ + p_um_ca->dev_info = *hca_ul_info; + p_um_ca->hob_p = hob_p; + *ph_um_ca = (ib_ca_handle_t)p_um_ca; + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->output_size = 0; + HCA_EXIT( MLNX_DBG_TRACE ); + return IB_SUCCESS; + } + + /* + * Prepare the buffer with the size including hca_ul_resources_sz + * NO ALIGNMENT for this size + */ + if( !p_umv_buf->p_inout_buf || + p_umv_buf->output_size < sizeof(void*) ) + { + p_umv_buf->status = IB_INVALID_PARAMETER; + goto mlnx_um_open_err1; + } + + HCA_TRACE( MLNX_DBG_TRACE, ("priv_op = %d\n", p_umv_buf->command )); + + /* Yes, UVP request for hca_ul_info. */ + p_um_ca = (mlnx_um_ca_t*)cl_zalloc( + sizeof(mlnx_um_ca_t) + hca_ul_info->hca_ul_resources_sz - 1 ); + if( !p_um_ca ) + { + p_umv_buf->status = IB_INSUFFICIENT_MEMORY; + goto mlnx_um_open_err1; + } + + p_um_ca->p_mdl = IoAllocateMdl( &p_um_ca->dev_info, + (ULONG)(sizeof(HH_hca_dev_t) + hca_ul_info->hca_ul_resources_sz), + FALSE, TRUE, NULL ); + if( !p_um_ca->p_mdl ) + { + p_umv_buf->status = IB_ERROR; + goto mlnx_um_open_err2; + } + /* Build the page list... */ + MmBuildMdlForNonPagedPool( p_um_ca->p_mdl ); + + /* Map the memory into the calling process's address space. */ + __try + { + p_um_ca->p_mapped_addr = + MmMapLockedPagesSpecifyCache( p_um_ca->p_mdl, + UserMode, MmCached, NULL, FALSE, NormalPagePriority ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + p_umv_buf->status = IB_ERROR; + goto mlnx_um_open_err3; + } + + /* Register with THH (attach to the HCA). */ + prot_ctx = MOSAL_get_current_prot_ctx(); + if( THH_hob_alloc_ul_res(hh_hndl, prot_ctx, p_um_ca->ul_hca_res) != HH_OK ) + { + HCA_TRACE( CL_DBG_ERROR, ("Failed to get ul_res\n")); + p_umv_buf->status = IB_ERROR; + } + + if( p_umv_buf->status == IB_SUCCESS ) + { + /* Copy the dev info. 
*/
+		p_um_ca->dev_info = *hca_ul_info;
+		p_um_ca->hob_p = hob_p;
+		*ph_um_ca = (ib_ca_handle_t)p_um_ca;
+		(*(void** __ptr64)p_umv_buf->p_inout_buf) = p_um_ca->p_mapped_addr;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	else
+	{
+		MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
+mlnx_um_open_err3:
+		IoFreeMdl( p_um_ca->p_mdl );
+mlnx_um_open_err2:
+		cl_free( p_um_ca );
+mlnx_um_open_err1:
+		*ph_um_ca = NULL;
+	}
+
+	//*ph_um_ca = NULL;
+	p_umv_buf->output_size = sizeof(void*);
+	HCA_EXIT( MLNX_DBG_TRACE );
+	return p_umv_buf->status;
+}
+
+
+static void
+mlnx_um_close(
+	IN	ib_ca_handle_t	h_ca,
+	IN	ib_ca_handle_t	h_um_ca )
+{
+	mlnx_hob_t	*hob_p = (mlnx_hob_t *)h_ca;
+	HH_hca_hndl_t	hh_hndl = NULL;
+	mlnx_um_ca_t	*p_um_ca = (mlnx_um_ca_t*)h_um_ca;
+
+	HCA_ENTER( MLNX_DBG_TRACE );
+
+	/* Validate the UM CA object before any use of it. */
+	if( !p_um_ca )
+		return;
+
+	if( !p_um_ca->p_mapped_addr )
+		goto done;
+
+	mlnx_hobs_get_handle( hob_p, &hh_hndl );
+	if( !hh_hndl )
+		goto mlnx_um_close_cleanup;
+
+	THH_hob_free_ul_res( hh_hndl, p_um_ca->ul_hca_res );
+
+mlnx_um_close_cleanup:
+	MmUnmapLockedPages( p_um_ca->p_mapped_addr, p_um_ca->p_mdl );
+	IoFreeMdl( p_um_ca->p_mdl );
+done:
+	cl_free( p_um_ca );
+
+	HCA_EXIT( MLNX_DBG_TRACE );
+}
+
+
+/*
+* Protection Domain and Reliable Datagram Domain Verbs
+*/
+
+ib_api_status_t
+mlnx_allocate_pd (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	ib_pd_type_t		type,
+	OUT		ib_pd_handle_t		*ph_pd,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	mlnx_hob_t		*hob_p;
+	mlnx_hobul_t		*hobul_p;
+	HH_hca_dev_t		*hca_ul_info;
+	HHUL_pd_hndl_t		hhul_pd_hndl = 0;
+	void			*pd_ul_resources_p = NULL;
+	u_int32_t		pd_idx;
+	ib_api_status_t		status;
+	MOSAL_protection_ctx_t	prot_ctx;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	if( p_umv_buf )
+		hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p;
+	else
+		hob_p = (mlnx_hob_t *)h_ca;
+
+	hobul_p = mlnx_hobs_get_hobul(hob_p);
+	if (NULL == hobul_p) {
+		status = IB_INVALID_CA_HANDLE;
+		goto cleanup;
+	}
+
+	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
+	if (NULL == hca_ul_info) {
+		status = IB_INVALID_CA_HANDLE;
+		goto cleanup;
+	}
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		// For user mode calls - obtain and verify the vendor information
+		if ((p_umv_buf->input_size - sizeof (u_int32_t)) !=
+			hca_ul_info->pd_ul_resources_sz ||
+			NULL == p_umv_buf->p_inout_buf) {
+			status = IB_INVALID_PARAMETER;
+			goto cleanup;
+		}
+		pd_ul_resources_p = (void *)p_umv_buf->p_inout_buf;
+
+		/* get the current protection context */
+		prot_ctx = MOSAL_get_current_prot_ctx();
+	}
+	else
+	{
+		// for kernel mode calls - allocate app resources; use prep->call->done sequence
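+		//
+		// An outline of the THH two-phase allocation used below (a
+		// descriptive note only, no additional executable code):
+		//
+		//   THHUL_pdm_alloc_pd_prep()  - reserve the user-level PD resources
+		//   THH_hob_alloc_pd()         - execute the command-interface call
+		//   THHUL_pdm_alloc_pd_done()  - bind the returned pd_idx to them
+		//
+		// On failure after the prep step, the reservation is undone via
+		// THHUL_pdm_free_pd_prep/THHUL_pdm_free_pd_done (see cleanup_pd).
+		//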
+		pd_ul_resources_p = cl_zalloc( hca_ul_info->pd_ul_resources_sz);
+		if (NULL == pd_ul_resources_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto cleanup;
+		}
+
+		switch( type )
+		{
+		case IB_PDT_SQP:
+			if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,
+				g_sqp_max_avs, PD_FOR_SQP, &hhul_pd_hndl, pd_ul_resources_p))
+			{
+				status = IB_ERROR;
+				goto cleanup;
+			}
+			break;
+
+		case IB_PDT_UD:
+			if (HH_OK != THHUL_pdm_alloc_pd_avs_prep(hobul_p->hhul_hndl,
+				g_sqp_max_avs, PD_NO_FLAGS, &hhul_pd_hndl, pd_ul_resources_p))
+			{
+				status = IB_ERROR;
+				goto cleanup;
+			}
+			break;
+
+		default:
+			if (HH_OK != THHUL_pdm_alloc_pd_prep(hobul_p->hhul_hndl, &hhul_pd_hndl, pd_ul_resources_p)) {
+				status = IB_ERROR;
+				goto cleanup;
+			}
+		}
+		/* get the kernel protection context */
+		prot_ctx = MOSAL_get_kernel_prot_ctx();
+	}
+
+	// Allocate the PD (cmdif)
+	if (HH_OK != THH_hob_alloc_pd(hobul_p->hh_hndl, prot_ctx, pd_ul_resources_p, &pd_idx)) {
+		status = IB_INSUFFICIENT_RESOURCES;
+		goto cleanup_pd;
+	}
+
+	if( !(p_umv_buf && p_umv_buf->command) )
+	{
+		// Manage user level resources
+		if (HH_OK != THHUL_pdm_alloc_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl, pd_idx, pd_ul_resources_p)) {
+			THH_hob_free_pd(hobul_p->hh_hndl, pd_idx);
+			status = IB_ERROR;
+			goto cleanup_pd;
+		}
+	}
+
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_pd);
+
+	// Save data refs for future use
+	cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);
+	hobul_p->pd_info_tbl[pd_idx].pd_num = pd_idx;
+	hobul_p->pd_info_tbl[pd_idx].hca_idx = hob_p->index;
+	hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl = hhul_pd_hndl;
+	hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = pd_ul_resources_p;
+	hobul_p->pd_info_tbl[pd_idx].count = 0;
+	hobul_p->pd_info_tbl[pd_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command);
+	hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_PD;
+	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);
+
+	cl_atomic_inc( &hobul_p->count );
+
+	if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(pd_idx);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca_idx 0x%x pd_idx 0x%x returned 0x%p\n", hob_p->index, pd_idx, *ph_pd));
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = p_umv_buf->input_size;
+		/*
+		 * Copy the pd_idx back to user
+		 */
+		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->pd_ul_resources_sz),
+			&pd_idx, sizeof (pd_idx));
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_pd:
+	THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE);
+	THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl);
+
+cleanup:
+	if( !(p_umv_buf && p_umv_buf->command) && pd_ul_resources_p )
+		cl_free( pd_ul_resources_p);
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_deallocate_pd (
+	IN	ib_pd_handle_t	h_pd)
+{
+	u_int32_t	hca_idx = PD_HCA_FROM_HNDL(h_pd);
+	u_int32_t	pd_idx = PD_NUM_FROM_HNDL(h_pd);
+	mlnx_hobul_t	*hobul_p;
+	HHUL_pd_hndl_t	hhul_pd_hndl;
+	ib_api_status_t	status;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
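+	// PD handles from this provider encode the owning HCA index and the
+	// PD index (PD_HCA_FROM_HNDL/PD_NUM_FROM_HNDL); the E_MARK_PD check
+	// below rejects stale or foreign handles.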
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&hobul_p->pd_info_tbl[pd_idx].mutex);
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d k_mod %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count, hobul_p->pd_info_tbl[pd_idx].kernel_mode));
+
+	if (0 != hobul_p->pd_info_tbl[pd_idx].count) {
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+		status = IB_RESOURCE_BUSY;
+		goto cleanup_locked;
+	}
+
+	hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
+
+	// PREP:
+	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
+		if (HH_OK != THHUL_pdm_free_pd_prep(hobul_p->hhul_hndl, hhul_pd_hndl, FALSE)) {
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d before free_pd hh_hndl %p\n",
+		pd_idx, hobul_p->hh_hndl));
+
+	if (HH_OK != THH_hob_free_pd(hobul_p->hh_hndl, pd_idx)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d after free_pd\n", pd_idx));
+
+	if (hobul_p->pd_info_tbl[pd_idx].kernel_mode) {
+		if (HH_OK != THHUL_pdm_free_pd_done(hobul_p->hhul_hndl, hhul_pd_hndl)) {
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+		if (hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p)
+			cl_free( hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p);
+	}
+
+	hobul_p->pd_info_tbl[pd_idx].mark = E_MARK_INVALID;
+	hobul_p->pd_info_tbl[pd_idx].pd_ul_resources_p = NULL;
+
+	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);
+
+	cl_atomic_dec( &hobul_p->count );
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->pd_info_tbl[pd_idx].mutex);
+
+cleanup:
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+/*
+* Address Vector Management Verbs
+*/
+ib_api_status_t
+mlnx_create_av (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	ib_av_attr_t		*p_addr_vector,
+	OUT		ib_av_handle_t		*ph_av,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	u_int32_t		hca_idx = PD_HCA_FROM_HNDL(h_pd);
+	u_int32_t		pd_idx = PD_NUM_FROM_HNDL(h_pd);
+	HHUL_ud_av_hndl_t	av_h;
+	mlnx_hobul_t		*hobul_p;
+	mlnx_avo_t		*avo_p = NULL;
+	HHUL_pd_hndl_t		hhul_pd_hndl;
+	ib_api_status_t		status;
+
+	VAPI_ud_av_t		av;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	hhul_pd_hndl = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl;
+
+	if (NULL == (avo_p = cl_zalloc( sizeof(mlnx_avo_t)))) {
+		status = IB_INSUFFICIENT_MEMORY;
+		goto cleanup;
+	}
+
+	cl_memclr(&av, sizeof(av));
+	mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);
+	// This creates a non-privileged ud_av.
+	// To create a privileged ud_av call THH_hob_create_ud_av()
+	if (HH_OK != THHUL_pdm_create_ud_av(hobul_p->hhul_hndl, hhul_pd_hndl, &av, &av_h)) {
+		status = IB_INSUFFICIENT_RESOURCES;
+		goto cleanup;
+	}
+
+	// update PD object count
+	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+
+	avo_p->mark = E_MARK_AV;
+	avo_p->hca_idx = hca_idx;
+	avo_p->pd_idx = pd_idx;
+	avo_p->h_av = av_h;
+
+	if (ph_av) *ph_av = (ib_av_handle_t)avo_p;
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if (avo_p) {
+		avo_p->mark = E_MARK_INVALID;
+		cl_free( avo_p);
+	}
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_query_av (
+	IN	const	ib_av_handle_t		h_av,
+	OUT		ib_av_attr_t		*p_addr_vector,
+	OUT		ib_pd_handle_t		*ph_pd,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	mlnx_avo_t	*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	VAPI_ud_av_t	av;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	if (p_addr_vector) {
+		if (HH_OK != THHUL_pdm_query_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {
+			status = IB_ERROR;
+			goto cleanup;
+		}
+		mlnx_conv_vapi_av(hobul_p->hh_hndl, &av, p_addr_vector);
+	}
+
+	if (ph_pd) *ph_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(avo_p->pd_idx);
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_modify_av (
+	IN	const	ib_av_handle_t		h_av,
+	IN	const	ib_av_attr_t		*p_addr_vector,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	mlnx_avo_t	*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	VAPI_ud_av_t	av;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	cl_memclr(&av, sizeof(av));
+	mlnx_conv_ibal_av(hobul_p->hh_hndl, p_addr_vector, &av);
+	if (HH_OK != THHUL_pdm_modify_ud_av(hobul_p->hhul_hndl, avo_p->h_av, &av)) {
+		status = IB_ERROR;
+		goto cleanup;
+	}
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
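+
+/*
+ * Note: every AV holds a reference on its PD (pd_info_tbl[].count), so a PD
+ * cannot be deallocated while AVs created on it still exist - see the
+ * IB_RESOURCE_BUSY check in mlnx_deallocate_pd.
+ */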
+ib_api_status_t
+mlnx_destroy_av (
+	IN	const	ib_av_handle_t		h_av)
+{
+	mlnx_avo_t	*avo_p = (mlnx_avo_t *)h_av;
+	mlnx_hobul_t	*hobul_p;
+	ib_api_status_t	status;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	if (!avo_p || avo_p->mark != E_MARK_AV) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+
+	VALIDATE_INDEX(avo_p->hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[avo_p->hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_AV_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(avo_p->pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[avo_p->pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	// This destroys a non-privileged ud_av.
+	// To destroy a privileged ud_av call THH_hob_destroy_ud_av()
+	if (HH_OK != THHUL_pdm_destroy_ud_av(hobul_p->hhul_hndl, avo_p->h_av)) {
+		status = IB_ERROR;
+		goto cleanup;
+	}
+
+	// update PD object count
+	cl_atomic_dec(&hobul_p->pd_info_tbl[avo_p->pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", avo_p->pd_idx, hobul_p->pd_info_tbl[avo_p->pd_idx].count));
+
+	avo_p->mark = E_MARK_INVALID;
+	cl_free( avo_p);
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup:
+	if (avo_p) {
+		avo_p->mark = E_MARK_INVALID;
+		cl_free( avo_p);
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+/*
+* Queue Pair Management Verbs
+*/
+
+ib_api_status_t
+mlnx_create_qp (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	void			*qp_context,
+	IN	const	ib_qp_create_t		*p_create_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr,
+	OUT		ib_qp_handle_t		*ph_qp,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	ib_api_status_t		status;
+	ib_qp_handle_t		h_qp;
+
+	u_int32_t		hca_idx = PD_HCA_FROM_HNDL(h_pd);
+	u_int32_t		pd_idx = PD_NUM_FROM_HNDL(h_pd);
+	u_int32_t		qp_num;
+	u_int32_t		qp_idx;
+	u_int32_t		send_cq_num;
+	u_int32_t		send_cq_idx;
+	u_int32_t		recv_cq_num;
+	u_int32_t		recv_cq_idx;
+	mlnx_hobul_t		*hobul_p;
+	HH_hca_dev_t		*hca_ul_info;
+	HH_qp_init_attr_t	hh_qp_init_attr;
+	HHUL_qp_init_attr_t	ul_qp_init_attr;
+	HHUL_qp_hndl_t		hhul_qp_hndl = NULL;
+	VAPI_qp_cap_t		hh_qp_cap;
+	void			*qp_ul_resources_p = NULL;
+	VAPI_sg_lst_entry_t	*send_sge_p = NULL;
+	VAPI_sg_lst_entry_t	*recv_sge_p = NULL;
+	u_int32_t		num_sge;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup);
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl;
+	if (NULL == hca_ul_info) {
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup;
+	}
+
+	// The create attributes must be provided
+	if (!p_create_attr) {
+		status = IB_INVALID_PARAMETER;
+		goto cleanup;
+	}
+
+	// convert input parameters
+	cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr));
+	mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, 
NULL); + send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq); + recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq); + send_cq_idx = send_cq_num & hobul_p->cq_idx_mask; + recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + ul_qp_init_attr.pd = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl; + ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl; + ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl; + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->qp_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + } else { + // for kernel mode calls - allocate app resources. Use prep->call->done sequence + qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz); + if (!qp_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THHUL_qpm_create_qp_prep(hobul_p->hhul_hndl, &ul_qp_init_attr, &hhul_qp_hndl, &hh_qp_cap, qp_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + // TBD: if not same report error to IBAL + ul_qp_init_attr.qp_cap = hh_qp_cap; // struct assign + } + + // Convert HHUL to HH structure (for HH create_qp) + hh_qp_init_attr.pd = pd_idx; + hh_qp_init_attr.rdd = 0; // TBD: RDD + if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL ) + { + // TBD: HH handle from HHUL handle. 
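+		// SRQs are not wired up on this path yet - there is no HHUL to HH
+		// SRQ handle translation - so enforce the invariant in checked
+		// builds and fail safe to "no SRQ" below.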
+		CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL );
+		/* no SRQ support yet - fail safe so srq is never left uninitialized in free builds */
+		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
+	}
+	else
+	{
+		hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL;
+	}
+	hh_qp_init_attr.sq_cq = send_cq_num;
+	hh_qp_init_attr.rq_cq = recv_cq_num;
+	hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type;
+	hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type;
+	hh_qp_init_attr.ts_type = ul_qp_init_attr.ts_type;
+	hh_qp_init_attr.qp_cap = ul_qp_init_attr.qp_cap; // struct assign
+
+	// Allocate the QP (cmdif)
+	if (HH_OK != THH_hob_create_qp(hobul_p->hh_hndl, &hh_qp_init_attr, qp_ul_resources_p, &qp_num)) {
+		status = IB_INSUFFICIENT_RESOURCES;
+		goto cleanup_qp;
+	}
+
+	if( !(p_umv_buf && p_umv_buf->command) )
+	{
+		// Manage user level resources
+		if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) {
+			THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num);
+			status = IB_ERROR;
+			goto cleanup_qp;
+		}
+
+		// Create SQ and RQ iov
+		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq;
+		send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
+		if (!send_sge_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto cleanup_qp;
+		}
+
+		num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq;
+		recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t));
+		if (!recv_sge_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto cleanup_qp;
+		}
+	}
+
+	// Save data refs for future use
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x qp_num 0x%x\n",
+		hobul_p, hobul_p->qp_idx_mask, qp_idx, qp_num));
+
+	h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx);
+	cl_mutex_acquire(&h_qp->mutex);
+	h_qp->pd_num = pd_idx;
+	h_qp->hhul_qp_hndl = hhul_qp_hndl;
+	h_qp->qp_type = p_create_attr->qp_type;
+	h_qp->sq_signaled = p_create_attr->sq_signaled;
+	h_qp->qp_context = qp_context;
+	h_qp->qp_ul_resources_p = qp_ul_resources_p;
+	h_qp->sq_size = ul_qp_init_attr.qp_cap.max_sg_size_sq;
+	h_qp->rq_size = ul_qp_init_attr.qp_cap.max_sg_size_rq;
+	h_qp->send_sge_p = send_sge_p;
+	h_qp->recv_sge_p = recv_sge_p;
+	h_qp->qp_num = qp_num;
+	h_qp->h_sq_cq = &hobul_p->cq_info_tbl[send_cq_idx];
+	h_qp->h_rq_cq = &hobul_p->cq_info_tbl[recv_cq_idx];
+	h_qp->kernel_mode = !(p_umv_buf && p_umv_buf->command);
+	h_qp->mark = E_MARK_QP;
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n",
+		qp_num, qp_idx, send_cq_idx, recv_cq_idx));
+	cl_mutex_release(&h_qp->mutex);
+	// Update PD object count
+	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+	// Query QP to obtain requested attributes
+	if (p_qp_attr) {
+		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf)))
+		{
+			if( !(p_umv_buf && p_umv_buf->command) )
+				goto cleanup_qp;
+			else
+				goto cleanup;
+		}
+	}
+
+	if (ph_qp) *ph_qp = h_qp;
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = p_umv_buf->input_size;
+		p_umv_buf->status = IB_SUCCESS;
+		/*
+		 * Copy the qp_num back to user
+		 */
+		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->qp_ul_resources_sz),
+			&qp_num, sizeof (qp_num));
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_qp:
+	if (send_sge_p) cl_free( send_sge_p);
+	if (recv_sge_p) cl_free( recv_sge_p);
+	if( !(p_umv_buf && p_umv_buf->command) )
+		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);
+
+cleanup:
+	if( 
!(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p) + cl_free( qp_ul_resources_p); + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_create_spl_qp ( + IN const ib_pd_handle_t h_pd, + IN const uint8_t port_num, + IN const void *qp_context, + IN const ib_qp_create_t *p_create_attr, + OUT ib_qp_attr_t *p_qp_attr, + OUT ib_qp_handle_t *ph_qp ) +{ + ib_api_status_t status; + ib_qp_handle_t h_qp; + ci_umv_buf_t *p_umv_buf = NULL; + + u_int32_t hca_idx = PD_HCA_FROM_HNDL(h_pd); + u_int32_t pd_idx = PD_NUM_FROM_HNDL(h_pd); + u_int32_t qp_num; + u_int32_t qp_idx; + u_int32_t send_cq_num; + u_int32_t send_cq_idx; + u_int32_t recv_cq_num; + u_int32_t recv_cq_idx; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HH_qp_init_attr_t hh_qp_init_attr; + HHUL_qp_init_attr_t ul_qp_init_attr; + HHUL_qp_hndl_t hhul_qp_hndl = NULL; + VAPI_special_qp_t vapi_qp_type; + VAPI_qp_cap_t hh_qp_cap; + void *qp_ul_resources_p = NULL; + VAPI_sg_lst_entry_t *send_sge_p = NULL; + VAPI_sg_lst_entry_t *recv_sge_p = NULL; + u_int32_t num_sge; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_INVALID_PD_HANDLE, cleanup); + if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // The create attributes must be provided + if (!p_create_attr) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + // convert input parameters + cl_memclr(&ul_qp_init_attr, sizeof(ul_qp_init_attr)); + mlnx_conv_qp_create_attr(p_create_attr, &ul_qp_init_attr, &vapi_qp_type); + send_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_sq_cq); + recv_cq_num = CQ_NUM_FROM_HNDL(p_create_attr->h_rq_cq); + send_cq_idx = send_cq_num & hobul_p->cq_idx_mask; + recv_cq_idx = recv_cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(send_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[send_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + VALIDATE_INDEX(recv_cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[recv_cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + ul_qp_init_attr.pd = hobul_p->pd_info_tbl[pd_idx].hhul_pd_hndl; + ul_qp_init_attr.sq_cq = hobul_p->cq_info_tbl[send_cq_idx].hhul_cq_hndl; + ul_qp_init_attr.rq_cq = hobul_p->cq_info_tbl[recv_cq_idx].hhul_cq_hndl; + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if (p_umv_buf->input_size != hca_ul_info->qp_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + qp_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + } else { + // For kernel mode calls - allocate app resources. 
Use prep->call->done sequence + qp_ul_resources_p = cl_zalloc( hca_ul_info->qp_ul_resources_sz); + if (!qp_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + + if (HH_OK != THHUL_qpm_special_qp_prep(hobul_p->hhul_hndl, + vapi_qp_type, + port_num, + &ul_qp_init_attr, + &hhul_qp_hndl, + &hh_qp_cap, + qp_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + // TBD: if not same report error to IBAL + ul_qp_init_attr.qp_cap = hh_qp_cap; // struct assign + } + + // Convert HHUL to HH structure (for HH create_qp) + hh_qp_init_attr.pd = pd_idx; + hh_qp_init_attr.rdd = 0; // TBD: RDD + if( ul_qp_init_attr.srq != HHUL_INVAL_SRQ_HNDL ) + { + // TBD: HH handle from HHUL handle. + CL_ASSERT( ul_qp_init_attr.srq == HHUL_INVAL_SRQ_HNDL ); + } + else + { + hh_qp_init_attr.srq = HH_INVAL_SRQ_HNDL; + } + hh_qp_init_attr.sq_cq = send_cq_num; + hh_qp_init_attr.rq_cq = recv_cq_num; + hh_qp_init_attr.sq_sig_type = ul_qp_init_attr.sq_sig_type; + hh_qp_init_attr.rq_sig_type = ul_qp_init_attr.rq_sig_type; + hh_qp_init_attr.ts_type = VAPI_TS_UD; + hh_qp_init_attr.qp_cap = ul_qp_init_attr.qp_cap; // struct assign + + // Allocate the QP (cmdif) + if (HH_OK != THH_hob_get_special_qp( hobul_p->hh_hndl, + vapi_qp_type, + port_num, + &hh_qp_init_attr, + qp_ul_resources_p, + &qp_num)) + { + status = IB_ERROR; + goto cleanup_qp; + } + + if( !(p_umv_buf && p_umv_buf->command) ) + { + // Manage user level resources + if (HH_OK != THHUL_qpm_create_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, qp_num, qp_ul_resources_p)) { + THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num); + status = IB_ERROR; + goto cleanup_qp; + } + + // Create SQ and RQ iov + num_sge = ul_qp_init_attr.qp_cap.max_sg_size_sq; + send_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t)); + if (!send_sge_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup_qp; + } + + num_sge = ul_qp_init_attr.qp_cap.max_sg_size_rq; + recv_sge_p = cl_zalloc( num_sge * sizeof(VAPI_sg_lst_entry_t)); + if (!recv_sge_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup_qp; + } + } + + // Save data refs for future use + qp_idx = qp_num & hobul_p->qp_idx_mask; + VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_ERROR, cleanup_qp); + + h_qp = (ib_qp_handle_t)QP_HNDL_FROM_QP(qp_idx); + cl_mutex_acquire(&h_qp->mutex); + h_qp->pd_num = pd_idx; + h_qp->hhul_qp_hndl = hhul_qp_hndl; + h_qp->qp_type = p_create_attr->qp_type; + h_qp->sq_signaled = p_create_attr->sq_signaled; + h_qp->qp_context = qp_context; + h_qp->qp_ul_resources_p = qp_ul_resources_p; + h_qp->sq_size = ul_qp_init_attr.qp_cap.max_sg_size_sq; + h_qp->rq_size = ul_qp_init_attr.qp_cap.max_sg_size_rq; + h_qp->send_sge_p = send_sge_p; + h_qp->recv_sge_p = recv_sge_p; + h_qp->qp_num = qp_num; + h_qp->h_sq_cq = &hobul_p->cq_info_tbl[send_cq_idx]; + h_qp->h_rq_cq = &hobul_p->cq_info_tbl[recv_cq_idx]; + h_qp->kernel_mode = !(p_umv_buf && p_umv_buf->command); + h_qp->mark = E_MARK_QP; + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("qp num 0x%x idx 0x%x cq_s 0x%x cq_r 0x%x\n", + qp_num, qp_idx, send_cq_idx, recv_cq_idx)); + cl_mutex_release(&h_qp->mutex); + + /* Mark the CQ's associated with this special QP as being high priority. 
*/
+	cl_atomic_inc( &h_qp->h_sq_cq->spl_qp_cnt );
+	KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, HighImportance );
+	cl_atomic_inc( &h_qp->h_rq_cq->spl_qp_cnt );
+	KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, HighImportance );
+
+	// Update PD object count
+	cl_atomic_inc(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+	// Query QP to obtain requested attributes
+	if (p_qp_attr) {
+		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
+			goto cleanup;
+		}
+	}
+
+	if (ph_qp) *ph_qp = h_qp;
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = p_umv_buf->input_size;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_qp:
+	if (send_sge_p) cl_free( send_sge_p);
+	if (recv_sge_p) cl_free( recv_sge_p);
+	if( !(p_umv_buf && p_umv_buf->command) )
+		THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl);
+
+cleanup:
+	if( !(p_umv_buf && p_umv_buf->command) && qp_ul_resources_p )
+		cl_free( qp_ul_resources_p);
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_modify_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	ib_qp_mod_t		*p_modify_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr OPTIONAL,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf OPTIONAL )
+{
+	ib_api_status_t		status;
+
+	u_int32_t		hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t		qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t		qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	HHUL_qp_hndl_t		hhul_qp_hndl;
+	VAPI_qp_attr_mask_t	hh_qp_attr_mask;
+	VAPI_qp_attr_t		hh_qp_attr;
+	VAPI_qp_state_t		hh_qp_state;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("Before acquire mutex to modify qp_idx 0x%x\n",
+		qp_idx));
+
+	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;
+
+	// Obtain current state of QP
+	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, hobul_p->qp_info_tbl[qp_idx].qp_num, &hh_qp_attr))
+	{
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+	hh_qp_state = hh_qp_attr.qp_state; // The current (pre-modify) state
+
+	// Convert the input parameters. Use query result as default (no cl_memset())
+	// cl_memclr(&hh_qp_attr, sizeof(hh_qp_attr));
+	status = mlnx_conv_qp_modify_attr(hobul_p->hh_hndl,
+		hobul_p->qp_info_tbl[qp_idx].qp_type,
+		p_modify_attr, &hh_qp_attr, &hh_qp_attr_mask);
+	if( status != IB_SUCCESS )
+		goto cleanup_locked;
+
+	if (HH_OK != THH_hob_modify_qp(hobul_p->hh_hndl,
+		hobul_p->qp_info_tbl[qp_idx].qp_num,
+		hh_qp_state, &hh_qp_attr, &hh_qp_attr_mask))
+	{
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("After hob_modify_qp qp_idx 0x%x k_mod %d\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode));
+
+	// Notify HHUL of the new (post-modify) state. This is done for kernel-mode calls only.
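+	// HHUL keeps its own view of the QP state, so the post-modify state
+	// must be reported to it. For user-mode QPs the new state is instead
+	// returned through p_umv_buf (see below) and the user-mode library
+	// updates itself.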
+	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
+		if (HH_OK != THHUL_qpm_modify_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl, hh_qp_attr.qp_state))
+		{
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+	}
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	if ((p_qp_attr) && !(p_umv_buf && p_umv_buf->command)) {
+		if (IB_SUCCESS != (status = mlnx_query_qp (h_qp, p_qp_attr, p_umv_buf))) {
+			goto cleanup;
+		}
+	}
+
+	if ( p_umv_buf && p_umv_buf->command && (! hobul_p->qp_info_tbl[qp_idx].kernel_mode)) {
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+			("mod_qp qp_idx %d umv_buf %p inout_buf %p\n",
+			qp_idx, p_umv_buf, p_umv_buf->p_inout_buf));
+		if (p_umv_buf->p_inout_buf) {
+			p_umv_buf->output_size = sizeof (VAPI_qp_state_t);
+			cl_memcpy (p_umv_buf->p_inout_buf, &(hh_qp_attr.qp_state),
+				(size_t)p_umv_buf->output_size);
+			p_umv_buf->status = IB_SUCCESS;
+		}
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_query_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	OUT		ib_qp_attr_t		*p_qp_attr,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	ib_api_status_t		status;
+
+	u_int32_t		hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t		qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t		qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	VAPI_qp_attr_t		hh_qp_attr;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&h_qp->mutex);
+
+	if (HH_OK != THH_hob_query_qp(hobul_p->hh_hndl, h_qp->qp_num, &hh_qp_attr)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	// Convert query result into IBAL structure (no cl_memset())
+	mlnx_conv_vapi_qp_attr(hobul_p->hh_hndl, &hh_qp_attr, p_qp_attr);
+	p_qp_attr->qp_type = h_qp->qp_type;
+	p_qp_attr->h_pd = (ib_pd_handle_t)PD_HNDL_FROM_PD(h_qp->pd_num);
+	p_qp_attr->h_sq_cq = h_qp->h_sq_cq;
+	p_qp_attr->h_rq_cq = h_qp->h_rq_cq;
+	p_qp_attr->sq_signaled = h_qp->sq_signaled;
+
+	cl_mutex_release(&h_qp->mutex);
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = IB_SUCCESS;
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+cleanup:
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_destroy_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	uint64_t		timewait )
+{
+	ib_api_status_t		status;
+
+	u_int32_t		hca_idx = QP_HCA_FROM_HNDL(h_qp);
+	u_int32_t		qp_num = QP_NUM_FROM_HNDL(h_qp);
+	u_int32_t		pd_idx = 0;
+	u_int32_t		qp_idx = 0;
+	mlnx_hobul_t		*hobul_p;
+	HHUL_qp_hndl_t		hhul_qp_hndl;
+
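+	// Note: this provider destroys the QP immediately; the IB timewait
+	// interval is not implemented here, so the parameter is explicitly
+	// ignored below.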
+	UNUSED_PARAM( timewait );
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hca %d qp 0x%x\n", hca_idx, qp_num));
+
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CA_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	qp_idx = qp_num & hobul_p->qp_idx_mask;
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("hobul_p 0x%p mask 0x%x qp_idx 0x%x mark %d\n",
+		hobul_p, hobul_p->qp_idx_mask, qp_idx, hobul_p->qp_info_tbl[qp_idx].mark));
+
+	VALIDATE_INDEX(qp_idx, hobul_p->max_qp, IB_INVALID_QP_HANDLE, cleanup);
+	if ( E_MARK_QP != hobul_p->qp_info_tbl[qp_idx].mark) {
+		if (E_MARK_INVALID == hobul_p->qp_info_tbl[qp_idx].mark) {
+			CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("QP already destroyed - returning IB_SUCCESS\n"));
+			return IB_SUCCESS;	// Already freed
+		}
+		status = IB_INVALID_QP_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	hhul_qp_hndl = hobul_p->qp_info_tbl[qp_idx].hhul_qp_hndl;
+	pd_idx = hobul_p->qp_info_tbl[qp_idx].pd_num;
+	VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup_locked);
+
+	if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) {
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__));
+		CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd_idx 0x%x mark %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].mark));
+		status = IB_INVALID_PD_HANDLE;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("Before THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));
+
+	// PREP: no PREP required for destroy_qp
+	if (HH_OK != THH_hob_destroy_qp(hobul_p->hh_hndl, qp_num)) {
+		status = IB_ERROR;
+		goto cleanup_locked;
+	}
+
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl,
+		("After THH_destroy qp_idx 0x%x k_mod %d pd_idx 0x%x\n",
+		qp_idx, hobul_p->qp_info_tbl[qp_idx].kernel_mode, pd_idx));
+
+	if (hobul_p->qp_info_tbl[qp_idx].kernel_mode) {
+		if (HH_OK != THHUL_qpm_destroy_qp_done(hobul_p->hhul_hndl, hhul_qp_hndl)) {
+			status = IB_ERROR;
+			goto cleanup_locked;
+		}
+		if (hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p);
+		if (hobul_p->qp_info_tbl[qp_idx].send_sge_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].send_sge_p);
+		if (hobul_p->qp_info_tbl[qp_idx].recv_sge_p)
+			cl_free( hobul_p->qp_info_tbl[qp_idx].recv_sge_p);
+	}
+
+	if( h_qp->qp_type == IB_QPT_QP0 || h_qp->qp_type == IB_QPT_QP1 )
+	{
+		if( !cl_atomic_dec( &h_qp->h_sq_cq->spl_qp_cnt ) )
+			KeSetImportanceDpc( &h_qp->h_sq_cq->dpc, MediumImportance );
+		if( !cl_atomic_dec( &h_qp->h_rq_cq->spl_qp_cnt ) )
+			KeSetImportanceDpc( &h_qp->h_rq_cq->dpc, MediumImportance );
+	}
+
+	hobul_p->qp_info_tbl[qp_idx].mark = E_MARK_INVALID;
+	hobul_p->qp_info_tbl[qp_idx].qp_ul_resources_p = NULL;
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+
+	// Update PD object count
+	cl_atomic_dec(&hobul_p->pd_info_tbl[pd_idx].count);
+	CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("pd %d count %d\n", pd_idx, hobul_p->pd_info_tbl[pd_idx].count));
+
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_locked:
+	cl_mutex_release(&hobul_p->qp_info_tbl[qp_idx].mutex);
+cleanup:
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+/*
+* Completion Queue Management Verbs. 
+*/ + +ib_api_status_t +mlnx_create_cq ( + IN const ib_ca_handle_t h_ca, + IN const void *cq_context, + IN OUT uint32_t *p_size, + OUT ib_cq_handle_t *ph_cq, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + + mlnx_hob_t *hob_p; + u_int32_t cq_idx; + u_int32_t cq_num; + u_int32_t cq_size = 0; + mlnx_hobul_t *hobul_p; + HH_hca_dev_t *hca_ul_info; + HHUL_cq_hndl_t hhul_cq_hndl = NULL; + void *cq_ul_resources_p = NULL; + MOSAL_protection_ctx_t prot_ctx; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if( p_umv_buf ) + hob_p = ((mlnx_um_ca_t *)h_ca)->hob_p; + else + hob_p = (mlnx_hob_t *)h_ca; + + hobul_p = mlnx_hobs_get_hobul(hob_p); + if (NULL == hobul_p) { + status = IB_INVALID_CA_HANDLE; + goto cleanup; + } + + hca_ul_info = (HH_hca_dev_t *)hobul_p->hh_hndl; + if (NULL == hca_ul_info) { + status = IB_INVALID_PD_HANDLE; + goto cleanup; + } + + // The size must be provided + if (!p_size) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + // TBD: verify that the number requested does not exceed to maximum allowed + + if( p_umv_buf && p_umv_buf->command ) + { + // For user mode calls - obtain and verify the vendor information + if ((p_umv_buf->input_size - sizeof (u_int32_t)) != + hca_ul_info->cq_ul_resources_sz || + NULL == p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf; + + /* get the current protection context */ + prot_ctx = MOSAL_get_current_prot_ctx(); + } else { + // for kernel mode calls - allocate app resources. Use prep->call->done sequence + cq_ul_resources_p = cl_zalloc( hca_ul_info->cq_ul_resources_sz); + if (!cq_ul_resources_p) { + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + if (HH_OK != THHUL_cqm_create_cq_prep(hobul_p->hhul_hndl, *p_size, &hhul_cq_hndl, &cq_size, cq_ul_resources_p)) { + status = IB_ERROR; + goto cleanup; + } + /* get the current protection context */ + prot_ctx = MOSAL_get_kernel_prot_ctx(); + } + + // Allocate the CQ (cmdif) + if (HH_OK != THH_hob_create_cq(hobul_p->hh_hndl, prot_ctx, cq_ul_resources_p, &cq_num)) { + status = IB_INSUFFICIENT_RESOURCES; + goto cleanup_cq; + } + + if( !(p_umv_buf && p_umv_buf->command) ) + { + // Manage user level resources + if (HH_OK != THHUL_cqm_create_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl, cq_num, cq_ul_resources_p)) { + THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num); + status = IB_ERROR; + goto cleanup_cq; + } + } + + // Save data refs for future use + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_ERROR, cleanup_cq); + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + hobul_p->cq_info_tbl[cq_idx].hca_idx = hob_p->index; + hobul_p->cq_info_tbl[cq_idx].cq_num = cq_num; +// hobul_p->cq_info_tbl[cq_idx].pd_num = pd_idx; + hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl = hhul_cq_hndl; + hobul_p->cq_info_tbl[cq_idx].cq_context = cq_context; + hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = cq_ul_resources_p; + hobul_p->cq_info_tbl[cq_idx].kernel_mode = !(p_umv_buf && p_umv_buf->command); + hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_CQ; + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + // Update CA object count + cl_atomic_inc(&hobul_p->count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("HCA %d count %d\n", h_ca->index, hobul_p->count)); + + *p_size = cq_size; + if (ph_cq) *ph_cq = (ib_cq_handle_t)CQ_HNDL_FROM_CQ(cq_idx); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->status = IB_SUCCESS; + /* + * 
Copy the cq_num back to user
+		 */
+		cl_memcpy (((uint8_t* __ptr64)p_umv_buf->p_inout_buf + hca_ul_info->cq_ul_resources_sz),
+			&cq_num, sizeof (cq_num));
+	}
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return IB_SUCCESS;
+
+cleanup_cq:
+	THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl);
+
+cleanup:
+	if( !(p_umv_buf && p_umv_buf->command) && cq_ul_resources_p )
+		cl_free( cq_ul_resources_p);
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		p_umv_buf->output_size = 0;
+		p_umv_buf->status = status;
+	}
+	CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status));
+	CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+	return status;
+}
+
+ib_api_status_t
+mlnx_resize_cq (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN	OUT	uint32_t		*p_size,
+	IN	OUT	ci_umv_buf_t		*p_umv_buf )
+{
+	ib_api_status_t		status;
+
+	u_int32_t		hca_idx = CQ_HCA_FROM_HNDL(h_cq);
+	u_int32_t		cq_num = CQ_NUM_FROM_HNDL(h_cq);
+	u_int32_t		cq_idx;
+	mlnx_hobul_t		*hobul_p;
+
+	HHUL_cq_hndl_t		hhul_cq_hndl;
+	void			*cq_ul_resources_p = NULL;
+
+	CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl);
+
+	if (!p_size) {
+		status = IB_INVALID_PARAMETER;
+		goto cleanup;
+	}
+	VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup);
+	hobul_p = mlnx_hobul_array[hca_idx];
+	if (NULL == hobul_p) {
+		status = IB_INVALID_CQ_HANDLE;
+		goto cleanup;
+	}
+
+	cq_idx = cq_num & hobul_p->cq_idx_mask;
+	VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup);
+	if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) {
+		status = IB_INVALID_CQ_HANDLE;
+		goto cleanup;
+	}
+
+	cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex);
+
+	hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl;
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		// For user mode calls - obtain and verify the vendor information
+		if( p_umv_buf->input_size != hobul_p->cq_ul_resources_sz ||
+			NULL == p_umv_buf->p_inout_buf )
+		{
+			status = IB_INVALID_PARAMETER;
+			goto cleanup_locked;
+		}
+		cq_ul_resources_p = (void *)p_umv_buf->p_inout_buf;
+
+	} else {
+		// for kernel mode calls - obtain the saved app resources. 
Use prep->call->done sequence + cq_ul_resources_p = hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p; + + status = THHUL_cqm_resize_cq_prep( + hobul_p->hhul_hndl, hhul_cq_hndl, + *p_size, p_size, cq_ul_resources_p ); + if( status != IB_SUCCESS ) + goto cleanup_locked; + } + + if (HH_OK != THH_hob_resize_cq(hobul_p->hh_hndl, cq_num, cq_ul_resources_p)) { + status = IB_ERROR; + goto cleanup_locked; + } + + // DONE: when called on behalf of kernel module + if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) { + if (HH_OK != THHUL_cqm_resize_cq_done( hobul_p->hhul_hndl, hhul_cq_hndl, cq_ul_resources_p)) + { + status = IB_ERROR; + goto cleanup_locked; + } + } + + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->status = IB_SUCCESS; + } + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + +cleanup: + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_query_cq ( + IN const ib_cq_handle_t h_cq, + OUT uint32_t *p_size, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + if (!p_size) { + status = IB_INVALID_PARAMETER; + goto cleanup; + } + + /* Query is fully handled in user-mode. */ + if( p_umv_buf && p_umv_buf->command ) + { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; + if (HH_OK != THHUL_cqm_query_cq(hobul_p->hhul_hndl, hhul_cq_hndl, p_size)){ + status = IB_ERROR; + goto cleanup_locked; + } + + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + +cleanup: + if( p_umv_buf && p_umv_buf->command ) + { + p_umv_buf->output_size = 0; + p_umv_buf->status = status; + } + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + +ib_api_status_t +mlnx_destroy_cq ( + IN const ib_cq_handle_t h_cq) +{ + ib_api_status_t status; + + u_int32_t hca_idx = CQ_HCA_FROM_HNDL(h_cq); + u_int32_t cq_num = CQ_NUM_FROM_HNDL(h_cq); + u_int32_t cq_idx; +// u_int32_t pd_idx = 0; + mlnx_hobul_t *hobul_p; + HHUL_cq_hndl_t hhul_cq_hndl; + + CL_ENTER(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + + VALIDATE_INDEX(hca_idx, MLNX_MAX_HCA, IB_INVALID_CQ_HANDLE, cleanup); + hobul_p = mlnx_hobul_array[hca_idx]; + if (NULL == hobul_p) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cq_idx = cq_num & 
hobul_p->cq_idx_mask; + VALIDATE_INDEX(cq_idx, hobul_p->max_cq, IB_INVALID_CQ_HANDLE, cleanup); + if ( E_MARK_CQ != hobul_p->cq_info_tbl[cq_idx].mark) { + status = IB_INVALID_CQ_HANDLE; + goto cleanup; + } + + cl_mutex_acquire(&hobul_p->cq_info_tbl[cq_idx].mutex); + + hhul_cq_hndl = hobul_p->cq_info_tbl[cq_idx].hhul_cq_hndl; +// pd_idx = hobul_p->cq_info_tbl[cq_idx].pd_num; +// VALIDATE_INDEX(pd_idx, hobul_p->max_pd, IB_ERROR, cleanup); +// if (E_MARK_PD != hobul_p->pd_info_tbl[pd_idx].mark) { +// status = IB_INVALID_PD_HANDLE; +// goto cleanup_locked; +// } + + // PREP: no PREP required for destroy_cq + if (HH_OK != THH_hob_destroy_cq(hobul_p->hh_hndl, cq_num)) { + status = IB_ERROR; + goto cleanup_locked; + } + + if (hobul_p->cq_info_tbl[cq_idx].kernel_mode) { + if (HH_OK != THHUL_cqm_destroy_cq_done(hobul_p->hhul_hndl, hhul_cq_hndl)) { + status = IB_ERROR; + goto cleanup_locked; + } + if (hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p) + cl_free( hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p); + } + + hobul_p->cq_info_tbl[cq_idx].mark = E_MARK_INVALID; + hobul_p->cq_info_tbl[cq_idx].cq_ul_resources_p = NULL; + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + + // Update CA object count + cl_atomic_dec(&hobul_p->count); + CL_TRACE(MLNX_DBG_INFO, g_mlnx_dbg_lvl, ("CA %d count %d\n", hca_idx, hobul_p->count)); + + + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return IB_SUCCESS; + +cleanup_locked: + cl_mutex_release(&hobul_p->cq_info_tbl[cq_idx].mutex); + +cleanup: + CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + CL_EXIT(MLNX_DBG_TRACE, g_mlnx_dbg_lvl); + return status; +} + + +void +setup_ci_interface( + IN const ib_net64_t ca_guid, + IN OUT ci_interface_t *p_interface ) +{ + cl_memclr(p_interface, sizeof(*p_interface)); + + /* Guid of the CA. */ + p_interface->guid = ca_guid; + + /* Version of this interface. */ + p_interface->version = VERBS_VERSION; + + /* UVP name */ + cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME); + + CL_TRACE(MLNX_DBG_TRACE, g_mlnx_dbg_lvl, ("UVP filename %s\n", p_interface->libname)); + + /* The real interface. 
*/ + p_interface->open_ca = mlnx_open_ca; + p_interface->query_ca = mlnx_query_ca; + p_interface->modify_ca = mlnx_modify_ca; // ++ + p_interface->close_ca = mlnx_close_ca; + p_interface->um_open_ca = mlnx_um_open; + p_interface->um_close_ca = mlnx_um_close; + + p_interface->allocate_pd = mlnx_allocate_pd; + p_interface->deallocate_pd = mlnx_deallocate_pd; + + p_interface->create_av = mlnx_create_av; + p_interface->query_av = mlnx_query_av; + p_interface->modify_av = mlnx_modify_av; + p_interface->destroy_av = mlnx_destroy_av; + + p_interface->create_qp = mlnx_create_qp; + p_interface->create_spl_qp = mlnx_create_spl_qp; + p_interface->modify_qp = mlnx_modify_qp; + p_interface->query_qp = mlnx_query_qp; + p_interface->destroy_qp = mlnx_destroy_qp; + + p_interface->create_cq = mlnx_create_cq; + p_interface->resize_cq = mlnx_resize_cq; + p_interface->query_cq = mlnx_query_cq; + p_interface->destroy_cq = mlnx_destroy_cq; + + p_interface->local_mad = mlnx_local_mad; + + p_interface->vendor_call = fw_access_ctrl; + + mlnx_memory_if(p_interface); + mlnx_direct_if(p_interface); + mlnx_mcast_if(p_interface); + + + return; +} + +#if 0 +CL_TRACE(CL_DBG_ERROR, g_mlnx_dbg_lvl, ("file %s line %d\n", __FILE__, __LINE__)); +#endif diff --git a/branches/Ndi/hw/mt23108/kernel/infinihost.inf b/branches/Ndi/hw/mt23108/kernel/infinihost.inf new file mode 100644 index 00000000..d6080b52 --- /dev/null +++ b/branches/Ndi/hw/mt23108/kernel/infinihost.inf @@ -0,0 +1,191 @@ +; Mellanox Technologies InfiniBand HCAs. +; Copyright 2005 SilverStorm Technologies all Rights Reserved. + +[Version] +Signature="$Windows NT$" +Class=InfiniBandHca +ClassGUID={58517E00-D3CF-40c9-A679-CEE5752F4491} +Provider=%OPENIB% +DriverVer=03/08/2006,1.0.0000.614 + +; ================= Destination directory section ===================== + +[DestinationDirs] +DefaultDestDir=%DIRID_DRIVERS% +ClassCopyFiles=%DIRID_SYSTEM% +MT23108.UMCopyFiles=%DIRID_SYSTEM% +MT23108.WOW64CopyFiles=%DIRID_SYSTEM_X86% + +; ================= Class Install section ===================== + +[ClassInstall32] +CopyFiles=ClassCopyFiles +AddReg=ClassAddReg + +[ClassCopyFiles] +IbInstaller.dll + +[ClassAddReg] +HKR,,,,"InfiniBand Host Channel Adapters" +HKR,,Icon,,-5 +HKR,,SilentInstall,,1 +HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \ + {58517E00-D3CF-40c9-A679-CEE5752F4491},%REG_MULTI_SZ_APPEND%, \ + "IbInstaller.dll,IbCoInstaller" + +; ================= Device Install section ===================== + +[SourceDisksNames.x86] +1=%DiskId%,,,\x86 + +[SourceDisksNames.amd64] +1=%DiskId%,,,\amd64 + +[SourceDisksNames.ia64] +1=%DiskId%,,,\ia64 + +[SourceDisksFiles] +IbInstaller.dll=1 +mt23108.sys=1 +thca.sys=1 +mt23108u.dll=1 +mt23108ud.dll=1 + +[SourceDisksFiles.amd64] +IbInstaller.dll=1 +mt23108.sys=1 +thca.sys=1 +mt23108u.dll=1 +mt23108ud.dll=1 +mtuvp32.dll=1 +mtuvp32d.dll=1 + +[SourceDisksFiles.ia64] +IbInstaller.dll=1 +ibal.sys=1 +mt23108.sys=1 +thca.sys=1 +mt23108u.dll=1 +mt23108ud.dll=1 +mtuvp32.dll=1 +mtuvp32d.dll=1 + +[Manufacturer] +%MTL% = HCA.DeviceSection,ntx86,ntamd64,ntia64 + +[HCA.DeviceSection] +; empty since we don't support W9x/Me + +[HCA.DeviceSection.ntx86] +%MT23108.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_5A44 +%MT25208.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_6278 + +[HCA.DeviceSection.ntamd64] +%MT23108.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_5A44 +%MT25208.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_6278 + +[HCA.DeviceSection.ntia64] +%MT23108.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_5A44 
+%MT25208.DeviceDesc% = MT23108.DDInstall,PCI\VEN_15B3&DEV_6278 + +[MT23108.DDInstall.ntx86] +CopyFiles = MT23108.CopyFiles +CopyFiles = MT23108.UMCopyFiles + +[MT23108.DDInstall.ntamd64] +CopyFiles = MT23108.CopyFiles +CopyFiles = MT23108.UMCopyFiles +CopyFiles = MT23108.WOW64CopyFiles + +[MT23108.DDInstall.ntia64] +CopyFiles = MT23108.CopyFiles +CopyFiles = MT23108.UMCopyFiles +CopyFiles = MT23108.WOW64CopyFiles + +[MT23108.DDInstall.ntx86.HW] +AddReg = MT23108.FiltersReg + +[MT23108.DDInstall.ntamd64.HW] +AddReg = MT23108.FiltersReg + +[MT23108.DDInstall.ntia64.HW] +AddReg = MT23108.FiltersReg + +[MT23108.DDInstall.ntx86.Services] +AddService = thca,%SPSVCINST_NULL%,THCA.ServiceInstall +AddService = mt23108,%SPSVCINST_ASSOCSERVICE%,MT23108.ServiceInstall + +[MT23108.DDInstall.ntamd64.Services] +AddService = thca,%SPSVCINST_NULL%,THCA.ServiceInstall +AddService = mt23108,%SPSVCINST_ASSOCSERVICE%,MT23108.ServiceInstall + +[MT23108.DDInstall.ntia64.Services] +AddService = thca,%SPSVCINST_NULL%,THCA.ServiceInstall +AddService = mt23108,%SPSVCINST_ASSOCSERVICE%,MT23108.ServiceInstall + +[MT23108.CopyFiles] +mt23108.sys +thca.sys + +[MT23108.UMCopyFiles] +mt23108u.dll,,,2 +mt23108ud.dll,,,2 + +[MT23108.WOW64CopyFiles] +mt23108u.dll,mtuvp32.dll,,2 +mt23108ud.dll,mtuvp32d.dll,,2 + +; +; ============= Service Install section ============== +; + +[MT23108.ServiceInstall] +DisplayName = %MT23108.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\mt23108.sys +LoadOrderGroup = extended base +AddReg = MT23108.ParamsReg + +[THCA.ServiceInstall] +DisplayName = %THCA.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\thca.sys +LoadOrderGroup = extended base +AddReg = THCA.ParamsReg + +[MT23108.FiltersReg] +HKR,,"UpperFilters", 0x00010000,"thca" + +[MT23108.ParamsReg] +HKR,"Parameters","DebugLevel",%REG_DWORD%,2 +HKR,"Parameters","ConfAddr",%REG_DWORD%,88 +HKR,"Parameters","ConfData",%REG_DWORD%,92 +HKR,"Parameters","DdrMapOffset",%REG_DWORD%,0x100000 +HKR,"Parameters","DdrMapSize",%REG_DWORD%,0x1600000 +HKR,"Parameters","ResetCard",%REG_DWORD%,0 + +[THCA.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x80000000 + +[Strings] +OPENIB = "OpenIB Alliance" +MTL = "Mellanox Technologies Ltd." +MT23108.ServiceDesc = "Mellanox MT23108 InfiniBand HCA Driver" +MT23108.DeviceDesc = "InfiniHost (MT23108) - Mellanox InfiniBand HCA [MT23108 Driver]" +MT25208.DeviceDesc = "InfiniHost (MT25208) - Mellanox InfiniBand HCA for PCI Express [MT23108 Driver]" +THCA.ServiceDesc = "Mellanox HCA VPD for IBAL" +DiskId = "OpenIB InfiniBand HCA installation disk" +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_MULTI_SZ_APPEND = 0x00010008 +DIRID_SYSTEM = 11 +DIRID_DRIVERS = 12 +DIRID_SYSTEM_X86 = 16425 diff --git a/branches/Ndi/hw/mt23108/user/Makefile b/branches/Ndi/hw/mt23108/user/Makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. 
+# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/hw/mt23108/user/SOURCES b/branches/Ndi/hw/mt23108/user/SOURCES new file mode 100644 index 00000000..4740ea9c --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/SOURCES @@ -0,0 +1,63 @@ +!if $(FREEBUILD) +TARGETNAME=mt23108u +!else +TARGETNAME=mt23108ud +!endif +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=DYNLINK +DLLDEF=$(O)\uvpd_exports.def +USE_NTDLL=1 +DLLENTRY=DllMain + +SOURCES= \ + uvpd.rc \ + mlnx_ual_av.c \ + mlnx_ual_ca.c \ + mlnx_ual_cq.c \ + mlnx_ual_main.c \ + mlnx_ual_mcast.c \ + mlnx_ual_mrw.c \ + mlnx_ual_osbypass.c \ + mlnx_ual_pd.c \ + mlnx_ual_qp.c + +MT_HOME=..\vapi + +INCLUDES= \ + .\; \ + ..\..\..\inc; \ + ..\..\..\inc\user; \ + $(MT_HOME)\mlxsys\tools; \ + $(MT_HOME)\Hca\verbs; \ + $(MT_HOME)\Hca\hcahal; \ + $(MT_HOME)\mlxsys\mtl_common; \ + $(MT_HOME)\mlxsys\mtl_common\os_dep\win; \ + $(MT_HOME)\mlxsys\mosal; \ + $(MT_HOME)\mlxsys\mosal\os_dep\win; \ + $(MT_HOME)\mlxsys\mtl_types; \ + $(MT_HOME)\mlxsys\mtl_types\win; \ + $(MT_HOME)\mlxsys\mtl_types\win\win; \ + $(MT_HOME)\mlxsys\mtl_types\win\win32; \ + $(MT_HOME)\Hca\hcahal\tavor; \ + $(MT_HOME)\Hca\hcahal\tavor\util; \ + $(MT_HOME)\Hca\hcahal\tavor\thhul_hob; \ + $(MT_HOME)\Hca\hcahal\tavor\thhul_pdm; \ + $(MT_HOME)\Hca\hcahal\tavor\thhul_cqm; \ + $(MT_HOME)\Hca\hcahal\tavor\thhul_qpm; \ + $(MT_HOME)\Hca\hcahal\tavor\thhul_mwm; + +USER_C_FLAGS=$(USER_C_FLAGS) /DIVAPI_THH /DCL_NO_TRACK_MEM + +TARGETLIBS=\ + $(TARGETPATH)\*\vapi.lib \ + $(SDK_LIB_PATH)\user32.lib \ + $(SDK_LIB_PATH)\kernel32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/hw/mt23108/user/hca_data.h b/branches/Ndi/hw/mt23108/user/hca_data.h new file mode 100644 index 00000000..8a53c5df --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/hca_data.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include + + +typedef struct _ib_ca +{ + HH_hca_dev_t * __ptr64 p_hca_ul_info; + void *p_hca_ul_resources; + ib_ca_attr_t *p_hca_attr; + HHUL_hca_hndl_t hhul_hca_hndl; + u_int32_t priv_op; + void *p_al_ci_ca; + +} mlnx_ual_hobul_t; + + +typedef struct _ib_pd +{ + mlnx_ual_hobul_t *p_hobul; + void *p_pd_ul_resources; + HHUL_pd_hndl_t hhul_pd_hndl; + u_int32_t pd_idx; + +#define MLNX_MAX_AVS_PER_PD 0xFFFFFFFF + +} mlnx_ual_pd_info_t; + + +typedef struct _ib_cq +{ + mlnx_ual_hobul_t *p_hobul; + void *p_cq_ul_resources; + HHUL_cq_hndl_t hhul_cq_hndl; + u_int32_t cq_idx; + u_int32_t cq_size; + +} mlnx_ual_cq_info_t; + + +typedef struct _ib_qp +{ + ib_pd_handle_t h_uvp_pd; + void *p_qp_ul_resources; + HHUL_qp_hndl_t hhul_qp_hndl; + u_int32_t qp_idx; + VAPI_qp_cap_t ul_qp_cap; + IB_ts_t type; + +} mlnx_ual_qp_info_t; + + +typedef struct _ib_mw +{ + ib_pd_handle_t h_uvp_pd; + u_int32_t rkey; + HHUL_mw_hndl_t hhul_mw_hndl; + +} mlnx_ual_mw_info_t; + + +typedef struct _ib_av +{ + ib_pd_handle_t h_uvp_pd; + ib_av_attr_t *p_i_av_attr; + HHUL_ud_av_hndl_t h_av; + +} mlnx_ual_av_info_t; diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_av.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_av.c new file mode 100644 index 00000000..6a82cd41 --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_av.c @@ -0,0 +1,395 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include "mlnx_ual_main.h"
+
+extern u_int32_t mlnx_dbg_lvl;
+
+void
+mlnx_get_av_interface (
+    IN OUT uvp_interface_t *p_uvp )
+{
+
+    CL_ASSERT(p_uvp);
+
+    /*
+     * Address Vector Management Verbs
+     */
+    p_uvp->pre_create_av   = mlnx_pre_create_av;
+    p_uvp->post_create_av  = mlnx_post_create_av;
+    p_uvp->pre_query_av    = mlnx_pre_query_av;
+    p_uvp->post_query_av   = mlnx_post_query_av;
+    p_uvp->pre_modify_av   = mlnx_pre_modify_av;
+    p_uvp->post_modify_av  = mlnx_post_modify_av;
+    p_uvp->pre_destroy_av  = mlnx_pre_destroy_av;
+    p_uvp->post_destroy_av = mlnx_post_destroy_av;
+
+}
+
+
+u_int8_t
+gid_to_index_lookup (
+    IN ib_ca_attr_t  *p_ca_attr,
+    IN u_int8_t      port_num,
+    IN u_int8_t      *raw_gid)
+{
+    ib_gid_t   *p_gid_table = NULL;
+    u_int8_t   i, index = 0;
+    u_int16_t  num_gids;
+
+    p_gid_table = p_ca_attr->p_port_attr[port_num].p_gid_table;
+    CL_ASSERT (p_gid_table);
+
+    num_gids = p_ca_attr->p_port_attr[port_num].num_gids;
+    CL_TRACE (MLNX_TRACE_LVL_6, mlnx_dbg_lvl,
+              ("Port %d has %d gids\n", port_num, num_gids));
+
+    for (i = 0; i < num_gids; i++)
+    {
+        /* cl_memcmp follows memcmp semantics: zero means the GIDs match */
+        if (!cl_memcmp (raw_gid, p_gid_table[i].raw, sizeof (ib_gid_t)))
+        {
+            CL_TRACE (MLNX_TRACE_LVL_6, mlnx_dbg_lvl,
+                      ("found GID at index %d\n", i));
+            index = i;
+            break;
+        }
+    }
+    return index;
+}
+
+
+void
+map_itom_av_attr (
+    IN  ib_ca_attr_t        *p_ca_attr,
+    IN  const ib_av_attr_t  *p_av_attr,
+    OUT VAPI_ud_av_t        *p_hhul_av)
+{
+    u_int8_t   ver;
+    u_int8_t   tclass;
+    u_int32_t  flow_lbl;
+
+    p_hhul_av->sl   = p_av_attr->sl;
+    p_hhul_av->port = p_av_attr->port_num;
+    p_hhul_av->dlid = CL_NTOH16 (p_av_attr->dlid);
+    /*
+     * VAPI uses static rate as IPD.
+     * 0 is for matched links. 3 is suitable for 4x to 1x.
+     */
+    p_hhul_av->static_rate =
+        (p_av_attr->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
+
+    p_hhul_av->src_path_bits = 0;
+    /* p_hhul_av->src_path_bits = p_av_attr->path_bits; */
+    CL_TRACE (MLNX_TRACE_LVL_6, mlnx_dbg_lvl,
+              ("ib_av_attr->path_bits %d\n", p_av_attr->path_bits));
+    p_hhul_av->grh_flag = (MT_bool)p_av_attr->grh_valid;
+
+    if (p_av_attr->grh_valid)
+    {
+        ib_grh_get_ver_class_flow (p_av_attr->grh.ver_class_flow,
+                                   &ver, &tclass, &flow_lbl);
+
+        p_hhul_av->hop_limit  = p_av_attr->grh.hop_limit;
+        p_hhul_av->sgid_index =
+            gid_to_index_lookup (p_ca_attr,
+                                 p_av_attr->port_num,
+                                 (u_int8_t *) p_av_attr->grh.src_gid.raw);
+
+        cl_memcpy (p_hhul_av->dgid, p_av_attr->grh.dest_gid.raw,
+                   sizeof (IB_gid_t));
+
+        p_hhul_av->traffic_class = tclass;
+        p_hhul_av->flow_label    = flow_lbl;
+    }
+}
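/*
 * Illustrative sketch (editorial aside, not a patch hunk): the IPD
 * mapping in map_itom_av_attr above only distinguishes two cases.
 * Restated as a stand-alone helper, assuming the same
 * IB_PATH_RECORD_RATE_* constants the driver uses:
 */
static u_int8_t
example_rate_to_ipd (u_int8_t static_rate)
{
    /* IPD 0: source and destination link rates match */
    /* IPD 3: inject one packet every 4 ticks, suitable for 4x -> 1x */
    return (u_int8_t)((static_rate == IB_PATH_RECORD_RATE_10_GBS) ? 0 : 3);
}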
+ib_api_status_t
+mlnx_pre_create_av (
+    IN     const ib_pd_handle_t  h_uvp_pd,
+    IN     const ib_av_attr_t    *p_av_attr,
+    IN OUT ci_umv_buf_t          *p_umv_buf)
+{
+    ib_api_status_t status = IB_VERBS_PROCESSING_DONE;
+
+    FUNC_ENTER;
+    CL_ASSERT(p_umv_buf);
+    /*
+     * Save the av attribute so the post step can create the av
+     */
+    p_umv_buf->input_size = p_umv_buf->output_size = sizeof (ib_av_attr_t);
+    p_umv_buf->p_inout_buf = cl_zalloc (p_umv_buf->input_size);
+
+    if (p_umv_buf->p_inout_buf == NULL)
+    {
+        CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                  ("FAILED to create priv buffer\n"));
+        status = IB_INSUFFICIENT_MEMORY;
+        goto cleanup;
+    }
+    cl_memcpy (p_umv_buf->p_inout_buf, p_av_attr, sizeof (ib_av_attr_t));
+    /*
+     * We are going to create the AV entirely in user mode.
+     * To signal this to the AL we return IB_VERBS_PROCESSING_DONE
+     */
+cleanup:
+    FUNC_EXIT;
+    return status;
+}
+
+
+void
+mlnx_post_create_av (
+    IN     const ib_pd_handle_t  h_uvp_pd,
+    IN     ib_api_status_t       ioctl_status,
+    OUT    ib_av_handle_t        *ph_uvp_av,
+    IN OUT ci_umv_buf_t          *p_umv_buf)
+{
+    ib_api_status_t    status;
+    mlnx_ual_hobul_t   *p_hobul;
+    mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*)h_uvp_pd);
+    ib_av_attr_t       *p_av_attr;
+    VAPI_ud_av_t       hhul_av;
+    mlnx_ual_av_info_t *p_new_av = NULL;
+
+    FUNC_ENTER;
+    CL_ASSERT (p_pd_info);
+    CL_ASSERT (p_umv_buf);
+
+    p_hobul = p_pd_info->p_hobul;
+    CL_ASSERT (p_hobul);
+
+    /*
+     * Set initial value for handle
+     */
+    *ph_uvp_av = NULL;
+
+    status = ioctl_status;
+
+    if (IB_SUCCESS == status)
+    {
+        if (sizeof (ib_av_attr_t) != p_umv_buf->output_size)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Priv buffer has different size\n"));
+            status = IB_ERROR;
+            goto cleanup;
+        }
+        p_av_attr = (ib_av_attr_t *) p_umv_buf->p_inout_buf;
+        CL_ASSERT (p_av_attr);
+
+        p_new_av = cl_zalloc (sizeof (mlnx_ual_av_info_t));
+        if (p_new_av == NULL)
+        {
+            /* cl_zalloc can fail; bail out before dereferencing */
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("FAILED to alloc av info\n"));
+            status = IB_INSUFFICIENT_MEMORY;
+            goto cleanup;
+        }
+
+        map_itom_av_attr (p_hobul->p_hca_attr, p_av_attr, &hhul_av);
+
+        if (HH_OK !=
+            THHUL_pdm_create_ud_av (p_hobul->hhul_hca_hndl,
+                                    p_pd_info->hhul_pd_hndl,
+                                    &hhul_av,
+                                    &p_new_av->h_av))
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("FAILED to create usermode UD AV\n"));
+            status = IB_ERROR;
+            goto cleanup;
+        }
+
+        p_new_av->p_i_av_attr = p_av_attr;
+        p_new_av->h_uvp_pd = h_uvp_pd;
+        *ph_uvp_av = p_new_av;
+        p_umv_buf->p_inout_buf = NULL;
+    }
+
+    /*
+     * clean up if required
+     */
+cleanup:
+    if ((IB_SUCCESS != status) && (IB_SUCCESS == ioctl_status))
+    {
+        if (p_new_av)
+        {
+            if (p_new_av->p_i_av_attr)
+            {
+                cl_free (p_new_av->p_i_av_attr);
+            }
+            cl_free (p_new_av);
+        }
+    }
+
+    FUNC_EXIT;
+    return;
+}
+
+
+ib_api_status_t
+mlnx_pre_query_av (
+    IN     const ib_av_handle_t  h_uvp_av,
+    IN OUT ci_umv_buf_t          *p_umv_buf )
+{
+    FUNC_ENTER;
+    FUNC_EXIT;
+    return IB_VERBS_PROCESSING_DONE;
+}
+
+
+void
+mlnx_post_query_av (
+    IN     const ib_av_handle_t  h_uvp_av,
+    IN     ib_api_status_t       ioctl_status,
+    IN OUT ib_av_attr_t          *p_addr_vector,
+    IN OUT ib_pd_handle_t        *ph_pd,
+    IN OUT ci_umv_buf_t          *p_umv_buf)
+{
+    ib_api_status_t    status;
+    mlnx_ual_av_info_t *p_av_info = (mlnx_ual_av_info_t *)((void*) h_uvp_av);
+
+    FUNC_ENTER;
+    CL_ASSERT(p_umv_buf);
+    CL_ASSERT(p_av_info);
+
+    status = ioctl_status;
+
+    if (status == IB_SUCCESS)
+    {
+        cl_memcpy (p_addr_vector, p_av_info->p_i_av_attr, sizeof (ib_av_attr_t));
+        status = IB_VERBS_PROCESSING_DONE;
+    }
+
+    FUNC_EXIT;
+}
+
+
+ib_api_status_t
+mlnx_pre_modify_av (
+    IN     const ib_av_handle_t  h_uvp_av,
+    IN     const ib_av_attr_t    *p_addr_vector,
+    IN OUT ci_umv_buf_t          *p_umv_buf)
+{
+    ib_api_status_t status = IB_VERBS_PROCESSING_DONE;
+
+    mlnx_ual_av_info_t *p_av_info = (mlnx_ual_av_info_t *)((void*) h_uvp_av);
+    mlnx_ual_pd_info_t *p_pd_info;
+    mlnx_ual_hobul_t   *p_hobul;
+    VAPI_ud_av_t       hhul_av;
+
+    FUNC_ENTER;
+    CL_ASSERT (p_umv_buf);
+
+    p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_av_info->h_uvp_pd);
+    CL_ASSERT (p_pd_info);
+
+    p_hobul = p_pd_info->p_hobul;
+    CL_ASSERT (p_hobul);
+
+    map_itom_av_attr (p_hobul->p_hca_attr, p_addr_vector, &hhul_av);
+
+    if (HH_OK !=
+        THHUL_pdm_modify_ud_av (p_hobul->hhul_hca_hndl,
+                                p_av_info->h_av,
+                                &hhul_av))
+    {
+        CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                  ("Failed to modify AV\n"));
+        status = IB_ERROR;
+    }
+    else
+    {
+        cl_memcpy (p_av_info->p_i_av_attr, p_addr_vector, sizeof (ib_av_attr_t));
+    }
+
+    FUNC_EXIT;
+
+    return status;
+}
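/*
 * Editorial aside (hedged sketch, not a patch hunk): the AV verbs rely
 * on the UVP convention that a pre-verb returning
 * IB_VERBS_PROCESSING_DONE tells the AL the whole verb was completed in
 * user mode. The assumed caller-side flow is roughly:
 *
 *     status = p_uvp->pre_modify_av( h_av, p_attr, &umv_buf );
 *     if( status == IB_VERBS_PROCESSING_DONE )
 *         return IB_SUCCESS;      // no kernel transition needed
 *     if( status != IB_SUCCESS )
 *         return status;          // pre-verb failed, abort the verb
 *     status = <ioctl into kernel AL>;
 *     p_uvp->post_modify_av( h_av, status, &umv_buf );
 */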
+void
+mlnx_post_modify_av (
+    IN     const ib_av_handle_t  h_uvp_av,
+    IN     ib_api_status_t       ioctl_status,
+    IN OUT ci_umv_buf_t          *p_umv_buf)
+{
+    FUNC_ENTER;
+    FUNC_EXIT;
+    return;
+}
+
+
+ib_api_status_t
+mlnx_pre_destroy_av (
+    IN const ib_av_handle_t h_uvp_av)
+{
+    FUNC_ENTER;
+    FUNC_EXIT;
+    return IB_VERBS_PROCESSING_DONE;
+}
+
+
+void
+mlnx_post_destroy_av (
+    IN const ib_av_handle_t h_uvp_av,
+    IN ib_api_status_t      ioctl_status)
+{
+    mlnx_ual_hobul_t   *p_hobul;
+    mlnx_ual_pd_info_t *p_pd_info;
+    mlnx_ual_av_info_t *p_av_info = (mlnx_ual_av_info_t *)((void*) h_uvp_av);
+
+    FUNC_ENTER;
+    CL_ASSERT (p_av_info);
+
+    p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_av_info->h_uvp_pd);
+    CL_ASSERT (p_pd_info);
+
+    p_hobul = p_pd_info->p_hobul;
+    CL_ASSERT (p_hobul);
+
+    if (HH_OK !=
+        THHUL_pdm_destroy_ud_av (p_hobul->hhul_hca_hndl,
+                                 p_av_info->h_av))
+    {
+        CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                  ("Failed to destroy av\n"));
+    }
+
+    /*
+     * We still have to release the resources even if THHUL failed
+     */
+    if (p_av_info->p_i_av_attr)
+    {
+        cl_free (p_av_info->p_i_av_attr);
+    }
+    cl_free (p_av_info);
+
+    FUNC_EXIT;
+    return;
+}
+
+
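/*
 * Editorial aside (restating what the code above implements; the chain
 * itself is an inference, not stated in the patch): the ib_av_attr_t
 * buffer allocated in mlnx_pre_create_av changes owner twice:
 *
 *     pre_create_av:   p_umv_buf->p_inout_buf = cl_zalloc(...)  // UVP owns
 *     post_create_av:  p_new_av->p_i_av_attr  = p_av_attr;
 *                      p_umv_buf->p_inout_buf = NULL;           // AV owns
 *     post_destroy_av: cl_free( p_av_info->p_i_av_attr );       // released
 *
 * Clearing p_inout_buf in the post step is what keeps the AL from
 * freeing a buffer the AV handle still references.
 */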
diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_ca.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_ca.c
new file mode 100644
index 00000000..09edb90d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_ca.c
@@ -0,0 +1,282 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mlnx_ual_main.h"
+//#include "hca_dev.h"
+
+extern u_int32_t mlnx_dbg_lvl;
+
+void
+mlnx_get_ca_interface (
+    IN OUT uvp_interface_t *p_uvp )
+{
+    CL_ASSERT(p_uvp);
+
+    /*
+     * HCA Access Verbs
+     */
+    p_uvp->pre_open_ca   = mlnx_pre_open_ca;
+    p_uvp->post_open_ca  = mlnx_post_open_ca;
+
+
+    p_uvp->pre_query_ca  = mlnx_pre_query_ca;
+    p_uvp->post_query_ca = mlnx_post_query_ca;
+
+    p_uvp->pre_modify_ca  = NULL;
+    p_uvp->post_modify_ca = NULL;
+
+    p_uvp->pre_close_ca  = mlnx_pre_close_ca;
+    p_uvp->post_close_ca = mlnx_post_close_ca;
+
+}
+
+
+
+ib_api_status_t
+mlnx_pre_open_ca (
+    IN     const ib_net64_t  ca_guid,
+    IN OUT ci_umv_buf_t      *p_umv_buf )
+{
+    FUNC_ENTER;
+    if( p_umv_buf )
+    {
+        if( !p_umv_buf->p_inout_buf )
+        {
+            p_umv_buf->p_inout_buf = cl_zalloc( sizeof(mlnx_ual_hobul_t) );
+            if( !p_umv_buf->p_inout_buf )
+            {
+                return IB_INSUFFICIENT_MEMORY;
+            }
+        }
+        p_umv_buf->input_size = p_umv_buf->output_size = sizeof(mlnx_ual_hobul_t);
+        p_umv_buf->command = TRUE;
+    }
+    FUNC_EXIT;
+    return IB_SUCCESS;
+}
+
+
+ib_api_status_t
+mlnx_post_open_ca (
+    IN  const ib_net64_t  ca_guid,
+    IN  ib_api_status_t   ioctl_status,
+    OUT ib_ca_handle_t    *ph_uvp_ca,
+    IN  ci_umv_buf_t      *p_umv_buf )
+{
+    ib_api_status_t  status = ioctl_status;
+    mlnx_ual_hobul_t *new_ca = (mlnx_ual_hobul_t *)p_umv_buf->p_inout_buf;
+
+    FUNC_ENTER;
+
+    if (IB_SUCCESS == status)
+    {
+        *ph_uvp_ca = (ib_ca_handle_t)new_ca;
+
+        /*
+         * hca_ul_info will be initialized now
+         */
+        new_ca->p_hca_ul_resources = (new_ca->p_hca_ul_info + 1 );
+        new_ca->p_hca_attr = NULL;
+
+        /* Create user layer CA Object */
+        if (HH_OK !=
+            THHUL_hob_create(new_ca->p_hca_ul_resources,
+                             new_ca->p_hca_ul_info->dev_id,
+                             &new_ca->hhul_hca_hndl))
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Failed to create THHUL_hob object\n"));
+            status = IB_INSUFFICIENT_RESOURCES;
+        }
+    }
+
+    /*
+     * Free resources for ERROR cases
+     * Clean up if required
+     */
+    if( status != IB_SUCCESS )
+        cl_free( new_ca );
+
+    FUNC_EXIT;
+    return status;
+}
+
+ib_api_status_t
+mlnx_pre_query_ca (
+    IN  ib_ca_handle_t  h_uvp_ca,
+    IN  ib_ca_attr_t    *p_ca_attr,
+    IN  size_t          byte_count,
+    IN  ci_umv_buf_t    *p_umv_buf )
+{
+    ib_api_status_t status = IB_SUCCESS;
+
+    FUNC_ENTER;
+
+    CL_ASSERT(h_uvp_ca);
+
+    /* hca_ul_info should be filled in by open_ca() */
+    if ( h_uvp_ca->p_hca_ul_info->status != HH_HCA_STATUS_OPENED )
+    {
+        CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                  ("Device is not opened\n"));
+        status = IB_INVALID_CA_HANDLE;
+        return status;
+    }
+    /*
+     * The first time query_ca is called, populate our internal cached
+     * attributes so we can access the GID table. Note that query_ca
+     * calls *always* get their attributes from the kernel.
+     */
+    if ( !h_uvp_ca->p_hca_attr )
+    {
+        /*
+         * Assume that if the user buffer is valid then byte_cnt is valid
+         * too, so we can preallocate the ca attr buffer for saving the
+         * post-ioctl data.
+         *
+         * Note that we squirrel the buffer away into the umv_buf and only
+         * set it into the HCA if the query is successful.
+ */ + if ( p_ca_attr != NULL ) + { + p_umv_buf->p_inout_buf = cl_zalloc(byte_count); + if ( !p_umv_buf->p_inout_buf ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to alloc new_ca\n")); + status = IB_INSUFFICIENT_RESOURCES; + return status; + } + } + p_umv_buf->input_size = p_umv_buf->output_size = 0; + } + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ) +{ + FUNC_ENTER; + + CL_ASSERT(h_uvp_ca); + CL_ASSERT(p_umv_buf); + + if ( ioctl_status == IB_SUCCESS && p_ca_attr && + byte_count && !h_uvp_ca->p_hca_attr ) + { + CL_ASSERT( byte_count >= p_ca_attr->size ); + h_uvp_ca->p_hca_attr = p_umv_buf->p_inout_buf; + ib_copy_ca_attr( h_uvp_ca->p_hca_attr, p_ca_attr ); + } + else if (p_umv_buf->p_inout_buf) + { + cl_free (p_umv_buf->p_inout_buf); + } + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN uint8_t port_num, + IN ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* p_port_attr_mod) +{ + FUNC_ENTER; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_close_ca ( + IN ib_ca_handle_t h_uvp_ca) +{ + FUNC_ENTER; + FUNC_EXIT; + return IB_SUCCESS; +} + + +ib_api_status_t +mlnx_post_close_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ) +{ + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void*)h_uvp_ca); + + FUNC_ENTER; + + CL_ASSERT(p_hobul); + + if (p_hobul->hhul_hca_hndl) + { + if (HH_OK != THHUL_hob_destroy (p_hobul->hhul_hca_hndl)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to destroy THHUL_hob object\n")); + return IB_SUCCESS; + } + } + + if (p_hobul->p_hca_attr) + { + cl_free( p_hobul->p_hca_attr); + p_hobul->p_hca_attr = NULL; + } + cl_free(p_hobul); + + FUNC_EXIT; + return IB_SUCCESS; +} + diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_cq.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_cq.c new file mode 100644 index 00000000..083bec97 --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_cq.c @@ -0,0 +1,494 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include "mlnx_ual_main.h"
+
+extern u_int32_t mlnx_dbg_lvl;
+
+void
+mlnx_get_cq_interface (
+    IN OUT uvp_interface_t *p_uvp )
+{
+    FUNC_ENTER;
+
+    CL_ASSERT(p_uvp);
+
+    /*
+     * Completion Queue Management Verbs
+     */
+    p_uvp->pre_create_cq   = mlnx_pre_create_cq;
+    p_uvp->post_create_cq  = mlnx_post_create_cq;
+
+    p_uvp->pre_query_cq    = mlnx_pre_query_cq;
+    p_uvp->post_query_cq   = NULL;
+
+    p_uvp->pre_resize_cq   = mlnx_pre_resize_cq;
+    p_uvp->post_resize_cq  = mlnx_post_resize_cq;
+
+    p_uvp->pre_destroy_cq  = mlnx_pre_destroy_cq;
+    p_uvp->post_destroy_cq = mlnx_post_destroy_cq;
+
+    FUNC_EXIT;
+}
+
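/*
 * Editorial sketch (hypothetical helper; not part of this patch): the
 * create-CQ path below threads a single private buffer through the
 * kernel with the following assumed layout:
 *
 *     [0 .. cq_ul_resources_sz)       vendor CQ resources (in/out)
 *     [cq_ul_resources_sz .. +4)      cq_idx, written back by the kernel
 *     [size - sizeof(ptr) .. size)    mlnx_ual_cq_info_t pointer, kept
 *                                     outside input_size/output_size so
 *                                     it never crosses to the kernel
 *
 * Recovering the trailing pointer, which mlnx_post_create_cq does
 * inline, could be written as:
 */
static mlnx_ual_cq_info_t *
example_cq_info_from_priv_buf (
    IN const ci_umv_buf_t *p_umv_buf,
    IN size_t             buf_size )
{
    mlnx_ual_cq_info_t *p_cq_info = NULL;

    /* the pointer lives in the last sizeof(ptr) bytes of the buffer */
    cl_memcpy (&p_cq_info,
               (u_int8_t *)p_umv_buf->p_inout_buf + buf_size -
                   sizeof (mlnx_ual_cq_info_t *),
               sizeof (mlnx_ual_cq_info_t *));
    return p_cq_info;
}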
+ib_api_status_t
+mlnx_pre_create_cq (
+    IN     const ib_ca_handle_t  h_uvp_ca,
+    IN OUT uint32_t* const       p_size,
+    IN OUT ci_umv_buf_t          *p_umv_buf )
+{
+    ib_api_status_t  status = IB_SUCCESS;
+    HH_ret_t         hh_ret = HH_OK;
+    mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void*)h_uvp_ca);
+
+    mlnx_ual_cq_info_t *p_new_cq = NULL;
+    size_t size;
+
+    FUNC_ENTER;
+
+    CL_ASSERT(p_umv_buf);
+
+    CL_ASSERT(p_hobul);
+
+    do
+    {
+        /* CA should be initialized */
+        if (!p_hobul->p_hca_ul_info)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("INVALID hca_ul_info buffer\n"));
+            status = IB_INVALID_CA_HANDLE;
+            break;
+        }
+
+        if (!p_hobul->p_hca_ul_resources)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("INVALID hca_ul_resources buffer\n"));
+            status = IB_RESOURCE_BUSY;
+            break;
+        }
+
+        p_new_cq = cl_zalloc (sizeof(mlnx_ual_cq_info_t));
+        if (!p_new_cq)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Failed alloc new CQ\n"));
+            status = IB_INSUFFICIENT_MEMORY;
+            break;
+        }
+
+        p_new_cq->p_cq_ul_resources =
+            cl_zalloc(p_hobul->p_hca_ul_info->cq_ul_resources_sz);
+        if (!p_new_cq->p_cq_ul_resources)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Failed alloc new CQ UL resources\n"));
+            status = IB_INSUFFICIENT_MEMORY;
+            break;
+        }
+
+        hh_ret = THHUL_cqm_create_cq_prep (p_hobul->hhul_hca_hndl,
+                                           *p_size,
+                                           &p_new_cq->hhul_cq_hndl,
+                                           &p_new_cq->cq_size,
+                                           p_new_cq->p_cq_ul_resources);
+        if (hh_ret != HH_OK)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Calling THHUL_cqm_create_cq_prep Failed\n"));
+            status = IB_RESOURCE_BUSY;
+            break;
+        }
+
+        if (*p_size != p_new_cq->cq_size)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("The created cq_size %d differs from *p_size %d\n",
+                       p_new_cq->cq_size, *p_size));
+            cl_memcpy ((void *)p_size, &p_new_cq->cq_size,
+                       sizeof (p_new_cq->cq_size));
+        }
+
+        /*
+         * Store the parent HCA object of this CQ
+         */
+        p_new_cq->p_hobul = p_hobul;
+
+        size = p_hobul->p_hca_ul_info->cq_ul_resources_sz +
+            sizeof (u_int32_t) + sizeof (mlnx_ual_cq_info_t *);
+        p_umv_buf->p_inout_buf = cl_zalloc(size);
+
+        if (!p_umv_buf->p_inout_buf)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Failed alloc user private buffer\n"));
+            status = IB_INSUFFICIENT_MEMORY;
+            break;
+        }
+        /*
+         * We only set the input_size up to cq_ul_resources_sz + sizeof (cq_idx).
+         * In the rest of the buffer we store the pointer to our allocated
+         * cq_info struct in order to retrieve it later in the post.
+         */
+        p_umv_buf->input_size = p_umv_buf->output_size =
+            (uint32_t)size - sizeof (mlnx_ual_cq_info_t *);
+
+        cl_memcpy (p_umv_buf->p_inout_buf,
+                   p_new_cq->p_cq_ul_resources,
+                   p_hobul->p_hca_ul_info->cq_ul_resources_sz);
+        /*
+         * Store the pointer to our cq_info struct at the tail of inout_buf
+         * and retrieve it later in the post
+         */
+        cl_memcpy (((u_int8_t *)p_umv_buf->p_inout_buf + size -
+                    sizeof (mlnx_ual_cq_info_t *)),
+                   &p_new_cq,
+                   sizeof (mlnx_ual_cq_info_t *));
+        p_umv_buf->command = TRUE;
+    } while (0);
+
+    /*
+     * clean up if required
+     */
+    if (IB_SUCCESS != status)
+    {
+        if (p_new_cq)
+        {
+            if (p_new_cq->p_cq_ul_resources)
+            {
+                cl_free (p_new_cq->p_cq_ul_resources);
+            }
+            if (hh_ret == HH_OK && p_new_cq->hhul_cq_hndl )
+            {
+                THHUL_cqm_destroy_cq_done (p_hobul->hhul_hca_hndl,
+                                           p_new_cq->hhul_cq_hndl);
+            }
+            if (p_umv_buf->p_inout_buf)
+            {
+                cl_free ( p_umv_buf->p_inout_buf);
+            }
+            cl_free (p_new_cq);
+        }
+    }
+
+    FUNC_EXIT;
+    return status;
+}
+
+
+void
+mlnx_post_create_cq (
+    IN  const ib_ca_handle_t  h_uvp_ca,
+    IN  ib_api_status_t       ioctl_status,
+    IN  const uint32_t        size,
+    OUT ib_cq_handle_t        *ph_uvp_cq,
+    IN  ci_umv_buf_t          *p_umv_buf )
+{
+    mlnx_ual_hobul_t   *p_hobul  = (mlnx_ual_hobul_t *)((void*)h_uvp_ca);
+    MT_size_t          buf_size  = 0;
+    mlnx_ual_cq_info_t *p_new_cq = NULL;
+
+
+    FUNC_ENTER;
+
+    CL_ASSERT(p_umv_buf);
+
+    CL_ASSERT(p_hobul);
+
+    buf_size = p_hobul->p_hca_ul_info->cq_ul_resources_sz +
+        sizeof (u_int32_t) + sizeof (mlnx_ual_cq_info_t *);
+
+    /* Retrieve our cq_info back from the priv buffer */
+    cl_memcpy (&p_new_cq,
+               ((u_int8_t *)p_umv_buf->p_inout_buf + buf_size -
+                sizeof (mlnx_ual_cq_info_t *)),
+               sizeof (mlnx_ual_cq_info_t *));
+    CL_ASSERT(p_new_cq);
+
+    *ph_uvp_cq = p_new_cq;
+
+    if ( ioctl_status == IB_SUCCESS )
+    {
+        if (IB_SUCCESS != p_umv_buf->status)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Bad status %ld\n", p_umv_buf->status));
+            goto err;
+        }
+        else if ((buf_size - sizeof (mlnx_ual_cq_info_t *)) !=
+                 p_umv_buf->output_size)
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Bad user priv buffer size (expected %d minus pointer), res = %ld\n",
+                       buf_size, p_umv_buf->output_size));
+            goto err;
+        }
+
+        cl_memcpy (p_new_cq->p_cq_ul_resources,
+                   p_umv_buf->p_inout_buf,
+                   p_hobul->p_hca_ul_info->cq_ul_resources_sz);
+
+        cl_memcpy (&p_new_cq->cq_idx,
+                   ((u_int8_t *)p_umv_buf->p_inout_buf +
+                    p_hobul->p_hca_ul_info->cq_ul_resources_sz),
+                   sizeof (u_int32_t));
+
+        if (HH_OK !=
+            THHUL_cqm_create_cq_done (p_hobul->hhul_hca_hndl,
+                                      p_new_cq->hhul_cq_hndl,
+                                      p_new_cq->cq_idx,
+                                      p_new_cq->p_cq_ul_resources))
+        {
+            CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,
+                      ("Call THHUL_cqm_create_cq_done Failed\n"));
+            goto err;
+        }
+
+        CL_TRACE (MLNX_TRACE_LVL_7, mlnx_dbg_lvl,
+                  ("Newly created CQ cq_idx 0x%x (CQ size %d)\n",
+                   p_new_cq->cq_idx, p_new_cq->cq_size));
+    }
+    else
+    {
+err:
+        if (p_new_cq->p_cq_ul_resources)
+            cl_free (p_new_cq->p_cq_ul_resources);
+
+        cl_free (p_new_cq);
+        *ph_uvp_cq = NULL;
+    }
+
+    cl_free (p_umv_buf->p_inout_buf);
+    p_umv_buf->p_inout_buf = NULL;
+
+    FUNC_EXIT;
+    return;
+}
+
+
+ib_api_status_t
+mlnx_pre_resize_cq (
+    IN     const ib_cq_handle_t  h_uvp_cq,
+    IN OUT uint32_t* const       p_size,
+    IN OUT ci_umv_buf_t          *p_umv_buf )
+{
+    mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq);
+    mlnx_ual_hobul_t   *p_hobul;
+    ib_api_status_t    status = IB_SUCCESS;
+    void               *p_cq_ul_resources;
+
+    FUNC_ENTER;
+
+    p_hobul = p_cq_info->p_hobul;
+
+    CL_ASSERT(p_umv_buf);
+    CL_ASSERT(p_hobul);
+
+    do
+    {
+        p_cq_ul_resources =
+            cl_zalloc
(p_hobul->p_hca_ul_info->cq_ul_resources_sz); + if (!p_cq_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to alloc cq_ul_res\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, + ("Before resize_cq_prep *p_size = %d\n", *p_size)); + status = THHUL_cqm_resize_cq_prep ( p_hobul->hhul_hca_hndl, + p_cq_info->hhul_cq_hndl, + *p_size, &p_cq_info->cq_size, + p_cq_ul_resources); + if( status != IB_SUCCESS ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("THHUL_cqm_resize_cq_prep failed\n")); + break; + } + CL_TRACE (MLNX_TRACE_LVL_8, mlnx_dbg_lvl, + ("After resize_cq_prep *p_size = %d\n", *p_size)); + + p_umv_buf->p_inout_buf = p_cq_ul_resources; + p_umv_buf->input_size = (uint32_t)p_hobul->p_hca_ul_info->cq_ul_resources_sz; + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->command = TRUE; + + } while (0); + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_resize_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status; + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq); + //mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + CL_ASSERT (p_cq_info); + CL_ASSERT(p_umv_buf); + + //p_pd_info = (mlnx_ual_pd_info_t *) p_cq_info->h_uvp_pd; + //CL_ASSERT (p_pd_info); + + p_hobul = p_cq_info->p_hobul; + CL_ASSERT (p_hobul); + + status = ioctl_status; + + do + { + if (IB_SUCCESS == status) + { + if (IB_SUCCESS != p_umv_buf->status) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad status %ld\n", p_umv_buf->status)); + status = p_umv_buf->status; + break; + } + else if (p_umv_buf->output_size != + (p_hobul->p_hca_ul_info->cq_ul_resources_sz) ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad priv buf size %ld\n", p_umv_buf->output_size)); + status = IB_ERROR; + break; + } + + if (HH_OK != THHUL_cqm_resize_cq_done ( + p_hobul->hhul_hca_hndl, + p_cq_info->hhul_cq_hndl, + p_umv_buf->p_inout_buf)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("THHUL_cqm_resize_cq_done failed\n")); + status = IB_ERROR; + break; + } + } + + } while (0); + + if (IB_SUCCESS != status) + { + /* + * Undo resize + */ + if (HH_OK != THHUL_cqm_resize_cq_done ( + p_hobul->hhul_hca_hndl, + p_cq_info->hhul_cq_hndl, + NULL)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to UNDO resize (serious)\n")); + } + } + + cl_free (p_umv_buf->p_inout_buf); + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_uvp_cq); + + FUNC_ENTER; + + *p_size = p_cq_info->cq_size; + + FUNC_EXIT; + return IB_VERBS_PROCESSING_DONE; +} + + +ib_api_status_t +mlnx_pre_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq) +{ + FUNC_ENTER; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status) +{ + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *) ((void*)h_uvp_cq); + //mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + CL_ASSERT (p_cq_info); + + //p_pd_info = (mlnx_ual_pd_info_t *) p_cq_info->h_uvp_pd; + //CL_ASSERT (p_pd_info); + + p_hobul = p_cq_info->p_hobul; + CL_ASSERT (p_hobul); + + THHUL_cqm_destroy_cq_done (p_hobul->hhul_hca_hndl, + p_cq_info->hhul_cq_hndl); + + if 
(p_cq_info->p_cq_ul_resources)
+    {
+        cl_free (p_cq_info->p_cq_ul_resources );
+        p_cq_info->p_cq_ul_resources = NULL;
+    }
+    cl_free (p_cq_info);
+
+    FUNC_EXIT;
+    return;
+}
+
+
diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_main.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_main.c
new file mode 100644
index 00000000..d847f407
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_main.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mlnx_ual_main.h"
+
+u_int32_t mlnx_dbg_lvl = 0; // MLNX_TRACE_LVL_8;
+
+
+static void uvp_init( void );
+
+
+extern BOOL APIENTRY
+_DllMainCRTStartupForGS(
+    IN HINSTANCE h_module,
+    IN DWORD     ul_reason_for_call,
+    IN LPVOID    lp_reserved );
+
+
+BOOL APIENTRY
+DllMain(
+    IN HINSTANCE h_module,
+    IN DWORD     ul_reason_for_call,
+    IN LPVOID    lp_reserved )
+{
+    switch( ul_reason_for_call )
+    {
+    case DLL_PROCESS_ATTACH:
+        if( !_DllMainCRTStartupForGS(
+            h_module, ul_reason_for_call, lp_reserved ) )
+        {
+            return FALSE;
+        }
+
+        uvp_init();
+        break;
+
+    default:
+        return _DllMainCRTStartupForGS(
+            h_module, ul_reason_for_call, lp_reserved );
+    }
+    return TRUE;
+}
+
+
+/*
+ * UVP Shared Library Init routine
+ */
+
+static void
+uvp_init( void )
+{
+#define ENV_BUFSIZE 20
+    TCHAR envstr[ENV_BUFSIZE];
+
+    /*
+     * Override the default debugging level if the environment variable is set
+     */
+    if (( GetEnvironmentVariable("MLNX_TRACE_LVL", envstr, ENV_BUFSIZE)) != 0 )
+    {
+        switch(strtoul(envstr,NULL,0)) {
+        case 0:
+            mlnx_dbg_lvl = 0;
+            break;
+        case 1:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_1;
+            break;
+        case 2:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_2;
+            break;
+        case 3:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_3;
+            break;
+        case 4:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_4;
+            break;
+        case 5:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_5;
+            break;
+        case 6:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_6;
+            break;
+        case 7:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_7;
+            break;
+        case 8:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_8;
+            break;
+        default:
+            mlnx_dbg_lvl = MLNX_TRACE_LVL_8;
+        }
+    }
+    /*
+     * Open the MOSAL device
+     */
+
+    //MOSAL_user_lib_init();
+}
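/*
 * Editorial sketch (not a patch hunk): the switch in uvp_init() maps
 * MLNX_TRACE_LVL=n to a low-order bitmask, i.e. MLNX_TRACE_LVL_n ==
 * (1 << n) - 1. An equivalent closed form, assuming n has already been
 * parsed from the environment:
 */
static u_int32_t
example_trace_lvl_from_env (unsigned long n)
{
    if (n > 8)
        n = 8;              /* clamp to the deepest defined level */
    return (n == 0) ? 0 : (u_int32_t)((1UL << n) - 1);
}
/*
 * Usage (hypothetical): running "set MLNX_TRACE_LVL=4" before the DLL
 * loads yields mlnx_dbg_lvl == 0x0000000f == MLNX_TRACE_LVL_4.
 */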
+__declspec(dllexport) ib_api_status_t
+uvp_get_interface (
+    IN OUT uvp_interface_t *p_uvp )
+{
+    FUNC_ENTER;
+
+    CL_ASSERT(p_uvp);
+    /*
+     * Version of the header file this interface export can handle
+     */
+    p_uvp->version = 0x100;
+    p_uvp->guid = 0x12345678;
+
+    /*
+     * CA Management
+     */
+    mlnx_get_ca_interface (p_uvp);
+
+    /*
+     * Protection Domain
+     */
+    mlnx_get_pd_interface (p_uvp);
+
+    /*
+     * QP Management Verbs
+     */
+    mlnx_get_qp_interface (p_uvp);
+
+    /*
+     * Completion Queue Management Verbs
+     */
+    mlnx_get_cq_interface (p_uvp);
+
+    /*
+     * AV Management
+     */
+    mlnx_get_av_interface(p_uvp);
+
+    /*
+     * Memory Region / Window Management Verbs
+     */
+    mlnx_get_mrw_interface (p_uvp);
+
+    /*
+     * Multicast Support Verbs
+     */
+    mlnx_get_mcast_interface (p_uvp);
+
+    /*
+     * OS bypass (send, receive, poll/notify cq)
+     */
+    mlnx_get_osbypass_interface(p_uvp);
+
+
+    /*
+     * Local MAD support, for HCAs that do not support
+     * agents in the HW.
+     * ??? Do we need this for user-mode ???
+     */
+
+    FUNC_EXIT;
+    return IB_SUCCESS;
+}
+
diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_main.h b/branches/Ndi/hw/mt23108/user/mlnx_ual_main.h
new file mode 100644
index 00000000..306588d9
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_main.h
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef __UAL_MAIN_H__ +#define __UAL_MAIN_H__ + +#include +#include +#include +#include +#include +#include +#include + +//#include +#include "hca_data.h" +#include +#include +#include +//#include + +/* + * Debug level + */ +#define MLNX_TRACE_LVL_8 0x000000ff +#define MLNX_TRACE_LVL_7 0x0000007f +#define MLNX_TRACE_LVL_6 0x0000003f +#define MLNX_TRACE_LVL_5 0x0000001f +#define MLNX_TRACE_LVL_4 0x0000000f +#define MLNX_TRACE_LVL_3 0x00000007 +#define MLNX_TRACE_LVL_2 0x00000003 +#define MLNX_TRACE_LVL_1 0x00000001 +#define MLNX_ENTER_EXIT MLNX_TRACE_LVL_8 + +#define FUNC_ENTER CL_ENTER(MLNX_ENTER_EXIT, mlnx_dbg_lvl) +#define FUNC_EXIT CL_EXIT(MLNX_ENTER_EXIT, mlnx_dbg_lvl) + +#define MAX_WRS_PER_CHAIN 16 +#define MAX_NUM_SGE 32 + +#define MLNX_SGE_SIZE 16 +#define MLNX_UAL_ALLOC_HCA_UL_RES 1 +#define MLNX_UAL_FREE_HCA_UL_RES 2 + +typedef unsigned __int3264 cl_dev_handle_t; + + +/* + * PROTOTYPES + */ + +/************* CA operations *************************/ +void +mlnx_get_ca_interface ( + IN OUT uvp_interface_t *p_uvp ); + + +ib_api_status_t +mlnx_pre_open_ca ( + IN const ib_net64_t ca_guid, + IN OUT ci_umv_buf_t *p_umv_buf); + + +ib_api_status_t +mlnx_post_open_ca ( + IN const ib_net64_t ca_guid, + IN ib_api_status_t ioctl_status, + OUT ib_ca_handle_t *ph_uvp_ca, + IN ci_umv_buf_t *p_umv_buf ); + + +ib_api_status_t +mlnx_pre_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN uint8_t port_num, + IN ib_ca_mod_t modca_cmd, + IN const ib_port_attr_mod_t* p_port_attr_mod ); + +void +mlnx_post_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status); + +ib_api_status_t +mlnx_pre_close_ca ( + IN ib_ca_handle_t h_uvp_ca ); + +ib_api_status_t +mlnx_post_close_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ); + + +/************* PD Management *************************/ +void +mlnx_get_pd_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_allocate_pd ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_allocate_pd ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + OUT ib_pd_handle_t *ph_uvp_pd, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd); + +void +mlnx_post_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status ); + + +/************* AV Management *************************/ +void +mlnx_get_av_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_av ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf); + + +void +mlnx_post_create_av ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_av_handle_t *ph_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_av ( + IN const ib_av_handle_t h_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_query_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ib_av_attr_t *p_addr_vector, + IN OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_modify_av ( + IN const 
ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_av ( + IN const ib_av_handle_t h_uvp_av); + +void +mlnx_post_destroy_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status); + + +/************* CQ Management *************************/ +void +mlnx_get_cq_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + OUT ib_cq_handle_t *ph_uvp_cq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_resize_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_resize_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq); + +void +mlnx_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status); + +/************* QP Management *************************/ +void +mlnx_get_qp_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, // Fixme + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp); + +void +mlnx_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status ); + +/************* MR/MW Management *************************/ +void +mlnx_get_mrw_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_create_t *p_mr_create, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const ib_mr_attr_t *p_mr_query, + IN OUT ci_umv_buf_t *p_umv_buf); + 
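/*
 * Editorial aside (hedged sketch, not a patch hunk): every verb in this
 * header comes as a pre/post pair around one kernel transition. For the
 * memory-registration verbs above, the assumed AL-side order is:
 *
 *     mlnx_pre_register_mr()    -> build/validate the ci_umv_buf_t
 *     <ioctl into the kernel>   -> lkey/rkey produced on success
 *     mlnx_post_register_mr()   -> consume ioctl_status, lkey/rkey
 *
 * The post routine is assumed to run even when the ioctl fails (it
 * receives ioctl_status), so it is the natural place to release
 * anything the pre routine allocated.
 */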
+ib_api_status_t +mlnx_pre_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t *p_mr_create OPTIONAL, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_access_t access_ctrl, + IN void *p_vaddr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const void *p_vaddr, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_smr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_mw_handle_t *ph_uvp_mw, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_pd_handle_t *ph_pd, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw); + // IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status); + + +/************* MCAST Management *************************/ +void +mlnx_get_mcast_interface ( + IN OUT uvp_interface_t *p_uvp ); + + +ib_api_status_t +mlnx_pre_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + + +/************* OS BYPASS Management *************************/ +void +mlnx_get_osbypass_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_post_send ( + IN const void* __ptr64 h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t** pp_send_failure ); + +ib_api_status_t +mlnx_post_recv ( + IN const void* __ptr64 h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ); + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN const ib_qp_handle_t h_uvp_qp, + IN ib_bind_wr_t *p_mw_bind, + OUT net32_t* const p_rkey ); + +ib_api_status_t +mlnx_poll_cq ( + IN const void* __ptr64 h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + +ib_api_status_t +mlnx_enable_cq_notify ( + 
IN const void* __ptr64 h_cq, + IN const boolean_t solicited ); + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const void* __ptr64 h_cq, + IN const uint32_t n_cqes ); + +ib_api_status_t +mlnx_peek_cq ( + IN const void* __ptr64 h_cq, + OUT uint32_t* const p_n_cqes ); + +#endif diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_mcast.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_mcast.c new file mode 100644 index 00000000..f847f00d --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_mcast.c @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mlnx_ual_main.h" + +extern u_int32_t mlnx_dbg_lvl; + +void +mlnx_get_mcast_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + FUNC_ENTER; + + CL_ASSERT(p_uvp); + + /* + * Multicast Support Verbs + */ + p_uvp->pre_attach_mcast = NULL; + p_uvp->post_attach_mcast = NULL; + p_uvp->pre_detach_mcast = NULL; + p_uvp->post_detach_mcast = NULL; + + FUNC_EXIT; +} + + + +ib_api_status_t +mlnx_pre_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; +#if 1 + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + p_umv_buf->command = TRUE; +#endif + + FUNC_EXIT; + return IB_SUCCESS; +} + + + +void +mlnx_post_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + + +ib_api_status_t +mlnx_pre_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; +#if 1 + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; +#endif + + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_mrw.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_mrw.c new file mode 100644 index 00000000..2c0e808b --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_mrw.c @@ -0,0 +1,429 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + +#include "mlnx_ual_main.h" + +extern u_int32_t mlnx_dbg_lvl; + +void +mlnx_get_mrw_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + FUNC_ENTER; + + CL_ASSERT(p_uvp); + + /* + * Memory Management Verbs + */ +// p_uvp->pre_register_mr = NULL; +// p_uvp->post_register_mr = NULL; +// p_uvp->pre_query_mr = NULL; +// p_uvp->post_query_mr = NULL; +// p_uvp->pre_deregister_mr = NULL; +// p_uvp->post_deregister_mr = NULL; +// p_uvp->pre_modify_mr = NULL; +// p_uvp->post_modify_mr = NULL; +// p_uvp->pre_register_smr = NULL; +// p_uvp->post_register_smr = NULL; + + /* + * Memory Window Verbs + */ + p_uvp->pre_create_mw = mlnx_pre_create_mw; + p_uvp->post_create_mw = mlnx_post_create_mw; + p_uvp->pre_query_mw = mlnx_pre_query_mw; + p_uvp->post_query_mw = mlnx_post_query_mw; + p_uvp->pre_destroy_mw = mlnx_pre_destroy_mw; + p_uvp->post_destroy_mw = mlnx_post_destroy_mw; + + /* register_pmr is not supported in user-mode */ + + FUNC_EXIT; +} + + + +ib_api_status_t +mlnx_pre_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_create_t *p_mr_create, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const ib_mr_attr_t *p_mr_query, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t *p_mr_create OPTIONAL, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_access_t access_ctrl, + IN void *p_vaddr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const void *p_vaddr, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_smr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_deregister_mr ( + IN
const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*) h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_mw_info_t *p_new_mw; + + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + p_new_mw = cl_zalloc (sizeof (mlnx_ual_mw_info_t)); + if (p_new_mw == NULL) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to alloc memory\n")); + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + p_new_mw->h_uvp_pd = h_uvp_pd; + + p_umv_buf->input_size = p_umv_buf->output_size = + sizeof (mlnx_ual_mw_info_t *); + + p_umv_buf->p_inout_buf = cl_zalloc (p_umv_buf->input_size); + if (p_umv_buf->p_inout_buf == NULL) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to alloc memory for priv buffer\n")); + status = IB_INSUFFICIENT_MEMORY; + goto cleanup; + } + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->command = TRUE; + + cl_memcpy (p_umv_buf->p_inout_buf, &p_new_mw, p_umv_buf->input_size); + +cleanup: + if (IB_SUCCESS != status) + { + if (p_new_mw) + { + cl_free (p_new_mw); + } + } + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_mw_handle_t *ph_uvp_mw, + IN ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*) h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_mw_info_t *p_new_mw; + + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + + status = ioctl_status; + + CL_ASSERT (p_umv_buf->p_inout_buf); + cl_memcpy (&p_new_mw, p_umv_buf->p_inout_buf, p_umv_buf->input_size); + + + *ph_uvp_mw = (ib_mw_handle_t) p_new_mw; + + if (IB_SUCCESS == status) + { + if (IB_SUCCESS != p_umv_buf->status) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad status %ld\n", p_umv_buf->status)); + status = p_umv_buf->status; + goto cleanup; + } + + p_new_mw->rkey = rkey; + + if (HH_OK != + THHUL_mwm_alloc_mw (p_hobul->hhul_hca_hndl, + rkey, + &p_new_mw->hhul_mw_hndl)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("thhul_alloc_mw failed\n")); + status = IB_ERROR; + goto cleanup; + } + } + else + { + cl_free (p_new_mw); + } + +cleanup: + cl_free (p_umv_buf->p_inout_buf); + p_umv_buf->p_inout_buf = NULL; + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->command = TRUE; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_pd_handle_t *ph_pd, + IN ci_umv_buf_t *p_umv_buf ) +{ + FUNC_ENTER; + *ph_pd = ((mlnx_ual_mw_info_t *)((void*)h_uvp_mw))->h_uvp_pd; + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw) +{ + FUNC_ENTER; + FUNC_EXIT;
+ return IB_SUCCESS; +} + + +void +mlnx_post_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_mw_info_t *p_mw_info = (mlnx_ual_mw_info_t *)((void*) h_uvp_mw); + + FUNC_ENTER; + CL_ASSERT(p_mw_info); + + p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_mw_info->h_uvp_pd); + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + if (HH_OK != + THHUL_mwm_free_mw (p_hobul->hhul_hca_hndl, p_mw_info->hhul_mw_hndl)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("thhul_free_mw failed\n")); + status = IB_ERROR; + } + + if (status == IB_SUCCESS) + { + cl_free (p_mw_info); + } + + FUNC_EXIT; + return; +} diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_osbypass.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_osbypass.c new file mode 100644 index 00000000..c69c8068 --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_osbypass.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mlnx_ual_main.h" + +//#define MTPERF +#include +//EZ: Hack could not find where defined? +int PERF_time_this; +MTPERF_EXTERN_SEGMENT(Perf_only_ibal); + +extern u_int32_t mlnx_dbg_lvl; + +void +mlnx_get_osbypass_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + + CL_ASSERT(p_uvp); + + /* + * Work Request Processing Verbs + * Should the types be same as Verbs? + */ + p_uvp->post_send = mlnx_post_send; + p_uvp->post_recv = mlnx_post_recv; + + /* + * Completion Processing and + * Completion Notification Request Verbs. + * Should the types be same as Verbs? 
+ */ + p_uvp->poll_cq = mlnx_poll_cq; + p_uvp->rearm_cq = mlnx_enable_cq_notify; + p_uvp->rearm_n_cq = mlnx_enable_ncomp_cq_notify; + p_uvp->peek_cq = mlnx_peek_cq; + + /* Memory window bind */ + p_uvp->bind_mw = mlnx_bind_mw; +} + + +static VAPI_mrw_acl_t +map_itom_access_ctrl ( + IN ib_access_t i_acl) +{ + VAPI_mrw_acl_t m_acl = 0; + + if (i_acl & IB_AC_RDMA_READ) m_acl |= VAPI_EN_REMOTE_READ; + if (i_acl & IB_AC_RDMA_WRITE) m_acl |= VAPI_EN_REMOTE_WRITE; + if (i_acl & IB_AC_ATOMIC) m_acl |= VAPI_EN_REMOTE_ATOM; + if (i_acl & IB_AC_LOCAL_WRITE) m_acl |= VAPI_EN_LOCAL_WRITE; + if (i_acl & IB_AC_MW_BIND) m_acl |= VAPI_EN_MEMREG_BIND; + + return m_acl; +} + + +ib_api_status_t +mlnx_post_send ( + IN const void* __ptr64 h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t** pp_send_failure ) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp); + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + CL_ASSERT (p_qp_info); + + p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_qp_info->h_uvp_pd); + CL_ASSERT (p_pd_info); + + p_hobul = (mlnx_ual_hobul_t *) p_pd_info->p_hobul; + CL_ASSERT (p_hobul); + + CL_ASSERT( p_send_wr ); + + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Send QP idx %x\n", p_qp_info->qp_idx)); + status = THHUL_qpm_post_send_wrs( p_hobul->hhul_hca_hndl, + p_qp_info->hhul_qp_hndl, + p_send_wr, + pp_send_failure ); + if ( status != IB_SUCCESS ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Post_send failed status %x\n", status)); + } + + FUNC_EXIT; + return status; +} + + +ib_api_status_t +mlnx_post_recv ( + IN const void* __ptr64 h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_qp); + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + CL_ASSERT (p_qp_info); + + p_pd_info = (mlnx_ual_pd_info_t *)((void*)p_qp_info->h_uvp_pd); + CL_ASSERT (p_pd_info); + + p_hobul = (mlnx_ual_hobul_t *) p_pd_info->p_hobul; + CL_ASSERT (p_hobul); + + CL_ASSERT( p_recv_wr ); + + status = THHUL_qpm_post_recv_wrs( p_hobul->hhul_hca_hndl, p_qp_info->hhul_qp_hndl, + p_recv_wr, pp_recv_failure ); + + if ( status != IB_SUCCESS ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Post_recv failed status %x\n", status)); + } + + FUNC_EXIT; + return status; +} + + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN const ib_qp_handle_t h_uvp_qp, + IN ib_bind_wr_t *p_mw_bind, + OUT net32_t* const p_rkey ) +{ + ib_api_status_t status = IB_SUCCESS; + HHUL_mw_bind_t m_bind_prop; + HH_ret_t hh_ret; + mlnx_ual_mw_info_t *p_mw_info = (mlnx_ual_mw_info_t *)((void*) h_uvp_mw); + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void*) h_uvp_qp); + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + + FUNC_ENTER; + CL_ASSERT (p_mw_info); + CL_ASSERT (p_qp_info); + + p_pd_info = (mlnx_ual_pd_info_t *)((void*) p_qp_info->h_uvp_pd); + CL_ASSERT (p_pd_info); + + p_hobul = (mlnx_ual_hobul_t *) p_pd_info->p_hobul; + CL_ASSERT (p_hobul); + + m_bind_prop.qp = p_qp_info->hhul_qp_hndl; + m_bind_prop.id = p_mw_bind->wr_id; + m_bind_prop.acl = map_itom_access_ctrl (p_mw_bind->access_ctrl); + m_bind_prop.size = p_mw_bind->local_ds.length; + m_bind_prop.start = (VAPI_virt_addr_t) (MT_virt_addr_t) + p_mw_bind->local_ds.vaddr; + m_bind_prop.mr_lkey = p_mw_bind->local_ds.lkey; + m_bind_prop.comp_type = (p_mw_bind->send_opt & 
IB_SEND_OPT_SIGNALED) ? + VAPI_SIGNALED : VAPI_UNSIGNALED; + + if (HH_OK != + (hh_ret = THHUL_mwm_bind_mw (p_hobul->hhul_hca_hndl, + p_mw_info->hhul_mw_hndl, + &m_bind_prop, + p_rkey))) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("thhul_bind_mw returned error status %d\n", hh_ret)); + status = IB_ERROR; + } + + FUNC_EXIT; + return status; +} + + +ib_api_status_t +mlnx_poll_cq ( + IN const void* __ptr64 h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq); + + FUNC_ENTER; + CL_ASSERT (p_cq_info); + + p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul; + CL_ASSERT (p_hobul); + + if (!pp_free_wclist || !*pp_free_wclist || !pp_done_wclist) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl,("Passed in bad params\n")); + status = IB_INVALID_PARAMETER; + FUNC_EXIT; + return status; + } + + status = THHUL_cqm_poll4wc(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl, + pp_free_wclist, pp_done_wclist ); + + FUNC_EXIT; + return status; +} + + +ib_api_status_t +mlnx_enable_cq_notify ( + IN const void* __ptr64 h_cq, + IN const boolean_t solicited ) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq); + HH_ret_t hh_ret; + VAPI_cq_notif_type_t hh_request; + + FUNC_ENTER; + CL_ASSERT (p_cq_info); + + p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul; + CL_ASSERT (p_hobul); + + hh_request = (solicited) ? VAPI_SOLIC_COMP : VAPI_NEXT_COMP; + + if (HH_OK != + (hh_ret = THHUL_cqm_req_comp_notif (p_hobul->hhul_hca_hndl, + p_cq_info->hhul_cq_hndl, + hh_request) )) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("req_comp_notif returned error status %d\n", hh_ret)); + status = IB_ERROR; + } + + FUNC_EXIT; + return status; +} + + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const void* __ptr64 h_cq, + IN const uint32_t n_cqes ) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq); + HH_ret_t hh_ret; + + FUNC_ENTER; + CL_ASSERT (p_cq_info); + + p_hobul = (mlnx_ual_hobul_t *) p_cq_info->p_hobul; + CL_ASSERT (p_hobul); + + hh_ret = THHUL_cqm_req_ncomp_notif(p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl, n_cqes); + if (hh_ret != HH_OK ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("req_ncomp_notif returned error status %d\n", hh_ret)); + status = IB_ERROR; + } + + FUNC_EXIT; + return status; +} + +ib_api_status_t +mlnx_peek_cq( + IN const void* __ptr64 h_cq, + OUT uint32_t* const p_n_cqes ) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + mlnx_ual_cq_info_t *p_cq_info = (mlnx_ual_cq_info_t *)((void*) h_cq); + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + + CL_ASSERT (p_cq_info); + + p_hobul = (mlnx_ual_hobul_t *)p_cq_info->p_hobul; + + CL_ASSERT (p_hobul); + + status = THHUL_cqm_count_cqe( p_hobul->hhul_hca_hndl, p_cq_info->hhul_cq_hndl, p_n_cqes ); + + if( status != IB_SUCCESS ) + { + CL_TRACE(MLNX_TRACE_LVL_1, mlnx_dbg_lvl, ("completes with ERROR status %d\n", status)); + } + + FUNC_EXIT; + return status; + +} diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_pd.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_pd.c new file mode 100644 index 00000000..fa4d011a --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_pd.c @@ -0,0 +1,331 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc.
All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "mlnx_ual_main.h" + +extern u_int32_t mlnx_dbg_lvl; + +void +mlnx_get_pd_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + FUNC_ENTER; + + CL_ASSERT(p_uvp); + + /* + * Protection Domain + */ + p_uvp->pre_allocate_pd = mlnx_pre_allocate_pd; + p_uvp->post_allocate_pd = mlnx_post_allocate_pd; + p_uvp->pre_deallocate_pd = mlnx_pre_deallocate_pd; + p_uvp->post_deallocate_pd = mlnx_post_deallocate_pd; + + FUNC_EXIT; +} + + + +ib_api_status_t +mlnx_pre_allocate_pd ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca); + mlnx_ual_pd_info_t *p_new_pd = NULL; + MT_size_t size; + + FUNC_ENTER; + + CL_ASSERT(p_hobul); + CL_ASSERT(p_umv_buf); + + do + { + /* CA should be initialized */ + if (!p_hobul->p_hca_ul_info) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID hca_ul_info buffer\n")); + status = IB_INVALID_CA_HANDLE; + break; + } + + /* Currently supporting multiple PDs per process */ + if (!p_hobul->p_hca_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID hca_ul_resources buffer\n")); + status = IB_RESOURCE_BUSY; + break; + } + + p_new_pd = cl_zalloc (sizeof(mlnx_ual_pd_info_t)); + if (!p_new_pd) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc new PD\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + p_new_pd->p_pd_ul_resources = + cl_zalloc(p_hobul->p_hca_ul_info->pd_ul_resources_sz); + if (!p_new_pd->p_pd_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc new PD UL resources\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + if (HH_OK != + THHUL_pdm_alloc_pd_avs_prep (p_hobul->hhul_hca_hndl, + MLNX_MAX_AVS_PER_PD, + PD_NO_FLAGS, // TBD + &p_new_pd->hhul_pd_hndl, + p_new_pd->p_pd_ul_resources)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Calling THHUL_alloc_pd_prep Failed\n")); + status = IB_RESOURCE_BUSY; + break; + } + p_new_pd->p_hobul = p_hobul; + + size = p_hobul->p_hca_ul_info->pd_ul_resources_sz + + sizeof (u_int32_t) + sizeof (mlnx_ual_pd_info_t *); + + p_umv_buf->p_inout_buf = cl_zalloc(size); + + if (!p_umv_buf->p_inout_buf) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc user private buffer\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + p_umv_buf->command = TRUE; + p_umv_buf->input_size = 
p_umv_buf->output_size = + (uint32_t)size - sizeof (mlnx_ual_pd_info_t *); + + CL_TRACE (MLNX_TRACE_LVL_6, mlnx_dbg_lvl, + ("umv_buf->input_size %ld, pd_ul_res_sz %d\n", + p_umv_buf->input_size, + p_hobul->p_hca_ul_info->pd_ul_resources_sz)); + + cl_memcpy (p_umv_buf->p_inout_buf, + p_new_pd->p_pd_ul_resources, + p_hobul->p_hca_ul_info->pd_ul_resources_sz); + cl_memcpy (( (u_int8_t *)p_umv_buf->p_inout_buf + size - + sizeof (mlnx_ual_pd_info_t *)), + &p_new_pd, + sizeof (mlnx_ual_pd_info_t *)); + + } while (0); + + /* + * clean up if required + */ + if (IB_SUCCESS != status) + { + if (p_new_pd) + { + if (p_new_pd->p_pd_ul_resources) + { + cl_free (p_new_pd->p_pd_ul_resources); + + if (!p_umv_buf->p_inout_buf) + { + THHUL_pdm_free_pd_prep (p_hobul->hhul_hca_hndl, + p_new_pd->hhul_pd_hndl, FALSE); + THHUL_pdm_free_pd_done (p_hobul->hhul_hca_hndl, + p_new_pd->hhul_pd_hndl); + } + } + cl_free (p_new_pd); + } + } + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_allocate_pd ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + OUT ib_pd_handle_t *ph_uvp_pd, + IN ci_umv_buf_t *p_umv_buf ) +{ + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca); + mlnx_ual_pd_info_t *p_new_pd; + + MT_size_t size; + + FUNC_ENTER; + + CL_ASSERT(p_hobul); + CL_ASSERT(p_umv_buf); + + size = p_hobul->p_hca_ul_info->pd_ul_resources_sz + + sizeof (u_int32_t) + sizeof (mlnx_ual_pd_info_t *); + + cl_memcpy (&p_new_pd, + ((u_int8_t *)p_umv_buf->p_inout_buf + size - + sizeof (mlnx_ual_pd_info_t*)), + sizeof (mlnx_ual_pd_info_t *)); + CL_ASSERT(p_new_pd); + *ph_uvp_pd = (ib_pd_handle_t) p_new_pd; + + if ( ioctl_status == IB_SUCCESS ) + { + if (IB_SUCCESS != p_umv_buf->status) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("post_allocate_pd return status %s\n", + ib_get_err_str(p_umv_buf->status))); + goto err; + } + else if ((size - sizeof (mlnx_ual_pd_info_t *)) != + p_umv_buf->output_size ) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad user priv buffer size exp = %d, res = %ld\n", + size, p_umv_buf->output_size)); + goto err; + } + + cl_memcpy (p_new_pd->p_pd_ul_resources, + p_umv_buf->p_inout_buf, + p_hobul->p_hca_ul_info->pd_ul_resources_sz); + cl_memcpy (&p_new_pd->pd_idx, + ((u_int8_t *)p_umv_buf->p_inout_buf + + p_hobul->p_hca_ul_info->pd_ul_resources_sz), + sizeof (u_int32_t)); + + if (HH_OK != + THHUL_pdm_alloc_pd_done (p_hobul->hhul_hca_hndl, + p_new_pd->hhul_pd_hndl, + p_new_pd->pd_idx, + p_new_pd->p_pd_ul_resources)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Call THHUL_pdm_alloc_pd_done Failed\n")); + goto err; + } + } + else + { +err: + if (p_new_pd->p_pd_ul_resources) + cl_free (p_new_pd->p_pd_ul_resources); + + cl_free (p_new_pd); + *ph_uvp_pd = NULL; + } + + cl_free (p_umv_buf->p_inout_buf); + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + + FUNC_ENTER; + + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + do + { + if (!p_pd_info->p_pd_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID PD UL resources\n")); + status = IB_INVALID_PD_HANDLE; + break; + } + + if (HH_OK != THHUL_pdm_free_pd_prep(p_hobul->hhul_hca_hndl, p_pd_info->hhul_pd_hndl, FALSE)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Calling THHUL_free_pd_prep Failed\n")); + status = IB_RESOURCE_BUSY; + break; + } + }
while (0); + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status ) +{ + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + UNREFERENCED_PARAMETER(ioctl_status); + + FUNC_ENTER; + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + if (p_pd_info->p_pd_ul_resources) + { + cl_free (p_pd_info->p_pd_ul_resources ); + p_pd_info->p_pd_ul_resources = NULL; + } + + THHUL_pdm_free_pd_done (p_hobul->hhul_hca_hndl, p_pd_info->hhul_pd_hndl); + cl_free (p_pd_info); + FUNC_EXIT; + return; +} diff --git a/branches/Ndi/hw/mt23108/user/mlnx_ual_qp.c b/branches/Ndi/hw/mt23108/user/mlnx_ual_qp.c new file mode 100644 index 00000000..4fff143f --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/mlnx_ual_qp.c @@ -0,0 +1,531 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mlnx_ual_main.h" + + +extern u_int32_t mlnx_dbg_lvl; + +void +mlnx_get_qp_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + FUNC_ENTER; + + CL_ASSERT(p_uvp); + + /* + * QP Management Verbs + */ + p_uvp->pre_create_qp = mlnx_pre_create_qp; + p_uvp->post_create_qp = mlnx_post_create_qp; + + // !!! none for create_spl_qp, UAL will return error !!!
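+ // Each verb below follows the pre_/post_ pattern used throughout this + // provider: the pre_ handler runs before the kernel IOCTL and fills + // p_umv_buf with any private data for the proxy call, and the post_ + // handler runs afterwards to unpack the kernel's results (see + // mlnx_pre_create_qp / mlnx_post_create_qp in this file).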
+ + p_uvp->pre_modify_qp = mlnx_pre_modify_qp; + p_uvp->post_modify_qp = mlnx_post_modify_qp; + p_uvp->pre_query_qp = NULL; + p_uvp->post_query_qp = mlnx_post_query_qp; + p_uvp->pre_destroy_qp = mlnx_pre_destroy_qp; + p_uvp->post_destroy_qp = mlnx_post_destroy_qp; + + FUNC_EXIT; +} + + +IB_ts_t +map_ibal_qp_type (ib_qp_type_t ibal_qp_type) +{ + if (ibal_qp_type == IB_QPT_RELIABLE_CONN) return IB_TS_RC; + else if (ibal_qp_type == IB_QPT_UNRELIABLE_CONN) return IB_TS_UC; + // else if (ibal_qp_type == IB_QPT_RELIABLE_DGRM) return IB_TS_RD; + else if (ibal_qp_type == IB_QPT_UNRELIABLE_DGRM) return IB_TS_UD; + else if (ibal_qp_type == IB_QPT_RAW_IPV6) return IB_TS_RAW; + else if (ibal_qp_type == IB_QPT_RAW_ETHER) return IB_TS_RAW; + else return IB_TS_UD; +} + + +ib_api_status_t +mlnx_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_qp_info_t *p_new_qp = NULL; + HHUL_qp_init_attr_t ul_qp_init_attr; + MT_size_t size; + + FUNC_ENTER; + CL_ASSERT(p_pd_info); + CL_ASSERT(p_umv_buf); + CL_ASSERT(p_create_attr); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + do + { + /* CA should be initialized */ + if (!p_hobul->p_hca_ul_info) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID hca_ul_info buffer\n")); + status = IB_INVALID_CA_HANDLE; + break; + } + + if (!p_hobul->p_hca_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID hca_ul_resources buffer\n")); + status = IB_RESOURCE_BUSY; + break; + } + + if (!p_pd_info->p_pd_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("INVALID pd_ul_resources buffer\n")); + status = IB_RESOURCE_BUSY; + break; + } + + p_new_qp = cl_zalloc (sizeof(mlnx_ual_qp_info_t)); + if (!p_new_qp) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc new QP\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + p_new_qp->p_qp_ul_resources = + cl_zalloc(p_hobul->p_hca_ul_info->qp_ul_resources_sz); + if (!p_new_qp->p_qp_ul_resources) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc new QP UL resources\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + cl_memclr (&ul_qp_init_attr, sizeof (HHUL_qp_init_attr_t)); + + ul_qp_init_attr.qp_cap.max_oust_wr_sq = p_create_attr->sq_depth; + ul_qp_init_attr.qp_cap.max_oust_wr_rq = p_create_attr->rq_depth; + ul_qp_init_attr.qp_cap.max_sg_size_sq = p_create_attr->sq_sge; + ul_qp_init_attr.qp_cap.max_sg_size_rq = p_create_attr->rq_sge; + ul_qp_init_attr.ts_type = map_ibal_qp_type (p_create_attr->qp_type); + ul_qp_init_attr.srq = HHUL_INVAL_SRQ_HNDL; + /* + * save the qp_type to qp_info to use later on + */ + p_new_qp->type = ul_qp_init_attr.ts_type; + ul_qp_init_attr.sq_sig_type = + (p_create_attr->sq_signaled) ? 
VAPI_SIGNAL_ALL_WR:VAPI_SIGNAL_REQ_WR; + ul_qp_init_attr.rq_sig_type = VAPI_SIGNAL_ALL_WR; + ul_qp_init_attr.pd = p_pd_info->hhul_pd_hndl; + ul_qp_init_attr.sq_cq = + ((mlnx_ual_cq_info_t *)(p_create_attr->h_sq_cq))->hhul_cq_hndl; + ul_qp_init_attr.rq_cq = + ((mlnx_ual_cq_info_t *)(p_create_attr->h_rq_cq))->hhul_cq_hndl; + + if (HH_OK != + THHUL_qpm_create_qp_prep (p_hobul->hhul_hca_hndl, + &ul_qp_init_attr, + &p_new_qp->hhul_qp_hndl, + &p_new_qp->ul_qp_cap, + p_new_qp->p_qp_ul_resources)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Calling THHUL_qpm_create_qp_prep Failed\n")); + status = IB_RESOURCE_BUSY; + break; + } + + /* + * Store the parent PD of this QP + */ + p_new_qp->h_uvp_pd = h_uvp_pd; + + size = p_hobul->p_hca_ul_info->qp_ul_resources_sz + + sizeof (u_int32_t) + sizeof (mlnx_ual_qp_info_t *); + p_umv_buf->p_inout_buf = cl_zalloc(size); + if (!p_umv_buf->p_inout_buf) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed alloc user private buffer\n")); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + /* + * We only set the input_size up to qp_ul_resources_sz + sizeof (qp_idx). + * In the rest of the buffer we store the pointer to our allocated + * qp_info struct in order to retrieve it later in the post. + * Resulting layout of p_inout_buf: + * [0 .. qp_ul_resources_sz) - THHUL resources (in/out) + * [qp_ul_resources_sz .. +sizeof(u_int32_t)) - qp_idx, filled in by the kernel + * [size - sizeof(ptr) .. size) - our qp_info pointer (user side only) + */ + p_umv_buf->input_size = p_umv_buf->output_size = + (uint32_t)size - sizeof (mlnx_ual_qp_info_t *); + + cl_memcpy (p_umv_buf->p_inout_buf, + p_new_qp->p_qp_ul_resources, + p_hobul->p_hca_ul_info->qp_ul_resources_sz); + /* + * Store the pointer of our qp_info struct to inout_buf and retrieve + * it later in the post + */ + cl_memcpy ( ( (u_int8_t *)p_umv_buf->p_inout_buf + size - + sizeof (mlnx_ual_qp_info_t *)), + &p_new_qp, + sizeof (mlnx_ual_qp_info_t *)); + p_umv_buf->command = TRUE; + + } while (0); + + /* + * clean up if required + */ + if (IB_SUCCESS != status) + { + if (p_new_qp) + { + if (p_new_qp->hhul_qp_hndl) + { + THHUL_qpm_destroy_qp_done (p_hobul->hhul_hca_hndl, + p_new_qp->hhul_qp_hndl); + } + if (p_new_qp->p_qp_ul_resources) + { + cl_free (p_new_qp->p_qp_ul_resources); + } + cl_free (p_new_qp); + } + if (p_umv_buf->p_inout_buf) + { + cl_free ( p_umv_buf->p_inout_buf ); + } + } + + FUNC_EXIT; + return status; +} + + +void +mlnx_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ) +{ + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void*)h_uvp_pd); + mlnx_ual_hobul_t *p_hobul; + mlnx_ual_qp_info_t *p_new_qp; + MT_size_t buf_size; + + FUNC_ENTER; + CL_ASSERT(p_pd_info); + CL_ASSERT(p_umv_buf); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + buf_size = p_hobul->p_hca_ul_info->qp_ul_resources_sz + + sizeof (u_int32_t) + sizeof (mlnx_ual_qp_info_t *); + + /* Retrieve our qp_info back from priv buffer */ + cl_memcpy (&p_new_qp, ((u_int8_t *)p_umv_buf->p_inout_buf + buf_size - + sizeof (mlnx_ual_qp_info_t *)), sizeof (mlnx_ual_qp_info_t *)); + CL_ASSERT(p_new_qp); + + *ph_uvp_qp = (ib_qp_handle_t) p_new_qp; + + if ( ioctl_status == IB_SUCCESS ) + { + if (IB_SUCCESS != p_umv_buf->status) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad status %ld\n", p_umv_buf->status)); + goto err; + } + else if ((buf_size - sizeof (mlnx_ual_qp_info_t *)) != + p_umv_buf->output_size) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad user priv buffer size exp = %d, res = %ld\n", + buf_size, p_umv_buf->output_size)); + goto err; + } + + cl_memcpy (p_new_qp->p_qp_ul_resources, + p_umv_buf->p_inout_buf, +
p_hobul->p_hca_ul_info->qp_ul_resources_sz); + + cl_memcpy (&p_new_qp->qp_idx, + ((u_int8_t *)p_umv_buf->p_inout_buf + + p_hobul->p_hca_ul_info->qp_ul_resources_sz), + sizeof (u_int32_t)); + + if (HH_OK != + THHUL_qpm_create_qp_done (p_hobul->hhul_hca_hndl, + p_new_qp->hhul_qp_hndl, + p_new_qp->qp_idx, + p_new_qp->p_qp_ul_resources)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Call THHUL_qpm_create_qp_done Failed\n")); + goto err; + } + + CL_TRACE (MLNX_TRACE_LVL_7, mlnx_dbg_lvl, + ("Newly created QP qp_idx 0x%x\n",p_new_qp->qp_idx)); + } + else + { +err: + if (p_new_qp->p_qp_ul_resources) + cl_free (p_new_qp->p_qp_ul_resources); + + cl_free (p_new_qp); + *ph_uvp_qp = NULL; + } + + cl_free (p_umv_buf->p_inout_buf); + p_umv_buf->p_inout_buf = NULL; + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + UNREFERENCED_PARAMETER(h_uvp_qp); + UNREFERENCED_PARAMETER(p_modify_attr); + + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + + /* + * Prepare the buffer to get the qp_state back + */ + p_umv_buf->input_size = sizeof (VAPI_qp_state_t); + p_umv_buf->output_size = p_umv_buf->input_size; + p_umv_buf->command = TRUE; + p_umv_buf->p_inout_buf = cl_zalloc (p_umv_buf->input_size); + if (!p_umv_buf->p_inout_buf) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Failed to allocate mem for priv buffer\n")); + status = IB_INSUFFICIENT_MEMORY; + } + FUNC_EXIT; + return status; +} + + +void +mlnx_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status; + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp); + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + VAPI_qp_state_t cur_qp_state; + + FUNC_ENTER; + CL_ASSERT(p_qp_info); + CL_ASSERT(p_umv_buf); + + p_pd_info = (mlnx_ual_pd_info_t *)((void *) p_qp_info->h_uvp_pd); + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + status = ioctl_status; + + do + { + if (IB_SUCCESS == status) + { + if (IB_SUCCESS != p_umv_buf->status) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad status %ld\n", p_umv_buf->status)); + status = p_umv_buf->status; + break; + } + else if (sizeof (VAPI_qp_state_t) != p_umv_buf->output_size) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Bad user priv buffer size exp = %zd, res = %ld\n", + sizeof (VAPI_qp_state_t), p_umv_buf->output_size)); + status = IB_ERROR; + break; + } + + cl_memcpy (&cur_qp_state, + p_umv_buf->p_inout_buf, + sizeof (VAPI_qp_state_t)); + + CL_TRACE (MLNX_TRACE_LVL_6, mlnx_dbg_lvl, + ("Committed to modify QP to state %d\n", cur_qp_state)); + + if (HH_OK != + THHUL_qpm_modify_qp_done (p_hobul->hhul_hca_hndl, + p_qp_info->hhul_qp_hndl, + cur_qp_state)) + { + status = IB_ERROR; + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("Call THHUL_qpm_modify_qp_done Failed\n")); + break; + } + } + + } while (0); + + cl_free (p_umv_buf->p_inout_buf); + + FUNC_EXIT; + return; +} + + +ib_api_status_t +mlnx_pre_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UNREFERENCED_PARAMETER(h_uvp_qp); + FUNC_ENTER; + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->command = TRUE; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_query_qp ( + IN ib_qp_handle_t 
h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp); + + FUNC_ENTER; + + UNREFERENCED_PARAMETER(p_umv_buf); + + if( ioctl_status == IB_SUCCESS ) + { + p_query_attr->sq_max_inline = p_qp_info->ul_qp_cap.max_inline_data_sq; + p_query_attr->sq_sge = p_qp_info->ul_qp_cap.max_sg_size_sq; + p_query_attr->sq_depth = p_qp_info->ul_qp_cap.max_oust_wr_sq; + p_query_attr->rq_sge = p_qp_info->ul_qp_cap.max_sg_size_rq; + p_query_attr->rq_depth = p_qp_info->ul_qp_cap.max_oust_wr_rq; + } + + FUNC_EXIT; +} + + +ib_api_status_t +mlnx_pre_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp) +{ + UNREFERENCED_PARAMETER(h_uvp_qp); + FUNC_ENTER; + FUNC_EXIT; + return IB_SUCCESS; +} + + +void +mlnx_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status) +{ + mlnx_ual_qp_info_t *p_qp_info = (mlnx_ual_qp_info_t *)((void *) h_uvp_qp); + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + + UNREFERENCED_PARAMETER(ioctl_status); + + FUNC_ENTER; + CL_ASSERT(p_qp_info); + + p_pd_info = (mlnx_ual_pd_info_t *)((void *)p_qp_info->h_uvp_pd); + CL_ASSERT(p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT(p_hobul); + + if (HH_OK != + THHUL_qpm_destroy_qp_done (p_hobul->hhul_hca_hndl, + p_qp_info->hhul_qp_hndl)) + { + CL_TRACE (MLNX_TRACE_LVL_1, mlnx_dbg_lvl, + ("THHUL_destroy_qp_done failed\n")); + } + + if (p_qp_info->p_qp_ul_resources) + { + cl_free (p_qp_info->p_qp_ul_resources); + p_qp_info->p_qp_ul_resources = NULL; + } + + cl_free (p_qp_info); + + FUNC_EXIT; + return; +} + diff --git a/branches/Ndi/hw/mt23108/user/uvpd.rc b/branches/Ndi/hw/mt23108/user/uvpd.rc new file mode 100644 index 00000000..a67b222a --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/uvpd.rc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Tavor HCA User Mode Verb Provider (Debug)" +#define VER_INTERNALNAME_STR "mt23108ud.dll" +#define VER_ORIGINALFILENAME_STR "mt23108ud.dll" +#else +#define VER_FILEDESCRIPTION_STR "Tavor HCA User Mode Verb Provider" +#define VER_INTERNALNAME_STR "mt23108u.dll" +#define VER_ORIGINALFILENAME_STR "mt23108u.dll" +#endif + +#include diff --git a/branches/Ndi/hw/mt23108/user/uvpd_exports.src b/branches/Ndi/hw/mt23108/user/uvpd_exports.src new file mode 100644 index 00000000..96e58172 --- /dev/null +++ b/branches/Ndi/hw/mt23108/user/uvpd_exports.src @@ -0,0 +1,10 @@ +#if DBG +LIBRARY mt23108ud.dll +#else +LIBRARY mt23108u.dll +#endif + +#ifndef _WIN64 +EXPORTS +uvp_get_interface +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.c new file mode 100644 index 00000000..cf3e1885 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_HH_C + + +#include +#include + +static HH_hca_dev_t HH_hca_dev_tbl[MAX_HCA_DEV_NUM]; +static HH_hca_dev_t* const devBegin = &HH_hca_dev_tbl[0]; +static HH_hca_dev_t* const devEnd = &HH_hca_dev_tbl[0] + MAX_HCA_DEV_NUM; + +static HH_if_ops_t invalid_ops; +static HH_if_ops_t zombie_ops; + +/* forward declarations */ +static HH_hca_dev_t* find_free_dev(void); +static void init_trivial_ops(void); + + +/************************************************************************/ +/* To ease debugging, hook for trivial return statement. + * Otherwise, the calling stack through macros may be hard to get.
+ */ +static inline HH_ret_t self_return(HH_ret_t rc) +{ return rc; } + + + +/************************************************************************/ +HH_ret_t HH_add_hca_dev( + HH_hca_dev_t* dev_info, + HH_hca_hndl_t* hca_hndl_p +) +{ + HH_ret_t rc = HH_EAGAIN; + HH_hca_dev_t* dev = find_free_dev(); + MTL_TRACE4(MT_FLFMT("HH_add_hca_dev")); + if (dev != devEnd) + { + rc = HH_OK; + *dev = *dev_info; /* the whole struct */ + MTL_DEBUG4(MT_FLFMT("dev=0x%p, desc=%s res_sz:{hca="SIZE_T_FMT", pd="SIZE_T_FMT", cq="SIZE_T_FMT", qp="SIZE_T_FMT"}"), + dev, dev->dev_desc, + dev->hca_ul_resources_sz, dev->pd_ul_resources_sz, + dev->cq_ul_resources_sz, dev->qp_ul_resources_sz); + *hca_hndl_p = dev; + } + return rc; +} /* HH_add_hca_dev */ + + +/************************************************************************/ +HH_ret_t HH_rem_hca_dev(HH_hca_hndl_t hca_hndl) +{ + HH_ret_t rc = ((devBegin <= hca_hndl) && (hca_hndl < devEnd) && + (hca_hndl->if_ops != NULL) + ? HH_OK : HH_ENODEV); +/***TODO: There needs to be a method for reclaiming zombie entries. +This is needed because the entries are marked as zombie when the HCA +receives an IRP_MN_STOP_DEVICE IRP (which doesn't unload the driver), +and a new entry is consumed when the device receives the +IRP_MN_START_DEVICE IRP. +***/ + if (rc == HH_OK) + { + hca_hndl->status = HH_HCA_STATUS_ZOMBIE; + hca_hndl->if_ops = &zombie_ops; + } + return rc; +} /* HH_rem_hca_dev */ + + +/************************************************************************/ +HH_ret_t HH_list_hcas(u_int32_t buf_entries, + u_int32_t* num_of_hcas_p, + HH_hca_hndl_t* hca_list_buf_p) +{ + HH_ret_t rc = ((hca_list_buf_p != NULL) || (buf_entries == 0) + ? HH_OK : HH_EINVAL); + u_int32_t nActual = 0; + HH_hca_dev_t* dev = devBegin; + + MTL_DEBUG4(MT_FLFMT("HH_list_hcas: buf_entries=%d, N="VIRT_ADDR_FMT", devBegin=0x%p"), + buf_entries, devEnd-devBegin, devBegin); + for (; dev != devEnd; ++dev) + { + HH_if_ops_t* dev_if_ops = dev->if_ops; + if ((dev_if_ops != &invalid_ops) && (dev_if_ops != &zombie_ops) ) + { + if (buf_entries <= nActual) /* == even would be sufficient... 
*/ + { + hca_list_buf_p = NULL; /* user supplied buffer exceeded */ + rc = HH_EAGAIN; + } + ++nActual; + if (hca_list_buf_p) + { + MTL_DEBUG4(MT_FLFMT("nActual=%d, dev=%p"), nActual, dev); + *hca_list_buf_p++ = dev; + MTL_DEBUG4(MT_FLFMT("dev=0x%p, res_sz:{hca="SIZE_T_FMT", pd="SIZE_T_FMT", cq="SIZE_T_FMT", qp="SIZE_T_FMT"}"), + dev, dev->hca_ul_resources_sz, dev->pd_ul_resources_sz, + dev->cq_ul_resources_sz, dev->qp_ul_resources_sz); + } + } + } + *num_of_hcas_p = nActual; /* valid result, even if rc != HH_OK */ + return rc; +} /* HH_list_hcas */ + +/************************************************************************/ +HH_ret_t HH_lookup_hca(const char * name, HH_hca_hndl_t* hca_handle_p) +{ + HH_hca_dev_t* dev; + + if (!hca_handle_p) + return HH_EINVAL; + + for (dev = devBegin; dev != devEnd; ++dev) + { + HH_if_ops_t* dev_if_ops = dev->if_ops; + if ((dev_if_ops != &invalid_ops) && (dev_if_ops != &zombie_ops) ) + { + if (!strcmp(name, dev->dev_desc)) { + *hca_handle_p = dev; + return HH_OK; + } + } + } + return HH_ENODEV; +} + +/************************************************************************/ +/************************************************************************/ +/** Internal functions **/ + +/************************************************************************/ +static HH_hca_dev_t* find_free_dev() +{ + HH_hca_dev_t* dev = devBegin; + while ((dev != devEnd) && (dev->if_ops != &invalid_ops)) + { + ++dev; + } + MTL_TRACE4("%s[%d]%s(): devB=%p, devE=%p, dev=%p\n", + __FILE__, __LINE__, __FUNCTION__, devBegin, devEnd, dev); + return dev; +} /* find_free_dev */ + + +#include "invalid.ic" +#include "zombie.ic" +#include "hhenosys.ic" + +/************************************************************************/ +void HH_ifops_tbl_set_enosys(HH_if_ops_t* tbl) +{ + enosys_init(tbl); +} + + +/************************************************************************/ +static void init_trivial_ops(void) +{ + static int beenThereDoneThat = 0; + if (beenThereDoneThat == 0) + { + beenThereDoneThat = 1; + invalid_init(&invalid_ops); + zombie_init(&zombie_ops); + } +} /* init_trivial_ops */ + + + +/* Dummy function */ +HH_ret_t HHIF_dummy() +{ + return(HH_OK); +} + + +#ifdef MT_KERNEL + + +MODULE_LICENSE("GPL"); +int init_hh_driver(void) +{ + HH_hca_dev_t* dev = devBegin; + MTL_TRACE('1', "%s: installing hh_mod\n", __FUNCTION__); + init_trivial_ops(); + while (dev != devEnd) + { + dev->if_ops = &invalid_ops; + ++dev; + } + return(0); +} + +void cleanup_hh_driver(void) +{ + MTL_TRACE('1', "%s: remove hh_mod\n", __FUNCTION__); + return; +} + + +#if defined( __WIN__ ) + +int HH_init_module(void) +{ + return( init_hh_driver() ); +} + +void HH_cleanup_module(void) +{ + cleanup_hh_driver(); +} + +#elif !defined( VXWORKS_OS ) + +int init_module(void) +{ + return(init_hh_driver()); +} + +void cleanup_module(void) +{ + cleanup_hh_driver(); + return; +} + +#endif + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.h new file mode 100644 index 00000000..e4f280aa --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh.h @@ -0,0 +1,867 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_HH_H +#define H_HH_H + +#include +#include +#include +#include +#include + +/* + * Global defines + * + */ + + +/* + * Typedefs + * + */ + +typedef enum +{ + HH_HCA_STATUS_INVALID, /* Invalid context entry */ + HH_HCA_STATUS_ZOMBIE, /* HCA was removed but still has outstanding resources */ + HH_HCA_STATUS_CLOSED, + HH_HCA_STATUS_OPENED +} HH_hca_status_t; + + +/* + * Device information (public object/context) + * + */ +#pragma warning( disable : 4201 ) +typedef struct HH_hca_dev_st { + char * __ptr64 dev_desc; /* Device description (name, etc.) */ + char * __ptr64 user_lib; /* User level library (dyn-link) */ + u_int32_t vendor_id; /* IEEE's 24 bit Device Vendor ID */ + u_int32_t dev_id; /* Device ID */ + u_int32_t hw_ver; /* Hardware version (step/rev) */ + u_int64_t fw_ver; + struct hh_if_ops* __ptr64 if_ops; /* Interface operations */ + + /* Size (bytes) of user-level ... */ + union + { + MT_size_t hca_ul_resources_sz; /* .. resources context for an HCA */ + void* __ptr64 resv0; + }; + union + { + MT_size_t pd_ul_resources_sz; /* .. resources context for a PD */ + void* __ptr64 resv1; + }; + union + { + MT_size_t cq_ul_resources_sz; /* .. resources context for a CQ */ + void* __ptr64 resv2; + }; + union + { + MT_size_t srq_ul_resources_sz; /* .. resources context for a SRQ */ + void* __ptr64 resv3; + }; + union + { + MT_size_t qp_ul_resources_sz; /* .. resources context for a QP */ + void* __ptr64 resv4; + }; + + void* __ptr64 device; /* Device private data */ + HH_hca_status_t status; /* Device Status */ +} HH_hca_dev_t; +#pragma warning( default : 4201 ) + +typedef enum { + HH_TPT_PAGE, + HH_TPT_BUF, + HH_TPT_IOBUF} HH_tpt_type_t; + +typedef struct { + + HH_tpt_type_t tpt_type; + + MT_size_t num_entries; /* Num. of entries */ + union { + struct { + VAPI_phy_addr_t *phys_page_lst; /* Array of physical addrs. per page */ + u_int8_t page_shift; /* log2 page size in this page list */ + } page_lst; + struct { + VAPI_phy_addr_t *phys_buf_lst; /* Array of physical addrs. 
per buffer (sys page-sz aligned) */ + VAPI_size_t *buf_sz_lst; /* Size of each buffer (sys page-sz multiple) */ + VAPI_phy_addr_t iova_offset; /* Offset of start IOVA in first buffer */ + } buf_lst; + MOSAL_iobuf_t iobuf; /* HH_TPT_IOBUF */ + } tpt; +} HH_tpt_t; + + +typedef struct HH_mr_st { /* Memory region registration data */ + IB_virt_addr_t start; /* Start virtual address (byte addressing) */ + VAPI_size_t size; /* Size in bytes */ + HH_pd_hndl_t pd; /* PD handle for new memory region */ + VAPI_mrw_acl_t acl; /* Access control (R/W permission local/remote) */ + HH_tpt_t tpt; /* TPT - physical addresses list */ +} HH_mr_t; + + +typedef struct HH_smr_st { /* Shared Memory region registration data */ + VAPI_lkey_t lkey; /* L-Key of the region to share with */ + IB_virt_addr_t start; /* This region start virtual addr */ + HH_pd_hndl_t pd ; /* PD handle for new memory region */ + VAPI_mrw_acl_t acl; /* Access control (R/W permission local/remote) */ +} HH_smr_t; + + +typedef struct HH_mr_info_st { + VAPI_lkey_t lkey; /* Local region key */ + VAPI_rkey_t rkey; /* Remote region key */ + IB_virt_addr_t local_start; /* Actually enforced local access lower limit */ + VAPI_size_t local_size; /* Actually enforced size for local access */ + IB_virt_addr_t remote_start; /* Actually enforced remote access lower limit + * (valid only if remote access allowed) */ + VAPI_size_t remote_size; /* Actually enforced size for remote access + * (valid only if remote access allowed) */ + HH_pd_hndl_t pd; /* PD handle for new memory region */ + VAPI_mrw_acl_t acl; /* Access control (R/W local/remote) */ +} HH_mr_info_t; + + + +/* + * Queue Pairs + * + */ + +/* Initial Attributes passed during creation */ +typedef struct HH_qp_init_attr_st { + VAPI_ts_type_t ts_type; /* Transport Type */ + HH_pd_hndl_t pd; /* Protection Domain for this QP */ + HH_rdd_hndl_t rdd; /* Reliable Datagram Domain (RD only) */ + HH_srq_hndl_t srq; + + HH_cq_hndl_t sq_cq; /* Send Queue Completion Queue Number */ + HH_cq_hndl_t rq_cq; /* Receive Queue Completion Queue Number */ + + VAPI_sig_type_t sq_sig_type; /* Signal Type for Send Queue */ + VAPI_sig_type_t rq_sig_type; /* Signal Type for Receive Queue */ + + VAPI_qp_cap_t qp_cap; /* Capabilities (Max(outstand)+max(S/G)) */ + +} HH_qp_init_attr_t; + + +/* HH Event Records */ +typedef struct { + VAPI_event_record_type_t etype; /* event record type - see vapi.h */ + VAPI_event_syndrome_t syndrome; /* syndrome value (for fatal error) */ + union { + IB_wqpn_t qpn; /* Affiliated QP Number */ + HH_srq_hndl_t srq; /* Affiliated SRQ handle */ + IB_eecn_t eecn; /* Affiliated EEC Number */ + HH_cq_hndl_t cq; /* Affiliated CQ handle */ + IB_port_t port; /* Affiliated Port Number */ + } event_modifier; +} HH_event_record_t; + + +typedef void (*HH_async_eventh_t)(HH_hca_hndl_t, + HH_event_record_t *, + void* private_context); +typedef void (*HH_comp_eventh_t)(HH_hca_hndl_t, + HH_cq_hndl_t, + void* private_context); + +/***************************/ +/* HH API Function mapping */ +/***************************/ + + +typedef struct hh_if_ops { + + /* Global HCA resources */ + /************************/ + + HH_ret_t (*HHIF_open_hca)(HH_hca_hndl_t hca_hndl, + EVAPI_hca_profile_t *prop_props_p, + EVAPI_hca_profile_t *sugg_props_p); + + HH_ret_t (*HHIF_close_hca)(HH_hca_hndl_t hca_hndl); + + HH_ret_t (*HHIF_alloc_ul_resources)(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* hca_ul_resources_p); + + HH_ret_t (*HHIF_free_ul_resources)(HH_hca_hndl_t hca_hndl, + void* hca_ul_resources_p); + +
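+ /* Note: every HH call is dispatched through HH_hca_dev_t.if_ops. hh.c + * installs a device's table via HH_add_hca_dev() and points free or + * removed slots at the invalid_ops/zombie_ops stub tables instead. */ +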
HH_ret_t (*HHIF_query_hca)(HH_hca_hndl_t hca_hndl, + VAPI_hca_cap_t* hca_cap_p); + + HH_ret_t (*HHIF_modify_hca)(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_attr_t* hca_attr_p, + VAPI_hca_attr_mask_t* hca_attr_mask_p); + + HH_ret_t (*HHIF_query_port_prop)(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_port_t* hca_port_p); + + HH_ret_t (*HHIF_get_pkey_tbl)(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_pkey_t* pkey_tbl_p); + + HH_ret_t (*HHIF_get_gid_tbl)(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* gid_tbl_p); + + HH_ret_t (*HHIF_get_lid)(HH_hca_hndl_t hca_hndl, + IB_port_t port, + IB_lid_t* lid_p, + u_int8_t* lmc_p); + + + /* Protection Domain */ + /*********************/ + + HH_ret_t (*HHIF_alloc_pd)(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t prot_ctx, + void * pd_ul_resources_p, + HH_pd_hndl_t *pd_num_p); + + HH_ret_t (*HHIF_free_pd)(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd); + + + /* Reliable Datagram Domain */ + /****************************/ + + HH_ret_t (*HHIF_alloc_rdd)(HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t* rdd_p); + + HH_ret_t (*HHIF_free_rdd)(HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd); + + + /* Privileged UD AV */ + /********************/ + + HH_ret_t (*HHIF_create_priv_ud_av)(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_ud_av_t* av_p, + HH_ud_av_hndl_t* ah_p); + + HH_ret_t (*HHIF_modify_priv_ud_av)(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p); + + HH_ret_t (*HHIF_query_priv_ud_av)(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p); + + HH_ret_t (*HHIF_destroy_priv_ud_av)(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah); + + + /* Memory Regions/Windows */ + /**************************/ + + HH_ret_t (*HHIF_register_mr)(HH_hca_hndl_t hca_hndl, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p); + + HH_ret_t (*HHIF_reregister_mr)(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + VAPI_mr_change_t change_mask, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p); + + HH_ret_t (*HHIF_register_smr)(HH_hca_hndl_t hca_hndl, + HH_smr_t* smr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p); + + HH_ret_t (*HHIF_deregister_mr)(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey); + + HH_ret_t (*HHIF_query_mr)(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + HH_mr_info_t* mr_info_p); + + HH_ret_t (*HHIF_alloc_mw)(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + IB_rkey_t* initial_rkey_p); + + HH_ret_t (*HHIF_query_mw)(HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey, + IB_rkey_t* current_rkey_p, + HH_pd_hndl_t *pd); + + HH_ret_t (*HHIF_free_mw)(HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey); + + /* Fast Memory Regions */ + /***********************/ + HH_ret_t (*HHIF_alloc_fmr)(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_mrw_acl_t acl, + MT_size_t max_pages, /* Maximum number of pages that can be mapped using this region */ + u_int8_t log2_page_sz, /* Fixed page size for all maps on a given FMR */ + VAPI_lkey_t* last_lkey_p); /* To be used as the initial FMR handle */ + + HH_ret_t (*HHIF_map_fmr)(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t last_lkey, + EVAPI_fmr_map_t* map_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p); + + HH_ret_t (*HHIF_unmap_fmr)(HH_hca_hndl_t hca_hndl, + u_int32_t num_of_fmrs_to_unmap, + VAPI_lkey_t* last_lkeys_array); + + HH_ret_t (*HHIF_free_fmr)(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t last_lkey); /* as returned on last successful mapping operation */ + + + /* 
Completion Queues */ + /*********************/ + + HH_ret_t (*HHIF_create_cq)(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* cq_ul_resources_p, + HH_cq_hndl_t* cq); + + HH_ret_t (*HHIF_resize_cq)(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + void* cq_ul_resources_p); + + HH_ret_t (*HHIF_query_cq)(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + VAPI_cqe_num_t* num_o_cqes_p); + + HH_ret_t (*HHIF_destroy_cq)(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq); + + +#if defined(MT_SUSPEND_QP) + HH_ret_t (*HHIF_suspend_cq)(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + MT_bool do_suspend); +#endif + + /* Queue Pairs */ + /***************/ + + HH_ret_t (*HHIF_create_qp)(HH_hca_hndl_t hca_hndl, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* qpn_p); + + HH_ret_t (*HHIF_get_special_qp)(HH_hca_hndl_t hca_hndl, + VAPI_special_qp_t qp_type, + IB_port_t port, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* sqp_hndl_p); + + HH_ret_t (*HHIF_modify_qp)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + VAPI_qp_state_t cur_qp_state, + VAPI_qp_attr_t* qp_attr_p, + VAPI_qp_attr_mask_t* qp_attr_mask_p); + + HH_ret_t (*HHIF_query_qp)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + VAPI_qp_attr_t* qp_attr_p); + + HH_ret_t (*HHIF_destroy_qp)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num); + +#if defined(MT_SUSPEND_QP) + HH_ret_t (*HHIF_suspend_qp)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + MT_bool suspend_flag); +#endif + + + /* Shared Receive Queue (SRQ) */ + /******************************/ + + HH_ret_t (*HHIF_create_srq)(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + void *srq_ul_resources_p, + HH_srq_hndl_t *srq_p); + + HH_ret_t (*HHIF_query_srq)(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq, + u_int32_t *limit_p); + + HH_ret_t (*HHIF_modify_srq)(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq, + void *srq_ul_resources_p); + + HH_ret_t (*HHIF_destroy_srq)(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq); + + + /* End to End Context */ + /**********************/ + + HH_ret_t (*HHIF_create_eec)(HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd, + IB_eecn_t* eecn_p); + + HH_ret_t (*HHIF_modify_eec)(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_state_t cur_ee_state, + VAPI_qp_attr_t* ee_attr_p, + VAPI_qp_attr_mask_t* ee_attr_mask_p); + + HH_ret_t (*HHIF_query_eec)(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_attr_t* ee_attr_p); + + HH_ret_t (*HHIF_destroy_eec)(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn); + + + /* Event Handler Calls */ + /***********************/ + + HH_ret_t (*HHIF_set_async_eventh)(HH_hca_hndl_t hca_hndl, + HH_async_eventh_t handler, + void* private_context); + + HH_ret_t (*HHIF_set_comp_eventh)(HH_hca_hndl_t hca_hndl, + HH_comp_eventh_t handler, + void* private_context); + + + /* Multicast Groups */ + /********************/ + + HH_ret_t (*HHIF_attach_to_multicast)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid); + + HH_ret_t (*HHIF_detach_from_multicast)(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid); + + + /* Local MAD processing */ + HH_ret_t (*HHIF_process_local_mad)(HH_hca_hndl_t hca_hndl, + IB_port_t port, + IB_lid_t slid, + EVAPI_proc_mad_opt_t proc_mad_opts, + void* mad_in_p, + void* mad_out_p); + + + HH_ret_t (*HHIF_ddrmm_alloc)(HH_hca_hndl_t hca_hndl, + VAPI_size_t size, + u_int8_t align_shift, + VAPI_phy_addr_t* buf_p); + + HH_ret_t (*HHIF_ddrmm_query)(HH_hca_hndl_t hca_hndl, + u_int8_t align_shift, + VAPI_size_t* total_mem, + VAPI_size_t* free_mem, + VAPI_size_t* largest_chunk, + VAPI_phy_addr_t* 
largest_free_addr_p); + + + HH_ret_t (*HHIF_ddrmm_free)(HH_hca_hndl_t hca_hndl, + VAPI_phy_addr_t buf, + VAPI_size_t size); + + +} HH_if_ops_t; + + +/* + * HH Functions mapping definition + */ + + +/* Global HCA resources */ +/************************/ +#define HH_open_hca(hca_hndl, prop_props_p, sugg_props_p) \ + (hca_hndl)->if_ops->HHIF_open_hca(hca_hndl, prop_props_p, sugg_props_p) + +#define HH_close_hca(hca_hndl) \ + (hca_hndl)->if_ops->HHIF_close_hca(hca_hndl) + +#define HH_alloc_ul_resources(hca_hndl, usr_prot_ctx, hca_ul_resources_p) \ + (hca_hndl)->if_ops->HHIF_alloc_ul_resources(hca_hndl, usr_prot_ctx, hca_ul_resources_p) + +#define HH_free_ul_resources(hca_hndl, hca_ul_resources_p) \ + (hca_hndl)->if_ops->HHIF_free_ul_resources(hca_hndl, hca_ul_resources_p) + +#define HH_query_hca(hca_hndl, hca_cap_p) \ + (hca_hndl)->if_ops->HHIF_query_hca(hca_hndl, hca_cap_p) + +#define HH_modify_hca(\ + hca_hndl, port_num, hca_attr_p, hca_attr_mask_p) \ + (hca_hndl)->if_ops->HHIF_modify_hca(\ + hca_hndl, port_num, hca_attr_p, hca_attr_mask_p) + +#define HH_query_port_prop(hca_hndl, port_num, hca_port_p) \ + (hca_hndl)->if_ops->HHIF_query_port_prop(hca_hndl, port_num, hca_port_p) + +#define HH_get_pkey_tbl(\ + hca_hndl, port_num, tbl_len_in, tbl_len_out, pkey_tbl_p) \ + (hca_hndl)->if_ops->HHIF_get_pkey_tbl(\ + hca_hndl, port_num, tbl_len_in, tbl_len_out, pkey_tbl_p) + +#define HH_get_gid_tbl(\ + hca_hndl, port_num, tbl_len_in, tbl_len_out, pkey_tbl_p) \ + (hca_hndl)->if_ops->HHIF_get_gid_tbl(\ + hca_hndl, port_num, tbl_len_in, tbl_len_out, pkey_tbl_p) + +#define HH_get_lid(\ + hca_hndl, port, lid_p, lmc_p) \ + (hca_hndl)->if_ops->HHIF_get_lid(\ + hca_hndl, port, lid_p, lmc_p) + + + +/* Protection Domain */ +/*********************/ +#define HH_alloc_pd(hca_hndl, prot_ctx, pd_ul_resources_p, pd_num_p) \ + (hca_hndl)->if_ops->HHIF_alloc_pd(hca_hndl, prot_ctx, pd_ul_resources_p, pd_num_p) + +#define HH_free_pd(hca_hndl, pd) \ + (hca_hndl)->if_ops->HHIF_free_pd(hca_hndl, pd) + + + +/* Reliable Datagram Domain */ +/****************************/ +#define HH_alloc_rdd(hca_hndl, rdd_p) \ + (hca_hndl)->if_ops->HHIF_alloc_rdd(hca_hndl, rdd_p) + +#define HH_free_rdd(hca_hndl, rdd) \ + (hca_hndl)->if_ops->HHIF_free_rdd(hca_hndl, rdd) + + + +/* Privileged UD AV */ +/********************/ +#define HH_create_priv_ud_av(hca_hndl, pd, av_p, ah_p) \ + (hca_hndl)->if_ops->HHIF_create_priv_ud_av(hca_hndl, pd, av_p, ah_p) + +#define HH_modify_priv_ud_av(hca_hndl, ah, av_p) \ + (hca_hndl)->if_ops->HHIF_modify_priv_ud_av(hca_hndl, ah, av_p) + +#define HH_query_priv_ud_av(hca_hndl, ah, av_p) \ + (hca_hndl)->if_ops->HHIF_query_priv_ud_av(hca_hndl, ah, av_p) + +#define HH_destroy_priv_ud_av(hca_hndl, ah) \ + (hca_hndl)->if_ops->HHIF_destroy_priv_ud_av(hca_hndl, ah) + + + +/* Memory Regions/Windows */ +/**************************/ +#define HH_register_mr(\ + hca_hndl, mr_props_p, lkey_p, rkey_p) \ + (hca_hndl)->if_ops->HHIF_register_mr(\ + hca_hndl, mr_props_p, lkey_p, rkey_p) + +#define HH_reregister_mr(\ + hca_hndl, lkey, change_mask, mr_props_p, lkey_p, rkey_p) \ + (hca_hndl)->if_ops->HHIF_reregister_mr(\ + hca_hndl, lkey, change_mask, mr_props_p, lkey_p, rkey_p) + +#define HH_register_smr(\ + hca_hndl, smr_props_p, lkey_p, rkey_p) \ + (hca_hndl)->if_ops->HHIF_register_smr(\ + hca_hndl, smr_props_p, lkey_p, rkey_p) + +#define HH_deregister_mr(hca_hndl, lkey) \ + (hca_hndl)->if_ops->HHIF_deregister_mr(hca_hndl, lkey) + +#define HH_query_mr(hca_hndl, lkey, mr_info_p) \ + (hca_hndl)->if_ops->HHIF_query_mr(hca_hndl, lkey, 
mr_info_p) + +#define HH_alloc_mw(hca_hndl, pd, initial_rkey_p) \ + (hca_hndl)->if_ops->HHIF_alloc_mw(hca_hndl, pd, initial_rkey_p) + +#define HH_query_mw(hca_hndl, initial_rkey, current_rkey_p, pd_p) \ + (hca_hndl)->if_ops->HHIF_query_mw(hca_hndl, initial_rkey, current_rkey_p, pd_p) + +#define HH_free_mw(hca_hndl, initial_rkey) \ + (hca_hndl)->if_ops->HHIF_free_mw(hca_hndl, initial_rkey) + + +/* Fast Memory Regions */ +/***********************/ +#define HH_alloc_fmr(hca_hndl,pd,acl,max_pages,log2_page_sz,last_lkey_p) \ + (hca_hndl)->if_ops->HHIF_alloc_fmr(hca_hndl,pd,acl,max_pages,log2_page_sz,last_lkey_p) + +#define HH_map_fmr(hca_hndl,last_lkey,map_p,lkey_p,rkey_p) \ + (hca_hndl)->if_ops->HHIF_map_fmr(hca_hndl,last_lkey,map_p,lkey_p,rkey_p) + +#define HH_unmap_fmr(hca_hndl,num_of_fmrs_to_unmap,last_lkeys_array) \ + (hca_hndl)->if_ops->HHIF_unmap_fmr(hca_hndl,num_of_fmrs_to_unmap,last_lkeys_array) + +#define HH_free_fmr(hca_hndl,last_lkey) \ + (hca_hndl)->if_ops->HHIF_free_fmr(hca_hndl,last_lkey) + + +/* Completion Queues */ +/*********************/ +#define HH_create_cq(hca_hndl, usr_prot_ctx, cq_ul_resources_p, cq) \ + (hca_hndl)->if_ops->HHIF_create_cq(hca_hndl, usr_prot_ctx, cq_ul_resources_p, cq) + +#define HH_resize_cq(hca_hndl, cq, cq_ul_resources_p) \ + (hca_hndl)->if_ops->HHIF_resize_cq(hca_hndl, cq, cq_ul_resources_p) + +#define HH_query_cq(hca_hndl, cq, num_o_cqes_p) \ + (hca_hndl)->if_ops->HHIF_query_cq(hca_hndl, cq, num_o_cqes_p) + +#define HH_destroy_cq(hca_hndl, cq) \ + (hca_hndl)->if_ops->HHIF_destroy_cq(hca_hndl, cq) + + + +/* Queue Pairs */ +/***************/ +#define HH_create_qp(\ + hca_hndl, init_attr_p, qp_ul_resources_p, qpn_p) \ + (hca_hndl)->if_ops->HHIF_create_qp(\ + hca_hndl, init_attr_p, qp_ul_resources_p, qpn_p) + +#define HH_get_special_qp(\ + hca_hndl, qp_type, port, init_attr_p, qp_ul_resources_p, sqp_hndl_p) \ + (hca_hndl)->if_ops->HHIF_get_special_qp(\ + hca_hndl, qp_type, port, init_attr_p, qp_ul_resources_p, sqp_hndl_p) + +#define HH_modify_qp(\ + hca_hndl, qp_num, cur_qp_state, qp_attr_p, qp_attr_mask_p) \ + (hca_hndl)->if_ops->HHIF_modify_qp(\ + hca_hndl, qp_num, cur_qp_state, qp_attr_p, qp_attr_mask_p) + +#define HH_query_qp(hca_hndl, qp_num, qp_attr_p) \ + (hca_hndl)->if_ops->HHIF_query_qp(hca_hndl, qp_num, qp_attr_p) + +#define HH_destroy_qp(hca_hndl, qp_num) \ + (hca_hndl)->if_ops->HHIF_destroy_qp(hca_hndl, qp_num) + + + /* Shared Receive Queue (SRQ) */ + /******************************/ + +#define HH_create_srq(hca_hndl, pd, srq_ul_resources_p,srq_p) \ + (hca_hndl)->if_ops->HHIF_create_srq(hca_hndl, pd, srq_ul_resources_p,srq_p) + +#define HH_query_srq(hca_hndl, srq, limit_p) \ + (hca_hndl)->if_ops->HHIF_query_srq(hca_hndl, srq, limit_p) + +#define HH_modify_srq(hca_hndl, srq, srq_ul_resources_p) \ + (hca_hndl)->if_ops->HHIF_modify_srq(hca_hndl, srq, srq_ul_resources_p) + +#define HH_destroy_srq(hca_hndl, srq) \ + (hca_hndl)->if_ops->HHIF_destroy_srq(hca_hndl, srq) + + +/* End to End Context */ +/**********************/ +#define HH_create_eec(hca_hndl, rdd, eecn_p) \ + (hca_hndl)->if_ops->HHIF_create_eec(hca_hndl, rdd, eecn_p) + +#define HH_modify_eec(\ + hca_hndl, eecn, cur_ee_state, ee_attr_p, ee_attr_mask_p) \ + (hca_hndl)->if_ops->HHIF_modify_eec(\ + hca_hndl, eecn, cur_ee_state, ee_attr_p, ee_attr_mask_p) + +#define HH_query_eec(hca_hndl, eecn, ee_attr_p) \ + (hca_hndl)->if_ops->HHIF_query_eec(hca_hndl, eecn, ee_attr_p) + +#define HH_destroy_eec(hca_hndl, eecn) \ + (hca_hndl)->if_ops->HHIF_destroy_eec(hca_hndl, eecn) + + + +/* Event Handler 
Calls */ +/***********************/ +#define HH_set_async_eventh(hca_hndl, handler, private_context) \ + (hca_hndl)->if_ops->HHIF_set_async_eventh(hca_hndl, handler, private_context) + +#define HH_set_comp_eventh(hca_hndl, handler, private_context) \ + (hca_hndl)->if_ops->HHIF_set_comp_eventh(hca_hndl, handler, private_context) + + + +/* Multicast Groups */ +/********************/ +#define HH_attach_to_multicast(hca_hndl, qpn, dgid) \ + (hca_hndl)->if_ops->HHIF_attach_to_multicast(hca_hndl, qpn, dgid) + +#define HH_detach_from_multicast(hca_hndl, qpn, dgid) \ + (hca_hndl)->if_ops->HHIF_detach_from_multicast(hca_hndl, qpn, dgid) + +/* Local MAD processing */ +/************************/ +#define HH_process_local_mad(\ + hca_hndl, port, slid, flags, mad_in_p, mad_out_p) \ + (hca_hndl)->if_ops->HHIF_process_local_mad(\ + hca_hndl, port, slid, flags, mad_in_p, mad_out_p) + +#define HH_ddrmm_alloc(\ + hca_hndl, size, align_shift, buf_p) \ + (hca_hndl)->if_ops->HHIF_ddrmm_alloc(\ + hca_hndl, size, align_shift, buf_p) + +#define HH_ddrmm_query(\ + hca_hndl, align_shift, total_mem, free_mem, largest_chunk,largest_free_addr_p) \ + (hca_hndl)->if_ops->HHIF_ddrmm_query(\ + hca_hndl, align_shift, total_mem, free_mem, largest_chunk,largest_free_addr_p) + +#define HH_ddrmm_free(\ + hca_hndl, buf, size) \ + (hca_hndl)->if_ops->HHIF_ddrmm_free(\ + hca_hndl, buf, size) + +#if defined(MT_SUSPEND_QP) +#define HH_suspend_qp(\ + hca_hndl, qp_num, suspend_flag) \ + (hca_hndl)->if_ops->HHIF_suspend_qp(\ + hca_hndl, qp_num, suspend_flag) +#define HH_suspend_cq(\ + hca_hndl, cq, do_suspend) \ + (hca_hndl)->if_ops->HHIF_suspend_cq(\ + hca_hndl, cq, do_suspend) +#endif + + + + +extern HH_ret_t HHIF_dummy(void); + + +/**************************************************************************** + * Function: HH_add_hca_dev + * + * Arguments: + * dev_info (IN) : device data (with strings not copied). + * hca_hndl_p (OUT) : Allocated HCA object in HH + * + * Returns: HH_OK : operation successful. + * HH_EAGAIN : not enough resources. + * + * Description: + * Add a new HCA device to the HH layer. This function should be called by an + * HCA device driver for each new device it finds and sets up. + * Important: + * Though the given HCA handle is a pointer to HH_hca_dev_t, do NOT try to + * copy the HH_hca_dev_t and use a pointer to the copy. The original + * pointer/handle must be used ! + * + ****************************************************************************/ +extern HH_ret_t HH_add_hca_dev(HH_hca_dev_t* dev_info, HH_hca_hndl_t* hca_hndl_p); + + +/**************************************************************************** + * Function: HH_rem_hca_dev + * + * Arguments: + * hca_hndl (IN) : HCA to remove + * + * Returns: HH_OK : operation successful. + * HH_ENODEV : no such device. + * + * Description: + * Remove the given HCA from HH. This function should be called by an + * HCA device driver for each of its devices upon cleanup (or for a + * specific device which was "hot-removed"). + * + ****************************************************************************/ +extern HH_ret_t HH_rem_hca_dev(HH_hca_hndl_t hca_hndl); + +/***************************************************************************** + * Function: HH_lookup_hca + * + * Arguments: + * name (IN) : device name + * hca_handle_p (OUT) : HCA handle + * + * Returns: HH_OK : operation successful. + * HH_ENODEV : no such device.
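+ *
+ * Description:
+ *   Resolve a registered device name to its HH handle. A minimal usage
+ *   sketch (the device name string here is an assumption for the example):
+ *
+ *     HH_hca_hndl_t hca;
+ *     if (HH_lookup_hca("InfiniHost0", &hca) != HH_OK)
+ *       return HH_ENODEV;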
+ * + *****************************************************************************/ +extern HH_ret_t HH_lookup_hca(const char * name, HH_hca_hndl_t* hca_handle_p); + + +/***************************************************************************** + * Function: HH_list_hcas + * + * Arguments: + * buf_entries(IN) : Number of entries in given buffer + * num_of_hcas_p(OUT) : Actual number of currently available HCAs + * hca_list_buf_p(OUT) : A buffer of buf_entries entries of HH_hca_hndl_t + * Returns: HH_OK : operation successful. + * HH_EINVAL : Invalid params (NULL ptrs). + * HH_EAGAIN : buf_entries is smaller than num_of_hcas + * Description: + * Each device is referenced using an HH_hca_hndl. To get the list of + * available devices and a handle to each currently available HCA, + * HH provides this in-kernel HCA listing function (imitating the usage + * of ls /dev/ in user space). + * + *****************************************************************************/ +extern HH_ret_t HH_list_hcas(u_int32_t buf_entries, + u_int32_t* num_of_hcas_p, + HH_hca_hndl_t* hca_list_buf_p); + +/************************************************************************ + * Set the if_ops tbl with dummy functions returning HH_ENOSYS. + * This is convenient for initializing tables prior to + * setting them with a partial real implementation. + * + * This way, the general HH_if_ops_t table structure can be extended, + * requiring just recompilation. + ************************************************************************/ +extern void HH_ifops_tbl_set_enosys(HH_if_ops_t* tbl); + +#endif /*_H_HH_H_*/ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.c new file mode 100644 index 00000000..4f37b4d7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include + +#if defined(__DARWIN__) && defined (MT_KERNEL) +#ifndef abs +#define abs(x) ((x)>0?(x):-(x)) +#endif +#endif + +/* We cannot use simple and efficient switch/cases, + * since HH_ERROR_LIST has some different symbols with equal values!
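+ * (For example, HH_EAGAIN, HH_ENOMEM and HH_CQ_FULL below all alias
+ * VAPI_EAGAIN, so a switch over the numeric values would produce
+ * duplicate case labels.)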
+ */ + +enum { E_SYMBOL, E_MESSAGE }; + + +#undef HH_ERROR_INFO +#define HH_ERROR_INFO(A, B, C) {#A, C}, +#define ERR_TABLE_SIZE (-VAPI_EGEN + 1) +static const char *tab[ERR_TABLE_SIZE][2] = { + HH_ERROR_LIST +}; +#undef HH_ERROR_INFO + +static const int32_t err_nums[] = { +#define HH_ERROR_INFO(A, B, C) A, + HH_ERROR_LIST +#undef HH_ERROR_INFO + HH_ERROR_DUMMY_CODE +}; +#define ERR_NUMS_SIZE (sizeof(err_nums)/sizeof(int32_t)) + + +/************************************************************************/ +static const char* HH_strerror_t(HH_ret_t errnum, unsigned int te) +{ + const char* s = "HH Unknown Error"; + int ie = ERR_TABLE_SIZE; + int i; + for (i=0; (err_nums[i] != HH_ERROR_DUMMY_CODE) && (i < ERR_NUMS_SIZE); i++) + { + if (err_nums[i] == errnum) + { + ie = i; + break; + } + } + if (ie < ERR_TABLE_SIZE) + { + const char* ts = tab[ie][te]; + if (ts) { s = ts; } + } + return s; +} /* HH_strerror_t */ + + +/************************************************************************/ +const char* HH_strerror(HH_ret_t errnum) +{ + return HH_strerror_t(errnum, E_MESSAGE); +} /* HH_strerror */ + + +/************************************************************************/ +const char* HH_strerror_sym(HH_ret_t errnum) +{ + return HH_strerror_t(errnum, E_SYMBOL); +} /* HH_strerror_sym */ + + +#if defined(HH_COMMON_TEST) +/* Compile by + gcc -g -Wall -DHH_COMMON_TEST -I. -I$MTHOME/include -o /tmp/hhc hh_common.c + */ +#include +int main(int argc, char** argv) +{ + if (argc < 2) + { + fprintf(stderr, "Usage: %s \n", argv[0]); + } + else + { + int ai; + for (ai = 1; ai != argc; ++ai) + { + int errrc = atol(argv[ai]); + printf("[%d] errrc=%d, sym=%s, doc=%s\n", ai, errrc, + HH_strerror_sym(errrc), HH_strerror(errrc)); + } + } + return 0; +} /* main */ + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.h new file mode 100644 index 00000000..6e3eff88 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_common.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef H_HH_COMMON_H +#define H_HH_COMMON_H + +#include + +#define MAX_HCA_DEV_NUM 32 + + +/* + * Typedefs + * + */ + +typedef struct HH_hca_dev_st* __ptr64 HH_hca_hndl_t; +typedef struct HHUL_sr_wqe_st HHUL_sr_wqe_t; +typedef struct HHUL_rr_wqe_st HHUL_rr_wqe_t; + +typedef u_int32_t HH_pd_hndl_t; +typedef u_int32_t HH_rdd_hndl_t; +typedef MT_ulong_ptr_t HH_ud_av_hndl_t; +typedef u_int32_t HH_cq_hndl_t; +typedef u_int32_t HH_srq_hndl_t; + +#define HH_INVAL_SRQ_HNDL 0xFFFFFFFF + +typedef enum {PD_NO_FLAGS=0, PD_FOR_SQP=1} HH_pdm_pd_flags_t; + + +/* + * Return error codes + * First put all VAPI codes, then all codes not in VAPI + * + */ +#if 0 + { /* left-brace for balance check */ +#endif + +#define HH_ERROR_LIST \ +HH_ERROR_INFO(HH_OK, = VAPI_OK , "Operation Succeeded")\ +HH_ERROR_INFO(HH_ERR, = VAPI_EGEN , "General Layer Error")\ +HH_ERROR_INFO(HH_EFATAL, = VAPI_EFATAL , "Fatal error") \ +HH_ERROR_INFO(HH_EAGAIN, = VAPI_EAGAIN , "Not Enough Resources")\ +HH_ERROR_INFO(HH_EINTR, = VAPI_EINTR , "Operation interrupted")\ +HH_ERROR_INFO(HH_EBUSY, = VAPI_EBUSY , "Resource is busy/in-use")\ +HH_ERROR_INFO(HH_EINVAL, = VAPI_EINVAL_PARAM , "Invalid parameter")\ +HH_ERROR_INFO(HH_EINVAL_PD_HNDL, = VAPI_EINVAL_PD_HNDL , "Invalid PD handle")\ +HH_ERROR_INFO(HH_EINVAL_AV_HNDL, = VAPI_EINVAL_AV_HNDL , "Invalid Address Vector handle")\ +HH_ERROR_INFO(HH_EINVAL_QP_NUM, = VAPI_EINVAL_QP_HNDL , "Invalid Queue Pair Number")\ +HH_ERROR_INFO(HH_EINVAL_SRQ_HNDL, = VAPI_EINVAL_SRQ_HNDL , "Invalid SRQ handle")\ +HH_ERROR_INFO(HH_EINVAL_EEC_NUM, = VAPI_EINVAL_EEC_HNDL , "Invalid EE-Context Number")\ +HH_ERROR_INFO(HH_EINVAL_CQ_HNDL, = VAPI_EINVAL_CQ_HNDL , "Invalid Completion Queue Handle")\ +HH_ERROR_INFO(HH_EINVAL_QP_STATE, = VAPI_EINVAL_QP_STATE , "Invalid Queue Pair State")\ +HH_ERROR_INFO(HH_EINVAL_HCA_ID, = VAPI_EINVAL_HCA_ID , "Wrong HCA ID")\ +HH_ERROR_INFO(HH_EINVAL_CQ_NOT_TYPE, = VAPI_EINVAL_NOTIF_TYPE, "Invalid Completion Notification Type")\ +HH_ERROR_INFO(HH_EINVAL_PARAM, = VAPI_EINVAL_PARAM, "Invalid Parameter")\ +HH_ERROR_INFO(HH_EINVAL_HCA_HNDL, = VAPI_EINVAL_HCA_HNDL , "Bad HCA device Handle")\ +HH_ERROR_INFO(HH_ENOSYS, = VAPI_ENOSYS , "Not Supported")\ +HH_ERROR_INFO(HH_EINVAL_PORT, = VAPI_EINVAL_PORT , "Invalid Port Number")\ +HH_ERROR_INFO(HH_EINVAL_OPCODE, = VAPI_EINVAL_OP , "Invalid Operation")\ +HH_ERROR_INFO(HH_ENOMEM, = VAPI_EAGAIN , "Not Enough Memory")\ +HH_ERROR_INFO(HH_E2BIG_SG_NUM, = VAPI_E2BIG_SG_NUM , "Max. SG size exceeds capabilities")\ +HH_ERROR_INFO(HH_E2BIG_WR_NUM, = VAPI_E2BIG_WR_NUM , "Max. 
WR number exceeds capabilities")\ +HH_ERROR_INFO(HH_EINVAL_WQE, = VAPI_E2BIG_WR_NUM , "Invalid WQE")\ +HH_ERROR_INFO(HH_EINVAL_SG_NUM, = VAPI_EINVAL_SG_NUM , "Invalid scatter/gather list length") \ +HH_ERROR_INFO(HH_EINVAL_SG_FMT, = VAPI_EINVAL_SG_FMT , "Invalid scatter/gather list format") \ +HH_ERROR_INFO(HH_E2BIG_CQE_NUM, = VAPI_E2BIG_CQ_NUM , "CQE number exceeds CQ cap.") \ +HH_ERROR_INFO(HH_CQ_EMPTY, = VAPI_CQ_EMPTY , "CQ is empty")\ +HH_ERROR_INFO(HH_EINVAL_VA, = VAPI_EINVAL_VA , "Invalid virtual address")\ +HH_ERROR_INFO(HH_EINVAL_MW, = VAPI_EINVAL_MW_HNDL , "Invalid memory window")\ +HH_ERROR_INFO(HH_CQ_FULL, = VAPI_EAGAIN , "CQ is full")\ +HH_ERROR_INFO(HH_EINVAL_MTU, = VAPI_EINVAL_MTU , "MTU violation")\ +HH_ERROR_INFO(HH_2BIG_MCG_SIZE, = VAPI_E2BIG_MCG_SIZE ,"Number of QPs attached to multicast groups exceeded") \ +HH_ERROR_INFO(HH_EINVAL_MCG_GID, = VAPI_EINVAL_MCG_GID ,"Invalid Multicast group GID") \ +HH_ERROR_INFO(HH_EINVAL_SERVICE_TYPE,= VAPI_EINVAL_SERVICE_TYPE , "Non supported transport service for QP.")\ +HH_ERROR_INFO(HH_EINVAL_MIG_STATE, = VAPI_EINVAL_MIG_STATE ,"Invalid Path Migration State") \ +HH_ERROR_INFO(HH_COMPLETED, = VAPI_COMPLETED ,"Poll Loop Completed") \ +HH_ERROR_INFO(HH_POLL_NEEDED, = VAPI_POLL_NEEDED ,"Drain CQ with poll_cq") \ +HH_ERROR_INFO(HH_ERROR_MIN, = VAPI_ERROR_MAX , "Dummy min error code : put all error codes after it")\ +HH_ERROR_INFO(HH_NO_MCG, EMPTY ,"No Multicast group was found") \ +HH_ERROR_INFO(HH_MCG_FULL, EMPTY ,"Multicast group is not empty") \ +HH_ERROR_INFO(HH_MCG_EMPTY, EMPTY ,"Multicast group is empty") \ +HH_ERROR_INFO(HH_ENODEV, EMPTY , "Unknown device")\ +HH_ERROR_INFO(HH_DISCARD, EMPTY ,"Data Discarded")\ +HH_ERROR_INFO(HH_ERROR_MAX, EMPTY ,"Dummy max error code : put all error codes before it") +#if 0 + } /* right-brace for balance check */ +#endif + +enum +{ +#define HH_ERROR_INFO(A, B, C) A B, + HH_ERROR_LIST +#undef HH_ERROR_INFO + HH_ERROR_DUMMY_CODE +}; +typedef int32_t HH_ret_t; + +#if defined (__DLL_EXPORTS__) +__declspec(dllexport) const char* HH_strerror(HH_ret_t errnum); +__declspec(dllexport) const char* HH_strerror_sym(HH_ret_t errnum); +#else +extern const char* HH_strerror(HH_ret_t errnum); +extern const char* HH_strerror_sym(HH_ret_t errnum); +#endif + +/************************************************************************ + *** Low level only *** + ***/ + +typedef struct +{ + MT_virt_addr_t addr; + MT_virt_addr_t size; +} HH_buff_entry_t; + +#if 0 +typedef void (*HH_async_event_t)(HH_hca_hndl_t, + HH_event_record_t *, + void* private_data); +typedef void (*HH_comp_event_t)(HH_hca_hndl_t, + HH_cq_num_t, + void* private_data); +#endif + +#endif /* H_HH_COMMON_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_init.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_init.h new file mode 100644 index 00000000..61c6813f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_init.h @@ -0,0 +1,52 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_HH_INIT_H +#define H_HH_INIT_H + +#ifdef MT_KERNEL + +#ifdef __cplusplus +extern "C" { +#endif + +int init_hh_driver(void); +void cleanup_hh_driver(void); + +#ifdef __cplusplus +} +#endif + + +#endif /* MT_KERNEL */ + +#endif /*H_HH_INIT_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c new file mode 100644 index 00000000..c0e21415 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_rx_stub.c @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + mrw.type = VAPI_MR; + mrw.page_size = PAGE_SIZE; + mrw.size = REGION_SIZE; + mrw.start = va; + mrw.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE | VAPI_EN_REMOTE_READ; + + mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(va, REGION_SIZE); + + mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + if(mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + build_tpt_lst(va, mrw.phys_addr_lst_len, mrw.phys_addr_lst); + + + ret = GHH_init_hh_all_gamla(); + + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in initialization of Gamlas\n", __FUNCTION__); + return(-1); + } + + + HH_open_hca(HCA_NUM); + + + ret = HH_create_pd(HCA_NUM,&pd_num); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create PD\n",__FUNCTION__); + return(-1); + } + + + ret = HH_register_mrw(HCA_NUM, &mrw, &rkey, &lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + FREE(mrw.phys_addr_lst); + + /* Allocate memory for CQ */ + ret = HH_get_cq_buffer_sz(HCA_NUM,MAX_CQ_ENTRIES,&cq_buf_sz,&cq_num_ent); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in getting CQ buf size\n",__FUNCTION__); + return(-1); + } + + + cq_buf_va = (MT_virt_addr_t)MALLOC(cq_buf_sz); + cq_mrw.type = VAPI_MR; + cq_mrw.page_size = PAGE_SIZE; + cq_mrw.size = cq_buf_sz; + cq_mrw.start = cq_buf_va; + cq_mrw.acl = VAPI_EN_LOCAL_WRITE; /* Local only ! */ + cq_mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(cq_mrw.start, cq_mrw.size); + cq_mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(cq_mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + + if(cq_mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + + build_tpt_lst(cq_mrw.start, cq_mrw.phys_addr_lst_len, cq_mrw.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &cq_mrw, &cq_buf_rkey, &cq_buf_lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + FREE(cq_mrw.phys_addr_lst); + + ret = HH_create_cq(HCA_NUM,cq_buf_va,cq_buf_lkey,cq_buf_sz,&cq_num_ent,&cq_num); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: Failed to create CQ\n",__FUNCTION__); + return(-1); + } + + /* Register region for WQEs */ + qp_cap.max_oust_wr_rq = MAX_WQES; + qp_cap.max_oust_wr_sq = MAX_WQES; + qp_cap.max_sg_size_rq = MAX_SG_ENTRIES; + qp_cap.max_sg_size_sq = MAX_SG_ENTRIES; + ret = HH_get_wqe_buf_sz(HCA_NUM,VAPI_TS_RC,&qp_cap,&qp_wqe_buff_sz,&act_qp_cap); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in getting wqe buf size\n",__FUNCTION__); + return(-1); + } + + qp_wqe_buff_va = (MT_virt_addr_t)MALLOC(qp_wqe_buff_sz); + qp_wqe_mrw.type = VAPI_MR; + qp_wqe_mrw.page_size = PAGE_SIZE; + qp_wqe_mrw.size = qp_wqe_buff_sz; + qp_wqe_mrw.start = qp_wqe_buff_va; + qp_wqe_mrw.acl = VAPI_EN_LOCAL_WRITE; /* Local only !
*/ + qp_wqe_mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(qp_wqe_mrw.start, qp_wqe_mrw.size); + qp_wqe_mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(qp_wqe_mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + if(qp_wqe_mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + build_tpt_lst(qp_wqe_mrw.start, qp_wqe_mrw.phys_addr_lst_len, qp_wqe_mrw.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &qp_wqe_mrw, &wqe_buff_rkey, &wqe_buff_lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + FREE(qp_wqe_mrw.phys_addr_lst); + + + /* Open the QP */ + qp_init_attr.ts_type = VAPI_TS_RC; + qp_init_attr.pd_num = pd_num; + qp_init_attr.rdd_num = 0; + qp_init_attr.wqe_buff = qp_wqe_mrw.start; + qp_init_attr.wqe_buff_lkey = wqe_buff_lkey; + qp_init_attr.wqe_buff_size = qp_wqe_mrw.size; + + qp_init_attr.rq_sig_type = VAPI_SIGNALED; + qp_init_attr.rq_cq_num = cq_num; + qp_init_attr.sq_sig_type = VAPI_SIGNALED; + qp_init_attr.sq_cq_num = cq_num; + + ret = HH_create_qp(HCA_NUM,&qp_init_attr,&qp_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create QP\n",__FUNCTION__); + return(-1); + } + + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + /* Common modifiers */ + qp_attr.av.dlid = RC_DLID; + qp_attr.av.grh_flag = FALSE; + qp_attr.av.sl = RC_SL; + qp_attr.av.src_path_bits = 0; + qp_attr.av.static_rate = 1; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV); + qp_attr.path_mtu = RC_MTU; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU); + qp_attr.dest_qp_num = RC_DST_QP_NUM; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM); + qp_attr.pkey_ix = RC_PKEY_IX; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + qp_attr.port = 1; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + qp_attr.qp_state = VAPI_RTS; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + /* Requester Modifiers */ + qp_attr.rq_psn = 0; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN); + + /* Responder Modifiers */ + qp_attr.sq_psn = 0; /* Initial PSN for requester */ + qp_attr.timeout = 0x1f; /* timeout */ + qp_attr.rnr_retry = 0; /* Retries not supported */ + + + + ret = HH_modify_qp(HCA_NUM,qp_num,VAPI_RESET,&qp_attr,&qp_attr_mask); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create QP\n",__FUNCTION__); + return(-1); + } + + /* Post WQE */ + dest1_mrw.type = VAPI_MR; + dest1_mrw.size = 4096; + dest1_mrw.start = (MT_virt_addr_t)MALLOC(dest1_mrw.size); + + mlock((u_int32_t*)((u_int32_t)dest1_mrw.start),dest1_mrw.size); + + dest1_mrw.page_size = PAGE_SIZE; + dest1_mrw.num = pd_num; + dest1_mrw.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE | VAPI_EN_REMOTE_READ ; + dest1_mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(dest1_mrw.start, dest1_mrw.size); + dest1_mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(dest1_mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + if((dest1_mrw.phys_addr_lst == 0)||(dest1_mrw.start==0)) + { + MTL_ERROR('1', "%s: couldn't allocate memory\n", __FUNCTION__); + return(-1); + } + build_tpt_lst(dest1_mrw.start, dest1_mrw.phys_addr_lst_len, dest1_mrw.phys_addr_lst); + ret = HH_register_mrw(HCA_NUM, &dest1_mrw, &dest1_rkey, &dest1_lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for MRW\n",__FUNCTION__); + return(-1); + } + FREE(dest1_mrw.phys_addr_lst); + + printf("VA: %x RKey: %x\n",dest1_mrw.start,dest1_rkey); + + rr_wqe_p = (HH_rr_wqe_t *)qp_wqe_buff_va; + rr_wqe_p->comp_type = 0; // VAPI_SIGNALED; + rr_wqe_p->id = 0x666; + rr_wqe_p->next = 0; 
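+ /*
+  * Scatter-list layout for this receive WQE: 8 entries of one 4096-byte
+  * page each, all pointing at the same registered destination buffer,
+  * so sg_lst_len is 8 and total_len is 8 * 4096 bytes.
+  */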
+ rr_wqe_p->total_len = 4096 * 8; + rr_wqe_p->sg_lst_len = 1 * 8; + rr_wqe_p->sg_lst_p[0].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[0].len = 4096; + rr_wqe_p->sg_lst_p[0].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[1].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[1].len = 4096; + rr_wqe_p->sg_lst_p[1].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[2].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[2].len = 4096; + rr_wqe_p->sg_lst_p[2].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[3].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[3].len = 4096; + rr_wqe_p->sg_lst_p[3].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[4].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[4].len = 4096; + rr_wqe_p->sg_lst_p[4].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[5].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[5].len = 4096; + rr_wqe_p->sg_lst_p[5].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[6].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[6].len = 4096; + rr_wqe_p->sg_lst_p[6].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[7].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[7].len = 4096; + rr_wqe_p->sg_lst_p[7].lkey = dest1_lkey; + /* + rr_wqe_p->sg_lst_p[2].addr = dest1_mrw.start+16; + rr_wqe_p->sg_lst_p[2].len = 33; + rr_wqe_p->sg_lst_p[2].lkey = dest1_lkey; + */ + + ret = GHQPM_rq_doorbell(HH_hca_dev_tbl + HCA_NUM,qp_num, qp_wqe_buff_va); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in doorbell for RQ\n",__FUNCTION__); + return(-1); + } + + /* link another WQE */ + rr_wqe_p->next = (MT_virt_addr_t)((void*)rr_wqe_p + + sizeof(HH_rr_wqe_t) + + (rr_wqe_p->sg_lst_len-1)*sizeof(HH_sg_lst_entry_t)); + + rr_wqe_p = (HH_rr_wqe_t *)rr_wqe_p->next; + + rr_wqe_p->comp_type = 0; // VAPI_SIGNALED; + rr_wqe_p->id = 0x666; + rr_wqe_p->next = 0; + rr_wqe_p->total_len = 4096 * 8; + rr_wqe_p->sg_lst_len = 1 * 8; + rr_wqe_p->sg_lst_p[0].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[0].len = 4096; + rr_wqe_p->sg_lst_p[0].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[1].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[1].len = 4096; + rr_wqe_p->sg_lst_p[1].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[2].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[2].len = 4096; + rr_wqe_p->sg_lst_p[2].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[3].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[3].len = 4096; + rr_wqe_p->sg_lst_p[3].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[4].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[4].len = 4096; + rr_wqe_p->sg_lst_p[4].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[5].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[5].len = 4096; + rr_wqe_p->sg_lst_p[5].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[6].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[6].len = 4096; + rr_wqe_p->sg_lst_p[6].lkey = dest1_lkey; + rr_wqe_p->sg_lst_p[7].addr = dest1_mrw.start; + rr_wqe_p->sg_lst_p[7].len = 4096; + rr_wqe_p->sg_lst_p[7].lkey = dest1_lkey; + + + + + ret = GHQPM_rq_doorbell(HH_hca_dev_tbl + HCA_NUM, qp_num, qp_wqe_buff_va); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in doorbell for RQ\n",__FUNCTION__); + return(-1); + } + + + /* Here is a good place to send the first packet ! 
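+    (for instance, by having the remote side post a send and ring its
+    send-queue doorbell, as hh_tx_stub.c does via GHQPM_sq_doorbell)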
*/ + +#ifdef RX_STUB + rx_stub(HH_hca_dev_tbl+HCA_NUM); +#else + GHRX_receive_poll((GHH_dev_t*)((HH_hca_dev_tbl + HCA_NUM)->device)); +#endif /* RX_STUB */ + + + sprintf(tmp_buf,"\n\n--------The result buf:"); + + + + for (i=0;i<128;i++) + { + if (i%16==0) { sprintf(tmp_buf1,"\n"); strcat(tmp_buf,tmp_buf1); } + sprintf(tmp_buf1,"%02X ",(u_int8_t)*((u_int8_t*)((MT_phys_addr_t)dest1_mrw.start+i))); + strcat(tmp_buf,tmp_buf1); + } + + + + MTL_TRACE('4',"%s\n\n",tmp_buf); + + + + + ret = HH_destroy_qp(HCA_NUM,qp_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy QP\n",__FUNCTION__); + return(-1); + } + + ret = HH_destroy_cq(HCA_NUM,cq_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy CQ\n",__FUNCTION__); + return(-1); + } + + ret = HH_destroy_pd(HCA_NUM,pd_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy PD\n",__FUNCTION__); + return(-1); + } + + ret = HH_close_hca(HCA_NUM); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in close HCA\n",__FUNCTION__); + return(-1); + } + + ret = HH_rem_hca_dev(HCA_NUM); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in HCA remove device\n",__FUNCTION__); + return(-1); + } + + return(0); +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_stub_defines.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_stub_defines.h new file mode 100644 index 00000000..d02d13ff --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_stub_defines.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define RC_SLID 0x112 +#define RC_DLID 0x122 +#define RC_MTU MTU256 +#define RC_SL 1 +#define RC_SRC_QP_NUM 2 +#define RC_DST_QP_NUM 0x2 +#define RC_PKEY_IX 0 +#define RC_PKEY 0x8888 +#define RC_RQ_START_PSN 0 + + + +HH_ret_t rx_stub(HH_hca_dev_t *dev); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c new file mode 100644 index 00000000..640b1e42 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub.c @@ -0,0 +1,428 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + HH_mrw_t mrw, mrw_sg; + + MT_virt_addr_t va; + VAPI_rkey_t rkey, rkey_sg; + VAPI_lkey_t lkey, lkey_sg; + + + /* Protection Domain */ + HH_pd_num_t pd_num; + + /* Queue Pair Variables */ + VAPI_qp_cap_t qp_cap,act_qp_cap; + HH_qp_init_attr_t qp_init_attr; + VAPI_qp_num_t qp_num; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + + size_t qp_wqe_buff_sz; + HH_mrw_t qp_wqe_mrw; + VAPI_rkey_t wqe_buff_rkey; + VAPI_lkey_t wqe_buff_lkey; + MT_virt_addr_t qp_wqe_buff_va; + + + /* Completion Queue Configuration */ + size_t cq_buf_sz; + u_int32_t cq_num_ent; + HH_cq_num_t cq_num; + HH_mrw_t cq_mrw; + VAPI_rkey_t cq_buf_rkey; + VAPI_rkey_t cq_buf_lkey; + /*VAPI_wc_desc_t wc;*/ + MT_virt_addr_t cq_buf_va; + + + /* Post Send WQE */ + HH_sr_wqe_t *wqe_p; + HH_sg_lst_entry_t sg_lst[5]; + + + + + + va = (MT_virt_addr_t)MALLOC(REGION_SIZE); + + if(va == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for region\n", __FUNCTION__); + return(-1); + } + + mlock((u_int32_t*)va, REGION_SIZE); + + + /* Device Initialization */ + ret = GHH_init_hh_all_gamla(); + + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in initialization of Gamlas\n", __FUNCTION__); + return(-1); + } + + + + /* + * HCA Calls + * + */ + HH_open_hca(HCA_NUM); + + ret = HH_create_pd(HCA_NUM,&pd_num); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create PD\n",__FUNCTION__); + return(-1); + } + + + + + /* + * Memory Registration Calls + * + */ + + /* Initialize Memory Regions */ + mrw.type = VAPI_MR; + mrw.page_size = PAGE_SIZE; + mrw.size = REGION_SIZE; + mrw.start = va; + mrw.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE | VAPI_EN_REMOTE_READ; + + mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(va, REGION_SIZE); + + mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + + if(mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + + build_tpt_lst(va, mrw.phys_addr_lst_len, mrw.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &mrw, &rkey, &lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + + FREE(mrw.phys_addr_lst); + + + + /* Source Data Memory Region */ + mrw_sg.type = VAPI_MR; + mrw_sg.page_size = PAGE_SIZE; + mrw_sg.size = REGION_SIZE; + mrw_sg.start = va; + mrw_sg.acl = VAPI_EN_LOCAL_WRITE | 
VAPI_EN_REMOTE_WRITE | VAPI_EN_REMOTE_READ; + + mrw_sg.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(va, REGION_SIZE); + + mrw_sg.phys_addr_lst = (MT_phys_addr_t *)MALLOC(mrw_sg.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + + + if(mrw_sg.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + + build_tpt_lst(va, mrw_sg.phys_addr_lst_len, mrw_sg.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &mrw_sg, &rkey_sg, &lkey_sg); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + + FREE(mrw_sg.phys_addr_lst); + + + + + + + + + /* + * CQ Verbs + * + */ + ret = HH_get_cq_buffer_sz(HCA_NUM,MAX_CQ_ENTRIES,&cq_buf_sz,&cq_num_ent); + + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in getting CQ buf size\n",__FUNCTION__); + return(-1); + } + + + /* Completion Queue Memory Registration */ + cq_buf_va = (MT_virt_addr_t)MALLOC(cq_buf_sz); + cq_mrw.type = VAPI_MR; + cq_mrw.page_size = PAGE_SIZE; + cq_mrw.size = cq_buf_sz; + cq_mrw.start = cq_buf_va; + cq_mrw.acl = VAPI_EN_LOCAL_WRITE; /* Local only ! */ + cq_mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(cq_mrw.start, cq_mrw.size); + cq_mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(cq_mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + + if(cq_mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + + build_tpt_lst(cq_mrw.start, cq_mrw.phys_addr_lst_len, cq_mrw.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &cq_mrw, &cq_buf_rkey, &cq_buf_lkey); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + FREE(cq_mrw.phys_addr_lst); + + ret = HH_create_cq(HCA_NUM,cq_buf_va,cq_buf_lkey,cq_buf_sz,&cq_num_ent,&cq_num); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: Failed to create CQ\n",__FUNCTION__); + return(-1); + } + + + + /* + * Queue Pair Calls + * + */ + + /* Register region for WQEs */ + qp_cap.max_oust_wr_rq = MAX_WQES; + qp_cap.max_oust_wr_sq = MAX_WQES; + qp_cap.max_sg_size_rq = MAX_SG_ENTRIES; + qp_cap.max_sg_size_sq = MAX_SG_ENTRIES; + + ret = HH_get_wqe_buf_sz(HCA_NUM,VAPI_TS_RC,&qp_cap,&qp_wqe_buff_sz,&act_qp_cap); + + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in getting wqe buf size\n",__FUNCTION__); + return(-1); + } + + /* Queue Pair Buffers */ + qp_wqe_buff_va = (MT_virt_addr_t)MALLOC(qp_wqe_buff_sz); + qp_wqe_mrw.type = VAPI_MR; + qp_wqe_mrw.page_size = PAGE_SIZE; + qp_wqe_mrw.size = qp_wqe_buff_sz; + qp_wqe_mrw.start = qp_wqe_buff_va; + qp_wqe_mrw.acl = VAPI_EN_LOCAL_WRITE; /* Local only ! 
*/ + qp_wqe_mrw.phys_addr_lst_len = NUM_OF_PAGE_ENTRIES(qp_wqe_mrw.start, qp_wqe_mrw.size); + qp_wqe_mrw.phys_addr_lst = (MT_phys_addr_t *)MALLOC(qp_wqe_mrw.phys_addr_lst_len * sizeof(MT_phys_addr_t)); + + if(qp_wqe_mrw.phys_addr_lst == 0) + { + MTL_ERROR('1', "%s: couldn't allocate memory for phys_addr_lst\n", __FUNCTION__); + return(-1); + } + + build_tpt_lst(qp_wqe_mrw.start, qp_wqe_mrw.phys_addr_lst_len, qp_wqe_mrw.phys_addr_lst); + + ret = HH_register_mrw(HCA_NUM, &qp_wqe_mrw, &wqe_buff_rkey, &wqe_buff_lkey); + + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in memory registration for device\n",__FUNCTION__); + return(-1); + } + FREE(qp_wqe_mrw.phys_addr_lst); + + + /* Open the QP */ + qp_init_attr.ts_type = VAPI_TS_RC; + qp_init_attr.pd_num = pd_num; + qp_init_attr.rdd_num = 0; + qp_init_attr.wqe_buff = qp_wqe_mrw.start; + qp_init_attr.wqe_buff_lkey = wqe_buff_lkey; + qp_init_attr.wqe_buff_size = qp_wqe_mrw.size; + + qp_init_attr.rq_sig_type = VAPI_SIGNALED; + qp_init_attr.rq_cq_num = cq_num; + qp_init_attr.sq_sig_type = VAPI_SIGNALED; + qp_init_attr.sq_cq_num = cq_num; + + ret = HH_create_qp(HCA_NUM,&qp_init_attr,&qp_num); + + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create QP\n",__FUNCTION__); + return(-1); + } + + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + + /* Common modifiers */ + qp_attr.av.dlid = RC_DLID; + qp_attr.av.grh_flag = FALSE; + qp_attr.av.sl = RC_SL; + qp_attr.av.src_path_bits = 0; + qp_attr.av.static_rate = 1; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_AV); + qp_attr.path_mtu = RC_MTU; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PATH_MTU); + qp_attr.dest_qp_num = RC_DST_QP_NUM; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_DEST_QP_NUM); + qp_attr.pkey_ix = RC_PKEY_IX; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + qp_attr.port = 0; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + qp_attr.qp_state = VAPI_RTS; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + + /* Requester Modifiers */ + qp_attr.rq_psn = 0; QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN); + + /* Responder Modifiers */ + qp_attr.sq_psn = 0; /* Initial PSN for requester */ + qp_attr.timeout = 0x1f; /* timeout */ + qp_attr.rnr_retry = 0; /* Retries not supported */ + + + ret = HH_modify_qp(HCA_NUM,qp_num,VAPI_RESET,&qp_attr,&qp_attr_mask); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in create QP\n",__FUNCTION__); + return(-1); + } + + + /* + * Preparing Work Queue Element + * + */ + wqe_p = (HH_sr_wqe_t*)qp_wqe_buff_va; + + sg_lst[0].addr = mrw_sg.start; + sg_lst[0].len = PAGE_SIZE; + sg_lst[0].lkey = lkey_sg; + + sg_lst[1].addr = mrw_sg.start; + sg_lst[1].len = PAGE_SIZE; + sg_lst[1].lkey = lkey_sg; + + sg_lst[2].addr = mrw_sg.start; + sg_lst[2].len = PAGE_SIZE; + sg_lst[2].lkey = lkey_sg; + + + wqe_p->id = 10; + wqe_p->opcode = VAPI_SEND; + wqe_p->sg_lst_len = 3; + wqe_p->sg_total_byte_len = 3*PAGE_SIZE; + wqe_p->sg_lst_p = sg_lst; + + + printf("Hit Enter To Post first descriptor\n"); + getchar(); + + ret = GHQPM_sq_doorbell(HH_hca_dev_tbl + HCA_NUM,qp_num, wqe_p); + if(ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in doorbell for RQ\n",__FUNCTION__); + return(-1); + } + printf("Hit Enter to finish program\n"); + getchar(); + + + +// printf("Hit Enter To Post second descriptor\n"); +// getchar(); + +// ret = GHQPM_sq_doorbell(HH_hca_dev_tbl + HCA_NUM,qp_num, wqe_p); +// if(ret != HH_OK) +// { +// MTL_ERROR('1', "%s: failed in doorbell for RQ\n",__FUNCTION__); +// return(-1); +// } + +// printf("Hit Enter to finish program\n"); +// getchar(); + + + + /* + * Destroy All + * + */ + ret = 
HH_destroy_qp(HCA_NUM,qp_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy QP\n",__FUNCTION__); + return(-1); + } + + ret = HH_destroy_cq(HCA_NUM,cq_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy CQ\n",__FUNCTION__); + return(-1); + } + + ret = HH_destroy_pd(HCA_NUM,pd_num); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in destroy PD\n",__FUNCTION__); + return(-1); + } + + ret = HH_close_hca(HCA_NUM); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in close HCA\n",__FUNCTION__); + return(-1); + } + + ret = HH_rem_hca_dev(HCA_NUM); + if (ret != HH_OK) + { + MTL_ERROR('1', "%s: failed in HCA remove device\n",__FUNCTION__); + return(-1); + } + + return(0); +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub_defines.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub_defines.h new file mode 100644 index 00000000..964992ed --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hh_tx_stub_defines.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define RC_SLID 0x112 +#define RC_DLID 0x121 +#define RC_MTU MTU256 +#define RC_SL 1 +#define RC_SRC_QP_NUM 2 +#define RC_DST_QP_NUM 0x42 +#define RC_PKEY_IX 0 +#define RC_PKEY 0x8888 +#define RC_RQ_START_PSN 0 + + + +HH_ret_t rx_stub(HH_hca_dev_t *dev); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhenosys.ic b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhenosys.ic new file mode 100644 index 00000000..89630630 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhenosys.ic @@ -0,0 +1,513 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id:$ + */ + + +static HH_ret_t enosys_open_hca( + HH_hca_hndl_t hca_hndl, + EVAPI_hca_profile_t *profile_p, + EVAPI_hca_profile_t *sugg_profile_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_close_hca( + HH_hca_hndl_t hca_hndl +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_ul_resources( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* hca_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_ul_resources( + HH_hca_hndl_t hca_hndl, + void* hca_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_hca( + HH_hca_hndl_t hca_hndl, + VAPI_hca_cap_t* hca_cap_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_hca( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_attr_t* hca_attr_p, + VAPI_hca_attr_mask_t* hca_attr_mask_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_port_prop( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_port_t* hca_port_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_get_pkey_tbl( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_pkey_t* pkey_tbl_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_get_gid_tbl( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* pkey_tbl_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_get_lid( + HH_hca_hndl_t hca_hndl, + IB_port_t port, + IB_lid_t* lid_p, + u_int8_t* lmc_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_pd( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t prot_ctx, + void * pd_ul_resources_p, + HH_pd_hndl_t *pd_num_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_pd( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_rdd( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t* rdd_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_rdd( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_ud_av_t* av_p, + HH_ud_av_hndl_t* ah_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_register_mr( + HH_hca_hndl_t hca_hndl, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_reregister_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + VAPI_mr_change_t change_mask, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t 
enosys_register_smr( + HH_hca_hndl_t hca_hndl, + HH_smr_t* smr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_deregister_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + HH_mr_info_t* mr_info_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_mw( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + IB_rkey_t* initial_rkey_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_mw( + HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_cq( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* cq_ul_resources_p, + HH_cq_hndl_t* cq +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_resize_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + void* cq_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + VAPI_cqe_num_t* num_o_cqes_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_qp( + HH_hca_hndl_t hca_hndl, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* qpn_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_get_special_qp( + HH_hca_hndl_t hca_hndl, + VAPI_special_qp_t qp_type, + IB_port_t port, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* sqp_hndl_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + VAPI_qp_state_t cur_qp_state, + VAPI_qp_attr_t* qp_attr_p, + VAPI_qp_attr_mask_t* qp_attr_mask_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + VAPI_qp_attr_t* qp_attr_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_srq(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + void *srq_ul_resources_p, + HH_srq_hndl_t *srq_p) +{ + return HH_ENOSYS; +} + + +static HH_ret_t enosys_query_srq(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq, + u_int32_t *limit_p) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_srq(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq, + void *srq_ul_resources_p) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_srq(HH_hca_hndl_t hca_hndl, + HH_srq_hndl_t srq) +{ + return HH_ENOSYS; +} + + +static HH_ret_t enosys_create_eec( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd, + IB_eecn_t* eecn_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_state_t cur_ee_state, + VAPI_qp_attr_t* ee_attr_p, + VAPI_qp_attr_mask_t* ee_attr_mask_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_attr_t* ee_attr_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_set_async_eventh( + HH_hca_hndl_t hca_hndl, + HH_async_eventh_t handler, + void* private_data +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_set_comp_eventh( + HH_hca_hndl_t hca_hndl, + HH_comp_eventh_t handler, + void* private_data +) +{ + return HH_ENOSYS; +} + +static HH_ret_t 
enosys_attach_to_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_detach_from_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_process_local_mad( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + IB_lid_t slid, + EVAPI_proc_mad_opt_t proc_mad_opts, + void* mad_in_p, + void* mad_out_p +) +{ + return HH_ENOSYS; +} + + +static void enosys_init(HH_if_ops_t* p) +{ + p->HHIF_open_hca = &enosys_open_hca; + p->HHIF_close_hca = &enosys_close_hca; + p->HHIF_alloc_ul_resources = &enosys_alloc_ul_resources; + p->HHIF_free_ul_resources = &enosys_free_ul_resources; + p->HHIF_query_hca = &enosys_query_hca; + p->HHIF_modify_hca = &enosys_modify_hca; + p->HHIF_query_port_prop = &enosys_query_port_prop; + p->HHIF_get_pkey_tbl = &enosys_get_pkey_tbl; + p->HHIF_get_gid_tbl = &enosys_get_gid_tbl; + p->HHIF_get_lid = &enosys_get_lid; + p->HHIF_alloc_pd = &enosys_alloc_pd; + p->HHIF_free_pd = &enosys_free_pd; + p->HHIF_alloc_rdd = &enosys_alloc_rdd; + p->HHIF_free_rdd = &enosys_free_rdd; + p->HHIF_create_priv_ud_av = &enosys_create_priv_ud_av; + p->HHIF_modify_priv_ud_av = &enosys_modify_priv_ud_av; + p->HHIF_query_priv_ud_av = &enosys_query_priv_ud_av; + p->HHIF_destroy_priv_ud_av = &enosys_destroy_priv_ud_av; + p->HHIF_register_mr = &enosys_register_mr; + p->HHIF_reregister_mr = &enosys_reregister_mr; + p->HHIF_register_smr = &enosys_register_smr; + p->HHIF_deregister_mr = &enosys_deregister_mr; + p->HHIF_query_mr = &enosys_query_mr; + p->HHIF_alloc_mw = &enosys_alloc_mw; + p->HHIF_free_mw = &enosys_free_mw; + p->HHIF_create_cq = &enosys_create_cq; + p->HHIF_resize_cq = &enosys_resize_cq; + p->HHIF_query_cq = &enosys_query_cq; + p->HHIF_destroy_cq = &enosys_destroy_cq; + p->HHIF_create_qp = &enosys_create_qp; + p->HHIF_get_special_qp = &enosys_get_special_qp; + p->HHIF_modify_qp = &enosys_modify_qp; + p->HHIF_query_qp = &enosys_query_qp; + p->HHIF_destroy_qp = &enosys_destroy_qp; + p->HHIF_create_eec = &enosys_create_eec; + p->HHIF_modify_eec = &enosys_modify_eec; + p->HHIF_query_eec = &enosys_query_eec; + p->HHIF_destroy_eec = &enosys_destroy_eec; + p->HHIF_set_async_eventh = &enosys_set_async_eventh; + p->HHIF_set_comp_eventh = &enosys_set_comp_eventh; + p->HHIF_attach_to_multicast = &enosys_attach_to_multicast; + p->HHIF_detach_from_multicast = &enosys_detach_from_multicast; + p->HHIF_process_local_mad = &enosys_process_local_mad; + p->HHIF_create_srq = &enosys_create_srq; + p->HHIF_destroy_srq = &enosys_destroy_srq; + p->HHIF_query_srq = &enosys_query_srq; + p->HHIF_modify_srq = &enosys_modify_srq; +} /* enosys_init */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.c new file mode 100644 index 00000000..b2f55b06 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_HHUL_C + + + + + + + + +#include +#include +/* add includes for other Mellanox devices here */ + + + +extern HH_ret_t HHUL_alloc_hca_hndl +( + u_int32_t vendor_id, + u_int32_t device_id, + void* hca_ul_resources_p, + HHUL_hca_hndl_t* hhul_hca_hndl_p +) +{ + HH_ret_t rc = HH_ENODEV; + if (vendor_id == MT_MELLANOX_IEEE_VENDOR_ID) { + /* Tavor (InfiniHost) */ + rc = THHUL_hob_create(hca_ul_resources_p, device_id, hhul_hca_hndl_p); + } else { /* unknown vendor */ + return HH_ENOSYS; + } + + + if (rc == HH_OK) + { + struct HHUL_hca_dev_st* p = *hhul_hca_hndl_p; + p->vendor_id = vendor_id; + p->dev_id = device_id; + } + return rc; +} /* HHUL_alloc_hca_hndl */ + + +#include "hhulenosys.ic" + +void HHUL_ifops_tbl_set_enosys(HHUL_if_ops_t* tbl) +{ + enosys_init(tbl); +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.h new file mode 100644 index 00000000..98409c1f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul.h @@ -0,0 +1,1558 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef H_HHUL_H +#define H_HHUL_H + +#include +#include +#include + +typedef struct HHUL_hca_dev_st* HHUL_hca_hndl_t; + +typedef void* HHUL_mw_hndl_t; +typedef void* HHUL_qp_hndl_t; +typedef void* HHUL_srq_hndl_t; +#define HHUL_INVAL_SRQ_HNDL ((void*)(MT_ulong_ptr_t)-1) +typedef VAPI_ud_av_hndl_t HHUL_ud_av_hndl_t; +typedef void* HHUL_cq_hndl_t; +typedef unsigned long HHUL_pd_hndl_t; + +typedef struct HHUL_qp_init_attr_st { + IB_ts_t ts_type; + HHUL_pd_hndl_t pd; + HHUL_cq_hndl_t sq_cq; + HHUL_cq_hndl_t rq_cq; + VAPI_sig_type_t sq_sig_type; + VAPI_sig_type_t rq_sig_type; + HHUL_srq_hndl_t srq; /* HHUL_INVAL_SRQ_HNDL if not associated with an SRQ */ + VAPI_qp_cap_t qp_cap; +} HHUL_qp_init_attr_t; + + +typedef struct HHUL_hca_dev_st HHUL_hca_dev_t; +typedef struct HHUL_mw_bind_st HHUL_mw_bind_t; + + + +/************************************************************************ + * Function: HHUL_alloc_hca_hndl + * + * Arguments: + * vendor_id - The Vendor ID for the device to get handle for + * device_id - The device ID for the device to get handle for + * hca_ul_resources_p - Resources allocated by the privileged driver + * hhul_hca_hndl_p - HCA handle provided for this HCA in user-level + * (user-level resources context) + * Returns: HH_OK, + * HH_ENODEV - Unknown device type (based on vendor and device id). + * HH_EAGAIN - Failed to allocate user space resources + * + * Description: + * + * This function invokes the HHUL_init_user_level() + * which initializes resources in user-level for given + * HCA. The specific HHUL_init_user_level() is + * selected based on device type (given in id + * parameters). The hca_ul_resources_p parameter is a + * copy of the resources context given by + * HH_alloc_ul_resources(). + * + ************************************************************************/ +extern HH_ret_t HHUL_alloc_hca_hndl +( + u_int32_t vendor_id, + u_int32_t device_id, + void* hca_ul_resources_p, + HHUL_hca_hndl_t* hhul_hca_hndl_p +); + + +/************************************************************************ + * Function: HHUL_init_user_level + * + * This is merely a prototype function that should have a specific + * implementation for each type of device. + * + * Arguments: + * hca_ul_resources_p (IN) - The user-level resources context + * allocated by + * HH_alloc_ul_resources(). Its size is + * defined by the specific instance of + * this function (per device type). + * hhul_hndl_p (OUT) - The user level handle of the HCA + * + * Returns: HH_OK + * HH_EINVAL - Invalid parameters + * (NULL ptr or invalid resources context) + * Description: + * + * This function is invoked by the HHUL wrapper for each HCA one wishes + * to use in an application, when HHUL_alloc_hca_hndl is invoked. + * The specific device function is selected based on the device type. + * It should be called after allocating privileged resources using + * HH_alloc_ul_resources(). + * + * This function should allocate all the user-level HCA resources + * required by given device in order to later manage the user-level HCA + * operations. This includes user-level mirroring of HCA resources data + * required while performing OS bypassing and other user-level context. + * + * The returned hhul_hndl is actually a pointer to HHUL device context.
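For orientation, the two-stage flow described above (privileged resource allocation followed by user-level handle creation) might look like the sketch below. This is illustrative only and not part of the patch: the helper name example_open_ul is hypothetical, and it assumes the privileged HH_alloc_ul_resources()/HH_free_ul_resources() entry points carry the kernel-side signatures stubbed in hhenosys.ic above.

static HH_ret_t example_open_ul(HH_hca_hndl_t hh,            /* opened kernel HH handle */
                                MOSAL_protection_ctx_t ctx,  /* caller's protection context */
                                void *hca_ul_res,            /* buffer of hca_ul_resources_sz bytes */
                                u_int32_t vendor_id,
                                u_int32_t device_id,
                                HHUL_hca_hndl_t *hhul_p)
{
    /* 1. Privileged allocation of the user-level resources context. */
    HH_ret_t rc = HH_alloc_ul_resources(hh, ctx, hca_ul_res);
    if (rc != HH_OK)
        return rc;

    /* 2. Bind the context to a user-level handle (dispatches by device type). */
    rc = HHUL_alloc_hca_hndl(vendor_id, device_id, hca_ul_res, hhul_p);
    if (rc != HH_OK)
        HH_free_ul_resources(hh, hca_ul_res);   /* undo step 1 on failure */
    return rc;
}

Teardown would reverse the order: HHUL_cleanup_user_level() first, then HH_free_ul_resources() with the saved context, as the HHUL_cleanup_user_level() description below notes.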
+ * + ************************************************************************/ + +#if 0 +extern HH_ret_t HHUL_init_user_level +( + void* hca_ul_resources_p, + HHUL_hca_hndl_t* hhul_hndl_p +); +#endif + +typedef struct HHUL_if_ops_st { + + HH_ret_t (*HHULIF_cleanup_user_level)(HHUL_hca_hndl_t hhul_hndl); + + HH_ret_t (*HHULIF_alloc_pd_prep)(HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t* hhul_pd_p, + void* pd_ul_resources_p); + + HH_ret_t (*HHULIF_alloc_pd_avs_prep)(HHUL_hca_hndl_t hca_hndl, + u_int32_t max_num_avs, + HH_pdm_pd_flags_t pd_flags, + HHUL_pd_hndl_t* hhul_pd_p, + void* pd_ul_resources_p); + + HH_ret_t (*HHULIF_alloc_pd_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t hhul_pd, + HH_pd_hndl_t hh_pd, + void* pd_ul_resources_p); + + HH_ret_t (*HHULIF_free_pd_prep)(HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t pd, + MT_bool undo_flag); + + HH_ret_t (*HHULIF_free_pd_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t pd); + + HH_ret_t (*HHULIF_alloc_mw)(HHUL_hca_hndl_t hhul_hndl, + IB_rkey_t initial_rkey, + HHUL_mw_hndl_t* mw_p); + + HH_ret_t (*HHULIF_bind_mw)(HHUL_hca_hndl_t hhul_hndl, + HHUL_mw_hndl_t mw, + HHUL_mw_bind_t* bind_prop_p, + IB_rkey_t* bind_rkey_p); + + HH_ret_t (*HHULIF_free_mw)(HHUL_hca_hndl_t hhul_hndl, + HHUL_mw_hndl_t mw); + + HH_ret_t (*HHULIF_create_ud_av)(HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t pd, + VAPI_ud_av_t* av_p, + HHUL_ud_av_hndl_t* ah_p); + + HH_ret_t (*HHULIF_modify_ud_av)(HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p); + + HH_ret_t (*HHULIF_query_ud_av)(HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p); + + HH_ret_t (*HHULIF_destroy_ud_av)(HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah); + + HH_ret_t (*HHULIF_create_cq_prep)(HHUL_hca_hndl_t hca_hndl, + VAPI_cqe_num_t num_o_cqes, + HHUL_cq_hndl_t* hhul_cq_p, + VAPI_cqe_num_t* num_o_cqes_p, + void* cq_ul_resources_p); + + HH_ret_t (*HHULIF_create_cq_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t hhul_cq, + HH_cq_hndl_t hh_cq, + void* cq_ul_resources_p); + + ib_api_status_t (*HHULIF_resize_cq_prep)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t num_o_cqes, + VAPI_cqe_num_t* num_o_cqes_p, + void* cq_ul_resources_p); + + HH_ret_t (*HHULIF_resize_cq_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + void* cq_ul_resources_p); + + HH_ret_t (*HHULIF_poll4cqe)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_wc_desc_t* cqe_p); + + HH_ret_t (*HHULIF_poll_and_rearm_cq)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + int solicitedNotification, + VAPI_wc_desc_t* cqe_p); + + HH_ret_t (*HHULIF_peek_cq)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t cqe_num); + + + HH_ret_t (*HHULIF_req_comp_notif)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cq_notif_type_t notif_type); + + HH_ret_t (*HHULIF_req_ncomp_notif)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t cqe_num); + + HH_ret_t (*HHULIF_destroy_cq_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq); + + HH_ret_t (*HHULIF_create_qp_prep)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_init_attr_t* qp_init_attr_p, + HHUL_qp_hndl_t* qp_hndl_p, + VAPI_qp_cap_t* qp_cap_out_p, + void* qp_ul_resources_p); + + HH_ret_t (*HHULIF_special_qp_prep)(HHUL_hca_hndl_t hca_hndl, + VAPI_special_qp_t special_qp_type, + IB_port_t port, + HHUL_qp_init_attr_t* qp_init_attr_p, + HHUL_qp_hndl_t* qp_hndl_p, + VAPI_qp_cap_t* qp_cap_out_p, + void* qp_ul_resources_p); + + HH_ret_t (*HHULIF_create_qp_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t hhul_qp, + IB_wqpn_t hh_qp, + void* 
qp_ul_resources_p); + + HH_ret_t (*HHULIF_modify_qp_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t hhul_qp, + VAPI_qp_state_t cur_state); + + HH_ret_t (*HHULIF_post_send_req)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_sr_desc_t* send_req_p); + + HH_ret_t (*HHULIF_post_send_req2)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_comp_type_t comp_type, + VAPI_ud_av_hndl_t remote_ah, + void* WorkReq); + + HH_ret_t (*HHULIF_post_inline_send_req)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_sr_desc_t* send_req_p); + + HH_ret_t (*HHULIF_post_send_reqs)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + u_int32_t num_of_requests, + VAPI_sr_desc_t* send_req_array); + + HH_ret_t (*HHULIF_post_gsi_send_req)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_sr_desc_t* send_req_p, + VAPI_pkey_ix_t pkey_index); + + HH_ret_t (*HHULIF_post_recv_req)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_rr_desc_t* recv_req_p); + + HH_ret_t (*HHULIF_post_recv_req2)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_comp_type_t comp_type, + u_int32_t sg_lst_len, + VAPI_wr_id_t ReqId, + VAPI_sg_lst_entry_t *sg_lst_p); + + HH_ret_t (*HHULIF_post_recv_reqs)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + u_int32_t num_of_requests, + VAPI_rr_desc_t* recv_req_array); + + HH_ret_t (*HHULIF_destroy_qp_done)(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl); + +HH_ret_t (*HHULIF_create_srq_prep)( + /*IN*/ + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t pd, + u_int32_t max_outs, + u_int32_t max_sentries, + /*OUT*/ + HHUL_srq_hndl_t *srq_hndl_p, + u_int32_t *actual_max_outs_p, + u_int32_t *actual_max_sentries_p, + void *srq_ul_resources_p); + +HH_ret_t (*HHULIF_create_srq_done)( + HHUL_hca_hndl_t hca, + HHUL_srq_hndl_t hhul_srq, + HH_srq_hndl_t hh_srq, + void *srq_ul_resources_p); + +HH_ret_t (*HHULIF_modify_srq_prep)( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/HHUL_srq_hndl_t hhul_srq, + /*IN*/VAPI_srq_attr_t *srq_attr_p, + /*IN*/VAPI_srq_attr_mask_t srq_attr_mask, + /*OUT*/void *srq_ul_resources_p); + +HH_ret_t (*HHULIF_modify_srq_done)( + /*IN*/HHUL_hca_hndl_t hca, + /*IN*/HHUL_srq_hndl_t hhul_srq, + /*IN*/void *srq_ul_resources_p, + /*OUT*/u_int32_t *max_outs_wr_p /* Max. outstanding WQEs */ +); + +HH_ret_t (*HHULIF_destroy_srq_done)( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_srq +); + +HH_ret_t (*HHULIF_post_srq)( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_srq_hndl_t hhul_srq, + /*IN*/ u_int32_t num_of_requests, + /*IN*/ VAPI_rr_desc_t *recv_req_array, + /*OUT*/ u_int32_t *posted_requests_p +); + +} HHUL_if_ops_t; + + +struct HHUL_hca_dev_st { + HH_hca_hndl_t hh_hndl; /* kernel level HH handle of associated HCA */ + char* dev_desc; /* Device description (name, etc.) */ + u_int32_t vendor_id; /* IEEE's 24 bit Device Vendor ID */ + u_int32_t dev_id; /* Device/part ID */ + u_int32_t hw_ver; /* Hardware version (Stepping/Rev.) */ + u_int64_t fw_ver; /* Device's firmware version (device specific) */ + HHUL_if_ops_t* if_ops; /* Interface operations (Function map) */ + MT_size_t hca_ul_resources_sz; /* #bytes user-level resr. HCA context */ + MT_size_t pd_ul_resources_sz; /* #bytes user-level resr. PD context */ + MT_size_t cq_ul_resources_sz; /* #bytes user-level resr. CQ context */ + MT_size_t srq_ul_resources_sz; /* #bytes user-level resr. SRQ context */ + MT_size_t qp_ul_resources_sz; /* #bytes user-level resr. 
QP context */ + void* device; /* Pointer to device's private data */ + void* hca_ul_resources_p; /* Privileged User-Level resources + * allocated for this HCA in process */ +}; + + +struct HHUL_mw_bind_st { + VAPI_lkey_t mr_lkey; /* L-Key of memory region to bind to */ + IB_virt_addr_t start; /* Memory window. + * start virtual address (byte addressing) */ + VAPI_size_t size; /* Size of memory window in bytes */ + VAPI_mrw_acl_t acl; /* Access Control (R/W permission - local/remote) */ + HHUL_qp_hndl_t qp; /* QP to use for posting this binding request */ + VAPI_wr_id_t id; /* Work request ID to be used in this binding request*/ + VAPI_comp_type_t comp_type; /* Create CQE or not (for QPs set to signaling per request) */ +}; + + + +/************************************************************************ + * Function: HHUL_cleanup_user_level + * + * + * Arguments: hhul_hndl (IN) - The user level HCA handle + * + * Returns: HH_OK + * HH_EINVAL_HCA_HNDL + * Description: + * + * This function frees the device specific user level resources. + * + * After calling this function the device dependent layer should free the + * associated privileged resources by calling the privileged call + * HH_free_ul_resources(). The associated hca_ul_resources should be + * saved before the call to this function in order to be able to use it + * when calling HH_free_ul_resources(). + * + ************************************************************************/ + +#define HHUL_cleanup_user_level(hhul_hndl) \ + (hhul_hndl)->if_ops->HHULIF_cleanup_user_level(hhul_hndl) + + +/************************************************************************ + * Function: HHUL_alloc_mw + * + * + * Arguments: hhul_hndl (IN) - The user level HCA handle + * initial_rkey (IN) - The initial R-Key provided by kernel level + * mw_p (OUT) - Returned memory window handle for user level + * + * Returns: HH_OK + * HH_EINVAL_HCA_HNDL + * + * Description: + * + * In order to efficiently deal with OS bypassing while performing memory + * window binding, this function should be called after a successful + * return from kernel level memory window allocation. The initial R-Key + * returned from the kernel level call should be used when allocating + * user level memory window context. + * + ************************************************************************/ + +#define HHUL_alloc_mw(hhul_hndl, initial_rkey, mw_p) \ + (hhul_hndl)->if_ops->HHULIF_alloc_mw(hhul_hndl, initial_rkey, mw_p) + + +/************************************************************************ + * Function: HHUL_bind_mw + * + * + * Arguments: hhul_hndl (IN) - The user level HCA handle + * mw (IN) - The memory window handle + * bind_prop_p (IN) - Binding properties + * bind_rkey_p (OUT) - The allocated R-Key for binding + * + * Returns: HH_OK + * HH_EINVAL_HCA_HNDL + * HH_EINVAL - Invalid parameters (NULL ptrs) + * HH_EINVAL_MW_HNDL - Invalid memory window handle + * HH_EINVAL_QP_HNDL - + * Given QP handle is unknown or not in the correct + * state or is not of the correct transport + * service type (RC,UC,RD). + * HH_EAGAIN - No available + * resources to perform given operation. + * + * Description: + * + * This function performs the posting of the memory binding WQE based on + * given binding properties. Unbinding of a window may be done by + * providing a 0 sized window.
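A minimal sketch of this alloc/bind flow, illustrative only: the helper name is hypothetical, the acl and comp_type values are left to the caller (their concrete constants are not defined in this header), and the initial R-Key is presumed to come from a prior kernel-level MW allocation.

static HH_ret_t example_bind_window(HHUL_hca_hndl_t hhul,
                                    IB_rkey_t initial_rkey,   /* from kernel-level MW alloc */
                                    VAPI_lkey_t region_lkey,  /* L-Key of the underlying MR */
                                    IB_virt_addr_t va,
                                    VAPI_size_t len,
                                    VAPI_mrw_acl_t acl,
                                    HHUL_qp_hndl_t qp,        /* RC/UC/RD QP for the bind WQE */
                                    VAPI_comp_type_t comp_type,
                                    HHUL_mw_hndl_t *mw_p,
                                    IB_rkey_t *bound_rkey_p)
{
    HHUL_mw_bind_t bind;
    HH_ret_t rc = HHUL_alloc_mw(hhul, initial_rkey, mw_p);
    if (rc != HH_OK)
        return rc;

    bind.mr_lkey   = region_lkey;
    bind.start     = va;
    bind.size      = len;        /* a size of 0 would unbind the window */
    bind.acl       = acl;
    bind.qp        = qp;
    bind.id        = 1;          /* caller-chosen work request id */
    bind.comp_type = comp_type;

    rc = HHUL_bind_mw(hhul, *mw_p, &bind, bound_rkey_p);
    if (rc != HH_OK)
        HHUL_free_mw(hhul, *mw_p);  /* release user-level MW context on failure */
    return rc;
}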
+ * + ************************************************************************/ + +#define HHUL_bind_mw(\ + hhul_hndl, mw, bind_prop_p, bind_rkey_p) \ + (hhul_hndl)->if_ops->HHULIF_bind_mw(\ + hhul_hndl, mw, bind_prop_p, bind_rkey_p) + + +/************************************************************************ + * Function: HHUL_free_mw + * + * + * Arguments: hhul_hndl (IN) - The user level HCA handle + * mw (IN) - The memory window handle + * + * Returns: HH_OK + * HH_EINVAL_HCA_HNDL + * HH_EINVAL_MW_HNDL - Invalid memory window handle + * + * Description: + * + * Free user-level context of given memory window. This function call + * should be followed by an invocation to the kernel function which frees + * the memory window resource. + * + ************************************************************************/ + +#define HHUL_free_mw(hhul_hndl, mw) \ + (hhul_hndl)->if_ops->HHULIF_free_mw(hhul_hndl, mw) + + +/************************************************************************ + * Function: HHUL_create_ud_av + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * pd (IN) - PD of created ud_av + * av_p (IN) - Given address vector to create handle for + * ah_p (OUT) - The returned address handle + * + * Returns: HH_OK, + * HH_ENODEV - Unknown device + * HH_EINVAL - Invalid parameters (NULL pointer, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is not in "HH_OPENED" state + * HH_EAGAIN - No available UD AV resources + * + * Description: + * + * This function allocates the privileged resources and returns a handle + * which may be used when posting UD QP WQEs in a system that enforces + * privileged UD AVs. + * + ************************************************************************/ + +#define HHUL_create_ud_av(hca_hndl, pd, av_p, ah_p) \ + (hca_hndl)->if_ops->HHULIF_create_ud_av(hca_hndl, pd, av_p, ah_p) + + +/************************************************************************ + * Function: HHUL_modify_ud_av + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * ah (IN) - The address handle to modify + * av_p (IN) - Modified address vector + * + * Returns: HH_OK, + * HH_ENODEV - Unknown device + * HH_EINVAL - Invalid parameters (NULL pointer, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is not in "HH_OPENED" state + * HH_EINVAL_AV_HNDL - Unknown UD AV handle + * + * Description: + * Modify properties of given UD address handle. + * + ************************************************************************/ + +#define HHUL_modify_ud_av(hca_hndl, ah, av_p) \ + (hca_hndl)->if_ops->HHULIF_modify_ud_av(hca_hndl, ah, av_p) + + +/************************************************************************ + * Function: HHUL_query_ud_av + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * ah (IN) - The address handle to modify + * av_p (IN) - Returned address vector + * + * Returns: HH_OK, + * HH_ENODEV - Unknown device + * HH_EINVAL - Invalid parameters (NULL pointer, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is not in "HH_OPENED" state + * HH_EINVAL_AV_HNDL - Unknown UD AV handle + * + * Description: + * Get address vector data for given address handle. 
+ * + ************************************************************************/ + +#define HHUL_query_ud_av(hca_hndl, ah, av_p) \ + (hca_hndl)->if_ops->HHULIF_query_ud_av(hca_hndl, ah, av_p) + + +/************************************************************************ + * Function: HHUL_destroy_ud_av + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * ah (IN) - The address handle to destroy + * + * Returns: HH_OK, + * HH_ENODEV - Unknown device + * HH_EINVAL - Invalid parameters (NULL pointer, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is not in "HH_OPENED" state + * HH_EINVAL_AV_HNDL - Unknown UD AV handle + * + * Description: + * Free privileged UD address handle resources in HCA. + * + ************************************************************************/ + +#define HHUL_destroy_ud_av(hca_hndl, ah) \ + (hca_hndl)->if_ops->HHULIF_destroy_ud_av(hca_hndl, ah) + + +/************************************************************************ + * Function: HHUL_create_cq_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * num_o_cqes (IN) - Requested minimum number of CQEs in CQ + * hhul_cq_p (OUT) - Returned user-level handle for this CQ + * num_o_cqes_p (OUT) - Actual number of CQEs in updated CQ + * cq_ul_resources_p (OUT) - Pointer to allocated resources + * context (of size cq_ul_resources_sz) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters + * (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not + * in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * HH_EAGAIN - No available resources to complete + * this operation + * + * Description: + * + * Before creating the CQ in a privileged call user level resources must + * be allocated. This function deals with the allocation of the required + * user level resources based on given parameters. + * + * The resources context is returned in the cq_ul_resources_p and should + * be given to kernel level call. Freeing of these resources is done + * using the function HHUL_destroy_cq_done(). + * + ************************************************************************/ + +#define HHUL_create_cq_prep(\ + hca_hndl, num_o_cqes, hhul_cq_p, num_o_cqes_p, cq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_cq_prep(\ + hca_hndl, num_o_cqes, hhul_cq_p, num_o_cqes_p, cq_ul_resources_p) + + +/************************************************************************ + * Function: HHUL_create_cq_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * hhul_cq (IN) - The user level CQ handle + * hh_cq (IN) - The CQ allocated by HH + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ handle + * + * Description: + * + * After creation of the CQ in the privileged call to HH_create_cq() + * through VIP's checks, this function deals with binding of allocated + * CQ to pre-allocated user-level context. + * + * The CQ cannot be polled before calling this function.
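Putting the prep/create/done sequence together, a sketch of the full flow may help. It is illustrative only: the helper name is hypothetical, the privileged HH_create_cq()/HH_destroy_cq() calls are assumed to use the kernel-side signatures stubbed in hhenosys.ic, and cq_ul_res points at a buffer of cq_ul_resources_sz bytes.

static HH_ret_t example_create_cq(HHUL_hca_hndl_t hhul,
                                  HH_hca_hndl_t hh,
                                  MOSAL_protection_ctx_t ctx,
                                  VAPI_cqe_num_t min_cqes,
                                  void *cq_ul_res,
                                  HHUL_cq_hndl_t *hhul_cq_p,
                                  HH_cq_hndl_t *hh_cq_p)
{
    VAPI_cqe_num_t actual_cqes;

    /* 1. Allocate user-level CQ resources (CQE buffer etc.). */
    HH_ret_t rc = HHUL_create_cq_prep(hhul, min_cqes, hhul_cq_p,
                                      &actual_cqes, cq_ul_res);
    if (rc != HH_OK)
        return rc;

    /* 2. Privileged creation of the hardware CQ. */
    rc = HH_create_cq(hh, ctx, cq_ul_res, hh_cq_p);
    if (rc != HH_OK) {
        HHUL_destroy_cq_done(hhul, *hhul_cq_p);  /* allowed when HH_create_cq fails */
        return rc;
    }

    /* 3. Bind the hardware CQ to the user-level context; pollable only after this. */
    return HHUL_create_cq_done(hhul, *hhul_cq_p, *hh_cq_p, cq_ul_res);
}

Destruction reverses the order: the privileged HH_destroy_cq() first, then HHUL_destroy_cq_done() to release the user-level buffer.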
+ * + ************************************************************************/ + +#define HHUL_create_cq_done(hca_hndl, hhul_cq, hh_cq, cq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_cq_done(hca_hndl, hhul_cq, hh_cq, cq_ul_resources_p) + + +/************************************************************************ + * Function: HHUL_resize_cq_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to modify + * num_o_cqes (IN) - Requested minimum number of CQEs in CQ + * num_o_cqes_p (OUT) - Actual number of CQEs in updated CQ + * cq_ul_resources_p (OUT) - Pointer to updated allocated + * resources context for modified CQ + * (of size cq_ul_resources_sz) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * HH_EAGAIN - No available resources to complete this operation + * HH_EBUSY - Previous resize is still in progress + * + * Description: + * + * This function prepares user-level resources for CQ modification + * (e.g. alternate CQE buffer). It should be called before calling the + * kernel level HH_resize_cq() (which should be called with given updated + * resources context). + * Only one outstanding resize is allowed. + * + ************************************************************************/ +#define HHUL_resize_cq_prep(\ + hca_hndl, cq, num_o_cqes, num_o_cqes_p, cq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_resize_cq_prep(\ + hca_hndl, cq, num_o_cqes, num_o_cqes_p, cq_ul_resources_p) + +/************************************************************************ + * Function: HHUL_resize_cq_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to modify + * cq_ul_resources_p (IN) - Pointer to updated allocated + * resources context for modified CQ + * (of size cq_ul_resources_sz) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ or given CQ is not "resizing" + * (i.e. HHUL_resize_cq_prep() was not invoked for it) + * HH_EAGAIN - No available resources to complete this operation + * + * Description: + * + * This function notifies the user-level that the CQ modify operation has + * completed. It should be called after calling the + * kernel level HH_resize_cq() (which should be called with given updated + * resources context). + * + * In case of a failure in HH_resize_cq(), HHUL_resize_cq_done() + * must be called with cq_ul_resources=NULL in order to cause cleanup of + * any resources allocated for CQ modification on HHUL_resize_cq_prep() + * and to assure proper CQ polling. + * + ************************************************************************/ +#define HHUL_resize_cq_done(hca_hndl, cq, cq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_resize_cq_done(hca_hndl, cq, cq_ul_resources_p) + + +/************************************************************************ + * Function: HHUL_poll4cqe + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to poll + * cqe_p (OUT) - The returned CQE (on HH_OK) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) 
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * HH_CQ_EMPTY - No CQE in given CQ + * + * Description: Pop the CQE at the head of the CQ. + * + * + ************************************************************************/ + +#define HHUL_poll4cqe(hca_hndl, cq, cqe_p) \ + (hca_hndl)->if_ops->HHULIF_poll4cqe(hca_hndl, cq, cqe_p) + +/************************************************************************ + * Function: HHUL_poll_and_rearm_cq + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to poll + * solicitedNotification (IN) - Notification event type (Next=FALSE or Solicited=TRUE) + * cqe_p (OUT) - The returned CQE (on HH_OK) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * HH_CQ_EMPTY - No CQE in given CQ + * HH_EAGAIN - cqe is valid, cq has been rearmed, do subsequent poll4cqe calls + * + * Description: If the head of the CQ is valid, pop it and return HH_OK; + * otherwise rearm the CQ and then check the head of the CQ again: + * if the head of the CQ is now valid, pop it and return HH_EAGAIN; + * otherwise return HH_CQ_EMPTY. + * + * + ************************************************************************/ + +#define HHUL_poll_and_rearm_cq(hca_hndl, cq, solicitedNotification, cqe_p) \ + (hca_hndl)->if_ops->HHULIF_poll_and_rearm_cq(hca_hndl, cq, solicitedNotification, cqe_p) + + +/********************************************************** + * + * Function: HHUL_peek_cq + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq: CQ Handle. + * cqe_num: Number of CQE to peek to (next CQE is #1) + * + * Returns: + * HH_OK: At least cqe_num CQEs outstanding in given CQ + * HH_CQ_EMPTY: Less than cqe_num CQEs are outstanding in given CQ + * HH_E2BIG_CQ_NUM: cqe_num is beyond CQ size (or 0) + * HH_EINVAL_CQ_HNDL: invalid CQ handle + * HH_EINVAL_HCA_HNDL: invalid HCA handle + * + * Description: + * Check if there are at least cqe_num CQEs outstanding in the CQ + * (i.e., peek into the cqe_num CQE in the given CQ). + * No CQE is consumed from the CQ. + * + **********************************************************/ +#define HHUL_peek_cq(hca_hndl,cq,cqe_num) \ + (hca_hndl)->if_ops->HHULIF_peek_cq(hca_hndl, cq, cqe_num) + +/************************************************************************ + * Function: HHUL_req_comp_notif + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to poll + * notif_type (IN) - Notification event type (Next or Solicited) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * + * Description: Request completion notification for given CQ. + * + ************************************************************************/ + +#define HHUL_req_comp_notif(hca_hndl, cq, notif_type) \ + (hca_hndl)->if_ops->HHULIF_req_comp_notif(hca_hndl, cq, notif_type) + +/************************************************************************* + * Function: HHUL_req_ncomp_notif + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq: CQ Handle.
+ * cqe_num: Number of outstanding CQEs which trigger this notification + * (This may be 1 up to CQ size, limited by HCA capability - 0x7FFF for InfiniHost) + * + * + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_CQ_HNDL: invalid CQ handle + * VAPI_E2BIG_CQ_NUM: cqe_num is beyond CQ size or beyond HCA notification capability (or 0) + * For InfiniHost cqe_num is limited to 0x7FFF. + * VAPI_EPERM: not enough permissions. + * + * + * + * Description: + * Request notification when CQ holds at least N (non-polled) CQEs + * + * + *************************************************************************/ +#define HHUL_req_ncomp_notif(hca_hndl, cq, cqe_num) \ + (hca_hndl)->if_ops->HHULIF_req_ncomp_notif(hca_hndl, cq, cqe_num) + + +/************************************************************************ + * Function: HHUL_destroy_cq_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * cq (IN) - The CQ to free user level resources for + * + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_CQ_HNDL - Unknown CQ + * + * Description: + * + * This function frees the user level resources allocated during + * HHUL_create_cq_prep(). It must be called after successfully calling the + * kernel level HH_destroy_cq() (or when HH_create_cq() fails). + * + ************************************************************************/ + +#define HHUL_destroy_cq_done(hca_hndl, cq) \ + (hca_hndl)->if_ops->HHULIF_destroy_cq_done(hca_hndl, cq) + + +/************************************************************************ + * Function: HHUL_create_qp_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_init_attr_p - init attributes for the QP + * qp_hndl_p (OUT) - User level QP handle + * qp_cap_out_p (OUT) - Actual QP capabilities + * qp_ul_resources_p (OUT) - The user-level resources context + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * + * Description: + * + * This function allocates user level resources for a new QP. It is + * called before calling the kernel level HH_create_qp(). + * The allocated resources context returned in qp_ul_resources_p should + * be passed to HH_create_qp() in order to + * synchronize the hardware context. + * + * Freeing of resources allocated here may be done using the function + * HHUL_destroy_qp_done(). + * (An illustrative usage sketch follows the HHUL_destroy_qp_done + * description below.) + * + ************************************************************************/ + +#define HHUL_create_qp_prep(\ + hca_hndl, qp_init_attr_p, qp_hndl_p, qp_cap_out_p, qp_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_qp_prep(\ + hca_hndl, qp_init_attr_p, qp_hndl_p, qp_cap_out_p, qp_ul_resources_p) + + +/************************************************************************ + * Function: HHUL_special_qp_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * special_qp_type (IN) - Type of special QP to prepare for + * port - Port number for special QP.
+ * qp_init_attr_p - init attributes for the QP + * qp_hndl_p (OUT) - User level QP handle + * qp_cap_out_p (OUT) - Actual QP capabilities + * qp_ul_resources_p (OUT) - The user-level resources context + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * + ************************************************************************/ + +#define HHUL_special_qp_prep(\ + hca_hndl, qp_type, port, qp_init_attr_p, qp_hndl_p, qp_cap_out_p, qp_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_special_qp_prep(\ + hca_hndl, qp_type, port, qp_init_attr_p, qp_hndl_p, qp_cap_out_p, qp_ul_resources_p) + + +/************************************************************************ + * Function: HHUL_create_qp_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * hhul_qp (IN) - The user-level QP context + * hh_qp (IN) - The QP number (or handle for special QP) returned + * by HH on QP creation. + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (for HHUL's) + * + * Description: + * + * On successful call to HH's QP creation function this function should be + * called in order to enable the QP. This function performs the binding + * of the hardware QP resource allocated to the user-level QP context. + * + * + ************************************************************************/ + +#define HHUL_create_qp_done(hca_hndl, hhul_qp, hh_qp, qp_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_qp_done(hca_hndl, hhul_qp, hh_qp, qp_ul_resources_p) + +/************************************************************************ + * Function: HHUL_modify_qp_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * hhul_qp (IN) - The user-level QP context + * cur_state (IN) - state of QP after modify operation. + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (for HHUL's) + * + * Description: + * + * On successful call to HH's QP modify function this function should be + * called in order to synchronize the user copy of the QP state with the + * actual QP state after the modify-qp operation. + * + * + ************************************************************************/ + +#define HHUL_modify_qp_done(hca_hndl, hhul_qp, cur_state) \ + (hca_hndl)->if_ops->HHULIF_modify_qp_done(hca_hndl, hhul_qp, cur_state) + +/************************************************************************ + * Function: HHUL_post_send_req + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_hndl (IN) - User level QP handle + * send_req_p (IN) - Send request + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * HH_EINVAL_WQE - Invalid send request (send_req_p) + * HH_EINVAL_SG_NUM - Scatter/Gather list length error + * HH_EINVAL_QP_STATE - Invalid QP state (not RTS) + * HH_EINVAL_AV_HNDL - Invalid UD address handle (UD only) + * HH_EINVAL_OPCODE - Invalid opcode for given send-q + * + * Description: + * + * This function posts a send request to the send queue of the given + * QP. QP must be in RTS state. + * + * Every WQE successfully posted (HH_OK) must create a CQE in the CQ + * associated with this send queue (unless QP explicitly moved to RESET + * state). + * + * + * + ************************************************************************/ + +#define HHUL_post_send_req(hca_hndl, qp_hndl, send_req_p) \ + (hca_hndl)->if_ops->HHULIF_post_send_req(hca_hndl, qp_hndl, send_req_p) + +#define HHUL_post_send_req2(hca_hndl,qp_hndl,comp_tp,r_ah,WorkReq ) \ + (hca_hndl)->if_ops->HHULIF_post_send_req2(hca_hndl, qp_hndl, comp_tp,r_ah,WorkReq) + +#define HHUL_post_send_reqs(hca_hndl, qp_hndl, num_of_requests, send_req_array) \ + (hca_hndl)->if_ops->HHULIF_post_send_reqs(hca_hndl, qp_hndl, num_of_requests,send_req_array) + + +/************************************************************************ + * Function: HHUL_post_inline_send_req + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_hndl (IN) - User level QP handle + * send_req_p (IN) - Send request with a single gather entry + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * HH_EINVAL_WQE - Invalid send request (send_req_p) + * HH_EINVAL_SG_NUM - Scatter/Gather list length error + * HH_EINVAL_QP_STATE - Invalid QP state (not RTS) + * HH_EINVAL_AV_HNDL - Invalid UD address handle (UD only) + * HH_EINVAL_OPCODE - Invalid opcode for given send-q + * + * Description: + * + * This function posts an inline send request (data copied into WQE). + * The request may be send, send w/immediate, RDMA-write, or RDMA-write w/immediate. + * Gather list may be of one entry only, and data length is defined in QP capabilities + * (max_inline_data_sq) - defined at create_qp and queried via query_qp. + * + ************************************************************************/ +#define HHUL_post_inline_send_req(hca_hndl, qp_hndl, send_req_p) \ + (hca_hndl)->if_ops->HHULIF_post_inline_send_req(hca_hndl, qp_hndl, send_req_p) + +/************************************************************************ + * Function: HHUL_post_gsi_send_req + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_hndl (IN) - User level QP handle + * send_req_p (IN) - Send request with a single gather entry + * pkey_index (IN) - Pkey index to put in the outgoing packet + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * HH_EINVAL_WQE - Invalid send request (send_req_p) + * HH_EINVAL_SG_NUM - Scatter/Gather list length error + * HH_EINVAL_QP_STATE - Invalid QP state (not RTS) + * HH_EINVAL_AV_HNDL - Invalid UD address handle + * HH_EINVAL_OPCODE - Invalid opcode for given send-q + * + * Description: + * + * This function posts an inline send request (data copied into WQE) on a + * GSI (QP1) QP, placing the P_Key at the given pkey_index in the outgoing packet. + * The request may be send or send w/immediate. + * Gather list may be of one entry only, and data length is defined in QP capabilities + * (max_inline_data_sq) - defined at create_qp and queried via query_qp. + * + ************************************************************************/ +#define HHUL_post_gsi_send_req(hca_hndl, qp_hndl, send_req_p, pkey_index) \ + (hca_hndl)->if_ops->HHULIF_post_gsi_send_req(hca_hndl, qp_hndl, send_req_p, pkey_index) + + +/************************************************************************ + * Function: HHUL_post_recv_req + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_hndl (IN) - User level QP handle + * recv_req_p (IN) - Receive request + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * HH_EAGAIN - No available resources to complete this operation + * HH_EINVAL_WQE - Invalid receive request (recv_req_p) + * HH_EINVAL_SG_NUM - Scatter/Gather list length error + * HH_EINVAL_QP_STATE - Invalid QP state (RESET or ERROR) + * + * Description: + * + * This function posts a receive request to the receive queue of the + * given QP. QP must not be in RESET or ERROR state. + * + * Every WQE successfully posted (HH_OK) must create a CQE in the CQ + * associated with this receive queue (unless QP explicitly moved to RESET + * state). + * + ************************************************************************/ + +#define HHUL_post_recv_req(hca_hndl, qp_hndl, recv_req_p) \ + (hca_hndl)->if_ops->HHULIF_post_recv_req(hca_hndl, qp_hndl, recv_req_p) + +#define HHUL_post_recv_req2(hca_hndl, qp_hndl, comp_type,sg_lst_len,ReqId,sg_lst_p) \ + (hca_hndl)->if_ops->HHULIF_post_recv_req2(hca_hndl, qp_hndl, comp_type,sg_lst_len,ReqId,sg_lst_p) + +#define HHUL_post_recv_reqs(hca_hndl, qp_hndl, num_of_requests, recv_req_array) \ + (hca_hndl)->if_ops->HHULIF_post_recv_reqs(hca_hndl, qp_hndl, num_of_requests,recv_req_array) + + +/************************************************************************ + * Function: HHUL_destroy_qp_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * qp_hndl (IN) - User level handle of QP to destroy + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_QP_HNDL - Unknown QP (number) + * + * Description: + * + * This function frees the user level resources allocated during + * HHUL_create_qp_prep(). It must be called after successfully calling the + * kernel level HH_destroy_qp() (or on failure of HH_create_qp() or + * HH_get_special_qp() ).
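As promised in the HHUL_create_qp_prep description above, here is a sketch of the QP flow. It is illustrative only: the helper name is hypothetical, the translation from HHUL_qp_init_attr_t to the kernel's HH_qp_init_attr_t is elided (hh_attr is assumed to be pre-filled to match ul_attr), and HH_create_qp() is assumed to follow the kernel-side signature stubbed in hhenosys.ic.

static HH_ret_t example_create_qp(HHUL_hca_hndl_t hhul,
                                  HH_hca_hndl_t hh,
                                  HHUL_qp_init_attr_t *ul_attr,
                                  HH_qp_init_attr_t *hh_attr,   /* kernel-side view of ul_attr */
                                  void *qp_ul_res,              /* buffer of qp_ul_resources_sz bytes */
                                  HHUL_qp_hndl_t *hhul_qp_p,
                                  IB_wqpn_t *qpn_p)
{
    VAPI_qp_cap_t actual_cap;

    /* 1. Allocate user-level queues and context for the new QP. */
    HH_ret_t rc = HHUL_create_qp_prep(hhul, ul_attr, hhul_qp_p,
                                      &actual_cap, qp_ul_res);
    if (rc != HH_OK)
        return rc;

    /* 2. Privileged creation of the hardware QP. */
    rc = HH_create_qp(hh, hh_attr, qp_ul_res, qpn_p);
    if (rc != HH_OK) {
        HHUL_destroy_qp_done(hhul, *hhul_qp_p);  /* allowed on HH_create_qp failure */
        return rc;
    }

    /* 3. Enable the QP by binding it to the user-level context. */
    return HHUL_create_qp_done(hhul, *hhul_qp_p, *qpn_p, qp_ul_res);
}

Each subsequent successful HH_modify_qp() would then be mirrored with HHUL_modify_qp_done() so the user-level copy of the QP state stays in sync.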
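And a minimal posting sketch under the same caveats: the helper is hypothetical, the descriptors are assumed to be filled per VAPI conventions, and the QP is assumed to be in RTS.

static HH_ret_t example_ping(HHUL_hca_hndl_t hhul,
                             HHUL_qp_hndl_t qp,
                             VAPI_rr_desc_t *rr,   /* prepared receive descriptor */
                             VAPI_sr_desc_t *sr)   /* prepared send descriptor */
{
    /* Pre-post the receive before initiating the send. */
    HH_ret_t rc = HHUL_post_recv_req(hhul, qp, rr);
    if (rc != HH_OK)
        return rc;
    /* Post the send; a CQE will appear on the send queue's CQ. */
    return HHUL_post_send_req(hhul, qp, sr);
}

The batched variants (HHUL_post_send_reqs/HHUL_post_recv_reqs) take an array of descriptors plus a count instead.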
+ * + ************************************************************************/ + +#define HHUL_destroy_qp_done(hca_hndl, qp_hndl) \ + (hca_hndl)->if_ops->HHULIF_destroy_qp_done(hca_hndl, qp_hndl) + + + +/************************************************************************ + * Function: HHUL_create_srq_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * pd (IN) - The PD of the SRQ + * max_outs (IN) - Requested max. outstanding WQEs in the SRQ + * max_sentries (IN)- Requested max. scatter entries per WQE + * srq_hndl_p (OUT) - Returned (HHUL) SRQ handle + * max_outs_p (OUT) - Actual limit on number of outstanding WQEs + * max_sentries_p (OUT)- Actual limit on scatter entries per WQE + * srq_ul_resources_p(OUT)- SRQ user-level resources context (to pass down) + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, etc.) + * HH_E2BIG_WR_NUM - requested max. outstanding WQEs is higher than HCA capability + * HH_E2BIG_SG_NUM - requested sentries is higher than HCA capability + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EAGAIN - No available resources to complete this operation + * + * Description: + * This function allocates user level resources for a new SRQ. It is + * called before calling the kernel level HH_create_srq(). + * The allocated resources context returned in srq_ul_resources_p should + * be passed to HH_create_srq() in order to synchronize the hardware context. + * + * Freeing of resources allocated here may be done using the function + * HHUL_destroy_srq_done(). + * + ************************************************************************/ +#define HHUL_create_srq_prep(hca_hndl, pd, max_outs, max_sentries, \ + srq_hndl_p, actual_max_outs_p, actual_max_sentries, \ + srq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_srq_prep(hca_hndl, pd, max_outs, max_sentries, \ + srq_hndl_p, actual_max_outs_p, actual_max_sentries, \ + srq_ul_resources_p) + +/************************************************************************ + * Function: HHUL_create_srq_done + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * hhul_srq (IN) - The user-level SRQ context + * hh_srq (IN) - The SRQ handle returned by HH on SRQ creation. + * + * Returns: HH_OK, + * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.) + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_SRQ_HNDL - Unknown SRQ (for HHUL's) + * + * Description: + * On successful call to HH's SRQ creation function this function should be + * called in order to enable the SRQ. This function performs the binding + * of the hardware SRQ resource allocated to the user-level SRQ context. + * + * + ************************************************************************/ +#define HHUL_create_srq_done(hca_hndl, hhul_srq, hh_srq, srq_ul_resources_p) \ + (hca_hndl)->if_ops->HHULIF_create_srq_done(hca_hndl, hhul_srq, hh_srq, srq_ul_resources_p) + +/************************************************************************ + * Function: HHUL_modify_srq_prep + * + * + * Arguments: hca_hndl (IN) - User level handle of the HH device context + * srq_hndl (IN) - (HHUL) SRQ handle + * srq_attr_p(IN) - New attributes values + * srq_attr_mask(IN)- Flags for attributes to change + * srq_ul_resources_p(OUT)- SRQ user-level resources context (to pass down) + * + * Returns: HH_OK, + * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state + * HH_EINVAL_SRQ_HNDL + * HH_E2BIG_WR_NUM - requested max.
+ * HH_ENOSYS - Unsupported feature (e.g., limit event)
+ * HH_EAGAIN - No available resources to complete this operation
+ * HH_EINVAL - Invalid parameters (NULL pointer, etc.)
+ *
+ * Description:
+ * This function allocates user level resources for modifying an SRQ. It is
+ * called before calling the kernel level HH_modify_srq().
+ * The allocated resources context returned in srq_ul_resources_p should
+ * be passed to HH_modify_srq() in order to synchronize the hardware context.
+ *
+ * Freeing of the resources allocated here may be done using the function
+ * HHUL_modify_srq_done().
+ *
+ ************************************************************************/
+#define HHUL_modify_srq_prep(hca_hndl, srq_hndl, srq_attr_p, srq_attr_mask, \
+ srq_ul_resources_p) \
+ (hca_hndl)->if_ops->HHULIF_modify_srq_prep(hca_hndl, srq_hndl, srq_attr_p, srq_attr_mask,\
+ srq_ul_resources_p)
+
+/************************************************************************
+ * Function: HHUL_modify_srq_done
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * hhul_srq (IN) - The user-level SRQ context
+ * srq_ul_resources_p (IN) - UL resources buffer returned from HH_modify_srq().
+ * max_outs_wr_p (OUT) - Actual max. outstanding WQEs per SRQ (after modify)
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_SRQ_HNDL - Unknown SRQ (for HHUL's)
+ * HH_EAGAIN - Not enough resources to complete the operation.
+ * HH_EFATAL - unexpected (consistency) error
+ *
+ * Description:
+ * On a successful call to HH_modify_srq() this function should be
+ * called in order to enable the SRQ. This function completes the transition
+ * to the new SRQ resources (after resizing).
+ * If this function fails, the resize is annulled.
+ *
+ * In case of an error in HH_modify_srq() - this function should be called with
+ * srq_ul_resources_p==NULL. This would free any resources allocated during
+ * HHUL_modify_srq_prep().
+ *
+ *
+ ************************************************************************/
+#define HHUL_modify_srq_done(hca_hndl, hhul_srq, srq_ul_resources_p, max_outs_wr_p) \
+ (hca_hndl)->if_ops->HHULIF_modify_srq_done(hca_hndl, hhul_srq, srq_ul_resources_p, max_outs_wr_p)
+
+/************************************************************************
+ * Function: HHUL_destroy_srq_done
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * srq_hndl (IN) - User level handle of the SRQ to destroy
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_SRQ_HNDL - Unknown SRQ
+ *
+ * Description:
+ * This function frees the user level resources allocated during
+ * HHUL_create_srq_prep(). It must be called after successfully calling the
+ * kernel level HH_destroy_srq() (or on failure of HH_create_srq()).
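+ *
+ * A minimal cleanup sketch (illustrative only; the prep call is assumed to
+ * have succeeded while the kernel-level creation failed):
+ *
+ *   if (HHUL_create_srq_prep(hhul_hndl, pd, max_outs, max_sentries, &hhul_srq,
+ *                            &act_outs, &act_sentries, &ul_res) == HH_OK) {
+ *       if (HH_create_srq(...) != HH_OK)   (kernel-level call failed)
+ *           HHUL_destroy_srq_done(hhul_hndl, hhul_srq);
+ *   }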
+ *
+ ************************************************************************/
+#define HHUL_destroy_srq_done(hca_hndl, hhul_srq) \
+ (hca_hndl)->if_ops->HHULIF_destroy_srq_done(hca_hndl, hhul_srq)
+
+/************************************************************************
+ * Function: HHUL_post_srq
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * hhul_srq (IN) - User level SRQ handle
+ * num_of_requests (IN) - Number of requests given in recv_req_array
+ * recv_req_array (IN) - Array of receive requests
+ * posted_requests_p (OUT) - Number of actually committed/posted requests (valid also on error).
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_SRQ_HNDL - Unknown SRQ
+ * HH_EAGAIN - No available resources to complete this operation
+ * HH_EINVAL_WQE - Invalid receive request (in recv_req_array)
+ * HH_EINVAL_SG_NUM - Scatter/Gather list length error
+ * HH_EINVAL_QP_STATE - Invalid QP state (RESET or ERROR)
+ *
+ * Description:
+ * This function posts receive requests to the given SRQ.
+ * On an error return code, the returned *posted_requests_p identifies the number of requests
+ * successfully posted (or the index of the failing WQE). The error code refers to the
+ * request at index *posted_requests_p.
+ *
+ ************************************************************************/
+#define HHUL_post_srq(hca_hndl, hhul_srq, num_of_requests, recv_req_array, posted_requests_p) \
+ (hca_hndl)->if_ops->HHULIF_post_srq(hca_hndl, hhul_srq, num_of_requests, recv_req_array, \
+ posted_requests_p)
+
+
+/************************************************************************
+ * Function: HHUL_alloc_pd_prep
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * hhul_pd_p (OUT) - Returned user-level handle for this PD
+ * pd_ul_resources_p (OUT) - Pointer to allocated resources
+ * context (of size pd_ul_resources_sz)
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters
+ * (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not
+ * in "HH_OPENED" state
+ * HH_EAGAIN - No available resources to complete
+ * this operation
+ *
+ * Description:
+ *
+ * Before creating the PD in a privileged call, user level resources must
+ * be allocated. This function deals with the allocation of the required
+ * user level resources based on the given parameters.
+ *
+ * The resources context is returned in pd_ul_resources_p and should
+ * be given to the kernel level call. Freeing of these resources is done
+ * using the function HHUL_free_pd_done().
+ *
+ ************************************************************************/
+
+#define HHUL_alloc_pd_prep(\
+ hca_hndl, hhul_pd_p, pd_ul_resources_p) \
+ (hca_hndl)->if_ops->HHULIF_alloc_pd_prep(hca_hndl, hhul_pd_p, pd_ul_resources_p)
+
+
+/************************************************************************
+ * Function: HHUL_alloc_pd_done
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * hhul_pd (IN) - The user level PD handle
+ * hh_pd (IN) - The PD allocated by HH
+ * pd_ul_resources_p (OUT) - Pointer to allocated resources
+ * context (of size pd_ul_resources_sz)
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_PD_HNDL - Unknown PD handle
+ *
+ * Description:
+ *
+ * After creation of the PD in the privileged call to HH_alloc_pd()
+ * through VIP's checks, this function deals with the binding of the allocated
+ * PD to the pre-allocated user-level context.
+ *
+ ************************************************************************/
+
+#define HHUL_alloc_pd_done(hca_hndl, hhul_pd, hh_pd, pd_ul_resources_p) \
+ (hca_hndl)->if_ops->HHULIF_alloc_pd_done(hca_hndl, hhul_pd, hh_pd, pd_ul_resources_p)
+
+/************************************************************************
+ * Function: HHUL_free_pd_prep
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * pd (IN) - The PD to free user level resources for
+ * undo_flag (IN) - if TRUE, undo the previous free_pd_prep,
+ * and restore the PD
+ *
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_PD_HNDL - Unknown PD
+ * HH_EBUSY - when prepping (undo_flag = FALSE), indicates that the PD
+ * still has allocated UDAVs
+ *
+ * Description:
+ *
+ * This function prepares the user level resources allocated during
+ * HHUL_alloc_pd_prep() for freeing, checking if there are still UDAVs allocated to this PD.
+ * It must be called before calling the kernel level HH_free_pd().
+ *
+ ************************************************************************/
+
+#define HHUL_free_pd_prep(hca_hndl, pd, undo_flag) \
+ (hca_hndl)->if_ops->HHULIF_free_pd_prep(hca_hndl, pd, undo_flag)
+
+
+/************************************************************************
+ * Function: HHUL_free_pd_done
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * pd (IN) - The PD to free user level resources for
+ *
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not in "HH_OPENED" state
+ * HH_EINVAL_PD_HNDL - Unknown PD
+ *
+ * Description:
+ *
+ * This function frees the user level resources allocated during
+ * HHUL_alloc_pd_prep(). It must be called after successfully calling the
+ * kernel level HH_free_pd() (or when HH_alloc_pd() fails).
+ *
+ ************************************************************************/
+
+#define HHUL_free_pd_done(hca_hndl, pd) \
+ (hca_hndl)->if_ops->HHULIF_free_pd_done(hca_hndl, pd)
+
+
+/************************************************************************
+ * Set the if_ops tbl with dummy functions returning HH_ENOSYS.
+ * This is convenient for initializing tables prior to
+ * setting them with a partial real implementation.
+ *
+ * This way, the general HHUL_if_ops_t table structure can be extended,
+ * requiring just a recompilation.
+ ************************************************************************/
+extern void HHUL_ifops_tbl_set_enosys(HHUL_if_ops_t* tbl);
+
+
+/************************************************************************
+ * Function: HHUL_alloc_pd_avs_prep
+ *
+ *
+ * Arguments: hca_hndl (IN) - User level handle of the HH device context
+ * max_num_avs (IN) - desired max. number of AVs to be available for this PD
+ * pd_flags (IN) - currently, whether this is a PD for an SQP or not.
+ * hhul_pd_p (OUT) - Returned user-level handle for this PD
+ * pd_ul_resources_p (OUT) - Pointer to allocated resources
+ * context (of size pd_ul_resources_sz)
+ *
+ * Returns: HH_OK,
+ * HH_EINVAL - Invalid parameters
+ * (NULL pointer, invalid L-Key, etc.)
+ * HH_EINVAL_HCA_HNDL - Given HCA is unknown or not
+ * in "HH_OPENED" state
+ * HH_EAGAIN - No available resources to complete
+ * this operation
+ *
+ * Description:
+ *
+ * Before creating the PD in a privileged call, user level resources must
+ * be allocated. This function deals with the allocation of the required
+ * user level resources based on the given parameters.
+ *
+ * If the caller desires to use the default maximum number of AVs for this PD,
+ * max_num_avs should be set to EVAPI_DEFAULT_AVS_PER_PD.
+ *
+ * The resources context is returned in pd_ul_resources_p and should
+ * be given to the kernel level call. Freeing of these resources is done
+ * using the function HHUL_free_pd_done().
+ *
+ ************************************************************************/
+
+#define HHUL_alloc_pd_avs_prep(\
+ hca_hndl, max_num_avs, pd_flags, hhul_pd_p, pd_ul_resources_p) \
+ (hca_hndl)->if_ops->HHULIF_alloc_pd_avs_prep(hca_hndl, max_num_avs, pd_flags, hhul_pd_p, pd_ul_resources_p)
+
+
+/************************************************************************
+ * Some common structs to be used by lower level drivers
+ */
+
+
+/* Send Request Descriptor */
+struct HHUL_sr_wqe_st {
+ struct HHUL_sr_wqe_st *next; /* for WQEs list */
+ struct HHUL_sr_wqe_st *ul_wqe_p; /* For passing from kernel to user */
+
+ VAPI_wr_id_t id;
+
+ VAPI_wr_opcode_t opcode;
+ VAPI_comp_type_t comp_type;
+
+ VAPI_imm_data_t imm_data;
+ MT_bool fence;
+ MT_bool av_valid; /* TRUE if the following AV is valid */
+ VAPI_ud_av_t av;
+ VAPI_qp_num_t remote_qp;
+ VAPI_qkey_t remote_qkey;
+ VAPI_ethertype_t ethertype;
+
+ MT_bool set_se;
+ VAPI_virt_addr_t remote_addr;
+
+
+ VAPI_rkey_t r_key;
+ u_int64_t operand1; /* for atomic */
+ u_int64_t operand2; /* for atomic */
+
+ u_int32_t sg_lst_len;
+
+ /* TBD - Add AckReg - Data Seg - If Gent - */
+
+ /* TBD - Add support for reliable datagram (RD) */
+ u_int32_t sg_total_byte_len;
+
+ VAPI_sg_lst_entry_t sg_lst_p[1];
+
+ /* here comes the data ...
+ * if (sg_lst_len > 0)
+ * MALLOC(sizeof(HHUL_sr_wqe_t)+[sg_lst_len-1]*sizeof(VAPI_sg_lst_entry_t))
+ */
+
+}; /* HHUL_sr_wqe_t */
+
+
+/* Receive Request WQE */ /* TBD - move to HH */
+struct HHUL_rr_wqe_st {
+ struct HHUL_rr_wqe_st* next; /* Next wqe in receive queue */
+ struct HHUL_rr_wqe_st* prev; /* Previous wqe in receive queue */
+
+ VAPI_wr_id_t id; /* ID provided by the user */
+
+ VAPI_comp_type_t comp_type; /* Mellanox Specific
+ * {VAPI_SIGNALED, VAPI_UNSIGNALED} */
+ u_int32_t total_len; /* Current s/g list length -
+ * need to calculate it */
+ u_int32_t sg_lst_len;
+ VAPI_sg_lst_entry_t sg_lst_p[1]; /* TBD is it ok to define this way */
+
+ /* here comes the data ...
+ * if (sg_lst_len > 0)
+ * MALLOC(sizeof(HHUL_rr_wqe_t)+[sg_lst_len-1]*sizeof(VAPI_sg_lst_entry_t))
+ */
+} /* HHUL_rr_wqe_t */;
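+
+/*
+ * Illustrative only (a sketch, not part of the original interface): both WQE
+ * structs above end with a one-entry scatter/gather array, so a WQE holding
+ * sg_lst_len entries is allocated with the trailing-array idiom noted in the
+ * comments above, e.g. for a receive WQE (assuming sg_lst_len >= 1):
+ *
+ *   struct HHUL_rr_wqe_st *wqe = MALLOC(sizeof(struct HHUL_rr_wqe_st) +
+ *       (sg_lst_len - 1) * sizeof(VAPI_sg_lst_entry_t));
+ *   wqe->sg_lst_len = sg_lst_len;   (entries then fill wqe->sg_lst_p[])
+ */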
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_obj.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_obj.h
new file mode 100644
index 00000000..7e83b979
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_obj.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_HHUL_OBJ_H
+#define H_HHUL_OBJ_H
+
+typedef struct {
+ MT_virt_addr_t addr;
+ MT_virt_addr_t len;
+ VAPI_lkey_t lkey;
+} HHUL_sg_lst_entry_t;
+
+
+
+/* Receive Request WQE */
+typedef struct HH_rr_wqe_st {
+ VAPI_wr_id_t id; /* ID provided by the user */
+
+ MT_virt_addr_t next; /* next wqe in struct */
+
+ VAPI_comp_type_t comp_type; /* Mellanox Specific {VAPI_SIGNALED, VAPI_UNSIGNALED} */
+ u_int32_t total_len; /* Current s/g list length - need to calculate it */
+
+ u_int32_t sg_lst_len;
+ HHUL_sg_lst_entry_t sg_lst_p[1]; /* TBD is this ok to define it this way */
+
+ /* here comes the data ...
+ * malloc(sizeof(HHUL_rr_wqe_t)+[sg_lst_len-1]*sizeof(HHUL_sg_lst_entry_t))
+ */
+} HHUL_rr_wqe_t;
+
+
+#if 0
+/* TBD - probably obsolete. see HHUL_sr_wqe_t in hhul_common.h */
+/* Send Request Descriptor */
+typedef struct HHUL_sr_desc_st
+{
+ HH_hca_hndl_t hh_hca_hndl; /* Handle to HCA */
+
+ VAPI_sr_desc_t vapi_sr_desc; /* VAPI Send Request Descriptor */
+
+ VAPI_av_t vapi_av;
+
+ HHUL_sg_lst_entry_t *hh_sg_lst;
+
+ /* TBD - Add AckReg - Data Seg - If Gent - */
+
+ /* TBD - Add support for reliable datagram (RD) */
+
+} HH_sr_desc_t;
+#endif
+
+
+
+typedef struct HHUL_hca_obj {
+ HH_hca_hndl_t hca_hndl;
+
+ struct HHUL_qp_obj_st *qp_head;
+ struct HHUL_cq_obj_st *cq_head;
+
+ /* TBD - Add support for address vector image */
+ /* TBD - Add support for RDD */
+
+} HHUL_hca_obj_t;
+
+
+
+#endif /* H_HHUL_OBJ_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_stub.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_stub.c
new file mode 100644
index 00000000..33eec5fb
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhul_stub.c
@@ -0,0 +1,91 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +HH_ret_t HHUL_destroy_qp(HHUL_hca_hndl_t hca_hndl, VAPI_qp_num_t qp_num) +{ + return HH_OK; +} + + + +HH_ret_t HHUL_get_qp_state(HHUL_hca_hndl_t hca_hndl, VAPI_qp_num_t qp_num, VAPI_qp_state_t *qp_state_p) +{ + *qp_state_p = VAPI_RESET; + return HH_OK; +} + + +HH_ret_t HHUL_create_cq(HHUL_hca_hndl_t hca_hndl, HH_cq_num_t cq_num, MT_virt_addr_t cq_buff, MT_virt_addr_t cq_size) +{ + return HH_OK; +} + + +HH_ret_t HHUL_destroy_cq(HHUL_hca_hndl_t hca_hndl, HH_cq_num_t cq_num) +{ + return HH_OK; +} + + + +/* TBD - Complete Memory Windows Registration */ +/* HHUL_bind_memory_region */ + + + +HH_ret_t HHUL_post_send_wr(HHUL_hca_hndl_t hca_hndl, VAPI_sr_desc_t *vapi_sr_desc_p, VAPI_ud_av_t *vapi_av_p) /* TBD - EEC */ +{ + return HH_OK; +} + + +HH_ret_t HHUL_post_receive_wr(HH_hca_hndl_t hca_hndl, VAPI_rr_desc_t *vapi_rr_desc_p) +{ + return HH_OK; +} + + + +HH_ret_t HHUL_poll_for_completion(HHUL_hca_hndl_t hca_hndl, HH_cq_num_t cq_num, + VAPI_wc_desc_t *VAPI_comp_desc_p) +{ + return HH_OK; +} + + + + +HH_ret_t HHUL_req_comp_notify(HH_hca_hndl_t hca_hndl, HH_cq_num_t cq_num, VAPI_cq_notif_type_t cq_not_type) +{ + return HH_OK; +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic new file mode 100644 index 00000000..c47e086a --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/hhulenosys.ic @@ -0,0 +1,382 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id:$ + */ + + +static HH_ret_t enosys_cleanup_user_level( + HHUL_hca_hndl_t hhul_hndl +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_pd_prep( + HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t* hhul_pd_p, + void* pd_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_pd_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t hhul_pd, + HH_pd_hndl_t hh_pd, + void* pd_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_pd_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t hhul_pd +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_alloc_mw( + HHUL_hca_hndl_t hhul_hndl, + IB_rkey_t initial_rkey, + HHUL_mw_hndl_t* mw_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_bind_mw( + HHUL_hca_hndl_t hhul_hndl, + HHUL_mw_hndl_t mw, + HHUL_mw_bind_t* bind_prop_p, + IB_rkey_t* bind_rkey_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_free_mw( + HHUL_hca_hndl_t hhul_hndl, + HHUL_mw_hndl_t mw +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_ud_av( + HHUL_hca_hndl_t hca_hndl, + HHUL_pd_hndl_t pd, + VAPI_ud_av_t* av_p, + HHUL_ud_av_hndl_t* ah_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_modify_ud_av( + HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_query_ud_av( + HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_ud_av( + HHUL_hca_hndl_t hca_hndl, + HHUL_ud_av_hndl_t ah +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_cq_prep( + HHUL_hca_hndl_t hca_hndl, + VAPI_cqe_num_t num_o_cqes, + HHUL_cq_hndl_t* hhul_cq_p, + VAPI_cqe_num_t* num_o_cqes_p, + void* cq_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_cq_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t hhul_cq, + HH_cq_hndl_t hh_cq, + void* cq_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_resize_cq_prep( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t num_o_cqes, + VAPI_cqe_num_t* num_o_cqes_p, + void* cq_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_resize_cq_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + void* cq_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_poll4cqe( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_wc_desc_t* cqe_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_peek_cq( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t cqe_num +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_req_comp_notif( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cq_notif_type_t notif_type +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_req_ncomp_notif( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq, + VAPI_cqe_num_t cqe_num +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_cq_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_cq_hndl_t cq +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_qp_prep( + HHUL_hca_hndl_t hca_hndl, + HHUL_qp_init_attr_t* qp_init_attr_p, + HHUL_qp_hndl_t* qp_hndl_p, + VAPI_qp_cap_t* qp_cap_out_p, + void* qp_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_special_qp_prep( + HHUL_hca_hndl_t hca_hndl, + VAPI_special_qp_t qp_type, + IB_port_t port, + HHUL_qp_init_attr_t* qp_init_attr_p, + HHUL_qp_hndl_t* qp_hndl_p, + VAPI_qp_cap_t* qp_cap_out_p, + void* qp_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_qp_done( + 
HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t hhul_qp, + IB_wqpn_t hh_qp, + void* qp_ul_resources_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_post_send_req( + HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_sr_desc_t* send_req_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_post_inline_send_req(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_sr_desc_t* send_req_p) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_post_send_reqs(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + u_int32_t num_of_requests, + VAPI_sr_desc_t* send_req_array) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_post_recv_req( + HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + VAPI_rr_desc_t* recv_req_p +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_post_recv_reqs(HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl, + u_int32_t num_of_requests, + VAPI_rr_desc_t* recv_req_array) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_destroy_qp_done( + HHUL_hca_hndl_t hca_hndl, + HHUL_qp_hndl_t qp_hndl +) +{ + return HH_ENOSYS; +} + +static HH_ret_t enosys_create_srq_prep( + /*IN*/ + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t pd, + u_int32_t max_outs, + u_int32_t max_sentries, + /*OUT*/ + HHUL_srq_hndl_t *srq_hndl_p, + u_int32_t *actual_max_outs_p, + u_int32_t *actual_max_sentries_p, + void /*THH_srq_ul_resources_t*/ *srq_ul_resources_p) +{ + return HH_ENOSYS; +} + + +static HH_ret_t enosys_create_srq_done( + HHUL_hca_hndl_t hca, + HHUL_srq_hndl_t hhul_srq, + HH_srq_hndl_t hh_srq, + void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p +) +{ + return HH_ENOSYS; +} + + +static HH_ret_t enosys_destroy_srq_done( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_srq +) +{ + return HH_ENOSYS; +} + + +static HH_ret_t enosys_post_srq( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_srq_hndl_t hhul_srq, + /*IN*/ u_int32_t num_of_requests, + /*IN*/ VAPI_rr_desc_t *recv_req_array, + /*OUT*/ u_int32_t *posted_requests_p +) +{ + return HH_ENOSYS; +} + + +static void enosys_init(HHUL_if_ops_t* p) +{ + p->HHULIF_cleanup_user_level = &enosys_cleanup_user_level; + p->HHULIF_alloc_pd_prep = &enosys_alloc_pd_prep; + p->HHULIF_alloc_pd_done = &enosys_alloc_pd_done; + p->HHULIF_free_pd_done = &enosys_free_pd_done; + p->HHULIF_alloc_mw = &enosys_alloc_mw; + p->HHULIF_bind_mw = &enosys_bind_mw; + p->HHULIF_free_mw = &enosys_free_mw; + p->HHULIF_create_ud_av = &enosys_create_ud_av; + p->HHULIF_modify_ud_av = &enosys_modify_ud_av; + p->HHULIF_query_ud_av = &enosys_query_ud_av; + p->HHULIF_destroy_ud_av = &enosys_destroy_ud_av; + p->HHULIF_create_cq_prep = &enosys_create_cq_prep; + p->HHULIF_create_cq_done = &enosys_create_cq_done; + p->HHULIF_resize_cq_prep = &enosys_resize_cq_prep; + p->HHULIF_resize_cq_done = &enosys_resize_cq_done; + p->HHULIF_poll4cqe = &enosys_poll4cqe; + p->HHULIF_peek_cq = &enosys_peek_cq; + p->HHULIF_req_comp_notif = &enosys_req_comp_notif; + p->HHULIF_req_ncomp_notif = &enosys_req_ncomp_notif; + p->HHULIF_destroy_cq_done = &enosys_destroy_cq_done; + p->HHULIF_create_qp_prep = &enosys_create_qp_prep; + p->HHULIF_special_qp_prep = &enosys_special_qp_prep; + p->HHULIF_create_qp_done = &enosys_create_qp_done; + p->HHULIF_post_send_req = &enosys_post_send_req; + p->HHULIF_post_inline_send_req = &enosys_post_inline_send_req; + p->HHULIF_post_send_reqs = &enosys_post_send_reqs; + p->HHULIF_post_recv_req = &enosys_post_recv_req; + p->HHULIF_post_recv_reqs = &enosys_post_recv_reqs; + p->HHULIF_destroy_qp_done = &enosys_destroy_qp_done; + p->HHULIF_create_srq_prep = 
&enosys_create_srq_prep; + p->HHULIF_create_srq_done = &enosys_create_srq_done; + p->HHULIF_destroy_srq_done = &enosys_destroy_srq_done; + p->HHULIF_post_srq = &enosys_post_srq; +} /* enosys_init */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/invalid.ic b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/invalid.ic new file mode 100644 index 00000000..40a78df0 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/invalid.ic @@ -0,0 +1,480 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id:$ + */ + + +static HH_ret_t invalid_open_hca( + HH_hca_hndl_t hca_hndl, + EVAPI_hca_profile_t *profile_p, + EVAPI_hca_profile_t *sugg_profile_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_close_hca( + HH_hca_hndl_t hca_hndl +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_alloc_ul_resources( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* hca_ul_resources_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_free_ul_resources( + HH_hca_hndl_t hca_hndl, + void* hca_ul_resources_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_hca( + HH_hca_hndl_t hca_hndl, + VAPI_hca_cap_t* hca_cap_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_modify_hca( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_attr_t* hca_attr_p, + VAPI_hca_attr_mask_t* hca_attr_mask_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_port_prop( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_port_t* hca_port_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_get_pkey_tbl( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_pkey_t* pkey_tbl_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_get_gid_tbl( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* pkey_tbl_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_get_lid( + HH_hca_hndl_t hca_hndl, + IB_port_t port, + IB_lid_t* lid_p, + u_int8_t* lmc_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_alloc_pd( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t prot_ctx, + void * pd_ul_resources_p, + HH_pd_hndl_t *pd_num_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t 
invalid_free_pd( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_alloc_rdd( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t* rdd_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_free_rdd( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_create_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_ud_av_t* av_p, + HH_ud_av_hndl_t* ah_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_modify_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t* av_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_destroy_priv_ud_av( + HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_register_mr( + HH_hca_hndl_t hca_hndl, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_reregister_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + VAPI_mr_change_t change_mask, + HH_mr_t* mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_register_smr( + HH_hca_hndl_t hca_hndl, + HH_smr_t* smr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_deregister_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_mr( + HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + HH_mr_info_t* mr_info_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_alloc_mw( + HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + IB_rkey_t* initial_rkey_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_free_mw( + HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_create_cq( + HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_protection_context, + void* cq_ul_resources_p, + HH_cq_hndl_t* cq +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_resize_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + void* cq_ul_resources_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + VAPI_cqe_num_t* num_o_cqes_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_destroy_cq( + HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_create_qp( + HH_hca_hndl_t hca_hndl, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* qpn_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_get_special_qp( + HH_hca_hndl_t hca_hndl, + VAPI_special_qp_t qp_type, + IB_port_t port, + HH_qp_init_attr_t* init_attr_p, + void* qp_ul_resources_p, + IB_wqpn_t* sqp_hndl_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_modify_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + VAPI_qp_state_t cur_qp_state, + VAPI_qp_attr_t* qp_attr_p, + VAPI_qp_attr_mask_t* qp_attr_mask_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num, + VAPI_qp_attr_t* qp_attr_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static 
HH_ret_t invalid_destroy_qp( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qp_num +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_create_eec( + HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd, + IB_eecn_t* eecn_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_modify_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_state_t cur_ee_state, + VAPI_qp_attr_t* ee_attr_p, + VAPI_qp_attr_mask_t* ee_attr_mask_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_query_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_attr_t* ee_attr_p +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_destroy_eec( + HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_set_async_eventh( + HH_hca_hndl_t hca_hndl, + HH_async_eventh_t handler, + void* private_data +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_set_comp_eventh( + HH_hca_hndl_t hca_hndl, + HH_comp_eventh_t handler, + void* private_data +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_attach_to_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid +) +{ + return HH_HCA_STATUS_INVALID; +} + +static HH_ret_t invalid_detach_from_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid +) +{ + return HH_HCA_STATUS_INVALID; +} + + +static HH_ret_t invalid_process_local_mad( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + IB_lid_t slid, + EVAPI_proc_mad_opt_t proc_mad_opts, + void* mad_in_p, + void* mad_out_p +) +{ + return HH_HCA_STATUS_INVALID; +} + + +static void invalid_init(HH_if_ops_t* p) +{ + p->HHIF_open_hca = &invalid_open_hca; + p->HHIF_close_hca = &invalid_close_hca; + p->HHIF_alloc_ul_resources = &invalid_alloc_ul_resources; + p->HHIF_free_ul_resources = &invalid_free_ul_resources; + p->HHIF_query_hca = &invalid_query_hca; + p->HHIF_modify_hca = &invalid_modify_hca; + p->HHIF_query_port_prop = &invalid_query_port_prop; + p->HHIF_get_pkey_tbl = &invalid_get_pkey_tbl; + p->HHIF_get_gid_tbl = &invalid_get_gid_tbl; + p->HHIF_get_lid = &invalid_get_lid; + p->HHIF_alloc_pd = &invalid_alloc_pd; + p->HHIF_free_pd = &invalid_free_pd; + p->HHIF_alloc_rdd = &invalid_alloc_rdd; + p->HHIF_free_rdd = &invalid_free_rdd; + p->HHIF_create_priv_ud_av = &invalid_create_priv_ud_av; + p->HHIF_modify_priv_ud_av = &invalid_modify_priv_ud_av; + p->HHIF_query_priv_ud_av = &invalid_query_priv_ud_av; + p->HHIF_destroy_priv_ud_av = &invalid_destroy_priv_ud_av; + p->HHIF_register_mr = &invalid_register_mr; + p->HHIF_reregister_mr = &invalid_reregister_mr; + p->HHIF_register_smr = &invalid_register_smr; + p->HHIF_deregister_mr = &invalid_deregister_mr; + p->HHIF_query_mr = &invalid_query_mr; + p->HHIF_alloc_mw = &invalid_alloc_mw; + p->HHIF_free_mw = &invalid_free_mw; + p->HHIF_create_cq = &invalid_create_cq; + p->HHIF_resize_cq = &invalid_resize_cq; + p->HHIF_query_cq = &invalid_query_cq; + p->HHIF_destroy_cq = &invalid_destroy_cq; + p->HHIF_create_qp = &invalid_create_qp; + p->HHIF_get_special_qp = &invalid_get_special_qp; + p->HHIF_modify_qp = &invalid_modify_qp; + p->HHIF_query_qp = &invalid_query_qp; + p->HHIF_destroy_qp = &invalid_destroy_qp; + p->HHIF_create_eec = &invalid_create_eec; + p->HHIF_modify_eec = &invalid_modify_eec; + p->HHIF_query_eec = &invalid_query_eec; + p->HHIF_destroy_eec = &invalid_destroy_eec; + p->HHIF_set_async_eventh = &invalid_set_async_eventh; + p->HHIF_set_comp_eventh = &invalid_set_comp_eventh; + p->HHIF_attach_to_multicast = 
&invalid_attach_to_multicast; + p->HHIF_detach_from_multicast = &invalid_detach_from_multicast; + p->HHIF_process_local_mad = &invalid_process_local_mad; +} /* invalid_init */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/rx_stub.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/rx_stub.c new file mode 100644 index 00000000..baf79f30 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/rx_stub.c @@ -0,0 +1,146 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + }; + u_int16_t payload_size = 16; /*In bytes this must be the same as the buf size*/ + + /* init parameters the rest will be updated by the build function */ + + /* LRH */ + lrh_st.VL = 0x7; + lrh_st.LVer = 0; + lrh_st.SL = RC_SL; + lrh_st.DLID = RC_SLID; + lrh_st.SLID = RC_DLID; + + /* BTH */ + bth_st.SE = 0; + bth_st.M = 0; + bth_st.TVer = 0; + bth_st.P_KEY = RC_PKEY; + bth_st.DestQP= RC_SRC_QP_NUM; + bth_st.A = 0; + bth_st.PSN = RC_RQ_START_PSN; + + /* DETH */ + deth_st.Q_Key = 0x11111111; + deth_st.reserved1 = 0x88; + deth_st.SrcQP = 0x222222; + + /* AETH */ + aeth_st.Syndrome = 0xFF; /* 8 bit field */ + aeth_st.MSN = 0xFFFFFF; /* 24 bit field */ + + + opcode = RC_SEND_FIRST_OP; /* RC_ACKNOWLEDGE_OP; one of the cases */ + + LNH = IBA_LOCAL; /* only IBA is supported in the lib so don't change it*/ +/***********************************************/ + for (num_pkt = 1; num_pkt <= NUM_PKT + 1 ; num_pkt++) { + + switch (opcode) { + + case RC_SEND_FIRST_OP: + ret = MPGA_fast_rc_send_first(&lrh_st, grh_st_p, &bth_st, LNH, + payload_size, &header_size, &header_buf_p); + CHECK_RESULT(ret); + break; + case RC_SEND_MIDDLE_OP: + ret = MPGA_fast_rc_send_middle(&lrh_st, grh_st_p, &bth_st, LNH, + payload_size, &header_size, &header_buf_p); + CHECK_RESULT(ret); + break; + case RC_SEND_LAST_OP: + bth_st.A = 1; + ret = MPGA_fast_rc_send_last(&lrh_st, grh_st_p, &bth_st, LNH, + payload_size, &header_size, &header_buf_p); + CHECK_RESULT(ret); + break; + case RC_SEND_LAST_W_IM_OP: + return(HH_ENOSYS); break; + case RC_SEND_ONLY_OP: + bth_st.A = 1; + ret = MPGA_fast_rc_send_only(&lrh_st, grh_st_p, &bth_st, LNH, + payload_size, &header_size, &header_buf_p); + CHECK_RESULT(ret); + break; + case RC_ACKNOWLEDGE_OP: + ret = MPGA_fast_rc_acknowledge(&lrh_st, grh_st_p, &bth_st, &aeth_st, + LNH, &header_size, &header_buf_p); + CHECK_RESULT(ret); + break; + + case UD_SEND_ONLY_OP: + 
ret = MPGA_fast_ud_send_only(&lrh_st, &bth_st, &deth_st, payload_size,
+ &header_size, &header_buf_p);
+ CHECK_RESULT(ret);
+ break;
+
+ default:
+ printf("\n*** ERROR don't have such a case %d ***\n", opcode);
+ return(HH_ERR);
+ break;
+ }
+ /* End of the build part; update for the next packet if needed */
+
+ bth_st.PSN++;
+ if (opcode == RC_SEND_FIRST_OP) opcode = RC_SEND_MIDDLE_OP;
+ else if (num_pkt == (NUM_PKT - 1)) opcode = RC_SEND_LAST_OP;
+ else opcode = RC_SEND_ONLY_OP;
+
+ /***********************************************************/
+
+ MPGA_print_pkt((u_int8_t*)header_buf_p, header_size - 4);
+
+/***********************************************/
+
+ /* Packet for the analyzer - simulates the packet from Eyal's buffer */
+
+ packet_p = (u_int8_t*) malloc(sizeof(u_int8_t) * (header_size + payload_size + 4));
+ memset(packet_p, 0, (header_size + payload_size + 4));
+ memcpy((u_int8_t*)packet_p + 4, header_buf_p, header_size);
+ memcpy(((u_int8_t*)packet_p + header_size + 4), payload_buf_p, payload_size);
+/***********************************************/
+
+ GHGA_analyze((GHH_dev_t*)dev->device, packet_p, &packet_size);
+
+
+
+ MPGA_print_pkt((u_int8_t*)packet_p, (header_size + payload_size + 4)); /* Print the wanted header/packet */
+
+
+ free(header_buf_p);
+ free(packet_p);
+ }
+ return EXIT_SUCCESS;
+}
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmd_types.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmd_types.h
new file mode 100644
index 00000000..e8aac5e9
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmd_types.h
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_CMD_TYPES_H
+#define H_CMD_TYPES_H
+
+#include <mtl_common.h>
+#include <vapi_types.h>
+#include <tavor_if_defs.h>
+
+
+typedef MT_size_t THH_mpt_index_t; /* ?? */
+typedef u_int16_t THH_mcg_hash_t; /* !!! to be defined in THH_mcgm !!! */
+
+/* matan: SQD event req is passed to HCA on opcode modifier field.
+ however, THH_cmd_MODIFY_QPEE() has no input argument selecting
+ sqd_event = 0/1. In order not to change the existing API, we define
+ the flag below & mask it off on entry of THH_cmd_MODIFY_QPEE(). If
+ (cmd == RTS2SQD), its value is used as the sqd_event parameter. */
+#define THH_CMD_SQD_EVENT_REQ 0x80000000
+
+/* QP/EE transitions */
+enum {
+ QPEE_TRANS_RST2INIT = TAVOR_IF_CMD_RST2INIT_QPEE,
+ QPEE_TRANS_INIT2INIT = TAVOR_IF_CMD_INIT2INIT_QPEE,
+ QPEE_TRANS_INIT2RTR = TAVOR_IF_CMD_INIT2RTR_QPEE,
+ QPEE_TRANS_RTR2RTS = TAVOR_IF_CMD_RTR2RTS_QPEE,
+ QPEE_TRANS_RTS2RTS = TAVOR_IF_CMD_RTS2RTS_QPEE,
+ QPEE_TRANS_SQERR2RTS = TAVOR_IF_CMD_SQERR2RTS_QPEE,
+ QPEE_TRANS_2ERR = TAVOR_IF_CMD_2ERR_QPEE,
+ QPEE_TRANS_RTS2SQD = TAVOR_IF_CMD_RTS2SQD_QPEE,
+ QPEE_TRANS_RTS2SQD_WITH_EVENT = TAVOR_IF_CMD_RTS2SQD_QPEE | THH_CMD_SQD_EVENT_REQ,
+ QPEE_TRANS_SQD2RTS = TAVOR_IF_CMD_SQD2RTS_QPEE,
+ QPEE_TRANS_ERR2RST = TAVOR_IF_CMD_ERR2RST_QPEE
+};
+
+typedef u_int32_t THH_qpee_transition_t;
+
+
+enum {
+ THH_CMD_STAT_OK = TAVOR_IF_CMD_STAT_OK, /* command completed successfully */
+ THH_CMD_STAT_INTERNAL_ERR = TAVOR_IF_CMD_STAT_INTERNAL_ERR, /* Internal error (such as a bus error) occurred while processing command */
+ THH_CMD_STAT_BAD_OP = TAVOR_IF_CMD_STAT_BAD_OP, /* Operation/command not supported or opcode modifier not supported */
+ THH_CMD_STAT_BAD_PARAM = TAVOR_IF_CMD_STAT_BAD_PARAM, /* Parameter not supported or parameter out of range */
+ THH_CMD_STAT_BAD_SYS_STATE = TAVOR_IF_CMD_STAT_BAD_SYS_STATE, /* System not enabled or bad system state */
+ THH_CMD_STAT_BAD_RESOURCE = TAVOR_IF_CMD_STAT_BAD_RESOURCE, /* Attempt to access reserved or unallocated resource */
+ THH_CMD_STAT_RESOURCE_BUSY = TAVOR_IF_CMD_STAT_RESOURCE_BUSY, /* Requested resource is currently executing a command, or is otherwise busy */
+ THH_CMD_STAT_DDR_MEM_ERR = TAVOR_IF_CMD_STAT_DDR_MEM_ERR, /* memory error */
+ THH_CMD_STAT_EXCEED_LIM = TAVOR_IF_CMD_STAT_EXCEED_LIM, /* Required capability exceeds device limits */
+ THH_CMD_STAT_BAD_RES_STATE = TAVOR_IF_CMD_STAT_BAD_RES_STATE, /* Resource is not in the appropriate state or ownership */
+ THH_CMD_STAT_BAD_INDEX = TAVOR_IF_CMD_STAT_BAD_INDEX, /* Index out of range */
+ THH_CMD_STAT_BAD_QPEE_STATE = TAVOR_IF_CMD_STAT_BAD_QPEE_STATE, /* Attempt to modify a QP/EE which is not in the presumed state */
+ THH_CMD_STAT_BAD_SEG_PARAM = TAVOR_IF_CMD_STAT_BAD_SEG_PARAM, /* Bad segment parameters (Address/Size) */
+ THH_CMD_STAT_REG_BOUND = TAVOR_IF_CMD_STAT_REG_BOUND, /* Memory Region has Memory Windows bound to it */
+ THH_CMD_STAT_BAD_PKT = TAVOR_IF_CMD_STAT_BAD_PKT, /* Bad management packet (silently discarded) */
+ THH_CMD_STAT_BAD_SIZE = TAVOR_IF_CMD_STAT_BAD_SIZE, /* More outstanding CQEs in CQ than new CQ size */
+
+ /* driver added statuses */
+ THH_CMD_STAT_EAGAIN = 0x0100, /* No (software) resources to enqueue given command - retry later */
+ THH_CMD_STAT_EABORT = 0x0101, /* Command aborted (due to change in cmdif state) */
+ THH_CMD_STAT_ETIMEOUT = 0x0102, /* command not completed after timeout */
+ THH_CMD_STAT_EFATAL = 0x0103, /* unexpected error - fatal */
+ THH_CMD_STAT_EBADARG = 0x0104, /* bad argument */
+ THH_CMD_STAT_EINTR = 0x0105 /* process received signal */
+};
+
+typedef u_int32_t THH_cmd_status_t;
+
+
+#define THH_CMD_STAT_OK_STR "TAVOR_IF_CMD_STAT_OK - command completed successfully"
+#define THH_CMD_STAT_INTERNAL_ERR_STR "TAVOR_IF_CMD_STAT_INTERNAL_ERR - Internal error (such as a bus error) occurred while processing command"
+#define THH_CMD_STAT_BAD_OP_STR "TAVOR_IF_CMD_STAT_BAD_OP - Operation/command not supported or opcode modifier not supported"
+#define THH_CMD_STAT_BAD_PARAM_STR "TAVOR_IF_CMD_STAT_BAD_PARAM - Parameter not supported or parameter out of range"
+#define THH_CMD_STAT_BAD_SYS_STATE_STR "TAVOR_IF_CMD_STAT_BAD_SYS_STATE - System not enabled or bad system state"
+#define THH_CMD_STAT_BAD_RESOURCE_STR "TAVOR_IF_CMD_STAT_BAD_RESOURCE - Attempt to access reserved or unallocated resource"
+#define THH_CMD_STAT_RESOURCE_BUSY_STR "TAVOR_IF_CMD_STAT_RESOURCE_BUSY - Requested resource is currently executing a command, or is otherwise busy"
+#define THH_CMD_STAT_DDR_MEM_ERR_STR "TAVOR_IF_CMD_STAT_DDR_MEM_ERR - memory error"
+#define THH_CMD_STAT_EXCEED_LIM_STR "TAVOR_IF_CMD_STAT_EXCEED_LIM - Required capability exceeds device limits"
+#define THH_CMD_STAT_BAD_RES_STATE_STR "TAVOR_IF_CMD_STAT_BAD_RES_STATE - Resource is not in the appropriate state or ownership"
+#define THH_CMD_STAT_BAD_INDEX_STR "TAVOR_IF_CMD_STAT_BAD_INDEX - Index out of range"
+#define THH_CMD_STAT_BAD_QPEE_STATE_STR "TAVOR_IF_CMD_STAT_BAD_QPEE_STATE - Attempt to modify a QP/EE which is not in the presumed state"
+#define THH_CMD_STAT_BAD_SEG_PARAM_STR "TAVOR_IF_CMD_STAT_BAD_SEG_PARAM - Bad segment parameters (Address/Size)"
+#define THH_CMD_STAT_REG_BOUND_STR "TAVOR_IF_CMD_STAT_REG_BOUND - Memory Region has Memory Windows bound to it"
+#define THH_CMD_STAT_BAD_PKT_STR "TAVOR_IF_CMD_STAT_BAD_PKT - Bad management packet (silently discarded)"
+#define THH_CMD_STAT_BAD_SIZE_STR "THH_CMD_STAT_BAD_SIZE - More outstanding CQEs in CQ than new CQ size"
+#define THH_CMD_STAT_EAGAIN_STR "0x0100 - No (software) resources to enqueue given command - retry later"
+#define THH_CMD_STAT_EABORT_STR "0x0101 - Command aborted (due to change in cmdif state)"
+#define THH_CMD_STAT_ETIMEOUT_STR "0x0102 - command not completed after timeout"
+#define THH_CMD_STAT_EFATAL_STR "0x0103 - unexpected error - fatal"
+#define THH_CMD_STAT_EBADARG_STR "0x0104 - bad argument"
+#define THH_CMD_STAT_EINTR_STR "0x0105 - process received signal"
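+
+/*
+ * Illustrative only (a sketch, not part of the original header): one
+ * plausible way to map a THH_cmd_status_t to the message strings above.
+ * Only a few statuses are shown; a complete mapping would enumerate
+ * every THH_CMD_STAT_* value.
+ */
+static const char* THH_cmd_status_str_sketch(THH_cmd_status_t status)
+{
+ switch (status) {
+ case THH_CMD_STAT_OK:        return THH_CMD_STAT_OK_STR;
+ case THH_CMD_STAT_BAD_PARAM: return THH_CMD_STAT_BAD_PARAM_STR;
+ case THH_CMD_STAT_EAGAIN:    return THH_CMD_STAT_EAGAIN_STR;
+ case THH_CMD_STAT_ETIMEOUT:  return THH_CMD_STAT_ETIMEOUT_STR;
+ default:                     return "unknown THH command status";
+ }
+}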
+
+
+enum {
+ DDR_STAT_OK = 0,
+ DDR_STAT_CAL_1_ERR = 1,
+ DDR_STAT_CAL_2_ERR = 2
+};
+typedef u_int32_t THH_ddr_cal_status_t;
+
+enum {
+ DIM_DI_NONE = 0,
+ DIM_DI_PARITY = 1,
+ DIM_DI_ECC = 2
+};
+typedef u_int32_t THH_dim_integrity_t;
+
+enum {
+ DDR_NO_AUTO=0,
+ DDR_AUTO_PRECHARGE_PER_TRANSLATION=1,
+ DDR_AUTO_PRECHARGE_PER_64BIT=2
+};
+typedef u_int32_t THH_dim_apm_t;
+
+enum {
+ DIM_STAT_ENABLED = 0,
+ DIM_STAT_DISABLED = 1
+};
+typedef u_int32_t THH_dim_status_t;
+
+typedef struct THH_dim_info_t {
+ u_int32_t dimmsize; /* Size of DIMM in units of 2^20 Bytes. */
+ /* This value is valid only when DIMMStatus is DIM_STAT_OPERATIONAL. */
+ THH_dim_integrity_t di; /* Data Integrity Configuration */
+ u_int8_t dimmslot; /* Slot number in which the Logical DIMM is located. */
+ /*;Two logical DIMMs may be on same module and therefore on same slot.\;This value is only valid when DIMMStatus is not 1. */
+ THH_dim_status_t dimmstatus; /* When it is 1-255 the Logical DIMM in question is disabled */
+ /* 0 - DIMM is Operational \;1 - No DIMM detected */
+ /* 2 - DIMM max freq is smaller than DMU freq\;3 - DIMM min freq is greater than DMU freq\;4 - DIMM CAS Latency does not match the other DIMMs\;5 - DIMM CAS Latency is not supported.\;6 - DIMM chip width does not match the other DIMMs (x4/x8/x16)\;6 - DIMM chip width is not supported\;7 - DIMM buffered/unbuffered does not match the other DIMMs\;8 - DIMM does not support 8 byte bursts\;9 - DIMM does not have 4 banks\;10 - 255 Other DIMM Errors */
+ u_int64_t vendor_id; /* JEDEC Manufacturer ID (64 bits) */
+}
+THH_dim_info_t;
+
+
+enum {
+ THH_OWNER_SW = 0,
+ THH_OWNER_HW = 1
+};
+typedef u_int32_t THH_owner_t;
+
+typedef struct {
+ u_int64_t qpc_base_addr; /* QPC Base Address. Table must be aligned on its size */
+ u_int8_t log_num_of_qp; /* Log base 2 of number of supported QPs */
+ u_int64_t eec_base_addr; /* EEC Base Address. Table must be aligned on its size */
+ u_int64_t srqc_base_addr; /* SRQC Base Address. Table must be aligned on its size */
+ u_int8_t log_num_of_srq; /* Log base 2 of number of supported SRQs */
+ u_int8_t log_num_of_ee; /* Log base 2 of number of supported EEs. */
+ u_int64_t cqc_base_addr; /* CQC Base Address. Table must be aligned on its size */
+ u_int8_t log_num_of_cq; /* Log base 2 of number of supported CQs. */
+ u_int64_t eqpc_base_addr; /* Extended QPC Base Address. Table has same number of entries as QPC table. Table must be aligned to entry size. */
+ u_int64_t eeec_base_addr; /* Extended EEC Base Address. Table has same number of entries as EEC table. Table must be aligned to entry size. */
+ u_int64_t eqc_base_addr; /* EQC Base Address. */
+ u_int8_t log_num_eq; /* Log base 2 of number of supported EQs. Must be 6 in MT23108 */
+ u_int64_t rdb_base_addr; /* Base address of table that holds remote read and remote atomic requests. Table must be aligned to RDB entry size (32 bytes). Table size is implicitly defined when QPs/EEs are configured with indexes into this table. */
+}
+THH_contexts_prms_t;
+
+typedef struct { /* Protected UD-AV table parameters */
+ u_int32_t l_key; /* L_Key used to access TPT */
+ u_int32_t pd; /* PD used by TPT for matching against PD of region entry being accessed. */
+ MT_bool xlation_en; /* When cleared, address is physical address and no translation will be done. When set, address is virtual. TPT will be accessed in both cases for address decoding purposes. */
+}
+THH_ud_av_tbl_prms_t;
+
+typedef struct {
+ u_int64_t mc_base_addr; /* Base Address of the Multicast Table. The base address must be aligned to the entry size. */
+ u_int16_t log_mc_table_entry_sz; /* Log2 of the Size of multicast group member (MGM) entry. Must be greater than 5 (to allow CTRL and GID sections). That implies the number of QPs per MC table entry. */
+ u_int32_t mc_table_hash_sz; /* Number of entries in multicast DGID hash table (must be power of 2).
+ INIT_HCA - the required number of entries
+ QUERY_HCA - the actual number of entries assigned by firmware (will be less than or equal to the amount required in INIT_HCA) */
+ u_int8_t log_mc_table_sz; /* Log2 of the overall number of MC entries in the MCG table (includes both hash and auxiliary tables) */
+ u_int8_t mc_hash_fn; /* Multicast hash function\;0 - Default hash function\;other - reserved */
+}
+THH_mcast_prms_t;
+
+typedef struct {
+ u_int64_t mpt_base_adr; /* MPT - Memory Protection Table base physical address. Entry size is 64 bytes. Table must be aligned to its size. */
+ u_int8_t log_mpt_sz; /* Log (base 2) of the number of region/windows entries in the MPT table. */
+ u_int8_t pfto; /* Page Fault RNR Timeout - The field returned in RNR Naks generated when a page fault is detected. It has no effect when on-demand-paging is not used. */
+ u_int8_t mtt_version; /* Version of MTT page walk. Must be zero */
+ u_int64_t mtt_base_addr; /* MTT - Memory Translation table base physical address. Table must be aligned to its size. */
+ u_int8_t mtt_segment_size; /* The size of MTT segment is 64*2^MTT_Segment_Size bytes */
+}
+THH_tpt_prms_t;
+
+typedef struct {
+ u_int64_t uar_base_addr; /* UAR Base Address (QUERY_HCA only) */
+ u_int8_t uar_page_sz; /* This field defines the size of each UAR page. Size of UAR Page is 4KB*2^UAR_Page_Size */
+ u_int64_t uar_scratch_base_addr; /* Base address of UAR scratchpad. Number of entries in table is UAR BAR size divided by UAR Page Size. Table must be aligned to entry size. */
+}
+THH_uar_prms_t;
+
+typedef struct {
+ u_int32_t tbd; /* To Be Defined */
+}
+THH_sched_arb_t;
+
+typedef struct {
+ u_int8_t hca_core_clock; /* Internal Clock Period (in units of 1/16 ns) (QUERY_HCA only) */
+ u_int16_t router_qp; /* Upper 16 bit to be used as a QP number for router mode. Low order 8 bits are taken from the TClass field of the incoming packet.\;Valid only if RE bit is set */
+ MT_bool re; /* Router Mode Enable\;If this bit is set, entire packet (including all headers and ICRC) will be considered as a data payload and will be scattered to memory as specified in the descriptor that is posted on the QP matching the TClass field of packet. */
+ MT_bool udp; /* UD Port Check Enable\;0 - Port field in Address Vector is ignored\;1 - HCA will check the port field in AV entry (fetched for UD descriptor) against the Port of the UD QP executing the descriptor. */
+ MT_bool he; /* host is big endian - Used for Atomic Operations */
+ MT_bool ud; /* enable UD address vector protection (privileged UDAVs). 0 = disabled; 1 = enabled */
+ THH_contexts_prms_t qpc_eec_cqc_eqc_rdb_parameters;
+ THH_ud_av_tbl_prms_t udavtable_memory_parameters; /* Memory Access Parameters for UD Address Vector Table. Used for QPs/EECs that are configured to use protected Address Vectors. */
+ THH_mcast_prms_t multicast_parameters;
+ THH_tpt_prms_t tpt_parameters;
+ THH_uar_prms_t uar_parameters; /* UAR Parameters */
+}
+THH_hca_props_t;
+
+
+enum {
+ EQ_STATE_RSRV1 = 0,
+ EQ_STATE_ARMED = 1,
+ EQ_STATE_ALWAYS_ARMED = 2,
+ EQ_STATE_FIRED = 3
+};
+typedef u_int32_t THH_eq_state_t;
+
+enum {
+ EQ_STATUS_OK = 0,
+ EQ_STATUS_OVERFLOW = 9,
+ EQ_STATUS_WRITE_FAILURE = 10
+};
+typedef u_int32_t THH_eq_status_t;
+
+typedef struct {
+ THH_eq_state_t st; /* Event delivery state machine\;01 - Armed\;11 - Fired\;10,00 - Reserved */
+ MT_bool oi; /* Overrun detection ignore */
+ MT_bool tr; /* Translation Required. If set - EQ access undergoes address translation. */
+ THH_owner_t owner; /* SW/HW ownership */
+ THH_eq_status_t status; /* EQ status:\;0000 - OK\;1001 - EQ overflow\;1010 - EQ write failure */
+ u_int64_t start_address; /* Start Address of Event Queue. Must be aligned on 32-byte boundary */
+ u_int32_t usr_page;
+ u_int8_t log_eq_size; /* Log2 of the number of entries in the EQ */
+ u_int8_t intr; /* Interrupt (message) to be generated to report event to INT layer.\;0000iiii - specifies GPIO pin to be asserted\;1jjjjjjj - specifies type of interrupt message to be generated (total 128 different messages supported). */
+ u_int32_t lost_count; /* Number of events lost due to EQ overrun */
+ u_int32_t lkey; /* Memory key (L-Key) to be used to access EQ */
+ u_int32_t pd; /* Protection Domain */
+ u_int32_t consumer_indx; /* Contains next entry to be read upon poll for completion. Must be initialized to '0 while opening the EQ */
Must be initialized to '0 while opening EQ */ + u_int32_t producer_indx; /* Contains next entry in EQ to be written by the HCA. Must be initialized to '1 while opening EQ. */ +} +THH_eqc_t; + +enum { + CQ_STATE_DISARMED = 0x0, + CQ_STATE_ARMED = 0x1, + CQ_STATE_ARMED_SOLICITED = 0x4, + CQ_STATE_FIRED = 0xA +}; +typedef u_int32_t THH_cq_state_t; + +enum { + CQ_STATUS_OK = 0, + CQ_STATUS_OVERFLOW = 9, + CQ_STATUS_WRITE_FAILURE = 10 +}; +typedef u_int32_t THH_cq_status_t; + +typedef struct { + THH_cq_state_t st; /* Event delivery state machine (Regular Events)\;0 - Disarmed\;1 - Armed\;Armed_Solicited=4\;0xA - Fired\;other - Reserved */ + MT_bool oi; /* Ignore overrun of this CQ if this bit is set */ + MT_bool tr; /* Translation Required - if set, accesses to CQ will undergo address translation. */ + THH_cq_status_t status; /* CQ status\;0000 - OK\;1001 - CQ overflow\;1010 - CQ write failure */ + u_int64_t start_address; /* Start address of CQ. Must be aligned on CQE size (32 bytes) */ + u_int32_t usr_page; /* UAR page this CQ can be accessed through (ringing CQ doorbells) */ + u_int8_t log_cq_size; /* number of entries in CQ is 2^Log_CQ_size */ + THH_eqn_t e_eqn; /* Event Queue this CQ reports errors to (e.g. CQ overflow) */ + THH_eqn_t c_eqn; /* Event Queue this CQ reports completion events to */ + u_int32_t pd; /* Protection Domain */ + u_int32_t l_key; /* Memory key (L_Key) to be used to access CQ */ + u_int32_t last_notified_indx; /* Maintained by HW, not to be altered by SW */ + u_int32_t solicit_producer_indx; /* Maintained by HW, not to be altered by SW. Points to last CQE reported for message with S-bit set */ + u_int32_t consumer_indx; /* Contains index to the next entry to be read upon poll for completion. The first completion after passing ownership of CQ from software to hardware will be reported to value passed in this field. */ + u_int32_t producer_indx; /* Maintained by HW, not to be altered by SW. Points to the next entry to be written to by Hardware. CQ overrun is reported if Producer_indx + 1 equals Consumer_indx. */ + u_int32_t cqn; /* CQ number. Least significant bits are constrained by the position of this CQ in CQC table */ +} +THH_cqc_t; + +enum { + PM_STATE_ARMED = 0, + PM_STATE_REARM = 1, + PM_STATE_MIGRATED = 3 +}; +typedef u_int32_t THH_pm_state_t; + +enum { + THH_ST_RC = 0, + THH_ST_UC = 1, + THH_ST_RD = 2, + THH_ST_UD = 3, + THH_ST_MLX = 7 +}; +typedef u_int32_t THH_service_type_t; + +typedef struct { + u_int8_t pkey_index; /* PKey table index */ + IB_port_t port_number; /* Specific port associated with this QP/EE.\;0 - Port 1\;1 - Port 2 */ + IB_lid_t rlid; /* Remote (Destination) LID */ + u_int8_t my_lid_path_bits; /* Source LID - the lower 7 bits (upper bits are taken from PortInfo) */ + MT_bool g; /* Global address enable - if set, GRH will be formed for packet header */ + u_int8_t rnr_retry; /* RNR retry count */ + u_int8_t hop_limit; /* IPv6 hop limit */ + u_int8_t max_stat_rate; /* Maximum static rate control. \;0 - 4X injection rate\;1 - 1X injection rate\;other - reserved\; */ + /* removed - u_int8_t MSG;*/ /* Message size (valid for UD AV only), size is 256*2^MSG bytes */ + u_int8_t mgid_index; /* Index to port GID table */ + u_int8_t ack_timeout; /* Local ACK timeout */ + u_int32_t flow_label; /* IPv6 flow label */ + u_int8_t tclass; /* IPv6 TClass */ + IB_sl_t sl; /* InfiniBand Service Level (SL) */ + IB_gid_t rgid; /* Remote GID */ +} +THH_address_path_t; + +typedef struct { + u_int8_t ver; /* Version of QPC format. 
Must be zero for MT23108 */ + /* MT_bool te; */ /* Address translation enable. If cleared - no address translation will be performed for all memory accesses (data buffers and descriptors) associated with this QP. Present in all transports, invalid (reserved) in EE context */ + /* u_int8_t ce; Cache Mode. Must be set to '1 for proper HCA operation */ + MT_bool de; /* Send/Receive Descriptor Event enable - if set, events can be generated upon descriptors' completion on send/receive queue (controlled by E bit in WQE). Invalid in EE context */ + THH_pm_state_t pm_state; /* Path migration state (Migrated, Armed or Rearm)\;11-Migrated\;00-Armed\;01-Rearm */ + THH_service_type_t st; /* Service type (invalid in EE context):\;000-Reliable Connection\;001-Unreliable Connection\;010-Reliable Datagram\;011-Unreliable Datagram\;111-MLX transport (raw bits injection). Used for management QPs and RAW */ + VAPI_qp_state_t state; /* For QUERY_QPEE */ + MT_bool sq_draining; /* query only - when (qp_state == VAPI_SQD) indicates whether sq is in drain process (TRUE), or drained.*/ + u_int8_t sched_queue; /* Schedule queue to be used for WQE scheduling to execution. Determines QoS for this QP. */ + u_int8_t msg_max; /* Max message size allowed on the QP. Maximum message size is 2^msg_max */ + IB_mtu_t mtu; /* MTU of the QP. Must be the same for both paths (primary and alternative). Encoding is per IB spec. Not valid (reserved) in EE context */ + u_int32_t usr_page; /* Index (offset) of user page allocated for this QP (see "non_privileged Access to the HCA Hardware"). Not valid (reserved) in EE context. */ + u_int32_t local_qpn_een; /* Local QP/EE number. Lower bits determine position of this record in the QPC table, and are thus constrained */ + u_int32_t remote_qpn_een; /* Remote QP/EE number */ + THH_address_path_t primary_address_path; /* see Table 6, "Address path format" on page 27. */ + THH_address_path_t alternative_address_path; /* see Table 6, "Address Path Format", on page 27 */ + u_int32_t rdd; /* Reliable Datagram Domain */ + u_int32_t pd; /* QP protection domain. Not valid (reserved) in EE context. */ + u_int32_t wqe_base_adr; /* Bits 63:32 of WQE address for both SQ and RQ. \;Reserved for EE context. */ + u_int32_t wqe_lkey; /* memory key (L-Key) to be used to access WQEs. Not valid (reserved) in EE context. */ + MT_bool ssc; /* If set - all send WQEs generate CQEs. If cleared - only send WQEs with C bit set generate completion. Not valid (reserved) in EE context. */ + MT_bool sic; /* If zero - Ignore end to end credits on send queue. Not valid (reserved) in EE context. */ + MT_bool sae; /* If set - Atomic operations enabled on send queue. Not valid (reserved) in EE context. */ + MT_bool swe; /* If set - RDMA - write enabled on send queue. Not valid (reserved) in EE context. */ + MT_bool sre; /* If set - RDMA - read enabled on send queue. Not valid (reserved) in EE context. */ + u_int8_t retry_count; /* Maximum retry count */ + u_int8_t sra_max; /* Maximum number of outstanding RDMA-read/Atomic operations allowed in the send queue. Maximum number is 2^SRA_Max. Not valid (reserved) in EE context. */ + u_int8_t flight_lim; /* Number of outstanding (in-flight) messages on the wire allowed for this send queue. \;Number of outstanding messages is 2^Flight_Lim. \;Must be 0 for EE context. */ + u_int8_t ack_req_freq; /* ACK required frequency. ACK required bit will be set in every 2^AckReqFreq packets at least. Not valid (reserved) in EE context. 
*/ + u_int32_t next_send_psn; /* Next PSN to be sent */ + u_int32_t cqn_snd; /* CQ number completions from this queue to be reported to. Not valid (reserved) in EE context. */ + u_int64_t next_snd_wqe; /* Pointer and properties of next WQE on send queue. The format is the same as the next segment (first 8 bytes) in the WQE. Not valid (reserved) in EE context. */ + MT_bool rsc; /* If set - all receive WQEs generate CQEs. If cleared - only receive WQEs with C bit set generate completion. Not valid (reserved) in EE context. */ + MT_bool ric; /* Invalid Credits. If this bit is set, place "Invalid Credits" in ACKs sent from this queue. Not valid (reserved) in EE context. */ + MT_bool rae; /* If set - Atomic operations enabled on receive queue. Not valid (reserved) in EE context. */ + MT_bool rwe; /* If set - RDMA - write enabled on receive queue. Not valid (reserved) in EE context. */ + MT_bool rre; /* If set - RDMA - read enabled on receive queue. Not valid (reserved) in EE context. */ + u_int8_t rra_max; /* Maximum number of outstanding RDMA-read/Atomic operations allowed on receive queue is 2^RRA_Max. \;Must be 0 for EE context. */ + u_int32_t next_rcv_psn; /* Next (expected) PSN on receive */ + u_int8_t min_rnr_nak; /* Minimum RNR NAK timer value (TTTTT field encoding). Not valid (reserved) in EE context. */ + u_int32_t ra_buff_indx; /* Index to outstanding read/atomic buffer. */ + u_int32_t cqn_rcv; /* CQ number completions from receive queue to be reported to. Not valid (reserved) in EE context. */ + u_int64_t next_rcv_wqe; /* Pointer and properties of next WQE on the receive queue. The format is the same as the next segment (first 8 bytes) in the WQE. Not valid (reserved) in EE context. */ + u_int32_t q_key; /* Q_Key to be validated against received datagrams and sent if MSB of Q_Key specified in the WQE is set. Not valid (reserved) in EE context. */ + u_int32_t srqn; /* Specifies the SRQ number from which the QP dequeues receive descriptors. Valid only if srq bit is set. Not valid (reserved) in EE context. */ + MT_bool srq; /* If set, this queue is fed by descriptors from SRQ specified in the srqn field. Not valid (reserved) in EE context. */ +} +THH_qpee_context_t; + +typedef struct THH_srq_context_st { + u_int32_t pd; /* SRQ protection domain */ + u_int32_t uar; /* UAR index for doorbells of this SRQ */ + u_int32_t l_key; /* memory key (L-Key) to be used to access WQEs */ + u_int32_t wqe_addr_h; /* Bits 63:32 of WQE address for SRQ */ + u_int32_t next_wqe_addr_l; /* Bits 31:0 of next WQE address (valid only on QUERY/HW2SW) */ + u_int32_t ds; /* Descriptor size for SRQ (divided by 16) */ + u_int16_t wqe_cnt; /* WQE count on SRQ */ + u_int8_t state; /* SRQ state (QUERY)- 0xF=SW-own 0x0=HW-own 0x1=SRQ-error */ +} THH_srq_context_t; + +typedef struct { + u_int8_t ver; /* Version. Must be zero for InfiniHost */ + MT_bool r_w; /* Defines whether this entry is Region (TRUE) or Window (FALSE) */ + MT_bool pa; /* Physical address. If set, no virtual-to-physical address translation will be performed for this region */ + MT_bool lr; /* If set - local read access enabled */ + MT_bool lw; /* If set - local write access enabled */ + MT_bool rr; /* If set - Remote read access enabled. */ + MT_bool rw; /* If set - remote write access enabled */ + MT_bool a; /* If set - Remote Atomic access is enabled */ + MT_bool eb; /* If set - Bind is enabled. Valid for region entry only. 
*/ + MT_bool pw; /* If set, all writes to this region are posted writes */ + MT_bool m_io; /* If set - memory command is used on the uplink bus, if cleared - IO. If IO configured - PW bit must be cleared. */ + u_int8_t status; /* 0 valid window/region\;1 valid unbound window */ + u_int8_t page_size; /* Page size used for the region. Actual size is [4K]*2^Page_size bytes. */ + u_int32_t mem_key; /* The memory Key. This field is compared to key used to access the region/window. Lower-order bits are restricted (index to the table). */ + u_int32_t pd; /* Protection Domain */ + u_int64_t start_address; /* Start Address - Virtual Address where this region/window starts */ + u_int64_t reg_wnd_len; /* Region/Window Length */ + u_int32_t lkey; /* LKey used for accessing the MTT (for bound windows) */ + u_int32_t win_cnt; /* Number of windows bound to this region. Valid for regions only. */ + u_int32_t win_cnt_limit; /* The number of windows (limit) that can be bound to this region. If a bind operation is attempted when Win_cnt has reached Win_cnt_limit, the operation will be aborted, a CQE with error will be generated and the QP will transfer to error state. Zero means no limit. */ + u_int64_t mtt_seg_adr; /* Base (first) address of the MTT segment, aligned on segment_size boundary */ +} +THH_mpt_entry_t; + +typedef struct { + u_int64_t ptag; /* physical tag (full address). Low order bits are masked according to page size*/ + MT_bool p; /* Present bit. If set, page entry is valid. If cleared, access to this page will generate 'non-present page access fault'. */ +} +THH_mtt_entry_t; + +typedef struct THH_port_init_props_st{ /* !!! This is going to be changed for updated INIT_IB !!! */ + MT_bool e; /* Port Physical Link Enable */ + u_int8_t vl_cap; /* Maximum VLs supported on the port, excluding VL15 */ + IB_link_width_t port_width_cap; /* IB Port Width */ + IB_mtu_t mtu_cap; /* Maximum MTU Supported */ + u_int16_t max_gid; /* Maximum number of GIDs for the port */ + u_int16_t max_pkey; /* Maximum pkeys for the port */ + MT_bool g0; /* ADDED FOR NEW INIT_IB */ + IB_guid_t guid0; /* ADDED FOR NEW INIT_IB */ +} +THH_port_init_props_t; + +typedef struct { + MT_bool rqk; /* reset QKey Violation counter */ + u_int32_t capability_mask; /* PortInfo Capability Mask */ +} THH_set_ib_props_t; + +#if 0 +typedef struct { /* To be used for MODIFY_HCA */ + +} THH_port_props_t; +#endif + +typedef struct { + u_int8_t log_max_qp; /* Log2 of the Maximum number of QPs supported */ + u_int8_t log2_rsvd_qps; /* Log2 of the number of QPs reserved for firmware use */ + u_int8_t log_max_qp_sz; /* Log2 of the maximum WQEs allowed on the RQ or the SQ */ + u_int8_t log_max_srqs; /* Log2 of the Maximum number of SRQs supported */ + u_int8_t log2_rsvd_srqs; /* Log2 of the number of SRQs reserved for firmware use */ + u_int8_t log_max_srq_sz; /* Log2 of the maximum WQEs allowed on the SRQ */ + u_int8_t log_max_ee; /* Log2 of the Maximum number of EE contexts supported */ + u_int8_t log2_rsvd_ees; /* Log2 of the number of EECs reserved for firmware use */ + u_int8_t log_max_cq; /* Log2 of the Maximum number of CQs supported */ + u_int8_t log2_rsvd_cqs; /* Log2 of the number of CQs reserved for firmware use */ + u_int8_t log_max_cq_sz; /* Log2 of the Maximum CQEs allowed in a CQ */ + u_int8_t log_max_eq; /* Log2 of the Maximum number of EQs */ + u_int8_t num_rsvd_eqs; /* The number of EQs reserved for firmware use */ + u_int8_t log_max_mpts; /* Log2 of the Maximum number of MPT entries (the number of Regions/Windows) */ + u_int8_t log_max_mtt_seg; /* 
Log2 of the Maximum number of MTT segments */ + u_int8_t log2_rsvd_mrws; /* Log2 of the number of MPTs reserved for firmware use */ + u_int8_t log_max_mrw_sz; /* Log2 of the Maximum Size of Memory Region/Window */ + u_int8_t log2_rsvd_mtts; /* Log2 of the number of MTT segments reserved for firmware use */ + u_int8_t log_max_av; /* Log2 of the Maximum number of Address Vectors */ + u_int8_t log_max_ra_res_qp; /* Log2 of the Maximum number of outstanding RDMA read/Atomic per QP as a responder */ + u_int8_t log_max_ra_req_qp; /* Log2 of the maximum number of outstanding RDMA read/Atomic per QP as a requester */ + u_int8_t log_max_ra_res_global; /* Log2 of the maximum number of RDMA read/atomic operations the HCA responder can support globally. That implies the RDB table size. */ + u_int8_t local_ca_ack_delay; /* The Local CA ACK Delay. This is the value recommended to be returned in Query HCA verb. + The delay value in microseconds is computed using 4.096us * 2^(Local_CA_ACK_Delay). */ + u_int8_t log_max_gid; /* Log2 of the maximum number of GIDs per port */ + u_int8_t log_max_pkey; /* Log2 of the max PKey Table Size (per IB port) */ + u_int8_t num_ports; /* Number of IB ports */ + IB_link_width_t max_port_width; /* IB Port Width */ + IB_mtu_t max_mtu; /* Maximum MTU Supported */ + u_int8_t max_vl; /* Maximum number of VLs per IB port excluding VL15 */ + MT_bool rc; /* RC Transport supported */ + MT_bool uc; /* UC Transport Supported */ + MT_bool ud; /* UD Transport Supported */ + MT_bool rd; /* RD Transport Supported */ + MT_bool raw_ipv6; /* Raw IPv6 Transport Supported */ + MT_bool raw_ether; /* Raw Ethertype Transport Supported */ + MT_bool srq; /* SRQ is supported */ + MT_bool pkv; /* PKey Violation Counter Supported */ + MT_bool qkv; /* QKey Violation Counter Supported */ + MT_bool mw; /* Memory windows supported */ + MT_bool apm; /* Automatic Path Migration Supported */ + MT_bool atm; /* Atomic operations supported (atomicity is guaranteed between QPs on this HCA) */ + MT_bool rm; /* Raw Multicast Supported */ + MT_bool avp; /* Address Vector Port checking supported */ + MT_bool udm; /* UD Multicast supported */ + MT_bool pg; /* Paging on demand supported */ + MT_bool r; /* Router mode supported */ + u_int8_t log_pg_sz; /* Log2 of system page size */ + u_int8_t uar_sz; /* UAR Area Size = 1MB * 2^max_uar_sz */ + u_int8_t num_rsvd_uars; /* The number of UARs reserved for firmware use */ + u_int16_t max_desc_sz; /* Max descriptor size */ + u_int8_t max_sg; /* Maximum S/G list elements in a WQE */ + u_int8_t log_max_mcg; /* Log2 of the maximum number of multicast groups */ + u_int8_t log_max_qp_mcg; /* Log2 of the maximum number of QPs per multicast group */ + /*MT_bool mce; / * Multicast support for extended QP list. 
- removed */ + u_int8_t log_max_rdds; /* Log2 of the maximum number of RDDs */ + u_int8_t num_rsvd_rdds; /* The number of RDDs reserved for firmware use */ + u_int8_t log_max_pd; /* Log2 of the maximum number of PDs */ + u_int8_t num_rsvd_pds; /* The number of PDs reserved for firmware use */ + u_int16_t qpc_entry_sz; /* QPC Entry Size for the device\;For Tavor entry size is 256 bytes */ + u_int16_t eec_entry_sz; /* EEC Entry Size for the device\;For Tavor entry size is 256 bytes */ + u_int16_t eqpc_entry_sz; /* Extended QPC entry size for the device\;For Tavor entry size is 32 bytes */ + u_int16_t eeec_entry_sz; /* Extended EEC entry size for the device\;For Tavor entry size is 32 bytes */ + u_int16_t cqc_entry_sz; /* CQC entry size for the device\;For Tavor entry size is 64 bytes */ + u_int16_t eqc_entry_sz; /* EQ context entry size for the device\;For Tavor entry size is 64 bytes */ + u_int16_t srq_entry_sz; /* SRQ entry size for the device\;For Tavor entry size is 32 bytes */ + u_int16_t uar_scratch_entry_sz; /* UAR Scratchpad Entry Size\;For Tavor entry size is 32 bytes */ +} +THH_dev_lim_t; + +typedef struct { + u_int16_t fw_rev_major; /* Firmware Revision - Major */ + u_int16_t fw_rev_minor; /* Firmware Revision - Minor */ + u_int16_t fw_rev_subminor; /* Firmware Sub-minor version (Patch level). */ + u_int16_t cmd_interface_rev; /* Command Interface Interpreter Revision ID */ + u_int8_t log_max_outstanding_cmd; /* Log2 of the maximum number of commands the HCR can support simultaneously */ + u_int64_t fw_base_addr; /* Physical Address of Firmware Area in DDR Memory */ + u_int64_t fw_end_addr; /* End of firmware address in DDR memory */ + u_int64_t error_buf_start; /* Read Only buffer for catastrophic error reports (phys addr) */ + u_int32_t error_buf_size; /* size of catastrophic error report buffer in words */ +} +THH_fw_props_t; + +typedef struct { + u_int32_t vendor_id; /* Adapter vendor ID */ + u_int32_t device_id; /* Adapter Device ID */ + u_int32_t revision_id; /* Adapter Revision ID */ + u_int8_t intapin; /* Interrupt Signal ID of HCA device pin that is connected to the INTA trace in the HCA board. + 0..39 and 63 are valid values. 255 means INTA trace in board is not connected to the HCA device. + All other values are reserved */ +} +THH_adapter_props_t; + +typedef struct { + u_int64_t ddr_start_adr; /* DDR memory start address */ + u_int64_t ddr_end_adr; /* DDR memory end address (excluding DDR memory reserved for firmware) */ + MT_bool dh; /* When Set DDR is Hidden and cannot be accessed from PCI bus */ + THH_dim_integrity_t di; + THH_dim_apm_t ap; + THH_dim_info_t dimm0; + THH_dim_info_t dimm1; + THH_dim_info_t dimm2; + THH_dim_info_t dimm3; +} +THH_ddr_props_t; + + +typedef struct { + u_int32_t next_gid_index; + IB_gid_t mgid; /* Group's GID */ + u_int32_t valid_qps; /* number of QPs in given group (size of qps array) */ + IB_wqpn_t *qps; /* QPs array with valid_qps QPN entries */ +} +THH_mcg_entry_t; + +#endif /* H_CMD_TYPES_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.c new file mode 100644 index 00000000..4a5dd4fa --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.c @@ -0,0 +1,1976 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#ifdef VXWORKS_OS +#include "Osa_Context.h" +#include "Dump.h" +#endif + + +/*================ macro definitions ===============================================*/ +#ifdef THH_CMD_TIME_TRACK +extern u_int64_t THH_eventp_last_cmdif_interrupt; +#endif + +#define PRM_RSC_DELTA 32 +#define MAX_CMD_OBJS 32 /* max number of objects allowed to be created */ +#define CMD_ETIME_EVENTS 1000000 /* max execution time is fixed when we're using events */ +#define MAX_ITER_ON_EINTR 10 +#define MAX_UC_FOR_GO 100 /* time in microsec to do busy wait on go bit */ +#define MAX_UC_SLEEP_FOR_GO 300000000 /* time in microsec to sleep-wait on go bit */ +#define SHORT_UC_DELAY_FOR_GO 10 /* time in microsec to do busy wait on go bit after first time */ + +#ifdef IN + #undef IN +#endif +#define IN + +#ifdef OUT + #undef OUT +#endif +#define OUT + +#define HCR_DW_BYTE_OFFSET(field) (MT_BYTE_OFFSET(tavorprm_hca_command_register_st,field) & (~3)) +#define HCR_DW_BIT_OFFSET(field) (MT_BIT_OFFSET(tavorprm_hca_command_register_st,field) - HCR_DW_BYTE_OFFSET(field) * 8) +#define HCR_BIT_SIZE(field) MT_BIT_SIZE(tavorprm_hca_command_register_st,field) + + +#define PREP_TOKEN_DW(token_dw, val) MT_INSERT32((token_dw),(val),HCR_DW_BIT_OFFSET(token),MT_BIT_SIZE(tavorprm_hca_command_register_st,token)); \ + token_dw = MOSAL_cpu_to_be32(token_dw); + + + +#define DEFAULT_TOKEN 0x1234 /* default token value to use when there can be no outstanding commands */ +#define FREE_LIST_EOL ((u_int32_t)(-1)) + +#define NEW_EQE_FORMAT + + + +#ifdef VXWORKS_OS +extern int Osa_TraceMe(); +#endif + + +/*================ static variables definitions ====================================*/ + + + +/*================ static functions prototypes =====================================*/ +static THH_cmd_status_t sys_en_hca(struct cmd_if_context_st *entry); +static THH_cmd_status_t main_cmd_flow(struct cmd_if_context_st *entry, command_fields_t *cmd_prms); +static MT_bool cmdif_is_free(struct cmd_if_context_st *entry); +static void cleanup_cmdobj(struct cmd_if_context_st *entry); +static inline void write_command_dw(u_int32_t *dst_buf, u_int8_t go, + u_int8_t use_event, u_int8_t op_mod, u_int16_t opcode); +static inline THH_cmd_status_t cmd_if_status(struct cmd_if_context_st *entry); +static inline void set_mailbox(u_int32_t *dst_buf, MT_phys_addr_t mbx_pa); +static inline void 
ptr_to_mailbox_ptr(MT_phys_addr_t ptr, addr_64bit_t *mbx_pmtr); +static inline void cvt_be32_to_cpu(void *buf, u_int32_t size); +static inline void cvt_cpu_to_be32(void *buf, u_int32_t size); +static void *memcpy_to_tavor(void *dst, const void *src, MT_size_t size); +static void *memcpy_from_tavor(void *dst, const void *src, MT_size_t size); +static inline u_int64_t d32_to_s64(u_int32_t hi, u_int32_t lo); +static THH_cmd_status_t cmd_flow_events(struct cmd_if_context_st *entry, command_fields_t *cmd_prms); +static THH_cmd_status_t cmd_flow_no_events(struct cmd_if_context_st *entry, command_fields_t *cmd_prms); +/* ===> parse functions <=== */ +static void parse_HCR(u_int32_t *result_hcr_image_p, priv_hcr_t *hcr); +#ifdef NEW_EQE_FORMAT +static void parse_new_HCR(u_int32_t *result_hcr_image_p, priv_hcr_t *hcr); +#endif +static void edit_hcr(struct cmd_if_context_st *entry, command_fields_t *cmd_prms, u_int16_t token, cmd_ctx_t *ctx_p, int event); +static void extract_hcr(command_fields_t *cmd_prms, priv_hcr_t *hcr); + +static void print_outs_commands(ctx_obj_t *ctxo_p); +static void track_exec_cmds(struct cmd_if_context_st *entry, cmd_ctx_t *ctx_p); +static void print_track_arr(struct cmd_if_context_st *entry); + +/* ===> pool handling <=== */ +static HH_ret_t alloc_cmd_contexts(struct cmd_if_context_st *entry, u_int32_t num, MT_bool in_at_ddr, MT_bool out_at_ddr); +static HH_ret_t de_alloc_cmd_contexts(struct cmd_if_context_st *entry); +static HH_ret_t acq_cmd_ctx(struct cmd_if_context_st *entry, cmd_ctx_t **ctx_pp); +static void rel_cmd_ctx(struct cmd_if_context_st *entry, cmd_ctx_t *ctx_p); +static HH_ret_t re_alloc_resources(struct cmd_if_context_st *entry, MT_bool in_at_ddr, MT_bool out_at_ddr); + +static int log2(u_int64_t arg); + + +/* ===> print functions <=== */ +//static void print_hcr_dump(struct cmd_if_context_st *entry, u_int32_t cmd); +//static void print_hcr_fields(u_int32_t *buf, u_int32_t cmd); + +/********************************** UTILS *********************************/ +static void cmd_hexdump( void *buf, int size, const char * dump_title ) +{ + int i, j, maxlines, bytes_left, this_line; + char linebuf[200], tempout[20]; + u_int8_t *iterator; + + + iterator = (u_int8_t *)buf; + bytes_left = size; + if (size <= 0) { + return; + } + + MTL_ERROR1("%s, starting at addr 0x%p, size=%d:\n", + dump_title, buf, size); + + + maxlines = (size / 16) + ((size % 16) ? 1 : 0); + + for (i = 0; i < maxlines; i++) { + memset(linebuf, 0, sizeof(linebuf)); + this_line = (bytes_left > 16 ? 16 : bytes_left); + + for (j = 0; j < this_line; j++) { + if ((j % 4) == 0) { + strcat(linebuf," "); + } + sprintf(tempout, "%02x", *iterator); + iterator++; bytes_left--; + strcat(linebuf,tempout); + } + MTL_ERROR1("%s\n", linebuf); + } + MTL_ERROR1("%s END\n", dump_title); +} + +static void dump_cmd_err_info(cmd_ctx_t *ctx_p, command_fields_t *cmd_prms) +{ + MTL_ERROR1("CMD ERROR DUMP. 
opcode=0x%x, opc_mod = 0x%x, exec_time_micro=%u\n", + (u_int32_t)cmd_prms->opcode, cmd_prms->opcode_modifier, cmd_prms->exec_time_micro); + cmd_hexdump((void *) ctx_p->hcr_buf,HCR_SIZE*sizeof(u_int32_t), "HCR dump"); + if ((cmd_prms->in_trans == TRANS_MAILBOX) && (cmd_prms->in_param_size > 0)) { + cmd_hexdump((void *) ctx_p->in_prm.prm_alloc_va,cmd_prms->in_param_size, "IN MAILBOX dump"); + } + if ((cmd_prms->out_trans == TRANS_MAILBOX) && (cmd_prms->out_param_size > 0)) { + cmd_hexdump((void *) ctx_p->out_prm.prm_alloc_va,cmd_prms->out_param_size, "OUT MAILBOX dump"); + } +} + +/* ==== inline functions ===============*/ + +static HH_ret_t inline get_ctx_by_idx(struct cmd_if_context_st *entry, u_int16_t idx, cmd_ctx_t **ctx_pp) +{ + if ( (idx < entry->ctx_obj.num) && (entry->ctx_obj.ctx_arr[idx].ref_cnt>0) ) { + *ctx_pp = &entry->ctx_obj.ctx_arr[idx]; + return HH_OK; + } + return HH_EAGAIN; +} + +static THH_eqn_t inline eqn_set(struct cmd_if_context_st *entry, THH_eqn_t new_eqn) +{ + THH_eqn_t old_eqn; + + MOSAL_spinlock_dpc_lock(&entry->eqn_spl); + old_eqn = entry->eqn; + entry->eqn = new_eqn; + MOSAL_spinlock_unlock(&entry->eqn_spl); + return old_eqn; +} + + +/* + * write_command_dw + */ +static inline void write_command_dw(u_int32_t *dst_buf, u_int8_t go, + u_int8_t use_event, u_int8_t op_mod, u_int16_t opcode) +{ + u_int32_t cmd=0; + + MT_INSERT32(cmd,opcode,HCR_DW_BIT_OFFSET(opcode),HCR_BIT_SIZE(opcode)); + MT_INSERT32(cmd,op_mod,HCR_DW_BIT_OFFSET(opcode_modifier),HCR_BIT_SIZE(opcode_modifier)); + MT_INSERT32(cmd,use_event,HCR_DW_BIT_OFFSET(e),HCR_BIT_SIZE(e)); + MT_INSERT32(cmd,go,HCR_DW_BIT_OFFSET(go),HCR_BIT_SIZE(go)); + MT_INSERT32(cmd,0,HCR_DW_BIT_OFFSET(status),HCR_BIT_SIZE(status)); /* status */ + *dst_buf = MOSAL_cpu_to_be32(cmd); +} + + +/* + * ptr_to_mailbox_ptr + */ +static inline void ptr_to_mailbox_ptr(MT_phys_addr_t ptr, addr_64bit_t *mbx_pmtr) +{ + if ( sizeof(ptr) == 4 ) { + MTL_DEBUG4("pointer is 32 bit\n"); + mbx_pmtr->addr_h = 0; + mbx_pmtr->addr_l = (u_int32_t)ptr; + } + else if ( sizeof(ptr) == 8 ) { + MTL_DEBUG4("pointer is 64 bit\n"); + mbx_pmtr->addr_h = (u_int32_t)(((u_int64_t)ptr)>>32); + mbx_pmtr->addr_l = (u_int32_t)ptr; + } + else { + MTL_ERROR1("bad address pointer size: %d\n",(int)sizeof(ptr)); + } + mbx_pmtr->addr_h = MOSAL_cpu_to_be32(mbx_pmtr->addr_h); + mbx_pmtr->addr_l = MOSAL_cpu_to_be32(mbx_pmtr->addr_l); +} + +/*================ global functions definitions ====================================*/ + +/* + * THH_cmd_create + */ +HH_ret_t THH_cmd_create(THH_hob_t hob, u_int32_t hw_ver, MT_phys_addr_t cr_base, MT_phys_addr_t uar0_base, THH_cmd_t *cmd_if_p, + MT_bool inf_timeout, u_int32_t num_cmds_outs) +{ + struct cmd_if_context_st *entry; + HH_ret_t rc; + u_int64_t cps; + + FUNC_IN; + + /* allocate memory for cmdif object */ + entry = TMALLOC(struct cmd_if_context_st); + if ( !entry ) { + MTL_ERROR1(MT_FLFMT("%s: failed to allocate memory for cmdif object"), __func__); + MT_RETURN(HH_EAGAIN); + } + memset(entry, 0, sizeof(struct cmd_if_context_st)); + + /* check if we should post commands to UAR0 */ + if ( uar0_base == (MT_phys_addr_t) MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF) ) { + entry->post_to_uar0 = FALSE; + } + else { + entry->post_to_uar0 = TRUE; + MTL_ERROR1(MT_FLFMT("%s: posting to uar0"), __func__); + } + + entry->hob = hob; + entry->hcr_virt_base = (void *)MOSAL_io_remap(cr_base + TAVOR_HCR_OFFSET_FROM_CR_BASE, + sizeof(struct tavorprm_hca_command_register_st)/8); + + /* map hcr to kernel space */ + if ( !entry->hcr_virt_base ) { + MTL_ERROR1(MT_FLFMT("%s: 
MOSAL_io_remap() failed. pa="PHYS_ADDR_FMT", size=" SIZE_T_FMT), __func__, + cr_base + TAVOR_HCR_OFFSET_FROM_CR_BASE, sizeof(struct tavorprm_hca_command_register_st)/8); + cleanup_cmdobj(entry); + MT_RETURN(HH_EAGAIN); + } + + /* if we're going to post to uar0 we need to map 8 bytes from UAR0 to kernel space */ + if ( entry->post_to_uar0 ) { + entry->uar0_virt_base = (void *)MOSAL_io_remap(uar0_base, 8); + if ( !entry->uar0_virt_base ) { + cleanup_cmdobj(entry); + MT_RETURN(HH_EAGAIN); + } + } + + entry->sys_enabled = FALSE; + entry->ddrmm = THH_DDRMM_INVALID_HANDLE; + entry->eqn = THH_INVALID_EQN; + + entry->inf_timeout = inf_timeout; + entry->req_num_cmds_outs = num_cmds_outs; + + entry->tokens_shift = 0; + entry->tokens_counter = 0; + entry->tokens_idx_mask = (1<<entry->tokens_shift)-1; + + cps = MOSAL_get_counts_per_sec(); + if ( cps & MAKE_ULONGLONG(0xffffffff00000000) ) { + MTL_ERROR1(MT_FLFMT("%s: *** delay time calculation for go bit will not be accurate !!!"), __func__); + } + entry->counts_busy_wait_for_go = (u_int64_t)(((u_int32_t)cps)/1000000) * MAX_UC_FOR_GO; + entry->counts_sleep_wait_for_go = (u_int64_t)(((u_int32_t)cps)/1000000) * MAX_UC_SLEEP_FOR_GO; + entry->short_wait_for_go = (u_int64_t)(((u_int32_t)cps)/1000000) * SHORT_UC_DELAY_FOR_GO; + + entry->in_at_ddr = FALSE; + entry->out_at_ddr = FALSE; + rc = alloc_cmd_contexts(entry, 1, entry->in_at_ddr, entry->out_at_ddr); + if ( rc != HH_OK ) { + MTL_ERROR1(MT_FLFMT("failed to allocate command contexts")); + cleanup_cmdobj(entry); + MT_RETURN(HH_EAGAIN); + } + + entry->track_arr = TNMALLOC(u_int16_t, 256); + if ( !entry->track_arr ) { + cleanup_cmdobj(entry); + MT_RETURN(HH_EAGAIN); + } + memset(entry->track_arr, 0, sizeof(u_int16_t)*256); + + MOSAL_mutex_init(&entry->sys_en_mtx); + MOSAL_sem_init(&entry->no_events_sem, 1); + MOSAL_mutex_init(&entry->hcr_mtx); + + MOSAL_sem_init(&entry->use_events_sem, 0); + MOSAL_sem_init(&entry->fw_outs_sem, 0); + + MOSAL_spinlock_init(&entry->close_spl); + entry->close_action = FALSE; + MOSAL_syncobj_init(&entry->fatal_list); + + MOSAL_spinlock_init(&entry->eqn_spl); + + MOSAL_spinlock_init(&entry->ctr_spl); + entry->events_in_pipe = 0; + entry->poll_in_pipe = 0; + + *cmd_if_p = (THH_cmd_t)entry; + + MT_RETURN(HH_OK); +} + + + +/* + * THH_cmd_destroy + */ +HH_ret_t THH_cmd_destroy(THH_cmd_t cmd_if) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + + FUNC_IN; + + + MOSAL_mutex_free(&entry->sys_en_mtx); + MOSAL_sem_free(&entry->no_events_sem); + MOSAL_sem_free(&entry->use_events_sem); + MOSAL_sem_free(&entry->fw_outs_sem); + MOSAL_syncobj_free(&entry->fatal_list); + + cleanup_cmdobj(entry); + + MT_RETURN(HH_OK); +} + + +/* + * THH_cmd_set_fw_props + */ +THH_cmd_status_t THH_cmd_set_fw_props(THH_cmd_t cmd_if, THH_fw_props_t *fw_props) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + unsigned int i; + + FUNC_IN; + if ( entry->query_fw_done==FALSE ) { + entry->queried_max_outstanding = 1<<fw_props->log_max_outstanding_cmd; + if ( entry->req_num_cmds_outs <= entry->queried_max_outstanding ) { + entry->max_outstanding = entry->req_num_cmds_outs; + } + else { + entry->max_outstanding = entry->queried_max_outstanding; + } + + entry->sw_num_rsc = 1<<log2(((u_int64_t)(entry->max_outstanding)) + PRM_RSC_DELTA); + MTL_DEBUG1(MT_FLFMT("%s: fw=%d, delta=%d, used=%d"), __func__, + entry->queried_max_outstanding, PRM_RSC_DELTA, entry->sw_num_rsc); + + if ( re_alloc_resources(entry, entry->in_at_ddr, entry->out_at_ddr) != HH_OK ) { + MTL_ERROR1(MT_FLFMT("%s: re_alloc_resources failed"), 
__func__); + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + + for ( i=0; i<entry->max_outstanding; ++i ) { + MOSAL_sem_rel(&entry->fw_outs_sem); + } + /* the following line is not an error - it is logged just to make sure the line is printed */ + MTL_DEBUG1(MT_FLFMT("%s: queried_max_outstanding=%d, max_outstanding=%d"), __func__, + entry->queried_max_outstanding, entry->max_outstanding); + memcpy(&entry->fw_props,fw_props,sizeof(THH_fw_props_t)); +/*** warning C4242: '=' : conversion from 'int' to 'u_int8_t', possible loss of data ***/ + entry->tokens_shift = (u_int8_t)log2(entry->sw_num_rsc); + entry->tokens_idx_mask = (1<<entry->tokens_shift)-1; + entry->query_fw_done = TRUE; + } + MT_RETURN(THH_CMD_STAT_OK); +} + + + + + +/* + * THH_cmd_set_eq + */ +HH_ret_t THH_cmd_set_eq(THH_cmd_t cmd_if) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + int i; + THH_eqn_t old_eqn; + unsigned long ctr; + + FUNC_IN; + old_eqn = eqn_set(entry, 0); + if ( old_eqn != THH_INVALID_EQN ) { + /* eqn already set. clr_eq before setting a new one */ + MT_RETURN(HH_OK); + } + + /* make sure there are no commands that passed the if statement in main_cmd_flow + before we changed state, to prevent deadlock when we acquire the semaphore */ + do { + MOSAL_spinlock_lock(&entry->ctr_spl); + ctr = entry->poll_in_pipe; + MOSAL_spinlock_unlock(&entry->ctr_spl); + if ( ctr == 0 ) break; + MOSAL_delay_execution(20000); + + } while ( 1 ); + + MOSAL_sem_acq_ui(&entry->no_events_sem); + + for ( i=0; i<(int)entry->sw_num_rsc; ++i ) { + MTL_DEBUG2(MT_FLFMT("increase sem level")); + MOSAL_sem_rel(&entry->use_events_sem); + } + + MT_RETURN(HH_OK); +} + + +/* + * THH_cmd_clr_eq + */ +HH_ret_t THH_cmd_clr_eq(THH_cmd_t cmd_if) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + int i; + THH_eqn_t old_eqn; + unsigned long ctr; + + FUNC_IN; + MTL_DEBUG1(MT_FLFMT("%s: called"), __func__); + + old_eqn = eqn_set(entry, THH_INVALID_EQN); + if ( old_eqn == THH_INVALID_EQN ) { + MTL_DEBUG1(MT_FLFMT("%s: returning"), __func__); + MT_RETURN(HH_OK); /* already cleared */ + } + + /* make sure there are no commands that passed the if statement in main_cmd_flow + before we changed state, to prevent deadlock when we acquire the semaphore */ + do { + MOSAL_spinlock_lock(&entry->ctr_spl); + ctr = entry->events_in_pipe; + MOSAL_spinlock_unlock(&entry->ctr_spl); + if ( ctr == 0 ) break; + MOSAL_delay_execution(20000); + + } while ( 1 ); + + /* acquire the semaphore to its full depth to prevent others from acquiring it */ + for ( i=0; i<(int)entry->sw_num_rsc; ++i ) { + MOSAL_sem_acq_ui(&entry->use_events_sem); + MTL_DEBUG1(MT_FLFMT("%s: acquired %d"), __func__, i); + } + + MOSAL_sem_rel(&entry->no_events_sem); + MTL_DEBUG1(MT_FLFMT("%s: returning"), __func__); + MT_RETURN(HH_OK); +} + + + +/* + * THH_cmd_eventh + */ +void THH_cmd_eventh(THH_cmd_t cmd_if, u_int32_t *result_hcr_image_p) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + priv_hcr_t hcr; + cmd_ctx_t *ctx_p; + + FUNC_IN; + MOSAL_sem_rel(&entry->fw_outs_sem); + parse_new_HCR(result_hcr_image_p, &hcr); + +#ifdef THH_CMD_TIME_TRACK + MTL_ERROR1("CMD_TIME:END: cmd=UNKNOWN token=0x%X time=["U64_FMT"]\n", + hcr.token, THH_eventp_last_cmdif_interrupt); +#endif + + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + if ( (get_ctx_by_idx(entry, hcr.token&entry->tokens_idx_mask, &ctx_p)!=HH_OK) || + (hcr.token!=ctx_p->token) + ) { + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + MTL_ERROR1(MT_FLFMT("%s: could not find context by token. 
token=0x%04x"), __func__, hcr.token); + THH_hob_fatal_error(entry->hob, THH_FATAL_TOKEN, VAPI_EV_SYNDROME_NONE); + MT_RETV; + } + ctx_p->hcr = hcr; + +/* fix for vapi_status console cmd */ +#if !defined(VXWORKS_OS) + MOSAL_syncobj_signal(&ctx_p->syncobj); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); +#else + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + MOSAL_syncobj_signal(&ctx_p->syncobj); +#endif + + MT_RETV; +} + + +/* + * THH_cmd_asign_ddrmm + */ +HH_ret_t THH_cmd_assign_ddrmm(THH_cmd_t cmd_if, THH_ddrmm_t ddrmm) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + MT_bool in_at_ddr, out_at_ddr; + + FUNC_IN; + + MOSAL_mutex_acq_ui(&entry->sys_en_mtx); + if ( entry->ddrmm != THH_DDRMM_INVALID_HANDLE ) { + /* ddrmm already assigned */ + MOSAL_mutex_rel(&entry->sys_en_mtx); + MT_RETURN(HH_EBUSY); + } + /* ddrmm was not assigned - assign it */ + entry->ddrmm = ddrmm; + if ( entry->sys_enabled ) { +#ifdef EQS_CMD_IN_DDR + out_at_ddr = TRUE; +#else + out_at_ddr = FALSE; +#endif +#ifdef IN_PRMS_AT_DDR + in_at_ddr = TRUE; +#else + in_at_ddr = FALSE; +#endif + + if ( (in_at_ddr!=entry->in_at_ddr) || (out_at_ddr!=entry->out_at_ddr) ) { + if ( re_alloc_resources(entry, in_at_ddr, out_at_ddr) != HH_OK ) { + MOSAL_mutex_rel(&entry->sys_en_mtx); + MT_RETURN(HH_EAGAIN); + } + else { + entry->in_at_ddr = in_at_ddr; + entry->out_at_ddr = out_at_ddr; + } + } + + } + + MOSAL_mutex_rel(&entry->sys_en_mtx); + + MT_RETURN(HH_OK); +} + + +/* + * THH_cmd_revoke_ddrmm + */ +HH_ret_t THH_cmd_revoke_ddrmm(THH_cmd_t cmd_if) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + HH_ret_t rc; + + FUNC_IN; + MTL_DEBUG1(MT_FLFMT("%s called"), __func__); + MOSAL_sem_acq_ui(&entry->no_events_sem); + entry->in_at_ddr = FALSE; + entry->out_at_ddr = FALSE; + rc = re_alloc_resources(entry, entry->in_at_ddr, entry->out_at_ddr); + if ( rc != HH_OK ) { + MOSAL_sem_rel(&entry->no_events_sem); + MTL_ERROR1(MT_FLFMT("%s: re_alloc_resources failed - %s"), __func__, HH_strerror(rc)); + MT_RETURN(rc); + } + entry->ddrmm = THH_DDRMM_INVALID_HANDLE; + + MOSAL_sem_rel(&entry->no_events_sem); + MT_RETURN(HH_OK); +} + + + +/* + * THH_cmd_SYS_EN + */ +THH_cmd_status_t THH_cmd_SYS_EN(IN THH_cmd_t cmd_if) +{ + command_fields_t cmd_prms = {0}; + THH_cmd_status_t rc; + + FUNC_IN; + cmd_prms.opcode = TAVOR_IF_CMD_SYS_EN; + cmd_prms.in_trans = TRANS_NA; + cmd_prms.out_trans = TRANS_NA; + rc = cmd_invoke(cmd_if, &cmd_prms); + if ( rc != THH_CMD_STAT_OK ) { + MTL_ERROR1("%s\n", str_THH_cmd_status_t(rc)); + } + MT_RETURN(rc); +} + + + +/* + * str_THH_cmd_status_t + */ +const char *str_THH_cmd_status_t(THH_cmd_status_t status) +{ + switch ( status ) { + case THH_CMD_STAT_OK: + return THH_CMD_STAT_OK_STR; + + case THH_CMD_STAT_INTERNAL_ERR: + return THH_CMD_STAT_INTERNAL_ERR_STR; + + case THH_CMD_STAT_BAD_OP: + return THH_CMD_STAT_BAD_OP_STR; + + case THH_CMD_STAT_BAD_PARAM: + return THH_CMD_STAT_BAD_PARAM_STR; + + case THH_CMD_STAT_BAD_SYS_STATE: + return THH_CMD_STAT_BAD_SYS_STATE_STR; + + case THH_CMD_STAT_BAD_RESOURCE: + return THH_CMD_STAT_BAD_RESOURCE_STR; + + case THH_CMD_STAT_RESOURCE_BUSY: + return THH_CMD_STAT_RESOURCE_BUSY_STR; + + case THH_CMD_STAT_DDR_MEM_ERR: + return THH_CMD_STAT_DDR_MEM_ERR_STR; + + case THH_CMD_STAT_EXCEED_LIM: + return THH_CMD_STAT_EXCEED_LIM_STR; + + case THH_CMD_STAT_BAD_RES_STATE: + return THH_CMD_STAT_BAD_RES_STATE_STR; + + case THH_CMD_STAT_BAD_INDEX: + return THH_CMD_STAT_BAD_INDEX_STR; + + case THH_CMD_STAT_BAD_QPEE_STATE: + return 
THH_CMD_STAT_BAD_QPEE_STATE_STR; + + case THH_CMD_STAT_BAD_SEG_PARAM: + return THH_CMD_STAT_BAD_SEG_PARAM_STR; + + case THH_CMD_STAT_REG_BOUND: + return THH_CMD_STAT_REG_BOUND_STR; + + case THH_CMD_STAT_BAD_PKT: + return THH_CMD_STAT_BAD_PKT_STR; + + case THH_CMD_STAT_EAGAIN: + return THH_CMD_STAT_EAGAIN_STR; + + case THH_CMD_STAT_EABORT: + return THH_CMD_STAT_EABORT_STR; + + case THH_CMD_STAT_ETIMEOUT: + return THH_CMD_STAT_ETIMEOUT_STR; + + case THH_CMD_STAT_EFATAL: + return THH_CMD_STAT_EFATAL_STR; + + case THH_CMD_STAT_EBADARG: + return THH_CMD_STAT_EBADARG_STR; + + case THH_CMD_STAT_EINTR: + return THH_CMD_STAT_EINTR_STR; + + case THH_CMD_STAT_BAD_SIZE: + return THH_CMD_STAT_BAD_SIZE_STR; + + default: + return "unrecognized status"; + } +} + + +const char *cmd_str(tavor_if_cmd_t opcode) +{ + switch ( opcode ) { + case TAVOR_IF_CMD_SYS_EN: + return "TAVOR_IF_CMD_SYS_EN"; + case TAVOR_IF_CMD_SYS_DIS: + return "TAVOR_IF_CMD_SYS_DIS"; + case TAVOR_IF_CMD_QUERY_DEV_LIM: + return "TAVOR_IF_CMD_QUERY_DEV_LIM"; + case TAVOR_IF_CMD_QUERY_FW: + return "TAVOR_IF_CMD_QUERY_FW"; + case TAVOR_IF_CMD_QUERY_DDR: + return "TAVOR_IF_CMD_QUERY_DDR"; + case TAVOR_IF_CMD_QUERY_ADAPTER: + return "TAVOR_IF_CMD_QUERY_ADAPTER"; + case TAVOR_IF_CMD_INIT_HCA: + return "TAVOR_IF_CMD_INIT_HCA"; + case TAVOR_IF_CMD_CLOSE_HCA: + return "TAVOR_IF_CMD_CLOSE_HCA"; + case TAVOR_IF_CMD_INIT_IB: + return "TAVOR_IF_CMD_INIT_IB"; + case TAVOR_IF_CMD_CLOSE_IB: + return "TAVOR_IF_CMD_CLOSE_IB"; + case TAVOR_IF_CMD_QUERY_HCA: + return "TAVOR_IF_CMD_QUERY_HCA"; + case TAVOR_IF_CMD_SET_IB: + return "TAVOR_IF_CMD_SET_IB"; + case TAVOR_IF_CMD_SW2HW_MPT: + return "TAVOR_IF_CMD_SW2HW_MPT"; + case TAVOR_IF_CMD_QUERY_MPT: + return "TAVOR_IF_CMD_QUERY_MPT"; + case TAVOR_IF_CMD_HW2SW_MPT: + return "TAVOR_IF_CMD_HW2SW_MPT"; + case TAVOR_IF_CMD_READ_MTT: + return "TAVOR_IF_CMD_READ_MTT"; + case TAVOR_IF_CMD_WRITE_MTT: + return "TAVOR_IF_CMD_WRITE_MTT"; + case TAVOR_IF_CMD_MAP_EQ: + return "TAVOR_IF_CMD_MAP_EQ"; + case TAVOR_IF_CMD_SW2HW_EQ: + return "TAVOR_IF_CMD_SW2HW_EQ"; + case TAVOR_IF_CMD_HW2SW_EQ: + return "TAVOR_IF_CMD_HW2SW_EQ"; + case TAVOR_IF_CMD_QUERY_EQ: + return "TAVOR_IF_CMD_QUERY_EQ"; + case TAVOR_IF_CMD_SW2HW_CQ: + return "TAVOR_IF_CMD_SW2HW_CQ"; + case TAVOR_IF_CMD_HW2SW_CQ: + return "TAVOR_IF_CMD_HW2SW_CQ"; + case TAVOR_IF_CMD_QUERY_CQ: + return "TAVOR_IF_CMD_QUERY_CQ"; + case TAVOR_IF_CMD_RST2INIT_QPEE: + return "TAVOR_IF_CMD_RST2INIT_QPEE"; + case TAVOR_IF_CMD_INIT2RTR_QPEE: + return "TAVOR_IF_CMD_INIT2RTR_QPEE"; + case TAVOR_IF_CMD_RTR2RTS_QPEE: + return "TAVOR_IF_CMD_RTR2RTS_QPEE"; + case TAVOR_IF_CMD_RTS2RTS_QPEE: + return "TAVOR_IF_CMD_RTS2RTS_QPEE"; + case TAVOR_IF_CMD_SQERR2RTS_QPEE: + return "TAVOR_IF_CMD_SQERR2RTS_QPEE"; + case TAVOR_IF_CMD_2ERR_QPEE: + return "TAVOR_IF_CMD_2ERR_QPEE"; + case TAVOR_IF_CMD_RTS2SQD_QPEE: + return "TAVOR_IF_CMD_RTS2SQD_QPEE"; + case TAVOR_IF_CMD_SQD2RTS_QPEE: + return "TAVOR_IF_CMD_SQD2RTS_QPEE"; + case TAVOR_IF_CMD_ERR2RST_QPEE: + return "TAVOR_IF_CMD_ERR2RST_QPEE"; + case TAVOR_IF_CMD_QUERY_QPEE: + return "TAVOR_IF_CMD_QUERY_QPEE"; + case TAVOR_IF_CMD_CONF_SPECIAL_QP: + return "TAVOR_IF_CMD_CONF_SPECIAL_QP"; + case TAVOR_IF_CMD_MAD_IFC: + return "TAVOR_IF_CMD_MAD_IFC"; + case TAVOR_IF_CMD_READ_MGM: + return "TAVOR_IF_CMD_READ_MGM"; + case TAVOR_IF_CMD_WRITE_MGM: + return "TAVOR_IF_CMD_WRITE_MGM"; + case TAVOR_IF_CMD_MGID_HASH: + return "TAVOR_IF_CMD_MGID_HASH"; + case TAVOR_IF_CMD_CONF_NTU: + return "TAVOR_IF_CMD_CONF_NTU"; + case TAVOR_IF_CMD_QUERY_NTU: + return 
"TAVOR_IF_CMD_QUERY_NTU"; + case TAVOR_IF_CMD_RESIZE_CQ: + return "TAVOR_IF_CMD_RESIZE_CQ"; + case TAVOR_IF_CMD_SUSPEND_QPEE: + return "TAVOR_IF_CMD_SUSPEND_QPEE"; + case TAVOR_IF_CMD_UNSUSPEND_QPEE: + return "TAVOR_IF_CMD_UNSUSPEND_QPEE"; + case TAVOR_IF_CMD_SW2HW_SRQ: + return "TAVOR_IF_CMD_SW2HW_SRQ"; + case TAVOR_IF_CMD_HW2SW_SRQ: + return "TAVOR_IF_CMD_HW2SW_SRQ"; + case TAVOR_IF_CMD_QUERY_SRQ: + return "TAVOR_IF_CMD_QUERY_SRQ"; + case TAVOR_IF_CMD_SYNC_TPT: + return "TAVOR_IF_CMD_SYNC_TPT"; + break; + case TAVOR_IF_CMD_QUERY_DEBUG_MSG: + return "TAVOR_IF_CMD_QUERY_DEBUG_MSG"; + break; + case TAVOR_IF_CMD_SET_DEBUG_MSG: + return "TAVOR_IF_CMD_SET_DEBUG_MSG"; + break; + case TAVOR_IF_CMD_DIAG_RPRT: + return "TAVOR_IF_CMD_DIAG_RPRT"; + break; + case TAVOR_IF_CMD_NOP: + return "TAVOR_IF_CMD_NOP"; + break; + case TAVOR_IF_CMD_MOD_STAT_CFG: + return "TAVOR_IF_CMD_MOD_STAT_CFG"; + break; + case TAVOR_IF_CMD_ACCESS_DDR: + return "TAVOR_IF_CMD_ACCESS_DDR"; + break; + case TAVOR_IF_CMD_MODIFY_MPT: + return "TAVOR_IF_CMD_MODIFY_MPT"; + break; + case TAVOR_IF_CMD_SQD2SQD_QPEE: + return "TAVOR_IF_CMD_SQD2SQD_QPEE"; + break; + case TAVOR_IF_CMD_INIT2INIT_QPEE: + return "TAVOR_IF_CMD_INIT2INIT_QPEE"; + break; + default: + return "[UNKNOWN_COMMAND]"; + } +} + + +/*================ static functions definitions ====================================*/ + + +/* + * cmd_invoke + */ +THH_cmd_status_t cmd_invoke(THH_cmd_t cmd_if, command_fields_t *cmd_prms) +{ + struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if; + THH_cmd_status_t rc; + + FUNC_IN; + + if ( entry->have_fatal && ((cmd_prms->input_modifier==0) || (cmd_prms->opcode!=TAVOR_IF_CMD_CLOSE_HCA)) ) { + MT_RETURN(THH_CMD_STAT_EFATAL); + } + + if ( !entry->sys_enabled ) { + /* sys not enabled - we only allow sys enable cmd */ + if ( cmd_prms->opcode != TAVOR_IF_CMD_SYS_EN ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + else { + rc = sys_en_hca(entry); + MT_RETURN(rc); + } + } + else { + /* system enabled */ + if ( cmd_prms->opcode == TAVOR_IF_CMD_SYS_EN ) { + /* we don't allow re-invoking sys enable */ + MT_RETURN(THH_CMD_STAT_BAD_SYS_STATE); + } + else { + rc = main_cmd_flow(entry, cmd_prms); + if ( rc != THH_CMD_STAT_OK ) { + MTL_ERROR1(MT_FLFMT("Failed command 0x%X (%s): status=0x%X (%s)\n"), + cmd_prms->opcode, cmd_str(cmd_prms->opcode),rc,str_THH_cmd_status_t(rc)); + } + MT_RETURN(rc); + } + } +} + + +/* + * sys_en_hca + */ +static THH_cmd_status_t sys_en_hca(struct cmd_if_context_st *entry) +{ + u_int32_t token = 0, i, dw6; + THH_cmd_status_t rc; + + FUNC_IN; + MOSAL_mutex_acq_ui(&entry->sys_en_mtx); + if ( entry->sys_enabled ) { + MOSAL_mutex_rel(&entry->sys_en_mtx); + MT_RETURN(THH_CMD_STAT_OK); /* already enabled */ + } + if ( !cmdif_is_free(entry) ) { + MOSAL_mutex_rel(&entry->sys_en_mtx); + /* no need to call the hob at this early stage - just return THH_CMD_STAT_EFATAL */ + MT_RETURN(THH_CMD_STAT_EFATAL); + } + + PREP_TOKEN_DW(token, DEFAULT_TOKEN); + + MOSAL_MMAP_IO_WRITE_DWORD((u_int8_t *)(entry->hcr_virt_base)+HCR_DW_BYTE_OFFSET(token), token); + + write_command_dw(&dw6, 1, 0, 0, TAVOR_IF_CMD_SYS_EN); + MOSAL_MMAP_IO_WRITE_DWORD(((u_int32_t *)(entry->hcr_virt_base))+6, dw6); + for ( i=0; i<(TAVOR_IF_CMD_ETIME_SYS_EN/10000); ++i ) { + MOSAL_delay_execution(10000); + if ( cmdif_is_free(entry) ) { + MTL_TRACE1("command executed in %d mili seconds\n", i*10); + break; + } + } + if ( !cmdif_is_free(entry) ) { + MOSAL_mutex_rel(&entry->sys_en_mtx); + /* no need to call the hob at this early stage - just return THH_CMD_STAT_EFATAL 
*/ + MT_RETURN(THH_CMD_STAT_ETIMEOUT); + } + rc = cmd_if_status(entry); + if ( rc == THH_CMD_STAT_OK ) { + entry->sys_enabled = TRUE; + } + MOSAL_mutex_rel(&entry->sys_en_mtx); + MT_RETURN(rc); +} + + +/* + * main_cmd_flow + */ +static THH_cmd_status_t main_cmd_flow(struct cmd_if_context_st *entry, command_fields_t *cmd_prms) +{ + THH_cmd_status_t rc; + + FUNC_IN; + MOSAL_spinlock_lock(&entry->ctr_spl); + if ( entry->eqn == THH_INVALID_EQN ) { + /* events not enabled */ + entry->poll_in_pipe++; + MOSAL_spinlock_unlock(&entry->ctr_spl); + rc = cmd_flow_no_events(entry, cmd_prms); + MOSAL_spinlock_lock(&entry->ctr_spl); + entry->poll_in_pipe--; + MOSAL_spinlock_unlock(&entry->ctr_spl); + MT_RETURN(rc); + } + else { + /* events are enabled */ + entry->events_in_pipe++; + MOSAL_spinlock_unlock(&entry->ctr_spl); + rc = cmd_flow_events(entry, cmd_prms); + MOSAL_spinlock_lock(&entry->ctr_spl); + entry->events_in_pipe--; + MOSAL_spinlock_unlock(&entry->ctr_spl); + MT_RETURN(rc); + } +} + + +/* + * cmdif_is_free + */ +static MT_bool cmdif_is_free(struct cmd_if_context_st *entry) +{ + u_int32_t val; + volatile u_int32_t *offset = (volatile u_int32_t *)entry->hcr_virt_base, *ptr; + MT_bool is_free; + + ptr = &offset[HCR_DW_BYTE_OFFSET(go)>>2]; + val = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD(ptr)); + is_free = !MT_EXTRACT32(val, HCR_DW_BIT_OFFSET(go), HCR_BIT_SIZE(go)); + return is_free; +} + + +/* + * cleanup_cmdobj + */ +static void cleanup_cmdobj(struct cmd_if_context_st *entry) +{ + FUNC_IN; + if ( entry->track_arr ) FREE(entry->track_arr); + de_alloc_cmd_contexts(entry); + if ( entry->hcr_virt_base ) MOSAL_io_unmap((MT_virt_addr_t)(entry->hcr_virt_base)); + if ( entry->uar0_virt_base) MOSAL_io_unmap((MT_virt_addr_t)(entry->uar0_virt_base)); + FREE(entry); + MT_RETV; +} + + + + + +/* + * cmd_if_status + */ +static inline THH_cmd_status_t cmd_if_status(struct cmd_if_context_st *entry) +{ + u_int32_t *offset = (u_int32_t *)entry->hcr_virt_base; + u_int32_t cmd; + + cmd = offset[HCR_DW_BYTE_OFFSET(status)>>2]; + cmd = MOSAL_be32_to_cpu(cmd); + return (THH_cmd_status_t)MT_EXTRACT32(cmd, HCR_DW_BIT_OFFSET(status), HCR_BIT_SIZE(status)); +} + + +/* + * set_mailbox + */ +static inline void set_mailbox(u_int32_t *dst_buf, MT_phys_addr_t mbx_pa) +{ + addr_64bit_t addr; + + ptr_to_mailbox_ptr(mbx_pa, &addr); + dst_buf[0] = addr.addr_h; + dst_buf[1] = addr.addr_l; +} + + +/* + * cvt_be32_to_cpu + * size in bytes + */ +static inline void cvt_be32_to_cpu(void *buf, u_int32_t size) +{ + u_int32_t i, *p=(u_int32_t *)(buf); + for ( i=0; i<(size>>2); ++i ) { + *p = MOSAL_be32_to_cpu(*p); + p++; + } +} + + +/* + * cvt_cpu_to_be32 + * size in bytes + */ +static inline void cvt_cpu_to_be32(void *buf, u_int32_t size) +{ + u_int32_t i, *p=(u_int32_t *)(buf); + for ( i=0; i<(size>>2); ++i ) { + *p = MOSAL_cpu_to_be32(*p); + p++; + } +} + + +/* + * d32_to_s64 + */ +static inline u_int64_t d32_to_s64(u_int32_t hi, u_int32_t lo) +{ + return(((u_int64_t)hi) << 32) | (u_int64_t)(lo); +} + + +/* + * + */ +#if 0 +static inline void prep_token_dw(u_int32_t *token_dw_p, u_int16_t val) +{ + MT_INSERT32((*token_dw_p),(val),HCR_DW_BIT_OFFSET(token),MT_BIT_SIZE(tavorprm_hca_command_register_st,token)); + *token_dw_p = MOSAL_cpu_to_be32(*token_dw_p); +} +#endif + +/* + * cmd_flow_events + */ +static THH_cmd_status_t cmd_flow_events(struct cmd_if_context_st *entry, command_fields_t *cmd_prms) +{ + THH_cmd_status_t ret=THH_CMD_STAT_EFATAL; + int i; +#ifdef MAX_DEBUG + u_int64_t command_time; /* For sampling time command started */ 
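+ /* command_time is sampled just before the command is posted and again when the completion event arrives, so MAX_DEBUG builds can report per-command latency in CPU counts (see the MTL_DEBUG4 print in the MT_OK case below) */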
+#endif + u_int64_t start_time, busy_end_time, go_end_time; + call_result_t rc; + HH_ret_t rc1; + cmd_ctx_t *ctx_p; + + FUNC_IN; + + + MOSAL_sem_acq_ui(&entry->use_events_sem); + if ( entry->have_fatal ) { + ret = THH_CMD_STAT_EFATAL; + goto ex_ues_rel; + } + + MOSAL_sem_acq_ui(&entry->fw_outs_sem); /* released in THH_cmd_eventh */ + if ( entry->have_fatal ) { + ret = THH_CMD_STAT_EFATAL; + goto ex_fos_rel; + } + + + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rc1 = acq_cmd_ctx(entry, &ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + if ( rc1 != HH_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed to acquire context. this is fatal !!!"), __func__); + THH_hob_fatal_error(entry->hob, THH_FATAL_NONE, VAPI_EV_SYNDROME_NONE); + print_outs_commands(&entry->ctx_obj); + print_track_arr(entry); + ret = THH_CMD_STAT_EFATAL; + goto ex_fos_rel; + } + MTL_DEBUG8(MT_FLFMT("token=0x%04x"), ctx_p->token); + edit_hcr(entry, cmd_prms, ctx_p->token, ctx_p, 1); + + /* execute the command */ +#ifdef MAX_DEBUG + command_time= MOSAL_get_time_counter(); +#endif + + MOSAL_syncobj_clear(&ctx_p->syncobj); + + MOSAL_mutex_acq_ui(&entry->hcr_mtx); + if ( entry->have_fatal ) { + ret = THH_CMD_STAT_EFATAL; + goto ex_hm_rel; + + } + if ( !entry->post_to_uar0 ) { + /* check that the go bit is 0 */ + start_time = MOSAL_get_time_counter(); + busy_end_time = start_time + entry->counts_busy_wait_for_go; + go_end_time = start_time + entry->counts_sleep_wait_for_go; + while ( !cmdif_is_free(entry) ) { + if ( MOSAL_get_time_counter() > busy_end_time ) { + /* expired busy wait loop */ + if ( MOSAL_get_time_counter() > go_end_time ) { + /* fatal condition detected */ + + THH_hob_fatal_error(entry->hob, THH_FATAL_GOBIT, VAPI_EV_SYNDROME_NONE); + MTL_ERROR1(MT_FLFMT("%s: go bit was not cleared for %d usec"), __func__, MAX_UC_SLEEP_FOR_GO); + print_outs_commands(&entry->ctx_obj); + print_track_arr(entry); + ret = THH_CMD_STAT_EFATAL; + goto ex_hm_rel; + } + + /* we go to sleep for 1 os tick */ + MOSAL_usleep_ui(1000); + + /* in case of fatal state terminate the loop immediately */ + if ( entry->have_fatal ) { + ret = THH_CMD_STAT_EFATAL; + goto ex_hm_rel; + } + + /* calculate short busy waits */ + busy_end_time = MOSAL_get_time_counter() + entry->short_wait_for_go; + } + } + } + + +#ifdef THH_CMD_TIME_TRACK + MTL_ERROR1("CMD_TIME:START: cmd=%s token=0x%X time=["U64_FMT"]\n", + cmd_str(cmd_prms->opcode), ctx_p->token, MOSAL_get_time_counter()); +#endif + /* execute the command */ + if ( entry->post_to_uar0 ) { + for ( i=0; i<4; ++i ) { + MOSAL_MMAP_IO_WRITE_DWORD(((u_int32_t *)(entry->uar0_virt_base))+i, ctx_p->hcr_buf[i]); + } + for ( i=4; i<8; ++i ) { + MOSAL_MMAP_IO_WRITE_DWORD(((u_int32_t *)(entry->uar0_virt_base))+i-4, ctx_p->hcr_buf[i]); + } + } + else { + for ( i=0; i<7; ++i ) { + MOSAL_MMAP_IO_WRITE_DWORD(((u_int32_t *)(entry->hcr_virt_base))+i, ctx_p->hcr_buf[i]); + } + } + + track_exec_cmds(entry, ctx_p); + MOSAL_mutex_rel(&entry->hcr_mtx); + + MTL_TRACE7(MT_FLFMT("using timeout %d usec (0 signifies infinite !!!)"), entry->inf_timeout ? MOSAL_SYNC_TIMEOUT_INFINITE : cmd_prms->exec_time_micro); + rc = MOSAL_syncobj_waiton_ui(&ctx_p->syncobj, entry->inf_timeout ? 
MOSAL_SYNC_TIMEOUT_INFINITE : cmd_prms->exec_time_micro); + if ( entry->have_fatal ) { + ret = THH_CMD_STAT_EFATAL; + goto ex_ctx_rel; + } + switch ( rc ) { + case MT_OK: + +#ifdef MAX_DEBUG + { + unsigned long counts_per_usec = ((unsigned long)MOSAL_get_counts_per_sec())/1000000; + if (counts_per_usec == 0) counts_per_usec= 1; + command_time= MOSAL_get_time_counter() - command_time; + MTL_DEBUG4(MT_FLFMT("Command completed after approx. "U64_FMT" CPU clocks (~ %lu [usec])"), + command_time,((unsigned long)command_time)/counts_per_usec); + } +#endif + /* woken by event */ + break; + + case MT_ETIMEDOUT: +#ifdef VXWORKS_OS + Osa_TraceMe(); + Dump_Snapshot(); +#endif + break; + + default: + MTL_ERROR1(MT_FLFMT("%s: unexpected return code from MOSAL_syncobj_waiton: %s(%d)"), + __func__, mtl_strerror_sym(rc), rc); + } /* end of switch (rc) */ + + + if ( rc == MT_OK ) { + ret = ctx_p->hcr.status; + extract_hcr(cmd_prms, &ctx_p->hcr); + } + else { + ret = rc==MT_ETIMEDOUT ? THH_CMD_STAT_ETIMEOUT : THH_CMD_STAT_EFATAL; + /* + I release this semaphore even though the event may eventually arrive and release this + semaphore again, but this is a fatal condition, so it's ok + */ + MOSAL_sem_rel(&entry->fw_outs_sem); + dump_cmd_err_info(ctx_p, cmd_prms); + THH_hob_fatal_error(entry->hob, THH_FATAL_CMD_TIMEOUT, VAPI_EV_SYNDROME_NONE); + ret = THH_CMD_STAT_EFATAL; + print_outs_commands(&entry->ctx_obj); + print_track_arr(entry); + goto ex_ctx_rel; + } + + + + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rel_cmd_ctx(entry, ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + MOSAL_sem_rel(&entry->use_events_sem); + + /* + we moved this code from the if statement above because we shouldn't be holding a spinlock while + we try to report errors (especially via something like printf) + */ + + if (MOSAL_EXPECT_FALSE(rc != MT_OK)) { + MTL_ERROR1(MT_FLFMT("Command not completed after timeout: cmd=%s (0x%x), token=0x%04x, pid=%d, go=%d"), + cmd_str(cmd_prms->opcode),cmd_prms->opcode, ctx_p->token, MOSAL_getpid(), + cmdif_is_free(entry)==TRUE ? 
0 : 1); + + } else { + MTL_DEBUG8(MT_FLFMT("successful completion for token=0x%04x\n"), ctx_p->token); + } + + /* check if we're in a fatal state */ + if ( entry->have_fatal ) { ret = THH_CMD_STAT_EFATAL; } + goto ex_no_clean; /* normal function exit */ + + +ex_hm_rel: + MOSAL_mutex_rel(&entry->hcr_mtx); +ex_ctx_rel: + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rel_cmd_ctx(entry, ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); +ex_fos_rel: + MOSAL_sem_rel(&entry->fw_outs_sem); +ex_ues_rel: + MOSAL_sem_rel(&entry->use_events_sem); +ex_no_clean: + MT_RETURN(ret); +} + + +/* + * cmd_flow_no_events + */ +static THH_cmd_status_t cmd_flow_no_events(struct cmd_if_context_st *entry, command_fields_t *cmd_prms) +{ + priv_hcr_t hcr; + THH_cmd_status_t rc; + u_int32_t i; + HH_ret_t rc1; + cmd_ctx_t *ctx_p; + + FUNC_IN; + + if ( entry->have_fatal && ((cmd_prms->input_modifier==0) || (cmd_prms->opcode!=TAVOR_IF_CMD_CLOSE_HCA)) ) { + MT_RETURN(THH_CMD_STAT_EFATAL); + } + + MOSAL_sem_acq_ui(&entry->no_events_sem); + /* make sure go bit is cleared */ + if ( !cmdif_is_free(entry) ) { + MTL_ERROR1(MT_FLFMT("%s: go bit is set"), __func__); + MOSAL_sem_rel(&entry->no_events_sem); + THH_hob_fatal_error(entry->hob, THH_FATAL_GOBIT, VAPI_EV_SYNDROME_NONE); + MT_RETURN(THH_CMD_STAT_EFATAL); + } + + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rc1 = acq_cmd_ctx(entry, &ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + if ( rc1 != HH_OK ) { + MOSAL_sem_rel(&entry->no_events_sem); + MTL_ERROR1(MT_FLFMT("%s: acq_cmd_ctx failed"), __func__); + MT_RETURN(THH_CMD_STAT_EFATAL); /* this is not EAGAIN since this thing must not happen */ + } + + edit_hcr(entry, cmd_prms, DEFAULT_TOKEN, ctx_p, 0); + + + /* execute the command */ + for ( i=0; i<7; ++i ) { + MOSAL_MMAP_IO_WRITE_DWORD(((u_int32_t *)(entry->hcr_virt_base))+i, ctx_p->hcr_buf[i]); + } + + if ( !entry->inf_timeout ) { + i = cmd_prms->exec_time_micro/10000; + } + else { + i = 0xffffffff; /* not mathematically infinite, but practically it's a long time */ + } + if ( (cmd_prms->exec_time_micro>=LOOP_DELAY_TRESHOLD) || (entry->inf_timeout==TRUE) ) { + for ( i=0; i<(cmd_prms->exec_time_micro/10000); ++i ) { + MOSAL_usleep_ui(10000); + if ( cmdif_is_free(entry) ) { + if ( i>=1 ) { + MTL_TRACE1("command executed in %d milliseconds\n", i*10); + } + else { + MTL_TRACE1("command executed in less than 10 milliseconds\n"); + } + break; + } + } + } + else { + MOSAL_delay_execution(cmd_prms->exec_time_micro); + } + + + if ( !cmdif_is_free(entry) ) { + MTL_TRACE1("command failed after %d msec\n", i*10); + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rel_cmd_ctx(entry, ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + MOSAL_sem_rel(&entry->no_events_sem); + dump_cmd_err_info(ctx_p, cmd_prms); + THH_hob_fatal_error(entry->hob, THH_FATAL_GOBIT, VAPI_EV_SYNDROME_NONE); + MT_RETURN(THH_CMD_STAT_ETIMEOUT); + } + + parse_HCR((u_int32_t*)entry->hcr_virt_base, &hcr); + extract_hcr(cmd_prms, &hcr); + MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl); + rel_cmd_ctx(entry, ctx_p); + MOSAL_spinlock_unlock(&entry->ctx_obj.spl); + + + rc = cmd_if_status(entry); + MOSAL_sem_rel(&entry->no_events_sem); + MTL_DEBUG2("status=0x%08x\n", rc); + MT_RETURN(rc); +} + +/* + * memcpy_to_tavor + */ +static void *memcpy_to_tavor(void *dst, const void *src, MT_size_t size) +{ + u_int32_t *dst32 = (u_int32_t *)dst; + u_int32_t *src32 = (u_int32_t *)src; + MT_size_t i; + + for ( i=0; i<(size>>2); ++i ) { + dst32[i] = MOSAL_cpu_to_be32(src32[i]); + } + return dst; +} + + +/* + * 
memcpy_from_tavor + */ +static void *memcpy_from_tavor(void *dst, const void *src, MT_size_t size) +{ + u_int32_t *dst32 = (u_int32_t *)dst; + u_int32_t *src32 = (u_int32_t *)src; + MT_size_t i; + + for ( i=0; i<(size>>2); ++i ) { + dst32[i] = MOSAL_be32_to_cpu(src32[i]); + } + return dst; +} + +/*====== parse input output mailbox structs ===============*/ +#if 0 +static void parse_QUERY_FW(void *buf, THH_fw_props_t *fw_props_p) +{ + cvt_be32_to_cpu(buf, sizeof(struct tavorprm_query_fw_st)>>5); + fw_props_p->fw_rev_major = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_rev_major), MT_BIT_SIZE(tavorprm_query_fw_st, fw_rev_major)); + fw_props_p->fw_rev_minor = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_rev_minor), MT_BIT_SIZE(tavorprm_query_fw_st, fw_rev_minor)); + fw_props_p->cmd_interface_rev = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, cmd_interface_rev), MT_BIT_SIZE(tavorprm_query_fw_st, cmd_interface_rev)); + fw_props_p->log_max_outstanding_cmd = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, log_max_outstanding_cmd), MT_BIT_SIZE(tavorprm_query_fw_st, log_max_outstanding_cmd)); + fw_props_p->fw_base_addr = d32_to_s64(MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_base_addr_h), MT_BIT_SIZE(tavorprm_query_fw_st, fw_base_addr_h)), + MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_base_addr_l), MT_BIT_SIZE(tavorprm_query_fw_st, fw_base_addr_l))); + + fw_props_p->fw_end_addr = d32_to_s64(MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_end_addr_h), MT_BIT_SIZE(tavorprm_query_fw_st, fw_end_addr_h)), + MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_query_fw_st, fw_end_addr_l), MT_BIT_SIZE(tavorprm_query_fw_st, fw_end_addr_l))); + +} +#endif + + +static void parse_HCR(u_int32_t *result_hcr_image_p, priv_hcr_t *hcr) +{ + u_int32_t buf[sizeof(struct tavorprm_hca_command_register_st) >> 5]; + + MOSAL_MMAP_IO_READ_BUF_DWORD(result_hcr_image_p,buf,sizeof(buf)>>2); + + cvt_be32_to_cpu(buf, sizeof(buf)); + + /* in param */ + hcr->in_param[0] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, in_param_h), + MT_BIT_SIZE(tavorprm_hca_command_register_st, in_param_h)); + hcr->in_param[1] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, in_param_l), + MT_BIT_SIZE(tavorprm_hca_command_register_st, in_param_l)); + + /* input modifier */ + hcr->input_modifier = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, input_modifier), + MT_BIT_SIZE(tavorprm_hca_command_register_st, input_modifier)); + + /* out param */ + hcr->out_param[0] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, out_param_h), + MT_BIT_SIZE(tavorprm_hca_command_register_st, out_param_h)); + hcr->out_param[1] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, out_param_l), + MT_BIT_SIZE(tavorprm_hca_command_register_st, out_param_l)); + + /* token */ + hcr->token = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, token), + MT_BIT_SIZE(tavorprm_hca_command_register_st, token)); + + /* opcode */ + hcr->opcode = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, opcode), + MT_BIT_SIZE(tavorprm_hca_command_register_st, opcode)); + + /* opcode modifier */ + hcr->opcode_modifier = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, opcode_modifier), + MT_BIT_SIZE(tavorprm_hca_command_register_st, opcode_modifier)); + + /* e bit */ + hcr->e = 
MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, e),
+                             MT_BIT_SIZE(tavorprm_hca_command_register_st, e));
+
+  /* go bit */
+  hcr->go = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, go),
+                               MT_BIT_SIZE(tavorprm_hca_command_register_st, go));
+
+  /* status */
+  hcr->status = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hca_command_register_st, status),
+                                   MT_BIT_SIZE(tavorprm_hca_command_register_st, status));
+}
+
+#ifdef NEW_EQE_FORMAT
+static void parse_new_HCR(u_int32_t *result_hcr_image_p, priv_hcr_t *hcr)
+{
+  u_int32_t buf[sizeof(struct tavorprm_hcr_completion_event_st) >> 5];
+
+  /* we don't read the hardware so memcpy suffices */
+  memcpy(buf, result_hcr_image_p, sizeof(buf));
+
+  cvt_be32_to_cpu(buf, sizeof(buf));
+
+  /* token */
+  hcr->token = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hcr_completion_event_st, token),
+                                  MT_BIT_SIZE(tavorprm_hcr_completion_event_st, token));
+
+  /* status */
+  hcr->status = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hcr_completion_event_st, status),
+                                   MT_BIT_SIZE(tavorprm_hcr_completion_event_st, status));
+
+  /* out param */
+  hcr->out_param[0] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hcr_completion_event_st, out_param_h),
+                                         MT_BIT_SIZE(tavorprm_hcr_completion_event_st, out_param_h));
+  hcr->out_param[1] = MT_EXTRACT_ARRAY32(buf, MT_BIT_OFFSET(tavorprm_hcr_completion_event_st, out_param_l),
+                                         MT_BIT_SIZE(tavorprm_hcr_completion_event_st, out_param_l));
+}
+#endif
+
+
+/*
+ * edit_hcr
+ */
+static void edit_hcr(struct cmd_if_context_st *entry, command_fields_t *cmd_prms, u_int16_t token, cmd_ctx_t *ctx_p, int event)
+{
+  u_int32_t _token = 0;
+
+  switch ( cmd_prms->in_trans ) {
+    case TRANS_NA:
+      /* note! since these are zeroes I do not bother to deal with endianness */
+      ctx_p->hcr_buf[0] = 0;
+      ctx_p->hcr_buf[1] = 0;
+      break;
+    case TRANS_IMMEDIATE:
+      {
+        u_int32_t *caller_prms = (u_int32_t *)cmd_prms->in_param;
+        ctx_p->hcr_buf[0] = MOSAL_cpu_to_be32(caller_prms[0]);
+        ctx_p->hcr_buf[1] = MOSAL_cpu_to_be32(caller_prms[1]);
+      }
+      break;
+    case TRANS_MAILBOX:
+      /* convert the data pointed to by in_param from CPU to BE byte order */
+      cmd_prms->in_param_va = (u_int8_t *)(ctx_p->in_prm.prm_base_va);
+      memcpy_to_tavor((void *)ctx_p->in_prm.prm_base_va, cmd_prms->in_param, cmd_prms->in_param_size);
+      set_mailbox(&ctx_p->hcr_buf[0], ctx_p->in_prm.prm_base_pa);
+      break;
+  }
+
+  ctx_p->hcr_buf[2] = MOSAL_cpu_to_be32(cmd_prms->input_modifier);
+
+  switch ( cmd_prms->out_trans ) {
+    case TRANS_NA:
+      /* note! since these are zeroes I do not bother to deal with endianness */
+      ctx_p->hcr_buf[3] = 0;
+      ctx_p->hcr_buf[4] = 0;
+      break;
+
+    case TRANS_IMMEDIATE:
+      break;
+    case TRANS_MAILBOX:
+      cmd_prms->out_param_va = (u_int8_t *)ctx_p->out_prm.prm_base_va;
+      set_mailbox(&ctx_p->hcr_buf[3], ctx_p->out_prm.prm_base_pa);
+      break;
+  }
+
+  MT_INSERT32(_token, token, 16, 16);
+  ctx_p->hcr_buf[5] = MOSAL_cpu_to_be32(_token);
+/*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/
+  write_command_dw(&ctx_p->hcr_buf[6], 1, (u_int8_t)event, cmd_prms->opcode_modifier, cmd_prms->opcode);
+}
+
+/*
+ * extract_hcr
+ */
+static void extract_hcr(command_fields_t *cmd_prms, priv_hcr_t *hcr)
+{
+  switch ( cmd_prms->out_trans ) {
+    case TRANS_NA:
+      break;
+    case TRANS_IMMEDIATE:
+      {
+        u_int32_t *caller_prms = (u_int32_t *)cmd_prms->out_param;
+        caller_prms[0] = hcr->out_param[0];
+        caller_prms[1] = hcr->out_param[1];
+      }
+      break;
+    case TRANS_MAILBOX:
+      MTL_DEBUG1("out is TRANS_MAILBOX\n");
+      memcpy_from_tavor(cmd_prms->out_param, cmd_prms->out_param_va, cmd_prms->out_param_size);
+      break;
+  }
+}
+
+/*========== memory allocation functions ============================*/
+
+#if 0
+/*
+ * print_hcr_dump
+ */
+static void print_hcr_dump(struct cmd_if_context_st *entry, u_int32_t cmd)
+{
+#if 6 <= MAX_DEBUG
+  u_int32_t i, hcr_size=PSEUDO_MT_BYTE_SIZE(tavorprm_hca_command_register_st);
+  u_int8_t *hcr = (u_int8_t *)entry->hcr_virt_base;
+
+  MTL_DEBUG6("hcr dump\n");
+  for ( i=0; i<(hcr_size-4); ++i ) {
+    MTL_DEBUG6("%02x\n", hcr[i]);
+  }
+  for ( i=0; i<4; ++i ) {
+    MTL_DEBUG6("%02x\n", ((u_int8_t *)(&cmd))[i]);
+  }
+#endif
+}
+
+
+/*
+ * print_hcr_fields
+ */
+static void print_hcr_fields(u_int32_t *buf, u_int32_t cmd)
+{
+  u_int32_t i, hcr_size=PSEUDO_MT_BYTE_SIZE(tavorprm_hca_command_register_st);
+  u_int32_t *hcr32 = buf;
+  u_int32_t dst32[PSEUDO_MT_BYTE_SIZE(tavorprm_hca_command_register_st)>>2];
+  u_int64_t in_prm, out_prm;
+  u_int32_t in_mod;
+  u_int16_t token, opcode, op_mod;
+  u_int8_t e, go, status;
+
+  for ( i=0; i<((hcr_size>>2)-1); ++i ) {
+    dst32[i] = MOSAL_be32_to_cpu(hcr32[i]);
+  }
+  dst32[i] = MOSAL_be32_to_cpu(cmd);
+
+  in_prm = ((u_int64_t)dst32[0]<<32) + dst32[1];
+  in_mod = dst32[2];
+  out_prm = ((u_int64_t)dst32[3]<<32) + dst32[4];
+  token = MT_EXTRACT32(dst32[5], 16, 16);
+  opcode = MT_EXTRACT32(dst32[6], 0, 12);
+  op_mod = MT_EXTRACT32(dst32[6], 12, 4);
+  e = MT_EXTRACT32(dst32[6], 22, 1);
+  go = MT_EXTRACT32(dst32[6], 23, 1);
+  status = MT_EXTRACT32(dst32[6], 24, 8);
+
+  MTL_DEBUG5("hcr fields values\n");
+  MTL_DEBUG5("in_param = "U64_FMT"\n", in_prm);
+  MTL_DEBUG5("input_modifier = %x\n", in_mod);
+  MTL_DEBUG5("out_param = "U64_FMT"\n", out_prm);
+  MTL_DEBUG5("token = %x\n", token);
+  MTL_DEBUG5("opcode = %x\n", opcode);
+  MTL_DEBUG5("opcode modifier = %x\n", op_mod);
+  MTL_DEBUG5("e = %d\n", e);
+  MTL_DEBUG5("go = %d\n", go);
+  MTL_DEBUG5("status = %x\n", status);
+}
+#endif
+
+/*
+ * log2() - returns the ceiling of log2(arg) for arg > 0
+ */
+static int log2(u_int64_t arg)
+{
+  int i;
+  u_int64_t tmp;
+
+  if ( arg == 0 ) {
+#ifndef __DARWIN__
+    return INT_MIN; /* log2(0) = -infinity */
+#else
+    return -1; /* 0.5 = 0 => log2(0) = -1 */
+#endif
+  }
+
+  tmp = 1;
+  i = 0;
+  while ( tmp < arg ) {
+    tmp = tmp << 1;
+    ++i;
+  }
+
+  return i;
+}
+
+
+
+/*
+ * alloc_prm_ctx
+ */
+static HH_ret_t alloc_prm_ctx(struct cmd_if_context_st *entry,
+                              prms_buf_t *prm_p,
+                              MT_size_t buf_sz,
+                              MT_bool in_ddr)
+{
+  MT_phys_addr_t pa;
+  MT_size_t alloc_sz;
+  MT_virt_addr_t alloc_va, va;
+  HH_ret_t rc;
+  call_result_t mrc;
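+  /*
+   * Illustrative note: a params buffer comes either from device-attached DDR
+   * (THH_ddrmm_alloc + MOSAL_io_remap) or from host memory. In the host-memory
+   * case below, the buffer is over-allocated by (1<<PRM_ALIGN_SHIFT) bytes so
+   * that the base address can be rounded up to the required alignment. With
+   * PRM_ALIGN_SHIFT == 4, for example, a request for 0x400 bytes allocates
+   * 0x410 bytes; an allocation returned at ...8008 is aligned up to ...8010,
+   * which still leaves the full 0x400 usable bytes.
+   */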
+
+  if ( in_ddr ) {
+    /* params put in ddr */
+    alloc_sz = buf_sz;
+    rc = THH_ddrmm_alloc(entry->ddrmm, alloc_sz, PRM_ALIGN_SHIFT, &pa);
+    if ( rc != HH_OK ) {
+      MTL_ERROR1(MT_FLFMT("%s: failed to allocate "SIZE_T_FMT" bytes in ddr"), __func__, alloc_sz);
+      return rc;
+    }
+    va = MOSAL_io_remap(pa, alloc_sz);
+    if ( !va ) {
+      rc = THH_ddrmm_free(entry->ddrmm, pa, alloc_sz);
+      if ( rc != HH_OK ) MTL_ERROR1(MT_FLFMT("%s: THH_ddrmm_free failed. pa=" PHYS_ADDR_FMT ", size=" SIZE_T_FMT), __func__, pa, alloc_sz);
+      return HH_EAGAIN;
+    }
+    alloc_va = va;
+    prm_p->in_ddr = TRUE;
+  }
+  else {
+    /* params put in main memory: over-allocate so the base can be aligned up */
+    alloc_sz = buf_sz + (1<<PRM_ALIGN_SHIFT);
+    alloc_va = (MT_virt_addr_t)MOSAL_pci_phys_alloc_consistent(alloc_sz, PRM_ALIGN_SHIFT);
+    if ( !alloc_va ) {
+      MTL_ERROR1(MT_FLFMT("%s: failed to allocate "SIZE_T_FMT" bytes"), __func__, alloc_sz);
+      return HH_EAGAIN;
+    }
+    va = MT_UP_ALIGNX_VIRT(alloc_va, PRM_ALIGN_SHIFT);
+    mrc = MOSAL_virt_to_phys(MOSAL_get_kernel_prot_ctx(), va, &pa);
+    if ( mrc != MT_OK ) {
+      MTL_ERROR1(MT_FLFMT("%s: MOSAL_virt_to_phys failed - %s"), __func__, mtl_strerror_sym(mrc));
+      MOSAL_pci_phys_free_consistent((void *)(MT_ulong_ptr_t)alloc_va, alloc_sz);
+      return HH_EAGAIN;
+    }
+    prm_p->in_ddr = FALSE;
+  }
+
+  prm_p->prm_alloc_va = alloc_va;
+  prm_p->prm_base_va = va;
+  prm_p->prm_base_pa = pa;
+  prm_p->prm_buf_sz = alloc_sz;
+  prm_p->allocated = 1;
+
+  return HH_OK;
+}
+
+
+/*
+ * de_alloc_prm_ctx
+ */
+static HH_ret_t de_alloc_prm_ctx(struct cmd_if_context_st *entry,
+                                 prms_buf_t *prms_p)
+{
+  if ( prms_p->in_ddr ) {
+    MOSAL_io_unmap(prms_p->prm_base_va);
+    return THH_ddrmm_free(entry->ddrmm, prms_p->prm_base_pa, prms_p->prm_buf_sz);
+  }
+  else {
+    MOSAL_pci_phys_free_consistent((void *)(MT_ulong_ptr_t)(prms_p->prm_alloc_va), prms_p->prm_buf_sz);
+    return HH_OK;
+  }
+}
+
+/*
+ * alloc_cmd_contexts
+ */
+static HH_ret_t alloc_cmd_contexts(struct cmd_if_context_st *entry, u_int32_t num, MT_bool in_at_ddr, MT_bool out_at_ddr)
+{
+  cmd_ctx_t *ctx_p;
+  HH_ret_t rc;
+  u_int32_t i, j;
+
+  ctx_p = TNMALLOC(cmd_ctx_t, num);
+  if ( !ctx_p ) {
+    MTL_ERROR1(MT_FLFMT("%s: failed to allocate "SIZE_T_FMT" bytes"), __func__, sizeof(cmd_ctx_t)*num);
+    return HH_EAGAIN;
+  }
+  memset(ctx_p, 0, sizeof(cmd_ctx_t)*num);
+  entry->ctx_obj.ctx_arr = ctx_p;
+  entry->ctx_obj.num = num;
+
+  for ( i=0; i<num; ++i ) {
+    rc = alloc_prm_ctx(entry, &ctx_p[i].in_prm, MAX_IN_PRM_SIZE, in_at_ddr);
+    if ( rc != HH_OK ) {
+      MTL_ERROR1(MT_FLFMT("%s: alloc_prm_ctx failed"), __func__);
+      for ( j=0; j<i; ++j ) {
+        de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[j].in_prm);
+        de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[j].out_prm);
+      }
+      return rc;
+    }
+
+    rc = alloc_prm_ctx(entry, &ctx_p[i].out_prm, MAX_OUT_PRM_SIZE, out_at_ddr);
+    if ( rc != HH_OK ) {
+      MTL_ERROR1(MT_FLFMT("%s: alloc_prm_ctx failed"), __func__);
+      for ( j=0; j<i; ++j ) {
+        de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[j].in_prm);
+        de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[j].out_prm);
+      }
+      de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[i].in_prm);
+      return rc;
+    }
+
+    entry->ctx_obj.ctx_arr[i].ref_cnt = 0;
+    entry->ctx_obj.ctx_arr[i].next_free_idx = i+1;
+    entry->ctx_obj.ctx_arr[i].entry_idx = i;
+
+    entry->ctx_obj.ctx_arr[i].token = 0;
+    MOSAL_syncobj_init(&entry->ctx_obj.ctx_arr[i].syncobj);
+  }
+
+  entry->ctx_obj.ctx_arr[num-1].next_free_idx = FREE_LIST_EOL;
+  entry->ctx_obj.free_list_head = 0;
+
+  MOSAL_spinlock_init(&entry->ctx_obj.spl);
+  return HH_OK;
+}
+
+
+/*
+ * de_alloc_cmd_contexts
+ */
+static HH_ret_t de_alloc_cmd_contexts(struct cmd_if_context_st *entry)
+{
+  HH_ret_t rc, rcx=HH_OK;
+  u_int32_t i;
+
+  for ( i=0; i<entry->ctx_obj.num; ++i ) {
+    if ( entry->ctx_obj.ctx_arr[i].in_prm.allocated ) {
+      rc = de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[i].in_prm);
+      if ( rc != HH_OK ) {
+        MTL_ERROR1(MT_FLFMT("%s: de_alloc_prm_ctx failed - %s"), __func__, HH_strerror(rc));
+        rcx = HH_ERR;
+      }
+    }
+
+    if ( entry->ctx_obj.ctx_arr[i].out_prm.allocated ) {
+      rc = de_alloc_prm_ctx(entry, &entry->ctx_obj.ctx_arr[i].out_prm);
+      if ( rc != HH_OK ) {
+        MTL_ERROR1(MT_FLFMT("%s: de_alloc_prm_ctx failed - %s"), __func__, HH_strerror(rc));
+        rcx = HH_ERR;
+      }
+    }
+  }
+  if ( entry->ctx_obj.ctx_arr ) {
+    FREE(entry->ctx_obj.ctx_arr);
+    entry->ctx_obj.ctx_arr = NULL;
+  }
+  return rcx;
+}
+
+static HH_ret_t acq_cmd_ctx(struct cmd_if_context_st *entry, cmd_ctx_t **ctx_pp)
+{
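+  /*
+   * Illustrative note: a context's token is built as
+   *   token = (tokens_counter << tokens_shift) | entry_index
+   * The low tokens_shift bits always recover the index into ctx_arr, while the
+   * upper bits act as a generation counter distinguishing successive uses of
+   * the same slot. For example, with 8 contexts (tokens_shift == 3), allocating
+   * slot 2 while tokens_counter == 3 yields token (3<<3)|2 == 0x1a; masking
+   * with tokens_idx_mask recovers slot 2, as done in track_exec_cmds().
+   */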
+  u_int32_t last_head;
+
+  if ( !entry->ctx_obj.ctx_arr ) {
+    MTL_ERROR1(MT_FLFMT("%s: no resources were allocated"), __func__);
+    return HH_EAGAIN;
+  }
+
+  if ( entry->ctx_obj.free_list_head != FREE_LIST_EOL ) {
+    last_head = entry->ctx_obj.free_list_head;
+    entry->ctx_obj.free_list_head = entry->ctx_obj.ctx_arr[last_head].next_free_idx;
+    *ctx_pp = &entry->ctx_obj.ctx_arr[last_head];
+    entry->ctx_obj.ctx_arr[last_head].token = (entry->tokens_counter<<entry->tokens_shift) | last_head;
+    entry->tokens_counter++;
+    entry->ctx_obj.ctx_arr[last_head].ref_cnt = 1;
+    return HH_OK;
+  }
+
+  return HH_EAGAIN;
+}
+
+
+static void rel_cmd_ctx(struct cmd_if_context_st *entry, cmd_ctx_t *ctx_p)
+{
+  u_int32_t entry_idx = ctx_p->entry_idx;
+  u_int32_t old_head = entry->ctx_obj.free_list_head;
+  entry->ctx_obj.free_list_head = entry_idx;
+  ctx_p->next_free_idx = old_head;
+  ctx_p->ref_cnt = 0;
+}
+
+
+
+static HH_ret_t re_alloc_resources(struct cmd_if_context_st *entry, MT_bool in_at_ddr, MT_bool out_at_ddr)
+{
+  HH_ret_t rc;
+
+  rc = de_alloc_cmd_contexts(entry);
+  if ( rc != HH_OK ) {
+    MTL_ERROR1(MT_FLFMT("%s: de_alloc_cmd_contexts failed - %s"), __func__, HH_strerror(rc));
+    return rc;
+  }
+
+  rc = alloc_cmd_contexts(entry, entry->sw_num_rsc, in_at_ddr, out_at_ddr);
+  if ( rc != HH_OK ) {
+    MTL_ERROR1(MT_FLFMT("%s: alloc_cmd_contexts failed - %s"), __func__, HH_strerror(rc));
+    return rc;
+  }
+
+  return HH_OK;
+}
+
+THH_cmd_status_t THH_cmd_notify_fatal(THH_cmd_t cmd_if, THH_fatal_err_t fatal_err)
+{
+  struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if;
+  int i;
+
+  FUNC_IN;
+  MTL_DEBUG2("%s: cmd_if = %p\n", __func__, (void *) cmd_if);
+  /* Don't need spinlock here. The value is set to TRUE and stays there until
+   * cmdif is destroyed.
+   */
+  entry->have_fatal = TRUE;
+
+  /* wake all processes waiting for completion of commands */
+  MTL_DEBUG2(MT_FLFMT("%s: waking waiting processes"), __func__);
+  MOSAL_spinlock_dpc_lock(&entry->ctx_obj.spl);
+  for ( i=0; i<(int)entry->ctx_obj.num; ++i ) {
+    MOSAL_syncobj_signal(&entry->ctx_obj.ctx_arr[i].syncobj);
+  }
+  MOSAL_spinlock_unlock(&entry->ctx_obj.spl);
+  MTL_DEBUG2(MT_FLFMT("%s: woke waiting processes"), __func__);
+
+
+  MT_RETURN(THH_CMD_STAT_OK);
+}
+
+THH_cmd_status_t THH_cmd_handle_fatal(THH_cmd_t cmd_if)
+{
+  struct cmd_if_context_st *entry = (struct cmd_if_context_st *)cmd_if;
+
+  FUNC_IN;
+  MTL_DEBUG1(MT_FLFMT("%s: called"), __func__);
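+  /*
+   * Illustrative summary of the fatal-error flow implemented here:
+   *   1. THH_cmd_notify_fatal() above sets entry->have_fatal and signals every
+   *      context's syncobj, so threads blocked waiting for a completion wake,
+   *      observe have_fatal and fail with THH_CMD_STAT_EFATAL;
+   *   2. while have_fatal is set, new commands are rejected on entry;
+   *   3. THH_cmd_handle_fatal() (this function) is then called to detach the
+   *      interface from the EQ via THH_cmd_clr_eq().
+   */
+  /* Don't need spinlock here. */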
+  if (entry->have_fatal != TRUE) {
+    MT_RETURN(THH_CMD_STAT_BAD_RES_STATE); /* only callable from within fatal state */
+  }
+  THH_cmd_clr_eq(cmd_if);
+  MTL_DEBUG1(MT_FLFMT("%s: returning"), __func__);
+  MT_RETURN(THH_CMD_STAT_OK);
+}
+
+
+
+
+/*
+ * print_outs_commands
+ */
+static void print_outs_commands(ctx_obj_t *ctxo_p)
+{
+  u_int32_t i;
+
+  MOSAL_spinlock_dpc_lock(&ctxo_p->spl);
+  MTL_ERROR1(MT_FLFMT("list of outstanding tokens:"));
+  for ( i=0; i<ctxo_p->num; ++i ) {
+    if ( ctxo_p->ctx_arr[i].ref_cnt > 0 ) {
+      MTL_ERROR1(MT_FLFMT("outstanding i=%d, token=0x%04x"), i, ctxo_p->ctx_arr[i].token);
+    }
+  }
+
+  MOSAL_spinlock_unlock(&ctxo_p->spl);
+}
+
+
+static void track_exec_cmds(struct cmd_if_context_st *entry, cmd_ctx_t *ctx_p)
+{
+  u_int16_t token=ctx_p->token;
+  int idx = token & entry->tokens_idx_mask;
+  entry->track_arr[idx] = token;
+}
+
+
+
+static void print_track_arr(struct cmd_if_context_st *entry)
+{
+  int num=(1<<entry->tokens_shift), i, shift=entry->tokens_shift;
+
+  for (i=0; i<num; ++i) {
+    MTL_ERROR1(MT_FLFMT("%d: token=0x%04x, counter=0x%x"), i, entry->track_arr[i], entry->track_arr[i]>>shift);
+  }
+}
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.h
new file mode 100644
index 00000000..947a743c
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif.h
@@ -0,0 +1,288 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef __cmdif_h
+#define __cmdif_h
+
+#include <mtl_common.h>
+#include <vapi_types.h>
+#include <hh.h>
+#include <thh.h>
+
+/* #define SIMULATE_HALT_HCA 1 */
+
+#define THH_CMDIF_INVALID_HANDLE 0xFFFFFFFF
+
+
+
+/******************************************************************************
+ * Function: THH_cmd_create
+ *
+ * Description: Create a THH_cmd object
+ *
+ * Parameters:
+ * hob(IN) THH_hob handle (used, among other things, for fatal-error notification)
+ * hw_ver(IN) hardware version
+ * cr_base(IN) physical address of the cr-space
+ * uar0_base(IN) base address of uar0 for posting commands
+ * cmd_if_p(OUT) handle to the created THH_cmd_t object
+ * inf_timeout(IN) when TRUE the cmdif will wait infinitely for the completion of a command
+ * num_cmds_outs(IN) number of outstanding commands requested by the user (the real
+ * value may be less, depending on the value returned by FW)
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+HH_ret_t THH_cmd_create(THH_hob_t hob, u_int32_t hw_ver, MT_phys_addr_t cr_base, MT_phys_addr_t uar0_base, THH_cmd_t *cmd_if_p,
+                        MT_bool inf_timeout, u_int32_t num_cmds_outs);
+
+
+/******************************************************************************
+ * Function: THH_cmd_destroy
+ *
+ * Description: Destroy the THH_cmd object
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the THH_cmd_t object
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+HH_ret_t THH_cmd_destroy(THH_cmd_t cmd_if);
+
+
+
+/******************************************************************************
+ * Function: THH_cmd_set_fw_props
+ *
+ * Description: Store the parameters reported by the QUERY_FW command
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the THH_cmd_t object
+ * fw_props_p(IN) pointer to the queried fw props
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+THH_cmd_status_t THH_cmd_set_fw_props(THH_cmd_t cmd_if, THH_fw_props_t *fw_props);
+
+/******************************************************************************
+ * Function: THH_cmd_set_eq
+ *
+ * Description: Enable the command interface object to work with events
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the THH_cmd_t object
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+HH_ret_t THH_cmd_set_eq(THH_cmd_t cmd_if);
+
+
+/******************************************************************************
+ * Function: THH_cmd_clr_eq
+ *
+ * Description: Inform the object to stop reporting completions to the EQ
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the THH_cmd_t object
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+HH_ret_t THH_cmd_clr_eq(THH_cmd_t cmd_if);
+
+
+/******************************************************************************
+ * Function: THH_cmd_eventh
+ *
+ * Description: Called whenever a command has completed (when
+ * working with events)
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the THH_cmd_t object
+ * result_hcr_image_p(IN) - the (big-endian) HCR image of the command completion EQE
+ *
+ * Returns:
+ *
+ *****************************************************************************/
+void THH_cmd_eventh(THH_cmd_t cmd_if, u_int32_t *result_hcr_image_p);
+
+
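+/*
+ * Illustrative lifecycle sketch (informal; arguments are placeholders and the
+ * exact call sites live elsewhere in the THH HCA bring-up code):
+ *
+ *   THH_cmd_t cmd_if;
+ *   THH_fw_props_t fw_props;
+ *
+ *   THH_cmd_create(hob, hw_ver, cr_base, uar0_base, &cmd_if, FALSE, 16);
+ *   THH_cmd_QUERY_FW(cmd_if, &fw_props);   // first QUERY_FW also sizes internal resources
+ *   THH_cmd_set_fw_props(cmd_if, &fw_props);
+ *   THH_cmd_set_eq(cmd_if);                // switch from go-bit polling to event completions
+ *   ...                                    // THH_cmd_eventh() is then driven from the EQ
+ *   THH_cmd_clr_eq(cmd_if);
+ *   THH_cmd_destroy(cmd_if);
+ */
+
+/******************************************************************************
+ * Function: THH_cmd_assign_ddrmm
+ *
+ * Description: Assign memory manager to be used by this object for allocating
+ * memory from DDR
+ *
+ * Parameters:
+ * cmd_if(IN) handle of the 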
THH_cmd_t object + * ddrmm(IN) handle of assigned ddr memory manager + * + * Returns: + * + *****************************************************************************/ +HH_ret_t THH_cmd_assign_ddrmm(THH_cmd_t cmd_if, THH_ddrmm_t ddrmm); + + +/****************************************************************************** + * Function: THH_cmd_revoke_ddrmm + * + * Description: revoke the associated memory manager + * + * Parameters: + * cmd_if(IN) handle of the THH_cmd_t object + * + * Returns: + * + *****************************************************************************/ +HH_ret_t THH_cmd_revoke_ddrmm(THH_cmd_t cmd_if); + + +/***************************************************************************** +* * +* COMMAND FUNCTIONS * +* * +******************************************************************************/ + +/* System commands */ +THH_cmd_status_t THH_cmd_SYS_EN(IN THH_cmd_t cmd_if); +THH_cmd_status_t THH_cmd_SYS_DIS(IN THH_cmd_t cmd_if); + +/* General queries */ +THH_cmd_status_t THH_cmd_QUERY_DEV_LIM(IN THH_cmd_t cmd_if, OUT THH_dev_lim_t *dev_lim); +THH_cmd_status_t THH_cmd_QUERY_FW(IN THH_cmd_t cmd_if, OUT THH_fw_props_t *fw_props); +THH_cmd_status_t THH_cmd_QUERY_DDR(IN THH_cmd_t cmd_if, OUT THH_ddr_props_t *ddr_props); +THH_cmd_status_t THH_cmd_QUERY_ADAPTER(IN THH_cmd_t cmd_if, OUT THH_adapter_props_t *adapter_props); + +/* HCA initialization and maintenance commands */ +THH_cmd_status_t THH_cmd_INIT_HCA(IN THH_cmd_t cmd_if, IN THH_hca_props_t *hca_props); +THH_cmd_status_t THH_cmd_INIT_IB(IN THH_cmd_t cmd_if, IN IB_port_t port, + IN THH_port_init_props_t *port_init_props); +THH_cmd_status_t THH_cmd_QUERY_HCA(IN THH_cmd_t cmd_if, OUT THH_hca_props_t *hca_props); +THH_cmd_status_t THH_cmd_SET_IB(IN THH_cmd_t cmd_if, IN IB_port_t port, + IN THH_set_ib_props_t *port_props); +THH_cmd_status_t THH_cmd_CLOSE_IB(IN THH_cmd_t cmd_if, IN IB_port_t port); +#ifdef SIMULATE_HALT_HCA +THH_cmd_status_t THH_cmd_CLOSE_HCA(IN THH_cmd_t cmd_if); +#else +THH_cmd_status_t THH_cmd_CLOSE_HCA(IN THH_cmd_t cmd_if, MT_bool do_halt); +#endif +/* TPT commands */ +THH_cmd_status_t THH_cmd_SW2HW_MPT(IN THH_cmd_t cmd_if, IN THH_mpt_index_t mpt_index, + IN THH_mpt_entry_t *mpt_entry); +THH_cmd_status_t THH_cmd_MODIFY_MPT(IN THH_cmd_t cmd_if, IN THH_mpt_index_t mpt_index, + IN THH_mpt_entry_t *mpt_entry, MT_bool modify_entire_entry); +THH_cmd_status_t THH_cmd_QUERY_MPT(IN THH_cmd_t cmd_if, IN THH_mpt_index_t mpt_index, + OUT THH_mpt_entry_t *mpt_entry); +THH_cmd_status_t THH_cmd_HW2SW_MPT(IN THH_cmd_t cmd_if, IN THH_mpt_index_t mpt_index, + OUT THH_mpt_entry_t *mpt_entry); +THH_cmd_status_t THH_cmd_READ_MTT(IN THH_cmd_t cmd_if, IN u_int64_t mtt_pa, IN MT_size_t num_elems, + OUT THH_mtt_entry_t *mtt_entry); +THH_cmd_status_t THH_cmd_WRITE_MTT(IN THH_cmd_t cmd_if, IN u_int64_t mtt_pa, IN MT_size_t num_elems, + IN THH_mtt_entry_t *mtt_entry); +THH_cmd_status_t THH_cmd_SYNC_TPT(IN THH_cmd_t cmd_if); + +/* EQ commands */ +THH_cmd_status_t THH_cmd_MAP_EQ(IN THH_cmd_t cmd_if, IN THH_eqn_t eqn, IN u_int64_t event_mask); +THH_cmd_status_t THH_cmd_SW2HW_EQ(IN THH_cmd_t cmd_if, IN THH_eqn_t eqn, IN THH_eqc_t *eq_context); +THH_cmd_status_t THH_cmd_HW2SW_EQ(IN THH_cmd_t cmd_if, IN THH_eqn_t eqn, OUT THH_eqc_t *eq_context); +THH_cmd_status_t THH_cmd_QUERY_EQ(IN THH_cmd_t cmd_if, IN THH_eqn_t eqn, OUT THH_eqc_t *eq_context); + + +/* CQ commands */ +THH_cmd_status_t THH_cmd_SW2HW_CQ(IN THH_cmd_t cmd_if, IN HH_cq_hndl_t cqn, + IN THH_cqc_t *cq_context); +THH_cmd_status_t THH_cmd_HW2SW_CQ(IN THH_cmd_t cmd_if, IN 
HH_cq_hndl_t cqn,
+                                  OUT THH_cqc_t *cq_context);
+THH_cmd_status_t THH_cmd_QUERY_CQ(IN THH_cmd_t cmd_if, IN HH_cq_hndl_t cqn,
+                                  OUT THH_cqc_t *cq_context);
+THH_cmd_status_t THH_cmd_RESIZE_CQ(THH_cmd_t cmd_if, HH_cq_hndl_t cqn,
+                                   u_int64_t start_address, u_int32_t l_key, u_int8_t log_cq_size,
+                                   u_int32_t *new_producer_index_p);
+/* if given new_producer_index_p==NULL then opcode_modifier=1 (fixed resize CQ - FM issue #17002) */
+
+/* QP/EE commands */
+THH_cmd_status_t THH_cmd_MODIFY_QP(IN THH_cmd_t cmd_if, IN IB_wqpn_t qpn,
+                                   IN THH_qpee_transition_t trans,
+                                   IN THH_qpee_context_t *qp_context,
+                                   IN u_int32_t optparammask);
+THH_cmd_status_t THH_cmd_MODIFY_EE(IN THH_cmd_t cmd_if, IN IB_eecn_t eecn,
+                                   IN THH_qpee_transition_t trans,
+                                   IN THH_qpee_context_t *ee_context,
+                                   IN u_int32_t optparammask);
+THH_cmd_status_t THH_cmd_QUERY_QP(IN THH_cmd_t cmd_if, IN IB_wqpn_t qpn,
+                                  OUT THH_qpee_context_t *qp_context);
+THH_cmd_status_t THH_cmd_QUERY_EE(IN THH_cmd_t cmd_if, IN IB_eecn_t eecn,
+                                  OUT THH_qpee_context_t *ee_context);
+#if defined(MT_SUSPEND_QP)
+THH_cmd_status_t THH_cmd_SUSPEND_QP(THH_cmd_t cmd_if, u_int32_t qpn, MT_bool suspend_flag);
+#endif
+
+/* Special QP commands */
+THH_cmd_status_t THH_cmd_CONF_SPECIAL_QP(IN THH_cmd_t cmd_if, IN VAPI_special_qp_t qp_type,
+                                         IN IB_wqpn_t base_qpn);
+THH_cmd_status_t THH_cmd_MAD_IFC(IN THH_cmd_t cmd_if,
+                                 IN MT_bool mkey_validate,
+                                 IN IB_lid_t slid, /* SLID is ignored if mkey_validate is false */
+                                 IN IB_port_t port,
+                                 IN void *mad_in,
+                                 OUT void *mad_out);
+
+/* SRQ commands */
+
+THH_cmd_status_t THH_cmd_SW2HW_SRQ(IN THH_cmd_t cmd_if,
+                                   IN u_int32_t srqn,            /* SRQ number/index */
+                                   IN THH_srq_context_t *srqc_p);/* SRQ context */
+
+THH_cmd_status_t THH_cmd_HW2SW_SRQ(IN THH_cmd_t cmd_if,
+                                   IN u_int32_t srqn,             /* SRQ number/index */
+                                   OUT THH_srq_context_t *srqc_p);/* SRQ context (NULL for no output)*/
+
+THH_cmd_status_t THH_cmd_QUERY_SRQ(IN THH_cmd_t cmd_if,
+                                   IN u_int32_t srqn,             /* SRQ number/index */
+                                   OUT THH_srq_context_t *srqc_p);/* SRQ context */
+
+/* Multicast group commands */
+THH_cmd_status_t THH_cmd_READ_MGM(IN THH_cmd_t cmd_if, IN u_int32_t mcg_index,
+                                  IN MT_size_t max_qp_per_mcg, OUT THH_mcg_entry_t *mcg_entry);
+THH_cmd_status_t THH_cmd_WRITE_MGM(IN THH_cmd_t cmd_if, IN u_int32_t mcg_index,
+                                   IN MT_size_t max_qp_per_mcg, IN THH_mcg_entry_t *mcg_entry);
+THH_cmd_status_t THH_cmd_MGID_HASH(IN THH_cmd_t cmd_if, IN IB_gid_t mgid, OUT THH_mcg_hash_t *hash_val);
+
+/* fatal error notification */
+THH_cmd_status_t THH_cmd_notify_fatal(IN THH_cmd_t cmd_if, IN THH_fatal_err_t fatal_err);
+THH_cmd_status_t THH_cmd_handle_fatal(IN THH_cmd_t cmd_if);
+
+const char *str_THH_cmd_status_t(THH_cmd_status_t status);
+const char *cmd_str(tavor_if_cmd_t opcode);
+
+#endif /* __cmdif_h */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h
new file mode 100644
index 00000000..6e1b7cc3
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmdif_priv.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef __cmdif_priv_h
+#define __cmdif_priv_h
+
+#include <mtl_common.h>
+#include <mosal.h>
+#include <tavor_if_defs.h>
+#include <thh.h>
+#include <thh_hob.h>
+#include <cmdif.h>
+
+#define PSEUDO_MT_BYTE_SIZE(x) (sizeof(struct x) >> 3)
+
+
+typedef struct {
+  u_int32_t in_param[2];   /* Input Parameter: 64 bit parameter or 64 bit pointer to input mailbox (see command description) */
+  u_int32_t input_modifier;/* Input Parameter Modifier */
+  u_int32_t out_param[2];  /* Output Parameter: 64 bit parameter or 64 bit pointer to output mailbox (see command description) */
+  u_int16_t token;         /* Software assigned token to the command, to uniquely identify it. The token is returned to the software in the EQE reported. */
+  u_int16_t opcode;        /* Command opcode */
+  u_int8_t opcode_modifier;/* Opcode Modifier, see specific description for each command. */
+  u_int8_t e;              /* Event Request; 0 - Don't report event (software will poll the G bit); 1 - Report event to EQ when the command completes */
+  u_int8_t go;             /* Go (0=Software ownership for the HCR, 1=Hardware ownership for the HCR); Software can write to the HCR only if the Go bit is cleared; Software must set the Go bit to trigger the HW to execute the command. Software must not write to this register value other than 1 for the Go bit. */
+  u_int8_t status;         /* Command execution status report. Valid only if the command interface is under SW ownership (Go bit is cleared); 0 - command completed without error. If different than zero, command execution completed with error. Syndrome encoding depends on the command executed and is defined for each command */
+}
+priv_hcr_t;
+
+typedef struct {
+  MOSAL_syncobj_t *syncobj_p; /* pointer to synchronization object */
+  u_int16_t token_val;        /* the value of the token */
+  priv_hcr_t hcr;
+  MT_bool in_use;             /* free if 0, otherwise used */
+  MT_bool signalled;
+}
+wait_context_t;
+
+
+typedef enum {
+  TRANS_NA,
+  TRANS_IMMEDIATE,
+  TRANS_MAILBOX
+}
+trans_type_t;
+
+typedef struct {
+  u_int8_t *in_param;     /* holds the virtually contiguous buffer of the parameter block passed */
+  u_int8_t *in_param_va;  /* used internally to hold the address of the allocated physically contiguous buffer
+                             - need not be initialized by the wrapper */
+  u_int32_t in_param_size;
+  trans_type_t in_trans;
+
+  u_int32_t input_modifier;
+
+  u_int8_t *out_param;    /* holds the virtually contiguous buffer of the parameter block passed */
+  u_int8_t *out_param_va; /* used internally to hold the address of the allocated physically contiguous buffer
+                             - need not be initialized by the wrapper */
+  u_int32_t out_param_size;
+  trans_type_t out_trans;
+
+  tavor_if_cmd_t opcode;
+  u_int8_t opcode_modifier;
+
+  u_int32_t exec_time_micro;
+}
+command_fields_t;
+
+
+typedef struct {
+  MT_virt_addr_t wide_pool; /* pointer to base address of the pool, which is not aligned */
+  MT_virt_addr_t pool;      /* pointer to start address of the pool - this address is aligned */
+  MT_phys_addr_t pool_pa;   /* physical address of the pool */
+  u_int32_t num_bufs;       /* number of buffers in the pool */
+  MT_size_t buf_size;       /* size of a buffer in the pool */
+  void **buf_ptrs;          /* array holding pointers to the buffers in the pool */
+}
+pool_control_t;
+
+#define TOKEN_VALUES_BASE (1+0) /* the '1' must be there to ensure the value of UNALLOCATED_TOKEN is valid */
+#define UNALLOCATED_TOKEN 0
+#define MAX_IN_PRM_SIZE 0x400  /* is this the right value ?? */
+#define MAX_OUT_PRM_SIZE 0x400 /* is this the right value ?? */
+#define LOOP_DELAY_TRESHOLD 10000 /* beyond 10 msec we do the delay in a loop */
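+
+/*
+ * Illustrative note (an informal sketch, not part of the original header):
+ * command_fields_t above is the descriptor handed to cmd_invoke(). A
+ * hypothetical mailbox-output query might be set up roughly as follows
+ * (opcode, buffer and budget are placeholders):
+ *
+ *   command_fields_t cmd_prms;
+ *
+ *   memset(&cmd_prms, 0, sizeof(cmd_prms));
+ *   cmd_prms.opcode          = TAVOR_IF_CMD_QUERY_DEV_LIM;
+ *   cmd_prms.in_trans        = TRANS_NA;       // no input parameter block
+ *   cmd_prms.out_trans       = TRANS_MAILBOX;  // results come back via an output mailbox
+ *   cmd_prms.out_param       = out_buf;        // caller's virtually contiguous buffer
+ *   cmd_prms.out_param_size  = out_buf_sz;
+ *   cmd_prms.exec_time_micro = exec_budget;    // per-command execution time budget
+ *   rc = cmd_invoke(cmd_if, &cmd_prms);
+ */
+
+/*================ type definitions ================================================*/
+
+typedef struct {
+  MT_virt_addr_t prm_base_va;  /* base virtual address of params buffer */
+  MT_phys_addr_t prm_base_pa;  /* base physical address of params buffer */
+  MT_size_t prm_buf_sz;        /* size of allocated buffer used for params */
+  MT_virt_addr_t prm_alloc_va; /* pointer to allocated buffer.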
+                                  useful when params are in main memory and, due to alignment
+                                  requirements, prm_base_va may be higher than the allocated
+                                  buffer pointer */
+  MT_bool in_ddr;              /* TRUE if prms are in ddr */
+  int allocated;               /* set to 1 when the object has been allocated to aid in cleanup */
+}
+prms_buf_t;
+
+#define HCR_SIZE 8
+/* type which contains all the resources used to execute a command */
+typedef struct {
+  u_int16_t token;         /* the value used in the token field */
+  MOSAL_syncobj_t syncobj; /* synchronization object */
+  prms_buf_t in_prm;
+  prms_buf_t out_prm;
+  u_int32_t ref_cnt;       /* 0=entry not in use >0 in use */
+  priv_hcr_t hcr;          /* used to pass command results from event handler to the process */
+  u_int32_t next_free_idx; /* index of the next free element in the array */
+  u_int32_t entry_idx;     /* index of this entry in the array */
+  u_int32_t hcr_buf[HCR_SIZE]; /* this buffer contains the image to be written to uar0 */
+}
+cmd_ctx_t;
+
+
+typedef struct {
+  cmd_ctx_t *ctx_arr;   /* pointer to an array of command contexts */
+  u_int32_t num;        /* number of allocated contexts */
+  MOSAL_spinlock_t spl; /* spinlock to protect the data */
+  u_int32_t free_list_head;
+}
+ctx_obj_t;
+
+
+struct cmd_if_context_st {
+  THH_hob_t hob;
+  void *hcr_virt_base;  /* virtual address base of the HCR */
+  void *uar0_virt_base; /* virtual address base of UAR0 */
+  MT_bool sys_enabled;  /* true if THH_CMD_SYS_EN has been executed successfully */
+  volatile THH_eqn_t eqn; /* eqn used by this interface */
+  MOSAL_spinlock_t eqn_spl;
+  MOSAL_mutex_t sys_en_mtx; /* mutex used during execution of SYS_EN */
+  MOSAL_semaphore_t no_events_sem; /* semaphore used during execution when EQN is not yet set */
+  MOSAL_mutex_t hcr_mtx; /* mutex used to protect the hcr */
+  MOSAL_semaphore_t use_events_sem; /* semaphore used while executing commands when EQN is set */
+  MOSAL_semaphore_t fw_outs_sem;
+
+  u_int32_t max_outstanding; /* max number of outstanding commands possible */
+  u_int32_t queried_max_outstanding; /* max number of outstanding commands supported by FW */
+  u_int32_t req_num_cmds_outs; /* requested number of outstanding commands */
+
+  THH_ddrmm_t ddrmm; /* handle to ddr memory manager */
+  pool_control_t in_prm_pool; /* used for managing memory for in params */
+  MT_phys_addr_t phys_mem_addr; /* physical address allocated */
+  MT_offset_t phys_mem_size; /* size of allocated physical memory */
+
+  pool_control_t out_prm_pool; /* used for managing memory for out params */
+
+  MT_phys_addr_t ddr_prms_buf; /* physical address in ddr to be used with input mailboxes */
+
+  u_int8_t tokens_shift;
+  u_int16_t tokens_idx_mask;
+  u_int16_t tokens_counter;
+
+  MT_bool inf_timeout; /* when TRUE cmdif will wait infinitely for the completion of a command */
+
+  MT_bool in_at_ddr, out_at_ddr; /* where the input and output params are located */
+
+  unsigned int sw_num_rsc; /* number of software resources */
+
+  ctx_obj_t ctx_obj; /* object that contains resources needed for executing commands */
+  MT_bool query_fw_done; /* set to TRUE after the first time calling QUERY_FW and allocating resources */
+  THH_fw_props_t fw_props; /* valid when query_fw_done is TRUE (after QUERY_FW) */
+  u_int64_t counts_busy_wait_for_go; /* number of cpu clocks to busy wait for the go bit */
+  u_int64_t short_wait_for_go; /* short busy wait for go */
+  u_int64_t counts_sleep_wait_for_go; /* number of cpu clocks to wait for the go bit by suspending */
+
+#ifdef GO_MT_BIT_TIME_DEBUG
+  u_int32_t go_wait_counts[sizeof(count_levels)/sizeof(u_int32_t)+1];
+#endif
+  volatile MT_bool have_fatal; /* set if a fatal error has occurred */
+  MOSAL_spinlock_t close_spl;
+  MT_bool close_action;
+  MOSAL_syncobj_t fatal_list; /* list of processes waiting until the HCA is closed */
+
+  MT_bool post_to_uar0; /* when true cmds with events are posted like doorbells */
+
+  MOSAL_spinlock_t ctr_spl;
+  unsigned long events_in_pipe;
+  unsigned long poll_in_pipe;
+
+  u_int16_t *track_arr;
+};
+
+typedef struct {
+  u_int32_t addr_h;
+  u_int32_t addr_l;
+}
+addr_64bit_t;
+
+#define PRM_ALIGN_SHIFT 4 /* alignment required in params buffers */
+
+
+
+THH_cmd_status_t cmd_invoke(THH_cmd_t cmd_if, command_fields_t *cmd_prms);
+THH_cmd_status_t allocate_prm_resources(THH_cmd_t cmd_if, THH_fw_props_t *fw_props, MT_bool ddr);
+MT_bool valid_handle(THH_cmd_t cmd_if);
+void va_pa_mapping_helper(THH_cmd_t cmd_if, MT_virt_addr_t va, MT_phys_addr_t pa);
+
+
+/*================ external definitions ============================================*/
+#ifdef DEBUG_MEM_OV
+extern MT_phys_addr_t cmdif_dbg_ddr; /* address in ddr used for out params in debug mode */
+#endif
+
+#endif /* __cmdif_priv_h */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c
new file mode 100644
index 00000000..2e2f0754
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/cmdif/cmds_wrap.c
@@ -0,0 +1,3088 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+ #include <mtl_common.h>
+ #include <MT23108_PRM.h>
+ #include <cmdif.h>
+ #include <cmdif_priv.h>
+
+/* T.D.(matan): change the method of reporting SQ_DRAINING state to a more consistent one.*/
+#define THH_CMD_QP_DRAINING_FLAG 0x80
+
+/* For the READ_MTT and WRITE_MTT, the mailbox contains a 16-byte preamble */
+#define MTT_CMD_PA_PREAMBLE_SIZE (16)
+
+#define EX_FLD(dst, a, st, fld) dst->fld = MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld), MT_BIT_SIZE(st, fld))
+#define INS_FLD(src, a, st, fld) MT_INSERT_ARRAY32(a, src->fld, MT_BIT_OFFSET(st, fld), MT_BIT_SIZE(st, fld))
+
+#define EX_BOOL_FLD(dst, a, st, fld) dst->fld = ((MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld), MT_BIT_SIZE(st, fld))==0) ? 
FALSE : TRUE) +#define INS_BOOL_FLD(src, a, st, fld) MT_INSERT_ARRAY32(a, ((src->fld==FALSE)?0:1), MT_BIT_OFFSET(st, fld), MT_BIT_SIZE(st, fld)) + +#define QP_EX_FLD(dst, a, st, fld) dst->fld = MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, qpc_eec_data.fld), MT_BIT_SIZE(st, qpc_eec_data.fld)) +#define QP_INS_FLD(src, a, st, fld) MT_INSERT_ARRAY32(a, src->fld, MT_BIT_OFFSET(st, qpc_eec_data.fld), MT_BIT_SIZE(st, qpc_eec_data.fld)) + +#define QP_EX_BOOL_FLD(dst, a, st, fld) dst->fld = ((MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, qpc_eec_data.fld), MT_BIT_SIZE(st, qpc_eec_data.fld))==0) ? FALSE : TRUE) +#define QP_INS_BOOL_FLD(src, a, st, fld) MT_INSERT_ARRAY32(a, ((src->fld==FALSE)?0:1), MT_BIT_OFFSET(st, qpc_eec_data.fld), MT_BIT_SIZE(st, qpc_eec_data.fld)) + +#define EX_FLD64(dst, a, st, fld) dst->fld = (MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld##_l), MT_BIT_SIZE(st, fld##_l)) | \ + (u_int64_t)(MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld##_h), MT_BIT_SIZE(st, fld##_h))) << 32) + +#define INS_FLD64(src, a, st, fld) MT_INSERT_ARRAY32(a, (u_int32_t)src->fld, MT_BIT_OFFSET(st, fld##_l), MT_BIT_SIZE(st, fld##_l)) ; \ + MT_INSERT_ARRAY32(a, (u_int32_t)((src->fld) >> 32), MT_BIT_OFFSET(st, fld##_h), MT_BIT_SIZE(st, fld##_h)) + +#define INS_FLD64_SH(src, a, st, fld) MT_INSERT_ARRAY32(a, (u_int32_t)((src->fld) >> (MT_BIT_OFFSET(st, fld##_l) & MASK32(5))), MT_BIT_OFFSET(st, fld##_l), MT_BIT_SIZE(st, fld##_l)) ; \ + MT_INSERT_ARRAY32(a, (u_int32_t)((src->fld) >> 32), MT_BIT_OFFSET(st, fld##_h), MT_BIT_SIZE(st, fld##_h)) + +#define EX_FLD64_SH(dst, a, st, fld) dst->fld = ((MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld##_l), MT_BIT_SIZE(st, fld##_l))) << ((MT_BIT_OFFSET(st, fld##_l)& MASK32(5))) | \ + (u_int64_t)(MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, fld##_h), MT_BIT_SIZE(st, fld##_h))) << 32) + +#define QP_EX_FLD64(dst, a, st, fld) dst->fld = (MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, qpc_eec_data.fld##_1), MT_BIT_SIZE(st, qpc_eec_data.fld##_1)) | \ + (u_int64_t)(MT_EXTRACT_ARRAY32(a, MT_BIT_OFFSET(st, qpc_eec_data.fld##_0), MT_BIT_SIZE(st, qpc_eec_data.fld##_0))) << 32) + +#define QP_INS_FLD64(src, a, st, fld) MT_INSERT_ARRAY32(a, (u_int32_t)src->fld, MT_BIT_OFFSET(st, qpc_eec_data.fld##_1), MT_BIT_SIZE(st, qpc_eec_data.fld##_1)) ; \ + MT_INSERT_ARRAY32(a, (u_int32_t)((src->fld) >> 32), MT_BIT_OFFSET(st, qpc_eec_data.fld##_0), MT_BIT_SIZE(st, qpc_eec_data.fld##_0)) + +#define THH_CMDS_WRAP_DEBUG_LEVEL 4 +#define CMDS_DBG MTL_DEBUG4 + +#if defined(MAX_DEBUG) && THH_CMDS_WRAP_DEBUG_LEVEL <= MAX_DEBUG +#define THH_CMD_PRINT_DEV_LIMS(a) THH_cmd_print_dev_lims(a) +#define THH_CMD_PRINT_HCA_PROPS(a) THH_cmd_print_hca_props(a) +#define THH_CMD_PRINT_INIT_IB(a, b) THH_cmd_print_init_ib(a, b) +#define THH_CMD_PRINT_QUERY_ADAPTER(a) THH_cmd_print_query_adapter(a) +#define THH_CMD_PRINT_QUERY_DDR(a) THH_cmd_print_query_ddr(a) +#define THH_CMD_PRINT_QUERY_FW(a) THH_cmd_print_query_fw(a) +#define THH_CMD_PRINT_CQ_CONTEXT(a) THH_cmd_print_cq_context(a) +#define THH_CMD_PRINT_QP_CONTEXT(a) THH_cmd_print_qp_context(a) +#define THH_CMD_PRINT_EQ_CONTEXT(a) THH_cmd_print_eq_context(a) +#define THH_CMD_PRINT_MPT_ENTRY(a) THH_cmd_print_mpt_entry(a) +#define THH_CMD_PRINT_MTT_ENTRIES(a,b) THH_cmd_print_mtt_entries(a,b) +#define THH_CMD_PRINT_MGM_ENTRY(a) THH_cmd_print_mgm_entry(a) +#define THH_CMD_MAILBOX_PRINT(a,b,c) THH_cmd_mailbox_print(a,b,c) +#else +#define THH_CMD_PRINT_DEV_LIMS(a) +#define THH_CMD_PRINT_HCA_PROPS(a) +#define THH_CMD_PRINT_INIT_IB(a, b) +#define THH_CMD_PRINT_QUERY_ADAPTER(a) +#define 
THH_CMD_PRINT_QUERY_DDR(a) +#define THH_CMD_PRINT_QUERY_FW(a) +#define THH_CMD_PRINT_CQ_CONTEXT(a) +#define THH_CMD_PRINT_QP_CONTEXT(a) +#define THH_CMD_PRINT_EQ_CONTEXT(a) +#define THH_CMD_PRINT_MPT_ENTRY(a) +#define THH_CMD_PRINT_MTT_ENTRIES(a,b) +#define THH_CMD_MAILBOX_PRINT(a,b,c) +#define THH_CMD_PRINT_MGM_ENTRY(a) +#endif + +/***************************************************** */ +/************** CMD INTERFACE UTILITIES ************* */ +/***************************************************** */ +static MT_bool THH_tavor_qpstate_2_vapi_qpstate(tavor_if_qp_state_t tavor_qp_state, + VAPI_qp_state_t * vapi_qp_state) +{ + switch(tavor_qp_state) { + case TAVOR_IF_QP_STATE_RESET: + *vapi_qp_state = VAPI_RESET; + break; + + case TAVOR_IF_QP_STATE_INIT: + *vapi_qp_state = VAPI_INIT; + break; + + case TAVOR_IF_QP_STATE_RTR: + *vapi_qp_state = VAPI_RTR; + break; + + case TAVOR_IF_QP_STATE_RTS: + *vapi_qp_state = VAPI_RTS; + break; + + case TAVOR_IF_QP_STATE_SQER: + *vapi_qp_state = VAPI_SQE; + break; + + /* T.D.(matan): change this.*/ + case TAVOR_IF_QP_STATE_DRAINING: + *vapi_qp_state = (VAPI_qp_state_t)(VAPI_SQD | THH_CMD_QP_DRAINING_FLAG); + break; + case TAVOR_IF_QP_STATE_SQD: + *vapi_qp_state = VAPI_SQD; + break; + + case TAVOR_IF_QP_STATE_ERR: + *vapi_qp_state = VAPI_ERR; + break; + default: + return FALSE; + } + return TRUE; +} +static MT_bool THH_vapi_qpstate_2_tavor_qpstate(VAPI_qp_state_t vapi_qp_state, + tavor_if_qp_state_t *tavor_qp_state) +{ + switch(vapi_qp_state) { + case VAPI_RESET: + *tavor_qp_state = TAVOR_IF_QP_STATE_RESET; + break; + + case VAPI_INIT: + *tavor_qp_state = TAVOR_IF_QP_STATE_INIT; + break; + + case VAPI_RTR: + *tavor_qp_state = TAVOR_IF_QP_STATE_RTR; + break; + + case VAPI_RTS: + *tavor_qp_state = TAVOR_IF_QP_STATE_RTS; + break; + + case VAPI_SQE: + *tavor_qp_state = TAVOR_IF_QP_STATE_SQER; + break; + + case VAPI_SQD: + *tavor_qp_state = TAVOR_IF_QP_STATE_SQD; + break; + + case VAPI_ERR: + *tavor_qp_state = TAVOR_IF_QP_STATE_ERR; + break; + default: + return FALSE; + } + return TRUE; +} + + +/***************************************************** */ +/********** CMD INTERFACE PRINT UTILITIES **************/ +/***************************************************** */ + +//-------------------------------------------------------------------------------------------------- +#if defined(MAX_DEBUG) && THH_CMDS_WRAP_DEBUG_LEVEL <= MAX_DEBUG + +static void THH_cmd_mailbox_print( void *mailbox, int size, const char * func ) +{ + int i, j, maxlines, bytes_left, this_line; + char linebuf[200], tempout[20]; + u_int8_t *iterator; + + if (mailbox == NULL) return; + + iterator = (u_int8_t *)mailbox; + bytes_left = size; + return; + MTL_DEBUG4("MailboxPrint from function %s, starting at addr 0x%p, size=%d:\n", + func, mailbox, size); + + if (size <= 0) { + return; + } + + maxlines = (size / 16) + ((size % 16) ? 1 : 0); + + for (i = 0; i < maxlines; i++) { + memset(linebuf, 0, sizeof(linebuf)); + this_line = (bytes_left > 16 ? 
16 : bytes_left);
+
+    for (j = 0; j < this_line; j++) {
+      if ((j % 4) == 0) {
+        strcat(linebuf," ");
+      }
+      sprintf(tempout, "%02x", *iterator);
+      iterator++; bytes_left--;
+      strcat(linebuf,tempout);
+    }
+    MTL_DEBUG4("%s\n", linebuf);
+  }
+  MTL_DEBUG3("MailboxPrint END\n");
+}
+
+
+/*
+ * print_mtt_entries
+ */
+static void THH_cmd_print_mtt_entries(u_int32_t elts_this_loop, void *inprm)
+{
+  u_int32_t *p=(u_int32_t *)((u_int8_t *)inprm+MTT_CMD_PA_PREAMBLE_SIZE), i,
+            *pp = (u_int32_t *)inprm;
+  u_int64_t tag;
+  int present;
+
+  return; /* NOTE: this early return leaves the dump below disabled */
+  MTL_DEBUG1("mtt_pa = "U64_FMT"\n", (((u_int64_t)(*pp))<<32)+*(pp+1));
+  for ( i=0; i<elts_this_loop; ++i ) {
+    tag = (((u_int64_t)(*p))<<32) + (*(p+1));
+    present = (int)(tag & 1);
+    MTL_DEBUG1("entry %d: tag="U64_FMT", present=%d\n", i, tag, present);
+    p += 2;
+  }
+}
+
+void THH_cmd_print_hca_props( THH_hca_props_t *hca_props)
+{
+  CMDS_DBG("HCA PROPS DUMP (THH_hca_props_t structure)\n");
+  CMDS_DBG( "hca_core_clock = %d\n", hca_props->hca_core_clock);
+
+  CMDS_DBG( "he (host endian) = %s\n", hca_props->he ? "Big Endian" : "Little Endian");
+
+  CMDS_DBG( "re (Router Mode Enable) = %s\n", hca_props->re ? "TRUE" : "FALSE");
+
+  CMDS_DBG( "router_qp = %s\n", hca_props->router_qp ? "TRUE" : "FALSE");
+
+  CMDS_DBG( "ud (UD address vector protection) = %s\n", hca_props->ud ? "TRUE" : "FALSE");
+
+  CMDS_DBG( "udp (UDP port check enabled) = %s\n", hca_props->udp ? "TRUE" : "FALSE");
+
+  /* multicast parameters */
+  CMDS_DBG( "\nmulticast_parameters.log_mc_table_entry_sz = %d\n", hca_props->multicast_parameters.log_mc_table_entry_sz);
+  CMDS_DBG( "multicast_parameters.log_mc_table_sz = %d\n", hca_props->multicast_parameters.log_mc_table_sz);
+  CMDS_DBG( "multicast_parameters.mc_base_addr = "U64_FMT"\n", hca_props->multicast_parameters.mc_base_addr);
+  CMDS_DBG( "multicast_parameters.mc_hash_fn = %d\n", hca_props->multicast_parameters.mc_hash_fn);
+  CMDS_DBG( "multicast_parameters.mc_table_hash_sz = %d\n", hca_props->multicast_parameters.mc_table_hash_sz);
+
+  /* QP, EEC, EQC, RDB, CQC parameters */
+  CMDS_DBG( "\nqpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr = "U64_FMT"\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.log_num_eq = %d\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.log_num_eq);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq = %d\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee = %d\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee);
+  CMDS_DBG( "qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp = %d\n", hca_props->qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp);
+
+  /* TPT parameters */
+  CMDS_DBG( "\ntpt_parameters.mpt_base_adr = "U64_FMT"\n", hca_props->tpt_parameters.mpt_base_adr);
+  CMDS_DBG( "tpt_parameters.mtt_base_adr = "U64_FMT"\n", hca_props->tpt_parameters.mtt_base_addr);
+  CMDS_DBG( "tpt_parameters.log_mpt_sz = %d\n", hca_props->tpt_parameters.log_mpt_sz);
+  CMDS_DBG( "tpt_parameters.mtt_segment_size = %d\n", hca_props->tpt_parameters.mtt_segment_size);
"tpt_parameters.mtt_version = %d\n", hca_props->tpt_parameters.mtt_version); + CMDS_DBG( "tpt_parameters.pfto = %d\n", hca_props->tpt_parameters.pfto); + + /* UAR parameters */ + CMDS_DBG( "\nuar_parameters.uar_base_addr = "U64_FMT"\n", hca_props->uar_parameters.uar_base_addr); + CMDS_DBG( "uar_parameters.uar_scratch_base_addr = "U64_FMT"\n", hca_props->uar_parameters.uar_scratch_base_addr); + CMDS_DBG( "uar_parameters.uar_page_sz = %d\n", hca_props->uar_parameters.uar_page_sz); + + /* UDAV parameters */ + CMDS_DBG( "\nudavtable_memory_parameters.l_key = 0x%x\n", hca_props->udavtable_memory_parameters.l_key); + CMDS_DBG( "udavtable_memory_parameters.pd = %d\n", hca_props->udavtable_memory_parameters.pd); + CMDS_DBG( "udavtable_memory_parameters.xlation_en = %s\n", hca_props->udavtable_memory_parameters.xlation_en ? "TRUE" : "FALSE"); +} + +void THH_cmd_print_dev_lims(THH_dev_lim_t *dev_lim) +{ + CMDS_DBG("QUERY DEV LIMS DUMP (THH_dev_lim_t structure)\n"); + + CMDS_DBG( "dev_lim->apm (Automatic Path Migration) = %s\n", dev_lim->apm ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->atm (Atomic Operations) = %s\n", dev_lim->atm ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->avp (Address Vector port checking) = %s\n", dev_lim->avp ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->udm (UD Multicast support) = %s\n", dev_lim->udm ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->mw (Memory Windows) = %s\n", dev_lim->mw ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->pg (Paging on-demand) = %s\n", dev_lim->pg ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->pkv (PKey Violation Counter) = %s\n", dev_lim->pkv ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->pkv (PKey Violation Counter) = %s\n", dev_lim->pkv ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->qkv (QKey Violation Counter) = %s\n", dev_lim->qkv ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->r (Router Mode) = %s\n", dev_lim->r ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->raw_ether (Raw Ethernet mode) = %s\n", dev_lim->raw_ether ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->raw_ipv6 (Raw IpV6 mode) = %s\n", dev_lim->raw_ipv6 ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->rc (RC Transport) = %s\n", dev_lim->rc ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->rd (RD Transport) = %s\n", dev_lim->rd ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->uc (UC Transport) = %s\n", dev_lim->uc ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->ud (UD Transport) = %s\n", dev_lim->ud ? "TRUE" : "FALSE"); + CMDS_DBG( "dev_lim->rm (Raw Multicast) = %s\n", dev_lim->rm ? 
"TRUE" : "FALSE"); + /* multicast parameters */ + CMDS_DBG( "dev_lim->cqc_entry_sz = %d\n",dev_lim->cqc_entry_sz); + CMDS_DBG( "dev_lim->eec_entry_sz = %d\n",dev_lim->eec_entry_sz); + CMDS_DBG( "dev_lim->eeec_entry_sz = %d\n",dev_lim->eeec_entry_sz); + CMDS_DBG( "dev_lim->eqc_entry_sz = %d\n",dev_lim->eqc_entry_sz); + CMDS_DBG( "dev_lim->eqpc_entry_sz = %d\n",dev_lim->eqpc_entry_sz); + CMDS_DBG( "dev_lim->log_max_cq_sz = %d\n",dev_lim->log_max_cq_sz); + CMDS_DBG( "dev_lim->qpc_entry_sz = %d\n",dev_lim->qpc_entry_sz); + CMDS_DBG( "dev_lim->max_uar_sz = %d\n",dev_lim->uar_sz); + CMDS_DBG( "dev_lim->uar_scratch_entry_sz = %d\n",dev_lim->uar_scratch_entry_sz); + CMDS_DBG( "dev_lim->log_max_av = %d\n",dev_lim->log_max_av); + CMDS_DBG( "dev_lim->log_max_cq = %d\n",dev_lim->log_max_cq); + CMDS_DBG( "dev_lim->log_max_cq_sz = %d\n",dev_lim->log_max_cq_sz); + CMDS_DBG( "dev_lim->log_max_ee = %d\n",dev_lim->log_max_ee); + CMDS_DBG( "dev_lim->log_max_eq = %d\n",dev_lim->log_max_eq); + CMDS_DBG( "dev_lim->log_max_gid = %d\n",dev_lim->log_max_gid); + CMDS_DBG( "dev_lim->log_max_mcg = %d\n",dev_lim->log_max_mcg); + CMDS_DBG( "dev_lim->log_max_mpts = %d\n",dev_lim->log_max_mpts); + CMDS_DBG( "dev_lim->log_max_mtt_seg = %d\n",dev_lim->log_max_mtt_seg); + CMDS_DBG( "dev_lim->log_max_mrw_sz = %d\n",dev_lim->log_max_mrw_sz); + CMDS_DBG( "dev_lim->log_max_pd = %d\n",dev_lim->log_max_pd); + CMDS_DBG( "dev_lim->log_max_pkey = %d\n",dev_lim->log_max_pkey); + CMDS_DBG( "dev_lim->log_max_qp = %d\n",dev_lim->log_max_qp); + CMDS_DBG( "dev_lim->log_max_qp_mcg = %d\n",dev_lim->log_max_qp_mcg); + CMDS_DBG( "dev_lim->log_max_qp_sz = %d\n",dev_lim->log_max_qp_sz); + CMDS_DBG( "dev_lim->log_max_ra_req_qp = %d\n",dev_lim->log_max_ra_req_qp); + CMDS_DBG( "dev_lim->log_max_ra_res_qp = %d\n",dev_lim->log_max_ra_res_qp); + CMDS_DBG( "dev_lim->log_max_ra_res_global = %d\n",dev_lim->log_max_ra_res_global); + CMDS_DBG( "dev_lim->log_max_rdds = %d\n",dev_lim->log_max_rdds); + CMDS_DBG( "dev_lim->log_pg_sz = %d\n",dev_lim->log_pg_sz); + CMDS_DBG( "dev_lim->max_desc_sz = %d\n",dev_lim->max_desc_sz); + CMDS_DBG( "dev_lim->max_mtu = %d\n",dev_lim->max_mtu); + CMDS_DBG( "dev_lim->max_port_width = %d\n",dev_lim->max_port_width); + CMDS_DBG( "dev_lim->max_sg = %d\n",dev_lim->max_sg); + CMDS_DBG( "dev_lim->max_vl = %d\n",dev_lim->max_vl); + CMDS_DBG( "dev_lim->num_ports = %d\n",dev_lim->num_ports); + CMDS_DBG( "dev_lim->log2_rsvd_qps = %d\n",dev_lim->log2_rsvd_qps); + CMDS_DBG( "dev_lim->log2_rsvd_ees = %d\n",dev_lim->log2_rsvd_ees); + CMDS_DBG( "dev_lim->log2_rsvd_cqs = %d\n",dev_lim->log2_rsvd_cqs); + CMDS_DBG( "dev_lim->num_rsvd_eqs = %d\n",dev_lim->num_rsvd_eqs); + CMDS_DBG( "dev_lim->log2_rsvd_mrws = %d\n",dev_lim->log2_rsvd_mrws); + CMDS_DBG( "dev_lim->log2_rsvd_mtts = %d\n",dev_lim->log2_rsvd_mtts); + CMDS_DBG( "dev_lim->num_rsvd_uars = %d\n",dev_lim->num_rsvd_uars); + CMDS_DBG( "dev_lim->num_rsvd_rdds = %d\n",dev_lim->num_rsvd_rdds); + CMDS_DBG( "dev_lim->num_rsvd_pds = %d\n",dev_lim->num_rsvd_pds); + CMDS_DBG( "dev_lim->local_ca_ack_delay = %d\n",dev_lim->local_ca_ack_delay); +} + +void THH_cmd_print_query_fw(THH_fw_props_t *fw_props) +{ + CMDS_DBG("QUERY FW DUMP (THH_fw_props_t structure)\n"); + CMDS_DBG( "fw_props->cmd_interface_rev = 0x%x\n", fw_props->cmd_interface_rev); + CMDS_DBG( "fw_props->fw_rev_major = 0x%x\n", fw_props->fw_rev_major); + CMDS_DBG( "fw_props->fw_rev_minor = 0x%x\n", fw_props->fw_rev_minor); + CMDS_DBG( "fw_props->fw_rev_subminor = 0x%x\n", fw_props->fw_rev_subminor); + CMDS_DBG( "fw_props->fw_base_addr = 
"U64_FMT"\n", fw_props->fw_base_addr); + CMDS_DBG( "fw_props->fw_end_addr = "U64_FMT"\n", fw_props->fw_end_addr); + CMDS_DBG( "fw_props->error_buf_start = "U64_FMT"\n", fw_props->error_buf_start); + CMDS_DBG( "fw_props->error_buf_size = %d\n", fw_props->error_buf_size); +} + +void THH_cmd_print_query_adapter( THH_adapter_props_t *adapter_props) +{ + CMDS_DBG("QUERY ADAPTER DUMP (THH_adapter_props_t structure)\n"); + CMDS_DBG( "adapter_props->device_id = %d\n", adapter_props->device_id); + CMDS_DBG( "adapter_props->intapin = %d\n", adapter_props->intapin); + CMDS_DBG( "adapter_props->revision_id = %d\n", adapter_props->revision_id); + CMDS_DBG( "adapter_props->vendor_id = %d\n", adapter_props->vendor_id); +} + +void THH_cmd_print_query_ddr( THH_ddr_props_t *ddr_props) +{ + CMDS_DBG("QUERY DDR DUMP (THH_ddr_props_t structure)\n"); + CMDS_DBG( "ddr_props->ddr_start_adr = "U64_FMT"\n", ddr_props->ddr_start_adr); + CMDS_DBG( "ddr_props->ddr_end_adr = "U64_FMT"\n", ddr_props->ddr_end_adr); + CMDS_DBG( "\nddr_props->dimm0.di = %d\n", ddr_props->dimm0.di); + CMDS_DBG( "ddr_props->dimm0.dimmsize = %d\n", ddr_props->dimm0.dimmsize); + CMDS_DBG( "ddr_props->dimm0.dimmstatus = %d\n", ddr_props->dimm0.dimmstatus); + CMDS_DBG( "ddr_props->dimm0.vendor_id = "U64_FMT"\n", ddr_props->dimm0.vendor_id); + CMDS_DBG( "\nddr_props->dimm1.di = %d\n", ddr_props->dimm1.di); + CMDS_DBG( "ddr_props->dimm1.dimmsize = %d\n", ddr_props->dimm1.dimmsize); + CMDS_DBG( "ddr_props->dimm1.dimmstatus = %d\n", ddr_props->dimm1.dimmstatus); + CMDS_DBG( "ddr_props->dimm1.vendor_id = "U64_FMT"\n", ddr_props->dimm1.vendor_id); + CMDS_DBG( "\nddr_props->dimm2.di = %d\n", ddr_props->dimm2.di); + CMDS_DBG( "ddr_props->dimm2.dimmsize = %d\n", ddr_props->dimm2.dimmsize); + CMDS_DBG( "ddr_props->dimm2.dimmstatus = %d\n", ddr_props->dimm2.dimmstatus); + CMDS_DBG( "ddr_props->dimm2.vendor_id = "U64_FMT"\n", ddr_props->dimm2.vendor_id); + CMDS_DBG( "\nddr_props->dimm3.di = %d\n", ddr_props->dimm3.di); + CMDS_DBG( "ddr_props->dimm3.dimmsize = %d\n", ddr_props->dimm3.dimmsize); + CMDS_DBG( "ddr_props->dimm3.dimmstatus = %d\n", ddr_props->dimm3.dimmstatus); + CMDS_DBG( "ddr_props->dimm3.vendor_id = "U64_FMT"\n", ddr_props->dimm3.vendor_id); + CMDS_DBG( "ddr_props->dh = %s\n", (ddr_props->dh ? "TRUE" : "FALSE")); + CMDS_DBG( "ddr_props->ap = %d\n", ddr_props->ap); + CMDS_DBG( "ddr_props->di = %d\n", ddr_props->di); +} + +void THH_cmd_print_init_ib(IB_port_t port, THH_port_init_props_t *port_init_props) +{ + CMDS_DBG("INIT_IB DUMP (THH_port_init_props_t structure) for port %d\n", port); + CMDS_DBG( "port_init_props->max_gid = %d\n", port_init_props->max_gid); + CMDS_DBG( "port_init_props->max_pkey = %d\n", port_init_props->max_pkey); + CMDS_DBG( "port_init_props->mtu_cap = 0x%x\n", port_init_props->mtu_cap); + CMDS_DBG( "port_init_props->port_width_cap = 0x%x\n", port_init_props->port_width_cap); + CMDS_DBG( "port_init_props->vl_cap = 0x%x\n", port_init_props->vl_cap); + CMDS_DBG( "port_init_props->g0 = %s\n", port_init_props->g0 ? 
"TRUE" : "FALSE"); + CMDS_DBG( "port_init_props->guid0 = 0x%2x%2x%2x%2x%2x%2x%2x%2x", + port_init_props->guid0[0], port_init_props->guid0[1], port_init_props->guid0[2], + port_init_props->guid0[3], port_init_props->guid0[4], port_init_props->guid0[5], + port_init_props->guid0[6], port_init_props->guid0[7] ); +} + +void THH_cmd_print_cq_context(THH_cqc_t *cqc) +{ + return; + CMDS_DBG("CQ CONTEXT DUMP (THH_cqc_t structure)\n"); + CMDS_DBG( "cqc->st = 0x%x\n", cqc->st); + CMDS_DBG( "cqc->oi (overrun ignore) = %s\n", cqc->oi ? "TRUE" : "FALSE"); + CMDS_DBG( "cqc->tr (translation required) = %s\n", cqc->tr ? "TRUE" : "FALSE"); + CMDS_DBG( "cqc->status = 0x%x\n", cqc->status); + CMDS_DBG( "cqc->start_address = "U64_FMT"\n", cqc->start_address); + CMDS_DBG( "cqc->usr_page = 0x%X\n", cqc->usr_page); + CMDS_DBG( "cqc->log_cq_size = %d\n", cqc->log_cq_size); + CMDS_DBG( "cqc->c_eqn = 0x%X\n", cqc->c_eqn); + CMDS_DBG( "cqc->e_eqn = 0x%X\n", cqc->e_eqn); + CMDS_DBG( "cqc->pd = 0x%X\n", cqc->pd); + CMDS_DBG( "cqc->l_key = 0x%X\n", cqc->l_key); + CMDS_DBG( "cqc->last_notified_indx = 0x%X\n", cqc->last_notified_indx); + CMDS_DBG( "cqc->solicit_producer_indx = 0x%X\n", cqc->solicit_producer_indx); + CMDS_DBG( "cqc->consumer_indx = 0x%X\n", cqc->consumer_indx); + CMDS_DBG( "cqc->producer_indx = 0x%X\n", cqc->producer_indx); + CMDS_DBG( "cqc->cqn = 0x%X\n", cqc->cqn); +} +void THH_cmd_print_qp_context(THH_qpee_context_t *qpc) +{ + return; + CMDS_DBG("QPEE CONTEXT DUMP (THH_qpee_context_t structure)\n"); + CMDS_DBG( "QPC ver = 0x%x\n", qpc->ver); + // 'te' field has been removed: + //CMDS_DBG( "QPC Address Translation Enabled = %s\n", (qpc->te ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC Descriptor Event Enabled = %s\n", (qpc->de ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC Path Migration State = 0x%x\n", qpc->pm_state); + CMDS_DBG( "QPC Service Type = 0x%x\n", qpc->st); + CMDS_DBG( "QPC VAPI-encoded State = %d\n", qpc->state); + CMDS_DBG( "QPC Sched Queue = %d\n", qpc->sched_queue); + CMDS_DBG( "QPC msg_max = %d\n", qpc->msg_max); + CMDS_DBG( "QPC MTU (encoded) = %d\n", qpc->mtu); + CMDS_DBG( "QPC usr_page = 0x%x\n", qpc->usr_page); + CMDS_DBG( "QPC local_qpn_een = 0x%x\n", qpc->local_qpn_een); + CMDS_DBG( "QPC remote_qpn_een = 0x%x\n", qpc->remote_qpn_een); + CMDS_DBG( "QPC pd = 0x%x\n", qpc->pd); + CMDS_DBG( "QPC wqe_base_adr = 0x%x\n", qpc->wqe_base_adr); + CMDS_DBG( "QPC wqe_lkey = 0x%x\n", qpc->wqe_lkey); + CMDS_DBG( "QPC ssc = %s\n", (qpc->ssc ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC sic = %s\n", (qpc->sic ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC sae = %s\n", (qpc->sae ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC swe = %s\n", (qpc->swe ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC sre = %s\n", (qpc->sre ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC retry_count = 0x%x\n", qpc->retry_count); + CMDS_DBG( "QPC sra_max = 0x%x\n", qpc->sra_max); + CMDS_DBG( "QPC flight_lim = 0x%x\n", qpc->flight_lim); + CMDS_DBG( "QPC ack_req_freq = 0x%x\n", qpc->ack_req_freq); + CMDS_DBG( "QPC next_send_psn = 0x%x\n", qpc->next_send_psn); + CMDS_DBG( "QPC cqn_snd = 0x%x\n", qpc->cqn_snd); + CMDS_DBG( "QPC next_snd_wqe = "U64_FMT"\n", qpc->next_snd_wqe); + CMDS_DBG( "QPC rsc = %s\n", (qpc->rsc ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC ric = %s\n", (qpc->ric ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC rae = %s\n", (qpc->rae ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC rwe = %s\n", (qpc->rwe ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC rre = %s\n", (qpc->rre ? 
"TRUE" : "FALSE")); + CMDS_DBG( "QPC rra_max = 0x%x\n", qpc->rra_max); + CMDS_DBG( "QPC next_rcv_psn = 0x%x\n", qpc->next_rcv_psn); + CMDS_DBG( "QPC min_rnr_nak = 0x%x\n", qpc->min_rnr_nak); + CMDS_DBG( "QPC ra_buff_indx = 0x%x\n", qpc->ra_buff_indx); + CMDS_DBG( "QPC cqn_rcv = 0x%x\n", qpc->cqn_rcv); + CMDS_DBG( "QPC next_rcv_wqe = "U64_FMT"\n", qpc->next_rcv_wqe); + CMDS_DBG( "QPC q_key = 0x%x\n", qpc->q_key); + CMDS_DBG( "QPC srqn = 0x%x\n", qpc->srqn); + CMDS_DBG( "QPC srq = %s\n", (qpc->srq ? "TRUE" : "FALSE")); + CMDS_DBG( "QPC primary.ack_timeout = %d\n" , qpc->primary_address_path.ack_timeout); + CMDS_DBG( "QPC primary.max_stat_rate = %d\n" , qpc->primary_address_path.max_stat_rate); +} +void THH_cmd_print_eq_context(THH_eqc_t *eqc) +{ + return; + CMDS_DBG("EQ CONTEXT DUMP (THH_eqc_t structure)\n"); + CMDS_DBG( "eqc->st = 0x%x\n", eqc->st); + CMDS_DBG( "eqc->oi (overrun ignore) = %s\n", eqc->oi ? "TRUE" : "FALSE"); + CMDS_DBG( "eqc->tr (translation required) = %s\n", eqc->tr ? "TRUE" : "FALSE"); + CMDS_DBG( "eqc->owner = %s\n", (eqc->owner == THH_OWNER_SW ? "THH_OWNER_SW" : "THH_OWNER_HW")); + CMDS_DBG( "eqc->status = 0x%x\n", eqc->status); + CMDS_DBG( "eqc->start_address = "U64_FMT"\n", eqc->start_address); + CMDS_DBG( "eqc->usr_page = 0x%x\n", eqc->usr_page); + CMDS_DBG( "eqc->log_eq_size = %d\n", eqc->log_eq_size); + CMDS_DBG( "eqc->intr = 0x%x\n", eqc->intr); + CMDS_DBG( "eqc->lost_count = 0x%x\n", eqc->lost_count); + CMDS_DBG( "eqc->l_key = 0x%X\n", eqc->lkey); + CMDS_DBG( "eqc->pd = 0x%X\n", eqc->pd); + CMDS_DBG( "eqc->consumer_indx = 0x%X\n", eqc->consumer_indx); + CMDS_DBG( "eqc->producer_indx = 0x%X\n", eqc->producer_indx); +} +void THH_cmd_print_mpt_entry(THH_mpt_entry_t *mpt) +{ + CMDS_DBG("MPT ENTRY DUMP (THH_mpt_entry_t structure)\n"); + CMDS_DBG( "MPT entry type = %s\n", (mpt->r_w ? "REGION" : "WINDOW")); + CMDS_DBG( "MPT physical addr flag = %s\n", (mpt->pa ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Local read access = %s\n", (mpt->lr ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Local write access = %s\n", (mpt->lw ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Remote read access = %s\n", (mpt->rr ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Remote write access = %s\n", (mpt->rw ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Atomic access = %s\n", (mpt->a ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT Atomic access = %s\n", (mpt->a ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT All writes posted = %s\n", (mpt->pw ? "TRUE" : "FALSE")); + CMDS_DBG( "MPT m_io = %s\n", (mpt->m_io ? 
"TRUE" : "FALSE")); + CMDS_DBG( "MPT Status = 0x%x\n", mpt->status); + CMDS_DBG( "MPT Page size = %d (Actual size is [4K]*2^Page_size)\n", mpt->page_size); + CMDS_DBG( "MPT mem key = 0x%x\n", mpt->mem_key); + CMDS_DBG( "MPT pd = 0x%x\n", mpt->pd); + CMDS_DBG( "MPT start_address = "U64_FMT"\n", mpt->start_address); + CMDS_DBG( "MPT length = "U64_FMT"\n", mpt->reg_wnd_len); + CMDS_DBG( "MPT lkey = 0x%x\n", mpt->lkey); + CMDS_DBG( "MPT win_cnt = 0x%x\n", mpt->win_cnt); + CMDS_DBG( "MPT win_cnt_limit = 0x%x\n", mpt->win_cnt_limit); + CMDS_DBG( "MPT MTT seg addr = "U64_FMT"\n", mpt->mtt_seg_adr); +} + +void THH_cmd_print_mgm_entry(THH_mcg_entry_t *mgm) +{ + IB_wqpn_t *qp_iterator; + u_int32_t i; + + CMDS_DBG("MGM ENTRY DUMP (THH_mcg_entry_t structure)\n"); + CMDS_DBG( "MGM next_gid_index = 0x%x\n", mgm->next_gid_index); + CMDS_DBG("MGM GID = %d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d \n", + mgm->mgid[0], mgm->mgid[1], mgm->mgid[2], mgm->mgid[3] + , mgm->mgid[4], mgm->mgid[5], mgm->mgid[6], mgm->mgid[7] + , mgm->mgid[8], mgm->mgid[9], mgm->mgid[10], mgm->mgid[10] + , mgm->mgid[12], mgm->mgid[13], mgm->mgid[14], mgm->mgid[15]); + CMDS_DBG( "MGM valid_qps = %d \n", mgm->valid_qps); + for (qp_iterator = mgm->qps, i = 0; i < mgm->valid_qps; i++, qp_iterator++) { + CMDS_DBG( "MGM qps[%d] = 0x%x\n", i, *qp_iterator); + } +} + +#else + +void THH_cmd_print_hca_props(THH_hca_props_t *hca_props) {} +void THH_cmd_print_dev_lims(THH_dev_lim_t *dev_lim) {} +void THH_cmd_print_query_fw(THH_fw_props_t *fw_props) {} +void THH_cmd_print_query_adapter( THH_adapter_props_t *adapter_props) {} +void THH_cmd_print_query_ddr( THH_ddr_props_t *ddr_props) {} +void THH_cmd_print_init_ib(IB_port_t port, THH_port_init_props_t *port_init_props) {} +void THH_cmd_print_cq_context(THH_cqc_t *cqc) {} +void THH_cmd_print_qp_context(THH_qpee_context_t *qpc) {} +void THH_cmd_print_eq_context(THH_eqc_t *eqc) {} +void THH_cmd_print_mpt_entry(THH_mpt_entry_t *mpt) {} +void THH_cmd_print_mgm_entry(THH_mcg_entry_t *mgm) {} + +#endif /* #if THH_CMDS_WRAP_DEBUG_LEVEL */ +/***************************************************** */ +/************* END of PRINT UTILITIES **************** */ +/***************************************************** */ + + +/* + * THH_cmd_QUERY_DEV_LIM + */ +THH_cmd_status_t THH_cmd_QUERY_DEV_LIM(THH_cmd_t cmd_if, THH_dev_lim_t *dev_lim) +{ + command_fields_t cmd_desc; + u_int8_t *outprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_query_dev_lim_st); + + FUNC_IN; + outprm = TNMALLOC(u_int8_t, buf_size); + if ( !outprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_DEV_LIM; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_DEV_LIM; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + if ( dev_lim ) { + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_qp); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_qps); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_qp_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_srqs); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_srqs); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, 
log_max_srq_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_ee); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_ees); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_cq); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_cqs); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_cq_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_eq); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, num_rsvd_eqs); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_mpts); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_mtt_seg); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_mrws); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_mrw_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log2_rsvd_mtts); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_av); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_ra_res_qp); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_ra_req_qp); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_ra_res_global); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, num_ports); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, max_vl); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, max_port_width); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, max_mtu); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, local_ca_ack_delay); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_gid); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_pkey); + + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, rc); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, uc); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, ud); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, rd); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, raw_ipv6); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, raw_ether); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, srq); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, pkv); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, qkv); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, mw); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, apm); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, atm); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, rm); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, avp); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, udm); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, pg); + EX_BOOL_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, r); + + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_pg_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, uar_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, num_rsvd_uars); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, max_desc_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, max_sg); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_mcg); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_qp_mcg); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_rdds); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, num_rsvd_rdds); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, log_max_pd); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, num_rsvd_pds); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, qpc_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, 
eec_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, eqpc_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, eeec_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, cqc_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, eqc_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, srq_entry_sz); + EX_FLD(dev_lim, outprm, tavorprm_query_dev_lim_st, uar_scratch_entry_sz); + THH_CMD_PRINT_DEV_LIMS(dev_lim); + } + + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_QUERY_FW + */ +THH_cmd_status_t THH_cmd_QUERY_FW(THH_cmd_t cmd_if, THH_fw_props_t *fw_props) +{ + command_fields_t cmd_desc; + //u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_query_fw_st)]; + u_int8_t *outprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_query_fw_st); + + FUNC_IN; + outprm = TNMALLOC(u_int8_t, buf_size); + if ( !outprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_FW; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_FW; + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, fw_rev_major); + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, fw_rev_minor); + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, fw_rev_subminor); + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, cmd_interface_rev); + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, log_max_outstanding_cmd); + EX_FLD64(fw_props, outprm, tavorprm_query_fw_st, fw_base_addr); + EX_FLD64(fw_props, outprm, tavorprm_query_fw_st, fw_end_addr); + EX_FLD64(fw_props, outprm, tavorprm_query_fw_st, error_buf_start); + EX_FLD(fw_props, outprm, tavorprm_query_fw_st, error_buf_size); + FREE(outprm); + + THH_cmd_set_fw_props(cmd_if, fw_props); + + THH_CMD_PRINT_QUERY_FW(fw_props); + + MT_RETURN(rc); +} + +/* + * THH_cmd_QUERY_DDR + */ +THH_cmd_status_t THH_cmd_QUERY_DDR(THH_cmd_t cmd_if, THH_ddr_props_t *ddr_props) +{ + command_fields_t cmd_desc; + u_int8_t *outprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_query_ddr_st); + + FUNC_IN; + outprm = TNMALLOC(u_int8_t, buf_size); + if ( !outprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_DDR; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_DDR; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + if ( ddr_props ) { + EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, ddr_start_adr); + EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, ddr_end_adr); + + EX_BOOL_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dh); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, di); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, ap); + + /* TBD handle dimm structs here */ + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm0.dimmsize); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm0.dimmstatus); + 
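+/*
+ * All of the QUERY_* wrappers in this file share the same calling
+ * contract: the caller supplies the command interface handle and an
+ * optional result struct (NULL means "run the command but discard the
+ * output"). A minimal caller sketch, assuming a valid THH_cmd_t handle
+ * named cmd_if (illustrative only, not part of the patch):
+ *
+ *   THH_ddr_props_t ddr_props;
+ *   THH_cmd_status_t st = THH_cmd_QUERY_DDR(cmd_if, &ddr_props);
+ *   if (st != THH_CMD_STAT_OK) {
+ *       MTL_ERROR1(MT_FLFMT("QUERY_DDR failed: %s"),
+ *                  str_THH_cmd_status_t(st));
+ *   } else {
+ *       // e.g. size the attached DDR from the returned address range
+ *       u_int64_t ddr_size = ddr_props.ddr_end_adr - ddr_props.ddr_start_adr;
+ *   }
+ */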
EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, dimm0.vendor_id); + + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm1.dimmsize); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm1.dimmstatus); + EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, dimm1.vendor_id); + + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm2.dimmsize); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm2.dimmstatus); + EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, dimm2.vendor_id); + + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm3.dimmsize); + EX_FLD(ddr_props, outprm, tavorprm_query_ddr_st, dimm3.dimmstatus); + EX_FLD64(ddr_props, outprm, tavorprm_query_ddr_st, dimm3.vendor_id); + + THH_CMD_PRINT_QUERY_DDR(ddr_props); + } + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_QUERY_ADAPTER + */ +THH_cmd_status_t THH_cmd_QUERY_ADAPTER(THH_cmd_t cmd_if, THH_adapter_props_t *adapter_props) +{ + command_fields_t cmd_desc; + u_int8_t *outprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_query_adapter_st); + + FUNC_IN; + outprm = TNMALLOC(u_int8_t, buf_size); + if (outprm == NULL) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_ADAPTER; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_ADAPTER; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + if ( adapter_props ) { + EX_FLD(adapter_props, outprm, tavorprm_query_adapter_st, vendor_id); + EX_FLD(adapter_props, outprm, tavorprm_query_adapter_st, device_id); + EX_FLD(adapter_props, outprm, tavorprm_query_adapter_st, revision_id); + EX_FLD(adapter_props, outprm, tavorprm_query_adapter_st, intapin); + + THH_CMD_PRINT_QUERY_ADAPTER(adapter_props); + } + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_INIT_HCA + */ + +THH_cmd_status_t THH_cmd_INIT_HCA(THH_cmd_t cmd_if, THH_hca_props_t *hca_props) +{ + command_fields_t cmd_desc; + u_int8_t *inprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_init_hca_st); + u_int64_t rdb_base_addr_save; + + FUNC_IN; + if (hca_props == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + inprm = TNMALLOC(u_int8_t, buf_size); + if ( !inprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(inprm, 0, buf_size); + THH_CMD_PRINT_HCA_PROPS(hca_props); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_INIT_HCA; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = hca_props->qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp > 18 ? 
+ 2*(TAVOR_IF_CMD_ETIME_INIT_HCA) : TAVOR_IF_CMD_ETIME_INIT_HCA; + + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, router_qp); + INS_BOOL_FLD(hca_props, inprm, tavorprm_init_hca_st, re); + INS_BOOL_FLD(hca_props, inprm, tavorprm_init_hca_st, udp); + INS_BOOL_FLD(hca_props, inprm, tavorprm_init_hca_st, he); + INS_BOOL_FLD(hca_props, inprm, tavorprm_init_hca_st, ud); + + INS_FLD64_SH(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp); + INS_FLD64_SH(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq); + INS_FLD64_SH(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee); + INS_FLD64_SH(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq); + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr); + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr); + INS_FLD64_SH(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_eq); + + /* zero out low order 32 bits of rdb base addr for passing to Tavor */ + rdb_base_addr_save = hca_props->qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr; + hca_props->qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr &= MAKE_ULONGLONG(0xFFFFFFFF00000000); + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr); + /*restore original RDB base address */ + hca_props->qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr =rdb_base_addr_save; + + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, udavtable_memory_parameters.l_key); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, udavtable_memory_parameters.pd); + INS_BOOL_FLD(hca_props, inprm, tavorprm_init_hca_st, udavtable_memory_parameters.xlation_en); + + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, multicast_parameters.mc_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, multicast_parameters.log_mc_table_entry_sz); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, multicast_parameters.log_mc_table_sz); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, multicast_parameters.mc_table_hash_sz); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, multicast_parameters.mc_hash_fn); + + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.mpt_base_adr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.log_mpt_sz); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.pfto); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.mtt_segment_size); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.mtt_version); + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, tpt_parameters.mtt_base_addr); + + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, uar_parameters.uar_base_addr); + INS_FLD(hca_props, inprm, tavorprm_init_hca_st, uar_parameters.uar_page_sz); + INS_FLD64(hca_props, inprm, tavorprm_init_hca_st, uar_parameters.uar_scratch_base_addr); + THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__); + + rc = 
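+/*
+ * Why the save/mask/restore dance above: Tavor only consumes the high
+ * 32 bits of the RDB base address, so the low half is zeroed before the
+ * value is inserted into the mailbox, and the caller's copy is restored
+ * afterwards. A worked example with an illustrative address:
+ *
+ *   rdb_base_addr        = 0x00000001002AB000
+ *   & 0xFFFFFFFF00000000 = 0x0000000100000000   // value sent to hardware
+ *   // ...rdb_base_addr in hca_props is then restored to its original value
+ */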
cmd_invoke(cmd_if, &cmd_desc); + FREE(inprm); + MT_RETURN(rc); +} + + +/* + * THH_cmd_CLOSE_HCA + */ +#ifdef SIMULATE_HALT_HCA +THH_cmd_status_t THH_cmd_CLOSE_HCA(THH_cmd_t cmd_if) +#else +THH_cmd_status_t THH_cmd_CLOSE_HCA(THH_cmd_t cmd_if, MT_bool do_halt) +#endif +{ + command_fields_t cmd_desc; + THH_cmd_status_t rc; + + FUNC_IN; + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_CLOSE_HCA; +#ifdef SIMULATE_HALT_HCA + cmd_desc.opcode_modifier = 0; +#else + cmd_desc.opcode_modifier = (do_halt == FALSE ? 0 : 1); +#endif + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLOSE_HCA; + + rc = cmd_invoke(cmd_if, &cmd_desc); + MT_RETURN(rc); +} + + +/* + * THH_cmd_INIT_IB + */ +THH_cmd_status_t THH_cmd_INIT_IB(THH_cmd_t cmd_if, IB_port_t port, + THH_port_init_props_t *port_init_props) +{ + command_fields_t cmd_desc; + u_int8_t *inprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_init_ib_st); + u_int32_t temp_u32; + + FUNC_IN; + if (port_init_props == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + + inprm = TNMALLOC(u_int8_t, buf_size); + if ( !inprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(inprm, 0, buf_size); + THH_CMD_PRINT_INIT_IB(port, port_init_props); + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = port; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_INIT_IB; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_INIT_IB; + + INS_FLD(port_init_props, inprm, tavorprm_init_ib_st, vl_cap); + INS_FLD(port_init_props, inprm, tavorprm_init_ib_st, port_width_cap); + INS_FLD(port_init_props, inprm, tavorprm_init_ib_st, mtu_cap); + INS_FLD(port_init_props, inprm, tavorprm_init_ib_st, max_gid); + INS_FLD(port_init_props, inprm, tavorprm_init_ib_st, max_pkey); + + INS_BOOL_FLD(port_init_props, inprm, tavorprm_init_ib_st, g0); + + /* We get GUID0 in BIG_ENDIAN format. 
It must be converted to host-endian format before being passed to cmd_invoke */
+ /* Note that we need to memcpy each 4 bytes to a temporary u_int32_t variable, since there is no guarantee */
+ /* that the GUID is 4-byte aligned (it is an array of unsigned chars) */
+ memcpy(&temp_u32, &(port_init_props->guid0[0]), sizeof(u_int32_t));
+ MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),
+ MT_BIT_OFFSET(tavorprm_init_ib_st, guid0_h), MT_BIT_SIZE(tavorprm_init_ib_st, guid0_h));
+
+ memcpy(&temp_u32, &(port_init_props->guid0[4]), sizeof(u_int32_t));
+ MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),
+ MT_BIT_OFFSET(tavorprm_init_ib_st, guid0_l), MT_BIT_SIZE(tavorprm_init_ib_st, guid0_l));
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ FREE(inprm);
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_SYS_DIS
+ */
+THH_cmd_status_t THH_cmd_SYS_DIS(THH_cmd_t cmd_if)
+{
+ command_fields_t cmd_desc;
+ THH_cmd_status_t rc;
+
+ FUNC_IN;
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = 0;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_SYS_DIS;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_SYS_DIS;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ MT_RETURN(rc);
+}
+
+
+
+/*
+ * THH_cmd_CLOSE_IB
+ */
+THH_cmd_status_t THH_cmd_CLOSE_IB(THH_cmd_t cmd_if, IB_port_t port)
+{
+ command_fields_t cmd_desc;
+ THH_cmd_status_t rc;
+
+ FUNC_IN;
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = port;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_CLOSE_IB;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLOSE_IB;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_QUERY_HCA
+ */
+
+THH_cmd_status_t THH_cmd_QUERY_HCA(THH_cmd_t cmd_if, THH_hca_props_t *hca_props)
+{
+ command_fields_t cmd_desc;
+ u_int8_t *outprm;
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_init_hca_st);
+
+ FUNC_IN;
+ outprm = TNMALLOC(u_int8_t, buf_size);
+ if ( !outprm ) {
+ MT_RETURN(THH_CMD_STAT_EAGAIN);
+ }
+ memset(outprm, 0, buf_size);
+
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = 0;
+ cmd_desc.out_param = outprm;
+ cmd_desc.out_param_size = buf_size;
+ cmd_desc.out_trans = TRANS_MAILBOX;
+ cmd_desc.opcode = TAVOR_IF_CMD_QUERY_HCA;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_HCA;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if ( rc != THH_CMD_STAT_OK ) {
+ FREE(outprm);
+ MT_RETURN(rc);
+ }
+
+ if ( hca_props ) {
+ EX_FLD(hca_props, outprm, tavorprm_init_hca_st, hca_core_clock);
+ EX_FLD(hca_props, outprm, tavorprm_init_hca_st, router_qp);
+ EX_BOOL_FLD(hca_props, outprm, tavorprm_init_hca_st, re);
+ EX_BOOL_FLD(hca_props, outprm, tavorprm_init_hca_st, udp);
+ EX_BOOL_FLD(hca_props, outprm, tavorprm_init_hca_st, he);
+ EX_BOOL_FLD(hca_props, outprm, tavorprm_init_hca_st, ud);
+
+ EX_FLD64_SH(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr);
+ EX_FLD(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp);
+ EX_FLD64_SH(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr);
+ EX_FLD(hca_props, outprm, tavorprm_init_hca_st, 
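+/*
+ * The GUID handling in THH_cmd_INIT_IB above turns an 8-byte big-endian
+ * array into the two numeric words guid0_h/guid0_l that the mailbox
+ * macros expect. A worked example with an illustrative GUID:
+ *
+ *   guid0[] = { 0x00,0x02,0xC9,0x01, 0x97,0xF5,0x6E,0x00 }
+ *   memcpy 4 bytes -> temp_u32, then MOSAL_be32_to_cpu(temp_u32)
+ *   yields the numeric value 0x0002C901 for guid0_h on both big- and
+ *   little-endian hosts (the memcpy also avoids unaligned 32-bit loads,
+ *   since guid0 is a plain byte array); guid0_l = 0x97F56E00 likewise.
+ */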
qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq); + EX_FLD64_SH(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee); + EX_FLD64_SH(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq); + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr); + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr); + EX_FLD64_SH(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.log_num_eq); + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr); + + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, udavtable_memory_parameters.l_key); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, udavtable_memory_parameters.pd); + EX_BOOL_FLD(hca_props, outprm, tavorprm_init_hca_st, udavtable_memory_parameters.xlation_en); + + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, multicast_parameters.mc_base_addr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, multicast_parameters.log_mc_table_entry_sz); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, multicast_parameters.log_mc_table_sz); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, multicast_parameters.mc_table_hash_sz); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, multicast_parameters.mc_hash_fn); + + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.mpt_base_adr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.log_mpt_sz); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.pfto); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.mtt_segment_size); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.mtt_version); + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, tpt_parameters.mtt_base_addr); + + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, uar_parameters.uar_base_addr); + EX_FLD(hca_props, outprm, tavorprm_init_hca_st, uar_parameters.uar_page_sz); + EX_FLD64(hca_props, outprm, tavorprm_init_hca_st, uar_parameters.uar_scratch_base_addr); + THH_CMD_MAILBOX_PRINT(outprm, buf_size, __func__); + + THH_CMD_PRINT_HCA_PROPS(hca_props); + } + + FREE(outprm); + MT_RETURN(rc); +} + + +/* + * THH_cmd_SET_IB + */ +THH_cmd_status_t THH_cmd_SET_IB(THH_cmd_t cmd_if, IB_port_t port, + THH_set_ib_props_t *port_init_props) +{ + command_fields_t cmd_desc; + u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_set_ib_st)]; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_set_ib_st); + + FUNC_IN; + if (port_init_props == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG);} + memset(inprm, 0, buf_size); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = port; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_SET_IB; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_SET_IB; + + INS_BOOL_FLD(port_init_props, inprm, tavorprm_set_ib_st, rqk); + INS_FLD(port_init_props, inprm, tavorprm_set_ib_st, capability_mask); + + rc = cmd_invoke(cmd_if, &cmd_desc); + MT_RETURN(rc); +} + +/* + * THH_cmd_SW2HW_MPT + */ 
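+/*
+ * MPT entries follow a strict ownership protocol: software fills in the
+ * entry, SW2HW_MPT (below) hands it to hardware, and the entry must not
+ * be modified again until HW2SW_MPT returns ownership. A typical
+ * registration flow, illustrative only (error handling elided; mpt_index,
+ * buf_pa and buf_len are assumed to come from the caller):
+ *
+ *   THH_mpt_entry_t mpt;
+ *   memset(&mpt, 0, sizeof(mpt));
+ *   mpt.r_w = TRUE;                  // memory region, not a window
+ *   mpt.lr = TRUE; mpt.lw = TRUE;    // local read/write access
+ *   mpt.start_address = buf_pa;
+ *   mpt.reg_wnd_len = buf_len;
+ *   rc = THH_cmd_SW2HW_MPT(cmd_if, mpt_index, &mpt);
+ *   // ... region is usable by hardware; on deregistration:
+ *   rc = THH_cmd_HW2SW_MPT(cmd_if, mpt_index, NULL); // NULL skips readback
+ */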
+THH_cmd_status_t THH_cmd_SW2HW_MPT(THH_cmd_t cmd_if, THH_mpt_index_t mpt_index,
+ THH_mpt_entry_t *mpt_entry)
+{
+ command_fields_t cmd_desc;
+ u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st)];
+ THH_cmd_status_t rc;
+ MT_size_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st);
+
+ FUNC_IN;
+ MTL_DEBUG4("THH_cmd_SW2HW_MPT: mpt_index = "SIZE_T_FMT", buf_size = "SIZE_T_FMT"\n", mpt_index, buf_size);
+ if (mpt_entry == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); }
+ THH_CMD_PRINT_MPT_ENTRY(mpt_entry);
+
+ memset(inprm, 0, buf_size);
+
+ cmd_desc.in_param = inprm;
+ cmd_desc.in_param_size = (u_int32_t)buf_size;
+ cmd_desc.in_trans = TRANS_MAILBOX;
+ cmd_desc.input_modifier = (u_int32_t)mpt_index;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_SW2HW_MPT;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_SW2HW_MPT;
+
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, ver);
+
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, r_w);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, pa);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, lr);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, lw);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, rr);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, rw);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, a);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, eb);
+ INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, m_io);
+
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, status);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, page_size);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, mem_key);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, pd);
+ INS_FLD64(mpt_entry, inprm, tavorprm_mpt_st, start_address);
+ INS_FLD64(mpt_entry, inprm, tavorprm_mpt_st, reg_wnd_len);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, lkey);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, win_cnt);
+ INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, win_cnt_limit);
+ INS_FLD64_SH(mpt_entry, inprm, tavorprm_mpt_st, mtt_seg_adr);
+
+ THH_CMD_MAILBOX_PRINT(inprm, (int)buf_size, __func__);
+#if 1
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+#else
+ MTL_DEBUG4("THH_cmd_SW2HW_MPT: SKIPPING cmd_invoke !!!!!!!!!\n");
+ rc = THH_CMD_STAT_OK;
+#endif
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_MODIFY_MPT
+ */
+THH_cmd_status_t THH_cmd_MODIFY_MPT(THH_cmd_t cmd_if, THH_mpt_index_t mpt_index,
+ THH_mpt_entry_t *mpt_entry, MT_bool modify_entire_entry)
+{
+ command_fields_t cmd_desc;
+ u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st)];
+ THH_cmd_status_t rc;
+ MT_size_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st);
+
+ FUNC_IN;
+ MTL_DEBUG4("THH_cmd_MODIFY_MPT: mpt_index = "SIZE_T_FMT", (in_param)buf_size = "SIZE_T_FMT"\n",
+ mpt_index, buf_size);
+ if (mpt_entry == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); }
+ MTL_DEBUG4("%s: mpt_entry->reg_wnd_len="U64_FMT" mpt_entry->mtt_seg_adr="U64_FMT,
+ __func__,mpt_entry->reg_wnd_len, mpt_entry->mtt_seg_adr);
+ THH_CMD_PRINT_MPT_ENTRY(mpt_entry);
+
+ memset(inprm, 0, buf_size);
+
+ cmd_desc.in_param = inprm;
+ cmd_desc.in_param_size = (u_int32_t)buf_size;
+ cmd_desc.in_trans = TRANS_MAILBOX;
+ cmd_desc.input_modifier = (u_int32_t)mpt_index;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_MODIFY_MPT;
+ cmd_desc.opcode_modifier = (modify_entire_entry == TRUE) ? 
0x5 : 0x3; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLASS_B; + + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, ver); + + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, r_w); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, pa); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, lr); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, lw); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, rr); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, rw); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, a); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, eb); + INS_BOOL_FLD(mpt_entry, inprm, tavorprm_mpt_st, m_io); + + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, status); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, page_size); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, mem_key); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, pd); + INS_FLD64(mpt_entry, inprm, tavorprm_mpt_st, start_address); + INS_FLD64(mpt_entry, inprm, tavorprm_mpt_st, reg_wnd_len); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, lkey); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, win_cnt); + INS_FLD(mpt_entry, inprm, tavorprm_mpt_st, win_cnt_limit); + INS_FLD64_SH(mpt_entry, inprm, tavorprm_mpt_st, mtt_seg_adr); + + THH_CMD_MAILBOX_PRINT(inprm, (int)buf_size, __func__); +#if 1 + rc = cmd_invoke(cmd_if, &cmd_desc); +#else + MTL_DEBUG4("%s: SKIPPING cmd_invoke !!!!!!!!!\n", __func__); + rc = THH_CMD_STAT_OK; +#endif + MT_RETURN(rc); +} + +/* + * THH_cmd_QUERY_MPT + */ +THH_cmd_status_t THH_cmd_QUERY_MPT(THH_cmd_t cmd_if, THH_mpt_index_t mpt_index, + THH_mpt_entry_t *mpt_entry) +{ + command_fields_t cmd_desc; + u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st)]; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st); + + FUNC_IN; + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = (u_int32_t)mpt_index; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_MPT; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_MPT; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + MT_RETURN(rc); + } + + if ( mpt_entry ) { + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, ver); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, r_w); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, pa); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, lr); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, lw); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, rr); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, rw); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, a); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, eb); + EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, m_io); + + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, status); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, page_size); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, mem_key); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, pd); + EX_FLD64(mpt_entry, outprm, tavorprm_mpt_st, start_address); + EX_FLD64(mpt_entry, outprm, tavorprm_mpt_st, reg_wnd_len); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, lkey); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, win_cnt); + EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, win_cnt_limit); + EX_FLD64_SH(mpt_entry, outprm, tavorprm_mpt_st, mtt_seg_adr); + } + + MT_RETURN(rc); +} + +/* + * THH_cmd_HW2SW_MPT + */ +THH_cmd_status_t THH_cmd_HW2SW_MPT(THH_cmd_t cmd_if, THH_mpt_index_t mpt_index, + 
THH_mpt_entry_t *mpt_entry)
+{
+ command_fields_t cmd_desc;
+ u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_mpt_st);
+
+ FUNC_IN;
+ memset(outprm, 0, buf_size);
+
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = (u_int32_t)mpt_index;
+ cmd_desc.out_param = outprm;
+ cmd_desc.out_param_size = buf_size;
+ cmd_desc.out_trans = TRANS_MAILBOX;
+ cmd_desc.opcode = TAVOR_IF_CMD_HW2SW_MPT;
+ /* when output is not necessary, putting 1 in the opcode modifier
+ will cause the command to execute faster */
+ cmd_desc.opcode_modifier = mpt_entry ? 0 : 1;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_HW2SW_MPT;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if ( rc != THH_CMD_STAT_OK ) {
+ MT_RETURN(rc);
+ }
+
+ if ( mpt_entry ) {
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, ver);
+
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, r_w);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, pa);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, lr);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, lw);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, rr);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, rw);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, a);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, eb);
+ EX_BOOL_FLD(mpt_entry, outprm, tavorprm_mpt_st, m_io);
+
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, status);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, page_size);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, mem_key);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, pd);
+ EX_FLD64(mpt_entry, outprm, tavorprm_mpt_st, start_address);
+ EX_FLD64(mpt_entry, outprm, tavorprm_mpt_st, reg_wnd_len);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, lkey);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, win_cnt);
+ EX_FLD(mpt_entry, outprm, tavorprm_mpt_st, win_cnt_limit);
+ EX_FLD64_SH(mpt_entry, outprm, tavorprm_mpt_st, mtt_seg_adr);
+ }
+
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_READ_MTT
+ */
+THH_cmd_status_t THH_cmd_READ_MTT(THH_cmd_t cmd_if, u_int64_t mtt_pa, MT_size_t num_elems,
+ THH_mtt_entry_t *mtt_entry)
+{
+ command_fields_t cmd_desc;
+ u_int8_t *outprm, *iterator;
+ int i, local_num_elts, elts_this_loop, max_elts_per_buffer;
+ u_int32_t buf_size;
+ u_int32_t mtt_pa_transfer[2];
+ THH_cmd_status_t rc = THH_CMD_STAT_OK;
+ MT_bool buf_align_adjust = TRUE;
+
+ /* TBD: need to: a. limit number of entries in the mailbox */
+ /* b. for performance, if the initial addr is odd, */
+ /* do a loop of a single element, then do the rest */
+
+
+ FUNC_IN;
+ if (!num_elems) {
+ MT_RETURN(THH_CMD_STAT_BAD_PARAM);
+ }
+
+ outprm = TNMALLOC(u_int8_t, MAX_OUT_PRM_SIZE);
+ if ( !outprm ) {
+ MT_RETURN(THH_CMD_STAT_EAGAIN);
+ }
+ memset(outprm, 0, MAX_OUT_PRM_SIZE);
+
+ local_num_elts = (int)num_elems;
+ max_elts_per_buffer = (MAX_OUT_PRM_SIZE - MTT_CMD_PA_PREAMBLE_SIZE) / ( PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st));
+
+ while(local_num_elts > 0) {
+
+ elts_this_loop = (local_num_elts > max_elts_per_buffer ? 
max_elts_per_buffer : local_num_elts); + + /* if the mtt_pa address is odd (3 LSBs ignored), and we need to use multiple commands */ + /* and we are also reading an odd number of elements, then decrease the elements in this loop */ + /* by one so that on the next go-around, the reading will start at an even mtt_pa address */ + /* If necessary, the adjustment needs to be performed only once */ + if ((buf_align_adjust) && (local_num_elts > max_elts_per_buffer) && ((mtt_pa>>3)& 0x1) && (!(elts_this_loop & 0x1))) { + elts_this_loop--; + buf_align_adjust = FALSE; + } + + buf_size = elts_this_loop*PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st); + memset(outprm, 0, buf_size); + + iterator = outprm; + + /* The command interface expects the mtt_pa format to be the HIGH order word first, */ + /* then the low-order word. The command interface adjusts endianness within words */ + + mtt_pa_transfer[0]= (u_int32_t)(mtt_pa >> 32); /* MS-DWORD */ + mtt_pa_transfer[1]= (u_int32_t)(mtt_pa & 0xFFFFFFFF); /* LS-DWORD */ + + cmd_desc.in_param = (u_int8_t *)&(mtt_pa_transfer[0]); + cmd_desc.in_param_size = sizeof(mtt_pa_transfer); + cmd_desc.in_trans = TRANS_IMMEDIATE; + cmd_desc.input_modifier = elts_this_loop; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_READ_MTT; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_READ_MTT; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + if ( mtt_entry ) { + for (i = 0; i < elts_this_loop; i++, mtt_entry++, iterator += PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st) ) { + EX_FLD64_SH(mtt_entry, iterator, tavorprm_mtt_st, ptag); + EX_BOOL_FLD(mtt_entry, iterator, tavorprm_mtt_st, p); + } + } + + if (rc != THH_CMD_STAT_OK) { + FREE(outprm); + MT_RETURN(rc); + } + /* update loop parameters */ + mtt_pa += elts_this_loop * PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st); /* incr target pointer */ + local_num_elts -= elts_this_loop; + } + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_WRITE_MTT + */ +THH_cmd_status_t THH_cmd_WRITE_MTT(THH_cmd_t cmd_if, u_int64_t mtt_pa, MT_size_t num_elems, + THH_mtt_entry_t *mtt_entry) +{ +#if 1 + command_fields_t cmd_desc; + u_int8_t *inprm, *iterator; + int i, local_num_elts, elts_this_loop, max_elts_per_buffer; + u_int32_t buf_size; + THH_cmd_status_t rc = THH_CMD_STAT_OK; + MT_bool buf_align_adjust = TRUE; + + /* TBD: need to: a. limit number of entries in the mailbox */ + /* b. for performance, if the initial addr is odd, */ + /* do one loop of an odd number of elements, then do the rest */ + + FUNC_IN; + if ( !STACK_OK ) MT_RETURN(THH_CMD_STAT_EFATAL); + if (!num_elems) { + MT_RETURN(THH_CMD_STAT_BAD_PARAM); + } + + inprm = (u_int8_t*)MALLOC(MAX_IN_PRM_SIZE); + if ( !inprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + local_num_elts = (u_int32_t)num_elems; + max_elts_per_buffer = (MAX_IN_PRM_SIZE - MTT_CMD_PA_PREAMBLE_SIZE) / ( PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st)); + + MTL_DEBUG4("THH_cmd_WRITE_MTT: local_num_elts = %d, max_elts_per_buffer = %d\n",local_num_elts, max_elts_per_buffer); + while(local_num_elts > 0) { + + elts_this_loop = (local_num_elts > max_elts_per_buffer ? 
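+/*
+ * A worked example of the alignment trimming in these loops (numbers are
+ * illustrative): each tavorprm_mtt_st entry occupies 8 bytes, so
+ * (mtt_pa>>3)&1 tests whether the starting MTT index is odd. With a
+ * starting index of 5 (odd), max_elts_per_buffer = 256 and
+ * num_elems = 600, the first command is trimmed from 256 to 255 entries;
+ * the second command then starts at index 5 + 255 = 260, which is even,
+ * and every later chunk stays even-aligned, so the adjustment is only
+ * ever needed once.
+ */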
max_elts_per_buffer : local_num_elts);
+
+ /* if the mtt_pa address is odd (3 LSBs ignored), and we need to use multiple commands */
+ /* and we are also writing an odd number of elements, then decrease the elements in this loop */
+ /* by one so that on the next go-around, the writing will start at an even mtt_pa address */
+ /* If necessary, the adjustment needs to be performed only once */
+ if ((buf_align_adjust) && (local_num_elts > max_elts_per_buffer) && ((mtt_pa>>3)& 0x1) && (!(elts_this_loop & 0x1))) {
+ elts_this_loop--;
+ buf_align_adjust = FALSE;
+ }
+ if (elts_this_loop <= 0) {
+ break;
+ }
+
+ buf_size = elts_this_loop*PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st) + MTT_CMD_PA_PREAMBLE_SIZE;
+ memset(inprm, 0, buf_size);
+ iterator = inprm;
+
+ MTL_DEBUG4("THH_cmd_WRITE_MTT: elts_this_loop = %d, buf_size = %d, buf_align_adjust = %d\n",elts_this_loop, buf_size, buf_align_adjust);
+
+ cmd_desc.in_param = inprm;
+ cmd_desc.in_param_size = buf_size;
+ cmd_desc.in_trans = TRANS_MAILBOX;
+ cmd_desc.input_modifier = elts_this_loop;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_WRITE_MTT;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_WRITE_MTT;
+
+ /* copy */
+ ((u_int32_t*)iterator)[0]= (u_int32_t)(mtt_pa >> 32); /* MS-DWORD */
+ ((u_int32_t*)iterator)[1]= (u_int32_t)(mtt_pa & 0xFFFFFFFF); /* LS-DWORD */
+
+ iterator += MTT_CMD_PA_PREAMBLE_SIZE;
+
+ for (i = 0; i < elts_this_loop; i++, mtt_entry++, iterator += PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st) ) {
+ INS_FLD64_SH(mtt_entry, iterator, tavorprm_mtt_st, ptag);
+ INS_BOOL_FLD(mtt_entry, iterator, tavorprm_mtt_st, p);
+ }
+
+
+ THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__);
+ THH_CMD_PRINT_MTT_ENTRIES(elts_this_loop, inprm);
+#if 1
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if (rc != THH_CMD_STAT_OK) {
+ FREE(inprm);
+ MT_RETURN(rc);
+ }
+#else
+ MTL_DEBUG4("THH_cmd_WRITE_MTT: SKIPPING cmd_invoke\n");
+ rc = THH_CMD_STAT_INTERNAL_ERR;
+#endif
+ /* update loop parameters */
+ mtt_pa += elts_this_loop * PSEUDO_MT_BYTE_SIZE(tavorprm_mtt_st); /* incr target pointer */
+ local_num_elts -= elts_this_loop;
+ }
+ FREE(inprm);
+ MT_RETURN(rc);
+#else
+ MT_RETURN(THH_CMD_STAT_INTERNAL_ERR);
+#endif
+}
+
+
+THH_cmd_status_t THH_cmd_SYNC_TPT(THH_cmd_t cmd_if)
+{
+ command_fields_t cmd_prms = {0};
+ THH_cmd_status_t rc;
+
+ FUNC_IN;
+ cmd_prms.opcode = TAVOR_IF_CMD_SYNC_TPT;
+ cmd_prms.in_trans = TRANS_NA;
+ cmd_prms.out_trans = TRANS_NA;
+ cmd_prms.exec_time_micro = TAVOR_IF_CMD_ETIME_SYNC_TPT;
+ rc = cmd_invoke(cmd_if, &cmd_prms);
+ if ( rc != THH_CMD_STAT_OK ) {
+ MTL_ERROR1(MT_FLFMT("THH_cmd_SYNC_TPT failed: %s\n"), str_THH_cmd_status_t(rc));
+ }
+ MT_RETURN(rc);
+}
+
+
+/*
+ * THH_cmd_MAP_EQ
+ */
+THH_cmd_status_t THH_cmd_MAP_EQ(THH_cmd_t cmd_if, THH_eqn_t eqn, u_int64_t event_mask)
+{
+ command_fields_t cmd_desc;
+ THH_cmd_status_t rc;
+ u_int32_t event_mask_transfer[2];
+
+ FUNC_IN;
+
+
+ /* The command interface expects the event_mask format to be the HIGH order word first, */
+ /* then the low-order word. The command interface adjusts endianness within words */
+
+ event_mask_transfer[0]= (u_int32_t)(event_mask >> 32); /* MS-DWORD */
+ event_mask_transfer[1]= (u_int32_t)(event_mask & 0xFFFFFFFF); /* LS-DWORD */
+
+ MTL_DEBUG4("THH_cmd_MAP_EQ: eqn = 0x%x, event_mask = "U64_FMT"\n", eqn, event_mask);
+ MTL_DEBUG4("THH_cmd_MAP_EQ: event_mask_transfer [0] = 0x%x, event_mask_transfer [1] = 0x%x\n",
+ event_mask_transfer[0], event_mask_transfer[1]);
+
+ cmd_desc.in_param = (u_int8_t *)&(event_mask_transfer[0]);
+ cmd_desc.in_param_size = sizeof(event_mask_transfer);
+ cmd_desc.in_trans = TRANS_IMMEDIATE;
+ cmd_desc.input_modifier = eqn;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_MAP_EQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_MAP_EQ;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_SW2HW_EQ
+ */
+THH_cmd_status_t THH_cmd_SW2HW_EQ(THH_cmd_t cmd_if, THH_eqn_t eqn, THH_eqc_t *eq_context)
+{
+ command_fields_t cmd_desc;
+ u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st);
+
+ FUNC_IN;
+ if (eq_context == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); }
+ memset(inprm, 0, buf_size);
+
+ THH_CMD_PRINT_EQ_CONTEXT(eq_context);
+
+ cmd_desc.in_param = inprm;
+ cmd_desc.in_param_size = buf_size;
+ cmd_desc.in_trans = TRANS_MAILBOX;
+ cmd_desc.input_modifier = eqn;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_SW2HW_EQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_SW2HW_EQ;
+
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, st);
+
+ INS_BOOL_FLD(eq_context, inprm, tavorprm_eqc_st, oi);
+ INS_BOOL_FLD(eq_context, inprm, tavorprm_eqc_st, tr);
+
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, owner);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, status);
+ INS_FLD64(eq_context, inprm, tavorprm_eqc_st, start_address);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, usr_page);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, log_eq_size);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, pd);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, intr);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, lost_count);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, lkey);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, consumer_indx);
+ INS_FLD(eq_context, inprm, tavorprm_eqc_st, producer_indx);
+ THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__);
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_HW2SW_EQ
+ */
+THH_cmd_status_t THH_cmd_HW2SW_EQ(THH_cmd_t cmd_if, THH_eqn_t eqn, THH_eqc_t *eq_context)
+{
+ command_fields_t cmd_desc;
+ u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st);
+
+ FUNC_IN;
+ memset(outprm, 0, buf_size);
+
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = eqn;
+ cmd_desc.out_param = outprm;
+ cmd_desc.out_param_size = buf_size;
+ cmd_desc.out_trans = TRANS_MAILBOX;
+ cmd_desc.opcode = TAVOR_IF_CMD_HW2SW_EQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_HW2SW_EQ;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if ( rc != THH_CMD_STAT_OK ) {
+ MT_RETURN(rc);
+ }
+
+ if ( eq_context ) {
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, st);
+ EX_BOOL_FLD(eq_context, outprm, tavorprm_eqc_st, oi);
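+/*
+ * Both MAP_EQ above and READ_MTT earlier pass a 64-bit value as a
+ * TRANS_IMMEDIATE parameter: the most-significant 32-bit word goes
+ * first, and cmd_invoke only corrects endianness within each word. A
+ * tiny helper capturing the pattern (illustrative, not part of the
+ * driver):
+ *
+ *   static void split_u64_msw_first(u_int64_t v, u_int32_t out[2])
+ *   {
+ *       out[0] = (u_int32_t)(v >> 32);        // MS-DWORD first
+ *       out[1] = (u_int32_t)(v & 0xFFFFFFFF); // then LS-DWORD
+ *   }
+ */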
+ EX_BOOL_FLD(eq_context, outprm, tavorprm_eqc_st, tr);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, owner);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, status);
+ EX_FLD64(eq_context, outprm, tavorprm_eqc_st, start_address);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, usr_page);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, log_eq_size);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, pd);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, intr);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, lost_count);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, lkey);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, consumer_indx);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, producer_indx);
+ }
+
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_QUERY_EQ
+ */
+THH_cmd_status_t THH_cmd_QUERY_EQ(THH_cmd_t cmd_if, THH_eqn_t eqn, THH_eqc_t *eq_context)
+{
+ command_fields_t cmd_desc;
+ u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_eqc_st);
+
+ FUNC_IN;
+ memset(outprm, 0, buf_size);
+
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = eqn;
+ cmd_desc.out_param = outprm;
+ cmd_desc.out_param_size = buf_size;
+ cmd_desc.out_trans = TRANS_MAILBOX;
+ cmd_desc.opcode = TAVOR_IF_CMD_HW2SW_EQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_HW2SW_EQ;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if ( rc != THH_CMD_STAT_OK ) {
+ MT_RETURN(rc);
+ }
+
+ if ( eq_context ) {
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, st);
+
+ EX_BOOL_FLD(eq_context, outprm, tavorprm_eqc_st, oi);
+ EX_BOOL_FLD(eq_context, outprm, tavorprm_eqc_st, tr);
+
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, owner);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, status);
+ EX_FLD64(eq_context, outprm, tavorprm_eqc_st, start_address);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, usr_page);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, log_eq_size);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, pd);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, intr);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, lost_count);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, lkey);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, consumer_indx);
+ EX_FLD(eq_context, outprm, tavorprm_eqc_st, producer_indx);
+ }
+
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_SW2HW_CQ
+ */
+THH_cmd_status_t THH_cmd_SW2HW_CQ(THH_cmd_t cmd_if, HH_cq_hndl_t cqn, THH_cqc_t *cq_context)
+{
+ command_fields_t cmd_desc;
+ u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st);
+
+ FUNC_IN;
+ if (cq_context == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); }
+ memset(inprm, 0, buf_size);
+
+ THH_CMD_PRINT_CQ_CONTEXT(cq_context);
+
+ cmd_desc.in_param = inprm;
+ cmd_desc.in_param_size = buf_size;
+ cmd_desc.in_trans = TRANS_MAILBOX;
+ cmd_desc.input_modifier = cqn;
+ cmd_desc.out_param = 0;
+ cmd_desc.out_param_size = 0;
+ cmd_desc.out_trans = TRANS_NA;
+ cmd_desc.opcode = TAVOR_IF_CMD_SW2HW_CQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_SW2HW_CQ;
+
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, st);
+
+ INS_BOOL_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, oi);
+ INS_BOOL_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, tr);
+
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, status);
+ INS_FLD64(cq_context, inprm, tavorprm_completion_queue_context_st, start_address);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, usr_page);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, log_cq_size);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, e_eqn);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, c_eqn);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, pd);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, l_key);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, last_notified_indx);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, solicit_producer_indx);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, consumer_indx);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, producer_indx);
+ INS_FLD(cq_context, inprm, tavorprm_completion_queue_context_st, cqn);
+ THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__);
+#if 1
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+#else
+ MTL_DEBUG4("THH_cmd_SW2HW_CQ: SKIPPING cmd_invoke !!!!!!!!!\n");
+ rc = THH_CMD_STAT_OK;
+#endif
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_HW2SW_CQ
+ */
+THH_cmd_status_t THH_cmd_HW2SW_CQ(THH_cmd_t cmd_if, HH_cq_hndl_t cqn, THH_cqc_t *cq_context)
+{
+ command_fields_t cmd_desc;
+ u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st)];
+ THH_cmd_status_t rc;
+ u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st);
+
+ FUNC_IN;
+ MTL_DEBUG4("THH_cmd_HW2SW_CQ: cqn = 0x%x, cq_context = 0x%p\n", cqn, cq_context);
+ memset(outprm, 0, buf_size);
+
+ cmd_desc.in_param = 0;
+ cmd_desc.in_param_size = 0;
+ cmd_desc.in_trans = TRANS_NA;
+ cmd_desc.input_modifier = cqn;
+ cmd_desc.out_param = outprm;
+ cmd_desc.out_param_size = buf_size;
+ cmd_desc.out_trans = TRANS_MAILBOX;
+ cmd_desc.opcode = TAVOR_IF_CMD_HW2SW_CQ;
+ cmd_desc.opcode_modifier = 0;
+ cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_HW2SW_CQ;
+
+ rc = cmd_invoke(cmd_if, &cmd_desc);
+ if ( rc != THH_CMD_STAT_OK ) {
+ MT_RETURN(rc);
+ }
+
+
+ if ( cq_context ) {
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, st);
+
+ EX_BOOL_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, oi);
+ EX_BOOL_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, tr);
+
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, status);
+ EX_FLD64(cq_context, outprm, tavorprm_completion_queue_context_st, start_address);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, usr_page);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, log_cq_size);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, e_eqn);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, c_eqn);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, pd);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, l_key);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, last_notified_indx);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, solicit_producer_indx);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, consumer_indx);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, producer_indx);
+ EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, cqn);
+ THH_CMD_PRINT_CQ_CONTEXT(cq_context);
+ }
+ MT_RETURN(rc);
+}
+
+/*
+ * THH_cmd_QUERY_CQ
+ */
+THH_cmd_status_t THH_cmd_QUERY_CQ(THH_cmd_t cmd_if, HH_cq_hndl_t cqn, THH_cqc_t 
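+/*
+ * CQ contexts follow the same ownership protocol as MPT and EQ entries:
+ * SW2HW_CQ arms the context in hardware, HW2SW_CQ reclaims it, and
+ * QUERY_CQ below reads it back without transferring ownership. A minimal
+ * teardown sketch (illustrative; cqn is assumed to name a CQ previously
+ * handed to hardware with SW2HW_CQ):
+ *
+ *   THH_cqc_t cqc;
+ *   rc = THH_cmd_HW2SW_CQ(cmd_if, cqn, &cqc); // or NULL if the final
+ *                                             // context is not needed
+ */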
*cq_context) +{ + command_fields_t cmd_desc; + u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st)]; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_completion_queue_context_st); + + FUNC_IN; + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = cqn; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_CQ; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_CQ; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + MT_RETURN(rc); + } + + if ( cq_context ) { + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, st); + + EX_BOOL_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, oi); + EX_BOOL_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, tr); + + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, status); + EX_FLD64(cq_context, outprm, tavorprm_completion_queue_context_st, start_address); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, usr_page); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, log_cq_size); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, e_eqn); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, c_eqn); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, pd); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, l_key); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, last_notified_indx); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, solicit_producer_indx); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, consumer_indx); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, producer_indx); + EX_FLD(cq_context, outprm, tavorprm_completion_queue_context_st, cqn); + THH_CMD_PRINT_CQ_CONTEXT(cq_context); + } + + MT_RETURN(rc); +} + +/* + * THH_cmd_RESIZE_CQ + */ +THH_cmd_status_t THH_cmd_RESIZE_CQ(THH_cmd_t cmd_if, HH_cq_hndl_t cqn, + u_int64_t start_address, u_int32_t l_key, u_int8_t log_cq_size, + u_int32_t *new_producer_index_p) +{ + command_fields_t cmd_desc; + u_int32_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_resize_cq_st)]; + THH_cmd_status_t rc; + const u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_resize_cq_st); + u_int32_t out_param_tmp[2]; + + FUNC_IN; + memset(inprm, 0, buf_size); + + cmd_desc.in_param = (u_int8_t*)inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = cqn; + cmd_desc.out_trans = TRANS_IMMEDIATE; + cmd_desc.out_param = (u_int8_t*)out_param_tmp; + cmd_desc.opcode = TAVOR_IF_CMD_RESIZE_CQ; + cmd_desc.opcode_modifier = new_producer_index_p ? 
0 /* legacy mode */: + 1 /* fixed resize: new_pi= old_pi % new_size */; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_RESIZE_CQ; + + memset(inprm,0,buf_size); + inprm[MT_BYTE_OFFSET(tavorprm_resize_cq_st, start_addr_h) >> 2]= (u_int32_t)(start_address >> 32); + inprm[MT_BYTE_OFFSET(tavorprm_resize_cq_st, start_addr_l) >> 2]= (u_int32_t)(start_address & 0xFFFFFFFF); + inprm[MT_BYTE_OFFSET(tavorprm_resize_cq_st, l_key) >> 2]= l_key; + MT_INSERT_ARRAY32(inprm, log_cq_size , + MT_BIT_OFFSET(tavorprm_resize_cq_st, log_cq_size), MT_BIT_SIZE(tavorprm_resize_cq_st, log_cq_size)) ; + //MTL_ERROR1(MT_FLFMT("RESIZE_CQ: mailbox[0-3]= 0x%X 0x%X 0x%X 0x%X"),inprm[0],inprm[1],inprm[2],inprm[3]); + THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__); + rc = cmd_invoke(cmd_if, &cmd_desc); + if (new_producer_index_p) *new_producer_index_p= out_param_tmp[0]; /* new producer index is in out_param_h */ + MT_RETURN(rc); +} + + +/* + * GET_QP_CMD_EXEC_TIME -- returns QP command exec time in microseconds + */ +static u_int32_t get_qp_cmd_exec_time(THH_qpee_transition_t trans) +{ + switch(trans) { + case QPEE_TRANS_RST2INIT: + return TAVOR_IF_CMD_ETIME_RST2INIT_QPEE; + case QPEE_TRANS_INIT2INIT: + return TAVOR_IF_CMD_ETIME_INIT2INIT_QPEE; + case QPEE_TRANS_INIT2RTR: + return TAVOR_IF_CMD_ETIME_INIT2RTR_QPEE; + case QPEE_TRANS_RTR2RTS: + return TAVOR_IF_CMD_ETIME_RTR2RTS_QPEE; + case QPEE_TRANS_RTS2RTS: + return TAVOR_IF_CMD_ETIME_RTS2RTS_QPEE; + case QPEE_TRANS_SQERR2RTS: + return TAVOR_IF_CMD_ETIME_SQERR2RTS_QPEE; + case QPEE_TRANS_SQD2RTS: + return TAVOR_IF_CMD_ETIME_SQD2RTS_QPEE; + case QPEE_TRANS_2ERR: + return TAVOR_IF_CMD_ETIME_2ERR_QPEE; + case QPEE_TRANS_RTS2SQD: + return TAVOR_IF_CMD_ETIME_RTS2SQD; + case QPEE_TRANS_ERR2RST: + return TAVOR_IF_CMD_ETIME_ERR2RST_QPEE; + default: + MTL_ERROR1(MT_FLFMT("no such qp transition exists \n")); + return 0; + } +} +/* + * THH_cmd_MODIFY_QPEE + * is_ee: 0 = QP, 1 = EE + */ +static THH_cmd_status_t THH_cmd_MODIFY_QPEE( THH_cmd_t cmd_if, MT_bool is_ee, u_int32_t qpn, THH_qpee_transition_t trans, + THH_qpee_context_t *qp_context,u_int32_t optparammask) +{ + command_fields_t cmd_desc; + u_int8_t *inprm = NULL; + THH_cmd_status_t rc; + u_int32_t buf_size, sqd_event_req; + u_int32_t temp_u32; + tavor_if_qp_state_t tavor_if_qp_state; + + FUNC_IN; + + /* we save the value of sqd_event bit in xaction field & clr it to receive a normal xaction value. */ + sqd_event_req = trans & THH_CMD_SQD_EVENT_REQ; + trans &= ~THH_CMD_SQD_EVENT_REQ; + + CMDS_DBG("%s: TRANSACTION val = 0x%x\n", __func__, trans); + + /* see which transition was requested */ + switch(trans) { + /* have input mailbox only */ + case QPEE_TRANS_RST2INIT: + case QPEE_TRANS_INIT2INIT: + case QPEE_TRANS_INIT2RTR: + case QPEE_TRANS_RTR2RTS: + case QPEE_TRANS_RTS2RTS: + case QPEE_TRANS_SQERR2RTS: + case QPEE_TRANS_SQD2RTS: + if (qp_context == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + + inprm = (u_int8_t *)MALLOC(PSEUDO_MT_BYTE_SIZE(tavorprm_qp_ee_state_transitions_st)); + if (inprm == NULL) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + + buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_qp_ee_state_transitions_st); + memset(inprm, 0, buf_size); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = qpn | (is_ee ? 
0x1000000 : 0); + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = (tavor_if_cmd_t)trans; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = get_qp_cmd_exec_time(trans); + + THH_CMD_PRINT_QP_CONTEXT(qp_context); + + /* translate VAPI qp state to Tavor qp state, and fail if not valid*/ + if (!THH_vapi_qpstate_2_tavor_qpstate(qp_context->state, &tavor_if_qp_state)) { + CMDS_DBG("%s: VAPI QP state (0x%x) is not valid\n", __func__, qp_context->state); + rc = THH_CMD_STAT_EFATAL; + goto retn; + } + + MT_INSERT_ARRAY32(inprm, optparammask, MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, opt_param_mask), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, opt_param_mask)); + + // 'te' field has been removed. + //QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, te); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, de); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st,pm_state); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, st); + + MT_INSERT_ARRAY32(inprm, tavor_if_qp_state, MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.state), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.state)); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, sched_queue); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, msg_max); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, mtu); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, usr_page); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, local_qpn_een); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, remote_qpn_een); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.pkey_index); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.port_number); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.rlid); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.my_lid_path_bits); + + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.g); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.rnr_retry); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.hop_limit); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.max_stat_rate); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.mgid_index); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.ack_timeout); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.flow_label); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.tclass); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.sl); + + memcpy(&temp_u32, &(qp_context->primary_address_path.rgid[0]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_127_96), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_127_96)); + + memcpy(&temp_u32, &(qp_context->primary_address_path.rgid[4]), sizeof(u_int32_t)); + 
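+ /* As in the QUERY path below: the RGID is an array of unsigned chars with no
+  * 4-byte-alignment guarantee, so each dword is memcpy'd into temp_u32 and
+  * converted from big-endian (GID wire order) before insertion into the mailbox. */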
MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_95_64), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_95_64)); + + memcpy(&temp_u32, &(qp_context->primary_address_path.rgid[8]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_63_32), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_63_32)); + + memcpy(&temp_u32, &(qp_context->primary_address_path.rgid[12]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_31_0), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_31_0)); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.pkey_index); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.port_number); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.rlid); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.my_lid_path_bits); + + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.g); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.rnr_retry); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.hop_limit); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.max_stat_rate); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.mgid_index); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.ack_timeout); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.flow_label); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.tclass); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.sl); + + memcpy(&temp_u32, &(qp_context->alternative_address_path.rgid[0]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_127_96), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_127_96)); + + memcpy(&temp_u32, &(qp_context->alternative_address_path.rgid[4]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_95_64), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_95_64)); + + memcpy(&temp_u32, &(qp_context->alternative_address_path.rgid[8]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_63_32), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_63_32)); + + memcpy(&temp_u32, &(qp_context->alternative_address_path.rgid[12]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + 
MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_31_0), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_31_0)); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rdd); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, pd); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, wqe_base_adr); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, wqe_lkey); + + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, ssc); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, sic); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, sae); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, swe); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, sre); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, retry_count); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, sra_max); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, flight_lim); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, ack_req_freq); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, next_send_psn); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, cqn_snd); + QP_INS_FLD64(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, next_snd_wqe); + + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rsc); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, ric); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rae); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rwe); + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rre); + + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, rra_max); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, next_rcv_psn); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, min_rnr_nak); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, ra_buff_indx); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, cqn_rcv); + QP_INS_FLD64(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, next_rcv_wqe); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, q_key); + QP_INS_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, srqn); + + QP_INS_BOOL_FLD(qp_context, inprm, tavorprm_qp_ee_state_transitions_st, srq); + + rc = cmd_invoke(cmd_if, &cmd_desc); + goto retn; + + /* No mailboxes, and no immed data */ + case QPEE_TRANS_2ERR: + case QPEE_TRANS_RTS2SQD: + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = qpn | ( (sqd_event_req && (trans == QPEE_TRANS_RTS2SQD)) ? TAVOR_IF_SQD_EVENT_FLAG:0 ); + cmd_desc.input_modifier |= (is_ee ? 0x1000000 : 0); + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = (tavor_if_cmd_t)trans; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = get_qp_cmd_exec_time(trans); + + rc = cmd_invoke(cmd_if, &cmd_desc); + goto retn; + + /* output mailbox only */ + /* matan: this is now the ANY2RST xition. */ + case QPEE_TRANS_ERR2RST: + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = qpn | (is_ee ? 
0x1000000 : 0); + cmd_desc.out_param = 0; /* not using outbox */ + cmd_desc.out_param_size = 0; /* not using outbox */ + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = (tavor_if_cmd_t)trans; + /* matan: ANY2RST is always called with (opcode_modifier |= 2), meaning no need + to move into ERR before RST. Also, set LSB so that no outbox will be generated */ + cmd_desc.opcode_modifier = 3 ; /* bits 0 and 1 set */ + cmd_desc.exec_time_micro = get_qp_cmd_exec_time(trans); + + rc = cmd_invoke(cmd_if, &cmd_desc); + goto retn; + default: + MTL_ERROR1("%s: BAD TRANSACTION val = 0x%x\n", __func__, trans); + CMDS_DBG("%s: BAD TRANSACTION val = 0x%x\n", __func__, trans); + MT_RETURN(THH_CMD_STAT_EBADARG); + } + MT_RETURN(THH_CMD_STAT_EFATAL); // ??? BAD_ARG ?? + +retn: + if (inprm != NULL) { + FREE(inprm); + } + MT_RETURN(rc); +} + +/* + * THH_cmd_MODIFY_QP + */ +THH_cmd_status_t THH_cmd_MODIFY_QP(THH_cmd_t cmd_if, IB_wqpn_t qpn, + THH_qpee_transition_t trans, + THH_qpee_context_t *qp_context, + u_int32_t optparammask) +{ + CMDS_DBG("%s: TRANSACTION val = 0x%x\n", __func__, trans); + return (THH_cmd_MODIFY_QPEE(cmd_if,0,qpn,trans,qp_context,optparammask)); +} + +/* + * THH_cmd_MODIFY_EE + */ +THH_cmd_status_t THH_cmd_MODIFY_EE(THH_cmd_t cmd_if, IB_eecn_t eecn, + THH_qpee_transition_t trans, + THH_qpee_context_t *ee_context, + u_int32_t optparammask) +{ + return (THH_cmd_MODIFY_QPEE(cmd_if,1,eecn,trans,ee_context,optparammask)); +} + + +/* + * THH_cmd_QUERY_QPEE + */ +#ifdef VXWORKS_OS +static void THH_cmd_QUERY_QPEE_fill(THH_qpee_context_t *qp_context,u_int8_t *outprm) +{ + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rdd); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, pd); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, wqe_base_adr); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, wqe_lkey); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ssc); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sic); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sae); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, swe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sre); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, retry_count); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sra_max); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, flight_lim); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ack_req_freq); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_send_psn); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, cqn_snd); + QP_EX_FLD64(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_snd_wqe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rsc); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ric); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rae); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rwe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rre); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rra_max); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_rcv_psn); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, min_rnr_nak); + 
QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ra_buff_indx); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, cqn_rcv); + QP_EX_FLD64(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_rcv_wqe); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, q_key); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, srqn); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, srq); + /* THH_CMD_PRINT_QP_CONTEXT(qp_context); */ +} +#endif +static THH_cmd_status_t THH_cmd_QUERY_QPEE( THH_cmd_t cmd_if, MT_bool is_ee, u_int32_t qpn, + THH_qpee_context_t *qp_context) +{ + command_fields_t cmd_desc; + u_int8_t *outprm; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_qp_ee_state_transitions_st); + u_int32_t temp_u32; + tavor_if_qp_state_t tavor_if_qp_state; + + FUNC_IN; + outprm = TNMALLOC(u_int8_t, buf_size); + if ( !outprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = qpn; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_QPEE; + cmd_desc.opcode_modifier = is_ee ? 1 : 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_QUERY_QPEE; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + FREE(outprm); + MT_RETURN(rc); + } + + if ( qp_context ) { + // 'te' field has been removed. + //QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, te); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, de); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, pm_state); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, st); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sched_queue); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, msg_max); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, mtu); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, usr_page); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, local_qpn_een); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, remote_qpn_een); + + tavor_if_qp_state = (tavor_if_qp_state_t) + MT_EXTRACT_ARRAY32(outprm, MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.state), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.state)); + if(!THH_tavor_qpstate_2_vapi_qpstate(tavor_if_qp_state, &(qp_context->state))){ + CMDS_DBG("%s: TAVOR QP state (0x%x) is not valid\n", __func__, tavor_if_qp_state); + FREE(outprm); + return THH_CMD_STAT_EFATAL; + } + + /*T.D.(matan): change along with the rest of SQ_DRAINING improvements.*/ + qp_context->sq_draining = (qp_context->state & THH_CMD_QP_DRAINING_FLAG) ? 
TRUE:FALSE; + qp_context->state = (VAPI_qp_state_t) (qp_context->state & ~(THH_CMD_QP_DRAINING_FLAG)); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.pkey_index); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.port_number); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.rlid); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.my_lid_path_bits); + + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.g); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.rnr_retry); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.hop_limit); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.max_stat_rate); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.mgid_index); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.ack_timeout); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.flow_label); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.tclass); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, primary_address_path.sl); + + /* extract RGID. Note that get the RGID from the command object as 4 double-words, each in CPU-endianness. */ + /* Need to take them one at a time, and convert each to big-endian before storing in the output RGID array */ + /* Note that need to memcpy each 4 bytes to a temporary u_int32_t variable, since there is no guarantee */ + /* that the RGID is 4-byte aligned (it is an array of unsigned chars) */ + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_127_96), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_127_96))); + memcpy(&(qp_context->primary_address_path.rgid[0]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_95_64), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_95_64))); + memcpy(&(qp_context->primary_address_path.rgid[4]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_63_32), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_63_32))); + memcpy(&(qp_context->primary_address_path.rgid[8]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_31_0), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.primary_address_path.rgid_31_0))); + memcpy(&(qp_context->primary_address_path.rgid[12]), &temp_u32, sizeof(u_int32_t)); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.pkey_index); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.port_number); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.rlid); + 
QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.my_lid_path_bits); + + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.g); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.rnr_retry); + + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.hop_limit); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.max_stat_rate); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.mgid_index); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.ack_timeout); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.flow_label); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.tclass); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, alternative_address_path.sl); + + /* extract RGID. Note that get the RGID from the command object as 4 double-words, each in CPU-endianness. */ + /* Need to take them one at a time, and convert each to big-endian before storing in the output RGID array */ + /* Note that need to memcpy each 4 bytes to a temporary u_int32_t variable, since there is no guarantee */ + /* that the RGID is 4-byte aligned (it is an array of unsigned chars) */ + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_127_96), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_127_96))); + memcpy(&(qp_context->alternative_address_path.rgid[0]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_95_64), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_95_64))); + memcpy(&(qp_context->alternative_address_path.rgid[4]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_63_32), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_63_32))); + memcpy(&(qp_context->alternative_address_path.rgid[8]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_31_0), + MT_BIT_SIZE(tavorprm_qp_ee_state_transitions_st, qpc_eec_data.alternative_address_path.rgid_31_0))); + memcpy(&(qp_context->alternative_address_path.rgid[12]), &temp_u32, sizeof(u_int32_t)); + + /* TBD: ???? following code beak Vapi in Vxworks. + * If compile without inline code, then it work. 
+ * It look like compiler error with optimization + */ +#ifndef VXWORKS_OS + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rdd); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, pd); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, wqe_base_adr); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, wqe_lkey); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ssc); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sic); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sae); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, swe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sre); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, retry_count); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, sra_max); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, flight_lim); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ack_req_freq); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_send_psn); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, cqn_snd); + QP_EX_FLD64(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_snd_wqe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rsc); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ric); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rae); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rwe); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rre); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, rra_max); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_rcv_psn); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, min_rnr_nak); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, ra_buff_indx); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, cqn_rcv); + QP_EX_FLD64(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, next_rcv_wqe); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, q_key); + QP_EX_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, srqn); + QP_EX_BOOL_FLD(qp_context, outprm, tavorprm_qp_ee_state_transitions_st, srq); + /* THH_CMD_PRINT_QP_CONTEXT(qp_context); */ + } + +#else + THH_cmd_QUERY_QPEE_fill(qp_context,outprm); +#endif + + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_QUERY_QP + */ +THH_cmd_status_t THH_cmd_QUERY_QP(THH_cmd_t cmd_if, IB_wqpn_t qpn, + THH_qpee_context_t *qp_context) +{ + return (THH_cmd_QUERY_QPEE(cmd_if,0,qpn,qp_context)); +} +/* + * THH_cmd_QUERY_EE + */ +THH_cmd_status_t THH_cmd_QUERY_EE(THH_cmd_t cmd_if, IB_eecn_t eecn, + THH_qpee_context_t *ee_context) +{ + return (THH_cmd_QUERY_QPEE(cmd_if,1,eecn,ee_context)); +} + +/* + * THH_cmd_CONF_SPECIAL_QP + */ +THH_cmd_status_t THH_cmd_CONF_SPECIAL_QP(THH_cmd_t cmd_if, VAPI_special_qp_t qp_type, + IB_wqpn_t base_qpn) +{ + command_fields_t cmd_desc; + u_int8_t op_modifier; + THH_cmd_status_t rc; + + FUNC_IN; + MTL_DEBUG4("%s: ENTERING \n", __func__); + switch(qp_type) { + case VAPI_SMI_QP: + op_modifier = 0; + break; + case VAPI_GSI_QP: + op_modifier = 1; + break; + case VAPI_RAW_IPV6_QP: + op_modifier = 2; + break; + case VAPI_RAW_ETY_QP: + op_modifier = 3; + break; + default: + 
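+ /* any other VAPI_special_qp_t value is not supported by CONF_SPECIAL_QP */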
MT_RETURN (THH_CMD_STAT_EBADARG); + } + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = base_qpn; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_CONF_SPECIAL_QP; + cmd_desc.opcode_modifier = op_modifier; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CONF_SPECIAL_QP; + + rc = cmd_invoke(cmd_if, &cmd_desc); + MT_RETURN(rc); +} + +/* + * THH_cmd_MAD_IFC + */ +THH_cmd_status_t THH_cmd_MAD_IFC(THH_cmd_t cmd_if, + MT_bool mkey_validate, + IB_lid_t slid, /* SLID is ignored if mkey_validate is false */ + IB_port_t port, + void *mad_in, + void *mad_out) +{ + struct cmd_if_context_st *cmdif_p = (struct cmd_if_context_st *)cmd_if; + command_fields_t cmd_desc; + THH_cmd_status_t rc; + u_int32_t i, *int32_inbuf, *int32_outbuf, *orig_inbuf = NULL, *int32_temp_inbuf = NULL; + + /* support NULL mad_out */ + static u_int32_t dummy_mad_out[256/sizeof(u_int32_t)]; /* STATIC ! (not on stack) */ + + + FUNC_IN; + if (mad_in == NULL) return THH_CMD_STAT_EBADARG; + int32_temp_inbuf = (u_int32_t*)MALLOC(256); + if (int32_temp_inbuf == NULL) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + int32_inbuf = (u_int32_t *) mad_in; + int32_outbuf = (u_int32_t *) (mad_out == NULL ? dummy_mad_out : mad_out); + orig_inbuf = int32_temp_inbuf; + + cmd_desc.in_param = (u_int8_t *) int32_temp_inbuf; + cmd_desc.in_param_size = 256; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = (port & 3); + /* For Mkey validation the MAD's source LID is required (upper 16 bits of input mod.) */ + if ((mkey_validate) && + (THH_FW_VER_VALUE(cmdif_p->fw_props.fw_rev_major, + cmdif_p->fw_props.fw_rev_minor, + cmdif_p->fw_props.fw_rev_subminor) > THH_FW_VER_VALUE(1,0x17,0) ) ) { + /* SLID for MAD_IFC is supported only after 1.17.0000 */ + cmd_desc.input_modifier |= ( ((u_int32_t)slid) << 16 ); + } + cmd_desc.out_param = (u_int8_t*)int32_outbuf; + cmd_desc.out_param_size = 256; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_MAD_IFC; + cmd_desc.opcode_modifier = mkey_validate ? 
0 : 1; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_MAD_IFC; + + /* reverse endianness to CPU endian, since MAD frames are all BE*/ + for (i = 0; i < 256/(sizeof(u_int32_t)); i++) { + *int32_temp_inbuf = MOSAL_be32_to_cpu(*int32_inbuf); + int32_inbuf++; + int32_temp_inbuf++; + } + rc = cmd_invoke(cmd_if, &cmd_desc); + /* reverse endianness to big endian, since information is gotten cpu-endian*/ + for (i = 0; i < 256/(sizeof(u_int32_t)); i++) { + *int32_outbuf = MOSAL_cpu_to_be32(*int32_outbuf); + int32_outbuf++; + } + FREE(orig_inbuf); + THH_CMD_MAILBOX_PRINT(mad_out, 256, __func__); + MT_RETURN(rc); +} + + +THH_cmd_status_t THH_cmd_SW2HW_SRQ(THH_cmd_t cmd_if, + u_int32_t srqn, /* SRQ number/index */ + THH_srq_context_t *srqc_p) /* SRQ context */ +{ + command_fields_t cmd_desc; + u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st)]; + THH_cmd_status_t rc; + const u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st); + + FUNC_IN; + if (srqc_p == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + memset(inprm, 0, buf_size); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, pd); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, uar); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, l_key); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, wqe_addr_h); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, next_wqe_addr_l); + INS_FLD(srqc_p, inprm, tavorprm_srq_context_st, ds); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = srqn; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_SW2HW_SRQ; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLASS_C; + + rc = cmd_invoke(cmd_if, &cmd_desc); + MT_RETURN(rc); +} + +THH_cmd_status_t THH_cmd_HW2SW_SRQ(THH_cmd_t cmd_if, + u_int32_t srqn, /* SRQ number/index */ + THH_srq_context_t *srqc_p) /* SRQ context */ +{ + command_fields_t cmd_desc; + u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st)]; + THH_cmd_status_t rc; + const u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st); + + FUNC_IN; + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = srqn; + cmd_desc.out_param = srqc_p != NULL ? outprm : 0; + cmd_desc.out_param_size = srqc_p != NULL ? buf_size : 0; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_HW2SW_SRQ; + cmd_desc.opcode_modifier = srqc_p != NULL ? 
1 : 0; /* No need for output if no *srqc_p */ + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLASS_C; + + rc = cmd_invoke(cmd_if, &cmd_desc); + + if (srqc_p != NULL) { + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, pd); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, uar); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, l_key); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, wqe_addr_h); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, next_wqe_addr_l); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, ds); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, wqe_cnt); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, state); + } + MT_RETURN(rc); +} + +THH_cmd_status_t THH_cmd_QUERY_SRQ(THH_cmd_t cmd_if, + u_int32_t srqn, /* SRQ number/index */ + THH_srq_context_t *srqc_p) /* SRQ context */ +{ + command_fields_t cmd_desc; + u_int8_t outprm[PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st)]; + THH_cmd_status_t rc; + const u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_srq_context_st); + + FUNC_IN; + if (srqc_p == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = srqn; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_QUERY_SRQ; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLASS_C; + + rc = cmd_invoke(cmd_if, &cmd_desc); + + if (srqc_p != NULL){ + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, pd); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, uar); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, l_key); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, wqe_addr_h); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, next_wqe_addr_l); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, ds); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, wqe_cnt); + EX_FLD(srqc_p, outprm, tavorprm_srq_context_st, state); + } + MT_RETURN(rc); +} + + +/* + * THH_cmd_READ_MGM + */ +THH_cmd_status_t THH_cmd_READ_MGM(THH_cmd_t cmd_if, u_int32_t mcg_index, + MT_size_t max_qp_per_mcg, THH_mcg_entry_t *mcg_entry) +{ + // need to add qps_per_mcg_entry field + command_fields_t cmd_desc; + u_int8_t *outprm; + THH_cmd_status_t rc; + IB_wqpn_t /**qp_buf,*/ *qp_iterator; + u_int32_t buf_size, i, num_active_qps_found, temp_u32; + u_int8_t valid; + + FUNC_IN; + + /* the default mcg_entry structure contains space for 8 qps per mcg */ + /* If HCA is configured for more than 8 qps per group, space for the extra qp entries */ + /* must be allocated as well */ + buf_size = (u_int32_t)(PSEUDO_MT_BYTE_SIZE(tavorprm_mgm_entry_st) + + ((max_qp_per_mcg - 8)*PSEUDO_MT_BYTE_SIZE(tavorprm_mgmqp_st))); + + outprm = (u_int8_t*)MALLOC(buf_size); + if ( !outprm ) { + MT_RETURN(THH_CMD_STAT_EAGAIN); + } + + memset(outprm, 0, buf_size); + + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = mcg_index; + cmd_desc.out_param = outprm; + cmd_desc.out_param_size = buf_size; + cmd_desc.out_trans = TRANS_MAILBOX; + cmd_desc.opcode = TAVOR_IF_CMD_READ_MGM; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_READ_MGM; + + rc = cmd_invoke(cmd_if, &cmd_desc); + if ( rc != THH_CMD_STAT_OK ) { + goto invoke_err; + } + + if ( mcg_entry ) { + /* allocate memory for the multicast QPs IB_wqpn_t */ + //qp_buf = (IB_wqpn_t *)MALLOC(sizeof(IB_wqpn_t) * max_qp_per_mcg); + //if ( !qp_buf ) { + // rc = 
THH_CMD_STAT_EAGAIN; + //goto malloc_err; + //} + //memset(qp_buf, 0, sizeof(IB_wqpn_t) * max_qp_per_mcg); + + /* get fixed portion of reply */ + EX_FLD(mcg_entry, outprm, tavorprm_mgm_entry_st, next_gid_index); + + /* extract MGID. Note that get the MGID from the command object as 4 double-words, each in CPU-endianness. */ + /* Need to take them one at a time, and convert each to big-endian before storing in the output MGID array */ + /* Note that need to memcpy each 4 bytes to a temporary u_int32_t variable, since there is no guarantee */ + /* that the MGID is 4-byte aligned (it is an array of unsigned chars) */ + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_128_96), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_128_96))); + memcpy(&(mcg_entry->mgid[0]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_95_64), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_95_64))); + memcpy(&(mcg_entry->mgid[4]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_63_32), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_63_32))); + memcpy(&(mcg_entry->mgid[8]), &temp_u32, sizeof(u_int32_t)); + + temp_u32 = MOSAL_cpu_to_be32(MT_EXTRACT_ARRAY32(outprm, + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_31_0), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_31_0))); + memcpy(&(mcg_entry->mgid[12]), &temp_u32, sizeof(u_int32_t)); + + /* Now, extract the QP entries in the group */ + for (i = 0, num_active_qps_found = 0, qp_iterator = mcg_entry->qps/*qp_buf*/; i < max_qp_per_mcg; i++ ) { + /* extract VALID bit for each QP. If valid is set, extract the QP number and insert in */ + /* the QP array returned */ + valid = (u_int8_t)(MT_EXTRACT_ARRAY32(outprm, + (MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgmqp_0.qi) + i*(MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0))), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0.qi) )); + if (valid) { + /* NULL protection .. */ + if (mcg_entry->qps) { + *((u_int32_t *) qp_iterator) = MT_EXTRACT_ARRAY32(outprm, + (MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgmqp_0.qpn_i) + i*(MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0))), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0.qpn_i) ); + qp_iterator++; + } + num_active_qps_found++; + } + } + mcg_entry->valid_qps = num_active_qps_found; + /* If valid QPs found, return the allocated QP number buffer and number found. 
Otherwise */ + /* return 0 QPs and delete the buffer */ + //if (num_active_qps_found) { + // mcg_entry->qps = qp_buf; + //} else { + // mcg_entry->qps = NULL; + // FREE(qp_buf); + //} + } + +invoke_err: + FREE(outprm); + MT_RETURN(rc); +} + +/* + * THH_cmd_WRITE_MGM + */ +THH_cmd_status_t THH_cmd_WRITE_MGM(THH_cmd_t cmd_if, u_int32_t mcg_index, + MT_size_t max_qp_per_mcg, THH_mcg_entry_t *mcg_entry) +{ + command_fields_t cmd_desc; + u_int8_t inprm[PSEUDO_MT_BYTE_SIZE(tavorprm_mgm_entry_st)]; + THH_cmd_status_t rc; + u_int32_t buf_size = PSEUDO_MT_BYTE_SIZE(tavorprm_mgm_entry_st); + IB_wqpn_t *qp_iterator; + u_int32_t i, temp_u32; + + FUNC_IN; + + CMDS_DBG("THH_cmd_WRITE_MGM: index=%u, max_qp_per_mcg = "SIZE_T_FMT"\n", mcg_index, max_qp_per_mcg); + if (mcg_entry == NULL) {MT_RETURN(THH_CMD_STAT_EBADARG); } + THH_CMD_PRINT_MGM_ENTRY(mcg_entry); + memset(inprm, 0, buf_size); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = buf_size; + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = mcg_index; + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = TAVOR_IF_CMD_WRITE_MGM; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_WRITE_MGM; + + /* get fixed portion of reply */ + INS_FLD(mcg_entry, inprm, tavorprm_mgm_entry_st, next_gid_index); + + /* insert MGID. Note that get the MGID from the command object as 4 double-words, each in CPU-endianness. */ + /* Need to take them one at a time, and convert each to big-endian before storing in the output MGID array */ + memcpy(&temp_u32, &(mcg_entry->mgid[0]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_128_96), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_128_96)); + + memcpy(&temp_u32, &(mcg_entry->mgid[4]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_95_64), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_95_64)); + + memcpy(&temp_u32, &(mcg_entry->mgid[8]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_63_32), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_63_32)); + + memcpy(&temp_u32, &(mcg_entry->mgid[12]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32), + MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgid_31_0), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgid_31_0)); + + /* Now, insert the QP entries in the group */ + for (i = 0, qp_iterator = mcg_entry->qps; i < max_qp_per_mcg; i++, qp_iterator++ ) { + /* Insert valid entries. First, insert a VALID bit = 1 for each valid QP number, then insert */ + /* the QP number itself. 
If there are no more valid entries, insert only a VALID bit = 0 for each */ + /* invalid entry, up to the maximum allowed QPs */ + if (i < mcg_entry->valid_qps) { + MT_INSERT_ARRAY32(inprm, 1, + (MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgmqp_0.qi) + i*(MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0))), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0.qi) ); + MT_INSERT_ARRAY32(inprm, *((u_int32_t *) qp_iterator) , + (MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgmqp_0.qpn_i) + i*(MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0))), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0.qpn_i) ); + } else { + MT_INSERT_ARRAY32(inprm, 0, + (MT_BIT_OFFSET(tavorprm_mgm_entry_st, mgmqp_0.qi) + i*(MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0))), + MT_BIT_SIZE(tavorprm_mgm_entry_st, mgmqp_0.qi) ); + } + } + THH_CMD_MAILBOX_PRINT(inprm, buf_size, __func__); + rc = cmd_invoke(cmd_if, &cmd_desc); + MT_RETURN(rc); +} + +/* + * THH_cmd_HASH + */ +THH_cmd_status_t THH_cmd_MGID_HASH(THH_cmd_t cmd_if, IB_gid_t mgid, THH_mcg_hash_t *hash_val) +{ + command_fields_t cmd_desc; + u_int8_t inprm[16]; + THH_cmd_status_t rc; + u_int32_t out_param[2], temp_u32; + + FUNC_IN; + memset(inprm, 0, 16); + memset(out_param, 0, sizeof(out_param)); + + cmd_desc.in_param = inprm; + cmd_desc.in_param_size = sizeof(IB_gid_t); + cmd_desc.in_trans = TRANS_MAILBOX; + cmd_desc.input_modifier = 0; + cmd_desc.out_param = (u_int8_t *)&(out_param[0]); + cmd_desc.out_param_size = sizeof(out_param); + cmd_desc.out_trans = TRANS_IMMEDIATE; + cmd_desc.opcode = TAVOR_IF_CMD_MGID_HASH; + cmd_desc.opcode_modifier = 0; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_MGID_HASH; + + /* insert GID into mailbox, 1 double-word at a time. Modify byte ordering within doubl-words */ + /* to cpu-endian, so that lower layers will properly process the GID */ + memcpy(&temp_u32, &(mgid[0]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),0,32); + memcpy(&temp_u32, &(mgid[4]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),32,32); + memcpy(&temp_u32, &(mgid[8]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),64,32); + memcpy(&temp_u32, &(mgid[12]), sizeof(u_int32_t)); + MT_INSERT_ARRAY32(inprm, MOSAL_be32_to_cpu(temp_u32),96,32); + + rc = cmd_invoke(cmd_if, &cmd_desc); + + /* Note that output result is directly inserted into hash_val parameter, in cpu-endian order */ + /* Only the first of the two output double-words needs to be copied. */ + CMDS_DBG( "THH_cmd_MGID_HASH: out_param[0] = 0x%x; out_param[1] = 0x%x\n", + out_param[0], out_param[1]); + + *hash_val = (THH_mcg_hash_t)MT_EXTRACT32(out_param[1],0,16); + MT_RETURN(rc); +} + +#if defined(MT_SUSPEND_QP) +THH_cmd_status_t THH_cmd_SUSPEND_QP(THH_cmd_t cmd_if, u_int32_t qpn, MT_bool suspend_flag) +{ + command_fields_t cmd_desc; + THH_cmd_status_t rc; + FUNC_IN; + + MTL_DEBUG2(MT_FLFMT("%s: qpn = 0x%x, suspend_flag = %s"), + __func__, qpn, ((suspend_flag==TRUE) ? "TRUE" : "FALSE" )); + cmd_desc.in_param = 0; + cmd_desc.in_param_size = 0; + cmd_desc.in_trans = TRANS_NA; + cmd_desc.input_modifier = qpn & 0xFFFFFF; + //cmd_desc.input_modifier |= (is_ee ? 0x1000000 : 0); + cmd_desc.out_param = 0; + cmd_desc.out_param_size = 0; + cmd_desc.out_trans = TRANS_NA; + cmd_desc.opcode = (suspend_flag == TRUE) ? 
TAVOR_IF_CMD_SUSPEND_QPEE : TAVOR_IF_CMD_UNSUSPEND_QPEE; + cmd_desc.opcode_modifier = 1; + cmd_desc.exec_time_micro = TAVOR_IF_CMD_ETIME_CLASS_C; + + rc = cmd_invoke(cmd_if, &cmd_desc); + MTL_DEBUG2(MT_FLFMT("%s: qpn = 0x%x, command returned 0x%x"), __func__,qpn, rc); + MT_RETURN(rc); +} +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c new file mode 100644 index 00000000..e79ef02b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.c @@ -0,0 +1,354 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include +#include +#include + + +#define ELSE_ACQ_ERROR(f) else { MTL_ERROR1("%s MOSAL_mutex_acq failed\n", f); } +#define logIfErr(f) \ + if (rc != HH_OK) { MTL_ERROR1("%s: rc=%s\n", f, HH_strerror_sym(rc)); } + + +typedef struct THH_ddrmm_st +{ + MT_phys_addr_t mem_base; + MT_size_t mem_sz; + Extbuddy_hndl xb; + MOSAL_mutex_t mtx; /* protect xb */ +} DDRMM_t; + + +/************************************************************************/ +/************************************************************************/ +/* private functions */ + +/************************************************************************/ +static int well_alligned(MT_phys_addr_t mem_base, MT_size_t mem_sz) +{ + unsigned int lg2sz = ceil_log2(mem_sz); + MT_phys_addr_t mask = (1ul << lg2sz) - 1; + MT_phys_addr_t residue = mem_base & mask; + return (residue == 0); +} /* well_alligned */ + + +/************************************************************************/ +static void lookup_sort( + MT_size_t n, + const MT_size_t* sizes, + MT_size_t* lut +) +{ + unsigned int i, i1, j; + for (i = 0; i != n; lut[i] = i, ++i); /* identity init */ + for (i = 0, i1 = 1; i1 != n; i = i1++) /* small n, so Bubble sort O(n^2) */ + { + MT_size_t luiMax = i; + MT_size_t iMax = sizes[ lut[i] ]; + for (j = i1; j != n; ++j) + { + MT_size_t v = sizes[ lut[j] ]; + if (iMax < v) + { + iMax = v; + luiMax = j; + } + } + j = (unsigned int)lut[i]; lut[i] = lut[luiMax]; lut[luiMax] = j; /* swap */ + } +} /* lookup_sort */ + + +/************************************************************************/ +/************************************************************************/ +/* interface functions */ + + +/************************************************************************/ +HH_ret_t THH_ddrmm_create( + MT_phys_addr_t mem_base, /* IN */ + MT_size_t mem_sz, /* IN */ + THH_ddrmm_t* ddrmm_p /* OUT */ +) +{ + HH_ret_t rc = HH_OK; + Extbuddy_hndl xb = NULL; + DDRMM_t* mm = NULL; + + MTL_TRACE1("{THH_ddrmm_create: base="U64_FMT", sz="SIZE_T_FMT"\n", + (u_int64_t)mem_base, mem_sz); + if (!well_alligned(mem_base, mem_sz)) + { + rc = HH_EINVAL; + } + else + { + xb = extbuddy_create((u_int32_t) mem_sz, 0); + mm = (xb ? 
TMALLOC(DDRMM_t) : NULL); + } + if (mm == NULL) + { + rc = HH_EAGAIN; + } + else + { + mm->mem_base = mem_base; + mm->mem_sz = mem_sz; + mm->xb = xb; + MOSAL_mutex_init(&mm->mtx); + *ddrmm_p = mm; + } + MTL_TRACE1("}THH_ddrmm_create: ddrmm=%p\n", *ddrmm_p); +// logIfErr("THH_ddrmm_create") + return rc; +} /* THH_ddrmm_create */ + + +/************************************************************************/ +HH_ret_t THH_ddrmm_destroy(THH_ddrmm_t ddrmm) +{ + HH_ret_t rc = HH_OK; + MTL_TRACE1("{THH_ddrmm_destroy: ddrmm=%p\n", ddrmm); + extbuddy_destroy(ddrmm->xb); + MOSAL_mutex_free(&ddrmm->mtx); + FREE(ddrmm); + MTL_TRACE1("}THH_ddrmm_destroy\n"); + logIfErr("THH_ddrmm_destroy") + return rc; +} /* THH_ddrmm_destroy */ + + + +/************************************************************************/ +HH_ret_t THH_ddrmm_reserve ( + THH_ddrmm_t ddrmm, /* IN */ + MT_phys_addr_t addr, /* IN */ + MT_size_t size /* IN */ +) +{ + MT_bool ok = FALSE; + HH_ret_t rc; + + MTL_TRACE1("{THH_ddrmm_reserve: ddrmm=%p, addr="U64_FMT", size="SIZE_T_FMT"\n", + ddrmm, (u_int64_t)addr, size); + MOSAL_mutex_acq_ui(&ddrmm->mtx); + MTL_DEBUG4(MT_FLFMT("rel="U64_FMT", size="SIZE_T_FMT""), + (u_int64_t)(addr - ddrmm->mem_base), size); + ok = extbuddy_reserve(ddrmm->xb, (u_int32_t)(addr - ddrmm->mem_base), (u_int32_t)size); + MOSAL_mutex_rel(&ddrmm->mtx); + rc = (ok ? HH_OK : HH_EINVAL); + MTL_TRACE1("}THH_ddrmm_reserve\n"); + logIfErr("THH_ddrmm_reserve") + return rc; +} /* THH_ddrmm_reserve */ + + +/************************************************************************/ +/* Note: For chunks in the array of size THH_DDRMM_INVALID_SZ, no allocation is made */ +HH_ret_t THH_ddrmm_alloc_sz_aligned( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t num_o_chunks, /* IN */ + MT_size_t* chunks_log2_sizes, /* IN */ + MT_phys_addr_t* chunks_addrs /* OUT */ +) +{ + HH_ret_t rc = HH_EAGAIN; + MT_size_t* slut; /* Sorted Look-Up Table - (:politely incorrect:) */ + + MTL_TRACE1("{THH_ddrmm_alloc_sz_aligned: ddrmm=%p, n="SIZE_T_FMT"\n", + ddrmm, num_o_chunks); + slut = TNMALLOC(MT_size_t, num_o_chunks); /* small, so not VMALLOC */ + if (slut) + { + MT_size_t i; + rc = HH_OK; + lookup_sort(num_o_chunks, chunks_log2_sizes, slut); + for (i = 0; (i != num_o_chunks) && (rc == HH_OK); ++i) + { + MT_size_t si = slut[i]; + u_int8_t log2sz = (u_int8_t)chunks_log2_sizes[si]; + if (log2sz != (u_int8_t)THH_DDRMM_INVALID_SZ) { + rc = THH_ddrmm_alloc(ddrmm, (MT_size_t)1 << log2sz, log2sz, &chunks_addrs[si]); + } else { + chunks_addrs[si]= THH_DDRMM_INVALID_PHYS_ADDR; + /* No allocation if given log size is zero (workaround struct design in ddr_alloc_size_vec)*/ + } + } + if (rc != HH_OK) + { /* Backwards. 
Note that we avoid (MT_size_t)-1 > 0 */ + while (i-- > 0) + { /* Now i >= 0 */ + MT_size_t si = slut[i]; + u_int8_t log2sz = (u_int8_t)chunks_log2_sizes[si]; + if (log2sz != (u_int8_t)THH_DDRMM_INVALID_SZ) + THH_ddrmm_free(ddrmm, chunks_addrs[si], (MT_size_t)1 << log2sz); + } + } + FREE(slut); + } + MTL_TRACE1("}THH_ddrmm_alloc_sz_aligned\n"); + logIfErr("THH_ddrmm_alloc_sz_aligned") + return rc; +} /* THH_ddrmm_alloc_sz_aligned */ + + + +/************************************************************************/ +HH_ret_t THH_ddrmm_alloc( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t size, /* IN */ + u_int8_t align_shift, /* IN */ + MT_phys_addr_t* buf_p /* OUT */ +) +{ + HH_ret_t rc = HH_EINVAL; + /* internally (extbuddy) we need power-2 size */ + MT_size_t log2sz = ceil_log2(size); + MTL_TRACE1("{THH_ddrmm_alloc: ddrmm=%p, sz="SIZE_T_FMT", lg2=%d, shift=%d\n", + ddrmm, size, (u_int32_t)log2sz, align_shift); + if (log2sz >= align_shift) + { + u_int32_t p = EXTBUDDY_NULL; + MOSAL_mutex_acq_ui(&ddrmm->mtx); + p = extbuddy_alloc(ddrmm->xb, (u_int8_t)log2sz); + MOSAL_mutex_rel(&ddrmm->mtx); + MTL_DEBUG7(MT_FLFMT("log2sz="SIZE_T_FMT", p=0x%x"), log2sz, p); + rc = HH_EAGAIN; + if (p != EXTBUDDY_NULL) + { + *buf_p = ddrmm->mem_base + p; + rc = HH_OK; + } + } + MTL_TRACE1("}THH_ddrmm_alloc: buf="U64_FMT"\n", (u_int64_t)*buf_p); + if ( rc != HH_OK ) { + if (rc == HH_EAGAIN) { + MTL_DEBUG1("%s: rc=%s\n",__func__, HH_strerror_sym(rc)); + } else { + MTL_ERROR1("%s: rc=%s\n",__func__, HH_strerror_sym(rc)); + } + } + return rc; +} /* THH_ddrmm_alloc */ + + +/************************************************************************/ +HH_ret_t THH_ddrmm_alloc_bound( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t size, /* IN */ + u_int8_t align_shift, /* IN */ + MT_phys_addr_t area_start, /* IN */ + MT_phys_addr_t area_size, /* IN */ + MT_phys_addr_t* buf_p /* OUT */ +) +{ + HH_ret_t rc = HH_EINVAL; + /* internally (extbuddy) we need power-2 size */ + MT_size_t log2sz = ceil_log2(size); + MTL_TRACE1("{THH_ddrmm_alloc_bound: ddrmm=%p, sz="SIZE_T_FMT", shift=%d, " + "area:{start="U64_FMT", size="U64_FMT"\n", + ddrmm, size, align_shift, (u_int64_t)area_start, (u_int64_t)area_size); + if (log2sz >= align_shift) + { + u_int32_t p = EXTBUDDY_NULL; + MOSAL_mutex_acq_ui(&ddrmm->mtx); + p = extbuddy_alloc_bound(ddrmm->xb, (u_int8_t)log2sz, (u_int32_t)area_start, (u_int32_t)area_size); + MOSAL_mutex_rel(&ddrmm->mtx); + rc = HH_EAGAIN; + if (p != EXTBUDDY_NULL) + { + *buf_p = ddrmm->mem_base + p; + rc = HH_OK; + } + } + MTL_TRACE1("}THH_ddrmm_alloc_bound: buf="U64_FMT"\n", (u_int64_t)*buf_p); + logIfErr("THH_ddrmm_alloc_bound") + return rc; +} /* THH_ddrmm_alloc_bound */ + + +/************************************************************************/ +HH_ret_t THH_ddrmm_free( + THH_ddrmm_t ddrmm, /* IN */ + MT_phys_addr_t buf, /* IN */ + MT_size_t size /* IN */ +) +{ + HH_ret_t rc = HH_OK; + MT_size_t log2sz = ceil_log2(size); + MTL_TRACE1("{THH_ddrmm_free: ddrmm=%p, buf="U64_FMT", sz="SIZE_T_FMT"\n", + ddrmm, (u_int64_t)buf, size); + MOSAL_mutex_acq_ui(&ddrmm->mtx); + extbuddy_free(ddrmm->xb, (u_int32_t)(buf - ddrmm->mem_base), (u_int8_t)log2sz); + MOSAL_mutex_rel(&ddrmm->mtx); + MTL_TRACE1("}THH_ddrmm_free\n"); + logIfErr("THH_ddrmm_free") + return rc; +} /* THH_ddrmm_free */ + + +/************************************************************************/ +HH_ret_t THH_ddrmm_query( + THH_ddrmm_t ddrmm, /* IN */ + u_int8_t align_shift, /* IN */ + VAPI_size_t* total_mem, /* OUT */ + VAPI_size_t* free_mem, /* OUT */ + 
VAPI_size_t* largest_chunk, /* OUT */ + VAPI_phy_addr_t* largest_free_addr_p /* OUT */ +) +{ + HH_ret_t rc = HH_OK; + int log2_sz; + MTL_TRACE1("{THH_ddrmm_query: ddrmm=%p, shift=%d\n", ddrmm, align_shift); + *total_mem = ddrmm->mem_sz; + *free_mem = extbuddy_total_available(ddrmm->xb); + log2_sz = extbuddy_log2_max_available(ddrmm->xb); + *largest_chunk = 0; + *largest_free_addr_p = ddrmm->mem_base + ddrmm->mem_sz; /* like null */ + if (log2_sz >= align_shift) + { + u_int32_t p; + *largest_chunk = (VAPI_size_t)1 << log2_sz; +/*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/ + extbuddy_query_chunks(ddrmm->xb, (u_int8_t)log2_sz, 1, &p); + *largest_free_addr_p = ddrmm->mem_base + p; + } + MTL_TRACE1("}THH_ddrmm_query: total="U64_FMT", free="U64_FMT", lc="U64_FMT", p="U64_FMT"\n", + *total_mem, *free_mem, + *largest_chunk, (VAPI_phy_addr_t)*largest_free_addr_p); + logIfErr("THH_ddrmm_query") + return rc; +} /* THH_ddrmm_query */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h new file mode 100644 index 00000000..259a2257 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/ddrmm/tddrmm.h @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(_TDDRM__H) +#define _TDDRM__H + +#include +#include +#include +#include +#include + + +#define THH_DDRMM_INVALID_HANDLE ((THH_ddrmm_t)0) +#define THH_DDRMM_INVALID_SZ ((MT_size_t)-1) +#define THH_DDRMM_INVALID_PHYS_ADDR ((MT_phys_addr_t)-1) + + +/************************************************************************ + * Function: THH_ddrmm_create + * + * Arguments: + * mem_base - Physical address base for DDR memory + * mem_sz - Size in bytes of DDR memory 1 + * ddrmm_p - Created object handle + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * HH_EAGAIN - Not enough resources for creating this object + * + * Description: + * Create DDR memory management object context. 
+ */ +extern HH_ret_t THH_ddrmm_create( + MT_phys_addr_t mem_base, /* IN */ + MT_size_t mem_sz, /* IN */ + THH_ddrmm_t* ddrmm_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_ddrmm_destroy + * + * Arguments: + * ddrmm - The handle of the object to destroy + * + * Returns: + * HH_OK + * HH_EINVAL - No such object + * + * Description: + * Free given object resources. + */ +extern HH_ret_t THH_ddrmm_destroy(THH_ddrmm_t ddrmm /* IN */); + + +/************************************************************************ + * Function: THH_ddrmm_reserve + * + * Arguments: + * ddrmm + * addr - Physical address of reserved area + * size - Byte size of the reserved area + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (Given area is beyond DDR memory space) + * or called after some allocation has been done. + * + * Description: + * Can be used only before any THH_ddrmm_alloc...() calls. + * Some areas in the attached DDR memory are reserved and may not be + * allocated by the HCA driver or an application (e.g. firmware reserved + * areas). This function allows the HCA driver to explicitly define a + * memory space to exclude from the THH_ddrmm dynamic allocation. + */ +extern HH_ret_t THH_ddrmm_reserve ( + THH_ddrmm_t ddrmm, /* IN */ + MT_phys_addr_t addr, /* IN */ + MT_size_t size /* IN */ +); + + +/************************************************************************ + * Function: THH_ddrmm_alloc_sz_aligned + * + * Arguments: + * ddrmm - THH_ddrmm context + * num_o_chunks - The number of chunks to allocate (size of arrays below) + * chunks_log2_sizes - An array of num_o_chunks log2 sizes, + * one for each chunk. + * chunks_addrs - An array of num_o_chunks addresses allocated + * for the chunks based on given size/alignment + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (NULL...) + * HH_EAGAIN - No resources available to match all chunks requirements + * + * Description: + * HCA resources which are allocated during HCA initialization are all + * required to be size aligned (context tables etc.). By providing all + * those memory requirements at once this function allows the THH_ddrmm + * to efficiently allocate the attached DDR memory resources under the + * alignment constraints. + */ +extern HH_ret_t THH_ddrmm_alloc_sz_aligned( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t num_o_chunks, /* IN */ + MT_size_t* chunks_log2_sizes, /* IN */ + MT_phys_addr_t* chunks_addrs /* OUT */ +); + +
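For orientation, the sequence below sketches how a consumer would drive this interface: reservations first, then aligned allocations. The THH_ddrmm_* calls and their signatures are the ones declared in this header; the base address, pool size, and reserved window are illustrative values only. The alignment guarantee follows from the allocator handing out power-of-two blocks (as the implementation shows, a request with ceil_log2(size) < align_shift is rejected with HH_EINVAL).

/* Illustrative only: a 256MB DDR pool at 0xF0000000 with a 1MB firmware
 * window reserved up front, then one 64KB chunk aligned on 64KB. */
static HH_ret_t ddrmm_usage_sketch(void)
{
  THH_ddrmm_t    mm;
  MT_phys_addr_t buf;
  HH_ret_t       rc;

  rc = THH_ddrmm_create((MT_phys_addr_t)0xF0000000UL,
                        (MT_size_t)(256UL << 20), &mm);
  if (rc != HH_OK) return rc;

  /* Legal only before the first THH_ddrmm_alloc...() call */
  rc = THH_ddrmm_reserve(mm, (MT_phys_addr_t)0xF0000000UL,
                         (MT_size_t)(1UL << 20));
  if (rc == HH_OK) {
    /* align_shift = 16, i.e. the chunk starts on a 64KB boundary */
    rc = THH_ddrmm_alloc(mm, (MT_size_t)(1UL << 16), 16, &buf);
    if (rc == HH_OK)
      THH_ddrmm_free(mm, buf, (MT_size_t)(1UL << 16));
  }

  THH_ddrmm_destroy(mm);
  return rc;
}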
+/************************************************************************ + * Function: THH_ddrmm_alloc + * + * Arguments: + * ddrmm - The object handle + * size - Size in bytes of memory chunk required + * align_shift - Alignment shift of chunk (log2 of alignment value) + * buf_p - The returned allocated buffer physical address + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid object handle + * HH_EAGAIN - Not enough resources for required allocation + * + * Description: + * Allocate a physically contiguous memory chunk in DDR memory which + * satisfies the size and alignment requirements. + */ +extern HH_ret_t THH_ddrmm_alloc( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t size, /* IN */ + u_int8_t align_shift, /* IN */ + MT_phys_addr_t* buf_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_ddrmm_alloc_bound + * + * Arguments: + * ddrmm - The object handle + * size - Size in bytes of memory chunk required + * align_shift - Alignment shift of chunk (log2 of alignment value) + * area_start - Start of area where allocation is restricted to. + * area_size - Size of area where allocation is restricted to. + * buf_p - The returned allocated buffer physical address + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid object handle + * HH_EAGAIN - Not enough resources for required allocation + * + * Description: + * Allocate a physically contiguous memory chunk in DDR memory, restricted + * to the given area, which satisfies the size and alignment requirements. + */ +extern HH_ret_t THH_ddrmm_alloc_bound( + THH_ddrmm_t ddrmm, /* IN */ + MT_size_t size, /* IN */ + u_int8_t align_shift, /* IN */ + MT_phys_addr_t area_start, /* IN */ + MT_phys_addr_t area_size, /* IN */ + MT_phys_addr_t* buf_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_ddrmm_free + * + * Arguments: + * ddrmm - The object handle + * buf - Exact address of buffer as given in allocation + * size - Original size (in bytes) of buffer as given in allocation + * + * Returns: + * HH_OK + * HH_EINVAL - Given handle is unknown or given address is not + * an address returned by THH_ddrmm_alloc + * (or THH_ddrmm_alloc_sz_aligned) + * + * Description: + * Free a memory chunk allocated by THH_ddrmm_alloc. + */ +extern HH_ret_t THH_ddrmm_free( + THH_ddrmm_t ddrmm, /* IN */ + MT_phys_addr_t buf, /* IN */ + MT_size_t size /* IN */ +); + + +/************************************************************************ + * Function: THH_ddrmm_query + * + * Arguments: + * ddrmm - The object handle + * align_shift - Alignment requirements for returned largest_chunk + * total_mem - Total byte count of memory managed by this object + * free_mem - Total byte count of free memory + * largest_chunk - Largest chunk possible to allocate with given + * align_shift requirements + * largest_free_addr_p - Address of the referenced largest chunk + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handle + * + * Description: + * Query ddrmm object for allocation capabilities with given alignment + * constraints (use 0 if alignment is not needed). This is useful in case + * one wishes to get a hint from the object on the amount to request. + */ +extern HH_ret_t THH_ddrmm_query( + THH_ddrmm_t ddrmm, /* IN */ + u_int8_t align_shift, /* IN */ + VAPI_size_t* total_mem, /* OUT */ + VAPI_size_t* free_mem, /* OUT */ + VAPI_size_t* largest_chunk, /* OUT */ + VAPI_phy_addr_t* largest_free_addr_p /* OUT */ +); + + +#endif /* _TDDRM__H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/event_irqh.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/event_irqh.c new file mode 100644 index 00000000..58faac79 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/event_irqh.c @@ -0,0 +1,693 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include +#include + +#include +#include + +//#define MTPERF +#include + +#ifdef THH_CMD_TIME_TRACK +u_int64_t THH_eventp_last_cmdif_interrupt; +#endif + +/*================ macro definitions ===============================================*/ + +/*================ type definitions ================================================*/ + + +typedef struct eq_entry_st { + u_int8_t event_type; + tavor_if_port_event_subtype_t event_sub_type; + u_int32_t event_data[6]; /* Delivers auxiliary data to handle event. - 24 bytes */ +}eq_entry_t; + + +/*================ global variables definitions ====================================*/ + +MTPERF_NEW_SEGMENT(interupt_segment,100000); +MTPERF_NEW_SEGMENT(inter2dpc_segment,100000); +MTPERF_NEW_SEGMENT(dpc_segment,100000); +MTPERF_NEW_SEGMENT(part_of_DPC_segment,100000); + +/*================ static functions prototypes =====================================*/ + +inline static MT_bool read_eq_entry(u_int32_t *buff_p, + EQ_type_t eq_type, + eq_entry_t *eqe_p); + + + +inline static HH_ret_t move_eqe_to_hw(EQP_eq_entry_t *eqe_p, + u_int32_t *eqe_buff_p); /* pointer to start of EQE buffer */ + +static void handle_eqe_ib_events(EQP_eq_entry_t *eqe_p, + eq_entry_t *eq_data_p); +/*================ global functions definitions ====================================*/ + + + +/************************************************************************ + * Function: thh_intr_handler + * + + Arguments: + eventp - handler to eventp object + isr_ctx1 & isr_ctx2 - ignored + + + Description: + + Clear_INT(); // De-assert INT pin (write to Clr_INT register) + ECR = Read_ECR(); // read ECR register + foreach (EQN set in ECR) { + Schedule_EQ_handler(EQN); // schedule appropriate handler + Clear_ECR; // Clear ECR bits that were taken care of + } + + + +************************************************************************/ + + +BOOLEAN thh_intr_handler(MT_ulong_ptr_t eventp_dpc, void* isr_ctx1, void* isr_ctx2) +{ + THH_eventp_t eventp = (THH_eventp_t)eventp_dpc; + u_int32_t ecr[2]; /* 0 - low 1- high */ + int i,j, eq_num; + EQP_eq_entry_t *event_entry; + + + /* start measurements */ + + //MTPERF_TIME_START(interupt_segment); + + /* De-assert INT pin (write to Clr_INT register) */ + MOSAL_MMAP_IO_WRITE_DWORD(eventp->intr_clr_reg, eventp->intr_clr_mask); + + /*MTL_DEBUG1("%s: ECR register=0x%08x\n", __func__, eventp->ecr_h_base );*/ + 
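+ /* Note on ordering: the Clr_INT write above de-asserts the INT pin before ECR is read, so an event that arrives while this handler runs re-asserts the pin and raises a fresh interrupt rather than being lost; the per-EQ ECR bits are cleared further down, only after the matching DPC has been scheduled. */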
+ /* read ECR register */ + ecr[0] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD(eventp->ecr_l_base)); +#if EQP_ECR_USED>1 + ecr[1] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD(eventp->ecr_h_base)); + MTL_ERROR1("%s: READ ECR HIGH\n", __func__); +#endif + + /* No need to check anything if all bits are zero */ + if (ecr[0] == 0) { + return FALSE; + } + if (ecr[0] == 0xffffffff) { + /* master abort */ + /* verify that it is indeed a master abort by reading ECR high too */ + ecr[1] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD(eventp->ecr_h_base)); + if (ecr[1] == 0xffffffff) { + /* notify hob for fatal error - must be done from DPC level */ + MTL_ERROR1("%s: THH_FATAL_MASTER_ABORT: on ECR[0 & 1] \n", __func__); + if (eventp->have_fatal) { + /* no need to do anything since we are already in fatal state */ + MTL_ERROR1("%s: THH_FATAL_MASTER_ABORT received when we are in FATAL state - NOP \n", __func__); + } + else { /* a new fatal error */ + eventp->have_fatal = TRUE; + eventp->fatal_type = THH_FATAL_MASTER_ABORT; + MOSAL_DPC_schedule(&(eventp->fatal_error_dpc)); /* schedule DPC to notify hob on master abort */ + } + return TRUE; + } + MTL_ERROR1("%s: THH_FATAL_MASTER_ABORT: on ECR[0] only - ignoring \n", __func__); + } + + /* work on both words */ + for(i=0; i<EQP_ECR_USED; i++) { /* foreach dword */ + for(j=0; (j < 32) && ((i << 5) + j < eventp->max_eq_num_used); j++) { /* foreach bit */ + if((ecr[i] & BITS32(j,1))) { /* if the j-th bit is set */ + eq_num = (i << 5) + j; + event_entry = &(eventp->eq_table[eq_num]); + MOSAL_spinlock_irq_lock(&(event_entry->state_lock)); + if (IS_EQ_VALID_P(event_entry)) { +#if !defined(VXWORKS_OS) /* can not use printf in isr */ + MTL_DEBUG1("%s: ECR_LOW=0x%08x ECR_HIGH=not read, eq_num = %d \n", __func__, ecr[0], eq_num); +#endif + /* check if this is catastrophic error */ + if (eq_num == EQP_CATAS_ERR_EQN) { + MTL_ERROR1("%s: THH_FATAL_ERROR received on EQP_CATAS_ERR_EQN (%d) \n", + __func__, EQP_CATAS_ERR_EQN); + if (eventp->have_fatal) { + /* no need to do anything since we are already in fatal state */ + MTL_ERROR1("%s: THH_FATAL_ERROR received when we are in FATAL state - NOP \n", __func__); + } + else { /* a new fatal error */ + eventp->have_fatal = TRUE; + eventp->fatal_type = THH_FATAL_EVENT; + MOSAL_DPC_schedule(&(eventp->fatal_error_dpc)); /* schedule DPC to notify hob on fatal error */ + } + } + else { + if (MOSAL_DPC_schedule(&event_entry->polling_dpc)){ /* schedule DPC to poll EQ */ + #if !defined(DPC_IS_DIRECT_CALL) + MOSAL_atomic_inc32(&event_entry->dpc_cntr); + #else + event_entry->dpc_cntr++; /* Monitor number of outstanding DPCs */ + #endif +#ifdef THH_CMD_TIME_TRACK + if (eq_num == EQP_CMD_IF_EQN) { + THH_eventp_last_cmdif_interrupt= MOSAL_get_time_counter(); + } +#endif + } + } + } + else { + if (eventp->have_fatal) { + MTL_DEBUG1("%s: in fatal state: got event to EQ=%d but it is not setup; eq state=%d\n", __func__, + i*32 + j, event_entry->res_state); + } + else { + MTL_ERROR1("%s: Internal error: got event to EQ=%d but it is not setup; eq state=%d\n", __func__, + i*32 + j, event_entry->res_state); + } + + } + MOSAL_spinlock_unlock(&(event_entry->state_lock)); + + /* clear ecr bits that were taken care of - already in big-endian */ + if (event_entry->clr_ecr_addr) { + MOSAL_MMAP_IO_WRITE_DWORD(event_entry->clr_ecr_addr, event_entry->clr_ecr_mask); + } + else { + MTL_ERROR1(MT_FLFMT("event_entry->clr_ecr_addr=null\n")); + } + + if (eq_num == EQP_CATAS_ERR_EQN) { + goto end_intr; + } + } /* the j-th bit is set */ + } /* foreach bit */ + } /* foreach dword */ + +end_intr: + return TRUE; +} /* thh_intr_handler() */ +
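Stripped of the EQS_CMD_IN_DDR and endianness variants, the ownership handshake implemented by read_eq_entry()/move_eqe_to_hw() and the polling loop in eq_polling_dpc() reduces to roughly the following sketch. EQE_DWORDS and the whole-dword owner test are simplified stand-ins, not the real EQE_*/tavorprm offsets, and u_int32_t is assumed to be typedef'd as in the surrounding code:

/* Simplified EQ poll skeleton for the host-memory case. */
enum { EQE_DWORDS = 8 };                  /* one 32-byte EQE */

static int eqe_sw_owned(const u_int32_t *eqe)
{
  /* an all-ones owner dword marks a HW-owned (not yet posted) entry */
  return eqe[EQE_DWORDS - 1] != 0xffffffff;
}

static void poll_eq(u_int32_t *eq_buff, u_int32_t entries, u_int32_t *ci)
{
  u_int32_t *eqe;
  for (;;) {
    eqe = eq_buff + EQE_DWORDS * (*ci);
    if (!eqe_sw_owned(eqe))
      break;                              /* next EQE still owned by HW */
    /* ... dispatch on event_type, as eq_polling_dpc() does below ... */
    eqe[EQE_DWORDS - 1] = 0xffffffff;     /* hand the EQE back to HW */
    *ci = (*ci == entries - 1) ? 0 : *ci + 1;  /* cyclic advance */
    /* the real code also reports the new consumer index through the UAR:
       THH_uar_eq_cmd(kar, TAVOR_IF_UAR_EQ_INC_CI, eqn, 0) */
  }
}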
+/* inline functions must be placed BEFORE their call */ +/************************************************************************************************/ +inline static MT_bool read_eq_entry(u_int32_t *eqe_buff_p, + EQ_type_t eq_type, + eq_entry_t *eqe_p) +{ + u_int32_t tmp_eq_data[8] = {0,0,0,0,0,0,0,0}; /* entry size is 32 bytes */ + unsigned int i; + + + MTL_DEBUG1("%s: eqe_buff= %x %x %x %x %x %x %x %x \n", __func__, *eqe_buff_p, + *(eqe_buff_p+1), *(eqe_buff_p+2), *(eqe_buff_p+3),*(eqe_buff_p+4), + *(eqe_buff_p+5), *(eqe_buff_p+6), *(eqe_buff_p+7)); + /* first extract the owner & event_type only */ + +#ifdef EQS_CMD_IN_DDR + tmp_eq_data[EQE_OWNER_OFFSET] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD((eqe_buff_p+EQE_OWNER_OFFSET))); + /* owner */ + if ( EQE_SW_OWNER != MT_EXTRACT_ARRAY32(tmp_eq_data, MT_BIT_OFFSET(tavorprm_event_queue_entry_st, owner), + MT_BIT_SIZE(tavorprm_event_queue_entry_st, owner))) { + MTL_DEBUG1("%s: EQE_HW_OWNER\n", __func__); + return FALSE; + } +#else + /* if EQE not in SW ownership - end of EQEs to handle */ + if ((((u_int8_t*)eqe_buff_p)[EQE_OWNER_BYTE_OFFSET] & EQE_HW_OWNER) != EQE_SW_OWNER) { + MTL_DEBUG1("%s: EQE_HW_OWNER\n", __func__); + return FALSE; + } +#endif + + /* event_type & event_sub_type */ +#ifdef EQS_CMD_IN_DDR + tmp_eq_data[EQE_EVENT_TYPE_OFFSET] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD((eqe_buff_p+EQE_EVENT_TYPE_OFFSET))); +#else + tmp_eq_data[EQE_EVENT_TYPE_OFFSET] = MOSAL_be32_to_cpu(*(eqe_buff_p+EQE_EVENT_TYPE_OFFSET)); +#endif + eqe_p->event_type = MT_EXTRACT_ARRAY32(tmp_eq_data, MT_BIT_OFFSET(tavorprm_event_queue_entry_st, event_type), + MT_BIT_SIZE(tavorprm_event_queue_entry_st, event_type)); + + eqe_p->event_sub_type = (tavor_if_port_event_subtype_t) + MT_EXTRACT_ARRAY32(tmp_eq_data, MT_BIT_OFFSET(tavorprm_event_queue_entry_st, event_sub_type), + MT_BIT_SIZE(tavorprm_event_queue_entry_st, event_sub_type)); + + /* extraction of data will be done according to the event type */ + + if(eq_type != EQP_CMD_IF_EVENT) { + for(i=EQE_DATA_OFFSET; i< EQE_OWNER_OFFSET; i++) { +#ifdef EQS_CMD_IN_DDR + eqe_p->event_data[i-EQE_DATA_OFFSET] = MOSAL_be32_to_cpu(MOSAL_MMAP_IO_READ_DWORD((eqe_buff_p + i))); +#else + eqe_p->event_data[i-EQE_DATA_OFFSET] = MOSAL_be32_to_cpu(*(eqe_buff_p + i)); +#endif + } + } + else { /* need to leave as big-endian for the cmd_if callback */ +#ifdef EQS_CMD_IN_DDR + MOSAL_MMAP_IO_READ_BUF_DWORD(eqe_buff_p+EQE_DATA_OFFSET,eqe_p->event_data,EQE_DATA_BYTE_SIZE/4); +#else + memcpy(eqe_p->event_data, eqe_buff_p+EQE_DATA_OFFSET, EQE_DATA_BYTE_SIZE); +#endif + } + FUNC_OUT; + return TRUE; +} + +/************************************************************************************************/ + +inline static HH_ret_t move_eqe_to_hw(EQP_eq_entry_t *eqe_p, + u_int32_t *eqe_buff_p) /* pointer to start of EQE buffer */ +{ + + HH_ret_t ret = HH_OK; + + /* move EQE to HW ownership */ + +#ifdef EQS_CMD_IN_DDR + MOSAL_MMAP_IO_WRITE_DWORD((eqe_buff_p+EQE_OWNER_OFFSET), 0xffffffff); +#else + *(eqe_buff_p+EQE_OWNER_OFFSET) = 0xffffffff; /* no need for cpu_to_be32 since all bits are set */ +#endif + + /* update the consumer index; the FW takes care of the cyclic buffer wrap-around */ + ret = THH_uar_eq_cmd(eqe_p->eventp_p->kar, TAVOR_IF_UAR_EQ_INC_CI, eqe_p->eqn, 0); + if(ret != HH_OK) { + MTL_ERROR1("%s: Internal error: THH_uar_eq_cmd failed ret=%d.\n", __func__,ret); + } + MT_RETURN( ret); +} + + +/************************************************************************ + * Function: eq_polling_dpc + * + + Arguments: + + Returns: + HH_OK + HH_EINVAL -Invalid parameters + 
Description: + + lock spinlock of EQ + clear ECR of this EQ + While (EQE[Consumer_indx].Owner == SW) { + consume_entry - call EQ handler; // remove entry from the queue + EQE[Consumer_indx++].Owner = HW; // mark entry to the Hardware ownership for next time around + consumer_indx &= MASK; // wrap around index + } + update_consumer_index(EQ,consumer_indx); // update consumer index in HCA via UAR + subscribe_for_event(EQ); // subscribe for event for next time + unlock EQ spinlock + +************************************************************************/ + +void eq_polling_dpc(DPC_CONTEXT_t *dpc_obj_p) +{ + EQP_eq_entry_t *eqe_p = (EQP_eq_entry_t *)dpc_obj_p->func_ctx; /* EQ context entry (not the EQE) */ + u_int32_t cons_indx; + eq_entry_t eqe; + u_int32_t *eqe_buff_p; + void* cyc_buff; + HH_ret_t ret; + + + //MTPERF_TIME_END(inter2dpc_segment); + //MTPERF_TIME_START(dpc_segment); + + FUNC_IN; + + + if (eqe_p->eventp_p->have_fatal){ + goto dpc_done1; + } + +#if !defined(DPC_IS_DIRECT_CALL) && !defined(IMPROVE_EVENT_HANDLING) + // the following fragment is unnecessary, because THH_eventp_teardown_eq() will wait for all DPCs to exit. + /* In Darwin, the DPC is a direct function call, so this is guaranteed to be + * checked already */ + MOSAL_spinlock_irq_lock(&eqe_p->state_lock); + if (!IS_EQ_VALID_P(eqe_p)) { /* Make sure the EQ is still in use (was not torn down) */ + MOSAL_spinlock_unlock(&eqe_p->state_lock); + MTL_ERROR1(MT_FLFMT("DPC invoked for an EQ not in use.\n")); + goto dpc_done1; + } + MOSAL_spinlock_unlock(&eqe_p->state_lock); +#endif + + + /* CLEAR ECR bit of this EQ since it might be set again between ISR & DPC */ + /* moved to comment since any write to the Tavor registers harm the performance */ + /* MOSAL_MMAP_IO_WRITE_DWORD(eqe_p->clr_ecr_addr, eqe_p->clr_ecr_mask); */ + + cyc_buff = eqe_p->eq_buff; + /* EQEs polling loop */ +/* need to protect only in case simultanuous DPC can be called at the same time + currently will be set only for windows */ +#if defined( SIMULTANUOUS_DPC ) && !defined( DPC_IS_DIRECT_CALL ) + MOSAL_spinlock_dpc_lock(&eqe_p->dpc_lock); /* protect consumer index access */ +#endif + + for(cons_indx = eqe_p->cons_indx;/* break done inside the loop*/ ; + cons_indx = (cons_indx == (eqe_p->eq_buff_entry_num - 1)) ? 0 : cons_indx+1) { + + MTL_DEBUG1("%s: cons_indx=%d cyc_buff=%p\n", __func__,cons_indx, cyc_buff); + eqe_buff_p = ((u_int32_t*)cyc_buff) + EQE_DWORD_SIZE*cons_indx ; /* TK: can improve with << of EQE logsize later */ + /* read the EQE and change to machine endianess */ + if (read_eq_entry(eqe_buff_p, eqe_p->eq_type, &eqe) == FALSE){ + MTL_DEBUG1("%s: entry not in SW ownership, cons_indx=%d cyc_buff=%p\n", __func__,cons_indx, cyc_buff); + break; /* no more EQEs to poll */ + } + + /* if we are here then we have EQE in SW ownership */ + + /* first return EQE to HW ownership and update consumer index */ + if((ret = move_eqe_to_hw(eqe_p, eqe_buff_p)) != HH_OK) { + MTL_ERROR1("%s: failed moving EQE to HW \n", __func__); + goto dpc_done2; + } + /* TK: maybe this should be moved before the move to hw?? 
+ now check that we don't have overrun */ + + if(eqe.event_type == TAVOR_IF_EV_TYPE_OVERRUN) { + MTL_ERROR1("%s: EQ OVERRUN eqn=%d\n", __func__, eqe_p->eqn); + /* need to notify the hob for the fatal error */ + eqe_p->eventp_p->have_fatal = TRUE; + THH_hob_fatal_error(eqe_p->eventp_p->hob, THH_FATAL_EQ_OVF, VAPI_CATAS_ERR_EQ_OVERFLOW); + goto dpc_done2; + } + + MTL_DEBUG1("%s: EQ type =%d\n", __func__, eqe_p->eq_type); + switch (eqe_p->eq_type) { + /* handle CMD_IF EQ */ + case EQP_CMD_IF_EVENT: + if(eqe.event_type != TAVOR_IF_EV_TYPE_CMD_IF_COMP) { + MTL_ERROR1("%s: Internal error: wrong EQ type to CMD_IF EQ. EQ type =%d\n", __func__, + eqe.event_type); + } + else { + /* notify cmd_if module */ + THH_cmd_eventh(eqe_p->eventp_p->cmd_if, (u_int32_t*)eqe.event_data); + } + break; + /* IB events */ + case EQP_IB_EVENT: + handle_eqe_ib_events(eqe_p,&eqe); + break; + + /* Completion Events */ + case EQP_CQ_COMP_EVENT: + /* sanity check */ + if(eqe.event_type != TAVOR_IF_EV_TYPE_CQ_COMP) { + MTL_ERROR1("%s: Internal error: wrong EQ type to COMPLETION EVENTS EQ. EQ type =%d\n", __func__, + eqe.event_type); + } + else { + u_int32_t cqnum=0; + cqnum = MT_EXTRACT_ARRAY32(eqe.event_data, MT_BIT_OFFSET(tavorprm_completion_event_st, cqn), + MT_BIT_SIZE(tavorprm_completion_event_st, cqn)); + MTL_DEBUG2("%s: Got completion event on CQN=%d. Going to DIS-ARM CQ\n", __func__,cqnum); + /* disarm CQ */ + ret = THH_uar_eq_cmd(eqe_p->eventp_p->kar, TAVOR_IF_UAR_EQ_DISARM_CQ, eqe_p->eqn, cqnum); + + /* call handler */ + + eqe_p->handler.comp_event_h(eqe_p->eventp_p->hh_hca_hndl, (HH_cq_hndl_t)cqnum, + eqe_p->priv_context); + } + break; + + case EQP_CATAS_ERR_EVENT: + MTL_ERROR1("%s: Internal error: got to EQP_CATAS_ERR_EVENT in regular DPC \n", __func__); + goto dpc_done2; + break; + case EQP_MLX_EVENT: + break; + } + } /* polling EQE for loop */ + + /* re-arm EQ for all EQs */ + ret = THH_uar_eq_cmd(eqe_p->eventp_p->kar, TAVOR_IF_UAR_EQ_INT_ARM, eqe_p->eqn, 0); + if(ret != HH_OK) { + MTL_ERROR1("%s: Internal error: THH_uar_eq_cmd failed ret=%d.\n", __func__,ret); + } + + /* update consumer index of EQ */ + eqe_p->cons_indx = cons_indx; + +dpc_done2: +#if defined( SIMULTANUOUS_DPC ) && !defined( DPC_IS_DIRECT_CALL ) + MOSAL_spinlock_unlock(&eqe_p->dpc_lock); +#endif + +dpc_done1: +#if defined(DPC_IS_DIRECT_CALL) + /* In darwin, this spinlock is locked all throughout the intr_handler which + * directly calls this function */ + eqe_p->dpc_cntr--; /* signal DPC done (for cleanup) */ +#else + MOSAL_atomic_dec32(&eqe_p->dpc_cntr); +#endif + + FUNC_OUT; + +#ifdef MTPERF + if (eqe_p->eq_type == EQP_CQ_COMP_EVENT) { + /*MTPERF_TIME_END(dpc_segment)*/; + } +#endif +} + +/************************************************************************************************/ + + +static inline VAPI_event_record_type_t tavor2vapi_qp_error_type(u_int8_t event_type, MT_bool is_qp) +{ + + VAPI_event_record_type_t vapi_event_type; + switch(event_type) { + case TAVOR_IF_EV_TYPE_PATH_MIG: + vapi_event_type = is_qp ? VAPI_QP_PATH_MIGRATED : VAPI_EEC_PATH_MIGRATED; + break; + case TAVOR_IF_EV_TYPE_SEND_Q_DRAINED: + vapi_event_type = VAPI_SEND_QUEUE_DRAINED; + break; + case TAVOR_IF_EV_TYPE_PATH_MIG_FAIL: + vapi_event_type = VAPI_PATH_MIG_REQ_ERROR; + break; + case TAVOR_IF_EV_TYPE_COMM_EST: + vapi_event_type = is_qp ? VAPI_QP_COMM_ESTABLISHED : VAPI_EEC_COMM_ESTABLISHED; + break; + case TAVOR_IF_EV_TYPE_LOCAL_WQ_CATAS_ERR: + vapi_event_type = is_qp ? 
VAPI_LOCAL_WQ_CATASTROPHIC_ERROR : VAPI_LOCAL_EEC_CATASTROPHIC_ERROR; + break; + case TAVOR_IF_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR: + vapi_event_type = VAPI_LOCAL_WQ_INV_REQUEST_ERROR; + break; + case TAVOR_IF_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR: + vapi_event_type = VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR; + break; + case TAVOR_IF_EV_TYPE_SRQ_QP_LAST_WQE_REACHED: + vapi_event_type = VAPI_RECEIVE_QUEUE_DRAINED; + break; + case TAVOR_IF_EV_TYPE_LOCAL_SRQ_CATAS_ERR: + vapi_event_type = VAPI_SRQ_CATASTROPHIC_ERROR; + break; + + default: + MTL_ERROR1("%s: Unknown event type = %d\n", __func__,event_type); + vapi_event_type = VAPI_LOCAL_CATASTROPHIC_ERROR; + } + return vapi_event_type; +} + + + +/************************************************************************************************/ +static void handle_eqe_ib_events(EQP_eq_entry_t *eqe_p, + eq_entry_t *eq_data_p) +{ + HH_event_record_t event_record; + u_int8_t syndrome=0; + VAPI_event_syndrome_t vapi_syndrome = VAPI_EV_SYNDROME_NONE; + event_record.syndrome = VAPI_EV_SYNDROME_NONE; + + FUNC_IN; + switch(eq_data_p->event_type) { +#if 0 /* TK: currently not supported by Tavor */ + case TAVOR_IF_EV_TYPE_LOCAL_EE_CATAS_ERR: +#endif + /* QP/EEC errors */ + case TAVOR_IF_EV_TYPE_PATH_MIG_FAIL: + case TAVOR_IF_EV_TYPE_PATH_MIG: + case TAVOR_IF_EV_TYPE_COMM_EST: + case TAVOR_IF_EV_TYPE_LOCAL_WQ_CATAS_ERR: + case TAVOR_IF_EV_TYPE_LOCAL_WQ_INVALID_REQ_ERR: + case TAVOR_IF_EV_TYPE_LOCAL_WQ_ACCESS_VIOL_ERR: + case TAVOR_IF_EV_TYPE_SEND_Q_DRAINED: + case TAVOR_IF_EV_TYPE_SRQ_QP_LAST_WQE_REACHED: /* this is QP event */ + { + u_int32_t qpn=0; + u_int32_t is_qp=1; + + qpn = MT_EXTRACT_ARRAY32(eq_data_p->event_data, MT_BIT_OFFSET(tavorprm_qp_ee_event_st, qpn_een), + MT_BIT_SIZE(tavorprm_qp_ee_event_st, qpn_een)); + + is_qp = !(MT_EXTRACT_ARRAY32(eq_data_p->event_data, MT_BIT_OFFSET(tavorprm_qp_ee_event_st, e_q), + MT_BIT_SIZE(tavorprm_qp_ee_event_st, e_q))); + + if (is_qp) { + event_record.event_modifier.qpn = qpn; + /* need to translate to VAPI_event_record_type_t */ +/*** warning C4242: 'function' : conversion from 'u_int32_t' to 'MT_bool', possible loss of data ***/ + event_record.etype = tavor2vapi_qp_error_type(eq_data_p->event_type,(MT_bool)is_qp); + } + else { /* EEC is not supported now */ + MTL_ERROR1("%s: Internal error: is_eq = 0 but EEC not supported. eqn=%d\n", __func__, + eqe_p->eqn); + } + break; + } + + case TAVOR_IF_EV_TYPE_LOCAL_SRQ_CATAS_ERR: + { + event_record.event_modifier.srq= + MT_EXTRACT_ARRAY32(eq_data_p->event_data, + MT_BIT_OFFSET(tavorprm_qp_ee_event_st, qpn_een), + MT_BIT_SIZE(tavorprm_qp_ee_event_st, qpn_een)); + event_record.etype = tavor2vapi_qp_error_type(eq_data_p->event_type,FALSE); + break; + } + + /* IB - affiliated errors CQ */ + case TAVOR_IF_EV_TYPE_CQ_ERR: + { + u_int32_t cqnum=0; + + cqnum = MT_EXTRACT_ARRAY32(eq_data_p->event_data, MT_BIT_OFFSET(tavorprm_completion_queue_error_st, cqn), + MT_BIT_SIZE(tavorprm_completion_queue_error_st, cqn)); + syndrome = MT_EXTRACT_ARRAY32(eq_data_p->event_data, MT_BIT_OFFSET(tavorprm_completion_queue_error_st, syndrome), + MT_BIT_SIZE(tavorprm_completion_queue_error_st, syndrome)); + event_record.etype = VAPI_CQ_ERROR; + event_record.event_modifier.cq = cqnum; + event_record.syndrome = (syndrome == TAVOR_IF_CQ_OVERRUN) ? VAPI_CQ_ERR_OVERRUN : + ((syndrome == TAVOR_IF_CQ_ACCSS_VIOL_ERR) ? 
VAPI_CQ_ERR_ACCESS_VIOL : + VAPI_EV_SYNDROME_NONE); + MTL_ERROR1("%s: CQ error on CQ number= %d syndrome is %s (%d)\n", __func__,cqnum, + VAPI_event_syndrome_sym(event_record.syndrome), event_record.syndrome); + break; + } + /* Unaffiliated errors */ + case TAVOR_IF_EV_TYPE_LOCAL_CATAS_ERR: + { + MTL_ERROR1("%s: CATASTROPHIC ERROR - should not be in this EQ: \n", __func__); + MTL_ERROR1("CATASTROPHIC ERROR: data: %x %x %x %x %x %x \n", *(eq_data_p->event_data), + *(eq_data_p->event_data+1), *(eq_data_p->event_data+2), *(eq_data_p->event_data+3), + *(eq_data_p->event_data+4), *(eq_data_p->event_data+5)); + break; + } + case TAVOR_IF_EV_TYPE_PORT_ERR: + { + IB_port_t port; + port = MT_EXTRACT_ARRAY32(eq_data_p->event_data,MT_BIT_OFFSET(tavorprm_port_state_change_st, p), + MT_BIT_SIZE(tavorprm_port_state_change_st, p)); + + if (eq_data_p->event_sub_type == TAVOR_IF_SUB_EV_PORT_DOWN) { + event_record.etype = VAPI_PORT_ERROR; + } + else if (eq_data_p->event_sub_type == TAVOR_IF_SUB_EV_PORT_UP) { + event_record.etype = VAPI_PORT_ACTIVE; + } + else { + MTL_ERROR1("%s: Wrong sub-type for Port event on port=%d sub_type=%d\n", __func__,port,eq_data_p->event_sub_type); + } + + event_record.event_modifier.port = port; + MTL_DEBUG1("%s: Port change event on port=%d sub_type=%d\n", __func__,port,eq_data_p->event_sub_type); + break; + } + default: + MTL_ERROR1("%s: Unsupported event type = %d\n", __func__,eq_data_p->event_type); + /* in case of catastrophic error - no call to upper layer but notify HOB to handle */ + MTL_ERROR1("CATASTROPHIC ERROR: data: %x %x %x %x %x %x \n", *(eq_data_p->event_data), + *(eq_data_p->event_data+1), *(eq_data_p->event_data+2), *(eq_data_p->event_data+3), + *(eq_data_p->event_data+4), *(eq_data_p->event_data+5)); + + THH_hob_fatal_error(eqe_p->eventp_p->hob, THH_FATAL_EVENT, vapi_syndrome); + /**** !!!! FUNCTION returns HERE !!!! */ + FUNC_OUT; + return; + } /* switch(eq_data_p->event_type) */ + + /* call the event callback */ + eqe_p->handler.ib_comp_event_h(eqe_p->eventp_p->hh_hca_hndl, &event_record, eqe_p->priv_context); + + FUNC_OUT; + +} + + +/************************************************************************************************/ + +void fatal_error_dpc(DPC_CONTEXT_t *dpc_obj_p) +{ + VAPI_event_syndrome_t syndrome; + + THH_eventp_t eventp = (THH_eventp_t)dpc_obj_p->func_ctx; /* eventp of this hob */ + FUNC_IN; + + if (eventp->fatal_type == THH_FATAL_MASTER_ABORT) { + syndrome = VAPI_CATAS_ERR_MASTER_ABORT; + MTL_DEBUG1("%s: VAPI_CATAS_ERR_MASTER_ABORT\n", __func__); + } + else { + syndrome = VAPI_CATAS_ERR_GENERAL; + MTL_DEBUG1("%s: VAPI_CATAS_ERR_GENERAL\n", __func__); + } + + THH_hob_fatal_error(eventp->hob, eventp->fatal_type, syndrome); + + FUNC_OUT; +} diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.c new file mode 100644 index 00000000..96a1d6df --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.c @@ -0,0 +1,1359 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +//#define MTPERF +#include + +#ifdef EQS_CMD_IN_DDR +#include +#endif + + +/*================ macro definitions ===============================================*/ + + +/*** error C4296: '<' : expression is always false ***/ +#define NOT_VALID_EQ_NUM(eq_num) ((eq_num) >= EQP_MAX_EQS) + + +/*================ type definitions ================================================*/ + +#define PHYS_EQ_MAX_SIZE 0x2000 + +/*================ global variables definitions ====================================*/ + + +MTPERF_EXTERN_SEGMENT(interupt_segment); +MTPERF_EXTERN_SEGMENT(dpc_segment); +MTPERF_EXTERN_SEGMENT(inter2dpc_segment); +MTPERF_EXTERN_SEGMENT(part_of_DPC_segment); + + +/*================ static functions prototypes =====================================*/ + +#ifdef MAX_DEBUG +static char * const eq_type_str(EQ_type_t eq_type) +{ + switch ( eq_type ) { + case EQP_CQ_COMP_EVENT: + return "EQP_CQ_COMP_EVENT"; + case EQP_IB_EVENT: + return "EQP_IB_EVENT"; + case EQP_CMD_IF_EVENT: + return "EQP_CMD_IF_EVENT"; + case EQP_MLX_EVENT: + return "EQP_MLX_EVENT"; + case EQP_CATAS_ERR_EVENT: + return "EQP_CATAS_ERR_EVENT"; + default: + return "***UNKNOWNN***"; + } +} +#endif + +#ifdef __WIN__ +#ifndef MAX_DEBUG +static char * const eq_type_str(EQ_type_t eq_type) { return NULL; } +#endif +#endif + +static THH_eqn_t insert_new_eq(THH_eventp_t eventp, + void* eventh, + void *priv_context, + MT_size_t max_outs_eqe, + EQ_type_t eq_type); + +static HH_ret_t map_eq(THH_eventp_t eventp, + THH_eqn_t eqn, + THH_eventp_mtmask_t tavor_mask); + +static void remove_eq(THH_eventp_t eventp, + THH_eqn_t eqn); + + +static HH_ret_t prepare_intr_resources(THH_eventp_t eventp); + +static HH_ret_t remove_intr_resources(THH_eventp_t eventp); + +static HH_ret_t add_catast_err_eq(THH_eventp_t eventp); + +/*================ external functions definitions ====================================*/ + + + +extern void eq_polling_dpc(DPC_CONTEXT_t *func_ctx); + +extern void fatal_error_dpc(DPC_CONTEXT_t *func_ctx); + +extern BOOLEAN thh_intr_handler(MT_ulong_ptr_t eventp, void* isr_ctx1, void* isr_ctx2); + + + + +/************************************************************************ + * Function: THH_eventp_create + * + + Arguments: + version_p - Version information + event_res_p - See 7.2.1 THH_eventp_res_t - Event processing resources 
on page 63 + cmd_if - Command interface object to use for EQ setup commands + kar - KAR object to use for EQ doorbells + eventp_p - Returned object handle + + Returns: + HH_OK + HH_EINVAL -Invalid parameters + HH_EAGAIN -Not enough resources to create object + HH_ERR - internal error + + Description: + Create THH_eventp object context. No EQs are set up until an event consumer registers + using one of the functions below. + + +************************************************************************/ + +HH_ret_t THH_eventp_create ( /*IN */ THH_hob_t hob, + /*IN */ THH_eventp_res_t *event_res_p, + /*IN */ THH_uar_t kar, + /*OUT*/ THH_eventp_t *eventp_p ) +{ + + THH_eventp_t new_eventp_p = NULL; + u_int32_t i; + HH_ret_t ret; + + FUNC_IN; + + + /* allocation of object structure */ + new_eventp_p = (THH_eventp_t)VMALLOC(sizeof(THH_eventp_internal_t)); + if (!new_eventp_p) { + MTL_ERROR4("%s: Cannot allocate EVENTP object.\n", __func__); + MT_RETURN(HH_EAGAIN); + } + + memset(new_eventp_p,0,sizeof(THH_eventp_internal_t)); + MOSAL_mutex_init(&new_eventp_p->mtx); + for (i= 0; i < EQP_MAX_EQS; i++) { + SET_EQ_FREE(new_eventp_p,i); + new_eventp_p->eq_table[i].eq_buff_entry_num= 0; + new_eventp_p->eq_table[i].alloc_mem_addr_p= 0; + if (MOSAL_spinlock_init(&(new_eventp_p->eq_table[i].state_lock)) != MT_OK){ + MTL_ERROR4("%s: Failed to initializing spinlocks.\n", __func__); + ret= HH_ERR; + goto err_free_mem; + } +#ifdef SIMULTANUOUS_DPC +#ifdef IMPROVE_EVENT_HANDLING + new_eventp_p->eq_table[i].dpc_lock = 0; +#else + if (MOSAL_spinlock_init(&(new_eventp_p->eq_table[i].dpc_lock)) != MT_OK){ + MTL_ERROR4("%s: Failed to initializing dpc_lock.\n", __func__); + ret= HH_ERR; + goto err_free_mem; + } +#endif +#endif + + } + + /* filling eventp structure */ + memcpy(&new_eventp_p->event_resources, event_res_p, sizeof(THH_eventp_res_t)); + new_eventp_p->max_eq_num_used = EQP_MIN_EQ_NUM; + new_eventp_p->fatal_type = THH_FATAL_NONE; + new_eventp_p->have_fatal = FALSE; + new_eventp_p->hob = hob; + if (THH_hob_get_cmd_if (hob, &new_eventp_p->cmd_if) != HH_OK){ + MTL_ERROR4("%s: Cannot get cmd_if object handle.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } + new_eventp_p->kar = kar; + if ((ret = THH_uar_get_index(new_eventp_p->kar, &new_eventp_p->kar_index)) != HH_OK){ /* the KAR */ + MTL_ERROR4("%s: Failed to THH_uar_get_index. ret=%d.\n", __func__,ret); + ret = HH_ERR; + goto err_free_mem; + } + if (THH_hob_get_ver_info(hob, &new_eventp_p->version) != HH_OK){ + MTL_ERROR4("%s: Cannot get version.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } + if (THH_hob_get_mrwm(hob, &new_eventp_p->mrwm_internal) != HH_OK){ + MTL_ERROR4("%s: Cannot get mrwm_internal.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } +#ifdef EQS_CMD_IN_DDR + if (THH_hob_get_ddrmm(hob, &new_eventp_p->ddrmm) != HH_OK){ + MTL_ERROR4("%s: Cannot get ddrmm.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } +#endif + if (THH_hob_get_hca_hndl(hob, &new_eventp_p->hh_hca_hndl) != HH_OK){ + MTL_ERROR4("%s: Cannot get HH_HCA_hndl.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } + + new_eventp_p->ctx_internal = MOSAL_get_kernel_prot_ctx(); + + /* init DPC of master abort & catastrophic error*/ + MOSAL_DPC_init(&new_eventp_p->fatal_error_dpc, fatal_error_dpc, (MT_ulong_ptr_t)new_eventp_p, + MOSAL_SINGLE_CTX); + + MTL_TRACE4("%s: SUCCESS to MOSAL_DPC_init the master_abort_dpc. 
\n", __func__); + + if (prepare_intr_resources(new_eventp_p) != HH_OK){ + MTL_ERROR4("%s: Cannot set interrupt resources.\n", __func__); + ret = HH_ERR; + goto err_free_mem; + } + + /* initialize value of CLR_ECR for all EQs */ + for ( i=EQP_MIN_EQ_NUM; ieq_table[i].clr_ecr_addr = new_eventp_p->clr_ecr_l_base; + } + else { + new_eventp_p->eq_table[i].clr_ecr_addr = new_eventp_p->clr_ecr_h_base; + } + new_eventp_p->eq_table[i].clr_ecr_mask = MOSAL_cpu_to_be32(1 << (i % 32)); + } + + /* setup the catastrophic error EQ - must be a separete EQ initialzed at the begining */ + add_catast_err_eq(new_eventp_p); + + *eventp_p = new_eventp_p; + MT_RETURN(HH_OK); + + + /* error handling cleanup */ +err_free_mem: + VFREE(new_eventp_p); + MT_RETURN(ret); + +} + +/************************************************************************ + * Function: THH_eventp_destroy + * + + Arguments: + eventp -The THH_eventp object to destroy + + Returns: + HH_OK + HH_EINVAL -Invalid event object handle + + Description: + Destroy object context. If any EQs are still set they are torn-down when this + function is called Those EQs should generate an HCA catastrophic error ((i.e.callbacks + for IB compliant, proprietary and debug events are invoked) since this call implies + abnormal HCA closure (in a normal HCA closure all EQs should be torn-down before a + call to this function). + + + ************************************************************************/ + +HH_ret_t THH_eventp_destroy( /*IN */ THH_eventp_t eventp) +{ + THH_eqn_t eqn; + + FUNC_IN; + + if (eventp == NULL) { + MTL_ERROR4("%s: eventp is NULL.\n", __func__); + MT_RETURN( HH_EINVAL); + } + + //MTPERF_REPORT_PRINTF(interupt_segment); + //MTPERF_REPORT_PRINTF(inter2dpc_segment); + //MTPERF_REPORT_PRINTF(dpc_segment); + //MTPERF_REPORT_PRINTF(part_of_DPC_segment); + + + /* Teardown any EQ still up */ + for (eqn=0; eqnmtx); + remove_intr_resources(eventp); + MOSAL_mutex_rel(&eventp->mtx); + MOSAL_mutex_free(&eventp->mtx); + VFREE(eventp); + MT_RETURN( HH_OK); + + +} +/************************************************************************ + * Function: THH_eventp_setup_comp_eq + * + Arguments: + eventp -The THH_eventp object handle + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created + eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g.EQC,mem- ory,etc.). + + Description: + Set up an EQ for completion events and register given handler as a callback for + such events. + Note that the created EQ is not mapped to any event at this stage since completion + events are mapped using the CQC set up on CQ creation. + + + ************************************************************************/ + + +HH_ret_t THH_eventp_setup_comp_eq(/*IN */ THH_eventp_t eventp, + /*IN */ HH_comp_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p ) + +{ + THH_eqn_t new_eq; + + FUNC_IN; + + if (eventp == NULL || eventh == NULL || max_outs_eqe == 0) { + MTL_ERROR4("%s: NULL parameter. 
eventp=%p, eventh=%p, max_outs_eqe=%d\n", __func__, + eventp, eventh, (u_int32_t)max_outs_eqe); + MT_RETURN(HH_EINVAL); + } + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + new_eq = insert_new_eq(eventp, (void*)eventh, priv_context, max_outs_eqe, + EQP_CQ_COMP_EVENT); + + if (new_eq == EQP_MAX_EQS) { /* All EQs are occupied */ + MTL_ERROR4("%s: Fail in adding new EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_EAGAIN); + } + MTL_DEBUG1("%s success: eqn=%d\n", __func__, new_eq); + + *eqn_p = new_eq; + MOSAL_mutex_rel(&eventp->mtx); + + MT_RETURN( HH_OK); +} + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created + eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g.EQC,mem- ory,etc.). + HH_ERR - Internal error + + Description: + Set up an EQ and map events given in mask to it. Events over new EQ are reported to + given handler. + + + ************************************************************************/ + +HH_ret_t THH_eventp_setup_ib_eq(/*IN */ THH_eventp_t eventp, + /*IN */ HH_async_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p ) + +{ + THH_eqn_t new_eq; + THH_eventp_mtmask_t tavor_mask=0; + + FUNC_IN; + + if (eventp == NULL || eventh == NULL || max_outs_eqe == 0) { + MTL_ERROR4("%s: NULL parameter. eventp=%p, eventh=%p, max_outs_eqe=%d\n", __func__, + eventp, eventh, (u_int32_t)max_outs_eqe); + MT_RETURN( HH_EINVAL); + } + + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + + new_eq = insert_new_eq(eventp, (void*)eventh, priv_context, max_outs_eqe, + EQP_IB_EVENT); + + if (new_eq == EQP_MAX_EQS) { /* All EQs are occupied */ + MTL_ERROR4("%s: Fail in adding new EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_EAGAIN); + } + + /* map the EQ to events */ + /* prepare mask for all IB events */ + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_PATH_MIG); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_COMM_EST); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_SEND_Q_DRAINED); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_CQ_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_LOCAL_WQ_CATAS_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_LOCAL_EE_CATAS_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_PATH_MIG_FAIL ); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_PORT_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_LOCAL_WQ_INVALID_REQ_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_LOCAL_WQ_ACCESS_VIOL_ERR); + if (eventp->event_resources.is_srq_enable) { + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_LOCAL_SRQ_CATAS_ERR); + TAVOR_IF_EV_MASK_SET(tavor_mask,TAVOR_IF_EV_MASK_SRQ_QP_LAST_WQE_REACHED); + } + + if (map_eq(eventp, new_eq, tavor_mask) != HH_OK){ + MTL_ERROR4("%s: Failed to map EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_ERR); + } + MTL_DEBUG1("%s: Succeeded to map EQ=%d. 
mask="U64_FMT"\n", __func__, new_eq, (u_int64_t)tavor_mask); + *eqn_p = new_eq; + + MOSAL_mutex_rel(&eventp->mtx); + + MT_RETURN( HH_OK); +} + + + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + + Arguments: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g.EQC,mem- ory,etc.). + EE_ERR - internal error + + Description: + This function setup an EQ and maps command interface events to it. It also takes + care of notifying the THH_cmd associated with it (as de ned on the THH_eventp creation)of + this EQ availability using the THH_cmd_set_eq() (see page 38)function.This causes the THH_cmd + to set event generation to given EQ for all commands dispatched after this noti cation. The + THH_eventp automatically sets noti cation of events from this EQ to the THH_cmd_eventh() + (see page 39)callback of associated THH_cmd. The function should be invoked by the THH_hob + in order to cause the THH_cmd associated with this eventp to execute commands using events. + + + ************************************************************************/ + +HH_ret_t THH_eventp_setup_cmd_eq ( /*IN */ THH_eventp_t eventp, + /*IN */ MT_size_t max_outs_eqe) + +{ + + THH_eqn_t new_eq; + THH_eventp_mtmask_t tavor_mask = 0; + HH_ret_t ret; + + + FUNC_IN; + + + if (eventp == NULL || max_outs_eqe == 0) { + MTL_ERROR4("%s: NULL parameter. eventp=%p, max_outs_eqe=%d\n", __func__, + eventp, (u_int32_t)max_outs_eqe); + MT_RETURN( HH_EINVAL); + } + + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + + new_eq = insert_new_eq(eventp, NULL, NULL, max_outs_eqe, EQP_CMD_IF_EVENT); + + if (new_eq == EQP_MAX_EQS) { /* All EQs are occupied */ + MTL_ERROR4("%s: Fail in adding new EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_EAGAIN); + } + + /* map the EQ to events */ + TAVOR_IF_EV_MASK_SET(tavor_mask, TAVOR_IF_EV_MASK_CMD_IF_COMP); + if (map_eq(eventp,new_eq,tavor_mask) != HH_OK){ + MTL_ERROR4("%s: Failed to map EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_ERR); + } +#if 1 + if ((ret = THH_cmd_set_eq(eventp->cmd_if)) != HH_OK){ + MTL_ERROR4("%s: Failed to THH_cmd_set_eq. ret=%d\n", __func__, ret); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_ERR); + } +#else + ret=HH_OK; +#endif + MOSAL_mutex_rel(&eventp->mtx); + + MTL_DEBUG1("%s success: eqn=%d\n", __func__, new_eq); + + MT_RETURN( HH_OK); +} + + +/************************************************************************ + * Function: THH_eventp_setup_mt_eq + * + Arguments: + eventp -The THH_eventp object handle + event_mask -Flags combination of events to map to this EQ + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g.EQC,mem- ory,etc.). + HH_ERR - internal error + + Description: + Set up an EQ for reporting events beyond IB-spec.(debug events and others). All events + given in the event_mask are mapped to the new EQ. 
+ + + + ************************************************************************/ + +HH_ret_t THH_eventp_setup_mt_eq(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eventp_mtmask_t event_mask, + /*IN */ THH_mlx_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p) + +{ + THH_eqn_t new_eq; + + FUNC_IN; + + if (eventp == NULL || eventh == NULL || max_outs_eqe == 0 || event_mask == 0) { + MTL_ERROR4("%s: NULL parameter. eventp=%p, eventh=%p, max_outs_eqe=%d, event_mask="U64_FMT"\n", __func__, + eventp, eventh, (u_int32_t)max_outs_eqe, (u_int64_t)event_mask); + MT_RETURN( HH_EINVAL); + } + + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + + new_eq = insert_new_eq(eventp, (void*)eventh, priv_context, max_outs_eqe, EQP_MLX_EVENT); + + if (new_eq == EQP_MAX_EQS) { /* All EQs are occupied */ + MTL_ERROR4("%s: Fail in adding new EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_EAGAIN); + } + + /* map the EQ to events */ + if (map_eq(eventp,new_eq,event_mask) != HH_OK){ + MTL_ERROR4("%s: Failed to map EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_ERR); + } + + *eqn_p = new_eq; + MOSAL_mutex_rel(&eventp->mtx); + + + MT_RETURN( HH_OK); +} + + +/************************************************************************ + * Function: + * + Arguments: + eventp + eq -The EQ to replace handler for + eventh -The new handler + priv_context -Private context to be used with handler + + Returns: + HH_OK + HH_EINVAL + HH_ENORSC -Given EQ is not set up + + Description: + Replace the callback function of an EQ previously set up. This may be used + in order to change the handler without loosing events.It retains the EQ and + just replaces the callback function.All EQEs polled after a return from this + function will be reported to the new handler. + + ************************************************************************/ + +HH_ret_t THH_eventp_replace_handler(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eqn_t eqn, + /*IN */ THH_eventp_handler_t eventh, + /*IN */ void *priv_context) + +{ + HH_ret_t ret=HH_OK; + + FUNC_IN; + + if (eventp == NULL || NOT_VALID_EQ_NUM(eqn)){ + MTL_ERROR4("%s: Invalid parameter. 
eventp=%p, eq_num=%d\n", __func__, + eventp, (u_int32_t)eqn); + MT_RETURN( HH_EINVAL); + } + + MOSAL_spinlock_irq_lock(&(eventp->eq_table[eqn].state_lock)); + + if (!IS_EQ_VALID(eventp,eqn)){ + MOSAL_spinlock_unlock(&(eventp->eq_table[eqn].state_lock)); + MTL_ERROR4("%s: EQ %d is not in use.\n", __func__, (u_int32_t)eqn); + MT_RETURN( HH_EINVAL); + } + + switch (eventp->eq_table[eqn].eq_type) + { + case EQP_CQ_COMP_EVENT: + if(eventh.comp_event_h == NULL) { + MTL_ERROR4("%s: Invalid event handler is NULL\n", __func__); + ret = HH_EINVAL; + } + else { + eventp->eq_table[eqn].handler.comp_event_h= eventh.comp_event_h; + } + break; + case EQP_IB_EVENT: + if(eventh.ib_comp_event_h == NULL) { + MTL_ERROR4("%s: Invalid event handler is NULL\n", __func__); + ret = HH_EINVAL; + } + else { + eventp->eq_table[eqn].handler.ib_comp_event_h= eventh.ib_comp_event_h; + } + break; + case EQP_CMD_IF_EVENT: /* no event handler in this case */ + break; + case EQP_MLX_EVENT: + if(eventh.mlx_event_h == NULL) { + MTL_ERROR4("%s: Invalid event handler is NULL\n", __func__); + ret = HH_EINVAL; + } + else { + eventp->eq_table[eqn].handler.mlx_event_h= eventh.mlx_event_h; + } + break; + case EQP_CATAS_ERR_EVENT: + MTL_ERROR4("%s: Internal error: EQP_CATAS_ERR_EVENT should not get any handle\n", __func__); + break; + default: + MTL_ERROR4("%s: Internal error: invalid event type.\n", __func__); + ret= HH_ERR; + } + eventp->eq_table[eqn].priv_context = priv_context; + MOSAL_spinlock_unlock(&(eventp->eq_table[eqn].state_lock)); + + MT_RETURN(ret); +} + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + eqn -The EQ to teardown + + Returns: + HH_OK + HH_EINVAL -Invalid handles (e.g.no such EQ set up) + + Description: + This function tear down an EQ set up by one of the previous functions. Given eqn + which EQ to tear down. This teardown includes cleaning of any context relating to + the callback associated with it. + + + ************************************************************************/ + +HH_ret_t THH_eventp_teardown_eq(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eqn_t eqn ) + +{ + THH_cmd_status_t cmd_ret; + HH_ret_t ret; + unsigned long i=0; + + FUNC_IN; + + if (eventp == NULL || NOT_VALID_EQ_NUM(eqn)){ + MTL_ERROR4("%s: Invalid parameter. eventp=%p, eq_num=%d\n", __func__, + eventp, (u_int32_t)eqn); + MT_RETURN( HH_EINVAL); + } + + + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + + MOSAL_spinlock_irq_lock(&(eventp->eq_table[eqn].state_lock)); + if (!IS_EQ_VALID(eventp,eqn)) { /* Given EQN is not in use ? */ + MOSAL_spinlock_unlock(&(eventp->eq_table[eqn].state_lock)); + if (eventp->max_eq_num_used == eqn) { /* the highest EQ is removed */ + eventp->max_eq_num_used--; + } + MOSAL_mutex_rel(&(eventp->mtx)); + return HH_EINVAL; + } + SET_EQ_CLEANUP(eventp,eqn); /* Mark EQ while in the cleanup stage (disable DPC sched. 
from ISR) */ + MOSAL_spinlock_unlock(&(eventp->eq_table[eqn].state_lock)); + + /* Wait for all outstanding DPCs */ + while (eventp->eq_table[eqn].dpc_cntr) + { + /* this must be done for Linux only since there is no preemption in Linux */ + /* (DPC/tasklet cannot run while this context holds the CPU) */ +#ifdef __LINUX__ + schedule(); +#endif + i++; + if (i==0xffffffff) { + MTL_DEBUG4("%s: dpc_cntr was not zero after %lu iterations for eq_num=%d, dpc_cntr=%d\n", + __func__, i, eqn, eventp->eq_table[eqn].dpc_cntr); + i=0; + //break; + } + } + + /* for CMD_IF event need to notify the cmd_if object */ + if (eventp->eq_table[eqn].eq_type == EQP_CMD_IF_EVENT){ + if ((ret = THH_cmd_clr_eq(eventp->cmd_if)) != HH_OK){ + MTL_ERROR4("%s: Failed to THH_cmd_clr_eq. ret=%d\n", __func__, ret); + } + } + + /* unmap events of EQ by putting special EQ number */ + /* in case of fatal error - do not call CMD_IF */ + if (eventp->have_fatal == FALSE) { + if ((cmd_ret = THH_cmd_MAP_EQ(eventp->cmd_if, TAVOR_IF_UNMAP_QP_BIT, eventp->eq_table[eqn].events_mask)) + != THH_CMD_STAT_OK){ + MTL_ERROR4("%s: Failed to unmap EQ events. CMD_IF error:%d.\n", __func__,cmd_ret); + } + } + remove_eq(eventp,eqn); + + MOSAL_mutex_rel(&(eventp->mtx)); + MT_RETURN( HH_OK); + +} + + +/************************************************************************ + * Function: THH_eventp_notify_fatal + * + Arguments: + eventp -The THH_eventp object handle + fatal_err - the error code of the fatal error + + Return: + HH_OK + HH_EINVAL -Invalid handle + + Description: + This function is invoked by THH_hob_fatal_error to notify eventp when a fatal error + has occurred. + + ************************************************************************/ + +HH_ret_t THH_eventp_notify_fatal ( /*IN */ THH_eventp_t eventp, + /*IN */ THH_fatal_err_t fatal_err) +{ + + FUNC_IN; + + if (eventp == NULL ) { + MTL_ERROR4("%s: NULL parameter. eventp=%p\n",__func__,eventp); + MT_RETURN( HH_EINVAL); + } + + eventp->have_fatal = TRUE; + MT_RETURN(HH_OK); +} + +/************************************************************************ + * Function: THH_eventp_handle_fatal + * + Arguments: + eventp -The THH_eventp object handle + + Return: + HH_OK + HH_EINVAL -Invalid handle + + Description: + This function is invoked by THH_hob_fatal_error to handle the fatal error that + has occurred. + + ************************************************************************/ + +HH_ret_t THH_eventp_handle_fatal ( /*IN */ THH_eventp_t eventp) +{ + + FUNC_IN; + + if (eventp == NULL ) { + MTL_ERROR4("%s: NULL parameter. 
eventp=%p\n", __func__, eventp); + MT_RETURN( HH_EINVAL); + } + + MT_RETURN(HH_OK); +} + + +/********************* STATIC FUNCTIONS ************************************************/ + +/* This function must be invoked with eventp's mutex locked */ +static THH_eqn_t insert_new_eq(THH_eventp_t eventp, + void* eventh, + void *priv_context, + MT_size_t max_outs_eqe, + EQ_type_t eq_type) +{ + unsigned int new_eq=EQP_MAX_EQS+1; + EQP_eq_entry_t *new_entry; + THH_internal_mr_t params; + THH_eqc_t eq_context; + MT_size_t entries_num; + THH_cmd_status_t cmd_ret; + HH_ret_t ret=HH_OK; + MT_bool virtual_eq = TRUE; + VAPI_size_t alloc_mem_bytes_size; + call_result_t rc; + + + FUNC_IN; + /* TK NOTE: this will not work if we have more then one EQ from the same type + In this case we will need to change this huristic */ + switch (eq_type) { + case EQP_CATAS_ERR_EVENT: + new_eq = EQP_CATAS_ERR_EQN; + break; + case EQP_CQ_COMP_EVENT: + new_eq = EQP_CQ_COMP_EQN; + break; + case EQP_IB_EVENT: + new_eq = EQP_ASYNCH_EQN; + break; + case EQP_CMD_IF_EVENT: + new_eq = EQP_CMD_IF_EQN; + break; + case EQP_MLX_EVENT: /* currently not supported */ + break; + default: + MTL_ERROR4("%s: Internal error: invalid event queue type.\n", __func__); + return EQP_MAX_EQS; + } + + MTL_DEBUG3("%s:eq_type=%s; eq_num=%d \n", __func__, eq_type_str(eq_type), new_eq); + + if (new_eq != EQP_MAX_EQS+1) { /* one of the above types */ + SET_EQ_INIT(eventp,new_eq); /* reserve EQ while initializing */ + } + else { /* Not one of the above types */ + /* TK: this code is not realy working now since no other types of EQs are exposed */ + for (new_eq= EQP_MIN_EQ_NUM; new_eq < EQP_MAX_EQS; new_eq++) {/* Find free EQ */ + /* reserve EQ while initializing */ + MOSAL_spinlock_irq_lock(&(eventp->eq_table[new_eq].state_lock)); + if (IS_EQ_FREE(eventp,new_eq)) { /* find free entry */ + SET_EQ_INIT(eventp,new_eq); /* reserve EQ while initializing */ + if (new_eq+1 > eventp->max_eq_num_used) { + eventp->max_eq_num_used=new_eq+1; /* should be done under spinlock since the intr handler use this */ + } + MOSAL_spinlock_unlock(&(eventp->eq_table[new_eq].state_lock)); + break; + } + MOSAL_spinlock_unlock(&(eventp->eq_table[new_eq].state_lock)); + } + } + + /* no free EQ */ + if (new_eq == EQP_MAX_EQS) { + MTL_ERROR4("%s: All EQs are busy.\n", __func__); + MT_RETURN( EQP_MAX_EQS); + } + + MTL_DEBUG3("%s: in params: max_outs_eqe = "SIZE_T_FMT", eq_type=%s; got EQ num = %d\n", + __func__, max_outs_eqe, eq_type_str(eq_type), new_eq); + new_entry = &(eventp->eq_table[new_eq]); + /* number of outs_eqes must be power of 2 */ + entries_num = THH_CYCLIC_BUFF_SIZE(max_outs_eqe); /* already take care for the empty entry in cyclic buff */ + +#ifdef EQS_CMD_IN_DDR + /* when EQEs in DDR we can get the alignment we need */ + alloc_mem_bytes_size = entries_num*EQ_ENTRY_SIZE; + virtual_eq=FALSE; + ret = THH_ddrmm_alloc(eventp->ddrmm, alloc_mem_bytes_size, floor_log2(EQ_ENTRY_SIZE), + &new_entry->alloc_mem_addr_p); + if ( ret != HH_OK ) { + MTL_ERROR4("%s: failed to allocate ddr memory\n", __func__); + goto err_free_eq; + } + new_entry->eq_buff = (void*)MOSAL_io_remap(new_entry->alloc_mem_addr_p, alloc_mem_bytes_size); + if ( !new_entry->eq_buff ) { + goto err_free_alloc; + } + memset((void*)new_entry->eq_buff,0xff,alloc_mem_bytes_size); +#else + /* EQS are in main memory */ + /* need to add 1 for the alignment */ + alloc_mem_bytes_size = (entries_num+1)*EQ_ENTRY_SIZE; + /* if this is a small EQ then work with physically contiguous memory */ + if (alloc_mem_bytes_size <= 
PHYS_EQ_MAX_SIZE) { +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + new_entry->alloc_mem_addr_p = (MT_virt_addr_t)MOSAL_pci_phys_alloc_consistent((MT_size_t)alloc_mem_bytes_size, floor_log2(EQ_ENTRY_SIZE)); + if (new_entry->alloc_mem_addr_p != 0) { + virtual_eq=FALSE; + MTL_TRACE5("%s: EQ %d is going to be with physical memory. \n", __func__,new_eq); + } + } + + if (virtual_eq) { +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'const size_t', possible loss of data ***/ + new_entry->alloc_mem_addr_p = (MT_virt_addr_t)MOSAL_pci_virt_alloc_consistent((size_t)alloc_mem_bytes_size, floor_log2(EQ_ENTRY_SIZE)); + if (!new_entry->alloc_mem_addr_p) { + MTL_ERROR4("%s: Cannot allocate EQE buffer.\n", __func__); + goto err_free_eq; + } + } + + /* EQEs cyclic buffer should be aligned to entry size */ + new_entry->eq_buff = (void *)MT_UP_ALIGNX_VIRT(new_entry->alloc_mem_addr_p, floor_log2(EQ_ENTRY_SIZE)); +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'size_t', possible loss of data ***/ + memset((void*)new_entry->alloc_mem_addr_p,0xff,(size_t)alloc_mem_bytes_size); +#endif + + + MTL_DEBUG3("%s: real entries_num = "SIZE_T_FMT", table size= "SIZE_T_FMT"\n", __func__, entries_num,(entries_num * EQ_ENTRY_SIZE) ); + new_entry->pd = THH_RESERVED_PD; + new_entry->priv_context = priv_context; + new_entry->dpc_cntr= 0; + new_entry->eventp_p = eventp; + new_entry->virtual_eq = virtual_eq; + new_entry->eq_type = eq_type; + new_entry->eqn = new_eq; + new_entry->cons_indx= 0; + switch (eq_type) + { + case EQP_CQ_COMP_EVENT: + new_entry->handler.comp_event_h = (HH_comp_eventh_t)eventh; + break; + case EQP_IB_EVENT: + new_entry->handler.ib_comp_event_h = (HH_async_eventh_t)eventh; + break; + case EQP_CMD_IF_EVENT: /* no event handler in this case */ + break; + case EQP_MLX_EVENT: + new_entry->handler.mlx_event_h = (THH_mlx_eventh_t)eventh; + break; + case EQP_CATAS_ERR_EVENT: + new_entry->handler.mlx_event_h = NULL; /* no handler for catast error */ + break; + default: + MTL_ERROR4("%s: Internal error: invalid event queue type.\n", __func__); + goto err_free_mem; + } + + if (virtual_eq) { + /* registering memory region for this buffer */ + memset(&params, 0, sizeof(params)); + params.start = (IB_virt_addr_t)(MT_virt_addr_t)(new_entry->eq_buff); + params.size = (VAPI_size_t)entries_num * EQ_ENTRY_SIZE; + params.pd = THH_RESERVED_PD; + params.vm_ctx = MOSAL_get_kernel_prot_ctx(); //eventp->ctx_internal; + params.force_memkey = FALSE; + + MTL_DEBUG4("%s: registering mem region. start addr="U64_FMT", size="U64_FMT"\n", __func__, + params.start, params.size); + if((ret = THH_mrwm_register_internal(eventp->mrwm_internal, &params, &new_entry->mem_lkey)) != HH_OK){ + MTL_ERROR4("%s: Failed to register EQ buffer in memory. ret=%d\n", __func__, ret); + goto err_free_mem; + } + MTL_TRACE4("%s: SUCCESS to register EQ buffer in memory. \n", __func__); + } + else { + new_entry->mem_lkey = 0; + } + + /* prepare EQ for HW ownership */ + memset(&eq_context,0,sizeof(THH_eqc_t)); + eq_context.st = EQ_STATE_ARMED; + + if (eq_type == EQP_CATAS_ERR_EVENT) { + eq_context.oi = TRUE; /* Overrun detection ignore */ + } + else { + eq_context.oi = FALSE; /* do not ignore overrun detection */ + } + + eq_context.tr = virtual_eq; /* Translation Required. If set - EQ accesses undergo address translation. 
*/ + eq_context.owner = THH_OWNER_HW; /* SW/HW ownership */ + eq_context.status = EQ_STATUS_OK; /* EQ status:\;0000 - OK\;1001 - EQ overflow\;1010 - EQ write failure */ + if (virtual_eq) { + eq_context.start_address = (u_int64_t)(MT_virt_addr_t)new_entry->eq_buff; /* Start Address of Event Queue. Must be aligned on 32-byte boundary */ + } + else { +#ifdef EQS_CMD_IN_DDR + eq_context.start_address = (u_int64_t)(new_entry->alloc_mem_addr_p); +#else + MT_phys_addr_t pa; + + rc = MOSAL_virt_to_phys(MOSAL_get_kernel_prot_ctx(), (MT_virt_addr_t)new_entry->eq_buff, &pa); + if ( rc != MT_OK ) { + MTL_ERROR4(MT_FLFMT("%s: failed va=%p"), __func__, new_entry->eq_buff); + goto err_unreg_mem; + } + else { + eq_context.start_address = (u_int64_t)pa; + } +#endif + } + + if ((ret = THH_uar_get_index(eventp->kar, &eq_context.usr_page)) != HH_OK){ /* the KAR */ + MTL_ERROR4("%s: Failed to THH_uar_get_index. ret=%d.\n", __func__,ret); + goto err_unreg_mem; + } + eq_context.log_eq_size = floor_log2(entries_num); /* Log2 of the amount of entries in the EQ */ + eq_context.intr = eventp->event_resources.intr_clr_bit; /* Interrupt (message) to be generated to report event to INT layer. + \;0000iiii - specifies GPIO pin to be asserted + \;1jjjjjjj - specificies type of interrupt message to be generated (total 128 different messages supported). */ + eq_context.lkey = new_entry->mem_lkey; /* Memory key (L-Key) to be used to access EQ */ + eq_context.consumer_indx = 0; /* Contains next entry to be read upon poll for completion. Must be initialized to '0 while opening EQ */ + eq_context.producer_indx = 0; /* Contains next entry in EQ to be written by the HCA. Must be initialized to '1 while opening EQ. */ + eq_context.pd = THH_RESERVED_PD; // TK - need to add to structure + + if ((cmd_ret = THH_cmd_SW2HW_EQ(eventp->cmd_if, new_eq, &eq_context)) != THH_CMD_STAT_OK){ + MTL_ERROR4("%s: Failed to move EQ to HW ownership. CMD_IF error:%d.\n", __func__,cmd_ret); + goto err_unreg_mem; + } + + MTL_TRACE4("%s: SUCCESS to THH_cmd_SW2HW_EQ EQ=%d. \n", __func__,new_eq); + /* init DPC of this EQ */ + if (eq_type != EQP_CATAS_ERR_EVENT) { + MOSAL_DPC_init(&new_entry->polling_dpc, eq_polling_dpc, (MT_ulong_ptr_t)new_entry, MOSAL_SINGLE_CTX); + } + /* each eq use only one word (high or low) of the clr_ecr_reg + decide on this if EQ number is bigger then 32. + also prepare the mask to be used when polling the EQ in the DPC */ + + MTL_TRACE4("%s: SUCCESS to MOSAL_DPC_init. \n", __func__); + if (new_eq < 32) { + new_entry->clr_ecr_addr = eventp->clr_ecr_l_base; + } + else { + new_entry->clr_ecr_addr = eventp->clr_ecr_h_base; + } + new_entry->clr_ecr_mask = MOSAL_cpu_to_be32(1 << (new_eq % 32)); + + /* this must be done last since this is the indication that the entry is valid */ + new_entry->alloc_mem_bytes_size = alloc_mem_bytes_size; + new_entry->eq_buff_entry_num = entries_num; + SET_EQ_VALID(eventp,new_eq); + + MT_RETURN( (THH_eqn_t)new_eq); + + /* error handling */ +err_unreg_mem: + if (virtual_eq) { + if ((ret = THH_mrwm_deregister_mr(eventp->mrwm_internal, new_entry->mem_lkey)) != HH_OK){ + MTL_ERROR4("%s: Failed to deregister memory region. 
ret=%d\n", __func__,ret); + } + } +err_free_mem: +#ifdef EQS_CMD_IN_DDR + MOSAL_io_unmap((MT_virt_addr_t)new_entry->eq_buff); +err_free_alloc: + ret = THH_ddrmm_free(eventp->ddrmm, new_entry->alloc_mem_addr_p, alloc_mem_bytes_size); + if ( ret != HH_OK ) { + MTL_ERROR4("%s: failed to THH_ddrmm_free\n", __func__); + } +#else + if (virtual_eq) { + MOSAL_pci_virt_free_consistent((void *)new_entry->alloc_mem_addr_p, alloc_mem_bytes_size); + } + else { +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + MOSAL_pci_phys_free_consistent((void *)(MT_ulong_ptr_t)new_entry->alloc_mem_addr_p, (MT_size_t)alloc_mem_bytes_size); + } +#endif +err_free_eq: + SET_EQ_FREE(eventp,new_eq); /* return EQ to free-pool */ + MT_RETURN(EQP_MAX_EQS); +} /* insert_new_eq */ + + +/************************************************************************/ + + +static HH_ret_t map_eq(THH_eventp_t eventp, + THH_eqn_t eqn, + THH_eventp_mtmask_t tavor_mask) +{ + THH_cmd_status_t cmd_ret; + + + FUNC_IN; + /* save mask for later unmapping when EQ is teared down */ + eventp->eq_table[eqn].events_mask = tavor_mask; + MTL_TRACE4("%s: EQ=%d mask="U64_FMT" \n", __func__, eqn, (u_int64_t)tavor_mask); + + if ((cmd_ret = THH_cmd_MAP_EQ(eventp->cmd_if, eqn, tavor_mask))!= THH_CMD_STAT_OK){ + MTL_ERROR4("%s: Failed to map EQ events. CMD_IF error:%d EQN=%d.\n", __func__,cmd_ret,eqn); + /* due to this error need to remove EQ */ + remove_eq(eventp, eqn); + MT_RETURN( HH_ERR); + } + MTL_TRACE4("%s: SUCCESS to THH_cmd_MAP_EQ. EQ=%d mask="U64_FMT" \n", __func__, eqn, (u_int64_t)tavor_mask); + MT_RETURN( HH_OK); +} + + +/************************************************************************/ + + +static void remove_eq(THH_eventp_t eventp, + THH_eqn_t eqn) +{ + THH_cmd_status_t cmd_ret; + THH_eqc_t eq_context; + VAPI_size_t buf_sz; +#ifdef EQS_CMD_IN_DDR + MT_phys_addr_t buf_addr; +#else + void* buf_addr; +#endif + MT_bool virt_buf; + HH_ret_t ret; + + + FUNC_IN; + + /* clear EQC (in SW ownership) */ + memset(&eq_context,0,sizeof(THH_eqc_t)); + + /* in case of fatal error - do not call CMD_IF */ + if (eventp->have_fatal == FALSE) { + if ((cmd_ret = THH_cmd_HW2SW_EQ(eventp->cmd_if, eqn, &eq_context)) != THH_CMD_STAT_OK){ + MTL_ERROR4("%s: Failed to move EQ to SW ownership. CMD_IF error:%d.\n", __func__,cmd_ret); + /* TK - maybe we need to exit in this case */ + } + } + /* unregister MR of buffer and free the eq_buff */ + if (eventp->eq_table[eqn].virtual_eq) { + ret = THH_mrwm_deregister_mr(eventp->mrwm_internal, eventp->eq_table[eqn].mem_lkey); + if (ret != HH_OK){ + MTL_ERROR4("%s: Failed to deregister memory region. 
ret=%d\n", __func__,ret); + } + } + MOSAL_spinlock_irq_lock(&(eventp->eq_table[eqn].state_lock)); + virt_buf= eventp->eq_table[eqn].virtual_eq; +#ifdef EQS_CMD_IN_DDR + buf_addr= eventp->eq_table[eqn].alloc_mem_addr_p; +#else + buf_addr= (void*)eventp->eq_table[eqn].alloc_mem_addr_p; +#endif + buf_sz= eventp->eq_table[eqn].alloc_mem_bytes_size; + + eventp->eq_table[eqn].alloc_mem_addr_p= 0; + eventp->eq_table[eqn].eq_buff_entry_num = 0; + eventp->eq_table[eqn].alloc_mem_bytes_size = 0; + SET_EQ_FREE(eventp,eqn); + MOSAL_spinlock_unlock(&(eventp->eq_table[eqn].state_lock)); + + /* Free buffer */ + +#ifdef EQS_CMD_IN_DDR + MOSAL_io_unmap((MT_virt_addr_t)eventp->eq_table[eqn].eq_buff); + ret = THH_ddrmm_free(eventp->ddrmm, buf_addr, buf_sz); + if ( ret != HH_OK ) { + MTL_ERROR4("%s: failed to THH_ddrmm_free\n", __func__); + } +#else + if (virt_buf) { + MOSAL_pci_virt_free_consistent(buf_addr, buf_sz); + } + else { +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + MOSAL_pci_phys_free_consistent(buf_addr, (MT_size_t)buf_sz); + } +#endif + + FUNC_OUT; + return; +} + + +/************************************************************************************************/ + +HH_ret_t prepare_intr_resources(THH_eventp_t eventp) +{ + call_result_t msl_ret; + + MT_phys_addr_t cr_base = eventp->event_resources.cr_base; + + FUNC_IN; + /* map both ecr & clr_ecr registers (4 words) */ + // TK: need to use the structure and do separate io remap for each register + if ((eventp->ecr_h_base = MOSAL_io_remap(cr_base+TAVOR_ECR_H_OFFSET_FROM_CR_BASE, 4*sizeof(u_int32_t))) + == 0){ + MTL_ERROR1("%s: Failed to MOSAL_io_remap for ECR\n", __func__); + MT_RETURN( HH_ERR); + } + eventp->ecr_l_base = eventp->ecr_h_base + 4; + eventp->clr_ecr_h_base = eventp->ecr_h_base + 8; + eventp->clr_ecr_l_base = eventp->ecr_h_base + 12; + + + MTL_DEBUG1("%s: ECR register="VIRT_ADDR_FMT"\n", __func__, eventp->ecr_h_base ); + + + /* if interrupt bit < 32: we use the low word and otherwise the high word of the clr_int register */ + if ((eventp->intr_clr_reg = MOSAL_io_remap(cr_base + (eventp->event_resources.intr_clr_bit<32 ? 
+ TAVOR_CLR_INT_L_OFFSET_FROM_CR_BASE : TAVOR_CLR_INT_H_OFFSET_FROM_CR_BASE), + sizeof(u_int32_t))) == 0) + { + MTL_ERROR1("%s: Failed to MOSAL_io_remap for INTR_CLR_REG\n", __func__); + MT_RETURN( HH_ERR); + } + eventp->intr_clr_mask = MOSAL_be32_to_cpu(1 << (eventp->event_resources.intr_clr_bit % 32)); + + //MTL_DEBUG1("%s: TAVOR IRQ=%d\n", __func__, eventp->event_resources.irq); + if ((msl_ret = MOSAL_ISR_set(&eventp->isr_obj, thh_intr_handler, eventp->event_resources.irq, + "InfiniHost", (MT_ulong_ptr_t)eventp)) != MT_OK){ + MTL_ERROR1("%s: Failed to MOSAL_ISR_set MOSAL ret=%d\n", __func__, msl_ret); + MT_RETURN( HH_ERR); + } + MT_RETURN( HH_OK); +} + + +/* +***********************************************************************************************/ + +HH_ret_t remove_intr_resources(THH_eventp_t eventp) +{ + call_result_t msl_ret; + + + FUNC_IN; + MOSAL_io_unmap(eventp->ecr_h_base); + MOSAL_io_unmap(eventp->intr_clr_reg); + + if ((msl_ret = MOSAL_ISR_unset(&eventp->isr_obj)) != MT_OK){ + MTL_ERROR1("%s: Failed to MOSAL_ISR_unset MOSAL ret=%d\n", __func__, msl_ret); + MT_RETURN( HH_ERR); + } + MT_RETURN( HH_OK); +} + +/************************************************************************************************/ + + +static HH_ret_t add_catast_err_eq(THH_eventp_t eventp) +{ + THH_eventp_mtmask_t tavor_mask=0; + + FUNC_IN; + + if (MOSAL_mutex_acq(&eventp->mtx, TRUE) != MT_OK){ + MTL_ERROR4("%s: MOSAL_mutex_acq failed\n", __func__); + MT_RETURN(HH_EINTR); + } + + if (insert_new_eq(eventp, (void*)NULL, NULL, 4, EQP_CATAS_ERR_EVENT) == EQP_MAX_EQS) { + MTL_ERROR4("%s: Failed to add the catastrophic event EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN (HH_ERR); + } + + TAVOR_IF_EV_MASK_SET(tavor_mask, TAVOR_IF_EV_MASK_LOCAL_CATAS_ERR); + + if (map_eq(eventp, EQP_CATAS_ERR_EQN, tavor_mask) != HH_OK){ + MTL_ERROR4("%s: Failed to map catastrophic error EQ.\n", __func__); + MOSAL_mutex_rel(&eventp->mtx); + MT_RETURN( HH_ERR); + } + MOSAL_mutex_rel(&eventp->mtx); + MTL_DEBUG1("%s: Succeeded to map EQ=%d. mask="U64_FMT"\n", __func__, + EQP_CATAS_ERR_EQN, (u_int64_t)tavor_mask); + MT_RETURN(HH_OK); + +} + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.h new file mode 100644 index 00000000..c26cd771 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp.h @@ -0,0 +1,293 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_EVENTP_H +#define H_EVENTP_H + +#include +#include +#include +#include +#include + + +typedef struct THH_eventp_res_st{ + MT_phys_addr_t cr_base; /* physical address of the CR-space */ + u_int8_t intr_clr_bit; /* Bit number to clear using the interrupt clear register */ + MOSAL_IRQ_ID_t irq; /* IRQ line to hook interrupt handler to */ + MT_bool is_srq_enable; /* Is SRQ supported in this FW */ +} THH_eventp_res_t; + + + +/* Mask bits from tavor_if_eventt_mask_enum_t in tavor_if_defs.h */ +typedef tavor_if_eventt_mask_t THH_eventp_mtmask_t; + +#define TAVOR_IF_EV_MASK_CLR_ALL(mask) ((mask)=0) +#define TAVOR_IF_EV_MASK_SET(mask,attr) ((mask)=((mask)|(attr))) +#define TAVOR_IF_EV_MASK_CLR(mask,attr) ((mask)=((mask)&(~(attr)))) +#define TAVOR_IF_EV_IS_SET(mask,attr) (((mask)&(attr))!=0) + + + +/************************************************************************ + * Function: THH_eventp_create + * + + Arguments: + version_p - Version information + event_res_p - See 7.2.1 THH_eventp_res_t - Event processing resources on page 63 + cmd_if - Command interface object to use for EQ setup commands + kar - KAR object to use for EQ doorbells + eventp_p - Returned object handle + + Returns: + HH_OK + HH_EINVAL -Invalid parameters + HH_EAGAIN -Not enough resources to create object + + Description: + Create THH_eventp object context. No EQs are set up until an event consumer registers + using one of the functions below. + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_create ( /*IN */ THH_hob_t hob, + /*IN */ THH_eventp_res_t *event_res_p, + /*IN */ THH_uar_t kar, + /*OUT*/ THH_eventp_t *eventp_p ); + +/************************************************************************ + * Function: THH_eventp_destroy + * + + Arguments: + eventp -The THH_eventp object to destroy + + Returns: + HH_OK + HH_EINVAL -Invalid event object handle + HH_ERR - internal error + + Description: + Destroy object context. If any EQs are still set they are torn-down when this + function is called Those EQs should generate an HCA catastrophic error ((i.e.callbacks + for IB compliant, proprietary and debug events are invoked) since this call implies + abnormal HCA closure (in a normal HCA closure all EQs should be torn-down before a + call to this function). + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_destroy( /*IN */ THH_eventp_t eventp ); + +/************************************************************************ + * Function: THH_eventp_setup_comp_eq + * + Arguments: + eventp -The THH_eventp object handle + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created + eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g.EQC,mem- ory,etc.). + HH_ERR - internal error + + Description: + Set up an EQ for completion events and register given handler as a callback for such events. + Note that the created EQ is not mapped to any event at this stage since completion events are + mapped using the CQC set up on CQ creation. 
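+ 
+ A minimal call sketch (illustrative only; my_comp_handler and my_ctx are
+ caller-supplied placeholders and 256 is an arbitrary queue depth):
+ 
+ THH_eqn_t comp_eqn;
+ if (THH_eventp_setup_comp_eq(eventp, my_comp_handler, my_ctx,
+ 256, &comp_eqn) != HH_OK) {
+ MTL_ERROR4("failed to set up completion EQ\n");
+ }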
+ + + ************************************************************************/ + + +extern HH_ret_t THH_eventp_setup_comp_eq(/*IN */ THH_eventp_t eventp, + /*IN */ HH_comp_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p ); + + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + event_mask -Flags combination of events to map to this EQ + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created + eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g. EQC, memory, etc.). + HH_ERR - internal error + + Description: + Set up an EQ and map events given in mask to it. Events over new EQ are reported to + given handler. + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_setup_ib_eq(/*IN */ THH_eventp_t eventp, + /*IN */ HH_async_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p ); + + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + max_outs_eqe -Maximum outstanding EQEs in EQ created + + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g. EQC, memory, etc.). + HH_ERR - internal error + + Description: + This function sets up an EQ and maps command interface events to it. It also takes + care of notifying the THH_cmd associated with it (as defined on the THH_eventp creation) of + this EQ availability using the THH_cmd_set_eq() (see page 38) function. This causes the THH_cmd + to set event generation to the given EQ for all commands dispatched after this notification. The + THH_eventp automatically sets notification of events from this EQ to the THH_cmd_eventh() + (see page 39) callback of the associated THH_cmd. The function should be invoked by the THH_hob + in order to cause the THH_cmd associated with this eventp to execute commands using events. + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_setup_cmd_eq ( /*IN */ THH_eventp_t eventp, + /*IN */ MT_size_t max_outs_eqe); + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + event_mask -Flags combination of events to map to this EQ + eventh -The callback handle for events over this EQ + priv_context -Private context to be used in callback invocation + max_outs_eqe -Maximum outstanding EQEs in EQ created + eqn_p -Allocated EQ index + + Returns: + HH_OK + HH_EINVAL -Invalid handle + HH_EAGAIN -Not enough resources available (e.g. EQC, memory, etc.). + HH_ERR - internal error + + Description: + Set up an EQ for reporting events beyond IB-spec. (debug events and others). All events + given in the event_mask are mapped to the new EQ. 
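+ 
+ For example (a sketch; the mask macros are defined in this header and the
+ mask bit comes from tavor_if_defs.h, while my_mlx_handler and my_ctx are
+ caller-supplied placeholders):
+ 
+ THH_eqn_t mlx_eqn;
+ THH_eventp_mtmask_t mask;
+ TAVOR_IF_EV_MASK_CLR_ALL(mask);
+ TAVOR_IF_EV_MASK_SET(mask, TAVOR_IF_EV_MASK_LOCAL_CATAS_ERR);
+ if (THH_eventp_setup_mt_eq(eventp, mask, my_mlx_handler, my_ctx,
+ 64, &mlx_eqn) != HH_OK) {
+ MTL_ERROR4("failed to set up MT EQ\n");
+ }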
+ + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_setup_mt_eq(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eventp_mtmask_t event_mask, + /*IN */ THH_mlx_eventh_t eventh, + /*IN */ void *priv_context, + /*IN */ MT_size_t max_outs_eqe, + /*OUT*/ THH_eqn_t *eqn_p); + + +/************************************************************************ + * Function: + * + Arguments: + eventp + eq -The EQ to replace handler for + eventh -The new handler + priv_context -Private context to be used with handler + + Returns: + HH_OK + HH_EINVAL + HH_ENORSC -Given EQ is not set up + HH_ERR - internal error + + Description: + Replace the callback function of an EQ previously set up. This may be used + in order to change the handler without losing events. It retains the EQ and + just replaces the callback function. All EQEs polled after a return from this + function will be reported to the new handler. + + ************************************************************************/ + +extern HH_ret_t THH_eventp_replace_handler(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eqn_t eq, + /*IN */ THH_eventp_handler_t eventh, + /*IN */ void *priv_context); + +/************************************************************************ + * Function: + * + Arguments: + eventp -The THH_eventp object handle + eqn -The EQ to teardown + + Returns: + HH_OK + HH_EINVAL -Invalid handles (e.g. no such EQ set up) + HH_ERR - internal error + + Description: + This function tears down an EQ set up by one of the previous functions. Given eqn defines + which EQ to tear down. This teardown includes cleaning of any context relating to + the callback associated with it. + + + ************************************************************************/ + +extern HH_ret_t THH_eventp_teardown_eq(/*IN */ THH_eventp_t eventp, + /*IN */ THH_eqn_t eqn ); + + +extern HH_ret_t THH_eventp_notify_fatal ( /*IN */ THH_eventp_t eventp, + /*IN */ THH_fatal_err_t fatal_err); + +extern HH_ret_t THH_eventp_handle_fatal ( /*IN */ THH_eventp_t eventp); +#endif /* H_EVENTP_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp_priv.h new file mode 100644 index 00000000..9506cb72 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/eventp/eventp_priv.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_EVENTP_PRIV_H +#define H_EVENTP_PRIV_H + + + +#include +#include +#include +#include +#include +#include + +/*================ macro definitions ===============================================*/ +#define EQP_MAX_EQS 32 /* this if for now since we only use 3 EQs and its save us reading two ECR registers */ +#define EQP_ECR_USED 1 /* should be changed to 2 when > 32 EQs are used */ +#define EQP_CATAS_ERR_EQN 4 /* this is reserved only for catastrophic error notification */ +#define EQP_CMD_IF_EQN 1 /* this is reserved only for command interface */ +#define EQP_CQ_COMP_EQN 0 /* this is reserved only for CQ completion events */ +#define EQP_ASYNCH_EQN 3 /* this is reserved only for asynch events */ +#define EQE_HW_OWNER 0x80 +#define EQP_MIN_EQ_NUM (EQP_CATAS_ERR_EQN+1) /* make sure we start only after reserved EQs */ +#define EQE_SW_OWNER 0x00 +#define EQE_OWNER_BYTE_OFFSET 31 + +#define EQE_DATA_BYTE_SIZE (MT_BYTE_SIZE(tavorprm_event_queue_entry_st, event_data)) /* in bytes */ +#define EQE_OWNER_OFFSET (MT_BIT_OFFSET(tavorprm_event_queue_entry_st, owner) /32 ) /* in DWORDS relay on owner field to be first in DWORD*/ +#define EQE_EVENT_TYPE_OFFSET (MT_BIT_OFFSET(tavorprm_event_queue_entry_st, event_type) /32 ) /* in DWORDS relay on owner field to be first in DWORD*/ +#define EQE_DATA_OFFSET (MT_BIT_OFFSET(tavorprm_event_queue_entry_st, event_data) /32 ) /* in DWORDS relay on owner field to be first in DWORD*/ +#define EQE_DWORD_SIZE (sizeof(struct tavorprm_event_queue_entry_st)/32) /* in DWORDS */ +#define EQ_ENTRY_SIZE (sizeof(struct tavorprm_event_queue_entry_st) / 8) /* in bytes */ + + +/* Values to put in eq_buff_entry_num to mark entry state. 
If not one of those - entry is valid */ +#define SET_EQ_INIT(eventp,eq_num) (eventp->eq_table[eq_num].res_state= EQP_EQ_INIT) +#define SET_EQ_VALID(eventp,eq_num) (eventp->eq_table[eq_num].res_state= EQP_EQ_VALID) +#define SET_EQ_CLEANUP(eventp,eq_num) (eventp->eq_table[eq_num].res_state= EQP_EQ_CLEANUP) +#define SET_EQ_FREE(eventp,eq_num) (eventp->eq_table[eq_num].res_state= EQP_EQ_FREE) +/* Macros to test if entry is invalid/free */ +#define IS_EQ_FREE(eventp,eq_num) (eventp->eq_table[eq_num].res_state == EQP_EQ_FREE) +#define IS_EQ_VALID(eventp,eq_num) (eventp->eq_table[eq_num].res_state == EQP_EQ_VALID) +#define IS_EQ_VALID_P(eq_p) (eq_p->res_state == EQP_EQ_VALID) + + +/*================ type definitions ================================================*/ + +typedef enum { + EQP_CQ_COMP_EVENT, + EQP_IB_EVENT, + EQP_CMD_IF_EVENT, + EQP_MLX_EVENT, + EQP_CATAS_ERR_EVENT +}EQ_type_t; + +/* States of the EQ resources (for EQs pool management) */ +typedef enum { + EQP_EQ_FREE, + EQP_EQ_INIT, + EQP_EQ_VALID, + EQP_EQ_CLEANUP +}EQ_resource_state_t; + +/* entry saved for each EQ */ +typedef struct EQP_eq_entry_st { + EQ_resource_state_t res_state; /* EQ resource (entry) state */ +#ifdef EQS_CMD_IN_DDR + MT_phys_addr_t alloc_mem_addr_p; /* need to save the allocated pointer for free at destroy */ +#else + MT_virt_addr_t alloc_mem_addr_p; /* need to save the allocated pointer for free at destroy */ +#endif + VAPI_size_t alloc_mem_bytes_size; + void* eq_buff; + VAPI_size_t eq_buff_entry_num; /* if ==0 entry is invalid */ + VAPI_pd_hndl_t pd; + VAPI_lkey_t mem_lkey; + void *priv_context; + THH_eventp_mtmask_t events_mask; + EQ_type_t eq_type; + THH_eqn_t eqn; /* eq number - needed by DPC */ + THH_eventp_handler_t handler; + THH_eventp_t eventp_p; + MOSAL_DPC_t polling_dpc; + volatile u_int32_t dpc_cntr; /* Outstanding DPCs counter (possible values:0,1,2)*/ + u_int32_t cons_indx; + MT_virt_addr_t clr_ecr_addr; + u_int32_t clr_ecr_mask; + MT_bool virtual_eq; +#ifdef IMPROVE_EVENT_HANDLING + volatile u_int32_t dpc_lock; +#else + MOSAL_spinlock_t dpc_lock; /* ensure that only one DPC is running - WINDOWS allows + more than one DPC to run at the same time */ +#endif + MOSAL_spinlock_t state_lock; /* protect on the eq state - + must be used for every access to this entry */ +}EQP_eq_entry_t; + + +/* The main EVENT processor structure */ +typedef struct THH_eventp_st { + THH_hob_t hob; + THH_ver_info_t version; + THH_eventp_res_t event_resources; + THH_cmd_t cmd_if; + THH_uar_t kar; + THH_uar_index_t kar_index; + THH_mrwm_t mrwm_internal; + THH_ddrmm_t ddrmm; + EQP_eq_entry_t eq_table[EQP_MAX_EQS]; /* static table of eqs */ + MOSAL_mutex_t mtx; /* used internally */ + MOSAL_protection_ctx_t ctx_internal; + MOSAL_ISR_t isr_obj; + HH_hca_hndl_t hh_hca_hndl; + volatile MT_virt_addr_t ecr_h_base; + volatile MT_virt_addr_t ecr_l_base; + volatile MT_virt_addr_t clr_ecr_h_base; + volatile MT_virt_addr_t clr_ecr_l_base; + volatile MT_virt_addr_t intr_clr_reg; /* can be the high or the low but not both */ + u_int32_t intr_clr_mask; + volatile u_int8_t max_eq_num_used; + volatile MT_bool have_fatal; + volatile THH_fatal_err_t fatal_type; /* need to distingush between master abort and catast error */ + MOSAL_DPC_t fatal_error_dpc; +}THH_eventp_internal_t; + + +/*================ external functions ================================================*/ + + + +#endif /* H_EVENTP_PRIV_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c new 
file mode 100644 index 00000000..30613485 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.c @@ -0,0 +1,1017 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/*================ macro definitions ===============================================*/ + +//#define EPOOL_ENTRY_SIZE 8 /* the minimum needed for the Epool */ + +/*================ type definitions ================================================*/ +typedef VIP_hashp_p_t mcg_status_hash_t; +typedef enum +{ + MGHT =0, AMGM=1 +} mcg_tbl_kind_t; +typedef enum +{ + INSERT =0, REMOVE=1 +} mcg_op_t; + + + +/* The main MCG-manager structure */ +typedef struct THH_mcgm_st +{ + THH_hob_t hob; + THH_ver_info_t version; + VAPI_size_t mght_total_entries; + VAPI_size_t mght_hash_bins; /* actually, it requires 16 bit!! */ + u_int16_t max_qp_per_mcg; + MOSAL_mutex_t mtx; /* used internally */ + THH_cmd_t cmd_if_h; + EPool_t free_list; /* the free list of the non-hash part of the MCG table */ + mcg_status_hash_t my_hash; + //u_int32_t amgm_idx; + //u_int32_t mght_idx; +}THH_mcgm_props_t; + +/* multicast groups status hash tabel*/ +struct mcg_status_entry +{ + IB_gid_t mgid; /* Group's GID */ + u_int16_t num_valid_qps; + u_int32_t idx; //absolute idx !!! (AMGM + MGHT) + u_int32_t prev_idx; //absolute idx !!! 
(AMGM + MGHT) +}; + +/*================ global variables definitions ====================================*/ +static const EPool_meta_t free_list_meta = +{ + 2*sizeof(unsigned long), + 0, /* unsigned long 'prev' index */ + sizeof(unsigned long) +}; + + +/*================ static functions prototypes =====================================*/ + +static HH_ret_t THMCG_get_status_entry(IB_gid_t gid, + mcg_status_hash_t* hash_p, + mcg_status_entry_t* mcg_p); +static HH_ret_t THMCG_update_status_entry(mcg_status_hash_t* hash_p,mcg_status_entry_t* entry_p,MT_bool new_e); +static HH_ret_t THMCG_remove_status_entry(mcg_status_hash_t* hash_p,mcg_status_entry_t* entry_p); + +static inline HH_ret_t read_alloc_mgm(THH_mcgm_t mcgm, THH_mcg_entry_t* entry,u_int32_t idx); + +static HH_ret_t THMCG_reduce_MGHT(THH_mcgm_t mcgm, u_int32_t idx,THH_mcg_entry_t* fw_tmp_entry); + +static HH_ret_t THMCG_find_last_index(THH_mcgm_t mcgm, THH_mcg_hash_t fw_hash_val, + u_int32_t* last, THH_mcg_entry_t* last_entry); +static inline void print_fw_entry(THH_mcg_entry_t* entry); +static inline void print_status_entry(mcg_status_entry_t* my_entry); + +/*================ global functions definitions ====================================*/ + +/************************************************************************ + * Function: THH_mcgm_create + * + Arguments: + hob - THH_hob this object is included in + mght_total_entries - Number of entries in the MGHT + mght_hash_bins - Number of bins in the hash table + max_qp_per_mcg - Max number of QPs per Multicast Group + mcgm_p - Allocated THH_mcgm_t object + Returns: + HH_OK + HH_EINVAL + HH_EAGAIN + + Description: Create THH_mcgm_t instance. + + implementation saving the values: + 1.Total number of entries in the MGHT. + 2.Size of the hash table part (i.e.number of bins). + 3.THH_cmd object to use (taken from the THH_hob using THH_hob_get_cmd_if()). + 4.Version information (taken using THH_hob_get_ver_info(). + 5.Free MGHT entries pool management data structure. 
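+ 
+ A typical kernel-side usage sketch (the sizes 1024/512/8 are arbitrary
+ illustration values; hob, qpn and mgid are caller-supplied):
+ 
+ THH_mcgm_t mcgm;
+ if (THH_mcgm_create(hob, 1024, 512, 8, &mcgm) == HH_OK) {
+ THH_mcgm_attach_qp(mcgm, qpn, mgid);
+ THH_mcgm_detach_qp(mcgm, qpn, mgid);
+ THH_mcgm_destroy(mcgm);
+ }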
+ + + ************************************************************************/ + +extern HH_ret_t THH_mcgm_create(/*IN */ THH_hob_t hob, + /*IN */ VAPI_size_t mght_total_entries, + /*IN */ VAPI_size_t mght_hash_bins, + /*IN */ u_int16_t max_qp_per_mcg, + /*OUT*/ THH_mcgm_t *mcgm_p ) +{ + THH_mcgm_t new_mcgm_p = NULL; + unsigned long amgm_table_size; + VIP_common_ret_t ret=VIP_OK; + HH_ret_t hh_ret = HH_OK; + + FUNC_IN; + + + MTL_DEBUG1("%s: starting...\n", __func__); + /* allocation of object structure */ + new_mcgm_p = (THH_mcgm_t)MALLOC(sizeof(THH_mcgm_props_t)); + if (!new_mcgm_p) + { + MTL_ERROR4("%s: Cannot allocate MGM object.\n", __func__); + MT_RETURN(HH_EAGAIN); + } + MTL_DEBUG1("after allocating new_mcgm_p\n"); + memset(new_mcgm_p,0,sizeof(THH_mcgm_props_t)); + + //check arguments + MTL_DEBUG1("hash bins:"U64_FMT", total entries:"U64_FMT"\n",mght_hash_bins,mght_total_entries); + if ((mght_hash_bins > mght_total_entries) || (mght_total_entries <= 0) || (mght_hash_bins <= 0)) + { + + MTL_ERROR4("%s: bad initial values!\n", __func__); + FREE(new_mcgm_p); + MT_RETURN(HH_EINVAL); + } + + + MOSAL_mutex_init(&new_mcgm_p->mtx); + + amgm_table_size = (unsigned long)(mght_total_entries - mght_hash_bins); + MTL_DEBUG1("total: "U64_FMT", hashbins: "U64_FMT" \n", + mght_total_entries,mght_hash_bins); + + new_mcgm_p->free_list.entries = (void*)(MT_ulong_ptr_t)VMALLOC(sizeof(unsigned long)* 2 *amgm_table_size); + if (!new_mcgm_p->free_list.entries) + { + MTL_ERROR4("%s: Cannot allocate AMGM table.\n", __func__); + FREE(new_mcgm_p); + MT_RETURN(HH_EAGAIN); + } + MTL_DEBUG1("after allocating free_list.entries\n" ); + + memset(new_mcgm_p->free_list.entries,0,sizeof(unsigned long)* 2 *amgm_table_size); + /* init the epool */ + new_mcgm_p->free_list.size = amgm_table_size; + new_mcgm_p->free_list.meta = &free_list_meta; + epool_init(&(new_mcgm_p->free_list)); + + + + /*filling mcgm structure */ + new_mcgm_p->hob = hob; + new_mcgm_p->mght_total_entries = mght_total_entries; + new_mcgm_p->mght_hash_bins = mght_hash_bins; + new_mcgm_p->max_qp_per_mcg = max_qp_per_mcg; + MTL_DEBUG1("max qp per mgm entry: %d\n",new_mcgm_p->max_qp_per_mcg); + + hh_ret = THH_hob_get_cmd_if (hob, &new_mcgm_p->cmd_if_h); + if (hh_ret != HH_OK) { + MTL_ERROR4("%s: Cannot get cmd_if object handle.\n", __func__); + goto clean; + } + hh_ret = THH_hob_get_ver_info(hob, &new_mcgm_p->version); + if (hh_ret != HH_OK) { + MTL_ERROR4("%s: Cannot get version.\n", __func__); + goto clean; + } + /*init local hash table */ + ret = VIP_hashp_create_maxsize((u_int32_t)mght_total_entries,(u_int32_t)mght_total_entries,&(new_mcgm_p->my_hash)); + if (ret != VIP_OK) + { + MTL_ERROR4("%s: Cannot allocate multicast status hash.\n", __func__); + hh_ret = HH_EAGAIN; + goto clean; + } + MTL_DEBUG1("after creating hash table\n"); + VIP_hashp_may_grow(new_mcgm_p->my_hash,FALSE); /* fix hash table size */ + //new_mcgm_p->amgm_idx = 0; + //new_mcgm_p->mght_idx = 0; + + /* succeeded to create object - return params: */ + *mcgm_p = new_mcgm_p; + + MT_RETURN(HH_OK); + + clean: + VFREE(new_mcgm_p->free_list.entries); + FREE(new_mcgm_p); + MT_RETURN(hh_ret); +} + +/************************************************************************ + * Function: THMCG_remove_status_entry + * + Arguments: + hash_p - pointer to status hash + entry_p - pointer to entry that should be removed + + Returns: + HH_OK + HH_ERR - an error has occured + + Description: + removes enty from status hash + + ************************************************************************/ + 
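+/* Iteration callback passed to VIP_hashp_destroy() by THH_mcgm_destroy below:
+ it frees the MALLOCed mcg_status_entry_t stored as the hash value, so that
+ tearing down the status hash does not leak the per-group entries. */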
+static void THMCG_destroy_remove_status_entry( VIP_hash_key_t key,VIP_hashp_value_t hash_val, void * priv_data) +{ + + + FUNC_IN; + + if (hash_val != 0) { + FREE(hash_val); + } + return; +} +/************************************************************************ + * Function: THH_mcgm_destroy + * + Arguments: + mcgm - THH_mcgm to destroy + + Returns: + HH_OK + HH_EINVAL + + Description: + Free THH_mcgm context resources. + ************************************************************************/ + +extern HH_ret_t THH_mcgm_destroy( /*IN */ THH_mcgm_t mcgm ) +{ + VIP_common_ret_t ret = VIP_OK; + + FUNC_IN; + + THMCG_CHECK_NULL(mcgm,done); + + MOSAL_mutex_acq_ui(&mcgm->mtx); + epool_cleanup(&(mcgm->free_list)); + + /* first free the epool */ + VFREE(mcgm->free_list.entries); + + /*destroy my_hash. Need to de-allocate 'malloced' entries */ + ret = VIP_hashp_destroy(mcgm->my_hash,&THMCG_destroy_remove_status_entry,0); + if (ret != VIP_OK) + { + MTL_ERROR4("%s: Cannot destroy multicast status hash.\n", __func__); + } + +done: + MOSAL_mutex_rel(&mcgm->mtx); + MOSAL_mutex_free(&mcgm->mtx); + FREE(mcgm); + MT_RETURN(ret); + +} + +/************************************************************************ + Function: THH_mcgm_attach_qp + + Arguments: + mcgm + qpn -QP number of QP to attach + mgid -GID of a multicast group to attach to + + Returns: + HH_OK + HH_EINVAL + HH_EAGAIN - No more MGHT entries. + HH_2BIG_MCG_SIZE - Number of QPs attached to multicast groups exceeded") \ + HH_ERR - an error has ocuured + + Description: + Attach given QP to multicast with given DGID. Add new group if this is the first QP in MCG. + ************************************************************************/ +extern HH_ret_t THH_mcgm_attach_qp(/*IN */ THH_mcgm_t mcgm, + /*IN */ IB_wqpn_t qpn, + /*IN */ IB_gid_t mgid ) +{ + THH_mcg_entry_t fw_tmp_entry; + THH_cmd_status_t c_status; + mcg_status_entry_t my_entry; + HH_ret_t ret,s_ret = HH_OK; + THH_mcg_hash_t fw_hash_val; + u_int32_t new_idx,i; + MT_bool new_e = TRUE; + MT_bool is_empty=TRUE; + + FUNC_IN; + + ret=HH_EINVAL; + THMCG_CHECK_NULL(mcgm,fin); + ret=HH_OK; + + MOSAL_mutex_acq_ui(&mcgm->mtx); + + MTL_DEBUG1("got gid:%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.\n",mgid[0],mgid[1], + mgid[2],mgid[3],mgid[4],mgid[5],mgid[6],mgid[7], + mgid[8],mgid[9],mgid[10],mgid[11],mgid[12],mgid[13], + mgid[14],mgid[15]); + + memset(&my_entry,0,sizeof(mcg_status_entry_t)); + s_ret = THMCG_get_status_entry(mgid,&(mcgm->my_hash),&my_entry); + switch (s_ret) { + + case HH_NO_MCG: + MTL_DEBUG1(MT_FLFMT("gid doesn't have a multicast group yet \n")); + + //get hash idx from fw + c_status = THH_cmd_MGID_HASH(mcgm->cmd_if_h,mgid,&fw_hash_val); + ret= HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,fin," THH_cmd_HASH failed\n"); + ret= HH_OK; + + MTL_DEBUG1("returned hash val:0x%x \n",fw_hash_val); + if (fw_hash_val >= mcgm->mght_hash_bins) + { + MTL_ERROR1("ERROR:got invalid hash idx for new gid\n"); + ret=HH_ERR; + goto fin; + } + + //read the entry where the new gid supposed to be- is it empty? + ret= read_alloc_mgm(mcgm,&fw_tmp_entry,fw_hash_val); + if (ret!= HH_OK) { + goto fin; + } + + for (i=0; i< 16; i++) { + if (fw_tmp_entry.mgid[i] != 0) { + is_empty= FALSE; + break; + } + } + if (!is_empty) /* if the hash isn't empty*/ + { + /* put the gid in amgm */ + u_int32_t last=0; + //mcgm->amgm_idx++; + MTL_DEBUG1("original hash idx is taken. 
before find last gid\n"); + if (fw_tmp_entry.next_gid_index > 0) { +/*** warning C4242: 'function' : conversion from 'u_int32_t' to 'THH_mcg_hash_t', possible loss of data ***/ + ret=THMCG_find_last_index(mcgm,(THH_mcg_hash_t)fw_tmp_entry.next_gid_index,&last,&fw_tmp_entry); + THMCG_CHECK_HH_ERR(ret,clean,"THMCG_find_last_index failed"); + } + + //get free idx + new_idx = (u_int32_t)epool_alloc(&mcgm->free_list); + if (new_idx == EPOOL_NULL) + { + ret = HH_EAGAIN; + THMCG_CHECK_HH_ERR(ret,clean,"THH_mcgm_attach_qp: $$ No free entries in MCGM table.\n"); + } + + MTL_DEBUG1("after allocating new idx: 0x%x in AMGM\n",new_idx); + new_idx+=(u_int32_t)mcgm->mght_hash_bins; //abs idx in MGHT+AMGM + + //update the last entry + fw_tmp_entry.next_gid_index = new_idx; + c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,last,mcgm->max_qp_per_mcg,&fw_tmp_entry); + ret= HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_WRITE_MGM failed\n"); + ret= HH_OK; + my_entry.prev_idx = last; + + }else{ + //mcgm->mght_idx++; + new_idx = (u_int32_t)fw_hash_val; + my_entry.prev_idx = 0xffffffff; + MTL_DEBUG1("using fw hash idx \n"); + } + + //either in MGHT or AMGM - insert new GID entry + my_entry.idx = new_idx; + my_entry.num_valid_qps = 1; + memcpy(my_entry.mgid,mgid,sizeof(IB_gid_t)); + + fw_tmp_entry.next_gid_index=0; + fw_tmp_entry.valid_qps=0; + memset(fw_tmp_entry.qps, 0, sizeof(IB_wqpn_t) * mcgm->max_qp_per_mcg); + memcpy(fw_tmp_entry.mgid,mgid,sizeof(IB_gid_t)); + + break; + + case HH_OK: + MTL_DEBUG1("$$$ gid already exists in hash table\n"); + + //read the entry + ret=read_alloc_mgm(mcgm,&fw_tmp_entry,my_entry.idx); + if (ret!= HH_OK) { + goto fin; + } + if (fw_tmp_entry.valid_qps != my_entry.num_valid_qps) { + MTL_ERROR1(MT_FLFMT("mismatch hw (%d)/sw (%d) MCG entry \n"),fw_tmp_entry.valid_qps,my_entry.num_valid_qps); + ret= HH_ERR; + goto clean; + } + + //check if the qp already exists in the gid + for (i=0; i<fw_tmp_entry.valid_qps; i++) { + if (fw_tmp_entry.qps[i] == qpn) { + MTL_ERROR1("qp is already attached to this multicast group\n"); + ret=HH_EINVAL; + goto clean; + } + } + if (fw_tmp_entry.valid_qps+1 > mcgm->max_qp_per_mcg){ + ret=HH_2BIG_MCG_SIZE; + MTL_ERROR1("exceeded mcgroup's max qps amount\n"); + goto clean; + } + + my_entry.num_valid_qps ++; + new_e = FALSE; + break; + + default: MTL_ERROR1(MT_FLFMT("MCG_get_status_entry failed")); + goto fin; + } + fw_tmp_entry.qps[fw_tmp_entry.valid_qps] = qpn; + fw_tmp_entry.valid_qps ++; + + MTL_DEBUG1("writing new mcg entry to idx: 0x%x\n",my_entry.idx); + c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,my_entry.idx,mcgm->max_qp_per_mcg,&fw_tmp_entry); + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_WRITE_MGM failed\n"); + ret=HH_OK; + + THMCG_update_status_entry(&mcgm->my_hash,&my_entry,new_e); + + //MTL_ERROR1("so far: amgm:%d mght:%d \n",mcgm->amgm_idx,mcgm->mght_idx); + +clean: + if (fw_tmp_entry.qps) { + FREE(fw_tmp_entry.qps); + } +fin: + MOSAL_mutex_rel(&mcgm->mtx); + MT_RETURN(ret); +} + + + +/************************************************************************ + * Function: THH_mcgm_detach_qp + * + Arguments: + mcgm + qpn - QP number of QP to detach + mgid - GID of a multicast group to detach from + + Returns: + HH_OK + HH_EINVAL - No such multicast group or given QP is not in given group + HH_ERR - an error has occurred + + Description: + Detach given QP from multicast group with given GID. 
+ + ************************************************************************/ + +extern HH_ret_t THH_mcgm_detach_qp(/*IN */ THH_mcgm_t mcgm, + /*IN */ IB_wqpn_t qpn, + /*IN */ IB_gid_t mgid) +{ + THH_mcg_entry_t fw_tmp_entry; + THH_cmd_status_t c_status; + mcg_status_entry_t my_entry; + HH_ret_t ret = HH_OK; + MT_bool qp_is_in = FALSE; + u_int32_t i; + + FUNC_IN; + + ret=HH_ERR; + THMCG_CHECK_NULL(mcgm,fin); + ret=HH_OK; + + MOSAL_mutex_acq_ui(&mcgm->mtx); + + MTL_DEBUG1("got gid:%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.\n",mgid[0],mgid[1], + mgid[2],mgid[3],mgid[4],mgid[5],mgid[6],mgid[7], + mgid[8],mgid[9],mgid[10],mgid[11],mgid[12],mgid[13], + mgid[14],mgid[15]); + MTL_DEBUG1("got qpn: 0x%x \n",qpn); + + ret = THMCG_get_status_entry(mgid,&(mcgm->my_hash),&my_entry); + if (ret != HH_OK) { + if (ret == HH_NO_MCG) { + ret= HH_EINVAL_MCG_GID; + MTL_ERROR1("this gid doesn't have a multicast group\n"); + }else { + MTL_ERROR1("MCG_get_status_entry failed"); + } + goto fin; + } + print_status_entry(&my_entry); + + //read the entry + MTL_DEBUG1("reading from idx: 0x%x \n",my_entry.idx); + + ret=read_alloc_mgm(mcgm,&fw_tmp_entry,my_entry.idx); + if (ret!= HH_OK) { + goto fin; + } + + for (i=0; i< fw_tmp_entry.valid_qps; i++) + { + if (fw_tmp_entry.qps[i] == qpn) + { + qp_is_in = TRUE; + break; + } + } + //qp is not in group + if (qp_is_in == FALSE) + { + MTL_ERROR1("qp doesn't belong to gid's multicast group\n"); + ret=HH_EINVAL_QP_NUM; + goto clean; + } + + MTL_DEBUG1("MCGM detach qp: qp & gid are valid \n"); + + if (fw_tmp_entry.valid_qps > 1) + { + MTL_DEBUG1("no need to remove gid's mcg \n"); + for (i=0; i< fw_tmp_entry.valid_qps; i++) + { + //assuming qp exists max once in mcg group + if (fw_tmp_entry.qps[i] == qpn) + fw_tmp_entry.qps[i]=fw_tmp_entry.qps[fw_tmp_entry.valid_qps-1]; + } + fw_tmp_entry.valid_qps--; + + c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,my_entry.idx,mcgm->max_qp_per_mcg,&fw_tmp_entry); + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_WRITE_MGM failed\n"); + ret=HH_OK; + + //update my hash + my_entry.num_valid_qps--; + ret=THMCG_update_status_entry(&mcgm->my_hash,&my_entry,FALSE); + THMCG_CHECK_HH_ERR(ret,clean," THMCG_update_status_entry failed\n"); + }else { + MTL_DEBUG1("no more qp's - removing MGM entry\n"); + + //MGHT + if (my_entry.idx < mcgm->mght_hash_bins) + { + MTL_DEBUG1("reduced entry is in MGHT \n"); + + if (fw_tmp_entry.next_gid_index > 0) { + ret=THMCG_reduce_MGHT(mcgm,my_entry.idx/*the entry to remove*/,&fw_tmp_entry); + THMCG_CHECK_HH_ERR(ret,clean," THMCG_reduce_MGHT failed\n"); + //mcgm->amgm_idx--; + }else{ + /* write nullified entry */ + memset(fw_tmp_entry.mgid,0,sizeof(IB_gid_t)); + c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,my_entry.idx,mcgm->max_qp_per_mcg,&fw_tmp_entry); + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_WRITE_MGM failed\n"); + ret=HH_OK; + //mcgm->mght_idx--; + } + } + //AMGM + else + { + THH_mcg_entry_t fw_prev_entry; + u_int32_t next_idx = fw_tmp_entry.next_gid_index; + + MTL_DEBUG1("reduced entry is in AMGM \n"); + //mcgm->amgm_idx--; + /*1.HW: update the prev */ + ret=read_alloc_mgm(mcgm,&fw_prev_entry,my_entry.prev_idx); + if (ret!= HH_OK) { + goto clean; + } + + fw_prev_entry.next_gid_index = fw_tmp_entry.next_gid_index; + + MTL_DEBUG1("writing updated prev to AMGM \n"); + c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,my_entry.prev_idx,mcgm->max_qp_per_mcg,&fw_prev_entry); + FREE(fw_prev_entry.qps); + + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_WRITE_MGM failed\n"); + 
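+ /* In outline, unlinking AMGM entry E from its chain (as done here) is:
+ prev(E).next_gid_index = E.next_gid_index (WRITE_MGM just above),
+ status(next(E)).prev_idx = E.prev_idx (step 2 below), then
+ epool_free(E.idx - mght_hash_bins) to return the AMGM slot.
+ The chain is singly linked in HW; the prev indices live only in the
+ driver's status hash. */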
ret=HH_OK; + + + /* 2. read the gid of the next entry (if there's any)*/ + if (next_idx> 0) { + mcg_status_entry_t next_entry; + + c_status = THH_cmd_READ_MGM(mcgm->cmd_if_h,next_idx,mcgm->max_qp_per_mcg,&fw_tmp_entry); + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,clean," THH_cmd_READ_MGM failed\n"); + ret=HH_OK; + + + /* 2.1 update the prev field of the next in my DB*/ + ret=THMCG_get_status_entry(fw_tmp_entry.mgid,&mcgm->my_hash,&next_entry); + THMCG_CHECK_HH_ERR(ret,clean," THMCG_get_status_entry failed\n"); + + next_entry.prev_idx = my_entry.prev_idx; + + ret=THMCG_update_status_entry(&mcgm->my_hash,&next_entry,FALSE); + THMCG_CHECK_HH_ERR(ret,clean," THMCG_update_status_entry failed\n"); + } + + epool_free(&mcgm->free_list,(unsigned long)((VAPI_size_t)my_entry.idx - mcgm->mght_hash_bins)); + + }/* else - AMGM */ + + /* remove the entry from my table */ + ret=THMCG_remove_status_entry(&mcgm->my_hash,&my_entry); + THMCG_CHECK_HH_ERR(ret,clean," THMCG_remove_status_entry failed\n"); + + } + + clean: + if (fw_tmp_entry.qps) { + FREE(fw_tmp_entry.qps); + } + fin: + MOSAL_mutex_rel(&mcgm->mtx); + MT_RETURN(ret); +} + + + +/***** static definitions **********************************/ + +/************************************************************************ + * Function: THMCG_find_last_index + * + Arguments: + mcgm + start - the MGHT idx to start with + last- pointer to last index in chain + last_entry - pointer to last fw entry that will be filled + Returns: + HH_OK + HH_ERR - an error has occurred + + Description: + finds the last fw entry in a chain of the same hash key + ************************************************************************/ +static HH_ret_t THMCG_find_last_index(THH_mcgm_t mcgm, THH_mcg_hash_t start, + u_int32_t* last, THH_mcg_entry_t* last_entry) +{ + + THH_cmd_status_t c_status; + HH_ret_t ret = HH_EAGAIN; + u_int32_t cur_idx=(u_int32_t)start; + + FUNC_IN; + + + while (1) + { + MTL_DEBUG1("MCGM find last idx: reading from idx: 0x%x \n",cur_idx); + memset(last_entry->qps, 0, sizeof(IB_wqpn_t) * mcgm->max_qp_per_mcg); + c_status = THH_cmd_READ_MGM(mcgm->cmd_if_h,cur_idx,mcgm->max_qp_per_mcg,last_entry); + ret=HH_ERR; + THMCG_CHECK_CMDIF_ERR(c_status,fin," THH_cmd_READ_MGM failed\n"); + ret=HH_OK; + print_fw_entry(last_entry); + if (last_entry->next_gid_index==0) { + *last=cur_idx; + break; + } + cur_idx = last_entry->next_gid_index; + } + + fin: + MT_RETURN(ret); +} + +/************************************************************************ + * Function: THMCG_update_status_entry + * + Arguments: + hash_p - status hash table + mcg_p - the status entry to be updated + new_e - is it a new entry (add) or not (update) + Returns: + HH_OK + HH_ERR - an error has occurred + + Description: + updates or adds a status entry + ************************************************************************/ + +static HH_ret_t THMCG_update_status_entry(mcg_status_hash_t* hash_p,mcg_status_entry_t* entry_p,MT_bool new_e) +{ + + u_int32_t hash_key; + VIP_common_ret_t hash_ret; + VIP_hashp_value_t hash_val; + mcg_status_entry_t* mgm_p; + + FUNC_IN; + + hash_key = GID_2_HASH_KEY(entry_p->mgid); + + if (!new_e) + { + hash_ret = VIP_hashp_erase(*hash_p, hash_key, &hash_val); + if (hash_ret != VIP_OK) + { + MTL_ERROR1("failed VIP_hashp_erase: got %s \n",VAPI_strerror_sym(hash_ret)); + return HH_EINVAL; + } + mgm_p = (mcg_status_entry_t*)hash_val; + }else { + mgm_p = (mcg_status_entry_t*)MALLOC(sizeof(mcg_status_entry_t)); + if (mgm_p == NULL) + { + MT_RETURN(HH_EAGAIN); + } + } + 
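+ /* For an existing entry this is an erase-then-reinsert under the same
+ GID-derived key: VIP_hashp_erase() above handed back the stored pointer,
+ whose fields are refreshed before VIP_hashp_insert() puts it back; a new
+ entry takes the MALLOC path instead. */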
print_status_entry(entry_p);
+
+ mgm_p->idx = entry_p->idx;
+ mgm_p->prev_idx = entry_p->prev_idx;
+ mgm_p->num_valid_qps = entry_p->num_valid_qps;
+ memcpy(mgm_p->mgid,entry_p->mgid,sizeof(IB_gid_t));
+
+ hash_ret = VIP_hashp_insert(*hash_p, hash_key, (VIP_hashp_value_t)mgm_p);
+ if (hash_ret != VIP_OK)
+ {
+   MTL_ERROR1(MT_FLFMT("failed VIP_hashp_insert\n"));
+   if (new_e) FREE(mgm_p);
+   return HH_ERR;
+ }
+
+////////////////
+#if 0
+ /* debug aid: look up the entry we just inserted and dump it */
+ hash_ret = VIP_hashp_find(*hash_p, hash_key, &hash_val);
+ if (hash_ret != VIP_OK)
+ {
+   MTL_ERROR1("failed VIP_hashp_find\n");
+   return HH_ERR;
+ }
+ print_status_entry((mcg_status_entry_t*)hash_val);
+#endif
+
+////////////////
+
+ MT_RETURN( HH_OK);
+}
+
+/************************************************************************
+ * Function: THMCG_get_status_entry
+ *
+ Arguments:
+ gid - the status entry gid
+ hash_p - status hash table
+ mcg_p - the status entry that will be filled
+ Returns:
+ HH_OK
+ HH_ERR - an error has occurred
+
+ Description:
+ returns the entry which matches the gid
+ ************************************************************************/
+
+static HH_ret_t THMCG_get_status_entry(IB_gid_t gid,
+                                       mcg_status_hash_t* hash_p,
+                                       mcg_status_entry_t* mcg_p)
+{
+ VIP_common_ret_t v_ret = VIP_OK;
+ HH_ret_t ret = HH_OK;
+ u_int32_t hash_key;
+ VIP_hashp_value_t hash_val;
+
+ FUNC_IN;
+
+ hash_key = GID_2_HASH_KEY(gid);
+
+ MTL_DEBUG2("%s: GID key is: %u\n", __func__, hash_key);
+
+ /* try to find the mcg entry for this gid */
+ v_ret = VIP_hashp_find(*hash_p, hash_key, &hash_val);
+
+ switch (v_ret)
+ {
+ case VIP_OK:
+   MTL_DEBUG1("returning HH OK\n");
+   *mcg_p = *((mcg_status_entry_t*)hash_val);
+   break;
+
+ case VIP_EINVAL_HNDL:
+   MTL_DEBUG1("returning HH NO MCG\n");
+   ret= HH_NO_MCG;
+   break;
+
+ default:
+   ret=HH_EAGAIN;
+ }
+
+ return ret;
+}
+
+/************************************************************************
+ * Function: THMCG_reduce_MGHT
+ *
+ Arguments:
+ mcgm
+ cur_idx - entry idx in FW table
+ fw_entry_p - pointer to the fw hash entry being removed from the MGHT
+
+ Returns:
+ HH_OK
+ HH_ERR - an error has occurred
+
+ Description:
+ removes an entry from the MGHT, moves its successor into the entry's
+ place in the MGHT and updates the status hash
+
+ ************************************************************************/
+
+static HH_ret_t THMCG_reduce_MGHT(THH_mcgm_t mcgm, u_int32_t cur_idx, THH_mcg_entry_t* fw_entry_p)
+{
+ THH_mcg_entry_t fw_next_entry;
+ THH_cmd_status_t c_status;
+ mcg_status_entry_t my_entry;
+ HH_ret_t ret = HH_OK;
+
+ FUNC_IN;
+
+ //read the next entry & write it in place of the removed one
+ ret=read_alloc_mgm(mcgm,&fw_next_entry,fw_entry_p->next_gid_index);
+ if (ret!= HH_OK) {
+   goto fin;
+ }
+
+ c_status = THH_cmd_WRITE_MGM(mcgm->cmd_if_h,cur_idx,mcgm->max_qp_per_mcg,&fw_next_entry);
+ FREE(fw_next_entry.qps);
+
+ ret=HH_ERR;
+ THMCG_CHECK_CMDIF_ERR(c_status,fin," THH_cmd_WRITE_MGM failed\n");
+ ret=HH_OK;
+
+ //free the next idx in the epool
+ epool_free(&mcgm->free_list,(unsigned long)((VAPI_size_t)fw_entry_p->next_gid_index - mcgm->mght_hash_bins));
+
+ //update the moved entry's status
+ memcpy(my_entry.mgid,fw_next_entry.mgid,sizeof(IB_gid_t));
+ my_entry.idx = cur_idx;
+/*** warning C4242: '=' : conversion from 'u_int32_t' to 'u_int16_t', possible loss of data ***/
+ my_entry.num_valid_qps = (u_int16_t)fw_next_entry.valid_qps;
+ my_entry.prev_idx = 0xffffffff;
+ ret=THMCG_update_status_entry(&mcgm->my_hash,&my_entry,FALSE);
+ THMCG_CHECK_HH_ERR(ret,fin," THMCG_update_status_entry failed\n");
+
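/* Note: this is a "move successor into the hash bin" compaction: the
+ * first AMGM entry chained behind the removed MGHT bin was copied over
+ * the bin itself, its old AMGM slot was freed back to the epool, and
+ * its status entry rewritten with prev_idx = 0xffffffff, which this
+ * module appears to use as the chain-head marker. Hypothetical sketch:
+ *
+ *   before: MGHT[5] (being removed) -> AMGM[70] -> AMGM[83]
+ *   after:  MGHT[5] = old AMGM[70] -> AMGM[83], slot 70 freed
+ */
+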
print_status_entry(&my_entry);
+
+ fin:
+ MT_RETURN(ret);
+}
+
+/************************************************************************
+ * Function: THMCG_remove_status_entry
+ *
+ Arguments:
+ hash_p - pointer to status hash
+ entry_p - pointer to entry that should be removed
+
+ Returns:
+ HH_OK
+ HH_ERR - an error has occurred
+
+ Description:
+ removes an entry from the status hash
+
+ ************************************************************************/
+
+static HH_ret_t THMCG_remove_status_entry(mcg_status_hash_t* hash_p,mcg_status_entry_t* entry_p)
+{
+ u_int32_t hash_key;
+ VIP_common_ret_t hash_ret;
+ VIP_hashp_value_t hash_val;
+ HH_ret_t ret = HH_OK;
+
+ FUNC_IN;
+
+ hash_key = GID_2_HASH_KEY(entry_p->mgid);
+
+ hash_ret = VIP_hashp_erase(*hash_p, hash_key, &hash_val);
+ if (hash_ret != VIP_OK)
+ {
+   MTL_ERROR1("failed VIP_hashp_erase\n");
+   ret= HH_ERR;
+ } else {
+   /* free the stored entry only when the erase actually returned one */
+   FREE(hash_val);
+ }
+ MT_RETURN(ret);
+}
+
+static inline void print_fw_entry(THH_mcg_entry_t* entry)
+{
+ u_int32_t i;
+ MTL_DEBUG1("--- FW ENTRY ----\n");
+
+ MTL_DEBUG1("gid:%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.\n",entry->mgid[0],entry->mgid[1],
+            entry->mgid[2],entry->mgid[3],entry->mgid[4],entry->mgid[5],entry->mgid[6],entry->mgid[7],
+            entry->mgid[8],entry->mgid[9],entry->mgid[10],entry->mgid[11],entry->mgid[12],entry->mgid[13],
+            entry->mgid[14],entry->mgid[15]);
+
+ MTL_DEBUG1("num of qps:0x%x \n",entry->valid_qps);
+ if (entry->qps) {
+   for (i=0; i< entry->valid_qps ; i++)
+   {MTL_DEBUG1("qpn:0x%x ",entry->qps[i]);}
+ }else {MTL_DEBUG1("-... no qps ...-\n");}
+ MTL_DEBUG1("next gid idx:%d \n",entry->next_gid_index);
+ MTL_DEBUG1("--------------\n");
+}
+
+static inline void print_status_entry(mcg_status_entry_t* my_entry)
+{
+ MTL_DEBUG1("--- STATUS ENTRY --\n");
+ MTL_DEBUG1("gid:%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.\n",my_entry->mgid[0],my_entry->mgid[1],
+            my_entry->mgid[2],my_entry->mgid[3],my_entry->mgid[4],my_entry->mgid[5],my_entry->mgid[6],
+            my_entry->mgid[7],my_entry->mgid[8],my_entry->mgid[9],my_entry->mgid[10],my_entry->mgid[11],
+            my_entry->mgid[12],my_entry->mgid[13],my_entry->mgid[14],my_entry->mgid[15]);
+
+ MTL_DEBUG1("\nnum of qps:0x%x\n",my_entry->num_valid_qps);
+ MTL_DEBUG1("gid_idx:0x%x prev_idx:0x%x \n",my_entry->idx,my_entry->prev_idx);
+ MTL_DEBUG1("------------------\n");
+}
+
+static inline HH_ret_t read_alloc_mgm(THH_mcgm_t mcgm, THH_mcg_entry_t* entry,u_int32_t idx)
+{
+ THH_cmd_status_t c_status= THH_CMD_STAT_OK;
+ entry->qps = TNMALLOC(IB_wqpn_t,mcgm->max_qp_per_mcg);
+ if (entry->qps == NULL) {
+   MTL_ERROR1(MT_FLFMT("Null pointer detected\n"));
+   MT_RETURN(HH_EAGAIN);
+ }
+ memset(entry->qps, 0, sizeof(IB_wqpn_t) * mcgm->max_qp_per_mcg);
+ c_status = THH_cmd_READ_MGM(mcgm->cmd_if_h,idx,mcgm->max_qp_per_mcg,entry);
+ if (c_status != THH_CMD_STAT_OK) {
+   FREE(entry->qps);
+   entry->qps = NULL;
+   MTL_ERROR1(MT_FLFMT("failed READ_MGM\n"));
+   MT_RETURN(HH_ERR);
+ }
+ print_fw_entry(entry);
+ MT_RETURN(HH_OK);
+}
+
+HH_ret_t THH_mcgm_get_num_mcgs(THH_mcgm_t mcgm, u_int32_t *num_mcgs_p)
+{
+ THMCG_CHECK_NULL(mcgm,done);
+ *num_mcgs_p = VIP_hashp_get_num_of_objects(mcgm->my_hash);
+ MT_RETURN(HH_OK);
+
+done:
+ MT_RETURN(HH_EINVAL);
+}
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h
new file mode 100644
index 00000000..5f274a36
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mcgm/mcgm.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(H_MCGM_H)
+#define H_MCGM_H
+
+#include
+#include
+#include
+#include
+
+typedef struct mcg_status_entry mcg_status_entry_t;
+
+#define GID_2_HASH_KEY(gid) (*((u_int32_t *)gid) ^ *((u_int32_t *)gid+1) ^ *((u_int32_t *)gid+2) ^ *((u_int32_t *)gid+3))
+
+#define THMCG_CHECK_HH_ERR(res, label,message) \
+ if ((res) != HH_OK) { \
+   MTL_ERROR1(MT_FLFMT(message)); \
+   goto label; \
+ }
+
+#define THMCG_CHECK_CMDIF_ERR(res, label,message) \
+ if ((res) != THH_CMD_STAT_OK) { \
+   MTL_ERROR1(MT_FLFMT(message)); \
+   goto label; \
+ }
+
+#define THMCG_CHECK_NULL(p, label) \
+ if ((p) == NULL) { \
+   MTL_ERROR1(MT_FLFMT("Null pointer detected\n")); \
+   goto label; \
+ }
+
+
+/************************************************************************
+ * Function: THH_mcgm_create
+ *
+ Arguments:
+ hob - THH_hob this object is included in
+ mght_total_entries - Number of entries in the MGHT
+ mght_hash_bins - Number of bins in the hash table
+ max_qp_per_mcg - Max number of QPs per Multicast Group
+ mcgm_p - Allocated THH_mcgm_t object
+ Returns:
+ HH_OK
+ HH_EINVAL
+ HH_EAGAIN
+
+ Description: Create THH_mcgm_t instance.
+
+ ************************************************************************/
+
+extern HH_ret_t THH_mcgm_create(/*IN */ THH_hob_t hob,
+                                /*IN */ VAPI_size_t mght_total_entries,
+                                /*IN */ VAPI_size_t mght_hash_bins,
+                                /*IN */ u_int16_t max_qp_per_mcg,
+                                /*OUT*/ THH_mcgm_t *mcgm_p );
+
+/************************************************************************
+ * Function: THH_mcgm_destroy
+ *
+ Arguments:
+ mcgm - THH_mcgm to destroy
+
+ Returns:
+ HH_OK
+ HH_EINVAL
+
+ Description:
+ Free THH_mcgm context resources.
+ ************************************************************************/
+
+extern HH_ret_t THH_mcgm_destroy( /*IN */ THH_mcgm_t mcgm );
+
+/************************************************************************
+ * Function: THH_mcgm_attach_qp
+ *
+ Arguments:
+ mcgm
+ qpn - QP number of QP to attach
+ mgid - GID of a multicast group to attach to
+
+ Returns:
+ HH_OK
+ HH_EINVAL
+ HH_EAGAIN - No more MGHT entries.
+ HH_2BIG_MCG_SIZE - Number of QPs attached to the multicast group exceeded the limit
+ HH_ERR - an error has occurred
+
+ Description:
+ Attach given QP to multicast with given DGID.
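+
+ Note:
+ The group is located by folding the 16-byte MGID into a 32-bit key
+ with GID_2_HASH_KEY (XOR of the four 32-bit words of the GID). A
+ worked example with a hypothetical MGID whose words are
+ 0xff12601b, 0xffff0000, 0x00000000, 0x00000001:
+ 0xff12601b ^ 0xffff0000 ^ 0x00000000 ^ 0x00000001 = 0x00ed601a
+ (the actual key depends on host byte order, since the macro reads
+ the GID as raw 32-bit words).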
+
+ ************************************************************************/
+
+
+extern HH_ret_t THH_mcgm_attach_qp(/*IN */ THH_mcgm_t mcgm,
+                                   /*IN */ IB_wqpn_t qpn,
+                                   /*IN */ IB_gid_t mgid );
+/************************************************************************
+ * Function: THH_mcgm_detach_qp
+ *
+ Arguments:
+ mcgm
+ qpn - QP number of QP to detach
+ mgid - GID of the multicast group to detach from
+
+ Returns:
+ HH_OK
+ HH_EINVAL - No such multicast group or given QP is not in given group
+ HH_ERR - an error has occurred
+
+
+ Description:
+ Detach given QP from multicast group with given GID.
+
+ ************************************************************************/
+
+extern HH_ret_t THH_mcgm_detach_qp(/*IN */ THH_mcgm_t mcgm,
+                                   /*IN */ IB_wqpn_t qpn,
+                                   /*IN */ IB_gid_t mgid);
+
+/************************************************************************
+ * Function: THH_mcgm_get_num_mcgs
+ *
+ Arguments:
+ mcgm
+ num_mcgs_p - current number of multicast groups
+
+ Returns:
+ HH_OK
+ HH_EINVAL - invalid mcgm handle
+
+
+ Description:
+ Returns the current number of multicast groups.
+
+ ************************************************************************/
+extern HH_ret_t THH_mcgm_get_num_mcgs(THH_mcgm_t mcgm, u_int32_t *num_mcgs_p);
+
+#endif /* H_MCGM_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.c
new file mode 100644
index 00000000..4555b65f
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.c
@@ -0,0 +1,2910 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/* First to validate "tmrwm.h" is a legal header */
+#include
+#if defined(USE_STD_MEMORY)
+# include
+#endif
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "extbuddy.h"
+
+#include
+#ifndef MTL_TRACK_ALLOC
+#define EXTBUDDY_ALLOC_MTT(index,log2_n_segs)
+#define EXTBUDDY_FREE_MTT(index,log2_n_segs)
+#else
+#define EXTBUDDY_ALLOC_MTT(index,log2_n_segs) \
+ memtrack_alloc(MEMTRACK_MTT_SEG,(void *)(MT_virt_addr_t)index,(unsigned long)(1<<(log2_n_segs)))
+#define EXTBUDDY_FREE_MTT(index,log2_n_segs) \
+ memtrack_free(MEMTRACK_MTT_SEG,(void *)(MT_virt_addr_t)index,(unsigned long)(1<<(log2_n_segs)))
+#endif
+
+#define CURRENT_MEMKEY(mrwm,mpt_seg,mpt_index) \
+ (((u_int32_t)(mrwm)->key_prefix[mpt_seg][mpt_index-(mrwm)->offset[mpt_seg]] << (mrwm)->props.log2_mpt_sz) | (mpt_index))
+
+/* macro for translating cmd_rc return codes for non-destroy procs */
+#define CMDRC2HH_ND(cmd_rc) ((cmd_rc == THH_CMD_STAT_OK) ? \
HH_OK : \ + (cmd_rc == THH_CMD_STAT_EINTR) ? HH_EINTR : HH_EFATAL) + +/* Reg_Segs_t phys_pages array allocation/free */ + +#define SMART_MALLOC(size) THH_SMART_MALLOC(size) +#define SMART_FREE(ptr,size) THH_SMART_FREE(ptr,size) + +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'const size_t', possible loss of data ***/ +#define ALLOC_PHYS_PAGES_ARRAY(reg_segs_p) \ + (reg_segs_p)->phys_pages= (VAPI_phy_addr_t*)SMART_MALLOC((size_t)(sizeof(VAPI_phy_addr_t)*(reg_segs_p)->n_pages)) + +/* Free memory allocated in buf_lst_2_pages() into phys_pages of reg_segs */ +#define FREE_PHYS_PAGES_ARRAY(reg_segs_p) { \ + SMART_FREE((reg_segs_p)->phys_pages,sizeof(VAPI_phy_addr_t)*(reg_segs_p)->n_pages); \ + (reg_segs_p)->phys_pages= NULL; \ +} + +static u_int8_t native_page_shift; + + + + +#define DHERE {MTL_DEBUG4(MT_FLFMT(""));} + +enum +{ + /* These are the tunebale parameters */ + LOG2_MIN_SEG_SIZE = 3, + /* LOG2_MAX_SEGS = 20, 1M limit [REMOVED! 2002/November/18] */ + MTT_WRITE_MAX = 64, + + /* Better be given explicitly on creation (by firmware), hard code for now */ + MTT_LOG_MTT_ENTRY_SIZE = 3 + +}; + +/*keeps all the data shared between shared mr's */ +typedef struct +{ + VAPI_size_t size; + u_int32_t seg_start; /* segment index - given by EPool */ + u_int8_t log2_segs; + u_int8_t page_shift; + u_int32_t ref_count; + MOSAL_spinlock_t ref_lock; /* May be removed when atomic is available */ +} Shared_data_t; + +/* Since remote=local for {key,start,size} + * we can do with less than the whole HH_mr_info_t. + * On the other hand, we do need to save some internal data. + */ +typedef struct +{ + VAPI_lkey_t key; + IB_virt_addr_t start; + HH_pd_hndl_t pd; + + Shared_data_t* shared_p; + MOSAL_iobuf_t iobuf; /* for internal regions only */ + + /* the small fields, in the end */ + VAPI_mrw_acl_t acl; + MOSAL_mutex_t modify_mtx; + +#if defined(MT_SUSPEND_QP) + MT_bool is_suspended; /* cannot use mpt_entry field below, since mpt_entry + * is set before iobuf_dereg when suspending + */ + THH_mpt_entry_t *mpt_entry; /* saved at suspend, for unsuspend */ + MT_virt_addr_t va; /* saved at suspend, for unsuspend */ + MT_size_t size; /* saved at suspend, for unsuspend */ + MOSAL_prot_ctx_t prot_ctx; /* saved at suspend, for unsuspend */ +#endif + +} Mr_sw_t; + +typedef struct { + u_int32_t seg_start; /* segment index - given by EPool */ + u_int8_t log2_segs; + u_int8_t log2_page_sz; + MT_virt_addr_t mtt_entries; /* Mapped MTT entries */ + MT_virt_addr_t mpt_entry; /* Mapped MPT entry */ + u_int32_t last_free_key; /* Last memory key that was explicitly freed + * (for key wrap around detection) */ +} FMR_sw_info_t; + + +typedef enum { + MPT_int, + MPT_ext, + MPT_EOR, /* end of regions */ + MPT_win = MPT_EOR, + MPT_N, + MPT_reserved +} mpt_segment_t; + +/* The MRWM main structure */ +typedef struct THH_mrwm_st +{ + THH_hob_t hob; + THH_mrwm_props_t props; + u_int32_t usage_cnt[MPT_N]; /* current number of rgn_int,rgn_ext,win */ + VIP_array_p_t mpt[MPT_N]; /* MPT array for each of the entities */ + u_int8_t* is_fmr_bits; + u_int32_t max_mpt[MPT_N];/* limit size of each mpt array */ + u_int32_t offset[MPT_N]; /* [MPT_int=0] = 0, [MPT_EOR] = #regions */ + u_int16_t *key_prefix[MPT_N]; /* persistant key prefix storage */ + MOSAL_spinlock_t key_prefix_lock;/* protect key_prefix updates (2be changed to atomic_inc)*/ + /* u_int8_t log2_seg_size; */ + Extbuddy_hndl xbuddy_tpt; + u_int32_t surplus_segs; /* # segs we can give over 1/region */ + MOSAL_spinlock_t reserve_lock; /* protect MPT (usage_cnt) and MTT 
(surplus_segs) reservations*/ + MOSAL_mutex_t extbuddy_lock; /* protect extbuddy calls */ + + /* convenient handle saving */ + THH_cmd_t cmd_if; + THH_uldm_t uldm; +} TMRWM_t; + +/* place holder for parameters during registartion */ +typedef struct +{ + THH_mrwm_t mrwm; + THH_mpt_entry_t mpt_entry; + u_int32_t mpt_index; /* == lower_bits(mpt_entry.lkey) */ + VAPI_phy_addr_t* phys_pages; + VAPI_size_t n_pages; + u_int8_t log2_page_size; + u_int32_t seg_start; + u_int32_t key; + VAPI_mrw_acl_t acl; + MOSAL_iobuf_t iobuf; /* for internal regions */ +} Reg_Segs_t; + + +/************************************************************************/ +/* private functions */ + + +static inline mpt_segment_t get_mpt_seg(THH_mrwm_t mrwm,u_int32_t mpt_index) +{ + if (mpt_index < mrwm->offset[MPT_int]) { /* internal region */ + return MPT_reserved; + } else if (mpt_index < mrwm->offset[MPT_ext]) { + return MPT_int; + } else if (mpt_index < mrwm->offset[MPT_win]) { + return MPT_ext; + } /* else... */ + return MPT_win; +} + +/********** MPT entries and MTT segments reservations *************/ + + +/* reserve an MPT entry in given segment (to limit VIP_array's size) */ +static inline HH_ret_t reserve_mpt_entry(THH_mrwm_t mrwm, mpt_segment_t mpt_seg) +{ + MOSAL_spinlock_lock(&mrwm->reserve_lock); + if (mrwm->usage_cnt[mpt_seg] >= mrwm->max_mpt[mpt_seg]) { + MOSAL_spinlock_unlock(&mrwm->reserve_lock); + return HH_EAGAIN; /* reached limit for given segment */ + } + MTL_DEBUG5(MT_FLFMT("%s: usage_cnt[%d]=%d -> %d"),__func__, + mpt_seg,mrwm->usage_cnt[mpt_seg],mrwm->usage_cnt[mpt_seg]+1); + mrwm->usage_cnt[mpt_seg]++; + MOSAL_spinlock_unlock(&mrwm->reserve_lock); + return HH_OK; +} + +/* opposite of reserve_mpt */ +static inline void release_mpt_entry(THH_mrwm_t mrwm, mpt_segment_t mpt_seg) +{ + MOSAL_spinlock_lock(&mrwm->reserve_lock); +#ifdef MAX_DEBUG + if (mrwm->usage_cnt[mpt_seg] == 0) { + MTL_ERROR1(MT_FLFMT("%s: Invoked while usage_cnt==0"),__func__); + } +#endif + MTL_DEBUG5(MT_FLFMT("%s: usage_cnt[%d]=%d -> %d"),__func__, + mpt_seg,mrwm->usage_cnt[mpt_seg],mrwm->usage_cnt[mpt_seg]-1); + mrwm->usage_cnt[mpt_seg]--; + MOSAL_spinlock_unlock(&mrwm->reserve_lock); +} + +/* reserve MTT segments (to assure allocation in "extbuddy" structure) */ +static inline HH_ret_t reserve_mtt_segs(THH_mrwm_t mrwm, u_int32_t surplus2reserve) +{ + MOSAL_spinlock_lock(&mrwm->reserve_lock); + if (surplus2reserve > mrwm->surplus_segs) { + MTL_ERROR4(MT_FLFMT("%s: Cannot reserve %d MTT segments (%d dynamic MTT segments left)"), + __func__,surplus2reserve,mrwm->surplus_segs); + MOSAL_spinlock_unlock(&mrwm->reserve_lock); + return HH_EAGAIN; /* reached limit */ + } + MTL_DEBUG5(MT_FLFMT("%s: MTT segments %d -> %d"),__func__, + mrwm->surplus_segs,mrwm->surplus_segs-surplus2reserve); + mrwm->surplus_segs -= surplus2reserve; + MOSAL_spinlock_unlock(&mrwm->reserve_lock); + return HH_OK; +} + +static inline void release_mtt_segs(THH_mrwm_t mrwm, u_int32_t surplus2reserve) +{ + MOSAL_spinlock_lock(&mrwm->reserve_lock); + MTL_DEBUG5(MT_FLFMT("%s: MTT segments %d -> %d"),__func__, + mrwm->surplus_segs,mrwm->surplus_segs+surplus2reserve); + mrwm->surplus_segs += surplus2reserve; + MOSAL_spinlock_unlock(&mrwm->reserve_lock); +} + +/************************************************************************/ +static HH_ret_t buf_lst_2_pages +( + const VAPI_phy_addr_t* phys_buf_lst, + const VAPI_size_t* buf_sz_lst, + MT_size_t n, + IB_virt_addr_t start, + VAPI_phy_addr_t iova_offset, + Reg_Segs_t* reg_segs +); + + +static inline HH_ret_t 
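+/* Note on the reservation helpers above: reserve_mpt_entry/reserve_mtt_segs
+ * enforce, under reserve_lock, that usage_cnt[seg] <= max_mpt[seg] and that
+ * surplus_segs never goes negative. surplus_segs counts MTT segments beyond
+ * the one segment implicitly guaranteed per region, so a region needing
+ * n_segs segments reserves only n_segs-1 of them, e.g. (hypothetically)
+ * reserve_mtt_segs(mrwm, 7) for an 8-segment region. */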
tpt_buf_lst_2_pages(const HH_mr_t* mr, Reg_Segs_t* rs) +{ + const HH_tpt_t* t = &mr->tpt; + MTL_DEBUG4(MT_FLFMT("tpt_buf_lst_2_pages")); + return buf_lst_2_pages(t->tpt.buf_lst.phys_buf_lst, t->tpt.buf_lst.buf_sz_lst, + t->num_entries, + mr->start, t->tpt.buf_lst.iova_offset, + rs); +} /* tpt_buf_lst_2_pages */ + +/******************************************************/ + +static void release_shared_mtts(THH_mrwm_t mrwm, Mr_sw_t *mrsw_p) +{ + MOSAL_spinlock_lock(&mrsw_p->shared_p->ref_lock); + if (mrsw_p->shared_p->ref_count > 1) { + mrsw_p->shared_p->ref_count--; + MOSAL_spinlock_unlock(&mrsw_p->shared_p->ref_lock); + }else{ + MOSAL_spinlock_unlock(&mrsw_p->shared_p->ref_lock); + /* MTT segment 0 is reserved, so if seg_start is 0 it is a physical addressing region (no MTTs)*/ + if (mrsw_p->shared_p->seg_start != 0) { + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + extbuddy_free(mrwm->xbuddy_tpt, mrsw_p->shared_p->seg_start, mrsw_p->shared_p->log2_segs); + EXTBUDDY_FREE_MTT(mrsw_p->shared_p->seg_start, mrsw_p->shared_p->log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + release_mtt_segs(mrwm,(1 << mrsw_p->shared_p->log2_segs) - 1); + } + FREE(mrsw_p->shared_p); + } + mrsw_p->shared_p = NULL; +} + +static HH_ret_t change_translation(Reg_Segs_t* rs_p,HH_mr_t* mr_props_p, Mr_sw_t *mrsw_p, + u_int8_t *log2_segs_p) +{ + HH_ret_t ret= HH_EAGAIN; + u_int32_t n_segs_o = (1 << mrsw_p->shared_p->log2_segs); /* >= 1 !! */ + THH_mrwm_t mrwm = rs_p->mrwm; + + /*calc num of segs of new mr*/ + u_int8_t page_shift; + u_int8_t log2_mtt_seg_sz = mrwm->props.log2_mtt_seg_sz; + /* avoid explict u_int64_t division ! */ + u_int32_t n_segs; + u_int8_t log2_segs; + MT_size_t seg_comp; + MOSAL_iobuf_props_t iobuf_props; + + FUNC_IN; + + switch (mr_props_p->tpt.tpt_type) { + case HH_TPT_PAGE: + /*calc num of segs of new mr*/ + page_shift = mr_props_p->tpt.tpt.page_lst.page_shift; + rs_p->n_pages = ((mr_props_p->start+mr_props_p->size - 1)>> page_shift)-(mr_props_p->start>> page_shift)+ 1; + /* avoid explict u_int64_t division ! 
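+ (the shift form computes the page span without a 64-bit divide, e.g.
+ with hypothetical start=0x10000800, size=0x3000, page_shift=12:
+ ((0x100037ff >> 12) - (0x10000800 >> 12)) + 1 = 4 pages)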
*/ + if (rs_p->n_pages != mr_props_p->tpt.num_entries) { + MTL_ERROR1(MT_FLFMT("%s: Given "SIZE_T_DFMT" pages of %uKB is smaller than given region size ("U64_FMT" KB)"), + __func__,mr_props_p->tpt.num_entries,(1<<(page_shift-10)),mr_props_p->size>>10); + return HH_EINVAL; + } + rs_p->phys_pages = (VAPI_phy_addr_t*)mr_props_p->tpt.tpt.page_lst.phys_page_lst; + rs_p->log2_page_size= mr_props_p->tpt.tpt.page_lst.page_shift; + break; + + case HH_TPT_BUF: + if ((mr_props_p->tpt.num_entries == 1) + && (mr_props_p->tpt.tpt.buf_lst.phys_buf_lst[0] == mr_props_p->start)) + { + /* no translation needed */ + rs_p->mpt_entry.pa = TRUE; + rs_p->n_pages = 1; + rs_p->seg_start= 0; + rs_p->log2_page_size= 0; + ret= HH_OK; + }else { + ret = tpt_buf_lst_2_pages(mr_props_p, rs_p); + } + if (ret != HH_OK) return ret; + break; + + case HH_TPT_IOBUF: + rs_p->iobuf= mr_props_p->tpt.tpt.iobuf; + if (MOSAL_iobuf_get_props(rs_p->iobuf,&iobuf_props) != MT_OK) { + MTL_ERROR4(MT_FLFMT("Failed MOSAL_iobuf_get_props.")); + return HH_EINVAL; + } + rs_p->n_pages= iobuf_props.nr_pages; + rs_p->phys_pages= NULL; +/*** warning C4242: '=' : conversion from 'MT_u_int_t' to 'u_int8_t', possible loss of data ***/ + rs_p->log2_page_size = (u_int8_t)iobuf_props.page_shift; + break; + + default: + MTL_ERROR2(MT_FLFMT("%s: Invalid tpt type (%d)\n"),__func__,mr_props_p->tpt.tpt_type); + return HH_EINVAL; + } +/*** warning C4242: '=' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + seg_comp= (u_int8_t)(rs_p->n_pages >> log2_mtt_seg_sz); + seg_comp= ((seg_comp << log2_mtt_seg_sz) != rs_p->n_pages) ? seg_comp + 1 : seg_comp; + /*check that n_segs will not overflow 32 bits */ + log2_segs = ceil_log2(seg_comp); + if (log2_segs >= (8*sizeof(n_segs))) return HH_EINVAL_PARAM; + n_segs = 1 << log2_segs; + + //MTL_DEBUG4(MT_FLFMT("start=0x%Lx, size=0x%Lx, shift=%d, ne=%d, np=%d"),(u_int64_t)start, + // (u_int64_t)mr_props_p->size, page_shift,(int)mr_props_p->tpt.num_entries, (int)n_pages); + + MTL_DEBUG3(MT_FLFMT("log2 mtt sz=%d \n"),log2_mtt_seg_sz); + MTL_DEBUG3(MT_FLFMT("n_segs_o=%d n_segs=%d \n"),n_segs_o,n_segs); + + if ( (n_segs != n_segs_o) || (mrsw_p->shared_p->ref_count > 1) || (rs_p->mpt_entry.pa)) { + /* replace MTTs (or just free if new "translation" is physical with no translation) */ + u_int32_t seg_start = EXTBUDDY_NULL; + /* we are not using spinlock on the ref_cnt on the above check to simplify code*/ + /* if we are "lucky" enough we may get here even we can keep use the same MTTs */ + /* (when 2 sharing regions did the change_translation simultaneously) */ + /* but... this is not a bug. Just an overkill (reallocating MTTs) */ + /* ("shared memory" regions are rarely reregistered) */ + release_shared_mtts(mrwm,mrsw_p); + + MTL_DEBUG1(MT_FLFMT("alloc new MTT's \n")); + /* 2. 
alloc new MTT's */ + if (!rs_p->mpt_entry.pa) { /* If translation needed */ + if (reserve_mtt_segs(mrwm, n_segs-1) != HH_OK) { /* surplus segments reservation */ + MTL_ERROR1(MT_FLFMT("Out of MTT segments")); + return HH_EAGAIN; + } + if (MOSAL_mutex_acq(&mrwm->extbuddy_lock,TRUE) != MT_OK) { + release_mtt_segs(mrwm,n_segs-1); + return HH_EINTR; + } + seg_start = extbuddy_alloc(mrwm->xbuddy_tpt, log2_segs); + if (seg_start != EXTBUDDY_NULL) {EXTBUDDY_ALLOC_MTT(seg_start,log2_segs);} + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + + if (seg_start != EXTBUDDY_NULL) { + rs_p->seg_start = seg_start; + rs_p->mpt_entry.mtt_seg_adr = mrwm->props.mtt_base | + (seg_start << (log2_mtt_seg_sz+ MTT_LOG_MTT_ENTRY_SIZE));; + }else{ /* reregister (translation) failed */ + MTL_ERROR1(MT_FLFMT("Failed allocating MTT segments (unexpected error !!)")); + release_mtt_segs(mrwm,n_segs-1); + return HH_EAGAIN; + } + } + }else{ /* else, using the same MTT's of the original region */ + rs_p->seg_start= mrsw_p->shared_p->seg_start; + rs_p->log2_page_size = mrsw_p->shared_p->page_shift; + } + + *log2_segs_p= log2_segs; + return HH_OK; +} + + +/************************************************************************/ +static void determine_ctx( + THH_mrwm_t mrwm, + THH_internal_mr_t* mr_props, + MOSAL_protection_ctx_t* ctx_p +) +{ + HH_ret_t rc = THH_uldm_get_protection_ctx(mrwm->uldm, mr_props->pd, ctx_p); + if (rc != HH_OK) + { +#ifndef __DARWIN__ + MTL_DEBUG4(MT_FLFMT("THH_uldm_get_protection_ctx failed, use ctx=0x%x"), + mr_props->vm_ctx); +#else + MTL_DEBUG4(MT_FLFMT("THH_uldm_get_protection_ctx failed")); +#endif + *ctx_p = mr_props->vm_ctx; + } +} /* determine_ctx */ + + + +/************************************************************************ + * Copies and check props into mrwm handle. + * Here are the restrictions: + * #-regions + #-windows <= #MPT-entries. + * segment_size = 2^n >= 2^3 = 8 + * #-regions <= #-segments <= 1M = 2^20 + */ +static MT_bool check_props(const THH_mrwm_props_t* props, THH_mrwm_t mrwm) +{ + MT_bool ok = FALSE; + u_int8_t log2_mtt_sz = props->log2_mtt_sz; + u_int32_t n_log_reserved_mtt_segs= props->log2_rsvd_mtt_segs + props->log2_mtt_seg_sz; + u_int32_t tavor_num_reserved_mtts = (u_int32_t) (1ul << props->log2_rsvd_mtt_segs); + u_int32_t tavor_num_reserved_mpts = (u_int32_t) (1ul << props->log2_rsvd_mpts); + u_int32_t n_req_mpts = (u_int32_t)(tavor_num_reserved_mpts + props->max_mem_reg_internal + + props->max_mem_reg + props->max_mem_win); + + MTL_DEBUG4(MT_FLFMT("base="U64_FMT", log2_mpt_sz=%d, log2_mtt_sz=%d, n_req_mpts=0x%x"), + props->mtt_base, props->log2_mpt_sz, log2_mtt_sz, n_req_mpts); + if ((n_req_mpts <= (1ul << props->log2_mpt_sz)) && + (log2_mtt_sz > n_log_reserved_mtt_segs) && + (log2_mtt_sz >= LOG2_MIN_SEG_SIZE)) /* funny check, but... 
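+ (what it verifies, with hypothetical numbers: log2_mpt_sz=17 gives
+ 131072 MPT entries, which must cover 2^log2_rsvd_mpts reserved
+ entries plus max_mem_reg_internal + max_mem_reg + max_mem_win; and
+ log2_mtt_sz must exceed log2_rsvd_mtt_segs + log2_mtt_seg_sz so that
+ at least one non-reserved MTT segment remains)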
*/ + { + u_int32_t n_segs; + u_int32_t n_rgns= (u_int32_t)(props->max_mem_reg_internal + props->max_mem_reg); + u_int8_t log2_n_segs = props->log2_mtt_sz - props->log2_mtt_seg_sz; + mrwm->props = *props; /* But we may fix some values */ + MTL_DEBUG4(MT_FLFMT("log2_n_segs=%d, max=%d"), + log2_n_segs, props->log2_max_mtt_segs); + if (log2_n_segs > props->log2_max_mtt_segs) + { + /* Waste of MTT memory, but we cannot use more than 1M */ + mrwm->props.log2_mtt_sz = props->log2_mtt_seg_sz + + props->log2_max_mtt_segs; + log2_n_segs = props->log2_max_mtt_segs; + MTL_DEBUG4(MT_FLFMT("Enlarge: log2_n_segs=%d"), log2_n_segs); + } + n_segs = (1ul << log2_n_segs); + ok = (n_rgns <= n_segs); + if (!ok) + { + MTL_ERROR1(MT_FLFMT("n_rgns=0x%x > n_segs=0x%x"), n_rgns, n_segs); + return ok; + } + mrwm->surplus_segs = (n_segs - tavor_num_reserved_mtts) - n_rgns; + } + MTL_DEBUG4(MT_FLFMT("ok=%d"), ok); + return ok; +} /* check_props */ + + +/************************************************************************/ +static void mpt_entry_init(THH_mpt_entry_t* e) +{ + memset(e, 0, sizeof(THH_mpt_entry_t)); + e->ver = 0; + /* e->ce = 1; */ + e->lr = TRUE; + e->pw = 0; + e->m_io = TRUE; + /* e->vl = 0; */ + /* e->owner = 0; */ + e->status = 0; + e->win_cnt = 0; + e->win_cnt_limit = 0; /* 0 means no limit */ +} /* mpt_entry_init */ + + +/************************************************************************/ +static void props2mpt_entry(const HH_mr_t* props, THH_mpt_entry_t* e) +{ + VAPI_mrw_acl_t acl = props->acl; + + mpt_entry_init(e); + e->lw = (acl & VAPI_EN_LOCAL_WRITE ? TRUE : FALSE); + e->rr = (acl & VAPI_EN_REMOTE_READ ? TRUE : FALSE); + e->rw = (acl & VAPI_EN_REMOTE_WRITE ? TRUE : FALSE); + e->a = (acl & VAPI_EN_REMOTE_ATOM ? TRUE : FALSE); + e->eb = (acl & VAPI_EN_MEMREG_BIND ? TRUE : FALSE); + e->pd = props->pd; + e->start_address = props->start; + e->reg_wnd_len = props->size; + e->pa = FALSE; +} /* props2mpt_entry */ + +/************************************************************************/ +static HH_ret_t smr_props2mpt_entry(THH_mrwm_t mrwm,const HH_smr_t* props, THH_mpt_entry_t* e) +{ + VAPI_mrw_acl_t acl = props->acl; + HH_mr_info_t orig_mr; + HH_ret_t ret; + + mpt_entry_init(e); + e->lw = (acl & VAPI_EN_LOCAL_WRITE ? TRUE : FALSE); + e->rr = (acl & VAPI_EN_REMOTE_READ ? TRUE : FALSE); + e->rw = (acl & VAPI_EN_REMOTE_WRITE ? TRUE : FALSE); + e->a = (acl & VAPI_EN_REMOTE_ATOM ? TRUE : FALSE); + e->eb = (acl & VAPI_EN_MEMREG_BIND ? TRUE : FALSE); + e->pd = props->pd; + e->start_address = props->start; + + /* size is not given, so we must query it according to lkey of the original mr*/ + ret = THH_mrwm_query_mr(mrwm,props->lkey,&orig_mr); + if (ret != HH_OK) { + MTL_ERROR1("failed quering the original mr \n"); + return ret; + } + + MTL_DEBUG1("end SMR props2mpt \n"); + /*TBD: what about remote ?? */ + e->reg_wnd_len = orig_mr.local_size; + return HH_OK; +} /* props2mpt_entry */ + +static void init_fmr_mpt_entry(THH_mpt_entry_t* mpt_entry_p, + HH_pd_hndl_t pd, + VAPI_mrw_acl_t acl, + u_int32_t init_memkey, + u_int8_t log2_page_sz, + u_int64_t mtt_seg_adr) +{ + memset(mpt_entry_p, 0, sizeof(THH_mpt_entry_t)); + mpt_entry_p->pd = pd; + mpt_entry_p->lr = TRUE; + mpt_entry_p->m_io = TRUE; + mpt_entry_p->r_w = TRUE; /* Region */ + mpt_entry_p->pa = FALSE; + mpt_entry_p->page_size = log2_page_sz - TAVOR_LOG_MPT_PG_SZ_SHIFT; + mpt_entry_p->mem_key = init_memkey; /* Initial (invalid) key */ + mpt_entry_p->mtt_seg_adr = mtt_seg_adr; + mpt_entry_p->lw = (acl & VAPI_EN_LOCAL_WRITE ? 
TRUE : FALSE); + mpt_entry_p->rr = (acl & VAPI_EN_REMOTE_READ ? TRUE : FALSE); + mpt_entry_p->rw = (acl & VAPI_EN_REMOTE_WRITE ? TRUE : FALSE); + mpt_entry_p->a = (acl & VAPI_EN_REMOTE_ATOM ? TRUE : FALSE); + mpt_entry_p->eb = FALSE; /* No memory bind allowed over FMRs */ + mpt_entry_p->reg_wnd_len= 0; /* prevent access via this MPT */ +} + +/************************************************************************/ +static void internal_props2mpt_entry( + const THH_internal_mr_t* props, + THH_mpt_entry_t* e +) +{ + mpt_entry_init(e); + e->lw = TRUE; + e->rr = FALSE; + e->rw = FALSE; + e->a = FALSE; + e->eb = FALSE; + e->pd = props->pd; + e->start_address = props->start; + e->reg_wnd_len = props->size; +} /* internal_props2mpt_entry */ + + +/************************************************************************/ +inline static u_int32_t make_key(THH_mrwm_t mrwm, mpt_segment_t mpt_seg, u_int32_t mpt_index) +{ + if ((mpt_index < mrwm->offset[mpt_seg]) || + (mpt_index >= mrwm->offset[mpt_seg]+mrwm->max_mpt[mpt_seg])) { + MTL_ERROR4(MT_FLFMT("%s: Given MPT index (0x%X) is not in given mpt segment"),__func__, + mpt_index); + return 0; + } + MOSAL_spinlock_dpc_lock(&mrwm->key_prefix_lock); /* TBD: change to atomic_inc */ + mrwm->key_prefix[mpt_seg][mpt_index-mrwm->offset[mpt_seg]]++; + MOSAL_spinlock_unlock(&mrwm->key_prefix_lock); + return CURRENT_MEMKEY(mrwm,mpt_seg,mpt_index); +} /* make_key */ + + +/************************************************************************/ +/* Go thru the two synced arrays of buffers addresses and sizes. + * For each get the alignment - that is lowest bit. + * Return the total_size and the minimal lowest bit. + * The latter is minimized with initial page_shift provided value. + * Note that this lowest bit can be used to test native page alignment. + */ +static u_int8_t buf_lst_2_page_shift +( + const VAPI_phy_addr_t* phys_buf_lst, + const VAPI_size_t* buf_sz_lst, + MT_size_t n, + u_int8_t page_shift, + VAPI_size_t* total_p +) +{ + MT_size_t bi; + VAPI_phy_addr_t bs[2], *bs_begin = &bs[0], *bs_end = bs_begin + 2, *pbs; + VAPI_size_t total_size = 0; + + MTL_DEBUG4(MT_FLFMT("n="SIZE_T_FMT", page_shift=%d"), n, page_shift); + /* Find gcd of address+size that is a power of 2. + * Actually minimizing lowest bit on. 
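+ * Example (hypothetical values): buffers at 0x10000 (size 0x6000) and
+ * 0x4a000 (size 0x2000) have lowest set bits 16, 13, 13 and 13; with
+ * an initial page_shift of 12 the minimum stays 12, i.e. the region
+ * can still be described with 4KB pages.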
+ */ + for (bi = n; bi--; ) + { /* 'arraying' the address + size values, to allow for loop */ + bs[0] = phys_buf_lst[bi]; /* must be native page aligned */ + bs[1] = buf_sz_lst[bi]; + MTL_DEBUG4(MT_FLFMT("buf="U64_FMT", sz="U64_FMT), bs[0], bs[1]); + total_size += buf_sz_lst[bi]; + for (pbs = bs_begin; pbs != bs_end; ++pbs) + { + VAPI_phy_addr_t u32 = *pbs; + if (u32) + { + u_int8_t l = lowest_bit(u32); + MTL_DEBUG4(MT_FLFMT("lowest_bit("U64_FMT")=%d"), u32, l); + if (l < page_shift) + { + page_shift = l; + } + } + } + } + *total_p = total_size; + MTL_DEBUG4(MT_FLFMT("page_shift=%d"), page_shift); + return page_shift; +} /* buf_lst_2_page_shift */ + + +/************************************************************************/ +static HH_ret_t buf_lst_2_pages +( + const VAPI_phy_addr_t* phys_buf_lst, + const VAPI_size_t* buf_sz_lst, + MT_size_t n, + IB_virt_addr_t start, + VAPI_phy_addr_t iova_offset, + Reg_Segs_t* reg_segs +) +{ + VAPI_size_t total_size; + VAPI_phy_addr_t initial_pages_skip; + VAPI_phy_addr_t* currp; + VAPI_size_t page_size; + MT_size_t bi; + IB_virt_addr_t start_unoffset = start - iova_offset; + u_int8_t page_shift = lowest_bit(start_unoffset); + VAPI_size_t tmp; + + MTL_DEBUG4(MT_FLFMT("start="U64_FMT", offset="U64_FMT", unoffset="U64_FMT", shift=%d"), + start, iova_offset, start_unoffset, page_shift); + + /* calc page shift */ + page_shift = buf_lst_2_page_shift(phys_buf_lst, buf_sz_lst, n, page_shift, + &total_size); + + if (page_shift > TAVOR_IF_MAX_MPT_PAGE_SIZE) { + MTL_ERROR1("page shift calculated :%d , due to MPT restrictions page shift wil be 32 \n",page_shift); + page_shift = TAVOR_IF_MAX_MPT_PAGE_SIZE; + } + MTL_DEBUG4(MT_FLFMT("n="SIZE_T_DFMT", page_shift=%d"), n, page_shift); + if (page_shift < native_page_shift) + { + MTL_ERROR1(MT_FLFMT("page shift below system page size")); + return HH_EINVAL; + } + + initial_pages_skip = /* page_size * (iova_offset/page_size) */ + iova_offset & ~(((VAPI_phy_addr_t)1 << page_shift) - 1); + MTL_DEBUG4(MT_FLFMT("total_size="U64_FMT", initial_pages_skip="U64_FMT),total_size, initial_pages_skip); + + total_size -= initial_pages_skip; + reg_segs->n_pages = total_size >> page_shift; + reg_segs->log2_page_size = page_shift; + MTL_DEBUG4(MT_FLFMT("total_size="U64_FMT", page_shift=%d, n_pages="SIZE_T_DFMT), + total_size, page_shift, reg_segs->n_pages); + tmp= sizeof(VAPI_phy_addr_t)*reg_segs->n_pages; + if (tmp > ((MT_phys_addr_t) MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF))) { + MTL_ERROR1(MT_FLFMT("total bufs size exceeds max size available on this machine")); + return HH_EINVAL; + } +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'const size_t', possible loss of data ***/ + ALLOC_PHYS_PAGES_ARRAY(reg_segs); + if (reg_segs->phys_pages == NULL) + { + MTL_ERROR1(MT_FLFMT("alloc of "SIZE_T_DFMT" phys_pages failed"),reg_segs->n_pages); + return HH_EAGAIN; + } + + page_size = (VAPI_size_t)1<< page_shift; + currp = reg_segs->phys_pages; + MTL_DEBUG4(MT_FLFMT("page_size="U64_FMT", phys_buf_lst=%p, currp=%p"),page_size, phys_buf_lst, currp); + for (bi = 0; bi != n; ++bi) + { + VAPI_phy_addr_t buf_page = phys_buf_lst[bi] + initial_pages_skip; + VAPI_phy_addr_t buf_page_end = buf_page + buf_sz_lst[bi]; + initial_pages_skip = 0; /* skip only in 1st buffer */ + MTL_DEBUG4(MT_FLFMT("bi="SIZE_T_FMT", currp=%p, bp="U64_FMT", bp_end="U64_FMT), + bi, currp, buf_page, buf_page_end); + for ( ; buf_page != buf_page_end; buf_page += page_size, ++currp) + { + MTL_DEBUG4(MT_FLFMT("currp=%p, b="U64_FMT), currp, buf_page); + *currp = buf_page; 
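+ /* (sketch, hypothetical values: a buffer {pa=0x100000, sz=0x4000}
+ * with page_size=0x1000 expands to ptags 0x100000, 0x101000,
+ * 0x102000, 0x103000) */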
+ } + } + + + return HH_OK; +} /* buf_lst_2_pages */ + + + + +/************************************************************************/ +/* For the sake of MicroCode(?) efficiency, + * we ensure writing an even number of MTTs. + */ +static HH_ret_t mtt_writes( + THH_cmd_t cmd_if, + VAPI_phy_addr_t* phys_page_lst, + VAPI_phy_addr_t mtt_pa, + VAPI_size_t n_pages +) +{ + static const MT_size_t MTT_WRITE_MAX_SIZE = + MTT_WRITE_MAX * (1ul << MTT_LOG_MTT_ENTRY_SIZE); + THH_mtt_entry_t *e0,*e_end; + THH_cmd_status_t cmd_rc = THH_CMD_STAT_OK; + VAPI_size_t n_entries = MTT_WRITE_MAX, n_w_entries; + + MTL_DEBUG4(MT_FLFMT("mtt_writes: mtt_pa="U64_FMT", n="U64_FMT), mtt_pa, n_pages); + e0 = (THH_mtt_entry_t*)MALLOC((MTT_WRITE_MAX + 1) * sizeof(THH_mtt_entry_t)); + if (!e0) { + MTL_ERROR1(MT_FLFMT("kmalloc of "SIZE_T_FMT" bytes failed"), + (MTT_WRITE_MAX + 1) * sizeof(THH_mtt_entry_t)); + return HH_EAGAIN; + } + + e_end = e0 + MTT_WRITE_MAX; + + + while (n_pages) + { + THH_mtt_entry_t* e = e0; + if (n_pages < MTT_WRITE_MAX) + { + n_entries = n_pages; + e_end = e0 + n_pages; + } + for (; e != e_end; ++e) + { + e->ptag = *phys_page_lst++; + e->p = TRUE; + /* MTL_DEBUG4(MT_FLFMT("e=0x%p, e->ptag=0x%Lx"), e, e->ptag); */ + } + /* dummy extra, to ensure even number of MTTs */ + e->ptag = 0; + e->p = FALSE; + n_w_entries = (n_entries + 1) & ~1ul; /* even upper bound */ + MTL_DEBUG4(MT_FLFMT("mtt_pa="U64_FMT", ne="U64_FMT), mtt_pa, n_w_entries); +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + cmd_rc = THH_cmd_WRITE_MTT(cmd_if, mtt_pa, (MT_size_t)n_w_entries, e0); + n_pages -= n_entries; + MTL_DEBUG4(MT_FLFMT("cmd_rc=%d, n_pages="U64_FMT), cmd_rc, n_pages); + if (cmd_rc != THH_CMD_STAT_OK) + { + n_pages = 0; + } + else if (n_pages) /* may save 64-bit addition */ + { + mtt_pa += MTT_WRITE_MAX_SIZE; + } + } DHERE; + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("mtt writes failed got %d \n"),cmd_rc); + } + FREE(e0); + return (CMDRC2HH_ND(cmd_rc)); +} /* mtt_writes */ + +static HH_ret_t mtt_writes_iobuf( + THH_cmd_t cmd_if, + MOSAL_iobuf_t iobuf, + VAPI_phy_addr_t mtt_pa, + VAPI_size_t n_pages +) +{ + static const MT_size_t MTT_WRITE_MAX_SIZE = + MTT_WRITE_MAX * (1ul << MTT_LOG_MTT_ENTRY_SIZE); + THH_mtt_entry_t *e0,*e; + THH_cmd_status_t cmd_rc = THH_CMD_STAT_OK; + MT_size_t n_entries = MTT_WRITE_MAX, n_w_entries; + MT_size_t n_entries_out, cur_entry; + MOSAL_iobuf_iter_t iobuf_iter; + MT_phys_addr_t *mt_pages_p; + call_result_t mt_rc; + HH_ret_t rc= HH_EAGAIN; + + MTL_DEBUG4(MT_FLFMT("mtt_writes_iobuf: mtt_pa="U64_FMT", n="U64_FMT", iobuf=0x%p, n_pages="U64_FMT), + mtt_pa, n_pages,iobuf,n_pages); + + e0 = (THH_mtt_entry_t*)MALLOC((MTT_WRITE_MAX + 1) * sizeof(THH_mtt_entry_t)); + if (!e0) { + MTL_ERROR2(MT_FLFMT("kmalloc of "SIZE_T_FMT" bytes failed"), + (MTT_WRITE_MAX + 1) * sizeof(THH_mtt_entry_t)); + return HH_EAGAIN; + } + mt_pages_p= (MT_phys_addr_t*)MALLOC(MTT_WRITE_MAX * sizeof(MT_phys_addr_t)); /* for MOSAL_iobuf_get_tpt_seg */ + if (!mt_pages_p) { + MTL_ERROR2(MT_FLFMT("kmalloc of "SIZE_T_FMT" bytes failed"), + MTT_WRITE_MAX * sizeof(MT_phys_addr_t)); + goto fail_mt_pages; + } + (void)MOSAL_iobuf_iter_init(iobuf,&iobuf_iter); + + while (n_pages) + { + if (n_pages < MTT_WRITE_MAX) { +/*** warning C4242: '=' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + n_entries = (MT_size_t)n_pages; + } + + MTL_DEBUG5(MT_FLFMT("%s: n_pages="U64_FMT" n_entries="SIZE_T_FMT" mtt_pa="U64_FMT), + __func__, n_pages, 
n_entries, mtt_pa); + + /* get next segment of the page table */ + mt_rc= MOSAL_iobuf_get_tpt_seg(iobuf, &iobuf_iter, n_entries, &n_entries_out, mt_pages_p); + if (mt_rc != MT_OK) { + MTL_ERROR2(MT_FLFMT("Failed MOSAL_iobuf_get_tpt_seg (%s)"),mtl_strerror_sym(mt_rc)); + rc= HH_EFATAL; + goto fail_get_tpt; + } + if (n_entries_out != n_entries) { /* sanity check */ + MTL_ERROR2(MT_FLFMT( + "Number of pages returned from MOSAL_iobuf_get_tpt_seg ("SIZE_T_DFMT + ") is different from expected ("SIZE_T_DFMT")"), n_entries_out, n_entries); + rc= HH_EFATAL; + goto fail_get_tpt; + } + for (e= e0, cur_entry= 0; cur_entry < n_entries; ++e, ++cur_entry) { + e->ptag = mt_pages_p[cur_entry]; + e->p = TRUE; + /* MTL_DEBUG4(MT_FLFMT("e=0x%p, e->ptag=0x%Lx"), e, e->ptag); */ + } + /* dummy extra, to ensure even number of MTTs */ + e->ptag = 0; + e->p = FALSE; + n_w_entries = (n_entries + 1) & ~1ul; /* even upper bound */ + cmd_rc = THH_cmd_WRITE_MTT(cmd_if, mtt_pa, n_w_entries, e0); + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("THH_cmd_WRITE_MTT failed (err=%d)"),cmd_rc); + rc= HH_EFATAL; + goto fail_cmd; + } + n_pages -= n_entries; + mtt_pa += MTT_WRITE_MAX_SIZE; + } + FREE(mt_pages_p); + FREE(e0); + MTL_DEBUG5(MT_FLFMT("mtt_writes_iobuf - DONE")); + return HH_OK; + + fail_cmd: + fail_get_tpt: + FREE(mt_pages_p); + fail_mt_pages: + FREE(e0); + return rc; +} /* mtt_writes */ + + + +/************************************************************************/ +static HH_ret_t register_pages(Reg_Segs_t* rs_p,mpt_segment_t mpt_seg,VAPI_mrw_type_t mr_type) +{ + HH_ret_t rc = HH_EAGAIN; + THH_mrwm_t mrwm = rs_p->mrwm; + THH_mpt_entry_t* mpt_entry_p = &rs_p->mpt_entry; + THH_cmd_status_t cmd_rc; + u_int8_t log2_seg_sz = mrwm->props.log2_mtt_seg_sz; + VAPI_phy_addr_t mtt_seg_adr; + + MTL_DEBUG4(MT_FLFMT("register_pages: seg_start=0x%x, log2_seg_sz=%u iobuf=0x%p n_pages="SIZE_T_DFMT), + rs_p->seg_start, log2_seg_sz, rs_p->iobuf, rs_p->n_pages); + + mtt_seg_adr = mrwm->props.mtt_base | + (rs_p->seg_start << (log2_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)); + rs_p->key = make_key(mrwm, mpt_seg,rs_p->mpt_index); + + mpt_entry_p->r_w = TRUE; + mpt_entry_p->page_size = rs_p->log2_page_size - TAVOR_LOG_MPT_PG_SZ_SHIFT; + mpt_entry_p->mem_key = rs_p->key; + mpt_entry_p->mtt_seg_adr = mtt_seg_adr; + + if ( (!mpt_entry_p->pa) && + ((mr_type == VAPI_MR) || (mr_type == VAPI_MPR)) ) + { + if (rs_p->iobuf != NULL) { /* use MOSAL_iobuf to write MTT entries */ + rc= mtt_writes_iobuf(mrwm->cmd_if, rs_p->iobuf, mtt_seg_adr, rs_p->n_pages); + } else { /* page tables is given in rs_p->phys_pages */ + rc= mtt_writes(mrwm->cmd_if, rs_p->phys_pages, mtt_seg_adr, rs_p->n_pages); + } + } + else rc = HH_OK; /* no need to write mtt with SHARED (or no-translation) mr */ + + if (rc == HH_OK) + { + cmd_rc = THH_cmd_SW2HW_MPT(mrwm->cmd_if, rs_p->mpt_index, &rs_p->mpt_entry); + MTL_DEBUG4(MT_FLFMT("SW2HW_MPT: cmd_rc=%d"), cmd_rc); + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("register pages failed got %d \n"),cmd_rc); + rc = (CMDRC2HH_ND(cmd_rc)); + } + } + return rc; +} /* register_pages */ + + +/************************************************************************/ +static HH_ret_t save_sw_context(const Reg_Segs_t* rs_p, u_int8_t log2_segs, VAPI_mrw_type_t mr_type, + Mr_sw_t* mrsw) +{ + mrsw->key = rs_p->key; + mrsw->start = rs_p->mpt_entry.start_address; + mrsw->pd = rs_p->mpt_entry.pd; + mrsw->acl = rs_p->acl; + if (mr_type != VAPI_MSHAR) { + mrsw->shared_p = (Shared_data_t*)MALLOC(sizeof(Shared_data_t)); + if (mrsw->shared_p 
== NULL) { + MTL_ERROR1(MT_FLFMT("save_sw_ctxt: failed allocating memory \n")); + return HH_EAGAIN; + } + mrsw->shared_p->size = rs_p->mpt_entry.reg_wnd_len; + mrsw->shared_p->seg_start = rs_p->seg_start; + mrsw->shared_p->log2_segs = log2_segs; + mrsw->shared_p->page_shift = rs_p->log2_page_size; + mrsw->shared_p->ref_count = 1; + MOSAL_spinlock_init(&mrsw->shared_p->ref_lock); + }else { + MOSAL_spinlock_lock(&mrsw->shared_p->ref_lock); + mrsw->shared_p->ref_count ++; + MOSAL_spinlock_unlock(&mrsw->shared_p->ref_lock); + } + mrsw->iobuf = rs_p->iobuf; + return HH_OK; +} /* save_sw_context */ + + +static HH_ret_t init_fmr_context(THH_mrwm_t mrwm, + FMR_sw_info_t* fmr_info_p, + u_int32_t mpt_index, + u_int32_t seg_start, + u_int8_t log2_segs, + u_int8_t log2_page_sz) +{ + MT_phys_addr_t mtt_seg_adr= + mrwm->props.mtt_base | (seg_start << (mrwm->props.log2_mtt_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)); + MT_phys_addr_t mpt_entry_adr= mrwm->props.mpt_base | (mpt_index << TAVOR_IF_STRIDE_MPT_BIT); + + fmr_info_p->mtt_entries= MOSAL_io_remap(mtt_seg_adr,(MT_size_t)1 << (log2_segs + mrwm->props.log2_mtt_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)); + if (fmr_info_p->mtt_entries == 0) { + MTL_ERROR2(MT_FLFMT("%s: MOSAL_io_remap("PHYS_ADDR_FMT", 0x%x) failed"), + __func__, mtt_seg_adr, + 1 << (log2_segs + mrwm->props.log2_mtt_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)); + return HH_EAGAIN; + } + + fmr_info_p->mpt_entry= MOSAL_io_remap(mpt_entry_adr, TAVOR_IF_STRIDE_MPT); + if (fmr_info_p->mpt_entry == 0) { + MTL_ERROR2(MT_FLFMT("%s: MOSAL_io_remap("PHYS_ADDR_FMT", 0x%x) failed"), __func__, + mpt_entry_adr, TAVOR_IF_STRIDE_MPT); + MOSAL_io_unmap((MT_virt_addr_t)fmr_info_p->mtt_entries); + return HH_EAGAIN; + } + + fmr_info_p->last_free_key= CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index); + fmr_info_p->seg_start= seg_start; + fmr_info_p->log2_segs= log2_segs; + fmr_info_p->log2_page_sz= log2_page_sz; + + return HH_OK; +} + + +/************************************************************************/ +static HH_ret_t alloc_reg_pages( + Reg_Segs_t* rs_p, + mpt_segment_t tpt_group, + VAPI_lkey_t* forced_key, + Mr_sw_t** mrsw_pp +) +{ + HH_ret_t rc = HH_EAGAIN; + VIP_common_ret_t vip_array_rc=VIP_EAGAIN; + Mr_sw_t* mrsw_p= TMALLOC(Mr_sw_t); + VIP_array_handle_t mpt_index= 0xFFFFFFFF; + THH_mrwm_t mrwm = rs_p->mrwm; + u_int32_t seg_start = EXTBUDDY_NULL; + u_int8_t log2_mtt_seg_sz = mrwm->props.log2_mtt_seg_sz; + /* avoid explict u_int64_t division ! */ + u_int32_t n_segs; + MT_size_t seg_comp; + u_int8_t log2_segs; + + if (mrsw_pp) { + *mrsw_pp = NULL; + } +/*** warning C4242: '=' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + seg_comp= (MT_size_t)(rs_p->n_pages >> log2_mtt_seg_sz); + seg_comp= ((seg_comp << log2_mtt_seg_sz) != rs_p->n_pages) ? 
seg_comp + 1 : seg_comp; + /*check that n_segs will not overflow 32 bits */ + log2_segs = ceil_log2(seg_comp); + if (log2_segs >= (8*sizeof(n_segs))) return HH_EINVAL_PARAM; + n_segs = 1 << log2_segs; + + MTL_DEBUG4(MT_FLFMT("%s: n_pages="SIZE_T_DFMT" n_segs=%u"), __func__, + rs_p->n_pages, n_segs); + + if (!mrsw_p) { + MTL_ERROR3(MT_FLFMT("%s: Failed allocating MR_sw_t for new memory region"),__func__); + return HH_EAGAIN; + } + memset(mrsw_p,0,sizeof(Mr_sw_t)); + MTL_DEBUG4(MT_FLFMT("log2_mtt_seg_sz=%d"), log2_mtt_seg_sz); + MTL_DEBUG4(MT_FLFMT("alloc_reg_pages: #pg="SIZE_T_FMT", #segs=0x%x, surp=0x%x, g=%d"), + rs_p->n_pages, n_segs, mrwm->surplus_segs, tpt_group); + + if (reserve_mpt_entry(mrwm,tpt_group) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: Out of MPT entries"),__func__); + goto failed_mpt_reserve; + } + + if (!rs_p->mpt_entry.pa) { + if ((rc= reserve_mtt_segs(mrwm,n_segs-1)) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: Out of MTT entries"),__func__); + goto failed_mtt_reserve; + } + } + + if (forced_key) /* must be Internal! */ + { + mpt_index= (*forced_key & MASK32(mrwm->props.log2_mpt_sz)); + if ((mpt_index >= mrwm->offset[MPT_int]) && + (mpt_index < mrwm->offset[MPT_ext])) { + vip_array_rc= + VIP_array_insert2hndl(mrwm->mpt[tpt_group],mrsw_p,mpt_index-mrwm->offset[MPT_int]); + } else { + vip_array_rc= VIP_EINVAL_PARAM; /* given key is not available */ + } + } + else /* !forced_key */ + { + vip_array_rc= VIP_array_insert(mrwm->mpt[tpt_group],mrsw_p,&mpt_index); + mpt_index+= mrwm->offset[tpt_group]; + } + + if (vip_array_rc != VIP_OK) { + MTL_ERROR3(MT_FLFMT("%s: Failed MPT entry allocation (%s)"),__func__, + VAPI_strerror_sym(vip_array_rc)); + goto failed_vip_array; + } + + + if (!rs_p->mpt_entry.pa) { + if (MOSAL_mutex_acq(&mrwm->extbuddy_lock, TRUE) != MT_OK) { + rc= VIP_EINTR; + goto failed_mutex; + } + seg_start = extbuddy_alloc(mrwm->xbuddy_tpt, log2_segs); + if (seg_start != EXTBUDDY_NULL) {EXTBUDDY_ALLOC_MTT(seg_start,log2_segs);} + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + if (seg_start == EXTBUDDY_NULL) { + MTL_ERROR3(MT_FLFMT("%s: Failed allocation of %d MTT segment/s allocation"),__func__, + log2_segs); + rc= HH_EAGAIN; + goto failed_extbuddy; + } + rs_p->seg_start = seg_start; + } + + + rs_p->mpt_index = mpt_index; + /*in PMR, it acts the same, as long as it's not SHARED */ + rc = register_pages(rs_p,tpt_group,VAPI_MR); + if (rc != HH_OK) { + goto failed_register_pages; + } + + /*saving the new MPT entry */ + /*in PMR, it acts the same, as long as it's not SHARED */ + rc = save_sw_context(rs_p, log2_segs,VAPI_MR,mrsw_p); + if (rc != HH_OK) { + MTL_ERROR1("failed save_sw_ctxt \n"); + goto failed_save_ctx; + } + + if (mrsw_pp) { + *mrsw_pp = mrsw_p; + } + + return HH_OK; + + failed_save_ctx: + THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, NULL); /* reclaim MPT entry from HW */ + failed_register_pages: + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + extbuddy_free(mrwm->xbuddy_tpt, seg_start, log2_segs); + EXTBUDDY_FREE_MTT(seg_start, log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + failed_extbuddy: + failed_mutex: + VIP_array_erase(mrwm->mpt[tpt_group],mpt_index-mrwm->offset[tpt_group],NULL); + failed_vip_array: + if (!rs_p->mpt_entry.pa) { + release_mtt_segs(mrwm,n_segs-1); + } + failed_mtt_reserve: + release_mpt_entry(mrwm,tpt_group); + failed_mpt_reserve: + FREE(mrsw_p); + return rc; +} /* alloc_reg_pages */ + + +/************************************************************************/ +static void swinfo2mrinfo(const Mr_sw_t* swmr, HH_mr_info_t* hmr) +{ + hmr->lkey = 
swmr->key; + hmr->rkey = swmr->key; + hmr->local_start = swmr->start; + hmr->remote_start = swmr->start; + hmr->local_size = swmr->shared_p->size; + hmr->remote_size = swmr->shared_p->size; + hmr->pd = swmr->pd; + hmr->acl = swmr->acl; +} /* swinfo2mrinfo */ + + + +/************************************************************************/ +/* Handling THH_mrwm_register_internal(...) case + * where physical buffers are given + */ +static HH_ret_t bufs_register_internal( + THH_internal_mr_t* mr_p, + Reg_Segs_t* rs_p, + VAPI_lkey_t* forced_key, + Mr_sw_t** mrsw_pp +) +{ + HH_ret_t rc; + MTL_DEBUG4(MT_FLFMT("bufs_register_internal")); + rc = buf_lst_2_pages(mr_p->phys_buf_lst, mr_p->buf_sz_lst, (unsigned int)mr_p->num_bufs, + mr_p->start, 0 /* no offset */,rs_p); + MTL_DEBUG4(MT_FLFMT("rc=%d"), rc); + if (rc == HH_OK) + { + rs_p->acl = VAPI_EN_LOCAL_WRITE; + rc = alloc_reg_pages(rs_p, MPT_int, forced_key, mrsw_pp); + } + if (rs_p->phys_pages) FREE_PHYS_PAGES_ARRAY(rs_p); + return rc; +} /* bufs_register_internal */ + + +/************************************************************************/ +/* Handling THH_mrwm_register_internal(...) case + * where physical buffers are not supplied and pages need to be locked + */ +static HH_ret_t lock_register_internal( + THH_internal_mr_t* mr_props_p, + Reg_Segs_t* rs_p, + VAPI_lkey_t* forced_key, + Mr_sw_t **mrsw_pp +) +{ + MOSAL_iobuf_props_t iobuf_props; + MOSAL_protection_ctx_t ctx; + HH_ret_t rc = HH_ENOSYS; + call_result_t mosal_rc; + + MTL_DEBUG4(MT_FLFMT("%s: start="U64_FMT" size="U64_FMT), __func__ , + mr_props_p->start, mr_props_p->size); + + /* Arrange for pages locking */ + determine_ctx(rs_p->mrwm, mr_props_p, &ctx); + +/*** warning C4242: 'function' : conversion from 'IB_virt_addr_t' to 'MT_virt_addr_t', possible loss of data ***/ +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + mosal_rc = MOSAL_iobuf_register((MT_virt_addr_t)mr_props_p->start, (MT_size_t)mr_props_p->size, ctx, + MOSAL_PERM_READ | MOSAL_PERM_WRITE, &rs_p->iobuf, MOSAL_IOBUF_LNX_FLG_MARK_ALL_DONTCOPY); + if (mosal_rc != MT_OK) { + MTL_ERROR4(MT_FLFMT("MOSAL_iobuf_register: rc=%s"), mtl_strerror_sym(mosal_rc)); + return mosal_rc == MT_EAGAIN ? 
HH_EAGAIN : HH_EINVAL_VA; + } + + if (MOSAL_iobuf_get_props(rs_p->iobuf,&iobuf_props) != MT_OK) { + MTL_ERROR4(MT_FLFMT("Failed MOSAL_iobuf_get_props.")); + rc= HH_EINVAL; + } else { + rs_p->n_pages= iobuf_props.nr_pages; +/*** warning C4242: '=' : conversion from 'MT_u_int_t' to 'u_int8_t', possible loss of data ***/ + rs_p->log2_page_size = (u_int8_t)iobuf_props.page_shift; + rs_p->acl = VAPI_EN_LOCAL_WRITE; + rc = alloc_reg_pages(rs_p, MPT_int, forced_key, mrsw_pp); + } + + if ( rc != HH_OK ) { + MOSAL_iobuf_deregister(rs_p->iobuf); + rs_p->iobuf= NULL; + } + + return rc; +} /* lock_register_internal */ + + +/************************************************************************/ +static void internal_unlock(VIP_delay_unlock_t delay_unlock_obj, MOSAL_iobuf_t iobuf, MT_bool have_fatal) +{ + MTL_DEBUG4(MT_FLFMT("%s: iobuf=0x%p have_fatal=%d"),__func__,iobuf,have_fatal); + if ( iobuf ) { + if (have_fatal) { + VIP_delay_unlock_insert(delay_unlock_obj, iobuf); + } + else { + MOSAL_iobuf_deregister(iobuf); + } + } +} /* internal_unlock */ + + + +/************************************************************************/ +/************************************************************************/ +/* interface functions */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_create( + THH_hob_t hob, /* IN */ + THH_mrwm_props_t* mrwm_props, /* IN */ + THH_mrwm_t* mrwm_p /* OUT */ +) +{ + HH_ret_t rc = HH_EAGAIN; + TMRWM_t* mrwm = TMALLOC(TMRWM_t); + u_int32_t mtt_sz = 1ul << mrwm_props->log2_mtt_sz; + int i; + MT_bool ok = TRUE; + + MTL_TRACE1("{THH_mrwm_create: hob=%p\n", hob); + + if (mrwm) memset(mrwm,0,sizeof(TMRWM_t)); + ok = (mrwm && check_props(mrwm_props, mrwm) && + ((rc=THH_hob_get_cmd_if(hob, &mrwm->cmd_if)) == HH_OK) && + ((rc=THH_hob_get_uldm(hob, &mrwm->uldm)) == HH_OK)); + + /* Key prefix arrays (for "persistant" storage) */ + if (ok) {rc = HH_EAGAIN;} /*reinitialize return code */ + ok= ok && ((mrwm->key_prefix[MPT_int]= + TNVMALLOC(u_int16_t,mrwm->props.max_mem_reg_internal)) != NULL); + ok= ok && ((mrwm->key_prefix[MPT_ext]= + TNVMALLOC(u_int16_t,mrwm->props.max_mem_reg)) != NULL); + ok= ok && ((mrwm->key_prefix[MPT_win]= + TNVMALLOC(u_int16_t,mrwm->props.max_mem_win)) != NULL); + + /* we allocate mtt segmenst, each of 2^LOG2_SEG_SIZE entries */ + ok = ok && ((mrwm->xbuddy_tpt = + extbuddy_create(mtt_sz >> mrwm->props.log2_mtt_seg_sz, 0)) != NULL); + ok = ok && extbuddy_reserve(mrwm->xbuddy_tpt, 0, (1ul << mrwm->props.log2_rsvd_mtt_segs)); + + ok = ok && + ((rc= VIP_array_create_maxsize((u_int32_t)(mrwm->props.max_mem_reg_internal>>10),(u_int32_t)mrwm->props.max_mem_reg_internal, + &mrwm->mpt[MPT_int])) == HH_OK); + ok = ok && + ((rc= VIP_array_create_maxsize((u_int32_t)(mrwm->props.max_mem_reg>>10),(u_int32_t)mrwm->props.max_mem_reg, + &mrwm->mpt[MPT_ext])) == HH_OK); + ok = ok && + ((rc= VIP_array_create_maxsize((u_int32_t)(mrwm->props.max_mem_win>>10),(u_int32_t)mrwm->props.max_mem_win, + &mrwm->mpt[MPT_win])) == HH_OK); + + if (ok) { + mrwm->is_fmr_bits= TNMALLOC(u_int8_t,mrwm->props.max_mem_reg>>3); + if (mrwm->is_fmr_bits == NULL) { + rc = HH_EAGAIN; + goto cleanup; + } + memset(mrwm->is_fmr_bits,0,sizeof(u_int8_t)*mrwm->props.max_mem_reg>>3); + mrwm->hob = hob; + /* divide MPT to 3 sections: 1) Internal region. 2) External region. 3) mem. windows. 
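+ With hypothetical properties log2_rsvd_mpts=4, max_mem_reg_internal=1024,
+ max_mem_reg=2048 and max_mem_win=1024 this gives:
+ offset[MPT_int] = 1<<4 = 16 (FW-reserved entries come first)
+ offset[MPT_ext] = 16 + 1024 = 1040
+ offset[MPT_win] = 1040 + 2048 = 3088
+ so get_mpt_seg() classifies any MPT index by simple range checks.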
*/ + mrwm->offset[MPT_int] = (1<<mrwm->props.log2_rsvd_mpts); + mrwm->offset[MPT_ext] = (u_int32_t)(mrwm->offset[MPT_int]+mrwm_props->max_mem_reg_internal); + mrwm->offset[MPT_win] = (u_int32_t)(mrwm->offset[MPT_ext]+mrwm_props->max_mem_reg); + mrwm->max_mpt[MPT_int]= (u_int32_t)mrwm_props->max_mem_reg_internal; + mrwm->max_mpt[MPT_ext]= (u_int32_t)mrwm_props->max_mem_reg; + mrwm->max_mpt[MPT_win]= (u_int32_t)mrwm_props->max_mem_win; + for (i= 0; i < MPT_N ; i++) { + mrwm->usage_cnt[i]= 0; + memset(mrwm->key_prefix[i],0,sizeof(u_int16_t)*mrwm->max_mpt[i]); + } + MOSAL_mutex_init(&mrwm->extbuddy_lock); + MOSAL_spinlock_init(&mrwm->reserve_lock); + MOSAL_spinlock_init(&mrwm->key_prefix_lock); + *mrwm_p = mrwm; + MTL_TRACE1("}THH_mrwm_create: mrwm=%p,rc=OK\n", *mrwm_p); + return HH_OK; + } + +cleanup: + if (mrwm) { + for (i= 0; i < MPT_N ; i++) { + if (mrwm->mpt[i]) VIP_array_destroy(mrwm->mpt[i],NULL); + if (mrwm->key_prefix[i]) VFREE(mrwm->key_prefix[i]); + } + if (mrwm->xbuddy_tpt) extbuddy_destroy(mrwm->xbuddy_tpt); + } + IFFREE(mrwm); + MTL_TRACE1("}THH_mrwm_create: mrwm=%p\n", mrwm); + logIfErr("THH_mrwm_create") + return rc; +} /* THH_mrwm_create */ + + +/************************************************************************/ +static void VIP_free_mw(void* p) +{ + MTL_ERROR1(MT_FLFMT("found unreleased mw!!!!\n")); +} + + +HH_ret_t THH_mrwm_destroy( + THH_mrwm_t mrwm, /* IN */ + MT_bool hca_failure /* IN */ +) +{ + int i; + VIP_common_ret_t ret=VIP_OK; + VIP_array_handle_t hdl; + VIP_array_obj_t obj; + + MTL_TRACE1("THH_mrwm_destroy{\n"); + for (i= 0; i < MPT_N ; i++) { + if (i==MPT_int) { + ret= VIP_array_get_first_handle(mrwm->mpt[i],&hdl,&obj); + while (ret == VIP_OK) { + MTL_ERROR1(MT_FLFMT("found unreleased internal mr!!!!\n")); + if (!hca_failure) + { + THH_mrwm_deregister_mr(mrwm,((Mr_sw_t*)obj)->key); + }else { + internal_unlock(THH_hob_get_delay_unlock(mrwm->hob),((Mr_sw_t*)obj)->iobuf, TRUE); + } + ret= VIP_array_get_next_handle(mrwm->mpt[i],&hdl,&obj); + } + } + + if (i==MPT_ext) { + ret= VIP_array_get_first_handle(mrwm->mpt[i],&hdl,&obj); + while (ret == VIP_OK) { + /* check if it's fmr or mr */ + u_int8_t offset_in_cell = hdl & 0x7; + if ((mrwm->is_fmr_bits[hdl>>3] >> offset_in_cell) & 0x1) + { + MTL_ERROR1(MT_FLFMT("found unreleased fmr!!!!\n")); + if (!hca_failure) { + THH_mrwm_free_fmr(mrwm,((FMR_sw_info_t*)obj)->last_free_key); + } + + }else + { + MTL_ERROR1(MT_FLFMT("found unreleased mr!!!!\n")); + if (!hca_failure) { + THH_mrwm_deregister_mr(mrwm,((Mr_sw_t*)obj)->key); + }else{ + if (((Mr_sw_t*)obj)->shared_p->ref_count > 1) { + + ((Mr_sw_t*)obj)->shared_p->ref_count--; + + }else { + /*free shared data structures */ + FREE(((Mr_sw_t*)obj)->shared_p); + } + } + + } + ret= VIP_array_get_next_handle(mrwm->mpt[i],&hdl,&obj); + } + } + + if (i==MPT_win) { + VIP_array_destroy(mrwm->mpt[i],VIP_free_mw); + }else { + VIP_array_destroy(mrwm->mpt[i],NULL); + } + VFREE(mrwm->key_prefix[i]); + } + FREE(mrwm->is_fmr_bits); + MOSAL_mutex_free(&mrwm->extbuddy_lock); + extbuddy_destroy(mrwm->xbuddy_tpt); + FREE(mrwm); + + MTL_TRACE1("}THH_mrwm_destroy\n"); + return HH_OK; +} /* THH_mrwm_destroy */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_register_mr( + THH_mrwm_t mrwm, /* IN */ + HH_mr_t* mr_props_p, /* IN */ + VAPI_lkey_t* lkey_p, /* OUT */ + IB_rkey_t* rkey_p /* OUT */ +) +{ + HH_ret_t rc = HH_EINVAL; + HH_tpt_t* tpt = &mr_props_p->tpt; /* just a shorthand */ + IB_virt_addr_t start = mr_props_p->start; + u_int8_t page_shift; + Reg_Segs_t
reg_segs; + MOSAL_iobuf_props_t iobuf_props; + + MTL_TRACE1("{THH_mrwm_register_mr: mrwm=%p\n", mrwm); + reg_segs.mrwm = mrwm; + reg_segs.acl = mr_props_p->acl; + reg_segs.iobuf= NULL; + props2mpt_entry(mr_props_p, &reg_segs.mpt_entry); + switch (tpt->tpt_type) + { + case HH_TPT_PAGE: + page_shift = tpt->tpt.page_lst.page_shift; + reg_segs.n_pages = ((start + mr_props_p->size - 1) >> page_shift) - + (start >> page_shift) + 1; + MTL_DEBUG4(MT_FLFMT("start="U64_FMT", size="U64_FMT", shift=%d, ne=" + SIZE_T_DFMT", np="SIZE_T_DFMT), + (u_int64_t)start, (u_int64_t)mr_props_p->size, page_shift, + tpt->num_entries, reg_segs.n_pages); + if (tpt->num_entries != reg_segs.n_pages) + { + MTL_ERROR1(MT_FLFMT("mismatch: num_entries="SIZE_T_DFMT" != n_pages="SIZE_T_DFMT), + tpt->num_entries, reg_segs.n_pages); + } + if (tpt->num_entries >= reg_segs.n_pages) + { + if (tpt->num_entries > reg_segs.n_pages) + { + MTL_ERROR1(MT_FLFMT("Warning: Extra tpt entries will be ignored")); + } + reg_segs.phys_pages = tpt->tpt.page_lst.phys_page_lst; + reg_segs.log2_page_size = page_shift; + rc = alloc_reg_pages(&reg_segs, MPT_ext, NULL, NULL); + } + break; + + case HH_TPT_BUF: + if ((mr_props_p->tpt.num_entries == 1) + && (mr_props_p->tpt.tpt.buf_lst.phys_buf_lst[0] == mr_props_p->start)) + { + /* no translation needed */ + reg_segs.mpt_entry.pa = TRUE; + reg_segs.n_pages = 1; + reg_segs.seg_start= 0; + reg_segs.phys_pages= NULL; + reg_segs.log2_page_size = TAVOR_LOG_MPT_PG_SZ_SHIFT; + rc= HH_OK; + }else { + rc = tpt_buf_lst_2_pages(mr_props_p, &reg_segs); + } + if (rc == HH_OK) + { + rc = alloc_reg_pages(&reg_segs, MPT_ext, NULL, NULL); + if (reg_segs.phys_pages != NULL) FREE_PHYS_PAGES_ARRAY(&reg_segs); + } + break; + + case HH_TPT_IOBUF: + reg_segs.iobuf= tpt->tpt.iobuf; + if (MOSAL_iobuf_get_props(reg_segs.iobuf,&iobuf_props) != MT_OK) { + MTL_ERROR4(MT_FLFMT("Failed MOSAL_iobuf_get_props.")); + rc= HH_EINVAL; + break; + } + reg_segs.n_pages= iobuf_props.nr_pages; +/*** warning C4242: '=' : conversion from 'MT_u_int_t' to 'u_int8_t', possible loss of data ***/ + reg_segs.log2_page_size = (u_int8_t)iobuf_props.page_shift; + rc = alloc_reg_pages(&reg_segs, MPT_ext, NULL, NULL); + break; + + default: + MTL_ERROR1(MT_FLFMT("bad tpt_type=%d"), tpt->tpt_type); + } + if (rc == HH_OK) + { + *lkey_p = reg_segs.key; + *rkey_p = reg_segs.key; + } + MTL_TRACE1("}THH_mrwm_register_mr: lkey=0x%x\n", *lkey_p); + logIfErr("THH_mrwm_register_mr") + return rc; +} /* THH_mrwm_register_mr */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_register_internal( + THH_mrwm_t mrwm, /* IN */ + THH_internal_mr_t* mr_props_p, /* IN */ + VAPI_lkey_t* lkey_p /* OUT */ +) +{ + HH_ret_t rc = HH_EAGAIN; + Reg_Segs_t reg_segs; + VAPI_lkey_t* forced_key; + Mr_sw_t* mrsw_p = NULL; + + MTL_TRACE1("{THH_mrwm_register_internal: mrwm=%p, force=%d, nbufs="SIZE_T_FMT"\n", + mrwm, mr_props_p->force_memkey, mr_props_p->num_bufs); + reg_segs.mrwm = mrwm; + internal_props2mpt_entry(mr_props_p, &reg_segs.mpt_entry); + reg_segs.phys_pages = NULL; + reg_segs.iobuf= NULL; + forced_key = (mr_props_p->force_memkey ?
&mr_props_p->memkey : NULL); + if (mr_props_p->num_bufs != 0) + { /* physical buffers supplied */ + rc = bufs_register_internal(mr_props_p, &reg_segs, forced_key, &mrsw_p); + } + else + { + rc = lock_register_internal(mr_props_p, &reg_segs, forced_key, &mrsw_p); + } + if (rc == HH_OK) + { + MOSAL_mutex_init(&mrsw_p->modify_mtx); + *lkey_p = reg_segs.key; + } + MTL_TRACE1("}THH_mrwm_register_internal: lkey=0x%x\n", *lkey_p); + logIfErr("THH_mrwm_register_internal") + return rc; +} /* THH_mrwm_register_internal */ + +/************************************************************************/ +static HH_ret_t modify_reg_pages( + Reg_Segs_t* rs_p, + Mr_sw_t* mrsw_p +) +{ + HH_ret_t rc = HH_EAGAIN; + THH_mrwm_t mrwm = rs_p->mrwm; + u_int32_t seg_start = EXTBUDDY_NULL; + u_int8_t log2_mtt_seg_sz = mrwm->props.log2_mtt_seg_sz; + /* avoid explicit u_int64_t division ! */ + u_int32_t n_segs; + VAPI_size_t seg_comp; + u_int8_t log2_segs; + VAPI_phy_addr_t mtt_seg_adr; + THH_cmd_status_t cmd_rc; + + if (!mrsw_p) { + MTL_ERROR3(MT_FLFMT("%s: current memory region object is NULL"),__func__); + return HH_EINVAL; + } + + seg_comp= rs_p->n_pages >> log2_mtt_seg_sz; + seg_comp= ((seg_comp << log2_mtt_seg_sz) != rs_p->n_pages) ? seg_comp + 1 : seg_comp; + /*check that n_segs will not overflow 32 bits */ + log2_segs = ceil_log2(seg_comp); + if (log2_segs >= (8*sizeof(n_segs))) return HH_EINVAL_PARAM; + n_segs = 1 << log2_segs; + + MTL_DEBUG4(MT_FLFMT("%s: n_pages="SIZE_T_DFMT" n_segs=%u"), __func__, + rs_p->n_pages, n_segs); + + MTL_DEBUG4(MT_FLFMT("log2_mtt_seg_sz=%d"), log2_mtt_seg_sz); + + /* always need to allocate new MTT segments, since we may be using modify option 2, and in this + * case, the current MTT segments continue to be active -- they may not be touched */ + + if ((rc= reserve_mtt_segs(mrwm,n_segs-1)) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: Out of MTT entries"),__func__); + return rc; + } + + if (MOSAL_mutex_acq(&mrwm->extbuddy_lock, TRUE) != MT_OK) { + rc= HH_EINTR; + goto failed_mutex; + } + seg_start = extbuddy_alloc(mrwm->xbuddy_tpt, log2_segs); + if (seg_start != EXTBUDDY_NULL) {EXTBUDDY_ALLOC_MTT(seg_start,log2_segs);} + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + if (seg_start == EXTBUDDY_NULL) { + MTL_ERROR3(MT_FLFMT("%s: Failed allocation of 2^%d MTT segments"),__func__, + log2_segs); + rc= HH_EAGAIN; + goto failed_extbuddy; + } + rs_p->seg_start = seg_start; + + mtt_seg_adr = mrwm->props.mtt_base | + (rs_p->seg_start << (log2_mtt_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)); + rc= mtt_writes_iobuf(mrwm->cmd_if, rs_p->iobuf, mtt_seg_adr, rs_p->n_pages); + if (rc != HH_OK) { + MTL_ERROR3(MT_FLFMT("%s: Failed mtt_writes_iobuf (%d: %s) of npages = "SIZE_T_DFMT ", seg addr="U64_FMT), + __func__,rc, HH_strerror_sym(rc),rs_p->n_pages,mtt_seg_adr ); + goto failed_register_pages; + } + + /* issue command to update the MPT entry in Tavor */ + rs_p->mpt_entry.mtt_seg_adr = mtt_seg_adr; + cmd_rc = THH_cmd_MODIFY_MPT(mrwm->cmd_if, rs_p->mpt_index, &rs_p->mpt_entry, FALSE); + MTL_DEBUG4(MT_FLFMT("THH_cmd_MODIFY_MPT: cmd_rc=%d"), cmd_rc); + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("THH_cmd_MODIFY_MPT failed (%d) \n"),cmd_rc); + rc = (CMDRC2HH_ND(cmd_rc)); + goto failed_modify_mpt; + } + + /* save new mpt info in sw context */ + MOSAL_spinlock_lock(&mrsw_p->shared_p->ref_lock); + mrsw_p->shared_p->size = rs_p->mpt_entry.reg_wnd_len; + mrsw_p->shared_p->seg_start = rs_p->seg_start; + mrsw_p->shared_p->log2_segs = log2_segs; + MOSAL_spinlock_unlock(&mrsw_p->shared_p->ref_lock); + + return HH_OK; + 
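+ /* Error unwind: undo in reverse order of acquisition - free the newly + * allocated extbuddy MTT segments, then drop the reserved-segment count. + * The MPT entry and the previously active MTT segments are untouched, + * so the region stays usable with its old translation. */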
+ failed_modify_mpt: + failed_register_pages: + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + extbuddy_free(mrwm->xbuddy_tpt, seg_start, log2_segs); + EXTBUDDY_FREE_MTT(seg_start, log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + failed_extbuddy: + failed_mutex: + release_mtt_segs(mrwm,n_segs-1); + return rc; +} /* modify_reg_pages */ +/************************************************************************/ +/* Handling THH_mrwm_modify_internal(...) case + * where physical buffers are not supplied and pages need to be locked + */ +static HH_ret_t lock_modify_internal( + THH_internal_mr_t* mr_props_p, + Reg_Segs_t* rs_p, + Mr_sw_t *mrsw_p +) +{ + MOSAL_iobuf_props_t iobuf_props; + HH_ret_t rc = HH_ENOSYS; + call_result_t mosal_rc; + + MTL_DEBUG4(MT_FLFMT("%s: start="U64_FMT" size="U64_FMT), __func__ , + mr_props_p->start, mr_props_p->size); + + mosal_rc = MOSAL_iobuf_register((MT_virt_addr_t)mr_props_p->start, (MT_size_t)mr_props_p->size, mr_props_p->vm_ctx, + MOSAL_PERM_READ | MOSAL_PERM_WRITE, &rs_p->iobuf, + MOSAL_IOBUF_LNX_FLG_MARK_ALL_DONTCOPY); + if (mosal_rc != MT_OK) { + MTL_ERROR4(MT_FLFMT("MOSAL_iobuf_register: rc=%s"), mtl_strerror_sym(mosal_rc)); + return mosal_rc == MT_EAGAIN ? HH_EAGAIN : HH_EINVAL_VA; + } + + if (MOSAL_iobuf_get_props(rs_p->iobuf,&iobuf_props) != MT_OK) { + MTL_ERROR4(MT_FLFMT("Failed MOSAL_iobuf_get_props.")); + rc= HH_EINVAL; + } else { + rs_p->n_pages= iobuf_props.nr_pages; + rs_p->log2_page_size = (u_int8_t)iobuf_props.page_shift; + rs_p->acl = VAPI_EN_LOCAL_WRITE; + rc = modify_reg_pages(rs_p, mrsw_p); + } + + if ( rc != HH_OK ) { + MOSAL_iobuf_deregister(rs_p->iobuf); + rs_p->iobuf= NULL; + } else { + mrsw_p->iobuf = rs_p->iobuf; + } + + return rc; +} /* lock_modify_internal */ + +/************************************************************************/ +HH_ret_t THH_mrwm_modify_internal( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey, /* IN */ + THH_internal_mr_t* mr_props_p, /* IN */ + THH_mrwm_modify_flags_t flags /* IN */ +) +{ + HH_ret_t rc = HH_OK; + u_int32_t mpt_index = lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + VIP_common_ret_t vip_rc; + Reg_Segs_t reg_segs; + u_int8_t freed_log2_segs; + u_int32_t freed_seg_start; + mpt_segment_t mpt_seg; + VIP_array_obj_t vip_array_obj; + Mr_sw_t *mrsw_p; + MOSAL_iobuf_t freed_iobuf; /* for internal regions */ + + + if (flags != THH_MRWM_MODIFY_FLAGS_TRANSLATION) { + MTL_ERROR4(MT_FLFMT("%s: Invalid flags set (0x%X) for internal mr modify"),__func__,flags); + return HH_ENOSYS; + } + + mpt_seg= get_mpt_seg(mrwm,mpt_index); + if (mpt_seg != MPT_int) { + MTL_ERROR4(MT_FLFMT("%s: Invalid L-key (0x%X) for (internal) memory region"),__func__,lkey); + return HH_EINVAL; + } + + if (mr_props_p->num_bufs != 0) { + MTL_ERROR4(MT_FLFMT("%s: Providing phys pages for internal mr modify. Currently not supported"),__func__); + return HH_ENOSYS; + } + + if (mr_props_p->force_memkey != FALSE) { + MTL_ERROR4(MT_FLFMT("%s: Forcing memory key for internal mr modify."),__func__); + return HH_EINVAL; + } + + vip_rc= VIP_array_find_hold(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],&vip_array_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed finding L-key (0x%X) for memory region (%s)"),__func__, + lkey,VAPI_strerror_sym(vip_rc)); + return vip_rc == VIP_EINVAL_HNDL ?
HH_EINVAL : HH_EBUSY; + } + mrsw_p= (Mr_sw_t*)vip_array_obj; + + /* Prevent another resize MR operation while this one is in progress */ + if (MOSAL_mutex_acq(&mrsw_p->modify_mtx, TRUE) != MT_OK) { + MTL_ERROR1(MT_FLFMT("%s: Could not acquire mutex -- received signal. returning"), __func__); + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + return HH_EINTR; + } + + /* get pd and protection context of current mr */ + mr_props_p->pd = mrsw_p->pd; + rc = THH_uldm_get_protection_ctx( mrwm->uldm, mr_props_p->pd, &mr_props_p->vm_ctx); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Could not get protection context for internal mr modify."),__func__); + goto done; + } + + freed_iobuf = mrsw_p->iobuf; + freed_log2_segs = mrsw_p->shared_p->log2_segs; + freed_seg_start = mrsw_p->shared_p->seg_start; + + memset(&reg_segs,0,sizeof(Reg_Segs_t)); + reg_segs.mrwm = mrwm; + reg_segs.phys_pages = NULL; + reg_segs.iobuf= NULL; + reg_segs.mpt_index = mpt_index; + reg_segs.mpt_entry.reg_wnd_len = mr_props_p->size; + rc = lock_modify_internal(mr_props_p,&reg_segs,mrsw_p); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: lock_modify_internal failed (%d: %s)"),__func__, + rc, HH_strerror_sym(rc)); + goto done; + } + + /* release previous iobuf */ + MOSAL_iobuf_deregister(freed_iobuf); + + /* release previous MTT entries */ + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + extbuddy_free(mrwm->xbuddy_tpt, freed_seg_start, freed_log2_segs); + EXTBUDDY_FREE_MTT(freed_seg_start, freed_log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + release_mtt_segs(mrwm, (1 << freed_log2_segs) - 1); + +done: + MOSAL_mutex_rel(&mrsw_p->modify_mtx); + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + return rc; + +} + +/************************************************************************/ +HH_ret_t THH_mrwm_register_smr( + THH_mrwm_t mrwm, /* IN */ + HH_smr_t* smr_props_p, /* IN */ + VAPI_lkey_t* lkey_p, /* OUT */ + IB_rkey_t* rkey_p /* OUT */ +) +{ + HH_ret_t rc = HH_EINVAL; + Reg_Segs_t reg_segs; + VIP_common_ret_t vip_rc; + VIP_array_obj_t vip_obj; + Mr_sw_t *mrsw_p,*shared_mr_p; + VIP_array_handle_t mr_hndl; + u_int32_t shared_index; /*index of region shared with*/ + + + MTL_TRACE1("{THH_mrwm_register_smr: mrwm=%p\n", mrwm); + /* prepare inputs */ + shared_index = smr_props_p->lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + if ((shared_index < mrwm->offset[MPT_ext]) || (shared_index >= mrwm->offset[MPT_win])) { + MTL_ERROR4(MT_FLFMT("%s: Got Lkey (0x%X) invalid for (ext.) memory region"),__func__, + smr_props_p->lkey); + return HH_EINVAL; + } + shared_index-= mrwm->offset[MPT_ext]; + /* Hold shared MR while sharing in progress (retain properties until this function done) */ + vip_rc= VIP_array_find_hold(mrwm->mpt[MPT_ext],shared_index,&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed to find (ext.)
region with Lkey=0x%X"),__func__, + smr_props_p->lkey); + return HH_EINVAL; + } + shared_mr_p= (Mr_sw_t*)vip_obj; + rc = smr_props2mpt_entry(mrwm,smr_props_p,&reg_segs.mpt_entry); + if (rc != HH_OK) { + MTL_ERROR1("failed smr_props2mpt_entry \n"); + goto failed_props2mpt; + } + + + mrsw_p= TMALLOC(Mr_sw_t); + if (mrsw_p == NULL) { + MTL_ERROR4(MT_FLFMT("%s: Failed allocation for MR SW context memory"),__func__); + rc= HH_EAGAIN; + goto failed_malloc; + } + memset(mrsw_p, 0, sizeof(Mr_sw_t)); + + reg_segs.acl = smr_props_p->acl; + reg_segs.iobuf= NULL; + /*allocate new mpt idx */ + if (reserve_mpt_entry(mrwm,MPT_ext) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: No more free MPT entry for external regions"),__func__); + rc= HH_EAGAIN; + goto failed_reserve_mpt; + } + vip_rc= VIP_array_insert(mrwm->mpt[MPT_ext],mrsw_p,&mr_hndl); + if (vip_rc != VIP_OK) + { + MTL_ERROR1("register_smr: ERROR: failed allocating new mpt idx \n"); + rc= HH_EAGAIN; + goto failed_array_insert; + } + + + reg_segs.mpt_index = mrwm->offset[MPT_ext] + mr_hndl; + + + /*taking MTT seg start from the mr we're sharing with*/ + reg_segs.seg_start = shared_mr_p->shared_p->seg_start; + reg_segs.log2_page_size = shared_mr_p->shared_p->page_shift; + reg_segs.mrwm = mrwm; + + + rc = register_pages(&reg_segs,MPT_ext,VAPI_MSHAR); + if (rc != HH_OK) { + MTL_ERROR1("register_smr: failed register_pages \n"); + goto failed_register; + } + + /*pointing to the original allocated struct */ + mrsw_p->shared_p = shared_mr_p->shared_p; + /*saving the new MPT entry ctx*/ + rc = save_sw_context(&reg_segs,0,VAPI_MSHAR,mrsw_p); + if (rc != HH_OK) { + MTL_ERROR1("%s: unexpected error !!! failed save_sw_context \n",__func__); + goto failed_save; + } + VIP_array_find_release(mrwm->mpt[MPT_ext],shared_index); + + *lkey_p = reg_segs.key; + *rkey_p = reg_segs.key; + return HH_OK; + + failed_save: + THH_cmd_HW2SW_MPT(mrwm->cmd_if, reg_segs.mpt_index, NULL); + failed_register: + VIP_array_erase(mrwm->mpt[MPT_ext],mr_hndl,NULL); + failed_array_insert: + release_mpt_entry(mrwm,MPT_ext); + failed_reserve_mpt: + FREE(mrsw_p); + failed_malloc: + failed_props2mpt: + VIP_array_find_release(mrwm->mpt[MPT_ext],shared_index); + return rc; + +} /* THH_mrwm_register_smr */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_reregister_mr( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey, + VAPI_mr_change_t change_mask, /* IN */ + HH_mr_t* mr_props_p, /* IN */ + VAPI_lkey_t* lkey_p, /* OUT */ + IB_rkey_t* rkey_p /* OUT */ + ) +{ + HH_ret_t rc = HH_OK; + u_int32_t mpt_index = lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + VIP_common_ret_t vip_rc; + THH_cmd_status_t cmd_rc; + Reg_Segs_t reg_segs; + u_int8_t log2_segs; + mpt_segment_t mpt_seg; + VIP_array_obj_t vip_array_obj; + Mr_sw_t *mrsw_p; + + memset(&reg_segs,0,sizeof(Reg_Segs_t)); + reg_segs.mrwm = mrwm; + reg_segs.iobuf= NULL; /* external */ + reg_segs.mpt_index = mpt_index; + + mpt_seg= get_mpt_seg(mrwm,mpt_index); + if (mpt_seg != MPT_ext) { + MTL_ERROR4(MT_FLFMT("%s: Invalid L-key (0x%X) for (external) memory region"),__func__,lkey); + return HH_EINVAL; + } + /* hide entry while changing translation (i.e. no other operation allowed - mostly sharing) */ + vip_rc= VIP_array_erase_prepare(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],&vip_array_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed removing L-key (0x%X) for memory region (%s)"),__func__, + lkey,VAPI_strerror_sym(vip_rc)); + return vip_rc == VIP_EINVAL_HNDL ?
HH_EINVAL : HH_EBUSY; + } + mrsw_p= (Mr_sw_t*)vip_array_obj; + + MTL_DEBUG1(MT_FLFMT("before query orig mr \n")); + /* query original mpt entry */ + cmd_rc = THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index,&reg_segs.mpt_entry); + if (cmd_rc != THH_CMD_STAT_OK) { + switch(cmd_rc) { + case THH_CMD_STAT_REG_BOUND: + MTL_ERROR1(MT_FLFMT("There are MWs bound to this region \n")); + VIP_array_erase_undo(mrwm->mpt[MPT_ext],mpt_index - mrwm->offset[MPT_ext]); + return HH_EBUSY; + default: + rc = HH_EFATAL; + } + goto failure_release; + } + rc = HH_OK; + + /* make new mem key */ + /*reg_segs.mpt_entry.mem_key = make_key(mrwm,mpt_seg,reg_segs.mpt_index);*/ + /* CHANGE: We retain the same memory key - some ULPs don't like that we replace the Rkey */ + + /* fill changed attributes in sw mpt entry */ + if (change_mask & VAPI_MR_CHANGE_ACL) { + reg_segs.mpt_entry.lw = (mr_props_p->acl & VAPI_EN_LOCAL_WRITE ? TRUE : FALSE); + reg_segs.mpt_entry.rr = (mr_props_p->acl & VAPI_EN_REMOTE_READ ? TRUE : FALSE); + reg_segs.mpt_entry.rw = (mr_props_p->acl & VAPI_EN_REMOTE_WRITE ? TRUE : FALSE); + reg_segs.mpt_entry.a = (mr_props_p->acl & VAPI_EN_REMOTE_ATOM ? TRUE : FALSE); + reg_segs.mpt_entry.eb = (mr_props_p->acl & VAPI_EN_MEMREG_BIND ? TRUE : FALSE); + reg_segs.acl = mr_props_p->acl; + } + + if (change_mask & VAPI_MR_CHANGE_PD) { + reg_segs.mpt_entry.pd = mr_props_p->pd; + } + + if (change_mask & VAPI_MR_CHANGE_TRANS) { + + MTL_DEBUG1(MT_FLFMT("changed translation \n")); + rc = change_translation(&reg_segs,mr_props_p,mrsw_p,&log2_segs); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("change translation failed \n")); + goto failure_release; + } + reg_segs.mpt_entry.page_size = reg_segs.log2_page_size - TAVOR_LOG_MPT_PG_SZ_SHIFT; + reg_segs.mpt_entry.start_address = mr_props_p->start; + reg_segs.mpt_entry.reg_wnd_len = mr_props_p->size; + + /* write the new MTT's */ + MTL_DEBUG3(MT_FLFMT("before mtt writes \n")); + if (reg_segs.iobuf != NULL) { + rc= mtt_writes_iobuf(mrwm->cmd_if, reg_segs.iobuf, reg_segs.mpt_entry.mtt_seg_adr, + reg_segs.n_pages); + } else { + rc = mtt_writes(mrwm->cmd_if, reg_segs.phys_pages,reg_segs.mpt_entry.mtt_seg_adr,reg_segs.n_pages); + } + if (rc!= HH_OK) { + MTL_ERROR1(MT_FLFMT("mtt_writes(_iobuf) failed (%s)\n"),HH_strerror_sym(rc)); + goto failure_release; + } + MTL_DEBUG4(MT_FLFMT("mtt_writes: rc=%d"), rc); + } + + + /* write new MPT to HW */ + cmd_rc = THH_cmd_SW2HW_MPT(reg_segs.mrwm->cmd_if, reg_segs.mpt_index, &reg_segs.mpt_entry); + MTL_DEBUG4(MT_FLFMT("SW2HW_MPT: cmd_rc=%d"), cmd_rc); + rc = (CMDRC2HH_ND(cmd_rc)); + if (rc != HH_OK) { + goto failure_release; + } + + /* save the new MPT locally */ + reg_segs.key = reg_segs.mpt_entry.mem_key; + + /* save the new attributes locally */ + mrsw_p->key = reg_segs.key; + + if (change_mask & VAPI_MR_CHANGE_ACL) { + mrsw_p->acl = reg_segs.acl; + } + + if (change_mask & VAPI_MR_CHANGE_PD) { + mrsw_p->pd = reg_segs.mpt_entry.pd ; + } + + if (change_mask & VAPI_MR_CHANGE_TRANS) { + mrsw_p->start = reg_segs.mpt_entry.start_address; + if (mrsw_p->shared_p == NULL) { + mrsw_p->shared_p = (Shared_data_t*)MALLOC(sizeof(Shared_data_t)); + if (mrsw_p->shared_p == NULL) { + MTL_ERROR1(MT_FLFMT("failed allocating memory \n")); + rc=HH_EAGAIN; + goto failure_release; + } + } + mrsw_p->shared_p->size = reg_segs.mpt_entry.reg_wnd_len; + mrsw_p->shared_p->seg_start = reg_segs.seg_start; + mrsw_p->shared_p->log2_segs = log2_segs; + mrsw_p->shared_p->page_shift = reg_segs.log2_page_size; + mrsw_p->shared_p->ref_count = 1; + 
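+ /* fresh sharing context: this region is the sole owner until an SMR is registered against it */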
MOSAL_spinlock_init(&mrsw_p->shared_p->ref_lock); + if ((mr_props_p->tpt.tpt_type == HH_TPT_BUF) && (reg_segs.phys_pages != NULL)) + FREE_PHYS_PAGES_ARRAY(&reg_segs); + } + + VIP_array_erase_undo(mrwm->mpt[MPT_ext],mpt_index - mrwm->offset[MPT_ext]); + *lkey_p = reg_segs.mpt_entry.mem_key; + *rkey_p = reg_segs.mpt_entry.mem_key; + logIfErr("THH_mrwm_reregister_mr") + return rc; + + failure_release: + /* Invalidate entry after failure - as requested by IB */ + if (mrsw_p->shared_p) { /* still has MTTs to release */ + release_shared_mtts(mrwm,mrsw_p); + } + if ((mr_props_p->tpt.tpt_type == HH_TPT_BUF) && (reg_segs.phys_pages != NULL)) + FREE_PHYS_PAGES_ARRAY(&reg_segs); + VIP_array_erase_done(mrwm->mpt[MPT_ext],mpt_index - mrwm->offset[MPT_ext],NULL); + release_mpt_entry(mrwm,MPT_ext); + FREE(mrsw_p); + return rc; +} /* THH_mrwm_reregister_mr */ + +/************************************************************************/ +HH_ret_t THH_mrwm_query_mr( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey, /* IN */ + HH_mr_info_t* mr_info_p /* OUT */ +) +{ + VIP_common_ret_t vip_rc; + u_int32_t mpt_index = lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + mpt_segment_t mpt_seg; + VIP_array_obj_t vip_obj; + Mr_sw_t *mrsw_p; + + mpt_seg= get_mpt_seg(mrwm,mpt_index); + if ((mpt_seg != MPT_ext) && (mpt_seg != MPT_int)){ + MTL_ERROR4(MT_FLFMT("%s: Invalid L-key (0x%X) for memory region"),__func__,lkey); + return HH_EINVAL; + } + vip_rc= VIP_array_find_hold(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed finding a memory region with L-key 0x%X (%s)"),__func__, + lkey,VAPI_strerror_sym(vip_rc)); + return (HH_ret_t)vip_rc; + } + mrsw_p= (Mr_sw_t*)vip_obj; + swinfo2mrinfo(mrsw_p, mr_info_p); + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + MTL_TRACE1("}THH_mrwm_query_mr\n"); + return HH_OK; +} /* THH_mrwm_query_mr */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_deregister_mr( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey /* IN */ +) +{ + HH_ret_t rc = HH_EINVAL; + VIP_common_ret_t vip_rc= VIP_EINVAL_PARAM; + u_int32_t mpt_index = lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + MT_bool have_fatal = FALSE; + VIP_array_obj_t vip_obj; + Mr_sw_t *mrsw_p=NULL; + THH_cmd_status_t cmd_rc = THH_CMD_STAT_OK; + mpt_segment_t mpt_seg; + + MTL_TRACE1("{THH_mrwm_deregister_mr: mrwm=%p, lkey=0x%x, mi=0x%x\n", + mrwm, lkey, mpt_index); + mpt_seg= get_mpt_seg(mrwm,mpt_index); + if ((mpt_seg != MPT_ext) && (mpt_seg != MPT_int)){ + MTL_ERROR4(MT_FLFMT("%s: Invalid L-key (0x%X) for memory region"),__func__,lkey); + return HH_EINVAL; + } + vip_rc= VIP_array_erase_prepare(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed removing L-key (0x%X) for memory region (%s)"),__func__, + lkey,VAPI_strerror_sym(vip_rc)); + return (HH_ret_t)vip_rc; + } + mrsw_p= (Mr_sw_t*)vip_obj; + +#if defined(MT_SUSPEND_QP) + if ((mpt_seg != MPT_int) || (mrsw_p->is_suspended == FALSE)) { + /* if region IS suspended, it is already in SW ownership */ + cmd_rc = THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, NULL); + } +#else + cmd_rc = THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, NULL); +#endif + /* for memory regions only, anything that is not a 'legal' return code is considered fatal. + * In all problem cases, the unlocking of memory is deferred until THH_hob_destroy.
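+ * (internal_unlock() implements this: with have_fatal set, the iobuf is queued + * on the delay-unlock list instead of being deregistered immediately.)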
+ */ + MTL_DEBUG4(MT_FLFMT("cmd_rc=%d=%s"), cmd_rc, str_THH_cmd_status_t(cmd_rc)); + switch(cmd_rc) { + case THH_CMD_STAT_RESOURCE_BUSY: + case THH_CMD_STAT_REG_BOUND: + VIP_array_erase_undo(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + rc = HH_EBUSY; + break; + case THH_CMD_STAT_EINTR: + VIP_array_erase_undo(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + rc = HH_EINTR; + break; + default: /* OK and all fatal errors*/ + { + have_fatal = (cmd_rc != THH_CMD_STAT_OK) ? TRUE : FALSE; + if (have_fatal && (cmd_rc != THH_CMD_STAT_EFATAL)) { + MTL_ERROR1(MT_FLFMT("POSSIBLE FATAL ERROR:cmd_rc=%d=%s"), cmd_rc, str_THH_cmd_status_t(cmd_rc)); + } + + VIP_array_erase_done(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],NULL); + release_mpt_entry(mrwm,mpt_seg); + if (mpt_index >= mrwm->offset[MPT_ext]) { + /* return fatal for external mem region if had fatal */ + release_shared_mtts(mrwm,mrsw_p); + rc = (have_fatal == TRUE) ? HH_EFATAL : HH_OK ; + } else { /* internal region */ + /* return OK for internal mem region even if had fatal, because we are + * handling deferred unlocking here. + */ + internal_unlock(THH_hob_get_delay_unlock(mrwm->hob), mrsw_p->iobuf, have_fatal); + release_shared_mtts(mrwm,mrsw_p); + MOSAL_mutex_free(&mrsw_p->modify_mtx); + rc = HH_OK; + } + FREE(mrsw_p); + } + } + + MTL_TRACE1("}THH_mrwm_deregister_mr, rc=%d\n", rc); + if (rc != HH_EFATAL) { + logIfErr("THH_mrwm_deregister_mr") + } + return rc; +} /* THH_mrwm_deregister_mr */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_alloc_mw( + THH_mrwm_t mrwm, /* IN */ + HH_pd_hndl_t pd, /* IN */ + IB_rkey_t* initial_rkey_p /* OUT */ +) +{ + HH_ret_t rc = HH_EAGAIN; + VIP_array_handle_t win_hndl; + VIP_common_ret_t vip_rc; + + MTL_TRACE1("{THH_mrwm_alloc_mw: mrwm=%p, pd=%d\n", mrwm, pd); + if (reserve_mpt_entry(mrwm,MPT_win) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: No more free MPT entries for memory windows"),__func__); + return HH_EAGAIN; + } + vip_rc= VIP_array_insert(mrwm->mpt[MPT_win],NULL,&win_hndl); + + if (vip_rc == VIP_OK) + { + THH_mpt_entry_t mpt_entry; + THH_cmd_status_t cmd_rc; + u_int32_t mpt_index = mrwm->offset[MPT_win] + win_hndl; + + mpt_entry_init(&mpt_entry); + mpt_entry.pd = pd; + mpt_entry.r_w = FALSE; + mpt_entry.mem_key = CURRENT_MEMKEY(mrwm,MPT_win,mpt_index); + cmd_rc = THH_cmd_SW2HW_MPT(mrwm->cmd_if, mpt_index, &mpt_entry); + MTL_DEBUG4(MT_FLFMT("alloc_mw: cmd_rc=%d=%s"), + cmd_rc, str_THH_cmd_status_t(cmd_rc)); + rc = (CMDRC2HH_ND(cmd_rc)); + *initial_rkey_p = mpt_entry.mem_key; + } + if ((rc != HH_OK) && (vip_rc == VIP_OK)) { + VIP_array_erase(mrwm->mpt[MPT_win],win_hndl,NULL); + release_mpt_entry(mrwm,MPT_win); + } + MTL_TRACE1("}THH_mrwm_alloc_mw: key=0x%x\n", *initial_rkey_p); + logIfErr("THH_mrwm_alloc_mw") + return rc; +} /* THH_mrwm_alloc_mw */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_query_mw( + THH_mrwm_t mrwm, /* IN */ + IB_rkey_t initial_rkey, /* IN */ + IB_rkey_t* current_rkey_p, /* OUT */ + HH_pd_hndl_t* pd_p /* OUT */ +) +{ + HH_ret_t rc = HH_EINVAL; + VIP_common_ret_t vip_rc; + u_int32_t mpt_index= initial_rkey & MASK32(mrwm->props.log2_mpt_sz); + u_int32_t win_index= mpt_index-mrwm->offset[MPT_win]; + VIP_array_obj_t win_obj; + MTL_TRACE1("{THH_mrwm_query_mw: mrwm=%p, ini_key=0x%x\n", mrwm, initial_rkey); + + if (get_mpt_seg(mrwm,mpt_index) != MPT_win) { + MTL_ERROR4(MT_FLFMT("%s: Invalid initial R-key for memory window (0x%X)"),__func__, + initial_rkey); + return
HH_EINVAL_MW; + } + vip_rc= VIP_array_find_hold(mrwm->mpt[MPT_win],win_index,&win_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR3(MT_FLFMT("%s: Invalid mem-window memkey (0x%X)"),__func__,initial_rkey); + } else { + THH_mpt_entry_t mpt_entry; + THH_cmd_status_t cmd_rc; + mpt_entry_init(&mpt_entry); /* not really needed */ + cmd_rc = THH_cmd_QUERY_MPT(mrwm->cmd_if, mpt_index, &mpt_entry); + rc = (CMDRC2HH_ND(cmd_rc)); + if (cmd_rc == THH_CMD_STAT_OK) + { + *current_rkey_p = mpt_entry.mem_key; + *pd_p = mpt_entry.pd; + } + } + + VIP_array_find_release(mrwm->mpt[MPT_win],win_index); + MTL_TRACE1("}THH_mrwm_query_mw: cur_key=0x%x, pd=%d\n", + *current_rkey_p, *pd_p); + logIfErr("THH_mrwm_query_mw") + return rc; +} /* THH_mrwm_query_mw */ + + +/************************************************************************/ +HH_ret_t THH_mrwm_free_mw( + THH_mrwm_t mrwm, /* IN */ + IB_rkey_t initial_rkey /* IN */ +) +{ + HH_ret_t rc = HH_EINVAL; + VIP_common_ret_t vip_rc; + VIP_array_obj_t win_obj; + u_int32_t mpt_index= initial_rkey & MASK32(mrwm->props.log2_mpt_sz); + u_int32_t win_index= mpt_index-mrwm->offset[MPT_win]; + MTL_TRACE1("{THH_mrwm_free_mw: mrwm=%p, ini_key=0x%x\n", mrwm, initial_rkey); + + if (get_mpt_seg(mrwm,mpt_index) != MPT_win) { + MTL_ERROR4(MT_FLFMT("%s: Invalid initial R-key for memory window (0x%X)"),__func__, + initial_rkey); + return HH_EINVAL_MW; + } + vip_rc= VIP_array_erase_prepare(mrwm->mpt[MPT_win],win_index,&win_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR3(MT_FLFMT("%s: Invalid mem-window memkey (0x%X)"),__func__,initial_rkey); + } else { + THH_mpt_entry_t mpt_entry; + THH_cmd_status_t cmd_rc = THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, + &mpt_entry); + switch(cmd_rc) { + case THH_CMD_STAT_OK: + rc = HH_OK; + break; + case THH_CMD_STAT_RESOURCE_BUSY: + rc = HH_EBUSY; + break; + case THH_CMD_STAT_EINTR: + rc = HH_EINTR; + break; + default: + rc = HH_EFATAL; + } + if (cmd_rc == THH_CMD_STAT_OK) + { + mrwm->key_prefix[MPT_win][win_index] = /* sync key prefix for next allocation */ + (mpt_entry.mem_key >> mrwm->props.log2_mpt_sz) + 1; + } + if ((rc == HH_OK) || (rc == HH_EFATAL)) { + VIP_array_erase_done(mrwm->mpt[MPT_win],win_index,NULL); + release_mpt_entry(mrwm,MPT_win); + } else { + VIP_array_erase_undo(mrwm->mpt[MPT_win],win_index); + } + } + MTL_TRACE1("}THH_mrwm_free_mw\n"); + if (rc != HH_EFATAL) { + logIfErr("THH_mrwm_free_mw") + } + return rc; +} /* THH_mrwm_free_mw */ + + +HH_ret_t THH_mrwm_alloc_fmr(THH_mrwm_t mrwm, /*IN*/ + HH_pd_hndl_t pd, /*IN*/ + VAPI_mrw_acl_t acl, /*IN*/ + MT_size_t max_pages, /*IN*/ + u_int8_t log2_page_sz,/*IN*/ + VAPI_lkey_t* last_lkey_p) /*OUT*/ +{ + HH_ret_t rc = HH_EAGAIN; + VIP_common_ret_t vip_rc; + FMR_sw_info_t* new_fmr_p; + VIP_array_handle_t fmr_hndl; + u_int32_t seg_start = EXTBUDDY_NULL; + u_int8_t log2_mtt_seg_sz = mrwm->props.log2_mtt_seg_sz; + u_int32_t n_segs; + u_int8_t log2_segs; + THH_mpt_entry_t mpt_entry; + THH_mpt_index_t mpt_index; + THH_cmd_status_t cmd_rc; + MT_size_t seg_comp = max_pages >> log2_mtt_seg_sz; + + + /*compute n_segs: round it up to mtt seg size multiple */ + seg_comp= ((seg_comp << log2_mtt_seg_sz) != max_pages) ?
seg_comp + 1 : seg_comp; + /*check that n_segs will not overflow 32 bits */ + log2_segs = ceil_log2(seg_comp); + if (log2_segs >= (8*sizeof(n_segs))) return HH_EINVAL_PARAM; + n_segs = 1 << log2_segs; + + + if (log2_page_sz < TAVOR_LOG_MPT_PG_SZ_SHIFT) { + MTL_ERROR4(MT_FLFMT("Given log2_page_sz too small (%d)"),log2_page_sz); + return HH_EINVAL_PARAM; + } + + new_fmr_p= TMALLOC(FMR_sw_info_t); + if (new_fmr_p == NULL) { + MTL_ERROR4(MT_FLFMT("%s: Failed allocating memory for FMR context"),__func__); + goto failed_malloc; /* HH_EAGAIN */ + } + + if (reserve_mpt_entry(mrwm,MPT_ext) != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: No more free MPT entry for external regions"),__func__); + goto failed_reserve_mpt; + } + vip_rc= VIP_array_insert(mrwm->mpt[MPT_ext],new_fmr_p,&fmr_hndl); + if (vip_rc != VIP_OK) { + MTL_ERROR1(MT_FLFMT("Failed allocating MPT entry for FMR (%s)"),VAPI_strerror_sym(vip_rc)); + rc= HH_EAGAIN; + goto failed_array_insert; + } + + /* set the fmr_bit in the array */ + { + u_int8_t offset_in_cell = fmr_hndl & 0x7; + mrwm->is_fmr_bits[fmr_hndl>>3]|= (((u_int8_t)1) << offset_in_cell); + } + + /* we must ensure at least one segment for each region */ + if (reserve_mtt_segs(mrwm,n_segs-1) != HH_OK) { + MTL_ERROR4(MT_FLFMT("Not enough available MTT segments for a new FMR of %d segments"),n_segs); + rc= HH_EAGAIN; + goto failed_out_of_mtt; + } + if (MOSAL_mutex_acq(&mrwm->extbuddy_lock, TRUE) != MT_OK) { + rc= HH_EINTR; /* Operation interrupted */ + goto failed_mutex; + } + seg_start = extbuddy_alloc(mrwm->xbuddy_tpt, log2_segs); + if (seg_start != EXTBUDDY_NULL) {EXTBUDDY_ALLOC_MTT(seg_start,log2_segs);} + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + if (seg_start == EXTBUDDY_NULL) { + MTL_ERROR1(MT_FLFMT("Failed allocating MTT segments for FMR")); + rc= HH_EAGAIN; + goto failed_extbd; + } + + mpt_index = mrwm->offset[MPT_ext] + fmr_hndl; + init_fmr_mpt_entry(&mpt_entry,pd,acl,make_key(mrwm, MPT_ext,(u_int32_t)mpt_index),log2_page_sz, + mrwm->props.mtt_base | (seg_start << (mrwm->props.log2_mtt_seg_sz + MTT_LOG_MTT_ENTRY_SIZE)) ); + MTL_DEBUG4(MT_FLFMT("mtt_seg_adr="U64_FMT), mpt_entry.mtt_seg_adr); + + cmd_rc = THH_cmd_SW2HW_MPT(mrwm->cmd_if, mpt_index, &mpt_entry); + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("SW2HW_MPT failed - cmd_rc=%d"), cmd_rc); + rc = (cmd_rc == THH_CMD_STAT_EINTR) ? 
HH_EINTR : HH_EFATAL; + goto failed_sw2hw_mpt; + } + + /*saving the new MPT entry */ + rc = init_fmr_context(mrwm,new_fmr_p ,(u_int32_t)mpt_index, seg_start, log2_segs, log2_page_sz); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("failed init_fmr_context() \n")); + goto failed_sw_ctx; + } + + *last_lkey_p = mpt_entry.mem_key; + return rc; + + failed_sw_ctx: + if (THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, &mpt_entry) != THH_CMD_STAT_OK) rc= HH_EFATAL; + failed_sw2hw_mpt: + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + extbuddy_free(mrwm->xbuddy_tpt, seg_start,log2_segs); + EXTBUDDY_FREE_MTT(seg_start, log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + failed_extbd: + failed_mutex: + release_mtt_segs(mrwm,n_segs - 1); + failed_out_of_mtt: + VIP_array_erase(mrwm->mpt[MPT_ext],fmr_hndl,NULL); + failed_array_insert: + release_mpt_entry(mrwm,MPT_ext); + failed_reserve_mpt: + FREE(new_fmr_p); + failed_malloc: + return rc; +} + +HH_ret_t THH_mrwm_map_fmr(THH_mrwm_t mrwm, /*IN*/ + VAPI_lkey_t last_lkey, /*IN*/ + EVAPI_fmr_map_t* map_p, /*IN*/ + VAPI_lkey_t* lkey_p, /*OUT*/ + IB_rkey_t* rkey_p) /*OUT*/ +{ + u_int32_t mpt_index= last_lkey & MASK32(mrwm->props.log2_mpt_sz); + u_int32_t fmr_hndl; + VIP_array_obj_t vip_obj; + FMR_sw_info_t* fmr_info_p; + MT_size_t max_pages,real_num_of_pages,i; + u_int32_t cur_memkey,new_memkey; +#ifndef WRITE_QWORD_WORKAROUND + volatile u_int64_t tmp_qword; +#endif + MT_virt_addr_t cur_mtt_p; + VIP_common_ret_t vip_rc; + + /* Validity checks */ + + if (get_mpt_seg(mrwm,mpt_index) != MPT_ext){ + MTL_ERROR3(MT_FLFMT("%s: Invalid FMR lkey (0x%X)"),__func__,last_lkey); + return HH_EINVAL; + } + fmr_hndl= mpt_index-mrwm->offset[MPT_ext]; + vip_rc= VIP_array_find_hold(mrwm->mpt[MPT_ext],fmr_hndl,&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR2(MT_FLFMT("THH_mrwm_map_fmr invoked for invalid MPT (last_lkey=0x%X)"),last_lkey); + return HH_EINVAL; + } + fmr_info_p= (FMR_sw_info_t*)vip_obj; + cur_memkey= CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index); + if (last_lkey != cur_memkey) { + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + MTL_ERROR2(MT_FLFMT("THH_mrwm_map_fmr invoked with last_lkey=0x%X while current lkey=0x%X"), + last_lkey,cur_memkey); + return HH_EINVAL; + } + + max_pages= (MT_size_t)1<<(fmr_info_p->log2_segs + mrwm->props.log2_mtt_seg_sz); + /* TBD: possible optimization for line above: save max_pages on FMR allocation in FMR_sw_info_t */ +/*** warning C4242: '=' : conversion from 'VAPI_virt_addr_t' to 'MT_size_t', possible loss of data ***/ + real_num_of_pages= (MT_size_t)(((map_p->start + map_p->size - 1) >> fmr_info_p->log2_page_sz) - /* end_page - start_page + 1 */ + (map_p->start >> fmr_info_p->log2_page_sz) + 1); + if ((map_p->page_array_len > max_pages) || (real_num_of_pages != map_p->page_array_len)) { + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + MTL_ERROR2(MT_FLFMT("%s: illegal number of pages for mapping FMR at MPT index 0x%X: start="U64_FMT + " , size="U64_FMT" , log2_page_sz=%d, " + "real_num_of_pages="SIZE_T_DFMT" , page_array_len="SIZE_T_DFMT" , max_pages="SIZE_T_DFMT), __func__,mpt_index,map_p->start, map_p->size,fmr_info_p->log2_page_sz, + real_num_of_pages,map_p->page_array_len,max_pages); + return HH_EINVAL; + } + + /* Compute new memory key */ + MOSAL_spinlock_dpc_lock(&mrwm->key_prefix_lock); + ++(mrwm->key_prefix[MPT_ext][fmr_hndl]); + new_memkey= CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index); + if (new_memkey == fmr_info_p->last_free_key) { + mrwm->key_prefix[MPT_ext][fmr_hndl]--; /* Restore previous key */ + 
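+ /* key_prefix_lock is still held here; it is released just below before + * bailing out with HH_EAGAIN (caller is expected to unmap and retry) */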
MOSAL_spinlock_unlock(&mrwm->key_prefix_lock); + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + MTL_DEBUG4(MT_FLFMT("Wrap around of memory key detected for MPT index %d (last_free_key=0x%X)"), + mpt_index,new_memkey); + return HH_EAGAIN; /* Retry after unmapping */ + } + MOSAL_spinlock_unlock(&mrwm->key_prefix_lock); + + if (cur_memkey != fmr_info_p->last_free_key) { /* It's a "remap" - invalidate MPT before updating MTT and other MPT fields */ + MOSAL_MMAP_IO_WRITE_BYTE((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_STATUS_OFFSET),0xf0); + } + + for (i= 0, cur_mtt_p= fmr_info_p->mtt_entries; i < real_num_of_pages; i++) { /* Write MTT entries */ +#ifdef WRITE_QWORD_WORKAROUND + MOSAL_MMAP_IO_WRITE_DWORD(cur_mtt_p,MOSAL_cpu_to_be32((u_int32_t)(map_p->page_array[i] >> 32))); + MOSAL_MMAP_IO_WRITE_DWORD(cur_mtt_p+4,MOSAL_cpu_to_be32(((u_int32_t)(map_p->page_array[i] & 0xFFFFFFFF)) | 1)); +#else + ((volatile u_int32_t*)&tmp_qword)[0]= + MOSAL_cpu_to_be32((u_int32_t)(map_p->page_array[i] >> 32)); /* ptag_h */ + ((volatile u_int32_t*)&tmp_qword)[1]= + MOSAL_cpu_to_be32(((u_int32_t)(map_p->page_array[i] & 0xFFFFF000)) | 1); /* ptag_l | p */ + MOSAL_MMAP_IO_WRITE_QWORD(cur_mtt_p, tmp_qword ); +#endif + cur_mtt_p+= (1<<MTT_LOG_MTT_ENTRY_SIZE); + } + +#ifdef WRITE_QWORD_WORKAROUND + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_START_ADDR_OFFSET), + MOSAL_cpu_to_be32((u_int32_t)(map_p->start >> 32))); + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_START_ADDR_OFFSET + 4), + MOSAL_cpu_to_be32((u_int32_t)(map_p->start & 0xFFFFFFFF))); +#else + ((volatile u_int32_t*)&tmp_qword)[0]= + MOSAL_cpu_to_be32((u_int32_t)(map_p->start >> 32)); /* start_h */ + ((volatile u_int32_t*)&tmp_qword)[1]= + MOSAL_cpu_to_be32((u_int32_t)(map_p->start & 0xFFFFFFFF)); /* start_l */ + MOSAL_MMAP_IO_WRITE_QWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_START_ADDR_OFFSET),tmp_qword); +#endif + /* MemKey+Lkey update */ + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_MEMKEY_OFFSET),MOSAL_cpu_to_be32(new_memkey)); + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_LKEY_OFFSET),MOSAL_cpu_to_be32(new_memkey)); + +#ifdef WRITE_QWORD_WORKAROUND + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_LEN_OFFSET), + MOSAL_cpu_to_be32((u_int32_t)(map_p->size >> 32))); + MOSAL_MMAP_IO_WRITE_DWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_LEN_OFFSET + 4), + MOSAL_cpu_to_be32((u_int32_t)(map_p->size & 0xFFFFFFFF))); +#else + ((volatile u_int32_t*)&tmp_qword)[0]= + MOSAL_cpu_to_be32((u_int32_t)(map_p->size >> 32)); /* length_h */ + ((volatile u_int32_t*)&tmp_qword)[1]= + MOSAL_cpu_to_be32((u_int32_t)(map_p->size & 0xFFFFFFFF)); /* length_l */ + MOSAL_MMAP_IO_WRITE_QWORD((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_LEN_OFFSET),tmp_qword); /* length change makes MPT valid again */ +#endif + /* revalidate this MPT */ + MOSAL_MMAP_IO_WRITE_BYTE((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_STATUS_OFFSET),0); + + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + *lkey_p= new_memkey; + *rkey_p= new_memkey; + return HH_OK; +} + +HH_ret_t THH_mrwm_unmap_fmr(THH_mrwm_t mrwm, /*IN*/ + u_int32_t num_of_fmrs_to_unmap,/*IN*/ + VAPI_lkey_t* last_lkeys_array) /*IN*/ +{ + u_int32_t mpt_index,fmr_hndl; + u_int32_t index_mask= MASK32(mrwm->props.log2_mpt_sz); + VIP_array_obj_t vip_obj; + FMR_sw_info_t* fmr_info_p; + u_int32_t cur_memkey; + u_int32_t i; + THH_cmd_status_t cmd_rc; + VIP_common_ret_t vip_rc; + + for (i= 0; i < num_of_fmrs_to_unmap; i++) { + mpt_index= last_lkeys_array[i] & index_mask; + if (get_mpt_seg(mrwm,mpt_index) != MPT_ext){ + MTL_ERROR3(MT_FLFMT("%s:
Invalid FMR lkey (0x%X)"),__func__,last_lkeys_array[i]); + continue; + } + fmr_hndl= mpt_index-mrwm->offset[MPT_ext]; + vip_rc= VIP_array_find_hold(mrwm->mpt[MPT_ext],fmr_hndl,&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR2(MT_FLFMT("THH_mrwm_unmap_fmr invoked for invalid MPT (last_lkey=0x%X)"), + last_lkeys_array[i]); + continue; + } + fmr_info_p= (FMR_sw_info_t*)vip_obj; + cur_memkey= CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index); + if (last_lkeys_array[i] != cur_memkey) { + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + MTL_ERROR2(MT_FLFMT("THH_mrwm_unmap_fmr invoked with last_lkey=0x%X while current lkey=0x%X"), + last_lkeys_array[i],cur_memkey); + continue; /* continue unmap for any region we can */ + } + + MOSAL_MMAP_IO_WRITE_BYTE((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_STATUS_OFFSET),0xf0); /* invalidate mpt */ + fmr_info_p->last_free_key= cur_memkey; + VIP_array_find_release(mrwm->mpt[MPT_ext],fmr_hndl); + } + + cmd_rc = THH_cmd_SYNC_TPT(mrwm->cmd_if); + if ((cmd_rc != THH_CMD_STAT_OK) && (cmd_rc != THH_CMD_STAT_EINTR)) { + MTL_ERROR1(MT_FLFMT("Fatal error: Command SYNC_TPT failed")); + } + return (CMDRC2HH_ND(cmd_rc)); +} + +HH_ret_t THH_mrwm_free_fmr(THH_mrwm_t mrwm, /*IN*/ + VAPI_lkey_t last_lkey) /*IN*/ + +{ + u_int32_t mpt_index= last_lkey & MASK32(mrwm->props.log2_mpt_sz); + VIP_array_obj_t vip_obj; + VIP_array_handle_t fmr_hndl; + FMR_sw_info_t* fmr_info_p; + THH_cmd_status_t stat; + VIP_common_ret_t vip_rc; + + /* Validity checks */ + if (get_mpt_seg(mrwm,mpt_index) != MPT_ext){ + MTL_ERROR3(MT_FLFMT("%s: Invalid FMR lkey (0x%X)"),__func__,last_lkey); + return HH_EINVAL; + } + fmr_hndl= mpt_index-mrwm->offset[MPT_ext]; + vip_rc= VIP_array_erase_prepare(mrwm->mpt[MPT_ext],fmr_hndl,&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR2(MT_FLFMT("THH_mrwm_free_fmr invoked for invalid MPT (last_lkey=0x%X)"),last_lkey); + return HH_EINVAL; + } + fmr_info_p= (FMR_sw_info_t*)vip_obj; + + if (last_lkey != CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index)) { + VIP_array_erase_undo(mrwm->mpt[MPT_ext],fmr_hndl); + MTL_ERROR2(MT_FLFMT("THH_mrwm_free_fmr invoked with last_lkey=0x%X while current lkey=0x%X"), + last_lkey,CURRENT_MEMKEY(mrwm,MPT_ext,mpt_index)); + return HH_EINVAL; + } + + MOSAL_MMAP_IO_WRITE_BYTE((fmr_info_p->mpt_entry + TAVOR_IF_MPT_HW_STATUS_OFFSET),0xf0); /* invalidate mpt */ + + stat = THH_cmd_SYNC_TPT(mrwm->cmd_if); + if ((stat != THH_CMD_STAT_OK) && (stat != THH_CMD_STAT_EINTR)) { + MTL_ERROR1(MT_FLFMT("Fatal error: Command SYNC_TPT failed")); + } + + /* MOSAL_io_unmap for MTTs+MPT */ + MOSAL_io_unmap(fmr_info_p->mpt_entry); + MOSAL_io_unmap(fmr_info_p->mtt_entries); + + /* Return MTTs to extbuddy and MPT to epool */ + MOSAL_mutex_acq_ui(&mrwm->extbuddy_lock); + if (!extbuddy_free(mrwm->xbuddy_tpt, fmr_info_p->seg_start,fmr_info_p->log2_segs)) { + MTL_ERROR4(MT_FLFMT( + "extbuddy_free failed for 2^%d MTT segments from segment %d - resource leak !"), + fmr_info_p->log2_segs,fmr_info_p->seg_start); /* continue anyway */ + } + EXTBUDDY_FREE_MTT(fmr_info_p->seg_start,fmr_info_p->log2_segs); + MOSAL_mutex_rel(&mrwm->extbuddy_lock); + release_mtt_segs(mrwm,(1 << fmr_info_p->log2_segs) - 1); + + VIP_array_erase_done(mrwm->mpt[MPT_ext],fmr_hndl,NULL); + /* zero the fmr_bit in the array */ + { + u_int8_t offset_in_cell = fmr_hndl & 0x7; + mrwm->is_fmr_bits[fmr_hndl>>3]&= ~(((u_int8_t)1) << offset_in_cell); + } + + release_mpt_entry(mrwm,MPT_ext); + FREE(fmr_info_p); + return ((stat ==THH_CMD_STAT_OK) ?
HH_OK : HH_EFATAL); +} + +/************************************************************************/ +/* Assumed to be called first in this module, by a single thread. */ +void THH_mrwm_init(void) +{ + native_page_shift = MOSAL_SYS_PAGE_SHIFT; + MTL_DEBUG4(MT_FLFMT("native_page: shift=%d"), native_page_shift); +} /* THH_mrwm_init */ + + +HH_ret_t THH_mrwm_get_num_objs(THH_mrwm_t mrwm,u_int32_t *num_mr_int_p, + u_int32_t *num_mr_ext_p,u_int32_t *num_mws_p ) +{ + /* check attributes */ + if ( mrwm == NULL || mrwm->mpt[MPT_int] == NULL || + mrwm->mpt[MPT_ext] == NULL || mrwm->mpt[MPT_win] == NULL) { + return HH_EINVAL; + } + + if (num_mr_int_p == NULL && num_mr_ext_p == NULL && num_mws_p == NULL) { + return HH_EINVAL; + } + + if (num_mr_int_p) { + *num_mr_int_p = VIP_array_get_num_of_objects(mrwm->mpt[MPT_int]); + } + if (num_mr_ext_p) { + *num_mr_ext_p = VIP_array_get_num_of_objects(mrwm->mpt[MPT_ext]); + } + if (num_mws_p) { + *num_mws_p = VIP_array_get_num_of_objects(mrwm->mpt[MPT_win]); + } + return HH_OK; +} + +#if defined(MT_SUSPEND_QP) +HH_ret_t THH_mrwm_suspend_internal( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey, /* IN */ + MT_bool suspend_flag /* IN */ +) +{ + VIP_common_ret_t vip_rc; + u_int32_t mpt_index = lkey & ((1ul << mrwm->props.log2_mpt_sz) - 1); + mpt_segment_t mpt_seg; + VIP_array_obj_t vip_obj; + Mr_sw_t *mrsw_p; + THH_cmd_status_t cmd_st; + MOSAL_iobuf_props_t iobuf_props = {0}; + call_result_t mosal_rc; + HH_ret_t rc = HH_OK; + + MTL_TRACE1(MT_FLFMT("{%s: L_key=0x%x, suspend_flag=%s"), + __func__, lkey, (suspend_flag==TRUE)?"TRUE":"FALSE"); + + mpt_seg= get_mpt_seg(mrwm,mpt_index); + if (mpt_seg != MPT_int){ + MTL_ERROR4(MT_FLFMT("%s: Invalid L-key (0x%X) for internal memory region"),__func__,lkey); + return HH_EINVAL; + } + + vip_rc= VIP_array_find_hold(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg],&vip_obj); + if (vip_rc != VIP_OK) { + MTL_ERROR4(MT_FLFMT("%s: Failed finding internal memory region with L-key 0x%X (%s)"),__func__, + lkey,VAPI_strerror_sym(vip_rc)); + return (HH_ret_t)vip_rc; + } + mrsw_p= (Mr_sw_t*)vip_obj; + + if (suspend_flag == TRUE) { + if (mrsw_p->is_suspended == TRUE) { + MTL_DEBUG2(MT_FLFMT("%s: internal memory region with L-key 0x%x already suspended"), + __func__,lkey); + rc = HH_EAGAIN; + goto suspend_hold; + } + if (mrsw_p->iobuf == NULL) { + MTL_ERROR1(MT_FLFMT("%s: suspending intl_reg with L-key 0x%X. IOBUF is NULL!!"), + __func__,lkey); + rc = HH_ERR; + goto suspend_hold; + } + + mrsw_p->mpt_entry = TMALLOC(THH_mpt_entry_t); + if (mrsw_p->mpt_entry == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Could not malloc mem for saving mpt_entry for internal reg L-key 0x%X"), + __func__,lkey); + rc = HH_EAGAIN; + goto suspend_hold; + } + /* change MPT entry to SW ownership to disable it, and save the mpt entry for restoring later */ + cmd_st = THH_cmd_HW2SW_MPT(mrwm->cmd_if, mpt_index, mrsw_p->mpt_entry); + if (cmd_st != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("%s: THH_cmd_HW2SW_MPT returned %d for internal reg L-key 0x%X"), + __func__,cmd_st, lkey); + rc = HH_ERR; + goto suspend_malloc; + } + /* deregister the iobuf */ + MOSAL_iobuf_get_props(mrsw_p->iobuf, &iobuf_props); + mrsw_p->prot_ctx = iobuf_props.prot_ctx; + mrsw_p->va = iobuf_props.va; + mrsw_p->size = iobuf_props.size; + + MOSAL_iobuf_deregister(mrsw_p->iobuf); + mrsw_p->iobuf= NULL; + mrsw_p->is_suspended = TRUE; + } else { + /* unsuspending */ + /* reregister the iobuf */ + if (mrsw_p->is_suspended == FALSE) { + MTL_ERROR1(MT_FLFMT("%s: unsuspend request. 
internal region is not suspended"), __func__); + rc = HH_ERR; + goto unsuspend_hold; + } + mosal_rc = MOSAL_iobuf_register( mrsw_p->va, mrsw_p->size, mrsw_p->prot_ctx, + MOSAL_PERM_READ | MOSAL_PERM_WRITE, &mrsw_p->iobuf, + MOSAL_IOBUF_LNX_FLG_MARK_ALL_DONTCOPY); + if (mosal_rc != MT_OK) { + MTL_ERROR1(MT_FLFMT("%s: unsuspend. MOSAL_iobuf_register: rc=%s"), __func__, mtl_strerror_sym(mosal_rc)); + rc = (mosal_rc == MT_EAGAIN) ? HH_EAGAIN : HH_EINVAL_VA; + goto unsuspend_hold; + } + + /* get properties of the iobuf just obtained, to get n_pages. */ + mosal_rc = MOSAL_iobuf_get_props(mrsw_p->iobuf, &iobuf_props); + if (mosal_rc != MT_OK) { + MTL_ERROR1(MT_FLFMT("%s: unsuspend. MOSAL_iobuf_get_props: rc=%s"), __func__, mtl_strerror_sym(mosal_rc)); + rc = HH_ERR; + goto unsuspend_iobuf; + } + + /* write the MTT entry with the page translation table*/ + rc = mtt_writes_iobuf(mrwm->cmd_if, + mrsw_p->iobuf, + (VAPI_phy_addr_t)mrsw_p->mpt_entry->mtt_seg_adr, + iobuf_props.nr_pages); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: unsuspend. mtt_writes_iobuf failed (%d: %s)"),__func__, + rc,HH_strerror_sym(rc)); + goto unsuspend_iobuf; + } + + /* re-activate the MPT entry */ + cmd_st = THH_cmd_SW2HW_MPT(mrwm->cmd_if, mpt_index, mrsw_p->mpt_entry); + if (cmd_st != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("%s: THH_cmd_SW2HW_MPT returned %d for internal reg L-key 0x%X"), + __func__,cmd_st, lkey); + rc = HH_ERR; + goto unsuspend_iobuf; + } + + /* clean-up */ + FREE(mrsw_p->mpt_entry); + mrsw_p->mpt_entry= NULL; + mrsw_p->is_suspended = FALSE; + } + + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + MTL_TRACE1("}THH_mrwm_suspend_internal\n"); + return HH_OK; + +suspend_malloc: + FREE(mrsw_p->mpt_entry); + mrsw_p->mpt_entry = NULL; + +suspend_hold: + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + return rc; + +unsuspend_iobuf: + MOSAL_iobuf_deregister(mrsw_p->iobuf); + mrsw_p->iobuf= NULL; +unsuspend_hold: + VIP_array_find_release(mrwm->mpt[mpt_seg],mpt_index-mrwm->offset[mpt_seg]); + return rc; + + +} /* THH_mrwm_suspend_internal */ +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.h new file mode 100644 index 00000000..b7e4fb03 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/mrwm/tmrwm.h @@ -0,0 +1,427 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(_TMRW__H) +#define _TMRW__H + +#include +#include +#include +#include +#include +#include +#include /* just for THH_hob_t decl */ + +typedef u_int32_t THH_pdm_t; + + +#define THH_MRWM_MODIFY_FLAGS_TRANSLATION 1 /* implies size change - only this one + * needs to be supported for SRQ. For + * the rest ENOSYS + */ +#define THH_MRWM_MODIFY_FLAGS_START_ADDR (1<<1) /* If not set, start address is only + * used to create the new iobuf, but + * MPT.start is retained from original MR + */ +#define THH_MRWM_MODIFY_FLAGS_ACL (1<<2) +#define THH_MRWM_MODIFY_FLAGS_PD (1<<3) +/* More: TBD */ +typedef u_int32_t THH_mrwm_modify_flags_t; + + +typedef struct +{ + u_int64_t mtt_base; /* Physical address of MTT */ + MT_phys_addr_t mpt_base; /* Physical address of MPT */ + u_int8_t log2_mpt_sz; /* Log2 of number of entries in MPT */ + u_int8_t log2_mtt_sz; /* Log2 of number of entries in the MTT */ + u_int8_t log2_mtt_seg_sz; /* Log2 of MTT segment size in entries */ + u_int8_t log2_max_mtt_segs; /* Log2 of maximum MTT segments possible */ + u_int8_t log2_rsvd_mpts; /* Log2 of number of MPTs reserved for firmware */ + u_int8_t log2_rsvd_mtt_segs; /* Log2 of number of MTT segments reserved for firmware */ + MT_size_t max_mem_reg; /* Max regions in MPT for external */ + MT_size_t max_mem_reg_internal; /* Max regions ... internal (WQEs & CQEs) */ + MT_size_t max_mem_win; /* Max memory windows in the MPT */ +} THH_mrwm_props_t; + + +typedef struct +{ + IB_virt_addr_t start; /* Region start address in user virtual space */ + VAPI_size_t size; /* Region size */ + HH_pd_hndl_t pd; /* PD to associate with requested region */ + MOSAL_protection_ctx_t vm_ctx; /* Virtual context of given virt. address */ + MT_bool force_memkey; /* Allocate region with given memory key */ + VAPI_lkey_t memkey; /* Requested memory key (valid iff force_memkey) */ + + /* Optional supplied physical buffers. Similar to HH_tpt_t.buf_lst */ + MT_size_t num_bufs; /* != 0 iff physical buffers supplied */ + VAPI_phy_addr_t* phys_buf_lst; /* size = num_bufs */ + VAPI_size_t* buf_sz_lst; /* [num_bufs], corresponds to phys_buf_lst */ +} THH_internal_mr_t; + +/************************************************************************ + * Function: THH_mrwm_create + * + * Arguments: + * hob + * mrwm_props - Table sizes and allocation partitioning + * mrwm_p - The allocated THH_mrwm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (MPT size given is smaller than + * total number of regions and windows, or NULL ptr.) + * HH_EAGAIN - Not enough resources in order to allocate object + * + * Description: + * This function creates the THH_mrwm_t object instance in order to + * manage the MPT and MTT resources. + */ +extern HH_ret_t THH_mrwm_create( + THH_hob_t hob, /* IN */ + THH_mrwm_props_t* mrwm_props, /* IN */ + THH_mrwm_t* mrwm_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_mrwm_destroy + * + * Arguments: + * mrwm - Object to destroy + * + * Returns: + * HH_OK + * HH_EINVAL - Unknown object + * + * Description: + * This function frees the THH_mrwm object resources.
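+ * Leftover regions or windows are logged as leaks. With hca_failure set, + * locked pages of internal regions are queued for deferred unlock instead + * of being unlocked in place.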
+ */ +extern HH_ret_t THH_mrwm_destroy( + THH_mrwm_t mrwm, /* IN */ + MT_bool hca_failure /* IN */ +); + + +/************************************************************************ + * Function: THH_mrwm_register_mr + * + * Arguments: + * mrwm + * mr_props - Memory region properties + * lkey_p - L-Key allocated for region (to be used as region handle) + * rkey_p - R-Key allocated for region (valid for remote access) + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (properties or pointers) + * HH_EAGAIN - No free region resources available + * + * Description: + * This function registers given memory region (virtual or physical - + * based on given mr_props_p). + */ +extern HH_ret_t THH_mrwm_register_mr( + THH_mrwm_t mrwm, /* IN */ + HH_mr_t* mr_props_p, /* IN */ + VAPI_lkey_t* lkey_p, /* OUT */ + IB_rkey_t* rkey_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_mrwm_register_internal + * + * Arguments: + * mrwm + * mr_props_p - Requested internal memory region properties + * memkey_p - Memory key to use in order to access this region + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * HH_EAGAIN - No resources to allocate internal memory region + * + * Description: + * For the WQE and CQE buffers, internal memory registration is + * required in order to enable access of the InfiniHost to those + * buffers. This function performs a full memory registration operation + * in addition to the registration operation as done for + * THH_mrwm_register_mr(), i.e. it deals with locking the memory and + * getting physical pages table (which is done by the VIP layers for + * external memory registrations). + */ +extern HH_ret_t THH_mrwm_register_internal( + THH_mrwm_t mrwm, /* IN */ + THH_internal_mr_t* mr_props_p, /* IN */ + VAPI_lkey_t* memkey_p /* OUT */ +); + +/************************************************************************ + * Function: THH_mrwm_modify_internal + * + * Arguments: + * mrwm + * memkey_p - region's lkey + * mr_props_p - Requested new internal memory region properties + * flags - indicate which properties in the mr_props structure + * are valid (i.e., are to be modified) + * Currently, only THH_MRWM_MODIFY_FLAGS_TRANSLATION is supported. + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * HH_EAGAIN - No resources to allocate internal memory region + * + * Description: + * This function is currently used by SRQs, when modifying the size + * of an SRQ. Size modification (i.e., number of outstanding WQEs) + * is the only modification currently supported. The function must + * pin the pages of the WQE buffer, create new MTT entries for the WQEs buffer, + * update the data in the MPT entry for the region (MTT seg addr and length) + * and issue the MODIFY_MPT command. If all goes well, the old iobuf is + * deleted, as are the old MTT entries.
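+ * + * A minimal caller sketch (hypothetical buffer names, error handling + * elided; pd and vm_ctx are filled in internally from the existing + * region, and num_bufs/force_memkey must be left zero): + * + * THH_internal_mr_t props; + * memset(&props, 0, sizeof(props)); + * props.start = new_wqe_buf_va; + * props.size = new_wqe_buf_sz; + * rc = THH_mrwm_modify_internal(mrwm, srq_lkey, &props, + * THH_MRWM_MODIFY_FLAGS_TRANSLATION);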
+
+
+/************************************************************************
+ * Function: THH_mrwm_reregister_mr
+ *
+ * Arguments:
+ * mrwm
+ * lkey
+ * change_mask - Change request
+ * mr_props_p - Updated memory region properties
+ * lkey_p - Returned L-key
+ * rkey_p - Returned R-key
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters
+ * HH_EAGAIN - Not enough resources to complete operation
+ *
+ * Description:
+ * (see HH-API's HH_reregister_mr)
+ */
+extern HH_ret_t THH_mrwm_reregister_mr(
+ THH_mrwm_t mrwm, /* IN */
+ VAPI_lkey_t lkey, /* IN */
+ VAPI_mr_change_t change_mask, /* IN */
+ HH_mr_t* mr_props_p, /* IN */
+ VAPI_lkey_t* lkey_p, /* OUT */
+ IB_rkey_t* rkey_p /* OUT */
+);
+
+
+/************************************************************************
+ * Function: THH_mrwm_register_smr
+ *
+ * Arguments:
+ * mrwm
+ * smr_props_p - Shared memory region properties
+ * lkey_p - Returned L-key
+ * rkey_p - Returned R-key
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters (properties or pointers)
+ * HH_EAGAIN - No free region resources available
+ *
+ * Description:
+ * This function uses the same physical pages (MTT) translation entries
+ * for a new region (new MPT entry).
+ */
+extern HH_ret_t THH_mrwm_register_smr(
+ THH_mrwm_t mrwm, /* IN */
+ HH_smr_t* smr_props_p, /* IN */
+ VAPI_lkey_t* lkey_p, /* OUT */
+ IB_rkey_t* rkey_p /* OUT */
+);
+
+
+/************************************************************************
+ * Function: THH_mrwm_query_mr
+ *
+ * Arguments:
+ * mrwm
+ * lkey - L-key of memory region as returned on registration
+ * mr_info_p - Returned memory region information
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters
+ *
+ * Description:
+ * This function returns properties of a registered memory region, using
+ * the region's L-key as a handle.
+ */
+extern HH_ret_t THH_mrwm_query_mr(
+ THH_mrwm_t mrwm, /* IN */
+ VAPI_lkey_t lkey, /* IN */
+ HH_mr_info_t* mr_info_p /* OUT */
+);
+
+
+/************************************************************************
+ * Function: THH_mrwm_deregister_mr
+ *
+ * Arguments:
+ * mrwm
+ * lkey - L-key of region to deregister
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Unknown region
+ * HH_EBUSY - Given region is still bound to memory windows
+ *
+ * Description:
+ * This function frees the given memory region resources (unless memory
+ * windows are still bound to it).
+ */
+extern HH_ret_t THH_mrwm_deregister_mr(
+ THH_mrwm_t mrwm, /* IN */
+ VAPI_lkey_t lkey /* IN */
+);
+
+
+/************************************************************************
+ * Function: THH_mrwm_alloc_mw
+ *
+ * Arguments:
+ * mrwm
+ * pd - The protection domain of the allocated window
+ * initial_rkey_p - R-Key to be used for first binding request
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters (unknown PD or NULL ptr.)
+ * HH_EAGAIN - No available MPT resources
+ *
+ * Description:
+ * Allocate MPT entry for a memory window.
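+ *
+ * The initial R-Key also serves as the window's handle below; the bind
+ * itself is posted as a work request on a QP (cf. THHUL_mwm_bind_mw
+ * later in this patch), not through this interface. An illustrative
+ * lifecycle, assuming mrwm and pd already exist:
+ *
+ *   IB_rkey_t init_rkey, cur_rkey;
+ *   HH_pd_hndl_t owner_pd;
+ *   THH_mrwm_alloc_mw(mrwm, pd, &init_rkey);
+ *   ... bind via a QP work request, then: ...
+ *   THH_mrwm_query_mw(mrwm, init_rkey, &cur_rkey, &owner_pd);
+ *   THH_mrwm_free_mw(mrwm, init_rkey);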
+ */ +extern HH_ret_t THH_mrwm_alloc_mw( + THH_mrwm_t mrwm, /* IN */ + HH_pd_hndl_t pd, /* IN */ + IB_rkey_t* initial_rkey_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_mrwm_query_mw + * + * Arguments: + * mrwm + * initial_rkey - R-Key received on window allocation + * current_rkey_p - The current R-Key associated with this window + * pd_p - The protection domain of this window + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (unknown window or NULL ptr.) + * + * Description: + * Return properties of given memory window (initial R-Key used as a handle). + */ +extern HH_ret_t THH_mrwm_query_mw( + THH_mrwm_t mrwm, /* IN */ + IB_rkey_t initial_rkey, /* IN */ + IB_rkey_t* current_rkey_p, /* OUT */ + HH_pd_hndl_t* pd_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_mrwm_free_mw + * + * Arguments: + * mrwm + * initial_rkey - R-Key received on window allocation + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters (initial_rkey does not match + * any memory window) + * + * Description: + * Free the MPT resources associated with given memory window. + */ +extern HH_ret_t THH_mrwm_free_mw( + THH_mrwm_t mrwm, /* IN */ + IB_rkey_t initial_rkey /* IN */ +); + + + +/************************************************************************ + * Fast memory region + ************************************************************************/ + +HH_ret_t THH_mrwm_alloc_fmr(THH_mrwm_t mrwm, /*IN*/ + HH_pd_hndl_t pd, /*IN*/ + VAPI_mrw_acl_t acl, /*IN*/ + MT_size_t max_pages, /*IN*/ + u_int8_t log2_page_sz,/*IN*/ + VAPI_lkey_t* last_lkey_p);/*OUT*/ + +HH_ret_t THH_mrwm_map_fmr(THH_mrwm_t mrwm, /*IN*/ + VAPI_lkey_t last_lkey, /*IN*/ + EVAPI_fmr_map_t* map_p, /*IN*/ + VAPI_lkey_t* lkey_p, /*OUT*/ + IB_rkey_t* rkey_p); /*OUT*/ + +HH_ret_t THH_mrwm_unmap_fmr(THH_mrwm_t mrwm, /*IN*/ + u_int32_t num_of_fmrs_to_unmap,/*IN*/ + VAPI_lkey_t* last_lkeys_array); /*IN*/ + +HH_ret_t THH_mrwm_free_fmr(THH_mrwm_t mrwm, /*IN*/ + VAPI_lkey_t last_lkey); /*IN*/ + +/* debug info */ +HH_ret_t THH_mrwm_get_num_objs(THH_mrwm_t mrwm,u_int32_t *num_mr_int_p, + u_int32_t *num_mr_ext_p,u_int32_t *num_mws_p ); + + +#if defined(MT_SUSPEND_QP) +HH_ret_t THH_mrwm_suspend_internal( + THH_mrwm_t mrwm, /* IN */ + VAPI_lkey_t lkey, /* IN */ + MT_bool suspend_flag /* IN */ + ); +#endif +#endif /* _TMRW__H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_kl.def b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_kl.def new file mode 100644 index 00000000..6d1840fe --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_kl.def @@ -0,0 +1,160 @@ +EXPORTS + ; for OS only + DllInitialize private + DllUnload private + ; for Windows Tavor Driver only + THH_add_hca + THH_rmv_hca + ; ----- Thh_mod_obj.c ----- + THH_init_module + THH_cleanup_module + ; ----- Cmdif.c ----- + THH_cmd_create + THH_cmd_destroy + THH_cmd_set_eq + THH_cmd_clr_eq + THH_cmd_assign_ddrmm + THH_cmd_revoke_ddrmm + THH_cmd_eventh + ; ----- Cmds_wrap.c ----- + THH_cmd_print_hca_props + THH_cmd_print_dev_lims + THH_cmd_print_query_fw + THH_cmd_print_query_adapter + THH_cmd_print_query_ddr + THH_cmd_print_init_ib + THH_cmd_print_cq_context + THH_cmd_print_qp_context + THH_cmd_print_eq_context + THH_cmd_print_mpt_entry + THH_cmd_print_mgm_entry + ; ----- Eventp.c ----- + THH_eventp_create + THH_eventp_destroy + THH_eventp_setup_comp_eq + THH_eventp_setup_ib_eq + 
THH_eventp_setup_cmd_eq + THH_eventp_setup_mt_eq + THH_eventp_replace_handler + THH_eventp_teardown_eq + ; ----- Mcgm.c ----- + THH_mcgm_create + THH_mcgm_destroy + THH_mcgm_attach_qp + THH_mcgm_detach_qp + ; ----- Thh_hob.c ----- + THH_hob_query_port_prop + THH_hob_alloc_ul_res + THH_hob_free_ul_res + THH_hob_alloc_pd + THH_hob_free_pd + THH_hob_alloc_rdd + THH_hob_free_rdd + THH_hob_create_ud_av + THH_hob_modify_ud_av + THH_hob_query_ud_av + THH_hob_destroy_ud_av + THH_hob_register_mr + THH_hob_reregister_mr + THH_hob_register_smr + THH_hob_query_mr + THH_hob_deregister_mr + THH_hob_alloc_mw + THH_hob_query_mw + THH_hob_free_mw + THH_hob_create_cq + THH_hob_resize_cq + THH_hob_query_cq + THH_hob_destroy_cq + THH_hob_create_qp + THH_hob_get_special_qp + THH_hob_modify_qp + THH_hob_query_qp + THH_hob_destroy_qp + THH_hob_get_qp1_pkey + THH_hob_get_sgid + THH_hob_create_eec + THH_hob_modify_eec + THH_hob_query_eec + THH_hob_destroy_eec + THH_hob_attach_to_multicast + THH_hob_detach_from_multicast + THH_hob_close_hca + THH_hob_open_hca + THH_hob_destroy + THH_hob_query + THH_hob_modify + THH_hob_get_pkey + THH_hob_get_pkey_tbl + THH_hob_set_comp_eventh + THH_hob_set_async_eventh + THH_hob_get_ver_info + THH_hob_get_cmd_if + THH_hob_get_uldm + THH_hob_get_ddrmm + THH_hob_get_mrwm + THH_hob_get_qpm + THH_hob_get_cqm + THH_hob_get_eventp + THH_hob_get_udavm_info + THH_hob_get_hca_hndl + THH_hob_alloc_ul_res + THH_hob_free_ul_res + THH_hob_alloc_pd + THH_hob_free_pd + THH_hob_alloc_rdd + THH_hob_free_rdd + THH_hob_create_ud_av + THH_hob_modify_ud_av + THH_hob_query_ud_av + THH_hob_destroy_ud_av + THH_hob_register_mr + THH_hob_reregister_mr + THH_hob_register_smr + THH_hob_query_mr + THH_hob_deregister_mr + THH_hob_alloc_mw + THH_hob_query_mw + THH_hob_free_mw + THH_hob_create_cq + THH_hob_resize_cq + THH_hob_query_cq + THH_hob_destroy_cq + THH_hob_create_qp + THH_hob_get_special_qp + THH_hob_modify_qp + THH_hob_query_qp + THH_hob_destroy_qp + THH_hob_create_eec + THH_hob_modify_eec + THH_hob_query_eec + THH_hob_destroy_eec + THH_hob_attach_to_multicast + THH_hob_detach_from_multicast + THH_hob_get_num_ports + ; ----- Thh_uldm.c ----- + THH_uldm_create + THH_uldm_destroy + THH_uldm_alloc_ul_res + THH_uldm_free_ul_res + THH_uldm_alloc_uar + THH_uldm_free_uar + THH_uldm_alloc_pd + THH_uldm_free_pd + THH_uldm_get_protection_ctx + ; ----- Uar.c ----- + THH_uar_destroy + THH_uar_get_index + THH_uar_recvq_dbell + THH_uar_cq_cmd + THH_uar_eq_cmd + THH_uar_blast + ; ----- Udavm.c ----- + THH_udavm_create + THH_udavm_destroy + THH_udavm_get_memkey + THH_udavm_create_av + THH_udavm_modify_av + THH_udavm_query_av + THH_udavm_destroy_av + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c new file mode 100644 index 00000000..0f35f766 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.c @@ -0,0 +1,214 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include "thh_mod_obj.h" + +/* if non-zero, indicates legacy sqp initialization. May be modified by insmod parameter */ +static int thh_legacy_sqp = 0; +static int av_in_host_mem = 0; +static int infinite_cmd_timeout = 0; /* when 1 we use inifinite timeouts on commands completion */ +static int num_cmds_outs = 0; /* max number of outstanding commands */ +static int fatal_delay_halt = 0; /* when 1, HALT_HCA on fatal is delayed to just before the reset */ +static int async_eq_size = 0; /* size of async event queue */ +static int cmdif_use_uar0 = 0; /* when 1, cmdif posts commands to uar0 */ +static int ignore_subsystem_id = 0; /* when 1, we do not check the subsystem_vendor_id & subsystem_id */ + +/* + * Add a Tavor in device tables + */ +HH_ret_t THH_add_hca( + MT_size_t hca_num, + card_hw_props_t *card_hw_props_p, + HH_hca_hndl_t * hh_hca_hndl_p + ) +{ + THH_module_flags_t mod_flags; + THH_hw_props_t hw_props; + + /* fill HW params */ + hw_props.bus = card_hw_props_p->bus; + hw_props.dev_func = card_hw_props_p->dev_func; + hw_props.device_id = card_hw_props_p->device_id; + hw_props.pci_vendor_id = card_hw_props_p->pci_vendor_id; + hw_props.hw_ver = card_hw_props_p->hw_ver; + hw_props.cr_base = card_hw_props_p->cr_base; + hw_props.uar_base = card_hw_props_p->uar_base; + hw_props.ddr_base = card_hw_props_p->ddr_base; + hw_props.interrupt_props.irq = card_hw_props_p->interrupt_props.irq; + hw_props.interrupt_props.intr_pin = card_hw_props_p->interrupt_props.intr_pin; + + /*initialize module flags structure */ + memset(&mod_flags, 0, sizeof(THH_module_flags_t)); + mod_flags.legacy_sqp = (thh_legacy_sqp == 0 ? FALSE : TRUE); + mod_flags.av_in_host_mem = (av_in_host_mem == 0 ? FALSE : TRUE); + mod_flags.inifinite_cmd_timeout = (infinite_cmd_timeout==1 ? TRUE : FALSE); + mod_flags.fatal_delay_halt = (fatal_delay_halt==1 ? TRUE : FALSE); + mod_flags.cmdif_post_uar0 = cmdif_use_uar0==1 ? TRUE : FALSE; + + if ( num_cmds_outs == 0 ) { + mod_flags.num_cmds_outs = 0xffffffff; + } + else { + mod_flags.num_cmds_outs = num_cmds_outs; + } + + mod_flags.async_eq_size = async_eq_size; + + /* Create the Tavor HOB object */ + MTL_TRACE1("THH_init_hca: calling THH_hob_create: Tavor No %d\n", hca_num); + + return THH_hob_create(&hw_props, (u_int32_t)hca_num, &mod_flags, hh_hca_hndl_p); +} + + +/* + * Remove a Tavor in device tables + */ +HH_ret_t THH_rmv_hca( + MT_size_t hca_num + ) +{ + HH_hca_hndl_t *hh_list, hh_iterator; + u_int32_t num_entries; + HH_ret_t ret; + + /* get number of entries in the HCA registered devices table */ + HH_list_hcas(0, &num_entries, NULL); + if (num_entries == 0) { + /* no devices !! 
*/ + MTL_TRACE1( "THH_rmv_hca: No HCAs registered\n"); + return (-1); + } + hh_list = (struct HH_hca_dev_st **)VMALLOC(num_entries * sizeof(HH_hca_hndl_t)); + + /* get list of HH handles of all available devices */ + HH_list_hcas(num_entries, &num_entries, hh_list); + if (num_entries < hca_num) { + /* no our device !! */ + MTL_TRACE1( "THH_rmv_hca: No HCAs requested (%d)\n", hca_num); + return (-1); + } + + /* remove HCA */ + hh_iterator = hh_list[hca_num]; + if (hh_iterator->vendor_id == MT_MELLANOX_IEEE_VENDOR_ID && + hh_iterator->dev_id == MT23108_DEV_ID) { + MTL_DEBUG3("THH_rmv_hca: removing the device %s\n", hh_iterator->dev_desc); + ret = THH_hob_destroy(hh_iterator); + if (ret == HH_OK) { + MTL_DEBUG3("THH_rmv_hca: device removed successfully\n"); + } else { + MTL_ERROR1("THH_rmv_hca: problems in removing device: THH_hob_destroy returned (%d)\n", ret); + } + } + + VFREE(hh_list); + return ret; +} + +int THH_init_module( THH_module_params_t *params_p) +{ + HH_ret_t ret; + + MTL_TRACE('1', "%s: TAVOR device init_module() called\n", __func__); + + thh_legacy_sqp = params_p->thh_legacy_sqp; + av_in_host_mem = params_p->av_in_host_mem; + infinite_cmd_timeout = params_p->infinite_cmd_timeout; /* when 1 we use inifinite timeouts on commands completion */ + num_cmds_outs = params_p->num_cmds_outs; /* max number of outstanding commands */ + fatal_delay_halt = params_p->fatal_delay_halt; + async_eq_size = params_p->async_eq_size; + cmdif_use_uar0 = params_p->cmdif_use_uar0; + ignore_subsystem_id = params_p->ignore_subsystem_id; + + + ret = THH_init(); + if (ret != HH_OK) { + MTL_ERROR('1', "init_module: THH_init failed (%d)\n", ret); + return(-1); + } + + MTL_TRACE('1', "THH: for all devices loaded\n"); + printk("<1>THH kernel module initialized successfully\n"); + + return 0; +} + +int THH_cleanup_module(void) +{ + HH_ret_t ret; + + ret = THH_cleanup(); + if (ret != HH_OK) { + MTL_ERROR('1', "cleanup_module: THH_cleanup failed (%d)\n", ret); + return(0); + } + MTL_TRACE1("THH kernel module removed successfully\n"); + return 0; +} + +#ifdef __KERNEL__ + + +/* ----- Kernel Space ----- */ +#ifndef MT_BUILD_LIB +NTSTATUS +DriverEntry( + PDRIVER_OBJECT pi_pDriverObject, + PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** THH_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** THH_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** THH_KL DllUnload()"); + return STATUS_SUCCESS; +} +#endif +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h new file mode 100644 index 00000000..f27e2705 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thh_mod_obj.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THH_MOD_OBJ_H +#define H_THH_MOD_OBJ_H + +#include + +typedef struct { + int thh_legacy_sqp; /* if non-zero, indicates legacy sqp initialization */ + int av_in_host_mem; /* place AV ni the system memory */ + int infinite_cmd_timeout; /* when 1 we use inifinite timeouts on commands completion */ + int num_cmds_outs; /* max number of outstanding commands */ + int fatal_delay_halt; /* when 1, HALT_HCA on fatal is delayed to just before the reset */ + int async_eq_size; /* size of async event queue */ + int cmdif_use_uar0; /* when 1, cmdif posts commands to uar0 */ + int ignore_subsystem_id; /* when 1, we do not check the subsystem_vendor_id & subsystem_id */ +} THH_module_params_t; + +typedef struct { + MOSAL_IRQ_ID_t irq; + u_int8_t intr_pin; +} card_intr_props_t; + +typedef struct { + u_int8_t bus; + u_int8_t dev_func; + u_int16_t device_id; + u_int16_t pci_vendor_id; + u_int32_t hw_ver; + MT_phys_addr_t cr_base; + MT_phys_addr_t uar_base; + MT_phys_addr_t ddr_base; + card_intr_props_t interrupt_props; +} card_hw_props_t; + +int THH_init_module( THH_module_params_t * params_p); +int THH_cleanup_module(void); +HH_ret_t THH_add_hca( + MT_size_t hca_num, + card_hw_props_t * hw_props_p, + HH_hca_hndl_t * hh_hca_hndl_p + ); + +HH_ret_t THH_rmv_hca( + MT_size_t hca_num + ); + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_kl.def b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_kl.def new file mode 100644 index 00000000..047663c0 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_kl.def @@ -0,0 +1,61 @@ +EXPORTS + ; for OS only + DllInitialize private + DllUnload private + ; ----- Thhul_cqm.c ----- + THHUL_cqm_create + THHUL_cqm_destroy + THHUL_cqm_create_cq_prep + THHUL_cqm_create_cq_done + THHUL_cqm_destroy_cq_done + THHUL_cqm_resize_cq_prep + THHUL_cqm_resize_cq_done + THHUL_cqm_cq_cleanup + THHUL_cqm_poll4cqe + THHUL_cqm_req_comp_notif + ; ----- Thhul_hob.c ----- + THHUL_hob_create + THHUL_hob_destroy + THHUL_hob_query_version + THHUL_hob_get_pdm + THHUL_hob_get_cqm + THHUL_hob_get_qpm + THHUL_hob_get_uar + THHUL_hob_get_mwm + THHUL_hob_is_priv_ud_av + ; ----- Thhul_mwm.c ----- + THHUL_mwm_create + THHUL_mwm_destroy + THHUL_mwm_alloc_mw + THHUL_mwm_bind_mw + THHUL_mwm_free_mw + ; ----- Thhul_pdm.c ----- + THHUL_pdm_create + THHUL_pdm_destroy + THHUL_pdm_alloc_pd_prep + THHUL_pdm_alloc_pd_done + THHUL_pdm_free_pd_done + THHUL_pdm_create_ud_av + THHUL_pdm_modify_ud_av + THHUL_pdm_query_ud_av + THHUL_pdm_destroy_ud_av +; THHUL_pdm_get_hh_pd + THHUL_pdm_get_ud_av_memkey_sqp_ok + ; ----- Thhul_qpm.c ----- + THHUL_qpm_create + THHUL_qpm_destroy + THHUL_qpm_create_qp_prep + THHUL_qpm_special_qp_prep + THHUL_qpm_create_qp_done + THHUL_qpm_destroy_qp_done + 
THHUL_qpm_modify_qp_done + THHUL_qpm_post_send_req + THHUL_qpm_post_recv_req + THHUL_qpm_comp_ok + THHUL_qpm_comp_err + ; ----- Thhul_srqm.c ----- + THHUL_srqm_create_srq_prep + THHUL_srqm_create_srq_done + THHUL_srqm_destroy_srq_done + THHUL_srqm_post_recv_reqs + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_mod_obj.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_mod_obj.c new file mode 100644 index 00000000..51d098db --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/os_dep/win/thhul_mod_obj.c @@ -0,0 +1,50 @@ +/* + This software is available to you under a choice of one of two + licenses. You may choose to be licensed under the terms of the GNU + General Public License (GPL) Version 2, available at + , or the OpenIB.org BSD + license, available in the LICENSE.TXT file accompanying this + software. These details are also available at + . + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. +*/ + +#ifdef __KERNEL__ + +#include "mtl_types.h" + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** THHUL_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** THHUL_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** THHUL_KL: DllUnload()"); + return STATUS_SUCCESS; +} +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh.h new file mode 100644 index 00000000..06110fb3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THH_H +#define H_THH_H + +#include "thh_common.h" + +#define THH_VMALLOC_THRESHOLD (2*MOSAL_SYS_PAGE_SIZE) + +#ifdef __LINUX__ +#define THH_SMART_MALLOC(size) ({ \ + void *p; \ + if ( (size) > THH_VMALLOC_THRESHOLD ) { \ + p = VMALLOC(size); \ + } \ + else { \ + p = MALLOC(size); \ + } \ + p; \ + }) + + + + +#define THH_SMART_FREE(ptr,size) do { \ + if ( (size) > THH_VMALLOC_THRESHOLD ) { \ + VFREE(ptr); \ + } \ + else { \ + FREE(ptr); \ + } \ + } \ + while(0) +#else +#define THH_SMART_MALLOC(size) VMALLOC(size) +#define THH_SMART_FREE(ptr,size) VFREE(ptr) +#endif + +#define THH_FW_VER_VALUE(major,minor,subminor) \ + ( (((u_int64_t)(major)) << 32) | (((u_int64_t)(minor)) << 16) | ((u_int64_t)(subminor)) ) + + +/* THH objects handles */ +typedef struct THH_hob_st *THH_hob_t; +typedef MT_ulong_ptr_t THH_cmd_t; /* type to identify the cmdif object */ +typedef struct THH_eventp_st *THH_eventp_t; +typedef struct THH_ddrmm_st *THH_ddrmm_t; +typedef struct THH_uldm_st *THH_uldm_t; +typedef struct THH_mrwm_st *THH_mrwm_t; +typedef struct THH_cqm_st *THH_cqm_t; +typedef struct THH_qpm_st *THH_qpm_t; +typedef struct THH_srqm_st *THH_srqm_t; +typedef struct THH_mcgm_st *THH_mcgm_t; +typedef struct THH_sqp_demux_st *THH_sqp_demux_t; + + + +/* event's handlers types */ +typedef u_int8_t THH_event_type_t; +typedef u_int8_t THH_event_subtype_t; +typedef void (*THH_mlx_eventh_t)(HH_hca_hndl_t hh_hndl, + THH_event_type_t event_type, + THH_event_subtype_t event_subtype, + void* event_data, + void* private_data); + +typedef union { + HH_comp_eventh_t comp_event_h; + HH_async_eventh_t ib_comp_event_h; + THH_mlx_eventh_t mlx_event_h; +}THH_eventp_handler_t; + +/* structure for passing module flags or parameters from 'insmod' to THH_hob_create */ +typedef struct THH_module_flags_st { + MT_bool legacy_sqp; /* TRUE if should perform INIT_IB in THH_hob_open_hca */ + MT_bool av_in_host_mem; /* TRUE if udav's should use host memory. */ + /* FALSE if udav's should use DDR SDRAM on Tavor */ + MT_bool inifinite_cmd_timeout; /* when TRUE cmdif will wait infinitely for the completion of a command */ + MT_bool fatal_delay_halt; /* when TRUE, HALT_HCA/disable on fatal error will be delayed to before the reset */ + u_int32_t num_cmds_outs; /* max number of outstanding commands that will be used by the driver + The real value will not exceed tha value reported by fw */ + u_int32_t async_eq_size; /* The size of the async event queue (max # of outstanding async events) */ + MT_bool cmdif_post_uar0; /* when TRUE cmdif will post commands to uar0 */ +} THH_module_flags_t; + +/* + * THH_hob_state_t tracks the status of an HCA -- is it OK, or has a fatal error occurred. + * Actually, the states used in practice use the FATAL states as modifiers of the base states. + * Thus, the states we may see in practice are: + * THH_STATE_CREATING, THH_STATE_OPENING, THH_STATE_RUNNING, THH_STATE_CLOSING, THH_STATE_DESTROYING + * and fatal modifiers on these states: + * + * THH_STATE_CREATING | THH_STATE_FATAL_HCA_HALTED + * THH_STATE_OPENING | THH_STATE_FATAL_HCA_HALTED + * + * THH_STATE_RUNNING | THH_STATE_FATAL_START + * THH_STATE_RUNNING | THH_STATE_FATAL_HCA_HALTED + * + * THH_STATE_CLOSING | THH_STATE_FATAL_HCA_HALTED + * THH_STATE_DESTROYING | THH_STATE_FATAL_HCA_HALTED + * + * Note that in the RUNNING state, have two FATAL possibilities. When FATAL first occurs, + * we enter the RUNNING/FATAL_START state, in which all commands and all calls to THH + * (with very few exceptions) return FATAL. 
In addition, we attempt to halt the HCA. + * After the halt-hca attempt returns, we enter the RUNNING/FATAL-HCA-HALTED state. + * + */ +enum { + THH_STATE_NONE = 0, + THH_STATE_CREATING = 0x1, + THH_STATE_CLOSED = 0x2, + THH_STATE_OPENING = 0x4, + THH_STATE_RUNNING = 0x8, + THH_STATE_CLOSING = 0x10, + THH_STATE_DESTROYING= 0x20, + THH_STATE_FATAL_START = 0x40, /* CATASTROPHIC EVENT has been reported */ + THH_STATE_FATAL_HCA_HALTED = 0x80 /* Failed HCA has been halted */ +}; +typedef u_int32_t THH_hob_state_t; + +#define THH_STATE_HAVE_ANY_FATAL (THH_STATE_FATAL_START | THH_STATE_FATAL_HCA_HALTED) + +/* Fatal event type enumeration, for passing to fatal event handlers */ +typedef enum { + THH_FATAL_NONE, + THH_FATAL_MASTER_ABORT, /* detected master abort */ + THH_FATAL_GOBIT, /* GO bit of HCR remains set (i.e., stuck) */ + THH_FATAL_CMD_TIMEOUT, /* timeout on a command execution */ + THH_FATAL_EQ_OVF, /* an EQ has overflowed */ + THH_FATAL_EVENT, /* firmware has generated a LOCAL CATASTROPHIC ERR event */ + THH_FATAL_CR, /* unexpected read from CR-space */ + THH_FATAL_TOKEN, /* invalid token on command completion */ + THH_FATAL_EXTERNAL, /* externally-generated artificial fatal via THH_hob_external_fatal */ + THH_FATAL_END /* indicates end of fatal error codes */ +} THH_fatal_err_t; + +#endif /* H_THH_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_common.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_common.h new file mode 100644 index 00000000..2e9943c7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_common.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THH_COMMON_H +#define H_THH_COMMON_H + + +#include +#include +#include +#include + +/* resources reserved in driver */ +#define THH_NUM_RSVD_PD 2 +#define THH_NUM_RSVD_QP 8 + +/* this macro ensure that total buff size is power of 2 and one extra entry for the cyclic buffer */ +#define THH_CYCLIC_BUFF_SIZE(entries) ((MT_size_t)1<<(floor_log2(entries)+1)) + +typedef u_int32_t THH_eqn_t; +#define THH_INVALID_EQN 0xFFFFFFFF + +typedef struct THH_udavm_st *THH_udavm_t; /* type to identify the udav object */ +typedef u_int32_t THH_uar_index_t; +typedef struct THH_uar_st *THH_uar_t; + + +#pragma warning( disable : 4201 ) + +/* VERSION INFORMATION: used in order to retrieve major version numbers + in order to deal with differences in different versions. */ +typedef struct THH_ver_info_st { + u_int32_t hw_ver; /* HW version (stepping etc.)*/ + u_int16_t fw_ver_major; /* Firmware major version */ + u_int16_t fw_ver_minor; /* Firmware minor version */ + u_int16_t fw_ver_subminor; /* Firmware Sub-minor version (Patch level). */ + u_int16_t cmd_if_ver; /* Command interface version */ +}THH_ver_info_t; + +typedef struct THH_hca_ul_resources_st { + HH_hca_hndl_t hh_hca_hndl; + THH_ver_info_t version; + THH_uar_index_t uar_index; + union + { + MT_virt_addr_t uar_map; + void* __ptr64 resv0; + }; + /* HCA capabilities to validate or use in THHUL */ + MT_bool priv_ud_av; /* Privileged UD AV are enforced ? */ + u_int32_t log2_mpt_size; + char * __ptr64 av_ddr_base; + char * __ptr64 av_host_base; + + u_int32_t max_qp_ous_wr; /* Maximum Number of oustanding WR on any WQ. */ + u_int32_t max_srq_ous_wr; /* Maximum Number of oustanding WR on any WQ. */ + u_int32_t max_num_sg_ent; /* Max num of scatter/gather entries for desc other than RD */ + u_int32_t max_num_sg_ent_srq; /* Max num of scatter/gather entries for SRQs */ + u_int32_t max_num_sg_ent_rd; /* Max num of scatter/gather entries for RD desc */ + u_int32_t max_num_ent_cq; /* Max num of supported entries per CQ */ +} THH_hca_ul_resources_t; + +typedef struct THH_pd_ul_resources_st { + /* if user-level udavm_buf is used (i.e., non-zero), it should be malloc'ed + * with size = (udavm_buf_sz + (1 udav entry size)), to allow the kernel level + * to align the start of the udavm table to the entry size -- so some spare is + * needed. Therefore, the udavm_buf_size value is the size of the actual udavm + * table, not the size of the malloc'ed buffer. + */ + union + { + MT_virt_addr_t udavm_buf; /* IN */ + void* __ptr64 resv0; + }; + union + { + MT_size_t udavm_buf_sz; /* IN */ + void* __ptr64 resv1; + }; + HH_pdm_pd_flags_t pd_flags; /* IN - if non-zero, is a PD for a special QP*/ + VAPI_lkey_t udavm_buf_memkey; /* OUT - set by THH_uldm */ +} THH_pd_ul_resources_t; + +typedef struct +{ + union + { + MT_virt_addr_t cqe_buf; /* CQE buffer virtual addr. CQE size aligned */ + void* __ptr64 resv0; + }; + union + { + MT_size_t cqe_buf_sz; /* Buffer size in bytes (mult of CQE size) */ + void* __ptr64 resv1; + }; + THH_uar_index_t uar_index; /* Index of UAR used for this CQ. 
*/ + u_int32_t new_producer_index; /* New producer index after "resize_cq" (OUT)*/ +} THH_cq_ul_resources_t; + +typedef struct +{ + union + { + MT_virt_addr_t wqes_buf; /* WQEs buffer virtual address */ + void* __ptr64 resv0; + }; + union + { + MT_size_t wqes_buf_sz; /* Buffer size in bytes */ + void* __ptr64 resv1; + }; + THH_uar_index_t uar_index; /* index of UAR used for this QP */ + /* ER: Not used anywhere: MT_virt_addr_t uar_map; */ /* Address in user space of UAR */ +} THH_qp_ul_resources_t; + +typedef struct +{ + union + { + MT_virt_addr_t wqes_buf; /* WQEs buffer virtual address */ + void* __ptr64 resv0; + }; + union + { + MT_size_t wqes_buf_sz; /* Buffer size in bytes */ + void* __ptr64 resv1; + }; + union + { + MT_size_t wqe_sz; /* WQE (descriptor) size in bytes */ + void* __ptr64 resv2; + }; + THH_uar_index_t uar_index; /* index of UAR used for this QP */ +} THH_srq_ul_resources_t; + +#pragma warning( default : 4201 ) +#endif /* H_THH_COMMON_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c new file mode 100644 index 00000000..e3a23f35 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.c @@ -0,0 +1,649 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "tcqm.h" +#if defined(USE_STD_MEMORY) +# include +#endif +#include +#include +#include +#include +#include +#include +#include + +#include +#include + + +#define logIfErr(f) \ + if (rc != HH_OK) { MTL_ERROR1("%s: rc=%s\n", f, HH_strerror_sym(rc)); } + +/* macro for translating cmd_rc return codes for non-destroy procs */ +#define CMDRC2HH_ND(cmd_rc) ((cmd_rc == THH_CMD_STAT_OK) ? HH_OK : \ + (cmd_rc == THH_CMD_STAT_EINTR) ? HH_EINTR : HH_EFATAL) + +#define CMDRC2HH_BUSY(cmd_rc) ((cmd_rc == THH_CMD_STAT_OK) ? HH_OK : \ + (cmd_rc == THH_CMD_STAT_EINTR) ? HH_EINTR : \ + ((cmd_rc == THH_CMD_STAT_RESOURCE_BUSY) || \ + (cmd_rc == THH_CMD_STAT_REG_BOUND)) ? 
HH_EBUSY : HH_EFATAL) + +#define TCQM_CQN(cqm,cqc_index) \ + ( ( ((cqm)->entries[cqc_index].cqn_prefix) << (cqm)->log2_max_cq ) | (cqc_index) ) + +enum +{ + CQE_size = sizeof(struct tavorprm_completion_queue_entry_st)/8, /* 32 */ + CQE_size_log2 = 5, + CQE_size_mask = (1ul << CQE_size_log2) - 1 +}; + +typedef struct Completion_Queue_Context Completion_Queue_Context_t; + +typedef struct +{ + unsigned long prev; + unsigned long next; +} _cq_free_offs_t; + + +/* Completion Queue Context Manager - entry info */ +typedef struct CQCM_entry_s +{ + /* THH_cq_props_t props; / * May be optimzed out, using CmdIf output */ + unsigned long n_cq; /* With buf_sz, may just recompute and save needed */ + VAPI_lkey_t lkey; + MOSAL_protection_ctx_t user_protection_context; /*Save protection context to be used on resize*/ +#if defined(MT_SUSPEND_QP) + MT_bool is_suspended; +#endif +} CQCM_entry_t; + + +/* Completion Queue Context Manager - entry */ +typedef struct +{ + union + { + CQCM_entry_t used; + _cq_free_offs_t freelist; + } u; + unsigned char cqn_prefix:7; /* CQ number - avoid ghost CQ events (FM issue #15134) */ + unsigned char in_use :1; +} CQCM_entry_ut; + +static const EPool_meta_t fl_meta = + { + sizeof(CQCM_entry_ut), + (unsigned int)(MT_ulong_ptr_t)(&(((CQCM_entry_ut*)(0))->u.freelist.prev)), + (unsigned int)(MT_ulong_ptr_t)(&(((CQCM_entry_ut*)(0))->u.freelist.next)) + }; + + +/* The main CQ-manager structure */ +typedef struct THH_cqm_st +{ + THH_hob_t hob; + u_int8_t log2_max_cq; + u_int32_t max_cq; + CQCM_entry_ut* entries; + EPool_t flist; + + /* convenient handle saving */ + THH_cmd_t cmd_if; + THH_mrwm_t mrwm_internal; + + MT_bool cq_resize_fixed; /* FW fix for FM issue #16966/#17002: comp. events during resize */ +} TCQM_t; + + +/************************************************************************/ +/************************************************************************/ +/* private functions */ +/************************************************************************/ + +#if MAX_TRACE >= 1 +static char* ulr_print(char* buf, const THH_cq_ul_resources_t* p) +{ + sprintf(buf, "{CQulRes: buf="VIRT_ADDR_FMT", sz="SIZE_T_FMT", uar=%d}", + p->cqe_buf, p->cqe_buf_sz, p->uar_index); + return buf; +} /* ulr_print */ +#endif + + +/************************************************************************/ +static inline MT_bool in_use_cq(TCQM_t* cqm, HH_cq_hndl_t cq) +{ + u_int32_t cqc_index= cq & MASK32(cqm->log2_max_cq); + MT_bool in_use = (cqc_index < cqm->max_cq) && cqm->entries[cqc_index].in_use; + if (!in_use) { MTL_ERROR2(MT_FLFMT("unused cq=0x%x"), cq); } + return in_use; +} /* in_use_cq */ + + +/************************************************************************/ +static HH_ret_t sw2hw_cq +( + THH_cmd_t cmd_if, + THH_cq_ul_resources_t* user_prot_ctx_p, + u_int32_t cqn, + unsigned long n_cq_entries, + VAPI_lkey_t lkey, + THH_eqn_t comp_eqn, + THH_eqn_t error_eqn +) +{ + THH_cqc_t cqc; + THH_cmd_status_t cmd_rc; + + memset(&cqc, 0, sizeof(THH_cqc_t)); + cqc.st = 0; /* disarmed */ +#ifdef NO_CQ_CI_DBELL + /* Use this option carefully - CQ overrun may cause unexpected behavior */ + /* It is recommended to set CQ size to be the total of max. outstanding */ + /* WQEs of all attached work queues. 
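+ For example, with three attached work queues of at most 256
+ outstanding WQEs each, that means sizing the CQ for at least
+ 3 * 256 = 768 CQEs (illustrative figures, not a firmware limit).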
*/ + cqc.oi = 1;/*CQ's consumer index update DBells are not used - must ignore CQ overrun*/ +#else + cqc.oi = 0;/* Enforce CQ overrun detection based on consumer index doorbells updates*/ +#endif + cqc.tr = 1; + cqc.status = 0; + cqc.start_address = user_prot_ctx_p->cqe_buf; + cqc.usr_page = user_prot_ctx_p->uar_index; + cqc.log_cq_size = floor_log2(n_cq_entries); + cqc.e_eqn = error_eqn; + cqc.c_eqn = comp_eqn; + cqc.pd = THH_RESERVED_PD; + cqc.l_key = lkey; + cqc.cqn = cqn; + + cmd_rc = THH_cmd_SW2HW_CQ(cmd_if, cqn, &cqc); + MTL_DEBUG4(MT_FLFMT("cmd_rc=%d=%s"), cmd_rc, str_THH_cmd_status_t(cmd_rc)); + return (CMDRC2HH_ND(cmd_rc)); +} /* sw2hw_cq */ + + +/************************************************************************/ +static HH_ret_t hw2sw_cq +( + THH_cmd_t cmd_if, + u_int32_t cqn +) +{ + THH_cmd_status_t cmd_rc; + cmd_rc = THH_cmd_HW2SW_CQ(cmd_if, cqn, NULL); + return (CMDRC2HH_ND(cmd_rc)); +} /* hw2sw_cq */ + + +/************************************************************************/ +/************************************************************************/ +/* interface functions */ + + +/************************************************************************/ +HH_ret_t THH_cqm_create( + THH_hob_t hob, /* IN */ + u_int8_t log2_max_cq, /* IN */ + u_int8_t log2_rsvd_cqs, /* IN */ + THH_cqm_t* cqm_p /* OUT */ +) +{ + THH_cmd_t cmd_if; + THH_ver_info_t version; + HH_ret_t rc = THH_hob_get_cmd_if(hob, &cmd_if); + TCQM_t* cqm = 0; + CQCM_entry_ut* entries = 0; + unsigned long ncq = 1ul << log2_max_cq; + unsigned long tavor_num_reserved_cqs = 1ul << log2_rsvd_cqs; + MTL_TRACE1("{THH_cqm_create: hob=%p, log2_max_cq=%d, rsvd_cqs=%lu\n", + hob, log2_max_cq, tavor_num_reserved_cqs); + +#ifdef NO_CQ_CI_DBELL + MTL_ERROR4(MT_FLFMT("WARNING: HCA driver is in CQ-Overrun-Ignore mode !")); +#endif + + if (rc == HH_OK) { + rc= THH_hob_get_ver_info(hob, &version); + } + if (rc == HH_OK) + { + cqm = TMALLOC(TCQM_t); + entries = ((log2_max_cq < 24) && (ncq > tavor_num_reserved_cqs) + ? 
TNVMALLOC(CQCM_entry_ut, ncq) + : NULL); + if (!(cqm && entries)) + { + rc = HH_EAGAIN; MTL_ERROR2(MT_FLFMT("")); + } + else + { + HH_ret_t hob_rc; + /* clearing is needed, but for the sake of consistency */ + memset(cqm, 0, sizeof(TCQM_t)); + memset(entries, 0, ncq * sizeof(CQCM_entry_ut)); + hob_rc = THH_hob_get_mrwm(hob, &cqm->mrwm_internal); + if (hob_rc != HH_OK) + { + rc = HH_EAGAIN; MTL_ERROR2(MT_FLFMT("")); + } + } + } + if (rc == HH_OK) + { + cqm->hob = hob; + cqm->cmd_if = cmd_if; + cqm->log2_max_cq = log2_max_cq; + cqm->max_cq = ncq; + cqm->entries = entries; + cqm->flist.entries = entries; + cqm->flist.size = ncq; + cqm->flist.meta = &fl_meta; + epool_init(&cqm->flist); + /* reserve is simpler than using an offset */ + epool_reserve(&cqm->flist, 0, tavor_num_reserved_cqs); + + cqm->cq_resize_fixed= (version.fw_ver_major >= 3); + rc = HH_OK; + *cqm_p = cqm; + } + else + { + if (entries) {VFREE(entries);} + if (cqm) {FREE(cqm);} + } + MTL_TRACE1("}THH_cqm_create: cqm=%p\n", cqm); + logIfErr("THH_cqm_create"); + return rc; +} /* THH_cqm_create */ + + +/************************************************************************/ +HH_ret_t THH_cqm_destroy( + THH_cqm_t cqm, /* IN */ + MT_bool hca_failure /* IN */ +) +{ + HH_ret_t rc = HH_OK; + MTL_TRACE1("{THH_cqm_destroy: cqm=%p, hca_failure=%d\n", cqm, hca_failure); + if (!hca_failure) + { + CQCM_entry_ut* e = cqm->entries; + CQCM_entry_ut* e_end = e + cqm->max_cq; + THH_mrwm_t mrwm_internal = cqm->mrwm_internal; + int any_busy = 0; + for (; (e != e_end) && (rc == HH_OK); ++e) + { + if (e->in_use) + { + HH_ret_t mrrc = THH_mrwm_deregister_mr(mrwm_internal, e->u.used.lkey); + switch (mrrc) + { + case HH_OK: + break; + case HH_EINVAL: + rc = HH_EINVAL; MTL_ERROR2(MT_FLFMT("")); + break; + case HH_EBUSY: + any_busy = 1; MTL_ERROR2(MT_FLFMT("")); /* Cannot happen! */ + break; + default: MTL_ERROR2(MT_FLFMT("")); + } + } + } + if (any_busy) { rc = HH_EINVAL; } /* again... should not happen */ + } + epool_cleanup(&cqm->flist); + VFREE(cqm->entries); + FREE(cqm); + MTL_TRACE1("}THH_cqm_destroy\n"); + logIfErr("THH_cqm_destroy"); + return rc; +} /* THH_cqm_destroy */ + + +/************************************************************************/ +HH_ret_t THH_cqm_create_cq( + THH_cqm_t cqm, /* IN */ + MOSAL_protection_ctx_t user_protection_context, /* IN */ + THH_eqn_t comp_eqn, /* IN */ + THH_eqn_t error_eqn, /* IN */ + THH_cq_ul_resources_t* cq_ul_resources_p, /* IO */ + HH_cq_hndl_t* cq_p /* OUT */ +) +{ + MT_virt_addr_t cqe_buf = cq_ul_resources_p->cqe_buf; + MT_virt_addr_t unalligned_bits = cqe_buf & CQE_size_mask; + MT_size_t buf_sz = cq_ul_resources_p->cqe_buf_sz; + MT_size_t residue = buf_sz % CQE_size; + HH_ret_t rc = ((unalligned_bits == 0) && (residue == 0) + ? 
HH_OK : HH_EINVAL); + u_int32_t new_cqn= 0xFFFFFFFF; /* Initialize to invalid CQN */ +#if MAX_TRACE >= 1 + char ulr_tbuf[256], *ulr_buf = &ulr_tbuf[0]; +#ifndef __DARWIN__ + MTL_TRACE1("{THH_cqm_create_cq: cqm=%p, ctx=0x%x, Ceqn=0x%x, Eeqn=0x%x\n" + " %s\n", cqm, user_protection_context, comp_eqn, error_eqn, + ulr_print(ulr_buf, cq_ul_resources_p)); +#else + MTL_TRACE1("{THH_cqm_create_cq: cqm=%p, Ceqn=0x%x, Eeqn=0x%x\n" + " %s\n", cqm, comp_eqn, error_eqn, + ulr_print(ulr_buf, cq_ul_resources_p)); +#endif +#endif + if (rc == HH_OK) + { + VAPI_lkey_t lkey; + HH_ret_t mr_rc = HH_ERR; + unsigned long n_cq_entries = (unsigned long)(buf_sz / CQE_size); + u_int32_t cqc_index = epool_alloc(&cqm->flist); + rc = HH_ENOSYS; /* pessimistic */ + if (cqc_index != EPOOL_NULL) + { + THH_internal_mr_t mr_internal; + memset(&cqm->entries[cqc_index], 0, sizeof(CQCM_entry_ut)); + memset(&mr_internal, 0, sizeof(mr_internal)); + mr_internal.start = cq_ul_resources_p->cqe_buf; + mr_internal.size = buf_sz; + mr_internal.pd = THH_RESERVED_PD; + mr_internal.vm_ctx = user_protection_context; + mr_internal.force_memkey = FALSE; + mr_internal.memkey = (VAPI_lkey_t)0; + + mr_rc = THH_mrwm_register_internal( + cqm->mrwm_internal, &mr_internal, &lkey); + new_cqn= ( ++(cqm->entries[cqc_index].cqn_prefix) << cqm->log2_max_cq ) | cqc_index; + rc = ((mr_rc == HH_OK) + ? sw2hw_cq(cqm->cmd_if, cq_ul_resources_p, new_cqn, n_cq_entries, + lkey, comp_eqn, error_eqn) + : mr_rc); + MTL_DEBUG4(MT_FLFMT("mr=%d=%s, rc=%d=%s"), + mr_rc, HH_strerror_sym(mr_rc), rc, HH_strerror_sym(rc)); + } + + else { + MTL_ERROR2(MT_FLFMT("CQ pool is drained.\n")); + rc = HH_EAGAIN; + } + + if (rc == HH_OK) + { + /* cqm->entries[cq].u.used.props = *cq_props_p; */ + cqm->entries[cqc_index].u.used.n_cq = n_cq_entries; + cqm->entries[cqc_index].u.used.lkey = lkey; + /* Save protection context for CQ-resize */ + cqm->entries[cqc_index].u.used.user_protection_context= user_protection_context; + cqm->entries[cqc_index].in_use = 1; +#if defined(MT_SUSPEND_QP) + cqm->entries[cqc_index].u.used.is_suspended = FALSE; +#endif + *cq_p = new_cqn; + } + else /* clean */ + { + MTL_ERROR2(MT_FLFMT("fail, now clean")); + if (mr_rc == HH_OK) + { + (void)THH_mrwm_deregister_mr(cqm->mrwm_internal, lkey); + } + if (cqc_index != EPOOL_NULL) + { + epool_free(&cqm->flist, cqc_index); + } + } + } + MTL_TRACE1("}THH_cqm_create_cq, cq=0x%x\n", *cq_p); + logIfErr("THH_cqm_create_cq"); + return rc; +} /* THH_cqm_create_cq */ + +/************************************************************************/ +HH_ret_t THH_cqm_resize_cq( + THH_cqm_t cqm, /* IN */ + HH_cq_hndl_t cq, /* IN */ + THH_cq_ul_resources_t* cq_ul_resources_p /* IO */ +) +{ + MT_virt_addr_t cqe_buf = cq_ul_resources_p->cqe_buf; + MT_virt_addr_t unalligned_bits = cqe_buf & CQE_size_mask; + MT_size_t buf_sz = cq_ul_resources_p->cqe_buf_sz; + MT_size_t residue = buf_sz % CQE_size; + VAPI_lkey_t lkey; + unsigned long n_cq_entries = (unsigned long)(buf_sz / CQE_size); + CQCM_entry_ut* sw_cqc_p; + THH_internal_mr_t mr_internal; + THH_cmd_status_t cmd_rc; + HH_ret_t rc; + + /* Validate parameters */ + if (!in_use_cq(cqm, cq)) { + MTL_ERROR1(MT_FLFMT("Invalid CQ handle (0x%X)"),cq); + return HH_EINVAL_CQ_HNDL; + } + sw_cqc_p = &cqm->entries[cq & MASK32(cqm->log2_max_cq)]; + + if ((unalligned_bits != 0) || (residue != 0) || (buf_sz == 0)) { + MTL_ERROR1(MT_FLFMT("%s: Invalid CQEs buffer (va="VIRT_ADDR_FMT" , size="SIZE_T_FMT")"), + __func__,cqe_buf,buf_sz); + return HH_EINVAL; + } + + /* Register new CQEs buffer */ + 
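+ /* (Ordering note: the replacement buffer must already own a valid lkey
+ when RESIZE_CQ is issued; the old buffer is deregistered only after
+ the command succeeds, and the new one is deregistered on failure.) */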
memset(&mr_internal, 0, sizeof(mr_internal)); + mr_internal.start = cq_ul_resources_p->cqe_buf; + mr_internal.size = buf_sz; + mr_internal.pd = THH_RESERVED_PD; + mr_internal.vm_ctx = sw_cqc_p->u.used.user_protection_context; + mr_internal.force_memkey = FALSE; + mr_internal.memkey = (VAPI_lkey_t)0; + + rc= THH_mrwm_register_internal(cqm->mrwm_internal, &mr_internal, &lkey); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed registering new CQEs buffer (%s)"), + __func__,HH_strerror_sym(rc)); + return rc; + } + + cmd_rc= THH_cmd_RESIZE_CQ(cqm->cmd_if, cq, cqe_buf, lkey, floor_log2(n_cq_entries), + cqm->cq_resize_fixed ? NULL : &cq_ul_resources_p->new_producer_index); + if (cmd_rc != THH_CMD_STAT_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed command RESIZE_CQ (%s)"), + __func__,str_THH_cmd_status_t(cmd_rc)); + switch (cmd_rc) { + case THH_CMD_STAT_BAD_SIZE: + rc= HH_E2BIG_CQE_NUM; /* Retry after polling some CQEs */ + break; + case THH_CMD_STAT_BAD_RES_STATE: /* CQ in error state or does not exist anymore */ + case THH_CMD_STAT_BAD_INDEX: /* Wrong CQ number */ + rc= HH_EINVAL_CQ_HNDL; + break; + case THH_CMD_STAT_BAD_OP: + rc= HH_ENOSYS; /* Probably old firmware */ + break; + case THH_CMD_STAT_EINTR: + rc = HH_EINTR; + break; + default: + rc= HH_EFATAL; /* Unexpected error */ + break; + } + (void)THH_mrwm_deregister_mr(cqm->mrwm_internal, lkey); /* deregister new buffer */ + return rc; + } + + rc= THH_mrwm_deregister_mr(cqm->mrwm_internal, sw_cqc_p->u.used.lkey); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed deregistration of old CQEs buffer (%s) !!"), + __func__,HH_strerror_sym(rc)); + /* Nothing we can do about old CQEs region but anyway nobody uses it for any other resource */ + } + /* Save new parameters of the CQ */ + sw_cqc_p->u.used.n_cq = n_cq_entries; + sw_cqc_p->u.used.lkey = lkey; + + return HH_OK; +} + +/************************************************************************/ +HH_ret_t THH_cqm_destroy_cq( + THH_cqm_t cqm /* IN */, + HH_cq_hndl_t cq /* IN */ +) +{ + u_int32_t cqc_index= cq & MASK32(cqm->log2_max_cq); + HH_ret_t rc = HH_EINVAL_CQ_HNDL; + MTL_TRACE1("{THH_cqm_destroy_cq, cqm=%p, cq=0x%x\n", cqm, cq); + if (in_use_cq(cqm, cq)) + { + rc = hw2sw_cq(cqm->cmd_if, cq); + if ((rc == HH_OK) || (rc == HH_EFATAL)) + { + CQCM_entry_ut* e = &cqm->entries[cqc_index]; + THH_cmd_status_t mrrc = THH_mrwm_deregister_mr(cqm->mrwm_internal, + e->u.used.lkey); + if (mrrc != THH_CMD_STAT_OK) + { + MTL_ERROR1(MT_FLFMT("%s: Failed deregistration of CQEs buffer (%s) !!"), + __func__,str_THH_cmd_status_t(mrrc)); + rc = CMDRC2HH_BUSY(mrrc); + } + else + { + /* If we are in a fatal error, return OK for destruction */ + if (rc == HH_EFATAL){ + MTL_DEBUG1(MT_FLFMT("%s: in fatal error"), __func__); + rc = HH_OK; + } + e->in_use = 0; + epool_free(&cqm->flist, cqc_index); + } + } + } + MTL_TRACE1("}THH_cqm_destroy_cq\n"); + logIfErr("THH_cqm_destroy_cq"); + return rc; +} /* THH_cqm_destroy_cq */ + + +/************************************************************************/ +/* Note: we actually not validating that given 'cq' is indeed in use. */ +HH_ret_t THH_cqm_query_cq( + THH_cqm_t cqm, /* IN */ + HH_cq_hndl_t cq, /* IN */ + VAPI_cqe_num_t* num_o_cqes_p /* IN */ +) +{ + u_int32_t cqc_index= cq & MASK32(cqm->log2_max_cq); + HH_ret_t rc = (in_use_cq(cqm, cq) ? HH_OK : HH_EINVAL_CQ_HNDL); + MTL_TRACE1("{THH_cqm_query_cq: cqm=%p, cq=0x%x\n", cqm, cq); + if (rc == HH_OK) rc= (TCQM_CQN(cqm,cqc_index) == cq) ? 
HH_OK : HH_EINVAL_CQ_HNDL; + if (rc == HH_OK) + { + *num_o_cqes_p = cqm->entries[cqc_index].u.used.n_cq; + } + MTL_TRACE1("}THH_cqm_query_cq\n"); + logIfErr("THH_cqm_query_cq"); + return rc; +} /* THH_cqm_query_cq */ + + + +/************************************************************************/ +/* Assumed to be the first called in this module, single thread. */ +void THH_cqm_init(void) +{ + MTL_TRACE1("THH_cqm_init\n"); +} /* THH_cqm_init */ + + +/************************************************************************/ +HH_ret_t THH_cqm_get_num_cqs( + THH_cqm_t cqm, /* IN */ + u_int32_t *num_cqs_p /* OUT*/ +) +{ + CQCM_entry_ut* e; + CQCM_entry_ut* e_end; + int num_alloc_cqs = 0; + + if (cqm == NULL) { + return HH_EINVAL; + } + + e = cqm->entries; + e_end = e + cqm->max_cq; + for (; (e != e_end) ; ++e){ + if (e->in_use){ num_alloc_cqs++; } + } + return HH_OK; +} /* THH_cqm_destroy */ + +#if defined(MT_SUSPEND_QP) +HH_ret_t THH_cqm_suspend_cq( + THH_cqm_t cqm, /* IN */ + HH_cq_hndl_t cq, /* IN */ + MT_bool do_suspend /* IN */) +{ + u_int32_t cqc_index= cq & MASK32(cqm->log2_max_cq); + HH_ret_t rc = HH_EINVAL_CQ_HNDL; + MTL_TRACE1("{THH_cqm_suspend_cq, cqm=%p, cq=0x%x, do_suspend=%s\n", + cqm, cq,(do_suspend==FALSE)?"FALSE":"TRUE"); + if (in_use_cq(cqm, cq)) + { + CQCM_entry_ut* e = &cqm->entries[cqc_index]; + MT_bool is_suspended = e->u.used.is_suspended; + if (do_suspend == is_suspended) { + /* cq suspension already in desired state */ + MTL_DEBUG2(MT_FLFMT("%s: CQ 0x%x already %s"), + __func__,cq, (is_suspended == TRUE)?"suspended" : "unsuspended"); + return HH_OK; + } + rc = THH_mrwm_suspend_internal(cqm->mrwm_internal,e->u.used.lkey, do_suspend); + if (rc != HH_OK) + { + MTL_ERROR1(MT_FLFMT("%s: Failed THH_mrwm_suspend_mr of CQEs buffer region(%d: %s) !!"), + __func__,rc, HH_strerror_sym(rc)); + } + else + { + MTL_DEBUG2(MT_FLFMT("%s: CQ 0x%x is %s"), + __func__,cq, (do_suspend == TRUE)?"suspended" : "unsuspended"); + e->u.used.is_suspended = do_suspend; + } + } + MTL_TRACE1("}THH_cqm_suspend_cq\n"); + logIfErr("THH_cqm_suspend_cq"); + return rc; +} /* THH_cqm_suspend_cq */ +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h new file mode 100644 index 00000000..da11dcf4 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_cqm/tcqm.h @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(H_TCQM_H)
+#define H_TCQM_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/************************************************************************
+ * Function: THH_cqm_create
+ *
+ * Arguments:
+ * hob - The THH_hob object in which this object will be included
+ * log2_max_cq - (log2) Max. number of CQs (CQC table size)
+ * log2_rsvd_cqs - (log2) Number of CQs reserved for firmware use
+ * cqm_p - The allocated CQM object
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters
+ * HH_EAGAIN - Not enough resources available
+ *
+ * Description:
+ * This function creates the THH_cqm object.
+ */
+extern HH_ret_t THH_cqm_create(
+ THH_hob_t hob, /* IN */
+ u_int8_t log2_max_cq, /* IN */
+ u_int8_t log2_rsvd_cqs, /* IN */
+ THH_cqm_t* cqm_p /* OUT */
+);
+
+/************************************************************************
+ * Function: THH_cqm_destroy
+ *
+ * Arguments:
+ * cqm - The object to destroy
+ * hca_failure - If TRUE object destruction is required
+ * due to HCA (hardware) failure (e.g. surprise removal)
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handle
+ *
+ * Description:
+ * Free all CQM related resources.
+ */
+extern HH_ret_t THH_cqm_destroy(
+ THH_cqm_t cqm, /* IN */
+ MT_bool hca_failure /* IN */
+);
+
+/************************************************************************
+ * Function: THH_cqm_create_cq
+ *
+ * Arguments:
+ * cqm - CQM object context
+ * user_protection_context - User context of given CQE buffer
+ * comp_eqn - Completion Event Queue
+ * error_eqn - Error Event Queue
+ * cq_ul_resources_p - CQ user-level resources (CQE buffer),
+ * requested and/or actually created.
+ * cq_p - The allocated CQ handle (probably CQ index).
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters
+ * HH_EAGAIN - Not enough resources available to complete operation
+ *
+ * Description:
+ * Set up a CQ resource.
+ */
+extern HH_ret_t THH_cqm_create_cq(
+ THH_cqm_t cqm, /* IN */
+ MOSAL_protection_ctx_t user_protection_context, /* IN */
+ THH_eqn_t comp_eqn, /* IN */
+ THH_eqn_t error_eqn, /* IN */
+ THH_cq_ul_resources_t* cq_ul_resources_p, /* IO */
+ HH_cq_hndl_t* cq_p /* OUT */
+);
+
+/************************************************************************
+ * Function: THH_cqm_resize_cq
+ *
+ * Arguments:
+ * cqm (IN) - CQM object context
+ * cq (IN) - CQ to resize
+ * cq_ul_resources_p (IO)- CQ resources allocated/defined in user space
+ * and returned producer index
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters
+ * HH_EAGAIN - Not enough resources available to complete operation
+ *
+ * Description:
+ * Resize the CQ by replacing its CQEs buffer.
+ * Replace the CQEs buffer with the new buffer given in cq_ul_resources_p
+ * (new cqe_buf + buf_sz).
+ * Return in cq_ul_resources_p the next producer index (to start with in
+ * the new buffer).
+ */
+HH_ret_t THH_cqm_resize_cq(
+ THH_cqm_t cqm, /* IN */
+ HH_cq_hndl_t cq, /* IN */
+ THH_cq_ul_resources_t* cq_ul_resources_p /* IO */
+);
+
+/************************************************************************
+ * Function: THH_cqm_destroy_cq
+ * Arguments:
+ * cqm - The THH_cqm object handle
+ * cq - The CQ to destroy
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles
+ *
+ * Description:
+ * Free CQ resources.
+ */ +extern HH_ret_t THH_cqm_destroy_cq( + THH_cqm_t cqm /* IN */, + HH_cq_hndl_t cq /* IN */ +); + +/************************************************************************ + * Function: THH_cqm_query_cq + * + * Arguments: + * cqm - The THH_cqm object handle + * cq - The CQ to query + * num_o_cqes_p - Maximum outstanding CQEs for this CQ + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handles + * + * Description: + * Query CQ for number of outstanding CQEs limit. + */ +extern HH_ret_t THH_cqm_query_cq( + THH_cqm_t cqm, /* IN */ + HH_cq_hndl_t cq, /* IN */ + VAPI_cqe_num_t* num_o_cqes_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_cqm_get_num_cqs + * + * Arguments: + * cqm - The THH_cqm object handle + * num_cqs_p - number of CQs currently allocated + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handles + * + * Description: + * Count the CQs currently allocated (in use). + */ +HH_ret_t THH_cqm_get_num_cqs( + THH_cqm_t cqm, /* IN */ + u_int32_t *num_cqs_p /* OUT*/ +); + +#if defined(MT_SUSPEND_QP) +/************************************************************************ + * Function: THH_cqm_suspend_cq + * + * Arguments: + * cqm - The THH_cqm object handle + * cq - CQ handle + * do_suspend -- if TRUE, suspend (i.e., unpin the CQ's resources). + * if FALSE, unsuspend (i.e., re-pin the CQ's resources). + * + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handles + * + * Description: + * Suspend or resume (re-pin) the memory region backing the CQ's CQE buffer. + */ +HH_ret_t THH_cqm_suspend_cq( + THH_cqm_t cqm, /* IN */ + HH_cq_hndl_t cq, /* IN */ + MT_bool do_suspend /* IN */ +); +#endif +#endif /* H_TCQM_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_default_profile.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_default_profile.h new file mode 100644 index 00000000..b93effcf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_default_profile.h @@ -0,0 +1,121 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THH_DEFAULT_PROFILE_H +#define H_THH_DEFAULT_PROFILE_H + +#include + +/* WQE IN DDR DEFINES */ +/* CHANGE THE DEFINE BELOW TO BE DEFINED TO ZERO TO DISABLE WQEs IN DDR */ +#define THH_LOG2_WQE_DDR_SPACE_PER_QP 0 /* was 4096 */ + +#define THH_DDR_LOG2_MTT_ENTRIES_PER_SEG (3) +#define THH_DDR_LOG2_MTT_SEGS_PER_REGION (1) + +#define THH_DDR_LOG2_INFLIGHT_RDMA_PER_QP (3) +#define THH_DDR_LOG2_MIN_QP_PER_MCG (3) /* minimum QPs per MCG. 
May be increased by calculations */ +#define THH_DDR_LOG2_MAX_MCG (13) /* log2 max MCG entries */ +#define THH_DDR_LOG2_MCG_HASH_PROPORTION (-1) /* log2 of proportion of MCG entries in mcg hash table*/ +#define THH_DDR_LOG2_MAX_EQ (6) +#define THH_DDR_MAX_PRIV_UDAVS (1<<16) +#define THH_USE_PRIV_UDAV (FALSE) +#define THH_MAX_ASYNC_EQ_SIZE (1<<14) /* max number of outstanding async events */ + +typedef struct THH_profile_input_st { + u_int32_t max_qps; /* max number of QPs to configure */ + u_int32_t max_cqs; + u_int32_t max_pds; + u_int32_t max_regions; + u_int32_t max_windows; + + u_int32_t min_qps; /* min number of QPs to configure */ + u_int32_t min_cqs; + u_int32_t min_pds; + u_int32_t min_regions; + u_int32_t min_windows; + + u_int32_t reduction_pct_qps; /* percent by which to reduce QPs if need reduction */ + u_int32_t reduction_pct_cqs; /* percent by which to reduce CQs if need reduction */ + u_int32_t reduction_pct_pds; /* percent by which to reduce PDs if need reduction */ + u_int32_t reduction_pct_regions; + u_int32_t reduction_pct_windows; + + u_int32_t log2_max_eq; + u_int32_t log2_mtt_entries_per_seg; + u_int32_t log2_mtt_segs_per_region; + u_int32_t log2_inflight_rdma_per_qp; + u_int32_t log2_max_mcg; + u_int32_t log2_min_qp_per_mcg; + int log2_mcg_hash_proportion; + u_int32_t max_priv_udavs; + MT_bool use_priv_udav; + u_int32_t log2_wqe_ddr_space_per_qp; +} THH_profile_input_t; + +/* NOTE: In the case of NON-privileged UDAV, we need one internal region per allocated PD. The number of PDs */ +/* by default is #QPs/4. This means that the number of internal regions in the MPT is not properly calculated. */ +/* However, there is a problem in that the MTT segment size MUST be a power of 2 (so that MTT entry addresses */ +/* are composed of a segment address and an entry offset in the segment). Using a segment size of 16 requires */ +/* reducing the number of supported QPs. For now, we are ignoring this issue, since users will mostly run */ +/* in UDAV protected mode */ + +/* INIT_IB: No provision for overriding GUID0 on the chip is provided for now. some customers may wish to override the + * default GUIDs burned into the chip. A define will not do the job, since each chip in a network + * must have a different GUID0. When we provide default-override capability, we need to think about allowing + * the administrator of a network to specify GUIDs per card on a host */ + + + +/* DEFINES WHICH SHOULD REALLY COME FROM THE FIRMWARE. 
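+
+   As an illustration (a sketch, not code from this patch) of how the
+   tunables above would populate a THH_profile_input_t when no user
+   profile is supplied -- the values shown are the defaults defined in
+   this header:
+
+     THH_profile_input_t in;
+     in.log2_max_eq               = THH_DDR_LOG2_MAX_EQ;               (= 6)
+     in.log2_mtt_entries_per_seg  = THH_DDR_LOG2_MTT_ENTRIES_PER_SEG;  (= 3)
+     in.log2_mtt_segs_per_region  = THH_DDR_LOG2_MTT_SEGS_PER_REGION;  (= 1)
+     in.log2_inflight_rdma_per_qp = THH_DDR_LOG2_INFLIGHT_RDMA_PER_QP; (= 3)
+     in.log2_min_qp_per_mcg       = THH_DDR_LOG2_MIN_QP_PER_MCG;       (= 3)
+     in.log2_max_mcg              = THH_DDR_LOG2_MAX_MCG;              (= 13)
+     in.log2_mcg_hash_proportion  = THH_DDR_LOG2_MCG_HASH_PROPORTION;  (= -1)
+     in.max_priv_udavs            = THH_DDR_MAX_PRIV_UDAVS;            (= 64K)
+     in.use_priv_udav             = THH_USE_PRIV_UDAV;                 (= FALSE)
+     in.log2_wqe_ddr_space_per_qp = THH_LOG2_WQE_DDR_SPACE_PER_QP;     (= 0)
+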
*/ + +#define THH_DDR_LOG2_SEG_SIZE_PER_REGION (3) +#define THH_DDR_MCG_ENTRY_SIZE (64) +#define THH_DDR_MCG_BYTES_PER_QP (4) +#define THH_DDR_MCG_ENTRY_HEADER_SIZE (32) +#define THH_DDR_LOG2_RDB_ENTRY_SIZE (5) +#define THH_DDR_LOG2_EQC_ENTRY_SIZE (ceil_log2(hob->dev_lims.eqc_entry_sz)) +#define THH_DDR_LOG2_EEC_ENTRY_SIZE (ceil_log2(hob->dev_lims.eec_entry_sz)) +#define THH_DDR_LOG2_EEEC_ENTRY_SIZE (ceil_log2(hob->dev_lims.eeec_entry_sz)) +#define THH_DDR_LOG2_QPC_ENTRY_SIZE (ceil_log2(hob->dev_lims.qpc_entry_sz)) +#define THH_DDR_LOG2_EQPC_ENTRY_SIZE (ceil_log2(hob->dev_lims.eqpc_entry_sz)) +#define THH_DDR_LOG2_SRQC_ENTRY_SIZE (ceil_log2(hob->dev_lims.srq_entry_sz)) +#define THH_DDR_LOG2_MTT_ENTRY_SIZE (3) +#define THH_DDR_LOG2_MIN_MTT_SEG_SIZE (6) +#define THH_DDR_LOG2_MPT_ENTRY_SIZE (6) +#define THH_DDR_LOG2_CQC_ENTRY_SIZE (ceil_log2(hob->dev_lims.cqc_entry_sz)) +#define THH_DDR_LOG2_UAR_SCR_ENTRY_SIZE (ceil_log2(hob->dev_lims.uar_scratch_entry_sz)) +#define THH_DDR_ADDR_VEC_SIZE (32) + +/* END firmware defines */ + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c new file mode 100644 index 00000000..e514476c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.c @@ -0,0 +1,6917 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define TEST_RETURN_FATAL(hob) if ((hob->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { \ + MTL_ERROR1(MT_FLFMT("%s: Device in FATAL state"), __func__); \ + return HH_EFATAL; \ + } +#define TEST_CMD_FATAL(ret) if (ret == THH_CMD_STAT_EFATAL) { \ + MTL_ERROR1(MT_FLFMT("%s: cmdif returned FATAL"), __func__); \ + return HH_EFATAL; \ + } +#define FREE_RSRC_RET(hob) rc = ((have_fatal == TRUE) ? 
HH_OK : rc) ; return rc +#define DECLARE_FATAL_VARS MT_bool have_fatal=FALSE +#define WAIT_IF_FATAL(hob) THH_hob_wait_if_fatal(hob,&have_fatal); +#define IB_MAX_MESSAGE_SIZE (1 << 31) +#define CMD_EQ_SIZE 250 /* to be smaller than 8K - so its EQ can get into physical memory */ + +#if 4 <= MAX_DEBUG +#define THH_PRINT_PROFILE(a) THH_print_profile(a) +#define THH_PRINT_USR_PROFILE(a) THH_print_usr_profile(a) +#else +#define THH_PRINT_PROFILE(a) +#define THH_PRINT_USR_PROFILE(a) +#endif + +/* global reference here for cmdif when putting Outbox in DDR memory*/ +#ifdef DEBUG_MEM_OV +#define CMDIF_SIZE_IN_DDR 0x100000 /* allocate 1M in DDR memory*/ +MT_phys_addr_t cmdif_dbg_ddr; /* address in ddr used for out params in debug mode */ +#endif + + +#define THH_WATERMARK 1024 /* 1K*/ + +#define GET_DDR_ADDR(phys_addr,hide_ddr,ddr_base) ((u_int64_t) ((((hide_ddr) == TRUE)&&(sizeof(MT_phys_addr_t)<=4)) ? \ + (((u_int64_t)(phys_addr)) | ((ddr_base) & MAKE_ULONGLONG(0xFFFFFFFF00000000))) : phys_addr)) + +static HH_ret_t THH_hob_query_struct_init(THH_hob_t hob, MT_bool have_usr_profile, VAPI_hca_cap_t *hca_cap_p); +static HH_ret_t THH_hob_halt_hca(/*IN*/ THH_hob_t hob); + + +static void THH_dummy_async_event(HH_hca_hndl_t hca_hndl, HH_event_record_t *event_rec_p, void* ptr) +{ + /* TBD : This should be an error */ + MTL_TRACE1("THH_dummy_async_event: called for device %s with event type 0x%x", + hca_hndl->dev_desc, event_rec_p->etype); + return; +} + +static void THH_dummy_comp_event(HH_hca_hndl_t hca_hndl, HH_cq_hndl_t cq_num, void* ptr) +{ + /* TBD : This should be an error */ + MTL_TRACE1("THH_dummy_comp_event: called for device %s and cq num 0x%x", + hca_hndl->dev_desc, cq_num); + return; +} + +int THH_hob_fatal_err_thread(void *arg); + +/****************************************************************************** +****************************************************************************** +************************ INTERNAL FUNCTIONS ********************************* +****************************************************************************** +******************************************************************************/ +#if 4 <= MAX_DEBUG +static void THH_print_profile(THH_profile_t *profile) +{ + MTL_DEBUG1("Profile printout\n"); + + MTL_DEBUG1(" ddr_alloc_vec_size = "SIZE_T_DFMT"\n",profile->ddr_alloc_vec_size); + MTL_DEBUG1(" ddr_size = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->ddr_size,profile->ddr_size ); + + MTL_DEBUG1(" ddr_size_code = %d\n", profile->ddr_size_code); + + MTL_DEBUG1(" num_external_mem_regions = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->num_external_mem_regions,profile->num_external_mem_regions ); + MTL_DEBUG1(" num_mem_windows = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->num_mem_windows,profile->num_mem_windows); + + MTL_DEBUG1(" log2_max_qps = "SIZE_T_DFMT"\n", profile->log2_max_qps); + MTL_DEBUG1(" max_num_qps = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->max_num_qps,profile->max_num_qps); + MTL_DEBUG1(" log2_max_cqs = "SIZE_T_DFMT"\n", profile->log2_max_cqs); + MTL_DEBUG1(" max_num_cqs = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->max_num_cqs,profile->max_num_cqs); + MTL_DEBUG1(" max_num_pds = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->max_num_pds,profile->max_num_pds); + + MTL_DEBUG1(" log2_max_mpt_entries = "SIZE_T_DFMT"\n", profile->log2_max_mpt_entries); + MTL_DEBUG1(" log2_max_mtt_entries = "SIZE_T_DFMT"\n", profile->log2_max_mtt_entries); + MTL_DEBUG1(" log2_mtt_segs_per_region = "SIZE_T_DFMT"\n", profile->log2_mtt_segs_per_region); + 
MTL_DEBUG1(" log2_mtt_entries_per_seg = "SIZE_T_DFMT"\n", profile->log2_mtt_entries_per_seg); + + + MTL_DEBUG1(" log2_max_uar = "SIZE_T_DFMT"\n", profile->log2_max_uar); + MTL_DEBUG1(" log2_uar_pg_size = %d\n", profile->log2_uar_pg_size); + + MTL_DEBUG1(" log2_wqe_ddr_space_per_qp = "SIZE_T_DFMT"\n",profile->log2_wqe_ddr_space_per_qp); + + MTL_DEBUG1(" use_priv_udav = %s\n", (profile->use_priv_udav ? "TRUE" : "FALSE")); + MTL_DEBUG1(" max_priv_udavs = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + profile->max_priv_udavs,profile->max_priv_udavs); + + MTL_DEBUG1(" log2_max_mcgs = "SIZE_T_DFMT"\n", profile->log2_max_mcgs); + MTL_DEBUG1(" qps_per_mcg = "SIZE_T_DFMT"\n", profile->qps_per_mcg); + MTL_DEBUG1(" log2_mcg_entry_size = "SIZE_T_DFMT"\n", profile->log2_mcg_entry_size); + MTL_DEBUG1(" log2_mcg_hash_size = "SIZE_T_DFMT"\n",profile->log2_mcg_hash_size); + + + MTL_DEBUG1(" log2_max_eecs = "SIZE_T_DFMT"\n",profile->log2_max_eecs); + + MTL_DEBUG1(" log2_max_eqs = %d\n", profile->log2_max_eqs); + return; +} + +static void THH_print_usr_profile(EVAPI_hca_profile_t *profile) +{ + MTL_DEBUG1("User Profile printout\n"); + + MTL_DEBUG1(" num_qp = %d\n",profile->num_qp); + MTL_DEBUG1(" num_cq = %d\n",profile->num_cq); + MTL_DEBUG1(" num_pd = %d\n",profile->num_pd); + MTL_DEBUG1(" num_mr = %d\n",profile->num_mr); + MTL_DEBUG1(" num_mw = %d\n",profile->num_mw); + MTL_DEBUG1(" max_qp_ous_rd_atom = %d\n",profile->max_qp_ous_rd_atom); + MTL_DEBUG1(" max_mcg = %d\n",profile->max_mcg); + MTL_DEBUG1(" qp_per_mcg = %d\n",profile->qp_per_mcg); + MTL_DEBUG1(" require = %s\n",(profile->require == 0) ? "FALSE" : "TRUE"); + return; +} +#endif + +static const char* THH_get_ddr_allocation_string (u_int32_t index) +{ + switch(index) { + case 0: return "mtt sz"; + case 1: return "mpt sz"; + case 2: return "qpc sz"; + case 3: return "eqpc sz"; + case 4: return "srqc sz"; + case 5: return "cqc sz"; + case 6: return "rdb sz"; + case 7: return "uar scratch sz"; + case 8: return "eqc sz"; + case 9: return "mcg sz"; + case 10: return "eec sz"; + case 11: return "eeec sz"; + #if 0 + case 12: return "wqe pool sz"; + case 13: return "uplink qp sz"; + case 14: return "uplink mem sz"; + #endif + default: return "UNKNOWN"; + + } +} + + +/* + * THH_get_ddr_size_code + * + */ +static void THH_print_hw_props(THH_hw_props_t *hw_props_p) +{ + MTL_DEBUG4("%s: cr_base = " PHYS_ADDR_FMT "\n", __func__, hw_props_p->cr_base); +// MTL_DEBUG4("%s: ddr_base = " PHYS_ADDR_FMT "\n", __func__hw_props_p->ddr_base); + MTL_DEBUG4("%s: uar_base = " PHYS_ADDR_FMT "\n", __func__, hw_props_p->uar_base); + MTL_DEBUG4("%s: device_id = 0x%x\n", __func__, hw_props_p->device_id); + MTL_DEBUG4("%s: pci_vendor_id = 0x%x\n", __func__, hw_props_p->pci_vendor_id); + MTL_DEBUG4("%s: intr_pin = 0x%x\n", __func__, hw_props_p->interrupt_props.intr_pin); +#ifndef __DARWIN__ + //MOSAL_IRQ_ID_t does not have to be integer + MTL_DEBUG4("%s: irq = 0x%x\n", __func__, hw_props_p->interrupt_props.irq); +#endif /* not defined __DARWIN__ */ + MTL_DEBUG4("%s: bus = %d\n", __func__, hw_props_p->bus); + MTL_DEBUG4("%s: dev_func = 0x%x\n", __func__, hw_props_p->dev_func); +} + +/* + * THH_get_ddr_size_code + * + */ +static THH_ddr_size_enum_t THH_get_ddr_size_code(MT_size_t ddr_size) +{ + MTL_DEBUG4("THH_get_ddr_size_code: ddr size = "SIZE_T_FMT"\n", ddr_size); + if (ddr_size < (1UL<<25)) { + return THH_DDR_SIZE_32M; + } else if (ddr_size < (1UL<<26)) { + return THH_DDR_SIZE_64M; + } else if (ddr_size < (1UL<<27)) { + return THH_DDR_SIZE_128M; + } else if (ddr_size < (1UL<<28)) { + 
return THH_DDR_SIZE_256M; + } else if (ddr_size < (1UL<<29)) { + return THH_DDR_SIZE_512M; + } else if (ddr_size < (1UL<<30)) { + return THH_DDR_SIZE_1024M; + } else if (ddr_size < (1UL<<31)) { + return THH_DDR_SIZE_2048M; + } else if (ddr_size < 0xFFFFFFFF) { + return THH_DDR_SIZE_4096M; + } else { + return THH_DDR_SIZE_BIG; + } +} +#ifndef __DARWIN__ +//TODO: all this code is OS dependent! +//Must work with the pointer to PCI device + +/* + * read_pci_config -- reads all configuration registers for given device + * except for skipping regs 22 and 23 + */ +static HH_ret_t read_pci_config(u_int8_t bus, u_int8_t devfun, u_int32_t *config) +{ + u_int8_t offset = 0; + HH_ret_t rc = HH_OK; + + for (offset = 0; offset < 64; offset += 4) { + if (offset == 22 || offset == 23) { + continue; + } + rc = MOSAL_PCI_read_config_dword(bus,devfun,offset,config); + if (rc != MT_OK) { + return HH_ERR; + } + config++; + } + return HH_OK; +} +static HH_ret_t write_pci_config(u_int8_t bus, u_int8_t devfun, u_int32_t *config) +{ + u_int8_t offset = 0; + HH_ret_t rc = HH_OK; + + for (offset = 0; offset < 64; offset += 4) { + if (offset == 22 || offset == 23) { + continue; + } + rc = MOSAL_PCI_write_config_dword(bus,devfun,offset,*config); + if (rc != MT_OK) { + return HH_ERR; + } + config++; + } + return HH_OK; +} +/****************************************************************************** + * Function: THH_hob_get_pci_br_config + * + * Description: Gets p2p bridge configuration for this hca's bridge + * + * input: + * hob + * output: + * ack_timeout_p + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Does MAD query to get the data in real time. This function is used + * in pre-calculation of VAPI_query_hca values (at open_hca time). + * + *****************************************************************************/ +static HH_ret_t THH_hob_get_pci_br_config(THH_hob_t hob) +{ + call_result_t rc; + u_int16_t index=0; + u_int8_t bus; + u_int8_t dev_func; + MOSAL_PCI_cfg_hdr_t cfg_hdr; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + /* scan all bridges to find mellanox pci bridge which belongs to this hca */ + while (TRUE) { + /*1. find device */ + rc = MOSAL_PCI_find_device(hob->hw_props.pci_vendor_id, + (hob->hw_props.device_id)+2, + index, &bus, &dev_func); + index++; + if (rc != MT_OK) { + MTL_DEBUG4(MT_FLFMT("%s: No more InfiniBridges."), __func__); + break; + } + MTL_DEBUG4(MT_FLFMT("%s: InfiniBridge %d: pci_find_device returned: bus=%d, dev_func=%d"), + __func__, index, bus, dev_func); + + /*2. get pci header */ + rc = MOSAL_PCI_get_cfg_hdr(bus, dev_func, &cfg_hdr); + if (rc != MT_OK) { + MTL_ERROR4(MT_FLFMT("%s: Could not get header for device bus %d, dev_func 0x%x"), + __func__, bus, dev_func); + continue; + } + + if ((cfg_hdr.type1.header_type & 0x7F) != MOSAL_PCI_HEADER_TYPE1) { + MTL_DEBUG1(MT_FLFMT("%s: Wrong PCI header type (0x%02X). Should be type 1. Device ignored."), + __func__, cfg_hdr.type0.header_type); + continue; + } + + /*3. check if this is our bridge */ + if (cfg_hdr.type1.sec_bus != hob->hw_props.bus) { + MTL_DEBUG1(MT_FLFMT("%s: Not our bridge. bus = %d, dev_num=%d"), + __func__, bus, dev_func ); + continue; + } + + /* found our bridge. Read and save its configuration */ + MTL_DEBUG1(MT_FLFMT("%s: found bridge. 
bus = %d, dev_num=%d"), + __func__, bus, dev_func ); + if (read_pci_config(bus,dev_func,hob->pci_bridge_info.config) != MT_OK) { + return (HH_ERR); + } else { + hob->pci_bridge_info.bus = bus; + hob->pci_bridge_info.dev_func = dev_func; + hob->pci_bridge_info.is_valid = TRUE; + return HH_OK; + } + } + + return HH_ERR; // did not find bridge +} +#endif /* not defined __DARWIN__ */ + +/****************************************************************************** + * Function: THH_hob_get_node_guid + * + * Description: Gets node GUID for this HCA. + * + * input: + * hob + * port_num - 1 or 2 + * output: + * node_guid - pointer to a GUID structure + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Does MAD query to get the data in real time. This function is used + * in pre-calculation of VAPI_query_hca values (at open_hca time). + * + *****************************************************************************/ +static HH_ret_t THH_hob_get_node_guid(THH_hob_t hob, + IB_guid_t *node_guid) +{ + SM_MAD_NodeInfo_t node_info; + u_int8_t *mad_frame_in; + u_int8_t *mad_frame_out; + THH_cmd_status_t cmd_ret; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + MTL_DEBUG4("==> THH_hob_get_node_guid\n"); + TEST_RETURN_FATAL(hob); + + mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_in ) { + return HH_EAGAIN; + } + mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_out ) { + FREE(mad_frame_in); + return HH_EAGAIN; + } + /* clear the full MAD buffers; sizeof(mad_frame_in) would only clear a pointer's worth */ + memset(mad_frame_in, 0, IB_MAD_SIZE); + memset(mad_frame_out, 0, IB_MAD_SIZE); + + /* get node info using MAD commands in THH_cmd object */ + /* First, build the MAD header */ + MADHeaderBuild(IB_CLASS_SMP, + 0, + IB_METHOD_GET, + IB_SMP_ATTRIB_NODEINFO, + (u_int32_t) 0, + &(mad_frame_in[0])); + + /* issue the query */ + cmd_ret = THH_cmd_MAD_IFC(hob->cmd, 0, 0, 1, &(mad_frame_in[0]), &(mad_frame_out[0])); + if (cmd_ret != THH_CMD_STAT_OK) { + TEST_CMD_FATAL(cmd_ret); + MTL_ERROR2( "THH_hob_get_node_guid: ERROR : Get Node Info command failed (%d) for port 1\n", cmd_ret); + MTL_DEBUG4("<== THH_hob_get_node_guid. ERROR\n"); + FREE(mad_frame_out); + FREE(mad_frame_in); + return HH_EINVAL; + } + MadBufPrint(&(mad_frame_out[0])); + NodeInfoMADToSt(&node_info, &(mad_frame_out[0])); + NodeInfoPrint(&node_info); + +// guid = node_info.qwNodeGUID; +// MTL_DEBUG4("THH_hob_get_node_guid: Node GUID = 0x%Lx\n", guid); +// for (i = 7; i >= 0 ; --i) { +// (*node_guid)[i] = (u_int8_t) (guid & 0x0FF); +// guid >>= 8; +// } + memcpy((*node_guid), node_info.qwNodeGUID, sizeof(IB_guid_t)); + MTL_DEBUG4("<== THH_hob_get_node_guid\n"); + FREE(mad_frame_out); + FREE(mad_frame_in); + return HH_OK; +} + +/* now obtained from DEV_LIMS */ +#if 0 +/****************************************************************************** + * Function: THH_hob_get_ack_timeout + * + * Description: Gets ack timeout for this HCA. + * + * input: + * hob + * output: + * ack_timeout_p + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Does MAD query to get the data in real time. This function is used + * in pre-calculation of VAPI_query_hca values (at open_hca time). 
+ * + *****************************************************************************/ +static HH_ret_t THH_hob_get_ack_timeout( + THH_hob_t hob, + u_int8_t* ack_timeout_p) +{ + SM_MAD_PortInfo_t port_info; + u_int8_t *mad_frame_in; + u_int8_t *mad_frame_out; + THH_cmd_status_t cmd_ret; + + MTL_DEBUG4("ENTERING THH_hob_get_ack_timeout\n"); + + mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_in ) { + return HH_EAGAIN; + } + mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE); + if ( !mad_frame_out ) { + FREE(mad_frame_in); + return HH_EAGAIN; + } + memset(mad_frame_in, 0, sizeof(mad_frame_in)); + memset(mad_frame_out, 0, sizeof(mad_frame_out)); + + /* get PortInfo for port 12 (first port) */ + /* First, build the MAD header */ + + MADHeaderBuild(IB_CLASS_SMP, + 0, + IB_METHOD_GET, + IB_SMP_ATTRIB_PORTINFO, + (u_int32_t) 1, + &(mad_frame_in[0])); + + /* issue the query */ + cmd_ret = THH_cmd_MAD_IFC(hob->cmd, 0, 0, 1, &(mad_frame_in[0]), &(mad_frame_out[0])); + if (cmd_ret != THH_CMD_STAT_OK) { + TEST_CMD_FATAL(cmd_ret); + MTL_ERROR2( "THH_hob_get_ack_timeout: ERROR : Get Port Info command failed (%d) for port 1\n", cmd_ret); + FREE(mad_frame_out); + FREE(mad_frame_in); + return HH_ERR; + } + PortInfoMADToSt(&port_info, &(mad_frame_out[0])); + PortInfoPrint(&port_info); + + *ack_timeout_p = port_info.cRespTimeValue; + FREE(mad_frame_out); + FREE(mad_frame_in); + return HH_OK; +} +#endif + +/****************************************************************************** + * Function: calculate_ddr_alloc_vec + * + * Description: Calculates sizes for DDR area allocation from profile + * + * input: + * hob + * profile -- pointer to data structure containing computation input + * output: + * alloc_size_vec -- pointer to vector of allocation sizes to compute + * + * returns: + * HH_OK + * + * + *****************************************************************************/ +static void calculate_ddr_alloc_vec(/*IN*/ THH_hob_t hob, + /*IN*/ THH_profile_t *profile, + /*OUT*/THH_ddr_allocation_vector_t *alloc_size_vec) +{ + + alloc_size_vec->log2_mtt_size = profile->log2_max_mtt_entries + THH_DDR_LOG2_MTT_ENTRY_SIZE; + alloc_size_vec->log2_mpt_size = profile->log2_max_mpt_entries + THH_DDR_LOG2_MPT_ENTRY_SIZE; + alloc_size_vec->log2_qpc_size = profile->log2_max_qps + THH_DDR_LOG2_QPC_ENTRY_SIZE; + alloc_size_vec->log2_eqpc_size = profile->log2_max_qps + THH_DDR_LOG2_EQPC_ENTRY_SIZE; + alloc_size_vec->log2_srqc_size = hob->dev_lims.srq ? 
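+ /* Each field of the allocation vector computed here is a log2 byte size:
+  * (log2 number of table entries) + (log2 entry size). For example, with
+  * 2^16 QPs and a 256-byte QPC entry (a typical Tavor value, assumed here
+  * purely for illustration): log2_qpc_size = 16 + 8 = 24, i.e. a 16MB
+  * QPC table carved out of DDR. */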
+ profile->log2_max_srqs + THH_DDR_LOG2_SRQC_ENTRY_SIZE : THH_DDRMM_INVALID_SZ; + alloc_size_vec->log2_cqc_size = profile->log2_max_cqs + THH_DDR_LOG2_CQC_ENTRY_SIZE; + alloc_size_vec->log2_rdb_size = profile->log2_max_qps + profile->log2_inflight_rdma_per_qp + + THH_DDR_LOG2_RDB_ENTRY_SIZE; + alloc_size_vec->log2_uar_scratch_size = profile->log2_max_uar + THH_DDR_LOG2_UAR_SCR_ENTRY_SIZE; + alloc_size_vec->log2_eqc_size = profile->log2_max_eqs + THH_DDR_LOG2_EQC_ENTRY_SIZE; + if (THH_DEV_LIM_MCG_ENABLED(hob)) { + alloc_size_vec->log2_mcg_size = profile->log2_max_mcgs + profile->log2_mcg_entry_size; + } else { + alloc_size_vec->log2_mcg_size = 0; + } + alloc_size_vec->log2_eec_size = profile->log2_max_eecs + THH_DDR_LOG2_EEC_ENTRY_SIZE; + alloc_size_vec->log2_eeec_size = profile->log2_max_eecs + THH_DDR_LOG2_EEEC_ENTRY_SIZE; /* in-flight rdma */ + + return; +} + + +/****************************************************************************** + * Function: THH_check_profile + * + * Description: Validates profile values against tavor max values, as obtained + * from GET_DEV_LIM query + * + * Details: + * + *****************************************************************************/ +static HH_ret_t THH_check_profile(THH_hob_t hob) +{ + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob->profile.log2_uar_pg_size < hob->dev_lims.log_pg_sz) { + MTL_ERROR1("THH_calculate_default_profile: log2 UAR page size(%u) is less than the Tavor minimum (%u)\n", + hob->profile.log2_uar_pg_size, hob->dev_lims.log_pg_sz); + return HH_EAGAIN; + } + hob->profile.log2_max_qps = (hob->profile.log2_max_qps > hob->dev_lims.log_max_qp) ? + hob->dev_lims.log_max_qp : hob->profile.log2_max_qps; + hob->profile.log2_max_mcgs = (hob->profile.log2_max_mcgs > hob->dev_lims.log_max_mcg) ? + hob->dev_lims.log_max_mcg : hob->profile.log2_max_mcgs; + hob->profile.log2_max_eecs = (hob->profile.log2_max_eecs > hob->dev_lims.log_max_ee) ? + hob->dev_lims.log_max_ee : hob->profile.log2_max_eecs; + hob->profile.log2_max_cqs = (hob->profile.log2_max_cqs > hob->dev_lims.log_max_cq) ? + hob->dev_lims.log_max_cq : hob->profile.log2_max_cqs; + hob->profile.log2_max_uar = (hob->profile.log2_max_uar > hob->dev_lims.uar_sz + 20UL - hob->profile.log2_uar_pg_size) ? + hob->dev_lims.uar_sz + 20UL - hob->profile.log2_uar_pg_size : hob->profile.log2_max_uar; + + hob->profile.log2_max_eqs = (hob->profile.log2_max_eqs > hob->dev_lims.log_max_eq) ? + hob->dev_lims.log_max_eq : hob->profile.log2_max_eqs; + hob->profile.max_num_pds = (hob->profile.max_num_pds > (1UL<<hob->dev_lims.log_max_pd)) ? + (1UL<<hob->dev_lims.log_max_pd) : hob->profile.max_num_pds; + + if (THH_DEV_LIM_MCG_ENABLED(hob)) { + hob->profile.qps_per_mcg = (hob->profile.qps_per_mcg > (MT_size_t) (1U<<hob->dev_lims.log_max_qp_mcg)) ? 
+ (MT_size_t) (1U<<hob->dev_lims.log_max_qp_mcg) : hob->profile.qps_per_mcg; + } + + return HH_OK; +} + + +#define THH_PROFILE_CALC_QP_AT_MINIMUM (1) +#define THH_PROFILE_CALC_CQ_AT_MINIMUM (1 << 1) +#define THH_PROFILE_CALC_PD_AT_MINIMUM (1 << 2) +#define THH_PROFILE_CALC_REG_AT_MINIMUM (1 << 3) +#define THH_PROFILE_CALC_WIN_AT_MINIMUM (1 << 4) +#define THH_PROFILE_CALC_ALL_AT_MINIMUM (THH_PROFILE_CALC_QP_AT_MINIMUM | THH_PROFILE_CALC_CQ_AT_MINIMUM | \ + THH_PROFILE_CALC_PD_AT_MINIMUM | THH_PROFILE_CALC_REG_AT_MINIMUM | \ + THH_PROFILE_CALC_WIN_AT_MINIMUM ) + +static int check_profile_sanity(THH_hob_t hob, EVAPI_hca_profile_t *user_profile, THH_profile_input_t *thh_profile) +{ + u_int64_t tmp_calc; + /* check for bad minimum values */ + if ((user_profile->num_qp == 0) || (user_profile->num_cq == 0) || (user_profile->num_pd == 0) || + (user_profile->num_mr == 0) || (user_profile->max_qp_ous_rd_atom == 0) ) { + MTL_ERROR1(MT_FLFMT("profile: QPs or CQs or PDs or MRs or max_qp_ous_rd_atom equal to 0")); + return 0; + } + if (user_profile->num_qp > (1U<<hob->dev_lims.log_max_qp)) { + MTL_ERROR1(MT_FLFMT("profile: num QPs more than device limit(%d)"), + (1U<<hob->dev_lims.log_max_qp)); + return 0; + } else if (user_profile->num_qp < 1) { + MTL_ERROR1(MT_FLFMT("profile: num QPs must be at least 1")); + return 0; + } + + if (user_profile->num_cq > (1U<<hob->dev_lims.log_max_cq)) { + MTL_ERROR1(MT_FLFMT("profile: num CQs more than device limit(%d)"), + (1U<<hob->dev_lims.log_max_cq)); + return 0; + } else if (user_profile->num_cq < 1) { + MTL_ERROR1(MT_FLFMT("profile: num CQs must be at least 1")); + return 0; + } + + if (user_profile->num_pd > (1U<<hob->dev_lims.log_max_pd)) { + MTL_ERROR1(MT_FLFMT("profile: num PDs more than device limit(%d)"), + (1U<<hob->dev_lims.log_max_pd)); + return 0; + } else if (user_profile->num_pd < 1) { + MTL_ERROR1(MT_FLFMT("profile: num PDs must be at least 1")); + return 0; + } + if (user_profile->num_mr < 1) { + MTL_ERROR1(MT_FLFMT("profile: num MRs must be at least 1")); + return 0; + } + if (user_profile->max_qp_ous_rd_atom > (1U<<hob->dev_lims.log_max_ra_res_qp)) { + MTL_ERROR1(MT_FLFMT("profile: max_qp_ous_rd_atom more than device limit (%d)"), + (1U<<hob->dev_lims.log_max_ra_res_qp)); + return 0; + } + + if (ceil_log2((u_int64_t)user_profile->max_mcg) > hob->dev_lims.log_max_mcg) { + MTL_ERROR1(MT_FLFMT("profile: num MCGs more than device limit(%d)"), + (1U<<hob->dev_lims.log_max_mcg)); + return 0; + } + + if (ceil_log2((u_int64_t)user_profile->qp_per_mcg) > hob->dev_lims.log_max_qp_mcg) { + MTL_ERROR1(MT_FLFMT("profile: QPs per multicast group greater than device limit (%d)"), + (1U<<hob->dev_lims.log_max_qp_mcg)); + return 0; + } + if (user_profile->num_cq > (user_profile->num_qp * 2)) { + MTL_ERROR1(MT_FLFMT("profile: CQs more than twice QPs in hca profile")); + return 0; + } + if ((user_profile->max_mcg > 0) && (user_profile->qp_per_mcg < 8)) { + MTL_ERROR1(MT_FLFMT("profile: if MCGs not zero, QP_PER_MCG must be >= 8")); + return 0; + } + if (ceil_log2(user_profile->num_mr) > hob->dev_lims.log_max_mpts) { + MTL_ERROR1("profile: Requested MRs use more MTTs than HCA provides\n"); + return 0; + } + + tmp_calc = (u_int64_t)((u_int64_t) user_profile->num_qp + + (u_int64_t) (unsigned long)THH_NUM_RSVD_QP + + (u_int64_t) user_profile->num_cq + + (u_int64_t) user_profile->num_mr); + if ( hob->dev_lims.log_max_mtt_seg < ceil_log2( tmp_calc * (u_int64_t)(1U<<thh_profile->log2_mtt_segs_per_region) )) { + MTL_ERROR1("profile: Requested parameters (CQs + QPs + MRs) use more MTTs than HCA provides\n"); + return 0; + } + + + if 
(ceil_log2(user_profile->num_mr) > hob->dev_lims.log_max_mpts) { + MTL_ERROR1("profile: Requested MRs use more MPTs than HCA provides\n"); + return 0; + } + + if (ceil_log2(user_profile->num_mw) > hob->dev_lims.log_max_mpts) { + MTL_ERROR1("profile: Requested MWs use more MPTs than HCA provides\n"); + return 0; + } + + tmp_calc = (u_int64_t)((u_int64_t) user_profile->num_qp + + (u_int64_t) (unsigned long)THH_NUM_RSVD_QP + + (u_int64_t) user_profile->num_cq + + (u_int64_t) user_profile->num_mr + + (u_int64_t) user_profile->num_mw); + + if ( hob->dev_lims.log_max_mpts < ceil_log2( tmp_calc)) { + MTL_ERROR1("profile: Requested parameters (CQs + QPs + MRs + MWs) use more MPTs than HCA provides\n"); + return 0; + } + return 1; +} +/****************************************************************************** + * Function: THH_calculate_profile + * + * Description: Calculates and installs profile values + * + * input + * hob + * profile_user_data - pointer to a user override for the data used + * in calculating the THH profile. + * + * Details: + * + * All calculations are derived from the following data: + * + * - max QPs = 64k per 128M DDR size (= 2^16) + * - max MPT entries per HCA: 1M (= 2^20) + * - avg Regions/windows per QP 8 (= 2^3) + * - avg Segments per Region 8 (= 2^3) + * - avg inflight RDMA per QP 4 (= 2^2) + * + * Calculations are as follows: + * Max UARs = 1 per QP + * Max CQs = 1 per QP + * Max PDs = 1 per QP + * Max Regions/Wins per QP = 8, divided as follows: + * internal regions = 2 per QP (1 for QP, one for CQ) + * external regions = 2 per QP + * windows = 4 per QP + * + * MPT: + * Tavor has a max of 1M regions/windows per HCA, and the MPT size must + * be a power of 2. It is pointless to have fewer than 8 regions/windows per QP + * (as divided up above). This means that the maximum number of QPs allowable, + * regardless of DDR size, is 128K. Therefore, the presence of the "min" function + * in calculating the max number of MPT entries. In effect, the 1M table size limitation + * means that a DDR larger than 256M will only add to the user-available DDR memory, and + * not to the driver's internal tables. + * + * MTT: + * The default MTT size allocated has 2 segments per Region, with a segment size of 8 entries. + * + * MCG: for 128M: 4096 Groups per HCA, with 16 QPs per group (so that entry size is 64 bytes). + * for 256M: 8192 Groups per HCA, with 16 QPs per group (so that entry size is 64 bytes). + * + * NOTES: + * If the profile_user_data is NULL, default values are used. After the profile calculation, + * a check is done to see that all values are within HCA_DEV_LIM values, and that the result + * does not exceed the DDR memory size. If any violations are encountered, the number of QPs + * is reduced by half, and the calculation is redone. 
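+ *
+ * Worked example (derived from the ratios stated above, not measured
+ * output): with 2^16 QPs and 8 regions/windows per QP, MPT entries =
+ * 64K x 8 = 2^19, comfortably under the 1M (2^20) MPT ceiling. MTT
+ * entries = (QPs + CQs + external regions) x 2 segments/region x
+ * 8 entries/segment = (64K + 64K + 128K) x 16 = 2^22 entries.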
+ * + *****************************************************************************/ +static HH_ret_t THH_calculate_profile(THH_hob_t hob, + EVAPI_hca_profile_t *profile_user_data, + EVAPI_hca_profile_t *sugg_profile_p) +{ + u_int8_t log2_host_pg_size; + EVAPI_hca_profile_t local_user_profile; + THH_profile_input_t profile_input_data; + u_int64_t tot_ddr_allocs; + THH_ddr_allocation_vector_t ddr_alloc_vec; + MT_size_t *ddr_alloc_iterator, temp_size; + u_int32_t i; + MT_bool ddr_calc_loop = TRUE, need_to_loop = FALSE; + u_int32_t calc_at_minimum = 0; +// EVAPI_hca_profile_t hca_profile; + + + if (profile_user_data != NULL) { + + memcpy(&local_user_profile, profile_user_data, sizeof(EVAPI_hca_profile_t)); + + /* default value substitutions */ + local_user_profile.num_qp = (local_user_profile.num_qp == 0xFFFFFFFF) ? + THH_PROF_MAX_QPS : local_user_profile.num_qp; + local_user_profile.num_cq = (local_user_profile.num_cq == 0xFFFFFFFF) ? + THH_PROF_MAX_CQS : local_user_profile.num_cq; + local_user_profile.num_pd = (local_user_profile.num_pd == 0xFFFFFFFF) ? + THH_PROF_MAX_PDS : local_user_profile.num_pd; + local_user_profile.num_mr = (local_user_profile.num_mr == 0xFFFFFFFF) ? + THH_PROF_MAX_REGIONS : local_user_profile.num_mr; + local_user_profile.num_mw = (local_user_profile.num_mw == 0xFFFFFFFF) ? + THH_PROF_MAX_WINDOWS : local_user_profile.num_mw; + + local_user_profile.max_qp_ous_rd_atom = (local_user_profile.max_qp_ous_rd_atom == 0xFFFFFFFF) ? + (1 << THH_DDR_LOG2_INFLIGHT_RDMA_PER_QP): + local_user_profile.max_qp_ous_rd_atom; + + local_user_profile.max_mcg = (local_user_profile.max_mcg == 0xFFFFFFFF) ? + (1 << THH_DDR_LOG2_MAX_MCG):local_user_profile.max_mcg; + + local_user_profile.qp_per_mcg = (local_user_profile.qp_per_mcg == 0xFFFFFFFF) ? + (1 << THH_DDR_LOG2_MIN_QP_PER_MCG):local_user_profile.qp_per_mcg; + + if (sugg_profile_p != NULL) { + memcpy(sugg_profile_p, &local_user_profile, sizeof(EVAPI_hca_profile_t)); + } + + profile_input_data.max_qps = local_user_profile.num_qp ; + profile_input_data.max_cqs = local_user_profile.num_cq ; + profile_input_data.max_pds = local_user_profile.num_pd ; + profile_input_data.max_regions = local_user_profile.num_mr ; + profile_input_data.max_windows = local_user_profile.num_mw ; + + profile_input_data.min_qps = (1U<dev_lims.log2_rsvd_qps) + THH_NUM_RSVD_QP + 1; + profile_input_data.min_cqs = (1U<dev_lims.log2_rsvd_cqs) + 1; + profile_input_data.min_pds = hob->dev_lims.num_rsvd_pds + THH_NUM_RSVD_PD + 1; + profile_input_data.min_regions = (1 << hob->dev_lims.log2_rsvd_mtts) + 1; + profile_input_data.min_windows = (1 << hob->dev_lims.log2_rsvd_mrws); + + profile_input_data.reduction_pct_qps = 10; + profile_input_data.reduction_pct_cqs = 10; + profile_input_data.reduction_pct_pds = 10; + profile_input_data.reduction_pct_regions = 10; + profile_input_data.reduction_pct_windows = 10; + + profile_input_data.log2_inflight_rdma_per_qp = ceil_log2(local_user_profile.max_qp_ous_rd_atom); + profile_input_data.log2_max_mcg = ceil_log2(local_user_profile.max_mcg); + profile_input_data.log2_min_qp_per_mcg = ceil_log2(local_user_profile.qp_per_mcg); + + profile_input_data.log2_max_eq = THH_DDR_LOG2_MAX_EQ; + profile_input_data.log2_mcg_hash_proportion = THH_DDR_LOG2_MCG_HASH_PROPORTION; + profile_input_data.log2_mtt_entries_per_seg = THH_DDR_LOG2_MTT_ENTRIES_PER_SEG; + profile_input_data.log2_mtt_segs_per_region = THH_DDR_LOG2_MTT_SEGS_PER_REGION; + profile_input_data.use_priv_udav = THH_USE_PRIV_UDAV; + profile_input_data.log2_wqe_ddr_space_per_qp = 
THH_LOG2_WQE_DDR_SPACE_PER_QP; + /*sanity checks */ + if (check_profile_sanity(hob,&local_user_profile, &profile_input_data) == 0) { + MTL_ERROR1(MT_FLFMT("THH_calculate_profile: user profile not valid")); + return HH_EINVAL_PARAM; + } + } else { + /* use internally defined default values */ + profile_input_data.max_qps = THH_PROF_MAX_QPS; + profile_input_data.max_cqs = THH_PROF_MAX_CQS; + profile_input_data.max_pds = THH_PROF_MAX_PDS; + profile_input_data.max_regions = THH_PROF_MAX_REGIONS; + profile_input_data.max_windows = THH_PROF_MAX_WINDOWS; + profile_input_data.max_priv_udavs = THH_DDR_MAX_PRIV_UDAVS; + + profile_input_data.min_qps = THH_PROF_MIN_QPS; + profile_input_data.min_cqs = THH_PROF_MIN_CQS; + profile_input_data.min_pds = THH_PROF_MIN_PDS; + profile_input_data.min_regions = THH_PROF_MIN_REGIONS; + profile_input_data.min_windows = THH_PROF_MIN_WINDOWS; + + profile_input_data.reduction_pct_qps = THH_PROF_PCNT_REDUCTION_QPS; + profile_input_data.reduction_pct_cqs = THH_PROF_PCNT_REDUCTION_CQS; + profile_input_data.reduction_pct_pds = THH_PROF_PCNT_REDUCTION_PDS; + profile_input_data.reduction_pct_regions = THH_PROF_PCNT_REDUCTION_REGIONS; + profile_input_data.reduction_pct_windows = THH_PROF_PCNT_REDUCTION_WINDOWS; + + profile_input_data.log2_inflight_rdma_per_qp = THH_DDR_LOG2_INFLIGHT_RDMA_PER_QP; + profile_input_data.log2_max_eq = THH_DDR_LOG2_MAX_EQ; + profile_input_data.log2_max_mcg = THH_DDR_LOG2_MAX_MCG; + profile_input_data.log2_min_qp_per_mcg = THH_DDR_LOG2_MIN_QP_PER_MCG; + profile_input_data.log2_mcg_hash_proportion = THH_DDR_LOG2_MCG_HASH_PROPORTION; + profile_input_data.log2_mtt_entries_per_seg = THH_DDR_LOG2_MTT_ENTRIES_PER_SEG; + profile_input_data.log2_mtt_segs_per_region = THH_DDR_LOG2_MTT_SEGS_PER_REGION; + profile_input_data.use_priv_udav = THH_USE_PRIV_UDAV; + profile_input_data.log2_wqe_ddr_space_per_qp = THH_LOG2_WQE_DDR_SPACE_PER_QP; + } + + hob->profile.use_priv_udav = profile_input_data.use_priv_udav; + hob->profile.max_priv_udavs = profile_input_data.max_priv_udavs; + + /* need inflight rdma per QP for rdb size in DDR, and for THH_qpm_create */ + hob->profile.log2_inflight_rdma_per_qp = (u_int8_t) profile_input_data.log2_inflight_rdma_per_qp; + + /* manipulate MCG max if not inputting a profile, or if inputting profile which allows reduction */ + /* Reduce the number of MCGs for smaller DDR memories */ + hob->profile.log2_max_mcgs = profile_input_data.log2_max_mcg; + if ((hob->profile.ddr_size_code < THH_DDR_SIZE_128M) + && ((profile_user_data == NULL)|| (local_user_profile.require == FALSE))) { + hob->profile.log2_max_mcgs--; + } + + log2_host_pg_size = MOSAL_SYS_PAGE_SHIFT; + if (log2_host_pg_size < hob->dev_lims.log_pg_sz) { + MTL_ERROR1("THH_calculate_default_profile: Host min page size(%lu) is too small\n", + (unsigned long ) MOSAL_SYS_PAGE_SIZE); + return HH_EAGAIN; + } + + /* do not allocate DDR memory for MCGs if MCG is not enabled in dev_limits */ + hob->profile.ddr_alloc_vec_size = (THH_DEV_LIM_MCG_ENABLED(hob) ? + THH_DDR_ALLOCATION_VEC_SIZE : THH_DDR_ALLOCATION_VEC_SIZE - 1) ; /* no eec as yet */ + hob->profile.log2_wqe_ddr_space_per_qp = profile_input_data.log2_wqe_ddr_space_per_qp; + + /* MCG calculations - not in recalculation loop, since the amount of memory involved is very small*/ + /* each MCG entry must be a power-of-2 size. To guarantee a power-of-2, we take a "ceiling" log of the */ + /* MCG entry size(in bytes), and then compute the actual number of QPs per mcg backwards from the mcg_size variable. 
*/ + /* We also require (as a sanity check) that the log2_mcg_hash_size be greater than zero */ + if ((THH_DEV_LIM_MCG_ENABLED(hob)) && + ((int)(hob->profile.log2_max_mcgs + profile_input_data.log2_mcg_hash_proportion) > 0)) { + hob->profile.log2_mcg_entry_size = ceil_log2(((1U<<profile_input_data.log2_min_qp_per_mcg) * THH_DDR_MCG_BYTES_PER_QP) + THH_DDR_MCG_ENTRY_HEADER_SIZE); + hob->profile.qps_per_mcg = ( (1U<<(hob->profile.log2_mcg_entry_size)) - THH_DDR_MCG_ENTRY_HEADER_SIZE) / + THH_DDR_MCG_BYTES_PER_QP; + + /* the hash proportion is the log of the power-of-2 fraction of the total MCG entries used for the hash table. */ + /* Thus, for example, a proportion of (1/2) gets a log2_mcg_hash_proportion = -1 */ + hob->profile.log2_mcg_hash_size = hob->profile.log2_max_mcgs + profile_input_data.log2_mcg_hash_proportion; + } else { + /*UD MCGs not available on this HCA*/ + hob->profile.log2_mcg_entry_size = 0; + hob->profile.qps_per_mcg = 0; + hob->profile.log2_mcg_hash_size = 0; + hob->profile.log2_max_mcgs = 0; + } + + hob->profile.log2_mtt_entries_per_seg = profile_input_data.log2_mtt_entries_per_seg; + hob->profile.log2_mtt_segs_per_region = profile_input_data.log2_mtt_segs_per_region; + + hob->profile.log2_uar_pg_size = log2_host_pg_size; + hob->profile.log2_max_uar = hob->dev_lims.uar_sz + 20 - hob->profile.log2_uar_pg_size; +/*** warning C4242: '=' : conversion from 'u_int32_t' to 'u_int8_t', possible loss of data ***/ + hob->profile.log2_max_eqs = (u_int8_t)profile_input_data.log2_max_eq; /* 64 EQs */ + hob->profile.max_num_pds = profile_input_data.max_pds; + + hob->profile.max_num_qps = profile_input_data.max_qps; + + hob->profile.log2_max_qps = ceil_log2(profile_input_data.max_qps+ + (1U<<hob->dev_lims.log2_rsvd_qps) + THH_NUM_RSVD_QP); + + /* adjust max QPs downward (if using internal profile, or if user profile permits) + * if the few reserved QPs cause max qps to go beyond a power-of-2. + */ + + if (hob->profile.log2_max_qps > ceil_log2(profile_input_data.max_qps)) { + MTL_DEBUG1(MT_FLFMT("%s: reserved qps cause profile qps to jump a power-of-2"),__func__); + if ((profile_user_data==NULL) || (local_user_profile.require == FALSE)) { + hob->profile.log2_max_qps--; + hob->profile.max_num_qps = (1U<<hob->profile.log2_max_qps) - (1U<<hob->dev_lims.log2_rsvd_qps) + - THH_NUM_RSVD_QP; + MTL_DEBUG1(MT_FLFMT("%s: Adjusting max qps to "SIZE_T_DFMT),__func__, hob->profile.max_num_qps); + } + } + + /* TBD: Expose max_srqs to profile given by user and use MOD_STAT_CFG */ + if (hob->dev_lims.srq) { + hob->profile.log2_max_srqs = hob->dev_lims.log_max_srqs; + hob->profile.max_num_srqs = + (1U << hob->dev_lims.log_max_srqs) - (1 << hob->dev_lims.log2_rsvd_srqs); + } else { + hob->profile.log2_max_srqs = 0; + hob->profile.max_num_srqs = 0; + } + + hob->profile.max_num_cqs = profile_input_data.max_cqs; + hob->profile.log2_max_cqs = ceil_log2(profile_input_data.max_cqs + + (1U<<hob->dev_lims.log2_rsvd_cqs)); + /* adjust max CQs downward (if using internal profile, or if user profile permits) + * if the few reserved CQs cause max cqs to go beyond a power-of-2. 
+ */ + + if (hob->profile.log2_max_cqs > ceil_log2(profile_input_data.max_cqs)) { + MTL_DEBUG1(MT_FLFMT("%s: reserved cqs cause profile cqs to jump a power-of-2"),__func__); + if ((profile_user_data == NULL) || (local_user_profile.require == FALSE)) { + hob->profile.log2_max_cqs--; + hob->profile.max_num_cqs = (1U<<hob->profile.log2_max_cqs) - (1U<<hob->dev_lims.log2_rsvd_cqs); + MTL_DEBUG1(MT_FLFMT("%s: Adjusting max cqs to "SIZE_T_DFMT),__func__, hob->profile.max_num_cqs); + } + } + hob->profile.num_external_mem_regions = profile_input_data.max_regions; /* 2 per QP */ + hob->profile.num_mem_windows = profile_input_data.max_windows; + hob->profile.log2_max_eecs = 0; + + while (ddr_calc_loop) { + MT_bool continue_calc_loop; + + continue_calc_loop = FALSE; + + MTL_DEBUG4("THH_calculate_profile: max_qps = "SIZE_T_FMT", max_cqs = "SIZE_T_FMT", max_priv_udav="SIZE_T_FMT + ",\nmax_pds="SIZE_T_FMT",max_reg="SIZE_T_FMT", max_win="SIZE_T_FMT"\n", + hob->profile.max_num_qps, hob->profile.max_num_cqs, hob->profile.max_priv_udavs, + hob->profile.max_num_pds, hob->profile.num_external_mem_regions, + hob->profile.num_mem_windows); + + /* add all raw resources without Tavor-reserved quantities */ + temp_size = hob->profile.max_num_qps + THH_NUM_RSVD_QP + + hob->profile.max_num_cqs + hob->profile.num_external_mem_regions; + + hob->profile.log2_max_mtt_entries = ceil_log2( + ( temp_size * (1U<<hob->profile.log2_mtt_segs_per_region) * (1U<<hob->profile.log2_mtt_entries_per_seg) ) + + (1U<<hob->dev_lims.log2_rsvd_mtts) + ); + + /* add all raw resources without Tavor-reserved quantities */ + temp_size = hob->profile.max_num_qps + THH_NUM_RSVD_QP + hob->profile.max_num_cqs + + hob->profile.num_external_mem_regions + hob->profile.num_mem_windows; + + hob->profile.log2_max_mpt_entries = ceil_log2(temp_size + (1 << hob->dev_lims.log2_rsvd_mrws)); + + + if (hob->profile.log2_max_mtt_entries - profile_input_data.log2_mtt_entries_per_seg + > hob->dev_lims.log_max_mtt_seg) { + continue_calc_loop = TRUE; + need_to_loop = TRUE; + } + + if (!continue_calc_loop) { + /* Now, compute the total DDR size, and verify that we have not over-allocated it. 
If yes, reduce QPs by half, and */ + /* recompute all above parameters starting with log2_max_regions */ + calculate_ddr_alloc_vec(hob, &(hob->profile),&ddr_alloc_vec); + + /* Add up all the sizes in the ddr allocation vector */ + tot_ddr_allocs = 0; + ddr_alloc_iterator = (MT_size_t *)&(ddr_alloc_vec); + for (i = 0; i < hob->profile.ddr_alloc_vec_size; i++, ddr_alloc_iterator++) { + if ((*ddr_alloc_iterator) == THH_DDRMM_INVALID_SZ) { + temp_size = 0; /* no allocation */ + } else if ((*ddr_alloc_iterator) >= ceil_log2(hob->profile.ddr_size)) { + temp_size = hob->profile.ddr_size; + } else { + temp_size = (((MT_size_t) 1ul) << (*ddr_alloc_iterator)); + } + MTL_DEBUG4("THH_calculate_profile:DDR: %s = "SIZE_T_XFMT"("SIZE_T_DFMT")\n", + THH_get_ddr_allocation_string(i), temp_size, temp_size); + tot_ddr_allocs += temp_size; + } + + /* see if need to reserve space for WQEs in DDR */ + if (hob->profile.log2_wqe_ddr_space_per_qp != 0) { + temp_size = (((MT_size_t) 1ul) << (hob->profile.log2_max_qps + hob->profile.log2_wqe_ddr_space_per_qp)); + MTL_DEBUG4("THH_calculate_profile: WQEs ddr area = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + temp_size, temp_size); + tot_ddr_allocs += temp_size; + } + + /* see if need to reserve space for privileged UDAVs in DDR */ + if (hob->profile.use_priv_udav) { + temp_size = hob->profile.max_priv_udavs * (sizeof(struct tavorprm_ud_address_vector_st) / 8); + MTL_DEBUG4("THH_calculate_profile: privileged UDAVs ddr area = "SIZE_T_XFMT" ("SIZE_T_DFMT")\n", + temp_size, temp_size); + tot_ddr_allocs += temp_size; + } + + /* test against DDR size */ + MTL_DEBUG4("THH_calculate_profile: total DDR allocs = %d MB (incl reserved areas)\n",(int)(tot_ddr_allocs>>20)); + if ((hob->profile.ddr_size < tot_ddr_allocs) || + ((profile_user_data == NULL) && (hob->profile.max_num_qps>(1U<<16)))){ + /*do not want more than 64K QPs if using internal defaults*/ + continue_calc_loop = TRUE; + need_to_loop = TRUE; + } + + } + if (continue_calc_loop) { + u_int64_t temp; + u_int32_t u32_temp, change_flag; + /* Reduce flagged profile input params by factor of 10 percent */ + change_flag = 0; + if ((calc_at_minimum & THH_PROFILE_CALC_QP_AT_MINIMUM) == 0) { + change_flag++; + temp = (u_int64_t)(hob->profile.max_num_qps) * (100 - profile_input_data.reduction_pct_qps); + /*check for overflow. 
If have overflow, use approximate percentages (divide by 1024) */ + if (temp & MAKE_ULONGLONG(0xFFFFFFFF00000000)) { + temp = (u_int64_t)(hob->profile.max_num_qps) * (1024 - (profile_input_data.reduction_pct_qps*10)); + temp >>= 10; + u32_temp = (u_int32_t)(temp & 0xFFFFFFFF); + } else { + /* use more exact percentages -- but still not floating point */ + u32_temp = (u_int32_t)temp; + u32_temp /= 100; + } + if (u32_temp <= (u_int32_t)profile_input_data.min_qps) { + calc_at_minimum |= THH_PROFILE_CALC_QP_AT_MINIMUM; + u32_temp = profile_input_data.min_qps; + } + hob->profile.max_num_qps = u32_temp; + hob->profile.log2_max_qps = ceil_log2(u32_temp + (1U<<hob->dev_lims.log2_rsvd_qps) + + THH_NUM_RSVD_QP); + } + + + if ((calc_at_minimum & THH_PROFILE_CALC_CQ_AT_MINIMUM) == 0) { + change_flag++; + temp = (u_int64_t)(hob->profile.max_num_cqs) * (100 - profile_input_data.reduction_pct_cqs); + if (temp & MAKE_ULONGLONG(0xFFFFFFFF00000000)) { + temp = (u_int64_t)(hob->profile.max_num_cqs) * (1024 - (profile_input_data.reduction_pct_cqs*10)); + temp >>= 10; + u32_temp = (u_int32_t)(temp & 0xFFFFFFFF); + } else { + /* use more exact percentages -- but still not floating point */ + u32_temp = (u_int32_t)temp; + u32_temp /= 100; + } + if (u32_temp <= (u_int32_t)profile_input_data.min_cqs) { + calc_at_minimum |= THH_PROFILE_CALC_CQ_AT_MINIMUM; + u32_temp = profile_input_data.min_cqs; + } + hob->profile.max_num_cqs = u32_temp; + hob->profile.log2_max_cqs = ceil_log2(u32_temp + (1U<<hob->dev_lims.log2_rsvd_cqs)); + } + + if ((calc_at_minimum & THH_PROFILE_CALC_PD_AT_MINIMUM) == 0) { + change_flag++; + temp = (u_int64_t)(hob->profile.max_num_pds) * (100 - profile_input_data.reduction_pct_pds); + if (temp & MAKE_ULONGLONG(0xFFFFFFFF00000000)) { + temp = (u_int64_t)(hob->profile.max_num_pds) * (1024 - (profile_input_data.reduction_pct_pds*10)); + temp >>= 10; + u32_temp = (u_int32_t)(temp & 0xFFFFFFFF); + } else { + /* use more exact percentages -- but still not floating point */ + u32_temp = (u_int32_t)temp; + u32_temp /= 100; + } + if (u32_temp <= (u_int32_t)profile_input_data.min_pds) { + calc_at_minimum |= THH_PROFILE_CALC_PD_AT_MINIMUM; + u32_temp = profile_input_data.min_pds; + } + hob->profile.max_num_pds = u32_temp; + } + + if ((calc_at_minimum & THH_PROFILE_CALC_REG_AT_MINIMUM) == 0) { + change_flag++; + temp = (u_int64_t)(hob->profile.num_external_mem_regions) * (100 - profile_input_data.reduction_pct_regions); + if (temp & MAKE_ULONGLONG(0xFFFFFFFF00000000)) { + temp = (u_int64_t)(hob->profile.num_external_mem_regions) * (1024 - (profile_input_data.reduction_pct_regions*10)); + temp >>= 10; + u32_temp = (u_int32_t)(temp & 0xFFFFFFFF); + } else { + /* use more exact percentages -- but still not floating point */ + u32_temp = (u_int32_t)temp; + u32_temp /= 100; + } + if (u32_temp <= (u_int32_t)profile_input_data.min_regions) { + calc_at_minimum |= THH_PROFILE_CALC_REG_AT_MINIMUM; + u32_temp = profile_input_data.min_regions; + } + hob->profile.num_external_mem_regions = u32_temp; + } + + if ((calc_at_minimum & THH_PROFILE_CALC_WIN_AT_MINIMUM) == 0) { + change_flag++; + temp = (u_int64_t)(hob->profile.num_mem_windows) * (100 - profile_input_data.reduction_pct_windows); + if (temp & MAKE_ULONGLONG(0xFFFFFFFF00000000)) { + temp = (u_int64_t)(hob->profile.num_mem_windows) * (1024 - (profile_input_data.reduction_pct_windows*10)); + temp >>= 10; + u32_temp = (u_int32_t)(temp & 0xFFFFFFFF); + } else { + /* use more exact percentages -- but still not floating point */ + u32_temp = (u_int32_t)temp; + u32_temp /= 100; + } 
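+ /* The reduction pattern above avoids 32-bit overflow without floating
+  * point: compute n*(100-pct) in 64 bits; if the product no longer fits
+  * in 32 bits (where the /100 division is done), redo it as
+  * n*(1024-pct*10)>>10, an approximation of the same percentage (for
+  * pct=10: 924/1024 = 90.23% vs. the exact 90%). E.g. for
+  * n=4,000,000,000, pct=10 the exact product 360e9 exceeds 32 bits, so
+  * the approximate path yields about 3,609,000,000. */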
+ if (u32_temp <= (u_int32_t)profile_input_data.min_windows) { + calc_at_minimum |= THH_PROFILE_CALC_WIN_AT_MINIMUM; + u32_temp = profile_input_data.min_windows; + } + hob->profile.num_mem_windows = u32_temp; + } + if (hob->profile.log2_inflight_rdma_per_qp > THH_DDR_LOG2_INFLIGHT_RDMA_PER_QP) { + hob->profile.log2_inflight_rdma_per_qp--; + change_flag++; + } + + /* check if we were able to perform any reductions */ + if (change_flag == 0) { + MTL_ERROR1("THH_calculate_default_profile: DDR memory too small for MIN profile\n"); + ddr_alloc_iterator = (MT_size_t *)&(ddr_alloc_vec); + for (i = 0; i < hob->profile.ddr_alloc_vec_size; i++, ddr_alloc_iterator++) { + if ((*ddr_alloc_iterator) == THH_DDRMM_INVALID_SZ) { + temp_size = 0; /* no allocation */ + } else if ((*ddr_alloc_iterator) >= ceil_log2(hob->profile.ddr_size)) { + temp_size = hob->profile.ddr_size; + MTL_ERROR1(MT_FLFMT("THH_calculate_profile: %s uses ALL available DDR memory"), + THH_get_ddr_allocation_string(i)); + } else { + temp_size = (((MT_size_t) 1ul) << (*ddr_alloc_iterator)); + } + MTL_ERROR1(MT_FLFMT("THH_calculate_profile:DDR: %s = "SIZE_T_XFMT"("SIZE_T_DFMT")"), + THH_get_ddr_allocation_string(i), temp_size, temp_size); + } + return HH_EAGAIN; + } + } else { + ddr_calc_loop = FALSE; + } + } + + THH_check_profile(hob); /* final adjustment to catch dev-lim overruns*/ + + /* adjust mcg hash table after final adjustment of mcg size */ + if (THH_DEV_LIM_MCG_ENABLED(hob)) { + hob->profile.log2_mcg_hash_size = (hob->profile.log2_max_mcgs) + profile_input_data.log2_mcg_hash_proportion; +/*** error C4296: '<' : expression is always false ***/ + //if (hob->profile.log2_mcg_hash_size < 0) { + // hob->profile.log2_mcg_hash_size = 0; + //} + } + + THH_PRINT_PROFILE(&(hob->profile)); + MTL_DEBUG4("Leaving THH_calculate_profile\n"); + + if (sugg_profile_p != NULL) { + sugg_profile_p->num_mw = (u_int32_t)hob->profile.num_mem_windows; + sugg_profile_p->num_qp = (u_int32_t)hob->profile.max_num_qps; + sugg_profile_p->num_cq = (u_int32_t)hob->profile.max_num_cqs; + sugg_profile_p->num_pd = (u_int32_t)hob->profile.max_num_pds; + sugg_profile_p->num_mr = (u_int32_t)hob->profile.num_external_mem_regions; + sugg_profile_p->max_qp_ous_rd_atom = (1U<<hob->profile.log2_inflight_rdma_per_qp); + } + if ((profile_user_data != NULL) && (profile_user_data->require != 0) && (need_to_loop == TRUE)) { + MTL_ERROR1("THH_calculate_default_profile: Provided profile requires too many resources\n"); + return HH_ENOMEM; + } + return HH_OK; +} + +/***************************************************************************** +****************************************************************************** +************************ HOB Interface FUNCTIONS **************************** +****************************************************************************** +******************************************************************************/ + +/****************************************************************************** + * Function: THH_hob_close_hca + * + * Description: This function stops HCA hardware activity and frees all associated resources. + * + * input: + * hca_hndl + * output: + * none + * returns: + * HH_OK + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: If any errors occur, continue process of de-allocating resources. 
However, log the errors, + * and return HH_ERR instead of HH_OK + * + *****************************************************************************/ +HH_ret_t THH_hob_close_hca_internal(HH_hca_hndl_t hca_hndl, MT_bool invoked_from_destroy) +{ + /* TBD - Complete function */ + THH_cmd_status_t cmd_ret; + MT_bool have_error = FALSE; + HH_ret_t ret = HH_OK; + MT_phys_addr_t *ddr_alloc_area; + MT_size_t *ddr_alloc_size; + u_int32_t i; + u_int16_t num_ports; + call_result_t res; + THH_hob_t thh_hob_p; + DECLARE_FATAL_VARS; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_close_hca: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_close_hca : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_close_hca : ERROR : HOB is already destroyed\n"); + return HH_ERR; + } + + /* uninterruptible acquire. Want to be sure to clean up */ + MOSAL_mutex_acq_ui(&(thh_hob_p->mtx)); + if (hca_hndl->status == HH_HCA_STATUS_CLOSED) { + MOSAL_mutex_rel(&(thh_hob_p->mtx)); + MTL_ERROR1("THH_hob_close_hca: Device already closed\n"); + return HH_EINVAL_HCA_HNDL; + } + + /* move the HCA to CLOSING state, preserving fatal indicators */ + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + if ((thh_hob_p->thh_state & THH_STATE_RUNNING) == 0) { + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + MOSAL_mutex_rel(&(thh_hob_p->mtx)); + MTL_ERROR1(MT_FLFMT("THH_hob_close_hca: already invoked")); + return HH_EBUSY; + } + thh_hob_p->thh_state &= THH_STATE_HAVE_ANY_FATAL; + thh_hob_p->thh_state |= THH_STATE_CLOSING; + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + + /* transfer to closing state */ + WAIT_IF_FATAL(thh_hob_p); + if (have_fatal == FALSE) { + num_ports = thh_hob_p->dev_lims.num_ports; + for (i = 1; i <= num_ports; i++) { + cmd_ret = THH_cmd_CLOSE_IB(thh_hob_p->cmd, (IB_port_t) i); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_cmd_CLOSE_IB error (%d)\n", cmd_ret); + have_error = TRUE; + } + } + } + /* test if a fatal error occurred during CLOSE_IB. */ + if (have_fatal == FALSE) { + WAIT_IF_FATAL(thh_hob_p); + } + thh_hob_p->compl_eq = THH_INVALID_EQN; + thh_hob_p->ib_eq = THH_INVALID_EQN; + + /* destroy eventq mgr. 
Event manager must destroy all EQs */ + ret = THH_eventp_destroy( thh_hob_p->eventp ); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_eventp_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->eventp = (THH_eventp_t)THH_INVALID_HNDL; + + if (thh_hob_p->mcgm != (THH_mcgm_t)THH_INVALID_HNDL) { + ret = THH_mcgm_destroy(thh_hob_p->mcgm ); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_mcgm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->mcgm = (THH_mcgm_t)THH_INVALID_HNDL; + } + + MTL_DEBUG4("%s: calling MOSAL_unmap_phys_addr FOR KAR = " VIRT_ADDR_FMT "\n", __func__, + (MT_virt_addr_t) thh_hob_p->kar_addr); + if ((res = (MOSAL_unmap_phys_addr(MOSAL_get_kernel_prot_ctx(), (MT_virt_addr_t) thh_hob_p->kar_addr, + ((MT_size_t)1 << thh_hob_p->profile.log2_uar_pg_size)))) != MT_OK) { + MTL_ERROR1("THH_hob_close_hca: MOSAL_unmap_phys_addr error for kar: %d\n", res); + have_error = TRUE; + } + thh_hob_p->kar_addr = (MT_virt_addr_t) 0; + + ret = THH_uar_destroy(thh_hob_p->kar); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_uar_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->kar = (THH_uar_t)THH_INVALID_HNDL; + + + if (thh_hob_p->udavm_use_priv) { + ret = THH_udavm_destroy(thh_hob_p->udavm); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_udavm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->udavm = (THH_udavm_t)THH_INVALID_HNDL; + + ret = THH_mrwm_deregister_mr(thh_hob_p->mrwm, thh_hob_p->udavm_lkey); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_mrwm_deregister_mr error (%d)\n", ret); + have_error = TRUE; + } + + if ((res = MOSAL_unmap_phys_addr( MOSAL_get_kernel_prot_ctx(), + (MT_virt_addr_t) thh_hob_p->udavm_table , + thh_hob_p->udavm_table_size )) != MT_OK) { + MTL_ERROR1("THH_hob_close_hca: MOSAL_unmap_phys_addr error for udavm: %d\n", res); + have_error = TRUE; + } + thh_hob_p->udavm_table = (MT_virt_addr_t) NULL; + + ret = THH_ddrmm_free(thh_hob_p->ddrmm, thh_hob_p->udavm_table_ddr, thh_hob_p->udavm_table_size); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_ddrmm_free error (%d)\n", ret); + have_error = TRUE; + } + + } + thh_hob_p->udavm = (THH_udavm_t)THH_INVALID_HNDL; + thh_hob_p->udavm_table = (MT_virt_addr_t) NULL; + thh_hob_p->udavm_table_ddr = (MT_phys_addr_t) 0; + thh_hob_p->udavm_table_size = 0; + thh_hob_p->udavm_lkey = 0; + + if (thh_hob_p->srqm != (THH_srqm_t)THH_INVALID_HNDL) { /* SRQs are supported - SRQM exists */ + ret = THH_srqm_destroy( thh_hob_p->srqm); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_srqm_destroy error %s(%d)\n", HH_strerror_sym(ret), ret); + have_error = TRUE; + } + thh_hob_p->srqm = (THH_srqm_t)THH_INVALID_HNDL; + } + + ret = THH_qpm_destroy( thh_hob_p->qpm, have_error); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_qpm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->qpm = (THH_qpm_t)THH_INVALID_HNDL; + + FREE(thh_hob_p->init_ib_props); + thh_hob_p->init_ib_props = ( THH_port_init_props_t *) NULL; + + ret = THH_cqm_destroy( thh_hob_p->cqm, have_error); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_cqm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->cqm = (THH_cqm_t)THH_INVALID_HNDL; + + ret = THH_mrwm_destroy(thh_hob_p->mrwm, have_error); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_mrwm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->mrwm = (THH_mrwm_t)THH_INVALID_HNDL; + + ret = 
THH_uldm_destroy(thh_hob_p->uldm ); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_uldm_destroy error (%d)\n", ret); + have_error = TRUE; + } + thh_hob_p->uldm = (THH_uldm_t)THH_INVALID_HNDL; + + ret = THH_cmd_revoke_ddrmm(thh_hob_p->cmd); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_cmd_revoke_ddrmm error (%d)\n", ret); + have_error = TRUE; + } + + ddr_alloc_area = (MT_phys_addr_t *) &(thh_hob_p->ddr_alloc_base_addrs_vec); + ddr_alloc_size = (MT_size_t *)&(thh_hob_p->ddr_alloc_size_vec); + for (i = 0; i < thh_hob_p->profile.ddr_alloc_vec_size; i++, ddr_alloc_area++, ddr_alloc_size++) { + ret = *ddr_alloc_area != THH_DDRMM_INVALID_PHYS_ADDR ? + THH_ddrmm_free(thh_hob_p->ddrmm,*ddr_alloc_area, ((MT_size_t)1<< (*ddr_alloc_size))) : HH_OK; + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_ddrmm_free error (%d). i = %d\n", ret, i); + have_error = TRUE; + } + } + + /* test for fatal again here */ + if (have_fatal == FALSE) { + WAIT_IF_FATAL(thh_hob_p); + } + if (have_fatal == FALSE) { + MTL_TRACE1("THH_hob_close_hca: Performing THH_cmd_CLOSE_HCA (no fatal)\n"); +#ifdef SIMULATE_HALT_HCA + cmd_ret = THH_cmd_CLOSE_HCA(thh_hob_p->cmd); +#else + cmd_ret = THH_cmd_CLOSE_HCA(thh_hob_p->cmd, FALSE); +#endif + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_cmd_CLOSE_HCA error (%d)\n", cmd_ret); + have_error = TRUE; + } + } + hca_hndl->status = HH_HCA_STATUS_CLOSED; + + /* move state to "CLOSED"*/ + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + thh_hob_p->thh_state &= THH_STATE_HAVE_ANY_FATAL; + thh_hob_p->thh_state |= THH_STATE_CLOSED; + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + MOSAL_mutex_rel(&(thh_hob_p->mtx)); + + MTL_TRACE2("THH_hob_close_hca: device name %s\n", hca_hndl->dev_desc); + + if (have_fatal == FALSE) { + WAIT_IF_FATAL(thh_hob_p); + } + if ((have_fatal == TRUE) && (invoked_from_destroy == FALSE)) { + // Leo: (WINDOWS) restart doen't work because the card reset doesn't work + #ifndef __WIN__ + MTL_TRACE1("THH_hob_close_hca: HAVE FATAL, restarting\n"); + ret = THH_hob_restart(hca_hndl); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_close_hca: THH_hob_restart error (%d)\n", ret); + have_error = TRUE; + } + #endif + } + if (have_error && (have_fatal == FALSE)) { + return HH_ERR; + } else { + return(HH_OK); + } +} /* THH_hob_close_hca_internal */ + +HH_ret_t THH_hob_close_hca(HH_hca_hndl_t hca_hndl) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + return (THH_hob_close_hca_internal(hca_hndl,FALSE)); +} + + +/****************************************************************************** + * Function: THH_hob_destroy + * + * Description: Deregister given device from HH and free the HH object. + * + * input: + * hca_hndl + * output: + * none + * returns: + * HH_OK + * HH_EINVAL_HCA_HNDL + * + * Comments: If HCA is still open,THH_hob_close_hca() is + * invoked before freeing the THH_hob. 
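+ *
+ * The state change at the top of this function uses the same locked
+ * read-modify-write pattern applied throughout this file. As a sketch
+ * (the THH_STATE_* values are bit flags, and THH_STATE_HAVE_ANY_FATAL
+ * masks the fatal-indicator bits that must survive every transition):
+ *
+ *   MOSAL_spinlock_dpc_lock(&hob_p->fatal_spl);
+ *   hob_p->thh_state &= THH_STATE_HAVE_ANY_FATAL;   (keep fatal bits)
+ *   hob_p->thh_state |= THH_STATE_DESTROYING;       (enter the new state)
+ *   MOSAL_spinlock_unlock(&hob_p->fatal_spl);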
+ * + * Returns HH_EINVAL_HCA_HNDL if any function called internally fails + * + *****************************************************************************/ +HH_ret_t THH_hob_destroy(HH_hca_hndl_t hca_hndl) +{ + HH_ret_t ret, fn_ret = HH_OK; + THH_cmd_status_t cmd_ret; + THH_hob_t hob_p; + int int_ret = 0; +#if !defined(__DARWIN__) && !defined(__WIN__) + MT_virt_addr_t va; +#endif + call_result_t mosal_ret; + MT_bool have_fatal = FALSE; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_destroy: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_destroy : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + + /* return ERROR if device is still open */ + if (hca_hndl->status != HH_HCA_STATUS_CLOSED) { + MTL_ERROR1("THH_hob_destroy: Unloading device %s: while it is still open. Attempting to close it.\n", hca_hndl->dev_desc); + ret = THH_hob_close_hca_internal(hca_hndl, TRUE); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_destroy: Could not close device %s: not opened or unknown (err=%d)\n", hca_hndl->dev_desc, ret); + fn_ret = HH_EINVAL_HCA_HNDL; + } + hca_hndl->status = HH_HCA_STATUS_CLOSED; + } + + MTL_TRACE2("THH_hob_destroy: removing the device %s\n", hca_hndl->dev_desc); + hob_p = THHOBP(hca_hndl); + if (hob_p == NULL) { + MTL_ERROR1("THH_hob_destroy : ERROR : HOB is already destroyed\n"); + return HH_ERR; + } + + + /* move the HCA to DESTROYING state, preserving fatal indicators */ + MOSAL_spinlock_dpc_lock(&hob_p->fatal_spl); + if ((hob_p->thh_state & THH_STATE_DESTROYING) != 0) { + MOSAL_spinlock_unlock(&hob_p->fatal_spl); + MTL_ERROR1(MT_FLFMT("THH_hob_destroy: already invoked")); + return HH_EBUSY; + } + hob_p->thh_state &= THH_STATE_HAVE_ANY_FATAL; + hob_p->thh_state |= THH_STATE_DESTROYING; + MOSAL_spinlock_unlock(&hob_p->fatal_spl); + +#ifndef __DARWIN__ /* TODO: add support in darwin for fatal error handling */ + + /* release the fatal signalling thread */ + hob_p->fatal_thread_obj.have_fatal = FALSE; + MOSAL_syncobj_signal(&hob_p->fatal_thread_obj.fatal_err_sync); + mosal_ret = MOSAL_syncobj_waiton(&(hob_p->fatal_thread_obj.stop_sync), 10000000); + if (mosal_ret != MT_OK) { + if (mosal_ret == MT_EINTR) { + MTL_DEBUG1(MT_FLFMT("%s: Received OS interrupt while initializing fatal error thread (err = %d)"), + __func__,mosal_ret); + fn_ret = HH_EINTR; + } else { + MTL_ERROR1(MT_FLFMT("%s: Timeout on destroying fatal error thread (err = %d)"), + __func__,mosal_ret); + fn_ret = HH_ERR; + } + } + +#endif /* not defined __DARWIN__ */ + + /* unregister the device from HH */ + ret = HH_rem_hca_dev(hca_hndl); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_destroy: Could not remove device 0x%p: unknown (%d)\n", hca_hndl, ret); + fn_ret = HH_EINVAL_HCA_HNDL; + } + + /* destroy objects created in hob_create, and issue SYS_DIS command to Tavor */ + ret = THH_ddrmm_destroy(hob_p->ddrmm); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_destroy: Could not destroy ddrmm object (err = %d)\n", ret); + fn_ret = HH_ERR; + } + /* do SYS_DIS only if do not have a fatal error state */ + if ((hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) == 0) { + have_fatal = FALSE; + cmd_ret = THH_cmd_SYS_DIS(hob_p->cmd); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_destroy: SYS_DIS command failed (err = %d)\n", cmd_ret); + if (cmd_ret == THH_CMD_STAT_EFATAL) { + have_fatal = TRUE; + } + fn_ret = HH_ERR; + } + } else { + /* halt the HCA if delayed-halt flag was set, in fatal case only, + * to 
make sure that there is no PCI activity. If fatal occurred during SYS_DIS above, + * HCA was already closed, so don't need the "halt hca" operation + */ + have_fatal = TRUE; + if (hob_p->module_flags.fatal_delay_halt != 0) { + MTL_DEBUG1(MT_FLFMT("%s: performing delayed halt-hca"), __func__); + THH_hob_halt_hca(hob_p); + } + } + + ret = THH_cmd_destroy(hob_p->cmd); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_destroy: Could not destroy cmd object (err = %d)\n", ret); + fn_ret = HH_ERR; + } + + /* do PCI reset here if have a catastrophic error */ + if (have_fatal == TRUE) { + /* perform sw reset */ + MTL_ERROR1(MT_FLFMT("%s: FATAL ERROR "), __func__); + +#if !defined(__DARWIN__) && !defined(__WIN__) +#if 0 /*defined(__WIN__)*/ + if (hob_p->pci_hca_info.is_valid == TRUE) { + MOSAL_reset_card( hob_p->pci_hca_info.bus, hob_p->pci_hca_info.dev_func ); + } +#else + /* Do the Tavor RESET */ + va = MOSAL_io_remap(hob_p->hw_props.cr_base + 0xF0010, 4); + if ( va ) { + /* perform sw reset */ + MTL_ERROR1(MT_FLFMT("%s: PERFORMING SW RESET. pa="PHYS_ADDR_FMT" va="VIRT_ADDR_FMT), + __func__, hob_p->hw_props.cr_base + 0xF0010, va); + MOSAL_MMAP_IO_WRITE_DWORD(((unsigned long)va),MOSAL_cpu_to_be32(0x00000001)); + /* sleep for one second, per PRM */ + MOSAL_delay_execution(1000000); + MOSAL_io_unmap(va); + } + + /* now, rewrite the PCI configuration */ + if (hob_p->pci_bridge_info.is_valid == TRUE) { + write_pci_config(hob_p->pci_bridge_info.bus, hob_p->pci_bridge_info.dev_func, + hob_p->pci_bridge_info.config); + } + if (hob_p->pci_hca_info.is_valid == TRUE) { + write_pci_config(hob_p->pci_hca_info.bus, hob_p->pci_hca_info.dev_func, + hob_p->pci_hca_info.config); + } +#endif /* defined __WIN__ */ +#endif /* not defined __DARWIN__ */ + } + + int_ret = VIP_delay_unlock_destroy(hob_p->delay_unlocks); + if (int_ret != 0) { + MTL_ERROR1("THH_hob_destroy: Could not destroy delay_unlocks (err = %d)\n", int_ret); + fn_ret = HH_ERR; + } + + if (hob_p->fw_error_buf_start_va != (MT_virt_addr_t)(MT_ulong_ptr_t) NULL) { + MOSAL_io_unmap(hob_p->fw_error_buf_start_va); + } + + if (hob_p->fw_error_buf != NULL) { + FREE(hob_p->fw_error_buf); + } + + MOSAL_mutex_free(&(hob_p->mtx)); + /* Finally, free the THH object */ + FREE(hca_hndl->device); + hca_hndl->device = NULL; + + return(fn_ret); +} + +/****************************************************************************** + * Function: THH_hob_open_hca + * + * Description: This function opens the given HCA and initializes the HCA with + * given properties/ capabilities. if prop_props_p is NULL a default + * HCA profile will be set up. + * + * input: + * hca_hndl + * prop_props_p - Proprietary properties (Non IB) + * output: + * none + * returns: + * HH_OK + * HH_EINVAL + * HH_EBUSY + * + * Comments: If HCA is still open,THH_hob_close_hca() is + * invoked before freeing the THH_hob. 
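+ *
+ * A minimal usage sketch (illustrative only; it assumes hca_hndl was
+ * registered earlier via THH_hob_create, and requests the default
+ * profile by passing NULL for prop_props_p):
+ *
+ *   EVAPI_hca_profile_t sugg;
+ *   HH_ret_t rc = THH_hob_open_hca(hca_hndl, NULL, &sugg);
+ *   if (rc != HH_OK) {
+ *       ... sugg reflects the profile THH_calculate_profile settled on ...
+ *   }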
+ * + * Returns HH_EINVAL_HCA_HNDL if any function called internally fails + * + *****************************************************************************/ +HH_ret_t THH_hob_open_hca(HH_hca_hndl_t hca_hndl, + EVAPI_hca_profile_t *prop_props_p, + EVAPI_hca_profile_t *sugg_profile_p) +{ + MT_virt_addr_t kar_addr; + HH_ret_t ret; + THH_cmd_status_t cmd_ret; + THH_hca_props_t local_hca_props; + MT_phys_addr_t *ddr_alloc_area; + MT_size_t *ddr_alloc_size; + u_int32_t i; + THH_internal_mr_t udav_internal_mr; + MT_size_t udav_entry_size = 0, udav_table_size = 0; + MT_phys_addr_t udav_phys_addr = 0; + MT_virt_addr_t udav_virt_addr = 0; + VAPI_lkey_t dummy_key; + THH_eventp_res_t event_res; + u_int16_t num_ports, last_port_initialized; + THH_hob_t thh_hob_p; + VAPI_size_t udav_vapi_size; + THH_qpm_init_t thh_qpm_init_params; + MT_bool have_fatal = FALSE; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + MTL_DEBUG4("Entering THH_hob_open_hca\n"); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_open_hca: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_open_hca : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_open_hca: ERROR : No device registered\n"); + return HH_EAGAIN; + } + /* Get user profile if available -- if not, use default proportionally to resources. */ + + /* DDR parameters. Get from default profile. For each one, check that does not exceed */ + /* The maximum resource value supportable by the installed card. */ + +/* + Objects: + cmd; -- already exists + ddrmm; -- already exists + uldm; -- uar, pd: log2_max_uar, log2_max_pg_sz, max_pd + mrwm; -- log2_mpt_sz (log2 of number of entries in MPT) + log2_mtt_sz (Log2 of number of entries in the MTT) + max_mem_reg (Maximum memory regions to be allocated in the MPT for external registration only) + max_mem_reg_internal (Maximum memory regions to be alloc in the MPT for internal use only (WQEs and CQEs buffers) ) + max_mem_win (Maximum memory windows to be allocated in the MPT) + cqm; -- log2_max_cq, + eecm; -- + qpm; -- log2_max_qp, + privileged_ud_av (boolean) + udavm; -- max_av + mcgm; -- num mcg's: IBTA min = 512, 8 QPs/group + eventp; -- 64 event queues + kar; -- UAR 0 (no extra resources needed) + +*/ + + /* Test if have fatal error, and move thh_state to "opening" */ + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + if ((thh_hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { + /* already in FATAL state */ + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + MTL_DEBUG4(MT_FLFMT("%s: already in FATAL state"), __func__); + MT_RETURN(HH_EFATAL); + } else if (thh_hob_p->thh_state != THH_STATE_CLOSED) { + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + MTL_ERROR1(MT_FLFMT("THH_hob_open_hca: ERROR : Device not closed. state = 0x%x"),thh_hob_p->thh_state ); + MT_RETURN(HH_EBUSY); + } + thh_hob_p->thh_state = THH_STATE_OPENING; + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + + /* get the MUTEX */ + if (MOSAL_mutex_acq(&(thh_hob_p->mtx), TRUE) != MT_OK) { + MTL_ERROR1(MT_FLFMT("THH_hob_open_hca: received signal. 
returning")); + ret = HH_EINTR; + goto post_state_change_error; + } + if (hca_hndl->status == HH_HCA_STATUS_OPENED) { + MTL_ERROR1("THH_hob_open_hca: ERROR : Device already open\n"); + ret = HH_EBUSY; + goto post_mutex_acquire_err; + } + + if (prop_props_p != NULL) { + THH_PRINT_USR_PROFILE(prop_props_p); + } + ret = THH_calculate_profile(thh_hob_p, prop_props_p, sugg_profile_p); + if (ret != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_hob_open_hca: could not create internal profile (%d)"), ret); + if (sugg_profile_p != NULL) { + THH_PRINT_USR_PROFILE(sugg_profile_p); + } + //ret = HH_ERR; + goto post_mutex_acquire_err; + } + if (sugg_profile_p != NULL) { + THH_PRINT_USR_PROFILE(sugg_profile_p); + } + + /* check profile against QUERY_DEV_LIMS data*/ + ret = THH_check_profile(thh_hob_p); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: Profile check failed (%d)\n", ret); + ret = HH_ERR; + goto post_mutex_acquire_err; + } + + /* Do ddrmm allocation here, because we need the allocated MCG base address */ + /* for the INIT_HCA command following the centralized DDR allocation */ + calculate_ddr_alloc_vec(thh_hob_p, &(thh_hob_p->profile), &(thh_hob_p->ddr_alloc_size_vec)); + + /* Allocate all required DDR areas */ + ret = THH_ddrmm_alloc_sz_aligned(thh_hob_p->ddrmm, + thh_hob_p->profile.ddr_alloc_vec_size, /*number of chunks */ + (MT_size_t *) &(thh_hob_p->ddr_alloc_size_vec), /* IN */ + (MT_phys_addr_t *)&(thh_hob_p->ddr_alloc_base_addrs_vec) ); /* OUT */ + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not allocate required areas in DDR (%s)\n", HH_strerror_sym(ret)); + ret = HH_ERR; + goto post_mutex_acquire_err; + } + + /* call cmd interface to initialize its mailboxes in DDR */ + ret = THH_cmd_assign_ddrmm(thh_hob_p->cmd, thh_hob_p->ddrmm); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: Failed THH_cmd_assign_ddrmm (%s)\n",HH_strerror_sym(ret)); + goto cmd_assign_ddrmm_err; + } + + /* set up parameters for INIT HCA */ + memset(&local_hca_props, 0, sizeof(THH_hca_props_t)); +#ifdef MT_LITTLE_ENDIAN + local_hca_props.he = TAVOR_IF_HOST_LTLENDIAN; +#else + local_hca_props.he = TAVOR_IF_HOST_BIGENDIAN; +#endif + local_hca_props.re = FALSE; /* not a router */ + local_hca_props.udp = TRUE; /* check port in UD AV */ + local_hca_props.ud = thh_hob_p->profile.use_priv_udav; + + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.cqc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.cqc_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.eec_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.eec_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.eqc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.eqc_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.qpc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.qpc_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.rdb_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.rdb_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_cq = + (u_int8_t)thh_hob_p->profile.log2_max_cqs; + // local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee = thh_hob_p->profile.log2_max_eecs; + 
local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee = 0; + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_eq = thh_hob_p->profile.log2_max_eqs; + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_qp = (u_int8_t)thh_hob_p->profile.log2_max_qps; + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.eqpc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.eqpc_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_srq = (u_int8_t)thh_hob_p->profile.log2_max_srqs; + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.srqc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.srqc_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.qpc_eec_cqc_eqc_rdb_parameters.eeec_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.eeec_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + + local_hca_props.udavtable_memory_parameters.l_key = THH_UDAVM_PRIV_RESERVED_LKEY; + local_hca_props.udavtable_memory_parameters.pd = THH_RESERVED_PD; + local_hca_props.udavtable_memory_parameters.xlation_en = TRUE; + + local_hca_props.tpt_parameters.log_mpt_sz = (u_int8_t)thh_hob_p->profile.log2_max_mpt_entries; + local_hca_props.tpt_parameters.mpt_base_adr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.mpt_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.tpt_parameters.mtt_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.mtt_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.tpt_parameters.pfto = 0; /* TBD -- not yet supported. Page Fault RNR Timeout */ + local_hca_props.tpt_parameters.mtt_segment_size = (u_int8_t)((thh_hob_p->profile.log2_mtt_entries_per_seg + THH_DDR_LOG2_MTT_ENTRY_SIZE) + - THH_DDR_LOG2_MIN_MTT_SEG_SIZE); + + local_hca_props.uar_parameters.uar_base_addr = thh_hob_p->hw_props.uar_base; + local_hca_props.uar_parameters.uar_page_sz = thh_hob_p->profile.log2_uar_pg_size - 12; + local_hca_props.uar_parameters.uar_scratch_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.uar_scratch_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + + local_hca_props.multicast_parameters.log_mc_table_sz = (u_int8_t)thh_hob_p->profile.log2_max_mcgs; + local_hca_props.multicast_parameters.mc_base_addr = + GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.mcg_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + local_hca_props.multicast_parameters.mc_hash_fn = 0; + local_hca_props.multicast_parameters.log_mc_table_entry_sz = (u_int16_t)(thh_hob_p->profile.log2_mcg_entry_size); + local_hca_props.multicast_parameters.mc_table_hash_sz = 1 << (thh_hob_p->profile.log2_mcg_hash_size); + + /* INIT_HCA command */ + cmd_ret = THH_cmd_INIT_HCA(thh_hob_p->cmd,&local_hca_props); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_open_hca: CMD_error in THH_cmd_INIT_HCA (%d)\n", cmd_ret); + ret = HH_EAGAIN; + goto init_hca_err; + } + + /* Now, query HCA to get actual allocated parameters */ + cmd_ret = THH_cmd_QUERY_HCA(thh_hob_p->cmd, &(thh_hob_p->hca_props)); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_open_hca: CMD_error in THH_cmd_QUERY_HCA (%d)\n", cmd_ret); + ret = HH_EAGAIN; + goto query_hca_err; + } + + /* create uldm */ + ret = THH_uldm_create(thh_hob_p, thh_hob_p->hw_props.uar_base, (u_int8_t) thh_hob_p->profile.log2_max_uar, + (u_int8_t) 
thh_hob_p->profile.log2_uar_pg_size, + (u_int32_t) (thh_hob_p->profile.max_num_pds + thh_hob_p->dev_lims.num_rsvd_pds + THH_NUM_RSVD_PD), + &(thh_hob_p->uldm)); if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create uldm (%d)\n", ret); + goto uldm_create_err; + } + + thh_hob_p->mrwm_props.mtt_base = GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.mtt_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + thh_hob_p->mrwm_props.mpt_base = GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.mpt_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr); + thh_hob_p->mrwm_props.log2_mpt_sz = (u_int8_t)thh_hob_p->profile.log2_max_mpt_entries; + thh_hob_p->mrwm_props.log2_mtt_sz = (u_int8_t)thh_hob_p->profile.log2_max_mtt_entries; + thh_hob_p->mrwm_props.log2_mtt_seg_sz = (u_int8_t)thh_hob_p->profile.log2_mtt_entries_per_seg; + thh_hob_p->mrwm_props.max_mem_reg = thh_hob_p->profile.num_external_mem_regions; + thh_hob_p->mrwm_props.max_mem_reg_internal = thh_hob_p->profile.max_num_qps + thh_hob_p->profile.max_num_cqs; + thh_hob_p->mrwm_props.max_mem_win = thh_hob_p->profile.num_mem_windows; + thh_hob_p->mrwm_props.log2_max_mtt_segs = (u_int8_t)(thh_hob_p->profile.log2_max_mtt_entries - + thh_hob_p->mrwm_props.log2_mtt_seg_sz); + thh_hob_p->mrwm_props.log2_rsvd_mpts = thh_hob_p->dev_lims.log2_rsvd_mrws; + thh_hob_p->mrwm_props.log2_rsvd_mtt_segs = thh_hob_p->dev_lims.log2_rsvd_mtts; + + ret = THH_mrwm_create(thh_hob_p, &(thh_hob_p->mrwm_props), &(thh_hob_p->mrwm)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create mrwm (%d)\n", ret); + goto mrwm_create_err; + } + + /* Create objects */ + ret = THH_cqm_create(thh_hob_p, (u_int8_t) thh_hob_p->profile.log2_max_cqs, + thh_hob_p->dev_lims.log2_rsvd_cqs, &(thh_hob_p->cqm)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create cqm (%d)\n", ret); + goto cqm_create_err; + } + + /* create qpm object here */ + + /* initialize INIT_IB parameters here for possible use by qpm */ + num_ports = thh_hob_p->dev_lims.num_ports; + + thh_hob_p->init_ib_props = (THH_port_init_props_t *) MALLOC(num_ports * sizeof(THH_port_init_props_t)); + if (!(thh_hob_p->init_ib_props)) { + MTL_ERROR1( "THH_hob_open_hca: ERROR : cannot allocate memory for port init props)\n"); + goto init_ib_props_malloc_err; + } + for (i = 1; i <= num_ports; i++) { + /* redundant for now. 
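+    (all ports currently receive identical INIT_IB properties, taken
+    from the QUERY_DEV_LIM maxima, as listed below.)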
However, leaving option for setting different properties per port */ + thh_hob_p->init_ib_props[i-1].e = TRUE; + thh_hob_p->init_ib_props[i-1].g0 = FALSE; + thh_hob_p->init_ib_props[i-1].max_gid = (1 << (thh_hob_p->dev_lims.log_max_gid)); + thh_hob_p->init_ib_props[i-1].mtu_cap = thh_hob_p->dev_lims.max_mtu; + thh_hob_p->init_ib_props[i-1].max_pkey = (1 << (thh_hob_p->dev_lims.log_max_pkey)); + thh_hob_p->init_ib_props[i-1].vl_cap = thh_hob_p->dev_lims.max_vl; + thh_hob_p->init_ib_props[i-1].port_width_cap = thh_hob_p->dev_lims.max_port_width; + } + + memset(&(thh_qpm_init_params), 0, sizeof(thh_qpm_init_params)); + thh_qpm_init_params.rdb_base_index = + /* 32 low-order bits, right-shifted by log of size of rdb entry */ + (u_int32_t)(((u_int64_t)(GET_DDR_ADDR(thh_hob_p->ddr_alloc_base_addrs_vec.rdb_base_addr,thh_hob_p->ddr_props.dh, + thh_hob_p->ddr_props.ddr_start_adr))) & (u_int64_t)0xFFFFFFFF ) + >> THH_DDR_LOG2_RDB_ENTRY_SIZE; + thh_qpm_init_params.log2_max_qp = (u_int8_t) thh_hob_p->profile.log2_max_qps; + thh_qpm_init_params.log2_max_outs_rdma_atom = thh_hob_p->profile.log2_inflight_rdma_per_qp; + thh_qpm_init_params.log2_max_outs_dst_rd_atom = thh_hob_p->dev_lims.log_max_ra_req_qp; + thh_qpm_init_params.n_ports = (u_int8_t)num_ports; + thh_qpm_init_params.port_props = + (thh_hob_p->module_flags.legacy_sqp == TRUE ? NULL : thh_hob_p->init_ib_props ); + thh_qpm_init_params.log2_rsvd_qps = thh_hob_p->dev_lims.log2_rsvd_qps; + + ret = THH_qpm_create(thh_hob_p, &(thh_qpm_init_params), &(thh_hob_p->qpm)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create qpm %s(%d)\n", HH_strerror_sym(ret), ret); + goto qpm_create_err; + } + + if (thh_hob_p->dev_lims.srq) { + ret= THH_srqm_create(thh_hob_p, + (u_int8_t)thh_hob_p->profile.log2_max_srqs, thh_hob_p->dev_lims.log2_rsvd_srqs, + &(thh_hob_p->srqm)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create srqm - %s(%d)\n", HH_strerror_sym(ret), ret); + goto srqm_create_err; + } + } else { + thh_hob_p->srqm= (THH_srqm_t)THH_INVALID_HNDL; /* SRQs are not supported */ + } + + /* CREATE ALL CONTAINED OBJECTS */ + + /* create UDAVm if privileged UDAV is set */ + if (thh_hob_p->profile.use_priv_udav) { + + thh_hob_p->udavm_use_priv = TRUE; + + /* create the table in DDR memory */ + udav_entry_size = (unsigned)(sizeof(struct tavorprm_ud_address_vector_st) / 8); + udav_table_size = thh_hob_p->profile.max_priv_udavs * udav_entry_size; + + ret = THH_ddrmm_alloc(thh_hob_p->ddrmm, udav_table_size, ceil_log2(udav_entry_size), + &udav_phys_addr); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not allocate protected udavm area in DDR(err = %d)\n", ret); + goto udavm_ddrmm_alloc_err; + } + udav_virt_addr = (MT_virt_addr_t) MOSAL_map_phys_addr( udav_phys_addr , udav_table_size, + MOSAL_MEM_FLAGS_NO_CACHE | MOSAL_MEM_FLAGS_PERM_WRITE | MOSAL_MEM_FLAGS_PERM_READ , + MOSAL_get_kernel_prot_ctx()); + if (udav_virt_addr == (MT_virt_addr_t) NULL) { + MTL_ERROR1("THH_hob_open_hca: could not map physical address " PHYS_ADDR_FMT " to virtual\n", + udav_phys_addr); + goto udavm_mosal_map_err; + } + + memset(&udav_internal_mr, 0, sizeof(udav_internal_mr)); + udav_internal_mr.force_memkey = TRUE; + udav_internal_mr.memkey = THH_UDAVM_PRIV_RESERVED_LKEY; + udav_internal_mr.pd = THH_RESERVED_PD; + udav_internal_mr.size = udav_table_size; + udav_internal_mr.start = udav_virt_addr; + udav_internal_mr.vm_ctx = MOSAL_get_kernel_prot_ctx(); + if (udav_phys_addr) { + VAPI_phy_addr_t udav_phy = udav_phys_addr; + 
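+ /* The UDAV table was carved out of device-attached DDR above, so it is
+  * registered as a single physical buffer: per the field comments below,
+  * a non-zero num_bufs directs THH_mrwm_register_internal to use
+  * phys_buf_lst/buf_sz_lst instead of translating 'start' as a virtual
+  * address range. */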
udav_internal_mr.num_bufs = 1; /* != 0 iff physical buffesrs supplied */ + udav_internal_mr.phys_buf_lst = &udav_phy; /* size = num_bufs */ + udav_vapi_size = (VAPI_size_t) udav_table_size; + udav_internal_mr.buf_sz_lst = &udav_vapi_size; /* [num_bufs], corresponds to phys_buf_lst */ + } + + thh_hob_p->udavm_table_size = udav_table_size; + thh_hob_p->udavm_table = udav_virt_addr; + thh_hob_p->udavm_table_ddr = udav_phys_addr; + + ret = THH_mrwm_register_internal(thh_hob_p->mrwm, &udav_internal_mr, &dummy_key); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not register created udavm table (%d)\n", ret); + goto udavm_table_register_err; + } + thh_hob_p->udavm_lkey = dummy_key; + + ret = THH_udavm_create(&(thh_hob_p->version_info), + dummy_key, + udav_virt_addr, + udav_table_size, + TRUE, + &(thh_hob_p->udavm), + &(thh_hob_p->av_ddr_base), + &(thh_hob_p->av_host_base)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create udavm (%d)\n", ret); + goto udavm_create_err; + } + + } else { + thh_hob_p->udavm_use_priv = FALSE; + } + + /* CREATE KAR (kernel UAR), using UAR 1 for this purpose. */ + kar_addr = (MT_virt_addr_t) MOSAL_map_phys_addr( thh_hob_p->hw_props.uar_base + ((MT_phys_addr_t)1 << thh_hob_p->profile.log2_uar_pg_size), + ((MT_size_t)1 << thh_hob_p->profile.log2_uar_pg_size), + MOSAL_MEM_FLAGS_NO_CACHE | MOSAL_MEM_FLAGS_PERM_WRITE, + MOSAL_get_kernel_prot_ctx()); + if (kar_addr == (MT_virt_addr_t) NULL) { +#ifndef __DARWIN__ + MTL_ERROR1("THH_hob_open_hca: MOSAL_map_phys_addr failed for prot ctx %d, addr " PHYS_ADDR_FMT ", size %d\n", + MOSAL_get_kernel_prot_ctx(), + (MT_phys_addr_t) (thh_hob_p->hw_props.uar_base), + (1 << thh_hob_p->profile.log2_uar_pg_size)); +#else + MTL_ERROR1("THH_hob_open_hca: MOSAL_map_phys_addr failed: addr " PHYS_ADDR_FMT ", size %d\n", + (MT_phys_addr_t) (thh_hob_p->hw_props.uar_base), + (1 << thh_hob_p->profile.log2_uar_pg_size)); +#endif + goto kar_map_phys_addr_err; + } + thh_hob_p->kar_addr = kar_addr; + MTL_DEBUG4("%s: MOSAL_map_phys_addr FOR KAR = " VIRT_ADDR_FMT "\n", __func__, kar_addr); + ret = THH_uar_create(&(thh_hob_p->version_info), 1/* Kernel UAR page index */, + (void *) kar_addr, &(thh_hob_p->kar)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create KAR (%d)\n", ret); + thh_hob_p->kar_addr = (MT_virt_addr_t) 0; + goto kar_create_err; + } + + //sharon: wrote fixed numbers till fw bug fixed + if (THH_DEV_LIM_MCG_ENABLED(thh_hob_p)) { + ret = THH_mcgm_create(thh_hob_p, + ((VAPI_size_t)1 << thh_hob_p->hca_props.multicast_parameters.log_mc_table_sz), + /*thh_hob_p->hca_props.multicast_parameters.mc_table_hash_sz*/ + (VAPI_size_t)1 << (thh_hob_p->profile.log2_mcg_hash_size), + (u_int16_t)thh_hob_p->profile.qps_per_mcg, + &(thh_hob_p->mcgm) ); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create mcgm (%d)\n", ret); + thh_hob_p->hca_capabilities.max_mcast_grp_num = 0; + thh_hob_p->hca_capabilities.max_mcast_qp_attach_num = 0; + thh_hob_p->hca_capabilities.max_total_mcast_qp_attach_num = 0; + thh_hob_p->mcgm = (THH_mcgm_t)THH_INVALID_HNDL; + } + } + + /* CREATE EVENTP*/ + event_res.cr_base = thh_hob_p->hw_props.cr_base; + event_res.intr_clr_bit = thh_hob_p->adapter_props.intapin; + event_res.irq = thh_hob_p->hw_props.interrupt_props.irq; + event_res.is_srq_enable = thh_hob_p->dev_lims.srq; + + ret = THH_eventp_create (thh_hob_p, &event_res, thh_hob_p->kar, &(thh_hob_p->eventp)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_open_hca: could not create eventp (%d)\n", ret); + goto 
eventp_create_err; + } + + /* CREATE THE VARIOUS EVENT QUEUES (eventp object operations) */ + /* register dummy completion and async event handlers */ + /* set max outstanding EQEs to max number of CQs configured */ + + ret = THH_eventp_setup_ib_eq(thh_hob_p->eventp, + &THH_dummy_async_event, + NULL, + (MT_size_t)(thh_hob_p->module_flags.async_eq_size == 0 ? + THH_MAX_ASYNC_EQ_SIZE : + thh_hob_p->module_flags.async_eq_size ), + &(thh_hob_p->ib_eq)); + if (ret != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_hob_open_hca: ERROR : cannot set up async event queue for size "SIZE_T_DFMT" (ret=%d)"), + (MT_size_t)(thh_hob_p->module_flags.async_eq_size == 0 ? + THH_MAX_ASYNC_EQ_SIZE :thh_hob_p->module_flags.async_eq_size ),ret); + goto eventp_async_err; + } + + ret = THH_eventp_setup_comp_eq(thh_hob_p->eventp, + &THH_dummy_comp_event , + NULL, + (MT_size_t)(1 <<(thh_hob_p->profile.log2_max_cqs)) - + (MT_size_t)(1ul << (thh_hob_p->dev_lims.log2_rsvd_cqs)), + &(thh_hob_p->compl_eq)); + if (ret != HH_OK) { + MTL_ERROR1( "THH_hob_open_hca: ERROR : cannot set up completion event queue (%d)\n", ret); + goto eventp_compl_err; + } + + + /* PERFORM INIT_IB only for legacy SQPs. Use max values obtained from QUERY_DEV_LIMS */ + + last_port_initialized = 0; + + if (thh_hob_p->module_flags.legacy_sqp == TRUE) { + for (i = 1; i <= num_ports; i++) { + MTL_TRACE2("THH_hob_open_hca: INIT_IB COMMAND\n"); + cmd_ret = THH_cmd_INIT_IB(thh_hob_p->cmd, (IB_port_t) i, &(thh_hob_p->init_ib_props[i-1])); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1("THH_hob_open_hca: CMD_error in THH_cmd_INIT_IB (%d) for port %d\n", cmd_ret, i); + if (cmd_ret ==THH_CMD_STAT_EFATAL) { + ret = HH_EFATAL; + } else { + ret = HH_EAGAIN; + } + goto init_ib_err; + } + + else { + MTL_TRACE2("THH_hob_open_hca: INIT_IB COMMAND completed successfuly\n"); + } + last_port_initialized++; + } + } + + + /* This must be called after INIT_IB, since it uses the max_pkey value stored in that struct */ + MTL_TRACE2("THH_hob_open_hca: Before THH_hob_query_struct_init\n"); + ret = THH_hob_query_struct_init(thh_hob_p, + ((prop_props_p == NULL)? FALSE : TRUE), + &(thh_hob_p->hca_capabilities)); + if (ret != HH_OK) { + MTL_ERROR1( "THH_hob_query_struct_init: ERROR : cannot initialize data for query_hca (%d)\n", ret); + goto query_struct_init_err; + } + + /* TK - start events after all CMDs are done */ + +#if (! defined __DARWIN__) || (defined DARWIN_WITH_INTERRUPTS_CMDIF) + ret = THH_eventp_setup_cmd_eq(thh_hob_p->eventp, CMD_EQ_SIZE /* to overcome a FW bug */ + /*(1 << (thh_hob_p->fw_props.log_max_outstanding_cmd))*/ ); + if (ret != HH_OK) { + MTL_ERROR1( "THH_hob_open_hca: ERROR : cannot set up command event queue (%d)\n", ret); + goto eventp_cmd_err; + } +#endif + + /* move the HCA to running state if had no fatal. If had fatal, return HH_EFATAL */ + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + if ((thh_hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { + /* already in FATAL state */ + MTL_DEBUG4(MT_FLFMT("%s: already in FATAL state"), __func__); + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + ret = HH_EFATAL; + goto fatal_err_at_end; + } + thh_hob_p->thh_state = THH_STATE_RUNNING; + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + + MTL_TRACE2("THH_hob_open_hca: device name %s\n", hca_hndl->dev_desc); + hca_hndl->status = HH_HCA_STATUS_OPENED; + + /* free the mutex */ + MOSAL_mutex_rel(&(thh_hob_p->mtx)); + + return(HH_OK); + +fatal_err_at_end: +eventp_cmd_err: +query_struct_init_err: +init_ib_err: + /* see if need to close IB for some ports. 
Do not close ports on fatal error*/ + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + /*test fatal again, here -- may have gotten FATAL before end of OPEN_hca process */ + if ((thh_hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { + /* got FATAL during OPEN_HCA */ + MTL_DEBUG4(MT_FLFMT("THH_hob_open_hca: In FATAL state")); + have_fatal = TRUE; + } else { + have_fatal = FALSE; + } + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + if ((last_port_initialized) && (have_fatal == FALSE)) { + for (i = 1; i <= last_port_initialized; i++) { + MTL_DEBUG4(MT_FLFMT("THH_hob_open_hca: closing IB port %d"), i); + THH_cmd_CLOSE_IB(thh_hob_p->cmd, (IB_port_t) i); + } + } + thh_hob_p->compl_eq = THH_INVALID_EQN; + +eventp_compl_err: + thh_hob_p->ib_eq = THH_INVALID_EQN; + +eventp_async_err: + THH_eventp_destroy(thh_hob_p->eventp); + +eventp_create_err: + if (thh_hob_p->mcgm != (THH_mcgm_t)THH_INVALID_HNDL) { + THH_mcgm_destroy(thh_hob_p->mcgm ); + thh_hob_p->mcgm = (THH_mcgm_t)THH_INVALID_HNDL; + } + + THH_uar_destroy(thh_hob_p->kar); + thh_hob_p->kar = (THH_uar_t)THH_INVALID_HNDL; + +kar_create_err: + MOSAL_unmap_phys_addr(MOSAL_get_kernel_prot_ctx(), (MT_virt_addr_t) kar_addr, + ((MT_size_t)1 << thh_hob_p->profile.log2_uar_pg_size)); + thh_hob_p->kar_addr = (MT_virt_addr_t) 0; + +kar_map_phys_addr_err: + if (thh_hob_p->profile.use_priv_udav) { + THH_udavm_destroy(thh_hob_p->udavm); + } + thh_hob_p->udavm = (THH_udavm_t)THH_INVALID_HNDL; +udavm_create_err: + if (thh_hob_p->profile.use_priv_udav) { + THH_mrwm_deregister_mr(thh_hob_p->mrwm, dummy_key); + thh_hob_p->udavm_lkey = 0; + } +udavm_table_register_err: + if (thh_hob_p->profile.use_priv_udav) { + MOSAL_unmap_phys_addr( MOSAL_get_kernel_prot_ctx(), (MT_virt_addr_t) udav_virt_addr , udav_table_size ); + thh_hob_p->udavm_table = (MT_virt_addr_t) NULL; + } + +udavm_mosal_map_err: + if (thh_hob_p->profile.use_priv_udav) { + THH_ddrmm_free(thh_hob_p->ddrmm, udav_phys_addr, udav_table_size); + thh_hob_p->udavm_table_ddr = (MT_phys_addr_t) 0; + thh_hob_p->udavm_table_size = 0; + } + +udavm_ddrmm_alloc_err: + THH_srqm_destroy(thh_hob_p->srqm); + thh_hob_p->srqm = (THH_srqm_t)THH_INVALID_HNDL; + +srqm_create_err: + THH_qpm_destroy( thh_hob_p->qpm, TRUE); + thh_hob_p->qpm = (THH_qpm_t)THH_INVALID_HNDL; + +qpm_create_err: + FREE(thh_hob_p->init_ib_props); + thh_hob_p->init_ib_props = ( THH_port_init_props_t *) NULL; + +init_ib_props_malloc_err: + THH_cqm_destroy( thh_hob_p->cqm, TRUE); + thh_hob_p->cqm = (THH_cqm_t)THH_INVALID_HNDL; + +cqm_create_err: + THH_mrwm_destroy(thh_hob_p->mrwm, TRUE); + thh_hob_p->mrwm = (THH_mrwm_t)THH_INVALID_HNDL; + +mrwm_create_err: + THH_uldm_destroy(thh_hob_p->uldm ); + thh_hob_p->uldm = (THH_uldm_t)THH_INVALID_HNDL; +uldm_create_err: +query_hca_err: +#ifdef SIMULATE_HALT_HCA + THH_cmd_CLOSE_HCA(thh_hob_p->cmd); +#else + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + /*test fatal again, here -- may have gotten FATAL before end of OPEN_hca process */ + if ((thh_hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { + /* got FATAL during OPEN_HCA */ + MTL_DEBUG4(MT_FLFMT("THH_hob_open_hca: In FATAL state")); + have_fatal = TRUE; + } else { + have_fatal = FALSE; + } + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + if (have_fatal) { + if (thh_hob_p->module_flags.fatal_delay_halt == 0) { + MTL_DEBUG1(MT_FLFMT("%s: halting the HCA"), __func__); + THH_cmd_CLOSE_HCA(thh_hob_p->cmd, TRUE); + } + } else { + MTL_DEBUG1(MT_FLFMT("%s: closing the HCA on non-fatal error %d"), __func__, ret); + THH_cmd_CLOSE_HCA(thh_hob_p->cmd, FALSE); + 
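+    /* whichever branch ran, control now falls through init_hca_err and
+     * cmd_assign_ddrmm_err below, revoking the command-mailbox DDR and
+     * freeing the remaining DDR areas in reverse order of allocation */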
} +#endif + +init_hca_err: + THH_cmd_revoke_ddrmm(thh_hob_p->cmd); + +cmd_assign_ddrmm_err: + + ddr_alloc_area = (MT_phys_addr_t *) &(thh_hob_p->ddr_alloc_base_addrs_vec); + ddr_alloc_size = (MT_size_t *)&(thh_hob_p->ddr_alloc_size_vec); + for (i = 0; i < thh_hob_p->profile.ddr_alloc_vec_size; i++, ddr_alloc_area++, ddr_alloc_size++) { + if (*ddr_alloc_area != THH_DDRMM_INVALID_PHYS_ADDR) + /* Do not free in case skipped during allocation (e.g., SRQC) */ + THH_ddrmm_free(thh_hob_p->ddrmm,*ddr_alloc_area, ((MT_size_t)1 << (*ddr_alloc_size))); + } + +post_mutex_acquire_err: + MOSAL_mutex_rel(&(thh_hob_p->mtx)); + +post_state_change_error: + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + /*test fatal again, here -- may have gotten FATAL before end of OPEN_hca process */ + if ((thh_hob_p->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) { + /* got FATAL during OPEN_HCA */ + MTL_DEBUG4(MT_FLFMT("THH_hob_open_hca: In FATAL state")); + have_fatal = TRUE; + } else { + /* restore the state to closed */ + have_fatal = FALSE; + thh_hob_p->thh_state = THH_STATE_CLOSED; + } + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + if (have_fatal) { + THH_hob_restart(hca_hndl); + } + return ret; +} + +/****************************************************************************** + * Function: THH_hob_query + * + * Description: Implements VAPI_query_hca verb. Data is already stored in HOB object. + * + * input: + * hca_hndl + * output: + * hca_cap_p -- pointer to output structure + * returns: + * HH_OK + * HH_EINVAL_HCA_HNDL + * + *****************************************************************************/ + HH_ret_t THH_hob_query(HH_hca_hndl_t hca_hndl, + VAPI_hca_cap_t *hca_cap_p) +{ + THH_hob_t hob; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + + hob = THHOBP(hca_hndl); + + if (hob == NULL) { + MTL_ERROR1("THH_hob_query : ERROR : No device registered\n"); + return HH_EINVAL; + } + + TEST_RETURN_FATAL(hob); + + /* check if ib_init_props have been created -- indicates that open_hca called for this device */ + if (hob->init_ib_props == (THH_port_init_props_t *)NULL) { + MTL_ERROR1("THH_hob_query: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + memcpy(hca_cap_p, &(hob->hca_capabilities), sizeof(VAPI_hca_cap_t)); + return HH_OK; +} + +#define SET_MAX_SG(a) (((a) < (u_int32_t)hob->dev_lims.max_sg) ? (a) : hob->dev_lims.max_sg) +/****************************************************************************** + * Function: THH_hob_query_struct_init + * + * Description: Pre-computes the data for VAPI_query_hca. Called during THH_hob_open_hca() + * + * input: + * hob + * output: + * hca_cap_p -- pointer to output structure + * returns: + * HH_OK + * HH_EINVAL_HCA_HNDL + * + * Comments: Needs to use a MAD query for some of the parameters + * + *****************************************************************************/ +static HH_ret_t THH_hob_query_struct_init(THH_hob_t hob, + MT_bool have_usr_profile, + VAPI_hca_cap_t *hca_cap_p) +{ + HH_ret_t ret; + u_int32_t flags = 0; + u_int32_t log2_num_spare_segs = 0; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + MTL_DEBUG4("Entering THH_hob_query_struct_init\n"); + /* Maximum Number of QPs supported. */ +#if 0 + hca_cap_p->max_num_qp = hob->profile.max_num_qps - THH_NUM_RSVD_QP - + ((have_usr_profile == FALSE)? 
                                          0 : (1U << hob->dev_lims.log2_rsvd_qps) );
+#else
+  hca_cap_p->max_num_qp = (u_int32_t)hob->profile.max_num_qps;
+#endif
+  hca_cap_p->max_num_srq = (u_int32_t)hob->profile.max_num_srqs;
+  hca_cap_p->srq_resize_supported = TRUE;
+  /* Maximum Number of outstanding WRs on any WQ. */
+  hca_cap_p->max_qp_ous_wr = (1 << (hob->dev_lims.log_max_qp_sz)) - 1;
+  hca_cap_p->max_wqe_per_srq = (1 << (hob->dev_lims.log_max_srq_sz)) - 1;
+
+  /* Various flags (VAPI_hca_cap_flags_t) */
+
+// VAPI_RESIZE_OUS_WQE_CAP = 1 /* Not currently supported */
+
+  flags = (hob->hca_props.udp ? VAPI_UD_AV_PORT_ENFORCE_CAP : 0) |
+          (hob->dev_lims.apm ? VAPI_AUTO_PATH_MIG_CAP : 0 ) |
+          (hob->dev_lims.rm ? VAPI_RAW_MULTI_CAP : 0) |
+          (hob->dev_lims.pkv ? VAPI_BAD_PKEY_COUNT_CAP : 0) |
+          (hob->dev_lims.qkv ? VAPI_BAD_QKEY_COUNT_CAP : 0) |
+          VAPI_CHANGE_PHY_PORT_CAP | VAPI_RC_RNR_NAK_GEN_CAP | VAPI_PORT_ACTIVE_EV_CAP;
+
+  hca_cap_p->flags = flags;
+  /* Max num of scatter/gather entries for desc other than RD */
+  hca_cap_p->max_num_sg_ent = SET_MAX_SG(28);
+  /* Max num of scatter entries for SRQs */
+  hca_cap_p->max_srq_sentries = SET_MAX_SG(31);
+  /* Max num of scatter/gather entries for RD desc - not supported */
+  hca_cap_p->max_num_sg_ent_rd = 0;
+  /* Max num of supported CQs */
+#if 0
+  hca_cap_p->max_num_cq = hob->profile.max_num_cqs -
+                          ((have_usr_profile == FALSE)? 0 : (1U << hob->dev_lims.log2_rsvd_cqs) );
+#else
+  hca_cap_p->max_num_cq = (u_int32_t)hob->profile.max_num_cqs;
+#endif
+  /* Max num of supported entries per CQ */
+  hca_cap_p->max_num_ent_cq = (1 << (hob->dev_lims.log_max_cq_sz)) - 1 /* for extra cqe needed */;
+  /* Maximum number of memory regions supported. */
+#if 0
+  hca_cap_p->max_num_mr = hob->profile.num_external_mem_regions -
+                          ((have_usr_profile == FALSE)? 0 : (1U << hob->dev_lims.log2_rsvd_mtts) );
+#else
+  hca_cap_p->max_num_mr = (u_int32_t)hob->profile.num_external_mem_regions;
+#endif
+  /* Largest contiguous block of memory region in bytes. This may be achieved by registering
+   * PHYSICAL memory directly for the region (and using a page size for the region equal to
+   * the size of the physical memory block you are registering). The PRM allocates 5 bits
+   * for registering the log2 of the page size (with a 4K page size having page-size val 0).
+   * Thus, the maximum page size per MTT entry is 4K * 2^31 (= 2^(31+12)). A single region
+   * can include multiple entries, and we can use all the spare MTT entries available for
+   * this HUGE region. The driver requires that every memory region have at least a single
+   * segment available for registration. We can thus use all the spare segments (we have
+   * allocated 2 segments per region, but only really need one) for this region. If there are
+   * no spare MTT entries, we calculate the value based on the usual MTT entries per segment.
+   * This will still be a HUGE number (probably 2^46 or greater).
+   */
+  if (hob->profile.log2_mtt_segs_per_region == 0)
+    log2_num_spare_segs = (u_int32_t)hob->profile.log2_mtt_entries_per_seg;
+  else
+    log2_num_spare_segs = (u_int32_t)(hob->profile.log2_max_mtt_entries - hob->profile.log2_mtt_segs_per_region);
+
+  /* check that we do not overflow 2^64 !! */
+  if (log2_num_spare_segs >= 21)
+    hca_cap_p->max_mr_size = MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF);
+  else
+    hca_cap_p->max_mr_size = (((u_int64_t)1L) << (31 + 12 + log2_num_spare_segs));
+  /* Maximum number of protection domains supported. */
+#if 0
+  hca_cap_p->max_pd_num = hob->profile.max_num_pds - THH_NUM_RSVD_PD -
+                          ((have_usr_profile == FALSE)? 0 : hob->dev_lims.num_rsvd_pds );
+#else
+  hca_cap_p->max_pd_num = (u_int32_t)hob->profile.max_num_pds;
+#endif
+  /* Largest page size supported by this HCA */
+  hca_cap_p->page_size_cap = (1 << (hob->dev_lims.log_pg_sz));
+  /* Number of physical ports of the HCA. */
+  hca_cap_p->phys_port_num = hob->dev_lims.num_ports;
+  /* Maximum number of partitions supported. */
+  hca_cap_p->max_pkeys = hob->init_ib_props[0].max_pkey;
+  /* Maximum number of outs. RDMA read/atomic as target */
+  hca_cap_p->max_qp_ous_rd_atom = 1 << (hob->profile.log2_inflight_rdma_per_qp);
+  /* EE Maximum number of outs. RDMA read/atomic as target -- NOT YET SUPPORTED */
+  hca_cap_p->max_ee_ous_rd_atom = 0;
+  /* Max. Num. of resources used for RDMA read/atomic as target */
+  hca_cap_p->max_res_rd_atom = ((1 << (hob->ddr_alloc_size_vec.log2_rdb_size - THH_DDR_LOG2_RDB_ENTRY_SIZE)) > 255 ?
+                                255 : (1 << (hob->ddr_alloc_size_vec.log2_rdb_size - THH_DDR_LOG2_RDB_ENTRY_SIZE)));
+  /* Max. Num. of outs. RDMA read/atomic as initiator. Note that 255 is the max in the struct */
+  hca_cap_p->max_qp_init_rd_atom = ((1 << (hob->dev_lims.log_max_ra_req_qp)) > 255 ?
+                                    255 : (1 << (hob->dev_lims.log_max_ra_req_qp)));
+  /* EE Max. Num. of outs. RDMA read/atomic as initiator -- NOT YET SUPPORTED */
+  hca_cap_p->max_ee_init_rd_atom = 0;
+  /* Level of Atomicity supported: if supported, is only within this HCA */
+  hca_cap_p->atomic_cap = (hob->dev_lims.atm ? VAPI_ATOMIC_CAP_HCA : VAPI_ATOMIC_CAP_NONE);
+  /* Maximum number of EECs supported. -- NOT YET SUPPORTED */
+#if 0
+  hca_cap_p->max_ee_num = 1 << (hob->hca_props.qpc_eec_cqc_eqc_rdb_parameters.log_num_of_ee);
+#else
+  hca_cap_p->max_ee_num = 0;
+#endif
+  /* Maximum number of IB_RDDs supported -- NOT YET SUPPORTED */
+  hca_cap_p->max_rdd_num = 0;
+  /* Maximum Number of memory windows supported */
+#if 0
+  hca_cap_p->max_mw_num = hob->profile.num_mem_windows -
+                          ((have_usr_profile == FALSE)? 0 : (1U << hob->dev_lims.log2_rsvd_mrws) );
+#else
+  hca_cap_p->max_mw_num = (u_int32_t)hob->profile.num_mem_windows;
+#endif
+  /* Maximum number of Raw IPV6 QPs supported -- NOT YET SUPPORTED */
+  hca_cap_p->max_raw_ipv6_qp = 0;
+  /* Maximum number of Raw Ethertype QPs supported -- NOT YET SUPPORTED */
+  hca_cap_p->max_raw_ethy_qp = 0;
+  /* Maximum Number of multicast groups */
+  hca_cap_p->max_mcast_grp_num = 1 << (hob->hca_props.multicast_parameters.log_mc_table_sz);
+  /* Maximum number of QPs per multicast group */
+  hca_cap_p->max_mcast_qp_attach_num = ( (1U << (hob->hca_props.multicast_parameters.log_mc_table_entry_sz))
+                                         - THH_DDR_MCG_ENTRY_HEADER_SIZE) /
+                                       THH_DDR_MCG_BYTES_PER_QP;
+  /* Maximum total number of QP attachments over all multicast groups */
+  hca_cap_p->max_total_mcast_qp_attach_num = hca_cap_p->max_mcast_grp_num * hca_cap_p->max_mcast_qp_attach_num;
+  /* Maximum number of address handles */
+  hca_cap_p->max_ah_num = (u_int32_t)(hob->profile.use_priv_udav ? hob->profile.max_priv_udavs :
+                                      THHUL_PDM_MAX_UL_UDAV_PER_PD*(hob->profile.max_num_pds+THH_NUM_RSVD_PD));
+  /* max number of FMRs available to the user is the number of user entries in the MPT */
+#if 0
+  hca_cap_p->max_num_fmr = hob->profile.num_external_mem_regions -
+                           ((have_usr_profile == FALSE)? 0 : (1U << hob->dev_lims.log2_rsvd_mtts) );
+#else
+  hca_cap_p->max_num_fmr = (u_int32_t)((hob->ddr_props.dh == FALSE) ?
hob->profile.num_external_mem_regions : 0); +#endif +/* max maps per fmr is the max number that can be expressed by the MS bits of a u_int32_t + that are unused for MPT addressing (which will occupy the LS bits of that u_int32_t).*/ + hca_cap_p->max_num_map_per_fmr = (1 << (32 - hob->profile.log2_max_mpt_entries)) - 1; + + /* Log2 4.096usec Max. RX to ACK or NAK delay */ + hca_cap_p->local_ca_ack_delay = hob->dev_lims.local_ca_ack_delay; + + /* Node GUID for this hca */ + ret = THH_hob_get_node_guid(hob,&(hca_cap_p->node_guid)); + + return(ret); +} + +/****************************************************************************** + * Function: THH_hob_modify + * + * Description: Implements the VAPI_modify_hca verb + * + * input: + * hca_hndl + * port_num - 1 or 2 + * hca_attr_p - contains values to modify + * hca_attr_mask_p - mask specifying which values in hca_attr_p should + * be used for modification. + * output: + * none + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_EINVAL_PORT + * HH_ENOSYS + * + * Comments: Implements IB Spec 1.0a now. must be modified to support IB Spec 1.1 + * JPM + * + *****************************************************************************/ +HH_ret_t THH_hob_modify( + HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_attr_t *hca_attr_p, + VAPI_hca_attr_mask_t *hca_attr_mask_p) +{ + /* TBD, will use SET_IB command. Problem is that can only set PKey and QKey counters to zero. */ + HH_ret_t retn; + THH_cmd_status_t cmd_ret; + VAPI_hca_port_t port_props; + THH_set_ib_props_t set_ib_props; + IB_port_cap_mask_t capabilities; + THH_hob_t hob; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_modify: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_modify : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + + hob = THHOBP(hca_hndl); + + if (hob == NULL) { + MTL_ERROR1("THH_hob_modify : ERROR : No device registered\n"); + return HH_EINVAL; + } + TEST_RETURN_FATAL(hob); + + if (port_num > hob->dev_lims.num_ports || port_num < 1) { + MTL_ERROR2( "THH_hob_modify: ERROR : invalid port number(%d)\n", port_num); + return HH_EINVAL_PORT; + } + + memset(&set_ib_props, 0, sizeof(THH_set_ib_props_t)); + + set_ib_props.rqk = hca_attr_p->reset_qkey_counter; + + /* start with current capabilities */ + retn = THH_hob_query_port_prop(hca_hndl, port_num, &port_props); + if (retn != HH_OK) { + MTL_ERROR1("THH_hob_modify : ERROR : cannot get current capabilities (%d)\n", retn); + return HH_EAGAIN; + } + capabilities = port_props.capability_mask; + + /* now, modify the capability mask according to the input */ + if (HCA_ATTR_IS_FLAGS_SET(*hca_attr_mask_p)) { + /* calculate capabilities modification mask */ + if(HCA_ATTR_IS_SET(*hca_attr_mask_p, HCA_ATTR_IS_SM) ) { + if (hca_attr_p->is_sm) { + IB_CAP_MASK_SET(capabilities, IB_CAP_MASK_IS_SM); + } else { + IB_CAP_MASK_CLR(capabilities, IB_CAP_MASK_IS_SM); + } + } + if(HCA_ATTR_IS_SET(*hca_attr_mask_p, HCA_ATTR_IS_SNMP_TUN_SUP) ) { + if (hca_attr_p->is_snmp_tun_sup) { + IB_CAP_MASK_SET(capabilities, IB_CAP_MASK_IS_SNMP_TUNN_SUP); + } else { + IB_CAP_MASK_CLR(capabilities, IB_CAP_MASK_IS_SNMP_TUNN_SUP); + } + } + if(HCA_ATTR_IS_SET(*hca_attr_mask_p, HCA_ATTR_IS_DEV_MGT_SUP) ) { + if (hca_attr_p->is_dev_mgt_sup) { + IB_CAP_MASK_SET(capabilities, IB_CAP_MASK_IS_DEVICE_MGMT_SUP); + } else { + IB_CAP_MASK_CLR(capabilities, IB_CAP_MASK_IS_DEVICE_MGMT_SUP); + } + } + 
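+ /* the remaining capabilities follow the same read-modify-write pattern on
+  * the mask fetched above via THH_hob_query_port_prop(): test the attribute
+  * mask bit, then IB_CAP_MASK_SET()/IB_CAP_MASK_CLR() the corresponding
+  * IB_CAP_MASK_* bit */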
+    if(HCA_ATTR_IS_SET(*hca_attr_mask_p, HCA_ATTR_IS_VENDOR_CLS_SUP) ) {
+      if (hca_attr_p->is_vendor_cls_sup) {
+        IB_CAP_MASK_SET(capabilities, IB_CAP_MASK_IS_VENDOR_CLS_SUP);
+      } else {
+        IB_CAP_MASK_CLR(capabilities, IB_CAP_MASK_IS_VENDOR_CLS_SUP);
+      }
+    }
+    if(HCA_ATTR_IS_SET(*hca_attr_mask_p, HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP) ) {
+      if (hca_attr_p->is_client_reregister_sup) {
+        IB_CAP_MASK_SET(capabilities, IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP);
+      } else {
+        IB_CAP_MASK_CLR(capabilities, IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP);
+      }
+    }
+  }
+
+  set_ib_props.capability_mask = capabilities;
+
+  /* now, perform the CMD */
+  cmd_ret = THH_cmd_SET_IB(hob->cmd, port_num, &set_ib_props);
+  if (cmd_ret != THH_CMD_STAT_OK) {
+    TEST_CMD_FATAL(cmd_ret);
+    MTL_ERROR1("THH_hob_modify: CMD_error in THH_cmd_SET_IB (%d)\n", cmd_ret);
+    return HH_EINVAL;
+  }
+
+  return(HH_OK);
+}
+/******************************************************************************
+ * Function: THH_hob_query_port_prop
+ *
+ * Description: Implements the VAPI_query_hca_port_prop verb
+ *
+ * input:
+ *   hca_hndl
+ *   port_num - 1 or 2
+ * output:
+ *   hca_port_p - port properties output structure
+ * returns:
+ *   HH_OK
+ *   HH_EINVAL
+ *   HH_EINVAL_HCA_HNDL
+ *   HH_EINVAL_PORT
+ *   HH_ERR
+ *
+ * Comments: Does a MAD query to get the data in real time. Data is not pre-fetched, because
+ *           the current port state is needed for the answer -- so the query must be performed
+ *           anyway.
+ *
+ *****************************************************************************/
+HH_ret_t THH_hob_query_port_prop(HH_hca_hndl_t hca_hndl,
+                                 IB_port_t port_num,
+                                 VAPI_hca_port_t *hca_port_p )
+{
+  SM_MAD_PortInfo_t port_info;
+  u_int8_t *mad_frame_in;
+  u_int8_t *mad_frame_out;
+  THH_cmd_status_t cmd_ret;
+
+  THH_hob_t hob;
+
+  MT_RETURN_IF_LOW_STACK(THH_WATERMARK);
+  if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) {
+    MTL_ERROR1("THH_hob_query_port_prop: NOT IN TASK CONTEXT\n");
+    return HH_ERR;
+  }
+
+  if (hca_hndl == NULL) {
+    MTL_ERROR1("THH_hob_query_port_prop : ERROR : Invalid HCA handle\n");
+    return HH_EINVAL_HCA_HNDL;
+  }
+  hob = THHOBP(hca_hndl);
+  if (hob == NULL) {
+    MTL_ERROR1("THH_hob_query_port_prop : ERROR : No device registered\n");
+    return HH_EINVAL;
+  }
+  TEST_RETURN_FATAL(hob);
+
+  if (port_num > hob->dev_lims.num_ports || port_num < 1) {
+    MTL_ERROR2( "THH_hob_query_port_prop: ERROR : invalid port number (%d)\n", port_num);
+    return HH_EINVAL_PORT;
+  }
+
+  mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+  if ( !mad_frame_in ) {
+    return HH_EAGAIN;
+  }
+  mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+  if ( !mad_frame_out ) {
+    FREE(mad_frame_in);
+    return HH_EAGAIN;
+  }
+  /* the frames are heap-allocated pointers, so clear the full MAD size
+   * explicitly (sizeof(pointer) would clear only 4 or 8 bytes) */
+  memset(mad_frame_in, 0, IB_MAD_SIZE);
+  memset(mad_frame_out, 0, IB_MAD_SIZE);
+
+  /* get port props using MAD commands in THH_cmd object */
+  /* First, build the MAD header */
+  MADHeaderBuild(IB_CLASS_SMP,
+                 0,
+                 IB_METHOD_GET,
+                 IB_SMP_ATTRIB_PORTINFO,
+                 (u_int32_t) port_num,
+                 &(mad_frame_in[0]));
+
+  /* issue the query */
+  cmd_ret = THH_cmd_MAD_IFC(hob->cmd, 0, 0, port_num, &(mad_frame_in[0]), &(mad_frame_out[0]));
+  if (cmd_ret != THH_CMD_STAT_OK) {
+    TEST_CMD_FATAL(cmd_ret);
+    MTL_ERROR2( "THH_hob_query_port_prop: ERROR : Get Port Info command failed (%d) for port %d\n", cmd_ret, port_num);
+    FREE(mad_frame_out);
+    FREE(mad_frame_in);
+    if ( cmd_ret == THH_CMD_STAT_EINTR ) {
+      return HH_EINTR;
+    }
+    return HH_ERR;
+  }
+  /* now, translate the response to a structure */
+  PortInfoMADToSt(&port_info, &(mad_frame_out[0]));
+
+  /*
finally, extract the information we want */ + hca_port_p->bad_pkey_counter = port_info.wPKViolations; + hca_port_p->capability_mask = port_info.dwCapMask; + hca_port_p->gid_tbl_len = port_info.bGUIDCap; + hca_port_p->lid = port_info.wLID; + hca_port_p->lmc = port_info.cLMC; + hca_port_p->max_msg_sz = IB_MAX_MESSAGE_SIZE; + hca_port_p->max_mtu = hob->dev_lims.max_mtu; + hca_port_p->max_vl_num = port_info.cVLCap; + hca_port_p->pkey_tbl_len = hob->init_ib_props[port_num-1].max_pkey; + hca_port_p->qkey_viol_counter = port_info.wQKViolations; + hca_port_p->sm_lid = port_info.wMasterSMLID; + hca_port_p->sm_sl = port_info.cMasterSMSL; + hca_port_p->state = port_info.cPortState; + hca_port_p->subnet_timeout = port_info.cSubnetTO; + + hca_port_p->initTypeReply = 0; /* not yet supported in FW */ + + FREE(mad_frame_out); + FREE(mad_frame_in); + return(HH_OK); +} + +/****************************************************************************** + * Function: THH_hob_get_pkey_tbl_local + * + * Description: Gets PKEY table for a given port + * + * input: + * hca_hndl + * port_num - 1 or 2 + * tbl_len_in - size of table provided for response (in pkeys) + * use_mad_query_for_pkeys - if TRUE, query Tavor for pkeys + * else, use pkey_table tracking in thh_qpm + * output: + * tbl_len_out - size of returned table (in pkeys) + * pkey_tbl_p - pointer to table containing data (space provided by caller) + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Does MAD query to get the data in real time. + * + *****************************************************************************/ +static HH_ret_t THH_hob_get_pkey_tbl_local(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t *tbl_len_out, + IB_pkey_t *pkey_tbl_p, + MT_bool use_mad_query_for_pkeys) +{ + SM_MAD_Pkey_table_t pkey_table; + u_int8_t *mad_frame_in; + u_int8_t *mad_frame_out; + int i,j; + int num_pkeys, pkey_index, num_pkeytable_commands; + THH_cmd_status_t cmd_ret; + THH_hob_t thh_hob_p; + THH_qpm_t qpm; + HH_ret_t hh_ret = HH_OK; + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_get_pkey_tbl: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_get_pkey_tbl : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + /* check if have valid port number */ + if (port_num > thh_hob_p->dev_lims.num_ports || port_num < 1) { + MTL_ERROR1("THH_hob_get_pkey_tbl: port number (%d) not valid)\n", port_num); + return HH_EINVAL_PORT; + } + + if (tbl_len_out == NULL) { + return HH_EINVAL; + } + + + /* check that pkey table has enough space */ + num_pkeys = thh_hob_p->init_ib_props[port_num-1].max_pkey; +/*** warning C4242: '=' : conversion from 'int' to 'u_int16_t', possible loss of data ***/ + *tbl_len_out = (u_int16_t)num_pkeys; + + if (tbl_len_in < num_pkeys) { + if (!tbl_len_in) { + MTL_TRACE2( "THH_hob_get_pkey_tbl: returning number of pkeys configured (%d)\n", num_pkeys); + } else { + MTL_ERROR2( "THH_hob_get_pkey_tbl: ERROR : not enough space in return value table. 
num keys = %d\n",
+                 num_pkeys);
+    }
+    return HH_EAGAIN;
+  }
+
+  /* check that have valid output buffer area */
+  if (pkey_tbl_p == NULL) {
+    return HH_EINVAL;
+  }
+
+  mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+  if ( !mad_frame_in ) {
+    return HH_EAGAIN;
+  }
+  mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+  if ( !mad_frame_out ) {
+    FREE(mad_frame_in);
+    return HH_EAGAIN;
+  }
+
+  /* get PKey table using MAD commands in THH_cmd object */
+  /* First, build the MAD header */
+  if (use_mad_query_for_pkeys == TRUE) {
+    num_pkeytable_commands = ((num_pkeys - 1) / 32) + 1;
+
+    pkey_index = 0;
+    for (i = 0; i < num_pkeytable_commands; i++) {
+      /* clear the full MAD buffers (IB_MAD_SIZE bytes), not just sizeof(pointer) */
+      memset(mad_frame_in, 0, IB_MAD_SIZE);
+      memset(mad_frame_out, 0, IB_MAD_SIZE);
+      MADHeaderBuild(IB_CLASS_SMP,
+                     0,
+                     IB_METHOD_GET,
+                     IB_SMP_ATTRIB_PARTTABLE,
+                     (u_int32_t) (32*i),
+                     &(mad_frame_in[0]));
+
+      cmd_ret = THH_cmd_MAD_IFC(thh_hob_p->cmd, 0, 0, port_num, &(mad_frame_in[0]), &(mad_frame_out[0]));
+      if (cmd_ret != THH_CMD_STAT_OK) {
+        TEST_CMD_FATAL(cmd_ret);
+        MTL_ERROR2( "THH_hob_get_pkey_tbl: ERROR : Get Partition Table command failed (%d) for port %d\n", cmd_ret, port_num);
+        FREE(mad_frame_out);
+        FREE(mad_frame_in);
+        return HH_ERR;
+      }
+      PKeyTableMADToSt(&pkey_table, &(mad_frame_out[0]));
+
+      for (j = 0; j < 32; j++) {
+        pkey_tbl_p[pkey_index++] = pkey_table.pkey[j];
+        if (pkey_index == num_pkeys) {
+          break;
+        }
+      }
+    }
+  } else {
+    hh_ret = THH_hob_get_qpm ( thh_hob_p, &qpm );
+    if (hh_ret != HH_OK) {
+      MTL_ERROR2( "THH_hob_get_qpm: invalid QPM handle (ret= %d)\n", hh_ret);
+      FREE(mad_frame_out);
+      FREE(mad_frame_in);
+      return HH_EINVAL;
+    }
+/*** warning C4242: '=' : conversion from 'int' to 'u_int16_t', possible loss of data ***/
+    hh_ret = THH_qpm_get_all_pkeys(qpm,port_num,(u_int16_t)num_pkeys, pkey_tbl_p);
+    if (hh_ret != HH_OK) {
+      MTL_ERROR2( "THH_qpm_get_all_pkeys failed (ret= %d)\n", hh_ret);
+      FREE(mad_frame_out);
+      FREE(mad_frame_in);
+      return HH_EINVAL;
+    }
+  }
+
+  FREE(mad_frame_out);
+  FREE(mad_frame_in);
+  return(HH_OK);
+}
+/******************************************************************************
+ *  Function: THH_hob_get_pkey_tbl
+ *
+ *  Description: Gets PKEY table for a given port
+ *
+ *  input:
+ *           hca_hndl
+ *           port_num - 1 or 2
+ *           tbl_len_in - size of table provided for response (in pkeys)
+ *  output:
+ *           tbl_len_out - size of returned table (in pkeys)
+ *           pkey_tbl_p  - pointer to table containing data (space provided by caller)
+ *  returns:
+ *           HH_OK
+ *           HH_EINVAL
+ *           HH_EINVAL_HCA_HNDL
+ *           HH_ERR
+ *
+ *  Comments: Does MAD query to get the data in real time. 
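+ *            Two-call usage sketch (hedged example; error handling omitted,
+ *            "hh"/"port" stand for the caller's handle and port number):
+ *              u_int16_t n;
+ *              THH_hob_get_pkey_tbl(hh, port, 0, &n, NULL);   -- HH_EAGAIN, n = table size
+ *              pkeys = TNMALLOC(IB_pkey_t, n);
+ *              THH_hob_get_pkey_tbl(hh, port, n, &n, pkeys);  -- fills pkeys[0..n-1]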
+ * + *****************************************************************************/ +HH_ret_t THH_hob_get_pkey_tbl(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t *tbl_len_out, + IB_pkey_t *pkey_tbl_p) +{ + MT_bool is_legacy = FALSE; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + FUNC_IN; + + MTL_DEBUG4("THH_hob_get_pkey_tbl: hca_hndl=0x%p, port= %d, return table len = %d\n", + hca_hndl, port_num, tbl_len_in); + + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_get_pkey_tbl : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_get_pkey_tbl : ERROR : No device registered\n"); + return HH_EINVAL; + } + TEST_RETURN_FATAL(thh_hob_p); + + THH_hob_get_legacy_mode(thh_hob_p, &is_legacy); + return(THH_hob_get_pkey_tbl_local(hca_hndl,port_num,tbl_len_in, + tbl_len_out,pkey_tbl_p,is_legacy)); + +} + +HH_ret_t THH_hob_init_pkey_tbl( HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_pkey_t *pkey_tbl_p) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + return(THH_hob_get_pkey_tbl_local(hca_hndl,port_num,tbl_len_in, + tbl_len_out,pkey_tbl_p,1)); + +} + + +/****************************************************************************** + * Function: THH_hob_get_gid_tbl_local + * + * Description: Gets GID table for a given port + * + * input: + * hca_hndl + * port_num - 1 or 2 + * tbl_len_in - size of table provided for response (in pkeys) + * use_mad_query_for_gid_prefix - if TRUE, query Tavor for gid prefix. + * else, use gid_table tracking in thh_qpm + * output: + * tbl_len_out - size of returned table (in pkeys) + * param_gid_p - pointer to table containing data (space provided by caller) + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Does MAD query to get the data in real time. 
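+ *            Each returned GID is the 64-bit subnet prefix from PortInfo
+ *            (qwGIDPrefix) followed by one 64-bit port GUID from the GUIDInfo
+ *            table, i.e. bytes 0..7 = prefix, bytes 8..15 = GUID.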
+ * + *****************************************************************************/ +#ifndef IVAPI_THH +static +#endif +HH_ret_t THH_hob_get_gid_tbl_local( HH_hca_hndl_t hca_hndl, + IB_port_t port, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* param_gid_p, + MT_bool use_mad_query_for_gid_prefix) +{ + SM_MAD_PortInfo_t port_info; + SM_MAD_GUIDInfo_t guid_info; + u_int8_t *mad_frame_in; + u_int8_t *mad_frame_out; + THH_cmd_status_t cmd_ret; + int num_guids, guid_index; + int num_guidinfo_commands; + u_int8_t *gid_p = (u_int8_t *) param_gid_p; + int i,j; + HH_ret_t hh_ret = HH_OK; + THH_qpm_t qpm; + IB_gid_t gid; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + FUNC_IN; + + MTL_DEBUG4("THH_hob_get_gid_tbl_local: hca_hndl=0x%p, port= %d, return table len = %d\n", + hca_hndl, port, tbl_len_in); + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK && use_mad_query_for_gid_prefix) { + MTL_ERROR1("THH_hob_get_gid_tbl: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_get_gid_tbl : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + + /* check if have valid port number */ + if (port > thh_hob_p->dev_lims.num_ports || port < 1) { + MTL_ERROR1("THH_hob_get_gid_tbl: port number (%d) not valid)\n", port); + return HH_EINVAL_PORT; + } + + if (tbl_len_out == NULL) { + return HH_EINVAL; + } + + + /* check that gid table has enough space */ + num_guids = thh_hob_p->init_ib_props[port-1].max_gid; +/*** warning C4242: '=' : conversion from 'int' to 'u_int16_t', possible loss of data ***/ + *tbl_len_out = (u_int16_t)num_guids; + + if (tbl_len_in < num_guids) { + if (!tbl_len_in) { + MTL_TRACE2( "THH_hob_get_gid_tbl: returning gid table configured size (%d)\n", num_guids); + } else { + MTL_ERROR2( "THH_hob_get_gid_tbl: ERROR : not enough space in return value table. 
Need %d\n",
+                 num_guids);
+    }
+    return HH_EAGAIN;
+  }
+
+  /* check that have valid output buffer area */
+  if (param_gid_p == NULL) {
+    return HH_EINVAL;
+  }
+
+  /* get GID table using MAD commands in THH_cmd object */
+  if (use_mad_query_for_gid_prefix == TRUE) {
+
+    mad_frame_in = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+    if ( !mad_frame_in ) {
+      return HH_EAGAIN;
+    }
+    mad_frame_out = TNMALLOC(u_int8_t, IB_MAD_SIZE);
+    if ( !mad_frame_out ) {
+      FREE(mad_frame_in);
+      return HH_EAGAIN;
+    }
+    /* First, get the GID prefix via MAD query.  The frames are pointers,
+     * so clear the full IB_MAD_SIZE buffers, not sizeof(pointer). */
+    memset(mad_frame_in, 0, IB_MAD_SIZE);
+    memset(mad_frame_out, 0, IB_MAD_SIZE);
+    MADHeaderBuild(IB_CLASS_SMP,
+                   0,
+                   IB_METHOD_GET,
+                   IB_SMP_ATTRIB_PORTINFO,
+                   (u_int32_t) port,
+                   &(mad_frame_in[0]));
+
+    /* issue the query */
+    cmd_ret = THH_cmd_MAD_IFC(thh_hob_p->cmd, 0, 0, port, &(mad_frame_in[0]), &(mad_frame_out[0]));
+    if (cmd_ret != THH_CMD_STAT_OK) {
+      TEST_CMD_FATAL(cmd_ret);
+      MTL_ERROR2( "THH_hob_get_gid_tbl: ERROR : Get Port Info command failed (%d) for port %d\n", cmd_ret, port);
+      FREE(mad_frame_out);
+      FREE(mad_frame_in);
+      return HH_ERR;
+    }
+    PortInfoMADToSt(&port_info, &(mad_frame_out[0]));
+    PortInfoPrint(&port_info);
+    memcpy(&gid, &(port_info.qwGIDPrefix), sizeof(port_info.qwGIDPrefix));
+
+    /* Now, get the GUIDs, and build GIDS */
+    num_guidinfo_commands = ((num_guids - 1) / 8) + 1;
+
+    guid_index = 0;
+    for (i = 0; i < num_guidinfo_commands; i++) {
+      memset(mad_frame_in, 0, IB_MAD_SIZE);
+      memset(mad_frame_out, 0, IB_MAD_SIZE);
+      MADHeaderBuild(IB_CLASS_SMP,
+                     0,
+                     IB_METHOD_GET,
+                     IB_SMP_ATTRIB_GUIDINFO,
+                     (u_int32_t) (i*8),
+                     &(mad_frame_in[0]));
+
+      cmd_ret = THH_cmd_MAD_IFC(thh_hob_p->cmd, 0, 0, port, &(mad_frame_in[0]), &(mad_frame_out[0]));
+      if (cmd_ret != THH_CMD_STAT_OK) {
+        TEST_CMD_FATAL(cmd_ret);
+        MTL_ERROR2( "THH_hob_get_gid_tbl: ERROR : Get GUID Info command failed (%d) for port %d\n", cmd_ret, port);
+        FREE(mad_frame_out);
+        FREE(mad_frame_in);
+        return HH_ERR;
+      }
+      GUIDInfoMADToSt(&guid_info, &(mad_frame_out[0]));
+      GUIDInfoPrint(&guid_info);
+
+      for (j = 0; j < 8; j++) {
+        memcpy (gid_p, &(gid), sizeof(port_info.qwGIDPrefix));
+        gid_p += sizeof(port_info.qwGIDPrefix);
+        memcpy (gid_p, &(guid_info.guid[j]), sizeof(IB_guid_t));
+        gid_p += sizeof(u_int64_t);
+        guid_index++;
+        if (guid_index == num_guids) {
+          break;
+        }
+      }
+    }
+    FREE(mad_frame_out);
+    FREE(mad_frame_in);
+  } else {
+    memset(&port_info, 0, sizeof(port_info));
+    hh_ret = THH_hob_get_qpm ( thh_hob_p, &qpm );
+    if (hh_ret != HH_OK) {
+      MTL_ERROR2( "THH_hob_get_qpm: invalid QPM handle (ret= %d)\n", hh_ret);
+      return HH_EINVAL;
+    }
+/*** warning C4242: 'function' : conversion from 'int' to 'u_int8_t', possible loss of data ***/
+    hh_ret = THH_qpm_get_all_sgids(qpm,port,(u_int8_t)num_guids, param_gid_p);
+    if (hh_ret != HH_OK) {
+      MTL_ERROR2( "THH_qpm_get_all_sgids failed (ret= %d)\n", hh_ret);
+      return HH_EINVAL;
+    }
+    return HH_OK;
+  }
+
+  return HH_OK;
+} /* THH_hob_get_gid_tbl_local */
+/******************************************************************************
+ *  Function: THH_hob_get_gid_tbl
+ *
+ *  Description: Gets GID table for a given port
+ *
+ *  input:
+ *           hca_hndl
+ *           port_num - 1 or 2
+ *           tbl_len_in - size of table provided for response (in GIDs)
+ *  output:
+ *           tbl_len_out - size of returned table (in GIDs)
+ *           param_gid_p  - pointer to table containing data (space provided by caller)
+ *  returns:
+ *           HH_OK
+ *           HH_EINVAL
+ *           HH_EINVAL_HCA_HNDL
+ *           HH_ERR
+ *
+ *  Comments: Does MAD query to get the data in
real time. + * + *****************************************************************************/ +HH_ret_t THH_hob_get_gid_tbl( HH_hca_hndl_t hca_hndl, + IB_port_t port, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* param_gid_p) +{ + MT_bool is_legacy = FALSE; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + FUNC_IN; + + MTL_DEBUG4("THH_hob_get_gid_tbl: hca_hndl=0x%p, port= %d, return table len = %d\n", + hca_hndl, port, tbl_len_in); + + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_get_gid_tbl : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_get_gid_tbl : ERROR : No device registered\n"); + return HH_EINVAL; + } + TEST_RETURN_FATAL(thh_hob_p); + + + THH_hob_get_legacy_mode(thh_hob_p, &is_legacy); + return(THH_hob_get_gid_tbl_local(hca_hndl,port,tbl_len_in, + tbl_len_out,param_gid_p,is_legacy)); + +} + +HH_ret_t THH_hob_init_gid_tbl( HH_hca_hndl_t hca_hndl, + IB_port_t port, + u_int16_t tbl_len_in, + u_int16_t* tbl_len_out, + IB_gid_t* param_gid_p) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + return(THH_hob_get_gid_tbl_local(hca_hndl,port,tbl_len_in, + tbl_len_out,param_gid_p,1)); + +} + +/****************************************************************************** + * Function: THH_hob_set_comp_eventh + * + * Description: Sets completion event handler for VIP layer (below vapi). Used internally + * by VAPI + * + * input: + * hca_hndl + * event - pointer to handler function + * private_data - pointer to context data provided to handler + * returns: + * HH_OK + * HH_EAGAIN + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Initial (dummy) handler is provided during open_hca. Therefore, + * the function THH_eventp_replace_handler is used to register the handler. + * + *****************************************************************************/ +HH_ret_t THH_hob_set_comp_eventh(HH_hca_hndl_t hca_hndl, + HH_comp_eventh_t event, + void* private_data) +{ + HH_ret_t ret; + THH_eventp_handler_t ev_hndlr; + + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_set_comp_eventh: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_set_comp_eventh : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_set_comp_eventh: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + if (event == NULL) { + event = THH_dummy_comp_event; + } else { + TEST_RETURN_FATAL(thh_hob_p); + } + ev_hndlr.comp_event_h = event; + + + if (thh_hob_p->eventp == (THH_eventp_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_set_comp_eventh: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + /* neutralizing for VAPI start */ + // return HH_OK; + + ret = THH_eventp_replace_handler(thh_hob_p->eventp,thh_hob_p->compl_eq, ev_hndlr, private_data); + if (ret != HH_OK) { + MTL_ERROR1( "THH_hob_set_comp_eventh: ERROR : cannot register completion event handler (%d)\n", ret); + return HH_ERR; + } + + return HH_OK; +} + +/****************************************************************************** + * Function: THH_hob_set_async_eventh + * + * Description: Sets async handler for VIP layer (below vapi). 
Used internally + * by VAPI + * + * input: + * hca_hndl + * event - pointer to handler function + * private_data - pointer to context data provided to handler + * returns: + * HH_OK + * HH_EAGAIN + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Initial (dummy) handler is provided during open_hca. Therefore, + * the function THH_eventp_replace_handler is used to register the handler. + * + *****************************************************************************/ +HH_ret_t THH_hob_set_async_eventh( HH_hca_hndl_t hca_hndl, + HH_async_eventh_t event, + void* private_data) +{ + HH_ret_t ret; + THH_eventp_handler_t ev_hndlr; + + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_set_async_eventh: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_set_async_eventh : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (event == NULL) {event = &THH_dummy_async_event; + } else { + TEST_RETURN_FATAL(thh_hob_p); + } + + ev_hndlr.ib_comp_event_h = event; + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_set_async_eventh: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + if (thh_hob_p->eventp == (THH_eventp_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_set_async_eventh: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } +// neutralizing for VAPI start +// return HH_OK; + + ret = THH_eventp_replace_handler(thh_hob_p->eventp,thh_hob_p->ib_eq, ev_hndlr, private_data); + if (ret != HH_OK) { + MTL_ERROR1( "THH_hob_set_async_eventh: ERROR : cannot register async event handler (%d)\n", ret); + return HH_ERR; + } + + /* track async event handler setting for use in fatal error handling */ + MOSAL_spinlock_dpc_lock(&thh_hob_p->async_spl); + thh_hob_p->async_eventh = event; + thh_hob_p->async_ev_private_context = private_data; + MOSAL_spinlock_unlock(&thh_hob_p->async_spl); + + return HH_OK; +} + + +#ifndef __DARWIN__ +int THH_hob_fatal_err_thread(void *arg) + { + THH_hob_t hob_p; + THH_hob_cat_err_thread_t *fatal_thread_obj_p = (THH_hob_cat_err_thread_t *)arg; + HH_event_record_t fatal_ev_rec; + THH_cmd_t cmd_if; + THH_eventp_t eventp; + call_result_t mosal_ret; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + MTL_TRACE2("%s: Initializing\n", __func__); + + hob_p = (THH_hob_t)(fatal_thread_obj_p->hob); + MOSAL_thread_set_name(&fatal_thread_obj_p->mto, "cleanup_thread"); + + + /* signal that thread is up */ + fatal_thread_obj_p->have_fatal = FALSE; + MOSAL_syncobj_signal(&fatal_thread_obj_p->start_sync); + + MTL_TRACE3("%s: about to wait on fatal error signal\n", __func__); + mosal_ret=MOSAL_syncobj_waiton(&hob_p->fatal_thread_obj.fatal_err_sync, + MOSAL_SYNC_TIMEOUT_INFINITE); + if (mosal_ret == MT_EINTR || hob_p->fatal_thread_obj.have_fatal == FALSE) { + MTL_DEBUG1(MT_FLFMT("%s: GOT termination request"), __func__); + /* if no fatal error, just return */ + MOSAL_syncobj_signal(&fatal_thread_obj_p->stop_sync); + return 1; + } + MTL_ERROR1(MT_FLFMT("%s: RECEIVED FATAL ERROR WAKEUP"), __func__); + + /* fatal error processing */ + if (THH_hob_get_cmd_if(hob_p, &cmd_if) == HH_OK) { + THH_cmd_handle_fatal(cmd_if); + } + if (THH_hob_get_eventp(hob_p,&eventp) == HH_OK) { + THH_eventp_handle_fatal(eventp); + } + + /* Halt HCA here */ + if (hob_p->module_flags.fatal_delay_halt == 0) { + MTL_DEBUG1(MT_FLFMT("%s: halting the HCA"), __func__); + THH_hob_halt_hca(hob_p); + } + + 
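+  /* State handshake: under fatal_spl the FATAL_START bit set by
+   * THH_hob_fatal_error() is replaced with FATAL_HCA_HALTED, and
+   * thh_fatal_complete_syncobj releases any caller blocked in
+   * THH_hob_wait_if_fatal() before the user async handler is invoked. */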
MOSAL_spinlock_dpc_lock(&hob_p->fatal_spl); + /* turn off STARTED bit, and turn on HALTED bit */ + hob_p->thh_state &= ~(THH_STATE_FATAL_START); + hob_p->thh_state |= THH_STATE_FATAL_HCA_HALTED; + MOSAL_syncobj_signal(&hob_p->thh_fatal_complete_syncobj); + MOSAL_spinlock_unlock(&hob_p->fatal_spl); + + /* INVOKE THE async event callback with fatal error */ + if (hob_p->hh_hca_hndl != NULL && hob_p->async_eventh != NULL) { + MTL_TRACE1(MT_FLFMT("%s: INVOKE ASYNC CALLBACK"), __func__); + memset(&fatal_ev_rec, 0, sizeof(HH_event_record_t)); + fatal_ev_rec.etype = VAPI_LOCAL_CATASTROPHIC_ERROR; + fatal_ev_rec.syndrome = hob_p->fatal_syndrome; + (*(hob_p->async_eventh))(hob_p->hh_hca_hndl, &fatal_ev_rec, hob_p->async_ev_private_context); + } + MOSAL_syncobj_signal(&fatal_thread_obj_p->stop_sync); + return 0; +} +#endif /* not defined __DARWIN__ */ + + +/* + * mosal_find_capbility_ptr + */ +static call_result_t mosal_find_capbility_ptr(u_int8_t bus, + u_int8_t dev_func, + u_int8_t cap_id, + u_int8_t *cap_ptr_p) +{ + call_result_t rc; + u_int8_t cap_ptr; + u_int32_t cap_val_dw; + + /* read cap pointer */ + rc = MOSAL_PCI_read_config_byte(bus, dev_func, 0x34, &cap_ptr); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed reading cap pointer - %s"), __func__, mtl_strerror(rc)); + return rc; + } + + while ( 1 ) { + rc = MOSAL_PCI_read_config_dword(bus, dev_func, cap_ptr, &cap_val_dw); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed reading dword at address 0x%x - %s"), __func__, cap_ptr, mtl_strerror(rc)); + return rc; + } + if ( (cap_val_dw&0xff) == cap_id ) { + *cap_ptr_p = cap_ptr; + return MT_OK; + } + cap_ptr = (u_int8_t)(cap_val_dw>>8) & 0xfc; /* mask 2 lsbs */ + if ( cap_ptr == 0 ) break; + } + return MT_ENORSC; +} + + +/* + * THH_set_max_read_request_size + * + * set Max Read Request Size for Arbel in Tavor mode 5 => 4096 bytes + */ +static call_result_t THH_set_max_read_request_size(THH_hw_props_t *hw_props_p) +{ + call_result_t rc; + u_int8_t cap_ptr; + u_int16_t cap_val; + u_int8_t mrrs_val = 5; /* => 4096 bytes */ + const u_int8_t rbc_cap_id = 16; /* Max Read Request Size capability ID */ + + rc = mosal_find_capbility_ptr(hw_props_p->bus, hw_props_p->dev_func, rbc_cap_id, &cap_ptr); + if ( rc != MT_OK ) { + MTL_DEBUG1(MT_FLFMT("%s: failed to find MRRS capability - %s"), __func__, mtl_strerror_sym(rc)); + return rc; + } + rc = MOSAL_PCI_read_config_word(hw_props_p->bus, hw_props_p->dev_func, cap_ptr+8, &cap_val); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed to read rbc - %s"), __func__, mtl_strerror_sym(rc)); + return rc; + } + cap_val &= 0x8fff; + cap_val |= (mrrs_val<<12); + rc = MOSAL_PCI_write_config_word(hw_props_p->bus, hw_props_p->dev_func, cap_ptr+8, cap_val); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed to write rbc - %s"), __func__, mtl_strerror_sym(rc)); + } + return rc; +} + +/* + * THH_set_rbc + * + * set the default Read Byte Count for Tavor - 3 ==> 4096 bytes + */ +static call_result_t THH_set_rbc(THH_hw_props_t *hw_props_p) +{ + call_result_t rc; + u_int8_t cap_ptr, cap_val; + u_int8_t rbc_val = 3; + const u_int8_t rbc_cap_id = 7; /* Read Byte Count capability ID */ + + rc = mosal_find_capbility_ptr(hw_props_p->bus, hw_props_p->dev_func, rbc_cap_id, &cap_ptr); + if ( rc != MT_OK ) { + MTL_DEBUG1(MT_FLFMT("%s: failed to find RBC capability - %s"), __func__, mtl_strerror_sym(rc)); + return rc; + } + rc = MOSAL_PCI_read_config_byte(hw_props_p->bus, hw_props_p->dev_func, cap_ptr+2, &cap_val); + if ( rc != MT_OK ) { + 
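+    /* could not read the current RBC byte; the device default is left in place */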
MTL_ERROR1(MT_FLFMT("%s: failed to read rbc - %s"), __func__, mtl_strerror_sym(rc)); + return rc; + } + cap_val &= 0xf3; + cap_val |= (rbc_val<<2); + rc = MOSAL_PCI_write_config_byte(hw_props_p->bus, hw_props_p->dev_func, cap_ptr+2, cap_val); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: failed to write rbc - %s"), __func__, mtl_strerror_sym(rc)); + } + return rc; +} + + + +/* + * THH_set_capabilities + */ +static void THH_set_capabilities(THH_hw_props_t *hw_props_p) +{ + /* set the default Read Byte Count for Tavor */ + THH_set_rbc(hw_props_p); + + /* set max read request size in capabilty structure of PCI express */ + THH_set_max_read_request_size(hw_props_p); +} + +/****************************************************************************** + * Function: THH_hob_create + * + * Description: Creates the HOB object for an HCA, and registers it in HH + * + * input: + * hw_props_p -- PCI properties (BARs, etc) + * hca_seq_num - a sequence number assigned to this HCA to differentiate it + * from other HCAs on this host + * mod_flags - flags passed in at module initialization (e.g., insmod) + * output: + * hh_hndl_p - size of returned table (in pkeys) + * returns: + * HH_OK + * HH_EAGAIN + * HH_ERR -- other errors + * + * Comments: This function involves the following steps: + * 1.Allocate THH_hob data context. + * 2.Create the THH_cmd_if object instance (in order to enable + * queries of HCA resources even before HCA is opened). + * 3.Invoke ENABLE_SYS command ((polling mode). + * 4.Query HCA for available DDRmemory resources + * (use the Command interface in polling mode)and create + * the THH_ddrmm object based on results. + * 5.Query HCA for other capabilties and save them in THH_hob context. + * 6.Register HCA in HH (i.e.call HH_add_hca_dev()). + * + * Also initializes the HOB mutex for controlling Open HCA and Close HCA + * + *****************************************************************************/ +HH_ret_t THH_hob_create(/*IN*/ THH_hw_props_t *hw_props_p, + /*IN*/ u_int32_t hca_seq_num, + /*IN*/ THH_module_flags_t *mod_flags, + /*OUT*/ HH_hca_hndl_t *hh_hndl_p ) +{ + + //HH_hca_hndl_t hca_hndl = 0; + HH_ret_t ret; + HH_if_ops_t *if_ops_p = 0; + THH_hob_t hob_p; + MT_size_t ddr_size; + MT_size_t fw_size; + HH_hca_hndl_t new_hh_hndl; + THH_cmd_status_t cmd_ret; + HH_hca_dev_t tdev; + u_int64_t fw_version; + u_int32_t req_fw_maj_version=0; + u_int16_t req_fw_min_version = 0; + u_int16_t req_fw_submin_version = 0; + int int_ret = 0; + call_result_t mosal_ret = MT_OK; + MT_phys_addr_t cmdif_uar0_arg; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + MTL_DEBUG4("Entering THH_hob_create\nhca_seq_num = %d, legacy_flag = %s, av_in_host_mem = %s\n", + hca_seq_num, (mod_flags->legacy_sqp == FALSE ? "FALSE" : "TRUE"), + (mod_flags->av_in_host_mem == FALSE ? 
"FALSE" : "TRUE")); + THH_print_hw_props(hw_props_p); + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_create: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + THH_set_capabilities(hw_props_p); + + /* Allocate THH hob structure */ + hob_p = (THH_hob_t)MALLOC(sizeof(struct THH_hob_st)); + + if (hob_p == 0) { + MTL_ERROR1("THH_hob_create: could not allocate memory for THH hob\n"); + return HH_EAGAIN; + } + + MTL_DEBUG1("THH_hob_create: HOB address = 0x%p\n", hob_p); + memset(hob_p, 0, sizeof(struct THH_hob_st)); + + + /* initialize the HOB mutex */ + MOSAL_mutex_init(&(hob_p->mtx)); + + /* set device name */ + sprintf(hob_p->dev_name, "InfiniHost%d",hca_seq_num); +#if !defined(__DARWIN__) + printk("\nMellanox Tavor Device Driver is creating device \"%s\" (bus=%02x, devfn=%02x)\n\n", + hob_p->dev_name,hw_props_p->bus,hw_props_p->dev_func); +#else + MTL_DEBUG1("\nMellanox Tavor Device Driver is creating device \"%s\"\n\n", hob_p->dev_name); +#endif + + /* set embedded object handles to invalid */ + + hob_p->cmd = (THH_cmd_t)THH_INVALID_HNDL; + hob_p->ddrmm = (THH_ddrmm_t)THH_INVALID_HNDL; + hob_p->uldm = (THH_uldm_t)THH_INVALID_HNDL; + hob_p->mrwm = (THH_mrwm_t)THH_INVALID_HNDL; + hob_p->cqm = (THH_cqm_t)THH_INVALID_HNDL; +//hob_p->eecm = (THH_eecm_t)THH_INVALID_HNDL; /* JPM -- EECM ADDITIONS HERE */ + hob_p->qpm = (THH_qpm_t)THH_INVALID_HNDL; + hob_p->udavm = (THH_udavm_t)THH_INVALID_HNDL; + hob_p->mcgm = (THH_mcgm_t)THH_INVALID_HNDL; + hob_p->eventp = (THH_eventp_t)THH_INVALID_HNDL; + hob_p->kar = (THH_uar_t)THH_INVALID_HNDL; + + /* initialize EQ handles to all EQs invalid */ + hob_p->compl_eq = THH_INVALID_EQN; + hob_p->ib_eq = THH_INVALID_EQN; + + /* initialize fatal error handling fields */ + memcpy(&(hob_p->hw_props), hw_props_p, sizeof(THH_hw_props_t)); + memcpy(&(hob_p->module_flags), mod_flags, sizeof(THH_module_flags_t)); + hob_p->hca_seq_num = hca_seq_num; + hob_p->thh_state = THH_STATE_CREATING; + if (MOSAL_spinlock_init(&(hob_p->fatal_spl)) != MT_OK){ + MTL_ERROR4(MT_FLFMT("%s: Failed to initializing fatal error spinlock"), __func__); + ret= HH_ERR; + goto err_free_hob; + } + if (MOSAL_spinlock_init(&(hob_p->async_spl)) != MT_OK){ + MTL_ERROR4(MT_FLFMT("%s: Failed to initializing async handler tracking spinlock"), __func__); + ret= HH_ERR; + goto err_free_hob; + } + /* dummy async event handler is passed to THH_eventp when initializing ib_eq */ + hob_p->async_eventh = &THH_dummy_async_event; + hob_p->async_ev_private_context = NULL; + +#ifndef __DARWIN__ + /* get bridge config info */ + ret = THH_hob_get_pci_br_config(hob_p); + + /* get hca config info */ + ret = read_pci_config(hw_props_p->bus,hw_props_p->dev_func,hob_p->pci_hca_info.config); + hob_p->pci_hca_info.bus = hw_props_p->bus; + hob_p->pci_hca_info.dev_func = hw_props_p->dev_func; + hob_p->pci_hca_info.is_valid = TRUE; +#endif /* not defined __DARWIN__ */ + + /* create the THH_cmd object so that can initialize and query the adapter */ + /* HCR register offset */ + if ( mod_flags->cmdif_post_uar0) { + cmdif_uar0_arg = hw_props_p->uar_base; + } + else { + cmdif_uar0_arg = (MT_phys_addr_t) MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF); + } + ret = THH_cmd_create(hob_p, hw_props_p->hw_ver, hw_props_p->cr_base, cmdif_uar0_arg, &(hob_p->cmd), + mod_flags->inifinite_cmd_timeout, mod_flags->num_cmds_outs); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_create: could not create CMD object (%d)\n", ret); + ret = HH_ERR; + goto err_free_hob; + } + + + /* invoke SYS_ENA command on tavor to initialize it -- load firmware 
from flash, etc.*/
+  cmd_ret = THH_cmd_SYS_EN(hob_p->cmd);
+  if (cmd_ret != THH_CMD_STAT_OK) {
+    if (cmd_ret == THH_CMD_STAT_EFATAL) {
+      MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_SYS_EN"));
+      ret = HH_EFATAL;
+    } else {
+      MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_SYS_EN (%d)"), cmd_ret);
+      ret = HH_ERR;
+    }
+    goto cmd_err;
+  }
+
+  /* do query firmware command */
+  cmd_ret = THH_cmd_QUERY_FW(hob_p->cmd, &(hob_p->fw_props));
+  if (cmd_ret != THH_CMD_STAT_OK) {
+    if (cmd_ret == THH_CMD_STAT_EFATAL) {
+      MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_QUERY_FW"));
+      ret = HH_EFATAL;
+    } else {
+      MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_QUERY_FW (%d)"), cmd_ret);
+      ret = HH_ERR;
+    }
+    goto undo_sys_ena;
+  }
+  fw_version = hob_p->fw_props.fw_rev_major;
+  fw_version = (fw_version <<16) | hob_p->fw_props.fw_rev_minor;
+  fw_version = (fw_version <<16) | hob_p->fw_props.fw_rev_subminor;
+  /* enter data into version info structure */
+  hob_p->version_info.fw_ver_major = hob_p->fw_props.fw_rev_major;
+  hob_p->version_info.fw_ver_minor = hob_p->fw_props.fw_rev_minor;
+  hob_p->version_info.fw_ver_subminor = hob_p->fw_props.fw_rev_subminor;
+  hob_p->version_info.hw_ver = hob_p->hw_props.hw_ver;
+  hob_p->version_info.cmd_if_ver = hob_p->fw_props.cmd_interface_rev;
+
+  if (fw_version < THH_MIN_FW_VERSION) {
+    req_fw_maj_version = (u_int32_t) ((((u_int64_t)THH_MIN_FW_VERSION)>>32) & MAKE_ULONGLONG(0xFFFFFFFF));
+    req_fw_min_version = (u_int16_t) ((((u_int64_t)THH_MIN_FW_VERSION)>>16) & MAKE_ULONGLONG(0xFFFF));
+    req_fw_submin_version = (u_int16_t) (((u_int64_t)THH_MIN_FW_VERSION) & MAKE_ULONGLONG(0xFFFF));
+    MTL_ERROR1("THH_hob_create: INSTALLED FIRMWARE VERSION IS NOT SUPPORTED:\n    Installed: %x.%x.%x,  Minimum Required: %x.%x.%x\n\n",
+               hob_p->fw_props.fw_rev_major, hob_p->fw_props.fw_rev_minor, hob_p->fw_props.fw_rev_subminor,
+               req_fw_maj_version, req_fw_min_version, req_fw_submin_version);
+    ret = HH_ERR;
+    goto undo_sys_ena;
+  }
+
+  /* map the firmware error buffer if the appropriate fw version is installed */
+  if ((fw_version >= THH_MIN_FW_ERRBUF_VERSION) &&
+      (hob_p->fw_props.error_buf_start != (u_int64_t) 0) &&
+      (hob_p->fw_props.error_buf_size != 0))
+  {
+
+    /* wa for FW bug number 19695: the error buffer must lie within cr-space */
+    if ( (hob_p->fw_props.error_buf_start < hw_props_p->cr_base) ||
+         (hob_p->fw_props.error_buf_start > (hw_props_p->cr_base+0x100000)) ) {
+      MTL_ERROR1(MT_FLFMT("%s: fw_props.error_buf_start is outside of cr-space  start="U64_FMT", size=0x%x"),
+                 __func__, hob_p->fw_props.error_buf_start, hob_p->fw_props.error_buf_size);
+      ret = HH_ERR;
+      goto undo_sys_ena;
+    }
+
+    MTL_DEBUG4(MT_FLFMT("THH_hob_create: using cat err buf. 
pa=0x"U64_FMT", sz=%d"), + hob_p->fw_props.error_buf_start, hob_p->fw_props.error_buf_size); + hob_p->fw_error_buf_start_va = MOSAL_io_remap(hob_p->fw_props.error_buf_start, + 4*(hob_p->fw_props.error_buf_size)); + if (hob_p->fw_error_buf_start_va == (MT_virt_addr_t)(MT_ulong_ptr_t) NULL) { + MTL_ERROR1(MT_FLFMT("%s: Could not map fw error buffer (phys addr = "U64_FMT", size=%d"), + __func__, hob_p->fw_props.error_buf_start, hob_p->fw_props.error_buf_size); + } else { + hob_p->fw_error_buf = TNMALLOC(u_int32_t,hob_p->fw_props.error_buf_size); + if (hob_p->fw_error_buf == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Could not allocate buffer for FW catastrophic error info"),__func__); + } + } + } + + /* Get device limits */ + cmd_ret = THH_cmd_QUERY_DEV_LIM(hob_p->cmd, &(hob_p->dev_lims)); + if (cmd_ret != THH_CMD_STAT_OK) { + if (cmd_ret == THH_CMD_STAT_EFATAL) { + MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_QUERY_DEV_LIM")); + ret = HH_EFATAL; + } else { + MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_QUERY_DEV_LIM (%d)"), cmd_ret); + ret = HH_ERR; + } + goto undo_sys_ena; + } + + MTL_DEBUG1(MT_FLFMT("%s: log_max_srq=%u log2_rsvd_srqs=%u srq_entry_sz=%u srq=%ssupported"), __func__, + hob_p->dev_lims.log_max_srqs, hob_p->dev_lims.log2_rsvd_srqs, + hob_p->dev_lims.srq_entry_sz, hob_p->dev_lims.srq ? " ":"NOT-"); + + /* Enable SRQ only for FW version 3.1 and up */ + if ((hob_p->dev_lims.srq) && + (fw_version < THH_MIN_FW_VERSION_SRQ)) { + MTL_ERROR1("%s: Disabling SRQ support due to FW version: " + "Installed: %x.%x.%x, Minimum Required: 3.1.0\n", __func__, + hob_p->fw_props.fw_rev_major, hob_p->fw_props.fw_rev_minor, hob_p->fw_props.fw_rev_subminor); + hob_p->dev_lims.srq= FALSE; + } + + + /* query tavor for DDR memory resources data */ + cmd_ret = THH_cmd_QUERY_DDR(hob_p->cmd, &(hob_p->ddr_props)); + if (cmd_ret != THH_CMD_STAT_OK) { + if (cmd_ret == THH_CMD_STAT_EFATAL) { + MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_QUERY_DDR")); + ret = HH_EFATAL; + } else { + MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_QUERY_DDR (%d)"), cmd_ret); + ret = HH_ERR; + } + goto undo_sys_ena; + } + + /* HIDE-DDR set in firmware: sanity checks */ +#if 0 + /* 1. fail if using 32-bit platform (not PAE and not IA64) */ + if ((hob_p->ddr_props.dh == TRUE) && (sizeof(MT_phys_addr_t) <=4)) { + MTL_ERROR1("THH_hob_create: HIDE_DDR is not supported on platforms using 32-bit physical addresses\n\n"); + ret = HH_ERR; + goto undo_sys_ena; + } +#endif + + /* 2. Fail if firmware version is not recent enough. 
*/ + if ((hob_p->ddr_props.dh == TRUE) && (fw_version < THH_MIN_FW_HIDE_DDR_VERSION)) { + req_fw_maj_version = (u_int32_t) ((((u_int64_t)THH_MIN_FW_HIDE_DDR_VERSION)>>32) & 0xFFFFFFFF); + req_fw_min_version = (u_int16_t) ((((u_int64_t)THH_MIN_FW_HIDE_DDR_VERSION)>>16) & 0xFFFF); + req_fw_submin_version = (u_int16_t) (((u_int64_t)THH_MIN_FW_HIDE_DDR_VERSION) & 0xFFFF); + MTL_ERROR1("THH_hob_create: INSTALLED FIRMWARE VERSION DOES NOT SUPPORT HIDE_DDR:\n Installed: %x.%x.%x, Minimum Required: %x.%x.%x\n\n", + hob_p->fw_props.fw_rev_major, hob_p->fw_props.fw_rev_minor, hob_p->fw_props.fw_rev_subminor, + req_fw_maj_version, req_fw_min_version, req_fw_submin_version); + ret = HH_ERR; + goto undo_sys_ena; + } + +#ifdef DEBUG_MEM_OV +cmdif_dbg_ddr = hob_p->ddr_props.ddr_start_adr; /* address in ddr used for out params in debug mode */ +#endif + + /* print info messages that device is operating in HIDE DDR mode (not an error) */ + if (hob_p->ddr_props.dh == TRUE) { + MTL_ERROR1("Device %s is operating in HIDE_DDR mode.\n", hob_p->dev_name); + } + + +/* query tavor for adapter data */ + cmd_ret = THH_cmd_QUERY_ADAPTER(hob_p->cmd, &(hob_p->adapter_props)); + if (cmd_ret != THH_CMD_STAT_OK) { + if (cmd_ret == THH_CMD_STAT_EFATAL) { + MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_QUERY_ADAPTER")); + ret = HH_EFATAL; + } else { + MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_QUERY_ADAPTER (%d)"), cmd_ret); + ret = HH_ERR; + } + goto undo_sys_ena; + } + if ( (hob_p->fw_props.fw_end_addr <= hob_p->fw_props.fw_base_addr) || + hob_p->fw_props.fw_base_addr < hob_p->ddr_props.ddr_start_adr || + hob_p->fw_props.fw_end_addr > hob_p->ddr_props.ddr_end_adr) { + /* FW region is either improper, or does not lie within bounds of DDR */ + MTL_ERROR1("THH_hob_create: FW region is either improper, or is outside DDR\nFW end = "U64_FMT + ", FW start = "U64_FMT"\n DDR end = "U64_FMT", DDR start = "U64_FMT"\n", + hob_p->fw_props.fw_end_addr, hob_p->fw_props.fw_base_addr, + hob_p->ddr_props.ddr_end_adr, hob_p->ddr_props.ddr_start_adr); + ret = HH_ERR; + goto undo_sys_ena; + + } + fw_size = (MT_size_t) (hob_p->fw_props.fw_end_addr - hob_p->fw_props.fw_base_addr + 1); + + if (hob_p->ddr_props.ddr_end_adr < hob_p->ddr_props.ddr_start_adr) { + MTL_ERROR1("THH_hob_create: DDR end address ("U64_FMT") is less than DDR base addr (" U64_FMT ")\n", + hob_p->ddr_props.ddr_end_adr, hob_p->ddr_props.ddr_start_adr); + ret = HH_ERR; + goto undo_sys_ena; + } + + ddr_size = (MT_size_t) (hob_p->ddr_props.ddr_end_adr - hob_p->ddr_props.ddr_start_adr + 1) ; + hob_p->profile.ddr_size = ddr_size - fw_size; + + /* DDR size code is used in THH_calculate_profile to set number of QPs proportionally to size */ + hob_p->profile.ddr_size_code = THH_get_ddr_size_code(ddr_size - fw_size); + + ret = THH_ddrmm_create((MT_phys_addr_t) (hob_p->ddr_props.ddr_start_adr), ddr_size, &(hob_p->ddrmm)); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_create: could not create DDRMM object (%d)\n", ret); + goto undo_sys_ena; + } + + ret = THH_ddrmm_reserve(hob_p->ddrmm, hob_p->fw_props.fw_base_addr, fw_size); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_create: could not reserve FW space in DDRMM object (err = %d)\n", ret); + goto undo_ddrm_create; + } + +#ifdef DEBUG_MEM_OV + ret = THH_ddrmm_reserve(hob_p->ddrmm, hob_p->ddr_props.ddr_start_adr, CMDIF_SIZE_IN_DDR); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_create: could not reserve DDR Outbox space in DDRMM object (err = %d)\n", ret); + goto undo_ddrm_create; + } +#endif + + /*create the delay 
unlock object for catastrophic error use */
+  int_ret = VIP_delay_unlock_create(&hob_p->delay_unlocks);
+  if (int_ret != 0) {
+    MTL_ERROR1("THH_hob_create: could not create delay unlock object (err = %d)\n", int_ret);
+    ret = HH_ENOMEM;
+    goto delay_unlock_err;
+  }
+
+#ifndef __DARWIN__ /* TODO, need to take care of fatal errors in Darwin */
+
+  /* launch catastrophic error thread */
+  hob_p->fatal_thread_obj.hob = (struct THH_hob_st *)hob_p;
+  MOSAL_syncobj_init(&hob_p->fatal_thread_obj.start_sync);
+  MOSAL_syncobj_init(&hob_p->fatal_thread_obj.stop_sync);
+  MOSAL_syncobj_init(&hob_p->fatal_thread_obj.fatal_err_sync);
+  MOSAL_syncobj_init(&hob_p->thh_fatal_complete_syncobj);
+  hob_p->fatal_thread_obj.have_fatal = FALSE;
+  mosal_ret = MOSAL_thread_start(&hob_p->fatal_thread_obj.mto, MOSAL_KTHREAD_CLONE_FLAGS,
+                                 THH_hob_fatal_err_thread, (void *)(&(hob_p->fatal_thread_obj)));
+//  if (mosal_ret != MT_OK) {
+//    MTL_ERROR1("THH_hob_create: could not create fatal error thread (err = %d)\n", mosal_ret);
+//    ret = HH_ERR;
+//    goto fatal_thr_create_err;
+//  }
+
+  /* wait for fatal thread initialization complete */
+  mosal_ret = MOSAL_syncobj_waiton(&(hob_p->fatal_thread_obj.start_sync), 10000000);
+  if (mosal_ret != MT_OK) {
+    if (mosal_ret == MT_EINTR) {
+      MTL_DEBUG1(MT_FLFMT("%s: Received OS interrupt while initializing fatal error thread (err = %d)"),
+                 __func__,mosal_ret);
+      ret = HH_EINTR;
+    } else {
+      MTL_ERROR1(MT_FLFMT("%s: Timeout on initializing fatal error thread (err = %d)"),
+                 __func__,mosal_ret);
+      ret = HH_ERR;
+    }
+    goto fatal_thr_init_err;
+  }
+  MTL_DEBUG4("%s: Created fatal error thread.\n", __func__);
+  /* set up the procedure mapping table and register the tavor device */
+
+#endif  /* ! defined __DARWIN__ */
+
+  if_ops_p = &(hob_p->if_ops);
+
+#ifndef IVAPI_THH
+  HH_ifops_tbl_set_enosys(if_ops_p); /* by default, all return HH_ENOSYS */
+#endif
+
+  /* HCA Calls */
+  if_ops_p->HHIF_open_hca = &THH_hob_open_hca;
+  if_ops_p->HHIF_close_hca = &THH_hob_close_hca;
+  if_ops_p->HHIF_alloc_ul_resources = &THH_hob_alloc_ul_res;
+  if_ops_p->HHIF_free_ul_resources = &THH_hob_free_ul_res;
+  if_ops_p->HHIF_query_hca = &THH_hob_query;
+  if_ops_p->HHIF_modify_hca = &THH_hob_modify;
+
+  /* Misc HCA Operations*/
+  if_ops_p->HHIF_query_port_prop = &THH_hob_query_port_prop;
+  if_ops_p->HHIF_get_pkey_tbl = &THH_hob_get_pkey_tbl;
+  if_ops_p->HHIF_get_gid_tbl = &THH_hob_get_gid_tbl;
+
+  /* Protection Domain Calls */
+  if_ops_p->HHIF_alloc_pd = &THH_hob_alloc_pd;
+  if_ops_p->HHIF_free_pd = &THH_hob_free_pd;
+  if_ops_p->HHIF_alloc_rdd = &THH_hob_alloc_rdd;
+  if_ops_p->HHIF_free_rdd = &THH_hob_free_rdd;
+
+  /* privileged UD AV */
+  if_ops_p->HHIF_create_priv_ud_av = &THH_hob_create_ud_av;
+  if_ops_p->HHIF_modify_priv_ud_av = &THH_hob_modify_ud_av;
+  if_ops_p->HHIF_query_priv_ud_av = &THH_hob_query_ud_av;
+  if_ops_p->HHIF_destroy_priv_ud_av = &THH_hob_destroy_ud_av;
+
+  /* Memory Registration */
+  if_ops_p->HHIF_register_mr = &THH_hob_register_mr;
+  if_ops_p->HHIF_reregister_mr = &THH_hob_reregister_mr;
+  if_ops_p->HHIF_register_smr = &THH_hob_register_smr;
+  if_ops_p->HHIF_query_mr = &THH_hob_query_mr;
+  if_ops_p->HHIF_deregister_mr = &THH_hob_deregister_mr;
+
+  if_ops_p->HHIF_alloc_mw = &THH_hob_alloc_mw;
+  if_ops_p->HHIF_query_mw = &THH_hob_query_mw;
+  if_ops_p->HHIF_free_mw = &THH_hob_free_mw;
+
+  /* Fast memory regions */
+  if_ops_p->HHIF_alloc_fmr = &THH_hob_alloc_fmr;
+  if_ops_p->HHIF_map_fmr = &THH_hob_map_fmr;
+  if_ops_p->HHIF_unmap_fmr = &THH_hob_unmap_fmr;
+  if_ops_p->HHIF_free_fmr = &THH_hob_free_fmr;
+
+  /* Completion Queues */
+  if_ops_p->HHIF_create_cq = &THH_hob_create_cq;
+  if_ops_p->HHIF_resize_cq = &THH_hob_resize_cq;
+  if_ops_p->HHIF_query_cq = &THH_hob_query_cq;
+  if_ops_p->HHIF_destroy_cq = &THH_hob_destroy_cq;
+
+  /* Queue Pair */
+  if_ops_p->HHIF_create_qp = &THH_hob_create_qp;
+  if_ops_p->HHIF_get_special_qp = &THH_hob_get_special_qp;
+  if_ops_p->HHIF_modify_qp = &THH_hob_modify_qp;
+  if_ops_p->HHIF_query_qp = &THH_hob_query_qp;
+  if_ops_p->HHIF_destroy_qp = &THH_hob_destroy_qp;
+#if defined(MT_SUSPEND_QP)
+  if_ops_p->HHIF_suspend_qp = &THH_hob_suspend_qp;
+  if_ops_p->HHIF_suspend_cq = &THH_hob_suspend_cq;
+#endif
+  /* SRQ */
+  if_ops_p->HHIF_create_srq = &THH_hob_create_srq;
+  if_ops_p->HHIF_query_srq = &THH_hob_query_srq;
+  if_ops_p->HHIF_modify_srq = &THH_hob_modify_srq;
+  if_ops_p->HHIF_destroy_srq = &THH_hob_destroy_srq;
+
+  /* EEC */
+  if_ops_p->HHIF_create_eec = &THH_hob_create_eec;
+  if_ops_p->HHIF_modify_eec = &THH_hob_modify_eec;
+  if_ops_p->HHIF_query_eec = &THH_hob_query_eec;
+  if_ops_p->HHIF_destroy_eec = &THH_hob_destroy_eec;
+
+  if_ops_p->HHIF_set_comp_eventh = &THH_hob_set_comp_eventh;
+  if_ops_p->HHIF_set_async_eventh = &THH_hob_set_async_eventh;
+
+  /* Multicast groups */
+  if_ops_p->HHIF_attach_to_multicast = &THH_hob_attach_to_multicast;
+  if_ops_p->HHIF_detach_from_multicast = &THH_hob_detach_from_multicast;
+
+  /* Process local MAD */
+  if_ops_p->HHIF_process_local_mad = &THH_hob_process_local_mad;
+
+  if_ops_p->HHIF_ddrmm_alloc = &THH_hob_ddrmm_alloc;
+  if_ops_p->HHIF_ddrmm_query = &THH_hob_ddrmm_query;
+  if_ops_p->HHIF_ddrmm_free = &THH_hob_ddrmm_free;
+
+  /*
+   *  Register device in the init structure
+   */
+  tdev.dev_desc = hob_p->dev_name;
+  tdev.user_lib = "TBD libhcatavor";    /* for future dynamic use */
+  tdev.vendor_id = MT_MELLANOX_IEEE_VENDOR_ID;
+  tdev.dev_id = (u_int32_t)hw_props_p->device_id;
+  MTL_DEBUG1("hw_props_p:  device_id = 0x%X, pci_vendor_id=0x%X,hw_ver=0x%X\n",
+             hw_props_p->device_id, hw_props_p->pci_vendor_id, hw_props_p->hw_ver);
+  tdev.fw_ver = hob_p->fw_props.fw_rev_major;
+  tdev.fw_ver = (tdev.fw_ver <<16) | hob_p->fw_props.fw_rev_minor;
+  tdev.fw_ver = (tdev.fw_ver <<16) | hob_p->fw_props.fw_rev_subminor;
+  tdev.hw_ver = hob_p->hw_props.hw_ver;
+  tdev.if_ops = if_ops_p;
+  tdev.hca_ul_resources_sz = sizeof(THH_hca_ul_resources_t);
+  tdev.pd_ul_resources_sz = sizeof(THH_pd_ul_resources_t);
+  tdev.cq_ul_resources_sz = sizeof(THH_cq_ul_resources_t);
+  tdev.srq_ul_resources_sz = sizeof(THH_srq_ul_resources_t);
+  tdev.qp_ul_resources_sz = sizeof(THH_qp_ul_resources_t);
+  tdev.device = (void *) hob_p;
+  tdev.status = HH_HCA_STATUS_CLOSED;
+
+  /* Grab the mutex now, just before adding the device to HH */
+  MTL_DEBUG4("THH_hob_create: about to grab mutex\n");
+  if (MOSAL_mutex_acq(&(hob_p->mtx), TRUE) != MT_OK) {
+    MTL_DEBUG1(MT_FLFMT("%s: Received signal. 
returning HH_EINTR"), __func__); + ret = HH_EINTR; + goto err_acq_mutex; + } + + MTL_DEBUG4("THH_hob_create: Before HH_add_hca_dev\n"); + ret = HH_add_hca_dev(&tdev, &new_hh_hndl); + if (ret != HH_OK) { + MTL_ERROR1("THH_hob_create: could not register device %s in HCA HAL\n", + hob_p->dev_name); + goto err_release_mutex; + } + + /* insert HH hca handle into device structure */ + ((THH_hob_t)(new_hh_hndl->device))->hh_hca_hndl = new_hh_hndl; + MTL_TRACE1("THH_hob_create: hh_hca_hndl created = %p\n", (void *) new_hh_hndl); + + if (hh_hndl_p != NULL) { + *hh_hndl_p = new_hh_hndl; + } + MOSAL_spinlock_dpc_lock(&hob_p->fatal_spl); + hob_p->thh_state = THH_STATE_CLOSED; + MOSAL_spinlock_unlock(&hob_p->fatal_spl); + + MOSAL_mutex_rel(&(hob_p->mtx)); + + return(HH_OK); + + /* ERROR HANDLING: undoes previous steps in reverse order, as needed */ +err_release_mutex: + MOSAL_mutex_rel(&(hob_p->mtx)); +err_acq_mutex: + VIP_delay_unlock_destroy(hob_p->delay_unlocks); + +fatal_thr_init_err: + /* signal the waited-on sync object, so that thread exits */ + MOSAL_syncobj_signal(&(hob_p->fatal_thread_obj.fatal_err_sync)); + +//fatal_thr_create_err: +delay_unlock_err: +undo_ddrm_create: + THH_ddrmm_destroy(hob_p->ddrmm); + +undo_sys_ena: + if (hob_p->fw_error_buf_start_va != (MT_virt_addr_t)(MT_ulong_ptr_t) NULL) { + MOSAL_io_unmap(hob_p->fw_error_buf_start_va); + } + + if (hob_p->fw_error_buf != NULL) { + FREE(hob_p->fw_error_buf); + } + +cmd_ret = THH_cmd_SYS_DIS(hob_p->cmd); +if (cmd_ret != THH_CMD_STAT_OK) { + if (cmd_ret == THH_CMD_STAT_EFATAL) { + MTL_ERROR1(MT_FLFMT("THH_hob_create: FATAL ERROR in THH_cmd_SYS_DIS")); + } else { + MTL_ERROR1(MT_FLFMT("THH_hob_create: CMD_error in THH_cmd_SYS_DIS (%d)"), cmd_ret); + } +} +cmd_err: + THH_cmd_destroy(hob_p->cmd); + +err_free_hob: + MOSAL_mutex_free(&(hob_p->mtx)); + FREE(hob_p); + return (ret); +} + + +/***************************************************************************** +****************************************************************************** +************** EXTERNALLY VISIBLE FUNCTIONS, WITH PROTOTYPES IN THH_HOB.H **** +****************************************************************************** +*****************************************************************************/ + + +HH_ret_t THH_hob_get_ver_info ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_ver_info_t *version_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_ver_info: ERROR : No device registered\n"); + return HH_EINVAL; + } + + memcpy(version_p, &(hob->version_info), sizeof(THH_ver_info_t)); + return HH_OK; +} + + +HH_ret_t THH_hob_get_cmd_if ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_cmd_t *cmd_if_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_cmd_if: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->cmd == THH_CMDIF_INVALID_HANDLE) { + MTL_ERROR1("THH_hob_get_cmd_if: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *cmd_if_p = hob->cmd; + return HH_OK; +} + +HH_ret_t THH_hob_get_uldm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_uldm_t *uldm_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_uldm: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_uldm: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *uldm_p = hob->uldm; + return HH_OK; +} + +HH_ret_t THH_hob_get_ddrmm ( /*IN*/ THH_hob_t hob, 
+ /*OUT*/ THH_ddrmm_t *ddrmm_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_ddrmm: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->ddrmm == (THH_ddrmm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_ddrmm: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *ddrmm_p = hob->ddrmm; + return HH_OK; +} + +HH_ret_t THH_hob_get_mrwm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_mrwm_t *mrwm_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_mrwm: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_mrwm: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *mrwm_p = hob->mrwm; + return HH_OK; +} + +HH_ret_t THH_hob_get_qpm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_qpm_t *qpm_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_qpm: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_qpm: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *qpm_p = hob->qpm; + return HH_OK; +} + +HH_ret_t THH_hob_get_cqm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_cqm_t *cqm_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_cqm: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_cqm: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *cqm_p = hob->cqm; + return HH_OK; +} + +HH_ret_t THH_hob_get_eventp ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_eventp_t *eventp_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_eventp: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (hob->eventp== (THH_eventp_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_eventp: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *eventp_p = hob->eventp; + return HH_OK; +} +HH_ret_t THH_hob_get_udavm_info ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_udavm_t *udavm_p, + /*OUT*/ MT_bool *use_priv_udav, + /*OUT*/ MT_bool *av_in_host_mem, + /*OUT*/ VAPI_lkey_t *lkey , + /*OUT*/ u_int32_t *max_ah_num, + /*OUT*/ MT_bool *hide_ddr) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_udavm_info: ERROR : No device registered\n"); + return HH_EINVAL; + } + + *av_in_host_mem = (MT_bool) (hob->module_flags.av_in_host_mem); + + *use_priv_udav = hob->udavm_use_priv; + *hide_ddr = (hob->ddr_props.dh ? 
TRUE : FALSE); + *max_ah_num = hob->hca_capabilities.max_ah_num; + if (!(hob->udavm_use_priv)) { + return HH_OK; + } + + if (hob->udavm == (THH_udavm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_udavm_info: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + *udavm_p = hob->udavm; + *lkey = hob->udavm_lkey; + + return HH_OK; +} + + +HH_ret_t THH_hob_get_hca_hndl ( /*IN*/ THH_hob_t hob, + /*OUT*/ HH_hca_hndl_t *hca_hndl_p ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_hca_hndl: ERROR : No device registered\n"); + return HH_EINVAL; + } + + *hca_hndl_p = hob->hh_hca_hndl; + return HH_OK; +} + +HH_ret_t THH_hob_check_qp_init_attrs ( /*IN*/ THH_hob_t hob, + /*IN*/ HH_qp_init_attr_t * init_attr_p, + /*IN*/ MT_bool is_special_qp ) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hob == NULL) { + MTL_ERROR1("THH_hob_get_check_qp_init_attrs: ERROR : No device registered\n"); + return HH_EINVAL; + } + + + if (init_attr_p == NULL) { + MTL_ERROR1("THH_hob_get_check_qp_init_attrs: ERROR : null attributes\n"); + return HH_EINVAL; + } + + if (init_attr_p->qp_cap.max_oust_wr_rq > hob->hca_capabilities.max_qp_ous_wr || + init_attr_p->qp_cap.max_oust_wr_sq > hob->hca_capabilities.max_qp_ous_wr){ + MTL_ERROR1("%s : max outs work requests more than HCA maximum\n", __func__); + return HH_E2BIG_WR_NUM; + } + + if (is_special_qp || init_attr_p->ts_type != VAPI_TS_RD) { + if (init_attr_p->qp_cap.max_sg_size_rq > hob->hca_capabilities.max_num_sg_ent || + init_attr_p->qp_cap.max_sg_size_sq > hob->hca_capabilities.max_num_sg_ent) { + MTL_ERROR1("%s : max s/g list size more than HCA maximum\n", __func__); + return HH_E2BIG_SG_NUM; + } + } else { + /* is RD */ + if (init_attr_p->qp_cap.max_sg_size_rq > hob->hca_capabilities.max_num_sg_ent_rd || + init_attr_p->qp_cap.max_sg_size_sq > hob->hca_capabilities.max_num_sg_ent_rd) { + MTL_ERROR1("%s : max s/g list size more than HCA maximum\n", __func__); + return HH_E2BIG_SG_NUM; + } + } + return HH_OK; +} + +/* Used in restarting HCA on fatal error, in function THH_hob_restart */ +HH_ret_t THH_hob_get_init_params(/*IN*/ THH_hob_t thh_hob_p, + /*OUT*/ THH_hw_props_t *hw_props_p, + /*OUT*/ u_int32_t *hca_seq_num, + /*OUT*/ THH_module_flags_t *mod_flags) +{ + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : No device registered"), __func__); + return HH_EAGAIN; + } + memcpy(hw_props_p, &thh_hob_p->hw_props, sizeof(THH_hw_props_t)); + *hca_seq_num = thh_hob_p->hca_seq_num; + memcpy(mod_flags, &thh_hob_p->module_flags, sizeof(THH_module_flags_t)); + return HH_OK; +} +static HH_ret_t THH_hob_halt_hca(/*IN*/ THH_hob_t hob) +{ +#ifdef SIMULATE_HALT_HCA + THH_cmd_CLOSE_IB(hob->cmd,1); + THH_cmd_CLOSE_IB(hob->cmd,2); + THH_cmd_CLOSE_HCA(hob->cmd, FALSE); + THH_cmd_SYS_DIS(hob->cmd); +#else + THH_cmd_status_t stat; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + stat = THH_cmd_CLOSE_HCA(hob->cmd, TRUE); // HALT the HCA + MTL_ERROR1(MT_FLFMT("%s: HALT HCA returned 0x%x"), __func__,stat); +#endif + return HH_OK; +} + +static VAPI_event_syndrome_t get_fatal_err_syndrome(THH_hob_t hob) +{ + u_int32_t temp; + int i; + + if ((hob->fw_error_buf_start_va != (MT_virt_addr_t)(MT_ulong_ptr_t) NULL) && + (hob->fw_error_buf != NULL) && + (hob->fw_props.error_buf_size > 0) ) { + MOSAL_MMAP_IO_READ_BUF_DWORD(hob->fw_error_buf_start_va, + hob->fw_error_buf,hob->fw_props.error_buf_size); + /* check for non-zero data in fw catastrophic error buffer */ + temp = 0; + for (i = 0; i 
< (int) hob->fw_props.error_buf_size; i++) {
+      temp |= hob->fw_error_buf[i];
+    }
+    if (temp == 0) {
+      return VAPI_CATAS_ERR_GENERAL;
+    } else {
+      /* Have non-zero data.  print out the syndrome details, and return general category */
+      for (i = 0; i < (int) hob->fw_props.error_buf_size; i++) {
+        MTL_ERROR1(MT_FLFMT("get_fatal_err_syndrome: FW CATASTR ERRBUF[%d] = 0x%x"),
+                   i, MOSAL_be32_to_cpu(hob->fw_error_buf[i]));
+      }
+      switch( (MOSAL_be32_to_cpu(hob->fw_error_buf[0])>>24) & 0xFF) {
+      case TAVOR_IF_EV_CATAS_ERR_FW_INTERNAL_ERR:
+        return VAPI_CATAS_ERR_FW_INTERNAL;
+      case TAVOR_IF_EV_CATAS_ERR_MISBEHAVED_UAR_PAGE:
+        return VAPI_CATAS_ERR_MISBEHAVED_UAR_PAGE;
+      case TAVOR_IF_EV_CATAS_ERR_UPLINK_BUS_ERR:
+        return VAPI_CATAS_ERR_UPLINK_BUS_ERR;
+      case TAVOR_IF_EV_CATAS_ERR_HCA_DDR_DATA_ERR:
+        return VAPI_CATAS_ERR_HCA_DDR_DATA_ERR;
+      case TAVOR_IF_EV_CATAS_ERR_INTERNAL_PARITY_ERR:
+        return VAPI_CATAS_ERR_INTERNAL_PARITY_ERR;
+      default:
+        return VAPI_CATAS_ERR_GENERAL;
+      }
+    }
+  } else {
+    /* no access to the fw cat error buffer */
+    return VAPI_CATAS_ERR_GENERAL;
+  }
+}
+/****************************************************************************************
+ * name:     THH_hob_fatal_error
+ * function: records a fatal-error event and kicks off fatal-error handling
+ * descr:    Classifies the fatal error into a VAPI syndrome, moves the HOB into a
+ *           FATAL state (under fatal_spl), notifies the cmd and eventp objects, and,
+ *           if the HCA was running, wakes THH_hob_fatal_err_thread to halt the HCA
+ *           and invoke the user's async event handler.
+ ****************************************************************************************/
+
+HH_ret_t THH_hob_fatal_error(/*IN*/ THH_hob_t hob,
+                             /*IN*/ THH_fatal_err_t  fatal_err_type,
+                             /*IN*/ VAPI_event_syndrome_t  syndrome)
+{
+#ifndef __DARWIN__
+  THH_cmd_t    cmd_if;
+  THH_eventp_t eventp;
+
+  MT_RETURN_IF_LOW_STACK(THH_WATERMARK);
+  FUNC_IN;
+
+  /* check the handle before dereferencing it for the debug print below */
+  if (hob == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s:  ERROR : No device registered"), __func__);
+    MT_RETURN(HH_EAGAIN);
+  }
+
+  MTL_DEBUG1(MT_FLFMT("%s: device=%s, err_type=%d, syndrome=%d"), __func__,
+             hob->dev_name, fatal_err_type, syndrome);
+
+  /* make sure that only one invocation is allowed */
+  MOSAL_spinlock_dpc_lock(&hob->fatal_spl);
+  if ((hob->thh_state & THH_STATE_HAVE_ANY_FATAL) != 0) {
+    /* already in FATAL state */
+    MTL_DEBUG4(MT_FLFMT("%s: already in FATAL state"), __func__);
+    MOSAL_spinlock_unlock(&hob->fatal_spl);
+    MT_RETURN(HH_OK);
+  }
+
+  MTL_ERROR1(MT_FLFMT("%s: device=%s, err_type=%d, syndrome=%d"), __func__,
+             hob->dev_name, fatal_err_type, syndrome);
+
+  switch(fatal_err_type) {
+  case THH_FATAL_MASTER_ABORT:  /* detected master abort */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_MASTER_ABORT;
+    break;
+  case THH_FATAL_GOBIT:         /* GO bit of HCR remains set (i.e., stuck) */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_GO_BIT;
+    break;
+  case THH_FATAL_CMD_TIMEOUT:   /* timeout on a command execution */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_CMD_TIMEOUT;
+    break;
+  case THH_FATAL_EQ_OVF:        /* an EQ has overflowed */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_EQ_OVERFLOW;
+    break;
+  case THH_FATAL_EVENT:         /* firmware has generated a LOCAL CATASTROPHIC ERR event */
+    /* get syndrome from iomapped firmware memory */
+    hob->fatal_syndrome = get_fatal_err_syndrome(hob);
+    break;
+  case THH_FATAL_CR:            /* unexpected read from CR-space */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_EQ_OVERFLOW;
+    break;
+  case THH_FATAL_TOKEN:         /* invalid token on command completion */
+    hob->fatal_syndrome = VAPI_CATAS_ERR_FATAL_TOKEN;
+    break;
+  case THH_FATAL_EXTERNAL:      /* fatal error reported by an external caller */
+    
hob->fatal_syndrome = VAPI_CATAS_ERR_FATAL_EXTERNAL; + break; + case THH_FATAL_NONE: + default: + hob->fatal_syndrome = VAPI_CATAS_ERR_GENERAL; + } + + MTL_ERROR1(MT_FLFMT("%s: Fatal Event Syndrome = %s (%d)"), + __func__,VAPI_event_syndrome_sym(hob->fatal_syndrome), hob->fatal_syndrome); + + if (hob->thh_state == THH_STATE_RUNNING) { + /* make use of thread to perform HALT and signal user apps */ + hob->thh_state |= THH_STATE_FATAL_START; + } else { + /* creating, opening, closing, or destroying HCA. + * Indicate HCA_HALTED directly. + */ + hob->thh_state |= THH_STATE_FATAL_HCA_HALTED; + } + MOSAL_spinlock_unlock(&hob->fatal_spl); + + /* notify cmd and eventp objects, if they exist */ + if (THH_hob_get_cmd_if(hob, &cmd_if) == HH_OK) { + THH_cmd_notify_fatal(cmd_if, fatal_err_type); + } + + if (THH_hob_get_eventp(hob,&eventp) == HH_OK) { + THH_eventp_notify_fatal(eventp, fatal_err_type); + } + + /* now, signal the fatal error thread, ONLY IF WE WERE IN RUNNING STATE */ + if ((hob->thh_state & THH_STATE_RUNNING) != 0) { + hob->fatal_thread_obj.have_fatal = TRUE; + MTL_TRACE1(MT_FLFMT("%s: signalling fatal thread"), __func__); + MOSAL_syncobj_signal(&hob->fatal_thread_obj.fatal_err_sync); + } + +#endif /* not defined __DARWIN__ - TODO in darwin, implement the fatal error handling */ + MT_RETURN(HH_OK); +} + +HH_ret_t THH_hob_get_state(THH_hob_t thh_hob_p, THH_hob_state_t *fatal_state) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : No device registered"), __func__); + return HH_EAGAIN; + } + + if (fatal_state == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : NULL fatal_state parameter"), __func__); + return HH_EINVAL; + } + + MOSAL_spinlock_dpc_lock(&thh_hob_p->fatal_spl); + *fatal_state = thh_hob_p->thh_state; + MOSAL_spinlock_unlock(&thh_hob_p->fatal_spl); + return HH_OK; +} + +HH_ret_t THH_hob_get_fatal_syncobj(THH_hob_t thh_hob_p, MOSAL_syncobj_t *syncobj) +{ + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : No device registered"), __func__); + return HH_EAGAIN; + } + + if (syncobj == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : NULL syncobj return parameter"), __func__); + return HH_EINVAL; + } + + *syncobj = thh_hob_p->thh_fatal_complete_syncobj; + return HH_OK; +} + +HH_ret_t THH_hob_wait_if_fatal(THH_hob_t thh_hob_p, MT_bool *had_fatal) +{ + THH_hob_state_t state; + + FUNC_IN; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Received NULL HOB pointer"), __func__); + *had_fatal = FALSE; + MT_RETURN(HH_OK); + } + + /*get fatal state value */ + MOSAL_spinlock_dpc_lock(&(thh_hob_p->fatal_spl)); + state = thh_hob_p->thh_state; + MOSAL_spinlock_unlock(&(thh_hob_p->fatal_spl)); + + MTL_DEBUG4(MT_FLFMT("%s: FATAL STATE=%d"), __func__, state); + + if ((state & THH_STATE_HAVE_ANY_FATAL) == 0) { + *had_fatal = FALSE; + MT_RETURN(HH_OK); + } + + /* We were in running state. 
Wait for fatal thread to complete HCA HALT */ + if ((state & THH_STATE_FATAL_START) != 0) { + MOSAL_syncobj_waiton_ui(&thh_hob_p->thh_fatal_complete_syncobj, 10000000); + } + + /* We are in the FATAL_HCA_HALTED compound state */ + *had_fatal = TRUE; + MT_RETURN(HH_OK); + +} + +HH_ret_t THH_hob_restart(/*IN*/ HH_hca_hndl_t hca_hndl) +{ + THH_hw_props_t hw_props; + u_int32_t hca_seq_num; + THH_module_flags_t mod_flags; + THH_hob_t thh_hob_p; + HH_hca_hndl_t new_hh_hca_hndl; + HH_ret_t rc; + + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + FUNC_IN; + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1(MT_FLFMT("%s: NOT IN TASK CONTEXT"), __func__); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : Invalid HCA handle"), __func__); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: ERROR : no device registered"), __func__); + return HH_EAGAIN; + } + + if ((thh_hob_p->thh_state & THH_STATE_FATAL_HCA_HALTED) == 0) { + MTL_ERROR1(MT_FLFMT("%s: HCA is not halted (state 0x%x)"), __func__, thh_hob_p->thh_state); + return HH_ERR; + } + + THH_hob_get_init_params(thh_hob_p, &hw_props, &hca_seq_num, &mod_flags); + + /* PCI reset is done in destroy, if have catastrophic error */ + rc = THH_hob_destroy(hca_hndl); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: cannot destroy old HOB (ret=%d)"), __func__, rc); + } + + rc = THH_hob_create(&hw_props,hca_seq_num,&mod_flags,&new_hh_hca_hndl); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: cannot create new HOB (ret=%d)"), __func__, rc); + } + return rc; +} + +/***************************************************************************** +****************************************************************************** +************** PASS-THROUGH FUNCTIONS ********************************** **** +****************************************************************************** +*****************************************************************************/ + + +/****************************************************************************** + * Function: THH_hob_alloc_ul_res <==> THH_uldm_alloc_ul_res + *****************************************************************************/ +HH_ret_t THH_hob_alloc_ul_res(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t prot_ctx, + void *hca_ul_resources_p) +{ + THH_hca_ul_resources_t* res = (THH_hca_ul_resources_t *)hca_ul_resources_p; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_alloc_ul_res: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_alloc_ul_res : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + /* Need to see that uldm object has been allocated. Then, need to invoke + * the alloc_ul_resources method of the uldm here. + * NOTE: may want the constructor to already pre-allocate the ul resources based upon + * configuration info obtained via query. 
+ */ + memset(res, 0, sizeof(THH_hca_ul_resources_t)); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_alloc_ul_res: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_alloc_ul_res: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + /* Set THH_hob's information in the given hca_ul_resources_p buffer */ + res->hh_hca_hndl = hca_hndl; + memcpy(&(res->version), &(thh_hob_p->version_info), sizeof(THH_ver_info_t)); + res->priv_ud_av = thh_hob_p->profile.use_priv_udav; + res->log2_mpt_size = (u_int32_t)thh_hob_p->profile.log2_max_mpt_entries; + res->av_ddr_base = thh_hob_p->av_ddr_base; + res->av_host_base = thh_hob_p->av_host_base; + res->max_qp_ous_wr = thh_hob_p->hca_capabilities.max_qp_ous_wr; + res->max_srq_ous_wr = thh_hob_p->hca_capabilities.max_wqe_per_srq; + res->max_num_sg_ent = thh_hob_p->hca_capabilities.max_num_sg_ent; + res->max_num_sg_ent_srq = thh_hob_p->hca_capabilities.max_srq_sentries; + res->max_num_sg_ent_rd = thh_hob_p->hca_capabilities.max_num_sg_ent_rd; + res->max_num_ent_cq = thh_hob_p->hca_capabilities.max_num_ent_cq; + + /* Invoke THH_uldm in order to get a UAR resource */ + return THH_uldm_alloc_ul_res(thh_hob_p->uldm, prot_ctx, res); +} /* THH_alloc_ul_resources */ + +/****************************************************************************** + * Function: THH_hob_free_ul_res <==> THH_uldm_free_ul_res + *****************************************************************************/ +HH_ret_t THH_hob_free_ul_res(HH_hca_hndl_t hca_hndl, + void *hca_ul_resources_p) +{ + THH_hca_ul_resources_t* res = (THH_hca_ul_resources_t *)hca_ul_resources_p; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_free_ul_res: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_free_ul_res : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + /* Verify that the uldm object has been allocated before invoking + * its free_ul_resources method here. 
+ */ + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_free_ul_res: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + if (thh_hob_p->uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_free_ul_res: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_uldm_free_ul_res(thh_hob_p->uldm, res); +} /* THH_free_ul_resources */ + + + + +/****************************************************************************** + * Function: THH_hob_alloc_pd <==> THH_uldm_alloc_pd + *****************************************************************************/ +HH_ret_t THH_hob_alloc_pd(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t prot_ctx, + void * pd_ul_resources_p, + HH_pd_hndl_t *pd_num_p) +{ + THH_hob_t thh_hob_p; + HH_ret_t ret; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_alloc_pd: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_alloc_pd : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_alloc_pd: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_alloc_pd: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + ret = THH_uldm_alloc_pd(thh_hob_p->uldm, prot_ctx, + (THH_pd_ul_resources_t *)pd_ul_resources_p, pd_num_p); +// MTL_DEBUG4("THH_hob_alloc_pd: ret = %d\n", ret); + return ret; +} + +/****************************************************************************** + * Function: THH_hob_free_pd <==> THH_uldm_free_pd + *****************************************************************************/ +HH_ret_t THH_hob_free_pd(HH_hca_hndl_t hca_hndl, HH_pd_hndl_t pd_num) +{ + u_int32_t max_pd; + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_free_pd: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_free_pd : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_free_pd: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + max_pd = (1 << thh_hob_p->dev_lims.log_max_pd); + if (pd_num > max_pd - 1) { + MTL_ERROR1("THH_hob_free_pd: ERROR : PD number (%d) is greater than max allowed (%d)\n", pd_num, max_pd); + return HH_EAGAIN; + } + + if (thh_hob_p->uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_free_pd: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_uldm_free_pd(thh_hob_p->uldm, pd_num); + return rc; +} + + +/****************************************************************************** + * Function: THH_hob_alloc_rdd <==> THH_eecm_alloc_rdd + *****************************************************************************/ +HH_ret_t THH_hob_alloc_rdd(HH_hca_hndl_t hh_dev_p, + HH_rdd_hndl_t *rdd_p) +{ +#if 0 + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_alloc_rdd: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_alloc_rdd : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + 
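+ /* A note on the guard sequence used throughout this file (illustrative
+  * sketch, not part of this patch): every pass-through entry point
+  * validates its inputs in the same order before delegating to the
+  * relevant manager object. Expressed once, the pattern is roughly:
+  *
+  *   if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) return HH_ERR;
+  *   if (hca_hndl == NULL)                      return HH_EINVAL_HCA_HNDL;
+  *   thh_hob_p = THHOBP(hca_hndl);
+  *   if (thh_hob_p == NULL)                     return HH_EAGAIN;
+  *   TEST_RETURN_FATAL(thh_hob_p);              // bail out in fatal state
+  *   if (<manager> == THH_INVALID_HNDL)         return HH_EAGAIN;
+  *   return <manager function>(<manager>, ...); // delegate the real work
+  */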
MTL_ERROR1("THH_hob_alloc_rdd: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_alloc_rdd: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_eecm_alloc_rdd(thh_hob_p->uldm, rdd_p); +#else + return HH_ENOSYS; +#endif +} + +/****************************************************************************** + * Function: THH_hob_free_rdd <==> THH_eecm_free_rdd + *****************************************************************************/ +HH_ret_t THH_hob_free_rdd(HH_hca_hndl_t hh_dev_p, HH_rdd_hndl_t rdd) +{ +#if 0 + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_free_rdd: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_free_rdd : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_free_rdd: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_free_rdd: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_eecm_free_rdd(thh_hob_p->uldm, rdd); + return rc; +#else + return HH_ENOSYS; +#endif +} + + +/****************************************************************************** + * Function: THH_hob_create_ud_av <==> THH_udavm_create_av + *****************************************************************************/ +HH_ret_t THH_hob_create_ud_av(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_ud_av_t *av_p, + HH_ud_av_hndl_t *ah_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_create_ud_av: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_create_ud_av : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_create_ud_av: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->udavm == (THH_udavm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_create_ud_av: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_udavm_create_av(thh_hob_p->udavm, pd, av_p, ah_p); +} + + +/****************************************************************************** + * Function: THH_hob_modify_ud_av <==> THH_udavm_modify_av + *****************************************************************************/ +HH_ret_t THH_hob_modify_ud_av(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t *av_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_modify_ud_av: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_modify_ud_av : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_modify_ud_av: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->udavm == (THH_udavm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_modify_ud_av: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return 
THH_udavm_modify_av(thh_hob_p->udavm, ah, av_p); +} + + +/****************************************************************************** + * Function: THH_hob_query_ud_av <==> THH_udavm_query_av + *****************************************************************************/ +HH_ret_t THH_hob_query_ud_av(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah, + VAPI_ud_av_t *av_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_ud_av: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_ud_av : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_ud_av: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->udavm == (THH_udavm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_ud_av: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_udavm_query_av(thh_hob_p->udavm, ah, av_p); +} + + +/****************************************************************************** + * Function: THH_hob_destroy_ud_av <==> THH_udavm_destroy_av + *****************************************************************************/ +HH_ret_t THH_hob_destroy_ud_av(HH_hca_hndl_t hca_hndl, + HH_ud_av_hndl_t ah) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_destroy_ud_av: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_destroy_ud_av : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_destroy_ud_av: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->udavm == (THH_udavm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_destroy_ud_av: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + rc = THH_udavm_destroy_av(thh_hob_p->udavm, ah); + return rc; +} + + + +/****************************************************************************** + * Function: THH_hob_register_mr <==> THH_mrwm_register_mr + *****************************************************************************/ +HH_ret_t THH_hob_register_mr(HH_hca_hndl_t hca_hndl, + HH_mr_t *mr_props_p, + VAPI_lkey_t *lkey_p, + IB_rkey_t *rkey_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_register_mr: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_register_mr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_register_mr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_register_mr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_register_mr(thh_hob_p->mrwm, mr_props_p, lkey_p, rkey_p); +} + + + +/****************************************************************************** + * Function: THH_hob_reregister_mr <==> THH_mrwm_reregister_mr + *****************************************************************************/ +HH_ret_t 
THH_hob_reregister_mr(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + VAPI_mr_change_t change_mask, + HH_mr_t *mr_props_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t *rkey_p) +{ + THH_hob_t thh_hob_p; + + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_reregister_mr: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_reregister_mr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_reregister_mr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_reregister_mr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_reregister_mr(thh_hob_p->mrwm, lkey, change_mask, mr_props_p,lkey_p, rkey_p); +} + + + +/****************************************************************************** + * Function: THH_hob_register_smr <==> THH_mrwm_register_smr + *****************************************************************************/ +HH_ret_t THH_hob_register_smr(HH_hca_hndl_t hca_hndl, + HH_smr_t *mr_props_p, + VAPI_lkey_t *lkey_p, + IB_rkey_t *rkey_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_register_smr: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_register_smr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_register_smr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_register_smr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_register_smr(thh_hob_p->mrwm, mr_props_p, lkey_p, rkey_p); +} + + +/****************************************************************************** + * Function: THH_hob_query_mr <==> THH_mrwm_query_mr + *****************************************************************************/ +HH_ret_t THH_hob_query_mr(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey, + HH_mr_info_t *mr_info_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_mr: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_mr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_mr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_mr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_query_mr(thh_hob_p->mrwm, lkey, mr_info_p); +} + + + +/****************************************************************************** + * Function: THH_hob_deregister_mr <==> THH_mrwm_deregister_mr + *****************************************************************************/ +HH_ret_t THH_hob_deregister_mr(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t lkey) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != 
MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_deregister_mr: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_deregister_mr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_deregister_mr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_deregister_mr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_mrwm_deregister_mr(thh_hob_p->mrwm, lkey); + return rc; +} + +/****************************************************************************** + * Function: THH_hob_alloc_mw <==> THH_mrwm_alloc_mw + *****************************************************************************/ +HH_ret_t THH_hob_alloc_mw(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + IB_rkey_t *initial_rkey_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_alloc_mw: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_alloc_mw : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_alloc_mw: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_alloc_mw: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_alloc_mw(thh_hob_p->mrwm, pd, initial_rkey_p); +} + + +/****************************************************************************** + * Function: THH_hob_query_mw <==> THH_mrwm_query_mw + *****************************************************************************/ +HH_ret_t THH_hob_query_mw(HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey, + IB_rkey_t *current_rkey_p, + HH_pd_hndl_t *pd_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_mw: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_mw : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_mw: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_mw: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + MTL_TRACE1("%s: -KL- called for key 0x%x", __func__,initial_rkey); + return THH_mrwm_query_mw(thh_hob_p->mrwm, initial_rkey, current_rkey_p, pd_p); +} + + +/****************************************************************************** + * Function: THH_hob_free_mw <==> THH_mrwm_free_mw + *****************************************************************************/ +HH_ret_t THH_hob_free_mw(HH_hca_hndl_t hca_hndl, + IB_rkey_t initial_rkey) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_free_mw: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_free_mw : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + 
MTL_ERROR1("THH_hob_free_mw: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_free_mw: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_mrwm_free_mw(thh_hob_p->mrwm, initial_rkey); + return rc; +} + + + /* Fast Memory Regions */ + /***********************/ +/****************************************************************************** + * Function: THH_hob_create_cq <==> THH_mrwm_alloc_fmr + *****************************************************************************/ +HH_ret_t THH_hob_alloc_fmr(HH_hca_hndl_t hca_hndl, + HH_pd_hndl_t pd, + VAPI_mrw_acl_t acl, + MT_size_t max_pages, /* Maximum number of pages that can be mapped using this region */ + u_int8_t log2_page_sz, /* Fixed page size for all maps on a given FMR */ + VAPI_lkey_t* last_lkey_p) /* To be used as the initial FMR handle */ +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_alloc_fmr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_alloc_fmr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_alloc_fmr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + if (thh_hob_p->ddr_props.dh == TRUE) { + /* Must hide DDR memory. alloc fmr not supported */ + MTL_ERROR1("THH_hob_alloc_fmr: Device is operating in HIDE_DDR mode. Cannot alloc fmr\n"); + return HH_ENOSYS; + } + + return THH_mrwm_alloc_fmr(thh_hob_p->mrwm,pd,acl,max_pages,log2_page_sz,last_lkey_p); +} + +/****************************************************************************** + * Function: THH_hob_map_fmr <==> THH_mrwm_map_fmr + *****************************************************************************/ +HH_ret_t THH_hob_map_fmr(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t last_lkey, + EVAPI_fmr_map_t* map_p, + VAPI_lkey_t* lkey_p, + IB_rkey_t* rkey_p) + +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_map_fmr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_map_fmr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_map_fmr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_map_fmr(thh_hob_p->mrwm,last_lkey,map_p,lkey_p,rkey_p); +} + +/****************************************************************************** + * Function: THH_hob_unmap_fmr <==> THH_mrwm_unmap_fmr + *****************************************************************************/ +HH_ret_t THH_hob_unmap_fmr(HH_hca_hndl_t hca_hndl, + u_int32_t num_of_fmrs_to_unmap, + VAPI_lkey_t* last_lkeys_array) + +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_unmap_fmr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_unmap_fmr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + 
MTL_ERROR1("THH_hob_unmap_fmr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mrwm_unmap_fmr(thh_hob_p->mrwm,num_of_fmrs_to_unmap,last_lkeys_array); +} + +/****************************************************************************** + * Function: THH_hob_free_fmr <==> THH_mrwm_free_fmr + *****************************************************************************/ +HH_ret_t THH_hob_free_fmr(HH_hca_hndl_t hca_hndl, + VAPI_lkey_t last_lkey) /* as returned on last successful mapping operation */ + +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_free_fmr : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_free_fmr: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->mrwm == (THH_mrwm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_free_fmr: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_mrwm_free_fmr(thh_hob_p->mrwm,last_lkey); + return rc; +} + + + /****************************************************************************** + * Function: THH_hob_create_cq <==> THH_cqm_create_cq + *****************************************************************************/ +HH_ret_t THH_hob_create_cq(HH_hca_hndl_t hca_hndl, + MOSAL_protection_ctx_t user_prot_context, + void *cq_ul_resources_p, + HH_cq_hndl_t *cq_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_create_cq: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_create_cq : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_create_cq: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_create_cq: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_cqm_create_cq(thh_hob_p->cqm, user_prot_context, thh_hob_p->compl_eq, + thh_hob_p->ib_eq, + (THH_cq_ul_resources_t*)cq_ul_resources_p, cq_p); +} + +/****************************************************************************** + * Function: THH_hob_resize_cq <==> THH_cqm_modify_cq + *****************************************************************************/ +HH_ret_t THH_hob_resize_cq(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + void *cq_ul_resources_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_resize_cq: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_resize_cq : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_resize_cq: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_resize_cq: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_cqm_resize_cq(thh_hob_p->cqm, cq, (THH_cq_ul_resources_t*)cq_ul_resources_p); +} + +/****************************************************************************** + * Function: THH_hob_query_cq <==> 
THH_cqm_query_cq + *****************************************************************************/ +HH_ret_t THH_hob_query_cq(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + VAPI_cqe_num_t *num_o_cqes_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_cq: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_cq : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_cq: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_cq: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_cqm_query_cq(thh_hob_p->cqm, cq, num_o_cqes_p); +} + + +/****************************************************************************** + * Function: THH_hob_destroy_cq <==> THH_cqm_destroy_cq + *****************************************************************************/ +HH_ret_t THH_hob_destroy_cq(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_destroy_cq: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_destroy_cq : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_destroy_cq: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_destroy_cq: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_cqm_destroy_cq(thh_hob_p->cqm, cq); + return rc; +} + + +/****************************************************************************** + * Function: THH_hob_create_qp <==> THH_qpm_create_qp + *****************************************************************************/ +HH_ret_t THH_hob_create_qp(HH_hca_hndl_t hca_hndl, + HH_qp_init_attr_t *init_attr_p, + void *qp_ul_resources_p, + IB_wqpn_t *qpn_p) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_create_qp: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_create_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_create_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_create_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + if ((rc=THH_hob_check_qp_init_attrs(thh_hob_p,init_attr_p,FALSE)) != HH_OK) { + MTL_ERROR1("THH_hob_create_qp: ERROR : requested capabilities exceed HCA limits\n"); + return rc; + } + + return THH_qpm_create_qp(thh_hob_p->qpm, init_attr_p, 0, (THH_qp_ul_resources_t*)qp_ul_resources_p, qpn_p); +} + +/****************************************************************************** + * Function: THH_hob_get_special_qp <==> THH_qpm_get_special_qp + *****************************************************************************/ +HH_ret_t 
THH_hob_get_special_qp(HH_hca_hndl_t hca_hndl, + VAPI_special_qp_t qp_type, + IB_port_t port, + HH_qp_init_attr_t *init_attr_p, + void *qp_ul_resources_p, + IB_wqpn_t *sqp_hndl_p) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_get_special_qp: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_get_special_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_get_special_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_get_special_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + if ((rc=THH_hob_check_qp_init_attrs(thh_hob_p,init_attr_p,TRUE)) != HH_OK) { + MTL_ERROR1("THH_hob_get_special_qp: ERROR : requested capabilities exceed HCA limits\n"); + return rc; + } + + return THH_qpm_get_special_qp(thh_hob_p->qpm, qp_type, port, init_attr_p, (THH_qp_ul_resources_t*)qp_ul_resources_p, sqp_hndl_p); +} + +/****************************************************************************** + * Function: THH_hob_modify_qp <==> THH_qpm_modify_qp + *****************************************************************************/ +HH_ret_t THH_hob_modify_qp(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + VAPI_qp_state_t cur_qp_state, + VAPI_qp_attr_t *qp_attr_p, + VAPI_qp_attr_mask_t *qp_attr_mask_p) +{ + HH_ret_t ret; + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_modify_qp: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_modify_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_modify_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_modify_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + ret = THH_qpm_modify_qp(thh_hob_p->qpm, qpn, cur_qp_state, qp_attr_p, qp_attr_mask_p); + + return ret; +} + + +/****************************************************************************** + * Function: THH_hob_query_qp <==> THH_qpm_query_qp + *****************************************************************************/ +HH_ret_t THH_hob_query_qp(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + VAPI_qp_attr_t *qp_attr_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_qp: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_qpm_query_qp(thh_hob_p->qpm, qpn, qp_attr_p); +} + + +/****************************************************************************** + * Function: 
THH_hob_destroy_qp <==> THH_qpm_destroy_qp + *****************************************************************************/ +HH_ret_t THH_hob_destroy_qp(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_destroy_qp: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_destroy_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_destroy_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_destroy_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_qpm_destroy_qp(thh_hob_p->qpm, qpn); + return rc; +} + +/* HH_create_srq */ +HH_ret_t THH_hob_create_srq(HH_hca_hndl_t hca_hndl, HH_pd_hndl_t pd, void *srq_ul_resources_p, + HH_srq_hndl_t *srq_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("%s: NOT IN TASK CONTEXT)\n", __func__); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("%s : ERROR : Invalid HCA handle\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("%s: ERROR : No device registered\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->srqm == (THH_srqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("%s: SRQs are not supported in this HCA configuration\n", __func__); + return HH_ENOSYS; + } + + return THH_srqm_create_srq(thh_hob_p->srqm, pd, srq_ul_resources_p, srq_p); +} + +HH_ret_t THH_hob_query_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq, u_int32_t *limit_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("%s: NOT IN TASK CONTEXT)\n", __func__); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("%s : ERROR : Invalid HCA handle\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("%s: ERROR : No device registered\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->srqm == (THH_srqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("%s: SRQs are not supported in this HCA configuration", __func__); + return HH_ENOSYS; + } + + return THH_srqm_query_srq(thh_hob_p->srqm, srq, limit_p); +} + +HH_ret_t THH_hob_modify_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq, void *srq_ul_resources_p) +{ + THH_hob_t thh_hob_p; + + MTL_TRACE4("%s: srq=0x%X)\n", __func__, srq); + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("%s: NOT IN TASK CONTEXT)\n", __func__); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("%s : ERROR : Invalid HCA handle\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("%s: ERROR : No device registered\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->srqm == (THH_srqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("%s: SRQs are not supported in this HCA configuration", __func__); + return HH_ENOSYS; + } + + return THH_srqm_modify_srq(thh_hob_p->srqm, srq, srq_ul_resources_p); +} + + +HH_ret_t 
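+/******************************************************************************
+ * Function: THH_hob_destroy_srq <==> THH_srqm_destroy_srq
+ *
+ * Note (illustrative sketch, not part of this patch): the SRQ entry points
+ * above mirror the verbs lifecycle, e.g.:
+ *
+ *   HH_srq_hndl_t srq;
+ *   THH_hob_create_srq(hca_hndl, pd, srq_ul_res_p, &srq);
+ *   THH_hob_modify_srq(hca_hndl, srq, srq_ul_res_p);  // e.g. resize
+ *   THH_hob_destroy_srq(hca_hndl, srq);
+ *
+ * All of them return HH_ENOSYS when srqm was not configured for this HCA.
+ *****************************************************************************/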
THH_hob_destroy_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("%s: NOT IN TASK CONTEXT)\n", __func__); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("%s : ERROR : Invalid HCA handle\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("%s: ERROR : No device registered\n", __func__); + return HH_EINVAL_HCA_HNDL; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->srqm == (THH_srqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("%s: SRQs are not supported in this HCA configuration", __func__); + return HH_ENOSYS; + } + + return THH_srqm_destroy_srq(thh_hob_p->srqm, srq); +} + + +/****************************************************************************** + * Function: THH_hob_process_local_mad <==> THH_qpm_process_local_mad + *****************************************************************************/ +HH_ret_t THH_hob_process_local_mad( + HH_hca_hndl_t hca_hndl, + IB_port_t port, + IB_lid_t slid, /* For Mkey violation trap */ + EVAPI_proc_mad_opt_t proc_mad_opts, + void * mad_in_p, + void * mad_out_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_process_local_mad: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_process_local_mad : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_process_local_mad: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_process_local_mad: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_qpm_process_local_mad(thh_hob_p->qpm,port,slid,proc_mad_opts,mad_in_p,mad_out_p); +} + + +/****************************************************************************** + * Function: THH_hob_ddrmm_alloc <==> THH_ddrmm_alloc + *****************************************************************************/ +HH_ret_t THH_hob_ddrmm_alloc( + HH_hca_hndl_t hca_hndl, + VAPI_size_t size, + u_int8_t align_shift, + VAPI_phy_addr_t* buf_p) +{ + THH_hob_t thh_hob_p; + MT_phys_addr_t adrs; + HH_ret_t ret = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1(MT_FLFMT(" NOT IN TASK CONTEXT\n")); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : Invalid HCA handle\n")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : No device registered\n")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->ddrmm == (THH_ddrmm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("ERROR : HCA device has not yet been opened\n")); + return HH_EAGAIN; + } + + if (thh_hob_p->ddr_props.dh == TRUE) { + /* Must hide DDR memory. DDR alloc not supported */ + MTL_ERROR1(MT_FLFMT("%s: Device is operating in HIDE_DDR mode. 
Cannot alloc ddr memory"), __func__); + return HH_ENOSYS; + } + + MTL_DEBUG1(MT_FLFMT("before THH_ddrmm_alloc \n")); + /* tavor ALWAYS has DDR, on other devices we should query if there's DDR */ +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + ret = THH_ddrmm_alloc(thh_hob_p->ddrmm,(MT_size_t)size,align_shift,&adrs); + MTL_DEBUG1(MT_FLFMT("after THH_ddrmm_alloc \n")); + *buf_p = (VAPI_phy_addr_t)adrs; + return ret; + +} + +/****************************************************************************** + * Function: THH_hob_ddrmm_query <==> THH_ddrmm_query + *****************************************************************************/ +HH_ret_t THH_hob_ddrmm_query( + HH_hca_hndl_t hca_hndl, + u_int8_t align_shift, + VAPI_size_t* total_mem, + VAPI_size_t* free_mem, + VAPI_size_t* largest_chunk, + VAPI_phy_addr_t* largest_free_addr_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1(MT_FLFMT(" NOT IN TASK CONTEXT\n")); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : Invalid HCA handle\n")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : No device registered\n")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->ddrmm == (THH_ddrmm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("ERROR : HCA device has not yet been opened\n")); + return HH_EAGAIN; + } + + if (thh_hob_p->ddr_props.dh == TRUE) { + /* Must hide DDR memory. DDR query not supported */ + MTL_ERROR1(MT_FLFMT("%s: Device is operating in HIDE_DDR mode. Cannot query ddr memory"), __func__); + return HH_ENOSYS; + } + + MTL_DEBUG1(MT_FLFMT("before THH_ddrmm_query \n")); + /* tavor ALWAYS has DDR, on other devices we should query if there's DDR */ + return THH_ddrmm_query(thh_hob_p->ddrmm,align_shift,total_mem,free_mem,largest_chunk,largest_free_addr_p); + +} + + +/****************************************************************************** + * Function: THH_hob_ddrmm_free <==> THH_ddrmm_free + *****************************************************************************/ +HH_ret_t THH_hob_ddrmm_free( + HH_hca_hndl_t hca_hndl, + VAPI_phy_addr_t buf, + VAPI_size_t size) + +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1(MT_FLFMT(" NOT IN TASK CONTEXT\n")); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : Invalid HCA handle\n")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("ERROR : No device registered\n")); + return HH_EAGAIN; + } + + + if (thh_hob_p->ddrmm == (THH_ddrmm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("ERROR : HCA device has not yet been opened\n")); + return HH_EAGAIN; + } + +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + rc = THH_ddrmm_free(thh_hob_p->ddrmm,buf,(MT_size_t)size); + return rc; +} + + + + + + + + +/****************************************************************************** + * Function: THH_hob_get_qp1_pkey <==> THH_qpm_get_qp1_pkey + *****************************************************************************/ +HH_ret_t THH_hob_get_qp1_pkey( + HH_hca_hndl_t hca_hndl, + IB_port_t port,/*IN */ + VAPI_pkey_t* pkey_p/*OUT*/) + +{ + THH_hob_t thh_hob_p; + + 
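+ /* Note on the DDR helpers above (illustrative sketch, not part of this
+  * patch): THH_hob_ddrmm_alloc() hands out device-attached (DDR) memory
+  * aligned to (1 << align_shift) bytes; callers release it with
+  * THH_hob_ddrmm_free(), e.g.:
+  *
+  *   VAPI_phy_addr_t buf;
+  *   if (THH_hob_ddrmm_alloc(hca_hndl, size, 12, &buf) == HH_OK) { // 4KB-aligned
+  *       ...
+  *       THH_hob_ddrmm_free(hca_hndl, buf, size);
+  *   }
+  *
+  * Allocation and query fail with HH_ENOSYS when the device runs in
+  * HIDE_DDR mode.
+  */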
MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("HCA %s has not yet been opened"),thh_hob_p->dev_name); + return HH_EAGAIN; + } + + return THH_qpm_get_qp1_pkey(thh_hob_p->qpm,port,pkey_p); +} + +HH_ret_t THH_hob_get_pkey( + HH_hca_hndl_t hca_hndl, + IB_port_t port,/*IN */ + VAPI_pkey_ix_t pkey_index, /*IN*/ + VAPI_pkey_t* pkey_p/*OUT*/) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("HCA %s has not yet been opened"),thh_hob_p->dev_name); + return HH_EAGAIN; + } + + return THH_qpm_get_pkey(thh_hob_p->qpm,port,pkey_index,pkey_p); +} + +/****************************************************************************** + * Function: THH_hob_get_sgid <==> THH_qpm_get_sgid + *****************************************************************************/ +HH_ret_t THH_hob_get_sgid( + HH_hca_hndl_t hca_hndl, + IB_port_t port,/*IN */ + u_int8_t index,/*IN*/ + IB_gid_t* gid_p/*OUT*/) + +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("HCA %s has not yet been opened"),thh_hob_p->dev_name); + return HH_EAGAIN; + } + + return THH_qpm_get_sgid(thh_hob_p->qpm,port,index,gid_p); +} + +HH_ret_t THH_hob_get_legacy_mode(THH_hob_t thh_hob_p,MT_bool *p_mode) +{ + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EAGAIN; + } + + *p_mode = thh_hob_p->module_flags.legacy_sqp; + + return HH_OK; +} + + + /****************************************************************************** + * Function: THH_hob_create_eec <==> THH_eecm_create_eec + *****************************************************************************/ +HH_ret_t THH_hob_create_eec(HH_hca_hndl_t hca_hndl, + HH_rdd_hndl_t rdd, + IB_eecn_t *eecn_p) +{ +#if 0 + THH_hob_t thh_hob_p; + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_create_eec: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_create_eec : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_create_eec: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_create_eec: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_eecm_create_eec(thh_hob_p->eecm, rdd, eecn_p); +#else + return HH_ENOSYS; +#endif +} + +/****************************************************************************** + * 
Function: THH_hob_modify_eec <==> THH_eecm_modify_eec + *****************************************************************************/ +HH_ret_t THH_hob_modify_eec(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_state_t cur_ee_state, + VAPI_qp_attr_t *ee_attr_p, + VAPI_qp_attr_mask_t *ee_attr_mask_p) +{ +#if 0 + THH_hob_t thh_hob_p; + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_modify_eec: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_modify_eec : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_modify_eec: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_modify_eec: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_eecm_modify_eec(thh_hob_p->eecm, eecn, cur_ee_state, ee_attr_p, ee_attr_mask_p); +#else + return HH_ENOSYS; +#endif +} + + + +/****************************************************************************** + * Function: THH_hob_query_eec <==> THH_eecm_query_eec + *****************************************************************************/ +HH_ret_t THH_hob_query_eec(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn, + VAPI_qp_attr_t *ee_attr_p) +{ +#if 0 + THH_hob_t thh_hob_p; + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_query_eec: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_query_eec : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_query_eec: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_query_eec: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_eecm_query_eec(thh_hob_p->eecm, eecn, ee_attr_p); +#else + return HH_ENOSYS; +#endif +} + + + +/****************************************************************************** + * Function: THH_hob_destroy_eec <==> THH_eecm_destroy_eec + *****************************************************************************/ +HH_ret_t THH_hob_destroy_eec(HH_hca_hndl_t hca_hndl, + IB_eecn_t eecn) +{ +#if 0 + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_destroy_eec: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_destroy_eec : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_destroy_eec: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + + if (thh_hob_p->eecm == (THH_eecm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_destroy_eec: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_eecm_destroy_eec(thh_hob_p->eecm, eecn); + return rc; +#else + return HH_ENOSYS; +#endif +} + + + +/****************************************************************************** + * Function: THH_hob_attach_to_multicast <==> THH_mcgm_attach_qp + *****************************************************************************/ +HH_ret_t THH_hob_attach_to_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid) +{ + THH_hob_t thh_hob_p; + + 
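+ /* Illustrative sketch (not part of this patch): attaching and detaching a
+  * UD QP to/from a multicast group is symmetric by (qpn, dgid) pair:
+  *
+  *   THH_hob_attach_to_multicast(hca_hndl, qpn, mgid);
+  *   ...
+  *   THH_hob_detach_from_multicast(hca_hndl, qpn, mgid);
+  *
+  * Both delegate to the MCG manager (THH_mcgm_attach_qp /
+  * THH_mcgm_detach_qp); the pass-through returns HH_EAGAIN while the HCA
+  * has not yet been opened (mcgm handle still invalid).
+  */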
MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_attach_to_multicast: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_attach_to_multicast : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_attach_to_multicast: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + if (thh_hob_p->mcgm == (THH_mcgm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_attach_to_multicast: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_mcgm_attach_qp(thh_hob_p->mcgm, qpn, dgid); +} + + +/****************************************************************************** + * Function: THH_hob_detach_from_multicast <==> THH_mcgm_detach_qp + *****************************************************************************/ +HH_ret_t THH_hob_detach_from_multicast( + HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + IB_gid_t dgid) +{ + THH_hob_t thh_hob_p; + HH_ret_t rc = HH_OK; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_detach_from_multicast: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_detach_from_multicast : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_detach_from_multicast: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + if (thh_hob_p->mcgm == (THH_mcgm_t)THH_INVALID_HNDL) { + MTL_ERROR1( "THH_hob_detach_from_multicast: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + rc = THH_mcgm_detach_qp(thh_hob_p->mcgm, qpn, dgid); + return rc; +} + +VIP_delay_unlock_t THH_hob_get_delay_unlock(THH_hob_t hob) +{ + if (hob == NULL) { + return NULL; + } else { + return (hob->delay_unlocks); + } +} + +HH_ret_t THH_get_debug_info( + HH_hca_hndl_t hca_hndl, /*IN*/ + THH_debug_info_t *debug_info_p /*OUT*/ +) +{ + THH_hob_t hob_p; + HH_ret_t rc = HH_OK; + MT_bool have_error = FALSE; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_get_debug_info: NOT IN TASK CONTEXT)\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_get_debug_info : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + hob_p = THHOBP(hca_hndl); + + if (hob_p == NULL) { + MTL_ERROR1("THH_get_debug_info: ERROR : No device registered\n"); + return HH_EAGAIN; + } + + memset(debug_info_p, 0, sizeof(THH_debug_info_t)); + + memcpy(&(debug_info_p->hw_props), &(hob_p->hw_props), sizeof(THH_hw_props_t)); + memcpy(&(debug_info_p->profile), &(hob_p->profile), sizeof(THH_profile_t)); + memcpy(&(debug_info_p->ddr_addr_vec), &(hob_p->ddr_alloc_base_addrs_vec), + sizeof(THH_ddr_base_addr_vector_t)); + memcpy(&(debug_info_p->ddr_size_vec), &(hob_p->ddr_alloc_size_vec), + sizeof(THH_ddr_allocation_vector_t)); + debug_info_p->num_ddr_addrs = THH_DDR_ALLOCATION_VEC_SIZE; + memcpy(&(debug_info_p->mrwm_props), &(hob_p->mrwm_props), sizeof(THH_mrwm_props_t)); + + debug_info_p->hide_ddr = hob_p->ddr_props.dh; + + rc = THH_uldm_get_num_objs(hob_p->uldm,&(debug_info_p->allocated_ul_res),&(debug_info_p->allocated_pd)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_get_debug_info: proc THH_uldm_get_num_of_objs returned ERROR")); + have_error = TRUE; + } + + rc = 
THH_mrwm_get_num_objs(hob_p->mrwm,&(debug_info_p->allocated_mr_int), + &(debug_info_p->allocated_mr_ext), &(debug_info_p->allocated_mw)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_get_debug_info: proc THH_mrwm_get_num_of_objs returned ERROR")); + have_error = TRUE; + } + + rc = THH_qpm_get_num_qps(hob_p->qpm,&(debug_info_p->allocated_qp)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_get_debug_info: proc THH_tqpm_get_num_of_qps returned ERROR")); + have_error = TRUE; + } + + rc = THH_cqm_get_num_cqs(hob_p->cqm,&(debug_info_p->allocated_cq)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_get_debug_info: proc THH_tcqm_get_num_of_cqs returned ERROR")); + have_error = TRUE; + } + + rc = THH_mcgm_get_num_mcgs(hob_p->mcgm,&(debug_info_p->allocated_mcg)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("THH_get_debug_info: proc THH_mcgm_get_num_of_mcgs returned ERROR")); + have_error = TRUE; + } + + return (have_error ? HH_ERR : HH_OK); +} + +/****************************************************************************** + * Function: THH_hob_get_num_ports + * + * Description: Gets number of physical ports configured for HCA + * + * input: + * hca_hndl + * output: + * num_ports_p - 1 or 2 + * returns: + * HH_OK + * HH_EINVAL + * HH_EINVAL_HCA_HNDL + * HH_ERR + * + * Comments: Returns the port count cached from QUERY_DEV_LIM at device bring-up; no MAD query is issued. + * + *****************************************************************************/ +HH_ret_t THH_hob_get_num_ports( HH_hca_hndl_t hca_hndl, + IB_port_t *num_ports_p) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + *num_ports_p = (IB_port_t)(thh_hob_p->dev_lims.num_ports); + return HH_OK; +} + +/****************************************************************************** + * Function: THH_hob_external_fatal + * + * Description: Artificially generates the fatal error flow from an external + * call + * + * input: + * hca_hndl + * output: + * + * returns: + * HH_OK + * HH_EINVAL_HCA_HNDL + * HH_EFATAL -- driver already in fatal state + * + * Comments: Meant to enable performing a VAPI STOP following a Tavor reset.
+ * + *****************************************************************************/ +HH_ret_t THH_hob_external_fatal( HH_hca_hndl_t hca_hndl) +{ + THH_hob_t thh_hob_p; + + if (hca_hndl == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1(MT_FLFMT("Invalid HCA handle")); + return HH_EINVAL_HCA_HNDL; + } + TEST_RETURN_FATAL(thh_hob_p); + MTL_ERROR1(MT_FLFMT("%s: Generating a fatal error for device %s"), + __func__, hca_hndl->dev_desc); + THH_hob_fatal_error(thh_hob_p, THH_FATAL_EXTERNAL,VAPI_EV_SYNDROME_NONE); + return HH_OK; +} + +#if defined(MT_SUSPEND_QP) +/****************************************************************************** + * Function: THH_hob_suspend_qp <==> THH_qpm_suspend_qp + *****************************************************************************/ +HH_ret_t THH_hob_suspend_qp(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + MT_bool suspend_flag) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_suspend_qp: NOT IN TASK CONTEXT\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_suspend_qp : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_suspend_qp: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->qpm == (THH_qpm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_suspend_qp: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_qpm_suspend_qp(thh_hob_p->qpm, qpn, suspend_flag); +} +/****************************************************************************** + * Function: THH_hob_suspend_cq <==> THH_cqm_suspend_cq + *****************************************************************************/ +HH_ret_t THH_hob_suspend_cq(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + MT_bool do_suspend) +{ + THH_hob_t thh_hob_p; + + MT_RETURN_IF_LOW_STACK(THH_WATERMARK); + if (MOSAL_get_exec_ctx() != MOSAL_IN_TASK) { + MTL_ERROR1("THH_hob_suspend_cq: NOT IN TASK CONTEXT\n"); + return HH_ERR; + } + + if (hca_hndl == NULL) { + MTL_ERROR1("THH_hob_suspend_cq : ERROR : Invalid HCA handle\n"); + return HH_EINVAL_HCA_HNDL; + } + thh_hob_p = THHOBP(hca_hndl); + + if (thh_hob_p == NULL) { + MTL_ERROR1("THH_hob_suspend_cq: ERROR : No device registered\n"); + return HH_EAGAIN; + } + TEST_RETURN_FATAL(thh_hob_p); + + + if (thh_hob_p->cqm == (THH_cqm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_hob_suspend_cq: ERROR : HCA device has not yet been opened\n"); + return HH_EAGAIN; + } + + return THH_cqm_suspend_cq(thh_hob_p->cqm, cq, do_suspend); +} +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h new file mode 100644 index 00000000..dc517719 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob.h @@ -0,0 +1,430 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THH_HOB_H +#define H_THH_HOB_H + +#include +#include +#include +#include +#include + +#define THH_RESERVED_PD 0 + +/* the memory key used for the UDAVM in THH (privileged mode) */ +#define THH_UDAVM_PRIV_RESERVED_LKEY 0 + +#define THH_INVALID_HNDL ((MT_ulong_ptr_t) (-1L)) + +typedef struct THH_intr_props_st { + MOSAL_IRQ_ID_t irq; + u_int8_t intr_pin; +} THH_intr_props_t; + +typedef struct THH_hw_props_st { + u_int8_t bus; + u_int8_t dev_func; + u_int16_t device_id; + u_int16_t pci_vendor_id; + u_int32_t hw_ver; + MT_phys_addr_t cr_base; + MT_phys_addr_t uar_base; + MT_phys_addr_t ddr_base; + THH_intr_props_t interrupt_props; +} THH_hw_props_t; + + +/* Prototypes */ + +/****************************************************************************** + * Function: THH_hob_create + * + * Arguments: + * hw_props_p - pointer to HW properties. + * hca_seq_num - a sequence number assigned to this HCA to differentiate it + * from other HCAs on this host + * mod_flags - ptr to struct containing flags passed during module initialization + * (e.g. insmod) + * hh_hndl_p - Returned HH context handle. HOB object is accessed via device field + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * Allocate THH_hob object and register the device in HH. + */ +DLL_API HH_ret_t THH_hob_create(/*IN*/ THH_hw_props_t *hw_props_p, + /*IN*/ u_int32_t hca_seq_num, + /*IN*/ THH_module_flags_t *mod_flags, + /*OUT*/ HH_hca_hndl_t *hh_hndl_p ); + +/****************************************************************************** + * Function: THH_hob_destroy + * + * Arguments: + * hca_hndl - The HCA handle allocated on device registration + * + * Returns: + * HH_OK + * HH_EINVAL_HCA_HNDL - The HCA either is not opened or is unknown + * + * Description: + * Deregister given device from HH and free all associated resources. + * If the HCA is still open, perform THH_hob_close_hca() before freeing the THH_hob. + */ +DLL_API HH_ret_t THH_hob_destroy(HH_hca_hndl_t hca_hndl); + +/****************************************************************************** + * Function: THH_hob_get_ver_info + * + * Arguments: + * hob + * version_p - Returned version information + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get version information of device associated with given hob. 
+ */ +DLL_API HH_ret_t THH_hob_get_ver_info ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_ver_info_t *version_p ); + +/****************************************************************************** + * Function: THH_hob_get_cmd_if + * + * Arguments: + * hob + * cmd_if_p - Included command interface object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to associated command interface object. + */ +DLL_API HH_ret_t THH_hob_get_cmd_if ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_cmd_t *cmd_if_p ); + +/****************************************************************************** + * Function: THH_hob_get_uldm + * + * Arguments: + * hob + * uldm_p - Included THH_uldm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to associated user-level domain management object. + */ +DLL_API HH_ret_t THH_hob_get_uldm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_uldm_t *uldm_p ); + +/****************************************************************************** + * Function: THH_hob_get_mrwm + * + * Arguments: + * hob + * mrwm_p - Included THH_mrwm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to associated memory regions/windows management object. + */ +DLL_API HH_ret_t THH_hob_get_mrwm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_mrwm_t *mrwm_p ); + +/****************************************************************************** + * Function: THH_hob_get_udavm_info + * + * Arguments: + * hob + * udavm_p - Included THH_udavm object + * use_priv_udav - flag: TRUE if using privileged UDAV mode + * av_on_board - flag: TRUE if should use DDR SDRAM on Tavor for UDAVs + * lkey - lkey allocated for udav table in privileged UDAV mode + * hide_ddr - flag: TRUE if should use host memory for UDAVs + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * return udavm information (needed by uldm object in PD allocation). + */ +DLL_API HH_ret_t THH_hob_get_udavm_info ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_udavm_t *udavm_p, + /*OUT*/ MT_bool *use_priv_udav, + /*OUT*/ MT_bool *av_on_board, + /*OUT*/ VAPI_lkey_t *lkey, + /*OUT*/ u_int32_t *max_ah_num, + /*OUT*/ MT_bool *hide_ddr); + +/****************************************************************************** + * Function: THH_hob_get_hca_hndl + * + * Arguments: + * hob + * hca_hndl_p - Included THH_mrwm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to the HH hca object. + */ +DLL_API HH_ret_t THH_hob_get_hca_hndl ( /*IN*/ THH_hob_t hob, + /*OUT*/ HH_hca_hndl_t *hca_hndl_p ); +/****************************************************************************** + * Function: THH_hob_get_ddrmm + * + * Arguments: + * hob + * uldm_p - Included THH_ddrmm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to DDR memory management object. + */ +DLL_API HH_ret_t THH_hob_get_ddrmm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_ddrmm_t *ddrmm_p ); + +/****************************************************************************** + * Function: THH_hob_get_qpm + * + * Arguments: + * hob + * eventp_p - Included THH_qpm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to qp management object. 
+ */ +DLL_API HH_ret_t THH_hob_get_qpm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_qpm_t *qpm_p ); + +/****************************************************************************** + * Function: THH_hob_get_cqm + * + * Arguments: + * hob + * eventp_p - Included THH_cqm object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to cq management object. + */ +DLL_API HH_ret_t THH_hob_get_cqm ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_cqm_t *cqm_p ); + +/****************************************************************************** + * Function: THH_hob_get_eventp + * + * Arguments: + * hob + * eventp_p - Included THH_eventp object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Get a handle to event management object. + */ +DLL_API HH_ret_t THH_hob_get_eventp ( /*IN*/ THH_hob_t hob, + /*OUT*/ THH_eventp_t *eventp_p ); + +/* Function for special QPs (provide info. for building MLX IB headers) */ +DLL_API HH_ret_t THH_hob_get_sgid(HH_hca_hndl_t hca_hndl,IB_port_t port,u_int8_t idx, IB_gid_t* gid_p); +DLL_API HH_ret_t THH_hob_get_qp1_pkey(HH_hca_hndl_t hca_hndl,IB_port_t port,VAPI_pkey_t* pkey); +DLL_API HH_ret_t THH_hob_get_pkey(HH_hca_hndl_t hca_hndl,IB_port_t port,VAPI_pkey_ix_t pkey_index, + VAPI_pkey_t* pkey_p/*OUT*/); +DLL_API HH_ret_t THH_hob_get_gid_tbl(HH_hca_hndl_t hca_hndl,IB_port_t port,u_int16_t tbl_len_in, + u_int16_t* tbl_len_out,IB_gid_t* param_gid_p); +DLL_API HH_ret_t THH_hob_init_gid_tbl(HH_hca_hndl_t hca_hndl,IB_port_t port,u_int16_t tbl_len_in, + u_int16_t* tbl_len_out,IB_gid_t* param_gid_p); +DLL_API HH_ret_t THH_hob_get_pkey_tbl(HH_hca_hndl_t hca_hndl,IB_port_t port_num, + u_int16_t tbl_len_in,u_int16_t *tbl_len_out,IB_pkey_t *pkey_tbl_p); +DLL_API HH_ret_t THH_hob_init_pkey_tbl(HH_hca_hndl_t hca_hndl,IB_port_t port_num, + u_int16_t tbl_len_in,u_int16_t *tbl_len_out,IB_pkey_t *pkey_tbl_p); +DLL_API HH_ret_t THH_hob_get_legacy_mode(THH_hob_t thh_hob_p,MT_bool *p_mode); +DLL_API HH_ret_t THH_hob_check_qp_init_attrs (THH_hob_t hob, HH_qp_init_attr_t * init_attr_p, + MT_bool is_special_qp ); + + +/****************************************************************************** + * Function: THH_hob_fatal_error + * + * Arguments: + * hob + * fatal_err_type - type of fatal error which occurred + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Initiates centralized fatal error handling when a fatal error is detected + */ +DLL_API HH_ret_t THH_hob_fatal_error(/*IN*/ THH_hob_t hob, + /*IN*/ THH_fatal_err_t fatal_err_type, + /*IN*/ VAPI_event_syndrome_t syndrome); + +VIP_delay_unlock_t THH_hob_get_delay_unlock(THH_hob_t hob); + +DLL_API HH_ret_t THH_hob_get_init_params(/*IN*/ THH_hob_t thh_hob_p, + /*OUT*/ THH_hw_props_t *hw_props_p, + /*OUT*/ u_int32_t *hca_seq_num, + /*OUT*/ THH_module_flags_t *mod_flags); +DLL_API HH_ret_t THH_hob_restart(HH_hca_hndl_t hca_hndl); + +DLL_API HH_ret_t THH_hob_get_state(THH_hob_t thh_hob_p, THH_hob_state_t *fatal_state); +DLL_API HH_ret_t THH_hob_get_fatal_syncobj(THH_hob_t thh_hob_p, MOSAL_syncobj_t *syncobj); +DLL_API HH_ret_t THH_hob_wait_if_fatal(THH_hob_t thh_hob_p, MT_bool *had_fatal); + +DLL_API HH_ret_t THH_hob_query_port_prop(HH_hca_hndl_t hca_hndl,IB_port_t port_num,VAPI_hca_port_t *hca_port_p ); +DLL_API HH_ret_t THH_hob_alloc_ul_res(HH_hca_hndl_t hca_hndl,MOSAL_protection_ctx_t prot_ctx,void *hca_ul_resources_p); +DLL_API HH_ret_t THH_hob_free_ul_res(HH_hca_hndl_t hca_hndl,void *hca_ul_resources_p); +DLL_API HH_ret_t 
THH_hob_alloc_pd(HH_hca_hndl_t hca_hndl, MOSAL_protection_ctx_t prot_ctx, void * pd_ul_resources_p,HH_pd_hndl_t *pd_num_p); +DLL_API HH_ret_t THH_hob_free_pd(HH_hca_hndl_t hca_hndl, HH_pd_hndl_t pd_num); +DLL_API HH_ret_t THH_hob_alloc_rdd(HH_hca_hndl_t hh_dev_p, HH_rdd_hndl_t *rdd_p); +DLL_API HH_ret_t THH_hob_free_rdd(HH_hca_hndl_t hh_dev_p, HH_rdd_hndl_t rdd); +DLL_API HH_ret_t THH_hob_create_ud_av(HH_hca_hndl_t hca_hndl,HH_pd_hndl_t pd,VAPI_ud_av_t *av_p, HH_ud_av_hndl_t *ah_p); +DLL_API HH_ret_t THH_hob_modify_ud_av(HH_hca_hndl_t hca_hndl, HH_ud_av_hndl_t ah,VAPI_ud_av_t *av_p); +DLL_API HH_ret_t THH_hob_query_ud_av(HH_hca_hndl_t hca_hndl, HH_ud_av_hndl_t ah,VAPI_ud_av_t *av_p); +DLL_API HH_ret_t THH_hob_destroy_ud_av(HH_hca_hndl_t hca_hndl, HH_ud_av_hndl_t ah); +DLL_API HH_ret_t THH_hob_register_mr(HH_hca_hndl_t hca_hndl,HH_mr_t *mr_props_p,VAPI_lkey_t *lkey_p,IB_rkey_t *rkey_p); +DLL_API HH_ret_t THH_hob_reregister_mr(HH_hca_hndl_t hca_hndl,VAPI_lkey_t lkey, VAPI_mr_change_t change_mask, HH_mr_t *mr_props_p, + VAPI_lkey_t* lkey_p,IB_rkey_t *rkey_p); +DLL_API HH_ret_t THH_hob_register_smr(HH_hca_hndl_t hca_hndl,HH_smr_t *mr_props_p,VAPI_lkey_t *lkey_p,IB_rkey_t *rkey_p); +DLL_API HH_ret_t THH_hob_query_mr(HH_hca_hndl_t hca_hndl,VAPI_lkey_t lkey,HH_mr_info_t *mr_info_p); +DLL_API HH_ret_t THH_hob_deregister_mr(HH_hca_hndl_t hca_hndl,VAPI_lkey_t lkey); +DLL_API HH_ret_t THH_hob_alloc_mw(HH_hca_hndl_t hca_hndl,HH_pd_hndl_t pd,IB_rkey_t *initial_rkey_p); +DLL_API HH_ret_t THH_hob_query_mw(HH_hca_hndl_t hca_hndl,IB_rkey_t initial_rkey,IB_rkey_t *current_rkey_p,HH_pd_hndl_t *pd_p); +DLL_API HH_ret_t THH_hob_free_mw(HH_hca_hndl_t hca_hndl,IB_rkey_t initial_rkey); +DLL_API HH_ret_t THH_hob_alloc_fmr(HH_hca_hndl_t hca_hndl, HH_pd_hndl_t pd, + VAPI_mrw_acl_t acl,MT_size_t max_pages,u_int8_t log2_page_sz,VAPI_lkey_t* last_lkey_p); +DLL_API HH_ret_t THH_hob_map_fmr(HH_hca_hndl_t hca_hndl,VAPI_lkey_t last_lkey, + EVAPI_fmr_map_t* map_p,VAPI_lkey_t* lkey_p,IB_rkey_t* rkey_p); +DLL_API HH_ret_t THH_hob_unmap_fmr(HH_hca_hndl_t hca_hndl,u_int32_t num_of_fmrs_to_unmap, VAPI_lkey_t* last_lkeys_array); +DLL_API HH_ret_t THH_hob_free_fmr(HH_hca_hndl_t hca_hndl,VAPI_lkey_t last_lkey); +DLL_API HH_ret_t THH_hob_create_cq(HH_hca_hndl_t hca_hndl,MOSAL_protection_ctx_t user_prot_context, + void *cq_ul_resources_p,HH_cq_hndl_t *cq_p); +DLL_API HH_ret_t THH_hob_resize_cq(HH_hca_hndl_t hca_hndl,HH_cq_hndl_t cq,void *cq_ul_resources_p); +DLL_API HH_ret_t THH_hob_query_cq(HH_hca_hndl_t hca_hndl,HH_cq_hndl_t cq,VAPI_cqe_num_t *num_o_cqes_p); +DLL_API HH_ret_t THH_hob_destroy_cq(HH_hca_hndl_t hca_hndl,HH_cq_hndl_t cq); +DLL_API HH_ret_t THH_hob_create_qp(HH_hca_hndl_t hca_hndl,HH_qp_init_attr_t *init_attr_p, void *qp_ul_resources_p,IB_wqpn_t *qpn_p); +DLL_API HH_ret_t THH_hob_get_special_qp(HH_hca_hndl_t hca_hndl,VAPI_special_qp_t qp_type,IB_port_t port, + HH_qp_init_attr_t *init_attr_p,void *qp_ul_resources_p,IB_wqpn_t *sqp_hndl_p); +DLL_API HH_ret_t THH_hob_modify_qp(HH_hca_hndl_t hca_hndl,IB_wqpn_t qpn,VAPI_qp_state_t cur_qp_state, + VAPI_qp_attr_t *qp_attr_p,VAPI_qp_attr_mask_t *qp_attr_mask_p); +DLL_API HH_ret_t THH_hob_query_qp(HH_hca_hndl_t hca_hndl,IB_wqpn_t qpn,VAPI_qp_attr_t *qp_attr_p); +DLL_API HH_ret_t THH_hob_destroy_qp(HH_hca_hndl_t hca_hndl,IB_wqpn_t qpn); + +DLL_API HH_ret_t THH_hob_create_srq(HH_hca_hndl_t hca_hndl, HH_pd_hndl_t pd, void *srq_ul_resources_p, + HH_srq_hndl_t *srq_p); +DLL_API HH_ret_t THH_hob_query_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq, u_int32_t *limit_p); +DLL_API HH_ret_t 
THH_hob_modify_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq, void *srq_ul_resources_p); +DLL_API HH_ret_t THH_hob_destroy_srq(HH_hca_hndl_t hca_hndl, HH_srq_hndl_t srq); + + +DLL_API HH_ret_t THH_hob_process_local_mad(HH_hca_hndl_t hca_hndl,IB_port_t port_num, IB_lid_t slid, + EVAPI_proc_mad_opt_t proc_mad_opts, void *mad_in_p, void * mad_out_p ); + + +DLL_API HH_ret_t THH_hob_ddrmm_alloc(HH_hca_hndl_t hca_hndl,VAPI_size_t size,u_int8_t align_shift,VAPI_phy_addr_t* buf_p); +DLL_API HH_ret_t THH_hob_ddrmm_query(HH_hca_hndl_t hca_hndl,u_int8_t align_shift,VAPI_size_t* total_mem, + VAPI_size_t* free_mem,VAPI_size_t* largest_chunk, + VAPI_phy_addr_t* largest_free_addr_p); + +DLL_API HH_ret_t THH_hob_ddrmm_free(HH_hca_hndl_t hca_hndl,VAPI_phy_addr_t buf, VAPI_size_t size); + + +DLL_API HH_ret_t THH_hob_create_eec(HH_hca_hndl_t hca_hndl,HH_rdd_hndl_t rdd,IB_eecn_t *eecn_p); +DLL_API HH_ret_t THH_hob_modify_eec(HH_hca_hndl_t hca_hndl,IB_eecn_t eecn,VAPI_qp_state_t cur_ee_state, + VAPI_qp_attr_t *ee_attr_p,VAPI_qp_attr_mask_t *ee_attr_mask_p); +DLL_API HH_ret_t THH_hob_query_eec(HH_hca_hndl_t hca_hndl,IB_eecn_t eecn,VAPI_qp_attr_t *ee_attr_p); +DLL_API HH_ret_t THH_hob_destroy_eec(HH_hca_hndl_t hca_hndl,IB_eecn_t eecn); +DLL_API HH_ret_t THH_hob_attach_to_multicast(HH_hca_hndl_t hca_hndl,IB_wqpn_t qpn,IB_gid_t dgid); +DLL_API HH_ret_t THH_hob_detach_from_multicast(HH_hca_hndl_t hca_hndl,IB_wqpn_t qpn,IB_gid_t dgid); +DLL_API HH_ret_t THH_hob_get_num_ports( HH_hca_hndl_t hca_hndl, IB_port_t *num_ports_p); +DLL_API HH_ret_t THH_hob_external_fatal( HH_hca_hndl_t hca_hndl); + +#ifdef IVAPI_THH + +DLL_API HH_ret_t THH_hob_set_comp_eventh(HH_hca_hndl_t hca_hndl, + HH_comp_eventh_t event, + void* private_data); +DLL_API HH_ret_t THH_hob_set_async_eventh(HH_hca_hndl_t hca_hndl, + HH_async_eventh_t event, + void* private_data); +DLL_API HH_ret_t THH_hob_open_hca(HH_hca_hndl_t hca_hndl, + EVAPI_hca_profile_t *prop_props_p, + EVAPI_hca_profile_t *sugg_profile_p); +DLL_API HH_ret_t THH_hob_close_hca(HH_hca_hndl_t hca_hndl); +DLL_API HH_ret_t THH_hob_query(HH_hca_hndl_t hca_hndl, VAPI_hca_cap_t *hca_cap_p); +DLL_API HH_ret_t THH_hob_modify(HH_hca_hndl_t hca_hndl, + IB_port_t port_num, + VAPI_hca_attr_t *hca_attr_p, + VAPI_hca_attr_mask_t *hca_attr_mask_p); + +#endif /* IVAPI_THH */ +#if defined(MT_SUSPEND_QP) +HH_ret_t THH_hob_suspend_qp(HH_hca_hndl_t hca_hndl, + IB_wqpn_t qpn, + MT_bool suspend_flag); +HH_ret_t THH_hob_suspend_cq(HH_hca_hndl_t hca_hndl, + HH_cq_hndl_t cq, + MT_bool do_suspend); +#endif + +#endif /* H_THH_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob_priv.h new file mode 100644 index 00000000..b56b35f2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_hob/thh_hob_priv.h @@ -0,0 +1,312 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THH_HOB_PRIV_H +#define H_THH_HOB_PRIV_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************* TEMPORARY DEFINES, INCLUDED UNTIL THINGS GET FIXED UP ****************/ + + +/* ******************* END TEMPORARY DEFINES ******************* */ + +#define THHOBP(hca_hndl) ((THH_hob_t)(hca_hndl->device)) +#define THHOB(hca_id) ((THH_hob_t)(HH_hca_dev_tbl[hca_id].device)) + +#define THH_RESERVED_PD 0 +#define THH_DEV_LIM_MCG_ENABLED(hob) (hob->dev_lims.udm == 1) + + +typedef struct THH_ib_props_st { + int dummy; +} THH_ib_props_t; + +/* minimum required firmware -- major_rev=1, minor_rev = 0x0015, sub_minor (patch) = 0 */ +#define THH_MIN_FW_VERSION MAKE_ULONGLONG(0x0000000100150000) +#define THH_MIN_FW_ERRBUF_VERSION MAKE_ULONGLONG(0x0000000100180000) +#define THH_MIN_FW_HIDE_DDR_VERSION MAKE_ULONGLONG(0x0000000100180000) +#define THH_MIN_FW_VERSION_SRQ MAKE_ULONGLONG(0x0000000300010000) + +/* Definitions for default profile */ +/* NOTE: All table sizes MUST be a power of 2 */ + +#define THH_DDR_ALLOCATION_VEC_SIZE 10 /* EEC not implemented yet */ +typedef struct THH_ddr_allocation_vector_st { + MT_size_t log2_mtt_size; + MT_size_t log2_mpt_size; + MT_size_t log2_qpc_size; + MT_size_t log2_eqpc_size; /* QPC alt path */ + MT_size_t log2_srqc_size; + MT_size_t log2_cqc_size; + MT_size_t log2_rdb_size; /* in-flight rdma */ + MT_size_t log2_uar_scratch_size; + MT_size_t log2_eqc_size; + MT_size_t log2_mcg_size; + MT_size_t log2_eec_size; + MT_size_t log2_eeec_size; /* EEC alt path */ +#if 0 + MT_size_t log2_wqe_pool_size; + MT_size_t log2_uplink_qp_size; + MT_size_t log2_uplink_mem_size; +#endif +} THH_ddr_allocation_vector_t; + +typedef struct THH_ddr_base_addr_vector_st { + MT_phys_addr_t mtt_base_addr; + MT_phys_addr_t mpt_base_addr; + MT_phys_addr_t qpc_base_addr; + MT_phys_addr_t eqpc_base_addr; + MT_phys_addr_t srqc_base_addr; + MT_phys_addr_t cqc_base_addr; + MT_phys_addr_t rdb_base_addr; /* in-flight rdma */ + MT_phys_addr_t uar_scratch_base_addr; + MT_phys_addr_t eqc_base_addr; + MT_phys_addr_t mcg_base_addr; + MT_phys_addr_t eec_base_addr; + MT_phys_addr_t eeec_base_addr; +#if 0 + MT_phys_addr_t log2_wqe_pool_size; + MT_phys_addr_t log2_uplink_qp_size; + MT_phys_addr_t log2_uplink_mem_size; +#endif +} THH_ddr_base_addr_vector_t; + +typedef enum { + THH_DDR_SIZE_BAD, + THH_DDR_SIZE_32M = 1, + THH_DDR_SIZE_64M, + THH_DDR_SIZE_128M, + THH_DDR_SIZE_256M, + THH_DDR_SIZE_512M, + THH_DDR_SIZE_1024M, + THH_DDR_SIZE_2048M, + THH_DDR_SIZE_4096M, + THH_DDR_SIZE_BIG, +} THH_ddr_size_enum_t; + +typedef struct THH_non_ddr_defaults_st { + MT_bool use_priv_udav; + THH_ddr_size_enum_t ddr_size_code; + MT_size_t 
ddr_size; + MT_size_t max_num_pds; + MT_size_t num_external_mem_regions; + MT_size_t num_mem_windows; + MT_size_t ddr_alloc_vec_size; + MT_size_t log2_max_uar; + MT_size_t log2_max_qps; + MT_size_t max_num_qps; + MT_size_t log2_max_srqs; + MT_size_t max_num_srqs; + MT_size_t log2_wqe_ddr_space_per_qp; + MT_size_t log2_max_cqs; + MT_size_t max_num_cqs; + MT_size_t log2_max_eecs; + MT_size_t log2_max_mcgs; + MT_size_t log2_mcg_entry_size; + MT_size_t log2_mcg_hash_size; + MT_size_t qps_per_mcg; + MT_size_t log2_max_mpt_entries; + MT_size_t log2_max_mtt_entries; + MT_size_t log2_mtt_entries_per_seg; + MT_size_t log2_mtt_segs_per_region; + u_int8_t log2_max_eqs; + u_int8_t log2_uar_pg_size; + MT_size_t max_priv_udavs; + u_int8_t log2_inflight_rdma_per_qp; +} THH_profile_t; + +#define THH_DEF_CQ_PER_QP 1 + +#define TAVOR_MAX_EQ 64 +#define THH_COMPL_EQ_IX 0 +#define THH_IB_EQ_IX 1 +#define THH_CMD_EQ_IX 2 +#define THH_MT_EQ_IX 3 + +typedef struct THH_hob_port_info_st { + u_int32_t capability_bits; +}THH_hob_port_info_t; + +/* catastrophic error thread structure */ +typedef struct THH_hob_cat_err_thread_st { + MOSAL_thread_t mto; + MOSAL_mutex_t mutex; + MOSAL_syncobj_t start_sync; /* sync object needed on start of thread */ + MOSAL_syncobj_t stop_sync; /* sync object needed on exit of thread */ + MOSAL_syncobj_t fatal_err_sync; /* wait on fatal_err_sync object */ + struct THH_hob_st *hob; /* pointer to this thread's HOB object */ + volatile MT_bool have_fatal; /*TRUE ==> catastrophic error has occurred */ + /*FALSE ==> just exit. */ +} THH_hob_cat_err_thread_t; + +typedef struct THH_hob_pci_info_st { + MT_bool is_valid; + u_int8_t bus; + u_int8_t dev_func; + u_int32_t config[64]; +} THH_hob_pci_info_t; + +typedef struct THH_hob_st { + /* THH_hob_create parameters */ + u_int32_t hca_seq_num; + THH_module_flags_t module_flags; + THH_hw_props_t hw_props; + + char dev_name[20]; + u_int32_t dev_id; + HH_hca_hndl_t hh_hca_hndl; + + THH_dev_lim_t dev_lims; /* QUERY_DEV_LIM */ + THH_adapter_props_t adapter_props; /* QUERY_ADAPTER */ + THH_fw_props_t fw_props; /* QUERY_FW */ + THH_ddr_props_t ddr_props; /* QUERY_DDR */ + THH_ib_props_t ib_props; /* QUERY_IB */ + + THH_port_init_props_t *init_ib_props; /* VMALLOCed. 
One entry per port */ + /* HCA Props */ + THH_hca_props_t hca_props; + + VAPI_hca_cap_t hca_capabilities; /* filled at end of open_hca, and saved for Query_hca */ + + MT_virt_addr_t fw_error_buf_start_va; + u_int32_t * fw_error_buf; /* kmalloced buffer ready to hold info at cat error */ + + THH_ddr_allocation_vector_t ddr_alloc_size_vec; + THH_ddr_base_addr_vector_t ddr_alloc_base_addrs_vec; + THH_profile_t profile; + + /* HH Interface */ + HH_if_ops_t if_ops; + + /* Version information */ + THH_ver_info_t version_info; + + /* udavm information if privileged is used*/ + MT_bool udavm_use_priv; + VAPI_lkey_t udavm_lkey; + MT_virt_addr_t udavm_table; + MT_phys_addr_t udavm_table_ddr; + MT_size_t udavm_table_size; + + /* EQ handles */ + THH_eqn_t compl_eq; + THH_eqn_t ib_eq; + + /* Mutexes, etc */ + MOSAL_mutex_t mtx; /* used internally */ + + /* CONTAINED OBJECTS HANDLES */ + THH_cmd_t cmd; + THH_ddrmm_t ddrmm; + THH_uldm_t uldm; + THH_mrwm_t mrwm; + THH_cqm_t cqm; + /* THH_eecm_t eecm; */ + THH_qpm_t qpm; + THH_srqm_t srqm; + THH_udavm_t udavm; + + char *av_ddr_base; + char *av_host_base; + + THH_mcgm_t mcgm; + THH_eventp_t eventp; + THH_uar_t kar; + MT_virt_addr_t kar_addr; + + /* for THH_get_debug_info() */ + THH_mrwm_props_t mrwm_props; + + /* fatal error handling fields */ + VAPI_event_syndrome_t fatal_syndrome; + THH_hob_state_t thh_state; + MOSAL_syncobj_t thh_fatal_complete_syncobj; + HH_async_eventh_t async_eventh; /* saved handler and context, registered by VIP */ + void* async_ev_private_context; + MOSAL_spinlock_t async_spl; + MOSAL_spinlock_t fatal_spl; + + VIP_delay_unlock_t delay_unlocks; + THH_hob_cat_err_thread_t fatal_thread_obj; + + THH_hob_pci_info_t pci_bridge_info; + THH_hob_pci_info_t pci_hca_info; + +} THH_hob_dev_t; + +typedef struct { + THH_hw_props_t hw_props; + THH_profile_t profile; + THH_ddr_base_addr_vector_t ddr_addr_vec; + THH_ddr_base_addr_vector_t ddr_size_vec; + u_int32_t num_ddr_addrs; + THH_mrwm_props_t mrwm_props; + MT_bool hide_ddr; + + /* Allocated resources count */ + u_int32_t allocated_ul_res; /* From ULDM */ + /* (in current implementation ul_res num. is the same as UAR num., excluding UAR1) */ + u_int32_t allocated_pd; /* From ULDM */ + u_int32_t allocated_cq; /* From TCQM */ + u_int32_t allocated_qp; /* From TQPM */ + u_int32_t allocated_mr_int; /* From TMRWM */ + u_int32_t allocated_mr_ext; /* From TMRWM */ + u_int32_t allocated_mw; /* From TMRWM */ + u_int32_t allocated_mcg; /* From MCGM */ +} THH_debug_info_t ; + + +HH_ret_t THH_get_debug_info( + HH_hca_hndl_t hca_hndl, /*IN*/ + THH_debug_info_t *debug_info_p /*OUT*/ +); + + +#endif /* H_THH_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.c new file mode 100644 index 00000000..ecf4c5e2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.c @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include + +extern void THH_cqm_init(void); +extern void THH_qpm_init(void); +extern void THH_mrwm_init(void); + +HH_ret_t THH_init(void) +{ + /* This function should be called by the DDK entry point, to perform any module initializations */ + /* that are global (and not per HCA). In Linux, for example, the DDK entry point is "init_module". */ + + THH_cqm_init(); + THH_qpm_init(); + THH_mrwm_init(); + return(HH_OK); +} + + +HH_ret_t THH_cleanup(void) +{ + /* This function should be called by the DDK exit point, to perform any module cleanup */ + /* that is global (and not per HCA). In Linux, for example, the DDK exit point is "cleanup_module". */ + return(HH_OK); +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.h new file mode 100644 index 00000000..58b79040 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_init.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_THH_INIT_H +#define H_THH_INIT_H + + +/* global THH data structures */ + + +#include +#include + + +HH_ret_t THH_init(void); /* Kernel module entry point */ +HH_ret_t THH_cleanup(void); /* Kernel module cleanup point */ + +#endif /* H_THH_INIT_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c new file mode 100644 index 00000000..15d75521 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.c @@ -0,0 +1,2681 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies.
All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* Hopefully, we'll get the following fw restrictions out... */ +#define SUPPORT_DESTROY_QPI_REUSE 1 +#define SUPPORT_2ERR 1 +#define SUPPORT_DESTROY 1 +/* #define DELAY_CONF_SPECIAL_QPS 1 */ + +#include +#if defined(USE_STD_MEMORY) +# include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern void MadBufPrint(void *madbuf); + +static void printGIDTable(THH_qpm_t qpm); +static void printPKeyTable(THH_qpm_t qpm); + +#define CMDRC2HH_ND(cmd_rc) ((cmd_rc == THH_CMD_STAT_OK) ? HH_OK : \ + (cmd_rc == THH_CMD_STAT_EINTR) ? HH_EINTR : HH_EFATAL) + +#define ELSE_ACQ_ERROR(f) else { MTL_ERROR1("%s MOSAL_mutex_acq failed\n", f); } +#define logIfErr(f) \ + if (rc != HH_OK) { MTL_ERROR1("%s: rc=%s\n", f, HH_strerror_sym(rc)); } + + +#if !defined(ARR_SIZE) +# define ARR_SIZE(a) (sizeof(a)/sizeof(a[0])) +#endif + +/* These are the tunebale parameters */ +enum +{ + TUNABLE_ACK_REQ_FREQ = 10, + TUNABLE_FLIGHT_LIMIT = 9 +}; + + +enum {WQE_CHUNK_SIZE_LOG2 = 6, + WQE_CHUNK_SIZE = 1ul << WQE_CHUNK_SIZE_LOG2, + WQE_CHUNK_MASK = WQE_CHUNK_SIZE - 1 + }; + + + +typedef struct +{ + IB_wqpn_t qpn; + HH_srq_hndl_t srqn; /* If invalid, equals HH_EINVAL_SRQ_HNDL */ + u_int32_t pd; + u_int32_t rdd; + u_int32_t cqn_snd; + u_int32_t cqn_rcv; + + /* The following is kept merely to support query */ + VAPI_qp_cap_t cap; + + /* kept to support modify-qp/query-qp */ + VAPI_rdma_atom_acl_t remote_atomic_flags;/* Enable/Disable RDMA and atomic */ + u_int8_t qp_ous_rd_atom; /* Maximum number of oust. 
RDMA read/atomic as target */ + u_int8_t ous_dst_rd_atom; /* Number of outstanding RDMA rd/atomic ops at destination */ + + u_int8_t st; /* sufficient for THH_service_type_t - enum */ + VAPI_qp_state_t state; + unsigned int ssc:1; + unsigned int rsc:1; + + MT_virt_addr_t wqes_buf; /* WQEs buffer virtual address */ + MT_size_t wqes_buf_sz; + VAPI_lkey_t lkey; + THH_uar_index_t uar_index; /* index of UAR used for this QP */ + MT_phys_addr_t pa_ddr; +#if defined(MT_SUSPEND_QP) + MT_bool is_suspended; +#endif +} TQPM_sw_qpc_t; + + +typedef struct +{ + signed char tab[VAPI_ERR+1][VAPI_ERR+1]; /* -1 or THH_qpee_transition_t */ +} State_machine; + +static const VAPI_special_qp_t qp_types[] = { + VAPI_SMI_QP, VAPI_GSI_QP, /* _Used_ for special QPs */ + VAPI_RAW_IPV6_QP, VAPI_RAW_ETY_QP /* Not used: but supported for query */ +}; +enum {n_qp_types = ARR_SIZE(qp_types)}; + +typedef struct +{ + u_int8_t n_ports; + MT_bool configured; + IB_wqpn_t first_sqp_qpn; /* Above FW reserved QPCs */ + THH_port_init_props_t* port_props; + TQPM_sw_qpc_t** sqp_ctx; /* Context for special QPs is outside of the VIP_array */ +} Special_QPs; + + +/* The main QP-manager structure + * Note that {(EPool_t flist), (THH_ddrmm_t ddrmm)} + * have their own mutex-es controlling multi-threads. + */ +typedef struct THH_qpm_st +{ + /* Capabilities */ + u_int8_t log2_max_qp; + u_int32_t rdb_base_index; + u_int8_t log2_max_outs_rdma_atom; + u_int8_t log2_max_outs_dst_rd_atom; + u_int32_t max_outs_rdma_atom; /* convenient 2^log2_max_outs_rdma_atom */ + u_int32_t idx_mask; /* convenient (2^log2_max_qp) - 1 */ + + /* SW resources tables */ + Special_QPs sqp_info; + VIP_array_p_t qp_tbl; /* Index 0 into the table is first_sqp_qpn+NUM_SQP */ + u_int32_t first_rqp; /* First regular QP */ + u_int8_t* qpn_prefix; /* persistant 8 bit prefix change for QP numbers */ + /* In order to avoid holding to much memory for QP numbers prefix we: + * 1) Manipulate only upper 8 bits of the 24 bit QPN + * 2) Hold only MAX_QPN_PREFIX numbers to be shared among QPs with the same lsb of index */ + MOSAL_mutex_t mtx; /* protect sqp_ctx and pkey/sgid tables */ + + /* convenient handle saving */ + THH_hob_t hob; + THH_cmd_t cmd_if; + THH_uldm_t uldm; + THH_mrwm_t mrwm_internal; + THH_ddrmm_t ddrmm; + + /* mirror of the port gid tbl - for special qps*/ + IB_gid_t* sgid_tbl[NUM_PORTS]; + u_int16_t num_sgids[NUM_PORTS]; + + /* mirror of the port qp1 pkey - values are kept CPU endian - little endian*/ + VAPI_pkey_t* pkey_tbl[NUM_PORTS]; + u_int16_t pkey_tbl_sz[NUM_PORTS]; + VAPI_pkey_ix_t qp1_pkey_idx[NUM_PORTS]; + MT_bool port_active[NUM_PORTS]; +} TQPM_t; + +static MT_bool constants_ok = TRUE; /* guarding this tqpm module */ +static State_machine state_machine; /* const after THH_qpm_init */ +static const u_int32_t valid_tavor_ibmtu_mask = + (1ul << MTU256) | + (1ul << MTU512) | + (1ul << MTU1024) | + (1ul << MTU2048); + + +static u_int8_t native_page_shift; +static u_int32_t native_page_size; +static u_int32_t native_page_low_mask; + +/************************************************************************/ +/************************************************************************/ +/* private functions */ + + + +/************************************************************************/ +static inline MT_bool is_sqp(THH_qpm_t qpm,IB_wqpn_t qpn) +{ + u_int32_t qp_idx = qpn & qpm->idx_mask; + + return ((qp_idx >= qpm->sqp_info.first_sqp_qpn) && (qp_idx < qpm->first_rqp)); +} + +/************************************************************************/ +static 
inline MT_bool is_sqp0(THH_qpm_t qpm,IB_wqpn_t qpn,IB_port_t* port_p) +{ + u_int32_t qp_idx = qpn & qpm->idx_mask; + MT_bool is_true; + + is_true = ((qp_idx >= qpm->sqp_info.first_sqp_qpn) && + (qp_idx < qpm->sqp_info.first_sqp_qpn + qpm->sqp_info.n_ports)); + if (is_true == TRUE) { + *port_p = ((qp_idx-(qpm->sqp_info.first_sqp_qpn & qpm->idx_mask))%qpm->sqp_info.n_ports) + 1; + } + + MTL_DEBUG1(MT_FLFMT("%s: qpn=0x%x, mask=0x%x, first=0x%x, nports=0x%x, *port = %d, ret=%s"),__func__, + qpn, qpm->idx_mask,qpm->sqp_info.first_sqp_qpn, qpm->sqp_info.n_ports, *port_p, + (is_true==FALSE)?"FALSE":"TRUE"); + return is_true; +} + +/************************************************************************/ +static inline MT_bool is_sqp1(THH_qpm_t qpm,IB_wqpn_t qpn,IB_port_t* port_p) +{ + u_int32_t qp_idx = qpn & qpm->idx_mask; + MT_bool is_true = FALSE; + + is_true = ((qp_idx >= qpm->sqp_info.first_sqp_qpn + qpm->sqp_info.n_ports) && + (qp_idx < qpm->sqp_info.first_sqp_qpn + (2 * qpm->sqp_info.n_ports) )); + if (is_true == TRUE) { + *port_p = ((qp_idx-((qpm->sqp_info.first_sqp_qpn & qpm->idx_mask)+qpm->sqp_info.n_ports))%qpm->sqp_info.n_ports) + 1; + } + MTL_DEBUG1(MT_FLFMT("%s: qpn=0x%x, mask=0x%x, first=0x%x, nports=0x%x, *port = %d, ret=%s"),__func__, + qpn, qpm->idx_mask,qpm->sqp_info.first_sqp_qpn, qpm->sqp_info.n_ports, *port_p, + (is_true==FALSE)?"FALSE":"TRUE"); + return is_true; +} +/************************************************************************/ +static inline MT_bool check_2update_pkey(VAPI_qp_state_t cur_state,VAPI_qp_state_t new_state, + VAPI_qp_attr_mask_t* attr_mask_p) +{ + if (*attr_mask_p & QP_ATTR_PKEY_IX) + { + /*obligatory */ + if ((cur_state == VAPI_RESET) && (new_state == VAPI_INIT)) + { + return TRUE; + } + /* optional */ + if ((cur_state == VAPI_INIT) && (new_state == VAPI_RTR)) + { + return TRUE; + } + /* optional */ + if ((cur_state == VAPI_SQD) && (new_state == VAPI_RTS)) + { + return TRUE; + } + } + return FALSE; +} +/************************************************************************/ +static MT_bool check_constants(void) +{ + static const VAPI_qp_state_t states[] = + { + VAPI_INIT,VAPI_RESET,VAPI_RTR,VAPI_RTS,VAPI_SQD,VAPI_SQE,VAPI_ERR + }; + static const IB_mtu_t mtu_vals[] = /* Check all IB MTU values */ + { /* not just Tavor's. 
*/ + MTU256, MTU512, MTU1024, MTU2048, MTU4096 + }; + int i; + for (i = 0, constants_ok = TRUE; i != ARR_SIZE(states); ++i) + { +/*** error C4296: '<=' : expression is always true ***/ + constants_ok = constants_ok && /*(0 <= states[i]) &&*/ (states[i] < VAPI_ERR+1); + } + for (i = 0; i != ARR_SIZE(mtu_vals); ++i) + { +/*** error C4296: '<=' : expression is always true ***/ + constants_ok = constants_ok && /*(0 <= mtu_vals[i]) &&*/ (mtu_vals[i] < 32); + } + MTL_DEBUG4(MT_FLFMT("constants_ok=%d"), constants_ok); + return constants_ok; +} /* check_constants */ + + +/************************************************************************/ +static inline int defined_qp_state(int qp_state) +{ + int def = ((VAPI_RESET <= qp_state) && (qp_state <= VAPI_ERR)); + MTL_DEBUG4(MT_FLFMT("qp_state=%d, def=%d"), qp_state, def); + return def; +} /* defined_qp_state */ + + +/************************************************************************/ +static void init_state_machine(State_machine* xs2s) +{ + int fi, ti; + + for (fi = 0; fi != (VAPI_ERR+1); ++fi) + { + /* first, initialize as undefined */ + for (ti = 0; ti != VAPI_ERR; ++ti) + { + xs2s->tab[fi][ti] = -1; + } + xs2s->tab[fi][VAPI_ERR] = QPEE_TRANS_2ERR; /* undef later fi=VAPI_RESET */ + xs2s->tab[fi][VAPI_RESET] = QPEE_TRANS_ERR2RST; + } + + xs2s->tab[VAPI_RESET][VAPI_ERR] = -1; /* Apr/21/2002 meeting */ + + /* see state graph in Tavor-PRM, (12.3 Command Summary) */ + xs2s->tab[VAPI_RESET][VAPI_INIT] = QPEE_TRANS_RST2INIT; + xs2s->tab[VAPI_INIT] [VAPI_INIT] = QPEE_TRANS_INIT2INIT; + xs2s->tab[VAPI_INIT] [VAPI_RTR] = QPEE_TRANS_INIT2RTR; + xs2s->tab[VAPI_RTR] [VAPI_RTS] = QPEE_TRANS_RTR2RTS; + xs2s->tab[VAPI_RTS] [VAPI_RTS] = QPEE_TRANS_RTS2RTS; + xs2s->tab[VAPI_SQE] [VAPI_RTS] = QPEE_TRANS_SQERR2RTS; + xs2s->tab[VAPI_RTS] [VAPI_SQD] = QPEE_TRANS_RTS2SQD; + xs2s->tab[VAPI_SQD] [VAPI_RTS] = QPEE_TRANS_SQD2RTS; + xs2s->tab[VAPI_ERR] [VAPI_RESET] = QPEE_TRANS_ERR2RST; +} /* init_state_machine */ +/*******************************************************************************/ +/* + * init_sgid_table + */ +static HH_ret_t init_sgid_tbl(THH_qpm_t qpm) +{ + HH_ret_t ret = HH_OK; + HH_hca_hndl_t hca_hndl; + u_int16_t tbl_len_out; + MT_bool destroy_tbl = FALSE; + int i; + + if (qpm == NULL) { + MTL_ERROR1("[%s]: ERROR: NULL qpm value \n",__FUNCTION__); + return HH_EINVAL; + } + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + qpm->num_sgids[i] = 0; + } + + ret = THH_hob_get_hca_hndl(qpm->hob,&hca_hndl); + if (ret != HH_OK) + { + MTL_ERROR1("[%s]: ERROR: THH_hob_get_hca_hndl failed \n",__FUNCTION__); + return ret; + } + + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + qpm->sgid_tbl[i] = (IB_gid_t*)TQPM_GOOD_ALLOC((sizeof(IB_gid_t) * DEFAULT_SGID_TBL_SZ)); + //query the gid table for all ports + ret = THH_hob_init_gid_tbl(hca_hndl,i+1,DEFAULT_SGID_TBL_SZ,&tbl_len_out,qpm->sgid_tbl[i]); + if (ret != HH_OK) + { + if (ret == HH_EAGAIN) + { + TQPM_GOOD_FREE(qpm->sgid_tbl[i],(sizeof(IB_gid_t) * DEFAULT_SGID_TBL_SZ)); + qpm->sgid_tbl[i] = (IB_gid_t*)TQPM_GOOD_ALLOC(sizeof(IB_gid_t) * tbl_len_out); + ret = THH_hob_init_gid_tbl(hca_hndl,i+1,tbl_len_out,&tbl_len_out,qpm->sgid_tbl[i]); + if (ret != HH_OK) + { + destroy_tbl = TRUE; + } + + }else destroy_tbl = TRUE; + } + + if (destroy_tbl) + { + MTL_ERROR1("[%s]: ERROR: THH_hob_get_gid_tbl failed for port %d\n",__FUNCTION__,i+1); + TQPM_GOOD_FREE(qpm->sgid_tbl[i],(sizeof(IB_gid_t) * tbl_len_out) ); + qpm->sgid_tbl[i] = NULL; + qpm->num_sgids[i] = 0; + }else + { + qpm->num_sgids[i] = tbl_len_out; + } + + destroy_tbl = 
FALSE; + } + + //TBD: ret for one port failure + MT_RETURN(ret); +} + +/* + * init_pkey_table + */ +static HH_ret_t init_pkey_tbl(THH_qpm_t qpm) +{ + HH_ret_t ret = HH_OK; + HH_hca_hndl_t hca_hndl; + u_int16_t tbl_len_out; + MT_bool destroy_tbl = FALSE; + int i; + + if (qpm == NULL) { + MTL_ERROR1("[%s]: ERROR: NULL qpm value \n",__FUNCTION__); + return HH_EINVAL; + } + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + qpm->qp1_pkey_idx[i] = 0xffff; + } + + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + qpm->pkey_tbl_sz[i] = 0; + } + + ret = THH_hob_get_hca_hndl(qpm->hob,&hca_hndl); + if (ret != HH_OK) + { + MTL_ERROR1("[%s]: ERROR: THH_hob_get_hca_hndl failed \n",__FUNCTION__); + return ret; + } + + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + qpm->pkey_tbl[i] = (VAPI_pkey_t*)TQPM_GOOD_ALLOC((sizeof(VAPI_pkey_t)*DEFAULT_PKEY_TBL_SZ)); + + //query the pkey table for all ports + ret = THH_hob_init_pkey_tbl(hca_hndl,i+1,DEFAULT_PKEY_TBL_SZ,&tbl_len_out,qpm->pkey_tbl[i]); + if (ret != HH_OK) + { + if (ret == HH_EAGAIN) + { + TQPM_GOOD_FREE(qpm->pkey_tbl[i],(sizeof(VAPI_pkey_t)*DEFAULT_PKEY_TBL_SZ)); + qpm->pkey_tbl[i] = (VAPI_pkey_t*)TQPM_GOOD_ALLOC((sizeof(VAPI_pkey_t)* tbl_len_out)); + ret = THH_hob_init_pkey_tbl(hca_hndl,i+1,tbl_len_out,&tbl_len_out,qpm->pkey_tbl[i]); + if (ret != HH_OK) + { + destroy_tbl = TRUE; + } + + }else destroy_tbl = TRUE; + } + + if (destroy_tbl) + { + MTL_ERROR1("[%s]: ERROR: THH_hob_get_pkey_tbl failed for port %d\n",__FUNCTION__,i+1); + TQPM_GOOD_FREE(qpm->pkey_tbl[i],(sizeof(VAPI_pkey_t)*tbl_len_out)); + qpm->pkey_tbl[i] = NULL; + qpm->pkey_tbl_sz[i] = 0; + }else + { + if ( qpm->pkey_tbl[i][0] == 0 ) + { + qpm->pkey_tbl[i][0] = 0xffff; + } + qpm->pkey_tbl_sz[i] = tbl_len_out; + } + + destroy_tbl = FALSE; + } + + //TBD: ret for one port failure + MT_RETURN(ret); +} + +/************************************************************************/ +static MT_bool copy_port_props(TQPM_t* qpm, const THH_qpm_init_t* init_attr_p) +{ + MT_bool ok = TRUE; + const THH_port_init_props_t* in_port_props = init_attr_p->port_props; + if (in_port_props) + { + unsigned int n_ports = init_attr_p->n_ports; + THH_port_init_props_t* props = TNMALLOC(THH_port_init_props_t, n_ports); + if (props) + { + memcpy(props, in_port_props, n_ports * sizeof(THH_port_init_props_t)); + qpm->sqp_info.port_props = props; + } + else + { + MTL_ERROR1(MT_FLFMT("Allocating port_props (%d) failed"), n_ports); + ok = FALSE; + } + } else { + qpm->sqp_info.port_props = NULL; + } + MTL_DEBUG4(MT_FLFMT("copy_port_props: qpm=0x%p, ok=%d"), qpm, ok); + return ok; +} /* copy_port_props */ + + + +/************************************************************************/ +static HH_ret_t conf_special_qps(TQPM_t* qpm) +{ + HH_ret_t rc = HH_OK; + static const VAPI_special_qp_t qp01[2] = {VAPI_SMI_QP, VAPI_GSI_QP}; + Special_QPs* sqp = &qpm->sqp_info; + unsigned int n_ports = sqp->n_ports; + IB_wqpn_t qpn = sqp->first_sqp_qpn; + unsigned ti; + for (ti = 0; (ti != 2) && (rc == HH_OK); ++ti, qpn += n_ports) + { + VAPI_special_qp_t qp_type = qp01[ti]; + THH_cmd_status_t cmd_rc = + THH_cmd_CONF_SPECIAL_QP(qpm->cmd_if, qp_type, qpn); + switch(cmd_rc) { + case THH_CMD_STAT_OK: + rc = HH_OK; + break; + case THH_CMD_STAT_EINTR: + rc = HH_EINTR; + break; + default: + MTL_ERROR1(MT_FLFMT("THH_cmd_CONF_SPECIAL_QP ti=%d, qpn=0x%x, crc=%d=%s"), + ti, qpn, cmd_rc, str_THH_cmd_status_t(cmd_rc)); + rc = HH_EFATAL; + } + } + MTL_DEBUG4(MT_FLFMT("rc=%d"), rc); + return rc; +} /* conf_special_qps */ + + 
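+/* Illustrative sketch of the special-QP numbering that conf_special_qps() establishes and that is_sqp0()/is_sqp1() above invert: indices [first_sqp_qpn, first_sqp_qpn+n_ports) hold the per-port SMI QPs (QP0), and the next n_ports indices hold the per-port GSI QPs (QP1). Kept under #if 0 as an example only; sqp_layout_example() is a hypothetical name, not part of this driver. */ +#if 0 +static MT_bool sqp_layout_example(u_int32_t qp_idx, u_int32_t first_sqp, u_int8_t n_ports, IB_port_t* port_p) +{ + if ((qp_idx >= first_sqp) && (qp_idx < first_sqp + n_ports)) { + *port_p = (IB_port_t)(qp_idx - first_sqp + 1); /* QP0 of port 1..n_ports */ + return TRUE; + } + if ((qp_idx >= first_sqp + n_ports) && (qp_idx < first_sqp + 2u * n_ports)) { + *port_p = (IB_port_t)(qp_idx - first_sqp - n_ports + 1); /* QP1 of port 1..n_ports */ + return TRUE; + } + return FALSE; /* not in the special-QP window */ +} +#endif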
+/************************************************************************/ +/* We ensure allocating and freeing DDR memory of sizes + * that are least native page size. The rational is that this is + * the minimal mappable memory. + */ +static MT_size_t complete_pg_sz(MT_size_t sz) +{ + MTL_DEBUG4(MT_FLFMT("complete_pg_sz: sz="SIZE_T_FMT), sz); + if ((sz & native_page_low_mask) != 0) + { + MTL_DEBUG4(MT_FLFMT("fixing up non page-aligned size="SIZE_T_FMT), sz); + sz &= ~((MT_size_t)native_page_low_mask); + sz += native_page_size; + MTL_DEBUG4(MT_FLFMT("complete_pg_sz: enlarging to sz="SIZE_T_FMT), sz); + } + return sz; +} /* complete_pg_sz */ + + +/************************************************************************/ +/* We ensure the buffer completely falls within a 4Gb block. + * Now, 4G = 4*1*K*1K*1K = 4*1024^3 = 2^(2+3*10) = 2^32 ==> 32 bits. + * We take advantage of being able to shift right by WQE_CHUNK_SIZE_LOG2 bits. + * So instead of testing 32-bit overflow, + * we test (32-WQE_CHUNK_SIZE_LOG2)-bit overflow. + * Thus being able to test using 32 bits calculations. + */ +static MT_bool within_4GB(MT_virt_addr_t buf, MT_size_t sz) +{ + static const unsigned int shift_4GdivWQCZ = 32 - WQE_CHUNK_SIZE_LOG2; + MT_virt_addr_t bbeg_rsh = buf >> WQE_CHUNK_SIZE_LOG2; + MT_virt_addr_t bend_rsh = bbeg_rsh + (sz >> WQE_CHUNK_SIZE_LOG2); + MT_bool same_4GB = ((bbeg_rsh >> shift_4GdivWQCZ) == + (bend_rsh >> shift_4GdivWQCZ)); + MTL_DEBUG4(MT_FLFMT("same_4GB=%d"), same_4GB); + return same_4GB; +} /* within_4GB */ + + +/************************************************************************/ +/* If buffer is supplied, validate address. + * If null buffer supplied, alloc a physical DDR buffer and map it. + * We have to check for 'Within 4GB' anyway. + * When mapping, we allow a second chance to pass the 4GB restriction. + * If allocated, we also return the physical memory address. + */ +static HH_ret_t check_make_wqes_buf( + THH_qpm_t qpm, + THH_qp_ul_resources_t* qp_ul_resources_p, + HH_pd_hndl_t pd, + MOSAL_protection_ctx_t* ctx_p, + MT_phys_addr_t* pa_p +) +{ + HH_ret_t rc = HH_OK; + MT_virt_addr_t wqes_buf = qp_ul_resources_p->wqes_buf; + MT_size_t buf_sz = qp_ul_resources_p->wqes_buf_sz; + + MTL_DEBUG4(MT_FLFMT("wqes_buf="VIRT_ADDR_FMT", buf_sz="SIZE_T_FMT), wqes_buf, buf_sz); + if (wqes_buf != 0) + { + MT_virt_addr_t unalligned_bits = wqes_buf & WQE_CHUNK_MASK; + if ((unalligned_bits != 0) || !within_4GB(wqes_buf, buf_sz)) + { + wqes_buf = 0; + rc = HH_EINVAL_PARAM; + } + } + else if (buf_sz != 0) /* When uses SRQ, buf_sz may be 0 */ + { + rc = THH_uldm_get_protection_ctx(qpm->uldm, pd, ctx_p); + if (rc == HH_OK) + { + qp_ul_resources_p->wqes_buf_sz = buf_sz = + complete_pg_sz(buf_sz); /* fixed up size returned */ + rc = THH_ddrmm_alloc(qpm->ddrmm, buf_sz, native_page_shift, pa_p); + MTL_DEBUG4(MT_FLFMT("rc=%d"), rc); + if (rc == HH_OK) + { + static const MOSAL_mem_flags_t + mem_flags = MOSAL_MEM_FLAGS_NO_CACHE | + MOSAL_MEM_FLAGS_PERM_READ | + MOSAL_MEM_FLAGS_PERM_WRITE; + MT_virt_addr_t va_1stmapped = wqes_buf = (MT_virt_addr_t) + MOSAL_map_phys_addr(*pa_p, buf_sz, mem_flags, *ctx_p); + if (wqes_buf && !within_4GB(wqes_buf, buf_sz)) + { /* bad luck? give mapping another chance, to fit, before unmap! 
+/************************************************************************/
+/* If a buffer is supplied, validate its address.
+ * If a null buffer is supplied, alloc a physical DDR buffer and map it.
+ * We have to check for 'within 4GB' either way.
+ * When mapping, we allow a second chance to pass the 4GB restriction.
+ * If allocated, we also return the physical memory address.
+ */
+static HH_ret_t check_make_wqes_buf(
+  THH_qpm_t               qpm,
+  THH_qp_ul_resources_t*  qp_ul_resources_p,
+  HH_pd_hndl_t            pd,
+  MOSAL_protection_ctx_t* ctx_p,
+  MT_phys_addr_t*         pa_p
+)
+{
+  HH_ret_t rc = HH_OK;
+  MT_virt_addr_t wqes_buf = qp_ul_resources_p->wqes_buf;
+  MT_size_t buf_sz = qp_ul_resources_p->wqes_buf_sz;
+
+  MTL_DEBUG4(MT_FLFMT("wqes_buf="VIRT_ADDR_FMT", buf_sz="SIZE_T_FMT), wqes_buf, buf_sz);
+  if (wqes_buf != 0)
+  {
+    MT_virt_addr_t unaligned_bits = wqes_buf & WQE_CHUNK_MASK;
+    if ((unaligned_bits != 0) || !within_4GB(wqes_buf, buf_sz))
+    {
+      wqes_buf = 0;
+      rc = HH_EINVAL_PARAM;
+    }
+  }
+  else if (buf_sz != 0) /* When using an SRQ, buf_sz may be 0 */
+  {
+    rc = THH_uldm_get_protection_ctx(qpm->uldm, pd, ctx_p);
+    if (rc == HH_OK)
+    {
+      qp_ul_resources_p->wqes_buf_sz = buf_sz =
+        complete_pg_sz(buf_sz); /* fixed-up size returned */
+      rc = THH_ddrmm_alloc(qpm->ddrmm, buf_sz, native_page_shift, pa_p);
+      MTL_DEBUG4(MT_FLFMT("rc=%d"), rc);
+      if (rc == HH_OK)
+      {
+        static const MOSAL_mem_flags_t
+          mem_flags = MOSAL_MEM_FLAGS_NO_CACHE |
+                      MOSAL_MEM_FLAGS_PERM_READ |
+                      MOSAL_MEM_FLAGS_PERM_WRITE;
+        MT_virt_addr_t va_1stmapped = wqes_buf = (MT_virt_addr_t)
+          MOSAL_map_phys_addr(*pa_p, buf_sz, mem_flags, *ctx_p);
+        if (wqes_buf && !within_4GB(wqes_buf, buf_sz))
+        { /* bad luck? give the mapping another chance to fit, before unmapping! */
+          wqes_buf = (MT_virt_addr_t)MOSAL_map_phys_addr(
+            *pa_p, buf_sz, mem_flags, *ctx_p);
+          MOSAL_unmap_phys_addr(*ctx_p, (MT_virt_addr_t)va_1stmapped, buf_sz);
+          if (wqes_buf && !within_4GB(wqes_buf, buf_sz))
+          {
+            MOSAL_unmap_phys_addr(*ctx_p, (MT_virt_addr_t)wqes_buf, buf_sz);
+            wqes_buf = 0;
+          }
+        }
+        qp_ul_resources_p->wqes_buf = wqes_buf;
+        if (wqes_buf == 0)
+        {
+          THH_ddrmm_free(qpm->ddrmm, *pa_p, buf_sz);
+          *pa_p = 0;
+          rc = HH_EAGAIN;
+        }
+      }
+    }
+  }
+  return rc;
+} /* check_make_wqes_buf */
+
+
+/************************************************************************/
+static MT_bool attr_hh2swqpc(
+  const HH_qp_init_attr_t* hh_attr,
+  MT_bool                  mlx,
+  TQPM_sw_qpc_t*           sw_qpc_p
+)
+{
+  MT_bool ok = TRUE;
+
+  memset(sw_qpc_p, 0, sizeof(TQPM_sw_qpc_t));
+  sw_qpc_p->pd      = hh_attr->pd;
+  sw_qpc_p->rdd     = hh_attr->rdd;
+  sw_qpc_p->srqn    = hh_attr->srq;
+  sw_qpc_p->cqn_snd = hh_attr->sq_cq;
+  sw_qpc_p->cqn_rcv = hh_attr->rq_cq;
+  sw_qpc_p->cap     = hh_attr->qp_cap;
+
+  /* st THH_service_type_t */
+  if (mlx)
+  {
+    sw_qpc_p->st = THH_ST_MLX;
+  }
+  else
+  {
+    switch (hh_attr->ts_type)
+    {
+    case VAPI_TS_RC: sw_qpc_p->st = THH_ST_RC; break;
+// JPM: RD is not currently supported
+//  case VAPI_TS_RD: sw_qpc_p->st = THH_ST_RD; break;
+    case VAPI_TS_UC: sw_qpc_p->st = THH_ST_UC; break;
+    case VAPI_TS_UD: sw_qpc_p->st = THH_ST_UD; break;
+    default: ok = FALSE; MTL_ERROR1(MT_FLFMT("ts_type=%d"), hh_attr->ts_type);
+    }
+  }
+
+  sw_qpc_p->ssc = (hh_attr->sq_sig_type == VAPI_SIGNAL_ALL_WR);
+  sw_qpc_p->rsc = (hh_attr->rq_sig_type == VAPI_SIGNAL_ALL_WR);
+  return ok;
+} /* attr_hh2swqpc */
+
+
+/************************************************************************/
+HH_ret_t create_qp(
+  THH_qpm_t              qpm,               /* IN */
+  HH_qp_init_attr_t*     init_attr_p,       /* IN */
+  MT_bool                mlx,               /* IN */
+  THH_qp_ul_resources_t* qp_ul_resources_p, /* IO */
+  TQPM_sw_qpc_t*         new_qp_p           /* IN */
+)
+{
+  VAPI_lkey_t lkey;
+  HH_ret_t rc, mrc = HH_ERR;
+  MT_phys_addr_t pa_ddr = 0;
+  MOSAL_protection_ctx_t ctx;
+
+  rc = (attr_hh2swqpc(init_attr_p, mlx, new_qp_p) ? HH_OK : HH_EINVAL_SERVICE_TYPE);
+  if (rc == HH_OK)
+  {
+    rc = check_make_wqes_buf(qpm, qp_ul_resources_p, init_attr_p->pd,
+                             &ctx, &pa_ddr);
+  }
+  if ((rc == HH_OK) && (qp_ul_resources_p->wqes_buf_sz != 0))
+  /* If there is a WQEs buffer to register */
+  {
+    THH_internal_mr_t params;
+    memset(&params, 0, sizeof(params));
+    params.start = qp_ul_resources_p->wqes_buf;
+    params.size = qp_ul_resources_p->wqes_buf_sz;
+    params.pd = init_attr_p->pd;
+    params.vm_ctx = ctx;
+    params.force_memkey = FALSE;
+    params.memkey = (VAPI_lkey_t)0;
+    if (pa_ddr)
+    {
+      VAPI_phy_addr_t phy_array = (VAPI_phy_addr_t)pa_ddr; /* 1-element array - for register internal */
+      params.num_bufs = 1;
+      params.phys_buf_lst = &phy_array;   /* Addresses of automatic */
+      params.buf_sz_lst = &params.size;   /* variables!             */
+    }
+    mrc = THH_mrwm_register_internal(qpm->mrwm_internal, &params, &lkey);
+    rc = mrc;
+  }
+  if (rc == HH_OK)
+  {
+    /* save parameters in this manager */
+    new_qp_p->state      = VAPI_RESET;
+    new_qp_p->wqes_buf   = qp_ul_resources_p->wqes_buf;
+    new_qp_p->wqes_buf_sz= qp_ul_resources_p->wqes_buf_sz;
+    new_qp_p->uar_index  = qp_ul_resources_p->uar_index;
+    new_qp_p->lkey = (qp_ul_resources_p->wqes_buf_sz != 0) ? lkey : 0/*Invalid*/;
+    new_qp_p->pa_ddr = pa_ddr;
+  }
+  if (rc != HH_OK)
+  { /* clean */
+    if (mrc == HH_OK) { THH_mrwm_deregister_mr(qpm->mrwm_internal, lkey); }
+    if (pa_ddr != 0)
+    {
+      MT_size_t wqes_buf_sz = qp_ul_resources_p->wqes_buf_sz; /* pg complete */
+      MOSAL_unmap_phys_addr(ctx, (MT_virt_addr_t)qp_ul_resources_p->wqes_buf,
+                            wqes_buf_sz);
+      THH_ddrmm_free(qpm->ddrmm, pa_ddr, wqes_buf_sz);
+    }
+  }
+  MTL_DEBUG4(MT_FLFMT("rc=%d, ul_res->wqes_buf="VIRT_ADDR_FMT),
+             rc, qp_ul_resources_p->wqes_buf);
+  return rc;
+} /* create_qp */
+
+
+
+
+/************************************************************************/
+static void udav2qpc_path(const VAPI_ud_av_t* av, THH_address_path_t* path)
+{
+  path->sl = av->sl;
+  path->my_lid_path_bits = av->src_path_bits;
+  path->flow_label = av->flow_label;
+  path->hop_limit = av->hop_limit;
+  path->max_stat_rate = (av->static_rate == 0) ? 0 : 1; /* IPD=0 -> 0, everything else -> 1 */
+  path->g = av->grh_flag;
+  path->mgid_index = av->sgid_index;
+  path->rlid = av->dlid;
+  path->tclass = av->traffic_class;
+  memcpy(&path->rgid, &av->dgid, sizeof(path->rgid));
+} /* udav2qpc_path */
+
+
+/************************************************************************/
+static IB_mtu_t log2mtu_to_ib_mtu(u_int8_t lg2mtu)
+{
+  IB_mtu_t ib_mtu = TAVOR_LOG2_MAX_MTU;
+  switch (lg2mtu)
+  {
+  case  8: ib_mtu = MTU256;  break;
+  case  9: ib_mtu = MTU512;  break;
+  case 10: ib_mtu = MTU1024; break;
+  case 11: ib_mtu = MTU2048; break;
+  case 12: ib_mtu = MTU4096; break;
+  default:
+    MTL_ERROR1(MT_FLFMT("Unsupported MTU for log2(max_msg)=%d, use ibmtu=%d"),
+               lg2mtu, ib_mtu);
+  }
+  return ib_mtu;
+} /* log2mtu_to_ib_mtu */
+
+
+/************************************************************************/
+static void qpc_path2udav(const THH_address_path_t* path, VAPI_ud_av_t* av)
+{
+  av->sl = path->sl;
+  av->src_path_bits = path->my_lid_path_bits;
+  av->flow_label = path->flow_label;
+  av->hop_limit = path->hop_limit;
+  av->static_rate = (path->max_stat_rate == 0) ? 0 : 3;
+  av->grh_flag = path->g;
+  av->sgid_index = path->mgid_index;
+  av->dlid = path->rlid;
+  av->traffic_class = path->tclass;
+  memcpy(&av->dgid, &path->rgid, sizeof(path->rgid));
+} /* qpc_path2udav */
+
+
+/************************************************************************/
+static void qpc_default(THH_qpee_context_t* qpc_p)
+{
+  memset(qpc_p, 0, sizeof(THH_qpee_context_t));
+  qpc_p->ver = 0;
+  //qpc_p->te = 1;
+  /*qpc_p->ce = 1; */
+  qpc_p->ack_req_freq = TUNABLE_ACK_REQ_FREQ;
+  qpc_p->flight_lim = TUNABLE_FLIGHT_LIMIT;
+  qpc_p->ric = FALSE;  /* Provide E2E credits in ACKs */
+  qpc_p->sic = FALSE;  /* Consider E2E credits */
+  qpc_p->msg_max = 31; /* HW checks message size against QP MTU / UD msg_max */
+  qpc_p->mtu = MTU2048;
+} /* qpc_default */
+
+
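Note that udav2qpc_path() collapses the VAPI static_rate (an inter-packet-delay, IPD, value) into the single max_stat_rate bit, and qpc_path2udav() expands that bit back to IPD 0 or 3, so the round trip is lossy for every nonzero IPD. A minimal sketch of just that mapping, mirroring the two functions above (helper names are mine, not the driver's):

```c
/* Sketch: the lossy static_rate round trip between the VAPI address
 * vector and the Tavor address path, as coded above. */
#include <stdio.h>

static unsigned av_to_path(unsigned ipd) { return ipd == 0 ? 0 : 1; } /* udav2qpc_path */
static unsigned path_to_av(unsigned bit) { return bit == 0 ? 0 : 3; } /* qpc_path2udav */

int main(void)
{
    unsigned ipd;
    for (ipd = 0; ipd <= 4; ++ipd)   /* IPD 1, 2, 3, 4 all come back as 3 */
        printf("IPD %u -> bit %u -> IPD %u\n",
               ipd, av_to_path(ipd), path_to_av(av_to_path(ipd)));
    return 0;
}
```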
+/************************************************************************/
+/* Initialize the THH_qpee_context_t structure with
+ * attributes given or computed upon QP creation.
+ */
+static void init2qpc_using_create_values(
+  const TQPM_sw_qpc_t* qp_p,
+  THH_qpee_context_t*  qpc_p
+)
+{
+  qpc_p->st      = qp_p->st;
+  qpc_p->pd      = qp_p->pd;
+  qpc_p->rdd     = qp_p->rdd;
+  qpc_p->srq     = (qp_p->srqn != HH_INVAL_SRQ_HNDL);
+  qpc_p->srqn    = qp_p->srqn;
+  qpc_p->cqn_snd = qp_p->cqn_snd;
+  qpc_p->cqn_rcv = qp_p->cqn_rcv;
+/*** warning C4242: '=' : conversion from 'const unsigned int' to 'MT_bool', possible loss of data ***/
+  qpc_p->ssc = (MT_bool)qp_p->ssc;
+/*** warning C4242: '=' : conversion from 'const unsigned int' to 'MT_bool', possible loss of data ***/
+  qpc_p->rsc = (MT_bool)qp_p->rsc;
+
+  qpc_p->usr_page = qp_p->uar_index;
+  qpc_p->wqe_base_adr = (sizeof(MT_virt_addr_t) <= 4
+                         ? (u_int32_t)0
+                         : (u_int32_t)(((u_int64_t)qp_p->wqes_buf) >> 32));
+  qpc_p->wqe_lkey = qp_p->lkey;
+} /* init2qpc_using_create_values */
+
+
+
+/************************************************************************/
+static void qpc2vapi_attr(
+  const THH_qpee_context_t* qpc_p,
+  VAPI_qp_attr_t*           qp_attr_p
+)
+{
+  VAPI_rdma_atom_acl_t aflags = 0;
+  memset(qp_attr_p, 0, sizeof(*qp_attr_p));
+  qp_attr_p->qp_state = qpc_p->state;
+  qp_attr_p->sq_draining = qpc_p->sq_draining;
+  qp_attr_p->qp_num = qpc_p->local_qpn_een;
+  aflags |= (qpc_p->rae ? VAPI_EN_REM_ATOMIC_OP : 0);
+  aflags |= (qpc_p->rwe ? VAPI_EN_REM_WRITE : 0);
+  aflags |= (qpc_p->rre ? VAPI_EN_REM_READ : 0);
+  qp_attr_p->remote_atomic_flags = aflags;
+  qp_attr_p->qkey = qpc_p->q_key;
+  qp_attr_p->path_mtu = qpc_p->mtu;
+  switch (qpc_p->pm_state)
+  {
+  case PM_STATE_MIGRATED: qp_attr_p->path_mig_state = VAPI_MIGRATED; break;
+  case PM_STATE_REARM:    qp_attr_p->path_mig_state = VAPI_REARM;    break;
+  case PM_STATE_ARMED:    qp_attr_p->path_mig_state = VAPI_ARMED;    break;
+  default: ; /* hmmm... */
+  }
+  qp_attr_p->rq_psn = qpc_p->next_rcv_psn;
+  qp_attr_p->sq_psn = qpc_p->next_send_psn;
+  qp_attr_p->qp_ous_rd_atom = ((qpc_p->rae || qpc_p->rre)
+                               ? 1u << qpc_p->rra_max : 0);
+  qp_attr_p->ous_dst_rd_atom = ((qpc_p->sre==0) && (qpc_p->sae==0)) ? 0 : 1u << qpc_p->sra_max;
+  qp_attr_p->min_rnr_timer = qpc_p->min_rnr_nak;
+  qp_attr_p->dest_qp_num = qpc_p->remote_qpn_een;
+  qp_attr_p->pkey_ix = qpc_p->primary_address_path.pkey_index;
+  qp_attr_p->port = qpc_p->primary_address_path.port_number;
+  qpc_path2udav(&qpc_p->primary_address_path, &qp_attr_p->av);
+  qp_attr_p->timeout = qpc_p->primary_address_path.ack_timeout;
+  qp_attr_p->retry_count = qpc_p->retry_count;
+  qp_attr_p->rnr_retry = qpc_p->primary_address_path.rnr_retry;
+  qp_attr_p->alt_pkey_ix = qpc_p->alternative_address_path.pkey_index;
+  qp_attr_p->alt_port = qpc_p->alternative_address_path.port_number;
+  qpc_path2udav(&qpc_p->alternative_address_path, &qp_attr_p->alt_av);
+  qp_attr_p->alt_timeout = qpc_p->alternative_address_path.ack_timeout;
+  /* qp_attr_p->alt_retry_count = qpc_p->alternative_address_path. */
+  //qp_attr_p->alt_rnr_retry = qpc_p->alternative_address_path.rnr_retry;
+} /* qpc2vapi_attr */
+
+
+
+/************************************************************************/
+/* Translate a VAPI_qp_attr_t struct into a THH_qpee_context_t struct.
+ * Consider the caller's attr_mask to generate opt_mask for the
+ * command interface.
+ */
+static HH_ret_t vapi2qpc_modify(
+  THH_qpm_t                    qpm,
+  TQPM_sw_qpc_t*               qp_p,
+  const VAPI_qp_attr_t*        attr_p,
+  const THH_qpee_transition_t  trans,
+  const u_int32_t              attr_mask,
+  THH_qpee_context_t*          qpc_p,
+  u_int32_t*                   opt_mask_p
+)
+{
+  HH_ret_t rc = HH_OK;
+  u_int32_t opt_mask = 0;
+  IB_port_t sqp_port;
+  MT_bool is_sqp = (is_sqp0(qpm,qp_p->qpn,&sqp_port) || is_sqp1(qpm,qp_p->qpn,&sqp_port));
+
+  qpc_p->st= qp_p->st;
+
+  if (attr_mask & QP_ATTR_CAP)
+  {
+    /* resizing the WQ size (QP size) is not supported */
+    rc = HH_ENOSYS;
+    goto done;
+  }
+
+  if (attr_mask & QP_ATTR_SCHED_QUEUE) {
+    qpc_p->sched_queue = attr_p->sched_queue;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_SCHED_QUEUE; /* For INIT2RTR and SQD2RTS */
+  } else {
+    /* The default assignment below will be effective only on RST2INIT
+     * when sched_queue is not explicitly provided (but is a required parameter) */
+    qpc_p->sched_queue = attr_p->av.sl;
+  }
+
+  /* if (1 || attr_mask & QP_ATTR_QP_NUM) */
+  {
+    qpc_p->local_qpn_een = attr_p->qp_num;
+  }
+  qpc_p->sae = qpc_p->swe = qpc_p->sre = 1; /* Enforcement only on responder side (per IB) */
+
+
+  if (attr_mask & QP_ATTR_PKEY_IX)
+  {
+    /* error should have been checked in upper level, so just C-implicit mask */
+/*** warning C4242: '=' : conversion from 'const VAPI_pkey_ix_t' to 'u_int8_t', possible loss of data ***/
+    qpc_p->primary_address_path.pkey_index = (u_int8_t)attr_p->pkey_ix;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_PKEY_INDEX;
+  }
+
+  if ((attr_mask & QP_ATTR_PORT) ||
+      ((is_sqp == TRUE) && (trans == TAVOR_IF_CMD_RST2INIT_QPEE)) ) {
+    /* "The following attributes are not applicable if the QP specified is a Special QP:"... */
+    /* (IB-spec. 1.1: Page 512) - But Tavor requires them for SQ association */
+
+    /* according to the change in tavor_if_defs.h (23.12.2002 - port was separated from AV). */
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_PORT_NUM;
+    // no port changes for special QPs!
+    if ( is_sqp == FALSE ) {
+      qpc_p->primary_address_path.port_number = attr_p->port;
+    } else {
+      qpc_p->primary_address_path.port_number = sqp_port;
+    }
+  }
+
+  /* patch for vl15 problem */
+  /* TBD sched_queue based on ULP ??? */
+  /* for now qp0(sm) - 0x8, all others = 0x0;
+   */
+  if(is_sqp0(qpm,qp_p->qpn,&sqp_port))
+  {
+    qpc_p->sched_queue = 0x8;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_SCHED_QUEUE;
+  }
+  else
+  {
+    qpc_p->sched_queue = attr_p->av.sl;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_SCHED_QUEUE;
+  }
+
+  if (attr_mask & QP_ATTR_QKEY)
+  {
+    qpc_p->q_key = attr_p->qkey;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_Q_KEY;
+  }
+  if (attr_mask & QP_ATTR_AV)
+  {
+    udav2qpc_path(&attr_p->av, &qpc_p->primary_address_path);
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_PRIMARY_ADDR_PATH;
+  }
+
+  // special QPs get the msg_max & MTU of UD QPs.
+  if (qp_p->st == THH_ST_UD || is_sqp )
+  {
+    qpc_p->msg_max = TAVOR_LOG2_MAX_MTU;
+    qpc_p->mtu = log2mtu_to_ib_mtu(TAVOR_LOG2_MAX_MTU);
+  }
+  else if (attr_mask & QP_ATTR_PATH_MTU) {
+    { /* See check_constants(), which verifies that using the following shift is fine. */
+      if (((1ul << attr_p->path_mtu) & valid_tavor_ibmtu_mask) != 0)
+      {
+        qpc_p->mtu = attr_p->path_mtu;
+      }
+      else
+      {
+        MTL_ERROR1(MT_FLFMT("Unsupported mtu=%d value"), attr_p->path_mtu);
+        rc = HH_EINVAL_PARAM;
+      }
+    }
+  }
+
+  if (attr_mask & QP_ATTR_TIMEOUT){
+    qpc_p->primary_address_path.ack_timeout = attr_p->timeout;
+    /* sqd->rts: this attr is optional; rtr->rts: this attr is mandatory */
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_ACK_TIMEOUT;
+  }
+
+  if (attr_mask & QP_ATTR_RETRY_COUNT)
+  {
+    /* according to the change in tavor_if_defs.h (23.12.2002 - retry_count was separated from AV). */
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_RETRY_COUNT;
+    qpc_p->retry_count = attr_p->retry_count;
+  }
+
+  if (attr_mask & QP_ATTR_RNR_RETRY)
+  {
+    qpc_p->primary_address_path.rnr_retry = attr_p->rnr_retry;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_RNR_RETRY;
+    qpc_p->alternative_address_path.rnr_retry = attr_p->rnr_retry;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_RNR_RETRY;
+  }
+  /*if (attr_mask & QP_ATTR_RQ_PSN)*/
+  {
+    qpc_p->next_rcv_psn = attr_p->rq_psn;
+  }
+
+  if (attr_mask & QP_ATTR_REMOTE_ATOMIC_FLAGS)
+  {
+    VAPI_rdma_atom_acl_t flags = attr_p->remote_atomic_flags;
+    qpc_p->rae = (flags & VAPI_EN_REM_ATOMIC_OP) ? 1 : 0;
+    qpc_p->rwe = (flags & VAPI_EN_REM_WRITE) ? 1 : 0;
+    qpc_p->rre = (flags & VAPI_EN_REM_READ) ? 1 : 0;
+
+    /* if the current outstanding rd-atomic value is 0, disable rdma-read and atomic capability */
+    if ((trans == QPEE_TRANS_RTR2RTS)||(trans==QPEE_TRANS_RTS2RTS)||(trans==QPEE_TRANS_SQERR2RTS)) {
+      if (qp_p->qp_ous_rd_atom == 0) {
+        MTL_DEBUG3(MT_FLFMT("%s: setting rae/rre to zero, because qp_ous_rd_atom is 0. Trans=%d"),
+                   __func__,trans);
+        qpc_p->rae = qpc_p->rre = 0;
+      }
+    }
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_RRE |
+                TAVOR_IF_QPEE_OPTPAR_RAE |
+                TAVOR_IF_QPEE_OPTPAR_REW;
+
+  }
+
+  if (attr_mask & QP_ATTR_QP_OUS_RD_ATOM)
+  {
+    if (attr_p->qp_ous_rd_atom != 0)
+    {
+      qpc_p->rra_max = ceil_log2(attr_p->qp_ous_rd_atom);
+      if (qpc_p->rra_max > qpm->log2_max_outs_rdma_atom)
+      {
+        MTL_ERROR1(MT_FLFMT("Error rra_max=0x%x > QPM's log2_max=0x%x, attr_p->qp_ous_rd_atom = 0x%x"),
+                   qpc_p->rra_max, qpm->log2_max_outs_rdma_atom,attr_p->qp_ous_rd_atom);
+        rc = HH_EINVAL_PARAM;
+      } else {
+        if ((trans==QPEE_TRANS_SQD2RTS)&&(qp_p->qp_ous_rd_atom==0)) {
+          /* outstanding rd/atomics was previously zero, so we need to restore the rd/atomic flags */
+          MTL_DEBUG3(MT_FLFMT("%s: restoring rae/rre to requested values, because qp_ous_rd_atom changed from 0. Trans=%d"),
+                     __func__,trans);
+          qpc_p->rae = (qp_p->remote_atomic_flags & VAPI_EN_REM_ATOMIC_OP) ? 1 : 0;
+          qpc_p->rre = (qp_p->remote_atomic_flags & VAPI_EN_REM_READ) ? 1 : 0;
+          opt_mask |= TAVOR_IF_QPEE_OPTPAR_RRE | TAVOR_IF_QPEE_OPTPAR_RAE;
+        }
+      }
+    } else {
+      qpc_p->rra_max = 0;
+      if (qpc_p->rre || qpc_p->rae)
+      {
+        MTL_ERROR1(MT_FLFMT("%s: Warning: resetting rre+rae bits for qp_ous_rd_atom=0. Trans=%d"),
+                   __func__, trans);
+        qpc_p->rre = qpc_p->rae = 0;
+        opt_mask |= (TAVOR_IF_QPEE_OPTPAR_RRE | TAVOR_IF_QPEE_OPTPAR_RAE);
+      }
+    }
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_RRA_MAX;
+  }
+
+
+  if (attr_mask & QP_ATTR_OUS_DST_RD_ATOM)
+  {
+    qpc_p->sra_max = (attr_p->ous_dst_rd_atom == 0) ?
+                     0 : floor_log2(attr_p->ous_dst_rd_atom);
+    qpc_p->swe = 1;
+    if ((attr_p->ous_dst_rd_atom)==0) {
+      qpc_p->sre = qpc_p->sae = 0;
+    } else {
+      if (qpc_p->sra_max > qpm->log2_max_outs_dst_rd_atom)
+      {
+        MTL_ERROR1(MT_FLFMT("Error sra_max=0x%x > QPM's log2_max=0x%x, attr_p->ous_dst_rd_atom = 0x%x"),
+                   qpc_p->sra_max, qpm->log2_max_outs_dst_rd_atom,attr_p->ous_dst_rd_atom);
+        rc = HH_EINVAL_PARAM;
+      }
+      qpc_p->sre = qpc_p->sae = 1;
+    }
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_SRA_MAX;
+  }
+
+  if (attr_mask & QP_ATTR_ALT_PATH)
+  {
+    udav2qpc_path(&attr_p->alt_av, &qpc_p->alternative_address_path);
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH;
+    //}
+    //if (attr_mask & QP_ATTR_ALT_TIMEOUT)
+    //{
+    qpc_p->alternative_address_path.ack_timeout = attr_p->alt_timeout;
+    //opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH;
+    //}
+    //if (attr_mask & QP_ATTR_ALT_RETRY_COUNT)
+    //{
+    //  qpc_p->alternative_address_path.ack_timeout = attr_p->alt_timeout;
+    //  opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH;
+    //}
+    //if (attr_mask & QP_ATTR_ALT_RNR_RETRY)
+    //{
+    /* according to the change in tavor_if_defs.h (23.12.2002). */
+    //  qpc_p->alternative_address_path.rnr_retry = attr_p->alt_rnr_retry;
+    //  opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_RNR_RETRY;
+    //}
+    //if (attr_mask & QP_ATTR_ALT_PKEY_IX)
+    //{
+/*** warning C4242: '=' : conversion from 'const VAPI_pkey_ix_t' to 'u_int8_t', possible loss of data ***/
+    qpc_p->alternative_address_path.pkey_index = (u_int8_t)attr_p->alt_pkey_ix;
+    //opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH;
+    //}
+    //if (attr_mask & QP_ATTR_ALT_PORT)
+    //{
+    qpc_p->alternative_address_path.port_number = attr_p->alt_port;
+    //opt_mask |= TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH;
+  }
+  if (attr_mask & QP_ATTR_MIN_RNR_TIMER)
+  {
+/*** warning C4242: '=' : conversion from 'const IB_rnr_nak_timer_code_t' to 'u_int8_t', possible loss of data ***/
+    qpc_p->min_rnr_nak = (u_int8_t)attr_p->min_rnr_timer;
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_RNR_TIMEOUT;
+  }
+  if (attr_mask & QP_ATTR_SQ_PSN)
+  {
+    qpc_p->next_send_psn = attr_p->sq_psn;
+  }
+
+  if (attr_mask & QP_ATTR_PATH_MIG_STATE)
+  {
+    switch (attr_p->path_mig_state)
+    {
+    case VAPI_MIGRATED: qpc_p->pm_state = PM_STATE_MIGRATED; break;
+    case VAPI_REARM:    qpc_p->pm_state = PM_STATE_REARM;    break;
+    case VAPI_ARMED:    qpc_p->pm_state = PM_STATE_ARMED;    break;
+    default: rc = HH_EINVAL_PARAM;
+    }
+    opt_mask |= TAVOR_IF_QPEE_OPTPAR_PM_STATE;
+  } else { /* Default required in order to assure initialization */
+    qpc_p->pm_state = PM_STATE_MIGRATED;
+  }
+
+  if (attr_mask & QP_ATTR_DEST_QP_NUM)
+  {
+    qpc_p->remote_qpn_een = attr_p->dest_qp_num;
+  }
+  *opt_mask_p = opt_mask;
+
+done:
+  MTL_DEBUG4(MT_FLFMT("vapi2qpc_modify: rc=%d"), rc);
+  return rc;
+} /* vapi2qpc_modify */
+
+/************************************************************************/
+/* Track rdma/atomic parameter changes
+ */
+static void track_rdma_atomic(
+  const VAPI_qp_attr_t* attr_p,
+  const u_int32_t       attr_mask,
+  TQPM_sw_qpc_t*        qp_p
+)
+{
+
+  if (attr_mask & QP_ATTR_REMOTE_ATOMIC_FLAGS)
+  {
+    qp_p->remote_atomic_flags = attr_p->remote_atomic_flags;
+
+  }
+
+  if (attr_mask & QP_ATTR_QP_OUS_RD_ATOM)
+  {
+    if (attr_p->qp_ous_rd_atom != 0)
+    {
+      qp_p->qp_ous_rd_atom = (1 << ceil_log2(attr_p->qp_ous_rd_atom));
+    } else {
+      qp_p->qp_ous_rd_atom = 0;
+    }
+  }
+
+  if (attr_mask & QP_ATTR_OUS_DST_RD_ATOM)
+  {
+    qp_p->ous_dst_rd_atom = attr_p->ous_dst_rd_atom;
+  }
+
+  return;
+} /* track_rdma_atomic */
+
+
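The rd-atomic bookkeeping above rounds in two directions: the responder depth is rounded up to a power of two (`rra_max = ceil_log2(...)`, and track_rdma_atomic() records the rounded-up `1 << rra_max`), while the initiator depth is rounded down (`sra_max = floor_log2(...)`). A standalone sketch; the ceil_log2/floor_log2 helpers below mimic the assumed semantics of the driver helpers of the same name:

```c
/* Sketch: how requested rd-atomic depths map to the log2 fields
 * programmed into the QP context. */
#include <stdio.h>

static unsigned ceil_log2(unsigned v)  { unsigned l = 0; while ((1u << l) < v)  ++l; return l; }
static unsigned floor_log2(unsigned v) { unsigned l = 0; while ((2u << l) <= v) ++l; return l; }

int main(void)
{
    unsigned requested = 5;
    unsigned rra_max = ceil_log2(requested);   /* responder: round up   -> 3 */
    unsigned sra_max = floor_log2(requested);  /* initiator: round down -> 2 */
    printf("qp_ous_rd_atom=%u  -> rra_max=%u (grants %u)\n",
           requested, rra_max, 1u << rra_max); /* grants 8 */
    printf("ous_dst_rd_atom=%u -> sra_max=%u (uses %u)\n",
           requested, sra_max, 1u << sra_max); /* uses 4 */
    return 0;
}
```

This also explains why qpc2vapi_attr() reports `1u << rra_max` back to the caller: the hardware depth, not the originally requested one.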
+/************************************************************************/
+static HH_ret_t prepare_special_qp(
+  THH_qpm_t             qpm,
+  IB_wqpn_t             qpn,
+  THH_qpee_transition_t trans
+)
+{
+  IB_port_t port = 0; /* 0 = regular QP; special QPs get a port in [1..n_ports] */
+  HH_ret_t rc = HH_OK;
+
+  MTL_DEBUG4(MT_FLFMT("entry point."));
+
+  if (((trans == QPEE_TRANS_INIT2RTR) || (trans == QPEE_TRANS_ERR2RST)) &&
+      (is_sqp0(qpm,qpn,&port)))
+  {
+    if (!qpm->sqp_info.configured)
+    {
+      MTL_DEBUG4(MT_FLFMT("calling conf_special_qps()."));
+      rc = conf_special_qps(qpm);
+      if (rc == HH_OK)
+      {
+        qpm->sqp_info.configured = TRUE;
+      }
+    }
+    if ((rc == HH_OK) && (qpm->sqp_info.port_props != NULL))
+    {
+      THH_cmd_status_t cmd_rc = THH_CMD_STAT_OK;
+
+      MTL_DEBUG1(MT_FLFMT("%s: port = %d, qpn = 0x%x"), __func__, port, qpn);
+      if( trans == QPEE_TRANS_INIT2RTR && (qpm->port_active[port-1] == FALSE) ) {
+        cmd_rc = THH_cmd_INIT_IB(qpm->cmd_if, port,
+                                 &qpm->sqp_info.port_props[port-1]);
+        if( cmd_rc == THH_CMD_STAT_OK )
+          qpm->port_active[port-1] = TRUE;
+      }
+
+      else if( trans == QPEE_TRANS_ERR2RST && (qpm->port_active[port-1] == TRUE) ) {
+        cmd_rc = THH_cmd_CLOSE_IB(qpm->cmd_if, port);
+        if ( cmd_rc == THH_CMD_STAT_OK )
+          qpm->port_active[port-1] = FALSE;
+      }
+
+      rc = (CMDRC2HH_ND(cmd_rc));
+      MTL_DEBUG4(MT_FLFMT("cmd_rc=%d=%s, trans=%d"),
+                 cmd_rc, str_THH_cmd_status_t(cmd_rc), trans);
+    }
+  }
+
+  return rc;
+} /* prepare_special_qp */
+
+
+/************************************************************************/
+/* Following Tavor-PRM 13.6.x optparammask possible bits */
+static inline u_int32_t x_optmask(THH_qpee_transition_t t)
+{
+  static const u_int32_t common_mask =
+    TAVOR_IF_QPEE_OPTPAR_ALT_ADDR_PATH |
+    TAVOR_IF_QPEE_OPTPAR_ALT_RNR_RETRY |
+    TAVOR_IF_QPEE_OPTPAR_RRE |
+    TAVOR_IF_QPEE_OPTPAR_RAE |
+    TAVOR_IF_QPEE_OPTPAR_REW |
+    TAVOR_IF_QPEE_OPTPAR_Q_KEY |
+    TAVOR_IF_QPEE_OPTPAR_RNR_TIMEOUT;
+
+  u_int32_t mask = 0;
+  switch (t) /* cases with mask=0 use the default above and are commented out */
+  {
+  /* case QPEE_TRANS_RST2INIT : mask=0 */
+  case QPEE_TRANS_INIT2INIT:
+    mask =
+      TAVOR_IF_QPEE_OPTPAR_RRE |
+      TAVOR_IF_QPEE_OPTPAR_RAE |
+      TAVOR_IF_QPEE_OPTPAR_REW |
+      TAVOR_IF_QPEE_OPTPAR_Q_KEY |
+      TAVOR_IF_QPEE_OPTPAR_PORT_NUM |
+      TAVOR_IF_QPEE_OPTPAR_PKEY_INDEX;
+    break;
+
+  case QPEE_TRANS_INIT2RTR :
+    mask = common_mask | TAVOR_IF_QPEE_OPTPAR_PKEY_INDEX | TAVOR_IF_QPEE_OPTPAR_SCHED_QUEUE;
+    break;
+
+  case QPEE_TRANS_RTR2RTS :
+    mask = common_mask | TAVOR_IF_QPEE_OPTPAR_PM_STATE;
+    break;
+
+  case QPEE_TRANS_RTS2RTS :
+    mask = common_mask | TAVOR_IF_QPEE_OPTPAR_PM_STATE;
+    break;
+
+  case QPEE_TRANS_SQERR2RTS:
+    mask =
+      TAVOR_IF_QPEE_OPTPAR_RRE |
+      TAVOR_IF_QPEE_OPTPAR_RAE |
+      TAVOR_IF_QPEE_OPTPAR_REW |
+      TAVOR_IF_QPEE_OPTPAR_Q_KEY |
+      TAVOR_IF_QPEE_OPTPAR_RNR_TIMEOUT;
+    break;
+  /* case QPEE_TRANS_2ERR : mask=0 */
+  /* case QPEE_TRANS_RTS2SQD : mask=0 */
+  case QPEE_TRANS_SQD2RTS :
+    mask = TAVOR_IF_QPEE_OPTPAR_ALL | TAVOR_IF_QPEE_OPTPAR_SCHED_QUEUE;
+    break;
+  /* case QPEE_TRANS_ERR2RST : mask=0 */
+  default:;
+  }
+  return mask;
+} /* x_optmask */
+
+
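x_optmask() exists so that THH_qpm_modify_qp() can do `opt_mask &= x_optmask(trans)` before issuing the MODIFY_QP command: optional parameters that vapi2qpc_modify() collected but that the particular transition cannot accept are silently screened out. A tiny sketch with assumed stand-in bit values (the real TAVOR_IF_QPEE_OPTPAR_* values come from tavor_if_defs.h):

```c
/* Sketch: screening the collected optional-parameter mask per transition. */
#include <stdio.h>

enum { /* assumed stand-ins for TAVOR_IF_QPEE_OPTPAR_* bits */
    OPTPAR_PKEY_INDEX = 1u << 0,
    OPTPAR_PM_STATE   = 1u << 1,
    OPTPAR_Q_KEY      = 1u << 2,
};

int main(void)
{
    unsigned requested   = OPTPAR_PKEY_INDEX | OPTPAR_PM_STATE | OPTPAR_Q_KEY;
    unsigned init2rtr_ok = OPTPAR_PKEY_INDEX | OPTPAR_Q_KEY;  /* no PM_STATE for INIT2RTR */
    unsigned sent = requested & init2rtr_ok;                  /* what the command sees */
    printf("requested=0x%x screened=0x%x (PM_STATE dropped)\n", requested, sent);
    return 0;
}
```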
+/************************************************************************/
+static inline void rst2init_dummy_attributes(THH_qpee_context_t* qpc_p, MT_bool is_sqp, IB_port_t port )
+{
+  qpc_p->primary_address_path.pkey_index = 0;
+  qpc_p->primary_address_path.port_number = is_sqp ? port : 1;
+  qpc_p->q_key = 1;
+}
+
+static HH_ret_t modify_qp_checks(
+  THH_qpm_t              qpm,              /* IN  */
+  TQPM_sw_qpc_t*         qp_p,             /* IN  */
+  VAPI_qp_state_t        cur_qp_state,     /* IN  */
+  VAPI_qp_attr_t*        qp_attr_p,        /* IN  */
+  VAPI_qp_attr_mask_t    attr_mask,        /* IN  */
+  THH_qpee_transition_t* trans_p,          /* OUT */
+  VAPI_qp_attr_t*        altfix_attr_p,    /* OUT */
+  MT_bool*               trivial_rst2rst   /* OUT */
+)
+{
+  HH_ret_t rc = HH_OK;
+  IB_port_t port;
+  MT_bool is_sqp;
+
+  is_sqp = (is_sqp0(qpm,qp_p->qpn,&port)) | (is_sqp1(qpm,qp_p->qpn,&port));
+
+  *trivial_rst2rst = FALSE;
+
+  if ( (cur_qp_state == VAPI_RESET) &&
+       (qp_p->state == VAPI_RESET) &&
+       (qp_attr_p->qp_state == VAPI_RESET) )
+  {
+    rc = HH_OK;
+    *trivial_rst2rst = TRUE;
+  }
+
+  else if ( ((cur_qp_state != qp_p->state) &&
+             (cur_qp_state != VAPI_ERR) &&  /* user may know of error */
+             (cur_qp_state != VAPI_SQE)) || /* may know of send-queue error */
+            !defined_qp_state(cur_qp_state) ||
+            !defined_qp_state(qp_attr_p->qp_state)
+          )
+  {
+    rc = HH_EINVAL_QP_STATE;
+    MTL_ERROR1(MT_FLFMT("mismatch: state, cur_qp_state=%s, qp_p->state=%s."),
+               VAPI_qp_state_sym(cur_qp_state), VAPI_qp_state_sym(qp_p->state));
+    MTL_ERROR1(MT_FLFMT("mismatch cont.: curr_qp_state=%s, qp_attr_p->qp_state=%s."),
+               VAPI_qp_state_sym(cur_qp_state),VAPI_qp_state_sym(qp_attr_p->qp_state));
+  }
+  else
+  {
+    /* Support for the RESET->ERR transition. First do RESET->INIT */
+    if ((cur_qp_state == VAPI_RESET) && (qp_attr_p->qp_state == VAPI_ERR) &&
+        (qp_p->state == VAPI_RESET)) {
+      /* pre-transition to INIT state if requesting 2ERR from RESET state */
+      THH_qpee_context_t qpc;
+      THH_cmd_status_t rce;
+      qpc_default(&qpc);
+      init2qpc_using_create_values(qp_p, &qpc);
+      rst2init_dummy_attributes(&qpc, is_sqp, port);
+      qpc.local_qpn_een = qp_p->qpn;
+      rce = THH_cmd_MODIFY_QP(qpm->cmd_if, qp_p->qpn, QPEE_TRANS_RST2INIT, &qpc, 0);
+      MTL_DEBUG1(MT_FLFMT("pre 2INIT, rce=%d=%s"),rce,str_THH_cmd_status_t(rce));
+      rc = ((rce == THH_CMD_STAT_OK) ? HH_OK :
+            (rce == THH_CMD_STAT_RESOURCE_BUSY) ? HH_EBUSY :
+            (rce == THH_CMD_STAT_EINTR) ? HH_EINTR : HH_EFATAL );
+      cur_qp_state = VAPI_INIT; /* we just moved to INIT */
+
+    /* QP with SRQ modified to reset - must first modify to ERR to flush all WQEs */
+    } else if ((qp_attr_p->qp_state == VAPI_RESET) && (qp_p->state != VAPI_ERR) &&
+               (qp_p->srqn != HH_INVAL_SRQ_HNDL)) {
+      THH_cmd_status_t rce;
+      MTL_DEBUG4(
+        MT_FLFMT("%s: Moving QP 0x%X to error state before moving to reset (uses SRQ 0x%X)"),
+        __func__, qp_p->qpn, qp_p->srqn);
+      rce = THH_cmd_MODIFY_QP(qpm->cmd_if, qp_p->qpn, QPEE_TRANS_2ERR, 0, 0);
+      rc = ((rce == THH_CMD_STAT_OK) ? HH_OK :
+            (rce == THH_CMD_STAT_RESOURCE_BUSY) ? HH_EBUSY :
+            (rce == THH_CMD_STAT_EINTR) ? HH_EINTR : HH_EFATAL );
+      if (rc == HH_OK) {
+        cur_qp_state = VAPI_ERR; /* we just moved to ERR */
+        qp_p->state= VAPI_ERR;
+      }
+    }
+  }
+  if (rc == HH_OK)
+  {
+    *trans_p = state_machine.tab[cur_qp_state][qp_attr_p->qp_state];
+    /* if qp_attr_p->en_sqd_asyn_notif was set, we add a flag to the transition value passed
+       to THH_cmd_MODIFY_QPEE(). No need to check (qp_attr_p->qp_state == VAPI_SQD) -
+       the flag is masked off anyway upon entry to THH_cmd_MODIFY_QPEE() */
+    if( qp_attr_p->en_sqd_asyn_notif && (*trans_p == QPEE_TRANS_RTS2SQD) ) {
+      *trans_p = QPEE_TRANS_RTS2SQD_WITH_EVENT;
+    }
+    MTL_DEBUG4(MT_FLFMT("cur=%s, next=%s, trans=%d"),
+               VAPI_qp_state_sym(cur_qp_state),
+               VAPI_qp_state_sym(qp_attr_p->qp_state), *trans_p);
+    if ( (*trans_p == (THH_qpee_transition_t)(-1)) && (*trivial_rst2rst == FALSE) )
+    {
+      rc = HH_EINVAL_QP_STATE; MTL_DEBUG4(MT_FLFMT("bad trans"));
+    }
+    /*
+    // since all alt_av related fields were combined under QP_ATTR_ALT_PATH
+    // there is no need to check for partial delivery of them.
+    else
+    {
+      if (!fix_partial_alternate(attr_mask, qpm->cmd_if, qpn, qp_attr_p,
+                                 altfix_attr_p, &qp_attr_p))
+      {
+        rc = HH_EINVAL_PARAM;
+      }
+    }
+    */
+  }
+  MTL_DEBUG4(MT_FLFMT("rc=%d=%s, trans=%d"), rc, HH_strerror_sym(rc), *trans_p);
+  return rc;
+} /* modify_qp_checks */
+
+
+
+/************************************************************************/
+/************************************************************************/
+/* interface functions */
+
+
+/************************************************************************/
+HH_ret_t THH_qpm_create(
+  THH_hob_t             hob,          /* IN  */
+  const THH_qpm_init_t* init_attr_p,  /* IN  */
+  THH_qpm_t*            qpm_p         /* OUT */
+)
+{
+  HH_ret_t rc = HH_EAGAIN;
+  VIP_common_ret_t vret;
+  TQPM_t* qpm;
+  u_int8_t log2_max_qp = init_attr_p->log2_max_qp;
+  u_int8_t log2_max_outs = init_attr_p->log2_max_outs_rdma_atom;
+  u_int32_t rdb_base_align_mask = (1ul << log2_max_outs) - 1;
+  u_int32_t i;
+  unsigned long tavor_num_reserved_qps = 1ul << init_attr_p->log2_rsvd_qps;
+  unsigned long nqp = 1ul << log2_max_qp;
+  unsigned long nsqp= NUM_SQP_PER_PORT * init_attr_p->n_ports;    /* Number of special QPs */
+  unsigned long nrqp= nqp - tavor_num_reserved_qps - nsqp;        /* Number of regular QPs */
+
+  *qpm_p = NULL; // needed to know whether to free the mutex; will be non-NULL only if everything is OK
+  if ((!constants_ok) || (log2_max_qp > 24) ||
+      (nqp <= tavor_num_reserved_qps) || (init_attr_p->rdb_base_index & rdb_base_align_mask) ) {
+    MTL_ERROR1(MT_FLFMT("%s: Invalid initialization parameters for THH_qpm"),__func__);
+    return HH_EINVAL;
+  }
+
+  qpm = TMALLOC(TQPM_t);
+  if (qpm == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed allocation of THH_qpm object"),__func__);
+    return HH_EAGAIN;
+  }
+  memset(qpm, 0, sizeof(TQPM_t));
+
+  qpm->qpn_prefix= (u_int8_t *)MALLOC(MAX_QPN_PREFIX);
+  if (qpm->qpn_prefix == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed allocation of qpn_prefix table (%u entries)"),__func__,
+               MAX_QPN_PREFIX);
+    goto failed_qpn_prefix;
+  }
+  memset(qpm->qpn_prefix,0,MAX_QPN_PREFIX);
+
+  vret= VIP_array_create_maxsize(nrqp > 1024 ? 1024 : nrqp, nrqp, &qpm->qp_tbl);
+  if (vret != VIP_OK) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed VIP_array_create_maxsize (vret=%d)"),__func__,vret);
+    rc= HH_EAGAIN;
+    goto failed_qp_tbl;
+  }
+
+  MTL_DEBUG4("{THH_qpm_create: hob=%p, log2MaxQP=%d, qpm=%p, rsvd_qps=%lu, "
+             "ra_idx=0x%x, log2_max_outs=%d\n",
+             hob, log2_max_qp, qpm, tavor_num_reserved_qps,
+             init_attr_p->rdb_base_index, init_attr_p->log2_max_outs_rdma_atom);
+
+  MTL_DEBUG4("{THH_qpm_create: constants_ok=%d, rdb_base_index=0x%x, align_mask=0x%x\n",
+             constants_ok, init_attr_p->rdb_base_index, rdb_base_align_mask);
+
+  if ((THH_hob_get_cmd_if(hob, &qpm->cmd_if) != HH_OK) ||
+      (THH_hob_get_mrwm(hob, &qpm->mrwm_internal) != HH_OK) ||
+      (THH_hob_get_ddrmm(hob, &qpm->ddrmm) != HH_OK) ||
+      (THH_hob_get_uldm(hob, &qpm->uldm) != HH_OK))
+  {
+    MTL_ERROR1(MT_FLFMT("%s: Failed getting internal HOB objects"),__func__);
+    rc= HH_ERR;
+    goto failed_obj_get;
+  }
+  rc = HH_OK;
+
+  qpm->hob = hob;
+  qpm->log2_max_qp = log2_max_qp;
+
+  /* special QPs info */
+  qpm->sqp_info.sqp_ctx= TNMALLOC(TQPM_sw_qpc_t*, nsqp);
+  if (qpm->sqp_info.sqp_ctx == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed allocating sqp_ctx"),__func__);
+    goto failed_sqp_ctx;
+  }
+  memset(qpm->sqp_info.sqp_ctx, 0, nsqp * sizeof(TQPM_sw_qpc_t*));
+  qpm->sqp_info.first_sqp_qpn = tavor_num_reserved_qps;
+  qpm->sqp_info.configured = FALSE; /* configure on demand */
+  qpm->sqp_info.n_ports= init_attr_p->n_ports;
+  if (!copy_port_props(qpm, init_attr_p))
+  {
+    goto failed_port_props;
+  }
+
+
+  qpm->first_rqp= qpm->sqp_info.first_sqp_qpn + nsqp; /* Index of first regular QP in qp_tbl */
+  qpm->rdb_base_index = init_attr_p->rdb_base_index;
+  qpm->log2_max_outs_rdma_atom = log2_max_outs;
+  qpm->log2_max_outs_dst_rd_atom = init_attr_p->log2_max_outs_dst_rd_atom;
+  qpm->max_outs_rdma_atom = (1ul << log2_max_outs);
+  qpm->idx_mask = (1ul << log2_max_qp) - 1;
+
+  if (qpm->sqp_info.port_props) { /* used as a flag for non-legacy behavior */
+    for(i = 0;i < init_attr_p->n_ports;i++) {
+      qpm->port_active[i] = FALSE;
+    }
+#if !defined(DELAY_CONF_SPECIAL_QPS)
+    rc = conf_special_qps(qpm);
+    if (rc != HH_OK) goto failed_conf_sqp;
+#endif
+  }
+  MOSAL_mutex_init(&qpm->mtx);
+
+  init_sgid_tbl(qpm);
+  init_pkey_tbl(qpm);
+
+  MTL_TRACE1("}THH_qpm_create: qpm=%p\n", qpm);
+  logIfErr("THH_qpm_create");
+  *qpm_p = qpm;
+  return HH_OK;
+
+  failed_conf_sqp:
+    if (qpm->sqp_info.port_props != NULL) FREE(qpm->sqp_info.port_props);
+  failed_port_props:
+    FREE(qpm->sqp_info.sqp_ctx);
+  failed_sqp_ctx:
+  failed_obj_get:
+    VIP_array_destroy(qpm->qp_tbl,NULL);
+  failed_qp_tbl:
+    FREE(qpm->qpn_prefix);
+  failed_qpn_prefix:
+    FREE(qpm);
+  return rc;
+} /* THH_qpm_create */
+
+static void TQPM_free_sw_qpc(void *sw_qpc)
+{
+  TQPM_sw_qpc_t* qp_p= (TQPM_sw_qpc_t*)sw_qpc;
+  if (qp_p == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Invoked for NULL SW QP context"), __func__);
+  } else {
+    MTL_ERROR1(MT_FLFMT("%s: Cleaning QP left-overs (qpn=0x%X)"), __func__, qp_p->qpn);
+    FREE(sw_qpc);
+  }
+}
+
+/************************************************************************/
+HH_ret_t THH_qpm_destroy(THH_qpm_t qpm /* IN */, MT_bool hca_failure /* IN */)
+{
+  int i;
+  VIP_common_ret_t vret=VIP_OK;
+  u_int32_t nsqp= qpm->first_rqp - qpm->sqp_info.first_sqp_qpn; /* Number of special QPs */
+
+  MTL_TRACE1("{THH_qpm_destroy: qpm=%p, hfail=%d\n", qpm, hca_failure);
+  /* Clean regular QPs "left-overs" */
+  MTL_TRACE2(MT_FLFMT("%s: Cleaning VIP_array..."), __func__);
+  vret= VIP_array_destroy(qpm->qp_tbl, TQPM_free_sw_qpc);
+  if (vret != VIP_OK) {
MTL_ERROR1(MT_FLFMT("%s: Failed VIP_array_destroy for qp_tbl (%d - %s)"), __func__, + vret, VAPI_strerror_sym(vret)); + /* Continue - the show must go on... */ + } + + /* Cleaning special QPs left-overs */ + MTL_TRACE2(MT_FLFMT("%s: Cleaning special QPs..."), __func__); + for (i= 0; i < (int)nsqp; i++) { + if (qpm->sqp_info.sqp_ctx[i] != NULL) {FREE(qpm->sqp_info.sqp_ctx[i]);} + } + FREE(qpm->sqp_info.sqp_ctx); + if (qpm->sqp_info.port_props != NULL) { + MTL_TRACE2(MT_FLFMT("%s: Cleaning port_props..."), __func__); + FREE(qpm->sqp_info.port_props); + } + +/* free pkey & sgid tbl */ + MTL_TRACE2(MT_FLFMT("%s: Cleaning SGID table..."), __func__); + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + if (qpm->sgid_tbl[i] != NULL) + { + TQPM_GOOD_FREE(qpm->sgid_tbl[i],(sizeof(IB_gid_t) * qpm->num_sgids[i])); + } + } + + MTL_TRACE2(MT_FLFMT("%s: Cleaning Pkey table..."), __func__); + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + if (qpm->pkey_tbl[i] != NULL) + { + TQPM_GOOD_FREE(qpm->pkey_tbl[i],(sizeof(VAPI_pkey_t)*qpm->pkey_tbl_sz[i])); + } + } + + MTL_TRACE2(MT_FLFMT("%s: Cleaning qpn_prefix..."), __func__); + FREE(qpm->qpn_prefix); + MOSAL_mutex_free(&qpm->mtx); + FREE(qpm); + MTL_TRACE1("}THH_qpm_destroy\n"); + return HH_OK; +} /* THH_qpm_destroy */ + + +/************************************************************************/ +HH_ret_t THH_qpm_create_qp( + THH_qpm_t qpm, /* IN */ + HH_qp_init_attr_t* init_attr_p, /* IN */ + MT_bool mlx, /* IN */ + THH_qp_ul_resources_t* qp_ul_resources_p, /* IO */ + IB_wqpn_t* qpn_p /* OUT */ +) +{ + HH_ret_t rc = HH_EAGAIN; + VIP_common_ret_t vret; + u_int32_t qp_idx; + TQPM_sw_qpc_t *new_qp_p; + VIP_array_handle_t qp_hndl; + u_int32_t wild_bits; + + MTL_TRACE1("{THH_qpm_create_qp: qpm=%p, mlx=%d\n", qpm, mlx); + if ((init_attr_p->srq != HH_INVAL_SRQ_HNDL) && (init_attr_p->ts_type != VAPI_TS_RC)) { + /* SRQs are supported only for RC QPs in Tavor */ + MTL_ERROR2(MT_FLFMT("%s: SRQ association with transport service type %s(%d)" + " - only RC QPs are allowed with SRQs."), + __func__, VAPI_ts_type_sym(init_attr_p->ts_type), init_attr_p->ts_type); + return HH_ENOSYS; + } + + new_qp_p= TMALLOC(TQPM_sw_qpc_t); + if (new_qp_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Failed allocating memory for new SW-QPC"),__func__); + return HH_EAGAIN; + } + memset(new_qp_p,0,sizeof(TQPM_sw_qpc_t)); + vret= VIP_array_insert(qpm->qp_tbl, new_qp_p, &qp_hndl ); + if (vret != VIP_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed allocating QP (%d - %s), qpm->qp_tbl=%p"),__func__, + vret, VAPI_strerror_sym(vret), qpm->qp_tbl); + rc= (HH_ret_t)vret; + goto failed_array_insert; + } + qp_idx= qp_hndl + qpm->first_rqp; + if (qp_idx >= (1U<log2_max_qp)) { + MTL_ERROR1(MT_FLFMT("%s: QP index (0x%x) greater than (1<log2_max_qp)-1); + } + rc = create_qp(qpm, init_attr_p, mlx, qp_ul_resources_p, new_qp_p); + if (rc != HH_OK) goto failed_create_qp; + + /* perturb high bits */ + wild_bits = qpm->qpn_prefix[qp_idx & QPN_PREFIX_INDEX_MASK]++; + new_qp_p->qpn = ( (wild_bits << qpm->log2_max_qp) | qp_idx ) & 0xFFFFFF; + if (new_qp_p->qpn == 0xFFFFFF) new_qp_p->qpn= qp_idx; /* 0xFFFFFF is reserved for multicast */ + + *qpn_p = new_qp_p->qpn; + MTL_TRACE1("}THH_qpm_create_qp: qpn=0x%x\n", *qpn_p); + logIfErr("THH_qpm_create_qp"); + return rc; + + failed_create_qp: + VIP_array_erase(qpm->qp_tbl, qp_hndl, NULL); + failed_array_insert: + FREE(new_qp_p); + return rc; +} /* THH_qpm_create_qp */ + + +/************************************************************************/ +HH_ret_t THH_qpm_get_special_qp( + THH_qpm_t 
+/************************************************************************/
+HH_ret_t THH_qpm_get_special_qp(
+  THH_qpm_t              qpm,               /* IN  */
+  VAPI_special_qp_t      qp_type,           /* IN  */
+  IB_port_t              port,              /* IN  */
+  HH_qp_init_attr_t*     init_attr_p,       /* IN  */
+  THH_qp_ul_resources_t* qp_ul_resources_p, /* IO  */
+  IB_wqpn_t*             sqp_hndl_p         /* OUT */
+)
+{
+  const Special_QPs* sqp_info = &qpm->sqp_info;
+  HH_ret_t rc = HH_OK;
+  unsigned int port_idx = port - 1;
+  unsigned int qpti = 0; /* SQP type index */
+  MTL_TRACE1("{THH_qpm_get_special_qp: qpm=%p\n", qpm);
+  if (qpm->sqp_info.port_props == NULL)
+  {
+    MTL_ERROR1(MT_FLFMT("get_special_qp: not supported in legacy mode"));
+    rc = HH_ENOSYS;
+  }
+  else
+  {
+    if (port_idx >= sqp_info->n_ports)
+    {
+      MTL_ERROR1(MT_FLFMT("THH_qpm_get_special_qp: bad port=%d"), port);
+      rc = HH_EINVAL_PORT;
+    }
+    for (qpti = 0; (qpti != n_qp_types) && (qp_types[qpti] != qp_type); ++qpti);
+    if (qpti == n_qp_types)
+    {
+      MTL_ERROR1(MT_FLFMT("THH_qpm_get_special_qp: bad qp_type=%d"), qp_type);
+      rc = HH_EINVAL_PARAM;
+    }
+
+
+#if defined(DELAY_CONF_SPECIAL_QPS)
+    if ((rc == HH_OK) && (!qpm->sqp_info.configured))
+    {
+      rc = conf_special_qps(qpm);
+      if (rc == HH_OK)
+      {
+        qpm->sqp_info.configured = TRUE;
+      }
+    }
+#endif
+  }
+  if (rc == HH_OK)
+  {
+    u_int32_t sqp_indx= (qpm->sqp_info.n_ports * qpti) + port_idx;
+    if (MOSAL_mutex_acq(&qpm->mtx, TRUE) != MT_OK) return HH_EINTR;
+
+    if (qpm->sqp_info.sqp_ctx[sqp_indx] == NULL) { /* This SQP is not used */
+      // making sure of MLX transport service for special QPs:
+      qpm->sqp_info.sqp_ctx[sqp_indx]= TMALLOC(TQPM_sw_qpc_t);
+      if (qpm->sqp_info.sqp_ctx[sqp_indx] == NULL) {
+        MTL_ERROR1(MT_FLFMT("%s: Failed allocating memory for new SW-QPC"),__func__);
+        rc= HH_EAGAIN;
+      } else {
+        init_attr_p->ts_type = THH_ST_MLX;
+        rc = create_qp(qpm, init_attr_p, TRUE, qp_ul_resources_p, qpm->sqp_info.sqp_ctx[sqp_indx]);
+        if (rc != HH_OK) {
+          FREE(qpm->sqp_info.sqp_ctx[sqp_indx]);
+          qpm->sqp_info.sqp_ctx[sqp_indx]= NULL;
+        } else {
+          qpm->sqp_info.sqp_ctx[sqp_indx]->qpn= qpm->sqp_info.first_sqp_qpn + sqp_indx;
+          MTL_DEBUG4(MT_FLFMT(
+            "%s: Allocated SQP of type %d (port %d) with qpn=0x%X "
+            "(qpti=%u sqp_indx=%u first_sqp_qpn=0x%X)"), __func__,
+            qp_type, port, qpm->sqp_info.sqp_ctx[sqp_indx]->qpn,
+            qpti,sqp_indx,qpm->sqp_info.first_sqp_qpn);
+          *sqp_hndl_p = qpm->sqp_info.sqp_ctx[sqp_indx]->qpn;
+        }
+      }
+    }
+    else
+    {
+      rc = HH_EBUSY;
+    }
+    MOSAL_mutex_rel(&qpm->mtx);
+
+  }
+  MTL_TRACE1("}THH_qpm_get_special_qp\n");
+  logIfErr("THH_qpm_get_special_qp");
+  return rc;
+} /* THH_qpm_get_special_qp */
+
+static inline HH_ret_t THH_modify_cmdrc2rc(THH_cmd_status_t cmd_rc)
+{
+  HH_ret_t rc;
+  switch(cmd_rc){
+  case THH_CMD_STAT_OK:
+    rc = HH_OK;
+    break;
+  case THH_CMD_STAT_EINTR:
+    rc = HH_EINTR;
+    break;
+  case THH_CMD_STAT_BAD_PARAM:
+  case THH_CMD_STAT_BAD_INDEX:
+    rc = HH_EINVAL_PARAM;
+    break;
+  case THH_CMD_STAT_BAD_RESOURCE: /* accessing reserved qp/ee */
+  case THH_CMD_STAT_RESOURCE_BUSY:
+    rc = HH_EBUSY;
+    break;
+  case THH_CMD_STAT_BAD_QPEE_STATE:
+    rc = HH_EINVAL_QP_STATE;
+    break;
+  case THH_CMD_STAT_BAD_RES_STATE:
+    rc = HH_EINVAL_MIG_STATE;
+    break;
+  case THH_CMD_STAT_BAD_SYS_STATE:
+    rc = HH_ERR; /* HCA is disabled */
+    break;
+  default:
+    rc = HH_EFATAL;
+  }
+  return rc;
+}
+/************************************************************************/
+/* We protect against an erroneous application modifying the same QP
+ * from multiple threads. We use a mutex per QPM.
+ * It may be more efficient to have a mutex per QP,
+ * but we leave it for future consideration.
+ */
+HH_ret_t THH_qpm_modify_qp(
+  THH_qpm_t            qpm,            /* IN */
+  IB_wqpn_t            qpn,            /* IN */
+  VAPI_qp_state_t      cur_qp_state,   /* IN */
+  VAPI_qp_attr_t*      qp_attr_p,      /* IN */
+  VAPI_qp_attr_mask_t* qp_attr_mask_p  /* IN */
+)
+{
+  VAPI_qp_attr_t altfix_attr;
+  THH_qpee_transition_t trans;
+  VIP_array_obj_t qp_obj;
+  TQPM_sw_qpc_t* qp_p;
+  HH_ret_t rc = HH_EAGAIN;
+  VIP_common_ret_t vret;
+  int i;
+  u_int32_t qp_idx = qpn & qpm->idx_mask;
+  MT_bool trivial_rst2rst;
+
+  MTL_DEBUG1("{THH_qpm_modify_qp: qpm=%p, qpn=0x%x, curr_state=%d, next_state=%d\n",
+             qpm, qpn, cur_qp_state,qp_attr_p->qp_state);
+  if (is_sqp(qpm,qpn)) {
+    if (MOSAL_mutex_acq(&qpm->mtx, TRUE) != MT_OK) return HH_EINTR;
+    qp_p= qpm->sqp_info.sqp_ctx[qpn - qpm->sqp_info.first_sqp_qpn];
+    if (qp_p == NULL) {
+      MTL_ERROR1(MT_FLFMT("%s: Given special QP handle is not active (qpn=0x%X)"),__func__,qpn);
+      MOSAL_mutex_rel(&qpm->mtx);
+      return HH_EINVAL_QP_NUM;
+    }
+  } else { /* regular QP */
+    vret= VIP_array_find_hold(qpm->qp_tbl, qp_idx - qpm->first_rqp, &qp_obj);
+    qp_p= (TQPM_sw_qpc_t*)qp_obj;
+    if ((vret != VIP_OK) || (qpn != qp_p->qpn)) {
+      MTL_ERROR1(MT_FLFMT("%s: Invalid QP handle (qpn=0x%X)"),__func__,qpn);
+      if (vret == VIP_OK) VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+      return HH_EINVAL_QP_NUM;
+    }
+  }
+
+  rc = modify_qp_checks(qpm, qp_p, cur_qp_state, qp_attr_p, *qp_attr_mask_p,
+                        &trans, &altfix_attr,&trivial_rst2rst);
+  MTL_DEBUG4(MT_FLFMT("trans=%d, rst2rst=%d"), trans, trivial_rst2rst);
+  if (rc == HH_OK && !trivial_rst2rst)
+  {
+    THH_qpee_context_t qpc;
+    u_int32_t opt_mask;
+    u_int32_t qp_idx = qpn & qpm->idx_mask;
+    MT_bool legacy_mode;
+
+    qpc_default(&qpc);
+    qpc.state = qp_attr_p->qp_state;
+    if ((qp_attr_p->qp_state == VAPI_INIT) && (cur_qp_state != VAPI_INIT))
+    {
+      init2qpc_using_create_values(qp_p, &qpc);
+    }
+
+    // just making sure qpn is correct.
+    qp_attr_p->qp_num = qpn;
+    rc = vapi2qpc_modify(qpm, qp_p, qp_attr_p, trans, *qp_attr_mask_p,
+                         &qpc, &opt_mask);
+
+    qpc.local_qpn_een = qpn;
+    qpc.ra_buff_indx = qpm->rdb_base_index + qp_idx * qpm->max_outs_rdma_atom;
+    //MTL_ERROR1("%s: opt mask before screening was: 0x%x", __func__,opt_mask);
+    opt_mask &= x_optmask(trans);
+    //MTL_ERROR1("%s: opt mask after screening was: 0x%x", __func__,opt_mask);
+
+    /*
+      prepare_special_qp() will call INIT_IB/CLOSE_IB
+      for special QPs & their associated port.
+      This should be done only when operating in non-legacy
+      mode (legacy mode has executed INIT_IB from THH_hob_open_hca() ).
+    */
+    if( (rc == HH_OK) && (is_sqp(qpm,qpn)) ) {
+      rc = THH_hob_get_legacy_mode(qpm->hob,&legacy_mode);
+      if( rc == HH_OK && (legacy_mode == FALSE) ) {
+        MTL_TRACE2("%s: operating under non-legacy mode - activating port.", __func__);
+        rc = prepare_special_qp(qpm, qpn, trans);
+      }
+    }
+
+    if (rc == HH_OK)
+    {
+      THH_cmd_status_t cmd_rc =
+        THH_cmd_MODIFY_QP(qpm->cmd_if, qpn, trans, &qpc, opt_mask);
+
+      rc = THH_modify_cmdrc2rc(cmd_rc);
+      MTL_DEBUG4(MT_FLFMT("cmd_rc=%d=%s"),
+                 cmd_rc, str_THH_cmd_status_t(cmd_rc));
+      if (rc == HH_OK)
+      {
+        IB_port_t sqp1_port;
+        /* check whether to update the pkey index of qp1 in our struct */
+        if (is_sqp1(qpm,qpn,&sqp1_port))
+        {
+          if (check_2update_pkey(cur_qp_state,qp_attr_p->qp_state,qp_attr_mask_p))
+          {
+            MTL_DEBUG4("updating pkey in the required transition. port %d \n",sqp1_port);
+            qpm->qp1_pkey_idx[sqp1_port-1/*idx!*/] = qp_attr_p->pkey_ix;
+            for (i=0; i< qpm->sqp_info.n_ports; i++)
+            {
+              MTL_DEBUG1("port %d: qp1 pkey idx:%x \n",i+1,qpm->qp1_pkey_idx[i]);
+            }
+          }
+        }
+
+        qp_p->state = qp_attr_p->qp_state;
+        track_rdma_atomic(qp_attr_p,*qp_attr_mask_p, qp_p);
+      }
+    }
+  }
+
+  if (is_sqp(qpm,qpn)) {
+    MOSAL_mutex_rel(&qpm->mtx);
+  } else { /* regular QP */
+    VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+  }
+
+  MTL_TRACE1("}THH_qpm_modify_qp\n");
+  logIfErr("THH_qpm_modify_qp");
+  return rc;
+} /* THH_qpm_modify_qp */
+
+
+/************************************************************************/
+/* The same comment about the mutex above THH_qpm_modify_qp(...) applies */
+HH_ret_t THH_qpm_query_qp(
+  THH_qpm_t       qpm,       /* IN */
+  IB_wqpn_t       qpn,       /* IN */
+  VAPI_qp_attr_t* qp_attr_p  /* IN */
+)
+{
+  HH_ret_t rc = HH_OK;
+  VIP_common_ret_t vret;
+  IB_port_t dummy_port;
+  VIP_array_obj_t qp_obj;
+  TQPM_sw_qpc_t* qp_p;
+  u_int32_t qp_idx = qpn & qpm->idx_mask;
+
+  MTL_TRACE1("{THH_qpm_query_qp: qpm=%p, qpn=0x%x\n", qpm, qpn);
+  if (is_sqp(qpm,qpn)) {
+    if (MOSAL_mutex_acq(&qpm->mtx, TRUE) != MT_OK) return HH_EINTR;
+    qp_p= qpm->sqp_info.sqp_ctx[qpn - qpm->sqp_info.first_sqp_qpn];
+    if (qp_p == NULL) {
+      MTL_ERROR1(MT_FLFMT("%s: Given special QP handle is not active (qpn=0x%X)"),__func__,qpn);
+      MOSAL_mutex_rel(&qpm->mtx); /* release before bailing out */
+      return HH_EINVAL_QP_NUM;
+    }
+  } else { /* regular QP */
+    vret= VIP_array_find_hold(qpm->qp_tbl, qp_idx - qpm->first_rqp, &qp_obj);
+    qp_p= (TQPM_sw_qpc_t*)qp_obj;
+    if ((vret != VIP_OK) || (qpn != qp_p->qpn)) {
+      MTL_ERROR1(MT_FLFMT("%s: Invalid QP handle (qpn=0x%X)"),__func__,qpn);
+      if (vret == VIP_OK) VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+      return HH_EINVAL_QP_NUM;
+    }
+  }
+
+  memset(qp_attr_p, 0, sizeof(VAPI_qp_attr_t));
+
+  switch (qp_p->state)
+  {
+  case VAPI_RESET:
+    qp_attr_p->qp_state = VAPI_RESET;
+    qp_attr_p->qp_num = qpn;
+    if (is_sqp0(qpm,qp_attr_p->qp_num,&dummy_port)) {
+      qp_attr_p->qp_num = 0;
+    } else if (is_sqp1(qpm,qp_attr_p->qp_num,&dummy_port)) {
+      qp_attr_p->qp_num = 1;
+    }
+    break;
+  default:
+    {
+      THH_qpee_context_t qpc;
+      THH_cmd_status_t crc = THH_cmd_QUERY_QP(qpm->cmd_if, qpn, &qpc);
+      if (crc == THH_CMD_STAT_OK)
+      {
+        qpc2vapi_attr(&qpc, qp_attr_p);
+        if (is_sqp0(qpm,qp_attr_p->qp_num,&dummy_port)) {
+          qp_attr_p->qp_num = 0;
+        } else if (is_sqp1(qpm,qp_attr_p->qp_num,&dummy_port)) {
+          qp_attr_p->qp_num = 1;
+        }
+      } else {
+        rc = ((crc == THH_CMD_STAT_OK) ? HH_OK :
+              (crc == THH_CMD_STAT_EINTR) ? HH_EINTR :
+              (crc == THH_CMD_STAT_RESOURCE_BUSY) ? HH_EBUSY : HH_EFATAL);
+        MTL_ERROR1(MT_FLFMT("ERROR: THH_cmd_QUERY_QP returned %s"),str_THH_cmd_status_t(crc));
+      }
+    }
+  }
+  if (rc == HH_OK)
+  {
+    qp_attr_p->cap = qp_p->cap;
+    qp_attr_p->ous_dst_rd_atom = qp_p->ous_dst_rd_atom;
+    qp_attr_p->qp_ous_rd_atom = qp_p->qp_ous_rd_atom;
+    qp_attr_p->remote_atomic_flags = qp_p->remote_atomic_flags;
+  }
+
+
+  if (is_sqp(qpm,qpn)) {
+    MOSAL_mutex_rel(&qpm->mtx);
+  } else { /* regular QP */
+    VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+  }
+  MTL_TRACE1("}THH_qpm_query_qp, state=%d=%s\n",
+             qp_attr_p->qp_state, VAPI_qp_state_sym(qp_attr_p->qp_state));
+  logIfErr("THH_qpm_query_qp");
+  return rc;
+} /* THH_qpm_query_qp */
+
+
+/************************************************************************/
+/* The same comment about the mutex above THH_qpm_modify_qp(...) applies */
+HH_ret_t THH_qpm_destroy_qp(
+  THH_qpm_t qpm, /* IN */
+  IB_wqpn_t qpn  /* IN */
+)
+{
+  HH_ret_t rc = HH_OK;
+  VIP_common_ret_t vret;
+  u_int32_t qp_idx = qpn & qpm->idx_mask;
+  TQPM_sw_qpc_t* qp2destroy;
+  VIP_array_obj_t array_obj;
+
+  MTL_TRACE1("{THH_qpm_destroy_qp: qpm=%p, qpn=0x%x\n", qpm, qpn);
+
+  if (is_sqp(qpm,qpn)) {
+    if (MOSAL_mutex_acq(&qpm->mtx, TRUE) != MT_OK) return HH_EINTR;
+    qp2destroy= qpm->sqp_info.sqp_ctx[qp_idx - qpm->sqp_info.first_sqp_qpn];
+    if (qp2destroy == NULL) {
+      MTL_ERROR1(MT_FLFMT("%s: Given special QP handle is not active (qpn=0x%X)"),__func__,qpn);
+      MOSAL_mutex_rel(&qpm->mtx);
+      return HH_EINVAL_QP_NUM;
+    }
+  } else {
+    vret= VIP_array_erase_prepare(qpm->qp_tbl, qp_idx - qpm->first_rqp, &array_obj);
+    qp2destroy= (TQPM_sw_qpc_t*)array_obj;
+    if (vret != VIP_OK) {
+      MTL_ERROR1(MT_FLFMT("%s: Failed VIP_array_erase_prepare for qpn=0x%X (%d - %s)"), __func__,
+                 qpn, vret, VAPI_strerror_sym(vret));
+      return (vret == VIP_EINVAL_HNDL) ? HH_EINVAL_QP_NUM : (HH_ret_t)vret;
+    }
+    if (qpn != qp2destroy->qpn) {
+      MTL_ERROR1(MT_FLFMT("%s: Invalid qpn=0x%X"), __func__, qpn);
+      VIP_array_erase_undo(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+      return HH_EINVAL_QP_NUM;
+    }
+  }
+
+#if defined(MT_SUSPEND_QP)
+  /* if the qp is suspended, unsuspend it here, directly calling the command interface */
+  {
+    THH_cmd_status_t crc;
+    if (qp2destroy->is_suspended == TRUE) {
+      crc = THH_cmd_SUSPEND_QP(qpm->cmd_if, qpn, FALSE);
+      if (crc != THH_CMD_STAT_OK){
+        MTL_ERROR1(MT_FLFMT("%s: FAILED unsuspending QP 0x%x. "),__func__, qpn);
+      }
+    }
+    qp2destroy->is_suspended=FALSE;
+  }
+#endif
+
+  if (qp2destroy->state != VAPI_RESET)
+  { /* Assure QP is left in RESET (SW ownership) */
+    THH_qpee_context_t qpc;
+    THH_cmd_status_t rce;
+    qpc_default(&qpc);
+    qpc.local_qpn_een = qpn;
+
+    /*
+      prepare_special_qp() will call INIT_IB/CLOSE_IB
+      for special QPs & their associated port.
+      This should be done only when operating in non-legacy
+      mode (legacy mode has executed INIT_IB from THH_hob_open_hca() ).
+    */
+    if( is_sqp(qpm,qpn) )
+    {
+      MT_bool legacy_mode;
+
+      rc = THH_hob_get_legacy_mode(qpm->hob,&legacy_mode);
+      if( rc == HH_OK && (legacy_mode == FALSE) )
+      {
+        MTL_TRACE2("%s: operating under non-legacy mode - deactivating port.", __func__);
+        rc = prepare_special_qp(qpm, qpn, QPEE_TRANS_ERR2RST);
+      }
+    }
+
+    /* really an ANY2RST transition, not ERR2RST */
+    rce = THH_cmd_MODIFY_QP(qpm->cmd_if, qpn, QPEE_TRANS_ERR2RST, &qpc, 0);
+    MTL_DEBUG4(MT_FLFMT("2RST: rc=%d=%s"), rce, str_THH_cmd_status_t(rce));
+    rc = (((rce == THH_CMD_STAT_OK)||(rce == THH_CMD_STAT_EFATAL)) ? HH_OK :
+          (rce == THH_CMD_STAT_EINTR) ? HH_EINTR :
+          (rce == THH_CMD_STAT_RESOURCE_BUSY) ? HH_EBUSY : HH_EINVAL);
+  }
+
+  if ((rc == HH_OK) && (qp2destroy->wqes_buf_sz != 0))
+  { /* Release the descriptors' memory region (if a WQEs buffer exists - it may not, with SRQ) */
+    rc= THH_mrwm_deregister_mr(qpm->mrwm_internal, qp2destroy->lkey);
+    if (rc != HH_OK) {
+      MTL_ERROR2(MT_FLFMT("%s: Failed deregistering internal MR (qpn=0x%X lkey=0x%X)"
+                          " ==> MR resource leak (%s)"),
+                 __func__, qp2destroy->qpn, qp2destroy->lkey, HH_strerror_sym(rc));
+    }
+    if (qp2destroy->pa_ddr != (MT_phys_addr_t)0)
+    {
+      MOSAL_protection_ctx_t ctx; /* was not saved, so recover */
+      rc = THH_uldm_get_protection_ctx(qpm->uldm, qp2destroy->pd, &ctx);
+      if (rc != HH_OK)
+      {
+        MTL_ERROR1("THH_qpm_destroy_qp: failed to recover protection ctx\n");
+      }
+      else
+      {
+        MT_size_t buf_sz = complete_pg_sz(qp2destroy->wqes_buf_sz);
+        MOSAL_unmap_phys_addr(ctx, qp2destroy->wqes_buf, buf_sz);
+        THH_ddrmm_free(qpm->ddrmm, qp2destroy->pa_ddr, buf_sz);
+      }
+    }
+  }
+
+  if (rc == HH_OK) {
+    if (is_sqp(qpm,qpn)) {
+      qpm->sqp_info.sqp_ctx[qp_idx - qpm->sqp_info.first_sqp_qpn]= NULL;
+      MOSAL_mutex_rel(&qpm->mtx);
+    } else { /* regular QP */
+      VIP_array_erase_done(qpm->qp_tbl, qp_idx - qpm->first_rqp, NULL);
+    }
+    FREE(qp2destroy);
+
+  } else { /* Failure */
+    if (is_sqp(qpm,qpn)) {
+      MOSAL_mutex_rel(&qpm->mtx);
+    } else { /* regular QP */
+      VIP_array_erase_undo(qpm->qp_tbl, qp_idx - qpm->first_rqp);
+    }
+  }
+
+
+  MTL_TRACE1("}THH_qpm_destroy_qp\n");
+  logIfErr("THH_qpm_destroy_qp");
+  return rc;
+} /* THH_qpm_destroy_qp */
+
+/************************************************************************/
+/* Assumed to be called first in this module, single-threaded. */
+void THH_qpm_init(void)
+{
+  MTL_TRACE1("THH_qpm_init{ compiled: date=%s, time=%s\n", __DATE__, __TIME__);
+  if (check_constants())
+  {
+    init_state_machine(&state_machine);
+  }
+  else
+  {
+    MTL_ERROR1(MT_FLFMT("THH_qpm_init: ERROR bad constants."));
+  }
+
+  native_page_shift = MOSAL_SYS_PAGE_SHIFT;
+  native_page_size = 1ul << native_page_shift;
+  native_page_low_mask = (1ul << native_page_shift)-1;
+  MTL_DEBUG4(MT_FLFMT("native_page: shift=%d, size=0x%x, mask=0x%x"),
+             native_page_shift, native_page_size, native_page_low_mask);
+
+  MTL_TRACE1("THH_qpm_init}\n");
+} /* THH_qpm_init */
+
+
+/***********************************************************************************/
+/******************************************************************************
+ * Function: process_local_mad
+ *****************************************************************************/
+HH_ret_t THH_qpm_process_local_mad(THH_qpm_t qpm,  /* IN */
+                                   IB_port_t port, /* IN */
+                                   IB_lid_t  slid, /* For Mkey violation trap */
+                                   EVAPI_proc_mad_opt_t proc_mad_opts, /* IN */
+                                   void * mad_in,  /* IN */
+                                   void * mad_out  /* OUT */
+                                  )
+{
+  THH_cmd_status_t cmd_ret;
+  HH_ret_t ret = HH_OK;
+  u_int8_t j,num_entries;
+  u_int8_t* my_mad_in,*my_mad_out;
+  u_int8_t* tbl_tmp = NULL;
+  u_int32_t attr;
+  SM_MAD_GUIDInfo_t guid_info;
+  u_int32_t start_idx=0;
+  SM_MAD_Pkey_table_t pkey_tbl;
+  MT_bool set_op = FALSE;
+  MT_bool validate_mkey = ((proc_mad_opts & EVAPI_MAD_IGNORE_MKEY) ? FALSE : TRUE);
+
+  FUNC_IN;
+
+  if (qpm == NULL) {
+    MTL_ERROR1("[%s]: ERROR : Invalid qpm handle\n",__FUNCTION__);
+    ret = HH_EINVAL;
+    goto done;
+  }
+
+  if ((port > qpm->sqp_info.n_ports) || (port < 1)) {
+    MTL_ERROR1("[%s]: ERROR : invalid port number (%d)\n",__FUNCTION__,port);
+    ret = HH_EINVAL_PORT;
+    goto done;
+  }
+
+  memset(mad_out, 0, IB_MAD_LEN);
+
+  my_mad_in =(u_int8_t*)mad_in;
+
+  MTL_DEBUG4("%s: MAD IN: \n", __func__);
+  MadBufPrint(my_mad_in);
+
+  attr = MOSAL_be32_to_cpu(((u_int32_t*)my_mad_in)[4]) >> 16;
+
+  MTL_DEBUG4("%s: method:0x%x attr:0x%x validate_mkey: %s\n", __func__,
+             my_mad_in[3],attr, (validate_mkey ? "TRUE" : "FALSE" ));
+
+  if (my_mad_in[3] == IB_METHOD_SET)
+  {
+    set_op = TRUE;
+  }
+
+
+  cmd_ret = THH_cmd_MAD_IFC(qpm->cmd_if, validate_mkey, slid, port, mad_in, mad_out);
+  if (cmd_ret != THH_CMD_STAT_OK) {
+    MTL_ERROR2("[%s]: ERROR on port %d: %d \n",__FUNCTION__,port,cmd_ret);
+    switch (cmd_ret) {
+    case THH_CMD_STAT_EINTR:
+      ret= HH_EINTR; break;
+    case THH_CMD_STAT_BAD_PKT:
+    case THH_CMD_STAT_EBADARG:
+      ret= HH_EINVAL; break;
+    case THH_CMD_STAT_BAD_INDEX:
+      ret= HH_EINVAL_PORT; break;
+    default:
+      ret= HH_EFATAL;
+    }
+    goto done;
+  }
+
+  my_mad_out = (u_int8_t*)mad_out;
+
+  MTL_DEBUG4("%s: MAD OUT: \n", __func__);
+  MadBufPrint(my_mad_out);
+
+  if (set_op)
+  {
+    switch (attr)
+    {
+
+    case IB_SMP_ATTRIB_PORTINFO:
+      MTL_DEBUG2("[%s]: got SET_PORTINFO, port %d \n",__FUNCTION__,port);
+      tbl_tmp = (u_int8_t*)(qpm->sgid_tbl[port-1]);
+      num_entries = (u_int8_t)qpm->num_sgids[port-1];
+
+      for (j =0; j< num_entries; j++)
+      {
+        /* update all the gids' prefixes in my table - big endian */
+        memcpy(tbl_tmp + j*sizeof(IB_gid_t),((u_int8_t*)mad_out)+IB_SMP_DATA_START+8,8);
+      }
+      MTL_DEBUG2("[%s]: prefix:%d.%d.%d.%d.%d.%d.%d.%d \n",__FUNCTION__,tbl_tmp[0],tbl_tmp[1],
+                 tbl_tmp[2],tbl_tmp[3],tbl_tmp[4],tbl_tmp[5],
+                 tbl_tmp[6],tbl_tmp[7]);
+      break;
+
+    //requested to set gids, update in my table
+    case IB_SMP_ATTRIB_GUIDINFO:
+      MTL_DEBUG2("[%s]: got SET_GUIDINFO, port %d \n",__FUNCTION__,port);
+      printGIDTable(qpm);
+
+      tbl_tmp = (u_int8_t*)(qpm->sgid_tbl[port-1]);
+      num_entries = (u_int8_t)qpm->num_sgids[port-1];
+
+      GUIDInfoMADToSt(&guid_info, my_mad_out);
+
+      start_idx = MOSAL_be32_to_cpu(((u_int32_t*)my_mad_out)[5]);
+
+      MTL_DEBUG2("%s: start idx %d \n", __func__,start_idx);
+      /* skip in the gid table to the starting idx to copy from */
+      tbl_tmp += (start_idx * sizeof(IB_gid_t));
+
+      for (j = 0; j < 8; j++) {
+        /* check the start index first, in case it is already out of range */
+        if (start_idx >= num_entries) {
+          break;
+        }
+        tbl_tmp += 8 /* sizeof gid prefix */;
+        memcpy(tbl_tmp, &(guid_info.guid[j]), sizeof(IB_guid_t));
+        tbl_tmp += sizeof(u_int64_t);
+        start_idx++;
+      }
+      printGIDTable(qpm);
+
+      break;
+
+    case IB_SMP_ATTRIB_PARTTABLE:
+      MTL_DEBUG2("[%s]: got SET_PARTTABLE, port %d \n",__FUNCTION__,port);
+      printPKeyTable(qpm);
+
+      num_entries = (u_int8_t)qpm->pkey_tbl_sz[port-1];
+
+      /* Select only 16 LSBs */
+      start_idx = (MOSAL_be32_to_cpu(((u_int32_t*)my_mad_out)[5])) & 0xFFFF;
+      MTL_DEBUG2("%s: start idx %d \n", __func__,start_idx);
+
+      /* copy & change the endianness */
+      PKeyTableMADToSt(&pkey_tbl, my_mad_out);
+
+      for (j = 0; j < 32; j++) {
+        /* check the start index first, in case it is already out of range */
+        if (start_idx >= num_entries) {
+          break;
+        }
+        qpm->pkey_tbl[port-1][start_idx++] = pkey_tbl.pkey[j];
+      }
+      printPKeyTable(qpm);
+
+      break;
+
+    default: MTL_DEBUG5("%s: no need to do anything \n", __func__);
+    }
+  }/*end if set_op*/
+
+done:
+  MT_RETURN(ret);
+}
+
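The SET_GUIDINFO handler above relies on a fixed layout: a GuidInfo block carries 8 GUIDs starting at a given index, and each local table entry is a 16-byte GID whose first 8 bytes (the subnet prefix) must be preserved while the low 8 GUID bytes are overwritten. A standalone sketch of just that arithmetic (table size and GUID value are assumed, illustrative):

```c
/* Sketch: layout arithmetic behind the SET_GUIDINFO handler. */
#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define GID_SIZE        16
#define PREFIX_SIZE      8
#define GUIDS_PER_BLOCK  8   /* GUIDs per GuidInfo attribute block */

int main(void)
{
    uint8_t gid_tbl[32][GID_SIZE] = {{0}};  /* assumed 32-entry port GID table */
    uint64_t guids[GUIDS_PER_BLOCK] = { 0x0002c90100000001ull /* ... */ };
    unsigned start_idx = 8, num_entries = 32, j;

    for (j = 0; j < GUIDS_PER_BLOCK && start_idx < num_entries; ++j, ++start_idx)
        /* keep bytes 0..7 (prefix), replace bytes 8..15 (GUID) */
        memcpy(&gid_tbl[start_idx][PREFIX_SIZE], &guids[j], sizeof(uint64_t));

    printf("updated entries 8..%u\n", start_idx - 1);  /* -> 8..15 */
    return 0;
}
```

The SET_PARTTABLE case is the same pattern with 32 pkeys per block and a flat u_int16_t table, so only the bounds check differs.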
+/******************************************************************************
+ * Function: THH_qpm_get_sgid
+ *****************************************************************************/
+HH_ret_t THH_qpm_get_sgid(THH_qpm_t qpm,   /* IN */
+                          IB_port_t port,  /* IN */
+                          u_int8_t  index, /* IN */
+                          IB_gid_t* gid_p  /* OUT */
+                         )
+{
+  HH_ret_t ret= HH_OK;
+
+  if ((port<1) || (port > qpm->sqp_info.n_ports))
+  {
+    return HH_EINVAL_PORT;
+  }
+
+  if (qpm->sgid_tbl[port-1] == NULL)
+  {
+    MTL_ERROR1("[%s]: ERROR: failure getting port %d gid tbl\n",__FUNCTION__,port);
+    return HH_EINVAL_PARAM;
+  }
+
+  if (index >= qpm->num_sgids[port-1])
+  {
+    MTL_ERROR1("[%s]: ERROR: invalid index",__FUNCTION__);
+    return HH_EINVAL_PARAM;
+  }
+  printGIDTable(qpm);
+  memcpy(*gid_p,qpm->sgid_tbl[port-1][index],sizeof(IB_gid_t));
+  return ret;
+}
+
+/******************************************************************************
+ * Function: THH_qpm_get_all_sgids
+ *****************************************************************************/
+HH_ret_t THH_qpm_get_all_sgids(THH_qpm_t qpm,             /* IN */
+                               IB_port_t port,            /* IN */
+                               u_int8_t  num_out_entries, /* IN */
+                               IB_gid_t* gid_p            /* OUT */
+                              )
+{
+  HH_ret_t ret= HH_OK;
+
+  if ((port<1) || (port > qpm->sqp_info.n_ports))
+  {
+    return HH_EINVAL_PORT;
+  }
+
+  if (qpm->sgid_tbl[port-1] == NULL)
+  {
+    MTL_ERROR1("[%s]: ERROR: failure getting port %d gid tbl\n",__FUNCTION__,port);
+    return HH_EINVAL_PARAM;
+  }
+
+  if (num_out_entries < qpm->num_sgids[port-1])
+  {
+    MTL_ERROR1("[%s]: ERROR: not enough space in output gid table",__FUNCTION__);
+    return HH_EAGAIN;
+  }
+  memcpy(*gid_p,qpm->sgid_tbl[port-1],sizeof(IB_gid_t) * num_out_entries);
+  return ret;
+}
+
+
+/******************************************************************************
+ * Function: THH_qpm_get_qp1_pkey
+ *****************************************************************************/
+HH_ret_t THH_qpm_get_qp1_pkey(THH_qpm_t    qpm,   /* IN */
+                              IB_port_t    port,  /* IN */
+                              VAPI_pkey_t* pkey_p /* OUT */
+                             )
+{
+
+  if ((port<1) || (port > qpm->sqp_info.n_ports))
+  {
+    MTL_ERROR1("%s: port number (%d) not valid\n", __func__,port);
+    return HH_EINVAL_PORT;
+  }
+
+
+  if (qpm->pkey_tbl[port-1] == NULL)
+  {
+    MTL_ERROR1("[%s]: ERROR: failure getting port %d pkey tbl\n",__func__,port);
+    return HH_EINVAL_PARAM;
+  }
+
+  //qp1 pkey isn't initialized yet
+  if (qpm->qp1_pkey_idx[port-1] == 0xffff)
+  {
+    MTL_ERROR1("[%s]: ERROR: qp1 pkey for port %d isn't initialized yet \n",__func__,port);
+    return HH_ERR;
+  }
+  MTL_DEBUG4("get Pkey: port %d idx: %d \n",port,qpm->qp1_pkey_idx[port-1]);
+
+  *pkey_p = qpm->pkey_tbl[port-1][qpm->qp1_pkey_idx[port-1]];
+  return HH_OK;
+}
+
+/******************************************************************************
+ * Function: THH_qpm_get_pkey
+ *****************************************************************************/
+HH_ret_t THH_qpm_get_pkey(THH_qpm_t      qpm,        /* IN */
+                          IB_port_t      port,       /* IN */
+                          VAPI_pkey_ix_t pkey_index, /* IN */
+                          VAPI_pkey_t*   pkey_p      /* OUT */)
+{
+  if ((port<1) || (port > qpm->sqp_info.n_ports)) {
+    MTL_ERROR1("%s: port number (%d) not valid\n", __func__,port);
+    return HH_EINVAL_PORT;
+  }
+
+  if (qpm->pkey_tbl[port-1] == NULL) {
+    MTL_ERROR1("%s: ERROR: failure getting port %d pkey tbl\n",__func__,port);
+    return HH_EINVAL_PARAM;
+  }
+
+  if (pkey_index >= qpm->pkey_tbl_sz[port-1]) {
+    MTL_ERROR1("%s: given pkey_index (%d) is beyond pkey table end (%d entries)\n",__func__,
+               pkey_index,qpm->pkey_tbl_sz[port-1]);
+    return HH_EINVAL_PARAM;
+  }
+
+  *pkey_p = qpm->pkey_tbl[port-1][pkey_index];
qpm->pkey_tbl[port-1][pkey_index]; + return HH_OK; +} + +/****************************************************************************** + * Function: THH_qpm_get_all_pkeys + *****************************************************************************/ +HH_ret_t THH_qpm_get_all_pkeys(THH_qpm_t qpm, /* IN */ + IB_port_t port,/*IN */ + u_int16_t out_num_pkey_entries, /*IN */ + VAPI_pkey_t* pkey_p /*OUT*/ + ) +{ + + if ((port<1) || (port > qpm->sqp_info.n_ports)) + { + return HH_EINVAL_PORT; + } + + + if (qpm->pkey_tbl[port-1] == NULL) + { + MTL_ERROR1("[%s]: ERROR: failure getting port %d pkey tbl\n",__FUNCTION__,port); + return HH_EINVAL_PARAM; + } + + if (qpm->pkey_tbl_sz[port-1] > out_num_pkey_entries) { + MTL_ERROR1("[%s]: ERROR: pkey out table too small (is %d, should be %d) \n",__FUNCTION__, + out_num_pkey_entries, qpm->pkey_tbl_sz[port-1]); + return HH_ERR; + } + MTL_DEBUG4("get Pkey table: port %d\n",port); + + memcpy(pkey_p, qpm->pkey_tbl[port-1], sizeof(VAPI_pkey_t)*qpm->pkey_tbl_sz[port-1]); + return HH_OK; +} + + +static void printPKeyTable(THH_qpm_t qpm) +{ +#if defined(MAX_DEBUG) && 5 <= MAX_DEBUG + int i,j; + + for (i=0; i< qpm->sqp_info.n_ports; i++) + { + MTL_DEBUG5("port %d pkey tbl: \n",i+1); + for (j=0; j< qpm->pkey_tbl_sz[i]; j++) + { + MTL_DEBUG5(" 0x%x ",qpm->pkey_tbl[i][j]); + } + MTL_DEBUG5("\n"); + } +#else + return; +#endif +} + +static void printGIDTable(THH_qpm_t qpm) +{ +#if defined(MAX_DEBUG) && 5 <= MAX_DEBUG + int i,k; + + for (k=0; k< qpm->sqp_info.n_ports; k++) + { + MTL_DEBUG5("port %d sgid tbl: \n",k+1); + for (i=0; i< qpm->num_sgids[k]; i++) + { + MTL_DEBUG5("GID[%d] = %x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x.%x\n", k, + qpm->sgid_tbl[k][i][0],qpm->sgid_tbl[k][i][1],qpm->sgid_tbl[k][i][2],qpm->sgid_tbl[k][i][3], + qpm->sgid_tbl[k][i][4],qpm->sgid_tbl[k][i][5],qpm->sgid_tbl[k][i][6],qpm->sgid_tbl[k][i][7], + qpm->sgid_tbl[k][i][8],qpm->sgid_tbl[k][i][9],qpm->sgid_tbl[k][i][10],qpm->sgid_tbl[k][i][11], + qpm->sgid_tbl[k][i][12],qpm->sgid_tbl[k][i][13],qpm->sgid_tbl[k][i][14],qpm->sgid_tbl[k][i][15]); + + } + MTL_DEBUG5("\n"); + } +#else + return; +#endif +} + + +HH_ret_t THH_qpm_get_num_qps(THH_qpm_t qpm /* IN */, u_int32_t *num_qps_p /*OUT*/) +{ + u_int32_t num_objs; + + if (qpm == NULL) { + MTL_ERROR1("[%s]: ERROR : Invalid qpm handle\n",__FUNCTION__); + return HH_EINVAL; + } + + num_objs= VIP_array_get_num_of_objects(qpm->qp_tbl); + if (num_objs == (u_int32_t) VIP_EINVAL_HNDL) { + return HH_EINVAL; + } else { + *num_qps_p = num_objs; + return HH_OK; + } +} + +#if defined(MT_SUSPEND_QP) +/************************************************************************/ +/* Same comment about mutex as above THH_qpm_modify_qp(...) applies */ +HH_ret_t THH_qpm_suspend_qp( + THH_qpm_t qpm, /* IN */ + IB_wqpn_t qpn, /* IN */ + MT_bool suspend_flag /* IN */ +) +{ + HH_ret_t rc = HH_OK; + VIP_common_ret_t vret; + VIP_array_obj_t qp_obj; + TQPM_sw_qpc_t* qp_p; + u_int32_t qp_idx = qpn & qpm->idx_mask; + + MTL_TRACE1("{THH_qpm_suspend_qp: qpm=%p, qpn=0x%x, suspend_flag=%s\n", + qpm, qpn, ((suspend_flag == TRUE) ? 
"TRUE" : "FALSE" )); + if (is_sqp(qpm,qpn)) { + if (MOSAL_mutex_acq(&qpm->mtx, TRUE) != MT_OK) return HH_EINTR; + qp_p= qpm->sqp_info.sqp_ctx[qpn - qpm->sqp_info.first_sqp_qpn]; + if (qp_p == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Given special QP handle is not active (qpn=0x%X)"),__func__,qpn); + return HH_EINVAL_QP_NUM; + } + } else { /* regular QP */ + vret= VIP_array_find_hold(qpm->qp_tbl, qp_idx - qpm->first_rqp, &qp_obj); + qp_p= (TQPM_sw_qpc_t*)qp_obj; + if ((vret != VIP_OK) || (qpn != qp_p->qpn)) { + MTL_ERROR1(MT_FLFMT("%s: Invalid QP handle (qpn=0x%X)"),__func__,qpn); + if (vret == VIP_OK) VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp); + return HH_EINVAL_QP_NUM; + } + } + + /* issue tavor command in all cases, since we are not adding a "suspend" state to QP */ + do { + THH_cmd_status_t crc; + + rc = HH_OK; + + if (qp_p->is_suspended == suspend_flag) { + /* already in requested suspension state */ + MTL_ERROR1(MT_FLFMT("%s: qpn=0x%X is already in requested state (suspend = %s)"), + __func__,qpn, (suspend_flag == FALSE)?"FALSE":"TRUE"); + break; + } + if (suspend_flag == FALSE) { + /* unsuspend request -- restore the internal region */ + /* lkey = 0 ==> no send and no receive WQEs */ + if (qp_p->lkey != 0) { + rc = THH_mrwm_suspend_internal(qpm->mrwm_internal,qp_p->lkey,FALSE); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: THH_mrwm_(un)suspend_internal failed (%d:%s). Region stays suspended"), + __func__, rc, HH_strerror_sym(rc)); + break; + } + } + } + crc = THH_cmd_SUSPEND_QP(qpm->cmd_if, qpn, suspend_flag); + if (crc == THH_CMD_STAT_OK) + { + rc = HH_OK; + if (suspend_flag == TRUE) { + /* suspend request -- suspend the internal region */ + /* lkey = 0 ==> no send and no receive WQEs */ + if (qp_p->lkey != 0) { + rc = THH_mrwm_suspend_internal(qpm->mrwm_internal,qp_p->lkey,TRUE); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: suspend. THH_mrwm_suspend_internal failed (%d:%s). Suspended anyway"), + __func__, rc, HH_strerror_sym(rc)); + rc = HH_OK; + } + } + } + } else { + rc = ((crc == THH_CMD_STAT_BAD_PARAM) ? HH_EINVAL_PARAM : + (crc == THH_CMD_STAT_BAD_INDEX) ? HH_EINVAL_QP_NUM : + (crc == THH_CMD_STAT_BAD_RESOURCE) ? HH_EINVAL_QP_NUM : + (crc == THH_CMD_STAT_BAD_RES_STATE) ? HH_EINVAL_QP_STATE : + (crc == THH_CMD_STAT_BAD_QPEE_STATE) ? HH_EINVAL_QP_STATE : + (crc == THH_CMD_STAT_BAD_QPEE_STATE) ? HH_EINVAL_QP_STATE : + (crc == THH_CMD_STAT_BAD_SYS_STATE) ? HH_EINVAL_HCA_HNDL : + (crc == THH_CMD_STAT_RESOURCE_BUSY) ? HH_EBUSY : HH_ERR); + MTL_ERROR1(MT_FLFMT("ERROR: THH_cmd_SUSPEND_QP returned %s"),str_THH_cmd_status_t(crc)); + } + qp_p->is_suspended = suspend_flag; + } while(0); + + if (is_sqp(qpm,qpn)) { + MOSAL_mutex_rel(&qpm->mtx); + } else { /* regular RQ */ + VIP_array_find_release(qpm->qp_tbl, qp_idx - qpm->first_rqp); + } + logIfErr("THH_qpm_suspend_qp"); + return rc; +} /* THH_qpm_query_qp */ +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h new file mode 100644 index 00000000..e31c33f2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_qpm/tqpm.h @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(H_TQPM_H) +#define H_TQPM_H + +#include +#include +#include +#include +#include + + +/************************************************************************ + * Structure to pass to THH_qpm_create(). Make sure to initialize + * (via memset(&, 0, sizeof()) with zeros before setting fields. + * Thus future enhancement may ease backward compatible. + * + * log2_max_qp - (log2) Max. number of QPs (QPC table size) + * rdb_base_index - virtual index to area allocated by HOB (see PRM 5.2). + * log2_max_outs_rdma_atom - log2 of number allocated per each QP, + * statrting from rdb_base_index. + * n_ports - Number of ports for this HCA. Needed for special QP allocation. + */ +typedef struct +{ + u_int32_t rdb_base_index; + u_int8_t log2_max_qp; + u_int8_t log2_rsvd_qps; + u_int8_t log2_max_outs_rdma_atom; + u_int8_t log2_max_outs_dst_rd_atom; + u_int8_t n_ports; + struct THH_port_init_props_st* port_props; /* (cmd_types.h) indexed from 1 */ +} THH_qpm_init_t; + +#define DEFAULT_SGID_TBL_SZ 32 +#define DEFAULT_PKEY_TBL_SZ 64 +#define NUM_PORTS 2 /* Hardware limit. Real n_ports is limited by the init params. */ +#define NUM_SQP_PER_PORT 4 /* SMI, GSI, RawEth, RawIPv6 */ +#define NUM_SQP (NUM_PORTS * NUM_SQP_PER_PORT) +#define MAX_QPN_PREFIX_LOG 12 +#define MAX_QPN_PREFIX (1<dev_lims.log2_rsvd_qps) + THH_NUM_RSVD_QP)) +#define THH_PROF_MAX_CQS ((1<<14) - (1<dev_lims.log2_rsvd_cqs)) +#define THH_PROF_MAX_PDS ((1<<14) - THH_NUM_RSVD_PD) +#define THH_PROF_MAX_REGIONS ((1<<17) - (1 << hob->dev_lims.log2_rsvd_mtts)) +#define THH_PROF_MAX_WINDOWS ((1<<18) - (1 << hob->dev_lims.log2_rsvd_mrws)) + +#define THH_PROF_MIN_QPS ((1<<14) - ((1<dev_lims.log2_rsvd_qps) + THH_NUM_RSVD_QP)) +#define THH_PROF_MIN_CQS ((1<<12) - (1<dev_lims.log2_rsvd_cqs)) +#define THH_PROF_MIN_PDS ((1<<12) - THH_NUM_RSVD_PD) +#define THH_PROF_MIN_REGIONS ((1<<15) - (1 << hob->dev_lims.log2_rsvd_mtts)) +#define THH_PROF_MIN_WINDOWS ((1<<16) - (1 << hob->dev_lims.log2_rsvd_mrws)) + +#else + /* profile which will enable a maximum of + * 1 million QPs, when the Tavor on-board memory + * is 1 Gigabyte, at the expense of fewer CQs, + * memory regions, and memory windows . 
To activate + * this profile, change the "if 1" above to "if 0" + * and recompile and reinstall the driver + */ + +#define THH_PROF_MAX_QPS ((1<<20) - 24) +#define THH_PROF_MAX_CQS ((1<<18) - 128) +#define THH_PROF_MAX_PDS ((1<<18) - 2) +#define THH_PROF_MAX_REGIONS ((1<<18) - 16) +#define THH_PROF_MAX_WINDOWS ((1<<19) - 16) + +#define THH_PROF_MIN_QPS ((1<<14) - 24) +#define THH_PROF_MIN_CQS ((1<<17) - 128) +#define THH_PROF_MIN_PDS ((1<<12) - 2) +#define THH_PROF_MIN_REGIONS ((1<<18) - 16) +#define THH_PROF_MIN_WINDOWS ((1<<19) - 16) + +#endif + + +#define THH_PROF_PCNT_REDUCTION_QPS (50) +#define THH_PROF_PCNT_REDUCTION_CQS (50) +#define THH_PROF_PCNT_REDUCTION_PDS (50) +#define THH_PROF_PCNT_REDUCTION_REGIONS (50) +#define THH_PROF_PCNT_REDUCTION_WINDOWS (50) + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c new file mode 100644 index 00000000..dbbcb6b2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.c @@ -0,0 +1,363 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_THH_SRQM_C + +#include +#include +#include +#include +#include +#include +#include +#include "thh_srqm.h" + +struct THH_srqm_st { + VIP_array_p_t srq_array; + THH_hob_t hob; + THH_cmd_t cmdif; + THH_mrwm_t mrwm; + THH_uldm_t uldm; + u_int32_t max_srq; /* Excluding reserved */ + u_int32_t rsvd_srq; /* Offset of first user SRQ from SRQC table base */ +}; + +typedef struct THH_srq_st { + VAPI_lkey_t lkey; + /* No need for anything more for SRQ destruction. 
Still, VIP_array requires an "object" */ +} *THH_srq_t; + +/************************************************************************/ +/* Private functions */ +/************************************************************************/ + +void free_srq_context(void* srq_context) +{ + MTL_ERROR1(MT_FLFMT("THH_srqm_destroy: Garbage collection: Releasing SRQ #%u"), + ((THH_srq_t)srq_context)->lkey); + /* Internal memory regions are cleaned-up on THH_mrwm_destroy */ + FREE(srq_context); +} + + +/************************************************************************/ +/* Public functions */ +/************************************************************************/ + +HH_ret_t THH_srqm_create( + THH_hob_t hob, /* IN */ + u_int8_t log2_max_srq, /* IN */ + u_int8_t log2_rsvd_srq, /* IN */ + THH_srqm_t* srqm_p /* OUT */ +) +{ + HH_ret_t ret; + VIP_common_ret_t vret; + u_int32_t rsvd_srq= 1 << log2_rsvd_srq; + u_int32_t max_srq= (1 << log2_max_srq) - rsvd_srq; + u_int32_t initial_array_sz= max_srq > 1024 ? 1024 : max_srq; + MTL_DEBUG1(MT_FLFMT("%s: Invoked with log2_max_srq=0x%u log2_rsrv_srq=0x%u srqm_p=0x%p"), + __func__, log2_max_srq, log2_rsvd_srq, srqm_p); + + *srqm_p= MALLOC(sizeof(struct THH_srqm_st)); + if (*srqm_p == NULL) return HH_EAGAIN; + + vret= VIP_array_create_maxsize(initial_array_sz, max_srq, &(*srqm_p)->srq_array); + if (vret != VIP_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed VIP_array_create_maxsize (%u-%s)"), __func__, + vret, VAPI_strerror_sym(vret)); + ret= HH_EAGAIN; + goto vip_array_create_failed; + } + + ret= THH_hob_get_cmd_if(hob, &(*srqm_p)->cmdif); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_hob_get_cmd_if (%s)"), __func__, HH_strerror_sym(ret)); + goto get_failed; + } + + ret= THH_hob_get_mrwm(hob, &(*srqm_p)->mrwm); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_hob_get_mrwm (%s)"), __func__, HH_strerror_sym(ret)); + goto get_failed; + } + + ret= THH_hob_get_uldm(hob, &(*srqm_p)->uldm); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_hob_get_uldm (%s)"), __func__, HH_strerror_sym(ret)); + goto get_failed; + } + + (*srqm_p)->hob= hob; + (*srqm_p)->max_srq= max_srq; + (*srqm_p)->rsvd_srq= rsvd_srq; + + return HH_OK; + + get_failed: + VIP_array_destroy((*srqm_p)->srq_array, NULL); + vip_array_create_failed: + FREE(*srqm_p); + return ret; +} + + +HH_ret_t THH_srqm_destroy( + THH_srqm_t srqm /* IN */ +) +{ + VIP_common_ret_t vret; + + if (srqm == (THH_srqm_t)THH_INVALID_HNDL) { + MTL_ERROR1(MT_FLFMT("%s: Invoked for THH_INVALID_HNDL"), __func__); + return HH_EINVAL; + } + MTL_DEBUG1(MT_FLFMT("%s: Releasing SRQM handle 0x%p"), __func__, srqm); + + /* In case of abnormal HCA termination we may still have unreleased SRQ resources */ + vret= VIP_array_destroy(srqm->srq_array, free_srq_context); + if (vret != VIP_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed VIP_array_destroy (%u-%s) - completing SRQM destroy anyway"), + __func__, vret, VAPI_strerror_sym(vret)); + } + + FREE(srqm); + + return HH_OK; +} + + +HH_ret_t THH_srqm_create_srq( + THH_srqm_t srqm, /* IN */ + HH_pd_hndl_t pd, /* IN */ + THH_srq_ul_resources_t *srq_ul_resources_p, /* IO */ + HH_srq_hndl_t *srq_p /* OUT */ +) +{ + HH_ret_t ret; + THH_cmd_status_t cmd_ret; + VIP_common_ret_t vret; + VIP_array_handle_t vip_hndl; + THH_srq_t srq; + THH_internal_mr_t mr_props; + MOSAL_prot_ctx_t vm_ctx; + u_int32_t srqn; + THH_srq_context_t thh_srqc; + + if (srq_ul_resources_p->wqes_buf == 0) { + MTL_ERROR1(MT_FLFMT("%s: Got wqes_buf=NULL. 
WQEs in DDR-mem are not supported, yet."), + __func__); + return HH_ENOSYS; + } + + ret = THH_uldm_get_protection_ctx(srqm->uldm, pd, &vm_ctx); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_uldm_get_protection_ctx (%s)"), __func__, + mtl_strerror_sym(ret)); + return ret; + } + + srq= MALLOC(sizeof(struct THH_srq_st)); + if (srq == NULL) { + return HH_EAGAIN; + } + + vret= VIP_array_insert(srqm->srq_array, srq, &vip_hndl); + if (vret != VIP_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed VIP_array_insert (%u)"), __func__, vret); + ret= HH_EAGAIN; + goto vip_array_insert_failed; + } + + memset(&mr_props, 0, sizeof(mr_props)); + mr_props.start= srq_ul_resources_p->wqes_buf; + mr_props.size= srq_ul_resources_p->wqes_buf_sz; + mr_props.pd= pd; + mr_props.vm_ctx= vm_ctx; + mr_props.force_memkey = FALSE; + ret= THH_mrwm_register_internal(srqm->mrwm, &mr_props, &srq->lkey); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_mrwm_register_internal (%s)"), __func__, + HH_strerror_sym(ret)); + goto register_internal_failed; + } + + srqn= vip_hndl + srqm->rsvd_srq; + + thh_srqc.pd= pd; + thh_srqc.l_key= srq->lkey; + thh_srqc.wqe_addr_h= /* Upper 32b of WQEs buffer */ + (u_int32_t)((sizeof(MT_virt_addr_t) > 4) ? (srq_ul_resources_p->wqes_buf >> 32) : 0); + thh_srqc.ds= (u_int32_t)(srq_ul_resources_p->wqe_sz >> 4); /* 16B chunks */ + if (thh_srqc.ds > 0x3F) + thh_srqc.ds=0x3F; /* Stride may be 1024, but max WQE size is 1008 (ds is 6bit) */ + thh_srqc.uar= srq_ul_resources_p->uar_index; + cmd_ret= THH_cmd_SW2HW_SRQ(srqm->cmdif, srqn, &thh_srqc); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_cmd_SW2HW_SRQ for srqn=0x%X (%s)"), __func__, + srqn, str_THH_cmd_status_t(cmd_ret)); + ret= HH_EFATAL; /* Unexpected error */ + goto sw2hw_failed; + } + + *srq_p= srqn; + MTL_DEBUG4(MT_FLFMT("%s: Allocated SRQn=0x%X"), __func__, srqn); + return HH_OK; + + sw2hw_failed: + THH_mrwm_deregister_mr(srqm->mrwm, srq->lkey); + register_internal_failed: + VIP_array_erase(srqm->srq_array, vip_hndl, NULL); + vip_array_insert_failed: + FREE(srq); + return ret; +} + +HH_ret_t THH_srqm_destroy_srq( + THH_srqm_t srqm, /* IN */ + HH_srq_hndl_t srqn /* IN */ +) +{ + HH_ret_t ret; + THH_cmd_status_t cmd_ret; + VIP_common_ret_t vret; + VIP_array_obj_t vip_obj; + THH_srq_t srq; + MT_bool have_fatal= FALSE; + + vret= VIP_array_erase_prepare(srqm->srq_array, srqn - srqm->rsvd_srq, &vip_obj); + if (vret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed VIP_array_erase_prepare (%u)"), __func__, vret); + return HH_EINVAL; + } + srq= (THH_srq_t)vip_obj; + + cmd_ret= THH_cmd_HW2SW_SRQ(srqm->cmdif, srqn, NULL); + if (cmd_ret != THH_CMD_STAT_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed THH_cmd_SW2HW_SRQ for srqn=0x%X (%s)"), __func__, + srqn, str_THH_cmd_status_t(cmd_ret)); + have_fatal= TRUE; /* Unexpected error */ + } else { + ret= THH_mrwm_deregister_mr(srqm->mrwm, srq->lkey); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_mrwm_deregister_mr (%s)"), __func__, + HH_strerror_sym(ret)); + have_fatal= TRUE; + } + } + + if (!have_fatal) { + vret= VIP_array_erase_done(srqm->srq_array, srqn - srqm->rsvd_srq, NULL); + if (vret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed VIP_array_erase_done (%u)"), __func__, vret); + have_fatal= TRUE; + } else { + FREE(srq); + } + + } else { + VIP_array_erase_undo(srqm->srq_array, srqn - srqm->rsvd_srq); + /* Leave for srqm_destroy cleanup */ + } + + return HH_OK; /* resource cleanup is always OK - even if fatal */ +} + + +HH_ret_t THH_srqm_query_srq( + THH_srqm_t 
srqm, /* IN */ + HH_srq_hndl_t srq, /* IN */ + u_int32_t *limit_p /* OUT */ +) +{ + VIP_common_ret_t vret; + + vret= VIP_array_find_hold(srqm->srq_array, srq - srqm->rsvd_srq, NULL); + if (vret != HH_OK) { + return HH_EINVAL_SRQ_HNDL; + } + + *limit_p= 0; /* Tavor does not support SRQ limit, so the limit event is disarmed */ + + VIP_array_find_release(srqm->srq_array, srq - srqm->rsvd_srq); + return HH_OK; +} + +HH_ret_t THH_srqm_modify_srq( + THH_srqm_t srqm, /* IN */ + HH_srq_hndl_t srqn, /* IN */ + THH_srq_ul_resources_t *srq_ul_resources_p /* IO */ +) +{ + HH_ret_t ret = HH_OK; + VIP_common_ret_t vret; + VIP_array_obj_t vip_obj; + THH_srq_t srq_obj; + THH_internal_mr_t mr_props; + THH_mrwm_modify_flags_t flags = THH_MRWM_MODIFY_FLAGS_TRANSLATION; + + MTL_TRACE1(MT_FLFMT("%s: SRQn=0x%X"), __func__, srqn); + + if (srq_ul_resources_p->wqes_buf_sz == 0) { + /* "Shrinking" SRQ succeeds with NOP */ + return HH_OK; + } + + if (srq_ul_resources_p->wqes_buf == 0) { + MTL_ERROR1(MT_FLFMT("%s: Got wqes_buf=NULL. WQEs in DDR-mem are not supported, yet."), + __func__); + return HH_ENOSYS; + } + + vret= VIP_array_find_hold(srqm->srq_array, srqn - srqm->rsvd_srq, &vip_obj); + if (vret != HH_OK) { + return HH_EINVAL_SRQ_HNDL; + } + srq_obj= (THH_srq_t)vip_obj; + + memset(&mr_props, 0, sizeof(mr_props)); + mr_props.start= srq_ul_resources_p->wqes_buf; + mr_props.size= srq_ul_resources_p->wqes_buf_sz; + + ret= THH_mrwm_modify_internal(srqm->mrwm, srq_obj->lkey, &mr_props, flags); + if (ret != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_mrwm_modify_internal (%d: %s), start="VIRT_ADDR_FMT", size="SIZE_T_DFMT), + __func__, ret, HH_strerror_sym(ret), srq_ul_resources_p->wqes_buf, srq_ul_resources_p->wqes_buf_sz); + } else { + MTL_DEBUG4(MT_FLFMT("%s: modified SRQn=0x%X. start="VIRT_ADDR_FMT", new size="SIZE_T_DFMT), + __func__, srqn, srq_ul_resources_p->wqes_buf, srq_ul_resources_p->wqes_buf_sz); + } + VIP_array_find_release(srqm->srq_array, srqn - srqm->rsvd_srq); + return ret; +} + + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h new file mode 100644 index 00000000..70f56e5a --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thh_srqm/thh_srqm.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THH_SRQM_H +#define H_THH_SRQM_H + +#include +#include +#include +#include +#include + +/************************************************************************ + * Function: THH_srqm_create + * + * Arguments: + * hob - The THH_hob object in which this object will be included + * log2_max_srq - Size of SRQC table + * log2_rsvd_srq - Log2 number of reserved SRQs + * srqm_p - Returned SRQ object + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * HH_EAGAIN - Not enough resources available + * + * Description: + * This function creates the THH_srqm object. + */ +HH_ret_t THH_srqm_create( + THH_hob_t hob, /* IN */ + u_int8_t log2_max_srq, /* IN */ + u_int8_t log2_rsvd_srq, /* IN */ + THH_srqm_t* srqm_p /* OUT */ +); + + +/************************************************************************ + * Function: THH_srqm_destroy + * + * Arguments: + * srqm - The object to destroy + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handle + * + * Description: + * Free all SRQM related resources. + */ +HH_ret_t THH_srqm_destroy( + THH_srqm_t srqm /* IN */ +); + + +/************************************************************************ + * Function: THH_srqm_create_srq + * + * Arguments: + * srqm - HCA (SRQM) context + * pd - PD of SRQ to create + * srq_ul_resources_p - THH's private SRQ attributes (WQEs buffer, etc.) + * srq_p - New SRQ handle + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * HH_EAGAIN - Not enough resources available to complete operation + * + * Description: + * Allocate a SRQ resource in the HCA. + */ +HH_ret_t THH_srqm_create_srq( + THH_srqm_t srqm, /* IN */ + HH_pd_hndl_t pd, /* IN */ + THH_srq_ul_resources_t *srq_ul_resources_p, /* IO */ + HH_srq_hndl_t *srq_p /* OUT */ +); + +/************************************************************************ + * Function: THH_srqm_destroy_srq + * + * Arguments: + * srqm - HCA (SRQM) context + * srq - SRQ to destroy + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameters + * + * Description: + * Release a SRQ resource. No checks for associated QPs (VIP's responsibility). + */ +HH_ret_t THH_srqm_destroy_srq( + THH_srqm_t srqm, /* IN */ + HH_srq_hndl_t srq /* IN */ +); + + +/************************************************************************ + * Function: THH_srqm_query_srq + * + * Arguments: + * srqm - HCA (SRQM) context + * srq - SRQ to query + * limit_p - Current SRQ limit + * + * Returns: + * HH_OK + * HH_ESRQ - SRQ is in error state + * HH_EINVAL - Invalid parameters + * + * Description: + * Query SRQ's limit (and state). + */ +HH_ret_t THH_srqm_query_srq( + THH_srqm_t srqm, /* IN */ + HH_srq_hndl_t srq, /* IN */ + u_int32_t *limit_p /* OUT */ +); + +/************************************************************************ + * Function: THH_srqm_modify_srq + * + * Arguments: + * srqm - HCA (SRQM) context + * srq - SRQ to modify + * + * + * Returns: + * HH_OK + * HH_ESRQ - SRQ is in error state + * HH_EINVAL - Invalid parameters + * HH_EAGAIN - Not enough resources available to complete operation + * + * Description: + * Modify SRQ's size or limit. 
+ */ +HH_ret_t THH_srqm_modify_srq( + THH_srqm_t srqm, /* IN */ + HH_srq_hndl_t srq, /* IN */ + THH_srq_ul_resources_t *srq_ul_resources_p /* IO */ +); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h new file mode 100644 index 00000000..6604a31b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THHUL_H +#define H_THHUL_H + +#include "thh_common.h" + +/* THHUL objects handles */ +typedef struct THHUL_hob_st *THHUL_hob_t; +typedef struct THHUL_pdm_st *THHUL_pdm_t; +typedef struct THHUL_cqm_st *THHUL_cqm_t; +typedef struct THHUL_qpm_st *THHUL_qpm_t; +typedef struct THHUL_srqm_st *THHUL_srqm_t; +typedef struct THHUL_mwm_st *THHUL_mwm_t; + +#endif /* H_THHUL_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c new file mode 100644 index 00000000..bcfa30f4 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.c @@ -0,0 +1,2084 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_THHUL_CQM_C + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "thhul_cqm.h" + +#include +MTPERF_NEW_SEGMENT(free_cqe,5000); + +/* Uncomment the line below in order to get CQ dump when the same WQE is used twice simultaneously*/ +/* #define THHUL_CQM_DEBUG_WQE_REUSE */ + +/* Always support fork (assumes number of CQs per process is limited) */ +#define MT_FORK_SUPPORT + + +/* Limit kmalloc to 2 pages (if this fails, vmalloc will fail too) */ +#define CQ_KMALLOC_LIMIT (2*MOSAL_SYS_PAGE_SIZE) + +/* Maximum CQ doorbell to coalesce/delay (too much is not effective ?) */ +#define MAX_CQDB2DELAY 255 + +/* CQE size */ +#define LOG2_CQE_SZ 5 /* 32 bytes */ +#define CQE_SZ (1U<> CQE_OWNER_SHIFT); + /* bit is '1 for HW-own.*/ +} + +/* Return CQE to HW ownership (change bit directly over CQE) */ +inline static void +set_cqe_to_hw_own( + IN volatile u_int32_t* const cqe_p ) +{ + ((volatile u_int8_t*)cqe_p)[CQE_OWNER_BYTE_OFFSET]= (1 << CQE_OWNER_SHIFT); +} + +/* Set ownership of CQE to hardware ownership (over original CQE) and */ +/* increment consumer index (software + hardware) */ +/* This function assumes CQ lock is already acquired by this thread */ +#if 0 /* oRiginal code */ +inline static void free_cqe(THHUL_cq_t *cq_p, volatile u_int32_t* cqe_p) +{ +#ifndef NO_CQ_CI_DBELL + HH_ret_t rc = HH_OK; +#endif + /* Pass ownership to HW */ + set_cqe_to_hw_own(cqe_p); + +#ifndef NO_CQ_CI_DBELL + if (cq_p->pending_cq_dbell >= cq_p->cur_spare_cqes) { + MTL_DEBUG4(MT_FLFMT("%s: Ringing CQ DB (CQN=0x%X) to increment CI by %u"),__func__, + cq_p->cq_num, cq_p->pending_cq_dbell+1); + /* Ring CQ-cmd doorbell to update consumer index (pending + this CQE)*/ + rc= THH_uar_cq_cmd(cq_p->uar,TAVOR_IF_UAR_CQ_INC_CI,cq_p->cq_num,cq_p->pending_cq_dbell); + if (MOSAL_EXPECT_FALSE(rc != HH_OK)) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_uar_cq_cmd (%s)"), __func__, HH_strerror_sym(rc)); + /* Even though, this is not a show stopper. Let's continue until we get CQ error. */ + cq_p->pending_cq_dbell++; /* Maybe we will get luckier next time */ + } else { + cq_p->pending_cq_dbell= 0; + } + } else { /* postpone CQ doorbell ringing on account of spare CQEs */ + cq_p->pending_cq_dbell++; + } +#endif /* NO_CQ_CI_DBELL */ + /* Update software consumer index */ + /*(modulo number of CQEs in buffer, which is one more than the maximum CQEs outstanding) */ + cq_p->cur_buf.consumer_index= (cq_p->cur_buf.consumer_index + 1) & MASK32(cq_p->cur_buf.log2_num_o_cqes); +} +#else /*free_wqe */ +//TODO: cnt should be 32-bits. +inline static void +dbell_cqe(THHUL_cq_t *cq_p, uint32_t cnt ) +{ +#ifndef NO_CQ_CI_DBELL + volatile u_int32_t chimeWords[2]; + THH_uar_t uar = cq_p->uar; + + cq_p->pending_cq_dbell += cnt; + + if( cq_p->pending_cq_dbell >= cq_p->cur_spare_cqes ) + { + MTL_DEBUG4(MT_FLFMT("%s: Ringing CQ DB (CQN=0x%X) to increment CI by %u"),__func__, + cq_p->cq_num, cq_p->pending_cq_dbell+1); + /* Ring CQ-cmd doorbell to update consumer index (pending + this CQE)*/ + chimeWords[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)cq_p->cq_num + | (TAVOR_IF_UAR_CQ_INC_CI << CQ_CMD_DBELL_BIT_OFFSET) + ); + /* Subtract one since the doorbell value of zero increments by one. 
*/ + chimeWords[1] = MOSAL_cpu_to_be32(--cq_p->pending_cq_dbell); + +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base + UAR_CQ_DBELL_OFFSET, *(volatile u_int64_t*)chimeWords); +#else + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base + UAR_CQ_DBELL_OFFSET, *(volatile u_int64_t*)chimeWords); + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + + cq_p->pending_cq_dbell= 0; + } +#endif /* NO_CQ_CI_DBELL */ +} + +inline static void free_cqe(THHUL_cq_t *cq_p, volatile u_int32_t* cqe_p) +{ + /* Pass ownership to HW */ + set_cqe_to_hw_own( cqe_p ); + + dbell_cqe( cq_p, 1 ); + + /* Update software consumer index */ + /*(modulo number of CQEs in buffer, which is one more than the maximum CQEs outstanding) */ + cq_p->cur_buf.consumer_index= (cq_p->cur_buf.consumer_index + 1) & MASK32(cq_p->cur_buf.log2_num_o_cqes); +} +#endif + + +/* Reuse given CQE in order to report "flush-error" for the next WQE in a queue */ +inline static void recycle_cqe(volatile u_int32_t* cqe_p, + u_int32_t next_wqe_addr_32lsb, u_int32_t new_dbd_cnt) +{ + /* Operations done directly on original "big-endian" CQE */ + /* Set next WQE's address */ + cqe_p[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2]= MOSAL_cpu_to_be32( + next_wqe_addr_32lsb & (~MASK32(CQE_WQE_ADR_BIT_SZ)) ); /* Mask off "wqe_adr" */ + /* Mark as "ERR_FLUSH" with updated dbd_cnt */ + cqe_p[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,immediate_ethertype_pkey_indx_eecredits)>>2]= + MOSAL_cpu_to_be32( + (TAVOR_IF_COMP_STATUS_ERR_FLUSH << (CQE_ERROR_SYNDROM_BIT_OFFSET & MASK32(5)) ) | + new_dbd_cnt); +} + +/* Translate from Tavor's error syndrom (in CQE.ib_syn) status encoding to VAPI's */ +inline static VAPI_wc_status_t decode_error_syndrome(tavor_if_comp_status_t tstatus) +{ + switch (tstatus) { + case TAVOR_IF_COMP_STATUS_ERR_LCL_LEN: + return VAPI_LOC_LEN_ERR; + case TAVOR_IF_COMP_STATUS_ERR_LCL_QP_OP: + return VAPI_LOC_QP_OP_ERR; + case TAVOR_IF_COMP_STATUS_ERR_LCL_EE_OP: + return VAPI_LOC_EE_OP_ERR; + case TAVOR_IF_COMP_STATUS_ERR_LCL_PROT: + return VAPI_LOC_PROT_ERR; + case TAVOR_IF_COMP_STATUS_ERR_FLUSH: + return VAPI_WR_FLUSH_ERR; + case TAVOR_IF_COMP_STATUS_ERR_MWIN_BIND: + return VAPI_MW_BIND_ERR; + case TAVOR_IF_COMP_STATUS_ERR_BAD_RESP: + return VAPI_BAD_RESP_ERR; + case TAVOR_IF_COMP_STATUS_ERR_LCL_ACCS: + return VAPI_LOC_ACCS_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RMT_INVAL_REQ: + return VAPI_REM_INV_REQ_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RMT_ACCSS: + return VAPI_REM_ACCESS_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RMT_OP: + return VAPI_REM_OP_ERR; + case TAVOR_IF_COMP_STATUS_ERR_TRANS_RETRY_EX: + return VAPI_RETRY_EXC_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RNR_RETRY_EX: + return VAPI_RNR_RETRY_EXC_ERR; + case TAVOR_IF_COMP_STATUS_ERR_LCL_RDD_VIOL: + return VAPI_LOC_RDD_VIOL_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RMT_INVAL_REQ_RD: + return VAPI_REM_INV_RD_REQ_ERR; + case TAVOR_IF_COMP_STATUS_ERR_RMT_ABORT: + return VAPI_REM_ABORT_ERR; + case TAVOR_IF_COMP_STATUS_ERR_INVAL_EEC_NUM: + return VAPI_INV_EECN_ERR; + case TAVOR_IF_COMP_STATUS_ERR_INVAL_EEC_STT: + return VAPI_INV_EEC_STATE_ERR; + default: + MTL_ERROR1(MT_FLFMT("Invalid CQE error syndrome (0x%X)"),tstatus); + return VAPI_COMP_GENERAL_ERR; + } +} + +inline static ib_wc_status_t +decode_error_syndrome2( + IN tavor_if_comp_status_t tstatus ) +{ + switch( tstatus ) + { + case TAVOR_IF_COMP_STATUS_ERR_LCL_LEN: + return IB_WCS_LOCAL_LEN_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_LCL_QP_OP: + return 
IB_WCS_LOCAL_OP_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_LCL_PROT: + return IB_WCS_LOCAL_PROTECTION_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_FLUSH: + return IB_WCS_WR_FLUSHED_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_MWIN_BIND: + return IB_WCS_MEM_WINDOW_BIND_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_BAD_RESP: + return IB_WCS_TIMEOUT_RETRY_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_LCL_ACCS: + return IB_WCS_LOCAL_PROTECTION_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_RMT_INVAL_REQ: + return IB_WCS_REM_INVALID_REQ_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_RMT_ACCSS: + return IB_WCS_REM_ACCESS_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_RMT_OP: + return IB_WCS_REM_OP_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_TRANS_RETRY_EX: + return IB_WCS_TIMEOUT_RETRY_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_RNR_RETRY_EX: + return IB_WCS_RNR_RETRY_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_LCL_RDD_VIOL: + return IB_WCS_LOCAL_PROTECTION_ERR; + + case TAVOR_IF_COMP_STATUS_ERR_RMT_ABORT: + return IB_WCS_REM_OP_ERR; + + default: + MTL_ERROR1(MT_FLFMT("Invalid CQE error syndrome (0x%X)"),tstatus); + return IB_WCS_LOCAL_OP_ERR; + } +} + +inline static HH_ret_t decode_opcode(MT_bool send_q, u_int8_t cqe_opcode, + VAPI_cqe_opcode_t *vapi_cqe_opcode_p, MT_bool *immediate_valid_p) +{ + *immediate_valid_p= FALSE; /* Innocent until proven guilty... */ + if (send_q) { /* Send queue - use "nopcode" encoding */ + switch (cqe_opcode) { + case TAVOR_IF_NOPCODE_RDMAW: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_RDMA_WRITE; + return HH_OK; + case TAVOR_IF_NOPCODE_RDMAW_IMM: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_RDMA_WRITE; + *immediate_valid_p= TRUE; + return HH_OK; + case TAVOR_IF_NOPCODE_SEND: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_SEND_DATA; + return HH_OK; + case TAVOR_IF_NOPCODE_SEND_IMM: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_SEND_DATA; + *immediate_valid_p= TRUE; + return HH_OK; + case TAVOR_IF_NOPCODE_RDMAR: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_RDMA_READ; + return HH_OK; + case TAVOR_IF_NOPCODE_ATOM_CMPSWP: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_COMP_SWAP; + return HH_OK; + case TAVOR_IF_NOPCODE_ATOM_FTCHADD: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_FETCH_ADD; + return HH_OK; + case TAVOR_IF_NOPCODE_BIND_MEMWIN: + *vapi_cqe_opcode_p= VAPI_CQE_SQ_BIND_MRW; + return HH_OK; + default: + return HH_EINVAL; /* Invalid opcode - shouldn't happen */ + } + + } else { /* receive queue - use IB encoding */ + /* bits 4:0 are of the opcode are common to all transport types */ + switch (cqe_opcode & MASK32(5)) { + case IB_OP_SEND_LAST: + case IB_OP_SEND_ONLY: + *vapi_cqe_opcode_p= VAPI_CQE_RQ_SEND_DATA; + return HH_OK; + case IB_OP_SEND_IMM_LAST: + case IB_OP_SEND_IMM_ONLY: + *vapi_cqe_opcode_p= VAPI_CQE_RQ_SEND_DATA; + *immediate_valid_p= TRUE; + return HH_OK; + case IB_OP_RDMAW_IMM_LAST: + case IB_OP_RDMAW_IMM_ONLY: + *vapi_cqe_opcode_p= VAPI_CQE_RQ_RDMA_WITH_IMM; + *immediate_valid_p= TRUE; + return HH_OK; + default: + return HH_EINVAL; + } + } +} + + +inline static HH_ret_t decode_opcode2( + IN boolean_t send_q, + IN uint8_t cqe_opcode, + OUT ib_wc_type_t *p_wc_type, + OUT boolean_t *p_imm_valid ) +{ + *p_imm_valid = FALSE; /* Innocent until proven guilty... 
*/ + if( send_q ) + { + /* Send queue - use "nopcode" encoding */ + switch( cqe_opcode ) + { + case TAVOR_IF_NOPCODE_RDMAW_IMM: + *p_imm_valid = TRUE; + case TAVOR_IF_NOPCODE_RDMAW: + *p_wc_type = IB_WC_RDMA_WRITE; + return HH_OK; + + case TAVOR_IF_NOPCODE_SEND_IMM: + *p_imm_valid = TRUE; + case TAVOR_IF_NOPCODE_SEND: + *p_wc_type = IB_WC_SEND; + return HH_OK; + + case TAVOR_IF_NOPCODE_RDMAR: + *p_wc_type = IB_WC_RDMA_READ; + return HH_OK; + + case TAVOR_IF_NOPCODE_ATOM_CMPSWP: + *p_wc_type = IB_WC_COMPARE_SWAP; + return HH_OK; + + case TAVOR_IF_NOPCODE_ATOM_FTCHADD: + *p_wc_type = IB_WC_FETCH_ADD; + return HH_OK; + + case TAVOR_IF_NOPCODE_BIND_MEMWIN: + *p_wc_type = IB_WC_MW_BIND; + return HH_OK; + + default: + return HH_EINVAL; /* Invalid opcode - shouldn't happen */ + } + } + else + { + /* receive queue - use IB encoding */ + /* bits 4:0 are of the opcode are common to all transport types */ + switch( cqe_opcode & MASK32(5) ) + { + case IB_OP_SEND_IMM_LAST: + case IB_OP_SEND_IMM_ONLY: + *p_imm_valid = TRUE; + case IB_OP_SEND_LAST: + case IB_OP_SEND_ONLY: + *p_wc_type = IB_WC_RECV; + return HH_OK; + + case IB_OP_RDMAW_IMM_LAST: + case IB_OP_RDMAW_IMM_ONLY: + *p_wc_type = IB_WC_RECV_RDMA_WRITE; + *p_imm_valid = TRUE; + return HH_OK; + + default: + return HH_EINVAL; + } + } +} + + +/* Extract CQE fields but for "status", "free_res_count" and "id" (already filled in poll4cqe) */ +/* This function is used only for successfull completions. */ +/* Given CQE is already in CPU endianess */ +inline static HH_ret_t extract_cqe(u_int32_t *cqe, VAPI_wc_desc_t *vapi_cqe_p, + VAPI_special_qp_t qp_type, VAPI_ts_type_t qp_ts_type) +{ + HH_ret_t rc; + MT_bool send_cqe= MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,s), + MT_BIT_SIZE(tavorprm_completion_queue_entry_st,s)); + + rc= decode_opcode( + send_cqe , + MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode), + MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode) ), + &(vapi_cqe_p->opcode),&(vapi_cqe_p->imm_data_valid)); + if (rc != HH_OK) { + MTL_ERROR4(MT_FLFMT("Invalid %s Opcode=0x%X"),send_cqe ? "send" : "receive", + MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode), + MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode) ) ); + return rc; + } + if (send_cqe && + ((vapi_cqe_p->opcode == VAPI_CQE_SQ_COMP_SWAP) || + (vapi_cqe_p->opcode == VAPI_CQE_SQ_FETCH_ADD) + ) + ) { /* Atomic operations are always of length 8 */ + vapi_cqe_p->byte_len= 8; + } else { /* Get bytes transfered from CQE (see FM issue #15659) */ + vapi_cqe_p->byte_len= cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,byte_cnt)>>2]; + } + + /* Get data from immediate_ethertype_pkey_indx_eecredits if valid */ + + switch (qp_ts_type) { + case VAPI_TS_UD: + if (!send_cqe) { /* see IB-spec. 
11.4.2.1: Output Modifiers */ + vapi_cqe_p->remote_node_addr.type= VAPI_RNA_UD; + vapi_cqe_p->remote_node_addr.qp_ety.qp= + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,rqpn)>>2] & MASK32(24); + vapi_cqe_p->remote_node_addr.ee_dlid.dst_path_bits= MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,ml_path), + MT_BIT_SIZE(tavorprm_completion_queue_entry_st,ml_path) ); + vapi_cqe_p->remote_node_addr.slid= MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,rlid),MT_BIT_SIZE(tavorprm_completion_queue_entry_st,rlid)); + vapi_cqe_p->remote_node_addr.sl= MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,sl),MT_BIT_SIZE(tavorprm_completion_queue_entry_st,sl)); + vapi_cqe_p->grh_flag= MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,g),MT_BIT_SIZE(tavorprm_completion_queue_entry_st,g)); + } + break; + case VAPI_TS_RD: + if (!send_cqe) { /* see IB-spec. 11.4.2.1: Output Modifiers */ + vapi_cqe_p->remote_node_addr.type= VAPI_RNA_RD; + vapi_cqe_p->remote_node_addr.qp_ety.qp= + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,rqpn)>>2] & MASK32(24); + vapi_cqe_p->remote_node_addr.ee_dlid.loc_eecn= + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_ee)>>2] & MASK32(24); + } + break; + default: + break; + } + + + switch (qp_type) { + case VAPI_REGULAR_QP: + /*if (vapi_cqe_p->imm_data_valid) */ + /* Copy immediate_ethertype_pkey_indx_eecredits even if no immediate data, + * in order to get eecredits (for SQ) */ + vapi_cqe_p->imm_data = + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,immediate_ethertype_pkey_indx_eecredits)>>2]; + break; + case VAPI_RAW_ETY_QP: + vapi_cqe_p->remote_node_addr.type= VAPI_RNA_RAW_ETY; +/*** warning C4242: '=' : conversion from 'u_int32_t' to 'VAPI_ethertype_t', possible loss of data ***/ + vapi_cqe_p->remote_node_addr.qp_ety.ety= (VAPI_ethertype_t) + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,immediate_ethertype_pkey_indx_eecredits)>>2]; + break; + + case VAPI_GSI_QP: + vapi_cqe_p->pkey_ix= + cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,immediate_ethertype_pkey_indx_eecredits)>>2] + >> 16; /* Pkey index is on bits 31:16 */ + vapi_cqe_p->imm_data_valid= FALSE; + /* QP1's RQ requests complete as "send w/immediate", even though it is just a send */ + break; + + default: + break; + + } + + return HH_OK; +} + +#define CQE_RLID_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,rlid)>>2 +#define CQE_MY_EE_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_ee)>>2 +#define CQE_RQPN_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,rqpn)>>2 +#define CQE_MY_EE_BIT_MASK MASK32(24) +#define CQE_RQPN_BIT_MASK MASK32(24) + +#define CQE_S_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,s)>>2 +#define CQE_S_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,s)) +#define CQE_S_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,s) & MASK32(5)) + +/* this is optimized version of extract_cqe and passing opcode for poll4cqe */ +inline static HH_ret_t extract_cqe_new(u_int32_t *cqe, VAPI_wc_desc_t *vapi_cqe_p, + VAPI_special_qp_t qp_type, VAPI_ts_type_t qp_ts_type,u_int32_t opcode) +{ + //HH_ret_t rc; + + MT_bool send_cqe= (cqe[CQE_S_DWORD_OFFSET]>>CQE_S_SHIFT) & CQE_S_BIT_MASK; + + if (MOSAL_EXPECT_FALSE(decode_opcode(send_cqe, + (u_int8_t)opcode, + &(vapi_cqe_p->opcode), + &(vapi_cqe_p->imm_data_valid)) + != HH_OK)) + + { + MTL_ERROR4(MT_FLFMT("Invalid %s Opcode=0x%X"),send_cqe ? 
"send" : "receive", + MT_EXTRACT_ARRAY32(cqe, + MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode), + MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode) ) ); + return HH_EINVAL; + } + + /* short circuit this case */ + if (MOSAL_EXPECT_FALSE(qp_ts_type == VAPI_TS_RC)) { + return HH_OK; + } + +#define CQE_ML_PATH_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,ml_path)) +#define CQE_ML_PATH_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,ml_path)>>2 +#define CQE_ML_PATH_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,ml_path) & MASK32(5)) + +#define CQE_RLID_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,rlid)) +#define CQE_RLID_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,rlid) & MASK32(5)) +#define CQE_SL_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,sl)) +#define CQE_SL_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,sl)>>2 +#define CQE_SL_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,sl) & MASK32(5)) +#define CQE_G_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,g)) +#define CQE_G_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,g)>>2 +#define CQE_G_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,g) & MASK32(5)) + + + /* see IB-spec. 11.4.2.1: Output Modifiers */ + if (!send_cqe) { + if (qp_ts_type == VAPI_TS_UD) { + + vapi_cqe_p->remote_node_addr.type= VAPI_RNA_UD; + + vapi_cqe_p->remote_node_addr.qp_ety.qp= + cqe[CQE_RQPN_DWORD_OFFSET] & CQE_RQPN_BIT_MASK; + + vapi_cqe_p->remote_node_addr.ee_dlid.dst_path_bits = + (cqe[CQE_ML_PATH_DWORD_OFFSET]>>CQE_ML_PATH_SHIFT)& CQE_ML_PATH_BIT_MASK; + vapi_cqe_p->remote_node_addr.slid= + (cqe[CQE_RLID_DWORD_OFFSET]>>CQE_RLID_SHIFT)& CQE_RLID_BIT_MASK; + vapi_cqe_p->remote_node_addr.sl= + (cqe[CQE_SL_DWORD_OFFSET]>>CQE_SL_SHIFT)& CQE_SL_BIT_MASK; + vapi_cqe_p->grh_flag= (cqe[CQE_G_DWORD_OFFSET]>>CQE_G_SHIFT)& CQE_G_BIT_MASK; + } + else + { + if (qp_ts_type == VAPI_TS_RD) { + vapi_cqe_p->remote_node_addr.type = VAPI_RNA_RD; + + /*RQPN field not converted to be yet */ + vapi_cqe_p->remote_node_addr.qp_ety.qp= + cqe[CQE_RQPN_DWORD_OFFSET] & CQE_RQPN_BIT_MASK; + vapi_cqe_p->remote_node_addr.ee_dlid.loc_eecn= + cqe[CQE_MY_EE_DWORD_OFFSET] & CQE_MY_EE_BIT_MASK; + } + + } + } + + switch (qp_type) { + case VAPI_RAW_ETY_QP: + vapi_cqe_p->remote_node_addr.type = VAPI_RNA_RAW_ETY; + vapi_cqe_p->remote_node_addr.qp_ety.ety = (VAPI_ethertype_t)vapi_cqe_p->imm_data; + break; + + case VAPI_GSI_QP: + vapi_cqe_p->pkey_ix= vapi_cqe_p->imm_data >> 16;/* Pkey index is on bits 31:16 */ + vapi_cqe_p->imm_data_valid= FALSE; + /* QP1's RQ requests complete as "send w/immediate", even though it is just a send */ + + break; + default: + break; + + } + + return HH_OK; +} + +/* Given CQ must be locked when calling this function */ +static inline void sync_consumer_index(THHUL_cq_t *cq_p, const char* caller) +{ + HH_ret_t rc; + + if (cq_p->pending_cq_dbell > 0) { + MTL_DEBUG4(MT_FLFMT("%s: Ringing CQ DB (CQN=0x%X) to increment CI by %u"),caller, + cq_p->cq_num, cq_p->pending_cq_dbell); + rc= THH_uar_cq_cmd(cq_p->uar,TAVOR_IF_UAR_CQ_INC_CI,cq_p->cq_num,cq_p->pending_cq_dbell-1); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_uar_cq_cmd (%s)"), caller, HH_strerror_sym(rc)); + /* Even though, this is not a show stopper. Let's continue until we get CQ error. 
*/ + } else { + cq_p->pending_cq_dbell= 0; + } + } +} + +/********************************************************************************************** + * Private functions declarations + **********************************************************************************************/ +static HH_ret_t cqe_buf_alloc(THHUL_cqe_buf_t *cqe_buf, VAPI_cqe_num_t num_o_cqes); + +static void cqe_buf_free(THHUL_cqe_buf_t *cqe_buf); + +static const char* cq_state_str(THHUL_cq_state_t cq_state) +{ + switch (cq_state) { + case THHUL_CQ_PREP: return "THHUL_CQ_PREP"; + case THHUL_CQ_IDLE: return "THHUL_CQ_IDLE"; + case THHUL_CQ_RESIZE_PREP: return "THHUL_CQ_RESIZE_PREP"; + default: return "(unknown state)"; + } +} + +static u_int32_t cqe_buf_cleanup(THHUL_cqe_buf_t *cqe_buf,IB_wqpn_t qp, + THHUL_srqm_t srqm, HHUL_srq_hndl_t srq, + u_int32_t *cur_producer_index_p); + +static void cqe_buf_cpy2resized( + THHUL_cqe_buf_t *cur_buf, + THHUL_cqe_buf_t *resized_buf, + MT_bool compute_new_pi); /* New resize-CQ flow */ + +static VAPI_cqe_num_t count_cqes(/*IN*/ THHUL_cq_t *cq_p, + /*IN*/ VAPI_cqe_num_t cqe_num_limit, + /*OUT*/ VAPI_cqe_num_t *hw_cqe_cnt_p); + + + +/********************************************************************************************** + * Public API Functions (defined in thhul_hob.h) + **********************************************************************************************/ + + +HH_ret_t THHUL_cqm_create( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ THHUL_cqm_t *cqm_p +) +{ + THHUL_cqm_t new_cqm; + + new_cqm= (THHUL_cqm_t)MALLOC(sizeof(struct THHUL_cqm_st)); + if (new_cqm == NULL) { + MTL_ERROR1("THHUL_cqm_create: Failed to allocate memory for a new CQM.\n"); + return HH_EAGAIN; + } + new_cqm->cq_list= NULL; + MOSAL_mutex_init(&(new_cqm->cqm_lock)); + + *cqm_p= new_cqm; + return HH_OK; +} + +HH_ret_t THHUL_cqm_destroy ( + /*IN*/ THHUL_cqm_t cqm +) +{ + THHUL_cq_t *thhul_cq; + + while (cqm->cq_list) { + thhul_cq = cqm->cq_list; + cqm->cq_list= thhul_cq->next; + cqe_buf_free(&(thhul_cq->cur_buf)); + FREE(thhul_cq); + } + + MOSAL_mutex_free(&(cqm->cqm_lock)); + FREE(cqm); + return HH_OK; +} + + +HH_ret_t THHUL_cqm_create_cq_prep( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ VAPI_cqe_num_t num_o_cqes, + /*OUT*/ HHUL_cq_hndl_t *hhul_cq_p, + /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p, + /*OUT*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +) +{ + THHUL_cqm_t cqm; + THHUL_cq_t *new_cq; + THH_cq_ul_resources_t *ul_res_p= (THH_cq_ul_resources_t*)cq_ul_resources_p; + HH_ret_t rc; + THH_hca_ul_resources_t hca_ul_res; + + rc= THHUL_hob_get_cqm(hca,&cqm); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_cqm_create_cq_prep: Invalid HCA handle.\n"); + return HH_EINVAL; + } + + rc= THHUL_hob_get_hca_ul_res(hca,&hca_ul_res); + if (rc != HH_OK) { + MTL_ERROR2("THHUL_cqm_create_cq_prep: Failed THHUL_hob_get_hca_ul_res (err=%d).\n",rc); + return rc; + } + + if (num_o_cqes > hca_ul_res.max_num_ent_cq) { + MTL_ERROR2("THHUL_cqm_create_cq_prep: cq_num_of_entries requested exceeds hca cap\n"); + return HH_E2BIG_CQE_NUM; + } + + new_cq= (THHUL_cq_t*)MALLOC(sizeof(THHUL_cq_t)); + if (new_cq == NULL) { + MTL_ERROR1("THHUL_cqm_create_cq_prep: Failed to allocate THHUL_cq_t.\n"); + return HH_EAGAIN; + } + + rc= cqe_buf_alloc(&new_cq->cur_buf,num_o_cqes); + if (rc != HH_OK) goto failed_cqe_buf; + new_cq->cq_state= THHUL_CQ_PREP; + new_cq->cur_spare_cqes= new_cq->cur_buf.spare_cqes; + new_cq->pending_cq_dbell= 0; + + new_cq->cq_resize_fixed= (hca_ul_res.version.fw_ver_major >= 3); + + rc= THHUL_hob_get_uar(hca,&(new_cq->uar)); + if (rc != 
HH_OK) { + MTL_ERROR1("THHUL_cqm_create_cq_prep: Failed getting THHUL_hob's UAR (err=%d).\n",rc); + goto failed_uar; + } + rc= THH_uar_get_index(new_cq->uar,&(ul_res_p->uar_index)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_cqm_create_cq_prep: Failed getting UAR index.\n"); + goto failed_uar; + } + + rc= THHUL_hob_get_qpm(hca,&(new_cq->qpm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_cqm_create_cq_prep: Failed getting THHUL_hob's QPM (err=%d).\n",rc); + goto failed_qpm; + } + + new_cq->cq_num= INVALID_CQ_NUM; + MOSAL_spinlock_init(&(new_cq->cq_lock)); + /* Add CQ to CQs list */ + if (MOSAL_mutex_acq(&(cqm->cqm_lock),TRUE) != 0) {rc= HH_EINTR; goto failed_mutex;} + new_cq->next= cqm->cq_list; /* Add before first (if any) */ + cqm->cq_list= new_cq; + MOSAL_mutex_rel(&(cqm->cqm_lock)); + + /* Output modifiers */ + *hhul_cq_p= (HHUL_cq_hndl_t)new_cq; + /* One CQE is always reserved (for CQ-full indication) */ + *num_o_cqes_p= (1U << new_cq->cur_buf.log2_num_o_cqes) - 1 - new_cq->cur_buf.spare_cqes; + ul_res_p->cqe_buf= new_cq->cur_buf.cqe_buf_base; /* Registration is required only from here */ + ul_res_p->cqe_buf_sz= (1U << new_cq->cur_buf.log2_num_o_cqes) * CQE_SZ ; + /* ul_res_p->uar_index ==> already set above */ + return HH_OK; + + failed_mutex: + failed_qpm: + failed_uar: + cqe_buf_free(&(new_cq->cur_buf)); + failed_cqe_buf: + FREE(new_cq); + return rc; +} + + +HH_ret_t THHUL_cqm_create_cq_done( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_cq_hndl_t hhul_cq, + /*IN*/ HH_cq_hndl_t hh_cq, + /*IN*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +) +{ + THHUL_cq_t *cq= (THHUL_cq_t*)hhul_cq; + + if (cq == NULL) { + MTL_ERROR1("THHUL_cqm_create_cq_done: NULL CQ handle.\n"); + return HH_EINVAL; + } + if (hh_cq > MAX_CQ_NUM) { + MTL_ERROR1("THHUL_cqm_create_cq_done: Invalid CQ number (0x%X).\n",hh_cq); + return HH_EINVAL; + } + + MOSAL_spinlock_dpc_lock(&(cq->cq_lock)); + if (cq->cq_state != THHUL_CQ_PREP) { + MOSAL_spinlock_unlock(&(cq->cq_lock)); + MTL_ERROR1("THHUL_cqm_create_cq_done: Library inconsistancy ! 
Given CQ is not in THHUL_CQ_PREP state.\n"); + return HH_ERR; + } + cq->cq_state= THHUL_CQ_IDLE; + cq->cq_num= hh_cq; + MOSAL_spinlock_unlock(&(cq->cq_lock)); + + return HH_OK; +} + + +HH_ret_t THHUL_cqm_destroy_cq_done( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq +) +{ + THHUL_cq_t *thhul_cq= (THHUL_cq_t*)cq; + THHUL_cq_t *prev_cq,*cur_cq; + THHUL_cqm_t cqm; + HH_ret_t rc; + + rc= THHUL_hob_get_cqm(hca_hndl,&cqm); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_cqm_destroy_cq_done: Invalid HCA handle.\n"); + return HH_EINVAL; + } + if (cq == NULL) { + MTL_ERROR1("THHUL_cqm_destroy_cq_done: NULL CQ handle.\n"); + return HH_EINVAL; + } + + if (thhul_cq->cq_state == THHUL_CQ_RESIZE_PREP) { + /* Someone invoked VAPI_destroy_cq while invoking VAPI_resize_cq */ + MTL_ERROR2(MT_FLFMT("%s: Invoked while in THHUL_CQ_RESIZE_PREP (cqn=0x%X) !"),__func__, + thhul_cq->cq_num); + return HH_EBUSY; + } + /* Remove from CQs list */ + MOSAL_mutex_acq_ui(&(cqm->cqm_lock)); + for (cur_cq= cqm->cq_list, prev_cq= NULL; cur_cq != NULL; + prev_cq= cur_cq, cur_cq= cur_cq->next) { + if (cur_cq == cq) break; + } + if (cur_cq == NULL) { /* CQ not found */ + MOSAL_mutex_rel(&(cqm->cqm_lock)); + MTL_ERROR1("THHUL_cqm_destroy_cq_done: invalid CQ handle (not found).\n"); + return HH_EINVAL; + } + if (prev_cq == NULL) { /* First in list */ + cqm->cq_list= thhul_cq->next; /* Make next be the first */ + } else { + prev_cq->next= thhul_cq->next; /* Link previous to next */ + } + MOSAL_mutex_rel(&(cqm->cqm_lock)); + + /* Cleanup CQ resources */ + cqe_buf_free(&(thhul_cq->cur_buf)); + FREE(thhul_cq); + return HH_OK; +} + + +ib_api_status_t THHUL_cqm_resize_cq_prep( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cqe_num_t num_o_cqes, + /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p, + /*OUT*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +) +{ + THHUL_cq_t *cq_p= (THHUL_cq_t*)cq; + THH_cq_ul_resources_t *ul_res_p= (THH_cq_ul_resources_t*)cq_ul_resources_p; + HH_ret_t rc; + THH_hca_ul_resources_t hca_ul_res; + THHUL_cqe_buf_t new_buf; + + if (cq_p == NULL) { + MTL_ERROR1("%s: NULL CQ handle.\n",__func__); + return IB_INVALID_CQ_HANDLE; + } + + rc= THHUL_hob_get_hca_ul_res(hca_hndl,&hca_ul_res); + if (rc != HH_OK) { + MTL_ERROR2("THHUL_cqm_create_cq_prep: Failed THHUL_hob_get_hca_ul_res (err=%d).\n",rc); + return IB_ERROR; + } + + if (num_o_cqes > hca_ul_res.max_num_ent_cq) { + MTL_ERROR2("THHUL_cqm_create_cq_prep: cq_num_of_entries requested exceeds hca cap\n"); + return IB_INVALID_CQ_SIZE; + } + + rc= cqe_buf_alloc(&new_buf,num_o_cqes); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Allocating buffer for resized CQ 0x%X has failed"),__func__, + cq_p->cq_num); + return IB_INSUFFICIENT_MEMORY; + } + + MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock)); + if (cq_p->cq_state != THHUL_CQ_IDLE) { + MTL_ERROR1("%s: CQ is not in IDLE state (current state=%d=%s)\n", __func__, + cq_p->cq_state,cq_state_str(cq_p->cq_state)); + MOSAL_spinlock_unlock(&(cq_p->cq_lock)); + cqe_buf_free(&new_buf); + return IB_INVALID_STATE; + } + memcpy(&cq_p->resized_buf, &new_buf, sizeof(THHUL_cqe_buf_t)); + + /* Update CQ to real number of outstanding CQEs */ + /* (avoid failure in case of reduction in CQ size which matches real num. 
+
+
+HH_ret_t THHUL_cqm_resize_cq_done(
+  /*IN*/ HHUL_hca_hndl_t hca_hndl,
+  /*IN*/ HHUL_cq_hndl_t cq,
+  /*IN*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p
+)
+{
+  THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
+  THH_cq_ul_resources_t *ul_res_p= (THH_cq_ul_resources_t*)cq_ul_resources_p;
+  THHUL_cqe_buf_t rm_buf;
+
+  if (cq_p == NULL) {
+    MTL_ERROR1("THHUL_cqm_resize_cq_done: NULL CQ handle.\n");
+    return HH_EINVAL;
+  }
+
+  MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock));
+
+  if (cq_p->cq_state == THHUL_CQ_RESIZE_PREP) {
+    /* Still polling in old buffer */
+    if (!ul_res_p) { /* HH_resize_cq failed - clean up allocated buffer */
+      /* save buffer info to perform cleanup outside of spinlock */
+      memcpy(&rm_buf,&(cq_p->resized_buf),sizeof(THHUL_cqe_buf_t));
+      /* restore original "spare_cqes" of cur_buf (may have been reduced in resize_cq_prep) */
+      cq_p->cur_spare_cqes= cq_p->cur_buf.spare_cqes;
+
+    } else { /* Activate resized buffer */
+      cq_p->resized_buf.consumer_index= ul_res_p->new_producer_index; /* for old resize flow */
+      /* copy CQEs from old buffer to resized buffer */
+      cqe_buf_cpy2resized(&(cq_p->cur_buf),&(cq_p->resized_buf), cq_p->cq_resize_fixed);
+      memcpy(&rm_buf,&(cq_p->cur_buf),sizeof(THHUL_cqe_buf_t)); /* save old buffer */
+      /* Make resize buffer current. New consumer index is already updated. */
+      memcpy(&(cq_p->cur_buf),&(cq_p->resized_buf),sizeof(THHUL_cqe_buf_t));
+      cq_p->cur_spare_cqes= cq_p->cur_buf.spare_cqes; /* work with the spare CQEs of new buffer */
+    }
+
+  } else if (cq_p->cq_state == THHUL_CQ_RESIZE_DONE) {
+    /* Transition to resized buffer already done. No CQEs to copy. Just free old buffer */
+    /* (transition done in cq_transition_to_resized_buf) */
+    if (!ul_res_p) {
+      /* Sanity check - the RESIZE_CQ command is not supposed to fail if the new buffer was activated */
+      MTL_ERROR1(MT_FLFMT("%s: Inconsistency ! 
Got failure in RESIZE_CQ" + " after finding CQEs in the new buffer (cqn=0x%X)"), + __func__, cq_p->cq_num); + } + memcpy(&rm_buf,&(cq_p->resized_buf),sizeof(THHUL_cqe_buf_t)); /* save old buffer */ + + } else { /* Invalid CQ state for this function call */ + MOSAL_spinlock_unlock(&(cq_p->cq_lock)); + MTL_ERROR1("THHUL_cqm_resize_cq_done: Given CQ is not in THHUL_CQ_RESIZE_PREP/DONE state.\n"); + return HH_ERR; + } + + /* Good flow finalization */ + cq_p->cq_state= THHUL_CQ_IDLE; /* new resize may be initiated */ + MOSAL_spinlock_unlock(&(cq_p->cq_lock)); + cqe_buf_free(&rm_buf); /* Free old CQEs buffer (must be done outside spinlock section) */ + return HH_OK; +} + + +HH_ret_t THHUL_cqm_cq_cleanup( + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ IB_wqpn_t qp, + /*IN*/ THHUL_srqm_t srqm, + /*IN*/ HHUL_srq_hndl_t srq +) +{ + THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq; + u_int32_t removed_cqes= 0; + u_int32_t cur_buf_pi; /* Current buffer producer index */ + HH_ret_t rc= HH_OK; + + MTL_DEBUG2("THHUL_cqm_cq_cleanup(cq_p=%p,qp=%d) {\n",thhul_cq_p,qp); + if (thhul_cq_p == NULL) { + MTL_ERROR1("THHUL_cqm_cq_cleanup: NULL CQ handle.\n"); + return HH_EINVAL; + } + + MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock)); + + removed_cqes+= cqe_buf_cleanup(&(thhul_cq_p->cur_buf), qp, srqm, srq, &cur_buf_pi ); + /* In case we are resizing the CQ, new buffer may already include CQEs of that QP */ + if (thhul_cq_p->cq_state == THHUL_CQ_RESIZE_PREP) { + /* Update resized's CI based on new resize-cq flow (FM issue #16969). + * (This flow will fail for old flow anyway - so no need to distinguish between cases) + */ + thhul_cq_p->resized_buf.consumer_index= + cur_buf_pi & MASK32(thhul_cq_p->resized_buf.log2_num_o_cqes); + removed_cqes+= cqe_buf_cleanup(&(thhul_cq_p->resized_buf), qp, srqm, srq, &cur_buf_pi); + } + +#ifndef NO_CQ_CI_DBELL + /* Ring CQ-cmd doorbell to update consumer index (Removed CQEs + pending CQ doorbells) */ + if (removed_cqes) { + rc= THH_uar_cq_cmd(thhul_cq_p->uar,TAVOR_IF_UAR_CQ_INC_CI,thhul_cq_p->cq_num, + thhul_cq_p->pending_cq_dbell + removed_cqes - 1); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THH_uar_cq_cmd (%s)"), __func__, HH_strerror_sym(rc)); + /* Even though, this is not a show stopper. Let's continue until we get CQ error. 
*/ +/*** warning C4242: '+=' : conversion from 'u_int32_t' to 'u_int16_t', possible loss of data ***/ + thhul_cq_p->pending_cq_dbell += (u_int16_t)removed_cqes; /* Maybe we will get luckier next time */ + } else { + thhul_cq_p->pending_cq_dbell= 0; + } + } +#endif + + MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); + + return rc; +} + +#ifdef THHUL_CQM_DEBUG_WQE_REUSE +void THHUL_cqm_dump_cq( + /*IN*/ HHUL_cq_hndl_t cq +) +{ + THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq; + volatile u_int32_t *cur_cqe; + int cqe_index; + static HHUL_cq_hndl_t last_cq= NULL; + static u_int32_t last_consumer_index= 0xFFFFFFFF; + + /* Do not dump again for the same CQ/consumer-index */ + if ((cq == last_cq) && (last_consumer_index == thhul_cq_p->cur_buf.consumer_index)) return; + last_cq= cq; last_consumer_index= thhul_cq_p->cur_buf.consumer_index; + + MTL_ERROR4("THHUL_cqm_dump_cq: cq=%d consumer_index=%d\n", + thhul_cq_p->cq_num,last_consumer_index); + for (cqe_index= 0; cqe_index < (1 << thhul_cq_p->cur_buf.log2_num_o_cqes); cqe_index++) { + cur_cqe= (volatile u_int32_t *) + (thhul_cq_p->cur_buf.cqe_buf_base + (cqe_index << LOG2_CQE_SZ)); + DUMP_CQE(thhul_cq_p->cq_num,cqe_index,cur_cqe); + } + +} +#endif + +/* Check if can transition to new (resized) CQEs buffer and make cur_buf<--resized_buf + * - Valid for fixed resize cq FW version + * - Return TRUE if transition and cur_cqe is updated to new location in resized buf. + * - Must be invoked with CQ lock locked and only when CQE at current CI is invalid + */ +static MT_bool cq_transition_to_resized_buf( + THHUL_cq_t *cq_p, + volatile u_int32_t **cur_cqe_p +) +{ + THHUL_cqe_buf_t rm_buf; + + if ((cq_p->cq_state == THHUL_CQ_RESIZE_PREP) && + (cq_p->cq_resize_fixed)) { /* Peek into resized buffer */ + cq_p->resized_buf.consumer_index= /* Expected new CI */ + cq_p->cur_buf.consumer_index & MASK32(cq_p->resized_buf.log2_num_o_cqes); + *cur_cqe_p= (volatile u_int32_t *) + (cq_p->resized_buf.cqe_buf_base + + (cq_p->resized_buf.consumer_index << LOG2_CQE_SZ)); + if (!is_cqe_hw_own(*cur_cqe_p)) { /* Found CQE in new (resized) buffer */ + MTL_DEBUG4(MT_FLFMT("%s: transition to resized (cqn=0x%x old_pi=%u new_pi=%u cur_cqe=%p)"), + __func__, + cq_p->cq_num, cq_p->cur_buf.consumer_index, cq_p->resized_buf.consumer_index, + *cur_cqe_p); + /* Transition to resized buffer */ + memcpy(&rm_buf,&(cq_p->cur_buf),sizeof(THHUL_cqe_buf_t)); /* save old buffer */ + /* Make resize buffer current. New consumer index is already updated. */ + memcpy(&(cq_p->cur_buf),&(cq_p->resized_buf),sizeof(THHUL_cqe_buf_t)); + cq_p->cur_spare_cqes= cq_p->cur_buf.spare_cqes; /* work with the spare CQEs of new buffer */ + /* Save old buffer to be freed in "resize_done" (no need to hurry...) 
*/ + memcpy(&(cq_p->resized_buf),&rm_buf,sizeof(THHUL_cqe_buf_t)); + cq_p->cq_state= THHUL_CQ_RESIZE_DONE; /* THHUL_cqm_resize_cq_done is still expected */ + return TRUE; + } + } + return FALSE; +} + +#define CQE_IMMEDIATE_DWORD_OFFSET \ + MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,immediate_ethertype_pkey_indx_eecredits)>>2 +#define CQE_BYTE_CNT_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,byte_cnt)>>2 + + +#define CQE_WQE_ADDR_BYTE_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2 +#define CQE_WQE_ADDR_BIT_MASK (~MASK32(CQE_WQE_ADR_BIT_SZ)) + +#define CQE_MY_QPN_BYTE_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_qpn)>>2 +#define CQE_MY_QPN_BYTE_BIT_MASK MASK32(24) + +#define CQE_OPCODE_BIT_MASK MASK32(MT_BIT_SIZE(tavorprm_completion_queue_entry_st,opcode)) +#define CQE_OPCODE_DWORD_OFFSET MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,opcode)>>2 +#define CQE_OPCODE_SHIFT (MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,opcode) & MASK32(5)) + +#ifdef WIN32 +/* Successful completion */ +HH_ret_t +THHUL_cqm_comp_wc( + IN THHUL_cq_t* const p_thhul_cq, + IN const IB_wqpn_t qpn, + IN const uint32_t wqe_addr_32lsb, + IN volatile uint32_t* const cqe, + IN const uint8_t opcode, + IN const boolean_t send_cqe, + OUT ib_wc_t* const p_wc ) +{ + boolean_t imm_valid; + VAPI_special_qp_t qp_type; + VAPI_ts_type_t qp_ts_type; + u_int32_t i; + HH_ret_t rc = HH_OK; + u_int32_t free_res_count; + + rc = THHUL_qpm_comp_ok( p_thhul_cq->qpm, qpn, wqe_addr_32lsb, + &qp_type, &qp_ts_type, &(p_wc->wr_id), &free_res_count, NULL ); + if( MOSAL_EXPECT_FALSE( rc != HH_OK ) ) + { + MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n"); + for( i= 0; i < (CQE_SZ>>2); i++ ) + { + MTL_ERROR1(MT_FLFMT("CQ[0x%X][%u][%u]=0x%X"),p_thhul_cq->cq_num, + (p_thhul_cq->cur_buf.consumer_index - 1) & MASK32(p_thhul_cq->cur_buf.log2_num_o_cqes), + i, cl_ntoh32(cqe[i])); + } + return rc; + } + + rc = decode_opcode2( send_cqe, opcode, &p_wc->wc_type, &imm_valid ); + if( MOSAL_EXPECT_FALSE( rc != HH_OK) ) + { + MTL_ERROR4(MT_FLFMT("Invalid %s Opcode=0x%X"), + send_cqe ? "send" : "receive", opcode ); + return rc; + } + p_wc->length = cl_ntoh32(cqe[CQE_BYTE_CNT_DWORD_OFFSET]); + + /* Need the op-code before copying the immediate data. */ + if( !send_cqe ) + { + + switch( qp_ts_type ) + { + case VAPI_TS_UD: + p_wc->recv.ud.recv_opt = 0; + + p_wc->recv.ud.remote_qp = + cqe[CQE_RQPN_DWORD_OFFSET] & + CL_HTON32(CQE_RQPN_BIT_MASK); + + p_wc->recv.ud.path_bits = + ((uint8_t*)cqe)[0xD]; + /* The path bits now also have the GRH valid flag. 
*/
+      if( p_wc->recv.ud.path_bits & 0x80 )
+      {
+        p_wc->recv.ud.recv_opt |= IB_RECV_OPT_GRH_VALID;
+        p_wc->recv.ud.path_bits &= 0x7F;
+      }
+      p_wc->recv.ud.remote_lid =
+        ((net16_t*)cqe)[0x7];
+      if( p_wc->recv.ud.remote_lid == IB_LID_PERMISSIVE ||
+        cl_ntoh16(p_wc->recv.ud.remote_lid) >= IB_LID_MCAST_START_HO )
+      {
+        p_wc->recv.ud.path_bits = 0;
+      }
+
+      p_wc->recv.ud.remote_sl =
+        (((uint8_t*)cqe)[0xC] >> 4);
+
+      if( qp_type == VAPI_GSI_QP )
+      {
+        p_wc->recv.ud.pkey_index =
+          (uint16_t)(cl_ntoh32(cqe[CQE_IMMEDIATE_DWORD_OFFSET]) >> 16);
+      }
+      else if( imm_valid )
+      {
+        p_wc->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE;
+        p_wc->recv.ud.immediate_data = cqe[CQE_IMMEDIATE_DWORD_OFFSET];
+      }
+      break;
+
+    case VAPI_TS_RC:
+    case VAPI_TS_UC:
+      if( imm_valid )
+      {
+        p_wc->recv.conn.recv_opt = IB_RECV_OPT_IMMEDIATE;
+        p_wc->recv.conn.immediate_data = cqe[CQE_IMMEDIATE_DWORD_OFFSET];
+      }
+      else
+      {
+        p_wc->recv.conn.recv_opt = 0;
+      }
+      break;
+
+    default:
+      break;
+    }
+  }
+
+  p_wc->status = IB_WCS_SUCCESS;
+
+  MTPERF_TIME_START(free_cqe);
+  /*
+   * Pass ownership to HW, but delay ringing the doorbell until
+   * we're done polling
+   */
+  set_cqe_to_hw_own(cqe);
+  p_thhul_cq->cur_buf.consumer_index =
+    (p_thhul_cq->cur_buf.consumer_index + 1) &
+    MASK32(p_thhul_cq->cur_buf.log2_num_o_cqes);
+  MTPERF_TIME_END(free_cqe);
+
+  return HH_OK;
+}
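+
+/* Editorial illustration: the success path above and the error path below are
+ * selected by the opcode byte of the CQE. This hypothetical dispatcher
+ * (forward declarations omitted) condenses the same test that
+ * THHUL_cqm_poll4wc() performs further down. */
+static HH_ret_t dispatch_cqe( THHUL_cq_t* const p_cq, const IB_wqpn_t qpn,
+                              const uint32_t wqe_addr_32lsb, volatile uint32_t* const cqe,
+                              const uint8_t opcode, const boolean_t send_cqe, ib_wc_t* const p_wc )
+{
+  if( (opcode & CQE_ERROR_STATUS_MASK) == CQE_ERROR_STATUS_MASK )
+    return THHUL_cqm_failed_wc( p_cq, qpn, wqe_addr_32lsb, cqe, opcode, p_wc );
+  return THHUL_cqm_comp_wc( p_cq, qpn, wqe_addr_32lsb, cqe, opcode, send_cqe, p_wc );
+}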
+
+
+/* Completion with error */
+HH_ret_t
+THHUL_cqm_failed_wc(
+  IN  THHUL_cq_t* const p_thhul_cq,
+  IN  const IB_wqpn_t qpn,
+  IN  const uint32_t wqe_addr_32lsb,
+  IN  volatile uint32_t* const cqe,
+  IN  const uint8_t opcode,
+  OUT ib_wc_t* const p_wc )
+{
+  u_int32_t next_wqe_addr_32lsb;
+  u_int8_t dbd_bit;
+  u_int32_t i,dbd_cnt;
+  HH_ret_t rc = HH_OK;
+  u_int32_t free_res_count;
+  /* The CQE copy is required to be in CPU endianness. */
+  u_int32_t cqe_cpy[CQE_SZ>>2]; /* CQE copy */
+
+
+  /* Make CQE copy in correct endianness */
+  for (i= 0; i < (CQE_SZ>>2); i++)
+    cqe_cpy[i]= MOSAL_be32_to_cpu(cqe[i]);
+
+  MTL_DEBUG4("THHUL_cqm_poll4cqe: completion with error: cq=%d consumer_index=%d\n",
+             p_thhul_cq->cq_num, p_thhul_cq->cur_buf.consumer_index);
+  DUMP_CQE(p_thhul_cq->cq_num, p_thhul_cq->cur_buf.consumer_index, cqe);
+  rc= THHUL_qpm_comp_err(p_thhul_cq->qpm, qpn, wqe_addr_32lsb,
+                         &p_wc->wr_id,&free_res_count,&next_wqe_addr_32lsb,&dbd_bit);
+  if( rc != HH_OK )
+  {
+    MTL_ERROR1("THHUL_cqm_poll4cqe: Failed updating associated QP.\n");
+    return rc;
+  }
+  p_wc->status= decode_error_syndrome2((tavor_if_comp_status_t)MT_EXTRACT_ARRAY32(cqe_cpy,
+    CQE_ERROR_SYNDROM_BIT_OFFSET, CQE_ERROR_SYNDROM_BIT_SIZE) );
+  p_wc->vendor_specific = MT_EXTRACT_ARRAY32(cqe_cpy,
+    CQE_ERROR_VENDOR_SYNDROM_BIT_OFFSET, CQE_ERROR_VENDOR_SYNDROM_BIT_SIZE);
+  dbd_cnt= MT_EXTRACT_ARRAY32(cqe_cpy,CQE_ERROR_DBDCNT_BIT_OFFSET, CQE_ERROR_DBDCNT_BIT_SIZE);
+  if ((next_wqe_addr_32lsb == THHUL_QPM_END_OF_WQE_CHAIN) || /* End of WQE chain */
+      ((dbd_cnt + 1 - dbd_bit) == 0) )                       /* or dbd counter reached 0 */
+  {
+    MTPERF_TIME_START(free_cqe);
+    free_cqe( p_thhul_cq, cqe ); /* Free original CQE and update consumer index */
+    MTPERF_TIME_END(free_cqe);
+  }
+  else
+  {
+    recycle_cqe( cqe, next_wqe_addr_32lsb, dbd_cnt - dbd_bit );
+  }
+  /*
+   * Only WQE-ID, free_res_count and status are required for completion with error.
+   * No other CQE fields are extracted (see IB-spec. 11.4.2.1).
+   * Even so, for the sake of some legacy code:
+   * ...putting an opcode to distinguish completion of SQ from RQ
+   */
+  if( opcode == CQE_ERROR_ON_SQ )
+  {
+    p_wc->wc_type = IB_WC_SEND;
+  }
+  else
+  {
+    /* receive queue completion */
+    p_wc->wc_type = IB_WC_RECV;
+  }
+  return HH_OK;
+}
+
+
+ib_api_status_t
+THHUL_cqm_poll4wc(
+  IN     HHUL_hca_hndl_t hca_hndl,
+  IN     HHUL_cq_hndl_t cq,
+  IN OUT ib_wc_t** const pp_free_wclist,
+  OUT    ib_wc_t** const pp_done_wclist )
+{
+  THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq;
+  volatile u_int32_t *cur_cqe;
+  u_int32_t wqe_addr_32lsb;
+  IB_wqpn_t qpn;
+  uint8_t opcode;
+  uint8_t send_cqe;
+  HH_ret_t rc = HH_OK;
+  ib_wc_t *p_wc, **pp_next;
+  u_int32_t wc_cnt = 0;
+
+  if (MOSAL_EXPECT_FALSE(thhul_cq_p == NULL))
+  {
+    MTL_ERROR1("THHUL_cqm_poll4cqe: NULL CQ handle.\n");
+    return IB_INVALID_CQ_HANDLE;
+  }
+
+  CL_ASSERT( pp_free_wclist );
+  CL_ASSERT( *pp_free_wclist );
+  CL_ASSERT( pp_done_wclist );
+
+  MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock));
+
+  /* Check if CQE at consumer index is valid */
+  cur_cqe= (volatile u_int32_t*)
+    (thhul_cq_p->cur_buf.cqe_buf_base +
+     (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ));
+  pp_next = pp_done_wclist;
+  p_wc = *pp_free_wclist;
+  while( p_wc )
+  {
+    if( is_cqe_hw_own(cur_cqe) &&          /* CQE is still in HW ownership */
+        !cq_transition_to_resized_buf( thhul_cq_p, &cur_cqe ) )
+    {
+      break;
+    }
+
+    /* Extract QP/WQE context fields from the CQE */
+    /* Byte 6 */
+    wqe_addr_32lsb= MOSAL_be32_to_cpu(cur_cqe[CQE_WQE_ADDR_BYTE_OFFSET]) &
+      CQE_WQE_ADDR_BIT_MASK;
+
+    /* Byte 0*/
+    qpn = MOSAL_be32_to_cpu(cur_cqe[CQE_MY_QPN_BYTE_OFFSET]) & CQE_MY_QPN_BYTE_BIT_MASK;
+
+    /* new CQE: completion status is taken from "opcode" field */
+    opcode = ((volatile uint8_t*)cur_cqe)[CQE_OPCODE_BYTE_OFFSET];
+    send_cqe =
+      ((volatile uint8_t*)cur_cqe)[CQE_S_BYTE_OFFSET] & CQE_S_BYTE_MASK;
+    if( MOSAL_EXPECT_TRUE((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK) )
+    {
+      /* Completed OK */
+      rc = THHUL_cqm_comp_wc( thhul_cq_p, qpn, wqe_addr_32lsb, cur_cqe,
+                              opcode, send_cqe, p_wc );
+      if( MOSAL_EXPECT_FALSE( rc != HH_OK ) )
+        break;
+      wc_cnt++;
+    }
+    else
+    {
+      /* Completion with error */
+      rc = THHUL_cqm_failed_wc( thhul_cq_p, qpn, wqe_addr_32lsb, cur_cqe,
+                                opcode, p_wc );
+      if( MOSAL_EXPECT_FALSE( rc != HH_OK ) )
+        break;
+    }
+
+    *pp_next = p_wc;
+    pp_next = &p_wc->p_next;
+    p_wc = p_wc->p_next;
+    cur_cqe= (volatile u_int32_t*)
+      (thhul_cq_p->cur_buf.cqe_buf_base +
+       (thhul_cq_p->cur_buf.consumer_index << LOG2_CQE_SZ));
+  }
+
+  if( wc_cnt )
+  {
+    /* Ring the doorbell for all successful WCs. */
+    dbell_cqe( thhul_cq_p, wc_cnt );
+  }
+
+  MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock));
+
+  /* Set the head of the free list. */
+  *pp_free_wclist = p_wc;
+  /* Clear the tail of the done list. */
+  *pp_next = NULL;
+
+  if( rc != HH_OK )
+    return IB_ERROR;
+  else if( *pp_done_wclist )
+    return IB_SUCCESS;
+  else
+    return IB_NOT_FOUND;
+}
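+
+/* Editorial usage sketch for THHUL_cqm_poll4wc() above: the caller passes in a
+ * chain of free work-completion entries (linked via p_next) and gets back the
+ * unconsumed remainder plus a chain of completed entries. All names local to
+ * this sketch are illustrative. */
+static void example_poll_cq( HHUL_hca_hndl_t hca, HHUL_cq_hndl_t cq, ib_wc_t *wc_pool )
+{
+  ib_wc_t *p_free = wc_pool;  /* head of the caller's free list */
+  ib_wc_t *p_done = NULL;
+  ib_wc_t *p_wc;
+
+  if( THHUL_cqm_poll4wc( hca, cq, &p_free, &p_done ) == IB_SUCCESS )
+  {
+    for( p_wc = p_done; p_wc != NULL; p_wc = p_wc->p_next )
+    {
+      /* consume p_wc->status, p_wc->wr_id, p_wc->wc_type, ... */
+    }
+  }
+  /* p_free now heads the unused entries and may be reused for the next poll */
+}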
+
+
+ib_api_status_t
+THHUL_cqm_count_cqe(
+  IN  HHUL_hca_hndl_t hca_hndl,
+  IN  HHUL_cq_hndl_t cq,
+  OUT uint32_t* const p_n_cqes )
+{
+  THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
+  VAPI_cqe_num_t cqe_num;
+
+  /* parameters checks */
+  if (cq_p == NULL) {
+    MTL_ERROR1("THHUL_cqm_count_cqe: NULL CQ handle.\n");
+    return IB_INVALID_CQ_HANDLE;
+  }
+
+  if( !p_n_cqes )
+    return IB_INVALID_PARAMETER;
+
+  /* Find CQE and check ownership */
+  MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock));
+  /* The following check must be done with the CQ-lock held, since a resize may change cur_buf
+     at the same time */
+  cqe_num = ((1U << cq_p->cur_buf.log2_num_o_cqes) - cq_p->cur_buf.spare_cqes - 1);
+  *p_n_cqes = count_cqes(cq_p,cqe_num,NULL);
+
+  MOSAL_spinlock_unlock(&(cq_p->cq_lock));
+
+  return IB_SUCCESS;
+}
+#endif
+
+
+HH_ret_t THHUL_cqm_peek_cq(
+  /*IN*/ HHUL_hca_hndl_t hca_hndl,
+  /*IN*/ HHUL_cq_hndl_t cq,
+  /*IN*/ VAPI_cqe_num_t cqe_num
+)
+{
+  THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
+  volatile u_int32_t *cur_cqe;
+  HH_ret_t ret;
+
+  /* parameters checks */
+  if (cq_p == NULL) {
+    MTL_ERROR1("THHUL_cqm_peek_cq: NULL CQ handle.\n");
+    return HH_EINVAL_CQ_HNDL;
+  }
+
+  /* Find CQE and check ownership */
+  MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock));
+  /* The following check must be done with the CQ-lock held, since a resize may change cur_buf
+     at the same time */
+  if ((cqe_num >= ((1U << cq_p->cur_buf.log2_num_o_cqes) - cq_p->cur_buf.spare_cqes)) || (cqe_num == 0)) {
+    /* reminder: 1 CQE is always reserved */
+    MTL_ERROR2("THHUL_cqm_peek_cq(cqn=0x%X): cqe_num=%u , max_num_o_cqes=%u .\n",
+               cq_p->cq_num,cqe_num,
+               ((1U << cq_p->cur_buf.log2_num_o_cqes) - cq_p->cur_buf.spare_cqes - 1));
+    ret= HH_E2BIG_CQE_NUM;
+  } else {
+    cur_cqe= (volatile u_int32_t *)
+      (cq_p->cur_buf.cqe_buf_base +
+       (((cq_p->cur_buf.consumer_index + cqe_num - 1) & MASK32(cq_p->cur_buf.log2_num_o_cqes))
+        << LOG2_CQE_SZ));
+    ret= ( (!is_cqe_hw_own(cur_cqe)) || (count_cqes(cq_p,cqe_num,NULL) >= cqe_num)) ? HH_OK : HH_CQ_EMPTY ;
+  }
+
+  MOSAL_spinlock_unlock(&(cq_p->cq_lock));
+
+  return ret;
+}
+
+
+HH_ret_t THHUL_cqm_query_cq(
+  /*IN*/ HHUL_hca_hndl_t hca_hndl,
+  /*IN*/ HHUL_cq_hndl_t cq,
+  /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p)
+{
+  THHUL_cq_t *cq_p= (THHUL_cq_t*)cq;
+  HH_ret_t ret=HH_OK;
+
+  /* parameters checks */
+  if (cq_p == NULL) {
+    MTL_ERROR1("THHUL_cqm_query_cq: NULL CQ handle.\n");
+    return HH_EINVAL_CQ_HNDL;
+  }
+
+  /* Read the CQ size under the CQ-lock, since a resize may change cur_buf */
+  MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock));
+  *num_o_cqes_p= ((1U << cq_p->cur_buf.log2_num_o_cqes) - 1 - cq_p->cur_buf.spare_cqes) ;
+
+  MOSAL_spinlock_unlock(&(cq_p->cq_lock));
+
+  return ret;
+}
+
+static void rearm_cq(THHUL_cq_t *cq_p, MT_bool solicitedNotification) {
+  volatile u_int32_t chimeWords[2];
+  THH_uar_t uar = cq_p->uar;
+
+#ifndef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__
+  MOSAL_spinlock_dpc_lock(&(uar->uar_lock));
+#endif
+
+#if THHUL_CQM_COALESCE_CQ_DOORBELLS
+  if (cq_p->coalesce_count) {
+    cq_p->coalesce_count = 0;
+    chimeWords[0] = MOSAL_cpu_to_be32(0
+      | (u_int32_t)cq_p->cq_num
+      | (TAVOR_IF_UAR_CQ_SET_CI << 24)
+      );
+
+    chimeWords[1] = MOSAL_cpu_to_be32(cq_p->cur_buf.consumer_index);
+    MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base + UAR_CQ_DBELL_OFFSET, *(volatile u_int64_t*)chimeWords);
+  }
+#endif
+
+  chimeWords[0] = MOSAL_cpu_to_be32(0
+    | (u_int32_t)cq_p->cq_num
+    | ((solicitedNotification ? 
TAVOR_IF_UAR_CQ_NOTIF_SOLIC_COMP: TAVOR_IF_UAR_CQ_NOTIF_NEXT_COMP) << 24) + ); + + chimeWords[1] = 0xffffffff; + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base + UAR_CQ_DBELL_OFFSET, *(volatile u_int64_t*)chimeWords); + +#ifndef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + +} + + +HH_ret_t THHUL_cqm_req_comp_notif( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cq_notif_type_t notif_type +) +{ + THHUL_cq_t *thhul_cq_p= (THHUL_cq_t*)cq; + u_int32_t last_consumer_index; + HH_ret_t rc; + + if (thhul_cq_p == NULL) { + MTL_ERROR1("THHUL_cqm_req_comp_notif: NULL CQ handle.\n"); + return HH_EINVAL_CQ_HNDL; + } + + MOSAL_spinlock_dpc_lock(&(thhul_cq_p->cq_lock)); +#ifdef NO_CQ_CI_DBELL + /* In "overrun ignore" mode, last consumed must be given */ + last_consumer_index= + ((thhul_cq_p->cur_buf.consumer_index - 1) & MASK32(thhul_cq_p->cur_buf.log2_num_o_cqes)); +#else + /* Consumer index is updated on every poll, so InfiniHost has its updated value */ + last_consumer_index= 0xFFFFFFFF ; /* Use current CI value */ +#endif + sync_consumer_index(thhul_cq_p, __func__);/*Consumer index must be updated before req. an event*/ + MOSAL_spinlock_unlock(&(thhul_cq_p->cq_lock)); + + switch (notif_type) { + case VAPI_SOLIC_COMP: + rc= THH_uar_cq_cmd(thhul_cq_p->uar,TAVOR_IF_UAR_CQ_NOTIF_SOLIC_COMP, + thhul_cq_p->cq_num,last_consumer_index); + break; + case VAPI_NEXT_COMP: + rc= THH_uar_cq_cmd(thhul_cq_p->uar,TAVOR_IF_UAR_CQ_NOTIF_NEXT_COMP, + thhul_cq_p->cq_num,last_consumer_index); + break; + default: + rc= HH_EINVAL; /* Invalid notification request */ + } + + return rc; +} + +HH_ret_t THHUL_cqm_req_ncomp_notif( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cqe_num_t cqe_num +) +{ + THHUL_cq_t *cq_p= (THHUL_cq_t*)cq; + VAPI_cqe_num_t hw_cqe_cnt,sw_cqe_cnt; + HH_ret_t rc; + + if (cq_p == NULL) { + MTL_ERROR1("THHUL_cqm_req_ncomp_notif: NULL CQ handle.\n"); + return HH_EINVAL; + } + + /* cqe_num fix (due to CQEs with error... external CQEs) : + * Check if cqe_num requirement was already fulfilled with external/sw CQEs + * If yes: generate immediate event by setting cqe_num to 1 + * Otherwise: cqe_num is set to the HW number of CQEs based on current difference from sw_cnt + */ + MOSAL_spinlock_dpc_lock(&(cq_p->cq_lock)); + /* Check cqe_num limits (must be done with lock held to avoid a race with "resize") */ + if ((cqe_num >= ((1U << cq_p->cur_buf.log2_num_o_cqes) - cq_p->cur_buf.spare_cqes)) || + (cqe_num == 0) || + (cqe_num > MAX_NCOMP_NOTIF)) { + /* reminder: 1 CQE is always reserved */ + MTL_ERROR2("THHUL_cqm_req_ncomp_notif(cqn=%d): cqe_num=%d , max_num_o_cqes=%d .\n", + cq_p->cq_num,cqe_num, + ((1U << cq_p->cur_buf.log2_num_o_cqes) - cq_p->cur_buf.spare_cqes - 1)); + MOSAL_spinlock_unlock(&(cq_p->cq_lock)); + return HH_E2BIG_CQE_NUM; + } + sw_cqe_cnt= count_cqes(cq_p,cqe_num,&hw_cqe_cnt); + cqe_num= (sw_cqe_cnt >= cqe_num) ? 1 : hw_cqe_cnt + (cqe_num - sw_cqe_cnt) ; + + sync_consumer_index(cq_p, __func__);/*Consumer index must be updated before req. 
an event*/
+
+  rc= THH_uar_cq_cmd(cq_p->uar,TAVOR_IF_UAR_CQ_NOTIF_NCOMP,cq_p->cq_num,cqe_num);
+  MOSAL_spinlock_unlock(&(cq_p->cq_lock));
+
+  return rc;
+}
+
+/**********************************************************************************************
+ *                    Private Functions
+ **********************************************************************************************/
+
+
+static HH_ret_t cqe_buf_alloc(THHUL_cqe_buf_t *cqe_buf, VAPI_cqe_num_t num_o_cqes)
+{
+  u_int32_t i;
+  volatile u_int8_t* cur_cqe_owner_byte;
+  VAPI_cqe_num_t actual_num_o_cqes; /* Number of CQEs in the CQEs buffer */
+  VAPI_cqe_num_t possible_spare_cqes;
+
+  cqe_buf->log2_num_o_cqes= floor_log2(num_o_cqes) + 1; /* next power of 2 including extra CQE */
+  actual_num_o_cqes= (1 << cqe_buf->log2_num_o_cqes);
+#if defined(MT_KERNEL) && defined(__LINUX__)
+  if (((actual_num_o_cqes + 1) * CQE_SZ) <= CQ_KMALLOC_LIMIT)
+    cqe_buf->cqe_buf_orig=
+      MOSAL_pci_phys_alloc_consistent((actual_num_o_cqes + 1) * CQE_SZ, LOG2_CQE_SZ); /* one extra for alignment */
+  else
+#endif
+#if !defined(MT_KERNEL) && defined(MT_FORK_SUPPORT)
+/* Fork workaround - cover full pages */
+  cqe_buf->cqe_buf_orig=
+    MOSAL_pci_virt_alloc_consistent(
+      (MOSAL_SYS_PAGE_SIZE-1)/*for page alignment*/ +
+      MT_UP_ALIGNX_ULONG_PTR(actual_num_o_cqes * CQE_SZ, MOSAL_SYS_PAGE_SHIFT),
+      LOG2_CQE_SZ);
+#else
+  cqe_buf->cqe_buf_orig=
+#ifdef WIN32
+    /* Use pageable memory, since it gets registered. */
+    cl_pzalloc( (actual_num_o_cqes + 1) * CQE_SZ );
+#else /* WIN32 */
+    MOSAL_pci_virt_alloc_consistent((actual_num_o_cqes + 1) * CQE_SZ, LOG2_CQE_SZ); /* one extra for alignment */
+#endif /* WIN32 */
+#endif
+  if (cqe_buf->cqe_buf_orig == NULL) {
+    MTL_ERROR1("%s: Failed to allocate CQEs buffer of 0x%X bytes.\n",__func__,
+               (actual_num_o_cqes + 1) * CQE_SZ);
+    return HH_EAGAIN;
+  }
+#if !defined(MT_KERNEL) && defined(MT_FORK_SUPPORT)
+  cqe_buf->cqe_buf_base= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)cqe_buf->cqe_buf_orig,MOSAL_SYS_PAGE_SHIFT);
+#else
+  /* buffer must be aligned to CQE size */
+  cqe_buf->cqe_buf_base= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)cqe_buf->cqe_buf_orig,LOG2_CQE_SZ);
+#endif
+  /* Initialize all CQEs to HW ownership */
+  cur_cqe_owner_byte= (volatile u_int8_t*)(cqe_buf->cqe_buf_base+CQE_OWNER_BYTE_OFFSET);
+  for (i= 0; i < actual_num_o_cqes; i++) {
+    *cur_cqe_owner_byte= (1U<<7); /* HW-ownership bit (MSbit of owner byte) - reconstructed; this line was garbled in the patch */
+    cur_cqe_owner_byte+= CQE_SZ;  /* advance to the owner byte of the next CQE (reconstructed) */
+  }
+  /* Entries left beyond the requested size and the reserved CQ-full CQE may serve
+   * as spare CQEs for doorbell coalescing (computation reconstructed from the
+   * capacity invariant used in create_cq_prep above): */
+  possible_spare_cqes= actual_num_o_cqes - num_o_cqes - 1;
+  cqe_buf->spare_cqes= (possible_spare_cqes > MAX_CQDB2DELAY) ?
+    MAX_CQDB2DELAY : possible_spare_cqes;
+  MTL_DEBUG4(MT_FLFMT("%s: spare_cqes=%u"),__func__,cqe_buf->spare_cqes);
+  cqe_buf->consumer_index= 0;
+  return HH_OK;
+}
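+
+/* Editorial self-check (hypothetical, not part of the driver) of the sizing
+ * rule used by cqe_buf_alloc() above: rounding up to the next power of two
+ * always leaves room for the requested entries plus the reserved CQ-full CQE. */
+static void check_cq_sizing(VAPI_cqe_num_t num_o_cqes)
+{
+  u_int32_t log2_sz= floor_log2(num_o_cqes) + 1;  /* same rule as cqe_buf_alloc */
+  u_int32_t actual=  1U << log2_sz;
+  /* 2^(floor_log2(n)+1) > n, hence n usable entries plus one reserved CQE fit */
+  CL_ASSERT( actual >= (u_int32_t)num_o_cqes + 1 );
+}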
+
+static void cqe_buf_free(THHUL_cqe_buf_t *cqe_buf)
+{
+  /* Cleanup CQ resources */
+#if defined(MT_KERNEL) && defined(__LINUX__)
+  if ((((1U << cqe_buf->log2_num_o_cqes)+1)*CQE_SZ) <= CQ_KMALLOC_LIMIT)
+    MOSAL_pci_phys_free_consistent(cqe_buf->cqe_buf_orig, ((1U << cqe_buf->log2_num_o_cqes)+1)*CQE_SZ);
+  else
+#endif
+#ifdef WIN32
+    cl_free( cqe_buf->cqe_buf_orig );
+#else
+    MOSAL_pci_virt_free_consistent(cqe_buf->cqe_buf_orig, ((1U << cqe_buf->log2_num_o_cqes)+1)*CQE_SZ);
+#endif
+}
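+
+/* Editorial sketch of the CQE ownership protocol this file relies on: the
+ * MSbit of the CQE owner byte marks hardware ownership. The two helpers below
+ * mirror the is_cqe_hw_own()/set_cqe_to_hw_own() calls used throughout, but
+ * their bodies are an assumed condensation, not the driver's real macros. */
+static MT_bool example_is_cqe_hw_own(volatile u_int32_t *cqe)
+{
+  return ( ((volatile u_int8_t*)cqe)[CQE_OWNER_BYTE_OFFSET] & 0x80 ) != 0;
+}
+
+static void example_set_cqe_to_hw_own(volatile u_int32_t *cqe)
+{
+  ((volatile u_int8_t*)cqe)[CQE_OWNER_BYTE_OFFSET] |= 0x80; /* hand the entry back to HW */
+}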
+
+/* Perform the "CQ cleanup" flow (removing of CQEs of a RESET QP) and return
+ * amount of removed CQEs (the change in consumer index)
+ * - cur_pi_p : return PI at given buffer (to be used in THHUL_CQ_RESIZE_PREP state)
+ */
+static u_int32_t cqe_buf_cleanup(THHUL_cqe_buf_t *cqe_buf,IB_wqpn_t qp,
+                                 THHUL_srqm_t srqm, HHUL_srq_hndl_t srq,
+                                 u_int32_t *cur_producer_index_p)
+{
+  u_int32_t cur_tail_index,next_consumer_index,cur_cqe_index;
+  u_int32_t outstanding_cqes,i;
+  u_int32_t removed_cqes= 0;
+  volatile u_int32_t *cur_cqe_p;
+  volatile u_int32_t *next_cqe_p;
+  IB_wqpn_t cur_qpn;
+  const u_int32_t num_o_cqes_mask= MASK32(cqe_buf->log2_num_o_cqes);
+  MT_bool is_rq_cqe;
+  u_int32_t wqe_addr_32lsb;
+  VAPI_wr_id_t wqe_id;
+
+
+  /* Find the last CQE in software ownership (cur_tail) */
+  outstanding_cqes= 0;
+  cur_tail_index= cqe_buf->consumer_index;
+#ifndef NO_CQ_CI_DBELL
+  while (1) { /* Break out when find a CQE in HW ownership */
+    /* (there must be at least one CQE in HW ownership - the reserved "full" CQE)*/
+#else
+  while (outstanding_cqes <= (1 << cqe_buf->log2_num_o_cqes)) {
+    /* In CQ-overrun-ignore mode, all CQEs in CQ may be in SW ownership... */
+#endif
+    cur_cqe_p= (volatile u_int32_t *)
+      (cqe_buf->cqe_buf_base + (cur_tail_index << LOG2_CQE_SZ));
+    if (is_cqe_hw_own(cur_cqe_p))  break; /* no more outstanding CQEs */
+
+    outstanding_cqes++;
+    cur_tail_index= (cur_tail_index + 1) & num_o_cqes_mask;
+  }
+  *cur_producer_index_p= cur_tail_index; /* To be used on resized buffer */
+  /* move back to last in SW ownership */
+  cur_cqe_index= next_consumer_index= (cur_tail_index - 1) & num_o_cqes_mask;
+
+  MTL_DEBUG4(MT_FLFMT("Found %d outstanding CQEs. Last CQE at index %d."),
+             outstanding_cqes,cur_cqe_index);
+
+  /* Scan back CQEs (cur_cqe) and move all CQEs not of given qpn back to "cur_head" */
+  for (i= 0; i < outstanding_cqes; i++) {
+    cur_cqe_p= (volatile u_int32_t *)
+      (cqe_buf->cqe_buf_base + (cur_cqe_index << LOG2_CQE_SZ));
+    cur_qpn=
+      MOSAL_be32_to_cpu(cur_cqe_p[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,my_qpn)>>2])
+      & MASK32(24);
+
+    if (cur_qpn == qp) { /* A CQE to remove */
+      /* Go back only with the cur_cqe_index, leave next_consumer_index behind (for next copy) */
+      cur_cqe_index= (cur_cqe_index - 1) & num_o_cqes_mask;
+
+      /* If associated with SRQ must invoke THHUL_srqm_comp to release WQE */
+      if (srq != HHUL_INVAL_SRQ_HNDL) {
+        is_rq_cqe= (MT_EXTRACT32(
+          MOSAL_be32_to_cpu(cur_cqe_p[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,s)>>2]),
+          MT_BIT_OFFSET(tavorprm_completion_queue_entry_st,s) & MASK32(5),
+          MT_BIT_SIZE(tavorprm_completion_queue_entry_st,s)) == 0);
+        if (is_rq_cqe) { /* Completion must be reported to SRQ */
+          wqe_addr_32lsb= /* Mask off size bits from dword of WQE addr. */
+            MOSAL_be32_to_cpu(
+              cur_cqe_p[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st,wqe_adr)>>2]
+            ) &
+            (0xFFFFFFFF << MT_BIT_SIZE(tavorprm_completion_queue_entry_st,wqe_size));
+          THHUL_srqm_comp(srqm, srq, wqe_addr_32lsb, &wqe_id); /* Release WQE */
+        }
+      }
+
+    } else { /* A CQE to copy (if any CQE was removed) */
+      if (cur_cqe_index != next_consumer_index) { /* Copy required */
+        MTL_DEBUG4(MT_FLFMT("Moving CQE at index %d to index %d\n"),
+                   cur_cqe_index,next_consumer_index);
+        next_cqe_p= (volatile u_int32_t *)
+          (cqe_buf->cqe_buf_base + (next_consumer_index << LOG2_CQE_SZ));
+        memcpy((void*)next_cqe_p,(void*)cur_cqe_p,CQE_SZ);
+      }
+      /* Go back with both indices */
+      cur_cqe_index= (cur_cqe_index - 1) & num_o_cqes_mask;
+      next_consumer_index= (next_consumer_index - 1) & num_o_cqes_mask;
+    }
+  }
+
+  if (cur_cqe_index != next_consumer_index) { /* CQEs were removed */
+    /* Return to hardware ownership CQEs at amount of removed CQEs (from consumer index side) */
+    for (cur_cqe_index= cqe_buf->consumer_index; ;
+         cur_cqe_index= (cur_cqe_index + 1) & num_o_cqes_mask) {
+      removed_cqes++;
+      cur_cqe_p= (volatile u_int32_t *)
+        (cqe_buf->cqe_buf_base + (cur_cqe_index << LOG2_CQE_SZ));
+      set_cqe_to_hw_own(cur_cqe_p);
+      if (cur_cqe_index == next_consumer_index)  break; /* Returned all including this one */
+
+    }
+
+    /* update consumer index - go back to location of last copied */
+    cqe_buf->consumer_index= (next_consumer_index + 1) & num_o_cqes_mask;
+  }
+
+  return removed_cqes;
+}
+
+/* This function copies any old CQEs left in cur_buf to resized_buf */
+/* (Should be called with CQ lock held) */
+static void cqe_buf_cpy2resized(
+  THHUL_cqe_buf_t *cur_buf,
+  THHUL_cqe_buf_t *resized_buf,
+  MT_bool compute_new_pi)  /* New resize-CQ flow */
+{
+  u_int32_t cur_cqe_index;
+  u_int32_t outs_cqes_at_ci=0,outs_cqes_at_cq_base=0;
+  u_int32_t *outs_cqes_p; /* pointer to correct count (one of the above) */
+  volatile u_int32_t *cur_cqe_p;
+  void *cur_cpy_p;     /* Pointer to copy (from) point */
+  void *resized_cpy_p; /* Pointer to copy (to) point */
+  u_int32_t resized_cqes_at_top; /* CQEs available to buffer's top - when wrap around */
+  u_int32_t resized_cur_pi; /* Current "producer index" */
+  const u_int32_t num_o_cqes_mask= MASK32(cur_buf->log2_num_o_cqes);
+  const u_int32_t new_num_o_cqes_mask= MASK32(resized_buf->log2_num_o_cqes); /* for modulo */
+
+
+  /* Count number of CQEs in cur_buf */
+  outs_cqes_p= &outs_cqes_at_ci; /* First count the CQEs above the consumer index */
+  cur_cqe_index= cur_buf->consumer_index;
+  while (1) { /* Break out when find a CQE in HW ownership */
+    /* (there must be at least one CQE in HW ownership - the reserved "full" CQE)*/
+    cur_cqe_p= (volatile u_int32_t *)
+      (cur_buf->cqe_buf_base + (cur_cqe_index << LOG2_CQE_SZ));
+    if (is_cqe_hw_own(cur_cqe_p))  break; /* no more outstanding CQEs */
+    (*outs_cqes_p)++;
+    cur_cqe_index= (cur_cqe_index + 1) & num_o_cqes_mask;
+    if (cur_cqe_index == 0) { /* next CQEs are at CQ base */
+      outs_cqes_p= &outs_cqes_at_cq_base;
+    }
+  }
+
+  if (compute_new_pi) {
+    resized_buf->consumer_index= cur_cqe_index & new_num_o_cqes_mask; /* New fixed resize-CQ */
+    MTL_DEBUG5(MT_FLFMT("%s: old_pi=%u new_pi=%u new_log2_sz=%u"), __func__,
+               cur_cqe_index, resized_buf->consumer_index, resized_buf->log2_num_o_cqes);
+  } else { /* legacy flow */
+    /* Number of outstanding CQEs in old buffer is always less than new consumer index */
+    if (resized_buf->consumer_index < outs_cqes_at_ci + outs_cqes_at_cq_base) { /* sanity check */
+      MTL_ERROR1(MT_FLFMT(
+        "THHUL_cqm_resize_cq_done: Unexpected error !"
+        " found more outstanding CQEs (%d) than resized buffer's consumer index (%d) !"),
+        outs_cqes_at_ci + outs_cqes_at_cq_base,resized_buf->consumer_index);
+      return;
+    }
+  }
+
+  resized_buf->consumer_index = /* This computation should work for legacy mode, too */
+    (resized_buf->consumer_index - outs_cqes_at_ci - outs_cqes_at_cq_base) & new_num_o_cqes_mask;
+  resized_cur_pi= resized_buf->consumer_index; /* Where CQE copy starts at resized buffer */
+
+  if (outs_cqes_at_ci > 0) { /* First copy CQEs above consumer index */
+    cur_cpy_p= (void *)
+      (cur_buf->cqe_buf_base + (cur_buf->consumer_index << LOG2_CQE_SZ));
+    resized_cpy_p= (void *)
+      (resized_buf->cqe_buf_base + (resized_buf->consumer_index << LOG2_CQE_SZ));
+    resized_cqes_at_top= (1U << resized_buf->log2_num_o_cqes) - resized_cur_pi;
+    if (resized_cqes_at_top > outs_cqes_at_ci) { /* enough room for all CQEs at CI ? */
+      memcpy(resized_cpy_p, cur_cpy_p, outs_cqes_at_ci << LOG2_CQE_SZ);
+    } else {
+      memcpy(resized_cpy_p, cur_cpy_p, resized_cqes_at_top << LOG2_CQE_SZ);
+      resized_cpy_p= (void *)(resized_buf->cqe_buf_base);
+      cur_cpy_p= (char*)cur_cpy_p + (resized_cqes_at_top << LOG2_CQE_SZ);
+      memcpy(resized_cpy_p, cur_cpy_p, (outs_cqes_at_ci - resized_cqes_at_top) << LOG2_CQE_SZ);
+    }
+    resized_cur_pi= (resized_cur_pi + outs_cqes_at_ci) & new_num_o_cqes_mask;
+  }
+  if (outs_cqes_at_cq_base > 0) { /* Next copy CQEs at CQ base (wrap around...) */
+    cur_cpy_p= (void *) cur_buf->cqe_buf_base ;
+    resized_cpy_p= (void *) (resized_buf->cqe_buf_base + (resized_cur_pi << LOG2_CQE_SZ)) ;
+    resized_cqes_at_top= (1U << resized_buf->log2_num_o_cqes) - resized_cur_pi;
+    if (resized_cqes_at_top > outs_cqes_at_cq_base) { /* enough room for all CQEs at base ? */
+      memcpy(resized_cpy_p, cur_cpy_p, outs_cqes_at_cq_base << LOG2_CQE_SZ);
+    } else {
+      memcpy(resized_cpy_p, cur_cpy_p, resized_cqes_at_top << LOG2_CQE_SZ);
+      resized_cpy_p= (void *)(resized_buf->cqe_buf_base);
+      cur_cpy_p= (char*)cur_cpy_p + (resized_cqes_at_top << LOG2_CQE_SZ);
+      memcpy(resized_cpy_p, cur_cpy_p, (outs_cqes_at_cq_base - resized_cqes_at_top) << LOG2_CQE_SZ);
+    }
+  }
+
+  return;
+}
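+
+/* Editorial illustration of the wrap-aware copy performed twice by
+ * cqe_buf_cpy2resized() above: copying n ring entries may take two memcpy
+ * calls when the destination window wraps past the top of the buffer.
+ * Generic sketch with hypothetical names: */
+static void ring_copy(void *dst_base, u_int32_t dst_index, u_int32_t dst_log2_sz,
+                      const void *src, u_int32_t n_entries)
+{
+  u_int32_t room_at_top= (1U << dst_log2_sz) - dst_index; /* entries before the wrap point */
+  if (room_at_top >= n_entries) {  /* contiguous - one copy is enough */
+    memcpy((char*)dst_base + (dst_index << LOG2_CQE_SZ), src, n_entries << LOG2_CQE_SZ);
+  } else {                         /* wraps - copy the top, then the base */
+    memcpy((char*)dst_base + (dst_index << LOG2_CQE_SZ), src, room_at_top << LOG2_CQE_SZ);
+    memcpy(dst_base, (const char*)src + (room_at_top << LOG2_CQE_SZ),
+           (n_entries - room_at_top) << LOG2_CQE_SZ);
+  }
+}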
"THHUL_cqm_resize_cq_done: Unexpected error !" + " found more outstanding CQEs (%d) than resized buffer's consumer index (%d) !"), + outs_cqes_at_ci + outs_cqes_at_cq_base,resized_buf->consumer_index); + return; + } + } + + resized_buf->consumer_index = /* This computation should work for legacy mode, too */ + (resized_buf->consumer_index - outs_cqes_at_ci - outs_cqes_at_cq_base) & new_num_o_cqes_mask; + resized_cur_pi= resized_buf->consumer_index; /* Where CQE copy starts at resized buffer */ + + if (outs_cqes_at_ci > 0) { /* First copy CQEs above consumer index */ + cur_cpy_p= (void *) + (cur_buf->cqe_buf_base + (cur_buf->consumer_index << LOG2_CQE_SZ)); + resized_cpy_p= (void *) + (resized_buf->cqe_buf_base + (resized_buf->consumer_index << LOG2_CQE_SZ)); + resized_cqes_at_top= (1U<log2_num_o_cqes) - resized_cur_pi; + if (resized_cqes_at_top > outs_cqes_at_ci) { /* enough room for all CQEs at CI ? */ + memcpy(resized_cpy_p, cur_cpy_p, outs_cqes_at_ci << LOG2_CQE_SZ); + } else { + memcpy(resized_cpy_p, cur_cpy_p, resized_cqes_at_top << LOG2_CQE_SZ); + resized_cpy_p= (void *)(resized_buf->cqe_buf_base); + (char*)cur_cpy_p += (resized_cqes_at_top << LOG2_CQE_SZ); + memcpy(resized_cpy_p, cur_cpy_p, (outs_cqes_at_ci - resized_cqes_at_top) << LOG2_CQE_SZ); + } + resized_cur_pi= (resized_cur_pi + outs_cqes_at_ci) & new_num_o_cqes_mask; + } + if (outs_cqes_at_cq_base > 0) { /* Next copy CQEs at CQ base (wrap around...) */ + cur_cpy_p= (void *) cur_buf->cqe_buf_base ; + resized_cpy_p= (void *) (resized_buf->cqe_buf_base + (resized_cur_pi << LOG2_CQE_SZ)) ; + resized_cqes_at_top= (1U<log2_num_o_cqes) - resized_cur_pi; + if (resized_cqes_at_top > outs_cqes_at_cq_base) { /* enough room for all CQEs at base ? */ + memcpy(resized_cpy_p, cur_cpy_p, outs_cqes_at_cq_base << LOG2_CQE_SZ); + } else { + memcpy(resized_cpy_p, cur_cpy_p, resized_cqes_at_top << LOG2_CQE_SZ); + resized_cpy_p= (void *)(resized_buf->cqe_buf_base); + (char*)cur_cpy_p += (resized_cqes_at_top << LOG2_CQE_SZ); + memcpy(resized_cpy_p, cur_cpy_p, (outs_cqes_at_cq_base - resized_cqes_at_top) << LOG2_CQE_SZ); + } + } + + return; +} + +/* Count number of real CQEs (i.e., including CQEs with error) up to given limit */ +/* (return the real number of CQEs) */ +/* The function must be invoked with CQ lock held */ +static VAPI_cqe_num_t count_cqes( + /*IN*/ THHUL_cq_t *cq_p, + /*IN*/ VAPI_cqe_num_t cqe_cnt_limit, /* Limit count up to given HW CQEs */ + /*OUT*/ VAPI_cqe_num_t *hw_cqe_cnt_p /* HW CQEs count (optional) */ +) +{ + volatile u_int32_t *cur_cqe; + VAPI_cqe_num_t sw_cqe_cntr= 0; + VAPI_cqe_num_t hw_cqe_cntr= 0; + u_int32_t wqe_addr_32lsb; + IB_wqpn_t qpn; + u_int8_t opcode; + u_int16_t dbdcnt= 0; + + /* Count CQEs including "external" of CQEs with error */ + while (hw_cqe_cntr < cqe_cnt_limit) { + /* Find CQE and check ownership */ + cur_cqe= (volatile u_int32_t *) + (cq_p->cur_buf.cqe_buf_base + + (((cq_p->cur_buf.consumer_index + hw_cqe_cntr) & MASK32(cq_p->cur_buf.log2_num_o_cqes)) + << LOG2_CQE_SZ)); + if (is_cqe_hw_own(cur_cqe)) break; /* no more CQEs */ + + opcode= ((volatile u_int8_t*)cur_cqe)[CQE_OPCODE_BYTE_OFFSET]; /* get completion status */ + if ((opcode & CQE_ERROR_STATUS_MASK) != CQE_ERROR_STATUS_MASK) { /* Completed OK */ + sw_cqe_cntr++; + } else { /* CQE with error - count external CQEs */ + + wqe_addr_32lsb= ( + MOSAL_cpu_to_be32( + cur_cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st, wqe_adr)>>2]) + & + (~MASK32(CQE_WQE_ADR_BIT_SZ))); + + qpn= ( + MOSAL_cpu_to_be32( + 
cur_cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st, my_qpn)>>2]) + & + MASK32(24) ); + +/*** warning C4244: '=' : conversion from 'unsigned long' to 'u_int16_t', possible loss of data ***/ + dbdcnt= (u_int16_t)( + MOSAL_cpu_to_be32( + cur_cqe[MT_BYTE_OFFSET(tavorprm_completion_queue_entry_st, + immediate_ethertype_pkey_indx_eecredits)>>2]) + & + MASK32(CQE_ERROR_DBDCNT_BIT_SIZE) ); + + /* Add total number of WQEs "hang" over given CQE with error */ + sw_cqe_cntr+= THHUL_qpm_wqe_cnt(cq_p->qpm, qpn, wqe_addr_32lsb, dbdcnt); + } + hw_cqe_cntr++; /* Continue to the next HW CQE */ + } + + if (hw_cqe_cnt_p) *hw_cqe_cnt_p= hw_cqe_cntr; + MTL_DEBUG5(MT_FLFMT("%s: cqe_cnt_limit=%d sw_cqe_cntr=%d hw_cqe_cntr=%d\n"),__func__, + cqe_cnt_limit,sw_cqe_cntr,hw_cqe_cntr); + return sw_cqe_cntr; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h new file mode 100644 index 00000000..170cc137 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_cqm/thhul_cqm.h @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THHUL_CQM_H +#define H_THHUL_CQM_H + +#include +#include +#include +#include + + +DLL_API HH_ret_t THHUL_cqm_create( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ THHUL_cqm_t *cqm_p +); + +DLL_API HH_ret_t THHUL_cqm_destroy ( + /*IN*/ THHUL_cqm_t cqm +); + + +DLL_API HH_ret_t THHUL_cqm_create_cq_prep( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ VAPI_cqe_num_t num_o_cqes, + /*OUT*/ HHUL_cq_hndl_t *hhul_cq_p, + /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p, + /*OUT*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_cqm_create_cq_done( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_cq_hndl_t hhul_cq, + /*IN*/ HH_cq_hndl_t hh_cq, + /*IN*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_cqm_destroy_cq_done( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq +); + + +DLL_API ib_api_status_t THHUL_cqm_resize_cq_prep( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cqe_num_t num_o_cqes, + /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p, + /*OUT*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_cqm_resize_cq_done( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ void/*THH_cq_ul_resources_t*/ *cq_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_cqm_cq_cleanup( + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ IB_wqpn_t qp, + /*IN*/ THHUL_srqm_t srqm, + /*IN*/ HHUL_srq_hndl_t srq +); + +#ifdef WIN32 +DLL_API ib_api_status_t +THHUL_cqm_poll4wc( + IN HHUL_hca_hndl_t hca_hndl, + IN HHUL_cq_hndl_t cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + +DLL_API ib_api_status_t +THHUL_cqm_count_cqe( + IN HHUL_hca_hndl_t hca_hndl, + IN HHUL_cq_hndl_t cq, + OUT uint32_t* const p_n_cqes ); +#endif + +DLL_API HH_ret_t THHUL_cqm_query_cq( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*OUT*/ VAPI_cqe_num_t *num_o_cqes_p +); + +DLL_API HH_ret_t THHUL_cqm_peek_cq( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cqe_num_t cqe_num +); + +DLL_API HH_ret_t THHUL_cqm_req_comp_notif( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cq_notif_type_t notif_type +); + +DLL_API HH_ret_t THHUL_cqm_req_ncomp_notif( + /*IN*/ HHUL_hca_hndl_t hca_hndl, + /*IN*/ HHUL_cq_hndl_t cq, + /*IN*/ VAPI_cqe_num_t cqe_num +) ; +#endif /* H_THHUL_CQM_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c new file mode 100644 index 00000000..f49bf701 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_THHUL_HOB_C + +#include +#include +#include "thhul_hob.h" +#include +#include +#include +#include +#include +#include + +#define GET_HOB(hhul_dev) (hhul_dev ? ((THHUL_hob_t)(hhul_dev->device)) : NULL) + +struct THHUL_hob_st { /* *THHUL_hob_t; */ + HHUL_hca_dev_t hhul_hca; /* HHUL's device context */ + THH_hca_ul_resources_t ul_res; /* Resources allocated by HH_alloc_ul_resources() */ + /* Included objects */ + THH_uar_t uar; + THHUL_pdm_t pdm; + THHUL_cqm_t cqm; + THHUL_qpm_t qpm; + THHUL_srqm_t srqm; + THHUL_mwm_t mwm; + + /* global_resource_cnt: + * A counter for resources set up for within this THHUL_hob context. + * This counter enables avoiding object destruction in case there are still + * resources that were not freed (e.g. QPs). + * Each THHUL object is responsible to update this counter using THHUL_hob_res_add/rem */ + u_int32_t global_resource_cnt; + MOSAL_mutex_t cntr_lock; /* A lock for assuring atomicity of the counter lock */ +}; + +static HHUL_if_ops_t thhul_ops= +{ + THHUL_hob_destroy /* HHULIF_cleanup_user_level*/, + THHUL_pdm_alloc_pd_prep /* HHULIF_alloc_pd_prep */, + THHUL_pdm_alloc_pd_avs_prep /* HHULIF_alloc_pd_avs_prep */, + THHUL_pdm_alloc_pd_done /* HHULIF_alloc_pd_done */, + THHUL_pdm_free_pd_prep /* HHULIF_free_pd_prep */, + THHUL_pdm_free_pd_done /* HHULIF_free_pd_done */, + THHUL_mwm_alloc_mw /* HHULIF_alloc_mw */, + THHUL_mwm_bind_mw /* HHULIF_bind_mw */, + THHUL_mwm_free_mw /* HHULIF_free_mw */, + THHUL_pdm_create_ud_av /* HHULIF_create_ud_av */, + THHUL_pdm_modify_ud_av /* HHULIF_modify_ud_av */, + THHUL_pdm_query_ud_av /* HHULIF_query_ud_av */, + THHUL_pdm_destroy_ud_av /* HHULIF_destroy_ud_av */, + THHUL_cqm_create_cq_prep /* HHULIF_create_cq_prep */, + THHUL_cqm_create_cq_done /* HHULIF_create_cq_done */, + THHUL_cqm_resize_cq_prep /* HHULIF_resize_cq_prep */, + THHUL_cqm_resize_cq_done /* HHULIF_resize_cq_done */, + NULL /* HHULIF_poll4cqe */, + NULL /* HHULIF_poll_and_rearm_cq */, + THHUL_cqm_peek_cq /* HHULIF_peek_cq */, + THHUL_cqm_req_comp_notif /* HHULIF_req_comp_notif */, + THHUL_cqm_req_ncomp_notif /* HHULIF_req_ncomp_notif */, + THHUL_cqm_destroy_cq_done /* HHULIF_destroy_cq_done */, + THHUL_qpm_create_qp_prep /* HHULIF_create_qp_prep */, + THHUL_qpm_special_qp_prep /* HHULIF_special_qp_prep */, + THHUL_qpm_create_qp_done /* HHULIF_create_qp_done */, + THHUL_qpm_modify_qp_done /* HHULIF_modify_qp_done */, + THHUL_qpm_post_send_req /* HHULIF_post_send_req */, +#ifndef WIN32 + THHUL_qpm_post_send_req2 /* HHULIF_post_send_req2 */, +#else + NULL, +#endif + THHUL_qpm_post_inline_send_req /* HHULIF_post_inline_send_req */, + THHUL_qpm_post_send_reqs /* HHULIF_post_send_reqs */, + THHUL_qpm_post_gsi_send_req /* HHULIF_post_gsi_send_req */, + THHUL_qpm_post_recv_req /* HHULIF_post_recv_req */, +#ifndef WIN32 + THHUL_qpm_post_recv_req2 /* HHULIF_post_recv_req */, +#else + NULL, +#endif + THHUL_qpm_post_recv_reqs /* HHULIF_post_recv_reqs */, + THHUL_qpm_destroy_qp_done /* HHULIF_destroy_qp_done */, + THHUL_srqm_create_srq_prep/* 
HHULIF_create_srq_prep */, + THHUL_srqm_create_srq_done/* HHULIF_create_srq_done */, + THHUL_srqm_modify_srq_prep/* HHULIF_modify_srq_prep */, + THHUL_srqm_modify_srq_done/* HHULIF_modify_srq_done */, + THHUL_srqm_destroy_srq_done/* HHULIF_destroy_srq_done */, + THHUL_srqm_post_recv_reqs /* HHULIF_post_srq */ +}; + +/* Private functions prototypes */ +static HH_ret_t alloc_hob_context( + THHUL_hob_t *new_hob_p, + THH_hca_ul_resources_t *hca_ul_resources_p +); + + +/********************************************************************************************** + * Public API Functions (defined in thhul_hob.h) + **********************************************************************************************/ + +HH_ret_t THHUL_hob_create( + /*IN*/ void/*THH_hca_ul_resources_t*/ *hca_ul_resources_p, + /*IN*/ u_int32_t device_id, + /*OUT*/ HHUL_hca_hndl_t *hca_p +) +{ + THHUL_hob_t new_hob; + HH_ret_t rc; + + if (hca_ul_resources_p == NULL) { + MTL_ERROR1("THHUL_hob_create: NULL hca_ul_resources_p.\n"); + return HH_EINVAL; + } + + /* Allocate associated memory resources and included objects */ + rc= alloc_hob_context(&new_hob,(THH_hca_ul_resources_t*)hca_ul_resources_p); + if (rc != HH_OK) return rc; + + /* Fill the HHUL_hca_dev_t structure */ + new_hob->hhul_hca.hh_hndl= ((THH_hca_ul_resources_t*)hca_ul_resources_p)->hh_hca_hndl; + new_hob->hhul_hca.dev_desc= "InfiniHost(Tavor)"; + new_hob->hhul_hca.vendor_id= MT_MELLANOX_IEEE_VENDOR_ID; + new_hob->hhul_hca.dev_id= device_id; + + new_hob->hhul_hca.hw_ver= ((THH_hca_ul_resources_t*)hca_ul_resources_p)->version.hw_ver; + new_hob->hhul_hca.fw_ver= + ((THH_hca_ul_resources_t*)hca_ul_resources_p)->version.fw_ver_major; + new_hob->hhul_hca.fw_ver= (new_hob->hhul_hca.fw_ver << 16) | + ((THH_hca_ul_resources_t*)hca_ul_resources_p)->version.fw_ver_minor; + new_hob->hhul_hca.fw_ver= (new_hob->hhul_hca.fw_ver << 16) | + ((THH_hca_ul_resources_t*)hca_ul_resources_p)->version.fw_ver_subminor; + new_hob->hhul_hca.if_ops= &thhul_ops; + new_hob->hhul_hca.hca_ul_resources_sz= sizeof(THH_hca_ul_resources_t); + new_hob->hhul_hca.pd_ul_resources_sz= sizeof(THH_pd_ul_resources_t); + new_hob->hhul_hca.cq_ul_resources_sz= sizeof(THH_cq_ul_resources_t); + new_hob->hhul_hca.srq_ul_resources_sz= sizeof(THH_srq_ul_resources_t); + new_hob->hhul_hca.qp_ul_resources_sz= sizeof(THH_qp_ul_resources_t); + /* Get a copy of allocated resources */ + memcpy(&(new_hob->ul_res),hca_ul_resources_p,sizeof(THH_hca_ul_resources_t)); + new_hob->hhul_hca.hca_ul_resources_p= &(new_hob->ul_res); + + new_hob->hhul_hca.device= new_hob; /* Connect to new THHUL_hob */ + + /* Return allocated HHUL device context */ + *hca_p= &(new_hob->hhul_hca); + + return HH_OK; +} + + +HH_ret_t THHUL_hob_destroy(/*IN*/ HHUL_hca_hndl_t hca) +{ + THHUL_hob_t hob= GET_HOB(hca); + + if (hob == NULL) return HH_EINVAL; /* Invalid handle */ + + THHUL_mwm_destroy(hob->mwm); + THHUL_qpm_destroy(hob->qpm); + THHUL_srqm_destroy(hob->srqm); + THHUL_pdm_destroy(hob->pdm); + THHUL_cqm_destroy(hob->cqm); + THH_uar_destroy(hob->uar); + FREE(hob); + return HH_OK; +} + + +HH_ret_t THHUL_hob_query_version( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ THH_ver_info_t *version_p +) +{ + if ((hob == NULL) || (version_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + memcpy(version_p,&(hob->ul_res.version),sizeof(THH_ver_info_t)); + return HH_OK; +} + +HH_ret_t THHUL_hob_get_hca_ul_handle +( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ HHUL_hca_hndl_t *hca_ul_p +) +{ + if ((hob == NULL) || (hca_ul_p == NULL)){ + return HH_EINVAL; /* Invalid 
handle/pointer */ + } + + *hca_ul_p = &hob->hhul_hca; + return HH_OK; +} + +HH_ret_t THHUL_hob_get_hca_ul_res_handle( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ HH_hca_hndl_t *hca_ul_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (hca_ul_p == NULL)) { + MTL_ERROR1("%s Wrong parameters: hob = %p, hca_ul_p=%p\n", __func__, hob, hca_ul_p); + return HH_EINVAL; /* Invalid handle/pointer */ + } + *hca_ul_p = hob->ul_res.hh_hca_hndl; + return HH_OK; +} + + +HH_ret_t THHUL_hob_get_hca_ul_res( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THH_hca_ul_resources_t *hca_ul_res_p +) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (hca_ul_res_p == NULL)) { + MTL_ERROR1("%s Wrong parameters: hob = %p, hca_ul_res_p=%p\n", __func__, hob, hca_ul_res_p); + return HH_EINVAL; /* Invalid handle/pointer */ + } + memcpy(hca_ul_res_p,&(hob->ul_res),sizeof(THH_hca_ul_resources_t)); + return HH_OK; +} + +HH_ret_t THHUL_hob_get_pdm(/*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_pdm_t *pdm_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (pdm_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *pdm_p= hob->pdm; + return HH_OK; +} + +HH_ret_t THHUL_hob_get_cqm (/*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_cqm_t *cqm_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (cqm_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *cqm_p= hob->cqm; + return HH_OK; +} + +HH_ret_t THHUL_hob_get_qpm (/*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_qpm_t *qpm_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (qpm_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *qpm_p= hob->qpm; + return HH_OK; +} + +HH_ret_t THHUL_hob_get_srqm (/*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_srqm_t *srqm_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (srqm_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *srqm_p= hob->srqm; + return HH_OK; +} + +HH_ret_t THHUL_hob_get_uar (/*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THH_uar_t *uar_p) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (uar_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *uar_p= hob->uar; + return HH_OK; +} + + +HH_ret_t THHUL_hob_get_mwm ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_mwm_t *mwm_p +) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (mwm_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *mwm_p= hob->mwm; + return HH_OK; +} + + +HH_ret_t THHUL_hob_is_priv_ud_av( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ MT_bool *is_priv_ud_av_p +) +{ + THHUL_hob_t hob= (THHUL_hob_t)(hca->device); + + if ((hob == NULL) || (is_priv_ud_av_p == NULL)) return HH_EINVAL; /* Invalid handle/pointer */ + *is_priv_ud_av_p= hob->ul_res.priv_ud_av; + return HH_OK; +} + + +/********************************************************************************************** + * Private Functions + **********************************************************************************************/ + + +/******************************************************* + * Function: alloc_hob_context + * + * Description: Allocate the THHUL_hob object memory and included objects + * + * Arguments: new_hob_p - Object to allocate for + * hca_ul_resources_p - As given to THHUL_hob_create() + * + * Returns: HH_OK + * HH_EAGAIN + *******************************************************/ +static HH_ret_t alloc_hob_context( + THHUL_hob_t *new_hob_p, + THH_hca_ul_resources_t 
*hca_ul_resources_p +) +{ + HH_ret_t rc; + + /* Allocate THHUL_hob own context */ + *new_hob_p= (THHUL_hob_t)MALLOC(sizeof(struct THHUL_hob_st)); + if (*new_hob_p == NULL) return HH_EAGAIN; + + /* Create included objects */ + rc= THH_uar_create( + &(hca_ul_resources_p->version), + hca_ul_resources_p->uar_index, + (void*)(hca_ul_resources_p->uar_map), + &((*new_hob_p)->uar)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_uar (err=%d).\n",rc); + goto failed_uar; + } + + rc= THHUL_pdm_create((*new_hob_p),hca_ul_resources_p->priv_ud_av,&((*new_hob_p)->pdm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_pdm (%d=%s).\n", rc, HH_strerror_sym(rc)); + goto failed_pdm; + } + + rc= THHUL_cqm_create((*new_hob_p),&((*new_hob_p)->cqm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_cqm (%d=%s).\n", rc, HH_strerror_sym(rc)); + goto failed_cqm; + } + + rc= THHUL_srqm_create((*new_hob_p),&((*new_hob_p)->srqm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_srqm (%d=%s).\n", rc, HH_strerror_sym(rc)); + goto failed_srqm; + } + + rc= THHUL_qpm_create((*new_hob_p), (*new_hob_p)->srqm, &((*new_hob_p)->qpm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_qpm (%d=%s).\n", rc, HH_strerror_sym(rc)); + goto failed_qpm; + } + + rc= THHUL_mwm_create((*new_hob_p),hca_ul_resources_p->log2_mpt_size,&((*new_hob_p)->mwm)); + if (rc != HH_OK) { + MTL_ERROR1("THHUL_hob_create: Failed creating THHUL_mwm (%d=%s).\n", rc, HH_strerror_sym(rc)); + goto failed_mwm; + } + + return HH_OK; + + /* Failure cleanup (error exit flow) */ + failed_mwm: + THHUL_qpm_destroy((*new_hob_p)->qpm); + failed_qpm: + THHUL_srqm_destroy((*new_hob_p)->srqm); + failed_srqm: + THHUL_cqm_destroy((*new_hob_p)->cqm); + failed_cqm: + THHUL_pdm_destroy((*new_hob_p)->pdm); + failed_pdm: + THH_uar_destroy((*new_hob_p)->uar); + failed_uar: + FREE(*new_hob_p); + return rc; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.h new file mode 100644 index 00000000..787539f2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_hob/thhul_hob.h @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THHUL_HOB_H +#define H_THHUL_HOB_H + +#include +#include +#include + + +DLL_API HH_ret_t THHUL_hob_create( + /*IN*/ void/*THH_hca_ul_resources_t*/ *hca_ul_resources_p, + /*IN*/ u_int32_t device_id, + /*OUT*/ HHUL_hca_hndl_t *hca_p +); + + +DLL_API HH_ret_t THHUL_hob_destroy(/*IN*/ HHUL_hca_hndl_t hca); + + +DLL_API HH_ret_t THHUL_hob_query_version( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ THH_ver_info_t *version_p +); + +DLL_API HH_ret_t THHUL_hob_get_hca_ul_handle( + /*IN*/ THHUL_hob_t hob, + /*OUT*/ HHUL_hca_hndl_t *hca_ul_p +); + +DLL_API HH_ret_t THHUL_hob_get_hca_ul_res( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THH_hca_ul_resources_t *hca_ul_res_p +); + +DLL_API HH_ret_t THHUL_hob_get_hca_ul_res_handle( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ HH_hca_hndl_t *hca_ul_p +); + +DLL_API HH_ret_t THHUL_hob_get_pdm( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_pdm_t *pdm_p +); + + +DLL_API HH_ret_t THHUL_hob_get_cqm ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_cqm_t *cqm_p +); + + +DLL_API HH_ret_t THHUL_hob_get_qpm ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_qpm_t *qpm_p +); + +DLL_API HH_ret_t THHUL_hob_get_srqm ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_srqm_t *srqm_p +); + +DLL_API HH_ret_t THHUL_hob_get_uar ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THH_uar_t *uar_p +); + +DLL_API HH_ret_t THHUL_hob_get_mwm ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ THHUL_mwm_t *mwm_p +); + +DLL_API HH_ret_t THHUL_hob_is_priv_ud_av( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ MT_bool *is_priv_ud_av_p +); + + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c new file mode 100644 index 00000000..9d1e03e1 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.c @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#define C_THHUL_MWM_C
+
+#include
+#include
+#include
+#include
+#include
+
+typedef struct mwm_ul_ctx_st {
+  /* TD: change the name.*/
+  IB_rkey_t key;
+  struct mwm_ul_ctx_st *next_p;
+  struct mwm_ul_ctx_st *back_p;
+} mwm_ul_ctx;
+
+typedef struct THHUL_mwm_st {
+  // TD: next member possibly redundant:
+  u_int32_t log2_mpt_size;
+  MOSAL_mutex_t mtx;
+  mwm_ul_ctx *head_p;
+} tmwm_t;
+
+HH_ret_t THHUL_mwm_create
+(
+  /*IN*/ THHUL_hob_t hob,
+  /*IN*/ u_int32_t log2_mpt_size,
+  /*OUT*/ THHUL_mwm_t *mwm_p
+)
+{
+  /*
+  HH_ret_t rc;
+  HHUL_hca_hndl_t hca;
+  THH_hca_ul_resources_t hca_ul_res;
+  */
+  THHUL_mwm_t mwm;
+
+  FUNC_IN;
+
+  mwm = TMALLOC(tmwm_t);
+
+  if( mwm == NULL )
+  {
+    MTL_ERROR1("%s mwm malloc failed\n", __func__);
+    return HH_EAGAIN;
+  }
+
+  /* change to THHUL_hob_get_hca_ul_hob_handle()*/
+  /*
+  if( (rc = THHUL_hob_get_hca_ul_handle(hob,&hca)) != HH_OK )
+  {
+    MTL_ERROR1("%s: THHUL_hob_get_hca_ul_handle() failed, ret=%d\n", __func__,rc);
+    return rc;
+  }
+
+  if( (rc = THHUL_hob_get_hca_ul_res(hca,&hca_ul_res)) != HH_OK )
+  {
+    MTL_ERROR1("%s: THHUL_hob_get_hca_ul_res() failed, ret=%d\n", __func__,rc);
+    return rc;
+  }
+  */
+  mwm->log2_mpt_size = log2_mpt_size;
+  mwm->head_p = NULL;
+  MOSAL_mutex_init(&(mwm->mtx));
+
+  *mwm_p = mwm;
+  FUNC_OUT;
+
+  return HH_OK;
+}
+
+
+HH_ret_t THHUL_mwm_destroy
+(
+  /*IN*/ THHUL_mwm_t mwm
+)
+{
+  mwm_ul_ctx *cur_mw_p;
+
+  while (mwm->head_p) {
+    cur_mw_p= mwm->head_p;
+    mwm->head_p = cur_mw_p->next_p;
+    FREE(cur_mw_p);
+  }
+
+  MOSAL_mutex_free(&(mwm->mtx));
+  FREE(mwm);
+  MT_RETURN(HH_OK);
+}
+
+
+HH_ret_t THHUL_mwm_alloc_mw
+(
+  /*IN*/ HHUL_hca_hndl_t hca,
+  /*IN*/ IB_rkey_t initial_rkey,
+  /*OUT*/ HHUL_mw_hndl_t* mw_p
+)
+{
+  THHUL_mwm_t mwm;
+#ifdef THHUL_MWM_DEBUG_LIST
+  mwm_ul_ctx *cur_mw_p;
+#endif
+
+  FUNC_IN;
+
+  if( ( THHUL_hob_get_mwm(hca,&mwm) != HH_OK ) || ( mwm == NULL ) )
+  {
+    MTL_ERROR1(MT_FLFMT("Error while retrieving mwm handle.\n"));
+    return HH_EINVAL;
+  }
+
+  *mw_p = MALLOC(sizeof(mwm_ul_ctx));
+  if( *mw_p == NULL ) {
+    MTL_ERROR1("%s allocation failed.\n", __func__);
+    return HH_EAGAIN;
+  }
+
+  if (MOSAL_mutex_acq(&mwm->mtx,TRUE) != MT_OK) {
+    FREE(*mw_p);
+    return HH_EINTR;
+  }
+
+  if( mwm->head_p )
+    mwm->head_p->back_p = (struct mwm_ul_ctx_st *)*mw_p;
+  ((mwm_ul_ctx *) *(mw_p))->next_p = mwm->head_p;
+  ((mwm_ul_ctx *) *(mw_p))->back_p = NULL;
+  ((mwm_ul_ctx *) *(mw_p))->key = initial_rkey;
+
+  mwm->head_p = (struct mwm_ul_ctx_st *)*mw_p;
+
+#ifdef THHUL_MWM_DEBUG_LIST
+  MTL_DEBUG5(MT_FLFMT("List check/dump:"));
+  cur_mw_p= mwm->head_p;
+  while (cur_mw_p) { /* Verify list consistency */
+    MTL_DEBUG5(MT_FLFMT("Rkey=0x%X"),cur_mw_p->key);
+    /* verify next points back to current */
+    if ((cur_mw_p->next_p) && (cur_mw_p->next_p->back_p != cur_mw_p)) {
+      MTL_ERROR1(MT_FLFMT("Linked list is found to be inconsistent"));
+      MOSAL_mutex_rel(&mwm->mtx);
+      return HH_EINVAL;
+    }
+    cur_mw_p= cur_mw_p->next_p;
+  }
+  cur_mw_p= mwm->head_p;
+  while (cur_mw_p) { /* Scan list to assure given handle is in the list */
+    if (cur_mw_p == *mw_p) break;
+    cur_mw_p= cur_mw_p->next_p;
+  }
+  if (cur_mw_p == NULL) {
+    MTL_ERROR1(MT_FLFMT("New memory window is not found in the list"));
+    MOSAL_mutex_rel(&mwm->mtx);
+    return HH_EINVAL;
+  }
+#endif
+
+  MOSAL_mutex_rel(&mwm->mtx);
+
+  FUNC_OUT;
+
+  return HH_OK;
+}
+
+HH_ret_t THHUL_mwm_bind_mw
+(
+  /*IN*/ HHUL_hca_hndl_t hhul_hndl,
+  /*IN*/ HHUL_mw_hndl_t mw,
+  /*IN*/ HHUL_mw_bind_t* bind_prop_p,
+  /*OUT*/ IB_rkey_t* bind_rkey_p
+)
+{
+  HH_ret_t rc;
+  u_int32_t new_key;
+  THHUL_mwm_t mwm;
+
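+  /* A worked example of the re-tag arithmetic applied below (numbers are
+   * illustrative only, assuming log2_mpt_size == 17):
+   *
+   *   key     = 0x00020005;        (tag=1, MPT index=0x00005)
+   *   new_key = key + (1 << 17);   (== 0x00040005: tag=2, MPT index
+   *                                 bits [16:0] unchanged)
+   *
+   * i.e. the window keeps addressing the same MPT entry while rkeys from
+   * the previous binding stop matching.
+   */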
+  FUNC_IN;
+
+  MTL_DEBUG1("%s - dump of bind req:\n", __func__);
+  MTL_DEBUG1("{\n");
+  MTL_DEBUG1("init r_key: 0x%x.\n",((mwm_ul_ctx *) mw)->key);
+  MTL_DEBUG1("acl: 0x%x.\n",bind_prop_p->acl);
+  MTL_DEBUG1("comp_type: 0x%x.\n",bind_prop_p->comp_type);
+  MTL_DEBUG1("id: 0x%x.\n",(u_int32_t) bind_prop_p->id);
+  MTL_DEBUG1("lkey: 0x%x.\n",bind_prop_p->mr_lkey);
+  MTL_DEBUG1("qp: " MT_ULONG_PTR_FMT ".\n",(MT_ulong_ptr_t) bind_prop_p->qp);
+  MTL_DEBUG1("start: 0x%x:%x.\n",(u_int32_t) (bind_prop_p->start >> 32),(u_int32_t) bind_prop_p->start);
+  MTL_DEBUG1("size: 0x%x:%x.\n",(u_int32_t) (bind_prop_p->size >> 32),(u_int32_t) bind_prop_p->size);
+  MTL_DEBUG1("}\n");
+
+  if( ( THHUL_hob_get_mwm(hhul_hndl,&mwm) != HH_OK ) || ( mwm == NULL ) )
+  {
+    MTL_ERROR1(MT_FLFMT("Error while retrieving mwm handle.\n"));
+    return HH_EINVAL;
+  }
+
+  // a req to bind a window to zero len is in fact an unbind req.
+  // if unbinding, the window r_key remains the same.
+  // if binding, the new r_key tag is the previous tag incremented by 1:
+  new_key = ((mwm_ul_ctx *) mw)->key;
+  /* TD: conventions */
+  if( bind_prop_p->size > 0 ) {
+    new_key += (1 << mwm->log2_mpt_size);
+  }
+
+  if( (rc = THHUL_qpm_post_bind_req(bind_prop_p,new_key)) != HH_OK ) {
+    MTL_ERROR1("%s failed to post bind descriptor.\n", __func__);
+    return rc;
+  }
+
+  ((mwm_ul_ctx *) mw)->key = new_key;
+  *bind_rkey_p = new_key;
+
+  FUNC_OUT;
+
+  return HH_OK;
+}
+
+
+HH_ret_t THHUL_mwm_free_mw
+(
+  /*IN*/ HHUL_hca_hndl_t hhul_hndl,
+  /*IN*/ HHUL_mw_hndl_t mw
+)
+{
+  THHUL_mwm_t mwm;
+#ifdef THHUL_MWM_DEBUG_LIST
+  mwm_ul_ctx *cur_mw_p;
+#endif
+
+  FUNC_IN;
+
+  if( ( THHUL_hob_get_mwm(hhul_hndl,&mwm) != HH_OK ) || ( mwm == NULL ) )
+  {
+    MTL_ERROR1(MT_FLFMT("Error while retrieving mwm handle.\n"));
+    return HH_EINVAL;
+  }
+
+  MOSAL_mutex_acq_ui(&mwm->mtx);
+
+#ifdef THHUL_MWM_DEBUG_LIST
+  MTL_DEBUG5(MT_FLFMT("List check/dump (removal of Rkey=0x%X):"),((mwm_ul_ctx*)mw)->key);
+  cur_mw_p= mwm->head_p;
+  while (cur_mw_p) { /* Verify list consistency */
+    MTL_DEBUG5(MT_FLFMT("Rkey=0x%X"),cur_mw_p->key);
+    /* verify next points back to current */
+    if ((cur_mw_p->next_p) && (cur_mw_p->next_p->back_p != cur_mw_p)) {
+      MTL_ERROR1(MT_FLFMT("Linked list is found to be inconsistent"));
+      MOSAL_mutex_rel(&mwm->mtx);
+      return HH_EINVAL;
+    }
+    cur_mw_p= cur_mw_p->next_p;
+  }
+  cur_mw_p= mwm->head_p;
+  while (cur_mw_p) { /* Scan list to assure given handle is in the list */
+    if (cur_mw_p == mw) break;
+    cur_mw_p= cur_mw_p->next_p;
+  }
+  if (cur_mw_p == NULL) {
+    MTL_ERROR1(MT_FLFMT("Given memory window handle %p is unknown (not in list)"),
+      (mwm_ul_ctx *) mw);
+    MOSAL_mutex_rel(&mwm->mtx);
+    return HH_EINVAL;
+  }
+#endif
+  // window list is empty:
+  if( mwm->head_p == NULL )
+  {
+    MOSAL_mutex_rel(&mwm->mtx);
+    return HH_EINVAL;
+  }
+
+  // single window in the list:
+  if( mwm->head_p->next_p == NULL )
+  {
+    mwm->head_p = NULL;
+    goto mwm_free_mw_exit;
+  }
+
+  // unlink from previous entry:
+  if( ((mwm_ul_ctx *) mw)->back_p )
+  {
+    ((mwm_ul_ctx *) mw)->back_p->next_p = ((mwm_ul_ctx *) mw)->next_p;
+  } else { /* Removing first - Make next (if any) the first */
+    mwm->head_p= ((mwm_ul_ctx *) mw)->next_p;
+  }
+
+  // unlink from next entry:
+  if( ((mwm_ul_ctx *) mw)->next_p )
+  {
+    ((mwm_ul_ctx *) mw)->next_p->back_p = ((mwm_ul_ctx *) mw)->back_p;
+  }
+
+mwm_free_mw_exit:
+  MOSAL_mutex_rel(&mwm->mtx);
+  FREE(mw);
+
+  FUNC_OUT;
+
+  return HH_OK;
+}
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h
new file mode 100644
index 00000000..f4ccbb71
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_mwm/thhul_mwm.h
@@ -0,0 +1,73 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_THHUL_MWM_H
+#define H_THHUL_MWM_H
+
+#include
+#include
+#include
+
+
+DLL_API HH_ret_t THHUL_mwm_create(
+  /*IN*/ THHUL_hob_t hob,
+  /*IN*/ u_int32_t log2_mpt_size,
+  /*OUT*/ THHUL_mwm_t *mwm_p
+);
+
+
+DLL_API HH_ret_t THHUL_mwm_destroy(
+  /*IN*/ THHUL_mwm_t mwm
+);
+
+
+DLL_API HH_ret_t THHUL_mwm_alloc_mw(
+  /*IN*/ HHUL_hca_hndl_t hca,
+  /*IN*/ IB_rkey_t initial_rkey,
+  /*OUT*/ HHUL_mw_hndl_t* mw_p
+);
+
+
+DLL_API HH_ret_t THHUL_mwm_bind_mw(
+  /*IN*/ HHUL_hca_hndl_t hhul_hndl,
+  /*IN*/ HHUL_mw_hndl_t mw,
+  /*IN*/ HHUL_mw_bind_t* bind_prop_p,
+  /*OUT*/ IB_rkey_t* bind_rkey_p
+);
+
+
+DLL_API HH_ret_t THHUL_mwm_free_mw(
+  /*IN*/ HHUL_hca_hndl_t hhul_hndl,
+  /*IN*/ HHUL_mw_hndl_t mw
+);
+
+#endif /* H_THHUL_MWM_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c
new file mode 100644
index 00000000..f2692f08
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.c
@@ -0,0 +1,704 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "thhul_pdm_priv.h"
+
+#define MT_FORK_SUPPORT
+
+HH_ret_t THHUL_pdm_create (
+  THHUL_hob_t hob,
+  MT_bool priv_ud_av,
+  THHUL_pdm_t *pdm_p )
+{
+  THHUL_pdm_t new_pdm_obj;
+  HH_ret_t ret;
+  VIP_common_ret_t vret;
+  /* create new pdm object */
+  new_pdm_obj = TMALLOC(struct THHUL_pdm_st);
+  if (!new_pdm_obj) {
+    return HH_EINVAL;
+  }
+
+  memset(new_pdm_obj, 0, sizeof(struct THHUL_pdm_st));
+
+  ret = THHUL_hob_query_version( hob, &(new_pdm_obj->version));
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_create: ERROR (%d) : could not get version info\n", ret);
+    FREE(new_pdm_obj);
+    return HH_EINVAL;
+  }
+
+  new_pdm_obj->priv_ud_av = priv_ud_av;
+  vret = VIP_array_create(THHUL_PDM_INITIAL_NUM_PDS,&(new_pdm_obj->pd_array));
+  if (vret != VIP_OK) {
+    MTL_ERROR1("THHUL_pdm_create: ERROR (%d) : could not create PD array\n", vret);
+    FREE(new_pdm_obj);
+    return HH_EAGAIN;
+  }
+
+  *pdm_p = new_pdm_obj;
+  return HH_OK;
+}
+
+HH_ret_t THHUL_pdm_destroy ( THHUL_pdm_t pdm)
+{
+  THHUL_pd_t *ul_pd;
+  HH_ret_t ret;
+  THH_udavm_t udavm;
+  VIP_common_ret_t vret;
+  VIP_array_handle_t pd_h;
+
+
+  VIP_ARRAY_FOREACH(pdm->pd_array,vret,pd_h,(VIP_array_obj_t*)&ul_pd)
+  {
+    udavm = ul_pd->udavm;
+    /* For non-privileged UDAVs, destroy the UDAV user-level object allocated for this PD */
+    if (!(pdm->priv_ud_av)) {
+      /* udavm can legally be null if THHUL_pdm_alloc_pd_done was not called, due to
+       * failure of THH_pdm_alloc_pd
+       */
+      if (udavm != NULL) {
+        ret = THH_udavm_destroy(udavm);
+        if (ret != HH_OK) {
+          MTL_ERROR1("THHUL_pdm_destroy: ERROR (%d) : Could not destroy associated UDAV object\n", ret);
+          /* continue, to free up the ul_pd anyway, and report a successful 'free' */
+        }
+      }
+
+      /* If udav was not allocated in DDR, free the allocated memory here */
+      if (ul_pd->udav_nonddr_table != (MT_virt_addr_t) 0) {
+        MOSAL_pci_virt_free_consistent((void *)ul_pd->udav_nonddr_table, ul_pd->uadv_nonddr_table_alloc_size);
+        ul_pd->udav_nonddr_table = (MT_virt_addr_t) 0;
+        ul_pd->udav_nonddr_table_aligned = (MT_virt_addr_t) 0;
+        ul_pd->uadv_nonddr_table_alloc_size = 0;
+      }
+    }
+    ul_pd->valid = FALSE; /* just in case OS does not detect heap errors, and does not zero entries */
+    FREE(ul_pd);
+  }
+
+  VIP_array_destroy(pdm->pd_array,NULL);
+  FREE(pdm);
+  return HH_OK;
+}
+
+HH_ret_t THHUL_pdm_alloc_pd_avs_prep (
+  HHUL_hca_hndl_t hca,
+  u_int32_t max_num_avs,
+  HH_pdm_pd_flags_t pd_flags,
+  HHUL_pd_hndl_t *pd_p,
+  void *pd_ul_resources_p )
+{
+  HH_ret_t ret;
+  THHUL_pdm_t pdm;
+  THHUL_pd_t *new_pd_p;
+  THH_pd_ul_resources_t *pd_ul_res = (THH_pd_ul_resources_t *)pd_ul_resources_p;
+  MT_size_t ud_av_table_sz = 0;
+  VIP_common_ret_t vret;
+  VIP_array_handle_t local_pd_hndl;
+
+  ret = THHUL_hob_get_pdm(hca, &pdm);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_alloc_pd_avs_prep: ERROR (%d) : PDM object has not yet been created\n", ret);
+    return HH_EINVAL;
+  }
+
+  new_pd_p = TMALLOC(THHUL_pd_t);
+  if (new_pd_p == NULL) {
+    MTL_ERROR1("THHUL_pdm_alloc_pd_avs_prep: ERROR : PD object allocation failed\n");
+    return HH_EAGAIN;
+  }
+  memset(new_pd_p, 0, sizeof(THHUL_pd_t));
+  memset(pd_ul_res, 0, sizeof(THH_pd_ul_resources_t));
+
+  /* first, see if need to create a UDAV table (non-priv mode) */
+  if (!pdm->priv_ud_av) {
+
+    if (max_num_avs == 0) {
+      MTL_ERROR1("THHUL_pdm_alloc_pd_avs_prep: max_num_avs requested cannot be zero.\n");
+      FREE(new_pd_p);
+      return
HH_EINVAL; + } + + if (max_num_avs == EVAPI_DEFAULT_AVS_PER_PD) { + max_num_avs = THHUL_PDM_MAX_UL_UDAV_PER_PD; + MTL_DEBUG4("THHUL_pdm_alloc_pd_avs_prep: using default AVs per PD (=%u)\n", max_num_avs); + } + /* guarantee that table size is a multiple of page size. */ + ud_av_table_sz = max_num_avs * (sizeof(struct tavorprm_ud_address_vector_st) / 8); +#if !defined(__KERNEL__) && defined(MT_FORK_SUPPORT) + /* Add 1 page for page alignment + one page to cover last page */ + new_pd_p->uadv_nonddr_table_alloc_size = + MT_UP_ALIGNX_SIZE(ud_av_table_sz, MOSAL_SYS_PAGE_SHIFT) + MOSAL_SYS_PAGE_SIZE - 1; +#else + /* malloc an extra udav entry to use for table-start alignment purposes */ + new_pd_p->uadv_nonddr_table_alloc_size = ud_av_table_sz + + (1<udav_nonddr_table = + (MT_virt_addr_t) MOSAL_pci_virt_alloc_consistent(new_pd_p->uadv_nonddr_table_alloc_size, + ceil_log2(sizeof(struct tavorprm_ud_address_vector_st) / 8) ); + + if (new_pd_p->udav_nonddr_table == VA_NULL ) { + MTL_ERROR1("THHUL_pdm_alloc_pd_avs_prep: ERROR : Could not Vmalloc UDAV table\n"); + ret = HH_ENOMEM; + goto thh_pdm_udavm_create_err; + } + memset((void *)new_pd_p->udav_nonddr_table, 0, new_pd_p->uadv_nonddr_table_alloc_size); + +#if !defined(__KERNEL__) && defined(MT_FORK_SUPPORT) + new_pd_p->udav_nonddr_table_aligned = /* Align to page start */ + MT_UP_ALIGNX_VIRT((new_pd_p->udav_nonddr_table), MOSAL_SYS_PAGE_SHIFT); +#else + /* now, align the buffer to the entry size */ + new_pd_p->udav_nonddr_table_aligned = + MT_UP_ALIGNX_VIRT((new_pd_p->udav_nonddr_table), + ceil_log2((sizeof(struct tavorprm_ud_address_vector_st) / 8))); +#endif + } + + /* add to array */ + if ((vret=VIP_array_insert(pdm->pd_array, (VIP_array_obj_t)new_pd_p, &local_pd_hndl)) != VIP_OK) { + MTL_ERROR1("THHUL_pdm_alloc_pd_avs_prep: ERROR (%d) : Insertion failure.\n", vret); + if (!pdm->priv_ud_av) { + MOSAL_pci_virt_free_consistent((void *)new_pd_p->udav_nonddr_table, new_pd_p->uadv_nonddr_table_alloc_size); + } + FREE(new_pd_p); + return HH_EAGAIN; + } + + pd_ul_res->udavm_buf = new_pd_p->udav_nonddr_table_aligned; + pd_ul_res->udavm_buf_sz = ud_av_table_sz; + pd_ul_res->pd_flags = pd_flags; + /* do a free_pd_prep here so that if the kernel call to create-pd fails, we can call + * HHUL_free_pd_done directly (without worrying about a free-prep step. 
The logic needs + * to be that explicitly call HHUL_free_prep ONLY if we successfully called HHUL_alloc_done */ + ret = THHUL_pdm_free_pd_prep (hca,(HHUL_pd_hndl_t)local_pd_hndl, FALSE); + if (ret != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: THHUL_pdm_free_pd_prep failure (%d:%s)"), + __func__, ret, HH_strerror_sym(ret)); + goto thh_pdm_udavm_create_err; + } + *pd_p = new_pd_p->hhul_pd_hndl = (HHUL_pd_hndl_t)local_pd_hndl; /* return allocated PD handle */ + return HH_OK; + +thh_pdm_udavm_create_err: + FREE(new_pd_p); + return ret; + +} + +HH_ret_t THHUL_pdm_alloc_pd_prep ( + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t *pd_p, + void *pd_ul_resources_p ) +{ + return THHUL_pdm_alloc_pd_avs_prep(hca, 256, PD_NO_FLAGS, pd_p, pd_ul_resources_p); +} + + +HH_ret_t THHUL_pdm_alloc_pd_done ( + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t hhul_pd, + HH_pd_hndl_t hh_pd, + void *pd_ul_resources_p ) +{ + HH_ret_t ret; + THHUL_pdm_t pdm; + THH_pd_ul_resources_t *pd_ul_res = (THH_pd_ul_resources_t *)pd_ul_resources_p; + THHUL_pd_t *ul_pd; + VIP_common_ret_t vret; + + MTL_DEBUG3("==> THHUL_pdm_alloc_pd_done\n"); + + ret = THHUL_hob_get_pdm(hca, &pdm); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_alloc_pd_done: ERROR (%d) : PDM object has not yet been created\n", ret); + return HH_EINVAL; + } + + /* undo the erase prep performed at the end of alloc prep */ + ret = THHUL_pdm_free_pd_prep (hca, hhul_pd,TRUE ); + if (ret != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: THHUL_pdm_free_pd_prep UNDO failure (%d:%s)"), + __func__, ret, HH_strerror_sym(ret)); + return ret; + } + + if ((vret=VIP_array_find_hold(pdm->pd_array,(VIP_array_handle_t)hhul_pd, + (VIP_array_obj_t *)&ul_pd)) != VIP_OK) { + if (vret == VIP_EBUSY) { + MTL_DEBUG4("THHUL_pdm_alloc_pd_done: PD object is busy\n"); + return HH_EBUSY; + } else { + MTL_ERROR1("THHUL_pdm_alloc_pd_done: ERROR (%d) : Could not find PD object\n", vret); + return HH_EINVAL_PD_HNDL; + } + } + ul_pd->hh_pd_hndl = hh_pd; + + if (!pdm->priv_ud_av) { + if (pd_ul_res->udavm_buf != ul_pd->udav_nonddr_table_aligned) { + MTL_DEBUG3("THHUL_pdm_alloc_pd_done. USING DDR MEMORY.udavm_buf=" + VIRT_ADDR_FMT", nonddr_table="VIRT_ADDR_FMT", extra=%d\n", + pd_ul_res->udavm_buf, ul_pd->udav_nonddr_table, + (int)(sizeof(struct tavorprm_ud_address_vector_st) / 8) ); + MOSAL_pci_virt_free_consistent((void *)ul_pd->udav_nonddr_table, ul_pd->uadv_nonddr_table_alloc_size); + ul_pd->udav_nonddr_table = (MT_virt_addr_t) 0; + ul_pd->udav_nonddr_table_aligned = (MT_virt_addr_t) 0; + ul_pd->uadv_nonddr_table_alloc_size = 0; + } else { + MTL_DEBUG3("THHUL_pdm_alloc_pd_done. USING HOST MEMORY.udavm_buf (aligned) =" + VIRT_ADDR_FMT", non-aligned nonddr_table="VIRT_ADDR_FMT"\n", + pd_ul_res->udavm_buf, ul_pd->udav_nonddr_table); + } + ret = THH_udavm_create( &(pdm->version), pd_ul_res->udavm_buf_memkey, + pd_ul_res->udavm_buf, + pd_ul_res->udavm_buf_sz, + (ul_pd->uadv_nonddr_table_alloc_size != 0), + &(ul_pd->udavm), + &(ul_pd->av_ddr_base), + &(ul_pd->av_host_base)); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_alloc_pd_done: ERROR (%d) : Could not create UDAV manager object\n", ret); + MTL_DEBUG4("<== THHUL_pdm_alloc_pd_done. 
ERROR\n"); + VIP_array_find_release(pdm->pd_array,hhul_pd); + return ret; + } + } + else { + THH_hca_ul_resources_t hca_ul_res; + ret = THHUL_hob_get_hca_ul_res(hca,&hca_ul_res); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_alloc_pd_done: ERROR (%d) : THHUL_hob_get_hca_ul_res failed\n", ret); + VIP_array_find_release(pdm->pd_array,hhul_pd); + return ret; + } else { + ul_pd->av_ddr_base = hca_ul_res.av_ddr_base; + ul_pd->av_host_base = hca_ul_res.av_host_base; + } + } + + + /* save the memory key in all cases, for use by THHUL_qpm */ + ul_pd->lkey = pd_ul_res->udavm_buf_memkey; + ul_pd->valid = TRUE; + VIP_array_find_release(pdm->pd_array,hhul_pd); + MTL_DEBUG3("<== THHUL_pdm_alloc_pd_done\n"); + return HH_OK; +} + + +HH_ret_t THHUL_pdm_free_pd_prep ( + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t hhul_pd, + MT_bool undo_flag ) +{ + HH_ret_t ret; + THHUL_pdm_t pdm; + VIP_common_ret_t vret; + + ret = THHUL_hob_get_pdm(hca, &pdm); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_free_pd_prep: ERROR (%d) : PDM object has not yet been created\n", ret); + return HH_EINVAL; + } + + /* undoing a previous prep */ + if (undo_flag == TRUE) { + if ((vret=VIP_array_erase_undo(pdm->pd_array,(VIP_array_handle_t)hhul_pd)) != VIP_OK){ + MTL_ERROR1("THHUL_pdm_free_pd_prep: ERROR (%d) : invalid handle\n", vret); + return HH_EINVAL_PD_HNDL; + } + return HH_OK; + } + + /* preparing a PD FREE */ + /* need to find pd table entry in pd list */ + /* and signal it as prepared for erase. Purpose here is to see if still have outstanding AVs */ + /* on this PD, in which case erase_prepare will return busy. */ + if ((vret=VIP_array_erase_prepare(pdm->pd_array,(VIP_array_handle_t)hhul_pd, NULL)) != VIP_OK){ + if (vret == VIP_EBUSY) { + MTL_DEBUG4("THHUL_pdm_free_pd_prep: PD object is busy\n"); + return HH_EBUSY; + } else { + MTL_ERROR1("THHUL_pdm_free_pd_prep: ERROR (%d) : Could not find PD object\n", vret); + return HH_EINVAL_PD_HNDL; + } + } + return HH_OK; +} + +HH_ret_t THHUL_pdm_free_pd_done ( + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t hhul_pd ) +{ + HH_ret_t ret; + THHUL_pdm_t pdm; + THHUL_pd_t *ul_pd; + THH_udavm_t udavm; + VIP_common_ret_t vret; + + ret = THHUL_hob_get_pdm(hca, &pdm); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_free_pd_done: ERROR (%d) : PDM object has not yet been created\n", ret); + return HH_EINVAL; + } + + /* need to find pd table entry in pd list */ + /* and destroy udavm if needed, and delete the entry from list */ + if ((vret=VIP_array_erase_done(pdm->pd_array,(VIP_array_handle_t)hhul_pd, + (VIP_array_obj_t*) &ul_pd)) != VIP_OK){ + MTL_ERROR1("THHUL_pdm_free_pd_done: ERROR (%d) : Could not find PD object\n", vret); + return HH_EINVAL_PD_HNDL; + } + udavm = ul_pd->udavm; + + + /* For non-privileged UDAVs, destroy the UDAV user-level object allocated for this PD */ + if (!(pdm->priv_ud_av)) { + /*udavm can legally be null if THHUL_pdm_alloc_pd_done was not called, due to + * failure of THH_pdm_alloc_pd + */ + if (udavm != NULL) { + ret = THH_udavm_destroy(udavm); + if (ret != HH_OK) { + MTL_ERROR1("THHUL_pdm_free_pd_done: ERROR (%d) : Could not destroy associated UDAV object\n", ret); + /* continue, to free up the ul_pd anyway., and report successful 'free' */ + } + } + + /* If udav was not allocated in DDR, free the allocated memory here */ + if (ul_pd->udav_nonddr_table != (MT_virt_addr_t) 0) { + MOSAL_pci_virt_free_consistent((void *)ul_pd->udav_nonddr_table, ul_pd->uadv_nonddr_table_alloc_size); + ul_pd->udav_nonddr_table = (MT_virt_addr_t) 0; + ul_pd->udav_nonddr_table_aligned = (MT_virt_addr_t) 
0;
+      ul_pd->uadv_nonddr_table_alloc_size = 0;
+    }
+  }
+  ul_pd->valid = FALSE; /* just in case OS does not detect heap errors, and does not zero entries */
+  FREE(ul_pd);
+  return HH_OK;
+}
+
+
+HH_ret_t THHUL_pdm_create_ud_av (
+  HHUL_hca_hndl_t hca,
+  HHUL_pd_hndl_t hhul_pd,
+  VAPI_ud_av_t *av_p,
+  HHUL_ud_av_hndl_t *ah_p )
+{
+  HH_ret_t ret;
+  THHUL_pdm_t pdm;
+  THHUL_pd_t *ul_pd;
+  VIP_common_ret_t vret;
+
+  /* pre-allocation checks */
+  if (av_p == NULL) {
+    MTL_ERROR4("THHUL_pdm_create_ud_av: av_p is NULL.\n");
+    MT_RETURN(HH_EINVAL_PARAM);
+  }
+
+  if (av_p->port == 0 || av_p->port > THHUL_TAVOR_NUM_PORTS) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR: invalid port number specified (%d)\n"
+      ,av_p->port);
+    return HH_EINVAL_PORT;
+  }
+
+  ret = THHUL_hob_get_pdm(hca, &pdm);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR (%d) : PDM object has not yet been created\n", ret);
+    return HH_EINVAL;
+  }
+
+  if (pdm->priv_ud_av) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: non_privileged UDAVs not configured\n");
+    return HH_EINVAL;
+  }
+
+  /* need to find pd table entry in pd array */
+  if ((vret=VIP_array_find_hold(pdm->pd_array,(VIP_array_handle_t)hhul_pd,
+    (VIP_array_obj_t*) &ul_pd)) != VIP_OK) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR (%d) : Could not find PD object\n", vret);
+    return HH_EINVAL_PD_HNDL;
+  }
+
+  if (ul_pd->valid == FALSE) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR: This PD is not allocated\n");
+    ret = HH_EINVAL_PD_HNDL;
+    goto err;
+  }
+
+  if (ul_pd->udavm == NULL) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR: UDAVM object not allocated\n");
+    ret = HH_EINVAL;
+    goto err;
+  }
+
+  /* now, do it */
+  ret = THH_udavm_create_av(ul_pd->udavm, ul_pd->hh_pd_hndl, av_p, (HH_ud_av_hndl_t *)ah_p);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_create_ud_av: ERROR (%d) : Could not create address vector\n", ret);
+    goto err;
+  }
+
+  return HH_OK;
+
+err:
+  VIP_array_find_release(pdm->pd_array,(VIP_array_handle_t)hhul_pd);
+  return ret;
+}
+
+
+HH_ret_t THHUL_pdm_modify_ud_av (
+  HHUL_hca_hndl_t hca,
+  HHUL_ud_av_hndl_t ah,
+  VAPI_ud_av_t *av_p )
+{
+  HH_ret_t ret;
+  THHUL_pdm_t pdm;
+  THHUL_pd_t *ul_pd;
+  VIP_common_ret_t vret;
+  VIP_array_handle_t local_pd_hndl;
+  MT_bool found = FALSE;
+
+  /* error checks */
+  if (av_p->port == 0 || av_p->port > THHUL_TAVOR_NUM_PORTS) {
+    MTL_ERROR1("THHUL_pdm_modify_ud_av: ERROR: invalid port number specified (%d)\n"
+      ,av_p->port);
+    return HH_EINVAL_PORT;
+  }
+
+  ret = THHUL_hob_get_pdm(hca, &pdm);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_modify_ud_av: ERROR (%d) : PDM object has not yet been created\n", ret);
+    return HH_EINVAL;
+  }
+
+  if (pdm->priv_ud_av) {
+    MTL_ERROR1("THHUL_pdm_modify_ud_av: non_privileged UDAVs not configured\n");
+    return HH_EINVAL;
+  }
+
+  /* find the associated PD handle for this AV */
+  VIP_ARRAY_FOREACH_HOLD(pdm->pd_array, vret, local_pd_hndl, (VIP_array_obj_t *)&ul_pd, TRUE) {
+    if ((vret != VIP_OK) && (vret != VIP_EAGAIN)) {
+      MTL_ERROR1("THHUL_pdm_modify_ud_av: ERROR (%d) : Could not find PD object\n", vret);
+      /* return invalid AV handle, because a PD error return is not acceptable here */
+      /* IB Spec demands that destroy PD shall fail if it has any outstanding resources. */
+      /* Essentially, then, we have allowed destroy PD anyway, so that the AV handle is */
+      /* no longer valid.
*/
+      return HH_EINVAL_AV_HNDL;
+    }
+    if (ul_pd->valid == FALSE) {
+      /* ignore if PD is in process of being created */
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      continue;
+    }
+    /* try to modify the UDAVM */
+    ret = THH_udavm_modify_av(ul_pd->udavm,ah,av_p);
+    if (ret == HH_OK) {
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      found = TRUE;
+      break;
+    } else if (ret != HH_EINVAL_AV_HNDL) {
+      MTL_ERROR1("THHUL_pdm_modify_ud_av: ERROR (%d) : invalid parameter\n", ret);
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      return ret;
+    }
+    if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+  }
+
+  if (found == FALSE)
+  {
+    return HH_EINVAL_AV_HNDL;
+  }
+
+  return HH_OK;
+}
+
+
+HH_ret_t THHUL_pdm_query_ud_av (
+  HHUL_hca_hndl_t hca,
+  HHUL_ud_av_hndl_t ah,
+  VAPI_ud_av_t *av_p )
+{
+  HH_ret_t ret;
+  THHUL_pdm_t pdm;
+  THHUL_pd_t *ul_pd;
+  VIP_common_ret_t vret;
+  VIP_array_handle_t local_pd_hndl;
+  MT_bool found = FALSE;
+
+  /* error checks */
+  ret = THHUL_hob_get_pdm(hca, &pdm);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_query_ud_av: ERROR (%d) : PDM object has not yet been created\n", ret);
+    return HH_EINVAL;
+  }
+
+  if (pdm->priv_ud_av) {
+    MTL_ERROR1("THHUL_pdm_query_ud_av: non_privileged UDAVs not configured\n");
+    return HH_EINVAL;
+  }
+
+  /* find the associated PD handle for this AV */
+  VIP_ARRAY_FOREACH_HOLD(pdm->pd_array, vret, local_pd_hndl, (VIP_array_obj_t *)&ul_pd, TRUE) {
+    if ((vret != VIP_OK) && (vret != VIP_EAGAIN)) {
+      MTL_ERROR1("THHUL_pdm_query_ud_av: ERROR (%d) : Could not find PD object\n", vret);
+      /* return invalid AV handle, because a PD error return is not acceptable here */
+      /* IB Spec demands that destroy PD shall fail if it has any outstanding resources. */
+      /* Essentially, then, we have allowed destroy PD anyway, so that the AV handle is */
+      /* no longer valid.
*/
+      return HH_EINVAL_AV_HNDL;
+    }
+    if (ul_pd->valid == FALSE) {
+      /* ignore if PD is in process of being created */
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      continue;
+    }
+    /* try to query the AV via this PD's UDAVM */
+    ret = THH_udavm_query_av(ul_pd->udavm,ah,av_p);
+    if (ret == HH_OK) {
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      found = TRUE;
+      break;
+    } else if (ret != HH_EINVAL_AV_HNDL) {
+      MTL_ERROR1("THHUL_pdm_query_ud_av: ERROR (%d) : invalid parameter\n", ret);
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      return ret;
+    }
+    if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+  }
+
+  if (found == FALSE)
+  {
+    return HH_EINVAL_AV_HNDL;
+  }
+  return HH_OK;
+}
+
+HH_ret_t THHUL_pdm_destroy_ud_av (
+  HHUL_hca_hndl_t hca,
+  HHUL_ud_av_hndl_t ah )
+{
+  HH_ret_t ret;
+  THHUL_pdm_t pdm;
+  THHUL_pd_t *ul_pd;
+  VIP_common_ret_t vret;
+  VIP_array_handle_t local_pd_hndl;
+  MT_bool found = FALSE;
+
+  /* error checks */
+  ret = THHUL_hob_get_pdm(hca, &pdm);
+  if (ret != HH_OK) {
+    MTL_ERROR1("THHUL_pdm_destroy_ud_av: ERROR (%d) : PDM object has not yet been created\n", ret);
+    return HH_EINVAL;
+  }
+
+  if (pdm->priv_ud_av) {
+    MTL_ERROR1("THHUL_pdm_destroy_ud_av: non_privileged UDAVs not configured\n");
+    return HH_EINVAL;
+  }
+
+  /* find the associated PD handle for this AV */
+  VIP_ARRAY_FOREACH_HOLD(pdm->pd_array, vret, local_pd_hndl, (VIP_array_obj_t *)&ul_pd, TRUE) {
+    if ((vret != VIP_OK) && (vret != VIP_EAGAIN)) {
+      MTL_ERROR1("THHUL_pdm_destroy_ud_av: ERROR (%d) : Could not find PD object\n", vret);
+      /* We were unable to find a PD to which this udav handle was registered. */
+      /* Return invalid AV handle, because a PD error return is not acceptable here */
+      return HH_EINVAL_AV_HNDL;
+    }
+    if (ul_pd->valid == FALSE) {
+      /* ignore if PD is in process of being created */
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      continue;
+    }
+    /* try to destroy the AV via this PD's UDAVM */
+    ret = THH_udavm_destroy_av(ul_pd->udavm,ah);
+    if (ret == HH_OK) {
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      found = TRUE;
+      break;
+    } else if (ret != HH_EINVAL_AV_HNDL) {
+      if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+      MTL_ERROR1("THHUL_pdm_destroy_ud_av: ERROR (%d) : invalid parameter\n", ret);
+      return ret;
+    }
+    if (vret == VIP_OK) {VIP_array_find_release(pdm->pd_array,local_pd_hndl);}
+  }
+
+  if (found == TRUE){
+    /* decrement udav reference count for the PD */
+    VIP_array_find_release(pdm->pd_array,local_pd_hndl);
+    return HH_OK;
+  } else {
+    return HH_EINVAL_AV_HNDL;
+  }
+}
+
+HH_ret_t THHUL_pdm_get_ud_av_memkey_sqp_ok(
+  /*IN*/ THHUL_pdm_t pdm,
+  /*IN*/ HHUL_pd_hndl_t hhul_pd,
+  /*OUT*/MT_bool *ok_for_sqp,
+  /*OUT*/ VAPI_lkey_t *ud_av_memkey_p,
+  /*OUT*/ char **av_ddr_base,
+  /*OUT*/ char **av_host_base
+)
+{
+  /* sanity check */
+  THHUL_pd_t *ul_pd;
+  VIP_common_ret_t vret;
+
+  /* need to find pd table entry in pd array */
+  if ((vret=VIP_array_find_hold(pdm->pd_array,(VIP_array_handle_t)hhul_pd,
+    (VIP_array_obj_t*) &ul_pd)) != VIP_OK) {
+    if (vret == VIP_EBUSY) {
+      MTL_DEBUG4("THHUL_pdm_get_ud_av_memkey: PD object is busy\n");
+      return HH_EBUSY;
+    } else {
+      MTL_ERROR1("THHUL_pdm_get_ud_av_memkey: ERROR (%d) : Could not find PD object\n", vret);
+      return HH_EINVAL_PD_HNDL;
+    }
+  }
+  *ud_av_memkey_p = ((THHUL_pd_t *)(ul_pd))->lkey;
+  *av_ddr_base = ((THHUL_pd_t *)(ul_pd))->av_ddr_base;
+  *av_host_base =
((THHUL_pd_t *)(ul_pd))->av_host_base; + + /* is OK for special QP iff udav table is located in host memory */ +// *ok_for_sqp = ((THHUL_pd_t *)(ul_pd))->uadv_nonddr_table_alloc_size == 0 ? FALSE : TRUE; + *ok_for_sqp = TRUE; // AV copy in host memory makes it always ok + VIP_array_find_release(pdm->pd_array,(VIP_array_handle_t)hhul_pd); + return HH_OK; + +} /* THHUL_pdm_get_ud_av_memkey */ + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h new file mode 100644 index 00000000..28e09ec2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm.h @@ -0,0 +1,130 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THHUL_PDM_H +#define H_THHUL_PDM_H + +#include +#include +#include +#include + +#define THHUL_PDM_MAX_UL_UDAV_PER_PD 256 + +DLL_API HH_ret_t THHUL_pdm_create ( + /*IN*/ THHUL_hob_t hob, + /*IN*/ MT_bool priv_ud_av, + /*OUT*/ THHUL_pdm_t *pdm_p +); + +DLL_API HH_ret_t THHUL_pdm_destroy (/*IN*/ THHUL_pdm_t pdm); + +DLL_API HH_ret_t THHUL_pdm_alloc_pd_prep ( + /*IN*/ HHUL_hca_hndl_t hca, + /*OUT*/ HHUL_pd_hndl_t *pd_p, + /*OUT*/ void/*THH_pd_ul_resources_t*/ *pd_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_pdm_alloc_pd_done ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_pd_hndl_t hhul_pd, + /*IN*/ HH_pd_hndl_t hh_pd, + /*IN*/ void/*THH_pd_ul_resources_t*/ *pd_ul_resources_p +); + +DLL_API HH_ret_t THHUL_pdm_free_pd_prep ( + /*IN*/HHUL_hca_hndl_t hca, + /*IN*/HHUL_pd_hndl_t hhul_pd, + /*IN*/MT_bool undo_flag +); + +DLL_API HH_ret_t THHUL_pdm_free_pd_done ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_pd_hndl_t hhul_pd +); + + +DLL_API HH_ret_t THHUL_pdm_create_ud_av ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_pd_hndl_t hhul_pd, + /*IN*/ VAPI_ud_av_t *av_p, + /*OUT*/ HHUL_ud_av_hndl_t *ah_p +); + + +DLL_API HH_ret_t THHUL_pdm_modify_ud_av ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_ud_av_hndl_t ah, + /*IN*/ VAPI_ud_av_t *av_p +); + + +DLL_API HH_ret_t THHUL_pdm_query_ud_av ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_ud_av_hndl_t ah, + /*OUT*/ VAPI_ud_av_t *av_p +); + + +DLL_API HH_ret_t THHUL_pdm_destroy_ud_av ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_ud_av_hndl_t ah +); + + +DLL_API HH_ret_t THHUL_pdm_get_hh_pd( + /*IN*/ THHUL_pdm_t pdm, + /*IN*/ HHUL_pd_hndl_t hhul_pd, + /*OUT*/ HH_pd_hndl_t *hh_pd_p +); + + +DLL_API HH_ret_t THHUL_pdm_get_ud_av_memkey_sqp_ok( + /*IN*/ THHUL_pdm_t pdm, + /*IN*/ HHUL_pd_hndl_t hhul_pd, + /*OUT*/MT_bool *ok_for_sqp, + /*OUT*/ VAPI_lkey_t *ud_av_memkey_p, + /*OUT*/ char **av_ddr_base, + /*OUT*/ char **av_host_base +); + +DLL_API HH_ret_t THHUL_pdm_alloc_pd_avs_prep ( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ u_int32_t max_num_avs, + /*IN*/ HH_pdm_pd_flags_t pd_flags, + /*IN*/ HHUL_pd_hndl_t *pd_p, + /*OUT*/ void *pd_ul_resources_p +); + + +#endif /* H_THHUL_PDM_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm_priv.h new file mode 100644 index 00000000..2d14f093 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_pdm/thhul_pdm_priv.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_THHUL_PDM_PRIV_H
+#define H_THHUL_PDM_PRIV_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/* number of tavor physical ports defined here */
+/* for use in checking port number provided in addr vector */
+#define THHUL_TAVOR_NUM_PORTS 2
+
+/* allocate initially space for 16 PDs for user-space processes, and 256 for kernel-space */
+/* ----- OS-dependent implementation ----- */
+#ifndef MT_KERNEL
+#define THHUL_PDM_INITIAL_NUM_PDS 16
+#else
+#define THHUL_PDM_INITIAL_NUM_PDS 256
+#endif
+
+/* PD entry */
+typedef struct THHUL_pd_st {
+  HH_pd_hndl_t hh_pd_hndl;
+  THH_udavm_t udavm;
+  char *av_ddr_base;
+  char *av_host_base;
+
+  MT_virt_addr_t udav_nonddr_table;
+  MT_virt_addr_t udav_nonddr_table_aligned;
+  MT_size_t uadv_nonddr_table_alloc_size;
+  VAPI_lkey_t lkey;
+  MT_bool valid;
+  HHUL_pd_hndl_t hhul_pd_hndl;
+
+} THHUL_pd_t;
+
+struct THHUL_pdm_st {
+  MT_bool priv_ud_av;
+  THH_ver_info_t version;
+  VIP_array_p_t pd_array;
+};
+
+#endif /* H_THHUL_PDM_PRIV_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c
new file mode 100644
index 00000000..0fd8eda2
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.c
@@ -0,0 +1,5103 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#define C_THHUL_QPM_C
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/* THH_qpm Pkey and GID table access for usage of the special QPs (mlx IB headers) */
+#ifdef MT_KERNEL
+#include
+#endif
+
+#if defined(MT_KERNEL) && defined(__LINUX__)
+#include
+#include
+#include
+#endif
+
+#include "thhul_qpm.h"
+
+#ifdef WIN32
+#include "hca_data.h"
+#endif
+
+#include
+MTPERF_NEW_SEGMENT(THH_uar_sendq_dbell,200);
+MTPERF_NEW_SEGMENT(WQE_build_send,2000);
+MTPERF_NEW_SEGMENT(SQ_WQE_copy,2000);
+
+#define USE_FAST_POST 1
+
+#ifndef MT_KERNEL
+/* instead of "ifdef"ing all over the code we define an empty macro */
+#define MOSAL_pci_phys_free_consistent(addr,sz) do {} while(0);
+#endif
+
+
+/* Limit kmalloc to 4 pages (if this fails, vmalloc will fail too) */
+#define WQ_KMALLOC_LIMIT (4*MOSAL_SYS_PAGE_SIZE)
+#define SMALL_VMALLOC_AREA (1<<28) /* VMALLOC area of 256MB or less is considered a scarce resource */
+
+#define LOG2_QP_HASH_TBL_SZ 8
+#define QP_HASH_TBL_SZ (1<<LOG2_QP_HASH_TBL_SZ)
+
+#define WQE_ALIGN_SHIFT 6 /* WQE address should be aligned to 64 Byte */
+#define WQE_SZ_MULTIPLE_SHIFT 4 /* WQE size must be 16 bytes multiple */
+/* WQE segments sizes */
+#define WQE_SEG_SZ_NEXT (sizeof(struct wqe_segment_next_st)/8) /* NEXT segment */
+#define WQE_SEG_SZ_CTRL (sizeof(struct wqe_segment_ctrl_send_st)/8) /* CTRL segment */
+#define WQE_SEG_SZ_RD (sizeof(struct wqe_segment_rd_st)/8) /* DATAGRAM:RD */
+#define WQE_SEG_SZ_UD (sizeof(struct wqe_segment_ud_st)/8) /* DATAGRAM:UD */
+#define WQE_SEG_SZ_RADDR (sizeof(struct wqe_segment_remote_address_st)/8) /* Remote address */
+#define WQE_SEG_SZ_ATOMIC (sizeof(struct wqe_segment_atomic_st)/8) /* Atomic */
+#define WQE_SEG_SZ_BIND (sizeof(struct wqe_segment_bind_st)/8) /* Bind */
+/* There is either BIND or RADDR+ATOMIC */
+#define WQE_SEG_SZ_BIND_RADDR_ATOMIC ((WQE_SEG_SZ_RADDR+WQE_SEG_SZ_ATOMIC) > WQE_SEG_SZ_BIND ? \
+ (WQE_SEG_SZ_RADDR+WQE_SEG_SZ_ATOMIC) : WQE_SEG_SZ_BIND )
+#define WQE_SEG_SZ_SG_ENTRY (sizeof(struct wqe_segment_data_ptr_st)/8)/* Scatter/Gather entry(ptr)*/
+#define WQE_SEG_SZ_SG_ENTRY_DW (sizeof(struct wqe_segment_data_ptr_st)/32)/* (same in DWORDs) */
+/* INLINE segment for UD headers (SMI/GSI) */
+#define IB_RWH_SZ 4
+#define IB_ICRC_SZ 4
+#define WQE_INLINE_SZ_BCOUNT 4
+/* INLINE segment for UD headers (SMI/GSI) */
+#define WQE_INLINE_SZ_UD_HDR \
+ MT_UP_ALIGNX_U32((WQE_INLINE_SZ_BCOUNT+IB_LRH_LEN+IB_GRH_LEN+IB_BTH_LEN+IB_DETH_LEN),4)
+/* INLINE segment for RAW-Ethertype */
+#define WQE_INLINE_SZ_RAW_HDR \
+ MT_UP_ALIGNX_U32((WQE_INLINE_SZ_BCOUNT+IB_LRH_LEN+IB_RWH_SZ),4)
+#define WQE_INLINE_ICRC MT_UP_ALIGNX_U32(WQE_INLINE_SZ_BCOUNT+IB_ICRC_SZ,4)
+#define MAX_WQE_SZ 1008
+#define BIND_WQE_SZ (WQE_SEG_SZ_NEXT+WQE_SEG_SZ_CTRL+WQE_SEG_SZ_BIND)
+
+#define MAX_ALLOC_RETRY 3 /* Maximum retries to get WQEs buffer which does not cross a 4GB boundary */
+
+#define IS_VALID_QPN(qpn) ((qpn) <= 0x00FFFFFF)
+#define DEFAULT_PKEY 0xFFFF
+#define QP1_PKEY_INDEX 0xFFFFFFFF
+
+#define RESERVED_MEMBIND_EECN 0 /* Pseudo EE-context reserved for memory binding processing */
+
+/* Dpool in size granularity of 1KB */
+#define THHUL_DPOOL_SZ_MIN_KB 1 /* Minimum WQEs buffer of 1KB */
+#define THHUL_DPOOL_SZ_MAX_KB 64 /* Max. is 64KB */
+#define THHUL_DPOOL_SZ_UNIT_SHIFT 10 /* 1KB units shift */
+#define THHUL_DPOOL_GRANULARITY_SHIFT 10 /* 1KB granularity - for alignment */
+#define THHUL_DPOOL_SZ_BASE_BUF_KB \
+ (THHUL_DPOOL_SZ_MAX_KB*2) /* Size of buffer shared among dpools*/
+
+/* Descriptors pool for small QPs */
+/* This data structure allows sharing of locked pages among QPs in order to reduce the amount of
+   locked pages and assure they cover full pages (fork support) */
+typedef struct THHUL_qp_dpool_st {
+  MT_size_t buf_size_kb; /* Each buffer in the pool */
+  void* free_buf_list;
+  unsigned long ref_cnt; /* When reached zero, may be freed */
+  void* orig_buf; /* Pointer to allocated memory chunk */
+  MT_size_t orig_size;
+  MT_bool used_virt_alloc;
+  struct THHUL_qp_dpool_st *prev; /* list of dpools of same size */
+  struct THHUL_qp_dpool_st *next; /* list of dpools of same size */
+} THHUL_qpm_dpool_t;
+
+typedef struct {
+  VAPI_ud_av_t av;
+  MPGA_headers_t hdrs;
+} special_qp_temp_t;
+
+#define CHIME_WORDS_PREFIX volatile
+#define WQE_IO_WRITE MOSAL_MMAP_IO_WRITE_DWORD
+
+typedef struct { /* Queue resources context */
+  MT_virt_addr_t wqe_buf; /* The buffer for this queue WQEs - aligned to WQE size */
+  VAPI_wr_id_t *wqe_id; /* Array of max_outs entries for holding each WQE ID (WQE index based) */
+  u_int32_t max_outs; /* Max. outstanding (number of WQEs in buffer) */
+  u_int32_t cur_outs; /* Currently outstanding */
+  u_int32_t max_sg_sz; /* Max. Scatter/Gather list size */
+  MT_size_t log2_max_wqe_sz; /* WQE size is a power of 2 (software implementation requirement) */
+  u_int32_t max_inline_data; /* For send queue only */
+  u_int32_t next2post_index; /* Next WQE to use for posting (producer index)*/
+  u_int32_t next2free_index; /* Next WQE to use free (consumer index) */
+  volatile u_int32_t* last_posted_p; /* For WQE chain linkage (== NULL if none) */
+  special_qp_temp_t *wqe_tmp; /* For av,headers in special QP Send */
+  u_int32_t *wqe_draft;
+  /* Implementation note:
+   * Using the "wqe_draft" scratchpad is required since we may
+   * perform many read-modify-writes while packing the WQE fields and we
+   * have no idea on WQEs buffer location. In cases where the actual WQE is
+   * in the attached DDR memory, a direct WQE packing will increase the
+   * building latency since that memory is not cached and each "read-modify-write"
+   * would consume time as well as PCI bandwidth.
+   * So we build the WQE on the local stack and then copy it (along with the
+   * swapping to big-endian, if needed).
+   * Also note that this allows us to allocate the actual WQE only after the whole
+   * WQE is formatted, thus minimizing the QP (spin)locking time.
+   */
+  VAPI_qp_state_t qp_state; /* User level assumed QP state */
+  /* Implementation note:
+   * qp_state is held per queue in order to avoid race in qp_state updates
+   * which may result from polling different CQs for each queue.
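+   * (For example, a completion polled on sq_cq may move the send queue to
+   * VAPI_SQE while a concurrent poll on rq_cq observes and records a
+   * different state; a single shared state field could then be updated
+   * out of order.)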
+   * We would also like to keep the common THHUL_qp_t static during the life
+   * of the QP in order to avoid additional synchronization between send and
+   * receive queue
+   */
+  MOSAL_spinlock_t q_lock; /* Protect concurrent usage of the queue */
+} queue_res_t;
+
+/* HHUL_qp_hndl_t is a pointer to this structure */
+typedef struct THHUL_qp_st {
+  VAPI_special_qp_t sqp_type; /* VAPI_REGULAR_QP for non-special QP */
+  IB_ts_t ts_type;
+  IB_wqpn_t qpn;
+  HHUL_pd_hndl_t pd;
+  THH_uar_t uar; /* UAR to use for this QP */
+  char *av_ddr_base;
+  char *av_host_base;
+  MT_bool is_priv_ud_av; /* Privileged UD AVs are enforced */
+  VAPI_lkey_t ud_av_memkey; /* Memory key to put for UD AV handles */
+  HHUL_cq_hndl_t sq_cq;
+  HHUL_cq_hndl_t rq_cq;
+  void* wqe_buf_orig; /* Pointer returned by qpm_malloc_within_4GB() for WQE buffer */
+  MT_bool used_virt_alloc; /* Used "MOSAL_pci_virt_alloc_consistent" for buffer allocation */
+  MT_size_t wqe_buf_orig_size; /* size in bytes of wqe_buf_orig */
+  THHUL_qpm_dpool_t *dpool_p; /* If not NULL, wqe_buf_orig taken from this descriptors pool */
+  queue_res_t sq_res; /* Send queue resources */
+  queue_res_t rq_res; /* Receive queue resources */
+  HHUL_srq_hndl_t srq; /* Set to HHUL_INVAL_SRQ_HNDL if not associated with a SRQ */
+} *THHUL_qp_t;
+
+#define QPM_USE_FIXED_QP_ARRAY 1
+#define TOTAL_QP_ARRAY_PER_QPM 2048
+#define QPM_QP_PER_ARRAY (TOTAL_QP_ARRAY_PER_QPM/QP_HASH_TBL_SZ)
+#define QP_ARRAY_REUSE 0xFFFFFFFE /* reuse qp array */
+#define QP_ARRAY_UNUSED 0xFFFFFFFF /* first unused array */
+
+typedef struct qp_array_st {
+  IB_wqpn_t qpn;
+  THHUL_qp_t qp;
+} qp_array_t;
+
+
+typedef struct qp_hash_entry_st { /* QPN-to-QP hash table entry */
+  IB_wqpn_t qpn;
+  THHUL_qp_t qp;
+  struct qp_hash_entry_st *next; /* next in this hash bin */
+} qp_hash_entry_t;
+
+
+typedef struct qp_array_entry_st {
+  qp_array_t qp_array[QPM_QP_PER_ARRAY+1]; /* set last one to QP_ARRAY_UNUSED */
+} qp_array_entry_t;
+
+/* fixed array table is two dimensional.
+ * QP_ARRAY_UNUSED : next entry in array.
+ * QP_ARRAY_REUSE : Used previously, but qp was destroyed. This can be recycled.
+ * ex: array_tbl[i]->[qpn][QP_ARRAY_REUSE(can be reused)][qpn][QP_ARRAY_UNUSED(next entry)]...
+ */
+struct THHUL_qpm_st { /* THHUL_qpm_t is a pointer to this */
+  qp_hash_entry_t* hash_tbl[QP_HASH_TBL_SZ];
+  qp_array_entry_t array_tbl[QP_HASH_TBL_SZ];
+  u_int32_t qp_cnt; /* Total number of QPs */
+  MOSAL_spinlock_t hash_lock; /* used for qp_cnt protection, too */
+  THHUL_qpm_dpool_t *dpool_p[THHUL_DPOOL_SZ_MAX_KB - THHUL_DPOOL_SZ_MIN_KB + 1]; /* KB granularity */
+#ifdef THHUL_QPM_DEBUG_DPOOL
+  unsigned long dpool_cnt;
+#endif
+  MOSAL_mutex_t dpool_lock;
+  THHUL_srqm_t srqm;
+};
+
+/**********************************************************************************************
+ * Private functions prototypes declarations
+ **********************************************************************************************/
+static HH_ret_t qp_prep(
+  HHUL_hca_hndl_t hca,
+  VAPI_special_qp_t qp_type,
+  HHUL_qp_init_attr_t *qp_init_attr_p,
+  HHUL_qp_hndl_t *qp_hndl_p,
+  VAPI_qp_cap_t *qp_cap_out_p,
+  THH_qp_ul_resources_t *qp_ul_resources_p,
+  MT_bool in_ddr_mem /* WQEs buffer allocated in attached DDR mem. or in main memory */
+);
+
+static HH_ret_t init_qp(
+  HHUL_hca_hndl_t hca,
+  HHUL_qp_init_attr_t *qp_init_attr_p,
+  THHUL_qp_t new_qp
+);
+
+static HH_ret_t qpm_alloc_wqe_buf(
+  /*IN*/ THHUL_qpm_t qpm,
+  /*IN*/ MT_bool in_ddr_mem, /* Allocation of WQEs buffer is requested in attached DDR mem.
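+    (attached DDR is not cached by the host CPU - see the wqe_draft
+    implementation note in queue_res_t above)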
*/
+  /*IN*/ u_int32_t max_outs_wqes, /* HCA cap. */
+  /*IN*/ u_int32_t max_sg_ent, /* HCA cap. of max.s/g entries */
+  /*IN/OUT*/ THHUL_qp_t new_qp,
+  /*OUT*/ THH_qp_ul_resources_t *qp_ul_resources_p
+);
+
+static HH_ret_t qpm_alloc_aux_data_buf(
+  /*IN/OUT*/ THHUL_qp_t new_qp
+);
+
+static HH_ret_t insert_to_hash(THHUL_qpm_t qpm, THHUL_qp_t qp);
+
+static HH_ret_t remove_from_hash(THHUL_qpm_t qpm, THHUL_qp_t qp);
+
+#ifndef __KERNEL__
+static void* dpool_alloc(THHUL_qpm_t qpm, u_int8_t buf_size_kb, THHUL_qpm_dpool_t **dpool_pp);
+
+static void dpool_free(THHUL_qpm_t qpm, THHUL_qpm_dpool_t *dpool_p, void* buf);
+
+#else
+#define dpool_free(qpm,dpool_p,buf) \
+ MTL_ERROR1(MT_FLFMT("%s: Invoked dpool_free in kernel by mistake"), __func__)
+
+#endif
+
+/**********************************************************************************************
+ * Private inline functions
+ **********************************************************************************************/
+/* Compute hash value (bin index) for given QP number */
+inline static u_int32_t get_hash_index(IB_wqpn_t qpn)
+{
+  return (qpn & MASK32(LOG2_QP_HASH_TBL_SZ));
+}
+
+inline static HH_ret_t get_wqe_index(
+  /*IN*/ queue_res_t *q_res_p,
+  /*IN*/ u_int32_t wqe_addr_32lsb,
+  /*OUT*/ u_int32_t *wqe_index_p
+)
+{
+  u_int32_t wqe_buf_base_32lsb;
+
+  /* TBD: On QP resize this will have to be modified (buffers may change during QP life cycle) */
+
+  wqe_buf_base_32lsb= (u_int32_t)(q_res_p->wqe_buf);
+  if (wqe_addr_32lsb >= wqe_buf_base_32lsb) { /* Assure index computation is positive */
+    *wqe_index_p= (wqe_addr_32lsb - wqe_buf_base_32lsb) >> q_res_p->log2_max_wqe_sz;
+    if (*wqe_index_p < q_res_p->max_outs) { /* WQE is within this queue */
+      /* TBD: check if given wqe_addr_32lsb is aligned to WQE size */
+      return HH_OK;
+    }
+  }
+
+  return HH_EINVAL; /* WQE is not within this queue */
+}
+
+
+
+static void dump_qp(qp_hash_entry_t *qp_p)
+{
+  MTL_ERROR1("==== dump of qpn=%d ====\n", qp_p->qp->qpn);
+  MTL_ERROR1("sqp_type=%s\n", VAPI_special_qp_sym(qp_p->qp->sqp_type));
+  MTL_ERROR1("ts_type=%s\n", VAPI_ts_type_sym(qp_p->qp->ts_type));
+  MTL_ERROR1("pd=%lu\n", qp_p->qp->pd);
+  MTL_ERROR1("uar=%p\n", qp_p->qp->uar);
+  MTL_ERROR1("is_priv_ud_av=%s\n", qp_p->qp->is_priv_ud_av ? "Yes" : "No");
+  MTL_ERROR1("ud_av_memkey=0x%x\n", qp_p->qp->ud_av_memkey);
+  MTL_ERROR1("sq_cq=%p\n", qp_p->qp->sq_cq);
+  MTL_ERROR1("rq_cq=%p\n", qp_p->qp->rq_cq);
+  MTL_ERROR1("wqe_buf_orig=%p\n", qp_p->qp->wqe_buf_orig);
+  MTL_ERROR1("used_virt_alloc=%s\n", qp_p->qp->used_virt_alloc ?
"Yes" : "No"); + MTL_ERROR1("wqe_buf_orig_size="SIZE_T_FMT"\n", qp_p->qp->wqe_buf_orig_size); + MTL_ERROR1("dpool_p=%p\n", qp_p->qp->dpool_p); +} + + +#if QPM_USE_FIXED_QP_ARRAY + +inline static HH_ret_t find_wqe_from_array( + /*IN*/ THHUL_qpm_t qpm, + /*IN*/ IB_wqpn_t qpn, + /*IN*/ u_int32_t wqe_addr_32lsb, + /*OUT*/ THHUL_qp_t *qp_p, + /*OUT*/ queue_res_t **q_res_pp, + /*OUT*/ u_int32_t *wqe_index_p, + /*OUT*/ VAPI_wr_id_t *wqe_id_p +) +{ + + u_int32_t hash_index= get_hash_index(qpn); + int i = 0; + qp_array_entry_t *qp_array_p = &qpm->array_tbl[hash_index]; + IB_wqpn_t m_qpn = qp_array_p->qp_array[i].qpn; + + + while(m_qpn != QP_ARRAY_UNUSED) + { + if(m_qpn == qpn) + { + THHUL_qp_t qp = *qp_p= qp_array_p->qp_array[i].qp; + + /* check if this WQE is of SQ */ + if ((*wqe_index_p = (wqe_addr_32lsb - (u_int32_t)qp->sq_res.wqe_buf) >> qp->sq_res.log2_max_wqe_sz) + < qp->sq_res.max_outs) + { + + *q_res_pp= &((*qp_p)->sq_res); + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + /* check if this WQE is of RQ */ + if ((*qp_p)->srq == HHUL_INVAL_SRQ_HNDL) { + if ((*wqe_index_p = (wqe_addr_32lsb - (u_int32_t)qp->rq_res.wqe_buf) >> qp->rq_res.log2_max_wqe_sz) + < qp->rq_res.max_outs) + { + *q_res_pp= &((*qp_p)->rq_res); + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + } else { /* From SRQ ? */ + HH_ret_t rc; + *q_res_pp= NULL; + rc= THHUL_srqm_comp(qpm->srqm, (*qp_p)->srq, wqe_addr_32lsb, wqe_id_p); + if (rc == HH_OK) { + return HH_OK; + } + } + } + m_qpn = qp_array_p->qp_array[++i].qpn; + } + + return HH_EINVAL; /* Invalid WQE address for this QP */ +} +#endif + +/* Find the queue context from the QP number and WQE address - using the hash table */ +#if 0 /*find_wqe */ +inline static HH_ret_t find_wqe( + /*IN*/ THHUL_qpm_t qpm, + /*IN*/ IB_wqpn_t qpn, + /*IN*/ u_int32_t wqe_addr_32lsb, + /*OUT*/ THHUL_qp_t *qp_p, + /*OUT*/ queue_res_t **q_res_pp, + /*OUT*/ u_int32_t *wqe_index_p, + /*OUT*/ VAPI_wr_id_t *wqe_id_p +) +{ + u_int32_t hash_index= get_hash_index(qpn); + qp_hash_entry_t *cur_entry; + HH_ret_t rc; + +#if QPM_USE_FIXED_QP_ARRAY + if(find_wqe_from_array(qpm,qpn,wqe_addr_32lsb,qp_p,q_res_pp,wqe_index_p,wqe_id_p) == HH_OK) + return HH_OK; +#endif + + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + for (cur_entry= qpm->hash_tbl[hash_index]; cur_entry != NULL; + cur_entry= cur_entry->next) { + if (cur_entry->qpn == qpn) break; + } + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + if (cur_entry == NULL) { + MTL_ERROR1(MT_FLFMT("%s(pid="MT_PID_FMT"): failed to find qpn=0x%x in the hash table"), + __func__, MOSAL_getpid(), qpn); + return HH_EINVAL_QP_NUM; /* not found */ + } + *qp_p= cur_entry->qp; + + /* check if this WQE is of SQ */ + *q_res_pp= &((*qp_p)->sq_res); + rc= get_wqe_index(*q_res_pp,wqe_addr_32lsb,wqe_index_p); + if (rc == HH_OK) { + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + + /* check if this WQE is of RQ */ + if ((*qp_p)->srq == HHUL_INVAL_SRQ_HNDL) { + *q_res_pp= &((*qp_p)->rq_res); + rc= get_wqe_index(*q_res_pp,wqe_addr_32lsb,wqe_index_p); + if (rc == HH_OK) { + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + } else { /* From SRQ ? 
*/ + *q_res_pp= NULL; + rc= THHUL_srqm_comp(qpm->srqm, (*qp_p)->srq, wqe_addr_32lsb, wqe_id_p); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed to find WQE in SRQ (WQE=0x%X QPn=0x%X)"), __func__, + wqe_addr_32lsb, (*qp_p)->qpn); + } + } + + MTL_ERROR1(MT_FLFMT("%s(pid="MT_PID_FMT"): failed to find wqe"), __func__, MOSAL_getpid()); + dump_qp(cur_entry); + return rc; /* Invalid WQE address for this QP */ +} +#else /* find_wqe */ + +/* optimized version of find_wqe */ +inline static HH_ret_t find_wqe( + /*IN*/ THHUL_qpm_t qpm, + /*IN*/ IB_wqpn_t qpn, + /*IN*/ u_int32_t wqe_addr_32lsb, + /*OUT*/ THHUL_qp_t *qp_p, + /*OUT*/ queue_res_t **q_res_pp, + /*OUT*/ u_int32_t *wqe_index_p, + /*OUT*/ VAPI_wr_id_t *wqe_id_p +) +{ + u_int32_t hash_index; + qp_hash_entry_t *cur_entry; +#if QPM_USE_FIXED_QP_ARRAY + if(find_wqe_from_array(qpm,qpn,wqe_addr_32lsb,qp_p,q_res_pp,wqe_index_p,wqe_id_p) == HH_OK) + return HH_OK; +#endif + + hash_index = get_hash_index(qpn); + + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + for (cur_entry= qpm->hash_tbl[hash_index]; cur_entry != NULL; + cur_entry= cur_entry->next) + { + if (cur_entry->qpn == qpn) + { + THHUL_qp_t qp = *qp_p= cur_entry->qp; + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + /* check if this WQE is of SQ */ + if ((*wqe_index_p = (wqe_addr_32lsb - (u_int32_t)qp->sq_res.wqe_buf) >> qp->sq_res.log2_max_wqe_sz) + < qp->sq_res.max_outs) + { + *q_res_pp= &((*qp_p)->sq_res); + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + /* check if this WQE is of RQ */ + if ((*qp_p)->srq == HHUL_INVAL_SRQ_HNDL) + { + if ((*wqe_index_p = (wqe_addr_32lsb - (u_int32_t)qp->rq_res.wqe_buf) >> qp->rq_res.log2_max_wqe_sz) + < qp->rq_res.max_outs) + { + *q_res_pp= &((*qp_p)->rq_res); + *wqe_id_p= (*q_res_pp)->wqe_id[*wqe_index_p]; + return HH_OK; + } + } + else + { /* From SRQ ? 
*/ + HH_ret_t rc; + *q_res_pp= NULL; + rc= THHUL_srqm_comp(qpm->srqm, (*qp_p)->srq, wqe_addr_32lsb, wqe_id_p); + if (rc != HH_OK) + { + MTL_ERROR2(MT_FLFMT("%s: Failed to find WQE in SRQ (WQE=0x%X QPn=0x%X)"), __func__, + wqe_addr_32lsb, (*qp_p)->qpn); + } + return rc; + } + } + } + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + + return HH_EINVAL; /* Invalid WQE address for this QP */ +} +#endif /*find_wqe */ + +#if 0 /* valid_2send, valid2recv */ + +inline static MT_bool is_qpstate_valid_2send(VAPI_qp_state_t cur_state) +{ + switch (cur_state) { + case VAPI_RTS: + case VAPI_SQD: + case VAPI_ERR: + case VAPI_SQE: return TRUE; + break; + default: return FALSE; + } + +} +inline static MT_bool is_qpstate_valid_2recv(VAPI_qp_state_t cur_state) +{ + switch (cur_state) { + case VAPI_INIT: + case VAPI_RTR: + case VAPI_RTS: + case VAPI_SQD: + case VAPI_ERR: + case VAPI_SQE: return TRUE; + break; + default: return FALSE; + } + +} +#else /* valid_2send, valid2recv */ + +inline static bool is_qpstate_valid_2send(VAPI_qp_state_t cur_state) +{ + if(MOSAL_EXPECT_FALSE(cur_state < VAPI_RTS)) + return FALSE; + return TRUE; +} + +inline static bool is_qpstate_valid_2recv(VAPI_qp_state_t cur_state) +{ + if(MOSAL_EXPECT_FALSE(cur_state < VAPI_INIT)) + return FALSE; + return TRUE; +} + +#endif /* valid_2send, valid2recv */ + +inline static tavor_if_nopcode_t encode_nopcode(VAPI_wr_opcode_t opcode) +{ + switch (opcode) { + case VAPI_RDMA_WRITE: + return TAVOR_IF_NOPCODE_RDMAW; + case VAPI_RDMA_WRITE_WITH_IMM: + return TAVOR_IF_NOPCODE_RDMAW_IMM; + case VAPI_SEND: + return TAVOR_IF_NOPCODE_SEND; + case VAPI_SEND_WITH_IMM: + return TAVOR_IF_NOPCODE_SEND_IMM; + case VAPI_RDMA_READ: + return TAVOR_IF_NOPCODE_RDMAR; + case VAPI_ATOMIC_CMP_AND_SWP: + return TAVOR_IF_NOPCODE_ATOM_CMPSWP; + case VAPI_ATOMIC_FETCH_AND_ADD: + return TAVOR_IF_NOPCODE_ATOM_FTCHADD; + default: + return TAVOR_IF_NOPCODE_NOP; + } +} + +/*********** WQE building functions ***********/ +inline u_int64_t translate_av(THHUL_qp_t qp, u_int64_t ah) +{ + return ah - (u_int64_t)(MT_ulong_ptr_t)qp->av_host_base + (u_int64_t)(MT_ulong_ptr_t)qp->av_ddr_base; +} + +/* Init a not-connected (invalid) "next" segment (i.e. NDS=0) */ +#if 0 /* qpm_WQE_init_next */ +inline static u_int32_t qpm_WQE_init_next(u_int32_t *wqe_buf) +{ + memset(wqe_buf,0,WQE_SEG_SZ_NEXT); + return WQE_SEG_SZ_NEXT; +} +#else /* qpm_WQE_init_next */ +/* Optimized qpm_WQE_init_next */ +inline static u_int32_t qpm_WQE_init_next(u_int32_t *wqe_buf) +{ + /* WQE_SEG_SZ_NEXT = 8bytes, so write 64bit zero to address */ + *(u_int64_t *)wqe_buf = 0; + + return WQE_SEG_SZ_NEXT; +} +#endif + +inline static u_int32_t qpm_WQE_pack_send_next(u_int32_t *segment_p, + tavor_if_nopcode_t nopcode, MT_bool fence, u_int32_t dbd, + u_int32_t next_wqe_32lsb, u_int32_t wqe_sz_16B_chunks, + IB_eecn_t eecn) +{ + memset(segment_p,0,WQE_SEG_SZ_NEXT); /* Clear all "RESERVED" */ + segment_p[MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6)>>2]= next_wqe_32lsb & (~MASK32(6)); + MT_INSERT_ARRAY32(segment_p,nopcode, + MT_BIT_OFFSET(wqe_segment_next_st,nopcode),MT_BIT_SIZE(wqe_segment_next_st,nopcode)); + MT_INSERT_ARRAY32(segment_p,fence ? 
1 : 0, + MT_BIT_OFFSET(wqe_segment_next_st,f),MT_BIT_SIZE(wqe_segment_next_st,f)); + MT_INSERT_ARRAY32(segment_p,dbd, + MT_BIT_OFFSET(wqe_segment_next_st,dbd),MT_BIT_SIZE(wqe_segment_next_st,dbd)); + MT_INSERT_ARRAY32(segment_p,wqe_sz_16B_chunks, + MT_BIT_OFFSET(wqe_segment_next_st,nds),MT_BIT_SIZE(wqe_segment_next_st,nds)); + MT_INSERT_ARRAY32(segment_p,eecn, + MT_BIT_OFFSET(wqe_segment_next_st,nee),MT_BIT_SIZE(wqe_segment_next_st,nee)); + return WQE_SEG_SZ_NEXT; +} + +// u_int32_t offsets within wqe_segment_next_st structure +#define NEXT_ST_NDA_31_6_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6)>>2 +#define NEXT_ST_NDS_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_next_st,nds)>>2 + + // bit offsets within the given u_int32_t within wqe_segment_next_st +#define BIT_MASK_FOR_NEXT_WQE_31SB (~MASK32(6)) +#define NEXT_ST_NDS_BIT_OFFSET (MT_BIT_OFFSET(wqe_segment_next_st,nds) & 0x1f) +#define NEXT_ST_DBD_BIT_OFFSET (MT_BIT_OFFSET(wqe_segment_next_st,dbd) & 0x1f) +#define NEXT_ST_F_BIT_OFFSET (MT_BIT_OFFSET(wqe_segment_next_st,f) & 0x1f) +#define NEXT_ST_NEE_BIT_OFFSET (MT_BIT_OFFSET(wqe_segment_next_st,nee) & 0x1f) +/* Converted into Big Endian version */ +inline static u_int32_t WQE_pack_send_next_be(u_int32_t *segment_p, + tavor_if_nopcode_t nopcode, MT_bool fence, u_int32_t dbd, + u_int32_t next_wqe_32lsb, u_int32_t wqe_sz_16B_chunks, + IB_eecn_t eecn) +{ + segment_p[NEXT_ST_NDA_31_6_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (u_int32_t)nopcode + | (next_wqe_32lsb & BIT_MASK_FOR_NEXT_WQE_31SB )); + + + segment_p[NEXT_ST_NDS_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (wqe_sz_16B_chunks << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks + | (fence << NEXT_ST_F_BIT_OFFSET ) + | (dbd << NEXT_ST_DBD_BIT_OFFSET) + | (eecn << NEXT_ST_NEE_BIT_OFFSET) + ); + return WQE_SEG_SZ_NEXT; +} + +/* Pack Control segment (for sends) */ +inline static u_int32_t WQE_pack_ctrl_send(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, MT_bool se_bit, u_int32_t event_bit, + u_int32_t imm_data) +{ + memset(segment_p,0,WQE_SEG_SZ_CTRL); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32(segment_p,1, + MT_BIT_OFFSET(wqe_segment_ctrl_send_st,always1),MT_BIT_SIZE(wqe_segment_ctrl_send_st,always1)); + MT_INSERT_ARRAY32(segment_p,(comp_type == VAPI_SIGNALED) ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_send_st,c),MT_BIT_SIZE(wqe_segment_ctrl_send_st,c)); + MT_INSERT_ARRAY32(segment_p,se_bit ? 
1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_send_st,s),MT_BIT_SIZE(wqe_segment_ctrl_send_st,s)); + MT_INSERT_ARRAY32(segment_p,event_bit, + MT_BIT_OFFSET(wqe_segment_ctrl_send_st,e),MT_BIT_SIZE(wqe_segment_ctrl_send_st,e)); + segment_p[MT_BYTE_OFFSET(wqe_segment_ctrl_send_st,immediate)>>2]= imm_data; + return WQE_SEG_SZ_CTRL; +} + +/* Optimized version of WQE_pack_ctrl_send + * remove memset and pre-calculate offsets + */ +#define CTRL_SEND_IMMEDIATE_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ctrl_send_st,immediate)>>2 +#define CTRL_SEND_RESERVED0_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ctrl_send_st,reserved0)>>2 +#define CTRL_SEND_ALWAYS_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_send_st,always1) +#define CTRL_SEND_S_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_send_st,s) +#define CTRL_SEND_C_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_send_st,c) +#define CTRL_SEND_E_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_send_st,e) + +inline static u_int32_t WQE_pack_ctrl_send_be(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, MT_bool se_bit, u_int32_t event_bit, + u_int32_t imm_data) +{ + + u_int32_t *cur_loc_p = segment_p; + segment_p[CTRL_SEND_RESERVED0_DWORD_OFFSET] = 0; + WQE_IO_WRITE(&cur_loc_p[0], MOSAL_cpu_to_be32(0 + | (1 << CTRL_SEND_ALWAYS_BIT_OFFSET ) // this bit must be on + | ((u_int32_t)(se_bit & 1) << CTRL_SEND_S_BIT_OFFSET ) // solicited event bit + | (0 << CTRL_SEND_E_BIT_OFFSET ) // event bit is always zero right now + | ((u_int32_t)((comp_type + 1) & 1) << CTRL_SEND_C_BIT_OFFSET ) + )); + + WQE_IO_WRITE(&cur_loc_p[CTRL_SEND_IMMEDIATE_DWORD_OFFSET], imm_data); + + return WQE_SEG_SZ_CTRL; +}
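The *_be packers replace the generic memset + MT_INSERT_ARRAY32 path with pre-computed dword offsets, so a whole control dword is composed in a register and stored with a single byte-swap instead of a clear plus several read-modify-write inserts. A minimal self-contained sketch of the pattern (the bit positions and the swap helper below are illustrative stand-ins, not the THHUL/PRM definitions):

#include <stdint.h>

/* illustrative bit positions within the control dword (not the real PRM values) */
#define EX_ALWAYS1_BIT 31
#define EX_SE_BIT      30
#define EX_C_BIT       29

/* stand-in for MOSAL_cpu_to_be32 on a little-endian host */
static uint32_t ex_cpu_to_be32(uint32_t x)
{
    return ((x >> 24) & 0x000000FFu) | ((x >> 8) & 0x0000FF00u) |
           ((x << 8) & 0x00FF0000u) | ((x << 24) & 0xFF000000u);
}

/* compose the whole dword in a register, then store it with one byte-swap */
static void ex_pack_send_ctrl(uint32_t *seg, int signaled, int se_bit)
{
    seg[0] = ex_cpu_to_be32((1u << EX_ALWAYS1_BIT) |
                            ((uint32_t)(se_bit & 1) << EX_SE_BIT) |
                            ((uint32_t)(signaled & 1) << EX_C_BIT));
}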
+ +inline static u_int32_t WQE_pack_ud(u_int32_t *segment_p, + VAPI_lkey_t ud_av_memkey, u_int64_t ah, + IB_wqpn_t destination_qp, IB_qkey_t q_key) +{ + memset(segment_p,0,WQE_SEG_SZ_UD); /* Clear all "RESERVED" */ + segment_p[MT_BYTE_OFFSET(wqe_segment_ud_st,l_key)>>2]= ud_av_memkey; + segment_p[MT_BYTE_OFFSET(wqe_segment_ud_st,av_address_63_32)>>2]= (u_int32_t)(ah>>32); + segment_p[MT_BYTE_OFFSET(wqe_segment_ud_st,av_address_31_5)>>2]= ((u_int32_t)ah & (~MASK32(5)) ); + MT_INSERT_ARRAY32(segment_p,destination_qp, + MT_BIT_OFFSET(wqe_segment_ud_st,destination_qp), + MT_BIT_SIZE(wqe_segment_ud_st,destination_qp)); + segment_p[MT_BYTE_OFFSET(wqe_segment_ud_st,q_key)>>2]= q_key; + return WQE_SEG_SZ_UD; +} + +/* Optimized version of WQE_pack_ud + * remove memset and pre-calculate offset + */ +#define UD_ST_RESERV0_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,reserved0)>>2 +#define UD_ST_RESERV1_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,reserved1)>>2 +#define UD_ST_RESERV2_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,reserved2)>>2 +#define UD_ST_RESERV2_DWORD_OFFSET1 ((MT_BYTE_OFFSET(wqe_segment_ud_st,reserved2)>>2)+ 1) +#define UD_ST_RESERV2_DWORD_OFFSET2 ((MT_BYTE_OFFSET(wqe_segment_ud_st,reserved2)>>2)+ 2) +#define UD_ST_RESERV2_DWORD_OFFSET3 ((MT_BYTE_OFFSET(wqe_segment_ud_st,reserved2)>>2)+ 3) + +#define UD_ST_RESERV3_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,reserved3)>>2 +#define UD_ST_RESERV4_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,reserved4)>>2 +#define UD_ST_RESERV4_DWORD_OFFSET1 ((MT_BYTE_OFFSET(wqe_segment_ud_st,reserved4)>>2) + 1) + +#define UD_ST_LKEY_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,l_key)>>2 +#define UD_ST_ADDR_63_32_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,av_address_63_32)>>2 +#define UD_ST_ADDR_31_5_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,av_address_31_5)>>2 +#define UD_ST_DESTINATION_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,destination_qp)>>2 +#define UD_ST_QKEY_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ud_st,q_key)>>2 +#define UD_ST_AH_MASK (~MASK32(5)) + +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_ud_be(u_int32_t *segment_p, + VAPI_lkey_t ud_av_memkey, u_int64_t ah, + IB_wqpn_t destination_qp, IB_qkey_t q_key) +{ + /* Clear all "RESERVED" */ + /* zero out reserved fields, look at wqe_segment_ud_st in MT23108_PRM_append.h */ + segment_p[UD_ST_RESERV0_DWORD_OFFSET] = 0; + segment_p[UD_ST_RESERV1_DWORD_OFFSET] = 0; + segment_p[UD_ST_RESERV2_DWORD_OFFSET] = 0; + segment_p[UD_ST_RESERV2_DWORD_OFFSET1] = 0; + segment_p[UD_ST_RESERV2_DWORD_OFFSET2] = 0; + segment_p[UD_ST_RESERV2_DWORD_OFFSET3] = 0; + + segment_p[UD_ST_RESERV4_DWORD_OFFSET] = 0; + segment_p[UD_ST_RESERV4_DWORD_OFFSET1] = 0; + + segment_p[UD_ST_LKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(ud_av_memkey); + segment_p[UD_ST_ADDR_63_32_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(ah>>32)); + segment_p[UD_ST_ADDR_31_5_DWORD_OFFSET]= MOSAL_cpu_to_be32(((u_int32_t)ah & UD_ST_AH_MASK )); +#ifdef WIN32 + segment_p[UD_ST_DESTINATION_DWORD_OFFSET] = destination_qp; + segment_p[UD_ST_QKEY_DWORD_OFFSET]= q_key; +#else + segment_p[UD_ST_DESTINATION_DWORD_OFFSET] = MOSAL_cpu_to_be32(destination_qp); + segment_p[UD_ST_QKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(q_key); +#endif + return WQE_SEG_SZ_UD; +} + +inline static u_int32_t WQE_pack_rd(u_int32_t *segment_p, + IB_wqpn_t destination_qp, IB_qkey_t q_key) +{ + memset(segment_p,0,WQE_SEG_SZ_RD); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32(segment_p,destination_qp, + MT_BIT_OFFSET(wqe_segment_rd_st,destination_qp), + MT_BIT_SIZE(wqe_segment_rd_st,destination_qp)); + segment_p[MT_BYTE_OFFSET(wqe_segment_rd_st,q_key)>>2]= q_key; + return WQE_SEG_SZ_RD; +} + +/* Optimized version + * remove memset and pre-calculate offset + */ +#define RD_ST_RESERV0_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_rd_st,reserved0)>>2 +#define RD_ST_RESERV1_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_rd_st,reserved1)>>2 +#define RD_ST_RESERV1_DWORD_OFFSET1 ((MT_BYTE_OFFSET(wqe_segment_rd_st,reserved1)>>2)+1) + +#define RD_ST_DESTINATION_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_rd_st,destination_qp)>>2 +#define RD_ST_QKEY_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_rd_st,q_key)>>2 + +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_rd_be(u_int32_t *segment_p, + IB_wqpn_t destination_qp, IB_qkey_t q_key) +{ + segment_p[RD_ST_RESERV1_DWORD_OFFSET] = 0; + segment_p[RD_ST_RESERV1_DWORD_OFFSET1] = 0; + + segment_p[RD_ST_DESTINATION_DWORD_OFFSET]= MOSAL_cpu_to_be32(destination_qp); + segment_p[RD_ST_QKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(q_key); + + return WQE_SEG_SZ_RD; +} + + +inline static u_int32_t WQE_pack_remote_addr(u_int32_t *segment_p, + IB_virt_addr_t remote_addr, IB_rkey_t remote_rkey) +{ + memset(segment_p,0,WQE_SEG_SZ_RADDR); /* Clear all "RESERVED" */ + segment_p[MT_BYTE_OFFSET(wqe_segment_remote_address_st,remote_virt_addr_h)>>2]= + (u_int32_t)(remote_addr >> 32); + segment_p[MT_BYTE_OFFSET(wqe_segment_remote_address_st,remote_virt_addr_l)>>2]= + (u_int32_t)(remote_addr & 0xFFFFFFFF); + segment_p[MT_BYTE_OFFSET(wqe_segment_remote_address_st,rkey)>>2]= remote_rkey; + return WQE_SEG_SZ_RADDR; +} + + +/* Optimized version of WQE_pack_remote_addr + * remove memset and pre-calculate offset + */
+#define REMOTE_ADR_ST_RESERV0_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_remote_address_st,reserved0)>>2 +#define REMOTE_ADR_VIRT_ADDR_H_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_remote_address_st,remote_virt_addr_h)>>2 +#define REMOTE_ADR_VIRT_ADDR_L_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_remote_address_st,remote_virt_addr_l)>>2 +#define REMOTE_ADR_RKEY_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_remote_address_st,rkey)>>2 + +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_remote_addr_be(u_int32_t *segment_p, + IB_virt_addr_t remote_addr, IB_rkey_t remote_rkey) +{ + segment_p[REMOTE_ADR_ST_RESERV0_DWORD_OFFSET] = 0; + + segment_p[REMOTE_ADR_VIRT_ADDR_H_DWORD_OFFSET]= + MOSAL_cpu_to_be32((u_int32_t)(remote_addr >> 32)); + segment_p[REMOTE_ADR_VIRT_ADDR_L_DWORD_OFFSET]= + MOSAL_cpu_to_be32((u_int32_t)(remote_addr)); + segment_p[REMOTE_ADR_RKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(remote_rkey); + + return WQE_SEG_SZ_RADDR; +} +/* this is the same as WQE_pack_remote_addr but returns the number of DWORDs (32 bits) + * written, instead of bytes. + */ +inline static u_int32_t WQE_pack_remote_addr_req2(u_int32_t *segment_p, + IB_virt_addr_t remote_addr, IB_rkey_t remote_rkey) +{ +#define WQE_SEG_SZ_RADDR_DWORD WQE_SEG_SZ_RADDR>>2 + segment_p[REMOTE_ADR_ST_RESERV0_DWORD_OFFSET] = 0; + + segment_p[REMOTE_ADR_VIRT_ADDR_H_DWORD_OFFSET]= + MOSAL_cpu_to_be32((u_int32_t)(remote_addr >> 32)); + segment_p[REMOTE_ADR_VIRT_ADDR_L_DWORD_OFFSET]= + MOSAL_cpu_to_be32((u_int32_t)(remote_addr)); + segment_p[REMOTE_ADR_RKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(remote_rkey); + + return WQE_SEG_SZ_RADDR_DWORD; +} + +inline static u_int32_t qpm_WQE_pack_recv_next(u_int32_t *segment_p, + u_int32_t next_wqe_32lsb, u_int32_t wqe_sz_16B_chunks) +{ + memset(segment_p,0,WQE_SEG_SZ_NEXT); /* Clear all "RESERVED" */ + segment_p[MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6)>>2]= ( next_wqe_32lsb & (~MASK32(6)) ) + | 1 ; /* LS-bit is set to work around bug #16159/16160/16161 */ + MT_INSERT_ARRAY32(segment_p,1, /* DBD always '1 for RQ */ + MT_BIT_OFFSET(wqe_segment_next_st,dbd),MT_BIT_SIZE(wqe_segment_next_st,dbd)); + MT_INSERT_ARRAY32(segment_p,wqe_sz_16B_chunks, + MT_BIT_OFFSET(wqe_segment_next_st,nds),MT_BIT_SIZE(wqe_segment_next_st,nds)); + return WQE_SEG_SZ_NEXT; +} + +/* Optimized Version */ +/* remove memset */ +/* pre calculation for WQE_pack_recv_next */ +#define NEXT_ST_NDA_31_6_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6)>>2 +#define NEXT_ST_NDS_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_next_st,nds)>>2 + +#define BIT_MASK_FOR_NEXT_WQE_31SB (~MASK32(6)) + +inline static u_int32_t WQE_pack_recv_next_be(u_int32_t *segment_p, + u_int32_t next_wqe_32lsb, u_int32_t wqe_sz_16B_chunks) +{ + + segment_p[NEXT_ST_NDA_31_6_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (next_wqe_32lsb & BIT_MASK_FOR_NEXT_WQE_31SB )); + segment_p[NEXT_ST_NDS_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (wqe_sz_16B_chunks << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks + | (1 << NEXT_ST_DBD_BIT_OFFSET) + ); + + return WQE_SEG_SZ_NEXT; +} + +inline static u_int32_t WQE_pack_atomic_cmpswp(u_int32_t *segment_p, + u_int64_t cmp_data, u_int64_t swap_data) +{ + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_h)>>2]= (u_int32_t)(swap_data >> 32); + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_l)>>2]= (u_int32_t)(swap_data & 0xFFFFFFFF); + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,compare_h)>>2]= (u_int32_t)(cmp_data >> 32); + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,compare_l)>>2]= (u_int32_t)(cmp_data & 0xFFFFFFFF); + return WQE_SEG_SZ_ATOMIC; +}
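The atomic segment carries each 64-bit operand as two big-endian dwords, high half at the lower offset, which is what the shift/truncate pairs above and the *_be variants below implement. Under that layout assumption, the whole thing is just the operand serialized MSB-first; a small illustrative sketch:

#include <stdint.h>

/* serialize a 64-bit atomic operand MSB-first: byte-wise this equals
 * storing the high dword big-endian first, then the low dword */
static void ex_pack_operand_be(uint8_t *dst, uint64_t operand)
{
    int i;
    for (i = 0; i < 8; i++)
        dst[i] = (uint8_t)(operand >> (56 - 8 * i));
}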
+ +#define ATOMIC_ST_SWAP_ADDR_H_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_h)>>2 +#define ATOMIC_ST_SWAP_ADDR_L_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_l)>>2 +#define ATOMIC_ST_COMPARE_H_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,compare_h)>>2 +#define ATOMIC_ST_COMPARE_L_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,compare_l)>>2 +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_atomic_cmpswp_be(u_int32_t *segment_p, + u_int64_t cmp_data, u_int64_t swap_data) +{ + segment_p[ATOMIC_ST_SWAP_ADDR_H_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(swap_data >> 32)); + segment_p[ATOMIC_ST_SWAP_ADDR_L_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(swap_data )); + segment_p[ATOMIC_ST_COMPARE_H_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(cmp_data >> 32)); + segment_p[ATOMIC_ST_COMPARE_L_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(cmp_data )); + + return WQE_SEG_SZ_ATOMIC; +} + +inline static u_int32_t WQE_pack_atomic_fetchadd(u_int32_t *segment_p,u_int64_t add_data) +{ + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_h)>>2]= (u_int32_t)(add_data >> 32); + segment_p[MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_l)>>2]= (u_int32_t)(add_data & 0xFFFFFFFF); + return WQE_SEG_SZ_ATOMIC; +} + +#define ATOMIC_ST_SWAP_ADDR_H_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_h)>>2 +#define ATOMIC_ST_SWAP_ADDR_L_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_atomic_st,swap_add_l)>>2 +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_atomic_fetchadd_be(u_int32_t *segment_p,u_int64_t add_data) +{ + segment_p[ATOMIC_ST_SWAP_ADDR_H_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(add_data >> 32)); + segment_p[ATOMIC_ST_SWAP_ADDR_L_DWORD_OFFSET]= MOSAL_cpu_to_be32((u_int32_t)(add_data & 0xFFFFFFFF)); + + return WQE_SEG_SZ_ATOMIC; +} + +/* Build the scatter/gather list (pointer segments) */ +#define DATA_PTR_BYTE_COUNT_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_data_ptr_st,byte_count)>>2 +#define DATA_PTR_LKEY_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_data_ptr_st,l_key)>>2 +#define DATA_PTR_LOCAL_ADDR_H_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_h)>>2 +#define DATA_PTR_LOCAL_ADDR_L_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_l)>>2 +#define DATA_PTR_LEN_MASK MASK32(31) + +inline static u_int32_t WQE_pack_sg_list(u_int32_t *segment_p, + u_int32_t sg_lst_len,VAPI_sg_lst_entry_t *sg_lst_p) +{ + u_int32_t i; + u_int32_t *cur_loc_p= segment_p; + + for (i= 0; i < sg_lst_len; i++ , cur_loc_p+= WQE_SEG_SZ_SG_ENTRY_DW) { + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,byte_count)>>2]= + (sg_lst_p[i].len & MASK32(31)); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,l_key)>>2]= sg_lst_p[i].lkey; + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_h)>>2]= + (u_int32_t)(sg_lst_p[i].addr >> 32); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_l)>>2]= + (u_int32_t)(sg_lst_p[i].addr & 0xFFFFFFFF); + } + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)segment_p)); +} + +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_sg_list_be(u_int32_t *segment_p, + u_int32_t sg_lst_len,VAPI_sg_lst_entry_t *sg_lst_p) +{ + u_int32_t i; + u_int32_t *cur_loc_p= segment_p; + + for (i= 0; i < sg_lst_len; i++ , cur_loc_p+= WQE_SEG_SZ_SG_ENTRY_DW) { + cur_loc_p[DATA_PTR_BYTE_COUNT_DWORD_OFFSET]= + MOSAL_cpu_to_be32((sg_lst_p[i].len & DATA_PTR_LEN_MASK )); + cur_loc_p[DATA_PTR_LKEY_DWORD_OFFSET]= MOSAL_cpu_to_be32(sg_lst_p[i].lkey); + cur_loc_p[DATA_PTR_LOCAL_ADDR_H_DWORD_OFFSET]= + MOSAL_cpu_to_be32((u_int32_t)(sg_lst_p[i].addr >> 32)); + cur_loc_p[DATA_PTR_LOCAL_ADDR_L_DWORD_OFFSET]= +
MOSAL_cpu_to_be32((u_int32_t)(sg_lst_p[i].addr)); + } + return (u_int32_t)(((MT_ulong_ptr_t)cur_loc_p) - ((MT_ulong_ptr_t)segment_p)); +} + +/* Build the WQE in given wqe_buf. + * Return WQE size. + */ +inline static u_int32_t WQE_build_send( + THHUL_qp_t qp, + VAPI_sr_desc_t *send_req_p, + u_int32_t *wqe_buf) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_send((u_int32_t*)cur_loc_p, /* Pack Control segment */ + send_req_p->comp_type, send_req_p->set_se, 0/*event bit*/, + ((send_req_p->opcode == VAPI_RDMA_WRITE_WITH_IMM) || + (send_req_p->opcode == VAPI_SEND_WITH_IMM) ) ? send_req_p->imm_data : 0); + + /* Transport type checks: Datagram segment */ + switch (qp->ts_type) { + case VAPI_TS_UD: /* Check if UD (UD datagram segment) */ + cur_loc_p+= WQE_pack_ud((u_int32_t*)cur_loc_p, + qp->ud_av_memkey,translate_av(qp, (u_int64_t)send_req_p->remote_ah), + send_req_p->remote_qp,send_req_p->remote_qkey); + break; + case VAPI_TS_RD: /* Check if RD (RD datagram segment) */ + cur_loc_p+= WQE_pack_rd((u_int32_t*)cur_loc_p, + send_req_p->remote_qp,send_req_p->remote_qkey); + break; + default: + break; + } + + /* Opcode checks Remote-address/Atomic segments */ + switch (send_req_p->opcode) { + /* For RDMA operations: only Remote-address segment */ + case VAPI_RDMA_READ: + case VAPI_RDMA_WRITE: + case VAPI_RDMA_WRITE_WITH_IMM: + cur_loc_p+= WQE_pack_remote_addr((u_int32_t*)cur_loc_p, + send_req_p->remote_addr,send_req_p->r_key); + break; + + /* Check if Atomic operations (both remote-address and Atomic segments) */ + case VAPI_ATOMIC_CMP_AND_SWP: + cur_loc_p+= WQE_pack_remote_addr((u_int32_t*)cur_loc_p,send_req_p->remote_addr, + send_req_p->r_key); + cur_loc_p+= WQE_pack_atomic_cmpswp((u_int32_t*)cur_loc_p,send_req_p->compare_add, + send_req_p->swap); + break; + case VAPI_ATOMIC_FETCH_AND_ADD: + cur_loc_p+= WQE_pack_remote_addr((u_int32_t*)cur_loc_p,send_req_p->remote_addr, + send_req_p->r_key); + cur_loc_p+= WQE_pack_atomic_fetchadd((u_int32_t*)cur_loc_p,send_req_p->compare_add); + break; + default: /*NOP*/ + break; + } + + /* Pack scatter/gather list segments */ + cur_loc_p+= WQE_pack_sg_list((u_int32_t*)cur_loc_p,send_req_p->sg_lst_len,send_req_p->sg_lst_p); + + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_buf)); +} + + +/* Build Big Endian version of WQE_build_send + * to remove extra copy from wqe_draft to wqe_buf. + */ +inline static u_int32_t WQE_build_send_be( + THHUL_qp_t qp, + VAPI_sr_desc_t *send_req_p, + u_int32_t *wqe_buf) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_send_be((u_int32_t*)cur_loc_p, /* Pack Control segment */ + send_req_p->comp_type, send_req_p->set_se, 0/*event bit*/, + ((send_req_p->opcode == VAPI_RDMA_WRITE_WITH_IMM) || + (send_req_p->opcode == VAPI_SEND_WITH_IMM) ) ? 
send_req_p->imm_data : 0); + + /* Transport type checks: Datagram segment */ + switch (qp->ts_type) { + case VAPI_TS_UD: /* Check if UD (UD datagram segment) */ + cur_loc_p+= WQE_pack_ud_be((u_int32_t*)cur_loc_p, + qp->ud_av_memkey,translate_av(qp, (u_int64_t)send_req_p->remote_ah), + send_req_p->remote_qp,send_req_p->remote_qkey); + break; + case VAPI_TS_RD: /* Check if RD (RD datagram segment) */ + cur_loc_p+= WQE_pack_rd_be((u_int32_t*)cur_loc_p, + send_req_p->remote_qp,send_req_p->remote_qkey); + break; + default: + break; + } + + /* Opcode checks Remote-address/Atomic segments */ + switch (send_req_p->opcode) { + /* For RDMA operations: only Remote-address segment */ + case VAPI_RDMA_READ: + case VAPI_RDMA_WRITE: + case VAPI_RDMA_WRITE_WITH_IMM: + cur_loc_p+= WQE_pack_remote_addr_be((u_int32_t*)cur_loc_p, + send_req_p->remote_addr,send_req_p->r_key); + break; + + /* Check if Atomic operations (both remote-address and Atomic segments) */ + case VAPI_ATOMIC_CMP_AND_SWP: + cur_loc_p+= WQE_pack_remote_addr_be((u_int32_t*)cur_loc_p,send_req_p->remote_addr, + send_req_p->r_key); + cur_loc_p+= WQE_pack_atomic_cmpswp_be((u_int32_t*)cur_loc_p,send_req_p->compare_add, + send_req_p->swap); + break; + case VAPI_ATOMIC_FETCH_AND_ADD: + cur_loc_p+= WQE_pack_remote_addr_be((u_int32_t*)cur_loc_p,send_req_p->remote_addr, + send_req_p->r_key); + cur_loc_p+= WQE_pack_atomic_fetchadd_be((u_int32_t*)cur_loc_p,send_req_p->compare_add); + break; + default: /*NOP*/ + break; + } + + /* Pack scatter/gather list segments */ + if(MOSAL_EXPECT_FALSE(send_req_p->sg_lst_len == 0 || send_req_p->sg_lst_p->len == 0)) + return (u_int32_t)(((MT_ulong_ptr_t)cur_loc_p) - ((MT_ulong_ptr_t)wqe_buf)); + + cur_loc_p+= WQE_pack_sg_list_be((u_int32_t*)cur_loc_p,send_req_p->sg_lst_len,send_req_p->sg_lst_p); + + return (u_int32_t)(((MT_ulong_ptr_t)cur_loc_p) - ((MT_ulong_ptr_t)wqe_buf)); +} + + + +/* This is an optimized version of WQE_build_send. + * This function can eliminate extra code because req2 + * only supports ReliableConnection and UnreliableDatagram. + * sg_list and remote_addr building is done in a separate function, + * which uses the IbAccess structures directly. See thhul_qpm_iba.h + */ +inline static u_int32_t* WQE_build_send_be_req2( + THHUL_qp_t qp, + u_int32_t *wqe_buf, + VAPI_comp_type_t comp_type, + u_int64_t remote_ah, + IB_wqpn_t remote_qp, + IB_qkey_t remote_qkey, + MT_bool set_se, + u_int32_t imm_data + ) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_send_be((u_int32_t*)cur_loc_p, /* Pack Control segment */ + comp_type, set_se, 0/*event bit*/, + imm_data); + + /* Transport type checks: Datagram segment */ + /* Req2 supports only ReliableConnection and UnreliableDatagram */ + if(MOSAL_EXPECT_FALSE(qp->ts_type == VAPI_TS_UD)) + { + /* Check if UD (UD datagram segment) */ + cur_loc_p+= WQE_pack_ud_be((u_int32_t*)cur_loc_p, + qp->ud_av_memkey,translate_av(qp, (u_int64_t)remote_ah), + remote_qp,remote_qkey); + } + return (u_int32_t*)cur_loc_p; +}
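WQE_build_send_be_req2 drops the opcode switch entirely and leaves only the UD datagram branch, guarded by MOSAL_EXPECT_FALSE so the compiler lays out the RC path as the straight-line case. Assuming MOSAL_EXPECT_FALSE is a __builtin_expect-style hint (the real MOSAL definition may differ), the idiom is:

#if defined(__GNUC__)
#define EX_EXPECT_FALSE(cond) __builtin_expect(!!(cond), 0)
#else
#define EX_EXPECT_FALSE(cond) (cond) /* fallback: no hint */
#endif

enum ex_ts_type { EX_TS_RC = 0, EX_TS_UD = 1 }; /* illustrative values */

/* RC is the assumed hot case; the UD test is laid out as the unlikely branch */
static int ex_needs_datagram_segment(enum ex_ts_type ts)
{
    return EX_EXPECT_FALSE(ts == EX_TS_UD);
}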
/* This is an optimized version of WQE_build_send. + * This function can eliminate extra code because req3 + * only supports ReliableConnection and UnreliableDatagram. + * sg_list and remote_addr building is done in a separate function, + * which uses the IBAL structures directly. See thhul_qpm_ibal.h + */ +inline static u_int32_t* WQE_build_send_be_req3( + THHUL_qp_t qp, + u_int32_t *wqe_buf, + VAPI_comp_type_t comp_type, + ib_send_wr_t *p_wr, + MT_bool set_se, + u_int32_t imm_data + ) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_send_be((u_int32_t*)cur_loc_p, /* Pack Control segment */ + comp_type, set_se, 0/*event bit*/, + imm_data); + + /* Transport type checks: Datagram segment */ + /* Req3 supports only ReliableConnection and UnreliableDatagram */ + if(MOSAL_EXPECT_FALSE(qp->ts_type == VAPI_TS_UD)) + { + /* Check if UD (UD datagram segment) */ + cur_loc_p+= WQE_pack_ud_be((u_int32_t*)cur_loc_p, + qp->ud_av_memkey, + translate_av(qp, (u_int64_t)p_wr->dgrm.ud.h_av->h_av), + p_wr->dgrm.ud.remote_qp, p_wr->dgrm.ud.remote_qkey); + } + return (u_int32_t*)cur_loc_p; +} + + +/* Build UD header as inline data for management QPs over MLX "transport" */ +inline static u_int32_t WQE_pack_mlx_ud_header(u_int32_t *segment_p, + THHUL_qp_t qp, VAPI_sr_desc_t *send_req_p, VAPI_ud_av_t *av_p, HH_hca_hndl_t hh_hndl, + VAPI_pkey_ix_t pkey_index /* take this index instead of QP's, if not QP1_PKEY_INDEX */) +{ + MPGA_headers_t *hdrs; + IB_LRH_st *LRH_p; + IB_BTH_st *BTH_p; + IB_DETH_st *DETH_p; + u_int8_t *hdrs_buf_p; +#ifdef MT_LITTLE_ENDIAN + u_int32_t *hdrs_buf32_p; /* pointer for endianness swapping */ + u_int16_t i; +#endif + u_int16_t hdrs_sz; + MT_bool global= av_p->grh_flag; +#ifdef MT_KERNEL + IB_port_t num_ports; + IB_port_t port= (qp->qpn & 0xf); /* QPN of QP used for port 1 has the even index */ + IB_pkey_t cur_pkey= 0; + HH_ret_t rc; + + rc= THH_hob_get_num_ports(hh_hndl,&num_ports); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("Could not get number of HCA ports (%s).\n"),HH_strerror_sym(rc)); + return 0; + } + port = (port >= num_ports) ? ((port-num_ports)%num_ports)+1 : (port % num_ports)+1; +#endif + + hdrs_sz= IB_LRH_LEN+IB_BTH_LEN+IB_DETH_LEN; + if (global) hdrs_sz+= IB_GRH_LEN; + + /* Set inline entry control */ + *segment_p= ((1<<31) | hdrs_sz) ; /* inline entry | ByteCount */ + hdrs_buf_p= ((u_int8_t*)segment_p) + WQE_INLINE_SZ_BCOUNT /* inline ctrl */ + hdrs_sz; + + /* Put headers data into MPGA structures */ + hdrs = &qp->sq_res.wqe_tmp->hdrs; + if (global) { + LRH_p= &(hdrs->MPGA_G_ud_send_only.IB_LRH); + BTH_p= &(hdrs->MPGA_G_ud_send_only.IB_BTH); + DETH_p= &(hdrs->MPGA_G_ud_send_only.IB_DETH); + /* Set GRH fields */ + hdrs->MPGA_G_ud_send_only.IB_GRH.IPVer= 6; /* ? 
*/ + hdrs->MPGA_G_ud_send_only.IB_GRH.TClass= av_p->traffic_class; + hdrs->MPGA_G_ud_send_only.IB_GRH.FlowLabel= av_p->flow_label; + hdrs->MPGA_G_ud_send_only.IB_GRH.PayLen= IB_BTH_LEN+IB_DETH_LEN+IB_MAD_LEN+IB_ICRC_SZ; + hdrs->MPGA_G_ud_send_only.IB_GRH.NxtHdr= 0x1B; /* IB-spec.: compliancy statement C8-7 */ + hdrs->MPGA_G_ud_send_only.IB_GRH.HopLmt= av_p->hop_limit; + memcpy(&(hdrs->MPGA_G_ud_send_only.IB_GRH.DGID),&(av_p->dgid),sizeof(IB_gid_t)); +#ifdef MT_KERNEL + /* SGID field is supported only in kernel space, due to limited access to the GID table */ + rc= THH_hob_get_sgid(hh_hndl,port,av_p->sgid_index, + &(hdrs->MPGA_G_ud_send_only.IB_GRH.SGID)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("Error in GID table access (%s).\n"),HH_strerror_sym(rc)); + return 0; + } +#endif + + } else { /* local - no GRH */ + LRH_p= &(hdrs->MPGA_ud_send_only.IB_LRH); + BTH_p= &(hdrs->MPGA_ud_send_only.IB_BTH); + DETH_p= &(hdrs->MPGA_ud_send_only.IB_DETH); + } + + /* Set LRH fields */ + memset(LRH_p,0,sizeof(IB_LRH_st)); + /* VL must be set for internal loopback ("vl15" bit is ignored) */ + if (qp->sqp_type == VAPI_SMI_QP) LRH_p->VL= 15; + else LRH_p->VL= 0; + LRH_p->LVer= 0; + LRH_p->SL= av_p->sl; + LRH_p->LNH= global ? IBA_GLOBAL : IBA_LOCAL; + + LRH_p->DLID= av_p->dlid; + LRH_p->SLID= (av_p->dlid == PERMIS_LID) ? PERMIS_LID : (IB_lid_t) av_p->src_path_bits; + /* If DLID is permissive LID, we set SLID to the permissive LID too. */ + /* Otherwise, we put in the SLID field the source path bits, and SLR=0, so */ + /* the LID is composed of actual port's LID concatenated with given path bits */ + + LRH_p->PktLen= (hdrs_sz+IB_MAD_LEN + IB_ICRC_SZ) >> 2; + /* Set BTH fields */ + memset(BTH_p,0,sizeof(IB_BTH_st)); + BTH_p->OpCode= UD_SEND_ONLY_OP; + BTH_p->SE= send_req_p->set_se; + BTH_p->M= 1; + BTH_p->PadCnt= 0; /* MADs are always 4byte multiple */ + BTH_p->TVer= 0; +#ifdef MT_KERNEL + if (qp->sqp_type == VAPI_GSI_QP) { + if (pkey_index == QP1_PKEY_INDEX) { /* use QP's pkey */ + rc= THH_hob_get_qp1_pkey(hh_hndl,port,&cur_pkey); + } else { + rc= THH_hob_get_pkey(hh_hndl,port,pkey_index,&cur_pkey); + } + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Error in P-key table access (%s) - using pkey_index 0x%X.\n"),__func__, + HH_strerror_sym(rc),pkey_index); + return 0; + } + } else { + cur_pkey = DEFAULT_PKEY; + } + BTH_p->P_KEY= cur_pkey; +#else + BTH_p->P_KEY= DEFAULT_PKEY; /* For user space we do not have access to the Pkey table */ +#endif + BTH_p->DestQP= send_req_p->remote_qp; + /* AckReq and PSN are meaningless for UD */ + /* Set DETH fields */ + memset(DETH_p,0,sizeof(IB_DETH_st)); + DETH_p->SrcQP= (qp->sqp_type == VAPI_SMI_QP) ? 0 : 1; /* invoked only for SMI or GSI */ + /* Qkey should be set according to IB-Spec. compliancy statement C10-15, But... + * Only QP1/GSI is the special QP which really validates Q-keys and it always uses + * 0x80010000 (C9-49). So for QP1 we always put this if the high-order bit of the Qkey + * is set. */ + if (qp->sqp_type == VAPI_GSI_QP) { + DETH_p->Q_Key= (send_req_p->remote_qkey & 0x80000000) ? 
0x80010000: send_req_p->remote_qkey; + } else { /* QP0 */ + /* For QP0 we don't care (QP0 always sends to another QP0 - none of which validates the Qkey) */ + DETH_p->Q_Key= send_req_p->remote_qkey; + } + + /* Build the headers */ + if (MPGA_make_headers(hdrs,UD_SEND_ONLY_OP, + LRH_p->LNH,FALSE,IB_MAD_LEN,&hdrs_buf_p) != MT_OK) { + return 0; + } + /* Verify headers size */ + if (hdrs_buf_p != (((u_int8_t*)segment_p) + WQE_INLINE_SZ_BCOUNT)) {/*Should be segment begin*/ + MTL_ERROR2(MT_FLFMT("Error in headers size (%d instead of %d).\n"), + (unsigned) (hdrs_sz - (hdrs_buf_p - (((u_int8_t*)segment_p) + 4))), hdrs_sz); + return 0; + } + +#ifdef MT_LITTLE_ENDIAN + /* MPGA headers returned in BIG endian. WQE is built in CPU endianess - so swap bytes */ + for (i= 0 , hdrs_buf32_p= (u_int32_t*)hdrs_buf_p; i < (hdrs_sz>>2); i++) { + hdrs_buf32_p[i]= MOSAL_cpu_to_be32(hdrs_buf32_p[i]); + } +#endif + + return MT_UP_ALIGNX_U32(WQE_INLINE_SZ_BCOUNT + hdrs_sz , 4); /* Align to WQE segment size */ +} + +/* Build UD header as inline data for management QPs over MLX "transport" */ +inline static u_int32_t WQE_pack_mlx_ud_header2(u_int32_t *segment_p, + THHUL_qp_t qp, ib_send_wr_t* p_wr, VAPI_ud_av_t *av_p, HH_hca_hndl_t hh_hndl, + VAPI_pkey_ix_t pkey_index /* take this index instead of QP's, if not QP1_PKEY_INDEX */) +{ + MPGA_headers_t *hdrs; + IB_LRH_st *LRH_p; + IB_BTH_st *BTH_p; + IB_DETH_st *DETH_p; + u_int8_t *hdrs_buf_p; +#ifdef MT_LITTLE_ENDIAN + u_int32_t *hdrs_buf32_p; /* pointer for endiness swapping */ + u_int16_t i; +#endif + u_int16_t hdrs_sz; + MT_bool global= av_p->grh_flag; +#ifdef MT_KERNEL + IB_port_t port= 1 + (qp->qpn & 1); /* QPN of QP used for port 1 has the even index */ + IB_pkey_t cur_pkey= 0; + HH_ret_t rc; +#endif + + hdrs_sz= IB_LRH_LEN+IB_BTH_LEN+IB_DETH_LEN; + if (global) hdrs_sz+= IB_GRH_LEN; + + /* Set inline entry control */ + *segment_p= ((1<<31) | hdrs_sz) ; /* inline entry | ByteCount */ + hdrs_buf_p= ((u_int8_t*)segment_p) + WQE_INLINE_SZ_BCOUNT /* inline ctrl */ + hdrs_sz; + + /* Put headers data into MPGA structures */ + hdrs = &qp->sq_res.wqe_tmp->hdrs; + if (global) { + LRH_p= &(hdrs->MPGA_G_ud_send_only.IB_LRH); + BTH_p= &(hdrs->MPGA_G_ud_send_only.IB_BTH); + DETH_p= &(hdrs->MPGA_G_ud_send_only.IB_DETH); + /* Set GRH fields */ + hdrs->MPGA_G_ud_send_only.IB_GRH.IPVer= 6; /* ? 
*/ + hdrs->MPGA_G_ud_send_only.IB_GRH.TClass= av_p->traffic_class; + hdrs->MPGA_G_ud_send_only.IB_GRH.FlowLabel= av_p->flow_label; + hdrs->MPGA_G_ud_send_only.IB_GRH.PayLen= IB_BTH_LEN+IB_DETH_LEN+IB_MAD_LEN+IB_ICRC_SZ; + hdrs->MPGA_G_ud_send_only.IB_GRH.NxtHdr= 0x1B; /* IB-spec.: compliancy statement C8-7 */ + hdrs->MPGA_G_ud_send_only.IB_GRH.HopLmt= av_p->hop_limit; + memcpy(&(hdrs->MPGA_G_ud_send_only.IB_GRH.DGID),&(av_p->dgid),sizeof(IB_gid_t)); +#ifdef MT_KERNEL + /* SGID field is supported only in kernel space, due to limited access to the GID table */ + rc= THH_hob_get_sgid(hh_hndl,port,av_p->sgid_index, + &(hdrs->MPGA_G_ud_send_only.IB_GRH.SGID)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("Error in GID table access (%s).\n"),HH_strerror_sym(rc)); + return 0; + } +#endif + + } else { /* local - no GRH */ + LRH_p= &(hdrs->MPGA_ud_send_only.IB_LRH); + BTH_p= &(hdrs->MPGA_ud_send_only.IB_BTH); + DETH_p= &(hdrs->MPGA_ud_send_only.IB_DETH); + } + + /* Set LRH fields */ + memset(LRH_p,0,sizeof(IB_LRH_st)); + /* VL must be set for internal loopback ("vl15" bit is ignored) */ + if (qp->sqp_type == VAPI_SMI_QP) LRH_p->VL= 15; + else LRH_p->VL= 0; + LRH_p->LVer= 0; + LRH_p->SL= av_p->sl; + LRH_p->LNH= global ? IBA_GLOBAL : IBA_LOCAL; + + LRH_p->DLID= av_p->dlid; + LRH_p->SLID= (av_p->dlid == PERMIS_LID) ? PERMIS_LID : (IB_lid_t) av_p->src_path_bits; + /* If DLID is permissive LID, we set SLID to the permissive LID too. */ + /* Otherwise, we put in the SLID field the source path bits, and SLR=0, so */ + /* the LID is composed of actual port's LID concatenated with given path bits */ + + LRH_p->PktLen= (hdrs_sz+IB_MAD_LEN + IB_ICRC_SZ) >> 2; + /* Set BTH fields */ + memset(BTH_p,0,sizeof(IB_BTH_st)); + BTH_p->OpCode= UD_SEND_ONLY_OP; + BTH_p->SE= ((p_wr->send_opt & IB_SEND_OPT_SOLICITED) == IB_SEND_OPT_SOLICITED); + BTH_p->M= 1; + BTH_p->PadCnt= 0; /* MADs are always 4byte multiple */ + BTH_p->TVer= 0; +#ifdef MT_KERNEL + if (qp->sqp_type == VAPI_GSI_QP) { + if (pkey_index == QP1_PKEY_INDEX) { /* use QP's pkey */ + rc= THH_hob_get_qp1_pkey(hh_hndl,port,&cur_pkey); + } else { + rc= THH_hob_get_pkey(hh_hndl,port,pkey_index,&cur_pkey); + } + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Error in P-key table access (%s) - using pkey_index 0x%X.\n"),__func__, + HH_strerror_sym(rc),pkey_index); + return 0; + } + } else { + cur_pkey = DEFAULT_PKEY; + } + BTH_p->P_KEY= cur_pkey; +#else + BTH_p->P_KEY= DEFAULT_PKEY; /* For user space we do not have access to the Pkey table */ +#endif + BTH_p->DestQP= cl_hton32( p_wr->dgrm.ud.remote_qp ); + /* AckReq and PSN are meaningless for UD */ + /* Set DETH fields */ + memset(DETH_p,0,sizeof(IB_DETH_st)); + DETH_p->SrcQP= (qp->sqp_type == VAPI_SMI_QP) ? 0 : 1; /* invoked only for SMI or GSI */ + /* Qkey should be set according to IB-Spec. compliancy statement C10-15, But... + * Only QP1/GSI is the special QP which really validates Q-keys and it always uses + * 0x80010000 (C9-49). So for QP1 we always put this if the high-order bit of the Qkey + * is set. 
*/ + if( (qp->sqp_type == VAPI_GSI_QP) && (p_wr->dgrm.ud.remote_qkey & CL_HTON32(0x80000000)) ) + { + DETH_p->Q_Key= 0x80010000; + } else { /* QP0, or QKEY is not well known GSI QKEY */ + /* For QP0 we don't care (QP0 always sends to another QP0 - none of which validates the Qkey) */ + DETH_p->Q_Key= cl_hton32( p_wr->dgrm.ud.remote_qkey ); + } + + /* Build the headers */ + if (MPGA_make_headers(hdrs,UD_SEND_ONLY_OP, + LRH_p->LNH,FALSE,IB_MAD_LEN,&hdrs_buf_p) != MT_OK) { + return 0; + } + /* Verify headers size */ + if (hdrs_buf_p != (((u_int8_t*)segment_p) + WQE_INLINE_SZ_BCOUNT)) {/*Should be segment begin*/ + MTL_ERROR2(MT_FLFMT("Error in headers size (%d instead of %d).\n"), + (unsigned) (hdrs_sz - (hdrs_buf_p - (((u_int8_t*)segment_p) + 4))), hdrs_sz); + return 0; + } + +#ifdef MT_LITTLE_ENDIAN + /* MPGA headers returned in BIG endian. WQE is built in CPU endianess - so swap bytes */ + for (i= 0 , hdrs_buf32_p= (u_int32_t*)hdrs_buf_p; i < (hdrs_sz>>2); i++) { + hdrs_buf32_p[i]= MOSAL_cpu_to_be32(hdrs_buf32_p[i]); + } +#endif + + return MT_UP_ALIGNX_U32(WQE_INLINE_SZ_BCOUNT + hdrs_sz , 4); /* Align to WQE segment size */ +} + +/* Build ICRC segment for MLX (UD) */ +inline static u_int32_t WQE_pack_mlx_icrc_hw(u_int32_t *segment_p) +{ + segment_p[0]= (1<<31) | 4 ; /* Inline ICRC (32 bits = 4 bytes) */ + segment_p[1]= 0; /* Hardware generated ICRC */ + /* 2 dwords padded for a single Inline Data segment */ + return WQE_INLINE_ICRC; +} + +/* Build ICRC segment for MLX (UD) */ +inline static u_int32_t WQE_pack_mlx_icrc_hw_be(u_int32_t *segment_p) +{ + segment_p[0]= MOSAL_cpu_to_be32( (1<<31) | 4 ); /* Inline ICRC (32 bits = 4 bytes) */ + segment_p[1]= 0; /* Hardware generated ICRC */ + /* 2 dwords padded for a single Inline Data segment */ + return WQE_INLINE_ICRC; +} + +/* Pack Control segment (for mlx-sends) */ +inline static u_int32_t WQE_pack_ctrl_mlx(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, MT_bool event_bit, + IB_sl_t sl, IB_static_rate_t max_statrate, MT_bool slr, MT_bool v15, + u_int16_t vcrc, IB_lid_t rlid) +{ + memset(segment_p,0,WQE_SEG_SZ_CTRL); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32(segment_p,(comp_type == VAPI_SIGNALED) ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,c),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,c)); + MT_INSERT_ARRAY32(segment_p,event_bit ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,e),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,e)); + MT_INSERT_ARRAY32(segment_p,sl, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,sl),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,sl)); + MT_INSERT_ARRAY32(segment_p,max_statrate > 0 ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,max_statrate), + MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,max_statrate)); + MT_INSERT_ARRAY32(segment_p,slr ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,slr),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,slr)); + MT_INSERT_ARRAY32(segment_p,v15 ? 
1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,v15),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,v15)); + MT_INSERT_ARRAY32(segment_p,vcrc, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,vcrc),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,vcrc)); + MT_INSERT_ARRAY32(segment_p,rlid, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,rlid),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,rlid)); + return WQE_SEG_SZ_CTRL; +} + +/* Pack Control segment (for mlx-sends) */ +inline static u_int32_t WQE_pack_ctrl_mlx_be(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, MT_bool event_bit, + IB_sl_t sl, IB_static_rate_t max_statrate, MT_bool slr, MT_bool v15, + u_int16_t vcrc, IB_lid_t rlid) +{ + memset(segment_p,0,WQE_SEG_SZ_CTRL); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32_BE(segment_p,(comp_type == VAPI_SIGNALED) ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,c),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,c)); + MT_INSERT_ARRAY32_BE(segment_p,event_bit ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,e),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,e)); + MT_INSERT_ARRAY32_BE(segment_p,sl, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,sl),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,sl)); + MT_INSERT_ARRAY32_BE(segment_p,max_statrate > 0 ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,max_statrate), + MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,max_statrate)); + MT_INSERT_ARRAY32_BE(segment_p,slr ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,slr),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,slr)); + MT_INSERT_ARRAY32_BE(segment_p,v15 ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,v15),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,v15)); + MT_INSERT_ARRAY32_BE(segment_p,vcrc, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,vcrc),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,vcrc)); + MT_INSERT_ARRAY32_BE(segment_p,rlid, + MT_BIT_OFFSET(wqe_segment_ctrl_mlx_st,rlid),MT_BIT_SIZE(wqe_segment_ctrl_mlx_st,rlid)); + return WQE_SEG_SZ_CTRL; +} + +inline static u_int32_t WQE_build_send_mlx( + HH_hca_hndl_t hh_hndl, + THHUL_qp_t qp, + VAPI_sr_desc_t *send_req_p, + VAPI_pkey_ix_t pkey_index, /* take this index instead of QP's, if not QP1_PKEY_INDEX */ + u_int32_t *wqe_buf +) +{ + + VAPI_ud_av_t *av = &qp->sq_res.wqe_tmp->av; + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + u_int8_t *prev_loc_p; + HH_ret_t rc; + + rc= THH_udavm_parse_udav_entry((u_int32_t*)(send_req_p->remote_ah),av); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("Invalid UD AV handle - %s"), + HH_strerror_sym(rc)); + return 0; + } + + + if ((av->dlid == PERMIS_LID) && (qp->sqp_type != VAPI_SMI_QP)) { + MTL_ERROR1(MT_FLFMT("DLID==Permissive-LID while not an SMI QP.\n")); + return 0; + } + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_mlx((u_int32_t*)cur_loc_p, /* Pack Control segment */ + send_req_p->comp_type, FALSE/*event bit*/, + av->sl,av->static_rate,(av->dlid == PERMIS_LID),(qp->sqp_type == VAPI_SMI_QP), + 0/*VCRC*/,av->dlid); + + + /* Build inline headers */ + switch (qp->sqp_type) { + case VAPI_SMI_QP: + case VAPI_GSI_QP: + prev_loc_p= cur_loc_p; + cur_loc_p+= WQE_pack_mlx_ud_header((u_int32_t*)cur_loc_p,qp,send_req_p,av,hh_hndl,pkey_index); + if (cur_loc_p == prev_loc_p) { + return 0; + } + /* Pack scatter/gather list segments */ + cur_loc_p+= WQE_pack_sg_list((u_int32_t*)cur_loc_p,send_req_p->sg_lst_len,send_req_p->sg_lst_p); + cur_loc_p+= WQE_pack_mlx_icrc_hw((u_int32_t*)cur_loc_p); + break; + case VAPI_RAW_ETY_QP: + case VAPI_RAW_IPV6_QP: + default: + return 0; + } + + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - 
((MT_virt_addr_t)wqe_buf)); +} + +#ifdef WIN32 +#include "thhul_qpm_ibal.h" + +inline static u_int32_t WQE_build_send_mlx2_be( + IN HH_hca_hndl_t hh_hndl, + IN THHUL_qp_t qp, + IN ib_send_wr_t *p_wr, + IN VAPI_pkey_ix_t pkey_index, /* take this index instead of QP's, if not QP1_PKEY_INDEX */ + IN OUT u_int32_t *wqe_buf ) +{ + VAPI_ud_av_t *av = &qp->sq_res.wqe_tmp->av; + u_int8_t *cur_loc_p = (u_int8_t*)wqe_buf; /* Current location in the WQE */ + u_int8_t *prev_loc_p; + HH_ret_t rc; + + rc= THH_udavm_parse_udav_entry((u_int32_t*)(p_wr->dgrm.ud.h_av->h_av),av); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("Invalid UD AV handle - %s"), + HH_strerror_sym(rc)); + return 0; + } + + if ((av->dlid == PERMIS_LID) && (qp->sqp_type != VAPI_SMI_QP)) { + MTL_ERROR1(MT_FLFMT("DLID==Permissive-LID while not an SMI QP.\n")); + return 0; + } + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_mlx_be((u_int32_t*)cur_loc_p, /* Pack Control segment */ + (p_wr->send_opt & IB_SEND_OPT_SIGNALED)? VAPI_SIGNALED : VAPI_UNSIGNALED, + FALSE/*event bit*/,av->sl,av->static_rate,(av->dlid == PERMIS_LID), + (qp->sqp_type == VAPI_SMI_QP),0/*VCRC*/,av->dlid); + + /* Build inline headers */ + switch( qp->sqp_type ) + { + case VAPI_SMI_QP: + case VAPI_GSI_QP: + prev_loc_p= cur_loc_p; + cur_loc_p+= WQE_pack_mlx_ud_header2((u_int32_t*)cur_loc_p,qp,p_wr,av,hh_hndl,pkey_index); + if (cur_loc_p == prev_loc_p) { + return 0; + } + + /* Swap UD header into big-endian. */ + for( ; prev_loc_p != cur_loc_p; prev_loc_p += 4 ) + *(uint32_t*)prev_loc_p = MOSAL_cpu_to_be32( *(uint32_t*)prev_loc_p ); + + /* Pack scatter/gather list segments in BE format */ + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + { + if( WQE_pack_inline_sgl_ibal( + p_wr, (uint32_t**)&cur_loc_p, qp->sq_res.max_inline_data ) != HH_OK ) + { + return 0; + } + } + else + { + WQE_pack_sgl_ibal( p_wr, (uint32_t**)&cur_loc_p ); + } + cur_loc_p+= WQE_pack_mlx_icrc_hw_be((u_int32_t*)cur_loc_p); + break; + case VAPI_RAW_ETY_QP: + case VAPI_RAW_IPV6_QP: + default: + return 0; + } + + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_buf)); +} +#endif
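WQE_pack_inline_sgl_ibal and WQE_pack_sgl_ibal (declared in thhul_qpm_ibal.h, not shown here) advance the caller's write cursor through a pointer-to-pointer, which is why the call sites above pass (uint32_t**)&cur_loc_p rather than the cursor value itself. A generic sketch of the idiom (names are illustrative):

#include <stdint.h>

/* append one dword and advance the caller's cursor in place */
static void ex_append_dword(uint32_t **cursor_pp, uint32_t value)
{
    *(*cursor_pp)++ = value; /* write, then bump the shared cursor */
}

/* returns the number of dwords written */
static uint32_t ex_fill(uint32_t *buf)
{
    uint32_t *cur = buf;
    ex_append_dword(&cur, 0x12345678u);
    ex_append_dword(&cur, 0x9ABCDEF0u);
    return (uint32_t)(cur - buf);
}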
+ + +/* Pack Control segment (for receive work requests) */ +inline static u_int32_t qpm_WQE_pack_ctrl_recv(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, u_int32_t event_bit) +{ + memset(segment_p,0,WQE_SEG_SZ_CTRL); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32(segment_p,(comp_type == VAPI_SIGNALED) ? 1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,c),MT_BIT_SIZE(wqe_segment_ctrl_recv_st,c)); + MT_INSERT_ARRAY32(segment_p,event_bit, + MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,e),MT_BIT_SIZE(wqe_segment_ctrl_recv_st,e)); + return WQE_SEG_SZ_CTRL; +} + +/* Optimized version of WQE_pack_ctrl_recv + * remove memset and precalculate offset + */ +#define CTRL_RECV_C_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ctrl_recv_st,c)>>2 +#define CTRL_RECV_RESERVED2_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_ctrl_recv_st,reserved2)>>2 + +#define CTRL_RECV_C_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,c) +#define CTRL_RECV_E_BIT_OFFSET MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,e) +/* Convert into Big Endian version */ +inline static u_int32_t WQE_pack_ctrl_recv_be(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, u_int32_t event_bit) +{ + + segment_p[CTRL_RECV_RESERVED2_DWORD_OFFSET] = 0; + segment_p[CTRL_RECV_C_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (((comp_type == VAPI_SIGNALED) ? 1 : 0) << CTRL_RECV_C_BIT_OFFSET) + | ( event_bit << CTRL_RECV_E_BIT_OFFSET) + ); + + return WQE_SEG_SZ_CTRL; +} + +inline static u_int32_t qpm_WQE_build_recv( + THHUL_qp_t qp, + VAPI_rr_desc_t *recv_req_p, + u_int32_t *wqe_buf +) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= qpm_WQE_pack_ctrl_recv((u_int32_t*)cur_loc_p, + recv_req_p->comp_type, 0/*event bit*/); + /* Pack scatter/gather list segments */ + cur_loc_p+= WQE_pack_sg_list((u_int32_t*)cur_loc_p,recv_req_p->sg_lst_len,recv_req_p->sg_lst_p); + + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_buf)); +} + +/* Build Big Endian version of WQE_build_recv to remove the extra copy + * from the draft. The optimized version of post_send_recv uses wqe_buf directly, so + * it should build the big-endian version into wqe_buf. + */ +inline static u_int32_t WQE_build_recv_be( + THHUL_qp_t qp, + VAPI_rr_desc_t *recv_req_p, + u_int32_t *wqe_buf +) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_recv_be((u_int32_t*)cur_loc_p, + recv_req_p->comp_type, 0/*event bit*/); + /* Pack scatter/gather list segments */ + cur_loc_p+= WQE_pack_sg_list_be((u_int32_t*)cur_loc_p,recv_req_p->sg_lst_len,recv_req_p->sg_lst_p); + + return (u_int32_t)(((MT_ulong_ptr_t)cur_loc_p) - ((MT_ulong_ptr_t)wqe_buf)); +} + + +/* This is for post_send_recv2. The sg_list is built directly from the IbAccess structure. + */ +inline static u_int32_t *WQE_build_recv_be_req2( + THHUL_qp_t qp, + u_int32_t *wqe_buf, + VAPI_comp_type_t comp_type +) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_recv_be((u_int32_t*)cur_loc_p, + comp_type, 0/*event bit*/); + + return (u_int32_t *)cur_loc_p; +}
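All of the recv builders share one shape: every packer returns the byte size of the segment it wrote and the builder advances a byte cursor by that amount, so a receive WQE is just the concatenation of next segment, control segment, and scatter list. Schematically (segment sizes and packers below are dummies, not the Tavor layouts):

#include <stdint.h>

#define EX_SEG_SZ_NEXT 8u /* illustrative segment sizes */
#define EX_SEG_SZ_CTRL 8u

static uint32_t ex_pack_next(uint32_t *p) { p[0] = 0; p[1] = 0; return EX_SEG_SZ_NEXT; }
static uint32_t ex_pack_recv_ctrl(uint32_t *p) { p[0] = 0; p[1] = 0; return EX_SEG_SZ_CTRL; }

/* returns the total WQE size in bytes, like qpm_WQE_build_recv */
static uint32_t ex_build_recv(uint32_t *wqe_buf)
{
    uint8_t *cur = (uint8_t *)wqe_buf;
    cur += ex_pack_next((uint32_t *)cur);
    cur += ex_pack_recv_ctrl((uint32_t *)cur);
    /* scatter/gather entries would be appended here */
    return (uint32_t)(cur - (uint8_t *)wqe_buf);
}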
+ +inline static u_int32_t WQE_build_membind( + HHUL_mw_bind_t *bind_props_p, + IB_rkey_t new_rkey, + u_int32_t *wqe_buf +) +{ + u_int32_t *cur_loc_p= wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= (qpm_WQE_init_next((u_int32_t*)cur_loc_p)>>2); /* Make "unlinked" "next" segment */ + cur_loc_p+= (WQE_pack_ctrl_send((u_int32_t*)cur_loc_p, /* Pack Control segment */ + bind_props_p->comp_type, 0/*SE bit*/, 0/*event bit*/,0/*Imm. data*/)>>2); + + memset(cur_loc_p,0,8); /* clear reserved bits of first 2 dwords */ + + /* Set access bits */ + if (bind_props_p->acl & VAPI_EN_REMOTE_READ) { + MT_INSERT_ARRAY32(cur_loc_p,1, + MT_BIT_OFFSET(wqe_segment_bind_st,rr),MT_BIT_SIZE(wqe_segment_bind_st,rr)); + } + if (bind_props_p->acl & VAPI_EN_REMOTE_WRITE) { + MT_INSERT_ARRAY32(cur_loc_p,1, + MT_BIT_OFFSET(wqe_segment_bind_st,rw),MT_BIT_SIZE(wqe_segment_bind_st,rw)); + } + if (bind_props_p->acl & VAPI_EN_REMOTE_ATOM) { + MT_INSERT_ARRAY32(cur_loc_p,1, + MT_BIT_OFFSET(wqe_segment_bind_st,a),MT_BIT_SIZE(wqe_segment_bind_st,a)); + } + + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,new_rkey)>>2]= new_rkey; + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,region_lkey)>>2]= bind_props_p->mr_lkey; + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,start_address_h)>>2]= + (u_int32_t)(bind_props_p->start >> 32); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,start_address_l)>>2]= + (u_int32_t)(bind_props_p->start & 0xFFFFFFFF); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,length_h)>>2]= + (u_int32_t)(bind_props_p->size >> 32); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_bind_st,length_l)>>2]= + (u_int32_t)(bind_props_p->size & 0xFFFFFFFF); + + return (u_int32_t)(WQE_SEG_SZ_BIND + ((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_buf)); +} + +/* - Allocate a WQE in the given send queue, + - put the given WQE in it, + - link to the previous WQE and + - ring the doorbell + * q_lock must be acquired before invoking this function (to protect WQEs allocation). + */ +inline static HH_ret_t sq_alloc_wqe_link_and_ring(THHUL_qp_t qp, + u_int32_t* wqe_draft, u_int32_t wqe_sz_dwords, +#ifdef MT_LITTLE_ENDIAN + u_int32_t swap_sz_dwords, +#endif + VAPI_sr_desc_t *send_req_p, tavor_if_nopcode_t nopcode) +{ + u_int32_t next_draft[WQE_SEG_SZ_NEXT>>2]; /* Build "next" segment here */ + volatile u_int32_t* next_wqe; /* Actual WQE pointer */ + u_int32_t i; + THH_uar_sendq_dbell_t sq_dbell; + + /* Check if any WQEs are free to be consumed */ + if (qp->sq_res.max_outs == qp->sq_res.cur_outs) { + MTL_ERROR4("THHUL_qpm_post_send_req: Send queue is full (%u requests outstanding).\n", + qp->sq_res.cur_outs); + return HH_E2BIG_WR_NUM; + } + /* Allocate next WQE */ + next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf + + (qp->sq_res.next2post_index << qp->sq_res.log2_max_wqe_sz) ); + qp->sq_res.wqe_id[qp->sq_res.next2post_index]= send_req_p->id; /* Save WQE ID */ + qp->sq_res.next2post_index = (qp->sq_res.next2post_index + 1) % qp->sq_res.max_outs ; + qp->sq_res.cur_outs++; + + /* copy (while swapping,if needed) the wqe_draft to the actual WQE */ + /* TBD: for big-endian machines we can optimize here and use memcpy */ + MTPERF_TIME_START(SQ_WQE_copy); +#ifdef MT_LITTLE_ENDIAN + for (i= 0; i < swap_sz_dwords; i++) { + next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]); + } + /* The rest of the WQE should be copied as is (inline data) */ + for (; i < wqe_sz_dwords; i++) { + next_wqe[i]= wqe_draft[i]; + } +#else /* big endian */ + for (i= 0; i < wqe_sz_dwords; i++) { + next_wqe[i]= wqe_draft[i]; + } +#endif + + MTPERF_TIME_END(SQ_WQE_copy); + + /* Update "next" segment of previous WQE (if any) */ + if (qp->sq_res.last_posted_p != NULL) { + /* Build linking "next" segment in last posted WQE*/ + qpm_WQE_pack_send_next(next_draft, nopcode, send_req_p->fence, + 1/*DBD*/, (u_int32_t)(MT_ulong_ptr_t) next_wqe, wqe_sz_dwords>>2, + (qp->ts_type==VAPI_TS_RD) ? send_req_p->eecn : 0); + for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { + /* This copy assures big-endian as well as that NDS is written last */ + qp->sq_res.last_posted_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + } + qp->sq_res.last_posted_p= next_wqe; + + /* Ring doorbell (send or rd-send) */ + sq_dbell.qpn= qp->qpn; + sq_dbell.nopcode= nopcode; + sq_dbell.fence= send_req_p->fence; + sq_dbell.next_addr_32lsb= (u_int32_t)((MT_virt_addr_t)next_wqe & 0xFFFFFFFF); + sq_dbell.next_size= wqe_sz_dwords>>2; + if (qp->ts_type == VAPI_TS_RD) { + THH_uar_sendq_rd_dbell(qp->uar,&sq_dbell,send_req_p->eecn); + } else { /* non-RD send request */ + MTPERF_TIME_START(THH_uar_sendq_dbell); + THH_uar_sendq_dbell(qp->uar,&sq_dbell); + MTPERF_TIME_END(THH_uar_sendq_dbell); + } + + return HH_OK; +}
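sq_alloc_wqe_link_and_ring treats the send queue as a ring of max_outs fixed-size slots, each 1<<log2_max_wqe_sz bytes, so the slot address falls out of a shift and the wrap-around out of a modulo. A reduced sketch of just the slot arithmetic (the struct mirrors sq_res conceptually, not field-for-field):

#include <stdint.h>

struct ex_ring {
    uintptr_t buf;        /* base address of the WQE array         */
    uint32_t  next2post;  /* next free slot index                  */
    uint32_t  max_outs;   /* number of slots in the ring           */
    uint8_t   log2_sz;    /* slot stride as a power of two (bytes) */
};

/* address of the next free slot, then advance the index with wrap-around */
static uintptr_t ex_ring_alloc(struct ex_ring *r)
{
    uintptr_t wqe = r->buf + ((uintptr_t)r->next2post << r->log2_sz);
    r->next2post = (r->next2post + 1) % r->max_outs;
    return wqe;
}

The _be variant that follows replaces the modulo with a compare-and-reset, which is cheaper and sufficient because the index only ever advances by one.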
+ +/* Optimized version of sq_alloc_wqe_link_and_ring + * remove division and call the inline version of the doorbell + * remove extra copy of Update "next" segment + */ +#define SEND_DOORBELL_F_BIT_OFFSET MT_BIT_OFFSET(tavorprm_send_doorbell_st,f) +#define SEND_DOORBELL_QPN_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_send_doorbell_st,qpn) & 0x1f) +inline static HH_ret_t sq_alloc_wqe_link_and_ring_be(THHUL_qp_t qp, + u_int32_t* wqe_draft, u_int32_t wqe_sz_dwords, +#ifdef MT_LITTLE_ENDIAN + u_int32_t swap_sz_dwords, +#endif + VAPI_sr_desc_t *send_req_p, tavor_if_nopcode_t nopcode) +{ + //THH_uar_sendq_dbell_t sq_dbell; + volatile u_int32_t chimeWords[4]; + THH_uar_t uar; + u_int32_t *cur_loc_p; + + + qp->sq_res.wqe_id[qp->sq_res.next2post_index]= send_req_p->id; /* Save WQE ID */ + + ++qp->sq_res.next2post_index; + if (MOSAL_EXPECT_FALSE(qp->sq_res.next2post_index >= qp->sq_res.max_outs)) + qp->sq_res.next2post_index = 0; + + qp->sq_res.cur_outs++; + + /* Update "next" segment of previous WQE (if any) */ + /* Build linking "next" segment in last posted WQE*/ + /* build directly to the wqe, so call the big endian version */ + WQE_pack_send_next_be((u_int32_t*)qp->sq_res.last_posted_p, nopcode, send_req_p->fence, + 1/*DBD*/, (u_int32_t)(MT_long_ptr_t) wqe_draft, wqe_sz_dwords>>2, + (qp->ts_type==VAPI_TS_RD) ? send_req_p->eecn : 0); + + qp->sq_res.last_posted_p= wqe_draft; + + /* Ring doorbell */ + + uar = qp->uar; + chimeWords[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)nopcode + | ((send_req_p->fence & 0x1) << SEND_DOORBELL_F_BIT_OFFSET) + | ((u_int32_t)(MT_ulong_ptr_t)wqe_draft & 0xFFFFFFFF)); + chimeWords[1] = MOSAL_cpu_to_be32(0 + | (u_int32_t)(wqe_sz_dwords >> 2) // specify in 16 byte chunks + | ((u_int32_t)(qp->qpn) << SEND_DOORBELL_QPN_BIT_OFFSET) + ); + + if (MOSAL_EXPECT_FALSE(qp->ts_type == VAPI_TS_RD)) { + cur_loc_p = (u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]; + chimeWords[2] = MOSAL_cpu_to_be32(send_req_p->eecn << 18); + chimeWords[3] = MOSAL_cpu_to_be32((u_int32_t)qp->qpn << 8); + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_QWORD( cur_loc_p, *(volatile u_int64_t*)&chimeWords[2]); + MOSAL_MMAP_IO_WRITE_QWORD(&cur_loc_p[2],*(volatile u_int64_t*)chimeWords); + MOSAL_spinlock_unlock(&(uar->uar_lock)); + } else { +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); +#else + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + } + + + return HH_OK; +}
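The doorbell itself is two big-endian dwords that must reach the UAR page as one 64-bit store; where the platform cannot guarantee an atomic qword MMIO write, the code above serializes it under the UAR spinlock instead. A sketch of the single-store shape (the MMIO primitive is a stand-in for MOSAL_MMAP_IO_WRITE_QWORD):

#include <stdint.h>
#include <string.h>

/* stand-in for MOSAL_MMAP_IO_WRITE_QWORD */
static void ex_mmio_write_qword(volatile uint64_t *dst, uint64_t v)
{
    *dst = v;
}

/* both doorbell dwords travel in one 64-bit store, so the HCA can never
 * observe a half-written doorbell */
static void ex_ring_doorbell(volatile uint64_t *uar_dbell,
                             uint32_t dword0_be, uint32_t dword1_be)
{
    uint32_t words[2];
    uint64_t qword;
    words[0] = dword0_be;
    words[1] = dword1_be;
    memcpy(&qword, words, sizeof(qword));
    ex_mmio_write_qword(uar_dbell, qword);
}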
+/* Extract NDS directly from (big-endian) WQE */ +inline static u_int8_t qpm_WQE_extract_nds(volatile u_int32_t* wqe) +{ +/*** warning C4244: 'return' : conversion from 'unsigned long' to 'u_int8_t', possible loss of data ***/ +#if 0 + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,nds) >> 2]), + MT_BIT_OFFSET(wqe_segment_next_st,nds) & MASK32(5), + MT_BIT_SIZE(wqe_segment_next_st,nds) & MASK32(5)); +#endif + +#define NEXT_ST_BIT_MASK MASK32(5) + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[NEXT_ST_NDS_DWORD_OFFSET]), + MT_BIT_OFFSET(wqe_segment_next_st,nds) & NEXT_ST_BIT_MASK, + MT_BIT_SIZE(wqe_segment_next_st,nds) & NEXT_ST_BIT_MASK); + +} + +/* Extract NDA directly from (big-endian) WQE */ +inline static u_int32_t qpm_WQE_extract_nda(volatile u_int32_t* wqe) +{ + return (MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6) >> 2]) & (~MASK32(6)) ); +} + +inline static u_int8_t qpm_WQE_extract_dbd(volatile u_int32_t* wqe) +{ +/*** warning C4244: 'return' : conversion from 'unsigned long' to 'u_int8_t', possible loss of data ***/ +#if 0 + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,dbd) >> 2]), + MT_BIT_OFFSET(wqe_segment_next_st,dbd) & MASK32(5), + MT_BIT_SIZE(wqe_segment_next_st,dbd) & MASK32(5)); +#endif +#define NEXT_ST_DBD_DWORD_OFFSET MT_BYTE_OFFSET(wqe_segment_next_st,dbd) >> 2 +#define NEXT_ST_DBD_BIT_MASK MASK32(5) + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[NEXT_ST_DBD_DWORD_OFFSET]), + MT_BIT_OFFSET(wqe_segment_next_st,dbd) & NEXT_ST_DBD_BIT_MASK, + MT_BIT_SIZE(wqe_segment_next_st,dbd) & NEXT_ST_DBD_BIT_MASK); +} + +#ifdef DUMP_SEND_REQ + static void dump_send_req(THHUL_qp_t qp, HHUL_send_req_t *sr); +#endif
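The extract helpers reverse the packing: byte-swap the dword back to CPU order, then shift and mask the field out; MT_EXTRACT32 is essentially (value >> offset) & ((1 << size) - 1). A standalone equivalent (offsets and sizes below are illustrative):

#include <stdint.h>

/* pull `size` bits starting at bit `offset` out of a dword that has already
 * been swapped back to CPU order */
static uint32_t ex_extract32(uint32_t dword, unsigned offset, unsigned size)
{
    uint32_t mask = (size < 32) ? ((1u << size) - 1u) : 0xFFFFFFFFu;
    return (dword >> offset) & mask;
}

/* e.g. a 6-bit NDS field at bit offset 0 of the next-segment dword:
 * uint8_t nds = (uint8_t)ex_extract32(cpu_dword, 0, 6); */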
THHUL_qpm_t *qpm_p +) +{ + int i; + + *qpm_p= (THHUL_qpm_t) MALLOC(sizeof(struct THHUL_qpm_st)); + if (*qpm_p == NULL) { + MTL_ERROR1("THHUL_qpm_create: Failed allocating THHUL_qpm_st.\n"); + return HH_EAGAIN; + } + + /* init internal data structures */ + for (i= 0; i < QP_HASH_TBL_SZ; i++) { + (*qpm_p)->hash_tbl[i]= NULL; +#if QPM_USE_FIXED_QP_ARRAY + (*qpm_p)->array_tbl[i].qp_array[0].qpn = QP_ARRAY_UNUSED; + (*qpm_p)->array_tbl[i].qp_array[QPM_QP_PER_ARRAY].qpn = QP_ARRAY_UNUSED; +#endif + } + (*qpm_p)->qp_cnt= 0; + (*qpm_p)->srqm= srqm; + for (i= THHUL_DPOOL_SZ_MIN_KB; i <= THHUL_DPOOL_SZ_MAX_KB; i++) { + (*qpm_p)->dpool_p[i - THHUL_DPOOL_SZ_MIN_KB]= NULL; + } + MOSAL_mutex_init(&(*qpm_p)->dpool_lock); + MOSAL_spinlock_init(&((*qpm_p)->hash_lock)); + + return HH_OK; +} + + +static void THHUL_qpm_qp_destroy( + THHUL_qpm_t qpm, + THHUL_qp_t qp) +{ + /* Clean all CQEs which refer to this QP */ + THHUL_cqm_cq_cleanup(qp->rq_cq, qp->qpn, qpm->srqm, qp->srq); + if (qp->sq_cq != qp->rq_cq) /* additional cleaning required only if SQ's CQ is different */ + THHUL_cqm_cq_cleanup(qp->sq_cq, qp->qpn, qpm->srqm, HHUL_INVAL_SRQ_HNDL); + + /* Free QP resources: Auxilary buffer + WQEs buffer */ + if (qp->sq_res.wqe_id != NULL) { + THH_SMART_FREE(qp->sq_res.wqe_id, qp->sq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + if (qp->rq_res.wqe_id != NULL) { + THH_SMART_FREE(qp->rq_res.wqe_id, qp->rq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + if (qp->wqe_buf_orig != NULL) {/* WQEs buffer were allocated in process mem. or by the THH_qpm ? */ + MTL_DEBUG4(MT_FLFMT("Freeing WQEs buffer at 0x%p"),qp->wqe_buf_orig); + if (qp->dpool_p == NULL) { /* direct allocation */ + if (qp->used_virt_alloc) + MOSAL_pci_virt_free_consistent(qp->wqe_buf_orig, qp->wqe_buf_orig_size); + else + MOSAL_pci_phys_free_consistent(qp->wqe_buf_orig, qp->wqe_buf_orig_size); + } else { /* used dpool */ + dpool_free(qpm, qp->dpool_p, qp->wqe_buf_orig); + } + } + FREE(qp->sq_res.wqe_draft); + if (qp->sq_res.wqe_tmp) + FREE(qp->sq_res.wqe_tmp); + FREE(qp->rq_res.wqe_draft); + FREE(qp); +} + + +HH_ret_t THHUL_qpm_destroy( + THHUL_qpm_t qpm +) +{ + + THHUL_qp_t qp; + qp_hash_entry_t *entry2remove_p; + int i; + +#if QPM_USE_FIXED_QP_ARRAY + /* clean up qp in fixed array */ + int j; + for (i= 0; i < QP_HASH_TBL_SZ; i++) { + qp_array_entry_t *qp_array_e = &qpm->array_tbl[i]; + j = 0; + while (qp_array_e->qp_array[j].qpn != QP_ARRAY_UNUSED) { + if(qp_array_e->qp_array[j].qpn != QP_ARRAY_REUSE) + { + qp = qp_array_e->qp_array[j].qp; + + /* clean up qp structure */ + THHUL_qpm_qp_destroy(qpm, qp); + qp_array_e->qp_array[j].qpn = QP_ARRAY_REUSE; + qp_array_e->qp_array[j].qp = 0; + } + j++; + }/* while (array_tbl[i]...) */ + } +#endif + /* clean up qp in hash table */ + for (i= 0; i < QP_HASH_TBL_SZ; i++) { + while (qpm->hash_tbl[i]) { + entry2remove_p = qpm->hash_tbl[i]; + qpm->hash_tbl[i] = entry2remove_p->next; + qp = entry2remove_p->qp; + FREE(entry2remove_p); + + /* clean up qp structure */ + THHUL_qpm_qp_destroy(qpm, qp); + + }/* while (hash_tbl[i]..)*/ + }/* for (i.. QP_HASH_TBL_SZ)*/ + + + FREE(qpm); + return HH_OK; +} + + +HH_ret_t THHUL_qpm_create_qp_prep( + HHUL_hca_hndl_t hca, + HHUL_qp_init_attr_t *qp_init_attr_p, + HHUL_qp_hndl_t *qp_hndl_p, + VAPI_qp_cap_t *qp_cap_out_p, + void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p +) +{ + return qp_prep(hca,VAPI_REGULAR_QP,qp_init_attr_p,qp_hndl_p,qp_cap_out_p, + (THH_qp_ul_resources_t*)qp_ul_resources_p, + FALSE); /* Default is allocation of WQEs buffer in host's mem. 
*/
+}
+
+HH_ret_t THHUL_qpm_special_qp_prep(
+ HHUL_hca_hndl_t hca,
+ VAPI_special_qp_t qp_type,
+ IB_port_t port,
+ HHUL_qp_init_attr_t *qp_init_attr_p,
+ HHUL_qp_hndl_t *qp_hndl_p,
+ VAPI_qp_cap_t *qp_cap_out_p,
+ void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p
+)
+{
+ return qp_prep(hca,qp_type,qp_init_attr_p,qp_hndl_p,qp_cap_out_p,
+ (THH_qp_ul_resources_t*)qp_ul_resources_p,
+ FALSE); /* For special QPs no performance issue - WQEs in main memory */
+}
+
+
+HH_ret_t THHUL_qpm_create_qp_done(
+ HHUL_hca_hndl_t hca,
+ HHUL_qp_hndl_t hhul_qp,
+ IB_wqpn_t hh_qp,
+ void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p
+)
+{
+ THHUL_qpm_t qpm;
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ THH_qp_ul_resources_t *ul_res_p= (THH_qp_ul_resources_t*)qp_ul_resources_p;
+ HH_ret_t rc;
+
+ rc= THHUL_hob_get_qpm(hca,&qpm);
+ if (rc != HH_OK) {
+ MTL_ERROR4("THHUL_qpm_create_qp_done: Invalid HCA handle (%p).",hca);
+ return HH_EINVAL;
+ }
+ if (qp == NULL) {
+ MTL_ERROR4("THHUL_qpm_create_qp_done: NULL hhul_qp handle.");
+ return HH_EINVAL;
+ }
+
+ if ((qp->wqe_buf_orig == NULL) && (qp->wqe_buf_orig_size != 0)) {
+ /* WQEs buffer allocated in DDR mem. by THH_qpm */
+ if (ul_res_p->wqes_buf == 0) {
+ MTL_ERROR1(MT_FLFMT("Got NULL WQEs buffer from qp_ul_res for new qpn=%d.\n"),qp->qpn);
+ return HH_EINVAL;
+ }
+ /* Set the per queue resources */
+ qp->rq_res.wqe_buf= MT_UP_ALIGNX_VIRT(ul_res_p->wqes_buf,qp->rq_res.log2_max_wqe_sz);
+ if (qp->rq_res.wqe_buf != ul_res_p->wqes_buf) {
+ MTL_ERROR1(
+ "THHUL_qpm_create_qp_done: Buffer allocated by THH_qpm ("VIRT_ADDR_FMT") "
+ "is not aligned to RQ WQE size (%d bytes).\n",
+ ul_res_p->wqes_buf,1<<qp->rq_res.log2_max_wqe_sz);
+ return HH_EINVAL;
+ }
+ /* SQ is after RQ - aligned to its WQE size */
+ qp->sq_res.wqe_buf= MT_UP_ALIGNX_VIRT(qp->rq_res.wqe_buf +
+ (qp->rq_res.max_outs << qp->rq_res.log2_max_wqe_sz), /* End of RQ WQEs buffer */
+ qp->sq_res.log2_max_wqe_sz);
+ }
+ /* point at the last two 32 bit words in the wqe_buf,
+ * this allows us to remove an if in VAPI_post_sr path */
+ qp->sq_res.last_posted_p = (volatile u_int32_t*)(qp->sq_res.wqe_buf + (1 << qp->sq_res.log2_max_wqe_sz));
+ qp->rq_res.last_posted_p = (volatile u_int32_t*)(qp->rq_res.wqe_buf + (1 << qp->rq_res.log2_max_wqe_sz));
+
+ qp->qpn= hh_qp;
+ /* Insert QP to the hash table with the given QP number */
+ rc= insert_to_hash(qpm,qp);
+ if (rc != HH_OK) {
+ MTL_ERROR2("THHUL_qpm_create_qp_done: Failed inserting to hash table "
+ "(QP will remain unusable) !");
+ qp->qpn= 0xFFFFFFFF; /* An invalid QP num. marks that QP initialization was not completed. 
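+ *
+ * Editor's note -- illustrative arithmetic, not from the original source: the
+ * buffer layout computed above places the RQ WQEs first and the SQ right after
+ * them, each aligned up to its power-of-2 WQE size. With hypothetical numbers
+ * (wqes_buf=0x10000; rq: max_outs=8, log2_max_wqe_sz=6, i.e. 64B WQEs;
+ * sq: log2_max_wqe_sz=7, i.e. 128B WQEs):
+ *
+ *   rq_res.wqe_buf = 0x10000                               (already 64B-aligned)
+ *   sq_res.wqe_buf = MT_UP_ALIGNX_VIRT(0x10000 + (8<<6), 7) = 0x10200
+ *
+ * where MT_UP_ALIGNX_VIRT(v,shift) rounds v up to a multiple of (1<<shift).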
*/ + return rc; + } + + MTL_DEBUG4(MT_FLFMT("%s: qpn=0x%X rq_res{buf_p=%p, sz=0x%X} sq_res{buf_p=%p, sz=0x%X}"), __func__, + qp->qpn, + (void*)qp->rq_res.wqe_buf, (1 << qp->rq_res.log2_max_wqe_sz) * qp->rq_res.max_outs, + (void*)qp->sq_res.wqe_buf, (1 << qp->sq_res.log2_max_wqe_sz) * qp->sq_res.max_outs); + + return HH_OK; +} + + +HH_ret_t THHUL_qpm_destroy_qp_done( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp +) +{ + THHUL_qpm_t qpm; + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + HH_ret_t rc; + if ( !qp ) { + MTL_ERROR4("THHUL_qpm_destroy_qp_done: Invalid QP handle (%p).",qp); + return HH_EINVAL; + } + MTL_DEBUG1("THHUL_qpm_destroy_qp_done(hca=%s,hhul_qp=%p) {\n",hca->dev_desc,qp); + rc= THHUL_hob_get_qpm(hca,&qpm); + if (rc != HH_OK) { + MTL_ERROR4("THHUL_qpm_destroy_qp_done: Invalid HCA handle (%p).",hca); + return HH_EINVAL; + } + MTL_DEBUG4(MT_FLFMT("Got qpm with %d QPs"),qpm->qp_cnt); + + if (IS_VALID_QPN(qp->qpn)) { /* QP has completed THHUL_qpm_create_qp_done successfully */ + + /* Clean all CQEs which refer to this QP */ + THHUL_cqm_cq_cleanup(qp->rq_cq, qp->qpn, qpm->srqm, qp->srq); + if (qp->sq_cq != qp->rq_cq) /* additional cleaning required only if SQ's CQ is different */ + THHUL_cqm_cq_cleanup(qp->sq_cq, qp->qpn, qpm->srqm, HHUL_INVAL_SRQ_HNDL); + + /* Remove QP from hash table (after assured no more CQEs of this QP exist) */ + rc= remove_from_hash(qpm,qp); + if (rc != HH_OK) { + MTL_ERROR2("THHUL_qpm_destroy_qp_done: Failed removing qp from hash table " + "(assuming invalid QP handle) !"); + return HH_EINVAL_QP_NUM; + } + MTL_DEBUG4(MT_FLFMT("QP %d removed from hash table"),qp->qpn); + + } + + /* Free QP resources: Auxilary buffer + WQEs buffer */ + MTL_DEBUG4(MT_FLFMT("Freeing user level WQE-IDs auxilary buffers")); + if (qp->sq_res.wqe_id != NULL) { + THH_SMART_FREE(qp->sq_res.wqe_id, qp->sq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + if (qp->rq_res.wqe_id != NULL) { + THH_SMART_FREE(qp->rq_res.wqe_id, qp->rq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + if (qp->wqe_buf_orig != NULL) {/* WQEs buffer were allocated in process mem. or by the THH_qpm ? 
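+ *
+ * Editor's note (summary of the logic below, no new behavior): wqe_buf_orig != NULL
+ * means the buffer was allocated in process memory by this library -- either
+ * directly (MOSAL_pci_virt/phys_free_consistent, depending on used_virt_alloc)
+ * or from a descriptor pool (dpool_free) -- while wqe_buf_orig == NULL with a
+ * non-zero size means THH_qpm placed the WQEs in attached DDR memory, which is
+ * presumably released by THH_qpm itself.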
*/ + MTL_DEBUG4(MT_FLFMT("Freeing WQEs buffer at 0x%p"),qp->wqe_buf_orig); + if (qp->dpool_p == NULL) { /* direct allocation */ +#ifdef WIN32 + cl_free( qp->wqe_buf_orig ); +#else + if (qp->used_virt_alloc) + MOSAL_pci_virt_free_consistent(qp->wqe_buf_orig, qp->wqe_buf_orig_size); + else + MOSAL_pci_phys_free_consistent(qp->wqe_buf_orig, qp->wqe_buf_orig_size); +#endif + } else { /* used dpool */ + dpool_free(qpm, qp->dpool_p, qp->wqe_buf_orig); + } + } + if (qp->sq_res.wqe_tmp) + FREE(qp->sq_res.wqe_tmp); + if ( qp->sq_res.wqe_draft ) FREE(qp->sq_res.wqe_draft); + if ( qp->rq_res.wqe_draft ) FREE(qp->rq_res.wqe_draft); + FREE(qp); + /* update QPs counter */ + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + qpm->qp_cnt--; + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + MTL_DEBUG1("} /* THHUL_qpm_destroy_qp_done */ \n"); + return HH_OK; +} + +HH_ret_t THHUL_qpm_modify_qp_done( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_qp_state_t cur_state +) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + THHUL_qpm_t qpm; + HH_ret_t rc; + + if (qp == NULL) { + MTL_ERROR1("THHUL_qpm_modify_qp_done: NULL hhul_qp.\n"); + return HH_EINVAL; + } + + rc= THHUL_hob_get_qpm(hca,&qpm); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed to get QPM handle (%d=%s)"), __func__, rc, HH_strerror_sym(rc)); + return rc; + } + + /* Update in RQ */ + if (cur_state == VAPI_RESET) { + /* Cleanup all CQEs of RQ (flush) when moving to reset state */ + THHUL_cqm_cq_cleanup(qp->rq_cq, qp->qpn, qpm->srqm, qp->srq); + MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); + qp->rq_res.cur_outs= 0; + qp->rq_res.next2free_index= qp->rq_res.next2post_index= 0; + qp->rq_res.last_posted_p= NULL; + qp->rq_res.qp_state= VAPI_RESET; + + /* point at the last two 32 bit words in the wqe_buf, + * this allows us to remove an if in VAPI_post_sr path */ + qp->rq_res.last_posted_p = (volatile u_int32_t*)(qp->rq_res.wqe_buf + (1 << qp->rq_res.log2_max_wqe_sz)); + + MOSAL_spinlock_unlock(&(qp->rq_res.q_lock)); + } else { + qp->rq_res.qp_state= cur_state; + } + + /* Update in SQ */ + if (cur_state == VAPI_RESET) { + /* Cleanup all CQEs of SQ (flush) when moving to reset state */ + if (qp->sq_cq != qp->rq_cq) /* additional cleaning required only if SQ's CQ is different */ + THHUL_cqm_cq_cleanup(qp->sq_cq, qp->qpn, qpm->srqm, HHUL_INVAL_SRQ_HNDL); + MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); + qp->sq_res.cur_outs= 0; + qp->sq_res.next2free_index= qp->sq_res.next2post_index= 0; + qp->sq_res.last_posted_p= NULL; + qp->sq_res.qp_state= VAPI_RESET; + + /* point at the last two 32 bit words in the wqe_buf, + * this allows us to remove an if in VAPI_post_sr path */ + qp->sq_res.last_posted_p = (volatile u_int32_t*)(qp->sq_res.wqe_buf + (1 << qp->sq_res.log2_max_wqe_sz)); + + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + } else { + qp->sq_res.qp_state= cur_state; + } + + return HH_OK; +} + + + +#if !(USE_FAST_POST) +/* Original version */ +HH_ret_t THHUL_qpm_post_send_req( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_sr_desc_t *send_req_p +) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + u_int32_t* wqe_draft= qp->sq_res.wqe_draft; + u_int32_t wqe_sz_dwords; + HH_hca_hndl_t hca_ul_res_handle; + HH_ret_t rc; + + if (!is_qpstate_valid_2send(qp->sq_res.qp_state)) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state); + return HH_EINVAL_QP_STATE; + } + + if (qp->sq_res.max_sg_sz < send_req_p->sg_lst_len) { + MTL_ERROR2( + "THHUL_qpm_post_send_req: Scatter/Gather list is too large (%d entries > 
max_sg_sz=%d)\n",
+ send_req_p->sg_lst_len,qp->sq_res.max_sg_sz);
+ return HH_EINVAL_SG_NUM;
+ }
+
+#ifdef DUMP_SEND_REQ
+ dump_send_req(qp,send_req_p);
+#endif
+
+ MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */
+
+ if (qp->sqp_type == VAPI_REGULAR_QP) {
+ MTPERF_TIME_START(WQE_build_send);
+ wqe_sz_dwords= (WQE_build_send(qp,send_req_p,wqe_draft) >> 2);
+ MTPERF_TIME_END(WQE_build_send);
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1U << qp->sq_res.log2_max_wqe_sz)) {
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d)"),
+ qp->qpn,(wqe_sz_dwords<<2),(1U << qp->sq_res.log2_max_wqe_sz));
+ }
+#endif
+ } else { /* special QP */
+ if (send_req_p->opcode != VAPI_SEND) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_OPCODE;
+ }
+ send_req_p->fence= FALSE; /* required for MLX requests */
+ rc= THHUL_hob_get_hca_ul_res_handle(hca,&hca_ul_res_handle);
+ if (rc != HH_OK) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_HCA_HNDL;
+ }
+ wqe_sz_dwords=
+ (WQE_build_send_mlx(hca_ul_res_handle, qp,send_req_p,QP1_PKEY_INDEX,wqe_draft) >> 2);
+ if (wqe_sz_dwords == 0) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR1(MT_FLFMT("Failed building MLX headers for special QP.\n"));
+ return HH_EINVAL_WQE;
+ }
+ }
+
+ rc= sq_alloc_wqe_link_and_ring(qp,wqe_draft,wqe_sz_dwords,
+#ifdef MT_LITTLE_ENDIAN
+ wqe_sz_dwords,
+#endif
+ send_req_p,encode_nopcode(send_req_p->opcode));
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return rc;
+}
+
+#else /* USE_FAST_POST == 1 */
+/* Optimized version of post_send_req:
+ * Remove the double copy of building the WQE in wqe_draft and then copying it to
+ * wqe_buf; instead, write directly to wqe_buf. The conversion to big-endian used
+ * to be done while copying from wqe_draft to wqe_buf; now we have to build the
+ * big-endian version directly. All the functions with the ***_be extension build
+ * the big-endian version of the WQE. 
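+ *
+ * Editor's sketch (assumption, not from the original source) of what this saves,
+ * namely one full read+write pass over the WQE:
+ *
+ *   draft path: for (i= 0; i < n; i++) wqe_draft[i]= pack(field_i);  then
+ *               for (i= 0; i < n; i++) wqe_buf[i]= MOSAL_cpu_to_be32(wqe_draft[i]);
+ *   _be path:   for each field: wqe_buf[off]= MOSAL_cpu_to_be32(pack(field));
+ *
+ * i.e. the byte swap is folded into the build itself and the intermediate
+ * wqe_draft store/load disappears from the post-send fast path. 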
+ */
+HH_ret_t THHUL_qpm_post_send_req(
+ HHUL_hca_hndl_t hca,
+ HHUL_qp_hndl_t hhul_qp,
+ VAPI_sr_desc_t *send_req_p
+)
+{
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ u_int32_t wqe_sz_dwords;
+ THH_hca_ul_resources_t hca_ul_res;
+ HH_ret_t rc;
+ volatile u_int32_t* next_wqe; /* Actual WQE pointer */
+
+
+ if(MOSAL_EXPECT_FALSE(qp->sq_res.qp_state < VAPI_RTS)) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ if (MOSAL_EXPECT_FALSE(qp->sq_res.max_sg_sz < send_req_p->sg_lst_len)) {
+ MTL_ERROR2(
+ "THHUL_qpm_post_send_req: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n",
+ send_req_p->sg_lst_len,qp->sq_res.max_sg_sz);
+ return HH_EINVAL_SG_NUM;
+ }
+
+#ifdef DUMP_SEND_REQ
+ dump_send_req(qp,send_req_p);
+#endif
+
+ MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */
+
+ /* Check if any WQEs are free to be consumed */
+ if (MOSAL_EXPECT_FALSE(qp->sq_res.max_outs == qp->sq_res.cur_outs)) {
+ MTL_ERROR4("THHUL_qpm_post_send_req: Send queue is full (%u requests outstanding).\n",
+ qp->sq_res.cur_outs);
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); /* do not return with the queue lock held */
+ return HH_E2BIG_WR_NUM;
+ }
+
+ /* Allocate next WQE */
+ /* The WQE is built directly in wqe_buf, instead of in the draft, so the
+ * big-endian version of WQE_build_send should be called
+ */
+ next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf +
+ (qp->sq_res.next2post_index << qp->sq_res.log2_max_wqe_sz) );
+
+
+ if (MOSAL_EXPECT_TRUE(qp->sqp_type == VAPI_REGULAR_QP)) {
+ /* call big_endian version of WQE_build_send */
+ wqe_sz_dwords= (WQE_build_send_be(qp,send_req_p,(u_int32_t*)next_wqe) >> 2);
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1 << qp->sq_res.log2_max_wqe_sz))
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d)"),
+ qp->qpn,(wqe_sz_dwords<<2),(1 << qp->sq_res.log2_max_wqe_sz));
+#endif
+ } else { /* special QP */
+
+ u_int32_t* wqe_draft= qp->sq_res.wqe_draft;
+ unsigned int i;
+
+ if (MOSAL_EXPECT_FALSE(send_req_p->opcode != VAPI_SEND)) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_WQE;
+ }
+ send_req_p->fence= FALSE; /* required for MLX requests */
+ if(MOSAL_EXPECT_FALSE(THHUL_hob_get_hca_ul_res(hca,&hca_ul_res) != HH_OK))
+ {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_HCA_HNDL;
+ }
+ wqe_sz_dwords= (WQE_build_send_mlx(hca_ul_res.hh_hca_hndl,qp,send_req_p,QP1_PKEY_INDEX,wqe_draft) >> 2);
+ if (MOSAL_EXPECT_FALSE(wqe_sz_dwords == 0)) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR1(MT_FLFMT("Failed building MLX headers for special QP.\n"));
+ return HH_EINVAL_WQE;
+ }
+
+ /* we used a temporary (draft) memory space, so move it to the destination; we also
+ used the standard routines that don't byte-swap each word, so byte-swap
+ each word if we are on a little-endian cpu */
+ for(i = 0; i < wqe_sz_dwords; ++i) {
+ next_wqe[i] = MOSAL_cpu_to_be32(wqe_draft[i]);
+ }
+
+ }
+
+ rc= sq_alloc_wqe_link_and_ring_be(qp,(u_int32_t*)next_wqe,wqe_sz_dwords,
+#ifdef MT_LITTLE_ENDIAN
+ wqe_sz_dwords,
+#endif
+ send_req_p,encode_nopcode(send_req_p->opcode));
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return rc;
+}
+#endif /* USE_FAST_POST */
+
+
+#if USE_FAST_POST == 0
+/* Original version of THHUL_qpm_post_inline_send_req */
+HH_ret_t THHUL_qpm_post_inline_send_req(
+ HHUL_hca_hndl_t hca,
+ HHUL_qp_hndl_t hhul_qp,
+ VAPI_sr_desc_t *send_req_p
+)
+{
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ u_int32_t* wqe_draft= qp->sq_res.wqe_draft;
+ u_int8_t *cur_loc_p= (u_int8_t*)wqe_draft; /* Current location in the WQE 
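+ *
+ * Editor's note on the optimized post_send_req above (illustrative, not from
+ * the original source): USE_FAST_POST also replaces the modulo advance of
+ * next2post_index with a compare-and-reset, avoiding an integer division on
+ * every post:
+ *
+ *   old: next2post_index = (next2post_index + 1) % max_outs;
+ *   new: if (++next2post_index >= max_outs) next2post_index = 0;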
*/
+ u_int8_t *wqe_edge_p= ((u_int8_t*)wqe_draft)+(1<<qp->sq_res.log2_max_wqe_sz);
+ u_int32_t wqe_sz_dwords;
+ u_int32_t* inline_p; /* inline control word */
+ u_int32_t i;
+ HH_ret_t rc;
+
+#ifdef DUMP_SEND_REQ
+ dump_send_req(qp,send_req_p);
+#endif
+
+ if (!is_qpstate_valid_2send(qp->sq_res.qp_state)) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */
+
+ cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */
+ cur_loc_p+= WQE_pack_ctrl_send((u_int32_t*)cur_loc_p, /* Pack Control segment */
+ send_req_p->comp_type, send_req_p->set_se, 0/*event bit*/,
+ ((send_req_p->opcode == VAPI_RDMA_WRITE_WITH_IMM) ||
+ (send_req_p->opcode == VAPI_SEND_WITH_IMM) ) ? send_req_p->imm_data : 0);
+
+ /* Transport type checks: Datagram segment */
+ switch (qp->ts_type) {
+ case VAPI_TS_UD: /* Check if UD (UD datagram segment) */
+ cur_loc_p+= WQE_pack_ud((u_int32_t*)cur_loc_p,
+ qp->ud_av_memkey,translate_av(qp, (u_int64_t)send_req_p->remote_ah),
+ send_req_p->remote_qp,send_req_p->remote_qkey);
+ break;
+ case VAPI_TS_RD: /* Check if RD (RD datagram segment) */
+ cur_loc_p+= WQE_pack_rd((u_int32_t*)cur_loc_p,
+ send_req_p->remote_qp,send_req_p->remote_qkey);
+ break;
+ default:
+ break;
+ }
+
+ /* Opcode checks + Remote-address/Atomic segments */
+ switch (send_req_p->opcode) {
+ /* For RDMA operations: only Remote-address segment */
+ case VAPI_RDMA_WRITE:
+ case VAPI_RDMA_WRITE_WITH_IMM:
+ cur_loc_p+= WQE_pack_remote_addr((u_int32_t*)cur_loc_p,
+ send_req_p->remote_addr,send_req_p->r_key);
+ break;
+
+ case VAPI_SEND:
+ case VAPI_SEND_WITH_IMM:
+ break; /* Valid opcodes for "inline" but no extra WQE segment */
+ default:
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_OPCODE; /* Invalid opcode */
+ }
+
+ inline_p= (u_int32_t*)cur_loc_p;
+ cur_loc_p+= WQE_INLINE_SZ_BCOUNT;
+ /* copy inline data to WQE */
+ for (i= 0; i < send_req_p->sg_lst_len; i++) {
+ if ((cur_loc_p+send_req_p->sg_lst_p[i].len) > wqe_edge_p) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR2(MT_FLFMT("too much inline data for inline send request (qpn=0x%X)"),qp->qpn);
+ return HH_EINVAL_SG_NUM;
+ }
+ if (send_req_p->sg_lst_p[i].addr > (MT_virt_addr_t)MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF)) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR2(MT_FLFMT("sg list addr %d has non-zero upper bits (qpn=0x%X, addr="U64_FMT") \n"),
+ i,qp->qpn,send_req_p->sg_lst_p[i].addr );
+ return HH_EINVAL_SG_FMT;
+ }
+ memcpy(cur_loc_p, (void*)(MT_virt_addr_t)(send_req_p->sg_lst_p[i].addr),
+ send_req_p->sg_lst_p[i].len);
+ cur_loc_p+= send_req_p->sg_lst_p[i].len;
+ }
+ *inline_p=
+ (u_int32_t)(0x80000000 | (((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)inline_p) - 4)); /*inline:size*/
+
+ wqe_sz_dwords= (MT_UP_ALIGNX_U32( (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_draft)),
+ WQE_SZ_MULTIPLE_SHIFT) >> 2);
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1U << qp->sq_res.log2_max_wqe_sz)) {
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d) !!!!!!!!!"),
+ qp->qpn,(wqe_sz_dwords<<2),(1U << qp->sq_res.log2_max_wqe_sz));
+ }
+#endif
+
+ rc= sq_alloc_wqe_link_and_ring(qp,wqe_draft,wqe_sz_dwords,
+#ifdef MT_LITTLE_ENDIAN
+ (u_int32_t)(inline_p - wqe_draft + 1), /* swap all up to data */
+#endif
+ send_req_p,encode_nopcode(send_req_p->opcode));
+ 
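+#if 0
+ /* Editor's sketch (not part of the original code): the inline control dword
+    written through inline_p above is bit 31 (the "inline" flag) OR'ed with
+    the byte count of the inline data that follows it: */
+ {
+ u_int32_t inline_sz = (u_int32_t)(((MT_virt_addr_t)cur_loc_p) -
+ ((MT_virt_addr_t)inline_p)) - 4; /* exclude the control word itself */
+ u_int32_t inline_ctl = 0x80000000 | inline_sz; /* equals *inline_p as set above */
+ }
+#endif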
MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return rc;
+}
+#else
+/* Optimized version of THHUL_qpm_post_inline_send_req.
+ * Remove the extra copy from wqe_draft to the WQE; instead write directly
+ * to the WQE buffer.
+ */
+HH_ret_t THHUL_qpm_post_inline_send_req(
+ HHUL_hca_hndl_t hca,
+ HHUL_qp_hndl_t hhul_qp,
+ VAPI_sr_desc_t *send_req_p
+)
+{
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ volatile u_int32_t* next_wqe; /* Actual WQE pointer */
+ u_int8_t *cur_loc_p; /* Current location in the WQE */
+ u_int8_t *wqe_edge_p;/* End of Wqe buffer */
+ u_int32_t wqe_sz_dwords;
+ u_int32_t* inline_p; /* inline control word */
+ u_int32_t i;
+ HH_ret_t rc;
+
+#ifdef DUMP_SEND_REQ
+ dump_send_req(qp,send_req_p);
+#endif
+
+ if(MOSAL_EXPECT_FALSE(qp->sq_res.qp_state < VAPI_RTS)) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */
+
+ /* Allocate next WQE */
+ /* The WQE is built directly in wqe_buf, instead of in the draft, so the
+ * big-endian versions of the WQE build helpers should be called
+ */
+ next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf +
+ (qp->sq_res.next2post_index << qp->sq_res.log2_max_wqe_sz) );
+
+ cur_loc_p = (u_int8_t *)next_wqe;
+ wqe_edge_p = ((u_int8_t*)next_wqe)+(1<<qp->sq_res.log2_max_wqe_sz);
+
+ cur_loc_p+= qpm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */
+ cur_loc_p+= WQE_pack_ctrl_send_be((u_int32_t*)cur_loc_p, /* Pack Control segment */
+ send_req_p->comp_type, send_req_p->set_se, 0/*event bit*/,
+ ((send_req_p->opcode == VAPI_RDMA_WRITE_WITH_IMM) ||
+ (send_req_p->opcode == VAPI_SEND_WITH_IMM) ) ? send_req_p->imm_data : 0);
+
+ /* Transport type checks: Datagram segment */
+ switch (qp->ts_type) {
+ case VAPI_TS_UD: /* Check if UD (UD datagram segment) */
+ cur_loc_p+= WQE_pack_ud_be((u_int32_t*)cur_loc_p,
+ qp->ud_av_memkey,translate_av(qp, (u_int64_t)send_req_p->remote_ah),
+ send_req_p->remote_qp,send_req_p->remote_qkey);
+ break;
+ case VAPI_TS_RD: /* Check if RD (RD datagram segment) */
+ cur_loc_p+= WQE_pack_rd_be((u_int32_t*)cur_loc_p,
+ send_req_p->remote_qp,send_req_p->remote_qkey);
+ break;
+ default:
+ break;
+ }
+
+ /* Opcode checks + Remote-address/Atomic segments */
+ switch (send_req_p->opcode) {
+ /* For RDMA operations: only Remote-address segment */
+ case VAPI_RDMA_WRITE:
+ case VAPI_RDMA_WRITE_WITH_IMM:
+ cur_loc_p+= WQE_pack_remote_addr_be((u_int32_t*)cur_loc_p,
+ send_req_p->remote_addr,send_req_p->r_key);
+ break;
+
+ case VAPI_SEND:
+ case VAPI_SEND_WITH_IMM:
+ break; /* Valid opcodes for "inline" but no extra WQE segment */
+ default:
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_EINVAL_OPCODE; /* Invalid opcode */
+ }
+
+ inline_p= (u_int32_t*)cur_loc_p;
+ cur_loc_p+= WQE_INLINE_SZ_BCOUNT;
+ /* copy inline data to WQE */
+ for (i= 0; i < send_req_p->sg_lst_len; i++) {
+ if (MOSAL_EXPECT_FALSE((cur_loc_p+send_req_p->sg_lst_p[i].len) > wqe_edge_p)) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR2(MT_FLFMT("too much inline data for inline send request (qpn=0x%X)"),qp->qpn);
+ return HH_EINVAL_SG_NUM;
+ }
+//#ifdef MT_64BIT
+#if 0 /* TBD: Is this needed? 
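+ *
+ * Editor's note: the cast makes the guarded test below a check for address
+ * bits above the native pointer width. On a 64-bit build (MT_virt_addr_t is
+ * 64 bits wide) the comparison can never be true, so compiling it out under
+ * #if 0 loses nothing there; on a 32-bit build it would catch 64-bit
+ * addresses that cannot be represented.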
*/
+ if (MOSAL_EXPECT_FALSE(send_req_p->sg_lst_p[i].addr > (MT_virt_addr_t)0xFFFFFFFFFFFFFFFF)) {
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ MTL_ERROR2(MT_FLFMT("sg list addr %d has non-zero upper bits (qpn=0x%X, addr="U64_FMT") \n"),
+ i,qp->qpn,send_req_p->sg_lst_p[i].addr );
+ return HH_EINVAL_SG_FMT;
+ }
+#endif
+ memcpy(cur_loc_p, (void*)(MT_virt_addr_t)(send_req_p->sg_lst_p[i].addr),
+ send_req_p->sg_lst_p[i].len);
+ cur_loc_p+= send_req_p->sg_lst_p[i].len;
+ }
+ *inline_p=
+ MOSAL_cpu_to_be32(0x80000000 | (u_int32_t)(((MT_ulong_ptr_t)cur_loc_p) - ((MT_ulong_ptr_t)inline_p) - 4)); /*inline:size*/
+
+ wqe_sz_dwords= (MT_UP_ALIGNX( (((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)next_wqe)),
+ WQE_SZ_MULTIPLE_SHIFT) >> 2);
+
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1 << qp->sq_res.log2_max_wqe_sz)) {
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d) !!!!!!!!!"),
+ qp->qpn,(wqe_sz_dwords<<2),(1 << qp->sq_res.log2_max_wqe_sz));
+ }
+#endif
+
+ rc= sq_alloc_wqe_link_and_ring_be(qp,(u_int32_t*)next_wqe,wqe_sz_dwords,
+#ifdef MT_LITTLE_ENDIAN
+ (u_int32_t)(inline_p - next_wqe + 1), /* swap all up to data */
+#endif
+ send_req_p,encode_nopcode(send_req_p->opcode));
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return rc;
+}
+#endif /* USE_FAST_POST == 0 */
+
+
+HH_ret_t THHUL_qpm_post_send_reqs(
+ /*IN*/ HHUL_hca_hndl_t hca,
+ /*IN*/ HHUL_qp_hndl_t hhul_qp,
+ /*IN*/ u_int32_t num_of_requests,
+ /*IN*/ VAPI_sr_desc_t *send_req_array
+)
+{
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ u_int32_t* wqe_draft= qp->sq_res.wqe_draft;
+ u_int32_t next_draft[WQE_SEG_SZ_NEXT>>2]; /* Build "next" segment here */
+ volatile u_int32_t* next_wqe= NULL;
+ volatile u_int32_t* prev_wqe_p= NULL;
+ MT_virt_addr_t first_wqe_nda= 0;
+ u_int32_t first_wqe_nds= 0;
+ u_int32_t wqe_sz_dwords,i;
+ u_int32_t next2post_index,reqi;
+ THH_uar_sendq_dbell_t sq_dbell;
+
+ if (qp->sqp_type != VAPI_REGULAR_QP) {
+ MTL_ERROR4(MT_FLFMT("THHUL_qpm_post_send_reqs is not supported for special QPs"));
+ return HH_ENOSYS;
+ }
+
+ if (num_of_requests == 0) {
+ MTL_ERROR4(MT_FLFMT("THHUL_qpm_post_send_reqs: num_of_requests=0 !"));
+ return HH_EINVAL_PARAM;
+ }
+
+ if (!is_qpstate_valid_2send(qp->sq_res.qp_state)) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */
+
+ /* Check for available WQEs */
+ if (qp->sq_res.max_outs < (qp->sq_res.cur_outs + num_of_requests)) {
+ MTL_ERROR4("THHUL_qpm_post_send_reqs: Not enough WQEs for %u requests (%u requests outstanding).\n",
+ num_of_requests,qp->sq_res.cur_outs);
+ MOSAL_spinlock_unlock(&(qp->sq_res.q_lock));
+ return HH_E2BIG_WR_NUM;
+ }
+
+ /* We hold this value in a separate var. 
for easy rollback in case of an error */ + next2post_index= qp->sq_res.next2post_index; + + /* Build and link all WQEs */ + for (reqi= 0; reqi < num_of_requests; reqi++) { + if (qp->sq_res.max_sg_sz < send_req_array[reqi].sg_lst_len) { + MTL_ERROR2( + "THHUL_qpm_post_send_req: S/G list of request %d is too large (%d entries > max_sg_sz=%d)\n", + reqi,send_req_array[reqi].sg_lst_len,qp->sq_res.max_sg_sz); + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + return HH_EINVAL_SG_NUM; + } + + MTPERF_TIME_START(WQE_build_send); + wqe_sz_dwords= (WQE_build_send(qp,send_req_array+reqi,wqe_draft) >> 2); + MTPERF_TIME_END(WQE_build_send); +#ifdef MAX_DEBUG + if ((wqe_sz_dwords<<2) > (1U << qp->sq_res.log2_max_wqe_sz)) { + MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d)"), + qp->qpn,(wqe_sz_dwords<<2),(1U << qp->sq_res.log2_max_wqe_sz)); + } +#endif + /* Allocate next WQE */ + next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf + + (next2post_index << qp->sq_res.log2_max_wqe_sz) ); + qp->sq_res.wqe_id[next2post_index]= send_req_array[reqi].id; /* Save WQE ID */ +// next2post_index = (next2post_index + 1) % qp->sq_res.max_outs ; + if (++next2post_index >= qp->sq_res.max_outs) { + next2post_index = 0; + } + /* copy (while swapping,if needed) the wqe_draft to the actual WQE */ + /* TBD: for big-endian machines we can optimize here and use memcpy */ + MTPERF_TIME_START(SQ_WQE_copy); + for (i= 0; i < wqe_sz_dwords; i++) { + next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]); + } + + if (reqi == 0) { /* For the first WQE save info for linking it later */ + first_wqe_nda= (MT_virt_addr_t)next_wqe; + first_wqe_nds= (wqe_sz_dwords>>2); + + } else { /* Not first - link to previous with DBD=0 */ + /* Build linking "next" segment in last posted WQE*/ + qpm_WQE_pack_send_next(next_draft, encode_nopcode(send_req_array[reqi].opcode), + send_req_array[reqi].fence,0/*DBD*/, (u_int32_t)(MT_ulong_ptr_t)next_wqe, wqe_sz_dwords>>2, + (qp->ts_type==VAPI_TS_RD) ? send_req_array[reqi].eecn : 0 ); + for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { + /* This copy assures big-endian as well as that NDS is written last */ + prev_wqe_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + } + prev_wqe_p= next_wqe; + + } + + if (qp->sq_res.last_posted_p != NULL) { /* link chain to previous WQE */ + /* Build linking "next" segment with DBD set */ + qpm_WQE_pack_send_next(next_draft, encode_nopcode(send_req_array[0].opcode), + send_req_array[0].fence,1/*DBD*/, (u_int32_t)first_wqe_nda, first_wqe_nds, + (qp->ts_type==VAPI_TS_RD) ? 
send_req_array[0].eecn : 0 ); + for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { + /* This copy assures big-endian as well as that NDS is written last */ + qp->sq_res.last_posted_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + } + + /* Update QP status */ + qp->sq_res.last_posted_p= next_wqe; + qp->sq_res.next2post_index= next2post_index; + qp->sq_res.cur_outs+= num_of_requests; + + /* Ring doorbell (send or rd-send) */ + sq_dbell.qpn= qp->qpn; + sq_dbell.nopcode= encode_nopcode(send_req_array[0].opcode); + sq_dbell.fence= send_req_array[0].fence; + sq_dbell.next_addr_32lsb= (u_int32_t)(first_wqe_nda & 0xFFFFFFFF); + sq_dbell.next_size= first_wqe_nds; + if (qp->ts_type == VAPI_TS_RD) { + THH_uar_sendq_rd_dbell(qp->uar,&sq_dbell,send_req_array[0].eecn); + } else { /* non-RD send request */ + MTPERF_TIME_START(THH_uar_sendq_dbell); + THH_uar_sendq_dbell(qp->uar,&sq_dbell); + MTPERF_TIME_END(THH_uar_sendq_dbell); + } + + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + return HH_OK; +} + +HH_ret_t THHUL_qpm_post_gsi_send_req( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_sr_desc_t *send_req_p, + VAPI_pkey_ix_t pkey_index +) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + u_int32_t* wqe_draft= qp->sq_res.wqe_draft; + u_int32_t wqe_sz_dwords; + HH_hca_hndl_t hca_ul_res_handle; + HH_ret_t rc; + + if (MOSAL_EXPECT_FALSE(qp->sqp_type != VAPI_GSI_QP)) { + MTL_ERROR2(MT_FLFMT("Invoked for non-GSI QP (qpn=0x%X)"),qp->qpn); + return HH_EINVAL_QP_NUM; + } + + if (MOSAL_EXPECT_FALSE(qp->sq_res.max_sg_sz < send_req_p->sg_lst_len)) { + MTL_ERROR2( + "%s: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n",__func__, + send_req_p->sg_lst_len,qp->sq_res.max_sg_sz); + return HH_EINVAL_SG_NUM; + } + + if (MOSAL_EXPECT_FALSE(!is_qpstate_valid_2send(qp->sq_res.qp_state))) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state); + return HH_EINVAL_QP_STATE; + } + +#ifdef DUMP_SEND_REQ + dump_send_req(qp,send_req_p); +#endif + + send_req_p->fence= FALSE; /* required for MLX requests */ + rc= THHUL_hob_get_hca_ul_res_handle(hca,&hca_ul_res_handle); + if (rc != HH_OK) { + return HH_EINVAL_HCA_HNDL; + } + + MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */ + + wqe_sz_dwords= + (WQE_build_send_mlx(hca_ul_res_handle,qp,send_req_p,pkey_index,wqe_draft) >> 2); + if (wqe_sz_dwords == 0) { + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + MTL_ERROR1(MT_FLFMT("Failed building MLX headers for special QP.\n")); + return HH_EINVAL_WQE; + } + + rc= sq_alloc_wqe_link_and_ring(qp,wqe_draft,wqe_sz_dwords, +#ifdef MT_LITTLE_ENDIAN + wqe_sz_dwords, +#endif + send_req_p,encode_nopcode(send_req_p->opcode)); + + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + return rc; +} + + +/* Orignal version of THHUL_qpm_post_recv_req */ +#if !(USE_FAST_POST) +HH_ret_t THHUL_qpm_post_recv_req( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_rr_desc_t *recv_req_p +) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + u_int32_t* wqe_draft= qp->rq_res.wqe_draft; + u_int32_t next_draft[WQE_SEG_SZ_NEXT>>2]; /* Build "next" segment here */ + volatile u_int32_t* next_wqe; /* Actual WQE pointer */ + u_int32_t i, wqe_sz_dwords; + THH_uar_recvq_dbell_t rq_dbell; + + if (qp->srq != HHUL_INVAL_SRQ_HNDL) { + MTL_ERROR1(MT_FLFMT("%s: Used for QP 0x%X which is associated with SRQ handle 0x%p"), __func__, + qp->qpn, qp->srq); + return HH_EINVAL_SRQ_HNDL; + } + + if (!is_qpstate_valid_2recv(qp->rq_res.qp_state)) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to 
recv \n"),__func__,qp->rq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ if (qp->rq_res.max_sg_sz < recv_req_p->sg_lst_len) {
+ MTL_ERROR2(
+ "THHUL_qpm_post_recv_req: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n",
+ recv_req_p->sg_lst_len,qp->rq_res.max_sg_sz);
+ return HH_EINVAL_SG_NUM;
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */
+
+ /* Build WQE */
+ wqe_sz_dwords= (qpm_WQE_build_recv(qp,recv_req_p,wqe_draft) >> 2);
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1U << qp->rq_res.log2_max_wqe_sz)) {
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Receive WQE too large (%d > max=%d)"),
+ qp->qpn,(wqe_sz_dwords<<2),(1U << qp->rq_res.log2_max_wqe_sz));
+ }
+#endif
+
+ /* Check if any WQEs are free to be consumed */
+ if (qp->rq_res.max_outs == qp->rq_res.cur_outs) {
+ MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+ MTL_ERROR4("THHUL_qpm_post_recv_req: Receive queue is full (%d requests outstanding).\n",
+ qp->rq_res.cur_outs);
+ return HH_E2BIG_WR_NUM;
+ }
+ /* Allocate next WQE */
+ next_wqe= (u_int32_t*) (qp->rq_res.wqe_buf +
+ (qp->rq_res.next2post_index << qp->rq_res.log2_max_wqe_sz) );
+ qp->rq_res.wqe_id[qp->rq_res.next2post_index]= recv_req_p->id; /* Save WQE ID */
+ qp->rq_res.next2post_index = (qp->rq_res.next2post_index + 1) % qp->rq_res.max_outs ;
+ qp->rq_res.cur_outs++;
+
+ /* copy (while swapping,if needed) the wqe_draft to the actual WQE */
+ /* TBD: for big-endian machines we can optimize here and use memcpy */
+ for (i= 0; i < wqe_sz_dwords; i++) {
+ next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]);
+ }
+
+ /* Update "next" segment of previous WQE (if any) */
+ if (qp->rq_res.last_posted_p != NULL) {
+ /* Build linking "next" segment in last posted WQE */
+ qpm_WQE_pack_recv_next(next_draft, (u_int32_t)(MT_ulong_ptr_t) next_wqe, wqe_sz_dwords>>2);
+ for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) {
+ /* This copy assures big-endian as well as that NDS is written last */
+ qp->rq_res.last_posted_p[i]= MOSAL_cpu_to_be32(next_draft[i]);
+ }
+ }
+ qp->rq_res.last_posted_p= next_wqe;
+
+ /* Ring doorbell */
+ rq_dbell.qpn= qp->qpn;
+ rq_dbell.next_addr_32lsb= (u_int32_t)((MT_virt_addr_t)next_wqe & 0xFFFFFFFF);
+ rq_dbell.next_size= wqe_sz_dwords>>2;
+ rq_dbell.credits= 1;
+ THH_uar_recvq_dbell(qp->uar,&rq_dbell);
+
+ MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+ return HH_OK;
+
+}
+#else /* USE_FAST_POST == 1 */
+/* Optimized version of post_recv_req:
+ * Remove the double copy of building the WQE in wqe_draft and then copying it to
+ * wqe_buf; instead, write directly to wqe_buf. The conversion to big-endian used
+ * to be done while copying from wqe_draft to wqe_buf; now we have to build the
+ * big-endian version directly. All the functions with the ***_be extension build
+ * the big-endian version of the WQE. 
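+ *
+ * Editor's sketch of the receive doorbell written below (illustrative, not from
+ * the original source; the authoritative field layout is in the Tavor PRM):
+ *
+ *   db[0] = MOSAL_cpu_to_be32((nda & ~0x3F) | nds_16byte_chunks);
+ *   db[1] = MOSAL_cpu_to_be32(credits | (qpn << 8));
+ *   MOSAL_MMAP_IO_WRITE_QWORD(uar_recv_dbell_addr, *(u_int64_t*)db);
+ *
+ * matching the chimeWords[] construction in the function body, where the NDS
+ * is rounded up to whole 16-byte chunks with (wqe_sz_dwords + 3) >> 2. 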
*/
+HH_ret_t THHUL_qpm_post_recv_req(
+ HHUL_hca_hndl_t hca,
+ HHUL_qp_hndl_t hhul_qp,
+ VAPI_rr_desc_t *recv_req_p
+)
+{
+ THHUL_qp_t qp= (THHUL_qp_t)hhul_qp;
+ volatile u_int32_t* next_wqe; /* Actual WQE pointer */
+ u_int32_t wqe_sz_dwords;
+ CHIME_WORDS_PREFIX u_int32_t chimeWords[2];
+ THH_uar_t uar;
+
+ if (MOSAL_EXPECT_FALSE(qp->srq != HHUL_INVAL_SRQ_HNDL)) {
+ MTL_ERROR1(MT_FLFMT("%s: Used for QP 0x%X which is associated with SRQ handle 0x%p"), __func__,
+ qp->qpn, qp->srq);
+ return HH_EINVAL_SRQ_HNDL;
+ }
+
+ if (MOSAL_EXPECT_FALSE(qp->rq_res.qp_state < VAPI_INIT )) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to recv \n"),__func__,qp->rq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ if (MOSAL_EXPECT_FALSE(qp->rq_res.max_sg_sz < recv_req_p->sg_lst_len)) {
+ MTL_ERROR2(
+ "THHUL_qpm_post_recv_req: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n",
+ recv_req_p->sg_lst_len,qp->rq_res.max_sg_sz);
+ return HH_EINVAL_SG_NUM;
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */
+
+ /* Check if any WQEs are free to be consumed */
+ if (MOSAL_EXPECT_FALSE(qp->rq_res.max_outs == qp->rq_res.cur_outs)) {
+ MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+ MTL_ERROR4("THHUL_qpm_post_recv_req: Receive queue is full (%d requests outstanding).\n",
+ qp->rq_res.cur_outs);
+ return HH_E2BIG_WR_NUM;
+ }
+ /* Allocate next WQE */
+ /* Build the WQE directly in wqe_buf, instead of in the draft. This eliminates the
+ * extra copy, but the big-endian version should be built */
+ next_wqe= (u_int32_t*) (qp->rq_res.wqe_buf +
+ (qp->rq_res.next2post_index << qp->rq_res.log2_max_wqe_sz) );
+
+ /* Build WQE */
+ /* Call big Endian version of WQE_build_recv */
+ wqe_sz_dwords= (WQE_build_recv_be(qp,recv_req_p,(u_int32_t*)next_wqe) >> 2);
+#ifdef MAX_DEBUG
+ if ((wqe_sz_dwords<<2) > (1 << qp->rq_res.log2_max_wqe_sz)) {
+ MTL_ERROR1(MT_FLFMT("QP 0x%X: Receive WQE too large (%d > max=%d)"),
+ qp->qpn,(wqe_sz_dwords<<2),(1 << qp->rq_res.log2_max_wqe_sz));
+ }
+#endif
+
+ qp->rq_res.wqe_id[qp->rq_res.next2post_index]= recv_req_p->id; /* Save WQE ID */
+ ++qp->rq_res.next2post_index;
+ if(MOSAL_EXPECT_FALSE(qp->rq_res.next2post_index >= qp->rq_res.max_outs))
+ qp->rq_res.next2post_index = 0;
+
+ qp->rq_res.cur_outs++;
+
+ /* Update "next" segment of previous WQE (if any) */
+ /* Build linking "next" segment in last posted WQE */
+ WQE_pack_recv_next_be((u_int32_t*)qp->rq_res.last_posted_p,
+ (u_int32_t)(MT_ulong_ptr_t) next_wqe, wqe_sz_dwords>>2);
+
+ qp->rq_res.last_posted_p= next_wqe;
+
+ /* Ring doorbell */
+ uar = qp->uar;
+
+ chimeWords[0] = MOSAL_cpu_to_be32(0
+ | (u_int32_t)((MT_ulong_ptr_t)next_wqe & 0xFFFFFFFF)
+ | ((wqe_sz_dwords + 3) >> 2) // specify in 16 byte chunks
+ );
+
+ chimeWords[1] = MOSAL_cpu_to_be32(0
+ | 1 // credits
+ | ((u_int32_t)(qp->qpn) << 8)
+ );
+
+#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__
+ MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords);
+#else
+ MOSAL_spinlock_dpc_lock(&(uar->uar_lock));
+ MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords);
+ MOSAL_spinlock_unlock(&(uar->uar_lock));
+#endif
+
+ MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+
+ return HH_OK;
+}
+#endif
+
+
+HH_ret_t THHUL_qpm_post_recv_reqs(
+ /*IN*/ HHUL_hca_hndl_t hca,
+ /*IN*/ HHUL_qp_hndl_t hhul_qp,
+ /*IN*/ u_int32_t num_of_requests,
+ /*IN*/ VAPI_rr_desc_t *recv_req_array
+ )
+{
+ THHUL_qp_t qp= 
(THHUL_qp_t)hhul_qp;
+ u_int32_t* wqe_draft= qp->rq_res.wqe_draft;
+ u_int32_t next_draft[WQE_SEG_SZ_NEXT>>2]; /* Build "next" segment here */
+ volatile u_int32_t* next_wqe= NULL; /* Actual WQE pointer */
+ volatile u_int32_t* prev_wqe_p= qp->rq_res.last_posted_p;
+ u_int32_t wqe_sz_dwords= 0;
+ u_int32_t i,reqi,next2post_index;
+ THH_uar_recvq_dbell_t rq_dbell;
+ u_int32_t remaining_reqs;
+
+ if (qp->srq != HHUL_INVAL_SRQ_HNDL) {
+ MTL_ERROR1(MT_FLFMT("%s: Used for QP 0x%X which is associated with SRQ 0x%p"), __func__,
+ qp->qpn, qp->srq);
+ return HH_EINVAL_SRQ_HNDL;
+ }
+
+ if (!is_qpstate_valid_2recv(qp->rq_res.qp_state)) {
+ MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to recv \n"),__func__,qp->rq_res.qp_state);
+ return HH_EINVAL_QP_STATE;
+ }
+
+ if (num_of_requests == 0) {
+ MTL_ERROR4(MT_FLFMT("THHUL_qpm_post_recv_reqs: num_of_requests=0 !"));
+ return HH_EINVAL_PARAM;
+ }
+
+ /* Check parameters of all WQEs first - must assure all posted successfully */
+ for (reqi= 0; reqi < num_of_requests; reqi++) {
+ if (qp->rq_res.max_sg_sz < recv_req_array[reqi].sg_lst_len) {
+ MTL_ERROR2(
+ "THHUL_qpm_post_recv_reqs: S/G list of req. #%d is too large (%d entries > max_sg_sz=%d)\n",
+ reqi,recv_req_array[reqi].sg_lst_len,qp->rq_res.max_sg_sz);
+ return HH_EINVAL_SG_NUM;
+ }
+ }
+
+ MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */
+
+ /* Check for available WQEs */
+ if (qp->rq_res.max_outs < (qp->rq_res.cur_outs + num_of_requests)) {
+ MTL_ERROR4("THHUL_qpm_post_recv_reqs: Not enough WQEs for %d requests (%d requests outstanding).\n",
+ num_of_requests,qp->rq_res.cur_outs);
+ MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+ return HH_E2BIG_WR_NUM;
+ }
+
+ rq_dbell.qpn= qp->qpn; /* Fixed for all doorbells */
+ rq_dbell.credits= 0; /* During the loop, doorbell is rung every 256 WQEs */
+
+ /* We hold this value in a separate var. 
for easy rollback in case of an error */ + next2post_index= qp->rq_res.next2post_index; + + /* Build and link and ring all WQEs */ + for (reqi= 0; reqi < num_of_requests; reqi++) { + + /* Build WQE */ + wqe_sz_dwords= (qpm_WQE_build_recv(qp,recv_req_array+reqi,wqe_draft) >> 2); + #ifdef MAX_DEBUG + if ((wqe_sz_dwords<<2) > (1U << qp->rq_res.log2_max_wqe_sz)) { + MTL_ERROR1(MT_FLFMT("QP 0x%X: Receive WQE too large (%d > max=%d)"), + qp->qpn,(wqe_sz_dwords<<2),(1U << qp->rq_res.log2_max_wqe_sz)); + } + #endif + + /* Allocate next WQE */ + next_wqe= (u_int32_t*) (qp->rq_res.wqe_buf + + (next2post_index << qp->rq_res.log2_max_wqe_sz) ); + qp->rq_res.wqe_id[next2post_index]= recv_req_array[reqi].id; /* Save WQE ID */ + next2post_index = (next2post_index + 1) % qp->rq_res.max_outs ; + + /* copy (while swapping,if needed) the wqe_draft to the actual WQE */ + /* TBD: for big-endian machines we can optimize here and use memcpy */ + for (i= 0; i < wqe_sz_dwords; i++) { + next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]); + } + + if ((reqi & 0xFF) == 0) { + /* save NDA+NDS of first WQE in each 256 WQEs chain for the doorbell */ + rq_dbell.next_addr_32lsb= (u_int32_t)((MT_virt_addr_t)next_wqe & 0xFFFFFFFF); + rq_dbell.next_size= wqe_sz_dwords>>2; + } + + if (prev_wqe_p != NULL) { /* first in the chain may be the first since reset */ + /* Update "next" segment of previous WQE */ + /* Build linking "next" segment in last posted WQE */ + qpm_WQE_pack_recv_next(next_draft, (u_int32_t)(MT_ulong_ptr_t)next_wqe, wqe_sz_dwords>>2); + for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { + /* This copy assures big-endian as well as that NDS is written last */ + prev_wqe_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + } + prev_wqe_p= next_wqe; + + if ((reqi & 0xFF) == 0xFF) { /* last in 256 WQEs chain - ring doorbell */ + /* Ring doorbell on the first WQE only */ + THH_uar_recvq_dbell(qp->uar,&rq_dbell); + } + } + + if ((reqi & 0xFF) != 0) { /* Doorbel for last WQEs was not rung */ + rq_dbell.credits= (reqi & 0xFF); + THH_uar_recvq_dbell(qp->uar,&rq_dbell); + } + + qp->rq_res.last_posted_p= next_wqe; + + /* update producer index + cur. 
outstanding (now that no error was found) */ + qp->rq_res.next2post_index = next2post_index; + qp->rq_res.cur_outs+= num_of_requests; + + MOSAL_spinlock_unlock(&(qp->rq_res.q_lock)); + return HH_OK; +} + + + +HH_ret_t THHUL_qpm_post_bind_req( + /*IN*/ HHUL_mw_bind_t *bind_props_p, + /*IN*/ IB_rkey_t new_rkey +) +{ + THHUL_qp_t qp= (THHUL_qp_t)bind_props_p->qp; + u_int32_t wqe_draft[BIND_WQE_SZ>>2]; /* Build the WQE here */ + u_int32_t wqe_sz_dwords; + VAPI_sr_desc_t send_req; + + if ((qp->sqp_type != VAPI_REGULAR_QP) || + ((qp->ts_type != VAPI_TS_RC) && (qp->ts_type != VAPI_TS_RD) && (qp->ts_type != VAPI_TS_UC))){ + MTL_ERROR1(MT_FLFMT("Invalid QP type for binding memory windows (qpn=0x%X)."),qp->qpn); + return HH_EINVAL_QP_NUM; + } + + + if (!is_qpstate_valid_2send(qp->sq_res.qp_state)) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state); + return HH_EINVAL_QP_STATE; + } + + + wqe_sz_dwords= (WQE_build_membind(bind_props_p,new_rkey,wqe_draft) >> 2); +#ifdef MAX_DEBUG + if ((wqe_sz_dwords<<2) > (1U << qp->sq_res.log2_max_wqe_sz)) { + MTL_ERROR1(MT_FLFMT("QP 0x%X: Send WQE too large (%d > max=%d)"), + qp->qpn,(wqe_sz_dwords<<2),(1U << qp->sq_res.log2_max_wqe_sz)); + } +#endif + + send_req.id= bind_props_p->id; + send_req.fence= TRUE; /* just in case, though implicitly fenced */ + if (qp->ts_type == VAPI_TS_RD) { + send_req.eecn= RESERVED_MEMBIND_EECN; + } + + return sq_alloc_wqe_link_and_ring(qp,wqe_draft,wqe_sz_dwords, +#ifdef MT_LITTLE_ENDIAN + wqe_sz_dwords, +#endif + &send_req,TAVOR_IF_NOPCODE_BIND_MEMWIN); +} + + + +HH_ret_t THHUL_qpm_comp_ok( + THHUL_qpm_t qpm, + IB_wqpn_t qpn, + u_int32_t wqe_addr_32lsb, + VAPI_special_qp_t *qp_type_p, + IB_ts_t *qp_ts_type_p, + VAPI_wr_id_t *wqe_id_p, + u_int32_t *wqes_released_p +#ifdef IVAPI_THH + , u_int32_t *reserved_p +#endif +) +{ + u_int32_t freed_wqe_index; + queue_res_t *associated_q= NULL; + THHUL_qp_t qp; + HH_ret_t rc; + + rc= find_wqe(qpm,qpn,wqe_addr_32lsb,&qp,&associated_q,&freed_wqe_index,wqe_id_p); + if (MOSAL_EXPECT_FALSE(rc != HH_OK)) { + MTL_ERROR2("%s: Given QPN/WQE is not associated with any queue (qpn=0x%X,wqe=0x%X).\n", + __func__,qpn,wqe_addr_32lsb); + return HH_EINVAL; + } + + if (MOSAL_EXPECT_FALSE( (qp->ts_type == IB_TS_RD) && (qp->sqp_type == VAPI_REGULAR_QP))) { + /* RD is a completely different story due to out of order completion */ + MTL_ERROR4("THHUL_qpm_comp_ok: RD WQEs tracking not supported, yet.\n"); + return HH_ENOSYS; /* TBD: implement when THH should support RD */ + } + + *qp_type_p= qp->sqp_type; + *qp_ts_type_p= qp->ts_type; + + if (associated_q != NULL) { /* Release WQEs (if not from SRQ) */ + MOSAL_spinlock_dpc_lock(&(associated_q->q_lock)); + *wqes_released_p= + (associated_q->next2free_index <= freed_wqe_index) ? 
+ /* Unsigned computation depends on the cyclic index relation (i.e., which index is the upper one) */
+ 1+ freed_wqe_index - associated_q->next2free_index :
+ 1+ associated_q->max_outs - (associated_q->next2free_index - freed_wqe_index);
+ /* The +1 results from the fact that next2free_index should be counted as well */
+// associated_q->next2free_index= (freed_wqe_index + 1) % associated_q->max_outs;
+ if (MOSAL_EXPECT_FALSE(++freed_wqe_index >= associated_q->max_outs)) {
+ freed_wqe_index = 0;
+ }
+ associated_q->next2free_index= freed_wqe_index;
+ associated_q->cur_outs -= *wqes_released_p;
+ MOSAL_spinlock_unlock(&(associated_q->q_lock));
+ }
+
+ return HH_OK;
+}
+
+
+HH_ret_t THHUL_qpm_comp_err(
+ THHUL_qpm_t qpm,
+ IB_wqpn_t qpn,
+ u_int32_t wqe_addr_32lsb,
+ VAPI_wr_id_t *wqe_id_p,
+ u_int32_t *wqes_released_p,
+ u_int32_t *next_wqe_32lsb_p,
+ u_int8_t *dbd_bit_p
+)
+{
+ u_int32_t freed_wqe_index;
+ queue_res_t *associated_q;
+ THHUL_qp_t qp;
+ u_int32_t* completed_wqe;
+ HH_ret_t rc;
+
+ rc= find_wqe(qpm,qpn,wqe_addr_32lsb,&qp,&associated_q,&freed_wqe_index,wqe_id_p);
+ if (rc != HH_OK) {
+ MTL_ERROR2(
+ "%s: Given QPN/WQE is not associated with any queue (qpn=0x%X,wqe=0x%X).\n",__func__,
+ qpn,wqe_addr_32lsb);
+ return HH_EINVAL;
+ }
+
+ if ( (qp->ts_type == IB_TS_RD) && (qp->sqp_type == VAPI_REGULAR_QP) ) {
+ /* RD is a completely different story due to out of order completion */
+ MTL_ERROR4("%s: RD WQEs tracking not supported, yet.\n", __func__);
+ return HH_ENOSYS; /* TBD: implement when THH should support RD */
+ }
+
+ if (associated_q != NULL) { /* Not from SRQ */
+ MOSAL_spinlock_dpc_lock(&(associated_q->q_lock));
+ *wqes_released_p=
+ (associated_q->next2free_index <= freed_wqe_index) ?
+ /* Unsigned computation depends on the cyclic index relation (i.e., which index is the upper one) */
+ 1+ freed_wqe_index - associated_q->next2free_index :
+ 1+ associated_q->max_outs - (associated_q->next2free_index - freed_wqe_index);
+ /* The +1 results from the fact that next2free_index should be counted as well */
+// associated_q->next2free_index= (freed_wqe_index + 1) % associated_q->max_outs;
+ if (++freed_wqe_index >= associated_q->max_outs) {
+ freed_wqe_index = 0;
+ }
+ associated_q->next2free_index= freed_wqe_index;
+ associated_q->cur_outs -= *wqes_released_p;
+ if (sizeof(MT_virt_addr_t) <= 4) { /* Optimization for 32bit machines */
+ completed_wqe= (u_int32_t*)(MT_virt_addr_t) wqe_addr_32lsb;
+ } else {
+ completed_wqe= (u_int32_t*)(MT_virt_addr_t)
+ (((associated_q->wqe_buf) & MAKE_ULONGLONG(0xFFFFFFFF00000000)) | (u_int64_t)wqe_addr_32lsb );
+ }
+ if (qpm_WQE_extract_nds(completed_wqe) == 0) {
+ *next_wqe_32lsb_p= THHUL_QPM_END_OF_WQE_CHAIN; /* Chain end reached */
+ } else {
+ *next_wqe_32lsb_p= qpm_WQE_extract_nda(completed_wqe);
+ }
+ *dbd_bit_p= qpm_WQE_extract_dbd(completed_wqe);
+ MOSAL_spinlock_unlock(&(associated_q->q_lock));
+
+ } else { /* SRQ - all WQEs generate CQEs... 
no need to provide NDA */ + *wqes_released_p= 1; + *next_wqe_32lsb_p= THHUL_QPM_END_OF_WQE_CHAIN; /* Chain end reached */ + } + + return HH_OK; +} + +u_int32_t THHUL_qpm_wqe_cnt( + /*IN*/THHUL_qpm_t qpm, + /*IN*/IB_wqpn_t qpn, + /*IN*/u_int32_t wqe_addr_32lsb, + /*IN*/u_int16_t dbd_cnt) +{ + u_int32_t cur_wqe_index; + queue_res_t *associated_q; + THHUL_qp_t qp; + volatile u_int32_t *cur_wqe_p; + u_int32_t wqe_cntr= 0; + VAPI_wr_id_t wqe_id; + HH_ret_t rc; + + rc= find_wqe(qpm,qpn,wqe_addr_32lsb,&qp,&associated_q,&cur_wqe_index,&wqe_id); + if (rc != HH_OK) { + MTL_ERROR2( + "%s: Given QPN/WQE is not associated with any queue (qpn=%d,wqe=0x%X).\n",__func__, + qpn,wqe_addr_32lsb); + return 0; + } + + if ( (qp->ts_type == IB_TS_RD) && (qp->sqp_type == VAPI_REGULAR_QP) ) { + /* RD is a completely different story due to out of order completion */ + MTL_ERROR4("%s: RD WQEs tracking not supported, yet.\n",__func__); + return 0; /* TBD: implement when THH should support RD */ + } + + if (associated_q == NULL) { /* SRQ */ + /* Only one WQE per CQE for SRQs */ + return 1; + } + + dbd_cnt++; /* count down to zero (dbd_cnt==0 when waiting for next dbd bit set) */ + MOSAL_spinlock_dpc_lock(&(associated_q->q_lock)); + do { + wqe_cntr++; + cur_wqe_p= (u_int32_t*)(associated_q->wqe_buf + + (cur_wqe_index << associated_q->log2_max_wqe_sz) ); + dbd_cnt-= qpm_WQE_extract_dbd(cur_wqe_p); +// cur_wqe_index= (cur_wqe_index + 1) % associated_q->max_outs; + if (++cur_wqe_index >= associated_q->max_outs) { + cur_wqe_index=0; + } + } while ((dbd_cnt > 0) && (qpm_WQE_extract_nds(cur_wqe_p) != 0)); + MOSAL_spinlock_unlock(&(associated_q->q_lock)); + + return wqe_cntr; +} + +/********************************************************************************************** + * Private Functions + **********************************************************************************************/ + +/* + * Prepare QP resources before creation. + * To be used by both THH_qpm_create_qp_prep and THH_qpm_special_qp_prep + */ +static HH_ret_t qp_prep( + HHUL_hca_hndl_t hca, + VAPI_special_qp_t qp_type, + HHUL_qp_init_attr_t *qp_init_attr_p, + HHUL_qp_hndl_t *qp_hndl_p, + VAPI_qp_cap_t *qp_cap_out_p, + THH_qp_ul_resources_t *qp_ul_resources_p, + MT_bool in_ddr_mem /* WQEs buffer allocated in attached DDR mem. 
or in main memory */ +) +{ + THHUL_qpm_t qpm; + THH_hca_ul_resources_t hca_ul_res; + THHUL_qp_t new_qp; + HH_ret_t rc; + THHUL_pdm_t pdm; + MT_bool pd_ok_for_sqp; + VAPI_lkey_t ud_av_memkey; /*irrelevant here */ + char* av_ddr_base; /*irrelevant here */ + char* av_host_base; /*irrelevant here */ + + rc= THHUL_hob_get_qpm(hca,&qpm); + if (rc != HH_OK) { + MTL_ERROR4(MT_FLFMT("qp_prep: Invalid HCA handle (%p)."),hca); + return HH_EINVAL; + } + rc= THHUL_hob_get_hca_ul_res(hca,&hca_ul_res); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("qp_prep: Failed THHUL_hob_get_hca_ul_res (err=%d).\n"),rc); + return rc; + } + + rc= THHUL_hob_get_pdm(hca,&pdm); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("qp_prep: Failed THHUL_hob_get_pdm (err=%d).\n"),rc); + return rc; + } + + rc= THHUL_pdm_get_ud_av_memkey_sqp_ok(pdm,qp_init_attr_p->pd,&pd_ok_for_sqp,&ud_av_memkey, &av_ddr_base, &av_host_base); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("qp_prep: Failed THHUL_pdm_get_ud_av_memkey_sqp_ok (err=%d).\n"),rc); + return rc; + } + + if (qp_type != VAPI_REGULAR_QP && pd_ok_for_sqp == FALSE) { + /* the protection domain uses DDR memory for UDAV's -- not good for sqps */ + MTL_ERROR2(MT_FLFMT("***WARNING***: AVs for special QPs should use HOST memory; the provided PD has its AVs in DDR memory.\n")); + //return HH_EINVAL; + } + + (new_qp)= (THHUL_qp_t)MALLOC(sizeof(struct THHUL_qp_st)); + if (new_qp == NULL) { + MTL_ERROR1(MT_FLFMT("qp_prep: Failed allocating THHUL_qp_t.\n")); + return HH_EAGAIN; + } + memset(new_qp,0,sizeof(struct THHUL_qp_st)); + + new_qp->sqp_type= qp_type; + + rc= init_qp(hca,qp_init_attr_p,new_qp); + if (rc != HH_OK) { + goto failed_init_qp; + } + + rc= qpm_alloc_wqe_buf(qpm, in_ddr_mem,hca_ul_res.max_qp_ous_wr,hca_ul_res.max_num_sg_ent, + new_qp,qp_ul_resources_p); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT(": Failed allocating WQEs buffers.\n")); + goto failed_alloc_wqe; + } + + rc= qpm_alloc_aux_data_buf(new_qp); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT(": Failed allocating auxilary buffers.\n")); + goto failed_alloc_aux; + } + + /* Set output modifiers */ + *qp_hndl_p= new_qp; + qp_cap_out_p->max_oust_wr_rq= new_qp->rq_res.max_outs; + qp_cap_out_p->max_oust_wr_sq= new_qp->sq_res.max_outs; + qp_cap_out_p->max_sg_size_rq= new_qp->rq_res.max_sg_sz; + qp_cap_out_p->max_sg_size_sq= new_qp->sq_res.max_sg_sz; + qp_cap_out_p->max_inline_data_sq= new_qp->sq_res.max_inline_data; + rc= THH_uar_get_index(new_qp->uar,&(qp_ul_resources_p->uar_index)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT(": Failed getting UAR index.\n")); + goto failed_uar_index; + } + /* wqe_buf data in qp_ul_resources_p is already set in qpm_alloc_wqe_buf */ + + /* update QPs counter */ + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + qpm->qp_cnt++; + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + + return HH_OK; + + /* Error cleanup */ + failed_uar_index: + if (new_qp->sq_res.wqe_id != NULL) { + THH_SMART_FREE(new_qp->sq_res.wqe_id, new_qp->sq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + if (new_qp->rq_res.wqe_id != NULL) { + THH_SMART_FREE(new_qp->rq_res.wqe_id, new_qp->rq_res.max_outs * sizeof(VAPI_wr_id_t)); + } + failed_alloc_aux: + if (new_qp->wqe_buf_orig != NULL) {/* WQEs buffer were allocated in process mem. or by the THH_qpm ? 
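+ *
+ * Editor's note (summary of the cleanup ladder above, no new behavior): the
+ * error exits unwind in reverse order of construction -- failed_uar_index
+ * frees the WQE-ID auxiliary arrays and falls through to failed_alloc_aux,
+ * which frees the WQEs buffer (directly or back to its dpool), while
+ * failed_alloc_wqe/failed_init_qp free only the THHUL_qp_st itself -- so each
+ * label releases exactly what was acquired before the corresponding failure
+ * point.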
*/ + /* If allocated here than should be freed */ + if (new_qp->dpool_p == NULL) { /* direct allocation */ + if (new_qp->used_virt_alloc) + MOSAL_pci_virt_free_consistent(new_qp->wqe_buf_orig, new_qp->wqe_buf_orig_size); + else + MOSAL_pci_phys_free_consistent(new_qp->wqe_buf_orig, new_qp->wqe_buf_orig_size); + } else { /* used dpool */ + dpool_free(qpm, new_qp->dpool_p, new_qp->wqe_buf_orig); + } + } + failed_alloc_wqe: + failed_init_qp: + FREE(new_qp); + return rc; +} + + +/* Allocate THHUL_qp_t object and initialize it */ +static HH_ret_t init_qp( + HHUL_hca_hndl_t hca, + HHUL_qp_init_attr_t *qp_init_attr_p, + THHUL_qp_t new_qp +) +{ + HH_ret_t rc; + THHUL_pdm_t pdm; + MT_bool ok_sqp; /* irrelevant here */ + + rc= THHUL_hob_get_uar(hca,&(new_qp->uar)); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("init_qp: Failed getting THHUL_hob's UAR (err=%d).\n"),rc); + return rc; + } + rc= THHUL_hob_get_pdm(hca,&pdm); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("init_qp: Failed THHUL_hob_get_pdm (err=%d).\n"),rc); + return rc; + } + rc= THHUL_hob_is_priv_ud_av(hca,&(new_qp->is_priv_ud_av)); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("init_qp: Failed THHUL_hob_is_priv_ud_av (err=%d).\n"),rc); + return rc; + } + rc= THHUL_pdm_get_ud_av_memkey_sqp_ok(pdm,qp_init_attr_p->pd,&ok_sqp,&(new_qp->ud_av_memkey),&(new_qp->av_ddr_base),&(new_qp->av_host_base)); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("init_qp: Failed THHUL_pdm_get_ud_av_memkey (err=%d).\n"),rc); + return rc; + } + + new_qp->dpool_p= NULL; + new_qp->qpn= 0xFFFFFFFF; /* Init to invalid QP num. until create_qp_done is invoked */ + new_qp->pd= qp_init_attr_p->pd; + switch (new_qp->sqp_type) { /* Set transport type appropriate to QP type */ + case VAPI_REGULAR_QP: + new_qp->ts_type= qp_init_attr_p->ts_type; + break; + + case VAPI_SMI_QP: + case VAPI_GSI_QP: + new_qp->ts_type= VAPI_TS_UD; + break; + + case VAPI_RAW_IPV6_QP: + case VAPI_RAW_ETY_QP: + new_qp->ts_type= VAPI_TS_RAW; + break; + + default: + MTL_ERROR1(MT_FLFMT("Invalid QP type (sqp_type=%d)"),new_qp->sqp_type); + return HH_EINVAL; + } + + new_qp->srq= qp_init_attr_p->srq; + /* Init RQ */ + new_qp->rq_res.qp_state= VAPI_RESET; + if (qp_init_attr_p->srq == HHUL_INVAL_SRQ_HNDL) { + new_qp->rq_res.max_outs= qp_init_attr_p->qp_cap.max_oust_wr_rq; + new_qp->rq_res.max_sg_sz= qp_init_attr_p->qp_cap.max_sg_size_rq; + } else { /* QP associated with SRQ */ + MTL_DEBUG4(MT_FLFMT("%s: Ignoring RQ attributes for a SRQ associated QP"), __func__); + new_qp->rq_res.max_outs= 0; + new_qp->rq_res.max_sg_sz= 0; + } + new_qp->rq_res.next2free_index= new_qp->rq_res.next2post_index= 0; + new_qp->rq_res.cur_outs= 0; + new_qp->rq_res.last_posted_p= NULL; + new_qp->rq_cq= qp_init_attr_p->rq_cq; + MOSAL_spinlock_init(&(new_qp->rq_res.q_lock)); + + /* Init SQ */ + new_qp->sq_res.qp_state= VAPI_RESET; + new_qp->sq_res.max_outs= qp_init_attr_p->qp_cap.max_oust_wr_sq; + new_qp->sq_res.max_sg_sz= qp_init_attr_p->qp_cap.max_sg_size_sq; + new_qp->sq_res.max_inline_data= qp_init_attr_p->qp_cap.max_inline_data_sq; + new_qp->sq_res.cur_outs= 0; + new_qp->sq_res.next2free_index= new_qp->sq_res.next2post_index= 0; + new_qp->sq_res.last_posted_p= NULL; + new_qp->sq_cq= qp_init_attr_p->sq_cq; + MOSAL_spinlock_init(&(new_qp->sq_res.q_lock)); + + return HH_OK; +} + +inline static MT_bool qpm_within_4GB(void* base, MT_size_t bsize) +{ + u_int64_t start_addr; + u_int64_t end_addr; + + if (sizeof(MT_virt_addr_t) <=4) return TRUE; /* For 32 bits machines no check is required */ + start_addr= (u_int64_t)(MT_virt_addr_t)base; + 
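+ /* Editor's worked example (not part of the original code): for base=
+    0xFFFFF000 and bsize=0x2000, end_addr=0x100000FFF, so (start_addr>>32)==0
+    but (end_addr>>32)==1 and the function returns FALSE -- the buffer crosses
+    a 4GB boundary; at base=0xFFFF0000 the same size stays in one 4GB window
+    and the function returns TRUE. */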
end_addr= start_addr+bsize-1; + return ((start_addr >> 32) == (end_addr >> 32)); /* TRUE if 32 MS-bits equal */ + +} + +inline static void* qpm_malloc_within_4GB(MT_size_t bsize, MT_bool *used_virt_alloc_p) +{ + void* buf[MAX_ALLOC_RETRY]={NULL}; + MT_bool used_virt_alloc[MAX_ALLOC_RETRY]; + int i,j; + + for (i= 0; i < MAX_ALLOC_RETRY; i++) { /* Retry to avoid crossing 4GB */ +#if defined(MT_KERNEL) && defined(__LINUX__) + /* Consider using low memory (kmalloc) up to WQ_KMALLOC_LIMIT or for small vmalloc area */ + if (bsize <= WQ_KMALLOC_LIMIT) { + buf[i]= (void*)MOSAL_pci_phys_alloc_consistent(bsize,0); /* try to use kmalloc */ + used_virt_alloc[i]= FALSE; + } + if (buf[i] == NULL) /* failed kmalloc, or did not even try it */ +#endif + { + buf[i]= +#ifdef WIN32 + /* Use pageable memory, since it gets registered. */ + cl_pzalloc( bsize ); +#else + (void*)MOSAL_pci_virt_alloc_consistent(bsize, 0); //TODO: must pass proper alignment here. For now thhul_qpm is unused in Darwin. +#endif + used_virt_alloc[i]= TRUE; + } + if (buf[i] == NULL) { + MTL_ERROR3("qpm_malloc_within_4GB: Failed allocating buffer of "SIZE_T_FMT" bytes (iteration %d).\n", + bsize,i); + /* Free previously allocated buffers if any*/ + for (j= i; j > 0; j--) { + if (used_virt_alloc[j-1]) { + MOSAL_pci_virt_free_consistent(buf[j-1], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[j-1], bsize); + } + } + return NULL; + } + if (qpm_within_4GB(buf[i],bsize)) break; + } + if (i == MAX_ALLOC_RETRY) { /* Failed */ + MTL_ERROR2("qpm_malloc_within_4GB: Failed allocating buffer of "SIZE_T_FMT" bytes within 4GB boundry " + "(%d retries).\n", bsize, MAX_ALLOC_RETRY); + /* Free all allocated buffers */ + for (i= 0; i < MAX_ALLOC_RETRY; i++) { + if (used_virt_alloc[i]) { + MOSAL_pci_virt_free_consistent(buf[i], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[i], bsize); + } + } + return NULL; + } + /* Free disqualified buffers if any */ + for (j= i; j > 0; j--) { + if (used_virt_alloc[j-1]) { + MOSAL_pci_virt_free_consistent(buf[j-1], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[j-1], bsize); + } + } + + *used_virt_alloc_p= used_virt_alloc[i]; + return buf[i]; /* This is the one buffer which does not cross 4GB boundry */ +} + +/* Allocate the WQEs buffer for sendQ and recvQ */ +/* This function should be invoked after queue properties are set by alloc_init_qp */ +static HH_ret_t qpm_alloc_wqe_buf( + /*IN*/ THHUL_qpm_t qpm, + /*IN*/ MT_bool in_ddr_mem, /* Allocation of WQEs buffer is requested in attached DDR mem. */ + /*IN*/ u_int32_t max_outs_wqes, /* HCA cap. */ + /*IN*/ u_int32_t max_sg_ent, /* HCA cap. 
of max.s/g entries */
+  /*IN/OUT*/ THHUL_qp_t new_qp,
+  /*OUT*/ THH_qp_ul_resources_t *qp_ul_resources_p
+)
+{
+  u_int32_t wqe_sz_rq,buf_sz_rq,rq_wqe_base_sz;
+  u_int32_t wqe_sz_sq,buf_sz_sq,sq_wqe_base_sz;
+  u_int32_t sq_sg_seg_sz,sq_inline_seg_sz;
+  u_int8_t log2_wqe_sz_rq,log2_wqe_sz_sq;
+  HH_ret_t ret;
+
+  /* Check requested capabilities */
+  if ((new_qp->rq_res.max_outs == 0) && (new_qp->sq_res.max_outs == 0)) {
+    if (new_qp->srq == HHUL_INVAL_SRQ_HNDL) {
+      MTL_ERROR3(MT_FLFMT("Got a request for a QP with 0 WQEs on both SQ and RQ - rejecting !"));
+      return HH_EINVAL_PARAM;
+    } else { /* QP has no WQEs buffer - uses SRQ only */
+      new_qp->rq_res.wqe_draft = NULL;
+      new_qp->sq_res.wqe_draft = NULL;
+      new_qp->wqe_buf_orig= NULL;
+      new_qp->wqe_buf_orig_size= 0;
+      qp_ul_resources_p->wqes_buf= 0;
+      qp_ul_resources_p->wqes_buf_sz= 0; /* No WQEs buffer to register */
+      return HH_OK;
+    }
+  }
+  if ((new_qp->rq_res.max_outs > max_outs_wqes) || (new_qp->sq_res.max_outs > max_outs_wqes)) {
+    MTL_ERROR2(MT_FLFMT(
+      "QP cap. requested (rq_res.max_outs=%u, sq_res.max_outs=%u) exceeds HCA cap. (max_qp_ous_wr=%u)"),
+      new_qp->rq_res.max_outs, new_qp->sq_res.max_outs, max_outs_wqes);
+    return HH_E2BIG_WR_NUM;
+  }
+  /* Avoid a work queue of a single WQE (linking a WQE to itself may be problematic) */
+  if (new_qp->rq_res.max_outs == 1) new_qp->rq_res.max_outs= 2;
+  if (new_qp->sq_res.max_outs == 1) new_qp->sq_res.max_outs= 2;
+
+  if ((new_qp->rq_res.max_sg_sz > max_sg_ent) || (new_qp->sq_res.max_sg_sz > max_sg_ent)) {
+    MTL_ERROR2(MT_FLFMT(
+      "QP cap. requested (rq_res.max_sg_sz=%u, sq_res.max_sg_sz=%u) exceeds HCA cap. (max_sg_ent=%u)"),
+      new_qp->rq_res.max_sg_sz, new_qp->sq_res.max_sg_sz, max_sg_ent);
+    return HH_E2BIG_SG_NUM;
+  }
+
+  /* Compute RQ WQE requirements */
+  if (new_qp->rq_res.max_outs == 0) {
+    log2_wqe_sz_rq= 0;
+    wqe_sz_rq= 0;
+    buf_sz_rq= 0;
+    new_qp->rq_res.wqe_draft = NULL;
+  } else {
+    rq_wqe_base_sz= WQE_SEG_SZ_NEXT + WQE_SEG_SZ_CTRL;
+    wqe_sz_rq= rq_wqe_base_sz + (new_qp->rq_res.max_sg_sz * WQE_SEG_SZ_SG_ENTRY);
+    if (wqe_sz_rq > MAX_WQE_SZ) {
+      MTL_ERROR2(
+        MT_FLFMT("required RQ capabilities (max_sg_sz=%d) require too large a WQE (%d bytes)"),
+        new_qp->rq_res.max_sg_sz, wqe_sz_rq);
+      return HH_E2BIG_SG_NUM;
+    }
+    log2_wqe_sz_rq= ceil_log2(wqe_sz_rq); /* Align to next power of 2 */
+    /* A WQE must be aligned to 64B (WQE_ALIGN_SHIFT) so we take at least this size */
+    if (log2_wqe_sz_rq < WQE_ALIGN_SHIFT) log2_wqe_sz_rq= WQE_ALIGN_SHIFT;
+    wqe_sz_rq= (1<<log2_wqe_sz_rq);
+    new_qp->rq_res.max_sg_sz= (wqe_sz_rq - rq_wqe_base_sz) / WQE_SEG_SZ_SG_ENTRY;
+    /* Make sure we do not exceed reported HCA cap. */
+    new_qp->rq_res.max_sg_sz= (new_qp->rq_res.max_sg_sz > max_sg_ent) ?
+      max_sg_ent : new_qp->rq_res.max_sg_sz;
+    new_qp->rq_res.wqe_tmp = NULL;
+    new_qp->rq_res.wqe_draft= (u_int32_t *)MALLOC(wqe_sz_rq);
+    if (new_qp->rq_res.wqe_draft == NULL) {
+      MTL_ERROR2(MT_FLFMT("Failed allocating %d bytes for RQ's wqe draft"),wqe_sz_rq);
+      return HH_EAGAIN;
+    }
+  }
+
+  if (new_qp->sq_res.max_outs == 0) {
+    sq_wqe_base_sz= 0;
+    log2_wqe_sz_sq= 0;
+    wqe_sz_sq= 0;
+    buf_sz_sq= 0;
+    new_qp->sq_res.wqe_draft = NULL;
+  } else {
+    /* Compute SQ WQE requirements */
+    wqe_sz_sq= /* "next" and "ctrl" are included in the WQE of any transport */
+      WQE_SEG_SZ_NEXT + WQE_SEG_SZ_CTRL;
+
+    switch (new_qp->sqp_type) {
+      /* For special QPs an additional reservation is required for the headers (MLX+inline) */
+      case VAPI_SMI_QP:
+      case VAPI_GSI_QP:
+        /* SMI/GSI ==> UD headers */
+        wqe_sz_sq+= WQE_INLINE_SZ_UD_HDR;
+        wqe_sz_sq+= WQE_INLINE_ICRC;
+        break;
+      case VAPI_RAW_ETY_QP:
+        /* Raw-Ethertype ==> LRH+RWH */
+        wqe_sz_sq+= WQE_INLINE_SZ_RAW_HDR;
+        break;
+      case VAPI_RAW_IPV6_QP:
+        /* IPv6 routing headers are given by the consumer in the gather list (?) */
+        break;
+      default: /* Normal QP - add relevant transport WQE segments */
+        if (new_qp->ts_type == VAPI_TS_UD) {
+          wqe_sz_sq+= WQE_SEG_SZ_UD;
+        } else if (new_qp->ts_type == VAPI_TS_RD) {
+          wqe_sz_sq+= WQE_SEG_SZ_RD;
+        }
+        if ((new_qp->ts_type == VAPI_TS_RC) ||
+            (new_qp->ts_type == VAPI_TS_RD) ||
+            (new_qp->ts_type == VAPI_TS_UC) ) {
+          wqe_sz_sq+= WQE_SEG_SZ_BIND_RADDR_ATOMIC;
+        }
+    }
+
+    if (wqe_sz_sq > MAX_WQE_SZ) {
+      MTL_ERROR2(MT_FLFMT("required SQ capabilities (max_sg_sz=%d , max_inline_data=%d) "
+                          "require too large a WQE (%d bytes)"),
+        new_qp->sq_res.max_sg_sz, new_qp->sq_res.max_inline_data, wqe_sz_sq);
+      ret= HH_E2BIG_SG_NUM;
+      goto failed_sq2big;
+    }
+
+    sq_wqe_base_sz= wqe_sz_sq; /* WQE base without data segments */
+    /* Compute data segments size for sendQ */
+    sq_sg_seg_sz= new_qp->sq_res.max_sg_sz * WQE_SEG_SZ_SG_ENTRY; /* data pointers segments */
+    #ifndef QPM_SUPPORT_INLINE_DATA_SET
+    /* max_inline_data from create-qp cap. is not supported by default due to backward compat. */
+    new_qp->sq_res.max_inline_data= 64; /* Current default minimum */
+    #endif
+    sq_inline_seg_sz= /* Compute inline data segment size */
+      MT_UP_ALIGNX_U32(WQE_INLINE_SZ_BCOUNT + new_qp->sq_res.max_inline_data,WQE_SZ_MULTIPLE_SHIFT);
+    wqe_sz_sq+= ((sq_inline_seg_sz > sq_sg_seg_sz) ? sq_inline_seg_sz : sq_sg_seg_sz);
+
+    log2_wqe_sz_sq= ceil_log2(wqe_sz_sq); /* Align to next power of 2 */
+    /* A WQE must be aligned to 64B (WQE_ALIGN_SHIFT) so we take at least this size */
+    if (log2_wqe_sz_sq < WQE_ALIGN_SHIFT) log2_wqe_sz_sq= WQE_ALIGN_SHIFT;
+    wqe_sz_sq= (1<<log2_wqe_sz_sq);
+    new_qp->sq_res.max_sg_sz= (wqe_sz_sq - sq_wqe_base_sz) / WQE_SEG_SZ_SG_ENTRY;
+    /* Make sure we do not exceed reported HCA cap. */
+    new_qp->sq_res.max_sg_sz= (new_qp->sq_res.max_sg_sz > max_sg_ent) ?
+      max_sg_ent : new_qp->sq_res.max_sg_sz;
+    new_qp->sq_res.wqe_tmp = NULL;
+    if (new_qp->sqp_type == VAPI_SMI_QP || new_qp->sqp_type == VAPI_GSI_QP) {
+      new_qp->sq_res.wqe_tmp = MALLOC(sizeof(*new_qp->sq_res.wqe_tmp));
+      if (new_qp->sq_res.wqe_tmp == NULL) {
+        MTL_ERROR2(MT_FLFMT("Failed allocating "SIZE_T_FMT" bytes for SQ's wqe tmp"),sizeof(*new_qp->sq_res.wqe_tmp));
+        ret = HH_EAGAIN;
+        goto failed_wqe_tmp;
+      }
+    }
+    new_qp->sq_res.wqe_draft= (u_int32_t *)MALLOC(wqe_sz_sq);
+    if (new_qp->sq_res.wqe_draft == NULL) {
+      MTL_ERROR2(MT_FLFMT("Failed allocating %d bytes for SQ's wqe draft"),wqe_sz_sq);
+      ret= HH_EAGAIN;
+      goto failed_sq_draft;
+    }
+  }
+
+
+  buf_sz_rq= new_qp->rq_res.max_outs * wqe_sz_rq;
+  buf_sz_sq= new_qp->sq_res.max_outs * wqe_sz_sq;
+
+
+  if ((in_ddr_mem) ||              /* Allocate WQEs buffer by THH_qpm in the attached DDR memory */
+      (buf_sz_rq+buf_sz_sq == 0)) {/* Or no WQE allocation (possible if SRQ is used) */
+    new_qp->wqe_buf_orig= NULL;
+  } else { /* Allocate WQEs buffer in main memory */
+#if defined( WIN32 ) || defined (__KERNEL__)
+
+    new_qp->wqe_buf_orig_size =
+      buf_sz_rq+((wqe_sz_rq != 0) ? (wqe_sz_rq-1):0)+
+      buf_sz_sq+((wqe_sz_sq != 0) ? (wqe_sz_sq-1):0);
+    new_qp->wqe_buf_orig= qpm_malloc_within_4GB(new_qp->wqe_buf_orig_size, &new_qp->used_virt_alloc);
+    /* Make RQ (first WQEs buffer) start aligned to its WQE size */
+    new_qp->rq_res.wqe_buf= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)(new_qp->wqe_buf_orig),
+                                              log2_wqe_sz_rq);
+#else
+    /* In user space we need to take care of page sharing on memory locks (fork issues) */
+    /* Allocate one more for each queue in order to make each aligned to its WQE size */
+    /* (Assures no WQE crosses a page boundary, since we make WQE size a power of 2) */
+    new_qp->wqe_buf_orig_size = buf_sz_rq+buf_sz_sq+((wqe_sz_sq != 0) ? (wqe_sz_sq-1):0);
+#ifndef WIN32
+    if (new_qp->wqe_buf_orig_size > (THHUL_DPOOL_SZ_MAX_KB << THHUL_DPOOL_SZ_UNIT_SHIFT)) {
+#endif
+      /* Large WQEs buffer - allocate directly */
+      /* Assure the buffer covers whole pages (no sharing of locked memory with other data) */
+
+      new_qp->wqe_buf_orig_size = ROUNDUP( new_qp->wqe_buf_orig_size, MOSAL_SYS_PAGE_SIZE );
+
+      /* Prevent other data residing in the last page of the buffer:
+       * cover the whole last page (the last WQE may start at its beginning; WQE size is 64B min.) */
+
+      new_qp->wqe_buf_orig= cl_zalloc (new_qp->wqe_buf_orig_size );
+
+      /* Make RQ (first WQEs buffer) start at a page boundary */
+      new_qp->rq_res.wqe_buf= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)(new_qp->wqe_buf_orig),
+                                                MOSAL_SYS_PAGE_SHIFT);
+#ifndef WIN32
+    } else { /* small WQEs buffer - use dpool */
+      /* Round size up to next KB */
+      new_qp->wqe_buf_orig_size=
+        MT_UP_ALIGNX_U32(new_qp->wqe_buf_orig_size, THHUL_DPOOL_GRANULARITY_SHIFT);
+      new_qp->wqe_buf_orig= dpool_alloc(qpm,
+        (u_int8_t)(new_qp->wqe_buf_orig_size >> THHUL_DPOOL_SZ_UNIT_SHIFT), /* shift before the u_int8_t truncation */
+        &new_qp->dpool_p);
+      new_qp->rq_res.wqe_buf= (MT_virt_addr_t)new_qp->wqe_buf_orig; /* no need to align to WQE size */
+      /* All dpool buffers are aligned to at least 1KB - see comment in dpool_create() */
+    }
+#endif // WIN32
+#endif // __KERNEL__
+
+    if (new_qp->wqe_buf_orig == NULL) {
+      MTL_ERROR2("qpm_alloc_wqe_buf: Failed allocation of WQEs buffer of "SIZE_T_FMT" bytes within "
+                 "4GB boundaries.\n",new_qp->wqe_buf_orig_size);
+      ret= HH_EAGAIN;
+      goto failed_wqe_buf;
+    }
+  }
+
+  /* Set the per queue resources */
+  new_qp->rq_res.log2_max_wqe_sz= log2_wqe_sz_rq;
+  /* SQ is after RQ - aligned to its WQE size */
+  new_qp->sq_res.wqe_buf= MT_UP_ALIGNX_VIRT((new_qp->rq_res.wqe_buf + buf_sz_rq),log2_wqe_sz_sq);
+  new_qp->sq_res.log2_max_wqe_sz= log2_wqe_sz_sq;
+  //MTL_DEBUG5(MT_FLFMT("sq_inline_seg_sz=%d sq_sg_seg_sz=%d"),sq_inline_seg_sz,sq_sg_seg_sz);
+  if (wqe_sz_sq <= MAX_WQE_SZ) { /* update actual space for inline data */
+    new_qp->sq_res.max_inline_data= wqe_sz_sq - sq_wqe_base_sz - 4;
+  } else { /* Due to alignment we have a WQE of 1024B, but the actual WQE is only MAX_WQE_SZ (1008B) */
+    new_qp->sq_res.max_inline_data= MAX_WQE_SZ - sq_wqe_base_sz - 4;
+  }
+
+  /* Set the qp_ul_resources_p */
+  if (in_ddr_mem) {
+    qp_ul_resources_p->wqes_buf= 0; /* Allocate in attached DDR memory */
+  } else {
+    /* Actual buffer starts at the beginning of the RQ WQEs buffer (if it exists) */
+    qp_ul_resources_p->wqes_buf= (buf_sz_rq != 0) ? new_qp->rq_res.wqe_buf : new_qp->sq_res.wqe_buf;
+  }
+  /* Actual buffer size is the difference from the real buffer start to the end of the SQ buffer */
+  /* (even if the buffer is allocated in DDR mem. and this computation is done from 0 it is valid) */
+  qp_ul_resources_p->wqes_buf_sz= (new_qp->sq_res.wqe_buf + buf_sz_sq) -
+                                  qp_ul_resources_p->wqes_buf;
+
+  return HH_OK;
+
+ failed_wqe_buf:
+  if ( new_qp->sq_res.wqe_draft ) FREE(new_qp->sq_res.wqe_draft);
+ failed_sq2big:
+ failed_sq_draft:
+  if (new_qp->sq_res.wqe_tmp)
+    FREE(new_qp->sq_res.wqe_tmp);
+ failed_wqe_tmp:
+  if ( new_qp->rq_res.wqe_draft ) FREE(new_qp->rq_res.wqe_draft);
+  return ret;
+}
+
+
+/* Allocate the auxiliary WQEs data
+ * (a software context of a WQE which does not have to be in the registered WQEs buffer) */
+static HH_ret_t qpm_alloc_aux_data_buf(
+  /*IN/OUT*/ THHUL_qp_t new_qp
+)
+{
+  /* RQ auxiliary buffer: WQE ID per WQE */
+  if (new_qp->rq_res.max_outs > 0) {
+    new_qp->rq_res.wqe_id= (VAPI_wr_id_t*)
+      THH_SMART_MALLOC(new_qp->rq_res.max_outs * sizeof(VAPI_wr_id_t));
+    if (new_qp->rq_res.wqe_id == NULL) {
+      MTL_ERROR1("qpm_alloc_aux_data_buf: Failed allocating RQ auxiliary buffer.\n");
+      return HH_EAGAIN;
+    }
+  }
+
+  /* SQ auxiliary buffer: WQE ID per WQE */
+  if (new_qp->sq_res.max_outs > 0) {
+    new_qp->sq_res.wqe_id= (VAPI_wr_id_t*)
+      THH_SMART_MALLOC(new_qp->sq_res.max_outs * sizeof(VAPI_wr_id_t));
+    if (new_qp->sq_res.wqe_id == NULL) {
+      MTL_ERROR1("qpm_alloc_aux_data_buf: Failed allocating SQ auxiliary buffer.\n");
+      /* Free any memory chunk allocated by this function */
+      if (new_qp->rq_res.wqe_id != NULL) {
+        THH_SMART_FREE(new_qp->rq_res.wqe_id,new_qp->rq_res.max_outs * sizeof(VAPI_wr_id_t));
+      }
+      return HH_EAGAIN;
+    }
+  }
+
+  return HH_OK;
+}
+
+
+#if QPM_USE_FIXED_QP_ARRAY
+/* Insert given QP to the QPM fixed array table.
+ * If the table is full, fall back to the hash table */
+static HH_ret_t insert_to_array(THHUL_qpm_t qpm, THHUL_qp_t qp)
+{
+  u_int32_t hash_index= get_hash_index(qp->qpn);
+  int i = 0;
+  MT_bool found = FALSE;
+  qp_array_entry_t *qp_array_p = &qpm->array_tbl[hash_index];
+
+  MOSAL_spinlock_dpc_lock(&(qpm->hash_lock));
+
+  while(i < QPM_QP_PER_ARRAY)
+  {
+    if((qp_array_p->qp_array[i].qpn == QP_ARRAY_REUSE) ||
+       (qp_array_p->qp_array[i].qpn == QP_ARRAY_UNUSED))
+    {
+
+      /* if entry is QP_ARRAY_UNUSED, then mark the next entry QP_ARRAY_UNUSED.
+       * The last entry (index QPM_QP_PER_ARRAY) in the array is always left QP_ARRAY_UNUSED.
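+       * (This lets the scan below stop at the first QP_ARRAY_UNUSED slot; when
+       * no array slot is free, insert_to_hash() falls back to an allocated
+       * qp_hash_entry_t.)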
+ */ + if(qp_array_p->qp_array[i].qpn == QP_ARRAY_UNUSED) + qp_array_p->qp_array[i+1].qpn = QP_ARRAY_UNUSED; + + qp_array_p->qp_array[i].qp = qp; + qp_array_p->qp_array[i].qpn = qp->qpn; + found = TRUE; + break; + } + i++; + } + + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + + if(found == FALSE) + { + MTL_DEBUG2("insert_to_array: Failed allocating array entry.\n"); + return HH_EINVAL; + } + + return HH_OK; +} +#endif + + +/* Insert given QP to the QPM's hash table */ +/* This function assumes this QP is not in hash table */ +static HH_ret_t insert_to_hash(THHUL_qpm_t qpm, THHUL_qp_t qp) +{ + u_int32_t hash_index= get_hash_index(qp->qpn); + qp_hash_entry_t* new_entry_p; + +#if QPM_USE_FIXED_QP_ARRAY + /* Insert fixed array first, if no space, then use hash tbl */ + if(insert_to_array(qpm,qp) == HH_OK) + return HH_OK; +#endif + + /* Allocate hash table entry for the new QP */ + new_entry_p= (qp_hash_entry_t*)MALLOC(sizeof(qp_hash_entry_t)); + if (new_entry_p == NULL) { + MTL_ERROR2("insert_to_hash: Failed allocating hash table entry.\n"); + return HH_EAGAIN; + } + /* Set entry key (QPN) and value (QP pointer) */ + new_entry_p->qpn= qp->qpn; + new_entry_p->qp= qp; + + /* Add to the hash bin */ + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + if (qpm->hash_tbl[hash_index] == NULL) { /* First entry in the bin */ + new_entry_p->next= NULL; + qpm->hash_tbl[hash_index]= new_entry_p; + } else { /* Add as first before existing entries in the bin */ + new_entry_p->next= qpm->hash_tbl[hash_index]; + qpm->hash_tbl[hash_index]= new_entry_p; + } + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + + return HH_OK; +} + +#if QPM_USE_FIXED_QP_ARRAY +/* if qp is in fixed array, then remove from array. + * Removing entry means set qpn = 0 for recycle. + */ +static HH_ret_t remove_from_array(THHUL_qpm_t qpm, THHUL_qp_t qp) +{ + u_int32_t hash_index= get_hash_index(qp->qpn); + + int i = 0; + MT_bool found = FALSE; + qp_array_entry_t *qp_array_p = &qpm->array_tbl[hash_index]; + + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + + while(i < QPM_QP_PER_ARRAY) + { + /* if qpn is found, then set as QO_ARRAY_REUSE for recycle*/ + if(qp_array_p->qp_array[i].qpn == qp->qpn) + { + qp_array_p->qp_array[i].qpn = QP_ARRAY_REUSE; + qp_array_p->qp_array[i].qp = 0; + found = TRUE; + break; + } + if(qp_array_p->qp_array[i].qpn == QP_ARRAY_UNUSED) + break; + i++; + } + + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + if(found == FALSE) + { + MTL_DEBUG2("THHUL_qpm::remove_from_array: qpn=%d not found in the array table.\n", + qp->qpn); + return HH_EINVAL; + } + + return HH_OK; +} +#endif + +/* Remove given QP from the QPM's hash table */ +static HH_ret_t remove_from_hash(THHUL_qpm_t qpm, THHUL_qp_t qp) +{ + u_int32_t hash_index= get_hash_index(qp->qpn); + qp_hash_entry_t *entry2remove_p; + qp_hash_entry_t *prev_p= NULL; + +#if QPM_USE_FIXED_QP_ARRAY + /* Remove from fixed array tbl first, if found */ + if(remove_from_array(qpm,qp) == HH_OK) + return HH_OK; +#endif + + MOSAL_spinlock_dpc_lock(&(qpm->hash_lock)); + + /* Scan hash bin to find given QP's entry */ + for (entry2remove_p= qpm->hash_tbl[hash_index]; entry2remove_p != NULL; + entry2remove_p= entry2remove_p->next) { + if (entry2remove_p->qp == qp) break; + prev_p= entry2remove_p; + } + if (entry2remove_p == NULL) { + MTL_ERROR4("THHUL_qpm::remove_from_hash: qpn=%d not found in the hash table.\n", + qp->qpn); + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + return HH_EINVAL; + } + /* Remove entry */ + /* prev==NULL ==> next should be put directly in hash array */ + if (prev_p == NULL) { + 
qpm->hash_tbl[hash_index]= entry2remove_p->next; + } else { /* else, attach next to prev */ + prev_p->next= entry2remove_p->next; + } + + MOSAL_spinlock_unlock(&(qpm->hash_lock)); + + FREE(entry2remove_p); + return HH_OK; +} + + +#ifndef __KERNEL__ +/******************************** + * Descriptors pool functions - not used in kernel space + ********************************/ + +#ifdef THHUL_QPM_DEBUG_DPOOL + +static void dpool_dump_list(THHUL_qpm_t qpm, MT_size_t size_index, + const char *context_text, THHUL_qpm_dpool_t *dpool_context) +{ + THHUL_qpm_dpool_t *cur_dpool_p; + unsigned long cntr= 0; + + MTL_ERROR1(MT_FLFMT("[%s - dpool_p=%p] Found inconsistancy in dpool list for buffers of %u KB:"), + context_text, dpool_context, size_index + THHUL_DPOOL_SZ_MIN_KB); + cur_dpool_p= qpm->dpool_p[size_index]; + while ((cur_dpool_p != NULL) && (cur_dpool_p->next != qpm->dpool_p[size_index]) && + (cntr < qpm->dpool_cnt)) { + MTL_ERROR1("(%p <- %p -> %p) ", cur_dpool_p->prev, cur_dpool_p, cur_dpool_p->next); + cntr++; + cur_dpool_p= cur_dpool_p->next; + } + MTL_ERROR1("(End of list)\n"); + getchar(); +} + +static MT_bool dpool_check_consistancy(THHUL_qpm_t qpm, + const char *context_text, THHUL_qpm_dpool_t *dpool_context) +{ + THHUL_qpm_dpool_t *cur_dpool_p; + MT_size_t size_index; + unsigned long cntr= 0; + + for (size_index= 0; + size_index < (THHUL_DPOOL_SZ_MAX_KB - THHUL_DPOOL_SZ_MIN_KB + 1); + size_index++) { + cur_dpool_p= qpm->dpool_p[size_index]; + while ((cur_dpool_p != NULL) && (cur_dpool_p->next != qpm->dpool_p[size_index])) { + if ((cur_dpool_p->next == NULL) || + (cur_dpool_p->prev == NULL) || + (cur_dpool_p->next->prev != cur_dpool_p) || + (cur_dpool_p->prev->next != cur_dpool_p)) { + dpool_dump_list(qpm, size_index, context_text, dpool_context); + return FALSE; + } + cntr++; + if (cntr > qpm->dpool_cnt) { + MTL_ERROR1(MT_FLFMT("Reading more dpool objects in list than total (%lu)"), + qpm->dpool_cnt); + dpool_dump_list(qpm, size_index, context_text, dpool_context); + return FALSE; + } + cur_dpool_p= cur_dpool_p->next; + } + } + return TRUE; +} + +#endif /*DEBUG_DPOOL*/ + + +static THHUL_qpm_dpool_t * dpool_create(THHUL_qpm_t qpm, u_int8_t buf_size_kb) +{ + THHUL_qpm_dpool_t *new_dpool_p; + MT_virt_addr_t orig_buf_limit; + MT_virt_addr_t cur_buf; + const MT_size_t size_index= buf_size_kb - THHUL_DPOOL_SZ_MIN_KB; + const MT_size_t buf_size= (buf_size_kb << THHUL_DPOOL_SZ_UNIT_SHIFT); + + new_dpool_p= TMALLOC(THHUL_qpm_dpool_t); + if (new_dpool_p == NULL) { + MTL_ERROR2(MT_FLFMT("%s: Failed allocating THHUL_qpm_dpool_t"), __func__); + return NULL; + } + + /* Allocate descriptors pool memory - aligned on page start */ + new_dpool_p->orig_size= (THHUL_DPOOL_SZ_BASE_BUF_KB << THHUL_DPOOL_SZ_UNIT_SHIFT) + + (MOSAL_SYS_PAGE_SIZE - 1); + new_dpool_p->orig_buf= qpm_malloc_within_4GB(new_dpool_p->orig_size, + &new_dpool_p->used_virt_alloc) ; + if (new_dpool_p->orig_buf == NULL) { + MTL_ERROR2(MT_FLFMT("%s: Failed allocating descriptors pool memory of "SIZE_T_FMT" B"), + __func__, new_dpool_p->orig_size); + goto failed_orig_buf; + } + orig_buf_limit= (MT_virt_addr_t) new_dpool_p->orig_buf + new_dpool_p->orig_size; + + new_dpool_p->free_buf_list= NULL; + /* First buffer starts at page boundry and all buffers are of 1KB size multiples */ + /* So all buffers of the dpool are aligned to 1KB, i.e., aligned to any size of our */ + /* WQEs which are all (stride) of power of 2 */ + for (cur_buf= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)new_dpool_p->orig_buf,MOSAL_SYS_PAGE_SHIFT); + (cur_buf+buf_size) < 
orig_buf_limit ; + cur_buf+= buf_size ) { + *(void**)cur_buf= new_dpool_p->free_buf_list; /* link before first */ + new_dpool_p->free_buf_list= (void*)cur_buf; + } + + new_dpool_p->buf_size_kb= buf_size_kb; + new_dpool_p->ref_cnt= 0; + + if (qpm->dpool_p[size_index] == NULL) { /* first */ + new_dpool_p->next= new_dpool_p->prev= new_dpool_p; + } else { + new_dpool_p->next= qpm->dpool_p[size_index]; /* link to first */ + new_dpool_p->prev= new_dpool_p->next->prev; /* reverse-link to last */ + new_dpool_p->prev->next= new_dpool_p->next->prev= new_dpool_p; + } + qpm->dpool_p[size_index]= new_dpool_p; /* make first */ + +#ifdef THHUL_QPM_DEBUG_DPOOL + qpm->dpool_cnt++; + MTL_ERROR1("%s: dpool_cnt=%lu (%p <- %p -> %p) \n", __func__, + qpm->dpool_cnt, new_dpool_p->prev, new_dpool_p, new_dpool_p->next); + dpool_check_consistancy(qpm, "After inserting new dpool", new_dpool_p); +#endif + return new_dpool_p; + + failed_orig_buf: + FREE(new_dpool_p); + return NULL; +} + +static void dpool_destroy(THHUL_qpm_t qpm, THHUL_qpm_dpool_t *dpool_p) +{ + const MT_size_t size_index= dpool_p->buf_size_kb - THHUL_DPOOL_SZ_MIN_KB; + + /* Assumes ref_cnt==0 */ + /* bypass this item */ + dpool_p->prev->next= dpool_p->next; + dpool_p->next->prev= dpool_p->prev; + if (qpm->dpool_p[size_index] == dpool_p) { /* if it was the first */ + if (dpool_p->next == dpool_p) { /* and only... */ + qpm->dpool_p[size_index]= NULL; + } else { /* else, make next be first */ + qpm->dpool_p[size_index]= dpool_p->next; + } + } + +#ifdef THHUL_QPM_DEBUG_DPOOL + qpm->dpool_cnt--; + MTL_ERROR1(MT_FLFMT("%s: dpool_cnt=%lu (%p <- %p -> %p) "), __func__, + qpm->dpool_cnt, dpool_p->prev, dpool_p, dpool_p->next); + dpool_check_consistancy(qpm, "After removing a dpool", dpool_p); +#endif + + if (dpool_p->used_virt_alloc) + MOSAL_pci_virt_free_consistent(dpool_p->orig_buf, dpool_p->orig_size); + else + MOSAL_pci_phys_free_consistent(dpool_p->orig_buf, dpool_p->orig_size); + + FREE(dpool_p); +} + +static void* dpool_alloc(THHUL_qpm_t qpm, u_int8_t buf_size_kb, THHUL_qpm_dpool_t **dpool_pp) +{ + THHUL_qpm_dpool_t *dpool_p; + void* alloc_buf; + const MT_size_t size_index= buf_size_kb - THHUL_DPOOL_SZ_MIN_KB; + + if ((buf_size_kb < THHUL_DPOOL_SZ_MIN_KB) || (buf_size_kb > THHUL_DPOOL_SZ_MAX_KB)) { + MTL_ERROR2(MT_FLFMT("%s: Given buf_size_kb=0x%u " + "(THHUL_DPOOL_SZ_MIN_KB=%u , THHUL_DPOOL_SZ_MAX_KB=%u)"), __func__, + buf_size_kb, THHUL_DPOOL_SZ_MIN_KB, THHUL_DPOOL_SZ_MAX_KB); + return NULL; + } + + MOSAL_mutex_acq_ui(&qpm->dpool_lock); + + dpool_p= qpm->dpool_p[size_index]; + /* If no dpool for this size or existing dpool is full (empty free list) */ + if ((dpool_p == NULL) || (dpool_p->free_buf_list == NULL)) { + dpool_p= dpool_create(qpm, buf_size_kb); + if (dpool_p == NULL) return NULL; + } + + alloc_buf= dpool_p->free_buf_list; + dpool_p->free_buf_list= *(void**)alloc_buf; /* next is embedded in free buffer */ + dpool_p->ref_cnt++; + + if ((dpool_p->free_buf_list == NULL) && (dpool_p->prev != dpool_p)) { + /* If emptied and not the only dpool for this size - move to end of dpool list for this size */ + qpm->dpool_p[size_index]= dpool_p->next; /* "shift" first */ + } + +#ifdef THHUL_QPM_DEBUG_DPOOL + dpool_check_consistancy(qpm, "After moving dpool to end of list", dpool_p); +#endif + + MOSAL_mutex_rel(&qpm->dpool_lock); + + *dpool_pp= dpool_p; + return alloc_buf; +} + +static void dpool_free(THHUL_qpm_t qpm, THHUL_qpm_dpool_t *dpool_p, void* buf) +{ + const MT_size_t size_index= dpool_p->buf_size_kb - THHUL_DPOOL_SZ_MIN_KB; + /* no check on this 
- assumes dpool is trusted (value checked on creation) */
+
+  MOSAL_mutex_acq_ui(&qpm->dpool_lock);
+  /* put in free list of associated dpool */
+  *(void**)buf= dpool_p->free_buf_list;
+  dpool_p->free_buf_list= buf;
+  dpool_p->ref_cnt--;
+  if (dpool_p->ref_cnt == 0) {
+    /* if reached ref_cnt 0, probably not much of this size - compact dpools list */
+    dpool_destroy(qpm,dpool_p);
+
+  } else if (qpm->dpool_p[size_index] != dpool_p) {
+    /* if not the first dpool for this size */
+    /* Move to beginning of dpool list for this size - it has what to offer... */
+    if (dpool_p->next != dpool_p->prev) {
+      /* more than 2 items - really need to move */
+      /* first disconnect */
+      dpool_p->prev->next= dpool_p->next;
+      dpool_p->next->prev= dpool_p->prev;
+      /* Now connect between first and last */
+      dpool_p->next= qpm->dpool_p[size_index]; /* link to first */
+      dpool_p->prev= dpool_p->next->prev;      /* reverse-link to last */
+      dpool_p->prev->next= dpool_p->next->prev= dpool_p;
+    }
+    /* (after moved to new location) make first */
+    qpm->dpool_p[size_index]= dpool_p;
+  }
+
+#ifdef THHUL_QPM_DEBUG_DPOOL
+  dpool_check_consistancy(qpm, "After moving dpool to start of list", dpool_p);
+#endif
+
+  MOSAL_mutex_rel(&qpm->dpool_lock);
+}
+#endif
+#ifdef DUMP_SEND_REQ
+static void dump_send_req(THHUL_qp_t qp, HHUL_send_req_t *sr)
+{
+  int i;
+
+  MTL_DEBUG4(MT_FLFMT("QP 0x%X - Send: %d S/G entries"),qp->qpn,sr->sg_lst_len);
+  for (i= 0; i < sr->sg_lst_len; i++) {
+    MTL_DEBUG4(MT_FLFMT("Entry %d: lkey=0x%X va=0x%X len=%d"),i,
+      sr->sg_lst_p[i].lkey,(MT_virt_addr_t)sr->sg_lst_p[i].addr,sr->sg_lst_p[i].len);
+  }
+}
+#endif
+
+#if defined(VXWORKS_OS) || defined(LINUX)
+//#if 1
+#include "thhul_qpm_iba.h"
+/* This is the second optimized version of post_send_req.
+ * It eliminates the IbAccess conversion time: the sg_list and remote_addr
+ * build function (WQE_pack_remote_addr_and_sg_list_iba) works on the IbAccess
+ * data structures directly. See thhul_qpm_iba.h for detail.
+ *
+ * This version only supports ReliableConnection and UnreliableDatagram.
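+ * Note: the fast path below links the new WQE into the previous WQE's "next"
+ * segment and rings the send doorbell through the UAR - as a single atomic
+ * 64-bit MMIO write where the platform provides one, otherwise as two 32-bit
+ * writes under the UAR spinlock.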
+ */ + +#define SET_SE (MT_bool)work_request->Req.SendRC.Options.s.SolicitedEvent +#define FENCE work_request->Req.SendRC.Options.s.Fence +#define IMM_DATA_FLAG work_request->Req.SendRC.Options.s.ImmediateData +#define IMM_DATA_VALUE work_request->Req.SendRC.ImmediateData +#define REMOTE_QP work_request->Req.SendUD.QPNumber +#define REMOTE_QKEY work_request->Req.SendUD.Qkey +#define IB_OP_CODE work_request->Operation +#define REMOTE_ADDR work_request->Req.SendRC.RemoteDS.Address +#define REMOTE_RKEY work_request->Req.SendRC.RemoteDS.Rkey +#define REQUEST_ID work_request->WorkReqId +#define SG_LST_LEN work_request->DSListDepth + +VAPI_ret_t THHUL_qpm_post_send_req2( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_comp_type_t comp_type, + VAPI_ud_av_hndl_t remote_ah, + void *WorkRequest + ) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + u_int32_t wqe_sz_dwords; + HH_ret_t rc = HH_OK; + volatile u_int32_t* next_wqe; /* Actual WQE pointer */ + u_int32_t *cur_loc_p; + tavor_if_nopcode_t tavorOpCode; + int i; + CHIME_WORDS_PREFIX u_int32_t chimeWords[2]; + THH_uar_t uar; + u_int32_t wqe_sz_dwords_byte; + IB_WORK_REQ *work_request = (IB_WORK_REQ *)WorkRequest; + u_int32_t sg_lst_len = SG_LST_LEN; + + if(MOSAL_EXPECT_FALSE(qp->sq_res.qp_state < VAPI_RTS)) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state); + return HH_EINVAL_QP_STATE; + } + + if (MOSAL_EXPECT_FALSE(qp->sq_res.max_sg_sz < sg_lst_len)) { + MTL_ERROR2( + "THHUL_qpm_post_send_req2: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n", + sg_lst_len,qp->sq_res.max_sg_sz); + return HH_EINVAL_SG_NUM; + } + + MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */ + + /* Check if any WQEs are free to be consumed */ + if (MOSAL_EXPECT_FALSE(qp->sq_res.max_outs == qp->sq_res.cur_outs)) { + MTL_ERROR4("THHUL_qpm_post_send_req2: Send queue is full (%u requests outstanding).\n", + qp->sq_res.cur_outs); + return HH_E2BIG_WR_NUM; + } + /* Allocate next WQE */ + next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf + + (qp->sq_res.next2post_index << qp->sq_res.log2_max_wqe_sz) ); + + /* build new wqe */ + cur_loc_p = WQE_build_send_be_req2(qp,(u_int32_t*)next_wqe, + comp_type, + remote_ah, + REMOTE_QP, + REMOTE_QKEY, + SET_SE, + IMM_DATA_FLAG?IMM_DATA_VALUE:0 + ); + /* build remote_addr and sg_list field + * This is IbAccess specific and defined thhul_qpm_iba.h. 
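+ * (The pack function presumably also selects the Tavor opcode from the
+ * IbAccess Operation field; it is returned here through &tavorOpCode.)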
+ * This should be maintained every port with new vapi drop + */ + cur_loc_p = WQE_pack_remote_addr_and_sg_list_iba(WorkRequest, + cur_loc_p,sg_lst_len, + IMM_DATA_FLAG,&tavorOpCode); + + + wqe_sz_dwords = (((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)next_wqe)) >> 2; + + { + + wqe_sz_dwords_byte = wqe_sz_dwords>>2; + + /* Update "next" segment of previous WQE (if any) */ + /* this is same as WQE_pack_send_next call */ + qp->sq_res.last_posted_p[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)tavorOpCode + | ((u_int32_t)(unsigned long) next_wqe & BIT_MASK_FOR_NEXT_WQE_31SB )); + + qp->sq_res.last_posted_p[1] = MOSAL_cpu_to_be32(0 + | ((wqe_sz_dwords_byte) << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks + | ( FENCE << NEXT_ST_F_BIT_OFFSET ) + | (1 << NEXT_ST_DBD_BIT_OFFSET) + ); + qp->sq_res.last_posted_p= next_wqe; + + /* Ring doorbell (send or rd-send) */ + /* This is same as THH_uar_sendq_dbell_inline */ + + uar = qp->uar; + + chimeWords[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)tavorOpCode + | ((FENCE & 0x1)<< SEND_DOORBELL_F_BIT_OFFSET) + | ((MT_virt_addr_t)next_wqe & 0xFFFFFFFF)); + chimeWords[1] = MOSAL_cpu_to_be32(0 + | (u_int32_t)(wqe_sz_dwords >> 2) // specify in 16 byte chunks + | ((u_int32_t)(qp->qpn) << SEDN_DOORBELL_QPN_BIT_OFFSET) + ); + +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); +#else + cur_loc_p = (u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]; + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_DWORD(&cur_loc_p[0], chimeWords[0]); + MOSAL_MMAP_IO_WRITE_DWORD(&cur_loc_p[1], chimeWords[1]); + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + + } + + /* save WorkRequest ID and update index */ + qp->sq_res.wqe_id[qp->sq_res.next2post_index]= REQUEST_ID; /* Save WQE ID */ + + i = ++qp->sq_res.next2post_index; + if (MOSAL_EXPECT_FALSE(i >= qp->sq_res.max_outs)) + qp->sq_res.next2post_index = 0; + + qp->sq_res.cur_outs++; + + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + return rc; +} + +/* This is second optimized version of THHUL_qpm_post_recv_req to + * eliminate IbAccess conversion time. All parameters needed to build + * WQE is passed from VapiHcaShime layer directly. 
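+ * Note: the receive doorbell below is always written as one quadword; only
+ * the locking around the UAR write differs between the two platform branches.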
+ */ +VAPI_ret_t THHUL_qpm_post_recv_req2( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_comp_type_t comp_type, + u_int32_t sg_lst_len, + VAPI_wr_id_t ReqId, + VAPI_sg_lst_entry_t *sg_lst_p + ) +{ + THHUL_qp_t qp= (THHUL_qp_t)hhul_qp; + volatile u_int32_t* next_wqe; /* Actual WQE pointer */ + u_int32_t wqe_sz_dwords; + CHIME_WORDS_PREFIX u_int32_t chimeWords[2]; + u_int32_t* cur_loc_p; + u_int32_t wqe_sz_dwords_byte; + THH_uar_t uar; + //int i; + + if (MOSAL_EXPECT_FALSE(qp->rq_res.qp_state < VAPI_INIT )) { + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to recv \n"),__func__,qp->rq_res.qp_state); + return HH_EINVAL_QP_STATE; + } + + if (MOSAL_EXPECT_FALSE(qp->rq_res.max_sg_sz < sg_lst_len)) { + MTL_ERROR2( + "THHUL_qpm_post_recv_req: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n", + sg_lst_len,qp->rq_res.max_sg_sz); + return HH_EINVAL_SG_NUM; + } + + MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */ + + /* Check if any WQEs are free to be consumed */ + if (MOSAL_EXPECT_FALSE(qp->rq_res.max_outs == qp->rq_res.cur_outs)) { + MOSAL_spinlock_unlock(&(qp->rq_res.q_lock)); + MTL_ERROR4("THHUL_qpm_post_recv_req2: Receive queue is full (%d requests outstanding).\n", + qp->rq_res.cur_outs); + return HH_E2BIG_WR_NUM; + } + /* Allocate next WQE */ + next_wqe= (u_int32_t*) (qp->rq_res.wqe_buf + + (qp->rq_res.next2post_index << qp->rq_res.log2_max_wqe_sz) ); + + /* Build WQE */ + /* WQE_build_recv_be_req2 is same as WQE_build_recv_be but not build + * sg_list. + * Building sg_list is done WQE_pack_sg_list_iba + */ + cur_loc_p = WQE_build_recv_be_req2(qp,(u_int32_t*)next_wqe,comp_type); + + + /* build sg_list */ + /* This is same as calling WQE_build_send_sg_list */ + /* WQE_pack_sg_list_iba build sg_list directly from IbAccess structure. 
+ * See thhul_qpm_iba.h
+ */
+  cur_loc_p = WQE_pack_sg_list_iba(sg_lst_p,cur_loc_p,sg_lst_len);
+
+  wqe_sz_dwords = (((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)next_wqe)) >> 2;
+
+  qp->rq_res.wqe_id[qp->rq_res.next2post_index]= ReqId; /* Save WQE ID */
+  ++qp->rq_res.next2post_index;
+  if(MOSAL_EXPECT_FALSE(qp->rq_res.next2post_index >= qp->rq_res.max_outs))
+    qp->rq_res.next2post_index = 0;
+
+  qp->rq_res.cur_outs++;
+
+  /* Update "next" segment of previous WQE (if any) */
+
+  wqe_sz_dwords_byte = wqe_sz_dwords>>2;
+
+  qp->rq_res.last_posted_p[NEXT_ST_NDA_31_6_DWORD_OFFSET] = MOSAL_cpu_to_be32(0
+    | ((u_int32_t)(unsigned long) next_wqe & BIT_MASK_FOR_NEXT_WQE_31SB ));
+  qp->rq_res.last_posted_p[NEXT_ST_NDS_DWORD_OFFSET] = MOSAL_cpu_to_be32(0
+    | (wqe_sz_dwords_byte << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks
+    | (1 << NEXT_ST_DBD_BIT_OFFSET)
+    );
+
+
+  qp->rq_res.last_posted_p= next_wqe;
+
+  /* Ring doorbell */
+  uar = qp->uar;
+  chimeWords[0] = MOSAL_cpu_to_be32(0
+    | (u_int32_t)(uintn)next_wqe
+    | ((wqe_sz_dwords + 3) >> 2) // specify in 16 byte chunks
+    );
+  chimeWords[1] = MOSAL_cpu_to_be32(0
+    | 1 // credits
+    | ((u_int32_t)(qp->qpn) << 8)
+    );
+
+#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__
+  MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords);
+#else
+  MOSAL_spinlock_dpc_lock(&(uar->uar_lock));
+  MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords);
+  MOSAL_spinlock_unlock(&(uar->uar_lock));
+#endif
+
+  MOSAL_spinlock_unlock(&(qp->rq_res.q_lock));
+
+
+  return HH_OK;
+}
+
+#else /* WIN32 */
+/* This is an optimized, IB_AL-native version of post_send_req.
+ * It eliminates conversion time: the sg_list and remote_addr build function
+ * (WQE_pack_remote_addr_and_sg_list_ibal) works on the IBAL data structures
+ * directly. See thhul_qpm_ibal.h for detail.
+ *
+ * This version only supports ReliableConnection and UnreliableDatagram.
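+ * Unlike the VAPI variant above, this routine walks the caller's chained work
+ * requests and reports the first WR that could not be posted through
+ * pp_failed_wr (set to NULL when the whole chain was posted).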
+ */ + +ib_api_status_t +THHUL_qpm_post_send_wrs( + IN HHUL_hca_hndl_t hca, + IN HHUL_qp_hndl_t hhul_qp, + IN ib_send_wr_t *p_send_wr, + OUT ib_send_wr_t **pp_failed_wr OPTIONAL ) +{ + THHUL_qp_t qp = (THHUL_qp_t)hhul_qp; + u_int32_t wqe_sz_dwords; + HH_ret_t rc = HH_OK; + u_int32_t* next_wqe; /* Actual WQE pointer */ + u_int32_t *cur_loc_p; + tavor_if_nopcode_t opcode; + CHIME_WORDS_PREFIX u_int32_t chimeWords[2]; + THH_uar_t uar; + u_int32_t wqe_sz_dwords_byte; + ib_send_wr_t *p_wr; + + if( MOSAL_EXPECT_FALSE(qp->sq_res.qp_state < VAPI_RTS) ) + { + if( pp_failed_wr ) + *pp_failed_wr = p_send_wr; + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to send \n"),__func__,qp->sq_res.qp_state); + return IB_INVALID_QP_STATE; + } + + p_wr = p_send_wr; + MOSAL_spinlock_dpc_lock(&(qp->sq_res.q_lock)); /* protect wqe_draft and WQE allocation/link */ + while( p_wr ) + { + if( MOSAL_EXPECT_FALSE(qp->sq_res.max_sg_sz < p_wr->num_ds) ) + { + MTL_ERROR2( + "THHUL_qpm_post_send_req2: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n", + p_wr->num_ds,qp->sq_res.max_sg_sz); + rc = HH_E2BIG_SG_NUM; + break; + } + + /* Check if any WQEs are free to be consumed */ + if (MOSAL_EXPECT_FALSE(qp->sq_res.max_outs == qp->sq_res.cur_outs)) { + MTL_ERROR4("THHUL_qpm_post_send_req2: Send queue is full (%u requests outstanding).\n", + qp->sq_res.cur_outs); + rc = HH_E2BIG_WR_NUM; + break; + } + + /* Allocate next WQE */ + next_wqe= (u_int32_t*)(qp->sq_res.wqe_buf + + (qp->sq_res.next2post_index << qp->sq_res.log2_max_wqe_sz) ); + + if( MOSAL_EXPECT_TRUE( qp->sqp_type == VAPI_REGULAR_QP ) ) + { + /* build new wqe */ + cur_loc_p = WQE_build_send_be_req3(qp, next_wqe, + p_wr->send_opt & IB_SEND_OPT_SIGNALED? VAPI_SIGNALED : VAPI_UNSIGNALED, + p_wr, + p_wr->send_opt & IB_SEND_OPT_SOLICITED? 1:0, + p_wr->immediate_data + ); + + /* build remote_addr and sg_list field + * This is IBAL specific and defined thhul_qpm_ibal.h. + * This should be maintained every port with new vapi drop + */ + rc = WQE_pack_rem_addr_and_sgl_ibal( p_wr, &cur_loc_p, + qp->sq_res.max_inline_data, &opcode ); + if( rc != HH_OK ) + break; + + wqe_sz_dwords = (u_int32_t)((((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)next_wqe)) >> 2); + } + else + { + THH_hca_ul_resources_t hca_ul_res; + u_int32_t* wqe_draft= qp->sq_res.wqe_draft; + unsigned int i; + + /* Code needs to match behavior of VAPI_SEND support only, that means no imm data. */ + if (MOSAL_EXPECT_FALSE(p_wr->wr_type != WR_SEND)) { + rc = HH_EINVAL_WQE; + break; + } + if( MOSAL_EXPECT_FALSE(p_wr->send_opt & IB_SEND_OPT_IMMEDIATE) ) + { + rc = HH_EINVAL_WQE; + break; + } + opcode = TAVOR_IF_NOPCODE_SEND; + p_wr->send_opt &= ~IB_SEND_OPT_FENCE;/* required for MLX requests */ + if(MOSAL_EXPECT_FALSE(THHUL_hob_get_hca_ul_res(hca,&hca_ul_res) != HH_OK)) + { + rc = HH_EINVAL_HCA_HNDL; + break; + } + /* + * Build the WQE in BE format. Note that we must still use the wqe_draft since + * the function still builds part of the WQE in LE and swaps. 
+ */ + wqe_sz_dwords= (WQE_build_send_mlx2_be(hca_ul_res.hh_hca_hndl,qp,p_wr,QP1_PKEY_INDEX,wqe_draft) >> 2); + if (MOSAL_EXPECT_FALSE(wqe_sz_dwords == 0)) { + MTL_ERROR1(MT_FLFMT("Failed building MLX headers for special QP.\n")); + rc = HH_EINVAL_WQE; + break; + } + + /* we used a temporary (draft) memory space, so move it do destination */ + for(i = 0; i < wqe_sz_dwords; ++i) { + next_wqe[i] = wqe_draft[i]; + } + } + + /* Save WorkRequest ID */ + qp->sq_res.wqe_id[qp->sq_res.next2post_index]= p_wr->wr_id; + + wqe_sz_dwords_byte = wqe_sz_dwords>>2; + + /* Update "next" segment of previous WQE (if any) */ + /* this is same as WQE_pack_send_next call */ + qp->sq_res.last_posted_p[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)opcode + | ((u_int32_t)(MT_ulong_ptr_t) next_wqe & BIT_MASK_FOR_NEXT_WQE_31SB )); + + qp->sq_res.last_posted_p[1] = MOSAL_cpu_to_be32(0 + | ((wqe_sz_dwords_byte) << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks + | ( ((p_wr->send_opt & IB_SEND_OPT_FENCE)?1:0) << NEXT_ST_F_BIT_OFFSET ) + | (1 << NEXT_ST_DBD_BIT_OFFSET) + ); + qp->sq_res.last_posted_p= next_wqe; + + /* Ring doorbell (send or rd-send) */ + /* This is same as THH_uar_sendq_dbell_inline */ + + uar = qp->uar; + + chimeWords[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)opcode + | (((p_wr->send_opt & IB_SEND_OPT_FENCE)?1:0)<< SEND_DOORBELL_F_BIT_OFFSET) + | ((u_int32_t)(MT_ulong_ptr_t)next_wqe & 0xFFFFFFFF)); + chimeWords[1] = MOSAL_cpu_to_be32(0 + | (u_int32_t)(wqe_sz_dwords >> 2) // specify in 16 byte chunks + | ((u_int32_t)(qp->qpn) << SEDN_DOORBELL_QPN_BIT_OFFSET) + ); + +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); +#else + cur_loc_p = (u_int32_t *)&uar->uar_base[UAR_SEND_DBELL_OFFSET]; + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_DWORD(&cur_loc_p[0], chimeWords[0]); + MOSAL_MMAP_IO_WRITE_DWORD(&cur_loc_p[1], chimeWords[1]); + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + + /* Update index */ + if( MOSAL_EXPECT_FALSE( + ++qp->sq_res.next2post_index >= qp->sq_res.max_outs ) ) + { + qp->sq_res.next2post_index = 0; + } + qp->sq_res.cur_outs++; + p_wr = p_wr->p_next; + } + MOSAL_spinlock_unlock(&(qp->sq_res.q_lock)); + /* Set in all cases. If all went well, will be set to NULL. */ + if( pp_failed_wr ) + *pp_failed_wr = p_wr; + switch( rc ) + { + case HH_OK: + return IB_SUCCESS; + + case HH_EAGAIN: + return IB_INSUFFICIENT_RESOURCES; + + case HH_E2BIG_SG_NUM: + return IB_INVALID_MAX_SGE; + + case HH_E2BIG_WR_NUM: + return IB_INSUFFICIENT_RESOURCES; + + default: + return IB_ERROR; + } +} + +/* + * This is an optimized, IB_AL native version of THHUL_qpm_post_recv_req to + * eliminate conversion time. 
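+ * As in the send path, posting stops at the first WR that fails validation;
+ * that WR (or NULL on full success) is returned through pp_failed_wr.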
+ */ +ib_api_status_t +THHUL_qpm_post_recv_wrs( + IN HHUL_hca_hndl_t hca, + IN HHUL_qp_hndl_t hhul_qp, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed_wr OPTIONAL ) +{ + THHUL_qp_t qp = (THHUL_qp_t)hhul_qp; + u_int32_t *next_wqe; /* Actual WQE pointer */ + u_int32_t wqe_sz_dwords; + CHIME_WORDS_PREFIX u_int32_t chimeWords[2]; + u_int32_t *cur_loc_p; + u_int32_t wqe_sz_dwords_byte; + THH_uar_t uar; + ib_recv_wr_t *p_wr; + HH_ret_t rc = HH_OK; + //int i; + + if (MOSAL_EXPECT_FALSE(qp->rq_res.qp_state < VAPI_INIT )) + { + if( pp_failed_wr ) + *pp_failed_wr = p_recv_wr; + MTL_ERROR1(MT_FLFMT("%s failed: qp state %d not valid to recv \n"),__func__,qp->rq_res.qp_state); + return IB_INVALID_QP_STATE; + } + + p_wr = p_recv_wr; + MOSAL_spinlock_dpc_lock(&(qp->rq_res.q_lock)); /* protect wqe_draft as well as WQE allocation/link */ + while( p_wr ) + { + if( MOSAL_EXPECT_FALSE(qp->rq_res.max_sg_sz < p_wr->num_ds) ) + { + MTL_ERROR2( + "THHUL_qpm_post_recv_req: Scatter/Gather list is too large (%d entries > max_sg_sz=%d)\n", + p_wr->num_ds,qp->rq_res.max_sg_sz); + rc = HH_EINVAL_SG_NUM; + break; + } + + /* Check if any WQEs are free to be consumed */ + if (MOSAL_EXPECT_FALSE(qp->rq_res.max_outs == qp->rq_res.cur_outs)) { + MTL_ERROR4("THHUL_qpm_post_recv_req2: Receive queue is full (%d requests outstanding).\n", + qp->rq_res.cur_outs); + rc = HH_E2BIG_WR_NUM; + break; + } + /* Allocate next WQE */ + next_wqe= (u_int32_t*) (qp->rq_res.wqe_buf + + (qp->rq_res.next2post_index << qp->rq_res.log2_max_wqe_sz) ); + + /* Build WQE */ + /* WQE_build_recv_be_req2 is same as WQE_build_recv_be but not build + * sg_list. + * Building sg_list is done WQE_pack_sg_list_iba + */ + cur_loc_p = WQE_build_recv_be_req2(qp,next_wqe,VAPI_SIGNALED); + + + /* build sg_list */ + /* This is same as calling WQE_build_send_sg_list */ + /* WQE_pack_sg_list_iba build sg_list directly from IbAccess structure. 
+ * See thhul_qpm_iba.h + */ + cur_loc_p = WQE_pack_sg_list_ibal( p_wr, cur_loc_p ); + + wqe_sz_dwords = (u_int32_t)((((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)next_wqe)) >> 2); + + qp->rq_res.wqe_id[qp->rq_res.next2post_index]= p_wr->wr_id; /* Save WQE ID */ + if(MOSAL_EXPECT_FALSE(++qp->rq_res.next2post_index >= qp->rq_res.max_outs)) + qp->rq_res.next2post_index = 0; + + qp->rq_res.cur_outs++; + + /* Update "next" segment of previous WQE (if any) */ + wqe_sz_dwords_byte = wqe_sz_dwords>>2; + + qp->rq_res.last_posted_p[NEXT_ST_NDA_31_6_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | ((u_int32_t)(MT_ulong_ptr_t) next_wqe & BIT_MASK_FOR_NEXT_WQE_31SB )); + qp->rq_res.last_posted_p[NEXT_ST_NDS_DWORD_OFFSET] = MOSAL_cpu_to_be32(0 + | (wqe_sz_dwords_byte << NEXT_ST_NDS_BIT_OFFSET ) // specify in 16 byte chunks + | (1 << NEXT_ST_DBD_BIT_OFFSET) + ); + + qp->rq_res.last_posted_p= next_wqe; + + /* Ring doorbell */ + uar = qp->uar; + chimeWords[0] = MOSAL_cpu_to_be32(0 + | (u_int32_t)(uintn_t)next_wqe + | ((wqe_sz_dwords + 3) >> 2) // specify in 16 byte chunks + ); + chimeWords[1] = MOSAL_cpu_to_be32(0 + | 1 // credits + | ((u_int32_t)(qp->qpn) << 8) + ); + +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); +#else + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); + MOSAL_MMAP_IO_WRITE_QWORD(((u_int32_t *)&uar->uar_base[UAR_RECV_DBELL_OFFSET]),*(volatile u_int64_t*)chimeWords); + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif + + p_wr = p_wr->p_next; + } + MOSAL_spinlock_unlock(&(qp->rq_res.q_lock)); + + /* Set in all cases. If all went well, will be set to NULL. */ + if( pp_failed_wr ) + *pp_failed_wr = p_wr; + switch( rc ) + { + case HH_OK: + return IB_SUCCESS; + + case HH_EAGAIN: + return IB_INSUFFICIENT_RESOURCES; + + case HH_EINVAL_SG_NUM: + return IB_INVALID_MAX_SGE; + + case HH_E2BIG_WR_NUM: + return IB_INVALID_MAX_WRS; + + default: + return IB_ERROR; + } +} +#endif /* WIN32 */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h new file mode 100644 index 00000000..155fa543 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm.h @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THHUL_QPM_H +#define H_THHUL_QPM_H + +#include +#include +#include + +/* The value of completed WQEs 32 ls-bits we return from THHUL_qpm_comp_ok/error + * in case of end of WQEs chain - for synchronizing flush-error CQE recycling flow. + * we use 1 since 0 is a valid value while 1 is not - it is not aligned to 64B */ +#define THHUL_QPM_END_OF_WQE_CHAIN 1 + + +DLL_API HH_ret_t THHUL_qpm_create( + /*IN*/ THHUL_hob_t hob, + /*IN*/ THHUL_srqm_t srqm, + /*OUT*/ THHUL_qpm_t *qpm_p +); + + +DLL_API HH_ret_t THHUL_qpm_destroy( + /*IN*/ THHUL_qpm_t qpm +); + + +DLL_API HH_ret_t THHUL_qpm_create_qp_prep( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_init_attr_t *qp_init_attr_p, + /*OUT*/ HHUL_qp_hndl_t *qp_hndl_p, + /*OUT*/ VAPI_qp_cap_t *qp_cap_out_p, + /*OUT*/ void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p +); + +DLL_API HH_ret_t THHUL_qpm_special_qp_prep( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ VAPI_special_qp_t qp_type, + /*IN*/ IB_port_t port, + /*IN*/ HHUL_qp_init_attr_t *qp_init_attr_p, + /*OUT*/ HHUL_qp_hndl_t *qp_hndl_p, + /*OUT*/ VAPI_qp_cap_t *qp_cap_out_p, + /*OUT*/ void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_qpm_create_qp_done( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ IB_wqpn_t hh_qp, + /*IN*/ void/*THH_qp_ul_resources_t*/ *qp_ul_resources_p +); + + +DLL_API HH_ret_t THHUL_qpm_destroy_qp_done( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp +); + +DLL_API HH_ret_t THHUL_qpm_modify_qp_done( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_qp_state_t cur_state +); + + +DLL_API HH_ret_t THHUL_qpm_post_send_req( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_sr_desc_t *send_req_p +); + +#ifndef WIN32 +DLL_API HH_ret_t THHUL_qpm_post_send_req2( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_comp_type_t comp_type, + /*IN*/ VAPI_ud_av_hndl_t remote_ah, + /*IN*/ void* WorkReq +); +#else +#include +DLL_API ib_api_status_t +THHUL_qpm_post_send_wrs( + IN HHUL_hca_hndl_t hca, + IN HHUL_qp_hndl_t hhul_qp, + IN ib_send_wr_t *p_send_wr, + OUT ib_send_wr_t **pp_failed_wr ); +#endif + +DLL_API HH_ret_t THHUL_qpm_post_inline_send_req( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_sr_desc_t *send_req_p +); + +DLL_API HH_ret_t THHUL_qpm_post_send_reqs( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ u_int32_t num_of_requests, + /*IN*/ VAPI_sr_desc_t *send_req_array +); + +DLL_API HH_ret_t THHUL_qpm_post_gsi_send_req( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_qp, + VAPI_sr_desc_t *send_req_p, + VAPI_pkey_ix_t pkey_index +); + +DLL_API HH_ret_t THHUL_qpm_post_recv_req( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_rr_desc_t *recv_req_p +); + +#ifndef WIN32 +DLL_API HH_ret_t THHUL_qpm_post_recv_req2( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_qp_hndl_t hhul_qp, + /*IN*/ VAPI_comp_type_t comp_type, + /*IN*/ u_int32_t sg_lst_len, + /*IN*/ VAPI_wr_id_t ReqId, + /*IN*/ VAPI_sg_lst_entry_t *sg_lst_p +); +#else +DLL_API ib_api_status_t +THHUL_qpm_post_recv_wrs( + IN HHUL_hca_hndl_t hca, + IN HHUL_qp_hndl_t hhul_qp, + IN ib_recv_wr_t *p_recv_wr, + OUT 
ib_recv_wr_t **pp_failed_wr );
+#endif
+
+DLL_API HH_ret_t THHUL_qpm_post_recv_reqs(
+  /*IN*/ HHUL_hca_hndl_t hca,
+  /*IN*/ HHUL_qp_hndl_t hhul_qp,
+  /*IN*/ u_int32_t num_of_requests,
+  /*IN*/ VAPI_rr_desc_t *recv_req_array
+);
+
+DLL_API HH_ret_t THHUL_qpm_post_bind_req(
+  /*IN*/ HHUL_mw_bind_t *bind_props_p,
+  /*IN*/ IB_rkey_t new_rkey
+);
+
+
+DLL_API HH_ret_t THHUL_qpm_comp_ok(
+  /*IN*/ THHUL_qpm_t qpm,
+  /*IN*/ IB_wqpn_t qpn,
+  /*IN*/ u_int32_t wqe_addr_32lsb,
+  /*OUT*/ VAPI_special_qp_t *qp_type_p,
+  /*OUT*/ IB_ts_t *qp_ts_type_p,
+  /*OUT*/ VAPI_wr_id_t *wqe_id_p,
+  /*OUT*/ u_int32_t *wqes_released_p
+#ifdef IVAPI_THH
+  , u_int32_t *reserved_p
+#endif
+);
+
+
+DLL_API HH_ret_t THHUL_qpm_comp_err(
+  /*IN*/ THHUL_qpm_t qpm,
+  /*IN*/ IB_wqpn_t qpn,
+  /*IN*/ u_int32_t wqe_addr_32lsb,
+  /*OUT*/ VAPI_wr_id_t *wqe_id_p,
+  /*OUT*/ u_int32_t *wqes_released_p,
+  /*OUT*/ u_int32_t *next_wqe_32lsb_p,
+  /*OUT*/ u_int8_t *dbd_bit_p
+);
+
+DLL_API VAPI_cqe_num_t THHUL_qpm_wqe_cnt(
+  /*IN*/THHUL_qpm_t qpm,
+  /*IN*/IB_wqpn_t qpn,
+  /*IN*/u_int32_t wqe_addr_32lsb,
+  /*IN*/u_int16_t dbd_cnt
+);
+#endif /* H_THHUL_QPM_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h
new file mode 100644
index 00000000..240f8173
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_qpm/thhul_qpm_ibal.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_THHUL_QPM_IBAL_H
+#define H_THHUL_QPM_IBAL_H
+
+/********************************************************************************
+ * This is the IBAL-specific counterpart of THHUL_qpm_post_send_req2.
+ * It builds remote_addr based on the IBAL-specific opcode and builds sg_list
+ * directly from IB_LOCAL_DATASEGMENT to remove conversion in the VapiHcaShim layer.
+ * This function supports only ReliableConnection and ReliableDataGram.
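+ * Inline sends are handled by WQE_pack_inline_sgl_ibal() below: the gather
+ * data is copied into the WQE itself behind a leading dword carrying
+ * WQE_INLINE_FLAG and the byte count.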
+ * + * Used IbAccess Specific Data: + * struct _IB_WORK_REQ *work_request; + * IB_LOCAL_DATASEGMENT *sg_lst_entry; + * work_request->Req.SendRC.RemoteDS.Address; + * work_request->Req.SendRC.RemoteDS.Rkey; + * + ********************************************************************************* + */ + +#include + + +inline int +WQE_pack_inline_sgl_ibal( + IN ib_send_wr_t* const p_wr, + IN u_int32_t **pp_cur_loc, + IN uint32_t max_wqe_inline_size ) +{ + uint32_t i, len = 0; + uint8_t *p_inline_data = (uint8_t*)( (*pp_cur_loc) + 1 ); + +#define WQE_INLINE_FLAG (0x80000000) +#define WQE_INLINE_LENGTH_MASK (0x000003FF) + + for( i = 0; i < p_wr->num_ds; i++ ) + { + if( p_wr->ds_array[i].length + len > max_wqe_inline_size ) + return HH_EINVAL_SG_NUM; + + cl_memcpy( p_inline_data, (void *)p_wr->ds_array[i].vaddr, p_wr->ds_array[i].length ); + p_inline_data += p_wr->ds_array[i].length; + len += p_wr->ds_array[i].length; + } + + if( len ) + { + **pp_cur_loc = cl_hton32(( WQE_INLINE_FLAG | ( len & WQE_INLINE_LENGTH_MASK ))); + /* Round up to nearest 16 */ + if( (uintn_t)p_inline_data & 0x0000000F ) + p_inline_data += 16 - ((uintn_t)p_inline_data & 0x0000000F); + *pp_cur_loc = (uint32_t*)p_inline_data; + CL_ASSERT( !((uintn_t)*pp_cur_loc & 0x0000000F) ); + } + return HH_OK; +} + + +inline void +WQE_pack_sgl_ibal( + IN ib_send_wr_t* const p_wr, + IN u_int32_t **pp_cur_loc ) +{ + uint32_t i; + + for( i = 0; i < p_wr->num_ds; i++, (*pp_cur_loc) += 4 ) + { + WQE_IO_WRITE( &((*pp_cur_loc)[DATA_PTR_BYTE_COUNT_DWORD_OFFSET]), + MOSAL_cpu_to_be32(p_wr->ds_array[i].length & 0x7fffffff) ); + WQE_IO_WRITE( &((*pp_cur_loc)[DATA_PTR_LKEY_DWORD_OFFSET]), + MOSAL_cpu_to_be32(p_wr->ds_array[i].lkey ) ); + WQE_IO_WRITE( &((*pp_cur_loc)[DATA_PTR_LOCAL_ADDR_H_DWORD_OFFSET]), + MOSAL_cpu_to_be32((u_int32_t)(p_wr->ds_array[i].vaddr >> 32)) ); + WQE_IO_WRITE( &((*pp_cur_loc)[DATA_PTR_LOCAL_ADDR_L_DWORD_OFFSET]), + MOSAL_cpu_to_be32((u_int32_t)p_wr->ds_array[i].vaddr) ); + } +} + + + +inline HH_ret_t +WQE_pack_rem_addr_and_sgl_ibal( + IN ib_send_wr_t* const p_wr, + IN u_int32_t **pp_cur_loc, + IN uint32_t max_wqe_inline_size, + OUT tavor_if_nopcode_t* const p_opcode ) +{ + + /* Req2 support only VAPI_RDMA_READ, VAPI_SEND, and VAPI_RDMA_WRITE */ + switch( p_wr->wr_type ) + { + default: + case WR_SEND: + if( p_wr->send_opt & IB_SEND_OPT_IMMEDIATE ) + *p_opcode = TAVOR_IF_NOPCODE_SEND_IMM; + else + *p_opcode = TAVOR_IF_NOPCODE_SEND; + + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + return WQE_pack_inline_sgl_ibal( p_wr, pp_cur_loc, max_wqe_inline_size ); + + break; + + case WR_RDMA_WRITE: + if( p_wr->send_opt & IB_SEND_OPT_IMMEDIATE ) + *p_opcode = TAVOR_IF_NOPCODE_RDMAW_IMM; + else + *p_opcode = TAVOR_IF_NOPCODE_RDMAW; + + *pp_cur_loc += WQE_pack_remote_addr_req2( *pp_cur_loc, + p_wr->remote_ops.vaddr, cl_ntoh32(p_wr->remote_ops.rkey) ); + + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + return WQE_pack_inline_sgl_ibal( p_wr, pp_cur_loc, max_wqe_inline_size ); + + break; + + case WR_RDMA_READ: + *p_opcode = TAVOR_IF_NOPCODE_RDMAR; + + *pp_cur_loc += WQE_pack_remote_addr_req2( *pp_cur_loc, + p_wr->remote_ops.vaddr, cl_ntoh32(p_wr->remote_ops.rkey) ); + + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + return HH_EINVAL_SG_FMT; + + break; + + case WR_COMPARE_SWAP: + *p_opcode = TAVOR_IF_NOPCODE_ATOM_CMPSWP; + + *pp_cur_loc += WQE_pack_remote_addr_req2( *pp_cur_loc, + p_wr->remote_ops.vaddr, cl_ntoh32(p_wr->remote_ops.rkey) ); + *pp_cur_loc += WQE_pack_atomic_cmpswp( *pp_cur_loc, + p_wr->remote_ops.atomic1, p_wr->remote_ops.atomic2 ); + + 
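/* An atomic WR returns exactly one 8-byte value, so the checks below
+		 * enforce a single 8-byte scatter entry and reject inline data. */
+ 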
if( p_wr->num_ds != 1 ) + return HH_EINVAL_SG_NUM; + + if( p_wr->ds_array[0].length != 8 ) + return HH_EINVAL_SG_NUM; + + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + return HH_EINVAL_SG_FMT; + + break; + + case WR_FETCH_ADD: + *p_opcode = TAVOR_IF_NOPCODE_ATOM_FTCHADD; + + *pp_cur_loc += WQE_pack_remote_addr_req2( *pp_cur_loc, + p_wr->remote_ops.vaddr, cl_ntoh32(p_wr->remote_ops.rkey) ); + *pp_cur_loc += WQE_pack_atomic_fetchadd( *pp_cur_loc, + p_wr->remote_ops.atomic1 ); + + if( p_wr->num_ds != 1 ) + return HH_EINVAL_SG_NUM; + + if( p_wr->ds_array[0].length != 8 ) + return HH_EINVAL_SG_NUM; + + if( p_wr->send_opt & IB_SEND_OPT_INLINE ) + return HH_EINVAL_SG_FMT; + + break; + } + + WQE_pack_sgl_ibal( p_wr, pp_cur_loc ); + return HH_OK; +} + + +inline u_int32_t *WQE_pack_sg_list_ibal( + IN ib_recv_wr_t* const p_wr, + IN u_int32_t *p_cur_loc ) +{ + uint32_t i; + + for( i = 0; i < p_wr->num_ds; i++, p_cur_loc += 4 ) + { + WQE_IO_WRITE(&p_cur_loc[DATA_PTR_BYTE_COUNT_DWORD_OFFSET], + MOSAL_cpu_to_be32(p_wr->ds_array[i].length & 0x7fffffff)); + WQE_IO_WRITE(&p_cur_loc[DATA_PTR_LKEY_DWORD_OFFSET], + MOSAL_cpu_to_be32(p_wr->ds_array[i].lkey)); + WQE_IO_WRITE(&p_cur_loc[DATA_PTR_LOCAL_ADDR_H_DWORD_OFFSET], + MOSAL_cpu_to_be32((u_int32_t)(p_wr->ds_array[i].vaddr >> 32))); + WQE_IO_WRITE(&p_cur_loc[DATA_PTR_LOCAL_ADDR_L_DWORD_OFFSET], + MOSAL_cpu_to_be32((u_int32_t)p_wr->ds_array[i].vaddr)); + } + return p_cur_loc; +} + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c new file mode 100644 index 00000000..e7bf7b43 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.c @@ -0,0 +1,1171 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#define C_THHUL_SRQM_C
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "thhul_srqm.h"
+
+#ifndef MT_KERNEL
+/* instead of "ifdef"ing all over the code we define an empty macro */
+#define MOSAL_pci_phys_free_consistent(addr,sz) do {} while(0);
+#endif
+
+#define NULL_WQE_BUF_P ((void*)(-1)) /* Marks a resize in progress that was a shrinking request */
+
+/* Limit kmalloc to 4 pages */
+#define WQ_KMALLOC_LIMIT (4*MOSAL_SYS_PAGE_SIZE)
+#define SMALL_VMALLOC_AREA (1<<28) /* A VMALLOC area of 256MB or less is considered a scarce resource */
+
+#define WQE_ALIGN_SHIFT 6 /* WQE address must be aligned to 64 bytes */
+#define WQE_SZ_MULTIPLE_SHIFT 4 /* WQE size must be a multiple of 16 bytes */
+/* WQE segments sizes */
+#define WQE_SEG_SZ_NEXT (sizeof(struct wqe_segment_next_st)/8) /* NEXT segment */
+#define WQE_SEG_SZ_CTRL (sizeof(struct wqe_segment_ctrl_send_st)/8) /* CTRL segment */
+#define WQE_SEG_SZ_SG_ENTRY (sizeof(struct wqe_segment_data_ptr_st)/8)/* Scatter/Gather entry(ptr)*/
+#define WQE_SEG_SZ_SG_ENTRY_DW (sizeof(struct wqe_segment_data_ptr_st)/32)/* (same in DWORDs) */
+#define MAX_WQE_SZ 1008
+
+#define MAX_ALLOC_RETRY 3 /* Maximum retries to get a WQEs buffer that does not cross a 4GB boundary */
+
+#define SRQ_EMPTY_SENTRY_LKEY 1
+
+typedef struct THHUL_srq_wqe_buf_st {
+  MT_virt_addr_t wqe_buf; /* The buffer for this queue WQEs - aligned to WQE size */
+  u_int32_t max_outs; /* Max. outstanding (number of WQEs in buffer) */
+  /* log2_max_wqe_sz is common to all buffers of a SRQ (no change) */
+  void* wqe_buf_orig; /* If != NULL then resizing is in progress */
+  MT_bool used_virt_alloc;
+  MT_size_t wqe_buf_orig_size;
+} THHUL_srq_wqe_buf_t;
+
+struct THHUL_srq_st { /* SRQ context */
+  MT_virt_addr_t hca_virt_wqe_buf; /* The WQEs buffer base in HCA's virtual addr space */
+  /* Note: wqe_buf is in HCA virt. space */
+  MT_virt_addr_t real_virt_offset; /* Offset of real (OS) WQE virt. address from HCA's (resize)*/
+  VAPI_wr_id_t *wqe_id; /* Array of max_outs entries for holding each WQE ID (WQE index based) */
+  u_int32_t srqn; /* SRQ number/index */
+  u_int32_t cur_outs; /* Currently outstanding */
+  u_int32_t max_sentries; /* Max. scatter list size */
+  u_int8_t log2_max_wqe_sz; /* WQE size is a power of 2 (software implementation requirement) */
+  MT_virt_addr_t free_wqes_list; /* "next" of each WQE is put on the WQE beginning */
+  u_int32_t *wqe_draft;
+  MT_virt_addr_t last_posted_hca_va; /* Virtual addr of last posted WQE in HCA's addr space */
+  MOSAL_spinlock_t q_lock; /* Protect concurrent usage of the queue */
+  HHUL_pd_hndl_t pd;
+  THH_uar_t uar; /* UAR to use for this QP */
+  THHUL_srq_wqe_buf_t *cur_buf_p; /* Current WQEs buffer */
+  THHUL_srq_wqe_buf_t *resized_buf_p; /* Resized WQEs buffer */
+  MT_bool resize_in_progress; /* When set, a resize is in progress */
+  /* Note: Resizing may be virtually in progress while resized_buf_p==NULL, when shrinking SRQ */
+  struct THHUL_srq_st *next; /* SRQs list */
+};
+typedef struct THHUL_srq_st *THHUL_srq_t;
+
+struct THHUL_srqm_st { /* THHUL_srqm_t is a pointer to this */
+  struct THHUL_srq_st* srqs_list;
+  MOSAL_mutex_t srqm_lock;
+};
+
+/**********************************************************************************************
+ * Private functions prototypes declarations
+ **********************************************************************************************/
+static HH_ret_t init_srq(
+  HHUL_hca_hndl_t hca,
+  HHUL_pd_hndl_t pd,
+  u_int32_t max_sentries,
+  THHUL_srq_t new_srq
+);
+
+static HH_ret_t compute_wqe_sz(
+  /*IN*/ u_int32_t hca_max_sentries, /* HCA cap. of max. scatter entries for SRQs */
+  /*IN/OUT*/ THHUL_srq_t new_srq
+);
+
+static HH_ret_t alloc_wqe_buf(
+  /*IN*/ MT_bool in_ddr_mem, /* Allocation of WQEs buffer is requested in attached DDR mem. */
+  /*IN*/ u_int32_t hca_max_outs, /* HCA cap. */
+  /*IN*/ u_int32_t req_max_outs, /* Requested capabilities */
+  /*IN*/ u_int8_t log2_max_wqe_sz,
+  /*OUT*/ THHUL_srq_wqe_buf_t **buf_pp,
+  /*OUT*/ THH_srq_ul_resources_t *srq_ul_resources_p
+);
+
+static void free_wqe_buf(/*IN*/THHUL_srq_wqe_buf_t *buf_p);
+
+/* Append a WQEs buffer extension to the WQEs free list */
+/* Must be invoked with SRQ lock held */
+static void append_to_free_list(
+  /*IN*/ THHUL_srq_t srq,
+  /*IN*/ MT_virt_addr_t wqes_buf_extention, /* The part of the new WQEs buf above the old part */
+  /*IN*/ MT_size_t extention_sz_in_wqes /* Extension size in WQEs */
+);
+
+static HH_ret_t alloc_aux_data_buf(
+  /*IN/OUT*/ THHUL_srq_t new_srq
+);
+
+
+/**********************************************************************************************
+ * Private inline functions
+ **********************************************************************************************/
+
+/*********** WQE building functions ***********/
+
+/* Init a not-connected (invalid) "next" segment (i.e.
NDS=0) */ +inline static u_int32_t srqm_WQE_init_next(u_int32_t *wqe_buf) +{ + memset(wqe_buf,0,WQE_SEG_SZ_NEXT); + return WQE_SEG_SZ_NEXT; +} + +inline static u_int32_t srqm_WQE_pack_recv_next(u_int32_t *segment_p,u_int32_t next_wqe_32lsb) +{ + memset(segment_p,0,WQE_SEG_SZ_NEXT); /* Clear all "RESERVED" */ + segment_p[MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6)>>2]= ( next_wqe_32lsb & (~MASK32(6)) ) + | 1 ; /* LS-bit is set to work around bug #16159/16160/16161 */; + MT_INSERT_ARRAY32(segment_p,1, /* DBD always '1 for RQ */ + MT_BIT_OFFSET(wqe_segment_next_st,dbd),MT_BIT_SIZE(wqe_segment_next_st,dbd)); + MT_INSERT_ARRAY32(segment_p,0, /* NDS always 0 for SRQs */ + MT_BIT_OFFSET(wqe_segment_next_st,nds),MT_BIT_SIZE(wqe_segment_next_st,nds)); + return WQE_SEG_SZ_NEXT; +} + +/* Build the scatter list (pointer segments) */ +inline static u_int32_t WQE_pack_slist(u_int32_t *segment_p, + u_int32_t sg_lst_len,VAPI_sg_lst_entry_t *sg_lst_p, u_int32_t desc_sentries) +{ + u_int32_t i; + u_int32_t *cur_loc_p= segment_p; + + for (i= 0; i < sg_lst_len; i++ , cur_loc_p+= WQE_SEG_SZ_SG_ENTRY_DW) { + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,byte_count)>>2]= + (sg_lst_p[i].len & MASK32(31)); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,l_key)>>2]= sg_lst_p[i].lkey; + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_h)>>2]= + (u_int32_t)(sg_lst_p[i].addr >> 32); + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,local_address_l)>>2]= + (u_int32_t)(sg_lst_p[i].addr & 0xFFFFFFFF); + } + + for (;i < desc_sentries; i++ , cur_loc_p+= WQE_SEG_SZ_SG_ENTRY_DW) { + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,byte_count)>>2]= 0; + cur_loc_p[MT_BYTE_OFFSET(wqe_segment_data_ptr_st,l_key)>>2]= SRQ_EMPTY_SENTRY_LKEY; + } + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)segment_p)); +} + + +/* Pack Control segment (for receive work requests) */ +inline static u_int32_t WQE_pack_ctrl_recv(u_int32_t *segment_p, + VAPI_comp_type_t comp_type, u_int32_t event_bit) +{ + memset(segment_p,0,WQE_SEG_SZ_CTRL); /* Clear all "RESERVED" */ + MT_INSERT_ARRAY32(segment_p,(comp_type == VAPI_SIGNALED) ? 
1 : 0, + MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,c),MT_BIT_SIZE(wqe_segment_ctrl_recv_st,c)); + MT_INSERT_ARRAY32(segment_p,event_bit, + MT_BIT_OFFSET(wqe_segment_ctrl_recv_st,e),MT_BIT_SIZE(wqe_segment_ctrl_recv_st,e)); + return WQE_SEG_SZ_CTRL; +} + +inline static u_int32_t srqm_WQE_build_recv( + THHUL_srq_t srq, + VAPI_rr_desc_t *recv_req_p, + u_int32_t *wqe_buf +) +{ + u_int8_t *cur_loc_p= (u_int8_t*)wqe_buf; /* Current location in the WQE */ + + cur_loc_p+= srqm_WQE_init_next((u_int32_t*)cur_loc_p); /* Make "unlinked" "next" segment */ + cur_loc_p+= WQE_pack_ctrl_recv((u_int32_t*)cur_loc_p, + recv_req_p->comp_type, 0/*event bit*/); + /* Pack scatter list segments */ + cur_loc_p+= WQE_pack_slist((u_int32_t*)cur_loc_p,recv_req_p->sg_lst_len,recv_req_p->sg_lst_p, + srq->max_sentries); + + return (u_int32_t)(((MT_virt_addr_t)cur_loc_p) - ((MT_virt_addr_t)wqe_buf)); +} + + +/* Extract NDS directly from (big-endian) WQE */ +inline static u_int8_t srqm_WQE_extract_nds(volatile u_int32_t* wqe) +{ +/*** warning C4244: 'return' : conversion from 'unsigned long' to 'u_int8_t', possible loss of data ***/ + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,nds) >> 2]), + MT_BIT_OFFSET(wqe_segment_next_st,nds) & MASK32(5), + MT_BIT_SIZE(wqe_segment_next_st,nds) & MASK32(5)); +} + +/* Extract NDA directly from (big-endian) WQE */ +inline static u_int32_t srqm_WQE_extract_nda(volatile u_int32_t* wqe) +{ + return (MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,nda_31_6) >> 2]) & (~MASK32(6)) ); +} + +inline static u_int8_t srqm_WQE_extract_dbd(volatile u_int32_t* wqe) +{ +/*** warning C4244: 'return' : conversion from 'unsigned long' to 'u_int8_t', possible loss of data ***/ + return (u_int8_t)MT_EXTRACT32(MOSAL_be32_to_cpu(wqe[MT_BYTE_OFFSET(wqe_segment_next_st,dbd) >> 2]), + MT_BIT_OFFSET(wqe_segment_next_st,dbd) & MASK32(5), + MT_BIT_SIZE(wqe_segment_next_st,dbd) & MASK32(5)); +} + +/********************************************************************************************** + * Public API Functions (defined in thhul_hob.h) + **********************************************************************************************/ + +HH_ret_t THHUL_srqm_create( + THHUL_hob_t hob, + THHUL_srqm_t *srqm_p +) +{ + *srqm_p= (THHUL_srqm_t) MALLOC(sizeof(struct THHUL_srqm_st)); + if (*srqm_p == NULL) { + MTL_ERROR1("%s: Failed allocating THHUL_srqm_t.\n", __func__); + return HH_EAGAIN; + } + + (*srqm_p)->srqs_list= NULL; + MOSAL_mutex_init(&((*srqm_p)->srqm_lock)); + + return HH_OK; +} + + +HH_ret_t THHUL_srqm_destroy( + THHUL_srqm_t srqm +) +{ + + THHUL_srq_t srq; + + while (srqm->srqs_list) { + srq = srqm->srqs_list; + srqm->srqs_list= srq->next; + MTL_ERROR4(MT_FLFMT("%s: Releasing resource left-overs for SRQ 0x%X"), __func__, srq->srqn); + /* Free QP resources: Auxilary buffer + WQEs buffer */ + THH_SMART_FREE(srq->wqe_id, srq->cur_buf_p->max_outs * sizeof(VAPI_wr_id_t)); + free_wqe_buf(srq->cur_buf_p); + if (srq->resized_buf_p != NULL) free_wqe_buf(srq->resized_buf_p); + FREE(srq->wqe_draft); + FREE(srq); + } + + MOSAL_mutex_free(&(srqm->srqm_lock)); + FREE(srqm); + return HH_OK; +} + + +HH_ret_t THHUL_srqm_create_srq_prep( + /*IN*/ + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t pd, + u_int32_t max_outs, + u_int32_t max_sentries, + /*OUT*/ + HHUL_srq_hndl_t *srq_hndl_p, + u_int32_t *actual_max_outs_p, + u_int32_t *actual_max_sentries_p, + void /*THH_srq_ul_resources_t*/ *srq_ul_resources_p +) +{ + THHUL_srqm_t srqm; + THH_hca_ul_resources_t hca_ul_res; + 
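+  /* Overview (derived from the calls below): this prep stage computes the
+   * WQE stride (compute_wqe_sz may round max_sentries up to fill the
+   * power-of-2 WQE), allocates the WQEs buffer within a 4GB boundary, a
+   * wqe_draft scratch area and the WQE-ID auxiliary array, and reports the
+   * actual capabilities back to the caller; the kernel-dependent part is
+   * completed later in THHUL_srqm_create_srq_done(). */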
THH_srq_ul_resources_t *ul_res_p= (THH_srq_ul_resources_t*)srq_ul_resources_p; + THHUL_srq_t new_srq; + HH_ret_t rc; + THHUL_pdm_t pdm; + + rc= THHUL_hob_get_srqm(hca,&srqm); + if (rc != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: Invalid HCA handle (%p)."), __func__, hca); + return HH_EINVAL_HCA_HNDL; + } + rc= THHUL_hob_get_hca_ul_res(hca,&hca_ul_res); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THHUL_hob_get_hca_ul_res (%d=%s).\n"), __func__, + rc,HH_strerror_sym(rc)); + return rc; + } + + rc= THHUL_hob_get_pdm(hca,&pdm); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed THHUL_hob_get_pdm (%d=%s).\n"), __func__, + rc,HH_strerror_sym(rc)); + return rc; + } + + (new_srq)= (THHUL_srq_t)MALLOC(sizeof(struct THHUL_srq_st)); + if (new_srq == NULL) { + MTL_ERROR1(MT_FLFMT("%s: Failed allocating THHUL_srq_t."), __func__); + return HH_EAGAIN; + } + + rc= init_srq(hca,pd,max_sentries,new_srq); + if (rc != HH_OK) { + goto failed_init_srq; + } + + rc= compute_wqe_sz(hca_ul_res.max_num_sg_ent_srq, new_srq); + if (rc != HH_OK) goto failed_compute_wqe_sz; + + rc= alloc_wqe_buf(FALSE/*not in DDR*/,hca_ul_res.max_srq_ous_wr,max_outs, + new_srq->log2_max_wqe_sz,&new_srq->cur_buf_p, srq_ul_resources_p); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed allocating WQEs buffers."), __func__); + goto failed_alloc_wqe; + } + + new_srq->wqe_draft= (u_int32_t *)MALLOC((size_t)1 << new_srq->log2_max_wqe_sz); + if (new_srq->wqe_draft == NULL) { + MTL_ERROR2(MT_FLFMT("%s: Failed allocating %u bytes for SRQ's wqe draft"), __func__, + 1 << new_srq->log2_max_wqe_sz); + goto failed_wqe_draft; + } + + + rc= alloc_aux_data_buf(new_srq); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT("%s: Failed allocating auxilary buffers."), __func__); + goto failed_alloc_aux; + } + + /* Set output modifiers */ + *srq_hndl_p= new_srq; + *actual_max_outs_p= new_srq->cur_buf_p->max_outs; + *actual_max_sentries_p= new_srq->max_sentries; + rc= THH_uar_get_index(new_srq->uar,&(ul_res_p->uar_index)); + if (rc != HH_OK) { + MTL_ERROR1(MT_FLFMT(": Failed getting UAR index.\n")); + goto failed_uar_index; + } + /* wqe_buf data in srq_ul_resources_p is already set in alloc_wqe_buf */ + + /* update SRQs list */ + MOSAL_mutex_acq_ui(&(srqm->srqm_lock)); + new_srq->next= srqm->srqs_list; + srqm->srqs_list= new_srq; + MOSAL_mutex_rel(&(srqm->srqm_lock)); + + return HH_OK; + + /* Error cleanup */ + failed_uar_index: + THH_SMART_FREE(new_srq->wqe_id, new_srq->cur_buf_p->max_outs * sizeof(VAPI_wr_id_t)); + failed_alloc_aux: + FREE(new_srq->wqe_draft); + failed_wqe_draft: + free_wqe_buf(new_srq->cur_buf_p); + failed_alloc_wqe: + failed_compute_wqe_sz: + failed_init_srq: + FREE(new_srq); + return rc; +} + + +HH_ret_t THHUL_srqm_create_srq_done( + HHUL_hca_hndl_t hca, + HHUL_srq_hndl_t hhul_srq, + HH_srq_hndl_t hh_srq, + void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p +) +{ + THHUL_srqm_t srqm; + THHUL_srq_t srq= (THHUL_srq_t)hhul_srq; + THH_srq_ul_resources_t *ul_res_p= (THH_srq_ul_resources_t*)srq_ul_resources_p; + HH_ret_t rc; + + rc= THHUL_hob_get_srqm(hca,&srqm); + if (rc != HH_OK) { + MTL_ERROR4(MT_FLFMT("%s: Invalid HCA handle (%p)."), __func__, hca); + return HH_EINVAL_HCA_HNDL; + } + if (srq == NULL) { + MTL_ERROR4(MT_FLFMT("%s: NULL hhul_qp handle."), __func__); + return HH_EINVAL; + } + + if (srq->cur_buf_p->wqe_buf_orig == NULL) { /* WQEs buffer allocated in DDR mem. 
by THH_qpm */
+    if (ul_res_p->wqes_buf == 0) {
+      MTL_ERROR1(MT_FLFMT("%s: Got NULL WQEs buffer from qp_ul_res for new srqn=0x%X."), __func__,
+        hh_srq);
+      return HH_EINVAL;
+    }
+    /* Set the per queue resources */
+    srq->cur_buf_p->wqe_buf= MT_UP_ALIGNX_VIRT(ul_res_p->wqes_buf,srq->log2_max_wqe_sz);
+    if (srq->cur_buf_p->wqe_buf != ul_res_p->wqes_buf) {
+      MTL_ERROR1(
+        "THHUL_srqm_create_srq_done: Buffer allocated by THH_qpm ("VIRT_ADDR_FMT") "
+        "is not aligned to RQ WQE size (%d bytes).\n",
+        ul_res_p->wqes_buf,1<<srq->log2_max_wqe_sz);
+      return HH_EINVAL;
+    }
+  }
+
+  srq->hca_virt_wqe_buf= srq->cur_buf_p->wqe_buf;/* first buffer is the HCA's virt. addr. */
+
+  /* Create free WQEs list of wqe_buf */
+  append_to_free_list(srq, srq->cur_buf_p->wqe_buf,srq->cur_buf_p->max_outs);
+
+  srq->srqn= hh_srq;
+
+  MTL_DEBUG4(MT_FLFMT("%s: srqn=0x%X buf_p="VIRT_ADDR_FMT" sz=0x%X"), __func__,
+    srq->srqn, srq->cur_buf_p->wqe_buf,
+    (1 << srq->log2_max_wqe_sz) * srq->cur_buf_p->max_outs);
+
+  return HH_OK;
+}
+
+HH_ret_t THHUL_srqm_modify_srq_prep(
+  /*IN*/ HHUL_hca_hndl_t hca,
+  /*IN*/ HHUL_srq_hndl_t hhul_srq,
+  /*IN*/ VAPI_srq_attr_t *srq_attr_p,
+  /*IN*/ VAPI_srq_attr_mask_t srq_attr_mask,
+  /*OUT*/ void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p
+)
+{
+  THHUL_srqm_t srqm;
+  THHUL_srq_t srq= (THHUL_srq_t)hhul_srq;
+  THH_srq_ul_resources_t *srq_ul_res_p= (THH_srq_ul_resources_t*)srq_ul_resources_p;
+  THH_hca_ul_resources_t hca_ul_res;
+  THHUL_srq_wqe_buf_t *new_buf_p;
+  HH_ret_t rc;
+
+  /* Parameters check */
+  if (srq_attr_mask != VAPI_SRQ_ATTR_MAX_OUTS_WR) {
+    MTL_ERROR1(MT_FLFMT(
+      "%s: Only the VAPI_SRQ_ATTR_MAX_OUTS_WR flag is supported (got srq_attr_mask=0x%X)"),
+      __func__, srq_attr_mask);
+    return HH_ENOSYS;
+  }
+  rc= THHUL_hob_get_srqm(hca,&srqm);
+  if (rc != HH_OK) {
+    MTL_ERROR4(MT_FLFMT("%s: Invalid HCA handle (%p)."), __func__, hca);
+    return HH_EINVAL_HCA_HNDL;
+  }
+  if (srq == NULL) {
+    MTL_ERROR4(MT_FLFMT("%s: NULL hhul_srq handle."), __func__);
+    return HH_EINVAL;
+  }
+  rc= THHUL_hob_get_hca_ul_res(hca,&hca_ul_res);
+  if (rc != HH_OK) {
+    MTL_ERROR2(MT_FLFMT("%s: Failed THHUL_hob_get_hca_ul_res (%d=%s).\n"), __func__,
+      rc,HH_strerror_sym(rc));
+    return rc;
+  }
+
+  if (srq_attr_p->max_outs_wr > srq->cur_buf_p->max_outs) {
+    rc= alloc_wqe_buf(FALSE/*not in DDR*/,hca_ul_res.max_srq_ous_wr,srq_attr_p->max_outs_wr,
+      srq->log2_max_wqe_sz,&new_buf_p, srq_ul_res_p);
+    if (rc != HH_OK) {
+      MTL_ERROR1(MT_FLFMT("%s: Failed allocating WQEs buffers."), __func__);
+      return rc;
+    }
+  } else { /* Shrinking implies no change for the current implementation */
+    new_buf_p= NULL;
+    /* Signal to THH_srqm that there is nothing to change (all fields of ul_res set to 0) */
+    memset(srq_ul_res_p,0,sizeof(THH_srq_ul_resources_t));
+  }
+
+  MOSAL_spinlock_dpc_lock(&(srq->q_lock));
+  if (srq->resize_in_progress) {
+    MOSAL_spinlock_unlock(&(srq->q_lock));
+    MTL_ERROR1(MT_FLFMT("%s: Invoked while a resize is in progress (SRQn=0x%X)"), __func__,
+      srq->srqn);
+    if (new_buf_p != NULL) free_wqe_buf(new_buf_p);
+    return HH_EBUSY;
+  }
+
+  srq->resized_buf_p= new_buf_p;
+  if (srq->resized_buf_p != NULL) {
+    /* Duplicate the WQEs buffer image into the resized buffer */
+    memcpy((void*)srq->resized_buf_p->wqe_buf,(void*)srq->cur_buf_p->wqe_buf,
+      srq->cur_buf_p->max_outs << srq->log2_max_wqe_sz);
+  }
+  srq->resize_in_progress= TRUE;
+
+  MOSAL_spinlock_unlock(&(srq->q_lock));
+
+  return HH_OK;
+}
+
+
+HH_ret_t THHUL_srqm_modify_srq_done(
+  /*IN*/ HHUL_hca_hndl_t hca,
+  /*IN*/ HHUL_srq_hndl_t hhul_srq,
+  /*IN*/ void/*THH_srq_ul_resources_t*/
*srq_ul_resources_p,
+  /*OUT*/ u_int32_t *max_outs_wr_p /* Max. outstanding WQEs */
+)
+{
+  THHUL_srqm_t srqm;
+  THHUL_srq_t srq= (THHUL_srq_t)hhul_srq;
+  THH_srq_ul_resources_t *ul_res_p= (THH_srq_ul_resources_t*)srq_ul_resources_p;
+  HH_ret_t rc= HH_OK;
+  THHUL_srq_wqe_buf_t *freed_wqe_buf_p= NULL;
+  VAPI_wr_id_t *new_wqe_id_array,*freed_wqe_id_array= NULL;
+
+  rc= THHUL_hob_get_srqm(hca,&srqm);
+  if (rc != HH_OK) {
+    MTL_ERROR4(MT_FLFMT("%s: Invalid HCA handle (%p)."), __func__, hca);
+    return HH_EINVAL_HCA_HNDL;
+  }
+  if (srq == NULL) {
+    MTL_ERROR4(MT_FLFMT("%s: NULL hhul_srq handle."), __func__);
+    return HH_EINVAL_SRQ_HNDL;
+  }
+
+
+  if (! srq->resize_in_progress) {
+    MOSAL_spinlock_unlock(&(srq->q_lock));
+    MTL_ERROR1(MT_FLFMT("%s: Invoked while NO resize is in progress (SRQn=0x%X)"), __func__,
+      srq->srqn);
+    return HH_EFATAL;
+  }
+
+  if (srq->resized_buf_p == NULL) { /* Shrinking */
+    /* When "shrinking", no resources were allocated - nothing to free */
+    goto clean_exit;
+  }
+
+  freed_wqe_buf_p= srq->resized_buf_p; /* Free the resized buffer upon any of the following errors */
+
+  if (ul_res_p == NULL) { /* Failure - roll back the modify_prep */
+    MTL_ERROR4(MT_FLFMT("%s: Got failure notification from VIPKL_modify_srq (SRQn=0x%X)"),__func__,
+      srq->srqn);
+    rc= HH_OK; /* No problem in rolling back the modify_prep... */
+    goto clean_exit;
+  }
+
+  if (srq->resized_buf_p->wqe_buf_orig == NULL) { /* WQEs buffer allocated in DDR mem. by THH_qpm */
+    if (ul_res_p->wqes_buf == 0) {
+      MTL_ERROR1(MT_FLFMT("%s: Got NULL WQEs buffer from qp_ul_res for new srqn=0x%X."), __func__,
+        srq->srqn);
+      rc= HH_EAGAIN;
+      goto clean_exit;
+    }
+    /* Set the per queue resources */
+    srq->resized_buf_p->wqe_buf= MT_UP_ALIGNX_VIRT(ul_res_p->wqes_buf,srq->log2_max_wqe_sz);
+    if (srq->resized_buf_p->wqe_buf != ul_res_p->wqes_buf) {
+      MTL_ERROR1(
+        "%s: Buffer allocated by THH_qpm ("VIRT_ADDR_FMT") "
+        "is not aligned to RQ WQE size (%d bytes).\n", __func__,
+        ul_res_p->wqes_buf,1<<srq->log2_max_wqe_sz);
+      return HH_EFATAL; /* Free nothing - inconsistency problem */
+    }
+  }
+
+  new_wqe_id_array= (VAPI_wr_id_t*)
+    THH_SMART_MALLOC(srq->resized_buf_p->max_outs * sizeof(VAPI_wr_id_t));
+  if (new_wqe_id_array == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed allocating SRQ auxiliary buffer (for 0x%X WQEs IDs)."), __func__,
+      srq->resized_buf_p->max_outs);
+    rc= HH_EAGAIN;
+    /* Failing to extend the WQEs ID buffer still requires using the new buffer
+     * (HCA already moved to new buffer).
So we define it with the same size as current */ + srq->resized_buf_p->max_outs= srq->cur_buf_p->max_outs; + new_wqe_id_array= srq->wqe_id; /* use original */ + } + + MOSAL_spinlock_dpc_lock(&(srq->q_lock)); + if (new_wqe_id_array != srq->wqe_id) { + /* Copy old WQE IDs to new buffer */ + memcpy(new_wqe_id_array, srq->wqe_id, srq->cur_buf_p->max_outs * sizeof(VAPI_wr_id_t)); + freed_wqe_id_array= srq->wqe_id; /* save to free outside of spinlock */ + srq->wqe_id= new_wqe_id_array; + } + + freed_wqe_buf_p= srq->cur_buf_p; /* save to free outside of spinlock */ + srq->cur_buf_p= srq->resized_buf_p; /* Set resized as cur_buf */ + srq->real_virt_offset= srq->cur_buf_p->wqe_buf - srq->hca_virt_wqe_buf; + + /* Add to free WQEs the new WQEs */ + append_to_free_list(srq, + srq->cur_buf_p->wqe_buf + + (freed_wqe_buf_p->max_outs << srq->log2_max_wqe_sz), + srq->cur_buf_p->max_outs - freed_wqe_buf_p->max_outs); + + MOSAL_spinlock_unlock(&(srq->q_lock)); + + clean_exit: + /* Set output modifier */ + if (max_outs_wr_p != NULL) *max_outs_wr_p= srq->cur_buf_p->max_outs; + /* Free unused resources */ + if (freed_wqe_id_array != NULL) { + THH_SMART_FREE(freed_wqe_id_array, freed_wqe_buf_p->max_outs * sizeof(VAPI_wr_id_t)); + } + if (freed_wqe_buf_p != NULL) { + free_wqe_buf(freed_wqe_buf_p); + } + srq->resized_buf_p= NULL; + srq->resize_in_progress= FALSE; + + MTL_DEBUG4(MT_FLFMT("%s: srqn=0x%X buf_p="VIRT_ADDR_FMT" sz=0x%X"), __func__, + srq->srqn, srq->cur_buf_p->wqe_buf, + (1 << srq->log2_max_wqe_sz) * srq->cur_buf_p->max_outs); + + return rc; +} + +HH_ret_t THHUL_srqm_destroy_srq_done( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_srq +) +{ + THHUL_srqm_t srqm; + THHUL_srq_t srq= (THHUL_srq_t)hhul_srq; + THHUL_srq_t cur_srq,prev_srq; + HH_ret_t rc; + + rc= THHUL_hob_get_srqm(hca,&srqm); + if (rc != HH_OK) { + MTL_ERROR4("%s: Invalid HCA handle (%p).", __func__, hca); + return HH_EINVAL_HCA_HNDL; + } + + /* update SRQs list */ + MOSAL_mutex_acq_ui(&(srqm->srqm_lock)); + /* find SRQ in SRQs list */ + for (prev_srq= NULL, cur_srq= srqm->srqs_list; + (cur_srq != NULL) && (cur_srq != srq); + prev_srq= cur_srq , cur_srq= cur_srq->next); + if (cur_srq == NULL) { + MOSAL_mutex_rel(&(srqm->srqm_lock)); + MTL_ERROR2(MT_FLFMT("%s: Could not find given SRQ (hndl=0x%p , srqn=0x%X)"), __func__, + srq, srq->srqn); + return HH_EINVAL_SRQ_HNDL; + } + /* remove SRQ from list */ + if (prev_srq != NULL) prev_srq->next= srq->next; + else srqm->srqs_list= srq->next; + MOSAL_mutex_rel(&(srqm->srqm_lock)); + + /* Free SRQ resources: Auxilary buffer + WQEs buffer + WQE draft + SRQ object */ + MTL_DEBUG4(MT_FLFMT("Freeing user level WQE-IDs auxilary buffers")); + THH_SMART_FREE(srq->wqe_id, srq->cur_buf_p->max_outs * sizeof(VAPI_wr_id_t)); + free_wqe_buf(srq->cur_buf_p); + FREE(srq->wqe_draft); + FREE(srq); + + return HH_OK; +} + + + +HH_ret_t THHUL_srqm_post_recv_reqs( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_srq_hndl_t hhul_srq, + /*IN*/ u_int32_t num_of_requests, + /*IN*/ VAPI_rr_desc_t *recv_req_array, + /*OUT*/ u_int32_t *posted_requests_p + ) +{ + THHUL_srq_t srq= (THHUL_srq_t)hhul_srq; + u_int32_t* wqe_draft= srq->wqe_draft; + u_int32_t next_draft[WQE_SEG_SZ_NEXT>>2]; /* Build "next" segment here */ + volatile u_int32_t* next_wqe= NULL; /* Actual WQE pointer */ + volatile u_int32_t* resized_next_wqe= NULL; + MT_virt_addr_t next_wqe_hca_virt, resized_virt_offset=0; + volatile u_int32_t* prev_wqe_p; + volatile u_int32_t* resized_prev_wqe_p= NULL; + u_int32_t wqe_sz_dwords= 0; + u_int32_t i,reqi,next2post_index; + 
THH_uar_recvq_dbell_t rq_dbell; + HH_ret_t ret= HH_OK; + + *posted_requests_p= 0; + if (num_of_requests == 0) return HH_OK; /* nothing to do */ + + /* Init. invariant RQ doorbell fields */ + rq_dbell.qpn= srq->srqn; + rq_dbell.next_size= 0; /* For SRQs, NDS comes from SRQC */ + rq_dbell.credits= 0; /* For 256 WQEs quantums */ + + MOSAL_spinlock_dpc_lock(&(srq->q_lock)); /* protect wqe_draft as well as WQE allocation/link */ + + prev_wqe_p= (srq->last_posted_hca_va == 0) ? + NULL : + (void*)(srq->last_posted_hca_va + srq->real_virt_offset); + rq_dbell.next_addr_32lsb= (u_int32_t)srq->free_wqes_list; /* For first chain */ + + if (srq->resized_buf_p != NULL) { + resized_virt_offset= srq->resized_buf_p->wqe_buf - srq->hca_virt_wqe_buf; + resized_prev_wqe_p= (srq->last_posted_hca_va == 0) ? + NULL : + (void*)(srq->last_posted_hca_va + resized_virt_offset); + } + + /* Build and link all WQEs */ + for (reqi= 0; (reqi < num_of_requests) ; reqi++) { + + if (srq->free_wqes_list == 0) { + MTL_ERROR2(MT_FLFMT( + "%s: Posting only %u requests out of %u"), __func__, *posted_requests_p, num_of_requests); + ret= HH_EAGAIN; + break; + } + + if (srq->max_sentries < recv_req_array[reqi].sg_lst_len) { + MTL_ERROR2(MT_FLFMT( + "%s: Scatter list of req. #%u is too large (%u entries > max_sg_sz=%u)"), __func__, + reqi,recv_req_array[reqi].sg_lst_len,srq->max_sentries); + ret= HH_EINVAL_SG_NUM; + break; + } + + if (recv_req_array[reqi].opcode != VAPI_RECEIVE) { + MTL_ERROR2(MT_FLFMT( + "%s: Invalid opcode (%d=%s)in request #%d"), __func__, + recv_req_array[reqi].opcode, VAPI_wr_opcode_sym(recv_req_array[reqi].opcode), reqi); + ret= HH_EINVAL_OPCODE; + break; + } + + /* Build WQE */ + wqe_sz_dwords= (srqm_WQE_build_recv(srq,recv_req_array+reqi,wqe_draft) >> 2); + #ifdef MAX_DEBUG + if ((wqe_sz_dwords<<2) > (1U << srq->log2_max_wqe_sz)) { + MTL_ERROR1(MT_FLFMT("%s: SRQ 0x%X: WQE too large (%d > max=%d)"), __func__, + srq->srqn,(wqe_sz_dwords<<2),(1U << srq->log2_max_wqe_sz)); + } + #endif + + /* Allocate next WQE */ + next_wqe_hca_virt= srq->free_wqes_list ; + next_wqe= (volatile u_int32_t*)(next_wqe_hca_virt + srq->real_virt_offset); + srq->free_wqes_list= *((MT_virt_addr_t*)next_wqe);/* next WQE is in the WQE (when free) */ + /* Save WQE ID */ + next2post_index= (u_int32_t)(((u_int8_t*)next_wqe - (u_int8_t*)srq->cur_buf_p->wqe_buf) >> + srq->log2_max_wqe_sz); + MTL_DEBUG6(MT_FLFMT("%s: SRQ 0x%X posting WQE at index %u (real_va=%p, hca_va=" + VIRT_ADDR_FMT")"), __func__, + srq->srqn, next2post_index, next_wqe, next_wqe_hca_virt); //DEBUG + srq->wqe_id[next2post_index]= recv_req_array[reqi].id; /* Save WQE ID */ + + /* copy (while swapping,if needed) the wqe_draft to the actual WQE */ + /* TBD: for big-endian machines we can optimize here and use memcpy */ + for (i= 0; i < wqe_sz_dwords; i++) { + next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]); + } + + if (srq->resized_buf_p != NULL) { + resized_next_wqe= (volatile u_int32_t*)(next_wqe_hca_virt + resized_virt_offset); + for (i= 0; i < wqe_sz_dwords; i++) { /* Copy WQE to resized buffer */ + resized_next_wqe[i]= MOSAL_cpu_to_be32(wqe_draft[i]); + } + } + + if (prev_wqe_p != NULL) { + /* Update "next" segment of previous WQE */ + /* Build linking "next" segment in last posted WQE */ + srqm_WQE_pack_recv_next(next_draft, (u_int32_t)next_wqe_hca_virt); + for (i= 0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { + /* This copy assures big-endian as well as that DBD/NDS is written last */ + prev_wqe_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + if (srq->resized_buf_p != NULL) { + for (i= 
0;i < (WQE_SEG_SZ_NEXT>>2) ;i++) { /* Link in resized buffer, too */ + resized_prev_wqe_p[i]= MOSAL_cpu_to_be32(next_draft[i]); + } + } + } + + prev_wqe_p= next_wqe; + resized_prev_wqe_p= resized_next_wqe; + + (*posted_requests_p)++; + + if (((*posted_requests_p) & 0xFF) == 0) { /* ring RQ doorbell every 256 WQEs */ + THH_uar_recvq_dbell(srq->uar,&rq_dbell); + rq_dbell.next_addr_32lsb= (u_int32_t)srq->free_wqes_list; /* For next chain */ + } + } + + if (((*posted_requests_p) & 0xFF) != 0) { /* left-overs (less than 256 WQEs) */ + rq_dbell.credits= (*posted_requests_p) & 0xFF; + THH_uar_recvq_dbell(srq->uar,&rq_dbell); + } + + srq->last_posted_hca_va= ((MT_virt_addr_t)prev_wqe_p) - srq->real_virt_offset; + srq->cur_outs+= *posted_requests_p; /* redundant info - for debug */ + + MOSAL_spinlock_unlock(&(srq->q_lock)); + return ret; + +} + + + +/* Release this WQE only and return its WQE ID */ +HH_ret_t THHUL_srqm_comp( + THHUL_srqm_t srqm, + HHUL_srq_hndl_t hhul_srq, + u_int32_t wqe_addr_32lsb, + VAPI_wr_id_t *wqe_id_p +) +{ + THHUL_srq_t srq= (THHUL_srq_t)hhul_srq; + u_int32_t wqes_base_32lsb= (u_int32_t)(srq->hca_virt_wqe_buf & 0xFFFFFFFF) ; + u_int32_t freed_wqe_index; + MT_virt_addr_t wqe_buf_h= sizeof(u_int32_t*) > 4 ? (srq->hca_virt_wqe_buf >> 32) << 32 : 0; + MT_virt_addr_t wqe_addr= wqe_buf_h | wqe_addr_32lsb; + + if (wqe_addr_32lsb < wqes_base_32lsb) { + MTL_ERROR1(MT_FLFMT("%s: Got wqe_addr_32lsb=0x%X < wqes_base_32lsb=0x%X"), __func__, + wqe_addr_32lsb, wqes_base_32lsb); + return HH_EINVAL; + } + if (wqe_addr_32lsb & MASK32(srq->log2_max_wqe_sz)) { + MTL_ERROR1(MT_FLFMT( + "%s: Got wqe_addr_32lsb=0x%X which is not aligned to WQE size/stride 2^%u"), + __func__, wqe_addr_32lsb, srq->log2_max_wqe_sz); + return HH_EINVAL; + } + + freed_wqe_index= (wqe_addr_32lsb - wqes_base_32lsb) >> srq->log2_max_wqe_sz; + + MOSAL_spinlock_dpc_lock(&(srq->q_lock)); + if (freed_wqe_index > srq->cur_buf_p->max_outs) { + MTL_ERROR1(MT_FLFMT("%s: Got wqe_addr_32lsb=0x%X which is WQE index 0x%X " + "(max_outs=0x%X , wqes_base_32lsb=0x%X , log2_max_wqe_sz=0x%X)"), + __func__, wqe_addr_32lsb, freed_wqe_index, + srq->cur_buf_p->max_outs, wqes_base_32lsb, srq->log2_max_wqe_sz); + MOSAL_spinlock_unlock(&(srq->q_lock)); + return HH_EINVAL; + } + + /* Get WQE ID from auxilary buffer */ + *wqe_id_p= srq->wqe_id[freed_wqe_index]; + + /* Return WQE to free list */ + *((MT_virt_addr_t*)(wqe_addr+srq->real_virt_offset))= + srq->free_wqes_list; /* Link WQE to first in free list */ + srq->free_wqes_list= wqe_addr; /* Put as first in free list */ + srq->cur_outs --; /* (for debug purpose) */ + if (wqe_addr == srq->last_posted_hca_va) { + /* After WQE put in the free list, we should not link it to next WQE */ + srq->last_posted_hca_va= 0; + } + MOSAL_spinlock_unlock(&(srq->q_lock)); + + return HH_OK; +} + + + +/********************************************************************************************** + * Private Functions + **********************************************************************************************/ + + +/* Allocate THHUL_srq_t object and initialize it */ +static HH_ret_t init_srq( + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t pd, + u_int32_t max_sentries, + THHUL_srq_t new_srq +) +{ + HH_ret_t rc; + THHUL_pdm_t pdm; + + memset(new_srq,0,sizeof(struct THHUL_srq_st)); + + rc= THHUL_hob_get_uar(hca,&(new_srq->uar)); + if (rc != HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed getting THHUL_hob's UAR (%d=%s)."), + __func__,rc,HH_strerror_sym(rc)); + return rc; + } + rc= THHUL_hob_get_pdm(hca,&pdm); + if (rc != 
HH_OK) { + MTL_ERROR2(MT_FLFMT("%s: Failed getting THHUL_hob_get_pdm's UAR (%d=%s)."), + __func__,rc,HH_strerror_sym(rc)); + return rc; + } + + new_srq->srqn= 0xFFFFFFFF; /* Init to invalid SRQ num. until create_qp_done is invoked */ + new_srq->pd= pd; + new_srq->max_sentries= max_sentries; + MOSAL_spinlock_init(&(new_srq->q_lock)); + + return HH_OK; +} + +inline static MT_bool srqm_within_4GB(void* base, MT_size_t bsize) +{ + u_int64_t start_addr; + u_int64_t end_addr; + + if (sizeof(MT_virt_addr_t) <=4) return TRUE; /* For 32 bits machines no check is required */ + start_addr= (u_int64_t)(MT_virt_addr_t)base; + end_addr= start_addr+bsize-1; + return ((start_addr >> 32) == (end_addr >> 32)); /* TRUE if 32 MS-bits equal */ + +} + +inline static void* srqm_malloc_within_4GB(MT_size_t bsize, MT_bool *used_virt_alloc_p) +{ + void* buf[MAX_ALLOC_RETRY]={NULL}; + MT_bool used_virt_alloc[MAX_ALLOC_RETRY]; + int i,j; + + for (i= 0; i < MAX_ALLOC_RETRY; i++) { /* Retry to avoid crossing 4GB */ +#if defined(MT_KERNEL) && defined(__LINUX__) + /* Consider using low memory (kmalloc) up to WQ_KMALLOC_LIMIT or for small vmalloc area */ + if (bsize <= WQ_KMALLOC_LIMIT) { + buf[i]= (void*)MOSAL_pci_phys_alloc_consistent(bsize,0); /* try to use kmalloc */ + used_virt_alloc[i]= FALSE; + } + if (buf[i] == NULL) /* failed kmalloc, or did not even try it */ +#endif + { + buf[i]= (void*)MOSAL_pci_virt_alloc_consistent(bsize, 0); //TODO: must pass proper alignment here. For now thhul_qpm is unused in Darwin. + used_virt_alloc[i]= TRUE; + } + if (buf[i] == NULL) { + MTL_ERROR3("srqm_malloc_within_4GB: Failed allocating buffer of "SIZE_T_FMT" bytes (iteration %d).\n", + bsize,i); + /* Free previously allocated buffers if any*/ + for (j= i; j > 0; j--) { + if (used_virt_alloc[j-1]) { + MOSAL_pci_virt_free_consistent(buf[j-1], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[j-1], bsize); + } + } + return NULL; + } + if (srqm_within_4GB(buf[i],bsize)) break; + } + if (i == MAX_ALLOC_RETRY) { /* Failed */ + MTL_ERROR2("srqm_malloc_within_4GB: Failed allocating buffer of "SIZE_T_FMT" bytes within 4GB boundry " + "(%d retries).\n", bsize, MAX_ALLOC_RETRY); + /* Free all allocated buffers */ + for (i= 0; i < MAX_ALLOC_RETRY; i++) { + if (used_virt_alloc[i]) { + MOSAL_pci_virt_free_consistent(buf[i], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[i], bsize); + } + } + return NULL; + } + /* Free disqualified buffers if any */ + for (j= i; j > 0; j--) { + if (used_virt_alloc[j-1]) { + MOSAL_pci_virt_free_consistent(buf[j-1], bsize); + } else { + MOSAL_pci_phys_free_consistent(buf[j-1], bsize); + } + } + + *used_virt_alloc_p= used_virt_alloc[i]; + return buf[i]; /* This is the one buffer which does not cross 4GB boundry */ +} + +/* Compute needed WQE size and set new_srq->log2_wqe_sz */ +static HH_ret_t compute_wqe_sz( + /*IN*/ u_int32_t hca_max_sentries, /* HCA cap. of max.scatter entries for SRQs */ + /*IN/OUT*/ THHUL_srq_t new_srq +) +{ + u_int32_t wqe_sz,wqe_base_sz; + + /* Check requested capabilities */ + if (new_srq->max_sentries > hca_max_sentries) { + MTL_ERROR2(MT_FLFMT( + "%s: Got request for %u scatter entries (HCA cap. 
for SRQ is %u scatter entries)"),
+      __func__, new_srq->max_sentries, hca_max_sentries);
+    return HH_E2BIG_SG_NUM;
+  }
+
+  /* Compute RQ WQE requirements */
+  wqe_base_sz= WQE_SEG_SZ_NEXT + WQE_SEG_SZ_CTRL;
+  wqe_sz= wqe_base_sz + (new_srq->max_sentries * WQE_SEG_SZ_SG_ENTRY);
+  if (wqe_sz > MAX_WQE_SZ) {
+    MTL_ERROR2(
+      MT_FLFMT("required SRQ capabilities (max_sentries=%d) would require too large a WQE (%u bytes)"),
+      new_srq->max_sentries, wqe_sz);
+    return HH_E2BIG_SG_NUM;
+  }
+  new_srq->log2_max_wqe_sz= ceil_log2(wqe_sz); /* Align to next power of 2 */
+  /* A WQE must be aligned to 64B (WQE_ALIGN_SHIFT) so we take at least this size */
+  if (new_srq->log2_max_wqe_sz < WQE_ALIGN_SHIFT)
+    new_srq->log2_max_wqe_sz= WQE_ALIGN_SHIFT;
+
+  wqe_sz= (1 << new_srq->log2_max_wqe_sz);
+  MTL_DEBUG4(MT_FLFMT("%s: Allocating SRQ WQE of size %d."), __func__, wqe_sz);
+
+  /* Compute the real number of s/g entries based on the rounded-up WQE size */
+  new_srq->max_sentries= (wqe_sz - wqe_base_sz) / WQE_SEG_SZ_SG_ENTRY;
+  /* Make sure we do not exceed the reported HCA cap. */
+  new_srq->max_sentries= (new_srq->max_sentries > hca_max_sentries) ?
+    hca_max_sentries : new_srq->max_sentries;
+
+  return HH_OK;
+}
+
+
+/* Allocate the WQEs buffer for the SRQ */
+/* This function should be invoked after queue properties are set by alloc_init_qp */
+static HH_ret_t alloc_wqe_buf(
+  /*IN*/ MT_bool in_ddr_mem, /* Allocation of WQEs buffer is requested in attached DDR mem. */
+  /*IN*/ u_int32_t hca_max_outs, /* HCA cap. */
+  /*IN*/ u_int32_t req_max_outs, /* Requested capabilities */
+  /*IN*/ u_int8_t log2_max_wqe_sz,
+  /*OUT*/ THHUL_srq_wqe_buf_t **buf_pp,
+  /*OUT*/ THH_srq_ul_resources_t *srq_ul_resources_p
+)
+{
+  u_int32_t wqe_sz= 1 << log2_max_wqe_sz;
+  u_int32_t buf_sz= req_max_outs << log2_max_wqe_sz;
+  THHUL_srq_wqe_buf_t *new_buf_p;
+
+  /* Check requested capabilities */
+  if ((req_max_outs == 0) || (req_max_outs > hca_max_outs)) {
+    MTL_ERROR3(MT_FLFMT("%s: Got a request for a SRQ with %u WQEs - rejecting !"), __func__,
+      req_max_outs);
+    return HH_E2BIG_WR_NUM;
+  }
+
+  new_buf_p= TMALLOC(THHUL_srq_wqe_buf_t);
+  if (new_buf_p == NULL) {
+    MTL_ERROR2(MT_FLFMT("%s: Failed allocating new WQEs buffer context"), __func__);
+    return HH_EAGAIN;
+  }
+
+  if (in_ddr_mem) { /* WQEs buffer is to be allocated by THH_srqm in the attached DDR memory */
+    new_buf_p->wqe_buf_orig= NULL;
+    srq_ul_resources_p->wqes_buf= 0; /* Allocate in attached DDR memory */
+  } else { /* Allocate WQEs buffer in main memory */
+    /* Assure the buffer covers whole pages (no sharing of locked memory with other data) */
+    new_buf_p->wqe_buf_orig_size =
+      (MOSAL_SYS_PAGE_SIZE-1)/* For alignment */+MT_UP_ALIGNX_U32(buf_sz, MOSAL_SYS_PAGE_SHIFT);
+    /* Prevent other data from residing in the last page of the buffer... */
+    /* cover the last page (the last WQE can begin at the last page and its size is 64B min.)*/
+
+    new_buf_p->wqe_buf_orig= srqm_malloc_within_4GB(new_buf_p->wqe_buf_orig_size,&new_buf_p->used_virt_alloc);
+    if (new_buf_p->wqe_buf_orig == NULL) {
+      MTL_ERROR2(MT_FLFMT("%s: Failed allocation of WQEs buffer of "SIZE_T_FMT" bytes within "
+        "4GB boundaries."), __func__, new_buf_p->wqe_buf_orig_size);
+      FREE(new_buf_p);
+      return HH_EAGAIN;
+    }
+
+    /* Page alignment assures that the WQE buffer is aligned to WQE size.
+     * In addition, after resizing the SRQ the new wqe_buf has the same alignment
+     * in the page (to retain original virtual addresses)
+     */
+    new_buf_p->wqe_buf= MT_UP_ALIGNX_VIRT((MT_virt_addr_t)(new_buf_p->wqe_buf_orig),
+      MOSAL_SYS_PAGE_SHIFT);
+
+    srq_ul_resources_p->wqes_buf= new_buf_p->wqe_buf;
+  }
+  new_buf_p->max_outs= req_max_outs; /* Allocated exactly as requested */
+
+  srq_ul_resources_p->wqes_buf_sz= buf_sz;
+  srq_ul_resources_p->wqe_sz= wqe_sz;
+
+  *buf_pp= new_buf_p;
+  return HH_OK;
+}
+
+static void free_wqe_buf(THHUL_srq_wqe_buf_t *buf_p)
+{
+  if (buf_p->wqe_buf_orig != NULL) {/* WQEs buffer was allocated in process mem. */
+    if (buf_p->used_virt_alloc)
+      MOSAL_pci_virt_free_consistent(buf_p->wqe_buf_orig, buf_p->wqe_buf_orig_size);
+    else
+      MOSAL_pci_phys_free_consistent(buf_p->wqe_buf_orig, buf_p->wqe_buf_orig_size);
+  }
+  FREE(buf_p);
+}
+
+/* Append a WQEs buffer extension to the WQEs free list */
+/* Must be invoked with SRQ lock held */
+static void append_to_free_list(
+  /*IN*/ THHUL_srq_t srq,
+  /*IN*/ MT_virt_addr_t wqes_buf_extention, /* The part of the new WQEs buf above the old part */
+  /*IN*/ MT_size_t extention_sz_in_wqes /* Extension size in WQEs */
+)
+{
+  u_int32_t i;
+  MT_virt_addr_t cur_wqe;
+  const u_int32_t wqe_sz= 1<<srq->log2_max_wqe_sz;
+
+  MTL_TRACE2(
+    MT_FLFMT("%s(srq=%p SRQn=0x%X , wqe_buf_extention="VIRT_ADDR_FMT
+      ", extention_sz_in_wqes="SIZE_T_DFMT),
+    __func__, srq, srq->srqn, wqes_buf_extention, extention_sz_in_wqes);
+  /* Create the free WQEs list of wqe_buf */
+  for (i= 0 , cur_wqe= wqes_buf_extention; i < extention_sz_in_wqes; i++) {
+    *((MT_virt_addr_t*)cur_wqe)= srq->free_wqes_list; /* Link WQE to first in free list */
+    srq->free_wqes_list= cur_wqe - srq->real_virt_offset; /* Put as first in free list */
+    /* Note: subtracting real_virt_offset puts the HCA-space virtual address in the free list */
+    MTL_DEBUG4(MT_FLFMT("%s: Added to srq->free_wqes_list a WQE at "VIRT_ADDR_FMT),__func__,
+      srq->free_wqes_list);
+    cur_wqe+= wqe_sz;
+  }
+}
+
+/* Allocate the auxiliary WQEs data
+ * (a software context of a WQE which does not have to be in the registered WQEs buffer) */
+static HH_ret_t alloc_aux_data_buf(
+  /*IN/OUT*/ THHUL_srq_t new_srq
+)
+{
+  /* RQ auxiliary buffer: one WQE ID per WQE */
+  new_srq->wqe_id= (VAPI_wr_id_t*)
+    THH_SMART_MALLOC(new_srq->cur_buf_p->max_outs * sizeof(VAPI_wr_id_t));
+  if (new_srq->wqe_id == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s: Failed allocating SRQ auxiliary buffer (for 0x%X WQEs IDs)."), __func__,
+      new_srq->cur_buf_p->max_outs);
+    return HH_EAGAIN;
+  }
+
+  return HH_OK;
+}
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h
new file mode 100644
index 00000000..7a6f6e94
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/thhul_srqm/thhul_srqm.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THHUL_SRQM_H +#define H_THHUL_SRQM_H + +#include + + +HH_ret_t THHUL_srqm_create( + THHUL_hob_t hob, + THHUL_srqm_t *srqm_p +); + + +HH_ret_t THHUL_srqm_destroy( + THHUL_srqm_t srqm +); + +HH_ret_t THHUL_srqm_create_srq_prep( + /*IN*/ + HHUL_hca_hndl_t hca, + HHUL_pd_hndl_t pd, + u_int32_t max_outs, + u_int32_t max_sentries, + /*OUT*/ + HHUL_srq_hndl_t *srq_hndl_p, + u_int32_t *actual_max_outs_p, + u_int32_t *actual_max_sentries_p, + void /*THH_srq_ul_resources_t*/ *srq_ul_resources_p +); + +HH_ret_t THHUL_srqm_create_srq_done( + HHUL_hca_hndl_t hca, + HHUL_srq_hndl_t hhul_srq, + HH_srq_hndl_t hh_srq, + void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p +); + +HH_ret_t THHUL_srqm_modify_srq_prep( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/HHUL_srq_hndl_t hhul_srq, + /*IN*/VAPI_srq_attr_t *srq_attr_p, + /*IN*/VAPI_srq_attr_mask_t srq_attr_mask, + /*OUT*/void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p +); + +HH_ret_t THHUL_srqm_modify_srq_done( + /*IN*/HHUL_hca_hndl_t hca, + /*IN*/HHUL_srq_hndl_t hhul_srq, + /*IN*/void/*THH_srq_ul_resources_t*/ *srq_ul_resources_p, + /*OUT*/u_int32_t *max_outs_wr_p /* Max. outstanding WQEs */ +); + +HH_ret_t THHUL_srqm_destroy_srq_done( + HHUL_hca_hndl_t hca, + HHUL_qp_hndl_t hhul_srq +); + +HH_ret_t THHUL_srqm_post_recv_reqs( + /*IN*/ HHUL_hca_hndl_t hca, + /*IN*/ HHUL_srq_hndl_t hhul_srq, + /*IN*/ u_int32_t num_of_requests, + /*IN*/ VAPI_rr_desc_t *recv_req_array, + /*OUT*/ u_int32_t *posted_requests_p + ); + +/* Release this WQE only and return its WQE ID */ +HH_ret_t THHUL_srqm_comp( + THHUL_srqm_t srqm, + HHUL_srq_hndl_t hhul_srq, + u_int32_t wqe_addr_32lsb, + VAPI_wr_id_t *wqe_id_p +); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.c new file mode 100644 index 00000000..8c96a882 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uar/uar.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
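The THHUL_srqm_* prototypes above follow the HHUL split between a user-level prep stage and a done stage completed after the kernel call. A minimal caller-side sketch of that sequence; my_kernel_create_srq is a hypothetical placeholder for the stack's actual kernel transport (a VIPKL call in this stack), and error paths are simplified:

static HH_ret_t example_create_srq( HHUL_hca_hndl_t hca, HHUL_pd_hndl_t pd )
{
    HHUL_srq_hndl_t hhul_srq;
    HH_srq_hndl_t hh_srq;
    THH_srq_ul_resources_t ul_res;
    u_int32_t max_outs, max_sentries;
    HH_ret_t rc;

    /* Phase 1: allocate user-level buffers and describe them in ul_res */
    rc = THHUL_srqm_create_srq_prep( hca, pd, 1024 /*max_outs*/, 4 /*max_sentries*/,
                                     &hhul_srq, &max_outs, &max_sentries, &ul_res );
    if( rc != HH_OK ) return rc;

    /* The kernel creates the SRQ object and may fill in DDR buffer addresses
     * (placeholder call - the real transport is stack-specific). */
    rc = my_kernel_create_srq( &ul_res, &hh_srq );
    if( rc != HH_OK ) {
        THHUL_srqm_destroy_srq_done( hca, hhul_srq ); /* roll back phase 1 */
        return rc;
    }

    /* Phase 2: bind the kernel handle and thread the free-WQE list */
    return THHUL_srqm_create_srq_done( hca, hhul_srq, hh_srq, &ul_res );
}

The same prep/done pairing applies to resizing: THHUL_srqm_modify_srq_prep stages a copy of the WQEs buffer, and THHUL_srqm_modify_srq_done either commits it or rolls it back, as implemented in thhul_srqm.c above.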
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#define C_UAR_C +#include "uar.h" +#include + +#if 0 // move to uar.h +struct THH_uar_st { + THH_ver_info_t ver_info; + THH_uar_index_t uar_index; + volatile u_int32_t *uar_base; + MOSAL_spinlock_t uar_lock; +} /* *THH_uar_t */; + +/* Doorbells dword offsets */ +#define UAR_RD_SEND_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,rd_send_doorbell)>>2) +#define UAR_SEND_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,send_doorbell)>>2) +#define UAR_RECV_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,receive_doorbell)>>2) +#define UAR_CQ_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,cq_command_doorbell)>>2) +#define UAR_EQ_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,eq_command_doorbell)>>2) +/* Doorbells dword offsets */ +#define UAR_RD_SEND_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,rd_send_doorbell)>>2) +#define UAR_SEND_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,send_doorbell)>>2) +#define UAR_RECV_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,receive_doorbell)>>2) +#define UAR_CQ_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,cq_command_doorbell)>>2) +#define UAR_EQ_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,eq_command_doorbell)>>2) +#endif + +/* doorbell ringing for 2 dwords */ +#ifdef __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ +/* If Qword write is assured to be atomic (MMX or a 64-bit arch) - no need for the spinlock */ +#define RING_DBELL_2DW(uar,dword_offset,dbell_draft) \ + dbell_draft[0]= MOSAL_cpu_to_be32(dbell_draft[0]); \ + dbell_draft[1]= MOSAL_cpu_to_be32(dbell_draft[1]); \ + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base+dword_offset,*(volatile u_int64_t*)dbell_draft); +#else +#define RING_DBELL_2DW(uar,dword_offset,dbell_draft) \ + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); \ + MOSAL_MMAP_IO_WRITE_DWORD(uar->uar_base+dword_offset,MOSAL_cpu_to_be32(dbell_draft[0])); \ + MOSAL_MMAP_IO_WRITE_DWORD(uar->uar_base+dword_offset+1,MOSAL_cpu_to_be32(dbell_draft[1])); \ + MOSAL_spinlock_unlock(&(uar->uar_lock)); +#endif /* Atomic Qword write */ + +#define RING_DBELL_4DW(uar,dword_offset,dbell_draft) \ + dbell_draft[0]= MOSAL_cpu_to_be32(dbell_draft[0]); \ + dbell_draft[1]= MOSAL_cpu_to_be32(dbell_draft[1]); \ + dbell_draft[2]= MOSAL_cpu_to_be32(dbell_draft[2]); \ + dbell_draft[3]= MOSAL_cpu_to_be32(dbell_draft[3]); \ + MOSAL_spinlock_dpc_lock(&(uar->uar_lock)); \ + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base+dword_offset,((u_int64_t*)dbell_draft)[0]); \ + MOSAL_MMAP_IO_WRITE_QWORD(uar->uar_base+dword_offset+2,((u_int64_t*)dbell_draft)[1]); \ + MOSAL_spinlock_unlock(&(uar->uar_lock)); + + + +/************************************************************************/ +/* Public functions */ +/************************************************************************/ + +HH_ret_t THH_uar_create( + /*IN*/ THH_ver_info_t *version_p, + /*IN*/ THH_uar_index_t uar_index, + /*IN*/ void *uar_base, + /*OUT*/ THH_uar_t *uar_p +) +{ + THH_uar_t new_uar; + + new_uar= (THH_uar_t)MALLOC(sizeof(struct THH_uar_st)); + if (new_uar == NULL) { + MTL_ERROR1("THH_uar_create: Failed allocating object memory.\n"); + return HH_EAGAIN; + } + 
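+  /* Note on RING_DBELL_2DW/RING_DBELL_4DW above: a doorbell must reach the
+   * UAR page as one transaction. When the platform guarantees an atomic
+   * 64-bit MMIO write (__MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__), the two dwords
+   * are byte-swapped in place and written with a single qword store, and no
+   * lock is taken; otherwise uar_lock serializes the two 32-bit stores so
+   * that doorbells from concurrent callers cannot interleave. The 4-dword
+   * variant always takes the lock, since it needs two qword stores. */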
new_uar->uar_index= uar_index; + new_uar->uar_base= (volatile u_int32_t*)uar_base; + memcpy(&(new_uar->ver_info),version_p,sizeof(THH_ver_info_t)); + MOSAL_spinlock_init(&(new_uar->uar_lock)); + + *uar_p= new_uar; + return HH_OK; +} /* THH_uar_create */ + + +HH_ret_t THH_uar_destroy(THH_uar_t uar /* IN */) +{ + FREE(uar); + return HH_OK; +} /* THH_uar_destroy */ + + +HH_ret_t THH_uar_get_index( + /*IN*/ THH_uar_t uar, + /*OUT*/ THH_uar_index_t *uar_index_p +) +{ + if (uar == NULL) return HH_EINVAL; + *uar_index_p= uar->uar_index; + return HH_OK; +} + + +HH_ret_t THH_uar_sendq_dbell( + THH_uar_t uar, /* IN */ + THH_uar_sendq_dbell_t* sendq_dbell_p /* IN */ ) +{ + volatile u_int32_t dbell_draft[UAR_SEND_DBELL_SZ]= {0}; + +// MTL_DEBUG4(MT_FLFMT("SendQ Dbell: qpn=0x%X nda=0x%X nds=0x%X nopcode=0x%X"), +// sendq_dbell_p->qpn, sendq_dbell_p->next_addr_32lsb, +// sendq_dbell_p->next_size, sendq_dbell_p->nopcode); + + dbell_draft[MT_BYTE_OFFSET(tavorprm_send_doorbell_st,nda)>>2]= + sendq_dbell_p->next_addr_32lsb; /* 6 ls-bits will be masked anyway by f and nopcode */ + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->fence ? 1 : 0, + MT_BIT_OFFSET(tavorprm_send_doorbell_st,f), + MT_BIT_SIZE(tavorprm_send_doorbell_st,f)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->nopcode, + MT_BIT_OFFSET(tavorprm_send_doorbell_st,nopcode), + MT_BIT_SIZE(tavorprm_send_doorbell_st,nopcode)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->qpn, + MT_BIT_OFFSET(tavorprm_send_doorbell_st,qpn), + MT_BIT_SIZE(tavorprm_send_doorbell_st,qpn)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->next_size, + MT_BIT_OFFSET(tavorprm_send_doorbell_st,nds), + MT_BIT_SIZE(tavorprm_send_doorbell_st,nds)); + + RING_DBELL_2DW(uar,UAR_SEND_DBELL_OFFSET,dbell_draft); + + return HH_OK; +} /* THH_uar_sendq_dbell */ + + +HH_ret_t THH_uar_sendq_rd_dbell( + THH_uar_t uar, /* IN */ + THH_uar_sendq_dbell_t* sendq_dbell_p, /* IN */ + IB_eecn_t een /* IN */) +{ + volatile u_int32_t dbell_draft[UAR_RD_SEND_DBELL_SZ]= {0}; + + MT_INSERT_ARRAY32(dbell_draft, een, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,een), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,qpn)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->qpn, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,qpn), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,qpn)); + dbell_draft[MT_BYTE_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.nda)>>2]= + sendq_dbell_p->next_addr_32lsb; /* 6 ls-bits will be masked anyway by f and nopcode */ + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->fence ? 
1 : 0, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.f), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,snd_params.f)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->nopcode, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.nopcode), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,snd_params.nopcode)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->qpn, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.qpn), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,snd_params.qpn)); + MT_INSERT_ARRAY32(dbell_draft, sendq_dbell_p->next_size, + MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.nds), + MT_BIT_SIZE(tavorprm_rd_send_doorbell_st,snd_params.nds)); + + RING_DBELL_4DW(uar,UAR_RD_SEND_DBELL_OFFSET,dbell_draft); + + return HH_OK; +} /* THH_uar_sendq_rd_dbell */ + + +HH_ret_t THH_uar_recvq_dbell( + THH_uar_t uar, /* IN */ + THH_uar_recvq_dbell_t* recvq_dbell_p /* IN */) +{ + volatile u_int32_t dbell_draft[UAR_RECV_DBELL_SZ]= {0}; + + /* nda field in the Doorbell is actually nda[31:6] - so we must shift it right before inse*/ + dbell_draft[MT_BYTE_OFFSET(tavorprm_receive_doorbell_st,nda) >> 2]= + ( recvq_dbell_p->next_addr_32lsb & (~MASK32(6)) ) | + ( recvq_dbell_p->next_size & MASK32(6) ); + dbell_draft[MT_BYTE_OFFSET(tavorprm_receive_doorbell_st,qpn) >> 2]= + ( recvq_dbell_p->qpn << (MT_BIT_OFFSET(tavorprm_receive_doorbell_st,qpn) & MASK32(5)) ) | + recvq_dbell_p->credits; + + RING_DBELL_2DW(uar,UAR_RECV_DBELL_OFFSET,dbell_draft); + + return HH_OK; +} /* THH_uar_recvq_dbell */ + + +HH_ret_t THH_uar_cq_cmd( + THH_uar_t uar, /* IN */ + THH_uar_cq_cmd_t cmd, /* IN */ + HH_cq_hndl_t cqn, /* IN */ + u_int32_t param /* IN */) +{ + volatile u_int32_t dbell_draft[UAR_CQ_DBELL_SZ]= {0}; + + MT_INSERT_ARRAY32(dbell_draft, cmd, + MT_BIT_OFFSET(tavorprm_cq_cmd_doorbell_st,cq_cmd), + MT_BIT_SIZE(tavorprm_cq_cmd_doorbell_st,cq_cmd)); + MT_INSERT_ARRAY32(dbell_draft, cqn, + MT_BIT_OFFSET(tavorprm_cq_cmd_doorbell_st,cqn), + MT_BIT_SIZE(tavorprm_cq_cmd_doorbell_st,cqn)); + MT_INSERT_ARRAY32(dbell_draft, param, + MT_BIT_OFFSET(tavorprm_cq_cmd_doorbell_st,cq_param), + MT_BIT_SIZE(tavorprm_cq_cmd_doorbell_st,cq_param)); + + RING_DBELL_2DW(uar,UAR_CQ_DBELL_OFFSET,dbell_draft); + + return HH_OK; +} /* THH_uar_cq_cmd */ + + +HH_ret_t THH_uar_eq_cmd( + THH_uar_t uar, /* IN */ + THH_uar_eq_cmd_t cmd, /* IN */ + THH_eqn_t eqn, /* IN */ + u_int32_t param /* IN */) +{ + volatile u_int32_t dbell_draft[UAR_EQ_DBELL_SZ]= {0}; + + MT_INSERT_ARRAY32(dbell_draft, cmd, + MT_BIT_OFFSET(tavorprm_eq_cmd_doorbell_st,eq_cmd), + MT_BIT_SIZE(tavorprm_eq_cmd_doorbell_st,eq_cmd)); + MT_INSERT_ARRAY32(dbell_draft, eqn, + MT_BIT_OFFSET(tavorprm_eq_cmd_doorbell_st,eqn), + MT_BIT_SIZE(tavorprm_eq_cmd_doorbell_st,eqn)); + MT_INSERT_ARRAY32(dbell_draft, param, + MT_BIT_OFFSET(tavorprm_eq_cmd_doorbell_st,eq_param), + MT_BIT_SIZE(tavorprm_eq_cmd_doorbell_st,eq_param)); +#if 2 <= MAX_DEBUG + { u_int32_t i; + for (i=0;i +#include +#include +#include +#include +#include +#include + + + +typedef struct { + + IB_wqpn_t qpn; /* QP number */ + tavor_if_nopcode_t nopcode; /* Next Send descriptor opcode (encoded) */ + MT_bool fence; /* Fence bit set */ + u_int32_t next_addr_32lsb; /* Address of next WQE (the one linked) */ + u_int32_t next_size ; /* Size of next WQE (16-byte chunks) */ + +} THH_uar_sendq_dbell_t; + +typedef struct { + + IB_wqpn_t qpn; /* QP number */ + u_int32_t next_addr_32lsb; /* Address of next WQE (the one linked) */ + u_int32_t next_size ; /* Size of next WQE (16-byte chunks) */ + u_int8_t credits; /* Number of WQEs 
attached with this doorbell (255 max.) */ + +} THH_uar_recvq_dbell_t; + +typedef tavor_if_uar_cq_cmd_t THH_uar_cq_cmd_t; +typedef tavor_if_uar_eq_cmd_t THH_uar_eq_cmd_t; + +struct THH_uar_st { + THH_ver_info_t ver_info; + THH_uar_index_t uar_index; + volatile u_int32_t *uar_base; + MOSAL_spinlock_t uar_lock; +} /* *THH_uar_t */; + +/* Doorbells dword offsets */ +#define UAR_RD_SEND_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,rd_send_doorbell)>>2) +#define UAR_SEND_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,send_doorbell)>>2) +#define UAR_RECV_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,receive_doorbell)>>2) +#define UAR_CQ_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,cq_command_doorbell)>>2) +#define UAR_EQ_DBELL_OFFSET (MT_BYTE_OFFSET(tavorprm_uar_st,eq_command_doorbell)>>2) +/* Doorbells dword offsets */ +#define UAR_RD_SEND_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,rd_send_doorbell)>>2) +#define UAR_SEND_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,send_doorbell)>>2) +#define UAR_RECV_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,receive_doorbell)>>2) +#define UAR_CQ_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,cq_command_doorbell)>>2) +#define UAR_EQ_DBELL_SZ (MT_BYTE_SIZE(tavorprm_uar_st,eq_command_doorbell)>>2) + +/************************************************************************ + * Function: THH_uar_create + * + * Arguments: + * version_p + * uar_index + * uar_base - Virtual address mapped to associated UAR + * uar_p - Created THH_uar object handle + * + * Returns: + * HH_OK + * HH_EAGAIN - Not enough resources to create object + * HH_EINVAL - Invalid parameters (NULL ptrs.etc.) + * + * Description: Create the THH_uar object. + */ +HH_ret_t THH_uar_create( + /*IN*/ THH_ver_info_t *version_p, + /*IN*/ THH_uar_index_t uar_index, + /*IN*/ void *uar_base, + /*OUT*/ THH_uar_t *uar_p + ); + + +/************************************************************************ + * + * Function: THH_uar_destroy + * + * Arguments: + * uar - Object handle + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid handle + * + * Description: Free UAR object context. + */ +HH_ret_t THH_uar_destroy(/*IN*/ THH_uar_t uar); + + +/************************************************************************ + * + * Function: THH_uar_get_index + * + * Arguments: + * uar - Object handle + * uar_index_p - Returned UAR page index of UAR associated with this object + * + * Returns: + * HH_OK + * HH_EINVAL + * + * Description: Get associated UAR page index. + */ +HH_ret_t THH_uar_get_index( + /*IN*/ THH_uar_t uar, + /*OUT*/ THH_uar_index_t *uar_index_p + ); + + +/************************************************************************ + * Function: THH_uar_sendq_dbell + * + * Arguments: + * uar - The THH_uar object handle + * sendq_dbell_p - Send queue doorbel data + * + * Returns: + * HH_OK + * HH_EINVAL -Invalid handles or NULL pointer + * + * Description: + * Ring the send section of the UAR. 
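+ *
+ * Usage sketch (values are illustrative only, not from the driver):
+ *   THH_uar_sendq_dbell_t db;
+ *   db.qpn = send_qpn;                       /* QP to ring                 */
+ *   db.nopcode = TAVOR_IF_NOPCODE_SEND;      /* opcode of the linked WQE   */
+ *   db.fence = FALSE;
+ *   db.next_addr_32lsb = (u_int32_t)wqe_hca_va; /* 64-byte-aligned address */
+ *   db.next_size = wqe_sz_in_16byte_chunks;
+ *   THH_uar_sendq_dbell( uar, &db );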
+ */
+HH_ret_t THH_uar_sendq_dbell(
+ /*IN*/ THH_uar_t uar,
+ /*IN*/ THH_uar_sendq_dbell_t* sendq_dbell_p
+ );
+
+#define SEND_DBELL_NDA_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_send_doorbell_st,nda)>>2)
+#define SEND_DBELL_QPN_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_send_doorbell_st,qpn)>>2)
+#define SEND_DBELL_F_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_send_doorbell_st,f) & 0x1f)
+#define SEND_DBELL_QPN_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_send_doorbell_st,qpn) & 0x1f)
+#define SEND_DBELL_NEXT_ADDR_BIT_MASK (~MASK32(6))
+#define SEND_DBELL_NOPCODE_BIT_MASK (MASK32(MT_BIT_SIZE(tavorprm_send_doorbell_st,nopcode)))
+#define SEND_DBELL_NDA_BIT_MASK (MASK32(MT_BIT_SIZE(tavorprm_send_doorbell_st,nds)))
+
+/************************************************************************
+ * Function: THH_uar_sendq_rd_dbell
+ *
+ * Arguments:
+ * uar - The THH_uar object handle
+ * sendq_dbell_p - Send queue doorbell data
+ * een - The EE context number for posted request
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles or NULL pointer
+ *
+ * Description:
+ * Ring the rd-send section of the UAR.
+ */
+HH_ret_t THH_uar_sendq_rd_dbell(
+ /*IN*/ THH_uar_t uar,
+ /*IN*/ THH_uar_sendq_dbell_t* sendq_dbell_p,
+ /*IN*/ IB_eecn_t een
+ );
+
+#define SEND_RD_DBELL_EEN_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_rd_send_doorbell_st,een) >> 2)
+#define SEND_RD_DBELL_QPN_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_rd_send_doorbell_st,qpn) >> 2)
+#define SEND_RD_DBELL_NDA_PARAM_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.nda)>>2)
+#define SEND_RD_DBELL_QPN_PARAM_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_rd_send_doorbell_st,snd_params.qpn)>>2)
+#define SEND_RD_DBELL_EEN_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,een) & 0x1f)
+#define SEND_RD_DBELL_QPN_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_rd_send_doorbell_st,qpn) & 0x1f)
+
+#define THH_UAR_SENDQ_RD_DBELL(uar,dbell_draft,next_addr,next_size,fence,nopcode,qpn,een) \
+ dbell_draft[SEND_RD_DBELL_EEN_BYTE_OFFSET] = MOSAL_cpu_to_be32( \
+ een << SEND_RD_DBELL_EEN_BIT_OFFSET)
+
+/************************************************************************
+ * Function: THH_uar_recvq_dbell
+ *
+ * Arguments:
+ * uar - The THH_uar object handle
+ * recvq_dbell_p - Receive queue doorbell data
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles or NULL pointer
+ *
+ * Description:
+ * Ring the receive section of the UAR.
+ */
+HH_ret_t THH_uar_recvq_dbell(
+ /*IN*/ THH_uar_t uar,
+ /*IN*/ THH_uar_recvq_dbell_t* recvq_dbell_p
+ );
+
+#define RECV_DBELL_NDA_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_receive_doorbell_st,nda) >> 2)
+#define RECV_DBELL_QPN_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_receive_doorbell_st,qpn) >> 2)
+#define RECV_DBELL_QPN_BIT_SHIFT (MT_BIT_OFFSET(tavorprm_receive_doorbell_st,qpn) & MASK32(5))
+#define RECV_DBELL_NEXT_ADDR_BIT_MASK (~MASK32(6))
+#define RECV_DBELL_NEXT_SIZE_BIT_MASK (MASK32(6))
+
+/************************************************************************
+ * Function: THH_uar_cq_cmd
+ *
+ * Arguments:
+ * uar - The THH_uar object handle
+ * cmd - The CQ command code
+ * cqn - The CQC index of the CQ to perform command on
+ * param - The 32 bit parameter (local CPU endianness)
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles
+ *
+ * Description:
+ * Invoke a CQ context update through the CQ_cmd section of the UAR.
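+ *
+ * Typical call shape (a sketch only - names are placeholders; cmd must be
+ * one of the tavor_if_uar_cq_cmd_t codes): the caller passes param in
+ * local CPU endianness, and conversion to the big-endian doorbell format
+ * is done inside this function.
+ *   THH_uar_cq_cmd(uar, my_cmd, my_cqn, my_param);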
+ */
+HH_ret_t THH_uar_cq_cmd(
+ THH_uar_t uar, /* IN */
+ THH_uar_cq_cmd_t cmd, /* IN */
+ HH_cq_hndl_t cqn, /* IN */
+ u_int32_t param /* IN */);
+
+#define CQ_CMD_DBELL_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_cq_cmd_doorbell_st,cq_cmd) >> 2)
+#define CQ_PARAM_DBELL_BYTE_OFFSET (MT_BYTE_OFFSET(tavorprm_cq_cmd_doorbell_st,cq_param) >> 2)
+#define CQ_CMD_DBELL_BIT_OFFSET (MT_BIT_OFFSET(tavorprm_cq_cmd_doorbell_st,cq_cmd))
+
+/************************************************************************
+ * Function: THH_uar_eq_cmd
+ *
+ * Arguments:
+ * uar - The THH_uar object handle
+ * cmd - The EQ command code
+ * eqn - The EQC index of the EQ to perform command on
+ * param - The 32 bit parameter
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles
+ *
+ * Description:
+ * Invoke an EQ context update through the EQ_cmd section of the UAR.
+ */
+HH_ret_t THH_uar_eq_cmd(
+ THH_uar_t uar, /* IN */
+ THH_uar_eq_cmd_t cmd, /* IN */
+ THH_eqn_t eqn, /* IN */
+ u_int32_t param /* IN */);
+
+
+/************************************************************************
+ * Function: THH_uar_blast
+ *
+ * Arguments:
+ * uar - The THH_uar object handle
+ * wqe_p - A pointer to the WQE structure to push to the "flame"
+ * wqe_sz - WQE size
+ * sendq_dbell_p - Send queue doorbell data
+ * een - The EE context number for posted request (valid for RD-send)
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid handles or NULL pointer
+ *
+ * Description:
+ *
+ * This function pushes the given WQE descriptor to the InfiniBlast(tm)
+ * buffer through the infini_blast section of the UAR, and then rings the
+ * "send" doorbell (in order to assure atomicity between the writing of the
+ * InfiniBlast buffer and the ringing of the "send" doorbell).
+ * If given een is valid (0-0xFFFFFF) the "rd-send" doorbell is used.
+ */
+HH_ret_t THH_uar_blast(
+ THH_uar_t uar, /* IN */
+ void* wqe_p, /* IN */
+ MT_size_t wqe_sz, /* IN */
+ THH_uar_sendq_dbell_t *sendq_dbell_p, /* IN */
+ IB_eecn_t een /* IN */
+);
+
+#endif /* H_UAR_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.c
new file mode 100644
index 00000000..f7d37db9
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include + +#define UD_AV_ENTRY_SIZE (sizeof(struct tavorprm_ud_address_vector_st) / 8) +#define UD_AV_ENTRY_DWORD_SIZE (sizeof(struct tavorprm_ud_address_vector_st) / 32) +#define UD_AV_ENTRY_SIZE_LOG2 5 +#define UD_AV_ALIGNMENT_MASK (UD_AV_ENTRY_SIZE - 1) +/*================ macro definitions ===============================================*/ +/* check that ud_av_p is in the av table range and that it is aligned to entry size */ +#define IS_INVALID_AV_MEMBER(udavm, ud_av_p) \ + (((ud_av_p) < udavm->ud_av_table) || \ + (((MT_virt_addr_t)ud_av_p) >= ((MT_virt_addr_t)(udavm->ud_av_table))+(udavm->ud_av_table_sz)) ) +#define IS_INVALID_AV_ALIGN(ud_av_p) \ + (((MT_virt_addr_t)(ud_av_p)) & UD_AV_ALIGNMENT_MASK) + +#define IS_INVALID_AV(udavm, ud_av_host_p) \ + (((ud_av_host_p) < udavm->ud_av_table_host) || \ + (((MT_ulong_ptr_t)ud_av_host_p) >= ((MT_ulong_ptr_t)(udavm->ud_av_table_host))+(udavm->ud_av_table_sz)) || \ + (((((MT_ulong_ptr_t)udavm->ud_av_table_host) - ((MT_ulong_ptr_t)(ud_av_host_p))) & UD_AV_ALIGNMENT_MASK)) ) + +#define RGID_OFFSET (MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_127_96) >> 5) /* in DWORDS */ +#define LOG2_PORT_GID_TABLE_SIZE 5 /* 32 GID entries per port */ +//#define HANDLE_2_INDEX(udavm, ah) (((MT_ulong_ptr_t)ah - ((MT_ulong_ptr_t)(udavm)->ud_av_table)) >> UD_AV_ENTRY_SIZE_LOG2) +#define HANDLE_2_INDEX(udavm, ah) (((MT_ulong_ptr_t)ah - ((MT_ulong_ptr_t)(udavm)->ud_av_table_host)) >> UD_AV_ENTRY_SIZE_LOG2) + +/*================ type definitions ================================================*/ + + +/* The main UDAV-manager structure */ +struct THH_udavm_st { + u_int32_t* ud_av_table_ddr; /* the actual table of UDAV*/ + u_int32_t* ud_av_table_host; // == ud_av_table_ddr if av_in_host_mem + u_int32_t max_av; /* number of entries in table */ + u_int32_t used_avs_counter; + MT_size_t ud_av_table_sz; /* total table size in bytes */ + VAPI_lkey_t table_memkey; + VIP_array_p_t udavs_array; /* the array which holds the free list of AV entries */ + MOSAL_spinlock_t table_spinlock; /* protect on the table from destroying UDAV and modifying at the same time */ +}THH_udavm_int_t; + + +/*================ global variables definitions ====================================*/ + + + +/*================ static functions prototypes =====================================*/ + +static void fill_udav_entry(/*IN */ HH_pd_hndl_t pd, + /*IN */ MT_bool is_new_pd, + /*IN */ VAPI_ud_av_t *av_p, + /*IN */ u_int32_t *av_entry_p); + + + +#ifdef MAX_DEBUG +static void print_udav(VAPI_ud_av_t *av_p); +#endif + +/*================ global functions definitions ====================================*/ + + +HH_ret_t THH_udavm_create( /*IN */ THH_ver_info_t *version_p, + /*IN */ VAPI_lkey_t ud_av_table_memkey, + /*IN */ MT_virt_addr_t ud_av_table, + /*IN */ MT_size_t ud_av_table_sz, + /*IN */ MT_bool av_in_host_mem, + /*OUT*/ THH_udavm_t *udavm_p, + /*OUT*/ char **av_ddr_base, + /*OUT*/ char **av_host_base) + +{ + THH_udavm_t new_udavm_p = NULL; + u_int32_t* ud_av_table_host; + HH_ret_t ret; + + FUNC_IN; + + /* allocation of object structure */ + new_udavm_p = (THH_udavm_t)MALLOC(sizeof(THH_udavm_int_t)); + if (!new_udavm_p) { + MTL_ERROR4("%s: Cannot 
allocate UDAVM object.\n", __func__);
+ MT_RETURN( HH_EAGAIN);
+ }
+
+ memset(new_udavm_p,0,sizeof(THH_udavm_int_t));
+
+ if (av_in_host_mem) {
+ ud_av_table_host = (u_int32_t*)ud_av_table;
+ } else {
+ ud_av_table_host = (u_int32_t*)VMALLOC(ud_av_table_sz);
+ if (ud_av_table_host == NULL) {
+ FREE(new_udavm_p);
+ MTL_ERROR4("%s: Cannot allocate UDAV host table.\n", __func__);
+ MT_RETURN( HH_EAGAIN);
+ }
+ memset(ud_av_table_host,0,ud_av_table_sz);
+ }
+ new_udavm_p->ud_av_table_host = ud_av_table_host;
+
+ /* fill in the fields of the UDAV struct */
+ if (MOSAL_spinlock_init(&(new_udavm_p->table_spinlock)) != MT_OK){
+ MTL_ERROR4("%s: Failed to initialize spinlock.\n", __func__);
+ ret= HH_ERR;
+ goto err_free_mem;
+ }
+ new_udavm_p->ud_av_table_ddr = (u_int32_t*)ud_av_table;
+ memset(new_udavm_p->ud_av_table_ddr,0,ud_av_table_sz);
+ new_udavm_p->max_av = (u_int32_t)(ud_av_table_sz/UD_AV_ENTRY_SIZE);
+ new_udavm_p->ud_av_table_sz = ud_av_table_sz;
+ new_udavm_p->used_avs_counter = 0;
+ new_udavm_p->table_memkey = ud_av_table_memkey;
+ /* init the free list */
+ ret = VIP_array_create_maxsize(new_udavm_p->max_av,new_udavm_p->max_av,&(new_udavm_p->udavs_array));
+ if ( ret != VIP_OK ) {
+ MTL_ERROR1("%s: VIP_array_create_maxsize failed, ret=%d \n", __func__, ret);
+ goto err_free_mem;
+ }
+
+ /* succeeded to create object - return params: */
+
+ *udavm_p = new_udavm_p;
+ *av_ddr_base = (char *)new_udavm_p->ud_av_table_ddr;
+ *av_host_base = (char *)new_udavm_p->ud_av_table_host;
+
+ MT_RETURN(HH_OK);
+
+ /* error handling cleanup */
+err_free_mem:
+ FREE(new_udavm_p); /* allocated with MALLOC above */
+ if (ud_av_table_host != (u_int32_t*)ud_av_table) {
+ VFREE(ud_av_table_host); /* VMALLOCed here only when !av_in_host_mem */
+ }
+
+ MT_RETURN(ret);
+
+}
+
+
+/************************************************************************************/
+
+HH_ret_t THH_udavm_destroy( /*IN */ THH_udavm_t udavm )
+{
+
+ FUNC_IN;
+ if (udavm == NULL) {
+ MTL_ERROR4("%s: udavm is NULL.\n", __func__);
+ MT_RETURN(HH_EINVAL);
+ }
+
+ /* destroy the handlers array */
+ if (VIP_array_destroy(udavm->udavs_array, NULL) != VIP_OK) {
+ MTL_ERROR1("%s: VIP_array_destroy failed \n", __func__);
+ }
+
+ if (udavm->ud_av_table_host != NULL
+ && udavm->ud_av_table_host != udavm->ud_av_table_ddr) {
+ VFREE(udavm->ud_av_table_host);
+ udavm->ud_av_table_host = NULL;
+ }
+
+ FREE(udavm);
+ MT_RETURN(HH_OK);
+}
+
+
+
+/************************************************************************************/
+
+HH_ret_t THH_udavm_get_memkey( /*IN */ THH_udavm_t udavm,
+ /*IN */ VAPI_lkey_t *table_memkey_p )
+{
+ FUNC_IN;
+ if ((udavm != NULL) && (table_memkey_p !=NULL)) {
+ *table_memkey_p = udavm->table_memkey;
+ MT_RETURN(HH_OK);
+ }
+
+ MT_RETURN(HH_EINVAL);
+
+}
+
+/************************************************************************************/
+HH_ret_t THH_udavm_create_av( /*IN */ THH_udavm_t udavm,
+ /*IN */ HH_pd_hndl_t pd,
+ /*IN */ VAPI_ud_av_t *av_p,
+ /*OUT*/ HH_ud_av_hndl_t *ah_p)
+{
+ u_int32_t* av_entry_host_p;
+ u_int32_t ah_index=0;
+ VIP_common_ret_t ret;
+
+ FUNC_IN;
+
+ if (udavm == NULL) {
+ MTL_ERROR4("THH_udavm_create_av: udavm is NULL.\n");
+ MT_RETURN(HH_EINVAL);
+ }
+
+ if (av_p == NULL) {
+ MTL_ERROR4("THH_udavm_create_av: av_p is NULL.\n");
+ MT_RETURN(HH_EINVAL);
+ }
+
+ if (ah_p == NULL) {
+ MTL_ERROR4("THH_udavm_create_av: ah_p is NULL.\n");
+ MT_RETURN(HH_EINVAL);
+ }
+
+ if (av_p->dlid == 0) {
+ MTL_ERROR4("THH_udavm_create_av: invalid dlid (ZERO).\n");
+ MT_RETURN(HH_EINVAL);
+ }
+
+ /* check if max entries exceeded */
+ MOSAL_spinlock_dpc_lock(&(udavm->table_spinlock));
+ if (udavm->max_av == udavm->used_avs_counter) {
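+ /* Table full: used_avs_counter is sampled under table_spinlock, so two
+ concurrent creators cannot both claim the last free entry - release
+ the lock and fail with HH_EAGAIN. */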
MOSAL_spinlock_unlock(&(udavm->table_spinlock)); + MTL_ERROR4("THH_udavm_create_av: No free entries in UDAV table.\n"); + MT_RETURN(HH_EAGAIN); + } + else{ + udavm->used_avs_counter++; + } + MOSAL_spinlock_unlock(&(udavm->table_spinlock)); + + /* get a free index for the udav */ + ret = VIP_array_insert(udavm->udavs_array, NULL, &ah_index); + if (ret != VIP_OK) { + MTL_ERROR4("THH_udavm_create_av: Not enough resources.\n"); + /* decrement used AVs since this is failed */ + MOSAL_spinlock_dpc_lock(&(udavm->table_spinlock)); + udavm->used_avs_counter--; + MOSAL_spinlock_unlock(&(udavm->table_spinlock)); + MT_RETURN(HH_EAGAIN); + } + + /* fill out the host copy */ + av_entry_host_p = (u_int32_t*) (((MT_ulong_ptr_t)((udavm)->ud_av_table_host)) + + ((ah_index)*UD_AV_ENTRY_SIZE)); + memset(av_entry_host_p, 0, UD_AV_ENTRY_SIZE); + fill_udav_entry(pd, TRUE, av_p, av_entry_host_p); + + if (udavm->ud_av_table_host != udavm->ud_av_table_ddr) { + /* filling the DDR entry */ + u_int32_t* av_entry_ddr_p; + av_entry_ddr_p = (u_int32_t*) (((MT_ulong_ptr_t)((udavm)->ud_av_table_ddr)) + + ((ah_index)*UD_AV_ENTRY_SIZE)); + memcpy(av_entry_ddr_p, av_entry_host_p, UD_AV_ENTRY_SIZE); + } + + *ah_p = (HH_ud_av_hndl_t)av_entry_host_p; + MTL_DEBUG4(MT_FLFMT("Allocated address handle = " MT_ULONG_PTR_FMT ", entry index=%d"),*ah_p, ah_index); + MT_RETURN(HH_OK); +} + +/************************************************************************************/ +HH_ret_t THH_udavm_modify_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah, + /*IN */ VAPI_ud_av_t *av_p ) +{ + u_int32_t *ud_av_host_p= (u_int32_t*)(MT_ulong_ptr_t)ah; + HH_ret_t hh_ret; + u_int32_t ah_index; + VIP_common_ret_t ret=VIP_OK; + + FUNC_IN; + + if (udavm == NULL) { + MTL_ERROR4("THH_udavm_modify_av: udavm is NULL.\n"); + MT_RETURN(HH_EINVAL); + } + + if (av_p == NULL) { + MTL_ERROR4("THH_udavm_modify_av: av_p is NULL.\n"); + MT_RETURN(HH_EINVAL); + } + + if (av_p->dlid == 0) { + MTL_ERROR4("THH_udavm_modify_av: invalid dlid (ZERO).\n"); + MT_RETURN(HH_EINVAL); + } + + /* Check that ah within the table and aligned to entry beginning */ + if (IS_INVALID_AV(udavm, ud_av_host_p) || ud_av_host_p[0] == 0) { + MTL_DEBUG4("THH_udavm_modify_av: invalid ah (0x%lX).\n",ah); + //MTL_ERROR1("THH_udavm_modify_av: invalid ah=%p table=%p\n",ud_av_host_p, udavm->ud_av_table_host); + MT_RETURN(HH_EINVAL_AV_HNDL); + } +#if 0 /* new vapi 3.0 code */ + /* Check that ah within the table and aligned to entry beginning */ + if (IS_INVALID_AV_ALIGN(ud_av_p)) { + MTL_ERROR4("THH_udavm_modify_av: invalid av alignment.\n"); + MT_RETURN(HH_EINVAL); + } + if (IS_INVALID_AV_MEMBER(udavm, ud_av_p)) { + MTL_DEBUG4("THH_udavm_modify_av: invalid ah (" MT_ULONG_PTR_FMT ").\n",ah); + MT_RETURN(HH_EINVAL_AV_HNDL); + } +#endif + + /* check that ah is a valid (allocated) handle */ + ah_index = (u_int32_t)HANDLE_2_INDEX(udavm, ud_av_host_p); + ret = VIP_array_find_hold(udavm->udavs_array, ah_index, NULL); + MTL_DEBUG3(MT_FLFMT("Calling VIP_array_find_hold, ah_index=%d, ret=%d\n"), ah_index, ret); + if ( ret!=VIP_OK ) { + MTL_ERROR4("THH_udavm_modify_av: handle is not valid.\n"); + hh_ret = HH_EINVAL_AV_HNDL; + } else { + MOSAL_spinlock_dpc_lock(&(udavm->table_spinlock)); + fill_udav_entry(0, FALSE, av_p, ud_av_host_p); + if (udavm->ud_av_table_host != udavm->ud_av_table_ddr) { + // TBD - take advantage of ah_index and avoid pointer math + u_int32_t *ud_av_ddr_p = &udavm->ud_av_table_ddr[ud_av_host_p - udavm->ud_av_table_host]; + // follow actual DDR modification rules + 
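+ // The rule, mirroring fill_udav_entry: dword 0 holds the PD and marks the
+ // entry valid to HW, so clear it first, copy the body, and only then
+ // restore dword 0 - HW never observes a half-modified valid entry.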
ud_av_ddr_p[0] = 0; + memcpy(&ud_av_ddr_p[1], &ud_av_host_p[1], UD_AV_ENTRY_SIZE - sizeof(ud_av_ddr_p[0])); + ud_av_ddr_p[0] = ud_av_host_p[0]; + } + hh_ret = HH_OK; + MOSAL_spinlock_unlock(&(udavm->table_spinlock)); + } + ret = VIP_array_find_release(udavm->udavs_array, ah_index); + MTL_DEBUG3(MT_FLFMT("Calling VIP_array_find_release ret=%d"), ret); + if ( ret!=VIP_OK ) { + MTL_ERROR1("%s: Internal mismatch - hv_index (%d) is not in array\n", __func__,ah_index); + hh_ret = ret; + } + MTL_DEBUG4(MT_FLFMT("THH_udavm_modify_av: address handle = " MT_ULONG_PTR_FMT ", entry index=%d"),ah, ah_index); + MT_RETURN(hh_ret); +} + +/************************************************************************************/ +HH_ret_t THH_udavm_query_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah, + /*OUT*/ VAPI_ud_av_t *av_p ) +{ + HH_ret_t hh_ret; + u_int32_t ah_index; + VIP_common_ret_t ret=VIP_OK; + u_int32_t *ud_av_host_p= (u_int32_t*)(MT_ulong_ptr_t)ah; + + FUNC_IN; + + if (udavm == NULL) { + MTL_ERROR4("THH_udavm_query_av: udavm is NULL.\n"); + MT_RETURN(HH_EINVAL); + } + + if (av_p == NULL) { + MTL_ERROR4("THH_udavm_query_av: av_p is NULL.\n"); + MT_RETURN(HH_EINVAL); + } + +#if 0 /* ??? new vapi 3.0 code */ + /* Check that ah within the table and aligned to entry beginning */ + if (IS_INVALID_AV_ALIGN(ud_av_p)) { + MTL_ERROR4("THH_udavm_query_av: invalid av alignment.\n"); + MT_RETURN(HH_EINVAL); + } + if (IS_INVALID_AV_MEMBER(udavm, ud_av_p)) { + MTL_DEBUG4("THH_udavm_query_av: invalid ah (" MT_ULONG_PTR_FMT ").\n",ah); + MT_RETURN(HH_EINVAL_AV_HNDL); + } +#endif + + /* Check that ah within the table and aligned to entry beginning */ + if (IS_INVALID_AV(udavm, ud_av_host_p) || ud_av_host_p[0] == 0) { + MTL_DEBUG4("THH_udavm_query_av: invalid ah (0x%lX).\n",ah); + //MTL_ERROR1("THH_udavm_query_av: invalid ah=%p table=%p\n",ud_av_host_p, udavm->ud_av_table_host); + MT_RETURN(HH_EINVAL_AV_HNDL); + } + + /* check that ah is a valid (allocated) handle */ + ah_index = (u_int32_t)HANDLE_2_INDEX(udavm, ud_av_host_p); + ret = VIP_array_find_hold(udavm->udavs_array, ah_index, NULL); + MTL_DEBUG3(MT_FLFMT("Calling VIP_array_find_hold, hv_index=%d, ret=%d\n"), ah_index, ret); + if ( ret!=VIP_OK ) { + MTL_ERROR4("THH_udavm_query_av: handle is not valid.\n"); + hh_ret = HH_EINVAL_AV_HNDL; + } else { /* DO IT */ + MOSAL_spinlock_dpc_lock(&(udavm->table_spinlock)); + hh_ret = THH_udavm_parse_udav_entry(ud_av_host_p, av_p); + MOSAL_spinlock_unlock(&(udavm->table_spinlock)); + } + ret = VIP_array_find_release(udavm->udavs_array, ah_index); + MTL_DEBUG3(MT_FLFMT("Calling VIP_array_find_release ret=%d"), ret); + if ( ret!=VIP_OK ) { + MTL_ERROR1("%s: Internal mismatch - hv_index (%d) is not in array\n", __func__, ah_index); + hh_ret = ret; + } + + MTL_DEBUG4(MT_FLFMT("THH_udavm_query_av: address handle = " MT_ULONG_PTR_FMT ", entry index=%d"),ah, ah_index); +#if 1 <= MAX_DEBUG + print_udav(av_p); +#endif + MT_RETURN(hh_ret); + +} + +/************************************************************************************/ +HH_ret_t THH_udavm_destroy_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah ) +{ + u_int32_t ah_index; + VIP_common_ret_t ret=VIP_OK; + u_int32_t *ud_av_host_p= (u_int32_t*)(MT_ulong_ptr_t)ah; + HH_ret_t hh_ret = HH_OK; + + FUNC_IN; + + if (udavm == NULL) { + MTL_ERROR4("THH_udavm_destroy_av: udavm is NULL.\n"); + MT_RETURN(HH_EINVAL); + } + +#if 0 /* ??? 
new vapi_3.0 code */
+ /* Check that given ah is within the table and aligned to entry size */
+ if (IS_INVALID_AV_ALIGN(ud_av_p)) {
+ MTL_ERROR4("THH_udavm_destroy_av: invalid av alignment.\n");
+ MT_RETURN(HH_EINVAL);
+ }
+ if (IS_INVALID_AV_MEMBER(udavm, ud_av_p)) {
+ MTL_DEBUG4("THH_udavm_destroy_av: invalid ah (" MT_ULONG_PTR_FMT ").\n",ah);
+ MT_RETURN(HH_EINVAL_AV_HNDL);
+ }
+#endif
+
+ /* Check that given ah is within the table and aligned to entry size */
+ if (IS_INVALID_AV(udavm, ud_av_host_p) || ud_av_host_p[0] == 0) {
+ MTL_DEBUG4("THH_udavm_destroy_av: invalid ah (0x%lX).\n",ah);
+ //MTL_ERROR1("THH_udavm_destroy_av: invalid ah=%p table=%p\n",ud_av_host_p, udavm->ud_av_table_host);
+ MT_RETURN(HH_EINVAL_AV_HNDL);
+ }
+
+ /* check that ah is a valid (allocated) handle and release it */
+ ah_index = (u_int32_t)HANDLE_2_INDEX(udavm, ud_av_host_p);
+ ret = VIP_array_erase_prepare(udavm->udavs_array, ah_index, NULL);
+ MTL_DEBUG3(MT_FLFMT("Calling VIP_array_erase_prepare, hv_index=%d, ret=%d\n"), ah_index, ret);
+ if ( ret==VIP_OK ) { /* DO IT */
+ /* Use the knowledge that PD is in the first Dword of UDAV entry - so only put zeros there */
+ ud_av_host_p[0] = 0;
+ if (udavm->ud_av_table_host != udavm->ud_av_table_ddr) {
+ // TBD - take advantage of ah_index and avoid pointer math
+ u_int32_t *ud_av_ddr_p = &udavm->ud_av_table_ddr[ud_av_host_p - udavm->ud_av_table_host];
+ ud_av_ddr_p[0] = 0;
+ }
+ ret = VIP_array_erase_done(udavm->udavs_array, ah_index, NULL);
+ if ( ret!=VIP_OK ) {
+ MTL_ERROR4("THH_udavm_destroy_av: internal error VIP_array_erase_done failed.\n");
+ }
+ /* decrement used AVs*/
+ MOSAL_spinlock_dpc_lock(&(udavm->table_spinlock));
+ udavm->used_avs_counter--;
+ MOSAL_spinlock_unlock(&(udavm->table_spinlock));
+ } else if ( ret==VIP_EBUSY ) {
+ MTL_ERROR4("THH_udavm_destroy_av: handle is busy (in modify or query).\n");
+ hh_ret = HH_EBUSY;
+ } else if (ret == VIP_EINVAL_HNDL) {
+ MTL_ERROR4("THH_udavm_destroy_av: Invalid handle.\n");
+ hh_ret = HH_EINVAL_AV_HNDL;
+ }
+
+ MTL_DEBUG4(MT_FLFMT("THH_udavm_destroy_av: address handle = " MT_ULONG_PTR_FMT ", entry index=%d"),ah, ah_index);
+ MT_RETURN(hh_ret); /* return the HH status, not the VIP array status */
+}
+
+
+
+
+/************ LOCAL FUNCTIONS ************************************************************************/
+
+static void fill_udav_entry(/*IN */ HH_pd_hndl_t pd,
+ /*IN */ MT_bool is_new_pd,
+ /*IN */ VAPI_ud_av_t *av_p,
+ /*IN */ u_int32_t *av_entry_p)
+{
+
+ int i;
+ u_int32_t new_av_arr[8] = {0,0,0,0,0,0,0,0}; /* entry size is 32 bytes */
+ HH_pd_hndl_t pd_tmp;
+ u_int32_t pd_word = 0;
+
+ FUNC_IN;
+ /*print_udav(av_p);*/
+
+ /* if this is not a new PD, we have to clear the PD so the entry will not be considered valid by HW while we modify it */
+ /* need to save the current PD and write it back at the end */
+ if (!is_new_pd) {
+ pd_word = MOSAL_be32_to_cpu(*av_entry_p);
+ pd_tmp = MT_EXTRACT32(pd_word, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, pd),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, pd));
+ av_entry_p[0] = 0;
+ }
+ else {
+ pd_tmp = pd;
+ }
+
+ /* PD */
+ MT_INSERT_ARRAY32(new_av_arr, pd_tmp, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, pd),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, pd));
+
+ /* port */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->port, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, port_number),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, port_number));
+
+ /* rlid */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->dlid, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rlid),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rlid));
+
+ /* mylid_path_bits */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->src_path_bits, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, my_lid_path_bits),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, my_lid_path_bits));
+ /* grh enable */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->grh_flag, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, g),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, g));
+ /* hop_limit */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->hop_limit, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, hop_limit),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, hop_limit));
+
+ /* max_stat_rate */
+ /* 0 - Suited for matched links, no flow control.
+ All other values are translated to 1, since this is the only flow control of Tavor */
+ MT_INSERT_ARRAY32(new_av_arr, ((av_p->static_rate ==0) ? 0:1), MT_BIT_OFFSET(tavorprm_ud_address_vector_st, max_stat_rate),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, max_stat_rate));
+
+ /* msg size - we always put the max value */
+ MT_INSERT_ARRAY32(new_av_arr, 3, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, msg),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, msg));
+
+ /* mgid_index (index to port GID table) - 6th (LOG2_PORT_GID_TABLE_SIZE+1) bit is (port-1) */
+ MT_INSERT_ARRAY32(new_av_arr,
+ (av_p->sgid_index & MASK32(LOG2_PORT_GID_TABLE_SIZE)) |
+ ((av_p->port - 1) << LOG2_PORT_GID_TABLE_SIZE),
+ MT_BIT_OFFSET(tavorprm_ud_address_vector_st, mgid_index),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, mgid_index));
+
+ /* flow_label */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->flow_label, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, flow_label),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, flow_label));
+
+ /* tclass */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->traffic_class, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, tclass),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, tclass));
+
+ /* sl */
+ MT_INSERT_ARRAY32(new_av_arr, av_p->sl, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, sl),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, sl));
+
+
+ /* the gid is coming in BE format so we can insert it directly to the av_entry */
+ /* need to fill it only if GRH bit is set */
+ if (av_p->grh_flag) {
+ /* rgid_127_96 */
+ MT_INSERT_ARRAY32(av_entry_p, ((u_int32_t*)(av_p->dgid))[0], MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_127_96),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rgid_127_96));
+
+ /* rgid_95_64 */
+ MT_INSERT_ARRAY32(av_entry_p, ((u_int32_t*)(av_p->dgid))[1], MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_95_64),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rgid_95_64));
+
+ /* rgid_63_32 */
+ MT_INSERT_ARRAY32(av_entry_p, ((u_int32_t*)(av_p->dgid))[2], MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_63_32),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rgid_63_32));
+
+ /* rgid_31_0 */
+ MT_INSERT_ARRAY32(av_entry_p, ((u_int32_t*)(av_p->dgid))[3], MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_31_0),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rgid_31_0));
+ }
+ else { /* Arbel mode workaround - must give GRH >1 in lowest bits*/
+ /* rgid_31_0 */
+ MT_INSERT_ARRAY32(av_entry_p, 2, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rgid_31_0),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rgid_31_0));
+
+ }
+
+ /* now copy to the entry in the table with correct endianness */
+ /* need to write the first DWORD last since this is the PD that indicates
+ to HW that this entry is valid */
+
+ for (i = RGID_OFFSET-1; i >= 0; i--) {
+ /*MTL_DEBUG1(MT_FLFMT("(i=%d) %p <- 0x%X"),i,av_entry_p+i,new_av_arr[i]);*/
+ MOSAL_MMAP_IO_WRITE_DWORD(av_entry_p+i,MOSAL_cpu_to_be32(new_av_arr[i]));
+ }
+
+ FUNC_OUT;
+
+}
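+
+/*
+ * For reference: the MT_INSERT_ARRAY32 calls above pack a value into the
+ * dword array at a bit offset/size taken from the tavorprm layout headers.
+ * A minimal sketch of those semantics (illustration only, assuming a field
+ * never straddles a 32-bit boundary):
+ *
+ *   static void insert_array32(u_int32_t *arr, u_int32_t val,
+ *                              unsigned bit_off, unsigned bit_sz)
+ *   {
+ *     unsigned dw = bit_off >> 5;   // index of the dword holding the field
+ *     unsigned sh = bit_off & 31;   // bit position within that dword
+ *     u_int32_t mask = (bit_sz >= 32) ? 0xFFFFFFFFU : ((1U << bit_sz) - 1U);
+ *     arr[dw] = (arr[dw] & ~(mask << sh)) | ((val & mask) << sh);
+ *   }
+ */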
+
+/************************************************************************************/
+
+HH_ret_t THH_udavm_parse_udav_entry(u_int32_t *ud_av_p,
+ VAPI_ud_av_t *av_p)
+{
+ u_int32_t i;
+ u_int32_t tmp_av_arr[8] = {0,0,0,0,0,0,0,0}; /* entry size is 32 bytes */
+ HH_pd_hndl_t pd_tmp = 0;
+
+ FUNC_IN;
+ /* read entry to tmp area */
+ /* the gid should stay in BE format so we don't change its endianness */
+ for (i=0; i<RGID_OFFSET; i++) {
+ tmp_av_arr[i] = MOSAL_be32_to_cpu(ud_av_p[i]);
+ }
+
+ /* port */
+ av_p->port = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, port_number),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, port_number));
+
+ /* rlid */
+ av_p->dlid = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, rlid),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, rlid));
+
+ /* mylid_path_bits */
+ av_p->src_path_bits = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, my_lid_path_bits),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, my_lid_path_bits));
+ /* grh enable */
+ av_p->grh_flag = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, g),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, g));
+ /* hop_limit */
+ av_p->hop_limit = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, hop_limit),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, hop_limit));
+
+ /* max_stat_rate */
+ /* 0 stays 0, 1 is translated to 3 as defined in IPD encoding: IB-spec. 9.11.1, table 63 */
+ i = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, max_stat_rate),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, max_stat_rate));
+ av_p->static_rate = (i ? 3:0);
+
+ /* msg size - not needed for the av info */
+
+ /* mgid_index (index to port GID table)*/
+ av_p->sgid_index = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, mgid_index),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, mgid_index)) & MASK32(LOG2_PORT_GID_TABLE_SIZE);
+ /* TBD: sanity check that 6th bit of mgid_index matches port field (-1) */
+
+ /* flow_label */
+ av_p->flow_label = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, flow_label),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, flow_label));
+
+ /* tclass */
+ av_p->traffic_class = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, tclass),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, tclass));
+
+ /* sl */
+ av_p->sl = MT_EXTRACT_ARRAY32(tmp_av_arr, MT_BIT_OFFSET(tavorprm_ud_address_vector_st, sl),
+ MT_BIT_SIZE(tavorprm_ud_address_vector_st, sl));
+
+ /* gid stays in BE so it is extracted directly from the ud_av_p */
+ /* rgid_127_96 */
+ memcpy(av_p->dgid,ud_av_p+(MT_BYTE_OFFSET(tavorprm_ud_address_vector_st, rgid_127_96) >> 2),
+ sizeof(av_p->dgid));
+
+
+#if 1 <= MAX_DEBUG
+ print_udav(av_p);
+#endif
+ MT_RETURN(HH_OK);
+
+
+}
+
+/************************************************************************************/
+#ifdef MAX_DEBUG
+
+static void print_udav(VAPI_ud_av_t *av_p)
+{
+ FUNC_IN;
+ MTL_DEBUG1("UDAV values:\n==================\n sl = %d \n dlid = %d\n src_path_bits = %d\n static_rate = %d\n grh_flag = %d\n traffic_class = %d\n"
+ " flow_label = %d\n hop_limit = %d\n sgid_index = %d\n port = %d\n, dgid = %d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d\n",
+ av_p->sl, av_p->dlid, av_p->src_path_bits, av_p->static_rate, av_p->grh_flag, av_p->traffic_class, av_p->flow_label,
+ av_p->hop_limit, av_p->sgid_index, av_p->port, av_p->dgid[0],av_p->dgid[1],av_p->dgid[2],av_p->dgid[3],av_p->dgid[4],av_p->dgid[5],
av_p->dgid[6],av_p->dgid[7],av_p->dgid[8],av_p->dgid[9],av_p->dgid[10],av_p->dgid[11],
+ av_p->dgid[12],av_p->dgid[13],av_p->dgid[14],av_p->dgid[15]);
+ FUNC_OUT;
+}
+
+
+#endif
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.h
new file mode 100644
index 00000000..852d8bad
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/udavm/udavm.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(H_TUDAV_H)
+#define H_TUDAV_H
+
+#include
+#include
+#include
+#include
+#include
+
+
+/************************************************************************
+ * Function: THH_udavm_create
+ *
+ * Arguments:
+ * version_p - Version information (see ...)
+ * ud_av_table_memkey
+ * ud_av_table
+ * ud_av_table_sz
+ * udavm_p - Allocated object
+ *
+ * Returns:
+ * HH_OK
+ * HH_EINVAL - Invalid parameters (NULLs)
+ * HH_EAGAIN - Not enough resources to allocate object
+ *
+ * Description:
+ * Create the THH_udavm_t class instance.
+ ************************************************************************/
+
+extern HH_ret_t THH_udavm_create( /*IN */ THH_ver_info_t *version_p,
+ /*IN */ VAPI_lkey_t ud_av_table_memkey,
+ /*IN */ MT_virt_addr_t ud_av_table,
+ /*IN */ MT_size_t ud_av_table_sz,
+ /*IN */ MT_bool av_in_host_mem,
+ /*OUT*/ THH_udavm_t *udavm_p,
+ /*OUT*/ char **av_ddr_base,
+ /*OUT*/ char **av_host_base);
+
+
+/************************************************************************
+ * Function: THH_udavm_destroy
+ *
+ Arguments:
+ udavm - object to destroy
+
+ Returns:
+ HH_OK
+ HH_EINVAL - Unknown object
+
+ Description:
+ Free associated memory resources of this object.
+ ************************************************************************/
+
+extern HH_ret_t THH_udavm_destroy( /*IN */ THH_udavm_t udavm );
+
+
+
+
+/************************************************************************
+ * Function: THH_udavm_get_memkey
+ *
+ Arguments:
+ udavm - the object to work on
+ table_memkey_p - pointer to place the memory key of the registered table
+
+ Returns:
+ HH_OK
+ HH_EINVAL - Memory key was not set yet (or NULL ptr.) or Unknown object
+
+ Description:
+ Return the memory key associated with UD AVs of this object.
+ ************************************************************************/ + +extern HH_ret_t THH_udavm_get_memkey( /*IN */ THH_udavm_t udavm, + /*IN */ VAPI_lkey_t *table_memkey_p ); + + + + +/************************************************************************ + * Function: THH_udavm_create_av + * + Arguments: + udavm - + pd - PD of given UD AV + av_p - The address vector + ah_p - Returned address handle + + Returns: + HH_OK + HH_EINVAL - Invalid parameters + HH_EAGAIN - No available resources (UD AV entries) + + Description: Create address handle for given UD address vector. + + ************************************************************************/ + +extern HH_ret_t THH_udavm_create_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_pd_hndl_t pd, + /*IN */ VAPI_ud_av_t *av_p, + /*OUT*/ HH_ud_av_hndl_t *ah_p ); + + + +/************************************************************************ + * Function: THH_udavm_modify_av + * + Arguments: + udavm + ah - The address handle of UD AV to modify + av_p - The updated UD AV + + Returns: + HH_OK + HH_EINVAL - Invalid parameters + HH_EINVAL_AV_HNDL - Invalid address handle (no such handle) + + Description: + Modify the UD AV entry associated with given address handle. + + ************************************************************************/ + +extern HH_ret_t THH_udavm_modify_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah, + /*IN */ VAPI_ud_av_t *av_p ); + + +/************************************************************************ + * Function: THH_udavm_query_av + * + + Arguments: + udavm + ah - The address handle of UD AV to query + av_p - The UD AV associated with given handle + + Returns: + HH_OK + HH_EINVAL - Invalid parameters + HH_EINVAL_AV_HNDL - Invalid address handle (no such handle) + + Description: + Get the UD AV associated with given address handle. + + ************************************************************************/ +extern HH_ret_t THH_udavm_query_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah, + /*OUT*/ VAPI_ud_av_t *av_p ); + + +/************************************************************************ + * Function: THH_udavm_destroy_av + * + Arguments: + udavm + ah - The address handle of UD AV to destroy + + Returns: + HH_OK + HH_EINVAL - Invalid udavm + HH_EINVAL_AV_HNDL - Invalid address handle (no such handle) + + Description: + Free UD AV entry associated with given address handle. + **************************************************************************/ +extern HH_ret_t THH_udavm_destroy_av( /*IN */ THH_udavm_t udavm, + /*IN */ HH_ud_av_hndl_t ah ); + + +extern HH_ret_t THH_udavm_parse_udav_entry(u_int32_t *ud_av_p, + VAPI_ud_av_t *av_p); + + + +#endif /* H_TUDAV_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.c new file mode 100644 index 00000000..4e694b56 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.c @@ -0,0 +1,747 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +/****************************************************************************** + * Function: THH_uldm_create + *****************************************************************************/ +HH_ret_t THH_uldm_create( /*IN*/ THH_hob_t hob, + /*IN*/ MT_phys_addr_t uar_base, + /*IN*/ u_int8_t log2_max_uar, + /*IN*/ u_int8_t log2_uar_pg_sz, + /*IN*/ u_int32_t max_pd, + /*OUT*/ THH_uldm_t *uldm_p ) +{ + u_int32_t max_uar; + THH_uldm_t new_uldm_obj; + + MTL_DEBUG4("ENTERING THH_uldm_create: uar_base = " PHYS_ADDR_FMT ", log2_max_uar = %d\nlog2_uar_pg_sz=%d, max_pd=%d\n", + uar_base, log2_max_uar, log2_uar_pg_sz, max_pd); + /* create new uldm object */ + new_uldm_obj = (THH_uldm_t) MALLOC(sizeof(THH_uldm_obj_t)); + if (!new_uldm_obj) { + MTL_ERROR1("THH_uldm_create: MALLOC of new_uldm_obj failed\n"); + return HH_ENOMEM; + } + memset(new_uldm_obj, 0, sizeof(THH_uldm_obj_t)); + + max_uar = (1 << log2_max_uar); + new_uldm_obj->max_uar = max_uar; + + + /* create UAR free pool */ + new_uldm_obj->uldm_uar_table = (THH_uldm_uar_entry_t *) VMALLOC(max_uar * sizeof(THH_uldm_uar_entry_t)); + if (!new_uldm_obj->uldm_uar_table) { + FREE(new_uldm_obj); + MTL_ERROR1("THH_uldm_create: VMALLOC of uldm_uar_table failed\n"); + return HH_ENOMEM; + } + MTL_DEBUG4("THH_uldm_create: allocated uar table: addr = %p, size=%d\n", + (void *) (new_uldm_obj->uldm_uar_table), max_uar); + + memset(new_uldm_obj->uldm_uar_table, 0, max_uar * sizeof(THH_uldm_uar_entry_t)); + + MTL_TRACE4("THH_uldm_create: creating UAR pool\n"); + new_uldm_obj->uar_list.entries = new_uldm_obj->uldm_uar_table; + new_uldm_obj->uar_list.size = max_uar ; /* list size is number of ENTRIES */ + new_uldm_obj->uar_list.head = 0; + new_uldm_obj->uar_list.meta = &(new_uldm_obj->uar_meta); + + new_uldm_obj->uar_meta.entry_size = sizeof(THH_uldm_uar_entry_t); + new_uldm_obj->uar_meta.prev_struct_offset = (MT_ulong_ptr_t) &(((THH_uldm_uar_entry_t *) (NULL))->u1.epool.prev); + new_uldm_obj->uar_meta.next_struct_offset = (MT_ulong_ptr_t) &(((THH_uldm_uar_entry_t *) (NULL))->u1.epool.next); + + MTL_DEBUG4("THH_uldm_create: calling epool_init: entries = %p, size=%lu, head=%lu\nentry_size=%d, prev_offs=%d, next_offs=%d\n", + new_uldm_obj->uar_list.entries, new_uldm_obj->uar_list.size, new_uldm_obj->uar_list.head, + new_uldm_obj->uar_meta.entry_size, new_uldm_obj->uar_meta.prev_struct_offset, + new_uldm_obj->uar_meta.next_struct_offset); + epool_init(&(new_uldm_obj->uar_list)); + + /* set uar's 0 and 1 to unavailable */ + MTL_TRACE4("THH_uldm_create: Reserving UARs 0 and 1\n"); + epool_reserve(&(new_uldm_obj->uar_list), 0, 2); + + /* create PD free pool */ + MTL_TRACE4("THH_uldm_create: creating PD pool\n"); + new_uldm_obj->uldm_pd_table = (THH_uldm_pd_entry_t *) VMALLOC(max_pd * sizeof(THH_uldm_pd_entry_t)); + if 
(!new_uldm_obj->uldm_pd_table) { + VFREE(new_uldm_obj->uldm_uar_table); + FREE(new_uldm_obj); + return HH_ENOMEM; + } + memset(new_uldm_obj->uldm_pd_table, 0, max_pd * sizeof(THH_uldm_pd_entry_t)); + + + new_uldm_obj->pd_list.entries = new_uldm_obj->uldm_pd_table; + new_uldm_obj->pd_list.size = max_pd; /* list size is number of ENTRIES */ + new_uldm_obj->pd_list.head = 0; + new_uldm_obj->pd_list.meta = &(new_uldm_obj->pd_meta); + + new_uldm_obj->pd_meta.entry_size = sizeof(THH_uldm_pd_entry_t); + new_uldm_obj->pd_meta.prev_struct_offset = (MT_ulong_ptr_t) &(((THH_uldm_pd_entry_t *) (NULL))->u1.epool.prev); + new_uldm_obj->pd_meta.next_struct_offset = (MT_ulong_ptr_t) &(((THH_uldm_pd_entry_t *) (NULL))->u1.epool.next); + + MTL_DEBUG4("THH_uldm_create: calling epool_init: entries = %p, size=%lu, head=%lu\nentry_size=%d, prev_offs=%d, next_offs=%d\n", + new_uldm_obj->pd_list.entries, new_uldm_obj->pd_list.size, new_uldm_obj->pd_list.head, + new_uldm_obj->pd_meta.entry_size, new_uldm_obj->pd_meta.prev_struct_offset, + new_uldm_obj->pd_meta.next_struct_offset); + epool_init(&(new_uldm_obj->pd_list)); + + /* set pd's 0 and 1 to unavailable */ + MTL_TRACE4("THH_uldm_create: Reserving PDs 0 and 1\n"); + epool_reserve(&(new_uldm_obj->pd_list), 0, THH_NUM_RSVD_PD); + + new_uldm_obj->uar_base = uar_base; + new_uldm_obj->log2_max_uar = log2_max_uar; + new_uldm_obj->log2_uar_pg_sz = log2_uar_pg_sz; + new_uldm_obj->max_pd = max_pd; + + /* save HOB handle */ + new_uldm_obj->hob = hob; + + /* return the object handle */ + *uldm_p = new_uldm_obj; + + MTL_DEBUG4("LEAVING THH_uldm_create - OK\n"); + + return HH_OK; +} + +/****************************************************************************** + * Function: THH_uldm_destroy + *****************************************************************************/ +HH_ret_t THH_uldm_destroy( /*IN*/ THH_uldm_t uldm ) +{ + u_int32_t i; + + MTL_DEBUG4("==> THH_uldm_destroy\n"); + for (i=0; i< uldm->max_uar; i++) { + if (uldm->uldm_uar_table[i].valid) { + /* is uar valid check is within this function */ + THH_uldm_free_uar(uldm,i); + } + } + VFREE(uldm->uldm_uar_table); + VFREE(uldm->uldm_pd_table); + + epool_cleanup(&uldm->pd_list); + epool_cleanup(&uldm->uar_list); + FREE(uldm); + MTL_DEBUG4("<== THH_uldm_destroy\n"); + return HH_OK; +} + +/************************************************************************* + * Function: THH_uldm_alloc_ul_res + *************************************************************************/ +HH_ret_t THH_uldm_alloc_ul_res( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*OUT*/ THH_hca_ul_resources_t *hca_ul_resources_p ) +{ + u_int32_t uar_index; + MT_virt_addr_t uar_map; + HH_ret_t ret = HH_OK; + +#ifndef __DARWIN__ + MTL_DEBUG4("==> THH_uldm_alloc_ul_res. prot_ctx = 0x%x\n", prot_ctx); +#else + MTL_DEBUG4("==> THH_uldm_alloc_ul_res.\n"); +#endif + + ret = THH_uldm_alloc_uar(uldm,prot_ctx, &uar_index, &uar_map); + if (ret != HH_OK) { + MTL_ERROR1("%s: failed allocating UAR. ERROR = %d\n", __func__, ret); + goto uldm_err; + } + + hca_ul_resources_p->uar_index = uar_index; + hca_ul_resources_p->uar_map = uar_map; + +uldm_err: + MTL_DEBUG4("<== THH_uldm_alloc_ul_res. 
ret = %d\n", ret); + return ret; +} + +/************************************************************************* + * Function: THH_uldm_free_ul_res + *************************************************************************/ +HH_ret_t THH_uldm_free_ul_res( /*IN*/ THH_uldm_t uldm, + /*IN*/ THH_hca_ul_resources_t *hca_ul_resources_p) +{ + HH_ret_t ret = HH_OK; + + MTL_DEBUG4("==> THH_uldm_free_ul_res. resources ptr = %p\n", (void *) hca_ul_resources_p); + if (hca_ul_resources_p->uar_index > 1) { + ret = THH_uldm_free_uar(uldm, hca_ul_resources_p->uar_index); + if (ret != HH_OK) { + MTL_ERROR1("%s: failed freeing UAR index %d. ERROR = %d\n", __func__, hca_ul_resources_p->uar_index, ret); + goto uldm_err; + } + } + + hca_ul_resources_p->uar_index = 0; + hca_ul_resources_p->uar_map = 0; + +uldm_err: + MTL_DEBUG4("<== THH_uldm_free_ul_res. ret = %d\n", ret); + return ret; +} + +/************************************************************************* + * Function: THH_uldm_alloc_uar + *************************************************************************/ +HH_ret_t THH_uldm_alloc_uar( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*OUT*/ u_int32_t *uar_index, + /*OUT*/ MT_virt_addr_t *uar_map ) +{ + unsigned long i; + HH_ret_t ret = HH_OK; + +#ifndef __DARWIN__ + MTL_DEBUG4("==> THH_uldm_alloc_uar. prot_ctx = 0x%x\n", prot_ctx); +#else + MTL_DEBUG4("==> THH_uldm_alloc_uar.\n"); +#endif + + if (uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_uldm_alloc_uar: ERROR : HCA device has not yet been opened\n"); + ret = HH_EINVAL; + goto uldm_err; + } + + i = epool_alloc(&(uldm->uar_list)) ; + if (i == EPOOL_NULL) { + MTL_ERROR1("THH_uldm_alloc_uar: ERROR : Could not allocate a UAR from pool\n"); + ret = HH_EINVAL; + goto uldm_err; + } else { + *uar_index = i; + memset(&uldm->uldm_uar_table[i], 0, sizeof(THH_uldm_uar_entry_t)); + uldm->uldm_uar_table[i].u1.uar.prot_ctx = prot_ctx; + uldm->uldm_uar_table[i].valid = TRUE; + *uar_map = (MT_virt_addr_t) MOSAL_map_phys_addr(/* phys addr */ uldm->uar_base + (i * (1 << uldm->log2_uar_pg_sz)), + ((MT_size_t)1 << uldm->log2_uar_pg_sz), + MOSAL_MEM_FLAGS_NO_CACHE | MOSAL_MEM_FLAGS_PERM_WRITE, + prot_ctx); + if (*uar_map == (MT_virt_addr_t) NULL) { +#ifndef __DARWIN__ + MTL_ERROR1("THH_uldm_alloc_uar: MOSAL_map_phys_addr failed for prot ctx %d, addr " PHYS_ADDR_FMT ", size %d\n", + prot_ctx, + (MT_phys_addr_t) (uldm->uar_base + (i * (1 << uldm->log2_uar_pg_sz))), + (1U << uldm->log2_uar_pg_sz)); +#else + MTL_ERROR1("THH_uldm_alloc_uar: MOSAL_map_phys_addr failed, addr " PHYS_ADDR_FMT ", size %d\n", + (MT_phys_addr_t) (uldm->uar_base + (i * (1 << uldm->log2_uar_pg_sz))), + (1U << uldm->log2_uar_pg_sz)); +#endif + return HH_EINVAL; + } + uldm->uldm_uar_table[i].u1.uar.virt_addr = *uar_map; + MTL_DEBUG4("THH_uldm_alloc_uar: index = %d, addr = " VIRT_ADDR_FMT "\n", *uar_index, *uar_map); + ret = HH_OK; + } + +uldm_err: + MTL_DEBUG4("<== THH_uldm_alloc_uar. ret = %d\n", ret); + return ret; +} + +/****************************************************************************** + * Function: THH_uldm_free_uar + *****************************************************************************/ +HH_ret_t THH_uldm_free_uar( /*IN*/ THH_uldm_t uldm, + /*IN*/ u_int32_t uar_index ) +{ + call_result_t rc; + HH_ret_t ret = HH_OK; + + /* check that index is valid */ + MTL_DEBUG4("==> THH_uldm_free_uar. 
uar_index = %d\n", uar_index); + if (uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_uldm_free_uar: ERROR : HCA device has not yet been opened\n"); + ret = HH_EINVAL; + goto err; + } + + if (uar_index > uldm->max_uar ) { + MTL_ERROR1("THH_uldm_free_uar: uar_index out of range (%d)\n", uar_index); + ret = HH_EINVAL; + goto err; + } + + if (!(uldm->uldm_uar_table[uar_index].valid)) { + MTL_ERROR1("THH_uldm_free_uar: uar_index was not allocated (%d)\n", uar_index); + ret = HH_EINVAL; + goto err; + } + /* Unmap previously mapped physical (will be page aligned, of course) */ + + rc = MOSAL_unmap_phys_addr(uldm->uldm_uar_table[uar_index].u1.uar.prot_ctx, + (MT_virt_addr_t) /* (void *) */ uldm->uldm_uar_table[uar_index].u1.uar.virt_addr, + ((MT_size_t)1 << uldm->log2_uar_pg_sz)); + + if (rc != MT_OK) { +#ifndef __DARWIN__ + MTL_ERROR1("THH_uldm_free_uar: MOSAL_unmap_phys_addr failed for prot ctx %d, addr %p, size %d\n", + uldm->uldm_uar_table[uar_index].u1.uar.prot_ctx, + (void *)uldm->uldm_uar_table[uar_index].u1.uar.virt_addr, + (1 << uldm->log2_uar_pg_sz)); +#else + MTL_ERROR1("THH_uldm_free_uar: MOSAL_unmap_phys_addr failed, addr %p, size %d\n", + (void *)uldm->uldm_uar_table[uar_index].u1.uar.virt_addr, + (1 << uldm->log2_uar_pg_sz)); +#endif + ret = HH_EINVAL; + goto err; + } + + uldm->uldm_uar_table[uar_index].valid = FALSE; + epool_free(&(uldm->uar_list), uar_index); + +err: + MTL_DEBUG4("<== THH_uldm_free_uar. ret = %d\n", ret); + return ret; + +} + +#pragma optimize( "g", off ) +/****************************************************************************** + * Function: THH_uldm_alloc_pd + *****************************************************************************/ +HH_ret_t THH_uldm_alloc_pd( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*IN-OUT*/ THH_pd_ul_resources_t *pd_ul_resources_p, + /*OUT*/ HH_pd_hndl_t *pd_p ) +{ + unsigned long i; + HH_ret_t ret = HH_OK; + THH_internal_mr_t mr_data; + VAPI_lkey_t lkey; + THH_mrwm_t mrwm; + THH_ddrmm_t ddrmm; + THH_udavm_t udavm; + MT_size_t adj_ud_av_table_sz = 0; + MT_virt_addr_t udav_virt_addr = 0; + MT_phys_addr_t udav_phys_addr = 0; + VAPI_size_t udav_phys_buf_size = 0; + MT_bool use_priv_udav, av_in_host_mem, hide_ddr; + u_int32_t pd_index; + u_int32_t max_ah_num = 0, max_requested_avs = 0; + unsigned int page_size; + call_result_t rc; + +#ifndef __DARWIN__ + MTL_DEBUG4("==> THH_uldm_alloc_pd. uldm = %p, prot_ctx = 0x%x, resources_p = %p\n", (void *) uldm, prot_ctx, (void *) pd_ul_resources_p); +#else + MTL_DEBUG4("==> THH_uldm_alloc_pd. 
uldm = %p, resources_p = %p\n", (void *) uldm, (void *) pd_ul_resources_p); +#endif + + if (uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("<==THH_uldm_alloc_pd: ERROR : HCA device has not yet been opened\n"); + return HH_EINVAL; + } + + + ret = THH_hob_get_udavm_info (uldm->hob, &udavm, &use_priv_udav, &av_in_host_mem, + &lkey, &max_ah_num, &hide_ddr ); + if (ret != HH_OK) { + MTL_ERROR1("<==THH_uldm_alloc_pd: ERROR: could not acquire udavm information (%d)\n", ret); + return ret; + } + + rc = MOSAL_get_page_size(prot_ctx, pd_ul_resources_p->udavm_buf, &page_size); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("%s: could not obtain page size of address="VIRT_ADDR_FMT), __func__, + pd_ul_resources_p->udavm_buf); + return HH_ENOMEM; + } + + if (use_priv_udav && (((pd_ul_resources_p->udavm_buf_sz) & (page_size - 1)) != 0)){ + MTL_ERROR1("<==THH_uldm_alloc_pd: ERROR : udavm_buf_size ("SIZE_T_FMT") is not a multiple of the page size\n", + pd_ul_resources_p->udavm_buf_sz); + return HH_EINVAL; + } + + if (!use_priv_udav) { + /* check to see that we are not trying to allocate space for more AVs than max reported in HCA capabilities */ + max_requested_avs = (u_int32_t)((pd_ul_resources_p->udavm_buf_sz) / (sizeof(struct tavorprm_ud_address_vector_st) / 8)); + if (max_requested_avs > max_ah_num) { + MTL_ERROR1("<==THH_uldm_alloc_pd: max AVs too large: requested %d, max available=%d\n", + max_requested_avs, max_ah_num); + return HH_EINVAL; + } + } + + i = epool_alloc(&uldm->pd_list) ; + if (i == EPOOL_NULL) { + MTL_ERROR1("<==THH_uldm_alloc_pd: could not allocate a PD from pool\n"); + return HH_EAGAIN; + } else { + pd_index = (u_int32_t) i; + memset(&uldm->uldm_pd_table[pd_index], 0, sizeof(THH_uldm_pd_entry_t)); + uldm->uldm_pd_table[pd_index].u1.pd.prot_ctx = prot_ctx; + *pd_p = (HH_pd_hndl_t) i; + + if (use_priv_udav) { + pd_ul_resources_p->udavm_buf_memkey = lkey; + } else { + ret = THH_hob_get_mrwm(uldm->hob,&mrwm); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_alloc_pd: could not acquire MRWM handle (%d)\n", ret); + goto udavm_get_mwrm_err; + } + /* Use DDR resources if appropriate flag set at module initialization, + or if Tavor firmware indicates that should not hide DDR memory, or if not a PD for a SQP + */ + if ((hide_ddr == FALSE) && (av_in_host_mem == FALSE) && (pd_ul_resources_p->pd_flags != PD_FOR_SQP)) { + ret = THH_hob_get_ddrmm(uldm->hob,&ddrmm); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_alloc_pd: could not acquire DDRMM handle (%d)\n", ret); + } else { + + /* get memory in DDR. If cannot, then use regular memory allocated in */ + /* call to THHUL_uldm_alloc_pd_prep */ + adj_ud_av_table_sz = MT_UP_ALIGNX_SIZE((pd_ul_resources_p->udavm_buf_sz), MOSAL_SYS_PAGE_SHIFT); + udav_phys_buf_size = adj_ud_av_table_sz; + ret = THH_ddrmm_alloc(ddrmm, + adj_ud_av_table_sz, + MOSAL_SYS_PAGE_SHIFT, + &udav_phys_addr); + if (ret != HH_OK) { + MTL_DEBUG1("THH_uldm_alloc_pd: could not allocate protected udavm area in DDR(err = %d)\n", ret); + udav_phys_addr = (MT_phys_addr_t) 0; + } else { +#ifndef __DARWIN__ + MTL_DEBUG4("THH_uldm_alloc_pd. prot_ctx = 0x%x, phys_addr = " PHYS_ADDR_FMT + ", buf_size="SIZE_T_FMT", adj_size="SIZE_T_FMT"\n", + prot_ctx, + udav_phys_addr, pd_ul_resources_p->udavm_buf_sz, + adj_ud_av_table_sz); +#else + MTL_DEBUG4("THH_uldm_alloc_pd. 
phys_addr = " PHYS_ADDR_FMT + ", buf_size="SIZE_T_FMT", adj_size="SIZE_T_FMT"\n", + udav_phys_addr, pd_ul_resources_p->udavm_buf_sz, + adj_ud_av_table_sz); +#endif + udav_virt_addr = (MT_virt_addr_t) MOSAL_map_phys_addr( udav_phys_addr , +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + (MT_size_t)udav_phys_buf_size, + MOSAL_MEM_FLAGS_NO_CACHE | + MOSAL_MEM_FLAGS_PERM_WRITE | + MOSAL_MEM_FLAGS_PERM_READ , + prot_ctx); + MTL_DEBUG4("THH_uldm_alloc_pd. udav virt_addr = " VIRT_ADDR_FMT "\n", udav_virt_addr); + if (udav_virt_addr == (MT_virt_addr_t) NULL) { + MTL_ERROR1("THH_uldm_alloc_pd: could not map physical address " PHYS_ADDR_FMT " to virtual\n", + udav_phys_addr); +/*** warning C4242: 'function' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + ret = THH_ddrmm_free(ddrmm,udav_phys_addr,(MT_size_t)udav_phys_buf_size); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_alloc_pd: could not free protected udavm area in DDR(err = %d)\n", ret); + } + udav_phys_addr = (MT_phys_addr_t) 0; + } else { + /* substitute DDR-allocated region for the one passed by pd_ul_resources_p */ + pd_ul_resources_p->udavm_buf = udav_virt_addr; + } + } + } + } + /* check if have a buffer allocated for udav table */ + if (pd_ul_resources_p->udavm_buf == (MT_virt_addr_t) NULL) { + MTL_ERROR1("THH_uldm_alloc_pd: no udavm area allocated.\n"); + /*value of ret was set above by some failure */ + goto no_udavm_area; + } + + memset(&mr_data, 0, sizeof(mr_data)); + mr_data.force_memkey = FALSE; + mr_data.memkey = 0; + mr_data.pd = (HH_pd_hndl_t) pd_index; + mr_data.size = pd_ul_resources_p->udavm_buf_sz; + mr_data.vm_ctx = prot_ctx; + if (udav_phys_addr) { + VAPI_phy_addr_t udav_phy = udav_phys_addr; + mr_data.num_bufs = 1; /* != 0 iff physical buffesrs supplied */ + mr_data.phys_buf_lst = &udav_phy; /* size = num_bufs */ + mr_data.buf_sz_lst = &udav_phys_buf_size; /* [num_bufs], corresponds to phys_buf_lst */ + mr_data.start = pd_ul_resources_p->udavm_buf; + } else { + /* using user-level buffer: check that buffer address is aligned to entry size; */ + if (pd_ul_resources_p->udavm_buf != + (MT_UP_ALIGNX_VIRT((pd_ul_resources_p->udavm_buf), + ceil_log2((sizeof(struct tavorprm_ud_address_vector_st) / 8))))) { + MTL_ERROR1("THH_uldm_alloc_pd: provided HOST MEM buffer not properly aligned.\n"); + /*value of ret was set above by some failure */ + ret = HH_EINVAL; + goto no_udavm_area; + } + mr_data.start = (IB_virt_addr_t)pd_ul_resources_p->udavm_buf; + MTL_DEBUG1(MT_FLFMT("%s: User level UDAV tbl = " U64_FMT), __func__, mr_data.start); + } + uldm->uldm_pd_table[pd_index].valid = TRUE; /* set to valid here, so that THH_uldm_get_protection_ctx will work */ + ret = THH_mrwm_register_internal(mrwm, &mr_data, &lkey); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_alloc_pd: could not register udavm table (%d)\n", ret); + uldm->uldm_pd_table[pd_index].valid = FALSE; + goto udavm_table_register_err; + } + pd_ul_resources_p->udavm_buf_memkey = lkey; + uldm->uldm_pd_table[pd_index].u1.pd.lkey = lkey; + uldm->uldm_pd_table[pd_index].u1.pd.udav_table_ddr_phys_addr = udav_phys_addr; + uldm->uldm_pd_table[pd_index].u1.pd.udav_table_ddr_virt_addr = udav_virt_addr; +/*** warning C4242: '=' : conversion from 'VAPI_size_t' to 'MT_size_t', possible loss of data ***/ + uldm->uldm_pd_table[pd_index].u1.pd.udav_table_size = (udav_phys_addr ? 
(MT_size_t)udav_phys_buf_size : 0); + MTL_DEBUG4("THH_uldm_alloc_pd: PD %u: saving phys addr = " PHYS_ADDR_FMT ", virt addr = " VIRT_ADDR_FMT ", size="SIZE_T_FMT"\n", + pd_index, udav_phys_addr, udav_virt_addr, uldm->uldm_pd_table[pd_index].u1.pd.udav_table_size ); + } + /* set VALID flag only if successful */ + uldm->uldm_pd_table[pd_index].valid = TRUE; + ret = HH_OK; + MTL_DEBUG4("THH_uldm_alloc_pd. PD = %u\n", pd_index); + goto ok_retn; + } + +no_udavm_area: +udavm_get_mwrm_err: +udavm_table_register_err: + epool_free(&(uldm->pd_list),i); +ok_retn: + MTL_DEBUG4("<== THH_uldm_alloc_pd. ret = %d\n", ret); + return ret; + +} +#pragma optimize( "", on ) + +/****************************************************************************** + * Function: THH_uldm_free_pd + *****************************************************************************/ +HH_ret_t THH_uldm_free_pd( /*IN*/ THH_uldm_t uldm, + /*IN*/ HH_pd_hndl_t pd ) +{ + + HH_ret_t ret = HH_OK; + VAPI_lkey_t lkey; + THH_mrwm_t mrwm; + THH_udavm_t udavm; + THH_ddrmm_t ddrmm; + MT_virt_addr_t udav_virt_addr = 0; + MT_phys_addr_t udav_phys_addr = 0; + MT_size_t udav_size = 0; + MT_bool use_priv_udav, av_in_host_mem, hide_ddr; + call_result_t res; + u_int32_t max_ah_num = 0; + + MTL_DEBUG4("==> THH_uldm_free_pd\n"); + if (uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_uldm_free_pd: ERROR : HCA device has not yet been opened\n"); + ret = HH_EINVAL; + goto err_retn; + } + + if (pd >= uldm->max_pd ) { + MTL_ERROR1("THH_uldm_free_pd: pd out of range (%d)\n", pd); + ret = HH_EINVAL; + goto err_retn; + } + + if (!(uldm->uldm_pd_table[pd].valid)) { + MTL_ERROR1("THH_uldm_free_pd: pd was not allocated (%d)\n", pd); + ret = HH_EINVAL; + goto err_retn; + } + + ret = THH_hob_get_udavm_info (uldm->hob, &udavm, &use_priv_udav, &av_in_host_mem, + &lkey, &max_ah_num, &hide_ddr ); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_free_pd: could not acquire udavm information (%d)\n", ret); + goto get_udavm_info_err; + } + MTL_DEBUG4("THH_uldm_free_pd: udavm=%p, use_priv_udav = %s, lkey = 0x%x\n", + udavm, (use_priv_udav ? 
"TRUE" : "FALSE"), lkey); + if (!use_priv_udav) { + + ret = THH_hob_get_mrwm(uldm->hob,&mrwm); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_free_pd: could not acquire MRWM handle (%d)\n", ret); + ret = HH_OK; + goto udavm_get_mwrm_err; + } + + lkey = uldm->uldm_pd_table[pd].u1.pd.lkey; /* get lkey saved with this PD */ + MTL_DEBUG4("THH_uldm_free_pd: DEREGISTERING mem region with lkey = 0x%x\n", lkey); + ret = THH_mrwm_deregister_mr(mrwm, lkey); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_free_pd: THH_mrwm_deregister_mr error (%d)\n", ret); + ret = HH_OK; + goto mrwm_unregister_err; + } + + udav_virt_addr = uldm->uldm_pd_table[pd].u1.pd.udav_table_ddr_virt_addr; + udav_phys_addr = uldm->uldm_pd_table[pd].u1.pd.udav_table_ddr_phys_addr; + udav_size = uldm->uldm_pd_table[pd].u1.pd.udav_table_size; + + MTL_DEBUG4("THH_uldm_free_pd: PD %d: udav_phys_addr =" PHYS_ADDR_FMT ", udav_virt_addr = " VIRT_ADDR_FMT ", udav_size = "SIZE_T_FMT"\n", + pd, udav_phys_addr, udav_virt_addr, udav_size); + /* If UDAV was allocated in DDR, free up the DDR memory here */ + if (udav_phys_addr != (MT_phys_addr_t) 0) { + ret = THH_hob_get_ddrmm(uldm->hob,&ddrmm); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_free_pd: could not acquire DDRMM handle (%d)\n", ret); + ret = HH_OK; + goto udavm_get_ddrmm_err; + } + + if ((res = MOSAL_unmap_phys_addr( uldm->uldm_pd_table[pd].u1.pd.prot_ctx, + (MT_virt_addr_t) udav_virt_addr, + udav_size )) != MT_OK) { + MTL_ERROR1("THH_uldm_free_pd: MOSAL_unmap_phys_addr error for udavm:%d\n", res); + ret = HH_OK; + goto unmap_phys_addr_err; + } + ret = THH_ddrmm_free(ddrmm, udav_phys_addr, udav_size); + if (ret != HH_OK) { + MTL_ERROR1("THH_uldm_free_pd: THH_ddrmm_free error (%d)\n", ret); + ret = HH_OK; + goto ddrmm_free_err; + } + } + } + ret = HH_OK; + goto ok_retn; + +ddrmm_free_err: + unmap_phys_addr_err: +udavm_get_ddrmm_err: +mrwm_unregister_err: +udavm_get_mwrm_err: +ok_retn: + /* make resource inaccessible */ + uldm->uldm_pd_table[pd].valid = FALSE; + epool_free(&(uldm->pd_list), pd); + +get_udavm_info_err: +err_retn: + MTL_DEBUG4("<== THH_uldm_free_pd. ret = %d\n", ret); + return ret; +} + +/****************************************************************************** + * Function: THH_uldm_get_protection_ctx + *****************************************************************************/ +HH_ret_t THH_uldm_get_protection_ctx( /*IN*/ THH_uldm_t uldm, + /*IN*/ HH_pd_hndl_t pd, + /*OUT*/ MOSAL_protection_ctx_t *prot_ctx_p ) +{ + HH_ret_t ret = HH_OK; + + MTL_DEBUG4("==> THH_uldm_get_protection_ctx. uldm = %p, pd = %d, prot_ctx_p = %p\n", + (void *) uldm, pd, (void *)prot_ctx_p); + if (uldm == (THH_uldm_t)THH_INVALID_HNDL) { + MTL_ERROR1("THH_uldm_get_protection_ctx: ERROR : HCA device has not yet been opened\n"); + ret = HH_EINVAL; + goto err_retn; + } + + /* protect against bad calls */ + if (uldm == (THH_uldm_t)0) { + MTL_ERROR1("THH_uldm_get_protection_ctx: ERROR : uldm handle is ZERO\n"); + ret = HH_EINVAL; + goto err_retn; +} + + if (pd >= uldm->max_pd ) { + MTL_ERROR1("THH_uldm_get_protection_ctx: pd out of range (%d)\n", pd); + ret = HH_EINVAL; + goto err_retn; + } + + if (!(uldm->uldm_pd_table[pd].valid)) { + if (pd != THH_RESERVED_PD) { + MTL_ERROR1("THH_uldm_get_protection_ctx: pd was not allocated (%d)\n", pd); + } + ret = HH_EINVAL; + goto err_retn; + } + + *prot_ctx_p = uldm->uldm_pd_table[pd].u1.pd.prot_ctx; +#ifndef __DARWIN__ + MTL_DEBUG4("THH_uldm_get_protection_ctx. 
ctx = %d\n", *prot_ctx_p); +#else + MTL_DEBUG4("THH_uldm_get_protection_ctx\n"); +#endif + +err_retn: + MTL_DEBUG4("<== THH_uldm_get_protection_ctx. ret = %d\n", ret); + return ret; +} + +HH_ret_t THH_uldm_get_num_objs( /*IN*/ THH_uldm_t uldm, u_int32_t *num_alloc_us_res_p, + u_int32_t *num_alloc_pds_p) +{ + u_int32_t i; + u_int32_t alloc_res = 0; + + MTL_DEBUG4("==> THH_uldm_get_num_objs\n"); + if ((uldm == (THH_uldm_t)THH_INVALID_HNDL) || + (uldm == (THH_uldm_t)0) || + (!num_alloc_us_res_p && !num_alloc_pds_p)){ + MTL_ERROR1("THH_uldm_get_num_objs: Invalid uldm handle, or all return params are NULL\n"); + return HH_EINVAL; + } + + for (i=0; i< uldm->max_uar; i++) { + if (uldm->uldm_uar_table[i].valid) { + /* is uar valid check is within this function */ + alloc_res++; + } + } + if (num_alloc_us_res_p) { + *num_alloc_us_res_p=alloc_res; + } + + alloc_res = 0; + for (i=0; i< uldm->max_pd; i++) { + if (uldm->uldm_pd_table[i].valid) { + /* is uar valid check is within this function */ + alloc_res++; + } + } + if (num_alloc_pds_p) { + *num_alloc_pds_p=alloc_res; + } + + return HH_OK; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.h new file mode 100644 index 00000000..e768a478 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm.h @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_THH_ULDM_H +#define H_THH_ULDM_H + +#include +#include +#include +#include +#include + + +/****************************************************************************** + * Function: THH_uldm_create + * + * Arguments: + * hob - The THH_hob object in which this object will be included + * uar_base - Physical base address of UARs (address of UAR0) + * log2_max_uar - Log2 of number of UARs (including 0 and 1) + * log2_uar_pg_sz - UAR page size as set in INIT_HCA + * max_pd - Maximum PDs allowed to be allocated + * uldm_p - Allocated THH_uldm object + + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * HH_EAGAIN - Not enough resources for creating this object + * + * Description: + * Create object context which manages the UAR and PD resources. 
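+ *
+ *    An illustrative call sketch (values here are placeholders; the real
+ *    caller is the HCA-open path, which derives uar_base, log2_max_uar,
+ *    log2_uar_pg_sz and max_pd from the device configuration):
+ *
+ *      THH_uldm_t uldm;
+ *      if (THH_uldm_create(hob, uar_base, log2_max_uar,
+ *                          log2_uar_pg_sz, max_pd, &uldm) != HH_OK) {
+ *          ... no UAR/PD management is available ...
+ *      }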
+ */ +HH_ret_t THH_uldm_create( /*IN*/ THH_hob_t hob, + /*IN*/ MT_phys_addr_t uar_base, + /*IN*/ u_int8_t log2_max_uar, + /*IN*/ u_int8_t log2_uar_pg_sz, + /*IN*/ u_int32_t max_pd, + /*OUT*/ THH_uldm_t *uldm_p ); + +/****************************************************************************** + * Function: THH_uldm_destroy + * + * Arguments: + * uldm - THH_uldm object to destroy + + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * Free resources of the THH_uldm object. + */ +HH_ret_t THH_uldm_destroy( /*IN*/ THH_uldm_t uldm ); + +/************************************************************************* + * Function: THH_uldm_alloc_ul_res + * + * Arguments: + * uldm + * prot_ctx - Protection context of user level + * hca_ul_resources_p - Returned user level resources + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * HH_EAGAIN - No available resources to allocate + * + * Description: + * Allocate user level resources (UAR). + */ +HH_ret_t THH_uldm_alloc_ul_res( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*OUT*/ THH_hca_ul_resources_t *hca_ul_resources_p ); + +/************************************************************************* + * Function: THH_uldm_free_ul_res + * + * Arguments: + * uldm + * hca_ul_resources_p - A copy of resources structure returned on + * resources allocation + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * Free the resources allocated using THH_uldm_alloc_ul_res() + */ +HH_ret_t THH_uldm_free_ul_res( /*IN*/ THH_uldm_t uldm, + /*IN*/ THH_hca_ul_resources_t *hca_ul_resources_p); + +/************************************************************************* + * Function: THH_uldm_alloc_uar + * + * Arguments: + * uldm + * prot_ctx - User level protection context to map UAR to + * uar_index - Returned index of allocated UAR + * uar_map - Virtual address in user level context to which the + * allocated UAR is mapped + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * HH_EAGAIN - No available UAR + * + * Description: + * Allocate an available UAR and map it to user level memory space. + */ +HH_ret_t THH_uldm_alloc_uar( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*OUT*/ u_int32_t *uar_index, + /*OUT*/ MT_virt_addr_t *uar_map ); + +/****************************************************************************** + * Function: THH_uldm_free_uar + * + * Arguments: + * uldm + * uar_index - Index of UAR to free + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * Unmap given UAR from user level memory space and return it to free UARs pool. + */ +HH_ret_t THH_uldm_free_uar( /*IN*/ THH_uldm_t uldm, + /*IN*/ u_int32_t uar_index ); + +/****************************************************************************** + * Function: THH_uldm_alloc_pd + * + * Arguments: + * uldm + * prot_ctx - Protection context of user asking for this PD + * pd_ul_resources_p - Mostly UD AV table memory to register + * pd_p - Allocated PD handle + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * HH_EAGAIN - No free PDs to allocate + * + * Description: + * Allocate a PD for given protection context. 
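+ *
+ *    A hedged usage sketch (pd_res is assumed to have been prepared in
+ *    user space beforehand, e.g. by the THHUL_uldm_alloc_pd_prep() step
+ *    mentioned in thh_uldm.c):
+ *
+ *      HH_pd_hndl_t pd;
+ *      if (THH_uldm_alloc_pd(uldm, prot_ctx, &pd_res, &pd) == HH_OK) {
+ *          ... pd_res.udavm_buf_memkey now holds the UDAV table lkey ...
+ *          THH_uldm_free_pd(uldm, pd);
+ *      }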
+ */ +HH_ret_t THH_uldm_alloc_pd( /*IN*/ THH_uldm_t uldm, + /*IN*/ MOSAL_protection_ctx_t prot_ctx, + /*IN*/ THH_pd_ul_resources_t *pd_ul_resources_p, + /*OUT*/ HH_pd_hndl_t *pd_p ); + +/****************************************************************************** + * Function: THH_uldm_free_pd + * + * Arguments: + * uldm + * pd - PD to free + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * Free the PD. + */ +HH_ret_t THH_uldm_free_pd( /*IN*/ THH_uldm_t uldm, + /*IN*/ HH_pd_hndl_t pd ) ; + +/****************************************************************************** + * Function: THH_uldm_get_protection_ctx + * + * Arguments: + * uldm + * pd - PD for which the protection context is required + * prot_ctx_p - Returned protection context + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter + * + * Description: + * This function returns the protection context associated with a PD. It is used by + * THH_mrwm_register_internal() in memory locking and mapping of WQE buffers to physical + * pages. (the mrwm is given a PD handle, and needs to retrieve the associated protection + * context). + */ +HH_ret_t THH_uldm_get_protection_ctx( /*IN*/ THH_uldm_t uldm, + /*IN*/ HH_pd_hndl_t pd, + /*OUT*/ MOSAL_protection_ctx_t *prot_ctx_p ); + +/****************************************************************************** + * Function: THH_uldm_get_num_objs + * + * Arguments: + * uldm + * num_alloc_us_res_p - allocated resource count + * num_alloc_pds_p - allocated PDs count + * + * Returns: + * HH_OK + * HH_EINVAL - Invalid parameter (bad handle, or both return value ptrs are NULL + * + * Description: + * For debugging -- returns allocated resource count and/or number of allocated PDs. + * either num_alloc_us_res_p or num_alloc_pds_p (but not both) may be NULL; + */ +HH_ret_t THH_uldm_get_num_objs( /*IN*/ THH_uldm_t uldm, u_int32_t *num_alloc_us_res_p, + u_int32_t *num_alloc_pds_p); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm_priv.h new file mode 100644 index 00000000..a9e7b868 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/uldm/thh_uldm_priv.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_THH_ULDM_PRIV_H +#define H_THH_ULDM_PRIV_H + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct __THH_uldm_epool_st { + unsigned long next; + unsigned long prev; +} __THH_uldm_epool_t; + +typedef struct __THH_uldm_uar_data_st { + MOSAL_protection_ctx_t prot_ctx; + MT_virt_addr_t virt_addr; +} __THH_uldm_uar_data_t; + + +typedef struct __THH_uldm_pd_data_st { + MOSAL_protection_ctx_t prot_ctx; + VAPI_lkey_t lkey; /* for freeing non-privileged UDAV table with a PD */ + MT_virt_addr_t udav_table_ddr_virt_addr; + MT_phys_addr_t udav_table_ddr_phys_addr; + MT_size_t udav_table_size; +} __THH_uldm_pd_data_t; + +typedef struct THH_uldm_uar_entry_st { + union { + __THH_uldm_epool_t epool; + __THH_uldm_uar_data_t uar; + } u1; + u_int8_t valid; +} THH_uldm_uar_entry_t; + +typedef struct THH_uldm_pd_entry_st { + union { + __THH_uldm_epool_t epool; + __THH_uldm_pd_data_t pd; + } u1; + u_int8_t valid; +} THH_uldm_pd_entry_t; + +typedef struct THH_uldm_st { + THH_hob_t hob; /* saved during uldm create */ + MT_phys_addr_t uar_base; + u_int8_t log2_max_uar; + u_int32_t max_uar; + u_int8_t log2_uar_pg_sz; + u_int32_t max_pd; + THH_uldm_uar_entry_t *uldm_uar_table; + THH_uldm_pd_entry_t *uldm_pd_table; + EPool_t uar_list; + EPool_t pd_list; + EPool_meta_t uar_meta; + EPool_meta_t pd_meta; +} THH_uldm_obj_t; + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.c new file mode 100644 index 00000000..5f71f048 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.c @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include + +typedef unsigned long Index_t; + +#if EPOOL_FIFO +#define EPOOL_STATE_DEBUG4(s, epool, index) \ + MTL_DEBUG4("%s: %p index=%lu allocs=%lu deallocs=%lu head=%lu tail=%lu\n", \ + s, epool, index, epool->allocations, epool->deallocations, \ + epool->head, epool->tail) +#else +#define EPOOL_STATE_DEBUG4(s, epool, index) \ + MTL_DEBUG4("%s: %p index=%lu allocs=%lu deallocs=%lu head=%lu\n", \ + s, epool, index, epool->allocations, epool->deallocations, \ + epool->head) +#endif + +static Index_t get_next(EPool_t* epool, Index_t index) { + Index_t next; + + if (index >= epool->size) { + MTL_ERROR1("epool get_next invalid parameter epool=%p index=%lu\n", epool, index); + return EPOOL_NULL; + } + next = epool->list[index]; + MTL_DEBUG4("get_next ep=%p index=%lu list=%p next=%lu\n", + epool, index, epool->list, next); + + if ((next != EPOOL_NULL) && (next >= epool->size)) { + MTL_ERROR1("epool get_next corruption epool=%p index=%lu next=%lu\n", epool, index, next); + return EPOOL_NULL; + } + return next; +} + +static void set_next(EPool_t* epool, Index_t index, Index_t next) { + if (index >= epool->size) { + MTL_ERROR1("epool set_next invalid parameter epool=%p index=%lu\n", epool, index); + return; + } + if ((next != EPOOL_NULL) && (next >= epool->size)) { + MTL_ERROR1("epool set_next invalid parameter epool=%p index=%lu next=%lu\n", epool, index, next); + return; + } + + epool->list[index] = next; + MTL_DEBUG4("set_next ep=%p index=%lu list=%p next=%lu\n", + epool, index, epool->list, next); +} + +/************************************************************************/ +/************************************************************************/ +/* interface functions */ + +/************************************************************************/ +void epool_init(EPool_t* epool) { + unsigned int i; + + MTL_TRACE1(MT_FLFMT("{ epool_init: epool=%p size=0x%lx"), epool, epool->size); + MOSAL_spinlock_init(&epool->spinlock); + epool->head = EPOOL_NULL; +#if EPOOL_FIFO + epool->tail = EPOOL_NULL; +#endif + epool->allocations = 0; + epool->deallocations = 0; + i = epool->size * sizeof(Index_t); + + if (i > PAGE_SIZE) { + epool->list = VMALLOC(i); + } else + epool->list = MALLOC(i); + + if (epool->list == NULL) { + MTL_ERROR1("%s memory allocation failed size=%d\n", __func__, i); + return; + } + for(i = epool->size; i;) { + epool_free(epool, --i); + } + // reset this for consistency, since epool_free incremented it + epool->deallocations = 0; + MTL_TRACE1(MT_FLFMT("} epool_init: epool=%p"), epool); +} + +/************************************************************************/ +void epool_cleanup(EPool_t* epool) { + if (epool->list != NULL) { + if (epool->size * sizeof(Index_t) > PAGE_SIZE) { + VFREE(epool->list); + } else + FREE(epool->list); + epool->list = NULL; + } +} + +/************************************************************************/ +Index_t epool_alloc(EPool_t* epool) { + Index_t index; + + MTL_TRACE4(MT_FLFMT("{ epool_alloc: epool=%p"), epool); + + MOSAL_spinlock_dpc_lock(&epool->spinlock); + if ((index = epool->head) != EPOOL_NULL) { + epool->head = get_next(epool, index); + ++epool->allocations; + } + MOSAL_spinlock_unlock(&epool->spinlock); + EPOOL_STATE_DEBUG4("epool_alloc", epool, index); + return index; +} + +/************************************************************************/ +void epool_free(EPool_t* epool, Index_t index) { + +#if EPOOL_FIFO + set_next(epool, index, EPOOL_NULL); + MOSAL_spinlock_dpc_lock(&epool->spinlock); + if 
(epool->head == EPOOL_NULL) { + epool->head = index; + } else { + Index_t check; + check = get_next(epool, epool->tail); + if (check != EPOOL_NULL) { + MTL_ERROR1("epool_free check next failed tail=%lu check=%lu\n", epool->tail, check); + MTL_ERROR1("epool_free ignoring bad tail for now"); + } + + set_next(epool, epool->tail, index); + check = get_next(epool, epool->tail); + if (check != index) { + MTL_ERROR1("epool_free check tail failed tail=%lu check=%lu\n", epool->tail, check); + MTL_ERROR1("epool_free ignoring bad set for now"); + } + } + epool->tail = index; +#else /* EPOOL_FIFO */ + MOSAL_spinlock_dpc_lock(&epool->spinlock); + set_next(epool, index, epool->head); + epool->head = index; +#endif /* EPOOL_FIFO */ + ++epool->deallocations; + MOSAL_spinlock_unlock(&epool->spinlock); + EPOOL_STATE_DEBUG4("epool_free", epool, index); +} + +/************************************************************************/ +// Note on failure returns number not reserved, however no indication +// of which were actually reserved. Ok for now, callers mostly ignore +// return value and always reserve immediately after init +unsigned long epool_reserve(EPool_t *epool, unsigned long start_index, unsigned long res_size) { +#if EPOOL_FIFO + unsigned long i; + unsigned long j; + unsigned long unreserved = 0; + Index_t index; + MT_bool found; + + MTL_DEBUG4(MT_FLFMT("{ epool_reserve: epool=%p, start=0x%lx, sz=0x%lx"), + epool, start_index, res_size); + for(i = start_index; i < start_index + res_size; ++i) { + found = FALSE; + for(j = 0; j < epool->size; ++j) { + index = epool_alloc(epool); + if (index == EPOOL_NULL) + break; + if (index == i) { + found = TRUE; + break; + } + epool_free(epool, index); // take advantage of fifo behavior of list now + } + if (!found) { + ++unreserved; + MTL_ERROR1("epool reserve failure epool=%p index=%lu\n", epool, i); + } + } + MTL_DEBUG4(MT_FLFMT("} epool_reserve unreserved=%lu"), unreserved); + return unreserved; +#else /* EPOOL_FIFO */ + Index_t end_index = start_index + res_size; + Index_t index; + Index_t prev; + + MTL_DEBUG4(MT_FLFMT("{ epool_reserve: epool=%p, start=0x%lx, sz=0x%lx"), + epool, start_index, res_size); + MOSAL_spinlock_dpc_lock(&epool->spinlock); + index = epool->head; + prev = EPOOL_NULL; // indicates at head + while ((index != EPOOL_NULL) && (res_size)) { + if ((start_index <= index) && (index < end_index)) + { + // allocate index + if (prev == EPOOL_NULL) { + epool->head = get_next(epool, index); + } else { + set_next(epool, prev, get_next(epool, index)); + } + ++epool->allocations; + EPOOL_STATE_DEBUG4("epool_reserve", epool, index); + index = get_next(epool, index); + } else { + // skip index + prev = index; + index = get_next(epool, index); + } + } + MOSAL_spinlock_unlock(&epool->spinlock); + MTL_DEBUG4(MT_FLFMT("} epool_reserve unreserved=%lu"), res_size); + return res_size; +#endif /* EPOOL_FIFO */ +} + +/************************************************************************/ +void epool_unreserve(EPool_t *epool, unsigned long start_index, unsigned long res_size) { + MTL_DEBUG4(MT_FLFMT("{ epool_unreserve: epool=%p, start=0x%lx, sz=0x%lx"), + epool, start_index, res_size); + + while(res_size--) { + epool_free(epool, start_index + res_size); + } + MTL_DEBUG4(MT_FLFMT("} epool_unreserve")); +} diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.h new file mode 100644 index 00000000..6fe35d87 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/epool.h 
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *    - Redistributions of source code must retain the above
+ *      copyright notice, this list of conditions and the following
+ *      disclaimer.
+ *
+ *    - Redistributions in binary form must reproduce the above
+ *      copyright notice, this list of conditions and the following
+ *      disclaimer in the documentation and/or other materials
+ *      provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(_EPOOL_H)
+#define _EPOOL_H
+
+#if !defined(EPOOL_TEST)
+# include
+#endif
+
+
+#define EPOOL_NULL ((const u_int32_t) ~0ul)
+
+#define EPOOL_FIFO 0
+
+typedef struct
+{
+  /* all in bytes */
+  // these fields are no longer used, but are retained for backward
+  // compatibility with existing callers of epool functions
+  unsigned int entry_size;         /* size of user entry structure */
+  /* offsets within user entry to: */
+  unsigned int prev_struct_offset; /* unsigned long 'prev' index */
+  unsigned int next_struct_offset; /* unsigned long 'next' index */
+} EPool_meta_t;
+
+typedef struct
+{
+  void*               entries;  /* allocated by user */
+  unsigned long       size;     /* number of entries */
+  const EPool_meta_t* meta;     /* can be shared */
+  unsigned long       head;     /* used internally */
+#if EPOOL_FIFO
+  unsigned long       tail;     /* used internally */
+#endif
+  unsigned long       *list;
+  unsigned long       allocations;
+  unsigned long       deallocations;
+  MOSAL_spinlock_t    spinlock; /* used internally */
+} EPool_t;
+
+#ifdef __cplusplus
+  extern "C" {
+#endif
+
+/* Notes:
+ *
+ *  + The complexity of epool_alloc() and epool_free() is constant, O(1).
+ *  + The complexity of epool_reserve(...) is O(n),
+ *    where n is the size of the free list.
+ *  + The complexity of epool_unreserve(...) is O(res_size).
+ */
+
+/* Function: epool_init
+ * Arguments:
+ *  l - EPool_t to initialize. The user is responsible for pre-allocating
+ *      l->entries, and for setting l->size and l->meta accordingly.
+ * Return:
+ *  None.
+ */
+extern void epool_init(EPool_t* l);
+
+/* Function: epool_cleanup
+ * Arguments:
+ *  l - EPool_t to clean. Note that the user's entries are not freed.
+ * Return:
+ *  None.
+ * Frees the internal index list allocated by epool_init().
+ */
+extern void epool_cleanup(EPool_t* l);
+
+/* Function: epool_alloc
+ * Arguments:
+ *  l - EPool_t to get a free index from.
+ * Return:
+ *  An index within l->entries, or EPOOL_NULL.
+ */
+extern unsigned long epool_alloc(EPool_t* l); /* EPOOL_NULL iff fail */
+
+/* Function: epool_free
+ * Arguments:
+ *  l     - EPool_t to free an index to.
+ *  index - Index to free.
+ * Return:
+ *  None.
+ */
+extern void epool_free(EPool_t* l, unsigned long index);
+
+/* Function: epool_reserve
+ * Arguments:
+ *  l - EPool_t from which to reserve range of indices.
+ * start_index - start of indices range. + * size - size of range. + * Return: + * 0 for success, or number of un-reserved indices. + */ +extern unsigned long epool_reserve( + EPool_t* l, + unsigned long start_index, + unsigned long res_size); + +/* Function: epool_unreserve + * Arguments: + * l - EPool_t from which to unreserve range of indices. + * start_index - start of indices range. + * size - size of range. + * Return: + * None. + * + * Notes: It is the responsiblity of the caller to provide valid range. + */ +extern void epool_unreserve( + EPool_t* l, + unsigned long start_index, + unsigned long res_size); + +#ifdef __cplusplus + } +#endif + +#endif /* _EPOOL_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.c new file mode 100644 index 00000000..fb58677c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.c @@ -0,0 +1,625 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "extbuddy.h" +#include +#include + +typedef struct Extbuddy_st +{ + u_int8_t log2_min; + u_int8_t log2_max; + VIP_hashp_p_t freelist[32]; + u_int32_t available_size; +} Extbuddy_t; + +typedef struct +{ + unsigned int curr_size; + unsigned int ptrs_buff_sz; + u_int32_t* ptrs_buff; +} Query_Filler; + + +typedef struct +{ + u_int32_t boundary_begin; + u_int32_t boundary_end; + u_int32_t target_size; + u_int32_t* pp; /* address of desired pointer to return*/ + u_int32_t chunk_start; + u_int8_t chunk_log2sz; + u_int32_t chunk_size; /* 1ul << chunk_log2sz --- cahced */ +} Bound_Ptr; + + +/* The following structure serves the reserve routines callbacks. + * It is designed for simple cached values rather than compactness. 
+ */ +typedef struct +{ + Extbuddy_t* xbdy; + u_int32_t res_begin; + u_int32_t res_size; + u_int32_t res_end; /* = begin + size */ + u_int32_t total_intersection; /* should reach size */ + unsigned long n_segments[32]; + unsigned long n_segments_total; + u_int32_t* currseg; + u_int32_t* currseg_end; + int8_t curr_log2sz; +} Reserve_Segments; + + + +/************************************************************************/ +/************************************************************************/ +/* private functions */ + + +/************************************************************************/ +/* $$ [a_0,a_1) \cap [b_0,b_1) $$ %% (TeX format) */ +/* Note the half open segments: $ [m,n) = [m, n-1] $ */ +static MT_bool intersects( + u_int32_t a0, u_int32_t a1, + u_int32_t b0, u_int32_t b1 +) +{ + return ((b0 < a1) && (a0 < b1)); +} /* intersects */ + + +/************************************************************************/ +static u_int32_t intersection_size( + u_int32_t a0, u_int32_t a1, + u_int32_t b0, u_int32_t b1 +) +{ + u_int32_t x = (intersects(a0, a1, b0, b1) + ? (a1 < b1 ? a1 : b1) - (a0 < b0 ? b0 : a0) /* min(right) - max(left) */ + : 0); + return x; +} /* intersection_size */ + + +/************************************************************************/ +/* Fix given log2_sz to minimal size */ +static u_int8_t fix_log2(Extbuddy_t* xbdy, u_int8_t* log2_sz_p) +{ + if (*log2_sz_p < xbdy->log2_min) + { + *log2_sz_p = xbdy->log2_min; + } + return *log2_sz_p; +} /* fix_log2 */ + + +/************************************************************************/ +static MT_bool create_lists(Extbuddy_t* xbdy) +{ + MT_bool ok = TRUE; + int clg2; + for (clg2 = xbdy->log2_min; ok && (clg2 <= xbdy->log2_max); ++clg2) + { + if (VIP_hashp_create(0, &(xbdy->freelist[clg2])) != VIP_OK) + { + ok = FALSE; + } + } + return ok; +} /* create_lists */ + + +/************************************************************************/ +static void destroy_lists(Extbuddy_t* xbdy) +{ + int clg2; + for (clg2 = xbdy->log2_min; clg2 <= xbdy->log2_max; ++clg2) + { + VIP_hashp_p_t ph = xbdy->freelist[clg2]; + if (ph) + { + (void)VIP_hashp_destroy(ph, 0, 0); + xbdy->freelist[clg2] = NULL; + } + } +} /* destroy_lists */ + + +/************************************************************************/ +static MT_bool init_lists(Extbuddy_t* xbdy) +{ + MT_bool ok = create_lists(xbdy); + MTL_DEBUG4(MT_FLFMT("ok=%d"), ok); + if (ok) + { + int chunk = xbdy->log2_max; + u_int32_t chunk_size = 1ul << chunk; + u_int32_t offset = 0, next = offset + chunk_size; + while (ok && (next <= xbdy->available_size) && (chunk > xbdy->log2_min)) + { + //MTL_DEBUG4(MT_FLFMT("chunk=%d, offset=0x%x"), chunk, offset); + ok = (VIP_hashp_insert(xbdy->freelist[chunk], offset, 0) == VIP_OK); + offset = next; + do + { + --chunk; + chunk_size >>= 1; + next = offset + chunk_size; + } while ((next > xbdy->available_size) && (chunk > xbdy->log2_min)); + } + /* sub divide the rest to the segments of minimum chunks */ + while (ok && (next <= xbdy->available_size)) + { + offset = next; + //MTL_DEBUG4(MT_FLFMT("chunk=%d, offset=0x%x"), chunk, offset); + ok = (VIP_hashp_insert(xbdy->freelist[chunk], offset, 0) == VIP_OK); + next += chunk_size; + } + } + if (!ok) + { + destroy_lists(xbdy); + } + return ok; +} /* init_lists */ + + +/************************************************************************/ +static int get1p(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp) +{ + u_int32_t p = key; + u_int32_t* pp = (u_int32_t*)vp; + *pp = p; + return 
0; /* just one call, and stop traverse */ +} /* get1p */ + + +/************************************************************************/ +static int get1p_bound(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp) +{ + u_int32_t p = key; + Bound_Ptr* bp = (Bound_Ptr*)vp; + u_int32_t xsize = intersection_size(bp->boundary_begin, bp->boundary_end, + p, p + bp->chunk_size); + int found = (bp->target_size <= xsize); + if (found) + { + *bp->pp = (p < bp->boundary_begin ? bp->boundary_begin : p); + bp->chunk_start = p; + } + return !found; +} /* get1p_bound */ + + +/************************************************************************/ +static int query_fill(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp) +{ + u_int32_t p = key; + Query_Filler* filler_p = (Query_Filler*)vp; + int go_on = (filler_p->curr_size < filler_p->ptrs_buff_sz); + if (go_on) + { + filler_p->ptrs_buff[filler_p->curr_size++] = p; + /* go_on = (filler_p->curr_size < filler_p->ptrs_buff_sz);, no real need */ + } + return go_on; +} /* query_fill */ + + +/************************************************************************/ +/* Reserve related functions */ + + +/************************************************************************/ +static int reserve_pass1(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp) +{ + MT_bool more; + u_int32_t p = key; + Reserve_Segments* rs = (Reserve_Segments*)vp; + u_int8_t log2sz = rs->curr_log2sz; + u_int32_t xsize = intersection_size(rs->res_begin, rs->res_end, + p, p + (1ul << log2sz)); + if (xsize != 0) + { + rs->total_intersection += xsize; + ++rs->n_segments_total; + ++rs->n_segments[log2sz]; + } + more = (rs->total_intersection < rs->res_size); + return more; +} /* reserve_pass1 */ + + +/************************************************************************/ +static int reserve_pass2(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp) +{ + MT_bool more; + u_int32_t p = key; + Reserve_Segments* rs = (Reserve_Segments*)vp; + u_int8_t log2sz = rs->curr_log2sz; + if (intersects(rs->res_begin, rs->res_end, p, p + (1ul << log2sz))) + { + *rs->currseg++ = p; + } + more = (rs->currseg != rs->currseg_end); + return more; +} /* reserve_pass2 */ + + +/************************************************************************/ +/* Recursive. 
Maximal depth is by log2sz */ +static MT_bool reserve_breaks_insert( + Reserve_Segments* rs, + u_int32_t p, + u_int8_t log2sz +) +{ + MT_bool ok = TRUE; + MTL_DEBUG4(MT_FLFMT("{breaks_insert: p=0x%x, log2sz=%d"), p, log2sz); + if (log2sz > rs->xbdy->log2_min) + { + unsigned int i2 = 0; + u_int32_t half = (1ul << --log2sz); + for (i2 = 0; i2 != 2; ++i2, p += half) + { + u_int32_t xsize = intersection_size(rs->res_begin, rs->res_end, + p, p + half); + // MTL_DEBUG4(MT_FLFMT("p=0x%x, xsize=0x%x"), p, xsize); + if (xsize == 0) /* we can use the whole half */ + { + VIP_common_ret_t + vrc = VIP_hashp_insert(rs->xbdy->freelist[log2sz], p, 0); + ok = (vrc == VIP_OK); + if (ok) + { + rs->xbdy->available_size += 1ul << log2sz; + } + } + else + { + if (xsize != half) /* we can use part of the half */ + { + ok = reserve_breaks_insert(rs, p, log2sz) && ok; /* recursive */ + //MTL_DEBUG4(MT_FLFMT("ok=%d"), ok); + } + } + } + } + MTL_DEBUG4(MT_FLFMT("}breaks_insert ok=%d"), (int)ok); + return ok; +} /* reserve_breaks_insert */ + + +/************************************************************************/ +static MT_bool reserve_break(Reserve_Segments* rs) +{ + MT_bool ok = TRUE; + u_int32_t p = *rs->currseg; + u_int8_t log2sz = rs->curr_log2sz; + ok = (VIP_hashp_erase(rs->xbdy->freelist[log2sz], p, 0) == VIP_OK); + if (ok) + { + rs->xbdy->available_size -= 1ul << log2sz; + ok = reserve_breaks_insert(rs, p, log2sz); + } + return ok; +} /* reserve_break */ + + +/************************************************************************/ +/************************************************************************/ +/* interface functions */ + + +/************************************************************************/ +Extbuddy_hndl extbuddy_create(u_int32_t size, u_int8_t log2_min_chunk) +{ + u_int8_t log2_max = floor_log2(size); + Extbuddy_t* xbdy = NULL; + if (log2_min_chunk <= log2_max) + { + xbdy = TMALLOC(Extbuddy_t); + } + if (xbdy) + { + u_int32_t mask = (1ul << log2_min_chunk) - 1; + memset(xbdy, 0, sizeof(Extbuddy_t)); /* in particular, null hash lists */ + xbdy->log2_min = log2_min_chunk; + xbdy->log2_max = log2_max; + xbdy->available_size = size & ~mask; + MTL_DEBUG4(MT_FLFMT("log2_min=%u, log2_max=%u, avail_size=%u"), + xbdy->log2_min, xbdy->log2_max,xbdy->available_size); + if (!init_lists(xbdy)) + { + FREE(xbdy); + xbdy = NULL; + } + } + return xbdy; +} /* extbuddy_create */ + + +/************************************************************************/ +void extbuddy_destroy(Extbuddy_hndl xbdy) +{ + destroy_lists(xbdy); + FREE(xbdy); +} /* extbuddy_destroy */ + + +/************************************************************************/ +u_int32_t extbuddy_alloc(Extbuddy_hndl xbdy, u_int8_t log2_sz) +{ + u_int32_t p = EXTBUDDY_NULL; + u_int32_t log2_max = xbdy->log2_max; + u_int8_t sz = fix_log2(xbdy, &log2_sz); + for ( ; (sz <= log2_max) && !extbuddy_chunks_available(xbdy, sz); ++sz); + if (sz <= xbdy->log2_max) /* we found a sufficient chunk */ + { + u_int8_t split_sz = log2_sz; + VIP_hashp_traverse(xbdy->freelist[sz], &get1p, &p); + VIP_hashp_erase(xbdy->freelist[sz], p, 0); /* can't fail */ + + /* If bigger than we need, we split chunk of 2^{split_sz} by halves, + * inserting chunks to free lists. + */ + while (split_sz != sz) + { + u_int32_t split = p + (1ul << split_sz); + if (VIP_hashp_insert(xbdy->freelist[split_sz], split, 0) != VIP_OK) + { + xbdy->available_size -= (1ul << split_sz); /* :)bad:(. 
*/
+      }
+      ++split_sz;
+    }
+    xbdy->available_size -= (1ul << log2_sz);
+  }
+  return p;
+} /* extbuddy_alloc */
+
+
+/************************************************************************/
+u_int32_t extbuddy_alloc_bound(
+  Extbuddy_t* xbdy,
+  u_int8_t    log2_sz,
+  u_int32_t   area_start,
+  u_int32_t   area_size
+)
+{
+  u_int32_t p = EXTBUDDY_NULL;
+  u_int32_t log2_max = xbdy->log2_max;
+  u_int8_t sz = fix_log2(xbdy, &log2_sz);
+  u_int32_t target_size = (1ul << log2_sz);
+  Bound_Ptr bp;
+
+  /* Align the boundary beginning to the target size */
+  bp.boundary_begin = area_start & ~(target_size - 1);
+  if (bp.boundary_begin != area_start) { bp.boundary_begin += target_size; }
+  bp.boundary_end = area_start + area_size;
+  bp.target_size = 1ul << log2_sz;
+  bp.pp = &p;
+
+  for (; (p == EXTBUDDY_NULL) && (sz <= log2_max); ++sz)
+  {
+    if (extbuddy_chunks_available(xbdy, sz) != 0)
+    {
+      bp.chunk_log2sz = sz;
+      bp.chunk_size = 1ul << sz;
+      VIP_hashp_traverse(xbdy->freelist[sz], &get1p_bound, &bp);
+      if (p != EXTBUDDY_NULL)
+      {
+        u_int32_t buddy = p;
+        u_int8_t split_sz = log2_sz;
+        VIP_hashp_erase(xbdy->freelist[sz], bp.chunk_start, 0); /* can't fail */
+
+        /* If bigger than we need, we split chunk of 2^{split_sz} by halves,
+         * inserting chunks to free lists. This is more complicated than
+         * the above (non bound) extbuddy_alloc(...), since we keep
+         * the returned p, which is not necessarily chunk_start.
+         * So we use the buddy trick again.
+         */
+        while (split_sz != sz)
+        {
+          u_int32_t buddy_bit = 1ul << split_sz;
+          buddy ^= buddy_bit;
+          if (VIP_hashp_insert(xbdy->freelist[split_sz], buddy, 0) != VIP_OK)
+          {
+            xbdy->available_size -= (1ul << split_sz); /* :)bad:(. */
+          }
+          buddy &= ~buddy_bit;
+          ++split_sz;
+        }
+        xbdy->available_size -= (1ul << log2_sz);
+      }
+    }
+  }
+  return p;
+} /* extbuddy_alloc_bound */
+
+
+/************************************************************************/
+/* p is assumed to be aligned to 1<<log2sz */
+MT_bool extbuddy_free(Extbuddy_t* xbdy, u_int32_t p, u_int8_t log2sz)
+{
+  MT_bool ok;
+  u_int8_t slot = fix_log2(xbdy, &log2sz);
+  u_int32_t buddy = p ^ (1ul << slot);
+  if (slot > xbdy->log2_max) {
+    MTL_ERROR1(MT_FLFMT("extbuddy_free: slot too large: %d (max is %d)"), slot, xbdy->log2_max);
+    return FALSE;
+  }
+  while ((slot <= xbdy->log2_max) &&
+         VIP_hashp_erase(xbdy->freelist[slot], buddy, 0) == VIP_OK)
+  {
+    p &= ~(1ul << slot); /* unite with buddy */
+    ++slot;
+    buddy = p ^ (1ul << slot);
+  }
+  ok = (VIP_hashp_insert(xbdy->freelist[slot], p, 0) == VIP_OK);
+  if (ok)
+  {
+    xbdy->available_size += (1ul << log2sz);
+  }
+  return ok;
+} /* extbuddy_free */
+
+
+/************************************************************************/
+unsigned int extbuddy_chunks_available(Extbuddy_t* xbdy, u_int8_t log2sz)
+{
+  unsigned int n = 0;
+  if (xbdy->log2_min <= log2sz && log2sz <= xbdy->log2_max)
+  {
+    n = VIP_hashp_get_num_of_objects(xbdy->freelist[log2sz]);
+  }
+  return n;
+} /* extbuddy_chunks_available */
+
+
+/************************************************************************/
+u_int32_t extbuddy_total_available(Extbuddy_t* xbdy)
+{
+  return xbdy->available_size;
+} /* extbuddy_total_available */
+
+
+/************************************************************************/
+u_int8_t extbuddy_log2_max_available(Extbuddy_t* xbdy)
+{
+  int log2max = -1; /* int rather than u_int8_t: as an unsigned byte the
+                       (log2max == -1) sentinel test below could never be true */
+  int chunk = xbdy->log2_max + 1;
+  while ((log2max == -1) && (--chunk >= (int)xbdy->log2_min))
+  {
+    if (VIP_hashp_get_num_of_objects(xbdy->freelist[chunk]) > 0)
+    {
+      log2max = chunk;
+    }
+  }
+  return (u_int8_t)log2max;
+} /* extbuddy_log2_max_available */
+
+
+/************************************************************************/
+void extbuddy_query_chunks(
+  Extbuddy_t*  xbdy,
+  u_int8_t     log2sz,
unsigned int ptrs_buff_sz, + u_int32_t* ptrs_buff +) +{ + Query_Filler filler; + filler.curr_size = 0; + filler.ptrs_buff_sz = ptrs_buff_sz; + filler.ptrs_buff = ptrs_buff; + VIP_hashp_traverse(xbdy->freelist[log2sz], &query_fill, &filler); +} /* extbuddy_query_chunks */ + + +/************************************************************************/ +/* We go through the free lists, break free segments that intersect the + * reserved area and insert the non reserved sub-segments into smaller + * chunk lists. + * + * Three passes: + * 1. See if reservation is possible and count the number of + * free segments involved. + * 2. Collect the involved segments. + * 3. Go thru the involved segments. + * Break each segment by calling a recursive function. + * + * Note that the first two passes traverse the underlying hash tables. + * These tables are not modified until the third pass. + */ +MT_bool extbuddy_reserve(Extbuddy_t* xbdy, u_int32_t p, u_int32_t size) +{ + MT_bool ok = TRUE; + Reserve_Segments rs; + u_int32_t* segs = NULL; + u_int8_t log2_min_used = xbdy->log2_max; + + memset(&rs, 0, sizeof(rs)); /* clumsy but simple initialization */ + rs.xbdy = xbdy; + rs.res_begin = p; + rs.res_size = size; + rs.res_end = p + size; + + /* 1st pass */ + MTL_DEBUG4(MT_FLFMT("1st pass")); + for (rs.curr_log2sz = xbdy->log2_max; + (rs.curr_log2sz >= xbdy->log2_min) && (rs.total_intersection < size); + --rs.curr_log2sz) + { + VIP_hashp_traverse(xbdy->freelist[rs.curr_log2sz], &reserve_pass1, &rs); + if (rs.n_segments[rs.curr_log2sz]) + { + log2_min_used = rs.curr_log2sz; + } + } + + + ok = (rs.total_intersection == size); + segs = (ok ? TNVMALLOC(u_int32_t, rs.n_segments_total) : NULL); + ok = (segs != NULL); + + if (ok) + { + /* 2nd pass */ + MTL_DEBUG4(MT_FLFMT("2nd pass")); + rs.currseg = segs; + for (rs.curr_log2sz = xbdy->log2_max; rs.curr_log2sz >= log2_min_used; + --rs.curr_log2sz) + { + if (rs.n_segments[rs.curr_log2sz]) + { + rs.currseg_end = rs.currseg + rs.n_segments[rs.curr_log2sz]; + VIP_hashp_traverse(xbdy->freelist[rs.curr_log2sz], &reserve_pass2, &rs); + } + } + + /* 3rd pass */ + MTL_DEBUG4(MT_FLFMT("3rd pass")); + rs.currseg = segs; + for (rs.curr_log2sz = xbdy->log2_max; rs.curr_log2sz >= log2_min_used; + --rs.curr_log2sz) + { + //MTL_DEBUG4(MT_FLFMT("curr_log2sz=%d"), rs.curr_log2sz); + rs.currseg_end = rs.currseg + rs.n_segments[rs.curr_log2sz]; + for (; rs.currseg != rs.currseg_end; ++rs.currseg) + { + ok = reserve_break(&rs) && ok; + } + } + } + + if (segs) { VFREE(segs); } + return ok; +} /* extbuddy_reserve */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.h new file mode 100644 index 00000000..2982a80d --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/extbuddy.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(_ExtBuddy_H) +#define _ExtBuddy_H + +#include + +typedef struct Extbuddy_st* Extbuddy_hndl; +#define EXTBUDDY_NULL ((const u_int32_t) ~0ul) + +#ifdef __cplusplus + extern "C" { +#endif +extern Extbuddy_hndl extbuddy_create(u_int32_t size, u_int8_t log2_min_chunk); +extern void extbuddy_destroy(Extbuddy_hndl handle); +extern u_int32_t extbuddy_alloc(Extbuddy_hndl handle, u_int8_t log2_sz); +extern u_int32_t extbuddy_alloc_bound( + Extbuddy_hndl handle, + u_int8_t log2_sz, + u_int32_t area_start, + u_int32_t area_size + ); +extern MT_bool extbuddy_free( + Extbuddy_hndl handle, + u_int32_t p, + u_int8_t log2_sz); +extern unsigned int extbuddy_chunks_available( + Extbuddy_hndl handle, + u_int8_t log2_sz); +extern unsigned int extbuddy_total_available(Extbuddy_hndl handle); +extern u_int8_t extbuddy_log2_max_available(Extbuddy_hndl handle); +extern void extbuddy_query_chunks( + Extbuddy_hndl handle, + u_int8_t log2_sz, + unsigned int ptrs_buff_sz, + u_int32_t* ptrs_buff); + +/* reserve interface */ +extern MT_bool extbuddy_reserve( /* Only before any allocation */ + Extbuddy_hndl handle, + u_int32_t p, + u_int32_t size /* here, not a log2 */); + +#ifdef __cplusplus + } +#endif + + +#endif /* _ExtBuddy_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.c new file mode 100644 index 00000000..47574fcf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.c @@ -0,0 +1,430 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include
+
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "MyMemCopy"
+//--------------------------------------------------------------------------------------------------
+static void MyMemCopy(u_int8_t *pDst,u_int8_t *pSrc, u_int16_t nLen)
+{
+#ifndef MT_BIG_ENDIAN
+  for(pSrc+=nLen;nLen--;*(pDst++)=*(--pSrc));
+#else
+  for(;nLen--;*(pDst++)=*(pSrc++));
+#endif
+}
+//--------------------------------------------------------------------------------------------------
+
+void MadBufPrint( void *madbuf)
+{
+  int i;
+  u_int8_t *iterator;
+  iterator = (u_int8_t *)madbuf;
+
+  MTL_DEBUG3("MadBufPrint START\n");
+  for (i = 0; i < 16; i++) {
+    MTL_DEBUG3("%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n",
+               *iterator, *(iterator+1), *(iterator+2), *(iterator+3),
+               *(iterator+4), *(iterator+5), *(iterator+6), *(iterator+7),
+               *(iterator+8), *(iterator+9), *(iterator+10), *(iterator+11),
+               *(iterator+12), *(iterator+13), *(iterator+14), *(iterator+15));
+    iterator += 16;
+  }
+  MTL_DEBUG3("MadBufPrint END\n");
+}
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "FieldToBuf"
+//--------------------------------------------------------------------------------------------------
+static void FieldToBuf(u_int8_t cField, u_int8_t *pBuf, u_int16_t nOffset, u_int16_t nLen)
+{
+  pBuf[(nOffset>>3)] &= ~(((0x1<<nLen)-1)<<(8-nLen-(nOffset & 0x7)));
+  pBuf[(nOffset>>3)] |= ((cField & ((0x1<<nLen)-1))<<(8-nLen-(nOffset & 0x7)));
+}
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "BufToField"
+//--------------------------------------------------------------------------------------------------
+static u_int8_t BufToField(u_int8_t *pBuf, u_int16_t nOffset, u_int16_t nLen)
+{
+  return ((pBuf[(nOffset>>3)]>>(8-nLen-(nOffset & 0x7))) & ((0x1<<nLen)-1));
+}
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "ZeroToBuf"
+//--------------------------------------------------------------------------------------------------
+static void ZeroToBuf(u_int8_t *pBuf, u_int16_t nOffset, u_int16_t nLen)
+{
+  pBuf[(nOffset>>3)] &= ~(((0x1<<nLen)-1)<<(8-nLen-(nOffset & 0x7)));
+}
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "NodeInfoStToMAD"
+//--------------------------------------------------------------------------------------------------
+void NodeInfoStToMAD(SM_MAD_NodeInfo_t *pNodeInfo,u_int8_t *pSMPBuf)
+{
+  pSMPBuf[IB_SMP_DATA_START]=pNodeInfo->cBaseVersion;
+  pSMPBuf[IB_SMP_DATA_START+1]=pNodeInfo->cClassVersion;
+  pSMPBuf[IB_SMP_DATA_START+2]=pNodeInfo->cNodeType;
+  pSMPBuf[IB_SMP_DATA_START+3]=pNodeInfo->cNumPorts;
+  //reserved 64 bit
+  memset(pSMPBuf+4,(u_int8_t)0,8);
+  memcpy(pSMPBuf+IB_SMP_DATA_START+12,(u_int8_t *)&(pNodeInfo->qwNodeGUID),8);
+  memcpy(pSMPBuf+IB_SMP_DATA_START+20,(u_int8_t *)&(pNodeInfo->qwPortGUID),8);
+  MyMemCopy(pSMPBuf+IB_SMP_DATA_START+28,(u_int8_t *)&(pNodeInfo->wPartCap),2);
+  MyMemCopy(pSMPBuf+IB_SMP_DATA_START+30,(u_int8_t *)&(pNodeInfo->wDeviceID),2);
+  MyMemCopy(pSMPBuf+IB_SMP_DATA_START+32,(u_int8_t *)&(pNodeInfo->dwRevision),4);
+  pSMPBuf[IB_SMP_DATA_START+36]=pNodeInfo->cLocalPortNum;
+  MyMemCopy(pSMPBuf+IB_SMP_DATA_START+37,(u_int8_t *)&(pNodeInfo->dwVendorID),3); //REV will not work when BIG_ENDIAN
+  memset(pSMPBuf+40,(u_int8_t)0,216);
+}
+//--------------------------------------------------------------------------------------------------
+
+
+//--------------------------------------------------------------------------------------------------
+//#define FUNCTION_NAME "NodeInfoMADToSt"
+//--------------------------------------------------------------------------------------------------
+void NodeInfoMADToSt(SM_MAD_NodeInfo_t *pNodeInfo,u_int8_t *pSMPBuf)
+{
+  pNodeInfo->cBaseVersion=pSMPBuf[IB_SMP_DATA_START];
+  pNodeInfo->cClassVersion=pSMPBuf[IB_SMP_DATA_START+1];
+  pNodeInfo->cNodeType=(IB_node_type_t)pSMPBuf[IB_SMP_DATA_START+2];
+  pNodeInfo->cNumPorts=pSMPBuf[IB_SMP_DATA_START+3];
+  //reserved 64 bit
+  memcpy((u_int8_t *)&(pNodeInfo->qwNodeGUID),pSMPBuf+IB_SMP_DATA_START+12,8);
+  memcpy((u_int8_t *)&(pNodeInfo->qwPortGUID),pSMPBuf+IB_SMP_DATA_START+20,8);
+  MyMemCopy((u_int8_t *)&(pNodeInfo->wPartCap),pSMPBuf+IB_SMP_DATA_START+28,2);
+  MyMemCopy((u_int8_t *)&(pNodeInfo->wDeviceID),pSMPBuf+IB_SMP_DATA_START+30,2);
+  MyMemCopy((u_int8_t *)&(pNodeInfo->dwRevision),pSMPBuf+IB_SMP_DATA_START+32,4);
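+  /* MyMemCopy (defined above) reverses byte order on little-endian
+     builds, so wPartCap, wDeviceID and dwRevision arrive here in host
+     order, while the plain memcpy used for the two GUIDs keeps them in
+     network (big-endian) order. */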
pNodeInfo->cLocalPortNum=pSMPBuf[IB_SMP_DATA_START+36]; + MyMemCopy((u_int8_t *)&(pNodeInfo->dwVendorID),pSMPBuf+IB_SMP_DATA_START+37,3); //REV will not work when BIG_ENDIAN +} +//-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "NodeInfoPrint" +//-------------------------------------------------------------------------------------------------- +void NodeInfoPrint(SM_MAD_NodeInfo_t *pNodeInfo) +{ + MTL_DEBUG3(MT_FLFMT("cBaseVersion: 0x%02X"),pNodeInfo->cBaseVersion); + MTL_DEBUG3(MT_FLFMT("cClassVersion: 0x%02X"),pNodeInfo->cClassVersion); + MTL_DEBUG3(MT_FLFMT("cNodeType: 0x%02X"),pNodeInfo->cNodeType); + MTL_DEBUG3(MT_FLFMT("cNumPorts: 0x%02X"),pNodeInfo->cNumPorts); + MTL_DEBUG3(MT_FLFMT("qwNodeGUID = 0x%02x%02x%02x%02x%02x%02x%02x%02x"), + pNodeInfo->qwNodeGUID[0],pNodeInfo->qwNodeGUID[1],pNodeInfo->qwNodeGUID[2],pNodeInfo->qwNodeGUID[3], + pNodeInfo->qwNodeGUID[4],pNodeInfo->qwNodeGUID[5],pNodeInfo->qwNodeGUID[6],pNodeInfo->qwNodeGUID[7]); //REV show all bytes + MTL_DEBUG3(MT_FLFMT("qwPortGUID = 0x%02x%02x%02x%02x%02x%02x%02x%02x"), + pNodeInfo->qwPortGUID[0],pNodeInfo->qwPortGUID[1],pNodeInfo->qwPortGUID[2],pNodeInfo->qwPortGUID[3], + pNodeInfo->qwPortGUID[4],pNodeInfo->qwPortGUID[5],pNodeInfo->qwPortGUID[6],pNodeInfo->qwPortGUID[7]); //REV show all bytes + MTL_DEBUG3(MT_FLFMT("wPartCap: 0x%04X"),pNodeInfo->wPartCap); + MTL_DEBUG3(MT_FLFMT("wDeviceID: 0x%04X"),pNodeInfo->wDeviceID); + MTL_DEBUG3(MT_FLFMT("dwRevision: 0x%08X"),pNodeInfo->dwRevision); + MTL_DEBUG3(MT_FLFMT("cLocalPortNum: 0x%02X"),pNodeInfo->cLocalPortNum); + MTL_DEBUG3(MT_FLFMT("dwVendorID: 0x%08X"),pNodeInfo->dwVendorID); +} +//-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "GUIDInfoMADToSt" +//-------------------------------------------------------------------------------------------------- +void GUIDInfoMADToSt(SM_MAD_GUIDInfo_t *pGuidTable,u_int8_t *pSMPBuf) +{ + u_int16_t i; + + for (i=0;i<8;i++) + memcpy((u_int8_t *)&(pGuidTable->guid[i]), pSMPBuf+IB_SMP_DATA_START + (i*8), 8); +} +//-------------------------------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "NodeDescPrint" +//-------------------------------------------------------------------------------------------------- +void GUIDInfoPrint(SM_MAD_GUIDInfo_t *pGuidTable) +{ + u_int16_t i; + + for (i=0;i<8;i++) { + MTL_DEBUG3(MT_FLFMT("GUID[%d] = 0x%02x%02x%02x%02x%02x%02x%02x%02x"), i, + pGuidTable->guid[i][0],pGuidTable->guid[i][1],pGuidTable->guid[i][2],pGuidTable->guid[i][3], + pGuidTable->guid[i][4],pGuidTable->guid[i][5],pGuidTable->guid[i][6],pGuidTable->guid[i][7]); //REV show all bytes + } +} +//-------------------------------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "PKeyTableMADToSt" +//-------------------------------------------------------------------------------------------------- +void PKeyTableMADToSt(SM_MAD_Pkey_table_t *pKeyTable,u_int8_t *pSMPBuf) +{ + u_int16_t i; + + for 
(i=0;i<32;i++) + MyMemCopy((u_int8_t *)&(pKeyTable->pkey[i]), pSMPBuf+IB_SMP_DATA_START + (i*2), 2); +} +//-------------------------------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "PKeyTablePrint" +//-------------------------------------------------------------------------------------------------- +void PKeyTablePrint(SM_MAD_Pkey_table_t *pKeyTable) +{ + u_int16_t i; + + for (i=0;i<32;i++) { + MTL_DEBUG3(MT_FLFMT("PKey[%d] = 0x%X"), i, pKeyTable->pkey[i]); //REV show all bytes + } +} +//-------------------------------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "PortInfoStToMAD" +//-------------------------------------------------------------------------------------------------- +void PortInfoStToMAD(SM_MAD_PortInfo_t *pPortInfo,u_int8_t *pSMPBuf) +{ + MyMemCopy(pSMPBuf+IB_SMP_DATA_START,(u_int8_t *)&(pPortInfo->qwMKey),8); + memcpy(pSMPBuf+IB_SMP_DATA_START+8,(u_int8_t *)&(pPortInfo->qwGIDPrefix),8); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+16,(u_int8_t *)&(pPortInfo->wLID),2); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+18,(u_int8_t *)&(pPortInfo->wMasterSMLID),2); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+20,(u_int8_t *)&(pPortInfo->dwCapMask),4); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+24,(u_int8_t *)&(pPortInfo->wDiagCode),2); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+26,(u_int8_t *)&(pPortInfo->wMKLease),2); + FieldToBuf(pPortInfo->cLocalPortNum,pSMPBuf+IB_SMP_DATA_START,224,8); + FieldToBuf(pPortInfo->cLinkWidthEna,pSMPBuf+IB_SMP_DATA_START,232,8); + FieldToBuf(pPortInfo->cLinkWidthSup,pSMPBuf+IB_SMP_DATA_START,240,8); + FieldToBuf(pPortInfo->cLinkWidthAct,pSMPBuf+IB_SMP_DATA_START,248,8); + FieldToBuf(pPortInfo->cLinkSpeedSup,pSMPBuf+IB_SMP_DATA_START,256,4); + FieldToBuf((u_int8_t)pPortInfo->cPortState,pSMPBuf+IB_SMP_DATA_START,260,4); + FieldToBuf(pPortInfo->cPhyState,pSMPBuf+IB_SMP_DATA_START,264,4); + FieldToBuf(pPortInfo->cDownDefState,pSMPBuf+IB_SMP_DATA_START,268,4); + FieldToBuf(pPortInfo->cMKProtect,pSMPBuf+IB_SMP_DATA_START,272,2); + ZeroToBuf(pSMPBuf+IB_SMP_DATA_START,274,3); + FieldToBuf(pPortInfo->cLMC,pSMPBuf+IB_SMP_DATA_START,277,3); + FieldToBuf(pPortInfo->cLinkSpeedAct,pSMPBuf+IB_SMP_DATA_START,280,4); + FieldToBuf(pPortInfo->cLinkSpeedEna,pSMPBuf+IB_SMP_DATA_START,284,4); + FieldToBuf(pPortInfo->cNbMTU,pSMPBuf+IB_SMP_DATA_START,288,4); + FieldToBuf(pPortInfo->cMasterSMSL,pSMPBuf+IB_SMP_DATA_START,292,4); + FieldToBuf(pPortInfo->cVLCap,pSMPBuf+IB_SMP_DATA_START,296,4); + ZeroToBuf(pSMPBuf+IB_SMP_DATA_START,300,4); + FieldToBuf(pPortInfo->cVLHighLimit,pSMPBuf+IB_SMP_DATA_START,304,8); + FieldToBuf(pPortInfo->cVLArbHighCap,pSMPBuf+IB_SMP_DATA_START,312,8); + FieldToBuf(pPortInfo->cVLArbLowCap,pSMPBuf+IB_SMP_DATA_START,320,8); + ZeroToBuf(pSMPBuf+IB_SMP_DATA_START,328,4); + FieldToBuf(pPortInfo->cMTUCap,pSMPBuf+IB_SMP_DATA_START,332,4); + FieldToBuf(pPortInfo->cVLStallCnt,pSMPBuf+IB_SMP_DATA_START,336,3); + FieldToBuf(pPortInfo->cHOQLife,pSMPBuf+IB_SMP_DATA_START,339,5); + FieldToBuf(pPortInfo->cOperVL,pSMPBuf+IB_SMP_DATA_START,344,4); + FieldToBuf(pPortInfo->cPartEnfIn,pSMPBuf+IB_SMP_DATA_START,348,1); + FieldToBuf(pPortInfo->cPartEnfOut,pSMPBuf+IB_SMP_DATA_START,349,1); + FieldToBuf(pPortInfo->cFilterRawIn,pSMPBuf+IB_SMP_DATA_START,350,1); + 
FieldToBuf(pPortInfo->cFilterRawOut,pSMPBuf+IB_SMP_DATA_START,351,1); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+44,(u_int8_t *)&(pPortInfo->wMKViolations),2); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+46,(u_int8_t *)&(pPortInfo->wPKViolations),2); + MyMemCopy(pSMPBuf+IB_SMP_DATA_START+48,(u_int8_t *)&(pPortInfo->wQKViolations),2); + FieldToBuf(pPortInfo->bGUIDCap,pSMPBuf+IB_SMP_DATA_START,400,8); + ZeroToBuf(pSMPBuf+IB_SMP_DATA_START,408,3); + FieldToBuf(pPortInfo->cSubnetTO,pSMPBuf+IB_SMP_DATA_START,411,5); + ZeroToBuf(pSMPBuf+IB_SMP_DATA_START,416,3); + FieldToBuf(pPortInfo->cRespTimeValue,pSMPBuf+IB_SMP_DATA_START,419,5); + FieldToBuf(pPortInfo->cLocalPhyErr,pSMPBuf+IB_SMP_DATA_START,424,4); + FieldToBuf(pPortInfo->cOverrunErr,pSMPBuf+IB_SMP_DATA_START,428,4); +} +//-------------------------------------------------------------------------------------------------- + + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "PortInfoMADToSt" +//-------------------------------------------------------------------------------------------------- +void PortInfoMADToSt(SM_MAD_PortInfo_t *pPortInfo,u_int8_t *pSMPBuf) +{ + //parse pSMPBuf into PortInfo struct + MyMemCopy((u_int8_t *)&(pPortInfo->qwMKey),pSMPBuf+IB_SMP_DATA_START,8); + memcpy((u_int8_t *)&(pPortInfo->qwGIDPrefix),pSMPBuf+IB_SMP_DATA_START+8,8); + MyMemCopy((u_int8_t *)&(pPortInfo->wLID),pSMPBuf+IB_SMP_DATA_START+16,2); + MyMemCopy((u_int8_t *)&(pPortInfo->wMasterSMLID),pSMPBuf+IB_SMP_DATA_START+18,2); + MyMemCopy((u_int8_t *)&(pPortInfo->dwCapMask),pSMPBuf+IB_SMP_DATA_START+20,4); + MyMemCopy((u_int8_t *)&(pPortInfo->wDiagCode),pSMPBuf+IB_SMP_DATA_START+24,2); + MyMemCopy((u_int8_t *)&(pPortInfo->wMKLease),pSMPBuf+IB_SMP_DATA_START+26,2); + pPortInfo->cLocalPortNum=BufToField(pSMPBuf+IB_SMP_DATA_START,224,8); + pPortInfo->cLinkWidthEna=BufToField(pSMPBuf+IB_SMP_DATA_START,232,8); + pPortInfo->cLinkWidthSup=BufToField(pSMPBuf+IB_SMP_DATA_START,240,8); + pPortInfo->cLinkWidthAct=BufToField(pSMPBuf+IB_SMP_DATA_START,248,8); + pPortInfo->cLinkSpeedSup=BufToField(pSMPBuf+IB_SMP_DATA_START,256,4); + pPortInfo->cPortState=(IB_port_state_t)BufToField(pSMPBuf+IB_SMP_DATA_START,260,4); + pPortInfo->cPhyState=BufToField(pSMPBuf+IB_SMP_DATA_START,264,4); + pPortInfo->cDownDefState=BufToField(pSMPBuf+IB_SMP_DATA_START,268,4); + pPortInfo->cMKProtect=BufToField(pSMPBuf+IB_SMP_DATA_START,272,2); + pPortInfo->cReserved1=BufToField(pSMPBuf+IB_SMP_DATA_START,274,3); + pPortInfo->cLMC=BufToField(pSMPBuf+IB_SMP_DATA_START,277,3); + pPortInfo->cLinkSpeedAct=BufToField(pSMPBuf+IB_SMP_DATA_START,280,4); + pPortInfo->cLinkSpeedEna=BufToField(pSMPBuf+IB_SMP_DATA_START,284,4); + pPortInfo->cNbMTU=BufToField(pSMPBuf+IB_SMP_DATA_START,288,4); + pPortInfo->cMasterSMSL=BufToField(pSMPBuf+IB_SMP_DATA_START,292,4); + pPortInfo->cVLCap=BufToField(pSMPBuf+IB_SMP_DATA_START,296,4); + pPortInfo->cReserved2=BufToField(pSMPBuf+IB_SMP_DATA_START,300,4); + pPortInfo->cVLHighLimit=BufToField(pSMPBuf+IB_SMP_DATA_START,304,8); + pPortInfo->cVLArbHighCap=BufToField(pSMPBuf+IB_SMP_DATA_START,312,8); + pPortInfo->cVLArbLowCap=BufToField(pSMPBuf+IB_SMP_DATA_START,320,8); + pPortInfo->cReserved3=BufToField(pSMPBuf+IB_SMP_DATA_START,328,4); + pPortInfo->cMTUCap=BufToField(pSMPBuf+IB_SMP_DATA_START,332,4); + pPortInfo->cVLStallCnt=BufToField(pSMPBuf+IB_SMP_DATA_START,336,3); + pPortInfo->cHOQLife=BufToField(pSMPBuf+IB_SMP_DATA_START,339,5); + pPortInfo->cOperVL=BufToField(pSMPBuf+IB_SMP_DATA_START,344,4); + 
pPortInfo->cPartEnfIn=BufToField(pSMPBuf+IB_SMP_DATA_START,348,1); + pPortInfo->cPartEnfOut=BufToField(pSMPBuf+IB_SMP_DATA_START,349,1); + pPortInfo->cFilterRawIn=BufToField(pSMPBuf+IB_SMP_DATA_START,350,1); + pPortInfo->cFilterRawOut=BufToField(pSMPBuf+IB_SMP_DATA_START,351,1); + MyMemCopy((u_int8_t *)&(pPortInfo->wMKViolations),pSMPBuf+IB_SMP_DATA_START+44,2); + MyMemCopy((u_int8_t *)&(pPortInfo->wPKViolations),pSMPBuf+IB_SMP_DATA_START+46,2); + MyMemCopy((u_int8_t *)&(pPortInfo->wQKViolations),pSMPBuf+IB_SMP_DATA_START+48,2); + pPortInfo->bGUIDCap=BufToField(pSMPBuf+IB_SMP_DATA_START,400,8); + pPortInfo->cReserved4=BufToField(pSMPBuf+IB_SMP_DATA_START,408,3); + pPortInfo->cSubnetTO=BufToField(pSMPBuf+IB_SMP_DATA_START,411,5); + pPortInfo->cReserved5=BufToField(pSMPBuf+IB_SMP_DATA_START,416,3); + pPortInfo->cRespTimeValue=BufToField(pSMPBuf+IB_SMP_DATA_START,419,5); + pPortInfo->cLocalPhyErr=BufToField(pSMPBuf+IB_SMP_DATA_START,424,4); + pPortInfo->cOverrunErr=BufToField(pSMPBuf+IB_SMP_DATA_START,428,4); +} +//-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- +//#define FUNCTION_NAME "PortInfoPrint" +//-------------------------------------------------------------------------------------------------- +void PortInfoPrint(SM_MAD_PortInfo_t *pPortInfo) +{ +#ifndef VXWORKS_OS // vxworks doesn't printf 64 bits int right. + //MTL_DEBUG3(" qwMKey: 0x"U64_FMT" \n",pPortInfo->qwMKey); +#else + MTL_DEBUG3(MT_FLFMT("qwMKey: %08lX%08lX"),*(unsigned long *)&pPortInfo->qwMKey,*(((unsigned long *)&pPortInfo->qwMKey +1))); +#endif //VXWORKS_OS + MTL_DEBUG3(MT_FLFMT("qwGIDPrefix = 0x%02x%02x%02x%02x%02x%02x%02x%02x"), + pPortInfo->qwGIDPrefix[0],pPortInfo->qwGIDPrefix[1],pPortInfo->qwGIDPrefix[2],pPortInfo->qwGIDPrefix[3], + pPortInfo->qwGIDPrefix[4],pPortInfo->qwGIDPrefix[5],pPortInfo->qwGIDPrefix[6],pPortInfo->qwGIDPrefix[7]); //REV show all bytes + MTL_DEBUG3(MT_FLFMT("wLID:0x%04X"),pPortInfo->wLID); + MTL_DEBUG3(MT_FLFMT("wMasterSMLID:0x%04X"),pPortInfo->wMasterSMLID); + MTL_DEBUG3(MT_FLFMT("dwCapMask:0x%08X"),pPortInfo->dwCapMask); + MTL_DEBUG3(MT_FLFMT("wDiagCode:0x%04X"),pPortInfo->wDiagCode); + MTL_DEBUG3(MT_FLFMT("wMKLease:0x%04X"),pPortInfo->wMKLease); + MTL_DEBUG3(MT_FLFMT("cLocalPortNum:0x%02X"),pPortInfo->cLocalPortNum); + MTL_DEBUG3(MT_FLFMT("cLinkWidthEna:0x%02X"),pPortInfo->cLinkWidthEna); + MTL_DEBUG3(MT_FLFMT("cLinkWidthSup:0x%02X"),pPortInfo->cLinkWidthSup); + MTL_DEBUG3(MT_FLFMT("cLinkWidthAct:0x%02X"),pPortInfo->cLinkWidthAct); + MTL_DEBUG3(MT_FLFMT("cLinkSpeedSup:0x%02X"),pPortInfo->cLinkSpeedSup); + MTL_DEBUG3(MT_FLFMT("cPortState:0x%02X"),pPortInfo->cPortState); + MTL_DEBUG3(MT_FLFMT("cPhyState:0x%02X"),pPortInfo->cPhyState); + MTL_DEBUG3(MT_FLFMT("cDownDefState:0x%02X"),pPortInfo->cDownDefState); + MTL_DEBUG3(MT_FLFMT("cMKProtect:0x%02X"),pPortInfo->cMKProtect); + MTL_DEBUG3(MT_FLFMT("cReserved1:0x%02X"),pPortInfo->cReserved1); + MTL_DEBUG3(MT_FLFMT("cLMC:0x%02X"),pPortInfo->cLMC); + MTL_DEBUG3(MT_FLFMT("cLinkSpeedAct:0x%02X"),pPortInfo->cLinkSpeedAct); + MTL_DEBUG3(MT_FLFMT("cLinkSpeedEna:0x%02X"),pPortInfo->cLinkSpeedEna); + MTL_DEBUG3(MT_FLFMT("cNbMTU:0x%02X"),pPortInfo->cNbMTU); + MTL_DEBUG3(MT_FLFMT("cMasterSMSL:0x%02X"),pPortInfo->cMasterSMSL); + MTL_DEBUG3(MT_FLFMT("cVLCap:0x%02X"),pPortInfo->cVLCap); + MTL_DEBUG3(MT_FLFMT("cReserved2:0x%02X"),pPortInfo->cReserved2); + 
MTL_DEBUG3(MT_FLFMT("cVLHighLimit:0x%02X"),pPortInfo->cVLHighLimit); + MTL_DEBUG3(MT_FLFMT("cVLArbHighCap:0x%02X"),pPortInfo->cVLArbHighCap); + MTL_DEBUG3(MT_FLFMT("cVLArbLowCap:0x%02X"),pPortInfo->cVLArbLowCap); + MTL_DEBUG3(MT_FLFMT("cReserved3:0x%02X"),pPortInfo->cReserved3); + MTL_DEBUG3(MT_FLFMT("cMTUCap:0x%02X"),pPortInfo->cMTUCap); + MTL_DEBUG3(MT_FLFMT("cVLStallCnt:0x%02X"),pPortInfo->cVLStallCnt); + MTL_DEBUG3(MT_FLFMT("cHOQLife:0x%02X"),pPortInfo->cHOQLife); + MTL_DEBUG3(MT_FLFMT("cOperVL:0x%02X"),pPortInfo->cOperVL); + MTL_DEBUG3(MT_FLFMT("cPartEnfIn:0x%02X"),pPortInfo->cPartEnfIn); + MTL_DEBUG3(MT_FLFMT("cPartEnfOut:0x%02X"),pPortInfo->cPartEnfOut); + MTL_DEBUG3(MT_FLFMT("cFilterRawIn:0x%02X"),pPortInfo->cFilterRawIn); + MTL_DEBUG3(MT_FLFMT("cFilterRawOut:0x%02X"),pPortInfo->cFilterRawOut); + MTL_DEBUG3(MT_FLFMT("wMKViolations:0x%04X"),pPortInfo->wMKViolations); + MTL_DEBUG3(MT_FLFMT("wPKViolations:0x%04X"),pPortInfo->wPKViolations); + MTL_DEBUG3(MT_FLFMT("wQKViolations:0x%04X"),pPortInfo->wQKViolations); + MTL_DEBUG3(MT_FLFMT("bGUIDCap:0x%02X"),pPortInfo->bGUIDCap); + MTL_DEBUG3(MT_FLFMT("cReserved4:0x%02X"),pPortInfo->cReserved4); + MTL_DEBUG3(MT_FLFMT("cSubnetTO:0x%02X"),pPortInfo->cSubnetTO); + MTL_DEBUG3(MT_FLFMT("cReserved5:0x%02X"),pPortInfo->cReserved5); + MTL_DEBUG3(MT_FLFMT("cRespTimeValue:0x%02X"),pPortInfo->cRespTimeValue); + MTL_DEBUG3(MT_FLFMT("cLocalPhyErr:0x%02X"),pPortInfo->cLocalPhyErr); + MTL_DEBUG3(MT_FLFMT("cOverrunErr:0x%02X"),pPortInfo->cOverrunErr); +} +//-------------------------------------------------------------------------------------------------- + +//-------------------------------------------------------------------------------------------------- diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.h new file mode 100644 index 00000000..fe9e8155 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/sm_mad.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#ifndef H_SM_MAD_H
+#define H_SM_MAD_H
+/* -------------------------------------------------------------------------------------------------- */
+/* IB structures and parameters */
+/* -------------------------------------------------------------------------------------------------- */
+#include
+#include
+
+/* -------------------------------------------------------------------------------------------------- */
+/* PORTINFO */
+/* -------------------------------------------------------------------------------------------------- */
+typedef struct SM_MAD_PortInfo_st {
+  u_int64_t qwMKey;            /* 0,64 */
+  IB_gid_prefix_t qwGIDPrefix; /* 64,64 */
+  IB_lid_t wLID;               /* 128,16 */
+  IB_lid_t wMasterSMLID;       /* 144,16 */
+  u_int32_t dwCapMask;         /* 160,32 */
+  u_int16_t wDiagCode;         /* 192,16 */
+  u_int16_t wMKLease;          /* 208,16 */
+  u_int8_t cLocalPortNum;      /* 224,8 */
+  u_int8_t cLinkWidthEna;      /* 232,8 */
+  u_int8_t cLinkWidthSup;      /* 240,8 */
+  u_int8_t cLinkWidthAct;      /* 248,8 */
+  u_int8_t cLinkSpeedSup;      /* 256,4 */
+  IB_port_state_t cPortState;  /* 260,4 */
+  u_int8_t cPhyState;          /* 264,4 */
+  u_int8_t cDownDefState;      /* 268,4 */
+  u_int8_t cMKProtect;         /* 272,2 */
+  u_int8_t cReserved1;         /* 274,3 */
+  u_int8_t cLMC;               /* 277,3 */
+  u_int8_t cLinkSpeedAct;      /* 280,4 */
+  u_int8_t cLinkSpeedEna;      /* 284,4 */
+  u_int8_t cNbMTU;             /* 288,4 */
+  u_int8_t cMasterSMSL;        /* 292,4 */
+  u_int8_t cVLCap;             /* 296,4 */
+  u_int8_t cReserved2;         /* 300,4 */
+  u_int8_t cVLHighLimit;       /* 304,8 */
+  u_int8_t cVLArbHighCap;      /* 312,8 */
+  u_int8_t cVLArbLowCap;       /* 320,8 */
+  u_int8_t cReserved3;         /* 328,4 */
+  u_int8_t cMTUCap;            /* 332,4 */
+  u_int8_t cVLStallCnt;        /* 336,3 */
+  u_int8_t cHOQLife;           /* 339,5 */
+  u_int8_t cOperVL;            /* 344,4 */
+  u_int8_t cPartEnfIn;         /* 348,1 */
+  u_int8_t cPartEnfOut;        /* 349,1 */
+  u_int8_t cFilterRawIn;       /* 350,1 */
+  u_int8_t cFilterRawOut;      /* 351,1 */
+  u_int16_t wMKViolations;     /* 352,16 */
+  u_int16_t wPKViolations;     /* 368,16 */
+  u_int16_t wQKViolations;     /* 384,16 */
+  u_int8_t bGUIDCap;           /* 400,8 */
+  u_int8_t cReserved4;         /* 408,3 */
+  u_int8_t cSubnetTO;          /* 411,5 */
+  u_int8_t cReserved5;         /* 416,3 */
+  u_int8_t cRespTimeValue;     /* 419,5 */
+  u_int8_t cLocalPhyErr;       /* 424,4 */
+  u_int8_t cOverrunErr;        /* 428,4 */
+} SM_MAD_PortInfo_t;
+
+#define IB_PORTINFO_PORTSTATE_DOWN 1
+
+/* -------------------------------------------------------------------------------------------------- */
+
+
+/* -------------------------------------------------------------------------------------------------- */
+/* NODEINFO */
+/* -------------------------------------------------------------------------------------------------- */
+typedef struct SM_MAD_NodeInfo_st {
+  u_int8_t cBaseVersion;
+  u_int8_t cClassVersion;
+  IB_node_type_t cNodeType;
+  u_int8_t cNumPorts;
+  IB_guid_t qwNodeGUID;
+  IB_guid_t qwPortGUID;
+  u_int16_t wPartCap;
+  u_int16_t wDeviceID;
+  u_int32_t dwRevision;
+  u_int8_t cLocalPortNum;
+  u_int32_t dwVendorID;
+} SM_MAD_NodeInfo_t;
+
+typedef struct SM_MAD_GUIDInfo_st {
+  IB_guid_t guid[8];
+} SM_MAD_GUIDInfo_t;
+
+typedef struct SM_MAD_Pkey_table_st {
+  u_int16_t pkey[32];
+} SM_MAD_Pkey_table_t;
+
+/* -------------------------------------------------------------------------------------------------- */
+
+
+/* -------------------------------------------------------------------------------------------------- */
+/* NODEDESC */
+/* -------------------------------------------------------------------------------------------------- */
+typedef struct NodeDesc {
+  char szNodeDesc[64];
+} NODEDESC;
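The /* bit-offset,width */ annotations above follow the MSB-first packing that FieldToBuf/BufToField implement in sm_mad.c. The sketch below is a minimal, self-contained illustration of that convention; the helpers are re-implemented here for the example (the offsets 277/3 and 260/4 are the cLMC and cPortState fields from the struct above, and the test scaffolding itself is hypothetical). It assumes, as every PortInfo field does, that a field never crosses a byte boundary.

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    /* Same MSB-first packing convention as FieldToBuf/BufToField. */
    static void field_to_buf(uint8_t field, uint8_t *buf, uint16_t off, uint16_t len)
    {
        buf[off >> 3] &= ~(((0x1 << len) - 1) << (8 - len - (off & 0x7)));
        buf[off >> 3] |= (field & ((0x1 << len) - 1)) << (8 - len - (off & 0x7));
    }

    static uint8_t buf_to_field(const uint8_t *buf, uint16_t off, uint16_t len)
    {
        return (buf[off >> 3] >> (8 - len - (off & 0x7))) & ((0x1 << len) - 1);
    }

    int main(void)
    {
        uint8_t data[64];                 /* the SMP data area, pSMPBuf+IB_SMP_DATA_START */
        memset(data, 0, sizeof(data));

        field_to_buf(0x5, data, 277, 3);  /* cLMC: bit offset 277, width 3 */
        field_to_buf(0x4, data, 260, 4);  /* cPortState: bit offset 260, width 4 */

        printf("LMC=%u PortState=%u\n",
               buf_to_field(data, 277, 3), buf_to_field(data, 260, 4));
        return 0;                         /* prints: LMC=5 PortState=4 */
    }

The round trip works because both helpers shift by 8-len-(off&7), i.e. they count bits from the most significant end of the byte, matching the wire layout of SMP attributes.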
+ + +/* -------------------------------------------------------------------------------------------------- */ +#define GUID_DUMMY MAKE_ULONGLONG(0xFFFFFFFFFFFFFFFF) +#define GUID_INVALID MAKE_ULONGLONG(0x0000000000000000) + +/* -------------------------------------------------------------------------------------------------- */ + +#define IB_INVALID_LID 0x0000 +#define IB_PERMISSIVE_LID 0xFFFF + +#define IB_MAD_SIZE 256 +#define IB_SMP_DATA_START 64 + +/* REV convert to enum */ +#define IB_CLASS_SMP 0x01 +#define IB_CLASS_DIR_ROUTE 0x81 + +#define IB_METHOD_GET 0x01 +#define IB_METHOD_SET 0x02 + +typedef enum { + IB_SMP_ATTRIB_NODEINFO= 0x0011, + IB_SMP_ATTRIB_GUIDINFO= 0x0014, + IB_SMP_ATTRIB_PORTINFO= 0x0015, + IB_SMP_ATTRIB_PARTTABLE= 0x0016 +} SM_MAD_attrib_t; + +void MADHeaderBuild(u_int8_t cMgtClass, + u_int16_t wClSp, + u_int8_t cMethod, + u_int16_t wAttrib, + u_int32_t dwModif, + u_int8_t *pSMPBuf) ; + +void MadBufPrint(void *madbuf); +void NodeInfoStToMAD(SM_MAD_NodeInfo_t *pNodeInfo,u_int8_t *pSMPBuf); +void NodeInfoMADToSt(SM_MAD_NodeInfo_t *pNodeInfo,u_int8_t *pSMPBuf); +void NodeInfoPrint(SM_MAD_NodeInfo_t *pNodeInfo); +void GUIDInfoMADToSt(SM_MAD_GUIDInfo_t *pGuidTable,u_int8_t *pSMPBuf); +void GUIDInfoPrint(SM_MAD_GUIDInfo_t *pGuidTable); +void PKeyTableMADToSt(SM_MAD_Pkey_table_t *pKeyTable,u_int8_t *pSMPBuf); +void PKeyTablePrint(SM_MAD_Pkey_table_t *pKeyTable); +void PortInfoStToMAD(SM_MAD_PortInfo_t *pPortInfo,u_int8_t *pSMPBuf); +void PortInfoMADToSt(SM_MAD_PortInfo_t *pPortInfo,u_int8_t *pSMPBuf); +void PortInfoPrint(SM_MAD_PortInfo_t *pPortInfo); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.c b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.c new file mode 100644 index 00000000..27d63570 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +/************************************************************************ + * Divide and conquer to find the highest bits. + * But when getting within 8 bits, just a lookup into a + * static constant table. This also serves a quick optimization + * when x < 2^8 = 256. The constant table is set on compile time, + * and was generated by the help of our Python friend. 
+ * Very efficient, if I may say so -- yotam
+ */
+u_int8_t floor_log2(u_int64_t x)
+{
+  enum { nLowBits = 8 };
+  static const u_int64_t
+    highMask = ~(((u_int64_t)1 << nLowBits) - (u_int64_t)1);
+  /* lowlog2[i] = floor(log2(i)) for 1 <= i <= 255; lowlog2[0] = 0 */
+  static const unsigned char lowlog2[1<<nLowBits] = {
+    0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,
+    4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,
+    5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+    5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,
+    6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+    6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+    6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+    6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,
+    7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7
+  };
+  u_int8_t p = 0;
+
+  if (x & highMask)
+  {
+    u_int64_t high;
+    unsigned int step = 64; /* bits in x; halved down to nLowBits */
+    while (step > nLowBits)
+    {
+      step >>= 1; /* /= 2 */
+      high = (x >> step);
+      if (high)
+      {
+        p |= step;
+        x = high;
+      }
+      else
+      {
+        u_int64_t mask = ((u_int64_t)1 << step) - 1;
+        x &= mask;
+      }
+    }
+  }
+
+  p |= lowlog2[x];
+  return p;
+} /* floor_log2 */
+
+
+/************************************************************************/
+u_int8_t ceil_log2(u_int64_t x)
+{
+  u_int8_t p = floor_log2(x);
+  if (((u_int64_t)1 << p) < x)
+  {
+    p += 1;
+  }
+  return p;
+} /* ceil_log2 */
+
+
+/************************************************************************
+ * Divide and conquer to find the lowest bit.
+ * But when getting within 8 bits, just a lookup into a
+ * static constant table. This also serves a quick optimization
+ * when x < 2^8 = 256. The constant table is set on compile time,
+ * and was generated by the help of our Python friend.
+ * Very efficient, if I may say so -- yotam
+ */
+u_int8_t lowest_bit(u_int64_t x)
+{
+  enum { nLowBits = 8 };
+  static const u_int64_t one_64 = 1;
+  static const u_int64_t lowMask = ((u_int64_t)1 << nLowBits) - (u_int64_t)1;
+  /* lowest_bit8[i] = minimal b such that (i > 1) and (((1<<b) & i) != 0); 0 for i in {0,1} */
+  static const unsigned char lowest_bit8[1<<nLowBits] = {
+    0,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    7,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    6,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    5,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0,
+    4,0,1,0,2,0,1,0,3,0,1,0,2,0,1,0
+  };
+  u_int8_t p = 64; /* returned when x == 0 */
+
+  if (x)
+  {
+    u_int64_t low;
+    unsigned int xLow;
+    unsigned int step = 64; /* bits in x; halved down to nLowBits */
+    p = 0;
+    while (step > nLowBits)
+    {
+      step >>= 1; /* /= 2 */
+      low = x & ((one_64 << step) - 1);
+      if (low)
+      {
+        x = low;
+      }
+      else
+      {
+        p |= step;
+        x >>= step;
+      }
+    }
+    xLow = (unsigned int)(x & lowMask);
+    p |= lowest_bit8[xLow];
+  }
+
+  return p;
+} /* lowest_bit */
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.h b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.h
new file mode 100644
index 00000000..577e1abb
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/hcahal/tavor/util/tlog2.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(_TLOG2_H)
+#define _TLOG2_H
+
+#include
+
+#ifdef __cplusplus
+ extern "C" {
+#endif
+
+
+/************************************************************************
+ * Function: floor_log2(n)
+ * floor_log2(0) = 0 if n=0
+ * floor_log2(n) = floor(log_2(n)) if n>=1
+ *
+ * Better than formal description:
+ *
+ * floor_log2(0) = 0
+ * floor_log2(1) = 0
+ * floor_log2(2) = 1
+ * floor_log2(3) = 1
+ * floor_log2(4) = 2
+ * floor_log2(5) = 2
+ * ...
+ * floor_log2(15) = 3
+ * floor_log2(16) = 4
+ * floor_log2(17) = 4
+ * ...
+ */
+extern u_int8_t floor_log2(u_int64_t x);
+/* extern unsigned int tlog2(u_int64_t x); / * obsolete, use floor_log2 */
+
+/************************************************************************
+ * Function: ceil_log2(n)
+ *
+ * Minimal p>=0, such that x <= 2^p.
+ * ceil_log2(0) = 0 if n=0
+ * ceil_log2(n) = ceil(log_2(n)) if n>=1
+ *
+ * Better than formal description:
+ *
+ * ceil_log2(0) = 0
+ * ceil_log2(1) = 0
+ * ceil_log2(2) = 1
+ * ceil_log2(3) = 2
+ * ceil_log2(4) = 2
+ * ceil_log2(5) = 3
+ * ...
+ * ceil_log2(15) = 4
+ * ceil_log2(16) = 4
+ * ceil_log2(17) = 5
+ * ...
+ */
+extern u_int8_t ceil_log2(u_int64_t x);
+
+/************************************************************************
+ * Function: lowest_bit(x)
+ *
+ * If x=0 return 64. Otherwise return minimal b such that ((1<<b) & x) != 0.
+ */
+extern u_int8_t lowest_bit(u_int64_t x);
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif /* _TLOG2_H */
+  p->HHIF_open_hca = &zombie_open_hca;
+  p->HHIF_close_hca = &zombie_close_hca;
+  p->HHIF_alloc_ul_resources = &zombie_alloc_ul_resources;
+  p->HHIF_free_ul_resources = &zombie_free_ul_resources;
+  p->HHIF_query_hca = &zombie_query_hca;
+  p->HHIF_modify_hca = &zombie_modify_hca;
+  p->HHIF_query_port_prop = &zombie_query_port_prop;
+  p->HHIF_get_pkey_tbl = &zombie_get_pkey_tbl;
+  p->HHIF_get_gid_tbl = &zombie_get_gid_tbl;
+  p->HHIF_get_lid = &zombie_get_lid;
+  p->HHIF_alloc_pd = &zombie_alloc_pd;
+  p->HHIF_free_pd = &zombie_free_pd;
+  p->HHIF_alloc_rdd = &zombie_alloc_rdd;
+  p->HHIF_free_rdd = &zombie_free_rdd;
+  p->HHIF_create_priv_ud_av = &zombie_create_priv_ud_av;
+  p->HHIF_modify_priv_ud_av = &zombie_modify_priv_ud_av;
+  p->HHIF_query_priv_ud_av = &zombie_query_priv_ud_av;
+  p->HHIF_destroy_priv_ud_av = &zombie_destroy_priv_ud_av;
+  p->HHIF_register_mr = &zombie_register_mr;
+  p->HHIF_reregister_mr = &zombie_reregister_mr;
+  p->HHIF_register_smr = &zombie_register_smr;
+  p->HHIF_deregister_mr = &zombie_deregister_mr;
+  p->HHIF_query_mr = &zombie_query_mr;
+  p->HHIF_alloc_mw = &zombie_alloc_mw;
+  p->HHIF_free_mw = &zombie_free_mw;
+  p->HHIF_create_cq = &zombie_create_cq;
+  p->HHIF_resize_cq = &zombie_resize_cq;
+  p->HHIF_query_cq = &zombie_query_cq;
+  p->HHIF_destroy_cq = &zombie_destroy_cq;
+  p->HHIF_create_qp = &zombie_create_qp;
+  p->HHIF_get_special_qp = &zombie_get_special_qp;
+  p->HHIF_modify_qp = &zombie_modify_qp;
+  p->HHIF_query_qp = &zombie_query_qp;
+  p->HHIF_destroy_qp = &zombie_destroy_qp;
+  p->HHIF_create_eec = &zombie_create_eec;
+  p->HHIF_modify_eec = &zombie_modify_eec;
+  p->HHIF_query_eec = &zombie_query_eec;
+  p->HHIF_destroy_eec = &zombie_destroy_eec;
+  p->HHIF_set_async_eventh = &zombie_set_async_eventh;
+  p->HHIF_set_comp_eventh = &zombie_set_comp_eventh;
+  p->HHIF_attach_to_multicast = &zombie_attach_to_multicast;
+  p->HHIF_detach_from_multicast = &zombie_detach_from_multicast;
+  p->HHIF_process_local_mad = &zombie_process_local_mad;
+} /* zombie_init */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/allocator.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/allocator.h
new file mode 100644
index 00000000..370ab42a
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/allocator.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
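The value tables in the tlog2.h comments pin the three contracts down exactly. The harness below is a hedged sketch that re-implements those contracts naively (a reference check, not the shipped divide-and-conquer code) and verifies a few of the documented cases:

    #include <stdio.h>
    #include <stdint.h>
    #include <assert.h>

    /* Naive reference implementations of the tlog2.h contracts. */
    static uint8_t ref_floor_log2(uint64_t x)
    {
        uint8_t p = 0;
        while (x >>= 1) p++;      /* floor(log2(x)); 0 for x in {0,1} */
        return p;
    }

    static uint8_t ref_ceil_log2(uint64_t x)
    {
        uint8_t p = ref_floor_log2(x);
        return ((uint64_t)1 << p) < x ? p + 1 : p;
    }

    static uint8_t ref_lowest_bit(uint64_t x)
    {
        uint8_t b = 0;
        if (x == 0) return 64;    /* documented: lowest_bit(0) == 64 */
        while (!(x & 1)) { x >>= 1; b++; }
        return b;
    }

    int main(void)
    {
        assert(ref_floor_log2(16) == 4 && ref_floor_log2(17) == 4);
        assert(ref_ceil_log2(15) == 4 && ref_ceil_log2(17) == 5);
        assert(ref_lowest_bit(0) == 64 && ref_lowest_bit(40) == 3);
        printf("tlog2 contract checks passed\n");
        return 0;
    }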
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_ALLOCATOR_H +#define VIP_ALLOCATOR_H + +#include +#include "vip_common.h" + +#ifdef VIP_ENABLE_MALLOC +/* Most code only uses pointer to free. + * Avoid unused function warning for them */ +static VIP_allocator_malloc_t VIP_malloc; +static void* VIP_malloc(size_t size) { + return MALLOC(size); +} +#endif + +static void VIP_free(void* p) { + FREE(p); +} + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common.def b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common.def new file mode 100644 index 00000000..1829e4f9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common.def @@ -0,0 +1,116 @@ +EXPORTS + ; ------------------------------ + ; vip_array.c + ; ------------------------------ + VIP_array_create + VIP_array_create_maxsize + VIP_array_destroy + VIP_array_insert + VIP_array_insert2hndl + VIP_array_insert_ptr + VIP_array_erase + VIP_array_erase_prepare + VIP_array_erase_undo + VIP_array_erase_done + VIP_array_find + VIP_array_find_release + VIP_array_find_release_erase + VIP_array_find_release_erase_prepare + VIP_array_find_hold + VIP_array_get_num_of_objects + VIP_array_get_first_handle + VIP_array_get_next_handle + VIP_array_get_first_handle_hold + VIP_array_get_next_handle_hold + ; ------------------------------ + ; vip_hash.c + ; ------------------------------ + ; vip_hash.h + VIP_hash_create + VIP_hash_create_maxsize + VIP_hash_destroy + VIP_hash_insert + VIP_hash_insert_ptr + VIP_hash_erase + VIP_hash_find + VIP_hash_find_ptr + VIP_hash_may_grow + VIP_hash_traverse + VIP_hash_get_num_of_buckets + VIP_hash_get_num_of_objects + ; vip_hashp.h + VIP_hashp_create + VIP_hashp_create_maxsize + VIP_hashp_destroy + VIP_hashp_insert + VIP_hashp_insert_ptr + VIP_hashp_erase + VIP_hashp_find + VIP_hashp_find_ptr + VIP_hashp_get_num_of_buckets + VIP_hashp_get_num_of_objects + VIP_hashp_may_grow + VIP_hashp_traverse + ; vip_hashp2p.h + VIP_hashp2p_create + VIP_hashp2p_create_maxsize + VIP_hashp2p_destroy + VIP_hashp2p_insert + VIP_hashp2p_insert_ptr + VIP_hashp2p_erase + VIP_hashp2p_find + VIP_hashp2p_find_ptr + VIP_hashp2p_get_num_of_buckets + VIP_hashp2p_get_num_of_objects + VIP_hashp2p_may_grow + VIP_hashp2p_traverse + ; vip_hash64p.h + VIP_hash64p_create + VIP_hash64p_create_maxsize + VIP_hash64p_destroy + VIP_hash64p_insert + VIP_hash64p_insert_ptr + VIP_hash64p_erase + 
VIP_hash64p_find + VIP_hash64p_find_ptr + VIP_hash64p_get_num_of_buckets + VIP_hash64p_get_num_of_objects + VIP_hash64p_may_grow + VIP_hash64p_traverse + ; ------------------------------ + ; vapi_common.c + ; ------------------------------ + VAPI_strerror + VAPI_strerror_sym + VAPI_hca_cap_sym + VAPI_hca_attr_mask_sym + VAPI_qp_attr_mask_sym + VAPI_mrw_acl_mask_sym + VAPI_mr_change_mask_sym + VAPI_rdma_atom_acl_mask_sym + VAPI_atomic_cap_sym + VAPI_sig_type_sym + VAPI_ts_type_sym + VAPI_qp_state_sym + VAPI_mig_state_sym + VAPI_special_qp_sym + VAPI_mrw_type_sym + VAPI_remote_node_addr_sym + VAPI_wr_opcode_sym + VAPI_cqe_opcode_sym + VAPI_wc_status_sym + VAPI_comp_type_sym + VAPI_cq_notif_sym + VAPI_event_record_sym + VAPI_event_syndrome_sym + ; ------------------------------ + ; VIP_cirq.c + ; ------------------------------ +; VIP_cirq_stats_print +; VIP_cirq_create +; VIP_cirq_add +; VIP_cirq_peek +; VIP_cirq_remove +; VIP_cirq_empty +; VIP_cirq_destroy + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common_kl.def b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common_kl.def new file mode 100644 index 00000000..10c2f45c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_common_kl.def @@ -0,0 +1,126 @@ +EXPORTS + ; for OS only + DllInitialize private + DllUnload private + ; ------------------------------ + ; vip_array.c + ; ------------------------------ + VIP_array_create + VIP_array_create_maxsize + VIP_array_destroy + VIP_array_insert + VIP_array_insert2hndl + VIP_array_insert_ptr + VIP_array_erase + VIP_array_erase_prepare + VIP_array_erase_undo + VIP_array_erase_done + VIP_array_find + VIP_array_find_release + VIP_array_find_release_erase + VIP_array_find_release_erase_prepare + VIP_array_find_hold + VIP_array_get_num_of_objects + VIP_array_get_first_handle + VIP_array_get_next_handle + VIP_array_get_first_handle_hold + VIP_array_get_next_handle_hold + + ; ------------------------------ + ; vip_hash.c + ; ------------------------------ + ; vip_hash.h + VIP_hash_create + VIP_hash_create_maxsize + VIP_hash_destroy + VIP_hash_insert + VIP_hash_insert_ptr + VIP_hash_erase + VIP_hash_find + VIP_hash_find_ptr + VIP_hash_may_grow + VIP_hash_traverse + VIP_hash_get_num_of_buckets + VIP_hash_get_num_of_objects + ; vip_hashp.h + VIP_hashp_create + VIP_hashp_create_maxsize + VIP_hashp_destroy + VIP_hashp_insert + VIP_hashp_insert_ptr + VIP_hashp_erase + VIP_hashp_find + VIP_hashp_find_ptr + VIP_hashp_get_num_of_buckets + VIP_hashp_get_num_of_objects + VIP_hashp_may_grow + VIP_hashp_traverse + ; vip_hashp2p.h + VIP_hashp2p_create + VIP_hashp2p_create_maxsize + VIP_hashp2p_destroy + VIP_hashp2p_insert + VIP_hashp2p_insert_ptr + VIP_hashp2p_erase + VIP_hashp2p_find + VIP_hashp2p_find_ptr + VIP_hashp2p_get_num_of_buckets + VIP_hashp2p_get_num_of_objects + VIP_hashp2p_may_grow + VIP_hashp2p_traverse + ; vip_hash64p.h + VIP_hash64p_create + VIP_hash64p_create_maxsize + VIP_hash64p_destroy + VIP_hash64p_insert + VIP_hash64p_insert_ptr + VIP_hash64p_erase + VIP_hash64p_find + VIP_hash64p_find_ptr + VIP_hash64p_get_num_of_buckets + VIP_hash64p_get_num_of_objects + VIP_hash64p_may_grow + VIP_hash64p_traverse + ; ------------------------------ + ; vapi_common.c + ; ------------------------------ + VAPI_strerror + VAPI_strerror_sym + VAPI_hca_cap_sym + VAPI_hca_attr_mask_sym + VAPI_qp_attr_mask_sym + VAPI_mrw_acl_mask_sym + VAPI_mr_change_mask_sym + VAPI_rdma_atom_acl_mask_sym + VAPI_atomic_cap_sym + VAPI_sig_type_sym + 
VAPI_ts_type_sym + VAPI_qp_state_sym + VAPI_mig_state_sym + VAPI_special_qp_sym + VAPI_mrw_type_sym + VAPI_remote_node_addr_sym + VAPI_wr_opcode_sym + VAPI_cqe_opcode_sym + VAPI_wc_status_sym + VAPI_comp_type_sym + VAPI_cq_notif_sym + VAPI_event_record_sym + VAPI_event_syndrome_sym + ; ------------------------------ + ; VIP_cirq.c + ; ------------------------------ + VIP_cirq_stats_print + VIP_cirq_create + VIP_cirq_add + VIP_cirq_peek + VIP_cirq_remove + VIP_cirq_empty + VIP_cirq_destroy + ; ------------------------------ + ; VIP_delay_unlock.c + ; ------------------------------ + VIP_delay_unlock_create + VIP_delay_unlock_insert + VIP_delay_unlock_destroy + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_mod_obj.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_mod_obj.c new file mode 100644 index 00000000..927c791c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vapi_mod_obj.c @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifdef MT_KERNEL +#include "mtl_types.h" +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** VAPI_COMMON_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** VAPI_COMMON_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** VAPI_COMMON_KL DllUnload()"); + return STATUS_SUCCESS; +} +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vip_imp.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vip_imp.h new file mode 100644 index 00000000..b518d175 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/os_dep/win/vip_imp.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_IMP_H +#define VIP_IMP_H + +#include + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.c new file mode 100644 index 00000000..e2fe827e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.c @@ -0,0 +1,524 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifdef MT_KERNEL + /* Taken care via vapi_common.h -> mtl_common.h -> ... 
->
+ *    -> /lib/modules/N.N.N/build/include/linux/string.h
+ * extern int strlen(const char*);
+ * extern char* strcpy(char*, const char*);
+ */
+#else
+# include <string.h>
+#endif
+#include "vapi_common.h"
+
+const char* VAPI_strerror( VAPI_ret_t errnum)
+{
+  switch (errnum) {
+#define VAPI_ERROR_INFO(A, B, C) case A: return C;
+  VAPI_ERROR_LIST
+#undef VAPI_ERROR_INFO
+  default: return "VAPI_UNKNOWN_ERROR";
+  }
+}
+
+const char* VAPI_strerror_sym( VAPI_ret_t errnum)
+{
+  switch (errnum) {
+#define VAPI_ERROR_INFO(A, B, C) case A: return #A;
+  VAPI_ERROR_LIST
+#undef VAPI_ERROR_INFO
+  default: return "VAPI_UNKNOWN_ERROR";
+  }
+}
+
+
+
+static char* safe_append(
+  char* cbuf,
+  char* buf_end,
+  u_int32_t mask,
+  u_int32_t flag,
+  const char* flag_sym
+)
+{
+  if (mask & flag)
+  {
+    int l = (int)strlen(flag_sym);
+    if (cbuf + l + 2 < buf_end)
+    {
+      strcpy(cbuf, flag_sym);
+      cbuf += l;
+      *cbuf++ = '+';
+      *cbuf = '\0';
+    }
+    else
+    {
+      cbuf = NULL;
+    }
+  }
+  return cbuf;
+} /* safe_append */
+
+
+static void end_mask_sym(char* buf, char* cbuf, int bufsz)
+{
+  if (bufsz > 0)
+  {
+    if (buf == cbuf)
+    {
+      *cbuf = '\0'; /* empty string */
+    }
+    else if (cbuf == 0) /* was truncated */
+    {
+      int l = (int)strlen(buf);
+      buf[l - 1] = '>';
+    }
+  }
+} /* end_mask_sym */
+
+
+#define INIT_BUF_SKIP(skipped_pfx) \
+  int skip = (int)strlen(skipped_pfx); \
+  char* cbuf = buf; \
+  char* buf_end = buf + bufsz; \
+  *buf = '\0';
+
+
+#define SAFE_APPEND(e) \
+  if (cbuf) { cbuf = safe_append(cbuf, buf_end, mask, e, #e + skip); }
+
+const char* VAPI_hca_cap_sym(char* buf, int bufsz, u_int32_t mask)
+{
+  INIT_BUF_SKIP("VAPI_")
+
+  SAFE_APPEND(VAPI_RESIZE_OUS_WQE_CAP)
+  SAFE_APPEND(VAPI_BAD_PKEY_COUNT_CAP)
+  SAFE_APPEND(VAPI_BAD_QKEY_COUNT_CAP)
+  SAFE_APPEND(VAPI_RAW_MULTI_CAP)
+  SAFE_APPEND(VAPI_AUTO_PATH_MIG_CAP)
+  SAFE_APPEND(VAPI_CHANGE_PHY_PORT_CAP)
+  SAFE_APPEND(VAPI_UD_AV_PORT_ENFORCE_CAP)
+  SAFE_APPEND(VAPI_CURR_QP_STATE_MOD_CAP)
+  SAFE_APPEND(VAPI_SHUTDOWN_PORT_CAP)
+  SAFE_APPEND(VAPI_INIT_TYPE_CAP)
+  SAFE_APPEND(VAPI_PORT_ACTIVE_EV_CAP)
+  SAFE_APPEND(VAPI_SYS_IMG_GUID_CAP)
+  SAFE_APPEND(VAPI_RC_RNR_NAK_GEN_CAP)
+
+  end_mask_sym(buf, cbuf, bufsz);
+  return buf;
+} /* VAPI_hca_cap_sym */
+
+
+const char* VAPI_hca_attr_mask_sym(char* buf, int bufsz, u_int32_t mask)
+{
+  INIT_BUF_SKIP("HCA_ATTR_")
+  SAFE_APPEND(HCA_ATTR_IS_SM)
+  SAFE_APPEND(HCA_ATTR_IS_SNMP_TUN_SUP)
+  SAFE_APPEND(HCA_ATTR_IS_DEV_MGT_SUP)
+  SAFE_APPEND(HCA_ATTR_IS_VENDOR_CLS_SUP)
+  SAFE_APPEND(HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP)
+  SAFE_APPEND(HCA_ATTR_MAX)
+  end_mask_sym(buf, cbuf, bufsz);
+  return buf;
+} /* VAPI_hca_attr_mask_sym */
+
+
+const char* VAPI_qp_attr_mask_sym(char* buf, int bufsz, u_int32_t mask)
+{
+  INIT_BUF_SKIP("QP_ATTR_")
+
+  SAFE_APPEND(QP_ATTR_QP_STATE)
+  SAFE_APPEND(QP_ATTR_EN_SQD_ASYN_NOTIF)
+  SAFE_APPEND(QP_ATTR_QP_NUM)
+  SAFE_APPEND(QP_ATTR_REMOTE_ATOMIC_FLAGS)
+  SAFE_APPEND(QP_ATTR_PKEY_IX)
+  SAFE_APPEND(QP_ATTR_PORT)
+  SAFE_APPEND(QP_ATTR_QKEY)
+  SAFE_APPEND(QP_ATTR_AV)
+  SAFE_APPEND(QP_ATTR_PATH_MTU)
+  SAFE_APPEND(QP_ATTR_TIMEOUT)
+  SAFE_APPEND(QP_ATTR_RETRY_COUNT)
+  SAFE_APPEND(QP_ATTR_RNR_RETRY)
+  SAFE_APPEND(QP_ATTR_RQ_PSN)
+  SAFE_APPEND(QP_ATTR_QP_OUS_RD_ATOM)
+  SAFE_APPEND(QP_ATTR_ALT_PATH)
+  SAFE_APPEND(QP_ATTR_RSRV_1)
+  SAFE_APPEND(QP_ATTR_RSRV_2)
+  SAFE_APPEND(QP_ATTR_RSRV_3)
+  SAFE_APPEND(QP_ATTR_RSRV_4)
+  SAFE_APPEND(QP_ATTR_RSRV_5)
+  SAFE_APPEND(QP_ATTR_RSRV_6)
+  //SAFE_APPEND(QP_ATTR_ALT_TIMEOUT)
+  //SAFE_APPEND(QP_ATTR_ALT_RETRY_COUNT)
+  //SAFE_APPEND(QP_ATTR_ALT_RNR_RETRY)
+  //SAFE_APPEND(QP_ATTR_ALT_PKEY_IX)
+
//SAFE_APPEND(QP_ATTR_ALT_PORT) + SAFE_APPEND(QP_ATTR_MIN_RNR_TIMER) + SAFE_APPEND(QP_ATTR_SQ_PSN) + SAFE_APPEND(QP_ATTR_OUS_DST_RD_ATOM) + SAFE_APPEND(QP_ATTR_PATH_MIG_STATE) + SAFE_APPEND(QP_ATTR_CAP) + SAFE_APPEND(QP_ATTR_DEST_QP_NUM) + end_mask_sym(buf, cbuf, bufsz); + return buf; +} /* VAPI_qp_attr_mask_sym */ + + +const char* VAPI_mrw_acl_mask_sym(char* buf, int bufsz, u_int32_t mask) +{ + INIT_BUF_SKIP("VAPI_EN_") + + SAFE_APPEND(VAPI_EN_LOCAL_WRITE) + SAFE_APPEND(VAPI_EN_REMOTE_WRITE) + SAFE_APPEND(VAPI_EN_REMOTE_READ) + SAFE_APPEND(VAPI_EN_REMOTE_ATOM) + SAFE_APPEND(VAPI_EN_MEMREG_BIND) + end_mask_sym(buf, cbuf, bufsz); + return buf; +} /* VAPI_mrw_acl_mask_sym */ + + +const char* VAPI_mr_change_mask_sym(char* buf, int bufsz, u_int32_t mask) +{ + INIT_BUF_SKIP("VAPI_MR_") + + SAFE_APPEND(VAPI_MR_CHANGE_TRANS) + SAFE_APPEND(VAPI_MR_CHANGE_PD) + SAFE_APPEND(VAPI_MR_CHANGE_ACL) + end_mask_sym(buf, cbuf, bufsz); + return buf; +} /* VAPI_mr_change_mask_sym */ + + +const char* VAPI_rdma_atom_acl_mask_sym(char* buf, int bufsz, u_int32_t mask) +{ + INIT_BUF_SKIP("VAPI_EN_REM_") + SAFE_APPEND(VAPI_EN_REM_WRITE) + SAFE_APPEND(VAPI_EN_REM_READ) + SAFE_APPEND(VAPI_EN_REM_ATOMIC_OP) + end_mask_sym(buf, cbuf, bufsz); + return buf; +} /* VAPI_rdma_atom_acl_sym */ + + +#define CASE_SETSTR(e) case e: s = #e; break; +static const char* UnKnown = "UnKnown"; + +const char* VAPI_atomic_cap_sym(VAPI_atomic_cap_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_ATOMIC_CAP_NONE) + CASE_SETSTR(VAPI_ATOMIC_CAP_HCA) + CASE_SETSTR(VAPI_ATOMIC_CAP_GLOB) + default: ; + } + return s; +} /* VAPI_atomic_cap_sym */ + + +const char* VAPI_sig_type_sym(VAPI_sig_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_SIGNAL_ALL_WR) + CASE_SETSTR(VAPI_SIGNAL_REQ_WR) + default: ; + } + return s; +} /* VAPI_sig_type_sym */ + + +const char* VAPI_ts_type_sym(VAPI_ts_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_TS_RC) + CASE_SETSTR(VAPI_TS_RD) + CASE_SETSTR(VAPI_TS_UC) + CASE_SETSTR(VAPI_TS_UD) + CASE_SETSTR(VAPI_TS_RAW) + default: ; + } + return s; +} /* VAPI_ts_type_sym */ + + +const char* VAPI_qp_state_sym(VAPI_qp_state_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_RESET) + CASE_SETSTR(VAPI_INIT) + CASE_SETSTR(VAPI_RTR) + CASE_SETSTR(VAPI_RTS) + CASE_SETSTR(VAPI_SQD) + CASE_SETSTR(VAPI_SQE) + CASE_SETSTR(VAPI_ERR) + default: ; + } + return s; +} /* VAPI_qp_state_sym */ + + +const char* VAPI_mig_state_sym(VAPI_mig_state_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_MIGRATED) + CASE_SETSTR(VAPI_REARM) + CASE_SETSTR(VAPI_ARMED) + default: ; + } + return s; +} /* VAPI_mig_state_sym */ + + +const char* VAPI_special_qp_sym(VAPI_special_qp_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_REGULAR_QP) + CASE_SETSTR(VAPI_SMI_QP) + CASE_SETSTR(VAPI_GSI_QP) + CASE_SETSTR(VAPI_RAW_IPV6_QP) + CASE_SETSTR(VAPI_RAW_ETY_QP) + default: ; + } + return s; +} /* VAPI_special_qp_sym */ + + +const char* VAPI_mrw_type_sym(VAPI_mrw_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_MR) + CASE_SETSTR(VAPI_MW) + CASE_SETSTR(VAPI_MPR) + CASE_SETSTR(VAPI_MSHAR) + default: ; + } + return s; +} /* VAPI_mrw_type_sym */ + + +const char* VAPI_remote_node_addr_sym(VAPI_remote_node_addr_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_RNA_RD) + CASE_SETSTR(VAPI_RNA_UD) + CASE_SETSTR(VAPI_RNA_RAW_ETY) + CASE_SETSTR(VAPI_RNA_RAW_IPV6) + default: ; + } + return s; +} 
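CASE_SETSTR above is the usual stringizing idiom: one macro argument yields both the case label and the returned string, so the enum list cannot drift out of sync with its printable names. A minimal sketch of the same idiom on a made-up enum (demo_state_t and its values are hypothetical, not part of VAPI):

    #include <stdio.h>

    typedef enum { DEMO_IDLE, DEMO_BUSY, DEMO_DONE } demo_state_t;

    #define CASE_SETSTR(e) case e: s = #e; break;

    static const char* demo_state_sym(demo_state_t e)
    {
        const char* s = "UnKnown";
        switch (e)
        {
        CASE_SETSTR(DEMO_IDLE)
        CASE_SETSTR(DEMO_BUSY)
        CASE_SETSTR(DEMO_DONE)
        default: ;
        }
        return s;
    }

    int main(void)
    {
        printf("%s\n", demo_state_sym(DEMO_BUSY)); /* prints DEMO_BUSY */
        return 0;
    }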
/* VAPI_remote_node_addr_sym */ + + +const char* VAPI_wr_opcode_sym(VAPI_wr_opcode_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_RDMA_WRITE) + CASE_SETSTR(VAPI_RDMA_WRITE_WITH_IMM) + CASE_SETSTR(VAPI_SEND) + CASE_SETSTR(VAPI_SEND_WITH_IMM) + CASE_SETSTR(VAPI_RDMA_READ) + CASE_SETSTR(VAPI_ATOMIC_CMP_AND_SWP) + CASE_SETSTR(VAPI_ATOMIC_FETCH_AND_ADD) + CASE_SETSTR(VAPI_RECEIVE) + default: ; + } + return s; +} /* VAPI_wr_opcode_sym */ + + +const char* VAPI_cqe_opcode_sym(VAPI_cqe_opcode_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_CQE_SQ_SEND_DATA) + CASE_SETSTR(VAPI_CQE_SQ_RDMA_WRITE) + CASE_SETSTR(VAPI_CQE_SQ_RDMA_READ) + CASE_SETSTR(VAPI_CQE_SQ_COMP_SWAP) + CASE_SETSTR(VAPI_CQE_SQ_FETCH_ADD) + CASE_SETSTR(VAPI_CQE_SQ_BIND_MRW) + CASE_SETSTR(VAPI_CQE_RQ_SEND_DATA) + CASE_SETSTR(VAPI_CQE_RQ_RDMA_WITH_IMM) + default: ; + } + return s; +} /* VAPI_cqe_opcode_sym */ + + +const char* VAPI_wc_status_sym(VAPI_wc_status_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_SUCCESS) + CASE_SETSTR(VAPI_LOC_LEN_ERR) + CASE_SETSTR(VAPI_LOC_QP_OP_ERR) + CASE_SETSTR(VAPI_LOC_EE_OP_ERR) + CASE_SETSTR(VAPI_LOC_PROT_ERR) + CASE_SETSTR(VAPI_WR_FLUSH_ERR) + CASE_SETSTR(VAPI_MW_BIND_ERR) + CASE_SETSTR(VAPI_BAD_RESP_ERR) + CASE_SETSTR(VAPI_LOC_ACCS_ERR) + CASE_SETSTR(VAPI_REM_INV_REQ_ERR) + CASE_SETSTR(VAPI_REM_ACCESS_ERR) + CASE_SETSTR(VAPI_REM_OP_ERR) + CASE_SETSTR(VAPI_RETRY_EXC_ERR) + CASE_SETSTR(VAPI_RNR_RETRY_EXC_ERR) + CASE_SETSTR(VAPI_LOC_RDD_VIOL_ERR) + CASE_SETSTR(VAPI_REM_INV_RD_REQ_ERR) + CASE_SETSTR(VAPI_REM_ABORT_ERR) + CASE_SETSTR(VAPI_INV_EECN_ERR) + CASE_SETSTR(VAPI_INV_EEC_STATE_ERR) + CASE_SETSTR(VAPI_COMP_FATAL_ERR) + CASE_SETSTR(VAPI_COMP_GENERAL_ERR) + default: ; + } + return s; +} /* VAPI_wc_status_sym */ + + +const char* VAPI_comp_type_sym(VAPI_comp_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_SIGNALED) + CASE_SETSTR(VAPI_UNSIGNALED) + default: ; + } + return s; +} /* VAPI_comp_type_sym */ + + +const char* VAPI_cq_notif_sym(VAPI_cq_notif_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_NOTIF_NONE) + CASE_SETSTR(VAPI_SOLIC_COMP) + CASE_SETSTR(VAPI_NEXT_COMP) + default: ; + } + return s; +} /* VAPI_cq_notif_sym */ + + +const char* VAPI_event_record_sym(VAPI_event_record_type_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_QP_PATH_MIGRATED) + CASE_SETSTR(VAPI_EEC_PATH_MIGRATED) + CASE_SETSTR(VAPI_QP_COMM_ESTABLISHED) + CASE_SETSTR(VAPI_EEC_COMM_ESTABLISHED) + CASE_SETSTR(VAPI_SEND_QUEUE_DRAINED) + CASE_SETSTR(VAPI_CQ_ERROR) + CASE_SETSTR(VAPI_LOCAL_WQ_INV_REQUEST_ERROR) + CASE_SETSTR(VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR) + CASE_SETSTR(VAPI_LOCAL_WQ_CATASTROPHIC_ERROR) + CASE_SETSTR(VAPI_PATH_MIG_REQ_ERROR) + CASE_SETSTR(VAPI_LOCAL_EEC_CATASTROPHIC_ERROR) + CASE_SETSTR(VAPI_LOCAL_CATASTROPHIC_ERROR) + CASE_SETSTR(VAPI_PORT_ERROR) + CASE_SETSTR(VAPI_PORT_ACTIVE) + CASE_SETSTR(VAPI_RECEIVE_QUEUE_DRAINED) + CASE_SETSTR(VAPI_SRQ_LIMIT_REACHED) + CASE_SETSTR(VAPI_SRQ_CATASTROPHIC_ERROR) + default: ; + } + return s; +} /* VAPI_event_record_sym */ + +const char* VAPI_event_syndrome_sym(VAPI_event_syndrome_t e) +{ + const char* s = UnKnown; + switch (e) + { + CASE_SETSTR(VAPI_EV_SYNDROME_NONE) + CASE_SETSTR(VAPI_CATAS_ERR_FW_INTERNAL) + CASE_SETSTR(VAPI_CATAS_ERR_EQ_OVERFLOW) + CASE_SETSTR(VAPI_CATAS_ERR_MISBEHAVED_UAR_PAGE) + CASE_SETSTR(VAPI_CATAS_ERR_UPLINK_BUS_ERR) + CASE_SETSTR(VAPI_CATAS_ERR_HCA_DDR_DATA_ERR) + 
CASE_SETSTR(VAPI_CATAS_ERR_INTERNAL_PARITY_ERR) + CASE_SETSTR(VAPI_CATAS_ERR_MASTER_ABORT) + CASE_SETSTR(VAPI_CATAS_ERR_GO_BIT) + CASE_SETSTR(VAPI_CATAS_ERR_CMD_TIMEOUT) + CASE_SETSTR(VAPI_CATAS_ERR_FATAL_CR) + CASE_SETSTR(VAPI_CATAS_ERR_FATAL_TOKEN) + CASE_SETSTR(VAPI_CATAS_ERR_GENERAL) + CASE_SETSTR(VAPI_CQ_ERR_OVERRUN) + CASE_SETSTR(VAPI_CQ_ERR_ACCESS_VIOL) + CASE_SETSTR(VAPI_CATAS_ERR_FATAL_EXTERNAL) + default: ; + } + return s; +} + + +#if defined(TEST_VAPI_COMMON) +/* compile via: + gcc -g -DTEST_VAPI_COMMON -I.. -I$MTHOME/include -o /tmp/x vapi_common.c + */ +int main(int argc, char** argv) +{ + char buffer[100]; + char* cbuffer = &buffer[0]; + u_int32_t m = VAPI_BAD_PKEY_COUNT_CAP | VAPI_AUTO_PATH_MIG_CAP; + printf("m=%s\n", VAPI_hca_cap_sym(buffer, sizeof(buffer), m)); + printf("trunc1: m=%s\n", VAPI_hca_cap_sym(buffer, 1, m)); + printf("trunc10: m=%s\n", VAPI_hca_cap_sym(buffer, 10, m)); + + return 0; +} +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.h new file mode 100644 index 00000000..f7004523 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vapi_common.h @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_VAPI_COMMON_H +#define H_VAPI_COMMON_H + +#include + +extern const char* VAPI_strerror( VAPI_ret_t errnum); +extern const char* VAPI_strerror_sym( VAPI_ret_t errnum); + +/* Mask to symbol function. User supplies buffer (buf) which is ensured + * not to overflow beyond bufsz. The buf pointer is conveniently returned. 
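As the comment says, the caller owns the buffer and the *_sym helpers never write past bufsz; when a mask does not fit, end_mask_sym in vapi_common.c rewrites the final '+' separator as '>' to mark truncation. The sketch below reproduces that contract in a self-contained form (mask_sym, the flag names, and main are illustrative, not the library API):

    #include <stdio.h>
    #include <string.h>

    /* Build "A+B+C+" into caller-owned buf, never writing past buf+bufsz;
     * on truncation the last '+' becomes '>', mirroring end_mask_sym. */
    static const char* mask_sym(char* buf, int bufsz, unsigned mask,
                                const char* const names[], unsigned n)
    {
        char* cbuf = buf;
        char* buf_end = buf + bufsz;
        unsigned i;
        int truncated = 0;

        if (bufsz <= 0) return buf;
        *buf = '\0';
        for (i = 0; i < n; ++i) {
            int l;
            if (!(mask & (1u << i))) continue;
            l = (int)strlen(names[i]);
            if (cbuf + l + 2 < buf_end) {       /* room for name, '+', NUL */
                strcpy(cbuf, names[i]);
                cbuf += l;
                *cbuf++ = '+';
                *cbuf = '\0';
            } else {
                truncated = 1;
                break;
            }
        }
        if (truncated && cbuf != buf) cbuf[-1] = '>';
        return buf;
    }

    int main(void)
    {
        static const char* const flags[] = { "READ", "WRITE", "ATOMIC" };
        char buf[32], tiny[8];
        printf("%s\n", mask_sym(buf, sizeof buf, 0x5, flags, 3));   /* READ+ATOMIC+ */
        printf("%s\n", mask_sym(tiny, sizeof tiny, 0x7, flags, 3)); /* READ>  (truncated) */
        return 0;
    }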
+ */ +extern const char* VAPI_hca_cap_sym(char* buf, int bufsz, u_int32_t mask); +extern const char* VAPI_hca_attr_mask_sym(char* buf, int bufsz, u_int32_t mask); +extern const char* VAPI_qp_attr_mask_sym(char* buf, int bufsz, u_int32_t mask); +extern const char* VAPI_mrw_acl_mask_sym(char* buf, int bufsz, u_int32_t mask); +extern const char* VAPI_mr_change_mask_sym(char* buf, int bufsz, u_int32_t msk); +extern const char* VAPI_rdma_atom_acl_mask_sym(char*, int, u_int32_t); + +extern const char* VAPI_atomic_cap_sym(VAPI_atomic_cap_t e); +extern const char* VAPI_sig_type_sym(VAPI_sig_type_t e); +extern const char* VAPI_ts_type_sym(VAPI_ts_type_t e); +extern const char* VAPI_qp_state_sym(VAPI_qp_state_t e); +extern const char* VAPI_mig_state_sym(VAPI_mig_state_t e); +extern const char* VAPI_special_qp_sym(VAPI_special_qp_t e); +extern const char* VAPI_mrw_type_sym(VAPI_mrw_type_t e); +extern const char* VAPI_remote_node_addr_sym(VAPI_remote_node_addr_type_t e); +extern const char* VAPI_wr_opcode_sym(VAPI_wr_opcode_t e); +extern const char* VAPI_cqe_opcode_sym(VAPI_cqe_opcode_t e); +extern const char* VAPI_wc_status_sym(VAPI_wc_status_t e); +extern const char* VAPI_comp_type_sym(VAPI_comp_type_t e); +extern const char* VAPI_cq_notif_sym(VAPI_cq_notif_type_t e); +extern const char* VAPI_event_record_sym(VAPI_event_record_type_t e); +extern const char* VAPI_event_syndrome_sym(VAPI_event_syndrome_t e); + + +#define VAPI_RET_PRINT(ret) { MTL_ERROR1("%s: %d : %s (%s).\n", __func__, __LINE__,VAPI_strerror(ret),VAPI_strerror_sym(ret));} +#define VAPI_CHECK if ( ret != VAPI_OK) VAPI_RET_PRINT(ret); +#define VAPI_CHECK_RET if ( ret != VAPI_OK) { VAPI_RET_PRINT(ret); return(ret); } + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.c new file mode 100644 index 00000000..8d921fa0 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.c @@ -0,0 +1,1116 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include
+#include
+
+#include "vip_array.h"
+
+#define ARRAY_INIT_NUM_ENTRIES (1<<16) /* 64K initial size */
+#define ARRAY_INCR_NUM_ENTRIES (1<<16) /* 64K increment size */
+#define ARRAY_MIN_SIZE 2
+#define ARRAY_MAX_SIZE 0xFFFFFFFE
+#define ARRAY_RESIZE_ERR 0xFFFFFFFF
+#define ARRAY_DEFAULT_MAXSIZE (1 << 24) /* default max size is 16M entries */
+#define ARRAY_2ND_LVL_BLOCK_SIZE (2*MOSAL_SYS_PAGE_SIZE)
+#define ARRAY_2ND_LVL_ENTRIES_PER_BLOCK (ARRAY_2ND_LVL_BLOCK_SIZE / sizeof(VIP_array_internal_obj_t))
+#define ARRAY_2ND_LVL_ENTRY_SIZE (sizeof(VIP_array_internal_obj_t))
+
+#define CALCULATE_NEW_SIZE(curr_size, max_size, array) ((max_size - curr_size < ARRAY_INCR_NUM_ENTRIES) ? max_size : \
+ curr_size + ARRAY_INCR_NUM_ENTRIES)
+#define AT_MAX_SIZE(curr_size, max_size) ((max_size == curr_size ) ? TRUE : FALSE)
+
+#define CALC_MAX_2ND_LVL_BLOCKS(array) ((array->max_size + (array->sec_lvl_entries_per_blk_m_1)) / (array->sec_lvl_entries_per_blk))
+#define CALC_NUM_2ND_LVL_BLOCKS(size, array) ((size + (array->sec_lvl_entries_per_blk_m_1)) / (array->sec_lvl_entries_per_blk))
+#define KMALLOC_1ST_LVL_MAX (1<<15) /* max 32K for first level kmalloc */
+
+typedef MT_size_t VIP_array_ref_cnt_t;
+
+typedef struct VIP_array_internal_obj_t {
+ MT_ulong_ptr_t array_obj;
+ VIP_array_ref_cnt_t ref_count; /* Handle reference count. (-1) if invalid. */
+} VIP_array_internal_obj_t;
+
+typedef VIP_array_internal_obj_t * VIP_array_1st_lvl_t;
+
+typedef struct VIP_array_t {
+ VIP_array_1st_lvl_t *begin;
+ VIP_array_ref_cnt_t first_invalid;
+ u_int32_t size;
+ u_int32_t watermark;
+ u_int32_t size_allocated;
+ u_int32_t max_size;
+ u_int32_t sec_lvl_entries_per_blk_m_1; /* number of second level entries - 1 per block*/
+ u_int32_t sec_lvl_entries_per_blk; /* number of second level entries per block*/
+ u_int32_t size_2nd_lvl_block; /* number of bytes per second level block */
+ u_int8_t log2_2nd_lvl_entries_per_blk; /* log2 of number of second level entries per blk*/
+ MT_bool first_level_uses_vmalloc;
+
+ MOSAL_spinlock_t array_lock;
+ MOSAL_mutex_t resize_lock; /* held while a resize is in progress */
+} VIP_array_t;
+
+#define INVALID_REF_VAL ((unsigned long)-1) /* Value to put in "ref_cnt" to mark invalid entry */
+#define PREP_ERASE_VAL ((unsigned long)-2) /* Value to put in "ref_cnt" to mark status: prepare to erase */
+
+#define GET_OBJ_BY_HNDL(array, handle) ((VIP_array_internal_obj_t *) &((*(array->begin+((handle) >> array->log2_2nd_lvl_entries_per_blk)))[(handle) & (array->sec_lvl_entries_per_blk_m_1)]))
+#define GET_OBJ_ARR_OBJ_BY_HNDL(array, handle) (GET_OBJ_BY_HNDL(array,handle))->array_obj
+#define GET_OBJ_REF_CNT_BY_HNDL(array, handle) (GET_OBJ_BY_HNDL(array,handle))->ref_count
+#define IS_INVALID_ASSIGN(vip_array_p,obj,i) (((obj = GET_OBJ_BY_HNDL(vip_array_p,i))==NULL) || (obj->ref_count == INVALID_REF_VAL)||(obj->ref_count == PREP_ERASE_VAL))
+#define IS_INVALID_OBJ(obj) ((obj->ref_count == INVALID_REF_VAL)||(obj->ref_count == PREP_ERASE_VAL))
+#define SET_INVALID_OBJ(obj) (obj->ref_count = INVALID_REF_VAL)
+#define SET_VALID_OBJ(obj) (obj->ref_count = 0)
+#define RESIZE_REQUIRED(array_obj) (array_obj->watermark >= array_obj->size_allocated)
+#define IS_NOT_BUSY_OBJ(obj,fl) (IS_INVALID_OBJ(obj) || ((fl == TRUE) && (obj->ref_count ==0)))
+
+static unsigned int floor_log2(u_int64_t x)
+{
+ enum { nLowBits = 8 };
+ static const u_int64_t
+ highMask = ~(((u_int64_t)1 << nLowBits) - (u_int64_t)1);
+ /* The table body and loop header are garbled in the archived diff; the lines
+ * below are a best-effort reconstruction consistent with the surviving
+ * statements. Only the first 16 table entries are shown; the original table
+ * has 1<<nLowBits (256) entries. */
+ static const unsigned char lowlog2[1<<nLowBits] = {
+ 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3 /* ...256 entries... */
+ };
+ unsigned int p = 0;
+ unsigned int step = 8 * sizeof(u_int64_t); /* 64: bit-width of x */
+ u_int64_t high;
+ while (x & highMask) /* binary-search the top set bit down to nLowBits */
+ {
+ step >>= 1; /* /= 2 */
+ high = (x >> step);
+ if
(high) + { + p |= step; + x = high; + } + else + { + u_int64_t mask = ((u_int64_t)1 << step) - 1; + x &= mask; + } + } + + p |= lowlog2[x]; + return p; +} /* floor_log2 */ + + +/************************************************************************/ +static unsigned int ceil_log2(u_int64_t x) +{ + unsigned int p = floor_log2(x); + if (((u_int64_t)1 << p) < x) + { + p += 1; + } + return p; +} /* ceil_log2 */ + +/********************************************************************************/ +/* Resize array to given size_to_alloc entries */ +static VIP_common_ret_t resize_array(VIP_array_p_t a,u_int32_t size_to_alloc) +{ + /* We can sample the value above with no lock, since there is only one thread + * which can perform resize at one time + */ + u_int32_t blocks_needed = 0, max_num_blocks = 0, curr_blocks = 0, block_size_to_allocate = 0; + int i,j; + + MTL_DEBUG4(MT_FLFMT("realloc: watermark=%d, size_to_alloc=%d, size allocated = 0x%x, max_size=0x%x"), + a->watermark, size_to_alloc, a->size_allocated, a->max_size); + /* now, insert second level blocks until initial size is reached. If this is also the max size, + * make sure that the last block allocation is only until the max number of entries needed. + * Note that do not need special protection when adding new blocks to the array, since we have + * not yet changed the allocated size for the array -- so the new blocks are not yet visible. + */ + + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + if (size_to_alloc > a->max_size) { + MOSAL_spinlock_unlock(&(a->array_lock)); + MTL_ERROR1(MT_FLFMT("resize_array: requested new size (0x%x)greater than max (0x%x)"), + size_to_alloc, a->max_size); + return VIP_EINVAL_PARAM; + } + max_num_blocks = CALC_MAX_2ND_LVL_BLOCKS(a); + blocks_needed = CALC_NUM_2ND_LVL_BLOCKS(size_to_alloc,a); + curr_blocks = CALC_NUM_2ND_LVL_BLOCKS(a->size_allocated,a); + block_size_to_allocate = a->size_2nd_lvl_block; + MOSAL_spinlock_unlock(&(a->array_lock)); + + for (i = (int)curr_blocks; i < (int)blocks_needed; i++) { + if (i == (int)max_num_blocks - 1) { + block_size_to_allocate = (a->max_size - ((max_num_blocks-1)*(a->sec_lvl_entries_per_blk))) + * ARRAY_2ND_LVL_ENTRY_SIZE; + } + a->begin[i] = (VIP_array_internal_obj_t*)MALLOC(block_size_to_allocate); + if (a->begin[i] == NULL) { + MTL_ERROR1(MT_FLFMT("VIP_array_create_maxsize: malloc failure at 2nd level block %d"), i); + for (j = curr_blocks; j < i; j++) { + FREE(a->begin[j]); + a->begin[j] = NULL; + } + return VIP_EAGAIN; + } else { + memset(a->begin[i], 0xFF, block_size_to_allocate); + } + } + + /* adjust vip array object parameters */ + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + a->size_allocated = size_to_alloc; + MOSAL_spinlock_unlock(&(a->array_lock)); + + return VIP_OK; +} /* resize_array */ + + +/* This function either initiates a resize or wait for another thread to complete it */ +/* The function must be called with array's lock held + */ +static VIP_common_ret_t resize_or_waitfor(VIP_array_p_t VIP_array, u_int32_t new_sz) +{ + call_result_t mt_rc; + VIP_common_ret_t rc; + + MTL_DEBUG4(MT_FLFMT("%s: Entering. 
new size = %d"),__func__, new_sz); + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + + + mt_rc = MOSAL_mutex_acq(&(VIP_array->resize_lock),TRUE); + if (mt_rc != MT_OK) + { + //assume MT_EINTR + rc= VIP_EINTR; + goto mutex_acq_lbl; + } + + if (new_sz <= VIP_array->size_allocated) + { + rc = VIP_OK; + goto size_check_lbl; + } + + rc= resize_array(VIP_array,new_sz); + if (rc) goto resize_lbl; + +resize_lbl: + +size_check_lbl: + MOSAL_mutex_rel(&(VIP_array->resize_lock)); + +mutex_acq_lbl: + MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock)); + return rc; +} + +/******************************************************************************** + * Function: VIP_array_create_maxsize + * + * Arguments: + * VIP_array_p (OUT) - Return new VIP_array object here + * maxsize - max number of entries in the array. + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: Not enough resources + * VIP_EINVAL_PARAM: requested an array larger than max permitted size + * + * Description: + * Create a new VIP_array table + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_create_maxsize(u_int32_t size, u_int32_t maxsize, VIP_array_p_t* VIP_array_p) +{ + VIP_common_ret_t rc = VIP_EAGAIN; + VIP_array_p_t array; + u_int32_t max_num_blocks = 0, size_1st_lvl = 0; + + MTL_DEBUG4(MT_FLFMT("VIP_array_create_maxsize: size=0x%x, maxsize=0x%x"), size, maxsize); + + if ( size > maxsize) { + MTL_ERROR1(MT_FLFMT("VIP_array_create_maxsize: requested size (0x%x) greater than supplied max size (0x%x"), + size, maxsize); + return VIP_EINVAL_PARAM; + } + if (size > ARRAY_MAX_SIZE) { + MTL_ERROR1(MT_FLFMT("VIP_array_create: requested size (0x%x) greater than max permitted"), size); + return VIP_EINVAL_PARAM; + } + array = TMALLOC(VIP_array_t); + if (array == NULL) { + MTL_ERROR1(MT_FLFMT("VIP_array_create: malloc failure")); + return VIP_EAGAIN; + } + + if (maxsize < ARRAY_MIN_SIZE) {maxsize = ARRAY_MIN_SIZE;} + + memset(array, 0, sizeof(VIP_array_t)); + array->max_size = maxsize; + array->sec_lvl_entries_per_blk_m_1 = ARRAY_2ND_LVL_ENTRIES_PER_BLOCK-1; + array->sec_lvl_entries_per_blk = ARRAY_2ND_LVL_ENTRIES_PER_BLOCK; + array->size_2nd_lvl_block = ARRAY_2ND_LVL_BLOCK_SIZE; +/*** warning C4242: '=' : conversion from 'unsigned int' to 'u_int8_t', possible loss of data ***/ + array->log2_2nd_lvl_entries_per_blk = (u_int8_t)ceil_log2(ARRAY_2ND_LVL_ENTRIES_PER_BLOCK); + + /* make sure that the first block set is always allocated in total -- unless max array size is less than entries + per second level block + */ + + if (maxsize <= ARRAY_INIT_NUM_ENTRIES) { + if (size < maxsize) {size = maxsize;} + } else { + if (size < ARRAY_INIT_NUM_ENTRIES) { + size = ARRAY_INIT_NUM_ENTRIES; + } + } + + /* compute size of and allocate first-level allocation */ + max_num_blocks = CALC_MAX_2ND_LVL_BLOCKS(array); + size_1st_lvl = sizeof(VIP_array_1st_lvl_t) * max_num_blocks; + + /* use KMALLOC if first level size is smaller than 32K */ + array->begin = (size_1st_lvl <= KMALLOC_1ST_LVL_MAX) ? 
TNMALLOC(VIP_array_1st_lvl_t, max_num_blocks) : NULL; + if (array->begin == NULL) { + array->first_level_uses_vmalloc = TRUE; + array->begin = TNVMALLOC(VIP_array_1st_lvl_t, max_num_blocks); + if (array->begin == NULL) { + MTL_ERROR1(MT_FLFMT("VIP_array_create: malloc failure for size 0x%x"), (u_int32_t)(size_1st_lvl)); + rc = VIP_EAGAIN; + goto first_lvl_fail; + } + } + + memset(array->begin, 0, size_1st_lvl); + array->first_invalid = INVALID_REF_VAL; + array->size = 0; + array->watermark = 0; + array->size_allocated = 0; + MOSAL_spinlock_init(&(array->array_lock)); + + rc = resize_array(array,size); + if (rc != VIP_OK) { + MTL_ERROR1(MT_FLFMT("VIP_array_create_maxsize: 2nd level alloc failure for size 0x%x"), + (u_int32_t)(sizeof(VIP_array_1st_lvl_t)* maxsize)); + goto second_lvl_fail; + } + MOSAL_mutex_init(&(array->resize_lock)); + *VIP_array_p = array; + rc = VIP_OK; + + MTL_DEBUG4(MT_FLFMT("VIP_array_create: rc=%d"), rc); + return rc; + +second_lvl_fail: + if (array->first_level_uses_vmalloc) { + VFREE(array->begin); + } else { + FREE(array->begin); + } +first_lvl_fail: + FREE(array); + return rc; +} /* VIP_array_create_maxsize */ + +VIP_common_ret_t VIP_array_create(u_int32_t size, VIP_array_p_t* VIP_array_p) +{ + return VIP_array_create_maxsize(size,ARRAY_DEFAULT_MAXSIZE,VIP_array_p); +} + + +/******************************************************************************** + * Function: VIP_array_destroy + * + * Arguments: + * VIP_array (IN) - Object to destroy + * force (IN) - If false do not destroy VIP_array that is not empty + * FREE_objects (IN) - If true destroy objects pointed from the array + * + * Returns: + * VIP_OK + * + * Description: + * cleanup resources for a VIP_array table + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_destroy(VIP_array_p_t VIP_array, + VIP_allocator_free_t free_objects_fun) +{ + VIP_common_ret_t ret=VIP_OK; + VIP_array_handle_t hdl; + VIP_array_obj_t obj; + u_int32_t max_num_blocks; + int i; + call_result_t mt_rc = MT_OK; + + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + MTL_DEBUG4("Inside " "%s:Array size %d\n", __func__, VIP_array->size); + + + + if (VIP_array->size != 0 && free_objects_fun) { + ret = VIP_array_get_first_handle(VIP_array, &hdl, &obj); + while (ret == VIP_OK) { + free_objects_fun(obj); + ret= VIP_array_get_next_handle(VIP_array,&hdl, &obj); + } + } + /*free second level allocations */ + + max_num_blocks = CALC_MAX_2ND_LVL_BLOCKS(VIP_array); + for (i = 0; i < (int)max_num_blocks; i++) { + if (VIP_array->begin[i] == NULL) {break;} + FREE(VIP_array->begin[i]); + } + + /* now, free first level allocation */ + if (VIP_array->first_level_uses_vmalloc) { + VFREE(VIP_array->begin); + } else { + FREE(VIP_array->begin); + } + mt_rc = MOSAL_mutex_free(&(VIP_array->resize_lock)); + if (mt_rc != MT_OK) { + MTL_ERROR2(MT_FLFMT("Failed MOSAL_syncobj_free (%s)"),mtl_strerror_sym(mt_rc)); + } + FREE(VIP_array); + return VIP_OK; +} + +/******************************************************************************** + * Function: VIP_array_insert + * + * Arguments: + * VIP_array (IN) - Insert in this table + * obj (IN) - object to insert + * handle_p (OUT) - handle for this object + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate this object with this handle. + * Return the object associated with this handle. + * Note: No check is done that the object is not already + * in the array. 
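+ * Usage sketch (hypothetical caller; "my_obj" is any caller-owned pointer):
+ * VIP_array_handle_t hdl;
+ * if (VIP_array_insert(VIP_array, (VIP_array_obj_t)my_obj, &hdl) == VIP_OK) {
+ * ... hdl can now be mapped back to my_obj via VIP_array_find ...
+ * }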
+ * + ********************************************************************************/ +VIP_common_ret_t VIP_array_insert(VIP_array_p_t VIP_array, VIP_array_obj_t obj, + VIP_array_handle_t* handle_p ) +{ + VIP_common_ret_t rc; + register VIP_array_internal_obj_t *itl_obj_p = NULL; + + MTL_DEBUG4(MT_FLFMT("VIP_array_insert: %p"), obj); + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + /* First, try to use the list of invalid */ + MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock)); + + while (1) { + /* Try allocating free entry. In case of "resize", a retry is required + * since during spinlock release a lot could happen. + */ + if (VIP_array->first_invalid != INVALID_REF_VAL) { /* Check on free list */ + *handle_p = (VIP_array_handle_t)VIP_array->first_invalid; + /* Move the list head to point to the next invalid handle */ + itl_obj_p = GET_OBJ_BY_HNDL(VIP_array,(*handle_p)); + VIP_array->first_invalid = itl_obj_p->array_obj; + break; + } else { /* If free list is empty, take next free pointed by "watermark" */ + /* check for allocation: "watermark" is valid only if it does not exceeds array size */ + if (VIP_array->watermark >= VIP_array->size_allocated) {/* resize required ? */ + if (AT_MAX_SIZE(VIP_array->size_allocated, VIP_array->max_size)) { + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + MTL_ERROR1(MT_FLFMT("%s: Array size already at maximum (0x%x)"),__func__,VIP_array->max_size); + return VIP_EAGAIN; + } + rc= resize_or_waitfor(VIP_array, CALCULATE_NEW_SIZE(VIP_array->size_allocated, VIP_array->max_size, VIP_array)); + if (rc != VIP_OK) { /* Fatal error */ + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + MTL_ERROR1(MT_FLFMT("%s: Failed resizing array (%s %d)"),__func__,VAPI_strerror_sym((VAPI_ret_t)rc),rc); + return rc; + } + continue; + /* Retry all process - maybe some "frees" were done while spinlock was unlocked */ + } else { /* There are free entries at "watermark" */ + *handle_p = VIP_array->watermark++; + itl_obj_p = GET_OBJ_BY_HNDL(VIP_array,*handle_p); + break; + } /* if "watermark" */ + } /* if "free list" */ + } /* while */ + + /* Reaching this point means "*handle_p" and "itl_obj_p" are valid */ + itl_obj_p->array_obj = (MT_ulong_ptr_t) obj; /* Put object in entry */ + SET_VALID_OBJ(itl_obj_p); /* implies ref_cnt=0 */ + ++VIP_array->size; + + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + + return VIP_OK; /* If reached here, allocation was successful */ +} /* VIP_array_insert */ + + + +VIP_common_ret_t VIP_array_insert2hndl(VIP_array_p_t VIP_array, VIP_array_obj_t obj, + VIP_array_handle_t hndl ) +{ + VIP_common_ret_t rc; + MT_ulong_ptr_t prev_hndl,cur_hndl; + u_int32_t required_size= 0; + register VIP_array_internal_obj_t *itl_obj_p = NULL; + + + MTL_DEBUG4(MT_FLFMT("VIP_array_insert: %p"), obj); + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + if (VIP_array->max_size < hndl) { + MTL_ERROR1(MT_FLFMT("%s: requested handle (0x%x) greater than array maximum"),__func__,hndl); + return VIP_EINVAL_PARAM; + } + required_size = VIP_array->size_allocated; + while (required_size <= VIP_array->max_size) { + if (required_size >= hndl+1) {break;} + if (required_size == VIP_array->max_size) { + MTL_ERROR1(MT_FLFMT("%s: requested handle (0x%x) greater than array maximum"),__func__,hndl); + return VIP_EINVAL_PARAM; + } + //MTL_DEBUG3(MT_FLFMT("%s: loop: new required size = %d"),__func__, required_size ); + required_size = CALCULATE_NEW_SIZE(required_size, VIP_array->max_size, VIP_array); + } + + MTL_DEBUG3(MT_FLFMT("%s: new array size = %d"),__func__, required_size ); + 
+ /* First, try to use the list of invalid */ + MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock)); + /* retry resize until requested size is reached */ + while (required_size > (VIP_array->size_allocated)) { + rc= resize_or_waitfor(VIP_array,required_size); + if (rc != VIP_OK) { /* Fatal error */ + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + MTL_ERROR1(MT_FLFMT("%s: Failed resizing array (%s %d)"),__func__,VAPI_strerror_sym((VAPI_ret_t)rc),rc); + return rc; + } + } /* while resize_or_waitfor */ + + itl_obj_p = GET_OBJ_BY_HNDL(VIP_array, hndl); + if (!IS_INVALID_OBJ(itl_obj_p)) { + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + MTL_ERROR1(MT_FLFMT("%s: Handle %d is already in use"),__func__,hndl); + return VIP_EBUSY; + } + + if (VIP_array->watermark <= hndl) { /* taking a hndl above (or at) watermark */ + /* Insert handles up to requested to free list */ + while (VIP_array->watermark < hndl) { + VIP_array_internal_obj_t *loop_obj_p = GET_OBJ_BY_HNDL(VIP_array,VIP_array->watermark); + /* Attach to "free list" */ + loop_obj_p->array_obj = (MT_ulong_ptr_t) VIP_array->first_invalid; + loop_obj_p->ref_count = INVALID_REF_VAL; + VIP_array->first_invalid = VIP_array->watermark; + VIP_array->watermark++; + } + VIP_array->watermark++; /* Allocate entry at "hndl" */ + } else { /* Requested handle is in the free list */ + /* Look for the entry and remove from free list */ + for (prev_hndl= INVALID_REF_VAL, cur_hndl= VIP_array->first_invalid; + cur_hndl != INVALID_REF_VAL; + prev_hndl= cur_hndl, cur_hndl= GET_OBJ_ARR_OBJ_BY_HNDL(VIP_array, cur_hndl)) { + if (cur_hndl == (MT_ulong_ptr_t)hndl) break; /* handle found */ + } + if (cur_hndl == INVALID_REF_VAL) { + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + MTL_ERROR3(MT_FLFMT("%s: Unexpected error - could not find handle %d in free list"),__func__, + hndl); + return VIP_EFATAL; + } + /* Requested handle's entry found - removing from free list */ + if (prev_hndl != INVALID_REF_VAL) { + GET_OBJ_ARR_OBJ_BY_HNDL(VIP_array,prev_hndl)= GET_OBJ_ARR_OBJ_BY_HNDL(VIP_array,cur_hndl); /* connect next to prev. */ + } else { + VIP_array->first_invalid= GET_OBJ_ARR_OBJ_BY_HNDL(VIP_array,cur_hndl); /* next to first */ + } + } + + /* Reaching this point means given hndl was found */ + itl_obj_p->array_obj = (MT_ulong_ptr_t) obj; /* Put object in entry */ + SET_VALID_OBJ(itl_obj_p); /* implies ref_cnt=0 */ + ++VIP_array->size; + + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + + return VIP_OK; /* If reached here, allocation was successful */ +} + + +/******************************************************************************** + * Function: VIP_array_insert_ptr + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle_p (OUT) - handle for this object + * obj (OUT) - pointer to new object + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate a new object with this handle. 
+ * This is like VIP_array_insert, but it returns + * a pointer into the array through which the pointer + * to the object can be set later + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_insert_ptr( + VIP_array_p_t a, + VIP_array_handle_t* handle_p, + VIP_array_obj_t** obj +) +{ + VIP_common_ret_t rc = VIP_EAGAIN; + + MTL_DEBUG4(MT_FLFMT("VIP_array_insert_ptr")); + rc= VIP_array_insert(a,0,handle_p); + if ((rc == VIP_OK) && (obj)) *obj = (VIP_array_obj_t *)&(GET_OBJ_ARR_OBJ_BY_HNDL(a,(MT_ulong_ptr_t)*handle_p)); + return rc; +} /* VIP_array_insert_ptr */ + + +/* selector for erase_handle() operation */ +typedef enum { + VIP_ARRAY_ERASE, + VIP_ARRAY_REL_ERASE, + VIP_ARRAY_ERASE_PREP, + VIP_ARRAY_REL_ERASE_PREP +} VIP_array_erase_type_t; + +/* erase VIP_array handle on if only_this_obj is the object at that handle. */ +/* If only_this_obj==NULL, no check is done */ +static VIP_common_ret_t erase_handle(VIP_array_erase_type_t etype, + VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p) +{ + register VIP_array_internal_obj_t * itl_obj_p = NULL; + if (a == NULL) return VIP_EINVAL_HNDL; + + MTL_DEBUG4(MT_FLFMT("VIP_array_erase: handle=%d, wmark=%d"), + handle, a->watermark); + + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + + if (handle >= a->watermark || IS_INVALID_ASSIGN(a,itl_obj_p,handle) ) { /* Invalid handle */ + if (obj_p != NULL) *obj_p = NULL; /* Just "cosmetics" */ + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_EINVAL_HNDL; + } + + if ((etype == VIP_ARRAY_REL_ERASE) || (etype == VIP_ARRAY_REL_ERASE_PREP)) { + itl_obj_p->ref_count--; /* Handle is release anyway */ + } + + if (itl_obj_p->ref_count > 0) { + MTL_DEBUG1(MT_FLFMT("%s: handle=%d ref_cnt="SIZE_T_FMT), __func__, + handle, itl_obj_p->ref_count); + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_EBUSY; + } + + + if (obj_p != NULL) {*obj_p = (VIP_array_obj_t)(itl_obj_p->array_obj);} + switch (etype) { + case VIP_ARRAY_ERASE: + case VIP_ARRAY_REL_ERASE: + SET_INVALID_OBJ(itl_obj_p); + /* Attach to "free list" */ + itl_obj_p->array_obj = a->first_invalid; + a->first_invalid = (MT_ulong_ptr_t) handle; + --a->size; + break; + case VIP_ARRAY_ERASE_PREP: + case VIP_ARRAY_REL_ERASE_PREP: + itl_obj_p->ref_count = PREP_ERASE_VAL; + break; + default: + MOSAL_spinlock_unlock(&(a->array_lock)); + MTL_ERROR1(MT_FLFMT("%s: function used with invalid erase type (%d)"),__func__,etype); + return VIP_EINVAL_PARAM; + } + + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_OK; +} + +/******************************************************************************** + * Function: VIP_array_erase + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * VIP_EBUSY + * + * Description: + * Remove the object associated with this handle + * Note: fails if handle is not already in the VIP_array + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ) +{ + return erase_handle(VIP_ARRAY_ERASE,a,handle,obj_p); +} + +/******************************************************************************** + * Function: VIP_array_find_release_erase + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + 
* obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array, or only_this_obj don't match object at handle + * VIP_EBUSY: Handle is still busy (ref_cnt > 0 , after dec.). ref_cnt is updated anyway. + * + * Description: + * This function is a combination of VIP_array_find_release and VIP_array_erase. + * The function atomically decrements the handle's reference count and check if it reached 0. + * Only if the ref_cnt is 0, the object is erased. Otherwise, VIP_EBUSY is returned. + * Note: The reference count is decrement by 1 even on VIP_EBUSY error. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_release_erase(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ) +{ + return erase_handle(VIP_ARRAY_REL_ERASE,a,handle,obj_p); +} + + +/******************************************************************************** + * Function: VIP_array_erase_prepare + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * VIP_EBUSY: Handle's reference count > 0 + * + * Description: + * invalidate the object in the array, not yet removing the object associated with this handle + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_prepare(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p) +{ + return erase_handle(VIP_ARRAY_ERASE_PREP,a,handle,obj_p); +} + +/******************************************************************************** + * Function: VIP_array_find_release_erase_prepare + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array, or only_this_obj don't match object at handle + * VIP_EBUSY: Handle is still busy (ref_cnt > 0 , after dec.). ref_cnt is updated anyway. + * + * Description: + * This function is a combination of VIP_array_find_release and VIP_array_erase_prepare. + * The function atomically decrements the handle's reference count and check if it reached 0. + * Only if the ref_cnt is 0, the object is erased (prep.). Otherwise, VIP_EBUSY is returned. + * Note: The reference count is decrement by 1 even on VIP_EBUSY error. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_release_erase_prepare(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ) +{ + return erase_handle(VIP_ARRAY_REL_ERASE_PREP,a,handle,obj_p); +} + +/******************************************************************************** + * Function: VIP_array_erase_undo + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - object by this handle + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array or not was "erase prepare". 
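+ * (returned when the handle is out of range, or when the entry's ref_count
+ * is not PREP_ERASE_VAL, i.e. no VIP_array_erase_prepare preceded this call)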
+ * + * Description: + * revalidates the object of this handle, undo the erasing operation + * see: VIP_array_erase_prepare + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_undo(VIP_array_p_t a, VIP_array_handle_t handle) +{ + register VIP_array_internal_obj_t *itl_obj_p = NULL; + if (a == NULL) return VIP_EINVAL_HNDL; + MTL_DEBUG4(MT_FLFMT("VIP_array_erase_roll: handle=%d, wmark=%d"), + handle, a->watermark); + + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + + if ((handle >= a->watermark) || ((itl_obj_p = GET_OBJ_BY_HNDL(a,handle)) == NULL) || (itl_obj_p->ref_count!= PREP_ERASE_VAL)) { /* Invalid handle */ + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_EINVAL_HNDL; + } + SET_VALID_OBJ(itl_obj_p); + + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_OK; +} + + +/******************************************************************************** + * Function: VIP_array_erase_done + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array or not was "erase prepare". + * + * Description: + * removes the object associated with this handle + * see: VIP_array_erase_prepare + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_done(VIP_array_p_t a, VIP_array_handle_t handle, VIP_array_obj_t *obj) +{ + register VIP_array_internal_obj_t *itl_obj_p = NULL; + if (a == NULL) return VIP_EINVAL_HNDL; + + MTL_DEBUG4(MT_FLFMT("VIP_array_erase: handle=%d, wmark=%d"), + handle, a->watermark); + + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + + if ((handle >= a->watermark) || ((itl_obj_p = GET_OBJ_BY_HNDL(a,handle)) == NULL) || (itl_obj_p->ref_count!= PREP_ERASE_VAL)) { /* Invalid handle */ + if (obj != NULL) *obj = NULL; /* Just "cosmetics" */ + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_EINVAL_HNDL; + } + + if (obj != NULL) {*obj = (VIP_array_obj_t)(itl_obj_p->array_obj);} + SET_INVALID_OBJ(itl_obj_p); + /* Attach to "free list" */ + itl_obj_p->array_obj = a->first_invalid; + a->first_invalid = (MT_ulong_ptr_t) handle; + --a->size; + + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_OK; +} + + +VIP_common_ret_t VIP_array_find(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj ) +{ + register VIP_array_internal_obj_t *itl_obj_p = NULL; + if (a == NULL) return VIP_EINVAL_HNDL; + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + + if (handle >= a->watermark || IS_INVALID_ASSIGN(a,itl_obj_p,handle) ) { /* Invalid handle */ + MOSAL_spinlock_unlock(&(a->array_lock)); + if (obj != NULL) *obj= NULL; + return VIP_EINVAL_HNDL; + } + if (obj != NULL) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj); + + MOSAL_spinlock_unlock(&(a->array_lock)); + return VIP_OK; +} + +VIP_common_ret_t VIP_array_find_hold(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj ) +{ + VIP_common_ret_t rc= VIP_OK; + register VIP_array_internal_obj_t *itl_obj_p = NULL; + + if (a == NULL) return VIP_EINVAL_HNDL; + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + if (handle >= a->watermark || IS_INVALID_ASSIGN(a,itl_obj_p,handle)) { /* Invalid handle */ + if (obj != NULL) *obj= NULL; + rc= VIP_EINVAL_HNDL; + } else if ( itl_obj_p->ref_count == INVALID_REF_VAL-1) { /* protect from overflow */ + rc= VIP_EAGAIN; /* Try again later - when ref. cnt. 
will be smaller */ + } else { + (itl_obj_p->ref_count)++; + if (obj != NULL) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj); + } + + MOSAL_spinlock_unlock(&(a->array_lock)); + return rc; +} + +VIP_common_ret_t VIP_array_find_release(VIP_array_p_t a, VIP_array_handle_t handle) +{ + VIP_common_ret_t rc= VIP_OK; + register VIP_array_internal_obj_t *itl_obj_p = NULL; + + if (a == NULL) return VIP_EINVAL_HNDL; + MOSAL_spinlock_dpc_lock(&(a->array_lock)); + + if (handle >= a->watermark || IS_INVALID_ASSIGN(a,itl_obj_p,handle)) { /* Invalid handle */ + rc= VIP_EINVAL_HNDL; + } else if (itl_obj_p->ref_count == 0) { /* Caller did not invoke VIP_array_find_hold for this handle */ + rc= VIP_EINVAL_HNDL; + } else { + (itl_obj_p->ref_count)--; + MTL_DEBUG6(MT_FLFMT("%s: handle=0x%X ref_count="SIZE_T_DFMT"->"SIZE_T_DFMT), __func__, handle, + itl_obj_p->ref_count+1 , itl_obj_p->ref_count); + } + + MOSAL_spinlock_unlock(&(a->array_lock)); + return rc; +} + +/******************************************************************************** + * Function: VIP_array_get_allocated_size + * + * Arguments: + * VIP_array (IN) - table + * + * Returns: + * current allocated size of the array + * + * Description: + * allocated size of the arrays + * + ********************************************************************************/ +u_int32_t VIP_array_get_allocated_size(VIP_array_p_t VIP_array) +{ + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + return VIP_array->size_allocated; +} + +/******************************************************************************** + * Function: VIP_array_get_num_of_objects + * + * Arguments: + * VIP_array (IN) - table + * + * Returns: + * number of objects in the array + * + * Description: + * Get number of objects + * + ********************************************************************************/ +u_int32_t VIP_array_get_num_of_objects(VIP_array_p_t VIP_array) +{ + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + return VIP_array->size; +} + +/******************************************************************************** + * Functions: VIP_array_get_first/next_handle + * + * Arguments: + * VIP_array (IN) - Go over this table + * hdl (OUT) - if non zero, returns the next valid handle here + * + * Returns: + * VIP_OK - this code was returned for all objects + * VIP_EINVAL_HNDL: no more valid handles in this array + * + * Description: + * These can be used to iterate over the array, and get all valid + * handles. Initialise handle with first_handle, then call next. + * VIP_EINVAL_HNDL is returned when there are no more handles. 
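+ * Usage example (as given in vip_array.h):
+ * VIP_array_handle_t hdl;
+ * for(ret=VIP_array_get_first_handle(VIP_array, &hdl, NULL);
+ * ret == VIP_OK; ret=VIP_array_get_next_handle(VIP_array,&hdl, NULL)) {
+ * }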
+ *
+ ********************************************************************************/
+VIP_common_ret_t VIP_array_get_first_handle(VIP_array_p_t VIP_array,
+ VIP_array_handle_t* hdl,VIP_array_obj_t* obj)
+{
+ VIP_array_handle_t i;
+ register VIP_array_internal_obj_t *itl_obj_p = NULL;
+
+ if (VIP_array == NULL) return VIP_EINVAL_HNDL;
+ MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock));
+
+ for (i=0; i<VIP_array->watermark; ++i) {
+ itl_obj_p = GET_OBJ_BY_HNDL(VIP_array, i);
+ if (itl_obj_p == NULL || IS_INVALID_OBJ(itl_obj_p))
+ continue;
+ if (hdl) *hdl=i;
+ if (obj) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj);
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return VIP_OK;
+ }
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return VIP_EINVAL_HNDL;
+}
+
+VIP_common_ret_t VIP_array_get_next_handle(VIP_array_p_t VIP_array,
+ VIP_array_handle_t* hdl, VIP_array_obj_t* obj)
+{
+ VIP_array_handle_t i;
+ VIP_array_internal_obj_t *itl_obj_p = NULL;
+
+ if (VIP_array == NULL) return VIP_EINVAL_HNDL;
+ if (!hdl) return VIP_EINVAL_HNDL;
+
+ MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock));
+
+ for (i=*hdl+1; i<VIP_array->watermark; ++i) {
+ itl_obj_p = GET_OBJ_BY_HNDL(VIP_array, i);
+ if (itl_obj_p == NULL || IS_INVALID_OBJ(itl_obj_p))
+ continue;
+ *hdl=i;
+ if (obj) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj);
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return VIP_OK;
+ }
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return VIP_EINVAL_HNDL;
+}
+
+VIP_common_ret_t VIP_array_get_first_handle_hold(VIP_array_p_t VIP_array,
+ VIP_array_handle_t* hdl,VIP_array_obj_t* obj, MT_bool busy_only)
+{
+ VIP_common_ret_t rc= VIP_OK;
+ VIP_array_handle_t i;
+ register VIP_array_internal_obj_t *itl_obj_p = NULL;
+
+ if (VIP_array == NULL) return VIP_EINVAL_HNDL;
+ if (!hdl) return VIP_EINVAL_HNDL;
+ MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock));
+
+ for (i=0; i<VIP_array->watermark; ++i) {
+ itl_obj_p = GET_OBJ_BY_HNDL(VIP_array, i);
+ if ((itl_obj_p == NULL) || IS_NOT_BUSY_OBJ(itl_obj_p, busy_only)) {continue;}
+ if (itl_obj_p->ref_count == INVALID_REF_VAL-1) { /* protect from overflow */
+ rc= VIP_EAGAIN; /* ref. cnt. is at max for this item */
+ } else {
+ (itl_obj_p->ref_count)++;
+ }
+ if (hdl) *hdl=i;
+ if (obj) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj);
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return rc;
+ }
+ MOSAL_spinlock_unlock(&(VIP_array->array_lock));
+ return VIP_EINVAL_HNDL;
+}
+
+VIP_common_ret_t VIP_array_get_next_handle_hold(VIP_array_p_t VIP_array,
+ VIP_array_handle_t* hdl, VIP_array_obj_t* obj, MT_bool busy_only)
+{
+ VIP_common_ret_t rc= VIP_OK;
+ VIP_array_handle_t i;
+ register VIP_array_internal_obj_t *itl_obj_p = NULL;
+
+ if (VIP_array == NULL) return VIP_EINVAL_HNDL;
+ if (!hdl) return VIP_EINVAL_HNDL;
+
+ MOSAL_spinlock_dpc_lock(&(VIP_array->array_lock));
+ for (i=*hdl+1; i<VIP_array->watermark; ++i) {
+ itl_obj_p = GET_OBJ_BY_HNDL(VIP_array, i);
+ if ((itl_obj_p == NULL) || IS_NOT_BUSY_OBJ(itl_obj_p, busy_only)) {continue;}
+ if (itl_obj_p->ref_count == INVALID_REF_VAL-1) { /* protect from overflow */
+ rc= VIP_EAGAIN; /* ref. cnt.
is at max for this item */ + } else { + (itl_obj_p->ref_count)++; + } + *hdl=i; + if (obj) *obj=(VIP_array_obj_t)(itl_obj_p->array_obj); + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + return rc; + } + MOSAL_spinlock_unlock(&(VIP_array->array_lock)); + return VIP_EINVAL_HNDL; +} + +u_int32_t VIP_array_get_max_size(VIP_array_p_t VIP_array) +{ + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + return VIP_array->max_size; +} + +u_int32_t VIP_array_get_watermark(VIP_array_p_t VIP_array) +{ + if (VIP_array == NULL) return VIP_EINVAL_HNDL; + return VIP_array->watermark; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.h new file mode 100644 index 00000000..e0dac1f8 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_array.h @@ -0,0 +1,539 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_COMMON_VIP_ARRAY_H +#define VIP_COMMON_VIP_ARRAY_H + +#include +#include "vip_common.h" + +typedef u_int32_t VIP_array_handle_t; +typedef void* VIP_array_obj_t; + +struct VIP_array_t; +typedef struct VIP_array_t* VIP_array_p_t; + +#ifdef __cplusplus + extern "C" { +#endif + +/******************************************************************************** + * Function: VIP_array_create_maxsize + * + * Arguments: + * + * size (IN) - initial size. Must be multiple of 8. + * maxsize (IN) - max number of elements that the array will hold + * + * VIP_array_p (OUT) - Return new VIP_array object here + * Set to NULL in case of an error + * + * Returns: + * VIP_OK, + * VIP_AGAIN: Not enough resources + * + * Description: + * Create a new VIP_array table + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_create_maxsize(u_int32_t size, u_int32_t maxsize, VIP_array_p_t* VIP_array_p); + +/******************************************************************************** + * Function: VIP_array_create + * + * Arguments: + * + * size (IN) - initial size. Must be multiple of 8. 
+ * + * VIP_array_p (OUT) - Return new VIP_array object here + * Set to NULL in case of an error + * + * Returns: + * VIP_OK, + * VIP_AGAIN: Not enough resources + * + * Description: + * Create a new VIP_array table + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_create(u_int32_t size, VIP_array_p_t* VIP_array_p); + + +/******************************************************************************** + * Function: VIP_array_destroy + * + * Arguments: + * VIP_array (IN) - Object to destroy + * free_objects_fun (IN) - If non zero, call this function + * for each object in the array (can be used + * e.g. to deallocate memory). + * Even if zero, the array is still deallocated. + * + * Returns: + * VIP_OK, + * + * Description: + * cleanup resources for a VIP_array table + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_destroy(VIP_array_p_t VIP_array, + VIP_allocator_free_t free_objects_fun); + +/******************************************************************************** + * Function: VIP_array_insert + * + * Arguments: + * VIP_array (IN) - Insert in this table + * obj (IN) - object to insert + * handle_p (OUT) - handle for this object + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: not enough resources + * + * Description: + * Inset given object to array + * Return the handle associated with this object. + * Note: No check is done that the object is not already + * in the array. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_insert(VIP_array_p_t VIP_array, VIP_array_obj_t obj, + VIP_array_handle_t* handle_p ); + +/******************************************************************************** + * Function: VIP_array_insert2hndl + * + * Arguments: + * VIP_array (IN) - Insert in this table + * obj (IN) - object to insert + * hndl (IN) - Requested handle for this object + * + * Returns: + * VIP_OK, + * VIP_EBUSY: Given handle is already in use + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate this object with given handle in the array (if handle not already used). + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_insert2hndl(VIP_array_p_t VIP_array, VIP_array_obj_t obj, + VIP_array_handle_t hndl ); + + +/******************************************************************************** + * Function: VIP_array_insert_ptr + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle_p (OUT) - handle for this object + * obj (OUT) - pointer to new object + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate a new object with this handle. + * This is like VIP_array_insert, but it returns + * a pointer into the array through which the pointer + * to the object can be set later. + * + * NOTE: the pointer returned is only valid until the next + * call to insert/erase! After this you must use the handle + * to access the value. 
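+ * Usage sketch (hypothetical caller; "my_obj" is any caller-owned pointer):
+ * VIP_array_handle_t hdl;
+ * VIP_array_obj_t* slot_p;
+ * if (VIP_array_insert_ptr(VIP_array, &hdl, &slot_p) == VIP_OK) {
+ * *slot_p = (VIP_array_obj_t)my_obj; -- set before the next insert/erase
+ * }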
+ * + ********************************************************************************/ +VIP_common_ret_t VIP_array_insert_ptr(VIP_array_p_t VIP_array, + VIP_array_handle_t* handle_p , VIP_array_obj_t** obj); + + +/******************************************************************************** + * Function: VIP_array_erase + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * VIP_EBUSY + * + * Description: + * Remove the object associated with this handle + * Note: fails if handle is not already in the VIP_array + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ); + +/******************************************************************************** + * Function: VIP_array_find_release_erase + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array, or only_this_obj don't match object at handle + * VIP_EBUSY: Handle is still busy (ref_cnt > 0 , after dec.). ref_cnt is updated anyway. + * + * Description: + * This function is a combination of VIP_array_find_release and VIP_array_erase. + * The function atomically decrements the handle's reference count and check if it reached 0. + * Only if the ref_cnt is 0, the object is erased. Otherwise, VIP_EBUSY is returned. + * Note: The reference count is decrement by 1 even on VIP_EBUSY error. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_release_erase(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ); + + +/******************************************************************************** + * Function: VIP_array_erase_prepare + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * VIP_EBUSY: Handle's reference count > 0 + * + * Description: + * invalidate the object in the array, not yet removing the object associated with this handle + * see: VIP_array_erase_done , VIP_array_erase_undo + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_prepare(VIP_array_p_t VIP_array, VIP_array_handle_t handle, + VIP_array_obj_t* obj ); + +/******************************************************************************** + * Function: VIP_array_find_release_erase_prepare + * + * Arguments: + * VIP_array (IN) - this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array, or only_this_obj don't match object at handle + * VIP_EBUSY: Handle is still busy (ref_cnt > 0 , after dec.). ref_cnt is updated anyway. + * + * Description: + * This function is a combination of VIP_array_find_release and VIP_array_erase_prepare. + * The function atomically decrements the handle's reference count and check if it reached 0. 
+ * Only if the ref_cnt is 0, the object is erased (prep.). Otherwise, VIP_EBUSY is returned. + * Note: The reference count is decrement by 1 even on VIP_EBUSY error. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_release_erase_prepare(VIP_array_p_t a, VIP_array_handle_t handle, + VIP_array_obj_t* obj_p ); + +/******************************************************************************** + * Function: VIP_array_erase_undo + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - object by this handle + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array or not was "erase prepare". + * + * Description: + * revalidates the object of this handle, undo the erasing operation + * see: VIP_array_erase_prepare + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_undo(VIP_array_p_t VIP_array, VIP_array_handle_t handl); + + +/******************************************************************************** + * Function: VIP_array_erase_done + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - remove object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array or not was "erase prepare". + * + * Description: + * removes the object associated with this handle + * see: VIP_array_erase_prepare + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_erase_done(VIP_array_p_t a, VIP_array_handle_t handle, VIP_array_obj_t *obj); + + + +/******************************************************************************** + * Function: VIP_array_find + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - get object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * + * Description: + * Find the object associated with this handle + * Note: fails if handle is illegal in the VIP_array + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find(VIP_array_p_t VIP_array, VIP_array_handle_t handle, + VIP_array_obj_t* obj ); + +/******************************************************************************** + * Function: VIP_array_find_hold + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - get object by this handle + * obj (OUT) - if non zero, returns the object by this handle here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * + * Description: + * Same as VIP_array_find, but also updates object's reference count. + * VIP_array_erase will fail if reference count > 0 . 
+ * Handle must be released with VIP_array_find_release + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_hold(VIP_array_p_t VIP_array, VIP_array_handle_t handle, + VIP_array_obj_t* obj ); + +/******************************************************************************** + * Function: VIP_array_find_release + * + * Arguments: + * VIP_array (IN) - Insert in this table + * handle (IN) - remove object by this handle + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: handle is not in the VIP_array + * + * Description: + * Decrement handle's reference count. + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_find_release(VIP_array_p_t VIP_array, VIP_array_handle_t handle); + +/******************************************************************************** + * Function: VIP_array_get_num_of_objects + * + * Arguments: + * VIP_array (IN) - table + * + * Returns: + * number of objects in the array + * + * Description: + * Get number of objects + * + ********************************************************************************/ +u_int32_t VIP_array_get_num_of_objects(VIP_array_p_t VIP_array); + +/******************************************************************************** + * Function: VIP_array_get_allocated_size + * + * Arguments: + * VIP_array (IN) - table + * + * Returns: + * current allocated size of the array + * + * Description: + * allocated size of the arrays + * + ********************************************************************************/ +u_int32_t VIP_array_get_allocated_size(VIP_array_p_t VIP_array); +u_int32_t VIP_array_get_max_size(VIP_array_p_t VIP_array); +u_int32_t VIP_array_get_watermark(VIP_array_p_t VIP_array); + +/******************************************************************************** + * Functions: VIP_array_get_first/next_handle + * + * Arguments: + * VIP_array (IN) - Go over this table + * hdl (OUT) - if non zero, returns the next valid handle here + * obj (OUT) - if non zero, returns the object of this handle + * + * Returns: + * VIP_OK - this code was returned for all objects + * VIP_EINVAL_HNDL: no more valid handles in this array + * + * Description: + * These can be used to iterate over the array, and get all valid + * handles. Initialise handle with get_first, then call get_next. + * VIP_EINVAL_HNDL is returned when there are no more handles. + * Usage example: + * VIP_array_handle_t hdl; + * for(ret=VIP_array_get_first_handle(VIP_array, &hdl, NULL); + * ret == VIP_OK; ret=VIP_array_get_next_handle(VIP_array,&hdl, NULL)) { + * } + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_get_first_handle(VIP_array_p_t VIP_array, + VIP_array_handle_t* hdl, VIP_array_obj_t* obj); + +VIP_common_ret_t VIP_array_get_next_handle(VIP_array_p_t VIP_array, + VIP_array_handle_t* hdl, VIP_array_obj_t* obj ); + +/******************************************************************************** + * Functions: VIP_array_get_first_handle_hold/next_handle_hold + * + * Arguments: + * VIP_array (IN) - Go over this table + * hdl (OUT) - returns the next valid busy handle here (MUST be non-zero) + * obj (OUT) - if non zero, returns the object of this handle + * busy_only (IN) - if TRUE, only returns busy valid items in the scan + * + * Returns: + * VIP_OK - this code was returned for all objects + * VIP_EAGAIN: reference count already at max. 
Info returned, but user must not release + * when done with item + * VIP_EINVAL_HNDL: no more valid/busy handles in this array + * + * Description: + * These can be used to iterate over the array, and get all valid/busy + * handles, updating ref count for items returned. + * Initialise handle with get_first_hold, then call get_next_hold. + * VIP_EINVAL_HNDL is returned when there are no more handles. + * When done with a given handle, user must call VIP_array_find_release on the returned + * handle to decrement its reference count. + * + * Usage example (for getting and holding all valid items): + * VIP_array_handle_t hdl; + * for(ret=VIP_array_get_next_handle_hold(VIP_array, &hdl, NULL, FALSE); + * ret == VIP_OK; ret=VIP_array_get_next_busy_handle(VIP_array,&hdl, NULL, FALSE)) { + * } + * + ********************************************************************************/ +VIP_common_ret_t VIP_array_get_first_handle_hold(VIP_array_p_t VIP_array, + VIP_array_handle_t* hdl, VIP_array_obj_t* obj, MT_bool busy_only); + +VIP_common_ret_t VIP_array_get_next_handle_hold(VIP_array_p_t VIP_array, + VIP_array_handle_t* hdl, VIP_array_obj_t* obj, MT_bool busy_only ); +/******************************************************************************** + * Macro: VIP_ARRAY_FOREACH + * + * Arguments: + * VIP_array_p_t VIP_array (IN) - Go over this table + * VIP_common_ret_t ret (OUT) - variable (lvalue) to hold the current return code + * VIP_array_handle_t hdl (OUT) - variable (lvalue) to hold current object handle + * VIP_array_obj_t* obj_p (OUT) - if non zero, returns the object of this handle + * + * Returns: + * + * Description: + * This macro can be used to iterate over the array. + * Only valid handles are returned. + * If you are not interested in objects but only in handles pass + * NULL instead of obj_p. + * + * Usage example (erase all object from the array, and free them): + * + * VIP_array_handle_t hdl; + * VIP_common_ret_t ret; + * VIP_array_obj_t obj; + * + * VIP_ARRAY_FOREACH(VIP_array, ret, hdl, &obj) { + * VIP_array_erase(VIP_array, hdl, NULL); + * FREE(obj); + * } + * + ********************************************************************************/ +#define VIP_ARRAY_FOREACH(VIP_array, ret, hdl, obj_p) \ + for(ret=VIP_array_get_first_handle(VIP_array, &hdl, obj_p);\ + ret == VIP_OK; ret=VIP_array_get_next_handle(VIP_array,&hdl, obj_p)) + +/******************************************************************************** + * Macro: VIP_ARRAY_FOREACH_HOLD + * + * Arguments: + * VIP_array_p_t VIP_array (IN) - Go over this table + * VIP_common_ret_t ret (OUT) - variable (lvalue) to hold the current return code + * VIP_array_handle_t hdl (OUT) - variable (lvalue) to hold current object handle + * VIP_array_obj_t* obj_p (OUT) - if non zero, returns the object of this handle + * busy_only -- if TRUE, return only items already busy (i.e., nonzero ref count) + * + * Returns: + * + * Description: + * This macro can be used to iterate over the array, holding each item returned. + * Only valid handles are returned. If the busy_only flag is TRUE, the item must + * already have a non-zero reference count to be returned. + * If you are not interested in objects but only in handles pass + * NULL instead of obj_p. + * + * When you are done with a returned handle, you MUST call VIP_array_find_release() on + * that handle (or the item will not be deletable). 
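+ * An item returned with VIP_EAGAIN is the exception: its reference count was
+ * already at the maximum and was not incremented, so it must not be released.
+ * The example below therefore calls VIP_array_find_release only on VIP_OK.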
+/********************************************************************************
+ * Macro: VIP_ARRAY_FOREACH_HOLD
+ *
+ * Arguments:
+ * VIP_array_p_t VIP_array (IN) - Go over this table
+ * VIP_common_ret_t ret (OUT) - variable (lvalue) to hold the current return code
+ * VIP_array_handle_t hdl (OUT) - variable (lvalue) to hold current object handle
+ * VIP_array_obj_t* obj_p (OUT) - if non zero, returns the object of this handle
+ * busy_only -- if TRUE, return only items already busy (i.e., nonzero ref count)
+ *
+ * Returns:
+ *
+ * Description:
+ * This macro can be used to iterate over the array, holding each item returned.
+ * Only valid handles are returned. If the busy_only flag is TRUE, the item must
+ * already have a non-zero reference count to be returned.
+ * If you are not interested in objects but only in handles, pass
+ * NULL instead of obj_p.
+ *
+ * When you are done with a returned handle, you MUST call VIP_array_find_release() on
+ * that handle (or the item will not be deletable).
+ *
+ * Usage example (process each valid object, releasing the hold when done):
+ *
+ * VIP_array_handle_t hdl;
+ * VIP_common_ret_t ret;
+ * VIP_array_obj_t obj;
+ *
+ * VIP_ARRAY_FOREACH_HOLD(VIP_array, ret, hdl, &obj, TRUE) {
+ *   ... do something with returned object ...
+ *   if (ret == VIP_OK) VIP_array_find_release(VIP_array, hdl);
+ * }
+ *
+ ********************************************************************************/
+#define VIP_ARRAY_FOREACH_HOLD(VIP_array, ret, hdl, obj_p, busy_only) \
+ for(ret=VIP_array_get_first_handle_hold(VIP_array, &hdl, obj_p, busy_only);\
+ ((ret == VIP_OK) || (ret == VIP_EAGAIN)); ret=VIP_array_get_next_handle_hold(VIP_array,&hdl, obj_p, busy_only))
+
+#ifdef __cplusplus
+ }
+#endif
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.c
new file mode 100644
index 00000000..9bc98533
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "vip_cirq.h"
+
+void VIP_cirq_stats_print(VIP_cirq_t *cirq_p)
+{
+  MTL_DEBUG4("%s: cirq 0x%p stats:\n", __func__, cirq_p->queue);
+  MTL_DEBUG4("%s: queue size: %d\n", __func__, cirq_p->q_size);
+  MTL_DEBUG4("%s: elt size: %d\n", __func__, cirq_p->element_size);
+  MTL_DEBUG4("%s: producer: %d\n", __func__, cirq_p->producer);
+  MTL_DEBUG4("%s: consumer: %d\n", __func__, cirq_p->consumer);
+  MTL_DEBUG4("%s: full flag: %s\n", __func__, (cirq_p->full == TRUE ? "TRUE" : "FALSE"));
+}
+
+static int VIP_cirq_empty_no_mtx(VIP_cirq_t *cirq_p)
+{
+  int retval = FALSE;
+  if (cirq_p->consumer == cirq_p->producer && !(cirq_p->full))
+    retval = TRUE;
+  return retval;
+}
+
+
+int VIP_cirq_create(int q_size, int element_size, VIP_cirq_t **cirq_p)
+{
+  VIP_cirq_t *new_cirq;
+
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_create: queue size = %d, elt size = %d"), q_size, element_size);
+  new_cirq = (VIP_cirq_t *)MALLOC(sizeof(VIP_cirq_t));
+  if (new_cirq == NULL) {
+    MTL_ERROR1(MT_FLFMT("VIP_cirq_create: MALLOC failure"));
+    return -1;
+  }
+
+  new_cirq->queue = (void*)VMALLOC(element_size * q_size);
+  if (new_cirq->queue == NULL) {
+    MTL_ERROR1(MT_FLFMT("VIP_cirq_create: VMALLOC failure"));
+    FREE(new_cirq);
+    return -1;
+  }
+  new_cirq->full = FALSE;
+  new_cirq->producer = new_cirq->consumer = 0;
+  new_cirq->q_size = q_size;
+  new_cirq->element_size = element_size;
+  MOSAL_mutex_init(&(new_cirq->cirq_access_mtx));
+  *cirq_p = new_cirq;
+  return 0;
+}
+
+
+int VIP_cirq_add(VIP_cirq_t *cirq_p, void * elt)
+{
+  int retval = 0;
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_add, cirq=0x%p"), cirq_p);
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+
+  if (cirq_p->full == TRUE || elt == NULL) {
+    MTL_ERROR1(MT_FLFMT("VIP_cirq_add: queue 0x%p full or element to add (0x%p) is null"), cirq_p->queue, elt);
+    retval = -1;
+  } else {
+    /* insert at the producer end */
+    memcpy(((u_int8_t *)(cirq_p->queue)) + (cirq_p->producer * cirq_p->element_size), elt, cirq_p->element_size);
+    (cirq_p->producer)++;
+    if (cirq_p->producer == cirq_p->q_size)
+      cirq_p->producer = 0;
+    if (cirq_p->producer == cirq_p->consumer) {
+      cirq_p->full = TRUE;
+    }
+  }
+
+  // VIP_cirq_stats_print(cirq_p);
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  return (retval);
+}
+
+int VIP_cirq_peek(VIP_cirq_t *cirq_p, void * elt)
+{
+  int retval = 0;
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_peek"));
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+
+  /* check that elt is not null and queue not empty */
+  if (VIP_cirq_empty_no_mtx(cirq_p) || elt == NULL) {
+    MTL_DEBUG1(MT_FLFMT("VIP_cirq_peek: queue 0x%p is empty or element (0x%p) is NULL"), cirq_p->queue, elt);
+    retval = -1;
+  } else {
+    memcpy(elt, ((u_int8_t *)(cirq_p->queue)) + (cirq_p->consumer * cirq_p->element_size), cirq_p->element_size);
+  }
+
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  return retval;
+}
+
+int VIP_cirq_peek_ptr(VIP_cirq_t *cirq_p, void **elt)
+{
+  int retval = 0;
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_peek_ptr"));
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+
+  /* check that elt is not null and queue not empty */
+  if (VIP_cirq_empty_no_mtx(cirq_p) || elt == NULL) {
+    MTL_DEBUG1(MT_FLFMT("VIP_cirq_peek_ptr: queue 0x%p is empty or element (0x%p) is NULL"), cirq_p->queue, elt);
+    retval = -1;
+  } else {
+    *elt = ((u_int8_t *)(cirq_p->queue)) + (cirq_p->consumer * cirq_p->element_size);
+  }
+
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  return retval;
+}
+
+
+int VIP_cirq_remove(VIP_cirq_t *cirq_p, void * elt)
+{
+  int retval = 0;
+
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_remove, cirq=0x%p"), cirq_p);
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+
+  if (VIP_cirq_empty_no_mtx(cirq_p)) {
+    MTL_DEBUG1(MT_FLFMT("VIP_cirq_remove: queue 0x%p is empty"), cirq_p->queue);
+    retval = -1;
+  } else {
+    /* remove (read) at the consumer end */
+    if (elt != NULL) {
+      memcpy(elt, ((u_int8_t *)(cirq_p->queue)) + (cirq_p->consumer * cirq_p->element_size),
+             cirq_p->element_size);
+    }
+    (cirq_p->consumer)++;
+    cirq_p->full = FALSE;
+    if (cirq_p->consumer == cirq_p->q_size)
+      cirq_p->consumer = 0;
+    if (cirq_p->consumer == cirq_p->producer) {
+      MTL_DEBUG4(MT_FLFMT("VIP_cirq_remove: queue 0x%p is empty"), cirq_p->queue);
+      cirq_p->consumer = cirq_p->producer = 0;
+      cirq_p->full = FALSE;
+    }
+  }
+  // VIP_cirq_stats_print(cirq_p);
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  return (retval);
+}
+
+int VIP_cirq_empty(VIP_cirq_t *cirq_p)
+{
+  int retval = FALSE;
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+  if (cirq_p->consumer == cirq_p->producer && !(cirq_p->full))
+    retval = TRUE;
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  return retval;
+}
+
+
+int VIP_cirq_destroy(VIP_cirq_t *cirq_p)
+{
+  call_result_t mt_rc;
+
+  MTL_DEBUG4(MT_FLFMT("VIP_cirq_destroy, cirq=0x%p"), cirq_p);
+  MOSAL_mutex_acq(&(cirq_p->cirq_access_mtx), TRUE);
+  VFREE(cirq_p->queue);
+  MOSAL_mutex_rel(&(cirq_p->cirq_access_mtx));
+  mt_rc = MOSAL_mutex_free(&(cirq_p->cirq_access_mtx));
+  if (mt_rc != MT_OK) {
+    MTL_ERROR2(MT_FLFMT("Failed MOSAL_mutex_free (%s)"), mtl_strerror_sym(mt_rc));
+  }
+  FREE(cirq_p);
+  return (0);
+}
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.h
new file mode 100644
index 00000000..26c6962a
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_cirq.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef VIP_COMMON_VIP_CIRQ_H
+#define VIP_COMMON_VIP_CIRQ_H
+
+
+#include
+#include
+
+typedef struct VIP_cirq_st {
+  int consumer;
+  int producer;
+  int q_size;
+  MT_bool full;
+  void* queue;
+  int element_size;
+  MOSAL_mutex_t cirq_access_mtx;
+} VIP_cirq_t;
+
+int VIP_cirq_create(int q_size, int element_size, VIP_cirq_t **cirq);
+int VIP_cirq_remove(VIP_cirq_t *cirq_p, void * elt);
+int VIP_cirq_add(VIP_cirq_t *cirq_p, void * elt);
+void VIP_cirq_stats_print(VIP_cirq_t *cirq_p);
+int VIP_cirq_peek(VIP_cirq_t *cirq_p, void *elt);
+int VIP_cirq_peek_ptr(VIP_cirq_t *cirq_p, void **elt);
+int VIP_cirq_destroy(VIP_cirq_t *cirq_p);
+int VIP_cirq_empty(VIP_cirq_t *cirq_p);
+
+#endif
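A minimal usage sketch of the circular-queue API above (illustration only, not part of the patch; the int payload and queue depth are arbitrary). Note that the `full` flag is what distinguishes a full queue from an empty one when producer == consumer:

    VIP_cirq_t *q;
    int in = 42, out = 0;

    if (VIP_cirq_create(16, sizeof(int), &q) != 0)
        return;                       /* allocation failed */
    if (VIP_cirq_add(q, &in) == 0) {  /* copies sizeof(int) bytes in */
        VIP_cirq_peek(q, &out);       /* reads the head, does not consume it */
        VIP_cirq_remove(q, &out);     /* consumes the head; out == 42 */
    }
    VIP_cirq_destroy(q);              /* frees the queue storage and the mutex */

All entry points serialize on the internal mutex, so a single queue may safely be shared by one producer and one consumer thread.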
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_common.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_common.h
new file mode 100644
index 00000000..2143c986
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_common.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_VIP_COMMON_H
+#define H_VIP_COMMON_H
+
+#include
+#include
+
+enum VIP_common_ret {
+  /* Remap VAPI return codes */
+
+  /* Non error return values */
+  VIP_OK = VAPI_OK,
+
+  /* General errors */
+  VIP_EGEN = VAPI_EGEN,                 /* general error for entire stack */
+  VIP_EAGAIN = VAPI_EAGAIN,             /* Not enough resources (try again later...) */
+  VIP_EBUSY = VAPI_EBUSY,               /* Resource is in use */
+  VIP_ETIMEDOUT = VAPI_ETIMEOUT,        /* Operation timed out */
+  VIP_EINTR = VAPI_EINTR,               /* Interrupted blocking function (operation not completed) */
+  VIP_EFATAL = VAPI_EFATAL,             /* catastrophic error */
+  VIP_ENOMEM = VAPI_ENOMEM,             /* Invalid address or exhausted physical memory quota */
+  VIP_EPERM = VAPI_EPERM,               /* Not enough permissions */
+  VIP_ENOSYS = VAPI_ENOSYS,             /* Operation/option not supported */
+  VIP_ESYSCALL = VAPI_ESYSCALL,         /* Error in underlying O/S call */
+  VIP_EINVAL_PARAM = VAPI_EINVAL_PARAM, /* invalid parameter */
+
+  /*******************************************************/
+  /* VIP specific errors */
+
+  VIP_COMMON_ERROR_MIN = VAPI_ERROR_MAX, /* Dummy error code: put this VIP error code first */
+
+  /* General errors */
+  VIP_EINVAL_HNDL, /* Invalid (no such) handle */
+
+  VIP_COMMON_ERROR_MAX /* Dummy max error code: put all error codes before this */
+};
+
+typedef int32_t VIP_common_ret_t;
+
+/* Memory mgmt functions */
+
+/********************************************************************************
+ * Function type: VIP_allocator_malloc_t (not used for now)
+ *
+ * Arguments:
+ * size (IN) - allocate this number of bytes on the heap
+ *
+ * Returns:
+ * pointer to allocated memory.
+ * 0 if resources were unavailable + * + * Description: + * allocate given amount of bytes memory + * + ********************************************************************************/ +typedef void* (VIP_allocator_malloc_t)(size_t size); +/******************************************************************************** + * Function type: VIP_allocator_free_t + * + * Arguments: + * void* (IN) - deallocate memory at this location + * + * Returns: + * void + * + * Description: + * deallocate memory at a given location + * + ********************************************************************************/ +typedef void (*VIP_allocator_free_t)(void *); + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.c new file mode 100644 index 00000000..d6ce1dcf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + #include + #include + + int VIP_delay_unlock_create(VIP_delay_unlock_t * delay_unlock_obj_p) + { + VIP_delay_unlock_t new_obj = NULL; + + new_obj = (VIP_delay_unlock_t )MALLOC(sizeof(struct VIP_delay_unlock_st)); + if (new_obj == NULL) { + * delay_unlock_obj_p = NULL; + /* malloc failure */ + return -3; + } + new_obj->list_start = NULL; + new_obj->is_valid = TRUE; /* sets list to valid */ + MOSAL_spinlock_init(&new_obj->spl); + *delay_unlock_obj_p = new_obj; + return 0; + } + + int VIP_delay_unlock_insert(VIP_delay_unlock_t delay_unlock_obj, + MOSAL_iobuf_t iobuf) + { + + VIP_delay_unlock_elem_t * new_elt = NULL; + VIP_delay_unlock_elem_t * temp_next = NULL; + + if (delay_unlock_obj == NULL) { + return -1; /* invalid argument */ + } + + /* allocate a new stack element */ + new_elt = TMALLOC(VIP_delay_unlock_elem_t); + if (new_elt == NULL) { + /* malloc failure */ + MTL_ERROR1("%s: MALLOC failure. cannot defer delete of iobuf=0x%p\n", + __func__, (void *) iobuf); + return -3; + } + new_elt->iobuf = iobuf; + MTL_DEBUG1("%s: DEFERRING unlock of iobuf=0x%p \n", + __func__, (void *) iobuf); + MOSAL_spinlock_dpc_lock(&delay_unlock_obj->spl); + if (delay_unlock_obj->is_valid == FALSE) { + /* list has been deleted */ + MOSAL_spinlock_unlock(&delay_unlock_obj->spl); + MTL_ERROR1("%s: DEFERRED LIST has been deleted. 
FAIL deferring delete of iobuf=0x%p\n",
+                __func__, (void *) iobuf);
+     FREE(new_elt);
+     return -2;
+   }
+   temp_next = delay_unlock_obj->list_start;
+   delay_unlock_obj->list_start = new_elt;
+   new_elt->next = temp_next;
+   MOSAL_spinlock_unlock(&delay_unlock_obj->spl);
+
+   return 0;
+ }
+
+ int VIP_delay_unlock_destroy(VIP_delay_unlock_t delay_unlock_obj)
+ {
+   VIP_delay_unlock_elem_t * found_elt = NULL;
+   VIP_delay_unlock_elem_t * next_elt = NULL;
+
+   if (delay_unlock_obj == NULL) {
+     return -1; /* invalid argument */
+   }
+   /* make list invalid */
+   MOSAL_spinlock_dpc_lock(&delay_unlock_obj->spl);
+   if (delay_unlock_obj->is_valid == FALSE) {
+     /* list has been deleted */
+     MOSAL_spinlock_unlock(&delay_unlock_obj->spl);
+     return -2;
+   }
+   delay_unlock_obj->is_valid = FALSE;
+   found_elt = delay_unlock_obj->list_start;
+   MOSAL_spinlock_unlock(&delay_unlock_obj->spl);
+
+   /* need no more spinlocks */
+   while (found_elt != NULL) {
+     next_elt = found_elt->next;
+     MOSAL_iobuf_deregister(found_elt->iobuf);
+     MTL_DEBUG1("%s: DEFERRED unlock iobuf=0x%p\n",
+                __func__, found_elt->iobuf);
+     FREE(found_elt);
+     found_elt = next_elt;
+   }
+   FREE(delay_unlock_obj);
+
+   return 0;
+ }
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.h
new file mode 100644
index 00000000..9a16205a
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef VIP_COMMON_VIP_DELAY_UNLOCK_H
+#define VIP_COMMON_VIP_DELAY_UNLOCK_H
+
+
+#include
+#include
+
+
+typedef struct VIP_delay_unlock_st * VIP_delay_unlock_t;
+
+
+int VIP_delay_unlock_create(VIP_delay_unlock_t * delay_unlock_obj_p);
+
+int VIP_delay_unlock_insert(VIP_delay_unlock_t delay_unlock_obj, MOSAL_iobuf_t iobuf);
+int VIP_delay_unlock_destroy(VIP_delay_unlock_t delay_unlock_obj);
+
+
+#endif
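A minimal sketch of the intended flow (illustration only, not part of the patch; assumes `iobuf` is a MOSAL_iobuf_t registered elsewhere; the return codes follow the source: -1 invalid argument, -2 list already destroyed, -3 allocation failure):

    VIP_delay_unlock_t du;

    if (VIP_delay_unlock_create(&du) != 0)
        return;
    /* Instead of deregistering iobuf now (e.g., in a context where the
       deregistration must be deferred), queue it on the delay list: */
    if (VIP_delay_unlock_insert(du, iobuf) != 0) {
        MOSAL_iobuf_deregister(iobuf);  /* fall back: deregister directly */
    }
    /* Later, in a safe context: deregisters every queued iobuf, frees
       each list element, and frees the list object itself. */
    VIP_delay_unlock_destroy(du);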
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock_priv.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock_priv.h
new file mode 100644
index 00000000..ecd7e4e4
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_delay_unlock_priv.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef VIP_COMMON_VIP_DELAY_UNLOCK_PRIV_H
+#define VIP_COMMON_VIP_DELAY_UNLOCK_PRIV_H
+
+#include
+#include
+#include
+
+typedef struct VIP_delay_unlock_elem_st {
+  MOSAL_iobuf_t iobuf;
+  struct VIP_delay_unlock_elem_st * next;
+} VIP_delay_unlock_elem_t;
+
+
+struct VIP_delay_unlock_st {
+  struct VIP_delay_unlock_elem_st * list_start;
+  MT_bool is_valid;
+  MOSAL_spinlock_t spl;
+};
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.c b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.c
new file mode 100644
index 00000000..864f7e53
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include
+
+#define VIP_HASH_PRIME 0 /* 1 -> prime number of buckets, 0 -> power of 2 */
+
+#if VIP_HASH_PRIME
+static const unsigned long prime_list[] =
+{
+  /* approx 100% table size growth per resize (except for 1st entry) */
+  7ul, 53ul, 97ul, 193ul, 389ul,
+  769ul, 1543ul, 3079ul, 6151ul,
+
+  /* approx 20% table size growth per resize */
+  12289ul, 14747ul, 17707ul, 21269ul, 25523ul,
+  30631ul, 36761ul, 44119ul, 52951ul, 63559ul,
+  76283ul, 91541ul, 109859ul, 131837ul, 158209ul,
+  189851ul, 227827ul, 273433ul, 328121ul, 393749ul,
+  472523ul, 567031ul, 680441ul, 816539ul, 979873ul,
+  1175849ul, 1411021ul, 1693249ul, 2031907ul, 2438309ul,
+  2925973ul, 3511171ul, 4194301ul, 5056133ul, 6067361ul,
+  7280863ul, 8737039ul, 10484471ul, 12581407ul, 15097711ul, 16777213ul,
+  18117271ul, 21740729ul, 26088911ul, 31306697ul, 37568051ul,
+  45081683ul, 54098059ul, 64917691ul, 77901247ul, 93481541ul,
+  112177873ul, 134613491ul, 161536217ul, 193843493ul, 232612217ul,
+  279134677ul, 334961647ul, 401953999ul, 482344801ul, 578813771ul,
+  694576537ul, 833491849ul, 1000190263ul, 1200228319ul, 1440273997ul,
+  1728328807ul, 2073994579ul, 2488793497ul, 2986552201ul, 3583862647ul,
+  4294967291ul
+};
+static const int n_primes = sizeof(prime_list)/sizeof(prime_list[0]);
+
+static unsigned long mtl_find_prime(unsigned long size)
+{
+  int i;
+  for (i = 0; (i != n_primes-1) && (size >= prime_list[i]); ++i);
+  return prime_list[i];
+}
+
+#define VIP_HASH_BUCKET(key, num_buckets) ((key) % (num_buckets))
+
+#else /* VIP_HASH_PRIME */
+
+static unsigned long mtl_find_power2(unsigned long size)
+{
+  unsigned long p2;
+  for (p2 = 8; p2 < 65536 && (size >= p2); p2 <<= 1);
+  return p2;
+}
+#define VIP_HASH_BUCKET(key, num_buckets) ((key) & ((num_buckets)-1))
+
+#endif /* VIP_HASH_PRIME */
+
+
+/************************************************************************/
+static inline u_int32_t hash_u64tou32(u_int64_t u64)
+{
+  u_int32_t high = (u_int32_t) (u64 >> 32);
+  u_int32_t low  = (u_int32_t) (u64 & ~(u_int32_t)0);
+  u_int32_t h = high ^ low;
+  return h;
+} /* hash_u64tou32 */
+
+
+/************************************************************************/
+static inline u_int32_t hash_uv4tou32(u_int32_t *uv4)
+{
+  u_int32_t h = uv4[0] ^ uv4[1] ^ uv4[2] ^ uv4[3];
+  return h;
+} /* hash_uv4tou32 */
+
+
+#include "vip_hash.h"
+#include "vip_hash.ic" /* 1st time, now __VIP_HASH_VARIANT == 0 */
+
+#include "vip_hashp.h"
+#include "vip_hash.ic" /* 2nd time, now __VIP_HASH_VARIANT == 1 */
+
+#include "vip_hashp2p.h"
+#include "vip_hash.ic" /* 3rd time, now __VIP_HASH_VARIANT == 2 */
+
+#include "vip_hash64p.h"
+#include "vip_hash.ic" /* 4th time, now __VIP_HASH_VARIANT == 3 */
+
+#include "vip_hashv4p.h"
+#include "vip_hash.ic" /* 5th time, now __VIP_HASH_VARIANT == 4 */
+
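With VIP_HASH_PRIME set to 0 (as above), bucket counts are powers of two and VIP_HASH_BUCKET reduces to a mask; a worked example, illustration only:

    /* mtl_find_power2(100) scans 8, 16, 32, 64, 128 and stops at 128, the
       first power of two strictly greater than the request (capped at 64K): */
    unsigned long buckets = mtl_find_power2(100);          /* == 128  */

    /* VIP_HASH_BUCKET(key, 128) == key & 127: only the low 7 bits pick the
       bucket, which is why the hash helpers above fold all input bits into
       the low word first (hash_u64tou32 XORs the two 32-bit halves).       */
    u_int32_t b = VIP_HASH_BUCKET(0xdeadbeefUL, buckets);  /* == 0x6f */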
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.h
new file mode 100644
index 00000000..1ac42a5d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef VIP_COMMON_VIP_HASH_H
+#define VIP_COMMON_VIP_HASH_H
+
+#include
+#include "vip_common.h"
+#include "vip_array.h"
+
+typedef VIP_array_handle_t VIP_hash_value_t;
+typedef struct VIP_hash_t* VIP_hash_p_t;
+
+#undef __VIP_HASH_VARIANT
+#define __VIP_HASH_VARIANT 0
+#include "vip_hash.ih"
+
+#endif /* VIP_COMMON_VIP_HASH_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ic b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ic
new file mode 100644
index 00000000..5d8ed16b
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ic
@@ -0,0 +1,893 @@
+/*
+ * Copyright (c) 2005 InfiniCon Systems. All rights reserved.
+ * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id:$
+ */
+
+
+# undef __VIP_HASH_NODE_T
+# undef __VIP_HASH_NODE_P_T
+# undef __VIP_HASH_TBL_T
+# undef __VIP_HASH_FUNC
+
+#if __VIP_HASH_VARIANT == 0
+# define __VIP_HASH_NODE_T VIP_hash_node_t
+# define __VIP_HASH_NODE_P_T VIP_hash_node_p_t
+# define __VIP_HASH_TBL_T VIP_hash_t
+# define __VIP_HASH_FUNC(k) (k)
+# undef resize
+# define resize32
+#elif __VIP_HASH_VARIANT == 1
+# define __VIP_HASH_NODE_T VIP_hashp_node_t
+# define __VIP_HASH_NODE_P_T VIP_hashp_node_p_t
+# define __VIP_HASH_TBL_T VIP_hashp_t
+# define __VIP_HASH_FUNC(k) (k)
+# undef resize
+# define resize resizep
+#elif __VIP_HASH_VARIANT == 2
+# define __VIP_HASH_NODE_T VIP_hashp2p_node_t
+# define __VIP_HASH_NODE_P_T VIP_hashp2p_node_p_t
+# define __VIP_HASH_TBL_T VIP_hashp2p_t
+# ifdef MT_64BIT
+#  define __VIP_HASH_FUNC(pkey) hash_u64tou32((u_int64_t) (pkey))
+# else
+#  define __VIP_HASH_FUNC(pkey) ((u_int32_t)(u_intn_t)(pkey))
+# endif
+# undef resize
+# define resize resizep2p
+#elif __VIP_HASH_VARIANT == 3
+# define __VIP_HASH_NODE_T VIP_hash64p_node_t
+# define __VIP_HASH_NODE_P_T VIP_hash64p_node_p_t
+# define __VIP_HASH_TBL_T VIP_hash64p_t
+# define __VIP_HASH_FUNC(pkey) hash_u64tou32((u_int64_t) (pkey))
+# undef resize
+# define resize resize64p
+#elif __VIP_HASH_VARIANT == 4
+# define __VIP_HASH_NODE_T VIP_hashv4p_node_t
+# define __VIP_HASH_NODE_P_T VIP_hashv4p_node_p_t
+# define __VIP_HASH_TBL_T VIP_hashv4p_t
+# define __VIP_HASH_FUNC(pkey) hash_uv4tou32((pkey))
+# undef resize
+# define resize resizev4p
+#else
+# error Unsupported __VIP_HASH_VARIANT variant
+#endif
+
+#include
+
+typedef struct __VIP_HASH_NODE_T {
+  __VIP_HASH_KEY_T key;
+  __VIP_HASH_VAL_T val;
+  struct __VIP_HASH_NODE_T* next;
+} __VIP_HASH_NODE_T;
+typedef __VIP_HASH_NODE_T* __VIP_HASH_NODE_P_T;
+
+
+typedef struct __VIP_HASH_TBL_T {
+  __VIP_HASH_NODE_P_T** nodes_1st_lvl_begin;
+  u_int32_t size;
+  u_int32_t buckets;
+  MT_bool may_grow;
+  MOSAL_spinlock_t hash_lock;
+  u_int32_t max_size;
+  u_int32_t max_buckets;
+  u_int32_t max_2nd_lvl_blocks;
+  u_int32_t sec_lvl_buckets_per_blk;
+  u_int32_t sec_lvl_buckets_per_blk_m_1;
+  u_int32_t size_2nd_lvl_block;
+  u_int32_t log2_2nd_lvl_entries_per_blk;
+  MT_bool resize_in_progress; /* Notify other threads of an ongoing resize */
+} __VIP_HASH_TBL_T;
+
+#define HASH_DEFAULT_MAXSIZE (16777212ul) /* default max size is 16M entries */
+#define HASH_LOG_PTR_SIZE ((sizeof(void *) == 8) ? 3 : 2)
+#define HASH_LOG_2ND_LVL_BUCKETS_PER_BLOCK (MOSAL_SYS_PAGE_SHIFT + 1 - HASH_LOG_PTR_SIZE)
+#define HASH_2ND_LVL_BLOCK_SIZE (2*MOSAL_SYS_PAGE_SIZE)
+#define HASH_2ND_LVL_BUCKETS_PER_BLOCK (HASH_2ND_LVL_BLOCK_SIZE / sizeof(__VIP_HASH_NODE_P_T*))
+#define HASH_2ND_LVL_ENTRY_SIZE (sizeof(__VIP_HASH_NODE_P_T*))
+
+#define HASH_CALC_MAX_2ND_LVL_BLOCKS(hash) ((hash->max_buckets + (hash->sec_lvl_buckets_per_blk_m_1)) / (hash->sec_lvl_buckets_per_blk))
+#define HASH_CALC_NUM_2ND_LVL_BLOCKS(buckets, hash) ((buckets + (hash->sec_lvl_buckets_per_blk_m_1)) / (hash->sec_lvl_buckets_per_blk))
+#define GET_BUCKET_BY_IX(hash, ix) ((__VIP_HASH_NODE_P_T*) &((*(hash->nodes_1st_lvl_begin+((ix) >> hash_tbl->log2_2nd_lvl_entries_per_blk))\
+ )[(ix) & (hash->sec_lvl_buckets_per_blk_m_1)]))
+
+
+/******************************************************************************/
+static call_result_t resize(__VIP_HASH_T hash_tbl, u_int32_t reserve)
+{
+// if (hash_tbl->buckets < reserve)
+// {
+// }
+  u_int32_t blocks_needed = 0, curr_blocks = 0, block_size_to_allocate = 0;
+  u_int32_t old_last_allocated_blocksize = 0, new_first_allocated_blocksize = 0;
+  u_int32_t buckets_per_blk = hash_tbl->sec_lvl_buckets_per_blk;
+  int i, j;
+  u_int32_t old_buckets = hash_tbl->buckets;
+  u_int32_t old_bucket_ix;
+#if VIP_HASH_PRIME
+  u_int32_t new_buckets = (u_int32_t) mtl_find_prime((unsigned long) reserve);
+#else
+  u_int32_t new_buckets = (u_int32_t) mtl_find_power2((unsigned long) reserve);
+#endif
+
+  if (hash_tbl->buckets >= hash_tbl->max_buckets) {
+    /* we already have the max number of buckets allocated */
+    hash_tbl->may_grow = FALSE;
+    return MT_EAGAIN;
+  } else if (new_buckets > hash_tbl->max_buckets) {
+    new_buckets = hash_tbl->max_buckets;
+  }
+  /* We may read the values below with no lock since only one thread will enter this
+   * function at one time (based on the resize_in_progress flag), so these values cannot
+   * be modified at this time (here is the only location for such changes).
+   */
+
+  /* allocate extension of table. This extension is still invisible to users, because
+   * hash_tbl->buckets is not modified until after all the allocations succeed */
+  blocks_needed = HASH_CALC_NUM_2ND_LVL_BLOCKS(new_buckets, hash_tbl);
+  curr_blocks = HASH_CALC_NUM_2ND_LVL_BLOCKS(hash_tbl->buckets, hash_tbl);
+  if (blocks_needed > curr_blocks) {
+    /* Need a larger first level structure */
+    __VIP_HASH_NODE_P_T** new_1st_level;
+    __VIP_HASH_NODE_P_T** old_1st_level;
+    u_int32_t new_1st_lvl_size = sizeof(__VIP_HASH_NODE_P_T**)*(blocks_needed);
+    new_1st_level = (__VIP_HASH_NODE_P_T**)MALLOC(new_1st_lvl_size);
+    if (new_1st_level == NULL) {
+      hash_tbl->may_grow = FALSE;
+      MTL_ERROR1(MT_FLFMT("%s failed: cannot allocate memory for first level"), __func__);
+      return MT_EAGAIN;
+    }
+    memset(new_1st_level, 0, new_1st_lvl_size);
+    MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+    if (hash_tbl->nodes_1st_lvl_begin != NULL) {
+      memcpy(new_1st_level, hash_tbl->nodes_1st_lvl_begin, sizeof(__VIP_HASH_NODE_P_T**)*(curr_blocks));
+      old_1st_level = hash_tbl->nodes_1st_lvl_begin;
+      hash_tbl->nodes_1st_lvl_begin = new_1st_level;
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+      FREE(old_1st_level);
+    } else {
+      hash_tbl->nodes_1st_lvl_begin = new_1st_level;
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+    }
+  }
+  block_size_to_allocate = hash_tbl->size_2nd_lvl_block;
+
+  /* adjust previous last block for new number of buckets. Need the spinlock when adjusting
+   * the last second level block in the current table.
*/ + if (curr_blocks > 0) { + __VIP_HASH_NODE_P_T* new_2nd_lvl_block; + __VIP_HASH_NODE_P_T* old_2nd_lvl_block; + old_last_allocated_blocksize = + (hash_tbl->buckets - ((curr_blocks-1)*(hash_tbl->sec_lvl_buckets_per_blk)))*HASH_2ND_LVL_ENTRY_SIZE; + if (curr_blocks == blocks_needed) { + /* just allocate a new short block */ + new_first_allocated_blocksize = (new_buckets - + ((blocks_needed-1)*(hash_tbl->sec_lvl_buckets_per_blk)) + ) * HASH_2ND_LVL_ENTRY_SIZE; + } else { + new_first_allocated_blocksize = block_size_to_allocate; + } + + new_2nd_lvl_block = (__VIP_HASH_NODE_P_T*)MALLOC(new_first_allocated_blocksize); + if (new_2nd_lvl_block == NULL) { + hash_tbl->may_grow = FALSE; + MTL_ERROR1(MT_FLFMT("%s failed: cannot allocate memory for second level"), __func__ ); + return MT_EAGAIN; + } + memset(new_2nd_lvl_block, 0, new_first_allocated_blocksize); + MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock)); + old_2nd_lvl_block = hash_tbl->nodes_1st_lvl_begin[curr_blocks-1]; + memcpy(new_2nd_lvl_block,old_2nd_lvl_block,old_last_allocated_blocksize); + hash_tbl->nodes_1st_lvl_begin[curr_blocks-1] = new_2nd_lvl_block; + MOSAL_spinlock_unlock(&(hash_tbl->hash_lock)); + FREE(old_2nd_lvl_block); + } + + for (i = curr_blocks; i < (int) blocks_needed; i++) { + if (i == (int)blocks_needed - 1) { + /* allocate a smaller block for the last 2nd level block */ + block_size_to_allocate = (new_buckets - + ((blocks_needed-1)*(hash_tbl->sec_lvl_buckets_per_blk)) + ) * HASH_2ND_LVL_ENTRY_SIZE; + } + hash_tbl->nodes_1st_lvl_begin[i] = (__VIP_HASH_NODE_P_T*)MALLOC(block_size_to_allocate); + if (hash_tbl->nodes_1st_lvl_begin[i] == NULL) { + MTL_ERROR1(MT_FLFMT("hash resize: malloc failure at 2nd level block %d"), i); + for (j = curr_blocks; j < i; j++) { + FREE(hash_tbl->nodes_1st_lvl_begin[j]); + hash_tbl->nodes_1st_lvl_begin[j]=NULL; + } + hash_tbl->may_grow = FALSE; + return MT_EAGAIN; + } else { + memset(hash_tbl->nodes_1st_lvl_begin[i], 0, block_size_to_allocate); + } + } + + /* adjust vip array object parameters */ + + /* add required n */ + /* Rehash all nodes from buckets table to new positions */ + MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock)); + hash_tbl->buckets = new_buckets; + + old_bucket_ix = 0; + for (i = 0; i < (int)curr_blocks; i++) + { + __VIP_HASH_NODE_P_T* sec_lvl_block; + if (i == (int)curr_blocks-1) { + buckets_per_blk = old_buckets - (i * buckets_per_blk); + } + sec_lvl_block = hash_tbl->nodes_1st_lvl_begin[i]; + for (j = 0; j < (int)buckets_per_blk; old_bucket_ix++,j++) { + + __VIP_HASH_NODE_P_T* new_bucket; + __VIP_HASH_NODE_P_T node; + __VIP_HASH_NODE_P_T* prev = (sec_lvl_block+j); + node = *prev; + while (node != NULL) + { + u_int32_t new_bucket_idx; + new_bucket_idx = VIP_HASH_BUCKET(__VIP_HASH_FUNC(node->key), new_buckets); + if (old_bucket_ix == new_bucket_idx) { + /* rehashes to same bucket. just continue, updating prev pointer */ + prev = &(node->next); + node = node->next; + continue; + } + /* need to move entry to another bucket */ + /* a. remove from old */ + *prev = node->next; + /* b. insert node into a different bucket. done here without spinlocks, since + * entire table is currently locked + */ + new_bucket = GET_BUCKET_BY_IX(hash_tbl,new_bucket_idx); + node->next = *new_bucket; + *new_bucket = node; + /* go to next link in old bucket. 
prev guaranteed not null */
+          node = *prev;
+        }
+      }
+    }
+  }
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+  return MT_OK;
+} /* resize[p] */
+
+
+/*******************************************************************************
+ * Function: VIP_hash[p]_create_maxsize
+ *
+ * Arguments:
+ * size (IN) - Approximate initial size.
+ * max_size (IN) - Approximate maximum allowed number of entries in the table.
+ * VIP_hash[p]_p (OUT) - Return new hash_tbl object here
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EAGAIN: Not enough resources
+ *
+ * Description:
+ * Create a new hash_tbl table
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_create_maxsize,
+                                 VIP_hashp_create_maxsize,
+                                 VIP_hashp2p_create_maxsize,
+                                 VIP_hash64p_create_maxsize,
+                                 VIP_hashv4p_create_maxsize)
+(u_int32_t reserve, u_int32_t max_size, __VIP_HASH_T* hash_p)
+{
+  __VIP_HASH_T hash_tbl = NULL;
+
+  if (max_size == 0) { max_size = HASH_DEFAULT_MAXSIZE; }
+  if (reserve > max_size) {
+    MTL_ERROR1(MT_FLFMT("%s: requested size (0x%x) greater than supplied max size (0x%x)"), __func__,
+               reserve, max_size);
+    return VIP_EINVAL_PARAM;
+  }
+  if (reserve > HASH_DEFAULT_MAXSIZE) {
+    MTL_ERROR1(MT_FLFMT("%s: requested size (0x%x) greater than max permitted"), __func__, reserve);
+    return VIP_EINVAL_PARAM;
+  }
+
+  hash_tbl = TMALLOC(__VIP_HASH_TBL_T);
+  if (hash_tbl == NULL) {
+    MTL_ERROR1(MT_FLFMT("%s failed: cannot allocate memory"), __func__);
+    return VIP_EAGAIN;
+  }
+
+  *hash_p = hash_tbl;
+
+  memset(hash_tbl, 0, sizeof(__VIP_HASH_TBL_T));
+  hash_tbl->size = 0;
+  hash_tbl->buckets = 0;
+  hash_tbl->may_grow = TRUE;
+  hash_tbl->resize_in_progress = FALSE;
+  hash_tbl->max_size = max_size;
+#if VIP_HASH_PRIME
+  hash_tbl->max_buckets = (u_int32_t) mtl_find_prime((unsigned long) max_size);
+#else
+  hash_tbl->max_buckets = (u_int32_t) mtl_find_power2((unsigned long) max_size);
+#endif
+  hash_tbl->sec_lvl_buckets_per_blk = HASH_2ND_LVL_BUCKETS_PER_BLOCK;
+  hash_tbl->sec_lvl_buckets_per_blk_m_1 = hash_tbl->sec_lvl_buckets_per_blk - 1;
+  hash_tbl->size_2nd_lvl_block = HASH_2ND_LVL_BLOCK_SIZE;
+  hash_tbl->log2_2nd_lvl_entries_per_blk = HASH_LOG_2ND_LVL_BUCKETS_PER_BLOCK;
+  hash_tbl->max_2nd_lvl_blocks = HASH_CALC_MAX_2ND_LVL_BLOCKS(hash_tbl);
+
+  MOSAL_spinlock_init(&(hash_tbl->hash_lock));
+  /* Allocate hash table of given "reserve" size (will be at least the minimum > 0) */
+  // MTL_DEBUG4(MT_FLFMT("going to call resize"));
+  if (resize(hash_tbl, reserve) != MT_OK)
+  { /* nodes_begin should never be NULL */
+    if (hash_tbl->nodes_1st_lvl_begin != NULL) {
+      FREE(hash_tbl->nodes_1st_lvl_begin);
+    }
+    FREE(hash_tbl);
+    return VIP_EAGAIN;
+  }
+
+  return VIP_OK;
+} /* VIP_hash[p]_create_maxsize */
+
+
+/*******************************************************************************
+ * Function: VIP_hash[p]_create
+ *
+ * Arguments:
+ * size (IN) - Approximate initial size.
+ * VIP_hash[p]_p (OUT) - Return new hash_tbl object here
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EAGAIN: Not enough resources
+ *
+ * Description:
+ * Create a new hash_tbl table with the default maximum size.
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_create,
+                                 VIP_hashp_create,
+                                 VIP_hashp2p_create,
+                                 VIP_hash64p_create,
+                                 VIP_hashv4p_create)
+(u_int32_t reserve, __VIP_HASH_T* hash_p)
+{
+  return __VIP_HASH_PICK(VIP_hash_create_maxsize,
+                         VIP_hashp_create_maxsize,
+                         VIP_hashp2p_create_maxsize,
+                         VIP_hash64p_create_maxsize,
+                         VIP_hashv4p_create_maxsize) (reserve, HASH_DEFAULT_MAXSIZE, hash_p);
+} /* VIP_hash[p]_create */
+
+
+/*******************************************************************************
+ * Function: VIP_hash[p]_destroy
+ *
+ * Arguments:
+ * hash_tbl (IN) - Object to destroy
+ *
+ * Returns:
+ * VIP_OK
+ *
+ * Description:
+ * cleanup resources for a hash_tbl table
+ *
+ ********************************************************************************/
+VIP_common_ret_t
+#if __VIP_HASH_VARIANT != 1 && __VIP_HASH_VARIANT != 2 && __VIP_HASH_VARIANT != 3 && __VIP_HASH_VARIANT != 4
+ __VIP_HASH_PICK(VIP_hash_destroy,
+                 VIP_hashp_destroy BUTNOTUSEDHERE,
+                 VIP_hashp2p_destroy BUTNOTUSEDHERE,
+                 VIP_hash64p_destroy BUTNOTUSEDHERE,
+                 VIP_hashv4p_destroy BUTNOTUSEDHERE)
+ (__VIP_HASH_T hash_tbl)
+#else /* __VIP_HASH_VARIANT == 1||2||3||4 */
+ __VIP_HASH_PICK(VIP_hash_destroy BUTNOTUSEDHERE,
+                 VIP_hashp_destroy,
+                 VIP_hashp2p_destroy,
+                 VIP_hash64p_destroy,
+                 VIP_hashv4p_destroy) (
+  __VIP_HASH_T hash_tbl,
+  void (*free_objects_fun)(__VIP_HASH_KEY_T key, __VIP_HASH_VAL_T val, void* priv_data),
+  void* priv_data
+ )
+#endif
+{
+  int i, j;
+  register u_int32_t buckets_per_blk = 0;
+  register u_int32_t curr_blocks;
+  register __VIP_HASH_NODE_P_T* sec_lvl_block;
+  register __VIP_HASH_NODE_P_T node;
+  register __VIP_HASH_NODE_P_T next;
+
+  if (hash_tbl == NULL) return VIP_OK;
+  curr_blocks = HASH_CALC_NUM_2ND_LVL_BLOCKS(hash_tbl->buckets, hash_tbl);
+  buckets_per_blk = hash_tbl->sec_lvl_buckets_per_blk;
+
+  /* No lock because:
+   * 1) the process is not supposed to use the object at this stage
+   * 2) "free_objects_fun" may invoke a "free" function
+   */
+  /*MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));*/
+
+  for (i = 0; i < (int)curr_blocks; i++)
+  {
+    if (i == (int)curr_blocks-1) {
+      /* adjust for last 2nd level block having fewer entries */
+      buckets_per_blk = hash_tbl->buckets - (i * buckets_per_blk);
+    }
+    sec_lvl_block = hash_tbl->nodes_1st_lvl_begin[i];
+    for (j = 0; j < (int)buckets_per_blk; j++) {
+      for (node = *(sec_lvl_block+j); node; node = next)
+      {
+        next = node->next;
+#if __VIP_HASH_VARIANT == 1 || __VIP_HASH_VARIANT == 2 || __VIP_HASH_VARIANT == 3 || __VIP_HASH_VARIANT == 4
+        if (free_objects_fun) free_objects_fun(node->key, node->val, priv_data);
+#endif
+        FREE(node);
+      }
+    }
+    FREE(sec_lvl_block);
+  }
+
+  FREE(hash_tbl->nodes_1st_lvl_begin);
+  FREE(hash_tbl);
+
+  return VIP_OK;
+}
+
+/********************************************************************************
+ * Function: VIP_hash[p]_insert
+ *
+ * Arguments:
+ * hash_tbl (IN) - Insert in this table
+ * key (IN) - Key to insert
+ * val (IN) - Value to insert
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EBUSY: the key is already in the hash_tbl
+ * VIP_EAGAIN: not enough resources
+ *
+ * Description:
+ * Associate this value with this key.
+ * Return the value associated with this key.
+ * Note: if the key is already in the hash_tbl, the table is not changed, and so
+ * the returned value in tval_p may differ from val.
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_insert,
+                                 VIP_hashp_insert,
+                                 VIP_hashp2p_insert,
+                                 VIP_hash64p_insert,
+                                 VIP_hashv4p_insert)
+(
+  __VIP_HASH_T hash_tbl,
+  __VIP_HASH_KEY_T key,
+  __VIP_HASH_VAL_T val)
+{
+  __VIP_HASH_VAL_T* val_p;
+  VIP_common_ret_t rc =
+    __VIP_HASH_PICK(VIP_hash_insert_ptr,
+                    VIP_hashp_insert_ptr,
+                    VIP_hashp2p_insert_ptr,
+                    VIP_hash64p_insert_ptr,
+                    VIP_hashv4p_insert_ptr)(
+      hash_tbl, key, &val_p);
+  if (rc == VIP_OK)
+  {
+    *val_p = val;
+  }
+  return rc;
+} /* VIP_hash[p]_insert */
+
+
+/********************************************************************************
+ * Function: VIP_hash[p]_insert_ptr
+ *
+ * Arguments:
+ * hash_tbl (IN) - Insert in this table
+ * key (IN) - Key to insert
+ * tval_p (OUT) - Value associated with the key
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EBUSY: the key is already in the hash_tbl
+ * VIP_EAGAIN: not enough resources
+ *
+ * Description:
+ * Associate a new value with this key.
+ * This is like VIP_hash[p]_insert, but outputs a pointer
+ * to the value field, through which the value may be set later.
+ *
+ * Note: if the key is already in the hash_tbl,
+ * the returned value in tval_p points to the existing entry
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_insert_ptr,
+                                 VIP_hashp_insert_ptr,
+                                 VIP_hashp2p_insert_ptr,
+                                 VIP_hash64p_insert_ptr,
+                                 VIP_hashv4p_insert_ptr)
+(
+  __VIP_HASH_T hash_tbl,
+  __VIP_HASH_KEY_T key,
+  __VIP_HASH_VAL_T** tval_p)
+{
+  u_int32_t bucket_n;
+  __VIP_HASH_NODE_P_T* bucket = NULL; /* init to silence a warning */
+  __VIP_HASH_NODE_P_T node;
+  __VIP_HASH_NODE_P_T new_node;
+
+  if (hash_tbl == NULL) return VIP_EINVAL_HNDL;
+  /* Try to allocate the new hash node before locking the spinlock */
+  new_node = TMALLOC(__VIP_HASH_NODE_T);
+  if (new_node == NULL) {
+    MTL_ERROR1(MT_FLFMT("VIP_hash_insert failed to allocate new node"));
+    return VIP_EAGAIN;
+  }
+
+  MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+
+  /* Check if a resize is required */
+  if ((hash_tbl->size >= hash_tbl->buckets) && (hash_tbl->may_grow) &&
+      (!hash_tbl->resize_in_progress)) { /* Only one resize at a time */
+    hash_tbl->resize_in_progress = TRUE;
+    MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+    resize(hash_tbl, hash_tbl->size + 1);
+    /* It would be nice if resize succeeds, but we can continue anyway */
+    MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+    hash_tbl->resize_in_progress = FALSE;
+  }
+
+  bucket_n = VIP_HASH_BUCKET(__VIP_HASH_FUNC(key), hash_tbl->buckets);
+  bucket = GET_BUCKET_BY_IX(hash_tbl, bucket_n);
+  /* First check that the given key is not already in use */
+  for (node = *bucket; node; node = node->next) {
+#if __VIP_HASH_VARIANT == 4
+    if (!memcmp((const void *)&node->key[0], (const void *)&key[0], sizeof(__VIP_HASH_KEY_T))) {
+#else
+    if (node->key == key) {
+#endif
+      if (tval_p) { *tval_p = &(node->val); }
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+      FREE(new_node);
+      return VIP_EBUSY;
+    }
+  }
+
+  /* Now insert */
+#if __VIP_HASH_VARIANT == 4
+  memcpy((void *)&new_node->key[0], (const void *)&key[0], sizeof(__VIP_HASH_KEY_T));
+#else
+  new_node->key = key;
+#endif
+  new_node->next = *bucket; /* Insert as first in bucket */
+  *bucket = new_node;
+  ++hash_tbl->size;
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+
+  if (tval_p) { *tval_p = &(new_node->val); }
+
+  return VIP_OK;
+} /* VIP_hash[p]_insert_ptr */
+
+/*******************************************************************************
+ * Function: VIP_hash[p]_erase
+ *
+ * Arguments:
+ * hash_tbl (IN) - Erase from this table
+ * key (IN) - remove value by this key
+ * val (OUT) - if non zero, returns the value by this key here
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EINVAL_HNDL: key is not in the hash_tbl
+ *
+ * Description:
+ * Remove the value associated with this key
+ * Note: fails if key is not already in the hash_tbl
+ *
+ ********************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_erase,
+                                 VIP_hashp_erase,
+                                 VIP_hashp2p_erase,
+                                 VIP_hash64p_erase,
+                                 VIP_hashv4p_erase)
+(
+  __VIP_HASH_T hash_tbl,
+  __VIP_HASH_KEY_T key,
+  __VIP_HASH_VAL_T* val)
+{
+  u_int32_t bucket_n;
+  __VIP_HASH_NODE_P_T* bucket;
+  __VIP_HASH_NODE_P_T node;
+  /* pointer in previous node to this one */
+  __VIP_HASH_NODE_P_T* prev;
+
+  if (hash_tbl == NULL) return VIP_EINVAL_HNDL;
+
+  MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+
+  bucket_n = VIP_HASH_BUCKET(__VIP_HASH_FUNC(key), hash_tbl->buckets);
+  bucket = GET_BUCKET_BY_IX(hash_tbl, bucket_n);
+  prev = bucket;
+
+  /* Try to find */
+  for (node = *bucket; node; prev = &(node->next), node = node->next) {
+#if __VIP_HASH_VARIANT == 4
+    if (!memcmp((const void *)&node->key[0], (const void *)&key[0], sizeof(__VIP_HASH_KEY_T))) {
+#else
+    if (node->key == key) {
+#endif
+      *prev = node->next; /* take out of bucket */
+      --hash_tbl->size;
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+      if (val) { *val = node->val; } /* return last value before freeing node */
+      FREE(node);
+      return VIP_OK;
+    }
+  }
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+  return VIP_EINVAL_HNDL;
+} /* VIP_hash[p]_erase */
+
+/********************************************************************************
+ * Function: VIP_hash[p]_find
+ *
+ * Arguments:
+ * hash_tbl (IN) - Look up in this table
+ * key (IN) - find value by this key
+ * val (OUT) - if non zero, returns the value by this key here
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EINVAL_HNDL: key is not in the hash_tbl
+ *
+ * Description:
+ * Find the value associated with this key
+ * Note: fails if key is not already in the hash_tbl
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_find,
+                                 VIP_hashp_find,
+                                 VIP_hashp2p_find,
+                                 VIP_hash64p_find,
+                                 VIP_hashv4p_find)
+(
+  __VIP_HASH_T hash_tbl,
+  __VIP_HASH_KEY_T key,
+  __VIP_HASH_VAL_T* val)
+{
+  u_int32_t bucket_n;
+  __VIP_HASH_NODE_P_T* bucket;
+  __VIP_HASH_NODE_P_T node;
+
+  if (hash_tbl == NULL) return VIP_EINVAL_HNDL;
+
+  MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+
+  bucket_n = VIP_HASH_BUCKET(__VIP_HASH_FUNC(key), hash_tbl->buckets);
+  bucket = GET_BUCKET_BY_IX(hash_tbl, bucket_n);
+
+  /* Try to find */
+  for (node = *bucket; node; node = node->next) {
+#if __VIP_HASH_VARIANT == 4
+    if (!memcmp((const void *)&node->key[0], (const void *)&key[0], sizeof(__VIP_HASH_KEY_T))) {
+#else
+    if (node->key == key) {
+#endif
+      if (val) *val = node->val;
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+      return VIP_OK;
+    }
+  }
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+  return VIP_EINVAL_HNDL;
+} /* VIP_hash[p]_find */
+
+
+/******************************************************************************
+ * Function: VIP_hash[p]_find_ptr
+ *
+ * Arguments:
+ * hash_tbl (IN) - Look up in this table
+ * key (IN) - find value by this key
+ * val_p (OUT) - if non zero, contains the pointer to the entry
+ *               corresponding to the given key
+ *
+ * Returns:
+ * VIP_OK,
+ * VIP_EINVAL_HNDL: key is not in the hash_tbl
+ *
+ * Description:
+ * Find the value associated with this key
+ * This is like hashp_find, but returns a pointer to the
+ * value field, which makes it possible to modify
+ * the value stored by this key.
+ *
+ * Note: fails if key is not already in the hash_tbl
+ *
+ ******************************************************************************/
+VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_find_ptr,
+                                 VIP_hashp_find_ptr,
+                                 VIP_hashp2p_find_ptr,
+                                 VIP_hash64p_find_ptr,
+                                 VIP_hashv4p_find_ptr)
+(
+  __VIP_HASH_T hash_tbl,
+  __VIP_HASH_KEY_T key,
+  __VIP_HASH_VAL_T** val_p)
+{
+  u_int32_t bucket_n;
+  __VIP_HASH_NODE_P_T* bucket;
+  __VIP_HASH_NODE_P_T node;
+
+  if (hash_tbl == NULL) return VIP_EINVAL_HNDL;
+
+  MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+
+  bucket_n = __VIP_HASH_FUNC(key) % hash_tbl->buckets;
+  bucket = GET_BUCKET_BY_IX(hash_tbl, bucket_n);
+
+  /* Try to find */
+  for (node = *bucket; node; node = node->next) {
+#if __VIP_HASH_VARIANT == 4
+    if (!memcmp((const void *)&node->key[0], (const void *)&key[0], sizeof(__VIP_HASH_KEY_T))) {
+#else
+    if (node->key == key) {
+#endif
+      if (val_p) *val_p = &(node->val);
+      MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+      return VIP_OK;
+    }
+  }
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+  return VIP_EINVAL_HNDL;
+} /* VIP_hash[p]_find_ptr */
+
+
+/********************************************************************************
+ * Function: VIP_hash[p]_get_num_of_objects
+ *
+ * Arguments:
+ * hash_tbl (IN) - table
+ *
+ * Returns:
+ * number of objects in the array
+ *
+ * Description:
+ * Get number of objects
+ *
+ ********************************************************************************/
+u_int32_t
+__VIP_HASH_PICK(VIP_hash_get_num_of_objects,
+                VIP_hashp_get_num_of_objects,
+                VIP_hashp2p_get_num_of_objects,
+                VIP_hash64p_get_num_of_objects,
+                VIP_hashv4p_get_num_of_objects)
+(
+  __VIP_HASH_T hash_tbl)
+{
+  return hash_tbl->size;
+} /* VIP_hash[p]_get_num_of_objects */
+
+/********************************************************************************
+ * Function: VIP_hash[p]_get_num_of_buckets
+ *
+ * Arguments:
+ * hash_tbl (IN) - table
+ *
+ * Returns:
+ * number of buckets in the array
+ *
+ * Description:
+ * Get number of buckets
+ *
+ ********************************************************************************/
+u_int32_t
+__VIP_HASH_PICK(VIP_hash_get_num_of_buckets,
+                VIP_hashp_get_num_of_buckets,
+                VIP_hashp2p_get_num_of_buckets,
+                VIP_hash64p_get_num_of_buckets,
+                VIP_hashv4p_get_num_of_buckets)
+(
+  __VIP_HASH_T hash_tbl)
+{
+  return hash_tbl->buckets;
+} /* VIP_hash[p]_get_num_of_buckets */
+
+/************************************************************************/
+MT_bool __VIP_HASH_PICK(VIP_hash_may_grow,
+                        VIP_hashp_may_grow,
+                        VIP_hashp2p_may_grow,
+                        VIP_hash64p_may_grow,
+                        VIP_hashv4p_may_grow)
+(
+  __VIP_HASH_T hash_tbl,
+  MT_bool flag)
+{
+  MT_bool old = hash_tbl->may_grow;
+  hash_tbl->may_grow = flag;
+  return old;
+} /* VIP_hash[p]_may_grow */
+
+
+/************************************************************************/
+void __VIP_HASH_PICK(VIP_hash_traverse,
+                     VIP_hashp_traverse,
+                     VIP_hashp2p_traverse,
+                     VIP_hash64p_traverse,
+                     VIP_hashv4p_traverse)
+(
+  __VIP_HASH_T hash_tbl,
+  int (*ufunc)(__VIP_HASH_KEY_T key, __VIP_HASH_VAL_T val, void* vp),
+  void* udata
+)
+{
+  int i, j, go = 1;
+  register u_int32_t buckets_per_blk = 0;
+  register u_int32_t curr_blocks;
+  register __VIP_HASH_NODE_P_T* sec_lvl_block;
+  register __VIP_HASH_NODE_P_T node;
+  register __VIP_HASH_NODE_P_T next;
+
+  if (hash_tbl == NULL) return;
+
+  curr_blocks = HASH_CALC_NUM_2ND_LVL_BLOCKS(hash_tbl->buckets, hash_tbl);
+  buckets_per_blk = hash_tbl->sec_lvl_buckets_per_blk;
+
+  MOSAL_spinlock_dpc_lock(&(hash_tbl->hash_lock));
+
+  for (i = 0; go && (i < (int)curr_blocks); i++)
+  {
+    if (i == (int)curr_blocks-1) {
+      /* adjust for last 2nd level block having fewer entries */
+      buckets_per_blk = hash_tbl->buckets - (i * buckets_per_blk);
+    }
+    sec_lvl_block = hash_tbl->nodes_1st_lvl_begin[i];
+    for (j = 0; go && (j < (int)buckets_per_blk); j++) {
+      for (node = *(sec_lvl_block+j); node && go; node = next)
+      {
+        next = node->next;
+        go = (*ufunc)(node->key, node->val, udata);
+      }
+    }
+  }
+
+  MOSAL_spinlock_unlock(&(hash_tbl->hash_lock));
+} /* VIP_hash[p]_traverse */
+
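A minimal usage sketch of the variant-0 flavor (illustration only, not part of the patch; per vip_hash.h, VIP_hash_key_t is u_int32_t and VIP_hash_value_t is VIP_array_handle_t; `my_handle` stands in for a real handle):

    VIP_hash_p_t h;
    VIP_hash_value_t v;

    if (VIP_hash_create(0, &h) != VIP_OK)     /* 0 = start empty, may grow */
        return;
    if (VIP_hash_insert(h, 17, my_handle) == VIP_OK) {
        if (VIP_hash_find(h, 17, &v) == VIP_OK) {
            /* v == my_handle */
        }
        VIP_hash_erase(h, 17, NULL);          /* NULL: discard the old value */
    }
    VIP_hash_destroy(h);                      /* variant 0 takes no callback */

Inserting a key that already exists returns VIP_EBUSY and leaves the table unchanged; use insert_ptr/find_ptr when the stored value must be updated in place.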
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ih b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ih
new file mode 100644
index 00000000..3d0d183a
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash.ih
@@ -0,0 +1,475 @@
+/*
+ * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id:$
+ */
+
+
+/*
+ * Description: Common header for vip_hash{,p}.h
+ *
+ * For the sake of unifying functionality and implementation,
+ * we use here (and so in vip_hash.ic) magic macros that do
+ * "Manual C++-like template functions".
+ * Thus the functions come in (currently five) flavors, e.g.:
+ * VIP_hash_foo(...) - where the entry's value is VIP_array_handle_t
+ * VIP_hashp_foo(...) - where the entry's value is void*
+ * (and likewise VIP_hashp2p_foo, VIP_hash64p_foo and VIP_hashv4p_foo).
+ *
+ * Notes:
+ * This 'inline' header file
+ * + must NOT be included directly by clients.
+ * + is intentionally not guarded with {#ifndef foo, #define foo, #endif}.
+ *
+ * Version: $Id$
+ *
+ * Authors:
+ *   MST - mtsirkin@mellanox.co.il
+ *   yotam - yotamm@mellanox.co.il
+ *
+ * Changes:
+ */
+
+
+#if !defined(__VIP_HASH_VARIANT)
+# error __VIP_HASH_VARIANT undefined
+#endif
+
+#if !defined(__VIP_hash_key_t_DEFINED)
+#define __VIP_hash_key_t_DEFINED
+
+ typedef u_int32_t VIP_hash_key_t;
+ typedef VIP_hash_key_t VIP_hashp_key_t;
+ typedef void* VIP_hashp2p_key_t;
+ typedef u_int64_t VIP_hash64p_key_t;
+ typedef u_int32_t VIP_hashv4p_key_t[4];
+
+#endif /* __VIP_hash_key_t_DEFINED */
+
+
+/* Pseudo C++ template for poor C */
+# undef __VIP_HASH_KEY_T
+# undef __VIP_HASH_VAL_T
+# undef __VIP_HASH_T
+# undef __VIP_HASH_PICK
+
+#if __VIP_HASH_VARIANT == 0
+# define __VIP_HASH_KEY_T VIP_hash_key_t
+# define __VIP_HASH_VAL_T VIP_hash_value_t
+# define __VIP_HASH_T VIP_hash_p_t
+# define __VIP_HASH_PICK(id, idp, idp2p, id64p, idv4p) id
+#elif __VIP_HASH_VARIANT == 1
+# define __VIP_HASH_KEY_T VIP_hash_key_t
+# define __VIP_HASH_VAL_T VIP_hashp_value_t
+# define __VIP_HASH_T VIP_hashp_p_t
+# define __VIP_HASH_PICK(id1, idp, idp2p, id64p, idv4p) idp
+#elif __VIP_HASH_VARIANT == 2
+# define __VIP_HASH_KEY_T VIP_hashp2p_key_t
+# define __VIP_HASH_VAL_T VIP_hashp2p_value_t
+# define __VIP_HASH_T VIP_hashp2p_p_t
+# define __VIP_HASH_PICK(id1, idp, idp2p, id64p, idv4p) idp2p
+#elif __VIP_HASH_VARIANT == 3
+# define __VIP_HASH_KEY_T VIP_hash64p_key_t
+# define __VIP_HASH_VAL_T VIP_hash64p_value_t
+# define __VIP_HASH_T VIP_hash64p_p_t
+# define __VIP_HASH_PICK(id1, idp, idp2p, id64p, idv4p) id64p
+#elif __VIP_HASH_VARIANT == 4
+# define __VIP_HASH_KEY_T VIP_hashv4p_key_t
+# define __VIP_HASH_VAL_T VIP_hashv4p_value_t
+# define __VIP_HASH_T VIP_hashv4p_p_t
+# define __VIP_HASH_PICK(id1, idp, idp2p, id64p, idv4p) idv4p
+#else
+# error Unsupported __VIP_HASH_VARIANT variant
+#endif
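Illustration only: __VIP_HASH_PICK selects the identifier for whichever variant is being compiled, so a single body in vip_hash.ic stamps out all five function families. For example, while __VIP_HASH_VARIANT == 3:

    /* __VIP_HASH_PICK(VIP_hash_create,
     *                 VIP_hashp_create,
     *                 VIP_hashp2p_create,
     *                 VIP_hash64p_create,
     *                 VIP_hashv4p_create)
     * expands to its 4th argument, VIP_hash64p_create, while
     * __VIP_HASH_KEY_T and __VIP_HASH_T expand to
     * VIP_hash64p_key_t (u_int64_t) and VIP_hash64p_p_t respectively.
     */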
+ * This may_grow flag is initialized with TRUE. When the allocated number of + * buckets reaches the maximum value, the may_grow flag is set FALSE. + * + * When may_grow is TRUE, the table will try to set + * the internal buckets size to some prime larger than + * the content size of the table. The buckets may grow + * upon insert. Insertion can succeed even when failing to grow. + * In such cases, the bucket lists will simply get longer. + * + ******************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_create_maxsize, + VIP_hashp_create_maxsize, + VIP_hashp2p_create_maxsize, + VIP_hash64p_create_maxsize, + VIP_hashv4p_create_maxsize) +(u_int32_t reserve, u_int32_t max_size, __VIP_HASH_T* hash_p); + + +/******************************************************************************** + * Function: VIP_hash_create + * + * Arguments: + * reserve (IN) - (Approximate) initial size of the table. + * Specify 0 for empty initialization that may grow. + * + * VIP_hash_p (OUT) - Return new VIP_hash object here + * Set to NULL in case of an error + * + * Returns: + * VIP_OK, + * VIP_EAGAIN: Not enough resources + * + * Description: + * Create a new VIP_hash table. + * The table maintains a logical flag controlling whether + * its buckets-size may grow. This can be set later with + * the VIP_hash_may_grow(flag) function (see later). + * This may_grow flag is initialized with TRUE. + * + * When may_grow is TRUE, the table will try to set + * the internal buckets size to some prime larger than + * the content size of the table. The buckets may grow + * upon insert. Insertion can succeed even when failing to grow. + * In such cases, the bucket lists will simply get longer. + * + ******************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_create, + VIP_hashp_create, + VIP_hashp2p_create, + VIP_hash64p_create, + VIP_hashv4p_create) +(u_int32_t reserve, __VIP_HASH_T* hash_p); + + +/******************************************************************************** + * Function: VIP_hash_destroy + * + * Arguments: + * hash_tbl (IN) - Object to destroy + * For the 'hashp' variant: + * free_objects_fun (IN) - If non zero, call this function + * for each object in the array (can be used + * e.g. to deallocate memory). + * Even if zero, the table is still deallocated. + * priv_data (IN) - Private data (e.g., object context) common to all objects + * (valid only if free_objects_fun != NULL) + * + * Returns: + * VIP_OK + * + * Description: + * Clean up resources for a hash_tbl table + * + * NOTE: + * The given free_objects_fun is invoked at DPC level.
+ * It may not "go to sleep" and may not invoke VIP_hash functions for this table + * + ******************************************************************************/ +VIP_common_ret_t +#if __VIP_HASH_VARIANT != 1 && __VIP_HASH_VARIANT != 2 && __VIP_HASH_VARIANT != 3 && __VIP_HASH_VARIANT != 4 + __VIP_HASH_PICK(VIP_hash_destroy, + VIP_hashp_destroy BUTNOTUSEDHERE, + VIP_hashp2p_destroy BUTNOTUSEDHERE, + VIP_hash64p_destroy BUTNOTUSEDHERE, + VIP_hashv4p_destroy BUTNOTUSEDHERE) + (__VIP_HASH_T hash_tbl) +#else /* __VIP_HASH_VARIANT == 1, 2, 3 or 4 */ + __VIP_HASH_PICK(VIP_hash_destroy BUTNOTUSEDHERE, + VIP_hashp_destroy, + VIP_hashp2p_destroy , + VIP_hash64p_destroy, + VIP_hashv4p_destroy) ( + __VIP_HASH_T hash_tbl, + void (*free_objects_fun)(__VIP_HASH_KEY_T key, __VIP_HASH_VAL_T val, void* priv_data), + void* priv_data + ) +#endif +; + +/******************************************************************************** + * Function: VIP_hash_insert + * + * Arguments: + * hash_tbl (IN) - Insert in this table + * key (IN) - Key to insert + * val (IN) - Value to insert + * + * Returns: + * VIP_OK, + * VIP_EBUSY: the key is already in the hash_tbl + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate this value with this key. + * + ********************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_insert, + VIP_hashp_insert, + VIP_hashp2p_insert, + VIP_hash64p_insert, + VIP_hashv4p_insert)( + __VIP_HASH_T hash_tbl, + __VIP_HASH_KEY_T key, + __VIP_HASH_VAL_T val); + +/******************************************************************************** + * Function: VIP_hash_insert_ptr + * + * Arguments: + * hash_tbl (IN) - Insert in this table + * key (IN) - Key to insert + * tval_p (OUT) - Value associated with the key + * + * Returns: + * VIP_OK, + * VIP_EBUSY: the key is already in the hash_tbl + * VIP_EAGAIN: not enough resources + * + * Description: + * Associate a new value with this key. + * This is like VIP_hash_insert, but outputs a pointer + * to the value field, through which the value may be set later.
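+ *
+ * Illustrative sketch (hypothetical names; 'tbl' is a table created with
+ * VIP_hashp_create, so values are void*, and 'my_obj_p' is a placeholder):
+ *   VIP_hashp_value_t* slot_p;
+ *   if (VIP_hashp_insert_ptr(tbl, key, &slot_p) == VIP_OK) {
+ *     *slot_p = my_obj_p;   /* fill the value in place, after insertion */
+ *   }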
+ * + * Note: if the key is already in the hash_tbl, + * the returned value in tval_p points to the existing entry + * + *****************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_insert_ptr, + VIP_hashp_insert_ptr, + VIP_hashp2p_insert_ptr, + VIP_hash64p_insert_ptr, + VIP_hashv4p_insert_ptr)( + __VIP_HASH_T hash_tbl, + __VIP_HASH_KEY_T key, + __VIP_HASH_VAL_T** tval_p); + +/****************************************************************************** + * Function: VIP_hash_erase + * + * Arguments: + * hash_tbl (IN) - Erase in this table + * key (IN) - remove value by this key + * val (OUT) - if non zero, returns the value by this key here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: key is not in the hash_tbl + * + * Description: + * Remove the value associated with this key + * Note: fails if key is not already in the hash_tbl + * + ******************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_erase, + VIP_hashp_erase, + VIP_hashp2p_erase, + VIP_hash64p_erase, + VIP_hashv4p_erase)( + __VIP_HASH_T hash_tbl, + __VIP_HASH_KEY_T key, + __VIP_HASH_VAL_T* val); + +/****************************************************************************** + * Function: VIP_hash_find + * + * Arguments: + * hash_tbl (IN) - Find in this table + * key (IN) - find value by this key + * val (OUT) - if non zero, returns the value by this key here + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: key is not in the hash_tbl + * + * Description: + * Find the value associated with this key + * Note: fails if key is not already in the hash_tbl + * + ******************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_find, + VIP_hashp_find, + VIP_hashp2p_find, + VIP_hash64p_find, + VIP_hashv4p_find)( + __VIP_HASH_T hash_tbl, + __VIP_HASH_KEY_T key, + __VIP_HASH_VAL_T* val); + +/****************************************************************************** + * Function: VIP_hash_find_ptr + * + * Arguments: + * hash_tbl (IN) - Find in this table + * key (IN) - look up value by this key + * val_p (OUT) - if non zero, contains the pointer to the entry + * corresponding to given key + * + * Returns: + * VIP_OK, + * VIP_EINVAL_HNDL: key is not in the hash_tbl + * + * Description: + * Find the value associated with this key + * This is like VIP_hash[p]_find, but returns pointer to the + * value field, which makes it possible to modify + * the value stored by this key.
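+ *
+ * Illustrative sketch (hypothetical names, using the VIP_hashp flavor):
+ *   VIP_hashp_value_t* val_p;
+ *   if (VIP_hashp_find_ptr(tbl, key, &val_p) == VIP_OK) {
+ *     *val_p = new_obj_p;   /* update the stored value in place */
+ *   }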
+ * + * Note: fails if key is not already in the hash_tbl + * + ******************************************************************************/ +VIP_common_ret_t __VIP_HASH_PICK(VIP_hash_find_ptr, + VIP_hashp_find_ptr, + VIP_hashp2p_find_ptr, + VIP_hash64p_find_ptr, + VIP_hashv4p_find_ptr)( + __VIP_HASH_T hash_tbl, + __VIP_HASH_KEY_T key, + __VIP_HASH_VAL_T** val_p ); + +/******************************************************************************** + * Function: VIP_hash[p]_get_num_of_buckets + * + * Arguments: + * hash_tbl (IN) - table + * + * Returns: + * number of buckets in the array + * + * Description: + * Get number of buckets + * + ********************************************************************************/ +u_int32_t +__VIP_HASH_PICK(VIP_hash_get_num_of_buckets, + VIP_hashp_get_num_of_buckets, + VIP_hashp2p_get_num_of_buckets, + VIP_hash64p_get_num_of_buckets, + VIP_hashv4p_get_num_of_buckets)( + __VIP_HASH_T hash_tbl); + +/******************************************************************************* + * Function: VIP_hash_get_num_of_objects + * + * Arguments: + * hash_tbl (IN) - table + * + * Returns: + * number of objects in the array + * + * Description: + * Get number of objects + * + ******************************************************************************/ +u_int32_t +__VIP_HASH_PICK(VIP_hash_get_num_of_objects, + VIP_hashp_get_num_of_objects, + VIP_hashp2p_get_num_of_objects, + VIP_hash64p_get_num_of_objects, + VIP_hashv4p_get_num_of_objects)( + __VIP_HASH_T hash_tbl); + +/******************************************************************************* + * Function: VIP_hash_may_grow + * + * Arguments: + * hash_tbl (IN) - The table handler. + * flag (IN) - Boolean value determining whether the buckets table + * can grow in size. See VIP_hash_create(...) + * Returns: + * Current (old) setting. + * + * Description: + * Set the 'may_grow' flag to allow resizing of the buckets + * to some prime larger than the logical size upon insertion. + * Note: if the buckets are still empty (as since initialization), + * the may_grow flag will remain TRUE and the call will be ignored. + */ + +MT_bool __VIP_HASH_PICK(VIP_hash_may_grow, + VIP_hashp_may_grow, + VIP_hashp2p_may_grow, + VIP_hash64p_may_grow, + VIP_hashv4p_may_grow)( + __VIP_HASH_T hash_tbl, + MT_bool flag); + + + +/******************************************************************************* + * Function: VIP_hash_traverse + * + * Arguments: + * hash_tbl (IN) - table + * ufunc (IN) - function of given prototype. + * Traverse will continue as long as ufunc(...) + * returns non zero. + * udata (IN) - client data pointer, to be passed to ufunc. + * + * Returns: + * None + * + * Description: + * Traverse the buckets, and call ufunc with all entries. + * Stops at end, or when user function returns 0. + * Could be significantly improved if we kept + * a linked list of actually used buckets. + * + * Note: + * The given ufunc is invoked at DPC level. + * The given ufunc may NOT invoke any of VIP_hash functions (for this hash table). + * (doing so WILL cause a deadlock!)
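+ *
+ * Illustrative sketch (hypothetical counting callback, VIP_hashp flavor):
+ *   static int count_cb(VIP_hashp_key_t key, VIP_hashp_value_t val, void* vp)
+ *   {
+ *     (*(u_int32_t*)vp)++;
+ *     return 1;             /* non zero: keep traversing */
+ *   }
+ *   ...
+ *   u_int32_t n = 0;
+ *   VIP_hashp_traverse(tbl, count_cb, &n);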
+ * + ******************************************************************************/ +void __VIP_HASH_PICK(VIP_hash_traverse, + VIP_hashp_traverse, + VIP_hashp2p_traverse, + VIP_hash64p_traverse, + VIP_hashv4p_traverse)( + __VIP_HASH_T hash_tbl, + int (*ufunc)(__VIP_HASH_KEY_T key, __VIP_HASH_VAL_T val, void* vp), + void* udata +); + + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash64p.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash64p.h new file mode 100644 index 00000000..4a0baabd --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hash64p.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_COMMON_VIP_hash64p_H +#define VIP_COMMON_VIP_hash64p_H + +#include +#include "vip_common.h" + +typedef void* VIP_hash64p_value_t; +typedef struct VIP_hash64p_t* VIP_hash64p_p_t; + +#undef __VIP_HASH_VARIANT +#define __VIP_HASH_VARIANT 3 +#include "vip_hash.ih" + +#endif /* VIP_COMMON_VIP_hash64p_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp.h new file mode 100644 index 00000000..addb8230 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_COMMON_VIP_hashp_H +#define VIP_COMMON_VIP_hashp_H + +#include +#include "vip_common.h" + +typedef void* VIP_hashp_value_t; +typedef struct VIP_hashp_t* VIP_hashp_p_t; + +#undef __VIP_HASH_VARIANT +#define __VIP_HASH_VARIANT 1 +#include "vip_hash.ih" + +#endif /* VIP_COMMON_VIP_hashp_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp2p.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp2p.h new file mode 100644 index 00000000..214d03d3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashp2p.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_COMMON_VIP_hashp2p_H +#define VIP_COMMON_VIP_hashp2p_H + +#include +#include "vip_common.h" + +typedef void* VIP_hashp2p_value_t; +typedef struct VIP_hashp2p_t* VIP_hashp2p_p_t; + +#undef __VIP_HASH_VARIANT +#define __VIP_HASH_VARIANT 2 +#include "vip_hash.ih" + +#endif /* VIP_COMMON_VIP_hashp2p_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashv4p.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashv4p.h new file mode 100644 index 00000000..0f5ff5b8 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/common/vip_hashv4p.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef VIP_COMMON_VIP_hashv4p_H +#define VIP_COMMON_VIP_hashv4p_H + +#include +#include "vip_common.h" + +typedef void* VIP_hashv4p_value_t; +typedef struct VIP_hashv4p_t* VIP_hashv4p_p_t; + +#undef __VIP_HASH_VARIANT +#define __VIP_HASH_VARIANT 4 +#include "vip_hash.ih" + +#endif /* VIP_COMMON_VIP_hashv4p_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/evapi.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/evapi.h new file mode 100644 index 00000000..24d22c2c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/evapi.h @@ -0,0 +1,1430 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_EVAPI_H +#define H_EVAPI_H + +#include +#include + + +/********************************************************** + * + * Function: EVAPI_get_hca_hndl + * + * Arguments: + * hca_id : HCA ID to get handle for + * hca_hndl_p : Returned handle + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_ID : No such opened HCA. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (open device file or ioctl) + * has returned an error. + * + * Description: + * Get handle of an already opened HCA. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_get_hca_hndl( + IN VAPI_hca_id_t hca_id, + OUT VAPI_hca_hndl_t *hca_hndl_p +); + +/********************************************************** + * + * Function: EVAPI_release_hca_hndl + * + * Arguments: + * hca_hndl : HCA handle for which to release process resources + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL : No such opened HCA. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Release all resources used by this process for an opened HCA.
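+ *
+ * Illustrative sketch ("InfiniHost0" is a made-up HCA ID):
+ *   VAPI_hca_hndl_t hca;
+ *   if (EVAPI_get_hca_hndl("InfiniHost0", &hca) == VAPI_OK) {
+ *     /* ... use hca with VAPI/EVAPI calls ... */
+ *     EVAPI_release_hca_hndl(hca);
+ *   }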
+ * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_release_hca_hndl( + IN VAPI_hca_hndl_t hca_hndl +); + + + +/********************************************************** + * + * Function: EVAPI_k_get_cq_hndl + * + * Arguments: + * hca_hndl : HCA handle + * cq_hndl : VAPI cq handle + * k_cq_hndl_p: pointer to kernel level handle for the cq + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_CQ_HNDL : No such CQ. + * + * Description: + * Get the vipkl cq handle for the cq. This may be used by a user level process + * to pass this handle to another kernel driver which may then use + * EVAPI_k_set/clear_comp_eventh() to attach/detach kernel handlers. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_get_cq_hndl( IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl, + OUT VAPI_k_cq_hndl_t *k_cq_hndl_p); + + +#ifdef __KERNEL__ +/********************************************************** + * + * Function: EVAPI_k_set_comp_eventh (kernel space only) + * + * Arguments: + * k_hca_hndl : HCA handle + * k_cq_hndl : cq handle + * completion_handler : handler to call for completions on + * Completion Queue cq_hndl + * private_data : pointer to data for completion handler + * completion_handler_hndl: returned handle to use for clearing this + * completion handler + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_CQ_HNDL : No such CQ. + * + * Description: + * Registers a specific completion handler to handle completions + * for a specific completion queue. The private data given here + * is provided to the completion callback when a completion occurs + * on the given CQ. If the private data is a pointer, it should point + * to static or "malloc'ed" data; the private data must be available + * until this completion handler instance is cleared (with + * EVAPI_k_clear_comp_eventh). + * + * Note: + * This function is exposed to kernel modules only. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_set_comp_eventh( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_cq_hndl_t k_cq_hndl, + IN VAPI_completion_event_handler_t completion_handler, + IN void * private_data, + OUT EVAPI_compl_handler_hndl_t *completion_handler_hndl ); + + +/********************************************************** + * + * Function: EVAPI_k_clear_comp_eventh + * + * Arguments: + * k_hca_hndl : HCA handle + * completion_handler_hndl: handle to use for clearing this + * completion handler + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_CQ_HNDL : No such CQ. + * + * Description: + * Clears a completion handler which was registered + * to handle completions for a specific completion queue. + * If a handler was not registered, returns OK anyway. + * + * Note: + * This function is exposed to kernel modules only.
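+ *
+ * Illustrative kernel-side sketch (my_cq_cb and my_ctx are hypothetical):
+ *   EVAPI_compl_handler_hndl_t h;
+ *   if (EVAPI_k_set_comp_eventh(k_hca, k_cq, my_cq_cb, my_ctx, &h) == VAPI_OK) {
+ *     /* ... completions on k_cq now invoke my_cq_cb ... */
+ *     EVAPI_k_clear_comp_eventh(k_hca, h);
+ *   }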
+ * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_clear_comp_eventh( + IN VAPI_hca_hndl_t k_hca_hndl, + IN EVAPI_compl_handler_hndl_t completion_handler_hndl); + + +/********************************************************** + * + * Function: EVAPI_k_set_destroy_cq_cbk + * + * Arguments: + * k_hca_hndl : HCA handle + * k_cq_hndl: Kernel level CQ handle as known from EVAPI_k_get_cq_hndl() + * cbk_func: Callback function to invoke when the CQ is destroyed + * private_data: Caller's context to be used when invoking the callback + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_CQ_HNDL : No such CQ. + * VAPI_EBUSY: A destroy_cq callback is already set for this CQ + * + * + * Description: + * Set a callback function that notifies the caller (a kernel module that + * uses EVAPI_k_set_comp_eventh) when a CQ is destroyed. + * The function is meant to be used in order to clean the kernel module's + * context for that resource, and not in order to clear the completion handler. + * The callback is invoked after the CQ handle is already invalid, + * so EVAPI_k_clear_comp_eventh is not supposed to be called for the + * obsolete CQ (the completion_handler_hndl is already invalid). + * This callback is implicitly cleared after it is called. + * + * Note: Only a single context in kernel may invoke this function per CQ. + * Simultaneous invocation by more than one kernel context, + * for the same CQ, will result in unexpected behavior. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_set_destroy_cq_cbk( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_cq_hndl_t k_cq_hndl, + IN EVAPI_destroy_cq_cbk_t cbk_func, + IN void* private_data +); + +/********************************************************** + * + * Function: EVAPI_k_clear_destroy_cq_cbk + * + * Arguments: + * k_hca_hndl : HCA handle + * k_cq_hndl: Kernel level CQ handle as known from EVAPI_k_get_cq_hndl() + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_CQ_HNDL : No such CQ. + * + * Description: + * Clear the callback function set in EVAPI_k_set_destroy_cq_cbk(). + * Use this function when the kernel module stops using the given k_cq_hndl. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_clear_destroy_cq_cbk( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_cq_hndl_t k_cq_hndl +); + + +#endif /* __KERNEL__ */ + + +/********************************************************** + * + * Function: EVAPI_set_comp_eventh + * + * Arguments: + * hca_hndl : HCA handle + * cq_hndl : cq handle + * completion_handler : handler to call for completions on + * Completion Queue cq_hndl + * private_data : pointer to data for completion handler + * completion_handler_hndl: returned handle to use for clearing this + * completion handler + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL : No such opened HCA. + * VAPI_EINVAL_CQ_HNDL : No such CQ. + * VAPI_EINVAL_PARAM: Event handler is NULL + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Registers a specific completion handler to handle completions + * for a specific completion queue. The private data given here + * is provided to the completion callback when a completion occurs + * on the given CQ.
If the private data is a pointer, it should point + * to static or "malloc'ed" data; the private data must be available + * until this completion handler instance is cleared (with + * EVAPI_clear_comp_eventh). + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_set_comp_eventh( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl, + IN VAPI_completion_event_handler_t completion_handler, + IN void * private_data, + OUT EVAPI_compl_handler_hndl_t *completion_handler_hndl ); + + +/********************************************************** + * + * Function: EVAPI_clear_comp_eventh + * + * Arguments: + * hca_hndl : HCA handle + * completion_handler_hndl: handle to use for clearing this + * completion handler + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL : No such opened HCA. + * VAPI_EINVAL_CQ_HNDL : No such CQ. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Clears a completion handler which was registered + * to handle completions for a specific completion queue. + * If a handler was not registered, returns OK anyway. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_clear_comp_eventh( + IN VAPI_hca_hndl_t hca_hndl, + IN EVAPI_compl_handler_hndl_t completion_handler_hndl ); + +#define EVAPI_POLL_CQ_UNBLOCK_HANDLER ((VAPI_completion_event_handler_t)(-2)) +/********************************************************** + * + * Function: EVAPI_poll_cq_block + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq_hndl: CQ Handle. + * timeout_usec: Timeout of blocking in micro-seconds (0 = infinite timeout) + * comp_desc_p: Pointer to work completion descriptor structure. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_CQ_HNDL: invalid CQ handle + * VAPI_EINVAL_PARAM: Event handler not initialized with EVAPI_POLL_CQ_UNBLOCK_HANDLER + * VAPI_ETIMEOUT: Blocking timed out (and got no completion event). + * VAPI_CQ_EMPTY: Blocking interrupted due to EVAPI_poll_cq_unblock() call OR + * got a completion event due to a previous call to this function or + * another request for completion notification. + * VAPI_EINTR: Operation interrupted (OS signal) + * + * Description: + * Poll given CQ and if empty, request completion notification event and + * then sleep until event received, then poll again and return result + * (even if still empty - to allow cancelling of blocking, e.g. on signals). + * + * Notes: + * 1) This function will block only if EVAPI_set_comp_eventh was invoked for this + * CQ with completion_handler=EVAPI_POLL_CQ_UNBLOCK_HANDLER. + * (EVAPI_clear_comp_eventh should be invoked for cleanup, as for regular callback) + * 2) One cannot set another completion event handler for this CQ + * (the handler is bound to the CQ unblocking handler). + * 3) VAPI_req_comp_notif should not be invoked explicitly for a CQ using this facility. + * 4) One may still use (non-blocking) VAPI_poll_cq() for this CQ. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_poll_cq_block( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl, + IN MT_size_t timeout_usec, + OUT VAPI_wc_desc_t *comp_desc_p + ); + +/********************************************************** + * + * Function: EVAPI_poll_cq_unblock + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq_hndl: CQ Handle.
+ * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_CQ_HNDL: invalid CQ handle + * + * Description: + * Signal a thread blocked with EVAPI_poll_cq_block() to "wake-up". + * (the "woken-up" thread will poll the CQ again anyway and return result/completion) + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_poll_cq_unblock( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl + ); + + +/********************************************************** + * + * Function: EVAPI_peek_cq + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq_hndl: CQ Handle. + * cqe_num: Number of CQE to peek to (next CQE is #1) + * + * Returns: + * VAPI_OK: At least cqe_num CQEs outstanding in given CQ + * VAPI_CQ_EMPTY: Less than cqe_num CQEs are outstanding in given CQ + * VAPI_E2BIG_CQ_NUM: cqe_num is beyond CQ size (or 0) + * VAPI_EINVAL_CQ_HNDL: invalid CQ handle + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * + * Description: + * Check if there are at least cqe_num CQEs outstanding in the CQ. + * (i.e., peek into the cqe_num CQE in the given CQ). + * No CQE is consumed from the CQ. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_peek_cq( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl, + IN VAPI_cqe_num_t cqe_num +); + +/************************************************************************* + * Function: EVAPI_req_ncomp_notif + * + * Arguments: + * hca_hndl: Handle to HCA. + * cq_hndl: CQ Handle. + * cqe_num: Number of outstanding CQEs which trigger this notification + * This may be 1 up to CQ size, limited by HCA capability (0x7FFF for InfiniHost) + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_CQ_HNDL: invalid CQ handle + * VAPI_E2BIG_CQ_NUM: cqe_num is beyond CQ size or beyond HCA notification capability (or 0) + * For InfiniHost cqe_num is limited to 0x7FFF. + * VAPI_EPERM: not enough permissions. + * + * Description: + * Request notification when CQ holds at least N (non-polled) CQEs + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_req_ncomp_notif( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq_hndl, + IN VAPI_cqe_num_t cqe_num + ); + + +/***************************************************************************** + * Function: EVAPI_list_hcas + * + * Arguments: + * hca_id_buf_sz(IN) : Number of entries in supplied array 'hca_id_buf_p', + * num_of_hcas_p(OUT) : Actual number of currently available HCAs + * hca_id_buf_p(OUT) : points to an array allocated by the caller of + * 'VAPI_hca_id_t' items, able to hold 'hca_id_buf_sz' + * entries of that item. + * + * Returns: VAPI_OK : operation successful. + * VAPI_EINVAL_PARAM : Invalid parameter. + * VAPI_EAGAIN : hca_id_buf_sz is smaller than num_of_hcas. In this case, NO hca_ids + * are returned in the provided array. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (open device file, or ioctl) + * has returned an error. + * + * Description: + * Used to get a list of the device IDs of the available devices. + * These names can then be used in VAPI_open_hca to open each + * device in turn. + * + * If the size of the supplied buffer is too small, the number of available devices + * is still returned in the num_of_hcas parameter, but the return code is set to + * VAPI_EAGAIN.
In this case, NO device IDs are returned; the user must simply supply + * a larger array and call this procedure again. (The user may call this function + * with hca_id_buf_sz = 0 and hca_id_buf_p = NULL to get the number of hcas currently + * available). + *****************************************************************************/ + +VAPI_ret_t MT_API EVAPI_list_hcas(/* IN*/ u_int32_t hca_id_buf_sz, + /*OUT*/ u_int32_t* num_of_hcas_p, + /*OUT*/ VAPI_hca_id_t* hca_id_buf_p); + +/********************************************************** + * Function: EVAPI_process_local_mad + * + * Arguments: + * hca_hndl : HCA handle + * port : port which received the MAD packet + * slid : Source LID of incoming MAD. Required for Mkey violation trap generation. + * (this parameter is ignored if EVAPI_MAD_IGNORE_MKEY flag is set) + * proc_mad_opts: Modifiers to MAD processing. + * currently, the only modifier is EVAPI_MAD_IGNORE_MKEY + * mad_in_p: pointer to MAD packet received + * mad_out_p: pointer to response MAD packet, if any + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL : No such opened HCA. + * VAPI_EINVAL_PORT : No such port. + * VAPI_EINVAL_PARAM : invalid parameter (error in mad_in packet) + * VAPI_EGEN + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Submits a MAD packet to the local HCA for processing. + * Obtains the response MAD. mad_out_p must be a buffer of + * size 256 (IB_MAD_LEN) allocated by caller. + * + * For the proc_mad_opts argument, if EVAPI_MAD_IGNORE_MKEY is given, MKEY + * will be ignored when processing the MAD. If zero is given for this argument + * MKEY validation will be performed (this is the default), and the given slid + * may be used to generate a trap in case of Mkey violation, as defined in IB-spec. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_process_local_mad( + IN VAPI_hca_hndl_t hca_hndl, + IN IB_port_t port, + IN IB_lid_t slid, /* ignored on EVAPI_MAD_IGNORE_MKEY */ + IN EVAPI_proc_mad_opt_t proc_mad_opts, + IN const void * mad_in_p, + OUT void * mad_out_p); + + +/********************************************************** + * Function: EVAPI_set/get_priv_context4qp/cq + * + * Arguments: + * hca_hndl : HCA handle + * qp/cq : QP/CQ to which the private context refers + * priv_context : Set/Returned private context associated with given QP/CQ + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL : No such opened HCA. + * VAPI_EINVAL_QP_HNDL/VAPI_EINVAL_CQ_HNDL : Unknown QP/CQ within current context + * + * Description: + * Set/Get private context for QP/CQ. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_set_priv_context4qp( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp, + IN void * priv_context); + +VAPI_ret_t MT_API EVAPI_get_priv_context4qp( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp, + OUT void ** priv_context_p); + +VAPI_ret_t MT_API EVAPI_set_priv_context4cq( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq, + IN void * priv_context); + +VAPI_ret_t MT_API EVAPI_get_priv_context4cq( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_cq_hndl_t cq, + OUT void ** priv_context_p); + + +#ifdef __KERNEL__ +/* FMRs are not allowed in user-space */ + +/************************************************************************* + * Function: EVAPI_alloc_fmr + * + * Arguments: + * hca_hndl : HCA Handle. + * fmr_props_p: Pointer to the requested fast memory region properties.
+ * fmr_hndl_p: Pointer to the fast memory region handle. + * + * Returns: + * VAPI_OK + * VAPI_EAGAIN: out of resources + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_PD_HNDL: invalid PD handle + * VAPI_EINVAL_LEN: invalid length + * VAPI_EINVAL_ACL: invalid ACL specifier (e.g. VAPI_EN_MEMREG_BIND) + * VAPI_EPERM: not enough permissions. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Allocate a fast memory region resource, to be used with EVAPI_map_fmr/EVAPI_unmap_fmr + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_alloc_fmr( + IN VAPI_hca_hndl_t hca_hndl, + IN EVAPI_fmr_t *fmr_props_p, + OUT EVAPI_fmr_hndl_t *fmr_hndl_p +); + +/************************************************************************* + * Function: EVAPI_map_fmr + * + * Arguments: + * hca_hndl : HCA Handle. + * fmr_hndl: The fast memory region handle. + * map_p: Properties of mapping request + * l_key_p: Allocated L-Key for the new mapping + * (may be different than prev. mapping of the same FMR) + * r_key_p: Allocated R-Key for the new mapping + * + * Returns: + * VAPI_OK + * VAPI_EAGAIN: out of resources (invoke EVAPI_unmap_fmr for this region and then retry) + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_MR_HNDL: invalid memory region handle (e.g. not a FMR region) + * VAPI_EPERM: not enough permissions. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Map given memory block to this fast memory region resource. + * Upon a return from this function, the new L-key/R-key may be used in regard to CI operations + * over the given memory block. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_map_fmr( + IN VAPI_hca_hndl_t hca_hndl, + IN EVAPI_fmr_hndl_t fmr_hndl, + IN EVAPI_fmr_map_t *map_p, + OUT VAPI_lkey_t *l_key_p, + OUT VAPI_rkey_t *r_key_p +); + + +/************************************************************************* + * Function: EVAPI_unmap_fmr + * + * Arguments: + * hca_hndl : HCA Handle. + * num_of_fmrs_to_unmap: Number of memory regions handles in given array + * fmr_hndls_array: Array of num_of_fmrs_to_unmap FMR handles to unmap (max limit: 2000 handles) + * + * Returns: + * VAPI_OK + * VAPI_EAGAIN: out of resources + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_MR_HNDL: invalid memory region handle (e.g. not a FMR region) + * VAPI_EPERM: not enough permissions. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Unmap given FMRs. + * In case of a failure other than VAPI_EINVAL_HCA_HNDL or VAPI_ESYSCALL, + * the state of the FMRs is undefined (some may still be mapped while others unmapped). + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_unmap_fmr( + IN VAPI_hca_hndl_t hca_hndl, + IN MT_size_t num_of_fmrs_to_unmap, + IN EVAPI_fmr_hndl_t *fmr_hndls_array +); + +/************************************************************************* + * Function: EVAPI_free_fmr + * + * Arguments: + * hca_hndl : HCA Handle. + * fmr_hndl: The fast memory region handle. + * + * Returns: + * VAPI_OK + * VAPI_EAGAIN: out of resources + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_MR_HNDL: invalid memory region handle (e.g. not a FMR region, or an u) + * VAPI_EPERM: not enough permissions.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Free given FMR resource. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_free_fmr( + IN VAPI_hca_hndl_t hca_hndl, + IN EVAPI_fmr_hndl_t fmr_hndl +); + +#endif /* FMRs in kernel only */ + +/* ************************************************************************* + * Function: EVAPI_post_inline_sr + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_hndl: QP Handle. + * sr_desc_p: Pointer to the send request descriptor attributes structure. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * VAPI_E2BIG_WR_NUM: Too many posted work requests. + * VAPI_EINVAL_OP: invalid operation + * VAPI_EINVAL_QP_STATE: invalid QP state + * VAPI_EINVAL_NOTIF_TYPE: invalid completion notification type + * VAPI_EINVAL_SG_FMT: invalid scatter/gather list format + * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length + * (too much data for inline send with this QP) + * VAPI_EINVAL_AH: invalid address handle + * VAPI_EPERM: not enough permissions. + * + * Description: + * Post data in given gather list as inline data in a send WQE. + * (Only for Sends and RDMA-writes, with optional immediate) + * + * Note: + * 1) No L-key checks are done. Data is copied to WQE from given virtual address in + * this process memory space. + * 2) Maximum data is limited by maximum WQE size for this QP's + * send queue. Information on this limitation may be queried via VAPI_query_qp + * (property max_inline_data_sq in QP capabilities). + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_post_inline_sr( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp_hndl, + IN VAPI_sr_desc_t *sr_desc_p +); + +/* ************************************************************************* + * Function: EVAPI_post_sr_list + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_hndl: QP Handle. + * num_of_requests: Number of send requests in the given array + * sr_desc_array: Pointer to an array of num_of_requests send requests + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * VAPI_E2BIG_WR_NUM: Too many posted work requests. + * VAPI_EINVAL_OP: invalid operation + * VAPI_EINVAL_QP_STATE: invalid QP state + * VAPI_EINVAL_NOTIF_TYPE: invalid completion notification + * type + * VAPI_EINVAL_SG_FMT: invalid scatter/gather list format + * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length + * VAPI_EINVAL_AH: invalid address handle + * VAPI_EAGAIN: not enough resources to complete operation (not enough WQEs) + * VAPI_EPERM: not enough permissions. + * VAPI_EINVAL_PARAM: num_of_requests is 0 or sr_desc_array is NULL + * + * Description: + * The verb posts num_of_requests send queue work requests, as given in the sr_desc_array. + * In case of a failure none of the requests is posted ("all or nothing"). + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_post_sr_list( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp_hndl, + IN u_int32_t num_of_requests, + IN VAPI_sr_desc_t *sr_desc_array + ); + +/* ************************************************************************* + * Function: EVAPI_post_gsi_sr + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_hndl : QP Handle. + * sr_desc_p : Pointer to the send request descriptor attributes structure.
+ * pkey_index: P-Key index in Pkey table of the port of the QP to put in BTH of sent GMP + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle (or not a GSI QP) + * VAPI_E2BIG_WR_NUM: Too many posted work requests. + * VAPI_EINVAL_OP: invalid operation + * VAPI_EINVAL_QP_STATE: invalid QP state + * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length + * VAPI_EINVAL_AH: invalid address handle + * VAPI_EPERM: not enough permissions. + * + * Description: + * The verb posts a send queue work request to the given GSI QP, with given P-key index + * used in the GMP's BTH, instead of the QP's P-key. + * This function has identical semantics to VAPI_post_sr, but for the overridden P-Key index. + * Using this function allows one to change the P-key used by the given GSI QP without having + * to modify the QP to SQD state first. + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_post_gsi_sr( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp_hndl, + IN VAPI_sr_desc_t *sr_desc_p, + IN VAPI_pkey_ix_t pkey_index + ); + +/************************************************************************ + * Function: EVAPI_post_rr_list + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_hndl: QP Handle. + * num_of_requests: Number of receive requests in the given array + * rr_desc_array: Pointer to an array of num_of_requests receive requests + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * VAPI_EINVAL_SRQ_HNDL: QP handle used for a QP associated with a SRQ (use VAPI_post_srq) + * VAPI_E2BIG_WR_NUM: Too many posted work requests. + * VAPI_EINVAL_OP: invalid operation + * VAPI_EINVAL_QP_STATE: invalid QP state + * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length + * VAPI_EAGAIN: Not enough resources to complete operation (not enough WQEs) + * VAPI_EPERM: not enough permissions. + * VAPI_EINVAL_PARAM: num_of_requests is 0 or rr_desc_array is NULL + * + * Description: + * The verb posts all the given receive requests to the receive queue. + * Given QP must have num_of_requests available WQEs in its receive queue. + * In case of a failure none of the requests is posted ("all or nothing"). + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_post_rr_list( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_qp_hndl_t qp_hndl, + IN u_int32_t num_of_requests, + IN VAPI_rr_desc_t *rr_desc_array + ); + + +/* ************************************************************************* + * Function: EVAPI_k_get_qp_hndl + * + * Arguments: + * 1) hca_hndl : HCA Handle. + * 2) qp_ul_hndl: user level QP Handle. + * 3) qp_kl_hndl_p: Pointer to the kernel level handle of the QP requested in argument 2. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * + * Description: + * Retrieve the kernel level handle of a QP in user level. Should be invoked in user level + * to get a handle to be used by kernel level code. This handle is valid only for special + * verbs as described below. + * + * Note: + * The kernel QP handle is passed to the kernel module by the application. It should use + * some IOCTL path to this kernel module.
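+ *
+ * Illustrative user-level sketch (my_drv_fd and MY_DRV_IOC_SET_QP are
+ * hypothetical; the actual ioctl path depends on the kernel module):
+ *   VAPI_k_qp_hndl_t k_qp;
+ *   if (EVAPI_k_get_qp_hndl(hca, qp, &k_qp) == VAPI_OK)
+ *     ioctl(my_drv_fd, MY_DRV_IOC_SET_QP, &k_qp);  /* hand off to the module */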
+ * + *************************************************************************/ + +VAPI_ret_t MT_API EVAPI_k_get_qp_hndl( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ VAPI_qp_hndl_t qp_ul_hndl, + /*OUT*/ VAPI_k_qp_hndl_t *qp_kl_hndl_p); + + +#ifdef __KERNEL__ + +/* ************************************************************************* + * Function: EVAPI_k_modify_qp + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_kl_hndl: QP kernel level handle. + * qp_attr_p: Pointer to QP attributes to be modified. + * qp_attr_mask_p: Pointer to the attributes mask to be modified. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * VAPI_EAGAIN: out of resources. + * VAPI_ENOSYS_ATTR: QP attribute is not supported. + * VAPI_EINVAL_ATTR: can not change QP attribute. + * VAPI_EINVAL_PKEY_IX: PKey index out of range. + * VAPI_EINVAL_PKEY_TBL_ENTRY: Pkey index points to an invalid entry in pkey table. + * VAPI_EINVAL_QP_STATE: invalid QP state. + * VAPI_EINVAL_RDD_HNDL: invalid RDD domain handle. + * VAPI_EINVAL_MIG_STATE: invalid path migration state. + * VAPI_E2BIG_MTU: MTU exceeds HCA port capabilities + * VAPI_EINVAL_PORT: invalid port + * VAPI_EINVAL_SERVICE_TYPE: invalid service type + * VAPI_E2BIG_WR_NUM: maximum number of WR requested exceeds HCA capabilities + * VAPI_EINVAL_RNR_NAK_TIMER: invalid RNR NAK timer value + * VAPI_EPERM: not enough permissions. + * + * Description: + * Modify QP state of the user level QP by using the kernel level QP handle. + * + * Note: + * Supported only in kernel modules. + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_k_modify_qp( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ VAPI_k_qp_hndl_t qp_kl_hndl, + /*IN*/ VAPI_qp_attr_t *qp_attr_p, + /*IN*/ VAPI_qp_attr_mask_t *qp_attr_mask_p +); + +/********************************************************** + * + * Function: EVAPI_k_set_destroy_qp_cbk + * + * Arguments: + * k_hca_hndl : HCA handle + * k_qp_hndl: Kernel level QP handle as known from EVAPI_k_get_qp_hndl() + * cbk_func: Callback function to invoke when the QP is destroyed + * private_data: Caller's context to be used when invoking the callback + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA. + * VIP_EINVAL_QP_HNDL : No such QP + * VAPI_EBUSY: A destroy_qp callback is already set for this QP + * + * + * Description: + * Set a callback function that notifies the caller (a kernel module that + * uses EVAPI_k_set_comp_eventh) when a QP is destroyed. + * The function is meant to be used in order to clean the kernel module's + * context for that resource. + * This callback is implicitly cleared after it is called. + * + * Note: Only a single context in kernel may invoke this function per QP. + * Simultaneous invocation by more than one kernel context, + * for the same QP, will result in unexpected behavior. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_set_destroy_qp_cbk( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_qp_hndl_t k_qp_hndl, + IN EVAPI_destroy_qp_cbk_t cbk_func, + IN void* private_data +); + +/********************************************************** + * + * Function: EVAPI_k_clear_destroy_qp_cbk + * + * Arguments: + * k_hca_hndl : HCA handle + * k_qp_hndl: Kernel level QP handle as known from EVAPI_k_get_qp_hndl() + * + * Returns: + * VIP_OK + * VIP_EINVAL_HCA_HNDL : No such opened HCA.
+ * VIP_EINVAL_QP_HNDL : No such QP. + * + * Description: + * Clear the callback function set in EVAPI_k_set_destroy_qp_cbk(). + * Use this function when the kernel module stops using the given k_qp_hndl. + * + **********************************************************/ +VAPI_ret_t MT_API EVAPI_k_clear_destroy_qp_cbk( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_qp_hndl_t k_qp_hndl +); + +#endif /*__KERNEL__ */ + +/************************************************************************** + * Function: EVAPI_k_sync_qp_state + * + * Arguments: + * hca_hndl : HCA Handle. + * qp_ul_hndl: user level QP Handle. + * curr_state: The state that should be synched to + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EINVAL_QP_HNDL: invalid QP handle + * + * Description: + * This function synchronizes the user level QP with a QP state which was modified + * by a kernel level agent (as returned from the kernel agent via its IOCTL). + * + * Note: + * Failing to synch the QP state correctly may result in unexpected behavior for + * this QP as well as for other QPs which use the same CQ. There is no need + * to synch upon each QP state change, but the application must synch when going back to RESET, + * and for any other state when the user level application is going to use that QP in regard + * to that new state (e.g., user level will not allow posting requests to the send + * queue if it was not synched with a transition to the RTS state). + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_k_sync_qp_state( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ VAPI_qp_hndl_t qp_ul_hndl, + /*IN*/ VAPI_qp_state_t curr_state +); + +/************************************************************************** + * Function: EVAPI_alloc_map_devmem + * + * Arguments: + * hca_hndl : HCA Handle. + * mem_type: Type of attached device memory + * bsize: Size in bytes of required memory buffer + * align_shift: log2 of alignment requirement + * note: in DDR, a chunk should be aligned to its size + * buf_p: Returned physical address of allocated buffer + * virt_addr_p: pointer to virtual address mapped to the physical address (if not NULL, io_remap is done). + * dm_hndl: device memory handle + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_ENOSYS: Given memory type is not supported in given HCA device + * VAPI_EAGAIN: Not enough resources (memory) to satisfy request + * VAPI_EINVAL_PARAM: Invalid memory type or invalid alignment requirement + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Allocate (and map) device attached memory resources (e.g. InfiniHost's attached DDR-SDRAM) + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_alloc_map_devmem( + VAPI_hca_hndl_t hca_hndl, + EVAPI_devmem_type_t mem_type, + VAPI_size_t bsize, + u_int8_t align_shift, + VAPI_phy_addr_t* buf_p, + void** virt_addr_p, + VAPI_devmem_hndl_t* dm_hndl + +); + +/************************************************************************** + * Function: EVAPI_query_devmem + * + * Arguments: + * hca_hndl : HCA Handle.
+ * mem_type: Type of attached device memory + * align_shift : log2 of alignment requirement + * devmem_info_p: pointer to info structure + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_ENOSYS: Given memory type is not supported in given HCA device + * VAPI_EINVAL_PARAM: Invalid memory type or invalid alignment requirement + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Query device attached memory resources (e.g. InfiniHost's attached DDR-SDRAM) + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_query_devmem( + /* IN */ VAPI_hca_hndl_t hca_hndl, + /* IN */ EVAPI_devmem_type_t mem_type, + u_int8_t align_shift, + /* OUT */ EVAPI_devmem_info_t *devmem_info_p); + + +/************************************************************************** + * Function: EVAPI_free_unmap_devmem + * + * Arguments: + * hca_hndl : HCA Handle. + * dm_hndl: device memory handle + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_ENOSYS: Given memory type is not supported in given HCA device + * VAPI_EINVAL_PARAM: Invalid memory type or invalid alignment requirement + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * Free device attached memory buffer allocated with EVAPI_alloc_map_devmem. + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_free_unmap_devmem( + VAPI_hca_hndl_t hca_hndl, + VAPI_devmem_hndl_t dm_hndl +); + + +/************************************************************************* + * Function: EVAPI_alloc_pd + * + * Arguments: + * hca_hndl: Handle to HCA. + * max_num_avs: max number of AVs which can be allocated for this PD + * pd_hndl_p: Pointer to Handle to Protection Domain object. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle. + * VAPI_EAGAIN: not enough resources. + * VAPI_EINVAL_PARAM : invalid parameter + * VAPI_EPERM: not enough permissions. + * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl) + * has returned an error. + * + * Description: + * + * This call is identical to VAPI_alloc_pd, except that the caller may specify the max + * number of AVs which will be allocatable for this PD. If the system default value is + * desired, the caller can specify EVAPI_DEFAULT_AVS_PER_PD for max_num_avs. + * + * Note that max_num_avs may not be zero. Furthermore, the minimum number of AVs allocated is + * 2, so if you ask for only 1 AV as the max, the actual maximum will be 2. For all other values + * (up to the maximum supported by the channel adapter) the maximum AVs requested will be the + * maximum AVs obtained. + * + *************************************************************************/ +VAPI_ret_t MT_API EVAPI_alloc_pd( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ u_int32_t max_num_avs, + /*OUT*/ VAPI_pd_hndl_t *pd_hndl_p + ); + +/************************************************************************* + * Function: EVAPI_alloc_pd_sqp + * + * Arguments: + * hca_hndl: Handle to HCA. + * max_num_avs: max number of AVs which can be allocated for this PD + * pd_hndl_p: Pointer to Handle to Protection Domain object. + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle. + * VAPI_EAGAIN: not enough resources. + * VAPI_EINVAL_PARAM : invalid parameter + * VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   This call is identical to EVAPI_alloc_pd, except that it should be used
+ *   when allocating a protection domain for a special QP. If the caller wishes
+ *   the default number of AVs to be used for this PD, use EVAPI_DEFAULT_AVS_PER_PD
+ *   for the max_num_avs parameter. Using this function is highly recommended in
+ *   order to prevent reads from DDR and to enhance performance of special QPs.
+ *
+ *   Note that max_num_avs may not be zero.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API EVAPI_alloc_pd_sqp(
+  /*IN*/  VAPI_hca_hndl_t  hca_hndl,
+  /*IN*/  u_int32_t        max_num_avs,
+  /*OUT*/ VAPI_pd_hndl_t  *pd_hndl_p
+  );
+/**********************************************************
+ *
+ * Function: EVAPI_open_hca
+ *
+ * Arguments:
+ *   hca_id        : HCA ID to open
+ *   profile_p     : pointer to desired profile
+ *   sugg_profile_p: pointer to returned actual values, or suggested values when
+ *                   a failure is due to parameter values that are too large.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EBUSY : This HCA has already been opened.
+ *   VAPI_EINVAL_PARAM : profile does not pass sanity checks
+ *   VAPI_EINVAL_HCA_ID : No such HCA.
+ *   VAPI_EAGAIN : Max number of supported HCAs on this host already open.
+ *   VAPI_ENOMEM: some of the parameter values provided in the profile are too
+ *                large, in the case where the 'require' flag in the profile is
+ *                set to TRUE.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (open device file, or ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Opens a registered HCA using the given profile. If profile is NULL, the default
+ *   (internal, compiled) profile is used. If sugg_profile_p is NULL, no profile
+ *   data is returned.
+ *
+ *   If sugg_profile_p is non-NULL:
+ *   If the open succeeds, the profile data with which the HCA was opened is
+ *   returned. If a profile structure was provided, and the require flag was false,
+ *   these values may be smaller than the ones provided in the profile.
+ *
+ *   If the open fails with VAPI_ENOMEM, a suggested set of values is returned in
+ *   sugg_profile_p. Otherwise the values given in profile_p (which may not be
+ *   valid) are returned.
+ *
+ *   In all cases, the returned value of the require flag is the value that was
+ *   passed in profile_p.
+ *
+ *   'require' flag in the EVAPI_hca_profile_t structure:
+ *   If this flag is set to FALSE, and the profile passes sanity checks, and the
+ *   given parameter values use too many Tavor resources, the driver will attempt
+ *   to reduce the given values until a set is found which does meet Tavor resource
+ *   requirements. The HCA will be opened using this reduced set of values. The
+ *   reduced set of values is returned in the sugg_profile_p structure, if it is
+ *   non-NULL.
+ *
+ *   If the 'require' flag is set to TRUE, and the profile passes sanity checks,
+ *   and the given set of parameter values use too many Tavor resources, this
+ *   function will return VAPI_ENOMEM. If sugg_profile_p is non-NULL, a set of
+ *   values will be returned in that structure which can be used to successfully
+ *   open the HCA.
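+ *
+ * A minimal usage sketch (illustrative only; the device name string is an
+ * assumption, and error handling is omitted):
+ *
+ *   VAPI_hca_id_t       hca_id = "InfiniHost0";  /* assumed device name  */
+ *   EVAPI_hca_profile_t sugg;
+ *   VAPI_ret_t          rc;
+ *
+ *   rc = EVAPI_open_hca(hca_id, NULL, &sugg);    /* NULL: default profile */
+ *   if (rc == VAPI_OK) {
+ *       /* sugg holds the profile values actually used */
+ *   } else if (rc == VAPI_ENOMEM) {
+ *       /* sugg holds a reduced set of values that would succeed */
+ *   }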
+ *
+ **********************************************************/
+VAPI_ret_t MT_API EVAPI_open_hca(/*IN*/  VAPI_hca_id_t        hca_id,
+                                 /*IN*/  EVAPI_hca_profile_t *profile_p,
+                                 /*OUT*/ EVAPI_hca_profile_t *sugg_profile_p
+                                 );
+
+/**********************************************************
+ *
+ * Function: EVAPI_close_hca
+ *
+ * Arguments:
+ *   hca_id : HCA ID to close
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_ID : No such HCA.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Closes an open HCA. This procedure is meant for administrative use only;
+ *   user-level applications should use EVAPI_release_hca_hndl.
+ *
+ **********************************************************/
+VAPI_ret_t MT_API EVAPI_close_hca(
+  /*IN*/ VAPI_hca_id_t  hca_id
+  );
+
+/***********************************************************************
+ * Asynchronous event functions
+ ***********************************************************************/
+
+/*************************************************************************
+ * Function: EVAPI_set_async_event_handler
+ *
+ * Arguments:
+ *   hca_hndl          : HCA Handle
+ *   handler           : Async Event Handler function address.
+ *   private_data      : Pointer to handler context (handler specific).
+ *   async_handler_hndl: The handle to the registered handler function.
+ *                       Used in the clear function.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_PARAM: handler given is NULL
+ *   VAPI_EAGAIN: not enough system resources (e.g., memory)
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Registers an asynchronous event handler to get all asynchronous events
+ *   (affiliated and non-affiliated) for this process.
+ *
+ * Notes:
+ *   1. All non-affiliated events are "broadcast" to all registered handlers.
+ *   2. Affiliated events are sent only to the callback of the process that the
+ *      resources (e.g., QP) belong to. However, all kernel modules are treated
+ *      as one process, so all of them will get all affiliated events of all
+ *      kernel modules.
+ *   3. Multiple registration of handlers is allowed.
+ *
+ *   The event handler function prototype is as follows:
+ *
+ *   void
+ *   VAPI_async_event_handler
+ *   (
+ *     IN VAPI_hca_hndl_t       hca_hndl,
+ *     IN VAPI_event_record_t  *event_record_p,
+ *     IN void                 *private_data
+ *   )
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API EVAPI_set_async_event_handler(
+  IN  VAPI_hca_hndl_t              hca_hndl,
+  IN  VAPI_async_event_handler_t   handler,
+  IN  void*                        private_data,
+  OUT EVAPI_async_handler_hndl_t  *async_handler_hndl_p
+  );
+
+/**********************************************************
+ *
+ * Function: EVAPI_clear_async_event_handler
+ *
+ * Arguments:
+ *   hca_hndl          : HCA Handle
+ *   async_handler_hndl: The handle to the registered handler function.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_PARAM: invalid async_handler_hndl
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Clears a handler previously registered by EVAPI_set_async_event_handler.
+ *   This function must be called before calling EVAPI_release_hca_hndl.
+ *
+ ***********************************************************/
+VAPI_ret_t MT_API EVAPI_clear_async_event_handler(
+  /*IN*/ VAPI_hca_hndl_t              hca_hndl,
+  /*IN*/ EVAPI_async_handler_hndl_t   async_handler_hndl);
+
+/**********************************************************
+ *
+ * Function: EVAPI_register_thread
+ *
+ * Arguments: (none)
+ *
+ * Returns:
+ *   VIP_OK      success
+ *   VIP_EAGAIN  not enough resources
+ *   VIP_ERROR   error registering signals
+ *
+ * Description:
+ *   This function registers the thread in a data structure so that,
+ *   when forking, all these threads are stopped until all fork-support
+ *   actions are carried out.
+ *
+ ***********************************************************/
+VAPI_ret_t EVAPI_register_thread(void);
+
+/**********************************************************
+ *
+ * Function: EVAPI_deregister_thread
+ *
+ * Arguments: (none; the calling thread is deregistered)
+ *
+ * Returns:
+ *   VIP_OK            thread was successfully deregistered
+ *   VIP_EINVAL_PARAM  thread not found
+ *
+ ***********************************************************/
+VAPI_ret_t EVAPI_deregister_thread(void);
+
+#if defined(MT_SUSPEND_QP)
+/* *************************************************************************
+ * Function: EVAPI_suspend_qp
+ *
+ * Arguments:
+ *   1) hca_hndl    : HCA Handle.
+ *   2) qp_ul_hndl  : user-level QP Handle.
+ *   3) suspend_flag: TRUE--suspend, FALSE--unsuspend
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle
+ *   VAPI_EINVAL_QP_STATE: QP is in RESET or INIT state for suspend,
+ *                         not in SUSPEND state for unsuspend
+ *   VAPI_EAGAIN: QP is currently executing another command (busy)
+ *   VAPI_ENOSYS: Operation not supported on channel adapter
+ *   VAPI_EINTR : Could not grab the QP-modification mutex
+ *   VAPI_EGEN  : operation failed (internal error)
+ *
+ * Description:
+ *   suspend_flag = TRUE:
+ *     Suspends operation of the given QP (i.e., places it in the SUSPENDED state).
+ *     The operation is valid only if the QP is currently NOT in RESET state,
+ *     and NOT in INIT state.
+ *
+ *   suspend_flag = FALSE:
+ *     Transitions the given QP from the SUSPENDED state back to the state it
+ *     was in when suspended.
+ *
+ *************************************************************************/
+VAPI_ret_t EVAPI_suspend_qp(
+  /*IN*/ VAPI_hca_hndl_t  hca_hndl,
+  /*IN*/ VAPI_qp_hndl_t   qp_ul_hndl,
+  /*IN*/ MT_bool          suspend_flag);
+
+/* *************************************************************************
+ * Function: EVAPI_suspend_cq
+ *
+ * Arguments:
+ *   1) hca_hndl  : HCA Handle.
+ *   2) cq_ul_hndl: user-level CQ Handle.
+ *   3) do_suspend: TRUE--suspend, FALSE--unsuspend
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle
+ *   VAPI_EAGAIN: CQ is currently executing another command (busy)
+ *   VAPI_EGEN  : operation failed (internal error)
+ *
+ * Description:
+ *   do_suspend = TRUE:
+ *     Releases locking of the CQ cookies region.
+ *
+ *   do_suspend = FALSE:
+ *     Locks the CQE internal MR again.
+ *
+ *   NOTE: NO safety checks are performed. If there is an unsuspended QP which is
+ *   currently using the CQ, the results are not predictable (but will NOT be
+ *   good).
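+ *
+ * Illustrative sequence (a sketch; the qp/cq handles are assumed to exist
+ * and error checking is omitted):
+ *
+ *   EVAPI_suspend_qp(hca_hndl, qp_ul_hndl, TRUE);   /* quiesce the QP first */
+ *   EVAPI_suspend_cq(hca_hndl, cq_ul_hndl, TRUE);   /* then its CQ          */
+ *   ...
+ *   EVAPI_suspend_cq(hca_hndl, cq_ul_hndl, FALSE);  /* CQ back first        */
+ *   EVAPI_suspend_qp(hca_hndl, qp_ul_hndl, FALSE);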
+ *
+ *************************************************************************/
+VAPI_ret_t EVAPI_suspend_cq(
+  /*IN*/ VAPI_hca_hndl_t  hca_hndl,
+  /*IN*/ VAPI_cq_hndl_t   cq_ul_hndl,
+  /*IN*/ MT_bool          do_suspend);
+#endif
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi.h
new file mode 100644
index 00000000..b9dff2e5
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi.h
@@ -0,0 +1,2191 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_VAPI_H
+#define H_VAPI_H
+
+#include
+#include
+#include
+#include
+
+/********************************************************************************************
+ * VAPI Calls Declarations
+ ********************************************************************************************/
+
+/*******************************************
+ * 11.2.1 HCA
+ *******************************************/
+/*************************************************************************
+ * Function: VAPI_open_hca
+ *
+ * Arguments:
+ *   hca_id    : HCA identifier.
+ *   hca_hndl_p: Pointer to the HCA object handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EINVAL_HCA_ID: invalid HCA identifier.
+ *   VAPI_EBUSY: HCA already in use
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (open device file, or ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Creates a new HCA Object.
+ *
+ *   The creation of an HCA Object will call HH_get_dev_prop in order to find out
+ *   the device capabilities and so allocate enough resources.
+ *
+ *   After the resource allocation is completed, a call to HH_open_hca will be made
+ *   in order to prepare the device for consumer use.
+ *   This call will also create a CIO (Channel Interface Object), which is a
+ *   container for any object related to the opened HCA.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_open_hca(
+  IN  VAPI_hca_id_t    hca_id,
+  OUT VAPI_hca_hndl_t *hca_hndl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_hca_cap
+ *
+ * Arguments:
+ *   hca_hndl    : HCA object handle.
+ *   hca_vendor_p: Pointer to HCA vendor-specific information object.
+ *   hca_cap_p   : Pointer to HCA capabilities object
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Query HCA capabilities retrieves a structure of type VAPI_hca_vendor_t
+ *   providing a list of the vendor-specific information about the HCA, and a
+ *   structure of type VAPI_hca_cap_t providing a detailed list of the HCA
+ *   capabilities. Further information on the HCA ports can be retrieved using
+ *   the verbs VAPI_query_hca_port_prop and VAPI_query_hca_port_tbl.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_hca_cap(
+  IN  VAPI_hca_hndl_t    hca_hndl,
+  OUT VAPI_hca_vendor_t *hca_vendor_p,
+  OUT VAPI_hca_cap_t    *hca_cap_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_hca_port_prop
+ *
+ * Arguments:
+ *   hca_hndl  : HCA object handle.
+ *   port_num  : Port number
+ *   hca_port_p: HCA port object describing the port properties.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_PORT: invalid port number
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Query HCA port properties retrieves a structure of type VAPI_hca_port_t for
+ *   the port specified in port_num. The number of the HCA physical ports can be
+ *   obtained using the verb VAPI_query_hca_cap. Further information about the
+ *   port P-Key table and GID table can be obtained using the verb
+ *   VAPI_query_hca_port_tbl.
+ *
+ *   Upon successful completion, the verb returns in hca_port_p a structure of
+ *   type VAPI_hca_port_t.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_hca_port_prop(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  IB_port_t        port_num,
+  OUT VAPI_hca_port_t *hca_port_p  /* set to NULL if not interested */
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_hca_gid_tbl
+ *
+ * Arguments:
+ *   hca_hndl   : HCA object handle.
+ *   port_num   : Port number
+ *   tbl_len_in : Number of entries in given gid_tbl_p buffer.
+ *   tbl_len_out: Actual number of entries in this port's GID table
+ *   gid_tbl_p  : The GID table buffer to return the result in.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_PORT: invalid port number
+ *   VAPI_EAGAIN: tbl_len_out > tbl_len_in.
+ *   VAPI_EINVAL_PARAM: invalid parameter.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   The GID table of the given port is returned in gid_tbl_p.
+ *   If tbl_len_out (actual number of entries) is more than tbl_len_in, the
+ *   function should be called again with a larger buffer.
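+ *
+ * Typical call-twice pattern (a sketch; port 1 and the initial buffer size
+ * are arbitrary, and error checking is omitted):
+ *
+ *   IB_gid_t   gid_tbl[8];
+ *   u_int16_t  tbl_len_out = 0;
+ *
+ *   if (VAPI_query_hca_gid_tbl(hca_hndl, 1, 8, &tbl_len_out, gid_tbl)
+ *       == VAPI_EAGAIN) {
+ *       /* tbl_len_out holds the real table size: allocate a buffer of
+ *        * tbl_len_out entries and call again. */
+ *   }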
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_hca_gid_tbl(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  IB_port_t        port_num,
+  IN  u_int16_t        tbl_len_in,
+  OUT u_int16_t       *tbl_len_out,
+  OUT IB_gid_t        *gid_tbl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_hca_pkey_tbl
+ *
+ * Arguments:
+ *   hca_hndl   : HCA object handle.
+ *   port_num   : Port number
+ *   tbl_len_in : Number of entries in given pkey_tbl_p buffer.
+ *   tbl_len_out: Actual number of entries in this port's PKEY table
+ *   pkey_tbl_p : The PKEY table buffer to return the result in.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_PORT: invalid port number
+ *   VAPI_EAGAIN: tbl_len_out > tbl_len_in.
+ *   VAPI_EINVAL_PARAM: invalid parameter.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   The PKEY table of the given port is returned in pkey_tbl_p.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_hca_pkey_tbl(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  IB_port_t        port_num,
+  IN  u_int16_t        tbl_len_in,
+  OUT u_int16_t       *tbl_len_out,
+  OUT VAPI_pkey_t     *pkey_tbl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_modify_hca_attr
+ *
+ * Arguments:
+ *   hca_hndl       : Handle to HCA.
+ *   port_num       : Port number
+ *   hca_attr_p     : Pointer to the HCA attributes structure
+ *   hca_attr_mask_p: Pointer to the HCA attributes mask
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_PORT: Invalid port number
+ *   VAPI_EAGAIN: failed on resource allocation
+ *   VAPI_EGEN: function was not called from a user-level context
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Sets the HCA attributes specified in hca_attr_p for port number port_num.
+ *   Only the values specified in hca_attr_mask_p are modified. hca_attr_p is a
+ *   pointer to a structure of type VAPI_hca_attr_t.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_modify_hca_attr(
+  IN VAPI_hca_hndl_t       hca_hndl,
+  IN IB_port_t             port_num,
+  IN VAPI_hca_attr_t      *hca_attr_p,
+  IN VAPI_hca_attr_mask_t *hca_attr_mask_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_close_hca
+ *
+ * Arguments:
+ *   hca_hndl: Handle to HCA.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   This call will deallocate all the structures allocated during the call to
+ *   VAPI_open_hca, and any other resource in the domain of the CI.
+ *
+ *   It is the responsibility of the consumer to free resources allocated for the
+ *   HCA that are under its scope.
+ *
+ *   VAPI will call HH_VClose_HCA in order to instruct the device to stop
+ *   processing new requests and close in-process ones. This will be done before
+ *   releasing any resource belonging to the CI.
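+ *
+ * Teardown-order sketch (illustrative; handles are assumed valid, error
+ * checking is omitted, and only verbs declared in this header are used):
+ *
+ *   VAPI_destroy_qp(hca_hndl, qp_hndl);   /* QPs before the CQs they use  */
+ *   VAPI_destroy_cq(hca_hndl, cq_hndl);
+ *   VAPI_dealloc_pd(hca_hndl, pd_hndl);   /* PD only after its objects    */
+ *   VAPI_close_hca(hca_hndl);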
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_close_hca(
+  IN VAPI_hca_hndl_t  hca_hndl
+  );
+
+/* Protection Domain Verbs */
+
+/*************************************************************************
+ * Function: VAPI_alloc_pd
+ *
+ * Arguments:
+ *   hca_hndl : Handle to HCA.
+ *   pd_hndl_p: Pointer to Handle to Protection Domain object.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   This call registers a new protection domain by calling VIP_Register_PD.
+ *   Within the VIP layer it is the responsibility of the PDA to keep track of
+ *   the different Protection Domains and the objects associated with them.
+ *
+ *   After registering the newly allocated PD in the PDA, the VIP will call
+ *   HH_register_PD. Some HCA HW implementations do not keep track of any
+ *   Protection Domain object internally, turning the call to HH_register_PD
+ *   into a dummy call.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_alloc_pd(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  OUT VAPI_pd_hndl_t  *pd_hndl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_dealloc_pd
+ *
+ * Arguments:
+ *   hca_hndl: Handle to HCA.
+ *   pd_hndl : Handle to Protection Domain Object.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: Invalid HCA handle.
+ *   VAPI_EINVAL_PD_HNDL: Invalid Protection Domain
+ *   VAPI_EBUSY: Protection Domain in use.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Deregisters the Protection Domain from the PDA. The PDA is responsible for
+ *   validating that there are no objects associated with the Protection Domain
+ *   being deallocated.
+ *
+ *   After deregistering the allocated PD from the PDA, the VIP will call
+ *   HH_deregister_PD. Some HCA HW implementations do not keep track of any
+ *   Protection Domain object internally, turning the call to HH_deregister_PD
+ *   into a dummy call.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_dealloc_pd(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_pd_hndl_t   pd_hndl
+  );
+
+/* RD is not supported in this revision */
+#if 0
+
+/* Reliable Datagram Domain Verbs */
+
+/*************************************************************************
+ * Function: VAPI_alloc_rdd
+ *
+ * Arguments:
+ *   hca_hndl  : HCA Handle.
+ *   rdd_hndl_p: Pointer to Reliable Datagram Domain object handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EAGAIN: out of resources.
+ *   VAPI_EINVAL_RD_UNSUPPORTED: RD is not supported
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Allocates an RD domain.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_alloc_rdd (
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  OUT VAPI_rdd_hndl_t *rdd_hndl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_dealloc_rdd
+ *
+ * Arguments:
+ *   hca_hndl: HCA Handle.
+ *   rdd_hndl: Reliable Datagram Domain object handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EAGAIN: out of resources.
+ *   VAPI_EINVAL_RD_UNSUPPORTED: RD is not supported
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Deallocates an RD domain.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_dealloc_rdd(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_rdd_hndl_t  rdd_hndl
+  );
+#endif
+
+/*******************************************
+ * 11.2.2 Address Management Verbs
+ *******************************************/
+/*************************************************************************
+ * Function: VAPI_create_addr_hndl
+ *
+ * Arguments:
+ *   hca_hndl : Handle to HCA.
+ *   pd_hndl  : Protection domain handle
+ *   av_p     : Pointer to Address Vector structure.
+ *   av_hndl_p: Handle of Address Vector.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: Invalid HCA handle.
+ *   VAPI_EINVAL_PD_HNDL: Invalid Protection Domain handle.
+ *   VAPI_EAGAIN: Not enough resources.
+ *   VAPI_EPERM: Not enough permissions.
+ *   VAPI_EINVAL_PARAM: Invalid parameter
+ *   VAPI_EINVAL_PORT: Invalid port number
+ *
+ * Description:
+ *   Creates a new Address Vector Handle that can be used later when posting a WR
+ *   to a UD QP.
+ *   The AVL (Address Vector Library) does the accounting of the different Address
+ *   Vectors the user creates, and responds to the JOD when the latter posts
+ *   descriptors to the device.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_addr_hndl(
+  IN  VAPI_hca_hndl_t    hca_hndl,
+  IN  VAPI_pd_hndl_t     pd_hndl,
+  IN  VAPI_ud_av_t      *av_p,
+  OUT VAPI_ud_av_hndl_t *av_hndl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_modify_addr_hndl
+ *
+ * Arguments:
+ *   hca_hndl: Handle to HCA.
+ *   av_hndl : Handle of Address Vector
+ *   av_p    : Pointer to Address Vector structure.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_AV_HNDL: invalid Address Vector handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_EINVAL_PORT: Invalid port number
+ *
+ * Description:
+ *   Modifies an existing address vector handle to a new address vector. For
+ *   address vector fields, refer to Table 6, “VAPI_av_t,” on page 20.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_modify_addr_hndl(
+  IN VAPI_hca_hndl_t    hca_hndl,
+  IN VAPI_ud_av_hndl_t  av_hndl,
+  IN VAPI_ud_av_t      *av_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_addr_hndl
+ *
+ * Arguments:
+ *   hca_hndl: Handle to HCA.
+ *   av_hndl : Handle of Address Vector
+ *   av_p    : Pointer to Address Vector structure.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_AV_HNDL: invalid address vector handle.
+ *   VAPI_EPERM: not enough permission.
+ *
+ * Description:
+ *   Returns in av_p the information of the UD Address Vector represented by
+ *   av_hndl. For address vector fields, refer to Table 6, “VAPI_av_t,” on
+ *   page 20.
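+ *
+ * Lifecycle sketch (illustrative; the dlid field name and the handle
+ * variables are assumptions, and error checking is omitted):
+ *
+ *   VAPI_ud_av_t      av;
+ *   VAPI_ud_av_hndl_t av_hndl;
+ *
+ *   memset(&av, 0, sizeof(av));
+ *   av.dlid = remote_lid;                       /* assumed field name */
+ *   VAPI_create_addr_hndl(hca_hndl, pd_hndl, &av, &av_hndl);
+ *   VAPI_query_addr_hndl(hca_hndl, av_hndl, &av);
+ *   VAPI_destroy_addr_hndl(hca_hndl, av_hndl);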
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_addr_hndl(
+  IN  VAPI_hca_hndl_t    hca_hndl,
+  IN  VAPI_ud_av_hndl_t  av_hndl,
+  OUT VAPI_ud_av_t      *av_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_destroy_addr_hndl
+ *
+ * Arguments:
+ *   hca_hndl: Handle to HCA.
+ *   av_hndl : Handle of Address Vector
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_AV_HNDL: invalid address vector handle.
+ *   VAPI_EPERM: not enough permission.
+ *
+ * Description:
+ *   Removes the address handle.
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_destroy_addr_hndl(
+  IN VAPI_hca_hndl_t    hca_hndl,
+  IN VAPI_ud_av_hndl_t  av_hndl
+  );
+
+/*******************************************
+ * 11.2.3 Queue Pair
+ *******************************************/
+
+/*************************************************************************
+ * Function: VAPI_create_qp
+ *
+ * Arguments:
+ *   hca_hndl      : HCA Handle.
+ *   qp_init_attr_p: Pointer to QP attributes to be used for initialization.
+ *   qp_hndl_p     : Pointer to returned QP Handle number.
+ *   qp_prop_p     : Pointer to properties of created QP.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle.
+ *   VAPI_E2BIG_WR_NUM: number of WR exceeds HCA cap.
+ *   VAPI_E2BIG_SG_NUM: number of SG exceeds HCA cap.
+ *   VAPI_EINVAL_PD_HNDL: invalid protection domain handle.
+ *   VAPI_EINVAL_SERVICE_TYPE: invalid service type for this QP.
+ *   VAPI_EINVAL_RDD: Invalid RD domain handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Create a QP resource (in the reset state).
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_qp(
+  IN  VAPI_hca_hndl_t      hca_hndl,
+  IN  VAPI_qp_init_attr_t *qp_init_attr_p,
+  OUT VAPI_qp_hndl_t      *qp_hndl_p,
+  OUT VAPI_qp_prop_t      *qp_prop_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_create_qp_ext
+ *
+ * Arguments:
+ *   hca_hndl      : HCA Handle.
+ *   qp_init_attr_p: Pointer to QP attributes to be used for initialization.
+ *   qp_ext_attr_p : Extended QP attributes
+ *                   (take care to init. with VAPI_QP_INIT_ATTR_EXT_T_INIT)
+ *   qp_hndl_p     : Pointer to returned QP Handle number.
+ *   qp_prop_p     : Pointer to properties of created QP.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle.
+ *   VAPI_EINVAL_SRQ_HNDL: Given SRQ handle does not exist
+ *                         (when srq_handle!=VAPI_SRQ_INVAL_HNDL)
+ *   VAPI_EINVAL_PD_HNDL: invalid protection domain handle,
+ *                        OR (when an SRQ is associated with this QP and the HCA
+ *                        requires the SRQ's PD to be the same as the QP's)
+ *                        the given PD is different from the associated SRQ's.
+ *   VAPI_E2BIG_WR_NUM: number of WR exceeds HCA cap.
+ *   VAPI_E2BIG_SG_NUM: number of SG exceeds HCA cap.
+ *   VAPI_EINVAL_SERVICE_TYPE: invalid service type for this QP.
+ *   VAPI_EINVAL_RDD: Invalid RD domain handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Create a QP resource in the reset state - extended version.
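+ *
+ * Creation sketch (illustrative; the attribute field names and the use of
+ * the initializer are assumptions, and error checking is omitted):
+ *
+ *   VAPI_qp_init_attr_t     qp_init_attr;
+ *   VAPI_qp_init_attr_ext_t qp_ext_attr = VAPI_QP_INIT_ATTR_EXT_T_INIT;
+ *   VAPI_qp_hndl_t          qp_hndl;
+ *   VAPI_qp_prop_t          qp_prop;
+ *
+ *   memset(&qp_init_attr, 0, sizeof(qp_init_attr));
+ *   qp_init_attr.pd_hndl    = pd_hndl;       /* assumed field names */
+ *   qp_init_attr.sq_cq_hndl = cq_hndl;
+ *   qp_init_attr.rq_cq_hndl = cq_hndl;
+ *   qp_init_attr.ts_type    = VAPI_TS_RC;    /* assumed enum value  */
+ *   VAPI_create_qp_ext(hca_hndl, &qp_init_attr, &qp_ext_attr,
+ *                      &qp_hndl, &qp_prop);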
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_qp_ext(
+  IN  VAPI_hca_hndl_t          hca_hndl,
+  IN  VAPI_qp_init_attr_t     *qp_init_attr_p,
+  IN  VAPI_qp_init_attr_ext_t *qp_ext_attr_p,
+  OUT VAPI_qp_hndl_t          *qp_hndl_p,
+  OUT VAPI_qp_prop_t          *qp_prop_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_modify_qp
+ *
+ * Arguments:
+ *   hca_hndl      : HCA handle.
+ *   qp_hndl       : QP handle
+ *   qp_attr_p     : Pointer to QP attributes to be modified.
+ *   qp_attr_mask_p: Pointer to the attributes mask to be modified.
+ *   qp_cap_p      : Pointer to QP actual capabilities returned.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources.
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle.
+ *   VAPI_ENOSYS_ATTR: QP attribute is not supported.
+ *   VAPI_EINVAL_ATTR: cannot change QP attribute.
+ *   VAPI_EINVAL_PKEY_IX: PKey index out of range.
+ *   VAPI_EINVAL_PKEY_TBL_ENTRY: PKey index points to an invalid entry in the pkey table.
+ *   VAPI_EINVAL_QP_STATE: invalid QP state.
+ *   VAPI_EINVAL_RDD_HNDL: invalid RDD domain handle.
+ *   VAPI_EINVAL_MIG_STATE: invalid path migration state.
+ *   VAPI_EINVAL_MTU: MTU exceeds HCA port capabilities
+ *   VAPI_EINVAL_PORT: invalid port
+ *   VAPI_EINVAL_SERVICE_TYPE: invalid service type
+ *   VAPI_E2BIG_WR_NUM: maximum number of WR requested exceeds HCA capabilities
+ *   VAPI_EINVAL_RNR_NAK_TIMER: invalid RNR NAK timer value
+ *   VAPI_EINVAL_LOCAL_ACK_TIMEOUT: invalid local ACK timeout value (either primary or alt)
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Modify the QP attributes and transition into a new state. Note that only a
+ *   subset of all the attributes can be modified during a given transition into
+ *   a QP state. qp_attr_mask_p specifies the actual attributes to be modified.
+ *
+ *   The QP attributes specified are of type VAPI_qp_attr_t.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_modify_qp(
+  IN  VAPI_hca_hndl_t      hca_hndl,
+  IN  VAPI_qp_hndl_t       qp_hndl,
+  IN  VAPI_qp_attr_t      *qp_attr_p,
+  IN  VAPI_qp_attr_mask_t *qp_attr_mask_p,
+  OUT VAPI_qp_cap_t       *qp_cap_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_qp
+ *
+ * Arguments:
+ *   hca_hndl      : HCA Handle.
+ *   qp_hndl       : QP Handle.
+ *   qp_attr_p     : Pointer to QP attributes.
+ *   qp_attr_mask_p: Pointer to QP attributes mask.
+ *   qp_init_attr_p: Pointer to init attributes
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Returns a VAPI_qp_attr_t structure to the application with all the relevant
+ *   information that applies to the QP matching qp_hndl.
+ *   Note that only the relevant fields in qp_attr_p and qp_init_attr_p are valid.
+ *   The valid fields in qp_attr_p are marked in the mask returned by
+ *   qp_attr_mask_p.
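+ *
+ * Query sketch (illustrative; the mask-test macro name is an assumption,
+ * and error checking is omitted):
+ *
+ *   VAPI_qp_attr_t      qp_attr;
+ *   VAPI_qp_attr_mask_t qp_attr_mask;
+ *   VAPI_qp_init_attr_t qp_init_attr;
+ *
+ *   VAPI_query_qp(hca_hndl, qp_hndl, &qp_attr, &qp_attr_mask, &qp_init_attr);
+ *   if (QP_ATTR_IS_SET(qp_attr_mask, QP_ATTR_QP_STATE)) {  /* assumed macro */
+ *       /* qp_attr.qp_state is valid here */
+ *   }
+ *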
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_qp(
+  IN  VAPI_hca_hndl_t      hca_hndl,
+  IN  VAPI_qp_hndl_t       qp_hndl,
+  OUT VAPI_qp_attr_t      *qp_attr_p,
+  OUT VAPI_qp_attr_mask_t *qp_attr_mask_p,
+  OUT VAPI_qp_init_attr_t *qp_init_attr_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_qp_ext
+ *
+ * Arguments:
+ *   hca_hndl          : HCA Handle.
+ *   qp_hndl           : QP Handle.
+ *   qp_attr_p         : Pointer to QP attributes.
+ *   qp_attr_mask_p    : Pointer to QP attributes mask.
+ *   qp_init_attr_p    : Pointer to init attributes
+ *   qp_init_attr_ext_p: Pointer to extended init attributes
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Same as VAPI_query_qp(), but includes extended init. attributes.
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_qp_ext(
+  IN  VAPI_hca_hndl_t          hca_hndl,
+  IN  VAPI_qp_hndl_t           qp_hndl,
+  OUT VAPI_qp_attr_t          *qp_attr_p,
+  OUT VAPI_qp_attr_mask_t     *qp_attr_mask_p,
+  OUT VAPI_qp_init_attr_t     *qp_init_attr_p,
+  OUT VAPI_qp_init_attr_ext_t *qp_init_attr_ext_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_destroy_qp
+ *
+ * Arguments:
+ *   hca_hndl: HCA Handle.
+ *   qp_hndl : QP Handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_EBUSY: QP is in use
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Releases all resources allocated by the CI to the QP.
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_destroy_qp(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_qp_hndl_t   qp_hndl
+  );
+
+/*************************************************************************
+ * Function: VAPI_get_special_qp
+ *
+ * Arguments:
+ *   hca_hndl      : HCA Handle.
+ *   port          : Physical port (valid only for QP0 and QP1)
+ *   qp            : The special QP type
+ *   qp_init_attr_p: pointer to init attribute struct.
+ *   qp_hndl_p     : Pointer to returned QP Handle.
+ *   qp_cap_p      : pointer to QP capabilities struct
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_QP_HNDL: invalid QP handle.
+ *   VAPI_EINVAL_PORT: invalid port
+ *   VAPI_EINVAL_PD_HNDL: invalid PD
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_EAGAIN: not enough resources
+ *   VAPI_EGEN: general error
+ *   VAPI_EINVAL_PARAM: invalid parameter
+ *   VAPI_EBUSY: resource is busy/in-use
+ *   VAPI_ENOSYS: not supported (legacy mode only)
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   This call creates a special QP that can generate MADs, raw IPv6,
+ *   or ethertype packets.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_get_special_qp(
+  IN  VAPI_hca_hndl_t      hca_hndl,
+  IN  IB_port_t            port,
+  IN  VAPI_special_qp_t    qp,
+  IN  VAPI_qp_init_attr_t *qp_init_attr_p,
+  OUT VAPI_qp_hndl_t      *qp_hndl_p,
+  OUT VAPI_qp_cap_t       *qp_cap_p
+  );
+
+/*******************************************
+ * Shared Receive Queue (SRQ)
+ *******************************************/
+/*************************************************************************
+ * Function: VAPI_create_srq
+ *
+ * Arguments:
+ *   hca_hndl          : HCA Handle.
+ *   srq_props_p       : Requested SRQ attributes
+ *   srq_hndl_p        : Returned SRQ handle
+ *   actual_srq_props_p: Actual SRQ attributes
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_PD_HNDL: invalid protection domain handle.
+ *   VAPI_E2BIG_WR_NUM: max_outs_wr exceeds HCA cap.
+ *   VAPI_E2BIG_SG_NUM: max_sentries exceeds HCA cap.
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: Kernel trap (IOCTL/system-call) failure.
+ *   VAPI_ENOSYS: HCA does not support SRQs
+ *
+ * Description:
+ *   Create a shared RQ with given attributes.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_srq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_srq_attr_t *srq_props_p,
+  OUT VAPI_srq_hndl_t *srq_hndl_p,
+  OUT VAPI_srq_attr_t *actual_srq_props_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_srq
+ *
+ * Arguments:
+ *   hca_hndl  : HCA Handle.
+ *   srq_hndl  : SRQ to query
+ *   srq_attr_p: Returned SRQ attributes
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_SRQ_HNDL: invalid SRQ handle.
+ *   VAPI_ESRQ: SRQ in error state
+ *   VAPI_ESYSCALL: Kernel trap (IOCTL/system-call) failure.
+ *   VAPI_ENOSYS: HCA does not support SRQs
+ *
+ * Description:
+ *   Query a shared RQ.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_srq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_srq_hndl_t  srq_hndl,
+  OUT VAPI_srq_attr_t *srq_attr_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_modify_srq
+ *
+ * Arguments:
+ *   hca_hndl     : HCA Handle.
+ *   srq_hndl     : SRQ to modify
+ *   srq_attr_p   : Requested new SRQ attributes
+ *   srq_attr_mask: Mask of valid attributes in *srq_attr_p (attr. to modify)
+ *   max_outs_wr_p: Returned actual max. number of outstanding WRs
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_SRQ_HNDL: invalid SRQ handle.
+ *   VAPI_E2BIG_WR_NUM: max_outs_wr exceeds HCA cap,
+ *                      OR is smaller than the number of currently outstanding WQEs
+ *   VAPI_E2BIG_SRQ_LIMIT: Requested SRQ limit is larger than actual new size
+ *   VAPI_ESRQ: SRQ in error state
+ *   VAPI_EAGAIN: not enough resources.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: Kernel trap (IOCTL/system-call) failure.
+ *   VAPI_ENOSYS: HCA does not support requested SRQ modifications.
+ *
+ * Description:
+ *   Modify a shared RQ with given attributes.
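+ *
+ * Resize sketch (illustrative; the attribute field and mask bit names are
+ * assumptions, and error checking is omitted):
+ *
+ *   VAPI_srq_attr_t      srq_attr;
+ *   VAPI_srq_attr_mask_t srq_attr_mask = 0;     /* assumed to be a bitmask */
+ *   u_int32_t            max_outs_wr;
+ *
+ *   srq_attr.max_outs_wr = 1024;                /* assumed field name      */
+ *   srq_attr_mask |= VAPI_SRQ_ATTR_MAX_OUTS_WR; /* assumed mask bit name   */
+ *   VAPI_modify_srq(hca_hndl, srq_hndl, &srq_attr, srq_attr_mask,
+ *                   &max_outs_wr);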
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_modify_srq(
+  IN  VAPI_hca_hndl_t       hca_hndl,
+  IN  VAPI_srq_hndl_t       srq_hndl,
+  IN  VAPI_srq_attr_t      *srq_attr_p,
+  IN  VAPI_srq_attr_mask_t  srq_attr_mask,
+  OUT u_int32_t            *max_outs_wr_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_destroy_srq
+ *
+ * Arguments:
+ *   hca_hndl: HCA Handle.
+ *   srq_hndl: SRQ to destroy
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle.
+ *   VAPI_EINVAL_SRQ_HNDL: invalid SRQ handle.
+ *   VAPI_EBUSY: SRQ still has QPs associated with it.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: Kernel trap (IOCTL/system-call) failure.
+ *   VAPI_ENOSYS: HCA does not support SRQs
+ *
+ * Description:
+ *   Destroy a shared RQ.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_destroy_srq(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_srq_hndl_t  srq_hndl
+  );
+
+/************************************************************************
+ * Function: VAPI_post_srq
+ *
+ * Arguments:
+ *   hca_hndl     : HCA Handle.
+ *   srq_hndl     : SRQ Handle.
+ *   rwqe_num     : Number of receive WQEs to post.
+ *   rwqe_array   : Pointer to an array of rwqe_num receive work requests.
+ *   rwqe_posted_p: Returned actual number of posted WQEs.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_SRQ_HNDL: invalid SRQ handle
+ *   VAPI_E2BIG_WR_NUM: Too many posted work requests.
+ *   VAPI_EINVAL_SG_NUM: invalid scatter list length
+ *   VAPI_EINVAL_OP: invalid operation
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Post receive request descriptors to the shared receive queue.
+ *   An error refers only to the first WQE that was not posted (index *rwqe_posted_p).
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_post_srq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_srq_hndl_t  srq_hndl,
+  IN  u_int32_t        rwqe_num,
+  IN  VAPI_rr_desc_t  *rwqe_array,
+  OUT u_int32_t       *rwqe_posted_p);
+
+/*******************************************
+ * 11.2.5 Completion Queue
+ *******************************************/
+/*************************************************************************
+ * Function: VAPI_create_cq
+ *
+ * Arguments:
+ *   hca_hndl        : HCA Handle.
+ *   cqe_num         : Minimum required number of entries in CQ.
+ *   cq_hndl_p       : Pointer to the created CQ handle
+ *   num_of_entries_p: Actual number of entries in CQ
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_E2BIG_CQ_NUM: number of entries in CQ exceeds HCA capabilities
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Allocate the required data structures for the administration of a completion
+ *   queue, including completion queue buffer space, which has to be large enough
+ *   for the maximum number of entries in the completion queue.
+ *
+ *   Completion queue entries are accessed directly by the application.
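+ *
+ * Creation sketch (illustrative; error checking is omitted):
+ *
+ *   VAPI_cq_hndl_t cq_hndl;
+ *   VAPI_cqe_num_t num_of_entries;
+ *
+ *   VAPI_create_cq(hca_hndl, 256, &cq_hndl, &num_of_entries);
+ *   /* num_of_entries may come back larger than the requested 256 */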
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_cq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_cqe_num_t   cqe_num,
+  OUT VAPI_cq_hndl_t  *cq_hndl_p,
+  OUT VAPI_cqe_num_t  *num_of_entries_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_cq
+ *
+ * Arguments:
+ *   hca_hndl        : HCA handle.
+ *   cq_hndl         : Completion Queue Handle.
+ *   num_of_entries_p: Pointer to actual number of entries in CQ.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Retrieves the number of entries in the CQ.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_cq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_cq_hndl_t   cq_hndl,
+  OUT VAPI_cqe_num_t  *num_of_entries_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_resize_cq
+ *
+ * Arguments:
+ *   hca_hndl        : HCA Handle.
+ *   cq_hndl         : CQ Handle.
+ *   cqe_num         : Minimum entries required in resized CQ.
+ *   num_of_entries_p: Pointer to actual number of entries in resized CQ.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle, or the given CQ is in an invalid
+ *                        state to resize (CQ error).
+ *   VAPI_E2BIG_CQ_NUM: number of entries in CQ exceeds HCA capabilities,
+ *                      or number of currently outstanding entries in CQ
+ *                      exceeds required size.
+ *   VAPI_EBUSY: Another VAPI_resize_cq invocation for the same CQ is in progress
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Resize the given CQ.
+ *   The number of currently outstanding CQEs in the CQ should be no more than
+ *   the size of the new CQ.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_resize_cq(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_cq_hndl_t   cq_hndl,
+  IN  VAPI_cqe_num_t   cqe_num,
+  OUT VAPI_cqe_num_t  *num_of_entries_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_destroy_cq
+ *
+ * Arguments:
+ *   hca_hndl: HCA Handle.
+ *   cq_hndl : CQ Handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_CQ_HNDL: invalid CQ handle.
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_EBUSY
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Destroys the CQ and releases all resources associated with it.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_destroy_cq(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_cq_hndl_t   cq_hndl
+  );
+
+/*******************************************
+ * 11.2.6 EE Context
+ *******************************************/
+/************ EEC is not supported in this revision ********************/
+#if 0
+
+/*************************************************************************
+ * Function: VAPI_create_eec
+ *
+ * Arguments:
+ *   hca_hndl  : HCA Handle.
+ *   rdd       : RD domain.
+ *   eec_hndl_p: Pointer to returned EE Context Handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Creates an EE context.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_create_eec(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_rdd_t       rdd,
+  OUT VAPI_eec_hndl_t *eec_hndl_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_modify_eec_attr
+ *
+ * Arguments:
+ *   hca_hndl  : HCA Handle.
+ *   eec_hndl  : EE Context Handle
+ *   eec_attr_p: Pointer to EE Context Attributes Structure.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_EEC_HNDL: invalid EEC handle
+ *   CANNOT_CHANGE_EE_CONTEXT_ATTR
+ *   VAPI_EINVAL_EEC_STATE: invalid EEC state
+ *   VAPI_EINVAL_RDD: invalid RD domain
+ *   INVALID_CHANNEL_MIGRATION_STATE
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Modifies EE attributes.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_modify_eec_attr(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_eec_hndl_t  eec_hndl,
+  IN VAPI_eec_attr_t *eec_attr_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_eec_attr
+ *
+ * Arguments:
+ *   hca_hndl  : HCA Handle.
+ *   eec_hndl  : EE context handle.
+ *   eec_attr_p: Pointer to EE Context Attributes Structure.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_EEC_HNDL: invalid EEC handle
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Queries EEC attributes.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_eec_attr(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_eec_hndl_t  eec_hndl,
+  OUT VAPI_eec_attr_t *eec_attr_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_destroy_eec
+ *
+ * Arguments:
+ *   hca_hndl: HCA Handle.
+ *   eec_hndl: EE context handle.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_EEC_HNDL: invalid EEC handle
+ *   VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *   Destroys an EEC context.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_destroy_eec (
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_eec_hndl_t  eec_hndl
+  );
+
+#endif
+
+/*******************************************
+ * 11.2.7 Memory Management
+ *******************************************/
+
+/*************************************************************************
+ * Function: VAPI_register_mr
+ *
+ * Arguments:
+ *   hca_hndl : HCA Handle.
+ *   req_mrw_p: Pointer to the requested memory region properties.
+ *   mr_hndl_p: Pointer to the memory region handle.
+ *   rep_mrw_p: Pointer to the returned memory region properties.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_PD_HNDL: invalid PD handle
+ *   VAPI_EINVAL_VA: invalid virtual address
+ *   VAPI_EINVAL_LEN: invalid length
+ *   VAPI_EINVAL_ACL: invalid ACL specifier (remote write or atomic, without local write)
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   The MMU administers a list of memory regions/windows. The current version of
+ *   the VIP supports only pinned buffers. In the future, an extension of the MMU
+ *   to support pageable buffers will be considered.
+ *
+ *   Memory translation and protection tables are not stored in the VIP but in the
+ *   device driver, due to their device-specific orientation.
+ *
+ *   The caller should fill the req_mrw_p structure fields with the type (VAPI_MR,
+ *   VAPI_MPR), virtual start address, size, protection domain handle (pd_hndl)
+ *   and the access control list (acl). For registration of a physical MR, the
+ *   caller should also fill the fields iova_offset (offset of virt. start address
+ *   from page start), pbuf_list_p (list of physical buffers) and pbuf_list_len.
+ *
+ *   Upon successful completion, rep_mrw_p will include the l_key and the r_key
+ *   (if remote access was requested). The memory region handle is returned in
+ *   mr_hndl_p.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_register_mr(
+  IN  VAPI_hca_hndl_t  hca_hndl,
+  IN  VAPI_mr_t       *req_mrw_p,
+  OUT VAPI_mr_hndl_t  *mr_hndl_p,
+  OUT VAPI_mr_t       *rep_mrw_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_query_mr
+ *
+ * Arguments:
+ *   hca_hndl      : HCA handle.
+ *   mr_hndl       : Memory Region Handle.
+ *   rep_mrw_p     : Pointer to Memory Region Attributes
+ *   remote_start_p: Pointer to the returned remote start address of the region
+ *   remote_size_p : Pointer to the returned remote size of the region
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_MR_HNDL: invalid Memory Region handle
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Queries a memory region handle and returns a VAPI_mr_t. Upon successful
+ *   completion, the structure includes all the memory region properties:
+ *   protection domain handle, ACL, LKey, RKey and actual protection bounds. The
+ *   protection bounds returned in rep_mrw_p are the local protection bounds
+ *   enforced by the HCA. The remote protection bounds are returned in
+ *   remote_start_p and remote_size_p and are valid only when remote access was
+ *   requested.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_mr(
+  IN  VAPI_hca_hndl_t   hca_hndl,
+  IN  VAPI_mr_hndl_t    mr_hndl,
+  OUT VAPI_mr_t        *rep_mrw_p,
+  OUT VAPI_virt_addr_t *remote_start_p,
+  OUT VAPI_virt_addr_t *remote_size_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_deregister_mr
+ *
+ * Arguments:
+ *   hca_hndl: HCA handle.
+ *   mr_hndl : Memory Region Handle
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_MR_HNDL: invalid memory region handle
+ *   VAPI_EBUSY: memory region still has bound window(s)
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Destroys a registered memory region. The deregistered memory region has to
+ *   be invalidated from the CI.
+ *
+ *   It is the role of the MMU to validate that there are no bound memory windows,
+ *   in order to allow the deregistration of the memory region.
+ *
+ *   After the deregistration takes place, it is under the scope of the MMU to
+ *   unpin all those memory pages that were not pinned before the memory
+ *   registration was done.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_deregister_mr(
+  IN VAPI_hca_hndl_t  hca_hndl,
+  IN VAPI_mr_hndl_t   mr_hndl
+  );
+
+/*************************************************************************
+ * Function: VAPI_reregister_mr
+ *
+ * Arguments:
+ *   hca_hndl     : HCA Handle.
+ *   mr_hndl      : Old Memory Region Handle.
+ *   change_type  : Requested change type.
+ *   req_mrw_p    : Pointer to the requested memory region properties.
+ *   rep_mr_hndl_p: Pointer to the returned new memory region handle
+ *   rep_mrw_p    : Pointer to the returned memory region properties.
+ *
+ * Returns:
+ *   VAPI_OK
+ *   VAPI_EAGAIN: out of resources
+ *   VAPI_EINVAL_PARAM: invalid change type
+ *   VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ *   VAPI_EINVAL_MR_HNDL: invalid memory region handle
+ *   VAPI_EINVAL_VA: invalid virtual address
+ *   VAPI_EINVAL_LEN: invalid length
+ *   VAPI_EINVAL_PD_HNDL: invalid protection domain handle
+ *   VAPI_EINVAL_ACL: invalid ACL specifier
+ *   VAPI_EBUSY: memory region still has bound window(s)
+ *   VAPI_EPERM: not enough permissions.
+ *   VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *                  has returned an error.
+ *
+ * Description:
+ *   Reregisters the memory region associated with mr_hndl. The changes to be
+ *   applied to the memory region are any combination of the following three
+ *   flags, specified in the change_type input modifier:
+ *
+ *   MR_CHANGE_TRANS - Change translation. req_mrw_p should contain the new start
+ *   and size of the region, as well as the new MR type in mr_type
+ *   (VAPI_MR, VAPI_MSHAR, VAPI_MPR).
+ *
+ *   MR_CHANGE_PD - Change the PD associated with this region. req_mrw_p should
+ *   contain the new PD.
+ *
+ *   MR_CHANGE_ACL - Change the ACL. req_mrw_p should contain the new ACL for
+ *   this region.
+ *
+ *   For registration of a physical MR, the caller should also fill the fields
+ *   iova_offset (offset of virt. start address from page start), pbuf_list_p
+ *   (list of physical buffers) and pbuf_list_len.
+ *
+ *   Upon successful completion, the verb returns the new handle for this memory
+ *   region, which may or may not be identical to the original one, but must be
+ *   used for further references to this region. The LKey and the RKey (only when
+ *   remote access permission was granted) are returned in rep_mrw_p.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_reregister_mr(
+  IN  VAPI_hca_hndl_t   hca_hndl,
+  IN  VAPI_mr_hndl_t    mr_hndl,
+  IN  VAPI_mr_change_t  change_type,
+  IN  VAPI_mr_t        *req_mrw_p,
+  OUT VAPI_mr_hndl_t   *rep_mr_hndl_p,
+  OUT VAPI_mr_t        *rep_mrw_p
+  );
+
+/*************************************************************************
+ * Function: VAPI_register_smr
+ *
+ * Arguments:
+ *   hca_hndl    : HCA handle.
+ *   orig_mr_hndl: Original memory region handle.
+ *   req_mrw_p   : Pointer to the requested memory region properties
+ *                 (valid fields: pd, ACL, start virtual address)
+ *   mr_hndl_p   : Pointer to the returned memory region handle.
+ *   rep_mrw_p   : Pointer to the returned memory region properties.
+
+
+/*************************************************************************
+ * Function: VAPI_register_smr
+ *
+ * Arguments:
+ * hca_hndl: HCA handle.
+ * orig_mr_hndl: Original memory region handle.
+ * req_mrw_p: Pointer to the requested memory region properties
+ *            (valid fields: pd_hndl, acl, start virtual address)
+ * mr_hndl_p: Pointer to the returned memory region handle.
+ * rep_mrw_p: Pointer to the returned memory region properties.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EAGAIN: out of resources
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_VA: invalid virtual address
+ * VAPI_EINVAL_MR_HNDL: invalid MR handle
+ * VAPI_EINVAL_PD_HNDL: invalid PD handle
+ * VAPI_EINVAL_ACL: invalid ACL specifier
+ * VAPI_EPERM: not enough permissions.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * Registers a shared memory region associated with the same physical buffers of an
+ * existing memory region referenced by orig_mr_hndl. req_mrw_p is a pointer to the
+ * requested memory region properties. The structure should contain the requested
+ * start virtual address (start field), the protection domain handle and the ACL.
+ *
+ * Upon successful completion, the new memory region handle is returned in mr_hndl_p
+ * and the structure pointed to by rep_mrw_p (of type VAPI_mr_t) contains the
+ * actually assigned virtual address (start field), the LKey and the RKey (only if
+ * remote access rights were requested).
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_register_smr(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_mr_hndl_t   orig_mr_hndl,
+                      IN  VAPI_mr_t       *req_mrw_p,
+                      OUT VAPI_mr_hndl_t  *mr_hndl_p,
+                      OUT VAPI_mr_t       *rep_mrw_p
+                      );
+
+
+/*************************************************************************
+ * Function: VAPI_alloc_mw
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * pd: Protection Domain Handle.
+ * mw_hndl_p: Pointer to the newly allocated window handle.
+ * rkey_p: Pointer to the window's unbound RKey
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EAGAIN
+ * VAPI_EINVAL_HCA_HNDL
+ * VAPI_EINVAL_PD_HNDL
+ * VAPI_EPERM: not enough permissions.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * Allocates a memory window object that can later be bound to an RKey.
+ *
+ * The MMU will validate that there are enough resources available for this
+ * allocation.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_alloc_mw(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_pd_hndl_t   pd,
+                      OUT VAPI_mw_hndl_t  *mw_hndl_p,
+                      OUT VAPI_rkey_t     *rkey_p
+                      );
+
+
+/*************************************************************************
+ * Function: VAPI_query_mw
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * mw_hndl: Window Handle.
+ * rkey_p: Pointer to the window's RKey.
+ * pd_p: Pointer to the window's Protection Domain Handle.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL
+ * VAPI_EINVAL_MW_HNDL
+ * VAPI_EPERM: not enough permissions.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * This call returns the current PD associated with the memory window; it is
+ * retrieved from the PDA (no access to HW required).
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_query_mw(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_mw_hndl_t   mw_hndl,
+                      OUT VAPI_rkey_t     *rkey_p,
+                      OUT VAPI_pd_hndl_t  *pd_p
+                      );
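+
+/* A hedged end-to-end sketch of the memory window verbs (hypothetical handle
+ * names; the bind step uses VAPI_bind_mw, documented below):
+ *
+ *     VAPI_mw_hndl_t mw;
+ *     VAPI_rkey_t    rkey;
+ *     if (VAPI_alloc_mw(hca, pd, &mw, &rkey) == VAPI_OK) {
+ *         ... bind with VAPI_bind_mw and use the returned RKey remotely ...
+ *         VAPI_dealloc_mw(hca, mw);
+ *     }
+ */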
+
+
+/*************************************************************************
+ * Function: VAPI_bind_mw
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * mw_hndl: Memory window handle.
+ * bind_prop_p: Binding properties.
+ * qp: QP to use for posting this binding request
+ * id: Work request ID to be used in this binding request
+ * comp_type: Create a CQE or not (for QPs set to signaling per request)
+ * new_rkey_p: Pointer to the RKey of the bound window.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL
+ * VAPI_EINVAL_MW_HNDL
+ * VAPI_EINVAL_PARAM
+ * VAPI_EAGAIN
+ *
+ * Description:
+ *
+ * This call is performed completely in user mode. On completion, the posted
+ * descriptor will return an RKey that can be used in subsequent remote accesses to
+ * the bound memory region.
+ *
+ * Success of the operation is reported either through one of the immediate errors
+ * specified above or through the completion entry of the bind window operation.
+ *
+ * The VAPI_bind_mw call is equivalent to the posting of descriptors. The
+ * implication of this is that both the MMU and the JOD will have to be involved in
+ * this call.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_bind_mw(
+                      IN  VAPI_hca_hndl_t        hca_hndl,
+                      IN  VAPI_mw_hndl_t         mw_hndl,
+                      IN  const VAPI_mw_bind_t*  bind_prop_p,
+                      IN  VAPI_qp_hndl_t         qp,
+                      IN  VAPI_wr_id_t           id,
+                      IN  VAPI_comp_type_t       comp_type,
+                      /* IN MT_bool fence, - This should be added in order to be IB 1.1 compliant */
+                      OUT VAPI_rkey_t*           new_rkey_p
+                      );
+
+
+/*************************************************************************
+ * Function: VAPI_dealloc_mw
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * mw_hndl: Memory window handle.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EAGAIN
+ * VAPI_EINVAL_HCA_HNDL
+ * VAPI_EINVAL_MW_HNDL
+ * VAPI_EPERM: not enough permissions.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * Deallocates a memory window object.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_dealloc_mw(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_mw_hndl_t   mw_hndl
+                      );
+
+/*******************************************
+ * 11.3 Multicast Group
+ *******************************************/
+
+
+/*************************************************************************
+ * Function: VAPI_attach_to_multicast
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * mcg_dgid: GID address of the multicast group
+ * qp_hndl: QP Handle
+ * mcg_dlid: LID of the MCG. Currently ignored
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EAGAIN - Insufficient resources to complete request
+ * VAPI_E2BIG_MCG_SIZE - Number of QPs attached to multicast groups exceeded.
+ * VAPI_EINVAL_MCG_GID - Invalid multicast DGID
+ * VAPI_EINVAL_QP_HNDL - Invalid QP handle
+ * VAPI_EINVAL_HCA_HNDL - Invalid HCA handle
+ * VAPI_EINVAL_SERVICE_TYPE - Invalid Service Type for this QP.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * Attaches the QP to a multicast group.
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_attach_to_multicast(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  IB_gid_t         mcg_dgid,
+                      IN  VAPI_qp_hndl_t   qp_hndl,
+                      IN  IB_lid_t         mcg_dlid);
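+
+/* For illustration (hypothetical handle names; since mcg_dlid is currently
+ * ignored, 0 is passed here):
+ *
+ *     IB_gid_t mgid;   .. filled with a well-formed multicast GID ..
+ *     if (VAPI_attach_to_multicast(hca, mgid, qp, 0) == VAPI_OK) {
+ *         ... receive multicast traffic on qp ...
+ *         VAPI_detach_from_multicast(hca, mgid, qp, 0);
+ *     }
+ */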
+
+
+/*************************************************************************
+ * Function: VAPI_detach_from_multicast
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * mcg_dgid: Multicast group GID
+ * qp_hndl: QP Handle
+ * mcg_dlid: DLID of the MCG. Currently ignored
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL - Invalid HCA handle
+ * VAPI_EINVAL_MCG_GID - Invalid multicast DGID
+ * VAPI_EINVAL_QP_HNDL - Invalid QP handle
+ * VAPI_EINVAL_SERVICE_TYPE - Invalid Service Type for this QP.
+ * VAPI_ESYSCALL: A procedure call to the underlying O/S (ioctl)
+ *               has returned an error.
+ *
+ * Description:
+ *
+ * Detaches the QP from a multicast group.
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_detach_from_multicast(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  IB_gid_t         mcg_dgid,
+                      IN  VAPI_qp_hndl_t   qp_hndl,
+                      IN  IB_lid_t         mcg_dlid);
+
+/*******************************************
+ * 11.4 Work Request Processing
+ *******************************************/
+
+/* Queue Pair Operations */
+
+/*************************************************************************
+ * Function: VAPI_post_sr
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * qp_hndl: QP Handle.
+ * sr_desc_p: Pointer to the send request descriptor attributes structure.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_QP_HNDL: invalid QP handle
+ * VAPI_E2BIG_WR_NUM: Too many posted work requests.
+ * VAPI_EINVAL_OP: invalid operation
+ * VAPI_EINVAL_QP_STATE: invalid QP state
+ * VAPI_EINVAL_SG_FMT: invalid scatter/gather list format
+ * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length
+ * VAPI_EINVAL_AH: invalid address handle
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ * The verb posts a send queue work request, the properties of which are specified
+ * in the structure pointed to by sr_desc_p, which is of type VAPI_sr_desc_t
+ * (defined in vapi_types.h). The sg_lst_p field points to a gather list, of length
+ * sg_lst_len, which is an array of local buffers used as the source of the data to
+ * be transmitted in this Work Request.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_post_sr(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_qp_hndl_t   qp_hndl,
+                      IN  VAPI_sr_desc_t  *sr_desc_p
+                      );
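+
+/* A minimal send post, as a sketch (hypothetical handles and buffer names; the
+ * gather entry layout follows VAPI_sg_lst_entry_t in vapi_types.h):
+ *
+ *     VAPI_sg_lst_entry_t sg = { buf_addr, buf_len, lkey };
+ *     VAPI_sr_desc_t      sr;
+ *     memset(&sr, 0, sizeof(sr));
+ *     sr.id         = wr_id;
+ *     sr.opcode     = VAPI_SEND;
+ *     sr.comp_type  = VAPI_SIGNALED;
+ *     sr.sg_lst_p   = &sg;
+ *     sr.sg_lst_len = 1;
+ *     ret = VAPI_post_sr(hca, qp, &sr);
+ */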
+
+
+/*************************************************************************
+ * Function: VAPI_post_sr2
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * qp_hndl: QP Handle.
+ * comp_type: Completion type for this request.
+ * remote_ah: Address vector handle of the remote destination.
+ * WorkReq: Pointer to the IB work request describing the send operation.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_QP_HNDL: invalid QP handle
+ * VAPI_E2BIG_WR_NUM: Too many posted work requests.
+ * VAPI_EINVAL_OP: invalid operation
+ * VAPI_EINVAL_QP_STATE: invalid QP state
+ * VAPI_EINVAL_NOTIF_TYPE: invalid completion notification type
+ * VAPI_EINVAL_SG_FMT: invalid scatter/gather list format
+ * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length
+ * VAPI_EINVAL_AH: invalid address handle
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * Each posted descriptor will cause the JOD to prepare a PS_IDF (Post Send
+ * Independent Descriptor Format), which will be delivered to the HCAHAL using
+ * HH_Post_SR. The HCAHAL will be responsible for the translation of the PS_IDF
+ * into a PS_DDF (Post Send Dependent Descriptor Format).
+ *
+ * The verb posts a send queue work request, the properties of which are specified
+ * in the structure pointed to by the work request argument.
+ *
+ * The gather list is an array of local buffers used as the source of the data to be
+ * transmitted in this Work Request. Each entry in this array is a
+ * VAPI_sg_lst_entry_t (defined in vapi_types.h).
+ *
+ *************************************************************************/
+VAPI_ret_t VAPI_post_sr2(
+                      IN  VAPI_hca_hndl_t    hca_hndl,
+                      IN  VAPI_qp_hndl_t     qp_hndl,
+                      IN  VAPI_comp_type_t   comp_type,
+                      IN  VAPI_ud_av_hndl_t  remote_ah,
+                      IN  void*              WorkReq
+                      );
+
+/************************************************************************
+ * Function: VAPI_post_rr
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * qp_hndl: QP Handle.
+ * rr_desc_p: Pointer to the receive request descriptor attributes structure.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_QP_HNDL: invalid QP handle
+ * VAPI_EINVAL_SRQ_HNDL: QP handle used for a QP associated with an SRQ (use VAPI_post_srq)
+ * VAPI_E2BIG_WR_NUM: Too many posted work requests.
+ * VAPI_EINVAL_OP: invalid operation
+ * VAPI_EINVAL_QP_STATE: invalid QP state
+ * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * The verb posts a receive request descriptor to the receive queue.
+ */
+VAPI_ret_t MT_API VAPI_post_rr(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_qp_hndl_t   qp_hndl,
+                      IN  VAPI_rr_desc_t  *rr_desc_p
+                      );
+
+/************************************************************************
+ * Function: VAPI_post_rr2
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle.
+ * qp_hndl: QP Handle.
+ * comp_type: Completion type for this request.
+ * sg_lst_len: Length of the scatter list.
+ * ReqId: Work request ID.
+ * sg_lst_p: Pointer to the scatter list describing the receive buffers.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_QP_HNDL: invalid QP handle
+ * VAPI_E2BIG_WR_NUM: Too many posted work requests.
+ * VAPI_EINVAL_OP: invalid operation
+ * VAPI_EINVAL_QP_STATE: invalid QP state
+ * VAPI_EINVAL_SG_NUM: invalid scatter/gather list length
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * Each posted descriptor will cause the JOD to prepare a PR_IDF (Post Receive
+ * Independent Descriptor Format), which will be delivered to the HCAHAL using
+ * HH_Post_RR. The HCAHAL will be responsible for the translation of the PR_IDF
+ * into a PR_DDF (Post Receive Dependent Descriptor Format).
+ *
+ * The verb posts a receive request descriptor to the receive queue. The receive
+ * request descriptor is of type VAPI_rr_desc_t (defined in vapi_types.h).
+ */
+VAPI_ret_t VAPI_post_rr2(
+                      IN  VAPI_hca_hndl_t       hca_hndl,
+                      IN  VAPI_qp_hndl_t        qp_hndl,
+                      IN  VAPI_comp_type_t      comp_type,
+                      IN  u_int32_t             sg_lst_len,
+                      IN  VAPI_wr_id_t          ReqId,
+                      IN  VAPI_sg_lst_entry_t  *sg_lst_p
+                      );
+
+/* Completion Queue Operations */
+
+
+/*************************************************************************
+ * Function: VAPI_poll_cq
+ *
+ * Arguments:
+ * hca_hndl: Handle to HCA.
+ * cq_hndl: CQ Handle.
+ * comp_desc_p: Pointer to the work completion descriptor structure.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_CQ_HNDL: invalid CQ handle
+ * VAPI_CQ_EMPTY: CQ is empty
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * This call will retrieve an ICQE (Independent Completion Queue Entry), which is a
+ * device independent structure used to retrieve the completion status of WRs posted
+ * to the Send/Receive Queue, including VAPI_bind_mw.
+ *
+ * The verb retrieves a completion queue entry into the descriptor pointed to by
+ * comp_desc_p, which is of type VAPI_wc_desc_t (defined in vapi_types.h).
+ *
+ * The remote_node_addr field is of type VAPI_remote_node_addr_t and is valid only
+ * for Datagram services.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_poll_cq(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_cq_hndl_t   cq_hndl,
+                      OUT VAPI_wc_desc_t  *comp_desc_p
+                      );
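+
+/* For illustration, draining a CQ until it is empty (a sketch with hypothetical
+ * handle names):
+ *
+ *     VAPI_wc_desc_t wc;
+ *     while (VAPI_poll_cq(hca, cq, &wc) == VAPI_OK) {
+ *         if (wc.status != VAPI_SUCCESS)
+ *             ... inspect wc.status and wc.vendor_err_syndrome ...
+ *     }
+ *     .. the loop ends when VAPI_poll_cq returns VAPI_CQ_EMPTY ..
+ */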
+
+/*************************************************************************
+ * Function: VAPI_poll_and_rearm_cq
+ *
+ * Arguments:
+ * hca_hndl: Handle to HCA.
+ * cq_hndl: CQ Handle.
+ * solicitedNotification: If false, request the next-completion notification.
+ * comp_desc_p: Pointer to the work completion descriptor structure.
+ *
+ * Returns:
+ * VAPI_OK: cqe is valid, cq has been rearmed, no more polling required
+ * VAPI_EAGAIN: cqe is valid, cq has not been rearmed, service cqe then poll again
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_CQ_HNDL: invalid CQ handle
+ * VAPI_CQ_EMPTY: cqe is not valid, cq has been rearmed, no more polling required
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * This call will retrieve an ICQE (Independent Completion Queue Entry), which is a
+ * device independent structure used to retrieve the completion status of WRs posted
+ * to the Send/Receive Queue, including VAPI_bind_mw, and will rearm the CQ as
+ * reported by the return codes above.
+ *
+ * The verb retrieves a completion queue entry into the descriptor pointed to by
+ * comp_desc_p, which is of type VAPI_wc_desc_t (defined in vapi_types.h).
+ *
+ * The remote_node_addr field is of type VAPI_remote_node_addr_t and is valid only
+ * for Datagram services.
+ *
+ *************************************************************************/
+VAPI_ret_t VAPI_poll_and_rearm_cq(
+                      IN  VAPI_hca_hndl_t  hca_hndl,
+                      IN  VAPI_cq_hndl_t   cq_hndl,
+                      IN  int              solicitedNotification, /* false = next notification */
+                      OUT VAPI_wc_desc_t  *comp_desc_p
+                      );
+
+/*************************************************************************
+ * Function: VAPI_req_comp_notif
+ *
+ * Arguments:
+ * hca_hndl: Handle to HCA.
+ * cq_hndl: CQ Handle.
+ * notif_type: CQ Notification type.
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EINVAL_CQ_HNDL: invalid CQ handle
+ * VAPI_EINVAL_NOTIF_TYPE: invalid notify type
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * The verb requests a completion notification of the type specified in notif_type.
+ *
+ *************************************************************************/
+VAPI_ret_t MT_API VAPI_req_comp_notif(
+                      IN  VAPI_hca_hndl_t       hca_hndl,
+                      IN  VAPI_cq_hndl_t        cq_hndl,
+                      IN  VAPI_cq_notif_type_t  notif_type
+                      );
+
+/* TK - only later #ifdef __KERNEL__ */
+
+/*******************************************
+ * 11.5 Event Handling - the global functions exposed only to kernel modules
+ * See evapi.h for the user level functions
+ *******************************************/
+/*************************************************************************
+ * Function: VAPI_set_comp_event_handler (kernel space only)
+ *
+ * Arguments:
+ * hca_hndl: HCA Handle
+ * handler: Completion Event Handler function address.
+ * private_data: Pointer to handler context (handler specific).
+ *
+ * Returns:
+ * VAPI_OK
+ * VAPI_EINVAL_HCA_HNDL: invalid HCA handle
+ * VAPI_EPERM: not enough permissions.
+ *
+ * Description:
+ *
+ * Registers a completion event handler. Only one CQ event handler can be registered
+ * per HCA.
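+ *
+ * As a sketch (hypothetical names), a kernel consumer would typically register its
+ * handler once and then request notifications per CQ:
+ *
+ *     VAPI_set_comp_event_handler(hca, my_comp_handler, my_ctx);
+ *     VAPI_req_comp_notif(hca, cq, VAPI_NEXT_COMP);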
+ * + * Exposed only to kernel modules + * + * The CQ event handler function prototype is as follows: + * + * void + * VAPI_completion_event_handler + * ( + * IN VAPI_hca_hndl_t hca_hndl, + * IN VAPI_cq_hndl_t cq_hndl, + * IN void *private_data + * ) + * + * + * + * + * + *************************************************************************/ +#ifdef __KERNEL__ +VAPI_ret_t MT_API VAPI_set_comp_event_handler( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_completion_event_handler_t handler, + IN void* private_data + ); +#endif + +/************************************************************************* + * Function: VAPI_set_async_event_handler (kernel space only) + * + * Arguments: + * hca_hndl: HCA Handle + * handler: Async Event Handler function address. + * private_data: Pointer to handler context (handler specific). + * + * + * + * + * Returns: + * VAPI_OK + * VAPI_EINVAL_HCA_HNDL: invalid HCA handle + * VAPI_EPERM: not enough permissions. + * + * + * + * Description: + * + * Registers an async event handler. + * Exposed only to kernel modules + * + * The event handler function prototype is as follows: + * + * void + * VAPI_async_event_handler + * ( + * IN VAPI_hca_hndl_t hca_hndl, + * IN VAPI_event_record_t *event_record_p, + * IN void *private_data + * ) + * + * + *************************************************************************/ +#ifdef __KERNEL__ +VAPI_ret_t MT_API VAPI_set_async_event_handler( + IN VAPI_hca_hndl_t hca_hndl, + IN VAPI_async_event_handler_t handler, + IN void* private_data + ); +#endif /* KERNEL */ + +#endif /*H_VAPI_H*/ + diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_features.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_features.h new file mode 100644 index 00000000..5ab730f5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_features.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_VAPI_FEATURES_H +#define H_VAPI_FEATURES_H + +/* VAPI_ features macros */ +#define VAPI_FEATURE_APM /* Automatic Path migration support */ +#define VAPI_FEATURE_ETIMEOUT /* defined new return code VAPI_ETIMEOUT */ +#define VAPI_FEATURE_RESIZE_CQ +#define VAPI_FEATURE_RESOURCE_TRACKING /* User level resource tracking supported */ +#define VAPI_FEATURE_SMR_VALIDATION +#define VAPI_FEATURE_ALT_RETRY_OBSOLETE /* retry count and rnr_retry are per QP, not per path */ +#define VAPI_FEATURE_DESTR_QP_FAIL_IF_MCG /* destroy QP fails if QP is attached to a mcg */ +#define VAPI_FEATURE_SRQ /* SRQ (Shared Receive Queue) support */ +#define VAPI_FEATURE_MODIFY_SRQ /* VAPI_modify_srq supported */ +#define VAPI_FEATURE_CQE_WITH_QP /* QP number included in VAPI_wc_desc_t */ +/* to be enabled when SQ Draining is entirely fixed*/ +/* #define VAPI_FEATURE_SQD */ + +/* EVAPI features macros */ +#define EVAPI_FEATURE_DP_HNDL_CHK /* Data path handles validation */ +#define EVAPI_FEATURE_FMR /* Fast Memory Regions support */ +#define EVAPI_FEATURE_INLINE_SR /* Inline send */ +#define EVAPI_FEATURE_CQBLK /* EVAPI_poll_cq_block() */ +#define EVAPI_FEATURE_PEEK_CQ /* EVAPI_peek_cq() */ +#define EVAPI_FEATURE_REQ_NCOMP_NOTIF /* EVAPI_req_ncomp_notif() */ +#define EVAPI_FEATURE_ALLOC_PD_AV /* EVAPI_alloc_pd() */ +#define EVAPI_FEATURE_PROC_MAD_OPTS /*change in EVAPI_process_local_mad() arglist */ +#define EVAPI_FEATURE_APM +#define EVAPI_FEATURE_DEVMEM +#define EVAPI_FEATURE_DEVMEM2 +#define EVAPI_FEATURE_LOCAL_MAD_BAD_PARAM +#define EVAPI_FEATURE_OPEN_CLOSE_HCA +#define EVAPI_FEATURE_ASYNC_EVENTH +#define EVAPI_FEATURE_LOCAL_MAD_SLID +#define EVAPI_FEATURE_USER_PROFILE /* user profile. Affects EVAPI_open_hca() arlist*/ +#define EVAPI_FEATURE_VENDOR_ERR_SYNDROME /* EVAPI_vendor_err_syndrome_t in VAPI_wc_desc_t */ +#define EVAPI_FEATURE_GSI_SEND_PKEY /* EVAPI_post_gsi_sr with Pkey parameter */ +#define EVAPI_FEATURE_ALLOC_PD_AV_SQP /* EVAPI_alloc_pd_sqp() */ +#define EVAPI_FEATURE_FORK_SUPPORT /* support fork in multithreaded apps */ +#define EVAPI_FEATURE_POLL_AND_REARM_CQ /* VAPI_poll_and_rearm_cq */ +#define EVAPI_FEATURE_POST_RR2 +#define EVAPI_FEATURE_POST_SR2 + +/* Fixed bugs (FlowManager issue numbers) */ +#define BUG_FIX_FM12831 +#define BUG_FIX_FM12549 +#define BUG_FIX_FM16939 + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_types.h b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_types.h new file mode 100644 index 00000000..b1bcb62c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/Hca/verbs/vapi_types.h @@ -0,0 +1,877 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_VAPI_TYPES_H +#define H_VAPI_TYPES_H + +#include +#include + + +/* This is same as constant in MDOAL and MDHAL */ +#define HCA_MAXNAME 32 + + +/* + * HCA Types + * + * + */ +typedef char VAPI_hca_id_t[HCA_MAXNAME]; /* NULL terminated string of up to HCA_MAXNAME-1 chars */ +typedef u_int32_t VAPI_hca_hndl_t; /* HCA handle */ +typedef MT_ulong_ptr_t VAPI_pd_hndl_t; /* Protection Domain handle */ +typedef MT_ulong_ptr_t VAPI_ud_av_hndl_t; /* UD Address Vector Handle */ +typedef MT_ulong_ptr_t VAPI_cq_hndl_t; /* Completion Queue handle */ +typedef MT_ulong_ptr_t VAPI_qp_hndl_t; /* Queue Pair handle */ +typedef MT_ulong_ptr_t VAPI_srq_hndl_t; /* Shared Receive Queue handle */ +typedef u_int32_t VAPI_devmem_hndl_t; +typedef u_int32_t VAPI_mr_hndl_t; /* Memory Region Handle */ +typedef MT_ulong_ptr_t VAPI_mw_hndl_t; /* Memory Window Handle */ +typedef u_int32_t VAPI_eec_hndl_t; /* E2E Context Handle */ +typedef u_int32_t VAPI_pkey_ix_t; /* Pkey index */ +typedef u_int32_t VAPI_rdd_hndl_t; /* RD domain handle */ +typedef u_int32_t VAPI_key_counter_t; /* QKEY/PKEY violation counter type */ +typedef IB_wqpn_t VAPI_qp_num_t; /* QP number (24 bits) */ +typedef IB_eecn_t VAPI_eec_num_t; /* EEC number (24 bits) */ +typedef u_int8_t VAPI_sched_queue_t; /* Schedule queue index */ +typedef IB_psn_t VAPI_psn_t; /* PSN number (24 bits) */ +typedef IB_qkey_t VAPI_qkey_t; /* QKey (32 bits) */ +typedef u_int8_t VAPI_retry_count_t; /* Number of retries */ +typedef u_int8_t VAPI_timeout_t; /* Timeout= 4.096usec * 2^ */ +typedef u_int32_t VAPI_cqe_num_t; /* number of entries in CQ*/ +typedef u_int32_t VAPI_lkey_t; /* L Key */ +typedef IB_rkey_t VAPI_rkey_t; /* R Key */ +typedef IB_virt_addr_t VAPI_virt_addr_t; /* Virtual Address (/Length) */ +typedef u_int64_t VAPI_phy_addr_t; /* Physical address (/length)*/ +typedef u_int64_t VAPI_size_t; +typedef u_int64_t VAPI_wr_id_t; /* Work request id */ +typedef u_int32_t VAPI_imm_data_t; /* Immediate data */ +typedef u_int16_t VAPI_ethertype_t; /* Ethertype */ +/* TBD: Those two types below may be removed... 
(check references) */ +typedef IB_gid_t VAPI_gid_t; /* GID */ +typedef IB_pkey_t VAPI_pkey_t; /* PKey (16 bits) */ + +/* Use the following macro to init handles and denote uninitialized handles */ +#define VAPI_INVAL_HNDL ((MT_long_ptr_t)-1) +#define VAPI_INVAL_SRQ_HNDL VAPI_INVAL_HNDL +#define VAPI_INVAL_PD_HNDL VAPI_INVAL_HNDL + +#define EVAPI_DEFAULT_AVS_PER_PD 0xFFFFFFFF + +/* HCA Cap Flags */ +typedef enum { + VAPI_RESIZE_OUS_WQE_CAP = 1, + VAPI_BAD_PKEY_COUNT_CAP = (1<<1), + VAPI_BAD_QKEY_COUNT_CAP = (1<<2), + VAPI_RAW_MULTI_CAP = (1<<3), + VAPI_AUTO_PATH_MIG_CAP = (1<<4), + VAPI_CHANGE_PHY_PORT_CAP = (1<<5), + VAPI_UD_AV_PORT_ENFORCE_CAP = (1<<6), /* IBTA comment #1821 */ + VAPI_CURR_QP_STATE_MOD_CAP = (1<<7), /*IB Spec 1.09 sec 11.2.1.2 */ + VAPI_SHUTDOWN_PORT_CAP = (1<<8), /*IB Spec 1.09 sec 11.2.1.2 */ + VAPI_INIT_TYPE_CAP = (1<<9), /*IB Spec 1.09 sec 11.2.1.2 */ + VAPI_PORT_ACTIVE_EV_CAP = (1<<10), /*IB Spec 1.09 sec 11.2.1.2 */ + VAPI_SYS_IMG_GUID_CAP = (1<<11), /*IB Spec 1.09 sec 11.2.1.2 */ + VAPI_RC_RNR_NAK_GEN_CAP = (1<<12) /*IB Spec 1.09 sec 11.2.1.2 */ +} VAPI_hca_cap_flags_t; + + +/* HCA attributes mask enumeration */ +typedef enum { + HCA_ATTR_IS_SM = 1, + HCA_ATTR_IS_SNMP_TUN_SUP = 2, + HCA_ATTR_IS_DEV_MGT_SUP = 4, + HCA_ATTR_IS_VENDOR_CLS_SUP = 8, + HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP = 16, + HCA_ATTR_MAX = 32 /*Dummy enum entry: always keep it the last one */ +} VAPI_hca_attr_mask_enum_t; + + +/* HCA attributes mask */ +typedef u_int32_t VAPI_hca_attr_mask_t; + +#define HCA_ATTR_MASK_CLR_ALL(mask) ((mask)=0) +#define HCA_ATTR_MASK_SET_ALL(mask) ((mask)=(HCA_ATTR_MAX-1)) +#define HCA_ATTR_MASK_SET(mask,attr) ((mask)|=(attr)) +#define HCA_ATTR_MASK_CLR(mask,attr) ((mask)&=(~(attr))) +#define HCA_ATTR_IS_FLAGS_SET(mask) (((mask)&(\ + HCA_ATTR_IS_SM|\ + HCA_ATTR_IS_SNMP_TUN_SUP|\ + HCA_ATTR_IS_DEV_MGT_SUP|\ + HCA_ATTR_IS_VENDOR_CLS_SUP|\ + HCA_ATTR_IS_CLIENT_REREGISTRATION_SUP))!=0) +#define HCA_ATTR_IS_SET(mask,attr) (((mask)&(attr))!=0) + +/* QP attributes mask enumeration */ +typedef enum { + QP_ATTR_QP_STATE = 0x1, /* QP next state */ + QP_ATTR_EN_SQD_ASYN_NOTIF = 0x2, /* Enable SQD affiliated asynchronous event notification */ + QP_ATTR_QP_NUM = 0x4, /* Queue Pair Number. [Mellanox specific] */ + QP_ATTR_REMOTE_ATOMIC_FLAGS = 0x8, /* Enable/Disable RDMA and atomic */ + QP_ATTR_PKEY_IX = 0x10, /* Primary PKey index */ + QP_ATTR_PORT = 0x20, /* Primary port */ + QP_ATTR_QKEY = 0x40, /* QKey (UD/RD only) */ + QP_ATTR_AV = 0x80, /* Primary remote node address vector (RC/UC QP only) */ + QP_ATTR_PATH_MTU = 0x100, /* Path MTU : 6 bits (connected services only) */ + QP_ATTR_TIMEOUT = 0x200, /* Local Ack Timeout (RC only) */ + QP_ATTR_RETRY_COUNT = 0x400, /* retry count (RC only) */ + QP_ATTR_RNR_RETRY = 0x800, /* RNR retry count (RC only) */ + QP_ATTR_RQ_PSN = 0x1000, /* Packet Sequence Number for RQ */ + QP_ATTR_QP_OUS_RD_ATOM = 0x2000, /* Maximum number of oust. 
RDMA read/atomic as target */ + QP_ATTR_ALT_PATH = 0x4000, /* Alternate remote node address vector */ + QP_ATTR_RSRV_1 = 0x8000, /* reserved */ + QP_ATTR_RSRV_2 = 0x10000, /* reserved */ + QP_ATTR_RSRV_3 = 0x20000, /* reserved */ + QP_ATTR_RSRV_4 = 0x40000, /* reserved */ + QP_ATTR_RSRV_5 = 0x80000, /* reserved */ + QP_ATTR_RSRV_6 = 0x100000, /* reserved */ + QP_ATTR_MIN_RNR_TIMER = 0x200000, /* Minimum RNR NAK timer */ + QP_ATTR_SQ_PSN = 0x400000, /* Packet sequence number for SQ */ + QP_ATTR_OUS_DST_RD_ATOM = 0x800000, /* Number of outstanding RDMA rd/atomic ops at destination */ + QP_ATTR_PATH_MIG_STATE = 0x1000000, /* Migration state */ + QP_ATTR_CAP = 0x2000000, /* QP capabilities max_sq/rq_ous_wr only valid */ + QP_ATTR_DEST_QP_NUM = 0x4000000, /* Destination QP number (RC/UC) */ + QP_ATTR_SCHED_QUEUE = 0x8000000 /* Schedule queue for QoS association */ +} VAPI_qp_attr_mask_enum_t; + +/* QP attributes mask */ +typedef u_int32_t VAPI_qp_attr_mask_t; + +#define QP_ATTR_MASK_CLR_ALL(mask) ((mask)=0) +#define QP_ATTR_MASK_SET_ALL(mask) ((mask)=(0x07FFFFFF)) +#define QP_ATTR_MASK_SET(mask,attr) ((mask)=((mask)|(attr))) +#define QP_ATTR_MASK_CLR(mask,attr) ((mask)=((mask)&(~(attr)))) +#define QP_ATTR_IS_SET(mask,attr) (((mask)&(attr))!=0) + + +/* HCA Atomic Operation Capabilities */ +typedef enum { + VAPI_ATOMIC_CAP_NONE, /* No Atomic ops supported */ + VAPI_ATOMIC_CAP_HCA, /* Atomic cap supported within this HCA QPs */ + VAPI_ATOMIC_CAP_GLOB /* Atomicity supported among all entities in this sytem */ +} VAPI_atomic_cap_t; + +/* Signalling type */ +typedef enum { + VAPI_SIGNAL_ALL_WR, + VAPI_SIGNAL_REQ_WR +} VAPI_sig_type_t; + +/* Transport Service Type */ +enum { + VAPI_TS_RC=IB_TS_RC, + VAPI_TS_RD=IB_TS_RD, + VAPI_TS_UC=IB_TS_UC, + VAPI_TS_UD=IB_TS_UD, + VAPI_TS_RAW=IB_TS_RAW, + VAPI_NUM_TS_TYPES +}; +typedef IB_ts_t VAPI_ts_type_t; + +/* The following value to be used for reserved GRH buffer space in UD RQ */ +/* (The offset of the payload in buffers posted to the UD RQ) */ +#define VAPI_GRH_LEN 40 + +/* QP state */ +enum { + VAPI_RESET,VAPI_INIT,VAPI_RTR,VAPI_RTS,VAPI_SQD,VAPI_SQE,VAPI_ERR +}; +typedef u_int32_t VAPI_qp_state_t; + +/* Migration state */ +typedef enum { + VAPI_MIGRATED, VAPI_REARM, VAPI_ARMED +} VAPI_mig_state_t; + +/* Special QP Types */ +enum { + VAPI_REGULAR_QP= 0, /* Encoding for non-special QP */ + VAPI_SMI_QP, VAPI_GSI_QP, VAPI_RAW_IPV6_QP, VAPI_RAW_ETY_QP +}; +typedef u_int32_t VAPI_special_qp_t; + +/* Just a generic name for a type used to identify QP type */ +typedef VAPI_special_qp_t VAPI_qp_type_t; + +/* RDMA/Atomic Access Control */ +typedef enum { + VAPI_EN_REM_WRITE=1, VAPI_EN_REM_READ=2, VAPI_EN_REM_ATOMIC_OP=4 +} +VAPI_rdma_atom_acl_enum_t; +typedef u_int32_t VAPI_rdma_atom_acl_t; + +/* Memory region/window types */ +typedef enum { + VAPI_MR, /* Memory region */ + VAPI_MW, /* Memory Window */ + VAPI_MPR, /* Physical memory region */ + VAPI_MSHAR /* Shared memory region */ +} VAPI_mrw_type_t; + +/* Remote node address type */ +typedef enum { + VAPI_RNA_RD, + VAPI_RNA_UD, + VAPI_RNA_RAW_ETY, + VAPI_RNA_RAW_IPV6 +} VAPI_remote_node_addr_type_t; + +/* Memory region/window ACLs */ +enum { + VAPI_EN_LOCAL_WRITE= 1, + VAPI_EN_REMOTE_WRITE= 1<<1, + VAPI_EN_REMOTE_READ= 1<<2, + VAPI_EN_REMOTE_ATOM= 1<<3, + VAPI_EN_MEMREG_BIND= 1<<4 +}; +typedef u_int32_t VAPI_mrw_acl_t; + +/* Memory region change type */ +typedef enum { + VAPI_MR_CHANGE_TRANS= 1, + VAPI_MR_CHANGE_PD= 1<<1, + VAPI_MR_CHANGE_ACL= 1<<2 +} VAPI_mr_change_flags_t; + +typedef u_int32_t VAPI_mr_change_t; 
/* VAPI_mr_change_flags_t combination */ + +typedef enum { + EVAPI_DEVMEM_EXT_DRAM /* External attached SDRAM */ +}EVAPI_devmem_type_t; + + +/* Work requests opcodes */ +/* Note. The following enum must be maintained zero based without holes */ +typedef enum { + VAPI_RDMA_WRITE, + VAPI_RDMA_WRITE_WITH_IMM, + VAPI_SEND, + VAPI_SEND_WITH_IMM, + VAPI_RDMA_READ, + VAPI_ATOMIC_CMP_AND_SWP, + VAPI_ATOMIC_FETCH_AND_ADD, + VAPI_RECEIVE, + VAPI_NUM_OPCODES +} VAPI_wr_opcode_t; + +/* Completion Opcodes */ +typedef enum { + VAPI_CQE_SQ_SEND_DATA, + VAPI_CQE_SQ_RDMA_WRITE, + VAPI_CQE_SQ_RDMA_READ, + VAPI_CQE_SQ_COMP_SWAP, + VAPI_CQE_SQ_FETCH_ADD, + VAPI_CQE_SQ_BIND_MRW, + VAPI_CQE_RQ_SEND_DATA, + VAPI_CQE_RQ_RDMA_WITH_IMM, /* For RDMA Write Only */ + VAPI_CQE_INVAL_OPCODE = 0xFFFFFFFF /* special value to return on CQE with error */ +} VAPI_cqe_opcode_t; + + +/* Work completion status */ +enum { + VAPI_SUCCESS = IB_COMP_SUCCESS, + VAPI_LOC_LEN_ERR = IB_COMP_LOC_LEN_ERR, + VAPI_LOC_QP_OP_ERR = IB_COMP_LOC_QP_OP_ERR, + VAPI_LOC_EE_OP_ERR = IB_COMP_LOC_EE_OP_ERR, + VAPI_LOC_PROT_ERR = IB_COMP_LOC_PROT_ERR, + VAPI_WR_FLUSH_ERR = IB_COMP_WR_FLUSH_ERR, + VAPI_MW_BIND_ERR = IB_COMP_MW_BIND_ERR, + VAPI_BAD_RESP_ERR = IB_COMP_BAD_RESP_ERR, + VAPI_LOC_ACCS_ERR = IB_COMP_LOC_ACCS_ERR, + VAPI_REM_INV_REQ_ERR = IB_COMP_REM_INV_REQ_ERR, + VAPI_REM_ACCESS_ERR = IB_COMP_REM_ACCESS_ERR, + VAPI_REM_OP_ERR = IB_COMP_REM_OP_ERR, + VAPI_RETRY_EXC_ERR = IB_COMP_RETRY_EXC_ERR, + VAPI_RNR_RETRY_EXC_ERR = IB_COMP_RNR_RETRY_EXC_ERR, + VAPI_LOC_RDD_VIOL_ERR = IB_COMP_LOC_RDD_VIOL_ERR, + VAPI_REM_INV_RD_REQ_ERR = IB_COMP_REM_INV_RD_REQ_ERR, + VAPI_REM_ABORT_ERR= IB_COMP_REM_ABORT_ERR, + VAPI_INV_EECN_ERR = IB_COMP_INV_EECN_ERR, + VAPI_INV_EEC_STATE_ERR = IB_COMP_INV_EEC_STATE_ERR, +/* VAPI_COMP_LOC_TOUT = IB_COMP_LOC_TOUT,*/ /* Use VAPI_RETRY_EXC_ERR instead */ +/* VAPI_COMP_RNR_TOUT = IB_COMP_RNR_TOUT,*/ /* Use VAPI_RNR_RETRY_EXC_ERR instead */ + + VAPI_COMP_FATAL_ERR = IB_COMP_FATAL_ERR, + VAPI_COMP_GENERAL_ERR = IB_COMP_GENERAL_ERR +}; +typedef u_int32_t VAPI_wc_status_t; + +/* Vendor specific error syndrome */ +typedef u_int32_t EVAPI_vendor_err_syndrome_t; + +/* work request completion type */ +typedef enum { + VAPI_SIGNALED, VAPI_UNSIGNALED +} VAPI_comp_type_t; + +/* Completion Notification Type */ +typedef enum { + VAPI_NOTIF_NONE, /* No completion notification requested */ + VAPI_SOLIC_COMP, /* Notify on solicited completion event only */ + VAPI_NEXT_COMP /* Notify on next completion */ +} VAPI_cq_notif_type_t; + +typedef VAPI_cq_hndl_t EVAPI_compl_handler_hndl_t; /* EVAPI completion handler handle */ + +typedef u_int32_t VAPI_k_cq_hndl_t; /* Kernel level CQ access */ + +/* Completion Event Handler Pointer */ +typedef void (MT_API *VAPI_completion_event_handler_t)( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ VAPI_cq_hndl_t cq_hndl, + /*IN*/ void* private_data + ); + +/* CQ destruction callback */ +typedef void (MT_API *EVAPI_destroy_cq_cbk_t)( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_cq_hndl_t k_cq_hndl, + IN void* private_data + ); + + +typedef u_int32_t VAPI_k_qp_hndl_t; /* Kernel level QP access */ + +typedef void (MT_API *EVAPI_destroy_qp_cbk_t)( + IN VAPI_hca_hndl_t k_hca_hndl, + IN VAPI_k_qp_hndl_t k_qp_hndl, + IN void* private_data + ); + + +/* Event Record Event Types, valid modifier mentionned where unclear */ +typedef enum { + VAPI_QP_PATH_MIGRATED, /*QP*/ + VAPI_EEC_PATH_MIGRATED, /*EEC*/ + VAPI_QP_COMM_ESTABLISHED, /*QP*/ + VAPI_EEC_COMM_ESTABLISHED, /*EEC*/ + VAPI_SEND_QUEUE_DRAINED, /*QP*/ + 
VAPI_RECEIVE_QUEUE_DRAINED, /*QP (Last WQE Reached) */ + VAPI_SRQ_LIMIT_REACHED, /*SRQ*/ + VAPI_SRQ_CATASTROPHIC_ERROR, /*SRQ*/ + VAPI_CQ_ERROR, /*CQ*/ + VAPI_LOCAL_WQ_INV_REQUEST_ERROR, /*QP*/ + VAPI_LOCAL_WQ_ACCESS_VIOL_ERROR, /*QP*/ + VAPI_LOCAL_WQ_CATASTROPHIC_ERROR, /*QP*/ + VAPI_PATH_MIG_REQ_ERROR, /*QP*/ + VAPI_LOCAL_EEC_CATASTROPHIC_ERROR, /*EEC*/ + VAPI_LOCAL_CATASTROPHIC_ERROR, /*none*/ + VAPI_PORT_ERROR, /*PORT*/ + VAPI_PORT_ACTIVE, /*PORT*/ + VAPI_CLIENT_REREGISTER +} VAPI_event_record_type_t; + + +typedef enum { + VAPI_EV_SYNDROME_NONE, /* no special syndrom for this event */ + VAPI_CATAS_ERR_FW_INTERNAL, + VAPI_CATAS_ERR_EQ_OVERFLOW, + VAPI_CATAS_ERR_MISBEHAVED_UAR_PAGE, + VAPI_CATAS_ERR_UPLINK_BUS_ERR, + VAPI_CATAS_ERR_HCA_DDR_DATA_ERR, + VAPI_CATAS_ERR_INTERNAL_PARITY_ERR, + VAPI_CATAS_ERR_MASTER_ABORT, + VAPI_CATAS_ERR_GO_BIT, + VAPI_CATAS_ERR_CMD_TIMEOUT, + VAPI_CATAS_ERR_FATAL_CR, /* unexpected read from CR space */ + VAPI_CATAS_ERR_FATAL_TOKEN, /* invalid token on command completion */ + VAPI_CATAS_ERR_GENERAL, /* reason is not known */ + VAPI_CQ_ERR_OVERRUN, + VAPI_CQ_ERR_ACCESS_VIOL, + VAPI_CATAS_ERR_FATAL_EXTERNAL /* externally generated artificial fatal error */ +} VAPI_event_syndrome_t; + + /* Event Record */ +typedef struct { + VAPI_event_record_type_t type; /* event record type */ + VAPI_event_syndrome_t syndrome; + union { + VAPI_qp_hndl_t qp_hndl; /* Affiliated QP handle */ + VAPI_srq_hndl_t srq_hndl; /* Affiliated SRQ handle */ + VAPI_eec_hndl_t eec_hndl; /* Affiliated EEC handle */ + VAPI_cq_hndl_t cq_hndl; /* Affiliated CQ handle */ + IB_port_t port_num; /* Affiliated Port number */ + } modifier; +} VAPI_event_record_t; + +/* Async Event Handler */ +typedef void (MT_API *VAPI_async_event_handler_t)( + /*IN*/ VAPI_hca_hndl_t hca_hndl, + /*IN*/ VAPI_event_record_t *event_record_p, + /*IN*/ void* private_data + ); + +typedef u_int32_t EVAPI_async_handler_hndl_t; /* EVAPI async event handler handle */ + + +/* HCA Verbs returns values */ +#define VAPI_ERROR_LIST \ +VAPI_ERROR_INFO( VAPI_OK, = 0 ,"Operation Completed Successfully") \ +VAPI_ERROR_INFO( VAPI_EGEN, =-255,"Generic error") \ +VAPI_ERROR_INFO( VAPI_EFATAL, EMPTY,"Fatal error (Local Catastrophic Error)") \ +VAPI_ERROR_INFO( VAPI_EAGAIN, EMPTY,"Resources temporary unavailable") \ +VAPI_ERROR_INFO( VAPI_ENOMEM, EMPTY,"Not enough memory") \ +VAPI_ERROR_INFO( VAPI_EBUSY, EMPTY,"Resource is busy") \ +VAPI_ERROR_INFO( VAPI_ETIMEOUT, EMPTY,"Operation timedout") \ +VAPI_ERROR_INFO( VAPI_EINTR, EMPTY,"Operation interrupted") \ +VAPI_ERROR_INFO( VAPI_EPERM, EMPTY,"Not enough permissions to perform operation")\ +VAPI_ERROR_INFO( VAPI_ENOSYS, EMPTY,"Not implemented") \ +VAPI_ERROR_INFO( VAPI_ESYSCALL, EMPTY,"Error in an underlying O/S call") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PARAM, EMPTY,"Invalid Parameter") \ +VAPI_ERROR_INFO( VAPI_EINVAL_HCA_HNDL, EMPTY,"Invalid HCA Handle.") \ +VAPI_ERROR_INFO( VAPI_EINVAL_HCA_ID, EMPTY,"Invalid HCA identifier") \ +VAPI_ERROR_INFO( VAPI_EINVAL_COUNTER, EMPTY,"Invalid key counter index") \ +VAPI_ERROR_INFO( VAPI_EINVAL_COUNT_VAL, EMPTY,"Invalid counter value") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PD_HNDL, EMPTY,"Invalid Protection Domain") \ +VAPI_ERROR_INFO( VAPI_EINVAL_RD_UNSUPPORTED, EMPTY,"RD is not supported") \ +VAPI_ERROR_INFO( VAPI_EINVAL_RDD_HNDL, EMPTY,"Invalid Reliable Datagram Domain") \ +VAPI_ERROR_INFO( VAPI_EINVAL_AV_HNDL, EMPTY,"Invalid Address Vector Handle") \ +VAPI_ERROR_INFO( VAPI_E2BIG_WR_NUM, EMPTY,"Max. 
WR number exceeds capabilities") \ +VAPI_ERROR_INFO( VAPI_E2BIG_SG_NUM, EMPTY,"Max. SG size exceeds capabilities") \ +VAPI_ERROR_INFO( VAPI_EINVAL_SERVICE_TYPE, EMPTY,"Invalid Service Type") \ +VAPI_ERROR_INFO( VAPI_ENOSYS_ATTR, EMPTY,"Unsupported attribute") \ +VAPI_ERROR_INFO( VAPI_EINVAL_ATTR, EMPTY,"Can not change attribute") \ +VAPI_ERROR_INFO( VAPI_ENOSYS_ATOMIC, EMPTY,"Atomic operations not supported") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PKEY_IX, EMPTY,"Pkey index out of range") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PKEY_TBL_ENTRY, EMPTY,"Pkey index point to invalid Pkey") \ +VAPI_ERROR_INFO( VAPI_EINVAL_QP_HNDL, EMPTY,"Invalid QP Handle") \ +VAPI_ERROR_INFO( VAPI_EINVAL_QP_STATE, EMPTY,"Invalid QP State") \ +VAPI_ERROR_INFO( VAPI_EINVAL_SRQ_HNDL, EMPTY,"Invalid SRQ Handle") \ +VAPI_ERROR_INFO( VAPI_ESRQ, EMPTY,"SRQ is in error state") \ +VAPI_ERROR_INFO( VAPI_EINVAL_EEC_HNDL, EMPTY,"Invalid EE-Context Handle") \ +VAPI_ERROR_INFO( VAPI_EINVAL_MIG_STATE, EMPTY,"Invalid Path Migration State") \ +VAPI_ERROR_INFO( VAPI_EINVAL_MTU, EMPTY,"MTU violation") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PORT, EMPTY,"Invalid Port Number") \ +VAPI_ERROR_INFO( VAPI_EINVAL_RNR_NAK_TIMER, EMPTY,"Invalid RNR NAK timer field") \ +VAPI_ERROR_INFO( VAPI_EINVAL_LOCAL_ACK_TIMEOUT, EMPTY,"Invalid Local ACK timeout field") \ +VAPI_ERROR_INFO( VAPI_E2BIG_RAW_DGRAM_NUM, EMPTY,"Number of raw datagrams QP exeeded") \ +VAPI_ERROR_INFO( VAPI_EINVAL_QP_TYPE, EMPTY,"Invalid special QP type") \ +VAPI_ERROR_INFO( VAPI_ENOSYS_RAW, EMPTY,"Raw datagram QP not supported") \ +VAPI_ERROR_INFO( VAPI_EINVAL_CQ_HNDL, EMPTY,"Invalid Completion Queue Handle") \ +VAPI_ERROR_INFO( VAPI_E2BIG_CQ_NUM, EMPTY,"Number of entries in CQ exceeds Cap.") \ +VAPI_ERROR_INFO( VAPI_CQ_EMPTY, EMPTY,"CQ is empty") \ +VAPI_ERROR_INFO( VAPI_EINVAL_VA, EMPTY,"Invalid Virtual Address") \ +VAPI_ERROR_INFO( VAPI_EINVAL_LEN, EMPTY,"Invalid length") \ +VAPI_ERROR_INFO( VAPI_EINVAL_ACL, EMPTY,"Invalid ACL") \ +VAPI_ERROR_INFO( VAPI_EINVAL_PADDR, EMPTY,"Invalid physical address") \ +VAPI_ERROR_INFO( VAPI_EINVAL_OFST, EMPTY,"Invalid offset") \ +VAPI_ERROR_INFO( VAPI_EINVAL_MR_HNDL, EMPTY,"Invalid Memory Region Handle") \ +VAPI_ERROR_INFO( VAPI_EINVAL_MW_HNDL, EMPTY,"Invalid Memory Window Handle") \ +VAPI_ERROR_INFO( VAPI_EINVAL_OP, EMPTY,"Invalid operation") \ +VAPI_ERROR_INFO( VAPI_EINVAL_NOTIF_TYPE, EMPTY,"Invalid completion notification type") \ +VAPI_ERROR_INFO( VAPI_EINVAL_SG_FMT, EMPTY,"Invalid scatter/gather list format") \ +VAPI_ERROR_INFO( VAPI_EINVAL_SG_NUM, EMPTY,"Invalid scatter/gather list length") \ +VAPI_ERROR_INFO( VAPI_E2BIG_MCG_SIZE, EMPTY,"Number of QPs attached to multicast groups exceeded") \ +VAPI_ERROR_INFO( VAPI_EINVAL_MCG_GID, EMPTY,"Invalid Multicast group GID") \ +VAPI_ERROR_INFO( VAPI_COMPLETED, EMPTY,"Poll Loop Completed") \ +VAPI_ERROR_INFO( VAPI_POLL_NEEDED, EMPTY,"Drain CQ with poll_cq") \ +VAPI_ERROR_INFO( VAPI_EOL, EMPTY,"End Of List") \ +VAPI_ERROR_INFO( VAPI_ERROR_MAX, EMPTY,"Dummy max error code : put all error codes before it") \ + +enum { +#define VAPI_ERROR_INFO(A, B, C) A B, + VAPI_ERROR_LIST +#undef VAPI_ERROR_INFO + VAPI_ERROR_DUMMY_CODE +}; + +typedef int32_t VAPI_ret_t; + +typedef struct { + u_int32_t vendor_id; /* Vendor ID */ + u_int32_t vendor_part_id; /* Vendor Part ID */ + u_int32_t hw_ver; /* Hardware Version */ + u_int64_t fw_ver; /* Device's firmware version (device specific) */ +} VAPI_hca_vendor_t; + +/* HCA Port properties (port db) */ +typedef struct { + IB_mtu_t max_mtu; /* Max MTU */ + u_int32_t max_msg_sz; /* Max 
message size */ + IB_lid_t lid; /* Base IB_LID. */ + u_int8_t lmc; /* IB_LMC for port. */ + IB_port_state_t state; /* Port state */ + IB_port_cap_mask_t capability_mask; + u_int8_t max_vl_num; /* Maximum number of VL supported by this port. */ + VAPI_key_counter_t bad_pkey_counter; /* Bad PKey counter (if supported) */ + VAPI_key_counter_t qkey_viol_counter; /* QKey violation counter */ + IB_lid_t sm_lid; /* IB_LID of subnet manager to be used for this prot. */ + IB_sl_t sm_sl; /* IB_SL to be used in communication with subnet manager. */ + u_int16_t pkey_tbl_len; /* Current size of pkey table */ + u_int16_t gid_tbl_len; /* Current size of GID table */ + VAPI_timeout_t subnet_timeout; /* Subnet Timeout for this port (see PortInfo) */ + u_int8_t initTypeReply; /* optional InitTypeReply value. 0 if not supported */ +} VAPI_hca_port_t; + +/* HCA Capabilities Structure */ +typedef struct { + u_int32_t max_num_qp; /* Maximum Number of QPs supported. */ + u_int32_t max_qp_ous_wr; /* Maximum Number of oustanding WR on any WQ. */ + u_int32_t flags; /* Various flags (VAPI_hca_cap_flags_t) */ + u_int32_t max_num_sg_ent; /* Max num of scatter/gather entries for desc other than RD */ + u_int32_t max_num_sg_ent_rd; /* Max num of scatter/gather entries for RD desc */ + u_int32_t max_num_cq; /* Max num of supported CQs */ + u_int32_t max_num_ent_cq; /* Max num of supported entries per CQ */ + u_int32_t max_num_mr; /* Maximum number of memory region supported. */ + u_int64_t max_mr_size; /* Largest contigous block of memory region in bytes. */ + u_int32_t max_pd_num; /* Maximum number of protection domains supported. */ + u_int32_t page_size_cap; /* Largest page size supported by this HCA */ + IB_port_t phys_port_num; /* Number of physical port of the HCA. */ + u_int16_t max_pkeys; /* Maximum number of partitions supported . */ + IB_guid_t node_guid; /* Node GUID for this hca */ + VAPI_timeout_t local_ca_ack_delay; /* Log2 4.096usec Max. RX to ACK or NAK delay */ + u_int8_t max_qp_ous_rd_atom; /* Maximum number of oust. RDMA read/atomic as target */ + u_int8_t max_ee_ous_rd_atom; /* EE Maximum number of outs. RDMA read/atomic as target */ + u_int8_t max_res_rd_atom; /* Max. Num. of resources used for RDMA read/atomic as target */ + u_int8_t max_qp_init_rd_atom; /* Max. Num. of outs. RDMA read/atomic as initiator */ + u_int8_t max_ee_init_rd_atom; /* EE Max. Num. of outs. RDMA read/atomic as initiator */ + VAPI_atomic_cap_t atomic_cap; /* Level of Atomicity supported */ + u_int32_t max_ee_num; /* Maximum number of EEC supported. */ + u_int32_t max_rdd_num; /* Maximum number of IB_RDD supported */ + u_int32_t max_mw_num; /* Maximum Number of memory windows supported */ + u_int32_t max_raw_ipv6_qp; /* Maximum number of Raw IPV6 QPs supported */ + u_int32_t max_raw_ethy_qp; /* Maximum number of Raw Ethertypes QPs supported */ + u_int32_t max_mcast_grp_num; /* Maximum Number of multicast groups */ + u_int32_t max_mcast_qp_attach_num; /* Maximum number of QP per multicast group */ + u_int32_t max_total_mcast_qp_attach_num;/* Maximum number of QPs which can be attached to a mcast grp */ + u_int32_t max_ah_num; /* Maximum number of address vector handles */ + + /* Extended HCA capabilities */ + + /* FMRs (Fast Memory Regions) */ + u_int32_t max_num_fmr; /* maximum number FMRs */ + u_int32_t max_num_map_per_fmr; /* Maximum number of (re)maps per FMR before + an unmap operation in required */ + /* SRQs (Shared Receive Queues) */ + u_int32_t max_num_srq; /* Maximum number of SRQs. Zero if SRQs are not supported. 
*/ + u_int32_t max_wqe_per_srq; /* Maximum number of WRs per SRQ.*/ + u_int32_t max_srq_sentries; /* Maximum scatter entries per SRQ WQE */ + MT_bool srq_resize_supported;/* Ability to modify the maximum number of WRs per SRQ.*/ + +} VAPI_hca_cap_t; + + +/* HCA Properties for Modify HCA verb */ +typedef struct { + MT_bool reset_qkey_counter; /* TRUE=> reset counter. FALSE=> do nothing */ + /* attributes in Capability Mask of port info that can be modified */ + MT_bool is_sm; + MT_bool is_snmp_tun_sup; + MT_bool is_dev_mgt_sup; + MT_bool is_vendor_cls_sup; + MT_bool is_client_reregister_sup; +} VAPI_hca_attr_t; + + +/* Address Vector (For UD AV as well as address-path in connected QPs */ +typedef struct { + IB_gid_t dgid MT_BYTE_ALIGN(4); /* Destination GID (alignment for IA64) */ + IB_sl_t sl; /* Service Level 4 bits */ + IB_lid_t dlid; /* Destination LID */ + u_int8_t src_path_bits; /* Source path bits 7 bits */ + IB_static_rate_t static_rate; /* Maximum static rate : 6 bits */ + + MT_bool grh_flag; /* Send GRH flag */ + /* For global destination or Multicast address:*/ + u_int8_t traffic_class; /* TClass 8 bits */ + u_int8_t hop_limit; /* Hop Limit 8 bits */ + u_int32_t flow_label; /* Flow Label 20 bits */ + u_int8_t sgid_index; /* SGID index in SGID table */ + + IB_port_t port; /* egress port (valid for UD AV only) */ + /* Following IBTA comment 1567 - should match QP's when UD AV port is enforced */ + +} VAPI_ud_av_t; + + +/* QP Capabilities */ +typedef struct { + u_int32_t max_oust_wr_sq; /* Max outstanding WR on the SQ */ + u_int32_t max_oust_wr_rq; /* Max outstanding WR on the RQ */ + u_int32_t max_sg_size_sq; /* Max scatter/gather descriptor entries on the SQ */ + u_int32_t max_sg_size_rq; /* Max scatter/gather descriptor entries on the RQ */ + u_int32_t max_inline_data_sq; /* Max bytes in inline data on the SQ */ + /* max_inline_data_sq is currently valid only for VAPI_query_qp (ignored for VAPI_create_qp) */ + /* In order to enlarge the max_inline_data_sq capability, enlarge the max_sg_size_sq parameter */ +} VAPI_qp_cap_t; + +/* Queue Pair Creation Attributes */ +typedef struct { + VAPI_cq_hndl_t sq_cq_hndl; /* CQ handle for the SQ */ + VAPI_cq_hndl_t rq_cq_hndl; /* CQ handle for the RQ */ + VAPI_qp_cap_t cap; /* Requested QP capabilities */ + VAPI_rdd_hndl_t rdd_hndl; /* Reliable Datagram Domain handle */ + VAPI_sig_type_t sq_sig_type; /* SQ Signalling type (SIGNAL_ALL_WR, SIGNAL_REQ_WR) */ + VAPI_sig_type_t rq_sig_type; /* RQ Signalling type (SIGNAL_ALL_WR, SIGNAL_REQ_WR) [Mellanox Specific]*/ + VAPI_pd_hndl_t pd_hndl; /* Protection domain handle */ + VAPI_ts_type_t ts_type; /* Transport Services Type */ +} VAPI_qp_init_attr_t; + +typedef struct { + VAPI_srq_hndl_t srq_hndl; /* Set to VAPI_INVAL_SRQ_HNDL when QP is not associated with a SRQ */ +} VAPI_qp_init_attr_ext_t; + +/* Init. 
the extended attributes structure with the macro below to assure forward compatibility */ +#define VAPI_QP_INIT_ATTR_EXT_T_INIT(qp_ext_attr_p) (qp_ext_attr_p)->srq_hndl= VAPI_INVAL_SRQ_HNDL + +/* Queue Pair Creation Returned actual Attributes */ +typedef struct { + VAPI_qp_num_t qp_num; /* QP number */ + VAPI_qp_cap_t cap; /* Actual QP capabilities */ +} VAPI_qp_prop_t; + + +/* Queue Pair Full Attributes (for modify QP) */ +typedef struct { + VAPI_qp_state_t qp_state; /* QP next state */ + MT_bool en_sqd_asyn_notif; /* Enable SQD affiliated asynchronous event notification */ + MT_bool sq_draining; /* query only - when (qp_state == VAPI_SQD) indicates whether sq is in drain process (TRUE), or drained.*/ + VAPI_qp_num_t qp_num; /* Queue Pair Number. [Mellanox specific] */ + VAPI_rdma_atom_acl_t remote_atomic_flags;/* Enable/Disable RDMA and atomic */ + VAPI_qkey_t qkey; /* QKey (UD/RD only) */ + IB_mtu_t path_mtu; /* Path MTU : 6 bits (connected services only) */ + VAPI_mig_state_t path_mig_state; /* Migration state */ + VAPI_psn_t rq_psn; /* Packet Sequence Number for RQ */ + VAPI_psn_t sq_psn; /* Packet sequence number for SQ */ + u_int8_t qp_ous_rd_atom; /* Maximum number of oust. RDMA read/atomic as target */ + u_int8_t ous_dst_rd_atom; /* Number of outstanding RDMA rd/atomic ops at destination */ + IB_rnr_nak_timer_code_t min_rnr_timer; /* Minimum RNR NAK timer */ + VAPI_qp_cap_t cap; /* QP capabilities max_sq/rq_ous_wr only valid */ + VAPI_qp_num_t dest_qp_num; /* Destination QP number (RC/UC) */ + VAPI_sched_queue_t sched_queue; /* Schedule queue (optional) */ + + /* Primary path (RC/UC only) */ + VAPI_pkey_ix_t pkey_ix; /* Primary PKey index */ + IB_port_t port; /* Primary port */ + VAPI_ud_av_t av; /* Primary remote node address vector (RC/UC QP only)*/ + VAPI_timeout_t timeout; /* Local Ack Timeout (RC only) */ + VAPI_retry_count_t retry_count; /* retry count (RC only) */ + VAPI_retry_count_t rnr_retry; /* RNR retry count (RC only) */ + + /* Alternate path (RC/UC only) */ + VAPI_pkey_ix_t alt_pkey_ix; /* Alternative PKey index */ + IB_port_t alt_port; /* Alternative port */ + VAPI_ud_av_t alt_av; /* Alternate remote node address vector */ + VAPI_timeout_t alt_timeout; /* Local Ack Timeout (RC only) */ +} VAPI_qp_attr_t; + +/* SRQ's attributes */ +typedef struct { + VAPI_pd_hndl_t pd_hndl; /* SRQ's PD. (Ignored on VAPI_modify_srq). */ + u_int32_t max_outs_wr; /* Max. outstanding WQEs */ + u_int32_t max_sentries;/* Max. 
scatter entries (Ignored on VAPI_modify_srq) */ + u_int32_t srq_limit; /* SRQ's limit (Ignored on VAPI_create_srq) */ +} VAPI_srq_attr_t; + +#define VAPI_SRQ_ATTR_T_INIT(srq_attr_p) { \ + (srq_attr_p)->pd_hndl= VAPI_INVAL_PD_HNDL; \ + (srq_attr_p)->max_outs_wr= 0; \ + (srq_attr_p)->max_sentries= 0; \ + (srq_attr_p)->srq_limit= 0; \ +} + +/* VAPI_modify_srq attributes mask */ +#define VAPI_SRQ_ATTR_LIMIT (1) +#define VAPI_SRQ_ATTR_MAX_OUTS_WR (1<<1) +typedef u_int8_t VAPI_srq_attr_mask_t; + + +/* Physical memory buffer */ +typedef struct { + VAPI_phy_addr_t start; + VAPI_phy_addr_t size; +} VAPI_phy_buf_t; + + +/* Memory Region/Window */ +typedef struct { + VAPI_mrw_type_t type; /* But not VAPI_MW */ + VAPI_lkey_t l_key; + VAPI_rkey_t r_key; + VAPI_virt_addr_t start; + VAPI_size_t size; + VAPI_pd_hndl_t pd_hndl; + VAPI_mrw_acl_t acl; + /* Physical buffers list : for physical memory region only (type==VAPI_MPR) */ + MT_size_t pbuf_list_len; + VAPI_phy_buf_t *pbuf_list_p; + VAPI_phy_addr_t iova_offset; /* Offset of "start" in first buffer */ +} VAPI_mr_t; + +typedef VAPI_mr_t VAPI_mrw_t; /* for backward compatibility */ + +typedef struct +{ + VAPI_lkey_t mr_lkey; /* L-Key of memory region to bind to */ + IB_virt_addr_t start; /* Memory window's virtual byte address */ + VAPI_size_t size; /* Size of memory window in bytes */ + VAPI_mrw_acl_t acl; /* Access Control (R/W permission - local/remote) */ +} VAPI_mw_bind_t; + + +/* Scatter/ Gather Entry */ +typedef struct { + VAPI_virt_addr_t addr; + u_int32_t len; + VAPI_lkey_t lkey; +} VAPI_sg_lst_entry_t; + +/* Send Request Descriptor */ +typedef struct { + VAPI_wr_id_t id; + VAPI_wr_opcode_t opcode; + VAPI_comp_type_t comp_type; + VAPI_sg_lst_entry_t *sg_lst_p; + u_int32_t sg_lst_len; + VAPI_imm_data_t imm_data; + MT_bool fence; + VAPI_ud_av_hndl_t remote_ah; + VAPI_qp_num_t remote_qp; + VAPI_qkey_t remote_qkey; + VAPI_ethertype_t ethertype; + IB_eecn_t eecn; + MT_bool set_se; + VAPI_virt_addr_t remote_addr; + VAPI_rkey_t r_key; + /* atomic_operands */ + u_int64_t compare_add; /* First operand: Used for both "Compare & Swap" and "Fetch & Add" */ + u_int64_t swap; /* Second operand: Used for "Compare & Swap" */ + /* Atomic's "data segment" is the scather list defined in sg_lst_p+sg_lst_len (like RDMA-Read) */ +} VAPI_sr_desc_t; + +/* Receive Request Descriptor */ +typedef struct { + VAPI_wr_id_t id; + VAPI_wr_opcode_t opcode; /* RECEIVE */ + VAPI_comp_type_t comp_type; /* Mellanox Specific */ + VAPI_sg_lst_entry_t *sg_lst_p; + u_int32_t sg_lst_len; +} VAPI_rr_desc_t; + +/* Remote node address for completion entry */ +typedef struct { + VAPI_remote_node_addr_type_t type; + IB_lid_t slid; + IB_sl_t sl; + + union { + VAPI_qp_num_t qp; /* source QP (valid on type==RD,UD) */ + VAPI_ethertype_t ety; /* ethertype (valid on type==RAW_ETY) */ + } qp_ety; + + union { + VAPI_eec_num_t loc_eecn; /* local EEC number (valid on type==RD) */ + u_int8_t dst_path_bits; /* dest path bits (valid on type==UD and RAW* ) */ + } ee_dlid; +} VAPI_remote_node_addr_t; + + +/* Work Completion Descriptor */ +typedef struct { + VAPI_wc_status_t status; + VAPI_wr_id_t id; + IB_wqpn_t local_qp_num; /* QP number this completion was generated for */ + VAPI_cqe_opcode_t opcode; + u_int32_t byte_len; /* Num. of bytes transferred */ + MT_bool imm_data_valid; /* Imm. 
data indicator */ + VAPI_imm_data_t imm_data; + VAPI_remote_node_addr_t remote_node_addr; + MT_bool grh_flag; + VAPI_pkey_ix_t pkey_ix; /* for GSI */ + /* Vendor specific error syndrome (valid when status != VAPI_SUCCESS) */ + EVAPI_vendor_err_syndrome_t vendor_err_syndrome; + u_int32_t free_res_count; +} VAPI_wc_desc_t; + + + +/**********************************/ +/* Fast memory regions data types */ +/**********************************/ + +typedef struct { + VAPI_pd_hndl_t pd_hndl; + VAPI_mrw_acl_t acl; + u_int32_t max_pages; /* Maximum number of pages that can be mapped using this region * + *(virtual mapping only) */ + u_int8_t log2_page_sz; /* Fixed page size for all maps on a given FMR */ + u_int32_t max_outstanding_maps; /* Maximum maps before invoking unmap for this region */ +} EVAPI_fmr_t; + +typedef struct { + VAPI_virt_addr_t start; /* Mapped memory virtual address */ + VAPI_size_t size; /* Size of memory mapped to this region */ + MT_size_t page_array_len; /* If >0 then no memory locking is done and page table is taken from array below */ + VAPI_phy_addr_t *page_array; /* Page size is set in EVAPI_alloc_fmr by log2_page_sz field */ +} EVAPI_fmr_map_t; + +typedef VAPI_mr_hndl_t EVAPI_fmr_hndl_t; + + +typedef struct { + VAPI_size_t total_mem; + VAPI_size_t free_mem; + VAPI_size_t largest_chunk; +} EVAPI_devmem_info_t; + + + +/*EVAPI_process_local_mad options*/ + +/* enumeration of options (effectively, bits in a bitmask) */ +typedef enum { + EVAPI_MAD_IGNORE_MKEY = 1 /* process_local_mad will not validate the MKEY */ +} EVAPI_proc_mad_opt_enum_t; + +/* Associated "bitmask" type for use in structs and argument lists */ +typedef u_int32_t EVAPI_proc_mad_opt_t; + + +/* profile typedef */ + +typedef struct EVAPI_hca_profile_t { + u_int32_t num_qp; /* min number of QPs to configure */ + u_int32_t num_cq; /* min number of CQs to configure */ + u_int32_t num_pd; /* min number of PDs to configure */ + u_int32_t num_mr; /* min number of mem regions to configure */ + u_int32_t num_mw; /* min number of mem windows to configure */ + u_int32_t max_qp_ous_rd_atom; /* max outstanding read/atomic operations as target PER QP */ + u_int32_t max_mcg; /* max number of multicast groups for this HCA */ + u_int32_t qp_per_mcg; /* max number of QPs per mcg */ + MT_bool require; /* if TRUE, EVAPI_open_hca will fail if cannot use exact profile values + to open the HCA */ +} EVAPI_hca_profile_t; + + +#endif /*H_VAPI_TYPES_H*/ + diff --git a/branches/Ndi/hw/mt23108/vapi/dirs b/branches/Ndi/hw/mt23108/vapi/dirs new file mode 100644 index 00000000..ddf0ed7d --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/dirs @@ -0,0 +1,3 @@ +DIRS=\ + user \ + kernel diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/Makefile b/branches/Ndi/hw/mt23108/vapi/kernel/Makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. 
+# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/SOURCES b/branches/Ndi/hw/mt23108/vapi/kernel/SOURCES new file mode 100644 index 00000000..f021cb46 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/SOURCES @@ -0,0 +1,70 @@ +TARGETNAME=mt23108 +TARGETPATH=..\..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=EXPORT_DRIVER + +DLLDEF=mt23108.def + +SOURCES= \ + mtl_common_kl_sources.c \ + mosal_kl_sources.c \ + mpga_kl_sources.c \ + hh_kl_sources.c \ + thh_kl_sources.c \ + thhul_kl_sources.c \ + vapi_common_kl_sources.c \ + tdriver_sources.c \ + mt23108.rc + +MT_HOME=.. +MDT_HOME=$(MT_HOME)\mlxsys\os_dep\win\tdriver +MOSAL_HOME=$(MT_HOME)\mlxsys\mosal\os_dep\win +THH_HOME=$(MT_HOME)\hca\hcahal\tavor + +INCLUDES=.;..\..\kernel; \ + ..\..\..\..\inc;..\..\..\..\inc\kernel; \ + $(MT_HOME)\mlxsys\tools; \ + $(MT_HOME)\tavor_arch_db; \ + $(MT_HOME)\Hca\verbs; \ + $(MT_HOME)\Hca\verbs\common; \ + $(MT_HOME)\mlxsys\mpga\os_dep\win; \ + $(MT_HOME)\mlxsys\mpga; \ + $(MT_HOME)\mlxsys\mtl_types; \ + $(MT_HOME)\mlxsys\mtl_types\win; \ + $(MT_HOME)\mlxsys\mtl_types\win\win; \ + $(MT_HOME)\mlxsys\mtl_common; \ + $(MT_HOME)\mlxsys\mtl_common\os_dep\win; \ + $(MT_HOME)\mlxsys\mosal; \ + $(MT_HOME)\mlxsys\mosal\os_dep\win; \ + $(MT_HOME)\Hca\hcahal; \ + $(THH_HOME); \ + $(THH_HOME)\util; \ + $(THH_HOME)\thh_hob; \ + $(THH_HOME)\cmdif; \ + $(THH_HOME)\eventp; \ + $(THH_HOME)\uar; \ + $(THH_HOME)\mrwm; \ + $(THH_HOME)\thh_cqm; \ + $(THH_HOME)\udavm; \ + $(THH_HOME)\mcgm; \ + $(THH_HOME)\ddrmm; \ + $(THH_HOME)\thh_qpm; \ + $(THH_HOME)\thh_srqm; \ + $(THH_HOME)\uldm; \ + $(THH_HOME)\thhul_hob; \ + $(THH_HOME)\thhul_pdm; \ + $(THH_HOME)\thhul_cqm; \ + $(THH_HOME)\thhul_qpm; \ + $(THH_HOME)\thhul_mwm; \ + $(THH_HOME)\thhul_srqm; \ + $(THH_HOME)\os_dep\win; \ + $(MDT_HOME); + +C_DEFINES=$(C_DEFINES) -DDRIVER -D__MSC__ \ + -D__KERNEL__ -DMT_KERNEL -D__WIN__ -D__LITTLE_ENDIAN -DMT_LITTLE_ENDIAN \ + -DMAX_ERROR=4 -DIVAPI_THH \ + -DMTL_MODULE=MDT -DUSE_MOSAL -DMT_BUILD_LIB -D__DLL_EXPORTS__ \ + -DUSE_KMUTEX -DSIMULTANUOUS_DPC -DMAP_PHYS_ADDR_VIA_KERNEL + +TARGETLIBS=$(TARGETPATH)\*\complib.lib + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/hh_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/hh_kl_sources.c new file mode 100644 index 00000000..9988a969 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/hh_kl_sources.c @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ + +#include "../hca/hcahal/hh.c" +#include "../hca/hcahal/hh_common.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mdmsg.h b/branches/Ndi/hw/mt23108/vapi/kernel/mdmsg.h new file mode 100644 index 00000000..8174fb51 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mdmsg.h @@ -0,0 +1,94 @@ +/*++ +==================================================================================== +Copyright (c) 2001 Mellanox Technologies + +Module Name: + + MdMsg.mc + +Abstract: + + MDDL Driver event log messages + +Authors: + + Leonid Keller + +Environment: + + User Mode. + +===================================================================================== +--*/ + +// +// Values are 32 bit values laid out as follows: +// +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---+-+-+-----------------------+-------------------------------+ +// |Sev|C|R| Facility | Code | +// +---+-+-+-----------------------+-------------------------------+ +// +// where +// +// Sev - is the severity code +// +// 00 - Success +// 01 - Informational +// 10 - Warning +// 11 - Error +// +// C - is the Customer code flag +// +// R - is a reserved bit +// +// Facility - is the facility code +// +// Code - is the facility's status code +// +// +// Define the facility codes +// +#define FACILITY_RPC_STUBS 0x3 +#define FACILITY_RPC_RUNTIME 0x2 +#define FACILITY_MD_ERROR_CODE 0x7 +#define FACILITY_IO_ERROR_CODE 0x4 + + +// +// Define the severity codes +// +#define STATUS_SEVERITY_WARNING 0x2 +#define STATUS_SEVERITY_SUCCESS 0x0 +#define STATUS_SEVERITY_INFORMATIONAL 0x1 +#define STATUS_SEVERITY_ERROR 0x3 + + +// +// MessageId: MD_EVENT_LOG_LOAD_OK +// +// MessageText: +// +// The Mellanox InfiniHost Driver has loaded successfully. +// +#define MD_EVENT_LOG_LOAD_OK ((NTSTATUS)0x40070001L) + +// +// MessageId: MD_EVENT_LOG_LOAD_ERROR +// +// MessageText: +// +// The Mellanox InfiniHost Driver has failed to load. +// +#define MD_EVENT_LOG_LOAD_ERROR ((NTSTATUS)0xC0070002L) + +// +// MessageId: MD_EVENT_LOG_LOAD_ERROR_FW +// +// MessageText: +// +// The Mellanox InfiniHost Driver has failed to load: THH_add_hca failed. Check FW. +// +#define MD_EVENT_LOG_LOAD_ERROR_FW ((NTSTATUS)0xC0070003L) + diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mosal_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/mosal_kl_sources.c new file mode 100644 index 00000000..3ddf0512 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mosal_kl_sources.c @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ + +#include "../mlxsys/mosal/mosal_gen_nos.c" +#include "../mlxsys/mosal/os_dep/win/mosal_bus.c" +#include "../mlxsys/mosal/os_dep/win/mosal_timer.c" +#include "../mlxsys/mosal/os_dep/win/mosal_mem.c" +#include "../mlxsys/mosal/os_dep/win/mosal_que.c" +#include "../mlxsys/mosal/os_dep/win/mosal_sync.c" +#include "../mlxsys/mosal/os_dep/win/mosal_gen.c" +#include "../mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c" +#include "../mlxsys/mosal/os_dep/win/mosal_mlock.c" +#include "../mlxsys/mosal/os_dep/win/mosal_util.c" +#include "../mlxsys/mosal/os_dep/win/mosal_thread.c" +#include "../mlxsys/mosal/os_dep/win/mosal_iobuf.c" +#include "../mlxsys/mosal/os_dep/win/mosal_ntddk.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mpga_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/mpga_kl_sources.c new file mode 100644 index 00000000..1a3a2017 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mpga_kl_sources.c @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. 
+ */ + +#include "../mlxsys/mpga/mpga.c" +#include "../mlxsys/mpga/packet_append.c" +#include "../mlxsys/mpga/internal_functions.c" +#include "../mlxsys/mpga/packet_utilities.c" +#include "../mlxsys/mpga/nMPGA_packet_append.c" +#include "../mlxsys/mpga/nMPGA.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.def b/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.def new file mode 100644 index 00000000..029185d5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.def @@ -0,0 +1,7 @@ +LIBRARY mt23108.sys + +EXPORTS +; DllInitialize and DllUnload must be exported for the OS reference counting to +; work, and must be private for the compiler to accept them. +DllInitialize private +DllUnload private diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.rc b/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.rc new file mode 100644 index 00000000..63a513eb --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mt23108.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Tavor HCA Function Driver (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Tavor HCA Function Driver" +#endif + +#define VER_INTERNALNAME_STR "mt23108.sys" +#define VER_ORIGINALFILENAME_STR "mt23108.sys" + +#include diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/mtl_common_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/mtl_common_kl_sources.c new file mode 100644 index 00000000..c0cc31df --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/mtl_common_kl_sources.c @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ + +#include "../mlxsys/mtl_common/mtl_common.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/tdriver_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/tdriver_sources.c new file mode 100644 index 00000000..93c559fa --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/tdriver_sources.c @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ + +#include "../mlxsys/os_dep/win/tdriver/Md.c" +#include "../mlxsys/os_dep/win/tdriver/MdDbg.c" +#include "../mlxsys/os_dep/win/tdriver/MdIoctl.c" +#include "../mlxsys/os_dep/win/tdriver/MdPnp.c" +#include "../mlxsys/os_dep/win/tdriver/MdPwr.c" +#include "../mlxsys/os_dep/win/tdriver/MdRdWr.c" +#include "../mlxsys/os_dep/win/tdriver/MdUtil.c" +#include "../mlxsys/os_dep/win/tdriver/MdConf.c" +#include "../mlxsys/os_dep/win/tdriver/MdPci.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/thh_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/thh_kl_sources.c new file mode 100644 index 00000000..b175f388 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/thh_kl_sources.c @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ +#include "../hca/hcahal/tavor/thh_init.c" +#include "../hca/hcahal/tavor/thh_hob/thh_hob.c" +#include "../hca/hcahal/tavor/cmdif/cmdif.c" +#include "../hca/hcahal/tavor/cmdif/cmds_wrap.c" +#include "../hca/hcahal/tavor/eventp/eventp.c" +#include "../hca/hcahal/tavor/eventp/event_irqh.c" +#include "../hca/hcahal/tavor/uar/uar.c" +#include "../hca/hcahal/tavor/ddrmm/tddrmm.c" +#include "../hca/hcahal/tavor/uldm/thh_uldm.c" +#include "../hca/hcahal/tavor/mrwm/tmrwm.c" +#include "../hca/hcahal/tavor/thh_cqm/tcqm.c" +#include "../hca/hcahal/tavor/thh_qpm/tqpm.c" +#include "../hca/hcahal/tavor/thh_srqm/thh_srqm.c" +#include "../hca/hcahal/tavor/udavm/udavm.c" +#include "../hca/hcahal/tavor/mcgm/mcgm.c" +#include "../hca/hcahal/tavor/os_dep/win/thh_mod_obj.c" +#include "../hca/hcahal/tavor/util/tlog2.c" +#include "../hca/hcahal/tavor/util/epool.c" +#include "../hca/hcahal/tavor/util/sm_mad.c" +#include "../hca/hcahal/tavor/util/extbuddy.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/thhul_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/thhul_kl_sources.c new file mode 100644 index 00000000..8321fc78 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/thhul_kl_sources.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. 
+ */ +#include "../hca/hcahal/tavor/thhul_hob/thhul_hob.c" +#include "../hca/hcahal/tavor/thhul_pdm/thhul_pdm.c" +#include "../hca/hcahal/tavor/thhul_cqm/thhul_cqm.c" +#include "../hca/hcahal/tavor/thhul_srqm/thhul_srqm.c" +#include "../hca/hcahal/tavor/thhul_qpm/thhul_qpm.c" +#include "../hca/hcahal/tavor/thhul_mwm/thhul_mwm.c" diff --git a/branches/Ndi/hw/mt23108/vapi/kernel/vapi_common_kl_sources.c b/branches/Ndi/hw/mt23108/vapi/kernel/vapi_common_kl_sources.c new file mode 100644 index 00000000..9f99a215 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/kernel/vapi_common_kl_sources.c @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Include all files that are not in the current directory. + */ + +#include "../hca/verbs/common/vip_array.c" +#include "../hca/verbs/common/vip_hash.c" +#include "../hca/verbs/common/vip_cirq.c" +#include "../hca/verbs/common/vip_delay_unlock.c" +#include "../hca/verbs/common/vapi_common.c" diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal.h new file mode 100644 index 00000000..f89d951b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_H +#define H_MOSAL_H + +#ifdef __cplusplus +extern "C" { +#endif + +/* ----- common stuff ----- */ +#include + +/* ----- OS-dependent implementation ----- */ +#ifndef MT_KERNEL +#include +/* Initialization of user level library. + * For dynamic linking, invoked via _init/DllMain. + * For static linking, user/application (or another init point) must invoke this + * function before any invocation of MOSAL functions. + */ +extern void MOSAL_user_lib_init(void); +#endif + +#include +#include + +#include +#include +#include + + +#include + + +#include +//#include + + +/* ----- mosal OS-specific types ----- */ +#include + +/* ----- bus access ----- */ +#include + +/* ----- interrupts, DPC and timers ----- */ +#include + +/* ----- memory services ----- */ +#include +#include +#include +#include + +/* ----- queue management ----- */ +#include + +/* ----- synchronization routines ----- */ +#include + + +/* ----- thread routines ----- */ +#include + +/* ----- socket routines ----- */ +//#include + + +#if !defined(VXWORKS_OS) +#include +#endif + + +#ifndef MT_KERNEL_ONLY + +/* callback management */ +#include + +#endif + +/* ----- driver services ----- */ +#ifdef __LINUX__ +#include +#endif + +/* ----- general services ----- */ +#include +#ifdef __WIN__ +#include +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* H_MOSAL_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h new file mode 100644 index 00000000..80a0058d --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef H_MOSAL_GEN_H +#define H_MOSAL_GEN_H + + +/****************************************************************************** + * Function (kernel-mode only): MOSAL_init + * + * Description: + * Initialization of MOSAL + * + * Parameters: + * major(IN) unsigned int + * device major number + * + * Returns: + * call_result_t + * + ******************************************************************************/ +call_result_t MOSAL_init(unsigned int major); + +/****************************************************************************** + * Function (kernel-mode only): MOSAL_cleanup + * + * Description: + * Cleanup of MOSAL internal structures + * + * Parameters: + * + * Returns: + * (none) + * + ******************************************************************************/ +void MOSAL_cleanup(void); + +/****************************************************************************** + * Function: MOSAL_is_privileged + * + * Description: Check if current processing context is privileged. + * Use this function whenever operation should be limited to privileged + * users, e.g., root/administrator etc. The function gets no parameters + * as it relies on the context of the current process/thread. On systems with + * no privilege levels (a flat hierarchy), this should always return TRUE. + * + * Parameters: (none). + * + * Returns: + * MT_bool + * TRUE if privileged user/context, FALSE if not. + * + ******************************************************************************/ +MT_bool MOSAL_is_privileged(void); + +/****************************************************************************** + * Function (kernel-mode only): MOSAL_getpid + * + * Description: + * Wrapper to getpid + * + * Parameters: (none) + * + * Returns: + * MOSAL_pid_t + * + ******************************************************************************/ +MOSAL_pid_t MOSAL_getpid(void); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_setpid + * + * Description: + * set current pid + * + * Parameters: + * pid(IN) MOSAL_pid_t + * the pid to set as the current pid + * + * Returns: + * + ******************************************************************************/ +void MOSAL_setpid(MOSAL_pid_t pid); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe64 + * + * Description: + * 64-bit conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int64_t + * + * Returns: + * u_int64_t + * + ******************************************************************************/ +u_int64_t MOSAL_letobe64( u_int64_t value ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe32 + * + * Description: + * 32-bit conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int32_t + * + * Returns: + * u_int32_t + * + ******************************************************************************/ +u_int32_t MOSAL_letobe32( u_int32_t value ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe16 + * + * Description: + * 16-bit conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int16_t + * + * Returns: + * u_int16_t + * + ******************************************************************************/ +u_int16_t MOSAL_letobe16( u_int16_t value ); + 
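[Editor's example] The MOSAL_letobe* declarations above convert 16/32/64-bit values from little-endian to big-endian representation. Because the two byte orders are mirror images, the conversion is an unconditional byte swap and is its own inverse, which is why the symmetric MOSAL_betole32 can be declared just below. A portable sketch of the 32-bit case, assuming only the u_int32_t typedef used throughout this header (the shipped implementations live in the OS-dependent MOSAL sources):

    /* Sketch: 32-bit little-endian <-> big-endian conversion (byte swap). */
    static u_int32_t letobe32_sketch(u_int32_t value)
    {
        return ((value & 0x000000FFu) << 24) |
               ((value & 0x0000FF00u) <<  8) |
               ((value & 0x00FF0000u) >>  8) |
               ((value & 0xFF000000u) >> 24);
    }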
+ + + +/****************************************************************************** + * Function (kernel-mode only): MOSAL_betole32 + * + * Description: + * convert from big endian representation to little endian representation + * + * Parameters: + * be(IN) number to be transformed + * + * Returns: + * u_int32_t + * + ******************************************************************************/ +u_int32_t MOSAL_betole32(u_int32_t be); + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * + * Un-protected double-linked list management + * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +/* Doubly linked list structure. Can be used as either a list head, or as link words. */ + + + + +/* Doubly-linked list manipulation routines. Implemented as macros */ +/* but logically these are procedures. */ + + + +/* VOID MOSAL_dlist_init_head( PLIST_ENTRY ListHead ); */ + +#define MOSAL_dlist_init_head(ListHead) ((ListHead)->Flink = (ListHead)->Blink = (ListHead)) + + +/* BOOLEAN MOSAL_dlist_is_empty( PLIST_ENTRY ListHead ); */ + +#define MOSAL_dlist_is_empty(ListHead) ((ListHead)->Flink == (ListHead)) + + +/* PLIST_ENTRY MOSAL_dlist_remove_head(PLIST_ENTRY ListHead); */ + +/* !!! Usage: only 'el = MOSAL_dlist_remove_head(...)' */ +/* and NOT return MOSAL_dlist_remove_head(...) or if (MOSAL_dlist_remove_head(...) == ...) !!! */ + +#define MOSAL_dlist_remove_head(ListHead) (ListHead)->Flink; {MOSAL_dlist_remove_entry((ListHead)->Flink)} + + +/* PLIST_ENTRY MOSAL_dlist_remove_tail(PLIST_ENTRY ListHead); */ + +/* !!! Usage: only 'el = MOSAL_dlist_remove_tail(...)' */ +/* and NOT return MOSAL_dlist_remove_tail(...) or if (MOSAL_dlist_remove_tail(...) == ...) !!! */ + +#define MOSAL_dlist_remove_tail(ListHead) (ListHead)->Blink; {MOSAL_dlist_remove_entry((ListHead)->Blink)} + + +/* VOID MOSAL_dlist_remove_entry( PLIST_ENTRY Entry ); */ + + +#define MOSAL_dlist_remove_entry(Entry) {\ + PLIST_ENTRY _EX_Blink;\ + PLIST_ENTRY _EX_Flink;\ + _EX_Flink = (Entry)->Flink;\ + _EX_Blink = (Entry)->Blink;\ + _EX_Blink->Flink = _EX_Flink;\ + _EX_Flink->Blink = _EX_Blink;\ + } + + +/* VOID MOSAL_dlist_insert_tail( PLIST_ENTRY ListHead, PLIST_ENTRY Entry ); */ + + +#define MOSAL_dlist_insert_tail(ListHead,Entry) {\ + PLIST_ENTRY _EX_Blink;\ + PLIST_ENTRY _EX_ListHead;\ + _EX_ListHead = (ListHead);\ + _EX_Blink = _EX_ListHead->Blink;\ + (Entry)->Flink = _EX_ListHead;\ + (Entry)->Blink = _EX_Blink;\ + _EX_Blink->Flink = (Entry);\ + _EX_ListHead->Blink = (Entry);\ + } + + +/* VOID MOSAL_dlist_insert_head( PLIST_ENTRY ListHead, PLIST_ENTRY Entry ); */ + + +#define MOSAL_dlist_insert_head(ListHead,Entry) {\ + PLIST_ENTRY _EX_Flink;\ + PLIST_ENTRY _EX_ListHead;\ + _EX_ListHead = (ListHead);\ + _EX_Flink = _EX_ListHead->Flink;\ + (Entry)->Flink = _EX_Flink;\ + (Entry)->Blink = _EX_ListHead;\ + _EX_Flink->Blink = (Entry);\ + _EX_ListHead->Flink = (Entry);\ + } + + +#ifdef MT_KERNEL +typedef enum {MOSAL_IN_ISR=1, MOSAL_IN_DPC, MOSAL_IN_TASK} MOSAL_exec_ctx_t; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_get_exec_ctx + * + * Description: + * get execution context of the current function (e.g. 
process or interrupt) + * + * Parameters: + * + * Returns: + * MOSAL_exec_ctx_t + * + ******************************************************************************/ +MOSAL_exec_ctx_t MOSAL_get_exec_ctx(void); + + +#endif /* MT_KERNEL */ + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c new file mode 100644 index 00000000..8f326919 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_gen_nos.c @@ -0,0 +1,135 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include + +#ifdef __DARWIN__ +#error This function is unsupported in Darwin +#endif + +call_result_t MOSAL_PCI_get_cfg_hdr(u_int8_t bus, u_int8_t dev_func, MOSAL_PCI_cfg_hdr_t * cfg_hdr) +{ + call_result_t ret; + + u_int8_t offset; + u_int32_t dword; + + MOSAL_PCI_hdr_type0_t * t0 = 0; + MOSAL_PCI_hdr_type1_t * t1 = 0; + + for (offset = 0; offset < sizeof(MOSAL_PCI_cfg_hdr_t); offset += sizeof(u_int32_t)) + { + ret = MOSAL_PCI_read_config_dword(bus,dev_func, offset, &dword); + if (ret != MT_OK) + { + MTL_ERROR2("Failed to read from bus=%d devfun=%d\n", bus, dev_func); + return(ret); + } + + *((u_int32_t*)cfg_hdr + offset/sizeof(u_int32_t)) = dword; + } + + if (cfg_hdr->type0.header_type == MOSAL_PCI_HEADER_TYPE0) + { + t0 = &cfg_hdr->type0; + + MTL_DEBUG4 ("vendor ID = 0x%.4x\n", t0->vid); + MTL_DEBUG4 ("device ID = 0x%.4x\n", t0->devid); + MTL_DEBUG4 ("command register = 0x%.4x\n", t0->cmd); + MTL_DEBUG4 ("status register = 0x%.4x\n", t0->status); + MTL_DEBUG4 ("revision ID = 0x%.2x\n", t0->revid); + MTL_DEBUG4 ("class code = 0x%.2x\n", t0->class_code); + MTL_DEBUG4 ("sub class code = 0x%.2x\n", t0->subclass); + MTL_DEBUG4 ("programming interface = 0x%.2x\n", t0->progif); + MTL_DEBUG4 ("cache line = 0x%.2x\n", t0->cache_line); + MTL_DEBUG4 ("latency time = 0x%.2x\n", t0->latency); + MTL_DEBUG4 ("header type = 0x%.2x\n", t0->header_type); + MTL_DEBUG4 ("BIST = 0x%.2x\n", t0->bist); + MTL_DEBUG4 ("base address 0 = 0x%.8x\n", t0->base0); + MTL_DEBUG4 ("base address 1 = 0x%.8x\n", t0->base1); + MTL_DEBUG4 ("base address 2 = 0x%.8x\n", t0->base2); + MTL_DEBUG4 ("base address 3 = 0x%.8x\n", t0->base3); + MTL_DEBUG4 ("base address 4 = 0x%.8x\n", t0->base4); + MTL_DEBUG4 ("base address 5 = 0x%.8x\n", t0->base5); + MTL_DEBUG4 ("cardBus CIS pointer = 0x%.8x\n", t0->cis); + MTL_DEBUG4 ("sub
system vendor ID = 0x%.4x\n", t0->sub_vid); + MTL_DEBUG4 ("sub system ID = 0x%.4x\n", t0->sub_sysid); + MTL_DEBUG4 ("expansion ROM base address = 0x%.8x\n", t0->rom_base); + MTL_DEBUG4 ("interrupt line = 0x%.2x\n", t0->int_line); + MTL_DEBUG4 ("interrupt pin = 0x%.2x\n", t0->int_pin); + MTL_DEBUG4 ("min Grant = 0x%.2x\n", t0->min_grant); + MTL_DEBUG4 ("max Latency = 0x%.2x\n", t0->max_latency); + + } else { + + t1 = &cfg_hdr->type1; + + MTL_DEBUG4 ("vendor ID = 0x%.4x\n", t1->vid); + MTL_DEBUG4 ("device ID = 0x%.4x\n", t1->devid); + MTL_DEBUG4 ("command register = 0x%.4x\n", t1->cmd); + MTL_DEBUG4 ("status register = 0x%.4x\n", t1->status); + MTL_DEBUG4 ("revision ID = 0x%.2x\n", t1->revid); + MTL_DEBUG4 ("class code = 0x%.2x\n", t1->class_code); + MTL_DEBUG4 ("sub class code = 0x%.2x\n", t1->sub_class); + MTL_DEBUG4 ("programming interface = 0x%.2x\n", t1->progif); + MTL_DEBUG4 ("cache line = 0x%.2x\n", t1->cache_line); + MTL_DEBUG4 ("latency time = 0x%.2x\n", t1->latency); + MTL_DEBUG4 ("header type = 0x%.2x\n", t1->header_type); + MTL_DEBUG4 ("BIST = 0x%.2x\n", t1->bist); + MTL_DEBUG4 ("base address 0 = 0x%.8x\n", t1->base0); + MTL_DEBUG4 ("base address 1 = 0x%.8x\n", t1->base1); + MTL_DEBUG4 ("primary bus number = 0x%.2x\n", t1->pri_bus); + MTL_DEBUG4 ("secondary bus number = 0x%.2x\n", t1->sec_bus); + MTL_DEBUG4 ("subordinate bus number = 0x%.2x\n", t1->sub_bus); + MTL_DEBUG4 ("secondary latency timer = 0x%.2x\n", t1->sec_latency); + MTL_DEBUG4 ("IO base = 0x%.2x\n", t1->iobase); + MTL_DEBUG4 ("IO limit = 0x%.2x\n", t1->iolimit); + MTL_DEBUG4 ("secondary status = 0x%.4x\n", t1->sec_status); + MTL_DEBUG4 ("memory base = 0x%.4x\n", t1->mem_base); + MTL_DEBUG4 ("memory limit = 0x%.4x\n", t1->mem_limit); + MTL_DEBUG4 ("prefetch memory base = 0x%.4x\n", t1->pre_base); + MTL_DEBUG4 ("prefetch memory limit = 0x%.4x\n", t1->pre_limit); + MTL_DEBUG4 ("prefetch memory base upper = 0x%.8x\n", t1->pre_base_upper); + MTL_DEBUG4 ("prefetch memory limit upper = 0x%.8x\n", t1->pre_limit_upper); + MTL_DEBUG4 ("IO base upper 16 bits = 0x%.4x\n", t1->io_base_upper); + MTL_DEBUG4 ("IO limit upper 16 bits = 0x%.4x\n", t1->io_limit_upper); + MTL_DEBUG4 ("expansion ROM base address = 0x%.8x\n", t1->rom_base); + MTL_DEBUG4 ("interrupt line = 0x%.2x\n", t1->int_line); + MTL_DEBUG4 ("interrupt pin = 0x%.2x\n", t1->int_pin); + MTL_DEBUG4 ("bridge control = 0x%.4x\n", t1->control); + + } + + return(MT_OK); + +} diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h new file mode 100644 index 00000000..6481e261 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_i2c.h @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_I2C_H +#define H_MOSAL_I2C_H + +#include + + +#define I2C_MAX_DEV_NAME 17 /* TBD: MOSAL name? */ + + +/* Forward declaration of device handle */ +typedef struct MOSAL_i2c_dev_st * MOSAL_i2c_devh_t; + + +/* Forward declaration of device */ +typedef struct MOSAL_i2c_dev_st MOSAL_i2c_dev_t; + + + +/*************************************************************************************************** + * Function: MOSAL_I2C_master_receive + * + * Description: master receive data + * + * Parameters: + * dev_h (IN) Device handle. + * slv_addr (IN) The address of the recipient + * buffer (OUT) (LEN @count) Pointer where to put data received + * count (IN) Number of bytes to be received + * bytes_received_p (OUT) Pointer to var to receive number of bytes received + * sendSTOP (IN) Send stop after transaction + * key (IN) Key to authenticate the caller. + * + * Returns: MT_OK success + * MT_ERROR failure to initialize + * + * Notes: + * + **************************************************************************************************/ +call_result_t MOSAL_I2C_master_receive(MOSAL_i2c_devh_t dev_h, u_int8_t slv_addr, + void *buffer, u_int16_t count, u_int32_t *bytes_received_p, MT_bool sendSTOP, u_int32_t key); + +/*************************************************************************************************** + * + * Function(MOSAL): MOSAL_I2C_master_transmit + * + * Description: master transmit data + * + * + * Parameters: + * dev_h (IN) I2C device handle. + * slv_addr (IN) The address of the recipient. + * buffer (IN) (LEN @count) Pointer to data to be transmitted. + * count (IN) Number of bytes to be sent. + * bytes_sent_p (OUT) Pointer to var to receive number of bytes sent + * sendSTOP (IN) Flag when 1 send STOP - otherwise don't send STOP + * key_p (IN/OUT) Pointer to key. + * + * Returns: MT_OK success + * MT_ERROR failure to initialize + * + *****************************************************************************************/ +call_result_t MOSAL_I2C_master_transmit(MOSAL_i2c_devh_t dev_h, unsigned char slv_addr, void *buffer, + int count, int *bytes_sent_p, MT_bool sendSTOP, u_int32_t * const key_p); + +/*************************************************************************************************** + * Function: MOSAL_I2C_send_stop + * + * Description: send STOP to the i2c bus + * + * Parameters: + * dev_h (IN) device handle + * key (IN) pointer to a key previously received from master transmit + * + * Returns: MT_OK success + * MT_ERROR failure to initialize + * + * Notes: + * + **************************************************************************************************/ +call_result_t MOSAL_I2C_send_stop(MOSAL_i2c_devh_t dev_h, const u_int32_t key); + + + +/***************************************************************************************** + * Function: MOSAL_I2C_read + * + * + * Description: Read from i2c device. + * + * + * Parameters: + * dev_h (IN) Handle of device to use. + * i2c_addr (IN) I2C bus address of target device.
+ * addr (IN) Offset in target device for read start. + * data (OUT) (LEN @length) Buffer in local memory where data read will be written. + * length (IN) Number of bytes to read. + * + * + * Returns: MT_OK success + * MT_ERROR failed to read + * + *****************************************************************************************/ +call_result_t MOSAL_I2C_read(MOSAL_i2c_devh_t dev_h, u_int16_t i2c_addr, u_int32_t addr, + u_int8_t* data, u_int32_t length); + + +/***************************************************************************************** + * Function: MOSAL_I2C_write + * + * Description: write to i2c device. + * + * + * Parameters: + * dev_h (IN) Handle of device to use. + * i2c_addr (IN) I2C bus address of target device. + * addr (IN) Offset in target device for write start. + * data (IN) (LEN @length) Buffer in local memory where data to be written is found + * length (IN) Number of bytes to write. + * + * + * Returns: MT_OK success + * MT_ERROR failed to write + * + *****************************************************************************************/ +call_result_t MOSAL_I2C_write(MOSAL_i2c_devh_t dev_h, u_int16_t i2c_addr, + u_int32_t addr, u_int8_t *data, u_int32_t length); + + +/***************************************************************************************** + * Function: MOSAL_I2C_open + * + * Description: Open I2C device returning handle to it. + * + * + * Parameters: + * name (IN) (LEN s) Device name. + * dev_h (OUT) Handle of device to use. + * + * + * Returns: MT_OK success + * MT_ENODEV no device registered for this name + * MT_ERROR generic error + * + *****************************************************************************************/ +call_result_t MOSAL_I2C_open(char * name, MOSAL_i2c_devh_t * dev_h); + + +/***************************************************************************************** + * Function: MOSAL_I2C_close + * + * Description: close a previously opened device. + * + * + * Parameters: + * dev_h (IN) Handle of device to use. + * + * + * Returns: MT_OK success + * MT_ENODEV Invalid handle + * MT_ERROR generic error + * + *****************************************************************************************/ +call_result_t MOSAL_I2C_close(MOSAL_i2c_devh_t dev_h); + + + + + +/***************************************************************************************** + * + * Function (Kernel only): MOSAL_I2C_add_dev + * + * Description: Register an i2c device. + * + * Parameters: + * devst (IN) Device structure. + * devh (OUT) Return device handle. + * + * Returns: MT_OK + * MT_ENOMEM + * + * + * Notes: this function should be called only from the modules registering the device + * inside the kernel. +*****************************************************************************************/ +call_result_t MOSAL_I2C_add_dev(MOSAL_i2c_dev_t * devst, MOSAL_i2c_devh_t *devh); + +/***************************************************************************************** + * + * Function (Kernel only): MOSAL_I2C_del_dev + * + * Description: Deregister I2C device. + * + * Parameters: + * devh (IN) Device handle. + * + * Returns: MT_OK + * MT_EINVAL - invalid device. + * + * Notes: this function should be called only from the modules registering the device + * inside the kernel.
+ *****************************************************************************************/ +call_result_t MOSAL_I2C_del_dev(MOSAL_i2c_devh_t devh); + + +#endif /* H_MOSAL_I2C_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_iobuf.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_iobuf.h new file mode 100644 index 00000000..23a80b44 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_iobuf.h @@ -0,0 +1,282 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef __MOSAL_IOBUF_H_ +#define __MOSAL_IOBUF_H_ + +typedef enum { + MOSAL_PERM_READ = 1, + MOSAL_PERM_WRITE = 1<<1 +} +MOSAL_mem_perm_enum_t; + +typedef u_int32_t MOSAL_mem_perm_t; + +typedef struct { + MT_virt_addr_t va; /* virtual address of the buffer */ + MT_size_t size; /* size in bytes of the buffer */ + MOSAL_prot_ctx_t prot_ctx; + u_int32_t nr_pages; + MT_u_int_t page_shift; + u_int32_t os_dep_flags; +} +MOSAL_iobuf_props_t; + + +typedef struct mosal_iobuf_iter_st MOSAL_iobuf_iter_t; +typedef struct mosal_iobuf_st *MOSAL_iobuf_t; + +/**** Note **** + Other platforms must define specific flags in the same manner as below + and consume free numbers. Whether flags may or may not be or'ed must + be specified explicitly */ +/*=== Linux specific flags ===*/ +/* The following flags MAY NOT be or'ed */ +#define MOSAL_IOBUF_LNX_FLG_MARK_ALL_DONTCOPY ((u_int32_t)1<<0) +#define MOSAL_IOBUF_LNX_FLG_MARK_FULL_PAGES_DONTCOPY ((u_int32_t)1<<1) /* mark only regions that occupy integral number of pages */ + + +/* The following are os specific flags contained in MOSAL_iobuf_props_t + * The same rules for allocating numbers are as above */ +#define MOSAL_IOBUF_LNX_FLG_PROP_MARKED_DONT_COPY ((u_int32_t)1<<0) + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_register + * + * Description: register a virtual buf and ensure it is locked + * + * Parameters: + * va(in) virtual address to register + * size(in) size in bytes of area to be registered + * prot_ctx(in) context of the calling thread + * iobuf_p(out) pointer to return iobuf object + * flags(in) flags affecting function's behavior. Flags may have meaning + * in a subset of OS specific implementations of this function. + * Others can ignore them.
Document all flags below. + * + * Returns: + * call_result_t + * MT_OK success + * MT_EINVAL invalid argument + * MT_EAGAIN not enough resources + * + * Notes: upon successful return the buffer pointed to by va is guaranteed to be + * locked + * Flags: MOSAL_IOBUF_LNX_FLG_MARK_ALL_DONTCOPY + * MOSAL_IOBUF_LNX_FLG_MARK_FULL_PAGES_DONTCOPY + * + ******************************************************************************/ +DLL_API call_result_t +#ifndef MTL_TRACK_ALLOC +MOSAL_iobuf_register +#else +MOSAL_iobuf_register_memtrack +#endif + (MT_virt_addr_t va, + MT_size_t size, + MOSAL_prot_ctx_t prot_ctx, + MOSAL_mem_perm_t req_perm, + MOSAL_iobuf_t *iobuf_p, + u_int32_t flags); + +#ifdef MTL_TRACK_ALLOC +#define MOSAL_iobuf_register(va, size, prot_ctx, req_perm, iobuf_p, flags) \ + ({ \ + call_result_t rc; \ + rc = MOSAL_iobuf_register_memtrack(va, size, prot_ctx, req_perm, iobuf_p, flags); \ + if ( rc == MT_OK ) { \ + memtrack_alloc(MEMTRACK_IOBUF, (unsigned long)(*iobuf_p), size, __FILE__, __LINE__); \ + } \ + rc; \ + }) +#endif + + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_deregister + * + * Description: deregister the memory context and free resources + * + * Parameters: + * iobuf(in) iobuf object to be released + * + * Returns: + * call_result_t + * MT_OK success + * Notes: when the function returns, the memory area is no longer guaranteed + * to be locked and iobuf is no longer valid + ******************************************************************************/ +DLL_API call_result_t +#ifndef MTL_TRACK_ALLOC +MOSAL_iobuf_deregister +#else +MOSAL_iobuf_deregister_memtrack +#endif + (MOSAL_iobuf_t iobuf); + +#ifdef MTL_TRACK_ALLOC +#define MOSAL_iobuf_deregister(iobuf) \ + ({ \ + call_result_t rc; \ + memtrack_free(MEMTRACK_IOBUF, (unsigned long)(iobuf), __FILE__, __LINE__); \ + rc = MOSAL_iobuf_deregister_memtrack(iobuf); \ + rc; \ + }) +#endif + + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_get_props + * + * Description: get the properties of the iobuf + * + * Parameters: + * iobuf(in) iobuf object + * props_p(out) pointer by which to return props + * + * + * Returns: + * call_result_t + * MT_OK success + * + ******************************************************************************/ +call_result_t MOSAL_iobuf_get_props(MOSAL_iobuf_t iobuf, + MOSAL_iobuf_props_t *props_p); + + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_get_tpt + * + * Description: get the tpt of the iobuf + * + * Parameters: + * iobuf(in) iobuf object + * npages(in) number of entries in pa_arr + * pa_arr(out) pointer to hold physical addresses + * page_size_p(out) where to copy the page size + * (may be null if output is not desired) + * act_table_sz_p(out) where to copy actual number of entries in the table + * (may be null if output is not desired) + * + * Returns: + * call_result_t + * MT_OK success + * Notes: the physical addresses of the pages are returned starting from the beginning + * of the buffer, for up to npages translations or the total number of translations + * for the buffer, whichever is smaller + ******************************************************************************/ +call_result_t MOSAL_iobuf_get_tpt(MOSAL_iobuf_t iobuf, + u_int32_t npages, + MT_phys_addr_t *pa_arr, + u_int32_t *page_size_p, + u_int32_t *act_table_sz_p); + 
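[Editor's example] Taken together, the declarations above form a register / query / deregister lifecycle for locked user buffers. A hedged kernel-space usage sketch; the va, size, prot_ctx, and pa_arr inputs are placeholders supplied by the caller, and error handling is trimmed:

    /* Sketch: lock a buffer, fetch its page translations, release it. */
    MOSAL_iobuf_t iobuf;
    MOSAL_iobuf_props_t props;
    call_result_t rc;

    rc = MOSAL_iobuf_register(va, size, prot_ctx,
                              MOSAL_PERM_READ | MOSAL_PERM_WRITE,
                              &iobuf, 0 /* no OS-specific flags */);
    if (rc != MT_OK)
        return rc;                 /* MT_EINVAL or MT_EAGAIN */

    rc = MOSAL_iobuf_get_props(iobuf, &props);  /* nr_pages, page_shift, ... */

    /* pa_arr must hold at least props.nr_pages entries; the last two
     * output arguments are optional and may be NULL, per the banner above. */
    rc = MOSAL_iobuf_get_tpt(iobuf, props.nr_pages, pa_arr, NULL, NULL);

    MOSAL_iobuf_deregister(iobuf); /* buffer no longer guaranteed locked */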
+/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_cmp_tpt + * + * Description: compare the tpt of two iobufs + * + * Parameters: + * iobuf_1(in) first iobuf object + * iobuf_2(in) second iobuf object + * + * Returns: + * 0 tpt's are equal + * != 0 tpt's differ + * Notes: The function compares iobufs belonging to the same protection context. + * If the protection contexts differ it returns -1; if the protection context + * is kernel then only the va and size of both iobufs are compared. It is assumed + * that there can be no two virtual addresses that map to the same physical address. + * In user space it could be that the sizes of the buffers are not equal but the + * number of pages is equal. In that case if the physical addresses of the two + * buffers are equal then the iobufs are considered equal + * + ******************************************************************************/ +int MOSAL_iobuf_cmp_tpt(MOSAL_iobuf_t iobuf_1, MOSAL_iobuf_t iobuf_2); + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_get_tpt_seg + * + * Description: get a number of entries from the tpt as specified by the iterator + * and the n_pages_in param + * + * Parameters: + * iobuf(in) iobuf object + * iterator_p(in/out) iterator for accessing translation tables + * n_pages_in(in) number of translations required + * n_pages_out_p(out) number of translations provided + * page_tbl_p(out) pointer to array where to return translations + * + * Returns: + * MT_OK + ******************************************************************************/ +call_result_t MOSAL_iobuf_get_tpt_seg(MOSAL_iobuf_t iobuf, MOSAL_iobuf_iter_t *iterator_p, + MT_size_t n_pages_in, MT_size_t *n_pages_out_p, + MT_phys_addr_t *page_tbl_p); + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_iter_init + * + * Description: initialize the iterator to the beginning of the translation + * table + * + * Parameters: + * iobuf(in) iobuf object + * iterator_p(out) iterator to initialize + * + * Returns: + * MT_OK + ******************************************************************************/ +call_result_t MOSAL_iobuf_iter_init(MOSAL_iobuf_t iobuf, MOSAL_iobuf_iter_t *iterator_p); + + +/****************************************************************************** + * Function (Kernel space only): MOSAL_iobuf_restore_perm + * + * Description: Restore the permissions on the page tables to the value as + * when the object was created + * + * Parameters: + * iobuf(in) iobuf object + * + * Returns: + * MT_OK + ******************************************************************************/ +call_result_t MOSAL_iobuf_restore_perm(MOSAL_iobuf_t iobuf); + +#endif /* __MOSAL_IOBUF_H_ */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h new file mode 100644 index 00000000..5f7aa650 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_K2U_CBK_H +#define H_MOSAL_K2U_CBK_H + +#include + +/* Max. num. of bytes for callback data */ +#define MAX_CBK_DATA_SZ 512 + +/* Per process resources handle */ +typedef int k2u_cbk_hndl_t; + +#define INVALID_K2U_CBK_HNDL ((k2u_cbk_hndl_t)(-1)) + +/* Callback ID for demultiplexing of different callbacks in the same process */ +typedef unsigned int k2u_cbk_id_t; + +/* Generic callback */ +typedef void (*k2u_cbk_t)(k2u_cbk_id_t cbk_id, void* data_p, MT_size_t size); + + +/****************************************************************************** + * Function (user-space only): k2u_cbk_register + * + * Description: Register a handler in the user-level generic callback agent. + * + * Parameters: + * cbk (IN) - Function pointer of the generic callback. + * cbk_hndl_p(OUT)- Callback handle for this process (per process, to be used by kernel caller) + * cbk_id_p (OUT)- ID to be used when calling this callback from kernel (per this process). + * + * Returns: MT_OK, + * MT_EAGAIN on problems with resources allocation. + * + *****************************************************************************/ +call_result_t k2u_cbk_register(k2u_cbk_t cbk, k2u_cbk_hndl_t *cbk_hndl_p, k2u_cbk_id_t *cbk_id_p); + + +/****************************************************************************** + * Function (user-space only): k2u_cbk_deregister + * + * Description: Deregister a handler in the user-level generic callback agent. + * + * Parameters: + * cbk_id (IN)- ID of callback to deregister. + * + * Returns: MT_OK, + * MT_EAGAIN on problems with resources allocation. + * + *****************************************************************************/ +call_result_t k2u_cbk_deregister(k2u_cbk_id_t cbk_id); + + +/****************************************************************************** + * Function (kernel-space only): k2u_cbk_invoke + * + * Description: Invoke from kernel a registered handler in user-level. + * + * Parameters: + * k2u_cbk_h(IN) - handle of cbk resources for process of callback to invoke. + * cbk_id(IN) - ID to be used when calling this callback from kernel (per this process). + * data_p(IN) (LEN MAX_CBK_DATA_SZ) - Pointer in kernel-space for data buffer (of up to MAX_CBK_DATA_SZ bytes). + * This data must be "vmalloc"ed. + * size(IN) - Number of valid bytes copies to data buffer. 
+ * + * Returns: MT_OK, + * MT_EINVAL on invalid handle or size of data more than MAX_CBK_DATA_SZ, + * MT_EAGAIN on problems with resources allocation. + * + * Note: The data is not copied but given data_p is saved in the queue and vfreed + * when delivered (copied) to the user-level process. + * + *****************************************************************************/ +call_result_t k2u_cbk_invoke(k2u_cbk_hndl_t k2u_cbk_h, k2u_cbk_id_t cbk_id, + void *data_p, MT_size_t size); +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h new file mode 100644 index 00000000..9dd58dcf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_k2u_cbk_priv.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_K2U_CBK_PRIV_H +#define H_MOSAL_K2U_CBK_PRIV_H + +/* This "callback ID" should be used to notify polling thread of cleanup request */ +#define K2U_CBK_CLEANUP_CBK_ID ((k2u_cbk_id_t)(-1)) + +/****************************************************************************** + * Function(No automatic wrapper): k2u_cbk_init + * + * Description: Init private resources for k2u callback for calling process (e.g. message q). + * + * Parameters: + * k2u_cbk_h_p (OUT) - returned handle (pointer in user-space) + * + * Returns: MT_OK, + * MT_EAGAIN on problems with resources allocation, + * MT_EBUSY if a handle is already allocated for this process (returned in k2u_cbk_h_p). + * + *****************************************************************************/ +call_result_t k2u_cbk_init(k2u_cbk_hndl_t *k2u_cbk_h_p); + + +/****************************************************************************** + * Function(No automatic wrapper): k2u_cbk_cleanup + * + * Description: Clean private resources for k2u callback for calling process. + * + * Parameters: + * k2u_cbk_h (IN) - handle of cbk resources for this process + * + * + * Returns: MT_OK, + * MT_EINVAL for invalid handle (e.g. this handle is not of this process). 
+ * + *****************************************************************************/ +call_result_t k2u_cbk_cleanup(k2u_cbk_hndl_t k2u_cbk_h); + + +/****************************************************************************** + * Function: k2u_cbk_pollq + * + * Description: Poll the message queue (will block if no message in the queue). + * + * Parameters: + * k2u_cbk_h (IN) - handle of cbk resources for this process + * cbk_id_p (OUT) - Id for given callback data. + * data_p (OUT) (LEN MAX_CBK_DATA_SZ) - Pointer in user-space for data buffer (of MAX_CBK_DATA_SZ bytes). + * size_p (OUT) - Number of valid bytes copies to data buffer. + * + * Returns: MT_OK, + * MT_EINVAL for invalid handle (e.g. this handle is not of this process) or NULL ptrs, + * MT_EAGAIN if q is empty + * + *****************************************************************************/ +call_result_t k2u_cbk_pollq(k2u_cbk_hndl_t k2u_cbk_h, + k2u_cbk_id_t *cbk_id_p,void *data_p, MT_size_t *size_p); + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h new file mode 100644 index 00000000..ed9df8c2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mem.h @@ -0,0 +1,478 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_MOSAL_MEM_H +#define H_MOSAL_MEM_H + +#include +#include +#include +#include +#include "mosal_mlock.h" + + +/*============ macro definitions =============================================*/ +#define VA_NULL ((MT_virt_addr_t)0) +#define PA_NULL ((MT_phys_addr_t)0) +#define INVAL_PHYS_ADDR ((MT_phys_addr_t)_UI64_MAX) + +typedef u_int32_t mem_attr_t; + +#if !defined(__DARWIN__) || !defined(MT_KERNEL) +typedef char * MOSAL_shmem_key_t; + +/*MOSAL shared memory flags */ +#define MOSAL_SHM_CREATE 0x1 /*Create a new shared memory region */ +#define MOSAL_SHM_EXCL 0x2 /*Ensure that the new region has been created*/ +#define MOSAL_SHM_READONLY 0x4 /*Create read-only region*/ +#define MOSAL_SHM_HUGEPAGE 0x8 /*Create huge page */ +/************************************/ +#endif /* !defined(__DARWIN__) || !defined(MT_KERNEL) */ + + +#define MIN_PAGE_SZ MOSAL_SYS_PAGE_SIZE +#define MIN_PAGE_SZ_MASK (~((MT_virt_addr_t)MIN_PAGE_SZ - 1)) +#define MIN_PAGE_SZ_ALIGN(x) (((x) + ~MIN_PAGE_SZ_MASK) & MIN_PAGE_SZ_MASK) + +#define MOSAL_PAGE_ALIGN(va, size) (((va) + ~MOSAL_PAGE_MASK((va)+(size))) \ + & MOSAL_PAGE_MASK(va + size)) + +typedef enum { + MOSAL_MEM_FLAGS_NO_CACHE =1,/* non-chached mapping (to be used for address not in main memory) */ + MOSAL_MEM_FLAGS_PERM_READ = (1<<1), /* Read permission */ + MOSAL_MEM_FLAGS_PERM_WRITE= (1<<2) /* Write permission */ + /* Note: currently write permission implies read permissions too */ +} MOSAL_mem_flags_enum_t; + +typedef u_int32_t MOSAL_mem_flags_t; /* To be used with flags from MOSAL_mem_flags_enum_t */ + + +static __INLINE__ const char *MOSAL_prot_ctx_str(const MOSAL_prot_ctx_t prot_ctx) +{ + if ( prot_ctx == MOSAL_get_kernel_prot_ctx() ) { + return "KERNEL"; + } + else if ( prot_ctx == MOSAL_get_current_prot_ctx() ) { + return "USER"; + } + else { + return "INVALID"; + } +} + + + +/****************************************************************************** + * + * Function(User Space only): MOSAL_io_remap + * + * Description: Map a physical contigous buffer to virtual address. + * + * Parameters: + * pa (IN) MT_phys_addr_t + * Physical address. + * size (IN) u_int32_t + * Size of memory buffer in bytes + * + * + * Returns: On success returns pointer to new virtual memory buffer else + * returns zero. + * + * Notes: The returned address will be page alligned. In case 'size' is not + * page alligned the amount of allocated memory can be bigger than the + * requested. 
+ * + ******************************************************************************/ +MT_virt_addr_t +#ifndef MTL_TRACK_ALLOC +MOSAL_io_remap +#else +MOSAL_io_remap_memtrack +#endif + (MT_phys_addr_t pa, MT_size_t size); + +#ifdef MTL_TRACK_ALLOC +#define MOSAL_io_remap(pa, size) \ + ({ \ + MT_virt_addr_t rc; \ + rc = MOSAL_io_remap_memtrack((pa), (size)); \ + if ( rc != VA_NULL ) { \ + memtrack_alloc(MEMTRACK_IOREMAP, (unsigned long)(rc), (size), __FILE__, __LINE__);\ + } \ + rc; \ + }) + +#endif + +void +#ifndef MTL_TRACK_ALLOC +MOSAL_io_unmap +#else +MOSAL_io_unmap_memtrack +#endif + (MT_virt_addr_t va); + + +#ifdef MTL_TRACK_ALLOC +#define MOSAL_io_unmap(va) do { \ + memtrack_free(MEMTRACK_IOREMAP, (unsigned long)(va), __FILE__, __LINE__); \ + MOSAL_io_unmap_memtrack(va); \ + } \ + while (0) +#endif + + +/****************************************************************************** + * + * Function: MOSAL_map_phys_addr + * + * Description: + * Map physical address range to given process/memory context + * MOSAL_io_remap is mapped to this with + * prot_ctx==MOSAL_PROT_CTX_KERNEL and flags=MOSAL_MEM_FLAGS_NO_CACHE + * + * Parameters: + * pa (IN) MT_phys_addr_t + * Physical address. + * bsize (IN) u_int32_t + * Size of memory buffer in bytes + * flags (IN) MOSAL_mem_flags_t + * Mapping attributes + * prot_ctx (IN) MOSAL_prot_ctx_t + * Protection/memory context to map to (kernel or current user level) + * + * Returns: + * On success returns pointer to new virtual address to which this + * physical memory is mapped (in given prot_ctx). + * NULL if failed. + * Note: Mapping must be made for IO memory only and not for RAM. + * + ******************************************************************************/ +MT_virt_addr_t MOSAL_map_phys_addr(MT_phys_addr_t pa, MT_size_t bsize, + MOSAL_mem_flags_t flags, MOSAL_prot_ctx_t prot_ctx); + +/****************************************************************************** + * + * Function: MOSAL_unmap_phys_addr + * + * Description: + * Unmap physical address range previously mapped using MOSAL_map_phys_addr + * + * Parameters: + * prot_ctx (IN) MOSAL_prot_ctx_t + * Protection context of memory space of given virtual address. + * virt (IN) MT_virt_addr_t + * Mapping address as returned by MOSAL_map_phys_addr (page aligned). + * bsize (IN) u_int32_t + * Size of memory buffer in bytes + * + * Returns: HH_OK, HH_EINVAL - invalid address + * + * + ******************************************************************************/ +call_result_t MOSAL_unmap_phys_addr(MOSAL_prot_ctx_t prot_ctx, MT_virt_addr_t virt, + MT_size_t bsize); + + +/****************************************************************************** + * + * Function: MOSAL_virt_to_phys + * + * Description: Translate virtual address to physical. + * + * Parameters: + * prot_ctx(IN) source of the address (kernel or user) + * va (IN) const MT_virt_addr_t + * Virtual address. + * pa_p(OUT) returned physical address + * + * Returns: MT_OK On success + * MT_ENOMEM when not in address space + * MT_ENORSC when physical page is not available + * MT_EINVAL invalid value of prot_ctx + * + ******************************************************************************/ +call_result_t MOSAL_virt_to_phys(MOSAL_prot_ctx_t prot_ctx, + const MT_virt_addr_t va, MT_phys_addr_t *pa_p); + +/****************************************************************************** + * + * Function (Kernel space only): MOSAL_virt_to_phys_ex + * + * Description: Translate virtual address to physical. 
+ * + * Parameters: + * prot_ctx(IN) source of the address (kernel or user) + * va (IN) Virtual address + * page_pp(OUT) pointer to return struct page * mapped to va + * pa_p(OUT) returned physical address + * wr_enable(IN) if set pte is made write enabled + * + * Returns: MT_OK On success + * MT_ENOMEM when not in address space + * MT_ENORSC when physical page is not available + * MT_EINVAL invalid value of prot_ctx + * + ******************************************************************************/ +#if defined(__KERNEL__) && !defined(VXWORKS_OS) && !defined(__WIN__) +call_result_t MOSAL_virt_to_phys_ex(MOSAL_prot_ctx_t prot_ctx, const MT_virt_addr_t va, + struct page **page_pp, MT_phys_addr_t *pa_p, MT_u_int_t wr_enable); +#endif + +#if !defined(__DARWIN__) +/****************************************************************************** + * + * Function (Kernel space only): MOSAL_phys_to_virt + * + * Description: Translate physical address to virtual. + * + * Parameters: + * pa (IN) const MT_phys_addr_t + * Physical address. + * + * Returns: On success returns a virtual address in current address space, corresponding to pa. + * Else returns zero. + * + ******************************************************************************/ +MT_virt_addr_t MOSAL_phys_to_virt(const MT_phys_addr_t pa); + + +/****************************************************************************** + * + * Function (user_only): MOSAL_virt_to_bus + * + * Description: Translate a virtual address to a bus address. + * + * Parameters: + * va (IN) MT_virt_addr_t + * Virtual address. + * + * Returns: On success returns bus address pointed by va. Else returns zero. + * + ******************************************************************************/ +MT_phys_addr_t MOSAL_virt_to_bus(MT_virt_addr_t va); + + +/****************************************************************************** + * + * Function (differ when kernel or user): MOSAL_phys_ctg_get + * + * Description: allocate a physically contiguous pinned memory region. + * + * Parameters: + * size (IN) + * size of physically contiguous memory to be allocate. + * + * Returns: virtual address of memory or NULL if failed. + * + ******************************************************************************/ +MT_virt_addr_t MOSAL_phys_ctg_get(MT_size_t size); + +/****************************************************************************** + * + * Function (differ when kernel or user): MOSAL_phys_ctg_free + * + * Description: free an allocate physically contiguous pinned memory region. + * + * Parameters: + * va (IN) + * address of region to be freed. 
+ *
+ * Returns: call_result_t
+ *******************************************************************************/
+call_result_t MOSAL_phys_ctg_free(MT_virt_addr_t addr, MT_size_t size);
+
+#endif /* !defined(__DARWIN__) */
+
+#define MOSAL_atomic_cmp_xchg(va, new_val, cmp_val)  MOSAL_arch_atomic_cmp_xchg((va), (new_val), (cmp_val))
+#define MOSAL_atomic_xchg( va, new_val )             MOSAL_arch_atomic_xchg((va), (new_val))
+
+#define PAGE_SIZE_4M   0x400000
+#define PAGE_SIZE_2M   0x200000
+#define PAGE_SIZE_4K   0x1000
+#define PAGE_SIZE_8K   0x2000
+#define PAGE_SIZE_16K  0x4000
+#define PAGE_SIZE_64K  0x10000
+#define PAGE_SHIFT_4M  22
+#define PAGE_SHIFT_2M  21
+#define PAGE_SHIFT_4K  12
+#define PAGE_SHIFT_8K  13
+#define PAGE_SHIFT_16K 14
+#define PAGE_SHIFT_64K 16
+
+/******************************************************************************
+ * Function: MOSAL_get_page_shift
+ *
+ * Description:
+ *   get page shift for the va in the protection context specified by prot_ctx.
+ *
+ * Parameters:
+ *   prot_ctx (IN)      protection context
+ *   va (IN)            virtual address
+ *   page_shift_p (OUT) returned page shift
+ *
+ * Returns: MT_OK
+ *          MT_ENOMEM when address not valid
+ *
+ * Note: If the address va does not belong to address space in the specified
+ *       context the function fails.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_get_page_shift( MOSAL_prot_ctx_t prot_ctx, MT_virt_addr_t va,
+                                    unsigned *page_shift_p);
+
+/******************************************************************************
+ * Function: (inline function) MOSAL_get_page_size
+ *
+ * Description:
+ *   get page size for the va in the protection context specified by prot_ctx.
+ *
+ * Parameters:
+ *   prot_ctx(IN)     protection context
+ *   va(IN)           virtual address
+ *   page_size_p(OUT) returned page size
+ *
+ * Returns: MT_OK
+ *          MT_ENOMEM when address not valid
+ *
+ * Note: If the address va does not belong to address space in the specified
+ *       context the function fails.
+ *
+ ******************************************************************************/
+static __INLINE__ call_result_t MOSAL_get_page_size(MOSAL_prot_ctx_t prot_ctx,
+                                                    MT_virt_addr_t va,
+                                                    unsigned *page_size_p)
+{
+  unsigned int page_shift;
+  call_result_t rc;
+
+  rc = MOSAL_get_page_shift(prot_ctx, va, &page_shift);
+  if ( rc != MT_OK ) {
+    return rc;
+  }
+  *page_size_p = 1 << page_shift;
+  return MT_OK;
+}
+
+#endif /* H_MOSAL_MEM_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mlock.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_mlock.h
+#ifndef __MOSAL_MLOCK_H__
+#define __MOSAL_MLOCK_H__
+
+typedef struct MOSAL_mlock_ctx_st *MOSAL_mlock_ctx_t;
+
+#if !defined(__DARWIN__)
+/******************************************************************************
+ *
+ *  Function: MOSAL_mlock
+ *
+ *  Arguments:
+ *    addr (IN) - Base of the region
+ *    size (IN) - Size of the region
+ *
+ *  Returns:
+ *    MT_OK,
+ *    appropriate error code otherwise
+ *
+ *  Description:
+ *    Locks memory region.
+ *
+ ********************************************************************************/
+call_result_t MOSAL_mlock(MT_virt_addr_t addr, MT_size_t size);
+
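+/*
+ * Illustrative sketch (added documentation, not part of the original header):
+ * lock a region, query the page size backing it, and unlock it again.
+ * MOSAL_get_page_size() is declared in mosal_mem.h; error handling is
+ * abbreviated.
+ */
+#if 0 /* example only - not compiled */
+static call_result_t mlock_example(MT_virt_addr_t va, MT_size_t size)
+{
+  unsigned page_size;
+  call_result_t rc;
+
+  rc = MOSAL_mlock(va, size);            /* pin the region */
+  if ( rc != MT_OK ) return rc;
+
+  rc = MOSAL_get_page_size(MOSAL_get_current_prot_ctx(), va, &page_size);
+  /* ... use page_size; the region stays locked until MOSAL_munlock() ... */
+
+  MOSAL_munlock(va, size);               /* unpin when done */
+  return rc;
+}
+#endif
+
+/********************************************************************************
+ *  Function: MOSAL_munlock
+ *
+ *  Arguments:
+ *    addr (IN) - Base of the region
+ *    size (IN) - Size of the region
+ *
+ *  Returns:
+ *    MT_OK,
+ *    appropriate error code otherwise
+ *
+ *  Description:
+ *    Unlocks memory region.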
+ * + ********************************************************************************/ +call_result_t MOSAL_munlock(MT_virt_addr_t addr, MT_size_t size); + + +/******************************************************************************** + * Function: (kernel-mode only): MOSAL_mlock_ctx_init + * + * Arguments: + * mlock_ctx_p (OUT): pointer to return mlock context in. + * + * Returns: + * MT_OK, + * appropriate error code otherwise + * + * Description: + * Initiailize MOSAL_mlock context for this process + * This function should only be called by MOSAL wrapper "open" entry point. + * Returned context should be saved in file descriptor (private) + ********************************************************************************/ +call_result_t MOSAL_mlock_ctx_init(MOSAL_mlock_ctx_t *mlock_ctx_p); + +/******************************************************************************** + * Function: (kernel-mode only): MOSAL_mlock_ctx_cleanup + * + * Arguments: + * mlock_ctx (IN): mlock context allocated with MOSAL_init_mlock_ctx + * Returns: + * + * Description: + * Free MOSAL_mlock context for this process + * This function should be invoked by the "close" entry point of MOSAL wrapper + ********************************************************************************/ +call_result_t MOSAL_mlock_ctx_cleanup(MOSAL_mlock_ctx_t mlock_ctx); + +/******************************************************************************** + * Function: (kernel-mode only): MOSAL_mlock_iobuf + * + * Arguments: + * va(IN): start address of the region to be locked + * size(IN): size of the area to be locked + * iobuf(IN/OUT): iobuf associated with this area + * page_shift(IN): page shift of the va + * Returns: + * + ********************************************************************************/ +call_result_t MOSAL_mlock_iobuf(MT_virt_addr_t addr, MT_size_t size, MOSAL_iobuf_t mosal_iobuf, + unsigned int page_shift); + +/******************************************************************************** + * Function: (kernel-mode only): MOSAL_munlock_iobuf + * + * Arguments: + * va(IN): start address of the region to be unlocked + * size(IN): size of the area to be unlocked + * iobuf(IN/OUT): iobuf associated with this area + * page_shift(IN): page shift of the va + * Returns: + * + ********************************************************************************/ +call_result_t MOSAL_munlock_iobuf(MT_virt_addr_t addr, MT_size_t size, MOSAL_iobuf_t mosal_iobuf, + unsigned int page_shift); + +#endif /* !defined(__DARWIN__) */ + +#endif /*__MOSAL_MLOCK_H__*/ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_prot_ctx.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_prot_ctx.h new file mode 100644 index 00000000..16cf182e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_prot_ctx.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef __MOSAL_PROT_CTX_H +#define __MOSAL_PROT_CTX_H + +typedef mosal_prot_ctx_t MOSAL_prot_ctx_t; +typedef mosal_pid_t MOSAL_pid_t; + +typedef MOSAL_prot_ctx_t MOSAL_protection_ctx_t; /* keep for backward compatibility */ + +#define MOSAL_get_current_prot_ctx() mosal_get_current_prot_ctx() +#define MOSAL_get_kernel_prot_ctx() mosal_get_kernel_prot_ctx() + +#ifdef __KERNEL__ +#define MY_PROT_CTX MOSAL_get_kernel_prot_ctx() +#else +#define MY_PROT_CTX MOSAL_get_current_prot_ctx() +#endif + +#endif /* __MOSAL_PROT_CTX_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_que.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_que.h new file mode 100644 index 00000000..ef05e327 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_que.h @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_QUE_H +#define H_MOSAL_QUE_H + + +/* Message queue defs. */ +typedef u_int32_t MOSAL_qhandle_t; +typedef void (*MOSAL_data_free_t)(void*); /* prototype for queue data freeing */ +#define MOSAL_MAX_QHANDLES 1024 +#define NULL_MOSAL_QHANDLE 0xFFFFFFFF + + +/*********************************** Event queues ****************************/ + +/****************************************************************************** + * Function (kernel-mode only): MOSAL_qcreate + * + * Description: Create Event Queue + * This function creates new queue + * + * Parameters: + * qh(OUT) MOSAL_qhandle_t * + * The handle to be used in MOSAL_qget() and MOSAL_qput() and + * MOSAL_qdelete + * qdestroy_free(IN) MOSAL_data_free_t + * This function is called to free all message queue left-overs + * when qdestroy is called while queue is not empty. + * Set to NULL if no free is needed (there is another mechanism for +this data freeing) + * + * Returns: + * call_result_t + * MT_OK or MT_ERROR. 
+ *
+ *****************************************************************************/
+call_result_t MOSAL_qcreate(MOSAL_qhandle_t *qh, MOSAL_data_free_t qdestroy_free);
+
+/******************************************************************************
+ * Function (kernel-mode only): MOSAL_isqempty
+ *
+ * Description:
+ *   Check whether there is something in the queue
+ * Parameters:
+ *   qh(IN) MOSAL_qhandle_t
+ *     The event queue handle.
+ *
+ * Returns:
+ *   MT_bool
+ *     TRUE  - the queue is empty
+ *     FALSE - the queue isn't empty
+ *
+ *****************************************************************************/
+MT_bool MOSAL_isqempty(MOSAL_qhandle_t qh);
+
+/******************************************************************************
+ * Function (kernel-mode only): MOSAL_qget
+ *
+ * Description:
+ *   reads the next data portion from the event queue
+ * Parameters:
+ *   qh(IN) MOSAL_qhandle_t
+ *     The event queue handle.
+ *   size(OUT) int *
+ *     Actual data size
+ *   data(OUT) void **
+ *     Returned pointer to the data buffer
+ *   block(IN) MT_bool
+ *     If true and the queue is empty, the call blocks.
+ *
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK
+ *     MT_EINTR
+ *     MT_EAGAIN on non-blocking access if the queue is empty
+ *     MT_ERROR.
+ *
+ *****************************************************************************/
+call_result_t MOSAL_qget(MOSAL_qhandle_t qh, int *size, void **data, MT_bool block);
+
+/******************************************************************************
+ * Function (kernel-mode only): MOSAL_qput
+ *
+ * Description:
+ *   puts the next data portion into the event queue
+ * Parameters:
+ *   qh(IN) MOSAL_qhandle_t
+ *     The event queue handle.
+ *   size(IN) int
+ *     Actual data size
+ *   data(IN) (LEN @size) void *
+ *     Data buffer
+ *
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK
+ *     MT_ERROR.
+ *
+ *****************************************************************************/
+call_result_t MOSAL_qput(MOSAL_qhandle_t qh, int size, void *data);
+
+
+/******************************************************************************
+ * Function (kernel-mode only): MOSAL_qdestroy
+ *
+ * Description:
+ *   destroy the queue
+ *
+ * Parameters:
+ *   qh(IN) MOSAL_qhandle_t
+ *     The event queue handle.
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK
+ *     MT_ERROR.
+ *
+ *****************************************************************************/
+call_result_t MOSAL_qdestroy(MOSAL_qhandle_t qh);
+
+/******************************************************************************
+ * Function (kernel-mode only): MOSAL_qprint
+ *
+ * Description:
+ *   print the queue (data assumed to be strings)
+ *
+ * Parameters:
+ *   qh(IN) MOSAL_qhandle_t
+ *     The event queue handle.
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK
+ *
+ *****************************************************************************/
+call_result_t MOSAL_qprint(MOSAL_qhandle_t qh);
+
+
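+/*
+ * Illustrative sketch (added documentation, not part of the original header):
+ * create a queue, move one message through it and destroy it.  The static
+ * message buffer is an assumption for the example; real callers typically
+ * enqueue dynamically allocated data and pass a matching free callback to
+ * MOSAL_qcreate().
+ */
+#if 0 /* example only - not compiled */
+static void queue_example(void)
+{
+  static int      msg = 42;   /* message payload (example only) */
+  MOSAL_qhandle_t qh;
+  int             size;
+  void           *data;
+
+  if ( MOSAL_qcreate(&qh, NULL) != MT_OK ) return; /* no leftover-free cbk */
+
+  MOSAL_qput(qh, sizeof(msg), &msg);                   /* enqueue */
+  if ( MOSAL_qget(qh, &size, &data, TRUE) == MT_OK ) { /* blocking dequeue */
+    /* ... data points to the message, size is its length ... */
+  }
+
+  MOSAL_qdestroy(qh);
+}
+#endif
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h
new file mode 100644
index 00000000..10c403df
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_sync.h
@@ -0,0 +1,460 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.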
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_SYNC_H +#define H_MOSAL_SYNC_H + +#ifdef __cplusplus + extern "C" { +#endif + + +typedef struct { + u_int32_t sec; /* Seconds */ + u_int32_t msec; /* Milliseconds */ +} MOSAL_time_t; + + + +/* //////////////////////////////////// */ + +/* ////////////////////////////////////////////////////////////////////////////// */ +/* Synchronization object */ +/* ////////////////////////////////////////////////////////////////////////////// */ + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_init + * + * Description: + * Init sync object + * + * Parameters: + * obj_p(IN) pointer to synch object + * + * Returns: + * in kernel always returns MT_OK + * in user mode may return MT_ERROR + * + ******************************************************************************/ +call_result_t MOSAL_syncobj_init(MOSAL_syncobj_t *obj_p); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_free + * + * Description: + * Destroy sync object + * + * Parameters: + * obj_p(IN) pointer to synch object + * + * Returns: + * MT_OK + * MT_ERR + * + ******************************************************************************/ +call_result_t MOSAL_syncobj_free(MOSAL_syncobj_t *obj_p); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_waiton + * + * Description: + * cause process to sleep until synchonization object is signalled or time + * expires + * + * Parameters: + * obj_p(IN) pointer to synch object + * micro_sec(IN) max time to wait in microseconds. 
+ *                 MOSAL_SYNC_TIMEOUT_INFINITE = infinite timeout
+ *
+ * Returns:
+ *   MT_OK - woke up by event
+ *   MT_ETIMEDOUT - woke up because of timeout
+ *   MT_EINTR - waiting for event interrupted due to a (SYSV) signal
+ *
+ ******************************************************************************/
+call_result_t MOSAL_syncobj_waiton(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec);
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_syncobj_signal
+ *
+ * Description:
+ *   signal the synchronization object
+ *
+ * Parameters:
+ *   obj_p(IN) pointer to synch object
+ *
+ * Returns:
+ *   N/A
+ *
+ ******************************************************************************/
+void MOSAL_syncobj_signal(MOSAL_syncobj_t *obj_p);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_syncobj_clear
+ *
+ * Description:
+ *   reset sync object (i.e. bring it back to its initial, non-signalled state)
+ *
+ * Parameters:
+ *   obj_p(IN) pointer to synch object
+ *
+ * Returns:
+ *
+ ******************************************************************************/
+void MOSAL_syncobj_clear(MOSAL_syncobj_t *obj_p);
+
+
+/* ////////////////////////////////////////////////////////////////////////////// */
+/* Semaphores                                                                      */
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_sem_init
+ *
+ * Description:
+ *   init semaphore
+ *
+ * Parameters:
+ *   sem_p(OUT) pointer to semaphore to be initialized
+ *   count(IN)  max number of processes that can hold the semaphore at the same time
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_EAGAIN
+ *
+ ******************************************************************************/
+call_result_t MOSAL_sem_init(MOSAL_semaphore_t *sem_p, MT_size_t count);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_sem_free
+ *
+ * Description:
+ *   free semaphore
+ *
+ * Parameters:
+ *   sem_p(OUT) pointer to semaphore to be freed
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_ERROR - internal error (double free)
+ *
+ ******************************************************************************/
+call_result_t MOSAL_sem_free(MOSAL_semaphore_t *sem_p);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_sem_acq
+ *
+ * Description:
+ *   acquire the semaphore
+ *
+ * Parameters:
+ *   sem_p(IN) pointer to semaphore
+ *   block(IN) if FALSE, return immediately when the semaphore cannot be
+ *             acquired; otherwise block as necessary
+ *
+ * Returns:
+ *   MT_OK - semaphore acquired
+ *   MT_EINTR - interrupted (in blocking mode)
+ *   MT_EAGAIN - semaphore not acquired (non-blocking mode only)
+ *
+ *******************************************************************************/
+call_result_t MOSAL_sem_acq(MOSAL_semaphore_t *sem_p, MT_bool block);
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_sem_rel
+ *
+ * Description:
+ *   release the semaphore
+ *
+ * Parameters:
+ *   sem_p(IN) pointer to semaphore
+ *
+ * Returns:
+ *   N/A
+ *
+ ******************************************************************************/
+void MOSAL_sem_rel(MOSAL_semaphore_t *sem_p);
+
+
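+/*
+ * Illustrative sketch (added documentation, not part of the original header):
+ * a critical section guarded by a MOSAL semaphore, showing blocking and
+ * non-blocking acquisition.
+ */
+#if 0 /* example only - not compiled */
+static void sem_example(MOSAL_semaphore_t *sem_p)
+{
+  /* blocking acquire; may fail with MT_EINTR on a signal */
+  if ( MOSAL_sem_acq(sem_p, TRUE) != MT_OK ) return;
+  /* ... critical section ... */
+  MOSAL_sem_rel(sem_p);
+
+  /* non-blocking attempt; MT_EAGAIN means the semaphore was busy */
+  if ( MOSAL_sem_acq(sem_p, FALSE) == MT_OK ) {
+    /* ... critical section ... */
+    MOSAL_sem_rel(sem_p);
+  }
+}
+#endif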
+/* ////////////////////////////////////////////////////////////////////////////// */
+/* Mutexes                                                                         */
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_mutex_init
+ *
+ * Description:
+ *   init mutex
+ *
+ * Parameters:
+ *   mtx_p(OUT) pointer to mutex to be initialized
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_EAGAIN
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_init(MOSAL_mutex_t *mtx_p);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_mutex_free
+ *
+ * Description:
+ *   destroy mutex
+ *
+ * Parameters:
+ *   mtx_p(OUT) pointer to mutex to be destroyed
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_ERROR
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_free(MOSAL_mutex_t *mtx_p);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_mutex_acq
+ *
+ * Description:
+ *   acquire the mutex
+ *
+ * Parameters:
+ *   mtx_p(IN) pointer to mutex
+ *   block(IN) if FALSE, return immediately when the mutex cannot be acquired;
+ *             otherwise block as necessary
+ *
+ * Returns:
+ *   MT_OK - mutex acquired
+ *   MT_EINTR - mutex not acquired (non-blocking mode only)
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_acq(MOSAL_mutex_t *mtx_p, MT_bool block);
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_mutex_rel
+ *
+ * Description:
+ *   release the mutex
+ *
+ * Parameters:
+ *   mtx_p(IN) pointer to mutex
+ *
+ * Returns:
+ *   N/A
+ *
+ ******************************************************************************/
+void MOSAL_mutex_rel(MOSAL_mutex_t *mtx_p);
+
+
+/* ////////////////////////////////////////////////////////////////////////////// */
+/* Delay of execution                                                              */
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_delay_execution
+ *
+ * Description:
+ *   delay execution of this control path for the specified time period. Note
+ *   that in some implementations it performs busy wait.
+ *
+ * Parameters:
+ *   time_micro(IN) required delay time in microseconds
+ *
+ * Returns:
+ *   N/A
+ *
+ ******************************************************************************/
+void MOSAL_delay_execution(u_int32_t time_micro);
+
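+/*
+ * Illustrative sketch (added documentation, not part of the original header):
+ * non-blocking mutex acquisition with a blocking fallback.
+ */
+#if 0 /* example only - not compiled */
+static void mutex_example(MOSAL_mutex_t *mtx_p)
+{
+  /* try without blocking first; fall back to a blocking acquire */
+  if ( MOSAL_mutex_acq(mtx_p, FALSE) != MT_OK ) {
+    if ( MOSAL_mutex_acq(mtx_p, TRUE) != MT_OK ) return; /* interrupted */
+  }
+  /* ... protected section ... */
+  MOSAL_mutex_rel(mtx_p);
+}
+#endif
+
+/* ////////////////////////////////////////////////////////////////////////////// */
+/* Spinlocks                                                                       */
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+
+/**************************************************************************************************
+ * Function (different for kernel and user space): MOSAL_spinlock_init
+ *
+ * Description: Creates a locking mechanism to allow synchronization between different processors.
+ *              It is initialized to an unlocked state
+ *
+ * Parameters: spinlock: pointer to spinlock element.
+ *
+ *
+ * Returns: MT_OK
+ *          MT_EAGAIN: not enough resources.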
+ *
+ *************************************************************************************************/
+call_result_t MOSAL_spinlock_init(MOSAL_spinlock_t *sp);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_mutex_acq_ui
+ *
+ * Description:
+ *   acquire the mutex - uninterruptible
+ *
+ * Parameters:
+ *   mtx_p(IN) pointer to mutex
+ *
+ *
+ ******************************************************************************/
+void MOSAL_mutex_acq_ui(MOSAL_mutex_t *mtx_p);
+
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_sem_acq_ui
+ *
+ * Description:
+ *   acquire the semaphore - uninterruptible
+ *
+ * Parameters:
+ *   sem_p(IN) pointer to semaphore
+ *
+ *******************************************************************************/
+void MOSAL_sem_acq_ui(MOSAL_semaphore_t *sem_p);
+
+/******************************************************************************
+ * Function (kernel-mode only):
+ *   MOSAL_syncobj_waiton_ui
+ *
+ * Description:
+ *   cause process to sleep until synchronization object is signalled or time
+ *   expires - uninterruptible
+ *
+ * Parameters:
+ *   obj_p(IN)     pointer to synch object
+ *   micro_sec(IN) max time to wait in microseconds.
+ *                 MOSAL_SYNC_TIMEOUT_INFINITE = infinite timeout
+ *
+ * Returns:
+ *   MT_OK - woke up by event
+ *   MT_ETIMEDOUT - woke up because of timeout
+ *
+ ******************************************************************************/
+call_result_t MOSAL_syncobj_waiton_ui(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec);
+
+/******************************************************************************
+ * Function
+ *   MOSAL_sleep:
+ *
+ * Description:
+ *   Suspends the execution of the current process (in Linux)/thread (in Windows)
+ *   for the given number of seconds
+ *
+ * Parameters:
+ *   sec(IN) u_int32_t - number of seconds to sleep
+ *
+ * Returns:
+ *   0 - if the sleep and params were okay
+ *   non-zero - otherwise
+ * Remarks:
+ *   sec must be less than MAX_DWORD/1000
+ *
+ ******************************************************************************/
+int MOSAL_sleep( u_int32_t sec );
+
+/******************************************************************************
+ * Function
+ *   MOSAL_usleep:
+ *
+ * Description:
+ *   Suspends the execution of the current process for the given number of
+ *   microseconds. The function guarantees to go to sleep for at least usec
+ *   microseconds
+ * Parameters:
+ *   usec(IN) number of micro seconds to sleep
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_EINTR signal received
+ *
+ ******************************************************************************/
+call_result_t MOSAL_usleep(u_int32_t usec);
+
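+/*
+ * Illustrative sketch (added documentation, not part of the original header):
+ * retry a microsecond sleep when a signal interrupts it.  The 500 microsecond
+ * interval is an arbitrary assumption.
+ */
+#if 0 /* example only - not compiled */
+static void usleep_example(void)
+{
+  /* MOSAL_usleep() may return MT_EINTR on a signal; simply retry */
+  while ( MOSAL_usleep(500) == MT_EINTR )
+    ;
+}
+#endif
+
+/******************************************************************************
+ * Function
+ *   MOSAL_usleep_ui:
+ *
+ * Description:
+ *   Suspends the execution of the current process for the given number of
+ *   microseconds. The function guarantees to go to sleep for at least usec
+ *   microseconds.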
The function is non interruptile + * Parameters: + * usec(IN) number of micro seconds to sleep + * + * Returns: void + * + ******************************************************************************/ +void MOSAL_usleep_ui(u_int32_t usec); + +/****************************************************************************** + * Function + * MOSAL_gettimeofday: + * + * Description: + * retrns MOSAL_time_t struct defining the current time + * Parameters: + * time_p(OUT) MOSAL_time_t * - current time + * + * + ******************************************************************************/ +void MOSAL_gettimeofday(MOSAL_time_t * time_p); + + + + + + +#ifdef __cplusplus + } +#endif + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_thread.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_thread.h new file mode 100644 index 00000000..f8009de5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_thread.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_THREAD_H +#define H_MOSAL_THREAD_H + +#ifdef __cplusplus + extern "C" { +#endif + +#define MOSAL_THREAD_FLAGS_DETACHED 1 /* Create thread as detached - valid for user-space only */ + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_start + * + * Description: + * create a thread and run a t-function in its context + * The thread is created as JOINABLE unless the flag MOSAL_THREAD_FLAGS_DETACHED is used. 
+ * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * flags(IN) flags for thread creation (MOSAL_THREAD_FLAGS_DETACHED) + * mtf(IN) t-function + * mtf_ctx(IN) t-function context + * + * Returns: + * MT_OK - thread started; for blocking mode - t-function is running; + * MT_EAGAIN - for blocking mode - timeout; thread hasn't started yet; + * other - error; + * + ******************************************************************************/ +call_result_t MOSAL_thread_start( + MOSAL_thread_t *mto_p, /* pointer to MOSAL thread object */ + u_int32_t flags, /* flags for thread creation */ + MOSAL_thread_func_t mtf, /* t-function name */ + void *mtf_ctx /* t-function context (optionally) */ + ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_kill + * + * Description: + * terminate the tread brutally + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * + * Returns: + * MT_OK - thread terminated + * MT_ERROR - a failure on thread termination + * + ******************************************************************************/ +call_result_t MOSAL_thread_kill( + MOSAL_thread_t *mto_p /* pointer to MOSAL thread object */ + ); + + +#ifdef __cplusplus + } +#endif + + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_timer.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_timer.h new file mode 100644 index 00000000..d56efab3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosal_timer.h @@ -0,0 +1,615 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_TIMER_H +#define H_MOSAL_TIMER_H + +#if defined(__LINUX__) && defined(__x86_64__) +#include +#endif + +#ifdef MT_KERNEL + +/* typedef for MOSAL INTERRUPT object */ +struct MOSAL_ISR; +typedef struct MOSAL_ISR MOSAL_ISR_t; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_ISR_set + * + * Description: + * connect interrupt handler + * + * Parameters: + * isr_p(IN) MOSAL_ISR_t * + * interrupt object + * handler(IN) intr_handler_t + * Pointer to the intr. handler. + * irq(IN) MOSAL_IRQ_ID_t + * The IRQ number of the intr which invokes this handler. + * name(IN) char * + * Just informationa name. 
In Linux environment + * that name will be presented in /proc/interrupts + * dev_id(IN) MT_ulong_ptr_t + * Unique device ID. Use on intr sharing. + * If NULL, the device may NOT share the IRQ + * with other devices (handlers). + * Otherwise, this pointer may be used as the "This" + * pointer of device data object when calling handler. + * + * Returns: + * call_result_t + * 0-OK -1-Error + * + ******************************************************************************/ +call_result_t MOSAL_ISR_set( + MOSAL_ISR_t * isr_p, + MOSAL_ISR_func_t handler, + MOSAL_IRQ_ID_t irq, + char * name, + MT_ulong_ptr_t dev_id + ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_ISR_unset + * + * Description: + * disconnect interrupt handler + * + * Parameters: + * isr_p(IN) MOSAL_ISR_t * + * interrupt object + * + * Returns: + * call_result_t + * 0-OK -1-Error + * + ******************************************************************************/ +call_result_t MOSAL_ISR_unset( MOSAL_ISR_t * isr_p ); + +#if !defined(__DARWIN__) +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_set_intr_handler + * + * Description: + * Sets the given interrupt handler to be called back on the given IRQ. + * If dev_id!=NULL, the handler would be linked to IRQ only if previously + * set handler were sharing handlers (had dev_id!=NULL), too. + * This means that a non-sharing handler may be the only handler which + * is called back on a given IRQ. + * + * Parameters: + * handler(IN) intr_handler_t + * Pointer to the intr. handler. + * irq(IN) MOSAL_IRQ_ID_t + * The IRQ number of the intr which invokes this handler. + * name(IN) char * + * Just informationa name. In Linux environment + * that name will be presented in /proc/interrupts + * dev_id(IN) void* + * Unique device ID. Use on intr sharing. + * If NULL, the device may NOT share the IRQ + * with other devices (handlers). + * Otherwise, this pointer may be used as the "This" + * pointer of device data object when calling handler. + * + * Returns: + * call_result_t + * 0-OK -1-Error + * + ******************************************************************************/ +call_result_t MOSAL_set_intr_handler(intr_handler_t handler, + MOSAL_IRQ_ID_t irq, + char *name, + void* dev_id); + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_unset_intr_handler + * + * Description: + * Removes the given interrupt handler of the callback chain on the given IRQ. + * + * Parameters: + * handler(IN) intr_handler_t + * Pointer to the intr. handler. + * irq(IN) MOSAL_IRQ_ID_t + * The IRQ number of the intr which invokes this handler. + * dev_id(IN) void* + * Unique device ID. Use on intr sharing. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_unset_intr_handler(intr_handler_t handler, + MOSAL_IRQ_ID_t irq, + void* dev_id); + +#endif /* ! 
defined__DARWIN__ */ + +#if ((defined(__LINUX__) && (LINUX_KERNEL_2_4 || LINUX_KERNEL_2_6))) || defined(__WIN__) || defined(VXWORKS_OS) || defined(__DARWIN__) +/* This code is only for kernel 2.4 or kernel 2.6 */ + + + +/* ////////////////////////////////////////////////////////////////////////////// */ + +/* DPC (=tasklet) functions */ + +/* ////////////////////////////////////////////////////////////////////////////// */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Component: + * MOSAL tasklet/DPC implementation [tasklets and DPC are synonyms] + * + * Data object: + * MOSAL DPC object - struct MOSAL_DPC, wrapping tasklet in Linux and DPC in Windows + * + * Functions: + * void MOSAL_DPC_init(MOSAL_DPC_t *d ...) - initialize a MOSAL DPC object; + * void MOSAL_DPC_schedule(MOSAL_DPC_t *d) - schedule a MOSAL DPC object; + * void MOSAL_DPC_schedule_ctx(MOSAL_DPC_t *d) - schedule a MOSAL DPC object with parameters; + * call_result_t MOSAL_DPC_add_ctx(MOSAL_DPC_t *d,...) - add DPC request context to a MOSAL DPC object; + * + * + * Macros: + * MOSAL_DPC_enable(MOSAL_DPC_t *d) - enable DPC (only Linux) + * MOSAL_DPC_disable(MOSAL_DPC_t *d) - disable DPC (only Linux) + * + * Example of usage (taken from CM\msgdspch.c and ported to MOSAL API): + * + * ** declare a MOSAL DPC object 'cm_machine_tasklet', calling function 'cm_machine' + * MOSAL_DPC_t cm_machine_tasklet; + * MOSAL_DPC_init (&cm_machine_tasklet, cm_machine, 0, 0); + * + * ** schedule it ** + * MOSAL_DPC_schedule( &cm_machine_tasklet ); + * + * Notes: + * 1. There are no static initialization like Linux's DECLARE_TASKLET() ! + * 2. DPC function has Linux's prototype: void a_DPC_function(unsigned long ), + * but the meaning of the parameters depends on the type of DPC. + * 3. There are 2 types of DPC: MOSAL_SINGLE_CTX and MOSAL_MULTIPLE_CTX. + * 4. DPC of MOSAL_SINGLE_CTX type: + * - allows only one interrupt per one DPC invocation; + * - doesn't relay dynamic parameters from ISR to DPC; + * - The parameter of a_DPC_function() is in fact a user callback context; + * 5. DPC of MULITPLE_CTX type: + * - allows several interrupts before and during a DPC invocation; + * - on every interrupt ISR fills a DPC context with static and dynamic parameters and + * enqueues it to the DPC context chain. The DPC handles all the chain while it works. 
+ * - The parameter of a_DPC_function() is in fact a pointer to DPC_CONTEXT_t (see below); + * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +struct DPC_CONTEXT; +typedef struct DPC_CONTEXT DPC_CONTEXT_t; + +/* typedef for MOSAL DPC object */ +struct MOSAL_DPC; +typedef struct MOSAL_DPC MOSAL_DPC_t; + + +/* Functions */ + + + /****************************************************************************** + * Function (kernel-mode only): + * MOSAL_DPC_init + * + * Description: + * init a MOSAL DPC object + * + * Parameters: + * d - MOSAL DPC object + * func - user DPC; + * data - its data; + * type - type of DPC + * + * Returns: + * + * Notes: + * (Windows) Callers of this routine must be running at IRQL PASSIVE_LEVEL + * + ******************************************************************************/ +void MOSAL_DPC_init(MOSAL_DPC_t *d, MOSAL_DPC_func_t func, MT_ulong_ptr_t func_ctx, MOSAL_DPC_type_t type ); + + /****************************************************************************** + * Function (kernel-mode only): + * MOSAL_DPC_schedule + * + * Description: + * schedule user DPC + * + * Parameters: + * d - MOSAL DPC object + * + * Returns: + * + * Notes: + * (Windows) Callers of this routine must be running at IRQL PASSIVE_LEVEL + * + ******************************************************************************/ +MT_bool MOSAL_DPC_schedule(MOSAL_DPC_t *d); + + +#if !defined(__DARWIN__) + /****************************************************************************** + * Function (kernel-mode only): + * MOSAL_DPC_schedule_ctx + * + * Description: + * schedule user DPC with relaying a context + * + * Parameters: + * d - MOSAL DPC object + * isr_ctx1 - context, relayed by ISR, inserting DPC; + * isr_ctx2 - context, relayed by ISR, inserting DPC; + * + * Returns: + * + * Notes: + * (Windows) Callers of this routine must be running at IRQL PASSIVE_LEVEL + * + ******************************************************************************/ +MT_bool MOSAL_DPC_schedule_ctx(MOSAL_DPC_t *d, void * isr_ctx1, void * isr_ctx2); + +#ifdef SUPPORT_MULTIPLE_CTX + /****************************************************************************** + * Function (kernel-mode only): + * MOSAL_DPC_add_ctx + * + * Description: + * add DPC request context to a MOSAL DPC object + * + * Parameters: + * d - MOSAL DPC object + * ctx1 - context, relayed by ISR, inserting DPC; + * ctx2 - context, relayed by ISR, inserting DPC; + * + * Returns: + * MT_ENORSC - if no ctx structures + * MT_OK - otherwise + * + * Notes: + * A helper routine for ISR, inserting DPC + * + ******************************************************************************/ +call_result_t MOSAL_DPC_add_ctx(MOSAL_DPC_t *d, PVOID ctx1, PVOID ctx2); +# endif /* SUPPORT_MULTIPLE_CTX */ +#endif /* ! defined __DARWIN__ */ + +/* ////////////////////////////////////////////////////////////////////////////// */ + +/* Timer functions */ + +/* ////////////////////////////////////////////////////////////////////////////// */ + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * Component: + * MOSAL timer functions implementation + * + * Data object: + * MOSAL timer object - struct MOSAL_timer, wrapping OS-dependent structures + * + * Functions: + * void MOSAL_timer_init(MOSAL_timer_t *t) - initialize a MOSAL timer object; + * void MOSAL_timer_add(MOSAL_timer_t *t ...) - start timer; + * void MOSAL_timer_del(MOSAL_timer_t *t ...) 
- cancel timer;
+ *    void MOSAL_timer_mod(MOSAL_timer_t *t ...) - restart timer;
+ *
+ * Example of usage (taken from CMkernel\cm_sm.c and ported to MOSAL API):
+ *
+ *    ** declare a MOSAL timer object 'try_timer' **
+ *    MOSAL_timer_t try_timer;
+ *    MOSAL_timer_init (&try_timer);
+ *
+ *    ** start timer **
+ *    MOSAL_timer_add( &try_timer, cmk_try_timeout, connection->local_com_id, usec );
+ *
+ * Portability notes:
+ *    1. Functions MOSAL_timer_del() and MOSAL_timer_mod() return 'void' (and not 'int' as in Linux);
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* timer object */
+struct MOSAL_timer;
+typedef struct MOSAL_timer MOSAL_timer_t;
+
+#if !defined(__DARWIN__)
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_timer_init
+ *
+ * Description:
+ *    init a MOSAL timer object
+ *
+ * Parameters:
+ *    t - MOSAL timer object
+ *
+ * Returns:
+ *
+ * Notes:
+ *    1. Callers of this routine must be running at IRQL PASSIVE_LEVEL;
+ *    2. Every timer must use its own MOSAL timer object !
+ *    3. Different MOSAL timer objects may use the same DPC function;
+ *
+ ******************************************************************************/
+__INLINE__ void MOSAL_timer_init(MOSAL_timer_t *t);
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_timer_add
+ *
+ * Description:
+ *    start timer
+ *
+ * Parameters:
+ *    t - MOSAL timer object
+ *    func - user DPC;
+ *    data - its data;
+ *    usecs - interval; 'func' will be called in 'usecs' microseconds
+ *
+ * Returns:
+ *
+ * Notes:
+ *    Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+__INLINE__ void MOSAL_timer_add(MOSAL_timer_t *t, MOSAL_DPC_func_t func, MT_ulong_ptr_t data, long usecs);
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_timer_del
+ *
+ * Description:
+ *    cancel (delete) timer
+ *
+ * Parameters:
+ *    t - MOSAL timer object
+ *
+ * Returns:
+ *
+ * Notes:
+ *    Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *    NOTE for Linux - *** do not use the callback function to call add_timer to
+ *    add this timer to the list as it may cause a race condition ***
+ *
+ ******************************************************************************/
+__INLINE__ void MOSAL_timer_del(MOSAL_timer_t *t);
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_timer_mod
+ *
+ * Description:
+ *    stop the running timer and restart it in 'usecs' microseconds
+ *
+ * Parameters:
+ *    t - MOSAL timer object
+ *    usecs - interval; the timer's function will be called in 'usecs' microseconds
+ *
+ * Returns:
+ *
+ * Notes:
+ *    Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+__INLINE__ void MOSAL_timer_mod(MOSAL_timer_t *t, long usecs);
+
+#endif /* ! defined __DARWIN__ */
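+
+/*
+ * Editor's sketch (not part of the original header): a periodic timer that
+ * re-arms itself every 100 ms; 'tick_cb' and 'poll_timer' are hypothetical.
+ *
+ *     static MOSAL_timer_t poll_timer;
+ *
+ *     static void tick_cb(unsigned long data)
+ *     {
+ *         -- periodic work here, then re-arm; before re-arming from the
+ *         -- callback in real code, see the race-condition note under
+ *         -- MOSAL_timer_del above
+ *         MOSAL_timer_mod( &poll_timer, 100000 );
+ *     }
+ *
+ *     MOSAL_timer_init( &poll_timer );
+ *     MOSAL_timer_add( &poll_timer, tick_cb, 0, 100000 );   -- first shot in 100 ms
+ *     ...
+ *     MOSAL_timer_del( &poll_timer );                       -- cancel at teardown
+ */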
+
+
+#endif /* ((defined(__LINUX__) && (LINUX_KERNEL_2_4 || LINUX_KERNEL_2_6))) || defined(__WIN__) || defined(VXWORKS_OS) || defined(__DARWIN__) */
+#endif /* MT_KERNEL */
+
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+/* Time functions */
+
+/* ////////////////////////////////////////////////////////////////////////////// */
+
+#if defined(__WIN__) || defined(VXWORKS_OS)
+
+/* Time */
+
+
+/* taken from Linux/time.h */
+struct MOSAL_timespec {
+  long tv_sec;  /* seconds */
+  long tv_nsec; /* nanoseconds */
+};
+
+
+typedef struct MOSAL_timespec MOSAL_timespec_t;
+
+#endif /* __WIN__ || VXWORKS_OS */
+
+#ifdef __DARWIN__
+typedef struct mach_timespec MOSAL_timespec_t;
+#endif
+
+#if defined(__LINUX__)
+typedef struct timespec MOSAL_timespec_t;
+#endif
+
+
+#ifdef MT_KERNEL
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_time_get_clock
+ *
+ * Description:
+ *    get the current system clock (seconds and nanoseconds)
+ *
+ * Parameters:
+ *    ts(OUT) - pointer to a structure, describing the time
+ *
+ * Returns:
+ *
+ * Notes:
+ *    Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+ void MOSAL_time_get_clock(MOSAL_timespec_t *ts);
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_time_compare
+ *
+ * Description:
+ *    compare 2 absolute times
+ *
+ * Parameters:
+ *    ts1(IN) - pointer to a structure, describing the time
+ *    ts2(IN) - pointer to a structure, describing the time
+ *
+ * Returns:
+ *    positive - when ts1 > ts2
+ *    negative - when ts1 < ts2
+ *    zero     - when ts1 = ts2
+ *
+ ******************************************************************************/
+static __INLINE__ int MOSAL_time_compare(MOSAL_timespec_t *ts1, MOSAL_timespec_t *ts2 )
+{
+  if (ts1->tv_sec > ts2->tv_sec)
+    return 1;
+  if (ts1->tv_sec < ts2->tv_sec)
+    return -1;
+  if (ts1->tv_nsec < ts2->tv_nsec)
+    return -1;
+  return ts1->tv_nsec > ts2->tv_nsec;
+}
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_time_add_usec
+ *
+ * Description:
+ *    advance an absolute time 'ts' by 'usecs' microseconds
+ *
+ * Parameters:
+ *    ts(IN)    - pointer to a structure, describing the time
+ *    usecs(IN) - a POSITIVE number of microseconds to add
+ *
+ * Returns:
+ *    updated 'ts'
+ *
+ ******************************************************************************/
+static __INLINE__ MOSAL_timespec_t * MOSAL_time_add_usec(MOSAL_timespec_t *ts, long usecs )
+{
+  ts->tv_sec  += usecs / 1000000L;
+  ts->tv_nsec += (usecs % 1000000L) * 1000;
+  if (ts->tv_nsec >= 1000000000L) { /* was '>': tv_nsec must stay below 1e9 */
+    ts->tv_sec++;
+    ts->tv_nsec -= 1000000000L;
+  }
+  return ts;
+}
+
+ /******************************************************************************
+ * Function (kernel-mode only):
+ *    MOSAL_time_init
+ *
+ * Description:
+ *    Zero 'ts' structure
+ *
+ * Parameters:
+ *    ts(IN) - pointer to a structure, describing the time
+ *
+ * Returns:
+ *
+ ******************************************************************************/
+static __INLINE__ void MOSAL_time_init(MOSAL_timespec_t *ts)
+{
+  ts->tv_sec = ts->tv_nsec = 0;
+}
+#endif /* MT_KERNEL */
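+
+/*
+ * Editor's example (not in the original source): the helpers above combine
+ * naturally into a deadline test; 'deadline' and 'now' are local names.
+ *
+ *     MOSAL_timespec_t deadline, now;
+ *     MOSAL_time_get_clock( &deadline );
+ *     MOSAL_time_add_usec( &deadline, 500000 );        -- expire in 0.5 sec
+ *     ...
+ *     MOSAL_time_get_clock( &now );
+ *     if ( MOSAL_time_compare( &now, &deadline ) >= 0 )
+ *         -- the deadline has passed --
+ */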
+
+ /******************************************************************************
+ * Function (inline):
+ *    MOSAL_get_time_counter
+ *
+ * Description:
+ *    get the current value of a counter that progresses monotonically with time
+ *
+ * Parameters:
+ *
+ * Returns:
+ *    value of the counter or 0 if not supported by the architecture
+ *
+ ******************************************************************************/
+static __INLINE__ u_int64_t MOSAL_get_time_counter(void)
+{
+#if (defined(__i386__) || defined(i386)) && (defined(__LINUX__) || defined(VXWORKS_OS))
+  unsigned long long int x;
+  __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x));
+  return x;
+
+#elif defined(__WIN__)
+  return win_get_time_counter(); /* defined in mtl_sys_defs.h */
+
+#elif defined(__x86_64__) && defined(__LINUX__)
+  u_int32_t low, high;
+  rdtsc(low, high);
+  return (((u_int64_t)high)<<32) | (u_int64_t)low;
+#else
+  return 0;
+#endif
+}
+
+/******************************************************************************
+ * Function: MOSAL_get_counts_per_sec
+ *
+ * Description: get number of counts in 1 sec (refer to MOSAL_get_time_counter)
+ *
+ * Parameters:
+ *
+ * Returns:
+ *    Number of counts in 1 sec or 0 if not supported
+ *
+ ******************************************************************************/
+u_int64_t MOSAL_get_counts_per_sec(void);
+
+#endif /* H_MOSAL_TIMER_H */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosalu_socket.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosalu_socket.h
new file mode 100644
index 00000000..7ce21695
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/mosalu_socket.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef _MOSALU_SOCKET_H +#define _MOSALU_SOCKET_H + +#ifndef MT_KERNEL + +/****************************************************************************** + * Function + * MOSAL_socket_socket + * + * Description: + * create a socket + * + * Parameters: + * domain (IN) + * type (IN) + * protocol(IN) + * Returns: + * the socket on success, otherwise -1 + ******************************************************************************/ +int MOSAL_socket_socket(MOSAL_socket_domain_t domain,MOSAL_socket_type_t type,MOSAL_socket_protocol_t protocol); + +/****************************************************************************** + * Function + * MOSAL_socket_close + * + * Description: + * closes the socket + * + * Parameters: + * sock + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_close(int sock); + + +/****************************************************************************** + * Function + * MOSAL_socket_connect + * + * Description: client + * connect to server + * + * Parameters: + * sock + * adrs(IN) server's adrs details + * len(IN) sizeof struct adrs + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_connect(int sock ,const MOSAL_sockaddr_t* adrs, + MOSAL_socklen_t len); + + +/****************************************************************************** + * Function + * MOSAL_socket_bind + * + * Description: server + * bind the socket to adrs + * + * Parameters: + * sock (IN) + * adrs (IN) server's adrs details + * len (IN) size of struct adrs + * Returns: + * 0 on success, -1 otherwise + * + ******************************************************************************/ +int MOSAL_socket_bind(int sock,const MOSAL_sockaddr_t* adrs, + MOSAL_socklen_t len); + + +/****************************************************************************** + * Function + * MOSAL_socket_listen + * + * Description: server + * start listening on this socket + * + * Parameters: + * sock(IN) + * n (IN) length of queue of requests + * + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_listen(int sock ,int n); + +/****************************************************************************** + * Function + * MOSAL_socket_accept + * + * Description: server + * extracts the first connection on the queue of pending connections, creates a new socket with + * the properties of sock, and allocates a new file descriptor. 
+ *
+ * Parameters:
+ *    sock
+ *    client_adrs(OUT) adrs of the first connection accepted
+ *    len_p(OUT) sizeof adrs
+ *
+ * Returns:
+ *    the new socket on success, -1 otherwise
+ ******************************************************************************/
+int MOSAL_socket_accept(int sock,MOSAL_sockaddr_t* client_adrs,MOSAL_socklen_t* len_p);
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_send
+ *
+ * Description:
+ *    send len bytes from buffer through socket
+ * Parameters:
+ *    sock(IN)
+ *    buf
+ *    len - num of bytes to send
+ *    flags
+ * Returns: the number of bytes sent, or -1
+ *
+ ******************************************************************************/
+int MOSAL_socket_send(int sock,const void* buf,int len,int flags);
+
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_recv
+ *
+ * Description:
+ *    recv up to len bytes into buffer from the socket
+ * Parameters:
+ *    sock(IN) the socket
+ *    buf
+ *    len - num of bytes to read
+ *    flags
+ * Returns: the number of bytes read, or -1
+ ******************************************************************************/
+int MOSAL_socket_recv(int sock,void* buf,int len,int flags);
+
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_sendto
+ *
+ * Description:
+ *    send n bytes from buf on socket to the peer at address 'adrs'.
+ * Parameters:
+ *    sock(IN) the socket
+ *    buf
+ *    n - num of bytes to send
+ *    flags
+ *    adrs
+ *    adrs_len
+ *
+ * Returns: the number of bytes sent, or -1
+ ******************************************************************************/
+int MOSAL_socket_sendto (int sock,void *buf, int n,int flags, MOSAL_sockaddr_t* adrs,
+                         MOSAL_socklen_t adrs_len);
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_recvfrom
+ *
+ * Description:
+ *    read n bytes into buf from the socket. If 'adrs' is not NULL, fill it
+ *    with the address of the sender and store the actual size of the
+ *    address in *adrs_len_p.
+ *
+ * Parameters:
+ *    sock(IN) the socket
+ *    buf
+ *    n - num of bytes to read
+ *    flags
+ *    adrs
+ *    adrs_len_p
+ *
+ * Returns: the number of bytes read, or -1
+ ******************************************************************************/
+
+int MOSAL_socket_recvfrom (int sock, void *buf, int n, int flags,
+                           MOSAL_sockaddr_t* adrs,MOSAL_socklen_t* adrs_len_p);
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_setsockopt
+ *
+ * Description:
+ *    set an option on socket or protocol level
+ *
+ * Parameters:
+ *    sock(IN) the socket
+ *    level(IN) option level
+ *    optname(IN) option name
+ *    optval(IN) pointer to buffer, containing the option value
+ *    optlen(IN) buffer size
+ *
+ * Returns: 0 on success, -1 otherwise
+ ******************************************************************************/
+int MOSAL_socket_setsockopt(int sock, MOSAL_socket_optlevel_t level,
+                            MOSAL_socket_optname_t optname, const void *optval, int optlen );
+
+/******************************************************************************
+ * Function
+ *    MOSAL_socket_get_last_error
+ *
+ * Description:
+ *    get last error on the socket
+ *
+ * Parameters:
+ *
+ * Returns: the error number
+ ******************************************************************************/
+int MOSAL_socket_get_last_error(void);
+
+#endif
+
+#endif
+
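Editor's note: a minimal client-side sketch of the wrappers declared above
(not part of the patch; the address setup and the domain/type/protocol values
are elided, and error handling is reduced to a single last-error query):

    MOSAL_sockaddr_t srv;                   /* server address (filling elided) */
    MOSAL_socklen_t  srv_len = sizeof(srv);
    char             buf[64];
    int s = MOSAL_socket_socket( domain, type, protocol );
    if ( s != -1 ) {
        if ( MOSAL_socket_connect( s, &srv, srv_len ) != 0 ||
             MOSAL_socket_send( s, buf, sizeof(buf), 0 ) == -1 ||
             MOSAL_socket_recv( s, buf, sizeof(buf), 0 ) == -1 )
            (void)MOSAL_socket_get_last_error();
        MOSAL_socket_close( s );
    }
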
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal.def
new file mode 100644
index 00000000..e17fb37c
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal.def
@@ -0,0 +1,108 @@
+EXPORTS
+;
+; mtl_common
+;
+ mtl_log_set
+ mtl_log
+ mtl_strerror
+ mtl_strerror_sym
+;
+; MOSAL
+;
+ ; ----- mosal_wrap_clt.c -----
+ MOSAL_PCI_present
+ MOSAL_PCI_find_device
+ MOSAL_PCI_find_class
+ MOSAL_PCI_read_config_byte
+ MOSAL_PCI_read_config_word
+ MOSAL_PCI_write_config_byte
+ MOSAL_PCI_write_config_word
+ MOSAL_PCI_read_config_dword
+ MOSAL_PCI_write_config_dword
+ MOSAL_PCI_read_config_data
+ MOSAL_PCI_write_config_data
+ MOSAL_PCI_read_io_byte
+ MOSAL_PCI_read_io_word
+ MOSAL_PCI_read_io_dword
+ MOSAL_PCI_write_io_byte
+ MOSAL_PCI_write_io_word
+ MOSAL_PCI_write_io_dword
+ MOSAL_PCI_read_mem
+ MOSAL_PCI_write_mem
+ MOSAL_PCI_get_cfg_hdr
+ MOSAL_MPC860_present
+ MOSAL_MPC860_write
+ MOSAL_MPC860_read
+ MOSAL_is_privileged
+ k2u_cbk_init
+ k2u_cbk_cleanup
+ MOSAL_map_phys_addr
+ MOSAL_unmap_phys_addr
+ MOSAL_virt_to_phys
+ MOSAL_mlock
+ MOSAL_munlock
+ MOSAL_get_counts_per_sec
+ MOSAL_nsecs
+; MOSAL_get_page_size
+ MOSAL_get_page_shift
+ ; ----- Mosalu_mem.c -----
+ MOSAL_user_lib_init
+ MOSAL_io_remap
+ MOSAL_io_unmap
+ MOSAL_io_release
+ MOSAL_phys_ctg_get
+ MOSAL_phys_ctg_free
+ MOSAL_shmrm
+ MOSAL_shmdt
+ MOSAL_shmat
+ MOSAL_shmget
+ MOSAL_get_sys_page_size
+ MOSAL_get_sys_page_shift
+ ; ----- Mosalu_sync.c -----
+ MOSAL_syncobj_init
+ MOSAL_syncobj_waiton
+ MOSAL_syncobj_waiton_ui
+ MOSAL_syncobj_signal
+ MOSAL_syncobj_clear
+ MOSAL_sem_init
+ MOSAL_sem_acq
+ MOSAL_sem_acq_ui
+ MOSAL_sem_acq_to
+ MOSAL_sem_rel
+ MOSAL_mutex_init
+ MOSAL_mutex_acq
+ MOSAL_mutex_acq_ui
+ MOSAL_mutex_acq_to
+ MOSAL_mutex_rel
+ MOSAL_delay_execution
+ MOSAL_spinlock_init
+ MOSAL_sleep
+ MOSAL_usleep
+ MOSAL_gettimeofday
+; MOSAL_spinlock_lock
+; MOSAL_spinlock_irq_lock
+; MOSAL_spinlock_unlock
+ ; ----- mosalu_k2u_cbk.c -----
+ k2u_cbk_register
+ k2u_cbk_deregister
+ ; ----- Mosalu_thread.c ----- + MOSAL_thread_start + MOSAL_thread_kill + MOSAL_thread_wait_for_exit + ; ----- Mosalu_socket.c ----- + MOSAL_socket_socket + MOSAL_socket_close + MOSAL_socket_connect + MOSAL_socket_bind + MOSAL_socket_listen + MOSAL_socket_accept + MOSAL_socket_send + MOSAL_socket_recv + MOSAL_socket_sendto + MOSAL_socket_recvfrom + MOSAL_socket_setsockopt + MOSAL_socket_get_last_error + ; ----- Mosalu_dirver.c ----- + MOSAL_getpid diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_arch.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_arch.h new file mode 100644 index 00000000..1417975d --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_arch.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_ARCH_H +#define H_MOSAL_ARCH_H + +#include +#include +#include + + +static __inline u_int32_t MOSAL_arch_atomic_cmp_xchg(volatile u_int32_t * addr, u_int32_t new_val, u_int32_t cmp_val ) +{ + return InterlockedCompareExchange( (PLONG)addr, new_val, cmp_val ); +} + +static __inline u_int32_t MOSAL_arch_atomic_xchg(volatile u_int32_t * addr, u_int32_t new_val) +{ + return InterlockedExchange( (PLONG)addr, new_val ); +} + +static __inline u_int32_t MOSAL_arch_atomic_inc32(volatile u_int32_t * addr) +{ + return InterlockedIncrement((PLONG)addr); +} + +static __inline u_int32_t MOSAL_arch_atomic_dec32(volatile u_int32_t * addr) +{ + return InterlockedDecrement((PLONG)addr); +} + + +/****************************************************************************** + * + * Function (only include): MOSAL_atomic_inc32 + * + * Description: atomically increment a dword (u_int32_t). + * + * Parameters: + * va (IN) void pointer to dword containing bit. + * + * Returns: incremented value. + * + ******************************************************************************/ +#define MOSAL_atomic_inc32(va) MOSAL_arch_atomic_inc32((va)) + +/****************************************************************************** + * + * Function (only include): MOSAL_atomic_dec32 + * + * Description: atomically decrement a dword (u_int32_t). + * + * Parameters: + * va (IN) void pointer to dword containing bit. + * + * Returns: decremented value. 
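+ *
+ * Example (editor's sketch, not in the original; 'refcnt' is hypothetical):
+ *     volatile u_int32_t refcnt = 1;
+ *     MOSAL_atomic_inc32( &refcnt );            -- take a reference
+ *     if ( MOSAL_atomic_dec32( &refcnt ) == 0 )
+ *         -- last reference gone: safe to free the object --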
+ * + ******************************************************************************/ +#define MOSAL_atomic_dec32(va) MOSAL_arch_atomic_dec32((va)) + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.c new file mode 100644 index 00000000..38dae9a7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mosal_priv.h" + +#if 0 +// Without use of NTDDK + +call_result_t MOSAL_PCI_read_config_dword(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t* data_p) +{ + /* find dev_p */ + MOSAL_dev_t * dev_p = find_device_by_location( bus, dev_func ); + if (dev_p == NULL) + return MT_ERROR; + + return ReadWritePciConfig( dev_p, data_p, offset, sizeof(long), TRUE ); +} + + +call_result_t MOSAL_PCI_write_config_dword(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t data) +{ + /* find dev_p */ + MOSAL_dev_t * dev_p = find_device_by_location( bus, dev_func ); + return ReadWritePciConfig( dev_p, &data, offset, sizeof(long), FALSE ); +} + + +#endif + +bool MOSAL_PCI_present(void) +{ + return TRUE; +} + + +call_result_t MOSAL_PCI_find_class(u_int32_t class_code, + u_int16_t index, + u_int8_t * bus_p, + u_int8_t * dev_func_p) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_read_io_byte(u_int32_t addr, u_int8_t *data_p) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_read_io_word(u_int32_t addr, u_int16_t *data_p) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_read_io_dword(u_int32_t addr, u_int32_t *data_p) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_write_io_byte(u_int32_t addr, u_int8_t data) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_write_io_word(u_int32_t addr, u_int16_t data) +{ + return MT_ENOSYS; +} + +call_result_t MOSAL_PCI_write_io_dword(u_int32_t addr, u_int32_t data) +{ + return MT_ENOSYS; +} + + +call_result_t MOSAL_PCI_read_mem(u_int64_t addr, u_int64_t size, void *data_p) +{ + PULONG from = (PULONG)addr; + PULONG to = (PULONG)data_p; + ULONG count = (ULONG)size >> 2; + ULONG rest = (ULONG)size - (count<<2); + + if (count > 0) + READ_REGISTER_BUFFER_ULONG( from, to, count); + if (rest > 0) + { + from += count; + to += count; + READ_REGISTER_BUFFER_UCHAR( (PUCHAR)from, (PUCHAR)to, 
rest); + } + + return MT_OK; +} + +call_result_t MOSAL_PCI_write_mem(u_int64_t addr, u_int64_t size, void *data_p) +{ + PULONG to = (PULONG)addr; + PULONG from = (PULONG)data_p; + ULONG count = (ULONG)size >> 2; + ULONG rest = (ULONG)size - (count<<2); + + if (count > 0) + WRITE_REGISTER_BUFFER_ULONG( to, from, count); + if (rest > 0) + { + from += count; + to += count; + WRITE_REGISTER_BUFFER_UCHAR( (PUCHAR)to, (PUCHAR)from, rest); + } + + return MT_OK; +} + +/* + * MPC860 + * ------ + */ +bool MOSAL_MPC860_present() +{ + bool rc; + + MTL_TRACE1("\n-> MOSAL_MPC860_present \n"); + +#ifdef PPC_PRESENT + rc = TRUE; +#else + rc = FALSE; +#endif + + MTL_TRACE1("<- MOSAL_MPC860_present rc=%s, \n", rc ? "TRUE":"FALSE"); + return rc; +} + +call_result_t MOSAL_MPC860_write(u_int32_t addr, + u_int32_t size, + void * data_p) +{ + call_result_t rc; + + MTL_TRACE1("\n-> MOSAL_MPC860_write addr=%08x, size=%d, data_p=%p\n", + addr, size, data_p); + +#ifdef PPC_PRESENT + rc = MOSAL_MAPP_mem_write(addr, size, data_p); +#else + rc = MT_ENOSYS; +#endif + + MTL_TRACE1("<- MOSAL_MPC860_write rc=%d (%s)\n", + rc, mtl_strerror_sym(rc)); + return rc; +} + + +call_result_t MOSAL_MPC860_read(u_int32_t addr, + u_int32_t size, + void * data_p) +{ + call_result_t rc; + + MTL_TRACE1("\n-> MOSAL_MPC860_read addr=%08x, size=%d, data_p=%p\n", + addr, size, data_p); + +#ifdef PPC_PRESENT + rc = MOSAL_MAPP_mem_read(addr, size, data_p); +#else + rc = MT_ENOSYS; +#endif + + MTL_TRACE1("<- MOSAL_MPC860_read rc=%d (%s)\n", + rc, mtl_strerror_sym(rc)); + return rc; +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.h new file mode 100644 index 00000000..77d6bf0c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_bus.h @@ -0,0 +1,362 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_BUS_H +#define H_MOSAL_BUS_H + +#include +#include + + +/****************************************************************************** + * Function: MOSAL_PCI_present + * + * Description: Check on existance of PCI in system. + * This function should be called before using any of the following. + * + * Parameters: (none) + * + * Returns: + * bool + * TRUE if PCI exist. 
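+ *
+ * Typical probe-time guard (editor's sketch, not part of the original):
+ *     if ( !MOSAL_PCI_present() )
+ *         return MT_ENODEV;     -- no PCI bus: nothing to scan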
+ * + ******************************************************************************/ +bool MOSAL_PCI_present(void); + +/****************************************************************************** + * Function: MOSAL_PCI_find_class + * + * Description: Find a PCI device based on class code. + * + * Parameters: + * class_code(IN) u_int32_t + * 24 Class code bits. + * index(IN) u_int16_t + * Occurance of device of given Vendor/Device IDs. + * bus_p(OUT) u_int8_t * + * Bus num of matching device. + * dev_func_p(OUT) u_int8_t * + * Device/Function ([7:3]/[2:0]) of matching device. + * + * Returns: + * call_result_t + * MT_OK if found, MT_ENODEV if such device not found. + * + * Note: + * For hot-swap support, the PCI bus should be really probed on device + * search, and not a preset DB (which was usually created during boot). + * + ******************************************************************************/ +call_result_t MOSAL_PCI_find_class(u_int32_t class_code, u_int16_t index, + u_int8_t *bus_p, u_int8_t *dev_func_p); + + +/****************************************************************************** + * Function: MOSAL_PCI_get_cfg_hdr + * + * Description: Get header type0 or type1 + * + * Parameters: + * bus(IN) u_int8_t + * Bus num of device. + * dev_func(IN) u_int8_t + * Device/Function ([7:3]/[2:0]) of device. + * + * cfg_hdr(OUT) MOSAL_PCI_cfg_hdr_t + * Union of type0 and type1 header. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR + * + * Notes: + * If header type is unknown it can be extracted from header_type field of + * type0 member offset or type1. + * + * + ******************************************************************************/ +call_result_t MOSAL_PCI_get_cfg_hdr(u_int8_t bus, u_int8_t dev_func, MOSAL_PCI_cfg_hdr_t * cfg_hdr); + + + + + +/****************************************************************************** + * Function: MOSAL_PCI_read_io_byte + * + * Description: Read byte of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to read. + * data_p(OUT) u_int8_t * + * Ptr to a byte data buffer. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_read_io_byte(u_int32_t addr, u_int8_t *data_p); + + + +/****************************************************************************** + * Function: MOSAL_PCI_read_io_word + * + * Description: Read word of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to read. + * data_p(OUT) u_int16_t * + * Ptr to a word data buffer. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_read_io_word(u_int32_t addr, u_int16_t *data_p); + + + +/****************************************************************************** + * Function: MOSAL_PCI_read_io_dword + * + * Description: Read dword of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to read. + * data_p(OUT) u_int32_t * + * Ptr to a dword data buffer. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. 
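+ *
+ * Editor's sketch (illustrative only; 'port' is a hypothetical I/O address;
+ * note the Windows implementation in mosal_bus.c currently returns MT_ENOSYS
+ * for the I/O-space accessors):
+ *     u_int32_t val;
+ *     if ( MOSAL_PCI_read_io_dword( port, &val ) != MT_OK )
+ *         -- I/O-space read not available --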
+ * + ******************************************************************************/ +call_result_t MOSAL_PCI_read_io_dword(u_int32_t addr, u_int32_t *data_p); + + + +/****************************************************************************** + * Function: MOSAL_PCI_write_io_byte + * + * Description: Write byte of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to write. + * data(IN) u_int8_t + * Byte data to write. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_io_byte(u_int32_t addr, u_int8_t data); + + + +/****************************************************************************** + * Function: MOSAL_PCI_write_io_word + * + * Description: Write word of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to write. + * data(IN) u_int16_t + * Word data to write. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_io_word(u_int32_t addr, u_int16_t data); + + + +/****************************************************************************** + * Function: MOSAL_PCI_write_io_dword + * + * Description: Write dword of PCI I/O space. + * + * Parameters: + * addr(IN) u_int32_t + * I/O address to write. + * data(IN) u_int32_t + * Dword data to write. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_io_dword(u_int32_t addr, u_int32_t data); + + + +/****************************************************************************** + * Function: MOSAL_PCI_read_mem + * + * Description: Read PCI I/O space. + * + * Parameters: + * addr(IN) u_int64_t + * I/O address to read. + * size(IN) u_int64_t + * Num. of bytes to read. + * data_p(OUT) (LEN @size) void * + * Ptr to data buffer of 'size' bytes at least. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + * Note: + * PCI access is optimized for maximum possible throughput/burst size. + * 64 bit transactions are issued if possible. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_read_mem(u_int64_t addr, u_int64_t size, void *data_p); + + + +/****************************************************************************** + * Function: MOSAL_PCI_write_mem + * + * Description: Write PCI I/O space. + * + * Parameters: + * addr(IN) u_int64_t + * I/O address to write. + * size(IN) u_int64_t + * Num. of bytes to write. + * data_p(IN) (LEN @size) void * + * Ptr to data buffer of 'size' bytes at least. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + * Note: + * PCI access is optimized for maximum possible throughput/burst size. + * 64 bit transactions are issued if possible. 
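+ *
+ * Editor's sketch - copying a small register block to/from PCI memory space
+ * (illustrative only; 'bar_pa' is a hypothetical physical address):
+ *     u_int32_t regs[4];
+ *     if ( MOSAL_PCI_read_mem( bar_pa, sizeof(regs), regs ) == MT_OK )
+ *         -- regs[] now holds 16 bytes from PCI memory space --
+ *     ...
+ *     MOSAL_PCI_write_mem( bar_pa, sizeof(regs), regs );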
+ * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_mem(u_int64_t addr, u_int64_t size, void *data_p); + + + + +/************************************************************************************************** + * MPC860 bus + **************************************************************************************************/ + + +/****************************************************************************** + * Function: MOSAL_MPC860_present + * + * Description: Check on existance of MPC860 bus in system. + * This function should be called before using any of the following. + * + * Parameters: (none) + * + * Returns: + * bool + * TRUE if MPC860 bus exist. + * + ******************************************************************************/ +bool MOSAL_MPC860_present(void); + +/****************************************************************************** + * Function: MOSAL_MPC860_read + * + * Description: Read MPC860 External Bus mem. space. + * + * Parameters: + * addr(IN) u_int32_t + * Address to read. + * size(IN) u_int32_t + * Num. of bytes to read. + * data_p(OUT) (LEN @size) void * + * Ptr to data buffer of 'size' bytes at least. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_MPC860_read(u_int32_t addr, u_int32_t size, void *data_p); + +/****************************************************************************** + * Function: MOSAL_MPC860_write + * + * Description: Write MPC860 External Bus mem. space. + * + * Parameters: + * addr(IN) u_int32_t + * Address to write. + * size(IN) u_int32_t + * Num. of bytes to write. + * data_p(IN) (LEN @size) void * + * Ptr to data buffer of 'size' bytes at least. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_MPC860_write(u_int32_t addr, u_int32_t size, void *data_p); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_reset_card + * + * Description: + * reset the card by its bus and dev_func + * + * Parameters: + * + * Returns: + * MT_OK or MT_ERROR + * + ******************************************************************************/ +call_result_t MOSAL_reset_card(u_int8_t bus, u_int8_t dev_func); + + + +#endif /* H_MOSAL_BUS_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_driver.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_driver.c new file mode 100644 index 00000000..da19d321 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_driver.c @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifdef __KERNEL__ + +#include "mosal_priv.h" +#include "MdIoctl.h" + +/* ----- Kernel Space ----- */ + +call_result_t MOSAL_manual_wrapper( MOSAL_rsct_t *res_p, int cmd, void *pi, void * po) +{ + call_result_t rc; + switch ( cmd ) { + case K2U_CBK_CBK_INIT: + { + k2u_cbk_hndl_t k2u_cbk_h; + + if ( res_p->k2u_cbk_h != INVALID_K2U_CBK_HNDL ) { + MTL_ERROR1(MT_FLFMT("%s: called k2u_cbk_init() more than once for the same process - pid=%d"), __func__, MOSAL_getpid()); + return MT_ERROR; + } + rc = k2u_cbk_init(&k2u_cbk_h); + if ( rc != MT_OK ) { + return rc; + } + res_p->k2u_cbk_h = k2u_cbk_h; + MTL_TRACE3(MT_FLFMT("%s: k2u_cbk_init() returned handle=%d"), __func__, k2u_cbk_h); + /* return the result */ + memcpy(po, &k2u_cbk_h, sizeof(k2u_cbk_h)); + return rc; + } + break; + + case K2U_CBK_CBK_CLEANUP: + { + k2u_cbk_hndl_t k2u_cbk_h = *(k2u_cbk_hndl_t*)pi; + rc = k2u_cbk_cleanup(k2u_cbk_h); + if ( rc == MT_OK ) { + if ( res_p->k2u_cbk_h != INVALID_K2U_CBK_HNDL ) { + res_p->k2u_cbk_h = INVALID_K2U_CBK_HNDL; + MTL_TRACE3(MT_FLFMT("%s: k2u_cbk_cleanup() freed handle %ld"), __func__, k2u_cbk_h); + } + else { + MTL_ERROR1(MT_FLFMT("%s: called k2u_cbk_cleanup() while there was an invalid handle in mosal resource tracking"), __func__); + } + } + return rc; + } + break; + + default: + return -ENOTTY; + + } +} + +#ifndef MT_BUILD_LIB +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** MOSAL_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** MOSAL_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** MOSAL_KL: DllUnload()"); + return STATUS_SUCCESS; +} +#endif +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen.c new file mode 100644 index 00000000..7d0bec17 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen.c @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mosal_priv.h" +#include + +#ifdef USE_TRACE +void MOSAL_trace_init(); +#endif +call_result_t MOSAL_mem_init(); +void MOSAL_mem_cleanup(); +static void calib_counts_per_sec(void); + +/* k2u_cbk prototype. Implemented in mosal_k2u_cbk.c */ +call_result_t MOSAL_k2u_cbk_mod_init(void); + +/* external variables */ +MOSAL_pid_t MOSAL_pid = 0; + +/* global variables */ + +/* device DB */ +MOSAL_dev_t MOSAL_dev_db[MOSAL_MAXDEV]; + +/* current max irql */ +KIRQL cur_max_irql = DISPATCH_LEVEL; + +MOSAL_rsct_t * MOSAL_rsct_open(MOSAL_pid_t pid) +{ + /* allocate resource element */ + MOSAL_rsct_t *res_p = (MOSAL_rsct_t*)TMALLOC(MOSAL_rsct_t); + if (res_p == NULL) { + MTL_ERROR4("MOSAL_rsct_open: allocation failed \n"); + } + else { + /* store mosal_mem resources */ + res_p->mosal_mem = MOSAL_mem_rsct_open(pid); + /* store mosal_k2u_cbk resources */ + res_p->k2u_cbk_h = INVALID_K2U_CBK_HNDL; + } + return res_p; +} + +void MOSAL_rsct_close(MOSAL_rsct_t * res_p, MOSAL_pid_t pid) +{ + call_result_t rc; + /* clean mosal_mlock resources */ + MOSAL_mlock_cleanup_pcs(pid); + /* clean mosal_mem resources */ + MOSAL_mem_rsct_close(res_p->mosal_mem); + /* clean mosal_k2u_cbk resources */ + if ( res_p->k2u_cbk_h != INVALID_K2U_CBK_HNDL ) { + rc = k2u_cbk_cleanup(res_p->k2u_cbk_h); + if ( rc != MT_OK ) { + MTL_ERROR1(MT_FLFMT("k2u_cbk_cleanup failed, pid=%d (%s)"), + pid,mtl_strerror_sym(rc)); + return; + } + } + FREE(res_p); +} + +/* exported functions */ +call_result_t MOSAL_init(unsigned int major) +{ + call_result_t rc = init_dpc(); + if (rc) + return rc; + + /*Initialize memory locking mechanizm*/ + rc = MOSAL_mlock_init(); + if (rc != MT_OK) { + return(rc); + } + + /*Initialize memory API*/ + rc = MOSAL_mem_init(); + if (rc != MT_OK) { + return(rc); + } + + /* Initialize k2u_cbk mechanism */ + rc = MOSAL_k2u_cbk_mod_init(); + if (rc != MT_OK) + return(rc); + /* not in use for now + init_queues(); + */ + RtlZeroMemory(MOSAL_dev_db, sizeof(MOSAL_dev_db)); + //calib_counts_per_sec(); + + /* trace */ + #ifdef USE_TRACE + MOSAL_trace_init(); + #endif + + return MT_OK; +} + +void MOSAL_cleanup() +{ + RtlZeroMemory(MOSAL_dev_db, sizeof(MOSAL_dev_db)); + deinit_dpc(); + MOSAL_mem_cleanup(); + MOSAL_mlock_cleanup(); +} + +MOSAL_pid_t MOSAL_getpid(void) +{ + return (MOSAL_pid_t)IoGetCurrentProcess(); +} + +void MOSAL_setpid(MOSAL_pid_t pid) +{ + MOSAL_pid = (MOSAL_pid_t)IoGetCurrentProcess(); +} + +bool MOSAL_is_privileged() { return TRUE; } + +/* + * MOSAL_get_exec_ctx - get execution context + */ +MOSAL_exec_ctx_t MOSAL_get_exec_ctx() +{ + KIRQL irql = KeGetCurrentIrql(); + if (irql < DISPATCH_LEVEL) + return MOSAL_IN_TASK; + if (irql == DISPATCH_LEVEL) + return MOSAL_IN_DPC; + return MOSAL_IN_ISR; +} + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * 
* * * * * * * * * * + * + * Endian conversions + * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe64 + * + * Description: + * ULONG conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int64_t + * + * Returns: + * u_int64_t + * + +******************************************************************************/ +u_int64_t MOSAL_letobe64( u_int64_t value ) +{ +#if 1 + /* it isn't supported really */ + return RtlUlonglongByteSwap(value); +#else + volatile u_int64_t res; + *((volatile int * const)&res + 1) = RtlUlongByteSwap(*(volatile int * const)&value); + *(volatile int * const)&res = RtlUlongByteSwap(*((volatile int * const)&value + 1)); + return res; +#endif +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe32 + * + * Description: + * ULONG conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int32_t + * + * Returns: + * u_int32_t + * + +******************************************************************************/ +u_int32_t MOSAL_letobe32( u_int32_t value ) +{ + return RtlUlongByteSwap(value); +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_letobe16 + * + * Description: + * USHORT conversion from little endian to big endian + * + * Parameters: + * value(IN) u_int16_t + * + * Returns: + * u_int16_t + * + +******************************************************************************/ +u_int16_t MOSAL_letobe16( u_int16_t value ) +{ + return RtlUshortByteSwap(value); +} + +u_int64_t MOSAL_get_counts_per_sec(void) +{ + /* Number of 100ns increments in a second. */ + return 10000000; +} + +u_int64_t MOSAL_nsecs(void) +{ + return KeQueryInterruptTime() * 100; +} + + +call_result_t MOSAL_reset_card(u_int8_t bus, u_int8_t dev_func) +{ + call_result_t rc; + MOSAL_dev_t *dev_p = find_device_by_location( bus, dev_func ); + if (dev_p == NULL) { + MTL_ERROR1("MOSAL_reset_card: failed to find appropriate device (bus %d, dev_func 0x%02x)\n", (int)bus, dev_func); + return MT_ERROR; + } + MTL_ERROR1("MOSAL_reset_card: calling driver to reset the card (bus %d, dev_func 0x%02x)\n", (int)bus, dev_func); + rc = (call_result_t)((Md_Mosal_Helper_t)dev_p->drv_helper)( dev_p->drv_helper_ctx, MD_HELPER_CARD_RESET ); + if (rc != MT_OK) + MTL_ERROR1("MOSAL_reset_card: failed to reset the card (rc 0x%x, bus %d, dev_func 0x%02x)\n", + (u_int32_t)rc, (int)bus, dev_func); + return rc; +} + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen_priv.h new file mode 100644 index 00000000..a5ac0a42 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_gen_priv.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_GEN_PRIV_H +#define H_MOSAL_GEN_PRIV_H + +/****************************************************************************** + * Function: MOSAL_nsecs + * + * Description: read the counter of S_TICKs (S_TICK is usually of 100 nsec duration) + * + * Parameters: + * + * Returns: + * the value of counter of S_TICKs (n.b. S stands for "some" or "strange" ) + * + ******************************************************************************/ +u_int64_t MOSAL_nsecs(void); + +#define MOSAL_TAKE64_LOW_ADDR(arg) ((long*)&(arg)) +#define MOSAL_TAKE64_HIGH_ADDR(arg) ((long*)&(arg)+1) +#define MOSAL_TAKE64_LOW_VAL(arg) (*(MOSAL_TAKE64_LOW_ADDR(arg))) +#define MOSAL_TAKE64_HIGH_VAL(arg) (*(MOSAL_TAKE64_HIGH_ADDR(arg))) + +#include "mosal_k2u_cbk.h" +/* per process MOSAL resource tracking info */ +typedef struct { + void * mosal_mem; + k2u_cbk_hndl_t k2u_cbk_h; /* handle to callback db */ +} MOSAL_rsct_t; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_rsct_open + * + ******************************************************************************/ +MOSAL_rsct_t *MOSAL_rsct_open(MOSAL_pid_t pid); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_rsct_close + * + ******************************************************************************/ +void MOSAL_rsct_close(MOSAL_rsct_t *p_rsct, MOSAL_pid_t pid); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_manual_wrapper + * + ******************************************************************************/ +call_result_t MOSAL_manual_wrapper( MOSAL_rsct_t *res_p, int cmd, void *pi, void * po); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf.c new file mode 100644 index 00000000..70bdae39 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf.c @@ -0,0 +1,639 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include "mosal_iobuf_imp.h" +#include "mosal_priv.h" + + +// non-WDM implementation + +NTKERNELAPI +PHYSICAL_ADDRESS +MmGetPhysicalAddress ( + IN PVOID BaseAddress + ); + +/* + * MOSAL_iobuf_get_props + */ +call_result_t MOSAL_iobuf_get_props(MOSAL_iobuf_t iobuf, + MOSAL_iobuf_props_t *props_p) +{ + call_result_t rc; + + // sanity check + if (!iobuf) return MT_EINVAL; + + props_p->size = iobuf->size; + props_p->va = iobuf->va; + props_p->nr_pages = iobuf->nr_pages; + props_p->prot_ctx = iobuf->prot_ctx; + rc = MOSAL_get_page_shift(MOSAL_get_current_prot_ctx(), iobuf->va, &props_p->page_shift); + if ( rc != MT_OK ) { + return rc; + } + + return MT_OK; +} + + + +/* + * MOSAL_iobuf_register + */ +static call_result_t register_segment(MT_virt_addr_t va, + MT_size_t size, + MOSAL_prot_ctx_t prot_ctx, + MOSAL_mem_perm_t req_perm, + MOSAL_iobuf_t iobuf_p) +{ + PMDL mdl_p; + call_result_t rc; + KPROCESSOR_MODE mode; + MOSAL_iobuf_seg_t new_iobuf; + static ULONG cnt=0; + LOCK_OPERATION Operation; + int n_tries=1, retry = 0, try_num=1; + int fake_ro =0; + + // set Operation + if ((req_perm & MOSAL_PERM_READ) && (req_perm & MOSAL_PERM_WRITE)) + Operation = IoModifyAccess; + else + if (req_perm & MOSAL_PERM_WRITE) + Operation = IoWriteAccess; + else + if (req_perm & MOSAL_PERM_READ) + Operation = IoReadAccess; + else { + MTL_ERROR4("MOSAL_iobuf_register: Illegal permission parameter (%d)\n", req_perm); + rc = MT_EINVAL; + goto err0; + } + + // allocate IOBUF segment object + new_iobuf = (MOSAL_iobuf_seg_t)TMALLOC(struct mosal_iobuf_seg_st); + if (new_iobuf == NULL) { + MTL_ERROR4("MOSAL_iobuf_register: TMALLOC failed\n"); + rc = MT_EKMALLOC; + goto err0; + } + + // allocate MDL + mdl_p = IoAllocateMdl( (PVOID)va, (ULONG)size, FALSE,FALSE,NULL); + if (mdl_p == NULL) { + MTL_ERROR4("MOSAL_iobuf_register: IoAllocateMdl failed, va %p, sz %d\n", va, size); + rc = MT_EKMALLOC; + goto err1; + } + + // make context-dependent things + if (prot_ctx == MOSAL_PROT_CTX_KERNEL) { /* Mapping to kernel virtual address */ +// MmBuildMdlForNonPagedPool(mdl_p); // fill MDL ??? - should we do that really ? 
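+ // Editor's note: for a nonpaged kernel buffer MmBuildMdlForNonPagedPool
+ // (commented out above) would fill the MDL's PFN array without locking;
+ // this code instead lets MmProbeAndLockPages below handle both the
+ // kernel- and user-mode cases uniformly.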
+ mode = KernelMode; + } + else { + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + mode = UserMode; + } + + // fake RO access: in case of MOSAL_PERM_READ access we replace by MOSAL_PERM_WRITE to enforce rw pa list + if (Operation == IoReadAccess) { + fake_ro = 1; + Operation = IoWriteAccess; + } + +check_va_validity: + __try + { /* try */ + MmProbeAndLockPages( mdl_p, mode, Operation ); /* lock memory */ + } /* try */ + + __except (EXCEPTION_EXECUTE_HANDLER) + { + NTSTATUS Status = GetExceptionCode(); + MTL_ERROR4("MOSAL_iobuf_register: Exception 0x%x on MmProbeAndLockPages(), va %p, sz %d, try_num %d\n", + Status, va, size, try_num); + if (Status != STATUS_ACCESS_VIOLATION) { + /* usually, c00000a1 - process memory quote is exceeded */ + rc = MT_EAGAIN; + goto err2; + } + // fake RO access + if (fake_ro) { + fake_ro = 0; + Operation = IoReadAccess; + goto check_va_validity; + } + + // + // case STATUS_ACCESS_VIOLATION: try to understand whether va is valid + // + + // mark, that we are in retry mode + retry = 1; + + // failure on IoModifyAccess mode is unclear case - we'll read and write in this case + if (Operation == IoModifyAccess) { + Operation = IoReadAccess; + n_tries = 2; + } + + // we perform all the tries with the same STATUS_ACCESS_VIOLATION result + if (n_tries <= 0) { + /* adress is not valid */ + rc = MT_ENOMEM; + goto err2; + } + + // switch the mode + if (Operation == IoReadAccess) + Operation = IoWriteAccess; + else + Operation = IoReadAccess; + + // try once more + n_tries--; try_num++; + goto check_va_validity; + + } + + // fake RO access + if (fake_ro) { + fake_ro = 0; + MmUnlockPages( mdl_p ); // unlock the buffer + Operation = IoReadAccess; + goto check_va_validity; + } + + // check, whether we failed the first time + if (retry) { + MmUnlockPages( mdl_p ); // unlock the buffer + rc = MT_EPERM; + goto err2; + } + + if( mode == UserMode ) + { + if (req_perm & MOSAL_PERM_WRITE) + { + new_iobuf->h_secure = MmSecureVirtualMemory( + (PVOID)va, (SIZE_T)size, PAGE_READWRITE ); + } + else + { + new_iobuf->h_secure = MmSecureVirtualMemory( + (PVOID)va, (SIZE_T)size, PAGE_READONLY ); + } + + if( !new_iobuf->h_secure ) + { + rc = MT_EPERM; + goto err3; + } + } + else + { + new_iobuf->h_secure = NULL; + } + + // fill IOBUF object + new_iobuf->va = va; + new_iobuf->size= size; + new_iobuf->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size ); + new_iobuf->mdl_p = mdl_p; + new_iobuf->prot_ctx = prot_ctx; + MOSAL_dlist_insert_tail( &iobuf_p->seg_que, &new_iobuf->link ); + return MT_OK; + +err3: + MmUnlockPages( mdl_p ); +err2: + IoFreeMdl(mdl_p); +err1: + FREE((PVOID)new_iobuf); +err0: + return rc; +} + +static void deregister_segment(MOSAL_iobuf_seg_t iobuf_seg_p) +{ + if( iobuf_seg_p->h_secure ) + MmUnsecureVirtualMemory( iobuf_seg_p->h_secure); + MmUnlockPages( iobuf_seg_p->mdl_p ); // unlock the buffer + IoFreeMdl( iobuf_seg_p->mdl_p ); // free MDL + FREE(iobuf_seg_p); +} + +/* + * MOSAL_iobuf_register + */ +call_result_t MOSAL_iobuf_register(MT_virt_addr_t va, + MT_size_t size, + MOSAL_prot_ctx_t prot_ctx, + MOSAL_mem_perm_t req_perm, + MOSAL_iobuf_t *iobuf_p, + u_int32_t flags) +{ + call_result_t rc; + MOSAL_iobuf_t new_iobuf; // new allocated IOBUF object + MT_virt_addr_t seg_va; // current segment start + MT_size_t seg_size; // current segment size + MT_size_t rdc; // remain data counter - what is rest to lock + MT_size_t delta; // he size of the last not full page of the first segment + MOSAL_iobuf_seg_t iobuf_seg_p; // pointer to current segment object + unsigned 
page_size; + +// 32 - for any case +#define PFNS_IN_PAGE_SIZE_MDL ((PAGE_SIZE - sizeof(struct _MDL) - 32) / sizeof(long)) +#define MIN_IOBUF_SEGMENT_SIZE (PAGE_SIZE * PFNS_IN_PAGE_SIZE_MDL) // 4MB + + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + + // sanity checks + if ( !iobuf_p ) return MT_EINVAL; + if ( size == 0 ) return MT_EINVAL; + if ( va == VA_NULL ) return MT_EINVAL; + + // allocate IOBUF object + new_iobuf = (MOSAL_iobuf_t)TMALLOC(struct mosal_iobuf_st); + if (new_iobuf == NULL) { + MTL_ERROR4("MOSAL_iobuf_register: TMALLOC failed, va %p, sz %d\n", va, size); + rc = MT_EAGAIN; + goto err0; + } + + // init IOBUF object + MOSAL_dlist_init_head( &new_iobuf->seg_que ); + new_iobuf->seg_num = 0; + + // Round the seg_va down to a page boundary so that we always get a seg_size + // that is an integral number of pages. + delta = va & (PAGE_SIZE - 1); + seg_va = va - delta; + // Since we rounded down the seg_va, we need to round up the rdc and size. + seg_size = rdc = size + delta; + + // allocate segments + while (rdc > 0) { + // map a segment + rc = register_segment(seg_va, seg_size, prot_ctx, req_perm, new_iobuf ); + + // success - move to another segment + if (rc == MT_OK) { + rdc -= seg_size; + seg_va += seg_size; + new_iobuf->seg_num++; + if (seg_size > rdc) + seg_size = rdc; + continue; + } + + // failure - the buffer is too large: shrink it and try once more + if (rc == MT_EKMALLOC) { + // nowhere left to shrink - not enough memory + if (seg_size <= MIN_IOBUF_SEGMENT_SIZE) + break; + // halve the size + seg_size >>= 1; + // round the segment size to the page boundary (only for the first segment) + if (new_iobuf->seg_num == 0) { + rc = MOSAL_get_page_size( prot_ctx, seg_va, &page_size ); + if (rc != MT_OK) + break; + delta = ((ULONG_PTR)seg_va + seg_size) & (page_size - 1); + seg_size -= delta; + seg_size += page_size; + if (seg_size > rdc) + seg_size = rdc; + } + continue; + } + + // got unrecoverable error + break; + } + + // SUCCESS + if ( rc == MT_OK) { + // fill IOBUF object + new_iobuf->va = va; + new_iobuf->size= size; + new_iobuf->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size ); + new_iobuf->prot_ctx = prot_ctx; + *iobuf_p = new_iobuf; + MTL_DEBUG4("MOSAL_iobuf_register: registered buffer va %p, sz %d, seg_num %d\n", va, size, new_iobuf->seg_num); + return MT_OK; + } + + // FAILURE - release segments + while (!MOSAL_dlist_is_empty( &new_iobuf->seg_que )) { + iobuf_seg_p = (MOSAL_iobuf_seg_t)(PVOID)MOSAL_dlist_remove_tail( &new_iobuf->seg_que ); + deregister_segment(iobuf_seg_p); + new_iobuf->seg_num--; + } + ASSERT(new_iobuf->seg_num == 0); + + FREE((PVOID)new_iobuf); +err0: + return rc; +} + + + +/* + * MOSAL_iobuf_deregister + */ +call_result_t MOSAL_iobuf_deregister(MOSAL_iobuf_t iobuf_p) +{ + MOSAL_iobuf_seg_t iobuf_seg_p; // pointer to current segment object + + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + + // sanity check + if (!iobuf_p) return MT_EINVAL; + + // release segments + while (!MOSAL_dlist_is_empty( &iobuf_p->seg_que )) { + iobuf_seg_p = (MOSAL_iobuf_seg_t)(PVOID)MOSAL_dlist_remove_tail( &iobuf_p->seg_que ); + deregister_segment(iobuf_seg_p); + iobuf_p->seg_num--; + } + ASSERT(iobuf_p->seg_num == 0); + + // release the rest + FREE(iobuf_p); + + return MT_OK; +} + + + +/* + * MOSAL_iobuf_get_tpt + */ +call_result_t MOSAL_iobuf_get_tpt(MOSAL_iobuf_t iobuf, + u_int32_t npages, + MT_phys_addr_t *pa_arr, + u_int32_t *page_size_p, + u_int32_t *act_table_sz_p) +{ + u_int32_t i, cnt; + MOSAL_iobuf_seg_t iobuf_seg_p; // pointer to current segment object
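/* note (added): segment objects are reached by casting their LIST_ENTRY links, which relies on 'link' being the first member of struct mosal_iobuf_seg_st */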
 + PPFN_NUMBER pfn_p; + MT_phys_addr_t *pa_buf_p = pa_arr; + call_result_t rc; + u_int32_t pg_sz; + u_int32_t pg_shift; + + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + + // sanity check - before iobuf is dereferenced + if (!iobuf) return MT_EINVAL; + + cnt = MT_MIN(npages,iobuf->nr_pages); + iobuf_seg_p = (MOSAL_iobuf_seg_t)iobuf->seg_que.Flink; + + // get page shift + rc = MOSAL_get_page_shift(MOSAL_get_current_prot_ctx(), iobuf->va, &pg_shift); + if ( rc != MT_OK ) { + return rc; + } + + // get page size + rc = MOSAL_get_page_size(MOSAL_get_current_prot_ctx(), iobuf->va, &pg_sz); + if ( rc != MT_OK ) { + return rc; + } + + // + // copy phys addresses + // + + // pass along the chain of segments + for (; (PVOID)iobuf_seg_p != (PVOID)&iobuf->seg_que; iobuf_seg_p = (MOSAL_iobuf_seg_t)iobuf_seg_p->link.Flink ) { + // get the start of PFN array + pfn_p = MmGetMdlPfnArray( iobuf_seg_p->mdl_p ); + // for all the pages in this array + for (i = 0; i < iobuf_seg_p->nr_pages; i++, pa_buf_p++, pfn_p++) { + // convert PFN to the physical address + *pa_buf_p = (MT_phys_addr_t)*pfn_p << pg_shift; + // for the very first page - add the offset from the start of the page + if (pa_buf_p == pa_arr) + *pa_buf_p |= iobuf->va & (pg_sz - 1); + // if the user's buffer is full - exit + if (--cnt == 0) + goto exit; + } + } + +exit: + if (page_size_p) *page_size_p = pg_sz; + if (act_table_sz_p) *act_table_sz_p = iobuf->nr_pages; + return MT_OK; +} + + +/* + * MOSAL_iobuf_iter_init + */ +call_result_t MOSAL_iobuf_iter_init(MOSAL_iobuf_t iobuf, MOSAL_iobuf_iter_t *iterator_p) +{ + iterator_p->seg_p = (MOSAL_iobuf_seg_t)iobuf->seg_que.Flink; + iterator_p->pfn_ix = 0; + return MT_OK; +} + +/* + * MOSAL_iobuf_cmp_tpt + */ +int MOSAL_iobuf_cmp_tpt( MOSAL_iobuf_t iobuf1_p, MOSAL_iobuf_t iobuf2_p ) +{ + u_int32_t i; + MOSAL_iobuf_seg_t seg1_p; // pointer to current segment object in 1st iobuf + MOSAL_iobuf_seg_t seg2_p; // pointer to current segment object in 2nd iobuf + PPFN_NUMBER pfn1_p, pfn2_p; + u_int32_t pfn_ix1, pfn_ix2; // index of PFN in PFN array of the current segment + + // + // the case of different protection contexts - count that as an error + // + if ( iobuf1_p->prot_ctx != iobuf2_p->prot_ctx ) + return -1; + + // + // the case where both iobufs are in kernel space + // + if ( iobuf1_p->prot_ctx == MOSAL_get_kernel_prot_ctx() ) { + if ( (iobuf1_p->va!=iobuf2_p->va) || (iobuf1_p->size!=iobuf2_p->size) ) + return -1; + return 0; + } + + // + // the case where both iobufs are in user space + // + + // quick checks + /* different sizes */ + if ( iobuf1_p->nr_pages !=iobuf2_p->nr_pages ) + return -1; + + // prepare for the loop + seg1_p = (MOSAL_iobuf_seg_t)iobuf1_p->seg_que.Flink; // first segment of the first iobuf + seg2_p = (MOSAL_iobuf_seg_t)iobuf2_p->seg_que.Flink; // first segment of the second iobuf + pfn1_p = MmGetMdlPfnArray( seg1_p->mdl_p ); + pfn2_p = MmGetMdlPfnArray( seg2_p->mdl_p ); + pfn_ix1= pfn_ix2=0; + + // pass along all the PFN arrays + for (i = 0; i < iobuf1_p->nr_pages; i++) { + // compare page numbers + if (*pfn1_p++ != *pfn2_p++) + return -1; + + // get to the next PFN of the 1st iobuf + if (++pfn_ix1 >= seg1_p->nr_pages) { + seg1_p = (MOSAL_iobuf_seg_t)seg1_p->link.Flink; + if ((PVOID)seg1_p == (PVOID)&iobuf1_p->seg_que) { + i++; + break; + } + pfn1_p = MmGetMdlPfnArray( seg1_p->mdl_p ); + pfn_ix1 = 0; + } + + // get to the next PFN of the 2nd iobuf + if (++pfn_ix2 >= seg2_p->nr_pages) { + seg2_p = (MOSAL_iobuf_seg_t)seg2_p->link.Flink; + if ((PVOID)seg2_p == (PVOID)&iobuf2_p->seg_que) { + i++; + break; + } + pfn2_p = MmGetMdlPfnArray( 
seg2_p->mdl_p ); + pfn_ix2 = 0; + } + } + + ASSERT( i == iobuf1_p->nr_pages); + return 0; +} + + + +/* + * MOSAL_iobuf_get_tpt_seg + */ +call_result_t MOSAL_iobuf_get_tpt_seg(MOSAL_iobuf_t iobuf, MOSAL_iobuf_iter_t *iterator_p, + MT_size_t n_pages_in, MT_size_t *n_pages_out_p, + MT_phys_addr_t *page_tbl_p) +{ + u_int32_t i=0; + MOSAL_iobuf_seg_t seg_p; // pointer to current segment object + PPFN_NUMBER pfn_p; + u_int32_t pfn_ix; // index of PFN in PFN array of the current segment + MT_phys_addr_t *pa_buf_p = page_tbl_p; + call_result_t rc; + u_int32_t pg_sz; + u_int32_t pg_shift; + + // get page shift + rc = MOSAL_get_page_shift(MOSAL_get_current_prot_ctx(), iobuf->va, &pg_shift); + if ( rc != MT_OK ) { + return rc; + } + + // get page size + rc = MOSAL_get_page_size(MOSAL_get_current_prot_ctx(), iobuf->va, &pg_sz); + if ( rc != MT_OK ) { + return rc; + } + + + // prepare to the loop + seg_p = iterator_p->seg_p; // first segment of the first iobuf + pfn_ix= iterator_p->pfn_ix; + if ((PVOID)seg_p == (PVOID)&iobuf->seg_que) + goto exit; + pfn_p = MmGetMdlPfnArray( seg_p->mdl_p ) + pfn_ix; + + // pass along all the PFN arrays + for (; i < n_pages_in; i++, pa_buf_p++) { + // convert PFN to the physical address + *pa_buf_p = (MT_phys_addr_t)*pfn_p++ << pg_shift; + + // for the very first page - add the offset from the start of the page + if (pa_buf_p == page_tbl_p) + *pa_buf_p |= iobuf->va & (pg_sz - 1); + + // get to the next PFN + if (++pfn_ix >= seg_p->nr_pages) { + seg_p = (MOSAL_iobuf_seg_t)seg_p->link.Flink; + pfn_ix = 0; + if ((PVOID)seg_p == (PVOID)&iobuf->seg_que) { + i++; + break; + } + pfn_p = MmGetMdlPfnArray( seg_p->mdl_p ); + } + } + +exit: + iterator_p->seg_p = seg_p; + iterator_p->pfn_ix = pfn_ix; + *n_pages_out_p = i; + return MT_OK; +} + +/* + * MOSAL_iobuf_ctx_init + */ +call_result_t MOSAL_iobuf_ctx_init(MOSAL_mlock_ctx_t *mlock_ctx_p) +{ + return MT_OK; // ?? +} + +/* + * MOSAL_iobuf_ctx_cleanup + */ +call_result_t MOSAL_iobuf_ctx_cleanup(MOSAL_mlock_ctx_t mlock_ctx) +{ + return MT_OK; // ?? +} + +/* + * MOSAL_iobuf_restore_perm + */ +call_result_t MOSAL_iobuf_restore_perm(MOSAL_iobuf_t iobuf) +{ + return MT_ENOSYS; +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h new file mode 100644 index 00000000..85e8fcd3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_iobuf_imp.h @@ -0,0 +1,73 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef __MOSAL_IOBUF_IMP__H +#define __MOSAL_IOBUF_IMP__H + + +#ifdef __KERNEL__ +#include + +/* segment */ +struct mosal_iobuf_seg_st { + LIST_ENTRY link; + PMDL mdl_p; + MT_virt_addr_t va; /* virtual address of the buffer */ + MT_size_t size; /* size in bytes of the buffer */ + u_int32_t nr_pages; + MOSAL_prot_ctx_t prot_ctx; + void *h_secure; +}; + +typedef struct mosal_iobuf_seg_st * MOSAL_iobuf_seg_t; + +/* iterator for getting segments of tpt */ +struct mosal_iobuf_iter_st { + MOSAL_iobuf_seg_t seg_p; /* the item from where to take the next translations */ + unsigned int pfn_ix; /* index from where to take the next translation */ +}; + + + +struct mosal_iobuf_st { + MT_virt_addr_t va; /* virtual address of the buffer */ + MT_size_t size; /* size in bytes of the buffer */ + u_int32_t nr_pages; + MOSAL_prot_ctx_t prot_ctx; + LIST_ENTRY seg_que; + int seg_num; +}; +#endif + +call_result_t MOSAL_iobuf_init(void); +void MOSAL_iobuf_cleanup(void); + +#endif /* __MOSAL_IOBUF_IMP__H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c new file mode 100644 index 00000000..ec94fcda --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_k2u_cbk.c @@ -0,0 +1,311 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#define C_K2U_CBK_K_C + +#include + +#include "mosal_priv.h" +/* Maximum processes supported */ +#define MAX_PROCS 256 +/* Max outstanding calls per process - additional calls will be lost */ +#define LOG2_MAX_OUTS_CALLS_PER_PROC 7 +#define MAX_OUTS_CALLS_PER_PROC (1<<LOG2_MAX_OUTS_CALLS_PER_PROC) +#define MAX_OUTS_CALLS_MASK (MAX_OUTS_CALLS_PER_PROC-1) + new_proc->cons= new_proc->prod= 0; /* init consumer-producer buffer */ + new_proc->tid= (MT_ulong_ptr_t)KeGetCurrentThread(); + new_proc->pid= MOSAL_getpid(); + new_proc->ref_cnt = 1; + MOSAL_sem_init(&(new_proc->qsem),0); + + /* Insert context to handles array */ + + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + + /* Find free handle */ + for (free_hndl= 0; free_hndl < MAX_PROCS; free_hndl++) { + if (cbk_db[free_hndl] == NULL) break; + } + if (free_hndl == MAX_PROCS) { /* no free entry */ + MOSAL_spinlock_unlock(&cbk_dat_lock); + FREE(new_proc); + MTL_ERROR4(MT_FLFMT("No resources for registering additional processes (max.=%d)"), + MAX_PROCS); + return MT_EAGAIN; + } + + cbk_db[free_hndl]= new_proc; + + MOSAL_spinlock_unlock(&cbk_dat_lock); + + MTL_DEBUG4("%s: allocated cbk handle %d for tid=" MT_ULONG_PTR_FMT ".\n", __func__, + free_hndl,KeGetCurrentThread()); + + *k2u_cbk_h_p = free_hndl; + return MT_OK; +} + + +call_result_t k2u_cbk_cleanup(k2u_cbk_hndl_t k2u_cbk_h) +{ + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + if ( (k2u_cbk_h>=MAX_PROCS) || + (k2u_cbk_h==INVALID_K2U_CBK_HNDL) || + (cbk_db[k2u_cbk_h]==NULL) ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR1(MT_FLFMT("%s: called with invalid handle"), __func__); + return MT_EINVAL; + } + MOSAL_spinlock_unlock(&cbk_dat_lock); + + k2u_cbk_invoke(k2u_cbk_h,K2U_CBK_CLEANUP_CBK_ID,NULL,0); + + return k2u_cbk_rsrc_cleanup(k2u_cbk_h); +} + +static inline call_result_t rm_db_entry(k2u_cbk_hndl_t k2u_cbk_h) +{ + proc_cbk_dat_t *rm_proc; + + rm_proc= cbk_db[k2u_cbk_h]; + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + cbk_db[k2u_cbk_h]= NULL; /* remove from db first, free resources only afterwards */ + MOSAL_spinlock_unlock(&cbk_dat_lock); + /* Free resources allocated in queue */ + FREE(rm_proc); + return MT_OK; +} + +static call_result_t k2u_cbk_rsrc_cleanup(k2u_cbk_hndl_t k2u_cbk_h) +{ + proc_cbk_dat_t *cbk_p = NULL; + + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + MTL_TRACE3(MT_FLFMT("%s called with handle=%d"), __func__, k2u_cbk_h); + if ( (k2u_cbk_h>=MAX_PROCS) || + (k2u_cbk_h==INVALID_K2U_CBK_HNDL) || + (cbk_db[k2u_cbk_h]==NULL) ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR1(MT_FLFMT("%s: invalid handle=%d"), __func__, k2u_cbk_h); + return MT_EINVAL; + } + if ( cbk_db[k2u_cbk_h]->pid != MOSAL_getpid() ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR1(MT_FLFMT("%s: tried to cleanup but belongs to a different pgrp"), __func__); + return MT_EPERM; + } + cbk_db[k2u_cbk_h]->ref_cnt--; + MTL_TRACE3(MT_FLFMT("%s: ref_cnt=%d"), __func__, cbk_db[k2u_cbk_h]->ref_cnt); + if ( cbk_db[k2u_cbk_h]->ref_cnt == 0 ) { + cbk_p = cbk_db[k2u_cbk_h]; + cbk_db[k2u_cbk_h] = NULL; + } + MOSAL_spinlock_unlock(&cbk_dat_lock); + if ( cbk_p ) { + FREE(cbk_p); + } + return MT_OK; +} + + +/* This function is assumed to be invoked by a single (cbk_polling) thread, per k2u_cbk handle */ +/* Since this is the only context in which the callback entry may be removed, + * we do not bother to hold the mutex. + * Only on entry removal (rm_db_entry) is the mutex held, to synchronize 
with k2u_cbk_invoke() + */ +call_result_t k2u_cbk_pollq(k2u_cbk_hndl_t k2u_cbk_h, + k2u_cbk_id_t *cbk_id_p,void *data_p, MT_size_t *size_p) +{ + volatile cbk_msg_t *msg_p; + MT_ulong_ptr_t cur_tid = (MT_ulong_ptr_t)KeGetCurrentThread(); + + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + + if ( (k2u_cbk_h>=MAX_PROCS) || + (k2u_cbk_h==INVALID_K2U_CBK_HNDL) || + (cbk_db[k2u_cbk_h] == NULL) ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR4(MT_FLFMT("%s: Invalid handle (%d)"), __func__, k2u_cbk_h); + return MT_EINVAL; + } + + if (cbk_db[k2u_cbk_h]->tid != cur_tid) { + MTL_ERROR1(MT_FLFMT( + "Polling request was tried by thread with tid " MT_ULONG_PTR_FMT " != polling thread tid (" MT_ULONG_PTR_FMT ")"), + cur_tid,cbk_db[k2u_cbk_h]->tid); + MOSAL_spinlock_unlock(&cbk_dat_lock); + return MT_EPERM; + } + + cbk_db[k2u_cbk_h]->ref_cnt++; + MTL_TRACE3(MT_FLFMT("%s: ref_cnt=%d"), __func__, cbk_db[k2u_cbk_h]->ref_cnt); + /* Now that we are sure that the real (only) queue owner is invoking this function + * we can release the mutex, since the handle entry cannot be removed by another + * thread (this function is the only path to rm_db_entry() ). + * No synchronization is required in regard to the consumer index (updated by this thread only) + */ + MOSAL_spinlock_unlock(&cbk_dat_lock); + + if (MOSAL_sem_acq(&(cbk_db[k2u_cbk_h]->qsem),TRUE) != MT_OK) { + MTL_DEBUG4(MT_FLFMT("MOSAL_sem_acq for qsem was interrupted.")); + k2u_cbk_rsrc_cleanup(k2u_cbk_h); + return MT_EINTR; + } + + if (cbk_db[k2u_cbk_h]->cons == cbk_db[k2u_cbk_h]->prod) { + MTL_ERROR2(MT_FLFMT("Passed queue semaphore but found no message (hndl=%d, tid=" MT_ULONG_PTR_FMT ")"), + k2u_cbk_h,KeGetCurrentThread()); + k2u_cbk_rsrc_cleanup(k2u_cbk_h); + return MT_EAGAIN; + } + + msg_p= cbk_db[k2u_cbk_h]->msg_q+cbk_db[k2u_cbk_h]->cons; + *cbk_id_p = msg_p->id; + *size_p = msg_p->size; + MTL_TRACE3(MT_FLFMT("%s: woke up with a new id=%d"), __func__, *cbk_id_p); + if (msg_p->size > 0) { + memcpy( data_p,msg_p->data_p,msg_p->size); + FREE(msg_p->data_p); + } + cbk_db[k2u_cbk_h]->cons= (cbk_db[k2u_cbk_h]->cons+1) & MAX_OUTS_CALLS_MASK; + + k2u_cbk_rsrc_cleanup(k2u_cbk_h); + return MT_OK; +} + + +call_result_t k2u_cbk_invoke(k2u_cbk_hndl_t k2u_cbk_h, k2u_cbk_id_t cbk_id, + void *data_p, MT_size_t size) +{ + cbk_msg_t *msg_p; + u_int32_t next_prod; /* next producer index */ + MT_ulong_ptr_t cur_pid = MOSAL_getpid(); + MT_ulong_ptr_t cur_tid = (MT_ulong_ptr_t)KeGetCurrentThread(); + + if ((size > 0) && (data_p == NULL)) return MT_EINVAL; + + MOSAL_spinlock_dpc_lock(&cbk_dat_lock); + + if ( (k2u_cbk_h>=MAX_PROCS) || + (k2u_cbk_h==INVALID_K2U_CBK_HNDL) || + (cbk_db[k2u_cbk_h]==NULL) ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR4(MT_FLFMT("%s: Invalid handle (%d)"), __func__, + k2u_cbk_h); + return MT_EINVAL; + } + + MTL_TRACE4(__FUNCTION__ ": called for cbk_hndl=%d cbk_id=%d data size=0x"SIZE_T_FMT".\n", + k2u_cbk_h,cbk_id,size); + + /* for cleanup, check that the current thread is in the same process group as the polling thread */ + if ((cbk_id == K2U_CBK_CLEANUP_CBK_ID) && (cbk_db[k2u_cbk_h]->pid != cur_pid)) { + MTL_ERROR1(MT_FLFMT( + "K2U_CBK_CLEANUP_CBK_ID is used by a thread (tid=" MT_ULONG_PTR_FMT ") which is in pid=" MT_ULONG_PTR_FMT " != " MT_ULONG_PTR_FMT " " + " of the polling thread (tid=" MT_ULONG_PTR_FMT ")"), + cur_tid,cur_pid,cbk_db[k2u_cbk_h]->pid,cbk_db[k2u_cbk_h]->tid); + MOSAL_spinlock_unlock(&cbk_dat_lock); + return MT_EPERM; + } + + if (size > MAX_CBK_DATA_SZ) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR4(__FUNCTION__ ": 
given data size ("SIZE_T_FMT") is more than allowed (%d).\n", + size,MAX_CBK_DATA_SZ); + return MT_EINVAL; + } + + next_prod= (cbk_db[k2u_cbk_h]->prod+1) & MAX_OUTS_CALLS_MASK; + if (next_prod == cbk_db[k2u_cbk_h]->cons) { + MOSAL_spinlock_unlock(&cbk_dat_lock); + MTL_ERROR4("%s: Call queue is full - invocation is dropped.\n", __func__); + return MT_EAGAIN; + } + msg_p= cbk_db[k2u_cbk_h]->msg_q+cbk_db[k2u_cbk_h]->prod; + msg_p->id= cbk_id; + msg_p->data_p= data_p; + msg_p->size= size; + cbk_db[k2u_cbk_h]->prod= next_prod; + MOSAL_sem_rel(&(cbk_db[k2u_cbk_h]->qsem)); /* signal for new message in queue */ + MTL_TRACE1(MT_FLFMT("%s: signaled process for a new event: event id=%d"), __func__, cbk_id); + + MOSAL_spinlock_unlock(&cbk_dat_lock); + + return MT_OK; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_kl.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_kl.def new file mode 100644 index 00000000..7aeee031 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_kl.def @@ -0,0 +1,178 @@ +EXPORTS + ; for OS only + DllInitialize private + DllUnload private + MOSAL_manual_wrapper + ; + ; mtl_common + ; + mtl_log_set + mtl_log + mtl_strerror + mtl_strerror_sym + ; windows implementation of tracing + NT_trace + NT_trace1 + NT_trace2 + NT_trace3 + NT_trace4 + NT_trace5 + NT_trace6 + NT_trace7 + NT_trace8 + NT_trace9 + NT_error + NT_error1 + NT_error2 + NT_error3 + NT_error4 + NT_error5 + NT_error6 + NT_error7 + NT_error8 + NT_error9 + NT_debug + NT_debug1 + NT_debug2 + NT_debug3 + NT_debug4 + NT_debug5 + NT_debug6 + NT_debug7 + NT_debug8 + NT_debug9 + + ; + ; MOSAL + ; + + ; ----- Mosal_ntddk.c ----- + MOSAL_PCI_find_device + MOSAL_PCI_read_config_byte + MOSAL_PCI_read_config_word + MOSAL_PCI_read_config_dword + MOSAL_PCI_read_config_data + MOSAL_PCI_write_config_byte + MOSAL_PCI_write_config_word + MOSAL_PCI_write_config_dword + MOSAL_PCI_write_config_data + ; ----- Mosal_bus.c ----- + MOSAL_PCI_find_class + MOSAL_PCI_read_io_byte + MOSAL_PCI_read_io_word + MOSAL_PCI_read_io_dword + MOSAL_PCI_write_io_byte + MOSAL_PCI_write_io_word + MOSAL_PCI_write_io_dword + MOSAL_PCI_read_mem + MOSAL_PCI_write_mem + MOSAL_MPC860_write + MOSAL_MPC860_read + ; ----- Mosal_gen.c ----- + MOSAL_init + MOSAL_cleanup + MOSAL_is_privileged + MOSAL_getpid + MOSAL_setpid + MOSAL_letobe64 + MOSAL_letobe32 + MOSAL_letobe16 + MOSAL_get_exec_ctx + MOSAL_mark_time + MOSAL_calc_diff + MOSAL_set_counts_per_sec + MOSAL_rsct_open + MOSAL_rsct_close + MOSAL_nsecs + MOSAL_reset_card + ; ----- Mosal_k2u_cbk.c ----- + k2u_cbk_init + k2u_cbk_cleanup + k2u_cbk_invoke + k2u_cbk_pollq + ; ----- Mosal_mem.c ----- + MOSAL_get_va_attr + MOSAL_set_vmbuff_attr + MOSAL_io_remap + MOSAL_io_unmap + MOSAL_map_phys_addr + MOSAL_unmap_phys_addr + MOSAL_virt_to_phys + MOSAL_virt_to_phys + MOSAL_write_phys_mem + MOSAL_read_phys_mem + MOSAL_vfree + MOSAL_phys_ctg_get + MOSAL_phys_ctg_free + MOSAL_mem_alloc + MOSAL_mem_free + MOSAL_la_list_init + MOSAL_la_list_alloc + MOSAL_la_list_free + MOSAL_la_list_delete + MOSAL_get_page_shift + ; ----- Mosal_mlock.c ----- + MOSAL_mlock + MOSAL_munlock + MOSAL_mlock_init + ; ----- Mosal_que.c ----- + MOSAL_qcreate + MOSAL_isqempty + MOSAL_qget + MOSAL_qput + MOSAL_qdestroy + ; ----- Mosal_sync.c ----- + MOSAL_syncobj_init + MOSAL_syncobj_waiton + MOSAL_syncobj_waiton_ui + MOSAL_syncobj_signal + MOSAL_syncobj_clear + MOSAL_sem_init + MOSAL_sem_acq + MOSAL_sem_acq_ui + MOSAL_sem_rel + MOSAL_mutex_init + MOSAL_mutex_acq + MOSAL_mutex_acq_ui + 
MOSAL_mutex_acq_to + MOSAL_mutex_rel + MOSAL_delay_execution + MOSAL_spinlock_init +; MOSAL_spinlock_lock +; MOSAL_spinlock_irq_lock +; MOSAL_spinlock_unlock + MOSAL_usleep + MOSAL_usleep_ui + ; ----- Mosal_timer.c ----- + MOSAL_ISR_set + MOSAL_ISR_unset + MOSAL_set_intr_handler + MOSAL_unset_intr_handler + MOSAL_DPC_init + MOSAL_DPC_schedule + MOSAL_DPC_add_ctx + MOSAL_DPC_schedule_ctx + MOSAL_get_counts_per_sec + MOSAL_time_get_clock + MOSAL_get_cnt + ; ----- Mosal_util.c ----- + MOSAL_add_device + MOSAL_remove_device + ; ----- Mosal_thread.c ----- + MOSAL_thread_start + MOSAL_thread_kill + MOSAL_thread_wait_for_exit + MOSAL_thread_set_name + ; ----- Mosal_wrap_kernel.c ----- + MOSAL_ioctl + ; ----- mosal_iobuf.c ----- + MOSAL_iobuf_get_props + MOSAL_iobuf_register + MOSAL_iobuf_deregister + MOSAL_iobuf_get_tpt + MOSAL_iobuf_get_tpt_seg + MOSAL_iobuf_cmp_tpt + MOSAL_iobuf_iter_init + MOSAL_iobuf_restore_perm + ; ----- mosal_gen_nos.c ----- + MOSAL_PCI_get_cfg_hdr diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem.c new file mode 100644 index 00000000..4e3aacfc --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem.c @@ -0,0 +1,1067 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include "mosal_priv.h" + +/* hash using */ +#ifdef MT_64BIT + +/* 64-bit platform */ +#include +#define hash_insert VIP_hashv4p_insert +#define hash_erase VIP_hashv4p_erase +#define hash_traverse VIP_hashv4p_traverse +#define hash_create VIP_hashv4p_create +#define hash_destroy VIP_hashv4p_destroy +#define hash_p_t VIP_hashv4p_p_t +#define hash_key_t VIP_hashv4p_key_t +#define hash_value_t VIP_hashv4p_value_t + +#define HASH_INSERT(hash,sp,val,label) \ + { \ + u_int64_t key[2]; \ + key[0] = (u_int64_t)MOSAL_getpid(); \ + key[1] = *(u_int64_t*)(sp); \ + if (hash_insert( hash, (u_int32_t*)&key[0], val) != 0) \ + goto label; \ + } + +#define HASH_ERASE(hash,sp,val,label) \ + { \ + u_int64_t key[2]; \ + key[0] = (u_int64_t)MOSAL_getpid(); \ + key[1] = *(u_int64_t*)(sp); \ + if (hash_erase( hash, (u_int32_t*)&key[0], (void*)&(val)) != 0) \ + goto label; \ + } + +#else + +/* 32-bit platform */ +#include +#define hash_insert VIP_hash64p_insert +#define hash_erase VIP_hash64p_erase +#define hash_traverse VIP_hash64p_traverse +#define hash_create VIP_hash64p_create +#define hash_destroy VIP_hash64p_destroy +#define hash_p_t VIP_hash64p_p_t +#define hash_key_t VIP_hash64p_key_t +#define hash_value_t VIP_hash64p_value_t + +#define HASH_INSERT(hash,sp,val,label) \ + { \ + ULARGE_INTEGER key; \ + key.HighPart = (LONG)MOSAL_getpid(); \ + key.LowPart = *(PULONG)(sp); \ + if (hash_insert( hash, *(u_int64_t*)&key, val) != 0) \ + goto label; \ + } + +#define HASH_ERASE(hash,sp,val,label) \ + { \ + ULARGE_INTEGER key; \ + key.HighPart = (LONG)MOSAL_getpid(); \ + key.LowPart = *(PULONG)(sp); \ + if (hash_erase( hash, *(u_int64_t*)&key, (void*)&(val)) != 0) \ + goto label; \ + } + +#endif + +#include "mosal_ntddk.h" + + +/* prototypes from */ +NTKERNELAPI +PVOID +MmAllocateContiguousMemory ( + IN SIZE_T NumberOfBytes, + IN PHYSICAL_ADDRESS HighestAcceptableAddress + ); + +NTKERNELAPI +VOID +MmFreeContiguousMemory ( + IN PVOID BaseAddress + ); + +static hash_p_t hash_tbl_mdl = NULL; +static MOSAL_semaphore_t sem; + + +/* + * + * resource tracking + * + */ + +typedef enum { TYPE_IO_REMAP, TYPE_PHYS_CTG, TYPE_PHYS_ADDR , TYPE_PHYS_ADDR_DIR } mem_rmv_type_t; + +typedef struct { + LIST_ENTRY link; + mem_rmv_type_t type; + MOSAL_pid_t pid; + PMDL mdl_p; + MT_phys_addr_t pa; + MT_virt_addr_t va; + } io_remap_el_t; + +typedef struct { + LIST_ENTRY link; + mem_rmv_type_t type; + MOSAL_pid_t pid; + PMDL mdl_p; + MT_virt_addr_t va; + } phys_ctg_el_t; + +typedef struct { + LIST_ENTRY link; + mem_rmv_type_t type; + MOSAL_pid_t pid; + PMDL mdl_p; + MT_virt_addr_t va; + MT_size_t bsize; + } phys_addr_el_t; + + +PVOID MOSAL_mem_rsct_open(MOSAL_pid_t pid) +{ + /* allocate resource list */ + mem_rmv_t *rmv_p = (mem_rmv_t*)TMALLOC(mem_rmv_t); + if (rmv_p == NULL) { + MTL_ERROR4("MOSAL_mem_rsct_open: allocation failed \n"); + } + else { + /* fill resource list */ + rmv_p->pid = pid; + InitializeListHead( &rmv_p->io_remap_que ); + InitializeListHead( &rmv_p->phys_ctg_que ); + InitializeListHead( &rmv_p->phys_addr_que ); + } + + return (PVOID)rmv_p; +} + +static void remove_hash_els( + mem_rmv_t *rmv_p + ) +{ + MOSAL_pid_t pid = rmv_p->pid; + + /* remove io_remap requests */ + while (!IsListEmpty( &rmv_p->io_remap_que )) { + io_remap_el_t *rmv_el_p = (io_remap_el_t*)RemoveHeadList( &rmv_p->io_remap_que ); + MOSAL_io_release_for_user(rmv_el_p->pa); + } + + /* remove phys_ctg requests */ + while (!IsListEmpty( &rmv_p->phys_ctg_que )) { + phys_ctg_el_t *rmv_el_p = (phys_ctg_el_t*)RemoveHeadList( 
&rmv_p->phys_ctg_que ); + MOSAL_phys_ctg_free_for_user(rmv_el_p->va); + } + + /* remove phys_addr requests */ + while (!IsListEmpty( &rmv_p->phys_addr_que )) { + phys_addr_el_t *rmv_el_p = (phys_addr_el_t*)RemoveHeadList( &rmv_p->phys_addr_que ); + MOSAL_unmap_phys_addr(MOSAL_PROT_CTX_CURRENT_USER, rmv_el_p->va, rmv_el_p->bsize); + } + +} + +static int clean_hash_pcs(hash_key_t key, hash_value_t val, void* vp) +{ + mem_rmv_t *rmv_p = (mem_rmv_t*)vp; + io_remap_el_t * el_p_tmp = (io_remap_el_t *)val; + MT_virt_addr_t system_range_start = (MT_virt_addr_t)(MT_WIN_SYSTEM_SPACE_START); /* MmSystemRangeStart */ + + /* check for val to be a pointer in kernel space */ + if ( (MT_virt_addr_t)val < system_range_start) + return MT_EAGAIN; + + if (el_p_tmp->pid == rmv_p->pid) { + /* it's our process- let's build remove element */ + switch (el_p_tmp->type) { + case TYPE_IO_REMAP: + { + io_remap_el_t * el_p = (io_remap_el_t *)val; + InsertTailList( &rmv_p->io_remap_que, &el_p->link ); + } + break; + + case TYPE_PHYS_CTG: + { + phys_ctg_el_t * el_p = (phys_ctg_el_t *)val; + InsertTailList( &rmv_p->phys_ctg_que, &el_p->link ); + } + break; + + case TYPE_PHYS_ADDR: + case TYPE_PHYS_ADDR_DIR: + { + phys_addr_el_t * el_p = (phys_addr_el_t *)val; + InsertTailList( &rmv_p->phys_addr_que, &el_p->link ); + } + break; + } + } + return MT_EAGAIN; +} + +void MOSAL_mem_rsct_close(mem_rmv_t *rmv_p) +{ + MOSAL_sem_acq( &sem, TRUE ); + hash_traverse( hash_tbl_mdl, clean_hash_pcs, (void*)rmv_p ); + remove_hash_els( rmv_p ); + MOSAL_sem_rel( &sem ); + FREE(rmv_p); +} + + + +/* + * + * init/cleanup + * + */ + +call_result_t MOSAL_mem_init() +{ + /* init MDL hash */ + if (hash_tbl_mdl == NULL) { + if (hash_create( 0, &hash_tbl_mdl ) != 0) { + MTL_ERROR2("MOSAL_mem_init: hash_create failed\n"); + return MT_ERROR; + } + } + MOSAL_sem_init( &sem, 1 ); + return MT_OK; +} + +void MOSAL_mem_cleanup() +{ + if (hash_tbl_mdl != NULL) + hash_destroy( hash_tbl_mdl, NULL, NULL ); + hash_tbl_mdl = NULL; +} + + +/* + * + * Functions for user space + * + */ + +/* + * MOSAL_io_remap_for_user + */ +MT_virt_addr_t MOSAL_io_remap_for_user(MT_phys_addr_t pa, MT_size_t size) +{ + PVOID kernel_p; + PMDL mdl_p; + PVOID user_p; + PHYSICAL_ADDRESS wpa; + io_remap_el_t *remap_el_p; + MOSAL_pid_t pid = MOSAL_getpid(); + + /* map physical address to a kernel virtual one */ + wpa.QuadPart = pa; + kernel_p = MmMapIoSpace( wpa, size, MmNonCached ); + if (kernel_p == NULL) goto err0; + + /* allocate MDL */ + mdl_p = IoAllocateMdl( kernel_p, (ULONG)size, FALSE,FALSE,NULL); + if (mdl_p == NULL) goto err1; + + /* fill MDL */ + MmBuildMdlForNonPagedPool(mdl_p); + + /* map the buffer into user space */ + /* use NTDDK function */ + user_p = MmMapLockedPagesSpecifyCache( mdl_p, UserMode, + MmNonCached, NULL, FALSE, NormalPagePriority ); + if (user_p == NULL) goto err2; + + /* allocate hash element */ + remap_el_p = (io_remap_el_t*)TMALLOC(io_remap_el_t); + if (remap_el_p == NULL) { + MTL_ERROR4("MOSAL_io_remap_for_user: allocation failed \n"); + goto err3; + } + + /* fill hash element */ + remap_el_p->type = TYPE_IO_REMAP; + remap_el_p->pid = pid; + remap_el_p->mdl_p = mdl_p; + remap_el_p->pa = pa; + remap_el_p->va = (MT_virt_addr_t)user_p; + + /* store mdl for future release */ + HASH_INSERT(hash_tbl_mdl,&wpa,remap_el_p,err4); + + return (MT_virt_addr_t)user_p; + +err4: + FREE(remap_el_p); +err3: + MmUnmapLockedPages((PVOID)user_p, mdl_p ); +err2: + IoFreeMdl( mdl_p ); +err1: + MmUnmapIoSpace( kernel_p, size ); +err0: + return (MT_virt_addr_t)0; +} + +/* + * 
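 * (added) Undoes MOSAL_io_remap_for_user: looks up the per-process mapping by its physical address in the MDL hash, unmaps the user VA, unmaps the I/O space and frees the MDL.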
MOSAL_io_release_for_user + */ +void MOSAL_io_release_for_user(MT_phys_addr_t pa) +{ + PMDL mdl_p; + io_remap_el_t *remap_el_p = NULL; + + /* get saved mdl */ + HASH_ERASE(hash_tbl_mdl,&pa,remap_el_p,err1); + if (remap_el_p == NULL) { + MTL_ERROR4("MOSAL_io_release_for_user: internal error \n"); + goto err0; + } + + /* release resources */ + mdl_p = remap_el_p->mdl_p; + if (mdl_p != NULL) { + PVOID kernel_p; + ULONG size; + + /* unmap buffer from user space */ + MmUnmapLockedPages((PVOID)remap_el_p->va, mdl_p ); + + /* unmap kernel memory */ + kernel_p = MmGetSystemAddressForMdlSafe( mdl_p, HighPagePriority ); + size = MmGetMdlByteCount( mdl_p); + if (kernel_p != NULL) + MmUnmapIoSpace( kernel_p, size ); + + /* free MDL */ + IoFreeMdl( mdl_p ); + } + FREE(remap_el_p); + return; +err1: + MTL_ERROR4("MOSAL_io_release_for_user: hash_erase failed \n"); +err0: + return; +} + + +/* + * Allocate physically contiguous memory + * + * + */ +MT_virt_addr_t MOSAL_phys_ctg_get_for_user(MT_size_t size) +{ + char * kernel_p; + PMDL mdl_p; + MT_virt_addr_t user_p; + PHYSICAL_ADDRESS wpa; + phys_ctg_el_t *phys_ctg_el_p; + MOSAL_pid_t pid = MOSAL_getpid(); + + /* map physical address to a kernel virtual one */ + wpa.QuadPart = _UI64_MAX; + + /* map physical address to a kernel virtual one */ + kernel_p = MmAllocateContiguousMemory( size, wpa ); + if (kernel_p == NULL) goto err0; + + /* allocate MDL */ + mdl_p = IoAllocateMdl( kernel_p, (ULONG)size, FALSE,FALSE,NULL); + if (mdl_p == NULL) goto err1; + + /* fill MDL */ + MmBuildMdlForNonPagedPool(mdl_p); + + /* map the buffer into user space */ + /* use NTDDK function */ + user_p = (MT_virt_addr_t)MmMapLockedPagesSpecifyCache( mdl_p, + UserMode, MmNonCached, NULL, FALSE, NormalPagePriority ); + if (user_p == (MT_virt_addr_t)0) goto err2; + + /* allocate hash element */ + phys_ctg_el_p = (phys_ctg_el_t*)TMALLOC(phys_ctg_el_t); + if (phys_ctg_el_p == NULL) { + MTL_ERROR4("MOSAL_phys_ctg_get_for_user: allocation failed \n"); + goto err3; + } + + /* fill hash element */ + phys_ctg_el_p->type = TYPE_PHYS_CTG; + phys_ctg_el_p->pid = pid; + phys_ctg_el_p->mdl_p = mdl_p; + phys_ctg_el_p->va = user_p; + + + /* store mdl for future release */ + HASH_INSERT(hash_tbl_mdl,&user_p,phys_ctg_el_p,err4); + + return user_p; + +err4: + FREE(phys_ctg_el_p); +err3: + MmUnmapLockedPages((PVOID)user_p, mdl_p ); +err2: + IoFreeMdl( mdl_p ); +err1: + MmFreeContiguousMemory( kernel_p ); +err0: + return (MT_virt_addr_t)0; +} + +void MOSAL_phys_ctg_free_for_user(MT_virt_addr_t va) +{ + phys_ctg_el_t *phys_ctg_el_p = NULL; + PMDL mdl_p; + + /* find mdl */ + HASH_ERASE(hash_tbl_mdl,&va,phys_ctg_el_p,err0); + if (phys_ctg_el_p == NULL) { + MTL_ERROR4("MOSAL_phys_ctg_free_for_user: internal error \n"); + return; + } + + /* release resources */ + mdl_p = phys_ctg_el_p->mdl_p; + if (mdl_p != NULL) { + PVOID kernel_p; + + /* unmap buffer from user space */ + MmUnmapLockedPages((PVOID)va, mdl_p ); + + /* free contiguous memory */ + kernel_p = MmGetSystemAddressForMdlSafe( mdl_p, HighPagePriority ); + if (kernel_p != NULL) + MmFreeContiguousMemory( kernel_p ); + + /* free MDL */ + IoFreeMdl( mdl_p ); + } + FREE(phys_ctg_el_p); +err0: + return; +} + +/* + * + * Functions for kernel space + * + */ + + +MT_virt_addr_t MOSAL_io_remap(MT_phys_addr_t pa, MT_size_t size) +{ + char * buffer_p; + PHYSICAL_ADDRESS wpa; + + /* map physical address to a kernel virtual one */ + wpa.QuadPart = pa; + buffer_p = MmMapIoSpace( wpa, size, MmNonCached ); + + /* store size for future release */ + if (buffer_p == 
NULL) + goto err0; + + HASH_INSERT(hash_tbl_mdl,&buffer_p,(void*)size,err1); + + return (MT_virt_addr_t)buffer_p; + +err1: + MmUnmapIoSpace( buffer_p, size ); +err0: + return (MT_virt_addr_t)NULL; +} + +void MOSAL_io_unmap(MT_virt_addr_t va) +{ + MT_size_t size; + HASH_ERASE(hash_tbl_mdl,&va,size,err); + MmUnmapIoSpace( (void *)va, size ); +err: + return; +} + +/* + * MOSAL_vfree + */ +void MOSAL_vfree(MT_virt_addr_t va) +{ +#ifdef __KERNEL__ + VFREE((void *)va); +#endif +} + +MT_virt_addr_t MOSAL_map_phys_addr_via_kernel(MT_phys_addr_t pa, MT_size_t bsize, + MOSAL_mem_flags_t flags, MOSAL_prot_ctx_t prot_ctx) +{ + char * buffer_p; + PHYSICAL_ADDRESS wpa; + MEMORY_CACHING_TYPE mtype = (flags & MOSAL_MEM_FLAGS_NO_CACHE) ? MmNonCached : MmCached; + PMDL mdl_p; + PVOID user_p; + LOCK_OPERATION Operation = IoReadAccess; + phys_addr_el_t *phys_addr_el_p; + MOSAL_pid_t pid = MOSAL_getpid(); + + /* map physical address to a kernel virtual one */ + wpa.QuadPart = pa; + buffer_p = MmMapIoSpace( wpa, bsize, mtype ); + if (buffer_p == NULL) + goto err0; + + if (prot_ctx == MOSAL_PROT_CTX_KERNEL) { /* Mapping to kernel virtual address */ + MTL_DEBUG2("MOSAL_map_phys_addr: Mapped phys.="PHYS_ADDR_FMT" to virt.=" VIRT_ADDR_FMT"\n",pa,buffer_p); + return (MT_virt_addr_t)buffer_p; + } + + /* else: MOSAL_PROT_CTX_CURRENT_USER */ + + /* allocate MDL */ + mdl_p = IoAllocateMdl( buffer_p, (ULONG)bsize, FALSE,TRUE,NULL); + if (mdl_p == NULL) goto err1; + + /* fill MDL */ + MmBuildMdlForNonPagedPool(mdl_p); + + /* lock pages and set acess flags */ +#ifdef LOCK_PHYS_PAGES + if ((flags & MOSAL_MEM_FLAGS_PERM_READ)) + Operation = IoReadAccess; + if ((flags & MOSAL_MEM_FLAGS_PERM_WRITE)) + Operation = IoWriteAccess; + /* The probing fails, seems to be, because it probes physical memory */ + MmProbeAndLockPages( mdl_p, UserMode, Operation ); +#endif + + /* map the buffer into user space */ + /* use NTDDK function */ + user_p = MmMapLockedPagesSpecifyCache( mdl_p, UserMode, mtype, NULL, FALSE, NormalPagePriority ); + if (user_p == NULL) goto err2; + + /* allocate hash element */ + phys_addr_el_p = (phys_addr_el_t*)TMALLOC(phys_addr_el_t); + if (phys_addr_el_p == NULL) { + MTL_ERROR4("MOSAL_map_phys_addr: allocation failed \n"); + goto err3; + } + + /* fill hash element */ + phys_addr_el_p->type = TYPE_PHYS_ADDR; + phys_addr_el_p->pid = pid; + phys_addr_el_p->mdl_p = mdl_p; + phys_addr_el_p->va = (MT_virt_addr_t)user_p; + phys_addr_el_p->bsize = bsize; + + /* store mdl for future release */ + HASH_INSERT(hash_tbl_mdl,&user_p,phys_addr_el_p,err4); + + MTL_DEBUG2("MOSAL_map_phys_addr: Mapped phys.="PHYS_ADDR_FMT" to virt.=" VIRT_ADDR_FMT"\n",pa,user_p); + + return (MT_virt_addr_t)user_p; + +err4: + FREE(phys_addr_el_p); +err3: + MmUnmapLockedPages((PVOID)user_p, mdl_p ); +err2: + IoFreeMdl( mdl_p ); +err1: + MmUnmapIoSpace( buffer_p, bsize ); +err0: + return (MT_virt_addr_t)0; + +} + +MT_virt_addr_t MOSAL_map_phys_addr_directly(MT_phys_addr_t pa, MT_size_t bsize, + MOSAL_mem_flags_t flags, MOSAL_prot_ctx_t prot_ctx) +{ + PHYSICAL_MEMORY_INFO info; + NTSTATUS ntStatus; + MOSAL_dev_t *dev_p; + MT_virt_addr_t user_p; + phys_addr_el_t *phys_addr_el_p; + MOSAL_pid_t pid = MOSAL_getpid(); + + /* for KERNEL - do it in the previous style */ + if (prot_ctx == MOSAL_PROT_CTX_KERNEL) { /* Mapping to kernel virtual address */ + return MOSAL_map_phys_addr_via_kernel(pa, bsize, flags, prot_ctx); + } + + /* if can't find the device - do it in the previous style */ + dev_p = find_device_by_phys_addr( pa, bsize ); + if (dev_p == NULL) + 
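/* (added) no owning device was identified for this physical range - fall back to the MmMapIoSpace-based mapping */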
return MOSAL_map_phys_addr_via_kernel(pa, bsize, flags, prot_ctx); + + /* fill the parameters */ + info.InterfaceType = PCIBus; + info.BusNumber = (ULONG)dev_p->bus; + info.BusAddress.QuadPart = pa; + info.AddressSpace = 0; + info.Length = (ULONG)bsize; + + /* map it now: if failed - do it in the previous style */ + ntStatus = MapMemMapTheMemory( NULL, &info, sizeof(info), sizeof(PVOID) ); + if (!NT_SUCCESS(ntStatus)) + return MOSAL_map_phys_addr_via_kernel(pa, bsize, flags, prot_ctx); + else + user_p = *(MT_virt_addr_t *)&info; + + /* allocate hash element */ + phys_addr_el_p = (phys_addr_el_t*)TMALLOC(phys_addr_el_t); + if (phys_addr_el_p == NULL) { + MTL_ERROR4("MOSAL_map_phys_addr: allocation failed \n"); + goto err0; + } + + /* fill hash element */ + phys_addr_el_p->type = TYPE_PHYS_ADDR_DIR; + phys_addr_el_p->pid = pid; + phys_addr_el_p->mdl_p = NULL; + phys_addr_el_p->va = user_p; + phys_addr_el_p->bsize = bsize; + + /* store mdl for future release */ + HASH_INSERT(hash_tbl_mdl,&user_p,phys_addr_el_p,err1); + MTL_DEBUG4("MOSAL_map_phys_addr_directly: Mapped phys.="PHYS_ADDR_FMT" for task 0x%x to virt.=" VIRT_ADDR_FMT"\n", + pa,(LONG)MOSAL_getpid(), user_p); + + return user_p; + +err1: + MTL_ERROR2("MOSAL_map_phys_addr: HASH_INSERT failed for task 0x%x, va 0x%x\n", + (LONG)MOSAL_getpid(), (ULONG)user_p); + FREE(phys_addr_el_p); +err0: + ntStatus = MapMemUnmapTheMemory( NULL, (PVOID)&user_p, sizeof(MT_virt_addr_t), 0); + if (ntStatus) { + MTL_ERROR2("MOSAL_map_phys_addr_directly: unwinding after error: MapMemUnmapTheMemory failed (0x%x)\n",ntStatus); + } + return (MT_virt_addr_t)0; +} + +MT_virt_addr_t MOSAL_map_phys_addr(MT_phys_addr_t pa, MT_size_t bsize, + MOSAL_mem_flags_t flags, MOSAL_prot_ctx_t prot_ctx) +{ + #ifdef MAP_PHYS_ADDR_VIA_KERNEL + return MOSAL_map_phys_addr_via_kernel(pa, bsize, flags, prot_ctx); + #else + return MOSAL_map_phys_addr_directly(pa, bsize, flags, prot_ctx); + #endif +} + +call_result_t MOSAL_unmap_phys_addr(MOSAL_prot_ctx_t prot_ctx, MT_virt_addr_t virt, + MT_size_t bsize) +{ + NTSTATUS ntStatus; + if (prot_ctx == MOSAL_PROT_CTX_KERNEL) { /* Mapping to kernel virtual address */ + MmUnmapIoSpace( (PVOID)virt, bsize ); + } + else { + PMDL mdl_p; + phys_addr_el_t *phys_addr_el_p = NULL; + void * kvirt; + + /* find MDL */ + HASH_ERASE(hash_tbl_mdl,&virt,phys_addr_el_p,err0); + if (phys_addr_el_p == NULL) { + MTL_ERROR4("MOSAL_unmap_phys_addr: internal error \n"); + goto err0; + } + + #ifndef MAP_PHYS_ADDR_VIA_KERNEL + if (phys_addr_el_p->type == TYPE_PHYS_ADDR_DIR) { + FREE(phys_addr_el_p); /* free book keeping info */ + ntStatus = MapMemUnmapTheMemory( NULL, (PVOID)&virt, sizeof(MT_virt_addr_t), 0); + if (ntStatus) { + MTL_ERROR2("MOSAL_unmap_phys_addr_directly: MapMemUnmapTheMemory failed for task 0x%x, virt 0x%x (0x%x)\n", + (LONG)MOSAL_getpid(), (ULONG)virt, ntStatus); + } + #if 0 + else { + MTL_DEBUG4("MOSAL_unmap_phys_addr: unmapping done for task 0x%x, va 0x%x\n", + (LONG)MOSAL_getpid(), (ULONG)virt); + } + #endif + return ntStatus; + } + #endif + + mdl_p = phys_addr_el_p->mdl_p; + + /* release resources */ + + /* unmap buffer from user space */ + MmUnmapLockedPages((PVOID)virt, mdl_p ); + + /* unlock pages */ +#ifdef LOCK_PHYS_PAGES + MmUnlockPages(mdl_p); +#endif + + /* unmap kernel memory */ + kvirt = MmGetMdlVirtualAddress(mdl_p); + MmUnmapIoSpace( kvirt, bsize ); + + /* free MDL */ + IoFreeMdl( mdl_p ); + + /* free book keeping info */ + FREE(phys_addr_el_p); + } + + return MT_OK; +err0: + MTL_ERROR2("MOSAL_unmap_phys_addr: HASH_ERASE failed for 
task 0x%x, va 0x%x\n", + (LONG)MOSAL_getpid(), (ULONG)virt); + return MT_ERROR; + +} + +/* + * Allocate physically contiguous memory + * + * + */ +MT_virt_addr_t MOSAL_phys_ctg_get(MT_size_t size) +{ + MT_virt_addr_t buffer_p; + PHYSICAL_ADDRESS wpa; + + /* no limit on the highest acceptable physical address */ + wpa.QuadPart = _UI64_MAX; + + /* allocate physically contiguous non-paged memory */ + buffer_p = (MT_virt_addr_t)MmAllocateContiguousMemory( size, wpa ); + + /* fill with a recognizable pattern (debug aid) */ + if (buffer_p) + memset((void *)buffer_p, 0xda, size); + + return buffer_p; +} + +call_result_t MOSAL_phys_ctg_free(MT_virt_addr_t va, MT_size_t size) +{ + MmFreeContiguousMemory((void*)va); + return MT_OK; +} + + +call_result_t MOSAL_get_va_attr(MT_virt_addr_t va, mem_attr_t *attr) +{ + return(MT_ENOSYS); +} + +call_result_t MOSAL_set_vmbuff_attr(MT_virt_addr_t va, u_int32_t size, +mem_attr_t attr) +{ + return(MT_ENOSYS); +} + +#if 1 + +// non-WDM implementation + +NTKERNELAPI +PHYSICAL_ADDRESS +MmGetPhysicalAddress ( + IN PVOID BaseAddress + ); + +NTKERNELAPI +PVOID +MmGetVirtualForPhysical ( + IN PHYSICAL_ADDRESS PhysicalAddress + ); + + +MT_virt_addr_t MOSAL_phys_to_virt(const MT_phys_addr_t pa) +{ + PHYSICAL_ADDRESS wpa; + + wpa.QuadPart = pa; + return (MT_virt_addr_t)MmGetVirtualForPhysical(wpa); +} + +call_result_t MOSAL_virt_to_phys(MOSAL_prot_ctx_t prot_ctx, + const MT_virt_addr_t va, MT_phys_addr_t *pa_p) +{ + PHYSICAL_ADDRESS wpa = MmGetPhysicalAddress((void*)(MT_ulong_ptr_t)va); + *pa_p = (MT_phys_addr_t)wpa.QuadPart; + if (!*pa_p) + return MT_ENORSC; + return MT_OK; +} + +/* + * write by physical address + * + * TBD: Add check for top of physical memory + */ +u_int32_t MOSAL_write_phys_mem(MT_phys_addr_t pa, u_int32_t val) +{ + MT_virt_addr_t va = (MT_virt_addr_t)(MOSAL_phys_to_virt(pa)); + MOSAL_MMAP_IO_WRITE_DWORD( (PULONG)va, val ); + return val; +} + +/* + * read by physical address + * + * TBD: Add check for top of physical memory + */ +u_int32_t MOSAL_read_phys_mem(MT_phys_addr_t pa) +{ + return MOSAL_MMAP_IO_READ_DWORD((PULONG)MOSAL_phys_to_virt(pa)); +} + + +#else + +// WDM implementation: +// 1) works only at PASSIVE_LEVEL +// 2) very inefficient: performs map/unmap on every read/write + +u_int32_t MOSAL_write_phys_mem(MT_phys_addr_t pa, u_int32_t val) +{ + if(KeGetCurrentIrql() == PASSIVE_LEVEL) + { + MT_virt_addr_t va = MOSAL_io_remap(pa, sizeof(u_int32_t)); + *(u_int32_t*)va = val; + MmUnmapIoSpace((void*)va, sizeof(u_int32_t)); + return val; + } + else + return(0); +} + +u_int32_t MOSAL_read_phys_mem(MT_phys_addr_t pa) +{ + if(KeGetCurrentIrql() == PASSIVE_LEVEL) + { + MT_virt_addr_t va = MOSAL_io_remap(pa, sizeof(u_int32_t)); + u_int32_t val = *(u_int32_t*)va; + MmUnmapIoSpace((void*)va, sizeof(u_int32_t)); + return val; + } + else + return(0); +} +#endif + +/****************************************************************************** + * Function: + * MOSAL_mem_alloc + * + * Description: + * allocate kernel memory + * + * Parameters: + * size(IN) MT_size_t + * Size of memory to allocate + * flags(IN) u_int32_t + * Flags + * in Linux - flags like GFP_KERNEL, GFP_ATOMIC; in NT - a Pool Tag, or 0 for no tag + * + * Returns: + * MT_virt_addr_t + * + +******************************************************************************/ +MT_virt_addr_t MOSAL_mem_alloc( MT_size_t size, u_int32_t flags ) +{ + if (flags) + return (MT_virt_addr_t)ExAllocatePoolWithTag(NonPagedPool,size,flags); + else + return (MT_virt_addr_t)ExAllocatePool(NonPagedPool,size); +} + 
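/* (added) A minimal usage sketch for MOSAL_mem_alloc/MOSAL_mem_free; the caller, the buffer size and the 'LSOM' pool tag are illustrative assumptions, not part of the original patch. */
static call_result_t example_mem_alloc_roundtrip(void)
{
	/* tagged allocation from non-paged pool; pass 0 as 'flags' to allocate without a tag */
	MT_virt_addr_t buf = MOSAL_mem_alloc(1024, 'LSOM');
	if (buf == 0)
		return MT_EAGAIN;
	memset((void *)buf, 0, 1024);	/* non-paged memory is accessible at any IRQL */
	MOSAL_mem_free(buf);		/* returns the block to the pool */
	return MT_OK;
}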
+/****************************************************************************** + * Function: + * MOSAL_mem_free + * + * Description: + * release kernel memory + * + * Parameters: + * addr(IN) MT_virt_addr_t + * + * Returns: + * + +******************************************************************************/ +void MOSAL_mem_free( MT_virt_addr_t addr ) +{ + ExFreePool((void *)addr); +} + +/* PAGE_SIZE API */ +call_result_t MOSAL_get_page_shift( + MOSAL_prot_ctx_t prot_ctx, + MT_virt_addr_t va, + unsigned int *page_shift_p +) +{ + *page_shift_p = MOSAL_SYS_PAGE_SHIFT; + return MT_OK; +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * + * Look-aside List Management for non-paged memory + * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + +/****************************************************************************** + * Function: + * MOSAL_la_list_init + * + * Description: + * initializes a lookaside list for nonpaged entries of the specified size + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * Pointer to the caller-supplied memory for the lookaside list head to be +initialized + * Allocate(IN) PALLOCATE_FUNCTION + * Either points to a caller-supplied routine for allocating an entry when +the lookaside list is empty, or this parameter can be NULL + * Free(IN) PFREE_FUNCTION + * Either points to a caller-supplied routine for freeing an entry whenever +the lookaside list is full, or this parameter can be NULL + * Flags(IN) ULONG + * Reserved. Must be zero + * Size(IN) SIZE_T + * Specifies the size in bytes for each nonpaged entry to be allocated +subsequently + * Tag(IN) ULONG + * Specifies the pool tag for lookaside list entries + * Depth(IN) USHORT + * Reserved. Must be zero + * + * Returns: + * + +******************************************************************************/ +void MOSAL_la_list_init( + PNPAGED_LOOKASIDE_LIST Lookaside, + PALLOCATE_FUNCTION Allocate OPTIONAL, + PFREE_FUNCTION Free OPTIONAL, + ULONG Flags, + SIZE_T Size, + ULONG Tag, + USHORT Depth + ) +{ + ExInitializeNPagedLookasideList( Lookaside, Allocate, Free, Flags, Size, Tag +, Depth ); +} + +/****************************************************************************** + * Function: + * MOSAL_la_list_alloc + * + * Description: + * returns a pointer to a nonpaged entry from the given lookaside list, + * or it returns a pointer to a newly allocated nonpaged entry + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * pointer to the list head + * + * Returns: + * void* + * a pointer to an entry if one can be allocated. Otherwise, it returns NULL. 
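 *
 * A hypothetical usage sketch (added; 'la', 'e', 'entry_t' and the 'LSOM'
 * tag are assumptions for illustration only):
 *   NPAGED_LOOKASIDE_LIST la;
 *   MOSAL_la_list_init(&la, NULL, NULL, 0, sizeof(entry_t), 'LSOM', 0);
 *   e = (entry_t*)MOSAL_la_list_alloc(&la);
 *   ... use the entry ...
 *   MOSAL_la_list_free(&la, e);
 *   MOSAL_la_list_delete(&la);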
+ * + +******************************************************************************/ +void *MOSAL_la_list_alloc( PNPAGED_LOOKASIDE_LIST Lookaside ) +{ + return ExAllocateFromNPagedLookasideList(Lookaside); +} + +/****************************************************************************** + * Function: + * MOSAL_la_list_free + * + * Description: + * returns an entry to the pool + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * pointer to the list head + * Entry(IN) PVOID + * Entry to be returned + * + * Returns: + * + +******************************************************************************/ +void MOSAL_la_list_free( PNPAGED_LOOKASIDE_LIST Lookaside, PVOID Entry ) +{ + ExFreeToNPagedLookasideList(Lookaside, Entry); +} + +/****************************************************************************** + * Function: + * MOSAL_la_list_delete + * + * Description: + * deletes the list + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * pointer to the list head + * + * Returns: + * + +******************************************************************************/ +void MOSAL_la_list_delete( PNPAGED_LOOKASIDE_LIST Lookaside ) +{ + ExDeleteNPagedLookasideList(Lookaside); +} + + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_imp.h new file mode 100644 index 00000000..e5d8fc31 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_imp.h @@ -0,0 +1,242 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef H_MOSAL_MEM_IMP_H +#define H_MOSAL_MEM_IMP_H + +/* PAGE_SIZE API */ +#ifdef MT_KERNEL +#define MOSAL_SYS_PAGE_SHIFT PAGE_SHIFT +#define MOSAL_SYS_PAGE_SIZE (1 << MOSAL_SYS_PAGE_SHIFT) + +#else +u_int32_t MOSAL_get_sys_page_size(MT_virt_addr_t va); +u_int32_t MOSAL_get_sys_page_shift(MT_virt_addr_t va); + +#define MOSAL_SYS_PAGE_SIZE MOSAL_get_sys_page_size(0) +#define MOSAL_SYS_PAGE_SHIFT MOSAL_get_sys_page_shift(0) +#endif + +/* #define __MOSAL_MMAP_IO_WRITE_QWORD_ATOMIC__ */ +//static __inline void MOSAL_io_write_qword(u_int64_t data, volatile u_int64_t *target_p) +//{ +// #ifdef MT_KERNEL +// WRITE_REGISTER_BUFFER_ULONG((PULONG)target_p,(PULONG)&data,2); +// #else +// *target_p = data; +// #endif +//} + +static __inline u_int64_t MOSAL_io_read_qword(volatile u_int64_t *source_p) +{ + #ifdef MT_KERNEL + u_int64_t x; + READ_REGISTER_BUFFER_ULONG((PULONG)&x,(PULONG)source_p,2); + return x; + #else + return *source_p; + #endif +} + +static __inline void MOSAL_io_write_dword(volatile u_int32_t *target_p, u_int32_t data) +{ + #ifdef MT_KERNEL + WRITE_REGISTER_ULONG((PULONG)(target_p),(ULONG)(data)); + #else + *target_p = data; + #endif +} + +static __inline u_int32_t MOSAL_io_read_dword(volatile u_int32_t *target_p) +{ + #ifdef MT_KERNEL + return READ_REGISTER_ULONG((PULONG)(target_p)); + #else + return *target_p; + #endif +} + +/* access to memory-mapped physical memory */ +#ifdef MT_KERNEL +#define MOSAL_MMAP_IO_READ_BYTE(reg) READ_REGISTER_UCHAR((PUCHAR)(reg)) +#define MOSAL_MMAP_IO_READ_WORD(reg) READ_REGISTER_USHORT((PUSHORT)(reg)) +#define MOSAL_MMAP_IO_READ_DWORD(reg) READ_REGISTER_ULONG((PULONG)(reg)) /* MOSAL_io_read_dword(reg) // */ +#define MOSAL_MMAP_IO_READ_QWORD(reg,buf) READ_REGISTER_BUFFER_ULONG((PULONG)(reg),(PULONG)(buf),2) +#define MOSAL_MMAP_IO_READ_BUF_BYTE(reg,buf,size) READ_REGISTER_BUFFER_UCHAR((PUCHAR)(reg),(PUCHAR)(buf),size) +#define MOSAL_MMAP_IO_READ_BUF_WORD(reg,buf,size) READ_REGISTER_BUFFER_USHORT((PUSHORT)(reg),(PUSHORT)(buf),size) +#define MOSAL_MMAP_IO_READ_BUF_DWORD(reg,buf,size) READ_REGISTER_BUFFER_ULONG((PULONG)(reg),(PULONG)(buf),size) +#define MOSAL_MMAP_IO_WRITE_BYTE(reg,data) WRITE_REGISTER_UCHAR((PUCHAR)(reg),(UCHAR)(data)) +#define MOSAL_MMAP_IO_WRITE_WORD(reg,data) WRITE_REGISTER_USHORT((PUSHORT)(reg),(USHORT)(data)) +//#define MOSAL_MMAP_IO_WRITE_DWORD(reg,data) MOSAL_io_write_dword((volatile u_int32_t *)(reg),(u_int32_t)(data)) //WRITE_REGISTER_ULONG((PULONG)(reg),(ULONG)(data)) +#define MOSAL_MMAP_IO_WRITE_DWORD(reg,data) WRITE_REGISTER_ULONG((PULONG)(reg),(ULONG)(data)) +#define MOSAL_MMAP_IO_WRITE_QWORD(reg,data) WRITE_REGISTER_BUFFER_ULONG((PULONG)(reg),(PULONG)&(data),2); + //{ \ + // volatile u_int64_t l_int64 = data; \ + // WRITE_REGISTER_BUFFER_ULONG((PULONG)(reg),(PULONG)&l_int64,2); \ + //} +#define MOSAL_MMAP_IO_WRITE_BUF_BYTE(reg,buf,size) WRITE_REGISTER_BUFFER_UCHAR((PUCHAR)(reg),(PUCHAR)(buf),size) +#define MOSAL_MMAP_IO_WRITE_BUF_WORD(reg,buf,size) WRITE_REGISTER_BUFFER_USHORT((PUSHORT)(reg),(PUSHORT)(buf),size) +#define MOSAL_MMAP_IO_WRITE_BUF_DWORD(reg,buf,size) WRITE_REGISTER_BUFFER_ULONG((PULONG)(reg),(PULONG)(buf),size) +#else +#define MOSAL_MMAP_IO_READ_BYTE(reg) (*(volatile u_int8_t * const)(reg)) +#define MOSAL_MMAP_IO_READ_WORD(reg) (*(volatile u_int16_t * const)(reg)) +#define MOSAL_MMAP_IO_READ_DWORD(reg) (*(volatile u_int32_t * const)(reg)) +#define MOSAL_MMAP_IO_READ_QWORD(reg) MOSAL_io_read_qword((volatile u_int64_t*)(reg)) +/* +#define MOSAL_MMAP_IO_READ_QWORD(reg,buf) \ + 
*(u_int32_t*)(buf) = *(u_int32_t *)(reg); *((u_int32_t*)(buf)+1) = *((u_int32_t *)(reg)+1)
+*/
+#define MOSAL_MMAP_IO_READ_BUF_BYTE(reg,buf,size) memcpy((volatile void*)(buf),(volatile void*)(reg),size)
+#define MOSAL_MMAP_IO_READ_BUF_WORD(reg,buf,size) memcpy((volatile void*)(buf),(volatile void*)(reg),(size)<<1)
+#define MOSAL_MMAP_IO_READ_BUF_DWORD(reg,buf,size) memcpy((volatile void*)(buf),(volatile void*)(reg),(size)<<2)
+#define MOSAL_MMAP_IO_WRITE_BYTE(reg,data) *(volatile u_int8_t* const)(reg) = data
+#define MOSAL_MMAP_IO_WRITE_WORD(reg,data) *(volatile u_int16_t* const)(reg) = data
+#define MOSAL_MMAP_IO_WRITE_DWORD(reg,data) *(volatile u_int32_t* const)(reg) = data
+#define MOSAL_MMAP_IO_WRITE_QWORD(reg,data) *(volatile u_int64_t* const)(reg) = data
+/*
+#define MOSAL_MMAP_IO_WRITE_QWORD(reg,buf) \
+ *(u_int32_t*)(reg) = *(u_int32_t *)(buf); *((u_int32_t*)(reg)+1) = *((u_int32_t *)(buf)+1)
+*/
+#define MOSAL_MMAP_IO_WRITE_BUF_BYTE(reg,buf,size) memcpy((volatile void*)(reg),(volatile void*)(buf),size)
+#define MOSAL_MMAP_IO_WRITE_BUF_WORD(reg,buf,size) memcpy((volatile void*)(reg),(volatile void*)(buf),(size)<<1)
+#define MOSAL_MMAP_IO_WRITE_BUF_DWORD(reg,buf,size) memcpy((volatile void*)(reg),(volatile void*)(buf),(size)<<2)
+
+
+#ifndef PAGE_SIZE
+#define PAGE_SIZE MOSAL_get_sys_page_size( 0 )
+#endif
+
+
+#endif
+
+
+/******************************************************************************
+ * MOSAL_pci_virt_alloc_consistent
+ *
+ * Description:
+ *   allocate virtually contiguous consistent memory (coherent)
+ *
+ * Parameters:
+ *    size(IN) the required allocation size in bytes
+ *    alignment (IN) the required alignment in bytes
+ *
+ * Returns: virtual address of allocated area
+ *          0 if failed
+ *
+ ******************************************************************************/
+/* void *MOSAL_pci_virt_alloc_consistent(MT_size_t size, u_int8_t alignment); */
+#ifdef CONFIG_NOT_COHERENT_CACHE
+  /* non-coherent cache */
+  #ifdef MT_KERNEL
+    #define MOSAL_pci_virt_alloc_consistent(size, alignment) VMALLOC((size))
+  #else
+    /* no support in user level */
+  #endif
+#else
+  #define MOSAL_pci_virt_alloc_consistent(size, alignment) VMALLOC((size))
+#endif
+
+
+/******************************************************************************
+ * MOSAL_pci_virt_free_consistent
+ *
+ * Description:
+ *   de-allocate virtually contiguous consistent memory (coherent)
+ *
+ * Parameters:
+ *    vaddr(IN) address of freed allocation
+ *    size(IN) size of area to be freed in bytes
+ *
+ * Returns:
+ *
+ ******************************************************************************/
+/*void MOSAL_pci_virt_free_consistent(void *vaddr, MT_size_t size);*/
+#ifdef CONFIG_NOT_COHERENT_CACHE
+  /* non-coherent cache */
+  #ifdef MT_KERNEL
+    #define MOSAL_pci_virt_free_consistent(vaddr, size) VFREE((vaddr))
+  #else
+    /* no support in user level */
+  #endif
+#else
+  #define MOSAL_pci_virt_free_consistent(vaddr, size) VFREE((vaddr))
+#endif
+
+
+/******************************************************************************
+ * MOSAL_pci_phys_alloc_consistent
+ *
+ * Description:
+ *   allocate physically contiguous consistent memory (coherent)
+ *
+ * Parameters:
+ *    size(IN) the required allocation size in bytes
+ *    alignment(IN) the required alignment in bytes
+ *
+ * Returns: virtual address of allocated area
+ *          0 if failed
+ *
+ ******************************************************************************/
+/* void *MOSAL_pci_phys_alloc_consistent(MT_size_t size, u_int8_t alignment); */
+#ifdef MT_KERNEL
+  #define MOSAL_pci_phys_alloc_consistent(size,alignment) ((void*)(MT_ulong_ptr_t)MOSAL_phys_ctg_get(size))
+
+#else
+  /* no support in user level */
+#endif
+
+
+/******************************************************************************
+ * MOSAL_pci_phys_free_consistent
+ *
+ * Description:
+ *   de-allocate physically contiguous consistent memory (coherent)
+ *
+ * Parameters:
+ *    vaddr(IN) address of freed allocation
+ *    size(IN) size of area to be freed in bytes
+ *
+ * Returns:
+ *
+ ******************************************************************************/
+/*void MOSAL_pci_phys_free_consistent(void *vaddr, MT_size_t size);*/
+#ifdef MT_KERNEL
+  #define MOSAL_pci_phys_free_consistent(vaddr, size) \
+    MOSAL_phys_ctg_free((MT_virt_addr_t)(MT_ulong_ptr_t)(vaddr), size)
+#else
+  /* no support in user level */
+#endif
+
+
+#endif /* H_MOSAL_MEM_IMP_H */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_priv.h
new file mode 100644
index 00000000..120234fc
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mem_priv.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MOSAL_MEM_PRIV_H
+#define H_MOSAL_MEM_PRIV_H
+
+
+/******************************************************************************
+ *
+ *  Function: MOSAL_io_remap_for_user
+ *
+ *  Description: Map a physically contiguous buffer to a virtual address.
+ *
+ *  Parameters:
+ *      pa (IN) MT_phys_addr_t
+ *          Physical address.
+ *      size (IN) MT_size_t
+ *          Size of memory buffer in bytes
+ *
+ *
+ *  Returns: On success returns pointer to new virtual memory buffer else
+ *           returns zero.
+ *
+ *  Notes: The returned address will be page aligned. In case 'size' is not
+ *         page aligned the amount of allocated memory can be bigger than
+ *         requested.
+ *
+ ******************************************************************************/
+MT_virt_addr_t MOSAL_io_remap_for_user(MT_phys_addr_t pa, MT_size_t size);
+
+/******************************************************************************
+ *
+ *  Function: MOSAL_io_release_for_user
+ *
+ *  Description: Unmap a previously mapped physically contiguous buffer.
+ *
+ *  Parameters:
+ *      pa (IN) MT_phys_addr_t
+ *          Physical address.
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+void MOSAL_io_release_for_user(MT_phys_addr_t pa);
+
+/******************************************************************************
+ *
+ *  Function: MOSAL_phys_ctg_get_for_user
+ *
+ *  Description: allocate a physically contiguous pinned memory region.
+ *
+ *  Parameters:
+ *      size (IN)
+ *          size of physically contiguous memory to be allocated.
+ *
+ *  Returns: virtual address of memory or NULL if failed.
+ *
+ ******************************************************************************/
+MT_virt_addr_t MOSAL_phys_ctg_get_for_user(MT_size_t size);
+
+
+/******************************************************************************
+ *
+ *  Function: MOSAL_phys_ctg_free_for_user
+ *
+ *  Description: release a physically contiguous pinned memory region.
+ *
+ *  Parameters:
+ *      addr (IN)
+ *          address of physically contiguous memory to be released.
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+void MOSAL_phys_ctg_free_for_user(MT_virt_addr_t addr);
+
+#ifdef __KERNEL__
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Look-aside List Management for non-paged memory
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/******************************************************************************
+ *  Function (kernel-mode only):
+ *      MOSAL_la_list_init
+ *
+ *  Description:
+ *    initializes a lookaside list for nonpaged entries of the specified size
+ *
+ *  Parameters:
+ *      Lookaside(IN) PNPAGED_LOOKASIDE_LIST
+ *          Pointer to the caller-supplied memory for the lookaside list head to be initialized
+ *      Allocate(IN) PALLOCATE_FUNCTION
+ *          Either points to a caller-supplied routine for allocating an entry when the lookaside list is empty, or this parameter can be NULL
+ *      Free(IN) PFREE_FUNCTION
+ *          Either points to a caller-supplied routine for freeing an entry whenever the lookaside list is full, or this parameter can be NULL
+ *      Flags(IN) ULONG
+ *          Reserved. Must be zero
+ *      Size(IN) SIZE_T
+ *          Specifies the size in bytes for each nonpaged entry to be allocated subsequently
+ *      Tag(IN) ULONG
+ *          Specifies the pool tag for lookaside list entries
+ *      Depth(IN) USHORT
+ *          Reserved. Must be zero
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+void MOSAL_la_list_init(
+    PNPAGED_LOOKASIDE_LIST  Lookaside,
+    PALLOCATE_FUNCTION  Allocate,
+    PFREE_FUNCTION  Free,
+    ULONG  Flags,
+    SIZE_T  Size,
+    ULONG  Tag,
+    USHORT  Depth
+    );
+
+/******************************************************************************
+ *  Function (kernel-mode only):
+ *      MOSAL_la_list_alloc
+ *
+ *  Description:
+ *    returns a pointer to a nonpaged entry from the given lookaside list,
+ *    or a pointer to a newly allocated nonpaged entry
+ *
+ *  Parameters:
+ *      Lookaside(IN) PNPAGED_LOOKASIDE_LIST
+ *          pointer to the list head
+ *
+ *  Returns:
+ *      void*
+ *          a pointer to an entry if one can be allocated. Otherwise, it returns NULL.
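+ *
+ *  Note: these wrappers appear to mirror the NT lookaside-list routines
+ *  (ExInitializeNPagedLookasideList, ExAllocateFromNPagedLookasideList,
+ *  ExFreeToNPagedLookasideList, ExDeleteNPagedLookasideList); typical use
+ *  is init at driver load, alloc/free per request, delete at unload.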
+ * + ******************************************************************************/ +void *MOSAL_la_list_alloc( PNPAGED_LOOKASIDE_LIST Lookaside ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_la_list_free + * + * Description: + * returns an entry to the pool + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * pointer to the list head + * Entry(IN) PVOID + * Entry to be returned + * + * Returns: + * + ******************************************************************************/ +void MOSAL_la_list_free( PNPAGED_LOOKASIDE_LIST Lookaside, PVOID Entry ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_la_list_delete + * + * Description: + * deletes the list + * + * Parameters: + * Lookaside(IN) PNPAGED_LOOKASIDE_LIST + * pointer to the list head + * + * Returns: + * + ******************************************************************************/ +void MOSAL_la_list_delete( PNPAGED_LOOKASIDE_LIST Lookaside ); + +typedef struct { + MOSAL_pid_t pid; + LIST_ENTRY io_remap_que; + LIST_ENTRY phys_ctg_que; + LIST_ENTRY phys_addr_que; +} mem_rmv_t; + + +PVOID MOSAL_mem_rsct_open(MOSAL_pid_t pid); +void MOSAL_mem_rsct_close(mem_rmv_t *rmv_p); + + +#endif /* __KERNEL__ */ + +#endif /* H_MOSAL_MEM_PRIV_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock.c new file mode 100644 index 00000000..79d13fe4 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock.c @@ -0,0 +1,476 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mosal_priv.h" + +/* hash using */ +#include +#ifdef MT_64BIT + +/* 64-bit platform */ +#include +#define hash_insert VIP_hashv4p_insert +#define hash_erase VIP_hashv4p_erase +#define hash_create VIP_hashv4p_create +#define hash_destroy VIP_hashv4p_destroy +#define hash_find VIP_hashv4p_find +#define hash_p_t VIP_hashv4p_p_t +#define HASH_KEY_DEFINE(key) u_int64_t key[2] +#define HASH_KEY_BUILD(key,a,s) key[0] = (u_int64_t)(a); key[1] = (u_int64_t)(s) +#define HASH_KEY_VALUE(key) (u_int32_t*)(key) +#define HASH_KEY_ASSIGN(dest, src) memcpy( dest, src, sizeof(VIP_hashv4p_key_t) ) +#else + +/* 32-bit platform */ +#define hash_insert VIP_hash64p_insert +#define hash_erase VIP_hash64p_erase +#define hash_create VIP_hash64p_create +#define hash_destroy VIP_hash64p_destroy +#define hash_find VIP_hash64p_find +#define hash_p_t VIP_hash64p_p_t +#define HASH_KEY_DEFINE(key) ULARGE_INTEGER key +#define HASH_KEY_BUILD(key,a,s) key.HighPart = (LONG)(a); key.LowPart = (ULONG)(s) +#define HASH_KEY_VALUE(key) *(u_int64_t*)&(key) +#define HASH_KEY_ASSIGN(dest, src) dest = src +#endif + + +static VIP_hash64p_p_t hash_pcs = NULL; +static MOSAL_semaphore_t sem; +static LIST_ENTRY pcs_que; + +/* process element in the hash_pcs */ +typedef struct { + LIST_ENTRY link; /* link for pcs_que */ + LIST_ENTRY que; /* que for the same addr/size request elements */ + hash_p_t hash_req_p; + ULONG cnt; + MOSAL_pid_t pid; +} mlock_hash_t; + +/* element, containing list mlock requests with the same addr/size (sits in the process' hash) */ +typedef struct { + LIST_ENTRY link; /* link for mlock_hash_t.que */ + LIST_ENTRY que; /* que for mlock requests */ + ULONG cnt; + HASH_KEY_DEFINE(key); +} mlock_req_que_t; + +/* element for an mlock requst */ +typedef struct { + LIST_ENTRY link; /* link for mlock_req_que_t.que */ + PMDL mdl_p; + void *h_secure; /* handle returned by MmSecureVirtualMemory */ + +} mlock_req_t; + +/* The problem of the MOSAL_mlock() function is that it can be called +SEVERAL times with the SAME parameters from the SAME or +DIFFERENT processes. +The implementation is as follows: + 1. On mlock_init() a HASH for processes - hash_pcs - is created. + 2. For every process its own HASH64 - hash_req - is created and + the pointer to it is put into hash_pcs; + 3. On every mlock request we do the following: + - find process descriptor in hash_pcs; if not found - create; + - find mlock request queue structure - req_que - in hash_req; + if not found - allocate and put it into hash_req. This + structure is inserted by addr-size key; + - allocate mlock request structure - req - and chain it to req_que; + - increment que depth counter; + 4. 
On every munlock request: + - find process hash; + - find req_que element by addr-size key; + - erase the last element in the queue; + - decrement the que depth counter; +*/ +call_result_t MOSAL_mlock(MT_virt_addr_t addr, MT_size_t size) +{ + PMDL mdl_p; + HASH_KEY_DEFINE(key); + MT_virt_addr_t system_range_start = (MT_virt_addr_t)(MT_WIN_SYSTEM_SPACE_START); /* MmSystemRangeStart */ + MOSAL_pid_t pid = MOSAL_getpid(); + hash_p_t hash_req_p = NULL; + mlock_req_que_t * req_que_p = NULL; + mlock_req_t * req_p = NULL; + BOOLEAN hash_created = FALSE; + BOOLEAN hash_inserted = FALSE; + BOOLEAN req_inserted = FALSE; + mlock_hash_t * hash_p = NULL; + int is_kernel_memory = (addr >= system_range_start); + + MOSAL_sem_acq( &sem, TRUE ); + + /* on first request from process - create its hash_pcs */ + if (VIP_hash64p_find( hash_pcs, (u_int64_t)pid, &hash_p) != 0) + { /* hash not found - create new */ + + /* create hash */ + if (hash_create( 0, &hash_req_p ) != 0) { + MTL_ERROR4("MOSAL_mlock: hash_create failed\n"); + goto err_exit; + } + hash_created = TRUE; + + /* allocate hash element */ + hash_p = (mlock_hash_t*)MALLOC(sizeof(mlock_hash_t)); + if (hash_p == NULL) { + MTL_ERROR4("MOSAL_mlock: MALLOC0 failed\n"); + goto err0; + } + + /* fill hash element */ + hash_p->pid = pid; + hash_p->cnt = 0; + hash_p->hash_req_p = hash_req_p; + InitializeListHead( &hash_p->que ); + + /* insert hash */ + if (VIP_hash64p_insert( hash_pcs, (u_int64_t)pid, hash_p) != 0) { + MTL_ERROR4("MOSAL_mlock: VIP_hashp_insert failed\n"); + goto err1; + } + hash_inserted = TRUE; + + } /* hash not found - create new */ + else + hash_req_p = hash_p->hash_req_p; + + /* allocate request element */ + req_p = (mlock_req_t*)MALLOC(sizeof(mlock_req_t)); + if (req_p == NULL) { + MTL_ERROR4("MOSAL_mlock: MALLOC1 failed\n"); + goto err2; + } + + /* make key */ + HASH_KEY_BUILD(key,addr,size); + + /* on first request with this key create request element */ + if (hash_find( hash_req_p, HASH_KEY_VALUE(key), &req_que_p) != 0) + { /* mlock element not found - let's create it */ + + /* allocate request element */ + req_que_p = (mlock_req_que_t*)MALLOC(sizeof(mlock_req_que_t)); + if (req_que_p == NULL) { + MTL_ERROR4("MOSAL_mlock: MALLOC2 failed\n"); + goto err3; + } + + /* init it */ + req_que_p->cnt = 0; + InitializeListHead( &req_que_p->que ); + + /* insert in hash */ + if (hash_insert( hash_req_p, HASH_KEY_VALUE(key), req_que_p) != 0) { + MTL_ERROR4("MOSAL_mlock: hash_insert failed\n"); + goto err4; + } + req_inserted = TRUE; + + } /* mlock element not found - let's create it */ + + /* allocate MDL */ + mdl_p = IoAllocateMdl( (PVOID)addr, (ULONG)size, FALSE,TRUE,NULL); + if (mdl_p == NULL) { + MTL_ERROR4("MOSAL_mlock: IoAllocateMdl failed\n"); + goto err5; + } + + /* Now we have all stuff to do the task: + mdl_p - pointer to MDL, locking the memory; + req_p - pointer to mlock request element, keeping 'mdl_p'; + req_que_p - pointer to queue header, containing all mlock requests + with the same addr/size pair; + hash_p - pointer to hash_pcs element, owning all queue headers; + */ + + /* fill MDL */ + if (is_kernel_memory) + MmBuildMdlForNonPagedPool(mdl_p); + + __try + { /* try */ + + /* lock memory */ + MmProbeAndLockPages( mdl_p, + (is_kernel_memory) ? 
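/* probe in the mode matching the buffer's origin: kernel buffers
                           (already described above via MmBuildMdlForNonPagedPool)
                           vs. user buffers */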
KernelMode : UserMode,
+                         IoWriteAccess );
+
+  } /* try */
+
+  __except (EXCEPTION_EXECUTE_HANDLER)
+  {
+    NTSTATUS Status = GetExceptionCode();
+    MTL_ERROR4("MOSAL_mlock: Exception 0x%x on MmProbeAndLockPages(), addr 0x%p, size %d\n", Status, addr, size);
+    goto err6;
+  }
+
+  __try
+  {
+    req_p->h_secure =
+      MmSecureVirtualMemory( (PVOID)addr, (SIZE_T)size, PAGE_READWRITE );
+  }
+  __except( EXCEPTION_EXECUTE_HANDLER )
+  {
+    goto err7;
+  }
+
+  /* store MDL */
+  req_p->mdl_p = mdl_p;
+  InsertTailList( &req_que_p->que, &req_p->link );
+  req_que_p->cnt++;
+  HASH_KEY_ASSIGN(req_que_p->key, key);
+  hash_p->cnt++;
+
+  /* link new elements */
+  if (hash_inserted)
+    InsertTailList( &pcs_que, &hash_p->link );
+  if (req_inserted)
+    InsertTailList( &hash_p->que, &req_que_p->link );
+
+  MOSAL_sem_rel( &sem );
+  return MT_OK;
+
+err7:
+  MmUnlockPages(mdl_p);
+err6:
+  IoFreeMdl(mdl_p);
+err5:
+  if (req_inserted && hash_req_p != NULL)
+    hash_erase( hash_req_p, HASH_KEY_VALUE(key), &req_que_p);
+err4:
+  if (req_que_p != NULL)
+    FREE(req_que_p);
+err3:
+  if (req_p != NULL)
+    FREE(req_p);
+err2:
+  if (hash_inserted && hash_pcs != NULL)
+    VIP_hash64p_erase( hash_pcs, (u_int64_t)pid, &hash_p);
+err1:
+  if (hash_p != NULL)
+    FREE(hash_p);
+err0:
+  if (hash_created && hash_req_p != NULL)
+    hash_destroy( hash_req_p, NULL, NULL);
+err_exit:
+  MOSAL_sem_rel( &sem );
+  return MT_ERROR;
+}
+
+call_result_t MOSAL_munlock(MT_virt_addr_t addr, MT_size_t size)
+{
+  MOSAL_pid_t pid = MOSAL_getpid();
+  mlock_hash_t * hash_p = NULL;
+  mlock_hash_t * hash_tst_p = NULL;
+  hash_p_t hash_req_p;
+  mlock_req_que_t * req_que_p = NULL;
+  mlock_req_que_t * req_que_tst_p = NULL;
+  HASH_KEY_DEFINE(key);
+  mlock_req_t* req_p;
+  PMDL mdl_p;
+
+  /* protect from mlock */
+  MOSAL_sem_acq( &sem, TRUE );
+
+  if (VIP_hash64p_find( hash_pcs, (u_int64_t)pid, &hash_p) != 0) {
+    MOSAL_sem_rel( &sem );
+    MTL_ERROR4("MOSAL_munlock: Not found info for that pid (0x%x) \n", pid);
+    return MT_ERROR;
+  }
+
+  /* make key */
+  HASH_KEY_BUILD(key,addr,size);
+
+  /* get que element */
+  hash_req_p = hash_p->hash_req_p;
+  if (hash_find( hash_req_p, HASH_KEY_VALUE(key), &req_que_p) != 0) {
+    MTL_ERROR4("MOSAL_munlock: no que element for that pid (0x%x) \n", hash_p->pid);
+    MOSAL_sem_rel( &sem );
+    return MT_ERROR;
+  }
+
+  /* get mlock request element and MDL */
+  if (IsListEmpty( &req_que_p->que )) {
+    MTL_ERROR4("MOSAL_munlock: no req elements for that pid (0x%x) \n", hash_p->pid);
+    MOSAL_sem_rel( &sem );
+    return MT_ERROR;
+  }
+  req_p = (mlock_req_t*)RemoveTailList( &req_que_p->que );
+  mdl_p = req_p->mdl_p;
+
+  /* Now we have all the stuff to do the task:
+     mdl_p - pointer to MDL, locking the memory;
+     req_p - pointer to mlock request element, keeping 'mdl_p';
+     req_que_p - pointer to queue header, containing all mlock requests
+                 with the same addr/size pair;
+     hash_p - pointer to hash_pcs element, owning all queue headers;
+  */
+
+  /* unlock pages */
+  if (mdl_p) {
+    MmUnlockPages(mdl_p);
+    IoFreeMdl(mdl_p);
+  }
+
+  MmUnsecureVirtualMemory( req_p->h_secure );
+
+  /*
+   * remove bookkeeping info
+   */
+
+  /* (1) remove mlock request element */
+  FREE(req_p);
+
+  /* (2) when no pending requests, release queue element */
+  if (--req_que_p->cnt == 0) {
+    if (hash_erase( hash_req_p, HASH_KEY_VALUE(key), &req_que_tst_p) != 0) {
+      MTL_ERROR4("MOSAL_munlock_pid: hash_erase failed \n");
+    }
+    else
+      ASSERT(req_que_tst_p == req_que_p);
+    RemoveEntryList(&req_que_p->link);
+    FREE(req_que_p);
+  }
+
+  /* (3) when no pending queues, release hash element */
+  if (--hash_p->cnt == 0) {
+    if (VIP_hash64p_erase( hash_pcs, 
(u_int64_t)hash_p->pid, &hash_tst_p) != 0) { + MTL_ERROR4("MOSAL_munlock_pid: hash_erase failed \n"); + } + else + ASSERT(hash_tst_p == hash_p); + hash_destroy( hash_req_p, NULL, NULL); + RemoveEntryList(&hash_p->link); + FREE(hash_p); + } + + MOSAL_sem_rel( &sem ); + return MT_OK; +} + +static void cleanup_req(mlock_hash_t * hash_p, mlock_req_que_t * req_que_p) +{ + mlock_req_t * req_p = NULL; + mlock_req_que_t * req_que_tst_p = NULL; + hash_p_t hash_req_p = hash_p->hash_req_p; + + /* remove all requests of this request queue */ + while (!IsListEmpty( &req_que_p->que )) { + req_p = (mlock_req_t*)RemoveTailList( &req_que_p->que ); + if (req_p->mdl_p) { /* unlock pages */ + MmUnsecureVirtualMemory( req_p->h_secure ); + MmUnlockPages(req_p->mdl_p); + IoFreeMdl(req_p->mdl_p); + } + FREE(req_p); + } + + /* remove request que element */ + if (hash_erase( hash_req_p, HASH_KEY_VALUE(req_que_p->key), &req_que_tst_p) != 0) { + MTL_ERROR4("cleanup_req: hash_erase failed \n"); + } + else + ASSERT(req_que_tst_p == req_que_p); + RemoveEntryList(&req_que_p->link); + FREE(req_que_p); +} + +static void cleanup_pcs_stuff(mlock_hash_t * hash_p) +{ + mlock_req_que_t * req_que_p = NULL; + mlock_hash_t * hash_tst_p; + /* remove all requests queues of this process */ + while (!IsListEmpty( &hash_p->que )) { + req_que_p = (mlock_req_que_t*)RemoveTailList( &hash_p->que ); + cleanup_req(hash_p, req_que_p); + } + + /* remove process element */ + if (VIP_hash64p_erase( hash_pcs, (u_int64_t)hash_p->pid, &hash_tst_p) != 0) { + MTL_ERROR4("cleanup_pcs_stuff: hash_erase failed \n"); + } + else + ASSERT(hash_tst_p == hash_p); + hash_destroy( hash_p->hash_req_p, NULL, NULL); + RemoveEntryList(&hash_p->link); + FREE(hash_p); +} + +void MOSAL_mlock_cleanup_pcs(MOSAL_pid_t pid) +{ + mlock_hash_t * hash_p; + PLIST_ENTRY link_p; + MT_bool found = FALSE; + + MOSAL_sem_acq( &sem, TRUE ); + if (hash_pcs != NULL) { + for ( link_p = pcs_que.Flink; link_p != &pcs_que; link_p = link_p->Flink ) { + hash_p = (mlock_hash_t*)CONTAINING_RECORD(link_p, mlock_hash_t, link); + if (hash_p->pid == pid) { + found = TRUE; + break; + } + } + if (found == TRUE) + cleanup_pcs_stuff(hash_p); + } + MOSAL_sem_rel( &sem ); +} + +void MOSAL_mlock_cleanup(void) +{ + mlock_hash_t * hash_p; + + MOSAL_sem_acq( &sem, TRUE ); + if (hash_pcs != NULL) { + while (!IsListEmpty( &pcs_que )) { + hash_p = (mlock_hash_t*)RemoveTailList( &pcs_que ); + cleanup_pcs_stuff(hash_p); + } + VIP_hash64p_destroy( hash_pcs, NULL, NULL ); + hash_pcs = NULL; + } + MOSAL_sem_rel( &sem ); +} + +call_result_t MOSAL_mlock_init(void) +{ + hash_pcs = NULL; + if (VIP_hash64p_create( 0, &hash_pcs ) != 0) { + MTL_ERROR4("MOSAL_mlock_init: hash_create failed\n"); + return MT_ERROR; + } + MOSAL_sem_init( &sem, 1 ); + InitializeListHead( &pcs_que ); + return(MT_OK); +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock_priv.h new file mode 100644 index 00000000..980c58da --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_mlock_priv.h @@ -0,0 +1,68 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_MLOCK_PRIV_H +#define H_MOSAL_MLOCK_PRIV_H + +/******************************************************************************** + * Function: + * MOSAL_mlock_init + * + * Arguments: + * void + * Returns: + * MT_OK, + * appropriate error code otherwise + * + * Description: + * Initializes data structures needed for MOSAL_mlock/MOSAL_munlock functions + * + ********************************************************************************/ +call_result_t MOSAL_mlock_init(void); + + +/******************************************************************************** + * Function: + * MOSAL_mlock_cleanup + * + * Arguments: + * void + * Returns: + * void + * Description: + * Cleans data structures needed for MOSAL_mlock/MOSAL_munlock functions + * + ********************************************************************************/ +void MOSAL_mlock_cleanup(void); + +void MOSAL_mlock_cleanup_pcs(MOSAL_pid_t pid); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.c new file mode 100644 index 00000000..09431e14 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.c @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mtl_types.h" +#include "mosal_ntddk.h" +#include + +// leo +#define DONT_TRANSLATE_BUS_ADDRESS 1 + +/* + * PCI + * --- + */ +call_result_t MOSAL_PCI_find_device(u_int16_t vendor_id, + u_int16_t dev_id, + u_int16_t index, + u_int8_t * bus_p, + u_int8_t * dev_func_p) +{ +#define N_BUSES 8 +#define N_SLOTS 32 + ULONG l_Bus; + ULONG l_Slot; + ULONG l_DevId; + ULONG l_Bytes; + ULONG OurDevId = ((ULONG)dev_id << 16) | (ULONG)vendor_id; + ULONG l_Function = 0; /* don't know how to find it */ + int cnt = 0; + + for (l_Bus = 0; l_Bus < N_BUSES; l_Bus++ ) { + for (l_Slot = 0; l_Slot < N_SLOTS; l_Slot++ ) { + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + l_Bus, + l_Slot, + (PVOID)&l_DevId, + 0, + sizeof(ULONG) + ); + if (l_Bytes != sizeof(ULONG)) + continue; /* as if - "not found" */ + if (l_DevId == OurDevId) { + if (index == cnt) + goto found; + else + cnt++; + } + } + } + +found: + if (l_DevId == OurDevId && (index == cnt)) { + *bus_p = (u_int8_t)l_Bus; + *dev_func_p = (u_int8_t)((l_Slot<<3) | l_Function); + return MT_OK; + } + else + return MT_ENORSC; +} + +call_result_t MOSAL_PCI_read_config_byte(u_int8_t bus, u_int8_t dev_func, + u_int8_t offset, u_int8_t* data_p) +{ + ULONG l_Bytes; + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)data_p, + offset, + sizeof(u_int8_t) + ); + if (l_Bytes != sizeof(u_int8_t)) + return MT_ENORSC; + return MT_OK; +} + +call_result_t MOSAL_PCI_read_config_word(u_int8_t bus, u_int8_t dev_func, + u_int8_t offset, u_int16_t* data_p) +{ + ULONG l_Bytes; + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)data_p, + offset, + sizeof(u_int16_t) + ); + if (l_Bytes != sizeof(u_int16_t)) + return MT_ENORSC; + return MT_OK; +} + + +call_result_t MOSAL_PCI_read_config_dword(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t* data_p) +{ + ULONG l_Bytes; + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)data_p, + offset, + sizeof(ULONG) + ); + if (l_Bytes != sizeof(ULONG)) + return MT_ENORSC; + return MT_OK; +} + +call_result_t MOSAL_PCI_read_config_data(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t length, + u_int8_t* data_p + ) +{ + ULONG l_Bytes; + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)data_p, + offset, + length + ); + if (l_Bytes != length) + return MT_ENORSC; + return MT_OK; + } + + +call_result_t MOSAL_PCI_write_config_byte(u_int8_t bus, u_int8_t dev_func, + u_int8_t offset, u_int8_t data) +{ + ULONG l_Bytes; + l_Bytes = HalSetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)&data, + offset, + sizeof(u_int8_t) + ); + if (l_Bytes != sizeof(u_int8_t)) + return MT_ENORSC; + return MT_OK; +} + +call_result_t MOSAL_PCI_write_config_word(u_int8_t bus, u_int8_t dev_func, + u_int8_t offset, u_int16_t data) +{ + ULONG l_Bytes; + l_Bytes = HalSetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)&data, + offset, + sizeof(u_int16_t) + ); + if (l_Bytes != sizeof(u_int16_t)) + return MT_ENORSC; + return MT_OK; +} + + + +call_result_t MOSAL_PCI_write_config_dword(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t data) +{ + ULONG l_Bytes; + 
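/* dev_func packs device[7:3] and function[2:0] (see the header docs
+       below), so dev_func>>3 passes the device number as the HAL slot;
+       the function field is taken as 0, as in MOSAL_PCI_find_device */
+ 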
l_Bytes = HalSetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)&data, + offset, + sizeof(ULONG) + ); + if (l_Bytes != sizeof(ULONG)) + return MT_ENORSC; + return MT_OK; +} + +call_result_t MOSAL_PCI_write_config_data(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t length, + u_int8_t* data_p + ) +{ + ULONG l_Bytes; + l_Bytes = HalSetBusDataByOffset( + PCIConfiguration, + bus, + dev_func>>3, + (PVOID)data_p, + offset, + length + ); + if (l_Bytes != length) + return MT_ENORSC; + return MT_OK; + } + + +NTSTATUS +MapMemMapTheMemory( + IN PDEVICE_OBJECT DeviceObject, + IN OUT PVOID IoBuffer, + IN ULONG InputBufferLength, + IN ULONG OutputBufferLength + ) +/*++ + +Routine Description: + + Given a physical address, maps this address into a user mode process's + address space + +Arguments: + + DeviceObject - pointer to a device object + + IoBuffer - pointer to the I/O buffer + + InputBufferLength - input buffer length + + OutputBufferLength - output buffer length + +Return Value: + + STATUS_SUCCESS if sucessful, otherwise + STATUS_UNSUCCESSFUL, + STATUS_INSUFFICIENT_RESOURCES, + (other STATUS_* as returned by kernel APIs) + +--*/ +{ + + PPHYSICAL_MEMORY_INFO ppmi = (PPHYSICAL_MEMORY_INFO) IoBuffer; + + INTERFACE_TYPE interfaceType; + ULONG busNumber; + PHYSICAL_ADDRESS physicalAddress; + ULONG length; + UNICODE_STRING physicalMemoryUnicodeString; + OBJECT_ATTRIBUTES objectAttributes; + HANDLE physicalMemoryHandle = NULL; + PVOID PhysicalMemorySection = NULL; + ULONG inIoSpace, inIoSpace2; + NTSTATUS ntStatus; + PHYSICAL_ADDRESS physicalAddressBase; + PHYSICAL_ADDRESS physicalAddressEnd; + PHYSICAL_ADDRESS viewBase; + PHYSICAL_ADDRESS mappedLength; +#ifndef DONT_TRANSLATE_BUS_ADDRESS + BOOLEAN translateBaseAddress; + BOOLEAN translateEndAddress; +#endif + PVOID virtualAddress; + SIZE_T viewSize; + + if ( ( InputBufferLength < sizeof (PHYSICAL_MEMORY_INFO) ) || + ( OutputBufferLength < sizeof (PVOID) ) ) + { + DbgPrint("(MapMemMapTheMemory) Insufficient input or output buffer\n"); + + ntStatus = STATUS_INSUFFICIENT_RESOURCES; + + goto done; + } + + interfaceType = ppmi->InterfaceType; + busNumber = ppmi->BusNumber; + physicalAddress = ppmi->BusAddress; + inIoSpace = inIoSpace2 = ppmi->AddressSpace; + length = ppmi->Length; + + + // + // Get a pointer to physical memory... + // + // - Create the name + // - Initialize the data to find the object + // - Open a handle to the oject and check the status + // - Get a pointer to the object + // - Free the handle + // + + RtlInitUnicodeString (&physicalMemoryUnicodeString, + L"\\Device\\PhysicalMemory"); + + InitializeObjectAttributes (&objectAttributes, + &physicalMemoryUnicodeString, + OBJ_CASE_INSENSITIVE, + (HANDLE) NULL, + (PSECURITY_DESCRIPTOR) NULL); + + ntStatus = ZwOpenSection (&physicalMemoryHandle, + SECTION_ALL_ACCESS, + &objectAttributes); + + if (!NT_SUCCESS(ntStatus)) + { + DbgPrint("(MapMemMapTheMemory) ZwOpenSection failed (0x%x)\n", ntStatus); + + goto done; + } + + ntStatus = ObReferenceObjectByHandle (physicalMemoryHandle, + SECTION_ALL_ACCESS, + (POBJECT_TYPE) NULL, + KernelMode, + &PhysicalMemorySection, + (POBJECT_HANDLE_INFORMATION) NULL); + + if (!NT_SUCCESS(ntStatus)) + { + DbgPrint("(MapMemMapTheMemory) ObReferenceObjectByHandle failed (0x%x)\n", ntStatus); + + goto close_handle; + } + + // + // Initialize the physical addresses that will be translated + // + + physicalAddressEnd.QuadPart = physicalAddress.QuadPart + length; + + // + // Translate the physical addresses. 
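+    // (with DONT_TRANSLATE_BUS_ADDRESS defined above, the caller's bus
+    // address is used as the system physical address verbatim; otherwise
+    // HalTranslateBusAddress converts the base and end addresses)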
+ // + +#ifdef DONT_TRANSLATE_BUS_ADDRESS + physicalAddressBase = physicalAddress; +#else + translateBaseAddress = + HalTranslateBusAddress (interfaceType, + busNumber, + physicalAddress, + &inIoSpace, + &physicalAddressBase); + + translateEndAddress = + HalTranslateBusAddress (interfaceType, + busNumber, + physicalAddressEnd, + &inIoSpace2, + &physicalAddressEnd); + + if ( !(translateBaseAddress && translateEndAddress) ) { + DbgPrint("(MapMemMapTheMemory) HalTranslatephysicalAddress failed\n"); + + ntStatus = STATUS_UNSUCCESSFUL; + + goto close_handle; + } +#endif + + // + // Calculate the length of the memory to be mapped + // + + mappedLength.QuadPart = + physicalAddressEnd.QuadPart - physicalAddressBase.QuadPart; + // + // If the mappedlength is zero, somthing very weird happened in the HAL + // since the Length was checked against zero. + // + + if (mappedLength.LowPart == 0) { + DbgPrint("(MapMemMapTheMemory) mappedLength.LowPart == 0\n"); + + ntStatus = STATUS_UNSUCCESSFUL; + + goto close_handle; + } + + length = mappedLength.LowPart; + + // + // If the address is in io space, just return the address, otherwise + // go through the mapping mechanism + // + + if (inIoSpace) { + *((PVOID *) IoBuffer) = (PVOID) physicalAddressBase.QuadPart; + } else { + // + // initialize view base that will receive the physical mapped + // address after the MapViewOfSection call. + // + + viewBase = physicalAddressBase; + + // + // Let ZwMapViewOfSection pick an address + // + + virtualAddress = NULL; + + // + // Map the section + // + viewSize = length; + ntStatus = ZwMapViewOfSection (physicalMemoryHandle, + (HANDLE) -1, + &virtualAddress, + 0L, + length, + &viewBase, + &viewSize, + ViewShare, + 0, + PAGE_READWRITE | PAGE_NOCACHE); + + if (!NT_SUCCESS(ntStatus)) + { + DbgPrint("(MapMemMapTheMemory) ZwMapViewOfSection failed (0x%x)\n", ntStatus); + + goto close_handle; + } + + // + // Mapping the section above rounded the physical address down to the + // nearest 64 K boundary. Now return a virtual address that sits where + // we want by adding in the offset from the beginning of the section. 
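+        // (for example, a hypothetical request for physical 0x12345678 gets
+        // a view based at 0x12340000, so 0x5678 is added back to the
+        // returned virtual address)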
+        //
+
+        (char*) virtualAddress += (ULONG)physicalAddressBase.LowPart -
+                                  (ULONG)viewBase.LowPart;
+
+        *((PVOID *) IoBuffer) = virtualAddress;
+
+    }
+
+    ntStatus = STATUS_SUCCESS;
+
+close_handle:
+    ZwClose (physicalMemoryHandle);
+
+done:
+    return ntStatus;
+}
+
+// leo
+// for some reason the Compaq driver doesn't make the ObDereference
+// i fixed it here
+NTSTATUS
+MapMemUnmapTheMemory(
+    IN PDEVICE_OBJECT DeviceObject,
+    IN PVOID IoBuffer,
+    IN ULONG InputBufferLength,
+    IN ULONG OutputBufferLength
+    )
+/*++
+
+Routine Description:
+
+    Unmaps a view of physical memory previously mapped into a user mode
+    process's address space by MapMemMapTheMemory
+
+Arguments:
+
+    DeviceObject - pointer to a device object
+
+    IoBuffer - pointer to the I/O buffer
+
+    InputBufferLength - input buffer length
+
+    OutputBufferLength - output buffer length
+
+Return Value:
+
+    STATUS_SUCCESS if successful, otherwise
+    STATUS_UNSUCCESSFUL,
+    STATUS_INSUFFICIENT_RESOURCES,
+    (other STATUS_* as returned by kernel APIs)
+
+--*/
+{
+    UNICODE_STRING physicalMemoryUnicodeString;
+    OBJECT_ATTRIBUTES objectAttributes;
+    HANDLE physicalMemoryHandle = NULL;
+    PVOID PhysicalMemorySection = NULL;
+    NTSTATUS ntStatus, ntStatusUnmap;
+    PVOID virtualAddress = *(PVOID*)IoBuffer;
+
+    // unmap
+    ntStatusUnmap = ZwUnmapViewOfSection ((HANDLE) -1, virtualAddress );
+    if (!NT_SUCCESS(ntStatusUnmap)) {
+        DbgPrint("(MapMemUnmapTheMemory) ZwUnmapViewOfSection failed (0x%x)\n", ntStatusUnmap);
+        // no goto error - we'll try to dereference the object any way
+    }
+
+    //
+    // dereference the section object
+    //
+
+    // open the section object
+    RtlInitUnicodeString (&physicalMemoryUnicodeString, L"\\Device\\PhysicalMemory");
+    InitializeObjectAttributes (&objectAttributes, &physicalMemoryUnicodeString,
+        OBJ_CASE_INSENSITIVE, (HANDLE) NULL, (PSECURITY_DESCRIPTOR) NULL);
+    ntStatus = ZwOpenSection (&physicalMemoryHandle, SECTION_ALL_ACCESS, &objectAttributes);
+    if (!NT_SUCCESS(ntStatus)) {
+        DbgPrint("(MapMemUnmapTheMemory) ZwOpenSection failed. Can't dereference the section object (0x%x)\n", ntStatus);
+        goto done;
+    }
+
+    // get pointer to the object
+    ntStatus = ObReferenceObjectByHandle (physicalMemoryHandle, SECTION_ALL_ACCESS,
+        (POBJECT_TYPE) NULL, KernelMode, &PhysicalMemorySection, (POBJECT_HANDLE_INFORMATION) NULL);
+    if (!NT_SUCCESS(ntStatus)) {
+        DbgPrint("(MapMemUnmapTheMemory) ObReferenceObjectByHandle failed. Can't dereference the section object (0x%x)\n", ntStatus);
+        goto close_handle;
+    }
+
+    // dereference the object (twice - because we made the second reference in order to get a pointer to the object)
+    ObDereferenceObject( PhysicalMemorySection );
+    ObDereferenceObject( PhysicalMemorySection );
+
+    // close the object
+close_handle:
+    ZwClose (physicalMemoryHandle);
+
+done:
+    if (!NT_SUCCESS(ntStatusUnmap))
+        ntStatus = ntStatusUnmap;
+
+    return ntStatus;
+}
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.h
new file mode 100644
index 00000000..bd0f0927
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_ntddk.h
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MOSAL_NTDDK_H
+#define H_MOSAL_NTDDK_H
+
+#include
+#if 0
+#include
+#else
+typedef char int8_t;
+typedef unsigned char u_int8_t;
+typedef short int int16_t;
+typedef unsigned short int u_int16_t;
+typedef INT32 int32_t;
+typedef UINT32 u_int32_t;
+typedef INT64 int64_t;
+typedef UINT64 u_int64_t;
+#endif
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_find_device
+ *
+ *  Description: Find a PCI device based on Vendor and Device IDs.
+ *
+ *  Parameters:
+ *    vendor_id(IN) u_int16_t
+ *        Vendor ID.
+ *    dev_id(IN) u_int16_t
+ *        Device ID.
+ *    index(IN) u_int16_t
+ *        Occurrence of device of given Vendor/Device IDs.
+ *    bus_p(OUT) u_int8_t *
+ *        Bus num of matching device.
+ *    dev_func_p(OUT) u_int8_t *
+ *        Device/Function ([7:3]/[2:0]) of matching device.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if found, MT_ENODEV if no such device is found.
+ *
+ *  Note:
+ *    For hot-swap support, the PCI bus should really be probed on device
+ *    search, and not a preset DB (which was usually created during boot).
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_find_device(u_int16_t vendor_id, u_int16_t dev_id,
+                                    u_int16_t index,
+                                    u_int8_t *bus_p, u_int8_t *dev_func_p);
+
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_read_config_byte
+ *
+ *  Description: Read byte of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    data_p(OUT) u_int8_t*
+ *        Ptr to a byte data buffer which holds read val.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_read_config_byte(u_int8_t bus, u_int8_t dev_func,
+                                         u_int8_t offset, u_int8_t* data_p);
+
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_read_config_word
+ *
+ *  Description: Read word of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    data_p(OUT) u_int16_t*
+ *        Ptr to a word data buffer which holds read val.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_read_config_word(u_int8_t bus, u_int8_t dev_func,
+                                         u_int8_t offset, u_int16_t* data_p);
+
+
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_read_config_dword
+ *
+ *  Description: Read dword of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    data_p(OUT) u_int32_t*
+ *        Ptr to a dword data buffer which holds read val.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_read_config_dword(u_int8_t bus, u_int8_t dev_func,
+                                          u_int8_t offset, u_int32_t* data_p);
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_read_config_data
+ *
+ *  Description: Read a block of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    length(IN) u_int32_t
+ *        Length of data.
+ *    data_p(OUT) u_int8_t*
+ *        Ptr to a data buffer which holds read val.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_read_config_data(u_int8_t bus, u_int8_t dev_func,
+                                         u_int8_t offset, u_int32_t length, u_int8_t* data_p);
+
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_write_config_byte
+ *
+ *  Description: Write byte to PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    data(IN) u_int8_t
+ *        Val to write.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_write_config_byte(u_int8_t bus, u_int8_t dev_func,
+                                          u_int8_t offset, u_int8_t data);
+
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_write_config_word
+ *
+ *  Description: Write word of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ *    dev_func(IN) u_int8_t
+ *        Device/Function ([7:3]/[2:0]) of device.
+ *    offset(IN) u_int8_t
+ *        Offset in device's config header.
+ *    data(IN) u_int16_t
+ *        Val to write.
+ *
+ *  Returns:
+ *    call_result_t
+ *        MT_OK if success, MT_ERROR if failed.
+ *
+ ******************************************************************************/
+call_result_t MOSAL_PCI_write_config_word(u_int8_t bus, u_int8_t dev_func,
+                                          u_int8_t offset, u_int16_t data);
+
+
+/******************************************************************************
+ *  Function: MOSAL_PCI_write_config_dword
+ *
+ *  Description: Write dword of PCI config space.
+ *
+ *  Parameters:
+ *    bus(IN) u_int8_t
+ *        Bus num of device.
+ * dev_func(IN) u_int8_t + * Device/Function ([7:3]/[2:0]) of device. + * offset(IN) u_int8_t + * Offset in device's config header. + * data(IN) u_int32_t + * Val to write. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_config_dword(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t data); + +/****************************************************************************** + * Function: MOSAL_PCI_write_config_data + * + * Description: Write dword of PCI config space. + * + * Parameters: + * bus(IN) u_int8_t + * Bus num of device. + * dev_func(IN) u_int8_t + * Device/Function ([7:3]/[2:0]) of device. + * offset(IN) u_int8_t + * Offset in device's config header. + * length(IN) u_int32_t + * Length of data. + * data_p(IN) u_int8_t* + * Val to write. + * + * Returns: + * call_result_t + * MT_OK if success, MT_ERROR if failed. + * + ******************************************************************************/ +call_result_t MOSAL_PCI_write_config_data(u_int8_t bus, + u_int8_t dev_func, + u_int8_t offset, + u_int32_t length, + u_int8_t* data_p); + + +#ifdef __KERNEL__ +/* + * Our user mode app will pass an initialized structure like this + * down to the kernel mode driver + */ + +typedef struct +{ + INTERFACE_TYPE InterfaceType; /* Isa, Eisa, etc.... */ + ULONG BusNumber; /* Bus number */ + PHYSICAL_ADDRESS BusAddress; /* Bus-relative address */ + ULONG AddressSpace; /* 0 is memory, 1 is I/O */ + ULONG Length; /* Length of section to map */ + +} PHYSICAL_MEMORY_INFO, *PPHYSICAL_MEMORY_INFO; + +NTSTATUS +MapMemMapTheMemory( + IN PDEVICE_OBJECT DeviceObject, + IN OUT PVOID IoBuffer, + IN ULONG InputBufferLength, + IN ULONG OutputBufferLength + ); + +NTSTATUS +MapMemUnmapTheMemory( + IN PDEVICE_OBJECT DeviceObject, + IN PVOID IoBuffer, + IN ULONG InputBufferLength, + IN ULONG OutputBufferLength + ); +#endif + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_priv.h new file mode 100644 index 00000000..67d4e9dd --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_priv.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_MOSAL_PRIV_H +#define H_MOSAL_PRIV_H + +/* ----- common stuff ----- */ +#include +#include +#include + + +/* ----- mosal OS-independent types ----- */ + +/* exported services */ +#include "mosal_timer_priv.h" +#include "mosal_que_priv.h" +#include "mosal_mem_priv.h" +#include "mosal_mlock_priv.h" +#include "mosal_que_priv.h" +#include "mosal_gen_priv.h" +#include "mosal.h" +#include "mosal_sync_priv.h" +#include "mosal_k2u_cbk_priv.h" + +/* ----- helpers ----- */ +#include "mosal_util.h" + +#endif + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_prot_ctx_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_prot_ctx_imp.h new file mode 100644 index 00000000..7b6153cc --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_prot_ctx_imp.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_PROT_CTX_IMP_H +#define H_MOSAL_PROT_CTX_IMP_H + +typedef MT_ulong_ptr_t mosal_pid_t; + +/* Protection and virtual-memory context */ +typedef enum { + MOSAL_PROT_CTX_KERNEL, /* Kernel protection/memory context */ + MOSAL_PROT_CTX_CURRENT_USER /* Current user level protection/memory context */ +} mosal_prot_ctx_t; + +/* "functions" for backward compatibility */ +#define mosal_get_current_prot_ctx() MOSAL_PROT_CTX_CURRENT_USER +#define mosal_get_kernel_prot_ctx() MOSAL_PROT_CTX_KERNEL + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que.c new file mode 100644 index 00000000..3a02981f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que.c @@ -0,0 +1,372 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mosal_priv.h"
+
+extern KIRQL cur_max_irql;
+
+
+/* structures */
+/* Mosal queue defs. */
+struct mosalq_el
+{
+  // must be the first
+  LIST_ENTRY link;
+  union {
+    struct { /* data element */
+      int size;
+      void * data;
+    };
+    struct { /* process element */
+      KEVENT h_event;
+    };
+  };
+};
+
+struct mosalq_st
+{
+  LIST_ENTRY link;
+  LIST_ENTRY proc_que;
+  LIST_ENTRY data_que;
+  MOSAL_data_free_t qdestroy_free; /* used to free data on qdestroy */
+  BOOLEAN remove;
+  int proc_num;
+};
+
+/* queue support */
+static struct mosalq_st q_hdr[MOSAL_MAX_QHANDLES]; /* q db */
+static struct mosalq_el q_el[MOSAL_MAX_QELEMENTS];
+static LIST_ENTRY q_free_el;
+static LIST_ENTRY q_free_hdr;
+
+#define QPTR_2_HANDLE(ptr) (((char*)ptr - (char*)&q_hdr[0]) / sizeof(struct mosalq_st))
+#define QHANDLE_2_PTR(ix) (&q_hdr[ix])
+
+/* Helpers */
+void init_queues()
+{
+  int i;
+  struct mosalq_el * q_el_p = &q_el[0];
+  struct mosalq_st * q_hdr_p = &q_hdr[0];
+
+  /* zero DBs */
+  memset(q_hdr, 0, sizeof(q_hdr));
+  memset(q_el, 0, sizeof(q_el));
+
+  /* init free chain headers */
+  InitializeListHead( &q_free_el );
+  InitializeListHead( &q_free_hdr );
+
+  /* init q_el and link them into LIFO chain */
+  for (i=0; i<MOSAL_MAX_QELEMENTS; i++, q_el_p++) {
+    InsertTailList( &q_free_el, &q_el_p->link );
+  }
+
+  /* init q_hdr and link them into LIFO chain */
+  for (i=0; i<MOSAL_MAX_QHANDLES; i++, q_hdr_p++) {
+    InitializeListHead( &q_hdr_p->proc_que );
+    InitializeListHead( &q_hdr_p->data_que );
+    InsertTailList( &q_free_hdr, &q_hdr_p->link );
+  }
+
+}
+
+struct mosalq_st *alloc_qhdr()
+{
+  struct mosalq_st * hdr;
+
+  if ( IsListEmpty( &q_free_hdr ) )
+    return NULL;
+  hdr = (struct mosalq_st *)RemoveHeadList( &q_free_hdr );
+  return hdr;
+}
+
+struct mosalq_el *alloc_qel()
+{
+  struct mosalq_el * el;
+
+  if ( IsListEmpty( &q_free_el ) )
+    return NULL;
+  el = (struct mosalq_el *)RemoveHeadList( &q_free_el );
+  return el;
+}
+
+void free_qhdr(struct mosalq_st *q_hdr_p)
+{
+  InsertHeadList( &q_free_hdr, &q_hdr_p->link );
+}
+
+void free_qel(struct mosalq_el *q_el_p)
+{
+  InsertHeadList( &q_free_el, &q_el_p->link );
+}
+
+
+call_result_t MOSAL_qcreate(MOSAL_qhandle_t *qh, MOSAL_data_free_t qdestroy_free)
+{
+  call_result_t rc = MT_OK;
+  struct mosalq_st * q_hdr_p = alloc_qhdr();
+
+  MTL_TRACE1("-> MOSAL_qcreate(...)\n");
+
+  if (q_hdr_p == NULL)
+    return MT_EAGAIN; /* no free q handle */
+  memset( q_hdr_p, 0, sizeof(struct mosalq_st) );
+  InitializeListHead( &q_hdr_p->proc_que );
+  InitializeListHead( &q_hdr_p->data_que );
+  q_hdr_p->qdestroy_free = qdestroy_free;
+  q_hdr_p->proc_num = 0;
+  q_hdr_p->remove = FALSE;
+  *qh = (MOSAL_qhandle_t)QPTR_2_HANDLE(q_hdr_p);
+
+  MTL_TRACE1("<- MOSAL_qcreate qh=%d rc=%d (%s)\n\n",
+             *qh, rc, mtl_strerror_sym(rc));
+  return rc;
+}
+
+bool MOSAL_isqempty(MOSAL_qhandle_t qh)
+{
+  /* check parameters */
+  if (qh >= MOSAL_MAX_QHANDLES)
+    return MT_ENORSC; /* no such queue */
+  return 
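/* TRUE when the handle's data queue holds no elements */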
IsListEmpty(&(QHANDLE_2_PTR(qh))->data_que); +} + +#ifdef SUSPEND_IRP +call_result_t complete_MOSAL_qget(call_result_t rc, int *size, void **data, struct mosalq_st * q_hdr_p ) +{ + struct mosalq_el *q_data_p; + + if (rc == MT_OK) + { /* get data */ + if (!IsListEmpty(&q_hdr_p->data_que)) + { + q_data_p = (struct mosalq_el*)RemoveHeadList( &q_hdr_p->data_que); + *size = q_data_p->size; + *data = q_data_p->data; + free_qel(q_data_p); + } + else + { /* it can only be on qdestroy with pending processes */ + *size = 0; + *data = NULL; + rc = MT_ERROR; + // free the queue header + q_hdr_p->proc_num--; + if (q_hdr_p->remove && !q_hdr_p->proc_num) + free_qhdr( q_hdr_p ); + } /* it can only be on qdestroy with pending processes */ + } /* get data */ + + MTL_TRACE1("<- MOSAL_qget, size=%d rc=%d (%s)\n\n", + *size, rc, mtl_strerror_sym(rc)); + return rc; +} +#endif + +call_result_t MOSAL_qget(MOSAL_qhandle_t qh, int *size, void **data, bool block) +{ + call_result_t rc = MT_OK; + struct mosalq_st * q_hdr_p; + struct mosalq_el *q_data_p, *q_proc_p; + bool was_empty = FALSE; + KIRQL irql; + + MTL_TRACE1("-> MOSAL_qget(0x%x, ..., %d)\n", (u_int32_t)qh, block); + /* check parameters */ + if (qh >= MOSAL_MAX_QHANDLES) + { + rc = MT_ENORSC; /* no such queue */ + goto exit; + } + q_hdr_p = QHANDLE_2_PTR(qh); + + // ??? Race with ISR: Here must be SpinLockAcquire() + KeRaiseIrql( (KIRQL)cur_max_irql, &irql); + if ( IsListEmpty( &q_hdr_p->data_que )) + { /* no data pending */ + if (block) + { /* blocking mode */ + + /* get proc element */ + q_proc_p = alloc_qel(); + if (q_proc_p == NULL) + { + rc = MT_ENORSC; + goto fix_irql; + } + else + { /* put the process on wait */ + MTL_TRACE4(" MOSAL_qget(0x%x, ...) - go sleep\n", (u_int32_t)qh); + // init the proc element + KeInitializeEvent( &q_proc_p->h_event, NotificationEvent, FALSE); + // queue it + InsertTailList( &q_hdr_p->proc_que, &q_proc_p->link ); + q_hdr_p->proc_num++; + // ??? Race with ISR: Here must be SpinLockRelease() + KeLowerIrql(irql); + // Wait until the IRP will be complete + KeWaitForSingleObject( + &q_proc_p->h_event, // event to wait for + Executive, // thread type (to wait into its context) + KernelMode, // mode of work + FALSE, // alertable + NULL // timeout + ); + goto exit; + + } /* put the process on wait */ + + } /* blocking mode */ + else + { + // ??? 
Race with ISR: Here must be SpinLockRelease() + rc = MT_EAGAIN; + goto fix_irql; + } + + } /* no data pending */ +fix_irql: + KeLowerIrql(irql); + +exit: +#ifdef SUSPEND_IRP + return complete_MOSAL_qget(rc, size, data, q_hdr_p ); +#else + + if (rc == MT_OK) + { /* get data */ + if (!IsListEmpty(&q_hdr_p->data_que)) + { + q_data_p = (struct mosalq_el*)RemoveHeadList( &q_hdr_p->data_que); + *size = q_data_p->size; + *data = q_data_p->data; + free_qel(q_data_p); + } + else + { /* it can only be on qdestroy with pending processes */ + *size = 0; + *data = NULL; + rc = MT_ERROR; + // free the queue header + q_hdr_p->proc_num--; + if (q_hdr_p->remove && !q_hdr_p->proc_num) + free_qhdr( q_hdr_p ); + } /* it can only be on qdestroy with pending processes */ + } /* get data */ + + MTL_TRACE1("<- MOSAL_qget, size=%d rc=%d (%s)\n\n", + *size, rc, mtl_strerror_sym(rc)); + return rc; +#endif +} + +call_result_t MOSAL_qput(MOSAL_qhandle_t qh, int size, void *data) +{ + call_result_t rc = MT_OK; + struct mosalq_st * q_hdr_p; + struct mosalq_el *q_data_p, *q_proc_p; + bool was_empty = FALSE; + + MTL_TRACE1("-> MOSAL_qput(%d,%d,)\n", (u_int32_t)qh, size); + /* check parameters */ + if (qh >= MOSAL_MAX_QHANDLES) + return MT_ENORSC; /* no such queue */ + q_hdr_p = QHANDLE_2_PTR(qh); + + /* get data element */ + q_data_p = alloc_qel(); + if (q_data_p == NULL) + return MT_ENORSC; + + /* fill data element */ + q_data_p->size = size; + q_data_p->data = data; + + /* put element into the queue */ + if ( IsListEmpty( &q_hdr_p->data_que )) + was_empty = TRUE; + InsertTailList( &q_hdr_p->data_que, &q_data_p->link ); + if (was_empty && !IsListEmpty( &q_hdr_p->proc_que )) + { /* awake waiting process */ + MTL_TRACE4(" MOSAL_qput(0x%x) - try to wake up\n", (u_int32_t)qh); + q_proc_p = (struct mosalq_el*)RemoveHeadList( &q_hdr_p->proc_que ); + q_hdr_p->proc_num--; + KeSetEvent (&q_proc_p->h_event, IO_NO_INCREMENT, FALSE); + free_qel( q_proc_p ); + } + + MTL_TRACE1("<- MOSAL_qput rc=%d (%s)\n\n", rc, mtl_strerror_sym(rc)); + return rc; +} + +call_result_t MOSAL_qdestroy(MOSAL_qhandle_t qh) +{ + call_result_t rc = MT_OK; + struct mosalq_st * q_hdr_p; + struct mosalq_el *q_data_p, *q_proc_p; + + MTL_TRACE1("-> MOSAL_qdestroy(0x%x)\n", (u_int32_t)qh); + /* check parameters */ + if (qh >= MOSAL_MAX_QHANDLES) + return MT_ENORSC; /* no such queue */ + q_hdr_p = QHANDLE_2_PTR(qh); + + // free data, if any + while ( !IsListEmpty( &q_hdr_p->data_que ) ) + { /* free data elements */ + // get first data element + q_data_p = (struct mosalq_el*)RemoveHeadList( &q_hdr_p->data_que); + // release its data + if (q_hdr_p->qdestroy_free && q_data_p->data) + q_hdr_p->qdestroy_free(q_data_p->data); + // release the element itself + free_qel( q_data_p ); + } + + // post waiting processes, if any + while ( !IsListEmpty( &q_hdr_p->proc_que ) ) + { + q_proc_p = (struct mosalq_el*)RemoveHeadList( &q_hdr_p->proc_que ); + KeSetEvent (&q_proc_p->h_event, IO_NO_INCREMENT, FALSE); + free_qel( q_proc_p ); + q_hdr_p->remove = TRUE; + } + + // free the queue header + if (!q_hdr_p->remove) + free_qhdr( q_hdr_p ); + + MTL_TRACE1("<- MOSAL_qdestroy rc=%d (%s)\n\n", rc, mtl_strerror_sym(rc)); + return rc; +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que_priv.h new file mode 100644 index 00000000..a77df184 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_que_priv.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2004-2005 Mellanox
Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_QUE_PRIV_H +#define H_MOSAL_QUE_PRIV_H + +#define MOSAL_MAX_QELEMENTS 1024 +void init_queues(); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync.c new file mode 100644 index 00000000..2b2b0a19 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync.c @@ -0,0 +1,647 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* !!! 
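/*
 * Illustrative usage sketch of the queue API above: a minimal producer/consumer
 * round trip. It assumes only MOSAL_qcreate/MOSAL_qput/MOSAL_qget/MOSAL_qdestroy
 * from mosal_que.c plus the TNVMALLOC/VFREE helpers used elsewhere in MOSAL;
 * demo_free() and the 16-byte payload are hypothetical names for the example.
 */
static void demo_free(void *data)
{
	/* invoked by MOSAL_qdestroy for every data element still queued */
	VFREE( data );
}

static void demo_queue_roundtrip(void)
{
	MOSAL_qhandle_t qh;
	void *data;
	int size;
	char *buf;

	if (MOSAL_qcreate( &qh, demo_free ) != MT_OK)
		return;

	/* producer side: hand a buffer over to the queue */
	buf = TNVMALLOC( char, 16 );
	if (buf != NULL && MOSAL_qput( qh, 16, buf ) != MT_OK)
		VFREE( buf );

	/* consumer side: blocking get; on an empty queue the caller sleeps
	   on a proc element until MOSAL_qput sets its event */
	if (MOSAL_qget( qh, &size, &data, TRUE ) == MT_OK)
		demo_free( data );

	/* leftover data elements, if any, are released through demo_free */
	MOSAL_qdestroy( qh );
}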
function ExTryToAcquireFastMutex() is not defined for WDM drivers, so we'll use KMUTEX :( */ +/* Defined in the SOURCES file as build flags */ +//#define USE_KMUTEX 0 + +#include "mosal_priv.h" + +//////////////////////////////////////////////////////////////////////////////// +// Debug Tools +//////////////////////////////////////////////////////////////////////////////// +#ifdef USE_TRACE +#define TRACE_ARRAY_SIZE 1024 +#define TRACE_CNT_NUM 8 +int g_trace_ix = 0; +char g_trace_buf[TRACE_ARRAY_SIZE]; +KSPIN_LOCK g_trace_sp; +long g_trace_cnt[TRACE_CNT_NUM]; +void MOSAL_trace_init() +{ + KeInitializeSpinLock(&g_trace_sp); + g_trace_ix = 0; + RtlZeroMemory( g_trace_cnt, sizeof(g_trace_cnt) ); +} +#endif + +void MOSAL_trace_log(char who, char value1, char value2, char value3) +{ +#ifdef USE_TRACE + KIRQL irql; + + irql = KeAcquireSpinLockRaiseToSynch(&g_trace_sp); + if (who < TRACE_CNT_NUM) + g_trace_cnt[who]++; + g_trace_buf[g_trace_ix++] = who; + g_trace_buf[g_trace_ix++] = value1; + g_trace_buf[g_trace_ix++] = value2; + g_trace_buf[g_trace_ix++] = value3; + if (g_trace_ix >= TRACE_ARRAY_SIZE) + g_trace_ix = 0; + KeReleaseSpinLock(&g_trace_sp, irql ); +#endif +} + + +//////////////////////////////////////////////////////////////////////////////// +// Synchronization object +//////////////////////////////////////////////////////////////////////////////// + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_init + * + * Description: + * Init sync object + * + * Parameters: + * obj_p(IN) pointer to synch object + * + * Returns: + * + ******************************************************************************/ +call_result_t MOSAL_syncobj_init(MOSAL_syncobj_t *obj_p) +{ + KeInitializeEvent( &obj_p->event, NotificationEvent , FALSE ); + return MT_OK; +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_waiton + * + * Description: + * cause process to sleep until synchronization object is signalled or time + * expires + * + * Parameters: + * obj_p(IN) pointer to synch object + * micro_sec(IN) max time to wait in microseconds + * + * Returns: + * MT_OK - woke up by event + * MT_EINTR - woke up by signal + * MT_ETIMEDOUT - woke up because of timeout + * MT_ERROR - some other error + * + ******************************************************************************/ +call_result_t MOSAL_syncobj_waiton(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec) +{ + NTSTATUS status; + LARGE_INTEGER timeout; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + timeout.QuadPart = ((int64_t)(u_int64_t)micro_sec) * (-10); + + if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE ) + status = KeWaitForSingleObject( &obj_p->event, Executive, pcs_mode, TRUE, NULL ); + else + status = KeWaitForSingleObject( &obj_p->event, Executive, pcs_mode, TRUE, &timeout ); + if (status == STATUS_SUCCESS) + return MT_OK; + if (status == STATUS_TIMEOUT) + return MT_ETIMEDOUT; + return MT_EINTR; +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_waiton_ui + * + * Description: + * cause process to sleep until synchronization object is signalled or time + * expires; this variant is not interruptible by signals + * + * Parameters: + * obj_p(IN) pointer to synch object + * micro_sec(IN) max time to wait in microseconds + * + * Returns: + * MT_OK - woke up by event + * MT_ETIMEDOUT - woke up because of timeout + * MT_ERROR - some other error + * + 
******************************************************************************/ +call_result_t MOSAL_syncobj_waiton_ui(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec) +{ + NTSTATUS status; + LARGE_INTEGER timeout; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + timeout.QuadPart = ((int64_t)(u_int64_t)micro_sec) * (-10); + +try_once_more: + if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE ) + status = KeWaitForSingleObject( &obj_p->event, Executive, pcs_mode, FALSE, NULL ); + else + status = KeWaitForSingleObject( &obj_p->event, Executive, pcs_mode, FALSE, &timeout ); + if (status == STATUS_SUCCESS) + return MT_OK; + if (status == STATUS_TIMEOUT) + return MT_ETIMEDOUT; + goto try_once_more; +} + + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_signal + * + * Description: + * signal the synchronization object + * + * Parameters: + * obj_p(IN) pointer to synch object + * + * Returns: + * N/A + * + ******************************************************************************/ +void MOSAL_syncobj_signal(MOSAL_syncobj_t *obj_p) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeSetEvent( &obj_p->event, 0, FALSE ); +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_syncobj_clear + * + * Description: + * reset sync object (i.e. bring it to init - not-signalled -state) + * + * Parameters: + * obj_p(IN) pointer to synch object + * + * Returns: + * + ******************************************************************************/ +void MOSAL_syncobj_clear(MOSAL_syncobj_t *obj_p) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeClearEvent( &obj_p->event ); +} + + + +//////////////////////////////////////////////////////////////////////////////// +// Semaphores +//////////////////////////////////////////////////////////////////////////////// + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_sem_init + * + * Description: + * init semaphore + * + * Parameters: + * sem_p(OUT) pointer to semaphore to be initialized + * count(IN) max number of processes that can hold the semaphore at the same time + * + * Returns: + * + +******************************************************************************/ +call_result_t MOSAL_sem_init(MOSAL_semaphore_t *sem_p, MT_size_t count) +{ + ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL); + KeInitializeSemaphore( &sem_p->sem, (LONG)count, LONG_MAX ); + return MT_OK; +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_sem_acq + * + * Description: + * acquire the semaphore + * + * Parameters: + * sem_p(IN) pointer to semaphore + * block(IN) if - FALSE, return immediately if could not acquire, otherwise block if necessary + * + * Returns: + * MT_OK - semaphore acquired + * MT_EAGAIN - semaphore not acquired (only - in non-blocking mode) + * + *******************************************************************************/ +call_result_t MOSAL_sem_acq(MOSAL_semaphore_t *sem_p, MT_bool block) +{ + NTSTATUS status; + LARGE_INTEGER timeout = { 0, 0 }; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + if (block) { + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + status = KeWaitForSingleObject( &sem_p->sem, Executive, pcs_mode, TRUE, NULL ); + } + else { + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + status = KeWaitForSingleObject( &sem_p->sem, 
Executive, pcs_mode, TRUE, &timeout ); + } + if (status == STATUS_SUCCESS) + return MT_OK; + if (status == STATUS_TIMEOUT) + return MT_ETIMEDOUT; + return MT_EINTR; +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_sem_acq_ui + * + * Description: + * acquire the semaphore + * + * Parameters: + * sem_p(IN) pointer to semaphore + * + * Returns: + * + *******************************************************************************/ +void MOSAL_sem_acq_ui(MOSAL_semaphore_t *sem_p) +{ + NTSTATUS status; + LARGE_INTEGER timeout = { 0, 0 }; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + while (1) { + status = KeWaitForSingleObject( &sem_p->sem, Executive, pcs_mode, FALSE, NULL ); + if (status == STATUS_SUCCESS) + break; + } +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_sem_rel + * + * Description: + * release the semaphore + * + * Parameters: + * sem_p(IN) pointer to semaphore + * + * Returns: + * N/A + * + ******************************************************************************/ +void MOSAL_sem_rel(MOSAL_semaphore_t *sem_p) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeReleaseSemaphore( &sem_p->sem, 0, 1, FALSE ); +} + +//////////////////////////////////////////////////////////////////////////////// +// Mutexes +//////////////////////////////////////////////////////////////////////////////// + +typedef struct MOSAL_mutex MOSAL_mutex_t; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_init + * + * Description: + * init mutex + * + * Parameters: + * mtx_p(OUT) pointer to mutex to be initialized + * + * Returns: + * + ******************************************************************************/ +call_result_t MOSAL_mutex_init(MOSAL_mutex_t *mtx_p) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + #ifdef USE_KMUTEX + KeInitializeMutex( &mtx_p->mutex, 0 ); + #else + ExInitializeFastMutex( &mtx_p->mutex ); + #endif + return MT_OK; +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_acq + * + * Description: + * acquire the mutex + * + * Parameters: + * mtx_p(IN) pointer to mutex + * block(IN) if - FALSE, return immediately if could not acquire, otherwise block if necessary + * + * Returns: + * MT_OK - mutex acquired + * MT_ETIMEDOUT - mutex not acquired (only - in non-blocking mode) + * MT_EINTR - mutex not acquired out of some other error + * +******************************************************************************/ +call_result_t MOSAL_mutex_acq(MOSAL_mutex_t *mtx_p, MT_bool block) +{ + NTSTATUS status = STATUS_SUCCESS; + LARGE_INTEGER timeout = { 0, 0 }; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + if (block) { + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + #ifdef USE_KMUTEX + status = KeWaitForSingleObject( &mtx_p->mutex, Executive, pcs_mode, TRUE, NULL ); + #else + ExAcquireFastMutex( &mtx_p->mutex ); + #endif + } + else { + #ifdef USE_KMUTEX + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + status = KeWaitForSingleObject( &mtx_p->mutex, Executive, pcs_mode, TRUE, &timeout ); + #else + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + /* !!! 
this function is not defined for WDM drivers, so we'll use KMUTEX :( */ + if (!ExTryToAcquireFastMutex( &mtx_p->mutex )) + status = STATUS_TIMEOUT; + #endif + } + if (status == STATUS_SUCCESS) + return MT_OK; + if (status == STATUS_TIMEOUT) + return MT_ETIMEDOUT; + return MT_EINTR; +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_acq_ui + * + * Description: + * acquire the mutex + * + * Parameters: + * mtx_p(IN) pointer to mutex + * + * Returns: + * MT_OK - mutex acquired + * +******************************************************************************/ +void MOSAL_mutex_acq_ui(MOSAL_mutex_t *mtx_p) +{ + NTSTATUS status = STATUS_SUCCESS; + LARGE_INTEGER timeout = { 0, 0 }; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + while (1) { + #ifdef USE_KMUTEX + status = KeWaitForSingleObject( &mtx_p->mutex, Executive, pcs_mode, FALSE, NULL ); + #else + ExAcquireFastMutex( &mtx_p->mutex ); + #endif + if (status == STATUS_SUCCESS) + break; + } +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_acq_to + * + * Description: + * acquire the mutex + * + * Parameters: + * mtx_p(IN) pointer to mutex + * micro_sec(IN) wait period + * + * Returns: + * MT_OK - mutex acquired + * MT_ETIMEDOUT - mutex not acquired out of timeout + * MT_EINTR - mutex not acquired out of some other error + * + ******************************************************************************/ +call_result_t MOSAL_mutex_acq_to(MOSAL_mutex_t *mtx_p, MT_size_t micro_sec) +{ + NTSTATUS status; + LARGE_INTEGER timeout; + KPROCESSOR_MODE pcs_mode = ExGetPreviousMode(); + + timeout.QuadPart = ((int64_t)(u_int64_t)micro_sec) * (-10); + + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE ) + status = KeWaitForSingleObject( &mtx_p->mutex, Executive, pcs_mode, TRUE, NULL ); + else + status = KeWaitForSingleObject( &mtx_p->mutex, Executive, pcs_mode, TRUE, &timeout ); + if (status == STATUS_SUCCESS) + return MT_OK; + if (status == STATUS_TIMEOUT) + return MT_ETIMEDOUT; + return MT_EINTR; +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_rel + * + * Description: + * release the mutex + * + * Parameters: + * mtx_p(IN) pointer to mutex + * + * Returns: + * N/A + * + ******************************************************************************/ +void MOSAL_mutex_rel(MOSAL_mutex_t *mtx_p) +{ + #ifdef USE_KMUTEX + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeReleaseMutex( &mtx_p->mutex, FALSE ); + #else + ASSERT(KeGetCurrentIrql() == APC_LEVEL); + ExReleaseFastMutex( &mtx_p->mutex ); + #endif +} + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_get_stat + * + * Description: + * get mutex status + * + * Parameters: + * mtx_p(IN) pointer to mutex + * + * Returns: + * If the return value is one, the state of the mutex object is signaled + * + ******************************************************************************/ +#if WINVER == 0x500 +/* + * The ntddk.h header file in the 3790 DDK for Win2k references but doesn't + * define this function. 
+ */ +NTKERNELAPI +LONG +KeReadStateMutant( + IN PRKMUTEX Mutex + ); +#endif + +LONG MOSAL_mutex_get_stat(MOSAL_mutex_t *mtx_p) +{ + #ifdef USE_KMUTEX + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + return KeReadStateMutex( &mtx_p->mutex ); + #else + ASSERT(FALSE); + return 0; + #endif +} + + +//////////////////////////////////////////////////////////////////////////////// +// Delay of execution +//////////////////////////////////////////////////////////////////////////////// + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_delay_execution + * + * Description: + * delay execution of this control path for the specified time period. Note + * that in some implementations it performs busy wait. + * + * Parameters: + * time_micro(IN) required delay time in microseconds + * + * Returns: + * N/A + * + ******************************************************************************/ +void MOSAL_delay_execution(u_int32_t time_micro) +{ + LARGE_INTEGER timeout; + timeout.QuadPart = ((int64_t)(u_int64_t)time_micro) * (-10); + KeDelayExecutionThread( KernelMode, FALSE, &timeout ); +} + +/****************************************************************************** + * Function + * MOSAL_usleep: + * + * Description: + * Suspends the execution of the current process for the given number of + * microseconds. The function guarantees to go to sleep for at least usec + * microseconds + * Parameters: + * usec(IN) number of microseconds to sleep + * + * Returns: + * MT_OK + * MT_EINTR signal received + * + ******************************************************************************/ +call_result_t MOSAL_usleep(u_int32_t usec) +{ + LARGE_INTEGER timeout = RtlEnlargedIntegerMultiply( - 10, usec ); + NTSTATUS status = KeDelayExecutionThread( KernelMode, TRUE, &timeout ); + if (status == STATUS_SUCCESS) + return MT_OK; + return MT_EINTR; +} + + +/****************************************************************************** + * Function + * MOSAL_usleep_ui: + * + * Description: + * Suspends the execution of the current process for the given number of + * microseconds. The function guarantees to go to sleep for at least usec + * microseconds. 
The function is not interruptible + * Parameters: + * usec(IN) number of microseconds to sleep + * + * Returns: void + * + ******************************************************************************/ +void MOSAL_usleep_ui(u_int32_t usec) +{ + LARGE_INTEGER timeout = RtlEnlargedIntegerMultiply( - 10, usec ); + KeDelayExecutionThread( KernelMode, FALSE, &timeout ); +} + + +/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * + * + * SpinLocks + * + * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */ + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_spinlock_init + * + * Description: + * initialize spinlock + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +call_result_t MOSAL_spinlock_init(MOSAL_spinlock_t *sp) +{ + KeInitializeSpinLock(&sp->lock); + return MT_OK; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_imp.h new file mode 100644 index 00000000..fc41ccf9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_imp.h @@ -0,0 +1,313 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
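/*
 * Illustrative usage sketch of the sync-object API implemented above: the
 * classic completion pattern, where one context blocks in
 * MOSAL_syncobj_waiton() and another calls MOSAL_syncobj_signal(). Only the
 * MOSAL_syncobj_* functions from mosal_sync.c are assumed; the demo_* names
 * and the 5-second timeout are arbitrary example choices.
 */
static call_result_t demo_wait_for_done(MOSAL_syncobj_t *done_p)
{
	call_result_t rc;

	MOSAL_syncobj_init( done_p );
	/* ... kick off asynchronous work that will signal done_p ... */
	rc = MOSAL_syncobj_waiton( done_p, 5 * 1000000 );	/* 5 sec, in usecs */
	/* rc: MT_OK - signalled; MT_ETIMEDOUT - timeout; MT_EINTR - signal */
	return rc;
}

/* completion side, e.g. the tail of a worker routine or a DPC */
static void demo_mark_done(MOSAL_syncobj_t *done_p)
{
	MOSAL_syncobj_signal( done_p );
}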
+ * + * $Id$ + */ + +#ifndef H_MOSAL_SYNC_IMP_H +#define H_MOSAL_SYNC_IMP_H + +/* spinlock */ +#ifdef __KERNEL__ + +#ifdef USE_TRACE +/* function names */ +#define FUNC_MOSAL_spinlock_lock 1 +#define FUNC_MOSAL_spinlock_unlock 2 +#define FUNC_MOSAL_spinlock_irq_lock 3 + +void MOSAL_trace_log(char who, char value1, char value2, char value3); + +#define TRACE_LOG(who,val1,val2,val3) MOSAL_trace_log(who,val1,val2,val3) +#else +#define TRACE_LOG(who,val1,val2,val3) +#endif + + +/* sync object */ +struct MOSAL_syncobj { + KEVENT event; +}; + +/* semaphore */ +struct MOSAL_semaphore { + KSEMAPHORE sem; +}; + +/* mutex */ +struct MOSAL_mutex { +#ifdef USE_KMUTEX + KMUTEX mutex; +#else + FAST_MUTEX mutex; +#endif +}; + + +struct MOSAL_spinlock +{ + KSPIN_LOCK lock; + u_int32_t flags; + KIRQL irql; +}; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_spinlock_lock + * + * Description: + * acquire spinlock (after elevating IRQL) + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline call_result_t MOSAL_spinlock_lock(struct MOSAL_spinlock *sp) +{ +#ifdef USE_TRACE + KIRQL irql = KeGetCurrentIrql(); +#endif + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeAcquireSpinLock(&sp->lock, &sp->irql); + TRACE_LOG(FUNC_MOSAL_spinlock_lock, irql,KeGetCurrentIrql(),0); + return(MT_OK); +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_spinlock_unlock + * + * Description: + * release spinlock (and decrease IRQL) + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline void MOSAL_spinlock_unlock(struct MOSAL_spinlock *sp) +{ +#ifdef USE_TRACE + KIRQL irql = KeGetCurrentIrql(); +#endif +#ifdef USE_SPINLOCK_SANITY_CHECKS +#ifdef SYNCH_LEVEL + /* sanity check */ + if (sp->irql == SYNCH_LEVEL) { + DbgPrint( "MOSAL_spinlock_unlock: IRQLs: current %d, new %d \n", KeGetCurrentIrql(), sp->irql); + } +#endif +#endif + + TRACE_LOG(FUNC_MOSAL_spinlock_unlock, irql, sp->irql,0); + KeReleaseSpinLock(&sp->lock, sp->irql ); +} + +NTKERNELAPI +KIRQL +FASTCALL +KeAcquireSpinLockRaiseToSynch ( + PKSPIN_LOCK SpinLock + ); + + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_spinlock_irq_lock + * + * Description: + * acquire spinlock in ISR + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline call_result_t MOSAL_spinlock_irq_lock( struct MOSAL_spinlock *sp ) +{ +#ifdef USE_TRACE + KIRQL irql = KeGetCurrentIrql(); +#endif + sp->irql = KeAcquireSpinLockRaiseToSynch(&sp->lock); +#ifdef USE_SPINLOCK_SANITY_CHECKS +#ifdef SYNCH_LEVEL + /* sanity */ + if (sp->irql == SYNCH_LEVEL) { + DbgPrint( "MOSAL_spinlock_irq_lock: prev IRQL %d\n", sp->irql); + } +#endif +#endif + TRACE_LOG(FUNC_MOSAL_spinlock_irq_lock, irql, KeGetCurrentIrql(), 0); + return(MT_OK); +} + +#define MOSAL_spinlock_dpc_lock MOSAL_spinlock_lock + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_mutex_get_stat + * + * Description: + * get mutex status + * + * Parameters: + * mtx_p(IN) pointer to mutex + * + * Returns: + * If the return 
value is one, the state of the mutex object is signaled + * + ******************************************************************************/ +LONG MOSAL_mutex_get_stat(struct MOSAL_mutex *mtx_p); + +#else +/* user mode */ + + +/* sync object - TBD */ +struct MOSAL_syncobj { + HANDLE event; +}; + +/* semaphore - TBD */ +struct MOSAL_semaphore { + HANDLE sem; +}; + +/* mutex object - TBD */ +struct MOSAL_mutex { + HANDLE mutex; +}; + +/* spinlock */ +typedef cl_spinlock_t MOSAL_spinlock_t; +#define UL_SPIN_LOCK_UNLOCKED 1 + +/****************************************************************************** + * Function (user-mode only): + * MOSAL_spinlock_lock + * + * Description: + * acquire spinlock (after elevating IRQL) + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline call_result_t priv_spinlock_lock(MOSAL_spinlock_t *sp) +{ + cl_spinlock_acquire (sp ); + return MT_OK; +} + +static _inline call_result_t MOSAL_spinlock_lock(MOSAL_spinlock_t *sp) +{ + return priv_spinlock_lock(sp); +} + +/****************************************************************************** + * Function (user-mode only): + * MOSAL_spinlock_unlock + * + * Description: + * release spinlock (and decrease IRQL) + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline void MOSAL_spinlock_unlock(MOSAL_spinlock_t *sp) +{ + cl_spinlock_release( sp); +} + + +/****************************************************************************** + * Function (user-mode only): + * MOSAL_spinlock_irq_lock + * + * Description: + * acquire spinlock in ISR (no change of IRQL) + * + * Parameters: + * sp(IN) MOSAL_spinlock_t* + * + * Returns: + * + ******************************************************************************/ +static _inline call_result_t MOSAL_spinlock_irq_lock( MOSAL_spinlock_t *sp ) +{ + return priv_spinlock_lock(sp); +} + +#define MOSAL_spinlock_dpc_lock MOSAL_spinlock_lock + +#endif + +typedef struct MOSAL_mutex MOSAL_mutex_t; +typedef struct MOSAL_syncobj MOSAL_syncobj_t; +typedef struct MOSAL_semaphore MOSAL_semaphore_t; + +#if defined(__KERNEL__) +typedef struct MOSAL_spinlock MOSAL_spinlock_t; +#endif + +#define MOSAL_UL_SPINLOCK_STATIC_INIT {UL_SPIN_LOCK_UNLOCKED} + +call_result_t MOSAL_mutex_acq_to(MOSAL_mutex_t *mtx_p, MT_size_t micro_sec); + +/* "free" functions are dummy for both kernel and user space under linux*/ +static inline call_result_t MOSAL_syncobj_free(MOSAL_syncobj_t *obj_p) +{ + UNREFERENCED_PARAMETER( obj_p ); + return MT_OK; +} + +static inline call_result_t MOSAL_sem_free(MOSAL_semaphore_t *sem_p) +{ + UNREFERENCED_PARAMETER( sem_p ); + return MT_OK; +} + +static inline call_result_t MOSAL_mutex_free(MOSAL_mutex_t *mtx_p) +{ + UNREFERENCED_PARAMETER( mtx_p ); + return MT_OK; +} + +#define MOSAL_SYNC_TIMEOUT_INFINITE 0 + +#endif /* H_MOSAL_SYNC_IMP_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_priv.h new file mode 100644 index 00000000..03829a0d --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_sync_priv.h @@ -0,0 +1,37 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
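/*
 * Illustrative usage sketch of the spinlock wrappers declared above:
 * protecting a shared counter. In kernel mode MOSAL_spinlock_lock() raises to
 * DISPATCH_LEVEL via KeAcquireSpinLock, while MOSAL_spinlock_irq_lock() uses
 * KeAcquireSpinLockRaiseToSynch for data shared with an ISR. The demo_* names
 * are hypothetical; MOSAL_spinlock_init() must have run once beforehand.
 */
static MOSAL_spinlock_t demo_lock;
static u_int32_t demo_counter = 0;

static void demo_counter_inc(void)
{
	MOSAL_spinlock_lock( &demo_lock );	/* saves the entry IRQL in the lock */
	demo_counter++;
	MOSAL_spinlock_unlock( &demo_lock );	/* restores the saved IRQL */
}

static void demo_counter_inc_isr_shared(void)
{
	MOSAL_spinlock_irq_lock( &demo_lock );	/* raises to SYNCH_LEVEL */
	demo_counter++;
	MOSAL_spinlock_unlock( &demo_lock );
}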
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_SYNC_PRIV_H +#define H_MOSAL_SYNC_PRIV_H + +#include "mosal_sync_imp.h" + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread.c new file mode 100644 index 00000000..fc13a2b4 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread.c @@ -0,0 +1,173 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mosal_priv.h" + +#define THREAD_WAIT_FOR_EXIT 0x80000000 + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_start + * + * Description: + * create a thread and run a t-function in its context + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * flags(IN) // flags for thread creation + * mtf(IN) t-function + * mtf_ctx(IN) t-function context + * + * Returns: + * MT_OK - thread started; for blocking mode - t-function is running; + * MT_EAGAIN - for blocking mode - timeout; thread hasn't started yet; + * other - error; + * + ******************************************************************************/ +static void ThreadProc(void * lpParameter ) +{ + MOSAL_thread_t *mto_p = (MOSAL_thread_t *)lpParameter; + mto_p->res = (u_int32_t)mto_p->func(mto_p->func_ctx); + MOSAL_syncobj_signal( &mto_p->sync ); + mto_p->th = 0; + PsTerminateSystemThread(mto_p->res); +} + +call_result_t MOSAL_thread_start( + MOSAL_thread_t *mto_p, // pointer to MOSAL thread object + u_int32_t flags, // flags for thread creation + MOSAL_thread_func_t mtf, // t-function name + void *mtf_ctx // t-function context (optionally) + ) +{ + NTSTATUS status; + + // sanity checks + if (mtf == NULL) + return MT_EINVAL; + + // init thread object + mto_p->func = mtf; + mto_p->func_ctx = mtf_ctx; + mto_p->flags = flags; + MOSAL_syncobj_init( &mto_p->sync ); + + // create and run the thread + ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL); + status = PsCreateSystemThread( &mto_p->th, (ACCESS_MASK)0L , NULL, NULL, NULL, ThreadProc, mto_p ); + if (status != STATUS_SUCCESS) + return MT_ERROR; + + return status; +} + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_kill + * + * Description: + * terminate the thread brutally + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * + * Returns: + * MT_OK - thread terminated + * MT_ERROR - a failure on thread termination + * + ******************************************************************************/ +call_result_t MOSAL_thread_kill( + MOSAL_thread_t *mto_p // pointer to MOSAL thread object + ) + { + /* didn't find a way to perform that */ + return MT_ERROR; + } + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_wait_for_exit + * + * Description: + * wait for the thread to exit and collect its exit code + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * micro_sec(IN) timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS + * exit_code(OUT) return code of the thread + * + * Returns: + * MT_OK - thread exited; *exit_code holds its return code; + * MT_ETIMEDOUT - wait timed out before the thread exited; + * other - error; + * + ******************************************************************************/ +call_result_t MOSAL_thread_wait_for_exit( + MOSAL_thread_t *mto_p, // pointer to MOSAL thread object + MT_size_t micro_sec, // timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS + u_int32_t *exit_code // return code of the thread + ) +{ + call_result_t status; + mto_p->flags |= THREAD_WAIT_FOR_EXIT; + status = MOSAL_syncobj_waiton(&mto_p->sync, micro_sec); + if (exit_code != NULL) { + if (status == MT_OK ) + *exit_code = mto_p->res; + else + *exit_code = status; + } + return status; +} + 
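/*
 * Illustrative usage sketch of the two functions above: start a worker thread
 * and reap its exit code. demo_worker() and its return value are hypothetical;
 * MOSAL_SYNC_TIMEOUT_INFINITE (per mosal_sync_imp.h) requests an unbounded
 * wait, and MT_OK is assumed to compare equal to the success return of
 * MOSAL_thread_start().
 */
static int demo_worker(void *ctx)
{
	/* ... the actual work ... */
	return 0;	/* becomes *exit_code at the waiter */
}

static call_result_t demo_run_worker(void)
{
	MOSAL_thread_t mto;
	u_int32_t exit_code;

	if (MOSAL_thread_start( &mto, 0, demo_worker, NULL ) != MT_OK)
		return MT_ERROR;
	/* blocks until ThreadProc() signals mto.sync */
	return MOSAL_thread_wait_for_exit( &mto, MOSAL_SYNC_TIMEOUT_INFINITE, &exit_code );
}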
+/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_set_name + * + * Description: + * set thread name + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * name(IN) thread name + * + * Returns: + * + ******************************************************************************/ + void MOSAL_thread_set_name( + MOSAL_thread_t *mto_p, // pointer to MOSAL thread object + char *name // thread name + ) +{ +} + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread_imp.h new file mode 100644 index 00000000..993f91b2 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_thread_imp.h @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_MOSAL_THREAD_IMP_H +#define H_MOSAL_THREAD_IMP_H + +#define MOSAL_KTHREAD_CLONE_FLAGS 0 + +/* MOSAL thread object */ +struct MOSAL_thread; +typedef struct MOSAL_thread MOSAL_thread_t; + +#ifndef __KERNEL__ +typedef void* (*MOSAL_thread_func_t)( void * ); +#else +typedef int (*MOSAL_thread_func_t)( void * ); +#endif + +/* MOSAL thread object implementation */ +struct MOSAL_thread { + HANDLE th; /* thread handle */ + MOSAL_thread_func_t func; /* t-function */ + void * func_ctx; /* t-function context */ + u_int32_t flags; /* flags for thread creation */ + u_int32_t res; /* return code */ + MOSAL_syncobj_t sync; /* sync object */ +}; + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_is_in_work + * + * Description: + * check, whether thread is working (i.e hasn't exited yet) + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * + * Returns: + * TRUE - in work + * FALSE - exited + * + ******************************************************************************/ +static _inline MT_bool MOSAL_thread_is_in_work( + MOSAL_thread_t *mto_p /* pointer to MOSAL thread object */ + ) +{ + return (MT_bool)(mto_p->th != 0); +} + + +/****************************************************************************** + * Function: + * MOSAL_thread_wait_for_exit + * + * Description: + * wait for a target thread to exit + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * micro_sec(IN) timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS + * exit_code(OUT) return code of the thread + * + * Returns: + * MT_OK - thread started; for blocking mode - t-function is running; + * MT_EAGAIN - for blocking mode - timeout; thread hasn't started yet; + * other - error; + * + ******************************************************************************/ +call_result_t MOSAL_thread_wait_for_exit( + MOSAL_thread_t *mto_p, /* pointer to MOSAL thread object */ + MT_size_t micro_sec, /* timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS */ + u_int32_t *exit_code /* return code of the thread */ + ); + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_wait_for_term + * + * Description: + * wait till the target thread exits + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * exit_code(OUT) return code of the thread + * + * Returns: + * MT_OK - thread started; for blocking mode - t-function is running; + * MT_EAGAIN - thread hasn't started yet; + * other - error; + * + ******************************************************************************/ +#define MOSAL_thread_wait_for_term(mto_p,exit_code) MOSAL_thread_wait_for_exit(mto_p,MOSAL_SYNC_TIMEOUT_INFINITE,exit_code) + +#ifdef __KERNEL__ + + +/* + * cloning flags (taken from sched.h) + */ +#define MOSAL_KTHREAD_CSIGNAL 0 /* signal mask to be sent at exit */ +#define MOSAL_KTHREAD_CLONE_VM 0 /* set if VM shared between processes */ +#define MOSAL_KTHREAD_CLONE_FS 0 /* set if fs info shared between processes */ +#define MOSAL_KTHREAD_CLONE_FILES 0 /* set if open files shared between processes */ +#define MOSAL_KTHREAD_CLONE_SIGHAND 0 /* set if signal handlers and blocked signals shared */ +#define MOSAL_KTHREAD_CLONE_PID 0 /* set if pid shared */ +#define MOSAL_KTHREAD_CLONE_PTRACE 0 /* set if we want to let tracing continue on the child too */ +#define MOSAL_KTHREAD_CLONE_VFORK 0 /* set if the parent wants the child to wake it up on 
mm_release */ +#define MOSAL_KTHREAD_CLONE_PARENT 0 /* set if we want to have the same parent as the cloner */ +#define MOSAL_KTHREAD_CLONE_THREAD 0 /* Same thread group? */ +#define MOSAL_KTHREAD_CLONE_NEWNS 0 /* New namespace group? */ + +#define MOSAL_KTHREAD_CLONE_SIGNAL 0 + +/****************************************************************************** + * Function (kernel-mode only): + * MOSAL_thread_set_name + * + * Description: + * set thread name + * + * Parameters: + * mto_p(IN) pointer to MOSAL thread object + * name(IN) thread name + * + * Returns: + * + ******************************************************************************/ + void MOSAL_thread_set_name( + MOSAL_thread_t *mto_p, /* pointer to MOSAL thread object */ + char *name /* thread name */ + ); + + +#endif + +#endif /* H_MOSAL_SYNC_IMP_H */ + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer.c new file mode 100644 index 00000000..5c12a2d9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer.c @@ -0,0 +1,726 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mosal_priv.h" + + + +// +// externals +// +// TAVOR +extern MOSAL_dev_t MOSAL_dev_db[MOSAL_MAXDEV]; + +// +// Restrictrions +// +#define MAX_SIMULT_ISRS 100 // number of pending ISRs in DPC context queues + +// +// Static data +// +LIST_ENTRY ctx_lifo; +void * ctx_array = NULL; + +//////////////////////////////////////////////////////////////////////////////// +// ISR functions +//////////////////////////////////////////////////////////////////////////////// + +/****************************************************************************** + * Function: general_isr + * + * Description: device ISR handler + * + * Parameters: + * pi_pIntObject........... Interrupt Object + * pi_pContext............. 
My device context + * + * Returns: + * + *****************************************************************************/ +static BOOLEAN +generic_isr( + PKINTERRUPT pi_pIntObject, + PVOID pi_pContext + ) +{ /* general_isr */ + MOSAL_ISR_t * isr_p = (MOSAL_ISR_t *)pi_pContext; + + return( isr_p->func(isr_p->ctx, NULL, pi_pIntObject ) ); + +} /* general_isr */ + + +/* + * Interrupt handler registration + */ +call_result_t MOSAL_ISR_set( + MOSAL_ISR_t * isr_p, + MOSAL_ISR_func_t handler, + MOSAL_IRQ_ID_t irq, + char * name, + MT_ulong_ptr_t ctx + ) +{ + int name_sz = (int)strlen(name) + 1; + MOSAL_dev_t * dev_p = find_device_by_name(name); + NTSTATUS status; + + // allocate memory + isr_p->name = TNVMALLOC( char, name_sz ); + if (!isr_p->name) + return MT_EMALLOC; + + // store parameters in the object + isr_p->func = handler; + isr_p->irq = irq; + isr_p->ctx = ctx; + RtlCopyMemory(isr_p->name, name, name_sz); + + /* connect interrupt */ + dev_p->int_object_p = NULL; + status = IoConnectInterrupt( + &isr_p->int_object_p, /* InterruptObject */ + (PKSERVICE_ROUTINE) generic_isr, /* ISR */ + (PVOID)isr_p, /* ISR context */ + &dev_p->isr_lock, /* spinlock */ + irq, /* interrupt vector */ + dev_p->irql, /* IRQL */ + dev_p->irql, /* Synchronize IRQL */ + dev_p->int_mode, /* interrupt type: LATCHED or LEVEL */ + dev_p->int_shared, /* vector shared or not */ + dev_p->affinity, /* interrupt affinity */ + FALSE /* whether to save Float registers */ + ); + + if (!NT_SUCCESS(status)) + return MT_ERROR; /* failed to connect interrupt */ + else + return MT_OK; +} + +/* + * Interrupt handler registration + */ +call_result_t MOSAL_ISR_unset( + MOSAL_ISR_t * isr_p + ) +{ + // free memory + if ( isr_p->name ) + VFREE( isr_p->name ); + + /* disconnect interrupt */ + if (isr_p->int_object_p != NULL) + { + IoDisconnectInterrupt( isr_p->int_object_p ); + memset( isr_p, 0, sizeof(MOSAL_ISR_t) ); + } + + return MT_OK; +} + +static BOOLEAN +general_isr( + PKINTERRUPT pi_pIntObject, + PVOID pi_pContext + ) +{ /* general_isr */ + MOSAL_dev_t *dev_p = (MOSAL_dev_t *)pi_pContext; + + /* call device handler (in fact, it's general MDHAL handler */ +#ifdef HANDLE_INTERRUTS_AT_DPC + dev_p->thandler((MT_ulong_ptr_t)dev_p->dev_id, (void*)dev_p, NULL ); +#else + dev_p->ghandler(dev_p->irq_num, dev_p->dev_id, NULL ); +#endif + + return TRUE; + +} /* general_isr */ + + +call_result_t MOSAL_set_intr_handler(intr_handler_t handler, MOSAL_IRQ_ID_t irq, +char *name, void* dev_id) +{ + MOSAL_dev_t * dev_p = find_device_by_name(name); + NTSTATUS status; + + /* get pointer to device object */ + if (dev_p == NULL) + return MT_ENODEV; + + /* store MDHAL interrupt context */ + dev_p->ghandler = handler; + dev_p->irq_num = irq; + dev_p->dev_id = dev_id; + + /* connect interrupt */ + dev_p->int_object_p = NULL; + status = IoConnectInterrupt( + &dev_p->int_object_p, /* InterruptObject */ + (PKSERVICE_ROUTINE) general_isr, /* ISR */ + (PVOID)dev_p, /* ISR context */ + &dev_p->isr_lock, /* spinlock */ + irq, /* interrupt vector */ + dev_p->irql, /* IRQL */ + dev_p->irql, /* Synchronize IRQL */ + dev_p->int_mode, /* interrupt type: LATCHED or LEVEL */ + dev_p->int_shared, /* vector shared or not */ + dev_p->affinity, /* interrupt affinity */ + FALSE /* whether to save Float registers */ + ); + + if (!NT_SUCCESS(status)) + return MT_ERROR; /* failed to connect interrupt */ + else + return MT_OK; +} + +call_result_t MOSAL_unset_intr_handler(intr_handler_t handler, + MOSAL_IRQ_ID_t irq, + void* dev_id) +{ + MOSAL_dev_t * dev_p = &MOSAL_dev_db[0]; + 
int i; + + /* get pointer to device object */ + for (i=0; i<MOSAL_MAXDEV; i++, dev_p++) { + if (dev_p->ghandler == handler && dev_p->irq_num == irq && dev_p->dev_id == dev_id) break; + } + if (i >= MOSAL_MAXDEV) + return MT_ENODEV; + + /* disconnect interrupt */ + if (dev_p->int_object_p != NULL) + { + IoDisconnectInterrupt( dev_p->int_object_p ); + dev_p->int_object_p = NULL; + dev_p->thandler = NULL; + dev_p->ghandler = NULL; + dev_p->irq_num = 0; + dev_p->dev_id = 0; + } + + return MT_OK; +} + + +//////////////////////////////////////////////////////////////////////////////// +// DPC functions +//////////////////////////////////////////////////////////////////////////////// + +/****************************************************************************** + * Function: + * deinit_dpc + * + * Description: + * deinit everything related to DPC handling + * + * Parameters: + * + * Returns: + * + ******************************************************************************/ + +void deinit_dpc() +{ +#ifdef SUPPORT_MULTIPLE_CTX + // release DPC contexts + if (ctx_array != NULL) + ExFreePool( ctx_array ); +#endif +} + +/****************************************************************************** + * Function: + * init_dpc + * + * Description: + * init everything related to DPC handling + * + * Parameters: + * + * Returns: + * MT_OK - OK, error - otherwise + * + ******************************************************************************/ + +call_result_t init_dpc() +{ +#ifdef SUPPORT_MULTIPLE_CTX + int i; + LDPC_CONTEXT_t * p; + + // init static data + InitializeListHead( &ctx_lifo ); + ctx_array = NULL; + + // allocate array of DPC contexts + p = (LDPC_CONTEXT_t *)ExAllocatePoolWithTag( NonPagedPool, + sizeof(LDPC_CONTEXT_t) * MAX_SIMULT_ISRS, ' cpd' ); + if (p == NULL) + return MT_ENORSC; + ctx_array = (PVOID)p; + + // create LIFO list of DPC contexts + for (i=0; i < MAX_SIMULT_ISRS; i++, p++) + { + InsertTailList( &ctx_lifo, &p->link ); + } +#endif + return MT_OK; +} + +/****************************************************************************** + * Function: + * generic_dpc + * + * Description: + * calls user platform-independent DPC for all requests, pending the same MOSAL DPC object + * + * Parameters: + * Dpc - Win2K DPC object + * DeferredContext - its context + * SystemArgument1 - context, relayed by ISR, inserting DPC; not in use + * SystemArgument2 - context, relayed by ISR, inserting DPC; not in use + * + * Returns: + * + * Notes: + * Before inserting DPC ISR must add its context parameters to the MOSAL DPC + * object's context chain + * + ******************************************************************************/ +BOOLEAN get_dpc_ctx( PVOID SynchronizeContext ) +{ +#ifdef SUPPORT_MULTIPLE_CTX + MOSAL_DPC_t *d = (MOSAL_DPC_t *)SynchronizeContext; + d->dpc_ctx_p = (LDPC_CONTEXT_t *)RemoveHeadList( &d->ctx_head ); +#endif + return TRUE; +} + +BOOLEAN free_dpc_ctx( PVOID SynchronizeContext ) +{ +#ifdef SUPPORT_MULTIPLE_CTX + LDPC_CONTEXT_t *p = (LDPC_CONTEXT_t *)SynchronizeContext; + InsertTailList( &ctx_lifo, &p->link ); +#endif + return TRUE; +} + +static VOID generic_dpc( + IN PKDPC Dpc, + IN PVOID DeferredContext, + IN PVOID SystemArgument1, + IN PVOID SystemArgument2 + ) +{ + MOSAL_DPC_t *d = (MOSAL_DPC_t *)DeferredContext; + + if ( d->type == MOSAL_SINGLE_CTX) + { /* the DPC has no race conditions */ + + // fill DPC context + d->dpc_ctx.isr_ctx1 = SystemArgument1; + d->dpc_ctx.isr_ctx2 = SystemArgument2; + + // call user's DPC + if (d->func) + (d->func)(&d->dpc_ctx); + + } /* the DPC has no race conditions
*/ + + + else + + // if it is MOSAL_NO_CTX DPC - all is simple + if ( d->type == MOSAL_NO_CTX) + { /* the DPC has no race conditions */ + + // call user's DPC + if (d->func) + (d->func)(&d->dpc_ctx); + + } /* the DPC has no race conditions */ + + // if it is MOSAL_NO_CTX DPC - all is simple + +#ifdef SUPPORT_MULTIPLE_CTX + + else + /* !!! Pay attention: + In order to use this part, one has to use + MOSAL_DPC_schedule_ctx( user_dpc_ctx, whatever, int_object_p ); + */ + + { /* the DPC can have race conditions with ISRs and itself */ + PKINTERRUPT int_object_p = (PKINTERRUPT)SystemArgument2; + + // + // One DPC can be inserted several times by the same or different ISR + // Because only one DPC can stay in queue, the ISR would first link the relayed to context + // to the DPC context queue and then would try to enqueue the DPC + // + while (1) + { /* handle all the requests to this DPC */ + + // the requests are added by ISRs with guarded routines + // so the below removing is to be guarded against DPC-ISR race + // Note that the queue can be empty in case when the same DPC was scheduled on 2 + // processors and the first one has already emptied the queue ! + // Note also that 2 DPCs can run simultaneously on 2 processors + // over the same MOSAL_DPC object ! In this case the same user DPC can be simultaneously + // called with 2 different contexts + + // get first DPC context + KeSynchronizeExecution( int_object_p, get_dpc_ctx, (PVOID)d ); + if (d->ldpc_ctx_p == NULL) + break; + + // call user's DPC + if (d->func) + (d->func)(&d->ldpc_ctx_p->dpc_ctx); + + // release the request element + KeSynchronizeExecution( int_object_p, free_dpc_ctx, (PVOID)d->ldpc_ctx_p ); + + } /* handle all the requests to this DPC */ + + } /* the DPC can have race conditions with ISRs and itself */ + +#endif +} + + + /****************************************************************************** + * Function: + * MOSAL_DPC_add_ctx + * + * Description: + * add DPC request context to a MOSAL DPC object + * + * Parameters: + * d - MOSAL DPC object + * ctx1 - context, relayed by ISR, inserting DPC; + * ctx2 - context, relayed by ISR, inserting DPC; + * + * Returns: + * MT_ENORSC - if no ctx structures + * MT_OK - otherwise + * + * Notes: + * A helper routine for ISR, inserting DPC + * + ******************************************************************************/ +call_result_t MOSAL_DPC_add_ctx(MOSAL_DPC_t *d, PVOID ctx1, PVOID ctx2) +{ +#ifdef SUPPORT_MULTIPLE_CTX + // allocate a context structure + LDPC_CONTEXT_t *p; + MOSAL_spinlock_irq_lock( &d->lock ); + p = (LDPC_CONTEXT_t *)RemoveHeadList( &ctx_lifo); + if (p == NULL) { + MOSAL_spinlock_unlock( &d->lock ); + return MT_ENORSC; + } + + // fill the context + p->dpc_ctx.func_ctx = d->dpc_ctx.func_ctx; + p->dpc_ctx.isr_ctx1 = ctx1; + p->dpc_ctx.isr_ctx2 = ctx2; + + // add it to the MOSAL DPC object + InsertTailList( &d->ctx_head, &p->link ); + MOSAL_spinlock_unlock( &d->lock ); +#endif + + return MT_OK; +} + + + /****************************************************************************** + * Function: + * MOSAL_DPC_init + * + * Description: + * init a MOSAL DPC object + * + * Parameters: + * d - MOSAL DPC object + * func - user DPC; + * data - its data; + * type - type of DPC + * + * Returns: + * + * Notes: + * Callers of this routine must be running at IRQL PASSIVE_LEVEL + * + ******************************************************************************/ + +void MOSAL_DPC_init(MOSAL_DPC_t *d, MOSAL_DPC_func_t func, MT_ulong_ptr_t func_ctx, MOSAL_DPC_type_t 
+ /******************************************************************************
+ * Function:
+ * MOSAL_DPC_init
+ *
+ * Description:
+ * init a MOSAL DPC object
+ *
+ * Parameters:
+ * d - MOSAL DPC object
+ * func - user DPC;
+ * func_ctx - its ("static") context;
+ * type - type of DPC
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine must be running at IRQL PASSIVE_LEVEL
+ *
+ ******************************************************************************/
+
+void MOSAL_DPC_init(MOSAL_DPC_t *d, MOSAL_DPC_func_t func, MT_ulong_ptr_t func_ctx, MOSAL_DPC_type_t type )
+{
+ // init MOSAL DPC object
+ d->func = (void *)func;
+ d->dpc_ctx.func_ctx = func_ctx;
+ d->type = type;
+ MOSAL_spinlock_init( &d->lock );
+
+#ifdef SUPPORT_MULTIPLE_CTX
+ InitializeListHead( &d->ctx_head );
+#endif
+
+ // init the OS DPC object with the generic DPC instead of the user's one
+ KeInitializeDpc( &d->dpc, generic_dpc, (PVOID)d );
+}
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_DPC_schedule
+ *
+ * Description:
+ * schedule the user DPC
+ *
+ * Parameters:
+ * d - MOSAL DPC object
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine may be running at any IRQL; with
+ * DPC_IS_DIRECT_CALL the user DPC runs in the caller's context
+ *
+ ******************************************************************************/
+
+MT_bool MOSAL_DPC_schedule(MOSAL_DPC_t *d)
+{
+#if defined( DPC_IS_DIRECT_CALL )
+ /* call user's DPC directly */
+ if (d->func)
+ {
+ (d->func)(&d->dpc_ctx);
+ return TRUE;
+ }
+ else
+ {
+ return FALSE;
+ }
+#else
+ return KeInsertQueueDpc( &d->dpc, NULL, NULL );
+#endif
+}
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_DPC_schedule_ctx
+ *
+ * Description:
+ * schedule the user DPC, relaying a context
+ *
+ * Parameters:
+ * d - MOSAL DPC object
+ * isr_ctx1 - context, relayed by the ISR inserting the DPC;
+ * isr_ctx2 - context, relayed by the ISR inserting the DPC;
+ *
+ * Returns:
+ *
+ * Notes:
+ * (Windows) Callers of this routine may be running at any IRQL;
+ * it is typically called from an ISR
+ *
+ ******************************************************************************/
+MT_bool MOSAL_DPC_schedule_ctx(MOSAL_DPC_t *d, void * isr_ctx1, void * isr_ctx2)
+{
+ return KeInsertQueueDpc( &d->dpc, isr_ctx1, isr_ctx2 );
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Timer functions
+////////////////////////////////////////////////////////////////////////////////
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_timer_init
+ *
+ * Description:
+ * init a MOSAL timer object
+ *
+ * Parameters:
+ * t - MOSAL timer object
+ *
+ * Returns:
+ *
+ * Notes:
+ * 1. Callers of this routine must be running at IRQL PASSIVE_LEVEL;
+ * 2. Every timer must use its own MOSAL timer object !
+ * 3. Different MOSAL timer objects may use the same DPC function;
+ *
+ ******************************************************************************/
+
+__INLINE__ void MOSAL_timer_init(MOSAL_timer_t *t)
+{
+ // init MOSAL DPC object
+ MOSAL_DPC_init( &t->mdpc, NULL, (MT_ulong_ptr_t)NULL, MOSAL_NO_CTX);
+ KeInitializeTimer( &t->timer );
+}
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_timer_add
+ *
+ * Description:
+ * start timer
+ *
+ * Parameters:
+ * t - MOSAL timer object
+ * func - user DPC;
+ * data - its data;
+ * usecs - interval; 'func' will be called in 'usecs' microseconds
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+
+__INLINE__ void MOSAL_timer_add(MOSAL_timer_t *t, MOSAL_DPC_func_t func, MT_ulong_ptr_t data, long usecs)
+{
+ // recalculate the timeout value: it is given in usecs, our tick is 100 ns,
+ // and a negative value means time relative to the current moment
+ LARGE_INTEGER DueTime;
+ DueTime.QuadPart = ((int64_t)usecs * (-10));
+
+ // update the DPC object
+ MOSAL_spinlock_irq_lock( &t->mdpc.lock );
+ t->mdpc.func = func;
+ t->mdpc.dpc_ctx.func_ctx = data;
+ MOSAL_spinlock_unlock( &t->mdpc.lock );
+
+ // start timer
+ KeSetTimer( &t->timer, DueTime, &t->mdpc.dpc );
+}
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_timer_del
+ *
+ * Description:
+ * delete timer
+ *
+ * Parameters:
+ * t - MOSAL timer object
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+
+__INLINE__ void MOSAL_timer_del(MOSAL_timer_t *t)
+{
+ KeCancelTimer( &t->timer );
+}
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_timer_mod
+ *
+ * Description:
+ * stop the running timer and restart it in 'usecs' microseconds
+ *
+ * Parameters:
+ * t - MOSAL timer object
+ * usecs - interval; 'func' will be called in 'usecs' microseconds
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+
+__INLINE__ void MOSAL_timer_mod(MOSAL_timer_t *t, long usecs)
+{
+ // recalculate the timeout value: it is given in usecs, our tick is 100 ns,
+ // and a negative value means time relative to the current moment
+ LARGE_INTEGER DueTime;
+ DueTime.QuadPart = ((int64_t)usecs * (-10));
+
+ // start timer
+ KeSetTimer( &t->timer, DueTime, &t->mdpc.dpc );
+}
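A minimal timer sketch, assuming hypothetical names my_timeout_dpc and g_tmo; the 500000 microsecond value is arbitrary:

/* Sketch: one-shot timeout using the MOSAL timer API above */
static MOSAL_timer_t g_tmo;

static void my_timeout_dpc( struct DPC_CONTEXT *ctx )
{
	/* ctx->func_ctx carries the 'data' value passed to MOSAL_timer_add() */
}

static void arm_timeout(void)
{
	MOSAL_timer_init( &g_tmo );
	MOSAL_timer_add( &g_tmo, my_timeout_dpc, (MT_ulong_ptr_t)0, 500000 ); /* ~500 ms */
	/* MOSAL_timer_mod( &g_tmo, 1000000 ) would restart it for ~1 s */
}

static void disarm_timeout(void)
{
	MOSAL_timer_del( &g_tmo );	/* cancel if it has not fired yet */
}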
+////////////////////////////////////////////////////////////////////////////////
+// Time functions
+////////////////////////////////////////////////////////////////////////////////
+
+
+ /******************************************************************************
+ * Function:
+ * MOSAL_time_get_clock
+ *
+ * Description:
+ * get the current system clock (seconds and nanoseconds)
+ *
+ * Parameters:
+ * ts(OUT) - pointer to a structure, describing the time
+ *
+ * Returns:
+ *
+ * Notes:
+ * Callers of this routine must be running at IRQL <= DISPATCH_LEVEL
+ *
+ ******************************************************************************/
+void MOSAL_time_get_clock(MOSAL_timespec_t *ts)
+{
+ // get absolute time in ticks (100-nanosecond units)
+ LARGE_INTEGER AbsTime;
+ KeQuerySystemTime( &AbsTime );
+
+ // convert to MOSAL_timespec_t; the remainder is in 100-ns ticks,
+ // so scale it to nanoseconds
+ ts->tv_sec = (ULONG)(AbsTime.QuadPart / 10000000);
+ ts->tv_nsec = (ULONG)(AbsTime.QuadPart % 10000000) * 100;
+}
+
+u_int32_t MOSAL_get_cnt()
+{
+ static LONG cnt = 0;
+ return (u_int32_t)InterlockedIncrement(&cnt);
+}
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_imp.h
new file mode 100644
index 00000000..549e4cc1
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_imp.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_TIMER_IMP_H +#define H_MOSAL_TIMER_IMP_H + +/* IRQ back compatibility */ +#ifndef IRQ_HANDLED + typedef void irqreturn_t; + #define IRQ_HANDLED +#endif + +typedef u_int32_t MOSAL_IRQ_ID_t; +typedef void MOSAL_intr_regs_t; +typedef BOOLEAN (*intr_handler_t)(int irq, void *dev_id, MOSAL_intr_regs_t* regs_p); +/* user ISR function prototype */ +typedef BOOLEAN (*MOSAL_ISR_func_t)(MT_ulong_ptr_t func_ctx, void * isr_ctx1, void * isr_ctx2 ); + +#ifdef MT_KERNEL + +#include "mosal_sync_imp.h" + +/* ISRs */ + + +struct MOSAL_ISR { + MOSAL_ISR_func_t func; + MOSAL_IRQ_ID_t irq; + char * name; + MT_ulong_ptr_t ctx; + void * isr_ctx1; + void * isr_ctx2; + PKINTERRUPT int_object_p; +}; + + +/* DPCs */ + +/* the structure contains the context, relayed to user DPC */ + +struct DPC_CONTEXT { + MT_ulong_ptr_t func_ctx; /* DPC ("static") context */ + void * isr_ctx1; /* "dynamic" context, relayed by ISR */ + void * isr_ctx2; /* "dynamic" context, relayed by ISR */ +}; + + +// user DPC function prototype +typedef void (*MOSAL_DPC_func_t)(struct DPC_CONTEXT * func_ctx); + + +/* linked DPC context */ +struct LDPC_CONTEXT { + LIST_ENTRY link; /* must be the first ! 
*/
+ struct DPC_CONTEXT dpc_ctx; /* user DPC context */
+};
+
+typedef struct LDPC_CONTEXT LDPC_CONTEXT_t;
+
+/* forward declaration of the MOSAL device object */
+struct MOSAL_dev;
+
+/* types of DPC (if there is no need to relay info from the ISR to the DPC, use MOSAL_NO_CTX) */
+typedef enum { MOSAL_NO_CTX, MOSAL_SINGLE_CTX, MOSAL_MULTIPLE_CTX } MOSAL_DPC_type_t;
+
+/* MOSAL DPC object */
+struct MOSAL_DPC {
+ KDPC dpc; /* OS DPC object */
+ MOSAL_DPC_func_t func; /* user DPC to be called */
+ struct DPC_CONTEXT dpc_ctx; /* user DPC context */
+ MOSAL_DPC_type_t type; /* type of DPC */
+ MOSAL_spinlock_t lock; /* spinlock */
+
+#ifdef SUPPORT_MULTIPLE_CTX
+ LIST_ENTRY ctx_head; /* queue of requests */
+ LDPC_CONTEXT_t * ldpc_ctx_p; /* returned value from a synchronized routine */
+ struct MOSAL_dev * isr_ctx; /* pointer to MOSAL device */
+#endif
+};
+
+/* Macros */
+#define MOSAL_DPC_enable(MOSAL_DPC_p)
+#define MOSAL_DPC_disable(MOSAL_DPC_p)
+
+
+/* TIMERs */
+
+
+/* MOSAL timer object */
+struct MOSAL_timer {
+ KTIMER timer; /* OS timer object */
+ struct MOSAL_DPC mdpc; /* MOSAL DPC object */
+};
+
+u_int32_t MOSAL_get_cnt();
+
+#endif /* MT_KERNEL */
+
+#endif /* H_MOSAL_TIMER_IMP_H */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_priv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_priv.h
new file mode 100644
index 00000000..0ccf4bfe
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_timer_priv.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MOSAL_TIMER_PRIV_H
+#define H_MOSAL_TIMER_PRIV_H
+
+#include "mosal_timer_imp.h"
+
+#ifdef __KERNEL__
+
+
+/* INTERRUPTS */
+
+
+
+
+/******************************************************************************
+ * Function: general_isr
+ *
+ * Description: device ISR handler
+ *
+ * Parameters:
+ * pi_pIntObject........... Interrupt Object
+ * pi_pContext.............
My device context + * + * Returns: + * + *****************************************************************************/ +BOOLEAN +general_isr( + PKINTERRUPT pi_pIntObject, + PVOID pi_pContext + ); + + +void deinit_dpc(); +call_result_t init_dpc(); + +#endif /* __KERNEL__ */ + +#endif /* H_MOSAL_TIMER_PRIV_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_types.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_types.h new file mode 100644 index 00000000..74e0c496 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_types.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSAL_TYPES_H +#define H_MOSAL_TYPES_H + +/* TAVOR */ +#define MOSAL_MAXDEV 32 +#define MOSAL_MAXNAME 32 + +#define MOSAL_EXPECT_TRUE(cond) (cond) +#define MOSAL_EXPECT_FALSE(cond) (cond) + + +typedef HANDLE MOSAL_shmid_t; + +#endif + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.c new file mode 100644 index 00000000..2f392e41 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.c @@ -0,0 +1,426 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mosal_priv.h"
+
+
+
+/* device DB */
+extern MOSAL_dev_t MOSAL_dev_db[MOSAL_MAXDEV];
+
+extern KIRQL cur_max_irql;
+
+/******************************************************************************
+ * Function:
+ * find_device_by_name
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by device name
+ *
+ * Parameters:
+ * name - device name
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+******************************************************************************/
+MOSAL_dev_t *find_device_by_name( char *name)
+{
+ int i;
+
+ /* find the device by name */
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if ((MOSAL_dev_db[i].state == MW_BUSY) && !strcmp( MOSAL_dev_db[i].name, name ))
+ return &MOSAL_dev_db[i];
+ }
+ return NULL;
+}
+
+/******************************************************************************
+ * Function:
+ * find_device_by_location
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by device location
+ *
+ * Parameters:
+ * bus - PCI bus number
+ * dev_func - PCI device/function
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+******************************************************************************/
+MOSAL_dev_t *find_device_by_location( u_int8_t bus, u_int8_t dev_func )
+{
+ int i;
+
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if ((MOSAL_dev_db[i].state == MW_BUSY) &&
+ (MOSAL_dev_db[i].bus == bus) && (MOSAL_dev_db[i].dev_func == dev_func))
+ return &MOSAL_dev_db[i];
+ }
+ return NULL;
+}
+
+/******************************************************************************
+ * Function:
+ * find_device_by_phys_addr
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by physical address
+ *
+ * Parameters:
+ * pa - phys address
+ * bsize - region size
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+******************************************************************************/
+MOSAL_dev_t *find_device_by_phys_addr( MT_phys_addr_t pa, MT_size_t bsize )
+{
+ int i;
+ MT_phys_addr_t dpa;
+ MT_size_t dbsize;
+
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if (MOSAL_dev_db[i].state != MW_BUSY)
+ continue;
+ // check DDR
+ dpa = (MT_phys_addr_t)MOSAL_dev_db[i].m_Ddr.m_MemPhysAddr.QuadPart;
+ dbsize = (MT_size_t)MOSAL_dev_db[i].m_Ddr.m_ulMemSize;
+ if ((dpa <= pa) && ((dpa + dbsize) >= (pa + bsize)))
+ return &MOSAL_dev_db[i];
+ // check UAR
+ dpa = (MT_phys_addr_t)MOSAL_dev_db[i].m_Uar.m_MemPhysAddr.QuadPart;
+ dbsize = (MT_size_t)MOSAL_dev_db[i].m_Uar.m_ulMemSize;
+ if ((dpa <= pa) && ((dpa + dbsize) >= (pa + bsize)))
+ return &MOSAL_dev_db[i];
+ // check CR
+ dpa = (MT_phys_addr_t)MOSAL_dev_db[i].m_Cr.m_MemPhysAddr.QuadPart;
+ dbsize = (MT_size_t)MOSAL_dev_db[i].m_Cr.m_ulMemSize;
+ if ((dpa <= pa) && ((dpa + dbsize) >= (pa + bsize)))
+ return &MOSAL_dev_db[i];
+ }
+ return NULL;
+}
+
+/******************************************************************************
+ * Function:
+ * SendAwaitIrpCompletion
+ *
+ * Description:
+ * IRP completion routine: signals the caller's event
+ *
+ * Parameters:
+ *
+ * Returns:
+ * STATUS_MORE_PROCESSING_REQUIRED, so the IRP is kept for the caller
+ *
+******************************************************************************/
+NTSTATUS
+SendAwaitIrpCompletion (
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp,
+ IN PVOID Context
+ )
+{
+ UNREFERENCED_PARAMETER (DeviceObject);
+ KeSetEvent ((PKEVENT) Context, IO_NO_INCREMENT, FALSE);
+ return STATUS_MORE_PROCESSING_REQUIRED; // Keep this IRP
+}
+
+/******************************************************************************
+ * Function:
+ * SendAwaitIrp
+ *
+ * Description:
+ * Build an IRP, send it down the device stack and wait for the response
+ * (blocking mode)
+ *
+ * Parameters:
+ * pi_pFdo................ our (functional) device object
+ * pi_pLdo................ lower device object
+ * pi_MajorCode........... IRP major code
+ * pi_MinorCode........... IRP minor code
+ * pi_pBuffer............. parameter buffer
+ * pi_nSize............... size of the buffer
+ *
+ * Returns:
+ * standard NTSTATUS return codes
+ *
+******************************************************************************/
+NTSTATUS
+SendAwaitIrp(
+ IN PDEVICE_OBJECT pi_pFdo,
+ IN PDEVICE_OBJECT pi_pLdo,
+ IN ULONG pi_MajorCode,
+ IN ULONG pi_MinorCode,
+ IN PVOID pi_pBuffer,
+ IN int pi_nSize
+ )
+/*++
+
+ Routine Description:
+
+ Build an IRP, send it down the device stack and wait for the response
+ (blocking mode)
+
+ Arguments:
+
+ pi_pFdo................ our device
+ pi_pLdo................ lower device
+ pi_MajorCode........... IRP major code
+ pi_MinorCode........... IRP minor code
+ pi_pBuffer............. parameter buffer
+ pi_nSize............... size of the buffer
+
+ Returns:
+
+ standard NTSTATUS return codes.
+
+ Notes:
+
+--*/
+{ /* SendAwaitIrp */
+ // Event
+ KEVENT l_hEvent;
+ // Pointer to IRP
+ PIRP l_pIrp;
+ // Stack location
+ PIO_STACK_LOCATION l_pStackLocation;
+ // Returned status
+ NTSTATUS l_Status;
+
+ // call validation: this routine blocks, so it must be called at PASSIVE_LEVEL
+ if(KeGetCurrentIrql() != PASSIVE_LEVEL)
+ return STATUS_UNSUCCESSFUL;
+
+ // create event
+ KeInitializeEvent(&l_hEvent, NotificationEvent, FALSE);
+
+ // build the IRP request
+ l_pIrp = IoAllocateIrp( pi_pFdo->StackSize, FALSE );
+
+ // validate request
+ if (!l_pIrp)
+ {
+ //MdKdPrint( DBGLVL_MAXIMUM, ("(SendAwaitIrp) Unable to allocate IRP !\n"));
+ return STATUS_INSUFFICIENT_RESOURCES;
+ }
+
+ // fill IRP
+ l_pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+ // set completion routine
+ IoSetCompletionRoutine(l_pIrp,SendAwaitIrpCompletion, &l_hEvent, TRUE, TRUE, TRUE);
+
+ // fill stack location
+ l_pStackLocation = IoGetNextIrpStackLocation(l_pIrp);
+ l_pStackLocation->MajorFunction= (UCHAR)pi_MajorCode;
+ l_pStackLocation->MinorFunction= (UCHAR)pi_MinorCode;
+ RtlCopyMemory( &l_pStackLocation->Parameters, pi_pBuffer, pi_nSize );
+
+ // Call the lower driver to perform the request
+ l_Status = IoCallDriver( pi_pLdo, l_pIrp );
+
+ // if the request was not completed synchronously --> wait
+ if (l_Status == STATUS_PENDING)
+ {
+ // Wait until the IRP is completed
+ KeWaitForSingleObject(
+ &l_hEvent, // event to wait for
+ Executive, // thread type (to wait in its context)
+ KernelMode, // mode of work
+ FALSE, // alertable
+ NULL // timeout
+ );
+ l_Status = l_pIrp->IoStatus.Status;
+ }
+
+ IoFreeIrp(l_pIrp);
+
+ return l_Status;
+
+} /* SendAwaitIrp */
+
+/******************************************************************************
+ * Function:
+ * ReadWritePciConfig
+ *
+ * Description:
+ * read or write a range of the device's PCI configuration space by sending
+ * IRP_MN_READ_CONFIG/IRP_MN_WRITE_CONFIG down the stack (blocking mode)
+ *
+ * Parameters:
+ * pi_pDev................ MOSAL device handle
+ * pi_pDataBuffer......... data buffer
+ * pi_nPciSpaceOffset..... offset in PCI config space
+ * pi_nDataLength......... size of the buffer
+ * pi_fReadConfig......... TRUE - read config space, FALSE - write it
+ *
+ * Returns:
+ * standard NTSTATUS return codes
+ *
+******************************************************************************/
+NTSTATUS
+ReadWritePciConfig(
+ IN MOSAL_dev_handle_t pi_pDev,
+ IN PVOID pi_pDataBuffer,
+ IN ULONG pi_nPciSpaceOffset,
+ IN ULONG pi_nDataLength,
+ IN BOOLEAN pi_fReadConfig
+ )
+
+{ /* ReadWritePciConfig */
+
+ // parameter buffer for the request
+ ReadWriteConfig_t l_RwParams;
+
+ // parameter validation
+ //MDASSERT(pi_pDataBuffer);
+ //MDASSERT(pi_nDataLength);
+
+ // fill request parameters
+ l_RwParams.Buffer = pi_pDataBuffer;
+ l_RwParams.Length = pi_nDataLength;
+ l_RwParams.Offset = pi_nPciSpaceOffset;
+ l_RwParams.WhichSpace = PCI_WHICHSPACE_CONFIG;
+
+ return SendAwaitIrp( pi_pDev->fdo_p, pi_pDev->ldo_p, IRP_MJ_PNP,
+ pi_fReadConfig ? IRP_MN_READ_CONFIG : IRP_MN_WRITE_CONFIG, &l_RwParams,
+ sizeof(ReadWriteConfig_t));
+
+} /* ReadWritePciConfig */
+
+/******************************************************************************
+ * Function: MOSAL_add_device
+ *
+ * Description: add OS- and Driver-specific parameters of the device
+ *
+ * Parameters:
+ *
+ * Returns:
+ * MT_OK - success
+ * MT_EBUSY - Device already exists
+ *
+ *****************************************************************************/
+call_result_t MOSAL_add_device(
+ MOSAL_dev_handle_t * dev_pp, /* returned handle */
+ MOSAL_dev_t * parm_p /* entry, filled */
+ )
+{
+ MOSAL_dev_t * dev_p;
+ int i;
+
+ *dev_pp = NULL;
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if ((MOSAL_dev_db[i].state == MW_BUSY) && !strcmp( MOSAL_dev_db[i].name, parm_p->name))
+ {
+ /* Device already exists !!! */
+ return MT_EBUSY;
+ }
+ }
+
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if (MOSAL_dev_db[i].state == MW_FREE)
+ {
+ dev_p = &MOSAL_dev_db[i];
+ *dev_p = *parm_p;
+ dev_p->state = MW_BUSY;
+ KeInitializeSpinLock( &dev_p->isr_lock );
+ *dev_pp = dev_p;
+ /* calculate current max irql */
+ if (cur_max_irql < dev_p->irql)
+ cur_max_irql = dev_p->irql;
+ return MT_OK;
+ }
+ }
+ return MT_ENORSC;
+}
+
+/******************************************************************************
+ * Function: MOSAL_remove_device
+ *
+ * Description: remove the OS- and Driver-specific parameters of the device
+ *
+ * Parameters:
+ *
+ * Returns:
+ * MT_OK - success
+ * MT_ERROR - invalid handle
+ *
+ *****************************************************************************/
+call_result_t MOSAL_remove_device(MOSAL_dev_handle_t dev_p)
+{
+ int i;
+
+ if (dev_p == NULL)
+ return MT_ERROR;
+
+ /* recalculate the current max irql over the remaining devices */
+ cur_max_irql = DISPATCH_LEVEL;
+ dev_p->irq_num = 0;
+ for (i=0; i<MOSAL_MAXDEV; i++)
+ {
+ if ((MOSAL_dev_db[i].state == MW_BUSY) && MOSAL_dev_db[i].irq_num)
+ {
+ if (cur_max_irql < MOSAL_dev_db[i].irql)
+ cur_max_irql = MOSAL_dev_db[i].irql;
+ }
+ }
+ /* disconnect interrupt */
+ if (dev_p->int_object_p != NULL)
+ IoDisconnectInterrupt( dev_p->int_object_p );
+ memset( dev_p, 0, sizeof(MOSAL_dev_t) );
+ dev_p->state = MW_FREE;
+ return MT_OK;
+}
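A usage sketch for the PCI-config helper (illustrative only; read_ven_dev_id is an assumed name). Offset 0 of PCI configuration space holds the vendor ID in the low word and the device ID in the high word:

/* Sketch: read the vendor/device ID dword from a registered device's config space */
static NTSTATUS read_ven_dev_id( MOSAL_dev_handle_t dev, ULONG *ven_dev_p )
{
	return ReadWritePciConfig( dev, ven_dev_p,
		0,                       /* offset 0: vendor ID (low word), device ID (high word) */
		sizeof(ULONG), TRUE );   /* TRUE = IRP_MN_READ_CONFIG */
}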
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.h
new file mode 100644
index 00000000..234b3c45
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosal_util.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MOSAL_UTIL_H
+#define H_MOSAL_UTIL_H
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Structures
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+typedef enum {MW_FREE, MW_BUSY} mw_dev_state_t;
+
+#ifdef __KERNEL__
+/* taken from wdm.h */
+typedef struct {
+ ULONG WhichSpace;
+ PVOID Buffer;
+ ULONG Offset;
+ ULONG POINTER_ALIGNMENT Length;
+} ReadWriteConfig_t, *PReadWriteConfig_t;
+
+
+/* device configuration and other specific information */
+
+typedef struct MD_BAR_S {
+ PHYSICAL_ADDRESS m_MemPhysAddr;
+ ULONG m_ulMemSize;
+ USHORT m_usMemFlags;
+ PUCHAR m_pKernelAddr;
+ ULONG m_ulKernelSize;
+ ULONG m_ulKernelOffset;
+} MD_BAR_T, *PMD_BAR_T;
+
+
+
+/* OS- and Driver-specific device parameters */
+typedef struct MOSAL_dev {
+ mw_dev_state_t state; /* Device status flag */
+ char name[MOSAL_MAXNAME]; /* Device name */
+ PKINTERRUPT int_object_p; /* NT interrupt object */
+ MOSAL_ISR_func_t thandler; /* device interrupt handler */
+ intr_handler_t ghandler; /* device interrupt handler */
+ void * dev_id; /* interrupt handler context */
+ MOSAL_IRQ_ID_t irq_num; /* IRQ vector number */
+ KIRQL irql; /* interrupt level */
+ KAFFINITY affinity; /* processor affinity */
+ BOOLEAN int_shared; /* whether interrupt is shared */
+ KINTERRUPT_MODE int_mode; /* interrupt mode */
+ PDEVICE_OBJECT fdo_p; /* functional device object */
+ PDEVICE_OBJECT ldo_p; /* lower device object */
+ u_int8_t bus; /* PCI bus number of the IB card */
+ u_int8_t dev_func; /* device/function of the IB card */
+ KSPIN_LOCK isr_lock; /* lock for the ISR */
+ MD_BAR_T m_Cr; /* CR space */
+ MD_BAR_T m_Uar; /* UAR-space region */
+ MD_BAR_T m_Ddr; /* DDR-space region */
+ void * drv_helper; /* driver function for performing things that MOSAL can't do */
+ void * drv_helper_ctx; /* its context */
+} MOSAL_dev_t;
+
+typedef MOSAL_dev_t * MOSAL_dev_handle_t;
+
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * Helpers
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/******************************************************************************
+ * Function:
+ * find_device_by_name
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by device name
+ *
+ * Parameters:
+ * name - device name
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+******************************************************************************/
+MOSAL_dev_t *find_device_by_name( char *name);
+
+/******************************************************************************
+ * Function:
+ * find_device_by_location
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by device location
+ *
+ * Parameters:
+ * bus - PCI bus number
+ * dev_func - PCI device/function
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+******************************************************************************/
+MOSAL_dev_t *find_device_by_location( u_int8_t bus, u_int8_t dev_func );
+
+/******************************************************************************
+ * Function:
+ * find_device_by_phys_addr
+ *
+ * Description:
+ * find a device entry in the MOSAL device DB by physical address
+ *
+ * Parameters:
+ * pa - phys address
+ * bsize - region size
+ *
+ * Returns:
+ * pointer to the entry on SUCCESS
+ * NULL - otherwise
+ *
+
+******************************************************************************/
+MOSAL_dev_t *find_device_by_phys_addr( MT_phys_addr_t pa, MT_size_t bsize );
+
+/******************************************************************************
+ * Function:
+ * SendAwaitIrpCompletion
+ *
+ * Description:
+ * IRP completion routine: signals the caller's event
+ *
+ * Parameters:
+ *
+ * Returns:
+ * STATUS_MORE_PROCESSING_REQUIRED, so the IRP is kept for the caller
+ *
+******************************************************************************/
+NTSTATUS
+SendAwaitIrpCompletion (
+ IN PDEVICE_OBJECT DeviceObject,
+ IN PIRP Irp,
+ IN PVOID Context
+ );
+
+/******************************************************************************
+ * Function:
+ * SendAwaitIrp
+ *
+ * Description:
+ * Build an IRP, send it down the device stack and wait for the response
+ * (blocking mode)
+ *
+ * Parameters:
+ * pi_pFdo................ our (functional) device object
+ * pi_pLdo................ lower device object
+ * pi_MajorCode........... IRP major code
+ * pi_MinorCode........... IRP minor code
+ * pi_pBuffer............. parameter buffer
+ * pi_nSize............... size of the buffer
+ *
+ * Returns:
+ * standard NTSTATUS return codes
+ *
+******************************************************************************/
+NTSTATUS
+SendAwaitIrp(
+ IN PDEVICE_OBJECT pi_pFdo,
+ IN PDEVICE_OBJECT pi_pLdo,
+ IN ULONG pi_MajorCode,
+ IN ULONG pi_MinorCode,
+ IN PVOID pi_pBuffer,
+ IN int pi_nSize
+ );
+
+/******************************************************************************
+ * Function:
+ * ReadWritePciConfig
+ *
+ * Description:
+ * read or write a range of the device's PCI configuration space by sending
+ * IRP_MN_READ_CONFIG/IRP_MN_WRITE_CONFIG down the stack (blocking mode)
+ *
+ * Parameters:
+ * pi_pDev................ MOSAL device handle
+ * pi_pDataBuffer......... data buffer
+ * pi_nPciSpaceOffset..... offset in PCI config space
+ * pi_nDataLength......... size of the buffer
+ * pi_fReadConfig......... TRUE - read config space, FALSE - write it
+ *
+ * Returns:
+ * standard NTSTATUS return codes
+ *
+******************************************************************************/
+NTSTATUS
+ReadWritePciConfig(
+ IN MOSAL_dev_handle_t pi_pDev,
+ IN PVOID pi_pDataBuffer,
+ IN ULONG pi_nPciSpaceOffset,
+ IN ULONG pi_nDataLength,
+ IN BOOLEAN pi_fReadConfig
+ );
+
+
+/******************************************************************************
+ * Function: MOSAL_add_device
+ *
+ * Description: add OS- and Driver-specific parameters of the device
+ *
+ * Parameters:
+ *
+ * Returns:
+ * MT_OK - success
+ * MT_EBUSY - Device already exists
+ *
+ *****************************************************************************/
+call_result_t MOSAL_add_device(
+ MOSAL_dev_handle_t * dev_pp, /* returned handle */
+ MOSAL_dev_t * parm_p /* entry, filled */
+ );
+
+/******************************************************************************
+ * Function: MOSAL_remove_device
+ *
+ * Description: remove the OS- and Driver-specific parameters of the device
+ *
+ * Parameters:
+ *
+ * Returns:
+ * MT_OK - success
+ * MT_ERROR - invalid handle
+ *
+ *****************************************************************************/
+call_result_t MOSAL_remove_device(MOSAL_dev_handle_t dev_p);
+#endif
+#endif
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_driver.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_driver.c
new file mode 100644
index 00000000..71d842ae
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_driver.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#if WINVER >= 0x0502 +#include +#endif +#include +#include + +#ifdef MT_KERNEL +#if 0 +BOOL mosal_main( HANDLE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved + ) +{ + /* The status to return */ + BOOL l_fRetCode = TRUE; + /* socket dll version */ + WORD wVersionRequested; + /* socket dll information */ + WSADATA WsaData; + /* return code */ + int rc; + /* print buffer */ + char buf[360]; + + switch (ul_reason_for_call) + { + case DLL_PROCESS_ATTACH: + { /* DLL_PROCESS_ATTACH */ + + sprintf( buf, "MOSAL:DllMain: >> DLL_PROCESS_ATTACH: Pid 0x%x\n", + GetCurrentProcessId() ); + //OutputDebugString(buf); + + /* start sockets */ + wVersionRequested = MAKEWORD( 2, 0 ); + rc = WSAStartup( wVersionRequested, &WsaData ); + if (rc != 0) { + sprintf( buf, "Error 0x%x in call to WSAStartup()\r\n", rc ); + OutputDebugString(buf); + l_fRetCode = FALSE; + break; + } + else { + sprintf( buf, "MOSAL:DllMain: Ver 0x%x, MaxSock %d, Descr '%s'\n", + WsaData.wHighVersion, + WsaData.iMaxSockets, + WsaData.szDescription + ); + //OutputDebugString(buf); + } + + }/* DLL_PROCESS_ATTACH */ + break; + + case DLL_PROCESS_DETACH: + { /* DLL_PROCESS_DETACH */ + + /* end up with sockets */ + WSACleanup(); + + } /* DLL_PROCESS_DETACH */ + break; + + case DLL_THREAD_ATTACH: + break; + + + case DLL_THREAD_DETACH: + break; + } + return l_fRetCode; +} +#endif + +unsigned long MOSAL_getpid() +{ + return (unsigned long)GetCurrentProcessId(); +} +#endif // MT_KERNEL diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c new file mode 100644 index 00000000..f64ac028 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.c @@ -0,0 +1,272 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#define C_K2U_CBK_U_C + +#include +#include "mosal_priv.h" +#include "mosalu_k2u_cbk.h" +#include + +static pcs_cbk_t s_pcs_cbk = { 0, 0, 0, 0, 0, 0, NULL }; +static void k2u_cbk_thread_cleanup(void); + + +/* THREAD ENTRY POINT */ +DWORD WINAPI start_thread( + pcs_cbk_p_t pi_pCbk // pointer to callback info +) +{ + k2u_cbk_id_t id; + u_int8_t data[MAX_CBK_DATA_SZ]; + MT_size_t size; + call_result_t rc; + + /* Make initialization of kernel resources from this thread to allow owner validation in kernel */ + if ((rc= k2u_cbk_init(&pi_pCbk->this_proc_cbk_hndl)) != MT_OK) { + MTL_ERROR1("%s: failed initializing kernel stub (%s).\n", __func__,mtl_strerror(rc)); + return 0; + } + + /* release main thread */ + SetEvent( pi_pCbk->event ); + + /* poll private message queue until MT_EAGAIN (empty) */ + while (1) { + rc= k2u_cbk_pollq(pi_pCbk->this_proc_cbk_hndl,&id,data,&size); + if (rc == MT_EINTR) { + MTL_DEBUG2(MT_FLFMT("k2u_cbk_pollq returned with MT_EINTR")); + continue; /* poll interrupted (signal pending...) */ + } + if (rc != MT_OK) break; + if (id == K2U_CBK_CLEANUP_CBK_ID) break; + if (MOSAL_mutex_acq(&pi_pCbk->mutex, TRUE) != MT_OK) break; + if ((id < MAX_CBK) && (pi_pCbk->cbks[id] != NULL)) { /* found matching callback */ + pi_pCbk->cbks[id](id,data,size); /* invoke callback */ + } + MOSAL_mutex_rel(&pi_pCbk->mutex); + } + if (id != K2U_CBK_CLEANUP_CBK_ID) { + MTL_ERROR1(MT_FLFMT("k2u_cbk_pollq returned error (%s). k2u_cbk will be disabled."), + mtl_strerror_sym(rc)); + } + return 0; +} + +call_result_t k2u_cbk_register(k2u_cbk_t cbk, k2u_cbk_hndl_t *cbk_hndl_p, k2u_cbk_id_t *cbk_id_p) +{ + k2u_cbk_id_t id; + pcs_cbk_p_t l_pCbk = &s_pcs_cbk; + DWORD status; + HANDLE wait_list[3]; + MT_bool first_cbk; + + if (cbk == NULL) { + MTL_ERROR2("%s: NULL callback.\n", __func__); + return MT_EINVAL; + } + + if (l_pCbk->num_o_cbks == 0) { /* first callback */ + MOSAL_mutex_init(&l_pCbk->mutex); + } + + if (MOSAL_mutex_acq(&l_pCbk->mutex, TRUE) != MT_OK) { + return MT_EAGAIN; + } + + /* Find free cbk id */ + if (l_pCbk->num_o_cbks == MAX_CBK) { + MTL_ERROR2("%s: All callback resources are used (%d).\n", __func__,MAX_CBK); + MOSAL_mutex_rel(&l_pCbk->mutex); + return MT_OK; + } + + /* From this point we are sure that we will find at least one free id */ + for (id= 0; id < MAX_CBK; id++) { + if (l_pCbk->cbks[id] == NULL) break; + } /* for */ + + #ifdef MAX_DEBUG + if (id == MAX_CBK) { + MTL_ERROR1("%s: No free callback entry was found !\n", __func__); + MOSAL_mutex_rel(&l_pCbk->mutex); + return MT_OK; + } + #endif + + l_pCbk->num_o_cbks++; + + l_pCbk->cbks[id]= cbk; /* Assure assignment is done only when data is valid*/ + first_cbk= (l_pCbk->num_o_cbks == 1); + /* block additional allocations while setting up polling thread (with mutex unlocked) */ + if (first_cbk) l_pCbk->num_o_cbks= MAX_CBK; + MOSAL_mutex_rel(&l_pCbk->mutex); + + if (first_cbk) { /* first callback */ + + /* init data */ + l_pCbk->event = CreateEvent( NULL, FALSE, FALSE, NULL ); + if ( l_pCbk->event == NULL ) { + MTL_ERROR1( "k2u_cbk_register: Cannot create event (0x%x)\n", GetLastError()); + goto event_fail; + } + + /* Wait for old thread to complete cleanup */ + WaitForSingleObject( l_pCbk->threadHandle, INFINITE ); + CloseHandle( l_pCbk->threadHandle ); + l_pCbk->threadHandle = NULL; + + /* run helper thread to poll the callbacks */ + l_pCbk->threadHandle = CreateThread( + NULL, // pointer to security attributes + 0, // initial thread stack size + start_thread, // pointer to thread function + l_pCbk, // argument 
for new thread
+ 0, // creation flags: run immediately
+ &l_pCbk->threadId // pointer to receive thread ID
+ );
+
+ if (l_pCbk->threadHandle == NULL) {
+ MTL_ERROR1( "k2u_cbk_register: Cannot create thread for k2u_cbk_t\n");
+ goto thread_fail;
+ }
+
+ /* wait for the thread to start or exit */
+ wait_list[0] = l_pCbk->threadHandle;
+ wait_list[1] = l_pCbk->event;
+ status = WaitForMultipleObjects( 2, wait_list, FALSE, INFINITE );
+ if (status == WAIT_OBJECT_0) { /* thread exited */
+ MTL_ERROR1( "k2u_cbk_register: Polling thread failed to start\n");
+ goto thread_exit;
+ }
+ l_pCbk->num_o_cbks = 1;
+
+ /* register cleanup function */
+ mtib_RegisterCF( k2u_cbk_thread_cleanup );
+ }
+
+ *cbk_id_p= id;
+ *cbk_hndl_p= l_pCbk->this_proc_cbk_hndl;
+ return MT_OK;
+
+thread_exit:
+ CloseHandle( l_pCbk->threadHandle );
+ l_pCbk->threadHandle = NULL;
+thread_fail:
+ l_pCbk->cbks[id]= NULL;
+ l_pCbk->num_o_cbks= 0;
+event_fail:
+ return MT_EAGAIN;
+}
+
+call_result_t k2u_cbk_deregister(k2u_cbk_id_t cbk_id)
+{
+ pcs_cbk_p_t l_pCbk = &s_pcs_cbk;
+
+ if (MOSAL_mutex_acq(&l_pCbk->mutex, TRUE) != MT_OK) {
+ return MT_EAGAIN;
+ }
+
+ if ((cbk_id >= MAX_CBK) || (l_pCbk->cbks[cbk_id] == NULL)) {
+ MOSAL_mutex_rel(&l_pCbk->mutex);
+ MTL_ERROR2( "k2u_cbk_deregister: Invalid callback ID (%d).\n",cbk_id);
+ return MT_EINVAL;
+ }
+
+ /* Remove from table before freeing (avoid inconsistent data while getting a signal) */
+ l_pCbk->cbks[cbk_id] = NULL;
+ l_pCbk->num_o_cbks--;
+ if (l_pCbk->num_o_cbks == 0) { /* no more callbacks - restore old handler */
+ k2u_cbk_cleanup(l_pCbk->this_proc_cbk_hndl); /* free kernel resources */
+ MTL_DEBUG4( "k2u_cbk_deregister: cleaned up this process cbk resources.\n");
+ }
+
+ MOSAL_mutex_rel(&l_pCbk->mutex);
+ return MT_OK;
+}
+
+static void k2u_cbk_thread_cleanup(void)
+{
+ unsigned int i;
+ pcs_cbk_p_t l_pCbk = &s_pcs_cbk;
+ MTL_DEBUG1( "k2u_cbk_thread_cleanup: \n");
+
+ if (l_pCbk->num_o_cbks != 0) {
+ // deregister every slot that is still in use; k2u_cbk_deregister()
+ // updates num_o_cbks, so scan the whole table instead of counting
+ for (i=0; i<MAX_CBK; i++)
+ if (l_pCbk->cbks[i] != NULL)
+ k2u_cbk_deregister(i);
+ }
+}
+
+/*
+ * MANUAL WRAPPER
+ */
+
+#include
+
+static UDLL_THREAD_EXEC_FUNC_T perform_ioctl;
+static int first_time = 0;
+
+int k2u_ioctl_wrapper(int ops, void *pi, u_int32_t pi_sz, void *po, u_int32_t po_sz)
+{
+ if (!first_time)
+ { /* the tx library is not open yet */
+
+ // register DLL
+ if( mtib_RegisterWL(&perform_ioctl) )
+ return MT_ENOMOD;
+
+ // mark successful open
+ first_time = 1;
+
+ } /* the tx library is not open yet */
+
+ return perform_ioctl( (int)ops, pi, pi_sz, po, po_sz);
+}
+
+call_result_t k2u_cbk_init(k2u_cbk_hndl_t *k2u_cbk_h_p)
+{
+ int ioctl_rc;
+
+ ioctl_rc = k2u_ioctl_wrapper(K2U_CBK_CBK_INIT, NULL, 0, k2u_cbk_h_p, sizeof(k2u_cbk_hndl_t));
+
+ return ioctl_rc ? MT_ERROR : MT_OK;
+}
+
+
+call_result_t k2u_cbk_cleanup(k2u_cbk_hndl_t k2u_cbk_h)
+{
+ int ioctl_rc;
+
+ ioctl_rc = k2u_ioctl_wrapper(K2U_CBK_CBK_CLEANUP, (void*)&k2u_cbk_h, sizeof(k2u_cbk_hndl_t), NULL, 0);
+ return ioctl_rc ? MT_ERROR : MT_OK;
+}
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h
new file mode 100644
index 00000000..a6ce6ca0
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_k2u_cbk.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MOSALU_K2U_CBK_H +#define H_MOSALU_K2U_CBK_H + +#include "mosal_priv.h" + +/* Maximum callbacks per process */ +#define MAX_CBK 32 + +typedef struct { + k2u_cbk_hndl_t this_proc_cbk_hndl; /* Init when registering first callback */ + u_int32_t num_o_cbks; /* current number of callbacks */ + DWORD threadId; /* thread ID: for any case */ + HANDLE threadHandle; /* thread handle */ + HANDLE event; /* "thread started" event */ + MOSAL_mutex_t mutex; /* mutex, protecting the structure */ + k2u_cbk_t cbks[MAX_CBK]; /* callbacks (index=id) */ +} pcs_cbk_t, *pcs_cbk_p_t; + + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c new file mode 100644 index 00000000..ef9562f7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_mem.c @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mosal_priv.h" + +#if 0 + +void MOSAL_user_lib_init(void) { } + +/* + * Map io memory to user address space + * + */ +MT_virt_addr_t MOSAL_io_remap(MT_phys_addr_t pa, MT_size_t size) +{ + MT_virt_addr_t bp; + + /* ??? 
How we do it in Windows */ + #if 0 + if(getuid() != 0){ + MTL_ERROR1("MOSAL_io_remap: Only root can do that\n"); + return(0); + } + #endif + + /* Check for alignments */ + if(size < MIN_PAGE_SZ_ALIGN(size) || pa < MIN_PAGE_SZ_ALIGN(pa)){ + MTL_ERROR1("MOSAL_io_remap: Physical address or size not aligned to min. page size\n"); + return(0); + } + +#ifdef STRICT_ALIGNEMENT + + if(pa < ((pa + (size - 1)) & (~((~0UL + size))))) { + MTL_ERROR1("MOSAL_io_remap: Physical address must be sized aligned\n"); + return(0); + } + +#endif /* STRICT ALIGNEMENT */ + + bp = MOSAL_io_remap_for_user(pa, size); + + if(bp == (MT_virt_addr_t)0) { + MTL_ERROR('1', "MOSAL_io_remap: Failed to mmap file\n"); + return(0); + } + + return (bp); +} + +void MOSAL_io_unmap(MT_virt_addr_t va) +{ + MTL_ERROR1("MOSAL_io_unmap: Not supported yet !!! \n"); +} + +void MOSAL_io_release(MT_phys_addr_t pa) +{ + MOSAL_io_release_for_user(pa); +} + +MT_virt_addr_t MOSAL_phys_ctg_get(MT_size_t size) +{ + return MOSAL_phys_ctg_get_for_user(size); +} + +call_result_t MOSAL_phys_ctg_free(MT_virt_addr_t addr, MT_size_t size) +{ + MOSAL_phys_ctg_free_for_user(addr); + return 0; +} + +call_result_t MOSAL_shmget(MOSAL_shmem_key_t key, + MT_size_t size, + u_int32_t flags, + MOSAL_shmid_t * id_p) +{ + DWORD ret; + DWORD win_flags=0; + u_int64_t size64 = size; + DWORD dwMaximumSizeHigh = (DWORD)(size64 >> 32); + DWORD dwMaximumSizeLow = (DWORD)(size64 & 0xffffffff); + + if (flags & MOSAL_SHM_READONLY) { + win_flags = PAGE_READONLY; //ReadOnly for everybody + } else { + win_flags = PAGE_READWRITE; //ReadWrite for everybody + } + + //If it should be created + if (flags & MOSAL_SHM_CREATE) { + *id_p = CreateFileMapping(INVALID_HANDLE_VALUE, NULL, win_flags, dwMaximumSizeHigh, dwMaximumSizeLow, key); + } else { + //If it should be opened + if (flags & MOSAL_SHM_READONLY) { + win_flags = FILE_MAP_READ; + } else { + win_flags = FILE_MAP_ALL_ACCESS; + } + + *id_p = OpenFileMapping(win_flags, FALSE , key); + } + + ret = GetLastError(); + + if ( (flags & MOSAL_SHM_EXCL ) && (flags & MOSAL_SHM_CREATE) ) { + if (ret == ERROR_ALREADY_EXISTS) { + CloseHandle(*id_p); + return MT_EBUSY; + } + } + + + if (*id_p == NULL) { + + switch (ret) { + case ERROR_ACCESS_DENIED : return MT_EACCES; + case ERROR_TOO_MANY_OPEN_FILES : + case ERROR_NOT_ENOUGH_MEMORY: return MT_EAGAIN; + default: return MT_EINVAL; + } + } else { + return MT_OK; + } +} + +call_result_t MOSAL_shmat(MOSAL_shmid_t id, int flags, void ** addr_p) +{ + DWORD win_flags=0; + DWORD ret; + + + if (flags & MOSAL_SHM_READONLY) { + win_flags |= FILE_MAP_READ; + } else { + win_flags |= FILE_MAP_ALL_ACCESS; + } + + *addr_p = MapViewOfFile(id, win_flags, 0, 0, 0); + ret = GetLastError(); + + if (*addr_p == NULL) { + switch (ret) { + case ERROR_ACCESS_DENIED : return MT_EACCES; + case ERROR_TOO_MANY_OPEN_FILES : + case ERROR_NOT_ENOUGH_MEMORY: return MT_EAGAIN; + case ERROR_INVALID_ACCESS: return MT_EINVAL; + default: return MT_ERROR; + } + } else { + return MT_OK; + } +} + +call_result_t MOSAL_shmdt(void * addr) +{ + if (!UnmapViewOfFile(addr)) { + return MT_EINVAL; + } else { + return MT_OK; + } +} + +call_result_t MOSAL_shmrm(MOSAL_shmid_t id) +{ + if (!CloseHandle(id)) { + switch (GetLastError()) { + case ERROR_ACCESS_DENIED : return MT_EACCES; + default: return MT_EINVAL; + } + } else { + return MT_OK; + } +} + +#endif + +/* PAGE_SIZE API */ + +u_int32_t MOSAL_get_sys_page_size(MT_virt_addr_t va) +{ + static page_size = 0; + SYSTEM_INFO lpSystemInfo; + if (!page_size) { + GetSystemInfo( &lpSystemInfo ); 
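+ // first call: query and cache the system page size; later calls return the cached value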
+ page_size = (u_int32_t)lpSystemInfo.dwPageSize;
+ }
+ return page_size;
+}
+
+u_int32_t MOSAL_get_sys_page_shift(MT_virt_addr_t va)
+{
+ static u_int32_t page_shift = 0;
+ u_int32_t i, page_size = MOSAL_get_sys_page_size( va );
+ if (!page_shift) {
+ // count how many times the page size can be halved: 4096 -> shift of 12
+ for (i=0; page_size; page_size >>= 1, i++);
+ page_shift = i - 1;
+ }
+ return page_shift;
+}
+
+
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket.c
new file mode 100644
index 00000000..d81c1e30
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket.c
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#include "mosal_priv.h" + +/****************************************************************************** + * Function + * MOSAL_socket_socket + * + * Description: + * create a socket + * + * Parameters: + * domain (IN) + * type (IN) + * protocol(IN) + * Returns: + * the socket on success, otherwise -1 + ******************************************************************************/ +int MOSAL_socket_socket(MOSAL_socket_domain_t domain,MOSAL_socket_type_t type,MOSAL_socket_protocol_t protocol) +{ + SOCKET sd = socket( domain, type, protocol ); + if (sd == INVALID_SOCKET) + return -1; + else + return (int)sd; +} + + +/****************************************************************************** + * Function + * MOSAL_socket_close + * + * Description: + * closes the socket + * + * Parameters: + * sock + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_close(int sock) +{ + int rc = closesocket( (SOCKET)sock ); + if (rc == SOCKET_ERROR) + return -1; + else + return 0; +} + + +/****************************************************************************** + * Function + * MOSAL_socket_connect + * + * Description: client + * connect to server + * + * Parameters: + * sock + * adrs(IN) server's adrs details + * len(IN) sizeof struct adrs + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_connect(int sock ,const MOSAL_sockaddr_t* adrs, + MOSAL_socklen_t len) +{ + int rc = connect( (SOCKET)sock, (const struct sockaddr FAR *)adrs, len ); + if (rc == SOCKET_ERROR) + return -1; + else + return 0; +} + + +/****************************************************************************** + * Function + * MOSAL_socket_bind + * + * Description: server + * bind the socket to adrs + * + * Parameters: + * sock (IN) + * adrs (IN) server's adrs details + * len (IN) size of struct adrs + * Returns: + * 0 on success, -1 otherwise + * + ******************************************************************************/ +int MOSAL_socket_bind(int sock,const MOSAL_sockaddr_t* adrs, + MOSAL_socklen_t len) +{ + int rc = bind( (SOCKET)sock, (const struct sockaddr FAR *)adrs, len ); + if (rc == SOCKET_ERROR) + return -1; + else + return 0; +} + + +/****************************************************************************** + * Function + * MOSAL_socket_listen + * + * Description: server + * start listening on this socket + * + * Parameters: + * sock(IN) + * n (IN) length of queue of requests + * + * Returns: + * 0 on success, -1 otherwise + ******************************************************************************/ +int MOSAL_socket_listen(int sock ,int n) +{ + int rc = listen( (SOCKET)sock, n ); + if (rc == SOCKET_ERROR) + return -1; + else + return 0; +} + + +/****************************************************************************** + * Function + * MOSAL_socket_accept + * + * Description: server + * extracts the first connection on the queue of pending connections, creates a new socket with + * the properties of sock, and allocates a new file descriptor. 
+ *
+ * Parameters:
+ * sock
+ * client_adrs_p(OUT) adrs of the first connection accepted
+ * len_p(OUT) sizeof adrs
+ *
+ * Returns:
+ * the new socket on success, -1 otherwise
+ ******************************************************************************/
+int MOSAL_socket_accept(int sock,MOSAL_sockaddr_t* client_adrs,MOSAL_socklen_t* len_p)
+{
+ SOCKET sd = accept( (SOCKET)sock, (struct sockaddr FAR *)client_adrs, (int FAR *)len_p );
+ if (sd == INVALID_SOCKET)
+ return -1;
+ else
+ return (int)sd;
+}
+
+
+/******************************************************************************
+ * Function
+ * MOSAL_socket_send
+ *
+ * Description:
+ * send len bytes from buffer through socket
+ * Parameters:
+ * sock(IN)
+ * buf
+ * len - num of bytes to send
+ * flags
+ * Returns: the number of bytes sent, or -1
+ *
+ ******************************************************************************/
+int MOSAL_socket_send(int sock,const void* buf,int len,int flags)
+{
+ int byte_cnt = send( (SOCKET)sock, (const char FAR *)buf, len, 0 );
+ if (byte_cnt == SOCKET_ERROR)
+ return -1;
+ else
+ return byte_cnt;
+}
+
+
+/******************************************************************************
+ * Function
+ * MOSAL_socket_recv
+ *
+ * Description:
+ * receive up to len bytes into buffer from socket
+ * Parameters:
+ * sock(IN) pointer to MOSAL socket object
+ * buf
+ * len - num of bytes to read
+ * flags
+ * Returns: the number of bytes read, or -1
+ ******************************************************************************/
+int MOSAL_socket_recv(int sock,void* buf,int len,int flags)
+{
+ int byte_cnt = recv( (SOCKET)sock, (char FAR *)buf, len, 0 );
+ if (byte_cnt == SOCKET_ERROR)
+ return -1;
+
+ if (byte_cnt == 0)
+ return -1; /* socket closed */
+ else
+ return byte_cnt;
+}
+
+
+/******************************************************************************
+ * Function
+ * MOSAL_socket_sendto
+ *
+ * Description:
+ * send n bytes from buf on socket to the peer at address adrs.
+ * Parameters:
+ * sock(IN) - pointer to MOSAL socket object
+ * buf(IN)
+ * n(IN) - num of bytes to send
+ * flags
+ * adrs
+ * adrs_len
+ *
+ * Returns: the number of bytes sent, or -1
+ ******************************************************************************/
+int MOSAL_socket_sendto (int sock,void *buf, int n,int flags, MOSAL_sockaddr_t* adrs,
+ MOSAL_socklen_t adrs_len)
+{
+ int byte_cnt = sendto( (SOCKET)sock, (const char FAR *)buf, n, 0,
+ (const struct sockaddr FAR *)adrs, (int)adrs_len );
+ if (byte_cnt == SOCKET_ERROR)
+ return -1;
+
+ if (byte_cnt == 0)
+ return -1; /* socket closed */
+ else
+ return byte_cnt;
+}
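A client-side sketch of the wrappers above (illustrative only; socket_demo is an assumed name, and the 192.0.2.1 address and port 5000 are placeholders):

/* Sketch: TCP client round-trip using the MOSAL socket wrappers */
static int socket_demo(void)
{
	MOSAL_sockaddr_t srv;
	char buf[64];
	int sd = MOSAL_socket_socket( MOSAL_PF_INET, MOSAL_SOCK_STREAM, MOSAL_IPPROTO_TCP );
	if (sd < 0)
		return -1;

	memset( &srv, 0, sizeof(srv) );
	srv.sin_family = AF_INET;
	srv.sin_port = htons( 5000 );               /* placeholder port */
	inet_aton( "192.0.2.1", &srv.sin_addr );    /* placeholder address */

	if (MOSAL_socket_connect( sd, &srv, sizeof(srv) ) < 0 ||
	    MOSAL_socket_send( sd, "ping", 4, 0 ) < 0 ||
	    MOSAL_socket_recv( sd, buf, sizeof(buf), 0 ) < 0) {
		MOSAL_socket_close( sd );
		return -1;
	}
	return MOSAL_socket_close( sd );
}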
+/******************************************************************************
+ *  Function
+ *		MOSAL_socket_recvfrom
+ *
+ *  Description:
+ *    read up to n bytes from the socket into buf. If adrs is not NULL,
+ *    fill it with the address of the sender and store the actual size
+ *    of the address in *adrs_len_p.
+ *
+ * Parameters:
+ *	sock(IN) socket descriptor
+ *	buf(OUT) receive buffer
+ *	n(IN) number of bytes to read
+ *	flags(IN) receive flags, passed through to recvfrom()
+ *	adrs(OUT) address of the sender
+ *	adrs_len_p(IN/OUT) size of *adrs in bytes
+ *
+ *  Returns: the number of bytes read, or -1
+ ******************************************************************************/
+int MOSAL_socket_recvfrom (int sock, void *buf, int n, int flags,
+                           MOSAL_sockaddr_t* adrs,MOSAL_socklen_t* adrs_len_p)
+{
+	int byte_cnt = recvfrom( (SOCKET)sock, (char FAR*)buf, n, flags,
+		(struct sockaddr FAR *)adrs, (int FAR *)adrs_len_p);
+	if (byte_cnt == SOCKET_ERROR)
+		return -1;
+	else
+		return byte_cnt;
+}
+
+/******************************************************************************
+ *  Function
+ *		MOSAL_socket_setsockopt
+ *
+ *  Description:
+ *    set an option at the socket or protocol level
+ *
+ * Parameters:
+ *	sock(IN) socket descriptor
+ *	level(IN) option level
+ *	optname(IN) option name
+ *	optval(IN) pointer to a buffer containing the option value
+ *	optlen(IN) buffer size
+ *
+ *  Returns: 0 on success, -1 otherwise
+ ******************************************************************************/
+int MOSAL_socket_setsockopt(int sock, MOSAL_socket_optlevel_t level,
+                            MOSAL_socket_optname_t optname, const void *optval, int optlen )
+{
+	int rc = setsockopt( (SOCKET)sock, (int)level, (int)optname,
+		(const char FAR *)optval, optlen );
+	if (rc == SOCKET_ERROR)
+		return -1;
+	else
+		return 0;
+}
+
+/******************************************************************************
+ *  Function
+ *		MOSAL_socket_get_last_error
+ *
+ *  Description:
+ *    get the last error that occurred on a socket call
+ *
+ * Parameters:
+ *
+ *  Returns: the error number
+ ******************************************************************************/
+int MOSAL_socket_get_last_error(void)
+{
+	return WSAGetLastError();
+}
+
+#if 0
+
+/******************************************************************************
+ *  Function
+ *		inet_aton
+ *
+ *  Description:
+ *    convert an IP address in dotted-decimal notation (or a host name)
+ *    into a 'struct in_addr'
+ *
+ * Parameters:
+ *
+ *  Returns: 0 on error, the IP address otherwise
+ ******************************************************************************/
+int inet_aton( const char *ip, struct in_addr *adrs )
+{
+	ULONG l_ulIpAddr;
+	struct hostent * l_pHostEntry;
+
+	/* assume a dotted-decimal IP address string and convert it into a binary value */
+	l_ulIpAddr = inet_addr(ip);
+
+	if (l_ulIpAddr == INADDR_NONE)
+	{ /* not a valid IP address; may be a host name - try to resolve it */
+
+		l_pHostEntry = gethostbyname( ip );
+		if (l_pHostEntry == NULL) {
+			DebugPrint("\nError 0x%x in call to gethostbyname()", WSAGetLastError());
+			return 0;
+		}
+		/* copy the first IP address from the host entry */
+		l_ulIpAddr = *(ULONG*)l_pHostEntry->h_addr_list[0];
+	}
+
+	adrs->s_addr = l_ulIpAddr;
+
+	return (int)l_ulIpAddr;
+}
+
+#endif
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket_imp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket_imp.h
new file mode 100644
index 00000000..11f02f69
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_socket_imp.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MOSALU_SOCKET_IMP_H +#define _MOSALU_SOCKET_IMP_H + +#ifndef __KERNEL__ + +//#include + + +typedef enum {MOSAL_PF_INET=PF_INET, MOSAL_PF_LOCAL=PF_UNIX } MOSAL_socket_domain_t; +typedef enum {MOSAL_SOCK_STREAM=SOCK_STREAM, MOSAL_SOCK_DGRAM= SOCK_DGRAM } MOSAL_socket_type_t; +typedef enum {MOSAL_IPPROTO_TCP=IPPROTO_TCP, MOSAL_IPPROTO_IP=IPPROTO_IP } MOSAL_socket_protocol_t; +typedef enum {MOSAL_SOL_SOCKET=SOL_SOCKET, MOSAL_SOL_IPPROTO_TCP=IPPROTO_TCP } MOSAL_socket_optlevel_t; +typedef enum {MOSAL_SO_REUSEADDR=SO_REUSEADDR } MOSAL_socket_optname_t; + +/* , MOSAL_SO_EXCLUSIVEADDRUSE=SO_EXCLUSIVEADDRUSE */ + +typedef int MOSAL_socklen_t; +typedef struct sockaddr_in MOSAL_sockaddr_t; + +#define inet_aton(ip,in_addr_p) ((in_addr_p)->s_addr=inet_addr(ip)) + +#endif +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_sync.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_sync.c new file mode 100644 index 00000000..52e3e078 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_sync.c @@ -0,0 +1,558 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#undef MTL_MODULE
+#define MTL_MODULE MOSAL
+
+#include "mosal_priv.h"
+#if !defined(__KERNEL__)
+/* user mode */
+#include 
+#endif
+
+////////////////////////////////////////////////////////////////////////////////
+//  Synchronization object
+////////////////////////////////////////////////////////////////////////////////
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_syncobj_init
+ *
+ *  Description:
+ *    init a sync object
+ *
+ *  Parameters:
+ *		obj_p(IN) pointer to the sync object
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+call_result_t MOSAL_syncobj_init(MOSAL_syncobj_t *obj_p)
+{
+	obj_p->event = CreateEvent(
+		NULL,		// default security descriptor
+		TRUE,		// reset type: manual (by MOSAL_syncobj_clear)
+		FALSE,		// initial state: non-signalled
+		NULL		// object name: unnamed
+		);
+	if (obj_p->event == NULL)
+		return MT_ERROR;
+	return MT_OK;
+}
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_syncobj_waiton
+ *
+ *  Description:
+ *    cause the process to sleep until the synchronization object is signalled
+ *    or the time expires
+ *
+ *  Parameters:
+ *		obj_p(IN) pointer to the sync object
+ *		micro_sec(IN) max time to wait in microseconds
+ *
+ *  Returns:
+ *		MT_OK		- woken up by the event
+ *		MT_ETIMEDOUT	- woken up because of a timeout
+ *		MT_ERROR	- any other failure
+ *
+ ******************************************************************************/
+call_result_t MOSAL_syncobj_waiton(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec)
+{
+	DWORD status;
+	DWORD msecs;
+
+	if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE )
+		status = WaitForSingleObject(
+			obj_p->event,	// handle to object
+			INFINITE	// time-out interval
+			);
+	else {
+		msecs = (DWORD)(micro_sec / 1000);
+		if (!msecs)
+			msecs = 1;	// round sub-millisecond waits up to 1 ms
+		status = WaitForSingleObject(
+			obj_p->event,	// handle to object
+			msecs		// time-out interval
+			);
+	}
+	if (status == WAIT_OBJECT_0)
+		return MT_OK;
+	if (status == WAIT_TIMEOUT)
+		return MT_ETIMEDOUT;
+	return MT_ERROR;
+}
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_syncobj_waiton_ui
+ *
+ *  Description:
+ *    cause the process to sleep until the synchronization object is signalled
+ *    or the time expires
+ *
+ *  Parameters:
+ *		obj_p(IN) pointer to the sync object
+ *		micro_sec(IN) max time to wait in microseconds
+ *
+ *  Returns:
+ *		MT_OK		- woken up by the event
+ *		MT_ETIMEDOUT	- woken up because of a timeout
+ *
+ ******************************************************************************/
+call_result_t MOSAL_syncobj_waiton_ui(MOSAL_syncobj_t *obj_p, MT_size_t micro_sec)
+{
+	return MOSAL_syncobj_waiton(obj_p, micro_sec);
+}
+
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_syncobj_signal
+ *
+ *  Description:
+ *    signal the synchronization object
+ *
+ *  Parameters:
+ *		obj_p(IN) pointer to the sync object
+ *
+ *  Returns:
+ *		N/A
+ *
+ ******************************************************************************/
+void MOSAL_syncobj_signal(MOSAL_syncobj_t *obj_p)
+{
+	SetEvent( obj_p->event );
+}
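A short usage sketch may help here (illustrative only, not part of the patch). Because the underlying event is created manual-reset, the object stays signalled after MOSAL_syncobj_signal() until it is explicitly re-armed with MOSAL_syncobj_clear(), so every current waiter is released:

MOSAL_syncobj_t ready;

void consumer(void)
{
    MOSAL_syncobj_init(&ready);
    /* wait up to 2 seconds (the argument is in microseconds) */
    if (MOSAL_syncobj_waiton(&ready, 2000000) == MT_OK) {
        /* ... handle the event ... */
        MOSAL_syncobj_clear(&ready);      /* re-arm for the next round */
    }
}

void producer(void)
{
    MOSAL_syncobj_signal(&ready);         /* wakes every current waiter */
}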
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_syncobj_clear
+ *
+ *  Description:
+ *    reset the sync object (i.e. bring it back to its initial,
+ *    non-signalled state)
+ *
+ *  Parameters:
+ *		obj_p(IN) pointer to the sync object
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+void MOSAL_syncobj_clear(MOSAL_syncobj_t *obj_p)
+{
+	ResetEvent( obj_p->event );
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//  Semaphores
+////////////////////////////////////////////////////////////////////////////////
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_sem_init
+ *
+ *  Description:
+ *    init a semaphore
+ *
+ *  Parameters:
+ *		sem_p(OUT) pointer to the semaphore to be initialized
+ *		count(IN) initial count: the max number of processes that can hold
+ *		          the semaphore at the same time
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+call_result_t MOSAL_sem_init(MOSAL_semaphore_t *sem_p, MT_size_t count)
+{
+	sem_p->sem = CreateSemaphore(
+		NULL,		// default security descriptor
+		(LONG)count,	// initial count
+		LONG_MAX,	// maximum count
+		NULL		// object name: unnamed
+		);
+	if (sem_p->sem != NULL)
+		return MT_OK;
+	return MT_ERROR;
+}
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_sem_acq
+ *
+ *  Description:
+ *    acquire the semaphore
+ *
+ *  Parameters:
+ *		sem_p(IN) pointer to the semaphore
+ *		block(IN) if true, block until the semaphore is acquired;
+ *		          otherwise return immediately
+ *
+ *  Returns:
+ *		MT_OK		- semaphore acquired
+ *		MT_EAGAIN	- semaphore not acquired (only in non-blocking mode)
+ *
+ *******************************************************************************/
+call_result_t MOSAL_sem_acq(MOSAL_semaphore_t *sem_p, MT_bool block)
+{
+	DWORD status;
+
+	if (block) {
+		status = WaitForSingleObject(
+			sem_p->sem,	// handle to object
+			INFINITE	// time-out interval
+			);
+	}
+	else {
+		status = WaitForSingleObject(
+			sem_p->sem,	// handle to object
+			0		// time-out interval
+			);
+	}
+
+	if (status == WAIT_OBJECT_0)
+		return MT_OK;
+	return MT_EAGAIN;
+}
+
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_sem_acq_ui
+ *
+ *  Description:
+ *    acquire the semaphore, waiting uninterruptibly
+ *
+ *  Parameters:
+ *		sem_p(IN) pointer to the semaphore
+ *
+ *  Returns:
+ *
+ *******************************************************************************/
+void MOSAL_sem_acq_ui(MOSAL_semaphore_t *sem_p)
+{
+	WaitForSingleObject( sem_p->sem, INFINITE );
+}
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_sem_acq_to
+ *
+ *  Description:
+ *    acquire the semaphore with a timeout
+ *
+ *  Parameters:
+ *		sem_p(IN) pointer to the semaphore
+ *		micro_sec(IN) timeout in microseconds
+ *
+ *  Returns:
+ *		MT_OK		- semaphore acquired
+ *		MT_EAGAIN	- semaphore not acquired because of a timeout
+ *		MT_ERROR	- semaphore not acquired for any other reason
+ *
+ *******************************************************************************/
+call_result_t MOSAL_sem_acq_to(MOSAL_semaphore_t *sem_p, MT_size_t micro_sec)
+{
+	DWORD status;
+	DWORD msecs;
+
+	if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE )
+		status = WaitForSingleObject(
+			sem_p->sem,	// handle to object
+			INFINITE	// time-out interval
+			);
+	else {
+		msecs = (DWORD)(micro_sec / 1000);
+		if (!msecs)
+			msecs = 1;	// round sub-millisecond waits up to 1 ms
+		status = WaitForSingleObject(
+			sem_p->sem,	// handle to object
+			msecs		// time-out interval
+			);
+	}
+	if (status == WAIT_OBJECT_0)
+		return MT_OK;
+	if (status == WAIT_TIMEOUT)
+		return MT_EAGAIN;
+	return MT_ERROR;
+}
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_sem_rel
+ *
+ *  Description:
+ *    release the semaphore
+ *
+ *  Parameters:
+ *		sem_p(IN) pointer to the semaphore
+ *
+ *  Returns:
+ *		N/A
+ *
+ ******************************************************************************/
+void MOSAL_sem_rel(MOSAL_semaphore_t *sem_p)
+{
+	LONG prev;
+
+	ReleaseSemaphore(
+		sem_p->sem,	// handle to semaphore
+		1,		// count increment amount
+		&prev		// previous count
+		);
+}
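Since the count passed to MOSAL_sem_init() is the number of callers that may hold the semaphore concurrently, the natural use is as a counted resource gate. A minimal sketch (illustrative only, not part of the patch; N is a placeholder for the pool size):

MOSAL_semaphore_t slots;

/* elsewhere, once at startup: MOSAL_sem_init(&slots, N); */

void worker(void)
{
    if (MOSAL_sem_acq_to(&slots, 500000) != MT_OK)   /* wait at most 0.5 s */
        return;                                      /* timed out or error */
    /* ... use one of the N shared resources ... */
    MOSAL_sem_rel(&slots);                           /* free the slot */
}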
+////////////////////////////////////////////////////////////////////////////////
+//  Mutexes
+////////////////////////////////////////////////////////////////////////////////
+
+typedef struct MOSAL_mutex MOSAL_mutex_t;
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_mutex_init
+ *
+ *  Description:
+ *    init a mutex
+ *
+ *  Parameters:
+ *		mtx_p(OUT) pointer to the mutex to be initialized
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_init(MOSAL_mutex_t *mtx_p)
+{
+	mtx_p->mutex = CreateMutex(
+		NULL,		// default security descriptor
+		FALSE,		// do not acquire on creation
+		NULL		// object name: unnamed
+		);
+	if (mtx_p->mutex == NULL)	// creation failures were previously ignored
+		return MT_ERROR;
+	return MT_OK;
+}
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_mutex_acq
+ *
+ *  Description:
+ *    acquire the mutex
+ *
+ *  Parameters:
+ *		mtx_p(IN) pointer to the mutex
+ *		block(IN) if true, block until the mutex is acquired;
+ *		          otherwise return immediately
+ *
+ *  Returns:
+ *		MT_OK		- mutex acquired
+ *		MT_EAGAIN	- mutex not acquired (only in non-blocking mode)
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_acq(MOSAL_mutex_t *mtx_p, MT_bool block)
+{
+	DWORD status;
+
+	if (block) {
+		status = WaitForSingleObject(
+			mtx_p->mutex,	// handle to object
+			INFINITE	// time-out interval
+			);
+	}
+	else {
+		status = WaitForSingleObject(
+			mtx_p->mutex,	// handle to object
+			0		// time-out interval
+			);
+	}
+
+	if (status == WAIT_OBJECT_0)
+		return MT_OK;
+	return MT_EAGAIN;
+}
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_mutex_acq_ui
+ *
+ *  Description:
+ *    acquire the mutex, waiting uninterruptibly
+ *
+ *  Parameters:
+ *		mtx_p(IN) pointer to the mutex
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+void MOSAL_mutex_acq_ui(MOSAL_mutex_t *mtx_p)
+{
+	WaitForSingleObject( mtx_p->mutex, INFINITE );
+}
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_mutex_acq_to
+ *
+ *  Description:
+ *    acquire the mutex, waiting at most micro_sec microseconds
+ *
+ *  Parameters:
+ *		mtx_p(IN) pointer to the mutex
+ *		micro_sec(IN) wait interval
+ *
+ *  Returns:
+ *		MT_OK		- mutex acquired
+ *		MT_ETIMEDOUT	- mutex not acquired because of a timeout
+ *		MT_EINTR	- mutex not acquired because of some other error
+ *
+ ******************************************************************************/
+call_result_t MOSAL_mutex_acq_to(MOSAL_mutex_t *mtx_p, MT_size_t micro_sec)
+{
+	DWORD status;
+	DWORD timeout = (DWORD)((micro_sec + 999) / 1000);	// round up to whole ms
+
+	if ( micro_sec == MOSAL_SYNC_TIMEOUT_INFINITE )
+		status = WaitForSingleObject(
+			mtx_p->mutex,	// handle to object
+			INFINITE	// time-out interval
+			);
+	else
+		status = WaitForSingleObject(
+			mtx_p->mutex,	// handle to object
+			timeout		// time-out interval
+			);
+
+	if (status == WAIT_OBJECT_0)
+		return MT_OK;
+	if (status == WAIT_TIMEOUT)
+		return MT_ETIMEDOUT;
+	return MT_EINTR;
+}
+
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_mutex_rel
+ *
+ *  Description:
+ *    release the mutex
+ *
+ *  Parameters:
+ *		mtx_p(IN) pointer to the mutex
+ *
+ *  Returns:
+ *		N/A
+ *
+ ******************************************************************************/
+void MOSAL_mutex_rel(MOSAL_mutex_t *mtx_p)
+{
+	ReleaseMutex( mtx_p->mutex );
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//  Delay of execution
+////////////////////////////////////////////////////////////////////////////////
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_delay_execution
+ *
+ *  Description:
+ *    delay execution of this control path for the specified time period.
+ *    Note that some implementations perform a busy wait.
+ *
+ *  Parameters:
+ *		time_micro(IN) required delay time in microseconds
+ *
+ *  Returns:
+ *		N/A
+ *
+ ******************************************************************************/
+void MOSAL_delay_execution(u_int32_t time_micro)
+{
+	Sleep( (time_micro + 999)/1000 );	// round up so sub-ms delays do not become 0
+}
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ *
+ * SpinLocks
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+/******************************************************************************
+ *  Function (user-mode only):
+ *		MOSAL_spinlock_init
+ *
+ *  Description:
+ *    initialize a spinlock
+ *
+ *  Parameters:
+ *		sp(IN) MOSAL_spinlock_t*
+ *
+ *  Returns:
+ *
+ ******************************************************************************/
+
+call_result_t MOSAL_spinlock_init(MOSAL_spinlock_t *sp)
+{
+	cl_spinlock_init( sp );
+	return MT_OK;
+}
+
+
+int MOSAL_sleep( u_int32_t sec )
+{
+	if (sec >= MAXDWORD/1000) {
+		MTL_ERROR1("The timeout is too big\n");
+		return 1;
+	}
+
+	Sleep(sec*1000);
+
+	return 0;
+}
+
+//int MOSAL_usleep( u_int32_t usec )
+//{
+//	usleep(usec);
+//	return 0;
+//}
+
+void MOSAL_gettimeofday(MOSAL_time_t * time_p)
+{
+	SYSTEMTIME tv;
+	GetSystemTime(&tv);
+
+	/* note: wSecond is the seconds component (0-59) of the current UTC
+	   time, not a count of seconds since an epoch */
+	time_p->sec = tv.wSecond;
+	time_p->msec = tv.wMilliseconds;
+}
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_thread.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_thread.c
new file mode 100644
index 00000000..edbebf61
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mosal/os_dep/win/mosalu_thread.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mosal_priv.h"
+
+/******************************************************************************
+ *  Function:
+ *		MOSAL_thread_start
+ *
+ *  Description:
+ *    create a thread and run a t-function in its context
+ *
+ *  Parameters:
+ *		mto_p(IN) pointer to the MOSAL thread object
+ *		flags(IN) flags for thread creation
+ *		mtf(IN) t-function
+ *		mtf_ctx(IN) t-function context
+ *
+ *  Returns:
+ *		MT_OK	- thread created
+ *		other	- error on creating the thread
+ *
+ ******************************************************************************/
+static u_int32_t WINAPI ThreadProc(
+	LPVOID lpParameter	// thread data
+	)
+{
+	MOSAL_thread_t *mto_p = (MOSAL_thread_t *)lpParameter;
+	mto_p->res = (u_int32_t)(u_int64_t)mto_p->func(mto_p->func_ctx);
+	MOSAL_syncobj_signal( &mto_p->sync );	// wake MOSAL_thread_wait_for_exit()
+	mto_p->th = 0;				// mark the thread object as no longer running
+	ExitThread(mto_p->res);
+	return mto_p->res;
+}
+
+call_result_t MOSAL_thread_start(
+	MOSAL_thread_t *mto_p,		// pointer to MOSAL thread object
+	u_int32_t flags,		// flags for thread creation
+	MOSAL_thread_func_t mtf,	// t-function name
+	void *mtf_ctx			// t-function context (optional)
+	)
+{
+	// sanity checks
+	if (mtf == NULL)
+		return MT_EINVAL;
+
+	// init the thread object
+	mto_p->func = mtf;
+	mto_p->func_ctx = mtf_ctx;
+	MOSAL_syncobj_init( &mto_p->sync );
+
+	// create and run the thread
+	mto_p->th = CreateThread( NULL, 0, ThreadProc, mto_p, 0, NULL );
+	if (mto_p->th == NULL)
+		return MT_ERROR;
+
+	return MT_OK;
+}
+
+/******************************************************************************
+ *  Function:
+ *		MOSAL_thread_kill
+ *
+ *  Description:
+ *    forcibly terminate the thread
+ *
+ *  Parameters:
+ *		mto_p(IN) pointer to the MOSAL thread object
+ *
+ *  Returns:
+ *		MT_OK		- thread terminated
+ *		MT_ERROR	- a failure on thread termination
+ *
+ ******************************************************************************/
+call_result_t MOSAL_thread_kill(
+	MOSAL_thread_t *mto_p	// pointer to MOSAL thread object
+	)
+ {
+	if (TerminateThread( mto_p->th, 0 ))
+		return MT_OK;
+	return MT_ERROR;
+ }
+
+/******************************************************************************
+ *  Function:
+ *		MOSAL_thread_wait_for_exit
+ *
+ *  Description:
+ *    wait until the thread exits, or until the timeout expires
+ *
+ *  Parameters:
+ *		mto_p(IN) pointer to the MOSAL thread object
+ *		micro_sec(IN) timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS
+ *		exit_code(OUT) return code of the thread
+ *
+ *  Returns:
+ *		MT_OK		- the thread exited; *exit_code holds its result
+ *		MT_ETIMEDOUT	- the wait timed out; the thread is still running
+ *		other		- error
+ *
+ ******************************************************************************/
+call_result_t MOSAL_thread_wait_for_exit(
+	MOSAL_thread_t *mto_p,	// pointer to MOSAL thread object
+	MT_size_t micro_sec,	// timeout in mcs; MOSAL_THREAD_WAIT_FOREVER means ENDLESS
+	u_int32_t *exit_code	// return code of the thread
+	)
+{
+	call_result_t status;
+	status = MOSAL_syncobj_waiton( &mto_p->sync, micro_sec );
+	if
(exit_code != NULL) { + if (status == MT_OK ) + *exit_code = mto_p->res; + else + *exit_code = status; + } + return status; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/MPGA_headers.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/MPGA_headers.h new file mode 100644 index 00000000..e2741b46 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/MPGA_headers.h @@ -0,0 +1,2077 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MPGA_headers_H +#define MPGA_headers_H + + + +typedef u_int8_t nMPGA_bit_t; + + + +struct IB_LRH_p_t{ /* ***** LRH ***** Local Route Header(8 bytes)*/ + nMPGA_bit_t VL[0x04]; /*"Only 4 LS-bits"The virtual lane that the packet is using */ + nMPGA_bit_t LVer[0x04]; /*"Only 4 LS-bits"Link level protocol of the packet*/ + nMPGA_bit_t SL[0x04]; /*"Only 4 LS-bits"Service level requested within the subnet*/ + nMPGA_bit_t reserved1[0x02]; /*"Only 2 LS-bits"Transmitted as 0,ignored on receive. **internally modified** */ + nMPGA_bit_t LNH[0x02]; /*"Only 2 LS-bits"Identifies the headers that follow the LRH. **internally modified** */ + nMPGA_bit_t DLID[0x10]; /*The destination port and path on the local subnet*/ + nMPGA_bit_t reserved2[0x05]; /*"Only 5 LS-bits"Transmitted as 0,ignored on receive.**internally modified** */ + nMPGA_bit_t PktLen[0x0b]; /*"Only 11 LS-bits"The size of tha packet in four-byte words. 
**internally modified** */ + nMPGA_bit_t SLID[0x10]; /*The source port (injection point) on the local subnet*/ +}; + +struct IB_GRH_p_t{ /* **** GRH **** Global Route Header(40 bytes)*/ + nMPGA_bit_t IPVer[0x04]; /*"Only 4 LS-bits"The version og the GRH*/ + nMPGA_bit_t TClass[0x08]; /*Used by IBA to communicate global service level*/ + nMPGA_bit_t FlowLabel[0x14]; /*"Only 20 LS-bits"Sequences of packets requiring special handl*/ + nMPGA_bit_t PayLen[0x10]; /*The length of the packet in bytes **internally modified** */ + nMPGA_bit_t NxtHdr[0x08]; /*Identifies the headers that follow the GRH*/ + nMPGA_bit_t HopLmt[0x08]; /*Bound on the number of hops between subnets*/ + nMPGA_bit_t SGID[0x80]; /*Global indentifier for the source port*/ + nMPGA_bit_t DGID[0x80]; /*Global indentifier for the detination port*/ +}; + +struct IB_BTH_p_t{ /* **** BTH **** Base Transport Header (12 bytes)*/ + nMPGA_bit_t OpCode[0x08]; /*IBA packet type and which extentions follows **internally modified** */ + nMPGA_bit_t SE; /*"Only 1 LS-bit"If an event sould be gen by responder or not*/ + nMPGA_bit_t M; /*"Only 1 LS-bit"Communication migration state*/ + nMPGA_bit_t PadCnt[0x02]; /*"Only 2 LS-bits"Number of bytes that align to 4 byte boundary **internally modified** */ + nMPGA_bit_t TVer[0x04]; /*"Only 4 LS-bits"IBA transport headers version. **internally modified** */ + nMPGA_bit_t P_KEY[0x10]; /*Logical partition associated with this packet*/ + nMPGA_bit_t reserved1[0x08]; /*Transmitted as 0,ignored on receive. Not included in the icrc. **internally modified** */ + nMPGA_bit_t DestQP[0x18]; /*"Only 24 LS-bits"Destination work queu pair number*/ + nMPGA_bit_t A; /*"Only 1 LS-bit"If an ack should be returnd by the responder*/ + nMPGA_bit_t reserved2[0x07]; /*"only 7 LS-bits"Transmitted as 0,ignored .included in icrc. 
**internally modified** */ + nMPGA_bit_t PSN[0x18]; /*"Only 24 LS-bits"detect a missing or duplicate packet*/ +}; + +struct IB_RDETH_p_t{ /* **** RDETH **** (4 bytes)*/ + /*Reliable Datagram Extended Transport Header*/ + nMPGA_bit_t reserved1[0x08]; /*Transmitted as 0,ignored on receive.*/ + nMPGA_bit_t EECnxt[0x18]; /*"Only 24 LS-bits"Which end to end context for this packet*/ +}; + +struct IB_DETH_p_t{ /* **** DETH ****(8 bytes)*/ + /*Datagram Extended Transport Header */ + nMPGA_bit_t Q_Key[0x20]; /*For an authorize access to destination queue*/ + nMPGA_bit_t reserved1[0x08]; /*ransmitted as 0,ignored on receive.*/ + nMPGA_bit_t SrcQP[0x18]; /*"Only 24 LS-bits"Work queu nuber at the source*/ +}; + +struct IB_RETH_p_t{ /* **** RETH ****(16 bytes)*/ + /*RDMA Extended Transport Header */ + nMPGA_bit_t VA[0x40]; /*Virtual address of the RDMA operation*/ + nMPGA_bit_t R_Key[0x20]; /*Remote key that authorize access for the RDMA operation*/ + nMPGA_bit_t DMALen[0x20]; /*The length of the DMA operation*/ +}; + +struct IB_AtomicETH_p_t{ /* **** AtomicETH ****(28 bytes)*/ + /*Atomic Extended Transport Header */ + nMPGA_bit_t VA[0x40]; /*Remote virtual address */ + nMPGA_bit_t R_Key[0x20]; /*Remote key that authorize access to the remote virtual address*/ + nMPGA_bit_t SwapDt[0x40]; /*An operand in atomic operations*/ + nMPGA_bit_t CmpDt[0x40]; /*An operand in cmpswap atomic operation*/ +}; + +struct IB_AETH_p_t{ /* *** ACK ****(4 bytes)*/ + /*ACK Extended Transport Header */ + nMPGA_bit_t Syndrome[0x08]; /*Indicates ACK or NAK plus additional info*/ + nMPGA_bit_t MSN[0x18]; /*Sequence number of the last message completed*/ +}; + +struct IB_AtomicAckETH_p_t{ /* **** AtomicAckETH ****(8 bytes)*/ + /* Atomic ACK Extended Transport Header */ + nMPGA_bit_t OrigRemDt[0x40]; /*Return oprand in atomic operation and contains the data*/ + /*in the remote memory location before the atomic operation*/ +}; + +struct IB_ImmDt_p_t{ /* **** Immediate Data **** (4 bytes)*/ + /* Contains the additional data that is placed in the */ + nMPGA_bit_t ImmDt[0x20]; /* received Completion Queue Element (CQE). */ + /* The ImmDt is Only in Send or RDMA-Write packets. 
*/ +}; + + + +/*IBA LOCAL*/ +struct MPGA_rc_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_rc_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + +/*RDMA Write types*/ +struct MPGA_rc_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rc_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_rc_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rc_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/*RDMA read types*/ +struct MPGA_rc_read_req_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rc_read_res_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rc_read_res_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_rc_read_res_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rc_read_res_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +/* Other Types*/ +struct MPGA_rc_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rc_atomic_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; + struct IB_AtomicAckETH_p_t IB_AtomicAckETH; +}; + +struct MPGA_rc_CmpSwap_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AtomicETH_p_t IB_AtomicETH; +}; + +struct MPGA_rc_FetchAdd_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AtomicETH_p_t IB_AtomicETH; +}; + +/* Unreliable Connection */ +/*Send Types*/ +struct MPGA_uc_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_uc_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/*RDMA Write types*/ +struct MPGA_uc_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + 
struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_uc_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_uc_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_uc_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_uc_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/* Reliable Datagram */ + +/*Send Types*/ +struct MPGA_rd_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_rd_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + +/*RDMA Write types*/ +struct MPGA_rd_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rd_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_rd_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_rd_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rd_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_rd_read_req_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_rd_read_res_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rd_read_res_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; +}; + +struct MPGA_rd_read_res_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t 
IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rd_read_res_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + + +struct MPGA_rd_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_rd_atomic_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; + struct IB_AtomicAckETH_p_t IB_AtomicAckETH_P; +}; + +struct MPGA_rd_CmpSwap_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_AtomicETH_p_t IB_AtomicETH_P; +}; + +struct MPGA_rd_FetchAdd_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_AtomicETH_p_t IB_AtomicETH_P; +}; + +struct MPGA_rd_resync_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +/* Unreliable Datagram */ + +struct MPGA_ud_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_ud_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + + + +/*IBA GLOBAL*/ +struct MPGA_G_rc_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_rc_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + +/*RDMA Write types*/ +struct MPGA_G_rc_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_rc_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_rc_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_rc_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/*RDMA read types*/ +struct MPGA_G_rc_read_req_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; 
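The *_p_t structs in this header describe each field by its width in bits (the array dimensions of nMPGA_bit_t), so the on-the-wire header sizes follow directly from the comments above: LRH 8, GRH 40, BTH 12, RDETH 4, DETH 8, RETH 16, AtomicETH 28, AETH 4, AtomicAckETH 8 and ImmDt 4 bytes. A small sketch (illustrative only, not part of the patch; the enum names are hypothetical) shows how the layered layouts translate into byte counts, e.g. a global (GRH-bearing) RC RDMA-write-first packet carries 8 + 40 + 12 + 16 = 76 header bytes before the payload:

enum {
    IB_LRH_BYTES  = 8,  IB_GRH_BYTES  = 40, IB_BTH_BYTES  = 12,
    IB_RETH_BYTES = 16, IB_AETH_BYTES = 4,  IB_DETH_BYTES = 8
};

/* header bytes of an MPGA_G_rc_write_first_p_t packet: LRH+GRH+BTH+RETH */
static int mpga_G_rc_write_first_hdr_bytes(void)
{
    return IB_LRH_BYTES + IB_GRH_BYTES + IB_BTH_BYTES + IB_RETH_BYTES; /* 76 */
}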
+ +struct MPGA_G_rc_read_res_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_G_rc_read_res_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_rc_read_res_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_G_rc_read_res_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +/* Other Types*/ +struct MPGA_G_rc_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_G_rc_atomic_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AETH_p_t IB_AETH_P; + struct IB_AtomicAckETH_p_t IB_AtomicAckETH; +}; + +struct MPGA_G_rc_CmpSwap_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AtomicETH_p_t IB_AtomicETH; +}; + +struct MPGA_G_rc_FetchAdd_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_AtomicETH_p_t IB_AtomicETH; +}; + +/* Unreliable Connection */ +/*Send Types*/ +struct MPGA_G_uc_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_uc_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/*RDMA Write types*/ +struct MPGA_G_uc_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_uc_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; +}; + +struct MPGA_G_uc_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_uc_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_uc_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +/* Reliable Datagram */ + +/*Send Types*/ +struct MPGA_G_rd_send_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_send_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct 
IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_send_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_send_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_rd_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + +/*RDMA Write types*/ +struct MPGA_G_rd_write_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_rd_write_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_write_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_rd_write_last_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_rd_write_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_rd_write_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + +struct MPGA_G_rd_read_req_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_RETH_p_t IB_RETH_P; +}; + +struct MPGA_G_rd_read_res_first_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_G_rd_read_res_middle_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; +}; + +struct MPGA_G_rd_read_res_last_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct MPGA_G_rd_read_res_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + + +struct MPGA_G_rd_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; +}; + +struct 
MPGA_G_rd_atomic_ack_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_AETH_p_t IB_AETH_P; + struct IB_AtomicAckETH_p_t IB_AtomicAckETH_P; +}; + +struct MPGA_G_rd_CmpSwap_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_AtomicETH_p_t IB_AtomicETH_P; +}; + +struct MPGA_G_rd_FetchAdd_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_AtomicETH_p_t IB_AtomicETH_P; +}; + +struct MPGA_G_rd_resync_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_RDETH_p_t IB_RDETH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +/* Unreliable Datagram */ + +struct MPGA_G_ud_send_only_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_DETH_p_t IB_DETH_P; +}; + +struct MPGA_G_ud_send_only_ImmDt_p_t{ + struct IB_LRH_p_t IB_LRH_P; + struct IB_GRH_p_t IB_GRH_P; + struct IB_BTH_p_t IB_BTH_P; + struct IB_DETH_p_t IB_DETH_P; + struct IB_ImmDt_p_t IB_ImmDt_P; +}; + + +union MPGA_headers_p_t{ + /* IBA LOCAL*/ + /* RC - Reliable Connection - opcode prefix 000*/ /* OpCode */ + struct MPGA_rc_send_first_p_t MPGA_rc_send_first_p; /* 00000 */ + struct MPGA_rc_send_middle_p_t MPGA_rc_send_middle_p; /* 00001 */ + struct MPGA_rc_send_last_p_t MPGA_rc_send_last_p; /* 00010 */ + struct MPGA_rc_send_last_ImmDt_p_t MPGA_rc_send_last_ImmDt_p; /* 00011 */ + struct MPGA_rc_send_only_p_t MPGA_rc_send_only_p; /* 00100 */ + struct MPGA_rc_send_only_ImmDt_p_t MPGA_rc_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_rc_write_first_p_t MPGA_rc_write_first_p; /* 00110 */ + struct MPGA_rc_write_middle_p_t MPGA_rc_write_middle_p; /* 00111 */ + struct MPGA_rc_write_last_p_t MPGA_rc_write_last_p; /* 01000 */ + struct MPGA_rc_write_last_ImmDt_p_t MPGA_rc_write_last_ImmDt_p;/* 01001 */ + struct MPGA_rc_write_only_p_t MPGA_rc_write_only_p; /* 01010 */ + struct MPGA_rc_write_only_ImmDt_p_t MPGA_rc_write_only_ImmDt_p;/* 01011 */ + + struct MPGA_rc_read_req_p_t MPGA_rc_read_req_p; /* 01100 */ + struct MPGA_rc_read_res_first_p_t MPGA_rc_read_res_first_p; /* 01101 */ + struct MPGA_rc_read_res_middle_p_t MPGA_rc_read_res_middle_p; /* 01110 */ + struct MPGA_rc_read_res_last_p_t MPGA_rc_read_res_last_p; /* 01111 */ + struct MPGA_rc_read_res_only_p_t MPGA_rc_read_res_only_p; /* 10000 */ + + struct MPGA_rc_ack_p_t MPGA_rc_ack_p; /* 10001 */ + struct MPGA_rc_atomic_ack_p_t MPGA_rc_atomic_ack_p; /* 10010 */ + struct MPGA_rc_CmpSwap_p_t MPGA_rc_CmpSwap_p; /* 10011 */ + struct MPGA_rc_FetchAdd_p_t MPGA_rc_FetchAdd_p; /* 10100 */ + + /* UC - Unreliable Connection - opcode prefix 001*/ + struct MPGA_uc_send_first_p_t MPGA_uc_send_first_p; /* 00000 */ + struct MPGA_uc_send_middle_p_t MPGA_uc_send_middle_p; /* 00001 */ + struct MPGA_uc_send_last_p_t MPGA_uc_send_last_p; /* 00010 */ + struct MPGA_uc_send_last_ImmDt_p_t MPGA_uc_send_last_ImmDt_p; /* 00011 */ + struct MPGA_uc_send_only_p_t MPGA_uc_send_only_p; /* 00100 */ + struct MPGA_uc_send_only_ImmDt_p_t MPGA_uc_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_uc_write_first_p_t MPGA_uc_write_first_p; /* 00110 */ + struct MPGA_uc_write_middle_p_t MPGA_uc_write_middle_p; /* 00111 */ + struct MPGA_uc_write_last_p_t MPGA_uc_write_last_p; /* 01000 */ + struct MPGA_uc_write_last_ImmDt_p_t 
MPGA_uc_write_last_ImmDt_p;/* 01001 */ + struct MPGA_uc_write_only_p_t MPGA_uc_write_only_p; /* 01010 */ + struct MPGA_uc_write_only_ImmDt_p_t MPGA_uc_write_only_ImmDt_p;/* 01011 */ + + /* RD - Reliable Datagram - opcode prefix 010*/ + struct MPGA_rd_send_first_p_t MPGA_rd_send_first_p; /* 00000 */ + struct MPGA_rd_send_middle_p_t MPGA_rd_send_middle_p; /* 00001 */ + struct MPGA_rd_send_last_p_t MPGA_rd_send_last_p; /* 00010 */ + struct MPGA_rd_send_last_ImmDt_p_t MPGA_rd_send_last_ImmDt_p; /* 00011 */ + struct MPGA_rd_send_only_p_t MPGA_rd_send_only_p; /* 00100 */ + struct MPGA_rd_send_only_ImmDt_p_t MPGA_rd_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_rd_write_first_p_t MPGA_rd_write_first_p; /* 00110 */ + struct MPGA_rd_write_middle_p_t MPGA_rd_write_middle_p; /* 00111 */ + struct MPGA_rd_write_last_p_t MPGA_rd_write_last_p; /* 01000 */ + struct MPGA_rd_write_last_ImmDt_p_t MPGA_rd_write_last_ImmDt_p;/* 01001 */ + struct MPGA_rd_write_only_p_t MPGA_rd_write_only_p; /* 01010 */ + struct MPGA_rd_write_only_ImmDt_p_t MPGA_rd_write_only_ImmDt_p;/* 01011 */ + + struct MPGA_rd_read_req_p_t MPGA_rd_read_req_p; /* 01100 */ + struct MPGA_rd_read_res_first_p_t MPGA_rd_read_res_first_p; /* 01101 */ + struct MPGA_rd_read_res_middle_p_t MPGA_rd_read_res_middle_p; /* 01110 */ + struct MPGA_rd_read_res_last_p_t MPGA_rd_read_res_last_p; /* 01111 */ + struct MPGA_rd_read_res_only_p_t MPGA_rd_read_res_only_p; /* 10000 */ + + struct MPGA_rd_ack_p_t MPGA_rd_ack_p; /* 10001 */ + struct MPGA_rd_atomic_ack_p_t MPGA_rd_atomic_ack_p; /* 10010 */ + struct MPGA_rd_CmpSwap_p_t MPGA_rd_CmpSwap_p; /* 10011 */ + struct MPGA_rd_FetchAdd_p_t MPGA_rd_FetchAdd_p; /* 10100 */ + struct MPGA_rd_resync_p_t MPGA_rd_resync_p; /* 10101 */ + + /* UD - UnReliable Datagram - opcode prefix 011*/ + struct MPGA_ud_send_only_p_t MPGA_ud_send_only_p; /* 00100 */ + struct MPGA_ud_send_only_ImmDt_p_t MPGA_ud_send_only_ImmDt_p; /* 00101 */ + + + /*IBA GLOBAL*/ + /* RC - Reliable Connection - opcode prefix 000*/ /* OpCode */ + struct MPGA_G_rc_send_first_p_t MPGA_G_rc_send_first_p; /* 00000 */ + struct MPGA_G_rc_send_middle_p_t MPGA_G_rc_send_middle_p; /* 00001 */ + struct MPGA_G_rc_send_last_p_t MPGA_G_rc_send_last_p; /* 00010 */ + struct MPGA_G_rc_send_last_ImmDt_p_t MPGA_G_rc_send_last_ImmDt_p; /* 00011 */ + struct MPGA_G_rc_send_only_p_t MPGA_G_rc_send_only_p; /* 00100 */ + struct MPGA_G_rc_send_only_ImmDt_p_t MPGA_G_rc_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_G_rc_write_first_p_t MPGA_G_rc_write_first_p; /* 00110 */ + struct MPGA_G_rc_write_middle_p_t MPGA_G_rc_write_middle_p; /* 00111 */ + struct MPGA_G_rc_write_last_p_t MPGA_G_rc_write_last_p; /* 01000 */ + struct MPGA_G_rc_write_last_ImmDt_p_t MPGA_G_rc_write_last_ImmDt_p;/* 01001 */ + struct MPGA_G_rc_write_only_p_t MPGA_G_rc_write_only_p; /* 01010 */ + struct MPGA_G_rc_write_only_ImmDt_p_t MPGA_G_rc_write_only_ImmDt_p;/* 01011 */ + + struct MPGA_G_rc_read_req_p_t MPGA_G_rc_read_req_p; /* 01100 */ + struct MPGA_G_rc_read_res_first_p_t MPGA_G_rc_read_res_first_p; /* 01101 */ + struct MPGA_G_rc_read_res_middle_p_t MPGA_G_rc_read_res_middle_p; /* 01110 */ + struct MPGA_G_rc_read_res_last_p_t MPGA_G_rc_read_res_last_p; /* 01111 */ + struct MPGA_G_rc_read_res_only_p_t MPGA_G_rc_read_res_only_p; /* 10000 */ + + struct MPGA_G_rc_ack_p_t MPGA_G_rc_ack_p; /* 10001 */ + struct MPGA_G_rc_atomic_ack_p_t MPGA_G_rc_atomic_ack_p; /* 10010 */ + struct MPGA_G_rc_CmpSwap_p_t MPGA_G_rc_CmpSwap_p; /* 10011 */ + struct MPGA_G_rc_FetchAdd_p_t MPGA_G_rc_FetchAdd_p; /* 10100 */ + + /* UC - 
Unreliable Connection - opcode prefix 001*/ + struct MPGA_G_uc_send_first_p_t MPGA_G_uc_send_first_p; /* 00000 */ + struct MPGA_G_uc_send_middle_p_t MPGA_G_uc_send_middle_p; /* 00001 */ + struct MPGA_G_uc_send_last_p_t MPGA_G_uc_send_last_p; /* 00010 */ + struct MPGA_G_uc_send_last_ImmDt_p_t MPGA_G_uc_send_last_ImmDt_p; /* 00011 */ + struct MPGA_G_uc_send_only_p_t MPGA_G_uc_send_only_p; /* 00100 */ + struct MPGA_G_uc_send_only_ImmDt_p_t MPGA_G_uc_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_G_uc_write_first_p_t MPGA_G_uc_write_first_p; /* 00110 */ + struct MPGA_G_uc_write_middle_p_t MPGA_G_uc_write_middle_p; /* 00111 */ + struct MPGA_G_uc_write_last_p_t MPGA_G_uc_write_last_p; /* 01000 */ + struct MPGA_G_uc_write_last_ImmDt_p_t MPGA_G_uc_write_last_ImmDt_p;/* 01001 */ + struct MPGA_G_uc_write_only_p_t MPGA_G_uc_write_only_p; /* 01010 */ + struct MPGA_G_uc_write_only_ImmDt_p_t MPGA_G_uc_write_only_ImmDt_p;/* 01011 */ + + /* RD - Reliable Datagram - opcode prefix 010*/ + struct MPGA_G_rd_send_first_p_t MPGA_G_rd_send_first_p; /* 00000 */ + struct MPGA_G_rd_send_middle_p_t MPGA_G_rd_send_middle_p; /* 00001 */ + struct MPGA_G_rd_send_last_p_t MPGA_G_rd_send_last_p; /* 00010 */ + struct MPGA_G_rd_send_last_ImmDt_p_t MPGA_G_rd_send_last_ImmDt_p; /* 00011 */ + struct MPGA_G_rd_send_only_p_t MPGA_G_rd_send_only_p; /* 00100 */ + struct MPGA_G_rd_send_only_ImmDt_p_t MPGA_G_rd_send_only_ImmDt_p; /* 00101 */ + + struct MPGA_G_rd_write_first_p_t MPGA_G_rd_write_first_p; /* 00110 */ + struct MPGA_G_rd_write_middle_p_t MPGA_G_rd_write_middle_p; /* 00111 */ + struct MPGA_G_rd_write_last_p_t MPGA_G_rd_write_last_p; /* 01000 */ + struct MPGA_G_rd_write_last_ImmDt_p_t MPGA_G_rd_write_last_ImmDt_p;/* 01001 */ + struct MPGA_G_rd_write_only_p_t MPGA_G_rd_write_only_p; /* 01010 */ + struct MPGA_G_rd_write_only_ImmDt_p_t MPGA_G_rd_write_only_ImmDt_p;/* 01011 */ + + struct MPGA_G_rd_read_req_p_t MPGA_G_rd_read_req_p; /* 01100 */ + struct MPGA_G_rd_read_res_first_p_t MPGA_G_rd_read_res_first_p; /* 01101 */ + struct MPGA_G_rd_read_res_middle_p_t MPGA_G_rd_read_res_middle_p; /* 01110 */ + struct MPGA_G_rd_read_res_last_p_t MPGA_G_rd_read_res_last_p; /* 01111 */ + struct MPGA_G_rd_read_res_only_p_t MPGA_G_rd_read_res_only_p; /* 10000 */ + + struct MPGA_G_rd_ack_p_t MPGA_G_rd_ack_p; /* 10001 */ + struct MPGA_G_rd_atomic_ack_p_t MPGA_G_rd_atomic_ack_p; /* 10010 */ + struct MPGA_G_rd_CmpSwap_p_t MPGA_G_rd_CmpSwap_p; /* 10011 */ + struct MPGA_G_rd_FetchAdd_p_t MPGA_G_rd_FetchAdd_p; /* 10100 */ + struct MPGA_G_rd_resync_p_t MPGA_G_rd_resync_p; /* 10101 */ + + /* UD - UnReliable Datagram - opcode prefix 011*/ + struct MPGA_G_ud_send_only_p_t MPGA_G_ud_send_only_p; /* 00100 */ + struct MPGA_G_ud_send_only_ImmDt_p_t MPGA_G_ud_send_only_ImmDt_p; /* 00101 */ + + +}; + + + +/* IBA LOCAL */ +/* RC - Reliable Connected types */ +/*-------------------------------*/ +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rc_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rc_send_only_ImmDt_t; + + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st 
IB_RETH; +} MPGA_rc_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rc_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_rc_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rc_write_only_ImmDt_t; +/*RDMA read types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_rc_read_req_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_rc_read_res_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_rc_read_res_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_rc_read_res_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_rc_read_res_only_t; +/* Other Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_rc_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; + IB_AtomicAckETH_st IB_AtomicAckETH; +} MPGA_rc_atomic_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_rc_CmpSwap_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_rc_FetchAdd_t; + +/* Unreliable Connection */ +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_uc_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_uc_send_only_ImmDt_t; + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_uc_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; +} MPGA_uc_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_uc_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_uc_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_uc_write_only_ImmDt_t; + +/* Reliable Datagram */ + +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rd_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; 
+ IB_DETH_st IB_DETH; +} MPGA_rd_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rd_send_only_ImmDt_t; + + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_rd_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rd_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_rd_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_rd_write_only_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_rd_read_req_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_rd_read_res_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; +} MPGA_rd_read_res_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_rd_read_res_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_rd_read_res_only_t; + + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_rd_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; + IB_AtomicAckETH_st IB_AtomicAckETH; +} MPGA_rd_atomic_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_rd_CmpSwap_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_rd_FetchAdd_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_rd_resync_t; + +/* Unreliable Datagram */ + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_DETH_st IB_DETH; +} MPGA_ud_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_BTH_st IB_BTH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_ud_send_only_ImmDt_t; + + +/* IBA GLOBAL */ +/* RC - Reliable Connected types */ +/*-------------------------------*/ +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rc_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st 
IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rc_send_only_ImmDt_t; + + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_G_rc_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rc_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_G_rc_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rc_write_only_ImmDt_t; +/*RDMA read types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_G_rc_read_req_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_G_rc_read_res_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_rc_read_res_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_G_rc_read_res_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_G_rc_read_res_only_t; +/* Other Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; +} MPGA_G_rc_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AETH_st IB_AETH; + IB_AtomicAckETH_st IB_AtomicAckETH; +} MPGA_G_rc_atomic_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_G_rc_CmpSwap_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_G_rc_FetchAdd_t; + +/* Unreliable Connection */ +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_uc_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_uc_send_only_ImmDt_t; + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_G_uc_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; +} MPGA_G_uc_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_uc_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; +} MPGA_G_uc_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + 
IB_BTH_st IB_BTH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_uc_write_only_ImmDt_t; + +/* Reliable Datagram */ + +/*Send Types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_send_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_send_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_send_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rd_send_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rd_send_only_ImmDt_t; + + +/*RDMA Write types*/ +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_G_rd_write_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_write_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_write_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rd_write_last_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_G_rd_write_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_rd_write_only_ImmDt_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_RETH_st IB_RETH; +} MPGA_G_rd_read_req_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_G_rd_read_res_first_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; +} MPGA_G_rd_read_res_middle_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_G_rd_read_res_last_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_G_rd_read_res_only_t; + + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; +} MPGA_G_rd_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_AETH_st IB_AETH; + IB_AtomicAckETH_st IB_AtomicAckETH; +} MPGA_G_rd_atomic_ack_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_G_rd_CmpSwap_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + 
IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; + IB_AtomicETH_st IB_AtomicETH; +} MPGA_G_rd_FetchAdd_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_RDETH_st IB_RDETH; + IB_DETH_st IB_DETH; +} MPGA_G_rd_resync_t; + +/* Unreliable Datagram */ + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_DETH_st IB_DETH; +} MPGA_G_ud_send_only_t; + +typedef struct { + IB_LRH_st IB_LRH; + IB_GRH_st IB_GRH; + IB_BTH_st IB_BTH; + IB_DETH_st IB_DETH; + IB_ImmDt_st IB_ImmDt; +} MPGA_G_ud_send_only_ImmDt_t; + + +typedef union { + + /*IBA LOCAL*/ + /* RC - Reliable Connection - opcode prefix 000*/ /* OpCode */ + MPGA_rc_send_first_t MPGA_rc_send_first; /* 00000 */ + MPGA_rc_send_middle_t MPGA_rc_send_middle; /* 00001 */ + MPGA_rc_send_last_t MPGA_rc_send_last; /* 00010 */ + MPGA_rc_send_last_ImmDt_t MPGA_rc_send_last_ImmDt; /* 00011 */ + MPGA_rc_send_only_t MPGA_rc_send_only; /* 00100 */ + MPGA_rc_send_only_ImmDt_t MPGA_rc_send_only_ImmDt; /* 00101 */ + + MPGA_rc_write_first_t MPGA_rc_write_first; /* 00110 */ + MPGA_rc_write_middle_t MPGA_rc_write_middle; /* 00111 */ + MPGA_rc_write_last_t MPGA_rc_write_last; /* 01000 */ + MPGA_rc_write_last_ImmDt_t MPGA_rc_write_last_ImmDt;/* 01001 */ + MPGA_rc_write_only_t MPGA_rc_write_only; /* 01010 */ + MPGA_rc_write_only_ImmDt_t MPGA_rc_write_only_ImmDt;/* 01011 */ + + MPGA_rc_read_req_t MPGA_rc_read_req; /* 01100 */ + MPGA_rc_read_res_first_t MPGA_rc_read_res_first; /* 01101 */ + MPGA_rc_read_res_middle_t MPGA_rc_read_res_middle; /* 01110 */ + MPGA_rc_read_res_last_t MPGA_rc_read_res_last; /* 01111 */ + MPGA_rc_read_res_only_t MPGA_rc_read_res_only; /* 10000 */ + + MPGA_rc_ack_t MPGA_rc_ack; /* 10001 */ + MPGA_rc_atomic_ack_t MPGA_rc_atomic_ack; /* 10010 */ + MPGA_rc_CmpSwap_t MPGA_rc_CmpSwap; /* 10011 */ + MPGA_rc_FetchAdd_t MPGA_rc_FetchAdd; /* 10100 */ + + /* UC - Unreliable Connection - opcode prefix 001*/ + MPGA_uc_send_first_t MPGA_uc_send_first; /* 00000 */ + MPGA_uc_send_middle_t MPGA_uc_send_middle; /* 00001 */ + MPGA_uc_send_last_t MPGA_uc_send_last; /* 00010 */ + MPGA_uc_send_last_ImmDt_t MPGA_uc_send_last_ImmDt; /* 00011 */ + MPGA_uc_send_only_t MPGA_uc_send_only; /* 00100 */ + MPGA_uc_send_only_ImmDt_t MPGA_uc_send_only_ImmDt; /* 00101 */ + + MPGA_uc_write_first_t MPGA_uc_write_first; /* 00110 */ + MPGA_uc_write_middle_t MPGA_uc_write_middle; /* 00111 */ + MPGA_uc_write_last_t MPGA_uc_write_last; /* 01000 */ + MPGA_uc_write_last_ImmDt_t MPGA_uc_write_last_ImmDt;/* 01001 */ + MPGA_uc_write_only_t MPGA_uc_write_only; /* 01010 */ + MPGA_uc_write_only_ImmDt_t MPGA_uc_write_only_ImmDt;/* 01011 */ + + /* RD - Reliable Datagram - opcode prefix 010*/ + MPGA_rd_send_first_t MPGA_rd_send_first; /* 00000 */ + MPGA_rd_send_middle_t MPGA_rd_send_middle; /* 00001 */ + MPGA_rd_send_last_t MPGA_rd_send_last; /* 00010 */ + MPGA_rd_send_last_ImmDt_t MPGA_rd_send_last_ImmDt; /* 00011 */ + MPGA_rd_send_only_t MPGA_rd_send_only; /* 00100 */ + MPGA_rd_send_only_ImmDt_t MPGA_rd_send_only_ImmDt; /* 00101 */ + + MPGA_rd_write_first_t MPGA_rd_write_first; /* 00110 */ + MPGA_rd_write_middle_t MPGA_rd_write_middle; /* 00111 */ + MPGA_rd_write_last_t MPGA_rd_write_last; /* 01000 */ + MPGA_rd_write_last_ImmDt_t MPGA_rd_write_last_ImmDt;/* 01001 */ + MPGA_rd_write_only_t MPGA_rd_write_only; /* 01010 */ + MPGA_rd_write_only_ImmDt_t MPGA_rd_write_only_ImmDt;/* 01011 */ + + MPGA_rd_read_req_t MPGA_rd_read_req; /* 01100 */ + MPGA_rd_read_res_first_t MPGA_rd_read_res_first; /* 01101 */ + 
MPGA_rd_read_res_middle_t MPGA_rd_read_res_middle; /* 01110 */
+ MPGA_rd_read_res_last_t MPGA_rd_read_res_last; /* 01111 */
+ MPGA_rd_read_res_only_t MPGA_rd_read_res_only; /* 10000 */
+
+ MPGA_rd_ack_t MPGA_rd_ack; /* 10001 */
+ MPGA_rd_atomic_ack_t MPGA_rd_atomic_ack; /* 10010 */
+ MPGA_rd_CmpSwap_t MPGA_rd_CmpSwap; /* 10011 */
+ MPGA_rd_FetchAdd_t MPGA_rd_FetchAdd; /* 10100 */
+ MPGA_rd_resync_t MPGA_rd_resync; /* 10101 */
+
+ /* UD - Unreliable Datagram - opcode prefix 011*/
+ MPGA_ud_send_only_t MPGA_ud_send_only; /* 00100 */
+ MPGA_ud_send_only_ImmDt_t MPGA_ud_send_only_ImmDt; /* 00101 */
+
+
+
+ /* IBA GLOBAL */
+ /* RC - Reliable Connection - opcode prefix 000*/ /* OpCode */
+ MPGA_G_rc_send_first_t MPGA_G_rc_send_first; /* 00000 */
+ MPGA_G_rc_send_middle_t MPGA_G_rc_send_middle; /* 00001 */
+ MPGA_G_rc_send_last_t MPGA_G_rc_send_last; /* 00010 */
+ MPGA_G_rc_send_last_ImmDt_t MPGA_G_rc_send_last_ImmDt; /* 00011 */
+ MPGA_G_rc_send_only_t MPGA_G_rc_send_only; /* 00100 */
+ MPGA_G_rc_send_only_ImmDt_t MPGA_G_rc_send_only_ImmDt; /* 00101 */
+
+ MPGA_G_rc_write_first_t MPGA_G_rc_write_first; /* 00110 */
+ MPGA_G_rc_write_middle_t MPGA_G_rc_write_middle; /* 00111 */
+ MPGA_G_rc_write_last_t MPGA_G_rc_write_last; /* 01000 */
+ MPGA_G_rc_write_last_ImmDt_t MPGA_G_rc_write_last_ImmDt;/* 01001 */
+ MPGA_G_rc_write_only_t MPGA_G_rc_write_only; /* 01010 */
+ MPGA_G_rc_write_only_ImmDt_t MPGA_G_rc_write_only_ImmDt;/* 01011 */
+
+ MPGA_G_rc_read_req_t MPGA_G_rc_read_req; /* 01100 */
+ MPGA_G_rc_read_res_first_t MPGA_G_rc_read_res_first; /* 01101 */
+ MPGA_G_rc_read_res_middle_t MPGA_G_rc_read_res_middle; /* 01110 */
+ MPGA_G_rc_read_res_last_t MPGA_G_rc_read_res_last; /* 01111 */
+ MPGA_G_rc_read_res_only_t MPGA_G_rc_read_res_only; /* 10000 */
+
+ MPGA_G_rc_ack_t MPGA_G_rc_ack; /* 10001 */
+ MPGA_G_rc_atomic_ack_t MPGA_G_rc_atomic_ack; /* 10010 */
+ MPGA_G_rc_CmpSwap_t MPGA_G_rc_CmpSwap; /* 10011 */
+ MPGA_G_rc_FetchAdd_t MPGA_G_rc_FetchAdd; /* 10100 */
+
+ /* UC - Unreliable Connection - opcode prefix 001*/
+ MPGA_G_uc_send_first_t MPGA_G_uc_send_first; /* 00000 */
+ MPGA_G_uc_send_middle_t MPGA_G_uc_send_middle; /* 00001 */
+ MPGA_G_uc_send_last_t MPGA_G_uc_send_last; /* 00010 */
+ MPGA_G_uc_send_last_ImmDt_t MPGA_G_uc_send_last_ImmDt; /* 00011 */
+ MPGA_G_uc_send_only_t MPGA_G_uc_send_only; /* 00100 */
+ MPGA_G_uc_send_only_ImmDt_t MPGA_G_uc_send_only_ImmDt; /* 00101 */
+
+ MPGA_G_uc_write_first_t MPGA_G_uc_write_first; /* 00110 */
+ MPGA_G_uc_write_middle_t MPGA_G_uc_write_middle; /* 00111 */
+ MPGA_G_uc_write_last_t MPGA_G_uc_write_last; /* 01000 */
+ MPGA_G_uc_write_last_ImmDt_t MPGA_G_uc_write_last_ImmDt;/* 01001 */
+ MPGA_G_uc_write_only_t MPGA_G_uc_write_only; /* 01010 */
+ MPGA_G_uc_write_only_ImmDt_t MPGA_G_uc_write_only_ImmDt;/* 01011 */
+
+ /* RD - Reliable Datagram - opcode prefix 010*/
+ MPGA_G_rd_send_first_t MPGA_G_rd_send_first; /* 00000 */
+ MPGA_G_rd_send_middle_t MPGA_G_rd_send_middle; /* 00001 */
+ MPGA_G_rd_send_last_t MPGA_G_rd_send_last; /* 00010 */
+ MPGA_G_rd_send_last_ImmDt_t MPGA_G_rd_send_last_ImmDt; /* 00011 */
+ MPGA_G_rd_send_only_t MPGA_G_rd_send_only; /* 00100 */
+ MPGA_G_rd_send_only_ImmDt_t MPGA_G_rd_send_only_ImmDt; /* 00101 */
+
+ MPGA_G_rd_write_first_t MPGA_G_rd_write_first; /* 00110 */
+ MPGA_G_rd_write_middle_t MPGA_G_rd_write_middle; /* 00111 */
+ MPGA_G_rd_write_last_t MPGA_G_rd_write_last; /* 01000 */
+ MPGA_G_rd_write_last_ImmDt_t MPGA_G_rd_write_last_ImmDt;/* 01001 */
+ MPGA_G_rd_write_only_t MPGA_G_rd_write_only; /* 01010 */
+ MPGA_G_rd_write_only_ImmDt_t MPGA_G_rd_write_only_ImmDt;/* 01011 */
+
+ MPGA_G_rd_read_req_t MPGA_G_rd_read_req; /* 01100 */
+ MPGA_G_rd_read_res_first_t MPGA_G_rd_read_res_first; /* 01101 */
+ MPGA_G_rd_read_res_middle_t MPGA_G_rd_read_res_middle; /* 01110 */
+ MPGA_G_rd_read_res_last_t MPGA_G_rd_read_res_last; /* 01111 */
+ MPGA_G_rd_read_res_only_t MPGA_G_rd_read_res_only; /* 10000 */
+
+ MPGA_G_rd_ack_t MPGA_G_rd_ack; /* 10001 */
+ MPGA_G_rd_atomic_ack_t MPGA_G_rd_atomic_ack; /* 10010 */
+ MPGA_G_rd_CmpSwap_t MPGA_G_rd_CmpSwap; /* 10011 */
+ MPGA_G_rd_FetchAdd_t MPGA_G_rd_FetchAdd; /* 10100 */
+ MPGA_G_rd_resync_t MPGA_G_rd_resync; /* 10101 */
+
+ /* UD - Unreliable Datagram - opcode prefix 011*/
+ MPGA_G_ud_send_only_t MPGA_G_ud_send_only; /* 00100 */
+ MPGA_G_ud_send_only_ImmDt_t MPGA_G_ud_send_only_ImmDt; /* 00101 */
+
+} MPGA_headers_t;
+
+#endif /* MPGA_headers_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h
new file mode 100644
index 00000000..03b9b27d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ib_opcodes.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#ifndef __IB_OPCODES_H
+#define __IB_OPCODES_H
+
+/***********************************************/
+/* Define all base transport OpCode fields */
+/***********************************************/
+
+typedef u_int8_t IB_opcode_t;
+
+typedef enum{
+
+ IB_ST_RC = 0, /* Reliable Connection (RC). */
+
+ IB_ST_UC = 1, /* Unreliable Connection (UC). */
+
+ IB_ST_RD = 2, /* Reliable Datagram (RD). */
+
+ IB_ST_UD = 3 /* Unreliable Datagram (UD).
*/ + +} IB_service_type_t; +/***********************************************/ +/* reliable Connection (RC) */ +/***********************************************/ +#define RC_SEND_FIRST_OP 0x00 +#define RC_SEND_MIDDLE_OP 0x01 +#define RC_SEND_LAST_OP 0x02 +#define RC_SEND_LAST_W_IM_OP 0x03 +#define RC_SEND_ONLY_OP 0x04 +#define RC_SEND_ONLY_W_IM_OP 0x05 + +#define RC_WRITE_FIRST_OP 0x06 +#define RC_WRITE_MIDDLE_OP 0x07 +#define RC_WRITE_LAST_OP 0x08 +#define RC_WRITE_LAST_W_IM_OP 0x09 +#define RC_WRITE_ONLY_OP 0x0A +#define RC_WRITE_ONLY_W_IM_OP 0x0B + +#define RC_READ_REQ_OP 0x0C +#define RC_READ_RESP_FIRST_OP 0x0D +#define RC_READ_RESP_MIDDLE_OP 0x0E +#define RC_READ_RESP_LAST_OP 0x0F +#define RC_READ_RESP_ONLY_OP 0x10 + +#define RC_ACKNOWLEDGE_OP 0x11 +#define RC_ATOMIC_ACKNOWLEDGE_OP 0x12 + +#define RC_CMP_SWAP_OP 0x13 +#define RC_FETCH_ADD_OP 0x14 + +/***********************************************/ +/* Unreliable Connection (UC) */ +/***********************************************/ + +#define UC_SEND_FIRST_OP 0x20 +#define UC_SEND_MIDDLE_OP 0x21 +#define UC_SEND_LAST_OP 0x22 +#define UC_SEND_LAST_W_IM_OP 0x23 +#define UC_SEND_ONLY_OP 0x24 +#define UC_SEND_ONLY_W_IM_OP 0x25 + +#define UC_WRITE_FIRST_OP 0x26 +#define UC_WRITE_MIDDLE_OP 0x27 +#define UC_WRITE_LAST_OP 0x28 +#define UC_WRITE_LAST_W_IM_OP 0x29 +#define UC_WRITE_ONLY_OP 0x2A +#define UC_WRITE_ONLY_W_IM_OP 0x2B + +/***********************************************/ +/* Reliable Datagram (RD) */ +/***********************************************/ + +#define RD_SEND_FIRST_OP 0x40 +#define RD_SEND_MIDDLE_OP 0x41 +#define RD_SEND_LAST_OP 0x42 +#define RD_SEND_LAST_W_IM_OP 0x43 +#define RD_SEND_ONLY_OP 0x44 +#define RD_SEND_ONLY_W_IM_OP 0x45 + +#define RD_WRITE_FIRST_OP 0x46 +#define RD_WRITE_MIDDLE_OP 0x47 +#define RD_WRITE_LAST_OP 0x48 +#define RD_WRITE_LAST_W_IM_OP 0x49 +#define RD_WRITE_ONLY_OP 0x4A +#define RD_WRITE_ONLY_W_IM_OP 0x4B + +#define RD_READ_REQ_OP 0x4C +#define RD_READ_RESP_FIRST_OP 0x4D +#define RD_READ_RESP_MIDDLE_OP 0x4E +#define RD_READ_RESP_LAST_OP 0x4F +#define RD_READ_RESP_ONLY_OP 0x50 + +#define RD_ACKNOWLEDGE_OP 0x51 +#define RD_ATOMIC_ACKNOWLEDGE_OP 0x52 + +#define RD_CMP_SWAP_OP 0x53 +#define RD_FETCH_ADD_OP 0x54 + +/***********************************************/ +/* Unreliable Datagram (UD) */ +/***********************************************/ + +#define UD_SEND_ONLY_OP 0x64 +#define UD_SEND_ONLY_W_IM_OP 0x65 + + + +#endif /* __IB_OPCODES_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.c new file mode 100644 index 00000000..9cf39736 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.c @@ -0,0 +1,548 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef MT_KERNEL + + #include + #include + +#endif /* MT_KERNEL */ + +/* MPGA Includes */ +#include + +/* Layers Includes */ +#include +#ifdef __WIN__ +#include +#endif + + + +/*For the calc of the ICRC */ +static u_int32_t crc32_table[256] = { /* The Polynomial used is 0x04C11DB7 seed 0xFFFFFFFF */ +0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, +0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, +0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, +0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, +0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, +0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, +0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, +0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, +0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, +0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, +0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, +0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, +0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, +0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, +0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, +0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, +0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, +0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, +0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, +0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, +0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, +0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, +0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, +0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, +0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, +0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, +0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, +0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, +0xA00AE278, 
0xD70DD2EE, 0x4E048354, 0x3903B3C2, 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, +0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, +0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, +0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D +}; + +static u_int16_t crc16_table[256] = { /* The Polynomial used is 0x100B seed 0xFFFF */ + 0x0000, 0x1BA1, 0x3742, 0x2CE3, 0x6E84, 0x7525, 0x59C6, 0x4267, + 0xDD08, 0xC6A9, 0xEA4A, 0xF1EB, 0xB38C, 0xA82D, 0x84CE, 0x9F6F, + 0x1A01, 0x01A0, 0x2D43, 0x36E2, 0x7485, 0x6F24, 0x43C7, 0x5866, + 0xC709, 0xDCA8, 0xF04B, 0xEBEA, 0xA98D, 0xB22C, 0x9ECF, 0x856E, + 0x3402, 0x2FA3, 0x0340, 0x18E1, 0x5A86, 0x4127, 0x6DC4, 0x7665, + 0xE90A, 0xF2AB, 0xDE48, 0xC5E9, 0x878E, 0x9C2F, 0xB0CC, 0xAB6D, + 0x2E03, 0x35A2, 0x1941, 0x02E0, 0x4087, 0x5B26, 0x77C5, 0x6C64, + 0xF30B, 0xE8AA, 0xC449, 0xDFE8, 0x9D8F, 0x862E, 0xAACD, 0xB16C, + 0x6804, 0x73A5, 0x5F46, 0x44E7, 0x0680, 0x1D21, 0x31C2, 0x2A63, + 0xB50C, 0xAEAD, 0x824E, 0x99EF, 0xDB88, 0xC029, 0xECCA, 0xF76B, + 0x7205, 0x69A4, 0x4547, 0x5EE6, 0x1C81, 0x0720, 0x2BC3, 0x3062, + 0xAF0D, 0xB4AC, 0x984F, 0x83EE, 0xC189, 0xDA28, 0xF6CB, 0xED6A, + 0x5C06, 0x47A7, 0x6B44, 0x70E5, 0x3282, 0x2923, 0x05C0, 0x1E61, + 0x810E, 0x9AAF, 0xB64C, 0xADED, 0xEF8A, 0xF42B, 0xD8C8, 0xC369, + 0x4607, 0x5DA6, 0x7145, 0x6AE4, 0x2883, 0x3322, 0x1FC1, 0x0460, + 0x9B0F, 0x80AE, 0xAC4D, 0xB7EC, 0xF58B, 0xEE2A, 0xC2C9, 0xD968, + 0xD008, 0xCBA9, 0xE74A, 0xFCEB, 0xBE8C, 0xA52D, 0x89CE, 0x926F, + 0x0D00, 0x16A1, 0x3A42, 0x21E3, 0x6384, 0x7825, 0x54C6, 0x4F67, + 0xCA09, 0xD1A8, 0xFD4B, 0xE6EA, 0xA48D, 0xBF2C, 0x93CF, 0x886E, + 0x1701, 0x0CA0, 0x2043, 0x3BE2, 0x7985, 0x6224, 0x4EC7, 0x5566, + 0xE40A, 0xFFAB, 0xD348, 0xC8E9, 0x8A8E, 0x912F, 0xBDCC, 0xA66D, + 0x3902, 0x22A3, 0x0E40, 0x15E1, 0x5786, 0x4C27, 0x60C4, 0x7B65, + 0xFE0B, 0xE5AA, 0xC949, 0xD2E8, 0x908F, 0x8B2E, 0xA7CD, 0xBC6C, + 0x2303, 0x38A2, 0x1441, 0x0FE0, 0x4D87, 0x5626, 0x7AC5, 0x6164, + 0xB80C, 0xA3AD, 0x8F4E, 0x94EF, 0xD688, 0xCD29, 0xE1CA, 0xFA6B, + 0x6504, 0x7EA5, 0x5246, 0x49E7, 0x0B80, 0x1021, 0x3CC2, 0x2763, + 0xA20D, 0xB9AC, 0x954F, 0x8EEE, 0xCC89, 0xD728, 0xFBCB, 0xE06A, + 0x7F05, 0x64A4, 0x4847, 0x53E6, 0x1181, 0x0A20, 0x26C3, 0x3D62, + 0x8C0E, 0x97AF, 0xBB4C, 0xA0ED, 0xE28A, 0xF92B, 0xD5C8, 0xCE69, + 0x5106, 0x4AA7, 0x6644, 0x7DE5, 0x3F82, 0x2423, 0x08C0, 0x1361, + 0x960F, 0x8DAE, 0xA14D, 0xBAEC, 0xF88B, 0xE32A, 0xCFC9, 0xD468, + 0x4B07, 0x50A6, 0x7C45, 0x67E4, 0x2583, 0x3E22, 0x12C1, 0x0960 +}; + +/*static u_int8_t test_array[] = { +0x70, 0x12, 0x37, 0x5C, 0x00, 0x0E, 0x17, 0xD2, 0x0A, 0x20, 0x24, 0x87, +0x00, 0x87, 0xB1, 0xB3, 0x00, 0x0D, 0xEC, 0x2A, 0x01, 0x71, 0x0A, 0x1C, +0x01, 0x5D, 0x40, 0x02, 0x38, 0xF2, 0x7A, 0x05, 0x00, 0x00, 0x00, 0x0E, +0xBB, 0x88, 0x4D, 0x85, 0xFD, 0x5C, 0xFB, 0xA4, 0x72, 0x8B, 0xC0, 0x69, +0x0E, 0xD4, 0x00, 0x00 +};*/ + +/*static u_int8_t test_array2[] = { +0x70, 0x13, 0x37, 0x5C, 0x00, 0x18, 0x17, 0xD2, 0x60, 0x00, 0x00, 0x00, 0x00, 0x32, +0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x25, 0x00, 0x00, 0x00, 0x00, +0x00, 0x00, 0x00, 0x26, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x17, 0x00, 0x00, +0x00, 0x00, 0x00, 0x00, 0x00, 0x96, 0x0A, 0x20, 0x24, 0x87, 0x00, 0x87, 0xB1, 0xB3, +0x00, 0x0D, 0xEC, 0x2A, 0x01, 0x71, 0x0A, 0x1C, 0x01, 0x5D, 0x40, 0x02, 0x38, 0xF2, +0x7A, 0x05, 0x00, 0x00, 0x00, 0x0E, 0xBB, 0x88, 0x4D, 0x85, 0xFD, 0x5C, 0xFB, 0xA4, +0x72, 0x8B, 0xC0, 0x69, 0x0E, 0xD4, 0x00, 0x00 +};*/ + 
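Editor's note: the two lookup tables above are byte-at-a-time CRC tables in reflected (LSB-first) form; 0xEDB88320 and 0xD008 are the bit-reversed images of the polynomials 0x04C11DB7 (ICRC) and 0x100B (VCRC) quoted in their comments. As a cross-check, here is a minimal sketch of how such tables can be regenerated; gen_crc_tables is an illustrative name and the function is not part of this patch:

    /* illustrative sketch only, not part of the patch:
     * regenerates the two lookup tables above from the
     * bit-reversed (reflected) forms of the polynomials */
    static void gen_crc_tables(u_int32_t t32[256], u_int16_t t16[256])
    {
        u_int32_t c32;
        u_int16_t c16;
        int n, k;

        for (n = 0; n < 256; n++) {
            c32 = (u_int32_t)n;
            c16 = (u_int16_t)n;
            for (k = 0; k < 8; k++) {
                /* shift right one bit, folding in the reflected polynomial */
                c32 = (c32 & 1) ? (0xEDB88320 ^ (c32 >> 1)) : (c32 >> 1);
                c16 = (c16 & 1) ? (0xD008 ^ (c16 >> 1)) : (c16 >> 1);
            }
            t32[n] = c32; /* t32[1] == 0x77073096, matching crc32_table */
            t16[n] = c16; /* t16[1] == 0x1BA1, matching crc16_table */
        }
    }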
+/*static u_int8_t test_array_vcrc[] = {
+0x70, 0x12, 0x37, 0x5C, 0x00, 0x0E, 0x17, 0xD2, 0x0A, 0x20, 0x24, 0x87,
+0x00, 0x87, 0xB1, 0xB3, 0x00, 0x0D, 0xEC, 0x2A, 0x01, 0x71, 0x0A, 0x1C,
+0x01, 0x5D, 0x40, 0x02, 0x38, 0xF2, 0x7A, 0x05, 0x00, 0x00, 0x00, 0x0E,
+0xBB, 0x88, 0x4D, 0x85, 0xFD, 0x5C, 0xFB, 0xA4, 0x72, 0x8B, 0xC0, 0x69,
+0x0E, 0xD4, 0x00, 0x00, 0x96, 0x25, 0xB7, 0x5A
+};*/
+/***********************************************************************************/
+/* Allocate Packet */
+/***********************************************************************************/
+call_result_t
+allocate_packet(u_int16_t payload_size, u_int16_t *payload_buf_p,
+ u_int16_t packet_size, u_int16_t **packet_buf_p)
+{
+    u_int8_t* temp_buffer_p;
+    u_int8_t i;
+    u_int8_t* start_of_payload_p;
+    u_int8_t align;
+
+    align = ((IBWORD - (payload_size % IBWORD)) % IBWORD);
+    if(packet_size < payload_size) return MT_EINVAL;
+    /*prevent the memcpy from writing to non-allocated memory*/
+
+    if((temp_buffer_p = ALLOCATE(u_int8_t,packet_size))== NULL){ /*allocating size in bytes*/
+        MTL_ERROR('1', "\nfailed to allocate temp_buffer_p");
+        return(MT_EKMALLOC);
+    };
+
+    for(i=0;i < align ;i++)
+    {
+        *(temp_buffer_p + (packet_size) - (i + 1)) = 0x00;
+        /*append zeros at the end of the packet to align it to a 4-byte boundary*/
+    }
+
+    start_of_payload_p = temp_buffer_p + (packet_size - payload_size - align );
+    /*the address of the first byte of the packet payload*/
+    MTL_TRACE('5', "\n the allocated mem is %p start payload is %p",temp_buffer_p,start_of_payload_p);
+    if(payload_buf_p != NULL){
+        memcpy(start_of_payload_p,payload_buf_p, payload_size);
+        /*copy the given payload to the end of the buffer, leaving room for the header*/
+    }
+
+    MTL_TRACE('5', "\n start of payload[0] inside = %d",start_of_payload_p[0]);
+
+    (*packet_buf_p) = (u_int16_t *)start_of_payload_p; /*init the given pointer*/
+
+    MTL_TRACE('5', "\n *packet_buf[0] = %d",(*packet_buf_p)[0]);
+    MTL_TRACE('5', "\n packet_buf_p is %p\n",(*packet_buf_p));
+
+    return(MT_OK);
+}
+
+/***********************************************************************************/
+/* Allocate Packet_LRH */
+/***********************************************************************************/
+call_result_t
+allocate_packet_LRH(u_int16_t TCRC_packet_size, u_int16_t t_packet_size,
+ u_int16_t *t_packet_buf_p, u_int16_t packet_size, u_int16_t **packet_buf_p)
+{
+    u_int8_t* temp_buffer_p;
+    u_int8_t* end_of_LRH_p;
+    if(packet_size < TCRC_packet_size) return MT_EINVAL;
+    /*prevent the memcpy from writing to non-allocated memory*/
+
+    if((temp_buffer_p = ALLOCATE(u_int8_t,(packet_size)))== NULL){ /*allocating size in bytes*/
+        MTL_ERROR('1', "\nfailed to allocate temp_buffer_p");
+        return(MT_EKMALLOC);
+    };
+
+    end_of_LRH_p = temp_buffer_p + (packet_size - TCRC_packet_size);
+    /*the address of the first byte after the LRH (start of the transport packet)*/
+
+    if(t_packet_buf_p != NULL){
+        memcpy(end_of_LRH_p, t_packet_buf_p, t_packet_size);
+        /*copy the given transport packet to the end of the buffer, leaving room for the LRH*/
+    }
+
+    (*packet_buf_p) = (u_int16_t *)end_of_LRH_p; /*init the given pointer*/
+
+    return(MT_OK);
+}
+
+/***********************************************************************************/
+/* init packet struct */
+/***********************************************************************************/
+call_result_t
+init_pkt_st(IB_PKT_st *pkt_st_p)
+{
+    pkt_st_p->lrh_st_p = NULL;
+    pkt_st_p->grh_st_p = NULL;
+    pkt_st_p->bth_st_p = NULL;
+    pkt_st_p->rdeth_st_p = NULL;
+    pkt_st_p->deth_st_p = NULL;
+    pkt_st_p->reth_st_p = NULL;
+    pkt_st_p->atomic_eth_st_p = NULL;
+    pkt_st_p->aeth_st_p = NULL;
+    pkt_st_p->atomic_acketh_st_p = NULL;
+    pkt_st_p->payload_buf = NULL;
+    pkt_st_p->payload_size = 0; /*a plain counter, not a pointer like the others*/
+    pkt_st_p->packet_size = 0;
+    return(MT_OK);
+}
+
+/***********************************************************************************/
+/* is little endian */
+/***********************************************************************************/
+u_int8_t
+is_little_endian()
+{
+    int* p_2_int;
+    u_int8_t* p_2_8_bit;
+
+    p_2_int = ALLOCATE(int,1);
+    *p_2_int = 1;
+    p_2_8_bit =(u_int8_t *)p_2_int + (sizeof(int) - 1); /*highest-addressed byte*/
+
+    if(*p_2_8_bit == 1){
+        MTL_TRACE('3', "\n\n\n\n ********** This is a big endian machine **********\n\n\n\n");
+        FREE(p_2_int);
+        return(BIG_ENDIAN_TYPE);
+    }else{
+        MTL_TRACE('3', "\n\n\n\n ********** This is a little endian machine **********\n\n\n\n");
+        FREE(p_2_int);
+        return(LITTLE_ENDIAN_TYPE);
+    }
+}
+
+/***********************************************************************************/
+/* little endian 16 */
+/***********************************************************************************/
+u_int16_t
+little_endian_16(u_int8_t byte_0, u_int8_t byte_1)
+{
+    u_int8_t convert_arry[2];
+    u_int16_t *p_2_16bit;
+
+    convert_arry[0] = byte_0;
+    convert_arry[1] = byte_1;
+
+    p_2_16bit = (u_int16_t*)convert_arry;
+
+    return(*(p_2_16bit));
+}
+
+/***********************************************************************************/
+/* little endian 32 */
+/***********************************************************************************/
+u_int32_t
+little_endian_32(u_int8_t byte_0, u_int8_t byte_1, u_int8_t byte_2, u_int8_t byte_3)
+{
+    u_int8_t convert_arry[4];
+    u_int32_t *p_2_32bit;
+
+    convert_arry[0] = byte_0;
+    convert_arry[1] = byte_1;
+    convert_arry[2] = byte_2;
+    convert_arry[3] = byte_3;
+
+    p_2_32bit = (u_int32_t*)convert_arry;
+
+    return(*(p_2_32bit));
+}
+
+/***********************************************************************************/
+/* little endian 64 */
+/***********************************************************************************/
+u_int64_t
+little_endian_64(u_int8_t byte_0, u_int8_t byte_1, u_int8_t byte_2, u_int8_t byte_3,
+ u_int8_t byte_4, u_int8_t byte_5, u_int8_t byte_6, u_int8_t byte_7)
+{
+    u_int8_t convert_arry[8];
+    u_int64_t *p_2_64bit;
+
+    convert_arry[0] = byte_0;
+    convert_arry[1] = byte_1;
+    convert_arry[2] = byte_2;
+    convert_arry[3] = byte_3;
+    convert_arry[4] = byte_4;
+    convert_arry[5] = byte_5;
+    convert_arry[6] = byte_6;
+    convert_arry[7] = byte_7;
+
+    p_2_64bit = (u_int64_t*)convert_arry;
+
+    return(*(p_2_64bit));
+}
+
+/***********************************************************************************/
+/* fast calc ICRC */
+/***********************************************************************************/
+u_int32_t
+fast_calc_ICRC(u_int16_t packet_size, u_int16_t *packet_buf_p, LNH_t LNH)
+{
+    u_int8_t *start_ICRC;
+    u_int8_t *packet_start;
+    u_int32_t ICRC;
+
+    if((LNH != IBA_LOCAL) && (LNH != IBA_GLOBAL)) return (MT_OK);
+    /*this is not an IBA transport, so no ICRC is needed*/
+
+    start_ICRC = (u_int8_t*)packet_buf_p + packet_size - VCRC_LEN - ICRC_LEN;
+    packet_start = (u_int8_t*)packet_buf_p;
+
+    ICRC = update_ICRC((u_int8_t *)packet_start, (u_int16_t)(packet_size - VCRC_LEN - ICRC_LEN), LNH);
+    /*ICRC = update_ICRC(test_array1, 52);*/
+    return(ICRC);
+}
+
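Editor's note: fast_calc_ICRC, just defined, expects packet_size to include the trailing ICRC and VCRC fields, and skips the calculation entirely for raw (non-IBA) packets. A hedged usage sketch; example_icrc, pkt and pkt_size are hypothetical names, not part of the patch:

    /* hypothetical usage sketch, not part of the patch */
    static void example_icrc(u_int16_t *pkt, u_int16_t pkt_size)
    {
        u_int32_t icrc;

        /* assumed: pkt points at the LRH of a finished local packet, and
         * pkt_size is its total length with the ICRC and VCRC fields included */
        icrc = fast_calc_ICRC(pkt_size, pkt, IBA_LOCAL);
        (void)icrc; /* would then be written just in front of the VCRC field */
    }

update_ICRC, which follows, substitutes all-ones for the per-hop "variant" bytes before accumulating the CRC: the VL nibble and the BTH resv8a byte for local packets, and the whole LRH plus TClass, Flow Label, HopLmt and resv8a for global ones. That is what the masked 0xFF entries in its body implement.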
+/***********************************************************************************/
+/* Update ICRC */
+/***********************************************************************************/
+u_int32_t
+update_ICRC(u_int8_t *byte, u_int16_t size, LNH_t LNH) /* size of the buffer in bytes*/
+{
+    u_int8_t VL_mask = 0xF0;
+    u_int8_t TClass_mask = 0x0F;
+    u_int8_t reserved_mask = 0xFF;
+    u_int16_t index = 0;
+    u_int32_t ICRC = 0xFFFFFFFF;
+
+    if(LNH == IBA_LOCAL){
+        VL_mask = VL_mask | byte[0]; /*LRH field: VL masked 1111____*/
+        ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (VL_mask)];
+        for(index = 1; index < 8; index++) /*rest of the LRH field*/
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+
+        /*BTH field*/
+        for(index = 8; index < 12; index++)
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+        ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ reserved_mask]; /*byte 12 (resv8a), masked 11111111*/
+        for(index = 13; index < 20; index++)
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+
+        size = size - BTH_LEN - LRH_LEN;
+        index = BTH_LEN + LRH_LEN;
+
+        while(size--)
+        {
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+            index++;
+        }
+    }else{ /*it is IBA GLOBAL*/
+        for(index = 0; index < 8; index++) /*masking the entire LRH field, bytes 0 - 7*/
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (0xFF)];
+
+        TClass_mask = TClass_mask | byte[8]; /*GRH field, 40 bytes: TClass masked _ _ _ _ 1111*/
+        ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (TClass_mask)];
+        for(index = 9; index < 12; index++) /*masking the Flow Label, bytes 9 - 11*/
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (0xFF)];
+        for(index = 12; index < 15; index++)
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+        ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (0xFF)]; /*masking the HopLmt, byte 15, 11111111*/
+        for(index = 16; index < 48; index++) /*SGID and DGID*/
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+
+        /*BTH field, 12 bytes*/
+        for(index = 48; index < 52; index++)
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+        ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (reserved_mask)]; /*byte 52 (resv8a), masked*/
+        for(index = 53; index < 60; index++)
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+
+        size = size - LRH_LEN - GRH_LEN - BTH_LEN;
+        index = LRH_LEN + GRH_LEN + BTH_LEN;
+
+        while(size--) /*the rest of the packet*/
+        {
+            ICRC = (ICRC >> 8) ^ crc32_table[(ICRC & 0xFF) ^ (byte[index])];
+            index++;
+        }
+    }
+
+    return (ICRC ^ 0xFFFFFFFF);
+}
+
+
+/***********************************************************************************/
+/* fast calc VCRC */
+/***********************************************************************************/
+u_int16_t
+fast_calc_VCRC(u_int16_t packet_size, u_int16_t *packet_buf_p)
+{
+    u_int8_t *start_VCRC;
+    u_int8_t *packet_start;
+    u_int16_t VCRC = 0;
+
+    start_VCRC = (u_int8_t*)packet_buf_p + packet_size - VCRC_LEN;
+    packet_start = (u_int8_t*)packet_buf_p;
+
+    VCRC = update_VCRC((u_int8_t *)packet_start, (u_int16_t)(packet_size - VCRC_LEN));
+    /*VCRC = update_VCRC(test_array_vcrc, 56);*/
+    return(VCRC);
+}
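Editor's note: the ordering implied by the two helpers is worth spelling out. The ICRC is computed over the packet with its variant bytes masked, while the VCRC (update_VCRC, next) covers every byte from the LRH through the ICRC with no masking at all, so it can only be computed after the ICRC has been written into place. A sketch under the same hypothetical names as above, not part of the patch:

    /* hypothetical ordering sketch, not part of the patch */
    static void example_crcs(u_int16_t *pkt, u_int16_t pkt_size)
    {
        u_int32_t icrc;
        u_int16_t vcrc;

        icrc = fast_calc_ICRC(pkt_size, pkt, IBA_LOCAL);
        /* ...store the 4 ICRC bytes at offset pkt_size - VCRC_LEN - ICRC_LEN... */
        vcrc = fast_calc_VCRC(pkt_size, pkt);
        /* ...store the 2 VCRC bytes at offset pkt_size - VCRC_LEN... */
        (void)icrc; (void)vcrc;
    }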
+/***********************************************************************************/ +/* update VCRC */ +/***********************************************************************************/ +u_int16_t +update_VCRC(u_int8_t *byte, u_int16_t size) +{ + u_int16_t VCRC = 0xFFFF; /*VCRC SEED */ + + while( size-- ){ + VCRC = (VCRC >> 8) ^ crc16_table[(VCRC & 0xFF) ^ *byte++]; + } + + return(VCRC ^ 0xFFFF); +} + +/***********************************************************************************/ +/* Check ICRC */ +/***********************************************************************************/ +call_result_t +check_ICRC(IB_PKT_st *pkt_st_p, u_int16_t *packet_start_p) +{ + u_int32_t extracted_ICRC; + u_int32_t calc_ICRC; + u_int8_t* start_ICRC_p; + LNH_t LNH; + + LNH = (pkt_st_p->lrh_st_p)->LNH;/*For calc ICRC */ + start_ICRC_p =(u_int8_t*)(packet_start_p) + ((pkt_st_p->lrh_st_p)->PktLen) * IBWORD - ICRC_LEN; + + extract_ICRC((u_int16_t*)start_ICRC_p, &extracted_ICRC); + calc_ICRC = fast_calc_ICRC(pkt_st_p->packet_size, packet_start_p, LNH); +// calc_ICRC = __be32_to_cpu(calc_ICRC); + + if(calc_ICRC != extracted_ICRC){ + MTL_TRACE('1', "\n** ERROR extracted ICRC != calc ICRC **\n"); + return(MT_ERROR); + }else return(MT_OK); +} +/***********************************************************************************/ +/* Check VCRC */ +/***********************************************************************************/ +call_result_t +check_VCRC(IB_PKT_st *pkt_st_p, u_int16_t *packet_start_p) +{ + u_int16_t extracted_VCRC; + u_int16_t calc_VCRC; + u_int8_t* start_VCRC_p; + + start_VCRC_p =(u_int8_t*)(packet_start_p) + ((pkt_st_p->lrh_st_p)->PktLen) * IBWORD ; + + extract_VCRC((u_int16_t*)start_VCRC_p, &extracted_VCRC); + calc_VCRC = fast_calc_VCRC(pkt_st_p->packet_size, packet_start_p); +// calc_VCRC = __be16_to_cpu(calc_VCRC); + + if(calc_VCRC != extracted_VCRC){ + MTL_TRACE('1', "\n** ERROR extracted VCRC != calc VCRC **\n"); + return(MT_ERROR); + }else return(MT_OK); +} diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.h new file mode 100644 index 00000000..80fa2557 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/internal_functions.h @@ -0,0 +1,338 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+#ifndef H_INTERNAL_FUNCTIONS_H
+#define H_INTERNAL_FUNCTIONS_H
+
+/* Layers Includes */
+#ifdef VXWORKS_OS
+#include
+#endif /* VXWORKS_OS */
+#include
+#include
+
+/* MPGA Includes */
+#include
+
+
+#define LITTLE_ENDIAN_TYPE 0
+#define BIG_ENDIAN_TYPE 1
+
+#define ALLOCATE(__type,__num) (__type *)INTR_MALLOC((__num)*sizeof(__type))
+
+#define INSERTF(W,O1,F,O2,S) ( MT_INSERT32(W, MT_EXTRACT32(F, O2, S), O1, S) )
+/* boolean helpers over the endianness type codes */
+#define IS_LITTLE_ENDIAN (is_little_endian() == LITTLE_ENDIAN_TYPE)
+#define IS_BIG_ENDIAN (is_little_endian() == BIG_ENDIAN_TYPE)
+
+
+/******************************************************************************
+* Function: allocate_packet
+*
+* Description: This function allocates IB packets for the transport layer only.
+* It allocates the buffer according to the given packet size,
+* performs the malloc for the packet buffer,
+* and updates packet_buf_p.
+*
+*
+* Parameters:
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) u_int16_t *
+* A pointer to the payload buffer.
+* packet_size(in) u_int16_t
+* The full size of the packet (for the transport layer).
+* packet_buf_p(out) u_int16_t **
+* A pointer to the packet pointer (the packet is allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR,
+* MT_EKMALLOC - could not allocate memory.
+*
+*****************************************************************************/
+call_result_t
+allocate_packet(u_int16_t payload_size, u_int16_t *payload_buf_p,
+ u_int16_t packet_size, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+* Function: allocate_packet_LRH
+*
+* Description: This function allocates IB packets for the transport layer only.
+* It allocates the buffer according to the given packet size,
+* performs the malloc for the packet buffer,
+* and updates packet_buf_p.
+*
+*
+* Parameters:
+* TCRC_packet_size(in) u_int16_t
+* The size of the transport packet with the CRC.
+* t_packet_size(in) u_int16_t
+* The size of the transport packet without the CRC.
+* t_packet_buf_p(in) u_int16_t *
+* A pointer to the transport packet.
+* packet_size(in) u_int16_t
+* The full size of the packet with the LRH.
+* packet_buf_p(out) u_int16_t **
+* A pointer to the packet pointer (the packet is allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR,
+* MT_EKMALLOC - could not allocate memory.
+*
+*****************************************************************************/
+call_result_t
+allocate_packet_LRH(u_int16_t TCRC_packet_size, u_int16_t t_packet_size,
+ u_int16_t *t_packet_buf_p, u_int16_t packet_size, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+* Function: analyze_trans_packet
+*
+* Description: This function analyzes transport-layer packets
+* and updates the needed structures according to their content.
+*
+* Parameters:
+* pkt_st_p(out) IB_PKT_st *
+* A pointer to a packet structure that will be updated by the function.
+* packet_buf_p(in) u_int16_t **
+* A pointer to the start of the packet (must have an LRH field).
+*
+* NOTE: the function allocates memory for the internal buffers,
+* and it is the user's responsibility to free it.
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR
+*****************************************************************************/
+call_result_t
+analyze_trans_packet(IB_PKT_st *pkt_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+* Function: is_little_endian
+*
+* Description: This function checks whether the machine is big or little endian.
+* It returns LITTLE_ENDIAN_TYPE (0) on a little-endian machine and
+* BIG_ENDIAN_TYPE (1) on a big-endian machine.
+*
+* Parameters:
+*
+* Returns:
+* u_int8_t:
+* LITTLE_ENDIAN_TYPE (0).
+* BIG_ENDIAN_TYPE (1).
+*
+*****************************************************************************/
+u_int8_t is_little_endian(void);
+
+/******************************************************************************
+* Function: little_endian_16
+*
+* Description: This function converts a big-endian byte layout to the host's
+* little-endian representation.
+*
+* All the parameters are:
+* byte_x(in) u_int8_t
+* The first byte is the LSB, and so on.
+*
+* Returns:
+* u_int16_t:
+******************************************************************************/
+u_int16_t
+little_endian_16(u_int8_t byte_0, u_int8_t byte_1);
+
+/******************************************************************************
+* Function: little_endian_32
+*
+* Description: This function converts a big-endian byte layout to the host's
+* little-endian representation.
+*
+* All the parameters are:
+* byte_x(in) u_int8_t
+* The first byte is the LSB, and so on.
+*
+* Returns:
+* u_int32_t:
+******************************************************************************/
+u_int32_t
+little_endian_32(u_int8_t byte_0, u_int8_t byte_1, u_int8_t byte_2, u_int8_t byte_3);
+
+/******************************************************************************
+* Function: little_endian_64
+*
+* Description: This function converts a big-endian byte layout to the host's
+* little-endian representation.
+*
+* All the parameters are:
+* byte_x(in) u_int8_t
+* The first byte is the LSB, and so on.
+*
+* Returns:
+* u_int64_t:
+******************************************************************************/
+u_int64_t
+little_endian_64(u_int8_t byte_0, u_int8_t byte_1, u_int8_t byte_2, u_int8_t byte_3,
+ u_int8_t byte_4, u_int8_t byte_5, u_int8_t byte_6, u_int8_t byte_7);
+
+/******************************************************************************
+* Function: init_pkt_st (init packet struct)
+*
+* Description: This function initializes the pkt_st pointer members with NULL,
+* and the size members with zero.
+*
+* All the parameters are:
+* pkt_st_p(out) IB_PKT_st
+* Packet struct pointer.
+*
+* Returns:
+* MT_OK
+* MT_ERROR
+******************************************************************************/
+call_result_t
+init_pkt_st(IB_PKT_st *pkt_st_p);
+
+/******************************************************************************
+* Function: fast_calc_ICRC
+*
+* Description: This function calculates the ICRC, and does so only for
+* IBA_GLOBAL or IBA_LOCAL packets.
+*
+* All the parameters are:
+* packet_size(in) u_int16_t
+* The packet size.
+* packet_buf_p(in) u_int16_t*
+* Pointer to the start of the packet, before the LRH field.
+* LNH(in) LNH_t
+* Packet kind: IBA_GLOBAL, IBA_LOCAL, (RAW GRH), (RAW RWH).
+*
+* Returns:
+* The calculated ICRC (u_int32_t)
+******************************************************************************/
+u_int32_t
+fast_calc_ICRC(u_int16_t packet_size, u_int16_t *packet_buf_p, LNH_t LNH);
+
+/******************************************************************************
+* Function: fast_calc_VCRC
+*
+* Description: This function calculates the VCRC of the IB packet.
+*
+* All the parameters are:
+* packet_size(in) u_int16_t
+* The packet size.
+* packet_buf_p(in) u_int16_t*
+* Pointer to the start of the packet, before the LRH field.
+*
+* Returns:
+* The calculated VCRC (u_int16_t)
+******************************************************************************/
+u_int16_t
+fast_calc_VCRC(u_int16_t packet_size, u_int16_t *packet_buf_p);
+
+/******************************************************************************
+* Function: update_ICRC
+*
+* Description: This function updates the ICRC, using the CRC32 table for a
+* fast calculation.
+*
+* All the parameters are:
+* byte(in) u_int8_t *
+* Pointer to the bytes to accumulate into the ICRC.
+* size(in) u_int16_t
+* The number of bytes to process.
+* LNH(in) LNH_t
+* Packet kind: IBA_GLOBAL, IBA_LOCAL, (RAW GRH), (RAW RWH).
+*
+* Returns:
+* The calculated ICRC (u_int32_t)
+******************************************************************************/
+u_int32_t
+update_ICRC(u_int8_t *byte, u_int16_t size, LNH_t LNH);
+
+/******************************************************************************
+* Function: update_VCRC
+*
+* Description: This function updates the VCRC, using the CRC16 table for a
+* fast calculation.
+*
+* All the parameters are:
+* byte(in) u_int8_t *
+* Pointer to the bytes to accumulate into the VCRC.
+* size(in) u_int16_t
+* The number of bytes to process.
+*
+* Returns:
+* The calculated VCRC (u_int16_t)
+******************************************************************************/
+u_int16_t
+update_VCRC(u_int8_t *byte, u_int16_t size);
+
+/******************************************************************************
+* Function: check_VCRC
+*
+* Description: This function checks the VCRC, using the CRC16 table for a
+* fast calculation and the extract_VCRC function, and compares
+* the two results.
+*
+* All the parameters are:
+* pkt_st_p(in) IB_PKT_st *
+* General packet struct.
+* packet_start_p(in) u_int16_t*
+* Pointer to the start of the packet, before the LRH field.
+*
+* Returns:
+* MT_OK - the two results match.
+* MT_ERROR otherwise.
+******************************************************************************/
+call_result_t
+check_VCRC(IB_PKT_st *pkt_st_p, u_int16_t *packet_start_p);
+
+/******************************************************************************
+* Function: check_ICRC
+*
+* Description: This function checks the ICRC, using the CRC32 table for a
+* fast calculation and the extract_ICRC function, and compares
+* the two results.
+*
+* All the parameters are:
+* pkt_st_p(in) IB_PKT_st *
+* General packet struct.
+* packet_start_p(in) u_int16_t*
+* Pointer to the start of the packet, before the LRH field.
+*
+* Returns:
+* MT_OK
+* MT_ERROR.
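+*
+* Usage sketch (illustrative only; "pkt" and "wire_buf_p" are placeholder
+* names, not part of this API): given a receive buffer that starts at the
+* LRH and an IB_PKT_st already filled in by the analysis path, the two CRC
+* checks run back to back:
+*
+*   if (check_ICRC(&pkt, wire_buf_p) != MT_OK ||
+*       check_VCRC(&pkt, wire_buf_p) != MT_OK)
+*       return MT_ERROR;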
+******************************************************************************/ +call_result_t +check_ICRC(IB_PKT_st *pkt_st_p, u_int16_t *packet_start_p); + +#endif /* internal function */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.c new file mode 100644 index 00000000..55cdb829 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.c @@ -0,0 +1,1215 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/************************************/ + + +#ifdef __LINUX__ +#ifdef MT_KERNEL +#include +//#include +#endif +#endif + + +#include +#include +#include +#include +#include + +/************************************/ + +#ifndef VXWORKS_OS +#ifdef __WIN__ +#define MLOCK(__buff, __size) 0 +#else + +#ifndef MT_KERNEL +#include /* for mlock function */ +#define MLOCK(__buff, __size) mlock(__buff, __size) +#else +#define MLOCK(__buff, __size) 0 +#endif + +#endif + +#else /* VXWORKS_OS */ +#define MLOCK(__buff, __size) 0 +#endif /* VXWORKS_OS */ + + +/*********************************************************************************/ +/* build packet with lrh */ +/*********************************************************************************/ +call_result_t +MPGA_build_pkt_lrh (IB_LRH_st *lrh_st_p, u_int16_t t_packet_size, void *t_packet_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp,LNH_t LNH) +{ + u_int8_t *start_LRH_p; + u_int16_t TCRC_packet_size = 0;/*This arg will be send to the allocate function*/ + u_int32_t ICRC = 0; /*for making space for the crc fileds*/ + u_int16_t VCRC = 0; + u_int8_t *start_ICRC_p; + u_int8_t *start_VCRC_p; + u_int16_t **packet_buf_p; + u_int16_t *t_packet_buf_p; + u_int8_t align = 0; + + packet_buf_p = (u_int16_t**)packet_buf_vp; /*casting to u_int16_t */ + t_packet_buf_p = (u_int16_t*)t_packet_buf_vp; + + if(LNH == IBA_LOCAL) TCRC_packet_size = t_packet_size + ICRC_LEN + VCRC_LEN; + else{ + if(LNH == RAW) align = (4 - (t_packet_size % IBWORD)) % IBWORD; /*should be RAW packet at this stage*/ + TCRC_packet_size = t_packet_size + VCRC_LEN + align;/*for sending to the allocate functiom*/ + } + + (*packet_size_p) = TCRC_packet_size + LRH_LEN; /*CRC fields are included*/ + + + if((allocate_packet_LRH(TCRC_packet_size, t_packet_size, t_packet_buf_p, + *packet_size_p, packet_buf_p)) != MT_OK) return(MT_EKMALLOC); + /*packet_bup_p is a p2p*/ + 
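+ /*
+ * Wire packets must occupy a whole number of IBWORDs (4-byte words).
+ * For RAW packets the pad computed above is (4 - (t_packet_size % IBWORD)) % IBWORD;
+ * e.g. a 14-byte transport part gets (4 - 14 % 4) % 4 = 2 pad bytes ahead of
+ * the 2-byte VCRC. IBA_LOCAL transport buffers are assumed to arrive already
+ * word-aligned, so only ICRC_LEN and VCRC_LEN are added for them.
+ * PktLen below is likewise counted in 4-byte words, from the first LRH byte
+ * up to and including the ICRC (the VCRC is excluded).
+ */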
+/*Update the fields in the given lrh struct*/ + lrh_st_p->LNH = (u_int8_t)LNH; + lrh_st_p->PktLen = (*packet_size_p - VCRC_LEN) / IBWORD; + /*from the firest byte of the LRH till the VCRC in 4 byte word*/ + lrh_st_p->reserved1 = 0; + lrh_st_p->reserved2 = 0; + + + start_LRH_p = (u_int8_t*)(*packet_buf_p) - LRH_LEN; + + start_LRH_p[0] = INSERTF(start_LRH_p[0],4,lrh_st_p->VL,0,4); + start_LRH_p[0] = INSERTF(start_LRH_p[0],0,lrh_st_p->LVer,0,4); + start_LRH_p[1] = INSERTF(start_LRH_p[1],4,lrh_st_p->SL,0,4); + start_LRH_p[1] = INSERTF(start_LRH_p[1],2,lrh_st_p->reserved1,0,2); + start_LRH_p[1] = INSERTF(start_LRH_p[1],0,lrh_st_p->LNH,0,2); + start_LRH_p[2] = INSERTF(start_LRH_p[2],0,lrh_st_p->DLID,8,8); + start_LRH_p[3] = INSERTF(start_LRH_p[3],0,lrh_st_p->DLID,0,8); + start_LRH_p[4] = INSERTF(start_LRH_p[4],3,lrh_st_p->reserved2,0,5); + start_LRH_p[4] = INSERTF(start_LRH_p[4],0,lrh_st_p->PktLen,8,3); + start_LRH_p[5] = INSERTF(start_LRH_p[5],0,lrh_st_p->PktLen,0,8); + start_LRH_p[6] = INSERTF(start_LRH_p[6],0,lrh_st_p->SLID,8,8); + start_LRH_p[7] = INSERTF(start_LRH_p[7],0,lrh_st_p->SLID,0,8); + + (*packet_buf_p) = (u_int16_t*)start_LRH_p; + + if(LNH == IBA_LOCAL){ /*appending the ICRC */ + start_ICRC_p = (u_int8_t*)start_LRH_p + LRH_LEN + t_packet_size; + ICRC = fast_calc_ICRC(*packet_size_p, *packet_buf_p, LNH); + append_ICRC((u_int16_t*)start_ICRC_p, ICRC); + } + + start_VCRC_p = (u_int8_t*)start_LRH_p + LRH_LEN + TCRC_packet_size -VCRC_LEN; + VCRC = fast_calc_VCRC(*packet_size_p, *packet_buf_p); /* appendinf the VCRC*/ + append_VCRC((u_int16_t*)start_VCRC_p, VCRC); + + return(MT_OK); +} + + +/*******************************************************************************/ +/* reliable send */ +/*******************************************************************************/ +call_result_t +MPGA_reliable_send(IB_BTH_st *bth_st_p, u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp, IB_pkt_place packet_place) +{ + return(MT_ENOSYS); +} + +/*******************************************************************************/ +/* reliable send only */ +/*******************************************************************************/ +call_result_t +MPGA_rc_send_only(IB_BTH_st *bth_st_p, u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + + header_size = RC_SEND_ONLY_LEN; /*init parameters*/ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + /*Updating fields and given arguments*/ +(*packet_size_p) = packet_size; + bth_st_p->OpCode = RC_SEND_ONLY_OP; /*opcode is 00000100 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC); + + /*packet_bup_p is a p2p*/ + /*printf("\n in before append bth reliable packet_buf_p is %d",(*packet_buf_p));*/ + + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + return(MT_OK); +} + +/***********************************************************************************/ +/* reliable rdma write only */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_w_only(IB_BTH_st *bth_st_p, IB_RETH_st 
*reth_st_p, + u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = RC_WRITE_ONLY_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_WRITE_ONLY_OP; /*opcode is 00001001 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the wanted fields*/ + if((append_RETH (reth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + return(MT_OK); +} + + + +/***********************************************************************************/ +/* reliable rdma write first */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_w_first(IB_BTH_st *bth_st_p, IB_RETH_st *reth_st_p, + u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = RC_WRITE_FIRST_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_WRITE_FIRST_OP; /*opcode is 00001001 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the wanted fields*/ + if((append_RETH (reth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + return(MT_OK); +} + + + + +/***********************************************************************************/ +/* reliable rdma write middle */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_w_middle(IB_BTH_st *bth_st_p, u_int16_t payload_size, + void *payload_buf_vp, u_int16_t *packet_size_p, + void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = RC_WRITE_MIDDLE_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_WRITE_MIDDLE_OP; /*opcode is 00001001 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + 
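+ /*
+ * Per the IBA opcode layout, only the FIRST and ONLY segments of an RDMA
+ * write carry a RETH; MIDDLE and LAST segments are BTH + payload. That is
+ * why RC_WRITE_MIDDLE_LEN covers the BTH alone and no append_RETH() call
+ * is made in this function.
+ */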
/*appending the bth field */ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + + + return(MT_OK); +} + + +/***********************************************************************************/ +/* reliable rdma write last */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_w_last(IB_BTH_st *bth_st_p, u_int16_t payload_size, + void *payload_buf_vp, u_int16_t *packet_size_p, + void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = RC_WRITE_LAST_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_WRITE_LAST_OP; /*opcode is 00001001 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the bth field */ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + + + return(MT_OK); +} + + +/***********************************************************************************/ +/* reliable rdma read request only */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_r_req(IB_BTH_st *bth_st_p, IB_RETH_st *reth_st_p, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t payload_size = 0;/*For passing on to the functions*/ + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + header_size = RC_READ_REQ_LEN; /*init parametrs */ + packet_size = header_size; /*No payload in this packet*/ + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_REQ_OP; /*opcode is 00001100 (overwrite)*/ + + if((allocate_packet(payload_size, NULL, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC); /*packet_bup_p is a p2p*/ + + if((append_RETH (reth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + return(MT_OK); +} + +/***********************************************************************************/ +/* reliable rdma read response (First Middle or Last) */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_r_resp(IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, + u_int16_t payload_size, void *payload_buf_vp, u_int16_t *packet_size_p, + void **packet_buf_vp, IB_pkt_place packet_place) +{ + u_int16_t header_size = 0; + u_int16_t packet_size = 0; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + + switch(packet_place){ + case FIRST_PACKET: bth_st_p->OpCode = RC_READ_RESP_FIRST_OP; + header_size = RC_READ_RESP_FIRST_LEN; /*init parametrs */ + break; + case MIDDLE_PACKET: bth_st_p->OpCode = RC_READ_RESP_MIDDLE_OP; + header_size = RC_READ_RESP_MIDDLE_LEN; /*init parametrs */ + break; + case 
LAST_PACKET: bth_st_p->OpCode = RC_READ_RESP_LAST_OP; + header_size = RC_READ_RESP_LAST_LEN; /*init parametrs */ + break; + default: MTL_ERROR('1', "\nERROR (PLACE) IN rdma r resp\n"); + }; + + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the wanted fields*/ + if(packet_place != MIDDLE_PACKET){ + if((append_AETH (aeth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + } + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + return(MT_OK); +} + +/***********************************************************************************/ +/* reliable rdma read response only */ +/***********************************************************************************/ +call_result_t +MPGA_rc_rdma_r_resp_only(IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, + u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = RC_READ_RESP_ONLY_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_RESP_ONLY_OP; /*opcode is 00001001 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the wanted fields*/ + if((append_AETH (aeth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + return(MT_OK); +} + +/***********************************************************************************/ +/* unreliable Datagram send only */ +/***********************************************************************************/ +call_result_t +MPGA_ud_send_only(IB_BTH_st *bth_st_p, IB_DETH_st *deth_st_p, + u_int16_t payload_size, void *payload_buf_vp, + u_int16_t *packet_size_p, void **packet_buf_vp) +{ + u_int16_t header_size; + u_int16_t packet_size; + u_int16_t *payload_buf_p; + u_int16_t **packet_buf_p; + + packet_buf_p = (u_int16_t**)packet_buf_vp; + payload_buf_p = (u_int16_t*)payload_buf_vp;/*casting the void to u_int16_t* ,data could be 4096B*/ + header_size = UD_SEND_ONLY_LEN; /*init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD); + + (*packet_size_p) = packet_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = UD_SEND_ONLY_OP; /*opcode is 01100100 */ + + if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK) + return(MT_EKMALLOC);/*packet_bup_p is a p2p*/ + + /*appending the wanted fields*/ + if((append_DETH (deth_st_p, packet_buf_p)) != MT_OK) return(MT_ERROR); + /*appending the reth field*/ + if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth 
field */ + return(MT_OK); +} + + + +/************************************************************************/ +/* Bulding headers only */ +/************************************************************************/ + + + +/*************************************************************************/ +/* fast RC send first */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_send_first(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_SEND_FIRST_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_SEND_FIRST_OP; /*opcode is 01100100 */ + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC send middle */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_send_middle(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_SEND_MIDDLE_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_SEND_MIDDLE_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOSYS); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC send last */ 
+/*************************************************************************/ +call_result_t +MPGA_fast_rc_send_last(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_SEND_LAST_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_SEND_LAST_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_EAGAIN); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC send only */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_send_only(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_EAGAIN); + + header_size = RC_SEND_ONLY_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_SEND_ONLY_OP; /*opcode is 01100100 */ + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC RDMA read response first */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_read_resp_first(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH, + u_int16_t payload_size, u_int16_t *header_size_p, + void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* 
temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_READ_RESP_FIRST_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_RESP_FIRST_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p in rc read resp first"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_AETH (aeth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the aeth field */ + + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC RDMA read response middle */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_read_resp_middle(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_READ_RESP_MIDDLE_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_RESP_MIDDLE_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p in rc read resp middle"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC RDMA read response last */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_read_resp_last(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH, + u_int16_t payload_size, u_int16_t *header_size_p, + void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_READ_RESP_LAST_LEN + LRH_LEN; + packet_size = 
header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_RESP_LAST_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p in rc read resp last"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_AETH (aeth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the aeth field */ + + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} +/*************************************************************************/ +/* fast RC RDMA read response only */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_read_resp_only(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH, + u_int16_t payload_size, u_int16_t *header_size_p, + void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = RC_READ_RESP_ONLY_LEN + LRH_LEN; + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_READ_RESP_ONLY_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p in rc read resp only"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; /* Building the header from end to start */ + + /*************appending the wanted fields**********************/ + if ((append_AETH (aeth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the aeth field */ + + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} + +/*************************************************************************/ +/* fast RC ACKNOW */ +/*************************************************************************/ +call_result_t +MPGA_fast_rc_acknowledge(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size = 0, packet_size; + u_int8_t* temp_header_buff; + u_int16_t payload_size = 0; /* NO payload in Acknowledge packet */ + + if (LNH != IBA_LOCAL) return(MT_ENOSYS); + + header_size = 
RC_ACKNOWLEDGE_LEN + LRH_LEN; /*UD is for transport only init parametrs */ + packet_size = header_size + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size;/*Update given arg to the packet size with out LRH or GRH */ + bth_st_p->OpCode = RC_ACKNOWLEDGE_OP; /*opcode is 01100100 */ + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + temp_header_buff += header_size; + + /*************appending the wanted fields**********************/ + if ((append_AETH (aeth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the aeth field*/ + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} + +/*************************************************************************/ +/* fast UD packet send only */ +/*************************************************************************/ +call_result_t +MPGA_fast_ud_send_only(IB_LRH_st *lrh_st_p, IB_BTH_st *bth_st_p, + IB_DETH_st *deth_st_p, u_int16_t payload_size, + u_int16_t *header_size_p, void **header_buf_p) +{ + u_int16_t header_size, packet_size; + u_int8_t* temp_header_buff; + LNH_t LNH; + + header_size = UD_SEND_ONLY_LEN + LRH_LEN; /*UD is for transport only init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size; + bth_st_p->OpCode = UD_SEND_ONLY_OP; + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; + + /*************appending the wanted fields**********************/ + if ((append_DETH (deth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the deth field*/ + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + LNH = IBA_LOCAL; + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + + return(MT_OK); +} + +/*************************************************************************/ +/* fast UD packet send only with grh */ +/*************************************************************************/ +call_result_t +MPGA_fast_ud_send_grh(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p, + IB_BTH_st *bth_st_p, IB_DETH_st *deth_st_p, + u_int16_t payload_size, u_int16_t *header_size_p, + void **header_buf_p) +{ + u_int16_t header_size, packet_size; + u_int8_t* temp_header_buff; + LNH_t LNH; + + header_size = UD_SEND_ONLY_LEN + LRH_LEN + GRH_LEN; /*UD is for transport only init parametrs */ + packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD) + ICRC_LEN + VCRC_LEN; + + (*header_size_p) = header_size; + bth_st_p->OpCode = 
UD_SEND_ONLY_OP; /*opcode is 01100100 */ + + if ((temp_header_buff = ALLOCATE(u_int8_t,(header_size)))== NULL) + { /* Allocting size in bytes*/ + MTL_TRACE('5', "\nfailed to allocate temp_buffer_p"); + return(MT_ENOMEM); + }; + + if(MLOCK(temp_header_buff, header_size)){ + MTL_TRACE('5', "\nfailed to lock temp_head_buff"); + return(MT_ENOMEM); + }; + + + temp_header_buff += header_size; + + /*************appending the wanted fields**********************/ + if ((append_DETH (deth_st_p, (u_int16_t**)&temp_header_buff)) != MT_OK) return(MT_ERROR); + /*appending the deth field*/ + + if ((append_BTH (bth_st_p, (u_int16_t**)&temp_header_buff, payload_size)) != MT_OK) return(MT_ERROR); + /*appending the bth field */ + + if ((append_GRH (grh_st_p, packet_size, (u_int16_t**)&temp_header_buff )) != MT_OK) return(MT_ERROR); + + LNH = IBA_GLOBAL; + if ((append_LRH (lrh_st_p, packet_size, (u_int16_t**)&temp_header_buff, LNH)) != MT_OK) return(MT_ERROR); + /*appending the lrh field */ + + *header_buf_p = temp_header_buff; + + return(MT_OK); +} + +/***********************************************************************************/ +/* Analyze Packet */ +/***********************************************************************************/ +call_result_t +MPGA_analyze_packet(IB_PKT_st *pkt_st_p, void *packet_buf_vp) +{ + u_int16_t *packet_buf_p; + call_result_t return_val = MT_OK; + + packet_buf_p = (u_int16_t*)packet_buf_vp; + init_pkt_st(pkt_st_p);/*inisilize the given struct all the poiters to Null size 0*/ + + if((pkt_st_p->lrh_st_p = ALLOCATE(IB_LRH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\n** ERROR failed to allocate pkt_st_p->lrh_st"); + return(MT_EKMALLOC); + }; + + MTL_TRACE('5', "\n Extracting lrh field"); + extract_LRH((pkt_st_p->lrh_st_p),&packet_buf_p); +/*Sendind start_packet_p and not packet_buf_p */ + + /*Init the pkt_st_p parameterers */ + pkt_st_p->packet_size = ((pkt_st_p->lrh_st_p)->PktLen * 4) + VCRC_LEN; + pkt_st_p->payload_size = ((pkt_st_p->lrh_st_p)->PktLen * 4) - ICRC_LEN - LRH_LEN; + +switch((pkt_st_p->lrh_st_p)->LNH){ + + case RAW: /* 0x0 |LRH|... (Etertype)*/ + MTL_TRACE('5', "\n Analayze RAW packet"); + (pkt_st_p->payload_buf) = packet_buf_p;/*Updating the pointer to the payload point NO GRH*/ + (pkt_st_p->payload_size) += ICRC_LEN; + if(check_VCRC(pkt_st_p, (u_int16_t*)packet_buf_vp)== MT_ERROR){ + return_val = MT_ERROR;/*Checking the VCRC*/ + } + break; + case IP_NON_IBA_TRANS: /* 0x1 |LRH|GRH|... */ + MTL_TRACE('5', "\n Analayze NON IBA packet"); + break; + case IBA_LOCAL: /* 0x2 |LRH|BTH|... */ + MTL_TRACE('5', "\n Analayze LOCAL packet"); + if(check_ICRC(pkt_st_p, (u_int16_t*)packet_buf_vp)== MT_ERROR){ + return_val = MT_ERROR;/*Checking the ICRC*/ + } + if(check_VCRC(pkt_st_p, (u_int16_t*)packet_buf_vp)== MT_ERROR){ + return_val = MT_ERROR;/*Checking the VCRC*/ + } + if((analyze_trans_packet(pkt_st_p, &packet_buf_p)) == MT_ERROR){ + return_val = MT_ERROR; + } + break; + case IBA_GLOBAL: /* 0x3 |LRH|GRH|BTH|... 
*/ + MTL_TRACE('5', "\n Analayze GLOBAL packet"); + return_val = MT_ERROR; + break; + default: + MTL_ERROR('1', "\n ERROR case in analyze packet\n"); + return_val = MT_ERROR; + break; + } + + return(return_val); +} + +/***********************************************************************************/ +/* Analyze Transport Packet */ +/***********************************************************************************/ +call_result_t +analyze_trans_packet(IB_PKT_st *pkt_st_p, u_int16_t **packet_p) +{ + + if((pkt_st_p->bth_st_p = ALLOCATE(IB_BTH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->bth_st"); + return(MT_EKMALLOC); + }; + MTL_TRACE('5', "\n Extracting the BTH field"); + extract_BTH((pkt_st_p->bth_st_p), packet_p); + (pkt_st_p->payload_size) -= BTH_LEN; + + switch((pkt_st_p->bth_st_p)->OpCode){ + + case RC_SEND_FIRST_OP: + case RC_SEND_MIDDLE_OP: + case RC_SEND_LAST_OP: + case RC_SEND_ONLY_OP: /*0x4 /BTH/pyload/ */ + (pkt_st_p->payload_buf) = *packet_p;/*Updating the pointer to the packet buf*/ + break; + case RC_WRITE_ONLY_OP: /*0xa /BTH/RETH/pyload/ */ + case RC_WRITE_FIRST_OP: + if((pkt_st_p->reth_st_p = ALLOCATE(IB_RETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->reth_st"); + return(MT_EKMALLOC); + }; + extract_RETH((pkt_st_p->reth_st_p),packet_p); + (pkt_st_p->payload_size) -= RETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_WRITE_LAST_W_IM_OP: + case RC_WRITE_ONLY_W_IM_OP: + if((pkt_st_p->reth_st_p = ALLOCATE(IB_RETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->reth_st"); + return(MT_EKMALLOC); + }; + extract_RETH((pkt_st_p->reth_st_p),packet_p); + (pkt_st_p->payload_size) -= RETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + + if((pkt_st_p->immdt_st_p = ALLOCATE(IB_ImmDt_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->IB_ImmDt_st"); + return(MT_EKMALLOC); + }; + extract_ImmDt((pkt_st_p->immdt_st_p),packet_p); + (pkt_st_p->payload_size) -= ImmDt_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_WRITE_LAST_OP: /* BTH */ + case RC_WRITE_MIDDLE_OP: + break; + case RC_SEND_ONLY_W_IM_OP: + case RC_SEND_LAST_W_IM_OP: + if((pkt_st_p->immdt_st_p = ALLOCATE(IB_ImmDt_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->IB_ImmDt_st"); + return(MT_EKMALLOC); + }; + extract_ImmDt((pkt_st_p->immdt_st_p),packet_p); + (pkt_st_p->payload_size) -= ImmDt_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_READ_REQ_OP: /*0xc /BTH/RETH/ */ + if((pkt_st_p->reth_st_p = ALLOCATE(IB_RETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->reth_st"); + return(MT_EKMALLOC); + }; + extract_RETH((pkt_st_p->reth_st_p),packet_p); + (pkt_st_p->payload_size) -= RETH_LEN; /* should be zero */ + /*No payload to this packet*/ + break; + + case RC_READ_RESP_FIRST_OP: + if((pkt_st_p->aeth_st_p = ALLOCATE(IB_AETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->aeth_st"); + return(MT_EKMALLOC); + }; + extract_AETH((pkt_st_p->aeth_st_p),packet_p); + (pkt_st_p->payload_size) -= AETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_READ_RESP_MIDDLE_OP: + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_READ_RESP_LAST_OP: /*BTH/AETH/pyload*/ + if((pkt_st_p->aeth_st_p = ALLOCATE(IB_AETH_st,1)) == NULL){ /* Allocting size in 
bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->aeth_st"); + return(MT_EKMALLOC); + }; + extract_AETH((pkt_st_p->aeth_st_p),packet_p); + (pkt_st_p->payload_size) -= AETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_READ_RESP_ONLY_OP: /*0x10 /BTH/AETH/payload/ */ + if((pkt_st_p->aeth_st_p = ALLOCATE(IB_AETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->aeth_st"); + return(MT_EKMALLOC); + }; + extract_AETH((pkt_st_p->aeth_st_p),packet_p); + (pkt_st_p->payload_size) -= AETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + case RC_ACKNOWLEDGE_OP: + + if ((pkt_st_p->aeth_st_p = ALLOCATE(IB_AETH_st,1)) == NULL) + { /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->aeth_st"); + return(MT_EKMALLOC); + }; + extract_AETH((pkt_st_p->aeth_st_p),packet_p); + pkt_st_p->payload_size -= AETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + MTL_TRACE('5', "\n this is a ack packet PSN is 0x%X MSN is 0x%X\n",pkt_st_p->bth_st_p->PSN,pkt_st_p->aeth_st_p->MSN); + break; + + /****************************************************/ + /* unreliable data Gram UD */ + /****************************************************/ + case UD_SEND_ONLY_OP: /*0x64 /BTH/DETH/payload/ */ + if((pkt_st_p->deth_st_p = ALLOCATE(IB_DETH_st,1)) == NULL){ /* Allocting size in bytes*/ + MTL_ERROR('1', "\nfailed to allocate pkt_st_p->deth_st"); + return(MT_EKMALLOC); + } + extract_DETH((pkt_st_p->deth_st_p),packet_p); + (pkt_st_p->payload_size) -= DETH_LEN; + (pkt_st_p->payload_buf) = *packet_p; + break; + + /****************************************************/ + /* unreliable connection UC */ + /*****************************************************/ + + + default: + MTL_ERROR('1', "\n The Function does not support this kind of a packet\n"); + return(MT_ERROR); + break; + } + + return(MT_OK); +} + +#ifdef MT_KERNEL +#ifdef __WIN__ +int MPGA_init_module(void) +#else +int init_module(void) +#endif +{ + MTL_TRACE('1', "MPGA: loading module\n"); + return(0); +} + +#ifdef __WIN__ +void MPGA_cleanup_module(void) +#else +void cleanup_module(void) +#endif +{ + MTL_TRACE('1', "MPGA: removing module\n"); + return; +} + +#endif /* MT_KERNEL */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.h new file mode 100644 index 00000000..a164a29f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga.h @@ -0,0 +1,872 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef H_PACKET_GEN_H
+#define H_PACKET_GEN_H
+
+/* Layers Includes */
+#include
+
+/* MPGA Includes */
+#include
+#include
+#include
+
+/******************************************************************************
+ * Function: MPGA_build_pkt_lrh (build packet with LRH field)
+ *
+ * Description: This function appends an LRH to IB packets.
+ * To use this function you must have an LRH struct with all the
+ * details needed to create the wanted packet; the function then
+ * generates a complete IB packet.
+ *
+ * Parameters:
+ * lrh_st_p(in) IB_LRH_st *
+ * Local route header of the generated packet.
+ * t_packet_size(in) u_int16_t
+ * The transport packet size in bytes.
+ * t_packet_buf_p(in) void *
+ * A pointer to the transport packet buffer that the LRH will be appended to.
+ * packet_size_p(out) u_int16_t *
+ * A pointer to the packet size in bytes, including the VCRC (calculated by
+ * the function); must be allocated by the user.
+ * packet_buf_p(out) void **
+ * A pointer to the full packet.
+ * The function allocates this buffer and updates the pointer.
+ * LNH(in) LNH_t
+ * Link Next Header definition.
+ * The given LNH will be placed in the lrh_st_p->LNH field.
+ *
+ * Returns:
+ * call_result_t
+ * MT_OK,
+ * MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+MPGA_build_pkt_lrh(IB_LRH_st *lrh_st_p, u_int16_t t_packet_size, void *t_packet_buf_p,
+ u_int16_t *packet_size_p, void **packet_buf_p, LNH_t LNH);
+
+/******************************************************************************
+* Function: MPGA_reliable_send (First, Middle or Last)
+*
+* Description: This function generates IB packets for the transport layer
+* only. It appends the BTH field to the given payload.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+* NOTE: in this branch the function is a stub and returns
+* MT_ENOSYS.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+* packet_place(in) IB_pkt_place
+* Indicates whether it is the first, middle or last packet of the send.
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_reliable_send(IB_BTH_st *bth_st_p, u_int16_t payload_size, void *payload_buf_p,
+ u_int16_t *packet_size_p, void **packet_buf_p, IB_pkt_place packet_place);
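+/******************************************************************************
+* Usage sketch (illustrative only; every variable name below is a
+* placeholder): a complete RC SEND ONLY packet is built in two steps, using
+* MPGA_rc_send_only (declared below) for the transport part and
+* MPGA_build_pkt_lrh (declared above) for the link part. The intermediate
+* transport buffer is allocated by the first call and consumed by the second:
+*
+*   IB_BTH_st bth;
+*   IB_LRH_st lrh;
+*   u_int16_t t_size, pkt_size;
+*   void *t_buf, *pkt_buf;
+*   memset(&bth, 0, sizeof(bth));
+*   memset(&lrh, 0, sizeof(lrh));
+*   lrh.DLID = dest_lid;
+*   lrh.SLID = src_lid;
+*   if (MPGA_rc_send_only(&bth, payload_size, payload_buf,
+*                         &t_size, &t_buf) != MT_OK ||
+*       MPGA_build_pkt_lrh(&lrh, t_size, t_buf, &pkt_size,
+*                          &pkt_buf, IBA_LOCAL) != MT_OK)
+*       return MT_ERROR;
+*
+* On success pkt_buf holds LRH|BTH|payload|ICRC|VCRC and pkt_size is its
+* length in bytes, VCRC included.
+******************************************************************************/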
+/******************************************************************************
+* Function: MPGA_rc_send_only (reliable send only)
+*
+* Description: This function generates IB packets (reliable send only)
+* for the transport layer only.
+* It appends the BTH field to the given payload.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_send_only(IB_BTH_st *bth_st_p, u_int16_t payload_size,
+ void *payload_buf_p, u_int16_t *packet_size_p, void **packet_buf_p);
+
+/******************************************************************************
+* Function: MPGA_rc_rdma_w_only (reliable RDMA write only)
+*
+* Description: This function generates IB packets (reliable RDMA write only)
+* for the transport layer only.
+* It appends the BTH and RETH fields to the given payload.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* reth_st_p(in) IB_RETH_st *
+* RDMA extended transport header.
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_rdma_w_only(IB_BTH_st *bth_st_p, IB_RETH_st *reth_st_p,
+ u_int16_t payload_size, void *payload_buf_p,
+ u_int16_t *packet_size_p, void **packet_buf_p);
+
+/******************************************************************************
+* Function: MPGA_rc_rdma_w_first (reliable RDMA write first)
+*
+* Description: This function generates IB packets (reliable RDMA write first)
+* for the transport layer only.
+* It appends the BTH and RETH fields to the given payload.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* reth_st_p(in) IB_RETH_st *
+* RDMA extended transport header.
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_rdma_w_first(IB_BTH_st *bth_st_p, IB_RETH_st *reth_st_p,
+ u_int16_t payload_size, void *payload_buf_p,
+ u_int16_t *packet_size_p, void **packet_buf_p);
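+/******************************************************************************
+* Usage sketch (illustrative only; placeholder names, and the RETH is zeroed
+* here only for brevity: a real caller must fill in the remote address and
+* key fields from the MPGA header definitions):
+*
+*   IB_BTH_st  bth;
+*   IB_RETH_st reth;
+*   u_int16_t  pkt_size;
+*   void      *pkt_buf;
+*   memset(&bth,  0, sizeof(bth));
+*   memset(&reth, 0, sizeof(reth));
+*   bth.PSN = 1;
+*   if (MPGA_rc_rdma_w_only(&bth, &reth, payload_size, payload_buf,
+*                           &pkt_size, &pkt_buf) != MT_OK)
+*       return MT_ERROR;
+*
+* The OpCode field is overwritten by the function itself, so callers never
+* need to set it.
+******************************************************************************/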
+/******************************************************************************
+* Function: MPGA_rc_rdma_w_middle (reliable RDMA write middle)
+*
+* Description: This function generates IB packets (reliable RDMA write
+* middle) for the transport layer only.
+* It appends the given payload to the BTH.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_rdma_w_middle(IB_BTH_st *bth_st_p, u_int16_t payload_size,
+ void *payload_buf_p, u_int16_t *packet_size_p,
+ void **packet_buf_p);
+
+/******************************************************************************
+* Function: MPGA_rc_rdma_w_last (reliable RDMA write last)
+*
+* Description: This function generates IB packets (reliable RDMA write last)
+* for the transport layer only.
+* It appends the given payload to the BTH.
+* The function performs the malloc for the packet buffer
+* and updates both packet_size_p and packet_buf_p.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* payload_size(in) u_int16_t
+* The size of the packet payload.
+* payload_buf_p(in) void *
+* A pointer to the payload buffer.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR if no packet was generated.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_rdma_w_last(IB_BTH_st *bth_st_p, u_int16_t payload_size,
+ void *payload_buf_p, u_int16_t *packet_size_p,
+ void **packet_buf_p);
+
+/******************************************************************************
+* Function: MPGA_rc_rdma_r_req (reliable RDMA read request)
+*
+* Description: This function generates IB packets (reliable RDMA read
+* request) for the transport layer only.
+* It builds the packet from the BTH and RETH fields; a read
+* request carries no payload.
+*
+* Parameters:
+* bth_st_p(in) IB_BTH_st *
+* Base transport header (no need for the opcode field).
+* reth_st_p(in) IB_RETH_st *
+* RDMA extended transport header.
+* packet_size_p(out) u_int16_t *
+* A pointer to the size of the packet.
+* packet_buf_p(out) void **
+* A pointer to the packet pointer (will be allocated by the function).
+*
+* Returns:
+* call_result_t
+* MT_OK,
+* MT_ERROR.
+*
+*****************************************************************************/
+call_result_t
+MPGA_rc_rdma_r_req(IB_BTH_st *bth_st_p, IB_RETH_st *reth_st_p,
+ u_int16_t *packet_size_p, void **packet_buf_p);
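+/******************************************************************************
+* Segmentation sketch (illustrative only; every name below is a placeholder):
+* a message longer than one MTU goes out as one FIRST, zero or more MIDDLE
+* and one LAST RDMA-write packet, with the BTH PSN advancing by one per
+* packet:
+*
+*   u_int32_t i;
+*   bth.PSN = start_psn;
+*   MPGA_rc_rdma_w_first(&bth, &reth, mtu_len, chunk_p[0], &size, &buf);
+*   for (i = 1; i + 1 < n_chunks; i++) {
+*       bth.PSN++;
+*       MPGA_rc_rdma_w_middle(&bth, mtu_len, chunk_p[i], &size, &buf);
+*   }
+*   bth.PSN++;
+*   MPGA_rc_rdma_w_last(&bth, last_len, chunk_p[n_chunks - 1], &size, &buf);
+*
+* Return codes are ignored here for brevity; real callers should check each
+* call for MT_OK and free each generated buffer after posting it.
+******************************************************************************/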
+ * payload_buf_p(in) void * + * A pointer to the payload buffer. + * packet_size_p(out) u_int16_t * + * A pointer to the size of the packet . + * packet_buf_p(out) void ** + * A pointer to the packet pointer(will be allocated by the function). + * packet_place(IN) IB_pkt_place (enum). + * FISRT_PACKET MIDDLE_PACKET LAST_PACKET (0,1,2). + * + * + * Returns: + * call_result_t + * MT_OK, + * MT_ERROR if no packet was generated. + * + ******************************************************************************/ +call_result_t +MPGA_rc_rdma_r_resp(IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, + u_int16_t payload_size, void *payload_buf_vp, u_int16_t *packet_size_p, + void **packet_buf_vp, IB_pkt_place packet_place); + +/****************************************************************************** +* Function: reliable_rdma_read_response_only +* +* Description: This function generats IB packets (reliable rdma read response only) +* for the transport layer only. +* it appends the BTH and AETH field to the given payload , +* The function will make the malloc for the packet buffer. +* and will update both packet_size_p and packet_buf_p +* +* +* Parameters: +* bth_st_p(in) IB_BTH_st * +* Base transport header (no need for opcode field). +* aeth_st_p(in) IB_AETH_st * +* ACK Extended Transport Header +* payload_size(in) u_int16_t +* The size of the packet payload. +* payload_buf_p(in) void * +* A pointer to the payload buffer. +* packet_size_p(out) u_int16_t * +* A pointer to the size of the packet . +* packet_buf_p(out) void ** +* A pointer to the packet pointer(will be allocated by the function). +* +* Returns: +* call_result_t +* MT_OK, +* MT_ERROR if no packet was generated. +* +*****************************************************************************/ +call_result_t +MPGA_rc_rdma_r_resp_only(IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, + u_int16_t payload_size, void *payload_buf_p, + u_int16_t *packet_size_p, void **packet_buf_p); + +/****************************************************************************** +* From this part the declaration of unreliable send IB packts functions +******************************************************************************/ + +/****************************************************************************** +* Function: unreliable_datagram_send_only (Send Only) +* +* Description: This function generats IB packets (unreliable datagram send only) +* for the transport layer only. +* it appends the BTH and DETH field to the given payload , +* The function will make the malloc for the packet buffer. +* and will update both packet_size_p and packet_buf_p +* +* Parameters: +* bth_st_p(in) IB_BTH_st * +* Base transport header (no need for opcode field). +* deth_st_p(in) IB_DETH_st * +* Datagram Extended Transport Header +* payload_size(in) u_int16_t +* The size of the packet payload. +* payload_buf_p(in) void * +* A pointer to the payload buffer. +* packet_size_p(out) u_int16_t * +* A pointer to the size of the packet . +* packet_buf_p(out) void ** +* A pointer to the packet pointer(will be allocated by the function). +* +* Returns: +* call_result_t +* MT_OK, +* MT_ERROR if no packet was generated. 
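+*
+* Usage sketch (illustrative only; the caller fills in the remaining BTH/DETH
+* fields and releases the returned buffer with the allocator's matching free
+* routine, which is not shown here):
+*
+*   IB_BTH_st  bth  = {0};   /* opcode is filled in by the function */
+*   IB_DETH_st deth = {0};   /* set dest QP, Q_Key, etc. as needed  */
+*   u_int16_t  pkt_size;
+*   void      *pkt_buf;
+*
+*   if (MPGA_ud_send_only(&bth, &deth, payload_size, payload_buf,
+*                         &pkt_size, &pkt_buf) != MT_OK) {
+*       /* no packet was generated */
+*   }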
+*****************************************************************************/
+call_result_t
+MPGA_ud_send_only(IB_BTH_st *bth_st_p, IB_DETH_st *deth_st_p,
+                  u_int16_t payload_size, void *payload_buf_p,
+                  u_int16_t *packet_size_p, void **packet_buf_p);
+
+
+/************************************************************************/
+/* Building headers only                                                */
+/************************************************************************/
+/******************************************************************************
+* Function: unreliable_datagram_send_only (Send Only, headers only)
+*
+* Description: This function generates IB packet headers (unreliable datagram
+*   send only) for the transport and link layers. It builds the LRH, BTH and
+*   DETH fields for the given payload size, allocates the header buffer, and
+*   updates both header_size_p and header_buf_p.
+*
+* Parameters:
+*   lrh_st_p(in)       IB_LRH_st *  - Local route header of the generated header.
+*   bth_st_p(in)       IB_BTH_st *  - Base transport header (the opcode field is set by the function).
+*   deth_st_p(in)      IB_DETH_st * - Datagram Extended Transport Header.
+*   payload_size(in)   u_int16_t    - The size of the packet payload.
+*   header_size_p(out) u_int16_t *  - A pointer to the size of the generated header.
+*   header_buf_p(out)  void **      - A pointer to the header pointer (the buffer is allocated by the function).
+*
+* Returns:
+*   call_result_t
+*     MT_OK,
+*     MT_ERROR if no header was generated,
+*     MT_ENOSYS.
+*****************************************************************************/
+call_result_t
+MPGA_fast_ud_send_only(IB_LRH_st *lrh_st_p, IB_BTH_st *bth_st_p,
+                       IB_DETH_st *deth_st_p, u_int16_t payload_size,
+                       u_int16_t *header_size_p, void **header_buf_p);
+
+
+/******************************************************************************
+* Function: unreliable_datagram_send_only (Send Only with GRH)
+*
+* Description: This function generates IB packet headers (unreliable datagram
+*   send only) for the transport and link layers. It builds the LRH, GRH, BTH
+*   and DETH fields for the given payload size, allocates the header buffer,
+*   and updates both header_size_p and header_buf_p.
+*
+* Parameters:
+*   lrh_st_p(in)       IB_LRH_st *  - Local route header of the generated header.
+*   grh_st_p(in)       IB_GRH_st *  - Global route header of the generated header.
+*   bth_st_p(in)       IB_BTH_st *  - Base transport header (the opcode field is set by the function).
+*   deth_st_p(in)      IB_DETH_st * - Datagram Extended Transport Header.
+*   payload_size(in)   u_int16_t    - The size of the packet payload.
+*   header_size_p(out) u_int16_t *  - A pointer to the size of the generated header.
+*   header_buf_p(out)  void **      - A pointer to the header pointer (the buffer is allocated by the function).
+*
+* Returns:
+*   call_result_t
+*     MT_OK,
+*     MT_ERROR if no header was generated,
+*     MT_ENOSYS.
+*****************************************************************************/
+call_result_t
+MPGA_fast_ud_send_grh(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                      IB_BTH_st *bth_st_p, IB_DETH_st *deth_st_p,
+                      u_int16_t payload_size, u_int16_t *header_size_p,
+                      void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_send (first)
+*
+* Description: This function generates an IB packet header for the transport
+*   and link layers. It builds the LRH and BTH fields, allocates the header
+*   buffer, and updates both header_size_p and header_buf_p.
+*
+* Parameters:
+*   lrh_st_p(out)      IB_LRH_st *  - local route header.
+*   grh_st_p(out)      IB_GRH_st *  - global route header (not supported yet).
+*   bth_st_p(out)      IB_BTH_st *  - base transport header (the opcode field is set by the function).
+*   LNH(in)            LNH_t        - Link Next Header value placed in the LRH.
+*   payload_size(in)   u_int16_t    - the size of the packet payload.
+*   header_size_p(out) u_int16_t *  - a pointer to the size of the generated header.
+*   header_buf_p(out)  void **      - a pointer to the header pointer (the buffer is allocated by the function).
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or MT_ENOSYS.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_send_first(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                        IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size,
+                        u_int16_t *header_size_p, void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_send (middle)
+*
+* Description: Same as MPGA_fast_rc_send_first above, for the "send middle"
+*   opcode: builds the LRH and BTH fields, allocates the header buffer, and
+*   updates both header_size_p and header_buf_p.
+*
+* Parameters:  (same as MPGA_fast_rc_send_first above)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or MT_ENOSYS.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_send_middle(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                         IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size,
+                         u_int16_t *header_size_p, void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_send (last)
+*
+* Description: Same as MPGA_fast_rc_send_first above, for the "send last"
+*   opcode.
+*
+* Parameters:  (same as MPGA_fast_rc_send_first above)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or MT_ENOSYS.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_send_last(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                       IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size,
+                       u_int16_t *header_size_p, void **header_buf_p);
+
+
+/******************************************************************************
+* Function: reliable_send (only)
+*
+* Description: Same as MPGA_fast_rc_send_first above, for the "send only"
+*   opcode.
+*
+* Parameters:  (same as MPGA_fast_rc_send_first above)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or MT_ENOSYS.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_send_only(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                       IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size,
+                       u_int16_t *header_size_p, void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_rc RDMA READ RESPONSE (first)
+*
+* Description: This function generates an IB packet header for the transport
+*   and link layers. It builds the LRH, BTH and AETH fields, allocates the
+*   header buffer, and updates both header_size_p and header_buf_p.
+*
+* Parameters:
+*   lrh_st_p(out)      IB_LRH_st *  - local route header.
+*   grh_st_p(out)      IB_GRH_st *  - global route header (not supported yet).
+*   bth_st_p(out)      IB_BTH_st *  - base transport header (the opcode field is set by the function).
+*   aeth_st_p(out)     IB_AETH_st * - ACK extended transport header.
+*   LNH(in)            LNH_t        - Link Next Header value placed in the LRH.
+*   payload_size(in)   u_int16_t    - the size of the packet payload.
+*   header_size_p(out) u_int16_t *  - a pointer to the size of the generated header.
+*   header_buf_p(out)  void **      - a pointer to the header pointer (the buffer is allocated by the function).
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or
+*   MT_ENOSYS - not supported.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_read_resp_first(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                             IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH,
+                             u_int16_t payload_size, u_int16_t *header_size_p,
+                             void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_rc RDMA READ RESPONSE (middle)
+*
+* Description: Same as MPGA_fast_rc_read_resp_first above, for the "read
+*   response middle" opcode; a middle read response carries no AETH, so only
+*   the LRH and BTH fields are built.
+*
+* Parameters:  (same as MPGA_fast_rc_read_resp_first above, without aeth_st_p)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or
+*   MT_ENOSYS - not supported.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_read_resp_middle(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                              IB_BTH_st *bth_st_p, LNH_t LNH, u_int16_t payload_size,
+                              u_int16_t *header_size_p, void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_rc RDMA READ RESPONSE (last)
+*
+* Description: Same as MPGA_fast_rc_read_resp_first above, for the "read
+*   response last" opcode (LRH, BTH and AETH fields).
+*
+* Parameters:  (same as MPGA_fast_rc_read_resp_first above)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or
+*   MT_ENOSYS - not supported.
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_read_resp_last(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                            IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH,
+                            u_int16_t payload_size, u_int16_t *header_size_p,
+                            void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_rc RDMA READ RESPONSE (only)
+*
+* Description: Same as MPGA_fast_rc_read_resp_first above, for the "read
+*   response only" opcode (LRH, BTH and AETH fields).
+*
+* Parameters:  (same as MPGA_fast_rc_read_resp_first above)
+*
+* Returns:
+*   call_result_t: MT_OK, MT_ERROR if no header was generated, or
+*   MT_ENOSYS - not supported.
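+*
+* Usage sketch (illustrative; passing NULL for the unsupported GRH is an
+* assumption based on the "not supported yet" note above):
+*
+*   u_int16_t hdr_size;
+*   void     *hdr_buf;
+*
+*   if (MPGA_fast_rc_read_resp_only(&lrh, NULL, &bth, &aeth, IBA_LOCAL,
+*                                   payload_size, &hdr_size, &hdr_buf) == MT_OK) {
+*       /* prepend hdr_buf (hdr_size bytes) to each payload of this flow;
+*        * unlike the MPGA_rc_* builders above, no full packet is allocated */
+*   }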
+*
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_read_resp_only(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                            IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH,
+                            u_int16_t payload_size, u_int16_t *header_size_p,
+                            void **header_buf_p);
+
+/******************************************************************************
+* Function: reliable_rc acknowledge
+*
+* Description: Builds the header block (LRH, BTH and AETH fields) for an RC
+*   ACKNOWLEDGE packet, which carries no payload; otherwise analogous to the
+*   header-building functions above.
+*****************************************************************************/
+call_result_t
+MPGA_fast_rc_acknowledge(IB_LRH_st *lrh_st_p, IB_GRH_st *grh_st_p,
+                         IB_BTH_st *bth_st_p, IB_AETH_st *aeth_st_p, LNH_t LNH,
+                         u_int16_t *header_size_p, void **header_buf_p);
+
+
+/*****************************************************************************/
+/* Analyzer functions                                                        */
+/*****************************************************************************/
+
+/******************************************************************************
+* Function: analyze_packet
+*
+* Description: This function analyzes IB packets and updates the needed
+*   structures according to the packet content.
+*
+* Parameters:
+*   pkt_st_p(out)    IB_PKT_st * - A pointer to a packet structure that will be updated by the function.
+*   packet_buf_p(in) void *      - A pointer to the start of the packet, which must begin with the LRH field.
+*
+* NOTE: the function allocates memory for the inner buffers; it is the
+*   user's responsibility to free them.
+*
+* Returns:
+*   call_result_t: MT_OK, or MT_ERROR.
+*
+*****************************************************************************/
+call_result_t
+MPGA_analyze_packet(IB_PKT_st *pkt_st_p, void *packet_buf_p);
+
+/******************************************************************************
+ * Function: Packet_generator
+ *
+ * Description: This function generates IB packets. To use this function you
+ *   must have a general packet struct with all the details needed to create
+ *   the wanted packet. The function allocates the packet buffer and updates
+ *   both packet_size_p and packet_buf_p.
+ *
+ * Parameters:
+ *   struct(in)         packet_fields - A general packet struct.
+ *   payload_size(in)   int32_t       - The size of the packet payload.
+ *   payload_buf_p(in)  void *        - A pointer to the payload buffer.
+ *   packet_size_p(out) int32_t *     - A pointer to the size of the packet.
+ *   packet_buf_p(out)  void *        - A pointer to the full packet.
+ *
+ * Returns:
+ *   call_result_t: MT_OK, or MT_ERROR if no packet was generated.
+ *****************************************************************************/
+/*call_result_t packet_generator ("struct packet_fields", int32_t payload_size,
+                                  u_int8_t *payload_buf_p, int32_t *packet_size_p,
+                                  u_int8_t *packet_buf_p);*/
+
+
+#endif /* H_PACKET_GEN_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c
new file mode 100644
index 00000000..f29584c4
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/************************************/
+#ifndef MT_KERNEL
+
+  #include 
+  #include 
+
+#endif /* MT_KERNEL */
+
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+/* Layers Includes */
+#include 
+
+/************************************/
+
+
+/*********************************************************************************/
+/* build packet with LRH                                                         */
+/*********************************************************************************/
+call_result_t
+MPGA_build_pkt_lrh_sv (IB_LRH_st *lrh_st_p, u_int16_t t_packet_size, void *t_packet_buf_vp,
+                       u_int16_t *packet_size_p, void **packet_buf_vp, LNH_t LNH, struct MPGA_error_st error_st)
+{
+    u_int8_t  *start_LRH_p;
+    u_int16_t TCRC_packet_size = 0;   /* will be passed to the allocate function */
+    u_int32_t ICRC = 0, tmpICRC = 0;  /* for making space for the CRC fields */
+    u_int16_t VCRC = 0, tmpVCRC = 0;
+    u_int8_t  *start_ICRC_p;
+    u_int8_t  *start_VCRC_p;
+    u_int16_t **packet_buf_p;
+    u_int16_t *t_packet_buf_p;
+    u_int8_t  align = 0;
+
+
+    packet_buf_p   = (u_int16_t**)packet_buf_vp;  /* cast to u_int16_t** */
+    t_packet_buf_p = (u_int16_t*)t_packet_buf_vp;
+
+    if(LNH == IBA_LOCAL) TCRC_packet_size = t_packet_size + ICRC_LEN + VCRC_LEN;
+    else{
+        if(LNH == RAW) align = (4 - (t_packet_size % IBWORD)) % IBWORD; /* should be a RAW packet at this stage */
+        TCRC_packet_size = t_packet_size + VCRC_LEN + align;            /* for passing to the allocate function */
+    }
+
+    (*packet_size_p) = TCRC_packet_size + LRH_LEN;  /* CRC fields are included */
+
+
+    if((allocate_packet_LRH(TCRC_packet_size, t_packet_size, t_packet_buf_p,
+                            *packet_size_p, packet_buf_p)) != MT_OK) return(MT_EKMALLOC);
+    /* packet_buf_p is a pointer-to-pointer */
+
+    /* Update the fields in the given LRH struct. */
+    lrh_st_p->LNH = (u_int8_t)LNH;
+    lrh_st_p->PktLen = (*packet_size_p - VCRC_LEN) / IBWORD;
+    /* from the first byte of the LRH up to the VCRC, in 4-byte words */
+    lrh_st_p->reserved1 = 0;
+    lrh_st_p->reserved2 = 0;
+
+    /*************************** random error injection **********************/
+    srand((unsigned int)time((time_t *)NULL));
+
+    if(error_st.PktLen_long == YES)  lrh_st_p->PktLen += (u_int16_t)((rand() % lrh_st_p->PktLen) + 1);
+    if(error_st.PktLen_short == YES) lrh_st_p->PktLen -= (u_int16_t)((rand() % (lrh_st_p->PktLen - 1)) + 1);
+    if(error_st.LVer == YES) lrh_st_p->LVer = rand() % MAX_LVer + 1;      /* not zero (1-15) */
+    else lrh_st_p->LVer = 0;
+    if(error_st.VL == YES) lrh_st_p->VL = (rand() % 7) + 8;               /* 8 to 14 */
+    else lrh_st_p->VL = 7;
+    if(error_st.LNH == YES) lrh_st_p->LNH = ((rand() % 3 + 1) + LNH) % 4; /* any value 0 to 3 except LNH */
+    if(error_st.lrh_reserved1 == YES) lrh_st_p->reserved1 = (rand() % 4);
+    if(error_st.lrh_reserved2 == YES) lrh_st_p->reserved2 = (rand() % 4);
+    /*****************************************************************/
+
+    start_LRH_p = (u_int8_t*)(*packet_buf_p) - LRH_LEN;
+
+    start_LRH_p[0] = INSERTF(start_LRH_p[0],4,lrh_st_p->VL,0,4);
+    start_LRH_p[0] = INSERTF(start_LRH_p[0],0,lrh_st_p->LVer,0,4);
+    start_LRH_p[1] = INSERTF(start_LRH_p[1],4,lrh_st_p->SL,0,4);
+    start_LRH_p[1] = INSERTF(start_LRH_p[1],2,lrh_st_p->reserved1,0,2);
+    start_LRH_p[1] = INSERTF(start_LRH_p[1],0,lrh_st_p->LNH,0,2);
+    start_LRH_p[2] = INSERTF(start_LRH_p[2],0,lrh_st_p->DLID,8,8);
+    start_LRH_p[3] = INSERTF(start_LRH_p[3],0,lrh_st_p->DLID,0,8);
+    start_LRH_p[4] = INSERTF(start_LRH_p[4],3,lrh_st_p->reserved2,0,5);
+    start_LRH_p[4] = INSERTF(start_LRH_p[4],0,lrh_st_p->PktLen,8,3);
+    start_LRH_p[5] = INSERTF(start_LRH_p[5],0,lrh_st_p->PktLen,0,8);
+    start_LRH_p[6] = INSERTF(start_LRH_p[6],0,lrh_st_p->SLID,8,8);
+    start_LRH_p[7] = INSERTF(start_LRH_p[7],0,lrh_st_p->SLID,0,8);
+
+    (*packet_buf_p) = (u_int16_t*)start_LRH_p;
+    /* calc ICRC */
+    if(LNH == IBA_LOCAL){  /* appending the ICRC */
+        start_ICRC_p = (u_int8_t*)start_LRH_p + LRH_LEN + t_packet_size;
+        ICRC = fast_calc_ICRC(*packet_size_p, *packet_buf_p, LNH);
+        /* random error injection */
+        if(error_st.ICRC_IN == YES){
+            tmpICRC = ((rand()% 0xFFFFFFFE) + 1);  /* the ICRC will be higher or lower */
+            tmpICRC != ICRC ? ICRC = tmpICRC : ICRC++;
+        }
+        if(error_st.ICRC_OUT == YES) start_LRH_p[1] += 4;  /* a reserved field will be changed */
+        append_ICRC((u_int16_t*)start_ICRC_p, ICRC);
+    }
+
+    /* calc VCRC */
+    start_VCRC_p = (u_int8_t*)start_LRH_p + LRH_LEN + TCRC_packet_size - VCRC_LEN;
+    VCRC = fast_calc_VCRC(*packet_size_p, *packet_buf_p);  /* appending the VCRC */
+    /* random error injection */
+    if(error_st.VCRC_IN == YES){
+        tmpVCRC += (u_int16_t)((rand()% 0xFFFE) + 1);  /* the VCRC will be higher or lower */
+        tmpVCRC != VCRC ? VCRC = tmpVCRC : VCRC++;
+    }
+    if(error_st.VCRC_OUT == YES) start_LRH_p[1] += 4;  /* a reserved field will be changed */
+    append_VCRC((u_int16_t*)start_VCRC_p, VCRC);
+
+    return(MT_OK);
+}
+
+/*******************************************************************************/
+/* reliable send only                                                          */
+/*******************************************************************************/
+call_result_t
+MPGA_rc_send_only_sv(IB_BTH_st *bth_st_p, u_int16_t payload_size, void *payload_buf_vp,
+                     u_int16_t *packet_size_p, void **packet_buf_vp, struct MPGA_error_st error_st)
+{
+    u_int16_t header_size;
+    u_int16_t packet_size;
+    u_int16_t *payload_buf_p;
+    u_int16_t **packet_buf_p;
+
+    packet_buf_p  = (u_int16_t**)packet_buf_vp;
+    payload_buf_p = (u_int16_t*)payload_buf_vp;  /* cast the void pointer; the payload may be up to 4096 bytes */
+
+    header_size = RC_SEND_ONLY_LEN;  /* init parameters */
+    packet_size = header_size + payload_size + ((IBWORD - (payload_size % IBWORD)) % IBWORD);
+
+    /*************************************/
+    /*       random error injection      */
+    /*************************************/
+    if(error_st.PktLen_not_align == YES){
+        packet_size += (rand() % 3) + 1;
+        error_st.PktLen_long = YES;
+    }
+
+    /* Update fields and the given arguments. */
+    (*packet_size_p) = packet_size;
+    bth_st_p->OpCode = RC_SEND_ONLY_OP;  /* the opcode is 00000100 */
+
+    if((allocate_packet(payload_size, payload_buf_p, packet_size, packet_buf_p)) != MT_OK)
+        return(MT_EKMALLOC);
+    /* packet_buf_p is a pointer-to-pointer */
+
+    if((append_BTH (bth_st_p, packet_buf_p, payload_size)) != MT_OK) return(MT_ERROR);
+    /* appending the BTH field */
+
+    return(MT_OK);
+}
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h
new file mode 100644
index 00000000..7df67c18
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/mpga_sv.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _MPGA_SV_H
+#define _MPGA_SV_H
+
+/* Layers Includes */
+#include 
+
+/* MPGA Includes */
+#include 
+#include 
+#include 
+
+/*****************************************************************************/
+/* This struct defines the error generator of the MPGA library               */
+/*****************************************************************************/
+#define MAX_LVer 0xf
+
+typedef enum{
+    YES = 0,
+    NO  = 1
+} mpga_error_gen;
+
+typedef struct MPGA_error_st MPGA_error_st;
+
+struct MPGA_error_st{
+    mpga_error_gen PktLen_short;
+    mpga_error_gen PktLen_long;
+    mpga_error_gen PktLen_not_align;
+    mpga_error_gen LVer;            /* only LVer == 0 is supported */
+    mpga_error_gen VL;
+    mpga_error_gen VCRC_IN;
+    mpga_error_gen VCRC_OUT;
+    mpga_error_gen ICRC_IN;
+    mpga_error_gen ICRC_OUT;
+
+    mpga_error_gen LNH;             /* only the TCA will drop these */
+    mpga_error_gen lrh_reserved1;
+    mpga_error_gen lrh_reserved2;
+    mpga_error_gen PadCnt;
+    mpga_error_gen TVer;
+    mpga_error_gen bth_reserved1;
+    mpga_error_gen bth_reserved2;
+    mpga_error_gen PSN;
+
+    mpga_error_gen BAD_10_bit;
+};
+
+/******************************************************************************
+ * Function: MPGA_build_pkt_lrh_sv (build packet with LRH field, SV variant)
+ *
+ * Description: This function appends an LRH to an IB packet. To use it you
+ *   must have an LRH struct with all the details needed to create the wanted
+ *   packet; the function then generates the full IB packet.
+ *
+ * Parameters:
+ *   lrh_st_p(in)       IB_LRH_st * - Local route header of the generated packet.
+ *                                    The given LNH value is placed in the lrh_st_p->LNH field.
+ *   t_packet_size(in)  u_int16_t   - The transport packet size in bytes.
+ *   t_packet_buf_p(in) void *      - A pointer to the transport packet buffer that the LRH will be appended to.
+ *   packet_size_p(out) u_int16_t * - A pointer to the size of the packet (calculated by the function);
+ *                                    should be allocated by the user.
+ *   packet_buf_p(out)  void **     - A pointer to the full packet; the function allocates this buffer
+ *                                    and updates the pointer.
+ *   LNH(in)            LNH_t       - Link Next Header definition.
+ *   error_st(in)       struct MPGA_error_st - selects which packet fields to corrupt (see above).
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
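+ *
+ * Usage sketch (illustrative; lrh, t_buf and t_size stand for caller-prepared
+ * values). Note that YES == 0 in mpga_error_gen, so a zero-initialized
+ * MPGA_error_st enables every error injector; set unused members to NO:
+ *
+ *   struct MPGA_error_st err;
+ *   /* set every member of err to NO first, then enable one injector: */
+ *   err.VCRC_IN = YES;                /* corrupt the VCRC of this packet */
+ *   u_int16_t pkt_size;
+ *   void     *pkt_buf;
+ *   if (MPGA_build_pkt_lrh_sv(&lrh, t_size, t_buf, &pkt_size, &pkt_buf,
+ *                             IBA_LOCAL, err) != MT_OK) { /* handle error */ }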
+ *****************************************************************************/
+call_result_t
+MPGA_build_pkt_lrh_sv(IB_LRH_st *lrh_st_p, u_int16_t t_packet_size, void *t_packet_buf_p,
+                      u_int16_t *packet_size_p, void **packet_buf_p, LNH_t LNH, struct MPGA_error_st error_st);
+
+/******************************************************************************
+ * Function: reliable_send_only_sv (Send Only)
+ *
+ * Description: This function generates IB packets (reliable send only) for
+ *   the transport layer only. It appends the BTH field to the given payload,
+ *   allocates the packet buffer, and updates both packet_size_p and
+ *   packet_buf_p.
+ *
+ * Parameters:
+ *   bth_st_p(in)       IB_BTH_st * - Base transport header (the opcode field is set by the function).
+ *   payload_size(in)   u_int16_t   - The size of the packet payload.
+ *   payload_buf_p(in)  void *      - A pointer to the payload buffer.
+ *   packet_size_p(out) u_int16_t * - A pointer to the size of the packet.
+ *   packet_buf_p(out)  void **     - A pointer to the packet pointer (the buffer is allocated by the function).
+ *   error_st(in)       struct MPGA_error_st - selects which packet fields to corrupt (see above).
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *
+ *****************************************************************************/
+call_result_t
+MPGA_rc_send_only_sv(IB_BTH_st *bth_st_p, u_int16_t payload_size,
+                     void *payload_buf_p, u_int16_t *packet_size_p,
+                     void **packet_buf_p, struct MPGA_error_st error_st);
+#endif /* _MPGA_SV_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.c
new file mode 100644
index 00000000..af5fb4e2
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA.c
@@ -0,0 +1,1420 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "nMPGA.h"
+#include "nMPGA_packet_append.h"
+#include 
+
+#undef MT_BIT_OFFSET
+#define MT_BIT_OFFSET(struct_ancore,reg_path) \
+    ((MT_offset_t) &( ((struct struct_ancore *)(0))-> reg_path ))
+#undef MT_BIT_SIZE
+#define MT_BIT_SIZE(struct_ancore,reg_path) \
+    ((MT_size_t) sizeof( ((struct struct_ancore *)(0))-> reg_path ))
+#undef MT_BIT_OFFSET_SIZE
+#define MT_BIT_OFFSET_SIZE(struct_ancore,reg_path) \
+    MT_BIT_OFFSET(struct_ancore,reg_path) , MT_BIT_SIZE(struct_ancore,reg_path)
+
+/* Computes in *packet_len the total wire size, in bytes, of a packet with
+ * the given opcode, LNH, payload length and optional ICRC/VCRC trailers. */
+call_result_t
+MPGA_get_headers_size(IB_opcode_t opcode,
+                      LNH_t LNH,
+                      u_int16_t payload_len,
+                      MT_bool icrc,           /* if set - ICRC exists */
+                      MT_bool vcrc,           /* if set - VCRC exists */
+                      u_int16_t *packet_len)  /* (OUT) packet length in bytes */
+{
+    *packet_len=0;
+    switch (LNH)
+    {
+    case RAW:               /* |LRH|... (Ethertype) */
+        MTL_ERROR1("%s: Unsupported LNH (%d)\n", __func__, LNH);
+        return(MT_ERROR);
+    case IP_NON_IBA_TRANS:  /* |LRH|GRH|... */
+        MTL_ERROR1("%s: Unsupported LNH (%d)\n", __func__, LNH);
+        return(MT_ERROR);
+    case IBA_LOCAL:         /* |LRH|BTH|... */
+        break;
+    case IBA_GLOBAL:        /* |LRH|GRH|BTH|... */
+        *packet_len = GRH_LEN;
+        break;
+    default:
+        MTL_ERROR1("%s: Invalid LNH (%d)\n", __func__, LNH);
+        return(MT_ERROR);
+        break;
+
+    }
+
+    *packet_len += payload_len + ( icrc ? ICRC_LEN : 0 ) + ( vcrc ? VCRC_LEN : 0 );
+
+    switch (opcode)
+    {
+
+    /***********************************************/
+    /*        Reliable Connection (RC)             */
+    /***********************************************/
+
+    case RC_SEND_FIRST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_SEND_MIDDLE_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_SEND_LAST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_SEND_LAST_W_IM_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN;
+        return(MT_OK);
+
+    case RC_SEND_ONLY_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_SEND_ONLY_W_IM_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_FIRST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_MIDDLE_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_LAST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_LAST_W_IM_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_ONLY_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN;
+        return(MT_OK);
+
+    case RC_WRITE_ONLY_W_IM_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN;
+        return(MT_OK);
+
+    case RC_READ_REQ_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN;
+        return(MT_OK);
+
+    case RC_READ_RESP_FIRST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AETH_LEN;
+        return(MT_OK);
+
+    case RC_READ_RESP_MIDDLE_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case RC_READ_RESP_LAST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AETH_LEN;
+        return(MT_OK);
+
+    case RC_READ_RESP_ONLY_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AETH_LEN;
+        return(MT_OK);
+
+    case RC_ACKNOWLEDGE_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AETH_LEN;
+        return(MT_OK);
+
+    case RC_ATOMIC_ACKNOWLEDGE_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AETH_LEN+AtomAETH_LEN;
+        return(MT_OK);
+
+    case RC_CMP_SWAP_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AtomETH_LEN;
+        return(MT_OK);
+
+    case RC_FETCH_ADD_OP:
+        *packet_len+=LRH_LEN+BTH_LEN+AtomETH_LEN;
+        return(MT_OK);
+
+/***********************************************/
+/*        Unreliable Connection (UC)           */
+/***********************************************/
+
+    case UC_SEND_FIRST_OP:
+        *packet_len+=LRH_LEN+BTH_LEN;
+        return(MT_OK);
+
+    case UC_SEND_MIDDLE_OP:
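+        /* The UC opcodes reuse the RC framing: plain sends and writes carry
+         * LRH+BTH only, with RETH/ImmDt added for the RDMA-write and
+         * immediate-data variants. */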
*packet_len+=LRH_LEN+BTH_LEN; + return(MT_OK); + + case UC_SEND_LAST_OP: + *packet_len+=LRH_LEN+BTH_LEN; + return(MT_OK); + + case UC_SEND_LAST_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN; + return(MT_OK); + + case UC_SEND_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN; + return(MT_OK); + + case UC_SEND_ONLY_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN; + return(MT_OK); + + case UC_WRITE_FIRST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN; + return(MT_OK); + + case UC_WRITE_MIDDLE_OP: + *packet_len+=LRH_LEN+BTH_LEN; + return(MT_OK); + + case UC_WRITE_LAST_OP: + *packet_len+=LRH_LEN+BTH_LEN; + return(MT_OK); + + case UC_WRITE_LAST_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+ImmDt_LEN; + return(MT_OK); + + case UC_WRITE_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN; + return(MT_OK); + + case UC_WRITE_ONLY_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN; + return(MT_OK); + +/***********************************************/ +/* Reliable Datagram (RD) */ +/***********************************************/ + + case RD_SEND_FIRST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_SEND_MIDDLE_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_SEND_LAST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_SEND_LAST_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN; + return(MT_OK); + + case RD_SEND_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_SEND_ONLY_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN; + return(MT_OK); + + case RD_WRITE_FIRST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN; + return(MT_OK); + + case RD_WRITE_MIDDLE_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_WRITE_LAST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN; + return(MT_OK); + + case RD_WRITE_LAST_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN; + return(MT_OK); + + case RD_WRITE_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN; + return(MT_OK); + + case RD_WRITE_ONLY_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN+ImmDt_LEN; + return(MT_OK); + + case RD_READ_REQ_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN; + return(MT_OK); + + case RD_READ_RESP_FIRST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN; + return(MT_OK); + + case RD_READ_RESP_MIDDLE_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN; + return(MT_OK); + + case RD_READ_RESP_LAST_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN; + return(MT_OK); + + case RD_READ_RESP_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN; + return(MT_OK); + + case RD_ACKNOWLEDGE_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN; + return(MT_OK); + + case RD_ATOMIC_ACKNOWLEDGE_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN+AtomAETH_LEN; + return(MT_OK); + + case RD_CMP_SWAP_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+AtomETH_LEN; + return(MT_OK); + + case RD_FETCH_ADD_OP: + *packet_len+=LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+AtomETH_LEN; + return(MT_OK); + +/***********************************************/ +/* Unreliable Datagram (UD) */ +/***********************************************/ + + case UD_SEND_ONLY_OP: + *packet_len+=LRH_LEN+BTH_LEN+DETH_LEN; + return(MT_OK); + case UD_SEND_ONLY_W_IM_OP: + *packet_len+=LRH_LEN+BTH_LEN+DETH_LEN+ImmDt_LEN; + return(MT_OK); + default: + MTL_ERROR1("%s: Invalid Opcode (%d)\n", __func__, 
opcode);
+        return(MT_ERROR);
+        break;
+
+    }
+}
+
+call_result_t
+MPGA_make_fast(MPGA_headers_t *MPGA_headers_p,
+               LNH_t LNH,
+               u_int16_t payload_size,
+               u_int8_t **packet_p_p)
+{
+    IB_LRH_st *LRH=NULL;
+    IB_BTH_st *BTH=NULL;
+    u_int16_t packet_len;
+
+    /* Calculate the header positions inside the headers union; when the
+     * packet is global the BTH sits GRH_LEN bytes in, so the offset must be
+     * applied to a byte pointer rather than to an IB_BTH_st pointer. */
+    LRH = (IB_LRH_st*)MPGA_headers_p;
+    BTH = (IB_BTH_st*)((u_int8_t*)MPGA_headers_p + (LNH == IBA_GLOBAL ? GRH_LEN : 0));
+
+    LRH->LNH = (u_int8_t)LNH;
+    /* Calculate pad_count (pad the payload to a 4-byte boundary). */
+    BTH->PadCnt = (IBWORD-payload_size%IBWORD)%IBWORD;
+    /* Calculate pkt_len (in 4-byte words; the VCRC is not counted). */
+    MPGA_get_headers_size(BTH->OpCode,LNH,0,TRUE,FALSE,&packet_len);
+    LRH->PktLen = (packet_len + payload_size + BTH->PadCnt) / IBWORD;
+
+    return(MPGA_make_headers(MPGA_headers_p,BTH->OpCode,LNH,FALSE,FALSE,packet_p_p));
+}
+
+
+call_result_t
+MPGA_make_headers(MPGA_headers_t *MPGA_headers_p,  /* pointer to a headers union */
+                  IB_opcode_t opcode,
+                  LNH_t LNH,
+                  MT_bool CRC,
+                  u_int16_t payload_size,
+                  u_int8_t **packet_p_p)           /* pointer to the packet buffer */
+
+{
+    u_int8_t *start_ICRC;
+    u_int16_t packet_len;
+    u_int8_t *packet_p;
+
+    packet_p=*packet_p_p;
+    if ((LNH!=IBA_LOCAL) && (LNH!=IBA_GLOBAL)) return(MT_ERROR);      /* only IBA_LOCAL and IBA_GLOBAL are supported for now */
+    if (CRC && (!payload_size)) return(MT_ERROR);                     /* payload_size must be provided if a CRC append is requested */
+    if (CRC && (LNH!=IBA_LOCAL && LNH!=IBA_GLOBAL)) return(MT_ERROR); /* CRC calculation is supported only with IBA_LOCAL or IBA_GLOBAL */
+    else start_ICRC = packet_p + payload_size;
+
+    /* For every opcode the headers are appended back to front: the extension
+     * header closest to the payload is written first and the LRH last, after
+     * which packet_p is rewound to the start of the finished header block. */
+    switch (LNH)
+    {
+    case IBA_LOCAL:
+        switch (opcode)
+        {
+
+        /***********************************************/
+        /*        Reliable Connection (RC)             */
+        /***********************************************/
+
+        case RC_SEND_FIRST_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_first.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_first.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_SEND_MIDDLE_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_middle.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_middle.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_SEND_LAST_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_last.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_last.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_SEND_LAST_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rc_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN);
+            break;
+
+        case RC_SEND_ONLY_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_only.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_only.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_SEND_ONLY_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rc_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN);
+            break;
+
+        case RC_WRITE_FIRST_OP:
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rc_write_first.IB_RETH),packet_p-RETH_LEN);
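+            /* The BTH precedes the RETH on the wire, so it is written
+             * RETH_LEN+BTH_LEN bytes before the payload, and the LRH before that. */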
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_first.IB_BTH),packet_p-RETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_first.IB_LRH),packet_p-RETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN);
+            break;
+
+        case RC_WRITE_MIDDLE_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_middle.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_middle.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_WRITE_LAST_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_last.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_last.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);
+            break;
+
+        case RC_WRITE_LAST_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN);
+            break;
+
+        case RC_WRITE_ONLY_OP:
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rc_write_only.IB_RETH),packet_p-RETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_only.IB_BTH),packet_p-RETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_only.IB_LRH),packet_p-RETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN);
+            break;
+
+        case RC_WRITE_ONLY_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rc_write_only.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_write_only.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_write_only.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN);
+            break;
+
+        case RC_READ_REQ_OP:
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rc_read_req.IB_RETH),packet_p-RETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_req.IB_BTH),packet_p-RETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_req.IB_LRH),packet_p-RETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN);
+            break;
+
+        case RC_READ_RESP_FIRST_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_read_res_first.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_first.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_first.IB_LRH),packet_p-AETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+AETH_LEN);
+            break;
+
+        case RC_READ_RESP_MIDDLE_OP:
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_middle.IB_BTH),packet_p-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_middle.IB_LRH),packet_p-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN);   /* a middle read response carries no AETH */
+            break;
+
+        case RC_READ_RESP_LAST_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_LRH),packet_p-AETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+AETH_LEN);
+            break;
+
+        case RC_READ_RESP_ONLY_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_LRH),packet_p-AETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+AETH_LEN); + break; + + case RC_ACKNOWLEDGE_OP: + nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_ack.IB_AETH),packet_p-AETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_ack.IB_BTH),packet_p-AETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_ack.IB_LRH),packet_p-AETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+AETH_LEN); + break; + + + case RC_ATOMIC_ACKNOWLEDGE_OP: + MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode); + return(MT_ERROR); + + case RC_CMP_SWAP_OP: + MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode); + return(MT_ERROR); + + case RC_FETCH_ADD_OP: + MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode); + return(MT_ERROR); + +/***********************************************/ +/* Unreliable Connection (UC) */ +/***********************************************/ + case UC_SEND_FIRST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_first.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_first.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_SEND_MIDDLE_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_middle.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_middle.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_SEND_LAST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_last.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_last.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_SEND_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_uc_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_SEND_ONLY_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_only.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_only.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_SEND_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_uc_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_WRITE_FIRST_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_uc_write_first.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_first.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_first.IB_LRH),packet_p-RETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN); + break; + + case UC_WRITE_MIDDLE_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_middle.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_middle.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_WRITE_LAST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_last.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_last.IB_LRH),packet_p-BTH_LEN-LRH_LEN); + 
packet_p-=(LRH_LEN+BTH_LEN); + break; + + case UC_WRITE_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_uc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_WRITE_ONLY_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_uc_write_only.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_only.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_only.IB_LRH),packet_p-RETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN); + break; + + case UC_WRITE_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_uc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_uc_write_only.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_uc_write_only.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_uc_write_only.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN); + break; + +/***********************************************/ +/* Reliable Datagram (RD) */ +/***********************************************/ + + case RD_SEND_FIRST_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_first.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_first.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_first.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_first.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_MIDDLE_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_middle.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_middle.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_middle.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_middle.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_LAST_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_last.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_last.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_last.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_last.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rd_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_last_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_last_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN); + break; + + case RD_SEND_ONLY_OP: + 
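+        /* RD packets carry an RDETH and a DETH between the BTH and the
+         * payload, so the DETH is appended first (innermost), then the
+         * RDETH, BTH and LRH. */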
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_only.IB_DETH),packet_p-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_only.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_only.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_only.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN);
+            break;
+
+        case RD_SEND_ONLY_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rd_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_send_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_send_only_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN);
+            break;
+
+        case RD_WRITE_FIRST_OP:
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rd_write_first.IB_RETH),packet_p-RETH_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_first.IB_DETH),packet_p-RETH_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_first.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_first.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_first.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN);
+            break;
+
+        case RD_WRITE_MIDDLE_OP:
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_middle.IB_DETH),packet_p-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_middle.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_middle.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_middle.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN);
+            break;
+
+        case RD_WRITE_LAST_OP:
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_last.IB_DETH),packet_p-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_last.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_last.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_last.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN);
+            break;
+
+        case RD_WRITE_LAST_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rd_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_last_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_last_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN);
+            break;
+
+        case RD_WRITE_ONLY_OP:
+            /* wire order: LRH|BTH|RDETH|DETH|RETH|payload */
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rd_write_only.IB_RETH),packet_p-RETH_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_only.IB_DETH),packet_p-RETH_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_only.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_only.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_only.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN);
+            break;
+
+        case RD_WRITE_ONLY_W_IM_OP:
+            nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_write_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN+ImmDt_LEN);
+            break;
+
+        case RD_READ_REQ_OP:
+            nMPGA_append_RETH(&(MPGA_headers_p->MPGA_rd_read_req.IB_RETH),packet_p-RETH_LEN);
+            nMPGA_append_DETH(&(MPGA_headers_p->MPGA_rd_read_req.IB_DETH),packet_p-RETH_LEN-DETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_read_req.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_read_req.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_read_req.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN);
+            break;
+
+        case RD_READ_RESP_FIRST_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+            break;
+
+        case RD_READ_RESP_MIDDLE_OP:
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_RDETH),packet_p-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_middle.IB_BTH),packet_p-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_middle.IB_LRH),packet_p-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN);   /* a middle read response carries no AETH */
+            break;
+
+        case RD_READ_RESP_LAST_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+            nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_last.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+            packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+            break;
+
+        case RD_READ_RESP_ONLY_OP:
+            nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_AETH),packet_p-AETH_LEN);
+            nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+            nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rc_read_res_only.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+
+		case RD_ACKNOWLEDGE_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_rd_ack.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_rd_ack.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_rd_ack.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_rd_ack.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+		case RD_ATOMIC_ACKNOWLEDGE_OP:
+		case RD_CMP_SWAP_OP:
+		case RD_FETCH_ADD_OP:
+			MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+
+/***********************************************/
+/*         Unreliable Datagram (UD)            */
+/***********************************************/
+
+		case UD_SEND_ONLY_OP:
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_ud_send_only.IB_DETH),packet_p-DETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_ud_send_only.IB_BTH),packet_p-DETH_LEN-BTH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_ud_send_only.IB_LRH),packet_p-DETH_LEN-BTH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+BTH_LEN+DETH_LEN);
+			break;
+
+		case UD_SEND_ONLY_W_IM_OP:
+			nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_ud_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_ud_send_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_ud_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-BTH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_ud_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-BTH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+BTH_LEN+DETH_LEN+ImmDt_LEN);
+			break;
+
+		default:
+			MTL_ERROR1("%s: Invalid Opcode (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+			break;
+
+		}
+		break;
+	case IBA_GLOBAL:
+		switch (opcode)
+		{
+
+		/***********************************************/
+		/*        Reliable Connection (RC)             */
+		/***********************************************/
+
+		case RC_SEND_FIRST_OP:
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_first.IB_BTH),packet_p-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_first.IB_GRH),packet_p-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_first.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN);
+			break;
+
+		case RC_SEND_MIDDLE_OP:
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_middle.IB_BTH),packet_p-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_middle.IB_GRH),packet_p-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_middle.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN);
+			break;
+
+		case RC_SEND_LAST_OP:
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_last.IB_BTH),packet_p-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_last.IB_GRH),packet_p-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_last.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN);
+			break;
+
+		case RC_SEND_LAST_W_IM_OP:
+			nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rc_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN);
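+			/*
+			 * The IBA_GLOBAL cases mirror the IBA_LOCAL ones; the only difference
+			 * is the GRH wedged between LRH and BTH, which shifts every offset by
+			 * GRH_LEN.  Illustration (nominal sizes only: LRH=8, GRH=40, BTH=12,
+			 * ImmDt=4): locally this LRH would start at packet_p-24, globally it
+			 * starts at packet_p-64.
+			 */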
nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case RC_SEND_ONLY_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_only.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_only.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_only.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case RC_SEND_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rc_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_send_only_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case RC_WRITE_FIRST_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rc_write_first.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_first.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_first.IB_GRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_first.IB_LRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN); + break; + + case RC_WRITE_MIDDLE_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_middle.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_middle.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_middle.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case RC_WRITE_LAST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_last.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_last.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_last.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case RC_WRITE_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case RC_WRITE_ONLY_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_GRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_LRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN); + break; + + case RC_WRITE_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN); + 
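+			/*
+			 * For "write only with immediate" the ImmDt sits closest to the
+			 * payload, then RETH, BTH, GRH, LRH.  Note that this case reuses the
+			 * write_last_ImmDt and write_only header templates of MPGA_headers_t
+			 * for its fields rather than a dedicated write_only_ImmDt member.
+			 */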
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_GRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_write_only.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN);
+			break;
+
+		case RC_READ_REQ_OP:
+			nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rc_read_req.IB_RETH),packet_p-RETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_req.IB_BTH),packet_p-RETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_req.IB_GRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_req.IB_LRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN);
+			break;
+
+		case RC_READ_RESP_FIRST_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_read_res_first.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_first.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_first.IB_GRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_first.IB_LRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+AETH_LEN);
+			break;
+
+		case RC_READ_RESP_MIDDLE_OP:
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_BTH),packet_p-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_GRH),packet_p-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN);
+			break;
+
+		case RC_READ_RESP_LAST_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_GRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_LRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+AETH_LEN);
+			break;
+
+		case RC_READ_RESP_ONLY_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_GRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_LRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+AETH_LEN);
+			break;
+
+		case RC_ACKNOWLEDGE_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_ack.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_ack.IB_BTH),packet_p-AETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_ack.IB_GRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_ack.IB_LRH),packet_p-AETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+AETH_LEN);
+			break;
+
+
+		case RC_ATOMIC_ACKNOWLEDGE_OP:
+			MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+
+		case RC_CMP_SWAP_OP:
+			MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+
+		case RC_FETCH_ADD_OP:
+			MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+
+/***********************************************/
+/*        Unreliable Connection (UC)
*/ +/***********************************************/ + case UC_SEND_FIRST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_first.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_first.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_first.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_SEND_MIDDLE_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_middle.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_middle.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_middle.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_SEND_LAST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_last.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_last.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_last.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_SEND_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_uc_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_SEND_ONLY_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_only.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_only.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_only.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_SEND_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_uc_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_send_only_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_WRITE_FIRST_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_uc_write_first.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_first.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_first.IB_GRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_first.IB_LRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN); + break; + + case UC_WRITE_MIDDLE_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_middle.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_middle.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_middle.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_WRITE_LAST_OP: + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_last.IB_BTH),packet_p-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_last.IB_GRH),packet_p-BTH_LEN-GRH_LEN); + 
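+			/*
+			 * The UC (unreliable connection) cases build exactly the same header
+			 * stacks as their RC counterparts; only the opcode carried in the BTH
+			 * differs, so the offset arithmetic is identical.
+			 */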
nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_last.IB_LRH),packet_p-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN); + break; + + case UC_WRITE_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_uc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+ImmDt_LEN); + break; + + case UC_WRITE_ONLY_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_BTH),packet_p-RETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_GRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_LRH),packet_p-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN); + break; + + case UC_WRITE_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_uc_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_GRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_uc_write_only.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RETH_LEN+ImmDt_LEN); + break; + +/***********************************************/ +/* Reliable Datagram (RD) */ +/***********************************************/ + + case RD_SEND_FIRST_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_first.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_first.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_first.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_first.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_first.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_MIDDLE_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_middle.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_middle.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_middle.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_middle.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_middle.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_LAST_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_last.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_last.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_last.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + 
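+			/*
+			 * Every RD packet additionally carries an RDETH (EE context, 4 bytes
+			 * nominal) and a DETH (Q_Key plus source QP, 8 bytes nominal) between
+			 * the BTH and the payload, which is why RDETH_LEN+DETH_LEN shows up
+			 * in every offset from here down.
+			 */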
nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_last.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_last.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_LAST_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN); + break; + + case RD_SEND_ONLY_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_only.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_only.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_only.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_only.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_only.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN); + break; + + case RD_SEND_ONLY_W_IM_OP: + nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN); + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_GRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN); + break; + + case RD_WRITE_FIRST_OP: + nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_RETH),packet_p-RETH_LEN); + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_DETH),packet_p-RETH_LEN-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN); + nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN); + nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_GRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN); + nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_first.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN); + packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN); + break; + + case RD_WRITE_MIDDLE_OP: + nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_middle.IB_DETH),packet_p-DETH_LEN); + nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_middle.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN); + 
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_middle.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_middle.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_middle.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN);
+			break;
+
+		case RD_WRITE_LAST_OP:
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_last.IB_DETH),packet_p-DETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_last.IB_RDETH),packet_p-DETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_last.IB_BTH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_last.IB_GRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_last.IB_LRH),packet_p-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN);
+			break;
+
+		case RD_WRITE_LAST_W_IM_OP:
+			nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_GRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_last_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+ImmDt_LEN);
+			break;
+
+		case RD_WRITE_ONLY_OP:
+			nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_RETH),packet_p-RETH_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_DETH),packet_p-RETH_LEN-DETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_GRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_only.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN);
+			break;
+
+		case RD_WRITE_ONLY_W_IM_OP:
+			nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+			nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_RETH),packet_p-ImmDt_LEN-RETH_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_RDETH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_GRH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_write_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN+ImmDt_LEN);
+			break;
+
+		case RD_READ_REQ_OP:
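+			/*
+			 * An RD read request is LRH|GRH|BTH|RDETH|DETH|RETH: the RETH names
+			 * the remote buffer to be read, RDETH/DETH carry the EE context and
+			 * datagram keys, and all six header lengths are rewound from
+			 * packet_p below.
+			 */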
+			nMPGA_append_RETH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_RETH),packet_p-RETH_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_DETH),packet_p-RETH_LEN-DETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_RDETH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_BTH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_GRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_read_req.IB_LRH),packet_p-RETH_LEN-DETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+DETH_LEN+RETH_LEN);
+			break;
+
+		case RD_READ_RESP_FIRST_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_GRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+
+		case RD_READ_RESP_MIDDLE_OP:
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_RDETH),packet_p-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_BTH),packet_p-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_GRH),packet_p-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_middle.IB_LRH),packet_p-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN);
+			break;
+
+		case RD_READ_RESP_LAST_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_GRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_last.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+
+		case RD_READ_RESP_ONLY_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_read_res_first.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_GRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rc_read_res_only.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+
+		case RD_ACKNOWLEDGE_OP:
+			nMPGA_append_AETH(&(MPGA_headers_p->MPGA_G_rd_ack.IB_AETH),packet_p-AETH_LEN);
+			nMPGA_append_RDETH(&(MPGA_headers_p->MPGA_G_rd_ack.IB_RDETH),packet_p-AETH_LEN-RDETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_rd_ack.IB_BTH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_rd_ack.IB_GRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_rd_ack.IB_LRH),packet_p-AETH_LEN-RDETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+RDETH_LEN+AETH_LEN);
+			break;
+		case RD_ATOMIC_ACKNOWLEDGE_OP:
+		case RD_CMP_SWAP_OP:
+		case RD_FETCH_ADD_OP:
+			MTL_ERROR1("%s: Unsupported packet type(opcode) (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+
+/***********************************************/
+/*         Unreliable Datagram (UD)            */
+/***********************************************/
+
+		case UD_SEND_ONLY_OP:
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_ud_send_only.IB_DETH),packet_p-DETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_ud_send_only.IB_BTH),packet_p-DETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_ud_send_only.IB_GRH),packet_p-DETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_ud_send_only.IB_LRH),packet_p-DETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+DETH_LEN);
+			break;
+
+		case UD_SEND_ONLY_W_IM_OP:
+			nMPGA_append_ImmDt(&(MPGA_headers_p->MPGA_G_ud_send_only_ImmDt.IB_ImmDt),packet_p-ImmDt_LEN);
+			nMPGA_append_DETH(&(MPGA_headers_p->MPGA_G_ud_send_only_ImmDt.IB_DETH),packet_p-ImmDt_LEN-DETH_LEN);
+			nMPGA_append_BTH(&(MPGA_headers_p->MPGA_G_ud_send_only_ImmDt.IB_BTH),packet_p-ImmDt_LEN-DETH_LEN-BTH_LEN);
+			nMPGA_append_GRH(&(MPGA_headers_p->MPGA_G_ud_send_only_ImmDt.IB_GRH),packet_p-ImmDt_LEN-DETH_LEN-BTH_LEN-GRH_LEN);
+			nMPGA_append_LRH(&(MPGA_headers_p->MPGA_G_ud_send_only_ImmDt.IB_LRH),packet_p-ImmDt_LEN-DETH_LEN-BTH_LEN-GRH_LEN-LRH_LEN);
+			packet_p-=(LRH_LEN+GRH_LEN+BTH_LEN+DETH_LEN+ImmDt_LEN);
+			break;
+
+		default:
+			MTL_ERROR1("%s: Invalid Opcode (%d)\n", __func__, opcode);
+			return(MT_ERROR);
+			break;
+
+		}
+		break;
+	default:
+		break;
+	}
+	if (CRC)
+	{
+		MPGA_get_headers_size(opcode,LNH,payload_size,FALSE,FALSE,&packet_len);
+		append_ICRC((u_int16_t*)start_ICRC,fast_calc_ICRC(packet_len,(u_int16_t*)packet_p,LNH));
+		append_VCRC((u_int16_t*)start_ICRC + ICRC_LEN,fast_calc_VCRC(packet_len,(u_int16_t*)packet_p));
+	}
+	*packet_p_p=packet_p;
+	return(MT_OK);
+}
+
+
+
+
+//call_result_t MPGA_set_field(u_int8_t *packet, /*pointer to packet buffer*/
+//                             MT_offset_t bit_offset,/*bit offset*/
+//                             MT_size_t bit_size, /*bit size*/
+//                             u_int32_t data)
+//{
+//	/* dividing into 3 parts: */
+//	/* :01234567:01234567:01234567:01234567: */
+//	/* :   *****:        :        :        : First part  */
+//	/* :        :********:********:        : Second part */
+//	/* :        :        :        :**      : Last part   */
+//
+//	u_int8_t length1,length2,length3;
+//	u_int8_t offset1;
+//	u_int8_t *address1,*address2,*address3;
+//
+//	/* Part One*/
+//	length1 = (8-bit_offset%8)%8 < bit_size ? (8-bit_offset%8)%8 : bit_size;
+//	offset1 = bit_offset%8;
+//	address1 = &(packet[bit_offset/8]);
+//	if (length1>0) INSERTF(*address1,offset1,data,0,length1);
+//	if (length1>=bit_size) return MT_OK; /*finished*/
+//	data=data >> length1;
+//	/* Part Two*/
+//	length3 = (bit_size - length1) % 8;
+//	length2 = bit_size - length1 - length3;
+//	address2 = &(packet[(bit_offset/8)+ (length1&&1)]);
+//	if(length2>0) memcpy(address2, &data, length2/8);
+//	if (length1+length2>=bit_size) return MT_OK; /*finished*/
+//	data=data >> length2;
+//	/* Part Three */
+//	address3 = length2 > 0 ? (address2 + 1) : (address1 + 1);
+//	INSERTF(*address3,0,data , 0,length3);
+//	return MT_OK;
+//
+//}
+
+call_result_t
+MPGA_set_field(u_int8_t *packet, /*pointer to packet buffer*/
+               MT_offset_t bit_offset,/*bit offset*/
+               MT_size_t bit_size, /*bit size*/
+               u_int32_t data)
+{
+	u_int32_t temp=0;
+	u_int32_t bit_offset2;
+	/* the field must fit in a single aligned 32-bit word */
+	if ( ((bit_offset%32)+bit_size) > 32) return MT_ERROR;
+	bit_offset2=(u_int32_t)(32-bit_offset%32-bit_size);
+
+	temp=0; /*this is done in order to avoid a compile error about an unused variable*/
+#ifdef MT_LITTLE_ENDIAN
+	temp=((u_int32_t*)packet)[bit_offset/32];
+	temp=mswab32(temp);
+	MT_INSERT32(temp,data,bit_offset2%32,bit_size);
+	temp=mswab32(temp);
+	((u_int32_t*)packet)[bit_offset/32]=temp;
+	return MT_OK;
+#else
+	MT_INSERT32(((u_int32_t*)packet)[bit_offset/32],data,bit_offset%32,bit_size);
+	return MT_OK;
+#endif
+}
+
+
+call_result_t
+MPGA_read_field(u_int8_t *packet, /*pointer to packet buffer*/
+                MT_offset_t bit_offset,/*bit offset*/
+                MT_size_t bit_size, /*bit size*/
+                u_int32_t *data)
+{
+#ifdef MT_LITTLE_ENDIAN
+	u_int32_t temp;
+	/* the field must fit in a single aligned 32-bit word */
+	if ( ((bit_offset%32)+bit_size) > 32) return MT_ERROR;
+	temp=((u_int32_t*)packet)[bit_offset/32];
+	temp=mswab32(temp);
+	bit_offset=32-bit_offset%32-bit_size;
+	*data=MT_EXTRACT32(temp,bit_offset%32,bit_size);
+	return MT_OK;
+#else
+	if ( ((bit_offset%32)+bit_size) > 32) return MT_ERROR;
+	*data=MT_EXTRACT32(((u_int32_t*)packet)[bit_offset/32],bit_offset%32,bit_size);
+	return MT_OK;
+#endif
+}
+
+call_result_t
+MPGA_extract_LNH(u_int8_t *packet, /*pointer to packet buffer*/
+                 LNH_t *LNH)
+{
+	if (packet) MPGA_read_field(packet,MT_BIT_OFFSET_SIZE(IB_LRH_p_t,LNH),LNH);
+	else return(MT_ERROR);
+	return(MT_OK);
+}
+
+call_result_t
+MPGA_get_BTH_offset(u_int8_t *packet,
+                    u_int32_t *offset)
+{
+	LNH_t LNH;
+	if (!packet) return(MT_ERROR);
+	MPGA_extract_LNH(packet,&LNH);
+	switch (LNH)
+	{
+	case RAW:              /* |LRH|... (Ethertype)*/
+		MTL_ERROR1("%s: Unsupported LNH (%d)\n", __func__, LNH);
+		return(MT_ERROR);
+	case IP_NON_IBA_TRANS: /* |LRH|GRH|... */
+		MTL_ERROR1("%s: Unsupported LNH (%d)\n", __func__, LNH);
+		return(MT_ERROR);
+	case IBA_LOCAL:        /* |LRH|BTH|... */
+		*offset = LRH_LEN*8;
+		break;
+	case IBA_GLOBAL:       /* |LRH|GRH|BTH|... */
+		*offset = LRH_LEN*8 + GRH_LEN*8;
+		break;
+	default:
+		MTL_ERROR1("%s: Invalid LNH (%d)\n", __func__, LNH);
+		return(MT_ERROR);
+		break;
+	}
+	return(MT_OK);
+}
+
+call_result_t
+MPGA_extract_opcode(u_int8_t *packet,
+                    IB_opcode_t *opcode)
+{
+	u_int32_t BTH_offset;
+	u_int32_t data=0;
+	if (!packet) return(MT_ERROR);
+	MPGA_get_BTH_offset(packet,&BTH_offset);
+	MPGA_read_field(packet,MT_BIT_OFFSET(IB_BTH_p_t,OpCode)+BTH_offset,MT_BIT_SIZE(IB_BTH_p_t,OpCode),&data);
+	*opcode=(IB_opcode_t)data;
+	return(MT_OK);
+}
+
+call_result_t
+MPGA_extract_PadCnt(u_int8_t *packet,
+                    u_int8_t *PadCnt)
+{
+	u_int32_t BTH_offset;
+	u_int32_t data=0;
+	if (!packet) return(MT_ERROR);
+	MPGA_get_BTH_offset(packet,&BTH_offset);
+	MPGA_read_field(packet,MT_BIT_OFFSET(IB_BTH_p_t,PadCnt)+BTH_offset,MT_BIT_SIZE(IB_BTH_p_t,PadCnt),&data);
+	*PadCnt=(u_int8_t)data;
+	return(MT_OK);
+}
+
+call_result_t
+MPGA_new_from_old(u_int8_t *old_packet, /*pointer to the buffer where the old headers are*/
+                  u_int8_t *new_packet, /*pointer to the buffer where the new headers should be*/
+                  u_int16_t buffer_size) /*total byte size allocated for headers starting from new_packet*/
+                                         /*will be used to avoid illegal memory access*/
+{
+	u_int16_t headers_size;
+	IB_opcode_t opcode;
+	LNH_t LNH;
+
+	MPGA_extract_LNH(old_packet,&LNH);
+	MPGA_extract_opcode(old_packet,&opcode);
+	MPGA_get_headers_size(opcode,LNH,0,0,0,&headers_size);
+	if (buffer_size<headers_size) return(MT_ERROR);
+	memcpy(new_packet,old_packet,headers_size);
+	return(MT_OK);
+}
+#include
+#include
+#include
+#include
+
+
+#define nMPGA_MT_BIT_OFFSET(reg_path) \
+    ((MT_offset_t) &( ((union MPGA_headers_p_t *)(0))-> (reg_path) ))
+#define nMPGA_MT_BIT_SIZE(reg_path) \
+    ((MT_size_t) sizeof( ((union MPGA_headers_p_t *)(0))-> (reg_path) ))
+#define nMPGA_MT_BIT_OFFSET_SIZE(reg_path) \
+    nMPGA_MT_BIT_OFFSET(reg_path) , nMPGA_MT_BIT_SIZE(reg_path)
+
+/*
+#define MT_BIT_OFFSET(struct_ancore,reg_path) \
+    ((MT_offset_t) &( ((struct (struct_ancore), *)(0))-> (reg_path) ))
+#define MT_BIT_SIZE(reg_path) \
+    ((MT_size_t) sizeof( ((struct (struct_ancore) *)(0))-> (reg_path) ))
+#define MT_BIT_OFFSET_SIZE(struct_ancore,reg_path) \
+    nMPGA_MT_BIT_OFFSET((struct_ancore),(reg_path)) , nMPGA_MT_BIT_SIZE((struct_ancore),(reg_path))
+
+*/
+
+
+/******************************************************************************************
+* Function: MPGA_make_headers
+*
+* Description: Packs the headers according to the given opcode into the given
+*              buffer; ICRC/VCRC will be added if requested.  The packing does
+*              not modify the given header structures themselves.
+*
+* Supported types:
+*              All IBA_LOCAL and IBA_GLOBAL except:
+*              ATOMIC_ACKNOWLEDGE_OP, RD_CMP_SWAP_OP, RD_FETCH_ADD_OP
+*
+* Parameters:
+*
+*   MPGA_headers_t *MPGA_headers_p (IN) - Pointer to the union which includes the headers of the packet.
+*   IB_opcode_t opcode (IN)             - The headers will be built according to the opcode.
+*   LNH_t LNH (IN)                      - identifies the next header (e.g. IBA_LOCAL)
+*   MT_bool CRC (IN)                    - If true, ICRC and VCRC will be added after the payload.
+*                                         In this case the payload length should be provided,
+*                                         and the packet should be contiguous(*) in the buffer.
+*                                         (*): packet_p (explained later) should point to a buffer with
+*                                         sufficient space before it for the headers (as usual),
+*                                         the payload immediately after it, and 6 allocated free bytes
+*                                         after the payload for the ICRC/VCRC.
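+*                                         An illustrative layout sketch (nominal
+*                                         sizes only: 4-byte ICRC, 2-byte VCRC):
+*
+*                                         |..headers..|..payload..|ICRC|VCRC|
+*                                                     ^packet_p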
+*   u_int16_t payload_size (IN)         - Used only if CRC==true
+*   u_int8_t packet_p (OUT)             - Pointer to a pre-allocated buffer.  It SHOULD NOT point to the
+*                                         buffer start; instead it should have 126 free and allocated
+*                                         bytes before it.  This is where the headers will be written:
+*                                         the headers end where the given pointer is, and the pointer
+*                                         will be modified to point to the start of the packed headers.
+*                                         (READ the NOTE!)
+*              Example:
+*              Before:  |        126 bytes buffer        P     |
+*                                                        ^Pointer
+*
+*              After:   |        P(P.H)-(P.H)                  |
+*                                Pointer^||||||||||
+*                                       Packed Headers
+*              NOTE FOR ADVANCED USERS: the headers will be built from this
+*              pointer backwards.  A good way to use it is to give a pointer
+*              with at least 128 bytes free for use behind it, so it would fit
+*              any kind of headers.  If one wishes to allocate exactly the
+*              space needed, one can use the function MPGA_get_headers_size.
+*
+* Returns:
+*   MT_OK
+*   MT_ERROR
+*
+*****************************************************************************/
+call_result_t MPGA_make_headers(MPGA_headers_t *MPGA_headers_p, /*pointer to a headers union*/
+                                IB_opcode_t opcode,
+                                LNH_t LNH,
+                                MT_bool CRC,
+                                u_int16_t payload_size,
+                                u_int8_t **packet_p_p); /* pointer to packet buffer*/
+/*********************************************************************************
+* Function: MPGA_make_fast
+*
+* Description: Generally the same as MPGA_make_headers, with some enhancements:
+*              the LNH, pad count and packet length fields are filled in
+*              automatically.  The packet will be built according to the given
+*              fields, but the automatically calculated fields override the
+*              corresponding values supplied by the caller.
+*   MPGA_headers_t *MPGA_headers_p (IN) - Pointer to the union which includes the headers of the packet.
+*   LNH_t LNH (IN)                      - identifies the next header (e.g. IBA_LOCAL)
+*   u_int16_t payload_size (IN)         - Used only if CRC==true
+*   u_int8_t packet_p (OUT)             - Pointer to a pre-allocated buffer; it SHOULD NOT point to the buffer start,
+*********************************************************************************/
+call_result_t MPGA_make_fast(MPGA_headers_t *MPGA_headers_p, /*pointer to headers union*/
+                             LNH_t LNH,
+                             u_int16_t payload_size,
+                             u_int8_t **packet_p_p);
+
+/******************************************************************************************
+* Function: MPGA_set_field
+*
+* Description: Updates a field within a packed packet.  The field is specified
+*              by bit_offset (counting from the packet start) and bit_size.
+*              It is advisable to use the macro nMPGA_MT_BIT_OFFSET_SIZE or similar.
+*
+* Parameters:
+*   u_int8_t *packet (IN)      pointer to packet buffer
+*   MT_offset_t bit_offset (IN)
+*   MT_size_t bit_size (IN)
+*   u_int32_t data (IN)
+*
+* Returns:
+*   MT_OK
+*   MT_ERROR
+*
+*****************************************************************************/
+call_result_t MPGA_set_field(u_int8_t *packet, /*pointer to packet buffer*/
+                             MT_offset_t bit_offset,/*bit offset*/
+                             MT_size_t bit_size, /*bit size*/
+                             u_int32_t data);
+
+/******************************************************************************************
+* Function: MPGA_read_field
+*
+* Description: Reads a field within a packed packet.  The field is specified
+*              by bit_offset (counting from the packet start) and bit_size.
+*              It is advisable to use the macro nMPGA_MT_BIT_OFFSET_SIZE or similar.
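+*              An illustrative example (assuming a local packet and the nominal
+*              LRH_LEN=8): the BTH starts LRH_LEN bytes into the packet and its
+*              DestQP field occupies BTH bytes 5..7, so the 24-bit destination
+*              QP can be read with
+*
+*                  u_int32_t qp;
+*                  MPGA_read_field(packet, (LRH_LEN+5)*8, 24, &qp);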
+*
+* Parameters:
+*   u_int8_t *packet (IN)      pointer to packet buffer
+*   MT_offset_t bit_offset (IN)
+*   MT_size_t bit_size (IN)
+*   u_int32_t *data (OUT)
+*
+* Returns:
+*   MT_OK
+*   MT_ERROR
+*
+*****************************************************************************/
+call_result_t MPGA_read_field(u_int8_t *packet, /*pointer to packet buffer*/
+                              MT_offset_t bit_offset,/*bit offset*/
+                              MT_size_t bit_size, /*bit size*/
+                              u_int32_t *data);
+
+/******************************************************************************************
+* Function: MPGA_new_from_old
+*
+* Description: Copies the headers of an already packed packet to a new buffer
+*
+* Supported types: same as MPGA_get_headers_size
+*
+* Parameters:
+*   u_int8_t *old_packet (IN)   pointer to the buffer where the old headers are
+*   u_int8_t *new_packet (OUT)  pointer to the buffer where the new headers should be
+*   u_int16_t buffer_size (IN)  total byte size allocated for headers starting from new_packet;
+*                               will be used to avoid illegal memory access
+*
+* Returns:
+*   MT_OK
+*   MT_ERROR
+*
+*****************************************************************************/
+call_result_t MPGA_new_from_old(u_int8_t *old_packet, /*pointer to the buffer where the old headers are*/
+                                u_int8_t *new_packet, /*pointer to the buffer where the new headers should be*/
+                                u_int16_t buffer_size);/*total byte size allocated for headers starting from new_packet*/
+                                                       /*will be used to avoid illegal memory access*/
+/***************************************************************************************
+ * Function: MPGA_get_headers_size
+ * Description: Returns the size of the buffer that is needed to hold a given packet.
+ *              NOTE: This isn't equal to the Pkt_Len field in the LRH header: no padding
+ *              is added, and the VCRC isn't counted (unless explicitly asked for).
+ * Supported types: IBA_LOCAL - send and RDMA write
+ * Parameters:
+ *   IB_opcode_t opcode (IN)
+ *   LNH_t LNH (IN)
+ *   u_int16_t payload_len (IN)
+ *   MT_bool icrc (IN)            if set - icrc exists
+ *   MT_bool vcrc (IN)            if set - vcrc exists
+ *   u_int16_t *packet_len (OUT)  packet length in bytes
+ * Returns:
+ *   MT_OK
+ *   MT_ERROR
+ ***************************************************************************************/
+call_result_t MPGA_get_headers_size(IB_opcode_t opcode,
+                                    LNH_t LNH,
+                                    u_int16_t payload_len,
+                                    MT_bool icrc, /*if set - icrc exists*/
+                                    MT_bool vcrc, /*if set - vcrc exists*/
+                                    u_int16_t *packet_len); /*packet length in bytes*/
+
+/***************************************************************************************
+ * Function: MPGA_extract_LNH
+ * Description: Extracts the LNH field from a given packed packet (pointer is to the packet start)
+ * Parameters:
+ *   u_int8_t *packet (IN)  pointer to packet buffer
+ *   LNH_t *LNH (OUT)       will be modified (but not allocated!)
+ * Returns:
+ *   MT_OK
+ *   MT_ERROR
+ ***************************************************************************************/
+call_result_t MPGA_extract_LNH(u_int8_t *packet, /*pointer to packet buffer*/
+                               LNH_t *LNH);
+
+/***************************************************************************************
+ * Function: MPGA_extract_opcode
+ * Description: Extracts the opcode field from a given packed packet (pointer is to the packet start)
+ * Parameters:
+ *   u_int8_t *packet (IN)      pointer to packet buffer
+ *   IB_opcode_t *opcode (OUT)  will be modified (but not allocated!)
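+ *              An illustrative example (handle_send here is a hypothetical
+ *              caller-side helper, not part of this API):
+ *                  IB_opcode_t op;
+ *                  if (MPGA_extract_opcode(packet,&op)==MT_OK && op==RC_SEND_ONLY_OP)
+ *                      handle_send(packet);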
+ * Returns: + * MT_OK + * MT_ERROR +***************************************************************************************/ +call_result_t MPGA_extract_opcode(u_int8_t *packet, + IB_opcode_t *opcode); + +/*************************************************************************************** + * Function: MPGA_extract_PadCnt + * Description: extract the PadCnt field from a given packed packet (pointer is to the packet start) + * Parameters: + * u_int8_t *packet (OUT) pointer to packet buffer + * u_int8_t *PadCnt (IN/OUT) will be modified (but not allocated!) + * Returns: + * MT_OK + * MT_ERROR + ***************************************************************************************/ +call_result_t MPGA_extract_PadCnt(u_int8_t *packet, + u_int8_t *PadCnt); diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.c new file mode 100644 index 00000000..fb6fc102 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.c @@ -0,0 +1,208 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +/* Layers Include */ +#include +#ifdef __WIN__ +#include +#endif +#include + +/* MPGA Includes */ +#include "nMPGA_packet_append.h" +#include +#include + + + +/*************************************************************************/ +/* nMPGA_append lrh */ +/*************************************************************************/ +call_result_t +nMPGA_append_LRH (IB_LRH_st *lrh_st_p, + u_int8_t *start_LRH_p) +{ + INSERTF(start_LRH_p[0],4,lrh_st_p->VL,0,4); + INSERTF(start_LRH_p[0],0,lrh_st_p->LVer,0,4); + INSERTF(start_LRH_p[1],4,lrh_st_p->SL,0,4); + INSERTF(start_LRH_p[1],2,lrh_st_p->reserved1,0,2); + INSERTF(start_LRH_p[1],0,lrh_st_p->LNH,0,2); + INSERTF(start_LRH_p[2],0,lrh_st_p->DLID,8,8); + INSERTF(start_LRH_p[3],0,lrh_st_p->DLID,0,8); + INSERTF(start_LRH_p[4],3,lrh_st_p->reserved2,0,5); + INSERTF(start_LRH_p[4],0,lrh_st_p->PktLen,8,3); + INSERTF(start_LRH_p[5],0,lrh_st_p->PktLen,0,8); + INSERTF(start_LRH_p[6],0,lrh_st_p->SLID,8,8); + INSERTF(start_LRH_p[7],0,lrh_st_p->SLID,0,8); + return(MT_OK); +} + +/*************************************************************************/ +/* nMPGA_append grh */ +/*************************************************************************/ +call_result_t +nMPGA_append_GRH (IB_GRH_st *grh_st_p, + u_int8_t *start_GRH_p) +{ + INSERTF(start_GRH_p[0],4,grh_st_p->IPVer,0,4); + INSERTF(start_GRH_p[0],0,grh_st_p->TClass,4,4); + INSERTF(start_GRH_p[1],4,grh_st_p->TClass,0,4); + INSERTF(start_GRH_p[1],0,grh_st_p->FlowLabel,16,4); + INSERTF(start_GRH_p[2],0,grh_st_p->FlowLabel,8,8); + INSERTF(start_GRH_p[3],0,grh_st_p->FlowLabel,0,8); + INSERTF(start_GRH_p[4],0,grh_st_p->PayLen,8,8); + INSERTF(start_GRH_p[5],0,grh_st_p->PayLen,0,8); + start_GRH_p[6] = grh_st_p->NxtHdr; + start_GRH_p[7] = grh_st_p->HopLmt; + memcpy(&(start_GRH_p[8]), grh_st_p->SGID, sizeof(IB_gid_t)); + memcpy(&(start_GRH_p[24]), grh_st_p->DGID, sizeof(IB_gid_t)); + return(MT_OK); +} +/*********************************************************************************/ +/* nMPGA_append BTH */ +/*********************************************************************************/ +call_result_t +nMPGA_append_BTH (IB_BTH_st *bth_st_p, u_int8_t *start_BTH_p) +{ + INSERTF(start_BTH_p[0],0,bth_st_p->OpCode,0,8); + INSERTF(start_BTH_p[1],7,bth_st_p->SE,0,1); + INSERTF(start_BTH_p[1],6,bth_st_p->M,0,1); + INSERTF(start_BTH_p[1],4,bth_st_p->PadCnt,0,2); + INSERTF(start_BTH_p[1],0,bth_st_p->TVer,0,4); + INSERTF(start_BTH_p[2],0,bth_st_p->P_KEY,8,8); + INSERTF(start_BTH_p[3],0,bth_st_p->P_KEY,0,8); + INSERTF(start_BTH_p[4],0,bth_st_p->reserved1,0,8); + INSERTF(start_BTH_p[5],0,bth_st_p->DestQP,16,8); + INSERTF(start_BTH_p[6],0,bth_st_p->DestQP,8,8); + INSERTF(start_BTH_p[7],0,bth_st_p->DestQP,0,8); + INSERTF(start_BTH_p[8],7,bth_st_p->A,0,1); + INSERTF(start_BTH_p[8],0,bth_st_p->reserved2,0,7); + INSERTF(start_BTH_p[9],0,bth_st_p->PSN,16,8); + INSERTF(start_BTH_p[10],0,bth_st_p->PSN,8,8); + INSERTF(start_BTH_p[11],0,bth_st_p->PSN,0,8); + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append RETH */ +/*********************************************************************************/ +call_result_t +nMPGA_append_RETH (IB_RETH_st *reth_st_p, u_int8_t *start_RETH_p) +{ + u_int8_t *start_VA_p; + u_int8_t *start_R_Key_p; + u_int8_t *start_DMALen_p; + + start_VA_p = start_RETH_p;/*The first field*/ + start_R_Key_p = start_RETH_p + 8; /*1st field 8 byte long*/ + start_DMALen_p = start_RETH_p + 12;/*2nd fiels 4 
byte + 12 1st*/ + + (*((u_int64_t*)start_VA_p)) = MOSAL_cpu_to_be64(reth_st_p->VA); /*64bit field (big endian)*/ + (*((u_int32_t*)start_R_Key_p)) = MOSAL_cpu_to_be32(reth_st_p->R_Key);/*32bit field (big endian)*/ + (*((u_int32_t*)start_DMALen_p)) = MOSAL_cpu_to_be32(reth_st_p->DMALen);/*32bit field (big e)*/ + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append AETH */ +/*********************************************************************************/ +call_result_t +nMPGA_append_AETH (IB_AETH_st *aeth_st_p, u_int8_t *start_AETH_p) +{ + INSERTF(start_AETH_p[0],0,aeth_st_p->Syndrome,0,8);/*8 bitf (big endain)*/ + INSERTF(start_AETH_p[1],0,aeth_st_p->MSN,16,8);/*24 bitf (big endain)*/ + INSERTF(start_AETH_p[2],0,aeth_st_p->MSN,8,8); /*it is dangerus to use bm*/ + INSERTF(start_AETH_p[3],0,aeth_st_p->MSN,0,8); + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append RDETH */ +/*********************************************************************************/ +call_result_t +nMPGA_append_RDETH (IB_RDETH_st *rdeth_st_p, u_int8_t *start_RDETH_p) +{ + start_RDETH_p[0] = rdeth_st_p->reserved1;/*8bit field (big endian)*/ + INSERTF(start_RDETH_p[1],0,rdeth_st_p->EECnxt,16,8);/*24bit field (big endian)*/ + INSERTF(start_RDETH_p[2],0,rdeth_st_p->EECnxt,8,8); + INSERTF(start_RDETH_p[3],0,rdeth_st_p->EECnxt,0,8); + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append DETH */ +/*********************************************************************************/ +call_result_t +nMPGA_append_DETH (IB_DETH_st *deth_st_p, u_int8_t *start_DETH_p) +{ + u_int8_t *start_Q_Key_p; + + start_Q_Key_p = start_DETH_p; + + (*((u_int32_t*)start_Q_Key_p)) = MOSAL_cpu_to_be32(deth_st_p->Q_Key); /*32bit field (big endian)*/ + start_DETH_p[4] = deth_st_p->reserved1;/*8bit field (big endian)*/ + INSERTF(start_DETH_p[5],0,deth_st_p->SrcQP,16,8);/*24bit field (big endian)*/ + INSERTF(start_DETH_p[6],0,deth_st_p->SrcQP,8,8); + INSERTF(start_DETH_p[7],0,deth_st_p->SrcQP,0,8); + return(MT_OK); +} +/*********************************************************************************/ +/* nMPGA_append ImmDt */ +/*********************************************************************************/ +call_result_t +nMPGA_append_ImmDt (IB_ImmDt_st *ImmDt_st_p, u_int8_t *start_ImmDt_p) +{ + (*((u_int32_t*)start_ImmDt_p)) = MOSAL_cpu_to_be32(ImmDt_st_p->ImmDt); /*32bit field (big endian)*/ + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append ICRC */ +/*********************************************************************************/ +call_result_t +nMPGA_append_ICRC(u_int16_t *start_ICRC, u_int32_t ICRC) +{ + *((u_int32_t*)start_ICRC) = MOSAL_cpu_to_le32(ICRC); + return(MT_OK); +} + +/*********************************************************************************/ +/* nMPGA_append VCRC */ +/*********************************************************************************/ +call_result_t +nMPGA_append_VCRC(u_int16_t *start_VCRC, u_int16_t VCRC) +{ + *((u_int16_t*)start_VCRC) = MOSAL_cpu_to_le16(VCRC); + return(MT_OK); +} diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.h new file mode 100644 index 00000000..b80c503e --- /dev/null +++ 
b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/nMPGA_packet_append.h @@ -0,0 +1,253 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_nMPGA_PACKET_APPEND_H +#define H_nMPGA_PACKET_APPEND_H + +/* Layers Includes */ +#include + +/*******************/ + +#ifdef __WIN__ +#include +#endif + +#if !defined(__DARWIN__) && defined(__LINUX__) && !defined(__KERNEL__) + #include +#endif + +#include + +/*Start of function declarations*/ +/****************************************************************************** + * Function: nMPGA_append_LRH + * + * Description: This function is appending LRH to IB packets . + * To use this function you must have a LRH struct, + * with all the detailes to create the wanted packet. + * additionaly you should give a pointer for the new appended LRH + * + * Parameters: + * IB_LRH_st *lrh_st_p(IN) Link next header . + * u_int8_t *start_LRH_p(OUT) preallocated buffer + * + * Returns: + * call_result_t + * MT_OK, + * MT_ERROR if no packet was generated. + *****************************************************************************/ +call_result_t +nMPGA_append_LRH(IB_LRH_st *lrh_st_p, u_int8_t *start_LRH_p); +/****************************************************************************** + * Function: nMPGA_append_GRH + * + * Description: This function is appending GRH to IB packets . + * To use this function you must have a GRH struct, + * with all the detailes to create the wanted packet. + * and an allocated area with free space for the GRH field + * + * Parameters: + * IB_GRH_st *grh_st_p(IN) Global Route Header. + * u_int8_t *start_GRH_p(OUT) preallocated buffer. + * + * Returns: + * call_result_t + * MT_OK, + * MT_ERROR if the field was not appended. + *****************************************************************************/ +call_result_t +nMPGA_append_GRH(IB_GRH_st *grh_st_p,u_int8_t *start_GRH_p); + +/****************************************************************************** + * Function: nMPGA_append_BTH + * + * Description: This function is appending BTH to IB packets . + * To use this function you must have a BTH struct, + * with all the detailes to create the wanted packet. + * and an allocated area with free space for the BTH field + * + * Parameters: + * IB_BTH_st *bth_st_p(out) + * u_int8_t *start_BTH_p(IN) + * + * Returns: + * call_result_t + * MT_OK, + * MT_ERROR if no packet was generated. 
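+ *              For orientation (matching the IBA BTH layout): byte 1 of the
+ *              BTH holds SE|M|PadCnt|TVer, so e.g.
+ *                  INSERTF(start_BTH_p[1],4,bth_st_p->PadCnt,0,2)
+ *              places the 2-bit pad count at bits 4..5 of that byte.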
+/******************************************************************************
+ * Function: nMPGA_append_GRH
+ *
+ * Description: This function appends a GRH to IB packets.
+ *              To use this function you must have a GRH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the GRH field.
+ *
+ * Parameters:
+ *   IB_GRH_st *grh_st_p(IN)      Global Route Header.
+ *   u_int8_t *start_GRH_p(OUT)   preallocated buffer.
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if the field was not appended.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_GRH(IB_GRH_st *grh_st_p,u_int8_t *start_GRH_p);
+
+/******************************************************************************
+ * Function: nMPGA_append_BTH
+ *
+ * Description: This function appends a BTH to IB packets.
+ *              To use this function you must have a BTH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the BTH field.
+ *
+ * Parameters:
+ *   IB_BTH_st *bth_st_p(in)
+ *   u_int8_t *start_BTH_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_BTH(IB_BTH_st *bth_st_p, u_int8_t *start_BTH_p);
+
+/******************************************************************************
+ * Function: nMPGA_append_RETH
+ *
+ * Description: This function appends a RETH to IB packets.
+ *              To use this function you must have a RETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the RETH field.
+ *
+ * Parameters:
+ *   IB_RETH_st *reth_st_p(in)
+ *   u_int8_t *start_RETH_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_RETH(IB_RETH_st *reth_st_p, u_int8_t *start_RETH_p);
+
+/******************************************************************************
+ * Function: nMPGA_append_AETH
+ *
+ * Description: This function appends an AETH to IB packets.
+ *              To use this function you must have an AETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the AETH field.
+ *
+ * Parameters:
+ *   IB_AETH_st *aeth_st_p(in)
+ *   u_int8_t *start_AETH_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_AETH(IB_AETH_st *aeth_st_p, u_int8_t *start_AETH_p);
+
+/*****************************************************************************/
+/* From this point on, the functions are for Datagram service               */
+/*****************************************************************************/
+
+/******************************************************************************
+ * Function: nMPGA_append_DETH
+ *
+ * Description: This function appends a DETH to IB packets.
+ *              To use this function you must have a DETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the DETH field.
+ *
+ * Parameters:
+ *   IB_DETH_st *deth_st_p(in)
+ *   u_int8_t *start_DETH_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_DETH(IB_DETH_st *deth_st_p, u_int8_t *start_DETH_p);
+
+/******************************************************************************
+ * Function: nMPGA_append_RDETH
+ *
+ * Description: This function appends an RDETH to IB packets.
+ *              To use this function you must have an RDETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the RDETH field.
+ *
+ * Parameters:
+ *   IB_RDETH_st *rdeth_st_p(in)
+ *   u_int8_t *start_RDETH_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_RDETH(IB_RDETH_st *rdeth_st_p, u_int8_t *start_RDETH_p);
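+/*
+ * Packing note (sketch): EECnxt, MSN and SrcQP are 24-bit fields, so the
+ * append code writes them one byte at a time with INSERTF -- bits 23..16
+ * into the first byte, 15..8 into the second, 7..0 into the third --
+ * which is endian-safe on both little- and big-endian hosts. E.g.
+ * SrcQP = 0x012345 lands on the wire as the bytes 0x01 0x23 0x45.
+ */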
+/******************************************************************************
+ * Function: nMPGA_append_ImmDt
+ *
+ * Description: This function appends ImmDt to IB packets.
+ *              To use this function you must have an ImmDt struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the ImmDt field.
+ *
+ * Parameters:
+ *   IB_ImmDt_st *ImmDt_st_p(in)
+ *   u_int8_t *start_ImmDt_p(out)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR.
+ *****************************************************************************/
+call_result_t
+nMPGA_append_ImmDt(IB_ImmDt_st *ImmDt_st_p, u_int8_t *start_ImmDt_p);
+
+/******************************************************************************
+ * Function: nMPGA_append_ICRC
+ *
+ * Description: This function appends the ICRC to IB packets.
+ *
+ * Parameters:
+ *   start_ICRC(in) u_int16_t *
+ *     pointer to the start of the ICRC field
+ *   ICRC(in) u_int32_t
+ *     The ICRC to insert
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+nMPGA_append_ICRC(u_int16_t *start_ICRC, u_int32_t ICRC);
+
+/******************************************************************************
+ * Function: nMPGA_append_VCRC
+ *
+ * Description: This function appends the VCRC to IB packets.
+ *
+ * Parameters:
+ *   start_VCRC(in) u_int16_t *
+ *     pointer to the start of the VCRC field
+ *   VCRC(in) u_int16_t
+ *     The VCRC to insert
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+nMPGA_append_VCRC(u_int16_t *start_VCRC, u_int16_t VCRC);
+
+#endif /* H_nMPGA_PACKET_APPEND_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga.def
new file mode 100644
index 00000000..e47c55ed
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga.def
@@ -0,0 +1,44 @@
+EXPORTS
+ MPGA_build_pkt_lrh
+ MPGA_reliable_send
+ MPGA_rc_send_only
+ MPGA_rc_rdma_w_only
+ MPGA_rc_rdma_w_first
+ MPGA_rc_rdma_w_middle
+ MPGA_rc_rdma_w_last
+ MPGA_rc_rdma_r_req
+ MPGA_rc_rdma_r_resp
+ MPGA_rc_rdma_r_resp_only
+ MPGA_ud_send_only
+ MPGA_fast_ud_send_only
+ MPGA_fast_rc_send_first
+ MPGA_fast_rc_send_middle
+ MPGA_fast_rc_send_last
+ MPGA_fast_rc_send_only
+ MPGA_fast_rc_read_resp_first
+ MPGA_fast_rc_read_resp_middle
+ MPGA_fast_rc_read_resp_last
+ MPGA_fast_rc_read_resp_only
+ MPGA_fast_rc_acknowledge
+ MPGA_analyze_packet
+ MPGA_print_pkt
+ MPGA_free_pkt_st_fields
+
+ MPGA_build_pkt_lrh_sv
+ MPGA_rc_send_only_sv
+ MPGA_fast_ud_send_grh
+ extract_RETH
+ extract_BTH
+ extract_GRH
+ fast_calc_VCRC
+ extract_VCRC
+ fast_calc_ICRC
+ extract_ICRC
+ extract_LRH
+ MPGA_new_from_old
+ MPGA_set_field
+ MPGA_read_field
+ MPGA_extract_LNH
+ MPGA_extract_opcode
+ MPGA_make_headers
+ MPGA_get_headers_size
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_driver.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_driver.c
new file mode 100644
index 00000000..01d0d775
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_driver.c
@@ -0,0 +1,51 @@
+/*
+  This software is available to you under a choice of one of two
+  licenses. You may choose to be licensed under the terms of the GNU
+  General Public License (GPL) Version 2, available at
+  , or the OpenIB.org BSD
+  license, available in the LICENSE.TXT file accompanying this
+  software. These details are also available at
+  .
+
+  THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+  EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+  MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+  NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. +*/ +#ifdef __KERNEL__ + +#include "mtl_types.h" + +/* ----- Kernel Space ----- */ + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** MPGA_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** MPGA_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** MPGA_KL: DllUnload()"); + return STATUS_SUCCESS; +} +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_kl.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_kl.def new file mode 100644 index 00000000..d7568d6a --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/os_dep/win/mpga_kl.def @@ -0,0 +1,46 @@ +EXPORTS + DllInitialize private + DllUnload private + MPGA_init_module=init_module + MPGA_cleanup_module=cleanup_module + MPGA_build_pkt_lrh + MPGA_reliable_send + MPGA_rc_send_only + MPGA_rc_rdma_w_only + MPGA_rc_rdma_w_first + MPGA_rc_rdma_w_middle + MPGA_rc_rdma_w_last + MPGA_rc_rdma_r_req + MPGA_rc_rdma_r_resp + MPGA_rc_rdma_r_resp_only + MPGA_ud_send_only + MPGA_fast_ud_send_only + MPGA_fast_rc_send_first + MPGA_fast_rc_send_middle + MPGA_fast_rc_send_last + MPGA_fast_rc_send_only + MPGA_fast_rc_read_resp_first + MPGA_fast_rc_read_resp_middle + MPGA_fast_rc_read_resp_last + MPGA_fast_rc_read_resp_only + MPGA_fast_rc_acknowledge + MPGA_analyze_packet + MPGA_print_pkt + MPGA_free_pkt_st_fields + MPGA_fast_ud_send_grh + extract_RETH + extract_BTH + extract_GRH + fast_calc_VCRC + extract_VCRC + fast_calc_ICRC + extract_ICRC + extract_LRH + MPGA_new_from_old + MPGA_set_field + MPGA_read_field + MPGA_extract_LNH + MPGA_extract_opcode + MPGA_make_headers + MPGA_get_headers_size + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.c new file mode 100644 index 00000000..8cfa3be5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.c @@ -0,0 +1,523 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include
+
+/* Layers Include */
+#include
+#ifdef __WIN__
+#include
+#endif
+#include
+
+/* MPGA Includes */
+#include
+#include
+
+
+/*************************************************************************/
+/* append lrh                                                            */
+/*************************************************************************/
+call_result_t
+append_LRH (IB_LRH_st *lrh_st_p, u_int16_t packet_size,
+            u_int16_t **packet_buf_vp,LNH_t LNH)
+{
+ u_int8_t *start_LRH_p;
+ u_int16_t **packet_buf_p;
+
+ packet_buf_p = (u_int16_t**)packet_buf_vp; /*casting to u_int16_t */
+
+/*Update the fields in the given lrh struct*/
+ lrh_st_p->LNH = (u_int8_t)LNH;
+ lrh_st_p->PktLen = (packet_size - VCRC_LEN) / IBWORD;
+ /*from the first byte of the LRH till the VCRC, in 4 byte words*/
+ lrh_st_p->reserved1 = 0;
+ lrh_st_p->reserved2 = 0;
+
+ start_LRH_p = (u_int8_t*)(*packet_buf_p) - LRH_LEN;
+
+ start_LRH_p[0] = INSERTF(start_LRH_p[0],4,lrh_st_p->VL,0,4);
+ start_LRH_p[0] = INSERTF(start_LRH_p[0],0,lrh_st_p->LVer,0,4);
+ start_LRH_p[1] = INSERTF(start_LRH_p[1],4,lrh_st_p->SL,0,4);
+ start_LRH_p[1] = INSERTF(start_LRH_p[1],2,lrh_st_p->reserved1,0,2);
+ start_LRH_p[1] = INSERTF(start_LRH_p[1],0,lrh_st_p->LNH,0,2);
+ start_LRH_p[2] = INSERTF(start_LRH_p[2],0,lrh_st_p->DLID,8,8);
+ start_LRH_p[3] = INSERTF(start_LRH_p[3],0,lrh_st_p->DLID,0,8);
+ start_LRH_p[4] = INSERTF(start_LRH_p[4],3,lrh_st_p->reserved2,0,5);
+ start_LRH_p[4] = INSERTF(start_LRH_p[4],0,lrh_st_p->PktLen,8,3);
+ start_LRH_p[5] = INSERTF(start_LRH_p[5],0,lrh_st_p->PktLen,0,8);
+ start_LRH_p[6] = INSERTF(start_LRH_p[6],0,lrh_st_p->SLID,8,8);
+ start_LRH_p[7] = INSERTF(start_LRH_p[7],0,lrh_st_p->SLID,0,8);
+
+ (*packet_buf_p) = (u_int16_t*)start_LRH_p;
+
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract LRH                                                                   */
+/*********************************************************************************/
+call_result_t
+extract_LRH(IB_LRH_st *lrh_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_LRH_p;
+ u_int8_t *end_LRH_p;
+
+
+ memset(lrh_st_p, 0, sizeof(IB_LRH_st));
+
+ start_LRH_p = (u_int8_t*)(*packet_buf_p);
+ end_LRH_p = (u_int8_t*)(*packet_buf_p) + LRH_LEN;
+
+
+
+ lrh_st_p->VL = INSERTF(lrh_st_p->VL,0,start_LRH_p[0],4,4);
+ lrh_st_p->LVer = INSERTF(lrh_st_p->LVer,0,start_LRH_p[0],0,4);
+ lrh_st_p->SL = INSERTF(lrh_st_p->SL,0,start_LRH_p[1],4,4);
+ lrh_st_p->reserved1 = INSERTF(lrh_st_p->reserved1,0,start_LRH_p[1],2,2);
+ lrh_st_p->LNH = INSERTF(lrh_st_p->LNH,0,start_LRH_p[1],0,2);
+ lrh_st_p->DLID = INSERTF(lrh_st_p->DLID,8,start_LRH_p[2],0,8);
+ lrh_st_p->DLID = INSERTF(lrh_st_p->DLID,0,start_LRH_p[3],0,8);
+ lrh_st_p->reserved2 = INSERTF(lrh_st_p->reserved2,0,start_LRH_p[4],3,5);
+ lrh_st_p->PktLen = INSERTF(lrh_st_p->PktLen,8,start_LRH_p[4],0,3);
+ lrh_st_p->PktLen = INSERTF(lrh_st_p->PktLen,0,start_LRH_p[5],0,8);
+ lrh_st_p->SLID = INSERTF(lrh_st_p->SLID,8,start_LRH_p[6],0,8);
+ lrh_st_p->SLID = INSERTF(lrh_st_p->SLID,0,start_LRH_p[7],0,8);
+
+ (*packet_buf_p) = (u_int16_t *)end_LRH_p;
+ /*Updating the packet_buf_p to be at the end of the LRH field*/
+ return(MT_OK);
+}
+
+
+/*************************************************************************/
+/* append grh                                                            */
+/*************************************************************************/
+call_result_t
+append_GRH (IB_GRH_st *grh_st_p, u_int16_t packet_size,
+            u_int16_t **packet_buf_vp)
+{
+ u_int8_t *start_GRH_p;
+ u_int16_t **packet_buf_p;
+
+ packet_buf_p = (u_int16_t**)packet_buf_vp; /*casting to u_int16_t */
+
+/*Update the fields in the given grh struct*/
+ grh_st_p->NxtHdr = NON_RAW_IBA; /* it is static for now */
+ grh_st_p->PayLen = packet_size - LRH_LEN - GRH_LEN - VCRC_LEN;
+ /*from the first byte after the end of the GRH till the VCRC, in bytes*/
+
+ start_GRH_p = (u_int8_t*)(*packet_buf_p) - GRH_LEN;
+
+ start_GRH_p[0] = INSERTF(start_GRH_p[0],4,grh_st_p->IPVer,0,4);
+ start_GRH_p[0] = INSERTF(start_GRH_p[0],0,grh_st_p->TClass,4,4);
+ start_GRH_p[1] = INSERTF(start_GRH_p[1],4,grh_st_p->TClass,0,4);
+ start_GRH_p[1] = INSERTF(start_GRH_p[1],0,grh_st_p->FlowLabel,16,4);
+ start_GRH_p[2] = INSERTF(start_GRH_p[2],0,grh_st_p->FlowLabel,8,8);
+ start_GRH_p[3] = INSERTF(start_GRH_p[3],0,grh_st_p->FlowLabel,0,8);
+
+
+ start_GRH_p[4] = INSERTF(start_GRH_p[4],0,grh_st_p->PayLen,8,8);
+ start_GRH_p[5] = INSERTF(start_GRH_p[5],0,grh_st_p->PayLen,0,8);
+
+ start_GRH_p[6] = grh_st_p->NxtHdr;
+ start_GRH_p[7] = grh_st_p->HopLmt;
+
+ memcpy(&(start_GRH_p[8]), grh_st_p->SGID, sizeof(IB_gid_t));
+ memcpy(&(start_GRH_p[24]), grh_st_p->DGID, sizeof(IB_gid_t));
+
+ (*packet_buf_p) = (u_int16_t*)start_GRH_p;
+
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract GRH                                                                   */
+/*********************************************************************************/
+call_result_t
+extract_GRH(IB_GRH_st *grh_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_GRH_p;
+ u_int8_t *end_GRH_p;
+
+
+ memset(grh_st_p, 0, sizeof(IB_GRH_st));
+
+ start_GRH_p = (u_int8_t*)(*packet_buf_p);
+ end_GRH_p = (u_int8_t*)(*packet_buf_p) + GRH_LEN;
+
+ grh_st_p->IPVer = INSERTF(grh_st_p->IPVer,0,start_GRH_p[0],4,4);
+ grh_st_p->TClass = INSERTF(grh_st_p->TClass,4,start_GRH_p[0],0,4);
+ grh_st_p->TClass = INSERTF(grh_st_p->TClass,0,start_GRH_p[1],4,4);
+ grh_st_p->FlowLabel = INSERTF(grh_st_p->FlowLabel,16,start_GRH_p[1],0,4);
+ grh_st_p->FlowLabel = INSERTF(grh_st_p->FlowLabel,8,start_GRH_p[2],0,8);
+ grh_st_p->FlowLabel = INSERTF(grh_st_p->FlowLabel,0,start_GRH_p[3],0,8);
+
+ grh_st_p->PayLen = INSERTF(grh_st_p->PayLen,8,start_GRH_p[4],0,8);
+ grh_st_p->PayLen = INSERTF(grh_st_p->PayLen,0,start_GRH_p[5],0,8);
+
+ grh_st_p->NxtHdr = start_GRH_p[6];
+ grh_st_p->HopLmt = start_GRH_p[7];
+
+ memcpy(grh_st_p->SGID, &(start_GRH_p[8]), sizeof(IB_gid_t));
+ memcpy(grh_st_p->DGID, &(start_GRH_p[24]), sizeof(IB_gid_t));
+
+
+ (*packet_buf_p) = (u_int16_t *)end_GRH_p;
+ /*Updating the packet_buf_p to be at the end of the GRH field*/
+ return(MT_OK);
+}
+
+
+
+/*********************************************************************************/
+/* Append BTH                                                                    */
+/*********************************************************************************/
+call_result_t
+append_BTH (IB_BTH_st *bth_st_p, u_int16_t **packet_buf_p,u_int16_t payload_size)
+{
+ u_int8_t *start_BTH_p;
+
+ start_BTH_p = (u_int8_t*)(*packet_buf_p) - BTH_LEN;
+ /*Assuming that the pointer is BTH_LEN (12) bytes ahead*/
+ bth_st_p->PadCnt = (IBWORD - (payload_size % IBWORD)) % IBWORD; /*To align to a 4 byte boundary*/
+ bth_st_p->reserved1 = 0 ;
+ bth_st_p->reserved2 = 0 ;
+ bth_st_p->TVer = IBA_TRANSPORT_HEADER_VERSION;
+
+ start_BTH_p[0] = INSERTF(start_BTH_p[0],0,bth_st_p->OpCode,0,8);
+ start_BTH_p[1] = INSERTF(start_BTH_p[1],7,bth_st_p->SE,0,1);
+ start_BTH_p[1] = INSERTF(start_BTH_p[1],6,bth_st_p->M,0,1);
+ start_BTH_p[1] = INSERTF(start_BTH_p[1],4,bth_st_p->PadCnt,0,2);
+ start_BTH_p[1] = INSERTF(start_BTH_p[1],0,bth_st_p->TVer,0,4);
+ start_BTH_p[2] = INSERTF(start_BTH_p[2],0,bth_st_p->P_KEY,8,8);
+ start_BTH_p[3] = INSERTF(start_BTH_p[3],0,bth_st_p->P_KEY,0,8);
+ start_BTH_p[4] = INSERTF(start_BTH_p[4],0,bth_st_p->reserved1,0,8);
+ start_BTH_p[5] = INSERTF(start_BTH_p[5],0,bth_st_p->DestQP,16,8);
+ start_BTH_p[6] = INSERTF(start_BTH_p[6],0,bth_st_p->DestQP,8,8);
+ start_BTH_p[7] = INSERTF(start_BTH_p[7],0,bth_st_p->DestQP,0,8);
+ start_BTH_p[8] = INSERTF(start_BTH_p[8],7,bth_st_p->A,0,1);
+ start_BTH_p[8] = INSERTF(start_BTH_p[8],0,bth_st_p->reserved2,0,7);
+ start_BTH_p[9] = INSERTF(start_BTH_p[9],0,bth_st_p->PSN,16,8);
+ start_BTH_p[10] = INSERTF(start_BTH_p[10],0,bth_st_p->PSN,8,8);
+ start_BTH_p[11] = INSERTF(start_BTH_p[11],0,bth_st_p->PSN,0,8);
+ (*packet_buf_p) = (u_int16_t*)start_BTH_p;/*Update the pointer to the start of the BTH field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract BTH                                                                   */
+/*********************************************************************************/
+call_result_t
+extract_BTH(IB_BTH_st *bth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_BTH_p;
+ u_int8_t *end_BTH_p;
+
+
+ memset(bth_st_p, 0, sizeof(IB_BTH_st));
+
+ start_BTH_p = (u_int8_t*)(*packet_buf_p);
+ end_BTH_p = (u_int8_t*)(*packet_buf_p) + BTH_LEN;
+
+ bth_st_p->OpCode = INSERTF(bth_st_p->OpCode,0,start_BTH_p[0],0,8);
+ bth_st_p->SE = INSERTF(bth_st_p->SE,0,start_BTH_p[1],7,1);
+ bth_st_p->M = INSERTF(bth_st_p->M,0,start_BTH_p[1],6,1);
+ bth_st_p->PadCnt = INSERTF(bth_st_p->PadCnt,0,start_BTH_p[1],4,2);
+ bth_st_p->TVer = INSERTF(bth_st_p->TVer,0,start_BTH_p[1],0,4);
+ bth_st_p->P_KEY = INSERTF(bth_st_p->P_KEY,8,start_BTH_p[2],0,8);
+ bth_st_p->P_KEY = INSERTF(bth_st_p->P_KEY,0,start_BTH_p[3],0,8);
+ bth_st_p->reserved1 = INSERTF(bth_st_p->reserved1,0,start_BTH_p[4],0,8);
+ bth_st_p->DestQP = INSERTF(bth_st_p->DestQP,16,start_BTH_p[5],0,8);
+ bth_st_p->DestQP = INSERTF(bth_st_p->DestQP,8,start_BTH_p[6],0,8);
+ bth_st_p->DestQP = INSERTF(bth_st_p->DestQP,0,start_BTH_p[7],0,8);
+ bth_st_p->A = INSERTF(bth_st_p->A,0,start_BTH_p[8],7,1);
+ bth_st_p->reserved2 = INSERTF(bth_st_p->reserved2,0,start_BTH_p[8],0,7);
+ bth_st_p->PSN = INSERTF(bth_st_p->PSN,16,start_BTH_p[9],0,8);
+ bth_st_p->PSN = INSERTF(bth_st_p->PSN,8,start_BTH_p[10],0,8);
+ bth_st_p->PSN = INSERTF(bth_st_p->PSN,0,start_BTH_p[11],0,8);
+
+ (*packet_buf_p) = (u_int16_t *)end_BTH_p;
+ /*Updating the packet_buf_p to be at the end of the BTH field*/
+ return(MT_OK);
+}
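+/*
+ * PadCnt sketch: append_BTH computes
+ *   PadCnt = (IBWORD - (payload_size % IBWORD)) % IBWORD,
+ * the number of bytes needed to round the payload up to a 4-byte
+ * boundary. E.g. payload_size = 13 gives (4 - 1) % 4 = 3 pad bytes,
+ * while an already aligned payload (say 16 bytes) gives 0.
+ */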
+
+/*********************************************************************************/
+/* Append RETH                                                                   */
+/*********************************************************************************/
+call_result_t
+append_RETH (IB_RETH_st *reth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_RETH_p;
+ u_int8_t *start_VA_p;
+ u_int8_t *start_R_Key_p;
+ u_int8_t *start_DMALen_p;
+
+ start_RETH_p = (u_int8_t*)(*packet_buf_p) - RETH_LEN;
+
+ start_VA_p = start_RETH_p;/*The first field*/
+ start_R_Key_p = start_RETH_p + 8; /*1st field is 8 bytes long*/
+ start_DMALen_p = start_RETH_p + 12;/*2nd field is 4 bytes; offset 12 from the start*/
+
+ (*((u_int64_t*)start_VA_p)) = MOSAL_cpu_to_be64(reth_st_p->VA); /*64bit field (big endian)*/
+ (*((u_int32_t*)start_R_Key_p)) = MOSAL_cpu_to_be32(reth_st_p->R_Key);/*32bit field (big endian)*/
+ (*((u_int32_t*)start_DMALen_p)) = MOSAL_cpu_to_be32(reth_st_p->DMALen);/*32bit field (big endian)*/
+
+ (*packet_buf_p) = (u_int16_t *)start_RETH_p;
+ /*Updating the packet_buf_p to be at the start of the RETH field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract RETH                                                                  */
+/*********************************************************************************/
+call_result_t
+extract_RETH(IB_RETH_st *reth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_RETH_p;
+ u_int8_t *start_VA_p;
+ u_int8_t *start_R_Key_p;
+ u_int8_t *start_DMALen_p;
+ u_int8_t *end_RETH_p;
+
+ memset(reth_st_p, 0, sizeof(IB_RETH_st));
+
+ start_RETH_p = (u_int8_t*)(*packet_buf_p);
+ end_RETH_p = (u_int8_t*)(*packet_buf_p) + RETH_LEN;
+ start_VA_p = start_RETH_p;/*The first field*/
+ start_R_Key_p = start_RETH_p + 8; /*1st field is 8 bytes long*/
+ start_DMALen_p = start_RETH_p + 12;/*2nd field is 4 bytes; offset 12 from the start*/
+
+ reth_st_p->VA = MOSAL_be64_to_cpu(*((u_int64_t*)start_VA_p)); /*64bit field (big endian)*/
+ reth_st_p->R_Key = MOSAL_be32_to_cpu(*((u_int32_t*)start_R_Key_p)); /*32bit field (big endian)*/
+ reth_st_p->DMALen = MOSAL_be32_to_cpu(*((u_int32_t*)start_DMALen_p)); /*32bit field (big endian)*/
+
+ (*packet_buf_p) = (u_int16_t *)end_RETH_p;
+ /*Updating the packet_buf_p to be at the end of the RETH field*/
+ return(MT_OK);
+}
+/*********************************************************************************/
+/* Append AETH                                                                   */
+/*********************************************************************************/
+call_result_t
+append_AETH (IB_AETH_st *aeth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_AETH_p;
+
+ start_AETH_p = (u_int8_t*)(*packet_buf_p) - AETH_LEN;
+
+ start_AETH_p[0] = INSERTF(start_AETH_p[0],0,aeth_st_p->Syndrome,0,8);/*8 bit field (big endian)*/
+
+ start_AETH_p[1] = INSERTF(start_AETH_p[1],0,aeth_st_p->MSN,16,8);/*24 bit field (big endian)*/
+ start_AETH_p[2] = INSERTF(start_AETH_p[2],0,aeth_st_p->MSN,8,8); /*it is dangerous to use bit masks here*/
+ start_AETH_p[3] = INSERTF(start_AETH_p[3],0,aeth_st_p->MSN,0,8);
+
+ (*packet_buf_p) = (u_int16_t *)start_AETH_p;
+ /*Updating the packet_buf_p to be at the start of the AETH field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract AETH                                                                  */
+/*********************************************************************************/
+call_result_t
+extract_AETH (IB_AETH_st *aeth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_AETH_p;
+ u_int8_t *end_AETH_p;
+ u_int8_t *start_MSN_p;
+ u_int32_t temp32;
+
+ memset(aeth_st_p, 0, sizeof(IB_AETH_st));
+
+ start_AETH_p = (u_int8_t*)(*packet_buf_p);
+ start_MSN_p = start_AETH_p + 1;/*2nd field, 1 byte after the Syndrome*/
+ end_AETH_p = (u_int8_t*)(*packet_buf_p) + AETH_LEN;
+
+ aeth_st_p->Syndrome = start_AETH_p[0];/*8bit field (big endian)*/
+
+ temp32 = *((u_int32_t*)start_MSN_p);/*24bit field (big endian)*/
+
+#ifdef MT_LITTLE_ENDIAN
+ temp32 <<= 8;
+#else
+ temp32 >>= 8;
+#endif
+
+ aeth_st_p->MSN = MOSAL_be32_to_cpu(temp32);
+
+ (*packet_buf_p) = (u_int16_t *)end_AETH_p;
+ /*Updating the packet_buf_p to be at the end of the AETH field*/
+ return(MT_OK);
+}
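+/*
+ * 24-bit extraction sketch: extract_AETH (and extract_DETH below) reads
+ * four bytes starting at the 3-byte field, then drops the stray fourth
+ * wire byte before swapping. On a little-endian host that byte lands in
+ * the most significant lane of temp32, so temp32 <<= 8 discards it; on a
+ * big-endian host it lands in the least significant lane, so temp32 >>= 8
+ * discards it. MOSAL_be32_to_cpu then yields the field in host order.
+ */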
+
+/*********************************************************************************/
+/* Append DETH                                                                   */
+/*********************************************************************************/
+call_result_t
+append_DETH (IB_DETH_st *deth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_DETH_p;
+ u_int8_t *start_Q_Key_p;
+
+ start_DETH_p = (u_int8_t*)(*packet_buf_p) - DETH_LEN;
+ start_Q_Key_p = start_DETH_p;
+
+ (*((u_int32_t*)start_Q_Key_p)) = MOSAL_cpu_to_be32(deth_st_p->Q_Key); /*32bit field (big endian)*/
+
+ start_DETH_p[4] = deth_st_p->reserved1;/*8bit field (big endian)*/
+
+ start_DETH_p[5] = INSERTF(start_DETH_p[5],0,deth_st_p->SrcQP,16,8);/*24bit field (big endian)*/
+ start_DETH_p[6] = INSERTF(start_DETH_p[6],0,deth_st_p->SrcQP,8,8);
+ start_DETH_p[7] = INSERTF(start_DETH_p[7],0,deth_st_p->SrcQP,0,8);
+
+ (*packet_buf_p) = (u_int16_t *)start_DETH_p;
+ /*Updating the packet_buf_p to be at the start of the DETH field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract DETH                                                                  */
+/*********************************************************************************/
+call_result_t
+extract_DETH(IB_DETH_st *deth_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_DETH_p;
+ u_int8_t *end_DETH_p;
+ u_int8_t *start_SrcQP_p;
+ u_int32_t temp32;
+
+ memset(deth_st_p, 0, sizeof(IB_DETH_st));
+
+
+ start_DETH_p = (u_int8_t*)(*packet_buf_p);
+ start_SrcQP_p = start_DETH_p + 5; /*The start of the SrcQP field*/
+ end_DETH_p = start_DETH_p + DETH_LEN;
+
+ deth_st_p->Q_Key = MOSAL_be32_to_cpu(*((u_int32_t*)start_DETH_p));/*32bit field (big endian)*/
+ deth_st_p->reserved1 = *((u_int8_t*)(start_DETH_p + 4));/*8bit field (big endian)*/
+
+ temp32 = *((u_int32_t*)start_SrcQP_p); /*24bit field (big endian)*/
+
+#ifdef MT_LITTLE_ENDIAN
+ MTL_TRACE('5', "\nLittle Endian\n");
+ temp32 <<= 8;
+#else
+ temp32 >>= 8;
+ MTL_TRACE('5', "\nBig Endian \n");
+#endif
+
+ deth_st_p->SrcQP = MOSAL_be32_to_cpu(temp32);
+
+ (*packet_buf_p) = (u_int16_t *)end_DETH_p;
+ /*Updating the packet_buf_p to be at the end of the DETH field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Append ImmDt                                                                  */
+/*********************************************************************************/
+call_result_t
+append_ImmDt (IB_ImmDt_st *ImmDt_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_ImmDt_p;
+
+ start_ImmDt_p = (u_int8_t*)(*packet_buf_p) - ImmDt_LEN;
+ (*((u_int32_t*)start_ImmDt_p)) = MOSAL_cpu_to_be32(ImmDt_st_p->ImmDt); /*32bit field (big endian)*/
+
+ (*packet_buf_p) = (u_int16_t *)start_ImmDt_p;
+ /*Updating the packet_buf_p to be at the start of the ImmDt field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract ImmDt                                                                 */
+/*********************************************************************************/
+call_result_t
+extract_ImmDt(IB_ImmDt_st *ImmDt_st_p, u_int16_t **packet_buf_p)
+{
+ u_int8_t *start_ImmDt_p;
+ u_int8_t *end_ImmDt_p;
+
+ memset(ImmDt_st_p, 0, sizeof(IB_ImmDt_st));
+
+ start_ImmDt_p = (u_int8_t*)(*packet_buf_p);
+ end_ImmDt_p = start_ImmDt_p + ImmDt_LEN;
+
+ ImmDt_st_p->ImmDt = MOSAL_be32_to_cpu(*((u_int32_t*)start_ImmDt_p));/*32bit field (big endian)*/
+
+ (*packet_buf_p) = (u_int16_t *)end_ImmDt_p;
+ /*Updating the packet_buf_p to be at the end of the ImmDt field*/
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Append ICRC                                                                   */
+/*********************************************************************************/
+call_result_t
+append_ICRC(u_int16_t *start_ICRC, u_int32_t ICRC)
+{
+ *((u_int32_t*)start_ICRC) = MOSAL_cpu_to_le32(ICRC);
+ return(MT_OK);
+}
+
+/*********************************************************************************/
+/* Extract ICRC
*/ +/*********************************************************************************/ +call_result_t +extract_ICRC(u_int16_t *start_ICRC, u_int32_t *ICRC) +{ + *ICRC = MOSAL_le32_to_cpu(*((u_int32_t*)start_ICRC)); + return(MT_OK); +} + +/*********************************************************************************/ +/* Append VCRC */ +/*********************************************************************************/ +call_result_t +append_VCRC(u_int16_t *start_VCRC, u_int16_t VCRC) +{ + *((u_int16_t*)start_VCRC) = MOSAL_cpu_to_le16(VCRC); + return(MT_OK); +} + +/*********************************************************************************/ +/* Extract VCRC */ +/*********************************************************************************/ +call_result_t +extract_VCRC(u_int16_t *start_VCRC, u_int16_t *VCRC) +{ + *VCRC = MOSAL_le16_to_cpu(*((u_int16_t*)start_VCRC)); + return(MT_OK); +} diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.h new file mode 100644 index 00000000..838a9b9a --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_append.h @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#ifndef H_PACKET_APPEND_H
+#define H_PACKET_APPEND_H
+
+/* Layers Includes */
+#include
+
+/*******************/
+
+#ifdef __WIN__
+#include
+#endif
+
+#if !defined(__DARWIN__) && defined(__LINUX__) && !defined(__KERNEL__)
+ #include
+#endif
+
+#if defined(VXWORKS_OS) && defined(MT_BIG_ENDIAN)
+#define __cpu_to_le32(x) (x)
+#define __cpu_to_le16(x) (x)
+#endif
+
+#define IBA_TRANSPORT_HEADER_VERSION 0
+/*Defines of all field lengths*/
+#define IBWORD 4 /* 4 bytes */
+#define LRH_LEN 8 /* (LRH_LEN = 8 bytes) */
+#define RWH_LEN 4
+#define GRH_LEN 40
+#define BTH_LEN 12
+#define RDETH_LEN 4
+#define DETH_LEN 8
+#define RETH_LEN 16
+#define AETH_LEN 4
+#define ImmDt_LEN 4
+#define AtomETH_LEN 28
+#define AtomAETH_LEN 8
+#define ICRC_LEN 4
+#define VCRC_LEN 2
+
+/*Defines of all transport layer packet lengths, without LRH or GRH*/
+
+/********************* Reliable Connection (RC) *************************/
+
+#define RC_SEND_FIRST_LEN (BTH_LEN)
+#define RC_SEND_MIDDLE_LEN (BTH_LEN)
+#define RC_SEND_LAST_LEN (BTH_LEN)
+#define RC_SEND_ONLY_LEN (BTH_LEN) /*need to write all the relevant packets*/
+#define RC_WRITE_ONLY_LEN (BTH_LEN + RETH_LEN)
+#define RC_WRITE_FIRST_LEN (BTH_LEN + RETH_LEN)
+#define RC_WRITE_MIDDLE_LEN (BTH_LEN)
+#define RC_WRITE_LAST_LEN (BTH_LEN)
+#define RC_READ_REQ_LEN (BTH_LEN + RETH_LEN)
+#define RC_READ_RESP_FIRST_LEN (BTH_LEN + AETH_LEN)
+#define RC_READ_RESP_MIDDLE_LEN (BTH_LEN)
+#define RC_READ_RESP_LAST_LEN (BTH_LEN + AETH_LEN)
+#define RC_READ_RESP_ONLY_LEN (BTH_LEN + AETH_LEN)
+#define RC_ACKNOWLEDGE_LEN (BTH_LEN + AETH_LEN)
+
+/********************* Unreliable Connection (UC) ************************/
+
+
+/********************* Reliable Datagram (RD) ****************************/
+
+
+/********************* Unreliable Datagram (UD) ***************************/
+
+#define UD_SEND_ONLY_LEN (BTH_LEN + DETH_LEN)
+
+/*Link Next Header definition*/
+enum {
+ RAW = 0x0, /* |LRH|... (Ethertype)*/
+ IP_NON_IBA_TRANS = 0x1, /* |LRH|GRH|... */
+ IBA_LOCAL = 0x2, /* |LRH|BTH|... */
+ IBA_GLOBAL = 0x3 /* |LRH|GRH|BTH|... */
+};
+typedef u_int32_t LNH_t;
+
+typedef enum{
+ NON_RAW_IBA = 0x1B /* |LRH|GRH|BTH|...*/
+ /* TBD IETF RFC 1700 et seq.*/
+ /* All the rest are IPv6 headers*/
+} NxtHdr_t;
+
+
+typedef enum{
+ FIRST_PACKET=0,
+ MIDDLE_PACKET=1,
+ LAST_PACKET=2
+} IB_pkt_place;
+
+/**************************************** field structure definitions ****************************/
+typedef struct IB_LRH_st IB_LRH_st;
+
+struct IB_LRH_st{ /* ***** LRH ***** Local Route Header (8 bytes)*/
+ u_int8_t VL; /*"Only 4 LS-bits" The virtual lane that the packet is using*/
+ u_int8_t LVer; /*"Only 4 LS-bits" Link level protocol of the packet*/
+ u_int8_t SL; /*"Only 4 LS-bits" Service level requested within the subnet*/
+ u_int8_t reserved1; /*"Only 2 LS-bits" Transmitted as 0, ignored on receive. **internally modified** */
+ u_int8_t LNH; /*"Only 2 LS-bits" Identifies the headers that follow the LRH. **internally modified** */
+ u_int16_t DLID; /*The destination port and path on the local subnet*/
+ u_int8_t reserved2; /*"Only 5 LS-bits" Transmitted as 0, ignored on receive. **internally modified** */
+ u_int16_t PktLen; /*"Only 11 LS-bits" The size of the packet in four-byte words. **internally modified** */
+ u_int16_t SLID; /*The source port (injection point) on the local subnet*/
+ };
+
+typedef struct IB_GRH_st IB_GRH_st;
+
+struct IB_GRH_st{ /* **** GRH **** Global Route Header (40 bytes)*/
+ u_int8_t IPVer; /*"Only 4 LS-bits" The version of the GRH*/
+ u_int8_t TClass; /*Used by IBA to communicate global service level*/
+ u_int32_t FlowLabel; /*"Only 20 LS-bits" Sequences of packets requiring special handling*/
+ u_int16_t PayLen; /*The length of the packet in bytes **internally modified** */
+ u_int8_t NxtHdr; /*Identifies the headers that follow the GRH*/
+ u_int8_t HopLmt; /*Bound on the number of hops between subnets*/
+ u_int8_t SGID[16]; /*Global identifier for the source port*/
+ u_int8_t DGID[16]; /*Global identifier for the destination port*/
+};
+
+typedef struct IB_BTH_st IB_BTH_st;
+
+struct IB_BTH_st{ /* **** BTH **** Base Transport Header (12 bytes)*/
+ u_int8_t OpCode; /*IBA packet type and which extensions follow **internally modified** */
+ u_int8_t SE; /*"Only 1 LS-bit" Whether an event should be generated by the responder or not*/
+ u_int8_t M; /*"Only 1 LS-bit" Communication migration state*/
+ u_int8_t PadCnt; /*"Only 2 LS-bits" Number of bytes that align to a 4 byte boundary **internally modified** */
+ u_int8_t TVer; /*"Only 4 LS-bits" IBA transport header version. **internally modified** */
+ u_int16_t P_KEY; /*Logical partition associated with this packet*/
+ u_int8_t reserved1; /*Transmitted as 0, ignored on receive. Not included in the ICRC. **internally modified** */
+ u_int32_t DestQP; /*"Only 24 LS-bits" Destination work queue pair number*/
+ u_int8_t A; /*"Only 1 LS-bit" Whether an ack should be returned by the responder*/
+ u_int8_t reserved2; /*"Only 7 LS-bits" Transmitted as 0, ignored on receive; included in the ICRC. **internally modified** */
+ u_int32_t PSN; /*"Only 24 LS-bits" Detects a missing or duplicate packet*/
+};
+
+typedef struct IB_RDETH_st IB_RDETH_st;
+
+struct IB_RDETH_st{ /* **** RDETH **** (4 bytes)*/
+ /*Reliable Datagram Extended Transport Header*/
+ u_int8_t reserved1; /*Transmitted as 0, ignored on receive.*/
+ u_int32_t EECnxt; /*"Only 24 LS-bits" Which end-to-end context for this packet*/
+};
+
+typedef struct IB_DETH_st IB_DETH_st;
+
+struct IB_DETH_st{ /* **** DETH ****(8 bytes)*/
+ /*Datagram Extended Transport Header */
+ u_int32_t Q_Key; /*Authorizes access to the destination queue*/
+ u_int8_t reserved1; /*Transmitted as 0, ignored on receive.*/
+ u_int32_t SrcQP; /*"Only 24 LS-bits" Work queue number at the source*/
+};
+
+typedef struct IB_RETH_st IB_RETH_st;
+
+struct IB_RETH_st{ /* **** RETH ****(16 bytes)*/
+ /*RDMA Extended Transport Header */
+ u_int64_t VA; /*Virtual address of the RDMA operation*/
+ u_int32_t R_Key; /*Remote key that authorizes access for the RDMA operation*/
+ u_int32_t DMALen; /*The length of the DMA operation*/
+};
+
+typedef struct IB_AtomicETH_st IB_AtomicETH_st;
+
+struct IB_AtomicETH_st{ /* **** AtomicETH ****(28 bytes)*/
+ /*Atomic Extended Transport Header */
+ u_int64_t VA; /*Remote virtual address */
+ u_int32_t R_Key; /*Remote key that authorizes access to the remote virtual address*/
+ u_int64_t SwapDt; /*An operand in atomic operations*/
+ u_int64_t CmpDt; /*An operand in cmpswap atomic operation*/
+};
+
+typedef struct IB_AETH_st IB_AETH_st;
+
+struct IB_AETH_st{ /* **** AETH ****(4 bytes)*/
+ /*ACK Extended Transport Header */
+ u_int8_t Syndrome; /*Indicates if NAK or ACK and additional info about ACK & NAK*/
+ u_int32_t MSN; /*"Only 24 LS-bits" Sequence number of the last message completed*/
+};
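+/*
+ * Sizing sketch: a complete local UD send is
+ *   LRH_LEN + UD_SEND_ONLY_LEN + payload + ICRC_LEN + VCRC_LEN,
+ * so a 32-byte payload gives 8 + (12 + 8) + 32 + 4 + 2 = 66 bytes on the
+ * wire (payload already 4-byte aligned, PadCnt = 0).
+ */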
+
+typedef struct IB_AtomicAckETH_st IB_AtomicAckETH_st;
+
+struct IB_AtomicAckETH_st{ /* **** AtomicAckETH ****(8 bytes)*/
+ /* Atomic ACK Extended Transport Header */
+ u_int64_t OrigRemDt; /*Return operand in atomic operations; contains the data*/
+ /*in the remote memory location before the atomic operation*/
+};
+
+typedef struct IB_ImmDt_st IB_ImmDt_st;
+
+struct IB_ImmDt_st{ /* **** Immediate Data **** (4 bytes)*/
+ /* Contains the additional data that is placed in the */
+ u_int32_t ImmDt; /* received Completion Queue Element (CQE). */
+ /* The ImmDt is only in Send or RDMA-Write packets. */
+};
+
+typedef struct IB_PKT_st IB_PKT_st;
+
+struct IB_PKT_st{ /*IB packet analysis structure*/
+ IB_LRH_st *lrh_st_p;
+ IB_GRH_st *grh_st_p;
+ IB_BTH_st *bth_st_p;
+ IB_RDETH_st *rdeth_st_p;
+ IB_DETH_st *deth_st_p;
+ IB_RETH_st *reth_st_p;
+ IB_AtomicETH_st *atomic_eth_st_p;
+ IB_AETH_st *aeth_st_p;
+ IB_AtomicAckETH_st *atomic_acketh_st_p;
+ IB_ImmDt_st *immdt_st_p;
+ u_int16_t packet_size;
+ u_int16_t *payload_buf;
+ u_int16_t payload_size;
+};
+
+
+/*Start of function declarations*/
+/******************************************************************************
+ * Function: append_LRH
+ *
+ * Description: This function appends an LRH to IB packets.
+ *              To use this function you must have an LRH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the LRH field.
+ *              The function overwrites the given LNH, PktLen, reserved1
+ *              and reserved2 fields.
+ *
+ * Parameters:
+ *   lrh_st_p(IN) IB_LRH_st *
+ *     Local Route Header.
+ *   packet_size(in) u_int16_t
+ *     Full packet size, including ICRC and VCRC.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 8 bytes back.)
+ *   LNH(in) LNH_t
+ *     Link next header: IBA local, global, RAW ...
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+append_LRH (IB_LRH_st *lrh_st_p, u_int16_t packet_size,
+            u_int16_t **packet_buf_p,LNH_t LNH);
+
+/******************************************************************************
+ * Function: extract_LRH
+ *
+ * Description: This function extracts the LRH from IB packets.
+ *              The function will update all the members of the IB_LRH struct.
+ *
+ * Parameters:
+ *   lrh_st_p(out) IB_LRH_st *
+ *     Local route header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 8 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_LRH(IB_LRH_st *lrh_st_p, u_int16_t **packet_buf_p);
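+/*
+ * Usage sketch (buffer name and values are illustrative): the append_*
+ * routines build a packet back to front. The caller points *packet_buf_p
+ * just past the header being added; each call writes the header and moves
+ * the pointer back over it:
+ *
+ *   u_int16_t *cursor = (u_int16_t*)(pkt_buf + LRH_LEN + BTH_LEN);
+ *   append_BTH(&bth, &cursor, payload_size);            (cursor moves back BTH_LEN)
+ *   append_LRH(&lrh, packet_size, &cursor, IBA_LOCAL);  (cursor moves back LRH_LEN)
+ */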
+/******************************************************************************
+ * Function: append_GRH
+ *
+ * Description: This function appends a GRH to IB packets.
+ *              To use this function you must have a GRH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the GRH field.
+ *              Note: the function overwrites the given PayLen and NxtHdr
+ *              fields.
+ *
+ * Parameters:
+ *   grh_st_p(IN) IB_GRH_st *
+ *     Global Route Header.
+ *   packet_size(in) u_int16_t
+ *     Full packet size, including ICRC and VCRC.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 40 bytes back.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if the field was not appended.
+ *****************************************************************************/
+call_result_t
+append_GRH (IB_GRH_st *grh_st_p, u_int16_t packet_size,
+            u_int16_t **packet_buf_vp);
+
+/******************************************************************************
+ * Function: extract_GRH
+ *
+ * Description: This function extracts the GRH from IB packets.
+ *              The function will update all the members of the IB_GRH struct.
+ *
+ * Parameters:
+ *   grh_st_p(out) IB_GRH_st *
+ *     Global route header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 40 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_GRH(IB_GRH_st *grh_st_p, u_int16_t **packet_buf_p);
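+/*
+ * Length bookkeeping sketch: for a global packet of
+ * LRH(8) + GRH(40) + BTH(12) + DETH(8) + 32-byte payload + ICRC(4) + VCRC(2)
+ * = 106 bytes, append_GRH sets PayLen = 106 - 8 - 40 - 2 = 56 (everything
+ * after the GRH, ICRC included), while append_LRH sets
+ * PktLen = (106 - 2) / 4 = 26 four-byte words (everything but the VCRC).
+ */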
+/******************************************************************************
+ * Function: append_BTH
+ *
+ * Description: This function appends a BTH to IB packets.
+ *              To use this function you must have a BTH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the BTH field.
+ *              The function overwrites the given PadCnt, TVer, reserved1
+ *              and reserved2 fields.
+ *
+ * Parameters:
+ *   bth_st_p(in) IB_BTH_st *
+ *     Base transport header of the generated packet. (the function
+ *     overwrites the given reserved1/2 fields).
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 12 bytes back.)
+ *   payload_size(in) u_int16_t
+ *     The payload size in bytes, used to calculate the PadCnt.
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+append_BTH (IB_BTH_st *bth_st_p, u_int16_t **packet_buf_p, u_int16_t payload_size);
+
+/******************************************************************************
+ * Function: extract_BTH
+ *
+ * Description: This function extracts the BTH from IB packets.
+ *              The function will update all the members of the IB_BTH struct.
+ *
+ * Parameters:
+ *   bth_st_p(out) IB_BTH_st *
+ *     Base transport header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 12 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_BTH(IB_BTH_st *bth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: append_RETH
+ *
+ * Description: This function appends a RETH to IB packets.
+ *              To use this function you must have a RETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the RETH field.
+ *
+ * Parameters:
+ *   reth_st_p(in) IB_RETH_st *
+ *     RDMA Extended Transport Header.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 16 bytes back.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+append_RETH (IB_RETH_st *reth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: extract_RETH
+ *
+ * Description: This function extracts the RETH from IB packets.
+ *              The function will update all the members of the IB_RETH struct.
+ *
+ * Parameters:
+ *   reth_st_p(out) IB_RETH_st *
+ *     RDMA Extended Transport header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 16 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_RETH(IB_RETH_st *reth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: append_AETH
+ *
+ * Description: This function appends an AETH to IB packets.
+ *              To use this function you must have an AETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the AETH field.
+ *
+ * Parameters:
+ *   aeth_st_p(in) IB_AETH_st *
+ *     ACK Extended Transport Header.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 4 bytes back.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR if no packet was generated.
+ *****************************************************************************/
+call_result_t
+append_AETH (IB_AETH_st *aeth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: extract_AETH
+ *
+ * Description: This function extracts the AETH from IB packets.
+ *              The function will update all the members of the IB_AETH struct.
+ *
+ * Parameters:
+ *   aeth_st_p(out) IB_AETH_st *
+ *     ACK Extended Transport header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 4 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_AETH (IB_AETH_st *aeth_st_p, u_int16_t **packet_buf_p);
+
+/*****************************************************************************/
+/* From this point on, the functions are for Datagram service               */
+/*****************************************************************************/
+
+/******************************************************************************
+ * Function: append_DETH
+ *
+ * Description: This function appends a DETH to IB packets.
+ *              To use this function you must have a DETH struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the DETH field.
+ *
+ * Parameters:
+ *   deth_st_p(in) IB_DETH_st *
+ *     Datagram Extended Transport Header. (the function copies the
+ *     reserved1 field as given).
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 8 bytes back.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR.
+ *****************************************************************************/
+call_result_t
+append_DETH (IB_DETH_st *deth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: extract_DETH
+ *
+ * Description: This function extracts the DETH from IB packets.
+ *              The function will update all the members of the IB_DETH struct.
+ *
+ * Parameters:
+ *   deth_st_p(out) IB_DETH_st *
+ *     Datagram Extended Transport header of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 8 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_DETH(IB_DETH_st *deth_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: append_ImmDt
+ *
+ * Description: This function appends ImmDt to IB packets.
+ *              To use this function you must have an ImmDt struct
+ *              with all the details to create the wanted packet,
+ *              and an allocated area with free space for the ImmDt field.
+ *
+ * Parameters:
+ *   ImmDt_st_p(in) IB_ImmDt_st *
+ *     Contains the additional data of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 4 bytes back.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR.
+ *****************************************************************************/
+call_result_t
+append_ImmDt(IB_ImmDt_st *ImmDt_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: extract_ImmDt
+ *
+ * Description: This function extracts the ImmDt from IB packets.
+ *              The function will update all the members of the IB_ImmDt struct.
+ *
+ * Parameters:
+ *   ImmDt_st_p(out) IB_ImmDt_st *
+ *     Contains the additional data of the generated packet.
+ *   packet_buf_p(out) u_int16_t **
+ *     pointer to the pointer of the full packet.
+ *     (The function will move the pointer 4 bytes forward.)
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_ImmDt(IB_ImmDt_st *ImmDt_st_p, u_int16_t **packet_buf_p);
+
+/******************************************************************************
+ * Function: append_ICRC
+ *
+ * Description: This function appends the ICRC to IB packets.
+ *
+ * Parameters:
+ *   start_ICRC(in) u_int16_t *
+ *     pointer to the start of the ICRC field
+ *   ICRC(in) u_int32_t
+ *     The ICRC to insert
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+append_ICRC(u_int16_t *start_ICRC, u_int32_t ICRC);
+
+/******************************************************************************
+ * Function: extract_ICRC
+ *
+ * Description: This function extracts the ICRC from IB packets.
+ *
+ * Parameters:
+ *   start_ICRC(in) u_int16_t *
+ *     pointer to the start of the ICRC field
+ *   ICRC(out) u_int32_t *
+ *     The ICRC to extract
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_ICRC(u_int16_t *start_ICRC, u_int32_t *ICRC);
+
+/******************************************************************************
+ * Function: append_VCRC
+ *
+ * Description: This function appends the VCRC to IB packets.
+ *
+ * Parameters:
+ *   start_VCRC(in) u_int16_t *
+ *     pointer to the start of the VCRC field
+ *   VCRC(in) u_int16_t
+ *     The VCRC to insert
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+append_VCRC(u_int16_t *start_VCRC, u_int16_t VCRC);
+
+/******************************************************************************
+ * Function: extract_VCRC
+ *
+ * Description: This function extracts the VCRC from IB packets.
+ *
+ * Parameters:
+ *   start_VCRC(in) u_int16_t *
+ *     pointer to the start of the VCRC field
+ *   VCRC(out) u_int16_t *
+ *     The VCRC to extract
+ *
+ * Returns:
+ *   call_result_t
+ *     MT_OK,
+ *     MT_ERROR
+ *****************************************************************************/
+call_result_t
+extract_VCRC(u_int16_t *start_VCRC, u_int16_t *VCRC);
+
+
+#endif /* H_PACKET_APPEND_H */
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.c
new file mode 100644
index 00000000..7b540ccb
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + +/***************************/ +#ifndef MT_KERNEL + + #include + #include + +#endif /* MT_KERNEL */ + +/* MPGA Includes */ +#include +#include +#include +#include +/***************************/ + +#define FREE_PKT_FIELD(_parm) if ( _parm != NULL ) FREE(_parm) +#define BYTES_IN_LINE 4 + +/*************************************************************************************/ +/* print packet */ +/*************************************************************************************/ +void +MPGA_print_pkt( u_int8_t *packet_buf_p, u_int16_t packet_size) +{ + int index = 0; + int pad = 0; + int gap = 0; + pad = packet_size % BYTES_IN_LINE ; + gap = (packet_size / BYTES_IN_LINE) + 1 ; + packet_size = (packet_size / BYTES_IN_LINE) + 1 ; + MTL_TRACE('1', "\n Packet \n ----------------------------------------------------\n"); + + while(packet_size--){ + MTL_TRACE('1', "\b %d) \b 0x%02X\t %d) 0x%02X\t %d) 0x%02X\t %d) 0x%02X \n",index, packet_buf_p[index], + index+gap, packet_buf_p[index+gap], index+(2*gap), packet_buf_p[index+(2*gap)], + index+(3*gap), packet_buf_p[index+(3*gap)]); + if(packet_size == pad){ + index++; + while(pad--){ + MTL_TRACE('1', " %d) 0x%02X\t %d) 0x%02X\t %d) 0x%02X \n",index, packet_buf_p[index], + index+gap, packet_buf_p[index+gap], index+(2*gap), packet_buf_p[index+(2*gap)]); + index++; + } + packet_size = 0; + } + index++; + } +} + +/*************************************************************************************/ +/* FREE IB_PKT_st */ +/*************************************************************************************/ +call_result_t +MPGA_free_pkt_st_fields(IB_PKT_st *pkt_st_p) +{ + FREE_PKT_FIELD(pkt_st_p->lrh_st_p); + FREE_PKT_FIELD(pkt_st_p->grh_st_p); + FREE_PKT_FIELD(pkt_st_p->bth_st_p); + FREE_PKT_FIELD(pkt_st_p->rdeth_st_p); + FREE_PKT_FIELD(pkt_st_p->deth_st_p); + FREE_PKT_FIELD(pkt_st_p->reth_st_p); + FREE_PKT_FIELD(pkt_st_p->atomic_eth_st_p); + FREE_PKT_FIELD(pkt_st_p->aeth_st_p); + FREE_PKT_FIELD(pkt_st_p->atomic_acketh_st_p); + + /*FREE_PKT_FIELD(pkt_st_p->payload_buf_p);*/ + /*No need to free the payload it is a pointer to the packet payload the user must free himself*/ + return(MT_OK); +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.h new file mode 100644 index 00000000..c93296cc --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/packet_utilities.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef H_PACKET_UTILITIES_H
+#define H_PACKET_UTILITIES_H
+
+/**************************************/
+#ifndef MT_KERNEL
+
+  #include
+  #include
+
+#endif /* MT_KERNEL */
+
+/* MPGA Includes */
+#include
+#include
+/**************************************/
+
+
+/******************************************************************************
+ * Function: Print Packet
+ *
+ * Description: This function prints the given packet.
+ *
+ * Parameters:
+ *   packet_buf_p(IN) u_int8_t *
+ *     A pointer to the first byte in the packet.
+ *   packet_size(IN) u_int16_t
+ *     The packet size in bytes
+ *
+ * Returns: (void function)
+ *
+ *****************************************************************************/
+void
+MPGA_print_pkt( u_int8_t *packet_buf_p, u_int16_t packet_size);
+
+/******************************************************************************
+ * Function: free PKT struct fields
+ *
+ * Description: This function frees all the allocated structures
+ *              in the IB_PKT_st (IB packet struct).
+ * Parameters:
+ *   pkt_st_p(out) IB_PKT_st *
+ *     A pointer to the IB packet struct.
+ *
+ * Returns:
+ *   MT_OK
+ *   MT_ERROR
+ *****************************************************************************/
+call_result_t
+MPGA_free_pkt_st_fields(IB_PKT_st *pkt_st_p);
+
+
+#endif /* H_PACKET_UTILITIES_H */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h
new file mode 100644
index 00000000..31b34a7f
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mpga/ud_pack_fmt.h
@@ -0,0 +1,196 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef H_IBPACKET_H +#define H_IBPACKET_H + +#define IBPCK_GRH_IPVER 6 +#define IBPCK_GRH_NEXT_HEADER 0x1B + +/* 5.2.5 Table 8 */ + +typedef struct datagram_extended_transport_header_le { + unsigned int Q_Key; + + +#ifdef MT_LITTLE_ENDIAN +/* --------------------------------------------------------- */ + unsigned int SrcQP:24; + unsigned int :8; +/* --------------------------------------------------------- */ +#else +/* --------------------------------------------------------- */ + unsigned int :8; + unsigned int SrcQP:24; +/* --------------------------------------------------------- */ +#endif +} IBPCK_deth_le_t; + +/* 5.2.3 Table 6 */ + +typedef struct base_transport_header_le { +#ifdef MT_LITTLE_ENDIAN + unsigned int P_KEY:16; + unsigned int TVer:4; + unsigned int PadCnt:2; + unsigned int M:1; + unsigned int SE:1; + unsigned int OpCode:8; +/* --------------------------------------------------------- */ + unsigned int DestQP:24; + unsigned int :8; +/* --------------------------------------------------------- */ + unsigned int PSN:24; + unsigned int :7; + unsigned int A:1; +/* --------------------------------------------------------- */ +#else + unsigned int OpCode:8; + unsigned int SE:1; + unsigned int M:1; + unsigned int PadCnt:2; + unsigned int TVer:4; + unsigned int P_KEY:16; +/* --------------------------------------------------------- */ + unsigned int :8; + unsigned int DestQP:24; +/* --------------------------------------------------------- */ + unsigned int A:1; + unsigned int :7; + unsigned int PSN:24; +/* --------------------------------------------------------- */ +#endif +} IBPCK_bth_le_t; + +/* 5.2.1 Table 4 */ + +typedef struct local_route_header_le { +#ifdef MT_LITTLE_ENDIAN + unsigned int DLID:16; + unsigned int LNH:2; + unsigned int :2; + unsigned int SL:4; + unsigned int LVer:4; + unsigned int VL:4; +/* --------------------------------------------------------- */ + unsigned int SLID:16; + unsigned int PktLen:11; + unsigned int :5; +/* --------------------------------------------------------- */ +#else + unsigned int VL:4; + unsigned int LVer:4; + unsigned int SL:4; + unsigned int :2; + unsigned int LNH:2; + unsigned int DLID:16; +/* --------------------------------------------------------- */ + unsigned int :5; + unsigned int PktLen:11; + unsigned int SLID:16; +/* --------------------------------------------------------- */ +#endif +} IBPCK_lrh_le_t; + +/* 5.2.2 Table 5 */ + +typedef struct global_route_header_le { +#ifdef MT_LITTLE_ENDIAN + unsigned int flow_lable:20; + unsigned int traffic_class:8; + unsigned int IPvers:4; + + unsigned int hop_limit:8; + unsigned int next_hdr:8; + unsigned int pay_len:16; + + unsigned int sgid_3; + unsigned int sgid_2; + unsigned int sgid_1; + unsigned int sgid_0; + + unsigned int dgid_3; + unsigned int dgid_2; + unsigned int dgid_1; + unsigned int dgid_0; +#else + unsigned int IPvers:4; + unsigned int traffic_class:8; + unsigned int flow_lable:20; + + unsigned int pay_len:16; + unsigned int next_hdr:8; + unsigned int hop_limit:8; + + unsigned int sgid_3; + unsigned int sgid_2; + unsigned int sgid_1; + unsigned int sgid_0; + + unsigned int dgid_3; + unsigned int dgid_2; + unsigned int dgid_1; + unsigned int dgid_0; + +#endif +} IBPCK_grh_le_t; + +typedef struct +{ + struct local_route_header_le lrh; + struct base_transport_header_le bth; + struct datagram_extended_transport_header_le deth; + +} IBPCK_local_udh_t; + +typedef struct +{ + struct local_route_header_le lrh; + struct global_route_header_le grh; + 
struct base_transport_header_le bth; + struct datagram_extended_transport_header_le deth; + +} IBPCK_global_udh_t; + + + +/*Define Link Next Header Definition */ +typedef enum{ + IBPCK_RAW = 0x0, /* |LRH|... (Etertype)*/ + IBPCK_IP_NON_IBA_TRANS = 0x1, /* |LRH|GRH|... */ + IBPCK_IBA_LOCAL = 0x2, /* |LRH|BTH|... */ + IBPCK_IBA_GLOBAL = 0x3 /* |LRH|GRH|BTH|... */ +} IBPCK_LNH_t; + + + + +#endif /* H_IB_PACKET_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.c new file mode 100644 index 00000000..047cb1fa --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.c @@ -0,0 +1,1029 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Save log at DRAM and not file system. + */ +#ifndef MT_KERNEL + +#if defined( __LINUX__ ) || defined( __DARWIN__ ) +#include +#elif defined (__WIN__ ) +#include +HANDLE log_lock; +#endif + +#define MAX_FILENAME 256 +#define MTL_LOG_ENV "MTL_LOG" +#define MTL_LOG_DEF "-" + + +# ifdef __DARWIN__ +# include +# include + /* The function is not supplied in kernel, so it is added + * here below for Darwin Kernel */ + static char *strstr (const char *phaystack, const char *pneedle); +# define printk(...) IOLog(__VA_ARGS__) +# endif /* defined __DARWIN__ */ + +#endif /* MT_KERNEL */ + + +/* Here we force this feature out */ +#undef MTL_LOG_MALLOC + +/* Take local header before getting it from the release. 
*/ +#if MT_KERNEL +#include "mtl_sys_defs.h" +#else +#include "mtl_sys_defs.h" +#include "complib/cl_debug.h" +#endif + +#include "mtl_common.h" + +#ifdef __WIN__ +char *cur_module; +#endif + + +const char* mtl_strerror( call_result_t errnum) +{ + switch (errnum) { +#define INFO(A, B, C) case A: return C; + ERROR_LIST +#undef INFO + default: return "Unknown error"; + } +} + +const char* mtl_strerror_sym( call_result_t errnum) +{ + switch (errnum) { +#define INFO(A, B, C) case A: return #A; + ERROR_LIST +#undef INFO + default: return "Unknown error"; + } +} + +const char* mtl_basename(const char* filename) +{ + const char* slash_p = strchr(filename, '/'); + if (slash_p) + { + filename = slash_p + 1; + } + return filename; +} /* mtl_basename */ + + +static MT_bool extract_key(const char *str, const char *key, const char *suffix, + char *result, const int result_max_len) +{ + char *p, *pstr = (char *)str; + int i=0; + + if (!pstr) + return FALSE; + + while ((p = strstr(pstr, key))) + { + if (!strncmp(p + strlen(key), suffix, strlen(suffix))) + { + char *q = p+strlen(key)+strlen(suffix); + while (*q && *q != ' ') + { + *result++ = *q++; + if (++i >= result_max_len) + break; + } + *result = '\0'; + return TRUE; + } + else + pstr = p + strlen(key); + } + return FALSE; +} + +/************** Kernel Specific Code ************************/ + +#ifdef MT_KERNEL + +static struct log_info { + char *name; // Module name + struct log_info *next; // Pointer to next record + struct print_info { + char *name; + char sevs[MAX_MTL_LOG_SEVERITIES+1]; + } print_info[MAX_MTL_LOG_TYPES]; +} *log_first = (struct log_info *)NULL, + log_default = { + NULL, NULL, + { + { "trace", "" }, + { "debug", "" }, + { "error", "1234" } + } + }; + +//TODO: Bug: strlen run on user space buffer! +//static char *mtl_strdup(const char *str) +// return s ? strcpy(s, str) : NULL; +//#define mtl_strdup(str) ({ char *s = (char*)MALLOC(strlen(str)+1); s? strcpy(s, (str)) : NULL; }) +static __INLINE__ char *mtl_strdup(const char *str) +{ + char *s = (char*)MALLOC(strlen(str)+1); + return s? 
strcpy(s, (str)) : NULL;
+}
+
+
+static int debug_print = 0;
+
+void mtl_log_set(char* layer, char *info)
+{
+    struct log_info *p, *prev;
+
+    printk("mtl_log_set: layer '%s', info '%s'\n", layer, info );
+
+    if (!strcmp( layer, "mtl_log_dbg_print" )) {
+        debug_print ^= 1;
+        return;
+    }
+
+    if (strcmp(layer, "print"))
+    {
+        /*
+         * Find the necessary record
+         */
+        for(prev = p = log_first; p; prev = p, p = p->next)
+        {
+            if (!strcmp(layer, p->name))
+                break;
+        }
+        if (!p)
+        {
+            /* Not found - create */
+            /* Do not use MALLOC macro to avoid infinite recursion */
+            p = (struct log_info *)QMALLOC(sizeof(struct log_info));
+            if (p == NULL)
+                return;    /* allocation failure */
+            memcpy(p, &log_default, sizeof(struct log_info));
+            p->name = mtl_strdup(layer);
+            if (prev)
+                prev->next = p;
+            else
+                log_first = p;
+        }
+        /*
+         * Now "p" is a pointer to the corresponding record, either
+         * pre-existing or created just now
+         */
+        extract_key(info, "trace", ":", p->print_info[mtl_log_trace].sevs,
+                    MAX_MTL_LOG_SEVERITIES);
+        extract_key(info, "debug", ":", p->print_info[mtl_log_debug].sevs,
+                    MAX_MTL_LOG_SEVERITIES);
+        extract_key(info, "error", ":", p->print_info[mtl_log_error].sevs,
+                    MAX_MTL_LOG_SEVERITIES);
+
+    }
+    else
+    {
+        printk("<1>\n");
+        printk("<1> Layers and severities for print\n");
+        printk("<1> -------------------------------\n");
+        for(p=log_first; p; p = p->next)
+        {
+            int i;
+
+            printk("<1> Layer - \"%s\":\n", p->name);
+            for (i=0; i<MAX_MTL_LOG_TYPES; i++)
+            {
+                printk("<1>   Name=\"%s\", severities=\"%s\"\n",
+                       p->print_info[i].name, p->print_info[i].sevs);
+            }
+        }
+        printk("<1>\n");
+    }
+}
+
+/*
+ * mtl_common_cleanup
+ */
+void mtl_common_cleanup(void)
+{
+    struct log_info *p = log_first, *next;
+
+    while ( p ) {
+        if ( p->name ) {
+            FREE(p->name);
+            p->name = NULL;
+        }
+        next = p->next;
+        FREE(p);
+        p = next;
+    }
+}
+
+void mtl_log(const char* layer, mtl_log_types log_type, char sev,
+             const char *fmt, ...)
+{
+    char pbuff[MAX_MTL_LOG_LEN];
+    struct log_info *p, *prev;
+    va_list ap;
+    if (debug_print)
+        printk("***** mtl_log:DEBUG: layer '%s', type %d, sev '%c'\n", layer, log_type, sev);
+
+    /*
+     * Find the necessary record
+     */
+    for(prev = p = log_first; p; prev = p, p = p->next)
+    {
+        if (!strcmp(layer, p->name))
+            break;
+    }
+
+    if (!p)
+    {
+        if (debug_print)
+            printk("***** mtl_log:DEBUG: Not found layer '%s' - create\n", layer);
+
+        // Not found - create
+        // Avoid call to MALLOC to avoid infinite recursion
+        p = (struct log_info *)QMALLOC(sizeof(struct log_info));
+        if (p == NULL) {
+            /* malloc failure. just return */
+            return;
+        }
+        memcpy(p, &log_default, sizeof(struct log_info));
+        p->name = mtl_strdup(layer);
+        if (p->name == NULL) {
+            FREE(p);
+            return;
+        }
+        if (prev)
+            prev->next = p;
+        else
+            log_first = p;
+    }
+    else
+    {
+        if (debug_print)
+            printk("***** mtl_log:DEBUG: Found layer '%s', Name=\"%s\", sev=\"%s\"\n",
+                   p->name, p->print_info[log_type].name, p->print_info[log_type].sevs );
+    }
+
+    /*
+     * Now "p" is a pointer to the corresponding record, either
+     * pre-existing or created just now
+     */
+
+
+    /*
+     * Log printing
+     */
+    if (strchr(p->print_info[log_type].sevs, sev))
+    {
+        if (debug_print)
+            printk("***** mtl_log:DEBUG: print string\n" );
+        va_start (ap, fmt);
+        vsprintf (pbuff, fmt, ap);
+        va_end (ap);
+        printk("<1> %s(%c): %s", layer, sev, pbuff);
+    }
+}
+
+/************************************************************************/
+/*          Kernel memory allocation logging/debugging                 */
+
+#if 0
+/* Alas, we do not have such machinery for the kernel (yet?)
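+ * In user space this could come from glibc, e.g. (illustrative only, not
+ * part of this module):
+ *     void *frames[5];
+ *     int n = backtrace(frames, 5);
+ *     char **syms = backtrace_symbols(frames, n);   (caller frees syms)
+ * which is roughly what the disabled sketch below does.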
+ */
+
+/* #include */
+static const char* getBackTrace(char* buf, int bufLen)
+{
+    enum {Depth = 5};
+    char* pBuf = buf;
+    void* array[Depth];
+    size_t size = backtrace(array, Depth);
+    if (size > 0)
+    {
+        char* pBufLimit = buf + bufLen - 2;
+        char** syms = backtrace_symbols(array, size);
+        char** symsEnd = syms += size;
+        char* sym = syms[0];
+        char* pBufNext = pBuf + strlen(sym);
+        while ((syms != symsEnd) && (pBufNext < pBufLimit))
+        {
+            strcpy(pBuf, sym);
+            pBuf = pBufNext;
+            *pBuf++ = '|';
+            ++syms;
+            if (syms != symsEnd)
+            {
+                sym = *syms;
+                pBufNext += strlen(sym);
+            }
+        }
+    }
+    *pBuf = '\0';
+    return buf;
+} /* getBackTrace */
+#endif
+
+static const char* mtl_malloc_modname = "MEMCHECK";
+static const char* mallog_magic = "takeme4memcheck";
+
+void* mtl_log_vmalloc(const char* fn, int ln, int bsize)
+{
+    /* char symTrace[256];
+     * getBackTrace(symTrace, sizeof(symTrace)); */
+    void* ptr = (void*)QVMALLOC(bsize);
+    mtl_log(mtl_malloc_modname, mtl_log_debug, '1',
+            "%s[%d]: 0x%p := vmalloc(%d) %s %s\n",
+            fn, ln, ptr, bsize, mallog_magic, "");
+    return ptr;
+}
+
+void mtl_log_vfree(const char* fn, int ln, void* ptr)
+{
+    mtl_log(mtl_malloc_modname, mtl_log_debug, '1',
+            "%s[%d]: vfree(0x%p) %s\n", fn, ln, ptr, mallog_magic);
+    QVFREE(ptr);
+}
+
+void* mtl_log_kmalloc(const char* fn, int ln, int bsize, unsigned g)
+{
+    /* char symTrace[256];
+     * getBackTrace(symTrace, sizeof(symTrace)); */
+    void* ptr = (void*)QCMALLOC(bsize, g);
+    mtl_log(mtl_malloc_modname, mtl_log_debug, '1',
+            "%s[%d]: 0x%p := kmalloc(%d, 0x%x) %s %s\n",
+            fn, ln, ptr, bsize, g, mallog_magic, "");
+    return ptr;
+}
+
+void mtl_log_kfree(const char* fn, int ln, void *ptr)
+{
+    mtl_log(mtl_malloc_modname, mtl_log_debug, '1',
+            "%s[%d]: kfree(0x%p) %s\n", fn, ln, ptr, mallog_magic);
+    QFREE(ptr);
+}
+
+/* */
+/************************************************************************/
+#else /* not defined MT_KERNEL */
+
+/**************  User Specific Code ************************/
+
+static struct log_info {
+    char *name;             // Module name
+    FILE *fp;               // File for output
+    int first;              // first usage == 1
+    struct log_info *next;  // Pointer to next record
+    struct print_info {
+        char *name;
+        char sevs[MAX_MTL_LOG_SEVERITIES+1];
+    } print_info[MAX_MTL_LOG_TYPES];
+} *log_first = (struct log_info *)NULL,
+  log_default = {
+      NULL, NULL, 1, NULL,
+      {
+          { "trace", "" },
+          { "debug", "" },
+          { "error", "" }
+      }
+  };
+
+static FILE *open_logfile(const char *fname)
+{
+    FILE *rc;
+/*
+    if (!lstrcmp(fname, "-") || !lstrcmp(fname, "&"))
+        rc = stderr;
+    else if (!lstrcmp(fname, ">"))
+        rc = stdout;
+    else if ((rc = fopen(fname, "w")) == NULL)
+    {
+        fprintf(stderr, "Can't open \"%s\" - %s\nUse stderr instead.\n",
+                fname, strerror(errno));
+        rc = stderr;
+    }
+ */
+    return stdout;
+}
+
+void mtl_log_DB_print()
+{
+    struct log_info *p;
+    DebugPrint("<1>\n");
+    DebugPrint("<1> Layers and severities for print\n");
+    DebugPrint("<1> -------------------------------\n");
+    for(p=log_first; p; p = p->next)
+    {
+        int i;
+
+        DebugPrint("<1> Layer - \"%s\":\n", p->name);
+        for (i=0; i<MAX_MTL_LOG_TYPES; i++)
+        {
+            DebugPrint("<1>   Name=\"%s\", severities=\"%s\"\n",
+                       p->print_info[i].name, p->print_info[i].sevs);
+        }
+    }
+    DebugPrint("<1>\n");
+}
+
+static __INLINE__ char *mtl_strdup(const char *str)
+{
+    char *s = (char*)MALLOC(strlen(str)+1);
+    return s? lstrcpy(s, (str)) : NULL;
+}
+
+
+
+void mtl_log(const char* layer, mtl_log_types log_type, char sev,
+             const char *fmt, ...)
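+/*
+ * Illustrative call (the layer name "MYMOD" and the value of rc are made-up
+ * examples, not identifiers from this file):
+ *     mtl_log("MYMOD", mtl_log_error, '1', "init failed (%d)\n", rc);
+ * with rc == 42 this is rendered by DebugPrint() as " MYMOD(1): init failed (42)".
+ */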
+{ + char pbuff[MAX_MTL_LOG_LEN]; + struct log_info *p, *prev; + va_list ap; + /* + * Find necessary record + */ + for(prev = p = log_first; p; prev = p, p = p->next) + { + if (!lstrcmpi(layer, p->name)) + break; + } + + if (!p) + { + + // Not found - create + // Avoid call to MALLOC to avoid infinite recursion + p = (struct log_info *)MALLOC(sizeof(struct log_info)); + if (p == NULL) { + /* malloc failure. just return */ + return; + } + CopyMemory(p, &log_default, sizeof(struct log_info)); + p->name = mtl_strdup(layer); + if (p->name == NULL) { + FREE(p); + return; + } + if (prev) + prev->next = p; + else + log_first = p; + } + + /* + * Now "p" is a pointer to corresponding record, either existed + * or created just now + */ + + + /* + * Log printing + */ + //if (strchr(p->print_info[log_type].sevs, sev)) + + va_start (ap, fmt); + wvsprintf (pbuff, fmt, ap); + va_end (ap); + DebugPrint(" %s(%c): %s", layer, sev, pbuff); + } + +#endif /* __KERNEL__ */ + + + +#if defined(__WIN__) || defined(VXWORKS_OS) + +#define MAX_MOD_NAME_LEN 32 + +/* + * The only reason why mt_strtoull function is here + * - I didn't find such function in MSDN library. + * + * The code of mt_strtoull borrowed from glibc library + * with small changes + */ +/* Convert NPTR to an `unsigned long int' or `long int' in base BASE. + If BASE is 0 the base is determined by the presence of a leading + zero, indicating octal or a leading "0x" or "0X", indicating hexadecimal. + If BASE is < 2 or > 36, it is reset to 10. + If ENDPTR is not NULL, a pointer to the character after the last + one converted is stored in *ENDPTR. */ +#if 0 +u_int64_t mt_strtoull (const char *nptr, char **endptr, int base) +{ + int overflow, negative; + u_int64_t i, cutlim, cutoff; + const char *s; + char c; + const char *save; + + if (base < 0 || base == 1 || base > 36) + base = 10; + + s = nptr; + + /* Skip white space. */ + while (isspace (*s)) + ++s; + if (*s == '\0') + goto noconv; + + /* Check for a sign. */ + if (*s == '-') + { + negative = 1; + ++s; + } + else if (*s == '+') + { + negative = 0; + ++s; + } + else + negative = 0; + + if (base == 16 && s[0] == '0' && toupper (s[1]) == 'X') + s += 2; + + /* If BASE is zero, figure it out ourselves. */ + if (base == 0) + { + if (*s == '0') + { + if (toupper (s[1]) == 'X') + { + s += 2; + base = 16; + } + else + base = 8; + } + else + base = 10; + } + + /* Save the pointer so we can check later if anything happened. */ + save = s; + + cutoff = _UI64_MAX / (u_int32_t) base; + cutlim = _UI64_MAX % (u_int32_t) base; + + overflow = 0; + i = 0; + for (c = *s; c != '\0'; c = *++s) + { + if (isdigit (c)) + c -= '0'; + else if (isalpha (c)) + c = toupper (c) - 'A' + 10; + else + break; + if (c >= base) + break; + /* Check for overflow. */ + if (i > cutoff || (i == cutoff && c > cutlim)) + overflow = 1; + else + { + i *= (unsigned long int) base; + i += c; + } + } + + /* Check if anything actually happened. */ + if (s == save) + goto noconv; + + /* Store in ENDPTR the address of one character + past the last character we converted. */ + if (endptr != NULL) + *endptr = (char *) s; + + if (overflow) + return _UI64_MAX; + + /* Return the result of the appropriate sign. */ + return (negative ? (u_int64_t)(-(int64_t)i) : i); + +noconv: + /* There was no number to convert. 
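+     (e.g. mt_strtoull("abc", &end, 10) takes this path: it returns 0 and
+     leaves end pointing back at the start of "abc")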
*/ + if (endptr != NULL) + *endptr = (char *) nptr; + return 0L; +} +#endif // 0 + +#endif + +#if defined(__WIN__) + +#ifdef __KERNEL__ + + +static KSPIN_LOCK s_mod_name_sp; +static char s_mod_name[MAX_MOD_NAME_LEN]; +static KIRQL s_irql=0; +static KIRQL s_irql_synch=0; + +#ifdef USE_RELAY_MOD_NAME +#define NT_CALL_MT_LOG(sev,type) \ + char mod_name[MAX_MOD_NAME_LEN]; \ + char pbuff[MAX_MTL_LOG_LEN]; \ + va_list ap; \ + mtl_log_get_name(mod_name); \ + va_start (ap, fmt); \ + vsprintf (pbuff, fmt, ap); \ + va_end (ap); \ + mtl_log( mod_name, type, sev, "%s", pbuff) +#else +#define NT_CALL_MT_LOG(sev,type) \ + char pbuff[MAX_MTL_LOG_LEN]; \ + va_list ap; \ + va_start (ap, fmt); \ + vsprintf (pbuff, fmt, ap); \ + va_end (ap); \ + mtl_log( MAKE_MOD_NAME, type, sev, "%s", pbuff) +#endif + + +NTKERNELAPI +KIRQL +FASTCALL +KeAcquireSpinLockRaiseToSynch ( + PKSPIN_LOCK SpinLock + ); + +void mtl_log_set_name( char * mod_name ) +{ +#ifndef USE_RELAY_MOD_NAME + DbgPrint( "mtl_log_set_name: can't be here, irql = %d!!!\n", KeGetCurrentIrql()); + ASSERT(0); +#endif + s_irql = KeAcquireSpinLockRaiseToSynch(&s_mod_name_sp); + strcpy( s_mod_name, mod_name ); +} + +static __inline void mtl_log_get_name( char * mod_name ) +{ + KIRQL irql = KeGetCurrentIrql(); +#ifndef USE_RELAY_MOD_NAME + DbgPrint( "mtl_log_set_name: can't be here, irql = %d!!!\n", irql); + ASSERT(0); +#endif + if (irql != s_irql_synch) { + DbgPrint( "MDT.SYS: mtl_log_get_name: WARNING: unexpected current IRQL (%d), new (=prev) irql = %d!!\n", irql, s_irql); + ASSERT(0); + } + irql = s_irql; + strcpy( mod_name, s_mod_name ); + s_irql = 0; + KeReleaseSpinLock(&s_mod_name_sp, irql ); +} + +#ifndef MT_BUILD_LIB +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ +#ifdef USE_RELAY_MOD_NAME + KIRQL irql; + // init spinlock + KeInitializeSpinLock(&s_mod_name_sp); + // find SYNC IRQL + irql = KeAcquireSpinLockRaiseToSynch(&s_mod_name_sp); + s_irql_synch = KeGetCurrentIrql(); + KeReleaseSpinLock(&s_mod_name_sp, irql ); +#endif + + DbgPrint("\n***** MTL_COMMON_KL: DllInitialize()"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** MTL_COMMON_KL: DllUnload()"); + return STATUS_SUCCESS; +} + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pRegistryPath + ) +{ /* DriverEntry */ + + DbgPrint("\n***** MTL_COMMON_KL: DriverEntry()"); + return STATUS_SUCCESS; + +} /* DriverEntry */ +#endif /* ifndef MT_BUILD_LIB */ + + +#else /* ifdef __KERNEL__ */ + +/* ----- User Space ----- */ + + +#include + +int s_mod_name_init = 1; +HANDLE s_mod_name_mutex = NULL; +char s_mod_name[MAX_MOD_NAME_LEN]; + +#ifdef USE_RELAY_MOD_NAME +#define NT_CALL_MT_LOG(sev,type) \ + char mod_name[MAX_MOD_NAME_LEN]; \ + char pbuff[MAX_MTL_LOG_LEN]; \ + va_list ap; \ + mtl_log_get_name(mod_name); \ + va_start (ap, fmt); \ + wvsprintf (pbuff, fmt, ap); \ + va_end (ap); \ + mtl_log( mod_name, type, sev, "%s", pbuff) +#else +#define NT_CALL_MT_LOG(sev,type) \ + char pbuff[MAX_MTL_LOG_LEN]; \ + va_list ap; \ + va_start (ap, fmt); \ + wvsprintf (pbuff, fmt, ap); \ + va_end (ap); \ + mtl_log( MAKE_MOD_NAME, type, sev, "%s", pbuff) +#endif + +void mtl_log_get_name( char * mod_name ) +{ +#ifndef USE_RELAY_MOD_NAME + DebugPrint( "MTL_COMMON: mtl_log_get_name: error in build - we can't get here !!!\n"); +#endif + lstrcpy( mod_name, s_mod_name ); + if (s_mod_name_mutex != NULL) + ReleaseMutex( s_mod_name_mutex ); +} + +void mtl_log_set_name( char * mod_name ) +{ +#ifndef USE_RELAY_MOD_NAME + DebugPrint( "MTL_COMMON: 
mtl_log_set_name: error in build - we can't get here !!!\n"); +#endif + if (s_mod_name_init) { + s_mod_name_mutex = CreateMutex( + NULL, // default security descriptor + FALSE, // not to acquire on creation + "MOD_NAME_MUTEX" // object name: unnamed + ); + if (s_mod_name_mutex == NULL) { + mtl_log( "MTLCOMMON", mtl_log_error, 1, "(mtl_log_set_name) CreateMutex failed (0x%x)\n", GetLastError() ); + } + s_mod_name_init = 0; + } + + if (s_mod_name_mutex != NULL) { + WaitForSingleObject( + s_mod_name_mutex, // handle to object + INFINITE // time-out interval + ); + } + strcpy( s_mod_name, mod_name ); +} + + +BOOL mtl_common_main( HANDLE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved + ) +{ + BOOL l_fRetCode = TRUE; + + switch (ul_reason_for_call) + { + case DLL_PROCESS_ATTACH: + log_lock = CreateMutex( NULL, FALSE, NULL ); + break; + + case DLL_PROCESS_DETACH: + if (log_lock != NULL) + CloseHandle(log_lock); + break; + + case DLL_THREAD_ATTACH: + case DLL_THREAD_DETACH: + break; + } + return l_fRetCode; +} + +VOID +DebugPrint( + IN PUCHAR pi_szFormat, + ... + ) +/*++ + +Routine Description: + Writes a formatted ( printf() like ) string to output + +Arguments: + + pi_nDbgLogLevel...... Level of debugging log. + pi_szFormat.......... The format of the log. + +Return Value: + + None . + +--*/ +{ /* DebugPrint */ + + /* Log buffer for DebugPrint */ + static CHAR l_vLogBuff[ 1024 ]; + /* Formatted string length */ + int l_nStrLen ; + + /* Variable argument list */ + va_list l_Argptr; + + /* Init the variable argument list */ + va_start(l_Argptr, pi_szFormat); + + /* Build the formatted string */ + l_nStrLen = wvsprintf((char*)&l_vLogBuff[0] , (const char *)pi_szFormat , l_Argptr); + + /* If debug mode , print to debug window*/ + OutputDebugString(l_vLogBuff); + + /* Term the variable argument list */ + va_end(l_Argptr); + +} /* DebugPrint */ + +#endif /* ifdef __KERNEL__ */ + +#if defined(_M_IX86) +#pragma message( "***** The code is being built for __i386__ architecture *****" ) +#elif defined(_M_IA64) +#pragma message( "***** The code is being built for __ia64__ architecture *****" ) +#elif defined(_M_AMD64) +#pragma message( "***** The code is being built for __amd64__ architecture *****" ) +#else +#error Platform is not supported yet +#endif + + +void NT_trace(char sev, char *fmt, ...) { NT_CALL_MT_LOG(sev,mtl_log_trace); } +void NT_trace1(char *fmt, ...) { NT_CALL_MT_LOG('1',mtl_log_trace); } +void NT_trace2(char *fmt, ...) { NT_CALL_MT_LOG('2',mtl_log_trace); } +void NT_trace3(char *fmt, ...) { NT_CALL_MT_LOG('3',mtl_log_trace); } +void NT_trace4(char *fmt, ...) { NT_CALL_MT_LOG('4',mtl_log_trace); } +void NT_trace5(char *fmt, ...) { NT_CALL_MT_LOG('5',mtl_log_trace); } +void NT_trace6(char *fmt, ...) { NT_CALL_MT_LOG('6',mtl_log_trace); } +void NT_trace7(char *fmt, ...) { NT_CALL_MT_LOG('7',mtl_log_trace); } +void NT_trace8(char *fmt, ...) { NT_CALL_MT_LOG('8',mtl_log_trace); } +void NT_trace9(char *fmt, ...) { NT_CALL_MT_LOG('9',mtl_log_trace); } +void NT_debug(char sev, char *fmt, ...) { NT_CALL_MT_LOG(sev,mtl_log_debug); } +void NT_debug1(char *fmt, ...) { NT_CALL_MT_LOG('1',mtl_log_debug); } +void NT_debug2(char *fmt, ...) { NT_CALL_MT_LOG('2',mtl_log_debug); } +void NT_debug3(char *fmt, ...) { NT_CALL_MT_LOG('3',mtl_log_debug); } +void NT_debug4(char *fmt, ...) { NT_CALL_MT_LOG('4',mtl_log_debug); } +void NT_debug5(char *fmt, ...) { NT_CALL_MT_LOG('5',mtl_log_debug); } +void NT_debug6(char *fmt, ...) { NT_CALL_MT_LOG('6',mtl_log_debug); } +void NT_debug7(char *fmt, ...) 
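+/* Each numbered NT_* wrapper hard-codes its severity digit, so a caller can
+ * write, e.g., NT_error1("bad port %d\n", port) rather than NT_error('1', ...);
+ * "port" here is illustrative, not an identifier from this file. */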
{ NT_CALL_MT_LOG('7',mtl_log_debug); } +void NT_debug8(char *fmt, ...) { NT_CALL_MT_LOG('8',mtl_log_debug); } +void NT_debug9(char *fmt, ...) { NT_CALL_MT_LOG('9',mtl_log_debug); } +void NT_error(char sev, char *fmt, ...) { NT_CALL_MT_LOG(sev,mtl_log_error); } +void NT_error1(char *fmt, ...) { NT_CALL_MT_LOG('1',mtl_log_error); } +void NT_error2(char *fmt, ...) { NT_CALL_MT_LOG('2',mtl_log_error); } +void NT_error3(char *fmt, ...) { NT_CALL_MT_LOG('3',mtl_log_error); } +void NT_error4(char *fmt, ...) { NT_CALL_MT_LOG('4',mtl_log_error); } +void NT_error5(char *fmt, ...) { NT_CALL_MT_LOG('5',mtl_log_error); } +void NT_error6(char *fmt, ...) { NT_CALL_MT_LOG('6',mtl_log_error); } +void NT_error7(char *fmt, ...) { NT_CALL_MT_LOG('7',mtl_log_error); } +void NT_error8(char *fmt, ...) { NT_CALL_MT_LOG('8',mtl_log_error); } +void NT_error9(char *fmt, ...) { NT_CALL_MT_LOG('9',mtl_log_error); } + + +#endif /* defined(__WIN__)*/ + +#if defined(__DARWIN__) && defined(MT_KERNEL) + +/* copy paste function implementation for strstr */ +static char *strstr (const char *phaystack, const char *pneedle) +{ + const unsigned char *haystack, *needle; + char b, c; + + haystack = (const unsigned char *) phaystack; + needle = (const unsigned char *) pneedle; + + b = *needle; + if (b != '\0') + { + haystack--; + do + { + c = *++haystack; + if (c == '\0') + goto ret0; + } + while (c != b); + + c = *++needle; + if (c == '\0') + goto foundneedle; + ++needle; + goto jin; + + for (;;) + { + char a; + const unsigned char *rhaystack, *rneedle; + + do + { + a = *++haystack; + if (a == '\0') + goto ret0; + if (a == b) + break; + a = *++haystack; + if (a == '\0') + goto ret0; +shloop:; } + while (a != b); + +jin: a = *++haystack; + if (a == '\0') + goto ret0; + + if (a != c) + goto shloop; + + rhaystack = haystack-- + 1; + rneedle = needle; + a = *rneedle; + + if (*rhaystack == a) + do + { + if (a == '\0') + goto foundneedle; + ++rhaystack; + a = *++needle; + if (*rhaystack != a) + break; + if (a == '\0') + goto foundneedle; + ++rhaystack; + a = *++needle; + } + while (*rhaystack == a); + + needle = rneedle; + + if (a == '\0') + break; + } + } +foundneedle: + return (char*) haystack; +ret0: + return 0; +} + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.h new file mode 100644 index 00000000..dd6b7f59 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_common.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_MTL_COMMON_H +#define H_MTL_COMMON_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if defined(__mppc__) && ! defined(MT_BIG_ENDIAN) +#define MT_BIG_ENDIAN 1 +#endif + +#ifdef VXWORKS_OS + +/* + * General include files required by VxWorks applications. + */ +#include "vxWorks.h" +#include "stdio.h" +#include "stdlib.h" +#include "string.h" +#include "stdarg.h" +#include "errno.h" +#include "taskLib.h" +#include "semLib.h" + +/* + * No print kernel under VxWorks. + */ +#define printk printf + +/* + * MDDK assumes that random returns values upto 32k which is true only to rand. + */ +#define random rand +#define srandom srand + +/* + * If CPU is already big endian, degenerate big endian macros. + */ +#ifdef MT_BIG_ENDIAN +#error MT_BIG_ENDIAN defined +#define __cpu_to_be32(x) (x) +#define __be32_to_cpu(x) (x) +#define __cpu_to_be64(x) (x) +#define __be64_to_cpu(x) (x) +#define __cpu_to_be16(x) (x) +#define __be16_to_cpu(x) (x) +#endif /* MT_BIG_ENDIAN */ + +/* + * Limits definitions. + */ +#include "limits.h" + +/* + * Global semaphore which prevents more than one application to access CR space. + */ +extern SEM_ID appl_is_running_sem; + +#endif /* VXWORKS_OS */ + +#include +#include + +#define MAX_MTL_LOG_TYPES 3 +#define MAX_MTL_LOG_SEVERITIES 8 +#define MAX_MTL_LOG_LEN 512 +#define MAX_MTL_LOG_LAYER 32 +#define MAX_MTL_LOG_INFO 512 +typedef enum { + mtl_log_trace=0, + mtl_log_debug=1, + mtl_log_error=2 +} mtl_log_types; + +/* Prepend File-name + Line-number to formatted message. + * Example: MTL_DEBUG4(MT_FLFMT("(1+1/n)^n -> %g (%s)"), M_E, "Euler") + */ +#define MT_FLFMT(fmt) "%s[%d]: " fmt "\n", mtl_basename(__FILE__), __LINE__ + +/* OS-dependent stuff */ +#include + +/* Convenient macros doing cast & sizeof */ +#define TNMALLOC(t, n) (t*)MALLOC((n) * sizeof(t)) +#define TMALLOC(t) TNMALLOC(t, 1) +#define TNVMALLOC(t, n) (t*)VMALLOC((n) * sizeof(t)) +#define TVMALLOC(t) TNVMALLOC(t, 1) +#define TNINTR_MALLOC(t, n) (t*)INTR_MALLOC((n) * sizeof(t)) +#define TINTR_MALLOC(t) TNINTR_MALLOC(t, 1) + + +#ifndef POWER2 +#define POWER2(power) (1 << (power)) +#else +#error use different name for power of 2 +#endif + +#ifdef USE_RELAY_MOD_NAME +extern void mtl_log_set_name( char * mod_name ); +#endif + + +extern const char* mtl_strerror( call_result_t errnum); +extern const char* mtl_strerror_sym( call_result_t errnum); +extern const char* mtl_basename(const char* filename); /* trim dir-path */ + +/****************************************************************************** + * Function: mtl_log_set + * + * Description: Setup log print in kernel module. + * + * Parameters: + * layer(IN) (LEN s) char* + * info(IN) (LEN s) char* + * + * Returns: + ******************************************************************************/ +void mtl_log_set(char* layer, char *info); + + +extern void mtl_log(const char* layer, mtl_log_types log_type, char sev, + const char *fmt, ...) __attribute__ ((format (printf, 4, 5))); + +extern void mtl_common_cleanup(void); + +/** + * MT_DOWN_XXX + * + * Clears lower 'mask' bit of 'value'. + * + * e.g. MT_MASKX(0xFFFFF,8) -> 0xFFF00 + */ + +/** + * MT_UP_XXX + * + * Upward aligns 'value' to is lower 'mask' bits. + * + * e.g. 
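MT_UP_ALIGNX_U32(0x11, 4) -> 0x20, while an already-aligned value stays
+ * put: MT_UP_ALIGNX_U32(0x20, 4) -> 0x20 (worked from the definitions below);
+ * similarly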
MT_UP_ALIGNX(0x1002, 8) -> 0x1100 + */ + +/* for MT_virt_addr_t type of value */ +#define MT_DOWN_ALIGNX_VIRT(value, mask) ((MT_virt_addr_t)(value) & (~((MT_virt_addr_t)0) << (mask))) +#define MT_UP_ALIGNX_VIRT(value, mask) MT_DOWN_ALIGNX_VIRT(((value) + ~(~((MT_virt_addr_t)0) << (mask))), (mask)) + +/* for MT_phys_addr_t type of value */ +#define MT_DOWN_ALIGNX_PHYS(value, mask) ((MT_phys_addr_t)(value) & (~((MT_phys_addr_t)0) << (mask))) +#define MT_UP_ALIGNX_PHYS(value, mask) MT_DOWN_ALIGNX_PHYS(((value) + ~(~((MT_phys_addr_t)0) << (mask))), (mask)) + +/* for u_int32_t type of value */ +#define MT_DOWN_ALIGNX_U32(value, mask) ((u_int32_t)(value) & (~((u_int32_t)0) << (mask))) +#define MT_UP_ALIGNX_U32(value, mask) MT_DOWN_ALIGNX_U32(((value) + ~(~((u_int32_t)0) << (mask))), (mask)) + +/* for u_int64_t type of value */ +#define MT_DOWN_ALIGNX_U64(value, mask) ((u_int64_t)(value) & (~((u_int64_t)0) << (mask))) +#define MT_UP_ALIGNX_U64(value, mask) MT_DOWN_ALIGNX_U64(((value) + ~(~((u_int64_t)0) << (mask))), (mask)) + +/* for MT_ulong_ptr_t type of value */ +#define MT_DOWN_ALIGNX_ULONG_PTR(value, mask) ((MT_ulong_ptr_t)(value) & (~((MT_ulong_ptr_t)0) << (mask))) +#define MT_UP_ALIGNX_ULONG_PTR(value, mask) MT_DOWN_ALIGNX_ULONG_PTR(((value) + ~(~((MT_ulong_ptr_t)0) << (mask))), (mask)) + +/* for MT_size_t type of value */ +#define MT_DOWN_ALIGNX_SIZE(value, mask) ((MT_size_t)(value) & (~((MT_size_t)0) << (mask))) +#define MT_UP_ALIGNX_SIZE(value, mask) MT_DOWN_ALIGNX_SIZE(((value) + ~(~((MT_size_t)0) << (mask))), (mask)) + +/* for unsigned long type of value */ +#define MT_DOWN_ALIGNX_ULONG(value, mask) ((unsigned long)(value) & (~((unsigned long)0) << (mask))) +#define MT_UP_ALIGNX_ULONG(value, mask) MT_DOWN_ALIGNX_ULONG(((value) + ~(~((unsigned long)0) << (mask))), (mask)) + +/* for unsigned long type of value , */ +/* PLEASE DON'T USE THIS MACRO. 
IT's KEPT JUST FOR BACKWARD COMPATABILITY REASONS */ +#define MT_DOWN_ALIGNX(value, mask) ((unsigned long)(value) & (~((unsigned long)0) << (mask))) +#define MT_UP_ALIGNX(value, mask) MT_DOWN_ALIGNX(((value) + ~(~((unsigned long)0) << (mask))), (mask)) + + +enum { + MT_MELLANOX_IEEE_VENDOR_ID = 0x02c9, + MT_MELLANOX_PCI_VENDOR_ID = 0x15B3, + MT_TOPSPIN_PCI_VENDOR_ID = 0x1867 +}; + +/* some standard macros for tracing */ +#define FUNC_IN MTL_DEBUG2("==> %s\n", __func__) +#define FUNC_OUT MTL_DEBUG2("<== %s\n", __func__) +#define MT_RETURN(rc) { FUNC_OUT; \ + return (rc); } + +#define MT_RETV { FUNC_OUT ; \ + return; } + + + +#if defined(__LINUX__) && defined(MT_KERNEL) && defined(__i386__) +#define STACK_OK ( \ + { \ + u_int32_t vsp=0, left, ret; \ + asm ("movl %%esp, %0;" \ + : "=r"(vsp) \ + : ); \ + left = vsp-((u_int32_t)current+sizeof(struct task_struct)); \ + if ( left < 0x400 ) { \ + MTL_ERROR1("you have less then 0x400 bytes of stack left\n"); \ + ret = 0; \ + } \ + else { \ + MTL_DEBUG1("%s: stack depth left = %d bytes\n", __FUNCTION__, left); \ + ret = 1; \ + } \ + ret; \ +} \ +) + +#define MT_RETURN_IF_LOW_STACK(stack_watermark) {\ + u_int32_t vsp=0, left; \ + asm ("movl %%esp, %0;" \ + : "=r"(vsp) \ + : ); \ + left = vsp-((u_int32_t)current+sizeof(struct task_struct)); \ + if ( left < stack_watermark) { \ + MTL_ERROR1(MT_FLFMT("%s: you have less then %u bytes of stack left (%uB left)\n"),__func__,\ + stack_watermark,left); \ + return -255;\ + }\ +} + +#else /* __LINUX__ && defined MT_KERNEL */ +#define STACK_OK 1 +#define MT_RETURN_IF_LOW_STACK(stack_watermark) do {} while (0) +#endif + +/* an empty macro */ +#define EMPTY + +#ifdef __cplusplus +} +#endif + +#endif /* H_MTL_COMMON_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_log.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_log.h new file mode 100644 index 00000000..1df14d23 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/mtl_log.h @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_MTL_LOG_H +#define H_MTL_LOG_H + +#define MTL_TRACE(S, F, A...) _MTL_TRACE(MTL_MODULE, S, F, ## A) +#define _MTL_TRACE(M, S, F, A...) __MTL_TRACE(M, S, F, ## A) +#define __MTL_TRACE(M, S, F, A...) ___MTL_TRACE(#M, S, F, ## A) +#if 1 <= MAX_TRACE +#define ___MTL_TRACE(M, S, F, A...) 
mtl_log(M, mtl_log_trace, S, F, ## A) +#else +#define ___MTL_TRACE(M, S, F, A...) +#endif + +#define MTL_TRACE1(F, A...) _MTL_TRACE1(MTL_MODULE, F, ## A) +#define _MTL_TRACE1(M, F, A...) __MTL_TRACE1(M, F, ## A) +#define __MTL_TRACE1(M, F, A...) ___MTL_TRACE1(#M, F, ## A) +#if 1 <= MAX_TRACE +#define ___MTL_TRACE1(M, F, A...) mtl_log(M, mtl_log_trace, '1', F, ## A) +#else +#define ___MTL_TRACE1(M, F, A...) +#endif + +#define MTL_TRACE2(F, A...) _MTL_TRACE2(MTL_MODULE, F, ## A) +#define _MTL_TRACE2(M, F, A...) __MTL_TRACE2(M, F, ## A) +#define __MTL_TRACE2(M, F, A...) ___MTL_TRACE2(#M, F, ## A) +#if 2 <= MAX_TRACE +#define ___MTL_TRACE2(M, F, A...) mtl_log(M, mtl_log_trace, '2', F, ## A) +#else +#define ___MTL_TRACE2(M, F, A...) +#endif + +#define MTL_TRACE3(F, A...) _MTL_TRACE3(MTL_MODULE, F, ## A) +#define _MTL_TRACE3(M, F, A...) __MTL_TRACE3(M, F, ## A) +#define __MTL_TRACE3(M, F, A...) ___MTL_TRACE3(#M, F, ## A) +#if 3 <= MAX_TRACE +#define ___MTL_TRACE3(M, F, A...) mtl_log(M, mtl_log_trace, '3', F, ## A) +#else +#define ___MTL_TRACE3(M, F, A...) +#endif + +#define MTL_TRACE4(F, A...) _MTL_TRACE4(MTL_MODULE, F, ## A) +#define _MTL_TRACE4(M, F, A...) __MTL_TRACE4(M, F, ## A) +#define __MTL_TRACE4(M, F, A...) ___MTL_TRACE4(#M, F, ## A) +#if 4 <= MAX_TRACE +#define ___MTL_TRACE4(M, F, A...) mtl_log(M, mtl_log_trace, '4', F, ## A) +#else +#define ___MTL_TRACE4(M, F, A...) +#endif + +#define MTL_TRACE5(F, A...) _MTL_TRACE5(MTL_MODULE, F, ## A) +#define _MTL_TRACE5(M, F, A...) __MTL_TRACE5(M, F, ## A) +#define __MTL_TRACE5(M, F, A...) ___MTL_TRACE5(#M, F, ## A) +#if 5 <= MAX_TRACE +#define ___MTL_TRACE5(M, F, A...) mtl_log(M, mtl_log_trace, '5', F, ## A) +#else +#define ___MTL_TRACE5(M, F, A...) +#endif + +#define MTL_TRACE6(F, A...) _MTL_TRACE6(MTL_MODULE, F, ## A) +#define _MTL_TRACE6(M, F, A...) __MTL_TRACE6(M, F, ## A) +#define __MTL_TRACE6(M, F, A...) ___MTL_TRACE6(#M, F, ## A) +#if 6 <= MAX_TRACE +#define ___MTL_TRACE6(M, F, A...) mtl_log(M, mtl_log_trace, '6', F, ## A) +#else +#define ___MTL_TRACE6(M, F, A...) +#endif + +#define MTL_TRACE7(F, A...) _MTL_TRACE7(MTL_MODULE, F, ## A) +#define _MTL_TRACE7(M, F, A...) __MTL_TRACE7(M, F, ## A) +#define __MTL_TRACE7(M, F, A...) ___MTL_TRACE7(#M, F, ## A) +#if 7 <= MAX_TRACE +#define ___MTL_TRACE7(M, F, A...) mtl_log(M, mtl_log_trace, '7', F, ## A) +#else +#define ___MTL_TRACE7(M, F, A...) +#endif + +#define MTL_TRACE8(F, A...) _MTL_TRACE8(MTL_MODULE, F, ## A) +#define _MTL_TRACE8(M, F, A...) __MTL_TRACE8(M, F, ## A) +#define __MTL_TRACE8(M, F, A...) ___MTL_TRACE8(#M, F, ## A) +#if 8 <= MAX_TRACE +#define ___MTL_TRACE8(M, F, A...) mtl_log(M, mtl_log_trace, '8', F, ## A) +#else +#define ___MTL_TRACE8(M, F, A...) +#endif + +#define MTL_TRACE9(F, A...) _MTL_TRACE9(MTL_MODULE, F, ## A) +#define _MTL_TRACE9(M, F, A...) __MTL_TRACE9(M, F, ## A) +#define __MTL_TRACE9(M, F, A...) ___MTL_TRACE9(#M, F, ## A) +#if 9 <= MAX_TRACE +#define ___MTL_TRACE9(M, F, A...) mtl_log(M, mtl_log_trace, '9', F, ## A) +#else +#define ___MTL_TRACE9(M, F, A...) +#endif + +#define MTL_DEBUG(S, F, A...) _MTL_DEBUG(MTL_MODULE, S, F, ## A) +#define _MTL_DEBUG(M, S, F, A...) __MTL_DEBUG(M, S, F, ## A) +#define __MTL_DEBUG(M, S, F, A...) ___MTL_DEBUG(#M, S, F, ## A) +#if 1 <= MAX_DEBUG +#define ___MTL_DEBUG(M, S, F, A...) mtl_log(M, mtl_log_debug, S, F, ## A) +#else +#define ___MTL_DEBUG(M, S, F, A...) +#endif + +#define MTL_DEBUG1(F, A...) _MTL_DEBUG1(MTL_MODULE, F, ## A) +#define _MTL_DEBUG1(M, F, A...) __MTL_DEBUG1(M, F, ## A) +#define __MTL_DEBUG1(M, F, A...) 
___MTL_DEBUG1(#M, F, ## A) +#if 1 <= MAX_DEBUG +#define ___MTL_DEBUG1(M, F, A...) mtl_log(M, mtl_log_debug, '1', F, ## A) +#else +#define ___MTL_DEBUG1(M, F, A...) +#endif + +#define MTL_DEBUG2(F, A...) _MTL_DEBUG2(MTL_MODULE, F, ## A) +#define _MTL_DEBUG2(M, F, A...) __MTL_DEBUG2(M, F, ## A) +#define __MTL_DEBUG2(M, F, A...) ___MTL_DEBUG2(#M, F, ## A) +#if 2 <= MAX_DEBUG +#define ___MTL_DEBUG2(M, F, A...) mtl_log(M, mtl_log_debug, '2', F, ## A) +#else +#define ___MTL_DEBUG2(M, F, A...) +#endif + +#define MTL_DEBUG3(F, A...) _MTL_DEBUG3(MTL_MODULE, F, ## A) +#define _MTL_DEBUG3(M, F, A...) __MTL_DEBUG3(M, F, ## A) +#define __MTL_DEBUG3(M, F, A...) ___MTL_DEBUG3(#M, F, ## A) +#if 3 <= MAX_DEBUG +#define ___MTL_DEBUG3(M, F, A...) mtl_log(M, mtl_log_debug, '3', F, ## A) +#else +#define ___MTL_DEBUG3(M, F, A...) +#endif + +#define MTL_DEBUG4(F, A...) _MTL_DEBUG4(MTL_MODULE, F, ## A) +#define _MTL_DEBUG4(M, F, A...) __MTL_DEBUG4(M, F, ## A) +#define __MTL_DEBUG4(M, F, A...) ___MTL_DEBUG4(#M, F, ## A) +#if 4 <= MAX_DEBUG +#define ___MTL_DEBUG4(M, F, A...) mtl_log(M, mtl_log_debug, '4', F, ## A) +#else +#define ___MTL_DEBUG4(M, F, A...) +#endif + +#define MTL_DEBUG5(F, A...) _MTL_DEBUG5(MTL_MODULE, F, ## A) +#define _MTL_DEBUG5(M, F, A...) __MTL_DEBUG5(M, F, ## A) +#define __MTL_DEBUG5(M, F, A...) ___MTL_DEBUG5(#M, F, ## A) +#if 5 <= MAX_DEBUG +#define ___MTL_DEBUG5(M, F, A...) mtl_log(M, mtl_log_debug, '5', F, ## A) +#else +#define ___MTL_DEBUG5(M, F, A...) +#endif + +#define MTL_DEBUG6(F, A...) _MTL_DEBUG6(MTL_MODULE, F, ## A) +#define _MTL_DEBUG6(M, F, A...) __MTL_DEBUG6(M, F, ## A) +#define __MTL_DEBUG6(M, F, A...) ___MTL_DEBUG6(#M, F, ## A) +#if 6 <= MAX_DEBUG +#define ___MTL_DEBUG6(M, F, A...) mtl_log(M, mtl_log_debug, '6', F, ## A) +#else +#define ___MTL_DEBUG6(M, F, A...) +#endif + +#define MTL_DEBUG7(F, A...) _MTL_DEBUG7(MTL_MODULE, F, ## A) +#define _MTL_DEBUG7(M, F, A...) __MTL_DEBUG7(M, F, ## A) +#define __MTL_DEBUG7(M, F, A...) ___MTL_DEBUG7(#M, F, ## A) +#if 7 <= MAX_DEBUG +#define ___MTL_DEBUG7(M, F, A...) mtl_log(M, mtl_log_debug, '7', F, ## A) +#else +#define ___MTL_DEBUG7(M, F, A...) +#endif + +#define MTL_DEBUG8(F, A...) _MTL_DEBUG8(MTL_MODULE, F, ## A) +#define _MTL_DEBUG8(M, F, A...) __MTL_DEBUG8(M, F, ## A) +#define __MTL_DEBUG8(M, F, A...) ___MTL_DEBUG8(#M, F, ## A) +#if 8 <= MAX_DEBUG +#define ___MTL_DEBUG8(M, F, A...) mtl_log(M, mtl_log_debug, '8', F, ## A) +#else +#define ___MTL_DEBUG8(M, F, A...) +#endif + +#define MTL_DEBUG9(F, A...) _MTL_DEBUG9(MTL_MODULE, F, ## A) +#define _MTL_DEBUG9(M, F, A...) __MTL_DEBUG9(M, F, ## A) +#define __MTL_DEBUG9(M, F, A...) ___MTL_DEBUG9(#M, F, ## A) +#if 9 <= MAX_DEBUG +#define ___MTL_DEBUG9(M, F, A...) mtl_log(M, mtl_log_debug, '9', F, ## A) +#else +#define ___MTL_DEBUG9(M, F, A...) +#endif + +#define MTL_ERROR(S, F, A...) _MTL_ERROR(MTL_MODULE, S, F, ## A) +#define _MTL_ERROR(M, S, F, A...) __MTL_ERROR(M, S, F, ## A) +#define __MTL_ERROR(M, S, F, A...) ___MTL_ERROR(#M, S, F, ## A) +#if 1 <= MAX_ERROR +#define ___MTL_ERROR(M, S, F, A...) mtl_log(M, mtl_log_error, S, F, ## A) +#else +#define ___MTL_ERROR(M, S, F, A...) +#endif + +#define MTL_ERROR1(F, A...) _MTL_ERROR1(MTL_MODULE, F, ## A) +#define _MTL_ERROR1(M, F, A...) __MTL_ERROR1(M, F, ## A) +#define __MTL_ERROR1(M, F, A...) ___MTL_ERROR1(#M, F, ## A) +#if 1 <= MAX_ERROR +#define ___MTL_ERROR1(M, F, A...) mtl_log(M, mtl_log_error, '1', F, ## A) +#else +#define ___MTL_ERROR1(M, F, A...) +#endif + +#define MTL_ERROR2(F, A...) 
_MTL_ERROR2(MTL_MODULE, F, ## A) +#define _MTL_ERROR2(M, F, A...) __MTL_ERROR2(M, F, ## A) +#define __MTL_ERROR2(M, F, A...) ___MTL_ERROR2(#M, F, ## A) +#if 2 <= MAX_ERROR +#define ___MTL_ERROR2(M, F, A...) mtl_log(M, mtl_log_error, '2', F, ## A) +#else +#define ___MTL_ERROR2(M, F, A...) +#endif + +#define MTL_ERROR3(F, A...) _MTL_ERROR3(MTL_MODULE, F, ## A) +#define _MTL_ERROR3(M, F, A...) __MTL_ERROR3(M, F, ## A) +#define __MTL_ERROR3(M, F, A...) ___MTL_ERROR3(#M, F, ## A) +#if 3 <= MAX_ERROR +#define ___MTL_ERROR3(M, F, A...) mtl_log(M, mtl_log_error, '3', F, ## A) +#else +#define ___MTL_ERROR3(M, F, A...) +#endif + +#define MTL_ERROR4(F, A...) _MTL_ERROR4(MTL_MODULE, F, ## A) +#define _MTL_ERROR4(M, F, A...) __MTL_ERROR4(M, F, ## A) +#define __MTL_ERROR4(M, F, A...) ___MTL_ERROR4(#M, F, ## A) +#if 4 <= MAX_ERROR +#define ___MTL_ERROR4(M, F, A...) mtl_log(M, mtl_log_error, '4', F, ## A) +#else +#define ___MTL_ERROR4(M, F, A...) +#endif + +#define MTL_ERROR5(F, A...) _MTL_ERROR5(MTL_MODULE, F, ## A) +#define _MTL_ERROR5(M, F, A...) __MTL_ERROR5(M, F, ## A) +#define __MTL_ERROR5(M, F, A...) ___MTL_ERROR5(#M, F, ## A) +#if 5 <= MAX_ERROR +#define ___MTL_ERROR5(M, F, A...) mtl_log(M, mtl_log_error, '5', F, ## A) +#else +#define ___MTL_ERROR5(M, F, A...) +#endif + +#define MTL_ERROR6(F, A...) _MTL_ERROR6(MTL_MODULE, F, ## A) +#define _MTL_ERROR6(M, F, A...) __MTL_ERROR6(M, F, ## A) +#define __MTL_ERROR6(M, F, A...) ___MTL_ERROR6(#M, F, ## A) +#if 6 <= MAX_ERROR +#define ___MTL_ERROR6(M, F, A...) mtl_log(M, mtl_log_error, '6', F, ## A) +#else +#define ___MTL_ERROR6(M, F, A...) +#endif + +#define MTL_ERROR7(F, A...) _MTL_ERROR7(MTL_MODULE, F, ## A) +#define _MTL_ERROR7(M, F, A...) __MTL_ERROR7(M, F, ## A) +#define __MTL_ERROR7(M, F, A...) ___MTL_ERROR7(#M, F, ## A) +#if 7 <= MAX_ERROR +#define ___MTL_ERROR7(M, F, A...) mtl_log(M, mtl_log_error, '7', F, ## A) +#else +#define ___MTL_ERROR7(M, F, A...) +#endif + +#define MTL_ERROR8(F, A...) _MTL_ERROR8(MTL_MODULE, F, ## A) +#define _MTL_ERROR8(M, F, A...) __MTL_ERROR8(M, F, ## A) +#define __MTL_ERROR8(M, F, A...) ___MTL_ERROR8(#M, F, ## A) +#if 8 <= MAX_ERROR +#define ___MTL_ERROR8(M, F, A...) mtl_log(M, mtl_log_error, '8', F, ## A) +#else +#define ___MTL_ERROR8(M, F, A...) +#endif + +#define MTL_ERROR9(F, A...) _MTL_ERROR9(MTL_MODULE, F, ## A) +#define _MTL_ERROR9(M, F, A...) __MTL_ERROR9(M, F, ## A) +#define __MTL_ERROR9(M, F, A...) ___MTL_ERROR9(#M, F, ## A) +#if 9 <= MAX_ERROR +#define ___MTL_ERROR9(M, F, A...) mtl_log(M, mtl_log_error, '9', F, ## A) +#else +#define ___MTL_ERROR9(M, F, A...) 
+#endif + +#endif /* H_MTL_LOG_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common.def new file mode 100644 index 00000000..9986466a --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common.def @@ -0,0 +1,40 @@ +EXPORTS + mt_strtoull + mtl_log + mtl_basename + mtl_strerror + mtl_strerror_sym + DebugPrint +; windows implementation of tracing + mtl_log_set_name + NT_trace + NT_trace1 + NT_trace2 + NT_trace3 + NT_trace4 + NT_trace5 + NT_trace6 + NT_trace7 + NT_trace8 + NT_trace9 + NT_error + NT_error1 + NT_error2 + NT_error3 + NT_error4 + NT_error5 + NT_error6 + NT_error7 + NT_error8 + NT_error9 + NT_debug + NT_debug1 + NT_debug2 + NT_debug3 + NT_debug4 + NT_debug5 + NT_debug6 + NT_debug7 + NT_debug8 + NT_debug9 + cur_module diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common_kl.def b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common_kl.def new file mode 100644 index 00000000..65090a2e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_common_kl.def @@ -0,0 +1,45 @@ +EXPORTS + DllInitialize private + DllUnload private + mtl_basename + mtl_strerror + mtl_strerror_sym + mtl_log_set + mtl_log + mtl_log_vmalloc + mtl_log_vfree + mtl_log_kmalloc + mtl_log_kfree +; windows implementation of tracing + mtl_log_set_name + NT_trace + NT_trace1 + NT_trace2 + NT_trace3 + NT_trace4 + NT_trace5 + NT_trace6 + NT_trace7 + NT_trace8 + NT_trace9 + NT_error + NT_error1 + NT_error2 + NT_error3 + NT_error4 + NT_error5 + NT_error6 + NT_error7 + NT_error8 + NT_error9 + NT_debug + NT_debug1 + NT_debug2 + NT_debug3 + NT_debug4 + NT_debug5 + NT_debug6 + NT_debug7 + NT_debug8 + NT_debug9 + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_log_win.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_log_win.h new file mode 100644 index 00000000..50cc4cb7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_common/os_dep/win/mtl_log_win.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_MTL_LOG_WIN_H +#define H_MTL_LOG_WIN_H + +#ifndef MTL_MODULE +#define MTL_MODULE __FILE__ +#endif +#define MAKE_MOD_NAME _MAKE_MOD_NAME(MTL_MODULE) +#define _MAKE_MOD_NAME(a) __MAKE_MOD_NAME(a) +#define __MAKE_MOD_NAME(a) #a + +#ifdef USE_RELAY_MOD_NAME +#define RELAY_MOD_NAME mtl_log_set_name( MAKE_MOD_NAME ); +#else +#define RELAY_MOD_NAME +#endif + +void NT_trace(char sev, char *fmt, ...); +void NT_trace1(char *fmt, ...); +void NT_trace2(char *fmt, ...); +void NT_trace3(char *fmt, ...); +void NT_trace4(char *fmt, ...); +void NT_trace5(char *fmt, ...); +void NT_trace6(char *fmt, ...); +void NT_trace7(char *fmt, ...); +void NT_trace8(char *fmt, ...); +void NT_trace9(char *fmt, ...); +void NT_debug(char sev, char *fmt, ...); +void NT_debug1(char *fmt, ...); +void NT_debug2(char *fmt, ...); +void NT_debug3(char *fmt, ...); +void NT_debug4(char *fmt, ...); +void NT_debug5(char *fmt, ...); +void NT_debug6(char *fmt, ...); +void NT_debug7(char *fmt, ...); +void NT_debug8(char *fmt, ...); +void NT_debug9(char *fmt, ...); +void NT_error(char sev, char *fmt, ...); +void NT_error1(char *fmt, ...); +void NT_error2(char *fmt, ...); +void NT_error3(char *fmt, ...); +void NT_error4(char *fmt, ...); +void NT_error5(char *fmt, ...); +void NT_error6(char *fmt, ...); +void NT_error7(char *fmt, ...); +void NT_error8(char *fmt, ...); +void NT_error9(char *fmt, ...); + +static inline void empty_function( char *fmt, ... ) { UNREFERENCED_PARAMETER( fmt ); } +static inline void empty_function1( const int level, ... ) { UNREFERENCED_PARAMETER( level ); } + + +#if 1 <= MAX_TRACE +#define MTL_TRACE RELAY_MOD_NAME NT_trace +#else +#define MTL_TRACE empty_function1 +#endif + +#if 1 <= MAX_TRACE +#define MTL_TRACE1 RELAY_MOD_NAME NT_trace1 +#else +#define MTL_TRACE1 empty_function +#endif + +#if 2 <= MAX_TRACE +#define MTL_TRACE2 RELAY_MOD_NAME NT_trace2 +#else +#define MTL_TRACE2 empty_function +#endif + +#if 3 <= MAX_TRACE +#define MTL_TRACE3 RELAY_MOD_NAME NT_trace3 +#else +#define MTL_TRACE3 empty_function +#endif + +#if 4 <= MAX_TRACE +#define MTL_TRACE4 RELAY_MOD_NAME NT_trace4 +#else +#define MTL_TRACE4 empty_function +#endif + +#if 5 <= MAX_TRACE +#define MTL_TRACE5 RELAY_MOD_NAME NT_trace5 +#else +#define MTL_TRACE5 empty_function +#endif + +#if 6 <= MAX_TRACE +#define MTL_TRACE6 RELAY_MOD_NAME NT_trace6 +#else +#define MTL_TRACE6 empty_function +#endif + +#if 7 <= MAX_TRACE +#define MTL_TRACE7 RELAY_MOD_NAME NT_trace7 +#else +#define MTL_TRACE7 empty_function +#endif + +#if 8 <= MAX_TRACE +#define MTL_TRACE8 RELAY_MOD_NAME NT_trace8 +#else +#define MTL_TRACE8 empty_function +#endif + +#if 9 <= MAX_TRACE +#define MTL_TRACE9 RELAY_MOD_NAME NT_trace9 +#else +#define MTL_TRACE9 empty_function +#endif + +#if 1 <= MAX_DEBUG +#define MTL_DEBUG RELAY_MOD_NAME NT_debug +#else +#define MTL_DEBUG empty_function1 +#endif + +#if 1 <= MAX_DEBUG +#define MTL_DEBUG1 RELAY_MOD_NAME NT_debug1 +#else +#define MTL_DEBUG1 empty_function +#endif + +#if 2 <= MAX_DEBUG +#define MTL_DEBUG2 RELAY_MOD_NAME NT_debug2 +#else +#define MTL_DEBUG2 empty_function +#endif + +#if 3 <= MAX_DEBUG +#define MTL_DEBUG3 RELAY_MOD_NAME NT_debug3 +#else +#define MTL_DEBUG3 empty_function +#endif + +#if 4 <= MAX_DEBUG +#define MTL_DEBUG4 RELAY_MOD_NAME NT_debug4 +#else +#define MTL_DEBUG4 empty_function +#endif + +#if 5 <= MAX_DEBUG +#define MTL_DEBUG5 RELAY_MOD_NAME NT_debug5 +#else +#define MTL_DEBUG5 empty_function +#endif + +#if 6 <= MAX_DEBUG +#define MTL_DEBUG6 RELAY_MOD_NAME NT_debug6 +#else +#define MTL_DEBUG6 
empty_function +#endif + +#if 7 <= MAX_DEBUG +#define MTL_DEBUG7 RELAY_MOD_NAME NT_debug7 +#else +#define MTL_DEBUG7 empty_function +#endif + +#if 8 <= MAX_DEBUG +#define MTL_DEBUG8 RELAY_MOD_NAME NT_debug8 +#else +#define MTL_DEBUG8 empty_function +#endif + +#if 9 <= MAX_DEBUG +#define MTL_DEBUG9 RELAY_MOD_NAME NT_debug9 +#else +#define MTL_DEBUG9 empty_function +#endif + +#if 1 <= MAX_ERROR +#define MTL_ERROR RELAY_MOD_NAME NT_error +#else +#define MTL_ERROR empty_function1 +#endif + +#if 1 <= MAX_ERROR +#define MTL_ERROR1 RELAY_MOD_NAME NT_error1 +#else +#define MTL_ERROR1 empty_function +#endif + +#if 2 <= MAX_ERROR +#define MTL_ERROR2 RELAY_MOD_NAME NT_error2 +#else +#define MTL_ERROR2 empty_function +#endif + +#if 3 <= MAX_ERROR +#define MTL_ERROR3 RELAY_MOD_NAME NT_error3 +#else +#define MTL_ERROR3 empty_function +#endif + +#if 4 <= MAX_ERROR +#define MTL_ERROR4 RELAY_MOD_NAME NT_error4 +#else +#define MTL_ERROR4 empty_function +#endif + +#if 5 <= MAX_ERROR +#define MTL_ERROR5 RELAY_MOD_NAME NT_error5 +#else +#define MTL_ERROR5 empty_function +#endif + +#if 6 <= MAX_ERROR +#define MTL_ERROR6 RELAY_MOD_NAME NT_error6 +#else +#define MTL_ERROR6 empty_function +#endif + +#if 7 <= MAX_ERROR +#define MTL_ERROR7 RELAY_MOD_NAME NT_error7 +#else +#define MTL_ERROR7 empty_function +#endif + +#if 8 <= MAX_ERROR +#define MTL_ERROR8 RELAY_MOD_NAME NT_error8 +#else +#define MTL_ERROR8 empty_function +#endif + +#if 9 <= MAX_ERROR +#define MTL_ERROR9 RELAY_MOD_NAME NT_error9 +#else +#define MTL_ERROR9 empty_function +#endif + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/bit_ops.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/bit_ops.h new file mode 100644 index 00000000..a9d47c5f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/bit_ops.h @@ -0,0 +1,82 @@ +/* + This software is available to you under a choice of one of two + licenses. You may choose to be licensed under the terms of the GNU + General Public License (GPL) Version 2, available at + , or the OpenIB.org BSD + license, available in the LICENSE.TXT file accompanying this + software. These details are also available at + . + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + SOFTWARE. + + Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. +*/ + + + +#ifndef H_MT_BIT_OPS_H +#define H_MT_BIT_OPS_H + +#include + + +/***************************************************************************************** + * Bit manipulation macros + *****************************************************************************************/ + +/* MASK generate a bit mask S bits width */ +#define MASK32(S) ( ((u_int32_t) ~0L) >> (32-(S)) ) + +/* + * BITS generate a bit mask with bits O+S..O set (assumes 32 bit integer). + * numbering bits as following: 31........................76543210 + */ +#define BITS32(O,S) ( MASK32(S) << (O) ) + +/* + * MT_EXTRACT32 macro extracts S bits from (u_int32_t)W with offset O + * and shifts them O places to the right (right justifies the field extracted). 
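+ * For instance, MT_EXTRACT32(0x12345678, 8, 8) right justifies bits 8..15
+ * and yields 0x56. The block below is an illustrative self-check sketch,
+ * not part of the original header; it assumes only the macros defined in
+ * this file.
+ */
+#if 0 /* illustrative only */
+static int bit_ops_selfcheck(void)
+{
+    u_int32_t w = 0;
+    MT_INSERT32(w, 0x56, 8, 8);            /* w becomes 0x00005600 */
+    return MT_EXTRACT32(w, 8, 8) == 0x56;  /* round-trips the field */
+}
+#endif
+/*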
+ */
+#define MT_EXTRACT32(W,O,S) ( ((W)>>(O)) & MASK32(S) )
+
+/*
+ * MT_INSERT32 macro inserts S bits with offset O from field F into word W (u_int32_t)
+ */
+#define MT_INSERT32(W,F,O,S) ((W)= ( ( (W) & (~BITS32(O,S)) ) | (((F) & MASK32(S))<<(O)) ))
+
+/*
+ * MT_INSERT32_BE macro inserts S bits with offset O from field F into the
+ * big-endian word W (u_int32_t)
+ */
+#define MT_INSERT32_BE(W,F,O,S) ((W)= ( ( (W) & CL_HTON32(~BITS32(O,S)) ) | cl_ntoh32(((F) & MASK32(S))<<(O)) ))
+
+/*
+ * MT_EXTRACT_ARRAY32 macro is similar to EXTRACT but works on an array of (u_int32_t),
+ * thus offset may be larger than 32 (but not size).
+ */
+#define MT_EXTRACT_ARRAY32(A,O,S) MT_EXTRACT32(((u_int32_t*)A)[O >> 5],(O & MASK32(5)),S)
+
+/*
+ * MT_INSERT_ARRAY32 macro is similar to INSERT but works on an array of (u_int32_t),
+ * thus offset may be larger than 32 (but not size).
+ */
+#define MT_INSERT_ARRAY32(A,F,O,S) MT_INSERT32(((u_int32_t*)A)[O >> 5],F,(O & MASK32(5)),S)
+
+/*
+ * MT_INSERT_ARRAY32_BE macro is the big-endian variant of MT_INSERT_ARRAY32,
+ * thus offset may be larger than 32 (but not size).
+ */
+#define MT_INSERT_ARRAY32_BE(A,F,O,S) MT_INSERT32_BE(((u_int32_t*)A)[O >> 5],F,(O & MASK32(5)),S)
+
+
+/* swap 32 bit number */
+#define mswab32(x) ((((x) >> 24)&0xff) | (((x) >> 8)&0xff00) | (((x) << 8)&0xff0000) | (((x) << 24)&0xff000000))
+
+
+#endif /* H_MT_BIT_OPS_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/ib_defs.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/ib_defs.h new file mode 100644 index 00000000..32ee2f4b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/ib_defs.h @@ -0,0 +1,427 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_IB_DEFS_H
+#define H_IB_DEFS_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include
+
+typedef u_int8_t IB_port_t;
+#define NULL_IB_PORT 0xFF
+
+typedef u_int16_t IB_lid_t;
+typedef u_int8_t IB_gid_t[16]; /* GID (aka IPv6) H-to-L (big) (network) endianness */
+
+/* IB-spec.
Vol.1 (chapter 4): LID ranges */
+#define MIN_UC_LID 0x0001 /* Unicast LID limits */
+#define MAX_UC_LID 0xBFFF
+#define MIN_MC_LID 0xC000 /* Multicast limits */
+#define MAX_MC_LID 0xFFFE
+#define PERMIS_LID 0xFFFF /* Permissive DLID */
+
+/* Special Multicast QP num */
+#define IB_MULTICAST_QP 0xFFFFFF
+#define IB_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF)
+#define IB_VALID_MULTICAST_LID(lid) (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID))
+
+
+typedef u_int32_t IB_wqpn_t; /* Work QP number: Only 24 LSbits */
+typedef u_int32_t IB_eecn_t; /* EE Context number: Only 24 LSbits */
+typedef u_int8_t IB_guid_t[8]; /* EUI-64: Big-Endian (H-to-L) */
+typedef u_int8_t IB_gid_prefix_t[8]; /* EUI-64: Big-Endian (H-to-L) */
+typedef u_int8_t IB_sl_t; /* 0-15 */
+typedef u_int8_t IB_vl_t; /* 0-15 */
+typedef u_int8_t IB_arbitration_weight_t;
+typedef enum{PRIO_HIGH=0,PRIO_LOW=1} IB_arbitration_prio_t;
+typedef u_int8_t IB_high_prio_limit_t;
+typedef u_int64_t IB_virt_addr_t;
+typedef u_int32_t IB_rkey_t;
+typedef u_int16_t IB_pkey_t;
+typedef u_int32_t IB_qkey_t;
+
+/*** Note *** The following enum must be maintained zero based without holes */
+enum { IB_TS_RC, IB_TS_RD, IB_TS_UC, IB_TS_UD, IB_TS_RAW };
+typedef u_int32_t IB_ts_t;
+
+
+
+#define INVALID_PKEY ((IB_pkey_t)0) /* invalid PKEY. 0x8000 is also invalid but we'll use 0 */
+
+typedef u_int32_t IB_psn_t;
+
+typedef u_int8_t IB_link_width_t; /* Set to a combination of following masks ("OR"ed) */
+#define W1X 1
+#define W4X 2
+#define W12X 8
+#define W_SET2SUPPORTED 255 /* Set LinkWidthEnabled to LinkWidthSupported */
+
+/* 9.7.6.1.3 DETECTING LOST ACKNOWLEDGE MESSAGES AND TIMEOUTS (C9-140)*/
+#define IB_LOCAL_ACK_TIMEOUT_NUM_BITS 5
+
+/* IB-spec. 9.7.5.2.8, table 45
+ * RNR timer values symbols/macros use the convention: IB_RNR_NAK_TIMER_MMM_mm
+ * for the encoding of MMM.mm milliseconds
+ */
+#define IB_RNR_NAK_TIMER_NUM_BITS 5
+enum {
+ IB_RNR_NAK_TIMER_655_36 = 0,
+ IB_RNR_NAK_TIMER_0_01 = 1,
+ IB_RNR_NAK_TIMER_0_02 = 2,
+ IB_RNR_NAK_TIMER_0_03 = 3,
+ IB_RNR_NAK_TIMER_0_04 = 4,
+ IB_RNR_NAK_TIMER_0_06 = 5,
+ IB_RNR_NAK_TIMER_0_08 = 6,
+ IB_RNR_NAK_TIMER_0_12 = 7,
+ IB_RNR_NAK_TIMER_0_16 = 8,
+ IB_RNR_NAK_TIMER_0_24 = 9,
+ IB_RNR_NAK_TIMER_0_32 = 10,
+ IB_RNR_NAK_TIMER_0_48 = 11,
+ IB_RNR_NAK_TIMER_0_64 = 12,
+ IB_RNR_NAK_TIMER_0_96 = 13,
+ IB_RNR_NAK_TIMER_1_28 = 14,
+ IB_RNR_NAK_TIMER_1_92 = 15,
+ IB_RNR_NAK_TIMER_2_56 = 16,
+ IB_RNR_NAK_TIMER_3_84 = 17,
+ IB_RNR_NAK_TIMER_5_12 = 18,
+ IB_RNR_NAK_TIMER_7_68 = 19,
+ IB_RNR_NAK_TIMER_10_24 = 20,
+ IB_RNR_NAK_TIMER_15_36 = 21,
+ IB_RNR_NAK_TIMER_20_48 = 22,
+ IB_RNR_NAK_TIMER_30_72 = 23,
+ IB_RNR_NAK_TIMER_40_96 = 24,
+ IB_RNR_NAK_TIMER_61_44 = 25,
+ IB_RNR_NAK_TIMER_81_92 = 26,
+ IB_RNR_NAK_TIMER_122_88 = 27,
+ IB_RNR_NAK_TIMER_163_84 = 28,
+ IB_RNR_NAK_TIMER_245_76 = 29,
+ IB_RNR_NAK_TIMER_327_68 = 30,
+ IB_RNR_NAK_TIMER_491_52 = 31
+};
+typedef u_int32_t IB_rnr_nak_timer_code_t;
+
+typedef enum {
+ S_NOP=0,
+ S2GB5=1
+} IB_link_speed_t;
+
+typedef enum {
+ PORT_NOP=0, /* No state change */
+ PORT_DOWN=1,
+ PORT_INITIALIZE=2,
+ PORT_ARMED=3,
+ PORT_ACTIVE=4
+} IB_port_state_t;
+
+typedef enum {
+ PHY_NOP=0, /* No state change */
+ PHY_SLEEP=1,
+ PHY_POLLING=2,
+ PHY_DISABLED=3,
+ PHY_PORT_CONF_TRAINING=4,
+ PHY_LINK_UP=5,
+ PHY_LINK_ERR_REC0=6
+} IB_phy_state_t;
+
+enum{MTU256=1,MTU512=2,MTU1024=3,MTU2048=4,MTU4096=5};
+typedef u_int32_t IB_mtu_t;
+
+typedef enum{VL0=1,VL0_1=2,VL0_3=3,VL0_7=4,VL0_14=5} IB_vl_cap_t;
+
+typedef u_int8_t IB_static_rate_t; /* IPD encoding: IB-spec.
9.11.1, table 63 */ + +typedef enum{NODE_CA=1,NODE_SWITCH=2,NODE_ROUTER=3} IB_node_type_t; + +typedef u_int16_t IB_dev_id_t; + +typedef enum { +/* 0: Reserved */ +/* 1: */ IB_CAP_MASK_IS_SM = (1<<1), +/* 2: */ IB_CAP_MASK_IS_NOTICE_SUP = (1<<2), +/* 3: */ IB_CAP_MASK_IS_TRAP_SUP = (1<<3), +/* 4:Reserved */ +/* 5: */ IB_CAP_MASK_IS_AUTO_MIGR_SUP = (1<<5), +/* 6: */ IB_CAP_MASK_IS_SL_MAP_SUP = (1<<6), +/* 7: */ IB_CAP_MASK_IS_MKEY_NVRAM = (1<<7), +/* 8: */ IB_CAP_MASK_IS_PKEY_NVRAM = (1<<8), +/* 9: */ IB_CAP_MASK_IS_LED_INFO_SUP = (1<<9), +/*10: */ IB_CAP_MASK_IS_SM_DISABLED = (1<<10), +/*11: */ IB_CAP_MASK_IS_SYS_IMAGE_GUID_SUP = (1<<11), +/*12: */ IB_CAP_MASK_IS_PKEY_SW_EXT_PORT_TRAP_SUP = (1<<12), +/*13 - 15: RESERVED */ +/*16: */ IB_CAP_MASK_IS_CONN_MGMT_SUP = (1<<16), +/*17: */ IB_CAP_MASK_IS_SNMP_TUNN_SUP = (1<<17), +/*18: */ IB_CAP_MASK_IS_REINIT_SUP = (1<<18), +/*19: */ IB_CAP_MASK_IS_DEVICE_MGMT_SUP = (1<<19), +/*20: */ IB_CAP_MASK_IS_VENDOR_CLS_SUP = (1<<20), +/*21: */ IB_CAP_MASK_IS_DR_NOTICE_SUP = (1<<21), +/*22: */ IB_CAP_MASK_IS_CAP_MASK_NOTICE_SUP = (1<<22), +/*23: */ IB_CAP_MASK_IS_BOOT_MGMT_SUP = (1<<23), +/*24: */ IB_CAP_MASK_IS_LINK_ROUND_TRIP_LATENCY_SUP = (1<<24), //???? NEW +/*25: */ IB_CAP_MASK_IS_CLIENT_REREGISTRATION_SUP = (1<<25) +/*26 - 31: RESERVED */ + +} IB_capability_mask_bits_t; + + +typedef u_int32_t IB_port_cap_mask_t; /* To be used with flags in IB_capability_mask_bits_t */ + +#define IB_CAP_MASK_CLR_ALL(mask) ((mask)=0) +#define IB_CAP_MASK_SET(mask,attr) ((mask)|=(attr)) +#define IB_CAP_MASK_CLR(mask,attr) ((mask)&=(~(attr))) +#define IB_CAP_IS_SET(mask,attr) (((mask)&(attr))!=0) +/* + * This is an internal representation of PortInfo. + * It does not map directly to PortInfo bits. + */ +struct IB_port_info_st { + u_int64_t m_key; + IB_gid_prefix_t gid_prefix; /* Big-endinan (H-to-L) */ + IB_lid_t lid; + IB_lid_t master_sm_lid; + IB_port_cap_mask_t capability_mask; + u_int16_t diag_code; + u_int16_t m_key_lease_period; + IB_port_t local_port_num; + IB_link_width_t link_width_enabled; + IB_link_width_t link_width_supported; + IB_link_width_t link_width_active; + IB_link_speed_t link_speed_supported; + IB_port_state_t port_state; + IB_phy_state_t phy_state; + IB_phy_state_t down_default_state; + u_int8_t m_key_protect; /* 0-3 */ + u_int8_t lmc; /* 0-7 */ + IB_link_speed_t link_speed_active; + IB_link_speed_t link_speed_enabled; + IB_mtu_t neighbor_mtu; + IB_sl_t master_sm_sl; /* 0-15 */ + IB_vl_cap_t vl_cap; + u_int8_t vl_high_limit; + u_int8_t vl_arbitration_high_cap; + u_int8_t vl_arbitration_low_cap; + IB_mtu_t mtu_cap; + u_int8_t vl_stall_count; /* 0-7 */ + u_int8_t hoq_life; /* 0-31 */ + IB_vl_cap_t operational_vl; + MT_bool partition_enforcement_inbound; + MT_bool partition_enforcement_outbound; + MT_bool filter_raw_inbound; + MT_bool filter_raw_outbound; + u_int16_t m_key_violations; + u_int16_t p_key_violations; + u_int16_t q_key_violations; + u_int8_t guid_cap; /* 0-15 */ + u_int8_t subnet_t_o; /* SubnetTimeOut: 0-31 */ + u_int8_t resp_time_val; /* 0-15 */ + u_int8_t local_phy_errs; /* 0-15 */ + u_int8_t overrun_errs; /* 0-15 */ +}; + +struct IB_node_info_st { + u_int8_t base_version; + u_int8_t class_version; + IB_node_type_t node_type; + u_int8_t num_ports; + IB_guid_t node_guid; + IB_guid_t port_guid; + u_int16_t partition_cap; + IB_dev_id_t dev_id; + u_int32_t dev_rev; + IB_port_t local_port_num; + u_int32_t vendor_id; /* Only 24 LS-bits are valid */ +}; + +typedef u_int8_t IB_node_description_t[64]; /* consider other UNICODE string 
representation */
+
+struct IB_switch_info_st {
+ u_int16_t linear_fdb_cap;
+ u_int16_t random_fdb_cap;
+ u_int16_t mcast_fdb_cap;
+ u_int16_t linear_fdb_top;
+ IB_port_t default_port;
+ IB_port_t default_mcast_primary_port;
+ IB_port_t default_mcast_not_primary_port;
+ u_int8_t lifetime_val; /* Only 5 LS-bits are valid */
+ MT_bool port_state_change;
+ u_int16_t lids_per_port;
+ u_int16_t partition_enforcement_cap;
+ MT_bool inbound_enforcement_cap;
+ MT_bool outbound_enforcement_cap;
+ MT_bool filter_raw_packet_inbound_cap;
+ MT_bool filter_raw_packet_outbound_cap;
+};
+
+typedef struct IB_grh_st {
+ u_int8_t IP_version; /* Only 4 LS-bits */
+ u_int8_t traffic_class;
+ u_int32_t flow_label; /* Only 20 LS-bits */
+ u_int16_t payload_length;
+ u_int8_t next_header;
+ u_int8_t hop_limit;
+ IB_gid_t sgid; /* H-to-L (big) (network) endianness */
+ IB_gid_t dgid;
+}IB_grh_t;
+
+/* IB headers sizes in bytes */
+#define IB_LRH_LEN 8
+#define IB_GRH_LEN 40 /* size of the GRH (in the actual packet) */
+#define IB_BTH_LEN 12
+#define IB_DETH_LEN 8
+#define IB_MAD_LEN 256 /* size of a MAD payload */
+
+
+struct IB_vl_weight_element_st {
+ IB_vl_t vl;
+ IB_arbitration_weight_t weight;
+};
+#define SET_END_OF_VL_WEIGHT_TAB(vlw) (vlw).weight = 0
+#define IS_END_OF_VL_WEIGHT_TAB(vlw) ((vlw).weight == 0)
+#define IB_MAX_VL_ARBITRATION_ENTRIES 64
+
+
+typedef enum {
+ IB_COMP_SUCCESS,
+ IB_COMP_LOC_LEN_ERR,
+ IB_COMP_LOC_QP_OP_ERR,
+ IB_COMP_LOC_EE_OP_ERR,
+ IB_COMP_LOC_PROT_ERR,
+ IB_COMP_WR_FLUSH_ERR,
+ IB_COMP_MW_BIND_ERR,
+ IB_COMP_BAD_RESP_ERR,
+ IB_COMP_LOC_ACCS_ERR,
+ IB_COMP_REM_INV_REQ_ERR,
+ IB_COMP_REM_ACCESS_ERR,
+ IB_COMP_REM_OP_ERR,
+ IB_COMP_RETRY_EXC_ERR,
+ IB_COMP_RNR_RETRY_EXC_ERR,
+ IB_COMP_LOC_RDD_VIOL_ERR,
+ IB_COMP_REM_INV_RD_REQ_ERR,
+ IB_COMP_REM_ABORT_ERR,
+ IB_COMP_INV_EECN_ERR,
+ IB_COMP_INV_EEC_STATE_ERR,
+/* IB_COMP_LOC_TOUT,*/ /* Use IB_COMP_RETRY_EXC_ERR instead */
+/* IB_COMP_RNR_TOUT,*/ /* Use IB_COMP_RNR_RETRY_EXC_ERR instead */
+
+ IB_COMP_FATAL_ERR,
+ IB_COMP_GENERAL_ERR
+} IB_comp_status_t;
+
+#define IB_PSN_MAX ((int32_t)0xffffff)
+#define IB_PSN_ADD(a,b) (((int32_t)(a)+(int32_t)(b))& IB_PSN_MAX)
+#define IB_PSN_SUB(a,b) (((int32_t)(a)-(int32_t)(b))& IB_PSN_MAX)
+/* a <= b, that is b follows a: FIXME: might be off by one here */
+#define IB_PSN_LE(a,b) (IB_PSN_SUB((b),(a)) <= IB_PSN_MAX/2)
+#define IB_PSN_GE(a,b) (IB_PSN_LE((b),(a)))
+
+#define IB_PSN_IS_VALID(a) ((((int32_t)(a)) & (~ IB_PSN_MAX)) == 0 )
+#define IB_PSN_IS_INVALID(a) (((int32_t)(a)) & (~ IB_PSN_MAX))
+
+
+/*
+ * xCA interface general data structures.
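+ *
+ * (A brief note on the PSN helpers defined just above, with illustrative
+ * values only: IB_PSN_ADD(0xFFFFFF, 1) wraps to 0, and
+ * IB_PSN_LE(0xFFFFF0, 0x000010) holds because the 24-bit distance is 0x20,
+ * which is below IB_PSN_MAX/2.)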
+ * + */ + + + +typedef void* IB_wrid_t; +#define IB_INVALID_WRID 0 + +typedef struct { + u_int64_t ibva; + + u_int32_t ibva_l; /* TBD - remove this in the future */ + u_int32_t ibva_h; /* TBD - remove this in the near future */ + + IB_rkey_t rkey; +} IB_raddr_t; + +typedef struct { + union + { + MT_virt_addr_t va; + MT_phys_addr_t pa; + } addr; + + u_int32_t lkey; + u_int32_t size; +} IB_sge_t; + +typedef struct { + u_int32_t byte_count; /* Sum of size of all s/g entries */ + u_int32_t entry_count; /* Number of s/g entries in list */ + + enum {IB_SGE_VIRT, IB_SGE_PHYS} addr_type; + + IB_sge_t *list; + +} IB_sge_list_t; + + +typedef enum { + IB_WR_RDMA_WRITE, + IB_WR_RDMA_WRITE_WITH_IMM, + IB_WR_SEND, + IB_WR_SEND_WITH_IMM, + IB_WR_RDMA_READ, + IB_WR_ATOMIC_CMP_AND_SWP, + IB_WR_ATOMIC_FETCH_AND_ADD, + IB_WR_RECEIVE +} IB_wr_opcode_t; + +/* Address Vector */ +typedef struct { + IB_sl_t sl; /* Service Level 4 bits */ + IB_lid_t dlid; /* Destination LID */ + u_int8_t src_path_bits; /* Source path bits 7 bits */ + IB_static_rate_t static_rate; /* Maximum static rate : 6 bits */ + + MT_bool grh_flag; /* Send GRH flag */ + /* For global destination or Multicast address:*/ + u_int8_t traffic_class; /* TClass 8 bits */ + u_int32_t flow_label; /* Flow Label 20 bits */ + u_int8_t hop_limit; /* Hop Limit 8 bits */ + u_int8_t sgid_index; /* SGID index in SGID table */ + IB_gid_t dgid; /* Destination GID */ + +} IB_ud_av_t; + + + +#ifdef __cplusplus +} +#endif + +#endif /* H_IB_DEFS_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_errno.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_errno.h new file mode 100644 index 00000000..80700c34 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_errno.h @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+#ifndef H_MTL_ERRNO_H
+#define H_MTL_ERRNO_H
+
+#ifndef __DARWIN__
+ #if defined(__KERNEL__) && defined(__LINUX__)
+ #include
+ #else
+ #include
+ #endif
+#else
+ #include
+#endif
+
+ /****************** General Purpose Error Codes (0 to -999) *****************/
+#ifndef ETIMEDOUT
+#define ETIMEDOUT (110)
+#endif
+
+#ifndef ENOSYS
+#define ENOSYS 38 /* Function not implemented */
+#endif
+
+#ifndef EINVAL
+#define EINVAL 22 /* Invalid argument */
+#endif
+
+
+
+#define ERROR_LIST_GENERAL \
+ INFO( MT_OK, 0, "success" ) \
+ INFO( MT_ERROR, -1, "generic error" ) \
+ INFO( MT_ENOINIT, -2, "module not initialized" ) \
+ INFO( MT_EINVAL, -3, "invalid argument" ) \
+ INFO( MT_ENORSC, -4, "No such resource (probably out of range)" ) \
+ INFO( MT_EPERM, -5, "Not enough permissions to perform operation" ) \
+ INFO( MT_ENOSYS, -6, "The system doesn't support the requested operation" ) \
+ INFO( MT_EAGAIN, -7, "Resource temporarily unavailable" ) \
+ INFO( MT_EALIGN, -8, "Alignment error (offset/size not aligned)" ) \
+ INFO( MT_EDEADLK, -9, "Resource deadlock avoided" ) \
+ INFO( MT_ENOENT, -10, "No such file or directory" ) \
+ INFO( MT_EACCES, -11, "Permission denied" ) \
+ INFO( MT_EINTR, -12, "process received interrupt") \
+ INFO( MT_ESTATE, -13, "Invalid state") \
+ INFO( MT_ESYSCALL, -14,"Error in an underlying O/S call") \
+ INFO( MT_ETIMEDOUT, -ETIMEDOUT,"Operation timed out" ) \
+ INFO( MT_SYS_EINVAL, -EINVAL, "Invalid argument")\
+ INFO( MT_ENOMOD, -ENOSYS, "module not loaded") /* When a module is not loaded, the syscall returns ENOSYS */
+
+
+
+ /**************** Memory Handling Error Codes (-1000 to -1199) **************/
+
+
+#define ERROR_LIST_MEMORY \
+ INFO( MT_EKMALLOC, -1000, "Can't allocate kernel memory" ) \
+ INFO( MT_ENOMEM, -1001, "Given address doesn't match process address space" ) \
+ INFO( MT_EMALLOC, -1002, "malloc fail") \
+ INFO( MT_EFAULT, -1003, "Bad address" )
+
+ /****************** General Device Error Codes (-1200 to -1399) *************/
+
+#define ERROR_LIST_DEVICE \
+ INFO( MT_ENODEV, -1200, "No such device" ) \
+ INFO( MT_EBUSY, -1201, "Device or resource busy (or used by another)" ) \
+ INFO( MT_EBUSBUSY, -1202, "Bus busy" )
+
+ /*********************** I2C Error Codes (-1400 to -1499) *******************/
+
+#define ERROR_LIST_I2C \
+ INFO( MT_EI2CNACK, -1400, "I2C: received NACK from slave" ) \
+ INFO( MT_EI2PINHI, -1401, "I2C: Pending Interrupt Not (PIN) does not become low" ) \
+ INFO( MT_EI2TOUT, -1402, "I2C: Operation has timed out" )
+
+#define ERROR_LIST ERROR_LIST_GENERAL ERROR_LIST_MEMORY ERROR_LIST_DEVICE ERROR_LIST_I2C
+
+ /**
+ ** See the full list of POSIX errors at the end of this file
+ **/
+
+
+typedef enum {
+#define INFO(A,B,C) A = B,
+ ERROR_LIST
+#undef INFO
+ MT_DUMMY_ERROR /* this one is needed to quiet a warning from -pedantic */
+} call_result_t;
+
+#endif /* H_MTL_ERRNO_H */
+
+#if 0
+
+ The following list is derived automatically from
+ ISO/IEC 9945-1: 1996 ANSI/IEEE Std 1003.1, 1996 Edition
+ Chapter 2.4 Error Numbers
+
+
+ If you add a new MT_ error please consider one from this list
+
+ INFO( E2BIG, xxx, "Arg list too long" ) \
+ INFO( EAGAIN, xxx, "Resource temporarily unavailable" ) \
+ INFO( EBADF, xxx, "Bad file descriptor" ) \
+ INFO( EBADMSG, xxx, "Bad message" ) \
+ INFO( EBUSY, xxx, "Resource busy" ) \
+ INFO( ECANCELED, xxx, "Operation canceled" ) \
+ INFO( ECHILD, xxx, "No child processes" ) \
+ INFO( EDEADLK, xxx, "Resource deadlock avoided" ) \
+ INFO( EDOM, xxx, "Domain error" ) \
+ INFO( EEXIST, xxx, "File exists" ) \
INFO( EFAULT, xxx, "Bad address" ) \ + INFO( EFBIG, xxx, "File too large" ) \ + INFO( EINPROGRESS, xxx, "Operation in progress" ) \ + INFO( EINTR, xxx, "Interrupted function call" ) \ + INFO( EINVAL, xxx, "Invalid argument" ) \ + INFO( EISDIR, xxx, "Is a directory" ) \ + INFO( EMFILE, xxx, "Too many open files" ) \ + INFO( EMLINK, xxx, "Too many links" ) \ + INFO( EMSGSIZE, xxx, "Inappropriate message buffer length" ) \ + INFO( ENAMETOOLONG, xxx, "Filename too long" ) \ + INFO( ENFILE, xxx, "Too many open files in system" ) \ + INFO( ENODEV, xxx, "No such device" ) \ + INFO( ENOEXEC, xxx, "Exec format error" ) \ + INFO( ENOLCK, xxx, "No locks available" ) \ + INFO( ENOMEM, xxx, "Not enough space" ) \ + INFO( ENOSPC, xxx, "No space left on device" ) \ + INFO( ENOSYS, xxx, "Function not implemented" ) \ + INFO( ENOTDIR, xxx, "Not a directory" ) \ + INFO( ENOTEMPTY, xxx, "Directory not empty" ) \ + INFO( ENOTSUP, xxx, "Not supported" ) \ + INFO( ENOTTY, xxx, "Inappropriate I/O control operation" ) \ + INFO( ENXIO, xxx, "No such device or address" ) \ + INFO( EPERM, xxx, "Operation not permitted" ) \ + INFO( EPIPE, xxx, "Broken pipe" ) \ + INFO( ERANGE, xxx, "Result too large" ) \ + INFO( EROFS, xxx, "Read-only file system" ) \ + INFO( ESPIPE, xxx, "Invalid seek" ) \ + INFO( ESRCH, xxx, "No such process" ) \ + INFO( EXDEV, xxx, "Improper link" ) \ + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_pci_types.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_pci_types.h new file mode 100644 index 00000000..ac314980 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_pci_types.h @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include + +#define MOSAL_PCI_HEADER_TYPE1 0x01 /* Bridge type device */ +#define MOSAL_PCI_HEADER_TYPE0 0x00 /* Normal type device */ + + +#ifdef MT_LITTLE_ENDIAN + +typedef struct { + u_int16_t vid; /* vendor ID */ + u_int16_t devid; /* device ID */ + u_int16_t cmd; /* command register */ + u_int16_t status; /* status register */ + u_int8_t revid; /* revision ID */ + u_int8_t class_code; /* class code */ + u_int8_t subclass; /* sub class code */ + u_int8_t progif; /* programming interface */ + u_int8_t cache_line; /* cache line */ + u_int8_t latency; /* latency time */ + u_int8_t header_type; /* header type */ + u_int8_t bist; /* BIST */ + u_int32_t base0; /* base address 0 */ + u_int32_t base1; /* base address 1 */ + u_int32_t base2; /* base address 2 */ + u_int32_t base3; /* base address 3 */ + u_int32_t base4; /* base address 4 */ + u_int32_t base5; /* base address 5 */ + u_int32_t cis; /* cardBus CIS pointer */ + u_int16_t sub_vid; /* sub system vendor ID */ + u_int16_t sub_sysid; /* sub system ID */ + u_int32_t rom_base; /* expansion ROM base address */ + u_int32_t reserved0; /* reserved */ + u_int32_t reserved1; /* reserved */ + u_int8_t int_line; /* interrupt line */ + u_int8_t int_pin; /* interrupt pin */ + u_int8_t min_grant; /* min Grant */ + u_int8_t max_latency; /* max Latency */ +} MOSAL_PCI_hdr_type0_t; + + + +typedef struct { + u_int16_t vid; /* vendor ID */ + u_int16_t devid; /* device ID */ + u_int16_t cmd; /* command register */ + u_int16_t status; /* status register */ + u_int8_t revid; /* revision ID */ + u_int8_t class_code; /* class code */ + u_int8_t sub_class; /* sub class code */ + u_int8_t progif; /* programming interface */ + u_int8_t cache_line; /* cache line */ + u_int8_t latency; /* latency time */ + u_int8_t header_type; /* header type */ + u_int8_t bist; /* BIST */ + u_int32_t base0; /* base address 0 */ + u_int32_t base1; /* base address 1 */ + u_int8_t pri_bus; /* primary bus number */ + u_int8_t sec_bus; /* secondary bus number */ + u_int8_t sub_bus; /* subordinate bus number */ + u_int8_t sec_latency; /* secondary latency timer */ + u_int8_t iobase; /* IO base */ + u_int8_t iolimit; /* IO limit */ + u_int16_t sec_status; /* secondary status */ + u_int16_t mem_base; /* memory base */ + u_int16_t mem_limit; /* memory limit */ + u_int16_t pre_base; /* prefetchable memory base */ + u_int16_t pre_limit; /* prefetchable memory limit */ + u_int32_t pre_base_upper; /* prefetchable memory base upper 32 bits */ + u_int32_t pre_limit_upper; /* prefetchable memory base upper 32 bits */ + u_int16_t io_base_upper; /* IO base upper 16 bits */ + u_int16_t io_limit_upper; /* IO limit upper 16 bits */ + u_int32_t reserved; /* reserved */ + u_int32_t rom_base; /* expansion ROM base address */ + u_int8_t int_line; /* interrupt line */ + u_int8_t int_pin; /* interrupt pin */ + u_int16_t control; /* bridge control */ + +} MOSAL_PCI_hdr_type1_t; + +#else /* MT_BIG_ENDIAN */ + +typedef struct { + + u_int16_t devid; /* device ID */ + u_int16_t vid; /* vendor ID */ + + u_int16_t status; /* status register */ + u_int16_t cmd; /* command register */ + + u_int8_t progif; /* programming interface */ + u_int8_t subclass; /* sub class code */ + u_int8_t class_code; /* class code */ + u_int8_t revid; /* revision ID */ + + u_int8_t bist; /* BIST */ + u_int8_t header_type; /* header type */ + u_int8_t latency; /* latency time */ + u_int8_t cache_line; /* cache line */ + + u_int32_t base0; /* base address 0 */ + u_int32_t base1; /* base address 1 */ + u_int32_t 
base2; /* base address 2 */ + u_int32_t base3; /* base address 3 */ + u_int32_t base4; /* base address 4 */ + u_int32_t base5; /* base address 5 */ + + u_int32_t cis; /* cardBus CIS pointer */ + + u_int16_t sub_sysid; /* sub system ID */ + u_int16_t sub_vid; /* sub system vendor ID */ + + u_int32_t rom_base; /* expansion ROM base address */ + u_int32_t reserved0; /* reserved */ + u_int32_t reserved1; /* reserved */ + + u_int8_t max_latency; /* max Latency */ + u_int8_t min_grant; /* min Grant */ + u_int8_t int_pin; /* interrupt pin */ + u_int8_t int_line; /* interrupt line */ + +} MOSAL_PCI_hdr_type0_t; + + + +typedef struct { + u_int16_t devid; /* device ID */ + u_int16_t vid; /* vendor ID */ + + u_int16_t status; /* status register */ + u_int16_t cmd; /* command register */ + + u_int8_t progif; /* programming interface */ + u_int8_t sub_class; /* sub class code */ + u_int8_t class_code; /* class code */ + u_int8_t revid; /* revision ID */ + + u_int8_t bist; /* BIST */ + u_int8_t header_type; /* header type */ + u_int8_t latency; /* latency time */ + u_int8_t cache_line; /* cache line */ + + + u_int32_t base0; /* base address 0 */ + u_int32_t base1; /* base address 1 */ + + u_int8_t sec_latency; /* secondary latency timer */ + u_int8_t sub_bus; /* subordinate bus number */ + u_int8_t sec_bus; /* secondary bus number */ + u_int8_t pri_bus; /* primary bus number */ + + u_int16_t sec_status; /* secondary status */ + u_int8_t iolimit; /* IO limit */ + u_int8_t iobase; /* IO base */ + + u_int16_t mem_limit; /* memory limit */ + u_int16_t mem_base; /* memory base */ + + u_int16_t pre_limit; /* prefetchable memory limit */ + u_int16_t pre_base; /* prefetchable memory base */ + + u_int32_t pre_base_upper; /* prefetchable memory base upper 32 bits */ + u_int32_t pre_limit_upper; /* prefetchable memory base upper 32 bits */ + + u_int16_t io_limit_upper; /* IO limit upper 16 bits */ + u_int16_t io_base_upper; /* IO base upper 16 bits */ + + u_int32_t reserved; /* reserved */ + u_int32_t rom_base; /* expansion ROM base address */ + + u_int16_t control; /* bridge control */ + u_int8_t int_pin; /* interrupt pin */ + u_int8_t int_line; /* interrupt line */ + +} MOSAL_PCI_hdr_type1_t; + +#endif + +typedef union { + MOSAL_PCI_hdr_type0_t type0; + MOSAL_PCI_hdr_type1_t type1; +} MOSAL_PCI_cfg_hdr_t; + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_types.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_types.h new file mode 100644 index 00000000..3d3c6f6c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/mtl_types.h @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MTL_TYPES_H +#define H_MTL_TYPES_H + +#ifdef __cplusplus +extern "C" { +#endif + +#if (defined(__KERNEL__) || defined(KERNEL)) && ! defined(MT_KERNEL) +#define MT_KERNEL 1 +#endif + + + +#include +#include + +typedef unsigned char MT_bool; +typedef unsigned int MT_u_int_t; +#ifndef __cplusplus +/* avoid collision with curses.h */ +#ifndef bool +#define bool MT_bool +#endif +#endif + +#ifndef MT_KERNEL +#ifndef FALSE +#define FALSE 0 +#undef TRUE +#define TRUE (!FALSE) +#endif +#endif + +#define IS_FALSE(b) ((b) == FALSE) +#define IS_TRUE(b) ((b) != FALSE) + +typedef enum{LOGIC_LOW = 0, LOGIC_HIGH = 1} logic_t; + +typedef u_int32_t MT_dev_id_t; + +#define EMPTY + +#define MT_BUS_LIST \ +MT_BUS_ELEM(MEM, =0, "Memory") \ +MT_BUS_ELEM(PCI, EMPTY, "PCI") \ +MT_BUS_ELEM(I2C, EMPTY, "I2C") \ +MT_BUS_ELEM(MPC860, EMPTY, "MPC860") \ +MT_BUS_ELEM(SIM, EMPTY, "SIM") + + + + +typedef enum { +#define MT_BUS_ELEM(x,y,z) x y, + MT_BUS_LIST +#undef MT_BUS_ELEM + MT_DUMMY_BUS +} MT_bus_t; + + +static inline const char* MT_strbus( MT_bus_t bustype) +{ + switch (bustype) { +#define MT_BUS_ELEM(A, B, C) case A: return C; + MT_BUS_LIST +#undef MT_BUS_ELEM + default: return "Unknown bus"; + } +} + + + + +typedef void (*void_func_t)(void); +typedef void (*rx_func_t)(MT_virt_addr_t data, u_int32_t size, void *priv); + +#ifndef NULL +#define NULL 0 +#endif /*NULL*/ + +#ifdef __cplusplus +} +#endif + +#endif /* H_MTL_TYPES_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctl.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctl.h new file mode 100644 index 00000000..7e743d52 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctl.h @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _MD_IOCTL_H_ +#define _MD_IOCTL_H_ + + +/* + * Driver control device name + */ +#define MD_CTL_DEVICE_NAME "MDCTL" + + +/* Base and offset for IO controls codes */ +#define MD_IOCTL_BASE 2050 /* Base code for kernel mode drivers */ + +// +// special ("tools") ioctls +// + +#define MD_SPECIAL_BASE MD_IOCTL_BASE /* 2050 */ +#define MD_SPECIAL_RSRV 10 + +/* common ioctls */ +#define MD_COMMON_NUM 5 +#define MD_COMMON_BASE MD_SPECIAL_BASE /* 2050 */ +#define MD_COMMON_END (MD_COMMON_BASE + MD_COMMON_NUM) /* 2055 */ + + #define WIN_SOFT_RESET (MD_COMMON_BASE + 0) + +/* PCICONF ioctls */ +#define MD_PCICONF_NUM 5 +#define MD_PCICONF_BASE MD_COMMON_END /* 2055 */ +#define MD_PCICONF_END (MD_PCICONF_BASE + MD_PCICONF_NUM) /* 2060 */ + + #define WIN_PCICONF_READ4 (MD_PCICONF_BASE + 0) + #define WIN_PCICONF_WRITE4 (MD_PCICONF_BASE + 1) + #define WIN_PCICONF_MODIFY (MD_PCICONF_BASE + 2) + +/* PCI ioctls */ +#define MD_PCI_NUM 5 +#define MD_PCI_BASE MD_PCICONF_END /* 2060 */ +#define MD_PCI_END (MD_PCI_BASE + MD_PCI_NUM) /* 2065 */ + + #define WIN_PCI_GET_BAR_INFO (MD_PCI_BASE + 0) + +/* common ioctls */ +#define MD_CTL_NUM 5 +#define MD_CTL_BASE MD_PCI_END /* 2065 */ +#define MD_CTL_END (MD_CTL_BASE + MD_CTL_NUM) /* 2070 */ + + #define WIN_CTL_ENUM (MD_CTL_BASE + 0) + +#define MD_SPECIAL_END (MD_CTL_END + MD_SPECIAL_RSRV) /* 2080 */ + +// +// Tavor ioctls +// + +#define MD_TAVOR_BASE MD_SPECIAL_END /* 2080 */ +#define MD_TAVOR_RSRV 200 + +/* MOSAL ioctls */ +#define MOSAL_FUNC_NUM 150 +#define MOSAL_FUNC_MANUAL_NUM 10 /* 2220 */ +#define MOSAL_FUNC_BASE MD_TAVOR_BASE /* 2080 */ +#define MOSAL_FUNC_MANUAL (MOSAL_FUNC_BASE + MOSAL_FUNC_NUM - MOSAL_FUNC_MANUAL_NUM) + + #define K2U_CBK_CBK_INIT (MOSAL_FUNC_MANUAL + 0) /* 2220 */ + #define K2U_CBK_CBK_CLEANUP (MOSAL_FUNC_MANUAL + 1) /* 2221 */ + + +#define MOSAL_FUNC_END (MOSAL_FUNC_BASE + MOSAL_FUNC_NUM) /* 2230 */ + +/* VAPI ioctls */ +#define VAPI_FUNC_NUM 100 +#define VAPI_FUNC_BASE MOSAL_FUNC_END /* 2230 */ +#define VAPI_FUNC_END (VAPI_FUNC_BASE + VAPI_FUNC_NUM) /* 2330 */ + +/* IB_MGT ioctls */ +#define IBMGT_FUNC_NUM 50 +#define IBMGT_FUNC_BASE VAPI_FUNC_END /* 2330 */ + +#define IBMGT_FUNC_END (IBMGT_FUNC_BASE + IBMGT_FUNC_NUM) /* 2380 */ + + +#define MD_TAVOR_END (IBMGT_FUNC_END + MD_TAVOR_RSRV) + + +// +// Gamla ioctls +// + +#define MD_GAMLA_BASE MD_TAVOR_END +#define MD_GAMLA_RSRV 100 + +/* MDD ioctls */ +#define MDD_FUNC_NUM 200 +#define MDD_FUNC_BASE MD_GAMLA_BASE +#define MDD_FUNC_END (MDD_FUNC_BASE + MDD_FUNC_NUM) + + #undef MDD_SYS_BASE + #define MDD_SYS_BASE MDD_FUNC_BASE + +#define MD_GAMLA_END (MDD_FUNC_END + MD_GAMLA_RSRV) + + +/* create function number from IOCTL */ +#define UDLL_MAKE_FUNC(code) ((code >> 2) & 0x00000fff) + +/* create IOCTL from function number - OUT_DIRECT method */ +#define UDLL_MAKE_IOCTL(code) \ + ((FILE_DEVICE_UNKNOWN) << 16) | ((FILE_ANY_ACCESS) << 14) | ((code) << 2) | (METHOD_OUT_DIRECT) + +/* create IOCTL from function number - BUFFERED method */ +#define UDLL_MAKE_IOCTL_BUF(code) \ + ((FILE_DEVICE_UNKNOWN) << 16) | ((FILE_ANY_ACCESS) << 14) | ((code) << 2) | (METHOD_BUFFERED) + +/* MdMosalHelper typedef */ +typedef int (*Md_Mosal_Helper_t)(void *, int ); + +/* commands for MdMosalHelper */ +#define MD_HELPER_CARD_RESET 1 + +#endif // end, #ifndef _MD_IOCTL_H_ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctlSpec.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctlSpec.h new file mode 100644 index 00000000..b51027b4 --- /dev/null +++ 
b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/MdIoctlSpec.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_IOCTL_SPEC_H_ +#define _MD_IOCTL_SPEC_H_ + +/* structures */ +typedef struct PCICONF_WRITE4_S { + unsigned long offset; + unsigned long data; +} PCICONF_WRITE4_T, * PPCICONF_WRITE4_T; + +typedef struct PCICONF_MODIFY_S { + unsigned long offset; + unsigned long data; + unsigned long mask; +} PCICONF_MODIFY_T, * PPCICONF_MODIFY_T; + +typedef struct PCI_BAR_INFO_S { + MT_ulong_ptr_t ptr; + MT_ulong_ptr_t size; + unsigned long LowPhysAddr; + long HighPhysAddr; + unsigned long TotalMemSize; + unsigned long MappedSize; + unsigned long MappedOffset; +} PCICONF_BAR_INFO_T, * PPCICONF_BAR_INFO_T; + +typedef struct CTL_ENUM_S { + unsigned long size; + unsigned long cnt; + unsigned char data[1]; +} CTL_ENUM_T, * PCTL_ENUM_T; + + +#endif + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/endian.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/endian.h new file mode 100644 index 00000000..7b9da2b0 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/endian.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#ifdef __WIN__
+#include
+#else
+#error "Must not be included directly. Something went wrong!"
+#endif
\ No newline at end of file
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_defs.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_defs.h new file mode 100644 index 00000000..5f809ee8 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_defs.h @@ -0,0 +1,375 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MTL_SYS_DEFS_H
+#define H_MTL_SYS_DEFS_H
+
+
+#include
+#include
+#include
+#include
+
+
+#ifdef MT_KERNEL
+ /* include common things */
+ #include
+ #include
+ #include
+ #include
+#else
+ /* replace the non-ANSI name with its ANSI equivalent */
+ #define strdup _strdup
+
+ /*
+ * Includes
+ */
+ #include
+ #include
+ #include
+ #include
+
+ /* usleep */
+ #ifndef usleep
+ #include
+ //#define usleep(x) Sleep((x)/1000)
+ #endif
+#endif
+
+/* definitions both for Kernel and User space */
+
+/* export attribute */
+#ifdef __DLL_EXPORTS__
+#define DLL_API __declspec(dllexport)
+#else
+#define DLL_API
+//#define DLL_API __declspec(dllimport)
+#endif
+#define DLL_INLINE DLL_API inline
+
+/* no bool type */
+#ifndef __cplusplus
+#ifndef bool
+#define bool BOOLEAN
+#endif
+#endif
+
+/* variable for the log implementation on NT */
+extern char * cur_module;
+
+/* inline */
+#define __INLINE__ _inline
+#ifndef inline
+#define inline _inline
+#endif
+
+/* MODULE_LICENSE */
+#define MODULE_LICENSE(a)
+
+/* MTL_LOG */
+/* common for kernel and user */
+#include
+
+#define __attribute__(a)
+#define asmlinkage
+
+
+/* long long constants */
+#define MAKE_LONGLONG(a) (a##i64)
+#define MAKE_ULONGLONG(a) (a##ui64)
+
+/* replace non-ANSI function */
+#define strcasecmp stricmp
+
+/* MS compiler doesn't understand the usage of the macro */
+#define MODULE_PARM(p,a)
+
+/* no PVOID arithmetic in cl */
+#define PCHAR_CAST (char *)
+
+/* allocation on stack */
+#define ALLOCA(a) _alloca(a)
+#define FREEA(a)
+
+/* no __func__ in cl */
+#define __func__ __FUNCTION__
+
+/* rename functions */
+#define srandom srand
+#define random rand
+#define vsnprintf _vsnprintf
+
+/* Endian Conversions */
+#if defined(MT_LITTLE_ENDIAN) || defined (__LITTLE_ENDIAN)
+ #define mt_swap64be(x) letobe64(x)
+ #define mt_swap32be(x) letobe32(x)
+ #define
mt_swap16be(x) letobe16(x) + #define mt_swap64le(x) (x) + #define mt_swap32le(x) (x) + #define mt_swap16le(x) (x) +#else + #define mt_swap64le(x) letobe64(x) + #define mt_swap32le(x) letobe32(x) + #define mt_swap16le(x) letobe16(x) + #define mt_swap64be(x) (x) + #define mt_swap32be(x) (x) + #define mt_swap16be(x) (x) +#endif + +#define MOSAL_cpu_to_be64(x) mt_swap64be(x) +#define MOSAL_be64_to_cpu(x) mt_swap64be(x) +#define MOSAL_cpu_to_be32(x) mt_swap32be(x) +#define MOSAL_be32_to_cpu(x) mt_swap32be(x) +#define MOSAL_cpu_to_be16(x) mt_swap16be(x) +#define MOSAL_be16_to_cpu(x) mt_swap16be(x) + +#define MOSAL_cpu_to_le64(x) mt_swap64le(x) +#define MOSAL_le64_to_cpu(x) mt_swap64le(x) +#define MOSAL_cpu_to_le32(x) mt_swap32le(x) +#define MOSAL_le32_to_cpu(x) mt_swap32le(x) +#define MOSAL_cpu_to_le16(x) mt_swap16le(x) +#define MOSAL_le16_to_cpu(x) mt_swap16le(x) + +#define __cpu_to_be64(x) mt_swap64be(x) +#define __be64_to_cpu(x) mt_swap64be(x) +#define __cpu_to_be32(x) mt_swap32be(x) +#define __be32_to_cpu(x) mt_swap32be(x) +#define __cpu_to_be16(x) mt_swap16be(x) +#define __be16_to_cpu(x) mt_swap16be(x) + +#define __cpu_to_le64(x) mt_swap64le(x) +#define __le64_to_cpu(x) mt_swap64le(x) +#define __cpu_to_le32(x) mt_swap32le(x) +#define __le32_to_cpu(x) mt_swap32le(x) +#define __cpu_to_le16(x) mt_swap16le(x) +#define __le16_to_cpu(x) mt_swap16le(x) + + +/* functions */ +u_int64_t MOSAL_nsecs(void); +static __INLINE__ u_int64_t win_get_time_counter(void) +{ + return MOSAL_nsecs(); +} + + +u_int64_t mt_strtoull (const char *nptr, char **endptr, int base); +#define strtoull mt_strtoull + +#ifndef MIN +#define MIN(a,b) (((a) < (b)) ? (a) : (b)) +#endif +#define MT_MIN(a,b) (((a) < (b)) ? (a) : (b)) + +#ifndef MAX +#define MAX(a,b) (((a) > (b)) ? (a) : (b)) +#endif +#define MT_MAX(a,b) (((a) > (b)) ? (a) : (b)) + +// +// Calculate the byte offset of a field in a structure of type type. +// + +#ifndef FIELD_OFFSET +#define FIELD_OFFSET(type, field) ((LONG)(LONG_PTR)&(((type *)0)->field)) +#endif + + +// +// Calculate the address of the base of the structure given its type, and an +// address of a field within the structure. 
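+// A usage sketch for the CONTAINING_RECORD definition that follows
+// (hypothetical type and member names, illustrative only):
+#if 0
+typedef struct { int a; int b; } DEMO_T;
+/* given p_b == &demo.b, this recovers &demo */
+#define DEMO_FROM_B(p_b) CONTAINING_RECORD((p_b), DEMO_T, b)
+#endif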
+// + +#ifndef CONTAINING_RECORD +#define CONTAINING_RECORD(address, type, field) ((type *)( (PCHAR)(address) - (ULONG_PTR)(&((type *)0)->field))) +#endif + + + +/* definitions for KERNEL space */ + +#ifdef MT_KERNEL + +/* + * Includes + */ + +/* + * general + */ + +/* replace direct call to a Linux function 'printk' */ +#define printk DbgPrint +// #include "..\MDD\mosal\mosal.h" + + +/* + * Defines + */ +#define MT_WIN_SYSTEM_SPACE_START (MM_LOWEST_SYSTEM_ADDRESS) +#define VMALLOC(bsize) QVMALLOC(bsize) +#define VFREE(ptr) QVFREE(ptr) +#define MALLOC(bsize) QMALLOC(bsize) +#define INTR_MALLOC(bsize) QINTR_MALLOC(bsize) +#define FREE(ptr) QFREE(ptr) + +//DLL_API void MOSAL_mem_free( MT_virt_addr_t addr ); +//DLL_API MT_virt_addr_t MOSAL_mem_alloc( MT_size_t size, u_int32_t flags ); +#define QMALLOC(bsize) cl_malloc(bsize) +//(void*)MOSAL_mem_alloc((bsize),0) +#define QINTR_MALLOC(bsize) cl_malloc(bsize) +//(void*)MOSAL_mem_alloc((bsize),1) +#define QFREE(ptr) cl_free((void*)ptr) +//MOSAL_mem_free(ptr) +#define QVMALLOC(bsize) cl_malloc(bsize) +//(void*)MOSAL_mem_alloc((bsize),0) +#define QVFREE(ptr) cl_free((void*)ptr) +//MOSAL_mem_free(ptr) +#define QCMALLOC(bsize,g) cl_malloc(bsize) +//(void*)MOSAL_mem_alloc((bsize),g) + +/* no isalpha() in NTOSKRNL.EXE */ +#ifndef isalpha +#define isalpha(c) (((c) >= 'a' && (c) <= 'z') || ((c) >= 'A' && (c) <= 'Z')) +#endif + +/* swap */ +#define letobe64(x) RtlUlonglongByteSwap(x) +//#define letobe32(x) USER_letobe32(x) // MOSAL_letobe32(x) +#define letobe32(x) RtlUlongByteSwap(x) +#define letobe16(x) RtlUshortByteSwap(x) + +/* functions */ +static inline void MOSAL_rdtsc(volatile u_int64_t *arg) +{ + *arg = KeQueryPerformanceCounter( NULL ).QuadPart; +} + +/* end of Kernel-space stuff */ + +#else + +/* definitions for User space */ + + +u_int64_t MOSAL_get_counts_per_sec(void); + +/* functions */ +static inline void MOSAL_rdtsc(volatile u_int64_t *arg) +{ + QueryPerformanceCounter( (LARGE_INTEGER*)arg ); +} + +_inline void usleep( unsigned long x) +{ + #define MOSAL_CALL_TIME_USECS 110 + #define MIN_TIME_FOR_LONG_SLEEP_USECS 5000 + + static u_int64_t clocks_per_sec = 0; + volatile u_int64_t m1, m2; + u_int32_t elapsed_time_usecs; + + if (x > MIN_TIME_FOR_LONG_SLEEP_USECS) { + Sleep(x / 1000); + return; + } + + /* get CPU calibration value the first time */ + while (!clocks_per_sec) { + + /* get CPU calibration value from MOSAL - it takes around MOSAL_CALL_TIME_USECS */ + clocks_per_sec = MOSAL_get_counts_per_sec(); + + /* if we failed for some reason - try it once more - just to spend the time */ + if (!clocks_per_sec) { + if (x > MOSAL_CALL_TIME_USECS) { + x -= MOSAL_CALL_TIME_USECS; + continue; + } + else + return; + } + else { + if (x > MOSAL_CALL_TIME_USECS) { + x -= MOSAL_CALL_TIME_USECS; + break; + } + else + return; + } + } + + /* wait for the rest time */ + m1 = win_get_time_counter(); + do { + m2 = win_get_time_counter(); + elapsed_time_usecs = (u_int32_t)(((m2 - m1) * 1000000) / clocks_per_sec); + } + while (elapsed_time_usecs < x); + +} + +/* + * Defines + */ + +#define MALLOC(bsize) cl_malloc(bsize) +#define INTR_MALLOC(bsize) cl_malloc(bsize) +#define FREE(ptr) cl_free(ptr) +#define VMALLOC(bsize) cl_malloc(bsize) +#define VFREE(ptr) cl_free(ptr) + +/* swap */ + +#define letobe64(x) cl_hton64(x) +#define letobe32(x) cl_hton32(x) +#define letobe16(x) cl_hton16(x) + + +/* + * prototypes + */ + VOID +DebugPrint( + IN PUCHAR pi_szFormat, + ... 
+ ); + + +/* end of User-space stuff */ + +#endif + +#endif /* H_MTL_SYS_DEFS_H */ + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_types.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_types.h new file mode 100644 index 00000000..abfd3577 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/mtl_sys_types.h @@ -0,0 +1,74 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef H_MTL_SYS_TYPES_H +#define H_MTL_SYS_TYPES_H + +#include + +#ifndef MT_KRNL_CALL +#define MT_KRNL_CALL __stdcall +#endif + +#ifndef MT_USER_CALL +#define MT_USER_CALL __cdecl +#endif + +#ifdef __KERNEL__ +#define MT_API MT_KRNL_CALL +#else +#define MT_API MT_USER_CALL +#endif + +//typedef char int8_t; +typedef uint8_t u_int8_t; +//typedef short int int16_t; +typedef uint16_t u_int16_t; +//typedef int int32_t; +typedef uint32_t u_int32_t; +//typedef __int64 int64_t; +typedef uint64_t u_int64_t; +//typedef _W64 __int3264 intn_t; +typedef uintn_t u_intn_t; + + +/* + * arch_types are defined in such a way that the build environment + * will automatically size things properly. + */ +#include + +#define MT_BYTE_ALIGN(n) +#define MT_PID_FMT MT_ULONG_PTR_FMT + +#endif /* H_MTL_SYS_TYPES_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/unistd.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/unistd.h new file mode 100644 index 00000000..7df58d86 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/unistd.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifdef __WIN__ +#include +#else +#error "It shouldn't be included. Call Leonid" +#endif \ No newline at end of file diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/win/mtl_arch_types.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/win/mtl_arch_types.h new file mode 100644 index 00000000..a02fc90b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/mtl_types/win/win/mtl_arch_types.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_MTL_ARCH_TYPES_H +#define H_MTL_ARCH_TYPES_H + + +#if defined( i386 ) +#define __i386__ +#elif defined( IA64 ) +#define __ia64__ +#elif defined( AMD64 ) +#define __x86_64__ +#endif + +/* + * Memory sizes + * + */ + +#define PAGESHIFT PAGE_SHIFT +#define PAGESIZE PAGE_SIZE + +#define __ARCH_MIN_PAGE_SIZE PAGESIZE + +/* + * Mosal memory managment types + */ + +#ifdef _WIN64 +#define SIZE_T_XFMT "0x%I64X" +#define SIZE_T_DFMT "%I64" +#define OFF_T_FMT "%I64" +#define MT_ULONG_PTR_FMT "0x%I64X" +#else +#define SIZE_T_XFMT "0x%X" +#define SIZE_T_DFMT "%u" +#define OFF_T_FMT "%u" +#define MT_ULONG_PTR_FMT "0x%X" +#endif + + +#define SIZE_T_FMT SIZE_T_XFMT +#define U64_FMT "0x%I64X" +#define VIRT_ADDR_FMT "%p" +/* + * Physical addresses are always 64-bits. There is no difference + * between PAE and non-PAE drivers in Windows. + */ +#define PHYS_ADDR_FMT "%I64X" +typedef u_int64_t MT_phys_addr_t; +/* MT_virt_addr_t is a byte pointer to support arithmetic. 
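+ * For example, (MT_virt_addr_t)buf + 4 addresses the byte at offset 4,
+ * with no pointer-type scaling (an illustrative note).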
*/ +typedef u_intn_t MT_virt_addr_t; +typedef u_intn_t MT_offset_t; +typedef u_intn_t MT_size_t; +typedef intn_t MT_long_ptr_t; +typedef u_intn_t MT_ulong_ptr_t; + +#endif /* H_MTL_ARCH_TYPES_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.c new file mode 100644 index 00000000..f28fb874 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.c @@ -0,0 +1,679 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#define _MD_C_ + +#include "MdGen.h" +#include "thh_hob.h" +#include + + +#define CHECK_INIT(lbl) \ + if (l_MddkStatus != MT_OK) \ + { \ + l_Status = (NTSTATUS)l_MddkStatus; \ + goto lbl; \ + } + +/* extern functions */ +void MPGA_init_module(void); +int MPGA_cleanup_module(void); + +/*------------------------------------------------------------------------------------------------------*/ +// buf_p points to the buffer of the following format: +// tNdNeN[;...] 
, meaning +// trace:12...N debug:12...N error:12...N +// N:0..9 +// +// Returns 0 on OK +int make_log_str(PCHAR buf_p,PCHAR layer_p,PCHAR log_str_p) +{ + char *ptr, *str, *scale="123456789"; + int i, cnt; + + *log_str_p = '\0'; + ptr = strchr( buf_p, ' '); + if (ptr == NULL) { + MdKdPrint( DBGLVL_MINIMUM ,("(make_log_str) DebugString - illegal format (%s)\n", buf_p)); + return 1; + } + *ptr = '\0'; + strcpy( layer_p, buf_p ); + buf_p = ptr + 1; + for (i=0; i<3 && *buf_p && *buf_p !='\n'; i++,buf_p+=2) { + switch (*buf_p) { + case 'T': case 't': + str = "trace:"; + break; + + case 'D': case 'd': + str = "debug:"; + break; + + case 'E': case 'e': + str = "error:"; + break; + + default: + MdKdPrint( DBGLVL_MINIMUM ,("(make_log_str) DebugString - illegal format (%s)\n", buf_p)); + return 1; + } + cnt = *(buf_p+1) - '0'; + if (cnt < 0 || cnt > 9) { + MdKdPrint( DBGLVL_MINIMUM ,("(make_log_str) DebugString - illegal format (%s)\n", buf_p)); + return 1; + } + strcat( log_str_p, str ); + ptr = log_str_p + strlen(log_str_p); + memcpy( ptr, scale, cnt ); + ptr += cnt; + *ptr = ' '; + *(ptr+1) = '\0'; + } + return 0; +} + +void CallMtlLogSet(PANSI_STRING aStr) +{ + char *ptr, *ptr1; + ptr = aStr->Buffer; + ptr1 = strchr(ptr, ' '); + if (ptr1 == NULL) { + MdKdPrint( DBGLVL_MINIMUM ,("(CallMtlLogSet) DebugMultiString - illegal format (%s)\n", ptr)); + } + else { + *ptr1 = '\0'; + ptr1 += 1; + mtl_log_set( ptr, ptr1 ); /* there was a value - set it */ + } +} + +NTSTATUS +GetRegistryParameters() +{ + /* debug break */ + ULONG l_uDebugBreak; + /* buffer for Registry string parameter */ + /* temp */ + ULONG val; + + MD_GET_REGISTRY_DWORD(L"DebugBreak",0,l_uDebugBreak); + if (l_uDebugBreak) + DbgBreakPoint(); + +#if DBG + /* "DebugLevel" - set debug print level in the Driver */ + MD_GET_REGISTRY_DWORD(L"DebugLevel",DBGLVL_DEFAULT,g_pDrvContext->m_nDebugPrintLevel); + + /* "DebugString" - Debug Print for Tavor - old format */ + { + char *ptr1,*ptr2; + UCHAR l_sBuf[250]; + MD_GET_REGISTRY_STR(L"DebugString",l_sBuf,sizeof(l_sBuf)); + + // Buffer format: tNdNeN[;...] 
(N:0..9), meaning + // trace:12...N debug:12...N error:12...N + for (ptr1=l_sBuf; *ptr1; ptr1=ptr2+1) + { + char log_str[60], layer[20]; + ptr2 = strchr( ptr1, ';' ); + if (ptr2 == NULL) { + ptr2 = strchr( ptr1, '\0' ); + if (ptr2 == NULL) + break; + else + *(ptr2+1) = '\0'; + } + *ptr2='\0'; + if (!make_log_str(ptr1,layer, log_str)) + mtl_log_set( layer, log_str ); /* there was a value - set it */ + } + + } + + /* "DebugMultiString" - Debug Print for Tavor - new format */ + { + WCHAR Defaults[] = { L"\0" }; + MD_GET_REGISTRY_MULTI_STR(L"DebugMultiString",Defaults,sizeof(Defaults), CallMtlLogSet); + } + + +#endif + + /* "UseIbMgt" - run IbMgt module */ + MD_GET_REGISTRY_DWORD(L"UseIbMgt",TRUE,val); + g_pDrvContext->m_fSupportIbMgt = (BOOLEAN)val; + + /* "ThhLegacySqp" - work in 'legacy sqp mode' */ + MD_GET_REGISTRY_DWORD(L"ThhLegacySqp",0,g_pDrvContext->m_ThhLegacySqp); + if (g_pDrvContext->m_ThhLegacySqp) + g_pDrvContext->m_fSupportIbMgt = FALSE; + + /* "AvInHostMem" - AV in host memory' */ + MD_GET_REGISTRY_DWORD(L"AvInHostMem",0,g_pDrvContext->m_AvInHostMem); + + /* "InfiniteCmdTimeout" */ + MD_GET_REGISTRY_DWORD(L"InfiniteCmdTimeout",0,g_pDrvContext->m_InfiniteCmdTimeout); + + /* "IbMgtQp0Only" - for IB_MGT: use only QP0 */ + MD_GET_REGISTRY_DWORD(L"IbMgtQp0Only",0,g_pDrvContext->m_IbMgtQp0Only); + + /* "NumCmdsOuts" - number of outstanding commands' */ + MD_GET_REGISTRY_DWORD(L"NumCmdsOuts",0,g_pDrvContext->m_NumCmdsOuts); + + /* Other THH params */ + MD_GET_REGISTRY_DWORD(L"FatalDelayHalt",0,g_pDrvContext->m_FatalDelayHalt); + MD_GET_REGISTRY_DWORD(L"AsyncEqSize",0,g_pDrvContext->m_AsyncEqSize); + MD_GET_REGISTRY_DWORD(L"CmdifUseUar0",0,g_pDrvContext->m_CmdifUseUar0); + MD_GET_REGISTRY_DWORD(L"IgnoreSubsystemId",0,g_pDrvContext->m_IgnoreSubsystemId); + + /* "SupportTavor" - support Tavor functionality; otherwise - only tools (InfinBurn etc.) 
*/ + MD_GET_REGISTRY_DWORD(L"SupportTavor",TRUE,val); + g_pDrvContext->m_fSupportTavor = (BOOLEAN)val; + if (!g_pDrvContext->m_fSupportTavor) + g_pDrvContext->m_fSupportIbMgt = FALSE; + + return STATUS_SUCCESS; +} + +/*------------------------------------------------------------------------------------------------------*/ +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pwsRegistryPath + ) +/*++ + +Routine Description: + + Perform the following actions: + - create Control Device names; + - allocate and init Driver context; + - read Registry parameters, if any; + - call MdDevInit to create and init Control Device context; + - init MDDK; + - register Driver entry points with IO manager + +Arguments: + + pi_pDriverObject - pointer to the driver object + + pi_pwsRegistryPath - pointer to a unicode string representing the path + to driver-specific key in the registry + +Return Value: + + STATUS_SUCCESS if successful, + STATUS_UNSUCCESSFUL otherwise + +--*/ +{ + NTSTATUS l_Status = STATUS_SUCCESS; + PDEVICE_OBJECT l_pFdo = NULL; + /* driver context */ + PMD_DRV_CONTEXT_T l_pDrvContext = NULL; + /* device context */ + PMD_DEV_CONTEXT_T l_pCtrlDevContext; + /* The device Win32 name as unicde string */ + UNICODE_STRING l_usNtDeviceName = { 0 , 0 , NULL }; + /* The Dos device name as unicde string */ + UNICODE_STRING l_usDosDeviceName = { 0 , 0 , NULL }; + /* MDDK status */ + call_result_t l_MddkStatus; + /* logging */ + char * l_pReason = "Unknown"; + /* THH params */ + THH_module_params_t l_ThhParams; + + /* no context yet */ + g_pDrvContext = NULL; + + l_Status = CL_INIT; + if( !NT_SUCCESS(l_Status) ) + return l_Status; + + /* create Control Device names */ + if (!MdCreateDeviceNames(MD_CTL_DEVICE_NAME, &l_usNtDeviceName, &l_usDosDeviceName)) + { /* failed - no resources */ + + l_pReason = "Failed creation of name for the Control Device"; + l_Status = STATUS_INSUFFICIENT_RESOURCES; + goto err; + + } /* failed - no resources */ + + // Allocate memory for Driver Context + l_pDrvContext = (PMD_DRV_CONTEXT_T)MdExAllocatePool( NonPagedPool , sizeof(MD_DRV_CONTEXT_T)); + + if (l_pDrvContext == NULL) + { /* allocation failed */ + + l_pReason = "Failed allocation memory for Driver Context"; + l_Status = STATUS_INSUFFICIENT_RESOURCES; + goto ErrExit1; + + } /* allocation failed */ + + /* store it in global variable */ + g_pDrvContext = l_pDrvContext; + + /* Clear local device info memory */ + RtlZeroMemory(l_pDrvContext, sizeof(MD_DRV_CONTEXT_T)); + + // store driver object + l_pDrvContext->m_pDrvObject = pi_pDriverObject; + + // get parameters + + /* break - for debugging */ + GetRegistryParameters(); + + // Only now one can start debug printing + MdKdPrint( DBGLVL_LOW,("(DriverEntry) MDT started **********\n")); + MdKdPrint( DBGLVL_MINIMUM ,("(DriverEntry) Enter: RegistryPath=\n %ws\n", pi_pwsRegistryPath->Buffer )); + + /* + * Init the fields of the driver context + */ + + /* prottection */ + INIT_LOCK_IT( &l_pDrvContext->m_SpinLock ); + + // init queue header + InitializeListHead( &l_pDrvContext->m_DevQue ); + + /* create control device */ + l_Status = MdDevInit( l_pDrvContext, MD_DEV_IX_CTRL, &l_usNtDeviceName, &l_usDosDeviceName, &l_pCtrlDevContext); + + if (!NT_SUCCESS(l_Status)) + { /* device creation failed */ + + l_Status = STATUS_INSUFFICIENT_RESOURCES; + goto ErrExit1; + + } /* device creation failed */ + + /* store control device in the driver context */ + l_pDrvContext->m_pCtlDevContext = l_pCtrlDevContext; + + /* MOSAL */ + l_MddkStatus = MOSAL_init(0); 
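+ /* Note: CHECK_INIT(lbl), defined at the top of this file, expands to
+  *   if (l_MddkStatus != MT_OK) { l_Status = (NTSTATUS)l_MddkStatus; goto lbl; }
+  * so each MDDK init call below bails out to the error label that unwinds
+  * everything initialized before it; the labels fall through in reverse
+  * init order (ErrExit4 -> ErrExit3 -> ErrExit1). */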
+ CHECK_INIT(ErrExit1) + + if (g_pDrvContext->m_fSupportTavor) + { /* init Tavor modules */ + + /* init mpga_kl */ + MPGA_init_module(); + + /* init HH modules: vapi_common_kl, hh_kl */ + l_MddkStatus = HH_init_module(); + CHECK_INIT(ErrExit3) + + /* init Tavor modules: thh_kl, vip_kl, vapi_kl */ + /* set DEBUG level: logset THH all */ + mtl_log_set( "THH", "all"); + + /* init THH */ + l_ThhParams.thh_legacy_sqp = g_pDrvContext->m_ThhLegacySqp; + l_ThhParams.av_in_host_mem = g_pDrvContext->m_AvInHostMem; + l_ThhParams.infinite_cmd_timeout = g_pDrvContext->m_InfiniteCmdTimeout; + l_ThhParams.num_cmds_outs = g_pDrvContext->m_NumCmdsOuts; + l_ThhParams.fatal_delay_halt = g_pDrvContext->m_FatalDelayHalt; + l_ThhParams.async_eq_size = g_pDrvContext->m_AsyncEqSize; + l_ThhParams.cmdif_use_uar0 = g_pDrvContext->m_CmdifUseUar0; + l_ThhParams.ignore_subsystem_id = g_pDrvContext->m_IgnoreSubsystemId; + + l_MddkStatus = THH_init_module( &l_ThhParams ); + CHECK_INIT(ErrExit4) + + /* VIP */ + // l_MddkStatus = VIPKL_init_module(); + // CHECK_INIT(ErrExit1) + // + ///* VAPI */ + // l_MddkStatus = VAPI_init_module(); + // CHECK_INIT(ErrExit1) + + } /* init Tavor modules */ + + // resource tracking + + // Create dispatch points for create, close, unload + pi_pDriverObject->DriverUnload = MdUnload; + pi_pDriverObject->MajorFunction[IRP_MJ_CREATE] = MdCreate; + pi_pDriverObject->MajorFunction[IRP_MJ_CLOSE] = MdClose; + pi_pDriverObject->MajorFunction[IRP_MJ_DEVICE_CONTROL] = MdProcessIoctl; +#ifdef MD_RW_SUPPORT + pi_pDriverObject->MajorFunction[IRP_MJ_WRITE] = MdWrite; + pi_pDriverObject->MajorFunction[IRP_MJ_READ] = MdRead; +#endif + pi_pDriverObject->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = MdProcessSysControlIrp; + pi_pDriverObject->MajorFunction[IRP_MJ_PNP] = MdProcessPnPIrp; + pi_pDriverObject->MajorFunction[IRP_MJ_POWER] = MdProcessPowerIrp; + pi_pDriverObject->DriverExtension->AddDevice = MdPnPAddDevice; + + // Debug print + MdKdPrint( DBGLVL_DEFAULT,("(DriverEntry) Exit: Status %x\n", l_Status)); + + return l_Status; + +ErrExit4: + HH_cleanup_module(); + +ErrExit3: + MPGA_cleanup_module(); + MOSAL_cleanup(); + +ErrExit1: + /* Free the NT device name path buffer */ + if (l_usNtDeviceName.Buffer) + MdExFreePool(l_usNtDeviceName.Buffer); + + /* Free the Dos device name path buffer */ + if (l_usDosDeviceName.Buffer) + MdExFreePool(l_usDosDeviceName.Buffer); + + /* release driver context */ + if (l_pDrvContext != NULL) + { + g_pDrvContext = NULL; + MdExFreePool( l_pDrvContext ); + } + +err: +#pragma warning( push ) +#pragma warning( disable:4296 ) + MdKdPrint( DBGLVL_ALWAYS ,("(MdDeviceInit) Device failed to initialize \n")); +#pragma warning( pop ) + + CL_DEINIT; + + /* Write to event log */ + WriteEventLogEntry( pi_pDriverObject, MD_EVENT_LOG_LOAD_ERROR, + 0, l_Status, 1, l_Status ); + return STATUS_UNSUCCESSFUL; +} + + + +NTSTATUS DllInitialize(PUNICODE_STRING RegistryPath) +{ + DbgPrint("\n***** MT23108: DllInitialize()\n"); + return STATUS_SUCCESS; +} + +NTSTATUS DllUnload() +{ + DbgPrint("\n***** MT23108: DllUnload()\n"); + return STATUS_SUCCESS; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdProcessSysControlIrp( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ) +/*++ + +Routine Description: + + Main dispatch table routine for IRP_MJ_SYSTEM_CONTROL + We basically just pass these down to the PDO + +Arguments: + + DeviceObject - pointer to FDO device object + + Irp - pointer to an I/O Request Packet + +Return Value: + 
+ Status returned from lower driver + + +--*/ +{ + + PIO_STACK_LOCATION irpStack; + PMD_DEV_CONTEXT_T l_pMdDevContext; + NTSTATUS l_Status = STATUS_SUCCESS; + PDEVICE_OBJECT stackDeviceObject; + + //Irp->IoStatus.Status = STATUS_SUCCESS; + //Irp->IoStatus.Information = 0; + + // + // Get a pointer to the current location in the Irp. This is where + // the function codes and parameters are located. + // + + irpStack = IoGetCurrentIrpStackLocation (Irp); + + // + // Get a pointer to the device extension + // + + l_pMdDevContext = DeviceObject->DeviceExtension; + stackDeviceObject = l_pMdDevContext->m_pLdo; + + MdKdPrint( DBGLVL_HIGH, ( "(MdProcessSysControlIrp) enter \n") ); + + MdIncrementIoCount(l_pMdDevContext); + + MDASSERT( IRP_MJ_SYSTEM_CONTROL == irpStack->MajorFunction ); + + IoCopyCurrentIrpStackLocationToNext(Irp); + + + l_Status = IoCallDriver(stackDeviceObject, + Irp); + + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrint( DBGLVL_HIGH,("(MdProcessSysControlIrp) Exit: MdProcessSysControlIrp %x\n", l_Status)); + + return l_Status; +} + +/*------------------------------------------------------------------------------------------------------*/ + +VOID +MdUnload( + IN PDRIVER_OBJECT pi_pDriverObject + ) +/*++ + +Routine Description: + + Free all the allocated resources, etc. + +Arguments: + + pi_pDriverObject - pointer to a driver object + +Return Value: + + +--*/ +{ + /* driver context */ + PMD_DRV_CONTEXT_T l_pDrvContext = g_pDrvContext; + PRE_LOCK_IT; + + MdKdPrint( DBGLVL_HIGH,("(MdUnload) enter \n")); + + // resources tracking + + +if (l_pDrvContext->m_fSupportTavor) +{ /* cleanup Tavor modules */ + //VAPI_cleanup_module(); + //VIPKL_cleanup_module(); + THH_cleanup_module(); + HH_cleanup_module(); + MPGA_cleanup_module(); +} /* cleanup Tavor modules */ + + // de-init MOSAL + MOSAL_cleanup(); + + // remove control device + if (l_pDrvContext->m_pCtlDevContext != NULL) + MdDevDeInit( l_pDrvContext->m_pCtlDevContext ); + l_pDrvContext->m_pCtlDevContext = NULL; + + // check whether there are more devices + LOCK_IT( &l_pDrvContext->m_SpinLock ); + if ( !IsListEmpty( &l_pDrvContext->m_DevQue ) ) + { /* not all the devices removed - error */ + + UNLOCK_IT( &l_pDrvContext->m_SpinLock ); + MdKdPrint( DBGLVL_HIGH,("(MdUnload) Error - not all the devices removed\n")); + return; + + } /* not all the devices removed - error */ + UNLOCK_IT( &l_pDrvContext->m_SpinLock ); + + MdKdPrint( DBGLVL_DEFAULT,("(MdUnload) exit \n")); + + /* Cleanup mtl_common */ + mtl_common_cleanup(); + + /* free the driver context */ + MdExFreePool( l_pDrvContext ); + + CL_DEINIT; + + g_pDrvContext = NULL; + + MDASSERT( g_pDbgData->m_nExAllocCount == 0 ); + + +} + +/*------------------------------------------------------------------------------------------------------*/ + +LONG +MdDecrementIoCount( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +/*++ + +Routine Description: + + We keep a pending IO count ( extension->PendingIoCount ) in the device extension. + The first increment of this count is done on adding the device. + Subsequently, the count is incremented for each new IRP received and + decremented when each IRP is completed or passed on. + + Transition to 'one' therefore indicates no IO is pending and signals + deviceExtension->NoPendingIoEvent. This is needed for processing + IRP_MN_QUERY_REMOVE_DEVICE + + Transition to 'zero' signals an event ( deviceExtension->RemoveEvent ) + to enable device removal. This is used in processing for IRP_MN_REMOVE_DEVICE + +Arguments: + + pi_pMdDevContext...... 
The device context + +Return Value: + + pi_pMdDevContext->PendingIoCount + + +--*/ + +{ + KIRQL oldIrql; + LONG ioCnt; + MdKdPrint( DBGLVL_MAXIMUM,("(MdDecrementIoCount) Enter: Pending io count = %x\n", pi_pMdDevContext->m_nPendingIoCnt)); + + KeAcquireSpinLock (&pi_pMdDevContext->m_IoCntSpinLock, &oldIrql); + ioCnt = InterlockedDecrement( &pi_pMdDevContext->m_nPendingIoCnt ); + + MdTrapCond( DBGLVL_HIGH,( 0 > pi_pMdDevContext->m_nPendingIoCnt ) ); + if (ioCnt == 1) + KeSetEvent(&pi_pMdDevContext->m_NoPendingIoEvent, 1, FALSE); // trigger no pending io + + if (ioCnt == 0) + KeSetEvent(&pi_pMdDevContext->m_RemoveEvent, 1, FALSE); // trigger remove-device event + + KeReleaseSpinLock (&pi_pMdDevContext->m_IoCntSpinLock, oldIrql); + MdKdPrint( DBGLVL_HIGH,("(MdDecrementIoCount) Exit: Pending io count = %x\n", pi_pMdDevContext->m_nPendingIoCnt)); + return pi_pMdDevContext->m_nPendingIoCnt; +} + +/*------------------------------------------------------------------------------------------------------*/ + +VOID +MdIncrementIoCount( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +/*++ + +Routine Description: + We keep a pending IO count ( extension->PendingIoCount ) in the device extension. + The first increment of this count is done on adding the device. + Subsequently, the count is incremented for each new IRP received and + decremented when each IRP is completed or passed on. + + +Arguments: + + pi_pMdDevContext...... The device context + +Return Value: + + none. + +--*/ +{ + KIRQL oldIrql; + LONG ioCnt; + MdKdPrint( DBGLVL_MAXIMUM,("(MdIncrementIoCount) Enter: Pending io count = %x\n", pi_pMdDevContext->m_nPendingIoCnt)); + KeAcquireSpinLock (&pi_pMdDevContext->m_IoCntSpinLock, &oldIrql); + ioCnt = InterlockedIncrement( &pi_pMdDevContext->m_nPendingIoCnt ); + + if (ioCnt > 1) + KeClearEvent(&pi_pMdDevContext->m_NoPendingIoEvent); // trigger pending io + + KeReleaseSpinLock (&pi_pMdDevContext->m_IoCntSpinLock, oldIrql); + MdKdPrint( DBGLVL_HIGH,("MdIncrementIoCount) Exit: Pending io count = %x\n", pi_pMdDevContext->m_nPendingIoCnt)); +} + + +#undef _MD_C_ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.h new file mode 100644 index 00000000..2f6610cf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _MD_H_ +#define _MD_H_ + +/* Base and offset for IO controls codes */ +#define MD_IOCTL_BASE 2050 /* Base code for kernel mode drivers */ + +/* +============================================================================== +|Description : Defines the control codes passed to the adapter using the DLL | +============================================================================== +*/ +#define MD_RESET_DEVICE ( MD_IOCTL_BASE+0 ) +#define MD_READ ( MD_IOCTL_BASE+1 ) +#define MD_WRITE ( MD_IOCTL_BASE+2 ) + +/* +============================================================================== +|Description : NT format of IO control codes based on the GSS codes. | +============================================================================== +*/ +#define MD_IOCTL_RESET_DEVICE CTL_CODE(FILE_DEVICE_UNKNOWN, MD_RESET_DEVICE, METHOD_BUFFERED, FILE_ANY_ACCESS) +#define MD_IOCTL_READ CTL_CODE(FILE_DEVICE_UNKNOWN, MD_READ, METHOD_IN_DIRECT, FILE_ANY_ACCESS) +#define MD_IOCTL_WRITE CTL_CODE(FILE_DEVICE_UNKNOWN, MD_WRITE, METHOD_OUT_DIRECT, FILE_ANY_ACCESS) + + + +#endif /* end, #ifndef _MD_H_ */ + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.rc b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.rc new file mode 100644 index 00000000..4fca3a20 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/Md.rc @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +//Microsoft Developer Studio generated resource script. +// +#include "resource.h" +#include "version.h" + +#define APSTUDIO_READONLY_SYMBOLS +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 2 resource. +// +#define APSTUDIO_HIDDEN_SYMBOLS +#include "windows.h" +#undef APSTUDIO_HIDDEN_SYMBOLS +#include "ntverp.h" + +///////////////////////////////////////////////////////////////////////////// +#undef APSTUDIO_READONLY_SYMBOLS + +///////////////////////////////////////////////////////////////////////////// +// English (U.S.) 
resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) +#ifdef _WIN32 +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) +#endif //_WIN32 + +#ifndef _MAC +///////////////////////////////////////////////////////////////////////////// +// +// Version +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION FV1,FV2,FV3,FV4 + PRODUCTVERSION PV1,PV2,PV3,PV4 + FILEFLAGSMASK 0x3fL +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "Comments", "\0" + VALUE "CompanyName", "Mellanox Technologies Ltd.\0" + VALUE "FileDescription", FILE_DESCRIPTION + VALUE "FileVersion", FILE_VERSION + VALUE "InternalName", INTERNAL_NAME + VALUE "LegalCopyright", "Copyright (C) Mellanox Technologies Ltd. 2001\0" + VALUE "LegalTrademarks", "\0" + VALUE "OriginalFilename", ORIGINAL_NAME + VALUE "PrivateBuild", PRIVATE_BUILD + VALUE "ProductName", PRODUCT_NAME + VALUE "ProductVersion", PRODUCT_VERSION + VALUE "SpecialBuild", SPECIAL_BUILD + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END + +#endif // !_MAC + + +#ifdef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// TEXTINCLUDE +// + +1 TEXTINCLUDE DISCARDABLE +BEGIN + "resource.h\0" +END + +2 TEXTINCLUDE DISCARDABLE +BEGIN + "#define APSTUDIO_HIDDEN_SYMBOLS\r\n" + "#include ""windows.h""\r\n" + "#undef APSTUDIO_HIDDEN_SYMBOLS\r\n" + "#include ""ntverp.h""\r\n" + "\0" +END + +3 TEXTINCLUDE DISCARDABLE +BEGIN + "\r\n" + "\0" +END + +#endif // APSTUDIO_INVOKED + + +///////////////////////////////////////////////////////////////////////////// +// +// Dialog +// + +IDD_DIALOG1 DIALOG DISCARDABLE 0, 0, 186, 95 +STYLE DS_MODALFRAME | WS_POPUP | WS_CAPTION | WS_SYSMENU +CAPTION "Dialog" +FONT 8, "MS Sans Serif" +BEGIN + DEFPUSHBUTTON "OK",IDOK,129,7,50,14 + PUSHBUTTON "Cancel",IDCANCEL,129,24,50,14 +END + + +///////////////////////////////////////////////////////////////////////////// +// +// DESIGNINFO +// + +#ifdef APSTUDIO_INVOKED +GUIDELINES DESIGNINFO DISCARDABLE +BEGIN + IDD_DIALOG1, DIALOG + BEGIN + LEFTMARGIN, 7 + RIGHTMARGIN, 179 + TOPMARGIN, 7 + BOTTOMMARGIN, 88 + END +END +#endif // APSTUDIO_INVOKED + +#endif // English (U.S.) resources +///////////////////////////////////////////////////////////////////////////// + + + +#ifndef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 3 resource. +// + + +///////////////////////////////////////////////////////////////////////////// +#endif // not APSTUDIO_INVOKED + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCard.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCard.h new file mode 100644 index 00000000..f4bda55b --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCard.h @@ -0,0 +1,740 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_CARD_H +#define _MD_CARD_H + +#include +#include +#include "MdDbg.h" +#include "mosal.h" +#include "mosal_util.h" +#include +#include + +//////////////////////////////////////////////////////////////////////////// +// RESTRICTIONS +//////////////////////////////////////////////////////////////////////////// + +/* + * Maximal device name length + */ +#define MD_MAX_DEV_NAME_LEN 255 + +/* + * Maximal device params length + */ +#define MD_MAX_DEV_PARAMS_LEN 255 + +/* + * Device name DB size + */ + #define MD_MAX_DEV_DB_SIZE 1024 + +/* + * PCI header size + */ + #define PCI_HDR_SIZE 64 + +//////////////////////////////////////////////////////////////////////////// +// DEFAULTS +//////////////////////////////////////////////////////////////////////////// +#define MD_DFLT_CONF_ADDR 88 +#define MD_DFLT_CONF_DATA 92 +#define MD_DFLT_DDR_OFFSET 0 +#define MD_DFLT_DDR_SIZE 0x10000000 + + +//////////////////////////////////////////////////////////////////////////// +// CONSTANTS +//////////////////////////////////////////////////////////////////////////// + +/* + * Driver control device name + */ +#define MD_CTL_DEVICE_NAME "MDCTL" +#define MLX_VENDOR_ID 0x15B3 + +//////////////////////////////////////////////////////////////////////////// +// ENUMERATIONS +//////////////////////////////////////////////////////////////////////////// + +// Device Ids +typedef enum { + MD_DEV_ID_CTRL=0, + MD_DEV_ID_TAVOR=23108, + MD_DEV_ID_TAVOR_BD=23109, + MD_DEV_ID_TAVOR_SD=23130, + MD_DEV_ID_ARBEL_TM=25208 /* ARBEL in Tavor mode */ +} MD_DEV_ID_E; + +// IB Device type +typedef enum { + MD_DEV_IX_CTRL=0, + MD_DEV_IX_TAVOR, + MD_DEV_IX_TAVOR_BD, + MD_DEV_IX_TAVOR_SD, + MD_DEV_IX_ARBEL_TM, /* ARBEL in Tavor mode */ + MD_DEV_IX_LAST +} MD_DEV_IX_E; + + +//////////////////////////////////////////////////////////////////////////// +// MACROS +//////////////////////////////////////////////////////////////////////////// + + +/* + * Work with CR_SPACE + */ +#define MD_CR_DWORD_WRITE(dev,addr,value) \ + (dev->m_fUsePorts) ? WRITE_PORT_ULONG((addr),(value)) : WRITE_REGISTER_ULONG((addr),(value)) +#define MD_CR_DWORD_READ(dev,addr,value) \ + value = (dev->m_fUsePorts) ?
READ_PORT_ULONG((addr)) : READ_REGISTER_ULONG((addr)) + +// for WinNt +#define PRE_LOCK_IT KIRQL l_OldIrql +#define INIT_LOCK_IT(a) KeInitializeSpinLock(a) +#define LOCK_IT(a) KeAcquireSpinLock((a),&l_OldIrql) +#define UNLOCK_IT(a) KeReleaseSpinLock((a),l_OldIrql) + +// mutexes +#define KMUTEX_INIT(a) KeInitializeMutex(a,0) +#define KMUTEX_ACQ(a) KeWaitForMutexObject( a, Executive, KernelMode, FALSE, NULL ) +#define KMUTEX_REL(a) KeReleaseMutex(a, FALSE) + +#define FMUTEX_INIT(a) ExInitializeFastMutex(a) +#define FMUTEX_ACQ(a) ExAcquireFastMutex(a) +#define FMUTEX_REL(a) ExReleaseFastMutex(a) + +// semaphores +#define KSEM_INIT(a) KeInitializeSemaphore(a,1,1) +#define KSEM_ACQ(a) KeWaitForSingleObject( a, Executive, KernelMode, FALSE, NULL ) +#define KSEM_REL(a) KeReleaseSemaphore(a, 0, 1, FALSE) + + + +//////////////////////////////////////////////////////////////////////////// +// STRUCTURES +//////////////////////////////////////////////////////////////////////////// + +// +// MDHAL DB +// +typedef struct MD_HAL_DEV_PARAMS_S { + + /* buffer for DevId string */ + WCHAR m_DevIdWstr[5]; + // format string for building internal name + PCHAR m_Format; + // format string for building exported name + PCHAR m_ExFormat; + // Device ID (in PCI configuration) + MD_DEV_ID_E m_DevId; + // Device IX (in PCI configuration) + MD_DEV_IX_E m_DevIx; + // export to user + BOOLEAN m_fExpose; + // BAR sizes (approximately) + ULONG m_SizeBar0; + ULONG m_SizeBar1; + ULONG m_SizeBar2; + // the device clock frequency + u_int32_t m_ClockFreq; + // number of bits per device word + u_int8_t m_WordSize; + // 1 means opposite to platform endianess + u_int8_t m_Endianess; + +} MD_HAL_DEV_PARAMS_T, *PMD_HAL_DEV_PARAMS_T; + + +typedef struct MD_DEV_CONTEXT_S *PMD_DEV_CONTEXT_T; + +typedef PFILE_OBJECT PCS_HANDLE_T; + +// +// Process context - saved in PFILE_OBJECT->FsContext +// +typedef struct MD_PCS_CONTEXT_S { + + // TAVOR (VIPKL) info + // IB_MGT resource tracking + PVOID m_hIbMgt; + // VIPKL resource tracking + PVOID m_hVipkl; + // MOSAL info + PVOID m_hMosal; + + // IB_MGT info + + // MDCTL info + // MDL for CR + PMDL m_pCrMdl; + // MDL for UAR + PMDL m_pUarMdl; + // MDL for DDR + PMDL m_pDdrMdl; + + // SANITY CHECK info + // PID + MOSAL_pid_t m_Pid; + +} MD_PCS_CONTEXT_T, *PMD_PCS_CONTEXT_T; + +#define MAKE_PCS(a) ((PMD_PCS_CONTEXT_T)(a)) + + +// +// Driver context +// +typedef struct MD_DRV_CONTEXT_S { + + // driver object + PDRIVER_OBJECT m_pDrvObject; + + // Debug Print Level + ULONG m_nDebugPrintLevel; + + // FIFO queue of devices, added (for debug purposes) + LIST_ENTRY m_DevQue; + + // current number of devices + ULONG m_uDevNo; + + // current number of adapters + ULONG m_uCardNo; + + + /* control device context */ + PMD_DEV_CONTEXT_T m_pCtlDevContext; + + /* device name DB */ + ULONG m_DevNamesDbSize; + ULONG m_DevNamesDbCnt; + char m_DevNamesDb[MD_MAX_DEV_DB_SIZE]; + + /* protection */ + KSPIN_LOCK m_SpinLock; + + /* various features support */ + ULONG m_ThhLegacySqp; + BOOLEAN m_fSupportIbMgt; + BOOLEAN m_fSupportTavor; + ULONG m_AvInHostMem; /* AV in host memory (and not in DDR) */ + ULONG m_InfiniteCmdTimeout; /* when 1 we use inifinite timeouts on commands completion */ + ULONG m_NumCmdsOuts; /* max number of outstanding commands */ + ULONG m_FatalDelayHalt; + ULONG m_AsyncEqSize; + ULONG m_CmdifUseUar0; + ULONG m_IgnoreSubsystemId; + + int m_IbMgtQp0Only; /* for IB_MGT: use QP0 only */ + +} MD_DRV_CONTEXT_T, *PMD_DRV_CONTEXT_T; + +// +// Control device context +// +typedef struct MD_CTL_DEV_CONTEXT_S 
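+/* Stub context for the control device - it carries no real state of its own.
+   MD_DEV_CONTEXT_T below overlays this and MD_IB_DEV_CONTEXT_T as unnamed
+   union members (a Microsoft C extension), so the fields of whichever
+   variant matches m_eDevType are accessed directly off the device context. */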
{ + int m_DummyCtl; +} MD_CTL_DEV_CONTEXT_T, *PMD_CTL_DEV_CONTEXT_T; + +typedef struct MD_IB_DEV_TAVOR_S { + VAPI_hca_hndl_t m_hHca; + HH_hca_hndl_t m_hHhHca; +} MD_IB_DEV_TAVOR_T, *PMD_IB_DEV_TAVOR_T; + +typedef struct MD_IB_DEV_TAVOR_BD_S { + int m_DummyTavorBd; +} MD_IB_DEV_TAVOR_BD_T, *PMD_IB_DEV_TAVOR_BD_T; + +typedef struct MD_IB_DEV_TAVOR_SD_S { + PMD_DEV_CONTEXT_T m_pBdDevContext; +} MD_IB_DEV_TAVOR_SD_T, *PMD_IB_DEV_TAVOR_SD_T; + +// +// PCI Header info +// +typedef struct MD_PCI_HDR_S { + ULONG m_Hdr[PCI_HDR_SIZE]; + ULONG m_Bus; + ULONG m_Slot; +} MD_PCI_HDR_T, *PMD_PCI_HDR_T; + +// +// IB device context +// +typedef struct MD_IB_DEV_CONTEXT_S { + + + //////////////////////////////////////////////////////////////////////////// + // Device stack information + //////////////////////////////////////////////////////////////////////////// + + // next-lower driver's device object, representing the target device + PDEVICE_OBJECT m_pLdo; + + // The bus driver object + PDEVICE_OBJECT m_pPdo; + + /* buffer for ASCII device name */ + char m_AsciiDevName[MD_MAX_DEV_NAME_LEN + 1]; + + /* card number */ + ULONG m_uCardNo; + + //////////////////////////////////////////////////////////////////////////// + // Device configuration information + //////////////////////////////////////////////////////////////////////////// + + // device location + ULONG m_BusNumber; + ULONG m_DevNumber; + ULONG m_Function; + + // IRQL + ULONG m_ulIntVector; + KIRQL m_ulIntLevel; + KAFFINITY m_Affinity; + BOOLEAN m_fIntShared; + KINTERRUPT_MODE m_IntMode; + + // CR space + MD_BAR_T m_Cr; + + // UAR-space region + MD_BAR_T m_Uar; + + // memory range + MD_BAR_T m_Ddr; + ULONG m_ulDdrMapOffset; + ULONG m_ulDdrMapSize; + + // register offsets + ULONG m_ulAddrOffset; + ULONG m_ulDataOffset; + + // PCI headers + BOOLEAN m_fMayReset; + MD_PCI_HDR_T m_MyHdr; + MD_PCI_HDR_T m_HcaHdr; + MD_PCI_HDR_T m_BridgeHdr; + ULONG m_PerformReset; + + //////////////////////////////////////////////////////////////////////////// + // Power management + //////////////////////////////////////////////////////////////////////////// + + // current device power state + DEVICE_POWER_STATE m_CurrentDevicePowerState; + + //Bus drivers set the appropriate values in this structure in response + //to an IRP_MN_QUERY_CAPABILITIES IRP. Function and filter drivers might + //alter the capabilities set by the bus driver. 
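+ // (These capabilities are typically obtained once, during start-device
+ // processing, by sending IRP_MN_QUERY_CAPABILITIES down to the PDO -- see
+ // the MdQueryCapabilities prototype near the end of this header.)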
+ DEVICE_CAPABILITIES m_DeviceCapabilities; + + // used to save the currently-being-handled system-requested power irp request + PIRP m_PowerIrp; + + // set to signal driver-generated power request is finished + KEVENT m_SelfRequestedPowerIrpEvent; + + // flag set when IRP_MN_WAIT_WAKE is received and we're in a power state + // where we can signal a wait + BOOLEAN m_EnabledForWakeup; + + // used to flag that we're currently handling a self-generated power request + BOOLEAN m_SelfPowerIrp; + + // default power state to power down to on self-suspend + ULONG m_PowerDownLevel; + + //////////////////////////////////////////////////////////////////////////// + // PnP handling + //////////////////////////////////////////////////////////////////////////// + + //flag set when processing IRP_MN_REMOVE_DEVICE + BOOLEAN m_DeviceRemoved; + + // flag set when driver has answered success to IRP_MN_QUERY_REMOVE_DEVICE + BOOLEAN m_RemoveDeviceRequested; + + // flag set when driver has answered success to IRP_MN_QUERY_STOP_DEVICE + BOOLEAN m_StopDeviceRequested; + + // MOSAL device handle + MOSAL_dev_handle_t m_hMosal; + + // pointer to device parameters + PMD_HAL_DEV_PARAMS_T m_pMdhalParams; + + /* DEVICE SPECIFIC PART */ + union { + MD_IB_DEV_TAVOR_T; + MD_IB_DEV_TAVOR_BD_T; + MD_IB_DEV_TAVOR_SD_T; + }; + +} MD_IB_DEV_CONTEXT_T, *PMD_IB_DEV_CONTEXT_T; + +// +// A structure representing the instance information associated with +// this particular device. +// + +typedef struct MD_DEV_CONTEXT_S { + + // FIFO queue of devices + LIST_ENTRY m_Link; + + // global context + PMD_DRV_CONTEXT_T m_pDrvContext; + + // device type + MD_DEV_IX_E m_eDevType; + + // NT device handle + PDEVICE_OBJECT m_pFdo; + + /* NT device name */ + UNICODE_STRING m_usNtDeviceName; + + /* DOS device name */ + UNICODE_STRING m_usDosDeviceName; + + /* device level spinlock */ + KSPIN_LOCK m_SpinLock; + + /* signature - for checking the handle */ + ULONG m_Signature; + + // spinlock used to protect inc/dec iocount logic + KSPIN_LOCK m_IoCntSpinLock; + + /* pending IRP count */ + int m_nPendingIoCnt; + + // set when PendingIoCount goes to 0; flags device can be removed + KEVENT m_RemoveEvent; + + // set when PendingIoCount goes to 1 ( 1st increment was on add device ) + // this indicates no IO requests outstanding, either user, system, or self-staged + KEVENT m_NoPendingIoEvent; + + // flag set when device has been successfully started + BOOLEAN m_DeviceStarted; + + // process list + LIST_ENTRY m_PcsQue; + + // flag + BOOLEAN m_fDeletePending; + + // semaphore + KSEMAPHORE m_Sem; + + // mutex + KMUTEX m_Mutex; + + // number of OPENs for this device + int m_nOpenCount; + + // interface for r/w to PCI config space + BUS_INTERFACE_STANDARD m_Interface; + + /* DEVICE SPECIFIC PART */ + union { + MD_CTL_DEV_CONTEXT_T; + MD_IB_DEV_CONTEXT_T; + }; + +} MD_DEV_CONTEXT_T, *PMD_DEV_CONTEXT_T; + + +//////////////////////////////////////////////////////////////////////////// +// GLOBAL VARIABLES +//////////////////////////////////////////////////////////////////////////// + +#ifdef _MD_C_ + PMD_DRV_CONTEXT_T g_pDrvContext = NULL; + MD_HAL_DEV_PARAMS_T g_DevParams[MD_DEV_IX_LAST] = { + /* for CTRL */ + { L"0000", "", "", MD_DEV_ID_CTRL, MD_DEV_IX_CTRL, FALSE, 0,0,0, 0, 0, 0 }, + /* for TAVOR */ + { L"5A44", "InfiniHost%d", "InfiniHost%d", MD_DEV_ID_TAVOR, MD_DEV_IX_TAVOR, TRUE, 0x00100000, 0x01000000, 0x00000000, 167000000, 4, 0 }, + /* for TAVOR_BD */ + { L"5A45","mt%d_pciconf%d", "mt%d_pciconf%d", MD_DEV_ID_TAVOR_BD, MD_DEV_IX_TAVOR_BD, TRUE, 
0x00000000, 0x00000000, 0x00000000, 167000000, 4, 0 }, + /* for TAVOR_SD */ + { L"5A5A", "mt%d_pci%d", "mt%d_pci%d", MD_DEV_ID_TAVOR_SD, MD_DEV_IX_TAVOR_SD, TRUE, 0x00100000, 0x01000000, 0x00000000, 167000000, 4, 0 }, + /* for TAVOR_SD */ + { L"6278", "InfiniHostEx%d", "InfiniHosEx%d", MD_DEV_ID_ARBEL_TM, MD_DEV_IX_ARBEL_TM, TRUE, 0x00100000, 0x01000000, 0x00000000, 167000000, 4, 0 }, + }; + +#else + extern PMD_DRV_CONTEXT_T g_pDrvContext; + extern MD_HAL_DEV_PARAMS_T g_DevParams[MD_DEV_IX_LAST]; +#endif + +//////////////////////////////////////////////////////////////////////////// +// function prototypes +//////////////////////////////////////////////////////////////////////////// + +NTSTATUS +MdProcessPnPIrp( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + +NTSTATUS +MdProcessSysControlIrp( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + +VOID +MdUnload( + IN PDRIVER_OBJECT DriverObject + ); + +NTSTATUS +MdStartDevice( + IN PDEVICE_OBJECT pi_pFdo, + IN PIRP pi_pIrp + ); + +NTSTATUS +MdStopDevice( + IN PDEVICE_OBJECT DeviceObject + ); + +NTSTATUS +MdRemoveDevice( + IN PDEVICE_OBJECT DeviceObject + ); + +NTSTATUS +MdPnPAddDevice( + IN PDRIVER_OBJECT DriverObject, + IN PDEVICE_OBJECT pi_pPdo + ); + +NTSTATUS +MdCreateDeviceObject( + IN PDRIVER_OBJECT DriverObject, + IN PDEVICE_OBJECT pi_pPdo, + IN PDEVICE_OBJECT *DeviceObject + ); + +NTSTATUS +MdConfigureDevice( + IN PDEVICE_OBJECT DeviceObject + ); + +NTSTATUS +MdIrpCompletionRoutine( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN PVOID Context + ); + +NTSTATUS +MdPoRequestCompletion( + IN PDEVICE_OBJECT DeviceObject, + IN UCHAR MinorFunction, + IN POWER_STATE PowerState, + IN PVOID Context, + IN PIO_STATUS_BLOCK IoStatus + ); + +NTSTATUS +MdPoSelfRequestCompletion( + IN PDEVICE_OBJECT DeviceObject, + IN UCHAR MinorFunction, + IN POWER_STATE PowerState, + IN PVOID Context, + IN PIO_STATUS_BLOCK IoStatus + ); + +NTSTATUS +MdGetPortStatus( + IN PDEVICE_OBJECT DeviceObject, + IN PULONG PortStatus + ); + +NTSTATUS +MdResetParentPort( + IN IN PDEVICE_OBJECT DeviceObject + ); + +NTSTATUS +MdSelfRequestPowerIrp( + IN PDEVICE_OBJECT DeviceObject, + IN POWER_STATE PowerState + ); + +BOOLEAN +MdSetDevicePowerState( + IN PDEVICE_OBJECT DeviceObject, + IN DEVICE_POWER_STATE DeviceState + ); + +NTSTATUS +MdAsyncReadWrite_Complete( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN PVOID Context + ); + +NTSTATUS +MdSimpleReadWrite_Complete( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN PVOID Context + ); + + +NTSTATUS +MdPowerIrp_Complete( + IN PDEVICE_OBJECT NullDeviceObject, + IN PIRP Irp, + IN PVOID Context + ); + +NTSTATUS +MdQueryCapabilities( + IN PDEVICE_OBJECT pi_pLowerDevObject, + OUT PDEVICE_CAPABILITIES po_pDeviceCapabilities + ); + + +NTSTATUS +MdWrite( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + +NTSTATUS +MdCreate( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + + +NTSTATUS +MdRead( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + + +NTSTATUS +MdAbortInPgsReqs( + IN PDEVICE_OBJECT DeviceObject + ); + + +NTSTATUS +MdProcessIoctl( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + + +NTSTATUS +MdResetDevice( + IN PDEVICE_OBJECT DeviceObject + ); + +NTSTATUS +MdClose( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + + +VOID +MdIncrementIoCount( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ); + +LONG +MdDecrementIoCount( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ); + + +NTSTATUS +MdProcessPowerIrp( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp + ); + + +NTSTATUS 
+MdStagedReadWrite( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN BOOLEAN Read + ); + +NTSTATUS +MdSelfSuspendOrActivate( + IN PDEVICE_OBJECT DeviceObject, + IN BOOLEAN fSuspend + ); + +NTSTATUS +MdSymbolicLink( + IN PDEVICE_OBJECT DeviceObject, + IN OUT PUNICODE_STRING deviceLinkUnicodeString + ); + + +BOOLEAN +MdCancelPendingIo( + IN PDEVICE_OBJECT DeviceObject + ); + +BOOLEAN +MdCanAcceptIoRequests( + IN PDEVICE_OBJECT DeviceObject + ); + +#endif // already included + +/* + * Interface GUID - defined outside of conditional include statement on purpose. + */ +// {4FAD14C1-D7D3-40bc-9C83-88498FE114F3} +DEFINE_GUID(GUID_MD_INTERFACE, +0x4fad14c1, 0xd7d3, 0x40bc, 0x9c, 0x83, 0x88, 0x49, 0x8f, 0xe1, 0x14, 0xf3); diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.c new file mode 100644 index 00000000..12b8db2e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.c @@ -0,0 +1,1300 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "MdGen.h" +#include "MdConfPriv.h" +#include "MdIoctlSpec.h" +#include +#include + +// select implementation of PciHdrWrite +//#define USE_HalSetBusData 1 +//#define USE_HalSetBusDataByOffset 1 +#define USE_ReadWritePciConfig 1 + + +#define NOT_USE_MDNTDDK 1 +//#ifdef NOT_USE_MDNTDDK +///* from ntddk.h */ +//// +//// Define types of bus information. 
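+// (The declarations below mirror the legacy HalGetBusData/HalSetBusData
+// family from ntddk.h and are kept commented out for reference only: this
+// file accesses PCI config space through BUS_INTERFACE_STANDARD or
+// IRP_MN_READ_CONFIG/IRP_MN_WRITE_CONFIG -- see USE_ReadWritePciConfig
+// above -- and the few remaining direct Hal* calls are wrapped in
+// #pragma warning(disable:4996), since those routines are deprecated.)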
+//// +// +//typedef enum _BUS_DATA_TYPE { +// ConfigurationSpaceUndefined = -1, +// Cmos, +// EisaConfiguration, +// Pos, +// CbusConfiguration, +// PCIConfiguration, +// VMEConfiguration, +// NuBusConfiguration, +// PCMCIAConfiguration, +// MPIConfiguration, +// MPSAConfiguration, +// PNPISAConfiguration, +// SgiInternalConfiguration, +// MaximumBusDataType +//} BUS_DATA_TYPE, *PBUS_DATA_TYPE; +//#ifdef __i386__ +//NTHALAPI +//ULONG +//HalGetBusData( +// IN BUS_DATA_TYPE BusDataType, +// IN ULONG BusNumber, +// IN ULONG SlotNumber, +// IN PVOID Buffer, +// IN ULONG Length +// ); +// +//NTHALAPI +//ULONG +//HalGetBusDataByOffset( +// IN BUS_DATA_TYPE BusDataType, +// IN ULONG BusNumber, +// IN ULONG SlotNumber, +// IN PVOID Buffer, +// IN ULONG Offset, +// IN ULONG Length +// ); +// +//NTHALAPI +//ULONG +//HalSetBusData( +// IN BUS_DATA_TYPE BusDataType, +// IN ULONG BusNumber, +// IN ULONG SlotNumber, +// IN PVOID Buffer, +// IN ULONG Length +// ); +// +//NTHALAPI +//ULONG +//HalSetBusDataByOffset( +// IN BUS_DATA_TYPE BusDataType, +// IN ULONG BusNumber, +// IN ULONG SlotNumber, +// IN PVOID Buffer, +// IN ULONG Offset, +// IN ULONG Length +// ); +//#endif +//#endif + +#ifndef USE_MOSAL +/****************************************************************************** + * Function: + * SendAwaitIrpCompletion + * + * Description: + * IRP completion routine + * + * Parameters: + * + * Returns: + * pointer to the entry on SUCCESS + * NULL - otherwise + * + ******************************************************************************/ +NTSTATUS +SendAwaitIrpCompletion ( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN PVOID Context + ) +{ + UNREFERENCED_PARAMETER (DeviceObject); + KeSetEvent ((PKEVENT) Context, IO_NO_INCREMENT, FALSE); + return STATUS_MORE_PROCESSING_REQUIRED; // Keep this IRP +} + +/****************************************************************************** + * Function: + * SendAwaitIrp + * + * Description: + * Create and send IRP stack down the stack and wait for the response (Blocking Mode) + * + * Parameters: + * pi_pDeviceExt.......... ointer to USB device extension + * pi_MajorCode........... IRP major code + * pi_MinorCode........... IRP minor code + * pi_pBuffer............. parameter buffer + * pi_nSize............... size of the buffer + * + * Returns: + * pointer to the entry on SUCCESS + * NULL - otherwise + * + ******************************************************************************/ +NTSTATUS +SendAwaitIrp( + IN PDEVICE_OBJECT pi_pFdo, + IN PDEVICE_OBJECT pi_pLdo, + IN ULONG pi_MajorCode, + IN ULONG pi_MinorCode, + IN PVOID pi_pBuffer, + IN int pi_nSize + ) +/*++ + + Routine Description: + + Create and send IRP stack down the stack and wait for the response ( +Blocking Mode) + + Arguments: + + pi_pFdo................ our device + pi_pLdo................ lower device + pi_MajorCode........... IRP major code + pi_MinorCode........... IRP minor code + pi_pBuffer............. parameter buffer + pi_nSize............... size of the buffer + + Returns: + + standard NTSTATUS return codes. 
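+ Note that STATUS_SUCCESS is also returned, without the IRP being sent at
+ all, when the routine is entered above PASSIVE_LEVEL, since it must block
+ on a kernel event.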
+ + Notes: + +--*/ +{ /* SendAwaitIrp */ + // Event + KEVENT l_hEvent; + // Pointer to IRP + PIRP l_pIrp; + // Stack location + PIO_STACK_LOCATION l_pStackLocation; + // Returned status + NTSTATUS l_Status; + + // call validation + if(KeGetCurrentIrql() != PASSIVE_LEVEL) + return STATUS_SUCCESS; + + // create event + KeInitializeEvent(&l_hEvent, NotificationEvent, FALSE); + + // build IRP request to USBD driver + l_pIrp = IoAllocateIrp( pi_pFdo->StackSize, FALSE ); + + // validate request + if (!l_pIrp) + { + //MdKdPrint( DBGLVL_MAXIMUM, ("(SendAwaitIrp) Unable to allocate IRP !\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + // fill IRP + l_pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + // set completion routine + IoSetCompletionRoutine(l_pIrp,SendAwaitIrpCompletion, &l_hEvent, TRUE, TRUE, TRUE); + + // fill stack location + l_pStackLocation = IoGetNextIrpStackLocation(l_pIrp); + l_pStackLocation->MajorFunction= (UCHAR)pi_MajorCode; + l_pStackLocation->MinorFunction= (UCHAR)pi_MinorCode; + RtlCopyMemory( &l_pStackLocation->Parameters, pi_pBuffer, pi_nSize ); + + // Call lower driver perform request + l_Status = IoCallDriver( pi_pLdo, l_pIrp ); + + // if the request not performed --> wait + if (l_Status == STATUS_PENDING) + { + // Wait until the IRP will be complete + KeWaitForSingleObject( + &l_hEvent, // event to wait for + Executive, // thread type (to wait into its context) + KernelMode, // mode of work + FALSE, // alertable + NULL // timeout + ); + l_Status = l_pIrp->IoStatus.Status; + } + + IoFreeIrp(l_pIrp); + + return l_Status; + +} /* SendAwaitIrp */ + + +#endif + +/****************************************************************************** + * Function: + * DrvReadWritePciConfig + * + * Description: + * Create and send IRP stack down the stack and wait for the response (Blocking Mode) + * + * Parameters: + * + * Returns: + * pointer to the entry on SUCCESS + * NULL - otherwise + * + ******************************************************************************/ +NTSTATUS +DrvReadWritePciConfig( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PVOID pi_pDataBuffer, + IN ULONG pi_nPciSpaceOffset, + IN ULONG pi_nDataLength, + IN BOOLEAN pi_fReadConfig + ) + +{ /* DrvReadWritePciConfig */ + + // parameter buffer for the request + DrvReadWriteConfig_t l_RwParams; + ULONG l_nBytes; + + // parameter validation + //MDASSERT(pi_pDataBuffer); + //MDASSERT(pi_nDataLength); + + // try to do it directly + if (pi_fReadConfig && pi_pMdDevContext->m_Interface.GetBusData) { + l_nBytes = pi_pMdDevContext->m_Interface.GetBusData( + pi_pMdDevContext->m_Interface.Context, + PCI_WHICHSPACE_CONFIG, + pi_pDataBuffer, + pi_nPciSpaceOffset, + pi_nDataLength + ); + return (l_nBytes != pi_nDataLength) ? STATUS_UNSUCCESSFUL : STATUS_SUCCESS; + } + else + if (!pi_fReadConfig && pi_pMdDevContext->m_Interface.SetBusData) { + pi_pMdDevContext->m_Interface.SetBusData( + pi_pMdDevContext->m_Interface.Context, + PCI_WHICHSPACE_CONFIG, + pi_pDataBuffer, + pi_nPciSpaceOffset, + pi_nDataLength + ); + return STATUS_SUCCESS; + } + else { + // fill request parameters + l_RwParams.Buffer = pi_pDataBuffer; + l_RwParams.Length = pi_nDataLength; + l_RwParams.Offset = pi_nPciSpaceOffset; + l_RwParams.WhichSpace = PCI_WHICHSPACE_CONFIG; + + return SendAwaitIrp( pi_pMdDevContext->m_pFdo, pi_pMdDevContext->m_pLdo, IRP_MJ_PNP, + pi_fReadConfig ? 
IRP_MN_READ_CONFIG : IRP_MN_WRITE_CONFIG, &l_RwParams, sizeof(DrvReadWriteConfig_t)); + } + +} /* DrvReadWritePciConfig */ +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +PciIfOpen( + IN PDEVICE_OBJECT pi_pFdo, + IN PDEVICE_OBJECT pi_pLdo, + IN PBUS_INTERFACE_STANDARD pi_pInterface + ) + +{ /* GetDirectPciInterface */ + + // parameter buffer for the request + IO_STACK_LOCATION l_Stack; + + // clean interface data + RtlZeroMemory( (PCHAR)pi_pInterface, sizeof(BUS_INTERFACE_STANDARD) ); + + // fill request parameters + l_Stack.Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD; + l_Stack.Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + l_Stack.Parameters.QueryInterface.Version = 1; + l_Stack.Parameters.QueryInterface.Interface = (PINTERFACE)pi_pInterface; + l_Stack.Parameters.QueryInterface.InterfaceSpecificData = NULL; + + return SendAwaitIrp( pi_pFdo, pi_pLdo, IRP_MJ_PNP, + IRP_MN_QUERY_INTERFACE, &l_Stack.Parameters, sizeof(l_Stack.Parameters)); + +} /* GetDirectPciInterface */ + +PciIfClose( + IN PBUS_INTERFACE_STANDARD pi_pInterface + ) +{ + if (pi_pInterface->InterfaceDereference) + pi_pInterface->InterfaceDereference((PVOID)pi_pInterface->Context); +} + +#ifdef NOT_USE_MDNTDDK + +/*------------------------------------------------------------------------------------------------------*/ + +/* from wdm.h +typedef struct _PCI_SLOT_NUMBER { + union { + struct { + ULONG DeviceNumber:5; + ULONG FunctionNumber:3; + ULONG Reserved:24; + } bits; + ULONG AsULONG; + } u; +} PCI_SLOT_NUMBER, *PPCI_SLOT_NUMBER; +*/ + +#ifdef __i386__ +BOOLEAN PciFindDeviceByBusAndId( + IN ULONG pi_MyBus, + IN ULONG pi_DevId, + IN OUT PULONG po_pSlot ) +{ +#define N_SLOTS 32 + ULONG l_Slot; + ULONG l_DevId; + ULONG l_Bytes; + + for (l_Slot = *po_pSlot; l_Slot < N_SLOTS; l_Slot++ ) { +#pragma warning( push ) +#pragma warning( disable:4996 ) + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + pi_MyBus, + l_Slot, + (PVOID)&l_DevId, + 0, + sizeof(ULONG) + ); +#pragma warning( pop ) + if (l_Bytes != sizeof(ULONG)) + continue; /* as if - "not found" */ + if (l_DevId == pi_DevId) + break; + } + + if (l_DevId == pi_DevId) { + *po_pSlot = l_Slot; + return TRUE; + } + else + return FALSE; +} + +BOOLEAN PciFindDeviceById( + IN ULONG pi_DevId, + IN OUT PULONG po_pBus, + IN OUT PULONG po_pSlot ) +{ +#define N_BUSES 16 + ULONG l_Bus; + ULONG l_Slot = *po_pSlot; + + for (l_Bus= *po_pBus; l_Bus < N_BUSES; l_Bus++, l_Slot=0) { + if (PciFindDeviceByBusAndId(l_Bus, pi_DevId, &l_Slot)) + break; + } + if (l_Bus >= N_BUSES) + return FALSE; + *po_pBus = l_Bus; + *po_pSlot = l_Slot; + return TRUE; +} + +BOOLEAN PciFindBridgeByBus( + IN ULONG pi_SecBus, + OUT PULONG po_pBus, + OUT PULONG po_pSlot ) +{ +#define N_CARDS 8 + ULONG l_CardNo; + ULONG l_Slot=0, l_Bus=0; + ULONG l_DevId = ((int)(23110) << 16) | MLX_VENDOR_ID; + ULONG l_SecBus, l_tmp, l_Bytes; + + for (l_CardNo= 0; l_CardNo < N_CARDS; l_CardNo++, l_Slot=0, l_Bus++) { + if (PciFindDeviceById(l_DevId, &l_Bus, &l_Slot)) { + /* found a bridge */ +#pragma warning( push ) +#pragma warning( disable:4996 ) + l_Bytes = HalGetBusDataByOffset( + PCIConfiguration, + l_Bus, + l_Slot, + (PVOID)&l_tmp, + 24, /* 24 - PrimaryBus, 25 - SecondaryBus, 26 - SubordinateBus */ + sizeof(ULONG) + ); +#pragma warning( pop ) + if (l_Bytes != sizeof(ULONG)) + continue; /* as if - "not found" */ + l_SecBus = (l_tmp >> 16) & 255; + if ( l_SecBus == pi_SecBus ) + break; /* found !!! 
*/ + } + } + if (l_CardNo >= N_CARDS) + return FALSE; + *po_pBus = l_Bus; + *po_pSlot = l_Slot; + return TRUE; +} +#endif + +BOOLEAN PciFindPdoByPdoAndLocation( + IN PDEVICE_OBJECT pi_pPdo, + IN ULONG pi_Bus, + IN ULONG pi_Slot, + IN ULONG pi_Function, + OUT PDEVICE_OBJECT * po_pPdo ) +{ + PDRIVER_OBJECT l_pDrv; + PDEVICE_OBJECT l_pPdo; + NTSTATUS l_Status; + ULONG l_Slot, l_Bus, l_Function; + + // get to the PCI driver + l_pDrv = pi_pPdo->DriverObject; + + // loop over all the PCI driver devices + for ( l_pPdo = l_pDrv->DeviceObject; l_pPdo; l_pPdo = l_pPdo->NextDevice ) { + // if it's not PDO - skip it + if (!(l_pPdo->Flags & DO_BUS_ENUMERATED_DEVICE)) + continue; + // get the location of the device of that PDO + l_Status = MdGetDevLocation( l_pPdo, &l_Bus, &l_Slot, &l_Function ); + if (l_Status != STATUS_SUCCESS) + continue; + // check, whether it's our device + if (l_Bus == pi_Bus && l_Slot == pi_Slot && l_Function == pi_Function) + break; + } + + // check whether we found the PDO + if (!l_pPdo) + return FALSE; + *po_pPdo = l_pPdo; + return TRUE; +} + +/*------------------------------------------------------------------------------------------------------*/ +#ifdef __i386__ + +NTSTATUS +PciHdrRead( + IN PVOID pi_pBuffer, + IN ULONG pi_Bus, + IN ULONG pi_Slot + ) +{ + int l_Bytes; + +#pragma warning( push ) +#pragma warning( disable:4996 ) + l_Bytes = HalGetBusData( + PCIConfiguration, + pi_Bus, + pi_Slot, + pi_pBuffer, + PCI_HDR_SIZE<<2 + ); +#pragma warning( pop ) + if (l_Bytes != PCI_HDR_SIZE<<2) { + return STATUS_UNSUCCESSFUL; + } + + return STATUS_SUCCESS; +} +#endif +/*------------------------------------------------------------------------------------------------------*/ +NTSTATUS +PciFixCmdReg( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +{ + PDEVICE_OBJECT l_pLdo = pi_pMdDevContext->m_pLdo; /* Shrimp's PDO */ + BUS_INTERFACE_STANDARD l_Interface, *l_pInterface = &l_Interface; + ULONG l_Value, l_NewValue; + NTSTATUS l_Status = STATUS_SUCCESS; + PDEVICE_OBJECT l_pFdo; + + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) + { /* fix HCA command register from SHRIMP */ + + /* get the PDO of Bridge */ + if (!PciFindPdoByPdoAndLocation( l_pLdo, + pi_pMdDevContext->m_BridgeHdr.m_Bus, + pi_pMdDevContext->m_BridgeHdr.m_Slot, + 0, &l_pLdo )) { + MdKdPrint( DBGLVL_LOW,("(PciHdrWrite) Not found bridge PDO - can't restore the PCI header \n" )); + return STATUS_UNSUCCESSFUL; + } + l_pFdo = l_pLdo->AttachedDevice; + + // open interface to PCI driver + l_Status = PciIfOpen( l_pFdo, l_pLdo, l_pInterface ); + if (!NT_SUCCESS(l_Status)) { + MdKdPrint( DBGLVL_LOW,("(PciFixCmdReg) PciIfOpen failed (0x%x) \n", l_Status )); + return l_Status; + } + + // read reg + if (l_pInterface->GetBusData) + l_pInterface->GetBusData( l_pInterface->Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&l_Value, 4, sizeof(ULONG) ); + + // fix + l_NewValue = l_Value | 7; + + // write reg + if (l_pInterface->SetBusData) + l_pInterface->SetBusData( l_pInterface->Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&l_NewValue, 4, sizeof(ULONG) ); + + // close interface + PciIfClose( l_pInterface ); + + } /* fix HCA command register from SHRIMP */ + + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR + || pi_pMdDevContext->m_eDevType ==MD_DEV_IX_ARBEL_TM) + { /* fix command register for TAVOR */ + + l_pInterface = &pi_pMdDevContext->m_Interface; + // read reg + if (l_pInterface->GetBusData) + l_pInterface->GetBusData( l_pInterface->Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&l_Value, 4, sizeof(ULONG) ); + + // fix + l_NewValue = l_Value 
| 7; + + // write reg + if (l_pInterface->SetBusData) + l_pInterface->SetBusData( l_pInterface->Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&l_NewValue, 4, sizeof(ULONG) ); + + } /* fix command register for TAVOR */ + + MdKdPrint( DBGLVL_LOW,("(PciFixCmdReg) Cmd register: Old 0x%x New 0x%x \n", + l_Value, l_NewValue )); + + return STATUS_SUCCESS; +} + +/*------------------------------------------------------------------------------------------------------*/ + +#ifdef __i386__ +typedef enum { WRITE_WITH_SET_BUS, WRITE_WITH_SET_BUS_BY_OFFSET, WRITE_WITH_PCI_CONFIG } PCI_WRITE_T; + +NTSTATUS +PciHdrWrite( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PCHAR pi_pBuffer, + IN ULONG pi_Bus, + IN ULONG pi_Slot, + IN PCI_WRITE_T pi_Technique + ) +{ + if (pi_Technique == WRITE_WITH_PCI_CONFIG) { + NTSTATUS l_Status = STATUS_SUCCESS; + PDEVICE_OBJECT l_pLdo = pi_pMdDevContext->m_pLdo; /* Shrimp's PDO */ + BUS_INTERFACE_STANDARD l_Interface; + PDEVICE_OBJECT l_pFdo; + + /* the following algorithm suits only the SD */ + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) { + + /* protect */ + KSEM_ACQ(&pi_pMdDevContext->m_Sem); + + /* get the PDO of Bridge */ + if (!PciFindPdoByPdoAndLocation( l_pLdo, + pi_pMdDevContext->m_BridgeHdr.m_Bus, + pi_pMdDevContext->m_BridgeHdr.m_Slot, + 0, &l_pLdo )) { + MdKdPrint( DBGLVL_LOW,("(PciHdrWrite) Bridge PDO not found - can't restore the PCI header \n" )); + KSEM_REL(&pi_pMdDevContext->m_Sem); /* don't leak the semaphore on the error path */ + return STATUS_UNSUCCESSFUL; + } + l_pFdo = l_pLdo->AttachedDevice; + + // open interface to PCI driver + l_Status = PciIfOpen( l_pFdo, l_pLdo, &l_Interface ); + if (!NT_SUCCESS(l_Status)) { + MdKdPrint( DBGLVL_LOW,("(PciHdrWrite) PciIfOpen failed (0x%x) \n", l_Status )); + KSEM_REL(&pi_pMdDevContext->m_Sem); + return l_Status; + } + + // write header + if (l_Interface.SetBusData) { + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x08, 0x08, PCI_HDR_SIZE - 0x08 ); + } + + // write some fields once more + if (l_Interface.SetBusData) { + /* Bridge Control Register */ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x3c, 0x3c, 4 ); + /* The rest of header, including PCIX command register */ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + PCI_HDR_SIZE, PCI_HDR_SIZE, (PCI_HDR_SIZE<<2) - PCI_HDR_SIZE ); + /* Command Register */ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x04, 0x04, 4 ); + } + + // close interface + PciIfClose( &l_Interface ); + + KSEM_REL(&pi_pMdDevContext->m_Sem); + return l_Status; + } + else + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR || + pi_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) { + + /* protect */ + KSEM_ACQ(&pi_pMdDevContext->m_Sem); + + /* get the PDO of Bridge */ + if (!PciFindPdoByPdoAndLocation( l_pLdo, + pi_pMdDevContext->m_BridgeHdr.m_Bus, + pi_pMdDevContext->m_BridgeHdr.m_Slot, + 0, &l_pLdo )) { + MdKdPrint( DBGLVL_LOW,("(PciHdrWrite) Bridge PDO not found - can't restore the PCI header \n" )); + KSEM_REL(&pi_pMdDevContext->m_Sem); + return STATUS_UNSUCCESSFUL; + } + l_pFdo = l_pLdo->AttachedDevice; + + // open interface to PCI driver + l_Status = PciIfOpen( l_pFdo, l_pLdo, &l_Interface ); + if (!NT_SUCCESS(l_Status)) { + MdKdPrint( DBGLVL_LOW,("(PciHdrWrite) PciIfOpen failed (0x%x) \n", l_Status )); + KSEM_REL(&pi_pMdDevContext->m_Sem); + return l_Status; + } + + // write header + if (l_Interface.SetBusData) { + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x08, 0x08, PCI_HDR_SIZE - 0x08 ); + } + + // write some fields once more + if (l_Interface.SetBusData) { + /* Bridge Control Register 
*/ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x3c, 0x3c, 4 ); + /* The rest of header, including PCIX command register */ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + PCI_HDR_SIZE, PCI_HDR_SIZE, (PCI_HDR_SIZE<<2) - PCI_HDR_SIZE ); + /* Command Register */ + l_Interface.SetBusData( l_Interface.Context, PCI_WHICHSPACE_CONFIG, + pi_pBuffer + 0x04, 0x04, 4 ); + } + + // close interface + PciIfClose( &l_Interface ); + KSEM_REL(&pi_pMdDevContext->m_Sem); + return l_Status; + } + else + return STATUS_UNSUCCESSFUL; + + } + + else + + if (pi_Technique == WRITE_WITH_SET_BUS) { + int l_Bytes; + +#pragma warning( push ) +#pragma warning( disable:4996 ) + l_Bytes = HalSetBusData( + PCIConfiguration, + pi_Bus, + pi_Slot, + pi_pBuffer, + PCI_HDR_SIZE<<2 + ); +#pragma warning( pop ) + return STATUS_SUCCESS; + } + + else + + if (pi_Technique == WRITE_WITH_SET_BUS_BY_OFFSET) { + int l_Bytes; + int i; + + PULONG l_pBuf = (PULONG)pi_pBuffer; + + for (i = 0; i < PCI_HDR_SIZE; i++) { +#pragma warning( push ) +#pragma warning( disable:4996 ) + l_Bytes = HalSetBusDataByOffset( + PCIConfiguration, + pi_Bus, + pi_Slot, + &l_pBuf[i], + i<<2, + sizeof(ULONG) + ); +#pragma warning( pop ) + } + return STATUS_SUCCESS; + } + + else + return STATUS_UNSUCCESSFUL; +} +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +PciHdrSave( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +{ + ULONG l_MyBus = pi_pMdDevContext->m_BusNumber; + ULONG l_Slot, l_SecBus; + ULONG l_DevId; + NTSTATUS l_Status = STATUS_SUCCESS; + + /* prevent RESET if header not saved */ + pi_pMdDevContext->m_fMayReset = FALSE; + + /* NB: functionality supported only for MD_DEV_IX_TAVOR_SD and MD_DEV_IX_TAVOR_BD */ + if ((pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) || + (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_BD)) { + + /* + * My device + */ + l_DevId = (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_BD) ? 
+ (((int)(MD_DEV_ID_TAVOR_BD) << 16) | MLX_VENDOR_ID) : + (((int)(MD_DEV_ID_TAVOR_SD) << 16) | MLX_VENDOR_ID); + l_Slot = 0; + if (PciFindDeviceByBusAndId( l_MyBus, l_DevId, &l_Slot )) { + l_Status = PciHdrRead( &pi_pMdDevContext->m_MyHdr.m_Hdr[0], l_MyBus, l_Slot); + if (l_Status != STATUS_SUCCESS) + return l_Status; + } + else { /* my device not found */ + return STATUS_UNSUCCESSFUL; + } + pi_pMdDevContext->m_MyHdr.m_Bus = l_MyBus; + pi_pMdDevContext->m_MyHdr.m_Slot = l_Slot; + + + /* + * HCA device + */ + l_DevId = ((int)(MD_DEV_ID_TAVOR) << 16) | MLX_VENDOR_ID; + l_Slot = 0; + if (PciFindDeviceByBusAndId( l_MyBus, l_DevId, &l_Slot )) { + l_Status = PciHdrRead( &pi_pMdDevContext->m_HcaHdr.m_Hdr[0], l_MyBus, l_Slot); + if (l_Status != STATUS_SUCCESS) + return l_Status; + } + else { /* HCA device not found */ + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) + return STATUS_UNSUCCESSFUL; + pi_pMdDevContext->m_HcaHdr.m_Hdr[0] = 0; /* mark that the PCI header was not saved */ + } + pi_pMdDevContext->m_HcaHdr.m_Bus = l_MyBus; + pi_pMdDevContext->m_HcaHdr.m_Slot = l_Slot; + + /* + * Bridge device + */ + l_SecBus = l_MyBus; + if (PciFindBridgeByBus( l_SecBus, &l_MyBus, &l_Slot )) { + l_Status = PciHdrRead( &pi_pMdDevContext->m_BridgeHdr.m_Hdr[0], l_MyBus, l_Slot); + if (l_Status != STATUS_SUCCESS) + return l_Status; + } + else { /* Bridge device not found */ + return STATUS_UNSUCCESSFUL; + } + pi_pMdDevContext->m_BridgeHdr.m_Bus = l_MyBus; + pi_pMdDevContext->m_BridgeHdr.m_Slot = l_Slot; + + } + else + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR || + pi_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) { + + /* + * HCA device + */ + l_DevId = ((int)(MD_DEV_ID_TAVOR) << 16) | MLX_VENDOR_ID; + pi_pMdDevContext->m_HcaHdr.m_Hdr[0] = 0; /* mark that the PCI header was not saved */ + l_Slot = 0; + if (PciFindDeviceByBusAndId( l_MyBus, l_DevId, &l_Slot )) { + l_Status = PciHdrRead( &pi_pMdDevContext->m_HcaHdr.m_Hdr[0], l_MyBus, l_Slot); + if (l_Status != STATUS_SUCCESS) + return l_Status; + } + else { /* HCA device not found */ + return STATUS_UNSUCCESSFUL; + } + pi_pMdDevContext->m_HcaHdr.m_Bus = l_MyBus; + pi_pMdDevContext->m_HcaHdr.m_Slot = l_Slot; + + /* + * Bridge device + */ + l_SecBus = l_MyBus; + if (PciFindBridgeByBus( l_SecBus, &l_MyBus, &l_Slot )) { + l_Status = PciHdrRead( &pi_pMdDevContext->m_BridgeHdr.m_Hdr[0], l_MyBus, l_Slot); + if (l_Status != STATUS_SUCCESS) + return l_Status; + } + else { /* Bridge device not found */ + return STATUS_UNSUCCESSFUL; + } + pi_pMdDevContext->m_BridgeHdr.m_Bus = l_MyBus; + pi_pMdDevContext->m_BridgeHdr.m_Slot = l_Slot; + } + else + return STATUS_UNSUCCESSFUL; + + /* enable Reset */ + pi_pMdDevContext->m_fMayReset = TRUE; + + return STATUS_SUCCESS; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +PciHdrRestore( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +{ + + if (((pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) || + (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_BD))) { + /* + * Bridge device + */ + if (pi_pMdDevContext->m_BridgeHdr.m_Hdr[0]) { + PciHdrWrite( pi_pMdDevContext, + (PCHAR)&pi_pMdDevContext->m_BridgeHdr.m_Hdr[0], + pi_pMdDevContext->m_BridgeHdr.m_Bus, + pi_pMdDevContext->m_BridgeHdr.m_Slot, + WRITE_WITH_PCI_CONFIG); + } + + /* + * My device + */ + if (pi_pMdDevContext->m_MyHdr.m_Hdr[0]) { + PciHdrWrite( pi_pMdDevContext, + (PCHAR)&pi_pMdDevContext->m_MyHdr.m_Hdr[0], + pi_pMdDevContext->m_MyHdr.m_Bus, + pi_pMdDevContext->m_MyHdr.m_Slot, + 
WRITE_WITH_SET_BUS); + } + + /* + * HCA device + */ + if (pi_pMdDevContext->m_HcaHdr.m_Hdr[0]) { + PciHdrWrite( pi_pMdDevContext, + (PCHAR)&pi_pMdDevContext->m_HcaHdr.m_Hdr[0], + pi_pMdDevContext->m_HcaHdr.m_Bus, + pi_pMdDevContext->m_HcaHdr.m_Slot, + WRITE_WITH_SET_BUS); + } + } + else + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR || + pi_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) { + + /* + * Bridge device + */ + if (pi_pMdDevContext->m_BridgeHdr.m_Hdr[0]) { + PciHdrWrite( pi_pMdDevContext, + (PCHAR)&pi_pMdDevContext->m_BridgeHdr.m_Hdr[0], + pi_pMdDevContext->m_BridgeHdr.m_Bus, + pi_pMdDevContext->m_BridgeHdr.m_Slot, + WRITE_WITH_PCI_CONFIG); + } + + /* + * HCA device + */ + if (pi_pMdDevContext->m_HcaHdr.m_Hdr[0]) { + PciHdrWrite( pi_pMdDevContext, + (PCHAR)&pi_pMdDevContext->m_HcaHdr.m_Hdr[0], + pi_pMdDevContext->m_HcaHdr.m_Bus, + pi_pMdDevContext->m_HcaHdr.m_Slot, + WRITE_WITH_SET_BUS); + } + } + else + return STATUS_UNSUCCESSFUL; + + return STATUS_SUCCESS; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +PciReset( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +{ + ULONG l_ResetOffset = 0x0f0010; + ULONG l_ResetValue = 0x01000000; /* 1 in BigEndian */ + NTSTATUS l_Status; + + /* + * RESET + */ + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_BD) { /* we are burner device */ + PCICONF_WRITE( pi_pMdDevContext, + l_ResetOffset, + l_ResetValue, + l_Status ); + } + else + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR || + pi_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) { /* we are Tavor device */ + l_Status = PciDevReset(pi_pMdDevContext, l_ResetOffset, l_ResetValue ); + } + else + if (pi_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD) { /* we are service device */ + l_Status = PciDevReset(pi_pMdDevContext, l_ResetOffset, l_ResetValue ); + } + else { /* other devices not supported */ + l_Status = STATUS_UNSUCCESSFUL; + } + + /* + * wait for RESET end + */ + { + #define TIMEOUT_IN_USECS 1500000 /* 1.5 sec */ + LARGE_INTEGER timeout; + timeout.QuadPart = -10 * TIMEOUT_IN_USECS; /* negative = relative time, in 100-nsec units */ + KeDelayExecutionThread( KernelMode, FALSE, &timeout ); + } + + /* + * Restore + */ + l_Status = PciHdrRestore( pi_pMdDevContext ); + + return l_Status; + +} + +#endif +#endif + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS ReadBarInfo( + PMD_DEV_CONTEXT_T pi_pMdDevContext, + ULONG pi_LowOffset, + ULONG pi_HighOffset, + ULONG pi_SizeOffset, + PMD_BAR_T pi_pBar, + PCHAR pi_BarName, + ULONG pi_BarNum + ) +/*++ + +Routine Description: + + This routine reads BAR information from CR-space and stores it in the device context + +Arguments: + + pi_pMdDevContext....... My device context + pi_LowOffset........... offset of low part of BAR address + pi_HighOffset.......... offset of high part of BAR address + pi_SizeOffset.......... offset of BAR size + pi_pBar ............... pointer to BAR descriptor + pi_BarName............. BAR name for debug print + pi_BarNum.............. BAR number + +Return Value: + + NT status code. 
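+ + (Editor's illustrative note, not in the original source: the BAR size + register holds a 6-bit exponent, so a hypothetical hca_bar_size_0 reading + of 5 would yield m_ulMemSize = 1 << (5 + 20) = 32 MB in the computation below.)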
+ +--*/ +{ /* ReadBarInfo */ + + ULONG l_LowPart, l_HighPart, l_Size; + NTSTATUS l_Status; + LARGE_INTEGER l_Offset = { 0,0 }; + + // + // read the BAR registers + // + + /* read CR-space */ + PCICONF_READ( pi_pMdDevContext, pi_LowOffset, &l_LowPart, l_Status ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + PCICONF_READ( pi_pMdDevContext, pi_HighOffset, &l_HighPart, l_Status ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + PCICONF_READ( pi_pMdDevContext, pi_SizeOffset, &l_Size, l_Status ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + + /* store BAR parameters */ + pi_pBar->m_MemPhysAddr.LowPart = l_LowPart & 0xfff00000; + pi_pBar->m_MemPhysAddr.HighPart = l_HighPart; + pi_pBar->m_ulMemSize = 1 << ((l_Size & 63) + 20); /* l_Size is a 6-bit field */ + pi_pBar->m_usMemFlags = 0; + pi_pBar->m_ulKernelSize = pi_pBar->m_ulMemSize; + pi_pBar->m_ulKernelOffset = 0; + + /* recalculate boundaries of mapped memory for DDR */ + if (pi_BarNum == 2 && pi_pMdDevContext->m_ulDdrMapSize != -1) { + pi_pBar->m_ulKernelSize = pi_pMdDevContext->m_ulDdrMapSize; + pi_pBar->m_ulKernelOffset = pi_pMdDevContext->m_ulDdrMapOffset; + l_Offset.LowPart = pi_pMdDevContext->m_ulDdrMapOffset; + } /* for DDR - map some subset of memory */ + + /* map physical address into virtual kernel one */ + l_Offset.QuadPart += pi_pBar->m_MemPhysAddr.QuadPart; + pi_pBar->m_pKernelAddr = (PUCHAR) MmMapIoSpace( + l_Offset, pi_pBar->m_ulKernelSize, MmNonCached); + if (!pi_pBar->m_pKernelAddr) return STATUS_NO_MEMORY; + + /* debug print */ + MdKdPrint( DBGLVL_LOW ,("(ReadBarInfo) Dev %d %s: Phys 0x%I64x Size 0x%x, Virt 0x%x Size 0x%x \n", + g_DevParams[pi_pMdDevContext->m_eDevType].m_DevId, + pi_BarName, pi_pBar->m_MemPhysAddr.QuadPart, pi_pBar->m_ulMemSize, + pi_pBar->m_pKernelAddr, pi_pBar->m_ulKernelSize )); + + return STATUS_SUCCESS; +} + +NTSTATUS ReadBars(PMD_DEV_CONTEXT_T pi_pMdDevContext) +{ + NTSTATUS l_Status; + PMD_BAR_T l_pBar; + + /* BAR0 */ + l_pBar = &pi_pMdDevContext->m_Cr; + l_Status = ReadBarInfo( pi_pMdDevContext, + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_0_lsbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_0_msbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_size_0), + l_pBar, "CR", 0 ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + + /* BAR1 */ + l_pBar = &pi_pMdDevContext->m_Uar; + l_Status = ReadBarInfo( pi_pMdDevContext, + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_1_lsbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_1_msbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.hca_bar_size_1), + l_pBar, "UAR", 1 ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + + /* BAR2 */ + l_pBar = &pi_pMdDevContext->m_Ddr; + l_Status = ReadBarInfo( pi_pMdDevContext, + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.dmu_bar_0_lsbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.dmu_bar_0_msbs), + BYTE_OFFSET_A(Tavor->pcu0.pcu_address_decoder.dmu_bar_size_0), + l_pBar, "DDR", 2 ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + + return STATUS_SUCCESS; +} + +NTSTATUS ConfIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ) +{ + NTSTATUS l_Status; + PULONG l_pOffset; + PULONG l_pData; + ULONG l_Data; + PPCICONF_MODIFY_T pm; + PPCICONF_WRITE4_T pw; + + *po_pnBytes = 0; + + switch (pi_nIoControlCode) + { /* handle Ioctls */ + + case UDLL_MAKE_IOCTL(WIN_PCICONF_READ4): + /* call_result_t 
PciConfRead4( HANDLE h, DWORD offset, DWORD * p_data ) */ + l_pOffset = (PULONG)pi_pInBuffer; + l_pData = (PULONG)pi_pOutBuffer; + PCICONF_READ( pi_pMdDevContext, *l_pOffset, l_pData, l_Status ); + if (NT_SUCCESS(l_Status)) *po_pnBytes = sizeof(ULONG); + break; + + case UDLL_MAKE_IOCTL(WIN_PCICONF_WRITE4): + /* call_result_t PciConfWrite4( HANDLE h, PPCICONF_WRITE4_T params ) */ + pw = (PPCICONF_WRITE4_T)pi_pInBuffer; + PCICONF_WRITE( pi_pMdDevContext, pw->offset, pw->data, l_Status ); + break; + + case UDLL_MAKE_IOCTL(WIN_PCICONF_MODIFY): + /* call_result_t PciConfModify( HANDLE h, PPCICONF_MODIFY_T params, DWORD * p_old_data ) */ + pm = (PPCICONF_MODIFY_T)pi_pInBuffer; + l_pData = (PULONG)pi_pOutBuffer; + + PCICONF_READ( pi_pMdDevContext, pm->offset, &l_Data, l_Status ); + if (!NT_SUCCESS(l_Status)) break; + *l_pData = l_Data; /* return the old value */ + l_Data = (l_Data & ~pm->mask) | (pm->data & pm->mask); + l_Status = PCICONF_DATA_WRITE(pi_pMdDevContext, l_Data ); + if (NT_SUCCESS(l_Status)) *po_pnBytes = sizeof(ULONG); + break; + + case UDLL_MAKE_IOCTL(WIN_SOFT_RESET): + #ifdef __i386__ + l_Status = PciReset( pi_pMdDevContext ); + #else + l_Status = STATUS_NOT_IMPLEMENTED; + #endif + break; + + default: + MdKdPrint( DBGLVL_DEFAULT,("(ConfIoctl) Unsupported Ioctl 0x%x\n", pi_nIoControlCode)); + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + return l_Status; +} + +NTSTATUS +ConfIoctlFast( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PIRP pi_pIrp + ) +{ + NTSTATUS l_Status; + PIO_STACK_LOCATION l_pIrpStack; + + /* get pointer to IRP stack */ + l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + + switch (l_pIrpStack->Parameters.DeviceIoControl.IoControlCode) + { /* handle Ioctls */ + + case UDLL_MAKE_IOCTL_BUF(WIN_PCICONF_READ4): + /* call_result_t PciConfRead4( HANDLE h, DWORD offset, DWORD * p_data ) */ + PCICONF_READ( pi_pMdDevContext, + *(PULONG)pi_pIrp->AssociatedIrp.SystemBuffer, + (PULONG)pi_pIrp->AssociatedIrp.SystemBuffer, + l_Status ); + if (NT_SUCCESS(l_Status)) + pi_pIrp->IoStatus.Information = sizeof(ULONG); + else + pi_pIrp->IoStatus.Information = 0; + break; + + case UDLL_MAKE_IOCTL_BUF(WIN_PCICONF_WRITE4): + /* call_result_t PciConfWrite4( HANDLE h, PPCICONF_WRITE4_T params ) */ + PCICONF_WRITE( pi_pMdDevContext, + *((PULONG)pi_pIrp->AssociatedIrp.SystemBuffer + 0), + *((PULONG)pi_pIrp->AssociatedIrp.SystemBuffer + 1), + l_Status ); + pi_pIrp->IoStatus.Information = 0; + break; + + case UDLL_MAKE_IOCTL(WIN_SOFT_RESET): + #ifdef __i386__ + l_Status = PciReset( pi_pMdDevContext ); + #else + l_Status = STATUS_NOT_IMPLEMENTED; + #endif + pi_pIrp->IoStatus.Information = 0; + break; + + default: + MdKdPrint( DBGLVL_DEFAULT,("(ConfIoctlFast) Unsupported Ioctl 0x%x\n", + l_pIrpStack->Parameters.DeviceIoControl.IoControlCode)); + pi_pIrp->IoStatus.Information = 0; + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + return l_Status; +} + +void SetPciMasterBit( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext +) +{ + ULONG l_Data; + ULONG l_Offset = 4; + + PCICONF_DWORD_READ( pi_pMdDevContext, &l_Data, l_Offset ); + MdKdPrint( DBGLVL_LOW,("(SetPciMasterBit) CmdStatus Reg 0x%x\n",l_Data)); + l_Data |= 4; + PCICONF_DWORD_WRITE( pi_pMdDevContext, l_Data, l_Offset ); + PCICONF_DWORD_READ( pi_pMdDevContext, &l_Data, l_Offset ); + MdKdPrint( DBGLVL_LOW,("(SetPciMasterBit) CmdStatus Reg 0x%x\n",l_Data)); +} + +NTSTATUS MdMosalHelper( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN int cmd +) +{ + NTSTATUS l_Status = STATUS_NOT_IMPLEMENTED; + switch (cmd) { + case MD_HELPER_CARD_RESET: +#ifdef __i386__ + //l_Status = PciReset( pi_pMdDevContext ); +#endif + break; + default: + 
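+ /* unknown helper command: fall through and return STATUS_NOT_IMPLEMENTED. + * Editor's sketch (illustrative only, not part of the original patch): + * MD_HELPER_CARD_RESET is the sole command defined above, so a MOSAL-side + * caller would look roughly like + * + * if (!NT_SUCCESS(MdMosalHelper(l_pMdDevContext, MD_HELPER_CARD_RESET))) + * MdKdPrint( DBGLVL_LOW,("card reset helper not available\n")); + * + * note that the PciReset call above is compiled out, so the helper + * currently reports STATUS_NOT_IMPLEMENTED for every command. + */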
break; + } + return l_Status; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.h new file mode 100644 index 00000000..3fddba49 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConf.h @@ -0,0 +1,98 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_CONF_H_ +#define _MD_CONF_H_ + +NTSTATUS ReadBars(PMD_DEV_CONTEXT_T pi_pMdDevContext); + +NTSTATUS ConfIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ); + +NTSTATUS +ConfIoctlFast( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PIRP pi_pIrp + ); + +NTSTATUS +PciIfOpen( + IN PDEVICE_OBJECT pi_pFdo, + IN PDEVICE_OBJECT pi_pLdo, + IN PBUS_INTERFACE_STANDARD pi_pInterface + ); + +void +PciIfClose( + IN PBUS_INTERFACE_STANDARD pi_pInterface + ); + +void SetPciMasterBit( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext +); + +NTSTATUS +PciReset( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ); + +NTSTATUS +PciHdrSave( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ); + +NTSTATUS +PciFixCmdReg( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ); + +NTSTATUS +DrvReadWritePciConfig( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PVOID pi_pDataBuffer, + IN ULONG pi_nPciSpaceOffset, + IN ULONG pi_nDataLength, + IN BOOLEAN pi_fReadConfig + ); + +NTSTATUS MdMosalHelper( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN int cmd +); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConfPriv.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConfPriv.h new file mode 100644 index 00000000..93dc0375 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdConfPriv.h @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_CONF_PRIV_H_ +#define _MD_CONF_PRIV_H_ + +#include +#include +/* #include */ +typedef u_int32_t tavor_offset_t; + +static struct Tavor_st* const Tavor = (struct Tavor_st*)0; + +#define TAVOR_BIT_OFFSET(reg_path) \ + ((tavor_offset_t)(MT_ulong_ptr_t)&(reg_path )) + +/************************************************* +* the returned offset is in bytes, aligned to a word boundary +*************************************************/ +#define BYTE_OFFSET_A(reg_path) \ + ((tavor_offset_t)((TAVOR_BIT_OFFSET(reg_path)>>5)<<2)) + + +/* NB: the READ macro takes a pointer to the destination dword, while the WRITE macro takes the value itself (it applies '&' internally) */ +#define PCICONF_DWORD_READ(dev,buf,offset) DrvReadWritePciConfig(dev,buf,(offset),sizeof(long), TRUE ) +#define PCICONF_DWORD_WRITE(dev,buf,offset) DrvReadWritePciConfig(dev,&buf,(offset),sizeof(long), FALSE ) + +#define PCICONF_SET_ADDR(dev,buf) PCICONF_DWORD_WRITE(dev, (buf), dev->m_ulAddrOffset) +#define PCICONF_DATA_READ(dev,buf) PCICONF_DWORD_READ(dev, (buf), dev->m_ulDataOffset) +#define PCICONF_DATA_WRITE(dev,buf) PCICONF_DWORD_WRITE(dev, (buf), dev->m_ulDataOffset) + +/* indirect CR-space access: write the target address to the address register, then read/write the data register */ +#define PCICONF_READ(dev,off,buf,rc) \ + { \ + rc = PCICONF_SET_ADDR( (dev), (off) ); \ + if (NT_SUCCESS(rc)) \ + rc = PCICONF_DATA_READ((dev), (buf) ); \ + } + +#define PCICONF_WRITE(dev,off,buf,rc) \ + { \ + rc = PCICONF_SET_ADDR( (dev), (off) ); \ + if (NT_SUCCESS(rc)) \ + rc = PCICONF_DATA_WRITE((dev), (buf) ); \ + } + +typedef struct { + ULONG WhichSpace; + PVOID Buffer; + ULONG Offset; + ULONG POINTER_ALIGNMENT Length; +} DrvReadWriteConfig_t, *PDrvReadWriteConfig_t; + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.c new file mode 100644 index 00000000..dafe42f8 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "MdGen.h" +#include "MdIoctlSpec.h" + +call_result_t MOSAL_ioctl(int ops, void *pi, u_int32_t pi_sz, void *po, u_int32_t po_sz, u_int32_t* ret_po_sz_p ); +VIP_ret_t VIPKL_ioctl(int ops, void* rsct_arr, void *pi, u_int32_t isz, void *po, + u_int32_t osz, u_int32_t* bs_p ); +int IB_MGT_sys(int ops, void *proc_state_p, void *pi, u_int32_t pi_sz, + void *po, u_int32_t po_sz, u_int32_t* ret_po_sz_p ); +call_result_t MOSAL_manual_wrapper( void *res_p, int cmd, void *pi, void * po); + + + + +NTSTATUS CtlIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ) +{ + NTSTATUS l_Status; + PULONG l_BufSize; + PCTL_ENUM_T l_pBuf; + int cmd = UDLL_MAKE_FUNC(pi_nIoControlCode); + + *po_pnBytes = 0; + + /* handle MOSAL requests */ + if (cmd >= MOSAL_FUNC_BASE && cmd < MOSAL_FUNC_MANUAL) + { /* call MOSAL wrapper */ + l_Status = (NTSTATUS)MOSAL_ioctl(cmd, pi_pInBuffer, pi_nInBufLength, + pi_pOutBuffer, pi_nOutBufLength, po_pnBytes); + if (!l_Status) + *po_pnBytes = pi_nOutBufLength; + return l_Status; + } /* call MOSAL wrapper */ + + if (cmd >= MOSAL_FUNC_MANUAL && cmd < MOSAL_FUNC_END) + { /* call MOSAL manual wrapper */ + l_Status = (NTSTATUS)MOSAL_manual_wrapper(pi_pPcsContext->m_hMosal, cmd, pi_pInBuffer, pi_pOutBuffer); + if (!l_Status) + *po_pnBytes = pi_nOutBufLength; + return l_Status; + } /* call MOSAL manual wrapper */ + + /* handle VAPI requests */ + if (cmd >= VAPI_FUNC_BASE && cmd < VAPI_FUNC_END) + { /* call VAPI wrapper */ + return VIPKL_ioctl(cmd, pi_pPcsContext->m_hVipkl, pi_pInBuffer, pi_nInBufLength, + pi_pOutBuffer, pi_nOutBufLength, po_pnBytes ); + } /* call VAPI wrapper */ + + /* handle IB_MGT requests */ + if (cmd >= IBMGT_FUNC_BASE && cmd < IBMGT_FUNC_END) + { /* call IB_MGT wrapper */ + return IB_MGT_sys(cmd, pi_pPcsContext->m_hIbMgt, pi_pInBuffer, pi_nInBufLength, + pi_pOutBuffer, pi_nOutBufLength, po_pnBytes ); + } /* call IB_MGT wrapper */ + + switch (pi_nIoControlCode) + { /* handle Ioctls */ + + case UDLL_MAKE_IOCTL(WIN_CTL_ENUM): + /* call_result_t CtlEnum( HANDLE h, DWORD data_size, PCTL_ENUM_T p_data ) */ + l_BufSize = (PULONG)pi_pInBuffer; + l_pBuf = (PCTL_ENUM_T)pi_pOutBuffer; + l_pBuf->size = g_pDrvContext->m_DevNamesDbSize; + if (*l_BufSize < l_pBuf->size) { + MdKdPrint( DBGLVL_DEFAULT,("(CtlIoctl) Buffer too small: given %d, req-d %d\n", + *l_BufSize, l_pBuf->size)); + *po_pnBytes = sizeof(ULONG); + l_Status = STATUS_BUFFER_TOO_SMALL; + } + else { + RtlCopyMemory( &l_pBuf->data[0], &g_pDrvContext->m_DevNamesDb[0], l_pBuf->size ); + l_pBuf->cnt = g_pDrvContext->m_DevNamesDbCnt; + *po_pnBytes = sizeof(ULONG) + sizeof(ULONG) + l_pBuf->size; + l_Status = STATUS_SUCCESS; + } + break; + + default: + MdKdPrint( DBGLVL_DEFAULT,("(CtlIoctl) Unsupported Ioctl 0x%x\n", pi_nIoControlCode)); + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + return l_Status; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.h new file mode 100644 index 00000000..620d25f2 --- /dev/null +++ 
b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdCtl.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_CTL_H_ +#define _MD_CTL_H_ + +NTSTATUS CtlIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ); + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.c new file mode 100644 index 00000000..ea227f0c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.c @@ -0,0 +1,72 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if DBG + +#include "MdGen.h" + + +// data/code used only in the DBG build +/* global data */ +MD_DBG_DATA_T g_DbgData = { 0, 0 }; +PMD_DBG_DATA_T g_pDbgData = &g_DbgData; + +/*------------------------------------------------------------------------------------------------------*/ + +/* instrumented pool wrappers: count outstanding allocations to help catch leaks */ +PVOID +MdExAllocatePool( + IN POOL_TYPE PoolType, + IN ULONG NumberOfBytes + ) +{ + g_pDbgData->m_nExAllocCount++; + MdKdPrint( DBGLVL_HIGH,("MdExAllocatePool() nExAllocCount = %d\n", g_pDbgData->m_nExAllocCount )); + return ExAllocatePool( PoolType, NumberOfBytes ); + +} + +/*------------------------------------------------------------------------------------------------------*/ + + +VOID +MdExFreePool( + IN PVOID p + ) +{ + g_pDbgData->m_nExAllocCount--; + MdKdPrint( DBGLVL_HIGH,("MdExFreePool() nExAllocCount = %d\n", g_pDbgData->m_nExAllocCount )); + ExFreePool( p ); +} + + +#endif /* DBG */ + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.h new file mode 100644 index 00000000..6f972744 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdDbg.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _MD_DBG_H_ +#define _MD_DBG_H_ + + +#if DBG + + +/* a message is printed only if g_pDrvContext->m_nDebugPrintLevel >= its verbosity level */ + +#define DBGLVL_ALWAYS 0 /* always printed */ +#define DBGLVL_MINIMUM 1 /* minimum verbosity */ +#define DBGLVL_LOW 2 /* low verbosity */ +#define DBGLVL_MEDIUM 3 /* medium verbosity */ +#define DBGLVL_HIGH 4 /* highest 'safe' level (without severely affecting timing ) */ +#define DBGLVL_MAXIMUM 5 /* maximum level, may be dangerous */ +#define DBGLVL_DEFAULT DBGLVL_LOW /* default verbosity level if no registry override */ + + + +#ifndef DBGSTR_PREFIX +#define DBGSTR_PREFIX "Mdt: " +#endif + + + + +#define DPRINT DbgPrint + +#define TRAP() DbgBreakPoint() + + +#define MdDBGOUTSIZE 512 + + +typedef struct MD_DBG_DATA_S { + + /* mirrors device extension pending io count */ + ULONG m_nPendingIoCount; + + /* allocations counter */ + ULONG m_nExAllocCount; + +} MD_DBG_DATA_T, *PMD_DBG_DATA_T; + +/* these are declared in the debug 'c' file */ +extern PMD_DBG_DATA_T g_pDbgData; + + +static const PCHAR szIrpMajFuncDesc[] = +{ /* note: this relies on the values matching the indexes in wdm.h */ + "IRP_MJ_CREATE", + "IRP_MJ_CREATE_NAMED_PIPE", + "IRP_MJ_CLOSE", + "IRP_MJ_READ", + "IRP_MJ_WRITE", + "IRP_MJ_QUERY_INFORMATION", + "IRP_MJ_SET_INFORMATION", + "IRP_MJ_QUERY_EA", + "IRP_MJ_SET_EA", + "IRP_MJ_FLUSH_BUFFERS", + "IRP_MJ_QUERY_VOLUME_INFORMATION", + "IRP_MJ_SET_VOLUME_INFORMATION", + "IRP_MJ_DIRECTORY_CONTROL", + "IRP_MJ_FILE_SYSTEM_CONTROL", + "IRP_MJ_DEVICE_CONTROL", + "IRP_MJ_INTERNAL_DEVICE_CONTROL", + "IRP_MJ_SHUTDOWN", + "IRP_MJ_LOCK_CONTROL", + "IRP_MJ_CLEANUP", + "IRP_MJ_CREATE_MAILSLOT", + "IRP_MJ_QUERY_SECURITY", + "IRP_MJ_SET_SECURITY", + "IRP_MJ_POWER", + "IRP_MJ_SYSTEM_CONTROL", + "IRP_MJ_DEVICE_CHANGE", + "IRP_MJ_QUERY_QUOTA", + "IRP_MJ_SET_QUOTA", + "IRP_MJ_PNP" +}; +/* IRP_MJ_MAXIMUM_FUNCTION defined in wdm.h */ + + +static const PCHAR szPnpMnFuncDesc[] = +{ /* note: this relies on the values matching the indexes in wdm.h */ + + "IRP_MN_START_DEVICE", + "IRP_MN_QUERY_REMOVE_DEVICE", + "IRP_MN_REMOVE_DEVICE", + "IRP_MN_CANCEL_REMOVE_DEVICE", + "IRP_MN_STOP_DEVICE", + "IRP_MN_QUERY_STOP_DEVICE", + "IRP_MN_CANCEL_STOP_DEVICE", + "IRP_MN_QUERY_DEVICE_RELATIONS", + "IRP_MN_QUERY_INTERFACE", + "IRP_MN_QUERY_CAPABILITIES", + "IRP_MN_QUERY_RESOURCES", + "IRP_MN_QUERY_RESOURCE_REQUIREMENTS", + "IRP_MN_QUERY_DEVICE_TEXT", + "IRP_MN_FILTER_RESOURCE_REQUIREMENTS", + "IRP_MN_??? Unsupported code 0x0E", + "IRP_MN_READ_CONFIG", + "IRP_MN_WRITE_CONFIG", + "IRP_MN_EJECT", + "IRP_MN_SET_LOCK", + "IRP_MN_QUERY_ID", + "IRP_MN_QUERY_PNP_DEVICE_STATE", + "IRP_MN_QUERY_BUS_INFORMATION", + "IRP_MN_DEVICE_USAGE_NOTIFICATION", + "IRP_MN_SURPRISE_REMOVAL", + "IRP_MN_??? Unsupported code 0x18", + "IRP_MN_??? 
Unsupported code 0x19" +}; + + +#define IRP_PNP_MN_FUNCMAX IRP_MN_SURPRISE_REMOVAL + + + +static const PCHAR szSystemPowerState[] = +{ + "PowerSystemUnspecified", + "PowerSystemWorking", + "PowerSystemSleeping1", + "PowerSystemSleeping2", + "PowerSystemSleeping3", + "PowerSystemHibernate", + "PowerSystemShutdown", + "PowerSystemMaximum" +}; + +static const PCHAR szDevicePowerState[] = +{ + "PowerDeviceUnspecified", + "PowerDeviceD0", + "PowerDeviceD1", + "PowerDeviceD2", + "PowerDeviceD3", + "PowerDeviceMaximum" +}; + + + + +#define MdKdPrintCond( ilev, cond, _x_) \ + if(( g_pDrvContext && ilev <= g_pDrvContext->m_nDebugPrintLevel ) && ( cond )) { \ + DPRINT( DBGSTR_PREFIX ); \ + DPRINT _x_ ; \ + } + + + +#define MdKdPrint( ilev, _x_) MdKdPrintCond( ilev, TRUE, _x_ ) + + +#define MdTrapCond( ilev, cond ) if (( ilev <= g_pDrvContext->m_nDebugPrintLevel ) && (cond) ) TRAP() +#define MdTrap( ilev ) MdTrapCond( ilev, TRUE ) + + +#define MDASSERT( cond ) ASSERT( cond ) + +#define MdStringForDevState( devState ) szDevicePowerState[ devState ] + +#define MdStringForSysState( sysState ) szSystemPowerState[ sysState ] + +#define MdStringForPnpMnFunc( mnfunc ) szPnpMnFuncDesc[ mnfunc ] + +#define MdStringForIrpMjFunc( mjfunc ) szIrpMajFuncDesc[ mjfunc ] + +PVOID + MdExAllocatePool( + IN POOL_TYPE PoolType, + IN ULONG NumberOfBytes + ); + + +VOID + MdExFreePool( + IN PVOID p + ); + + + +#else /* if not DBG */ + +/* dummy definitions that go away in the retail build */ + +#define MdKdPrintCond( ilev, cond, _x_) +#define MdKdPrint( ilev, _x_) +#define MdTrapCond( ilev, cond ) +#define MdTrap( ilev ) +#define MDASSERT( cond ) +#define MdStringForDevState( devState ) +#define MdStringForSysState( sysState ) +#define MdStringForPnpMnFunc( mnfunc ) +#define MdStringForIrpMjFunc( mjfunc ) + +#define MdExAllocatePool( typ, siz ) ExAllocatePool( typ, siz ) +#define MdExFreePool( p ) ExFreePool( p ) + + +#endif /* DBG */ + +#endif /* included */ + + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGen.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGen.h new file mode 100644 index 00000000..340d1b69 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGen.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _MD_GEN_H_ +#define _MD_GEN_H_ + +/* system files */ +#include +#include +#include +#include + +/* driver files */ +#include "MdCard.h" +#include "MdDbg.h" +#include "MdUtil.h" +#include "MdIoctl.h" +#include "MdMsg.h" +#include "MdConf.h" +#include "MdCtl.h" +#include "MdPci.h" + +/* DLL API header files */ +#include "mosal.h" +#include "infinihost.h" + +/* macros */ +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGuid.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGuid.h new file mode 100644 index 00000000..5d6827a5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdGuid.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef GUID829H_INC +#define GUID829H_INC + +//#include + + +/* {FF836FB3-142B-4f52-8404-40F59CCBC8BD} */ +DEFINE_GUID(GUID_CLASS_GAMLA_CARD, +0xff836fb3, 0x142b, 0x4f52, 0x84, 0x4, 0x40, 0xf5, 0x9c, 0xcb, 0xc8, 0xbd); + +#endif /* end, #ifndef GUID829H_INC */ + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c new file mode 100644 index 00000000..ea967bd3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdIoctl.c @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#undef BYTE_OFFSET +//#include +//#include "mtib.h" + + +NTSTATUS +MdProcessIoctl( + IN PDEVICE_OBJECT pi_pDeviceObject, + IN PIRP pi_pIrp + ) +/*++ + +Routine Description: + + Dispatch table handler for IRP_MJ_DEVICE_CONTROL; + handles DeviceIoControl() calls from user mode + + +Arguments: + + pi_pDeviceObject - pointer to the FDO for this instance of the device. + + +Return Value: + + NT status code + +--*/ +{ /* MdProcessIoctl */ + + NTSTATUS l_Status; + PIO_STACK_LOCATION l_pIrpStack; + PVOID l_pInBuffer; + PVOID l_pOutBuffer; + ULONG l_nInBufLength; + ULONG l_nOutBufLength; + PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pDeviceObject->DeviceExtension; + ULONG l_nIoControlCode; + PCHAR l_pTmp; + PFILE_OBJECT l_pFileObject; + PMD_PCS_CONTEXT_T l_pPcs; + + /* register ioctl */ + MdKdPrint( DBGLVL_MAXIMUM,("(MdProcessIoctl) Enter\n")); + MdIncrementIoCount(l_pMdDevContext); + + /* handle service call */ + if ( l_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_SD ) { + KSEM_ACQ(&l_pMdDevContext->m_Sem); + l_Status = PciIoctlFast( l_pMdDevContext, pi_pIrp ); + KSEM_REL(&l_pMdDevContext->m_Sem); + goto Done; + } + if (l_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR_BD) { + KSEM_ACQ(&l_pMdDevContext->m_Sem); + l_Status = ConfIoctlFast( l_pMdDevContext, pi_pIrp ); + KSEM_REL(&l_pMdDevContext->m_Sem); + goto Done; + } + + /* handle functional call */ + + /* set default status */ + pi_pIrp->IoStatus.Status = STATUS_SUCCESS; + pi_pIrp->IoStatus.Information = 0; + + // Can't accept a new io request if: + // 1) device is removed, + // 2) has never been started, + // 3) is stopped, + // 4) has a remove request pending, + // 5) has a stop device pending + if ( !MdCanAcceptIoRequests( pi_pDeviceObject ) ) + { /* the request can't be accepted */ + + l_Status = STATUS_DELETE_PENDING; + goto Done; + + } /* the request can't be accepted */ + + /* get pointer to IRP stack */ + l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + + /* get file handle, serving as Process Id */ + l_pFileObject = l_pIrpStack->FileObject; + l_pPcs = (PMD_PCS_CONTEXT_T) l_pFileObject->FsContext; + MdKdPrint( DBGLVL_HIGH,("(MdProcessIoctl) File Object 0x%x, Pid 0x%x\n", l_pFileObject, MOSAL_getpid() )); + + // get pointers and lengths of the caller's (user's) IO buffer + l_nOutBufLength = l_pIrpStack->Parameters.DeviceIoControl.OutputBufferLength; + l_pOutBuffer = l_nOutBufLength ? MmGetSystemAddressForMdlSafe(pi_pIrp->MdlAddress, HighPagePriority ) : NULL; + l_nInBufLength = l_pIrpStack->Parameters.DeviceIoControl.InputBufferLength; + l_pInBuffer = l_nInBufLength ? 
pi_pIrp->AssociatedIrp.SystemBuffer : NULL; + l_nIoControlCode = l_pIrpStack->Parameters.DeviceIoControl.IoControlCode; + l_pTmp = l_pInBuffer; + + + // + // Handle Ioctls from User mode + // + + switch (l_pMdDevContext->m_eDevType) + { /* handle Ioctls */ + case MD_DEV_IX_TAVOR_BD: + l_Status = ConfIoctl( l_pMdDevContext, l_pPcs, l_nIoControlCode, l_pInBuffer, + l_nInBufLength, l_pOutBuffer, l_nOutBufLength, (PULONG)&pi_pIrp->IoStatus.Information ); + break; + + case MD_DEV_IX_TAVOR_SD: + case MD_DEV_IX_TAVOR: + case MD_DEV_IX_ARBEL_TM: + l_Status = PciIoctl( l_pMdDevContext, l_pPcs, l_nIoControlCode, l_pInBuffer, + l_nInBufLength, l_pOutBuffer, l_nOutBufLength, (PULONG)&pi_pIrp->IoStatus.Information ); + break; + + //case MD_DEV_IX_CTRL: + // l_Status = CtlIoctl( l_pMdDevContext, l_pPcs, l_nIoControlCode, l_pInBuffer, + // l_nInBufLength, l_pOutBuffer, l_nOutBufLength, (PULONG)&pi_pIrp->IoStatus.Information ); + // break; + + default: + MdKdPrint( DBGLVL_DEFAULT,("(MdProcessIoctl) Internal error - unknown device type %d\n", l_pMdDevContext->m_eDevType)); + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + +Done: + + /* store status */ + pi_pIrp->IoStatus.Status = l_Status; + + /* complete IRP */ + IoCompleteRequest( pi_pIrp, IO_NO_INCREMENT ); + + /* de-register ioctl */ + MdDecrementIoCount(l_pMdDevContext); + + return l_Status; + +} /* MdProcessIoctl */ + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdResetDevice( + IN PDEVICE_OBJECT DeviceObject + ) +/*++ + +Routine Description: + Checks port status; if OK, returns success and does nothing more; + if bad, attempts a reset (currently a stub that always succeeds) + +Arguments: + + DeviceObject - pointer to the device object for this instance of the + device. + + +Return Value: + + NT status code + +--*/ +{ + NTSTATUS ntStatus = STATUS_SUCCESS; + + MdKdPrint(DBGLVL_MAXIMUM,("(MdResetDevice) Enter \n")); + return ntStatus; +} + + + + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.c new file mode 100644 index 00000000..763ed883 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "MdGen.h" +#include "MdConfPriv.h" +#include "MdIoctlSpec.h" + +void PciRelease( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext + ) +{ + PMDL l_pMdl; + + /* release CR */ + l_pMdl = pi_pPcsContext->m_pCrMdl; + pi_pPcsContext->m_pCrMdl = NULL; +#ifdef DO_USER_UNMAP + if (l_pMdl) + MmUnlockPages(l_pMdl); +#endif + if (l_pMdl) + IoFreeMdl( l_pMdl ); + + /* release UAR */ + l_pMdl = pi_pPcsContext->m_pUarMdl; + pi_pPcsContext->m_pUarMdl = NULL; +#ifdef DO_USER_UNMAP + if (l_pMdl) + MmUnlockPages(l_pMdl); +#endif + if (l_pMdl) + IoFreeMdl( l_pMdl ); + + /* release DDR */ + l_pMdl = pi_pPcsContext->m_pDdrMdl; + pi_pPcsContext->m_pDdrMdl = NULL; +#ifdef DO_USER_UNMAP + if (l_pMdl) + MmUnlockPages(l_pMdl); +#endif + if (l_pMdl) + IoFreeMdl( l_pMdl ); + +} + +NTSTATUS PciIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ) +{ + NTSTATUS l_Status = STATUS_SUCCESS; + + /* prepare for error exit */ + *po_pnBytes = 0; + + switch (pi_nIoControlCode) + { /* handle Ioctls */ + + case UDLL_MAKE_IOCTL(WIN_PCI_GET_BAR_INFO): + { /* call_result_t PciGetBarInfo( HANDLE h, ULONG num, PPCICONF_BAR_INFO_T bar_p ) */ + + PULONG l_pBarNum = (PULONG)pi_pInBuffer; + PPCICONF_BAR_INFO_T l_pInfo = (PPCICONF_BAR_INFO_T)pi_pOutBuffer; + PMD_BAR_T l_pBar; + PCHAR l_pUserAddr; + PMDL l_pMdl, *l_ppSavedMdl; + + /* find bar descriptor */ + switch (*l_pBarNum) { + case 0: l_pBar = &pi_pMdDevContext->m_Cr; l_ppSavedMdl = &pi_pPcsContext->m_pCrMdl; break; + case 1: l_pBar = &pi_pMdDevContext->m_Uar; l_ppSavedMdl = &pi_pPcsContext->m_pUarMdl; break; + case 2: l_pBar = &pi_pMdDevContext->m_Ddr; l_ppSavedMdl = &pi_pPcsContext->m_pDdrMdl; break; + default: + MdKdPrint( DBGLVL_LOW,("(PciIoctl) Illegal BAR number %d\n", *l_pBarNum)); + return STATUS_INVALID_PARAMETER; + } + + /* prepare for mapping to user space */ + l_pMdl = IoAllocateMdl( l_pBar->m_pKernelAddr, l_pBar->m_ulKernelSize, FALSE,FALSE,NULL); + if (l_pMdl == NULL) { + MdKdPrint( DBGLVL_LOW,("(PciIoctl) IoAllocateMdl failed\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + MmBuildMdlForNonPagedPool(l_pMdl); /* fill MDL */ + + /* map the buffer into user space */ + l_pUserAddr = MmMapLockedPagesSpecifyCache( l_pMdl, UserMode, MmNonCached, + NULL, FALSE, NormalPagePriority ); + if (l_pUserAddr == NULL) { + MdKdPrint( DBGLVL_LOW,("(PciIoctl) MmMapLockedPagesSpecifyCache failed\n")); + IoFreeMdl( l_pMdl ); + return STATUS_UNSUCCESSFUL; + } + *l_ppSavedMdl = l_pMdl; + + /* store the results */ + l_pInfo->ptr = (MT_ulong_ptr_t)l_pUserAddr; + l_pInfo->size = l_pBar->m_ulKernelSize; + l_pInfo->LowPhysAddr = l_pBar->m_MemPhysAddr.LowPart; + l_pInfo->HighPhysAddr = l_pBar->m_MemPhysAddr.HighPart; + l_pInfo->TotalMemSize = l_pBar->m_ulMemSize; + l_pInfo->MappedSize = l_pBar->m_ulKernelSize; + l_pInfo->MappedOffset = l_pBar->m_ulKernelOffset; + *po_pnBytes = sizeof(PCICONF_BAR_INFO_T); + l_Status = STATUS_SUCCESS; + } + break; + + case UDLL_MAKE_IOCTL(WIN_SOFT_RESET): + #ifdef __i386__ + l_Status = PciReset( pi_pMdDevContext ); + #endif + break; + + default: + MdKdPrint( DBGLVL_DEFAULT,("(PciIoctl) Unsupported Ioctl 0x%x\n", pi_nIoControlCode)); + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + return l_Status; +} + +NTSTATUS +PciDevReset( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN ULONG 
pi_Offset, + IN ULONG pi_Value +) +{ + PMD_BAR_T l_pBar = &pi_pMdDevContext->m_Cr; + PULONG l_pResetReg; + if (!l_pBar->m_pKernelAddr) + return STATUS_UNSUCCESSFUL; + + l_pResetReg = (PULONG)(l_pBar->m_pKernelAddr + pi_Offset); + WRITE_REGISTER_ULONG( l_pResetReg, pi_Value ); + return STATUS_SUCCESS; +} + +NTSTATUS +PciIoctlFast( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PIRP pi_pIrp + ) +{ + NTSTATUS l_Status = STATUS_SUCCESS; + PIO_STACK_LOCATION l_pIrpStack; + + /* get pointer to IRP stack */ + l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + + switch (l_pIrpStack->Parameters.DeviceIoControl.IoControlCode) + { /* handle Ioctls */ + + case UDLL_MAKE_IOCTL_BUF(WIN_PCICONF_READ4): + /* call_result_t PciConfRead4( HANDLE h, DWORD offset, DWORD * p_data ) */ + PCICONF_READ( pi_pMdDevContext, + *(PULONG)pi_pIrp->AssociatedIrp.SystemBuffer, + (PULONG)pi_pIrp->AssociatedIrp.SystemBuffer, + l_Status ); + if (NT_SUCCESS(l_Status)) + pi_pIrp->IoStatus.Information = sizeof(ULONG); + else + pi_pIrp->IoStatus.Information = 0; + break; + + case UDLL_MAKE_IOCTL_BUF(WIN_PCICONF_WRITE4): + /* call_result_t PciConfWrite4( HANDLE h, PPCICONF_WRITE4_T params ) */ + PCICONF_WRITE( pi_pMdDevContext, + *((PULONG)pi_pIrp->AssociatedIrp.SystemBuffer + 0), + *((PULONG)pi_pIrp->AssociatedIrp.SystemBuffer + 1), + l_Status ); + pi_pIrp->IoStatus.Information = 0; + break; + + case UDLL_MAKE_IOCTL(WIN_PCI_GET_BAR_INFO): + { /* call_result_t PciGetBarInfo( HANDLE h, ULONG num, PPCICONF_BAR_INFO_T bar_p ) */ + + PULONG l_pBarNum = (PULONG)pi_pIrp->AssociatedIrp.SystemBuffer; + PPCICONF_BAR_INFO_T l_pInfo = (PPCICONF_BAR_INFO_T)MmGetSystemAddressForMdlSafe(pi_pIrp->MdlAddress, HighPagePriority ); + PMD_BAR_T l_pBar; + PCHAR l_pUserAddr; + PMDL l_pMdl, *l_ppSavedMdl; + PFILE_OBJECT l_pFileObject; + PMD_PCS_CONTEXT_T l_pPcs; + + pi_pIrp->IoStatus.Information = 0; + + /* get file handle, serving as Process Id */ + l_pFileObject = l_pIrpStack->FileObject; + l_pPcs = (PMD_PCS_CONTEXT_T) l_pFileObject->FsContext; + MdKdPrint( DBGLVL_HIGH,("(PciIoctlFast) File Object 0x%x, Pid 0x%x\n", l_pFileObject, MOSAL_getpid())); + + + /* find bar descriptor */ + switch (*l_pBarNum) { + case 0: l_pBar = &pi_pMdDevContext->m_Cr; l_ppSavedMdl = &l_pPcs->m_pCrMdl; break; + case 1: l_pBar = &pi_pMdDevContext->m_Uar; l_ppSavedMdl = &l_pPcs->m_pUarMdl; break; + case 2: l_pBar = &pi_pMdDevContext->m_Ddr; l_ppSavedMdl = &l_pPcs->m_pDdrMdl; break; + default: + MdKdPrint( DBGLVL_LOW,("(PciIoctlFast) Illegal BAR number %d\n", *l_pBarNum)); + l_Status = STATUS_INVALID_PARAMETER; + break; + } + + if (!NT_SUCCESS(l_Status)) + break; /* illegal BAR number - l_pBar was never set */ + + /* prepare for mapping to user space */ + l_pMdl = IoAllocateMdl( l_pBar->m_pKernelAddr, l_pBar->m_ulKernelSize, FALSE,FALSE,NULL); + if (l_pMdl == NULL) { + MdKdPrint( DBGLVL_LOW,("(PciIoctlFast) IoAllocateMdl failed\n")); + l_Status = STATUS_INSUFFICIENT_RESOURCES; + break; + } + MmBuildMdlForNonPagedPool(l_pMdl); /* fill MDL */ + + /* map the buffer into user space */ + l_pUserAddr = MmMapLockedPagesSpecifyCache( l_pMdl, UserMode, MmNonCached, + NULL, FALSE, NormalPagePriority ); + if (l_pUserAddr == NULL) { + MdKdPrint( DBGLVL_LOW,("(PciIoctlFast) MmMapLockedPagesSpecifyCache failed\n")); + IoFreeMdl( l_pMdl ); + l_Status = STATUS_UNSUCCESSFUL; + break; + } + *l_ppSavedMdl = l_pMdl; + + /* store the results */ + l_pInfo->ptr = (MT_ulong_ptr_t)l_pUserAddr; + l_pInfo->size = l_pBar->m_ulKernelSize; + l_pInfo->LowPhysAddr = l_pBar->m_MemPhysAddr.LowPart; + l_pInfo->HighPhysAddr = l_pBar->m_MemPhysAddr.HighPart; + l_pInfo->TotalMemSize = l_pBar->m_ulMemSize; + 
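+ /* Editor's note (illustrative, not in the original patch): for BAR2 (DDR) + * only a window of the BAR may be mapped - ReadBarInfo honors + * m_ulDdrMapSize/m_ulDdrMapOffset - and MappedSize/MappedOffset below + * describe that window. A hypothetical user-mode consumer of + * PCICONF_BAR_INFO_T would therefore access + * + * ((PUCHAR)bar.ptr)[i] only for 0 <= i < bar.MappedSize + * + * i.e. DDR offsets bar.MappedOffset .. bar.MappedOffset + bar.MappedSize. + */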
l_pInfo->MappedSize = l_pBar->m_ulKernelSize; + l_pInfo->MappedOffset = l_pBar->m_ulKernelOffset; + pi_pIrp->IoStatus.Information = sizeof(PCICONF_BAR_INFO_T); + l_Status = STATUS_SUCCESS; + break; + } + + case UDLL_MAKE_IOCTL(WIN_SOFT_RESET): + #ifdef __i386__ + l_Status = PciReset( pi_pMdDevContext ); + #endif + break; + + + default: + MdKdPrint( DBGLVL_DEFAULT,("(PciIoctlFast) Unsupported Ioctl 0x%x\n", + l_pIrpStack->Parameters.DeviceIoControl.IoControlCode)); + pi_pIrp->IoStatus.Information = 0; + l_Status = STATUS_NOT_IMPLEMENTED; + break; + + } /* handle Ioctls */ + + return l_Status; +} + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.h new file mode 100644 index 00000000..2ded9024 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPci.h @@ -0,0 +1,64 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MD_PCI_H_ +#define _MD_PCI_H_ + +NTSTATUS PciIoctl( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext, + IN ULONG pi_nIoControlCode, + IN PVOID pi_pInBuffer, + IN ULONG pi_nInBufLength, + IN PVOID pi_pOutBuffer, + IN ULONG pi_nOutBufLength, + OUT PULONG po_pnBytes + ); + +void PciRelease( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PMD_PCS_CONTEXT_T pi_pPcsContext + ); + +NTSTATUS +PciDevReset( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN ULONG pi_Offset, + IN ULONG pi_Value +); + +NTSTATUS +PciIoctlFast( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PIRP pi_pIrp + ); + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPnp.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPnp.c new file mode 100644 index 00000000..2dd3c8a9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPnp.c @@ -0,0 +1,1244 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "MdGen.h" +#include +#include "MdCard.h" +#include "mosal_util.h" + +static int IB_MGT_started = 0; +extern NTSTATUS MdMosalHelper( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN int cmd +); + +int IsIbMgtOn() { return (IB_MGT_started>0) ? 1 : 0; } +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdProcessPnPIrp( + IN PDEVICE_OBJECT pi_pFdo, + IN PIRP pi_pIrp + ) +/*++ + +Routine Description: + + Dispatch table routine for IRP_MJ_PNP. + Process the Plug and Play IRPs sent to this device. + +Arguments: + + pi_pFdo - pointer to our FDO (Functional Device Object) + + pi_pIrp - pointer to an I/O Request Packet + +Return Value: + + NT status code + +--*/ +{ + + PIO_STACK_LOCATION l_pIrpStack; + PMD_DEV_CONTEXT_T l_pMdDevContext; + NTSTATUS l_Status = STATUS_SUCCESS; + NTSTATUS l_WaitStatus; + PDEVICE_OBJECT l_pLdo; + KEVENT startDeviceEvent; + + + // + // Get a pointer to the current location in the Irp. This is where + // the function codes and parameters are located. + // + l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + + // + // Get a pointer to the device extension + // + + l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pFdo->DeviceExtension; + l_pLdo = l_pMdDevContext->m_pLdo; + + MdKdPrint( DBGLVL_MEDIUM, ( "enter MdProcessPnPIrp() IRP_MJ_PNP, minor %s\n", + MdStringForPnpMnFunc( l_pIrpStack->MinorFunction ) )); + + // inc the FDO device extension's pending IO count for this Irp + MdIncrementIoCount(l_pMdDevContext); + + MDASSERT( IRP_MJ_PNP == l_pIrpStack->MajorFunction ); + + switch (l_pIrpStack->MinorFunction) { + case IRP_MN_START_DEVICE: + + // The PnP Manager sends this IRP after it has assigned resources, + // if any, to the device. The device may have been recently enumerated + // and is being started for the first time, or the device may be + // restarting after being stopped for resource reconfiguration. 
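+	// Illustrative sketch (editorial addition; 'ForwardIrpSynchronously' is a
+	// hypothetical name, not defined in this patch): the START_DEVICE handling
+	// below is an instance of the standard WDM "forward and wait" idiom, whose
+	// core distills to:
+	//
+	//	NTSTATUS ForwardIrpSynchronously( PDEVICE_OBJECT pLdo, PIRP pIrp )
+	//	{
+	//		KEVENT		l_Event;
+	//		NTSTATUS	l_St;
+	//
+	//		KeInitializeEvent( &l_Event, NotificationEvent, FALSE );
+	//		IoCopyCurrentIrpStackLocationToNext( pIrp );
+	//		// the completion routine only signals l_Event and returns
+	//		// STATUS_MORE_PROCESSING_REQUIRED (see MdIrpCompletionRoutine)
+	//		IoSetCompletionRoutine( pIrp, MdIrpCompletionRoutine, &l_Event,
+	//			TRUE, TRUE, TRUE );
+	//		l_St = IoCallDriver( pLdo, pIrp );
+	//		if (l_St == STATUS_PENDING) {
+	//			KeWaitForSingleObject( &l_Event, Executive, KernelMode, FALSE, NULL );
+	//			l_St = pIrp->IoStatus.Status;
+	//		}
+	//		return l_St;	// the caller still owns and must complete the IRP
+	//	}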
+
+	// Initialize an event we can wait on for the PDO to be done with this irp
+	KeInitializeEvent(&startDeviceEvent, NotificationEvent, FALSE);
+	IoCopyCurrentIrpStackLocationToNext(pi_pIrp);
+
+	// Set a completion routine so it can signal our event when
+	// the PDO is done with the Irp
+	IoSetCompletionRoutine(pi_pIrp,
+		MdIrpCompletionRoutine,
+		&startDeviceEvent,	// pass the event to the completion routine as the Context
+		TRUE,	// invoke on success
+		TRUE,	// invoke on error
+		TRUE);	// invoke on cancellation
+
+	// let the PDO process the IRP
+	l_Status = IoCallDriver(l_pLdo,
+		pi_pIrp);
+
+	// if PDO is not done yet, wait for the event to be set in our completion routine
+	if (l_Status == STATUS_PENDING) {
+		// wait for irp to complete
+
+		l_WaitStatus = KeWaitForSingleObject(
+			&startDeviceEvent,
+			Suspended,
+			KernelMode,
+			FALSE,
+			NULL);
+
+		l_Status = pi_pIrp->IoStatus.Status;
+	}
+
+	if (NT_SUCCESS(l_Status)) {
+
+		// Now we're ready to do our own startup processing:
+		// re-read the card's PCI configuration data, store it in the
+		// device context and register the device with MOSAL and the
+		// HCA access layer (see MdStartDevice).
+		l_Status = MdStartDevice(pi_pFdo, pi_pIrp);
+		pi_pIrp->IoStatus.Status = l_Status;
+	}
+
+	IoCompleteRequest (pi_pIrp,
+		IO_NO_INCREMENT
+		);
+
+	MdDecrementIoCount(l_pMdDevContext);
+	return l_Status;	// end, case IRP_MN_START_DEVICE
+
+	case IRP_MN_QUERY_STOP_DEVICE:
+
+	// The IRP_MN_QUERY_STOP_DEVICE/IRP_MN_STOP_DEVICE sequence only occurs
+	// during "polite" shutdowns, such as the user explicitly requesting that
+	// the service be stopped, or requesting unplug from the PnP tray icon.
+	// This sequence is NOT received during "impolite" shutdowns,
+	// such as someone suddenly yanking the card or otherwise
+	// unexpectedly disabling/resetting the device.
+
+	// If a driver sets STATUS_SUCCESS for this IRP,
+	// the driver must not start any operations on the device that
+	// would prevent that driver from successfully completing an IRP_MN_STOP_DEVICE
+	// for the device.
+	// For mass storage devices such as disk drives, while the device is in the
+	// stop-pending state, the driver holds IRPs that require access to the device;
+	// this device has no such 'persistent storage' requirement, so we will just
+	// refuse any more IO until restarted or the stop is cancelled.
+
+	// If a driver in the device stack determines that the device cannot be
+	// stopped for resource reconfiguration, the driver is not required to pass
+	// the IRP down the device stack. If a query-stop IRP fails,
+	// the PnP Manager sends an IRP_MN_CANCEL_STOP_DEVICE to the device stack,
+	// notifying the drivers for the device that the query has been cancelled
+	// and that the device will not be stopped.
+ + + // It is possible to receive this irp when the device has not been started + // ( as on a boot device ) + if (!l_pMdDevContext->m_DeviceStarted) { // if get when never started, just pass on + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() IRP_MN_QUERY_STOP_DEVICE when device not started\n")); + IoSkipCurrentIrpStackLocation (pi_pIrp); + l_Status = IoCallDriver (l_pMdDevContext->m_pLdo, pi_pIrp); + MdDecrementIoCount(l_pMdDevContext); + + return l_Status; + } + + + // fail the request if we have any IRPS in progress + if( g_pDrvContext->m_pCtlDevContext->m_nPendingIoCnt > 1) { + l_Status = STATUS_UNSUCCESSFUL; + } + else { + // We'll not veto it; pass it on and flag that stop was requested. + // Once m_StopDeviceRequested is set no new IOCTL or read/write irps will be passed + // down the stack to lower drivers; all will be quickly failed + l_pMdDevContext->m_StopDeviceRequested = TRUE; + pi_pIrp->IoStatus.Status = STATUS_SUCCESS; + } + + break; // end, case IRP_MN_QUERY_STOP_DEVICE + + case IRP_MN_CANCEL_STOP_DEVICE: + + // The PnP Manager uses this IRP to inform the drivers for a device + // that the device will not be stopped for resource reconfiguration. + // This should only be received after a successful IRP_MN_QUERY_STOP_DEVICE. + + + // It is possible to receive this irp when the device has not been started + if (!l_pMdDevContext->m_DeviceStarted) { // if get when never started, just pass on + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() IRP_MN_CANCEL_STOP_DEVICE when device not started\n")); + IoSkipCurrentIrpStackLocation (pi_pIrp); + l_Status = IoCallDriver (l_pMdDevContext->m_pLdo, pi_pIrp); + MdDecrementIoCount(l_pMdDevContext); + return l_Status; + } + + // Reset this flag so new IOCTL and IO Irp processing will be re-enabled + l_pMdDevContext->m_StopDeviceRequested = FALSE; + pi_pIrp->IoStatus.Status = STATUS_SUCCESS; + break; // end, case IRP_MN_CANCEL_STOP_DEVICE + + case IRP_MN_STOP_DEVICE: + + // The PnP Manager sends this IRP to stop a device so it can reconfigure + // its hardware resources. The PnP Manager only sends this IRP if a prior + // IRP_MN_QUERY_STOP_DEVICE completed successfully. + + + // Cancel any pending io requests. (there shouldn't be any) + MdCancelPendingIo( pi_pFdo ); + // + // stop the device + // + l_Status = MdStopDevice(pi_pFdo); + pi_pIrp->IoStatus.Status = l_Status; + + break; // end, case IRP_MN_STOP_DEVICE + + + + case IRP_MN_QUERY_REMOVE_DEVICE: + + // In response to this IRP, drivers indicate whether the device can be + // removed without disrupting the system. + // If a driver determines it is safe to remove the device, + // the driver completes any outstanding I/O requests, arranges to hold any subsequent + // read/write requests, and sets pi_pIrp->IoStatus.Status to STATUS_SUCCESS. Function + // and filter drivers then pass the IRP to the next-lower driver in the device stack. + // The underlying bus driver calls IoCompleteRequest. + + // If a driver sets STATUS_SUCCESS for this IRP, the driver must not start any + // operations on the device that would prevent that driver from successfully completing + // an IRP_MN_REMOVE_DEVICE for the device. If a driver in the device stack determines + // that the device cannot be removed, the driver is not required to pass the + // query-remove IRP down the device stack. If a query-remove IRP fails, the PnP Manager + // sends an IRP_MN_CANCEL_REMOVE_DEVICE to the device stack, notifying the drivers for + // the device that the query has been cancelled and that the device will not be removed. 
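+	// Illustrative sketch (editorial addition; the real MdIncrementIoCount/
+	// MdDecrementIoCount are defined elsewhere in this patch, so the bodies
+	// below are assumptions): the pending-I/O accounting used throughout this
+	// dispatch routine is typically an interlocked counter plus two events,
+	// matching the comments further below ('one' => no pending I/O besides the
+	// AddDevice reference, 'zero' => safe to remove):
+	//
+	//	void MdIncrementIoCount( PMD_DEV_CONTEXT_T pCtx )
+	//	{
+	//		InterlockedIncrement( &pCtx->m_nPendingIoCnt );
+	//	}
+	//
+	//	void MdDecrementIoCount( PMD_DEV_CONTEXT_T pCtx )
+	//	{
+	//		LONG l_nCnt = InterlockedDecrement( &pCtx->m_nPendingIoCnt );
+	//		if (l_nCnt == 1)	// only the AddDevice reference remains
+	//			KeSetEvent( &pCtx->m_NoPendingIoEvent, IO_NO_INCREMENT, FALSE );
+	//		else if (l_nCnt == 0)	// the AddDevice reference is gone too
+	//			KeSetEvent( &pCtx->m_RemoveEvent, IO_NO_INCREMENT, FALSE );
+	//	}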
+
+	// It is possible to receive this irp when the device has not been started
+	if (!l_pMdDevContext->m_DeviceStarted) { // if get when never started, just pass on
+		MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() IRP_MN_QUERY_REMOVE_DEVICE when device not started\n"));
+		IoSkipCurrentIrpStackLocation (pi_pIrp);
+		l_Status = IoCallDriver (l_pMdDevContext->m_pLdo, pi_pIrp);
+		MdDecrementIoCount(l_pMdDevContext);
+
+		return l_Status;
+	}
+
+	// Once m_RemoveDeviceRequested is set no new IOCTL or read/write irps will be passed
+	// down the stack to lower drivers; all will be quickly failed
+	l_pMdDevContext->m_RemoveDeviceRequested = TRUE;
+
+	// Wait for any io request pending in our driver to
+	// complete before returning success.
+	// This event is set when l_pMdDevContext->PendingIoCount goes to 1
+#if 0
+	l_WaitStatus = KeWaitForSingleObject(
+		&l_pMdDevContext->m_NoPendingIoEvent,
+		Suspended,
+		KernelMode,
+		FALSE,
+		NULL);
+
+#else
+	{ // wait for all applications IRPs to all cards and all PnP and Power IRPs to this card to be finished
+		PVOID l_Objects[] = { &g_pDrvContext->m_pCtlDevContext->m_NoPendingIoEvent, &l_pMdDevContext->m_NoPendingIoEvent };
+
+		// Leo: match the inc at the beginning of the dispatch routine to get signal from MdDecrementIoCount()
+		MdDecrementIoCount(l_pMdDevContext);
+
+		// wait
+		l_WaitStatus = KeWaitForMultipleObjects( 2, l_Objects, WaitAll, Executive, KernelMode, FALSE, NULL, NULL );
+
+		// Leo: return the inc at the beginning of the dispatch routine
+		MdIncrementIoCount(l_pMdDevContext);
+	}
+#endif
+
+	pi_pIrp->IoStatus.Status = STATUS_SUCCESS;
+	break; // end, case IRP_MN_QUERY_REMOVE_DEVICE
+
+	case IRP_MN_CANCEL_REMOVE_DEVICE:
+
+	// The PnP Manager uses this IRP to inform the drivers
+	// for a device that the device will not be removed.
+	// It is sent only after a successful IRP_MN_QUERY_REMOVE_DEVICE.
+
+	if (!l_pMdDevContext->m_DeviceStarted) { // if get when never started, just pass on
+		MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() IRP_MN_CANCEL_REMOVE_DEVICE when device not started\n"));
+		IoSkipCurrentIrpStackLocation (pi_pIrp);
+		l_Status = IoCallDriver (l_pMdDevContext->m_pLdo, pi_pIrp);
+		MdDecrementIoCount(l_pMdDevContext);
+		return l_Status;
+	}
+
+	// Reset this flag so new IOCTL and IO Irp processing will be re-enabled
+	l_pMdDevContext->m_RemoveDeviceRequested = FALSE;
+	pi_pIrp->IoStatus.Status = STATUS_SUCCESS;
+
+	break; // end, case IRP_MN_CANCEL_REMOVE_DEVICE
+
+	case IRP_MN_SURPRISE_REMOVAL:
+	MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() IRP_MN_SURPRISE_REMOVAL\n"));
+
+	// For a surprise-style device removal ( i.e. sudden cord yank ),
+	// the physical device has already been removed, so the PnP Manager sends
+	// the remove IRP without a prior query-remove. A device can be in any state
+	// when it receives a remove IRP as a result of a surprise-style removal.
+
+	// match the inc at the beginning of the dispatch routine
+	MdDecrementIoCount(l_pMdDevContext);
+
+	//
+	// Once m_DeviceRemoved is set no new IOCTL or read/write irps will be passed
+	// down the stack to lower drivers; all will be quickly failed
+	//
+	l_pMdDevContext->m_DeviceRemoved = TRUE;
+
+	// Cancel any pending io requests; we may not have gotten a query first!
+	MdCancelPendingIo( pi_pFdo );
+
+	// Cancels all in progress requests.
+ MdAbortInPgsReqs( pi_pFdo ); + + // + // Mark this handled + // + pi_pIrp->IoStatus.Status = STATUS_SUCCESS; + + // We don't explicitly wait for the below driver to complete, but just make + // the call and go on, finishing cleanup + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + + l_Status = IoCallDriver(l_pLdo, + pi_pIrp); + + return l_Status; + + case IRP_MN_REMOVE_DEVICE: + + + // The PnP Manager uses this IRP to direct drivers to remove a device. + // For a "polite" device removal, the PnP Manager sends an + // IRP_MN_QUERY_REMOVE_DEVICE prior to the remove IRP. In this case, + // the device is in the remove-pending state when the remove IRP arrives. + // For a surprise-style device removal ( i.e. sudden cord yank ), + // the physical device has already been removed and the PnP Manager may not + // have sent IRP_MN_SURPRISE_REMOVAL. A device can be in any state + // when it receives a remove IRP as a result of a surprise-style removal. + + // match the inc at the begining of the dispatch routine + MdDecrementIoCount(l_pMdDevContext); + + // + // Once m_DeviceRemoved is set no new IOCTL or read/write irps will be passed + // down the stack to lower drivers; all will be quickly failed + // + l_pMdDevContext->m_DeviceRemoved = TRUE; + + // Cancel any pending io requests; we may not have gotten a query first! + MdCancelPendingIo( pi_pFdo ); + + // Cancels all in progress requests. + MdAbortInPgsReqs( pi_pFdo ); + + // + // Delete MDDK device + // + MdStopDevice(pi_pFdo); + + // We don't explicitly wait for the below driver to complete, but just make + // the call and go on, finishing cleanup + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + + l_Status = IoCallDriver(l_pLdo, + pi_pIrp); + // + // The final decrement to device extension PendingIoCount == 0 + // will set l_pMdDevContext->m_RemoveEvent, enabling device removal. + // If there is no pending IO at this point, the below decrement will be it. 
+ // + MdDecrementIoCount(l_pMdDevContext); + + // wait for any io request pending in our driver to + // complete for finishing the remove + +#if 0 + KeWaitForSingleObject( + &l_pMdDevContext->m_RemoveEvent, + Suspended, + KernelMode, + FALSE, + NULL); + +#else + { // wait for all applications IRPs to all cards and all PnP and Power IRPs to this card to be finished + PVOID l_Objects[] = { &g_pDrvContext->m_pCtlDevContext->m_NoPendingIoEvent, &l_pMdDevContext->m_RemoveEvent }; + + KeWaitForMultipleObjects( 2, l_Objects, WaitAll, Executive, KernelMode, FALSE, NULL, NULL ); + } +#endif + + // + // Detach the device + // + MdKdPrint( DBGLVL_DEFAULT,("MdProcessPnPIrp() Detaching from %08X\n", + l_pMdDevContext->m_pLdo)); + + IoDetachDevice(l_pMdDevContext->m_pLdo); + + // + // Delete the link and FDO we created + // + MdRemoveDevice(pi_pFdo); + + // + // if it was the last device - remove also the control device + // + if (g_pDrvContext->m_uDevNo == 1 ) + { /* it was the last IB device */ + + PMD_DEV_CONTEXT_T l_pCtlDevContext = g_pDrvContext->m_pCtlDevContext; + + KMUTEX_ACQ(&l_pCtlDevContext->m_Mutex); + if (l_pCtlDevContext->m_nOpenCount > 0) + { /* there is open handles - postpone removing */ + + // mark delete pending + l_pCtlDevContext->m_fDeletePending = TRUE; + KMUTEX_REL(&l_pCtlDevContext->m_Mutex); + + // wait for handles to be closed + KeWaitForSingleObject( + &l_pCtlDevContext->m_RemoveEvent, + Suspended, + KernelMode, + FALSE, + NULL); + + } /* there is open handles - postpone removing */ + else + KMUTEX_REL(&l_pCtlDevContext->m_Mutex); + + // remove control device + if (l_pCtlDevContext != NULL) + MdDevDeInit( l_pCtlDevContext ); + g_pDrvContext->m_pCtlDevContext = NULL; + + + } /* it was the last IB device */ + + return l_Status; // end, case IRP_MN_REMOVE_DEVICE + + case IRP_MN_QUERY_INTERFACE: + // Handle the query interface call to retrieve the HH handle for + // our HCA instance. + if( IsEqualGUID( l_pIrpStack->Parameters.QueryInterface.InterfaceType, + &GUID_MD_INTERFACE ) ) + { + if( l_pIrpStack->Parameters.QueryInterface.InterfaceSpecificData ) + { + struct _hca_if { + HH_hca_hndl_t hh_hndl; + void * kernel_crspace_addr; + ULONG kernel_crspace_size; + } *if_p = l_pIrpStack->Parameters.QueryInterface.InterfaceSpecificData; + + // Our interface. Return the HH HCA handle and other data + if_p->hh_hndl = l_pMdDevContext->m_hHhHca; + if_p->kernel_crspace_addr = l_pMdDevContext->m_Cr.m_pKernelAddr; + if_p->kernel_crspace_size = l_pMdDevContext->m_Cr.m_ulKernelSize; + + pi_pIrp->IoStatus.Status = STATUS_SUCCESS; + l_Status = STATUS_SUCCESS; + break; + } + } + // Fall through. + + default: + + // + // In this case we must not touch the status. As l_Status is + // STATUS_SUCCESS, we will skip the failure case and pass down the IRP + // untouched. 
+	//
+	MdKdPrint( DBGLVL_MEDIUM,("MdProcessPnPIrp() Minor PnP IOCTL 0x%x not handled\n", l_pIrpStack->MinorFunction));
+	} /* case MinorFunction */
+
+
+	if (!NT_SUCCESS(l_Status)) {
+
+		// if anything went wrong, return failure without passing Irp down
+		pi_pIrp->IoStatus.Status = l_Status;
+		IoCompleteRequest (pi_pIrp,
+			IO_NO_INCREMENT
+			);
+
+		MdDecrementIoCount(l_pMdDevContext);
+
+		MdKdPrint( DBGLVL_MINIMUM,("MdProcessPnPIrp() Exit MdProcessPnPIrp FAILURE %x\n", l_Status));
+		return l_Status;
+	}
+
+	IoCopyCurrentIrpStackLocationToNext(pi_pIrp);
+
+	//
+	// All PNP_POWER messages get passed to the m_pLdo
+	// we were given in PnPAddDevice
+	//
+
+	MdKdPrint( DBGLVL_MAXIMUM,("MdProcessPnPIrp() Passing PnP Irp down, status = %x\n", l_Status));
+
+	l_Status = IoCallDriver(l_pLdo,
+		pi_pIrp);
+
+	MdDecrementIoCount(l_pMdDevContext);
+
+	MdKdPrint( DBGLVL_MAXIMUM,("MdProcessPnPIrp() Exit MdProcessPnPIrp %x\n", l_Status));
+
+	return l_Status;
+}
+
+/*------------------------------------------------------------------------------------------------------*/
+
+NTSTATUS
+MdPnPAddDevice(
+	IN PDRIVER_OBJECT pi_pDrvObject,
+	IN PDEVICE_OBJECT pi_pPdo
+	)
+/*++
+
+Routine Description:
+
+	This routine is called to create and initialize our Functional Device Object (FDO).
+
+Arguments:
+
+	pi_pDrvObject - pointer to the driver object for this instance of the driver
+	pi_pPdo - pointer to a device object created by the bus
+
+Return Value:
+
+	STATUS_SUCCESS if successful,
+	STATUS_UNSUCCESSFUL otherwise
+
+--*/
+{
+	/* status */
+	NTSTATUS l_Status = STATUS_SUCCESS;
+	/* device object we create for the added device */
+	PDEVICE_OBJECT l_pFdo = NULL;
+	/* our context to this device */
+	PMD_DEV_CONTEXT_T l_pMdDevContext;
+	/* index */
+	ULONG l_nIx;
+	/* The device Win32 name as unicode string */
+	UNICODE_STRING l_usNtDeviceName = { 0 , 0 , NULL };
+	/* The Dos device name as unicode string */
+	UNICODE_STRING l_usDosDeviceName = { 0 , 0 , NULL };
+	// size of the returned DevId string
+	ULONG l_DevIdWstrSize;
+	// required size of the DevId string
+	ULONG l_DevIdWstrSizeReqd;
+	/* buffer for DevId string */
+	WCHAR l_DevIdWstr[5];
+	/* pointer to buffer for all Hardware ID strings */
+	WCHAR * l_pDevIdWstr;
+	/* Device BD id */
+	int l_DevIx;
+	/* Logging */
+	char * l_pReason = "Unknown";
+	char l_Buf[120];
+
+	/* debug print */
+	MdKdPrint( DBGLVL_DEFAULT,("(MdPnPAddDevice) enter \n"));
+
+	do
+	{ /* find out the type of the device */
+
+
+	//
+	// get the Device ID
+	//
+	l_DevIdWstrSize = 512;
+	while (1)
+	{ /* get device ID */
+
+		/* allocate buffer */
+		l_pDevIdWstr = (WCHAR *)MdExAllocatePool( PagedPool , l_DevIdWstrSize);
+		if (l_pDevIdWstr == NULL)
+		{ /* error */
+			MdKdPrint( DBGLVL_LOW,("(MdPnPAddDevice) MdExAllocatePool failed req-d size %d \n",
+				l_DevIdWstrSize ));
+			sprintf( l_Buf, "MdExAllocatePool failed req-d size %d \n", l_DevIdWstrSize);
+			l_pReason = l_Buf;
+			l_Status = STATUS_INSUFFICIENT_RESOURCES;
+			goto err;
+		} /* error */
+
+		/* Get the device Hardware ID */
+		l_Status = IoGetDeviceProperty(pi_pPdo, DevicePropertyHardwareID ,
+			l_DevIdWstrSize, (void*)l_pDevIdWstr, &l_DevIdWstrSizeReqd);
+
+		if (NT_SUCCESS(l_Status))
+		{ /* succeeded - get Device ID */
+
+			/* print the first string */
+			MdKdPrint( DBGLVL_LOW,("(MdPnPAddDevice) first Device HW ID %ws, total size %d \n",
+				l_pDevIdWstr, l_DevIdWstrSizeReqd));
+
+			/* extract DevId */
+			memcpy( l_DevIdWstr, l_pDevIdWstr + 17, 8 );
+			l_DevIdWstr[4] = 0;
+
+			/* free all the buffer */
+			MdExFreePool(l_pDevIdWstr);
+			break;
+
+		} /* succeeded - get Device ID */
+		else
+		if (l_Status == STATUS_BUFFER_TOO_SMALL)
+		{ /* prepare for reallocate */
+
+			MdExFreePool(l_pDevIdWstr);
+			l_DevIdWstrSize = l_DevIdWstrSizeReqd;
+			continue;
+
+		} /* prepare for reallocate */
+		else
+		{ /* error */
+
+			MdKdPrint( DBGLVL_LOW,("(MdPnPAddDevice) getting of DevicePropertyHardwareID failed (0x%x): ID %ws, size %d, req-d size %d \n",
+				l_Status, l_pDevIdWstr, sizeof(l_DevIdWstr), l_DevIdWstrSize ));
+			sprintf( l_Buf, "Getting of DevicePropertyHardwareID failed (0x%x): ID %ws, size %d, req-d size %d \n",
+				l_Status, l_pDevIdWstr, sizeof(l_DevIdWstr), l_DevIdWstrSize);
+			l_pReason = l_Buf;
+			MdExFreePool(l_pDevIdWstr);	/* don't leak the ID buffer on the error path */
+			goto err;
+
+		} /* error */
+
+	} /* get device ID */
+
+	/* find TAVOR device */
+	for (l_DevIx=0; l_DevIx<(int)MD_DEV_IX_LAST; l_DevIx++) {
+
+		if (NULL != wcsstr( l_DevIdWstr, g_DevParams[l_DevIx].m_DevIdWstr ))
+		{
+			MdKdPrint( DBGLVL_LOW,("(MdPnPAddDevice) Found device with ID %ws ! \n",
+				g_DevParams[l_DevIx].m_DevIdWstr));
+			break;
+		}
+	}
+
+	/* fail if the device type was not identified */
+	if (l_DevIx >= MD_DEV_IX_LAST) {
+		MdKdPrint( DBGLVL_LOW,("(MdPnPAddDevice) Failed to identify device with ID %ws ! \n", l_DevIdWstr));
+		sprintf( l_Buf, "Failed to identify device with ID %ws ! \n", l_DevIdWstr);
+		l_pReason = l_Buf;
+		l_Status = STATUS_NOT_SUPPORTED;
+		goto err;
+	}
+
+	} /* find out the type of the device */
+	while (0);
+
+	l_Status = CreateOneDevice( l_DevIx, &l_usNtDeviceName, &l_usDosDeviceName, &l_pMdDevContext);
+	if (!NT_SUCCESS(l_Status)) {
+		sprintf( l_Buf, "Failed to create device '%s' ! \n", g_DevParams[l_DevIx].m_Format);
+		l_pReason = l_Buf;
+		goto err;
+	}
+
+	// store card number
+	l_pMdDevContext->m_uCardNo = g_pDrvContext->m_uCardNo;
+
+	//
+	// remember the Physical device Object
+	//
+	l_pMdDevContext->m_pPdo = pi_pPdo;
+
+	//
+	// Attach to the PDO
+	//
+	l_pFdo = l_pMdDevContext->m_pFdo;
+	l_pMdDevContext->m_pLdo = IoAttachDeviceToDeviceStack(l_pFdo, pi_pPdo);
+
+	/* open direct PCI interface */
+	PciIfOpen( l_pMdDevContext->m_pFdo, l_pMdDevContext->m_pLdo, &l_pMdDevContext->m_Interface );
+
+	/* fix command register of HCA */
+	PciFixCmdReg( l_pMdDevContext );
+
+	/* set PCI master bit: no need for aux drivers */
+	//SetPciMasterBit( l_pMdDevContext );
+
+	// Get a copy of the physical device's capabilities into a
+	// DEVICE_CAPABILITIES struct in our device extension;
+	// We are most interested in learning which system power states
+	// are to be mapped to which device power states for handling
+	// IRP_MJ_SET_POWER Irps.
+	MdQueryCapabilities(l_pMdDevContext->m_pLdo, &l_pMdDevContext->m_DeviceCapabilities);
+
+
+	// We want to determine what level to auto-powerdown to; this is the lowest
+	// sleeping level that is LESS than D3;
+	// If all are set to D3, auto powerdown/powerup will be disabled.
+ + l_pMdDevContext->m_PowerDownLevel = PowerDeviceUnspecified; // init to disabled + + for (l_nIx=PowerSystemSleeping1; l_nIx<= PowerSystemSleeping3; l_nIx++) + { + if ( l_pMdDevContext->m_DeviceCapabilities.DeviceState[l_nIx] < PowerDeviceD3 ) + l_pMdDevContext->m_PowerDownLevel = l_pMdDevContext->m_DeviceCapabilities.DeviceState[l_nIx]; + } + +#if DBG + { + PDEVICE_CAPABILITIES l_pCap = &l_pMdDevContext->m_DeviceCapabilities; + // + // display the device caps + // + + MdKdPrint( DBGLVL_MEDIUM,(" >>>>>> ---------- DeviceCaps -----------\n")); + MdKdPrint( DBGLVL_MEDIUM,(" Version %d, Address 0x%x, UINum 0x%x, D1Lat %d, D2Lat %d, D3Lat %d\n", + l_pCap->Version, l_pCap->Address, l_pCap->UINumber, l_pCap->D1Latency, l_pCap->D2Latency, l_pCap->D3Latency )); + MdKdPrint( DBGLVL_MEDIUM,(" DevD1 %d, DevD2 %d, Lock %d, Eject %d, Removable %d, Dock %d\n", + l_pCap->DeviceD1, l_pCap->DeviceD2, l_pCap->LockSupported, + l_pCap->EjectSupported, l_pCap->Removable, l_pCap->DockDevice )); + MdKdPrint( DBGLVL_MEDIUM,(" UniqueId %d, Silent %d, RawDevOK %d, SurpriseRmvOK %d, HwDis %d, NonDyn %d\n", + l_pCap->UniqueID, l_pCap->SilentInstall, l_pCap->RawDeviceOK, + l_pCap->SurpriseRemovalOK, l_pCap->HardwareDisabled, l_pCap->NonDynamic )); + MdKdPrint( DBGLVL_MEDIUM,(" WakeD0 %d, WakeD1 %d, WakeD2 %d, WakeD3 %d WarmEject %d\n", + l_pCap->WakeFromD0, l_pCap->WakeFromD1, l_pCap->WakeFromD2, + l_pCap->WakeFromD3, l_pCap->WarmEjectSupported )); + MdKdPrint( DBGLVL_MEDIUM,(" SystemWake = %s\n", MdStringForSysState( l_pCap->SystemWake ) )); + MdKdPrint( DBGLVL_MEDIUM,(" DeviceWake = %s\n", MdStringForDevState( l_pCap->DeviceWake) )); + + for (l_nIx=PowerSystemUnspecified; l_nIx< PowerSystemMaximum; l_nIx++) + { + + MdKdPrint( DBGLVL_MEDIUM,(" Device State Map: sysstate %s = devstate %s\n", + MdStringForSysState( l_nIx ), + MdStringForDevState( l_pCap->DeviceState[l_nIx] ) )); + + } + MdKdPrint( DBGLVL_MEDIUM,(" <<<<<<<< ---------- DeviceCaps -----------\n")); + + } + +#endif + // We keep a pending IO count ( extension->PendingIoCount ) in the device extension. + // The first increment of this count is done on adding the device. + // Subsequently, the count is incremented for each new IRP received and + // decremented when each IRP is completed or passed on. + + // Transition to 'one' therefore indicates no IO is pending and signals + // l_pMdDevContext->NoPendingIoEvent. This is needed for processing + // IRP_MN_QUERY_REMOVE_DEVICE + + // Transition to 'zero' signals an event ( l_pMdDevContext->m_RemoveEvent ) + // to enable device removal. 
This is used in processing for IRP_MN_REMOVE_DEVICE + // + MdIncrementIoCount(l_pMdDevContext); + + if( NT_SUCCESS( l_Status ) ) + { + NTSTATUS l_ActStat; + + // try to power down device until IO actually requested + l_ActStat = MdSelfSuspendOrActivate( l_pFdo, TRUE ); + } + + // init DPC object + //IoInitializeDpcRequest(l_pFdo, MdDpcForIsr); + + // unmark delete pending + if ( g_pDrvContext->m_pCtlDevContext ) + { + KMUTEX_ACQ(&g_pDrvContext->m_pCtlDevContext->m_Mutex); + g_pDrvContext->m_pCtlDevContext->m_fDeletePending = FALSE; + KMUTEX_REL(&g_pDrvContext->m_pCtlDevContext->m_Mutex); + } + + // mark the end of device init + l_pFdo->Flags &= ~DO_DEVICE_INITIALIZING; + + MdKdPrint( DBGLVL_DEFAULT,("(MdPnPAddDevice) exit: (%x)\n", l_Status)); + return l_Status; + +err: +#pragma warning( push ) +#pragma warning( disable:4296 ) + MdKdPrint( DBGLVL_ALWAYS ,("(MdDeviceInit) Device failed to initialize \n")); +#pragma warning( pop ) + + /* Write to event log */ + WriteEventLogEntry( g_pDrvContext->m_pDrvObject, MD_EVENT_LOG_LOAD_ERROR, + 0, l_Status, 1, l_Status ); + return STATUS_UNSUCCESSFUL; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdStartDevice( + IN PDEVICE_OBJECT pi_pFdo, + IN PIRP pi_pIrp + ) +/*++ + +Routine Description: + + Called from MdProcessPnPIrp(), the dispatch routine for IRP_MJ_PNP. + Performs: + - re-read cards PCI configuration data and stores on the device context; + - add device to MDDK + +Arguments: + + pi_pFdo - pointer to FDO + pi_pIrp - pointer to IRP + +Return Value: + + NT status code + +--*/ +{ + PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pFdo->DeviceExtension; + NTSTATUS l_Status; + PIO_STACK_LOCATION l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + int l_nDevIx; + call_result_t l_MddkStatus; + MOSAL_dev_t l_MosalDevParams; + /* Logging */ + char * l_pReason = "Unknown"; + char l_Buf[120]; + ULONG l_EventCode = MD_EVENT_LOG_LOAD_ERROR; + card_hw_props_t l_HwProps; + + MdKdPrint( DBGLVL_DEFAULT,("(MdStartDevice) enter \n")); + + // init PCI card resources + l_Status = MdInitPciCfgCard( l_pMdDevContext, + l_pIrpStack->Parameters.StartDevice.AllocatedResources, + l_pIrpStack->Parameters.StartDevice.AllocatedResourcesTranslated ); + + // check the results + if (!NT_SUCCESS(l_Status)) { + l_pReason = "Read Pci Card Configuration failed"; + goto err; + } + +#ifdef __i386__ + /* save PCI header */ + PciHdrSave( l_pMdDevContext ); + + /* Reset the card */ + if (l_pMdDevContext->m_PerformReset == 1 && l_pMdDevContext->m_fMayReset) + PciReset( l_pMdDevContext ); +#endif + + /* + * add device to MOSAL + */ + + /* fill specific params */ + memset( &l_MosalDevParams, 0, sizeof(MOSAL_dev_t) ); + l_MosalDevParams.irq_num = (MOSAL_IRQ_ID_t)l_pMdDevContext->m_ulIntVector; + l_MosalDevParams.irql = l_pMdDevContext->m_ulIntLevel; + l_MosalDevParams.affinity = l_pMdDevContext->m_Affinity; + l_MosalDevParams.int_shared = l_pMdDevContext->m_fIntShared; + l_MosalDevParams.int_mode = l_pMdDevContext->m_IntMode; + l_MosalDevParams.fdo_p = l_pMdDevContext->m_pFdo; + l_MosalDevParams.ldo_p = l_pMdDevContext->m_pLdo; + l_MosalDevParams.bus = (u_int8_t)l_pMdDevContext->m_BusNumber; + l_MosalDevParams.dev_func = (u_int8_t)((l_pMdDevContext->m_DevNumber << 3) | l_pMdDevContext->m_Function); + strcpy( l_MosalDevParams.name, l_pMdDevContext->m_AsciiDevName ); + memcpy( (PVOID)&l_MosalDevParams.m_Cr, (PVOID)&l_pMdDevContext->m_Cr, sizeof(MD_BAR_T)); + memcpy( (PVOID)&l_MosalDevParams.m_Uar, 
(PVOID)&l_pMdDevContext->m_Uar, sizeof(MD_BAR_T)); + memcpy( (PVOID)&l_MosalDevParams.m_Ddr, (PVOID)&l_pMdDevContext->m_Ddr, sizeof(MD_BAR_T)); + l_MosalDevParams.drv_helper = (void*)MdMosalHelper; + l_MosalDevParams.drv_helper_ctx = (void*)l_pMdDevContext; + + /* add specific parameters to MOSAL */ + l_MddkStatus = MOSAL_add_device( &l_pMdDevContext->m_hMosal, &l_MosalDevParams ); + + /* find the device parameters table by PCI configuration Device ID */ + l_pMdDevContext->m_pMdhalParams = NULL; + for (l_nDevIx=0; l_nDevIx < MD_DEV_IX_LAST; l_nDevIx++) + { /* find the type */ + + if (g_DevParams[l_nDevIx].m_DevIx == l_pMdDevContext->m_eDevType) + { /* found the device type */ + l_pMdDevContext->m_pMdhalParams = &g_DevParams[l_nDevIx]; + break; + } /* found the device type */ + + } /* find the type */ + + if ( l_pMdDevContext->m_pMdhalParams == NULL ) { + l_Status = STATUS_NOT_SUPPORTED; + goto err; + } + + /* + * add device to TAVOR + */ + + if ((l_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR || + l_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) + && g_pDrvContext->m_fSupportTavor) + { /* add HCA to Tavor functional DLLs */ + + unsigned char l_RevId = 0; + char l_IntPin = 1; + HH_ret_t l_HhRet; + VAPI_ret_t l_VapiRet; + VAPI_hca_hndl_t l_hVapiHca; + HH_hca_hndl_t l_hHhHca; + + /* get hw revision id */ + DrvReadWritePciConfig( l_pMdDevContext, + &l_RevId, FIELD_OFFSET( PCI_COMMON_CONFIG, RevisionID ), + sizeof(l_RevId), IRP_MN_READ_CONFIG ); + + /* get interrupt pin (Who, the hell, needs it ?) */ + DrvReadWritePciConfig( l_pMdDevContext, + &l_IntPin, + FIELD_OFFSET( PCI_COMMON_CONFIG, u ) + + FIELD_OFFSET( struct _PCI_HEADER_TYPE_0, InterruptPin ), + sizeof(l_IntPin), IRP_MN_READ_CONFIG ); + + /* add HCA */ + l_pMdDevContext->m_hHca = VAPI_INVAL_HNDL; + l_pMdDevContext->m_hHhHca = (HH_hca_hndl_t)NULL; + l_HwProps.bus = (u_int8_t)l_pMdDevContext->m_BusNumber; + l_HwProps.dev_func = (u_int8_t)((l_pMdDevContext->m_DevNumber << 3) | l_pMdDevContext->m_Function); + l_HwProps.device_id = g_DevParams[l_pMdDevContext->m_eDevType].m_DevId; + l_HwProps.pci_vendor_id = MLX_VENDOR_ID; + l_HwProps.hw_ver = l_RevId; + l_HwProps.cr_base = (MT_phys_addr_t)l_pMdDevContext->m_Cr.m_MemPhysAddr.QuadPart & 0xFFF00000; + l_HwProps.uar_base = (MT_phys_addr_t)l_pMdDevContext->m_Uar.m_MemPhysAddr.QuadPart & 0xFFF00000; + l_HwProps.ddr_base = (MT_phys_addr_t)l_pMdDevContext->m_Ddr.m_MemPhysAddr.QuadPart & 0xFFF00000; + l_HwProps.interrupt_props.irq = (MOSAL_IRQ_ID_t)l_pMdDevContext->m_ulIntVector; + l_HwProps.interrupt_props.intr_pin = l_IntPin; + + l_HhRet = THH_add_hca( l_pMdDevContext->m_uCardNo-1, &l_HwProps, &l_hHhHca ); + if (l_HhRet != MT_OK) { + l_Status = l_HhRet; + l_pReason = "THH_add_hca failed. 
Check FW"; + l_EventCode = MD_EVENT_LOG_LOAD_ERROR_FW; + goto err; + } + + /* open HCA */ + // l_VapiRet = VAPI_open_hca(l_pMdDevContext->m_AsciiDevName, &l_hVapiHca); + // if (l_VapiRet == VAPI_EBUSY) { + // MdKdPrint( DBGLVL_LOW,("HCA [%s] is already open.\n",l_pMdDevContext->m_AsciiDevName)); + // l_Status = l_VapiRet; + // sprintf( l_Buf, "HCA [%s] is already open.\n",l_pMdDevContext->m_AsciiDevName); + // l_pReason = l_Buf; + // goto err; + // } + // else + // if (l_VapiRet != VAPI_OK) { + // MdKdPrint( DBGLVL_LOW,("Failed opening HCA [%s] - %s\n", + // l_pMdDevContext->m_AsciiDevName, VAPI_strerror_sym(l_VapiRet) )); + // l_Status = l_VapiRet; + // sprintf( l_Buf, "Failed opening HCA [%s] - %s\n", + // l_pMdDevContext->m_AsciiDevName, VAPI_strerror_sym(l_VapiRet) ); + // l_pReason = l_Buf; + // } else { + // MdKdPrint( DBGLVL_LOW,("HCA [%s] is open \n", l_pMdDevContext->m_AsciiDevName)); + // } + // + ///* save HCA handles */ + //l_pMdDevContext->m_hHca = l_hVapiHca; + l_pMdDevContext->m_hHhHca = l_hHhHca; + + + /* init IB_MGT */ + //if (g_pDrvContext->m_fSupportIbMgt) { + // if (IB_MGT_started) { + // l_MddkStatus = IB_MGT_reattach_hca( l_pMdDevContext->m_AsciiDevName ); + // if (l_MddkStatus != MT_OK) + // { + // MdKdPrint( DBGLVL_LOW,("IB_MGT_reattach_hca failed (%d)\n",l_MddkStatus)); + // sprintf( l_Buf, "IB_MT init failed (%d)\n",l_MddkStatus ); + // l_pReason = l_Buf; + // } + // else { + // MdKdPrint( DBGLVL_LOW,("'%s' attached to IB_MGT\n",l_pMdDevContext->m_AsciiDevName)); + // IB_MGT_started++; + // } + // } + // else { + // l_MddkStatus = IB_MGT_init_module(g_pDrvContext->m_IbMgtQp0Only); + // if (l_MddkStatus != MT_OK) + // { + // MdKdPrint( DBGLVL_LOW,("IB_MGT_init_module failed (%d)\n",l_MddkStatus)); + // sprintf( l_Buf, "IB_MGT_init_module failed (%d)\n",l_MddkStatus ); + // l_pReason = l_Buf; + // } + // else { + // IB_MGT_started++; + // MdKdPrint( DBGLVL_LOW,("IB_MGT started over '%s'\n",l_pMdDevContext->m_AsciiDevName)); + // } + // } + //} + + } /* add HCA to Tavor functional DLLs */ + + // mark device started + l_pMdDevContext->m_DeviceStarted = TRUE; + + // exit + /* Write to event log */ + WriteEventLogEntry( g_pDrvContext->m_pDrvObject, MD_EVENT_LOG_LOAD_OK, 0, 0, 0 ); + MdKdPrint( DBGLVL_DEFAULT, ("(MdStartDevice) exit (%x)\n", l_Status)); + return l_Status; + +err: +#pragma warning( push ) +#pragma warning( disable:4296 ) + MdKdPrint( DBGLVL_ALWAYS ,("(MdStartDevice) Device failed to initialize \n")); +#pragma warning( pop ) + + /* Write to event log */ + WriteEventLogEntry( g_pDrvContext->m_pDrvObject, l_EventCode, + 0, l_Status, 1, l_Status ); + return STATUS_UNSUCCESSFUL; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdRemoveDevice( + IN PDEVICE_OBJECT pi_pFdo + ) +/*++ + +Routine Description: + + Called from MdProcessPnPIrp() to + clean up our device instance's allocated buffers; free symbolic links + +Arguments: + + pi_pFdo - pointer to the FDO + +Return Value: + + NT status code from free symbolic link operation + +--*/ +{ + PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pFdo->DeviceExtension; + NTSTATUS l_Status = STATUS_SUCCESS; + + MdKdPrint( DBGLVL_DEFAULT,("enter MdRemoveDevice\n")); + + /* remove functional device object (FDO) */ + MdDevDeInit( l_pMdDevContext ); + + MdKdPrint( DBGLVL_DEFAULT,("exit MdRemoveDevice() status = 0x%x\n", l_Status )); + + return l_Status; +} + + 
+
+/*------------------------------------------------------------------------------------------------------*/
+
+NTSTATUS
+MdStopDevice(
+	IN  PDEVICE_OBJECT pi_pFdo
+	)
+/*++
+
+Routine Description:
+
+	Stops a given instance of the device: removes the HCA from the Tavor
+	functional DLLs, removes the device from MOSAL and releases the PCI
+	card resources.
+
+Arguments:
+
+	pi_pFdo - pointer to the device object for this instance
+
+Return Value:
+
+	NT status code
+
+--*/
+{
+	PMD_DEV_CONTEXT_T l_pMdDevContext = pi_pFdo->DeviceExtension;
+	NTSTATUS l_Status = STATUS_SUCCESS;
+	HH_hca_hndl_t l_hHhHca = l_pMdDevContext->m_hHhHca;
+
+	MdKdPrint( DBGLVL_DEFAULT,("enter MdStopDevice\n"));
+
+	// mark device stopping
+	l_pMdDevContext->m_StopDeviceRequested = TRUE;
+
+	// TBD
+	// stop the card by sending some commands to it or suspend all new requests and wait for the end of in-progress ones
+	//
+
+	if ((l_pMdDevContext->m_eDevType == MD_DEV_IX_TAVOR ||
+		l_pMdDevContext->m_eDevType == MD_DEV_IX_ARBEL_TM) && g_pDrvContext->m_fSupportTavor)
+	{ /* remove HCA from Tavor functional DLLs */
+
+		HH_ret_t l_HhRet;
+		//VAPI_ret_t l_VapiRet;
+
+		/* de-init IB_MGT */
+		//if (g_pDrvContext->m_fSupportIbMgt) {
+		//	if (--IB_MGT_started > 0) {
+		//		if (l_pMdDevContext->m_hHca != VAPI_INVAL_HNDL)
+		//			IB_MGT_fatal_delete_hca( l_pMdDevContext->m_hHca );
+		//		MdKdPrint( DBGLVL_LOW,("'%s' detached from IB_MGT\n",l_pMdDevContext->m_AsciiDevName));
+		//	}
+		//	else {
+		//		IB_MGT_cleanup_module();
+		//		MdKdPrint( DBGLVL_LOW,("The last HCA '%s' detached from IB_MGT\n",l_pMdDevContext->m_AsciiDevName));
+		//	}
+		//}
+
+		/* close HCA handle */
+		//if (l_pMdDevContext->m_hHca != VAPI_INVAL_HNDL)
+		//	l_VapiRet = VAPI_close_hca( l_pMdDevContext->m_hHca );
+		//if (l_VapiRet) {
+		//	DbgPrint("MdStopDevice: error 0x%x on VAPI_close_hca\n", l_VapiRet);
+		//}
+
+		/* remove HCA */
+		//l_HhRet = THH_rmv_hca( l_pMdDevContext->m_uCardNo-1 );
+		l_pMdDevContext->m_hHhHca = (HH_hca_hndl_t)NULL;
+		/* only test l_HhRet when THH_hob_destroy() was actually called;
+		   it is uninitialized otherwise */
+		if ( l_hHhHca != (HH_hca_hndl_t)NULL) {
+			l_HhRet = THH_hob_destroy( l_hHhHca );
+			if (l_HhRet) {
+				DbgPrint("MdStopDevice: error 0x%x on THH_hob_destroy\n", l_HhRet);
+			}
+		}
+
+		/* save HCA handle */
+		l_pMdDevContext->m_hHca = VAPI_INVAL_HNDL;
+
+	} /* remove HCA from Tavor functional DLLs */
+
+	// remove device from MOSAL
+	MOSAL_remove_device( l_pMdDevContext->m_hMosal );
+
+	// release PCI card resources
+	MdDeInitPciCfgCard( l_pMdDevContext );
+
+	// mark device stopped
+	l_pMdDevContext->m_DeviceStarted = FALSE;
+	l_pMdDevContext->m_StopDeviceRequested = FALSE;
+
+	MdKdPrint( DBGLVL_DEFAULT,("exit MdStopDevice() (%x)\n", l_Status));
+
+	return l_Status;
+}
+
+
+/*------------------------------------------------------------------------------------------------------*/
+
+NTSTATUS
+MdIrpCompletionRoutine(
+	IN PDEVICE_OBJECT pi_pFdo,
+	IN PIRP pi_pIrp,
+	IN PVOID Context
+	)
+/*++
+
+Routine Description:
+
+	Used as a general purpose completion routine so it can signal an event,
+	passed as the Context, when the next lower driver is done with the input Irp.
+	This routine is used by both PnP and Power Management logic.
+
+	Even though this routine does nothing but set an event, it must be defined and
+	prototyped as a completion routine for use as such.
+
+
+Arguments:
+
+	pi_pFdo - Pointer to the device object for the class device.
+
+	pi_pIrp - Irp completed.
+
+	Context - Driver defined context, in this case a pointer to an event.
+
+Return Value:
+
+	The function value is the final status from the operation.
+ +--*/ +{ + PKEVENT event = Context; + + // Set the input event + KeSetEvent(event, + 1, // Priority increment for waiting thread. + FALSE); // Flag this call is not immediately followed by wait. + + // This routine must return STATUS_MORE_PROCESSING_REQUIRED because we have not yet called + // IoFreeIrp() on this IRP. + return STATUS_MORE_PROCESSING_REQUIRED; + +} + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPwr.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPwr.c new file mode 100644 index 00000000..bf848122 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdPwr.c @@ -0,0 +1,934 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "MdGen.h" + + + +NTSTATUS +MdProcessPowerIrp( + IN PDEVICE_OBJECT pi_pFdo, + IN PIRP pi_pIrp + ) +/*++ + +Routine Description: + + This is our FDO's dispatch table function for IRP_MJ_POWER. + It processes the Power IRPs sent to the PDO for this device. + + For every power IRP, drivers must call PoStartNextPowerIrp and use PoCallDriver + to pass the IRP all the way down the driver stack to the underlying PDO. + + +Arguments: + + pi_pFdo - pointer to our device object (FDO) + + pi_pIrp - pointer to an I/O Request Packet + +Return Value: + + NT status code + +--*/ +{ + + PIO_STACK_LOCATION l_pIrpStack; + NTSTATUS l_Status = STATUS_SUCCESS; + PMD_DEV_CONTEXT_T l_pMdDevContext; + BOOLEAN fGoingToD0 = FALSE; + POWER_STATE l_SysPowerState, l_DesiredDevicePowerState; + KEVENT l_Event; + + MdKdPrint( DBGLVL_MEDIUM,(" MdProcessPowerIrp() IRP_MJ_POWER\n")); + + l_pMdDevContext = (PMD_DEV_CONTEXT_T) pi_pFdo->DeviceExtension; + l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp); + MdIncrementIoCount(l_pMdDevContext); + + switch (l_pIrpStack->MinorFunction) { + case IRP_MN_WAIT_WAKE: + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Enter IRP_MN_WAIT_WAKE\n")); + + // A driver sends IRP_MN_WAIT_WAKE to indicate that the system should + // wait for its device to signal a wake l_Event. The exact nature of the l_Event + // is device-dependent. 
+ // Drivers send this IRP for two reasons: + // 1) To allow a device to wake the system + // 2) To wake a device that has been put into a sleep state to save power + // but still must be able to communicate with its driver under certain circumstances. + // When a wake l_Event occurs, the driver completes the IRP and returns + // STATUS_SUCCESS. If the device is sleeping when the l_Event occurs, + // the driver must first wake up the device before completing the IRP. + // In a completion routine, the driver calls PoRequestPowerIrp to send a + // PowerDeviceD0 request. When the device has powered up, the driver can + // handle the IRP_MN_WAIT_WAKE request. + + // l_pMdDevContext->m_DeviceCapabilities.DeviceWake specifies the lowest device power state (least powered) + // from which the device can signal a wake l_Event + l_pMdDevContext->m_PowerDownLevel = l_pMdDevContext->m_DeviceCapabilities.DeviceWake; + + + if ( ( PowerDeviceD0 == l_pMdDevContext->m_CurrentDevicePowerState ) || + ( l_pMdDevContext->m_DeviceCapabilities.DeviceWake > l_pMdDevContext->m_CurrentDevicePowerState ) ) { + // + // STATUS_INVALID_DEVICE_STATE is returned if the device in the PowerD0 state + // or a state below which it can support waking, or if the SystemWake state + // is below a state which can be supported. A pending IRP_MN_WAIT_WAKE will complete + // with this error if the device's state is changed to be incompatible with the wake + // request. + + // If a driver fails this IRP, it should complete the IRP immediately without + // passing the IRP to the next-lower driver. + l_Status = STATUS_INVALID_DEVICE_STATE; + pi_pIrp->IoStatus.Status = l_Status; + IoCompleteRequest (pi_pIrp,IO_NO_INCREMENT ); + MdKdPrint( DBGLVL_HIGH, ( "Exit MdProcessPowerIrp(), l_Status STATUS_INVALID_DEVICE_STATE\n" ) ); + MdDecrementIoCount(l_pMdDevContext); + return l_Status; + } + + // flag we're enabled for wakeup + l_pMdDevContext->m_EnabledForWakeup = TRUE; + + // init an l_Event for our completion routine to signal when PDO is done with this Irp + KeInitializeEvent(&l_Event, NotificationEvent, FALSE); + + // If not failing outright, pass this on to our PDO for further handling + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + + // Set a completion routine so it can signal our l_Event when + // the PDO is done with the Irp + IoSetCompletionRoutine(pi_pIrp, + MdIrpCompletionRoutine, + &l_Event, // pass the l_Event to the completion routine as the Context + TRUE, // invoke on success + TRUE, // invoke on error + TRUE); // invoke on cancellation + + PoStartNextPowerIrp(pi_pIrp); + l_Status = PoCallDriver(l_pMdDevContext->m_pLdo, + pi_pIrp); + + // if PDO is not done yet, wait for the l_Event to be set in our completion routine + if (l_Status == STATUS_PENDING) { + // wait for irp to complete + + NTSTATUS waitStatus = KeWaitForSingleObject( + &l_Event, + Suspended, + KernelMode, + FALSE, + NULL); + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() done waiting for PDO to finish IRP_MN_WAIT_WAKE\n")); + } + + // now tell the device to actually wake up + MdSelfSuspendOrActivate( pi_pFdo, FALSE ); + + // flag we're done with wakeup irp + l_pMdDevContext->m_EnabledForWakeup = FALSE; + + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Exit IRP_MN_WAIT_WAKE\n")); + break; + + case IRP_MN_SET_POWER: + { + + // The system power policy manager sends this IRP to set the system power state. + // A device power policy manager sends this IRP to set the device power state for a device. 
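+	// Illustrative sketch (editorial addition; the mapping values are only an
+	// example): the system-to-device translation applied below is a table
+	// lookup in the DEVICE_CAPABILITIES reported by the bus driver, e.g. a
+	// typical map might read
+	//
+	//	DeviceState[PowerSystemWorking]   == PowerDeviceD0
+	//	DeviceState[PowerSystemSleeping1] == PowerDeviceD2
+	//	DeviceState[PowerSystemSleeping3] == PowerDeviceD3
+	//	DeviceState[PowerSystemHibernate] == PowerDeviceD3
+	//
+	// so that the lookup reduces to one line:
+	//
+	//	l_DesiredDevicePowerState.DeviceState =
+	//		l_pMdDevContext->m_DeviceCapabilities.DeviceState[ l_SysPowerState.SystemState ];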
+ + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Enter IRP_MN_SET_POWER\n")); + + // Set Irp->IoStatus.Status to STATUS_SUCCESS to indicate that the device + // has entered the requested state. Drivers cannot fail this IRP. + + switch (l_pIrpStack->Parameters.Power.Type) { + case SystemPowerState: + + // Get input system power state + l_SysPowerState.SystemState = l_pIrpStack->Parameters.Power.State.SystemState; + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Set Power, type SystemPowerState = %s\n", + MdStringForSysState( l_SysPowerState.SystemState ) )); + + // If system is in working state always set our device to D0 + // regardless of the wait state or system-to-device state power map + if ( l_SysPowerState.SystemState == PowerSystemWorking) { + l_DesiredDevicePowerState.DeviceState = PowerDeviceD0; + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() PowerSystemWorking, will set D0, not use state map\n")); + + + } else { + // set to corresponding system state if IRP_MN_WAIT_WAKE pending + if ( l_pMdDevContext->m_EnabledForWakeup ) { // got a WAIT_WAKE IRP pending? + + // Find the device power state equivalent to the given system state. + // We get this info from the DEVICE_CAPABILITIES struct in our device + // extension (initialized in MdPnPAddDevice() ) + l_DesiredDevicePowerState.DeviceState = + l_pMdDevContext->m_DeviceCapabilities.DeviceState[ l_SysPowerState.SystemState ]; + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() IRP_MN_WAIT_WAKE pending, will use state map\n")); + + } else { + // if no wait pending and the system's not in working state, just turn off + l_DesiredDevicePowerState.DeviceState = PowerDeviceD3; + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Not m_EnabledForWakeup and the system's not in working state,\n settting PowerDeviceD3 (off )\n")); + } + } + + // + // We've determined the desired device state; are we already in this state? + // + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Set Power, l_DesiredDevicePowerState = %s\n", + MdStringForDevState( l_DesiredDevicePowerState.DeviceState ) )); + + if (l_DesiredDevicePowerState.DeviceState != + l_pMdDevContext->m_CurrentDevicePowerState) { + + // MdIncrementIoCount(l_pMdDevContext); + + // No, request that we be put into this state + // by requesting a new Power Irp from the Pnp manager + l_pMdDevContext->m_PowerIrp = pi_pIrp; + l_Status = PoRequestPowerIrp(l_pMdDevContext->m_pPdo, + IRP_MN_SET_POWER, + l_DesiredDevicePowerState, + // completion routine will pass the Irp down to the PDO + MdPoRequestCompletion, + pi_pFdo, + NULL); + + } else { + // Yes, just pass it on to PDO (Physical Device Object) + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + PoStartNextPowerIrp(pi_pIrp); + l_Status = PoCallDriver(l_pMdDevContext->m_pLdo, + pi_pIrp); + + MdDecrementIoCount(l_pMdDevContext); + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Exit IRP_MN_SET_POWER\n")); + + } + break; + + case DevicePowerState: + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Set Power, type DevicePowerState = %s\n", + MdStringForDevState( l_pIrpStack->Parameters.Power.State.DeviceState ) )); + + // For requests to D1, D2, or D3 ( sleep or off states ), + // sets l_pMdDevContext->m_CurrentDevicePowerState to DeviceState immediately. + // This enables any code checking state to consider us as sleeping or off + // already, as this will imminently become our state. 
+ + // For requests to DeviceState D0 ( fully on ), sets fGoingToD0 flag TRUE + // to flag that we must set a completion routine and update + // l_pMdDevContext->m_CurrentDevicePowerState there. + // In the case of powering up to fully on, we really want to make sure + // the process is completed before updating our m_CurrentDevicePowerState, + // so no IO will be attempted or accepted before we're really ready. + + fGoingToD0 = MdSetDevicePowerState(pi_pFdo, + l_pIrpStack->Parameters.Power.State.DeviceState + ); // returns TRUE for D0 + + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + + if (fGoingToD0) { + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Set m_PowerIrp Completion Routine, fGoingToD0 =%d\n", fGoingToD0)); + IoSetCompletionRoutine(pi_pIrp, + MdPowerIrp_Complete, + // Always pass FDO to completion routine as its Context; + // This is because the DriverObject passed by the system to the routine + // is the Physical Device Object ( PDO ) not the Functional Device Object ( FDO ) + pi_pFdo, + TRUE, // invoke on success + TRUE, // invoke on error + TRUE); // invoke on cancellation of the Irp + } + + PoStartNextPowerIrp(pi_pIrp); + l_Status = PoCallDriver(l_pMdDevContext->m_pLdo, + pi_pIrp); + + if ( !fGoingToD0 ) // completion routine will decrement + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() Exit IRP_MN_SET_POWER\n")); + break; + } /* case irpStack->Parameters.Power.Type */ + + } + break; /* IRP_MN_SET_POWER */ + + case IRP_MN_QUERY_POWER: + // + // A power policy manager sends this IRP to determine whether it can change + // the system or device power state, typically to go to sleep. + // + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() IRP_MN_QUERY_POWER\n")); + + // We do nothing special here, just let the PDO handle it + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + PoStartNextPowerIrp(pi_pIrp); + l_Status = PoCallDriver(l_pMdDevContext->m_pLdo, + pi_pIrp); + + + MdDecrementIoCount(l_pMdDevContext); + + break; /* IRP_MN_QUERY_POWER */ + + default: + + MdKdPrint( DBGLVL_MEDIUM,("MdProcessPowerIrp() UNKNOWN POWER MESSAGE (%x)\n", l_pIrpStack->MinorFunction)); + + // + // All unhandled power messages are passed on to the PDO + // + + IoCopyCurrentIrpStackLocationToNext(pi_pIrp); + PoStartNextPowerIrp(pi_pIrp); + l_Status = PoCallDriver(l_pMdDevContext->m_pLdo, pi_pIrp); + + MdDecrementIoCount(l_pMdDevContext); + + } /* l_pIrpStack->MinorFunction */ + + MdKdPrint( DBGLVL_MEDIUM, ( "Exit MdProcessPowerIrp() l_Status = 0x%x\n", l_Status ) ); + return l_Status; +} + + +NTSTATUS +MdPoRequestCompletion( + IN PDEVICE_OBJECT pi_pDeviceObject, + IN UCHAR pi_MinorFunction, + IN POWER_STATE pi_PowerState, + IN PVOID pi_pContext, + IN PIO_STATUS_BLOCK pi_pIoStatus + ) +/*++ + +Routine Description: + + This is the completion routine set in a call to PoRequestPowerIrp() + that was made in MdProcessPowerIrp() in response to receiving + an IRP_MN_SET_POWER of type 'SystemPowerState' when the device was + not in a compatible device power state. In this case, a pointer to + the IRP_MN_SET_POWER Irp is saved into the FDO device extension + (l_pMdDevContext->m_PowerIrp), and then a call must be + made to PoRequestPowerIrp() to put the device into a proper power state, + and this routine is set as the completion routine. + + We decrement our pending io count and pass the saved IRP_MN_SET_POWER Irp + on to the next driver + +Arguments: + + pi_pDeviceObject - Pointer to the device object for the class device. 
+ Note that we must get our own device object from the Context + + pi_pContext - Driver defined context, in this case our own functional device object ( FDO ) + +Return Value: + + The function value is the final status from the operation. + +--*/ +{ + PIRP l_pIrp; + PMD_DEV_CONTEXT_T l_pMdDevContext; + PDEVICE_OBJECT l_pDeviceObject = pi_pContext; + NTSTATUS l_Status; + + l_pMdDevContext = l_pDeviceObject->DeviceExtension; + + // Get the Irp we saved for later processing in MdProcessPowerIrp() + // when we decided to request the Power Irp that this routine + // is the completion routine for. + l_pIrp = l_pMdDevContext->m_PowerIrp; + + // We will return the status set by the PDO for the power request we're completing + l_Status = pi_pIoStatus->Status; + + MdKdPrint( DBGLVL_HIGH,("(MdPoRequestCompletion) Enter\n")); + + // we should not be in the midst of handling a self-generated power irp + MDASSERT( !l_pMdDevContext->m_SelfPowerIrp ); + + // we must pass down to the next driver in the stack + IoCopyCurrentIrpStackLocationToNext(l_pIrp); + + // Calling PoStartNextPowerIrp() indicates that the driver is finished + // with the previous power IRP, if any, and is ready to handle the next power IRP. + // It must be called for every power IRP.Although power IRPs are completed only once, + // typically by the lowest-level driver for a device, PoStartNextPowerIrp must be called + // for every stack location. Drivers must call PoStartNextPowerIrp while the current IRP + // stack location points to the current driver. Therefore, this routine must be called + // before IoCompleteRequest, IoSkipCurrentStackLocation, and PoCallDriver. + + PoStartNextPowerIrp(l_pIrp); + + // PoCallDriver is used to pass any power IRPs to the PDO instead of IoCallDriver. + // When passing a power IRP down to a lower-level driver, the caller should use + // IoSkipCurrentIrpStackLocation or IoCopyCurrentIrpStackLocationToNext to copy the IRP to + // the next stack location, then call PoCallDriver. Use IoCopyCurrentIrpStackLocationToNext + // if processing the IRP requires setting a completion routine, or IoSkipCurrentStackLocation + // if no completion routine is needed. + + PoCallDriver(l_pMdDevContext->m_pLdo, l_pIrp); + + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrint( DBGLVL_MEDIUM,("(MdPoRequestCompletion) Exit IRP_MN_SET_POWER\n")); + + l_pMdDevContext->m_PowerIrp = NULL; + + return l_Status; +} + + +NTSTATUS +MdPowerIrp_Complete( + IN PDEVICE_OBJECT pi_pNullDeviceObject, + IN PIRP pi_pIrp, + IN PVOID pi_pContext + ) +/*++ + +Routine Description: + + This routine is called when An IRP_MN_SET_POWER of type 'DevicePowerState' + has been received by MdProcessPowerIrp(), and that routine has determined + 1) the request is for full powerup ( to PowerDeviceD0 ), and + 2) We are not already in that state + A call is then made to PoRequestPowerIrp() with this routine set as the completion routine. + + +Arguments: + + pi_pNullDeviceObject - Pointer to the device object for the class device. + + pi_pIrp - Irp completed. + + pi_pContext - Driver defined context. + +Return Value: + + The function value is the final status from the operation. 
+
+--*/
+{
+    NTSTATUS l_Status = STATUS_SUCCESS;
+    PDEVICE_OBJECT l_pDeviceObject;
+    PIO_STACK_LOCATION l_pIrpStack;
+    PMD_DEV_CONTEXT_T l_pMdDevContext;
+
+    MdKdPrint( DBGLVL_HIGH,("(MdPowerIrp_Complete) enter\n"));
+
+    l_pDeviceObject = (PDEVICE_OBJECT) pi_pContext;
+
+    l_pMdDevContext = (PMD_DEV_CONTEXT_T)l_pDeviceObject->DeviceExtension;
+
+    // If the lower driver returned PENDING, mark our stack location as pending also.
+    if (pi_pIrp->PendingReturned)
+    {
+        IoMarkIrpPending(pi_pIrp);
+    }
+
+    l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp);
+
+    // We can assert that we're a device power-up-to-D0 request,
+    // because that was the only type of request we set a completion routine
+    // for in the first place
+    MDASSERT(l_pIrpStack->MajorFunction == IRP_MJ_POWER);
+    MDASSERT(l_pIrpStack->MinorFunction == IRP_MN_SET_POWER);
+    MDASSERT(l_pIrpStack->Parameters.Power.Type==DevicePowerState);
+    MDASSERT(l_pIrpStack->Parameters.Power.State.DeviceState==PowerDeviceD0);
+
+    // Now that we know we've let the lower drivers do what was needed to power up,
+    // we can set our device extension flags accordingly
+    l_pMdDevContext->m_CurrentDevicePowerState = PowerDeviceD0;
+
+    pi_pIrp->IoStatus.Status = l_Status;
+
+    MdDecrementIoCount(l_pMdDevContext);
+
+    MdKdPrint( DBGLVL_MEDIUM,("exit MdPowerIrp_Complete: IRP_MN_SET_POWER D0 complete\n"));
+    return l_Status;
+}
+
+
+
+NTSTATUS
+MdSelfSuspendOrActivate(
+    IN PDEVICE_OBJECT pi_pDeviceObject,
+    IN BOOLEAN pi_fSuspend
+    )
+/*++
+
+Routine Description:
+
+    Called on MdPnPAddDevice() to power down until needed (i.e., till a pipe is actually opened).
+    Called on MdCreate() to power up the device to D0 before opening the 1st pipe.
+    Called on MdClose() to power down the device if this is the last pipe.
+
+Arguments:
+
+    pi_pDeviceObject - Pointer to the device object
+
+    pi_fSuspend - TRUE to suspend, FALSE to activate.
+
+
+Return Value:
+
+    If the operation is not attempted, SUCCESS is returned.
+    If the operation is attempted, the value is the final status from the operation.
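+
+Notes:
+
+    A typical pairing at the dispatch level would look like the sketch below
+    (illustrative only; in this file the MdClose() call site is currently
+    commented out):
+
+        // on IRP_MJ_CREATE, before touching the hardware:
+        l_Status = MdSelfSuspendOrActivate( pi_pFdo, FALSE );   // power up to D0
+
+        // on IRP_MJ_CLOSE, after the last handle is gone:
+        l_Status = MdSelfSuspendOrActivate( pi_pFdo, TRUE );    // drop to m_PowerDownLevel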
+ +--*/ +{ + NTSTATUS l_Status = STATUS_SUCCESS; + POWER_STATE l_PowerState; + PMD_DEV_CONTEXT_T l_pMdDevContext; + + + l_pMdDevContext = pi_pDeviceObject->DeviceExtension; + MdKdPrint( DBGLVL_MAXIMUM,("(MdSelfSuspendOrActivate) Enter: fSuspend = %d\n", pi_fSuspend)); + + + // Can't accept request if: + // 1) device is removed, + // 2) has never been started, + // 3) is stopped, + // 4) has a remove request pending, + // 5) has a stop device pending + if ( !MdCanAcceptIoRequests( pi_pDeviceObject ) ) { + l_Status = STATUS_DELETE_PENDING; + + MdKdPrint( DBGLVL_MEDIUM,("ABORTING MdSelfSuspendOrActivate()\n")); + return l_Status; + } + + + // don't do anything if any System-generated Device Pnp irps are pending + if ( NULL != l_pMdDevContext->m_PowerIrp ) { + MdKdPrint( DBGLVL_MAXIMUM,("Exit MdSelfSuspendOrActivate(),refusing on pending l_pMdDevContext->m_PowerIrp 0x%x\n", l_pMdDevContext->m_PowerIrp)); + return l_Status; + } + + // don't do anything if any self-generated Device Pnp irps are pending + if ( l_pMdDevContext->m_SelfPowerIrp ) { + MdKdPrint( DBGLVL_MAXIMUM,("Exit MdSelfSuspendOrActivate(),refusing on pending l_pMdDevContext->m_SelfPowerIrp\n" )); + return l_Status; + } + + // dont do anything if registry CurrentControlSet\Services\BulkUsb\Parameters\m_PowerDownLevel + // has been set to zero, PowerDeviceD0 ( 1 ), or a bogus high value + if ( ( l_pMdDevContext->m_PowerDownLevel == PowerDeviceD0 ) || + ( l_pMdDevContext->m_PowerDownLevel == PowerDeviceUnspecified) || + ( l_pMdDevContext->m_PowerDownLevel >= PowerDeviceMaximum ) ) { + MdKdPrint( DBGLVL_MAXIMUM,("Exit MdSelfSuspendOrActivate(), refusing on l_pMdDevContext->m_PowerDownLevel == %d\n", l_pMdDevContext->m_PowerDownLevel)); + return l_Status; + } + + if ( pi_fSuspend ) + l_PowerState.DeviceState = l_pMdDevContext->m_PowerDownLevel; + else + l_PowerState.DeviceState = PowerDeviceD0; // power up all the way; we're probably just about to do some IO + + l_Status = MdSelfRequestPowerIrp( pi_pDeviceObject, l_PowerState ); + + MdKdPrint( DBGLVL_MAXIMUM,("MdSelfSuspendOrActivate() status 0x%x on setting dev state %s\n", l_Status, MdStringForDevState(l_PowerState.DeviceState ) )); + + return l_Status; + +} + + +NTSTATUS +MdSelfRequestPowerIrp( + IN PDEVICE_OBJECT pi_pDeviceObject, + IN POWER_STATE pi_PowerState + ) +/*++ + +Routine Description: + + This routine is called by MdSelfSuspendOrActivate() to + actually make the system request for a powerdown/up to PowerState. + It first checks to see if we are already in Powerstate and immediately + returns SUCCESS with no further processing if so + + +Arguments: + + pi_pDeviceObject - Pointer to the device object + + pi_PowerState. power state requested, e.g PowerDeviceD0. + + +Return Value: + + The function value is the final status from the operation. 
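+
+Notes:
+
+    The synchronous power-up path below waits on m_SelfRequestedPowerIrpEvent,
+    so that event must have been initialized once during device-context setup,
+    e.g. (sketch only; the actual initialization site is not part of this file):
+
+        KeInitializeEvent( &l_pMdDevContext->m_SelfRequestedPowerIrpEvent,
+                           NotificationEvent, FALSE );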
+ +--*/ +{ + NTSTATUS l_Status = STATUS_SUCCESS; + PMD_DEV_CONTEXT_T l_pMdDevContext; + PIRP l_pIrp = NULL; + + l_pMdDevContext = pi_pDeviceObject->DeviceExtension; + + // This should have been reset in completion routine + MDASSERT( !l_pMdDevContext->m_SelfPowerIrp ); + + if ( l_pMdDevContext->m_CurrentDevicePowerState == pi_PowerState.DeviceState ) + return STATUS_SUCCESS; // nothing to do + + MdKdPrint( DBGLVL_HIGH,("Enter MdSelfRequestPowerIrp() will request power irp to state %s\n", + MdStringForDevState( pi_PowerState.DeviceState ))); + + MdIncrementIoCount(l_pMdDevContext); + + // flag we're handling a self-generated power irp + l_pMdDevContext->m_SelfPowerIrp = TRUE; + + // actually request the Irp + l_Status = PoRequestPowerIrp(l_pMdDevContext->m_pPdo, + IRP_MN_SET_POWER, + pi_PowerState, + MdPoSelfRequestCompletion, + pi_pDeviceObject, + NULL); + + + if ( l_Status == STATUS_PENDING ) { + // status pending is the return code we wanted + + // We only need to wait for completion if we're powering up + if ( (ULONG) pi_PowerState.DeviceState < l_pMdDevContext->m_PowerDownLevel ) { + + NTSTATUS waitStatus; + + waitStatus = KeWaitForSingleObject( + &l_pMdDevContext->m_SelfRequestedPowerIrpEvent, + Suspended, + KernelMode, + FALSE, + NULL); + + } + + l_Status = STATUS_SUCCESS; + + l_pMdDevContext->m_SelfPowerIrp = FALSE; + + MdKdPrint( DBGLVL_HIGH, ("MdSelfRequestPowerIrp() SUCCESS\n IRP 0x%x to state %s\n", + l_pIrp, MdStringForDevState(pi_PowerState.DeviceState) )); + + + } + else { + // The return status was not STATUS_PENDING; any other codes must be considered in error here; + // i.e., it is not possible to get a STATUS_SUCCESS or any other non-error return from this call; + MdKdPrint( DBGLVL_HIGH, ("MdSelfRequestPowerIrp() to state %s FAILED, status = 0x%x\n", + MdStringForDevState( pi_PowerState.DeviceState ),l_Status)); + } + + return l_Status; +} + + + +NTSTATUS +MdPoSelfRequestCompletion( + IN PDEVICE_OBJECT DeviceObject, + IN UCHAR MinorFunction, + IN POWER_STATE pi_PowerState, + IN PVOID Context, + IN PIO_STATUS_BLOCK IoStatus + ) +/*++ + +Routine Description: + + This routine is called when the driver completes a self-originated power IRP + that was generated by a call to MdSelfSuspendOrActivate(). + We power down whenever the last pipe is closed and power up when the first pipe is opened. + + For power-up , we set an l_Event in our FDO extension to signal this IRP done + so the power request can be treated as a synchronous call. + We need to know the device is powered up before opening the first pipe, for example. + For power-down, we do not set the l_Event, as no caller waits for powerdown complete. + +Arguments: + + DeviceObject - Pointer to the device object for the class device. ( Physical Device Object ) + + Context - Driver defined context, in this case our FDO ( functional device object ) + +Return Value: + + The function value is the final status from the operation. 
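+
+Notes:
+
+    The "powering up" test below ( DeviceState < m_PowerDownLevel ) relies on
+    the WDM enumeration order, in which a smaller value means more power:
+
+        typedef enum _DEVICE_POWER_STATE {
+            PowerDeviceUnspecified = 0,
+            PowerDeviceD0,              // fully on
+            PowerDeviceD1,
+            PowerDeviceD2,
+            PowerDeviceD3,              // off
+            PowerDeviceMaximum
+        } DEVICE_POWER_STATE, *PDEVICE_POWER_STATE;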
+ +--*/ +{ + PDEVICE_OBJECT deviceObject = Context; + PMD_DEV_CONTEXT_T l_pMdDevContext = deviceObject->DeviceExtension; + NTSTATUS l_Status = IoStatus->Status; + + // we should not be in the midst of handling a system-generated power irp + MDASSERT( NULL == l_pMdDevContext->m_PowerIrp ); + + // We only need to set the l_Event if we're powering up; + // No caller waits on power down complete + if ( (ULONG) pi_PowerState.DeviceState < l_pMdDevContext->m_PowerDownLevel ) { + + // Trigger Self-requested power irp completed l_Event; + // The caller is waiting for completion + KeSetEvent(&l_pMdDevContext->m_SelfRequestedPowerIrpEvent, 1, FALSE); + } + + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrintCond( DBGLVL_HIGH, !NT_SUCCESS(l_Status),("Exit MdPoSelfRequestCompletion() FAILED, l_Status = 0x%x\n", l_Status )); + + return l_Status; +} + + +BOOLEAN +MdSetDevicePowerState( + IN PDEVICE_OBJECT DeviceObject, + IN DEVICE_POWER_STATE DeviceState + ) +/*++ + +Routine Description: + + This routine is called when An IRP_MN_SET_POWER of type 'DevicePowerState' + has been received by MdProcessPowerIrp(). + + +Arguments: + + DeviceObject - Pointer to the device object for the class device. + + DeviceState - Device specific power state to set the device in to. + + +Return Value: + + For requests to DeviceState D0 ( fully on ), returns TRUE to signal caller + that we must set a completion routine and finish there. + +--*/ +{ + NTSTATUS l_Status = STATUS_SUCCESS; + PMD_DEV_CONTEXT_T l_pMdDevContext; + BOOLEAN fRes = FALSE; + + l_pMdDevContext = (PMD_DEV_CONTEXT_T) DeviceObject->DeviceExtension; + + switch (DeviceState) { + case PowerDeviceD3: + + // + // Device will be going OFF, + // + MdKdPrint( DBGLVL_MEDIUM,("MdSetDevicePowerState() PowerDeviceD3 (OFF)\n")); + MdStopDevice(DeviceObject); + l_pMdDevContext->m_CurrentDevicePowerState = DeviceState; + break; + + case PowerDeviceD1: + case PowerDeviceD2: + // + // power states D1,D2 translate to USB suspend + + MdKdPrint( DBGLVL_MEDIUM,("MdSetDevicePowerState() %s\n", + MdStringForDevState(DeviceState) )); + + l_pMdDevContext->m_CurrentDevicePowerState = DeviceState; + break; + + case PowerDeviceD0: + + + MdKdPrint( DBGLVL_MEDIUM,("MdSetDevicePowerState() PowerDeviceD0 (ON)\n")); + + // We'll need to finish the rest in the completion routine; + // signal caller we're going to D0 and will need to set a completion routine + fRes = TRUE; + + // Caller will pass on to PDO ( Physical Device object ) + break; + + default: + + MdKdPrint( DBGLVL_MEDIUM,(" Bogus DeviceState = %x\n", DeviceState)); + } + + return fRes; +} + + + +NTSTATUS +MdQueryCapabilities( + IN PDEVICE_OBJECT pi_pLowerDevObject, + OUT PDEVICE_CAPABILITIES po_pm_DeviceCapabilities + ) + +/*++ + +Routine Description: + + This routine generates an internal IRP from this driver to the lower portion + of the driver stack to obtain information on the Device Object's + capabilities. We are most interested in learning which system power states + are to be mapped to which device power states for honoring + IRP_MJ_SET_POWER Irps. + + This is a blocking call which waits for the IRP completion routine + to set an l_Event on finishing. + +Arguments: + + pi_pLowerDevObject - DeviceObject beneath this driver in the stack. + po_pm_DeviceCapabilities - Device Capabilities structure + +Return Value: + + NTSTATUS value from the IoCallDriver() call. 
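+
+Notes:
+
+    MdIrpCompletionRoutine is not defined in this file; a minimal version
+    matching the usage below would be (sketch only):
+
+        NTSTATUS
+        MdIrpCompletionRoutine(
+            IN PDEVICE_OBJECT pi_pDeviceObject,
+            IN PIRP pi_pIrp,
+            IN PVOID pi_pContext        // the KEVENT passed below
+            )
+        {
+            KeSetEvent( (PKEVENT)pi_pContext, IO_NO_INCREMENT, FALSE );
+            // halt completion here; the caller reads the status and frees the IRP
+            return STATUS_MORE_PROCESSING_REQUIRED;
+        }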
+ +--*/ + +{ + PIO_STACK_LOCATION l_pNextStack; + PIRP l_pIrp; + NTSTATUS l_Status; + KEVENT l_Event; + + + // This is a DDK-defined DBG-only macro that ASSERTS we are not running pageable code + // at higher than APC_LEVEL. + PAGED_CODE(); + + + // Build an IRP for us to generate an internal query request to the PDO + l_pIrp = IoAllocateIrp( pi_pLowerDevObject->StackSize, FALSE); + + if (!l_pIrp) + { + return STATUS_INSUFFICIENT_RESOURCES; + } + + // + // Preinit the device capability structures appropriately. + // + RtlZeroMemory( po_pm_DeviceCapabilities, sizeof(DEVICE_CAPABILITIES) ); + po_pm_DeviceCapabilities->Size = sizeof(DEVICE_CAPABILITIES); + po_pm_DeviceCapabilities->Version = 1; + po_pm_DeviceCapabilities->Address = -1; + po_pm_DeviceCapabilities->UINumber = -1; + + // IoGetNextIrpStackLocation gives a higher level driver access to the next-lower + // driver's I/O stack location in an IRP so the caller can set it up for the lower driver. + l_pNextStack = IoGetNextIrpStackLocation(l_pIrp); + l_pNextStack->MajorFunction= IRP_MJ_PNP; + l_pNextStack->MinorFunction= IRP_MN_QUERY_CAPABILITIES; + + // init an l_Event to tell us when the completion routine's been called + KeInitializeEvent(&l_Event, NotificationEvent, FALSE); + + // Set a completion routine so it can signal our l_Event when + // the next lower driver is done with the Irp + IoSetCompletionRoutine(l_pIrp, + MdIrpCompletionRoutine, + &l_Event, // pass the l_Event as Context to completion routine + TRUE, // invoke on success + TRUE, // invoke on error + TRUE); // invoke on cancellation of the Irp + + + // set our pointer to the DEVICE_CAPABILITIES struct + l_pNextStack->Parameters.DeviceCapabilities.Capabilities = po_pm_DeviceCapabilities; + + // preset the irp to report not supported + l_pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + l_Status = IoCallDriver(pi_pLowerDevObject, l_pIrp); + + MdKdPrint( DBGLVL_MEDIUM,("(MdQueryCapabilities) l_Status from IoCallDriver to PCI = 0x%x\n", l_Status)); + + if (l_Status == STATUS_PENDING) + { /* wait for irp to complete */ + + KeWaitForSingleObject( + &l_Event, + Suspended, + KernelMode, + FALSE, + NULL); + + l_Status = l_pIrp->IoStatus.Status; + + } /* wait for irp to complete */ + + // failed? this is probably a bug + MdKdPrintCond( DBGLVL_DEFAULT,(!NT_SUCCESS(l_Status)), ("(MdQueryCapabilities) failed\n")); + + IoFreeIrp(l_pIrp); + + return l_Status; +} + + + + + + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c new file mode 100644 index 00000000..ea85c7b7 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdRdWr.c @@ -0,0 +1,329 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "MdGen.h"
+
+//void* VIPKL_open(void);
+//void VIPKL_close(void* hca_stat_p);
+//void * IB_MGT_dev_open(void);
+//void IB_MGT_dev_close(void * proc_state_p);
+void *MOSAL_rsct_open(MOSAL_pid_t pid);
+void MOSAL_rsct_close(void *p_rsct, MOSAL_pid_t pid);
+//int IsIbMgtOn();
+
+
+
+
+NTSTATUS
+MdClose(
+    IN PDEVICE_OBJECT pi_pFdo,
+    IN PIRP pi_pIrp
+    )
+/*++
+
+Routine Description:
+
+    This is the dispatch table routine for IRP_MJ_CLOSE.
+    It handles user-mode CloseHandle() calls for a pipe;
+    it closes the File Object for the pipe handle it represents.
+
+Arguments:
+
+    pi_pFdo - pointer to our FDO ( Functional Device Object )
+
+
+Return Value:
+
+    NT status code
+
+--*/
+{
+    NTSTATUS l_Status = STATUS_SUCCESS;
+    PFILE_OBJECT l_pFileObject;
+    PIO_STACK_LOCATION l_pIrpStack;
+    PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pFdo->DeviceExtension;
+    PMD_PCS_CONTEXT_T l_pPcs;
+
+    MdKdPrint( DBGLVL_HIGH,("entering MdClose\n"));
+
+    MdIncrementIoCount(l_pMdDevContext);
+
+    // get file handle
+    l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp);
+    l_pFileObject = l_pIrpStack->FileObject;
+
+    // sanity check
+    if (l_pFileObject->FsContext == NULL)
+    { /* unexpected: our private file context is gone - we can't proceed */
+#pragma warning( push )
+#pragma warning( disable:4296 )
+        MdKdPrint( DBGLVL_ALWAYS,("MdClose: Someone has taken our private file context (FsContext=%p) - can't proceed \n",
+            l_pFileObject->FsContext));
+#pragma warning( pop )
+        l_Status = STATUS_UNSUCCESSFUL;
+        goto done;
+    } /* unexpected: our private file context is gone - we can't proceed */
+
+    // get process context
+    l_pPcs = (PMD_PCS_CONTEXT_T)l_pFileObject->FsContext;
+
+    // resource tracking
+    switch (l_pMdDevContext->m_eDevType)
+    { /* per-device-type cleanup */
+    case MD_DEV_IX_TAVOR_SD:
+        PciRelease( l_pMdDevContext, l_pPcs );
+        break;
+
+    case MD_DEV_IX_TAVOR:
+    case MD_DEV_IX_ARBEL_TM:
+        // MDCTL
+        PciRelease( l_pMdDevContext, l_pPcs );
+        break;
+
+    case MD_DEV_IX_TAVOR_BD:
+        break;
+
+    case MD_DEV_IX_CTRL:
+        // IB_MGT
+        //if (IsIbMgtOn())
+        //    IB_MGT_dev_close(l_pPcs->m_hIbMgt);
+        // VIPKL
+        // VIPKL_close(l_pPcs->m_hVipkl);
+        // MOSAL
+        MOSAL_rsct_close(l_pPcs->m_hMosal, l_pPcs->m_Pid);
+        break;
+
+    default:
+        MdKdPrint( DBGLVL_DEFAULT,("(MdClose) Internal error - unknown device type %d\n", l_pMdDevContext->m_eDevType));
+        l_Status = STATUS_NOT_IMPLEMENTED;
+        break;
+
+    } /* per-device-type cleanup */
+
+
+    // remove the process context
+    MdExFreePool( (PVOID)l_pPcs );
+
+    /* decrement reference counter */
+    InterlockedDecrement( &l_pMdDevContext->m_nOpenCount );
+
+    /* remove the control device if needed */
+    if (l_pMdDevContext->m_fDeletePending == TRUE && l_pMdDevContext->m_nOpenCount == 0)
+    { /* no applications - one can remove the control device */
+
+        // The final decrement to device extension PendingIoCount == 0
+        // will set l_pMdDevContext->m_RemoveEvent, enabling device removal.
+        // If there is no pending IO at this point, the decrement below will be it.
+        MdDecrementIoCount(l_pMdDevContext);
+
+    } /* no applications - one can remove the control device */
+
+done:
+    pi_pIrp->IoStatus.Status = l_Status;
+    pi_pIrp->IoStatus.Information = 0;
+
+    IoCompleteRequest (pi_pIrp,
+                       IO_NO_INCREMENT
+                       );
+
+    MdDecrementIoCount(l_pMdDevContext);
+
+    // try to power down the device if this is the last pipe
+    // ??? actStat = MdSelfSuspendOrActivate( DeviceObject, TRUE );
+
+    MdKdPrint( DBGLVL_HIGH,("exit MdClose status %x\n", l_Status));
+
+    return l_Status;
+}
+
+
+NTSTATUS
+MdCreate(
+    IN PDEVICE_OBJECT pi_pFdo,
+    IN PIRP pi_pIrp
+    )
+/*++
+
+Routine Description:
+
+    This is the dispatch table routine for IRP_MJ_CREATE.
+    It is the entry point for CreateFile() calls;
+    user-mode apps may open "\\.\yy",
+    where yy is the internal pipe id.
+
+Arguments:
+
+    pi_pFdo - pointer to our FDO ( Functional Device Object )
+
+
+Return Value:
+
+    NT status code
+
+--*/
+{
+    NTSTATUS l_Status = STATUS_SUCCESS;
+    PIO_STACK_LOCATION l_pIrpStack;
+    PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)pi_pFdo->DeviceExtension;
+    PFILE_OBJECT l_pFileObject;
+    PMD_PCS_CONTEXT_T l_pPcs;
+
+    MdKdPrint( DBGLVL_HIGH,("entering MdCreate\n"));
+
+    MdIncrementIoCount(l_pMdDevContext);
+
+    // Can't accept a new io request if:
+    // 1) device is removed,
+    // 2) has never been started,
+    // 3) is stopped,
+    // 4) has a remove request pending,
+    // 5) has a stop device pending
+    if ( !MdCanAcceptIoRequests( pi_pFdo ) ) {
+        l_Status = STATUS_DELETE_PENDING;
+        MdKdPrint( DBGLVL_DEFAULT,("ABORTING MdCreate\n"));
+        goto done;
+    }
+
+    // get file handle
+    l_pIrpStack = IoGetCurrentIrpStackLocation (pi_pIrp);
+    l_pFileObject = l_pIrpStack->FileObject;
+    //MdKdPrint( DBGLVL_ALWAYS,("MdCreate: file handle %p\n",l_pFileObject));
+
+    // sanity check
+    if (l_pFileObject->FsContext != NULL)
+    { /* unexpected: someone is already using this field - we can't proceed */
+#pragma warning( push )
+#pragma warning( disable:4296 )
+        MdKdPrint( DBGLVL_ALWAYS,("MdCreate: Someone is using private file context (FsContext=%p) - can't proceed \n",l_pFileObject->FsContext));
+#pragma warning( pop )
+        l_Status = STATUS_UNSUCCESSFUL;
+        goto done;
+    } /* unexpected: someone is already using this field - we can't proceed
*/ + + // create process context + l_pPcs = (PMD_PCS_CONTEXT_T)MdExAllocatePool(NonPagedPool, sizeof(MD_PCS_CONTEXT_T)); + if (l_pPcs == NULL ) + { + l_Status = STATUS_INSUFFICIENT_RESOURCES; + goto done; + } + l_pFileObject->FsContext = (PVOID)l_pPcs; + + // fill process context + RtlZeroMemory( l_pPcs, sizeof(MD_PCS_CONTEXT_T) ); + l_pPcs->m_Pid = MOSAL_getpid(); + + // resource tracking + if ( l_pMdDevContext->m_eDevType == MD_DEV_IX_CTRL ) { + // MOSAL + l_pPcs->m_hMosal = MOSAL_rsct_open(l_pPcs->m_Pid); + // // VIPKL + //l_pPcs->m_hVipkl = VIPKL_open(); + // // IB_MGT + // l_pPcs->m_hIbMgt = IB_MGT_dev_open(); + } + + // increment counter of open files (= counter of processes, working with this device) + InterlockedIncrement( &l_pMdDevContext->m_nOpenCount ); + + MdKdPrint( DBGLVL_HIGH,("(MdCreate) File Object 0x%x, Pcs 0x%x, Pid 0x%x \n", + l_pFileObject, l_pPcs, l_pPcs->m_Pid )); +done: + pi_pIrp->IoStatus.Status = l_Status; + pi_pIrp->IoStatus.Information = 0; + + + IoCompleteRequest (pi_pIrp, + IO_NO_INCREMENT + ); + + MdDecrementIoCount(l_pMdDevContext); + + MdKdPrint( DBGLVL_HIGH,("exit MdCreate %x\n", l_Status)); + + + return l_Status; +} + +BOOLEAN +MdCancelPendingIo( + IN PDEVICE_OBJECT DeviceObject + ) +/*++ + +Routine Description: + Cancels pending IO, as on a sudden IRP_MN_REMOVE_DEVICE + +Arguments: + + DeviceObject - pointer to the device object for this instance of the 82930 + device. + + +Return Value: + + TRUE if cancelled any, else FALSE + +--*/ +{ + return TRUE; +} + + + +NTSTATUS +MdAbortInPgsReqs( + IN PDEVICE_OBJECT DeviceObject + ) +/*++ + +Routine Description: + + Called as part of sudden device removal handling. + Cancels all in progress requests. + +Arguments: + + Ptrs to our FDO + +Return Value: + + NT status code + +--*/ +{ + NTSTATUS ntStatus = STATUS_SUCCESS; + return ntStatus; +} + + + + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.c new file mode 100644 index 00000000..ec7839f3 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.c @@ -0,0 +1,1757 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "MdGen.h" + +/*------------------------------------------------------------------------------------------------------*/ + +VOID +WriteEventLogEntry( + PVOID pi_pIoObject, + ULONG pi_ErrorCode, + ULONG pi_UniqueErrorCode, + ULONG pi_FinalStatus, + ULONG pi_nDataItems, + ... + ) +/*++ + +Routine Description: + Writes an event log entry to the event log. + +Arguments: + + pi_pIoObject......... The IO object ( driver object or device object ). + pi_ErrorCode......... The error code. + pi_UniqueErrorCode... A specific error code. + pi_FinalStatus....... The final status. + pi_nDataItems........ Number of data items. + . + . data items values + . + +Return Value: + + None . + +--*/ +{ /* WriteEventLogEntry */ + + /* Variable argument list */ + va_list l_Argptr; + /* Pointer to an error log entry */ + PIO_ERROR_LOG_PACKET l_pErrorLogEntry; + + /* Init the variable argument list */ + va_start(l_Argptr, pi_nDataItems); + + /* Allocate an error log entry */ + l_pErrorLogEntry = + (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry( + pi_pIoObject, + (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG)) + ); + /* Check allocation */ + if ( l_pErrorLogEntry != NULL) + { /* OK */ + + /* Data item index */ + USHORT l_nDataItem ; + + /* Set the error log entry header */ + l_pErrorLogEntry->ErrorCode = pi_ErrorCode; + l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG)); + l_pErrorLogEntry->SequenceNumber = 0; + l_pErrorLogEntry->MajorFunctionCode = 0; + l_pErrorLogEntry->IoControlCode = 0; + l_pErrorLogEntry->RetryCount = 0; + l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode; + l_pErrorLogEntry->FinalStatus = pi_FinalStatus; + + /* Insert the data items */ + for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++) + { /* Inset a data item */ + + /* Current data item */ + int l_CurDataItem ; + + /* Get next data item */ + l_CurDataItem = va_arg( l_Argptr, int); + + /* Put it into the data array */ + l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ; + + } /* Inset a data item */ + + /* Write the packet */ + IoWriteErrorLogEntry(l_pErrorLogEntry); + + } /* OK */ + + /* Term the variable argument list */ + va_end(l_Argptr); + +} /* WriteEventLogEntry */ + +/*------------------------------------------------------------------------------------------------------*/ + + +NTSTATUS +MdAddDevice( + IN PDRIVER_OBJECT pi_pDriverObject, + IN PUNICODE_STRING pi_pNtDeviceName, + IN PUNICODE_STRING pi_pWin32DeviceName, + IN int pi_nExtSize, + OUT PMD_DEV_CONTEXT_T * po_ppMdDevContext + ) +/*++ + +Routine Description: + This routine creates a device object, the symbolic link in \DosDevices and allocates device extension + + A symbolic link must be created between the device name and an entry + in \DosDevices in order to allow Win32 applications to open the device. + + +Arguments: + + pi_pDriverObject........ pointer to driver object + pi_pNtDeviceName........ NT device name + pi_pWin32DeviceName..... Win32 device name + pi_nExtSize............. Device xontext size + po_ppMdDevContext....... The device context + +Return Value: + + STATUS_SUCCESS if the device and link are created correctly, otherwise + an error indicating the reason for failure. 
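+
+Notes:
+
+    A typical caller builds both names first and lets this routine take
+    ownership of their buffers (sketch only; the device name "mdcard0" is
+    just an example):
+
+        UNICODE_STRING l_usNtName, l_usDosName;
+
+        if ( MdCreateDeviceNames( "mdcard0", &l_usNtName, &l_usDosName ) ) {
+            // l_usNtName  -> L"\\Device\\mdcard0"
+            // l_usDosName -> L"\\DosDevices\\mdcard0"
+            l_Status = MdAddDevice( pi_pDriverObject, &l_usNtName, &l_usDosName,
+                                    sizeof(MD_DEV_CONTEXT_T), &l_pMdDevContext );
+        }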
+
+--*/
+{ /* MdAddDevice */
+
+    /* Status of utility calls */
+    NTSTATUS l_Status;
+    /* The address of the newly created device object */
+    PDEVICE_OBJECT l_pFdo = NULL;
+    /* The Md device context ( l_pDeviceObject extension ) */
+    PMD_DEV_CONTEXT_T l_pMdDevContext;
+
+
+    /* Create a new device */
+    l_Status = IoCreateDevice(
+        pi_pDriverObject,
+        pi_nExtSize,
+        pi_pNtDeviceName,
+        FILE_DEVICE_UNKNOWN,
+        0,
+        FALSE, /* Not Exclusive */
+        &l_pFdo
+        );
+
+    /* If device creation failed return the error code */
+    if ( !NT_SUCCESS(l_Status) )
+    { /* Device creation failed */
+
+        /* Return error code */
+        return l_Status;
+
+    } /* Device creation failed */
+
+    /*
+     * Create the symbolic link.
+     */
+    l_Status = IoCreateSymbolicLink( pi_pWin32DeviceName, pi_pNtDeviceName );
+
+    /*
+     * If we couldn't create the link then
+     * abort installation - delete the created device
+     */
+    if ( !NT_SUCCESS( l_Status ) )
+    { /* Create symbolic link failed */
+
+        /* Delete the created device */
+        IoDeleteDevice( l_pFdo );
+
+        return l_Status;
+
+    } /* Create symbolic link failed */
+
+    /*
+     * Set up the rest of the device info :
+     */
+
+    /* Use direct IO; this applies to IRP_MJ_READ and IRP_MJ_WRITE. */
+    l_pFdo->Flags |= DO_DIRECT_IO ;
+    l_pFdo->Flags &= ~DO_DEVICE_INITIALIZING;
+
+    /* Byte alignment */
+    l_pFdo->AlignmentRequirement = FILE_BYTE_ALIGNMENT;
+
+    /*
+     * Set up some device context fields
+     */
+
+    /* get device context */
+    l_pMdDevContext = (PMD_DEV_CONTEXT_T)l_pFdo->DeviceExtension;
+
+    /* zero the context */
+    RtlZeroMemory(l_pFdo->DeviceExtension, pi_nExtSize);
+
+    /* store handle to device */
+    l_pMdDevContext->m_pFdo = l_pFdo;
+
+    /* Copy the Win32 device name */
+    #if 0
+    UCopyString( &l_pMdDevContext->m_usDosDeviceName, pi_pWin32DeviceName, TRUE );
+    MdExFreePool( pi_pWin32DeviceName->Buffer );
+    #else
+    l_pMdDevContext->m_usDosDeviceName.Length = pi_pWin32DeviceName->Length;
+    l_pMdDevContext->m_usDosDeviceName.MaximumLength = pi_pWin32DeviceName->MaximumLength;
+    l_pMdDevContext->m_usDosDeviceName.Buffer = pi_pWin32DeviceName->Buffer;
+    #endif
+    pi_pWin32DeviceName->Buffer = NULL;
+
+    /* Copy the Nt device name */
+    #if 0
+    UCopyString( &l_pMdDevContext->m_usNtDeviceName, pi_pNtDeviceName, TRUE );
+    MdExFreePool( pi_pNtDeviceName->Buffer );
+    #else
+    l_pMdDevContext->m_usNtDeviceName.Length = pi_pNtDeviceName->Length;
+    l_pMdDevContext->m_usNtDeviceName.MaximumLength = pi_pNtDeviceName->MaximumLength;
+    l_pMdDevContext->m_usNtDeviceName.Buffer = pi_pNtDeviceName->Buffer;
+    #endif
+    pi_pNtDeviceName->Buffer = NULL;
+
+    /* store the result */
+    *po_ppMdDevContext = l_pMdDevContext;
+
+    return STATUS_SUCCESS;
+
+} /* MdAddDevice */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+VOID
+MdDelDevice(
+    IN PMD_DEV_CONTEXT_T pi_pMdDevContext
+    )
+/*++
+
+Routine Description:
+    This routine is called to remove the device.
+
+
+Arguments:
+
+    pi_pMdDevContext...... The device context
+
+Return Value:
+
+    None.
+ +--*/ +{ /* MdDelDevice */ + + /* Delete the symbolic link */ + IoDeleteSymbolicLink(&pi_pMdDevContext->m_usDosDeviceName); + + /* Free the DOS device names string buffer */ + if (pi_pMdDevContext->m_usDosDeviceName.Buffer != NULL) + { + PUSHORT l_pBuf = pi_pMdDevContext->m_usDosDeviceName.Buffer; + pi_pMdDevContext->m_usDosDeviceName.Buffer = NULL; + MdExFreePool( l_pBuf ); + } + + /* Free the NT device names string buffer */ + if (pi_pMdDevContext->m_usNtDeviceName.Buffer != NULL) + { + PUSHORT l_pBuf = pi_pMdDevContext->m_usNtDeviceName.Buffer; + pi_pMdDevContext->m_usNtDeviceName.Buffer = NULL; + MdExFreePool( l_pBuf ); + } + + /* Delete the created device */ + if (pi_pMdDevContext->m_pFdo) + { + PDEVICE_OBJECT l_pFdo = pi_pMdDevContext->m_pFdo; + pi_pMdDevContext->m_pFdo = NULL; + IoDeleteDevice( l_pFdo ); + } + +} /* MdDelDevice */ + +/*------------------------------------------------------------------------------------------------------*/ + +static void GetRegistryDword( + IN PWCHAR pi_ParamName, + IN ULONG pi_DfltValue, + OUT PLONG po_Result +) +{ + ULONG l_uValue = 0; + + // read parameter the registry + MdGetRegistryDword( MD_REGISTRY_PARAMETERS_PATH, //absolute registry path + pi_ParamName, // REG_DWORD ValueName + &l_uValue ); // Value receiver + + if (!l_uValue) + { /* no Registry value - set default */ + *po_Result = pi_DfltValue; + } /* no Registry value - set default */ + else + { /* store Registry value */ + *po_Result = l_uValue; + } /* store Registry value */ +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdCreateDevContext( + IN PDRIVER_OBJECT pi_pDriverObject, + IN MD_DEV_IX_E pi_eDevType, + IN PUNICODE_STRING pi_pNtDeviceName, + IN PUNICODE_STRING pi_pWin32DeviceName, + IN OUT PVOID* pio_ppDevContext + ) +/*++ + +Routine Description: + This routine creates the device extension + + +Arguments: + + pi_pDriverObject........ pointer to driver object + pi_eDevType............. The device type. + pi_pNtDeviceName........ The \Device\????\ name + pi_pWin32DeviceName..... The \DosDevices\????\ name + pio_ppDevContext........ Pointer to created device context. + +Return Value: + + STATUS_SUCCESS if the device extension was created correctly, otherwise + an error indicating the reason for failure. 
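+
+Notes:
+
+    The GetRegistryDword() calls below read per-device parameters from the
+    driver's Parameters key, i.e. (sketch; MD_REGISTRY_PARAMETERS_PATH is
+    defined elsewhere in the driver):
+
+        // HKLM\SYSTEM\CurrentControlSet\Services\<driver>\Parameters
+        //   ConfAddr      REG_DWORD   PCI config-cycle address register offset
+        //   ConfData      REG_DWORD   PCI config-cycle data register offset
+        //   DdrMapOffset  REG_DWORD   offset of the DDR window to map
+        //   DdrMapSize    REG_DWORD   size of the DDR window to map
+        //   ResetCard     REG_DWORD   whether to reset the card on start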
+ +--*/ +{ /* MdCreateDevContext */ + + /* Status of utility calls */ + NTSTATUS l_Status = STATUS_SUCCESS; + /* The Md device context ( l_pDeviceObject extension ) */ + PMD_DEV_CONTEXT_T l_pMdDevContext; + + /* + * Initialize the device context with the template device context + */ + + /* Create the device */ + l_Status = MdAddDevice( + pi_pDriverObject, + pi_pNtDeviceName, + pi_pWin32DeviceName, + sizeof(MD_DEV_CONTEXT_T), + &l_pMdDevContext + ); + + /* Check if device create ok */ + if ( !NT_SUCCESS(l_Status) ) + { /* Device creation failed */ + + /* Set the device context to 0 */ + *pio_ppDevContext = NULL; + + /* Return error code */ + return l_Status; + + } /* Device creation failed */ + + /* Save device type/channel */ + l_pMdDevContext->m_eDevType = pi_eDevType; + + /* + * Init the rest fields of the driver context + */ + + /* spinlock */ + INIT_LOCK_IT( &l_pMdDevContext->m_SpinLock ); + + // spinlock used to protect inc/dec iocount logic + KeInitializeSpinLock (&l_pMdDevContext->m_IoCntSpinLock); + + // init events + KeInitializeEvent(&l_pMdDevContext->m_RemoveEvent, NotificationEvent, FALSE); + KeInitializeEvent(&l_pMdDevContext->m_NoPendingIoEvent, NotificationEvent, FALSE); + KeSetEvent(&l_pMdDevContext->m_NoPendingIoEvent, 1, FALSE); + + // init queue header + InitializeListHead( &l_pMdDevContext->m_PcsQue ); + + // init semaphore + KSEM_INIT( &l_pMdDevContext->m_Sem ); + + // init mutex + KMUTEX_INIT( &l_pMdDevContext->m_Mutex ); + + // set flag + l_pMdDevContext->m_fDeletePending = FALSE; + + switch (l_pMdDevContext->m_eDevType) + { /* device-specific init */ + + case MD_DEV_IX_TAVOR_BD: + GetRegistryDword( L"ConfAddr", MD_DFLT_CONF_ADDR, &l_pMdDevContext->m_ulAddrOffset ); + GetRegistryDword( L"ConfData", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDataOffset ); + l_pMdDevContext->m_nPendingIoCnt = 0; + break; + + case MD_DEV_IX_TAVOR_SD: + GetRegistryDword( L"ConfAddr", MD_DFLT_CONF_ADDR, &l_pMdDevContext->m_ulAddrOffset ); + GetRegistryDword( L"ConfData", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDataOffset ); + GetRegistryDword( L"DdrMapOffset", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDdrMapOffset ); + GetRegistryDword( L"DdrMapSize", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDdrMapSize ); + l_pMdDevContext->m_nPendingIoCnt = 0; + break; + + case MD_DEV_IX_TAVOR: + case MD_DEV_IX_ARBEL_TM: + GetRegistryDword( L"DdrMapOffset", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDdrMapOffset ); + GetRegistryDword( L"DdrMapSize", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_ulDdrMapSize ); + GetRegistryDword( L"ResetCard", MD_DFLT_CONF_DATA, &l_pMdDevContext->m_PerformReset ); + l_pMdDevContext->m_nPendingIoCnt = 0; + l_pMdDevContext->m_hHhHca = NULL; + break; + + case MD_DEV_IX_CTRL: + { /* specific Control device initialization */ + /* pending IO requests: setting 1 here for "adding" the Control device ('cause OS won't call to AddDevice) */ + l_pMdDevContext->m_nPendingIoCnt = 1; + + /* Device open counter */ + l_pMdDevContext->m_nOpenCount = 0; + + // mark device started + l_pMdDevContext->m_DeviceStarted = TRUE; + + break; + } /* specific Control device initialization */ + + } /* device-specific init */ + + /* Return the new created extension */ + *pio_ppDevContext = l_pMdDevContext; + + return l_Status; + +} /* MdCreateDevContext */ + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdRemoveDevContext ( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +/*++ + +Routine Description: + This routine releases the device context 
resources and remove the device + + +Arguments: + + pi_pMdDevContext........ Pointer to the device context. + +Return Value: + + STATUS_SUCCESS if the device extension was created correctly, otherwise + an error indicating the reason for failure. + +--*/ +{ /* MdRemoveDevContext */ + + + if (pi_pMdDevContext != NULL) + { + /* close direct PCI interface */ + if (pi_pMdDevContext->m_eDevType != MD_DEV_IX_CTRL) + PciIfClose( &pi_pMdDevContext->m_Interface ); + + /* Remove the device */ + MdDelDevice( pi_pMdDevContext ); + + /* remove device context */ + /* NO NEED, BECAUSE IT WAS RELEASED ON IoDeleteDevice() ! + MdExFreePool( pi_pMdDevContext ); + */ + } + + return STATUS_SUCCESS; + +} /* MdRemoveDevContext */ + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdDevInit( + IN PMD_DRV_CONTEXT_T pi_pDrvContext, + IN MD_DEV_IX_E pi_eDevType, + IN PUNICODE_STRING pi_pNtDeviceName, + IN PUNICODE_STRING pi_pWin32DeviceName, + IN OUT PVOID* pio_ppDevContext + ) +/*++ + +Routine Description: + This routine creates the device and connect to the driver context + + +Arguments: + + pi_pDrvContext.......... Driver context + pi_eDevType............. Device type. + pi_pNtDeviceName........ The \Device\????\ name + pi_pWin32DeviceName..... The \DosDevices\????\ name + pio_ppDevContext........ The device extension ofthe created object + +Return Value: + + STATUS_SUCCESS on success, otherwise an error indicating the reason for failure. + +--*/ +{ /* MdDevInit */ + + /* Local status variable */ + NTSTATUS l_Status; + /* Default debug ON/OFF flag */ + ULONG l_nDfltDebugPrintLevel = FALSE ; + /* Device context */ + PMD_DEV_CONTEXT_T l_pMdDevContext; + + MdKdPrint( DBGLVL_MINIMUM ,("(MdDevInit) init device '%ws'\n", pi_pWin32DeviceName->Buffer )); + + /* + * Create the device and the device context + */ + l_Status = MdCreateDevContext( + pi_pDrvContext->m_pDrvObject, + pi_eDevType, + pi_pNtDeviceName, + pi_pWin32DeviceName, + &l_pMdDevContext + ); + + /* Check creation status */ + if ( NT_SUCCESS(l_Status) ) + { /* Loaded OK */ + PRE_LOCK_IT; + + MdKdPrint( DBGLVL_DEFAULT ,("(MdDeviceInit) Device successfuly initialized\n")); + + /* Store the device context */ + *pio_ppDevContext = l_pMdDevContext; + + /* Add the device context to the driver list */ + LOCK_IT( &pi_pDrvContext->m_SpinLock); + InsertTailList( &pi_pDrvContext->m_DevQue, &l_pMdDevContext->m_Link ); + UNLOCK_IT( &pi_pDrvContext->m_SpinLock); + + /* connect device to driver */ + l_pMdDevContext->m_pDrvContext = pi_pDrvContext; + + /* Update number of device instances */ + InterlockedIncrement( &pi_pDrvContext->m_uDevNo ); + + // increment the number of cards + { + int l_ix = (int)l_pMdDevContext->m_eDevType; + PMD_HAL_DEV_PARAMS_T l_pDev = &g_DevParams[l_ix]; + if (l_pDev->m_fExpose) + InterlockedIncrement( &g_pDrvContext->m_uCardNo ); + } + } /* Loaded OK */ + + return l_Status ; + +} /* MdDevInit */ + +/*------------------------------------------------------------------------------------------------------*/ + +void +ReleasePendingRequests( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +/*++ + +Routine Description: + + Release pending requests of the Control Device + +Arguments: + + pi_pMdDevContext - pointer to the Control Device + +Return Value: + +--*/ +{ /* ReleasePendingRequests */ + MdKdPrint( DBGLVL_MINIMUM ,("(ReleasePendingRequests) Enter \n" )); +} /* ReleasePendingRequests */ + + 
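+// NOTE: ReleasePendingRequests() above is currently a stub (it only logs).
+// A fleshed-out version would drain the control device's queue of pending
+// IRPs and complete each one as cancelled, along these lines (sketch only;
+// m_PendingIrpQue is a hypothetical queue field, not part of this context):
+//
+//     PLIST_ENTRY l_pEntry;
+//     PIRP        l_pIrp;
+//
+//     while ( (l_pEntry = ExInterlockedRemoveHeadList(
+//                             &pi_pMdDevContext->m_PendingIrpQue,
+//                             &pi_pMdDevContext->m_IoCntSpinLock )) != NULL ) {
+//         l_pIrp = CONTAINING_RECORD( l_pEntry, IRP, Tail.Overlay.ListEntry );
+//         l_pIrp->IoStatus.Status      = STATUS_CANCELLED;
+//         l_pIrp->IoStatus.Information = 0;
+//         IoCompleteRequest( l_pIrp, IO_NO_INCREMENT );
+//     }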
+/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdDevDeInit( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext + ) +/*++ + +Routine Description: + This routine DeInitialize the device setting and context + + +Arguments: + + pi_pMdDevContext........The device context +Return Value: + + None. + +--*/ +{ /* MdDevDeInit */ + /* Current device object */ + PDEVICE_OBJECT l_pFdo ; + /* The Driver context */ + PMD_DRV_CONTEXT_T l_pMdDrvContext; + // prepare lock + PRE_LOCK_IT; + + MdKdPrint( DBGLVL_MINIMUM ,("(MdDevDeInit) Unloading device '%ws' \n", pi_pMdDevContext->m_usDosDeviceName.Buffer)); + + /* Parameter validation*/ + if ( pi_pMdDevContext == NULL ) + { /* no device context */ + return STATUS_UNSUCCESSFUL; + } /* no device context */ + + /* Get the first device object */ + l_pFdo = pi_pMdDevContext->m_pFdo; + + /* Check if driver has any device */ + if ( l_pFdo == NULL ) + { /* no NT device object */ + return STATUS_UNSUCCESSFUL; + } /* no NT device object */ + + /* get driver context */ + l_pMdDrvContext = pi_pMdDevContext->m_pDrvContext; + + /* + * Remove the device from the Driver device list + */ + + // acquire access to the critical section + LOCK_IT( &pi_pMdDevContext->m_SpinLock ); + + // disconnect the global context with device one + RemoveEntryList( &pi_pMdDevContext->m_Link ); + + // release access to the critical section + UNLOCK_IT( &pi_pMdDevContext->m_SpinLock ); + + // decrement the number of created devices + LOCK_IT( &l_pMdDrvContext->m_SpinLock); + if ( l_pMdDrvContext->m_uDevNo > 0) + InterlockedDecrement( &l_pMdDrvContext->m_uDevNo ); + UNLOCK_IT( &l_pMdDrvContext->m_SpinLock); + + // decrement the number of cards + { + int l_ix = (int)pi_pMdDevContext->m_eDevType; + PMD_HAL_DEV_PARAMS_T l_pDev = &g_DevParams[l_ix]; + if (l_pDev->m_fExpose && l_pMdDrvContext->m_uCardNo > 0) + InterlockedDecrement( &l_pMdDrvContext->m_uCardNo ); + } + /* Free the Device context */ + return MdRemoveDevContext( pi_pMdDevContext ); + +} /* MdDevDeInit */ + +NTSTATUS +MdGetDevLocation( + IN PDEVICE_OBJECT pi_pPdo, + IN ULONG * pi_pBus, + IN ULONG * pi_pSlot, + IN ULONG * pi_pFunction + ) +{ + ULONG l_BusNumber, l_DevNumber, l_Function, l_ResultLength = 0; + WCHAR l_Buffer[40], *l_pEnd, *l_pBuf = l_Buffer, *l_pBufEnd = l_Buffer + sizeof(l_Buffer); + NTSTATUS l_Status; + UNICODE_STRING l_UnicodeNumber; + + /* prepare */ + l_ResultLength = 0; + RtlZeroMemory( l_Buffer, sizeof(l_Buffer) ); + + /* Get the device number */ + l_Status = IoGetDeviceProperty(pi_pPdo, + DevicePropertyLocationInformation, sizeof(l_Buffer), &l_Buffer, &l_ResultLength); + + /* Verify if the function was successful */ + if ( !NT_SUCCESS(l_Status) || !l_ResultLength ) { + MdKdPrint( DBGLVL_DEFAULT ,("(MdInitPciCfgCard) Unable to get device number: Status 0x%x, ResultSize %d \n", + l_Status, l_ResultLength )); + goto exit; + } + + // ALL THE BELOW CRAP WE DO INSTEAD OF + // sscanf(l_Buffer, "PCI bus %d, device %d, function %d", &l_BusNumber, &l_DevNumber, &l_Function ); + + /* take bus number */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_BusNumber); + + /* take slot number */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, 
L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_DevNumber); + + /* take function number */ + *(l_Buffer + (l_ResultLength>>1)) = 0; /* set end of string */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, 0, 0 ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_Function); + + /* return the results */ + *pi_pBus = l_BusNumber; + *pi_pSlot = l_DevNumber; + *pi_pFunction = l_Function; + + goto exit; + +err: + l_Status = STATUS_UNSUCCESSFUL; +exit: + return l_Status; +} + +/*------------------------------------------------------------------------------------------------------*/ + +NTSTATUS +MdInitPciCfgCard( + IN PMD_DEV_CONTEXT_T pi_pMdDevContext, + IN PCM_RESOURCE_LIST pi_pRawResources, + IN PCM_RESOURCE_LIST pi_pTranslatedResources + ) +/*++ + +Routine Description: + + This routine prepares the Driver to the work with a PCI card: + - gets card resources from IRP; + - store them on the device context; + - translate physical address into virtual; + - connect interrupt; + +Arguments: + + pi_pMdDevContext....... My device context + pi_pRawResources....... Card raw resources + pi_pTranslatedResources Card translated resources + +Return Value: + + None. + +--*/ +{ /* MdInitPciCfgCard */ + + /* full resource descriptor */ + PCM_FULL_RESOURCE_DESCRIPTOR l_pFullResDesc; + /* partial resource list */ + PCM_PARTIAL_RESOURCE_LIST l_pPartResList; + /* partial resource descriptor */ + PCM_PARTIAL_RESOURCE_DESCRIPTOR l_pPartResDesc; + /* number of resources */ + ULONG l_nResCnt; + /* current resource number */ + ULONG l_nCurResNum; + /* temporary address */ + PUCHAR l_pPortAddr = NULL; + /* status */ + NTSTATUS l_Status; + // Device IX (in PCI configuration) + MD_DEV_IX_E l_DevIx = pi_pMdDevContext->m_eDevType; + // Device BD + PMD_HAL_DEV_PARAMS_T l_pDev = &g_DevParams[l_DevIx]; + // current BAR descriptor + PMD_BAR_T l_pBar; + // for Debug + char * l_BarName; + + MdKdPrint( DBGLVL_DEFAULT ,("(MdInitPciCfgCard) Enter \n" )); + + /* zero the fields */ +#define ZEROF(field) pi_pMdDevContext->field = 0; + ZEROF(m_BusNumber); + ZEROF(m_DevNumber); + ZEROF(m_Function); + ZEROF(m_ulIntVector); + ZEROF(m_ulIntLevel); + ZEROF(m_Affinity); + ZEROF(m_fIntShared); + ZEROF(m_IntMode); + RtlZeroMemory( (void*)&pi_pMdDevContext->m_Cr, sizeof(MD_BAR_T)); + RtlZeroMemory( (void*)&pi_pMdDevContext->m_Uar, sizeof(MD_BAR_T)); + RtlZeroMemory( (void*)&pi_pMdDevContext->m_Ddr, sizeof(MD_BAR_T)); + + if (l_DevIx == MD_DEV_IX_TAVOR_SD) + { /* get BARs via PCI configuration header */ + + l_Status = ReadBars( pi_pMdDevContext ); + if (!NT_SUCCESS(l_Status)) + return l_Status; + + } /* get BARs via PCI configuration header */ + + else + { /* get resources from resource descriptors */ + + /* validate parameters */ + if (pi_pTranslatedResources == NULL) { + MdKdPrint( DBGLVL_LOW ,("(MdInitPciCfgCard) No resources ! 
\n" )); + return STATUS_SUCCESS; + } + + /* prepare for getting resources */ + l_pFullResDesc = &pi_pTranslatedResources->List[0]; + l_pPartResList = &l_pFullResDesc->PartialResourceList; + l_pPartResDesc = &l_pPartResList->PartialDescriptors[0]; + l_nResCnt = l_pPartResList->Count; + + /* store other parameters */ + for (l_nCurResNum = 0; l_nCurResNum < l_nResCnt; ++l_nCurResNum, ++l_pPartResDesc) + { + switch (l_pPartResDesc->Type) + { + case CmResourceTypeBusNumber: + /* Store IB card bus number */ + //MDASSERT( pi_pMdDevContext->m_ulBusNumber == l_pPartResDesc->u.BusNumber.Start ); + pi_pMdDevContext->m_BusNumber = l_pPartResDesc->u.BusNumber.Start; + break; + + case CmResourceTypeInterrupt: + pi_pMdDevContext->m_ulIntVector = l_pPartResDesc->u.Interrupt.Vector; + pi_pMdDevContext->m_ulIntLevel = (KIRQL)l_pPartResDesc->u.Interrupt.Level; + pi_pMdDevContext->m_Affinity = (KAFFINITY)l_pPartResDesc->u.Interrupt.Affinity; + pi_pMdDevContext->m_fIntShared = l_pPartResDesc->ShareDisposition == CmResourceShareShared; + pi_pMdDevContext->m_IntMode = (l_pPartResDesc->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ? Latched : LevelSensitive; + MdKdPrint( DBGLVL_LOW ,("(MdInitPciCfgCard) Dev %d Interrupt: Vector %d, Level %d, Affinity %d, Shared %d, Mode %d\n", + l_pDev->m_DevId, pi_pMdDevContext->m_ulIntVector, pi_pMdDevContext->m_ulIntLevel, + pi_pMdDevContext->m_Affinity, pi_pMdDevContext->m_fIntShared, pi_pMdDevContext->m_IntMode )); + break; + + case CmResourceTypeMemory: + { + ULONG l_BarNum; + LARGE_INTEGER l_Offset = { 0,0 }; + + /* decide, what BAR is it about */ + if (l_pDev->m_SizeBar0 >= l_pPartResDesc->u.Memory.Length) { + l_pBar = &pi_pMdDevContext->m_Cr; l_BarName = "CR"; l_BarNum = 0; } + else + if (l_pDev->m_SizeBar1 >= l_pPartResDesc->u.Memory.Length) { + l_pBar = &pi_pMdDevContext->m_Uar; l_BarName = "UAR"; l_BarNum = 1; } + else { + l_pBar = &pi_pMdDevContext->m_Ddr; l_BarName = "DDR"; l_BarNum = 2; } + + /* store BAR parameters */ + l_pBar->m_MemPhysAddr = l_pPartResDesc->u.Memory.Start; + l_pBar->m_ulMemSize = l_pPartResDesc->u.Memory.Length; + l_pBar->m_usMemFlags = l_pPartResDesc->Flags; + l_pBar->m_ulKernelSize = l_pBar->m_ulMemSize; + l_pBar->m_ulKernelOffset = 0; + + /* recalculate boundaries of mapped mempry for DDR */ + if (l_BarNum == 2 && pi_pMdDevContext->m_ulDdrMapSize != -1) { + l_pBar->m_ulKernelSize = pi_pMdDevContext->m_ulDdrMapSize; + l_pBar->m_ulKernelOffset = pi_pMdDevContext->m_ulDdrMapOffset; + l_Offset.LowPart = pi_pMdDevContext->m_ulDdrMapOffset; + } /* for DDR - map some subset of memory */ + + /* map physical address into virtual kernel one */ + if (l_pBar->m_ulKernelSize) { + l_Offset.QuadPart += l_pBar->m_MemPhysAddr.QuadPart; + l_pBar->m_pKernelAddr = (PUCHAR) MmMapIoSpace( + l_Offset, + l_pBar->m_ulKernelSize, MmNonCached); + } + if (!l_pBar->m_pKernelAddr) return STATUS_NO_MEMORY; + + /* debug print */ + MdKdPrint( DBGLVL_LOW ,("(MdInitPciCfgCard) Dev %d %s: Phys 0x%I64x Size 0x%x, Virt 0x%x Size 0x%x \n", + l_pDev->m_DevId, l_BarName, l_pBar->m_MemPhysAddr, l_pBar->m_ulMemSize, + l_pBar->m_pKernelAddr, l_pBar->m_ulKernelSize )); + } + break; + + default: + MdKdPrint( DBGLVL_DEFAULT ,("(MdInitPciCfgCard) Unsupported resource type 0x%x \n", l_pPartResDesc->Type )); + break; + } + } + + } /* get resources from resource descriptors */ + + // + // get the device location information + // + { + ULONG l_BusNumber, l_DevNumber, l_Function; + + l_Status = MdGetDevLocation( pi_pMdDevContext->m_pPdo, &l_BusNumber, &l_DevNumber, &l_Function ); + + if ( 
!NT_SUCCESS(l_Status) )
+    { // fill default values
+        l_BusNumber = g_pDrvContext->m_uDevNo;
+        l_DevNumber = g_pDrvContext->m_uDevNo;
+        l_Function  = 0;
+    }
+
+    /* store the slot number */
+    pi_pMdDevContext->m_BusNumber = l_BusNumber;
+    pi_pMdDevContext->m_DevNumber = l_DevNumber;
+    pi_pMdDevContext->m_Function  = l_Function;
+
+    MdKdPrint( DBGLVL_LOW ,("(MdInitPciCfgCard) Dev %d location is %d:%d:%d \n",
+        l_pDev->m_DevId, l_BusNumber, l_DevNumber, l_Function ));
+    }
+
+
+    return STATUS_SUCCESS;
+
+} /* MdInitPciCfgCard */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+VOID
+MdDeInitPciCfgCard(
+    IN PMD_DEV_CONTEXT_T pi_pMdDevContext
+    )
+/*++
+
+Routine Description:
+
+    This routine releases OS resources, allocated for the work with a PCI card:
+    - disconnect the interrupt;
+    - unmap virtual address of the CR-space;
+
+
+Arguments:
+
+    pi_pMdDevContext....... My device context
+
+Return Value:
+
+    None.
+
+--*/
+{ /* MdDeInitPciCfgCard */
+
+    // current BAR descriptor
+    PMD_BAR_T l_pBar;
+    // CR space address
+    PUCHAR l_pAddr;
+
+    // unmap CR-space
+    l_pBar = &pi_pMdDevContext->m_Cr;
+    l_pAddr = l_pBar->m_pKernelAddr;
+    l_pBar->m_pKernelAddr = NULL;
+    if (l_pAddr)
+        MmUnmapIoSpace(l_pAddr, l_pBar->m_ulKernelSize);
+
+    // unmap UAR-space
+    l_pBar = &pi_pMdDevContext->m_Uar;
+    l_pAddr = l_pBar->m_pKernelAddr;
+    l_pBar->m_pKernelAddr = NULL;
+    if (l_pAddr)
+        MmUnmapIoSpace(l_pAddr, l_pBar->m_ulKernelSize);
+
+    // unmap DDR-space
+    l_pBar = &pi_pMdDevContext->m_Ddr;
+    l_pAddr = l_pBar->m_pKernelAddr;
+    l_pBar->m_pKernelAddr = NULL;
+    if (l_pAddr)
+        MmUnmapIoSpace(l_pAddr, l_pBar->m_ulKernelSize);
+
+} /* MdDeInitPciCfgCard */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+
+/*------------------------------------------------------------------------------------------------------*/
+
+PCHAR
+WcharToAscii(
+    OUT PUCHAR pi_TargetString,
+    IN const USHORT * pi_SourceString,
+    IN ULONG pi_Size
+    )
+/*++
+
+Routine Description:
+    Converts a wide-character string into ASCII
+
+Arguments:
+
+    pi_TargetString...... result string
+    pi_SourceString...... source string
+    pi_Size.............. size of the source string, in bytes
+
+Return Value:
+
+    Pointer to the result string.
+
+--*/
+{ /* WcharToAscii */
+    int i, size = (pi_Size + 1) >> 1;
+    PCHAR l_pResult = pi_TargetString;
+
+    for (i = 0; i < size; i++)
+        *l_pResult++ = (CHAR)*pi_SourceString++;
+
+    return (PCHAR)pi_TargetString;
+
+} /* WcharToAscii */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+PWCHAR
+WcharFindChar(
+    IN PWCHAR pi_BufStart,
+    IN PWCHAR pi_BufEnd,
+    IN WCHAR pi_FromPattern,
+    IN WCHAR pi_ToPattern
+    )
+/*++
+
+Routine Description:
+    Finds the first wide character that falls inside the given pattern range
+
+Arguments:
+
+    pi_BufStart.......... start of the buffer to search
+    pi_BufEnd............ end of the buffer (exclusive)
+    pi_FromPattern....... lower bound of the pattern range
+    pi_ToPattern......... upper bound of the pattern range
+
+Return Value:
+
+    Pointer to the first matching character, or NULL if none was found.
+
+--*/
+{ /* WcharFindChar */
+    PWCHAR l_pResult = pi_BufStart;
+
+    while (l_pResult < pi_BufEnd) {
+        if (*l_pResult >= pi_FromPattern && *l_pResult <= pi_ToPattern)
+            return l_pResult;
+        l_pResult++;
+    }
+
+    return NULL;
+
+} /* WcharFindChar */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+USHORT
+AsciiToUnicode(
+    PUNICODE_STRING pi_puTargetString,
+    PUCHAR pi_szFormat,
+    ...
+    )
+/*++
+
+Routine Description:
+    Writes a formatted ( printf-like ) string into a unicode string.
+
+Arguments:
+
+    pi_puTargetString.... The target unicode string.
+    pi_szFormat.......... The printf-like format string.
+
+Return Value:
+
+    The formatted string length in bytes.
+
+--*/
+{ /* AsciiToUnicode */
+
+    /* Auxiliary scratch buffer */
+    static UCHAR s_vScratchBuff[1024];
+
+    /* Variable argument list */
+    va_list l_Argptr;
+    /* Length of the formatted string in bytes */
+    int l_nStrLenBytes ;
+    /* Char index */
+    USHORT l_nCharNo ;
+    /* Maximum length */
+    USHORT l_nMaxLength;
+
+    /* Init the variable argument list */
+    va_start(l_Argptr, pi_szFormat);
+
+    /* Build the formatted string */
+    l_nStrLenBytes = vsprintf(&s_vScratchBuff[0] , pi_szFormat , l_Argptr);
+
+    /* Check if we need to allocate buffer */
+    if ( pi_puTargetString->Buffer == NULL )
+    { /* Need to allocate buffer */
+
+        /* Allocate the UNICODE string buffer */
+        pi_puTargetString->Buffer =
+            (PWCHAR)MdExAllocatePool(NonPagedPool,(l_nStrLenBytes+1)*sizeof(WCHAR));
+
+        /* If allocation failed return */
+        if ( pi_puTargetString->Buffer == NULL ) return 0;
+
+        /* Set the UNICODE string new parameters */
+        pi_puTargetString->MaximumLength = (USHORT)((l_nStrLenBytes+1)*sizeof(WCHAR)) ;
+
+    } /* Need to allocate buffer */
+
+    /* Reset the string actual length */
+    pi_puTargetString->Length = 0;
+
+    /* Calc max length */
+    l_nMaxLength = MT_MIN((pi_puTargetString->MaximumLength/sizeof(WCHAR) - 1),(USHORT)l_nStrLenBytes);
+
+    /* Convert to UNICODE */
+    for ( l_nCharNo = 0; l_nCharNo < l_nMaxLength; l_nCharNo++ )
+    { /* Convert a CHAR to WCHAR */
+
+        /* Convert the current char */
+        pi_puTargetString->Buffer[ l_nCharNo ] = (WCHAR)s_vScratchBuff[l_nCharNo] ;
+
+        /* Update actual length */
+        pi_puTargetString->Length += sizeof(WCHAR) ;
+
+    } /* Convert a CHAR to WCHAR */
+
+    /* NULL terminate */
+    pi_puTargetString->Buffer[ l_nCharNo ] = (WCHAR)'\0';
+
+    /* Term the variable argument list */
+    va_end(l_Argptr);
+
+    return pi_puTargetString->Length ;
+
+} /* AsciiToUnicode */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+BOOLEAN
+MdCanAcceptIoRequests(
+    IN PDEVICE_OBJECT DeviceObject
+    )
+/*++
+
+Routine Description:
+
+    Check device extension status flags;
+
+    Can't accept a new io request if device:
+    1) is removed,
+    2) has never been started,
+    3) is stopped,
+    4) has a remove request pending, or
+    5) has a stop device pending
+
+
+Arguments:
+
+    DeviceObject - pointer to the device object for this instance of the 82930
+    device.
+ + +Return Value: + + return TRUE if can accept new io requests, else FALSE + +--*/ +{ + PMD_DEV_CONTEXT_T l_pMdDevContext = (PMD_DEV_CONTEXT_T)DeviceObject->DeviceExtension; + BOOLEAN l_fCan = FALSE; + + + //flag set when processing IRP_MN_REMOVE_DEVICE + if ( !l_pMdDevContext->m_DeviceRemoved && + // device must be started( enabled ) + l_pMdDevContext->m_DeviceStarted && + // flag set when driver has answered success to IRP_MN_QUERY_REMOVE_DEVICE + !l_pMdDevContext->m_RemoveDeviceRequested && + // flag set when driver has answered success to IRP_MN_QUERY_STOP_DEVICE + !l_pMdDevContext->m_StopDeviceRequested && + // control device (MDCTL) marked for deleting) + !l_pMdDevContext->m_fDeletePending) { + l_fCan = TRUE; + } + + MdKdPrintCond( DBGLVL_MAXIMUM, !l_fCan, ("**** FALSE return from MdCanAcceptIoRequests()!\n")); + + return l_fCan; +} + +/*------------------------------------------------------------------------------------------------------*/ + +BOOLEAN +MdCreateDeviceNames( + IN PCHAR pi_pDevName, + OUT PUNICODE_STRING po_pNtName, + OUT PUNICODE_STRING po_pDosName + ) +/*++ + +Routine Description: + + Creates Nt and Dos names of the device; + Convert them into Unicode format and put the results into given Unicode strings + Allocates buffers for the Unicode strings from non-paged pool + +Arguments: + + pi_pDevName - ASCII name of the device without path. + po_pNtName - pointer to Unicode string descriptor for NT device name + po_pDosName - pointer to Unicode string descriptor for DOS device name + + +Return Value: + + return TRUE if succeeded to create strings, else FALSE + +--*/ +{ /* MdCreateDeviceNames */ + + /* buffer for ASCII device name */ + char l_DevName[MD_MAX_DEV_NAME_LEN + 1]; + /* Unicode string length */ + int l_nUnicodeNameLen; + + /* build NT name */ + strcpy( l_DevName, "\\Device\\" ); + strcat( l_DevName, pi_pDevName ); + + /* get the length */ + l_nUnicodeNameLen = (int)((strlen(l_DevName) + 1) * sizeof(WCHAR)); + + /* + * Init the NT device name + */ + /* Allocate buffers for the NT device name */ + po_pNtName->Buffer = MdExAllocatePool(NonPagedPool, l_nUnicodeNameLen); + + /* Verify allocation */ + if ( po_pNtName->Buffer == NULL ) + { /* Allocation failed */ + + return FALSE; + + } /* Allocation failed */ + + /* store lengths */ + po_pNtName->Length = (USHORT)(l_nUnicodeNameLen - sizeof(WCHAR)); + po_pNtName->MaximumLength = (unsigned short)l_nUnicodeNameLen; + + /* Build Unicode NT device name */ + AsciiToUnicode( + po_pNtName, + l_DevName + ); + + /* build DOS name */ + strcpy( l_DevName, "\\DosDevices\\" ); + strcat( l_DevName, pi_pDevName ); + + /* get the length */ + l_nUnicodeNameLen = (int)((strlen(l_DevName) + 1) * sizeof(WCHAR)); + + /* Allocate buffers for the DOS device name */ + po_pDosName->Buffer = MdExAllocatePool(NonPagedPool, l_nUnicodeNameLen); + + /* Verify allocation */ + if ( po_pDosName->Buffer == NULL ) + { /* Allocation failed */ + + /* Free the NT device name path buffer */ + MdExFreePool(po_pNtName->Buffer); + + return FALSE; + + } /* Allocation failed */ + + /* store lengths */ + po_pDosName->Length = (USHORT)(l_nUnicodeNameLen - sizeof(WCHAR)); + po_pDosName->MaximumLength = (unsigned short)l_nUnicodeNameLen; + + /* Build the NT device name */ + AsciiToUnicode( + po_pDosName, + l_DevName + ); + + return TRUE; + +} /* MdCreateDeviceNames */ + +/*------------------------------------------------------------------------------------------------------*/ + +/********************************** + * Device name DB handling * + 
+/**********************************
+ *    Device name DB handling     *
+ **********************************/
+int AddDevNameToDb(char *pi_DevName)
+    {
+    ULONG l_Size = (ULONG)strlen(pi_DevName) + 1;
+    ULONG l_DbSize = g_pDrvContext->m_DevNamesDbSize;
+
+    if ( l_DbSize + l_Size <= sizeof(g_pDrvContext->m_DevNamesDb))
+    {
+        RtlCopyMemory( &g_pDrvContext->m_DevNamesDb[l_DbSize], pi_DevName, l_Size);
+        g_pDrvContext->m_DevNamesDbCnt++;
+        g_pDrvContext->m_DevNamesDbSize += l_Size;
+        return 0;
+    }
+    else
+    {
+        return 1;
+    }
+    }
+
+/*------------------------------------------------------------------------------------------------------*/
+/*------------------------------------------------------------------------------------------------------*/
+/*------------------------------------------------------------------------------------------------------*/
+/*------------------------------------------------------------------------------------------------------*/
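+
+/*
+ * Illustrative sketch (editor's example, not part of the original driver):
+ * m_DevNamesDb is a flat buffer holding m_DevNamesDbCnt NUL-terminated
+ * strings packed back to back in the first m_DevNamesDbSize bytes, as
+ * filled in by AddDevNameToDb above. A consumer could walk it like this
+ * (field types assumed from the usage above):
+ */
+static void
+DevNamesDbWalkSketch( void )
+{
+    char *l_pName = g_pDrvContext->m_DevNamesDb;
+    ULONG l_nIx;
+
+    for ( l_nIx = 0; l_nIx < g_pDrvContext->m_DevNamesDbCnt; l_nIx++ )
+    {
+        /* l_pName points at the current exposed device name */
+        l_pName += strlen( l_pName ) + 1;    /* advance to the next string */
+    }
+}
+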
+NTSTATUS CreateOneDevice(
+    IN  int pi_DevIx,                          /* index of the device BD info */
+    UNICODE_STRING * pi_pusNtDeviceName,       /* NT name */
+    UNICODE_STRING * pi_pusDosDeviceName,      /* Win32 name */
+    OUT PMD_DEV_CONTEXT_T * pi_ppMdDevContext  /* context of created device */
+)
+{
+    /* status */
+    NTSTATUS l_Status = STATUS_SUCCESS;
+    /* our context to this device */
+    PMD_DEV_CONTEXT_T l_pMdDevContext;
+    /* device object we create for the added device */
+    PDEVICE_OBJECT l_pFdo = NULL;
+    /* dev info */
+    PMD_HAL_DEV_PARAMS_T l_pDevInfo;           /* device BD info */
+    /* buffer for ASCII device name */
+    char l_DevName[MD_MAX_DEV_NAME_LEN + 1];
+
+    /* create and keep the exposed name */
+    l_pDevInfo = &g_DevParams[pi_DevIx];
+    if (l_pDevInfo->m_DevId == MD_DEV_ID_TAVOR || l_pDevInfo->m_DevId == MD_DEV_ID_ARBEL_TM)
+        sprintf( l_DevName, l_pDevInfo->m_ExFormat, g_pDrvContext->m_uCardNo );
+    else
+        sprintf( l_DevName, l_pDevInfo->m_ExFormat, l_pDevInfo->m_DevId, g_pDrvContext->m_uCardNo );
+    if (l_pDevInfo->m_fExpose) {
+        if (AddDevNameToDb(l_DevName))
+            return STATUS_INSUFFICIENT_RESOURCES;
+    }
+
+    /* create Control Device names */
+    /* !!! from now on work with ARBEL_TM as with TAVOR */
+    //l_pDevInfo = (pi_DevIx == (int)MD_DEV_IX_ARBEL_TM) ? &g_DevParams[MD_DEV_IX_TAVOR] : &g_DevParams[pi_DevIx];
+    l_pDevInfo = &g_DevParams[pi_DevIx];
+    if (l_pDevInfo->m_DevId == MD_DEV_ID_TAVOR || l_pDevInfo->m_DevId == MD_DEV_ID_ARBEL_TM)
+        sprintf( l_DevName, l_pDevInfo->m_Format, g_pDrvContext->m_uCardNo );
+    else
+        sprintf( l_DevName, l_pDevInfo->m_Format, l_pDevInfo->m_DevId, g_pDrvContext->m_uCardNo );
+
+    if (!MdCreateDeviceNames(l_DevName, pi_pusNtDeviceName, pi_pusDosDeviceName))
+        return STATUS_INSUFFICIENT_RESOURCES;
+
+    /* report the generated name */
+    MdKdPrint( DBGLVL_LOW,("(CreateOneDevice) Generated device name %s \n", l_DevName));
+
+    /* create functional device object (FDO) */
+    l_Status = MdDevInit( g_pDrvContext, l_pDevInfo->m_DevIx, pi_pusNtDeviceName, pi_pusDosDeviceName, &l_pMdDevContext);
+
+    if (!NT_SUCCESS(l_Status))
+    { /* device creation failed */
+
+        /* Free the NT device name path buffer */
+        if (pi_pusNtDeviceName->Buffer)
+            MdExFreePool(pi_pusNtDeviceName->Buffer);
+
+        /* Free the DOS device name path buffer */
+        if (pi_pusDosDeviceName->Buffer)
+            MdExFreePool(pi_pusDosDeviceName->Buffer);
+
+        return l_Status;
+    } /* device creation failed */
+
+    /* save ASCII name */
+    strcpy( l_pMdDevContext->m_AsciiDevName, l_DevName );
+
+    /* get FDO handle */
+    l_pFdo = l_pMdDevContext->m_pFdo;
+
+    // Setting this flag causes the driver not to receive IRP_MN_STOP_DEVICE
+    // during suspend and not to get IRP_MN_START_DEVICE during resume.
+    l_pFdo->Flags |= DO_POWER_PAGABLE;
+
+    *pi_ppMdDevContext = l_pMdDevContext;
+    return STATUS_SUCCESS;
+}
+
+/*------------------------------------------------------------------------------------------------------*/
+
+BOOLEAN
+MdGetRegistryDword(
+    IN     PWCHAR RegPath,
+    IN     PWCHAR ValueName,
+    IN OUT PULONG Value
+    )
+
+/*++
+
+Routine Description:
+
+    Obtain a DWORD value from the registry
+
+Arguments:
+
+    RegPath   - supplies the absolute registry path
+    ValueName - supplies the value name
+    Value     - receives the REG_DWORD value
+
+Return Value:
+
+    TRUE if successful, FALSE on fail.
+
+--*/
+
+{
+    UNICODE_STRING path;
+    RTL_QUERY_REGISTRY_TABLE paramTable[2];  // zeroed second entry terminates the table
+    ULONG lDef = *Value;                     // default
+    NTSTATUS status;
+    BOOLEAN fres;
+    WCHAR wbuf[ MAXIMUM_FILENAME_LENGTH ];
+
+    MdKdPrint( DBGLVL_HIGH,("Enter MdGetRegistryDword() RegPath = %ws\n ValueName =%ws\n", RegPath, ValueName));
+    path.Length = 0;
+    path.MaximumLength = MAXIMUM_FILENAME_LENGTH * sizeof( WCHAR );  // MAXIMUM_FILENAME_LENGTH defined in wdm.h
+    path.Buffer = wbuf;
+
+    RtlZeroMemory(path.Buffer, path.MaximumLength);
+    RtlMoveMemory(path.Buffer, RegPath, wcslen( RegPath) * sizeof( WCHAR ));
+
+    MdKdPrint( DBGLVL_HIGH,("MdGetRegistryDword() path= %ws\n", path.Buffer ));
+
+    RtlZeroMemory(paramTable, sizeof(paramTable));
+
+    paramTable[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+    paramTable[0].Name = ValueName;
+    paramTable[0].EntryContext = Value;
+    paramTable[0].DefaultType = REG_DWORD;
+    paramTable[0].DefaultData = &lDef;
+    paramTable[0].DefaultLength = sizeof(ULONG);
+
+    status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE | RTL_REGISTRY_OPTIONAL,
+        path.Buffer, paramTable, NULL, NULL);
+
+    if (NT_SUCCESS(status)) {
+        MdKdPrint( DBGLVL_MEDIUM,("Exit MdGetRegistryDWord() SUCCESS, value = decimal %d 0x%x\n", *Value, *Value));
+        fres = TRUE;
+
+    } else {
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_INVALID_PARAMETER) ,("MdGetRegistryDWord() STATUS_INVALID_PARAMETER\n"));
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_OBJECT_NAME_NOT_FOUND) ,("MdGetRegistryDWord() STATUS_OBJECT_NAME_NOT_FOUND\n"));
+
+        fres = FALSE;
+
+    }
+
+    return fres;
+}
+
+/*------------------------------------------------------------------------------------------------------*/
+
+BOOLEAN
+MdGetRegistryString(
+    IN     PWCHAR RegPath,
+    IN     PWCHAR ValueName,
+    IN     PUNICODE_STRING DfltValue,
+    IN OUT PUNICODE_STRING Value
+    )
+
+/*++
+
+Routine Description:
+
+    Obtain a string value from the registry
+
+Arguments:
+
+    RegPath   - supplies the absolute registry path
+    ValueName - supplies the value name
+    DfltValue - supplies the default string
+    Value     - receives the REG_SZ value
+
+Return Value:
+
+    TRUE if successful, FALSE on fail.
+
+--*/
+
+{
+    UNICODE_STRING path;
+    RTL_QUERY_REGISTRY_TABLE paramTable[2];  // zeroed second entry terminates the table
+    NTSTATUS status;
+    BOOLEAN fres;
+    WCHAR wbuf[ MAXIMUM_FILENAME_LENGTH ];
+
+    MdKdPrint( DBGLVL_HIGH,("MdGetRegistryString(Enter) RegPath = %ws\n ValueName =%ws\n", RegPath, ValueName));
+    path.Length = 0;
+    path.MaximumLength = MAXIMUM_FILENAME_LENGTH * sizeof( WCHAR );  // MAXIMUM_FILENAME_LENGTH defined in wdm.h
+    path.Buffer = wbuf;
+
+    RtlZeroMemory(path.Buffer, path.MaximumLength);
+    RtlMoveMemory(path.Buffer, RegPath, wcslen( RegPath) * sizeof( WCHAR ));
+
+    MdKdPrint( DBGLVL_HIGH,("MdGetRegistryString() path= %ws\n", path.Buffer ));
+
+    RtlZeroMemory(paramTable, sizeof(paramTable));
+
+    paramTable[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+    paramTable[0].Name = ValueName;
+    paramTable[0].EntryContext = Value;
+    paramTable[0].DefaultType = REG_SZ;
+    paramTable[0].DefaultData = DfltValue;
+    paramTable[0].DefaultLength = DfltValue->MaximumLength;
+
+    status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE | RTL_REGISTRY_OPTIONAL,
+        path.Buffer, &paramTable[0], NULL, NULL);
+
+    if (NT_SUCCESS(status)) {
+        MdKdPrint( DBGLVL_MEDIUM,("MdGetRegistryString(Exit) SUCCESS, value = %ws \n", Value->Buffer));
+        fres = TRUE;
+
+    } else {
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_INVALID_PARAMETER) ,("MdGetRegistryString(Exit) STATUS_INVALID_PARAMETER\n"));
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_OBJECT_NAME_NOT_FOUND) ,("MdGetRegistryString(Exit) STATUS_OBJECT_NAME_NOT_FOUND\n"));
+
+        fres = FALSE;
+
+    }
+
+    return fres;
+}
+
+/*------------------------------------------------------------------------------------------------------*/
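+
+/*
+ * Illustrative sketch (editor's example, not part of the original driver):
+ * a RegUserCallback_t routine of the kind MdGetRegistryMultiString below
+ * expects. RegistryMultiSzCallBack converts each wide string of the
+ * REG_MULTI_SZ value to ANSI and invokes the user routine once per string.
+ * The %Z format specifier for ANSI_STRING is assumed to be supported by the
+ * underlying debug-print facility.
+ */
+static void
+PrintOneRegStringSketch( PANSI_STRING pi_pStr )
+{
+    MdKdPrint( DBGLVL_MEDIUM, ("multi-sz entry: %Z\n", pi_pStr) );
+}
+
+/*------------------------------------------------------------------------------------------------------*/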
+NTSTATUS
+RegistryMultiSzCallBack(
+    IN PWSTR ValueName,
+    IN ULONG ValueType,
+    IN PVOID ValueData,
+    IN ULONG ValueLength,
+    IN PVOID Context,
+    IN PVOID EntryContext
+    )
+/*++
+
+Routine Description:
+
+    This callback for a registry SZ or MULTI_SZ value is called once for each
+    SZ in the value. It converts the wide string to ANSI and passes it to the
+    user callback supplied in Context.
+    Adapted from ClasspCheckRegistryForMediaChangeCompletion.
+
+Arguments:
+
+    ValueName    - name of the key that was opened
+    ValueType    - type of data stored in the value (REG_SZ for this routine)
+    ValueData    - data in the registry, in this case a wide string
+    ValueLength  - length of the data including the terminating null
+    Context      - the RegUserCallback_t routine to invoke for each string
+    EntryContext - unused
+
+Return Value:
+
+    STATUS_SUCCESS
+
+--*/
+{
+    UNICODE_STRING uStr;
+    ANSI_STRING aStr;
+
+    PAGED_CODE();
+    UNREFERENCED_PARAMETER(ValueName);
+    UNREFERENCED_PARAMETER(ValueLength);
+    UNREFERENCED_PARAMETER(EntryContext);
+
+    // if the data is not a terminated string, exit
+    if (ValueType != REG_SZ) return STATUS_SUCCESS;
+
+    // convert wide string to ASCII
+    RtlInitUnicodeString( &uStr, ValueData );
+    RtlInitAnsiString( &aStr, NULL );
+    RtlUnicodeStringToAnsiString( &aStr, &uStr, TRUE );
+
+    // call user routine
+    ((RegUserCallback_t)Context)(&aStr);
+
+    // free resources
+    RtlFreeAnsiString( &aStr );
+
+    return STATUS_SUCCESS;
+}
+
+BOOLEAN
+MdGetRegistryMultiString(
+    IN PWCHAR RegPath,
+    IN PWCHAR ValueName,
+    IN PVOID DfltValue,
+    IN ULONG DfltValueSize,
+    IN RegUserCallback_t Func
+    )
+
+/*++
+
+Routine Description:
+
+    Obtain a REG_MULTI_SZ value from the registry, invoking Func once for
+    each string in the value.
+
+Arguments:
+
+    RegPath       - supplies the absolute registry path
+    ValueName     - supplies the value name
+    DfltValue     - supplies the default multi-string data
+    DfltValueSize - size of the default data in bytes
+    Func          - user callback invoked for each string
+
+Return Value:
+
+    TRUE if successful, FALSE on fail.
+
+--*/
+
+{
+    UNICODE_STRING path;
+    RTL_QUERY_REGISTRY_TABLE paramTable[2];  // zeroed second entry terminates the table
+    NTSTATUS status;
+    BOOLEAN fres;
+    WCHAR wbuf[ MAXIMUM_FILENAME_LENGTH ];
+    ULONG ulDummy;
+
+    MdKdPrint( DBGLVL_HIGH,("MdGetRegistryMultiString(Enter) RegPath = %ws\n ValueName =%ws\n", RegPath, ValueName));
+    path.Length = 0;
+    path.MaximumLength = MAXIMUM_FILENAME_LENGTH * sizeof( WCHAR );  // MAXIMUM_FILENAME_LENGTH defined in wdm.h
+    path.Buffer = wbuf;
+
+    RtlZeroMemory(path.Buffer, path.MaximumLength);
+    RtlMoveMemory(path.Buffer, RegPath, wcslen( RegPath) * sizeof( WCHAR ));
+
+    MdKdPrint( DBGLVL_HIGH,("MdGetRegistryMultiString() path= %ws\n", path.Buffer ));
+
+    RtlZeroMemory(paramTable, sizeof(paramTable));
+
+    paramTable[0].QueryRoutine = RegistryMultiSzCallBack;
+    paramTable[0].Flags = RTL_QUERY_REGISTRY_REQUIRED;
+    paramTable[0].Name = ValueName;
+    paramTable[0].EntryContext = &ulDummy;
+    paramTable[0].DefaultType = REG_MULTI_SZ;
+    paramTable[0].DefaultData = DfltValue;
+    paramTable[0].DefaultLength = DfltValueSize;
+
+    status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE | RTL_REGISTRY_OPTIONAL,
+        path.Buffer, &paramTable[0], Func, NULL);
+
+    if (NT_SUCCESS(status)) {
+        fres = TRUE;
+
+    } else {
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_INVALID_PARAMETER) ,("MdGetRegistryMultiString(Exit) STATUS_INVALID_PARAMETER\n"));
+
+        MdKdPrintCond( DBGLVL_MEDIUM, (status == STATUS_OBJECT_NAME_NOT_FOUND) ,("MdGetRegistryMultiString(Exit) STATUS_OBJECT_NAME_NOT_FOUND\n"));
+
+        fres = FALSE;
+
+    }
+
+    return fres;
+}
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.h
new file mode 100644
index 00000000..7122d376
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MdUtil.h
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef _MT_UTIL_H
+#define _MT_UTIL_H
+
+/* registry path used for parameters global to all instances of the driver */
+#define MD_REGISTRY_PARAMETERS_PATH \
+    L"\\REGISTRY\\Machine\\System\\CurrentControlSet\\SERVICES\\mt23108\\Parameters"
+
+/*
+ * Copy a UNICODE_STRING
+ */
+#define UCopyString(pi_pDestination,pi_pSource,pi_fAllocateDest) \
+{\
+    PUNICODE_STRING _D = (pi_pDestination);\
+    PUNICODE_STRING _S = (pi_pSource);\
+    _D->Length = _S->Length;\
+    _D->MaximumLength = _D->Length + sizeof(WCHAR);\
+    if( pi_fAllocateDest ) _D->Buffer = (PWCHAR)MdExAllocatePool(NonPagedPool, _D->MaximumLength);\
+    if ( _D->Buffer != NULL ){\
+        RtlMoveMemory(_D->Buffer , _S->Buffer, _S->Length);\
+        _D->Buffer[_D->Length/sizeof(WCHAR)] = UNICODE_NULL;\
+    }\
+    else {\
+        _D->Length = _D->MaximumLength = 0;\
+    }\
+}
+
+/* Get argument from va_list pointer */
+#define GET(VAR, TYPE) do { \
+    RtlMoveMemory(&(VAR), l_pTmp, sizeof(TYPE)); \
+    l_pTmp += sizeof(TYPE); \
+    } while(0)
+
+/* read a DWORD from the registry */
+#define MD_GET_REGISTRY_DWORD(name,dflt_val,result) \
+    { \
+        ULONG l_uValue = dflt_val; \
+        if (MdGetRegistryDword( MD_REGISTRY_PARAMETERS_PATH, name, &l_uValue )) \
+            result = l_uValue; \
+        else \
+            result = dflt_val; \
+    }
+
+typedef void (*RegUserCallback_t)(PANSI_STRING);
+
+#define MD_GET_REGISTRY_STR(name,buf,size) \
+    { \
+        WCHAR l_swStr[250]; \
+        UNICODE_STRING l_uStr = {0,sizeof(l_swStr),&l_swStr[0]}; \
+        ANSI_STRING l_aStr = {0,size,buf}; \
+        RtlZeroMemory( l_swStr, sizeof(l_swStr)); \
+        MdGetRegistryString( MD_REGISTRY_PARAMETERS_PATH, name, &l_uStr, &l_uStr ); \
+        RtlUnicodeStringToAnsiString( &l_aStr, &l_uStr, FALSE ); \
+    }
+
+#define MD_GET_REGISTRY_MULTI_STR(name, dflt_buf, dflt_size, Func) \
+    MdGetRegistryMultiString( MD_REGISTRY_PARAMETERS_PATH, name, dflt_buf, dflt_size, Func )
+
+BOOLEAN
+MdGetRegistryDword(
+    IN     PWCHAR RegPath,
+    IN     PWCHAR ValueName,
+    IN OUT PULONG Value
+    );
+
+BOOLEAN
+MdGetRegistryString(
+    IN     PWCHAR RegPath,
+    IN     PWCHAR ValueName,
+    IN     PUNICODE_STRING DfltValue,
+    IN OUT PUNICODE_STRING Value
+    );
+
+BOOLEAN
+MdGetRegistryMultiString(
+    IN PWCHAR RegPath,
+    IN PWCHAR ValueName,
+    IN PVOID DfltValue,
+    IN ULONG DfltValueSize,
+    IN RegUserCallback_t Func
+    );
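+
+/*
+ * Illustrative usage sketch for the registry helpers above (editor's
+ * example, not part of the original header). The value name and variable
+ * are hypothetical; MD_GET_REGISTRY_DWORD falls back to the supplied
+ * default when the value is absent or unreadable.
+ */
+static __inline VOID
+RegistryUsageSketch( void )
+{
+    ULONG l_uDebugLevel;
+
+    /* Read Parameters\DebugLevel, defaulting to 0 if not present */
+    MD_GET_REGISTRY_DWORD( L"DebugLevel", 0, l_uDebugLevel );
+
+    /* ... l_uDebugLevel now holds the configured or default value ... */
+}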
+
+VOID
+WriteEventLogEntry(
+    PVOID  pi_pIoObject,
+    ULONG  pi_ErrorCode,
+    ULONG  pi_UniqueErrorCode,
+    ULONG  pi_FinalStatus,
+    ULONG  pi_nDataItems,
+    ...
+    );
+
+PCHAR
+WcharToAscii(
+    OUT PUCHAR pi_TargetString,
+    IN  const USHORT * pi_SourceString,
+    IN  ULONG pi_Size
+    );
+/*++
+
+Routine Description:
+    Converts a wide-character string into ASCII
+
+Arguments:
+
+    pi_TargetString...... result string
+    pi_SourceString...... source string
+    pi_Size.............. size of the source string
+
+Return Value:
+
+    A pointer to the result string.
+
+--*/
+
+PWCHAR
+WcharFindChar(
+    IN PWCHAR pi_BufStart,
+    IN PWCHAR pi_BufEnd,
+    IN WCHAR pi_FromPattern,
+    IN WCHAR pi_ToPattern
+    );
+/*++
+
+Routine Description:
+    Finds the first character within the given pattern range in a
+    wide-character buffer
+
+Arguments:
+
+    pi_BufStart.......... start of the source string
+    pi_BufEnd............ end of the source string
+    pi_FromPattern....... start of pattern range to find
+    pi_ToPattern......... end of pattern range to find
+
+Return Value:
+
+    pointer to the first pattern found or NULL (when reached the end)
+
+--*/
+/*++
+
+Routine Description:
+    Writes an entry to the event log (documents WriteEventLogEntry, declared above).
+
+Arguments:
+
+    pi_pIoObject......... The IO object ( driver object or device object ).
+    pi_ErrorCode......... The error code.
+    pi_UniqueErrorCode... A specific error code.
+    pi_FinalStatus....... The final status.
+    pi_nDataItems........ Number of data items.
+    .
+    . data items values
+    .
+
+Return Value:
+
+    None.
+
+--*/
+
+USHORT
+AsciiToUnicode(
+    PUNICODE_STRING pi_puTargetString,
+    PUCHAR pi_szFormat,
+    ...
+    );
+/*++
+
+Routine Description:
+    Writes a formatted ( printf-like ) string into a Unicode string.
+
+Arguments:
+
+    pi_puTargetString.... The target Unicode string.
+    pi_szFormat.......... The printf-like format string.
+
+Return Value:
+
+    The formatted string length in bytes.
+
+--*/
+
+NTSTATUS
+MdDevInit(
+    IN     PMD_DRV_CONTEXT_T pi_pDrvContext,
+    IN     MD_DEV_ID_E pi_eDevType,
+    IN     PUNICODE_STRING pi_pNtDeviceName,
+    IN     PUNICODE_STRING pi_pWin32DeviceName,
+    IN OUT PVOID* pio_ppDevContext
+    );
+/*++
+
+Routine Description:
+    This routine creates the device and connects it to the driver context
+
+Arguments:
+
+    pi_pDrvContext.......... Driver context
+    pi_eDevType............. Device type.
+    pi_pNtDeviceName........ The \Device\????\ name
+    pi_pWin32DeviceName..... The \DosDevices\????\ name
+    pio_ppDevContext........ The device extension of the created object
+
+Return Value:
+
+    STATUS_SUCCESS on success, otherwise an error indicating the reason for failure.
+
+--*/
+
+NTSTATUS
+MdDevDeInit(
+    IN PMD_DEV_CONTEXT_T pi_pMdDevContext
+    );
+/*++
+
+Routine Description:
+    This routine de-initializes the device settings and context
+
+Arguments:
+
+    pi_pMdDevContext........ The device context
+
+Return Value:
+
+    NTSTATUS completion code.
+
+--*/
+BOOLEAN
+MdCanAcceptIoRequests(
+    IN PDEVICE_OBJECT DeviceObject
+    );
+/*++
+
+Routine Description:
+
+    Check device extension status flags;
+
+    Can't accept a new io request if the device:
+    1) is removed,
+    2) has never been started,
+    3) is stopped,
+    4) has a remove request pending, or
+    5) has a stop device pending
+
+Arguments:
+
+    DeviceObject - pointer to the device object for this instance of the
+    device.
+
+Return Value:
+
+    return TRUE if can accept new io requests, else FALSE
+
+--*/
+
+NTSTATUS
+MdInitPciCfgCard(
+    IN PMD_DEV_CONTEXT_T pi_pMdDevContext,
+    IN PCM_RESOURCE_LIST pi_pRawResources,
+    IN PCM_RESOURCE_LIST pi_pTranslatedResources
+    );
+/*++
+
+Routine Description:
+
+    This routine prepares the driver for work with a PCI card:
+    - gets the card resources from the IRP;
+    - stores them in the device context;
+    - translates physical addresses into virtual ones;
+    - connects the interrupt;
+
+Arguments:
+
+    pi_pMdDevContext....... My device context
+    pi_pRawResources....... Card raw resources
+    pi_pTranslatedResources Card translated resources
+
+Return Value:
+
+    NTSTATUS completion code.
+
+--*/
+
+VOID
+MdDeInitPciCfgCard(
+    IN PMD_DEV_CONTEXT_T pi_pMdDevContext
+    );
+/*++
+
+Routine Description:
+
+    This routine releases the OS resources allocated for work with a PCI card:
+    - disconnects the interrupt;
+    - unmaps the virtual address of the CR-space;
+
+Arguments:
+
+    pi_pMdDevContext....... My device context
+
+Return Value:
+
+    None.
+
+--*/
+
+BOOLEAN
+MdCreateDeviceNames(
+    IN  PCHAR pi_pDevName,
+    OUT PUNICODE_STRING po_pNtName,
+    OUT PUNICODE_STRING po_pDosName
+    );
+/*++
+
+Routine Description:
+
+    Creates the NT and DOS names of the device;
+    converts them into Unicode format and puts the results into the given Unicode strings.
+    Allocates buffers for the Unicode strings from non-paged pool.
+
+Arguments:
+
+    pi_pDevName - ASCII name of the device without path.
+    po_pNtName  - pointer to Unicode string descriptor for NT device name
+    po_pDosName - pointer to Unicode string descriptor for DOS device name
+
+Return Value:
+
+    return TRUE if succeeded to create the strings, else FALSE
+
+--*/
+
+/*------------------------------------------------------------------------------------------------------*/
+
+NTSTATUS CreateOneDevice(
+    IN  int pi_DevIx,                          /* index of the device BD info */
+    UNICODE_STRING * pi_pusNtDeviceName,       /* NT name */
+    UNICODE_STRING * pi_pusDosDeviceName,      /* Win32 name */
+    OUT PMD_DEV_CONTEXT_T * pi_ppMdDevContext  /* context of created device */
+);
+
+/*------------------------------------------------------------------------------------------------------*/
+
+int AddDevNameToDb(char *pi_DevName);
+
+/*------------------------------------------------------------------------------------------------------*/
+NTSTATUS
+MdGetDevLocation(
+    IN PDEVICE_OBJECT pi_pPdo,
+    IN ULONG * pi_pBus,
+    IN ULONG * pi_pSlot,
+    IN ULONG * pi_pFunction
+    );
+
+/*------------------------------------------------------------------------------------------------------*/
+/*------------------------------------------------------------------------------------------------------*/
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MddLib.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MddLib.h
new file mode 100644
index 00000000..2b702b0d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/MddLib.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _MDD_LIB_H_ +#define _MDD_LIB_H_ + +/* no PVOID arithmetic in MS */ +#ifdef __WIN__ +#define PVOID_PTR char * +#endif +#ifdef __LINUX__ +#define PVOID_PTR void * +#endif + +/*==========================================================================*/ +/*================== Only __WIN__ dependent stuff ==========================*/ +/*==========================================================================*/ + +#ifdef __WIN__ + +/*==========================================================================*/ +/*================================ Includes ================================*/ +/*==========================================================================*/ + +/* + * system + */ +#include +#include + +/* + * proprietary + */ +#include +#include +#include +#include + +/*==========================================================================*/ + + +#endif /* __WIN__ */ + +#endif /* _MDD_LIB_H_ */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/infinihost.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/infinihost.h new file mode 100644 index 00000000..b27c7a0f --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/infinihost.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _INFINIHOST_H_ +#define _INFINIHOST_H_ + +/* headers */ +#include "thh_mod_obj.h" +#include "thh_hob.h" +#include +#include +//#include +#include + +/* exports */ +int HH_init_module(void); +//int VIPKL_init_module(void); +int VAPI_init_module(void); +void VAPI_cleanup_module(void); +//int VIPKL_cleanup_module(void); +int HH_cleanup_module(void); +int IB_MGT_init_module(int qp0_only); +int IB_MGT_cleanup_module(void); +int IB_MGT_reattach_hca( /* IN*/ const char * dev_name); +void IB_MGT_fatal_delete_hca(VAPI_hca_hndl_t vapi_hca_hndl); +//HH_ret_t THH_hob_destroy(HH_hca_hndl_t hca_hndl); + + +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c new file mode 100644 index 00000000..826d7803 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.c @@ -0,0 +1,62 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +BOOL WINAPI +DllMain( + IN HANDLE pi_hInst, + IN ULONG pi_nReason, + IN LPVOID pi_pReserved + ) +/*++ + +Routine Description: + + DLL main entry point. + +Arguments: + + pi_hInst.......... + pi_nReason........ + pi_pReserved...... + +Return Value: + + Always TRUE + +--*/ +{ /* DllMain */ + + return TRUE ; + +} /* DllMain */ diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc new file mode 100644 index 00000000..9fbd633e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/mdmsg/MdMsg.mc @@ -0,0 +1,65 @@ +;/*++ +;==================================================================================== +;Copyright (c) 2001 Mellanox Technologies +; +;Module Name: +; +; MdMsg.mc +; +;Abstract: +; +; MDDL Driver event log messages +; +;Authors: +; +; Leonid Keller +; +;Environment: +; +; User Mode . 
+; +;===================================================================================== +;--*/ +; +MessageIdTypedef = NTSTATUS + +SeverityNames = ( + Success = 0x0:STATUS_SEVERITY_SUCCESS + Informational = 0x1:STATUS_SEVERITY_INFORMATIONAL + Warning = 0x2:STATUS_SEVERITY_WARNING + Error = 0x3:STATUS_SEVERITY_ERROR + ) + +FacilityNames = ( + System = 0x0 + RpcRuntime = 0x2:FACILITY_RPC_RUNTIME + RpcStubs = 0x3:FACILITY_RPC_STUBS + Io = 0x4:FACILITY_IO_ERROR_CODE + Md = 0x7:FACILITY_MD_ERROR_CODE + ) + + +MessageId=0x0001 +Facility=Md +Severity=Informational +SymbolicName=MD_EVENT_LOG_LOAD_OK +Language=English +The Mellanox InfiniHost Driver has loaded Successfully. +. + +MessageId=+1 +Facility=Md +Severity=Error +SymbolicName=MD_EVENT_LOG_LOAD_ERROR +Language=English +The Mellanox InfiniHost Driver has failed to load +. + +MessageId=+1 +Facility=Md +Severity=Error +SymbolicName=MD_EVENT_LOG_LOAD_ERROR_FW +Language=English +The Mellanox InfiniHost Driver has failed to load: THH_add_hca failed. Check FW. +. + diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/resource.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/resource.h new file mode 100644 index 00000000..f0ae2712 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/resource.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* {{NO_DEPENDENCIES}} */ +/* Microsoft Developer Studio generated include file. */ +/* Used by Md.rc */ + +#define IDD_DIALOG1 101 + +/* Next default values for new objects */ + +#ifdef APSTUDIO_INVOKED +#ifndef APSTUDIO_READONLY_SYMBOLS +#define _APS_NO_MFC 1 +#define _APS_NEXT_RESOURCE_VALUE 102 +#define _APS_NEXT_COMMAND_VALUE 40001 +#define _APS_NEXT_CONTROL_VALUE 1000 +#define _APS_NEXT_SYMED_VALUE 101 +#endif +#endif diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/tavor_csp.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/tavor_csp.h new file mode 100644 index 00000000..5adc903c --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/tavor_csp.h @@ -0,0 +1,13869 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef H_bits_tav_csp_H +#define H_bits_tav_csp_H + + +/* */ + +struct PCIX_Status_Reg_device_st { /* Little Endian */ + pseudo_bit_t func_num[0x00003]; + pseudo_bit_t dev_num[0x00005]; + pseudo_bit_t bus_num[0x00008]; + pseudo_bit_t dev_64bit[0x00001]; + pseudo_bit_t cap_133Mhz[0x00001]; + pseudo_bit_t splitc_discard[0x00001]; + pseudo_bit_t unexpected_splc[0x00001]; + pseudo_bit_t device_complex[0x00001]; + pseudo_bit_t designed_max_mem_r_bc[0x00002]; + pseudo_bit_t designed_max_outstand_splt[0x00003]; + pseudo_bit_t designed_max_cum_r_size[0x00003]; + pseudo_bit_t rec_splc_err_msg[0x00001]; + pseudo_bit_t reserved0[0x00002]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Pcix_command_reg_st { /* Little Endian */ + pseudo_bit_t data_perr_r_en[0x00001]; + pseudo_bit_t en_relaxed_order[0x00001]; + pseudo_bit_t max_mem_r_bc[0x00002]; + pseudo_bit_t max_outs_splt[0x00003]; + pseudo_bit_t resrvd[0x00009]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Message_Ctrl_Reg_st { /* Little Endian */ + pseudo_bit_t msi_en[0x00001]; + pseudo_bit_t multiple_msg_cap[0x00003]; + pseudo_bit_t multiple_msg_en[0x00003]; + pseudo_bit_t cap_64bit_addt[0x00001]; + pseudo_bit_t reserved0[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct MSIX_Message_Ctrl_Reg_st { /* Little Endian */ + pseudo_bit_t Table_Size[0x0000b]; + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t function_mask[0x00001]; + pseudo_bit_t msix_en[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Exp_ROM_st { /* Little Endian */ + pseudo_bit_t exp_rom_en[0x00001]; + pseudo_bit_t res[0x0000a]; + pseudo_bit_t addr[0x00015]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Bar_st { /* Little Endian */ + pseudo_bit_t memory_or_io[0x00001]; + pseudo_bit_t type[0x00002]; + pseudo_bit_t prefetchable[0x00001]; + pseudo_bit_t addr[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Status_Register_st { /* Little Endian */ + pseudo_bit_t rsv2[0x00003]; + pseudo_bit_t interrupt_status[0x00001]; + pseudo_bit_t capability_list[0x00001]; + pseudo_bit_t cap_66Mhz[0x00001]; + pseudo_bit_t rsv1[0x00001]; + pseudo_bit_t fast_b2b[0x00001]; + pseudo_bit_t master_data_perr[0x00001]; + pseudo_bit_t 
devsel_timing[0x00002]; + pseudo_bit_t sig_target_abort[0x00001]; + pseudo_bit_t rec_target_abort[0x00001]; + pseudo_bit_t rec_master_abort[0x00001]; + pseudo_bit_t sig_sys_err[0x00001]; + pseudo_bit_t detected_perr[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Command_Register_st { /* Little Endian */ + pseudo_bit_t io_space[0x00001]; + pseudo_bit_t mem_space[0x00001]; + pseudo_bit_t bus_master[0x00001]; + pseudo_bit_t special_cycle[0x00001]; + pseudo_bit_t mem_w_and_invalid_en[0x00001]; + pseudo_bit_t vga_snoop_en[0x00001]; + pseudo_bit_t parity_err_response[0x00001]; + pseudo_bit_t stepping_ctrl[0x00001]; + pseudo_bit_t serr_en[0x00001]; + pseudo_bit_t fast_b2b[0x00001]; + pseudo_bit_t interrupt_disable[0x00001]; + pseudo_bit_t rsv0[0x00005]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct PCIX_Status_Reg_bridge_st { /* Little Endian */ + pseudo_bit_t func_num[0x00003]; + pseudo_bit_t dev_num[0x00005]; + pseudo_bit_t bus_num[0x00008]; + pseudo_bit_t dev_64bit[0x00001]; + pseudo_bit_t cap_133Mhz[0x00001]; + pseudo_bit_t splitc_discard[0x00001]; + pseudo_bit_t unexpected_splc[0x00001]; + pseudo_bit_t splc_overrun[0x00001]; + pseudo_bit_t splr_delayed[0x00001]; + pseudo_bit_t reserved0[0x0000a]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct PCIX_Sec_Status_Reg_st { /* Little Endian */ + pseudo_bit_t dev_64bit[0x00001]; + pseudo_bit_t cap_133Mhz[0x00001]; + pseudo_bit_t splitc_discard[0x00001]; + pseudo_bit_t unexpected_splc[0x00001]; + pseudo_bit_t splc_overrun[0x00001]; + pseudo_bit_t splr_delayed[0x00001]; + pseudo_bit_t sec_clock_freq[0x00003]; + pseudo_bit_t reserved0[0x00007]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Bridge_Ctrl_st { /* Little Endian */ + pseudo_bit_t parity_err_response[0x00001]; + pseudo_bit_t serr_en[0x00001]; + pseudo_bit_t isa_en[0x00001]; + pseudo_bit_t vga_en[0x00001]; + pseudo_bit_t res1[0x00001]; + pseudo_bit_t master_abort_mode[0x00001]; + pseudo_bit_t sec_bus_reset[0x00001]; + pseudo_bit_t fast_b2b_en[0x00001]; + pseudo_bit_t pri_discard_timeout[0x00001]; + pseudo_bit_t sec_discard_timeout[0x00001]; + pseudo_bit_t discard_timer_stat[0x00001]; + pseudo_bit_t discard_timer_serr_en[0x00001]; + pseudo_bit_t res0[0x00004]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Prefetch_mem_lsb_st { /* Little Endian */ + pseudo_bit_t addr_type[0x00004]; + pseudo_bit_t addr[0x0000c]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Secondary_Status_Reg_st { /* Little Endian */ + pseudo_bit_t res1[0x00004]; + pseudo_bit_t capability_list[0x00001]; + pseudo_bit_t cap_66Mhz[0x00001]; + pseudo_bit_t res0[0x00001]; + pseudo_bit_t fast_b2b[0x00001]; + pseudo_bit_t data_parity_reported[0x00001]; + pseudo_bit_t devsel_timing[0x00002]; + pseudo_bit_t sig_target_abort[0x00001]; + pseudo_bit_t rec_target_abort[0x00001]; + pseudo_bit_t rec_master_abort[0x00001]; + pseudo_bit_t rec_sys_err[0x00001]; + pseudo_bit_t detected_perr[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct IO_lsb_st { /* Little Endian */ + pseudo_bit_t addr_type[0x00004]; + pseudo_bit_t addr[0x00004]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg29_device_st { /* Little Endian */ + struct PCIX_Status_Reg_device_st pcix_status_reg; +/* 
--------------------------------------------------------- */ +}; + +/* */ + +struct Reg28_device_st { /* Little Endian */ + pseudo_bit_t pcix_cap_id[0x00008]; + pseudo_bit_t pcix_next_cap_ptr[0x00008]; + struct Pcix_command_reg_st pcix_command; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg27_st { /* Little Endian */ + pseudo_bit_t message_data_reg[0x00010]; + pseudo_bit_t reserved0[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg26_st { /* Little Endian */ + pseudo_bit_t message_addr_msb[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg25_st { /* Little Endian */ + pseudo_bit_t message_addr_lsb[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg24_st { /* Little Endian */ + pseudo_bit_t msi_cap_id[0x00008]; + pseudo_bit_t msi_next_cap_ptr[0x00008]; + struct Message_Ctrl_Reg_st message_control_reg; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg23_st { /* Little Endian */ + pseudo_bit_t cr_space_data[0x00020];/* Holds the data that should be put in cr space in address (from prev word) */ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg22_st { /* Little Endian */ + pseudo_bit_t cr_space_addr[0x00020];/* Holds addres in cr-space .use to burn eprom */ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg_vendor_st { /* Little Endian */ + pseudo_bit_t vendor_specific[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg18_device_st { /* Little Endian */ + pseudo_bit_t PBA_BIR[0x00003]; + pseudo_bit_t PBA_Offset[0x0001d]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg17_device_st { /* Little Endian */ + pseudo_bit_t Table_BIR[0x00003]; + pseudo_bit_t Table_Offset[0x0001d]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg16_device_st { /* Little Endian */ + pseudo_bit_t msix_cap_id[0x00008]; + pseudo_bit_t msix_next_cap_ptr[0x00008]; + struct MSIX_Message_Ctrl_Reg_st message_control_reg; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg15_device_st { /* Little Endian */ + pseudo_bit_t interrupt_line[0x00008]; + pseudo_bit_t interrupt_pin[0x00008]; + pseudo_bit_t min_gnt[0x00008]; + pseudo_bit_t max_latency[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg14_device_st { /* Little Endian */ + pseudo_bit_t reserevd[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg13_st { /* Little Endian */ + pseudo_bit_t cap_ptr[0x00008]; + pseudo_bit_t reserevd[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg12_device_st { /* Little Endian */ + struct Exp_ROM_st eprom_base_addr; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg11_device_st { /* Little Endian */ + pseudo_bit_t subsys_vendor_id[0x00010]; + pseudo_bit_t subsys_id[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg10_device_st { /* Little Endian */ + pseudo_bit_t cardbus_cis_ptr[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg9_device_st { /* Little Endian */ + 
struct Bar_st bar5; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg8_device_st { /* Little Endian */ + struct Bar_st bar4; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg7_device_st { /* Little Endian */ + struct Bar_st bar3; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg6_device_st { /* Little Endian */ + struct Bar_st bar2; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg5_st { /* Little Endian */ + struct Bar_st bar1; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg4_st { /* Little Endian */ + struct Bar_st bar0; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg3_st { /* Little Endian */ + pseudo_bit_t cache_line_size[0x00008]; + pseudo_bit_t latency_timer[0x00008]; + pseudo_bit_t header_type[0x00008]; + pseudo_bit_t bist[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg2_st { /* Little Endian */ + pseudo_bit_t revision_id[0x00008]; + pseudo_bit_t class_code[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg1_st { /* Little Endian */ + struct Command_Register_st command_reg; + struct Status_Register_st status_reg; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg0_st { /* Little Endian */ + pseudo_bit_t vendor_id[0x00010]; + pseudo_bit_t device_id[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg31_bridge_st { /* Little Endian */ + pseudo_bit_t downstream_split_ctrl_reg[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg30_bridge_st { /* Little Endian */ + pseudo_bit_t upstream_split_ctrl_reg[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg29_bridge_st { /* Little Endian */ + struct PCIX_Status_Reg_bridge_st pcix_status_reg; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg28_bridge_st { /* Little Endian */ + pseudo_bit_t pcix_cap_id[0x00008]; + pseudo_bit_t pcix_next_cap_ptr[0x00008]; + struct PCIX_Sec_Status_Reg_st pcix_sec_status_reg; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg15_bridge_st { /* Little Endian */ + pseudo_bit_t interrupt_line[0x00008]; + pseudo_bit_t interrupt_pin[0x00008]; + struct Bridge_Ctrl_st bridge_ctrl; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg14_bridge_st { /* Little Endian */ + struct Exp_ROM_st eprom_base_addr; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg12_bridge_st { /* Little Endian */ + pseudo_bit_t io_base_upper16[0x00010]; + pseudo_bit_t io_limit_upper16[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg11_bridge_st { /* Little Endian */ + pseudo_bit_t prefetch_limit_upper32[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg10_bridge_st { /* Little Endian */ + pseudo_bit_t prefetch_base_upper32[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg9_bridge_st { /* Little Endian */ + struct Prefetch_mem_lsb_st prefetch_mem_base; + struct Prefetch_mem_lsb_st 
prefetch_mem_limit; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg8_bridge_st { /* Little Endian */ + pseudo_bit_t mem_base[0x00010]; + pseudo_bit_t mem_limit[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg7_bridge_st { /* Little Endian */ + struct IO_lsb_st io_base; + struct IO_lsb_st io_limit; + struct Secondary_Status_Reg_st sec_status; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Reg6_bridge_st { /* Little Endian */ + pseudo_bit_t pri_bus_num[0x00008]; + pseudo_bit_t sec_bus_num[0x00008]; + pseudo_bit_t subo_bus_num[0x00008]; + pseudo_bit_t sec_latency_timer[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine CECC Debug */ + +struct CECCDEBUG_st { /* Little Endian */ + pseudo_bit_t cc_agent[0x00002]; /* cc_agent */ + pseudo_bit_t cc_agentlast[0x00002]; /* cc_agentlast */ + pseudo_bit_t cccmd_ps[0x00003]; /* cccmd_ps fsm */ + pseudo_bit_t reserved0[0x00019]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine EVENT Debug */ + +struct CEEVENTDEBUG_st { /* Little Endian */ + pseudo_bit_t eve_agent[0x00002]; /* eve_agent */ + pseudo_bit_t eve_agentlast[0x00002];/* eve_agentlast */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine CENSI Debug */ + +struct CENSIDEBUG_st { /* Little Endian */ + pseudo_bit_t nsi_agent[0x00002]; /* nsi_agent */ + pseudo_bit_t nsi_agentlast[0x00002];/* nsi_agentlast */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine TPT Debug */ + +struct CETPTDEBUG_st { /* Little Endian */ + pseudo_bit_t tpt_agent[0x00002]; /* tpt_agent */ + pseudo_bit_t tpt_agentlast[0x00002];/* tpt_agentlast */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine LDB Debug */ + +struct CELDBDEBUG_st { /* Little Endian */ + pseudo_bit_t ldb_res_ps[0x00001]; /* ldb_res_ps fsm */ + pseudo_bit_t reserved0[0x0001f]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine CEINSTAGE Debug */ + +struct CEINSTAGEDEBUG_st { /* Little Endian */ + pseudo_bit_t rdexarb_ps[0x00001]; /* rdexarb_ps fsm */ + pseudo_bit_t fifopop_ps[0x00001]; /* fifopop_ps fsm */ + pseudo_bit_t reserved0[0x0001e]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine FIFO Debug */ + +struct CEINFIFODEBUG_st { /* Little Endian */ + pseudo_bit_t write_counter[0x00008];/* write counter */ + pseudo_bit_t read_counter[0x00008]; /* read counter */ + pseudo_bit_t fifo_empty[0x00001]; /* fifo empty signal */ + pseudo_bit_t reserved0[0x0000f]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine CEINDRE Debug */ + +struct CEINRDEDEBUG_st { /* Little Endian */ + pseudo_bit_t push_ps[0x00002]; /* push_ps fsm */ + pseudo_bit_t reserved0[0x0001e]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine NSWR Debug */ + +struct CENSWRDEBUG_st { /* Little Endian */ + pseudo_bit_t cens_corefree[0x00001];/* cens_corefree signal */ + pseudo_bit_t nswr_done[0x00001]; /* nswr_done signal */ + pseudo_bit_t cens_acknk[0x00002]; /* cens_acknk signals */ + pseudo_bit_t nswr_pushfifo_ps[0x00001];/* nswr_pushfifo_ps fsm */ + pseudo_bit_t 
reserved0[0x0001b];
+/* --------------------------------------------------------- */
+};
+
+/* Completion Engine Core Debug */
+
+struct CECOREDEBUG_st {    /* Little Endian */
+    pseudo_bit_t produceridx_reg[0x00020];/* producer index */
+/* --------------------------------------------------------- */
+    pseudo_bit_t ldb_entry_idx[0x00018];/* ldb entry idx */
+    pseudo_bit_t ldb_pop_counter[0x00004];/* ldb pop counter */
+    pseudo_bit_t ldb_access_op_reg[0x00003];/* ldb access opcode */
+    pseudo_bit_t reserved0[0x00001];
+/* --------------------------------------------------------- */
+    pseudo_bit_t completion_counter[0x00011];/* completion_counter */
+    pseudo_bit_t reserved1[0x0000f];
+/* --------------------------------------------------------- */
+    pseudo_bit_t event_generate[0x00003];/* event_generate by core */
+    pseudo_bit_t solicitidx_update[0x00001];/* solicited index update indication */
+    pseudo_bit_t lasteventidx_update[0x00001];/* last event index update indication */
+    pseudo_bit_t cq_rep_around[0x00001];/* cq rep around occur */
+    pseudo_bit_t reserved2[0x00006];
+    pseudo_bit_t corestate_ps[0x00003]; /* corestate_ps fsm */
+    pseudo_bit_t reserved3[0x00001];
+    pseudo_bit_t rqst_state[0x00003];   /* Requester PS */
+    pseudo_bit_t reserved4[0x00003];
+    pseudo_bit_t coreldbin_ps[0x00001]; /* coreldbin_ps fsm */
+    pseudo_bit_t corecccmd_ps[0x00002]; /* corecccmd_ps fsm */
+    pseudo_bit_t coreccdata_ps[0x00002];/* coreccdat_ps fsm */
+    pseudo_bit_t reserved5[0x00005];
+/* --------------------------------------------------------- */
+    pseudo_bit_t solicitidx[0x00020];   /* solicited index */
+/* --------------------------------------------------------- */
+    pseudo_bit_t lasteventidx[0x00020]; /* last event index */
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved6[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* ce in fifo size */
+
+struct CEINFIFOCFG_st {    /* Little Endian */
+    pseudo_bit_t ceinfifo_addr[0x00008];/* last address - changing fifo size
+                                           0 means not using this value */
+    pseudo_bit_t reserved0[0x00018];
+/* --------------------------------------------------------- */
+};
+
+/* CE CQC Configuration */
+
+struct CECQCCFG_st {    /* Little Endian */
+    pseudo_bit_t cqc_credits[0x00005];  /* ce credits to CQC */
+    pseudo_bit_t reserved0[0x0001b];
+/* --------------------------------------------------------- */
+};
+
+/* @clear nswr */
+
+struct CENSWRCFG_st {    /* Little Endian */
+    pseudo_bit_t erp_clr_nswr_ptr[0x00001];/* clear fifo ptr (reset fifo) */
+    pseudo_bit_t nswr_nsi_ps[0x00003];  /* nswr_nsi_ps fsm */
+    pseudo_bit_t nswr_tpt_ps[0x00002];  /* nswr_tpt_ps fsm */
+    pseudo_bit_t nswr_cc_ps[0x00002];   /* nswr_cc_ps fsm */
+    pseudo_bit_t reserved0[0x00018];
+/* --------------------------------------------------------- */
+};
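+
+/*
+ * Editor's note (illustrative, not part of the original header): each
+ * pseudo_bit_t array dimension above gives a field width in bits, and the
+ * widths within one register dword always sum to 32. A compile-time check
+ * for CECQCCFG_st (the macro and typedef names below are hypothetical):
+ */
+#define MD_CECQCCFG_BITS (0x00005 + 0x0001b)    /* cqc_credits + reserved0 */
+typedef char md_cecqccfg_is_one_dword[(MD_CECQCCFG_BITS == 32) ? 1 : -1];
+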
+/* CE Core Configuration */
+
+struct CECORECFG_st {    /* Little Endian */
+    pseudo_bit_t ver[0x00004];          /* version */
+    pseudo_bit_t cfg_hcaid[0x00003];    /* hca id */
+    pseudo_bit_t cfg_ignore_hdrx[0x00001];/* do not wait for hdrx on posted writes to NSI */
+    pseudo_bit_t nsb_page_size[0x00004];/* page_size encoding
+                                           0000 - reserved
+                                           0001 - reserved
+                                           0010 - 4K (default)
+                                           0011 - 8K
+                                           0100 - 16K
+                                           0101 - 32K
+                                           0110 - 64K
+                                           Others - reserved
+                                           */
+    pseudo_bit_t rep_interrupt[0x00001];/* interrupt on rep or page boundary */
+    pseudo_bit_t cfg_no_errpsn[0x00001];/* no modified errPSN command to LDB */
+    pseudo_bit_t reserved0[0x00002];
+    pseudo_bit_t cfg_ldb_pop_limit[0x00004];/* limit of ldb reads before doing pop. Max is 8 (which is also the default) */
+    pseudo_bit_t reserved1[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t cecc_timer_limit[0x00010];/* read cqc timer */
+    pseudo_bit_t cecc_timer_count_limit[0x00010];/* timer counter limit */
+/* --------------------------------------------------------- */
+    pseudo_bit_t stop_tpt_nsi[0x00001]; /* stop sending to tpt/nsi */
+    pseudo_bit_t ce_stoped_tpt_nsi[0x00001];/* ce stopped sending to tpt/nsi */
+    pseudo_bit_t reserved2[0x0001e];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved3[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct debug_bits_st {    /* Little Endian */
+    pseudo_bit_t remote[0x00001];
+    pseudo_bit_t reserved0[0x00003];
+    pseudo_bit_t dbg_en[0x00001];
+    pseudo_bit_t dbg_crdy[0x00001];
+    pseudo_bit_t dbg_ret[0x00001];
+    pseudo_bit_t reserved1[0x00001];
+    pseudo_bit_t int_d[0x00001];
+    pseudo_bit_t rint[0x00001];
+    pseudo_bit_t reserved2[0x00002];
+    pseudo_bit_t code_bp_reg[0x00001];
+    pseudo_bit_t read_bp_reg[0x00001];
+    pseudo_bit_t write_bp_reg[0x00001];
+    pseudo_bit_t rtrap_reg[0x00001];
+    pseudo_bit_t misacc[0x00001];
+    pseudo_bit_t reserved3[0x0000f];
+/* --------------------------------------------------------- */
+};
+
+/* Pop FIFO Control Register */
+
+struct Irisc_pop_fifo_ctrl_st {    /* Little Endian */
+    pseudo_bit_t f[0x00001];       /* Full */
+    pseudo_bit_t af[0x00001];      /* Almost Full - Same as Full if FIFO does not implement AF output. */
+    pseudo_bit_t e[0x00001];       /* Empty */
+    pseudo_bit_t ae[0x00001];      /* Almost Empty - May be same as Empty if FIFO does not implement AE output. */
+    pseudo_bit_t reserved0[0x00004];
+    pseudo_bit_t state[0x00008];   /* number of free entries in fifo */
+    pseudo_bit_t reserved1[0x00008];
+    pseudo_bit_t pop[0x00001];     /* Write Only (not defined on read) - When a 1 is written a Pop will be done to the FIFO. */
+    pseudo_bit_t poprd[0x00001];   /* PopRd - When set, every read from FIFO DWord 0 will result in a Pop to the FIFO. */
+    pseudo_bit_t reserved2[0x00005];
+    pseudo_bit_t flock[0x00001];   /* FIFOLocked - This bit is set when the fifo is being used by some entity. Sw is required to read the fifocontrol field before using a fifo. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the fifo writes a zero. */
+/* --------------------------------------------------------- */
+};
+
+/* FIFO Control Register */
+
+struct FIFOCONTROL_st {    /* Little Endian */
+    pseudo_bit_t f[0x00001];       /* Full */
+    pseudo_bit_t af[0x00001];      /* Almost Full - Same as Full if FIFO does not implement AF output. */
+    pseudo_bit_t e[0x00001];       /* Empty */
+    pseudo_bit_t ae[0x00001];      /* Almost Empty - May be same as Empty if FIFO does not implement AE output. */
+    pseudo_bit_t reserved0[0x00004];
+    pseudo_bit_t state[0x00008];   /* number of free entries in fifo */
+    pseudo_bit_t reserved1[0x00008];
+    pseudo_bit_t pop[0x00001];     /* Write Only (not defined on read) - When a 1 is written a Pop will be done to the FIFO. */
+    pseudo_bit_t poprd[0x00001];   /* PopRd - When set, every read from FIFO DWord 0 will result in a Pop to the FIFO. */
+    pseudo_bit_t reserved2[0x00005];
+    pseudo_bit_t flock[0x00001];   /* FIFOLocked - This bit is set when the fifo is being used by some entity. Sw is required to read the fifocontrol field before using a fifo. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the fifo writes a zero. */
+/* --------------------------------------------------------- */
+};
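+
+/*
+ * Editor's note (illustrative, not part of the original header): flock above
+ * behaves as a read-to-acquire semaphore. Reading the fifocontrol dword
+ * returns flock==0 exactly once to the winning agent (hw then sets the bit
+ * atomically); the owner releases the FIFO by writing flock back to zero.
+ * The accessor below is a hypothetical sketch over a mapped register.
+ */
+static int
+FifoLockSketch( volatile ULONG *pi_pFifoControl )
+{
+    ULONG l_Ctrl;
+
+    /* spin until a read returns flock == 0, i.e. we own the FIFO */
+    do {
+        l_Ctrl = *pi_pFifoControl;
+    } while ( l_Ctrl & (1UL << 31) );           /* flock is the top bit */
+
+    /* ... pop/read FIFO entries here ... */
+
+    *pi_pFifoControl = l_Ctrl & ~(1UL << 31);   /* write zero to release */
+    return 0;
+}
+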
+/* Gateway Control Register */
+
+struct GWCONTROL_st {    /* Little Endian */
+    pseudo_bit_t gwaddress[0x00018];
+    pseudo_bit_t gwcmd[0x00006];   /* Command/Status - Sw writes to this field the desired command. Hw executes the cmd and returns in this field the status of the operation. For details on commands and status, see instance description. */
+    pseudo_bit_t gwbusy[0x00001];  /* Written to 1 by SW together with command to trigger
+                                      HW. It is cleared by hw after completing the required
+                                      operation and setting the status in the Command/Status
+                                      field. */
+    pseudo_bit_t gwlocked[0x00001];/* Gateway Locked - This bit is set when the gw is being used by some entity. Sw is required to read the gwcontrol field before using a gw. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the gw writes a zero. */
+/* --------------------------------------------------------- */
+};
+
+/* Doorbell FIFO Controller */
+
+struct DB_FIFOCNTL_st {    /* Little Endian */
+    pseudo_bit_t f[0x00001];       /* fifo full */
+    pseudo_bit_t af[0x00001];      /* almost_full */
+    pseudo_bit_t e[0x00001];       /* fifo empty */
+    pseudo_bit_t ae[0x00001];      /* almost_empty */
+    pseudo_bit_t reserved0[0x00014];
+    pseudo_bit_t pop[0x00001];     /* pop */
+    pseudo_bit_t poprd[0x00001];   /* PopRd - When set, every read from FIFO DWord 0 will result in a Pop to the FIFO. */
+    pseudo_bit_t reserved1[0x00005];
+    pseudo_bit_t flock[0x00001];   /* FIFOLocked - This bit is set when the fifo is being used by some entity. Sw is required to read the fifocontrol field before using a fifo. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the fifo writes a zero.
+
+/* Performance Counters */
+
+struct QPC_Performance_Counters_st { /* Little Endian */
+ pseudo_bit_t sqpc_miss_cnt[0x00020]; /* SQPC cache miss count */
+/* --------------------------------------------------------- */
+ pseudo_bit_t rqpc_miss_cnt[0x00020]; /* RQPC cache miss count */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cqc_miss_cnt[0x00020]; /* CQC cache miss count */
+/* --------------------------------------------------------- */
+};
+
+/* Performance Counters */
+
+struct TPT_Performance_Counters_st { /* Little Endian */
+ pseudo_bit_t mpt_miss_cnt[0x00020]; /* MPT cache miss count */
+/* --------------------------------------------------------- */
+ pseudo_bit_t mtt_miss_cnt[0x00020]; /* MTT cache miss count */
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct Device_header_st { /* Little Endian */
+ struct Reg0_st reg0;
+/* --------------------------------------------------------- */
+ struct Reg1_st reg1;
+/* --------------------------------------------------------- */
+ struct Reg2_st reg2;
+/* --------------------------------------------------------- */
+ struct Reg3_st reg3;
+/* --------------------------------------------------------- */
+ struct Reg4_st reg4;
+/* --------------------------------------------------------- */
+ struct Reg5_st reg5;
+/* --------------------------------------------------------- */
+ struct Reg6_device_st reg6;
+/* --------------------------------------------------------- */
+ struct Reg7_device_st reg7;
+/* --------------------------------------------------------- */
+ struct Reg8_device_st reg8;
+/* --------------------------------------------------------- */
+ struct Reg9_device_st reg9;
+/* --------------------------------------------------------- */
+ struct Reg10_device_st reg10;
+/* --------------------------------------------------------- */
+ struct Reg11_device_st reg11;
+/* --------------------------------------------------------- */
+ struct Reg12_device_st reg12;
+/* --------------------------------------------------------- */
+ struct Reg13_st reg13;
+/* --------------------------------------------------------- */
+ struct Reg14_device_st reg14;
+/* --------------------------------------------------------- */
+ struct Reg15_device_st reg15;
+/* --------------------------------------------------------- */
+ struct Reg16_device_st reg16;
+/* --------------------------------------------------------- */
+ struct Reg17_device_st reg17;
+/* --------------------------------------------------------- */
+ struct Reg18_device_st reg18;
+/* --------------------------------------------------------- */
+ struct Reg_vendor_st reg19;
+/* --------------------------------------------------------- */
+ struct Reg_vendor_st reg20;
+/* --------------------------------------------------------- */
+ struct Reg_vendor_st reg21;
+/* --------------------------------------------------------- */
+ struct Reg22_st reg22;
+/* --------------------------------------------------------- */
+ struct Reg23_st reg23;
+/* --------------------------------------------------------- */
+ struct Reg24_st reg24;
+/* --------------------------------------------------------- */
+ struct Reg25_st reg25;
+/* --------------------------------------------------------- */
+ struct Reg26_st reg26;
+/* --------------------------------------------------------- */
+ struct Reg27_st reg27;
+/*
--------------------------------------------------------- */ + struct Reg28_device_st reg28; +/* --------------------------------------------------------- */ + struct Reg29_device_st reg29; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct Bridge_header_st { /* Little Endian */ + struct Reg0_st reg0; +/* --------------------------------------------------------- */ + struct Reg1_st reg1; +/* --------------------------------------------------------- */ + struct Reg2_st reg2; +/* --------------------------------------------------------- */ + struct Reg3_st reg3; +/* --------------------------------------------------------- */ + struct Reg4_st reg4; +/* --------------------------------------------------------- */ + struct Reg5_st reg5; +/* --------------------------------------------------------- */ + struct Reg6_bridge_st reg6; +/* --------------------------------------------------------- */ + struct Reg7_bridge_st reg7; +/* --------------------------------------------------------- */ + struct Reg8_bridge_st reg8; +/* --------------------------------------------------------- */ + struct Reg9_bridge_st reg9; +/* --------------------------------------------------------- */ + struct Reg10_bridge_st reg10; +/* --------------------------------------------------------- */ + struct Reg11_bridge_st reg11; +/* --------------------------------------------------------- */ + struct Reg12_bridge_st reg12; +/* --------------------------------------------------------- */ + struct Reg13_st reg13; +/* --------------------------------------------------------- */ + struct Reg14_bridge_st reg14; +/* --------------------------------------------------------- */ + struct Reg15_bridge_st reg15; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg16; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg17; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg18; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg19; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg20; +/* --------------------------------------------------------- */ + struct Reg_vendor_st reg21; +/* --------------------------------------------------------- */ + struct Reg22_st reg22; +/* --------------------------------------------------------- */ + struct Reg23_st reg23; +/* --------------------------------------------------------- */ + struct Reg24_st reg24; +/* --------------------------------------------------------- */ + struct Reg25_st reg25; +/* --------------------------------------------------------- */ + struct Reg26_st reg26; +/* --------------------------------------------------------- */ + struct Reg27_st reg27; +/* --------------------------------------------------------- */ + struct Reg28_bridge_st reg28; +/* --------------------------------------------------------- */ + struct Reg29_bridge_st reg29; +/* --------------------------------------------------------- */ + struct Reg30_bridge_st reg30; +/* --------------------------------------------------------- */ + struct Reg31_bridge_st reg31; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct FW_IMAGE_st { /* Little Endian */ 
+ pseudo_bit_t FIA[0x00020]; /* FW image address */
+/* --------------------------------------------------------- */
+ pseudo_bit_t LOG_DEV_SIZE[0x00020]; /* Log device size */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ROM[0x00020]; /* Expansion ROM offset */
+/* --------------------------------------------------------- */
+ pseudo_bit_t LAST[0x00020]; /* Last address read */
+/* --------------------------------------------------------- */
+ pseudo_bit_t START[0x00020]; /* Offset of static configuration in NVRAM */
+/* --------------------------------------------------------- */
+ pseudo_bit_t RET[0x00020]; /* Used by flash loader */
+/* --------------------------------------------------------- */
+};
+
+/* DIMM info from DMU discovery */
+
+struct DIMM_REQ_st { /* Little Endian */
+ pseudo_bit_t OFFSET[0x00020]; /* offset in CR-space */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t BAR[0x00020]; /* bar for this DIMM (bit[0] = dimm_en) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t MASK[0x00020]; /* mask for this DIMM (-size) */
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct DIMM_SYND_st { /* Little Endian */
+ pseudo_bit_t SYND[0x00004]; /* Error syndrome */
+ pseudo_bit_t reserved0[0x0001c];
+/* --------------------------------------------------------- */
+};
+
+/* Loader command interface. Built to match last word of HCR */
+
+struct LOADER_CMD_IF_st { /* Little Endian */
+ pseudo_bit_t opcode[0x0000c];
+ pseudo_bit_t opcode_modifier[0x00004];
+ pseudo_bit_t reserved0[0x00006];
+ pseudo_bit_t e[0x00001];
+ pseudo_bit_t go[0x00001];
+ pseudo_bit_t status[0x00008];
+/* --------------------------------------------------------- */
+};
+
+/* Translate CAS latency from bit number to JEDEC encoding */
+
+struct CAS_TABLE_st { /* Little Endian */
+ pseudo_bit_t CL1_0[0x00020]; /* CAS Latency = 1.0 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CL1_5[0x00020]; /* CAS Latency = 1.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CL2_0[0x00020]; /* CAS Latency = 2 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CL2_5[0x00020]; /* CAS Latency = 2.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CL3_0[0x00020]; /* CAS Latency = 3 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CL3_5[0x00020]; /* CAS Latency = 3.5 */
+/* --------------------------------------------------------- */
+};
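CAS_TABLE_st is indexed by the same bit positions that the SPD CAS-latency byte (CASLAT_st, further below) uses to advertise supported latencies, so choosing a latency reduces to a table lookup. A minimal sketch, assuming the six table words have been read into an array; the values here are placeholders, not real JEDEC encodings:

#include <stdint.h>

/* One JEDEC mode-register encoding per latency, in CL1.0 .. CL3.5 order
 * (placeholder values, to be filled from CAS_TABLE_st). */
static const uint32_t cas_table[6] = { 0 };

/* Return the encoding for the highest CAS latency advertised in the SPD
 * CASLAT byte, or 0 if none of the six defined bits is set. */
static uint32_t cas_to_jedec(uint8_t spd_caslat)
{
    for (int bit = 5; bit >= 0; bit--) {
        if (spd_caslat & (1u << bit))
            return cas_table[bit];
    }
    return 0;
}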
+
+/* Table to translate Refresh Period to DRAM clock units
+ 10000 * (val: usec) * dram_frequency / 10000 + 1 */
+
+struct TAR_TABLE_st { /* Little Endian */
+ pseudo_bit_t NORMAL[0x00020]; /* Normal (15.625 usec) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t REDUCED_25x[0x00020]; /* Reduced (.25x)... 3.9 usec */
+/* --------------------------------------------------------- */
+ pseudo_bit_t REDUCED_5x[0x00020]; /* Reduced (.5x) ... 7.8 usec */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EXTENDED_2x[0x00020]; /* Extended (2x) ... 31.3 usec */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EXTENDED_4x[0x00020]; /* Extended (4x) ... 62.5 usec */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EXTENDED_8x[0x00020]; /* Extended (8x) ... 125 usec */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct FW_TRACE_MASK_L_st { /* Little Endian */
+ pseudo_bit_t DB[0x00001]; /* Doorbells */
+ pseudo_bit_t CMD_IF[0x00001]; /* Cmd I/F */
+ pseudo_bit_t BP[0x00001]; /* Firmware Break Points */
+ pseudo_bit_t CM[0x00001]; /* Internal Cache Misses */
+ pseudo_bit_t MAD[0x00001]; /* MAD */
+ pseudo_bit_t QP_FLUSH[0x00001]; /* QP Flush */
+ pseudo_bit_t SCHD[0x00001]; /* Scheduler */
+ pseudo_bit_t IT_INT[0x00001]; /* IR Interrupt */
+ pseudo_bit_t TPT_XLATE[0x00001]; /* TPT Access */
+ pseudo_bit_t INFO[0x00001]; /* Miscellaneous Info */
+ pseudo_bit_t reserved0[0x00015];
+ pseudo_bit_t DEBUG[0x00001]; /* DEBUG Msg. Not in release. */
+/* --------------------------------------------------------- */
+};
+
+/* SDRAM Device Attributes: General */
+
+struct DEVATTR_st { /* Little Endian */
+ pseudo_bit_t WEAKDRV[0x00001]; /* Includes Weak Driver */
+ pseudo_bit_t QFC[0x00001]; /* Includes QFC Output */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t LOWVDD[0x00001]; /* Lower VDD tolerance: 0 = 0.2V 1 = TBD */
+ pseudo_bit_t UPVDD[0x00001]; /* Upper VDD tolerance: 0 = 0.2V 1 = TBD */
+ pseudo_bit_t CONCAP[0x00001]; /* Concurrent Auto-Precharge */
+ pseudo_bit_t FASTAP[0x00001]; /* Supports Fast AP: 0 = tRAP is tRAS; 1 = tRAP is tRCD */
+/* --------------------------------------------------------- */
+};
+
+/* Depicts various aspects of the module */
+
+struct MODATTR_st { /* Little Endian */
+ pseudo_bit_t BUF[0x00001]; /* Buffered address and control inputs */
+ pseudo_bit_t REG[0x00001]; /* Registered address and control inputs */
+ pseudo_bit_t PLL[0x00001]; /* On card PLL (Clock) */
+ pseudo_bit_t FETCE[0x00001]; /* FET Switch On-Card enable */
+ pseudo_bit_t FETEE[0x00001]; /* FET Switch External enable */
+ pseudo_bit_t DIFCLK[0x00001]; /* Differential Clock Input */
+ pseudo_bit_t reserved0[0x00002];
+/* --------------------------------------------------------- */
+};
+
+/* Which of the programmable CAS latencies (CAS to data out) are acceptable for the SDRAM devices used on the module. */
+
+struct CASLAT_st { /* Little Endian */
+ pseudo_bit_t CL1_0[0x00001]; /* CAS Latency = 1 */
+ pseudo_bit_t CL1_5[0x00001]; /* CAS Latency = 1.5 */
+ pseudo_bit_t CL2_0[0x00001]; /* CAS Latency = 2 */
+ pseudo_bit_t CL2_5[0x00001]; /* CAS Latency = 2.5 */
+ pseudo_bit_t CL3_0[0x00001]; /* CAS Latency = 3 */
+ pseudo_bit_t CL3_5[0x00001]; /* CAS Latency = 3.5 */
+ pseudo_bit_t reserved0[0x00002];
+/* --------------------------------------------------------- */
+};
+
+/* SDRAMs width */
+
+struct WIDTH_st { /* Little Endian */
+ pseudo_bit_t PRIMARY[0x00007]; /* Indicates the primary width of the SDRAMs */
+ pseudo_bit_t FLAG[0x00001]; /* Flag which is set to 1 when there is a second physical bank on the module which is of different size from the first physical bank (the second physical bank's SDRAMs are 2X the width of those on the first physical bank). */
+/* --------------------------------------------------------- */
+};
+
+/* This byte describes the module's refresh rate and type. */
+
+struct REFRESH_st { /* Little Endian */
+ pseudo_bit_t RATE[0x00007]; /* Refresh rate */
+ pseudo_bit_t SLFR[0x00001]; /* Self refresh flag */
+/* --------------------------------------------------------- */
+};
+
+/* Number of addresses for physical banks 1/2 */
+
+struct NALEN_st { /* Little Endian */
+ pseudo_bit_t BANK1[0x00004]; /* If there is one physical bank on the module, or if there are two physical banks of the same size and organization, represents the number of addresses for each physical bank. If the module has two physical banks of asymmetric size, represents the number of addresses for physical bank 1 */
+ pseudo_bit_t BANK2[0x00004]; /* If the module has two physical banks of asymmetric size, represents the number of row addresses for physical bank 2 */
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct MSIX_TableEntry_st { /* Little Endian */
+ pseudo_bit_t Msg_Addr[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t Msg_Upper_Addr[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t Msg_Data[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t Vector_Control[0x00020]; /* Only low bit is meaningful */
+/* --------------------------------------------------------- */
+};
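MSIX_TableEntry_st above is the standard PCI MSI-X table entry, and the one meaningful Vector_Control bit is the architected per-vector mask. A short sketch of programming an entry while it is masked; the volatile struct and entry pointer are illustrative, not part of this file:

#include <stdint.h>

struct msix_entry_hw {
    volatile uint32_t msg_addr;
    volatile uint32_t msg_upper_addr;
    volatile uint32_t msg_data;
    volatile uint32_t vector_control;
};

#define MSIX_VECTOR_MASKED 0x1u /* low bit of Vector_Control */

static void msix_program(struct msix_entry_hw *e, uint64_t addr, uint32_t data)
{
    e->vector_control |= MSIX_VECTOR_MASKED;  /* mask while updating */
    e->msg_addr       = (uint32_t)addr;
    e->msg_upper_addr = (uint32_t)(addr >> 32);
    e->msg_data       = data;
    e->vector_control &= ~MSIX_VECTOR_MASKED; /* unmask */
}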
+
+/* TCU Packet Buffer Debug */
+
+struct TCUPBDEBUG_st { /* Little Endian */
+ pseudo_bit_t pb_lines[0x00009]; /* packet buffer lines in use */
+ pseudo_bit_t reserved0[0x00017];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks Payload Debug */
+
+struct TCUDIPYLDDEBUG_st { /* Little Endian */
+ pseudo_bit_t di_header_ngrh_ps[0x00005]; /* header_ngrh_ps state */
+ pseudo_bit_t reserved0[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t di_header_ygrh_ps[0x00005]; /* header_ygrh_ps state */
+ pseudo_bit_t reserved1[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t wait4credits_ps[0x00001]; /* wait4credits_ps state */
+ pseudo_bit_t reserved2[0x0001f];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks TCUDICMD Debug */
+
+struct TCUDICMDDEBUG_st { /* Little Endian */
+ pseudo_bit_t popcmd_ps[0x00001]; /* popcmd_ps state */
+ pseudo_bit_t reserved0[0x0001f];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks TCUTCQPCWR Debug */
+
+struct TCUTCQPCWRDEBUG_st { /* Little Endian */
+ pseudo_bit_t qpcwr_ps[0x00004]; /* qpcwr_ps state */
+ pseudo_bit_t reserved0[0x0001c];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks TCUTCQPCRD Debug */
+
+struct TCUTCQPCRDDEBUG_st { /* Little Endian */
+ pseudo_bit_t qpcrd_ps[0x00002]; /* qpcrd_ps state */
+ pseudo_bit_t reserved0[0x0001e];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks Header Debug */
+
+struct TCUTCHEADERDEBUG_st { /* Little Endian */
+ pseudo_bit_t hdr_header_ngrh_ps[0x00005]; /* header_ngrh_ps state */
+ pseudo_bit_t reserved0[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t hdr_header_ygrh_ps[0x00005]; /* header_ygrh_ps state */
+ pseudo_bit_t reserved1[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t multicast_ps[0x00001]; /* multicast_ps state */
+ pseudo_bit_t real_dqp_reg[0x00018]; /* real qp number from packet in the case of qp0, qp1, qp = ffffff */
+ pseudo_bit_t reserved2[0x00007];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks TCUTCCHKDI Debug */
+
+struct TCUTCCHKDI_st { /* Little Endian */
+ pseudo_bit_t cmd_ctr[0x00004]; /* counter of commands sent to the RDE command fifo */
+ pseudo_bit_t dicmddest_ps[0x00001]; /* dicmddest_ps state */
+ pseudo_bit_t dicmd_ps[0x00001]; /* dicmd_ps state */
+ pseudo_bit_t dipyldest_ps[0x00001]; /* dipyldest_ps state */
+ pseudo_bit_t reserved0[0x00019];
+/* --------------------------------------------------------- */
+};
+
+/* @TCU Packet Checker Debug */
+
+struct TCUTCCHKER_st { /* Little Endian */
+ pseudo_bit_t drop_count[0x00020]; /* dropped packets counter */
+/* --------------------------------------------------------- */
+ pseudo_bit_t checker_ps[0x00001]; /* checker_ps state */
+ pseudo_bit_t reserved0[0x0001f];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks QP2EE Debug */
+
+struct TCUTCQP2EEDEBUG_st { /* Little Endian */
+ pseudo_bit_t qp2ee_ps[0x00003]; /* qp2ee_ps state */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t qp_not_exists_ff[0x00001]; /* qp_not_exists_ff */
+ pseudo_bit_t qp_ts_is_not_rd_ff[0x00001]; /* qp_ts_is_not_rd_ff */
+ pseudo_bit_t qp_state_is_not_ok_ff[0x00001]; /* qp_state_is_not_ok_ff */
+ pseudo_bit_t qp_locked_ff[0x00001]; /* qp_locked_ff */
+ pseudo_bit_t ee_not_exists_ff[0x00001]; /* ee_not_exists_ff */
+ pseudo_bit_t rdd_miss_ff[0x00001]; /* rdd_miss_ff */
+ pseudo_bit_t ee_last_op_not_last_ff[0x00001]; /* ee_last_op_not_last_ff */
+ pseudo_bit_t ee_lapsn_not_fits_epsn_ff[0x00001]; /* ee_lapsn_not_fits_epsn_ff */
+ pseudo_bit_t packet_psn_not_fits_epsn_ff[0x00001]; /* packet_psn_not_fits_epsn_ff */
+ pseudo_bit_t reserved1[0x00013];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks TCUCLI Debug */
+
+struct TCUCLIDEBUG_st { /* Little Endian */
+ pseudo_bit_t txinprgrs_ps[0x00002]; /* txinprgrs_ps state */
+ pseudo_bit_t drdy_1st_ps[0x00001]; /* drdy_1st_ps state */
+ pseudo_bit_t cldataen_ps[0x00001]; /* cldataen_ps state */
+ pseudo_bit_t reserved0[0x0001c];
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct EXT_CAUSEREG_st { /* Little Endian */
+ pseudo_bit_t clrcause[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t setcause[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t evtserviced[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t evtena0[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t evtena1[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* Completion Engine Debug Status Registers */
+
+struct CEDEBUG_st { /* Little Endian */
+ struct CECOREDEBUG_st cecoredebug; /* core debug */
+/* --------------------------------------------------------- */
+ struct CENSWRDEBUG_st censwr1debug; /* nswr debug */
+/* --------------------------------------------------------- */
+ struct CENSWRDEBUG_st censwr2debug; /* nswr debug */
+/* --------------------------------------------------------- */
+ struct CEINRDEDEBUG_st ceinrdedebug; /* ceinrde debug */
+/* --------------------------------------------------------- */
+ struct CEINFIFODEBUG_st ceinfifodebug; /* cein fifo debug */
+/*
--------------------------------------------------------- */ + struct CEINSTAGEDEBUG_st ceinstagedebug;/* ceinstage debug */ +/* --------------------------------------------------------- */ + struct CELDBDEBUG_st celdbdebug; /* celdb debug */ +/* --------------------------------------------------------- */ + struct CETPTDEBUG_st cetptdebug; /* cetpt debug */ +/* --------------------------------------------------------- */ + struct CENSIDEBUG_st censidebug; /* censi debug */ +/* --------------------------------------------------------- */ + struct CEEVENTDEBUG_st ceeventdebug;/* ceevent debug */ +/* --------------------------------------------------------- */ + struct CECCDEBUG_st ceccdebug; /* cecc debug */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x005c0]; +/* --------------------------------------------------------- */ +}; + +/* CE Gateway for Exe to TCU (Farewell for Diego) */ + +struct TCUCEEXEGW_st { /* Little Endian */ + pseudo_bit_t reserved0[0x0001d]; + pseudo_bit_t ceexegwctl[0x00003]; /* Control register: + 31 - Semaphore + 30 - Busy + 29 - Cmd: go */ +/* --------------------------------------------------------- */ + pseudo_bit_t ceexegw0[0x00009]; /* 8 - EE/QP# + 7:0 - QP[23:16] if RD Bind */ + pseudo_bit_t reserved1[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t ceexegw1[0x00020]; /* 31:16 - QP[15:0] if RD Bind + 15:0 - QP[23:8]/EE[23:8] number */ +/* --------------------------------------------------------- */ + pseudo_bit_t ceexegw2[0x00020]; /* 31:24 - QP[7:0]/EE[7:0] + 23:0 - CQ[23:0] + */ +/* --------------------------------------------------------- */ +}; + +/* Completion Engine General Configuration */ + +struct CEGRLCFG_st { /* Little Endian */ + struct CECORECFG_st cecorecfg; /* ce core cfg */ +/* --------------------------------------------------------- */ + struct CENSWRCFG_st censwr1cfg; /* nswr clear */ +/* --------------------------------------------------------- */ + struct CENSWRCFG_st censwr2cfg; /* nswr clear */ +/* --------------------------------------------------------- */ + struct CECQCCFG_st cecqccfg; /* CQC cfg */ +/* --------------------------------------------------------- */ + struct CEINFIFOCFG_st ceinfifocfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00100]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine LDB Entry */ + +struct CELDB_st { /* Little Endian */ + pseudo_bit_t ldbsyndrome[0x00008]; /* The LDB entry syndrome */ + pseudo_bit_t ldbpsn[0x00018]; /* PSN this LDB entry relates to */ +/* --------------------------------------------------------- */ + pseudo_bit_t ldbdessize[0x00006]; /* LDB entry descriptor size (NDS) */ + pseudo_bit_t ldbdesaddr[0x0001a]; /* LDB entry descriptor address (NDA) */ +/* --------------------------------------------------------- */ + pseudo_bit_t ldbopcode[0x00005]; /* Descriptor opcode format */ + pseudo_bit_t ldbunreliable[0x00001];/* unreliable bit */ + pseudo_bit_t ldbsigcomp[0x00001]; /* signalled completion */ + pseudo_bit_t ldbeventreq[0x00001]; /* event request */ + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine CQ Entry */ + +struct CECQC_st { /* Little Endian */ + pseudo_bit_t cccqbaseaddr_63_32[0x00020];/* cq base address [58:32] */ +/* 
--------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00005];
+ pseudo_bit_t cccqbaseaddr31_5[0x0001b]; /* cq base address [31:5] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cccqlkey[0x00020]; /* cq L-Key */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cceqn[0x00008]; /* event Q number */
+ pseudo_bit_t ccpd[0x00018]; /* protection domain */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ccproduceridx[0x00020]; /* Producer Index of the CQ */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ccconsumneridx[0x00020]; /* CQ's Consumer pointer */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ccstatus[0x00008]; /* cq status */
+ pseudo_bit_t cccqsize[0x00005]; /* cq size */
+ pseudo_bit_t ccldtseqid[0x00006]; /* LDT sequence id */
+ pseudo_bit_t cctr[0x00001]; /* translation required */
+ pseudo_bit_t ccvl[0x00001]; /* vl */
+ pseudo_bit_t ccnp[0x00001]; /* non-posted */
+ pseudo_bit_t cccqbreakpoint[0x00001]; /* BreakPoint indication on this CQ */
+ pseudo_bit_t ccignoreoverrun[0x00001]; /* ignore overrun bit */
+ pseudo_bit_t ccstc[0x00002]; /* stc */
+ pseudo_bit_t ccsts[0x00002]; /* sts */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t cclock[0x00001]; /* cqc is locked bit */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved2[0x00020];
+/* --------------------------------------------------------- */
+};
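ccproduceridx and ccconsumneridx make the CQ a single-producer ring, and the overrun condition that ccignoreoverrun suppresses can be expressed directly. A sketch under the assumption, suggested by the 5-bit field, that cccqsize holds log2 of the number of CQEs and that both indices increment monotonically:

#include <stdint.h>

/* Wrap-safe number of completions in flight. */
static uint32_t cq_used(uint32_t producer, uint32_t consumer)
{
    return producer - consumer; /* unsigned subtraction handles wrap */
}

/* One more completion would overrun the ring (unless ccignoreoverrun). */
static int cq_would_overrun(uint32_t producer, uint32_t consumer,
                            uint32_t log_cq_size)
{
    return cq_used(producer, consumer) >= (1u << log_cq_size);
}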
+
+/* Completion Engine Input FIFO Entry */
+
+struct CEINFIFO_st { /* Little Endian */
+ pseudo_bit_t ceincqn[0x00018]; /* CQ number this entry relates to */
+ pseudo_bit_t ceinopcode[0x00008]; /* Opcode */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinqpn[0x00018]; /* QP number this entry relates to. */
+ pseudo_bit_t ceinsyndrome[0x00008]; /* The syndrome of this entry */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceindesctr[0x00020]; /* descriptor ptr */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinnbbytes[0x00020]; /* number of bytes transferred */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinimmdt[0x00020]; /* immediate value */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceineen[0x00018]; /* ee context */
+ pseudo_bit_t ceindlid[0x00007]; /* destination LID */
+ pseudo_bit_t ceingrh[0x00001]; /* grh bit */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinsqpn[0x00018]; /* source qp number */
+ pseudo_bit_t ceinde[0x00001]; /* descriptor event bit */
+ pseudo_bit_t ceinsl[0x00004]; /* sl */
+ pseudo_bit_t ceinsrc[0x00002]; /* 00 - RDE
+ 10 - EXE with QP
+ 11 - EXE with EE (this is bind on RD) */
+ pseudo_bit_t ceinse[0x00001]; /* Solicited Event bit */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinpsn[0x00018]; /* psn */
+ pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ceinslid[0x00010]; /* source LID */
+ pseudo_bit_t reserved1[0x00010];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved2[0x000e0];
+/* --------------------------------------------------------- */
+};
+
+/* Multicast Cache QPN Table */
+
+struct MCCACHEQPN_st { /* Little Endian */
+ pseudo_bit_t qpn00[0x00018]; /* qp number */
+ pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn01[0x00018]; /* qp number */
+ pseudo_bit_t reserved1[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn02[0x00018]; /* qp number */
+ pseudo_bit_t reserved2[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn03[0x00018]; /* qp number */
+ pseudo_bit_t reserved3[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn04[0x00018]; /* qp number */
+ pseudo_bit_t reserved4[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn05[0x00018]; /* qp number */
+ pseudo_bit_t reserved5[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn06[0x00018]; /* qp number */
+ pseudo_bit_t reserved6[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn07[0x00018]; /* qp number */
+ pseudo_bit_t reserved7[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn08[0x00018]; /* qp number */
+ pseudo_bit_t reserved8[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn09[0x00018]; /* qp number */
+ pseudo_bit_t reserved9[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn10[0x00018]; /* qp number */
+ pseudo_bit_t reserved10[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn11[0x00018]; /* qp number */
+ pseudo_bit_t reserved11[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn12[0x00018]; /* qp number */
+ pseudo_bit_t reserved12[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpn13[0x00018]; /* qp
number */ + pseudo_bit_t reserved13[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpn14[0x00018]; /* qp number */ + pseudo_bit_t reserved14[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpn15[0x00018]; /* qp number */ + pseudo_bit_t reserved15[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* Multicast Cache GID Entry */ + +struct MCCACHEGID_st { /* Little Endian */ + pseudo_bit_t gidh[0x00008]; /* Scope and Flag bits of GID */ + pseudo_bit_t reserved0[0x00017]; + pseudo_bit_t v[0x00001]; /* multicast entry is valid */ +/* --------------------------------------------------------- */ + pseudo_bit_t gidl[0x00020]; /* GID [31:0] */ +/* --------------------------------------------------------- */ +}; + +/* @tcutcdigw ram */ + +struct TCUDIGWRAM_st { /* Little Endian */ + pseudo_bit_t line0w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line0w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line1w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line1w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line2w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line2w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line3w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line3w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line4w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line4w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line5w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line5w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line6w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line6w1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line7w0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t line7w1[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* TCU Packet Injection Gateway Control */ + +struct TCUTCDIGW_st { /* Little Endian */ + pseudo_bit_t pkt_len[0x0000b]; /* packet length */ + pseudo_bit_t cmd[0x00012]; /* command to digw + // 1. 
If destination is RDE:
+ //
+ // | V | RESERVED | No PYLD | CMD destination | PYLD DESTINATION (source of data, destination of data) | RESERVED |
+ //   28    27:19      18          17:16         15:12 (15, 14:12)                                          11
+ //
+ // V - command is valid bit
+ //
+ // No PYLD - if set, no push to the pyld destination will be done
+ //
+ // PYLD DEST Parameters:
+ // Destination
+ // parameter DCRD = 3'b000; // Discard - Not valid from EB
+ // parameter RDNG = 3'b001; // RDE, no GRH scatter
+ // parameter RDYG = 3'b010; // RDE, GRH scatter
+ // parameter EBMV = 3'b011; // EB - Not valid from EB
+ // Source
+ // parameter PBS = 1'b0; // source is PB
+ // parameter EBS = 1'b1; // source is EB
+
+ // CMD destination Parameters:
+ //////////////////////////////
+ //parameter NOCOMND = 2'b00; // no command (pyld will be discarded) - Not valid from EB
+ //parameter RDEATOM = 2'b01; // atomic request
+ //parameter RDENATM = 2'b11; // no atomic request
+ //
+ // 2. If destination is CLI:
+ //
+ // | LMC | STATUS (error, port number) |
+ //  28:26  25:11 (25:15 , 14:11)
+ */
+ pseudo_bit_t dest[0x00001]; /* destination bit: CLI or RDE
+ 0 - RDE
+ 1 - CLI
+ */
+ pseudo_bit_t b[0x00001]; /* busy bit */
+ pseudo_bit_t s[0x00001]; /* s bit */
+/* --------------------------------------------------------- */
+};
+
+/* QPC Line */
+
+struct QPCLINE_st { /* Little Endian */
+ pseudo_bit_t line_127_96_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t line_95_64_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t line_63_32_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t line_31_0_[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* QPC Command Header */
+
+struct QPC_command_header_st { /* Little Endian */
+ pseudo_bit_t Token[0x00010];
+ pseudo_bit_t Condition[0x00008];
+ pseudo_bit_t CL[0x00003]; /* condition line */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t WordSel[0x00002]; /* Word selector for condition evaluation.
+ 00 - selected word = line[31:0]
+ 01 - selected word = line[63:32]
+ 10 - selected word = line[95:64]
+ 11 - selected word = line[127:96] */
+ pseudo_bit_t TimerState[0x00002]; /* State for Timer FSM. (Updated only by ERP) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t Mask[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t CompareOperand[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t QPN[0x00018]; /* QPN - QP/EEC number. */
+ pseudo_bit_t EE[0x00001]; /* `0': QP, `1': EE */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t Interrupt[0x00001];
+ pseudo_bit_t reserved2[0x00003];
+/* --------------------------------------------------------- */
+ pseudo_bit_t OPLE[0x00008]; /* OpLE - Opcode Line Enable */
+ pseudo_bit_t Opcode[0x00008]; /* HW Defined Opcodes:
+ QPC_OPCODE_READ 0x41
+ QPC_OPCODE_WRITE 0x81
+ QPC_OPCODE_START_TIMER 0x02
+ QPC_OPCODE_RESET_TIMER 0x03
+ QPC_OPCODE_STOP_TIMER 0x04
+ QPC_OPCODE_WRITE_TIMER 0x05
+ QPC_OPCODE_ADD 0x82
+ QPC_OPCODE_SUB 0x83
+ QPC_OPCODE_AND 0x84
+ QPC_OPCODE_OR 0x85
+ QPC_OPCODE_XOR 0x86
+ QPC_OPCODE_FETCH_ADD 0xc2
+ QPC_OPCODE_FETCH_SUB 0xc3
+ QPC_OPCODE_FETCH_AND 0xc4
+ QPC_OPCODE_FETCH_OR 0xc5
+ QPC_OPCODE_FETCH_XOR 0xc6
+
+ FW Defined Opcodes (processed by FW through miss processing):
+ QPC_OPCODE_CE 0x06
+ QPC_OPCODE_WRITE_BACK_OP 0xc7
+ QPC_OPCODE_HW2SW_EQ 0xc8
+ QPC_OPCODE_SW2HW_EQ 0xc9
+ QPC_OPCODE_MAP_EQ 0xca
+ QPC_OPCODE_QUERY_EQC 0xcb
+ QPC_OPCODE_UAR_BASE_ADX 0xcc
+ QPC_OPCODE_EQC_TABLE_BASE_ADX 0xcd
+ */
+ pseudo_bit_t reserved3[0x00010];
+/* --------------------------------------------------------- */
+};
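A QPC command is thus five header dwords followed by data; for instance, a masked WRITE to one 32-bit word of a QPC line packs as below. This is only a sketch of the field packing implied by QPC_command_header_st; the transport that actually posts the header to the QPC gateway is not shown here:

#include <stdint.h>

#define QPC_OPCODE_WRITE 0x81u /* HW-defined WRITE opcode from the list above */

/* Pack the five header dwords: Token[15:0], Condition[23:16], CL[26:24],
 * WordSel[29:28]; then Mask; CompareOperand; QPN[23:0] (EE bit 24 left
 * clear, so a QP is targeted); finally OPLE[7:0] with Opcode[15:8]. */
static void qpc_build_masked_write(uint32_t hdr[5], uint16_t token,
                                   uint8_t condition, uint8_t cond_line,
                                   uint8_t word_sel, uint32_t mask,
                                   uint32_t operand, uint32_t qpn,
                                   uint8_t line_enable)
{
    hdr[0] = (uint32_t)token |
             (uint32_t)condition       << 16 |
             (uint32_t)(cond_line & 7) << 24 |
             (uint32_t)(word_sel & 3)  << 28;
    hdr[1] = mask;              /* only bits set here are modified  */
    hdr[2] = operand;           /* operand for the condition check  */
    hdr[3] = qpn & 0x00ffffffu; /* QPN; Interrupt (bit 28) left clear */
    hdr[4] = (uint32_t)line_enable | (uint32_t)QPC_OPCODE_WRITE << 8;
}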
+
+/* GID */
+
+struct GID_st { /* Little Endian */
+ pseudo_bit_t gid_127_96_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t gid_95_64_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t gid_63_32_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t gid_31_0_[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet Link Layer Check Info */
+
+struct IBTCUdata_st { /* Little Endian */
+ pseudo_bit_t portid[0x00004]; /* port number of the current packet */
+ pseudo_bit_t ib_pktlen[0x0000b]; /* real packet length (as counted in the CLI) */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t ib_lmc[0x00003]; /* LMC of the current packet */
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t ib_error_reg[0x0000b]; /* link errors (ib_status [14:4]) */
+ pseudo_bit_t reserved2[0x00001];
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet immDt Field */
+
+struct immDt_st { /* Little Endian */
+ pseudo_bit_t immdt[0x00020]; /* immediate data */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet AtomicAckETH Fields */
+
+struct AtomicAckETH_st { /* Little Endian */
+ pseudo_bit_t atmackh[0x00020]; /* original remote data [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t atmackl[0x00020]; /* original remote data [31:0] */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet AETH Fields */
+
+struct AETH_st { /* Little Endian */
+ pseudo_bit_t msn[0x00018]; /* MSN */
+ pseudo_bit_t syndrome[0x00008]; /* syndrome */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet AtomicETH Fields */
+
+struct AtomicETH_st { /* Little Endian */
+ pseudo_bit_t vahi[0x00020]; /* virtual address [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t valo[0x00020]; /* virtual address [31:0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t r_key[0x00020]; /* remote key */
+/* --------------------------------------------------------- */
+ pseudo_bit_t swaph[0x00020]; /* swap or add data [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t swapl[0x00020]; /* swap or add data [31:0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cmph[0x00020]; /* compare data [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cmpl[0x00020]; /* compare data [31:0] */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet RETH Fields */
+
+struct RETH_st { /* Little Endian */
+ pseudo_bit_t vahi[0x00020]; /* virtual address [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t valo[0x00020]; /* virtual address [31:0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t r_key[0x00020]; /* remote key */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dmalen[0x00020]; /* DMA length */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet DETH Fields */
+
+struct DETH_st { /* Little Endian */
+ pseudo_bit_t qkey[0x00020]; /* Queue key */
+/* --------------------------------------------------------- */
+ pseudo_bit_t sqp[0x00018]; /* source qp */
+ pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet RDETH Fields */
+
+struct RDETH_st { /* Little Endian */
+ pseudo_bit_t eecnxt[0x00018]; /* EE-context */
+ pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet BTH Fields */
+
+struct BTH_st { /* Little Endian */
+ pseudo_bit_t p_key[0x00010]; /* partition key */
+ pseudo_bit_t tver[0x00004]; /* TVER */
+ pseudo_bit_t pad[0x00002]; /* pad count */
+ pseudo_bit_t migreq[0x00001]; /* migreq */
+ pseudo_bit_t se[0x00001]; /* solicited event */
+ pseudo_bit_t opcode[0x00008]; /* opcode */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dqp[0x00018]; /* destination qp */
+ pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t psn[0x00018]; /* PSN */
+ pseudo_bit_t reserved1[0x00007];
+ pseudo_bit_t ackreq[0x00001]; /* acknowledge request */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet GRH Fields */
+
+struct GRH_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00014];
+ pseudo_bit_t tclass[0x00008]; /* traffic class */
+ pseudo_bit_t ipver[0x00004]; /* IP version */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved1[0x00008];
+ pseudo_bit_t nxthdr[0x00008]; /* next header */
+ pseudo_bit_t reserved2[0x00010];
+/* --------------------------------------------------------- */
+ pseudo_bit_t sgid_127_96[0x00020]; /* source GID [127:96] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t sgid_95_64[0x00020]; /* source GID [95:64] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t sgid_63_32[0x00020]; /* source GID [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t sgid_31_0[0x00020]; /* source GID [31:0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dgid_127_96[0x00020]; /* destination GID [127:96] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dgid_95_64[0x00020]; /* destination GID [95:64] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dgid_63_32[0x00020]; /* destination GID [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dgid_31_0[0x00020]; /* destination GID [31:0] */
+/* --------------------------------------------------------- */
+};
+
+/* Trapped Incoming Packet LRH Fields */
+
+struct LRH_st { /* Little Endian */
+ pseudo_bit_t dlid[0x00010]; /* destination local id */
+ pseudo_bit_t lnh[0x00002]; /* link next header */
+ pseudo_bit_t reserved0[0x00002];
+ pseudo_bit_t sl[0x00004]; /* service level */
+ pseudo_bit_t reserved1[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t slid[0x00010]; /* source local id */
+ pseudo_bit_t pkt_pktlen[0x0000b]; /* packet length as written in the LRH field */
+ pseudo_bit_t reserved2[0x00005];
+/* --------------------------------------------------------- */
+};
+
+/* RD Responder Copy QP to EE Machine Configuration */
+
+struct TCUTCQP2EE_st { /* Little Endian */
+ pseudo_bit_t cs_syndrome_only[0x00001]; /* if set, the qp2ee machine will return only a syndrome if the copy has failed; if reset, the machine will return data strb as required along with the syndrome. */
+ pseudo_bit_t cs_ignore_a_w_r_copy[0x00001]; /* when copying qp to ee, if set, the fields A, W and R will not be copied. */
+ pseudo_bit_t cs_ignore_maxmsgsize_copy[0x00001]; /* ignore max message size when copying qp to ee */
+ pseudo_bit_t reserved0[0x0001d];
+/* --------------------------------------------------------- */
+};
+
+/* TCU CLI Packet Input Control */
+
+struct TCUTCCLI_st { /* Little Endian */
+ pseudo_bit_t tcucli_erplock[0x00001]; /* lock cli from getting packets from CLI */
+ pseudo_bit_t cli_is_locked_reg[0x00001]; /* cli is locked indication */
+ pseudo_bit_t reserved0[0x0001e];
+/* --------------------------------------------------------- */
+};
+
+/* TCU CLI */
+
+struct TCUCLI_st { /* Little Endian */
+ pseudo_bit_t cfg_cli_no_pre_dlast[0x00001]; /* not doing pre-dlast "bring packet" */
+ pseudo_bit_t reserved0[0x00003];
+ pseudo_bit_t cfg_cli_drdy2dataen[0x00004]; /* number of cycles for the pre-dlast operation */
+ pseudo_bit_t reserved1[0x00018];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Packet Checks Configuration */
+
+struct TCUTCCFG_st { /* Little Endian */
+ pseudo_bit_t cfg_gid_prefix_63_32[0x00020]; /* cfg gid prefix [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_gid_prefix_31_0[0x00020]; /* cfg gid prefix [31:0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_nxthdr[0x00008]; /* cfg nxthdr */
+ pseudo_bit_t cfg_ipver[0x00004]; /* cfg ipver */
+ pseudo_bit_t reserved0[0x00014];
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_qp0[0x00018]; /* qp0 number - Replaces the QP number for packets destined to QP0. Packets coming from IB port 1 will go to qp0_number, packets coming from IB port 2 will go to qp0_number + 1
+
+ * Dest QP should not be configured to zero, see bug 4984 */
+ pseudo_bit_t reserved1[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_qp1[0x00018]; /* qp1 number
+
+ * Dest QP should not be configured to zero, see bug 4984 */
+ pseudo_bit_t reserved2[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_unlock_timer[0x00010]; /* cfg unlock timer: the time the tcu waits before re-reading the QPC because the QP was locked. */
+ pseudo_bit_t cfg_retry_counter[0x00010]; /* number of retries when the qp is locked. If 0, the retry counter is disabled */
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_mc_qp[0x00018]; /* cfg multicast qp to change to when there is no multicast cache hit
+
+ * Dest QP should not be configured to zero, see bug 4984 */
+ pseudo_bit_t reserved3[0x00008];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved4[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* TCU Initial Credits */
+
+struct TCUINITCREDITS_st { /* Little Endian */
+ pseudo_bit_t rde_pyld_credits[0x0000c]; /* credits to RDE pyld fifo */
+ pseudo_bit_t reserved0[0x00014];
+/* --------------------------------------------------------- */
+ pseudo_bit_t rde_cmd_cred[0x00007]; /* credits to RDE command fifo */
+ pseudo_bit_t reserved1[0x00019];
+/* --------------------------------------------------------- */
+ pseudo_bit_t pb_credits[0x00008]; /* PB credits to CLI */
+ pseudo_bit_t reserved2[0x00018];
+/* --------------------------------------------------------- */
+ pseudo_bit_t sqpc_credits[0x00005]; /* credits for SQPC */
+ pseudo_bit_t reserved3[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t rqpc_credits[0x00005]; /* credits to RQPC */
+ pseudo_bit_t reserved4[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t cfg_digw_initial_credits[0x00008]; /* Initial credits to start pushing a new packet to the PB by TCUCLI. After the push of the packet has started, this value is not relevant. */
+ pseudo_bit_t reserved5[0x00008];
+ pseudo_bit_t cfg_digw_enough_credits[0x00008]; /* the value that means "zero" credits of PB.
*/ + pseudo_bit_t reserved6[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* tcu checks cause data0 */ + +struct TCUCHKCAUSE_REG3_st { /* Little Endian */ + pseudo_bit_t bad_syn_rnrnack[0x00001]; + pseudo_bit_t bad_syn_psn_seq_err[0x00001]; + pseudo_bit_t bad_syn_inv_req[0x00001]; + pseudo_bit_t bad_syn_rae[0x00001]; + pseudo_bit_t bad_syn_roe[0x00001]; + pseudo_bit_t bad_syn_inv_rd_req[0x00001]; + pseudo_bit_t bad_malformed_synd[0x00001]; + pseudo_bit_t bad_rdres_syn[0x00001]; + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* tcu checks cause data0 */ + +struct TCUCHKCAUSE_REG2_st { /* Little Endian */ + pseudo_bit_t bad_rel_dupl_psn[0x00001]; + pseudo_bit_t bad_rel_inv_psn[0x00001]; + pseudo_bit_t bad_qp2ee_eepsnerr[0x00001]; + pseudo_bit_t bad_qpstate_bit_req[0x00001]; + pseudo_bit_t bad_eestate_bit_req[0x00001]; + pseudo_bit_t bad_xxstate_psn_req[0x00001]; + pseudo_bit_t bad_req_supp_rel[0x00001]; + pseudo_bit_t bad_unrel_inv_psn[0x00001]; + pseudo_bit_t bad_uc_epsn[0x00001]; + pseudo_bit_t bad_ud_epsn[0x00001]; + pseudo_bit_t bad_req_supp_unrel[0x00001]; + pseudo_bit_t bad_opc_req_seq[0x00001]; + pseudo_bit_t bad_payload_req_pkt[0x00001]; + pseudo_bit_t bad_req_padcount[0x00001]; + pseudo_bit_t bad_e2ecredits_unrel[0x00001]; + pseudo_bit_t bad_e2ecredits_rel[0x00001]; + pseudo_bit_t bad_ghost_resp[0x00001]; + pseudo_bit_t unsolicited_ack[0x00001]; + pseudo_bit_t bad_qpstate_bit_res[0x00001]; + pseudo_bit_t bad_eestate_bit_res[0x00001]; + pseudo_bit_t bad_rdrsp_trash[0x00001]; + pseudo_bit_t bad_rdres_aack_mix[0x00001]; + pseudo_bit_t bad_opc_res_seq[0x00001]; + pseudo_bit_t bad_rsp_supp[0x00001]; + pseudo_bit_t bad_payload_res_pkt[0x00001]; + pseudo_bit_t bad_res_padcount[0x00001]; + pseudo_bit_t bad_rdrsp_implnck[0x00001]; + pseudo_bit_t bad_ack_implnck[0x00001]; + pseudo_bit_t reserved0[0x00004]; +/* --------------------------------------------------------- */ +}; + +/* tcu checks cause data0 */ + +struct TCUCHKCAUSE_REG1_st { /* Little Endian */ + pseudo_bit_t bad_exception_qp[0x00001]; + pseudo_bit_t qp_is_qp1[0x00001]; + pseudo_bit_t qp_is_qp0[0x00001]; + pseudo_bit_t bad_opcode_int_erp[0x00001]; + pseudo_bit_t bad_router_packet[0x00001]; + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t reserved1[0x00001]; + pseudo_bit_t bad_no_qpcrd[0x00001]; + pseudo_bit_t bad_qp2ee_eeisbusy[0x00001]; + pseudo_bit_t bad_migreq[0x00001]; + pseudo_bit_t bad_mygid_nrd[0x00001]; + pseudo_bit_t bad_rmtgid_nrd[0x00001]; + pseudo_bit_t bad_mylid_nrd[0x00001]; + pseudo_bit_t bad_rmtlid_nrd[0x00001]; + pseudo_bit_t bad_grhen[0x00001]; + pseudo_bit_t bad_qp2ee_qpnvalid[0x00001]; + pseudo_bit_t bad_qp2ee_qpstate[0x00001]; + pseudo_bit_t bad_qp2ee_rddmiss[0x00001]; + pseudo_bit_t bad_qp2ee_qptsnrd[0x00001]; + pseudo_bit_t bad_qp_is_lock[0x00001]; + pseudo_bit_t bad_eedestqp[0x00001]; + pseudo_bit_t bad_eesource[0x00001]; + pseudo_bit_t bad_mygid_rd[0x00001]; + pseudo_bit_t bad_rmtgid_rd[0x00001]; + pseudo_bit_t bad_mylid_rd[0x00001]; + pseudo_bit_t bad_rmtlid_rd[0x00001]; + pseudo_bit_t bad_pkey[0x00001]; + pseudo_bit_t bad_qkey[0x00001]; + pseudo_bit_t bad_multicast_pkt[0x00001]; + pseudo_bit_t bad_multicast_match[0x00001]; + pseudo_bit_t reserved2[0x00002]; +/* --------------------------------------------------------- */ +}; + +/* tcu checks cause data0 */ + +struct TCUCHKCAUSE_REG0_st { /* Little Endian */ + 
pseudo_bit_t packet_is_raw_ipv6[0x00001];
+ pseudo_bit_t packet_is_raw_ethertype[0x00001];
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t bad_lrh_rsv[0x00001];
+ pseudo_bit_t bad_pktlen[0x00001];
+ pseudo_bit_t bad_icrc[0x00001];
+ pseudo_bit_t bad_cli_check[0x00001];
+ pseudo_bit_t bad_nxthdr[0x00001];
+ pseudo_bit_t bad_ipver[0x00001];
+ pseudo_bit_t bad_no_bth[0x00001];
+ pseudo_bit_t bad_tver[0x00001];
+ pseudo_bit_t bad_bth_rsv_var[0x00001];
+ pseudo_bit_t bad_bth_rsv[0x00001];
+ pseudo_bit_t bad_malformed_packet[0x00001];
+ pseudo_bit_t bad_portid[0x00001];
+ pseudo_bit_t bad_opcode_int_drop[0x00001];
+ pseudo_bit_t bad_qpvalid[0x00001];
+ pseudo_bit_t bad_qpstate[0x00001];
+ pseudo_bit_t bad_opcode_ts[0x00001];
+ pseudo_bit_t bad_eevalid[0x00001];
+ pseudo_bit_t bad_eestate[0x00001];
+ pseudo_bit_t bad_qp2ee_eenvalid[0x00001];
+ pseudo_bit_t bad_qp2ee_eestate[0x00001];
+ pseudo_bit_t reserved2[0x00001];
+ pseudo_bit_t reserved3[0x00001];
+ pseudo_bit_t reserved4[0x00001];
+ pseudo_bit_t reserved5[0x00001];
+ pseudo_bit_t reserved6[0x00001];
+ pseudo_bit_t reserved7[0x00001];
+ pseudo_bit_t reserved8[0x00001];
+ pseudo_bit_t reserved9[0x00001];
+/* --------------------------------------------------------- */
+};
+
+/* iRISC Debug Hooks */
+
+struct IRISCDEBUG_st { /* Little Endian */
+ struct debug_bits_st debug; /* Debug hooks control and status */
+/* --------------------------------------------------------- */
+ struct debug_bits_st setdebug; /* Set Debug hooks */
+/* --------------------------------------------------------- */
+ struct debug_bits_st cleardebug; /* Clear Debug hooks */
+/* --------------------------------------------------------- */
+ pseudo_bit_t dbg_code[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t f_ip[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t last_e_ip[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* Inter iRisc Communication FIFO */
+
+struct IPCFIFO_st { /* Little Endian */
+ struct FIFOCONTROL_st ipcfifopushctrl; /* state field holds the number of free entries in the fifo */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ipcfifopushdata[0x00020]; /* writing to this register advances the fifo write pointer */
+/* --------------------------------------------------------- */
+ struct Irisc_pop_fifo_ctrl_st ipcfifopopctrl; /* state field holds number of used entries in the fifo */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ipcfifopopdata[0x00020];
+/* --------------------------------------------------------- */
+};
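The IPC FIFO above is driven symmetrically: the push side reports free entries in its state field and a write to the push-data word advances the write pointer, while the pop side reports used entries and is drained by reading the data word and then writing the pop bit. A minimal sketch, assuming the same hypothetical cr_read32()/cr_write32() accessors, consecutive dword offsets matching IPCFIFO_st, and a read-then-pop ordering (the poprd bit can fold the two accesses into one):

#include <stdint.h>
#include <stdbool.h>

extern uint32_t cr_read32(uint32_t off);
extern void     cr_write32(uint32_t off, uint32_t val);

#define IPC_PUSH_CTRL 0x0
#define IPC_PUSH_DATA 0x4
#define IPC_POP_CTRL  0x8
#define IPC_POP_DATA  0xc

#define FIFO_STATE(ctl) (((ctl) >> 8) & 0xff) /* state[15:8] */
#define FIFO_POP        (1u << 24)            /* pop bit     */

static bool ipc_push(uint32_t base, uint32_t word)
{
    if (FIFO_STATE(cr_read32(base + IPC_PUSH_CTRL)) == 0)
        return false;                          /* no free entries */
    cr_write32(base + IPC_PUSH_DATA, word);    /* advances write pointer */
    return true;
}

static bool ipc_pop(uint32_t base, uint32_t *word)
{
    if (FIFO_STATE(cr_read32(base + IPC_POP_CTRL)) == 0)
        return false;                          /* nothing queued */
    *word = cr_read32(base + IPC_POP_DATA);    /* read FIFO DWord 0 */
    cr_write32(base + IPC_POP_CTRL, FIFO_POP); /* advance read pointer */
    return true;
}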
+
+/* InfiniRISC Breakpoint */
+
+struct IRiscBP_st { /* Little Endian */
+ pseudo_bit_t address[0x00020]; /* breakpoint address (see mask field) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t mask[0x00006]; /* Breakpoint is set to address specified in address field masked with 2^mask */
+ pseudo_bit_t reserved0[0x00019];
+ pseudo_bit_t valid[0x00001]; /* If set then breakpoint is valid */
+/* --------------------------------------------------------- */
+};
+
+/* */
+
+struct CCR_st { /* Little Endian */
+ pseudo_bit_t cczero[0x00001]; /* Zero */
+ pseudo_bit_t ccsl[0x00001]; /* Signed Less Than */
+ pseudo_bit_t ccsg[0x00001]; /* Signed Greater Than */
+ pseudo_bit_t ccul[0x00001]; /* Unsigned Less Than */
+ pseudo_bit_t ccug[0x00001]; /* Unsigned Greater Than */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t ccpsn0[0x00001]; /* PZ: PSN is Zero */
+ pseudo_bit_t ccpsndup[0x00001]; /* PL: PSN is duplicate (less than) */
+ pseudo_bit_t ccpsnoos[0x00001]; /* PG: PSN is out of sequence (greater than) */
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t ccfieleq[0x00001]; /* Field is Equal */
+ pseudo_bit_t reserved2[0x00001];
+ pseudo_bit_t ccrl[0x00001]; /* msb of last operation */
+ pseudo_bit_t ccrg[0x00001];
+ pseudo_bit_t reserved3[0x00001];
+ pseudo_bit_t cccarryo[0x00001]; /* Carry Out */
+ pseudo_bit_t reserved4[0x0000f];
+ pseudo_bit_t cctrue[0x00001]; /* This bit is always 1 */
+/* --------------------------------------------------------- */
+};
+
+/* iRISC Cache Gateway */
+
+struct IRISCCACHEGW_st { /* Little Endian */
+ struct GWCONTROL_st irisc_cache_gateway_control; /* Gateway command
+
+ 0x00 Reserved
+ 0x01 Code Tag Read
+ 0x02 Code Tag Write
+ 0x03 Code Read
+ 0x04 Code Write
+ 0x05-0x10 Reserved
+ 0x11 Data Tag Read
+ 0x12 Data Tag Write
+ 0x13 Data Read
+ 0x14 Data Write
+ 0x15 Data Force Writeback
+ 0x16-0x3F Reserved
+
+ Address:
+ [23:20] Way
+ [19:0] Address LSb (bits 0 and 1 are ignored)
+
+ For Tag Read/Write:
+ gwdata[31:12] is Tag
+ gwdata[2] is Lock
+ gwdata[1] is Dirty
+ gwdata[0] is Valid */
+/* --------------------------------------------------------- */
+ pseudo_bit_t gwdata[0x00020]; /* when accessing Cache Data: data to write to / read from the cache.
+ when accessing Cache Tags: tag[31:3], lock[2], dirty[1], valid[0] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x000c0];
+/* --------------------------------------------------------- */
+};
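Driving the cache gateway is one application of the GWCONTROL protocol sketched earlier; the only gateway-specific parts are the command encoding and the way/address packing described in the comment above. A sketch of a single data-cache read, reusing the earlier hypothetical helpers and assuming gwdata immediately follows the control dword, as in IRISCCACHEGW_st:

#include <stdint.h>
#include <stdbool.h>

#define CACHE_GW_CMD_DATA_READ 0x13u /* from the command list above */

extern bool     gw_lock(uint32_t gw_off);   /* from the GWCONTROL sketch */
extern uint32_t gw_exec(uint32_t gw_off, uint32_t cmd, uint32_t addr);
extern void     gw_unlock(uint32_t gw_off);
extern uint32_t cr_read32(uint32_t off);

static uint32_t cache_data_read(uint32_t gw_off, unsigned way, uint32_t addr)
{
    uint32_t gw_addr = ((way & 0xfu) << 20) | (addr & 0xfffffu); /* [23:20]=way */
    uint32_t data;

    if (!gw_lock(gw_off))
        return 0; /* could not acquire the gateway semaphore */
    (void)gw_exec(gw_off, CACHE_GW_CMD_DATA_READ, gw_addr);
    data = cr_read32(gw_off + 4); /* gwdata, read before releasing */
    gw_unlock(gw_off);
    return data;
}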
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t fetchnds[0x00006]; /* Size of next descriptor to be fetched - Initialized to the QPCNDS when the command given to the execution engine is "with clear". + */ + pseudo_bit_t fetchndfence[0x00001]; /* Initialized to the QPCNDFence when the command given to the execution engine is "with clear". */ + pseudo_bit_t fetchnddbd[0x00001]; /* This bit is set when the next descriptor to be fetched was doorbelled. - Initialized to the QPCNDDBD when the command given to the execution engine is "with clear". */ + pseudo_bit_t ndee[0x00018]; /* Address of Next EE (for RD QPs) - Initialized to the QPCNEE when the command given to the execution engine is "with clear". */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x0000d]; + pseudo_bit_t rbfentry[0x00003]; /* Responder Blue Flame Entry in the internal BF fifo number */ + pseudo_bit_t fetchdbcnt[0x00010]; /* fetch doorbell counter - Initialized to the QPCdbcnt when the command given to the execution engine is "with clear". */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* Execution Engine Status */ + +struct EXESTATUS_st { /* Little Endian */ + pseudo_bit_t busy[0x00001]; /* Execution engine is processing */ + pseudo_bit_t ldbentryavail[0x00001];/* Execution engine has a reserved credit in the LDB */ + pseudo_bit_t reserved0[0x0001d]; + pseudo_bit_t nonsiouts[0x00001]; /* This bit is set by the hw when there are no outstanding transactions to the nsi. All translated addresses have completed their respective nsw transactions. Writes have been transmitted. Read responses have been received. + Clearing the tpt_stop bit in the fetchertptnsiif will clear this bit. + Setting tpt_stop in the fetchertptnsiif will cause this bit to become set after all outstanding transactions have been completed. + + * This bit is used for TPT Flush completion indication, see bug 5064 */ +/* --------------------------------------------------------- */ +}; + +/* Execution Engine Event Cause Register */ + +struct EXEEVTCAUSE_st { /* Little Endian */ + pseudo_bit_t dbcntzero[0x00001]; /* Doorbell counter reached zero. */ + pseudo_bit_t preempted[0x00001]; + pseudo_bit_t error[0x00001]; /* Error while processing last descriptor. Error details are in errorsyndrome field. */ + pseudo_bit_t nullndsreached[0x00001];/* Next Descriptor Size is Zero */ + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t desccompleted[0x00001];/* Descriptor completed - valid only for responder. For requester use gather length committed. (see bug 5195) */ + pseudo_bit_t reserved1[0x00005]; + pseudo_bit_t bindisbusy[0x00001]; /* Next descriptor is Bind and Bind machine is busy */ + pseudo_bit_t noldbcredits[0x00001]; /* No LDB credits */ + pseudo_bit_t noscatterentries[0x00001];/* Next descriptor is read and there are not enough scatter entries in RDE to store the read scatter list. 
*/ + pseudo_bit_t reserved2[0x00004]; + pseudo_bit_t opcodebp[0x00001]; /* opcode breakpoint */ + pseudo_bit_t fenced[0x00001]; /* Next descriptor is fenced and there are outstanding reads/atomics */ + pseudo_bit_t nordrqcredits[0x00001];/* No available outstanding read/atomic credits */ + pseudo_bit_t noe2ecredits[0x00001]; /* No available end to end credits */ + pseudo_bit_t reserved3[0x00004]; + pseudo_bit_t exebquotadone[0x00001];/* Programmed number of bytes was sent */ + pseudo_bit_t exepquotadone[0x00001];/* Programmed number of packets was sent */ + pseudo_bit_t exedquotadone[0x00001];/* Programmed number of descriptors was executed. */ + pseudo_bit_t ftcdquotadone[0x00001];/* Programmed number of descriptors was fetched */ +/* --------------------------------------------------------- */ + pseudo_bit_t eu_status[0x00008]; /* EXE FSMs state: + 0 EU_EXE_IDLE EU is not in use (not scheduled) + 1 EU_EXE_CHK EU is scheduled. Checking if can exe more + 2 EU_EXE_REQ EU is requesting exe from the exeer + 3 EU_EXE_EXTING EU is waiting for a descriptor to arrive + 4 EU_EXE_W4_DESC no descs ready for execution. + 5 EU_EXE_W4_BIND Waiting for TPT to finish bind + 6 EU_EXE_ERROR Error in descriptor defined in error_vector + 7 EU_EXE_DONE EU has finished exeing (quota/NULL) + */ + pseudo_bit_t eu_fetch_status[0x00008];/* EU Fetcher Status: + 0 EU_IDLE EU is not in use (not scheduled) + 1 EU_SCHEDULED EU is scheduled. Checking if can fetch more + 2 EU_FETCH_W4CRD EU limit not expired, but need credits + 3 EU_FETCH_REQ EU is requesting fetch from the fetcher + 4 EU_FETCH_WAIT EU is waiting for a descriptor to arrive + 5 EU_FETCH_LMT EU has finished fetching (QUOTA) + 6 EU_FETCH_ERR EU has finished fetching (ERROR) + 7 EU_FETCH_DONE EU has finished fetching (NULL) + + */ + pseudo_bit_t reserved4[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t errorsyndrome[0x00008];/* Execution Engine Event Error Syndrome + + 0x00 Reserved + 0x02-0x07 TPT Access Syndrome in Descriptor Address Translation (see TPT syndrome coding in TPT MAS): + 0x02 - PD Violation + 0x03 - Access Rights Violation + 0x04 - Length Violation + 0x05 - Cross Page Boundary Violation + 0x06 - Master abort + 0x07 - Reserved + 0x08 Opcode not supported by QP Service Type (e.g. RDMAR in UD QP) + 0x09 Opcode not supported by QP (A,W,R) + 0x0A Too Short NDS (e.g. Atomic with NDS equal to 1) + 0x0B Last Immdt descriptor segment length exceeds descriptor size. + 0x0C Bus Error while reading descriptor + 0x0D Immdt segment when scatter entry was expected (e.g. 
Immdt in RDMAR descriptor) + 0x0E Total descriptor byte count exceeds QP.maxmsgsize + 0x0F Reserved + 0x11 WQE Reserved Opcode + 0x12-0x13 Reserved + 0x14 Gather Engine Error (see extended syndrome in gather engine register) + 0x15 UD AV PD Miss + 0x16 UD AV Port Miss + 0x17 UD Msg is larger than AV.MTU + 0x18-0xFF Reserved + */ + pseudo_bit_t reserved6[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* Exe GID entry (not really BIG endian) */ + +struct EXE_GID_st { /* Little Endian */ + pseudo_bit_t gid_31_0_[0x00020]; /* This field is write only, see bug 5134 */ +/* --------------------------------------------------------- */ + pseudo_bit_t gid_63_32_[0x00020]; /* This field is write only, see bug 5134 */ +/* --------------------------------------------------------- */ + pseudo_bit_t gid_95_64_[0x00020]; /* This field is write only, see bug 5134 */ +/* --------------------------------------------------------- */ + pseudo_bit_t gid_127_96_[0x00020]; /* This field is write only, see bug 5134 */ +/* --------------------------------------------------------- */ +}; + +/* Internal Doorbells FIFO */ + +struct INTDBFIFO_st { /* Little Endian */ + struct DB_FIFOCNTL_st internal_db_fifo;/* internal db fifo controller */ +/* --------------------------------------------------------- */ + pseudo_bit_t intdbdata[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* QP Doorbell FIFO */ + +struct QPDBFIFO_st { /* Little Endian */ + struct DB_FIFOCNTL_st qf_fifo_cntl; +/* --------------------------------------------------------- */ + pseudo_bit_t fifodata0[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* TPT/NSI Interface and Status */ + +struct TPTNSIIF_st { /* Little Endian */ + pseudo_bit_t stoptptaccess[0x00001];/* When set, the engine will not request any further translation from the tpt. (used for tpt flush) + TPT Flush completion indication is obtained in the nonsiouts bit in the execution engine status. (see bug 5064) */ + pseudo_bit_t reserved0[0x0001f]; +/* --------------------------------------------------------- */ +}; + +/* Gather Engine Internal Logic - GeCtor Status */ + +struct GECTORSTAT_st { /* Little Endian */ + pseudo_bit_t packets_inflight[0x00008];/* how many packets are inserted to LinkList waiting to come from mem */ + pseudo_bit_t walk_ps[0x00003]; /* LinkList walking State Machine */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t snd_packets[0x0000c]; /* how many packets got YesUcan and not yet sent Lreq */ + pseudo_bit_t cloreq_ps[0x00002]; /* CLO request FSM state */ + pseudo_bit_t reserved1[0x00002]; +/* --------------------------------------------------------- */ +}; + +/* Gather Agent Internal Status */ + +struct GASTAT_st { /* Little Endian */ + pseudo_bit_t gb_4kb_base_ptr[0x0000d];/* byte pointer in RAM of Gather Buffer */ + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t gb_read_addr[0x00008]; /* Gather Buffer read address in Gather agent (1 of 4) */ + pseudo_bit_t gdpf_wm1_ctr[0x00007]; /* GDPF words counter, minus 1 */ + pseudo_bit_t reserved1[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* NTU Gateway Control Register */ + +struct GWCONTROL_NTU_st { /* Little Endian */ + pseudo_bit_t gwaddress[0x00018]; + pseudo_bit_t gwcmd[0x00006]; /* Command/Status - Sw writes to this field the desired command. Hw executes the cmd and returns in this field the status of the operation. 
For details on commands and status, see instance description. */ + pseudo_bit_t gwbusy[0x00001]; /* Written to 1 by SW together with command to trigger + HW. It is cleared by hw after completing the required + operation and setting the status in the Command/Status + field. */ + pseudo_bit_t gwlocked[0x00001]; /* Gateway Locked - This bit is set when the gw is being used by some entity. Sw is required to read the gwcontrol field before using a gw. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the gw writes a zero. */ +/* --------------------------------------------------------- */ +}; + +/* Performance counters interrupt info register */ + +struct NTU_PERF_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t qac_en[0x00001]; /* QLT Access count enable */ + pseudo_bit_t reserved1[0x00003]; + pseudo_bit_t qac_rst[0x00001]; /* QLT Access count reset */ + pseudo_bit_t reserved2[0x00003]; + pseudo_bit_t qac_int_en[0x00001]; /* QLT Access count interrupt enable */ + pseudo_bit_t reserved3[0x00003]; + pseudo_bit_t qac_ovf[0x00001]; /* QLT Access count overflow (write 1 to clear) */ + pseudo_bit_t reserved4[0x00002]; + pseudo_bit_t qlt_acc_cnt_ind[0x00005];/* QLT Access count indication (EvCnt1) */ + pseudo_bit_t reserved5[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t qlt_acc_cnt[0x00020]; /* QLT Access Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpw_lst_miss_time_en[0x00001];/* QPW Last Miss time enable */ + pseudo_bit_t qpw_acc_cnt_en[0x00001];/* QPW access count enable */ + pseudo_bit_t qpw_miss_cnt_en[0x00001];/* QPW Miss Count Enable */ + pseudo_bit_t reserved8[0x00001]; + pseudo_bit_t qpw_lst_miss_time_rst[0x00001];/* QPW Last Miss Time Reset */ + pseudo_bit_t qpw_acc_cnt_rst[0x00001];/* QPW Access Count Reset */ + pseudo_bit_t qpw_miss_cnt_rst[0x00001];/* QPW Miss Count Reset */ + pseudo_bit_t reserved9[0x00001]; + pseudo_bit_t qpw_lst_miss_time_int_en[0x00001];/* QPW Last Miss Time Interrupt Enable */ + pseudo_bit_t qpw_acc_time_int_en[0x00001];/* QPW Access Time Interrupt Enable */ + pseudo_bit_t qpw_miss_cnt_int_en[0x00001];/* QPW Miss Count Interrupt Enable */ + pseudo_bit_t reserved10[0x00001]; + pseudo_bit_t qpw_lst_miss_time_ovf[0x00001];/* QPW Last Miss Time Overflow (write 1 to clear) */ + pseudo_bit_t qpw_access_cnt_ovf[0x00001];/* QPW Access Count Overflow (write 1 to clear) */ + pseudo_bit_t qpw_miss_cnt_ovf[0x00001];/* QPW Miss Count Overflow (write 1 to clear) */ + pseudo_bit_t reserved11[0x00001]; + pseudo_bit_t qpw_acc_cnt_ind[0x00005];/* QPW Access Count Indication (EvCnt1) */ + pseudo_bit_t reserved12[0x00003]; + pseudo_bit_t qpw_miss_cnt_ind[0x00005];/* QPW Miss Count Indication (EvCnt2) */ + pseudo_bit_t reserved13[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpw_lst_miss_time_cnt[0x00020];/* QPW Last Miss Time Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t qpw_acc_cnt[0x00020]; /* QPW Access Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t qpw_miss_cnt[0x00020]; /* QPW Miss 
Counter */ +/* --------------------------------------------------------- */ +}; + +/* @COMPERRINFO */ + +struct COMPERRINFO_st { /* Little Endian */ + pseudo_bit_t cmp_syndrom[0x00008]; + pseudo_bit_t cmp_c[0x00001]; + pseudo_bit_t reserved0[0x00015]; + pseudo_bit_t int_clr[0x00001]; + pseudo_bit_t int_set[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t cmp_qpn[0x00018]; + pseudo_bit_t reserved1[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* @QLTMISSINFO */ + +struct QLTMISSINFO_st { /* Little Endian */ + pseudo_bit_t q_nsb_addr_63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t q_nsb_addr_31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t q_segment[0x00008]; + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x000a0]; +/* --------------------------------------------------------- */ +}; + +/* dmu wr calib. parameters */ + +struct DMUWRCLB_st { /* Little Endian */ + pseudo_bit_t wr_dqs_dly[0x00005]; /* the delay on the written data strobe of byte number X + */ + pseudo_bit_t wr_dq_dly[0x00005]; /* the delay on the written data of byte number X + */ + pseudo_bit_t reserved0[0x00016]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* the rd dqs en calib. parameters */ + +struct DmuRdDqsenClb_st { /* Little Endian */ + pseudo_bit_t dqs_dqsendly_str[0x00004]; + pseudo_bit_t dqs_dqsendly[0x00004]; + pseudo_bit_t dqs_dqsdly_wind[0x00005]; + pseudo_bit_t reserved0[0x00013]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* the rd calib param */ + +struct DMURDDQSCLB_st { /* Little Endian */ + pseudo_bit_t dqs_dqsdly_str[0x00006]; + pseudo_bit_t dqs_dqsdly[0x00006]; + pseudo_bit_t dqs_dqsdc[0x00003]; + pseudo_bit_t dqs_dqsdly_wind[0x00007]; + pseudo_bit_t reserved0[0x0000a]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* DIMM Timing Configuration */ + +struct DIMM_timimg_config_st { /* Little Endian */ + pseudo_bit_t tmrd[0x00003]; /* Mode register set command cycle time */ + pseudo_bit_t tras[0x00004]; /* Active to precharge command. Available from SPD. */ + pseudo_bit_t trap[0x00003]; /* ????? */ + pseudo_bit_t trc[0x00004]; /* Active to active/auto refresh command period. Available from SPD. */ + pseudo_bit_t trfc[0x00004]; /* Auto refresh to active/auto refresh command period. Available from SPD. */ + pseudo_bit_t trp[0x00003]; /* Precharge command period. Available from SPD. */ + pseudo_bit_t trcd[0x00003]; /* Active to read or write delay. Available from SPD */ + pseudo_bit_t trrd[0x00003]; /* Active bank A to active bank B command. 
Available from SPD */ + pseudo_bit_t twr[0x00003]; /* Write recovery time */ + pseudo_bit_t twtr[0x00002]; /* Internal write to read command delay */ +/* --------------------------------------------------------- */ + pseudo_bit_t tdal[0x00004]; /* Auto precharge write recovery + precharge time */ + pseudo_bit_t mode_register[0x0000c];/* [2:0] - burst len + [3:3] - type of burst (sequential / interleaved) + [6:4] - CAS latency + [11:7] - Operating mode */ + pseudo_bit_t extended_mode_register[0x0000c];/* Defined in JEDEC spec. + Controls DLL, drive strength, QFC and operating mode. */ + pseudo_bit_t reserved0[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* DIMM General Configuration Registers */ + +struct DIMMGeneralConfig_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t stop[0x00001]; /* When asserted, all access to the DIMM is stopped; + the DIMM will not perform any NSB requests. */ + pseudo_bit_t add_mode[0x00002]; /* address mode -> how to take BA,RA,CA from the 64 bit NSB address */ + pseudo_bit_t init_mode[0x00002]; /* Which DIMM initialization sequence is used + TBD */ + pseudo_bit_t ca_length[0x00004]; /* Column address length: + 0000 - 7 bits length, 0001 - 8 bits length + 0010 - 9 bits length, 0011 - 10 bits length + 0100 - 11 bits length, 0101 - 12 bits length + 0110 - 13 bits length, 0111 - 14 bits length + 1000 - 15 bits length, 1001 - 16 bits length + + */ + pseudo_bit_t ra_length[0x00004]; /* Row address length: + 0000 - 7 bits length, 0001 - 8 bits length + 0010 - 9 bits length, 0011 - 10 bits length + 0100 - 11 bits length, 0101 - 12 bits length + 0110 - 13 bits length, 0111 - 14 bits length + 1000 - 15 bits length, 1001 - 16 bits length + + */ + pseudo_bit_t width[0x00003]; /* DIMM Data width: + 000 - x4 + 001 - x8 + 010 - x16 + 011 - x32 + other - reserved */ + pseudo_bit_t reserved1[0x0000f]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* DIMM Statistics Counters */ + +struct DIMMStatistics_st { /* Little Endian */ + pseudo_bit_t DIMM_Hit_Counter[0x00020];/* 32 bit counter for the number of hit accesses to this DIMM. */ +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_miss_counter[0x00020];/* 32 bit counter for the number of miss accesses to this DIMM. */ +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_Precharge_counter[0x00020];/* 32 bit counter for the number of precharge accesses to this DIMM. */ +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_DI_errors_counter[0x00020];/* 32 bit counter for the number of data integrity errors for this DIMM. */ +/* --------------------------------------------------------- */ +}; + +/* Open Pages Monitoring Register */ + +struct statistics_openpages_st { /* Little Endian */ + pseudo_bit_t static_clr_req[0x00001];/* when HIGH, the statistical counters are cleared. */ + pseudo_bit_t op_count[0x00004]; /* current number of open pages */ + pseudo_bit_t max_op_count[0x00004]; /* maximum number of open pages (since the last req) */ + pseudo_bit_t reserved0[0x00017]; +/* --------------------------------------------------------- */ +}; + +/* @PCIX only. 
Attributes of the configuration transaction for further sending of a Split Completion transaction via the SWCYCLES port. */ + +struct cfg_attributes_st { /* Little Endian */ + pseudo_bit_t secondary_bus_number[0x00008];/* Secondary Bus Number. This field is valid in the case of Configuration Type 0 commands and undefined in the case of Configuration Type 1 commands. */ + pseudo_bit_t reqfuncnum[0x00003]; /* Requestor's function number */ + pseudo_bit_t reqdevnum[0x00005]; /* Requestor's Device Number */ + pseudo_bit_t reqbusnum[0x00008]; /* Requestor's Bus Number */ + pseudo_bit_t tag[0x00005]; /* Requestor's Tag */ + pseudo_bit_t ro[0x00001]; /* Requestor's Relaxed Ordering bit. */ + pseudo_bit_t ns[0x00001]; /* Requestor's No Snoop bit */ + pseudo_bit_t reserved0[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* Software Generation of PCI Cycles Status */ + +struct swcycle_internal_st { /* Little Endian */ + pseudo_bit_t swce_sc_tag[0x00005]; /* Tag which SWCE uses for its transactions. */ + pseudo_bit_t port[0x00001]; /* pmu port number */ + pseudo_bit_t rest_pops[0x00002]; /* number of expected pops from pmu */ + pseudo_bit_t first_data[0x00001]; + pseudo_bit_t pt_sce_bit[0x00001]; + pseudo_bit_t exec_status[0x00002]; + pseudo_bit_t sense_ps[0x00002]; + pseudo_bit_t main_ps[0x00002]; + pseudo_bit_t curr_retry_counter[0x00008]; + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t rec_ta[0x00001]; + pseudo_bit_t rec_ma[0x00001]; + pseudo_bit_t rec_mdpe[0x00001]; + pseudo_bit_t reserved1[0x00002]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct pcu_gp_cfg_h1_st { /* Little Endian */ + pseudo_bit_t pmu_rd_ptr[0x00001]; + pseudo_bit_t gp_cfg2[0x00001]; + pseudo_bit_t ptu_rd_ptr[0x00001]; + pseudo_bit_t gp_cfg3[0x00005]; + pseudo_bit_t pmu_mask[0x00005]; + pseudo_bit_t gp_cfg4[0x00003]; + pseudo_bit_t ptu_mask[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct pcu_gp_cfg_st { /* Little Endian */ + pseudo_bit_t array_redundency0[0x00008];/* + + 31 - Enable cfg_cycle engine. 
When cleared, cfg cycles respond with retry */ + pseudo_bit_t pcma_use_one_port[0x00001]; + pseudo_bit_t target_abort_on_be_ff[0x00001]; + pseudo_bit_t gp_cfg0[0x00003]; + pseudo_bit_t virtual_fifo_size[0x00003]; + pseudo_bit_t min_prefetch_timeout[0x00004]; + pseudo_bit_t nsbtx_sw_usr[0x00001]; + pseudo_bit_t pxm_frameoe_s_en[0x00001]; + pseudo_bit_t sw_100m_en[0x00001]; + pseudo_bit_t sw_100m[0x00001]; + pseudo_bit_t page_disc_en[0x00001]; + pseudo_bit_t bug5275[0x00001]; + pseudo_bit_t gp_cfg1[0x00005]; + pseudo_bit_t en_cfg_cycle_engine[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* NSI Gateway RAM */ + +struct NSIGWRAM_st { /* Little Endian */ + pseudo_bit_t nsi_buffer0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer14[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer27[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t nsi_buffer28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer41[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer50[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer59[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer61[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t nsi_buffer62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_buffer63[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* NSI Gateway Extended Control */ + +struct NSIGWEXTCTRL_st { /* Little Endian */ + pseudo_bit_t address_63_32[0x00020];/* see details in address_31_0 */ +/* --------------------------------------------------------- */ + pseudo_bit_t address_31_0[0x00020]; /* Cmd: Read/Write + Address[63:0] is the selected address (key,pd,te,nsvl,np,va_pa,nsq fields are used) + + Cmd: ReadQP/WriteQP + Address[63:32] is reserved. + Address[23:0] is QP# + Address[24] is 1 for EE, 0 for QP + Address[26:25] is 01 for Send, 10 for Receive, 11 for Completion + (relevant base address,key,pd,te,nsvl,np,va_pa, and nsq fields are taken from the qpchost registers) + Address[31:27] - Reserved + + Cmd: Swap + Address[63:32] is reserved. + Address[9:0] is Offset to Cache Array + Address[24:10] is Reserved + Address[26:25] is 01 for Send, 10 for Receive, 11 for Completion + Address[31:27] - Reserved + + Cmd: Transmit + Address[63:48] is Token + Address[47:40] is QPState + Address[39:32] is Syndrome + Address[15:0] is Line Enable + Address[24:16] is Reserved + Address[26:25] is 01 for Send, 10 for Receive, 11 for Completion + Address[31:27] - Reserved + */ +/* --------------------------------------------------------- */ + pseudo_bit_t key[0x00020]; /* Key for TPT access. Used for the Read and Write commands + For ReadQP and WriteQP commands the key field in the qpcbase registers is used */ +/* --------------------------------------------------------- */ + pseudo_bit_t pd[0x00018]; /* Protection Domain used for TPT access for Read and Write commands. + For ReadQP and WriteQP commands the pd field in the qpcbase registers is used */ + pseudo_bit_t te[0x00001]; /* TPT Access Enabled - When set, the TPT will be accessed. When cleared, the address will be regarded as physical and the nsq field will be used to access the NSI. This bit is relevant for Read and Write. For ReadQP and WriteQP commands this te bit is not relevant (the te bit in the qpcbase registers is used) */ + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t va_pa[0x00001]; /* 1 = virtual address + 0 = physical address + This bit does not control whether TPT is accessed or not (this is done by the te field). If te is set, TPT will be accessed (regardless of va_pa value) for address decoding purposes. + This bit is relevant for Read and Write. For ReadQP and WriteQP commands this va_pa bit is not relevant (the va_pa bit in the qpcbase registers is used) + */ + pseudo_bit_t np[0x00001]; /* When set, NP bit is set in TPT access. + This bit is relevant for Read and Write. For ReadQP and WriteQP commands this np bit is not relevant (the np bit in the qpcbase registers is used) */ + pseudo_bit_t nsvl[0x00001]; /* VL to use in North Switch + This bit is relevant for Read and Write. For ReadQP and WriteQP commands this nsvl bit is not relevant (the nsvl bit in the qpcbase registers is used) */ +/* --------------------------------------------------------- */ + pseudo_bit_t nsq_for_reads[0x00006];/* This field is used to access the NSI when the TPT is not used (te field is cleared). + For ReadQP commands the nsq for reads field in the qpcbase registers is used (when relevant). 
+ + {VL,Nswitch lane[1:0],Nswitch destination id[2:0]}; + + VL 0 is regular memory access + VL 1 is privileged access (cache replacements) + + LANE_POSTED = 2'b01; + LANE_NONPOSTED = 2'b10; + LANE_RESPONSE = 2'b11; + + NTU = 3'b000; + PCU = 3'b100; + DMU = 3'b001; + HCA = 3'b011; + */ + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t nsq_for_writes[0x00006];/* This field is used to access the NSI when the TPT is not used (te field is cleared). + For WriteQP commands the nsq for writes field in the qpcbase registers is used (when relevant). + + {VL,Nswitch lane[1:0],Nswitch destination id[2:0]}; + + VL 0 is regular memory access + VL 1 is privileged access (cache replacements) + + LANE_POSTED = 2'b01; + LANE_NONPOSTED = 2'b10; + LANE_RESPONSE = 2'b11; + + NTU = 3'b000; + PCU = 3'b100; + DMU = 3'b001; + HCA = 3'b011; + */ + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t mem_io[0x00001]; /* mem/io bit to be used in the NSI transaction if the te bit is cleared (no tpt access) + mem = 0 + i/o = 1 */ + pseudo_bit_t nsi_syndrome[0x00008]; + pseudo_bit_t reserved3[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_token[0x00010]; + pseudo_bit_t reserved4[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t size[0x00010]; + pseudo_bit_t offset[0x00004]; /* offset within the data buffer (in units of 16 bytes) + offset must be aligned to size */ + pseudo_bit_t reserved5[0x0000c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* NSI Gateway Main Control */ + +struct NSIGWCTRL_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t gwcmd[0x00006]; /* Command/Status - Sw writes to this field the desired command. Hw executes the cmd and returns in this field the status of the operation. For details on commands and status, see instance description. */ + pseudo_bit_t gwbusy[0x00001]; /* When set, it means the hw is executing a command. It is set by hw when the cmd field is written to a value different from zero. It is cleared by hw after completing the required operation and setting the status in the cmd field. */ + pseudo_bit_t gwlocked[0x00001]; /* Gateway Locked - This bit is set when the gw is being used by some entity. Sw is required to read the gwcontrol field before using a gw. This field behaves as a semaphore: if it was cleared, the read returns a 0 in this bit and the bit is set by the hw atomically. Every subsequent read access to the field will return 1 until the agent that locked the gw writes a zero. */ +/* --------------------------------------------------------- */ +};
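+/* A minimal sketch of the gwlocked/gwbusy/gwcmd handshake described above.
+ * The reg_read32()/reg_write32() MMIO accessors and the ctrl_off parameter
+ * are hypothetical placeholders, not part of this file; bit positions assume
+ * the NSIGWCTRL_st fields pack LSB-first: gwcmd [29:24], gwbusy [30],
+ * gwlocked [31]. */
+#include <stdint.h>
+
+#define NSIGW_GWCMD_SHIFT   24
+#define NSIGW_GWCMD_MASK    (0x3fu << NSIGW_GWCMD_SHIFT)
+#define NSIGW_GWBUSY        (1u << 30)
+#define NSIGW_GWLOCKED      (1u << 31)
+
+extern uint32_t reg_read32(uint32_t offset);              /* hypothetical */
+extern void     reg_write32(uint32_t offset, uint32_t v); /* hypothetical */
+
+/* Acquire: a read that returns gwlocked==0 atomically sets the bit, so the
+ * caller that sees 0 now owns the gateway. */
+static int nsigw_try_lock(uint32_t ctrl_off)
+{
+	return (reg_read32(ctrl_off) & NSIGW_GWLOCKED) == 0;
+}
+
+/* Issue a command with gwbusy set; hw clears gwbusy when done and returns
+ * the completion status in the gwcmd field. */
+static uint32_t nsigw_exec(uint32_t ctrl_off, uint32_t cmd)
+{
+	reg_write32(ctrl_off,
+	            ((cmd << NSIGW_GWCMD_SHIFT) & NSIGW_GWCMD_MASK) | NSIGW_GWBUSY);
+	while (reg_read32(ctrl_off) & NSIGW_GWBUSY)
+		;	/* poll until hw completes */
+	return (reg_read32(ctrl_off) & NSIGW_GWCMD_MASK) >> NSIGW_GWCMD_SHIFT;
+}
+
+/* Release: the locking agent writes gwlocked back to zero. */
+static void nsigw_unlock(uint32_t ctrl_off)
+{
+	reg_write32(ctrl_off, 0);
+}
+
+/* nsq encoding per the comment above: {VL, Nswitch lane[1:0], dest id[2:0]}.
+ * Example: NSQ(0, 2, 1) = VL 0, LANE_NONPOSTED (2'b10), DMU (3'b001). */
+#define NSQ(vl, lane, dest) \
+	((uint32_t)((((vl) & 1u) << 5) | (((lane) & 3u) << 3) | ((dest) & 7u)))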
+ +/* */ + +struct STD_QC_FSMS_st { /* Little Endian */ + pseudo_bit_t ts_ps[0x00001]; + pseudo_bit_t gw_ps_tag[0x00002]; + pseudo_bit_t exe_ps[0x00002]; + pseudo_bit_t miss_exe_ps[0x00002]; + pseudo_bit_t srw_ps[0x00001]; + pseudo_bit_t qpc_core_ps[0x00004]; /* IDELL = 4'b0000 + DECOD = 4'b0001 + ERROR = 4'b0010 + MISSS = 4'b0011 + W4SLT = 4'b0100 + RDMW0 = 4'b0101 + GNCON = 4'b0110 + RDMW1 = 4'b0111 + GNEVL = 4'b1000 + WDRAN = 4'b1001 + EVALT = 4'b1010 + PREVL = 4'b1011 */ + pseudo_bit_t misdrn_ps[0x00002]; + pseudo_bit_t hoq_ps[0x00002]; /* IDLE = 2'b00 + HEDR = 2'b01 + DATA = 2'b10 */ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct timeoutfifoctrl_st { /* Little Endian */ + pseudo_bit_t full[0x00001]; + pseudo_bit_t almost_full[0x00001]; + pseudo_bit_t empty[0x00001]; + pseudo_bit_t almost_empty[0x00001]; + pseudo_bit_t reserved0[0x00014]; + pseudo_bit_t pop[0x00001]; + pseudo_bit_t poprd[0x00001]; + pseudo_bit_t reserved1[0x00005]; + pseudo_bit_t fifolocked[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* Context Cache Set Lock Control */ + +struct QCGATEKEEPER_st { /* Little Endian */ + pseudo_bit_t reserved0[0x000c0]; +/* --------------------------------------------------------- */ + pseudo_bit_t GK63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t GK31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x000c0]; +/* --------------------------------------------------------- */ + pseudo_bit_t CGK1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t CGK0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x000c0]; +/* --------------------------------------------------------- */ + pseudo_bit_t SGK1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SGK0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t stop[0x00001]; /* stops all sets (see bug 5168) */ + pseudo_bit_t reserved3[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t af_level[0x00005]; /* Miss FIFO almost full watermark. + When Miss FIFO reaches almost full watermark, input to the cache is closed for all sets. After Miss FIFO has reached almost + full watermark, 12 more lines can be pushed. + If this field is configured to 1, then cache operates as a blocking cache (i.e. every miss locks all sets). */ + pseudo_bit_t reserved4[0x00003]; + pseudo_bit_t ae_level[0x00005]; /* Miss FIFO almost empty watermark. 
*/ + pseudo_bit_t reserved5[0x00013]; +/* --------------------------------------------------------- */ + pseudo_bit_t clear_gk_all[0x00001]; /* writing a one to this bit clears the blocking gatekeeper function (relevant only if af_level is 1) */ + pseudo_bit_t reserved6[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t set_gk_all[0x00001]; /* writing a one to this bit sets the blocking gatekeeper function (relevant only if af_level is 1) */ + pseudo_bit_t reserved7[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00080]; +/* --------------------------------------------------------- */ +}; + +/* Miss Reexecute Control */ + +struct REEXEFSMCTL_st { /* Little Endian */ + pseudo_bit_t pipe_clean[0x00001]; /* This bit is never set, see bug 5168 */ + pseudo_bit_t reserved0[0x00017]; + pseudo_bit_t cmd[0x00006]; + pseudo_bit_t busy[0x00001]; + pseudo_bit_t reserved1[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* OpcodeMask of QPC */ + +struct QC_OP_MASK_st { /* Little Endian */ + pseudo_bit_t fetch_and_xor[0x00001]; + pseudo_bit_t fetch_and_or[0x00001]; + pseudo_bit_t fetch_and_and[0x00001]; + pseudo_bit_t fetch_and_sub[0x00001]; + pseudo_bit_t fetch_and_add[0x00001]; + pseudo_bit_t xor[0x00001]; + pseudo_bit_t or[0x00001]; + pseudo_bit_t and[0x00001]; + pseudo_bit_t sub[0x00001]; + pseudo_bit_t add[0x00001]; + pseudo_bit_t write_timer[0x00001]; + pseudo_bit_t stop_timer[0x00001]; + pseudo_bit_t reset_timer[0x00001]; + pseudo_bit_t start_timer[0x00001]; + pseudo_bit_t write[0x00001]; + pseudo_bit_t read[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* Context Cache Tag Access Gateway */ + +struct QCTAGGW_st { /* Little Endian */ + pseudo_bit_t Address[0x0000a]; + pseudo_bit_t reserved0[0x0000e]; + pseudo_bit_t gwcmd[0x00006]; + pseudo_bit_t gwbusy[0x00001]; + pseudo_bit_t gwlocked[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t tag[0x00012]; + pseudo_bit_t reserved2[0x00005]; + pseudo_bit_t qe[0x00001]; + pseudo_bit_t v[0x00001]; + pseudo_bit_t eq[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* Context Miss Gateway */ + +struct QCMISSGW_st { /* Little Endian */ + pseudo_bit_t full[0x00001]; + pseudo_bit_t af[0x00001]; /* Almost Full */ + pseudo_bit_t e[0x00001]; /* empty */ + pseudo_bit_t ae[0x00001]; /* almost empty */ + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t num_of_lines[0x00006]; + pseudo_bit_t reserved1[0x0000a]; + pseudo_bit_t pop[0x00001]; + pseudo_bit_t poprd[0x00001]; + pseudo_bit_t reserved2[0x00005]; + pseudo_bit_t gwlocked[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss0[0x00020]; /* bit 31:19 reserved + bit 18 - hit + bit 17 - locked + bit 16 - opcode2irisc + */ +/* --------------------------------------------------------- */ + pseudo_bit_t miss1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss4[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Context Cache Array Access Gateway */ + +struct QCGW_st { /* Little Endian */ + pseudo_bit_t Address[0x0000d]; + pseudo_bit_t 
reserved0[0x0000b]; + pseudo_bit_t gwcmd[0x00006]; + pseudo_bit_t gwbusy[0x00001]; + pseudo_bit_t gwlocked[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata0[0x00020]; /* Line[127:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata1[0x00020]; /* Line[95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata2[0x00020]; /* Line[63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata3[0x00020]; /* Line[31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t ByteEnable[0x00010]; /* bit 15 - is for Line[127:120] + bit 14 - is for Line[119:112] + . + . + bit 0 - is for Line[7:0] */ + pseudo_bit_t reserved1[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* CQ FIFO Control Register */ + +struct CQFIFOGWCTL_st { /* Little Endian */ + pseudo_bit_t f[0x00001]; /* see tavor mas */ + pseudo_bit_t af[0x00001]; + pseudo_bit_t e[0x00001]; /* see tavor mas */ + pseudo_bit_t ae[0x00001]; /* see tavor mas */ + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t depth_0_7[0x00007]; /* depth in units of 32 bits. If 128, check f bit. */ + pseudo_bit_t last_pop[0x00001]; /* 1 if last POP, 0 if last PUSH */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t pop[0x00001]; + pseudo_bit_t poprd[0x00001]; + pseudo_bit_t reserved2[0x00005]; + pseudo_bit_t fifolocked[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct EXUR_SCRPAD_st { /* Little Endian */ + pseudo_bit_t word[54][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t DB_DROP[0x00020]; /* Number of times DB was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[8][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct EXUS_SCRPAD_st { /* Little Endian */ + pseudo_bit_t word[53][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SUPER_DB[0x00020]; /* how many times Super DB succeeded */ +/* --------------------------------------------------------- */ + pseudo_bit_t DB_DROP[0x00020]; /* Number of times DB was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[8][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct TCU_SCRPAD_st { /* Little Endian */ + pseudo_bit_t word[50][0x00020]; /* instead of RESERVED 248 */ +/* --------------------------------------------------------- */ + pseudo_bit_t MILI_SEC[0x00020]; /* value to set to timer to count milliseconds */ +/* --------------------------------------------------------- */ + pseudo_bit_t TCU_INT_CNT[0x00020]; /* Counter for debug */ +/* --------------------------------------------------------- */ + pseudo_bit_t DB_DROP[0x00020]; /* Number of times DB was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t UD_DROP[0x00020]; /* Number of times UD was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t 
UC_DROP[0x00020]; /* Number of times UC was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[8][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct QPC_SCRPAD_st { /* Little Endian */ + pseudo_bit_t word[50][0x00020]; +/* --------------------------------------------------------- */ + struct QPC_Performance_Counters_st CNTR;/* QPC Performance counters */ +/* --------------------------------------------------------- */ + pseudo_bit_t ERR_SYN[0x00020]; /* Last error syndrome */ +/* --------------------------------------------------------- */ + pseudo_bit_t DB_DROP[0x00020]; /* Number of times DB was dropped */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[8][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct NTU_SCRPAD_st { /* Little Endian */ + pseudo_bit_t word[54][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SCRUBB_COUNTER[0x00020];/* Counts the number of times scrubbing was called. */ +/* --------------------------------------------------------- */ + pseudo_bit_t PERIODIC_COUNTER[0x00020];/* Counts the number of times FW read the whole DDR. */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[7][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct TPT_SCRPAD_st { /* Little Endian */ + pseudo_bit_t apm_req_qpn[0x00020]; /* TPT passes QPN for APM request */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00260]; +/* --------------------------------------------------------- */ + pseudo_bit_t NSIGW_UNLOCK[0x00020]; /* TPT shall unlock NSIGW when busy goes to 0 */ +/* --------------------------------------------------------- */ + pseudo_bit_t word[32][0x00020]; /* instead of RESERVED 248 */ +/* --------------------------------------------------------- */ + struct TPT_Performance_Counters_st CNTR;/* TPT Performance counters */ +/* --------------------------------------------------------- */ + pseudo_bit_t CHECK_STACK[0x00020]; /* Write to memory at this address means stack overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[8][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* BOOT2 Scratch pad params */ + +struct BOOT2_SCRPAD_st { /* Little Endian */ + pseudo_bit_t DIMM_SLV[4][0x00020]; /* DIMM slv addr info: 0x50 0x52 0x51 0x53 */ +/* --------------------------------------------------------- */ + pseudo_bit_t XSR_ICLK_WAIT[0x00020];/* How many iclocks we need to wait after exiting self refresh. 
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t FAST_SR[0x00020]; /* fast self refresh feature */ +/* --------------------------------------------------------- */ + pseudo_bit_t NS_TO_CLOCKS[0x00020]; /* 0x82518 bits 0-4 ns/10 */ +/* --------------------------------------------------------- */ + pseudo_bit_t TACTUAL[0x00020]; /* DRAM clock, units: Bits 0-3: 0.1 ns; Bits 4-7: ns (round up) */ +/* --------------------------------------------------------- */ + pseudo_bit_t NS_TO_8K_CLOCKS[0x00020];/* 10 * dram_frequency * 8K / 10000 */ +/* --------------------------------------------------------- */ + struct TAR_TABLE_st TAR_TABLE; /* Translates Refresh Period to DRAM clock */ +/* --------------------------------------------------------- */ + struct CAS_TABLE_st CAS_TABLE; /* Translates CAS latency to JEDEC encoding */ +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_TYPE[4][0x00020]; /* DIMM type supported by this slot: 1-unbuffered, 2-registered, 3-both (3 2 3 2) */ +/* --------------------------------------------------------- */ + pseudo_bit_t TACTUAL_DOWN[0x00020]; /* DRAM clock, units of 0.25 ns (round down) */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x002a0]; +/* --------------------------------------------------------- */ + struct LOADER_CMD_IF_st LOADER_CMD_IF; +/* --------------------------------------------------------- */ + struct DIMM_SYND_st DIMM_SYND[4]; /* DIMM syndrome */ +/* --------------------------------------------------------- */ + struct DIMM_REQ_st DIMM_REQ[4]; /* DIMM Info from DMU discovery */ +/* --------------------------------------------------------- */ + pseudo_bit_t DOOR_BELL[0x00020]; /* Size of doorbell area (in units of 1M) */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t DDR_SIZE_LIMIT[0x00020];/* Max supported total DDR size (units of 1M) */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t FW_SIZE[0x00020]; /* 0x82634 */ +/* --------------------------------------------------------- */ + pseudo_bit_t DI_MODE[0x00020]; /* 0x82638 */ +/* --------------------------------------------------------- */ + pseudo_bit_t AP_MODE[0x00020]; /* 0x8263c */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x000a0]; +/* --------------------------------------------------------- */ + struct FW_IMAGE_st FW_IMAGE; /* Data on active FW image */ +/* --------------------------------------------------------- */ + struct Device_header_st SHRIMP_MASK_HEADER;/* 0x82838 .. 
0x828bc */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t ACTUAL_DDR_SIZE_MSB[0x00020];/* 0x82700 */ +/* --------------------------------------------------------- */ + pseudo_bit_t ACTUAL_DDR_SIZE_LSB[0x00020];/* 0x82704 */ +/* --------------------------------------------------------- */ + pseudo_bit_t CALIB_STAT[0x00020]; /* 0x82708 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t SYSENA_SP[0x00020]; /* Used to store NTU stack pointer in SYSEN->SYSDIS */ +/* --------------------------------------------------------- */ + struct Bridge_header_st PCU_HEADER; /* 0x82720 .. 0x827a4 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00040]; +/* --------------------------------------------------------- */ + struct Device_header_st HCA_HEADER; /* 0x827ac .. 0x82830 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00040]; +/* --------------------------------------------------------- */ + struct Bridge_header_st B_MASK_HEADER;/* 0x82838 .. 0x828bc */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00040]; +/* --------------------------------------------------------- */ + struct Device_header_st D_MASK_HEADER;/* 0x828c4 .. 0x82948 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00040]; +/* --------------------------------------------------------- */ + struct Device_header_st SHRIMP_HEADER;/* 0x82950 .. 0x829d4 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_DISCOVERY_ERROR[0x00020];/* RO, for debugging */ +/* --------------------------------------------------------- */ + pseudo_bit_t DIMM_DISCOVERY_STAGE[0x00020];/* RO, for debugging */ +/* --------------------------------------------------------- */ + pseudo_bit_t HIDE_DDR_EN[0x00020]; /* if eq 1 then we should hide the ddr */ +/* --------------------------------------------------------- */ + pseudo_bit_t Hidden_ddr_lsb[0x00020];/* 32 lsb bits of hidden ddr address */ +/* --------------------------------------------------------- */ + pseudo_bit_t Hidden_ddr_msb[0x00020];/* 32 msb bits of hidden ddr address */ +/* --------------------------------------------------------- */ + pseudo_bit_t DEBUG[5][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct EQN_MAP_st { /* Little Endian */ + pseudo_bit_t eqn4type_3[0x00008]; + pseudo_bit_t eqn4type_2[0x00008]; + pseudo_bit_t eqn4type_1[0x00008]; + pseudo_bit_t eqn4type_0[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_7[0x00008]; + pseudo_bit_t eqn4type_6[0x00008]; + pseudo_bit_t eqn4type_5[0x00008]; + pseudo_bit_t eqn4type_4[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_B[0x00008]; + pseudo_bit_t eqn4type_A[0x00008]; + pseudo_bit_t eqn4type_9[0x00008]; + pseudo_bit_t eqn4type_8[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_F[0x00008]; + pseudo_bit_t eqn4type_E[0x00008]; + pseudo_bit_t eqn4type_D[0x00008]; + pseudo_bit_t 
eqn4type_C[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_13[0x00008]; + pseudo_bit_t eqn4type_12[0x00008]; + pseudo_bit_t eqn4type_11[0x00008]; + pseudo_bit_t eqn4type_10[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_17[0x00008]; + pseudo_bit_t eqn4type_16[0x00008]; + pseudo_bit_t eqn4type_15[0x00008]; + pseudo_bit_t eqn4type_14[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_1B[0x00008]; + pseudo_bit_t eqn4type_1A[0x00008]; + pseudo_bit_t eqn4type_19[0x00008]; + pseudo_bit_t eqn4type_18[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqn4type_1F[0x00008]; + pseudo_bit_t eqn4type_1E[0x00008]; + pseudo_bit_t eqn4type_1D[0x00008]; + pseudo_bit_t eqn4type_1C[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct INIT_HCA_st { /* Little Endian */ + pseudo_bit_t eqpc_base_addr_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqpc_base_addr_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t eeec_base_addr_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t eeec_base_addr_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t eqc_base_addr_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_eq[0x00004]; + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t eqc_base_addr_l[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t mc_base_addr_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mc_base_addr_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t log_mc_table_entry_sz[0x00010]; + pseudo_bit_t log_mc_table_sz[0x00005]; + pseudo_bit_t mc_hash_fn[0x00003]; + pseudo_bit_t reserved1[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t mc_table_hash_sz[0x00011]; + pseudo_bit_t reserved2[0x0000f]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_segment_sz[0x00003];/* Actual SegmentSize in bytes: 2^(6+mtt_segment_sz) */ + pseudo_bit_t pfto[0x00005]; /* PageFault TimeOut for RNR NAK */ + pseudo_bit_t reserved3[0x00012]; + pseudo_bit_t mtt_base_addr_l_31_26[0x00006]; +/* --------------------------------------------------------- */ + pseudo_bit_t uar_scratch_base_addr_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t uar_scratch_base_addr_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t responder_exu[0x00010];/* how many exu engines are responder (0x0 => Auto(8)) */ + pseudo_bit_t wqe_quota[0x00010]; /* Max WQEs to execute per arbitration */ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct FW_VERSION_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00010]; + pseudo_bit_t MAJOR[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t SUBMINOR[0x00010]; + pseudo_bit_t MINOR[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t Hour[0x00008]; + pseudo_bit_t Minutes[0x00008]; + pseudo_bit_t Seconds[0x00008]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t Day[0x00008]; + pseudo_bit_t Month[0x00008]; + pseudo_bit_t Year[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct FW_TRACE_st { /* Little Endian */ + pseudo_bit_t FW_TRACE_MASK_H[0x00020];/* High mask. Unused. */ +/* --------------------------------------------------------- */ + struct FW_TRACE_MASK_L_st FW_TRACE_MASK_L; +/* --------------------------------------------------------- */ + pseudo_bit_t buf_trace_sz[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EXU0_addr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EXU1_addr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t TCU_addr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t TPT_addr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t QPC_addr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t NTU_addr[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* SPD header (#Byte Function [Notes]) */ + +struct SPDH_st { /* Little Endian */ + pseudo_bit_t NMANB[0x00008]; /* 0 Number of Serial PD Bytes written during module production[1] */ + pseudo_bit_t TSPDB[0x00008]; /* 1 Total number of Bytes in Serial PD device[2] */ + pseudo_bit_t TYPE[0x00008]; /* 2 Fundamental Memory Type (FPM, EDO, SDRAM &) */ + struct NALEN_st RALEN; /* 3 Number of Row Addresses on this assembly */ +/* --------------------------------------------------------- */ + struct NALEN_st CALEN; /* 4 Number of Column Addresses on this assembly */ + pseudo_bit_t NDIMMS[0x00008]; /* 5 Number of Physical Banks on DIMM */ + pseudo_bit_t DWIDTH[0x00010]; /* 6-7 Data Width of this assembly */ +/* --------------------------------------------------------- */ + pseudo_bit_t VOLTL[0x00008]; /* 8 Voltage Interface Level of this assembly */ + pseudo_bit_t TCKCLX[0x00008]; /* 9 SDRAM Device Cycle time (t CK) at Maximum Supported CAS Latency (CL), CL=X[3] */ + pseudo_bit_t TACMAX[0x00008]; /* 10 SDRAM Device Access from Clock (t AC) at CLX */ + pseudo_bit_t CTYPE[0x00008]; /* 11 DIMM configuration type (Non-parity, Parity or ECC) */ +/* --------------------------------------------------------- */ + struct REFRESH_st REFRESH; /* 12 Refresh Rate/Type [3,4] */ + struct WIDTH_st DSDRAMW; /* 13 Data SDRAM Width */ + struct WIDTH_st ESDRAMW; /* 14 Error Checking SDRAM Width */ + pseudo_bit_t MINB2BC[0x00008]; /* 15 SDRAM Device Attributes: Minimum Clock Delay, Back-to-Back Random Column Access */ +/* --------------------------------------------------------- */ + pseudo_bit_t BURSTLN[0x00008]; /* 16 SDRAM Device Attributes: Burst Lengths Supported */ + pseudo_bit_t NBANKS[0x00008]; /* 17 SDRAM Device Attributes: Number of Banks on SDRAM Device[3] */ + struct CASLAT_st CL; /* 18 SDRAM Device Attributes: CAS Latency[3] */ + pseudo_bit_t CSLAT[0x00008]; /* 19 SDRAM Device Attributes: Chip Select Latency[3] */ +/* --------------------------------------------------------- */ + pseudo_bit_t WRLAT[0x00008]; /* 20 SDRAM Device Attributes: Write Latency[3] */ + struct MODATTR_st MODATTR; /* 21 SDRAM Module Attributes */ + struct DEVATTR_st DEVATTR; /* 22 SDRAM Device Attributes: General[3] */ + pseudo_bit_t TCK0_5[0x00008]; /* 23 SDRAM Device Minimum Clock Cycle at CLX-0.5[3] */ +/* 
--------------------------------------------------------- */ + pseudo_bit_t TAC0_5[0x00008]; /* 24 SDRAM Device Maximum Data Access Time (t AC ) from Clock at CLX-0.5[3] */ + pseudo_bit_t TCK1_0[0x00008]; /* 25 SDRAM Device Minimum Clock Cycle at CLX-1[3] */ + pseudo_bit_t TAC1_0[0x00008]; /* 26 SDRAM Device Maximum Data Access Time (t AC ) from Clock at CLX-1[3] */ + pseudo_bit_t TRP[0x00008]; /* 27 SDRAM Device Minimum Row Precharge Time (t RP )[3] */ +/* --------------------------------------------------------- */ + pseudo_bit_t TRRD[0x00008]; /* 28 SDRAM Device Minimum Row Active to Row Active Delay (t RRD )[3] */ + pseudo_bit_t TRCD[0x00008]; /* 29 SDRAM Device Minimum RAS to CAS Delay (t RCD )[3] */ + pseudo_bit_t TRAS[0x00008]; /* 30 SDRAM Device Minimum Active to Precharge Time (t RAS )[3] */ + pseudo_bit_t DENSITY[0x00008]; /* 31 Module Bank Density */ +/* --------------------------------------------------------- */ + pseudo_bit_t STPAC[0x00008]; /* 32 Address and Command Input Setup Time Before Clock[3] */ + pseudo_bit_t HLDAC[0x00008]; /* 33 Address and Command Input Hold Time After Clock[3] */ + pseudo_bit_t STPMSK[0x00008]; /* 34 SDRAM Device Data/Data Mask Input Setup Time Before Data Strobe[3] */ + pseudo_bit_t HLDMSK[0x00008]; /* 35 SDRAM Device Data/Data Mask Input Hold Time After Data Strobe[3] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t TRC[0x00008]; /* 41 Minimum Active/Auto-Refresh Time (t RC )[3] */ + pseudo_bit_t TRFC[0x00008]; /* 42 SDRAM Device Minimum Auto-Refresh to Active/Auto-Refresh Command Period (t RFC ) [3] */ + pseudo_bit_t TCKMAX[0x00008]; /* 43 SDRAM Device Maximum Cycle Time (t CK max)[3] */ +/* --------------------------------------------------------- */ + pseudo_bit_t TDQSQ[0x00008]; /* 44 SDRAM Device Maximum DQS-DQ Skew Time (t DQSQ )[3] */ + pseudo_bit_t TQHS[0x00008]; /* 45 SDRAM Device Maximum Read Data Hold Skew Factor (t QHS )[3] */ + pseudo_bit_t reserved2[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00060]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00010]; + pseudo_bit_t REV[0x00008]; /* 62 SPD Revision */ + pseudo_bit_t CHKSUM[0x00008]; /* 63 Checksum for Bytes 0-62 + Process for Calculating the Checksum + 1. Convert binary information, in byte locations 0 - 62, to decimal. + 2. Add together (sum) all decimal values for addresses 0 - 62. + 3. Divide sum by 256. + 4. Convert remainder to binary (will be less than 256). + 5. Store result (single byte) in address 63 as Checksum. + Note: The same result can be obtained by adding the binary values in addresses 0 - 62 and eliminating all but the low order byte. + The low order byte is the Checksum.
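+ A minimal illustrative sketch of this calculation in C (not part of this
+ file; assumes the 64 SPD bytes have already been read into an array spd[64]):
+ static uint8_t spd_checksum(const uint8_t spd[64]) {
+ unsigned int sum = 0;
+ int i;
+ for (i = 0; i < 63; i++)
+ sum += spd[i]; /* add bytes 0 - 62 */
+ return (uint8_t)(sum & 0xff); /* low order byte == sum % 256 */
+ }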
*/ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct GUID_INFO_st { /* Little Endian */ + pseudo_bit_t NODE_GUID_INFO_H[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t NODE_GUID_INFO_L[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t PORT1_GUID_INFO_H[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t PORT1_GUID_INFO_L[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t PORT2_GUID_INFO_H[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t PORT2_GUID_INFO_L[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SYSTEM_GUID_INFO_H[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SYSTEM_GUID_INFO_L[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t parttion_cap[0x00010]; + pseudo_bit_t reserved0[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* Transport and CI Error Counters */ + +struct Transport_and_CI_Error_Counters_st { /* Little Endian */ + pseudo_bit_t rq_num_lle[0x00020]; /* Responder - number of local length errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_lle[0x00020]; /* Requester - number of local length errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_lqpoe[0x00020]; /* Responder - number of local QP operation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_lqpoe[0x00020]; /* Requester - number of local QP operation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_leeoe[0x00020]; /* Responder - number of local EE operation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_leeoe[0x00020]; /* Requester - number of local EE operation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_lpe[0x00020]; /* Responder - number of local protection errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_lpe[0x00020]; /* Requester - number of local protection errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_wrfe[0x00020]; /* Responder - number of WR flushed errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_wrfe[0x00020]; /* Requester - number of WR flushed errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_mwbe[0x00020]; /* Requester - number of memory window bind errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_bre[0x00020]; /* Requester - number of bad response errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_lae[0x00020]; /* Responder - number of local access errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_rire[0x00020]; /* Requester - number of remote invalid request errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_rae[0x00020]; /* Requester - number of remote access errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_roe[0x00020]; /*
Requester - number of remote operation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_tree[0x00020]; /* Requester - number of transport retries exceeded errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_rree[0x00020]; /* Requester - number of RNR NAK retries exceeded errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_lrdve[0x00020]; /* Requester - number of local RDD violation errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_rirdre[0x00020];/* Responder - number of remote invalid RD request errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_rabrte[0x00020];/* Requester - number of remote aborted errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_ieecne[0x00020];/* Requester - number of invalid EE context number errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_ieecse[0x00020];/* Requester - number of invalid EE context state errors */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_oos[0x00020]; /* Responder - number of out of sequence requests received */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_oos[0x00020]; /* Requester - number of out of sequence NAKs received */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_mce[0x00020]; /* Responder - number of bad multicast packets received */ +/* --------------------------------------------------------- */ + pseudo_bit_t rq_num_rsync[0x00020]; /* Responder - number of RESYNC operations */ +/* --------------------------------------------------------- */ + pseudo_bit_t sq_num_rsync[0x00020]; /* Requester - number of RESYNC operations */ +/* --------------------------------------------------------- */ + pseudo_bit_t num_cqovf[0x00020]; /* Number of CQ overflows */ +/* --------------------------------------------------------- */ + pseudo_bit_t num_eqovf[0x00020]; /* Number of EQ overflows */ +/* --------------------------------------------------------- */ +}; + +/* */ + +struct MSIX_st { /* Little Endian */ + struct MSIX_TableEntry_st MSIX_TableEntry[32]; +/* --------------------------------------------------------- */ + pseudo_bit_t MSIX_PendingBits[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct TPTGW_DATA_st { /* Little Endian */ + pseudo_bit_t miss_memkey[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_opcode[0x00008]; + pseudo_bit_t miss_pd[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_token[0x00010]; + pseudo_bit_t miss_flags[0x00008]; /* PXL,HXL,CPB,POK,AOK,LOK,PRW,HRW */ + pseudo_bit_t miss_page_size[0x00005]; + pseudo_bit_t reserved0[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_va_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_va_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_length[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t miss_lkey[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_lkey[0x00020]; +/*
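+ Note (inferred from the field names; not normative): the miss_* fields
+ above describe the TPT request being resolved, the mpt_cache_* and
+ mtt_cache_* fields below expose the cache entries probed for it, and the
+ prot_* and xmit_* fields report the protection-check result and the
+ transmit-gateway state.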
--------------------------------------------------------- */ + pseudo_bit_t mpt_cache_va_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_va_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_len_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_len_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_pd[0x00018]; + pseudo_bit_t mpt_cache_page_size[0x00005]; + pseudo_bit_t reserved1[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_acl[0x00005]; + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t mpt_cache_memkey_tag[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x0000c]; + pseudo_bit_t mpt_cache_mtt_20bit_ptr[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t mpt_cache_phy[0x00001]; + pseudo_bit_t mpt_cache_lock[0x00001]; + pseudo_bit_t mpt_cache_valid[0x00001]; + pseudo_bit_t mpt_cache_nsvl[0x00001]; + pseudo_bit_t mpt_cache_iomem_[0x00001]; + pseudo_bit_t reserved4[0x0000b]; + pseudo_bit_t mpt_cache_hit_vec[0x00004]; + pseudo_bit_t mpt_cache_valid_vec[0x00004]; + pseudo_bit_t mpt_cache_lock_vec[0x00004]; + pseudo_bit_t reserved5[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t prot_mtt_offset_51_20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t prot_len_ok[0x00001]; + pseudo_bit_t prot_acl_ok[0x00001]; + pseudo_bit_t prot_pd_ok[0x00001]; + pseudo_bit_t prot_page_crossed[0x00001]; + pseudo_bit_t reserved6[0x00008]; + pseudo_bit_t prot_mtt_offset_19_0[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_lkey[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_va_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_va_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_pa_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_pa_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t mtt_cache_hit_vec[0x00004]; + pseudo_bit_t mtt_cache_valid_vec[0x00004]; + pseudo_bit_t mtt_cache_lock_vec[0x00004]; + pseudo_bit_t reserved9[0x00004]; + pseudo_bit_t mtt_cache_ent_valid[0x00001]; + pseudo_bit_t mtt_cache_ent_locked[0x00001]; + pseudo_bit_t reserved10[0x0000e]; +/* --------------------------------------------------------- */ + pseudo_bit_t xmit_pa_h[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t xmit_pa_l[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t xmit_token[0x00010]; + pseudo_bit_t reserved11[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t xmit_length[0x0000d]; + pseudo_bit_t reserved12[0x00003]; + pseudo_bit_t xmit_ns_destid[0x00003]; + pseudo_bit_t xmit_ns_lane[0x00002]; /* posted 1, nonposted 2 , response ???? 
*/ + pseudo_bit_t xmit_nsvl[0x00001]; + pseudo_bit_t xmit_iomem_[0x00001]; + pseudo_bit_t reserved13[0x00001]; + pseudo_bit_t xmit_xstat[0x00003]; + pseudo_bit_t reserved14[0x00005]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct TPTGW_CTRL_st { /* Little Endian */ + pseudo_bit_t address[0x00008]; /* address (set) in the caches (in MTT, when accessing NOT thru the Associativity MUX) */ + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t way[0x00003]; /* 0x4 = random way, 0x7 = ALL ways (e.g. when doing Invalidate) */ + pseudo_bit_t reserved1[0x00003]; + pseudo_bit_t thru_ass_mux[0x00001]; /* Reads/Writes of MTT are done thru the associativity MUX */ + pseudo_bit_t mtt_mpt_[0x00001]; /* '0': access MPT cache, '1': access MTT cache */ + pseudo_bit_t cmd[0x00006]; /* cache WRITE 0x01 + cache READ 0x02 + cache PROB 0x03 + miss fifo POP 0x04 + miss fifo READ 0x05 + miss fifo PUSH 0x06 + EXECUTE 0x07 + XMIT 0x08 + tmx CLOSE 0x09 + tmx OPEN 0x0a + protection PROB 0x0b + tmx OPEN debug 0x0c */ + pseudo_bit_t busy[0x00001]; + pseudo_bit_t lock[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* TCU Exception Buffer */ + +struct TCUEB_st { /* Little Endian */ + pseudo_bit_t ebw0_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_5[0x00020]; +/* ---------------------------------------------------------
*/ + pseudo_bit_t ebw3_5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_14[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_14[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_14[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_14[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_23[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_32[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_41[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_41[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_41[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_41[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_50[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_50[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_50[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_50[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_59[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_59[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_59[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_59[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_61[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_61[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_61[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_61[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_63[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_63[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_63[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_63[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_64[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_64[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_64[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_64[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_65[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_65[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_65[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_65[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_66[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_66[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_66[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_66[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_67[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_67[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_67[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_67[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_68[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_68[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_68[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_68[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_69[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_69[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_69[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_69[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_70[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_70[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_70[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_70[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_71[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_71[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_71[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_71[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_72[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_72[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_72[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_72[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_73[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_73[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_73[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_73[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_74[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_74[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_74[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_74[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_75[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_75[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_75[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_75[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_76[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_76[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_76[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_76[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_77[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_77[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_77[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_77[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_78[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_78[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_78[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_78[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_79[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_79[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_79[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_79[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_80[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_80[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_80[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_80[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_81[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_81[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_81[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_81[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_82[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_82[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_82[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_82[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_83[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_83[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_83[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_83[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_84[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_84[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_84[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_84[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_85[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_85[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_85[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_85[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_86[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_86[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_86[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_86[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_87[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_87[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_87[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_87[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_88[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_88[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_88[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_88[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_89[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_89[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_89[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_89[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_90[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_90[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_90[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_90[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_91[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_91[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_91[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_91[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_92[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_92[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_92[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_92[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_93[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_93[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_93[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_93[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_94[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_94[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_94[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_94[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_95[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_95[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_95[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_95[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_96[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_96[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_96[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_96[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_97[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_97[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_97[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_97[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_98[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_98[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_98[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_98[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_99[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_99[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_99[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_99[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_100[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_100[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_100[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_100[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_101[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_101[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_101[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_101[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_102[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_102[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_102[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_102[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_103[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_103[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_103[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_103[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_104[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_104[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_104[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_104[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw0_105[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_105[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_105[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_105[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_106[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_106[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_106[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_106[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_107[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_107[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_107[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_107[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_108[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_108[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_108[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_108[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_109[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_109[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_109[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_109[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_110[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_110[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_110[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_110[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_111[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_111[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_111[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_111[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_112[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_112[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_112[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_112[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_113[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_113[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_113[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t 
ebw3_113[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_114[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_114[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_114[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_114[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_115[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_115[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_115[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_115[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_116[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_116[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_116[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_116[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_117[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_117[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_117[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_117[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_118[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_118[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_118[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_118[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_119[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_119[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_119[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_119[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_120[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_120[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_120[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_120[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_121[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_121[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_121[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_121[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_122[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_122[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_122[0x00020]; +/* --------------------------------------------------------- */ + 
pseudo_bit_t ebw3_122[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_123[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_123[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_123[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_123[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_124[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_124[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_124[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_124[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_125[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_125[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_125[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_125[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_126[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_126[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_126[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_126[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_127[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_127[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_127[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_127[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_128[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_128[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_128[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_128[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_129[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_129[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_129[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_129[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_130[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_130[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_130[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_130[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_131[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_131[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_131[0x00020]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t ebw3_131[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_132[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_132[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_132[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_132[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_133[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_133[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_133[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_133[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_134[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_134[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_134[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_134[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_135[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_135[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_135[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_135[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_136[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_136[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_136[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_136[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_137[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_137[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_137[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_137[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_138[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_138[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_138[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_138[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_139[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_139[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_139[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_139[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_140[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_140[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t 
ebw2_140[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_140[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_141[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_141[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_141[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_141[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_142[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_142[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_142[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_142[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw0_143[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw1_143[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw2_143[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ebw3_143[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x03800]; +/* --------------------------------------------------------- */ +}; + +/* TCU Exception Buffer FIFO Controller (see the fill-level sketch below) */ + +struct TCUEBCTL_st { /* Little Endian */ + pseudo_bit_t eb_write_ptr[0x00008]; /* eb write pointer */ + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t eb_read_ptr[0x00008]; /* eb read pointer */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* @TCU_BIST */ + +struct TCU_BIST_st { /* Little Endian */ + pseudo_bit_t tcu_bist0_rdw_0[0x00007];/* 131 */ + pseudo_bit_t reserved0[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist0_rdw_1[0x00007];/* 131 */ + pseudo_bit_t reserved1[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist2_wdw_0[0x00006];/* 131 */ + pseudo_bit_t reserved2[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist2_wdw_1[0x00006];/* 131 */ + pseudo_bit_t reserved3[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist2_wdw_2[0x00006];/* 132 */ + pseudo_bit_t reserved4[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist2_wdw_3[0x00006];/* 132 */ + pseudo_bit_t reserved5[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist3_wdw_0[0x00007];/* 132 */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_bist3_wdw_1[0x00007];/* 132 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_0[0x00006];/* 160 */ + pseudo_bit_t reserved8[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_1[0x00006];/* 160 */ + pseudo_bit_t reserved9[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_0[0x00006];/* 160 */
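/*
 * A minimal sketch of consuming the TCUEBCTL_st pointers above, assuming
 * eb_write_ptr and eb_read_ptr are free-running 8-bit counters that wrap;
 * the helper name is hypothetical and not part of this tree.
 */
#include <stdint.h>

/* Exception-buffer entries currently queued: the wrap-safe distance
 * between the two 8-bit pointers. */
static inline uint8_t tcu_eb_fill_level(uint8_t eb_write_ptr, uint8_t eb_read_ptr)
{
	return (uint8_t)(eb_write_ptr - eb_read_ptr);
}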
+ pseudo_bit_t reserved10[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_1[0x00006];/* 160 */ + pseudo_bit_t reserved11[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x0000c]; + pseudo_bit_t reserved12[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x0000d]; + pseudo_bit_t reserved13[0x00013]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00017]; + pseudo_bit_t reserved14[0x00009]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00220]; +/* --------------------------------------------------------- */ +}; + +/* TCU Packet Checks Debug */ + +struct TCUTCDEBUG_st { /* Little Endian */ + struct TCUCLIDEBUG_st tcuclidebug; /* tcucli debug */ +/* --------------------------------------------------------- */ + struct TCUTCQP2EEDEBUG_st tcutcqp2eedebug;/* tcutcqp2ee debug */ +/* --------------------------------------------------------- */ + struct TCUTCCHKER_st tcutcchker; /* tcutcchker debug */ +/* --------------------------------------------------------- */ + struct TCUTCCHKDI_st tcutcchkdi; /* tcutcchkdi debug */ +/* --------------------------------------------------------- */ + struct TCUTCHEADERDEBUG_st tcutcheaderdebug;/* tcutcheader debug */ +/* --------------------------------------------------------- */ + struct TCUTCQPCRDDEBUG_st tcutcqpcrddebug;/* tcutcqpcrd debug */ +/* --------------------------------------------------------- */ + struct TCUTCQPCWRDEBUG_st tcutcqpcwrdebug;/* tcutcqpcwr debug */ +/* --------------------------------------------------------- */ + struct TCUDICMDDEBUG_st tcudicmddebug;/* tcudicmd debug */ +/* --------------------------------------------------------- */ + struct TCUDIPYLDDEBUG_st tcudipylddebug;/* tcudipyld debug */ +/* --------------------------------------------------------- */ + struct TCUPBDEBUG_st tcupbdebug; /* tcu packet buffer debug */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Fence Mechanism */ + +struct CEFENCE_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t Command[0x00006]; /* Command + + 0x00 Reserved + 0x01 Copy incount to limitcount + 0x02 Do not copy incount to limitcount + 0x03-0x3F Reserved */ + pseudo_bit_t b[0x00001]; /* Busy - When sw sets this bit, the command in the command field is executed by hardware. */ + pseudo_bit_t s[0x00001]; /* Semaphore - When read, if it was zero it is set to one and zero is returned. */ +/* --------------------------------------------------------- */ + pseudo_bit_t incount[0x00020]; /* Input Counter, incremented for every entry that enters the pipe */ +/* --------------------------------------------------------- */ + pseudo_bit_t outcount[0x00020]; /* Output Counter - Incremented for every entry that exits the pipe.
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t apm_req_qpn[0x00020]; /* TCU passes QPN for APM request */ +/* --------------------------------------------------------- */ +}; + +/* Fence Mechanism (see the usage sketch below) */ + +struct FENCE_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t Command[0x00006]; /* Command + + 0x00 Reserved + 0x01 Copy incount to limitcount + 0x02 Do not copy incount to limitcount + 0x03-0x3F Reserved */ + pseudo_bit_t b[0x00001]; /* Busy - When sw sets this bit, the command in the command field is executed by hardware. */ + pseudo_bit_t s[0x00001]; /* Semaphore - When read, if it was zero it is set to one and zero is returned. */ +/* --------------------------------------------------------- */ + pseudo_bit_t incount[0x00020]; /* Input Counter, incremented for every entry that enters the pipe */ +/* --------------------------------------------------------- */ + pseudo_bit_t outcount[0x00020]; /* Output Counter - Incremented for every entry that exits the pipe. */ +/* --------------------------------------------------------- */ + pseudo_bit_t limitcount[0x00020]; /* Limit Count - If the b bit is set, when outcount reaches the value in limitcount the b bit is cleared. */ +/* --------------------------------------------------------- */ +}; + +/* Cause Registers */ + +struct TCU_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t pb_parity_err[0x00001]; + pseudo_bit_t eb_parity_err[0x00001]; + pseudo_bit_t ce_parity_err[0x00001]; + pseudo_bit_t tcu_crtimeout_occurred[0x00001]; + pseudo_bit_t irisc_nsw_error[0x00001]; + pseudo_bit_t reserved0[0x0001b]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* Partition Keys (P_Key) Table (see the indexing sketch after the table) */ + +struct PKEYTABLE_st { /* Little Endian */ + pseudo_bit_t pkey0[0x00010]; + pseudo_bit_t reserved0[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey1[0x00010]; + pseudo_bit_t reserved1[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey2[0x00010]; + pseudo_bit_t reserved2[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey3[0x00010]; + pseudo_bit_t reserved3[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey4[0x00010]; + pseudo_bit_t reserved4[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey5[0x00010]; + pseudo_bit_t reserved5[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey6[0x00010]; + pseudo_bit_t reserved6[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey7[0x00010]; + pseudo_bit_t reserved7[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey8[0x00010]; + pseudo_bit_t reserved8[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey9[0x00010]; + pseudo_bit_t reserved9[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey10[0x00010]; + pseudo_bit_t reserved10[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey11[0x00010]; + pseudo_bit_t reserved11[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey12[0x00010]; + pseudo_bit_t reserved12[0x00010];
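/*
 * A minimal sketch of driving the fence mechanism described by the
 * CEFENCE_st/FENCE_st registers above: acquire the semaphore, latch incount
 * into limitcount with command 0x01 plus the busy bit, and wait for hardware
 * to clear the busy bit once outcount catches up. The bit positions are read
 * off the struct layout (Command at bits 29:24, b at 30, s at 31); the mmio
 * accessors and the release-by-writing-zero step are assumptions, not
 * documented in this file.
 */
#include <stdint.h>

#define FENCE_CMD_COPY_INCOUNT	(0x01u << 24)	/* Copy incount to limitcount */
#define FENCE_B_BIT		(1u << 30)	/* Busy: set by SW, cleared by HW */
#define FENCE_S_BIT		(1u << 31)	/* Semaphore: read-to-acquire */

extern uint32_t mmio_read32(volatile uint32_t *reg);	/* hypothetical accessors */
extern void mmio_write32(volatile uint32_t *reg, uint32_t val);

/* Block until every entry that was in the pipe when we started has drained. */
static void fence_drain(volatile uint32_t *fence_ctl)
{
	/* Reading 's' returns 0 and sets it to 1 when the semaphore is free. */
	while (mmio_read32(fence_ctl) & FENCE_S_BIT)
		;
	/* Latch incount into limitcount and arm the busy bit. */
	mmio_write32(fence_ctl, FENCE_CMD_COPY_INCOUNT | FENCE_B_BIT);
	/* HW clears 'b' when outcount reaches limitcount. */
	while (mmio_read32(fence_ctl) & FENCE_B_BIT)
		;
	mmio_write32(fence_ctl, 0);	/* assumed: release the semaphore */
}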
+/* --------------------------------------------------------- */ + pseudo_bit_t pkey13[0x00010]; + pseudo_bit_t reserved13[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey14[0x00010]; + pseudo_bit_t reserved14[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey15[0x00010]; + pseudo_bit_t reserved15[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey16[0x00010]; + pseudo_bit_t reserved16[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey17[0x00010]; + pseudo_bit_t reserved17[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey18[0x00010]; + pseudo_bit_t reserved18[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey19[0x00010]; + pseudo_bit_t reserved19[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey20[0x00010]; + pseudo_bit_t reserved20[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey21[0x00010]; + pseudo_bit_t reserved21[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey22[0x00010]; + pseudo_bit_t reserved22[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey23[0x00010]; + pseudo_bit_t reserved23[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey24[0x00010]; + pseudo_bit_t reserved24[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey25[0x00010]; + pseudo_bit_t reserved25[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey26[0x00010]; + pseudo_bit_t reserved26[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey27[0x00010]; + pseudo_bit_t reserved27[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey28[0x00010]; + pseudo_bit_t reserved28[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey29[0x00010]; + pseudo_bit_t reserved29[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey30[0x00010]; + pseudo_bit_t reserved30[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey31[0x00010]; + pseudo_bit_t reserved31[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey32[0x00010]; + pseudo_bit_t reserved32[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey33[0x00010]; + pseudo_bit_t reserved33[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey34[0x00010]; + pseudo_bit_t reserved34[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey35[0x00010]; + pseudo_bit_t reserved35[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey36[0x00010]; + pseudo_bit_t reserved36[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey37[0x00010]; + pseudo_bit_t reserved37[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey38[0x00010]; + pseudo_bit_t reserved38[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey39[0x00010]; +
pseudo_bit_t reserved39[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey40[0x00010]; + pseudo_bit_t reserved40[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey41[0x00010]; + pseudo_bit_t reserved41[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey42[0x00010]; + pseudo_bit_t reserved42[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey43[0x00010]; + pseudo_bit_t reserved43[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey44[0x00010]; + pseudo_bit_t reserved44[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey45[0x00010]; + pseudo_bit_t reserved45[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey46[0x00010]; + pseudo_bit_t reserved46[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey47[0x00010]; + pseudo_bit_t reserved47[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey48[0x00010]; + pseudo_bit_t reserved48[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey49[0x00010]; + pseudo_bit_t reserved49[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey50[0x00010]; + pseudo_bit_t reserved50[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey51[0x00010]; + pseudo_bit_t reserved51[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey52[0x00010]; + pseudo_bit_t reserved52[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey53[0x00010]; + pseudo_bit_t reserved53[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey54[0x00010]; + pseudo_bit_t reserved54[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey55[0x00010]; + pseudo_bit_t reserved55[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey56[0x00010]; + pseudo_bit_t reserved56[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey57[0x00010]; + pseudo_bit_t reserved57[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey58[0x00010]; + pseudo_bit_t reserved58[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey59[0x00010]; + pseudo_bit_t reserved59[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey60[0x00010]; + pseudo_bit_t reserved60[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey61[0x00010]; + pseudo_bit_t reserved61[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey62[0x00010]; + pseudo_bit_t reserved62[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey63[0x00010]; + pseudo_bit_t reserved63[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey64[0x00010]; + pseudo_bit_t reserved64[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey65[0x00010]; + pseudo_bit_t reserved65[0x00010]; +/* --------------------------------------------------------- 
*/ + pseudo_bit_t pkey66[0x00010]; + pseudo_bit_t reserved66[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey67[0x00010]; + pseudo_bit_t reserved67[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey68[0x00010]; + pseudo_bit_t reserved68[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey69[0x00010]; + pseudo_bit_t reserved69[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey70[0x00010]; + pseudo_bit_t reserved70[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey71[0x00010]; + pseudo_bit_t reserved71[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey72[0x00010]; + pseudo_bit_t reserved72[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey73[0x00010]; + pseudo_bit_t reserved73[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey74[0x00010]; + pseudo_bit_t reserved74[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey75[0x00010]; + pseudo_bit_t reserved75[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey76[0x00010]; + pseudo_bit_t reserved76[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey77[0x00010]; + pseudo_bit_t reserved77[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey78[0x00010]; + pseudo_bit_t reserved78[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey79[0x00010]; + pseudo_bit_t reserved79[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey80[0x00010]; + pseudo_bit_t reserved80[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey81[0x00010]; + pseudo_bit_t reserved81[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey82[0x00010]; + pseudo_bit_t reserved82[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey83[0x00010]; + pseudo_bit_t reserved83[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey84[0x00010]; + pseudo_bit_t reserved84[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey85[0x00010]; + pseudo_bit_t reserved85[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey86[0x00010]; + pseudo_bit_t reserved86[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey87[0x00010]; + pseudo_bit_t reserved87[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey88[0x00010]; + pseudo_bit_t reserved88[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey89[0x00010]; + pseudo_bit_t reserved89[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey90[0x00010]; + pseudo_bit_t reserved90[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey91[0x00010]; + pseudo_bit_t reserved91[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey92[0x00010]; + pseudo_bit_t reserved92[0x00010]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t pkey93[0x00010]; + pseudo_bit_t reserved93[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey94[0x00010]; + pseudo_bit_t reserved94[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey95[0x00010]; + pseudo_bit_t reserved95[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey96[0x00010]; + pseudo_bit_t reserved96[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey97[0x00010]; + pseudo_bit_t reserved97[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey98[0x00010]; + pseudo_bit_t reserved98[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey99[0x00010]; + pseudo_bit_t reserved99[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey100[0x00010]; + pseudo_bit_t reserved100[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey101[0x00010]; + pseudo_bit_t reserved101[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey102[0x00010]; + pseudo_bit_t reserved102[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey103[0x00010]; + pseudo_bit_t reserved103[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey104[0x00010]; + pseudo_bit_t reserved104[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey105[0x00010]; + pseudo_bit_t reserved105[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey106[0x00010]; + pseudo_bit_t reserved106[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey107[0x00010]; + pseudo_bit_t reserved107[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey108[0x00010]; + pseudo_bit_t reserved108[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey109[0x00010]; + pseudo_bit_t reserved109[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey110[0x00010]; + pseudo_bit_t reserved110[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey111[0x00010]; + pseudo_bit_t reserved111[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey112[0x00010]; + pseudo_bit_t reserved112[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey113[0x00010]; + pseudo_bit_t reserved113[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey114[0x00010]; + pseudo_bit_t reserved114[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey115[0x00010]; + pseudo_bit_t reserved115[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey116[0x00010]; + pseudo_bit_t reserved116[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey117[0x00010]; + pseudo_bit_t reserved117[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey118[0x00010]; + pseudo_bit_t reserved118[0x00010]; +/* --------------------------------------------------------- 
*/ + pseudo_bit_t pkey119[0x00010]; + pseudo_bit_t reserved119[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey120[0x00010]; + pseudo_bit_t reserved120[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey121[0x00010]; + pseudo_bit_t reserved121[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey122[0x00010]; + pseudo_bit_t reserved122[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey123[0x00010]; + pseudo_bit_t reserved123[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey124[0x00010]; + pseudo_bit_t reserved124[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey125[0x00010]; + pseudo_bit_t reserved125[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey126[0x00010]; + pseudo_bit_t reserved126[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t pkey127[0x00010]; + pseudo_bit_t reserved127[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine Configuration */ + +struct CECFG_st { /* Little Endian */ + struct CEINFIFO_st ceinputfifo; +/* --------------------------------------------------------- */ + struct CECQC_st cecqc; +/* --------------------------------------------------------- */ + struct CELDB_st celdb; +/* --------------------------------------------------------- */ + pseudo_bit_t ce_error[0x00009]; /* Completion Engine Exception Status + + */ + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t ce_current_unit_reg[0x00002];/* current nswr fifo which cecore works with (2'b01 or 2'b10) + */ + pseudo_bit_t reserved1[0x00012]; +/* --------------------------------------------------------- */ + pseudo_bit_t irisc_release_cmd[0x00005];/* Completion Engine Exception Command + */ + pseudo_bit_t reserved2[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00040]; +/* --------------------------------------------------------- */ + struct CEGRLCFG_st cegrlcfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00100]; +/* --------------------------------------------------------- */ + struct TCUCEEXEGW_st ceexegw; /* Completion Engine Gateway to generate Exe entries in the CE */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00080]; +/* --------------------------------------------------------- */ + struct CEDEBUG_st cedebug; +/* --------------------------------------------------------- */ +}; + +/* TCU Multicast Cache */ + +struct TCUMCCache_st { /* Little Endian */ + struct MCCACHEGID_st mcgid00; /* multicast cache gid 00 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid01; /* multicast cache gid 01 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid02; /* multicast cache gid 02 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid03; /* multicast cache gid 03 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid04; /* multicast cache gid 04 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid05; /* multicast cache gid 05 */
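/*
 * PKEYTABLE_st above places each of the 128 P_Keys in the low 16 bits of its
 * own 32-bit dword, with the upper 16 bits reserved. A minimal sketch of
 * indexing it, assuming the table is mapped as an array of little-endian
 * dwords; the helper name is hypothetical.
 */
#include <stdint.h>

static inline uint16_t pkey_table_read(const volatile uint32_t *pkey_table,
				       unsigned int idx)	/* idx: 0..127 */
{
	return (uint16_t)(pkey_table[idx] & 0xffffu);
}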
+/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid06; /* multicast cache gid 06 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid07; /* multicast cache gid 07 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid08; /* multicast cache gid 08 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid09; /* multicast cache gid 09 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid10; /* multicast cache gid 10 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid11; /* multicast cache gid 11 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid12; /* multicast cache gid 12 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid13; /* multicast cache gid 13 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid14; /* multicast cache gid 14 */ +/* --------------------------------------------------------- */ + struct MCCACHEGID_st mcgid15; /* multicast cache gid 15 */ +/* --------------------------------------------------------- */ + struct MCCACHEQPN_st mcqpn; /* multicast QP numbers */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_multi_dgid_111_96[0x00010];/* multicast Constant GID bits [111:32] - bits [111:96] */ + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t cfg_multi_dgid_127_120[0x00008];/* multicast constant GID bits[127:120] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_multi_dgid_95_64[0x00020];/* multicast Constant GID bits [111:32] - bits [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_multi_dgid_63_32[0x00020];/* multicast Constant GID bits [111:32] - bits [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x009a0]; +/* --------------------------------------------------------- */ +}; + +/* tcu gateway */ + +struct TCUTCGW_st { /* Little Endian */ + struct TCUTCDIGW_st digw; /* tcu tc gateway controller + Setting the GW for HW is done by writing: + 1. destination RDE and Command valid bit set + 2. destination CLI. + */ +/* --------------------------------------------------------- */ + struct TCUDIGWRAM_st tcudigwram; /* array of the tcu tc gateway */ +/* --------------------------------------------------------- */ +}; + +/* TCU End of Packet Trap Command (see the dispatch sketch below) */ + +struct TCUPKTMV_st { /* Little Endian */ + pseudo_bit_t erp_move[0x00004]; /* IRISC orders where to send a packet with an error: + TCU_CONT 0x11 + TCU_DROP 0x12 + TCU_COPY 0x14 + */ + pseudo_bit_t pkt_toeb_last[0x00001];/* indication that the packet has been pushed into the exception buffer. When this bit is reset the packet is in the Exception buffer */ + pseudo_bit_t reserved0[0x0001b]; +/* --------------------------------------------------------- */ +};
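/*
 * A minimal sketch of the packet-move command described by TCUPKTMV_st above.
 * Note erp_move is only four bits wide while the listed codes (0x11/0x12/0x14)
 * are five-bit values, so the assumption here is that only the low nibble is
 * written; the accessor and helper names are hypothetical.
 */
#include <stdint.h>

#define TCU_CONT	0x11u	/* continue the trapped packet */
#define TCU_DROP	0x12u	/* drop it */
#define TCU_COPY	0x14u	/* copy it */

extern void mmio_write32(volatile uint32_t *reg, uint32_t val);	/* hypothetical */

static void tcu_pktmv_dispose(volatile uint32_t *pktmv_reg, uint32_t cmd)
{
	mmio_write32(pktmv_reg, cmd & 0x0fu);	/* erp_move occupies bits [3:0] */
}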
+ +/* QPC Access Gateway (see the handshake sketch below) */ + +struct QPCGW_st { /* Little Endian */ + pseudo_bit_t syndrome[0x00008]; /* output: valid during gwbusy=0 after "read" and "read&then" qpc commands */ + pseudo_bit_t qpstate[0x00004]; /* output: valid during gwbusy=0 after "read" and "read&then" qpc commands. */ + pseudo_bit_t illaddr[0x00001]; /* error indication of access to illegal address in QPCGW. + output: asserted at the moment of the event. + deasserted at rose(gwbusy). + In all error cases the operation is dropped. */ + pseudo_bit_t illread[0x00001]; /* error indication of reading from any QPCGW register, other than QPCGW header, during gwbusy=1 + output: asserted at the moment of the event. deasserted at rose(gwbusy). + The read data during this operation is invalid. */ + pseudo_bit_t illwrite[0x00001]; + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t cqpc_eu[0x00005]; /* Execution unit number of EXE_CQPC. + input: must be valid during EXE_CQPC commands at gwbusy=1 */ + pseudo_bit_t cache_selector[0x00002];/* input: must be valid during gwbusy=1 + 0 - SQPC + 1 - RQPC + 2 - CQC + */ + pseudo_bit_t gwopcode[0x00006]; /* input: must be valid during gwbusy=1 + POSSIBLE OPCODES: + 0 - read from EXE_CQPC to the GW + 1 - write from the GW to EXE_CQPC + 2 - execute QPC command + */ + pseudo_bit_t gwbusy[0x00001]; /* input-output: asserted by FW, deasserted by HW + This bit is also known as the GO bit. */ + pseudo_bit_t gwlocked[0x00001]; /* input-output: asserted by HW, deasserted by FW */ +/* --------------------------------------------------------- */ + struct QPC_command_header_st QPC_command_header; +/* --------------------------------------------------------- */ + struct QPCLINE_st line0; +/* --------------------------------------------------------- */ + struct QPCLINE_st line1; +/* --------------------------------------------------------- */ + struct QPCLINE_st line2; +/* --------------------------------------------------------- */ + struct QPCLINE_st line3; +/* --------------------------------------------------------- */ + struct QPCLINE_st line4; +/* --------------------------------------------------------- */ + struct QPCLINE_st line5; +/* --------------------------------------------------------- */ + struct QPCLINE_st line6; +/* --------------------------------------------------------- */ + struct QPCLINE_st line7; +/* --------------------------------------------------------- */ + pseudo_bit_t BE0[0x00010]; /* Byte Enables for QPC data line 0 + + bit 15 - is for line[127:120] + bit 14 - is for line[119:112] + . + .
+ bit 0 - is for line[7:0] + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved1[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE1[0x00010]; /* Byte Enables for QPC data line 1 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved2[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE2[0x00010]; /* Byte Enables for QPC data line 2 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved3[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE3[0x00010]; /* Byte Enables for QPC data line 3 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved4[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE4[0x00010]; /* Byte Enables for QPC data line 4 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved5[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE5[0x00010]; /* Byte Enables for QPC data line 5 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved6[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE6[0x00010]; /* Byte Enables for QPC data line 6 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved7[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t BE7[0x00010]; /* Byte Enables for QPC data line 7 + + * This field is write only (see bug 4963) */ + pseudo_bit_t reserved8[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00240]; +/* --------------------------------------------------------- */ +}; + +/* Local GIDs Table */ + +struct MGIDTABLE_st { /* Little Endian */ + struct GID_st mgid0; +/* --------------------------------------------------------- */ + struct GID_st mgid1; +/* --------------------------------------------------------- */ + struct GID_st mgid2; +/* --------------------------------------------------------- */ + struct GID_st mgid3; +/* --------------------------------------------------------- */ + struct GID_st mgid4; +/* --------------------------------------------------------- */ + struct GID_st mgid5; +/* --------------------------------------------------------- */ + struct GID_st mgid6; +/* --------------------------------------------------------- */ + struct GID_st mgid7; +/* --------------------------------------------------------- */ + struct GID_st mgid8; +/* --------------------------------------------------------- */ + struct GID_st mgid9; +/* --------------------------------------------------------- */ + struct GID_st mgid10; +/* --------------------------------------------------------- */ + struct GID_st mgid11; +/* --------------------------------------------------------- */ + struct GID_st mgid12; +/* --------------------------------------------------------- */ + struct GID_st mgid13; +/* --------------------------------------------------------- */ + struct GID_st mgid14; +/* --------------------------------------------------------- */ + struct GID_st mgid15; +/* --------------------------------------------------------- */ + struct GID_st mgid16; +/* --------------------------------------------------------- */ + struct GID_st mgid17; +/* --------------------------------------------------------- */ + struct GID_st mgid18; +/* --------------------------------------------------------- */ + struct GID_st mgid19; 
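/*
 * A minimal sketch of the QPCGW_st handshake described above: FW programs
 * cache_selector and gwopcode and asserts gwbusy (the GO bit); HW deasserts
 * gwbusy on completion, after which syndrome and qpstate are valid for read
 * commands. Bit positions are read off the struct (syndrome 7:0, qpstate 11:8,
 * cache_selector 23:22, gwopcode 29:24, gwbusy 30, gwlocked 31); the mmio
 * accessors are hypothetical, and the gwlocked arbitration (asserted by HW,
 * cleared by FW) is omitted for brevity.
 */
#include <stdint.h>

#define QPCGW_SEL_SQPC	(0u << 22)
#define QPCGW_SEL_RQPC	(1u << 22)
#define QPCGW_SEL_CQC	(2u << 22)
#define QPCGW_OP_READ	(0u << 24)	/* EXE_CQPC -> GW */
#define QPCGW_OP_WRITE	(1u << 24)	/* GW -> EXE_CQPC */
#define QPCGW_OP_EXEC	(2u << 24)	/* execute QPC command */
#define QPCGW_BUSY	(1u << 30)	/* the GO bit */

extern uint32_t mmio_read32(volatile uint32_t *reg);	/* hypothetical accessors */
extern void mmio_write32(volatile uint32_t *reg, uint32_t val);

/* Read one QPC context into the gateway and return the completion syndrome. */
static uint8_t qpcgw_read_sqpc(volatile uint32_t *qpcgw_ctl)
{
	uint32_t ctl;

	/* Inputs must stay valid while gwbusy=1, so set them in one write. */
	mmio_write32(qpcgw_ctl, QPCGW_SEL_SQPC | QPCGW_OP_READ | QPCGW_BUSY);
	do {
		ctl = mmio_read32(qpcgw_ctl);	/* HW deasserts gwbusy when done */
	} while (ctl & QPCGW_BUSY);
	return (uint8_t)(ctl & 0xffu);	/* syndrome, valid at gwbusy=0 */
}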
+/* --------------------------------------------------------- */ + struct GID_st mgid20; +/* --------------------------------------------------------- */ + struct GID_st mgid21; +/* --------------------------------------------------------- */ + struct GID_st mgid22; +/* --------------------------------------------------------- */ + struct GID_st mgid23; +/* --------------------------------------------------------- */ + struct GID_st mgid24; +/* --------------------------------------------------------- */ + struct GID_st mgid25; +/* --------------------------------------------------------- */ + struct GID_st mgid26; +/* --------------------------------------------------------- */ + struct GID_st mgid27; +/* --------------------------------------------------------- */ + struct GID_st mgid28; +/* --------------------------------------------------------- */ + struct GID_st mgid29; +/* --------------------------------------------------------- */ + struct GID_st mgid30; +/* --------------------------------------------------------- */ + struct GID_st mgid31; +/* --------------------------------------------------------- */ + struct GID_st mgid32; +/* --------------------------------------------------------- */ + struct GID_st mgid33; +/* --------------------------------------------------------- */ + struct GID_st mgid34; +/* --------------------------------------------------------- */ + struct GID_st mgid35; +/* --------------------------------------------------------- */ + struct GID_st mgid36; +/* --------------------------------------------------------- */ + struct GID_st mgid37; +/* --------------------------------------------------------- */ + struct GID_st mgid38; +/* --------------------------------------------------------- */ + struct GID_st mgid39; +/* --------------------------------------------------------- */ + struct GID_st mgid40; +/* --------------------------------------------------------- */ + struct GID_st mgid41; +/* --------------------------------------------------------- */ + struct GID_st mgid42; +/* --------------------------------------------------------- */ + struct GID_st mgid43; +/* --------------------------------------------------------- */ + struct GID_st mgid44; +/* --------------------------------------------------------- */ + struct GID_st mgid45; +/* --------------------------------------------------------- */ + struct GID_st mgid46; +/* --------------------------------------------------------- */ + struct GID_st mgid47; +/* --------------------------------------------------------- */ + struct GID_st mgid48; +/* --------------------------------------------------------- */ + struct GID_st mgid49; +/* --------------------------------------------------------- */ + struct GID_st mgid50; +/* --------------------------------------------------------- */ + struct GID_st mgid51; +/* --------------------------------------------------------- */ + struct GID_st mgid52; +/* --------------------------------------------------------- */ + struct GID_st mgid53; +/* --------------------------------------------------------- */ + struct GID_st mgid54; +/* --------------------------------------------------------- */ + struct GID_st mgid55; +/* --------------------------------------------------------- */ + struct GID_st mgid56; +/* --------------------------------------------------------- */ + struct GID_st mgid57; +/* --------------------------------------------------------- */ + struct GID_st mgid58; +/* --------------------------------------------------------- */ + struct GID_st 
mgid59; +/* --------------------------------------------------------- */ + struct GID_st mgid60; +/* --------------------------------------------------------- */ + struct GID_st mgid61; +/* --------------------------------------------------------- */ + struct GID_st mgid62; +/* --------------------------------------------------------- */ + struct GID_st mgid63; +/* --------------------------------------------------------- */ +}; + +/* Trapped Incoming Packet Context Data Read from QPC */ + +struct TCUTCQPC_st { /* Little Endian */ + pseudo_bit_t migreq[0x00001]; /* migreq data from QPC */ + pseudo_bit_t ts[0x00003]; /* transport service from QPC */ + pseudo_bit_t exqp[0x00001]; /* exception QP from QPC */ + pseudo_bit_t rd[0x00001]; /* read bit from QPC */ + pseudo_bit_t wr[0x00001]; /* write bit from QPC */ + pseudo_bit_t at[0x00001]; /* atomic bit from QPC */ + pseudo_bit_t grhen[0x00001]; /* GRH enable bit from QPC */ + pseudo_bit_t mylid[0x00007]; /* my LID from QPC */ + pseudo_bit_t rmtlid[0x00010]; /* remote LID from QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t rmtgid_127_96[0x00020];/* remote GID in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t rmtgid_95_64[0x00020]; /* remote GID in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t rmtgid_63_32[0x00020]; /* remote GID in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t rmtgid_31_0[0x00020]; /* remote GID in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t errpsn[0x00018]; /* expected read response PSN from QPC */ + pseudo_bit_t errpsn_v[0x00001]; /* expected read response PSN valid bit from QPC */ + pseudo_bit_t no_err_psn_lock[0x00001];/* no expected PSN lock bit from QPC */ + pseudo_bit_t rdatm[0x00001]; /* read_atomic bit from QPC. */ + pseudo_bit_t tculck[0x00001]; /* tcu lock bit from QPC (for Read Response/ Atomic lock) */ + pseudo_bit_t reserved0[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t eesqpn[0x00018]; /* EE source QP number in QPC */ + pseudo_bit_t pkey_idx[0x00008]; /* P key index in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t eedqpn[0x00018]; /* EE destination QP number in QPC */ + pseudo_bit_t mygid[0x00008]; /* my GID in QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t epsn[0x00018]; /* expected PSN */ + pseudo_bit_t opc_last[0x00002]; /* Opcode last field in QPC */ + pseudo_bit_t rsvd_opc_last[0x00006];/* the rest of the bits in the byte where the opcode last field is. */ +/* --------------------------------------------------------- */ + pseudo_bit_t highestpsn[0x00018]; /* highest psn field.
*/ + pseudo_bit_t portid[0x00004]; /* port number field in QPC */ + pseudo_bit_t mtu[0x00003]; /* MTU field in QPC */ + pseudo_bit_t reserved1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t onapsn[0x00018]; /* Oldest non acked PSN */ + pseudo_bit_t reserved2[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t lstackreqpsn[0x00018]; /* last ackreq PSN */ + pseudo_bit_t reserved3[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t qkey[0x00020]; /* Q key */ +/* --------------------------------------------------------- */ + pseudo_bit_t e2ecredits[0x00010]; /* e2e credits field in QPC */ + pseudo_bit_t reserved4[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00140]; +/* --------------------------------------------------------- */ +}; + +/* Trapped Incoming Packet Details */ + +struct TCUTCHEADER_st { /* Little Endian */ + struct LRH_st LRH; +/* --------------------------------------------------------- */ + struct GRH_st GRH; +/* --------------------------------------------------------- */ + struct BTH_st BTH; /* BTH fields */ +/* --------------------------------------------------------- */ + struct RDETH_st RDETH; /* RDETH fields */ +/* --------------------------------------------------------- */ + struct DETH_st DETH; /* DETH fields */ +/* --------------------------------------------------------- */ + struct RETH_st RETH; /* RETH fields */ +/* --------------------------------------------------------- */ + struct AtomicETH_st AtomicETH; /* AtomicETH fields */ +/* --------------------------------------------------------- */ + struct AETH_st AETH; /* AETH fields */ +/* --------------------------------------------------------- */ + struct AtomicAckETH_st AtomicAckETH;/* AtomicAckETH fields */ +/* --------------------------------------------------------- */ + struct immDt_st immDT; /* immediate data field */ +/* --------------------------------------------------------- */ + struct IBTCUdata_st IBtcudata; /* data from IB: port number, LMC, real packet length */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* TCU General Configuration */ + +struct TCUGRLCFG_st { /* Little Endian */ + struct TCUINITCREDITS_st tcuinitcredits;/* tcu credits */ +/* --------------------------------------------------------- */ + struct TCUTCCFG_st tcutccfg; /* tcu tc cfg */ +/* --------------------------------------------------------- */ + struct TCUCLI_st tcucli; /* tcucli cfg */ +/* --------------------------------------------------------- */ + struct TCUTCCLI_st tcutccli; /* tcutccli cfg */ +/* --------------------------------------------------------- */ + struct TCUTCQP2EE_st tcutcqp2ee; /* tcu tc qp2ee cfg */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register + bit 0 - when set, disables non-starvation in the ce input fifo + */ +/* --------------------------------------------------------- */ +}; + +/* tcu checks cause data */ + +struct TCUCHKCAUSE_st { /* Little Endian */ + struct TCUCHKCAUSE_REG0_st cause_reg_0;/* checker cause register 0: + // 31. 1'b0, + // 30. 1'b0, + // 29. 1'b0, + // 28. 1'b0, + // 27. 1'b0, + // 26. 1'b0, + // 25. 1'b0, + // 24. 1'b0, + // 23. bad_qp2ee_eestate, + // 22.
bad_qp2ee_eenvalid, + // 21. bad_eestate, + // 20. bad_eevalid, + // 19. bad_opcode_ts, + // 18. bad_qpstate, + // 17. bad_qpvalid, + // 16. bad_opcode_int_drop, + // 15. bad_portid, + // 14. bad_malformed_packet, + // 13. bad_bth_rsv, + // 12. bad_bth_rsv_var, + // 11. bad_tver, + // 10. bad_no_bth, + // 09. bad_ipver, + // 08. bad_nxthdr, + // 07. bad_cli_check, + // 06. bad_icrc, + // 05. bad_pktlen, + // 04. bad_lrh_rsv, + // 03. 1'b0, + // 02. 1'b0, + // 01. packet_is_raw_ethertype, + // 00. packet_is_raw_ipv6 + */ +/* --------------------------------------------------------- */ + struct TCUCHKCAUSE_REG1_st cause_reg_1;/* checker cause register 1: + 31. 1'b0, + 30. 1'b0, + 29. bad_multicast_match, + 28. bad_multicast_pkt, + 27. bad_qkey, + 26. bad_pkey, + 25. bad_rmtlid_rd, + 24. bad_mylid_rd, + 23. bad_rmtgid_rd, + 22. bad_mygid_rd, + 21. bad_eesource, + 20. bad_eedestqp, + 19. bad_qp_is_lock, + 18. bad_qp2ee_rddmiss, + 17. bad_qp2ee_qptsnrd, + 16. bad_qp2ee_qpstate, + 15. bad_qp2ee_qpnvalid, + 14. bad_grhen, + 13. bad_rmtlid_nrd, + 12. bad_mylid_nrd, + 11. bad_rmtgid_nrd, + 10. bad_mygid_nrd, + 09. bad_migreq, + 08. bad_qp2ee_eeisbusy, + 07. bad_no_qpcrd, + 06. 1'b0, + 05. 1'b0, + 04. bad_router_packet, + 03. bad_opcode_int_erp, + 02. qp_is_qp0, + 01. qp_is_qp1, + 00. bad_exception_qp + + */ +/* --------------------------------------------------------- */ + struct TCUCHKCAUSE_REG2_st cause_reg_2;/* checker cause register 2 + 31.1'b0, + 30.1'b0, + 29.1'b0, + 28.1'b0, + 27.bad_ack_implnck, + 26.bad_rdrsp_implnck, + 25.bad_res_padcount, + 24.bad_payload_res_pkt, + 23.bad_rsp_supp, + 22.bad_opc_res_seq, + 21.bad_rdres_aack_mix, + 20.bad_rdrsp_trash, + 19.bad_eestate_bit_res, + 18.bad_qpstate_bit_res, + 17.unsolicited_ack, + 16.bad_ghost_resp, + 15.bad_e2ecredits_rel, + 14.bad_e2ecredits_unrel, + 13.bad_req_padcount, + 12.bad_payload_req_pkt, + 11.bad_opc_req_seq, + 10.bad_req_supp_unrel, + 09.bad_ud_epsn, + 08.bad_uc_epsn, + 07.bad_unrel_inv_psn, + 06.bad_req_supp_rel, + 05.bad_xxstate_psn_req, + 04.bad_eestate_bit_req, + 03.bad_qpstate_bit_req, + 02.bad_qp2ee_eepsnerr, + 01.bad_rel_inv_psn, + 00.bad_rel_dupl_psn + + */ +/* --------------------------------------------------------- */ + struct TCUCHKCAUSE_REG3_st cause_reg_3;/* checker cause register 3: + 31:8 - 0 + 07.bad_rdres_syn, + 06.bad_malformed_synd, + 05.bad_syn_inv_rd_req, + 04.bad_syn_roe, + 03.bad_syn_rae, + 02.bad_syn_inv_req, + 01.bad_syn_psn_seq_err, + 00.bad_syn_rnrnack + */ +/* --------------------------------------------------------- */ +}; + +/* Incoming Packet Exception Masks - Ignore / Drop / Trap */ + +struct EXCEPTIONMASKS_st { /* Little Endian */ + pseudo_bit_t cfg_exception_hmask_127_96[0x00020];/* high priority mask for exception vector in TCU [127:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_hmask_95_64[0x00020];/* high priority mask for exception vector in TCU [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_hmask_63_32[0x00020];/* high priority mask for exception vector in TCU [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_hmask_31_0[0x00020];/* high priority mask for exception vector in TCU [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_lmask_127_96[0x00020];/* low priority mask for exception vector in TCU [127:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_lmask_95_64[0x00020];/* low priority mask for exception vector in TCU [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_lmask_63_32[0x00020];/* low priority mask for exception vector in TCU [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_lmask_31_0[0x00020];/* low priority mask for exception vector in TCU [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_ignore_127_96[0x00020];/* ignore mask for exception vector in TCU [127:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_ignore_95_64[0x00020];/* ignore mask for exception vector in TCU [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_ignore_63_32[0x00020];/* ignore mask for exception vector in TCU [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_exception_ignore_31_0[0x00020];/* ignore mask for exception vector in TCU [31:0] */ +/* --------------------------------------------------------- */ +};
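+
+/* A minimal sketch of how firmware might classify a trapped packet's
+ * 128-bit exception vector against the masks above.  The precedence
+ * between the ignore, high priority and low priority masks is not
+ * spelled out in this header, so the ordering below (ignore wins,
+ * then high, then low) is an assumption; the helper name and the
+ * four-word vector representation are illustrative only. */
+static int tcu_classify_exception(const unsigned int vec[4],
+                                  const unsigned int ignore[4],
+                                  const unsigned int hmask[4],
+                                  const unsigned int lmask[4])
+{
+	unsigned int high = 0, low = 0;
+	int w;
+
+	for (w = 0; w < 4; w++) {
+		unsigned int live = vec[w] & ~ignore[w]; /* ignore mask filters first */
+		high |= live & hmask[w];
+		low  |= live & lmask[w];
+	}
+	if (high)
+		return 2;	/* at least one high priority exception */
+	if (low)
+		return 1;	/* only low priority exceptions left */
+	return 0;		/* everything was ignored */
+}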
+ +/* Opcode Masks - Drop / Trap */ + +struct OPCODEMASKS_st { /* Little Endian */ + pseudo_bit_t line0[0x00020]; /* drop/erp opcode mask [15:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line1[0x00020]; /* drop/erp opcode mask [31:16] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line2[0x00020]; /* drop/erp opcode mask [47:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line3[0x00020]; /* drop/erp opcode mask [63:48] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line4[0x00020]; /* drop/erp opcode mask [79:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line5[0x00020]; /* drop/erp opcode mask [95:80] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line6[0x00020]; /* drop/erp opcode mask [111:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line7[0x00020]; /* drop/erp opcode mask [127:112] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line8[0x00020]; /* drop/erp opcode mask [143:128] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line9[0x00020]; /* drop/erp opcode mask [159:144] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line10[0x00020]; /* drop/erp opcode mask [175:160] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line11[0x00020]; /* drop/erp opcode mask [191:176] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line12[0x00020]; /* drop/erp opcode mask [207:192] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line13[0x00020]; /* drop/erp opcode mask [223:208] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line14[0x00020]; /* drop/erp opcode mask [239:224] */ +/* --------------------------------------------------------- */ + pseudo_bit_t line15[0x00020]; /* drop/erp opcode mask [255:240] */ +/* --------------------------------------------------------- */ +};
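+
+/* Sketch of the line/bit arithmetic implied by the comments above:
+ * each 32-bit lineN covers 16 opcodes, i.e. two mask bits per opcode
+ * (presumably one drop bit and one erp/trap bit - that split is an
+ * assumption, the header only says "drop/erp").  Illustrative only. */
+static void opcode_mask_pos(unsigned int opcode,  /* 0..255 */
+                            unsigned int *line,   /* 0..15 */
+                            unsigned int *shift)  /* bit offset in line */
+{
+	*line = opcode >> 4;		/* 16 opcodes per 32-bit line */
+	*shift = (opcode & 0xf) * 2;	/* 2-bit field inside the line */
+}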
+ +/* TCU Router Mode Configuration */ + +struct ROUTERMODE_st { /* Little Endian */ + pseudo_bit_t cfg_hca_router[0x00001];/* router mode set */ + pseudo_bit_t reserved0[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_tclass[0x00008];/* Tclass data for router packets without GRH */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_bth_95_64[0x00020];/* BTH data for router mode [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_bth_63_32[0x00020];/* BTH data for router mode [63:32] + + * Dest QP should not be configured to zero when working in router mode, see bug 4984 */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_bth_31_0[0x00020];/* BTH data for router mode [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_deth_63_32[0x00020];/* DETH data for router mode [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_router_deth_31_0[0x00020];/* DETH data for router mode [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Raw Packets Handling Registers */ + +struct RAW_st { /* Little Endian */ + pseudo_bit_t cfg_raw_bth_95_64[0x00020];/* bth to use for these incoming packets [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_raw_bth_63_32[0x00020];/* bth to use for these incoming packets [63:32] + + * Dest QP should not be configured to zero, see bug 4984 */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_raw_bth_31_0[0x00020];/* bth to use for these incoming packets [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_raw_deth_63_32[0x00020];/* deth to use for these incoming packets [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t cfg_raw_deth_31_0[0x00020];/* deth to use for these incoming packets [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* InfiniRISC */ + +struct IRisc_st { /* Little Endian */ + pseudo_bit_t iriscboot[0x00020]; /* The boot ROM will keep the iRISC in an infinite loop polling this register. When ready, the bootstrap software updates this register with the code entry point vector address and then the iRISC jumps to that address to continue executing.
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t irisc_id[0x00020]; /* This read only register returns the base address in cr-space of the configuration registers for the irisc in question (can be used to identify the irisc via a loadcr instruction) */ +/* --------------------------------------------------------- */ + pseudo_bit_t datacacheways[0x00004];/* Number of Ways in iRISC Data Cache */ + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t datacachelinesize[0x00004]; + pseudo_bit_t reserved1[0x00004]; + pseudo_bit_t data_cache_lines_per_way[0x00010];/* Size of Data Cache - in units of cache lines */ +/* --------------------------------------------------------- */ + pseudo_bit_t codecacheways[0x00004];/* Number of Ways in iRISC Code Cache */ + pseudo_bit_t reserved2[0x00004]; + pseudo_bit_t codecachelinesize[0x00004]; + pseudo_bit_t reserved3[0x00004]; + pseudo_bit_t code_cache_lines_per_way[0x00010];/* Size of the implemented code cache - in units of cache lines */ +/* --------------------------------------------------------- */ + pseudo_bit_t memaddrbase_63_32_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00010]; + pseudo_bit_t memaddrbase_31_16_[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t readnsq[0x00006]; /* nsq bits to be used for read access to the nsi + + (update default with value used for read (NP) access to DMU) */ + pseudo_bit_t reserved5[0x00002]; + pseudo_bit_t writensq[0x00006]; /* nsq bits to be used for write access to the nsi + + (update default with value used for Posted access to DMU) */ + pseudo_bit_t reserved6[0x00001]; + pseudo_bit_t iomem_[0x00001]; /* io/mem bit to use in NSW + 1 - I/O + 0 - Mem */ + pseudo_bit_t reserved7[0x00006]; + pseudo_bit_t ldst4far_base[0x0000a];/* 10 msb of address when load/store 4 far is executed. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00020]; +/* --------------------------------------------------------- */ + struct IRISCCACHEGW_st irisccachegw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00180]; +/* --------------------------------------------------------- */ + pseudo_bit_t exinstptr[0x00020]; /* Address of last executed instruction */ +/* --------------------------------------------------------- */ + pseudo_bit_t nxtinstptr[0x00020]; /* Points to next to be executed Instruction */ +/* --------------------------------------------------------- */ + pseudo_bit_t gpreg[0x00020]; /* General Purpose LoadCR/StoreCR register */ +/* --------------------------------------------------------- */ + struct CCR_st CCR; /* Condition Code Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t intbase[0x00020]; /* Interrupt Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t intpins[0x00020]; /* This read only register shows the state of the 32 interrupt inputs to the iRISC (before the edge detector that sets corresponding bits in the cause register). 
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t rintbase[0x00020]; /* Remote Interrupt Base Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x000a0]; +/* --------------------------------------------------------- */ + pseudo_bit_t intcause_r_w_[0x00020];/* Interrupt Cause + Bits are set upon detection of a positive edge (transition from 0 to 1) in the corresponding signal of the iRISC interface. + The resulting register is bitwise ANDed with the intenable register. A non-zero result triggers an interrupt to the iRISC core (unless the intmask bit is set). + + Interrupt mapping for each of the 32 bits is specified for each iRISC in the FW flows document. + + The 32 bit input to the edge detector can be read in the intpins register. + + cause bit [0] - Code Breakpoint + cause bit [1] - Data Breakpoint + cause bit [2] - Performance Counters 0 + cause bit [3] - Performance Counters 1 + cause bit [4] - Code Cache miss + cause bit [5] - Data Cache miss + cause bit [16] - IPC + cause bit [31] - timer + + */ +/* --------------------------------------------------------- */ + pseudo_bit_t intcause_clear_[0x00020];/* Clear Interrupt Cause + Writing to this register results in the bits set being cleared in the cause register. Bits not set in the write data are left unchanged. + Reading from this register returns undefined results. */ +/* --------------------------------------------------------- */ + pseudo_bit_t intcause_set_[0x00020];/* Set Interrupt Cause + Writing to this register results in the bits set being set in the cause register. Bits not set in the write data are left unchanged. + Reading from this register returns undefined results. */ +/* --------------------------------------------------------- */ + pseudo_bit_t intenable[0x00020]; /* Interrupt Enable Mask - When set the corresponding interrupt is enabled. */ +/* --------------------------------------------------------- */ + pseudo_bit_t intra[0x00020]; /* Interrupt Return Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t intm[0x00001]; /* Interrupt Mask */ + pseudo_bit_t intpm[0x00001]; /* Previous Interrupt Mask */ + pseudo_bit_t reserved11[0x00002]; + pseudo_bit_t rintm[0x00001]; /* remote Interrupt Mask */ + pseudo_bit_t reserved12[0x00019]; + pseudo_bit_t endl[0x00001]; /* Endianness for Loads + 0 - Little Endian + 1 - Big Endian */ + pseudo_bit_t ends[0x00001]; /* Endianness for Stores + 0 - Little Endian + 1 - Big Endian */ +/* --------------------------------------------------------- */ + pseudo_bit_t rintra[0x00020]; /* Remote Interrupt Return Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t callra[0x00020]; /* Call Return Address */ +/* --------------------------------------------------------- */ + struct IRiscBP_st DataBP0; +/* --------------------------------------------------------- */ + struct IRiscBP_st CodeBP0; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00580]; +/* --------------------------------------------------------- */ + pseudo_bit_t irisctimer[0x00020]; /* This is a free running counter that generates an interrupt every time it wraps around. It can be written to any value through cr-space, thus allowing programming of the time between interrupts. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved14[0x001e0]; +/* --------------------------------------------------------- */ + struct IPCFIFO_st ipcfifo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00080]; +/* --------------------------------------------------------- */ + struct IRISCDEBUG_st iriscdebug; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved16[0x00020]; +/* --------------------------------------------------------- */ +};
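+
+/* The intcause_clear_/intcause_set_ pair above allows atomic bit
+ * manipulation of the cause register without a read-modify-write
+ * race.  A minimal sketch of acknowledging the timer interrupt
+ * (cause bit [31]); mapping the register through a volatile pointer
+ * is an illustrative assumption about how cr-space is accessed. */
+#define IRISC_INTCAUSE_TIMER	(1u << 31)
+
+static void irisc_ack_timer(volatile unsigned int *intcause_clear)
+{
+	/* write-1-to-clear: only the timer bit is cleared,
+	 * all other cause bits are left unchanged */
+	*intcause_clear = IRISC_INTCAUSE_TIMER;
+}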
+ +/* Execution Unit */ + +struct EXENGINE_st { /* Little Endian */ + struct EXEEVTCAUSE_st exeevtcause; /* Any bit set in the cause register will result in the bit corresponding to the execution engine being set in the exeevt register. */ +/* --------------------------------------------------------- */ + struct EXESTATUS_st exestatus; +/* --------------------------------------------------------- */ + pseudo_bit_t execmd[0x00008]; /* Execution Engine Command + + 0x00 Reserved + 0x01 Go with clear (hw clears/programs descriptor fifo before starting) + 0x02 Go Continue (used when EXU was preempted) - see bug 5183 + 0x03 Go with Blue Flame (hw clears/programs descriptor fifo before starting and copies cqpc ND* to descfifo fetchnd*. It is the FW's responsibility to update the cqpc NDS to the actual size of the written BF data before giving the command, in order to avoid access to non-written parts of the bf buffer) + 0x04 Preemption + 0x05 - Go without Clear (used when descriptor fifo is programmed by fw) + 0x06-0xFF Reserved + */ + pseudo_bit_t reserved0[0x00007]; + pseudo_bit_t preemptonly[0x00001]; /* when set the execution engine will stop only when preempted. + See details in Tavor MAS. */ + pseudo_bit_t blueflameentry[0x00003];/* Blue flame entry number in responder flow blue flame FIFO. Relevant when command is GO with Blue Flame */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t syngen[0x00001]; /* set syndrome bit in LDB entries generated. + this is used to control the generation of an internal event to the TCU ERP when the LDB entries are popped by the CE (in order to generate completion). To be used, for example, for retries when we want a single outstanding message. */ + pseudo_bit_t updateqpc[0x00001]; /* when set, qpc is updated after sending each packet accordingly + when cleared, the qpc is not updated. (ldb entries are also not pushed at the end of the msg) + for example, this is useful for sending NAKs. the cqpc context values are used (as preprogrammed by the scheduler) and the real qpc is not touched. */ + pseudo_bit_t ackenable[0x00001]; /* this bit is only relevant when the engine is configured for responder operation. + if set, when the engine finishes sending all outstanding read responses, an ACK will be sent if donePSN is greater than ackedPSN. ackedPSN will be updated to donePSN. */ + pseudo_bit_t requester[0x00001]; /* This bit specifies whether the engine is processing a requester or a responder queue. When set the engine is configured to process a requester queue. When cleared the engine will process a responder queue.
*/ + pseudo_bit_t reserved2[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpn[0x00018]; /* Queue Pair Number */ + pseudo_bit_t ee[0x00001]; /* ee bit - if set then the engine is processing using an end to end context whose number is stored in the qpn field */ + pseudo_bit_t reserved3[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ftcdlimit[0x0000f]; /* Fetch Descriptor Limit - See description in MAS */ + pseudo_bit_t ftcdlimitv[0x00001]; /* Fetch Descriptor Limit Valid Bit - See description in MAS */ + pseudo_bit_t ftcdcnt[0x0000f]; /* Number of descriptors fetched since last command + Value is NOT reset when a command is received. */ + pseudo_bit_t ftcdcnto[0x00001]; /* ftcdcnt overflow + Value is reset when a command is received. */ +/* --------------------------------------------------------- */ + pseudo_bit_t exedlimit[0x0000f]; /* Exer Descriptor Limit - See description in MAS */ + pseudo_bit_t exedlimitv[0x00001]; /* Exer Descriptor Limit Valid Bit - See description in MAS */ + pseudo_bit_t exedcnt[0x0000f]; /* Number of descriptors executed since last command + Value is NOT reset when a command is received. */ + pseudo_bit_t exedcnto[0x00001]; /* exedcnt overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t exeplimit[0x0000f]; /* Exer Packet Limit - See description in MAS + */ + pseudo_bit_t exeplimitv[0x00001]; /* Exer Packet Limit Valid Bit - See description in MAS */ + pseudo_bit_t exepcnt[0x0000f]; /* Number of packets sent since last command + Value is NOT reset when a command is received. */ + pseudo_bit_t exepcnto[0x00001]; /* exepcnt overflow */ +/* --------------------------------------------------------- */ + pseudo_bit_t exeblimit[0x0000f]; /* Exer Bytes Limit - (in units of 64 bytes) - See description in MAS */ + pseudo_bit_t exeblimitv[0x00001]; /* Exer Bytes Limit Valid Bit - See description in MAS */ + pseudo_bit_t exebcnt[0x0000f]; /* Bytes sent since last command (in units of 64 bytes). + Value is NOT reset when a command is received. */ + pseudo_bit_t exebcnto[0x00001]; /* exebcnt overflow + Value is reset when a command is received. */ +/* --------------------------------------------------------- */ + pseudo_bit_t gatherengine[0x00004]; /* Gather Engine Number that will be used by the execution engine + + 0x0-0x3 Gather Engine Number + 0x4-0xF Reserved */ + pseudo_bit_t reserved5[0x00004]; + pseudo_bit_t fetchwatermark[0x00006];/* Descriptor Fetch Watermark - Fetcher will stop bringing new descriptors when descriptors in the FIFO exceed the watermark level. Specified in units of 16 bytes. */ + pseudo_bit_t reserved6[0x00001]; + pseudo_bit_t prio[0x00001]; /* Priority - If set then engine is high priority.
*/ + pseudo_bit_t rrweight[0x00008]; /* Round Robin Weight for arbitration (in units of 256 bytes) + Two level weighted round robin (see prio bit for priority) */ + pseudo_bit_t reserved7[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00260]; +/* --------------------------------------------------------- */ + struct EXEFDESCCTL_st exefdescctl; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00300]; +/* --------------------------------------------------------- */ +}; + +/* Exe MGID Table */ + +struct EXE_MGIDTABLE_st { /* Little Endian */ + struct EXE_GID_st mgid0; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid1; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid2; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid3; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid4; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid5; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid6; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid7; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid8; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid9; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid10; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid11; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid12; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid13; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid14; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid15; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid16; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid17; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid18; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid19; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid20; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid21; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid22; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid23; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid24; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid25; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid26; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid27; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid28; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid29; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid30; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid31; +/* 
--------------------------------------------------------- */ + struct EXE_GID_st mgid32; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid33; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid34; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid35; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid36; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid37; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid38; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid39; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid40; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid41; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid42; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid43; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid44; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid45; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid46; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid47; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid48; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid49; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid50; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid51; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid52; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid53; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid54; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid55; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid56; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid57; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid58; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid59; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid60; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid61; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid62; +/* --------------------------------------------------------- */ + struct EXE_GID_st mgid63; +/* --------------------------------------------------------- */ +}; + +/* Execution Unit General Configuration */ + +struct EXUGRLCFG_st { /* Little Endian */ + pseudo_bit_t grh_next_header[0x00008];/* This is the value placed in the GRH.NxtHdr field of outgoing packets. 
*/ + pseudo_bit_t lver[0x00004]; /* value to put in the LRH.LVer field of generated packets */ + pseudo_bit_t tver[0x00004]; /* value to put in the BTH.TVer field of generated packets */ + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t force_ackreq[0x00001]; /* Will force assertion of BTH.AckReq at the last packet of each request message sent */ + pseudo_bit_t udportcheck[0x00001]; /* When set, Port number in UD AV is checked to match Port field of QP in question. If they are not equal, the descriptor is not executed and a corresponding error is reported (in engine cause register and syndrome). When cleared, AV Port field check is ignored. */ + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t system_log2pagesize[0x00006];/* log 2 of page size + This page size should not be bigger than the nsb_page_size. + By default it will be initialized by fw upon HCA initialization to the same page size as the nsb_page_size in TPT. Smaller values allow freedom in splitting the SDE gathers into smaller sizes. + Note: for Tavor it MUST be greater than MTU */ + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t min_ge_credits2start[0x00008];/* Minimum GE credits needed to start a new packet */ + pseudo_bit_t reserved3[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t ge0_credits[0x00008]; /* Number of gather entries available in gather engine 0 */ + pseudo_bit_t ge1_credits[0x00008]; /* see ge0_credits */ + pseudo_bit_t ge2_credits[0x00008]; /* see ge0_credits */ + pseudo_bit_t ge3_credits[0x00008]; /* see ge0_credits */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rrscentriescredits[0x00009];/* Number of available entries in the scatter lists buffer in the RDE */ + pseudo_bit_t reserved6[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t gff_credits[0x00009]; /* descriptor read filler available credits */ + pseudo_bit_t reserved7[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00100]; +/* --------------------------------------------------------- */ +};
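+
+/* Sketch of the constraints stated for system_log2pagesize above: it
+ * must not exceed the TPT's nsb_page_size and, on Tavor, the page
+ * size must be greater than the MTU.  The validation helper is
+ * illustrative only. */
+static int exu_log2pagesize_valid(unsigned int log2pagesize,
+                                  unsigned int nsb_log2pagesize,
+                                  unsigned int mtu_bytes)
+{
+	return log2pagesize <= nsb_log2pagesize &&
+	       (1u << log2pagesize) > mtu_bytes;	/* Tavor: page > MTU */
+}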
+ +/* Execution Arbiter */ + +struct EXEARB_st { /* Little Endian */ + pseudo_bit_t ex_penalty_map2[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map3[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map4[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map5[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map6[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map7[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_penalty_map8[0x00020];/* Penalty map for arbiter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ex_livelock_cnt[0x00010];/* Livelock counter, preventing starvation of low priority EUs. */ + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t ex_cfg_no_burstfee[0x00001]; + pseudo_bit_t ex_cfg_iter_noclps[0x00001]; + pseudo_bit_t ex_cfg_unlink_qufee[0x00001]; + pseudo_bit_t reserved1[0x00005]; +/* --------------------------------------------------------- */ +}; + +/* Cause Registers */ + +struct EXE_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t irisc0_nsw_error[0x00001]; + pseudo_bit_t irisc1_nsw_error[0x00001]; + pseudo_bit_t crbus_timeout[0x00001]; + pseudo_bit_t reserved0[0x0001d]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* Descriptor Gateway */ + +struct DESCFIFOGW_st { /* Little Endian */ + struct GWCONTROL_st descgwctrl; /* Command: + 0x00 Reserved + 0x01 Read Entry + 0x02 Write Entry + 0x03-0x3F Reserved + + Status + 0x00 Success + 0x01 Error + 0x02-0x3F Reserved + Address + [23:21] Reserved + [20:16] Execution Unit Number + [15:06] Reserved + [05:00] Descriptor Entry Number */ +/* --------------------------------------------------------- */ + pseudo_bit_t descgwdata_127_96_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t descgwdata_95_64_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t descgwdata_63_32_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t descgwdata_31_0_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* Reliable Datagram EE Disconnect FIFO */ + +struct RDEEDISCFIFO_st { /* Little Endian */ + struct FIFOCONTROL_st rdeediscfifoctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t rdeediscdata[0x00018]; /* bits 23:0 hold the ee context to be disconnected */ + pseudo_bit_t reserved0[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* Internal Doorbells */ + +struct INTDB_st { /* Little Endian */ + struct INTDBFIFO_st intdbfifo; /* Internal Doorbell FIFO */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t int_bf_array_status[0x00008];/* the state of the entries in the internal blueflame array. + 1 indicates that the entry is captured, 0 - free. */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t int_bf_array_set[0x00008];/* sets bits in int_bf_array_status: each bit set in the write data sets the corresponding bit in the status field. The field returns 0 when read. */ + pseudo_bit_t reserved2[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t int_bf_array_clear[0x00008];/* clears bits in int_bf_array_status: each bit set in the write data clears the corresponding bit in the status field. The field returns 0 when read. */ + pseudo_bit_t reserved3[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x000a0]; +/* --------------------------------------------------------- */ +}; + +/* External Doorbells */ + +struct EXTDB_st { /* Little Endian */ + pseudo_bit_t bf_lenght[0x00007]; /* this field contains the length of the current blueflame (if one exists) in dwords. If there is no blueflame the field contains 0.
*/ + pseudo_bit_t reserved0[0x00009]; + pseudo_bit_t uar_page_size[0x00006];/* holds the size of the uar page. + the uar page size is 2^(12+uar_page_size) bytes (e.g. uar_page_size=0 gives a 4KB page). */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t disfilter[0x00001]; /* Disable DB Preprocessor Filter - When set the Preprocessor will not filter writes to non-valid DB areas. */ + pseudo_bit_t disdbpp[0x00001]; /* Disable Doorbell Preprocessor. When set the doorbell preprocessor is disabled. Entries in the Pre DB FIFO are written directly to the QP DB FIFO. */ +/* --------------------------------------------------------- */ + pseudo_bit_t bfstate[0x00002]; /* Blue Flame buffer State - Managed by DB Preprocessor (and Execution Units after reading from it). Can be written by FW (i.e. when freeing the buffer after deciding not to use it). + 0x0 - Empty + 0x1 - In Progress + 0x2 - Full + 0x3 - Reserved */ + pseudo_bit_t reserved2[0x0001e]; +/* --------------------------------------------------------- */ + pseudo_bit_t read_fsm_state[0x00003];/* the state of the fsm that handles the writing of the db to the qp fifo and the cq fifo */ + pseudo_bit_t reserved3[0x0001d]; +/* --------------------------------------------------------- */ + pseudo_bit_t write_fsm[0x00003]; + pseudo_bit_t reserved4[0x0001d]; +/* --------------------------------------------------------- */ + struct QPDBFIFO_st qpdbfifo; +/* --------------------------------------------------------- */ +}; + +/* Execution Engine iRISC Assignment Mask */ + +struct EXEENGIRISCMASK_st { /* Little Endian */ + pseudo_bit_t engiriscmask[0x00010]; /* bit set means the irisc receives an interrupt when the corresponding execution engine is not busy */ + pseudo_bit_t reserved0[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* Execution Engines Event Register */ + +struct EXEEVENT_st { /* Little Endian */ + pseudo_bit_t exe00[0x00001]; /* Set when execution engine 00 is not busy */ + pseudo_bit_t exe01[0x00001]; /* Set when execution engine 01 is not busy */ + pseudo_bit_t exe02[0x00001]; /* Set when execution engine 02 is not busy */ + pseudo_bit_t exe03[0x00001]; /* Set when execution engine 03 is not busy */ + pseudo_bit_t exe04[0x00001]; /* Set when execution engine 04 is not busy */ + pseudo_bit_t exe05[0x00001]; /* Set when execution engine 05 is not busy */ + pseudo_bit_t exe06[0x00001]; /* Set when execution engine 06 is not busy */ + pseudo_bit_t exe07[0x00001]; /* Set when execution engine 07 is not busy */ + pseudo_bit_t exe08[0x00001]; /* Set when execution engine 08 is not busy */ + pseudo_bit_t exe09[0x00001]; /* Set when execution engine 09 is not busy */ + pseudo_bit_t exe10[0x00001]; /* Set when execution engine 10 is not busy */ + pseudo_bit_t exe11[0x00001]; /* Set when execution engine 11 is not busy */ + pseudo_bit_t exe12[0x00001]; /* Set when execution engine 12 is not busy */ + pseudo_bit_t exe13[0x00001]; /* Set when execution engine 13 is not busy */ + pseudo_bit_t exe14[0x00001]; /* Set when execution engine 14 is not busy */ + pseudo_bit_t exe15[0x00001]; /* Set when execution engine 15 is not busy */ + pseudo_bit_t reserved0[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* @EXU_BIST */ + +struct EXU_BIST_st { /* Little Endian */ + pseudo_bit_t exe_bistdbpp0_rdw_0[0x00006];/* 143 */ + pseudo_bit_t
reserved0[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistdbpp1_wdw_0[0x00006];/* 143 */ + pseudo_bit_t reserved1[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistdescfifo_rdw_0[0x00007];/* 152 */ + pseudo_bit_t reserved2[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistdescfifo_rdw_1[0x00007];/* 152 */ + pseudo_bit_t reserved3[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistdescfifo_rdw_2[0x00007];/* 153 */ + pseudo_bit_t reserved4[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistdescfifo_rdw_3[0x00007];/* 153 */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistsdegff1_wdw_0[0x00007];/* 150 */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistsdegff1_wdw_1[0x00007];/* 150 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t exe_bistuppsncis_wdw_0[0x00008];/* 153 */ + pseudo_bit_t reserved8[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data0_rdw_0[0x00006];/* 162 */ + pseudo_bit_t reserved9[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data0_rdw_1[0x00006];/* 162 */ + pseudo_bit_t reserved10[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code0_rdw_0[0x00006];/* 162 */ + pseudo_bit_t reserved11[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code0_rdw_1[0x00006];/* 162 */ + pseudo_bit_t reserved12[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code0_rdw_2[0x00006];/* 162 */ + pseudo_bit_t reserved13[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code0_rdw_3[0x00006];/* 162 */ + pseudo_bit_t reserved14[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data1_rdw_0[0x00006];/* 162 */ + pseudo_bit_t reserved15[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data1_rdw_1[0x00006];/* 162 */ + pseudo_bit_t reserved16[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code1_rdw_0[0x00006];/* 162 */ + pseudo_bit_t reserved17[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code1_rdw_1[0x00006];/* 162 */ + pseudo_bit_t reserved18[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code1_rdw_2[0x00006];/* 162 */ + pseudo_bit_t reserved19[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code1_rdw_3[0x00006];/* 162 */ + pseudo_bit_t reserved20[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00015]; + pseudo_bit_t reserved21[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x0001f]; + pseudo_bit_t reserved22[0x00001]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat_2[0x00006]; + pseudo_bit_t reserved23[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved24[0x000c0]; +/* --------------------------------------------------------- */ +}; + +/* EXE General Purpose Semaphores */ + +struct EXE_SEMAPHORES_st { /* Little Endian */ + pseudo_bit_t semaphore0[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved0[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore1[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved1[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore2[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved2[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore3[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved3[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore4[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved4[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore5[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved5[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore6[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved6[0x0001f]; +/* --------------------------------------------------------- */ + pseudo_bit_t semaphore7[0x00001]; /* Read returns 0 if lock succeeded + 1 if lock failed + Write of 0 releases semaphore + 1 force lock (don't use it at home) */ + pseudo_bit_t reserved7[0x0001f]; +/* --------------------------------------------------------- */ +};
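+
+/* Sketch of the lock protocol described above: a read acquires the
+ * semaphore (0 = lock taken, 1 = already locked) and a write of 0
+ * releases it.  Accessing the register through a volatile pointer is
+ * an illustrative assumption about the cr-space mapping. */
+static int exe_sem_trylock(volatile unsigned int *sem)
+{
+	return (*sem & 1) == 0;	/* the read itself attempts the lock */
+}
+
+static void exe_sem_unlock(volatile unsigned int *sem)
+{
+	*sem = 0;		/* write of 0 releases the semaphore */
+}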
+ +/* Descriptor Opcode Breakpoint */ + +struct EXEOPCODEBP_st { /* Little Endian */ + pseudo_bit_t bp_rq_nop[0x00001]; + pseudo_bit_t bp_rq_r01[0x00001]; + pseudo_bit_t bp_rq_r02[0x00001]; + pseudo_bit_t bp_rq_r03[0x00001]; + pseudo_bit_t bp_rq_r04[0x00001]; + pseudo_bit_t bp_rq_r05[0x00001]; + pseudo_bit_t bp_rq_r06[0x00001]; + pseudo_bit_t bp_rq_r07[0x00001]; + pseudo_bit_t bp_rq_rdmaw[0x00001]; + pseudo_bit_t bp_rq_rdmaw_imm[0x00001]; + pseudo_bit_t bp_rq_send[0x00001]; + pseudo_bit_t bp_rq_send_imm[0x00001]; + pseudo_bit_t bp_rq_r12[0x00001]; + pseudo_bit_t bp_rq_r13[0x00001]; + pseudo_bit_t bp_rq_r14[0x00001]; + pseudo_bit_t bp_rq_r15[0x00001]; + pseudo_bit_t bp_rq_rdmar[0x00001]; + pseudo_bit_t bp_rq_cmpswp[0x00001]; + pseudo_bit_t bp_rq_fetchadd[0x00001]; + pseudo_bit_t bp_rq_r19[0x00001]; + pseudo_bit_t bp_rq_r20[0x00001]; + pseudo_bit_t bp_rq_r21[0x00001]; + pseudo_bit_t bp_rq_r22[0x00001]; + pseudo_bit_t bp_rq_r23[0x00001]; + pseudo_bit_t bp_rq_bind[0x00001]; + pseudo_bit_t bp_rq_r25[0x00001]; + pseudo_bit_t bp_rq_r26[0x00001]; + pseudo_bit_t bp_rq_r27[0x00001]; + pseudo_bit_t bp_rq_r28[0x00001]; + pseudo_bit_t bp_rq_r29[0x00001]; + pseudo_bit_t bp_rq_r30[0x00001]; + pseudo_bit_t bp_rq_r31[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x0000c]; + pseudo_bit_t bp_rp_rd[0x00001]; + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t bp_rp_cmpswp[0x00001]; + pseudo_bit_t bp_rp_fetchadd[0x00001]; + pseudo_bit_t reserved2[0x0000b]; +/* --------------------------------------------------------- */ +}; + +/* System Memory Access Parameters */ + +struct MEMACCESSPARAMS_st { /* Little Endian */ + pseudo_bit_t key[0x00020]; /* Memory Key */ +/* --------------------------------------------------------- */ + pseudo_bit_t pd[0x00018]; /* Protection Domain */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t xlation_en[0x00001]; /* When cleared, baseaddress is a physical address and no translation will be done. When set, the address is virtual. TPT will be accessed in both cases for address decoding purposes. */ + pseudo_bit_t np[0x00001]; /* Non Posted Access */ + pseudo_bit_t nsvl[0x00001]; /* North Switch Virtual Lane */ +/* --------------------------------------------------------- */ + pseudo_bit_t baseaddress_63_32_[0x00020];/* Base Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t baseaddress_31_0_[0x00020];/* Base Address */ +/* --------------------------------------------------------- */ +}; + +/* Receive Doorbell / RDB Parameters (reuses the System Memory Access Parameters layout in HW) */ + +struct RCVDB_RDBPARAMS_st { /* Little Endian */ + pseudo_bit_t rcvdb_qpn_credits[0x00020];/* used to be the Memory Key in HW */ +/* --------------------------------------------------------- */ + pseudo_bit_t rcvdb_uar[0x00018]; /* used to be the Protection Domain in HW */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t rdb_xlation_en[0x00001];/* When cleared, baseaddress is a physical address and no translation will be done. When set, the address is virtual. TPT will be accessed in both cases for address decoding purposes. */ + pseudo_bit_t rdb_np[0x00001]; /* Non Posted Access */ + pseudo_bit_t rdb_nsvl[0x00001]; /* North Switch Virtual Lane */ +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_baseaddress_63_32_[0x00020];/* Base Address */ +/* --------------------------------------------------------- */ + pseudo_bit_t rcvdb_nda_nds[0x00020];/* used to be the Base Address in HW */ +/* --------------------------------------------------------- */ +};
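+
+/* The pseudo_bit_t arrays above only describe register layout; a
+ * minimal sketch of assembling the 64-bit base address of
+ * MEMACCESSPARAMS_st from its two 32-bit halves.  The helper and its
+ * parameter names are illustrative; whether the result is a physical
+ * or a virtual address depends on xlation_en, as the field comment
+ * explains. */
+static unsigned long long mem_access_base(unsigned int base_63_32,
+                                          unsigned int base_31_0)
+{
+	return ((unsigned long long)base_63_32 << 32) | base_31_0;
+}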
+ +/* @RDE_BIST */ + +struct RDE_BIST_st { /* Little Endian */ + pseudo_bit_t rdebistsb_rdw_0[0x00007];/* 131 */ + pseudo_bit_t reserved0[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsb_rdw_1[0x00007];/* 131 */ + pseudo_bit_t reserved1[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsb_rdw_2[0x00007];/* 131 */ + pseudo_bit_t reserved2[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsb_rdw_3[0x00007];/* 131 */ + pseudo_bit_t reserved3[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsl6474_rdw_0[0x00007];/* 144 */ + pseudo_bit_t reserved4[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsl6474_rdw_1[0x00007];/* 144 */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsl6477_rdw_0[0x00007];/* 144 */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistsl6477_rdw_1[0x00007];/* 144 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo0_wdw_0[0x00007];/* 146 */ + pseudo_bit_t reserved8[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo0_wdw_1[0x00007];/* 146 */ + pseudo_bit_t reserved9[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo0_wdw_2[0x00007];/* 146 */ + pseudo_bit_t reserved10[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo1_wdw_0[0x00007];/* 146 */ + pseudo_bit_t reserved11[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo1_wdw_1[0x00007];/* 146 */ + pseudo_bit_t reserved12[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistpsfifo1_wdw_2[0x00007];/* 146 */ + pseudo_bit_t reserved13[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebist256len_wdw_0[0x00007];/* 144 */ + pseudo_bit_t reserved14[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistdffd_wdw_0[0x00008];/* 138 */ + pseudo_bit_t reserved15[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdebistdffd_wdw_1[0x00008];/* 138 */ + pseudo_bit_t reserved16[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00011]; + pseudo_bit_t reserved17[0x0000f]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x0000f]; + pseudo_bit_t reserved18[0x00011]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00016]; + pseudo_bit_t reserved19[0x0000a]; +/*
--------------------------------------------------------- */ + pseudo_bit_t reserved20[0x00080]; +/* --------------------------------------------------------- */ +}; + +/* RDE Debug */ + +struct RDEDEBUG_st { /* Little Endian */ + pseudo_bit_t cqpc_falv_freed[0x00020];/* cqpc falv freed */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_allocated[0x00020];/* cqpc falv allocated */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_locked[0x00020];/* cqpc falv locked */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_valid[0x00020];/* cqpc falv valid */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_hold_relock[0x00020];/* cqpc falv hold relock */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_allocate[0x00020];/* cqpc falv allocate */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_lock[0x00020];/* cqpc falv lock */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_unlock[0x00020];/* cqpc falv unlock */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_relock[0x00020];/* cqpc falv relock */ +/* --------------------------------------------------------- */ + pseudo_bit_t cqpc_falv_free[0x00020];/* cqpc falv free */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_read_inflight[0x00020];/* for each cqpc entry, whether it is currently reading the QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t desc_inflight[0x00020];/* for each cqpc entry, whether there is a descriptor in flight from memory. */ +/* --------------------------------------------------------- */ + pseudo_bit_t whole_dsl_inside[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ti_error[0x00020]; /* error occurred in the tptinit pipe stage or after it, for each cqpc entry. */ +/* --------------------------------------------------------- */ + pseudo_bit_t np_error[0x00020]; /* error propagated to the Final pipe stage or after it, for each cqpc entry. */ +/* --------------------------------------------------------- */ + pseudo_bit_t sb_write_ptr[0x00009]; /* scatter buffer write pointer */ + pseudo_bit_t reserved2[0x00007]; + pseudo_bit_t sb_read_ptr[0x00009]; /* scatter buffer read pointer. */ + pseudo_bit_t reserved3[0x00006]; + pseudo_bit_t sb_empty[0x00001]; /* scatter buffer is empty. */ +/* --------------------------------------------------------- */ + pseudo_bit_t cmdfifo_empty[0x00001];/* tcu command fifo is empty. */ + pseudo_bit_t reserved4[0x00003]; + pseudo_bit_t fq2tififo_empty[0x00001];/* FQPC to TPTINIT fifo empty. */ + pseudo_bit_t fq2tififo_full[0x00001];/* FQPC to TPTINIT fifo full. */ + pseudo_bit_t reserved5[0x00002]; + pseudo_bit_t ti2nsfifo_empy[0x00001];/* TPTINIT to NSINIT fifo empty. */ + pseudo_bit_t ti2nsfifo_full[0x00001];/* TPTINIT to NSINIT fifo full. */ + pseudo_bit_t reserved6[0x00002]; + pseudo_bit_t ns2tffifo_empty[0x00001];/* NSINIT to TPTFIN fifo empty. */ + pseudo_bit_t ns2tffifo_full[0x00001];/* NSINIT to TPTFIN fifo full.
*/ + pseudo_bit_t reserved7[0x00002]; + pseudo_bit_t tf2sefifo_empty[0x00001];/* TPTFIN to scatter engine fifo empty. */ + pseudo_bit_t tf2sefifo_full[0x00001];/* TPTFIN to scatter engine fifo full. */ + pseudo_bit_t reserved8[0x00002]; + pseudo_bit_t se2npfifo_empty[0x00001];/* scatter engine to NONPOST fifo empty. */ + pseudo_bit_t se2npfifo_full[0x00001];/* scatter engine to NONPOST fifo full */ + pseudo_bit_t reserved9[0x0000a]; +/* --------------------------------------------------------- */ + pseudo_bit_t stop_tpt[0x00001]; /* stop tpt accesses - requested by the stoptptaccess register */ + pseudo_bit_t reserved10[0x00019]; + pseudo_bit_t ti_phyfifo_empty[0x00001];/* TPTINIT PhysicalAddressTranslatedFIFO is empty */ + pseudo_bit_t tf_phyfifo_empty[0x00001];/* TPTFIN PhysicalAddressTranslatedFIFO is empty */ + pseudo_bit_t no_outstanding_np_write[0x00001];/* no outstanding non posted write. */ + pseudo_bit_t atreda_freed[0x00001]; /* atomic reply data entry is freed. */ + pseudo_bit_t no_outstanding_read[0x00001];/* no outstanding read */ + pseudo_bit_t nsw_stopped[0x00001]; /* no outstanding nswitch accesses. */ +/* --------------------------------------------------------- */ + pseudo_bit_t fqpc_ps[0x00003]; /* FQPC FSM state + fqpc_ps + -------------------- + 3'b000 FQPCIDLE + 3'b001 CACHEACC + 3'b010 READXQPC + 3'b011 USEOTHER + 3'b100 ATAGADOL + 3'b101 THROWQPC + 3'b110 THROWDSC + 3'b111 PTPTINIT + */ + pseudo_bit_t reserved11[0x00001]; + pseudo_bit_t tptreq_ps[0x00003]; /* TPTINIT TPTreq FSM state + tptreq_ps + -------------------- + 3'b000 TPTIDLE + 3'b001 GTREADY + 3'b010 PNSINIT + 3'b011 TPTRQST + 3'b100 XSTROB1 + + */ + pseudo_bit_t reserved12[0x00001]; + pseudo_bit_t cda_ps[0x00003]; /* CurrentDescriptorAddress FSM state in TPTINIT pipestage + cda_ps + -------------------- + 3'b000 CDAIDLE + 3'b001 RDNDCDA + 3'b010 ENOUGHS + 3'b011 WRITCDA + 3'b100 CHKSPCE + 3'b101 THRWDSC + */ + pseudo_bit_t reserved13[0x00001]; + pseudo_bit_t cqpc_ps[0x00003]; /* TPTINIT cqpc FSM state + cqpc_ps + -------------------- + 3'b000 CQPCIDLE + 3'b001 QPCREADY + 3'b010 CHECKRDB + 3'b011 CQPCDONE + 3'b100 CQPCREAD + */ + pseudo_bit_t reserved14[0x00001]; + pseudo_bit_t dbctr_ps[0x00002]; /* TPTINIT DoorBell counter FSM state + dbctr_ps + -------------------- + 2'b00 DBCTRIDLE + 2'b01 WAITPTFSM + 2'b10 RQPCARBRQ + 2'b11 PUSHXRQPC + */ + pseudo_bit_t reserved15[0x00002]; + pseudo_bit_t nsinit_ps[0x00003]; /* NSINIT FSM state + nsinit_ps + -------------------- + 3'b000 NSWIDLE + 3'b001 NSWRQST + 3'b010 NSWCMND + 3'b011 NSWATMC + 3'b100 PTPTFIN + */ + pseudo_bit_t reserved16[0x00009]; +/* --------------------------------------------------------- */ + pseudo_bit_t tptfin_ps[0x00003]; /* TPTFIN FSM state + tptfin_ps + -------------------- + 3'b000 TPTFINIDLE + 3'b001 READXXCQPC + 3'b010 CHECKERROR + 3'b011 FLSHALLDSC + 3'b100 QPC2UPDATE + 3'b101 SPITOFDESC + 3'b110 SCATERDATA + */ + pseudo_bit_t reserved17[0x00001]; + pseudo_bit_t ce_ps[0x00002]; /* CompletionEvent (NDA/NDS section in descriptor) FSM state in TPTFIN pipe stage + ce_ps + -------------------- + 2'b00 CEIDLE + 2'b01 READCE + 2'b10 POPXCE + 2'b11 DONECE + */ + pseudo_bit_t reserved18[0x00002]; + pseudo_bit_t dsl_ps[0x00003]; /* DescriptorScatterList FSM state in TPTFIN pipe stage + dsl_ps + -------------------- + 3'b000 DSLIDLE + 3'b001 DONTXDO + 3'b010 NEED2DO + 3'b011 UNLOCK1 + 3'b100 DSLFREE + */ + pseudo_bit_t reserved19[0x00001]; + pseudo_bit_t flush_ps[0x00002]; /* Flush Descriptor FSM state in TPTFIN pipe stage + flush_ps +
-------------------- + 2'b00 FLSHIDLE + 2'b01 ISDESCIN + 2'b10 POP1DESC + */ + pseudo_bit_t reserved20[0x00002]; + pseudo_bit_t updateqpc_ps[0x00003]; /* Update QPC FSM state in TPTFIN pipe stage + updateqpc_ps + -------------------- + 3'b000 UPDATEIDLE + 3'b001 JUSTCOMITD + 3'b010 SCTRLSTPTR + 3'b011 ALSOCOMITD + 3'b100 RDXQPXXNDA + 3'b101 XQPLOCK4RD + 3'b110 XXXQPXXNDA + 3'b111 UPDATEDONE + */ + pseudo_bit_t reserved21[0x00001]; + pseudo_bit_t tptreq2_ps[0x00002]; /* TPT request FSM state in TPTFIN pipe stage + tptreq2_ps + -------------------- + 2'b00 TPTIDLE + 2'b01 TPTRQST + 2'b10 XSTROB1 + */ + pseudo_bit_t reserved22[0x0000a]; +/* --------------------------------------------------------- */ + pseudo_bit_t dispatch_ps[0x00003]; /* dispatch FSM state in ScatterEngine pipe stage + dispatch_ps + -------------------- + 3'b000 SCTRIDLE + 3'b001 WRITERDB + 3'b010 WRITDATA + 3'b011 ISNTDATA + 3'b100 HDRXWAIT + 3'b101 SE2NPUSH + */ + pseudo_bit_t reserved23[0x00001]; + pseudo_bit_t nmxif_ps[0x00003]; /* Nmux Interface FSM state in ScatterEngine pipe stage + nmxif_ps + -------------------- + 3'b000 NMUXIDLE + 3'b001 NMUXRQST + 3'b010 NSWITCHC + 3'b011 NSWITCHD + 3'b100 IDLECYCL + */ + pseudo_bit_t reserved24[0x00001]; + pseudo_bit_t nonpost_ps[0x00003]; /* NONPOST FSM state + nonpost_ps + -------------------- + 3'b000 IDLENONPOST + 3'b001 GETCQPCATTR + 3'b010 WAIT4DBCPLT + 3'b011 IRISCXERROR + 3'b100 QPCXXUPDATE + */ + pseudo_bit_t reserved25[0x00001]; + pseudo_bit_t doorbell_ps[0x00002]; /* DoorBell FSM state - internal doorbell - in NONPOST pipe stage + doorbell_ps + -------------------- + 2'b00 DOORBELLIDLE + 2'b01 BLUEFLAMPUSH + 2'b10 DOORBELLRING + 2'b11 WAIT4MAINFSM + */ + pseudo_bit_t reserved26[0x00002]; + pseudo_bit_t completion_ps0[0x00002];/* Completion FSM state in NONPOST pipe stage + completion_ps + -------------------- + 2'b00 CPLTIDLE + 2'b01 COMPLETE + 2'b10 WAITMAIN + */ + pseudo_bit_t reserved27[0x0000e]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved28[0x000c0]; +/* --------------------------------------------------------- */ +}; + +/* Cause Registers */ + +struct RDE_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t cr_slave_timeout[0x00001]; + pseudo_bit_t parity_err[0x00001]; + pseudo_bit_t reserved0[0x0001e]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* RDE Credits */ + +struct RDECREDITS_st { /* Little Endian */ + pseudo_bit_t max_rqpc_credits[0x00005];/* credits towards rqpc */ + pseudo_bit_t reserved0[0x0000b]; + pseudo_bit_t used_rqpc_credits[0x00005];/* how many rqpc credits are used. */ + pseudo_bit_t reserved1[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t max_sqpc_credits[0x00005];/* credits towards sqpc */ + pseudo_bit_t reserved2[0x0000b]; + pseudo_bit_t used_sqpc_credits[0x00005];/* how many sqpc credits are used. */ + pseudo_bit_t reserved3[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t max_filler_credits[0x00008];/* how many descriptor-read filler credits are allowed */ + pseudo_bit_t reserved4[0x00008]; + pseudo_bit_t used_filler_credits[0x00008];/* how many filler credits are used */ + pseudo_bit_t reserved5[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t max_scatterlist_credits[0x00009];/* how many descriptor scatterlist entries are allowed.
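 + Note (illustrative, not from the datasheet): each dword of + RDECREDITS_st pairs a max_* limit in its low bits with the + matching used_* count starting at bit 16 (used_scatterlist_credits + is declared just below). A hypothetical debug check, with + read_reg() and the base address assumed: + uint32_t v = read_reg(rde_credits_base+0xc); // this fourth dword + uint32_t max_c = v & 0x1ff; // max_scatterlist_credits, bits 8:0 + uint32_t used_c = (v >> 16) & 0x1ff; // used_scatterlist_credits, bits 24:16 + used_c should never exceed max_c.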
*/ + pseudo_bit_t reserved6[0x00007]; + pseudo_bit_t used_scatterlist_credits[0x00009];/* how many scatterlist entries are used. */ + pseudo_bit_t reserved7[0x00007]; +/* --------------------------------------------------------- */ +}; + +/* Receive Data Engine General Configuration */ + +struct RDEGRLCFG_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t system_log2pagesize[0x00006];/* log2 page size + This page size should not be bigger than the nsb_page_size. + By default it will be initialized by fw upon HCA initialization to the same page size as the nsb_page_size in TPT. The smaller values allow freedom in splitting the RDE scatters to smaller sizes. */ + pseudo_bit_t reserved1[0x00019]; + pseudo_bit_t host_is_little_endian[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t pipe_limit[0x00008]; /* disable rde piping */ + pseudo_bit_t packets_in_pipe[0x00008];/* How many packets are inside the pipe (after command FIFO) */ + pseudo_bit_t cqpc_falv_limit[0x00004];/* how many packets are allowed for a QP in the CQPC cache. */ + pseudo_bit_t dsl_falv_limit[0x00004];/* how many Descriptors are allowed for a QP in the CQPC cache. */ + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t force_qpc_clken[0x00001];/* force the clock gate of the QPC boundary FF to be enabled */ + pseudo_bit_t force_tpt_clken[0x00001];/* force the clock gate of the TPT boundary FF to be enabled */ + pseudo_bit_t force_nsw_clken[0x00001];/* force the clock gate of the NSW boundary FF to be enabled */ + pseudo_bit_t interrupt_every_packet_after_error[0x00001];/* Generate event to iRISC for every packet that reaches the top of the pipe if this CQPC QP had an error already */ + pseudo_bit_t interrupt_every_packet[0x00001];/* Generate event to iRISC for every packet that reaches the top of the pipe.
*/ + pseudo_bit_t implicit_ackreq[0x00001];/* for each packet, behave as if its AckReq bit is SET (relevant only for Responder Write/Send...) */ + pseudo_bit_t ignore_headerx[0x00001];/* to ignore the header-x indication from NSI */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00018]; + pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00120]; +/* --------------------------------------------------------- */ +}; + +/* Completion Engine Entry Access Gateway */ + +struct CEACCESSGW_st { /* Little Endian */ + struct GWCONTROL_st cegw_ctl; +/* --------------------------------------------------------- */ + pseudo_bit_t cq[0x00018]; + pseudo_bit_t opcode[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t mqpn[0x00018]; + pseudo_bit_t error_sdrm[0x00008]; /* If not 0, traps to the TCU iRISC */ +/* --------------------------------------------------------- */ + pseudo_bit_t slid_15_8[0x00008]; + pseudo_bit_t ee[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t psn_or_source_qp[0x00018]; + pseudo_bit_t slid_7_0[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t descriptor_ptr[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t length_commited[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t immediate_data[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t solicited_event[0x00001]; + pseudo_bit_t event_request[0x00001]; + pseudo_bit_t reserved0[0x00012]; + pseudo_bit_t sl[0x00004]; + pseudo_bit_t dlid_path[0x00007]; + pseudo_bit_t grh_bit[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* RDE Gateway */ + +struct RDEGW_st { /* Little Endian */ + struct GWCONTROL_st rdegwcntrl; /* RDE GateWay Command encoding + ================================================== + PUSH 0x10 + POP 0x11 + READ 0x12 + WRITE 0x13 + POPDESC 0x15 + + + RDE GateWay Addresses encoding + ================================================== + bits7:0 structure other bits Relevant + value accessed meaning Commands + ================================================== + 0 RDE release 17:16 - "00": release All Same + final PIPE - "01": clear the error + stage interrupt in FinalPipeStage, + of Error so another Non-Zero + syndrome will + interrupt iRISC.
+ (however, subsequent + packets will not + be executed) + - "10": Generate an EXTRA + Entry to CE with all + parameters from RDE's + FinalPipeStage (the + "release" command + will generate another + entry, depends upon the + IB_OpCode and syndrome) + - "11": Generate ACK to CE with + PSN-1 + (all parameters are + taken from RDE's + FinalPipeStage) + ================================================== + bits7:0 structure other bits Relevant + value accessed meaning Commands + ================================================== + 1 RDE final PIPE stage 10:8 - line number READ/WRITE + parameters 23:11 - RESERVED + --------------------------------------------------------------------------- + 2 Requester Scatter List 23:8 - ScatterList Pointer All Commands + --------------------------------------------------------------------------- + 3 CQPC 12:8 - CQPC index READ/WRITE + 15:13 - RESERVED + 17:16 - "00": CQPC Constants + - "01": CQPC r256length + - "10": CQPC RDB_and_more + - "11": RESERVED + 23:18 - RESERVED + --------------------------------------------------------------------------- + 4 Responder Scatter List 12:8 - CQPC index All Commands + --------------------------------------------------------------------------- + 5 NDA/CDA list and FALVs 12:8 - CQPC index READ/WRITE + 15:13 - RESERVED + 18:16 - 0: NDA, CDA contents WRITE/READ + - 1: allocate_nda allocated_nda + - 2: lock_nda locked_nda + - 3: free_nda freed_nda + - 4: lock_dsl locked_dsl + - 5: unlock_dsl valid_nda + - 6: relock_dsl valid_dsl + - 7: free_dsl freed_dsl + 23:19 - RESERVED + For NDA, CDA contents: POP DESC, and in addition READ/WRITE. + For the FALVs: WRITE to move the FALVs, + READ to see who is in this FALV state. + --------------------------------------------------------------------------- + 6-255 RESERVED + */ +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata0[0x00012]; + pseudo_bit_t reserved0[0x0000e]; +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t gwdata4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* @SDE_BIST */ + +struct SDE_BIST_st { /* Little Endian */ + pseudo_bit_t sdebistgffd_wdw_0[0x00007];/* 137 */ + pseudo_bit_t reserved0[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgffd_wdw_1[0x00007];/* 137 */ + pseudo_bit_t reserved1[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf0_wdw_0[0x00007];/* 139 */ + pseudo_bit_t reserved2[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf0_wdw_1[0x00007];/* 139 */ + pseudo_bit_t reserved3[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf1_wdw_0[0x00007];/* 140 */ + pseudo_bit_t reserved4[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf1_wdw_1[0x00007];/* 140 */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf2_wdw_0[0x00007];/* 140 */
+ pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf2_wdw_1[0x00007];/* 140 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf3_wdw_0[0x00007];/* 140 */ + pseudo_bit_t reserved8[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgdpf3_wdw_1[0x00007];/* 140 */ + pseudo_bit_t reserved9[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgbl_rdw_0[0x00007];/* 134 */ + pseudo_bit_t reserved10[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgbl_rdw_1[0x00007];/* 134 */ + pseudo_bit_t reserved11[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgbr_rdw_0[0x00007];/* 134 */ + pseudo_bit_t reserved12[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t sdebistgbr_rdw_1[0x00007];/* 134 */ + pseudo_bit_t reserved13[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x0000e]; + pseudo_bit_t reserved14[0x00012]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x0000a]; + pseudo_bit_t reserved15[0x00016]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00009]; + pseudo_bit_t reserved16[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved17[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* @Gather Engine Causes when Gathered Data for an EU (1 of 32) */ + +struct GeEuCause_st { /* Little Endian */ + pseudo_bit_t excause[32][0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Cause Registers */ + +struct SDE_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t cr_slave_timeout[0x00001]; + pseudo_bit_t parity_err[0x00001]; + pseudo_bit_t reserved0[0x0001e]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* Send Data Engine General Configuration */ + +struct SDEGRLCFG_st { /* Little Endian */ + struct TPTNSIIF_st getptnsi; +/* --------------------------------------------------------- */ + pseudo_bit_t xcredits[0x00005]; /* How many outstanding translations towards TPT are allowed. Do NOT put more than 0x10 */ + pseudo_bit_t reserved0[0x0000b]; + pseudo_bit_t xused[0x00005]; /* how many TPT translations are in flight */ + pseudo_bit_t reserved1[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t gff_credits_cfg[0x0000c];/* How many credits (16 bytes) are in GatherFiller. Do NOT put more than 0x100 */ + pseudo_bit_t gff_usedcred[0x0000c]; /* how many lines in GatherFiller are used */ + pseudo_bit_t performance_gf_livelock[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t stop_tpt[0x00001]; /* stop tpt accesses */ + pseudo_bit_t tpt_stopped[0x00001]; /* no OutStanding TPT accesses */ + pseudo_bit_t reserved2[0x0001c]; + pseudo_bit_t stop_nsw[0x00001]; /* Stop Nswitch Accesses. + There is a bug in this bit: + when you write 1 to bit 0 OR to bit 1, this bit will be SET; + writing '1' directly to this bit has no impact.
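 + A minimal quiesce sketch under this erratum (illustrative only; + read_reg/write_reg and the register name are assumptions, not + part of this file): + uint32_t v = read_reg(sde_grlcfg_stop_dword); + write_reg(sde_grlcfg_stop_dword, v | 1u); // set stop_tpt (bit 0); per the erratum this also sets stop_nsw (bit 30) + while (!(read_reg(sde_grlcfg_stop_dword) & 2u)) ; // poll tpt_stopped (bit 1) + nsw_stopped (bit 31, declared just below) then reports that no NSW reads are outstanding.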
*/ + pseudo_bit_t nsw_stopped[0x00001]; /* no OutStanding NSW reads */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00018]; + pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00160]; +/* --------------------------------------------------------- */ +}; + +/* Gather Engine */ + +struct GATHERENG_st { /* Little Endian */ + pseudo_bit_t portnum[0x00004]; /* Port Number to which the Gather Engine is connected */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + struct GASTAT_st ga_status; +/* --------------------------------------------------------- */ + struct GECTORSTAT_st gector_status; +/* --------------------------------------------------------- */ + pseudo_bit_t max_linklist_entries[0x00006];/* the maximum number of entries allowed in the Gather Linklist. It is the upper bound on the number of OutStanding reads */ + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t busy_entries[0x00006]; /* how many linklist entries are in use */ + pseudo_bit_t reserved2[0x00012]; +/* --------------------------------------------------------- */ +}; + +/* SerDes DFT Register */ + +struct SERDESDFT_st { /* Little Endian */ + pseudo_bit_t stop_pd1[0x00001]; + pseudo_bit_t stop_pd2[0x00001]; + pseudo_bit_t override_pd2[0x00001]; + pseudo_bit_t inc_pd2[0x00001]; + pseudo_bit_t pdsel[0x00001]; + pseudo_bit_t pdstate_en[0x00001]; /* This is an enable signal to pdstate. */ + pseudo_bit_t inc_pd1[0x00001]; + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t reserved1[0x0000c]; + pseudo_bit_t pdstate[0x0000c]; +/* --------------------------------------------------------- */ +}; + +/* IB port Event2 */ + +struct IB_port_Event2_st { /* Little Endian */ + pseudo_bit_t FCupdateWDtimer[0x00001]; + pseudo_bit_t GRHVL15discarded[0x00001]; + pseudo_bit_t RAWVL15discarded[0x00001]; + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t P_KEYInboundDiscarded[0x00001]; + pseudo_bit_t P_KEYOutboundDiscarded[0x00001]; + pseudo_bit_t PortRcvEBP[0x00001]; + pseudo_bit_t PortXmitDiscardRaw[0x00001]; + pseudo_bit_t PortRcvBadFCop[0x00001]; + pseudo_bit_t ExcessiveBufferOverrunError[0x00001]; + pseudo_bit_t PortRcvPktSymbolError2[0x00001]; + pseudo_bit_t PortRcvRemotePhyErrors[0x00001]; + pseudo_bit_t PortXmitConstraintError[0x00001]; + pseudo_bit_t PortRcvConstraintError[0x00001]; + pseudo_bit_t reserved1[0x00001]; + pseudo_bit_t MCASTVL15discarded[0x00001]; + pseudo_bit_t CauseSigDet[0x00001]; + pseudo_bit_t CauseComDet[0x00001]; + pseudo_bit_t ConfigSpaceTimeOut[0x00001]; + pseudo_bit_t reserved2[0x00008]; + pseudo_bit_t reserved3[0x00001]; + pseudo_bit_t RQDatafifo1ParityError[0x00001]; + pseudo_bit_t RQDatafifo2ParityError[0x00001]; + pseudo_bit_t RQDescFIFOParityError[0x00001]; + pseudo_bit_t LoopBsckFIFOparityError[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* IB Port Event */ + +struct IB_port_event_st { /* Little Endian */ + pseudo_bit_t ProgFuncCounterCause[0x00001];/* Programmable_Function_Counter */ + pseudo_bit_t PortData[0x00001]; /* PortData */ + pseudo_bit_t PortRcvErrorsCause[0x00001];/* PortRcvErrors */ + pseudo_bit_t PortXmitPktDiscards[0x00001];/* PortXmitPktDiscards */ + pseudo_bit_t VL15Dropped[0x00001]; /* VL15Dropped */ + pseudo_bit_t PortRcvPktLen[0x00001];/* PortRcvPktLen */ + pseudo_bit_t PortRcvPktCRC[0x00001];/* PortRcvPktCRC */ + pseudo_bit_t
PortRcvPktSymbolError[0x00001];/* PortRcvPktSymbolError */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t PortRcvPktVL[0x00001]; /* PortRcvPktVL */ + pseudo_bit_t PortRcvCreditsExceeded[0x00001];/* PortRcvCreditsExceeded */ + pseudo_bit_t PortRcvDiscardOther[0x00001];/* PortRcvDiscardOther */ + pseudo_bit_t PortXmitHOQTimeout[0x00001];/* PortXmitHOQTimeout */ + pseudo_bit_t PortXmitExcessFC[0x00001];/* PortXmitExcessFC */ + pseudo_bit_t PortXmitInactive[0x00001];/* PortXmitInactive */ + pseudo_bit_t PortXmitMTUExceeded[0x00001];/* PortXmitMTUExceeded */ + pseudo_bit_t PortXmitDiscardOther[0x00001];/* PortXmitDiscardOther */ + pseudo_bit_t PortRcvFull[0x00001]; /* PortRcvFull */ + pseudo_bit_t PortRcvDynamicFull[0x00001];/* PortRcvDynamicFull */ + pseudo_bit_t PortRcvEmpty[0x00001]; /* PortRcvEmpty */ + pseudo_bit_t PortXmitVLArbLivelock[0x00001];/* PortXmitVLArbLivelock */ + pseudo_bit_t reserved1[0x00001]; + pseudo_bit_t PhyLinkUp[0x00001]; /* Phy_Link_Up */ + pseudo_bit_t PhyLinkDown[0x00001]; /* Phy_Link_Down */ + pseudo_bit_t PhyError[0x00001]; /* Phy_Error */ + pseudo_bit_t PhyUnsuccessfulTraining[0x00001];/* Phy_Unsuccessful_Training */ + pseudo_bit_t LocalLinkIntegrity[0x00001];/* Local_Link_Integrity */ + pseudo_bit_t SWControlled27[0x00001];/* SW Controlled Event 27 */ + pseudo_bit_t SWControlled28[0x00001];/* SW Controlled Event 28 */ + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t SWControlled30[0x00001];/* SW Controlled Event 30 */ + pseudo_bit_t SWControlled31[0x00001];/* SW Controlled Event 31 */ +/* --------------------------------------------------------- */ +}; + +/* IB Port IO Configuration */ + +struct IB_port_IO_config_st { /* Little Endian */ + pseudo_bit_t SD[0x00005]; /* Sync Delay */ + pseudo_bit_t reserved0[0x00011]; + pseudo_bit_t ParallelLoopBack[0x00001];/* Operating in loopback mode - bypassing the serdes */ + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t IODD[0x00002]; /* I/O Data Delay */ + pseudo_bit_t reserved2[0x00001]; +/* --------------------------------------------------------- */ +}; + +/* IB Link Transmit */ + +struct ib_link_transmit_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00011]; + pseudo_bit_t HCBTBD[0x00001]; /* Half Clock BtoB Disable */ + pseudo_bit_t MXTBSS[0x0000e]; /* Max Time between Skip Sets */ +/* --------------------------------------------------------- */ +}; + +/* NTU debug fsm */ + +struct NTU_debug_fsm_st { /* Little Endian */ + pseudo_bit_t PCUPWcmd[0x00001]; + pseudo_bit_t PCUPWdata[0x00001]; + pseudo_bit_t PCUPWovf[0x00001]; + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t PCUNPcmd[0x00001]; + pseudo_bit_t PCUNPdata[0x00001]; + pseudo_bit_t PCUNPovf[0x00001]; + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t HCAPWcmd[0x00001]; + pseudo_bit_t HCAPWdata[0x00001]; + pseudo_bit_t HCAPWovf[0x00001]; + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t HCANPcmd[0x00001]; + pseudo_bit_t HCANPdata[0x00001]; + pseudo_bit_t HCANPovf[0x00001]; + pseudo_bit_t reserved3[0x0000e]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntpsm_cmd_ps[0x00004]; + pseudo_bit_t ntpsm_rdplc_ps[0x00002]; + pseudo_bit_t ntpsm_attwr_ps[0x00002]; + pseudo_bit_t ntpsm_err_ps[0x00003]; + pseudo_bit_t reserved4[0x00001]; + pseudo_bit_t ntpsm_att_ps[0x00003]; + pseudo_bit_t ntpsm_att_empty[0x00001]; + pseudo_bit_t reserved5[0x00002]; + pseudo_bit_t ntpsm_main_ps[0x00002]; + pseudo_bit_t ntpsm_desc_ps[0x00003]; + pseudo_bit_t ntpsm_drbl_ps[0x00002]; + pseudo_bit_t reserved6[0x00003]; + pseudo_bit_t 
ntpsm_ge_copy_ps_0_[0x00001]; + pseudo_bit_t ntpsm_ge_copy_ps_1_[0x00001]; + pseudo_bit_t ntpsm_ge_copy_ps_2_[0x00001]; + pseudo_bit_t ntpsm_ge_copy_ps_3_[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntdl_arb_ps[0x00003]; + pseudo_bit_t ntdl_wr_ps[0x00003]; + pseudo_bit_t ntdl_rd_ps[0x00002]; + pseudo_bit_t ntdl_cmp_ps[0x00003]; + pseudo_bit_t ntdl_empty_ps[0x00002]; + pseudo_bit_t ntdl_pall_ps[0x00002]; + pseudo_bit_t ntdl_empty_cnt[0x00008]; + pseudo_bit_t reserved7[0x00003]; + pseudo_bit_t ntob_hit_ps[0x00002]; + pseudo_bit_t ntob_nout_ps[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntcq_ca_ps[0x00002]; + pseudo_bit_t ntcq_entry_valid_0_[0x00001]; + pseudo_bit_t ntcq_entry_valid_1_[0x00001]; + pseudo_bit_t ntcq_entry_valid_2_[0x00001]; + pseudo_bit_t ntcq_entry_valid_3_[0x00001]; + pseudo_bit_t ntcq_wr_ps[0x00002]; + pseudo_bit_t ntcq_err_if_0_[0x00001]; + pseudo_bit_t ntcq_err_if_1_[0x00001]; + pseudo_bit_t reserved8[0x00004]; + pseudo_bit_t nthw_rdr_ps[0x00003]; + pseudo_bit_t nthw_buffer_valid_0_[0x00001]; + pseudo_bit_t nthw_buffer_valid_1_[0x00001]; + pseudo_bit_t nthw_buffer_valid_2_[0x00001]; + pseudo_bit_t nthw_buffer_valid_3_[0x00001]; + pseudo_bit_t reserved9[0x00009]; + pseudo_bit_t ntcfg_arb_ps[0x00002]; +/* --------------------------------------------------------- */ + pseudo_bit_t nttx_hca_tx_arb[0x00001]; + pseudo_bit_t nttx_hca_tx_ps[0x00003]; + pseudo_bit_t nttx_hca_pcrd[0x00003]; + pseudo_bit_t nttx_hca_rcrd[0x00003]; + pseudo_bit_t reserved10[0x00003]; + pseudo_bit_t nttx_pci_tx_arb[0x00002]; + pseudo_bit_t nttx_pci_tx_ps[0x00003]; + pseudo_bit_t nttx_pci_rcrd[0x00003]; + pseudo_bit_t reserved11[0x00008]; + pseudo_bit_t nthr_rd_ps[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* NTU PCU Error FIFO Gateway */ + +struct NTUERRFIFO_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntuerrfifoctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t NSB_syndrome[0x00008]; + pseudo_bit_t reserved0[0x00016]; + pseudo_bit_t err_IO[0x00001]; + pseudo_bit_t err_RW[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t NSB_Address_63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t NSB_Address_31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ERR_NSB_Size[0x00010]; + pseudo_bit_t NSB_Seq_Id[0x00008]; + pseudo_bit_t NSB_segm[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* NTU NSB Attribute FIFO */ + +struct NTUATTRFIFO_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntuattrfifoctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t ATTR_QPN[0x00018]; + pseudo_bit_t S[0x00001]; + pseudo_bit_t SR[0x00001]; + pseudo_bit_t DB[0x00002]; + pseudo_bit_t PW[0x00001]; + pseudo_bit_t att_RW[0x00001]; + pseudo_bit_t C[0x00001]; + pseudo_bit_t att_CMD[0x00001]; /* Differs between command and data */ +/* --------------------------------------------------------- */ + pseudo_bit_t Read_Prefetch_Length[0x0000c];/* or Data */ + pseudo_bit_t MTU[0x00003]; /* Maximum Transfer Unit + or Data */ + pseudo_bit_t Data_Size[0x00009]; /* or Data */ + pseudo_bit_t 
Data[0x00008]; /* valid only for data + Reserved in command */ +/* --------------------------------------------------------- */ + pseudo_bit_t RADR_95_64[0x00020]; /* or Data */ +/* --------------------------------------------------------- */ + pseudo_bit_t RADR_63_32[0x00020]; /* or Data */ +/* --------------------------------------------------------- */ + pseudo_bit_t RADR_31_0[0x00020]; /* or Data */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* NTU NSW Cell Exception FIFO */ + +struct NTUEXCPFIFO_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntuexcgwctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t ExecFifo_QPN[0x00018]; /* Queue pair number */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t exc_rdb[0x00001]; + pseudo_bit_t exc_int[0x00001]; + pseudo_bit_t WRM[0x00001]; + pseudo_bit_t exc_fence[0x00002]; + pseudo_bit_t exc_err[0x00001]; + pseudo_bit_t exc_CMD[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t NSB_Address_H[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t NSB_Address_L[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t Exec_NSB_Size[0x00010]; + pseudo_bit_t exc_NSB_Seq_Id[0x00008]; + pseudo_bit_t exc_NSB_Segm[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t exc_data[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t exc_io[0x00001]; + pseudo_bit_t NP[0x00001]; + pseudo_bit_t exc_RW[0x00001]; + pseudo_bit_t reserved1[0x0001d]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* NTU Events */ + +struct NTUEVENTS_st { /* Little Endian */ + struct QLTMISSINFO_st qltmissinfo; +/* --------------------------------------------------------- */ + struct COMPERRINFO_st comperrinfo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00080]; +/* --------------------------------------------------------- */ + struct NTU_PERF_st ntu_perf; /* NTU Performance Counter Interrupt Info Register (see Datasheet) */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00100]; +/* --------------------------------------------------------- */ +}; + +/* NTU Descriptors Gateway */ + +struct NTUDESCGW_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntudescgwctrl;/* NTU Descriptor Gateway Control Register + + Command: + 0x00 Reserved + 0x01 Read Data and NextPtr - Address is entry number in FIFO (8 bits) + 0x02 Write Data and NextPtr - Address is entry number in FIFO (8 bits) + 0x03 Pop - Address is QP# (7 bits) + 0x04 Push - Address is QP# (7 bits) + 0x05-0x3F Reserved + + Status + 0x00 OK + 0x01-0x3F Reserved */ +/* --------------------------------------------------------- */ + pseudo_bit_t fatal_err_ext_synd[0x00018];/* descriptor entry0 [127:96] */ + pseudo_bit_t fatal_err_synd[0x00008];/* descriptor entry0 [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_ip[0x00020]; /* descriptor entry0 [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t tcu_ip[0x00020];
/* descriptor entry0 [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t exer_ip[0x00020]; /* descriptor entry1 [127:96] */ +/* --------------------------------------------------------- */ + pseudo_bit_t exes_ip[0x00020]; /* descriptor entry1 [95:64] */ +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_ip[0x00020]; /* descriptor entry1 [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t ntu_ip[0x00020]; /* descriptor entry1 [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t fatal_err_int[0x00008]; + pseudo_bit_t reserved9[0x00008]; + pseudo_bit_t last_size[0x00002]; + pseudo_bit_t reserved10[0x00006]; + pseudo_bit_t write_push_opcode[0x00002];/* Descriptor opcode */ + pseudo_bit_t reserved11[0x00006]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntuemptyptr[0x00008]; /* access to the empty pointer */ + pseudo_bit_t reserved12[0x00008]; + pseudo_bit_t empty_counter[0x00008]; + pseudo_bit_t reserved13[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntudescQPN[0x00018]; /* Queue Pair Number */ + pseudo_bit_t reserved14[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00180]; +/* --------------------------------------------------------- */ +}; + +/* NTU QP Window Context Gateway */ + +struct NTUQPWGW_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntuqpwgwctrl;/* NTU QP Window Context Gateway Control + + Command + 0x00 Reserved + 0x01 Read + 0x02 Write + 0x03-0x3F Reserved + + Status + 0x00 OK + 0x01-0x3F Reserved + + Address + QPN in bits 6:0 */ +/* --------------------------------------------------------- */ + pseudo_bit_t First[0x00008]; + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t Last[0x00008]; + pseudo_bit_t reserved1[0x00007]; + pseudo_bit_t V[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00007]; + pseudo_bit_t QPN_23_7[0x00011]; + pseudo_bit_t reserved3[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* NTU QLT Gateway */ + +struct NTUQLTGW_st { /* Little Endian */ + struct GWCONTROL_NTU_st ntuqltgw; /* NTU Gateway Control Register + + Commands: + 0x00 Reserved + 0x01 Read + 0x02 Write + 0x03 Make Valid (validates more than required. bug 4986) + 0x04 Make Invalid (invalidates more than required.
bug 4986) + 0x05-0x3F Reserved + + Status + 0x00 OK + 0x01 Error + 0x02-0x3F Reserved + + QLT Address is in bits 6:0 */ +/* --------------------------------------------------------- */ + pseudo_bit_t WM[0x00002]; + pseudo_bit_t MTU[0x00003]; + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t Read_Length[0x00008]; + pseudo_bit_t reserved1[0x0000a]; + pseudo_bit_t Fence[0x00002]; /* 00 - Normal flow + 01 - Reserved + 10 - Breakpoint, no fence + 11 - Breakpoint with fence - don't use it, see bug 5130 */ + pseudo_bit_t qlt_e[0x00001]; + pseudo_bit_t S[0x00001]; + pseudo_bit_t S_R_[0x00001]; + pseudo_bit_t Err[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t TAG_47_32[0x00010]; + pseudo_bit_t reserved3[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t TAG_31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t Virtual_Address_63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x0000c]; + pseudo_bit_t Virtual_Address_31_12[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t RKey[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t Target_QPN[0x00018]; + pseudo_bit_t reserved6[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x000c0]; +/* --------------------------------------------------------- */ +}; + +/* @ntu bist */ + +struct NTU_BIST_st { /* Little Endian */ + pseudo_bit_t ntbist_qlt_msb_rdw_0[0x00007];/* 144 */ + pseudo_bit_t reserved0[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_desc_rdw_0[0x00007];/* 135 */ + pseudo_bit_t reserved1[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_desc_rdw_1[0x00007];/* 135 */ + pseudo_bit_t reserved2[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_rdb_rdw_0[0x00007];/* 132 */ + pseudo_bit_t reserved3[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_rdb_rdw_1[0x00007];/* 133 */ + pseudo_bit_t reserved4[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_rdb_rdw_2[0x00007];/* 133 */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_rdb_rdw_3[0x00007];/* 133 */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_hca_rx_pfifo_wdw_0[0x00007];/* 160 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_hca_rx_pfifo_wdw_1[0x00007];/* 160 */ + pseudo_bit_t reserved8[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_hca_rx_pfifo_wdw_2[0x00007];/* 160 */ + pseudo_bit_t reserved9[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_hca_rx_pfifo_wdw_3[0x00007];/* 160 */ + pseudo_bit_t reserved10[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_pci_rx_pfifo_wdw_0[0x00007];/* 160 */ +
pseudo_bit_t reserved11[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_pci_rx_pfifo_wdw_1[0x00007];/* 160 */ + pseudo_bit_t reserved12[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_pci_rx_pfifo_wdw_2[0x00007];/* 160 */ + pseudo_bit_t reserved13[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntbist_pci_rx_pfifo_wdw_3[0x00007];/* 160 */ + pseudo_bit_t reserved14[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_0[0x00006];/* 158 */ + pseudo_bit_t reserved15[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_1[0x00006];/* 158 */ + pseudo_bit_t reserved16[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code_rdw_0[0x00006];/* 158 */ + pseudo_bit_t reserved17[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code_rdw_1[0x00006];/* 158 */ + pseudo_bit_t reserved18[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code_rdw_2[0x00006];/* 158 */ + pseudo_bit_t reserved19[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist4waydata_code_rdw_3[0x00006];/* 158 */ + pseudo_bit_t reserved20[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00015]; + pseudo_bit_t reserved21[0x0000b]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x0000f]; + pseudo_bit_t reserved22[0x00011]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00011]; + pseudo_bit_t reserved23[0x0000f]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved24[0x00100]; +/* --------------------------------------------------------- */ +}; + +/* @NTUWRB */ + +struct NTUWRB_st { /* Little Endian */ + pseudo_bit_t reserved0[0x0001d]; + pseudo_bit_t clr[0x00001]; + pseudo_bit_t kill[0x00001]; + pseudo_bit_t err[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpn[0x00018]; + pseudo_bit_t nda[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00016]; + pseudo_bit_t wrb_fsm[0x00003]; + pseudo_bit_t reserved3[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t wrb_nsb_current_address_63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t wrb_nsb_current_address_31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t wrb_nsb_current_size[0x0000c]; + pseudo_bit_t reserved5[0x00003]; + pseudo_bit_t io[0x00001]; + pseudo_bit_t wrb_nsb_current_sequence_id[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x000e0]; +/* --------------------------------------------------------- */ +}; + +/* @NTURDB */ + +struct NTURDB_st { /* Little Endian */ + pseudo_bit_t reserved0[0x0001d]; + pseudo_bit_t clr[0x00001]; + pseudo_bit_t kill[0x00001]; + 
pseudo_bit_t err[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t QPN[0x0001b]; + pseudo_bit_t NDA[0x00001]; + pseudo_bit_t reserved1[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_start_address[0x0001b]; + pseudo_bit_t reserved2[0x00005]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_prefetch_address[0x0001b]; + pseudo_bit_t reserved3[0x00004]; + pseudo_bit_t wm[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_in_counter[0x00008]; + pseudo_bit_t rdb_sampled_size[0x0000c]; + pseudo_bit_t flush[0x00001]; + pseudo_bit_t error[0x00001]; + pseudo_bit_t rdb_fsm[0x00003]; + pseudo_bit_t rdb_out_fsm[0x00003]; + pseudo_bit_t reserved4[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_read_valid_counter[0x00010]; + pseudo_bit_t reserved5[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_nsb_current_address_63_32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_nsb_current_address_31_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_nsb_current_size[0x0000c]; + pseudo_bit_t reserved6[0x00003]; + pseudo_bit_t io[0x00001]; + pseudo_bit_t rdb_nsb_current_sequence_id[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t rdb_prefetch_size[0x0000c]; + pseudo_bit_t reserved7[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x000c0]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct CAUSEREG_st { /* Little Endian */ + pseudo_bit_t cause[0x00020]; /* cause register data */ +/* --------------------------------------------------------- */ + pseudo_bit_t clrcause[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t setcause[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t evtserviced[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t evtena0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t evtena1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* Fencing causes every PCU transaction to be routed to error FIFO instead of normal flow. 
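 + (Illustrative usage with a hypothetical helper, not from this file: a debug path might + write_reg(ntufence, 1u << 31); // fence_trap_set: trap all PCU traffic to the error FIFO + inspect and drain the error FIFO through the NTUERRFIFO gateway above, then + write_reg(ntufence, 1u << 30); // fence_trap_clr: resume normal flow + Bit positions follow the field layout declared below.)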
This table controls that fencing. */ + +struct NTUFENCE_st { /* Little Endian */ + pseudo_bit_t reserved0[0x0001e]; + pseudo_bit_t fence_trap_clr[0x00001]; + pseudo_bit_t fence_trap_set[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* @NTUQLTVALID */ + +struct NTUQLTVALID_st { /* Little Endian */ + pseudo_bit_t valid0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t valid1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t valid2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t valid3[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* NTU General Configuration */ + +struct NTUGENERAL_st { /* Little Endian */ + pseudo_bit_t NtuDdestid[0x00003]; /* N-Switch id of NTU + gen_reg_0[2:0] + ntcfg_dest_id_2_0 */ + pseudo_bit_t PciDestid[0x00003]; /* N-Switch id of PCU + gen_reg_0[5:3] + ntcfg_dest_id_5_3 */ + pseudo_bit_t HcaDestid[0x00003]; /* N-Switch id of HCA + gen_reg_0[8:6] + ntcfg_dest_id_8_6 */ + pseudo_bit_t DmuDestid[0x00003]; /* N-Switch id of DMU + gen_reg_0[11:9] + ntcfg_dest_id_11_9 */ + pseudo_bit_t LdtDestid[0x00003]; + pseudo_bit_t reserved0[0x0000f]; + pseudo_bit_t RDB_mode[0x00001]; + pseudo_bit_t WRM[0x00001]; /* ??? + gen_reg_0[12:12] */ +/* --------------------------------------------------------- */ + pseudo_bit_t NSWriteCutTimeOut[0x00008];/* counted time of waiting for an NS transaction from the PCU, before cutting this transaction and sending a Blue-Flame to the HCA + gen_reg_1[7:0] + ntcfg_ntu_wrto */ + pseudo_bit_t Desc_array_size[0x00002]; + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t HCA_syndrome[0x00008]; + pseudo_bit_t PCI_syndrome[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t p2p_page_size[0x00004];/* page size at the QLT + gen_reg_2[3:0] + + page_size encoding + 0000 - 4KB + 0001 - 8KB + 0010 - 64KB + 0011 - 1MB + 0100 - 4MB + 0101 - 16MB + 0110 - 64MB + 0111 - 128MB + 1000-1111 - Reserved + */ + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t PCU_RX_rsp_cred[0x00003]; + pseudo_bit_t reserved4[0x00001]; + pseudo_bit_t PCU_RX_pw_cred[0x00004]; + pseudo_bit_t reserved5[0x00004]; + pseudo_bit_t PCU_RX_np_cred[0x00003]; + pseudo_bit_t reserved6[0x00001]; + pseudo_bit_t PCU_TX_rsp_cred[0x00003]; + pseudo_bit_t reserved7[0x00001]; + pseudo_bit_t PCU_TX_pw_cred[0x00003]; + pseudo_bit_t reserved8[0x00001]; + pseudo_bit_t PCU_TX_np_cred[0x00003]; + pseudo_bit_t reserved9[0x00005]; +/* --------------------------------------------------------- */ + pseudo_bit_t HCA_RX_rsp_cred[0x00003]; + pseudo_bit_t reserved10[0x00001]; + pseudo_bit_t HCA_RX_pw_cred[0x00003]; + pseudo_bit_t reserved11[0x00005]; + pseudo_bit_t HCA_RX_np_cred[0x00003]; + pseudo_bit_t reserved12[0x00001]; + pseudo_bit_t HCA_TX_rsp_cred[0x00003]; + pseudo_bit_t reserved13[0x00001]; + pseudo_bit_t HCA_TX_pw_cred[0x00003]; + pseudo_bit_t reserved14[0x00001]; + pseudo_bit_t HCA_TX_np_cred[0x00003]; + pseudo_bit_t reserved15[0x00005]; +/* --------------------------------------------------------- */ + pseudo_bit_t RdResponseValidTimeout[0x00010];/* ???
+ gen_reg_6[15:0] */ + pseudo_bit_t reserved16[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t gp_cfg[0x00008]; /* general purpose configuration register */ + pseudo_bit_t reserved17[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved18[0x000e0]; +/* --------------------------------------------------------- */ +}; + +/* North Switch Bus Address Decoding */ + +struct NSWADDRDEC_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00180]; +/* --------------------------------------------------------- */ + pseudo_bit_t ntu_bar_0_msbs[0x00020];/* This bar is used for completions, scatters and descriptor reads from the HCA to the NTU */ +/* --------------------------------------------------------- */ + pseudo_bit_t ntu_bar_size[0x00006]; /* in megabytes (minimum 2) */ + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t enable_ntu_0[0x00001]; + pseudo_bit_t reserved2[0x0000b]; + pseudo_bit_t ntu_bar_0_lsbs[0x0000c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t hca_bar_msbs[0x00020]; /* UAR in the HCA for D.B. and B.F. */ +/* --------------------------------------------------------- */ + pseudo_bit_t hca_bar_size[0x00006]; + pseudo_bit_t reserved4[0x00002]; + pseudo_bit_t enable_hca[0x00001]; + pseudo_bit_t reserved5[0x0000b]; + pseudo_bit_t hca_bar_lsbs[0x0000c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x005c0]; +/* --------------------------------------------------------- */ +}; + +/* SW Access to DIMMs Control Registers */ + +struct dmu_chk_dimm_st { /* Little Endian */ + struct GWCONTROL_st chk_gwctl; /* command = 0 - reserved. + command = 1 - init request + command = 2 - sr request + command = 3 - xsr request + command = 4 - chk wr request + command = 5 - chk rd request + command = 6 - chk wr then read then compare request + command = 7 - chk stop request + command = 8 - chk wr calibration request + command = 9 - reset dqs registers in the IO. + command = 10 - start calibration stage 0 + command = 11 - start calibration stage 1 */ +/* --------------------------------------------------------- */ + pseudo_bit_t stretch_en[0x00001]; /* when high the stretch circuit is enabled */ + pseudo_bit_t calib_cfg_test[0x00001];/* activates the calibration process by the iRISC */ + pseudo_bit_t short_calib0[0x00001]; + pseudo_bit_t calib_rd_dqsen_dly[0x00004];/* the delay of popping the data from the EB relative to + the calibrated dqs_en !!
*/ + pseudo_bit_t reserved0[0x00009]; + pseudo_bit_t nondist_mode[0x00001]; /* when high the calibration is performed non-destructively with respect to the data */ + pseudo_bit_t nondist_cnt[0x00004]; /* for the dqs_en calibration in non-destructive mode */ + pseudo_bit_t rrelim_en[0x00001]; + pseudo_bit_t rrelim_lim[0x00006]; + pseudo_bit_t reserved1[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t calib0_rst_cnt[0x00004]; + pseudo_bit_t calib0_dqsen_cnt[0x00004]; + pseudo_bit_t calib0_hold_cnt[0x00004]; + pseudo_bit_t calib1_rst_cnt[0x00004]; + pseudo_bit_t calib1_hold_cnt[0x00004]; + pseudo_bit_t reserved2[0x0000c]; +/* --------------------------------------------------------- */ + pseudo_bit_t dly_clka_0[0x00005]; + pseudo_bit_t dly_clka_2[0x00005]; + pseudo_bit_t dly_clka_1[0x00005]; + pseudo_bit_t dly_clkb_0[0x00005]; + pseudo_bit_t dly_clkb_1[0x00005]; + pseudo_bit_t dly_clkb_2[0x00005]; + pseudo_bit_t reserved3[0x00002]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t chk_end_da[0x00002]; + pseudo_bit_t chk_end_ba[0x00002]; + pseudo_bit_t chk_start_da[0x00002]; + pseudo_bit_t chk_start_ba[0x00002]; + pseudo_bit_t chk_ap_mode[0x00002]; + pseudo_bit_t reserved5[0x00016]; +/* --------------------------------------------------------- */ + pseudo_bit_t chk_start_ra[0x00010]; + pseudo_bit_t chk_start_ca[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t chk_end_ra[0x00010]; + pseudo_bit_t chk_end_ca[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t chk_clib_pad[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ca_in_check[0x00010]; /* current column address in the check process */ + pseudo_bit_t ra_in_check[0x00010]; /* current row address in the check process */ +/* --------------------------------------------------------- */ + pseudo_bit_t da_in_chk[0x00002]; + pseudo_bit_t ba_in_chk[0x00002]; + pseudo_bit_t reserved6[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x001e0]; +/* --------------------------------------------------------- */ + pseudo_bit_t pvt_en[0x00001]; /* when high the pvt circuit is enabled */ + pseudo_bit_t rddqs_drift_dly[0x00002];/* the delay boundary for the dqs to detect dqs drift */ + pseudo_bit_t dq_delay[0x00002]; /* Controls the delay of the dq[63:0] and ecc[7:0] pads in the read path. */ + pseudo_bit_t reserved8[0x00003]; + pseudo_bit_t mdat_powerup[0x00001]; /* when high the dimm inputs are enabled !!
*/ + pseudo_bit_t mecc_powerup[0x00001]; + pseudo_bit_t mdatx4_powerup[0x00001]; + pseudo_bit_t meccx4_powerup[0x00001]; + pseudo_bit_t reserved9[0x00004]; + pseudo_bit_t mclka_oe[0x00003]; /* clock group A OE */ + pseudo_bit_t mclkb_oe[0x00003]; /* clock group B OE */ + pseudo_bit_t mrst_oe[0x00001]; /* the dram reset OE */ + pseudo_bit_t mckea_en_oe[0x00001]; /* the cke 'A' OE */ + pseudo_bit_t mckeb_en_oe[0x00001]; /* the clock enable B OE */ + pseudo_bit_t mcmda_oe[0x00001]; /* oe for all the signals in command A */ + pseudo_bit_t mcmdb_oe[0x00001]; /* oe of all command B group */ + pseudo_bit_t mdat_oe[0x00001]; /* OE for all the dq/dqs/dqm of the data outputs */ + pseudo_bit_t mecc_oe[0x00001]; /* OE for all the dq/dqs/dqm of the data outputs */ + pseudo_bit_t reserved10[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t pvt_res0[0x00020]; /* the pvt map from the dmu io */ +/* --------------------------------------------------------- */ + pseudo_bit_t pvt_res1[0x00020]; /* the pvt map from the dmu io */ +/* --------------------------------------------------------- */ + pseudo_bit_t pvt_res2[0x00020]; /* the pvt map from the dmu io */ +/* --------------------------------------------------------- */ + pseudo_bit_t pvt_res3[0x00020]; /* the pvt map from the dmu io */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00400]; +/* --------------------------------------------------------- */ + pseudo_bit_t dimm0_chk_pad[16][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t dimm1_chk_pad[16][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t dimm2_chk_pad[16][0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t dimm3_chk_pad[16][0x00020]; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb0; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb1; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb2; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb3; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb4; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb5; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb6; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb7; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb8; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb9; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb10; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb11; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb12; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb13; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb14; +/* --------------------------------------------------------- */ + struct DMURDDQSCLB_st dmurddqsclb15; +/* 
+    struct DMURDDQSCLB_st dmurddqsclb16;
+/* --------------------------------------------------------- */
+    struct DMURDDQSCLB_st dmurddqsclb17;
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved12[0x00080];
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb0;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb1;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb2;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb3;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb4;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb5;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb6;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb7;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb8;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb9;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb10;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb11;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb12;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb13;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb14;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb15;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb16;
+/* --------------------------------------------------------- */
+    struct DmuRdDqsenClb_st dmurddqsenclb17;
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved13[0x00200];
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb0;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb1;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb2;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb3;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb4;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb5;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb6;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb7;
+/* --------------------------------------------------------- */
+    struct DMUWRCLB_st dmuwrclb8;
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved14[0x005c0];
+/* --------------------------------------------------------- */
+};
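Throughout these headers the pseudo_bit_t arrays encode widths, not storage: the (hex) array dimension is the field's width in bits, and fields pack LSB-first within each 32-bit dword of the little-endian register file. A minimal extraction sketch under that packing assumption; the shift/width values are illustrative, derived from the declaration order, not quoted from a spec.

#include <stdint.h>

/* Extract a 'width'-bit field (width < 32) starting at bit 'shift'
 * of a register dword. E.g. nondist_mode[0x00001] above follows the
 * 9-bit reserved0, so it would be get_field(dword0, 9, 1). */
static inline uint32_t get_field(uint32_t dword, unsigned shift, unsigned width)
{
    return (dword >> shift) & ((1u << width) - 1u);
}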
+
+/* DIMM Configuration */
+
+struct DIMMConfig_st {    /* Little Endian */
+    struct DIMMGeneralConfig_st DIMMGeneralConfig;
+/* --------------------------------------------------------- */
+    struct DIMM_timimg_config_st DIMMTimingConfig;
+/* --------------------------------------------------------- */
+};
+
+/* DMU Debug External Domain */
+
+struct DMUDEBUGEXT_st {    /* Little Endian */
+    pseudo_bit_t chk_err[0x00001];    /* check machine error reporting due to data mismatch in
+        the WR/RD/COMPARE command. */
+    pseudo_bit_t tx_rdinfly_perr[0x00001];
+    pseudo_bit_t tx_rmwfifo_perr[0x00001];
+    pseudo_bit_t rx_datafifo_perr0[0x00001];
+    pseudo_bit_t rx_np_cmdfifo_perr0[0x00001];
+    pseudo_bit_t rx_p_cmdfifo_perr0[0x00001];
+    pseudo_bit_t rx_datafifo_perr1[0x00001];
+    pseudo_bit_t rx_np_cmdfifo_perr1[0x00001];
+    pseudo_bit_t rx_p_cmdfifo_perr1[0x00001];
+    pseudo_bit_t tx_rmwfifo_underrun[0x00001];
+    pseudo_bit_t tx_rdinfly_underrun[0x00001];
+    pseudo_bit_t tx_rmwfifo_overflow[0x00001];
+    pseudo_bit_t tx_rdinfly_overflow[0x00001];
+    pseudo_bit_t tx_cmdfifo_overflow[0x00001];
+    pseudo_bit_t tx_datafifo_overflow[0x00001];
+    pseudo_bit_t tx_errfifo_overflow[0x00001];
+    pseudo_bit_t rx_p_datafifo_underrun0[0x00001];
+    pseudo_bit_t rx_p_cmdfifo_underrun0[0x00001];
+    pseudo_bit_t rx_np_datafifo_underrun0[0x00001];
+    pseudo_bit_t rx_np_cmdfifo_underrun0[0x00001];
+    pseudo_bit_t rx_p_datafifo_underrun1[0x00001];
+    pseudo_bit_t rx_p_cmdfifo_underrun1[0x00001];
+    pseudo_bit_t rx_np_datafifo_underrun1[0x00001];
+    pseudo_bit_t rx_np_cmdfifo_underrun1[0x00001];
+    pseudo_bit_t reserved0[0x00008];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x00060];
+/* --------------------------------------------------------- */
+};
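The DMUDEBUGEXT word is a pack of one-bit error flags; a common handling pattern is to snapshot the dword and report each set bit by name. A sketch, assuming a hypothetical dmu_read32() MMIO accessor and LSB-first bit positions matching the declaration order above.

#include <stdint.h>
#include <stdio.h>

extern uint32_t dmu_read32(uint32_t offset);   /* hypothetical MMIO accessor */

static const char *const dmu_debug_ext_names[] = {
    "chk_err", "tx_rdinfly_perr", "tx_rmwfifo_perr",
    "rx_datafifo_perr0", "rx_np_cmdfifo_perr0", "rx_p_cmdfifo_perr0",
    /* ...remaining bits in declaration order... */
};

void dmu_dump_debug_ext(uint32_t offset)
{
    uint32_t v = dmu_read32(offset);
    unsigned n = sizeof(dmu_debug_ext_names) / sizeof(dmu_debug_ext_names[0]);
    for (unsigned bit = 0; bit < n; bit++)
        if (v & (1u << bit))
            printf("DMUDEBUGEXT: %s\n", dmu_debug_ext_names[bit]);
}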
+
+/* DMU status registers */
+
+struct dmustatus_st {    /* Little Endian */
+    pseudo_bit_t init_in_process[0x00001];  /* the DIMM init process is in progress */
+    pseudo_bit_t ar_in_process[0x00001];    /* auto refresh is in process */
+    pseudo_bit_t sr_in_process[0x00001];    /* self refresh is in process */
+    pseudo_bit_t sr_done[0x00001];          /* the self refresh process is over (exit self refresh can be asserted) */
+    pseudo_bit_t xsr_in_process[0x00001];   /* exiting from self refresh is in process */
+    pseudo_bit_t chk_in_process[0x00001];   /* the check dimm machine is working */
+    pseudo_bit_t calib0_in_process[0x00001];/* the DIMMs calibration stage 0 is in process */
+    pseudo_bit_t calib1_in_process[0x00001];/* the DIMMs calibration stage 1 is in process */
+    pseudo_bit_t reserved0[0x00008];
+    pseudo_bit_t dimms_stop[0x00004];       /* the stopped-DIMMs map, for NSB read/write transactions;
+        check dimm/calibration/SR/AR may be performed. */
+    pseudo_bit_t reserved1[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dimms_idle[0x00004];       /* the DIMM parser machines are idle */
+    pseudo_bit_t sch_idle[0x00001];         /* scheduler is idle */
+    pseudo_bit_t rx_cmd_empty0[0x00001];    /* dmu rx command fifo in port 0 is empty */
+    pseudo_bit_t rx_data_empty0[0x00001];   /* dmu rx data fifo in port 0 is empty */
+    pseudo_bit_t rx_cmd_empty1[0x00001];    /* dmu rx command fifo in port 1 is empty */
+    pseudo_bit_t rx_data_empty1[0x00001];   /* dmu rx data fifo in port 1 is empty */
+    pseudo_bit_t rmwfifo_empty[0x00001];    /* dmu rmw fifo is empty */
+    pseudo_bit_t reqinfly_empty[0x00001];   /* dmu requests-in-flight fifo is empty */
+    pseudo_bit_t reserved2[0x00015];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved3[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* Rx External Domain Configuration Register */
+
+struct DMURXCONFIGEXT_st {    /* Little Endian */
+    pseudo_bit_t resp_cred[0x00004];    /* Credits for responses to requests arriving at this port */
+    pseudo_bit_t port_quota[0x00008];   /* weight of this port (for the weighted round robin scheduling) */
+    pseudo_bit_t pwr_mng_rate[0x00008]; /* Gap between commands sent for execution in the DIMM. For power reduction. */
+    pseudo_bit_t Ap_mode[0x00002];      /* Auto precharge mode:
+        00 - No auto precharge
+        01 - Auto precharge per transaction
+        10 - Auto precharge per cell
+        11 - reserved */
+    pseudo_bit_t trgid[0x00003];        /* the NSB id connected to this port */
+    pseudo_bit_t reserved0[0x00007];
+/* --------------------------------------------------------- */
+};
+
+/* DMU statistical counters */
+
+struct DMUSTATISTIC_st {    /* Little Endian */
+    struct statistics_openpages_st open_pages_statistics;
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved0[0x00060];
+/* --------------------------------------------------------- */
+    struct DIMMStatistics_st dimm0statistics;
+/* --------------------------------------------------------- */
+    struct DIMMStatistics_st dimm1statistics;
+/* --------------------------------------------------------- */
+    struct DIMMStatistics_st dimm2statistics;
+/* --------------------------------------------------------- */
+    struct DIMMStatistics_st dimm3statistics;
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x00580];
+/* --------------------------------------------------------- */
+};
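Before kicking off maintenance operations (check, calibration, self/auto refresh), firmware would typically wait for the status word to show the scheduler and FIFOs quiescent. A sketch under the same assumptions as above (hypothetical dmu_read32(), LSB-first packing of the second dmustatus dword: dimms_idle in bits 3:0, sch_idle in bit 4, then the six empty flags up through bit 10); the register offset is illustrative.

#include <stdint.h>
#include <stdbool.h>

extern uint32_t dmu_read32(uint32_t offset);   /* hypothetical MMIO accessor */

#define DMUSTATUS_IDLE_DWORD 0x4               /* assumed offset of the second status dword */

/* True when all DIMM parsers and the scheduler are idle and all
 * rx/rmw/in-flight FIFOs report empty (bits 10:0 per the layout above). */
bool dmu_is_quiescent(void)
{
    uint32_t v    = dmu_read32(DMUSTATUS_IDLE_DWORD);
    uint32_t need = 0x7FFu;
    return (v & need) == need;
}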
+
+/* DMU External Interface General Configuration Registers */
+
+struct DMUGENERALEXT_st {    /* Little Endian */
+    pseudo_bit_t Burst_mode[0x00001];   /* in this mode, full write cache line transactions toward the dimm are performed. */
+    pseudo_bit_t AR_disable[0x00001];   /* Auto refresh:
+        1 - disable
+        0 - enable */
+    pseudo_bit_t errors_injection[0x00002];/* 00 - disable
+        01 - flip one bit
+        11 - flip two bits
+        10 - reserved */
+    pseudo_bit_t nop_default[0x00001];  /* when this field is high, the command that the DMU drives
+        toward the DIMMs is NOP, otherwise it is DESELECT */
+    pseudo_bit_t sr_shutclk_en[0x00001];/* in the self refresh process, shut off the DIMM clocks */
+    pseudo_bit_t shut_clk_high[0x00001];/* when shutting the clock, drive it high; the default is to shut the clock to zero */
+    pseudo_bit_t dynamic_pd_en[0x00001];/* Enable the dynamic power down mode. */
+    pseudo_bit_t Command_gap[0x00008];  /* idle gap, in dram clocks, between any commands toward the DIMMs */
+    pseudo_bit_t reserved0[0x00010];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dimm_cl[0x00003];      /* DIMM CAS Latency; the same parameter for all the DIMMs:
+        0 - 1.5 clocks
+        1 - 2 clocks
+        2 - 2.5 clocks
+        3 - 3 clocks
+        4 - 3.5 clocks */
+    pseudo_bit_t dimm_reg[0x00001];     /* the DIMMs are registered or unbuffered */
+    pseudo_bit_t di_mode[0x00002];      /* 0 - no di
+        1 - parity
+        2 - ecc without correction
+        3 - ecc with correction */
+    pseudo_bit_t di_report[0x00002];    /* 00 - don't report
+        01 - report only multi-bit + parity errors
+        10 - report all errors */
+    pseudo_bit_t rdi_report[0x00002];   /* the same as di_report but for rmw transactions */
+    pseudo_bit_t di_ignore[0x00001];    /* ignore di errors */
+    pseudo_bit_t rdi_ignore[0x00001];   /* ignore di errors on rmw transactions */
+    pseudo_bit_t reserved1[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Tar[0x00010];          /* Auto Refresh Period, in dram clocks */
+    pseudo_bit_t Tcke[0x00010];         /* Power Up interval before activating CKE in the initiation phase, in dram clocks. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t Tdll[0x0000c];         /* idle period until the DIMM DLL is synchronized after the initiation phase, in dram clocks. */
+    pseudo_bit_t pll_bypass_xor_mode[0x00001];/* In dmu pll bypass mode:
+        when this register is 1, the clock is a xor of two pins;
+        when this register is 0, the pll bypass works in the regular manner */
+    pseudo_bit_t gp_iocfg[0x00003];     /* reserved;
+        was: idle period for self refresh exit until a non-read command, in dram clocks */
+    pseudo_bit_t Txsrd[0x00008];        /* idle period for self refresh exit until a read command, in dram clocks */
+    pseudo_bit_t Tpre_ar[0x00004];      /* hold period before performing an Auto Refresh command, in dram clocks */
+    pseudo_bit_t Tpre_sr[0x00004];      /* hold period before performing a Self Refresh command, in dram clocks */
+/* --------------------------------------------------------- */
+    pseudo_bit_t sr_pre_shut_clk[0x00008];/* The period between self refresh and clock shutdown */
+    pseudo_bit_t sr_post_shut_clk[0x00008];/* period between clock reactivation and CKE reactivation */
+    pseudo_bit_t sr_interval[0x00008];  /* The time interval between assertion of the self refresh command on port A and port B */
+    pseudo_bit_t sr_duration[0x00008];  /* The minimum duration in self refresh state */
+/* --------------------------------------------------------- */
+    pseudo_bit_t sr_pre_shut_rst[0x00008];/* the period, in clocks, between asserting the SR command and activating the DIMM MRST_,
+        in the case of a registered DIMM with clock shutdown: after activating MRST_ the DMU waits
+        "sr_pre_shut_clk" and then shuts off the clocks */
+    pseudo_bit_t sr_post_shut_rst[0x00008];/* this period applies when exiting from self refresh: after activating the clocks
+        the DMU keeps MRST_ active for "sr_post_shut_rst", then deactivates MRST_ and asserts the XSR command
+        (relevant to registered DIMMs when clock shutdown is enabled). */
+    pseudo_bit_t reserved2[0x00010];
+/* --------------------------------------------------------- */
+    pseudo_bit_t prs_starv_lim[0x00008];
+    pseudo_bit_t reserved3[0x00018];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved4[0x003a0];
+/* --------------------------------------------------------- */
+};
+
+/* DIMM Bar and Mask registers */
+
+struct DIMMBarMask_st {    /* Little Endian */
+    pseudo_bit_t dimm_en[0x00001];      /* 1=Enabled
+        0=Disabled (DIMM not present) */
+    pseudo_bit_t reserved0[0x00013];
+    pseudo_bit_t bar_lsb[0x0000c];      /* lsb bits of the bar */
+/* --------------------------------------------------------- */
+    pseudo_bit_t bar_msb[0x00020];      /* msb bits of the bar */
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x00014];
+    pseudo_bit_t mask_lsb[0x0000c];     /* lsb bits of the mask */
+/* --------------------------------------------------------- */
+    pseudo_bit_t mask_msb[0x00020];     /* msb bits of the mask */
+/* --------------------------------------------------------- */
+};
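A DIMM segment decodes like a classic bar/mask pair: an address falls in the DIMM when the masked address equals the masked bar and dimm_en is set. A sketch that reassembles the 64-bit bar and mask from the msb/lsb fields; placing bar_lsb/mask_lsb at bits 31:20 of their dwords is my reading of the LSB-first layout above, not a spec statement.

#include <stdint.h>
#include <stdbool.h>

struct dimm_bar_mask {
    uint32_t dword0;    /* dimm_en | reserved0 | bar_lsb  */
    uint32_t bar_msb;
    uint32_t dword2;    /* reserved1 | mask_lsb           */
    uint32_t mask_msb;
};

static bool dimm_hit(const struct dimm_bar_mask *d, uint64_t addr)
{
    if (!(d->dword0 & 1u))                                   /* dimm_en */
        return false;
    uint64_t bar  = ((uint64_t)d->bar_msb  << 32) | (d->dword0 & 0xFFF00000u);
    uint64_t mask = ((uint64_t)d->mask_msb << 32) | (d->dword2 & 0xFFF00000u);
    return (addr & mask) == (bar & mask);
}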
+
+/* DMU Debug Internal Domain */
+
+struct DMUDEBUGINT_st {    /* Little Endian */
+    pseudo_bit_t crslavet_err[0x00001];     /* crslave macro timeout error */
+    pseudo_bit_t tx_cmdfifo_perr[0x00001];  /* data parity error on the tx command fifo */
+    pseudo_bit_t tx_datafifo_perr[0x00001]; /* data parity error on the tx data fifo */
+    pseudo_bit_t errorfifo_perr[0x00001];   /* data parity error on the error fifo */
+    pseudo_bit_t tx_cmdfifo_underrun[0x00001];/* underrun indication in the tx command fifo */
+    pseudo_bit_t tx_datafifo_underrun[0x00001];/* underrun indication in the tx data fifo */
+    pseudo_bit_t error_fifo_underrun[0x00001];/* underrun indication in the error fifo */
+    pseudo_bit_t rx_pdatafifo_overflow0[0x00001];/* overflow indication of the posted data fifo in NSB RX port 0 */
+    pseudo_bit_t rx_pcmdfifo_overflow0[0x00001];/* overflow indication of the posted command fifo in NSB RX port 0 */
+    pseudo_bit_t tx_npdatafifo_overflow0[0x00001];/* overflow indication of the non-posted data fifo in NSB RX port 0 */
+    pseudo_bit_t tx_npcmdfifo_overflow0[0x00001];/* overflow indication of the non-posted command fifo in NSB RX port 0 */
+    pseudo_bit_t tx_pdatafifo_overflow1[0x00001];/* overflow indication of the posted data fifo in NSB RX port 1 */
+    pseudo_bit_t tx_pcmdfifo_overflow1[0x00001];/* overflow indication of the posted command fifo in NSB RX port 1 */
+    pseudo_bit_t tx_npdatafifo_overflow1[0x00001];/* overflow indication of the non-posted data fifo in NSB RX port 1 */
+    pseudo_bit_t tx_npcmdfifo_overflow1[0x00001];/* overflow indication of the non-posted command fifo in NSB RX port 1 */
+    pseudo_bit_t str_add_nohit0[0x00001];   /* NSB error port 0: transaction starting address had no hit in any defined dimm address segment */
+    pseudo_bit_t str_add_multhit0[0x00001]; /* NSB error port 0: transaction starting address had multiple hits in the defined dimm address segments */
+    pseudo_bit_t end_add_nohit0[0x00001];   /* NSB error port 0: transaction ending address had no hit in any defined dimm address segment */
+    pseudo_bit_t end_add_multhit0[0x00001]; /* NSB error port 0: transaction ending address had multiple hits in the defined dimm address segments */
+    pseudo_bit_t out_of_range0[0x00001];    /* NSB error port 0: transaction starting & ending addresses hit different segments */
+    pseudo_bit_t str_add_nohit1[0x00001];   /* NSB error port 1: transaction starting address had no hit in any defined dimm address segment */
+    pseudo_bit_t str_add_multhit1[0x00001]; /* NSB error port 1: transaction starting address had multiple hits in the defined dimm address segments */
+    pseudo_bit_t end_add_nohit1[0x00001];   /* NSB error port 1: transaction ending address had no hit in any defined dimm address segment */
+    pseudo_bit_t end_add_multhit1[0x00001]; /* NSB error port 1: transaction ending address had multiple hits in the defined dimm address segments */
+    pseudo_bit_t out_of_range1[0x00001];    /* NSB error port 1: transaction starting & ending addresses hit different segments */
+    pseudo_bit_t nsb_page_violation0[0x00001];
+    pseudo_bit_t nsb_page_violation1[0x00001];
+    pseudo_bit_t reserved0[0x00005];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x00060];
+/* --------------------------------------------------------- */
+};
+
+/* DMU Errors FIFO */
+
+struct DMUERRORFIFO_st {    /* Little Endian */
+    struct FIFOCONTROL_st dmuerrorfifoctl;/* This fifo control gateway is responsible for the DRAM data integrity error reporting fifo. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t cause_lsb[0x00003];    /* data integrity error cause
+        (error in the 64-bit lsb data, on the rising edge of the clock):
+        cause[0] - single ECC error
+        cause[1] - multiple ECC error
+        cause[2] - parity error */
+    pseudo_bit_t MSBcause[0x00003];     /* data integrity error cause
+        (error in the 64-bit msb data, on the falling edge of the clock):
+        cause[0] - single ECC error
+        cause[1] - multiple ECC error
+        cause[2] - parity error */
+    pseudo_bit_t err_RMW[0x00001];      /* Data integrity error on read data, where the read command was performed due to an RMW transaction */
+    pseudo_bit_t err_src_id[0x00003];   /* NSB unit source of the transaction */
+    pseudo_bit_t err_da[0x00002];       /* Error DIMM address. */
+    pseudo_bit_t err_ba[0x00002];       /* Error bank address */
+    pseudo_bit_t reserved0[0x00012];
+/* --------------------------------------------------------- */
+    pseudo_bit_t err_ra[0x00010];       /* Error row address */
+    pseudo_bit_t err_ca[0x00010];       /* Error column address */
+/* --------------------------------------------------------- */
+};
+
+/* Rx Internal Domain Configuration Register */
+
+struct DMURXCONFIGINT_st {    /* Little Endian */
+    pseudo_bit_t cred_rel_mode[0x00002];/* Credit release mode:
+        00 - Release credit when cell service begins (for better performance)
+        01 - Release credit when cell service ends
+        1x - Reserved */
+    pseudo_bit_t rx_stop[0x00001];      /* When asserted, posted/non-posted credit release by the port is disabled */
+    pseudo_bit_t reserved0[0x0001d];
+/* --------------------------------------------------------- */
+};
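Each error FIFO entry carries two 3-bit cause vectors, one per 64-bit half of the beat (rising and falling clock edges). A decoding sketch taken directly from the cause bit assignments above; placing cause_lsb at bits 2:0, MSBcause at bits 5:3 and err_RMW at bit 6 of the entry's first payload dword follows the declaration order (an assumption, not a spec quote).

#include <stdint.h>
#include <stdio.h>

static void decode_cause(const char *half, uint32_t cause)
{
    if (cause & 1u) printf("%s: single ECC error\n", half);
    if (cause & 2u) printf("%s: multiple ECC error\n", half);
    if (cause & 4u) printf("%s: parity error\n", half);
}

void dmu_decode_err_entry(uint32_t dword0)
{
    decode_cause("lsb half (rising edge)",  dword0        & 0x7u);
    decode_cause("msb half (falling edge)", (dword0 >> 3) & 0x7u);
    if ((dword0 >> 6) & 1u)                 /* err_RMW */
        printf("error was on a read issued for an RMW transaction\n");
}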
+
+/* Cause Registers */
+
+struct DMU_CAUSEREG_st {    /* Little Endian */
+    pseudo_bit_t error_fifo_not_empty[0x00001];/* Data Integrity Fifo is not empty. */
+    pseudo_bit_t error_fifo_overflow[0x00001];/* Data Integrity Fifo had an overflow. */
+    pseudo_bit_t calibration_drift[0x00001];/* Calibration drift indication. */
+    pseudo_bit_t NSB_Rx_port0[0x00001]; /* NSB Rx port 0 error. */
+    pseudo_bit_t NSB_Rx_port1[0x00001]; /* NSB Rx port 1 error. */
+    pseudo_bit_t cr_slave_macro_error[0x00001];/* Crslave macro error. */
+    pseudo_bit_t reserved0[0x0001a];
+/* --------------------------------------------------------- */
+    struct EXT_CAUSEREG_st extended_cause;
+/* --------------------------------------------------------- */
+};
+
+/* @DMU_BIST */
+
+struct DMU_BIST_st {    /* Little Endian */
+    pseudo_bit_t bists_stat[0x00009];
+    pseudo_bit_t reserved0[0x00017];
+/* --------------------------------------------------------- */
+    pseudo_bit_t nonrep_arrays_stat[0x00012];
+    pseudo_bit_t reserved1[0x0000e];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved2[0x00040];
+/* --------------------------------------------------------- */
+};
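The top-level cause word is the usual interrupt fan-out: test each bit and service its source. A minimal dispatch sketch with the same hypothetical accessor; the handler names are placeholders and the bit positions follow the declaration order above.

#include <stdint.h>

extern uint32_t dmu_read32(uint32_t offset);   /* hypothetical MMIO accessor */
extern void dmu_drain_error_fifo(void);        /* hypothetical handlers */
extern void dmu_handle_nsb_error(int port);

void dmu_service_cause(uint32_t cause_offset)
{
    uint32_t cause = dmu_read32(cause_offset);
    if (cause & (1u << 0)) dmu_drain_error_fifo();   /* error_fifo_not_empty */
    if (cause & (1u << 3)) dmu_handle_nsb_error(0);  /* NSB_Rx_port0 */
    if (cause & (1u << 4)) dmu_handle_nsb_error(1);  /* NSB_Rx_port1 */
}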
+
+/* DMU Internal Interface General Configuration Registers */
+
+struct DMUGENERALINT_st {    /* Little Endian */
+    pseudo_bit_t nsb_id[0x00003];       /* This field describes the DMU NSB ID - 1 */
+    pseudo_bit_t rx_order_mode[0x00002];/* NSB RX ports ordering policy (per transaction):
+        00 - Address hit order
+        01 - Global order
+        10 - no order
+        11 - reserved */
+    pseudo_bit_t reserved0[0x0001b];
+/* --------------------------------------------------------- */
+    pseudo_bit_t bar_num[0x00001];      /* Number of bars used to map the DIMM memory */
+    pseudo_bit_t bars_type[0x00002];    /* type of the bars used to map the DIMM memory. The lsb bit relates
+        to the first bar. If a single bar exists (bar_num=0), the msb bit of this register is not valid.
+        0 - prefetchable
+        1 - non-prefetchable */
+    pseudo_bit_t reserved1[0x00005];
+    pseudo_bit_t nsb_page_size[0x00004];/* page_size encoding:
+        0000 - reserved
+        0001 - reserved
+        0010 - 4K (default)
+        0011 - 8K
+        0100 - 16K
+        0101 - 32K
+        0110 - 64K
+        others - reserved */
+    pseudo_bit_t reserved2[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dqsm_single_buf[0x00001];/* 0 - use a double buffer for the I/O
+        1 - use a single buffer for the I/O
+        Note that in test mode the double buffer is used */
+    pseudo_bit_t addra_single_buf[0x00001];/* as above */
+    pseudo_bit_t addrb_single_buf[0x00001];/* as above */
+    pseudo_bit_t cmda_single_buf[0x00001];/* as above */
+    pseudo_bit_t cmdb_single_buf[0x00001];/* as above */
+    pseudo_bit_t clkouta_single_buf[0x00001];/* as above */
+    pseudo_bit_t clkoutb_single_buf[0x00001];/* as above */
+    pseudo_bit_t cs_single_buf[0x00001];/* as above */
+    pseudo_bit_t reserved3[0x00018];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved4[0x000a0];
+/* --------------------------------------------------------- */
+    pseudo_bit_t rxdb_idle[0x00001];    /* the dmu rx database is idle (no transactions in the dmu rx ports) */
+    pseudo_bit_t tx_cmd_empty[0x00001]; /* dmu tx command fifo is empty. */
+    pseudo_bit_t tx_data_empty[0x00001];/* tx data fifo is empty */
+    pseudo_bit_t reserved5[0x0001d];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dmu_dqs_drift[0x00012];/* DQS calibration machine early 'out of sync' alarm, per nibble. */
+    pseudo_bit_t reserved6[0x0000e];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved7[0x00340];
+/* --------------------------------------------------------- */
+};
+
+/* @PCU_BIST */
+
+struct PCU_BIST_st {    /* Little Endian */
+    pseudo_bit_t bists_stat[0x0000e];
+    pseudo_bit_t reserved0[0x00012];
+/* --------------------------------------------------------- */
+    pseudo_bit_t nonrep_arrays_stat[0x0001d];
+    pseudo_bit_t reserved1[0x00003];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved2[0x000c0];
+/* --------------------------------------------------------- */
+};
+
+/* PCU General Configuration - Internal Domain */
+
+struct PCUGENERALINT_st {    /* Little Endian */
+    pseudo_bit_t i1_ntu_id[0x00003];
+    pseudo_bit_t i1_pcu_id[0x00003];
+    pseudo_bit_t i1_hca_id[0x00003];
+    pseudo_bit_t i1_dmu_id[0x00003];
+    pseudo_bit_t reserved0[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t i1pcf_gp_cfg[0x00020]; /* general purpose cfg i1 registers */
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* monitoring internal state for debug */
+
+struct pcu_monitor_st {    /* Little Endian */
+    pseudo_bit_t pcu_monitor0[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor1[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor2[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor3[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor4[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor5[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pcu_monitor6[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* PCU init register */
+
+struct pcu_init_st {    /* Little Endian */
+    pseudo_bit_t sw_req64[0x00001];     /* When sw_req64_en is set, the value in the sw_req64 register overrides the sampled value. */
+    pseudo_bit_t sw_req64_en[0x00001];  /* When set, the value in the sw_req64 register overrides the sampled value. */
+    pseudo_bit_t reserved0[0x00006];
+    pseudo_bit_t sampled_req64[0x00001];/* The sampled value for req64 */
+    pseudo_bit_t bit64_supported[0x00001];/* When set, 64-bit mode is supported. This is the result of sw_req64, sw_req64_en and sampled_req64 */
+    pseudo_bit_t reserved1[0x00016];
+/* --------------------------------------------------------- */
+    pseudo_bit_t sw_pcix_mode[0x00001]; /* When sw_pcix_mode_en is set, the value in the sw_pcix_mode register overrides the sampled value. */
+    pseudo_bit_t sw_pcix_mode_en[0x00001];/* When sw_pcix_mode_en is set, the value in the sw_pcix_mode register overrides the sampled value. */
+    pseudo_bit_t reserved2[0x00006];
+    pseudo_bit_t sampled_pcix_mode[0x00001];/* sampled value of pcix mode */
+    pseudo_bit_t pcix_en[0x00001];      /* When set, pcix mode is enabled.
+        This is the result of sw_pcix_mode, sw_pcix_mode_en and sampled_pcix_mode */
+    pseudo_bit_t reserved3[0x00016];
+/* --------------------------------------------------------- */
+    pseudo_bit_t init_pattern_drive_en[0x00001];/* When this bit is set, the init pattern is driven by the Tavor when RST# is asserted (by Tavor) */
+    pseudo_bit_t init_pattern[0x00003]; /* init pattern to drive when init_pattern_drive_en is set.
+        1 - devsel_
+        2 - stop_
+        3 - trdy_ */
+    pseudo_bit_t reserved4[0x00004];
+    pseudo_bit_t gpio15_ctrl[0x00003];  /* 0 - gpio15 ctrl en
+        1 - gpio15 value to drive
+        2 - gpio15 oe to drive
+        This register is used to drive the pci bus reset when in */
+    pseudo_bit_t reserved5[0x00005];
+    pseudo_bit_t init_req64_drive_en[0x00001];/* enable driving req64 at reset */
+    pseudo_bit_t init_req64[0x00001];   /* req64 value to drive at reset if req64_drive_en is set */
+    pseudo_bit_t reserved6[0x0000e];
+/* --------------------------------------------------------- */
+    pseudo_bit_t sw_ignore_idsel[0x00001];/* When sw_ignore_idsel_en is set, the value in the sw_ignore_idsel register overrides the sampled value. */
+    pseudo_bit_t sw_ignore_idsel_en[0x00001];/* When sw_ignore_idsel_en is set, the value in the sw_ignore_idsel register overrides the sampled value. */
+    pseudo_bit_t reserved7[0x00006];
+    pseudo_bit_t sampled_ignore_idsel[0x00001];/* ignore_idsel calculated from the strapping options */
+    pseudo_bit_t ignore_idsel[0x00001]; /* the resulting ignore_idsel */
+    pseudo_bit_t reserved8[0x00016];
+/* --------------------------------------------------------- */
+};
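pcu_init repeats one pattern three times (req64, pcix mode, idsel): a strap sampled at reset, a software value, and an enable that selects between them. The resulting read-only bit is just a mux; a one-line sketch:

#include <stdbool.h>

/* Effective value = software override when the *_en bit is set,
 * else the strap sampled at reset (the pcu_init pattern above).
 * e.g. bit64_supported = effective(sw_req64, sw_req64_en, sampled_req64). */
static inline bool effective(bool sw_value, bool sw_en, bool sampled)
{
    return sw_en ? sw_value : sampled;
}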
+
+/* error logging */
+
+struct pcu_err_log_st {    /* Little Endian */
+    pseudo_bit_t ptu_err_log0[0x00020]; /* Address that caused the error [31:0] */
+/* --------------------------------------------------------- */
+    pseudo_bit_t ptu_err_log1[0x00020]; /* Address that caused the error [63:32] */
+/* --------------------------------------------------------- */
+    pseudo_bit_t ptu_err_log2[0x00020]; /* [24] PTU write pointer
+        [23:16] Error counter
+        [15:0] Cause vector for the error */
+/* --------------------------------------------------------- */
+    pseudo_bit_t ptu_err_log3[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pmu_err_log0[0x00020]; /* Address of the error transaction [31:0] */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pmu_err_log1[0x00020]; /* Address of the transaction that caused the error [63:32]
+        see bug 5205 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pmu_err_log2[0x00020]; /* [24] PMU write pointer
+        [23:16] Error counter
+        [4:0] Cause vector for the error */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pmu_err_log3[0x00020];
+/* --------------------------------------------------------- */
+};
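ptu_err_log2 packs three fields into one dword; a sketch unpacking them exactly per the comment above ([24] write pointer, [23:16] error counter, [15:0] cause vector).

#include <stdint.h>

struct ptu_err_log2 {
    unsigned wr_ptr;      /* bit 24     */
    unsigned err_count;   /* bits 23:16 */
    unsigned cause;       /* bits 15:0  */
};

static struct ptu_err_log2 unpack_ptu_err_log2(uint32_t v)
{
    struct ptu_err_log2 e;
    e.cause     = v & 0xFFFFu;
    e.err_count = (v >> 16) & 0xFFu;
    e.wr_ptr    = (v >> 24) & 0x1u;
    return e;
}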
+
+/* PCU Configuration Cycles Register */
+
+struct PCUCONFIGCYCLES_st {    /* Little Endian */
+    pseudo_bit_t cmd[0x00001];          /* cfg command - read (0) or write (1) */
+    pseudo_bit_t reserved0[0x00007];
+    pseudo_bit_t byte_enables[0x00004]; /* Byte enables of the configuration command */
+    pseudo_bit_t fw_done[0x00001];      /* Indication that the FW executed the configuration cycle.
+        In the case of a PCI read, the read data is in the cfg_data register. In the case of a PCI
+        write, the data has been written to the appropriate configuration register. In the case of
+        PCIX, the information required for the generation of the split completion has been captured
+        by the FW. */
+    pseudo_bit_t trans_nack[0x00001];   /* trans_nack shows the result of the configuration transaction:
+        0 - ACK
+        1 - NACK (implies that the FW failed to execute the cycle and a target abort will be executed by HW)
+        This bit is set by FW and cleared by HW after it finishes processing the current transaction.
+        This bit should be valid during fw_done. */
+    pseudo_bit_t reserved1[0x00001];
+    pseudo_bit_t pcix[0x00001];         /* PCIX mode. In this mode the data requested by read commands and the
+        write acknowledgement for write commands should be returned to the PCI bus as a split completion
+        via the PCUSWCYCLES port. */
+    pseudo_bit_t reserved2[0x0000a];
+    pseudo_bit_t discard[0x00001];      /* Discard pending delayed transaction (write only).
+        This bit is used to implement the firmware Discard Timer: firmware should set this bit in
+        order to discard a pending delayed transaction (PCI). When setting this bit, fw_done and
+        trans_nack should be zero.
+        This bit is also used to discard an incoming transaction with a parity error (PCI); in this
+        case firmware should write discard=1 with fw_done=1 and trans_nack=0. */
+    pseudo_bit_t reserved3[0x00001];
+    pseudo_bit_t pe[0x00001];           /* Received Parity Error. This bit is set by HW during the capture of
+        the cfg cycle. If asserted, in PCIX mode FW should send a Split Completion Message with SCE,
+        class 2, index 01h. */
+    pseudo_bit_t service_required[0x00001];/* Indication that a config cycle has been captured and requires service.
+        In PCIX this bit is cleared when fw_done is set.
+        In PCI, this bit is cleared when the pending delayed transaction has been completed OR
+        firmware requested a discard (Discard Timer) */
+    pseudo_bit_t reserved4[0x00001];
+    pseudo_bit_t lock[0x00001];         /* HW sets this bit to 1 after every read of PCUCONFIGCYCLES register 00h.
+        On writes it just stores the input. FW usually uses this bit as a test&set semaphore in the
+        case of multi-agent work. HW does not use this bit internally. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t cfg_address[0x00020];  /* Address of the cfg command. Contains configuration type (1:0),
+        register number (7:2), function number (10:8). For configuration type 1 it also contains
+        device number (15:11) and bus number (23:16). Bits 31:24 are reserved. */
+/* --------------------------------------------------------- */
+    struct cfg_attributes_st cfg_attributes;
+/* --------------------------------------------------------- */
+    pseudo_bit_t cfg_data[0x00020];     /* On a configuration write command, this is the data latched from the
+        AD bus during the data phase. It is valid from the assertion of the cause bit "Configuration
+        Cycle command has arrived" (CAUSEREG) until fw_done (bit 12).
+        On a PCI (NOT PCIX) configuration read command, the requested data should be written here;
+        it will be transferred to the PCI bus.
+        In the case of PCIX (bit 8 of PCUCONFIGCYCLES register 00h): the data should be returned as
+        a split completion via the PCUSWCYCLES port. Register 04h contains the requestor's attributes
+        for the split completion. */
+/* --------------------------------------------------------- */
+};
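The configuration-cycle port is a HW/FW handshake: hardware captures a cycle and raises service_required; firmware executes it and signals fw_done (plus trans_nack on failure, which makes hardware target-abort). A control-flow sketch with hypothetical pcu_read32/pcu_write32 accessors and a placeholder fw_exec_cfg helper; the bit positions (cmd bit 0, byte_enables bits 11:8, fw_done bit 12, trans_nack bit 13, service_required bit 29) are taken LSB-first from the declaration order, with fw_done corroborated by the "(bit 12)" note above.

#include <stdint.h>

extern uint32_t pcu_read32(uint32_t off);              /* hypothetical MMIO accessors */
extern void     pcu_write32(uint32_t off, uint32_t v);
extern uint32_t fw_exec_cfg(uint32_t addr, uint32_t be, int is_write,
                            uint32_t wdata, int *ok);  /* hypothetical FW helper */

#define CFGCYC_CTRL 0x00   /* illustrative offsets within PCUCONFIGCYCLES */
#define CFGCYC_ADDR 0x04
#define CFGCYC_DATA 0x0c

void fw_poll_cfg_cycles(void)
{
    uint32_t ctrl = pcu_read32(CFGCYC_CTRL);
    if (!(ctrl & (1u << 29)))                          /* service_required */
        return;
    int ok;
    int is_write  = ctrl & 1u;                         /* cmd: 0=read, 1=write */
    uint32_t be   = (ctrl >> 8) & 0xFu;                /* byte_enables */
    uint32_t data = fw_exec_cfg(pcu_read32(CFGCYC_ADDR), be, is_write,
                                pcu_read32(CFGCYC_DATA), &ok);
    if (!is_write && ok)
        pcu_write32(CFGCYC_DATA, data);                /* PCI read: return data here */
    ctrl |= (1u << 12);                                /* fw_done */
    if (!ok)
        ctrl |= (1u << 13);                            /* trans_nack: HW will target-abort */
    pcu_write32(CFGCYC_CTRL, ctrl);
}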
+
+/* Software Generation of PCI Cycles */
+
+struct PCUSWCYCLES_st {    /* Little Endian */
+    pseudo_bit_t r_w[0x00001];          /* 0=read
+        1=write, split completion */
+    pseudo_bit_t io[0x00001];           /* 0=memory / 1=io
+        This bit should be asserted (io) in mutual exclusion with the cfg, sc and special bits.
+        Bit r_w defines whether a Memory/IO Read or Memory/IO Write command will be issued to the
+        PCI bus. Whether a memory command is a burst or a DWORD transaction depends on the byte count. */
+    pseudo_bit_t reserved0[0x00001];
+    pseudo_bit_t sc[0x00001];           /* Split Completion transaction command.
+        This bit should be asserted in mutual exclusion with cfg, mem_io and special. r_w must be 1 */
+    pseudo_bit_t reserved1[0x00001];
+    pseudo_bit_t special[0x00001];      /* Special Cycle or Interrupt Acknowledge transaction command.
+        This bit should be asserted in mutual exclusion with the cfg, sc and mem_io bits.
+        r_w=0: Interrupt Acknowledge
+        r_w=1: Special Cycle */
+    pseudo_bit_t cfg[0x00001];          /* Configuration transaction command.
+        This bit should be asserted in mutual exclusion with the sc, mem_io and special bits.
+        Bit r_w defines whether a Configuration Read or Configuration Write command will be issued
+        to the PCI bus. */
+    pseudo_bit_t persist[0x00001];      /* When this bit is asserted, the PCU master is dedicated to this
+        transaction and no other transactions will interleave its execution until its completion
+        (deassertion of the GO bit). */
+    pseudo_bit_t be[0x00004];           /* Byte Enables (BE) - active low.
+        BE can be used for I/O, configuration and INTA cycles.
+        For memory cycles with BE!=0, BE can only be used for single data phase transactions.
+        * Non-4-byte-aligned I/O READ cycles cannot be generated in PCIX; see bug 5138 */
+    pseudo_bit_t reserved2[0x00003];
+    pseudo_bit_t pcix[0x00001];         /* pcix mode */
+    pseudo_bit_t MaxRetry[0x00008];     /* The maximum number of retry responses to this transaction before the
+        cycle is considered completed by this mechanism. */
+    pseudo_bit_t status[0x00006];       /* Transaction completion status:
+        000000 - Normal completion
+        000001 - Retry Timeout
+        000010 - Split Completion Timeout
+        000011 - Unexpected Split Completion received (correct Sequence ID, but bytecount or address[6:0] mismatch)
+        000100 - Split Completion Error received (SCM with SCE)
+        001000 - Target Abort
+        010000 - Master Abort
+        100000 - Parity Error
+        others - reserved */
+    pseudo_bit_t GO[0x00001];           /* This bit is used to trigger the transaction and to report its completion */
+    pseudo_bit_t lock[0x00001];
+/* --------------------------------------------------------- */
+    struct swcycle_internal_st sw_int_status;/* Reserved for monitoring and debug use */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_address_lsb5_0[0x00006];/* This field contains the 6 lowest bits of the address to be driven
+        to the PCI bus during the address phase
+        * Non-4-byte-aligned I/O READ cycles cannot be generated in PCIX; see bug 5138 */
+    pseudo_bit_t pci_address_lsb31_6[0x0001a];/* This field contains bits [31:6] of the address to be driven to
+        the PCI bus during the address phase */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_address_msb[0x00020];/* This field contains the 32 msb bits of the address to be driven to
+        the PCI bus during the address phase */
+/* --------------------------------------------------------- */
+    pseudo_bit_t attributes6_0[0x00007];/* This field contains the lowest 7 bits of the attribute to be driven
+        to the PCIX bus during the attribute phase. Ignored in PCI mode */
+    pseudo_bit_t attributes31_7[0x00019];/* This field contains bits [31:7] of the attribute to be driven to the
+        PCIX bus during the attribute phase. Ignored in PCI mode. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved3[0x00160];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data0[0x00020];    /* In write transactions, the data to be driven to the pci bus during
+        the write phase. In read transactions, this is the location to store the read data. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data1[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data2[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data3[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data4[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data5[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data6[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data7[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data8[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data9[0x00020];    /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data10[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data11[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data12[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data13[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data14[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_data15[0x00020];   /* as pci_data0 */
+/* --------------------------------------------------------- */
+};
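PCUSWCYCLES is driven by the GO bit: firmware programs the address/attributes/data, sets a command bit plus GO, then polls until hardware deasserts GO and checks the status field. A sketch with the same hypothetical accessors; offsets are illustrative and bit positions (r_w bit 0, status bits 29:24, GO bit 30) follow the declaration order LSB-first.

#include <stdint.h>

extern uint32_t pcu_read32(uint32_t off);              /* hypothetical MMIO accessors */
extern void     pcu_write32(uint32_t off, uint32_t v);

#define SWCYC_CTRL     0x00   /* illustrative offsets within PCUSWCYCLES */
#define SWCYC_ADDR_LSB 0x08
#define SWCYC_DATA0    0x60

/* Issue a DWORD memory read on the PCI bus; returns the status field
 * (0 = normal completion per the encoding above). Other control fields
 * (MaxRetry, byte enables) are left at illustrative defaults here. */
uint32_t pcu_sw_mem_read32(uint32_t addr, uint32_t *data)
{
    pcu_write32(SWCYC_ADDR_LSB, addr);
    pcu_write32(SWCYC_CTRL, 1u << 30);     /* r_w=0, io=0 (memory read); GO triggers */
    while (pcu_read32(SWCYC_CTRL) & (1u << 30))
        ;                                  /* HW clears GO on completion */
    *data = pcu_read32(SWCYC_DATA0);
    return (pcu_read32(SWCYC_CTRL) >> 24) & 0x3Fu;     /* status */
}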
+
+/* Fence Command Directed to iRISC */
+
+struct cmd_out_st {    /* Little Endian */
+    pseudo_bit_t r_w[0x00001];
+    pseudo_bit_t mem_io[0x00001];
+    pseudo_bit_t reserved0[0x00001];
+    pseudo_bit_t split[0x00001];
+    pseudo_bit_t reserved1[0x00001];
+    pseudo_bit_t inta[0x00001];
+    pseudo_bit_t cfg[0x00001];
+    pseudo_bit_t reserved2[0x00017];
+    pseudo_bit_t go[0x00001];
+    pseudo_bit_t lock[0x00001];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_address_lsb[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t pci_address_msb[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t byte_cnt[0x00007];
+    pseudo_bit_t reserved3[0x00011];
+    pseudo_bit_t tag[0x00005];
+    pseudo_bit_t relexed_order[0x00001];
+    pseudo_bit_t no_snoop[0x00001];
+    pseudo_bit_t reserved4[0x00001];
+/* --------------------------------------------------------- */
+    pseudo_bit_t online_stat[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved5[0x00160];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data0[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data1[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data2[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data3[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data4[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data5[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data6[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data7[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data8[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data9[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data10[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data11[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data12[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data13[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data14[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t Data15[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* NSwitch Address Decoder Registers */
+
+struct Nswitch_address_deocder_st {    /* Little Endian */
+    pseudo_bit_t reserved0[0x0000c];
+    pseudo_bit_t io_base_pcu[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved1[0x0000c];
+    pseudo_bit_t io_limt_pcu[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved2[0x00014];
+    pseudo_bit_t mem_base_pcu[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved3[0x00014];
+    pseudo_bit_t mem_limt_pcu[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t preftch_mem_base_pcu_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved4[0x00014];
+    pseudo_bit_t preftch_mem_base_pcu_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t preftch_mem_limt_pcu_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved5[0x00014];
+    pseudo_bit_t preftch_mem_limt_pcu_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t ns_is_primary4pcu[0x00001];/* nswitch_is_primary4pcu */
+    pseudo_bit_t subtractive_dec_pcu_on_pci[0x00001];
+    pseudo_bit_t subtractive_dec_pcu_on_nsb[0x00001];
+    pseudo_bit_t isa_mode4pcu[0x00001];
+    pseudo_bit_t reserved6[0x0001c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved7[0x000e0];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved8[0x0000c];
+    pseudo_bit_t io_base_ntu[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved9[0x0000c];
+    pseudo_bit_t io_limt_ntu[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved10[0x00014];
+    pseudo_bit_t mem_base_ntu[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved11[0x00014];
+    pseudo_bit_t mem_limt_ntu[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t preftch_mem_base_ntu_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved12[0x00014];
+    pseudo_bit_t preftch_mem_base_ntu_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t preftch_mem_limt_ntu_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved13[0x00014];
+    pseudo_bit_t preftch_mem_limt_ntu_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t ns_is_primary4ntu[0x00001];
+    pseudo_bit_t reserved14[0x00001];
+    pseudo_bit_t substractive_dec_ntu[0x00001];
+    pseudo_bit_t isa_mode4ntu[0x00001];
+    pseudo_bit_t reserved15[0x0001c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved16[0x000e0];
+/* --------------------------------------------------------- */
+    pseudo_bit_t ntu_bar_0_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t ntu_bar_size_0[0x00006];
+    pseudo_bit_t reserved17[0x00002];
+    pseudo_bit_t enable_ntu_0[0x00001];
+    pseudo_bit_t reserved18[0x0000b];
+    pseudo_bit_t ntu_bar_0_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved19[0x001c0];
+/* --------------------------------------------------------- */
+    pseudo_bit_t hca_bar_0_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t hca_bar_size_0[0x00006];
+    pseudo_bit_t reserved20[0x00002];
+    pseudo_bit_t enable_hca_0[0x00001];
+    pseudo_bit_t reserved21[0x0000b];
+    pseudo_bit_t hca_bar_0_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved22[0x00040];
+/* --------------------------------------------------------- */
+    pseudo_bit_t hca_bar_1_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t hca_bar_size_1[0x00006];
+    pseudo_bit_t reserved23[0x00002];
+    pseudo_bit_t enable_hca_1[0x00001];
+    pseudo_bit_t reserved24[0x0000b];
+    pseudo_bit_t hca_bar_1_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved25[0x00140];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dmu_bar_0_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dmu_bar_size_0[0x00006];
+    pseudo_bit_t reserved26[0x00002];
+    pseudo_bit_t enable_dmu_0[0x00001];
+    pseudo_bit_t reserved27[0x0000b];
+    pseudo_bit_t dmu_bar_0_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved28[0x00040];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dmu_bar_1_msbs[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t dmu_bar_size_1[0x00006];
+    pseudo_bit_t reserved29[0x00002];
+    pseudo_bit_t enable_dmu_1[0x00001];
+    pseudo_bit_t reserved30[0x0000b];
+    pseudo_bit_t dmu_bar_1_lsbs[0x0000c];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved31[0x00740];
+/* --------------------------------------------------------- */
+};
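The NSwitch base/limit pairs behave like PCI-to-PCI bridge windows: the stored field is the high bits of the window and the low bits are implied (zeros for the base, ones for the limit). A sketch for the PCU I/O window, assuming the 20 stored bits above the 12 reserved LSBs represent address bits 31:12, i.e. 4 KB granularity; that granularity is my inference from the field widths.

#include <stdint.h>
#include <stdbool.h>

/* 4KB-granular I/O window check: the base register holds address bits
 * 31:12; low bits are implied 0 for the base and 1 for the limit. */
static bool pcu_io_window_hit(uint32_t io_base_reg, uint32_t io_limit_reg,
                              uint32_t io_addr)
{
    uint32_t base  =  io_base_reg  & 0xFFFFF000u;
    uint32_t limit = (io_limit_reg & 0xFFFFF000u) | 0x00000FFFu;
    return io_addr >= base && io_addr <= limit;
}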
+
+/* @pci registers */
+
+struct conf_header_registers_st {    /* Little Endian */
+    pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+    pseudo_bit_t io_space[0x00001];     /* Enable for the IO decoders. When 0, the device does not respond to
+        IO transactions. From the Command Register (p2p page 27, pcix spec page 131) */
+    pseudo_bit_t mem_space[0x00001];    /* Enable for the memory decoders. When 0, the device does not respond
+        to memory transactions. From the Command Register */
+    pseudo_bit_t reserved1[0x00002];
+    pseudo_bit_t mri_en[0x00001];       /* memory write and invalidate enable. Relevant in PCI mode,
+        ignored in PCIX mode. From the Command Register */
+    pseudo_bit_t P_perr_en[0x00001];    /* When set to one, the device can report parity errors through PERR#.
+        From the Command Register */
+    pseudo_bit_t reserved2[0x00001];
+    pseudo_bit_t P_serr_en[0x00001];    /* SERR# enable for the primary bus. From the Command Register */
+    pseudo_bit_t reserved3[0x00018];
+/* --------------------------------------------------------- */
+    pseudo_bit_t S_perr_en[0x00001];
+    pseudo_bit_t S_serr_en[0x00001];
+    pseudo_bit_t reserved4[0x00003];
+    pseudo_bit_t master_abort_mode[0x00001];/* Master abort mode:
+        0 - Master abort on read returns all 1's; on write, it is ignored.
+        1 - Master abort on one bus causes a target abort on the other bus; master abort on a
+            posted write causes SERR.
+        (from the bridge control register) */
+    pseudo_bit_t reserved5[0x00002];
+    pseudo_bit_t primary_discard_timeout[0x00004];/* number of PCI clocks that the PCU waits for a master on the
+        primary bus (when in Primary mode) to repeat a delayed transaction. Relevant in PCI only;
+        ignored in PCIX mode.
+        (relevant for inbound non-posted)
+        The number of PCI cycles to wait for a split completion; if expired, the appropriate cause
+        bit is set. Encoding:
+        0 - 2^6 cycles
+        1 - 2^8
+        2 - 2^10
+        3 - 2^12
+        4 - 2^14
+        5 - 2^16
+        6 - 2^18
+        7 - 2^20
+        8 - 2^22
+        9 - 2^24
+        10 - 2^26
+        11 - 2^28
+        12 - 2^30
+        else - disable
+        Reset value is 7 */
+    pseudo_bit_t reserved6[0x00014];
+/* --------------------------------------------------------- */
+    pseudo_bit_t cls[0x00008];          /* Cache line size. Defined in PCI mode; no meaning in PCIX mode.
+        In dword increments.
+        When zero, the master cannot use the mri command.
+        Tavor supports cls that are larger than ??? */
+    pseudo_bit_t latency_timer[0x00008];/* latency timer as defined in the pci/x spec.
+        In Primary mode it acts as the "primary latency timer" and in Secondary mode as the
+        "secondary latency timer".
+        In PCIX mode the default value should be 64; this should be set by FW. */
+    pseudo_bit_t reserved7[0x00010];
+/* --------------------------------------------------------- */
+    pseudo_bit_t p_bus_num[0x00008];
+    pseudo_bit_t s_bus_number[0x00008];
+    pseudo_bit_t subord_bus_num[0x00008];
+    pseudo_bit_t reserved8[0x00008];
+/* --------------------------------------------------------- */
+    pseudo_bit_t function_number[0x00003];/* Indicates the number of this function, i.e., the number in the
+        Function Number field (AD[10::08]) of the address of a Type 0 configuration transaction to
+        which this bridge responds.
+        This register is from the PCI-X Bridge Status Register.
+        This register is read-only with respect to config cycles and is automatically updated from
+        type 0 cfg cycles; so is the device number */
+    pseudo_bit_t device_number[0x00005];/* Indicates the number of this device, i.e., the number in the Device
+        Number field (AD[15::11]) of the address of a Type 0 configuration transaction that is
+        assigned to this bridge by the connection of the system hardware.
+        This register is from the PCI-X Bridge Status Register */
+    pseudo_bit_t reserved9[0x00018];
+/* --------------------------------------------------------- */
+    pseudo_bit_t up_splt_commitment_limit[0x00010];
+    pseudo_bit_t reserved10[0x00010];
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved11[0x00120];
+/* --------------------------------------------------------- */
+};
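The discard-timeout encoding above steps the exponent by two: code 0 is 2^6 cycles, code 1 is 2^8, up to code 12 at 2^30, with larger codes disabling the timer. In closed form:

#include <stdint.h>

/* primary_discard_timeout encoding: cycles = 2^(6 + 2*code) for code 0..12,
 * anything above 12 disables the timer (returned as 0 here). */
static uint64_t discard_timeout_cycles(unsigned code)
{
    return (code <= 12) ? (1ull << (6 + 2 * code)) : 0;
}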
+
+/* PCI/X Arbiter Configuration */
+
+struct pci_arbiter_st {    /* Little Endian */
+    pseudo_bit_t parb_pair_used[0x00007];/* used to select the number of active external agents connected to
+        the arbiter. This bit mask controls the pin usage and overrides any GPIO configuration.
+        See "PCI Arbiter Pin Usage" on page 105. In order to support external PCI agents through
+        the GPIO pins, this register has to be programmed. The default GPIO configuration leaves
+        the GPIO pins as inputs right after reset; therefore weak pull-ups are required on the
+        corresponding pins so that connected PCI agents do not receive a floating GNT after reset
+        is deasserted while PCI Arb Pair Used has not yet been programmed to its desired value.
+        To ensure a glitch-free transition on the GNT pins at the moment PCI Arb Pair Used is
+        programmed, the GPIO Data register should keep its reset value until it has been configured.
+        1 - the corresponding GPIO pair is used for an external agent's REQ# and GNT# signals
+        0 - GPIO controls the pins and the corresponding REQ input to the internal arbiter is
+            masked. See "PCI Arbiter Pin Usage" on page 105.
+        The reset value is 0 (meaning no GPIO pair is used by the arbiter by default) */
+    pseudo_bit_t reserved0[0x00005];
+    pseudo_bit_t parb_TimeOut[0x00004]; /* Number of PCI cycles for which the arbiter will continue to provide
+        Grant to an agent that does not initiate a cycle on an IDLE PCI bus. After TimeOut+2 cycles
+        the arbiter removes the grant and continues normal operation. The timed-out agent will not
+        be granted the bus until it removes the request for at least one PCI cycle. Reset value is 0xE. */
+    pseudo_bit_t parb_P[0x00001];       /* Priority for the internal agent:
+        1 - internal agent is high priority
+        0 - internal agent is low priority
+        Reset value is 1. */
+    pseudo_bit_t parb_H[0x00004];       /* Number of external high-priority agents. Valid values are 0 to 8;
+        greater than 8 is reserved. Agents 1 to H are high-priority agents; agents H+1 to 8 are
+        low-priority agents. Reset value is 0. */
+    pseudo_bit_t reserved1[0x00006];
+    pseudo_bit_t parb_park_agt[0x00004];/* Agent to receive the Grant when no requests are active. Note: this
+        field should be configured so that an existing connected agent is the parked one. Reset
+        value is 0 (internal agent parked).
+        park_agt encoding is as follows:
+        0000-1001 - agent number
+        1010-1110 - reserved
+        1111 - park on last */
+    pseudo_bit_t parb_en[0x00001];      /* Arbiter Enabled.
+        1 - the internal PCI Arbiter is enabled; PCI REQ# and GNT# pins are used for connecting
+            external agent 1 to the internal arbiter.
+        0 - the internal PCI Arbiter is disabled; the internal agent uses the PCI REQ# and GNT#
+            pins for connecting to an external arbiter.
+        The reset value for this bit is controlled using a strapping pin. */
+/* --------------------------------------------------------- */
+    pseudo_bit_t reserved2[0x00060];
+/* --------------------------------------------------------- */
+};
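Packing the arbiter configuration is plain bit assembly. A sketch assuming LSB-first packing in declaration order (parb_pair_used bits 6:0, parb_TimeOut bits 15:12, parb_P bit 16, parb_H bits 20:17, parb_park_agt bits 30:27, parb_en bit 31); the positions are derived from the field widths above, not quoted from a spec.

#include <stdint.h>

static uint32_t pci_arbiter_pack(uint32_t pair_used, uint32_t timeout,
                                 int internal_high_prio, uint32_t high_prio_agents,
                                 uint32_t park_agent, int enable)
{
    return ( (pair_used          & 0x7Fu)       |
            ((timeout            & 0xFu) << 12) |
            ((internal_high_prio ? 1u : 0u) << 16) |
            ((high_prio_agents   & 0xFu) << 17) |
            ((park_agent         & 0xFu) << 27) |
            ((enable ? 1u : 0u)          << 31));
}

/* e.g. one external pair, default timeout 0xE, internal agent high priority,
 * park on the internal agent, arbiter enabled:
 *     uint32_t v = pci_arbiter_pack(0x01, 0xE, 1, 0, 0, 1); */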
+ 2 - 4 cycles f2f (frame to frame)
+ 3 - 5 cycles
+ etc.
+ */
+ pseudo_bit_t reserved2[0x0001c];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* PCU Prefetch Configuration */
+
+struct pcu_prefetch_st { /* Little Endian */
+ pseudo_bit_t max_prefetch[0x00004]; /* The maximum amount of prefetched data for inbound read.
+ 0000 - 128 bytes
+ 0001 - 256 bytes
+ 0010 - 512 bytes
+ 0011 - 1K bytes
+ 0100 - 2K bytes
+ 0101 - 4K bytes
+ 0110 - 8K bytes
+ 0111 - 16K bytes */
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t discard_timeout[0x00004];/* Maximum time to hold prefetched data. When this timer expires, the prefetched data is discarded.
+ 0 - 2^6 cycles
+ 1 - 2^7
+ 2 - 2^8
+ 3 - 2^9
+ 4 - 2^10
+ 5 - 2^11
+ 6 - 2^13
+ 7 - 2^15
+ 8-15 - disable
+ Reset value is 7
+ */
+ pseudo_bit_t reserved1[0x00004];
+ pseudo_bit_t prefetch_discard_mode[0x00004];/* Mode for discarding prefetched data:
+ 0000 - discard only due to timeout
+ 0001 - write hit to the memory bar of the prefetched data
+ 0010 - write hit to the page of the prefetched data
+ 0100 - immediate discard (when the read transaction is disconnected)
+
+ others - reserved */
+ pseudo_bit_t reserved2[0x00004];
+ pseudo_bit_t prefetch_watermark[0x00005];/* Buffer empty level for starting prefetch. The value of this register is the line number in the FIFO. Default value is 0xF, which implies half the buffer.
+ */
+ pseudo_bit_t reserved3[0x00003];
+/* --------------------------------------------------------- */
+ pseudo_bit_t decoder_prefetch_ctrl[0x00008];/* bit 0 - prefetch low - prefetch enable for the first 4G (in upstream read transactions in secondary mode and in subtractive). Value of 0 is enable.
+ bit 1 - prefetch high
+ bit 2 - prefetch en (0 - enable) - enable/disable for the whole prefetch feature.
+ bit 3 - ISA en
+
+ **** all those bits exist in the PCI configuration header. They would be accessed by cfg cycles and updated by FW */
+ pseudo_bit_t reserved4[0x00018];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved5[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* PCU NSWITCH TX Configuration */
+
+struct pcu_tx_st { /* Little Endian */
+ pseudo_bit_t dmu_p_cred[0x00004]; /* Credits to send posted requests to the DMU */
+ pseudo_bit_t dmu_np_cred[0x00004]; /* Credits to send non-posted requests to the DMU */
+ pseudo_bit_t ntu_p_cred[0x00004]; /* Credits to send posted requests to the NTU */
+ pseudo_bit_t ntu_np_cred[0x00004]; /* Credits to send non-posted requests to the NTU */
+ pseudo_bit_t hca_p_cred[0x00004]; /* Credits to send posted requests to the HCA */
+ pseudo_bit_t hca_np_cred[0x00004]; /* Credits to send non-posted requests to the HCA */
+ pseudo_bit_t hca_resp_cred[0x00004];/* Credits to send responses to the HCA */
+ pseudo_bit_t reserved0[0x00004];
+/* --------------------------------------------------------- */
+};
+
+/* PCU General Configuration - External Domain */
+
+struct PCUGENERALEXT_st { /* Little Endian */
+ pseudo_bit_t p1_ntu_id[0x00003];
+ pseudo_bit_t p1_pcu_id[0x00003];
+ pseudo_bit_t p1_hca_id[0x00003];
+ pseudo_bit_t p1_dmu_id[0x00003];
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t Primary_mode[0x00001]; /* This bit is used to inform Tavor whether it is in Primary or Secondary mode.
+ 0 - Secondary mode
+ 1 - Primary mode */
+ pseudo_bit_t reserved1[0x0000f];
+/* --------------------------------------------------------- */
+ pseudo_bit_t pci_retry_timer[0x00008];/* Defines the number of PCI clocks that the Master will wait between retries of a delayed transaction from the moment the bus returned to IDLE. This field is specified in units of 2 PCI cycles. When set to zero, the transaction will be retried as fast as possible. Reset value is 4 (8 PCI cycles). */
+ pseudo_bit_t max_pci_retry_times[0x00018];/* Used by the PCI Master to set the maximum number of times a delayed request will be retried on the bus. If no success is achieved after the specified number of retries, the PCIM will report a Master Abort condition. Reset value is 0x000FFF. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t split_timeout[0x00004];/* The number of PCI cycles that we wait for split completion. If expired, the appropriate cause bit is set. Encoding:
+ 0 - 2^6 cycles
+ 1 - 2^7
+ 2 - 2^8
+ 3 - 2^9
+ 4 - 2^10
+ 5 - 2^11
+ 6 - 2^13
+ 7 - 2^15
+ 8-15 - disable
+ Reset value is 7 */
+ pseudo_bit_t reserved2[0x0001c];
+/* --------------------------------------------------------- */
+ pseudo_bit_t slowdec[0x00001]; /* When this bit is asserted, address decoding is slow (rather than medium) */
+ pseudo_bit_t mwdcl[0x00001]; /* When set, even when the latency timer expires, memory writes continue until the next cache-line boundary. Default value is 0. */
+ pseudo_bit_t reserved3[0x00006];
+ pseudo_bit_t nsb_page_size[0x00004];/* page_size encoding
+ 0000 - 1K (reserved)
+ 0001 - 2K (reserved)
+ 0010 - 4K (default)
+ 0011 - 8K
+ 0100 - 16K
+ 0101 - 32K
+ 0110 - 64K
+ Others - reserved
+ */
+ pseudo_bit_t reserved4[0x00004];
+ pseudo_bit_t pci_page_size[0x00004];/* page_size encoding
+ 000 - 0.5K (default value)
+ 001 - 1K
+ 010 - 2K
+ 011 - 4K
+ 100 - 128 byte
+ 101 - 256 byte */
+ pseudo_bit_t reserved5[0x00004];
+ pseudo_bit_t data_to_pci_req_waterlevel[0x00004];/* The number of bytes that should be accumulated in the posted write buffer before initiating an outbound transaction.
+
+ Do we have such waterlevel for inbound ??? TBD */
+ pseudo_bit_t data_to_pci_disc_waterlevel[0x00004];/* The number of bytes that should be left in the posted write buffer before disconnecting an outbound transaction */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved6[0x00008];
+ pseudo_bit_t functions[0x00003]; /* The number of functions implemented.
+ This value is checked when accepting a type 0 config cycle */
+ pseudo_bit_t reserved7[0x00015];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved8[0x00003];
+ pseudo_bit_t max_outstanding_split_trans[0x00004];/* Maximum Outstanding Split Transactions in the PCIX Command register. This register sets the maximum number of Split Transactions the device is permitted to have outstanding at one time.
+
+ Register Maximum Outstanding
+ 0000 - 1
+ 0001 - 2
+ 0010 - 3
+ ...
+ 1111 - 16
+ */
+ pseudo_bit_t reserved9[0x00019];
+/* --------------------------------------------------------- */
+ struct pcu_gp_cfg_st gp_cfg; /* general purpose configuration register
+
+ 31 - Enable cfg_cycle engine.
When cleared, cfg cycles respond with retry */ +/* --------------------------------------------------------- */ + pseudo_bit_t array_redundency1[0x00020];/* general purpose configuration register */ +/* --------------------------------------------------------- */ + struct pcu_gp_cfg_h1_st gp_cfg_h1; +/* --------------------------------------------------------- */ +}; + +/* BIST Debug */ + +struct BIST_DEBUG_st { /* Little Endian */ + pseudo_bit_t fail_number[0x0000a]; + pseudo_bit_t reserved0[0x00006]; + pseudo_bit_t unit_number[0x00005]; + pseudo_bit_t reserved1[0x00003]; + pseudo_bit_t bist_number[0x00003]; + pseudo_bit_t reserved2[0x00004]; + pseudo_bit_t data_valid[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bist_debug_data7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00160]; +/* --------------------------------------------------------- */ +}; + +/* General Purpose Semaphores */ + +struct cs_semaphores_st { /* Little Endian */ + pseudo_bit_t PERFCNT[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t LED[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t CRBUS[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP10[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP14[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP16[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP17[0x00020]; +/* --------------------------------------------------------- */ + 
pseudo_bit_t SemaP18[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP20[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP22[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP24[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP26[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP28[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP30[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP31[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP32[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP33[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP34[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP35[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP36[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP37[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP38[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP39[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP40[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP41[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP42[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP43[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP44[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP45[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP46[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP47[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP48[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP49[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP50[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP51[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP52[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP53[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t 
SemaP54[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP55[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP56[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP57[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP58[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP59[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP60[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP61[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SemaP62[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Misc Cause Register */ + +struct miscellaneous_cause_register_st { /* Little Endian */ + pseudo_bit_t crbus_brkpnt_cause[0x00001]; + pseudo_bit_t crbus_to_cause[0x00001]; + pseudo_bit_t ibml_not_idle_cause[0x00001]; + pseudo_bit_t i2c_master_idle_cause[0x00001]; + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t crbus_brkpnt_clear[0x00001]; + pseudo_bit_t crbus_to_clear[0x00001]; + pseudo_bit_t ibml_not_idle_clr[0x00001]; + pseudo_bit_t i2c_master_idle_clr[0x00001]; + pseudo_bit_t reserved1[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t crbus_brkpnt_set[0x00001]; + pseudo_bit_t crbus_to_set[0x00001]; + pseudo_bit_t ibml_not_idle_set[0x00001]; + pseudo_bit_t i2c_master_idle_set[0x00001]; + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t crbus_brkpnt_ena[0x00001]; + pseudo_bit_t crbus_to_ena[0x00001]; + pseudo_bit_t ibml_not_idle_ena[0x00001]; + pseudo_bit_t i2c_master_idle_ena[0x00001]; + pseudo_bit_t reserved3[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t crbus_brkpnt_serviced[0x00001]; + pseudo_bit_t crbus_to_serviced[0x00001]; + pseudo_bit_t ibml_not_idle_serviced[0x00001]; + pseudo_bit_t i2c_master_idle_serviced[0x00001]; + pseudo_bit_t reserved4[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t vir_addr1[0x00014]; + pseudo_bit_t reserved5[0x0000a]; + pseudo_bit_t vir_cmd1[0x00001]; + pseudo_bit_t vir_active1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t vir_data0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vir_addr2[0x00014]; + pseudo_bit_t reserved6[0x0000a]; + pseudo_bit_t vir_cmd2[0x00001]; + pseudo_bit_t vir_active2[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t vir_data2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00080]; +/* --------------------------------------------------------- */ +}; + +/* CR Address Breakpoints */ + +struct cs_brk_point_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t Brkpoint0addr[0x00012]; + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t Brkpoint0mask[0x00008]; + pseudo_bit_t ER0[0x00001]; + pseudo_bit_t EW0[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t 
Brkpoint1addr[0x00012]; + pseudo_bit_t reserved3[0x00002]; + pseudo_bit_t Brkpoint1mask[0x00008]; + pseudo_bit_t ER1[0x00001]; + pseudo_bit_t EW1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00002]; + pseudo_bit_t Brkpoint2addr[0x00012]; + pseudo_bit_t reserved5[0x00002]; + pseudo_bit_t Brkpoint2mask[0x00008]; + pseudo_bit_t ER2[0x00001]; + pseudo_bit_t EW2[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00002]; + pseudo_bit_t Brkpoint3addr[0x00012]; + pseudo_bit_t reserved7[0x00002]; + pseudo_bit_t Brkpoint3mask[0x00008]; + pseudo_bit_t ER3[0x00001]; + pseudo_bit_t EW3[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00002]; + pseudo_bit_t Brkpoint4addr[0x00012]; + pseudo_bit_t reserved9[0x00002]; + pseudo_bit_t Brkpoint4mask[0x00008]; + pseudo_bit_t ER4[0x00001]; + pseudo_bit_t EW4[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00002]; + pseudo_bit_t Brkpoint5addr[0x00012]; + pseudo_bit_t reserved11[0x00002]; + pseudo_bit_t Brkpoint5mask[0x00008]; + pseudo_bit_t ER5[0x00001]; + pseudo_bit_t EW5[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00002]; + pseudo_bit_t Brkpoint6addr[0x00012]; + pseudo_bit_t reserved13[0x00002]; + pseudo_bit_t Brkpoint6mask[0x00008]; + pseudo_bit_t ER6[0x00001]; + pseudo_bit_t EW6[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved14[0x00002]; + pseudo_bit_t BrkpointLatch[0x00012]; + pseudo_bit_t reserved15[0x0000a]; + pseudo_bit_t R_latch[0x00001]; + pseudo_bit_t W_latch[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved16[0x00100]; +/* --------------------------------------------------------- */ +}; + +/* Consolidated Cause Registers */ + +struct CONSCAUSE_st { /* Little Endian */ + pseudo_bit_t misc_cause[0x00001]; /* Consolidated Cause Register */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t dmu0_cause[0x00001]; + pseudo_bit_t dmu1_cause[0x00001]; + pseudo_bit_t ntu0_cause[0x00001]; + pseudo_bit_t ntu1_cause[0x00001]; + pseudo_bit_t pcu0_cause[0x00001]; + pseudo_bit_t pcu1_cause[0x00001]; + pseudo_bit_t nsi0_cause[0x00001]; + pseudo_bit_t nsi1_cause[0x00001]; + pseudo_bit_t tpt0_cause[0x00001]; + pseudo_bit_t tpt1_cause[0x00001]; + pseudo_bit_t qpc0_cause[0x00001]; + pseudo_bit_t qpc1_cause[0x00001]; + pseudo_bit_t ldb0_cause[0x00001]; + pseudo_bit_t ldb1_cause[0x00001]; + pseudo_bit_t sde0_cause[0x00001]; + pseudo_bit_t sde1_cause[0x00001]; + pseudo_bit_t exe0_cause[0x00001]; + pseudo_bit_t exe1_cause[0x00001]; + pseudo_bit_t rde0_cause[0x00001]; + pseudo_bit_t rde1_cause[0x00001]; + pseudo_bit_t tcu0_cause[0x00001]; + pseudo_bit_t tcu1_cause[0x00001]; + pseudo_bit_t timer_cause[0x00001]; + pseudo_bit_t gpio_cause[0x00001]; + pseudo_bit_t encoded_cause[0x00001]; + pseudo_bit_t reserved1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t ib1_cause0[0x00001]; + pseudo_bit_t ib1_cause1[0x00001]; + pseudo_bit_t ib2_cause0[0x00001]; + pseudo_bit_t ib2_cause1[0x00001]; + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t erpena_virbus[0x00001]; + pseudo_bit_t reserved3[0x00005]; + pseudo_bit_t erpena[0x00019]; + pseudo_bit_t reserved4[0x00001]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t erpena2[0x00004]; + pseudo_bit_t reserved5[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t intb_virbus[0x00001]; + pseudo_bit_t reserved6[0x00005]; + pseudo_bit_t intb[0x00019]; + pseudo_bit_t reserved7[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t intb2[0x00004]; + pseudo_bit_t reserved8[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t into_virbus[0x00001]; + pseudo_bit_t reserved10[0x00005]; + pseudo_bit_t into[0x00019]; + pseudo_bit_t reserved11[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t into2[0x00004]; + pseudo_bit_t reserved12[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t serr_virbus[0x00001]; + pseudo_bit_t reserved13[0x00005]; + pseudo_bit_t serr[0x00019]; + pseudo_bit_t reserved14[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t serr2[0x00004]; + pseudo_bit_t reserved15[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved16[0x00080]; +/* --------------------------------------------------------- */ +}; + +/* Flash Memory Interface Control */ + +struct flash_memory_st { /* Little Endian */ + pseudo_bit_t data_read_wait_cycle[0x00008]; + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t data_write_wait_cycle[0x00008]; + pseudo_bit_t reserved1[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t address[0x00013]; + pseudo_bit_t reserved2[0x0000a]; + pseudo_bit_t flash_CMD[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t flash_data3[0x00008]; + pseudo_bit_t flash_data2[0x00008]; + pseudo_bit_t flash_data1[0x00008]; + pseudo_bit_t flash_data0[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x000a0]; +/* --------------------------------------------------------- */ +}; + +/* Serial Port Master */ + +struct serial_port_master_st { /* Little Endian */ + pseudo_bit_t ADR7[0x00007]; /* Addr[6:0] */ + pseudo_bit_t ADR10[0x00003]; /* Addr[9:7] */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t A[0x00001]; + pseudo_bit_t STS[0x00003]; /* Status */ + pseudo_bit_t reserved1[0x00003]; + pseudo_bit_t VBT[0x00002]; /* Valid Bytes */ + pseudo_bit_t reserved2[0x00005]; + pseudo_bit_t CMD[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t DAT_3[0x00008]; /* Serial Master Data 3 */ + pseudo_bit_t DAT_2[0x00008]; /* Serial Master Data 2 */ + pseudo_bit_t DAT_1[0x00008]; /* Serial Master Data 1 */ + pseudo_bit_t DAT_0[0x00008]; /* Serial Master Data 0 */ +/* --------------------------------------------------------- */ + pseudo_bit_t SMTO[0x00010]; /* Serial Master Timeout */ + pseudo_bit_t GFRC[0x00008]; /* Glitch Free Rise Counter */ + pseudo_bit_t GFFC[0x00008]; /* Glitch Free Fall Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t CLKG[0x00010]; /* Serial Master Clock Generator */ + pseudo_bit_t reserved3[0x00008]; + pseudo_bit_t CLKSC[0x00008]; /* Clock Stretching Counter */ +/* --------------------------------------------------------- */ +}; + +/* IBML Slave */ + +struct IBML_slave_st { /* Little Endian */ + pseudo_bit_t 
ADR0[0x00007]; /* IBML Slave Address */
+ pseudo_bit_t E0[0x00001]; /* IBML Slave address 0 decoding enable */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t CAP_ADDR[0x00007]; /* The actual IB-ML address captured by the slave */
+ pseudo_bit_t reserved1[0x00005];
+ pseudo_bit_t W4BSTP[0x00001]; /* IBML detected a stop condition after a 4-byte write (should be cleared by software) */
+ pseudo_bit_t STS[0x00003]; /* Status */
+/* --------------------------------------------------------- */
+ pseudo_bit_t DAT[0x00020]; /* IBML Data */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ADR1[0x00007];
+ pseudo_bit_t E1[0x00001];
+ pseudo_bit_t ADR2[0x00007];
+ pseudo_bit_t E2[0x00001];
+ pseudo_bit_t ADR3[0x00007];
+ pseudo_bit_t E3[0x00001];
+ pseudo_bit_t ADR4[0x00007];
+ pseudo_bit_t E4[0x00001];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ADR5[0x00007];
+ pseudo_bit_t E5[0x00001];
+ pseudo_bit_t ADR6[0x00007];
+ pseudo_bit_t E6[0x00001];
+ pseudo_bit_t ADR7[0x00007];
+ pseudo_bit_t E7[0x00001];
+ pseudo_bit_t ADR8[0x00007];
+ pseudo_bit_t E8[0x00001];
+/* --------------------------------------------------------- */
+};
+
+/* Serial Port Slave */
+
+struct serial_port_slave_st { /* Little Endian */
+ pseudo_bit_t slave_ADR[0x00007]; /* Serial Slave Address */
+ pseudo_bit_t E[0x00001]; /* Enable slave address decoding
+ (default value: enabled if the serial EEPROM is not present, otherwise disabled) */
+ pseudo_bit_t reserved0[0x00017];
+ pseudo_bit_t F[0x00001];
+/* --------------------------------------------------------- */
+};
+
+/* Parallel CPU Port */
+
+struct parallel_CPU_port_st { /* Little Endian */
+ pseudo_bit_t CPUT[0x00004]; /* CPU Type */
+ pseudo_bit_t reserved0[0x0001a];
+ pseudo_bit_t CpuMode[0x00002];
+/* --------------------------------------------------------- */
+};
+
+/* Parallel Port Debug (global) */
+
+struct par_debug_p_st { /* Little Endian */
+ pseudo_bit_t PowerUp[0x00001];
+ pseudo_bit_t extend[0x00001];
+ pseudo_bit_t exthigh[0x00001];
+ pseudo_bit_t extmore[0x00001];
+ pseudo_bit_t RBCdelay[0x00005];
+ pseudo_bit_t datadelay[0x00002];
+ pseudo_bit_t TBCgenpolar[0x00001];
+ pseudo_bit_t rx_reversed_bit_order[0x00001];
+ pseudo_bit_t tx_reversed_bit_order[0x00001];
+ pseudo_bit_t reserved0[0x00012];
+/* --------------------------------------------------------- */
+};
+
+/* System Monitoring Control */
+
+struct system_monitoring_ctrl_st { /* Little Endian */
+ pseudo_bit_t CLKDIV[0x00008]; /* System Monitoring Clock Divider */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t CHBLEN[0x0000b]; /* System Monitoring Chain Bit Length.
+ Default value is 0x33C, loaded 1 cycle after reset.
*/ + pseudo_bit_t reserved1[0x00005]; +/* --------------------------------------------------------- */ +}; + +/* @serdes_rsu */ + +struct serdes_rsu_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t RSR[0x00005]; /* reserved bits */ + pseudo_bit_t CMD[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t RSU_value[0x00014]; + pseudo_bit_t reserved1[0x0000c]; +/* --------------------------------------------------------- */ +}; + +/* SerDes General Control */ + +struct serdes_general_cont_st { /* Little Endian */ + pseudo_bit_t SDDC0[0x00001]; + pseudo_bit_t SDTHLD0[0x00001]; + pseudo_bit_t reserved0[0x0001e]; +/* --------------------------------------------------------- */ +}; + +/* Encoded Interrupt Control Registers */ + +struct encoded_intr_ctlr_regs_st { /* Little Endian */ + pseudo_bit_t GPIO_EnCauseHigh[0x00020];/* Encoded Interrupt Cause [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnCauseLow[0x00020];/* Encoded Interrupt Cause [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnClearHigh[0x00020];/* Clear Encoded Interrupt [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnClearLow[0x00020];/* Clear Encoded Interrupt [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnSetHigh[0x00020];/* Set Encoded Interrupt [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnSetLow[0x00020];/* Set Encoded Interrupt [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnEnableHigh[0x00020];/* Encoded Interrupt Enable [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnEnableLow[0x00020];/* Encoded Interrupt Enable [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnServicedHigh[0x00020];/* Encoded Interrupt Serviced # [63:32] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_EnServicedLow[0x00020];/* Encoded Interrupt Serviced # [31:0] */ +/* --------------------------------------------------------- */ +}; + +/* GPIO */ + +struct GPIO_st { /* Little Endian */ + pseudo_bit_t GPIO_DataHigh[0x00008];/* GPIO Data [39:32] */ + pseudo_bit_t reserved0[0x00015]; + pseudo_bit_t serr_bit[0x00001]; /* sw controlled access to serr pin */ + pseudo_bit_t intb_bit[0x00001]; /* sw controlled access to intb pin */ + pseudo_bit_t into_bit[0x00001]; /* sw controlled access to into pin */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DataLow[0x00020]; /* GPIO Data [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DirHigh[0x00008]; /* GPIO Direction [39:32] */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DirLow[0x00020]; /* GPIO Direction [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_PolHigh[0x00008]; /* GPIO Polarity [39:32] */ + pseudo_bit_t reserved2[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_PolLow[0x00020]; /* GPIO Polarity [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_ModeHigh[0x00008];/* GPIO Output Mode [39:32] */ + pseudo_bit_t reserved3[0x00018]; 
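+ /* The EnCause/EnClear/EnSet/EnEnable/EnServiced register pattern above suggests
+  * the usual dismiss sequence; a minimal sketch, assuming the words are laid out
+  * in declaration order and that the Clear registers are write-1-to-clear (the
+  * pointer-based accessor is illustrative, not part of this tree):
+  *
+  *   volatile uint32_t *r = encoded_intr_regs;  // word 0 = GPIO_EnCauseHigh
+  *   uint32_t cause_low = r[1];                 // GPIO_EnCauseLow [31:0]
+  *   r[3] = cause_low;                          // GPIO_EnClearLow: dismiss them
+  */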
+/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_ModeLow[0x00020]; /* GPIO Output Mode [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeCauseHigh[0x00008];/* GPIO Interrupt Cause [39:32] */ + pseudo_bit_t reserved4[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeCauseLow[0x00020];/* GPIO Interrupt Cause [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeClearHigh[0x00008];/* Clear GPIO Interrupt Cause [39:32] */ + pseudo_bit_t reserved5[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeClearLow[0x00020];/* Clear GPIO Interrupt Cause [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeSetHigh[0x00008];/* Set GPIO Interrupt Cause [39:32] */ + pseudo_bit_t reserved6[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeSetLow[0x00020];/* Set GPIO Interrupt Cause [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeEnableHigh[0x00008];/* GPIO Interrupt Enabled [39:32] */ + pseudo_bit_t reserved7[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeEnableLow[0x00020];/* GPIO Interrupt Enabled [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeServicedHigh[0x00008];/* GPIO Interrupt Serviced # [39:32] */ + pseudo_bit_t reserved8[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DeServicedLow[0x00020];/* GPIO Interrupt Serviced # [31:0] */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DataClearHigh[0x00008]; + pseudo_bit_t reserved10[0x00015]; + pseudo_bit_t serrclear[0x00001]; + pseudo_bit_t intbclear[0x00001]; + pseudo_bit_t intoclear[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DataClearLow[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIIO_DataSetHigh[0x00008]; + pseudo_bit_t reserved11[0x00015]; + pseudo_bit_t serrset[0x00001]; + pseudo_bit_t intbset[0x00001]; + pseudo_bit_t intoset[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t GPIO_DataSetLow[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Programmable Timers */ + +struct prog_timers_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t Timer_Limit0[0x0001e]; /* Use value bigger than 1 */ + pseudo_bit_t M0[0x00001]; + pseudo_bit_t S0[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t Timer_Limit1[0x0001e]; /* Use value bigger than 1 */ + pseudo_bit_t M1[0x00001]; + pseudo_bit_t S1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t Timer_Limit2[0x0001e]; /* Use value bigger than 1 */ + pseudo_bit_t M2[0x00001]; + pseudo_bit_t S2[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t Timer_Limit3[0x0001e]; /* Use value bigger than 1 */ + pseudo_bit_t M3[0x00001]; + pseudo_bit_t S3[0x00001]; +/* --------------------------------------------------------- */ 
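+ /* Per the notes above, each 30-bit Timer_LimitN must be programmed with a value
+  * bigger than 1; a small guard sketch (the helper name is illustrative, not part
+  * of this tree):
+  *
+  *   static inline int prog_timer_limit_ok( uint32_t limit )
+  *   {
+  *       return limit > 1 && limit < (UINT32_C(1) << 30);
+  *   }
+  */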
+ pseudo_bit_t Timer0cause[0x00001];
+ pseudo_bit_t Timer1cause[0x00001];
+ pseudo_bit_t Timer2cause[0x00001];
+ pseudo_bit_t Timer3cause[0x00001];
+ pseudo_bit_t reserved1[0x0001a];
+ pseudo_bit_t SMPT[0x00001]; /* Sample Start */
+ pseudo_bit_t SMPP[0x00001]; /* Sample Stop */
+/* --------------------------------------------------------- */
+ pseudo_bit_t Timer0CLR[0x00001]; /* Clear Timer Cause */
+ pseudo_bit_t Timer1CLR[0x00001];
+ pseudo_bit_t Timer2CLR[0x00001];
+ pseudo_bit_t Timer3CLR[0x00001];
+ pseudo_bit_t reserved2[0x0001a];
+ pseudo_bit_t CSMPT[0x00001]; /* Clear Sample Start */
+ pseudo_bit_t CSMPP[0x00001]; /* Clear Sample Stop */
+/* --------------------------------------------------------- */
+ pseudo_bit_t Timer0SET[0x00001]; /* Set Timer Cause */
+ pseudo_bit_t Timer1SET[0x00001];
+ pseudo_bit_t Timer2SET[0x00001];
+ pseudo_bit_t Timer3SET[0x00001];
+ pseudo_bit_t reserved3[0x0001a];
+ pseudo_bit_t SSMPT[0x00001]; /* Set Sample Start */
+ pseudo_bit_t SSMPP[0x00001]; /* Set Sample Stop */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ENB[0x00004]; /* Timer Events Enabled */
+ pseudo_bit_t reserved4[0x0001a];
+ pseudo_bit_t SMPTE[0x00001]; /* Sample Start Enabled */
+ pseudo_bit_t SMPPE[0x00001]; /* Sample Stop Enabled */
+/* --------------------------------------------------------- */
+ pseudo_bit_t Timer0srv[0x00001];
+ pseudo_bit_t Timer1srv[0x00001];
+ pseudo_bit_t Timer2srv[0x00001];
+ pseudo_bit_t Timer3srv[0x00001];
+ pseudo_bit_t reserved5[0x0001a];
+ pseudo_bit_t SMPTSRV_N[0x00001]; /* Sample Start Serviced # */
+ pseudo_bit_t SMPPSRV_N[0x00001]; /* Sample Stop Serviced # */
+/* --------------------------------------------------------- */
+};
+
+/* IB BIST Result */
+
+struct IB_BIST_result_st { /* Little Endian */
+ pseudo_bit_t IBP1BC[0x00001]; /* IB Port 1 BIST completed */
+ pseudo_bit_t IBP1BF[0x00001]; /* IB Port 1 BIST failed */
+ pseudo_bit_t IBP2BC[0x00001]; /* IB Port 2 BIST completed */
+ pseudo_bit_t IBP2BF[0x00001]; /* IB Port 2 BIST failed */
+ pseudo_bit_t reserved0[0x0001c];
+/* --------------------------------------------------------- */
+};
+
+/* IB Port Clock Timer */
+
+struct IB_port_clock_timer_st { /* Little Endian */
+ pseudo_bit_t PSMPT[0x00010]; /* Stop Samples Timer */
+ pseudo_bit_t TSMPT[0x00010]; /* Start Samples Timer */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ib_phy_timeout[0x00010];/* IB Phy Timeout Generator */
+ pseudo_bit_t SMPTK[0x00010]; /* Samples Tick Timer */
+/* --------------------------------------------------------- */
+};
+
+/* BIST Result */
+
+struct BIST_result_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t DMU0[0x00001];
+ pseudo_bit_t DMU1[0x00001];
+ pseudo_bit_t NTU0[0x00001];
+ pseudo_bit_t NTU1[0x00001];
+ pseudo_bit_t PCU0[0x00001];
+ pseudo_bit_t PCU1[0x00001];
+ pseudo_bit_t NSI0[0x00001];
+ pseudo_bit_t NSI1[0x00001];
+ pseudo_bit_t TPT0[0x00001];
+ pseudo_bit_t TPT1[0x00001];
+ pseudo_bit_t QPC0[0x00001];
+ pseudo_bit_t QPC1[0x00001];
+ pseudo_bit_t LDB0[0x00001];
+ pseudo_bit_t LDB1[0x00001];
+ pseudo_bit_t SDE0[0x00001];
+ pseudo_bit_t SDE1[0x00001];
+ pseudo_bit_t EXE0[0x00001];
+ pseudo_bit_t EXE1[0x00001];
+ pseudo_bit_t RDE0[0x00001];
+ pseudo_bit_t RDE1[0x00001];
+ pseudo_bit_t TCU0[0x00001];
+ pseudo_bit_t TCU1[0x00001];
+ pseudo_bit_t SERC[0x00001]; /* Serial EEPROM Read Completed */
+ pseudo_bit_t SERF[0x00001]; /* Serial EEPROM Read Failed */
+/*
--------------------------------------------------------- */
+};
+
+/* INIT and CTRL */
+
+struct init_and_ctrl_st { /* Little Endian */
+ pseudo_bit_t RST[0x00001]; /* Reset */
+ pseudo_bit_t PcuPll_od[0x00001];
+ pseudo_bit_t last_reset_swhw[0x00001];/* 1 means that the last reset was a SW reset.
+ 0 means that the last reset was a HW reset */
+ pseudo_bit_t reserved0[0x00005];
+ pseudo_bit_t Serial_EEPROM_HW_Address[0x00007];
+ pseudo_bit_t EEPROM[0x00001];
+ pseudo_bit_t PCIXCLKDLY[0x00002]; /* PCI IO Timing Strapping */
+ pseudo_bit_t PCIXCLKFEEDBACK[0x00002];
+ pseudo_bit_t BISTS[0x00001]; /* BIST Strapping 0 */
+ pseudo_bit_t FTS[0x00001]; /* Funct Test Strapping */
+ pseudo_bit_t SerDes_BISTS[0x00001];
+ pseudo_bit_t BISTS1[0x00001]; /* BIST Strapping 1 */
+ pseudo_bit_t EEPROM_type[0x00001];
+ pseudo_bit_t PcuPll_idin[0x00001];
+ pseudo_bit_t PcuPll_idinbk[0x00001];
+ pseudo_bit_t PcuPllctrl[0x00001];
+ pseudo_bit_t ERP_hold[0x00001];
+ pseudo_bit_t bist_debug_mode[0x00001];
+ pseudo_bit_t init_pci[0x00001];
+ pseudo_bit_t init_ib[0x00001];
+/* --------------------------------------------------------- */
+ pseudo_bit_t DEVID[0x00010];
+ pseudo_bit_t REVID[0x00008];
+ pseudo_bit_t reserved1[0x00008];
+/* --------------------------------------------------------- */
+};
+
+/* HCA General Configuration Registers */
+
+struct HCA_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00010];
+ pseudo_bit_t fcupdatetimedivider[0x00005];
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t fcperiodtimedivider[0x00004];
+ pseudo_bit_t reserved2[0x00004];
+/* --------------------------------------------------------- */
+ pseudo_bit_t LVer[0x00004];
+ pseudo_bit_t TVer[0x00004];
+ pseudo_bit_t dreqwm_4x_4x[0x00006];
+ pseudo_bit_t dreqwm_4x_1x[0x00006];
+ pseudo_bit_t dreqwm_1x_4x[0x00006];
+ pseudo_bit_t dreqwm_1x_1x[0x00006];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved3[0x00040];
+/* --------------------------------------------------------- */
+};
+
+/* Cause Registers */
+
+struct LDB_CAUSEREG_st { /* Little Endian */
+ pseudo_bit_t parity_err[0x00001];
+ pseudo_bit_t dequeue_wrong_number[0x00001];
+ pseudo_bit_t dequeue_when_linklist_empty[0x00001];
+ pseudo_bit_t enqueue_when_linklist_full[0x00001];
+ pseudo_bit_t cr_slave_err[0x00001];
+ pseudo_bit_t reserved0[0x0001b];
+/* --------------------------------------------------------- */
+ struct EXT_CAUSEREG_st extended_cause;
+/* --------------------------------------------------------- */
+};
+
+/* LDB Access Gateway */
+
+struct LDBGW_st { /* Little Endian */
+ struct GWCONTROL_st ldbgwctrl; /* Command
+ 0x00 Reserved
+ 0x01 Execute LDB operation as written in the data
+ 0x02 Request for LDB credit
+ 0x03-0x3F Reserved
+
+ Status
+ 0x00 Success
+ 0x01 Error
+
+ Address
+ For the LDB command, Address is not applicable. The specific command and Data are specified in the GW data according to the LDB interface format (see LDB MAS).
+
+ Data
+ For the LDB commands, Data contents are described in the LDB MAS */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ldbgwdata_127_96_[0x00020];/* LDB Gateway Data */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ldbgwdata_95_64_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ldbgwdata_63_32_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ldbgwdata_31_0_[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00060];
+/* --------------------------------------------------------- */
+};
+
+/* @LDB_BIST */
+
+struct LDB_BIST_st { /* Little Endian */
+ pseudo_bit_t ldb_bist_rdw_0[0x00007];/* 126 */
+ pseudo_bit_t reserved0[0x00019];
+/* --------------------------------------------------------- */
+ pseudo_bit_t rep_arrays_stat[0x00001];
+ pseudo_bit_t reserved1[0x0001f];
+/* --------------------------------------------------------- */
+ pseudo_bit_t bists_stat[0x00001];
+ pseudo_bit_t reserved2[0x0001f];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* LDB Debug */
+
+struct ldb_debug_st { /* Little Endian */
+ pseudo_bit_t ldbarb_agent[0x00003]; /* arbiter agent */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t ldbarb_agentlast[0x00003];/* arbiter last agent */
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t arbiter_stage_ps[0x00001];/* ldb arbiter_stage FSM */
+ pseudo_bit_t reserved2[0x00003];
+ pseudo_bit_t send_data_to[0x00005]; /* unit to send response to */
+ pseudo_bit_t reserved3[0x0000f];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qp_num[0x00019]; /* qp/ee number LDB is dealing with */
+ pseudo_bit_t reserved4[0x00007];
+/* --------------------------------------------------------- */
+ pseudo_bit_t qpc_state_ps[0x00005]; /* ldb_qpc2ldb FSM */
+ pseudo_bit_t reserved5[0x00003];
+ pseudo_bit_t qp_ready[0x00001]; /* ldb_qpc2ldb ready signal */
+ pseudo_bit_t ldb_doorbell_ps[0x00001];/* ldb_qpc2ldb doorbell FSM */
+ pseudo_bit_t reserved6[0x00016];
+/* --------------------------------------------------------- */
+ pseudo_bit_t rwe_state_ps[0x00003]; /* rwe FSM */
+ pseudo_bit_t reserved7[0x00001];
+ pseudo_bit_t rwe_ready[0x00001]; /* rwe ready signal */
+ pseudo_bit_t reserved8[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t tail[0x00010]; /* tail of LinkList */
+ pseudo_bit_t head[0x00010]; /* head of LinkList */
+/* --------------------------------------------------------- */
+ pseudo_bit_t ldbcred_agent[0x00002];/* ldb_credmng agent */
+ pseudo_bit_t reserved9[0x00002];
+ pseudo_bit_t ldbcred_agentlast[0x00002];/* last agent in ldb_credmng */
+ pseudo_bit_t reserved10[0x0001a];
+/* --------------------------------------------------------- */
+};
+
+/* This bit, when set, indicates that ERP will treat each request coming from the corresponding agent as a miss.
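+
+ For debug, the per-agent dirty bits can be ganged into a mask; a sketch for the send-side QPC word, assuming the bit offsets implied by the declaration order below (the macro name is illustrative, not part of this tree):
+
+   #define SQPC_DIRTY_ALL ((1u << 0) | (1u << 8) | (1u << 16) | (1u << 24))
+
+ which covers ex_sqpc_dirty, ld_sqpc_dirty, tc_sqpc_dirty and rd_sqpc_dirty respectively.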
*/ + +struct QPCDIRTY_st { /* Little Endian */ + pseudo_bit_t ex_sqpc_dirty[0x00001]; + pseudo_bit_t reserved0[0x00007]; + pseudo_bit_t ld_sqpc_dirty[0x00001]; + pseudo_bit_t reserved1[0x00007]; + pseudo_bit_t tc_sqpc_dirty[0x00001]; + pseudo_bit_t reserved2[0x00007]; + pseudo_bit_t rd_sqpc_dirty[0x00001]; + pseudo_bit_t reserved3[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t ex_rqpc_dirty[0x00001]; + pseudo_bit_t reserved4[0x00007]; + pseudo_bit_t tc_rqpc_dirty[0x00001]; + pseudo_bit_t reserved5[0x00007]; + pseudo_bit_t rd_rqpc_dirty[0x00001]; + pseudo_bit_t reserved6[0x00007]; + pseudo_bit_t tc_cqc_dirty[0x00001]; + pseudo_bit_t reserved7[0x00007]; +/* --------------------------------------------------------- */ +}; + +/* @QPC_BIST */ + +struct QPC_BIST_st { /* Little Endian */ + pseudo_bit_t qpc_bist1_rdw_0[0x00004];/* 129 */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_1[0x00004];/* 129 */ + pseudo_bit_t reserved1[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_2[0x00004];/* 129 */ + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_3[0x00004];/* 129 */ + pseudo_bit_t reserved3[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved4[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved5[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved6[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist1_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved7[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved8[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved9[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved10[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved11[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved12[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved13[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved14[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist2_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved15[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved16[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved17[0x0001c]; +/* --------------------------------------------------------- */ + 
pseudo_bit_t qpc_bist3_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved18[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved19[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved20[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved21[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved22[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist3_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved23[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved24[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved25[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved26[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved27[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved28[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved29[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved30[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist4_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved31[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved32[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved33[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved34[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved35[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved36[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist5_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved37[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved38[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved39[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved40[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_3[0x00004];/* 130 */ + pseudo_bit_t 
reserved41[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved42[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist6_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved43[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist7_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved44[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist7_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved45[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist7_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved46[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist7_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved47[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved48[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved49[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved50[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved51[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved52[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved53[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved54[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist8_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved55[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_0[0x00004];/* 130 */ + pseudo_bit_t reserved56[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_1[0x00004];/* 130 */ + pseudo_bit_t reserved57[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_2[0x00004];/* 130 */ + pseudo_bit_t reserved58[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_3[0x00004];/* 130 */ + pseudo_bit_t reserved59[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_4[0x00004];/* 130 */ + pseudo_bit_t reserved60[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_5[0x00004];/* 130 */ + pseudo_bit_t reserved61[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_6[0x00004];/* 130 */ + pseudo_bit_t reserved62[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist9_rdw_7[0x00004];/* 130 */ + pseudo_bit_t reserved63[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_0[0x00005];/* 134 */ + pseudo_bit_t reserved64[0x0001b]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_1[0x00005];/* 134 */ + pseudo_bit_t reserved65[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_2[0x00005];/* 134 */ + pseudo_bit_t reserved66[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_3[0x00005];/* 134 */ + pseudo_bit_t reserved67[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_4[0x00005];/* 134 */ + pseudo_bit_t reserved68[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_5[0x00005];/* 134 */ + pseudo_bit_t reserved69[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_6[0x00005];/* 134 */ + pseudo_bit_t reserved70[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist13_wdw_7[0x00005];/* 134 */ + pseudo_bit_t reserved71[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_0[0x00005];/* 134 */ + pseudo_bit_t reserved72[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_1[0x00005];/* 134 */ + pseudo_bit_t reserved73[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_2[0x00005];/* 134 */ + pseudo_bit_t reserved74[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_3[0x00005];/* 134 */ + pseudo_bit_t reserved75[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_4[0x00005];/* 134 */ + pseudo_bit_t reserved76[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_5[0x00005];/* 134 */ + pseudo_bit_t reserved77[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_6[0x00005];/* 134 */ + pseudo_bit_t reserved78[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist14_wdw_7[0x00005];/* 134 */ + pseudo_bit_t reserved79[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist24_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved80[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist25_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved81[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist26_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved82[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist27_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved83[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist28_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved84[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist29_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved85[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist30_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved86[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist31_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved87[0x00018]; +/* --------------------------------------------------------- 
*/ + pseudo_bit_t qpc_bist32_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved88[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist32_wdw_1[0x00008];/* 134 */ + pseudo_bit_t reserved89[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist33_wdw_0[0x00008];/* 134 */ + pseudo_bit_t reserved90[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_0[0x00005];/* 134 */ + pseudo_bit_t reserved91[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_1[0x00005];/* 134 */ + pseudo_bit_t reserved92[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_2[0x00005];/* 134 */ + pseudo_bit_t reserved93[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_3[0x00005];/* 134 */ + pseudo_bit_t reserved94[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_4[0x00005];/* 134 */ + pseudo_bit_t reserved95[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_5[0x00005];/* 134 */ + pseudo_bit_t reserved96[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_6[0x00005];/* 134 */ + pseudo_bit_t reserved97[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_bist35_wdw_7[0x00005];/* 134 */ + pseudo_bit_t reserved98[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_0[0x00006];/* 159 */ + pseudo_bit_t reserved99[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_1[0x00006];/* 159 */ + pseudo_bit_t reserved100[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_0[0x00006];/* 159 */ + pseudo_bit_t reserved101[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_1[0x00006];/* 159 */ + pseudo_bit_t reserved102[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat_1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat_2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat_3[0x00007]; + pseudo_bit_t reserved103[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat_1[0x00008]; + pseudo_bit_t reserved104[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat_0[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat_1[0x00002]; + pseudo_bit_t reserved105[0x0001e]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved106[0x00220]; +/* --------------------------------------------------------- */ +}; + +/* QPC General Configuration */ + +struct QPCGRLCFG_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00018]; + pseudo_bit_t gp_cfg[0x00008]; /* 
General Purpose Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* QPC/EEC/CQC Memory Access Parameters */ + +struct QPCBASEADDR_st { /* Little Endian */ + pseudo_bit_t key[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t pd[0x00018]; + pseudo_bit_t te[0x00001]; + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t va_pa[0x00001]; /* When cleared, the base address is a physical address and no translation will be done. When set, the address is virtual. This bit does not control whether the TPT is accessed or not (that is done by the te field). If te is set, the TPT will be accessed (regardless of the va_pa value) for address decoding purposes. */ + pseudo_bit_t np[0x00001]; + pseudo_bit_t nsvl[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsq_for_reads[0x00006]; + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t nsq_for_writes[0x00006]; + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t mem_io[0x00001]; /* mem/io bit to be used in the NSI transaction if the te bit is cleared (no TPT access) + 1 - memory + 0 - I/O */ + pseudo_bit_t reserved3[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t base_addr_srqp_high[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t base_addr_srqp_low[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_of_qp[0x00005]; + pseudo_bit_t reserved5[0x0000b]; + pseudo_bit_t log_qpc_entry_size[0x00004]; + pseudo_bit_t reserved6[0x00004]; + pseudo_bit_t log_qpc_entry_stride[0x00005]; + pseudo_bit_t reserved7[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t base_addr_ee_high[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t timeout_qpn[0x00020]; /* in HW this field was called base_addr_ee_low; now it is used to pass the QPN with a timeout from QPC to EXES */ +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_of_ee[0x00005]; + pseudo_bit_t reserved9[0x0000b]; + pseudo_bit_t log_eec_entry_size[0x00004]; + pseudo_bit_t reserved10[0x00004]; + pseudo_bit_t log_eec_entry_stride[0x00005]; + pseudo_bit_t reserved11[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t base_addr_cqp_high[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t base_addr_cqp_low[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_of_cq[0x00005]; + pseudo_bit_t reserved13[0x00013]; + pseudo_bit_t log_cqc_entry_stride[0x00005]; + pseudo_bit_t reserved14[0x00003]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00020]; +/* --------------------------------------------------------- */ +};
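The log-based sizing fields above fully determine the layout of the context tables: the QPC table holds 2^log_num_of_qp entries placed 2^log_qpc_entry_stride bytes apart. A minimal C sketch of that arithmetic follows; the helper name and the byte interpretation of the stride are illustrative assumptions, not part of this header.

    #include <stdint.h>

    /* Hypothetical helper: locate the context of 'qpn' inside the QPC
     * table described by QPCBASEADDR_st. Assumes the stride is in bytes. */
    static uint64_t qpc_entry_addr(uint64_t base, uint32_t qpn,
                                   unsigned log_num_of_qp,
                                   unsigned log_qpc_entry_stride)
    {
        /* Only the low log_num_of_qp bits of the QPN index the table. */
        uint32_t index = qpn & ((1u << log_num_of_qp) - 1);
        return base + ((uint64_t)index << log_qpc_entry_stride);
    }

For example, log_num_of_qp = 16 with log_qpc_entry_stride = 8 would describe 65536 contexts spaced 256 bytes apart, a 16 MB table.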
/* QPC NSI Gateway */ + +struct QPCNSIGW_st { /* Little Endian */ + struct NSIGWCTRL_st nsigw_ctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ + struct NSIGWEXTCTRL_st nsi_gateway_ext_control; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t crl_fsm[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x005e0]; +/* --------------------------------------------------------- */ + struct NSIGWRAM_st nsi_gateway_ram; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct COMP_FSM_st { /* Little Endian */ + pseudo_bit_t fifo_ps[0x00001]; + pseudo_bit_t gw_ps_sram[0x00002]; + pseudo_bit_t tag_ps[0x00002]; + pseudo_bit_t qpstate_ps[0x00002]; + struct STD_QC_FSMS_st qc_fsms; + pseudo_bit_t reserved0[0x00009]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct RECEIVE_FSM_st { /* Little Endian */ + pseudo_bit_t in_use_ps[0x00002]; + pseudo_bit_t gw_ps_sram[0x00002]; + pseudo_bit_t tag_ps[0x00002]; + pseudo_bit_t qpstate_ps[0x00002]; + struct STD_QC_FSMS_st qc_fsms; + pseudo_bit_t reserved0[0x00008]; +/* --------------------------------------------------------- */ +}; + +/* */ + +struct SEND_FSM_st { /* Little Endian */ + pseudo_bit_t in_use_ps[0x00002]; + pseudo_bit_t gw_ps_sram[0x00002]; + pseudo_bit_t tag_ps[0x00002]; + pseudo_bit_t qpstate_ps[0x00002]; + struct STD_QC_FSMS_st qc_fsms; + pseudo_bit_t c2t_ps[0x00001]; + pseudo_bit_t time_access_ps[0x00002]; + pseudo_bit_t gw_ps_timers[0x00002]; + pseudo_bit_t reserved0[0x00003]; +/* --------------------------------------------------------- */ +}; + +/* Transport Timeout Mechanism Access */ + +struct QPCTIMERS_st { /* Little Endian */ + pseudo_bit_t limit[0x00009]; + pseudo_bit_t reserved0[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t Address[0x00009]; + pseudo_bit_t reserved1[0x0000f]; + pseudo_bit_t gwcmd[0x00006]; + pseudo_bit_t gwbusy[0x00001]; + pseudo_bit_t gwlocked[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t timers_gw_data[0x00010]; + pseudo_bit_t reserved2[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t to_status[0x00020]; +/* --------------------------------------------------------- */ + struct timeoutfifoctrl_st timeoutfifoctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t timeoutfifodata[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* Context Gateways */ + +struct QCGWS_st { /* Little Endian */ + struct QCGW_st qcgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ + struct QCMISSGW_st qcmissgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x001c0]; +/* --------------------------------------------------------- */ + struct QCTAGGW_st qctaggw; +/* --------------------------------------------------------- */ + struct QC_OP_MASK_st opcode_mask; + pseudo_bit_t reserved2[0x00008]; + pseudo_bit_t condtion_mask[0x00006]; + pseudo_bit_t reserved3[0x00002]; +/* --------------------------------------------------------- */ + struct REEXEFSMCTL_st ReexeFsmCtl; +/* --------------------------------------------------------- */ + struct
QCGATEKEEPER_st qcgatekeeper; +/* --------------------------------------------------------- */ +}; + +/* Cache Performance Counters */ + +struct CACHEPERF_st { /* Little Endian */ + pseudo_bit_t accesscounter[0x00020];/* Access Counter: Incremented for every access to the cache. + Generates an event to the unit iRISC every time it wraps around. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ +}; + +/* Cause Registers */ + +struct QPC_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t irisc_nsw_error[0x00001]; + pseudo_bit_t SQPC_parity_err[0x00001]; + pseudo_bit_t Hit_more_one_way_SQPC[0x00001]; + pseudo_bit_t RQPC_parity_err[0x00001]; + pseudo_bit_t Hit_more_one_way_RQPC[0x00001]; + pseudo_bit_t CQC_parity_err[0x00001]; + pseudo_bit_t Hit_more_one_way_CQC[0x00001]; + pseudo_bit_t CR_slave_timeout[0x00001]; + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* CQ Doorbell FIFO */ + +struct CQDBFIFO_st { /* Little Endian */ + struct CQFIFOGWCTL_st cqfifo_gwctl; +/* --------------------------------------------------------- */ + pseudo_bit_t fifodata0[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Virtual CR Space Access Register */ + +struct CRGW_st { /* Little Endian */ + pseudo_bit_t address[0x00014]; /* Address in CR-Space */ + pseudo_bit_t reserved0[0x0000a]; + pseudo_bit_t cmd[0x00001]; /* 1 - Read + 0 - Write */ + pseudo_bit_t active[0x00001]; /* If cleared, CR-Space accesses through this slave are done by hardware. If set, the cr-space gateway is used by the slave to access the CR-Space. Transaction details are stored in the cmd, address and data registers and the busy bit is set to trigger FW handling of the transaction. FW clears the busy field to notify hardware of transaction completion. */ +/* --------------------------------------------------------- */ + pseudo_bit_t data[0x00020]; /* For write - the data to be written. + For read - the return data is placed here. + Writing to this register signals the end of the ERP processing, so it needs to be done for both reads and writes! */ +/* --------------------------------------------------------- */ +};
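The active/busy handshake described above is a firmware-serviced mailbox: a transaction is deposited in cmd/address/data, firmware performs the access, and writing data completes it. Below is a minimal sketch of the firmware side, assuming a hypothetical flat MMIO mapping of CRGW_st into two 32-bit words with the bit positions implied by the field widths above (address in bits 19:0, cmd in bit 30, active in bit 31); the crgw_regs layout and the cr_space backing array are illustrative only.

    #include <stdint.h>

    struct crgw_regs {              /* assumed mirror of CRGW_st, 2 dwords */
        volatile uint32_t ctrl;     /* [19:0] address, [30] cmd, [31] active */
        volatile uint32_t data;     /* write data in / read data out */
    };

    /* Service one pending gateway transaction; returns 0 if none pending.
     * Treats 'active' as the pending flag; the separate busy flag the
     * comment mentions is not visible in this struct, so it is folded in
     * here as an assumption. */
    static int crgw_service(struct crgw_regs *gw, uint32_t *cr_space)
    {
        uint32_t ctrl = gw->ctrl;
        if (!(ctrl & (1u << 31)))   /* active clear: HW handles CR space */
            return 0;
        uint32_t addr = ctrl & 0xfffffu;
        if (ctrl & (1u << 30))      /* cmd == 1: read */
            gw->data = cr_space[addr >> 2];
        else                        /* cmd == 0: write */
            cr_space[addr >> 2] = gw->data;
        /* Per the comment above, the write to data ends the ERP
         * processing for both reads and writes. */
        return 1;
    }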
/* NSI BIST */ + +struct nsi_bist_st { /* Little Endian */ + pseudo_bit_t nsi_bist_dmunturx_wdw_0[0x00008];/* 153 */ + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_dmunturx_wdw_1[0x00008];/* 153 */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_dmunturx_wdw_2[0x00008];/* 153 */ + pseudo_bit_t reserved2[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_pcurx_wdw_0[0x00008];/* 144 */ + pseudo_bit_t reserved3[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_pcurx_wdw_1[0x00008];/* 145 */ + pseudo_bit_t reserved4[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_pcurx_wdw_2[0x00008];/* 145 */ + pseudo_bit_t reserved5[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t nsi_bist_pcurx_wdw_3[0x00008];/* 145 */ + pseudo_bit_t reserved6[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00007]; + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x00004]; + pseudo_bit_t reserved8[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00006]; + pseudo_bit_t reserved9[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* These registers hold the credits (one credit = 128 bits) for the NSI FIFOs on the send side. + The default values are OK. + Hardware changes these registers all the time. + If, because of a bug, these registers need to be changed, it should be done before anyone has accessed the NSI!
*/ + +struct ns_vl_hexcreds_st { /* Little Endian */ + pseudo_bit_t hexcred_p[0x00005]; + pseudo_bit_t reserved0[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t hexcred_nonp[0x00005]; + pseudo_bit_t reserved1[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t hexcred_res[0x00005]; + pseudo_bit_t reserved2[0x0001b]; +/* --------------------------------------------------------- */ +}; + +/* NSwitch Agent Credits */ + +struct ns_creds_ntu_st { /* Little Endian */ + pseudo_bit_t cred_p[0x00004]; + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_nonp[0x00004]; + pseudo_bit_t reserved1[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_res[0x00004]; + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* NSwitch Agent Credits */ + +struct ns_creds_pcu_st { /* Little Endian */ + pseudo_bit_t cred_p[0x00004]; + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_nonp[0x00004]; + pseudo_bit_t reserved1[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_res[0x00004]; + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* NSwitch Agent Credits */ + +struct ns_creds_dmu_st { /* Little Endian */ + pseudo_bit_t cred_p[0x00004]; + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_nonp[0x00004]; + pseudo_bit_t reserved1[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t cred_res[0x00004]; + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ +}; + +/* CR Space Base Address Register */ + +struct mem_bar_st { /* Little Endian */ + pseudo_bit_t bar_msms[0x00020]; /* CR space address lsbs */ +/* --------------------------------------------------------- */ + pseudo_bit_t size[0x00006]; /* CR space address space BASE = BAR. + CR space address space LIMIT = BAR + ( 2 ^ size ). + Any address that is BASE <= address <= LIMIT will go to CR space */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t enable[0x00001]; /* Enable bit for this whole address decoder */ + pseudo_bit_t reserved1[0x0000c]; + pseudo_bit_t bar_lsbs[0x0000c]; /* CR space address msbs */ +/* --------------------------------------------------------- */ +};
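The decode rule in the size comment above is easy to state in code: BASE = BAR, LIMIT = BAR + 2^size, and an enabled decoder claims any address in [BASE, LIMIT]. A small sketch follows; assembling the full BAR from the 32-bit and 12-bit fields (per their lsb/msb comments) is an assumption based only on the field widths, and the helper is illustrative.

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical decode check mirroring the mem_bar_st comment. */
    static bool cr_space_claims(uint32_t bar_low32, uint32_t bar_high12,
                                unsigned size, bool enable, uint64_t addr)
    {
        if (!enable)
            return false;
        uint64_t base  = ((uint64_t)bar_high12 << 32) | bar_low32;
        uint64_t limit = base + (1ull << size);    /* LIMIT = BAR + 2^size */
        return addr >= base && addr <= limit;      /* inclusive, as stated */
    }

For example, a BAR of 0x1_0000_0000 with size = 20 claims a 1 MB window starting at 4 GB.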
/* @nsi_nonp_cfg */ + +struct nsi_nonp_cfg_st { /* Little Endian */ + pseudo_bit_t vl[0x00001]; + pseudo_bit_t io_mem[0x00001]; /* This bit controls the io_mem bit in the Nswitch read response header. */ + pseudo_bit_t return_deadbeef[0x00001];/* When set, read responses will return with data phases of ffffffff..... + When clear, read responses return headers only - in this configuration the syndrome must be set to error, otherwise the chip will get stuck! */ + pseudo_bit_t reserved0[0x00005]; + pseudo_bit_t syndorm[0x00008]; /* This syndrome will be returned in the read response packet - check the Nswitch spec for these values. */ + pseudo_bit_t reserved1[0x00010]; +/* --------------------------------------------------------- */ +}; + +/* FW Scratch Pad */ + +struct SCRPAD_st { /* Little Endian */ + struct MSIX_st msix; +/* --------------------------------------------------------- */ + pseudo_bit_t word[42][0x00020]; /* instead of RESERVED */ +/* --------------------------------------------------------- */ + struct Transport_and_CI_Error_Counters_st error_counters;/* Transport and CI error counters */ +/* --------------------------------------------------------- */ + struct GUID_INFO_st guid_info; /* GUID_INFO holding node, port1 and port2 GUIDs */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ + struct SPDH_st spd[4]; /* SPD info from DDR DIMMs (part of boot2?) */ +/* --------------------------------------------------------- */ + struct FW_TRACE_st fw_trace; /* FW_TRACE params */ +/* --------------------------------------------------------- */ + struct FW_VERSION_st fw_version; /* FW Version */ +/* --------------------------------------------------------- */ + struct INIT_HCA_st init_hca; /* INIT_HCA params */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00100]; +/* --------------------------------------------------------- */ + struct EQN_MAP_st eqn_map; +/* --------------------------------------------------------- */ + struct BOOT2_SCRPAD_st boot2; +/* --------------------------------------------------------- */ + struct TPT_SCRPAD_st tpt; +/* --------------------------------------------------------- */ + struct NTU_SCRPAD_st ntu; +/* --------------------------------------------------------- */ + struct QPC_SCRPAD_st qpc; +/* --------------------------------------------------------- */ + struct TCU_SCRPAD_st tcu; +/* --------------------------------------------------------- */ + struct EXUS_SCRPAD_st exus; +/* --------------------------------------------------------- */ + struct EXUR_SCRPAD_st exur; +/* --------------------------------------------------------- */ +}; + +/* TPT BIST */ + +struct tpt_bist_st { /* Little Endian */ + pseudo_bit_t tpt_rw26bist_wdw_0[0x00005];/* 138 */ + pseudo_bit_t reserved0[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw26bist_wdw_1[0x00005];/* 138 */ + pseudo_bit_t reserved1[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw26bist_wdw_2[0x00005];/* 138 */ + pseudo_bit_t reserved2[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw26bist_wdw_3[0x00005];/* 138 */ + pseudo_bit_t reserved3[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way0l_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved4[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way0m_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way1l_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way1m_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way2l_rdw_0[0x00007];/* 157 */ + pseudo_bit_t
reserved8[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way2m_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved9[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way3l_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved10[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_rw112_bist_way3m_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved11[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_0_rdw_0[0x00007];/* 157 */ + pseudo_bit_t reserved12[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_0_rdw_1[0x00007];/* 157 */ + pseudo_bit_t reserved13[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_1_rdw_0[0x00007];/* 158 */ + pseudo_bit_t reserved14[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_1_rdw_1[0x00007];/* 158 */ + pseudo_bit_t reserved15[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_2_rdw_0[0x00007];/* 158 */ + pseudo_bit_t reserved16[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_2_rdw_1[0x00007];/* 158 */ + pseudo_bit_t reserved17[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_3_rdw_0[0x00007];/* 158 */ + pseudo_bit_t reserved18[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_xl68_bist_way_3_rdw_1[0x00007];/* 158 */ + pseudo_bit_t reserved19[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t tpt_array_bist_rdw_0[0x00006];/* 145 */ + pseudo_bit_t reserved20[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_0[0x00006];/* 158 */ + pseudo_bit_t reserved21[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_data_rdw_1[0x00006];/* 158 */ + pseudo_bit_t reserved22[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_0[0x00006];/* 158 */ + pseudo_bit_t reserved23[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t ir_bist2waydata_code_rdw_1[0x00006];/* 158 */ + pseudo_bit_t reserved24[0x0001a]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00019]; + pseudo_bit_t reserved25[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t bists_stat[0x00017]; + pseudo_bit_t reserved26[0x00009]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x00011]; + pseudo_bit_t reserved27[0x0000f]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved28[0x00c80]; +/* --------------------------------------------------------- */ +}; + +/* TPT NSI Gateway */ + +struct TPTNSIGW_st { /* Little Endian */ + struct NSIGWCTRL_st nsigw_ctrl; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00060]; +/* --------------------------------------------------------- */ + struct NSIGWEXTCTRL_st nsi_gateway_ext_control; +/* 
--------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t crl_fsm[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x005e0]; +/* --------------------------------------------------------- */ + struct NSIGWRAM_st nsi_gateway_ram; +/* --------------------------------------------------------- */ +}; + +/* Event Queues Cause Register */ + +struct ECR_st { /* Little Endian */ + pseudo_bit_t ecr_63_32_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ecr_31_0_[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t ecr_clear_63_32_[0x00020];/* Bits set when writing to this register result in the corresponding bits in ecr[63:32] being cleared. Bits not set are not changed in ecr[63:32]. Reading this register returns an undefined value. */ +/* --------------------------------------------------------- */ + pseudo_bit_t ecr_clear_31_0_[0x00020];/* Bits set when writing to this register result in the corresponding bits in ecr[31:0] being cleared. Bits not set are not changed in ecr[31:0]. Reading this register returns an undefined value. */ +/* --------------------------------------------------------- */ + pseudo_bit_t ecr_set_63_32_[0x00020];/* Bits set when writing to this register result in the corresponding bits in ecr[63:32] being set. Bits not set are not changed in ecr[63:32]. Reading this register returns an undefined value. */ +/* --------------------------------------------------------- */ + pseudo_bit_t ecr_set_31_0_[0x00020];/* Bits set when writing to this register result in the corresponding bits in ecr[31:0] being set. Bits not set are not changed in ecr[31:0]. Reading this register returns an undefined value. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00040]; +/* --------------------------------------------------------- */ +};
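The clear/set registers above follow the usual write-1-to-clear / write-1-to-set pattern, which lets software acknowledge a single event queue without a read-modify-write race on the live ecr bits. A minimal sketch, assuming a hypothetical MMIO mapping of ECR_st in the field order above; the ecr_regs layout and helper name are illustrative only.

    #include <stdint.h>

    struct ecr_regs {               /* assumed mirror of ECR_st */
        volatile uint32_t ecr_63_32, ecr_31_0;
        volatile uint32_t clear_63_32, clear_31_0;
        volatile uint32_t set_63_32, set_31_0;
    };

    /* Acknowledge event queue 'eqn' (0..63) by clearing its ECR bit.
     * Only bits written as 1 are affected; all other bits are untouched,
     * so no read-modify-write of ecr itself is needed. */
    static void ecr_ack_eq(struct ecr_regs *ecr, unsigned eqn)
    {
        if (eqn < 32)
            ecr->clear_31_0 = 1u << eqn;
        else
            ecr->clear_63_32 = 1u << (eqn - 32);
    }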
/* HCA Command Interface */ + +struct HCACMDIFACE_st { /* Little Endian */ + pseudo_bit_t in_param_h[0x00020]; /* Input parameter[63:32] / pointer to mailbox. */ +/* --------------------------------------------------------- */ + pseudo_bit_t in_param_l[0x00020]; /* Input parameter[31:0] / pointer to mailbox. */ +/* --------------------------------------------------------- */ + pseudo_bit_t input_modifier[0x00020];/* Command interface input modifier */ +/* --------------------------------------------------------- */ + pseudo_bit_t out_param_h[0x00020]; /* Output parameter [63:32] / output mailbox pointer */ +/* --------------------------------------------------------- */ + pseudo_bit_t out_param_l[0x00020]; /* Output parameter [31:0] / output mailbox pointer */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00010]; + pseudo_bit_t token[0x00010]; /* Software-assigned token for the command */ +/* --------------------------------------------------------- */ + pseudo_bit_t opcode[0x0000c]; /* Command opcode */ + pseudo_bit_t opcode_modifier[0x00004];/* Command opcode modifier */ + pseudo_bit_t reserved1[0x00006]; + pseudo_bit_t e[0x00001]; /* Event required */ + pseudo_bit_t go[0x00001]; /* Go - set this bit to execute the command. + 1 - HW ownership of the HCR + 0 - SW ownership of the HCR */ + pseudo_bit_t status[0x00008]; /* Command execution status + 0 = success */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00020]; +/* --------------------------------------------------------- */ +};
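The go bit above implements a simple ownership handshake on the HCR: software fills in the parameters while go is 0, sets go to hand the register block to hardware, and regains ownership when hardware clears go, at which point status is valid. A minimal polling sketch follows; the hcr_regs MMIO mirror and the little-endian packing of the control word (opcode in bits 11:0, modifier in 15:12, e in 22, go in 23, status in 31:24, per the field order above) are assumptions, and this is an illustration rather than a driver's actual command path.

    #include <stdint.h>

    struct hcr_regs {                   /* assumed mirror of HCACMDIFACE_st */
        volatile uint32_t in_param_h, in_param_l;
        volatile uint32_t input_modifier;
        volatile uint32_t out_param_h, out_param_l;
        volatile uint32_t token;        /* token in bits [31:16] */
        volatile uint32_t ctrl;         /* opcode/modifier/e/go/status */
    };

    #define HCR_GO        (1u << 23)
    #define HCR_STATUS(c) ((c) >> 24)   /* 0 = success */

    static int hcr_cmd(struct hcr_regs *hcr, uint64_t in_param,
                       uint32_t in_mod, uint16_t opcode,
                       uint8_t opcode_mod, uint16_t token)
    {
        if (hcr->ctrl & HCR_GO)         /* HW still owns the HCR */
            return -1;
        hcr->in_param_h = (uint32_t)(in_param >> 32);
        hcr->in_param_l = (uint32_t)in_param;
        hcr->input_modifier = in_mod;
        hcr->token = (uint32_t)token << 16;
        /* Writing go last hands the HCR to hardware. */
        hcr->ctrl = opcode | ((uint32_t)opcode_mod << 12) | HCR_GO;
        while (hcr->ctrl & HCR_GO)
            ;                           /* poll; no timeout for brevity */
        return (int)HCR_STATUS(hcr->ctrl);
    }

An event-driven variant would set the e bit instead of polling and pick up completion from the command-interface event, matching the "Event required" field above.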
/* Cause Registers */ + +struct TPT_CAUSEREG_st { /* Little Endian */ + pseudo_bit_t tp_crtimeout_occured[0x00001]; + pseudo_bit_t tpt_pipe0[0x00001]; + pseudo_bit_t tpt_pipe1[0x00001]; + pseudo_bit_t tpt_pipe2[0x00001]; + pseudo_bit_t tpt_pipe3[0x00001]; + pseudo_bit_t tpt_pipe4[0x00001]; + pseudo_bit_t xl_perr[0x00001]; + pseudo_bit_t rw_perr[0x00001]; + pseudo_bit_t irisc_nsw_error[0x00001]; + pseudo_bit_t reserved0[0x00017]; +/* --------------------------------------------------------- */ + struct EXT_CAUSEREG_st extended_cause; +/* --------------------------------------------------------- */ +}; + +/* TPT Cache Gateway */ + +struct TPTGW_st { /* Little Endian */ + struct TPTGW_CTRL_st tptgwctrl; +/* --------------------------------------------------------- */ + struct TPTGW_DATA_st tptgwdata; +/* --------------------------------------------------------- */ +}; + +/* Transport and Completion Unit (TCU) */ + +struct TCU_st { /* Little Endian */ + struct IRisc_st irisc; /* Interrupt Routing: + ---------------------------- + 0 misaligned access + 1 code bp + 2 data bp + 3 trap + + 4 ipc + 5 ccm + 6 dcm + 7 pcnt0 + 8 pcnt1 + + 9 tpterr - Request for translation from TPT failed. + 10 nswnak - non-posted write to NSW failed. + 11 entint, e.g. NAK + 12 cqerror - cq does not exist + 13 cqlock - CQC entry is locked for reading. Retry counter exceeded. + 14 cqbp - CQ BreakPoint + 15 cqoverfl - CQ overflow + 16 ldberr - syndrome read from LDB != ok + 17 wrppb - wraparound / page boundary + 18 i1rde_gpint0 - top of RDE fifo detected error + 19 i1rde_gpint1 - RDE finished TPT flush mechanism. + 20 i1rde_gpint2 - general purpose register from RDE + 21 i1rde_gpint3 - general purpose register from RDE + 23 excp - packet received with bp (error, trap, qp0 etc.) + 24 digw - dispatcher busy bit released + 25 qpcgw - qpc gw busy bit released + 26 drppkt - counter of dropped packets reached 0xffffffff (and wrapped around) + + 31 timer + */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00800]; +/* --------------------------------------------------------- */ + struct RAW_st raweth; /* RAW BTH and DETH for Ethertype */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00100]; +/* --------------------------------------------------------- */ + struct RAW_st rawipv6; /* RAW BTH and DETH for IPV6 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00100]; +/* --------------------------------------------------------- */ + struct ROUTERMODE_st routermode; /* router mode cfg data. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00b00]; +/* --------------------------------------------------------- */ + struct OPCODEMASKS_st opcodemasks; /* Opcode Masks - Drop and Trap */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00100]; +/* --------------------------------------------------------- */ + struct EXCEPTIONMASKS_st exceptionmasks;/* TCU Exception Masks - Ignore, Drop and Trap */ +/* --------------------------------------------------------- */ + struct TCUCHKCAUSE_st tcuchkcause; /* checker cause registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00080]; +/* --------------------------------------------------------- */ + struct TCUGRLCFG_st tcugrlcfg; /* tcu general cfg */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00800]; +/* --------------------------------------------------------- */ + struct TCUTCHEADER_st tcutcheader; +/* --------------------------------------------------------- */ + struct TCUTCQPC_st tcutcqpcrd; /* tcu data that was sampled from QPC */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00080]; +/* --------------------------------------------------------- */ + struct MGIDTABLE_st mgidtable; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x02000]; +/* --------------------------------------------------------- */ + struct QPCGW_st tcuqpcgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00180]; +/* --------------------------------------------------------- */ + struct TCUPKTMV_st tcupktmv; /* irisc order for what to do with a packet whose exception vector is not null */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00040]; +/* --------------------------------------------------------- */ + struct TCUTCGW_st tcutcgw; /* tcu tc gateway for sending packets from the exception buffer to the CLI or to the RDE */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00400]; +/* --------------------------------------------------------- */ + struct TCUMCCache_st mccache; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00800]; +/* --------------------------------------------------------- */ + struct CECFG_st completionenginecfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00800]; +/* --------------------------------------------------------- */ +
struct PKEYTABLE_st pkeytable; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved14[0x01000]; +/* --------------------------------------------------------- */ + struct TCU_CAUSEREG_st tcucause; /* Cause Bits: + 31:5 - reserved + 4 - ir_nsw_error + 3 - tcu_crtimeout_occurred + 2 - ce_parity_error + 1 - eb_parity_error + 0 - pb_parity_error */ +/* --------------------------------------------------------- */ + struct FENCE_st tcufence; /* Fence for the transport checks. incount is incremented for every packet received from cli. outcount is incremented for every packet dropped or handed over to the rde. + + * This feature does not work, see bug 4982 */ +/* --------------------------------------------------------- */ + pseudo_bit_t tcufence_outincred[0x00020];/* Increment Output Counter from CrSpace + + * This feature does not work, see bug 4982 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00060]; +/* --------------------------------------------------------- */ + struct CEFENCE_st cefence; /* Fence for the completion engine. incount is incremented for every entry pushed into the input fifo. outcount is incremented for every entry that has finished processing at the completion engine. + + * This feature does not work, see bug 4961 */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved16[0x00580]; +/* --------------------------------------------------------- */ + struct TCUTCDEBUG_st tcutcdebug; /* TCU TC debug */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved17[0x00600]; +/* --------------------------------------------------------- */ + struct TCU_BIST_st tcu_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved18[0x00bc0]; +/* --------------------------------------------------------- */ + struct TCUEBCTL_st tcuebctl; /* tcu exception buffer read/write pointers */ +/* --------------------------------------------------------- */ + struct TCUEB_st tcueb; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved19[0x08000]; +/* --------------------------------------------------------- */ +}; + +/* Execution Engines Unit (EXU) */ + +struct EXU_st { /* Little Endian */ + struct IRisc_st irisc0; /* Interrupt Routing: + ---------------------------- + 0 misaligned access + 1 code bp + 2 data bp + 3 trap + + 4 ipc + 5 ccm + 6 dcm + 7 pcnt0 + 8 pcnt1 + + 9 sdb - send qp doorbell (FIFO not empty) + 10 exus - Execution unit halted + 11 rdb - receive qp doorbell (FIFO not empty) + 12 qpcgw0 - qpc gw 0 busy bit released + 13 qpcgw1 - qpc gw 1 busy bit released + 14 eeddb - ee disconnect DB (FIFO not empty) + + 18 i1sde_gpint0 - general purpose int pin from SDE + 19 i1sde_gpint1 - general purpose int pin from SDE + 20 i1sde_gpint2 - general purpose int pin from SDE + 21 i1sde_gpint3 - general purpose int pin from SDE + + 31 timer + */ +/* --------------------------------------------------------- */ + struct IRisc_st irisc1; /* Interrupt Routing: + ---------------------------- + 0 misaligned access + 1 code bp + 2 data bp + 3 trap + + 4 ipc + 5 ccm + 6 dcm + 7 pcnt0 + 8 pcnt1 + + 9 sdb - send qp doorbell (FIFO not empty) + 10 exus - Execution unit halted + 11 rdb - receive qp doorbell (FIFO not empty) + 12 qpcgw0 - qpc gw 0 busy bit released + 13 qpcgw1 - qpc gw 1 busy bit released + 14 eeddb - ee disconnect DB (FIFO not empty) + + 18 i1sde_gpint0 - general purpose int pin
from SDE + 19 i1sde_gpint1 - general purpose int pin from SDE + 20 i1sde_gpint2 - general purpose int pin from SDE + 21 i1sde_gpint3 - general purpose int pin from SDE + + 31 timer + */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00800]; +/* --------------------------------------------------------- */ + struct RCVDB_RDBPARAMS_st rcvdb_rdbparams;/* Base Address is only 63:32 - + baseaddress[31:0] is reserved + + A copy of this table is held in the RDE. It is up to config sw to keep them equal. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00080]; +/* --------------------------------------------------------- */ + struct MEMACCESSPARAMS_st avtableparams; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00080]; +/* --------------------------------------------------------- */ + struct EXEOPCODEBP_st opcodebp; /* when corresponding bit is set, execution engine stops upon detection of opcode */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x005c0]; +/* --------------------------------------------------------- */ + struct QPCGW_st exuqpcgw0; +/* --------------------------------------------------------- */ + struct QPCGW_st exuqpcgw1; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x01000]; +/* --------------------------------------------------------- */ + struct EXE_SEMAPHORES_st semaphores;/* Exe general purpose semaphores */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00700]; +/* --------------------------------------------------------- */ + struct EXU_BIST_st exu_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x01c00]; +/* --------------------------------------------------------- */ + struct EXEEVENT_st exeevent; /* Any bit set will cause interrupt in the corresponding exuirisc if mask bit is set (see engiriscXena) */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00080]; +/* --------------------------------------------------------- */ + struct EXEENGIRISCMASK_st engirisc0ena; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00080]; +/* --------------------------------------------------------- */ + struct EXEENGIRISCMASK_st engirisc1ena; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00d80]; +/* --------------------------------------------------------- */ + struct EXTDB_st extdb; /* External Doorbells Control */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00740]; +/* --------------------------------------------------------- */ + struct INTDB_st intdb; /* Internal Doorbells Control */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00080]; +/* --------------------------------------------------------- */ + struct RDEEDISCFIFO_st rdeediscfifo;/* fifo with end to end contexts whose last message has been completed. iRisc pops this fifo and disconnects qp from the ee in question. 
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x000c0]; +/* --------------------------------------------------------- */ + struct TPTNSIIF_st fetchertptnsiif; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x000e0]; +/* --------------------------------------------------------- */ + struct DESCFIFOGW_st exudesc; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved14[0x00300]; +/* --------------------------------------------------------- */ + struct EXE_CAUSEREG_st exucause; /* Cause bits: + 31:3 - reserved + 2 - CRBus Timeout + 1 - IRISC 1 Error + 0 - IRISC 0 Error */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00100]; +/* --------------------------------------------------------- */ + struct EXEARB_st exearb; /* Arbiter properties */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved16[0x00100]; +/* --------------------------------------------------------- */ + struct EXUGRLCFG_st exugrlcfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved17[0x00200]; +/* --------------------------------------------------------- */ + struct PKEYTABLE_st pkeytable; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved18[0x01000]; +/* --------------------------------------------------------- */ + struct EXE_MGIDTABLE_st mgidtable; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved19[0x02000]; +/* --------------------------------------------------------- */ + struct EXENGINE_st exu[16]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved20[0x08000]; +/* --------------------------------------------------------- */ +}; + +/* Receive Data Engine (RDE) */ + +struct RDE_st { /* Little Endian */ + struct RDEGW_st rdegw; /* LINE 0 + ------------------------------------ + 63:56 IB OpCode + 55:32 PSN + 31 AckReq + 28:24 CQPC index + 23 SE + 19:12 RDB wptr + 11 RDB wptr wrap + 10 GRH bit + 9 Signalled Completion + 8 Event Request + 7:0 Error Syndrome + ------------------------------------ + LINE 1 + ------------------------------------ + 63:32 ReadReq:dmalen + Req/w/immdt:ImmediateData + 31:0 Responder:Rkey/DescriptorPTR + 23:0 Requester:DQP for Requester EE + ------------------------------------ + LINE 2 + ------------------------------------ + 63:0 ReadReq/Atomicreq:VA + 31:0 Send/Write/ReadResp:Length Committed + ------------------------------------ + LINE 3 + ------------------------------------ + 63:0 Atomic:Atomic Reply Data of AtomicREQ + 63 DoorBell counter was decremented + 62:56 DLID path bits + 55:32 Source QP + 31:28 SL + 15:0 SLID + ------------------------------------ + LINE 4 + ------------------------------------ + 23:0 CQ number + ------------------------------------ + LINE 5 + ------------------------------------ + 63:56 RDB Base LSB + MTU + 55:32 DQP for Responder EE + 24 `1' when opcode[7:5]=="010" (RD service) + 23:0 QP/EE number + ================================================== + CQPC Constants + ------------------------------------ + 124 NSVL + 123 NonPosted + 122 EventEn + 121 SignalledCompletion + 120 XlationEN + 119:117 MaxReadAtom + 116:112 MaxMsgSize + 111:88 PD + 87:64 CQ + 63:32 Descriptor L_key + 31:0 Descriptor BAR + ================================================== + CQPC r256length + ------------------------------------ + 
127:104 256bytes Length Committed + 95:64 Write R_key + 63:0 Write VA + ================================================== + CQPC RDB_and_more + ------------------------------------ + 121:120 Tag MSB (Requester_Responder#,QP/EE) (READ ONLY) + 119:96 QP/EE number (READ ONLY) + 95:88 RDB read Pointer + 87:80 RDB write Pointer + 79:64 ScatterList Pointer + 63 Freed Entry + 62 Allocated Entry + 61 Locked Entry + 60 Valid Entry + 59 Hold Relock Entry + 57 Descriptor is Inflight (from Memory) + 56 Descriptor is Inside + 55:32 DQP for Responder EE + 31:5 RDB base + 3 LastPushPOP# + 2:0 MTU + ================================================== + Requester ScatterList + ------------------------------------ + 145 RESERVED + 144 Entry is Last + 143:128 Next ScatterList Pointer (linklist) (READ ONLY) + 127:96 Entry Length + 95:64 Entry L_key + 63:0 Entry VA + ================================================== + Responder ScatterList + ------------------------------------ + 145 Entry Read with Error + 144 Entry is Last + 143:130 RESERVED + 129:128 C/E bits of descriptor + 127:96 Entry Length + 95:64 Entry L_key + 63:0 Entry VA + ================================================== + NDA / CDA list + ------------------------------------ + 127:97 RESERVED + 96 CDDBD + 95:70 CDA + 69:64 CDS + 63:33 RESERVED + 32 NDDBD + 31:6 NDA + 5:0 NDS + ================================================== + Syndromes + syndrome[7:0] description PIPE stage + --------------------------------------------------------------------------- + 00000001 atomic alignment is not on 8bytes TPTINIT + 00000010 RDB exceeded TPTINIT + 00000100 null reached but need a descriptor TPTINIT + 00001ttt TPT INIT access violation. NSINIT + ttt=Xstat from TPT + 001abcde a: max_msg_exceeded TPTFIN + b: requester_sl_but_nodata + c: requester_data_but_nosl + d: responder_data_but_nosl + e: NDA BusErr + 01000ttt TPT FINAL access violation. ScatterEngine + ttt=Xstat from TPT + 01001abc a: error_buserr on descriptor ScatterEngine + b: error_longlist + c: error_longdata + 01010000 Atomic Read/Write error ScatterEngine + (nonposted Queue....) + 01010001 NonPosted Write Failed ScatterEngine + */ +/* --------------------------------------------------------- */ + struct CEACCESSGW_st ceaccessgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00e00]; +/* --------------------------------------------------------- */ + struct TPTNSIIF_st rdetptnsiif; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00760]; +/* --------------------------------------------------------- */ + struct RDEGRLCFG_st rdegrlcfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00080]; +/* --------------------------------------------------------- */ + struct RDECREDITS_st rdecredits; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00100]; +/* --------------------------------------------------------- */ + struct RDE_CAUSEREG_st rdecause; /* Cause Bits: + 31:2 - reserved + 1 - Parity Error + 0 - CRBus Timeout Error */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00100]; +/* --------------------------------------------------------- */ + struct MEMACCESSPARAMS_st rdbparams;/* Base Address is only 63:32 - + baseaddress[31:0] is reserved + + A copy of this table is held in the EXE. It is up to config sw to keep them equal. 
*/ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00180]; +/* --------------------------------------------------------- */ + struct RDEDEBUG_st rde_debug_hooks; /* rde_debug_hooks */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00400]; +/* --------------------------------------------------------- */ + struct RDE_BIST_st rde_bist; /* rde bist */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x05500]; +/* --------------------------------------------------------- */ +}; + +/* Send Data Engine (SDE) */ + +struct SDE_st { /* Little Endian */ + struct GATHERENG_st ge0; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00080]; +/* --------------------------------------------------------- */ + struct GATHERENG_st ge1; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00080]; +/* --------------------------------------------------------- */ + struct GATHERENG_st ge2; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00080]; +/* --------------------------------------------------------- */ + struct GATHERENG_st ge3; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00480]; +/* --------------------------------------------------------- */ + struct SDEGRLCFG_st sdegrlcfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00200]; +/* --------------------------------------------------------- */ + struct SDE_CAUSEREG_st sdecause; /* Cause Bits: + 31:2 - reserved + 1 - Parity Error + 0 - CRBus Timeout Error */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00300]; +/* --------------------------------------------------------- */ + struct GeEuCause_st geeucause; +/* --------------------------------------------------------- */ + pseudo_bit_t exerror[0x00020]; /* error bit per EU while gathering */ +/* --------------------------------------------------------- */ + pseudo_bit_t exerror_set[0x00020]; /* bits written as 1 are set in exerror */ +/* --------------------------------------------------------- */ + pseudo_bit_t exerror_rst[0x00020]; /* bits written as 1 are cleared in exerror */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00ba0]; +/* --------------------------------------------------------- */ + struct SDE_BIST_st sde_bist; /* sde bist */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x05d80]; +/* --------------------------------------------------------- */ +}; + +/* IB Port Debug */ + +struct port_debug_config_st { /* Little Endian */ + pseudo_bit_t TS1DetectDisabled[0x00001]; + pseudo_bit_t ZeroCredits[0x00001]; /* Zero Credits */ + pseudo_bit_t TransmitHold[0x00001]; /* Transmit Hold */ + pseudo_bit_t TransmitHoldVL15[0x00001];/* Transmit Hold VL15 */ + pseudo_bit_t TransmitHoldCredits[0x00001];/* Transmit Hold Credits */ + pseudo_bit_t IgnoreCredits[0x00001];/* Ignore Credits */ + pseudo_bit_t IgnoreVCRC[0x00001]; /* Ignore VCRC */ + pseudo_bit_t ignore_disparity[0x00001]; + pseudo_bit_t RBCActivityWDParam[0x00004]; + pseudo_bit_t rq_watchdog_disable[0x00001]; + pseudo_bit_t enable_skip_delay[0x00001];/* EnableSkipDelay */ + pseudo_bit_t debounce_debug_mode[0x00001]; + pseudo_bit_t ignore_training_error[0x00001]; + pseudo_bit_t rbc_wd_disable[0x00001]; +
pseudo_bit_t active_wd_disable[0x00001]; + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t DVCRC[0x00001]; + pseudo_bit_t SignalDetectDisabled[0x00001]; + pseudo_bit_t CommSkipWdDisable[0x00001]; + pseudo_bit_t reserved1[0x0000a]; +/* --------------------------------------------------------- */ +}; + +/* Loopback Buffer Status and Drop Control */ + +struct LBCTRL_st { /* Little Endian */ + pseudo_bit_t lb_full[0x00001]; /* Loopback buffer has a complete packet in it (pending delivery to TCU) */ + pseudo_bit_t ib_packet_in_rdy[0x00001];/* There is at least one complete IB packet in the input FIFO */ + pseudo_bit_t reserved0[0x0001d]; + pseudo_bit_t drop_loopback[0x00001];/* When a 1 is written to this field the loopback buffer is released. TCU input needs to be closed before this command is executed. */ +/* --------------------------------------------------------- */ +}; + +/* Packet Discard Configuration Mask */ + +struct PktDiscConfigMask_st { /* Little Endian */ + pseudo_bit_t LinkError[0x00001]; /* Link error mask bit */ + pseudo_bit_t VL15Error[0x00001]; /* VL15 error */ + pseudo_bit_t VLError[0x00001]; /* VL error */ + pseudo_bit_t DLIDError[0x00001]; /* DLID error */ + pseudo_bit_t MTULenError[0x00001]; /* length is greater than the MTU or less than the minimum length */ + pseudo_bit_t LenError[0x00001]; /* LRH length is different from the packet length */ + pseudo_bit_t LverError[0x00001]; /* lver is greater than the configured lver */ + pseudo_bit_t VCRCError[0x00001]; /* vcrc error */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t reserved1[0x00001]; + pseudo_bit_t ICRCError[0x00001]; /* icrc error */ + pseudo_bit_t reserved2[0x00015]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00020]; +/* --------------------------------------------------------- */ +};
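Each mask bit above enables discarding of packets that fail the corresponding check, so a configuration is just an OR of bit positions taken from the field order (LinkError in bit 0 up through ICRCError in bit 10). A small sketch follows; the enum names and the mapped register pointer are hypothetical, only the bit positions come from the struct above.

    #include <stdint.h>

    enum {                         /* bit positions per PktDiscConfigMask_st */
        PKT_DISC_LINK = 1u << 0,
        PKT_DISC_VL15 = 1u << 1,
        PKT_DISC_VL   = 1u << 2,
        PKT_DISC_DLID = 1u << 3,
        PKT_DISC_MTU  = 1u << 4,
        PKT_DISC_LEN  = 1u << 5,
        PKT_DISC_LVER = 1u << 6,
        PKT_DISC_VCRC = 1u << 7,
        PKT_DISC_ICRC = 1u << 10,  /* bits 8-9 are reserved */
    };

    /* Example policy: discard packets with bad CRCs or an illegal length. */
    static void pkt_disc_set(volatile uint32_t *reg)
    {
        *reg = PKT_DISC_VCRC | PKT_DISC_ICRC | PKT_DISC_MTU | PKT_DISC_LEN;
    }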
/* IB Port General Configuration */ + +struct IB_port_general_config_st { /* Little Endian */ + pseudo_bit_t tq_watermark[0x00005]; /* TQ WaterMark (max value for TQWM should be 20, see bug 5943) */ + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t tqcredits[0x00008]; /* Number of free lines in the TQ FIFO. + Can be programmed to a value lower than the default in order to overcome possible edge problems. Usually configured (if needed) before the port is activated. It is strongly suggested that this value is not changed during normal operation. */ + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t HOQdisable[0x00001]; /* disables the HOQ timer in the TQ */ + pseudo_bit_t LoopbackDisable[0x00001];/* a bit disabling the loopback mechanism in clo */ + pseudo_bit_t reserved2[0x00002]; + pseudo_bit_t hoq_timer_divider[0x00004];/* HOQTimerDivider */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t brr[0x00001]; /* Bit Reorder RQ */ + pseudo_bit_t brt[0x00001]; /* Bit Reorder TQ */ + pseudo_bit_t spare_reg[0x00001]; /* cfg_ib_lb_fair_disable: + Disables weighted round robin in accessing the TCU from crcore (giving absolute priority to loopback packets) */ + pseudo_bit_t sl2vl_lb_disable[0x00001];/* bit for disabling the mechanism of not translating sl2vl for loopback */ + pseudo_bit_t reserved4[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Parallel Port Debug (per Port) */ + +struct Parallel_Debug_Port_Configuration_st { /* Little Endian */ + pseudo_bit_t SerDesRX_Parallel_Port_Select[0x00004]; + pseudo_bit_t reserved0[0x0000c]; + pseudo_bit_t SerDesTX_Parallel_Port_Select[0x00004]; + pseudo_bit_t reserved1[0x0000c]; +/* --------------------------------------------------------- */ +}; + +/* Flow Control update watchdog register */ + +struct Flow_Control_update_watchdog_register_st { /* Little Endian */ + pseudo_bit_t VL0_FC_update_WD[0x00001]; + pseudo_bit_t VL1_FC_update_WD[0x00001]; + pseudo_bit_t VL2_FC_update_WD[0x00001]; + pseudo_bit_t VL3_FC_update_WD[0x00001]; + pseudo_bit_t VL4_FC_update_WD[0x00001]; + pseudo_bit_t VL5_FC_update_WD[0x00001]; + pseudo_bit_t VL6_FC_update_WD[0x00001]; + pseudo_bit_t VL7_FC_update_WD[0x00001]; + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* @IB1_BIST */ + +struct IB1_BIST_st { /* Little Endian */ + pseudo_bit_t bists_stat[0x00009]; /* Automatically added for BIST */ + pseudo_bit_t reserved0[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t rqtq4x_bists_stat[0x00008];/* rq descriptor fifo repair control register */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00009];/* Automatically added for BIST */ + pseudo_bit_t reserved2[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x0000a];/* Automatically added for BIST */ + pseudo_bit_t reserved3[0x00016]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistrqdscf_rdw_0[0x00005];/* Automatically added for BIST */ + pseudo_bit_t reserved4[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistclolbf_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistclolbf_rdw_1[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf1_l_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf1_m_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t
+ +/* @IB1_BIST */ + +struct IB1_BIST_st { /* Little Endian */ + pseudo_bit_t bists_stat[0x00009]; /* Automatically added for BIST */ + pseudo_bit_t reserved0[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t rqtq4x_bists_stat[0x00008];/* rq descriptor fifo repair control register */ + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t rep_arrays_stat[0x00009];/* Automatically added for BIST */ + pseudo_bit_t reserved2[0x00017]; +/* --------------------------------------------------------- */ + pseudo_bit_t nonrep_arrays_stat[0x0000a];/* Automatically added for BIST */ + pseudo_bit_t reserved3[0x00016]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistrqdscf_rdw_0[0x00005];/* Automatically added for BIST */ + pseudo_bit_t reserved4[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistclolbf_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved5[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistclolbf_rdw_1[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved6[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf1_l_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved7[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf1_m_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved8[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf2_l_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved9[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbistdtf2_m_rdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved10[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbisttqdtf_wdw_0[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved11[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t ibhbisttqdtf_wdw_1[0x00007];/* Automatically added for BIST */ + pseudo_bit_t reserved12[0x00019]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* VL Mapping */ + +struct VL_mapping_st { /* Little Endian */ + pseudo_bit_t vl_for_sl0[0x00004]; /* mapping of sl0 */ + pseudo_bit_t reserved0[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl1[0x00004]; /* mapping of sl1 */ + pseudo_bit_t reserved2[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl2[0x00004]; /* mapping sl2 */ + pseudo_bit_t reserved4[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl3[0x00004]; /* mapping sl3 */ + pseudo_bit_t reserved6[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl4[0x00004]; /* mapping sl4 */ + pseudo_bit_t reserved8[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl5[0x00004]; /* mapping sl5 */ + pseudo_bit_t reserved10[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl6[0x00004]; /* mapping sl6 */ + pseudo_bit_t reserved12[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl7[0x00004]; /* mapping sl7 */ + pseudo_bit_t reserved14[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved15[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl8[0x00004]; /* mapping sl8 */ + pseudo_bit_t reserved16[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved17[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl9[0x00004]; /* mapping sl9 */ + pseudo_bit_t reserved18[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved19[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl10[0x00004]; /* 
mapping sl10 */ + pseudo_bit_t reserved20[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved21[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl11[0x00004]; /* mapping sl11 */ + pseudo_bit_t reserved22[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved23[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl12[0x00004]; /* mapping sl12 */ + pseudo_bit_t reserved24[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved25[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl13[0x00004]; /* mapping sl13 */ + pseudo_bit_t reserved26[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved27[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl14[0x00004]; /* mapping sl14 */ + pseudo_bit_t reserved28[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved29[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t vl_for_sl15[0x00004]; /* mapping sl15 */ + pseudo_bit_t reserved30[0x0001c]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved31[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Extended Buffer Allocation Register */ + +struct Extended_Buffer_Allocation_Register_st { /* Little Endian */ + pseudo_bit_t MaxDynCred_0[0x00008]; + pseudo_bit_t ExtMaxDynCred0[0x00001]; + pseudo_bit_t reserved0[0x00007]; + pseudo_bit_t StatCred0[0x00007]; + pseudo_bit_t ExtStatCred0[0x00002]; + pseudo_bit_t reserved1[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_0[0x00008]; + pseudo_bit_t ExtUsedDyn0[0x00001]; + pseudo_bit_t reserved2[0x00007]; + pseudo_bit_t Used_StatCred0[0x00007]; + pseudo_bit_t ExtUsedStat0[0x00002]; + pseudo_bit_t reserved3[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_1[0x00008]; + pseudo_bit_t ExtMaxDynCred1[0x00001]; + pseudo_bit_t reserved4[0x00007]; + pseudo_bit_t StatCred1[0x00007]; + pseudo_bit_t ExtStatCred1[0x00002]; + pseudo_bit_t reserved5[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_1[0x00008]; + pseudo_bit_t ExtUsedDyn1[0x00001]; + pseudo_bit_t reserved6[0x00007]; + pseudo_bit_t Used_StatCred1[0x00007]; + pseudo_bit_t ExtUsedStat1[0x00002]; + pseudo_bit_t reserved7[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_2[0x00008]; + pseudo_bit_t ExtMaxDynCred2[0x00001]; + pseudo_bit_t reserved8[0x00007]; + pseudo_bit_t StatCred2[0x00007]; + pseudo_bit_t ExtStatCred2[0x00002]; + pseudo_bit_t reserved9[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_2[0x00008]; + pseudo_bit_t ExtUsedDyn2[0x00001]; + pseudo_bit_t reserved10[0x00007]; + pseudo_bit_t Used_StatCred2[0x00007]; + pseudo_bit_t ExtUsedStat2[0x00002]; + pseudo_bit_t reserved11[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_3[0x00008]; + pseudo_bit_t ExtMaxDynCred3[0x00001]; + pseudo_bit_t reserved12[0x00007]; + pseudo_bit_t StatCred3[0x00007]; + pseudo_bit_t ExtStatCred3[0x00002]; + pseudo_bit_t 
reserved13[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_3[0x00008]; + pseudo_bit_t ExtUsedDyn3[0x00001]; + pseudo_bit_t reserved14[0x00007]; + pseudo_bit_t Used_StatCred3[0x00007]; + pseudo_bit_t ExtUsedStat3[0x00002]; + pseudo_bit_t reserved15[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_4[0x00008]; + pseudo_bit_t ExtMaxDynCred4[0x00001]; + pseudo_bit_t reserved16[0x00007]; + pseudo_bit_t StatCred4[0x00007]; + pseudo_bit_t ExtStatCred4[0x00002]; + pseudo_bit_t reserved17[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_4[0x00008]; + pseudo_bit_t ExtUsedDyn4[0x00001]; + pseudo_bit_t reserved18[0x00007]; + pseudo_bit_t UsedStatCred4[0x00007]; + pseudo_bit_t ExtUsedStat4[0x00002]; + pseudo_bit_t reserved19[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_5[0x00008]; + pseudo_bit_t ExtMaxDynCred5[0x00001]; + pseudo_bit_t reserved20[0x00007]; + pseudo_bit_t StatCred5[0x00007]; + pseudo_bit_t ExtStatCred5[0x00002]; + pseudo_bit_t reserved21[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_5[0x00008]; + pseudo_bit_t ExtUsedDyn5[0x00001]; + pseudo_bit_t reserved22[0x00007]; + pseudo_bit_t Used_StatCred5[0x00007]; + pseudo_bit_t ExtUsedStat5[0x00002]; + pseudo_bit_t reserved23[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_6[0x00008]; + pseudo_bit_t ExtMaxDynCred6[0x00001]; + pseudo_bit_t reserved24[0x00007]; + pseudo_bit_t StatCred6[0x00007]; + pseudo_bit_t ExtStatCred6[0x00002]; + pseudo_bit_t reserved25[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_6[0x00008]; + pseudo_bit_t ExtUsedDyn6[0x00001]; + pseudo_bit_t reserved26[0x00007]; + pseudo_bit_t Used_StatCred6[0x00007]; + pseudo_bit_t ExtUsedStat6[0x00002]; + pseudo_bit_t reserved27[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_7[0x00008]; + pseudo_bit_t ExtMaxDynCred7[0x00001]; + pseudo_bit_t reserved28[0x00007]; + pseudo_bit_t StatCred7[0x00007]; + pseudo_bit_t ExtStatCred7[0x00002]; + pseudo_bit_t reserved29[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_7[0x00008]; + pseudo_bit_t ExtUsedDyn7[0x00001]; + pseudo_bit_t reserved30[0x00007]; + pseudo_bit_t Used_StatCred7[0x00007]; + pseudo_bit_t ExtUsedStat7[0x00002]; + pseudo_bit_t reserved31[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved32[0x001c0]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred_15[0x00008]; + pseudo_bit_t ExtMaxDynCred15[0x00001]; + pseudo_bit_t reserved33[0x00007]; + pseudo_bit_t StatCred15[0x00007]; + pseudo_bit_t ExtStatCred15[0x00002]; + pseudo_bit_t reserved34[0x00007]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn_15[0x00008]; + pseudo_bit_t ExtUsedDyn15[0x00001]; + pseudo_bit_t reserved35[0x00007]; + pseudo_bit_t Used_StatCred15[0x00007]; + pseudo_bit_t ExtUsedStat15[0x00002]; + pseudo_bit_t reserved36[0x00007]; +/* --------------------------------------------------------- */ +}; + +/* IB Port Link Phy Configuration Register */ + +struct IB_Port_Link_Phy_Configuration_Register_st { /* Little Endian */ + pseudo_bit_t RxLanePolarity[0x00004]; + pseudo_bit_t 
reserved0[0x00008]; + pseudo_bit_t TxLanePolarity[0x00004]; + pseudo_bit_t reserved1[0x00008]; + pseudo_bit_t RxRevLanes[0x00001]; + pseudo_bit_t TxRevLanes[0x00001]; + pseudo_bit_t reserved2[0x00003]; + pseudo_bit_t LinkPolarity_AutoConfig[0x00001]; + pseudo_bit_t RxLaneRev_AutoConfig[0x00001]; + pseudo_bit_t TxLaneRev_AutoConfig[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t RxPolarityResult[0x00004]; + pseudo_bit_t reserved3[0x00014]; + pseudo_bit_t RxRevLanesResult[0x00001]; + pseudo_bit_t TxRevLanesResult[0x00001]; + pseudo_bit_t reserved4[0x00006]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* SerDes Configuration and Debug */ + +struct SerDes_Configuration_and_Debug_st { /* Little Endian */ + pseudo_bit_t TBCMode[0x00004]; + pseudo_bit_t reserved0[0x00008]; + pseudo_bit_t FullSw[0x00004]; /* 0xF - Half Swing + 0x0 - Full Swing + Note: this is exactly the opposite of Anafa. */ + pseudo_bit_t reserved1[0x0000b]; + pseudo_bit_t ComDetMode[0x00001]; + pseudo_bit_t ComDetSel[0x00001]; + pseudo_bit_t EnComDet[0x00001]; + pseudo_bit_t TRate[0x00001]; + pseudo_bit_t RRate[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t TxEmp[0x00004]; + pseudo_bit_t reserved2[0x00008]; + pseudo_bit_t EnEql[0x00004]; + pseudo_bit_t reserved3[0x00008]; + pseudo_bit_t TestSel0[0x00002]; + pseudo_bit_t TestSel1[0x00002]; + pseudo_bit_t TestSel2[0x00002]; + pseudo_bit_t TestSel3[0x00002]; +/* --------------------------------------------------------- */ + pseudo_bit_t SLoop[0x00004]; + pseudo_bit_t reserved4[0x00008]; + pseudo_bit_t PLoop[0x00004]; + pseudo_bit_t reserved5[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t RxPwrDwn[0x00004]; + pseudo_bit_t reserved6[0x00008]; + pseudo_bit_t TxPwrDwn[0x00004]; + pseudo_bit_t reserved7[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t SigDet[0x00004]; + pseudo_bit_t reserved8[0x00008]; + pseudo_bit_t ComDet[0x00004]; + pseudo_bit_t reserved9[0x00010]; +/* --------------------------------------------------------- */ + struct SERDESDFT_st sd0; +/* --------------------------------------------------------- */ + struct SERDESDFT_st sd1; +/* --------------------------------------------------------- */ + struct SERDESDFT_st sd2; +/* --------------------------------------------------------- */ + struct SERDESDFT_st sd3; +/* --------------------------------------------------------- */ +};
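+
+/* In SerDes_Configuration_and_Debug_st above, FullSw sits at bits 15:12 of
+ * the first dword, with 0xF selecting half swing and 0x0 full swing (the
+ * opposite polarity of Anafa, per the note). A minimal sketch, assuming one
+ * FullSw bit per SerDes lane; serdes_read32()/serdes_write32() are
+ * hypothetical accessors. */
+extern unsigned int serdes_read32(unsigned long dword_ofs);
+extern void serdes_write32(unsigned long dword_ofs, unsigned int val);
+
+static inline void serdes_set_half_swing(void)
+{
+    unsigned int v = serdes_read32(0);
+    v |= 0xfu << 12;    /* FullSw = 0xF -> half swing on all lanes */
+    serdes_write32(0, v);
+}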
+ +/* Congestion Control */ + +struct Congestion_Control_Register_st { /* Little Endian */ + pseudo_bit_t CongCtlTimerDivider[0x00004];/* a prescaler for the congestion control counter */ + pseudo_bit_t reserved0[0x0000c]; + pseudo_bit_t VL0Enable[0x00001]; + pseudo_bit_t VL1Enable[0x00001]; /* enabling for cheating mode in vl1 */ + pseudo_bit_t VL2Enable[0x00001]; /* enabling for cheating mode in vl2 */ + pseudo_bit_t VL3Enable[0x00001]; /* enabling for cheating mode in vl3 */ + pseudo_bit_t VL4Enable[0x00001]; /* enabling for cheating mode in vl4 */ + pseudo_bit_t VL5Enable[0x00001]; /* enabling for cheating mode in vl5 */ + pseudo_bit_t VL6Enable[0x00001]; /* enabling for cheating mode in vl6 */ + pseudo_bit_t VL7Enable[0x00001]; /* enabling for cheating mode in vl7 */ + pseudo_bit_t reserved1[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t CongestionTimerLimit[0x00008];/* max value for counting the time when the number of credits is less than the MTU */ + pseudo_bit_t reserved2[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00040]; +/* --------------------------------------------------------- */ +}; + +/* IB Port Performance Management and Event Generation */ + +struct IB_port_PMEG_hdr_st { /* Little Endian */ + struct IB_port_event_st KOZ; /* IB Port Event Cause Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t ClearCauseReg1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SetCauseReg1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventServicedReg1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventEnable0Reg1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventEnable1Reg1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortSamplesCount[0x00020];/* PortSamples Counting Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t SamplingCounterSelector[0x00004];/* Sampling Counter Selector */ + pseudo_bit_t reserved0[0x00004]; + pseudo_bit_t ProgFuncCounterSelector[0x00008];/* Programmable Function Counter Selector */ + pseudo_bit_t VLCounterMask_7_0[0x00008];/* per VL Counter Mask */ + pseudo_bit_t VLCounterMask_14_8[0x00007]; + pseudo_bit_t VLCounterMask_15[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortXmitData[0x00020]; /* PortXmitData Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvData[0x00020]; /* PortRcvData Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t PortXmitPkts[0x00020]; /* PortXmitPkts Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvPkts[0x00020]; /* PortRcvPkts Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvErrors[0x00010];/* PortRcvErrors Counter */ + pseudo_bit_t reserved1[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortXmitDiscards[0x00010];/* PortXmitPktDiscards Counter */ + pseudo_bit_t reserved2[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t VL15DroppedCounter[0x00010];/* VL15Dropped */ + pseudo_bit_t reserved3[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortXmitWait[0x00020]; /* PortXMitWait Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t LocalIntegrityErrors[0x00008];/* Local Integrity Errors */ + pseudo_bit_t reserved4[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvRemotePhysicalErrors[0x00010]; + pseudo_bit_t reserved5[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortXmitConstraintErrors[0x00008]; + pseudo_bit_t reserved6[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvConstraintErrors[0x00008]; + pseudo_bit_t reserved7[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t SymbolErrorCounter[0x00010];/* Phy Layer Errors Counter */ + pseudo_bit_t reserved8[0x00010]; +/* 
--------------------------------------------------------- */ + pseudo_bit_t LinkErrorRecoveryCounter[0x00010];/* Phy Successful Recovery Counter */ + pseudo_bit_t LinkDownedCounter[0x00010];/* Phy Unsuccessful Recovery Counter */ +/* --------------------------------------------------------- */ + pseudo_bit_t ProgFuncCounter[0x00010];/* Programmable Function Counter */ + pseudo_bit_t reserved9[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00040]; +/* --------------------------------------------------------- */ + pseudo_bit_t ExcessiveBufferOverrunErrors[0x00004]; + pseudo_bit_t reserved11[0x00004]; + pseudo_bit_t LocalLinkIntegrityErrors[0x00004]; + pseudo_bit_t reserved12[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x000a0]; +/* --------------------------------------------------------- */ + pseudo_bit_t PortRcvErrorsMask[0x0000a]; + pseudo_bit_t reserved14[0x00004]; + pseudo_bit_t PortRcvDataMask[0x00001]; + pseudo_bit_t reserved15[0x00001]; + pseudo_bit_t PortRcvRemotePhysicalErrorsMask[0x00002]; + pseudo_bit_t reserved16[0x00005]; + pseudo_bit_t PortXmitDiscardMask[0x00005]; + pseudo_bit_t PortConstraintErrorMask[0x00004]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved17[0x00040]; +/* --------------------------------------------------------- */ + struct IB_port_Event2_st IB_port_Event_Cause_Register2; +/* --------------------------------------------------------- */ + pseudo_bit_t ClearCauseReg2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t SetCauseReg2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventServicedReg2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventEnable0Reg2[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t EventEnable1Reg2[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* IB Link */ + +struct ib_link_st { /* Little Endian */ + struct ib_link_transmit_st TXL; /* IB Port Transmit Link Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00020]; +/* --------------------------------------------------------- */ + struct IB_port_IO_config_st IOC; /* IB Port I/O Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t LinkPhyState[0x00008]; + pseudo_bit_t reserved1[0x00018]; +/* --------------------------------------------------------- */ +}; + +/* Buffer Allocation */ + +struct buffer_allocation_st { /* Little Endian */ + pseudo_bit_t MaxDynCred1[0x00008]; /* Max Dyn Cred 1 */ + pseudo_bit_t StatCred1[0x00007]; /* Stat Cred 1 */ + pseudo_bit_t U_1[0x00001]; + pseudo_bit_t MaxDynCred0[0x00008]; /* Max Dyn Cred 0 */ + pseudo_bit_t StatCred0[0x00007]; /* Stat Cred 0 */ + pseudo_bit_t U_0[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn1[0x00008]; /* Used Dyn Cred 1 */ + pseudo_bit_t UsedStat1[0x00007]; /* Used Stat Cred 1 */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t UsedDyn0[0x00008]; /* Used Dyn Cred 0 */ + pseudo_bit_t UsedStat0[0x00007]; /* Used Stat Cred 0 */ + pseudo_bit_t reserved1[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred3[0x00008]; /* Max Dyn Cred 3 */ + pseudo_bit_t StatCred3[0x00007]; 
/* Stat Cred 3 */ + pseudo_bit_t U_3[0x00001]; + pseudo_bit_t MaxDynCred2[0x00008]; /* Max Dyn Cred 2 */ + pseudo_bit_t StatCred2[0x00007]; /* Stat Cred 2 */ + pseudo_bit_t U_2[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn3[0x00008]; /* Used Dyn Cred 3 */ + pseudo_bit_t UsedStat3[0x00007]; /* Used Stat Cred 3 */ + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t UsedDyn2[0x00008]; /* Used Dyn Cred 2 */ + pseudo_bit_t UsedStat2[0x00007]; /* Used Stat Cred 2 */ + pseudo_bit_t reserved3[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred5[0x00008]; /* Max Dyn Cred 5 */ + pseudo_bit_t StatCred5[0x00007]; /* Stat Cred 5 */ + pseudo_bit_t U_5[0x00001]; + pseudo_bit_t MaxDynCred4[0x00008]; /* Max Dyn Cred 4 */ + pseudo_bit_t StatCred4[0x00007]; /* Stat Cred 4 */ + pseudo_bit_t U_4[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn5[0x00008]; /* Used Dyn Cred 5 */ + pseudo_bit_t UsedStat5[0x00007]; /* Used Stat Cred 5 */ + pseudo_bit_t reserved4[0x00001]; + pseudo_bit_t UsedDyn4[0x00008]; /* Used Dyn Cred 4 */ + pseudo_bit_t UsedStat4[0x00007]; /* Used Stat Cred 4 */ + pseudo_bit_t reserved5[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred7[0x00008]; /* Max Dyn Cred 7 */ + pseudo_bit_t StatCred7[0x00007]; /* Stat Cred 7 */ + pseudo_bit_t U_7[0x00001]; + pseudo_bit_t MaxDynCred6[0x00008]; /* Max Dyn Cred 6 */ + pseudo_bit_t StatCred6[0x00007]; /* Stat Cred 6 */ + pseudo_bit_t U_6[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn7[0x00008]; /* Used Dyn Cred 7 */ + pseudo_bit_t UsedStat7[0x00007]; /* Used Stat Cred 7 */ + pseudo_bit_t reserved6[0x00001]; + pseudo_bit_t UsedDyn6[0x00008]; /* Used Dyn Cred 6 */ + pseudo_bit_t UsedStat6[0x00007]; /* Used Stat Cred 6 */ + pseudo_bit_t reserved7[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x000c0]; +/* --------------------------------------------------------- */ + pseudo_bit_t MaxDynCred15[0x00008]; /* Max Dyn Cred 15 */ + pseudo_bit_t StatCred15[0x00007]; /* Stat Cred 15 */ + pseudo_bit_t U_15[0x00001]; + pseudo_bit_t reserved9[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t UsedDyn15[0x00008]; /* Used Dyn Cred 15 */ + pseudo_bit_t UsedStat15[0x00007]; /* Used Stat Cred 15 */ + pseudo_bit_t reserved10[0x00011]; +/* --------------------------------------------------------- */ + pseudo_bit_t UnallocatedDyn[0x00010];/* Unallocated Dynamic (this is how many credits are in the dynamic pool) */ + pseudo_bit_t BufferSize[0x00010]; /* Buffer Size */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t CreditMinTime[0x0000a];/* Min Time Between Credit Packets */ + pseudo_bit_t CreditMaxTime[0x00008];/* Max time between credit packets */ + pseudo_bit_t reserved12[0x0000d]; + pseudo_bit_t FCU[0x00001]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00020]; +/* --------------------------------------------------------- */ +}; + +/* Port Status */ + +struct Port_status_st { /* Little Endian */ + pseudo_bit_t RQFIFOF[0x00001]; /* RQ FIFO Full */ + pseudo_bit_t reserved0[0x00007]; + pseudo_bit_t VL0Stalled[0x00001]; /* VL0 Stalled */ + pseudo_bit_t 
VL1Stalled[0x00001]; /* VL1 Stalled */ + pseudo_bit_t VL2Stalled[0x00001]; /* VL2 Stalled */ + pseudo_bit_t VL3Stalled[0x00001]; /* VL3 Stalled */ + pseudo_bit_t VL4Stalled[0x00001]; /* VL4 Stalled */ + pseudo_bit_t VL5Stalled[0x00001]; /* VL5 Stalled */ + pseudo_bit_t VL6Stalled[0x00001]; /* VL6 Stalled */ + pseudo_bit_t VL7Stalled[0x00001]; /* VL7 Stalled */ + pseudo_bit_t RBCACT[0x00001]; /* RBC Active */ + pseudo_bit_t TS2DTCT[0x00001]; /* TS2 Detected */ + pseudo_bit_t SERACT[0x00001]; /* Serial Activity */ + pseudo_bit_t TRERR[0x00001]; /* Training Error */ + pseudo_bit_t IDLRCV[0x00001]; /* Idle Received */ + pseudo_bit_t TS1Detected[0x00001]; + pseudo_bit_t CommaDetectEnable[0x00001]; + pseudo_bit_t reserved1[0x00009]; +/* --------------------------------------------------------- */ +}; + +/* VL Arbitration Table */ + +struct VL_arbitration_table_st { /* Little Endian */ + pseudo_bit_t WTR0C1[0x00008]; /* WeightRow0Col1 */ + pseudo_bit_t VLR0C1[0x00004]; /* VL */ + pseudo_bit_t WTR0C0[0x00008]; /* WeightRow0Col0 */ + pseudo_bit_t VLR0C0[0x00004]; /* VL */ + pseudo_bit_t reserved0[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR0C3[0x00008]; /* WeightRow0Col3 */ + pseudo_bit_t VLR0C3[0x00004]; /* VL */ + pseudo_bit_t WTR0C2[0x00008]; /* WeightRow0Col2 */ + pseudo_bit_t VLR0C2[0x00004]; /* VL */ + pseudo_bit_t reserved1[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR0C5[0x00008]; /* WeightRow0Col5 */ + pseudo_bit_t VLR0C5[0x00004]; /* VL */ + pseudo_bit_t WTR0C4[0x00008]; /* WeightRow0Col4 */ + pseudo_bit_t VLR0C4[0x00004]; /* VL */ + pseudo_bit_t reserved2[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR0C7[0x00008]; /* WeightRow0Col7 */ + pseudo_bit_t VLR0C7[0x00004]; /* VL */ + pseudo_bit_t WTR0C6[0x00008]; /* WeightRow0Col6 */ + pseudo_bit_t VLR0C6[0x00004]; /* VL */ + pseudo_bit_t reserved3[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00080]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR1C1[0x00008]; /* WeightRow1Col1 */ + pseudo_bit_t VLR1C1[0x00004]; /* VL */ + pseudo_bit_t WTR1C0[0x00008]; /* WeightRow1Col0 */ + pseudo_bit_t VLR1C0[0x00004]; /* VL */ + pseudo_bit_t reserved5[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR1C3[0x00008]; /* WeightRow1Col3 */ + pseudo_bit_t VLR1C3[0x00004]; /* VL */ + pseudo_bit_t WTR1C2[0x00008]; /* WeightRow1Col2 */ + pseudo_bit_t VLR1C2[0x00004]; /* VL */ + pseudo_bit_t reserved6[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR1C5[0x00008]; /* WeightRow1Col5 */ + pseudo_bit_t VLR1C5[0x00004]; /* VL */ + pseudo_bit_t WTR1C4[0x00008]; /* WeightRow1Col4 */ + pseudo_bit_t VLR1C4[0x00004]; /* VL */ + pseudo_bit_t reserved7[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t WTR1C7[0x00008]; /* WeightRow1Col7 */ + pseudo_bit_t VLR1C7[0x00004]; /* VL */ + pseudo_bit_t WTR1C6[0x00008]; /* WeightRow1Col6 */ + pseudo_bit_t VLR1C6[0x00004]; /* VL */ + pseudo_bit_t reserved8[0x00008]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00080]; +/* --------------------------------------------------------- */ +}; + +/* IB PortInfo and GUIDInfo */ + +struct IB_port_info_GUID_info_st { /* Little Endian */ + pseudo_bit_t MK0[0x00020]; /* M_Key */ +/* 
--------------------------------------------------------- */ + pseudo_bit_t MK1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t GIDPR0[0x00020]; /* GIDPrefix */ +/* --------------------------------------------------------- */ + pseudo_bit_t GIDPR1[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t MSMLID[0x00010]; /* MasterSMLID */ + pseudo_bit_t LID[0x00010]; +/* --------------------------------------------------------- */ + pseudo_bit_t CAPMSK[0x00020]; /* CapabilityMask */ +/* --------------------------------------------------------- */ + pseudo_bit_t MKLP[0x00010]; /* M_KeyLeasePeriod */ + pseudo_bit_t DIAG[0x00010]; /* DiagCode */ +/* --------------------------------------------------------- */ + pseudo_bit_t link_width_active[0x00008];/* LinkWidthActive */ + pseudo_bit_t link_width_supported[0x00008];/* LinkWidthSupported */ + pseudo_bit_t link_width_enable[0x00008];/* LinkWidthEnabled */ + pseudo_bit_t LPNUM[0x00008]; /* LocalPortNum */ +/* --------------------------------------------------------- */ + pseudo_bit_t LSPENB[0x00004]; /* LinkSpeedEnabled */ + pseudo_bit_t LSPACT[0x00004]; /* LinkSpeedActive */ + pseudo_bit_t LMC[0x00003]; + pseudo_bit_t reserved0[0x00003]; + pseudo_bit_t MKPROT[0x00002]; /* M_KeyProtectBits */ + pseudo_bit_t link_down_default_state[0x00004];/* LinkDownDefaultState */ + pseudo_bit_t link_phy_state[0x00004]; + pseudo_bit_t port_state[0x00004]; /* PortState */ + pseudo_bit_t LSPSUP[0x00004]; /* LinkSpeedSupported */ +/* --------------------------------------------------------- */ + pseudo_bit_t VLARHCAP[0x00008]; /* VLArbitrationHighCap */ + pseudo_bit_t VLHLIM[0x00008]; /* VLHighLimit */ + pseudo_bit_t reserved1[0x00004]; + pseudo_bit_t VLCAP[0x00004]; /* VLCap */ + pseudo_bit_t MSMSL[0x00004]; /* MasterSMSL */ + pseudo_bit_t NeighborMTU[0x00004]; /* NeighborMTU */ +/* --------------------------------------------------------- */ + pseudo_bit_t FilterRawOutbound[0x00001];/* FilterRawOutbound */ + pseudo_bit_t FilterRawInbound[0x00001];/* FilterRawInbound */ + pseudo_bit_t PartEnfOutbound[0x00001];/* PartEnfOutbound */ + pseudo_bit_t PartEnfInbound[0x00001];/* PartEnfInbound */ + pseudo_bit_t OperationalVLs[0x00004];/* Operational VL */ + pseudo_bit_t HOQLife[0x00005]; /* HOQ Life */ + pseudo_bit_t VLStallCount[0x00003]; /* VLStallCount */ + pseudo_bit_t MTUCAP[0x00004]; /* MTUCap */ + pseudo_bit_t reserved2[0x00004]; + pseudo_bit_t VLARLCAP[0x00008]; /* VLArbitrationLowCap */ +/* --------------------------------------------------------- */ + pseudo_bit_t PKVIO[0x00010]; /* P_KeyViolations */ + pseudo_bit_t MKVIO[0x00010]; /* M_Key_Violations */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00003]; + pseudo_bit_t SUBNTOUT[0x00005]; /* SubnetTimeout */ + pseudo_bit_t GUIDCAP[0x00008]; + pseudo_bit_t QKVIO[0x00010]; /* Q_KeyViolations */ +/* --------------------------------------------------------- */ + pseudo_bit_t local_phy_errors_threshold[0x00008];/* local_phy_errors_threshold */ + pseudo_bit_t Overrun_Errors[0x00004]; + pseudo_bit_t reserved4[0x0000f]; + pseudo_bit_t RTVAL[0x00005]; /* RespTimeValue */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00020]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00080]; +/* 
--------------------------------------------------------- */ +}; + +/* (NTU) */ + +struct NTU_st { /* Little Endian */ + struct IRisc_st irisc; /* Interrupt Routing: + ---------------------------- + 0 misaligned access + 1 code bp + 2 data bp + 3 trap + + 4 ipc + 5 ccm + 6 dcm + 7 pcnt0 + 8 pcnt1 + + 9 qltm - QLT miss + 10 cmpe - NTU completion with error + 11 cellef - Cell exception FIFO not empty + 12 halt - qlt trap and NTU halted + 13 i1pc_gpint0 - general purpose register from PCU + 14 i1pc_gpint1 - general purpose register from PCU + 15 i1pc_gpint2 - general purpose register from PCU + 16 i1pc_gpint3 - system error from PCU + 17 i1cp_cfgint - got a configuration cycle in PCU + 18 pcuerp - PCU has received a transaction from N-switch with the ERP bit set and needs to be serviced + 19 swgcle - from PCU - SW cycle generator busy bit released (transaction to PCI) + 21 cmdif - command interface from TPT + 22 cons - YU consolidated cause register + 23 vc_pci - Virtual CrSpace. Comes from the NSI + 24 vc_i2c - Virtual CrSpace. Comes from YU + 25 vc_cpu - Virtual CrSpace. Comes from YU + 26 perf0 - performance counter for QLT + 27 i1dm_clbdrft_int - calibration + 28 i1dm_gpint1 - general purpose register from DMU + 29 i1dm_gpint2 - general purpose register from DMU + 30 i1dm_gpint3 - general purpose register from DMU + + 31 timer */ +/* --------------------------------------------------------- */ + struct NSWADDRDEC_st ntunswdec; +/* --------------------------------------------------------- */ + struct NTUGENERAL_st ntugeneral; +/* --------------------------------------------------------- */ + struct NTUQLTVALID_st ntuqltvalid; +/* --------------------------------------------------------- */ + struct NTUFENCE_st ntu_fence; /* ntu fence */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00100]; +/* --------------------------------------------------------- */ + struct CAUSEREG_st ntucause; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00300]; +/* --------------------------------------------------------- */ + struct NTURDB_st ntuoutsbufattr0; +/* --------------------------------------------------------- */ + struct NTURDB_st ntuoutsbufattr1; +/* --------------------------------------------------------- */ + struct NTURDB_st ntuoutsbufattr2; +/* --------------------------------------------------------- */ + struct NTURDB_st ntuoutsbufattr3; +/* --------------------------------------------------------- */ + struct NTUWRB_st ntuoutsbufattr4; +/* --------------------------------------------------------- */ + struct NTUWRB_st ntuoutsbufattr5; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00400]; +/* --------------------------------------------------------- */ + struct NTU_BIST_st ntu_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00c00]; +/* --------------------------------------------------------- */ + struct NTUQLTGW_st ntuqltgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00600]; +/* --------------------------------------------------------- */ + struct NTUQPWGW_st ntuqpwgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00780]; +/* --------------------------------------------------------- */ + struct NTUDESCGW_st ntudescgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00400]; +/* --------------------------------------------------------- */ + struct NTUEVENTS_st ntuevents; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00400]; +/* --------------------------------------------------------- */ + struct NTUEXCPFIFO_st ntuexcfifo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00700]; +/* --------------------------------------------------------- */ + struct NTUATTRFIFO_st ntuattrfifo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00700]; +/* --------------------------------------------------------- */ + struct NTUERRFIFO_st ntupcuerrgw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00300]; +/* --------------------------------------------------------- */ + struct NTU_debug_fsm_st ntu_debug_fsm; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x08b00]; +/* --------------------------------------------------------- */ +};
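+
+/* A minimal sketch mapping a few of the NTU IRisc interrupt-routing bits
+ * listed above to printable names; the table is sparse on purpose,
+ * matching the numbering in the comment (C99 designated initializers). */
+static const char *const ntu_irisc_cause_name[32] = {
+    [0]  = "misaligned access",
+    [9]  = "qltm - QLT miss",
+    [10] = "cmpe - NTU completion with error",
+    [11] = "cellef - cell exception FIFO not empty",
+    [12] = "halt - QLT trap and NTU halted",
+    [31] = "timer",
+};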
+ +/* DDR Memory Interface Unit (DMU) */ + +struct DMU_st { /* Little Endian */ + struct DMUGENERALINT_st dmuigeneral; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00080]; +/* --------------------------------------------------------- */ + struct DMU_BIST_st dmu_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00280]; +/* --------------------------------------------------------- */ + struct DMU_CAUSEREG_st dmucause0; /* bit 0 - Data Integrity Fifo is not empty. + bit 1 - Data Integrity Fifo had overflow. + bit 2 - Calibration drift indication. + bit 3 - NSB Rx port 0 error. + bit 4 - NSB Rx port 1 error. + bit 5 - Crslave macro error. 
+ */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00700]; +/* --------------------------------------------------------- */ + struct DMURXCONFIGINT_st dmurx0i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x001e0]; +/* --------------------------------------------------------- */ + struct DMURXCONFIGINT_st dmurx1i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x001e0]; +/* --------------------------------------------------------- */ + struct DMUERRORFIFO_st dmuerrfifo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x001a0]; +/* --------------------------------------------------------- */ + struct DMUDEBUGINT_st dmudebugint; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00180]; +/* --------------------------------------------------------- */ + struct DIMMBarMask_st dmudimm0i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00180]; +/* --------------------------------------------------------- */ + struct DIMMBarMask_st dmudimm1i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00180]; +/* --------------------------------------------------------- */ + struct DIMMBarMask_st dmudimm2i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00180]; +/* --------------------------------------------------------- */ + struct DIMMBarMask_st dmudimm3i; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x02180]; +/* --------------------------------------------------------- */ + struct DMUGENERALEXT_st dmudgeneral; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00380]; +/* --------------------------------------------------------- */ + struct DMUSTATISTIC_st dmustatistics; +/* --------------------------------------------------------- */ + struct DMURXCONFIGEXT_st dmurx0d; /* rx port configuration 1 register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x001e0]; +/* --------------------------------------------------------- */ + struct DMURXCONFIGEXT_st dmurx1d; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved13[0x00060]; +/* --------------------------------------------------------- */ + struct dmustatus_st dmustatus; /* the status registers of the dmu */ +/* --------------------------------------------------------- */ + struct DMUDEBUGEXT_st dmudebugext; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved14[0x00080]; +/* --------------------------------------------------------- */ + struct DIMMConfig_st dmudimm0d; +/* --------------------------------------------------------- */ + struct DIMMConfig_st dmudimm1d; +/* --------------------------------------------------------- */ + struct DIMMConfig_st dmudimm2d; +/* --------------------------------------------------------- */ + struct DIMMConfig_st dmudimm3d; +/* --------------------------------------------------------- */ + struct dmu_chk_dimm_st dmu_chk_dimm; +/* --------------------------------------------------------- */ +}; + +/* PCI/PCIX Interface Unit (PCU) */ + +struct PCU_st { /* Little Endian */ + struct PCUGENERALEXT_st pcu_general; +/* --------------------------------------------------------- */ 
+ pseudo_bit_t reserved0[0x000e0]; +/* --------------------------------------------------------- */ + struct pcu_tx_st pcutx; +/* --------------------------------------------------------- */ + struct pcu_prefetch_st pcu_prefetch; +/* --------------------------------------------------------- */ + struct pcu_debug_st pcu_debug; /* includes internal state too. */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00060]; +/* --------------------------------------------------------- */ + struct pci_arbiter_st pci_arbiter; +/* --------------------------------------------------------- */ + struct conf_header_registers_st cfg_header_regs; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00a00]; +/* --------------------------------------------------------- */ + struct Nswitch_address_deocder_st pcu_address_decoder; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00400]; +/* --------------------------------------------------------- */ + struct cmd_out_st cmd_out; +/* --------------------------------------------------------- */ + struct PCUSWCYCLES_st pcuswcycles; +/* --------------------------------------------------------- */ + struct PCUCONFIGCYCLES_st pcuconfigcycles; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00380]; +/* --------------------------------------------------------- */ + struct pcu_err_log_st pcu_err_log; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00100]; +/* --------------------------------------------------------- */ + struct pcu_init_st pcu_init; /* PCU init control register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00180]; +/* --------------------------------------------------------- */ + struct pcu_monitor_st pcu_monitor; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00b00]; +/* --------------------------------------------------------- */ + struct CAUSEREG_st pcucause; /* Cause bits: + 31 - reserved + 30 - pcix_enable - Don't use this cause bit. PCIX mode bit can be found at: /pcu0/pcu_init/pcix_en. It should not be a cause bit. + 29 - Overflow in command fifo to NSW + 28 - Overflow in data fifo to NSW + 27 - Overflow in posted lane credits to HCA + 26 - Overflow in non-posted lane credits to HCA + 25 - Overflow in credits of responses HCA + 24 - Changed in A-1. Now it is inb_pw_perr_hca_dmu (bug 6424) - data parity error has occurred during posted write transaction targeted to HCA or DMU. + 23 - Changed in A-1. Now it is bug6010_posted_flush_cause (bug 6010) - outbound posted write was flushed due to target abort or master abort. + 22 - Changed in A-1. Now it is bug5128_commit_discard_cause - discard timer expiration (relevant in PCI mode only). + 21 - Changed in A-1. Now it is tran_buff_discard_entry (bug 5276 in A-1) - indicates that split completion timeout expired and caused an entry in tranbuff to be discarded. + 20 - Unexpected split completion arrived at the swcycles gate-way. + 19 - Don't use this cause bit. 64-bit supported bit can be found elsewhere in crspace. It should not be a cause bit. + 18 - Received-Master-Abort status bit from PCI/X master. + 17 - Received-Target-Abort status bit from PCI/X master. 
+ 16 - Master-Detected-Parity-Error status bit from PCI/X master + 15 - Parity-Error-Detected status bit from PCI/X master + 14 - Parity-Error-Detected status bit from PCI/X target + 13 - Signalled-SERR# status bit + 12 - Signalled-Target-Abort from PCI/X target + 11 - Received-SERR# status bit + 10 - Received-Target-Abort status bit from PCI/X target + 9 - Received-Master-Abort status bit from PCI/X target + 8 - Master-Detected-Parity-Error status bit from PCI/X target + 7 - Attribute parity error detected by PCI/X target + 6 - Master-abort occurred on secondary bus (NSW) during inbound posted write + 5 - Master-abort occurred on secondary bus (NSW) during inbound non-posted transaction + 4 - Target-abort occurred on secondary bus (NSW) during inbound non-posted transaction, i.e. a NACK was received from HCA/DMU/NTU. + 3 - Split completion was discarded by non-posted inbound engines, due to target abort or master abort on PCIX bus + 2 - pcr_rd_sc_discard - Don't use this bit. + 1 - Unexpected split completion arrived + 0 - Split completion message error arrived + + + + */ +/* --------------------------------------------------------- */ + struct CAUSEREG_st pcu_pcix_cause; /* Cause Bits: + 31:10 - reserved + 9 - Overflow in posted lane sync-fifo from HCA + 8 - Overflow in non-posted lane sync-fifo from HCA + 7 - Overflow in response sync-fifo from HCA + 6 - Overflow in response sync-fifo from DMU + 5 - Overflow in response sync-fifo from NTU + 4 - Not used. Always '0. + 3 - cfgerr: err output of crslavet // Stuck on cr-space access + 2:0 - reserved + --------- + PCI/X oriented cause bits: + PCI (Primary) status register: + 0 - master_data_perr (24) + 1 - signaled_target_abort (27) + 2 - recieved_target_abort (28) + 3 - recieved_master_abort (29) + 4 - signaled_serr (30) + 5 - detected_perr (31) + Bridge Secondary status register + 9 - split_comp_discarded + 10 - unexpected_split_comp + 11 - split completion overrun + 12 - split completion delayed + Bridge Status Register + - Primary Split completion discarded + - Primary Unexpected split completion + - Primary split completion overrun + - Primary split completion delayed + - discard timer expired (inbound, np, pci only) + */ +/* --------------------------------------------------------- */ + struct PCUGENERALINT_st pcu_general_i1; +/* --------------------------------------------------------- */ + struct PCU_BIST_st pcu_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x03c80]; +/* --------------------------------------------------------- */ +}; + +/* Miscellaneous Control Registers */ + +struct misc_st { /* Little Endian */ + struct HCA_st HCA; +/* --------------------------------------------------------- */ + struct init_and_ctrl_st INTCTL; /* Initialization and Control Register */ +/* --------------------------------------------------------- */ + struct BIST_result_st BISTR; /* BIST Result Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t TCLK[0x00020]; /* Time Clock Register */ +/* --------------------------------------------------------- */ + struct IB_port_clock_timer_st IBPCLKT;/* IB Port Clock Timer Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t TCLK2[0x00020]; /* Time Clock Register */ +/* --------------------------------------------------------- */ + struct IB_BIST_result_st BISTR2; /* IB Bist result register */ +/* --------------------------------------------------------- */ + struct prog_timers_st 
PRGT; /* Programmable Timers Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x000e0]; +/* --------------------------------------------------------- */ + struct GPIO_st GPIO; /* GPIO Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00100]; +/* --------------------------------------------------------- */ + struct encoded_intr_ctlr_regs_st EICTLR;/* Encoded Interrupt Controller Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00060]; +/* --------------------------------------------------------- */ + struct serdes_general_cont_st SERGCNTRL; +/* --------------------------------------------------------- */ + struct serdes_rsu_st RSU; +/* --------------------------------------------------------- */ + pseudo_bit_t GMON[0x00020]; /* General Monitor Register */ +/* --------------------------------------------------------- */ + struct system_monitoring_ctrl_st SMONCTL;/* System Monitoring Control Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t CFGVER[0x00020]; +/* --------------------------------------------------------- */ + struct par_debug_p_st ParallelDebugPort;/* Parallel Debug port */ +/* --------------------------------------------------------- */ + struct parallel_CPU_port_st PCPUP; /* Parallel CPU Port Register */ +/* --------------------------------------------------------- */ + struct serial_port_slave_st SPS; /* Serial Port Slave Registers */ +/* --------------------------------------------------------- */ + struct IBML_slave_st IBMLS; /* IB-ML Slave Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x000c0]; +/* --------------------------------------------------------- */ + struct serial_port_master_st SPM; /* Serial Port Master Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00080]; +/* --------------------------------------------------------- */ + struct flash_memory_st FLASH; +/* --------------------------------------------------------- */ + struct CONSCAUSE_st consolidatedcause; +/* --------------------------------------------------------- */ + struct cs_brk_point_st CSBRKP; +/* --------------------------------------------------------- */ + pseudo_bit_t CONFBUSTO[0x00020]; /* Configuration bus timeout register */ +/* --------------------------------------------------------- */ + struct miscellaneous_cause_register_st miscellaneous_cause; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00440]; +/* --------------------------------------------------------- */ + struct cs_semaphores_st SEMAPHOR; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00800]; +/* --------------------------------------------------------- */ + struct BIST_DEBUG_st bist_debug; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x0d580]; +/* --------------------------------------------------------- */ +}; + +/* Local Outstanding Requests Data Base (LDB) */ + +struct LDB_st { /* Little Endian */ + pseudo_bit_t ldb_qpc_token[0x00008];/* token to qpc - 8 msb bits: (slice number, ldb, 0) */ + pseudo_bit_t reserved0[0x00018]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00018]; + pseudo_bit_t gp_cfg[0x00008]; /* General Purpose 
Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00040]; +/* --------------------------------------------------------- */ + struct ldb_debug_st ldb_debug; /* debug registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t qpc_credits[0x00005]; /* credits between LDB and SQPC */ + pseudo_bit_t reserved3[0x0001b]; +/* --------------------------------------------------------- */ + pseudo_bit_t erp_stop[0x00002]; /* Stop LDB - LDB will work only with the gateway. See the LDB mas for details. */ + pseudo_bit_t reserved4[0x0001e]; +/* --------------------------------------------------------- */ + pseudo_bit_t ldb_credits[0x0000c]; /* credits of the LDB, from 0 to the size of the LinkList. */ + pseudo_bit_t reserved5[0x00014]; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00060]; +/* --------------------------------------------------------- */ + struct LDB_BIST_st ldb_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x00180]; +/* --------------------------------------------------------- */ + struct LDBGW_st ldbgw; /* LDB Gateway */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00300]; +/* --------------------------------------------------------- */ + struct LDB_CAUSEREG_st ldbcausereg; /* Cause Bits: + 31:5 - reserved + 4 - crslave_err + 3 - enqueue when linklist full + 2 - dequeue when linklist empty + 1 - dequeue wrong number + 0 - parity error */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x07700]; +/* --------------------------------------------------------- */ +};
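+
+/* A minimal sketch over the LDB cause bits listed above; treating the
+ * parity error (bit 0) and crslave_err (bit 4) as the ones that demand
+ * attention is an illustrative classification, not something this header
+ * mandates. */
+static inline int ldb_cause_needs_attention(unsigned int cause)
+{
+    return (cause & ((1u << 0) | (1u << 4))) != 0;
+}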
+ +/* Queue Pairs and Completion Queues Context (QPC) */ + +struct QPC_st { /* Little Endian */ + struct IRisc_st irisc; /* Interrupt Routing: + ---------------------------- + 0 misaligned access + 1 code bp + 2 data bp + 3 trap + + 4 ipc + 5 ccm + 6 dcm + 7 pcnt0 + 8 pcnt1 + + 9 sqpm - Send QPC cache miss + 10 rqpm - Receive QPC cache miss + 11 cqm - Completion Context cache miss + 12 cqdb - CQ doorbell (FIFO not empty) + 13 nsigw - nsi gw busy bit released + 14 tmot - QPC timeout on a QP + 17 perf0 - performance counter on SQPC + 18 perf1 - performance counter on RQPC + 19 perf2 - performance counter on RQPC + 22 cons - consolidated cause register from YU + 27 i1ldb_gpint0 - general purpose int pin from LDB + 28 i1ldb_gpint1 - general purpose int pin from LDB + 29 i1ldb_gpint2 - general purpose int pin from LDB + 30 i1ldb_gpint3 - general purpose int pin from LDB + + 31 timer */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved0[0x00180]; +/* --------------------------------------------------------- */ + struct CQDBFIFO_st cqdbfifo; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00240]; +/* --------------------------------------------------------- */ + struct QPC_CAUSEREG_st qpccause; /* Cause Bits: + 31:8 - reserved + 7 - CRSlave timeout + 6 - Hit in more than one way in CQC + 5 - CQC Parity Error + 4 - Hit in more than one way in RQPC + 3 - RQPC Parity Error + 2 - Hit in more than one way in SQPC + 1 - SQPC Parity Error + 0 - IRISC Error */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00300]; +/* --------------------------------------------------------- */ + struct CACHEPERF_st sqpcperf; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00180]; +/* --------------------------------------------------------- */ + struct CACHEPERF_st rqpcperf; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00180]; +/* --------------------------------------------------------- */ + struct CACHEPERF_st cqcperf; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00380]; +/* --------------------------------------------------------- */ + struct QCGWS_st recieve_gws; /* Receive (Responder Flow) Context Gateways */ +/* --------------------------------------------------------- */ + struct QCGWS_st comp_gws; /* Completion Queues Context Gateways */ +/* --------------------------------------------------------- */ + struct QCGWS_st send_gws; +/* --------------------------------------------------------- */ + struct QPCTIMERS_st send_timers; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x00300]; +/* --------------------------------------------------------- */ + struct SEND_FSM_st send_fsm; +/* --------------------------------------------------------- */ + struct RECEIVE_FSM_st receive_fsm; +/* --------------------------------------------------------- */ + struct COMP_FSM_st comp_fsm; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved7[0x003a0]; +/* --------------------------------------------------------- */ + struct QPCNSIGW_st qpc_nsigw; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved8[0x00800]; +/* --------------------------------------------------------- */ + struct QPCBASEADDR_st qpcbaseaddr; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved9[0x00200]; +/* --------------------------------------------------------- */ + struct QPCGRLCFG_st qpcgrlcfg; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved10[0x00380]; +/* --------------------------------------------------------- */ + struct QPC_BIST_st qpc_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved11[0x00480]; +/* --------------------------------------------------------- */ + struct QPCDIRTY_st qpc_dirty; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved12[0x00b40]; +/* --------------------------------------------------------- */ +};
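+
+/* A minimal sketch distinguishing the three cache parity errors in the QPC
+ * cause word above (SQPC bit 1, RQPC bit 3, CQC bit 5); returns a printable
+ * name, or a null pointer when no parity error is flagged. */
+static inline const char *qpc_parity_source(unsigned int cause)
+{
+    if (cause & (1u << 1)) return "SQPC";
+    if (cause & (1u << 3)) return "RQPC";
+    if (cause & (1u << 5)) return "CQC";
+    return 0;
+}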
+ This bit has another instance in the RDE - they must be configured to the same value !!!! */
+ pseudo_bit_t one_tx[0x00001]; /* In the NSI upstream configuration this bit configures the number of TXs that the NSI will use.
+ When set to one, life is easy but performance sucks.
+ When set to zero, 2 TXs are used - other registers in the NSI need to be configured in a special way.
+ Those registers are cfg4nmxvl_0 & cfg4nmxl_1; their description explains how they should be configured. */
+ pseudo_bit_t cr_vl[0x00001]; /* CR space read responses will go on this VL */
+ pseudo_bit_t reserved0[0x0000a];
+ pseudo_bit_t dbarb_ap[0x00003]; /* Doorbell arbiter absolute priority configuration register.
+ This arbiter arbitrates between the NTU and the PCU.
+ Values :
+ 3'b001 : Round robin.
+ 3'b010 : NTU has absolute priority.
+ 3'b100 : PCU has absolute priority.
+ Don't try anything else */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t rx_ap_cfg[0x00003]; /* This register configures the arbiter of the responses from the NTU, PCU & DMU.
+ One agent will always get absolute priority.
+ Values :
+ 3'b001 : PCU has absolute priority.
+ 3'b010 : DMU has absolute priority.
+ 3'b100 : NTU has absolute priority. */
+ pseudo_bit_t reserved2[0x00007];
+/* --------------------------------------------------------- */
+ struct nsi_nonp_cfg_st nsi_nonp_cfg;/* This register configures the various values needed in order to return read responses to read requests to DoorBell pages. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_0[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_1[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_2[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_3[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_4[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_5[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_6[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_7[0x00020]; /* Debug feature */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved4[0x00020];
+/* --------------------------------------------------------- */
+ struct mem_bar_st crsapce; /* CR space base address & size - these registers must be configured with the CR address in the Tavor address space. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved5[0x00040];
+/* --------------------------------------------------------- */
+ struct CAUSEREG_st nsicause; /* Only the first two registers are valid :
+ Bit 0 = CR space timeout occurred.
+ Bit 1 = Virtual CR space access. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved6[0x00200];
+/* --------------------------------------------------------- */
+ struct ns_creds_dmu_st dmu_creds; /* DMU credits for posted, non-posted & responses. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved7[0x00020];
+/* --------------------------------------------------------- */
+ struct ns_creds_pcu_st pcu_creds; /* PCU credits for posted, non-posted & responses. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved8[0x00160];
+/* --------------------------------------------------------- */
+ struct ns_creds_ntu_st ntu_creds; /* NTU credits for posted, non-posted & responses. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved9[0x00060];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ex1dbcred[0x00008]; /* This register holds the number of credits (one credit per line of 16 bytes) from the NSI to EXE Unit 1.
+ Hardware changes this register all the time.
+ If, because of bugs, this register value ever needs to be changed, it should be done before HW has consumed any credits (before the first DoorBell).
+ CR space access has priority over HW access. */
+ pseudo_bit_t reserved10[0x00018];
+/* --------------------------------------------------------- */
+ pseudo_bit_t ex2dbcred[0x00008]; /* This register holds the number of credits (one credit per line of 16 bytes) from the NSI to EXE Unit 2.
+ Hardware changes this register all the time.
+ If, because of bugs, this register value ever needs to be changed, it should be done before HW has consumed any credits (before the first DoorBell).
+ CR space access has priority over HW access. */
+ pseudo_bit_t reserved11[0x00018];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved12[0x00140];
+/* --------------------------------------------------------- */
+ struct ns_vl_hexcreds_st ns_vl_hexcreds_0;/* These registers hold the credits (one credit = 128 bits) for the NSI FIFOs on the send side.
+ The default values are OK.
+ Hardware changes these registers all the time.
+ If, because of a bug, these registers need to be changed, it should be done before anyone has accessed the NSI !!! */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved13[0x00120];
+/* --------------------------------------------------------- */
+ struct ns_vl_hexcreds_st ns_vl_hexcreds_1;/* These registers hold the credits (one credit = 128 bits) for the NSI FIFOs on the send side.
+ The default values are OK.
+ Hardware changes these registers all the time.
+ If, because of a bug, these registers need to be changed, it should be done before anyone has accessed the NSI !!! */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved14[0x000a0];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved15[0x00018];
+ pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved16[0x000e0];
+/* --------------------------------------------------------- */
+ struct nsi_bist_st nsi_bist;
+/* --------------------------------------------------------- */
+ struct CRGW_st crgw; /* Virtual CR space access mechanism. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved17[0x00140];
+/* --------------------------------------------------------- */
+};
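+
+/* Editor's note: a minimal, self-contained sketch - not part of the original
+ * header - of how these pseudo_bit_t register maps are typically consumed.
+ * It assumes pseudo_bit_t is a one-byte type, so each array element stands
+ * for one bit and offsetof() arithmetic yields bit offsets; the macro names
+ * here are invented for illustration only. */
+#if 0 /* illustration only - not compiled */
+#include <stddef.h>
+#define BIT_OFFSET(s, f)  (offsetof(struct s, f))      /* field offset in bits  */
+#define BYTE_OFFSET(s, f) (offsetof(struct s, f) / 8)  /* field offset in bytes */
+#define BIT_SIZE(s, f)    (sizeof(((struct s *)0)->f)) /* field width in bits   */
+/* e.g. BYTE_OFFSET(NSI_st, nsicause) gives the CR-space byte offset of the
+ * NSI cause register relative to the start of the NSI block. */
+#endif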
+
+/* Translation and Protection Table (TPT) */
+
+struct TPT_st { /* Little Endian */
+ struct IRisc_st irisc; /* Interrupt Routing:
+ ----------------------------
+ 0 misaligned access
+ 1 code bp
+ 2 data bp
+ 3 trap
+
+ 4 ipc
+ 5 ccm
+ 6 dcm
+ 7 pcnt0
+ 8 pcnt1
+
+ 9 tptm - Region / Translation cache miss
+ 10 tptfl - RDE, SDE, EXE, CE "finished tpt translation requests and all nsi requests from these translations" - AND logic of these 4 units' inputs
+ 11 bind - Bind request (Bind FIFO not empty)
+ 12 nsigw - nsi gw busy bit released
+ 13 i1pc_gpint0 - general purpose register from PCU
+ 14 i1pc_gpint1 - general purpose register from PCU
+ 15 i1pc_gpint2 - general purpose register from PCU
+ 16 i1pc_gpint3 - system error from PCU
+ 17 i1cp_cfgint - configuration cycle (See also NTU cause register)
+ 18 pcuerp - PCU has received a transaction from N-switch with the ERP bit set and needs to be serviced
+ 19 swgcle - from PCU - SW cycle generator busy bit released (transaction to PCI)
+ 21 cmdif - Command interface
+ 22 cons - YU consolidated cause register, e.g. gpio, virtual crspace.
+ 23 vc_pci - Virtual CrSpace. Comes from the NSI
+ 24 vc_i2c - Virtual CrSpace. Comes from YU
+ 25 vc_cpu - Virtual CrSpace. Comes from YU
+ 26 perf0 - performance counter for TPT cache
+ 27 i1dm_clbdrft_int- calibration
+ 28 i1dm_gpint1 - general purpose register from DMU
+ 29 i1dm_gpint2 - general purpose register from DMU
+ 30 i1dm_gpint3 - general purpose register from DMU
+
+ 31 timer */
+/* --------------------------------------------------------- */
+ pseudo_bit_t tpt_cfg_tptenabled[0x00001];/* TPT modes :
+ 1 - TPT default mode - all TPT functionality is performed.
+ 0 - TPT disabled mode - TPT does only :
+ Nswitch address decoding (calculating nsq bits)
+ Page size crossing logic
+ It does not do virtual to physical address translation & does not check any of the access rights. */
+ pseudo_bit_t tpt_cfg_len_msb_ptr[0x00001];/* MAS 3.3.2
+ When set, the high 32 bits of the region length are ignored by hardware. In that case those bits can be used for other purposes (e.g. write your name - if you can write it in hex - or write the pointer to the page table of this region). */
+ pseudo_bit_t reserved0[0x00006];
+ pseudo_bit_t tpt_cfg_xlcache_conf[0x00004];/* When accessing the translation cache, one needs to select the index & tag bits from the vector
+ {Virtual address[63:12] , Lkey[31:0] }. This register selects which values go into the tag & which go into the index.
+ always @ ( tpt_cfg_xlcache_conf or req_va_page_align or srw_entry_lkey)
+ case(tpt_cfg_xlcache_conf)
+ 4'h0 : begin
+ xl_cache_index_pre = { srw_entry_lkey[ 7:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 0],srw_entry_lkey[31:8]};
+ end
+ 4'h1 : begin
+ xl_cache_index_pre = {req_va_page_align[ 0],srw_entry_lkey[ 6:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 1],srw_entry_lkey[31:7]};
+ end
+ 4'h2 : begin
+ xl_cache_index_pre = {req_va_page_align[ 1: 0],srw_entry_lkey[ 5:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 2],srw_entry_lkey[31:6]};
+ end
+ 4'h3 : begin
+ xl_cache_index_pre = {req_va_page_align[ 2: 0],srw_entry_lkey[ 4:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 3],srw_entry_lkey[31:5]};
+ end
+ 4'h4 : begin
+ xl_cache_index_pre = {req_va_page_align[ 3: 0],srw_entry_lkey[ 3:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 4],srw_entry_lkey[31:4]};
+ end
+ 4'h5 : begin
+ xl_cache_index_pre = {req_va_page_align[ 4: 0],srw_entry_lkey[ 2:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 5],srw_entry_lkey[31:3]};
+ end
+ 4'h6 : begin
+ xl_cache_index_pre = {req_va_page_align[ 5: 0],srw_entry_lkey[ 1:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 6],srw_entry_lkey[31:2]};
+ end
+ 4'h7 : begin
+ xl_cache_index_pre = {req_va_page_align[ 6: 0],srw_entry_lkey[ 0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 7],srw_entry_lkey[31:1]};
+ end
+ 4'h8 : begin
+ xl_cache_index_pre = {req_va_page_align[ 7: 0] };
+ xl_cache_tag_pre = {req_va_page_align[51: 8],srw_entry_lkey[31:0]};
+ end
+ // For pages bigger than 4k:
+ 4'h9 : begin
+ xl_cache_index_pre = {req_va_page_align[ 4] ,srw_entry_lkey[ 6:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 5],req_va_page_align[3:0],srw_entry_lkey[31:7]};
+ end
+ 4'ha : begin
+ xl_cache_index_pre = {req_va_page_align[ 5: 4] ,srw_entry_lkey[ 5:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 6],req_va_page_align[3:0],srw_entry_lkey[31:6]};
+ end
+ 4'hb : begin
+ xl_cache_index_pre = {req_va_page_align[ 6: 4] ,srw_entry_lkey[ 4:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 7],req_va_page_align[3:0],srw_entry_lkey[31:5]};
+ end
+ 4'hc : begin
+ xl_cache_index_pre = {req_va_page_align[ 7: 4] ,srw_entry_lkey[ 3:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 8],req_va_page_align[3:0],srw_entry_lkey[31:4]};
+ end
+ 4'hd : begin
+ xl_cache_index_pre = {req_va_page_align[ 8: 4] ,srw_entry_lkey[ 2:0]};
+ xl_cache_tag_pre = {req_va_page_align[51: 9],req_va_page_align[3:0],srw_entry_lkey[31:3]};
+ end
+ 4'he : begin
+ xl_cache_index_pre = {req_va_page_align[ 9: 4] ,srw_entry_lkey[ 1:0]};
+ xl_cache_tag_pre = {req_va_page_align[51:10],req_va_page_align[3:0],srw_entry_lkey[31:2]};
+ end
+ 4'hf : begin
+ xl_cache_index_pre = {req_va_page_align[10: 4] ,srw_entry_lkey[ 0]};
+ xl_cache_tag_pre = {req_va_page_align[51:11],req_va_page_align[3:0],srw_entry_lkey[31:1]};
+ end */
+ pseudo_bit_t reserved1[0x00004];
+ pseudo_bit_t nsb_page_size[0x00004];/* page_size encoding
+ 0000 - reserved
+ 0001 - reserved
+ 0010 - 4K (default)
+ 0011 - 8K
+ 0100 - 16K
+ 0101 - 32K
+ 0110 - 64K
+ Others - reserved */
+ pseudo_bit_t reserved2[0x0000c];
+/* --------------------------------------------------------- */
+ pseudo_bit_t tpt_cfg_disable_cross_page_bndry[0x00001];/* When set, TPT does not check page boundary crossing. */
+ pseudo_bit_t tpt_cfg_disable_pd_check[0x00001];/* When set, TPT does not check the protection domain */
+ pseudo_bit_t tpt_cfg_disable_ar_check[0x00001];/* When set, TPT does not check access rights */
+ pseudo_bit_t tpt_cfg_disable_len_check[0x00001];/* When set, TPT does not check the length of the access */
+ pseudo_bit_t reserved3[0x00004];
+ pseudo_bit_t tpt_cfg_int_cross_page_bndry[0x00001];/* When set, TPT interrupts on a page-crossing violation. */
+ pseudo_bit_t tpt_cfg_int_pd[0x00001];/* When set, TPT interrupts on a protection domain violation. */
+ pseudo_bit_t tpt_cfg_int_ar[0x00001];/* When set, TPT interrupts on an access rights violation. */
+ pseudo_bit_t tpt_cfg_int_len[0x00001];/* When set, TPT interrupts on an access length violation. */
+ pseudo_bit_t reserved4[0x0000c];
+ pseudo_bit_t gp_cfg[0x00008]; /* General Purpose Configuration Register */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved5[0x001c0];
+/* --------------------------------------------------------- */
+ pseudo_bit_t rwt_base_add_63_32[0x00020];/* Base Address of the Region & Window table */
+/* --------------------------------------------------------- */
+ pseudo_bit_t rwt_base_add_31_0[0x00020];/* TPT MAS 3.3.1 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t rw_cache_enable[0x00001];/* Region & Window cache enable */
+ pseudo_bit_t reserved6[0x00007];
+ pseudo_bit_t rw_entry_size[0x00004];/* Log of the Region & Window entry size. */
+ pseudo_bit_t reserved7[0x00004];
+ pseudo_bit_t rw_entry_stride[0x00005];/* Log of the Region & Window entry stride. */
+ pseudo_bit_t reserved8[0x00003];
+ pseudo_bit_t rwt_lenght[0x00005]; /* Log of the number of Region & Window table entries. */
+ pseudo_bit_t reserved9[0x00003];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved10[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t translation_table_msbs[0x00020];/* The translation table base address is a concatenation of these bits & the bits in the Region entry. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t xl_cache_enable[0x00001];/* Translation cache enable. */
+ pseudo_bit_t reserved11[0x00007];
+ pseudo_bit_t xl_entry_size[0x00004];/* This register has a BUG !!!!!!!
+ The log of the translation entry size must be written to bits [3:1].
+ Bit [0] is ignored by HW !!!!!!
+ HW looks at bits [3:1] & treats them as the log of the entry size. */
+ pseudo_bit_t reserved12[0x00004];
+ pseudo_bit_t xl_entry_stride[0x00005];/* Log of the translation entry stride */
+ pseudo_bit_t reserved13[0x0000b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved14[0x00140];
+/* --------------------------------------------------------- */
+ struct TPTGW_st tptgw;
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_0[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_1[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_2[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_3[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_4[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_5[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_6[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t status_7[0x00020]; /* TPT Status and internal states - See TPT MAS section 6.5 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved15[0x00300];
+/* --------------------------------------------------------- */
+ pseudo_bit_t miss_bp_wm[0x00005]; /* When the number of misses in the miss FIFO reaches this number, back pressure is asserted to the TMX & no more transactions can enter the TPT.
+ The default value is OK. */
+ pseudo_bit_t reserved16[0x0001b];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved17[0x003e0];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved18[0x00001];
+ pseudo_bit_t bind_full0[0x00001]; /* Bind FIFO is full */
+ pseudo_bit_t bind_full1[0x00001]; /* Bind FIFO full */
+ pseudo_bit_t bind_empty0[0x00001]; /* Bind FIFO empty */
+ pseudo_bit_t bind_empty1[0x00001]; /* Bind FIFO empty */
+ pseudo_bit_t reserved19[0x00004];
+ pseudo_bit_t rd_addr[0x00003]; /* Bind FIFO read pointer */
+ pseudo_bit_t reserved20[0x00001];
+ pseudo_bit_t wr_addr[0x00003]; /* Bind FIFO write pointer */
+ pseudo_bit_t reserved21[0x00007];
+ pseudo_bit_t bind_poponrd_ro[0x00001];/* Bind GW Pop On Read bit
+ * This bit is read only - see bug 5210 */
+ pseudo_bit_t bind_pop[0x00001]; /* Pop bit of the Bind GW */
+ pseudo_bit_t bind_poponrd[0x00001]; /* Bind GW Pop On Read bit
+ * This bit is write only - see bug 5210 */
+ pseudo_bit_t reserved22[0x00005];
+ pseudo_bit_t bind_gw_locked[0x00001];/* Locked bit of the Bind GW */
+/* --------------------------------------------------------- */
+ pseudo_bit_t bind_data[0x00020]; /* The Bind FIFO is read from here */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved23[0x007c0];
+/* --------------------------------------------------------- */
+ pseudo_bit_t bind_result[0x00004]; /* Writing to this register transmits the bind status to the EXE.
+ After the write the data is erased. */
+ pseudo_bit_t reserved24[0x0001c];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved25[0x007e0];
+/* --------------------------------------------------------- */
+ struct TPT_CAUSEREG_st tptcause; /* Cause Bits:
+ 31:9 - reserved
+ 8 - nsw_error
+ 7 - rw_perr
+ 6 - xl_perr
+ 5 - tpt_pipe4 (debug)
+ 4 - tpt_pipe3 (debug)
+ 3 - tpt_pipe2 (debug)
+ 2 - tpt_pipe1 (debug)
+ 1 - tpt_pipe0 (debug)
+ 0 - tp_crtimeout_occured */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved26[0x00100];
+/* --------------------------------------------------------- */
+ struct CACHEPERF_st tptperf;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved27[0x00180];
+/* --------------------------------------------------------- */
+ struct HCACMDIFACE_st hca_command_interface;/* Writing to the last word of this field causes an interrupt */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved28[0x00300];
+/* --------------------------------------------------------- */
+ struct ECR_st eventcauseregister;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved29[0x04700];
+/* --------------------------------------------------------- */
+ struct Nswitch_address_deocder_st TPT_address_decoder;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved30[0x01000];
+/* --------------------------------------------------------- */
+ struct TPTNSIGW_st tpt_nsigw;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved31[0x01000];
+/* --------------------------------------------------------- */
+ struct tpt_bist_st tpt_bist;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved32[0x03000];
+/* --------------------------------------------------------- */
+ struct SCRPAD_st scratchpad; /* For FW general usage */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved33[0x08000];
+/* --------------------------------------------------------- */
+};
+
+/* Transport Slice */
+
+struct TSlice_st { /* Little Endian */
+ struct SDE_st sde;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x08000];
+/* --------------------------------------------------------- */
+ struct RDE_st rde;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved1[0x08000];
+/* --------------------------------------------------------- */
+ struct EXU_st exu;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved2[0x20000];
+/* --------------------------------------------------------- */
+ struct TCU_st tcu;
+/* --------------------------------------------------------- */
+};
+
+/* InfiniBand Port */
+
+struct ib_st { /* Little Endian */
+ struct IB_port_info_GUID_info_st IBCPC;/* Port IB Compliant Configuration Register */
+/* --------------------------------------------------------- */
+ struct VL_arbitration_table_st VLARBT;/* VL Arbitration Table */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00060];
+/* --------------------------------------------------------- */
+ struct Port_status_st PORTSR; /* Port Status Register */
+/* --------------------------------------------------------- */
+ struct buffer_allocation_st BUFALO; /* Buffer Allocation Registers */
+/*
--------------------------------------------------------- */ + struct ib_link_st link; /* IB Port Link Configuration Register */ +/* --------------------------------------------------------- */ + struct IB_port_PMEG_hdr_st PMEG; /* IB Port Performance Management and Event Generation Header */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved1[0x00480]; +/* --------------------------------------------------------- */ + struct Congestion_Control_Register_st Congestion_Control_Register; +/* --------------------------------------------------------- */ + struct SerDes_Configuration_and_Debug_st SerDes_configuration_and_Debug; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved2[0x00060]; +/* --------------------------------------------------------- */ + struct IB_Port_Link_Phy_Configuration_Register_st IBPPHYCON; +/* --------------------------------------------------------- */ + struct Extended_Buffer_Allocation_Register_st ExBUFA; +/* --------------------------------------------------------- */ + struct VL_mapping_st VLM; /* VL Mapping Registers */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved3[0x00080]; +/* --------------------------------------------------------- */ + struct IB1_BIST_st ib_bist; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved4[0x00020]; +/* --------------------------------------------------------- */ + struct Flow_Control_update_watchdog_register_st Flow_Ctrl_Up_WD; +/* --------------------------------------------------------- */ + pseudo_bit_t reserved5[0x00060]; +/* --------------------------------------------------------- */ + struct Parallel_Debug_Port_Configuration_st Parallel_Debug_Port_Config; +/* --------------------------------------------------------- */ + struct IB_port_general_config_st PGC;/* IB Port General Configuration Register */ +/* --------------------------------------------------------- */ + struct PktDiscConfigMask_st PktDiscConfigMask; +/* --------------------------------------------------------- */ + struct LBCTRL_st loopbackctrl; +/* --------------------------------------------------------- */ + struct port_debug_config_st PBUGC; /* Port Debug Configuration Register */ +/* --------------------------------------------------------- */ + pseudo_bit_t reserved6[0x02000]; +/* --------------------------------------------------------- */ +}; + +/* Area reserved for backwards compatibility */ + +struct bw_compat_st { /* Little Endian */ + pseudo_bit_t reserved0[0x80000]; +/* --------------------------------------------------------- */ +}; + +/* Copy from PRM */ + +struct PRM_QPCBASEADDR_st { /* Little Endian */ + pseudo_bit_t QPC_Base_Addr_H[0x00020];/* QPC Base Address [63:32] + Table must be aligned on its size */ +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_of_qp[0x00005];/* Log base 2 of number of supported QPs */ + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t QPC_Base_Addr_L[0x00019];/* QPC Base Address [31:7] + Table must be aligned on its size */ +/* --------------------------------------------------------- */ + pseudo_bit_t EEC_Base_Addr_H[0x00020];/* EEC Base Address [63:32] + Table must be aligned on its size. + Address may be set to zero if RD is not supported. */ +/* --------------------------------------------------------- */ + pseudo_bit_t log_num_of_ee[0x00005];/* Log base 2 of number of supported EEs. 
*/
+ pseudo_bit_t reserved1[0x00002];
+ pseudo_bit_t EEC_Base_Addr_L[0x00019];/* EEC Base Address [31:7]
+ Table must be aligned on its size
+ Address may be set to zero if RD is not supported. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CQC_Base_Addr_H[0x00020];/* CQC Base Address [63:32]
+ Table must be aligned on its size */
+/* --------------------------------------------------------- */
+ pseudo_bit_t log_num_of_cq[0x00005];/* Log base 2 of number of supported CQs. */
+ pseudo_bit_t reserved2[0x00001];
+ pseudo_bit_t CQC_Base_Addr_L[0x0001a];/* CQC Base Address [31:6]
+ Table must be aligned on its size */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EQPC_Base_Addr_H[0x00020];/* Extended QPC Base Address [63:32]
+ Table has same number of entries as QPC table.
+ Table must be aligned to entry size. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EQPC_Base_Addr_L[0x00020];/* Extended QPC Base Address [31:0]
+ Table has same number of entries as QPC table.
+ Table must be aligned to entry size. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EEEC_Base_Addr_H[0x00020];/* Extended EEC Base Address [63:32]
+ Table has same number of entries as EEC table.
+ Table must be aligned to entry size.
+ Address may be set to zero if RD is not supported. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EEEC_Base_Addr_L[0x00020];/* Extended EEC Base Address [31:0]
+ Table has same number of entries as EEC table.
+ Table must be aligned to entry size.
+ Address may be set to zero if RD is not supported. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t EQC_Base_Addr_H[0x00020];/* EQC Base Address [63:32]
+ Address may be set to zero if EQs are not supported.
+ Table must be aligned to entry size. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t log_num_eq[0x00004]; /* Log base 2 of number of supported EQs.
+ Must be 6 or less in InfiniHost. */
+ pseudo_bit_t reserved3[0x00002];
+ pseudo_bit_t EQC_Base_Addr_L[0x0001a];/* EQC Base Address [31:6]
+ Address may be set to zero if EQs are not supported.
+ Table must be aligned to entry size. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t RDB_Base_Addr_H[0x00020];/* Base address of the table that holds remote read and remote atomic requests [63:32].
+ Table must be aligned to RDB entry size (32 bytes).
+ Address may be set to zero if remote RDMA reads are not supported.
+ Please refer to the QP and EE chapter for further explanation on RDB allocation. */
+/* --------------------------------------------------------- */
+ pseudo_bit_t RDB_Base_Addr_L[0x00020];/* Base address of the table that holds remote read and remote atomic requests [31:0].
+ Table must be aligned to RDB entry size (32 bytes).
+ This field must always be zero.
+ Please refer to the QP and EE chapter for further explanation on RDB allocation. */
+/* --------------------------------------------------------- */
+};
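+
+/* Editor's note: a hedged sketch - not from the original header - of the
+ * sizing rule repeated above: each context table holds 2^log_num entries and
+ * must be aligned on its total size. The 256-byte QPC entry size in the
+ * example comment is an assumption for illustration only. */
+#if 0 /* illustration only - not compiled */
+#include <stdint.h>
+static int table_aligned_ok(uint64_t base, uint32_t entry_size, uint32_t log_num)
+{
+    uint64_t table_size = (uint64_t)entry_size << log_num; /* entries * entry size */
+    return (base & (table_size - 1)) == 0;                 /* base aligned on table size */
+}
+/* e.g. with 2^16 QPs and an assumed 256-byte QPC entry, the table occupies
+ * 16 MB, so QPC_Base_Addr must be 16 MB aligned. */
+#endif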
+
+/* */
+
+struct IR_SCRPAD_st { /* Little Endian */
+ pseudo_bit_t word[55][0x00020]; /* instead of RESERVED 248 */
+/* --------------------------------------------------------- */
+ pseudo_bit_t CHECK_STACK[0x00020]; /* A write to memory at this address means stack overflow */
+/* --------------------------------------------------------- */
+ pseudo_bit_t DEBUG[8][0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* Victor: buffer of 16 words */
+
+struct BUFF16_st { /* Little Endian */
+ pseudo_bit_t word[16][0x00020];
+/* --------------------------------------------------------- */
+};
+
+/* NSwitch Agent Credits */
+
+struct ns_creds_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00060];
+/* --------------------------------------------------------- */
+};
+
+/* Cause Registers */
+
+struct PCU_CAUSEREG_st { /* Little Endian */
+ pseudo_bit_t split_comp_err[0x00001];/* Split completion message error arrived */
+ pseudo_bit_t unexpected_split_comp[0x00001];/* Unexpected split completion arrived */
+ pseudo_bit_t pcr_rd_sc_discard[0x00001];/* pcr_rd_sc_discard - Don't use this bit. */
+ pseudo_bit_t split_comp_discarded[0x00001];/* Split completion was discarded by the non-posted inbound engines, due to a target abort or master abort on the PCIX bus */
+ pseudo_bit_t TA_secondary_bus[0x00001];/* Target abort occurred on the secondary bus (NSW) during an inbound non-posted transaction, i.e. a NACK was received from HCA/DMU/NTU. */
+ pseudo_bit_t MA_secondary_bus[0x00001];/* Master abort occurred on the secondary bus (NSW) during an inbound non-posted transaction */
+ pseudo_bit_t MA_secondary_bus_posted[0x00001];/* Master abort occurred on the secondary bus (NSW) during an inbound posted write */
+ pseudo_bit_t target_parity_err[0x00001];/* Attribute parity error detected by the PCI/X target */
+ pseudo_bit_t parity_err0[0x00001]; /* Master-Detected-Parity-Error status bit from the PCI/X target */
+ pseudo_bit_t rcv_MA_from_target[0x00001];/* Received-Master-Abort status bit from the PCI/X target */
+ pseudo_bit_t rcv_TA_from_target[0x00001];/* Received-Target-Abort status bit from the PCI/X target */
+ pseudo_bit_t rcv_SERR[0x00001]; /* Received-SERR# status bit */
+ pseudo_bit_t signal_TAfrom_target[0x00001];/* Signalled-Target-Abort from the PCI/X target */
+ pseudo_bit_t parity_err1[0x00001]; /* Parity-Error-Detected status bit from the PCI/X target */
+ pseudo_bit_t parity_err2[0x00001]; /* Master-Detected-Parity-Error status bit from the PCI/X master */
+ pseudo_bit_t cause[0x00011];
+/* --------------------------------------------------------- */
+ struct EXT_CAUSEREG_st extended_cause;
+/* --------------------------------------------------------- */
+};
+
+/* RDB Entry */
+
+struct RDBENTRY_st { /* Little Endian */
+ pseudo_bit_t psn[0x00018]; /* PSN of Request */
+ pseudo_bit_t opcode[0x00008]; /* OpCode */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* --------------------------------------------------------- */
+ pseudo_bit_t replydata_63_32_[0x00020];/* Reply Data[63:32] (for Atomics) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t replydata_31_0_[0x00020];/* Reply Data[31:0] (for Atomics) */
+/* --------------------------------------------------------- */
+ pseudo_bit_t len[0x00020]; /* Length - for RDMA Read */
+/* --------------------------------------------------------- */
+ pseudo_bit_t rkey[0x00020]; /* R_Key */
+/* --------------------------------------------------------- */
+ pseudo_bit_t va_63_32_[0x00020]; /* Virtual Address [63:32] */
+/* --------------------------------------------------------- */
+ pseudo_bit_t va_31_0_[0x00020]; /* Virtual Address [31:0] */
+/* --------------------------------------------------------- */
+};
+
+/* Tavor */
+
+struct Tavor_st { /* Little Endian */
+ struct bw_compat_st bw_compat; /* CR region saved for backwards compatibility */
+/* --------------------------------------------------------- */
+ struct ib_st IB[2];
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved0[0x178000];
+/* --------------------------------------------------------- */
+ struct TSlice_st ts0; /* Transport Slice 0 Configuration Space */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved1[0x180000];
+/* --------------------------------------------------------- */
+ struct TPT_st tpt;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved2[0x10000];
+/* --------------------------------------------------------- */
+ struct NSI_st nsi;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved3[0x0f000];
+/* --------------------------------------------------------- */
+ struct QPC_st qpc;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved4[0x18000];
+/* --------------------------------------------------------- */
+ struct LDB_st ldb;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved5[0x318000];
+/* --------------------------------------------------------- */
+ struct misc_st MISC; /* Miscellaneous Configuration Registers Summary */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved6[0x08000];
+/* --------------------------------------------------------- */
+ struct PCU_st pcu0; /* PCI/PCIX Unit Configuration Space */
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved7[0x08000];
+/* --------------------------------------------------------- */
+ struct DMU_st dmu0;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved8[0x10000];
+/* --------------------------------------------------------- */
+ struct NTU_st ntu0;
+/* --------------------------------------------------------- */
+ pseudo_bit_t reserved9[0x30000];
+/* --------------------------------------------------------- */
+};
+#endif /* H_bits_tav_csp_H */
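+
+/* Editor's note: a small follow-on to the earlier offsetof() sketch - not
+ * part of the original header - dumping the byte offsets of some top-level
+ * Tavor_st units. It relies on the same one-bit-per-element assumption for
+ * pseudo_bit_t. */
+#if 0 /* illustration only - not compiled */
+#include <stdio.h>
+#include <stddef.h>
+#define UNIT_BYTE_OFF(f) (offsetof(struct Tavor_st, f) / 8)
+int main(void)
+{
+    printf("tpt at 0x%zx\n", UNIT_BYTE_OFF(tpt)); /* CR-space byte offset */
+    printf("nsi at 0x%zx\n", UNIT_BYTE_OFF(nsi));
+    printf("qpc at 0x%zx\n", UNIT_BYTE_OFF(qpc));
+    return 0;
+}
+#endif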
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/version.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/version.h
new file mode 100644
index 00000000..2a62c2ce
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/os_dep/win/tdriver/version.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef _VERSION_H
+#define _VERSION_H
+
+#define FILE_VERSION "2.0.0.0\0"
+#define FV1 2
+#define FV2 0
+#define FV3 0
+#define FV4 0
+#define PV1 FV1
+#define PV2 FV2
+#define PV3 FV3
+#define PV4 FV4
+#define PRODUCT_VERSION FILE_VERSION
+#define PRIVATE_BUILD "\0"
+#define SPECIAL_BUILD PRIVATE_BUILD
+#define FILE_DESCRIPTION "MT23108 Card Driver\0"
+#define INTERNAL_NAME "Mdt.sys\0"
+#define ORIGINAL_NAME INTERNAL_NAME
+#define PRODUCT_NAME "Mellanox Driver Development Kit\0"
+#endif
+
diff --git a/branches/Ndi/hw/mt23108/vapi/mlxsys/tools/mtperf.h b/branches/Ndi/hw/mt23108/vapi/mlxsys/tools/mtperf.h
new file mode 100644
index 00000000..7e87fb62
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/mlxsys/tools/mtperf.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MTPERF_H
+#define H_MTPERF_H
+
+#ifdef MTPERF
+
+
+#include
+#include
+
+
+typedef struct {
+ u_int64_t time_accum; /* Total time of the segment accumulated so far */
+ u_int32_t samples_cntr; /* Counter for the number of times the segment was sampled */
+ u_int64_t start; /* Start time of the current sample */
+ u_int64_t sample_time; /* temporary var. to compute the sample time */
+ u_int64_t sample_accum; /* for supporting pause/cont */
+ u_int64_t latency_limit; /* Limit of a sample - to exclude context switches */
+ u_int32_t exceed_cntr; /* counter for measurements that exceed latency_limit */
+} MTPERF_segment_t;
+
+/* Define a new performance measurement segment */
+#define MTPERF_NEW_SEGMENT(segment_name,estimated_tick_latency) \
+ MTPERF_segment_t MTPERF_##segment_name = {0,0,0,0,0,estimated_tick_latency*100,0}
+
+/* Declare a segment which was defined in another source file */
+#define MTPERF_EXTERN_SEGMENT(segment_name) extern MTPERF_segment_t MTPERF_##segment_name
+
+/* Start a sample of the given segment (sample current time) */
+#define MTPERF_TIME_START(segment_name) MTPERF_##segment_name.start = MOSAL_get_time_counter()
+
+/* Pause the sample of the current segment (the time accumulator is updated, but the counter is not) */
+#define MTPERF_TIME_PAUSE(segment_name) /* not supported yet */
+
+/* Continue the segment time measurement */
+#define MTPERF_TIME_CONT(segment_name) /* not supported yet */
+
+/* End time accumulation of the segment (counter++) */
+#define MTPERF_TIME_END(segment_name) \
+ MTPERF_##segment_name.sample_time = \
+ (MOSAL_get_time_counter()- MTPERF_##segment_name.start) ; \
+ if (MTPERF_##segment_name.sample_time > MTPERF_##segment_name.latency_limit) { \
+ MTPERF_##segment_name.exceed_cntr++ ; \
+ } else { /* normal sample */ \
+ MTPERF_##segment_name.time_accum += MTPERF_##segment_name.sample_time; \
+ MTPERF_##segment_name.samples_cntr++ ; \
+ }
+
+/* MTPERF_TIME_END only if the given condition is met */
+#define MTPERF_TIME_END_IF(segment_name,condition) \
+ if (condition) {MTPERF_TIME_END(segment_name)}
+
+/* Return current status */
+#define MTPERF_REPORT(segment_name,samples_cnt_p,total_ticks_p,exceed_cntr_p) \
+ *samples_cnt_p= MTPERF_##segment_name.samples_cntr; \
+ *total_ticks_p= MTPERF_##segment_name.time_accum; \
+ *exceed_cntr_p= MTPERF_##segment_name.exceed_cntr;
+
+/* Output current status using printf */
+
+#ifndef MT_KERNEL
+#define MTPERF_REPORT_PRINTF(segment_name) \
+ printf("%s segment stats: %d times in " U64_FMT_SPEC "u ticks - average= " U64_FMT_SPEC "u ticks (%d exc.).\n", \
+ #segment_name,MTPERF_##segment_name.samples_cntr, \
+ (u_int64_t) MTPERF_##segment_name.time_accum, \
+ (u_int64_t) ((MTPERF_##segment_name.samples_cntr > 0) ? \
+ MTPERF_##segment_name.time_accum/MTPERF_##segment_name.samples_cntr : 0), \
+ MTPERF_##segment_name.exceed_cntr)
+#else
+#define MTPERF_REPORT_PRINTF(segment_name) \
+ printk("%s segment stats: %d times in " U64_FMT_SPEC "u ticks - average= %d ticks (%d exc.).\n", \
+ #segment_name,MTPERF_##segment_name.samples_cntr, \
+ (u_int64_t) MTPERF_##segment_name.time_accum, \
+ (MTPERF_##segment_name.samples_cntr > 0) ? \
+ (u_int32_t)(MTPERF_##segment_name.time_accum)/MTPERF_##segment_name.samples_cntr : 0, \
+ MTPERF_##segment_name.exceed_cntr)
+#endif
+
+/* Reset the counter and samples accumulator of the given segment */
+#define MTPERF_RESET(segment_name) \
+ MTPERF_##segment_name.samples_cntr= 0; \
+ MTPERF_##segment_name.time_accum= 0; \
+ MTPERF_##segment_name.exceed_cntr= 0
+
+#else /* MTPERF not defined */
+/* Define empty macros */
+
+#define MTPERF_NEW_SEGMENT(segment_name,estimated_tick_latency)
+
+#define MTPERF_EXTERN_SEGMENT(segment_name)
+
+#define MTPERF_TIME_START(segment_name)
+
+#define MTPERF_TIME_PAUSE(segment_name)
+
+#define MTPERF_TIME_CONT(segment_name)
+
+#define MTPERF_TIME_END(segment_name)
+
+#define MTPERF_TIME_END_IF(segment_name,condition)
+
+#define MTPERF_REPORT(segment_name,samples_cnt_p,total_ticks_p,exceed_cntr_p)
+
+#define MTPERF_REPORT_PRINTF(segment_name)
+
+#define MTPERF_RESET(segment_name)
+
+
+#endif /* MTPERF */
+#endif /* #ifndef H_MTPERF_H */
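+
+/* Editor's note: a short usage sketch - not part of the original header -
+ * for the MTPERF macros above. The function and segment names are invented
+ * for illustration; compile with -DMTPERF to enable measurement. */
+#if 0 /* illustration only - not compiled */
+MTPERF_NEW_SEGMENT(post_send, 1000);      /* latency_limit = 1000 ticks * 100 */
+
+void hot_path(void)
+{
+    MTPERF_TIME_START(post_send);         /* sample the start time */
+    /* ... code being measured ... */
+    MTPERF_TIME_END(post_send);           /* accumulate, or count as exceeded */
+}
+
+void report(void)
+{
+    MTPERF_REPORT_PRINTF(post_send);      /* print count / total / average */
+}
+#endif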
diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108.h
new file mode 100644
index 00000000..d055b181
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_MT23108_H
+#define H_MT23108_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#endif
diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM.h
new file mode 100644
index 00000000..457bbdd6
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM.h
@@ -0,0 +1,2506 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/***
+ *** This file was generated at "Thu Apr 29 10:25:56 2004"
+ *** by:
+ *** % csp_bf -copyright=/mswg/misc/license-header.txt -prefix tavorprm_ -bits -fixnames MT23108_PRM.csp
+ ***/
+
+#ifndef H_prefix_tavorprm_bits_fixnames_MT23108_PRM_csp_H
+#define H_prefix_tavorprm_bits_fixnames_MT23108_PRM_csp_H
+
+
+/* Send doorbell */
+
+struct tavorprm_send_doorbell_st { /* Little Endian */
+ pseudo_bit_t nopcode[0x00005]; /* Opcode of the descriptor to be executed */
+ pseudo_bit_t f[0x00001]; /* Fence bit. If set, the descriptor is fenced */
+ pseudo_bit_t nda[0x0001a]; /* Bits 31:6 of the descriptor's virtual address */
+/* -------------- */
+ pseudo_bit_t nds[0x00006]; /* Next descriptor size (in 16-byte chunks) */
+ pseudo_bit_t reserved0[0x00002];
+ pseudo_bit_t qpn[0x00018]; /* QP number this doorbell is rung on */
+/* -------------- */
+};
+
+/* Address Path */
+
+struct tavorprm_address_path_st { /* Little Endian */
+ pseudo_bit_t pkey_index[0x00007]; /* PKey table index */
+ pseudo_bit_t reserved0[0x00011];
+ pseudo_bit_t port_number[0x00002]; /* Specific port associated with this QP/EE.
+ 1 - Port 1
+ 2 - Port 2
+ other - reserved */
+ pseudo_bit_t reserved1[0x00006];
+/* -------------- */
+ pseudo_bit_t rlid[0x00010]; /* Remote (Destination) LID */
+ pseudo_bit_t my_lid_path_bits[0x00007];/* Source LID - the lower 7 bits (upper bits are taken from PortInfo) */
+ pseudo_bit_t g[0x00001]; /* Global address enable - if set, a GRH will be formed for the packet header */
+ pseudo_bit_t reserved2[0x00005];
+ pseudo_bit_t rnr_retry[0x00003]; /* RNR retry count (see C9-132 in IB spec Vol 1)
+ 0-6 - number of retries
+ 7 - infinite */
+/* -------------- */
+ pseudo_bit_t hop_limit[0x00008]; /* IPv6 hop limit */
+ pseudo_bit_t max_stat_rate[0x00003];/* Maximum static rate control.
+ 0 - 4X injection rate
+ 1 - 1X injection rate
+ other - reserved */
+ pseudo_bit_t reserved3[0x00005];
+ pseudo_bit_t mgid_index[0x00006]; /* Index into the port GID table */
+ pseudo_bit_t reserved4[0x00005];
+ pseudo_bit_t ack_timeout[0x00005]; /* Local ACK timeout - transport timer for activation of the retransmission mechanism. Refer to IB spec Vol 1, 9.7.6.1.3 for further details.
+ The transport timer is set to 4.096us*2^ack_timeout; if ack_timeout is 0, the transport timer is disabled. */
+/* -------------- */
+ pseudo_bit_t flow_label[0x00014]; /* IPv6 flow label */
+ pseudo_bit_t tclass[0x00008]; /* IPv6 TClass */
+ pseudo_bit_t sl[0x00004]; /* InfiniBand Service Level (SL) */
+/* -------------- */
+ pseudo_bit_t rgid_127_96[0x00020]; /* Remote GID[127:96] */
+/* -------------- */
+ pseudo_bit_t rgid_95_64[0x00020]; /* Remote GID[95:64] */
+/* -------------- */
+ pseudo_bit_t rgid_63_32[0x00020]; /* Remote GID[63:32] */
+/* -------------- */
+ pseudo_bit_t rgid_31_0[0x00020]; /* Remote GID[31:0] */
+/* -------------- */
+};
+
+/* HCA Command Register (HCR) */
+
+struct tavorprm_hca_command_register_st { /* Little Endian */
+ pseudo_bit_t in_param_h[0x00020]; /* Input Parameter: parameter[63:32] or pointer[63:32] to input mailbox (see command description) */
+/* -------------- */
+ pseudo_bit_t in_param_l[0x00020]; /* Input Parameter: parameter[31:0] or pointer[31:0] to input mailbox (see command description) */
+/* -------------- */
+ pseudo_bit_t input_modifier[0x00020];/* Input Parameter Modifier */
+/* -------------- */
+ pseudo_bit_t out_param_h[0x00020]; /* Output Parameter: parameter[63:32] or pointer[63:32] to output mailbox (see command description) */
+/* -------------- */
+ pseudo_bit_t out_param_l[0x00020]; /* Output Parameter: parameter[31:0] or pointer[31:0] to output mailbox (see command description) */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00010];
+ pseudo_bit_t token[0x00010]; /* Software-assigned token for the command, to uniquely identify it. The token is returned to the software in the reported EQE. */
+/* -------------- */
+ pseudo_bit_t opcode[0x0000c]; /* Command opcode */
+ pseudo_bit_t opcode_modifier[0x00004];/* Opcode Modifier, see the specific description for each command. */
+ pseudo_bit_t reserved1[0x00006];
+ pseudo_bit_t e[0x00001]; /* Event Request
+ 0 - Don't report an event (software will poll the GO bit)
+ 1 - Report an event to the EQ when the command completes */
+ pseudo_bit_t go[0x00001]; /* Go (0=Software ownership of the HCR, 1=Hardware ownership of the HCR)
+ Software can write to the HCR only if the Go bit is cleared.
+ Software must set the Go bit to trigger the HW to execute the command. Software must not write any value other than 1 to the Go bit. */
+ pseudo_bit_t status[0x00008]; /* Command execution status report. Valid only if the command interface is under SW ownership (Go bit is cleared).
+ 0 - command completed without error. If different from zero, command execution completed with an error. Syndrome encoding depends on the command executed and is defined for each command. */
+/* -------------- */
+};
+
+/* EQ Doorbell */
+
+struct tavorprm_eq_cmd_doorbell_st { /* Little Endian */
+ pseudo_bit_t eqn[0x00006]; /* EQ accessed */
+ pseudo_bit_t reserved0[0x00012];
+ pseudo_bit_t eq_cmd[0x00008]; /* Command to be executed on the EQ
+ 01 - increment Consumer_indx by one
+ 02 - Request notification for next event (Arm EQ)
+ 03 - Disarm CQ (CQ number is specified in EQ_param)
+ 04 - set Consumer_indx to the value of EQ_param
+ 05 - move EQ to Always Armed state
+ other - reserved */
+/* -------------- */
+ pseudo_bit_t eq_param[0x00020]; /* parameter to be used by the EQ command */
+/* -------------- */
+};
+
+/* CQ Doorbell */
+
+struct tavorprm_cq_cmd_doorbell_st { /* Little Endian */
+ pseudo_bit_t cqn[0x00018]; /* CQ number accessed */
+ pseudo_bit_t cq_cmd[0x00008]; /* Command to be executed on the CQ
+ 01 - increment Consumer_indx by cq_param plus 1
+ 02 - Request notification for the next Solicited or Unsolicited completion event. CQ_param must contain the last successfully polled consumer index. For newly created CQs, CQ_param should contain (-1) modulo the CQ size. When working with CQs with overrun detection, CQ_param can be set to 0xFFFFFFFF (HW will use the last polled index).
+ 03 - Request notification for the next Solicited completion event. CQ_param must contain the last successfully polled consumer index. For newly created CQs, CQ_param should contain (-1) modulo the CQ size. When working with CQs with overrun detection, CQ_param can be set to 0xFFFFFFFF (HW will use the last polled index).
+ 04 - set Consumer_indx to the value of CQ_param
+ other - reserved */
+/* -------------- */
+ pseudo_bit_t cq_param[0x00020]; /* parameter to be used by the CQ command */
+/* -------------- */
+};
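+
+/* Editor's note: a hedged sketch - not from the original header - of the HCR
+ * hand-off described above: software may write the HCR only while the go bit
+ * is clear, and setting go passes ownership to hardware. The volatile-pointer
+ * access and dword indices are assumptions for illustration (a real driver
+ * would also handle byte ordering and the event/status paths); the go bit
+ * position, bit 23 of the 7th dword, follows from the layout above. */
+#if 0 /* illustration only - not compiled */
+#include <stdint.h>
+#define HCR_GO (1u << 23)
+static void hcr_post_cmd(volatile uint32_t *hcr, uint64_t in_param,
+                         uint32_t in_mod, uint16_t token, uint32_t opcode)
+{
+    while (hcr[6] & HCR_GO)                  /* poll until SW owns the HCR */
+        ;
+    hcr[0] = (uint32_t)(in_param >> 32);     /* in_param_h */
+    hcr[1] = (uint32_t)in_param;             /* in_param_l */
+    hcr[2] = in_mod;                         /* input_modifier */
+    hcr[5] = (uint32_t)token << 16;          /* token sits in bits 31:16 */
+    hcr[6] = opcode | HCR_GO;                /* hand ownership to HW */
+}
+#endif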
+
+/* Receive doorbell */
+
+struct tavorprm_receive_doorbell_st { /* Little Endian */
+ pseudo_bit_t nds[0x00006]; /* Next descriptor size (in 16-byte chunks)
+ Must be zero for SRQ doorbells */
+ pseudo_bit_t nda[0x0001a]; /* Bits 31:6 of the descriptor's virtual address */
+/* -------------- */
+ pseudo_bit_t credits[0x00008]; /* Amount of credits (length of the chain) posted with the doorbell on the receive queue. A chain of up to 256 descriptors can be linked with a single doorbell. A zero value in this field means 256. */
+ pseudo_bit_t qpn[0x00018]; /* QP number or SRQ number this doorbell is rung on */
+/* -------------- */
+};
+
+/* RD-send doorbell */
+
+struct tavorprm_rd_send_doorbell_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t een[0x00018]; /* End-to-end context number (reliable datagram)
+ Must be zero for Nop and Bind operations */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00008];
+ pseudo_bit_t qpn[0x00018]; /* QP number this doorbell is rung on */
+/* -------------- */
+ struct tavorprm_send_doorbell_st snd_params;/* Send parameters */
+/* -------------- */
+};
+
+/* Multicast Group Member QP */
+
+struct tavorprm_mgmqp_st { /* Little Endian */
+ pseudo_bit_t qpn_i[0x00018]; /* QPN_i: QP number which is a member of this multicast group. Valid only if the Qi bit is set. The length of the QPN_i list is set in INIT_HCA */
+ pseudo_bit_t reserved0[0x00007];
+ pseudo_bit_t qi[0x00001]; /* Qi: QPN_i is valid */
+/* -------------- */
+};
+
+/* Logical DIMM Information */
+
+struct tavorprm_dimminfo_st { /* Little Endian */
+ pseudo_bit_t dimmsize[0x00010]; /* Size of DIMM in units of 2^20 Bytes. This value is valid only when DIMMStatus is 0. */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t dimmstatus[0x00001]; /* DIMM Status
+ 0 - Enabled
+ 1 - Disabled */
+ pseudo_bit_t dh[0x00001]; /* When set, the DIMM is hidden and can not be accessed from the PCI bus. */
+ pseudo_bit_t wo[0x00001]; /* When set, the DIMM is write only.
+ If data integrity is configured (other than none), the DIMM must
+ only be targeted by write transactions where the address and size are multiples of 16 bytes. */
+ pseudo_bit_t reserved1[0x00005];
+/* -------------- */
+ pseudo_bit_t spd[0x00001]; /* 0 - DIMM SPD was read from the DIMM
+ 1 - DIMM SPD was read from InfiniHost NVMEM */
+ pseudo_bit_t sladr[0x00003]; /* SPD Slave Address, 3 LSBits.
+ Valid only if the spd bit is 0. */
+ pseudo_bit_t sock_num[0x00002]; /* DIMM socket number (for a double-sided DIMM one of the two numbers will be reported) */
+ pseudo_bit_t syn[0x00004]; /* Error syndrome (valid regardless of status value)
+ 0 - DIMM has no error
+ 1 - SPD error (e.g. checksum error, no response, error while reading)
+ 2 - DIMM out of bounds (e.g. DIMM rows number is not between 7 and 14, DIMM type is not 2)
+ 3 - DIMM conflict (e.g. a mix of registered and unbuffered DIMMs, CAS latency conflict)
+ 5 - DIMM size trimmed due to configuration (size exceeds)
+ other - Error, reserved */
+ pseudo_bit_t reserved2[0x00016];
+/* -------------- */
+ pseudo_bit_t vendor_id_h[0x00020]; /* JEDEC Manufacturer ID[63:32] */
+/* -------------- */
+ pseudo_bit_t vendor_id_l[0x00020]; /* JEDEC Manufacturer ID[31:0] */
+/* -------------- */
+ pseudo_bit_t dimm_start_adr_h[0x00020];/* DDR memory start address [63:32]. This value is valid only when DIMMStatus is 0. */
+/* -------------- */
+ pseudo_bit_t dimm_start_adr_l[0x00020];/* DDR memory start address [31:0]. This value is valid only when DIMMStatus is 0. */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00040];
+/* -------------- */
+};
+
+/* UAR Parameters */
+
+struct tavorprm_uar_params_st { /* Little Endian */
+ pseudo_bit_t uar_base_addr_h[0x00020];/* UAR Base Address [63:32] (QUERY_HCA only) */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00014];
+ pseudo_bit_t uar_base_addr_l[0x0000c];/* UAR Base Address [31:20] (QUERY_HCA only) */
+/* -------------- */
+ pseudo_bit_t uar_page_sz[0x00008]; /* This field defines the size of each UAR page.
+ Size of UAR Page is 4KB*2^UAR_Page_Size */
+ pseudo_bit_t reserved1[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved2[0x00020];
+/* -------------- */
+ pseudo_bit_t uar_scratch_base_addr_h[0x00020];/* Base address of UAR scratchpad [63:32].
+ Number of entries in table is UAR BAR size divided by UAR Page Size.
+ Table must be aligned to entry size. */
+/* -------------- */
+ pseudo_bit_t uar_scratch_base_addr_l[0x00020];/* Base address of UAR scratchpad [31:0].
+ Number of entries in table is UAR BAR size divided by UAR Page Size.
+ Table must be aligned to entry size. */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00040];
+/* -------------- */
+};
+
+/* Translation and Protection Tables Parameters */
+
+struct tavorprm_tptparams_st { /* Little Endian */
+ pseudo_bit_t mpt_base_adr_h[0x00020];/* MPT - Memory Protection Table base physical address [63:32].
+ Entry size is 64 bytes.
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t mpt_base_adr_l[0x00020];/* MPT - Memory Protection Table base physical address [31:0].
+ Entry size is 64 bytes.
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t log_mpt_sz[0x00006]; /* Log (base 2) of the number of region/windows entries in the MPT table. */
+ pseudo_bit_t reserved0[0x00002];
+ pseudo_bit_t pfto[0x00005]; /* Page Fault RNR Timeout -
+ The field returned in RNR Naks generated when a page fault is detected.
+ It has no effect when on-demand-paging is not used. */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t mtt_segment_size[0x00003];/* The size of an MTT segment is 64*2^MTT_Segment_Size bytes */
+ pseudo_bit_t reserved2[0x0000d];
+/* -------------- */
+ pseudo_bit_t mtt_version[0x00008]; /* Version of MTT page walk. Must be zero */
+ pseudo_bit_t reserved3[0x00018];
+/* -------------- */
+ pseudo_bit_t mtt_base_addr_h[0x00020];/* MTT - Memory Translation table base physical address [63:32].
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported.
+
+/* Translation and Protection Tables Parameters */
+
+struct tavorprm_tptparams_st { /* Little Endian */
+ pseudo_bit_t mpt_base_adr_h[0x00020];/* MPT - Memory Protection Table base physical address [63:32].
+ Entry size is 64 bytes.
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t mpt_base_adr_l[0x00020];/* MPT - Memory Protection Table base physical address [31:0].
+ Entry size is 64 bytes.
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t log_mpt_sz[0x00006]; /* Log (base 2) of the number of region/windows entries in the MPT table. */
+ pseudo_bit_t reserved0[0x00002];
+ pseudo_bit_t pfto[0x00005]; /* Page Fault RNR Timeout -
+ The field returned in RNR Naks generated when a page fault is detected.
+ It has no effect when on-demand-paging is not used. */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t mtt_segment_size[0x00003];/* The size of MTT segment is 64*2^MTT_Segment_Size bytes */
+ pseudo_bit_t reserved2[0x0000d];
+/* -------------- */
+ pseudo_bit_t mtt_version[0x00008]; /* Version of MTT page walk. Must be zero */
+ pseudo_bit_t reserved3[0x00018];
+/* -------------- */
+ pseudo_bit_t mtt_base_addr_h[0x00020];/* MTT - Memory Translation table base physical address [63:32].
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t mtt_base_addr_l[0x00020];/* MTT - Memory Translation table base physical address [31:0].
+ Table must be aligned to its size.
+ Address may be set to zero if address translation and protection is not supported. */
+/* -------------- */
+ pseudo_bit_t reserved4[0x00040];
+/* -------------- */
+};
+
+/* Multicast Support Parameters */
+
+struct tavorprm_multicastparam_st { /* Little Endian */
+ pseudo_bit_t mc_base_addr_h[0x00020];/* Base Address of the Multicast Table [63:32].
+ The base address must be aligned to the entry size.
+ Address may be set to zero if multicast is not supported. */
+/* -------------- */
+ pseudo_bit_t mc_base_addr_l[0x00020];/* Base Address of the Multicast Table [31:0].
+ The base address must be aligned to the entry size.
+ Address may be set to zero if multicast is not supported. */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00040];
+/* -------------- */
+ pseudo_bit_t log_mc_table_entry_sz[0x00010];/* Log2 of the Size of multicast group member (MGM) entry.
+ Must be greater than 5 (to allow CTRL and GID sections).
+ That implies the number of QPs per MC table entry. */
+ pseudo_bit_t reserved1[0x00010];
+/* -------------- */
+ pseudo_bit_t mc_table_hash_sz[0x00011];/* Number of entries in multicast DGID hash table (must be power of 2)
+ INIT_HCA - the required number of entries
+ QUERY_HCA - the actual number of entries assigned by firmware (will be less than or equal to the amount required in INIT_HCA) */
+ pseudo_bit_t reserved2[0x0000f];
+/* -------------- */
+ pseudo_bit_t log_mc_table_sz[0x00005];/* Log2 of the overall number of MC entries in the MCG table (includes both hash and auxiliary tables) */
+ pseudo_bit_t reserved3[0x00013];
+ pseudo_bit_t mc_hash_fn[0x00003]; /* Multicast hash function
+ 0 - Default hash function
+ other - reserved */
+ pseudo_bit_t reserved4[0x00005];
+/* -------------- */
+ pseudo_bit_t reserved5[0x00020];
+/* -------------- */
+};
+
+/* Memory Access Parameters for UD Address Vector Table */
+
+struct tavorprm_udavtable_memory_parameters_st { /* Little Endian */
+ pseudo_bit_t l_key[0x00020]; /* L_Key used to access TPT */
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* PD used by TPT for matching against PD of region entry being accessed. */
+ pseudo_bit_t reserved0[0x00005];
+ pseudo_bit_t xlation_en[0x00001]; /* When cleared, address is physical address and no translation will be done. When set, address is virtual. TPT will be accessed in both cases for address decoding purposes. */
+ pseudo_bit_t reserved1[0x00002];
+/* -------------- */
+};
+
+/* QPC/EEC/CQC/EQC/RDB Parameters */
+
+struct tavorprm_qpcbaseaddr_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00080];
+/* -------------- */
+ pseudo_bit_t qpc_base_addr_h[0x00020];/* QPC Base Address [63:32]
+ Table must be aligned on its size */
+/* -------------- */
+ pseudo_bit_t log_num_of_qp[0x00005];/* Log base 2 of number of supported QPs */
+ pseudo_bit_t reserved1[0x00002];
+ pseudo_bit_t qpc_base_addr_l[0x00019];/* QPC Base Address [31:7]
+ Table must be aligned on its size */
+/* -------------- */
+ pseudo_bit_t reserved2[0x00040];
+/* -------------- */
+ pseudo_bit_t eec_base_addr_h[0x00020];/* EEC Base Address [63:32]
+ Table must be aligned on its size.
+ Address may be set to zero if RD is not supported. */
+/* -------------- */
+ pseudo_bit_t log_num_of_ee[0x00005];/* Log base 2 of number of supported EEs.
*/ + pseudo_bit_t reserved3[0x00002]; + pseudo_bit_t eec_base_addr_l[0x00019];/* EEC Base Address [31:7] + Table must be aligned on its size + Address may be set to zero if RD is not supported. */ +/* -------------- */ + pseudo_bit_t srqc_base_addr_h[0x00020];/* SRQ Context Base Address [63:32] + Table must be aligned on its size + Address may be set to zero if SRQ is not supported. */ +/* -------------- */ + pseudo_bit_t log_num_of_srq[0x00005];/* Log base 2 of number of supported SRQs. */ + pseudo_bit_t srqc_base_addr_l[0x0001b];/* SRQ Context Base Address [31:5] + Table must be aligned on its size + Address may be set to zero if SRQ is not supported. */ +/* -------------- */ + pseudo_bit_t cqc_base_addr_h[0x00020];/* CQC Base Address [63:32] + Table must be aligned on its size */ +/* -------------- */ + pseudo_bit_t log_num_of_cq[0x00005];/* Log base 2 of number of supported CQs. */ + pseudo_bit_t reserved4[0x00001]; + pseudo_bit_t cqc_base_addr_l[0x0001a];/* CQC Base Address [31:6] + Table must be aligned on its size */ +/* -------------- */ + pseudo_bit_t reserved5[0x00040]; +/* -------------- */ + pseudo_bit_t eqpc_base_addr_h[0x00020];/* Extended QPC Base Address [63:32] + Table has same number of entries as QPC table. + Table must be aligned to entry size. */ +/* -------------- */ + pseudo_bit_t eqpc_base_addr_l[0x00020];/* Extended QPC Base Address [31:0] + Table has same number of entries as QPC table. + Table must be aligned to entry size. */ +/* -------------- */ + pseudo_bit_t reserved6[0x00040]; +/* -------------- */ + pseudo_bit_t eeec_base_addr_h[0x00020];/* Extended EEC Base Address [63:32] + Table has same number of entries as EEC table. + Table must be aligned to entry size. + Address may be set to zero if RD is not supported. */ +/* -------------- */ + pseudo_bit_t eeec_base_addr_l[0x00020];/* Extended EEC Base Address [31:0] + Table has same number of entries as EEC table. + Table must be aligned to entry size. + Address may be set to zero if RD is not supported. */ +/* -------------- */ + pseudo_bit_t reserved7[0x00040]; +/* -------------- */ + pseudo_bit_t eqc_base_addr_h[0x00020];/* EQC Base Address [63:32] + Address may be set to zero if EQs are not supported. + Table must be aligned to entry size. */ +/* -------------- */ + pseudo_bit_t log_num_eq[0x00004]; /* Log base 2 of number of supported EQs. + Must be 6 or less in InfiniHost. */ + pseudo_bit_t reserved8[0x00002]; + pseudo_bit_t eqc_base_addr_l[0x0001a];/* EQC Base Address [31:6] + Address may be set to zero if EQs are not supported. + Table must be aligned to entry size. */ +/* -------------- */ + pseudo_bit_t reserved9[0x00040]; +/* -------------- */ + pseudo_bit_t rdb_base_addr_h[0x00020];/* Base address of table that holds remote read and remote atomic requests [63:32]. + Table must be aligned to RDB entry size (32 bytes). + Address may be set to zero if remote RDMA reads are not supported. + Please refer to QP and EE chapter for further explanation on RDB allocation. */ +/* -------------- */ + pseudo_bit_t rdb_base_addr_l[0x00020];/* Base address of table that holds remote read and remote atomic requests [31:0]. + Table must be aligned to RDB entry size (32 bytes). + This field must always be zero. + Please refer to QP and EE chapter for further explanation on RDB allocation. 
*/
+/* -------------- */
+ pseudo_bit_t reserved10[0x00040];
+/* -------------- */
+};
+
+/* Performance Monitors */
+
+struct tavorprm_performance_monitors_st { /* Little Endian */
+ pseudo_bit_t e0[0x00001]; /* Enables counting of respective performance counter */
+ pseudo_bit_t e1[0x00001]; /* Enables counting of respective performance counter */
+ pseudo_bit_t e2[0x00001]; /* Enables counting of respective performance counter */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t r0[0x00001]; /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to the counter */
+ pseudo_bit_t r1[0x00001]; /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to the counter */
+ pseudo_bit_t r2[0x00001]; /* If written to as '1 - resets respective performance counter, if written to as '0 - no change to the counter */
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t i0[0x00001]; /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+ pseudo_bit_t i1[0x00001]; /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+ pseudo_bit_t i2[0x00001]; /* Interrupt enable on respective counter overflow. '1 - interrupt enabled, '0 - interrupt disabled. */
+ pseudo_bit_t reserved2[0x00001];
+ pseudo_bit_t f0[0x00001]; /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+ pseudo_bit_t f1[0x00001]; /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+ pseudo_bit_t f2[0x00001]; /* Overflow flag. If set, overflow occurred on respective counter. Cleared if written to as '1 */
+ pseudo_bit_t reserved3[0x00001];
+ pseudo_bit_t ev_cnt1[0x00005]; /* Specifies event to be counted by Event_counter1. See XXX for events' definition. */
+ pseudo_bit_t reserved4[0x00003];
+ pseudo_bit_t ev_cnt2[0x00005]; /* Specifies event to be counted by Event_counter2. See XXX for events' definition. */
+ pseudo_bit_t reserved5[0x00003];
+/* -------------- */
+ pseudo_bit_t clock_counter[0x00020];
+/* -------------- */
+ pseudo_bit_t event_counter1[0x00020];
+/* -------------- */
+ pseudo_bit_t event_counter2[0x00020];/* Read/write event counters, counting events specified by the EvCnt1 and EvCnt2 fields respectively. When an event counter reaches its maximum value of 0xFFFFFFFF, the next event will cause it to roll over to zero, set the F1 or F2 bit respectively and generate an interrupt if enabled by the I1 or I2 bit respectively. */
+/* -------------- */
+};
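A minimal sketch of driving the monitor register above, assuming a plain 32-bit view of its first dword; the bit offsets follow the field order of tavorprm_performance_monitors_st, and the 5-bit event selector values (the "XXX" reference above) are left as parameters.

#include <stdint.h>

/* Build the control dword: enable all three counters, reset them, and
 * enable overflow interrupts; ev1/ev2 select the events counted by
 * Event_counter1 and Event_counter2. */
static uint32_t pm_control_word(unsigned ev1, unsigned ev2)
{
    uint32_t w = 0;
    w |= 0x7u << 0;           /* e0..e2: enable counting */
    w |= 0x7u << 4;           /* r0..r2: reset the counters */
    w |= 0x7u << 8;           /* i0..i2: interrupt on overflow */
    w |= (ev1 & 0x1fu) << 16; /* ev_cnt1 selector */
    w |= (ev2 & 0x1fu) << 24; /* ev_cnt2 selector */
    return w;
}

/* Overflow flags f0..f2 (bits 12..14) are write-1-to-clear. */
static uint32_t pm_clear_overflow_flags(void)
{
    return 0x7u << 12;
}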
+
+/* QP and EE Context Entry */
+
+struct tavorprm_queue_pair_ee_context_entry_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t de[0x00001]; /* Send/Receive Descriptor Event enable - if set, events can be generated upon descriptors' completion on send/receive queue (controlled by E bit in WQE). Invalid in EE context */
+ pseudo_bit_t reserved1[0x00002];
+ pseudo_bit_t pm_state[0x00002]; /* Path migration state (Migrated, Armed or Rearm)
+ 11-Migrated
+ 00-Armed
+ 01-Rearm
+ 10-Reserved
+ Should be set to 11 for UD QPs and for QPs which do not support APM */
+ pseudo_bit_t reserved2[0x00003];
+ pseudo_bit_t st[0x00003]; /* Service type (invalid in EE context):
+ 000-Reliable Connection
+ 001-Unreliable Connection
+ 010-Reliable Datagram (Not supported for InfiniHost MT23108)
+ 011-Unreliable Datagram
+ 111-MLX transport (raw bits injection). Used for management QPs and RAW */
+ pseudo_bit_t reserved3[0x00009];
+ pseudo_bit_t state[0x00004]; /* QP/EE state:
+ 0 - RST
+ 1 - INIT
+ 2 - RTR
+ 3 - RTS
+ 4 - SQEr
+ 5 - SQD (Send Queue Drained)
+ 6 - ERR
+ 7 - Send Queue Draining
+ 8 - F - RESERVED
+ (Valid for QUERY_QPEE and ERR2RST_QPEE commands only) */
+/* -------------- */
+ pseudo_bit_t sched_queue[0x00004]; /* Schedule queue to be used for WQE scheduling to execution. Determines QOS for this QP. */
+ pseudo_bit_t reserved4[0x0001c];
+/* -------------- */
+ pseudo_bit_t reserved5[0x00018];
+ pseudo_bit_t msg_max[0x00005]; /* Max message size allowed on the QP. Maximum message size is 2^msg_Max.
+ Must be equal to MTU for UD and MLX QPs. */
+ pseudo_bit_t mtu[0x00003]; /* MTU of the QP (Must be the same for both paths: primary and alternative):
+ 0x1 - 256 bytes
+ 0x2 - 512
+ 0x3 - 1024
+ 0x4 - 2048
+ other - reserved
+
+ Should be configured to 0x4 for UD and MLX QPs. */
+/* -------------- */
+ pseudo_bit_t usr_page[0x00018]; /* Index (offset) of user page allocated for this QP (see "non_privileged Access to the HCA Hardware"). Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved6[0x00008];
+/* -------------- */
+ pseudo_bit_t local_qpn_een[0x00018];/* Local QP/EE number. Lower bits determine the position of this record in the QPC table and are thus constrained.
+ This field is valid for QUERY and ERR2RST commands only. */
+ pseudo_bit_t reserved7[0x00008];
+/* -------------- */
+ pseudo_bit_t remote_qpn_een[0x00018];/* Remote QP/EE number */
+ pseudo_bit_t reserved8[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved9[0x00040];
+/* -------------- */
+ struct tavorprm_address_path_st primary_address_path;/* Primary address path for the QP/EE */
+/* -------------- */
+ struct tavorprm_address_path_st alternative_address_path;/* Alternate address path for the QP/EE */
+/* -------------- */
+ pseudo_bit_t rdd[0x00018]; /* Reliable Datagram Domain */
+ pseudo_bit_t reserved10[0x00008];
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* QP protection domain. Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved11[0x00008];
+/* -------------- */
+ pseudo_bit_t wqe_base_adr[0x00020]; /* Bits 63:32 of WQE address for both SQ and RQ.
+ Reserved for EE context. */
+/* -------------- */
+ pseudo_bit_t wqe_lkey[0x00020]; /* memory key (L-Key) to be used to access WQEs. Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t reserved12[0x00003];
+ pseudo_bit_t ssc[0x00001]; /* Send Signaled Completion
+ 1 - all send WQEs generate CQEs.
+ 0 - only send WQEs with C bit set generate completion.
+ Not valid (reserved) in EE context. */
+ pseudo_bit_t sic[0x00001]; /* If set - Ignore end to end credits on send queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t cur_retry_cnt[0x00003];/* Current transport retry counter (QUERY_QPEE only).
+ The current transport retry counter can vary from retry_count down to 1, where 1 means that the last retry attempt is currently executing. */
+ pseudo_bit_t cur_rnr_retry[0x00003];/* Current RNR retry counter (QUERY_QPEE only).
+ The current RNR retry counter can vary from rnr_retry to 1, where 1 means that the last retry attempt is currently executing. */
+ pseudo_bit_t reserved13[0x00002];
+ pseudo_bit_t sae[0x00001]; /* If set - Atomic operations enabled on send queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t swe[0x00001]; /* If set - RDMA - write enabled on send queue. Not valid (reserved) in EE context.
*/
+ pseudo_bit_t sre[0x00001]; /* If set - RDMA - read enabled on send queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t retry_count[0x00003]; /* Transport timeout Retry count */
+ pseudo_bit_t reserved14[0x00002];
+ pseudo_bit_t sra_max[0x00003]; /* Maximum number of outstanding RDMA-read/Atomic operations allowed in the send queue. Maximum number is 2^SRA_Max. Must be zero in EE context. */
+ pseudo_bit_t flight_lim[0x00004]; /* Number of outstanding (in-flight) messages on the wire allowed for this send queue.
+ Number of outstanding messages is 2^Flight_Lim.
+ Use 0xF for unlimited number of outstanding messages. */
+ pseudo_bit_t ack_req_freq[0x00004]; /* ACK required frequency. ACK required bit will be set in every 2^AckReqFreq packets at least. Not valid for RD QP. */
+/* -------------- */
+ pseudo_bit_t reserved15[0x00020];
+/* -------------- */
+ pseudo_bit_t next_send_psn[0x00018];/* Next PSN to be sent */
+ pseudo_bit_t reserved16[0x00008];
+/* -------------- */
+ pseudo_bit_t cqn_snd[0x00018]; /* CQ number completions from the send queue to be reported to. Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved17[0x00008];
+/* -------------- */
+ pseudo_bit_t next_snd_wqe_0[0x00020];/* Pointer and properties of next WQE on send queue. The format is same as next segment (first 8 bytes) in the WQE. This field is read-only and provided for debug purposes. Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t next_snd_wqe_1[0x00020];/* Pointer and properties of next WQE on send queue. The format is same as next segment (first 8 bytes) in the WQE. This field is read-only and provided for debug purposes. Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t last_acked_psn[0x00018];/* The last acknowledged PSN for the requester (QUERY_QPEE only) */
+ pseudo_bit_t reserved18[0x00008];
+/* -------------- */
+ pseudo_bit_t ssn[0x00018]; /* Requester Send Sequence Number (QUERY_QPEE only) */
+ pseudo_bit_t reserved19[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved20[0x00003];
+ pseudo_bit_t rsc[0x00001]; /* 1 - all receive WQEs generate CQEs.
+ 0 - only receive WQEs with C bit set generate completion.
+ Not valid (reserved) in EE context.
+ */
+ pseudo_bit_t ric[0x00001]; /* Invalid Credits.
+ 1 - place "Invalid Credits" in ACKs sent from this queue.
+ 0 - ACKs report the actual number of end to end credits on the connection.
+ Not valid (reserved) in EE context.
+ Must be set to 1 on QPs which are attached to SRQ. */
+ pseudo_bit_t reserved21[0x00008];
+ pseudo_bit_t rae[0x00001]; /* If set - Atomic operations enabled on receive queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t rwe[0x00001]; /* If set - RDMA - write enabled on receive queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t rre[0x00001]; /* If set - RDMA - read enabled on receive queue. Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved22[0x00005];
+ pseudo_bit_t rra_max[0x00003]; /* Maximum number of outstanding RDMA-read/Atomic operations allowed on receive queue is 2^RRA_Max.
+ Must be 0 for EE context. */
+ pseudo_bit_t reserved23[0x00008];
+/* -------------- */
+ pseudo_bit_t next_rcv_psn[0x00018]; /* Next (expected) PSN on receive */
+ pseudo_bit_t min_rnr_nak[0x00005]; /* Minimum RNR NAK timer value (TTTTT field encoding according to the IB spec Vol1 9.7.5.2.8).
+ Not valid (reserved) in EE context.
*/
+ pseudo_bit_t reserved24[0x00003];
+/* -------------- */
+ pseudo_bit_t reserved25[0x00005];
+ pseudo_bit_t ra_buff_indx[0x0001b]; /* Index to outstanding read/atomic buffer.
+ This field constructs the address to the RDB for maintaining the incoming RDMA read and atomic requests. */
+/* -------------- */
+ pseudo_bit_t cqn_rcv[0x00018]; /* CQ number completions from receive queue to be reported to. Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved26[0x00008];
+/* -------------- */
+ pseudo_bit_t next_rcv_wqe_0[0x00020];/* Pointer and properties of next WQE on the receive queue. This format is same as next segment (first 8 bytes) in the WQE. This field is read-only and provided for debug purposes. Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t next_rcv_wqe_1[0x00020];/* Pointer and properties of next WQE on the receive queue. This format is same as next segment (first 8 bytes) in the WQE. This field is read-only and provided for debug purposes. Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t q_key[0x00020]; /* Q_Key to be validated against received datagrams.
+ On send datagrams, if Q_Key[31] specified in the WQE is set, then this Q_Key will be transmitted in the outgoing message.
+ Not valid (reserved) in EE context. */
+/* -------------- */
+ pseudo_bit_t srqn[0x00018]; /* SRQN - Shared Receive Queue Number - specifies the SRQ number from which the QP dequeues receive descriptors.
+ SRQN is valid only if SRQ bit is set. Not valid (reserved) in EE context. */
+ pseudo_bit_t srq[0x00001]; /* SRQ - Shared Receive Queue. If this bit is set, then the QP is associated with a SRQ. Not valid (reserved) in EE context. */
+ pseudo_bit_t reserved27[0x00007];
+/* -------------- */
+ pseudo_bit_t rmsn[0x00018]; /* Responder current message sequence number (QUERY_QPEE only) */
+ pseudo_bit_t reserved28[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved29[0x00260];
+/* -------------- */
+};
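A small worked example of the mtu/msg_max encoding described in the context above: the 3-bit mtu codes 0x1..0x4 map to 256..2048 bytes, i.e. 2^(code+7), and for UD and MLX QPs msg_max must equal the MTU exponent. A minimal sketch assuming plain integer inputs:

#include <stdint.h>

/* Map the 3-bit mtu encoding (0x1=256 ... 0x4=2048) to bytes: 2^(code+7). */
static inline uint32_t qp_mtu_bytes(unsigned mtu_code)
{
    return 1u << (mtu_code + 7);
}

/* For UD and MLX QPs the context must carry mtu=0x4 (2048 bytes) and
 * msg_max=11, since 2^11 = 2048 equals the MTU. */
static inline unsigned ud_msg_max(void)
{
    return 11;
}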
+
+/* MOD_STAT_CFG */
+
+struct tavorprm_mod_stat_cfg_st { /* Little Endian */
+ pseudo_bit_t log_max_srqs[0x00005]; /* Log (base 2) of the number of SRQs to allocate (0 if no SRQs are required), valid only if srq bit is set. */
+ pseudo_bit_t reserved0[0x00001];
+ pseudo_bit_t srq[0x00001]; /* When set SRQs are supported */
+ pseudo_bit_t srq_m[0x00001]; /* Modify SRQ parameters */
+ pseudo_bit_t reserved1[0x00018];
+/* -------------- */
+ pseudo_bit_t tpt_map[0x00004];
+ pseudo_bit_t reserved2[0x00003];
+ pseudo_bit_t tpt_map_m[0x00001];
+ pseudo_bit_t reserved3[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved4[0x007c0];
+/* -------------- */
+};
+
+/* SRQ Context */
+
+struct tavorprm_srq_context_st { /* Little Endian */
+ pseudo_bit_t wqe_addr_h[0x00020]; /* WQE base address for the SRQ [63:32]
+ Must be set at SW2HW_SRQ */
+/* -------------- */
+ pseudo_bit_t ds[0x00006]; /* Descriptor Size on the SRQ in units of 16 bytes */
+ pseudo_bit_t next_wqe_addr_l[0x0001a];/* Next WQE address for the SRQ [31:6]
+ Valid only on QUERY_SRQ and HW2SW_SRQ commands. */
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* SRQ PD - used for descriptor fetching on the SRQ and for data scatter on send operations on QPs attached to SRQ.
+ In InfiniHost MT23108 SRQ.PD must be equal to the PD of all QPs which are attached to the SRQ */
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t state[0x00004]; /* SRQ State:
+ 1111 - SW Ownership
+ 0000 - HW Ownership
+ 0001 - Error
+ Valid only on QUERY_SRQ and HW2SW_SRQ commands. */
+/* -------------- */
+ pseudo_bit_t l_key[0x00020]; /* L_Key for descriptor fetching on the SRQ */
+/* -------------- */
+ pseudo_bit_t uar[0x00018]; /* SRQ User Access Region - Index (offset) of user page allocated for the SRQ (see "Non Privileged Access to the HCA HW"). */
+ pseudo_bit_t reserved1[0x00008];
+/* -------------- */
+ pseudo_bit_t wqe_cnt[0x00010]; /* WQE count on the SRQ.
+ Valid only on QUERY_SRQ and HW2SW_SRQ commands. */
+ pseudo_bit_t reserved2[0x00010];
+/* -------------- */
+ pseudo_bit_t reserved3[0x00010];
+ pseudo_bit_t reserved4[0x00010];
+/* -------------- */
+ pseudo_bit_t reserved5[0x00020];
+/* -------------- */
+};
+
+/* InfiniHost Configuration Registers */
+
+struct tavorprm_mt23108_configuration_registers_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x403400];
+/* -------------- */
+ struct tavorprm_hca_command_register_st hca_command_interface_register;/* HCA Command Register */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00320];
+/* -------------- */
+ pseudo_bit_t ecr_h[0x00020]; /* Event Cause Register[63:32]. Each bit in the ECR corresponds to one of the 64 Event Queues in InfiniHost. If bit is set, interrupt was asserted due to event reported on corresponding event queue. This register is read-only; writing to this register will cause undefined results. */
+/* -------------- */
+ pseudo_bit_t ecr_l[0x00020]; /* Event Cause Register[31:0]. Each bit in the ECR corresponds to one of the 64 Event Queues in InfiniHost. If bit is set, interrupt was asserted due to event reported on corresponding event queue. This register is read-only; writing to this register will cause undefined results. */
+/* -------------- */
+ pseudo_bit_t clr_ecr_h[0x00020]; /* Clear Event Cause Register[63:32].
+ This register is used to clear bits in ECR register. Each set bit in data written to this register clears the corresponding bit in the ECR register. Each bit written with zero has no effect. This register is write-only. Reading from this register will cause undefined result. */
+/* -------------- */
+ pseudo_bit_t clr_ecr_l[0x00020]; /* Clear Event Cause Register[31:0].
+ This register is used to clear bits in ECR register. Each set bit in data written to this register clears the corresponding bit in the ECR register. Each bit written with zero has no effect. This register is write-only. Reading from this register will cause undefined result. */
+/* -------------- */
+ pseudo_bit_t reserved2[0x4c780];
+/* -------------- */
+ pseudo_bit_t reserved3[0x01000];
+/* -------------- */
+ pseudo_bit_t reserved4[0x32f6c0];
+/* -------------- */
+ pseudo_bit_t clr_int_h[0x00020]; /* Clear Interrupt [63:32]
+ This register is used to clear (de-assert) interrupt output pins of InfiniHost. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. This register is write-only. Reading from this register will cause undefined result */
+/* -------------- */
+ pseudo_bit_t clr_int_l[0x00020]; /* Clear Interrupt [31:0]
+ This register is used to clear (de-assert) interrupt output pins of InfiniHost. The value to be written in this register is obtained by executing QUERY_ADAPTER command on command interface after system boot. This register is write-only.
Reading from this register will cause undefined result */ +/* -------------- */ + pseudo_bit_t reserved5[0x7f900]; +/* -------------- */ +}; + +/* Schedule queues configuration */ + +struct tavorprm_cfg_schq_st { /* Little Endian */ + pseudo_bit_t quota[0x00008]; /* Number of WQEs that are executed until preemption of the scheduling queue and switching to the next schedule queue */ + pseudo_bit_t reserved0[0x00018]; +/* -------------- */ + pseudo_bit_t rqsq0[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq0[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq1[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq1[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq2[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq2[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq3[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq3[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq4[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq4[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq5[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq5[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq6[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq6[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq7[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq7[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq8[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq8[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq9[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq9[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq10[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq10[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq11[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq11[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq12[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq12[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq13[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq13[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq14[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq14[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq15[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq15[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq16[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq16[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq17[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq17[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq18[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq18[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq19[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq19[0x00008]; /* Weight for 
responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq20[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq20[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq21[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq21[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq22[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq22[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq23[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq23[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq24[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq24[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq25[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq25[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq26[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq26[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq27[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq27[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq28[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq28[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq29[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq29[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t rqsq30[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq30[0x00008]; /* Weight for responder schedule queue */ + pseudo_bit_t rqsq31[0x00008]; /* Weight for requestor schedule queue */ + pseudo_bit_t rssq31[0x00008]; /* Weight for responder schedule queue */ +/* -------------- */ + pseudo_bit_t reserved1[0x005e0]; +/* -------------- */ +}; + +/* Query BAR */ + +struct tavorprm_query_bar_st { /* Little Endian */ + pseudo_bit_t bar_base_h[0x00020]; /* BAR base [63:32] */ +/* -------------- */ + pseudo_bit_t reserved0[0x00014]; + pseudo_bit_t bar_base_l[0x0000c]; /* BAR base [31:20] */ +/* -------------- */ +}; + +/* Performance Counters */ + +struct tavorprm_performance_counters_st { /* Little Endian */ + pseudo_bit_t sqpc_access_cnt[0x00020];/* SQPC cache access count */ +/* -------------- */ + pseudo_bit_t sqpc_miss_cnt[0x00020];/* SQPC cache miss count */ +/* -------------- */ + pseudo_bit_t reserved0[0x00040]; +/* -------------- */ + pseudo_bit_t rqpc_access_cnt[0x00020];/* RQPC cache access count */ +/* -------------- */ + pseudo_bit_t rqpc_miss_cnt[0x00020];/* RQPC cache miss count */ +/* -------------- */ + pseudo_bit_t reserved1[0x00040]; +/* -------------- */ + pseudo_bit_t cqc_access_cnt[0x00020];/* CQC cache access count */ +/* -------------- */ + pseudo_bit_t cqc_miss_cnt[0x00020]; /* CQC cache miss count */ +/* -------------- */ + pseudo_bit_t reserved2[0x00040]; +/* -------------- */ + pseudo_bit_t tpt_access_cnt[0x00020];/* TPT cache access count */ +/* -------------- */ + pseudo_bit_t mpt_miss_cnt[0x00020]; /* MPT cache miss count */ +/* -------------- */ + pseudo_bit_t mtt_miss_cnt[0x00020]; /* MTT cache miss count */ +/* -------------- */ + pseudo_bit_t reserved3[0x00620]; +/* -------------- */ +}; + +/* Transport and CI Error Counters */ + +struct tavorprm_transport_and_ci_error_counters_st { /* Little Endian */ + pseudo_bit_t 
rq_num_lle[0x00020]; /* Responder - number of local length errors.
+ Local Length Errors: Inbound "Send" request message exceeded the responder's available buffer space. */
+/* -------------- */
+ pseudo_bit_t sq_num_lle[0x00020]; /* Requester - number of local length errors.
+ Length Errors: RDMA READ response message contained too much or too little payload data. */
+/* -------------- */
+ pseudo_bit_t rq_num_lqpoe[0x00020]; /* Responder - number of local QP operation errors.
+ 1. Malformed WQE: Responder detected a malformed Receive Queue WQE while processing the packet.
+ 2. Local QP Error: Responder detected a local QP related error while executing the request message. The local error prevented the responder from completing the request. */
+/* -------------- */
+ pseudo_bit_t sq_num_lqpoe[0x00020]; /* Requester - number of local QP operation errors.
+ 1. Local Operation Error: (WQE gather, affiliated or unaffiliated): An error occurred in the requester's local channel interface that either cannot be associated with a certain WQE, or occurred when reading a WQE. */
+/* -------------- */
+ pseudo_bit_t rq_num_leeoe[0x00020]; /* Responder - number of local EE operation errors.
+ RD */
+/* -------------- */
+ pseudo_bit_t sq_num_leeoe[0x00020]; /* Requester - number of local EE operation errors.
+ RD */
+/* -------------- */
+ pseudo_bit_t rq_num_lpe[0x00020]; /* Responder - number of local protection errors.
+ Local QP (Protection) Error: Responder detected a local access violation error while executing a send request message. The error prevented the responder from completing the request. */
+/* -------------- */
+ pseudo_bit_t sq_num_lpe[0x00020]; /* Requester - number of local protection errors.
+ Local Memory Protection Error: Requester detected a memory translation/protection (TPT) error. */
+/* -------------- */
+ pseudo_bit_t rq_num_wrfe[0x00020]; /* Responder - number of WR flushed errors.
+ Incremented each time a CQE with error is generated. */
+/* -------------- */
+ pseudo_bit_t sq_num_wrfe[0x00020]; /* Requester - number of WR flushed errors.
+ Incremented each time a CQE with error is generated. */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_mwbe[0x00020]; /* Requester - number of memory window bind errors. */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_bre[0x00020]; /* Requester - number of bad response errors.
+ Bad response: Unexpected opcode for the response packet received at the expected response PSN. */
+/* -------------- */
+ pseudo_bit_t rq_num_lae[0x00020]; /* Responder - number of local access errors.
+ Unused. */
+/* -------------- */
+ pseudo_bit_t reserved2[0x00040];
+/* -------------- */
+ pseudo_bit_t sq_num_rire[0x00020]; /* Requester - number of remote invalid request errors.
+ NAK-Invalid Request on:
+ 1. Unsupported OpCode: Responder detected an unsupported OpCode.
+ 2. Unexpected OpCode: Responder detected an error in the sequence of OpCodes, such as a missing "Last" packet.
+ Note: there is no PSN error, thus this does not indicate a dropped packet. */
+/* -------------- */
+ pseudo_bit_t rq_num_rire[0x00020]; /* Responder - number of remote invalid request errors.
+ NAK may or may not be sent.
+ 1. Unsupported or Reserved OpCode: Inbound request OpCode was either reserved, or was for a function not supported by this QP. (e.g. RDMA or ATOMIC on a QP not set up for this). For RC this is "QP Async affiliated".
+ 2.
Misaligned ATOMIC: VA does not point to an aligned address on an atomic operation.
+ 3. Too many RDMA READ or ATOMIC Requests: There were more requests received and not ACKed than allowed for the connection.
+ 4. Out of Sequence OpCode, current packet is "first" or "Only": The Responder detected an error in the sequence of OpCodes; a missing "Last" packet
+ 5. Out of Sequence OpCode, current packet is not "first" or "Only": The Responder detected an error in the sequence of OpCodes; a missing "First" packet
+ 6. Local Length Error: Inbound "Send" request message exceeded the responder's available buffer space.
+ 7. Length error: RDMA WRITE request message contained too much or too little payload data compared to the DMA length advertised in the first or only packet.
+ 8. Length error: Payload length was not consistent with the opcode:
+ a: 0 byte <= "only" <= PMTU bytes
+ b: ("first" or "middle") == PMTU bytes
+ c: 1 byte <= "last" <= PMTU bytes
+ 9. Length error: Inbound message exceeded the size supported by the CA port. */
+/* -------------- */
+ pseudo_bit_t sq_num_rae[0x00020]; /* Requester - number of remote access errors.
+ NAK-Remote Access Error on:
+ R_Key Violation: Responder detected an invalid R_Key while executing an RDMA Request. */
+/* -------------- */
+ pseudo_bit_t rq_num_rae[0x00020]; /* Responder - number of remote access errors.
+ R_Key Violation: Responder detected an R_Key violation while executing an RDMA request.
+ NAK may or may not be sent. */
+/* -------------- */
+ pseudo_bit_t sq_num_roe[0x00020]; /* Requester - number of remote operation errors.
+ NAK-Remote Operation Error on:
+ Remote Operation Error: Responder encountered an error, (local to the responder), which prevented it from completing the request. */
+/* -------------- */
+ pseudo_bit_t rq_num_roe[0x00020]; /* Responder - number of remote operation errors.
+ NAK-Remote Operation Error on:
+ 1. Malformed WQE: Responder detected a malformed Receive Queue WQE while processing the packet.
+ 2. Remote Operation Error: Responder encountered an error, (local to the responder), which prevented it from completing the request. */
+/* -------------- */
+ pseudo_bit_t sq_num_tree[0x00020]; /* Requester - number of transport retries exceeded errors.
+ 1. Packet sequence error: Retry limit exceeded. Responder detected a PSN larger than it expected. The requestor performed retries, and automatic path migration and additional retries, if applicable, but all attempts failed.
+ 2. Implied NAK sequence error: Retry limit exceeded. Requestor detected an ACK with a PSN larger than the expected PSN for an RDMA READ or atomic response. The requestor performed retries, and automatic path migration and additional retries, if applicable, but all attempts failed.
+ 3. Local Ack Timeout error: Retry limit exceeded. No ACK response within timer interval. The requestor performed retries, and automatic path migration and additional retries, but all attempts failed. */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_rree[0x00020]; /* Requester - number of RNR nak retries exceeded errors.
+ RNR NAK Retry error. Retry limit exceeded. Excessive RNR NAKs returned by the responder: Requestor retried the request "n" times, but received RNR NAK each time. */
+/* -------------- */
+ pseudo_bit_t reserved4[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_lrdve[0x00020]; /* Requester - number of local RDD violation errors.
+ RD only.
*/
+/* -------------- */
+ pseudo_bit_t rq_num_rirdre[0x00020];/* Responder - number of remote invalid RD request errors.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t reserved5[0x00040];
+/* -------------- */
+ pseudo_bit_t sq_num_rabrte[0x00020];/* Requester - number of remote aborted errors.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t reserved6[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_ieecne[0x00020];/* Requester - number of invalid EE context number errors.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t reserved7[0x00020];
+/* -------------- */
+ pseudo_bit_t sq_num_ieecse[0x00020];/* Requester - invalid EE context state errors.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t reserved8[0x00380];
+/* -------------- */
+ pseudo_bit_t rq_num_oos[0x00020]; /* Responder - number of out of sequence requests received.
+ Out of Sequence Request Packet: Packet PSN of the inbound request is outside the responder's valid PSN window.
+ NAK may or may not be sent. */
+/* -------------- */
+ pseudo_bit_t sq_num_oos[0x00020]; /* Requester - number of out of sequence Naks received.
+ NAK-Sequence Error on:
+ 1. Packet sequence error. Retry limit not exceeded: Responder detected a PSN larger than it expected. Requester may retry the request.
+ 2. Packet sequence error. Retry limit exceeded: Responder detected a PSN larger than it expected. The requestor performed retries, and automatic path migration and additional retries, if applicable, but all attempts failed. */
+/* -------------- */
+ pseudo_bit_t rq_num_mce[0x00020]; /* Responder - number of bad multicast packets received.
+ Missing GID or bad GID. */
+/* -------------- */
+ pseudo_bit_t reserved9[0x00020];
+/* -------------- */
+ pseudo_bit_t rq_num_rsync[0x00020]; /* Responder - number of RESYNC operations.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t sq_num_rsync[0x00020]; /* Requester - number of RESYNC operations.
+ RD only. */
+/* -------------- */
+ pseudo_bit_t rq_num_udsdprd[0x00020];/* The number of UD packets silently discarded on the receive queue due to lack of receive descriptor.
+ Resources Not Ready Error: A UD WQE is not currently available. */
+/* -------------- */
+ pseudo_bit_t reserved10[0x00020];
+/* -------------- */
+ pseudo_bit_t rq_num_ucsdprd[0x00020];/* The number of UC packets silently discarded on the receive queue due to lack of receive descriptor.
+ Resources Not Ready Error: A UC WQE is not currently available. */
+/* -------------- */
+ pseudo_bit_t reserved11[0x003e0];
+/* -------------- */
+ pseudo_bit_t num_cqovf[0x00020]; /* Number of CQ overflows.
+ Incremented each time a completion is discarded due to CQ overflow. */
+/* -------------- */
+ pseudo_bit_t num_eqovf[0x00020]; /* Number of EQ overflows.
+ Incremented each time EQ enters the overflow state. */
+/* -------------- */
+ pseudo_bit_t num_baddb[0x00020]; /* Number of bad doorbells.
+ Doorbell dropped due to UAR violation or bad resource state. */
+/* -------------- */
+ pseudo_bit_t reserved12[0x002a0];
+/* -------------- */
+};
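A minimal sketch of how a diagnostic tool might interpret a few of the counters above; the plain-integer snapshot struct and the chosen field subset are hypothetical, picked to separate fabric-loss symptoms (retry/sequence counters) from local host-side problems (CQ overflows and dropped doorbells).

#include <stdint.h>

/* Hypothetical software snapshot of a few counters defined above. */
struct tavor_err_snapshot {
    uint32_t sq_num_tree;  /* transport retries exceeded */
    uint32_t sq_num_rree;  /* RNR NAK retries exceeded */
    uint32_t rq_num_oos;   /* out-of-sequence requests received */
    uint32_t sq_num_oos;   /* out-of-sequence NAKs received */
    uint32_t num_cqovf;    /* completions discarded due to CQ overflow */
    uint32_t num_baddb;    /* doorbells dropped */
};

/* Non-zero retry/sequence counters point at packet loss on the fabric. */
static int fabric_looks_lossy(const struct tavor_err_snapshot *c)
{
    return c->sq_num_tree || c->sq_num_rree || c->rq_num_oos || c->sq_num_oos;
}

/* Non-zero overflow/doorbell counters point at local software problems. */
static int host_side_misbehaving(const struct tavor_err_snapshot *c)
{
    return c->num_cqovf || c->num_baddb;
}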
+
+/* Event_data Field - HCR Completion Event */
+
+struct tavorprm_hcr_completion_event_st { /* Little Endian */
+ pseudo_bit_t token[0x00010]; /* HCR Token */
+ pseudo_bit_t reserved0[0x00010];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* -------------- */
+ pseudo_bit_t status[0x00008]; /* HCR Status */
+ pseudo_bit_t reserved2[0x00018];
+/* -------------- */
+ pseudo_bit_t out_param_h[0x00020]; /* HCR Output Parameter [63:32] */
+/* -------------- */
+ pseudo_bit_t out_param_l[0x00020]; /* HCR Output Parameter [31:0] */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* -------------- */
+};
+
+/* Completion with Error CQE */
+
+struct tavorprm_completion_with_error_st { /* Little Endian */
+ pseudo_bit_t myqpn[0x00018]; /* Indicates the QP for which completion is being reported */
+ pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00060];
+/* -------------- */
+ pseudo_bit_t db_cnt[0x00010]; /* Doorbell count */
+ pseudo_bit_t reserved2[0x00008];
+ pseudo_bit_t syndrome[0x00008]; /* Completion with error syndrome:
+ 0x01 - Local Length Error
+ 0x02 - Local QP Operation Error
+ 0x03 - Local EE Context Operation Error
+ 0x04 - Local Protection Error
+ 0x05 - Work Request Flushed Error
+ 0x06 - Memory Window Bind Error
+ 0x10 - Bad Response Error
+ 0x11 - Local Access Error
+ 0x12 - Remote Invalid Request Error
+ 0x13 - Remote Access Error
+ 0x14 - Remote Operation Error
+ 0x15 - Transport Retry Counter Exceeded
+ 0x16 - RNR Retry Counter Exceeded
+ 0x20 - Local RDD Violation Error
+ 0x21 - Remote Invalid RD Request
+ 0x22 - Remote Aborted Error
+ 0x23 - Invalid EE Context Number
+ 0x24 - Invalid EE Context State
+ other - Reserved
+ Syndrome is defined according to the IB specification volume 1. For detailed explanation of the syndromes, refer to chapters 10-11 of the IB specification rev 1.1. */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00020];
+/* -------------- */
+ pseudo_bit_t wqe_size[0x00006]; /* Size (in 16-byte chunks) of the WQE the completion is reported for */
+ pseudo_bit_t wqe_addr[0x0001a]; /* Bits 31:6 of the virtual address of the WQE the completion is reported for. The 6 least significant bits are zero. */
+/* -------------- */
+ pseudo_bit_t reserved4[0x00007];
+ pseudo_bit_t owner[0x00001]; /* Owner field. Zero value of this field means SW ownership of CQE. */
+ pseudo_bit_t reserved5[0x00010];
+ pseudo_bit_t opcode[0x00008]; /* The opcode of the WQE the completion is reported for.
+
+ The following values are reported in case of completion with error:
+ 0xFE - For completion with error on Receive Queues
+ 0xFF - For completion with error on Send Queues */
+/* -------------- */
+};
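The syndrome encoding above maps directly onto the IB spec error classes; a minimal decoding helper is sketched below, using only the values listed in the comment (the function name is illustrative):

static const char *cqe_syndrome_str(unsigned syndrome)
{
    /* Values taken from the syndrome field description above. */
    switch (syndrome) {
    case 0x01: return "Local Length Error";
    case 0x02: return "Local QP Operation Error";
    case 0x03: return "Local EE Context Operation Error";
    case 0x04: return "Local Protection Error";
    case 0x05: return "Work Request Flushed Error";
    case 0x06: return "Memory Window Bind Error";
    case 0x10: return "Bad Response Error";
    case 0x11: return "Local Access Error";
    case 0x12: return "Remote Invalid Request Error";
    case 0x13: return "Remote Access Error";
    case 0x14: return "Remote Operation Error";
    case 0x15: return "Transport Retry Counter Exceeded";
    case 0x16: return "RNR Retry Counter Exceeded";
    case 0x20: return "Local RDD Violation Error";
    case 0x21: return "Remote Invalid RD Request";
    case 0x22: return "Remote Aborted Error";
    case 0x23: return "Invalid EE Context Number";
    case 0x24: return "Invalid EE Context State";
    default:   return "Reserved";
    }
}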
+
+/* Resize CQ Input Mailbox */
+
+struct tavorprm_resize_cq_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t start_addr_h[0x00020]; /* Start address of CQ[63:32].
+ Must be aligned on CQE size (32 bytes) */
+/* -------------- */
+ pseudo_bit_t start_addr_l[0x00020]; /* Start address of CQ[31:0].
+ Must be aligned on CQE size (32 bytes) */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00018];
+ pseudo_bit_t log_cq_size[0x00005]; /* Log (base 2) of the CQ size (in entries) */
+ pseudo_bit_t reserved2[0x00003];
+/* -------------- */
+ pseudo_bit_t reserved3[0x00060];
+/* -------------- */
+ pseudo_bit_t l_key[0x00020]; /* Memory key (L_Key) to be used to access CQ */
+/* -------------- */
+ pseudo_bit_t reserved4[0x00100];
+/* -------------- */
+};
+
+/* SYS_EN Output Parameter */
+
+struct tavorprm_sys_en_out_param_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t spd[0x00001]; /* 0 - DIMM SPD was read from DIMM
+ 1 - DIMM SPD was read from InfiniHost NVMEM */
+ pseudo_bit_t sladr[0x00003]; /* SPD Slave Address 3 LSBits.
+ Valid only if spd bit is 0. */
+ pseudo_bit_t sock_num[0x00002]; /* DIMM socket number (for double sided DIMM one of the two numbers will be reported) */
+ pseudo_bit_t syn[0x00004]; /* Error Syndrome
+ 0 - reserved
+ 1 - SPD error (e.g. checksum error, no response, error while reading)
+ 2 - DIMM out of bounds (e.g. DIMM rows number is not between 7 and 14, DIMM type is not 2)
+ 3 - DIMM conflict (e.g. mix of registered and unbuffered DIMMs, CAS latency conflict)
+ 4 - Calibration error
+ other - Error, reserved */
+ pseudo_bit_t reserved1[0x00016];
+/* -------------- */
+};
+
+/* Query Debug Message */
+
+struct tavorprm_query_debug_msg_st { /* Little Endian */
+ pseudo_bit_t base_addr_h[0x00020]; /* Debug Buffers Base Address [63:32] */
+/* -------------- */
+ pseudo_bit_t base_addr_l[0x00020]; /* Debug Buffers Base Address [31:0] */
+/* -------------- */
+ pseudo_bit_t buf_sz[0x00020]; /* Debug Buffer Size (in bytes) */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t trc_hdr_sz[0x00020]; /* Trace message header size in dwords. */
+/* -------------- */
+ pseudo_bit_t trc_arg_num[0x00020]; /* The number of arguments per trace message.
*/ +/* -------------- */ + pseudo_bit_t reserved1[0x000c0]; +/* -------------- */ + pseudo_bit_t dbg_msk_h[0x00020]; /* Debug messages mask [63:32] */ +/* -------------- */ + pseudo_bit_t dbg_msk_l[0x00020]; /* Debug messages mask [31:0] */ +/* -------------- */ + pseudo_bit_t reserved2[0x00040]; +/* -------------- */ + pseudo_bit_t fs_base_addr0_h[0x00020];/* Base address for format string for irisc 0 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr0_l[0x00020];/* Base address for format string for irisc 0 bits[31:0] */ +/* -------------- */ + pseudo_bit_t fs_base_addr1_h[0x00020];/* Base address for format string for irisc 1 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr1_l[0x00020];/* Base address for format string for irisc 1 bits[31:0] */ +/* -------------- */ + pseudo_bit_t fs_base_addr2_h[0x00020];/* Base address for format string for irisc 2 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr2_l[0x00020];/* Base address for format string for irisc 2 bits[31:0] */ +/* -------------- */ + pseudo_bit_t fs_base_addr3_h[0x00020];/* Base address for format string for irisc 3 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr3_l[0x00020];/* Base address for format string for irisc 3 bits[31:0] */ +/* -------------- */ + pseudo_bit_t fs_base_addr4_h[0x00020];/* Base address for format string for irisc 4 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr4_l[0x00020];/* Base address for format string for irisc 4 bits[31:0] */ +/* -------------- */ + pseudo_bit_t fs_base_addr5_h[0x00020];/* Base address for format string for irisc 5 bits[63:32] */ +/* -------------- */ + pseudo_bit_t fs_base_addr5_l[0x00020];/* Base address for format string for irisc 5 bits[31:0] */ +/* -------------- */ + pseudo_bit_t reserved3[0x00480]; +/* -------------- */ +}; + +/* User Access Region */ + +struct tavorprm_uar_st { /* Little Endian */ + struct tavorprm_rd_send_doorbell_st rd_send_doorbell;/* Reliable Datagram SQ Doorbell */ +/* -------------- */ + struct tavorprm_send_doorbell_st send_doorbell;/* SQ Doorbell */ +/* -------------- */ + struct tavorprm_receive_doorbell_st receive_doorbell;/* RQ Doorbell */ +/* -------------- */ + struct tavorprm_cq_cmd_doorbell_st cq_command_doorbell;/* CQ Doorbell */ +/* -------------- */ + struct tavorprm_eq_cmd_doorbell_st eq_command_doorbell;/* EQ Doorbell */ +/* -------------- */ + pseudo_bit_t reserved0[0x01e80]; +/* -------------- */ + pseudo_bit_t infini_blast[256][0x00020];/* InfiniBlast buffer (same format as WQE format) + Infiniblast is not supported by InfiniHost MT23108 */ +/* -------------- */ +}; + +/* SET_IB Parameters */ + +struct tavorprm_set_ib_st { /* Little Endian */ + pseudo_bit_t rqk[0x00001]; /* Reset QKey Violation Counter */ + pseudo_bit_t reserved0[0x00011]; + pseudo_bit_t sig[0x00001]; /* Set System Image GUID to system_image_guid specified. + system_image_guid and sig must be the same for all ports. */ + pseudo_bit_t reserved1[0x0000d]; +/* -------------- */ + pseudo_bit_t capability_mask[0x00020];/* PortInfo Capability Mask */ +/* -------------- */ + pseudo_bit_t system_image_guid_h[0x00020];/* System Image GUID[63:32], takes effect only if the SIG bit is set + Must be the same for both ports. */ +/* -------------- */ + pseudo_bit_t system_image_guid_l[0x00020];/* System Image GUID[31:0], takes effect only if the SIG bit is set + Must be the same for both ports. 
*/
+/* -------------- */
+ pseudo_bit_t reserved2[0x00180];
+/* -------------- */
+};
+
+/* Multicast Group Member */
+
+struct tavorprm_mgm_entry_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00006];
+ pseudo_bit_t next_gid_index[0x0001a];/* Index of next Multicast Group Member whose GID maps to same MGID_HASH number.
+ The index is into the Multicast Group Table, which is comprised of the MGHT and AMGM tables.
+ next_gid_index=0 means end of the chain. */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00060];
+/* -------------- */
+ pseudo_bit_t mgid_128_96[0x00020]; /* Multicast group GID[128:96] in big endian format.
+ Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+ pseudo_bit_t mgid_95_64[0x00020]; /* Multicast group GID[95:64] in big endian format.
+ Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+ pseudo_bit_t mgid_63_32[0x00020]; /* Multicast group GID[63:32] in big endian format.
+ Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+ pseudo_bit_t mgid_31_0[0x00020]; /* Multicast group GID[31:0] in big endian format.
+ Use the Reserved GID 0:0:0:0:0:0:0:0 for an invalid entry. */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_0; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_1; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_2; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_3; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_4; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_5; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_6; /* Multicast Group Member QP */
+/* -------------- */
+ struct tavorprm_mgmqp_st mgmqp_7; /* Multicast Group Member QP */
+/* -------------- */
+};
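A minimal sketch of the lookup implied by next_gid_index: hash the MGID into the MGHT, then follow the chain through the AMGM table until the GID matches or next_gid_index is 0. The software-side entry view and the read_mgm() accessor are hypothetical.

#include <stdint.h>
#include <string.h>

/* Hypothetical software view of one MGM entry (GID plus chain link). */
struct mgm_sw_entry {
    uint32_t next_gid_index;  /* 0 terminates the chain */
    uint8_t  mgid[16];        /* GID[128:0] in big endian */
};

/* Walk the chain starting at the hash bucket; read_mgm() is an assumed
 * accessor that fetches an entry by its index into the MGHT/AMGM tables. */
static int find_mgm(const uint8_t mgid[16], uint32_t mgid_hash,
                    const struct mgm_sw_entry *(*read_mgm)(uint32_t))
{
    uint32_t idx = mgid_hash;
    for (;;) {
        const struct mgm_sw_entry *e = read_mgm(idx);
        if (memcmp(e->mgid, mgid, 16) == 0)
            return (int)idx;   /* found the group */
        if (e->next_gid_index == 0)
            return -1;         /* end of chain: not a member */
        idx = e->next_gid_index;
    }
}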
+
+/* INIT_IB Parameters */
+
+struct tavorprm_init_ib_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t vl_cap[0x00004]; /* Maximum VLs supported on the port, excluding VL15 */
+ pseudo_bit_t port_width_cap[0x00004];/* IB Port Width
+ 1 - 1x
+ 3 - 1x, 4x
+ 11 - 1x, 4x or 12x (must not be used in InfiniHost MT23108)
+ else - Reserved */
+ pseudo_bit_t mtu_cap[0x00004]; /* Maximum MTU Supported
+ 0x0 - Reserved
+ 0x1 - 256
+ 0x2 - 512
+ 0x3 - 1024
+ 0x4 - 2048
+ 0x5 - 0xF Reserved */
+ pseudo_bit_t g0[0x00001]; /* Set port GUID0 to GUID0 specified */
+ pseudo_bit_t ng[0x00001]; /* Set node GUID to node_guid specified.
+ node_guid and ng must be the same for all ports. */
+ pseudo_bit_t sig[0x00001]; /* Set System Image GUID to system_image_guid specified.
+ system_image_guid and sig must be the same for all ports. */
+ pseudo_bit_t reserved1[0x0000d];
+/* -------------- */
+ pseudo_bit_t max_gid[0x00010]; /* Maximum number of GIDs for the port */
+ pseudo_bit_t reserved2[0x00010];
+/* -------------- */
+ pseudo_bit_t max_pkey[0x00010]; /* Maximum pkeys for the port.
+ Must be the same for both ports. */
+ pseudo_bit_t reserved3[0x00010];
+/* -------------- */
+ pseudo_bit_t reserved4[0x00020];
+/* -------------- */
+ pseudo_bit_t guid0_h[0x00020]; /* EUI-64 GUID assigned by the manufacturer, takes effect only if the G0 bit is set (bits 63:32) */
+/* -------------- */
+ pseudo_bit_t guid0_l[0x00020]; /* EUI-64 GUID assigned by the manufacturer, takes effect only if the G0 bit is set (bits 31:0) */
+/* -------------- */
+ pseudo_bit_t node_guid_h[0x00020]; /* Node GUID[63:32], takes effect only if the NG bit is set
+ Must be the same for both ports. */
+/* -------------- */
+ pseudo_bit_t node_guid_l[0x00020]; /* Node GUID[31:0], takes effect only if the NG bit is set
+ Must be the same for both ports. */
+/* -------------- */
+ pseudo_bit_t system_image_guid_h[0x00020];/* System Image GUID[63:32], takes effect only if the SIG bit is set
+ Must be the same for both ports. */
+/* -------------- */
+ pseudo_bit_t system_image_guid_l[0x00020];/* System Image GUID[31:0], takes effect only if the SIG bit is set
+ Must be the same for both ports. */
+/* -------------- */
+ pseudo_bit_t reserved5[0x006c0];
+/* -------------- */
+};
+
+/* Query Device Limitations */
+
+struct tavorprm_query_dev_lim_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00080];
+/* -------------- */
+ pseudo_bit_t log_max_qp[0x00005]; /* Log2 of the Maximum number of QPs supported */
+ pseudo_bit_t reserved1[0x00003];
+ pseudo_bit_t log2_rsvd_qps[0x00004];/* Log (base 2) of the number of QPs reserved for firmware use
+ The reserved resources are numbered from 0 to 2^log2_rsvd_qps-1 */
+ pseudo_bit_t reserved2[0x00004];
+ pseudo_bit_t log_max_qp_sz[0x00008];/* Log2 of the maximum WQEs allowed on the RQ or the SQ */
+ pseudo_bit_t log_max_srq_sz[0x00008];/* Log2 of the maximum WQEs allowed on the SRQ */
+/* -------------- */
+ pseudo_bit_t log_max_ee[0x00005]; /* Log2 of the Maximum number of EE contexts supported */
+ pseudo_bit_t reserved3[0x00003];
+ pseudo_bit_t log2_rsvd_ees[0x00004];/* Log (base 2) of the number of EECs reserved for firmware use
+ The reserved resources are numbered from 0 to 2^log2_rsvd_ees-1 */
+ pseudo_bit_t reserved4[0x00004];
+ pseudo_bit_t log_max_srqs[0x00005]; /* Log base 2 of the maximum number of SRQs supported, valid only if SRQ bit is set. */
+ pseudo_bit_t reserved5[0x00007];
+ pseudo_bit_t log2_rsvd_srqs[0x00004];/* Log (base 2) of the number of reserved SRQs for firmware use
+ The reserved resources are numbered from 0 to 2^log2_rsvd_srqs-1
+ This parameter is valid only if the SRQ bit is set. */
+/* -------------- */
+ pseudo_bit_t log_max_cq[0x00005]; /* Log2 of the Maximum number of CQs supported */
+ pseudo_bit_t reserved6[0x00003];
+ pseudo_bit_t log2_rsvd_cqs[0x00004];/* Log (base 2) of the number of CQs reserved for firmware use
+ The reserved resources are numbered from 0 to 2^log2_rsvd_cqs-1 */
+ pseudo_bit_t reserved7[0x00004];
+ pseudo_bit_t log_max_cq_sz[0x00008];/* Log2 of the Maximum CQEs allowed in a CQ */
+ pseudo_bit_t reserved8[0x00008];
+/* -------------- */
+ pseudo_bit_t log_max_eq[0x00003]; /* Log2 of the Maximum number of EQs */
+ pseudo_bit_t reserved9[0x00005];
+ pseudo_bit_t num_rsvd_eqs[0x00004]; /* The number of EQs reserved for firmware use
+ The reserved resources are numbered from 0 to num_rsvd_eqs-1
+ If 0 - no resources are reserved.
*/ + pseudo_bit_t reserved10[0x00004]; + pseudo_bit_t log_max_mpts[0x00006]; /* Log (base 2) of the maximum number of MPT entries (the number of Regions/Windows) */ + pseudo_bit_t reserved11[0x0000a]; +/* -------------- */ + pseudo_bit_t log_max_mtt_seg[0x00006];/* Log2 of the Maximum number of MTT segments */ + pseudo_bit_t reserved12[0x00002]; + pseudo_bit_t log2_rsvd_mrws[0x00004];/* Log (base 2) of the number of MPTs reserved for firmware use + The reserved resources are numbered from 0 to 2^log2_rsvd_mrws-1 */ + pseudo_bit_t reserved13[0x00004]; + pseudo_bit_t log_max_mrw_sz[0x00008];/* Log2 of the Maximum Size of Memory Region/Window */ + pseudo_bit_t reserved14[0x00004]; + pseudo_bit_t log2_rsvd_mtts[0x00004];/* Log (base 2) of the number of MTT segments reserved for firmware use + The reserved resources are numbered from 0 to 2^log2_rsvd_mtts-1 + */ +/* -------------- */ + pseudo_bit_t log_max_av[0x00006]; /* Log2 of the Maximum number of Address Vectors */ + pseudo_bit_t reserved15[0x0001a]; +/* -------------- */ + pseudo_bit_t log_max_ra_res_qp[0x00006];/* Log2 of the Maximum number of outstanding RDMA read/Atomic per QP as a responder */ + pseudo_bit_t reserved16[0x0000a]; + pseudo_bit_t log_max_ra_req_qp[0x00006];/* Log2 of the maximum number of outstanding RDMA read/Atomic per QP as a requester */ + pseudo_bit_t reserved17[0x0000a]; +/* -------------- */ + pseudo_bit_t log_max_ra_res_global[0x00006];/* Log2 of the maximum number of RDMA read/atomic operations the HCA responder can support globally. That implies the RDB table size. */ + pseudo_bit_t reserved18[0x0001a]; +/* -------------- */ + pseudo_bit_t reserved19[0x00020]; +/* -------------- */ + pseudo_bit_t num_ports[0x00004]; /* Number of IB ports. */ + pseudo_bit_t max_vl[0x00004]; /* Maximum VLs supported on each port, excluding VL15 */ + pseudo_bit_t max_port_width[0x00004];/* IB Port Width + 1 - 1x + 3 - 1x, 4x + 11 - 1x, 4x or 12x + else - Reserved */ + pseudo_bit_t max_mtu[0x00004]; /* Maximum MTU Supported + 0x0 - Reserved + 0x1 - 256 + 0x2 - 512 + 0x3 - 1024 + 0x4 - 2048 + 0x5 - 0xF Reserved */ + pseudo_bit_t local_ca_ack_delay[0x00005];/* The Local CA ACK Delay. This is the value recommended to be returned in Query HCA verb. + The delay value in microseconds is computed using 4.096us * 2^(Local_CA_ACK_Delay). 
*/
+ pseudo_bit_t reserved20[0x0000b];
+/* -------------- */
+ pseudo_bit_t log_max_gid[0x00004]; /* Log2 of the maximum number of GIDs per port */
+ pseudo_bit_t reserved21[0x0001c];
+/* -------------- */
+ pseudo_bit_t log_max_pkey[0x00004]; /* Log2 of the max PKey Table Size (per IB port) */
+ pseudo_bit_t reserved22[0x0001c];
+/* -------------- */
+ pseudo_bit_t reserved23[0x00020];
+/* -------------- */
+ pseudo_bit_t rc[0x00001]; /* RC Transport supported */
+ pseudo_bit_t uc[0x00001]; /* UC Transport Supported */
+ pseudo_bit_t ud[0x00001]; /* UD Transport Supported */
+ pseudo_bit_t rd[0x00001]; /* RD Transport Supported
+ RD is not supported in InfiniHost MT23108 */
+ pseudo_bit_t raw_ipv6[0x00001]; /* Raw IPv6 Transport Supported */
+ pseudo_bit_t raw_ether[0x00001]; /* Raw Ethertype Transport Supported */
+ pseudo_bit_t srq[0x00001]; /* SRQ is supported
+ */
+ pseudo_bit_t reserved24[0x00001];
+ pseudo_bit_t pkv[0x00001]; /* PKey Violation Counter Supported */
+ pseudo_bit_t qkv[0x00001]; /* QKey Violation Counter Supported */
+ pseudo_bit_t reserved25[0x00006];
+ pseudo_bit_t mw[0x00001]; /* Memory windows supported */
+ pseudo_bit_t apm[0x00001]; /* Automatic Path Migration Supported */
+ pseudo_bit_t atm[0x00001]; /* Atomic operations supported (atomicity is guaranteed between QPs on this HCA) */
+ pseudo_bit_t rm[0x00001]; /* Raw Multicast Supported */
+ pseudo_bit_t avp[0x00001]; /* Address Vector Port checking supported */
+ pseudo_bit_t udm[0x00001]; /* UD Multicast Supported */
+ pseudo_bit_t reserved26[0x00002];
+ pseudo_bit_t pg[0x00001]; /* Paging on demand supported */
+ pseudo_bit_t r[0x00001]; /* Router mode supported */
+ pseudo_bit_t reserved27[0x00006];
+/* -------------- */
+ pseudo_bit_t log_pg_sz[0x00008]; /* Minimum system page size supported (log2).
+ For proper operation it must be less than or equal to the hosting platform (CPU) minimum page size. */
+ pseudo_bit_t reserved28[0x00008];
+ pseudo_bit_t uar_sz[0x00006]; /* UAR Area Size = 1MB * 2^uar_sz */
+ pseudo_bit_t reserved29[0x00006];
+ pseudo_bit_t num_rsvd_uars[0x00004];/* The number of UARs reserved for firmware use
+ The reserved resources are numbered from 0 to num_reserved_uars-1
+ Note that UAR 1 is always for the kernel
+ If 0 - no resources are reserved. */
+/* -------------- */
+ pseudo_bit_t reserved30[0x00020];
+/* -------------- */
+ pseudo_bit_t max_desc_sz[0x00010]; /* Max descriptor size in bytes */
+ pseudo_bit_t max_sg[0x00008]; /* The maximum S/G list elements in a WQE (max_desc_sz/16 - 3) */
+ pseudo_bit_t reserved31[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved32[0x00060];
+/* -------------- */
+ pseudo_bit_t log_max_mcg[0x00008]; /* Log2 of the maximum number of multicast groups */
+ pseudo_bit_t num_rsvd_mcgs[0x00004];/* The number of MGMs reserved for firmware use in the MGHT.
+ The reserved resources are numbered from 0 to num_reserved_mcgs-1
+ If 0 - no resources are reserved. */
+ pseudo_bit_t reserved33[0x00004];
+ pseudo_bit_t log_max_qp_mcg[0x00008];/* Log2 of the maximum number of QPs per multicast group */
+ pseudo_bit_t reserved34[0x00008];
+/* -------------- */
+ pseudo_bit_t log_max_rdds[0x00006]; /* Log2 of the maximum number of RDDs */
+ pseudo_bit_t reserved35[0x00006];
+ pseudo_bit_t num_rsvd_rdds[0x00004];/* The number of RDDs reserved for firmware use
+ The reserved resources are numbered from 0 to num_reserved_rdds-1.
+ If 0 - no resources are reserved.
*/ + pseudo_bit_t log_max_pd[0x00006]; /* Log2 of the maximum number of PDs */ + pseudo_bit_t reserved36[0x00006]; + pseudo_bit_t num_rsvd_pds[0x00004]; /* The number of PDs reserved for firmware use + The reserved resources are numbered from 0 to num_reserved_pds-1 + If 0 - no resources are reserved. */ +/* -------------- */ + pseudo_bit_t reserved37[0x000c0]; +/* -------------- */ + pseudo_bit_t qpc_entry_sz[0x00010]; /* QPC Entry Size for the device + For the InfiniHost MT23108 entry size is 256 bytes */ + pseudo_bit_t eec_entry_sz[0x00010]; /* EEC Entry Size for the device + For the InfiniHost MT23108 entry size is 256 bytes */ +/* -------------- */ + pseudo_bit_t eqpc_entry_sz[0x00010];/* Extended QPC entry size for the device + For the InfiniHost MT23108 entry size is 32 bytes */ + pseudo_bit_t eeec_entry_sz[0x00010];/* Extended EEC entry size for the device + For the InfiniHost MT23108 entry size is 32 bytes */ +/* -------------- */ + pseudo_bit_t cqc_entry_sz[0x00010]; /* CQC entry size for the device + For the InfiniHost MT23108 entry size is 64 bytes */ + pseudo_bit_t eqc_entry_sz[0x00010]; /* EQ context entry size for the device + For the InfiniHost MT23108 entry size is 64 bytes */ +/* -------------- */ + pseudo_bit_t uar_scratch_entry_sz[0x00010];/* UAR Scratchpad Entry Size + For the InfiniHost MT23108 entry size is 32 bytes */ + pseudo_bit_t srq_entry_sz[0x00010]; /* SRQ context entry size for the device + For the InfiniHost MT23108 entry size is 32 bytes */ +/* -------------- */ + pseudo_bit_t reserved38[0x00380]; +/* -------------- */ +}; + +/* QUERY_ADAPTER Parameters Block */ + +struct tavorprm_query_adapter_st { /* Little Endian */ + pseudo_bit_t vendor_id[0x00020]; /* Adapter vendor ID */ +/* -------------- */ + pseudo_bit_t device_id[0x00020]; /* Adapter Device ID */ +/* -------------- */ + pseudo_bit_t revision_id[0x00020]; /* Adapter Revision ID */ +/* -------------- */ + pseudo_bit_t reserved0[0x00020]; +/* -------------- */ + pseudo_bit_t reserved1[0x00018]; + pseudo_bit_t intapin[0x00008]; /* Interrupt Signal ID of HCA device pin that is connected to the INTA trace in the HCA board. + 0..39 and 63 are valid values + 255 means INTA trace in board is not connected to the HCA device. + All other values are reserved */ +/* -------------- */ + pseudo_bit_t reserved2[0x00760]; +/* -------------- */ +}; + +/* QUERY_FW Parameters Block */ + +struct tavorprm_query_fw_st { /* Little Endian */ + pseudo_bit_t fw_rev_major[0x00010]; /* Firmware Revision - Major */ + pseudo_bit_t reserved0[0x00010]; +/* -------------- */ + pseudo_bit_t fw_rev_minor[0x00010]; /* Firmware Revision - Minor */ + pseudo_bit_t fw_rev_subminor[0x00010];/* Firmware Sub-minor version (Patch level). 
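+ (fw_rev_major, fw_rev_minor and fw_rev_subminor together form the conventional three-part major.minor.patch firmware version reported by QUERY_FW.)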
*/
+/* -------------- */
+ pseudo_bit_t cmd_interface_rev[0x00010];/* Command Interface Interpreter Revision ID */
+ pseudo_bit_t reserved1[0x00010];
+/* -------------- */
+ pseudo_bit_t log_max_outstanding_cmd[0x00008];/* Log2 of the maximum number of commands the HCR can support simultaneously */
+ pseudo_bit_t reserved2[0x00017];
+ pseudo_bit_t dt[0x00001]; /* Debug Trace Support
+ 0 - Debug trace is not supported
+ 1 - Debug trace is supported */
+/* -------------- */
+ pseudo_bit_t reserved3[0x00080];
+/* -------------- */
+ pseudo_bit_t fw_base_addr_h[0x00020];/* Physical Address of Firmware Area in DDR Memory [63:32] */
+/* -------------- */
+ pseudo_bit_t fw_base_addr_l[0x00020];/* Physical Address of Firmware Area in DDR Memory [31:0] */
+/* -------------- */
+ pseudo_bit_t fw_end_addr_h[0x00020];/* End of firmware address in DDR memory [63:32] */
+/* -------------- */
+ pseudo_bit_t fw_end_addr_l[0x00020];/* End of firmware address in DDR memory [31:0] */
+/* -------------- */
+ pseudo_bit_t error_buf_start_h[0x00020];/* Read Only buffer for catastrophic error reports. */
+/* -------------- */
+ pseudo_bit_t error_buf_start_l[0x00020];
+/* -------------- */
+ pseudo_bit_t error_buf_size[0x00020];/* Size in words */
+/* -------------- */
+ pseudo_bit_t reserved4[0x00620];
+/* -------------- */
+};
+
+/* QUERY_DDR Parameters Block */
+
+struct tavorprm_query_ddr_st { /* Little Endian */
+ pseudo_bit_t ddr_start_adr_h[0x00020];/* DDR memory start address [63:32] */
+/* -------------- */
+ pseudo_bit_t ddr_start_adr_l[0x00020];/* DDR memory start address [31:0] */
+/* -------------- */
+ pseudo_bit_t ddr_end_adr_h[0x00020];/* DDR memory end address [63:32] */
+/* -------------- */
+ pseudo_bit_t ddr_end_adr_l[0x00020];/* DDR memory end address [31:0] */
+/* -------------- */
+ pseudo_bit_t di[0x00002]; /* Data Integrity Configuration:
+ 00 - none
+ 01 - Parity
+ 10 - ECC Detection Only
+ 11 - ECC With Correction */
+ pseudo_bit_t ap[0x00002]; /* Auto Precharge Mode
+ 00 - No auto precharge
+ 01 - Auto precharge per transaction
+ 10 - Auto precharge per 64 bytes
+ 11 - reserved */
+ pseudo_bit_t dh[0x00001]; /* When set, DDR is hidden and cannot be accessed from the PCI bus */
+ pseudo_bit_t reserved0[0x0001b];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00160];
+/* -------------- */
+ struct tavorprm_dimminfo_st dimm0; /* Logical DIMM 0 Parameters */
+/* -------------- */
+ struct tavorprm_dimminfo_st dimm1; /* Logical DIMM 1 Parameters */
+/* -------------- */
+ struct tavorprm_dimminfo_st dimm2; /* Logical DIMM 2 Parameters */
+/* -------------- */
+ struct tavorprm_dimminfo_st dimm3; /* Logical DIMM 3 Parameters */
+/* -------------- */
+ pseudo_bit_t reserved2[0x00200];
+/* -------------- */
+};
+
+/* INIT_HCA & QUERY_HCA Parameters Block */
+
+struct tavorprm_init_hca_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00060];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00018];
+ pseudo_bit_t hca_core_clock[0x00008];/* Internal Clock Period (in units of 1/16 ns) (QUERY_HCA only) */
+/* -------------- */
+ pseudo_bit_t reserved2[0x00008];
+ pseudo_bit_t router_qp[0x00010]; /* Upper 16 bits to be used as a QP number for router mode. Low order 8 bits are taken from the TClass field of the incoming packet.
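+ (That is, the 24-bit destination QPN is effectively (router_qp << 8) | TClass.)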
+ Valid only if RE bit is set
+ pseudo_bit_t reserved3[0x00007];
+ pseudo_bit_t re[0x00001]; /* Router Mode Enable
+ If this bit is set, the entire packet (including all headers and ICRC) will be considered as a data payload and will be scattered to memory as specified in the descriptor that is posted on the QP matching the TClass field of the packet. */
+/* -------------- */
+ pseudo_bit_t udp[0x00001]; /* UD Port Check Enable
+ 0 - Port field in Address Vector is ignored
+ 1 - HCA will check the port field in AV entry (fetched for UD descriptor) against the Port of the UD QP executing the descriptor. */
+ pseudo_bit_t he[0x00001]; /* Host Endianness - Used for Atomic Operations
+ 0 - Host is Little Endian
+ 1 - Host is Big Endian
+ */
+ pseudo_bit_t ud[0x00001]; /* Force UD address vector protection check. If this bit is set, passing the address vector as immediate data in the WQE is suppressed and a privileged memory key will be used by hardware to access the UD address vector table. */
+ pseudo_bit_t reserved4[0x00005];
+ pseudo_bit_t responder_exu[0x00004];/* How many execution engines are dedicated to the responder. Legal values are 0x0-0xF. 0 is "auto" */
+ pseudo_bit_t reserved5[0x00004];
+ pseudo_bit_t wqe_quota[0x0000f]; /* Maximum number of WQEs that are executed prior to preemption of the execution unit. 0 - reserved. */
+ pseudo_bit_t wqe_quota_en[0x00001]; /* If set - wqe_quota field is used. If cleared - WQE quota is set to "auto" value */
+/* -------------- */
+ pseudo_bit_t reserved6[0x00040];
+/* -------------- */
+ struct tavorprm_qpcbaseaddr_st qpc_eec_cqc_eqc_rdb_parameters;
+/* -------------- */
+ pseudo_bit_t reserved7[0x00080];
+/* -------------- */
+ struct tavorprm_udavtable_memory_parameters_st udavtable_memory_parameters;/* Memory Access Parameters for UD Address Vector Table. Used for QPs/EEc that are configured to use protected Address Vectors. */
+/* -------------- */
+ pseudo_bit_t reserved8[0x00040];
+/* -------------- */
+ struct tavorprm_multicastparam_st multicast_parameters;
+/* -------------- */
+ pseudo_bit_t reserved9[0x00080];
+/* -------------- */
+ struct tavorprm_tptparams_st tpt_parameters;
+/* -------------- */
+ pseudo_bit_t reserved10[0x00080];
+/* -------------- */
+ struct tavorprm_uar_params_st uar_parameters;/* UAR Parameters */
+/* -------------- */
+ pseudo_bit_t reserved11[0x00600];
+/* -------------- */
+};
+
+/* Event Queue Context Table Entry */
+
+struct tavorprm_eqc_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t st[0x00002]; /* Event delivery state machine
+ 01 - Armed
+ 10 - Fired
+ 11 - Always_Armed (auto-rearm)
+ 00 - Reserved */
+ pseudo_bit_t reserved1[0x00007];
+ pseudo_bit_t oi[0x00001]; /* Ignore overrun on this EQ if this bit is set */
+ pseudo_bit_t tr[0x00001]; /* Translation Required. If set - EQ accesses undergo address translation. */
+ pseudo_bit_t reserved2[0x00005];
+ pseudo_bit_t owner[0x00004]; /* 0 - SW ownership
+ 1 - HW ownership
+ Valid for the QUERY_EQ and HW2SW_EQ commands only */
+ pseudo_bit_t status[0x00004]; /* EQ status:
+ 0000 - OK
+ 1001 - EQ overflow
+ 1010 - EQ write failure
+ Valid for the QUERY_EQ and HW2SW_EQ commands only */
+/* -------------- */
+ pseudo_bit_t start_address_h[0x00020];/* Start Address of Event Queue[63:32].
+ Must be aligned on 32-byte boundary */
+/* -------------- */
+ pseudo_bit_t start_address_l[0x00020];/* Start Address of Event Queue[31:0].
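+ (The full 64-bit EQ base is (start_address_h << 32) | start_address_l, so the alignment requirement implies the low 5 bits of start_address_l are zero.)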
+ Must be aligned on 32-byte boundary */
+/* -------------- */
+ pseudo_bit_t usr_page[0x00018];
+ pseudo_bit_t log_eq_size[0x00005]; /* Number of entries in this EQ is 2^log_eq_size.
+ Log_eq_size must be greater than 1 */
+ pseudo_bit_t reserved3[0x00003];
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* PD to be used to access EQ */
+ pseudo_bit_t reserved4[0x00008];
+/* -------------- */
+ pseudo_bit_t intr[0x00008]; /* Interrupt (message) to be generated to report event to INT layer.
+ 00iiiiii - specifies GPIO pin to be asserted (according to INTA given in QUERY_ADAPTER)
+ 10jjjjjj - specifies type of interrupt message to be generated (total 64 different messages supported).
+
+ If interrupt generation is not required, one of the two following options should be used:
+ 1. ST must be set on creation to the Fired state and no EQ arming doorbell should be performed. In this case hardware will not generate any interrupt.
+ 2. intr should be set to 60 decimal
+ */
+ pseudo_bit_t reserved5[0x00018];
+/* -------------- */
+ pseudo_bit_t lost_count[0x00020]; /* Number of events lost due to EQ overrun */
+/* -------------- */
+ pseudo_bit_t lkey[0x00020]; /* Memory key (L-Key) to be used to access EQ */
+/* -------------- */
+ pseudo_bit_t reserved6[0x00040];
+/* -------------- */
+ pseudo_bit_t consumer_indx[0x00020];/* Contains next entry to be read upon polling the event queue.
+ Must be initialized to 0 when opening the EQ */
+/* -------------- */
+ pseudo_bit_t producer_indx[0x00020];/* Contains next entry in EQ to be written by the HCA.
+ Must be initialized to 0 when opening the EQ. */
+/* -------------- */
+ pseudo_bit_t reserved7[0x00080];
+/* -------------- */
+};
+
+/* Memory Translation Table (MTT) Entry */
+
+struct tavorprm_mtt_st { /* Little Endian */
+ pseudo_bit_t ptag_h[0x00020]; /* High-order bits of physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */
+/* -------------- */
+ pseudo_bit_t p[0x00001]; /* Present bit. If set, page entry is valid. If cleared, access to this page will generate a 'non-present page access fault'. */
+ pseudo_bit_t reserved0[0x0000b];
+ pseudo_bit_t ptag_l[0x00014]; /* Low-order bits of Physical tag. The size of the field depends on the page size of the region. Maximum PTAG size is 52 bits. */
+/* -------------- */
+};
+
+/* Memory Protection Table (MPT) Entry */
+
+struct tavorprm_mpt_st { /* Little Endian */
+ pseudo_bit_t ver[0x00004]; /* Version. Must be zero for InfiniHost */
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t r_w[0x00001]; /* Defines whether this entry is Region (1) or Window (0) */
+ pseudo_bit_t pa[0x00001]; /* Physical address. If set, no virtual-to-physical address translation will be performed for this region */
+ pseudo_bit_t lr[0x00001]; /* If set - local read access enabled */
+ pseudo_bit_t lw[0x00001]; /* If set - local write access enabled */
+ pseudo_bit_t rr[0x00001]; /* If set - Remote read access enabled. */
+ pseudo_bit_t rw[0x00001]; /* If set - remote write access enabled */
+ pseudo_bit_t a[0x00001]; /* If set - Remote Atomic access is enabled */
+ pseudo_bit_t eb[0x00001]; /* If set - Bind is enabled. Valid for region entry only. */
+ pseudo_bit_t reserved1[0x00001];
+ pseudo_bit_t m_io[0x00001]; /* Memory / I/O
+ 1 - Memory commands used on the uplink bus
+ 0 - I/O commands used on the uplink bus
+ Must be 1 for the InfiniHost MT23108.
*/
+ pseudo_bit_t reserved2[0x0000a];
+ pseudo_bit_t status[0x00004]; /* Region/Window Status
+ 0xF - not valid (SW ownership)
+ else - HW ownership
+ Note that an unbound Window is denoted by the reg_wnd_len field being equal to zero. */
+/* -------------- */
+ pseudo_bit_t page_size[0x00005]; /* Page size used for the region. Actual size is [4K]*2^Page_size bytes.
+ page_size should be less than 20. */
+ pseudo_bit_t reserved3[0x00002];
+ pseudo_bit_t reserved4[0x00001];
+ pseudo_bit_t reserved5[0x00018];
+/* -------------- */
+ pseudo_bit_t mem_key[0x00020]; /* The memory Key. This field is compared to the key used to access the region/window. Lower-order bits are restricted (index to the table). */
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* Protection Domain */
+ pseudo_bit_t reserved6[0x00001];
+ pseudo_bit_t reserved7[0x00001];
+ pseudo_bit_t reserved8[0x00001];
+ pseudo_bit_t reserved9[0x00001];
+ pseudo_bit_t reserved10[0x00001];
+ pseudo_bit_t reserved11[0x00003];
+/* -------------- */
+ pseudo_bit_t start_address_h[0x00020];/* Start Address[63:32] - Virtual Address where this region/window starts */
+/* -------------- */
+ pseudo_bit_t start_address_l[0x00020];/* Start Address[31:0] - Virtual Address where this region/window starts */
+/* -------------- */
+ pseudo_bit_t reg_wnd_len_h[0x00020];/* Region/Window Length[63:32] */
+/* -------------- */
+ pseudo_bit_t reg_wnd_len_l[0x00020];/* Region/Window Length[31:0] */
+/* -------------- */
+ pseudo_bit_t lkey[0x00020]; /* Must be 0 for SW2HW_MPT.
+ On QUERY_MPT and HW2SW_MPT commands for Memory Window it reflects the LKey of the Region that the Window is bound to. */
+/* -------------- */
+ pseudo_bit_t win_cnt[0x00020]; /* Number of windows bound to this region. Valid for regions only.
+ The field is valid only for the QUERY_MPT and HW2SW_MPT commands. */
+/* -------------- */
+ pseudo_bit_t win_cnt_limit[0x00020];/* The number of windows (limit) that can be bound to this region. If a bind operation is attempted when WIN_CNT == WIN_CNT_LIMIT, the operation will be aborted, a CQE with error will be generated, and the QP will be moved into the error state.
+ Zero means no limit.
+ Note that for best hardware performance, win_cnt_limit should be set to zero. */
+/* -------------- */
+ pseudo_bit_t mtt_seg_adr_h[0x00020];/* Base (first) address of the MTT segment, aligned on segment_size boundary (bits 63:32). */
+/* -------------- */
+ pseudo_bit_t reserved12[0x00006];
+ pseudo_bit_t mtt_seg_adr_l[0x0001a];/* Base (first) address of the MTT segment, aligned on segment_size boundary (bits 31:6).
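+ (Hence the full segment byte address is (mtt_seg_adr_h << 32) | (mtt_seg_adr_l << 6), with the low 6 bits implicitly zero.)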
*/
+/* -------------- */
+ pseudo_bit_t reserved13[0x00060];
+/* -------------- */
+};
+
+/* Completion Queue Context Table Entry */
+
+struct tavorprm_completion_queue_context_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t st[0x00004]; /* Event delivery state machine
+ 0x0 - DISARMED
+ 0x1 - ARMED (Request for Notification)
+ 0x4 - ARMED SOLICITED (Request Solicited Notification)
+ 0xA - FIRED
+ other - reserved */
+ pseudo_bit_t reserved1[0x00005];
+ pseudo_bit_t oi[0x00001]; /* Ignore overrun of this CQ if this bit is set */
+ pseudo_bit_t tr[0x00001]; /* Translation Required
+ 1 - accesses to CQ will undergo address translation
+ 0 - accesses to CQ will not undergo address translation */
+ pseudo_bit_t reserved2[0x00009];
+ pseudo_bit_t status[0x00004]; /* CQ status
+ 0000 - OK
+ 1001 - CQ overflow
+ 1010 - CQ write failure
+ Valid for the QUERY_CQ and HW2SW_CQ commands only */
+/* -------------- */
+ pseudo_bit_t start_address_h[0x00020];/* Start address of CQ[63:32].
+ Must be aligned on CQE size (32 bytes) */
+/* -------------- */
+ pseudo_bit_t start_address_l[0x00020];/* Start address of CQ[31:0].
+ Must be aligned on CQE size (32 bytes) */
+/* -------------- */
+ pseudo_bit_t usr_page[0x00018]; /* UAR page this CQ can be accessed through (ringing CQ doorbells) */
+ pseudo_bit_t log_cq_size[0x00005]; /* Log (base 2) of the CQ size (in entries).
+ Maximum CQ size is 128K CQEs (max log_cq_size is 17) */
+ pseudo_bit_t reserved3[0x00003];
+/* -------------- */
+ pseudo_bit_t e_eqn[0x00008]; /* Event Queue this CQ reports errors to (e.g. CQ overflow)
+ Valid values are 0 to 63
+ If configured to a value other than 0-63, error events will not be reported on the CQ. */
+ pseudo_bit_t reserved4[0x00018];
+/* -------------- */
+ pseudo_bit_t c_eqn[0x00008]; /* Event Queue this CQ reports completion events to.
+ Valid values are 0 to 63
+ If configured to a value other than 0-63, completion events will not be reported on the CQ. */
+ pseudo_bit_t reserved5[0x00018];
+/* -------------- */
+ pseudo_bit_t pd[0x00018]; /* Protection Domain to be used to access CQ.
+ Must be the same PD as that of the CQ L_Key. */
+ pseudo_bit_t reserved6[0x00008];
+/* -------------- */
+ pseudo_bit_t l_key[0x00020]; /* Memory key (L_Key) to be used to access CQ */
+/* -------------- */
+ pseudo_bit_t last_notified_indx[0x00020];/* Maintained by HW.
+ Valid for QUERY_CQ and HW2SW_CQ commands only. */
+/* -------------- */
+ pseudo_bit_t solicit_producer_indx[0x00020];/* Maintained by HW.
+ Valid for QUERY_CQ and HW2SW_CQ commands only.
+ */
+/* -------------- */
+ pseudo_bit_t consumer_indx[0x00020];/* Contains index to the next entry to be read upon poll for completion. The first completion after passing ownership of the CQ from software to hardware will be reported at the value passed in this field. Only the low log_cq_size bits may be non-zero. */
+/* -------------- */
+ pseudo_bit_t producer_indx[0x00020];/* Points to the next entry to be written to by Hardware. CQ overrun is reported if Producer_indx + 1 equals Consumer_indx.
+ Maintained by HW (valid for the QUERY_CQ and HW2SW_CQ commands only) */
+/* -------------- */
+ pseudo_bit_t cqn[0x00018]; /* CQ number.
Least significant bits are constrained by the position of this CQ in the CQC table
+ Valid for the QUERY_CQ and HW2SW_CQ commands only */
+ pseudo_bit_t reserved7[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved8[0x00060];
+/* -------------- */
+};
+
+/* UD Address Vector */
+
+struct tavorprm_ud_address_vector_st { /* Little Endian */
+ pseudo_bit_t pd[0x00018]; /* Protection Domain */
+ pseudo_bit_t port_number[0x00002]; /* Port number
+ 1 - Port 1
+ 2 - Port 2
+ other - reserved */
+ pseudo_bit_t reserved0[0x00006];
+/* -------------- */
+ pseudo_bit_t rlid[0x00010]; /* Remote (Destination) LID */
+ pseudo_bit_t my_lid_path_bits[0x00007];/* Source LID - the lower 7 bits (upper bits are taken from PortInfo) */
+ pseudo_bit_t g[0x00001]; /* Global address enable - if set, GRH will be formed for packet header */
+ pseudo_bit_t reserved1[0x00008];
+/* -------------- */
+ pseudo_bit_t hop_limit[0x00008]; /* IPv6 hop limit */
+ pseudo_bit_t max_stat_rate[0x00003];/* Maximum static rate control.
+ 0 - 4X injection rate
+ 1 - 1X injection rate
+ other - reserved
+ */
+ pseudo_bit_t reserved2[0x00001];
+ pseudo_bit_t msg[0x00002]; /* Max Message size, size is 256*2^MSG bytes */
+ pseudo_bit_t reserved3[0x00002];
+ pseudo_bit_t mgid_index[0x00006]; /* Index to port GID table
+ mgid_index = (port_number-1) * 2^log_max_gid + gid_index
+ Where:
+ 1. log_max_gid is taken from QUERY_DEV_LIM command
+ 2. gid_index is the index to the GID table */
+ pseudo_bit_t reserved4[0x0000a];
+/* -------------- */
+ pseudo_bit_t flow_label[0x00014]; /* IPv6 flow label */
+ pseudo_bit_t tclass[0x00008]; /* IPv6 TClass */
+ pseudo_bit_t sl[0x00004]; /* InfiniBand Service Level (SL) */
+/* -------------- */
+ pseudo_bit_t rgid_127_96[0x00020]; /* Remote GID[127:96] */
+/* -------------- */
+ pseudo_bit_t rgid_95_64[0x00020]; /* Remote GID[95:64] */
+/* -------------- */
+ pseudo_bit_t rgid_63_32[0x00020]; /* Remote GID[63:32] */
+/* -------------- */
+ pseudo_bit_t rgid_31_0[0x00020]; /* Remote GID[31:0] */
+/* -------------- */
+};
+
+/* Event_data Field - QP/EE Events */
+
+struct tavorprm_qp_ee_event_st { /* Little Endian */
+ pseudo_bit_t qpn_een[0x00018]; /* QP/EE/SRQ number event is reported for */
+ pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved2[0x0001c];
+ pseudo_bit_t e_q[0x00001]; /* If set - qpn_een holds an EE Number (EEN); if cleared - a QP number
+ Not valid on SRQ events */
+ pseudo_bit_t reserved3[0x00003];
+/* -------------- */
+ pseudo_bit_t reserved4[0x00060];
+/* -------------- */
+};
+
+/* InfiniHost Type0 Configuration Header */
+
+struct tavorprm_mt23108_type0_st { /* Little Endian */
+ pseudo_bit_t vendor_id[0x00010]; /* Hardwired to 0x15B3 */
+ pseudo_bit_t device_id[0x00010]; /* hardwired to 23108 */
+/* -------------- */
+ pseudo_bit_t command[0x00010]; /* PCI Command Register */
+ pseudo_bit_t status[0x00010]; /* PCI Status Register */
+/* -------------- */
+ pseudo_bit_t revision_id[0x00008];
+ pseudo_bit_t class_code_hca_class_code[0x00018];
+/* -------------- */
+ pseudo_bit_t cache_line_size[0x00008];/* Cache Line Size */
+ pseudo_bit_t latency_timer[0x00008];
+ pseudo_bit_t header_type[0x00008]; /* hardwired to zero */
+ pseudo_bit_t bist[0x00008];
+/* -------------- */
+ pseudo_bit_t bar0_ctrl[0x00004]; /* hard-wired to '0100 */
+ pseudo_bit_t reserved0[0x00010];
+ pseudo_bit_t bar0_l[0x0000c]; /* Lower bits of BAR0 (configuration space) */
+/* -------------- */
+ pseudo_bit_t bar0_h[0x00020]; /* Upper 32 bits
of BAR0 (configuration space) */
+/* -------------- */
+ pseudo_bit_t bar1_ctrl[0x00004]; /* Hardwired to '1100 */
+ pseudo_bit_t reserved1[0x00010];
+ pseudo_bit_t bar1_l[0x0000c]; /* Lower bits of BAR1 */
+/* -------------- */
+ pseudo_bit_t bar1_h[0x00020]; /* upper 32 bits of BAR1 (User Access Region - UAR - space) */
+/* -------------- */
+ pseudo_bit_t bar2_ctrl[0x00004]; /* Hardwired to '1100 */
+ pseudo_bit_t reserved2[0x00010];
+ pseudo_bit_t bar2_l[0x0000c]; /* Lower bits of BAR2 */
+/* -------------- */
+ pseudo_bit_t bar2_h[0x00020]; /* Upper 32 bits of BAR2 - DDR (attached memory) BAR */
+/* -------------- */
+ pseudo_bit_t cardbus_cis_pointer[0x00020];
+/* -------------- */
+ pseudo_bit_t subsystem_vendor_id[0x00010];/* Programmed via InfiniBurn */
+ pseudo_bit_t subsystem_id[0x00010]; /* programmed via InfiniBurn */
+/* -------------- */
+ pseudo_bit_t expansion_rom_base_address[0x00020];/* Programmed via InfiniBurn if expansion ROM enabled */
+/* -------------- */
+ pseudo_bit_t capabilities_pointer[0x00008];/* Programmed via InfiniBurn */
+ pseudo_bit_t reserved3[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved4[0x00020];
+/* -------------- */
+ pseudo_bit_t interrupt_line[0x00008];
+ pseudo_bit_t interrupt_pin[0x00008];
+ pseudo_bit_t min_gnt[0x00008];
+ pseudo_bit_t max_latency[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved5[0x00100];
+/* -------------- */
+ pseudo_bit_t msi_cap_id[0x00008];
+ pseudo_bit_t msi_next_cap_ptr[0x00008];
+ pseudo_bit_t msi_en[0x00001];
+ pseudo_bit_t multiple_msg_cap[0x00003];
+ pseudo_bit_t multiple_msg_en[0x00003];
+ pseudo_bit_t cap_64_bit_addr[0x00001];
+ pseudo_bit_t reserved6[0x00008];
+/* -------------- */
+ pseudo_bit_t msg_addr_l[0x00020];
+/* -------------- */
+ pseudo_bit_t msg_addr_h[0x00020];
+/* -------------- */
+ pseudo_bit_t msg_data[0x00010];
+ pseudo_bit_t reserved7[0x00010];
+/* -------------- */
+ pseudo_bit_t pcix_cap_id[0x00008];
+ pseudo_bit_t pcix_next_cap_ptr[0x00008];
+ pseudo_bit_t pcix_command_reg[0x00010];/* PCIX command register */
+/* -------------- */
+ pseudo_bit_t pcix_status_reg[0x00020];/* PCIX Status Register */
+/* -------------- */
+ pseudo_bit_t reserved8[0x00440];
+/* -------------- */
+};
+
+/* NTU QP Map Table Entry */
+
+struct tavorprm_ntu_qpm_st { /* Little Endian */
+ pseudo_bit_t va_h[0x00020]; /* Bits 63:32 of the virtual address to be used in the IB request. The number of bits actually used depends on the page size (e.g. all 52 bits for a 4K page, 51 for an 8K page, etc.). */
+/* -------------- */
+ pseudo_bit_t wm[0x00002]; /* Amount of data to fill in to the read response buffer prior to delivering read response to uplink
+ 00 - forward
+ 01 - MTU
+ 10 - full message
+ 11 - Reserved */
+ pseudo_bit_t mtu[0x00002]; /* MTU of the channel to be used by this page, value is 256*2^MTU bytes */
+ pseudo_bit_t rd_len[0x00003]; /* Length of speculative prefetch for read, value is 16*2^RD_Len bytes */
+ pseudo_bit_t fence[0x00002];
+ pseudo_bit_t reserved0[0x00002];
+ pseudo_bit_t err_fence[0x00001]; /* 0,00 - No action in NTU - normal flow
+ 0,01 - Reserved (fence bits value of "01" is not defined)
+ 0,10 - Enter PCU transaction to Error fifo, NO fence trap to consequent transaction
+ 0,11 - Enter PCU transaction to Error fifo, fence trap to consequent transactions
+ 1,xx - Enter PCU transaction to Error fifo, mark QRM indication in error fifo.
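+ (The notation above is err_fence,fence[1:0] - for example the encoding 0,11 queues the transaction to the error FIFO and fences subsequent transactions.)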
*/
+ pseudo_bit_t va_l[0x00014]; /* Bits 31:12 of the virtual address to be used in the IB request. The number of bits actually used depends on the page size (e.g. all 52 bits for a 4K page, 51 for an 8K page, etc.). */
+/* -------------- */
+ pseudo_bit_t rkey[0x00020]; /* RKey to be placed in RDMA IB request messages */
+/* -------------- */
+ pseudo_bit_t my_qpn[0x00018]; /* Local QP this page is mapped to */
+ pseudo_bit_t s[0x00001]; /* Force solicit event bit in the descriptor */
+ pseudo_bit_t e[0x00001]; /* Force E-bit in the descriptor */
+ pseudo_bit_t s_r[0x00001]; /* S/R# - generate Send as a result of write hit to this page */
+ pseudo_bit_t b[0x00001]; /* Breakpoint - transfer control to firmware for every cycle that hits this page */
+ pseudo_bit_t reserved1[0x00002];
+ pseudo_bit_t ce[0x00001]; /* Cache Enable - entry can be cached if this bit is set. */
+ pseudo_bit_t v[0x00001]; /* Valid bit - the entry is valid only if this bit is set */
+/* -------------- */
+};
+
+/* Event Data Field - Performance Monitor */
+
+struct tavorprm_performance_monitor_event_st { /* Little Endian */
+ struct tavorprm_performance_monitors_st performance_monitor_snapshot;/* Performance monitor snapshot */
+/* -------------- */
+ pseudo_bit_t monitor_number[0x00008];/* 0x01 - SQPC
+ 0x02 - RQPC
+ 0x03 - CQC
+ 0x04 - Rkey
+ 0x05 - TLB
+ 0x06 - port0
+ 0x07 - port1 */
+ pseudo_bit_t reserved0[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00040];
+/* -------------- */
+};
+
+/* Event_data Field - Page Faults */
+
+struct tavorprm_page_fault_event_data_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00040];
+/* -------------- */
+ pseudo_bit_t s_r[0x00001]; /* Send (1) or Receive (0) queue caused page fault */
+ pseudo_bit_t r_l[0x00001]; /* Remote (1) or local (0) access caused fault */
+ pseudo_bit_t w_d[0x00001]; /* WQE (1) or data (0) access caused fault */
+ pseudo_bit_t wqv[0x00001]; /* Indicates whether the message that caused the fault consumes a descriptor (valid for receive queue only).
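+ (For illustration only, not an exhaustive enumeration: a fault taken while fetching a send-queue WQE would be reported with s_r=1 and w_d=1.)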
*/
+ pseudo_bit_t fault_type[0x00004]; /* 0000-0111 - RESERVED
+ 1000 - Translation page not present
+ 1001 - RESERVED
+ 1010 - Page write access violation
+ 1011 - 1101 - RESERVED
+ 1110 - Unsupported non-present page fault
+ 1111 - unsupported write access fault */
+ pseudo_bit_t reserved1[0x00018];
+/* -------------- */
+ pseudo_bit_t va_h[0x00020]; /* Virtual address that caused access fault[63:32] */
+/* -------------- */
+ pseudo_bit_t va_l[0x00020]; /* Virtual address that caused access fault[31:0] */
+/* -------------- */
+ pseudo_bit_t mem_key[0x00020]; /* Memory Key used for address translation */
+/* -------------- */
+};
+
+/* Event_data Field - Port State Change */
+
+struct tavorprm_port_state_change_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00040];
+/* -------------- */
+ pseudo_bit_t reserved1[0x0001c];
+ pseudo_bit_t p[0x00002]; /* Port number (1 or 2) */
+ pseudo_bit_t reserved2[0x00002];
+/* -------------- */
+ pseudo_bit_t reserved3[0x00060];
+/* -------------- */
+};
+
+/* Event_data Field - Completion Queue Error */
+
+struct tavorprm_completion_queue_error_st { /* Little Endian */
+ pseudo_bit_t cqn[0x00018]; /* CQ number event is reported for */
+ pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* -------------- */
+ pseudo_bit_t syndrome[0x00008]; /* Error syndrome
+ 0x01 - CQ overrun
+ 0x02 - CQ access violation error */
+ pseudo_bit_t reserved2[0x00018];
+/* -------------- */
+ pseudo_bit_t reserved3[0x00060];
+/* -------------- */
+};
+
+/* Event_data Field - Completion Event */
+
+struct tavorprm_completion_event_st { /* Little Endian */
+ pseudo_bit_t cqn[0x00018]; /* CQ number event is reported for */
+ pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+ pseudo_bit_t reserved1[0x000a0];
+/* -------------- */
+};
+
+/* Event Queue Entry */
+
+struct tavorprm_event_queue_entry_st { /* Little Endian */
+ pseudo_bit_t event_sub_type[0x00008];/* Event Sub Type.
+ Defined for events which have sub types, zero elsewhere. */
+ pseudo_bit_t reserved0[0x00008];
+ pseudo_bit_t event_type[0x00008]; /* Event Type */
+ pseudo_bit_t reserved1[0x00008];
+/* -------------- */
+ pseudo_bit_t event_data[6][0x00020];/* Delivers auxiliary data to handle event. */
+/* -------------- */
+ pseudo_bit_t reserved2[0x00007];
+ pseudo_bit_t owner[0x00001]; /* Owner of the entry
+ 0 SW
+ 1 HW */
+ pseudo_bit_t reserved3[0x00018];
+/* -------------- */
+};
+
+/* QP/EE State Transitions Command Parameters */
+
+struct tavorprm_qp_ee_state_transitions_st { /* Little Endian */
+ pseudo_bit_t opt_param_mask[0x00020];/* This field defines which optional parameters are passed. Each bit specifies whether an optional parameter is passed (set) or not (cleared). The opt_param_mask is defined for each QP/EE command. */
+/* -------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ struct tavorprm_queue_pair_ee_context_entry_st qpc_eec_data;/* QPC/EEC data */
+/* -------------- */
+ pseudo_bit_t reserved1[0x007c0];
+/* -------------- */
+};
+
+/* Completion Queue Entry Format */
+
+struct tavorprm_completion_queue_entry_st { /* Little Endian */
+ pseudo_bit_t my_qpn[0x00018]; /* Indicates the QP for which completion is being reported */
+ pseudo_bit_t reserved0[0x00004];
+ pseudo_bit_t ver[0x00004]; /* CQE version.
+ 0 for InfiniHost */
+/* -------------- */
+ pseudo_bit_t my_ee[0x00018]; /* EE context (for RD only).
+ Invalid for Bind and Nop operation on RD.
*/
+ pseudo_bit_t reserved1[0x00008];
+/* -------------- */
+ pseudo_bit_t rqpn[0x00018]; /* Remote (source) QP number. Valid in Responder CQE only for Datagram QP. */
+ pseudo_bit_t reserved2[0x00008];
+/* -------------- */
+ pseudo_bit_t rlid[0x00010]; /* Remote (source) LID of the message. Valid in Responder of UD QP CQE only. */
+ pseudo_bit_t ml_path[0x00007]; /* My (destination) LID path bits - these are the lowermost LMC bits of the DLID in an incoming UD packet; higher bits of this field that are not part of the LMC bits are zeroed by HW.
+ Valid in responder of UD QP CQE only.
+ Invalid if incoming message DLID is the permissive LID or incoming message is multicast. */
+ pseudo_bit_t g[0x00001]; /* GRH present indicator. Valid in Responder of UD QP CQE only. */
+ pseudo_bit_t reserved3[0x00001];
+ pseudo_bit_t reserved4[0x00003];
+ pseudo_bit_t sl[0x00004]; /* Service Level of the message. Valid in Responder of UD QP CQE only. */
+/* -------------- */
+ pseudo_bit_t immediate_ethertype_pkey_indx_eecredits[0x00020];/* Valid for receive queue completion only.
+ If the Opcode field indicates that this was a send/write with immediate, this field contains the immediate field of the packet.
+ If completion corresponds to RAW receive queue, bits 15:0 contain the Ethertype field of the packet.
+ If completion corresponds to GSI receive queue, bits 31:16 contain the index in the PKey table that matches the PKey of the message arrived.
+ For CQE of send queue of the reliable connection service, bits [4:0] of this field contain the encoded EEcredits received in last ACK of the message.
+ */
+/* -------------- */
+ pseudo_bit_t byte_cnt[0x00020]; /* Byte count of data actually transferred (valid for receive queue completions only) */
+/* -------------- */
+ pseudo_bit_t wqe_size[0x00006]; /* Size (in 16-byte chunks) of WQE completion is reported for */
+ pseudo_bit_t wqe_adr[0x0001a]; /* Bits 31:6 of WQE virtual address completion is reported for. The 6 least significant bits are zero. */
+/* -------------- */
+ pseudo_bit_t reserved5[0x00007];
+ pseudo_bit_t owner[0x00001]; /* Owner field. Zero value of this field means SW ownership of CQE. */
+ pseudo_bit_t reserved6[0x0000d];
+ pseudo_bit_t reserved7[0x00001];
+ pseudo_bit_t reserved8[0x00001];
+ pseudo_bit_t s[0x00001]; /* If set, completion is reported for the Send queue; if cleared - the receive queue. */
+ pseudo_bit_t opcode[0x00008]; /* The opcode of WQE completion is reported for.
+ For CQEs corresponding to send completion, the NOPCODE field of the WQE is copied to this field.
+ For CQEs corresponding to receive completions, the opcode field of the last packet in the message is copied to this field.
+ For CQEs corresponding to the receive queue of QPs mapped to QP1, the opcode will be SEND with Immediate (messages are guaranteed to be SEND only) + + The following values are reported in case of completion with error: + 0xFE - For completion with error on Receive Queues + 0xFF - For completion with error on Send Queues */ +/* -------------- */ +}; + +/* 0 */ + +struct tavorprm_tavor_prm_st { /* Little Endian */ + struct tavorprm_completion_queue_entry_st completion_queue_entry;/* Completion Queue Entry Format */ +/* -------------- */ + pseudo_bit_t reserved0[0x7ff00]; +/* -------------- */ + struct tavorprm_qp_ee_state_transitions_st qp_ee_state_transitions;/* QP/EE State Transitions Command Parameters */ +/* -------------- */ + pseudo_bit_t reserved1[0x7f000]; +/* -------------- */ + struct tavorprm_event_queue_entry_st event_queue_entry;/* Event Queue Entry */ +/* -------------- */ + pseudo_bit_t reserved2[0x7ff00]; +/* -------------- */ + struct tavorprm_completion_event_st completion_event;/* Event_data Field - Completion Event */ +/* -------------- */ + pseudo_bit_t reserved3[0x7ff40]; +/* -------------- */ + struct tavorprm_completion_queue_error_st completion_queue_error;/* Event_data Field - Completion Queue Error */ +/* -------------- */ + pseudo_bit_t reserved4[0x7ff40]; +/* -------------- */ + struct tavorprm_port_state_change_st port_state_change;/* Event_data Field - Port State Change */ +/* -------------- */ + pseudo_bit_t reserved5[0xfff40]; +/* -------------- */ + struct tavorprm_page_fault_event_data_st page_fault_event_data;/* Event_data Field - Page Faults */ +/* -------------- */ + pseudo_bit_t reserved6[0x7ff40]; +/* -------------- */ + struct tavorprm_performance_monitor_event_st performance_monitor_event;/* Event Data Field - Performance Monitor */ +/* -------------- */ + pseudo_bit_t reserved7[0x7ff20]; +/* -------------- */ + struct tavorprm_ntu_qpm_st ntu_qpm; /* NTU QP Map Table Entry */ +/* -------------- */ + pseudo_bit_t reserved8[0x7ff80]; +/* -------------- */ + struct tavorprm_mt23108_type0_st mt23108_type0;/* InfiniHost Type0 Configuration Header */ +/* -------------- */ + pseudo_bit_t reserved9[0x7f800]; +/* -------------- */ + struct tavorprm_qp_ee_event_st qp_ee_event;/* Event_data Field - QP/EE Events */ +/* -------------- */ + pseudo_bit_t reserved10[0x7ff40]; +/* -------------- */ + struct tavorprm_ud_address_vector_st ud_address_vector;/* UD Address Vector */ +/* -------------- */ + pseudo_bit_t reserved11[0x7ff00]; +/* -------------- */ + struct tavorprm_queue_pair_ee_context_entry_st queue_pair_ee_context_entry;/* QP and EE Context Entry */ +/* -------------- */ + pseudo_bit_t reserved12[0x7f800]; +/* -------------- */ + struct tavorprm_address_path_st address_path;/* Address Path */ +/* -------------- */ + pseudo_bit_t reserved13[0x7ff00]; +/* -------------- */ + struct tavorprm_completion_queue_context_st completion_queue_context;/* Completion Queue Context Table Entry */ +/* -------------- */ + pseudo_bit_t reserved14[0x7fe00]; +/* -------------- */ + struct tavorprm_mpt_st mpt; /* Memory Protection Table (MPT) Entry */ +/* -------------- */ + pseudo_bit_t reserved15[0x7fe00]; +/* -------------- */ + struct tavorprm_mtt_st mtt; /* Memory Translation Table (MTT) Entry */ +/* -------------- */ + pseudo_bit_t reserved16[0x7ffc0]; +/* -------------- */ + struct tavorprm_eqc_st eqc; /* Event Queue Context Table Entry */ +/* -------------- */ + pseudo_bit_t reserved17[0x7fe00]; +/* -------------- */ + struct tavorprm_performance_monitors_st 
performance_monitors;/* Performance Monitors */ +/* -------------- */ + pseudo_bit_t reserved18[0x7ff80]; +/* -------------- */ + struct tavorprm_hca_command_register_st hca_command_register;/* HCA Command Register (HCR) */ +/* -------------- */ + pseudo_bit_t reserved19[0xfff20]; +/* -------------- */ + struct tavorprm_init_hca_st init_hca;/* INIT_HCA & QUERY_HCA Parameters Block */ +/* -------------- */ + pseudo_bit_t reserved20[0x7f000]; +/* -------------- */ + struct tavorprm_qpcbaseaddr_st qpcbaseaddr;/* QPC/EEC/CQC/EQC/RDB Parameters */ +/* -------------- */ + pseudo_bit_t reserved21[0x7fc00]; +/* -------------- */ + struct tavorprm_udavtable_memory_parameters_st udavtable_memory_parameters;/* Memory Access Parameters for UD Address Vector Table */ +/* -------------- */ + pseudo_bit_t reserved22[0x7ffc0]; +/* -------------- */ + struct tavorprm_multicastparam_st multicastparam;/* Multicast Support Parameters */ +/* -------------- */ + pseudo_bit_t reserved23[0x7ff00]; +/* -------------- */ + struct tavorprm_tptparams_st tptparams;/* Translation and Protection Tables Parameters */ +/* -------------- */ + pseudo_bit_t reserved24[0x7ff00]; +/* -------------- */ + struct tavorprm_query_ddr_st query_ddr;/* QUERY_DDR Parameters Block */ +/* -------------- */ + pseudo_bit_t reserved25[0x7f800]; +/* -------------- */ + struct tavorprm_dimminfo_st dimminfo;/* Logical DIMM Information */ +/* -------------- */ + pseudo_bit_t reserved26[0x7ff00]; +/* -------------- */ + struct tavorprm_query_fw_st query_fw;/* QUERY_FW Parameters Block */ +/* -------------- */ + pseudo_bit_t reserved27[0x7f800]; +/* -------------- */ + struct tavorprm_query_adapter_st query_adapter;/* QUERY_ADAPTER Parameters Block */ +/* -------------- */ + pseudo_bit_t reserved28[0x7f800]; +/* -------------- */ + struct tavorprm_query_dev_lim_st query_dev_lim;/* Query Device Limitations */ +/* -------------- */ + pseudo_bit_t reserved29[0x7f800]; +/* -------------- */ + struct tavorprm_uar_params_st uar_params;/* UAR Parameters */ +/* -------------- */ + pseudo_bit_t reserved30[0x7ff00]; +/* -------------- */ + struct tavorprm_init_ib_st init_ib; /* INIT_IB Parameters */ +/* -------------- */ + pseudo_bit_t reserved31[0x7f800]; +/* -------------- */ + struct tavorprm_mgm_entry_st mgm_entry;/* Multicast Group Member */ +/* -------------- */ + pseudo_bit_t reserved32[0x7fe00]; +/* -------------- */ + struct tavorprm_set_ib_st set_ib; /* SET_IB Parameters */ +/* -------------- */ + pseudo_bit_t reserved33[0x7fe00]; +/* -------------- */ + struct tavorprm_rd_send_doorbell_st rd_send_doorbell;/* RD-send doorbell */ +/* -------------- */ + pseudo_bit_t reserved34[0x7ff80]; +/* -------------- */ + struct tavorprm_send_doorbell_st send_doorbell;/* Send doorbell */ +/* -------------- */ + pseudo_bit_t reserved35[0x7ffc0]; +/* -------------- */ + struct tavorprm_receive_doorbell_st receive_doorbell;/* Receive doorbell */ +/* -------------- */ + pseudo_bit_t reserved36[0x7ffc0]; +/* -------------- */ + struct tavorprm_cq_cmd_doorbell_st cq_cmd_doorbell;/* CQ Doorbell */ +/* -------------- */ + pseudo_bit_t reserved37[0x7ffc0]; +/* -------------- */ + struct tavorprm_eq_cmd_doorbell_st eq_cmd_doorbell;/* EQ Doorbell */ +/* -------------- */ + pseudo_bit_t reserved38[0x7ffc0]; +/* -------------- */ + struct tavorprm_uar_st uar; /* User Access Region */ +/* -------------- */ + pseudo_bit_t reserved39[0x7c000]; +/* -------------- */ + struct tavorprm_mgmqp_st mgmqp; /* Multicast Group Member QP */ +/* -------------- */ + pseudo_bit_t 
reserved40[0x7ffe0];
+/* -------------- */
+ struct tavorprm_query_debug_msg_st query_debug_msg;/* Query Debug Message */
+/* -------------- */
+ pseudo_bit_t reserved41[0x7f800];
+/* -------------- */
+ struct tavorprm_sys_en_out_param_st sys_en_out_param;/* SYS_EN Output Parameter */
+/* -------------- */
+ pseudo_bit_t reserved42[0x7ffc0];
+/* -------------- */
+ struct tavorprm_resize_cq_st resize_cq;/* Resize CQ Input Mailbox */
+/* -------------- */
+ pseudo_bit_t reserved43[0x7fe00];
+/* -------------- */
+ struct tavorprm_completion_with_error_st completion_with_error;/* Completion with Error CQE */
+/* -------------- */
+ pseudo_bit_t reserved44[0x7ff00];
+/* -------------- */
+ struct tavorprm_hcr_completion_event_st hcr_completion_event;/* Event_data Field - HCR Completion Event */
+/* -------------- */
+ pseudo_bit_t reserved45[0x7ff40];
+/* -------------- */
+ struct tavorprm_transport_and_ci_error_counters_st transport_and_ci_error_counters;/* Transport and CI Error Counters */
+/* -------------- */
+ pseudo_bit_t reserved46[0x7f000];
+/* -------------- */
+ struct tavorprm_performance_counters_st performance_counters;/* Performance Counters */
+/* -------------- */
+ pseudo_bit_t reserved47[0x7f800];
+/* -------------- */
+ struct tavorprm_query_bar_st query_bar;/* Query BAR */
+/* -------------- */
+ pseudo_bit_t reserved48[0x7ffc0];
+/* -------------- */
+ struct tavorprm_cfg_schq_st cfg_schq;/* Schedule queues configuration */
+/* -------------- */
+ pseudo_bit_t reserved49[0x7f800];
+/* -------------- */
+ struct tavorprm_mt23108_configuration_registers_st mt23108_configuration_registers;/* InfiniHost Configuration Registers - Used in Mem-Free mode only */
+/* -------------- */
+ pseudo_bit_t reserved50[0x80000];
+/* -------------- */
+ pseudo_bit_t reserved51[0x00100];
+/* -------------- */
+ pseudo_bit_t reserved52[0x7ff00];
+/* -------------- */
+ pseudo_bit_t reserved53[0x00100];
+/* -------------- */
+ pseudo_bit_t reserved54[0x7ff00];
+/* -------------- */
+ struct tavorprm_srq_context_st srq_context;/* SRQ Context */
+/* -------------- */
+ pseudo_bit_t reserved55[0x7ff00];
+/* -------------- */
+ struct tavorprm_mod_stat_cfg_st mod_stat_cfg;/* MOD_STAT_CFG */
+/* -------------- */
+ pseudo_bit_t reserved56[0x00080];
+/* -------------- */
+ pseudo_bit_t reserved57[0x00040];
+/* -------------- */
+ pseudo_bit_t reserved58[0x1bff740];
+/* -------------- */
+};
+
+/* Event_data Field - ECC Detection Event */
+
+struct tavorprm_scrubbing_event_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00080];
+/* -------------- */
+ pseudo_bit_t ecc_err_fifo_word1[0x00010];/* For debug: ECC error was discovered and corrected by InfiniHost */
+ pseudo_bit_t reserved1[0x0000f];
+ pseudo_bit_t overflow[0x00001]; /* Fatal: ECC error FIFO overflow - ECC errors were detected, which may or may not have been corrected by InfiniHost */
+/* -------------- */
+ pseudo_bit_t ecc_err_fifo_word2[0x00020];/* For debug: ECC error was discovered and corrected by InfiniHost */
+/* -------------- */
+};
+
+/* PBL */
+
+struct tavorprm_pbl_st { /* Little Endian */
+ pseudo_bit_t mtt_0_h[0x00020]; /* First MTT[63:32] */
+/* -------------- */
+ pseudo_bit_t mtt_0_l[0x00020]; /* First MTT[31:0] */
+/* -------------- */
+ pseudo_bit_t mtt_1_h[0x00020]; /* Second MTT[63:32] */
+/* -------------- */
+ pseudo_bit_t mtt_1_l[0x00020]; /* Second MTT[31:0] */
+/* -------------- */
+ pseudo_bit_t mtt_2_h[0x00020]; /* Third MTT[63:32] */
+/* -------------- */
+ pseudo_bit_t mtt_2_l[0x00020]; /* Third
MTT[31:0] */ +/* -------------- */ + pseudo_bit_t mtt_3_h[0x00020]; /* Fourth MTT[63:32] */ +/* -------------- */ + pseudo_bit_t mtt_3_l[0x00020]; /* Fourth MTT[31:0] */ +/* -------------- */ +}; + +/* Miscellaneous Counters */ + +struct tavorprm_misc_counters_st { /* Little Endian */ + pseudo_bit_t ddr_scan_cnt[0x00020]; /* Number of times whole of DDR was scanned */ +/* -------------- */ + pseudo_bit_t reserved0[0x007e0]; +/* -------------- */ +}; + +/* Fast_Registration_Segment */ + +struct tavorprm_fast_registration_segment_st { /* Little Endian */ + pseudo_bit_t reserved0[0x0001b]; + pseudo_bit_t lr[0x00001]; /* If set - Local Read access will be enabled */ + pseudo_bit_t lw[0x00001]; /* If set - Local Write access will be enabled */ + pseudo_bit_t rr[0x00001]; /* If set - Remote Read access will be enabled */ + pseudo_bit_t rw[0x00001]; /* If set - Remote Write access will be enabled */ + pseudo_bit_t a[0x00001]; /* If set - Remote Atomic access will be enabled */ +/* -------------- */ + pseudo_bit_t pbl_ptr_63_32[0x00020];/* Physical address pointer [63:32] to the physical block list */ +/* -------------- */ + pseudo_bit_t mem_key[0x00020]; /* Memory Key on which the fast registration is executed on. */ +/* -------------- */ + pseudo_bit_t page_size[0x00005]; /* Page size used for the region. Actual size is [4K]*2^Page_size bytes. + page_size should be less than 20. */ + pseudo_bit_t reserved1[0x00002]; + pseudo_bit_t zb[0x00001]; /* Zero Based Region */ + pseudo_bit_t pbl_ptr_31_8[0x00018]; /* Physical address pointer [31:8] to the physical block list */ +/* -------------- */ + pseudo_bit_t start_address_h[0x00020];/* Start Address[63:32] - Virtual Address where this region starts */ +/* -------------- */ + pseudo_bit_t start_address_l[0x00020];/* Start Address[31:0] - Virtual Address where this region starts */ +/* -------------- */ + pseudo_bit_t reg_len_h[0x00020]; /* Region Length[63:32] */ +/* -------------- */ + pseudo_bit_t reg_len_l[0x00020]; /* Region Length[31:0] */ +/* -------------- */ +}; +#endif /* H_prefix_tavorprm_bits_fixnames_MT23108_PRM_csp_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM_append.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM_append.h new file mode 100644 index 00000000..bf2e299e --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/MT23108_PRM_append.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/***
+ *** This file was generated at "Thu Apr 29 10:25:56 2004"
+ *** by:
+ *** % csp_bf -copyright=/mswg/misc/license-header.txt -bits MT23108_PRM_append.csp
+ ***/
+
+#ifndef H_bits_MT23108_PRM_append_csp_H
+#define H_bits_MT23108_PRM_append_csp_H
+
+
+/* Gather entry with inline data */
+
+struct wqe_segment_data_inline_st { /* Little Endian */
+ pseudo_bit_t byte_count[0x0000a]; /* Not including padding for 16Byte chunks */
+ pseudo_bit_t reserved0[0x00015];
+ pseudo_bit_t always1[0x00001];
+/* -------------- */
+ pseudo_bit_t data[0x00018]; /* Data may be more than this segment size - in 16Byte chunks */
+/* -------------- */
+};
+
+/* Scatter/Gather entry with a pointer */
+
+struct wqe_segment_data_ptr_st { /* Little Endian */
+ pseudo_bit_t byte_count[0x0001f];
+ pseudo_bit_t always0[0x00001];
+/* -------------- */
+ pseudo_bit_t l_key[0x00020];
+/* -------------- */
+ pseudo_bit_t local_address_h[0x00020];
+/* -------------- */
+ pseudo_bit_t local_address_l[0x00020];
+/* -------------- */
+};
+
+/* */
+
+struct wqe_segment_atomic_st { /* Little Endian */
+ pseudo_bit_t swap_add_h[0x00020];
+/* -------------- */
+ pseudo_bit_t swap_add_l[0x00020];
+/* -------------- */
+ pseudo_bit_t compare_h[0x00020];
+/* -------------- */
+ pseudo_bit_t compare_l[0x00020];
+/* -------------- */
+};
+
+/* */
+
+struct wqe_segment_remote_address_st { /* Little Endian */
+ pseudo_bit_t remote_virt_addr_h[0x00020];
+/* -------------- */
+ pseudo_bit_t remote_virt_addr_l[0x00020];
+/* -------------- */
+ pseudo_bit_t rkey[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+};
+
+/* Bind memory window segment */
+
+struct wqe_segment_bind_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x0001d];
+ pseudo_bit_t rr[0x00001]; /* Remote read */
+ pseudo_bit_t rw[0x00001]; /* Remote write */
+ pseudo_bit_t a[0x00001]; /* atomic */
+/* -------------- */
+ pseudo_bit_t reserved1[0x00020];
+/* -------------- */
+ pseudo_bit_t new_rkey[0x00020];
+/* -------------- */
+ pseudo_bit_t region_lkey[0x00020];
+/* -------------- */
+ pseudo_bit_t start_address_h[0x00020];
+/* -------------- */
+ pseudo_bit_t start_address_l[0x00020];
+/* -------------- */
+ pseudo_bit_t length_h[0x00020];
+/* -------------- */
+ pseudo_bit_t length_l[0x00020];
+/* -------------- */
+};
+
+/* */
+
+struct wqe_segment_ud_st { /* Little Endian */
+ pseudo_bit_t reserved0[0x00020];
+/* -------------- */
+ pseudo_bit_t l_key[0x00020]; /* memory key for UD AV */
+/* -------------- */
+ pseudo_bit_t av_address_63_32[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00005];
+ pseudo_bit_t av_address_31_5[0x0001b];
+/* -------------- */
+ pseudo_bit_t reserved2[0x00080];
+/* -------------- */
+ pseudo_bit_t destination_qp[0x00018];
+ pseudo_bit_t reserved3[0x00008];
+/* -------------- */
+ pseudo_bit_t q_key[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved4[0x00040];
+/* -------------- */
+};
+
+/* */
+
+struct wqe_segment_rd_st { /* Little Endian */
+ pseudo_bit_t destination_qp[0x00018];
+ pseudo_bit_t reserved0[0x00008];
+/* -------------- */
+ pseudo_bit_t q_key[0x00020];
+/* -------------- */
+ pseudo_bit_t reserved1[0x00040];
+/* -------------- */
+};
+
+/* */
+
+struct wqe_segment_ctrl_recv_st
{ /* Little Endian */ + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t e[0x00001]; /* WQE event */ + pseudo_bit_t c[0x00001]; /* Create CQE (for "requested signalling" QP) */ + pseudo_bit_t reserved1[0x0001c]; +/* -------------- */ + pseudo_bit_t reserved2[0x00020]; +/* -------------- */ +}; + +/* */ + +struct wqe_segment_ctrl_mlx_st { /* Little Endian */ + pseudo_bit_t reserved0[0x00002]; + pseudo_bit_t e[0x00001]; /* WQE event */ + pseudo_bit_t c[0x00001]; /* Create CQE (for "requested signalling" QP) */ + pseudo_bit_t reserved1[0x00004]; + pseudo_bit_t sl[0x00004]; + pseudo_bit_t max_statrate[0x00003]; + pseudo_bit_t reserved2[0x00001]; + pseudo_bit_t slr[0x00001]; /* 0= take slid from port. 1= take slid from given headers */ + pseudo_bit_t v15[0x00001]; /* Send packet over VL15 */ + pseudo_bit_t reserved3[0x0000e]; +/* -------------- */ + pseudo_bit_t vcrc[0x00010]; /* Packet's VCRC (if not 0 - otherwise computed by HW) */ + pseudo_bit_t rlid[0x00010]; /* Destination LID (must match given headers) */ +/* -------------- */ +}; + +/* */ + +struct wqe_segment_ctrl_send_st { /* Little Endian */ + pseudo_bit_t always1[0x00001]; + pseudo_bit_t s[0x00001]; /* Solicited event */ + pseudo_bit_t e[0x00001]; /* WQE event */ + pseudo_bit_t c[0x00001]; /* Create CQE (for "requested signalling" QP) */ + pseudo_bit_t reserved0[0x0001c]; +/* -------------- */ + pseudo_bit_t immediate[0x00020]; +/* -------------- */ +}; + +/* */ + +struct wqe_segment_next_st { /* Little Endian */ + pseudo_bit_t nopcode[0x00005]; /* next opcode */ + pseudo_bit_t reserved0[0x00001]; + pseudo_bit_t nda_31_6[0x0001a]; /* NDA[31:6] */ +/* -------------- */ + pseudo_bit_t nds[0x00006]; + pseudo_bit_t f[0x00001]; /* fence bit */ + pseudo_bit_t dbd[0x00001]; /* doorbell rung */ + pseudo_bit_t nee[0x00018]; /* next EE */ +/* -------------- */ +}; +#endif /* H_bits_MT23108_PRM_append_csp_H */ diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/cr_types.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/cr_types.h new file mode 100644 index 00000000..de4988d9 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/cr_types.h @@ -0,0 +1,60 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef H_CR_TYPES_H +#define H_CR_TYPES_H + + +/* Macros to use with device's header file */ + +#define MT_BIT_OFFSET(object_struct,reg_path) \ + ((MT_offset_t) &( ((struct object_struct *)(0))-> reg_path )) + +#define MT_BIT_SIZE(object_struct,reg_path) \ + ((MT_size_t) sizeof( ((struct object_struct *)(0))-> reg_path )) + +#define MT_BIT_OFFSET_SIZE(object_struct,reg_path) \ + MT_BIT_OFFSET(object_struct,reg_path),MT_BIT_SIZE(object_struct,reg_path) + +#undef MT_BYTE_OFFSET +#define MT_BYTE_OFFSET(object_struct,reg_path) \ + ((MT_offset_t) (MT_BIT_OFFSET(object_struct,reg_path)/8)) + +#define MT_BYTE_SIZE(object_struct,reg_path) \ + ((MT_size_t) MT_BIT_SIZE(object_struct,reg_path)/8) + +#define MT_BYTE_OFFSET_SIZE(object_struct,reg_path) \ + MT_BYTE_OFFSET(object_struct,reg_path),MT_BYTE_SIZE(object_struct,reg_path) + +typedef u_int8_t pseudo_bit_t; + +#endif + diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_dev_defs.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_dev_defs.h new file mode 100644 index 00000000..d46d50de --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_dev_defs.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+#ifndef H_TAVOR_DEV_DEFS_H_
+#define H_TAVOR_DEV_DEFS_H_
+
+#define MT23108_DEV_ID 23108
+#define MT25208_DEV_ID 25208
+/* cr-space offsets */
+#define TAVOR_HCR_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,hca_command_interface_register)
+/* offset of HCR from cr-space base */
+#define TAVOR_ECR_H_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,ecr_h)
+#define TAVOR_ECR_L_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,ecr_l)
+#define TAVOR_CLR_ECR_H_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,clr_ecr_h)
+#define TAVOR_CLR_ECR_L_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,clr_ecr_l)
+#define TAVOR_CLR_INT_H_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,clr_int_h)
+#define TAVOR_CLR_INT_L_OFFSET_FROM_CR_BASE \
+	MT_BYTE_OFFSET(tavorprm_mt23108_configuration_registers_st,clr_int_l)
+
+
+
+#endif /* H_TAVOR_DEV_DEFS_H_ */
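Editor's note: the MT_BYTE_OFFSET() uses above rely on the pseudo_bit_t
convention from cr_types.h: each field of a generated layout struct is a byte
array sized in bits, so offsetof-style arithmetic on a NULL struct pointer
yields a bit offset, and dividing by 8 yields a register's byte offset. A
minimal sketch against a struct defined earlier in this patch (illustrative
only, guarded out):

#if 0 /* editor's illustration */
/* l_key follows byte_count (31 bits) + always0 (1 bit) => bit offset 32 */
MT_offset_t lkey_bits  = MT_BIT_OFFSET (wqe_segment_data_ptr_st, l_key); /* 32 */
MT_offset_t lkey_bytes = MT_BYTE_OFFSET(wqe_segment_data_ptr_st, l_key); /*  4 */
MT_size_t   lkey_width = MT_BIT_SIZE  (wqe_segment_data_ptr_st, l_key);  /* 32 */
#endif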
diff --git a/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_if_defs.h b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_if_defs.h
new file mode 100644
index 00000000..76eabe9d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/tavor_arch_db/tavor_if_defs.h
@@ -0,0 +1,548 @@
+/*
+ * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef H_TAVOR_IF_DEFS_H
+#define H_TAVOR_IF_DEFS_H
+
+// FW TEAM: timeouts are divided into 3 classes plus an unknown-latency bucket - values are in usec:
+//#define TAVOR_IF_CMD_ETIME_CLASS_A 5000000
+//#define TAVOR_IF_CMD_ETIME_CLASS_B 10000000
+//#define TAVOR_IF_CMD_ETIME_CLASS_C 20000000
+//#define TAVOR_IF_CMD_ETIME_UNKNOWN_LAT 50000000
+
+/* TK: FW cannot guarantee command completion timeouts, because doorbells can
+   starve command processing. So we use a 5-minute budget (300,000,000 usec),
+   hoping that the DBs will stop and the command will have time to complete. */
+#define TAVOR_IF_CMD_ETIME_CLASS_A 300000000
+#define TAVOR_IF_CMD_ETIME_CLASS_B 300000000
+#define TAVOR_IF_CMD_ETIME_CLASS_C 300000000
+/* Unknown-latency commands get the same budget; this symbol is referenced by
+   ACCESS_DDR, DIAG_RPRT and the DEBUG_MSG commands below. */
+#define TAVOR_IF_CMD_ETIME_UNKNOWN_LAT 300000000
+
+/* macros to define the estimated time in microseconds to execute a command */
+#define TAVOR_IF_CMD_ETIME_SYS_EN TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_SYS_DIS TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_QUERY_DEV_LIM TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_QUERY_FW TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_ACCESS_DDR TAVOR_IF_CMD_ETIME_UNKNOWN_LAT
+#define TAVOR_IF_CMD_ETIME_QUERY_DDR TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_QUERY_ADAPTER TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_INIT_HCA TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_CLOSE_HCA TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_INIT_IB TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_CLOSE_IB TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_QUERY_HCA TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_SET_IB TAVOR_IF_CMD_ETIME_CLASS_B
+
+#define TAVOR_IF_CMD_ETIME_SW2HW_MPT TAVOR_IF_CMD_ETIME_CLASS_B
+#define TAVOR_IF_CMD_ETIME_QUERY_MPT TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_HW2SW_MPT TAVOR_IF_CMD_ETIME_CLASS_B
+#define TAVOR_IF_CMD_ETIME_READ_MTT TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_WRITE_MTT TAVOR_IF_CMD_ETIME_CLASS_B
+#define TAVOR_IF_CMD_ETIME_SYNC_TPT TAVOR_IF_CMD_ETIME_CLASS_B
+
+#define TAVOR_IF_CMD_ETIME_MAP_EQ TAVOR_IF_CMD_ETIME_CLASS_B
+#define TAVOR_IF_CMD_ETIME_SW2HW_EQ TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_HW2SW_EQ TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_QUERY_EQ TAVOR_IF_CMD_ETIME_CLASS_A
+
+#define TAVOR_IF_CMD_ETIME_SW2HW_CQ TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_HW2SW_CQ TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_QUERY_CQ TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_RESIZE_CQ TAVOR_IF_CMD_ETIME_CLASS_A
+
+#define TAVOR_IF_CMD_ETIME_RST2INIT_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_INIT2INIT_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_INIT2RTR_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_RTR2RTS_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_RTS2RTS_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_SQERR2RTS_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_2ERR_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_RTS2SQD TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_SQD2RTS_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_ERR2RST_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_QUERY_QPEE TAVOR_IF_CMD_ETIME_CLASS_C
+
+#define TAVOR_IF_CMD_ETIME_CONF_SPECIAL_QP TAVOR_IF_CMD_ETIME_CLASS_B
+#define TAVOR_IF_CMD_ETIME_MAD_IFC TAVOR_IF_CMD_ETIME_CLASS_C
+
+#define TAVOR_IF_CMD_ETIME_READ_MGM TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_WRITE_MGM TAVOR_IF_CMD_ETIME_CLASS_A
+#define TAVOR_IF_CMD_ETIME_MGID_HASH TAVOR_IF_CMD_ETIME_CLASS_A
+
+#define TAVOR_IF_CMD_ETIME_CONF_PM TAVOR_IF_CMD_ETIME_CLASS_C
+
+#define TAVOR_IF_CMD_ETIME_CONF_NTU TAVOR_IF_CMD_ETIME_CLASS_C
+#define TAVOR_IF_CMD_ETIME_QUERY_NTU TAVOR_IF_CMD_ETIME_CLASS_C
+
+#define TAVOR_IF_CMD_ETIME_DIAG_RPRT TAVOR_IF_CMD_ETIME_UNKNOWN_LAT
+#define TAVOR_IF_CMD_ETIME_QUERY_DEBUG_MSG TAVOR_IF_CMD_ETIME_UNKNOWN_LAT
+#define TAVOR_IF_CMD_ETIME_SET_DEBUG_MSG TAVOR_IF_CMD_ETIME_UNKNOWN_LAT
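+
+/* Editor's note: an illustrative (hypothetical) consumer of the ETIME budgets
+   above - the command interface polls the HCR "go" bit for at most the
+   command's budget. The helper and variable names here are invented: */
+#if 0
+static int tavor_cmd_wait_example(volatile u32 *hcr_go_bit, u32 etime_us)
+{
+	u32 waited_us = 0;
+	while ((*hcr_go_bit & 1) && waited_us < etime_us) {
+		cl_thread_suspend(1);		/* sleep ~1 ms between polls */
+		waited_us += 1000;
+	}
+	return (*hcr_go_bit & 1) ? -1 : 0;	/* -1 = timed out */
+}
+/* usage: tavor_cmd_wait_example(go_bit, TAVOR_IF_CMD_ETIME_QUERY_FW); */
+#endif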
+
+
+//////////////////////// ADDED BY FW TEAM /////////////////////////
+#define TAVOR_IF_STRIDE_QPC_BIT 8
+#define TAVOR_IF_STRIDE_EEC_BIT 8
+#define TAVOR_IF_STRIDE_SRQC_BIT 5
+#define TAVOR_IF_STRIDE_CQC_BIT 6
+#define TAVOR_IF_STRIDE_EQC_BIT 6
+#define TAVOR_IF_STRIDE_MPT_BIT 6
+#define TAVOR_IF_STRIDE_MTT_BIT 3
+#define TAVOR_IF_STRIDE_EQPC_BIT 5
+#define TAVOR_IF_STRIDE_EEEC_BIT 5
+#define TAVOR_IF_STRIDE_APM_BIT 5
+#define TAVOR_IF_STRIDE_MCST_BIT 5
+#define TAVOR_IF_STRIDE_UARSCR_BIT 5
+
+#define TAVOR_IF_STRIDE_QPC (1<<TAVOR_IF_STRIDE_QPC_BIT)
+#define TAVOR_IF_STRIDE_EEC (1<<TAVOR_IF_STRIDE_EEC_BIT)
+#define TAVOR_IF_STRIDE_SRQC (1<<TAVOR_IF_STRIDE_SRQC_BIT)
+#define TAVOR_IF_STRIDE_CQC (1<<TAVOR_IF_STRIDE_CQC_BIT)
+#define TAVOR_IF_STRIDE_EQC (1<<TAVOR_IF_STRIDE_EQC_BIT)
+#define TAVOR_IF_STRIDE_MPT (1<<TAVOR_IF_STRIDE_MPT_BIT)
+#define TAVOR_IF_STRIDE_MTT (1<<TAVOR_IF_STRIDE_MTT_BIT)
+#define TAVOR_IF_STRIDE_EQPC (1<<TAVOR_IF_STRIDE_EQPC_BIT)
+#define TAVOR_IF_STRIDE_EEEC (1<<TAVOR_IF_STRIDE_EEEC_BIT)
+#define TAVOR_IF_STRIDE_APM (1<<TAVOR_IF_STRIDE_APM_BIT)
+#define TAVOR_IF_STRIDE_MCST (1<<TAVOR_IF_STRIDE_MCST_BIT)
+#define TAVOR_IF_STRIDE_UARSCR (1<<TAVOR_IF_STRIDE_UARSCR_BIT)
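+
+/* Editor's note: illustration only - each *_BIT value is the log2 of the
+   per-entry context size, so the derived strides are per-entry byte counts: */
+#if 0
+	size_t n_qps = 64;
+	size_t qpc_entry_bytes = TAVOR_IF_STRIDE_QPC;		/* 1 << 8 == 256 */
+	size_t qpc_table_bytes = n_qps * TAVOR_IF_STRIDE_QPC;	/* 16 KB */
+#endif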
+
+/* CQ-command doorbell command encoding. (The entry list did not survive in
+   this copy of the patch; only the tail of the last entry's comment remains:
+   "...>=N, N=cq_param".) */
+typedef enum tavor_if_uar_cq_cmd {
+	/* ... entries not recoverable ... */
+} tavor_if_uar_cq_cmd_t;
+
+/* EQ-command doorbell command encoding */
+typedef enum tavor_if_uar_eq_cmd {
+	TAVOR_IF_UAR_EQ_INC_CI =1,/* Increment EQ's consumer index */
+	TAVOR_IF_UAR_EQ_INT_ARM =2,/* Request interrupt for next event (next EQE posted)*/
+	TAVOR_IF_UAR_EQ_DISARM_CQ =3,/* Disarm CQ request notification state machine (param= CQ num)*/
+	TAVOR_IF_UAR_EQ_SET_CI =4,/* Set EQ's consumer index to value given in param. */
+	TAVOR_IF_UAR_EQ_INT_ALWAYS_ARM =5 /* interrupts are generated for every EQE generated */
+} tavor_if_uar_eq_cmd_t;
+
+/* QP-state encoding */
+typedef enum tavor_if_qp_state {
+	TAVOR_IF_QP_STATE_RESET = 0,
+	TAVOR_IF_QP_STATE_INIT = 1,
+	TAVOR_IF_QP_STATE_RTR = 2,
+	TAVOR_IF_QP_STATE_RTS = 3,
+	TAVOR_IF_QP_STATE_SQER = 4,
+	TAVOR_IF_QP_STATE_SQD = 5,
+	TAVOR_IF_QP_STATE_ERR = 6,
+	TAVOR_IF_QP_STATE_DRAINING = 7,
+	TAVOR_IF_QP_STATE_BUSY = 8,
+	TAVOR_IF_QP_STATE_SUSPENDED = 9
+} tavor_if_qp_state_t;
+
+/* Old CQ state encoding (for STS,STC).
+ * Kept here for informational purposes.
+ *
+	TAVOR_IF_CQ_STATE_DISARMED = 0,
+	TAVOR_IF_CQ_STATE_ARMED = 1,
+	TAVOR_IF_CQ_STATE_FIRED = 2
+ *
+ */
+
+/* CQ state encoding */
+typedef enum tavor_if_cq_state {
+	TAVOR_IF_CQ_STATE_DISARMED = 0x0,
+	TAVOR_IF_CQ_STATE_ARMED = 0x1,
+	TAVOR_IF_CQ_STATE_ARMED_SOL = 0x4,
+	TAVOR_IF_CQ_STATE_FIRED = 0xA
+} tavor_if_cq_state_t;
+
+/* EQ state encoding */
+
+typedef enum tavor_if_eq_state {
+	TAVOR_IF_EQ_STATE_ARMED = 0x1,
+	TAVOR_IF_EQ_STATE_FIRED = 0x2,
+	TAVOR_IF_EQ_STATE_ALWAYS_ARMED = 0x3
+} tavor_if_eq_state_t;
+
+/* Miscellaneous Values: limits, tunable parameters, etc. */
+enum
+{
+	TAVOR_IF_HOST_BIGENDIAN = 1, /* host is big endian*/
+	TAVOR_NUM_RESERVED_PDS = 0, /* Obsolete: will be moved to internal FW define once QUERY_DEV_LIM is implemented in full in driver */
+	TAVOR_NUM_RESERVED_EQS = 0, /* Obsolete: will be moved to internal FW define once QUERY_DEV_LIM is implemented in full in driver */
+	TAVOR_NUM_RESERVED_RDDS = 0, /* Obsolete: will be moved to internal FW define once QUERY_DEV_LIM is implemented in full in driver */
+	TAVOR_IF_HOST_LTLENDIAN = 0, /* host is little endian*/
+
+	/* Limits on QP in UD mode: max message and MTU */
+	TAVOR_LOG2_MAX_MTU = 11,
+
+	TAVOR_IF_MAX_MPT_PAGE_SIZE = 31 /* (log2) Maximum page size for an MPT */
+	/*sharon: 23.3.2003: changed from 32 at the req of tziporet */
+};
+
+typedef enum tavor_sys_en_syn
+{
+	TAVOR_SYS_EN_SYN_OK = 0x0, /* No syndrome: When command succeeds */
+	TAVOR_SYS_EN_SYN_SPD = 0x1, /* SPD error (e.g. checksum error,
+					no response, error while reading) */
+	TAVOR_SYS_EN_SYN_DIMM = 0x2, /* DIMM out of bounds (e.g. DIMM rows
+					number is not between 7 and 14,
+					DIMM type is not 2) */
+	TAVOR_SYS_EN_SYN_CONF = 0x3, /* DIMM conflict (e.g. mix of registered and
+					unbuffered DIMMs, CAS latency conflict) */
+	TAVOR_SYS_EN_SYN_CALB = 0x4, /* Calibration error */
+	TAVOR_SYS_EN_SYN_TSIZ = 0x5, /* Total size out of bounds:
+					E.g. total memory size exceeds the
+					maximum supported value. */
+	TAVOR_SYS_EN_SYN_DCHK = 0x6 /* DIMM check error occurred */
+} tavor_sys_en_syn_t;
+
+typedef enum tavor_diag_rprt
+{
+	TAVOR_DIAG_RPRT_QUERY_ERR = 0x2, /* Query transport and CI error counters */
+	TAVOR_DIAG_RPRT_RESET_ERR = 0x3, /* Query and reset error counters */
+	TAVOR_DIAG_RPRT_QUERY_PERF = 0x4, /* Query performance counters */
+	TAVOR_DIAG_RPRT_RESET_PERF = 0x5, /* Query and reset performance counters */
+	TAVOR_DIAG_RPRT_QUERY_MISC = 0x6, /* Query MISC counters */
+	TAVOR_DIAG_RPRT_RESET_MISC = 0x7 /* Query and reset MISC counters */
+} tavor_diag_rprt_t;
+
+#define CMDIF_OUTPRM_ALIGN 16 /* alignment requirement for out params */
+
+#endif /* H_TAVOR_IF_DEFS_H */
+
diff --git a/branches/Ndi/hw/mt23108/vapi/user/Makefile b/branches/Ndi/hw/mt23108/vapi/user/Makefile
new file mode 100644
index 00000000..9c985f57
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/user/Makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE $(NTMAKEENV)\makefile.def
diff --git a/branches/Ndi/hw/mt23108/vapi/user/SOURCES b/branches/Ndi/hw/mt23108/vapi/user/SOURCES
new file mode 100644
index 00000000..9fd8ec5d
--- /dev/null
+++ b/branches/Ndi/hw/mt23108/vapi/user/SOURCES
@@ -0,0 +1,63 @@
+TARGETNAME=vapi
+TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=LIBRARY
+
+SOURCES= \
+	mtl_common_ul_sources.c \
+	mosal_ul_sources.c \
+	mpga_ul_sources.c \
+	hh_ul_sources.c \
+	thhul_ul_sources.c \
+	vapi_common_ul_sources.c
+
+MT_HOME=..
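+# Each *_ul_sources.c listed above is a thin wrapper that #include's the real
+# .c files from the shared VAPI tree (the wrapper files appear further down in
+# this patch); this lets one user-mode library be built from sources that live
+# outside this directory.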
+OS_DEP_HOME=$(MT_HOME)\mlxsys\os_dep\win\ +MOSAL_HOME=$(MT_HOME)\mlxsys\mosal\os_dep\win +THH_HOME=$(MT_HOME)\hca\hcahal\tavor + +INCLUDES= \ + ..\..\user; \ + $(MT_HOME)\mlxsys\tools; \ + $(MT_HOME)\tavor_arch_db; \ + $(MT_HOME)\Hca\verbs; \ + $(MT_HOME)\Hca\verbs\common; \ + $(MT_HOME)\mlxsys\mpga\os_dep\win; \ + $(MT_HOME)\mlxsys\mpga; \ + $(MT_HOME)\mlxsys\mtl_types; \ + $(MT_HOME)\mlxsys\mtl_types\win; \ + $(MT_HOME)\mlxsys\mtl_types\win\win; \ + $(MT_HOME)\mlxsys\mtl_common\os_dep\win; \ + $(MT_HOME)\mlxsys\mtl_common; \ + $(MT_HOME)\mlxsys\mosal; \ + $(MT_HOME)\mlxsys\mosal\os_dep\win; \ + $(MT_HOME)\Hca\hcahal; \ + $(THH_HOME); \ + $(THH_HOME)\os_dep\win; \ + $(THH_HOME)\thhul_hob; \ + $(THH_HOME)\thhul_pdm; \ + $(THH_HOME)\thhul_cqm; \ + $(THH_HOME)\thhul_qpm; \ + $(THH_HOME)\thhul_srqm; \ + $(THH_HOME)\thhul_mwm; \ + $(THH_HOME)\thh_hob; \ + $(THH_HOME)\thh_srqm; \ + $(THH_HOME)\thh_qpm; \ + $(THH_HOME)\thh_cqm; \ + $(THH_HOME)\util; \ + $(THH_HOME)\uar; \ + $(THH_HOME)\uldm; \ + $(THH_HOME)\udavm; \ + $(THH_HOME)\eventp; \ + $(THH_HOME)\mrwm; \ + $(THH_HOME)\mcgm; \ + $(THH_HOME)\ddrmm; \ + $(THH_HOME)\cmdif; \ + $(MT_HOME)\mlxsys\os_dep\win\tdriver; \ + ..\..\..\..\inc; \ + ..\..\..\..\inc\user; \ + ..\..\..\..\core\al; \ + .; + +C_DEFINES=$(C_DEFINES) -DUMT23108_EXPORTS -DIVAPI_THH -DMT_LITTLE_ENDIAN -D__WIN__ -D__MSC__ -D__LITTLE_ENDIAN -D__DLL_EXPORTS__ -Di386 -DCL_NO_TRACK_MEM + +MSC_WARNING_LEVEL= /W3 # /Wp64 /ZI /Gz /TP /Gm /EHsc /RTC1 /MTd diff --git a/branches/Ndi/hw/mt23108/vapi/user/hh_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/hh_ul_sources.c new file mode 100644 index 00000000..f0582553 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/hh_ul_sources.c @@ -0,0 +1,6 @@ +#include "..\Hca\hcahal\hh.c" +#include "..\Hca\hcahal\hh.h" +#include "..\Hca\hcahal\hh_common.c" +#include "..\Hca\hcahal\hh_common.h" +#include "..\Hca\hcahal\hh_init.h" + diff --git a/branches/Ndi/hw/mt23108/vapi/user/mosal_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/mosal_ul_sources.c new file mode 100644 index 00000000..654d8778 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/mosal_ul_sources.c @@ -0,0 +1,4 @@ +#include "..\mlxsys\mosal\os_dep\win\mosalu_driver.c" +#include "..\mlxsys\mosal\os_dep\win\mosalu_mem.c" +#include "..\mlxsys\mosal\os_dep\win\mosalu_sync.c" +#include "..\mlxsys\mosal\os_dep\win\mosalu_thread.c" diff --git a/branches/Ndi/hw/mt23108/vapi/user/mpga_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/mpga_ul_sources.c new file mode 100644 index 00000000..7766fbe5 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/mpga_ul_sources.c @@ -0,0 +1,8 @@ +#include "..\mlxsys\mpga\mpga.c" +#include "..\mlxsys\mpga\packet_append.c" +#include "..\mlxsys\mpga\internal_functions.c" +#include "..\mlxsys\mpga\packet_utilities.c" +#include "..\mlxsys\mpga\nMPGA_packet_append.c" +#include "..\mlxsys\mpga\nMPGA.c" + + diff --git a/branches/Ndi/hw/mt23108/vapi/user/mtl_common_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/mtl_common_ul_sources.c new file mode 100644 index 00000000..e0c7aaa8 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/mtl_common_ul_sources.c @@ -0,0 +1,2 @@ + +#include "..\mlxsys\mtl_common\mtl_common.c" diff --git a/branches/Ndi/hw/mt23108/vapi/user/thhul_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/thhul_ul_sources.c new file mode 100644 index 00000000..a24c4aaf --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/thhul_ul_sources.c @@ -0,0 +1,11 @@ +#include "..\Hca\hcahal\tavor\thhul_cqm\thhul_cqm.c" +#include 
"..\Hca\hcahal\tavor\thhul_srqm\thhul_srqm.c" +#include "..\Hca\hcahal\tavor\thhul_hob\thhul_hob.c" +#include "..\Hca\hcahal\tavor\thhul_qpm\thhul_qpm.c" +#include "..\Hca\hcahal\tavor\thhul_pdm\thhul_pdm.c" +#include "..\Hca\hcahal\tavor\thhul_mwm\thhul_mwm.c" +#include "..\Hca\hcahal\tavor\uar\uar.c" +#include "..\Hca\hcahal\tavor\udavm\udavm.c" +#include "..\Hca\hcahal\tavor\util\epool.c" +#include "..\Hca\hcahal\tavor\util\tlog2.c" + diff --git a/branches/Ndi/hw/mt23108/vapi/user/vapi_common_ul_sources.c b/branches/Ndi/hw/mt23108/vapi/user/vapi_common_ul_sources.c new file mode 100644 index 00000000..56587508 --- /dev/null +++ b/branches/Ndi/hw/mt23108/vapi/user/vapi_common_ul_sources.c @@ -0,0 +1,4 @@ +#include "..\Hca\verbs\common\vapi_common.c" +#include "..\Hca\verbs\common\vip_array.c" +#include "..\Hca\verbs\common\vip_cirq.c" +#include "..\Hca\verbs\common\vip_hash.c" diff --git a/branches/Ndi/hw/mthca/dirs b/branches/Ndi/hw/mthca/dirs new file mode 100644 index 00000000..aa698135 --- /dev/null +++ b/branches/Ndi/hw/mthca/dirs @@ -0,0 +1,3 @@ +DIRS=\ + kernel \ + user diff --git a/branches/Ndi/hw/mthca/hca_utils.c b/branches/Ndi/hw/mthca/hca_utils.c new file mode 100644 index 00000000..a86b5ab5 --- /dev/null +++ b/branches/Ndi/hw/mthca/hca_utils.c @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "mthca_dev.h" + + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_data.tmh" +#endif + + +mthca_qp_access_t +map_qp_ibal_acl( + IN ib_access_t ibal_acl) +{ +#define IBAL_ACL(ifl,mfl) if (ibal_acl & ifl) mthca_acl |= mfl + mthca_qp_access_t mthca_acl = 0; + + IBAL_ACL(IB_AC_RDMA_READ,MTHCA_ACCESS_REMOTE_READ); + IBAL_ACL(IB_AC_RDMA_WRITE,MTHCA_ACCESS_REMOTE_WRITE); + IBAL_ACL(IB_AC_ATOMIC,MTHCA_ACCESS_REMOTE_ATOMIC); + IBAL_ACL(IB_AC_LOCAL_WRITE,MTHCA_ACCESS_LOCAL_WRITE); + IBAL_ACL(IB_AC_MW_BIND,MTHCA_ACCESS_MW_BIND); + + return mthca_acl; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_access_t +map_qp_mthca_acl( + IN mthca_qp_access_t mthca_acl) +{ +#define ACL_IBAL(mfl,ifl) if (mthca_acl & mfl) ibal_acl |= ifl + ib_access_t ibal_acl = 0; + + ACL_IBAL(MTHCA_ACCESS_REMOTE_READ,IB_AC_RDMA_READ); + ACL_IBAL(MTHCA_ACCESS_REMOTE_WRITE,IB_AC_RDMA_WRITE); + ACL_IBAL(MTHCA_ACCESS_REMOTE_ATOMIC,IB_AC_ATOMIC); + ACL_IBAL(MTHCA_ACCESS_LOCAL_WRITE,IB_AC_LOCAL_WRITE); + ACL_IBAL(MTHCA_ACCESS_MW_BIND,IB_AC_MW_BIND); + + return ibal_acl; +} + + diff --git a/branches/Ndi/hw/mthca/hca_utils.h b/branches/Ndi/hw/mthca/hca_utils.h new file mode 100644 index 00000000..9b8a5683 --- /dev/null +++ b/branches/Ndi/hw/mthca/hca_utils.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef __HCA_UTILS_H__ +#define __HCA_UTILS_H__ + +#include +#include + +mthca_qp_access_t +map_qp_ibal_acl( + IN ib_access_t ibal_acl) +; + +ib_access_t +map_qp_mthca_acl( + IN mthca_qp_access_t mthca_acl); + +#endif + diff --git a/branches/Ndi/hw/mthca/kernel/Makefile b/branches/Ndi/hw/mthca/kernel/Makefile new file mode 100644 index 00000000..1c8f2940 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/Makefile @@ -0,0 +1,6 @@ +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def# diff --git a/branches/Ndi/hw/mthca/kernel/SOURCES b/branches/Ndi/hw/mthca/kernel/SOURCES new file mode 100644 index 00000000..bff6b391 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/SOURCES @@ -0,0 +1,86 @@ +TRUNK=..\..\.. + +TARGETNAME=mthca +TARGETPATH=$(TRUNK)\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + +SOURCES= \ + mthca_log.mc \ + mthca_log.rc \ + hca.rc \ + mthca_log.c \ + \ + ..\hca_utils.c \ + ..\mt_utils.c \ + \ + hca_data.c \ + hca_direct.c \ + hca_driver.c \ + hca_mcast.c \ + hca_memory.c \ + hca_pci.c \ + hca_pnp.c \ + hca_verbs.c \ + \ + mt_cache.c \ + mt_device.c \ + mt_l2w.c \ + mt_memory.c \ + mt_packer.c \ + mt_reset_tavor.c \ + mt_ud_header.c \ + mt_uverbs.c \ + mt_verbs.c \ + mt_pa_cash.c \ + \ + mthca_allocator.c \ + mthca_av.c \ + mthca_catas.c \ + mthca_cmd.c \ + mthca_cq.c \ + mthca_eq.c \ + mthca_mad.c \ + mthca_main.c \ + mthca_mcg.c \ + mthca_memfree.c \ + mthca_mr.c \ + mthca_pd.c \ + mthca_profile.c \ + mthca_provider.c \ + mthca_qp.c \ + mthca_srq.c \ + mthca_uar.c \ + + +INCLUDES=\ + ..; \ + $(TRUNK)\inc; \ + $(TRUNK)\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS -D__LITTLE_ENDIAN + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib \ + $(DDK_LIB_PATH)\wdmguid.lib + + +#LINKER_FLAGS=/MAP + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING +RUN_WPP= $(SOURCES) -km -ext: .c .h .C .H \ + -scan:hca_debug.h \ + -func:HCA_PRINT(LEVEL,FLAGS,(MSG,...)) +!ENDIF + +# -func:HCA_PRINT_EV(LEVEL,FLAGS,(MSG,...)) \ + +MSC_OPTIMIZATION=/Oi +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/hw/mthca/kernel/hca.rc b/branches/Ndi/hw/mthca/kernel/hca.rc new file mode 100644 index 00000000..345f4397 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca.rc @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN +#ifdef DBG +#define VER_FILEDESCRIPTION_STR "HCA Driver (checked)" +#else +#define VER_FILEDESCRIPTION_STR "HCA Driver" +#endif +#define VER_INTERNALNAME_STR "mthca.sys" +#define VER_ORIGINALFILENAME_STR "mthca.sys" +#include diff --git a/branches/Ndi/hw/mthca/kernel/hca_data.c b/branches/Ndi/hw/mthca/kernel/hca_data.c new file mode 100644 index 00000000..5805306e --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_data.c @@ -0,0 +1,907 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "hca_driver.h" +#include "hca_utils.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_data.tmh" +#endif + +#include "mthca_dev.h" +#include + +static cl_spinlock_t hob_lock; + + + +uint32_t g_mlnx_dpc2thread = 0; + + +cl_qlist_t mlnx_hca_list; + +mlnx_hob_t mlnx_hob_array[MLNX_NUM_HOBKL]; // kernel HOB - one per HCA (cmdif access) +mlnx_hobul_t *mlnx_hobul_array[MLNX_NUM_HOBUL]; // kernel HOBUL - one per HCA (kar access) + +///////////////////////////////////////////////////////// +// ### HCA +///////////////////////////////////////////////////////// +void +mlnx_hca_insert( + IN mlnx_hca_t *p_hca ) +{ + cl_spinlock_acquire( &hob_lock ); + cl_qlist_insert_tail( &mlnx_hca_list, &p_hca->list_item ); + cl_spinlock_release( &hob_lock ); +} + +void +mlnx_hca_remove( + IN mlnx_hca_t *p_hca ) +{ + cl_spinlock_acquire( &hob_lock ); + cl_qlist_remove_item( &mlnx_hca_list, &p_hca->list_item ); + cl_spinlock_release( &hob_lock ); +} + +mlnx_hca_t* +mlnx_hca_from_guid( + IN ib_net64_t guid ) +{ + cl_list_item_t *p_item; + mlnx_hca_t *p_hca = NULL; + + cl_spinlock_acquire( &hob_lock ); + p_item = cl_qlist_head( &mlnx_hca_list ); + while( p_item != cl_qlist_end( &mlnx_hca_list ) ) + { + p_hca = PARENT_STRUCT( p_item, mlnx_hca_t, list_item ); + if( p_hca->guid == guid ) + break; + p_item = cl_qlist_next( p_item ); + p_hca = NULL; + } + cl_spinlock_release( &hob_lock ); + return p_hca; +} + +/* +void +mlnx_names_from_guid( + IN ib_net64_t guid, + OUT char **hca_name_p, + OUT char **dev_name_p) +{ + unsigned int idx; + + if (!hca_name_p) return; + if (!dev_name_p) return; + + for (idx = 0; idx < mlnx_num_hca; idx++) + { + if (mlnx_hca_array[idx].ifx.guid == guid) + { + *hca_name_p = mlnx_hca_array[idx].hca_name_p; + *dev_name_p = mlnx_hca_array[idx].dev_name_p; + } + } +} +*/ + +///////////////////////////////////////////////////////// +// ### HCA +///////////////////////////////////////////////////////// +cl_status_t +mlnx_hcas_init( void ) +{ + cl_qlist_init( &mlnx_hca_list ); + return cl_spinlock_init( &hob_lock ); +} + + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +ib_api_status_t +mlnx_hobs_set_cb( + IN mlnx_hob_t *hob_p, + IN ci_completion_cb_t comp_cb_p, + IN ci_async_event_cb_t async_cb_p, + IN const void* const ib_context) +{ + cl_status_t cl_status; + + // Setup the callbacks + if (!hob_p->async_proc_mgr_p) + { + hob_p->async_proc_mgr_p = cl_malloc( sizeof( cl_async_proc_t ) ); + if( !hob_p->async_proc_mgr_p ) + { + return IB_INSUFFICIENT_MEMORY; + } + cl_async_proc_construct( hob_p->async_proc_mgr_p ); + cl_status = cl_async_proc_init( hob_p->async_proc_mgr_p, MLNX_NUM_CB_THR, "CBthread" ); + if( cl_status != CL_SUCCESS ) + { + cl_async_proc_destroy( hob_p->async_proc_mgr_p ); + cl_free(hob_p->async_proc_mgr_p); + hob_p->async_proc_mgr_p = NULL; + return IB_INSUFFICIENT_RESOURCES; + } + } + + hob_p->comp_cb_p = comp_cb_p; + hob_p->async_cb_p = async_cb_p; + hob_p->ca_context = ib_context; // This is the context our CB forwards to IBAL + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hca_idx %d context 0x%p\n", (int)(hob_p - mlnx_hob_array), ib_context)); + return IB_SUCCESS; +} + +///////////////////////////////////////////////////////// +///////////////////////////////////////////////////////// +void +mlnx_hobs_remove( + IN mlnx_hob_t *hob_p) +{ + cl_async_proc_t *p_async_proc; + + + cl_spinlock_acquire( &hob_lock ); + + hob_p->mark = 
E_MARK_INVALID;
+
+	p_async_proc = hob_p->async_proc_mgr_p;
+	hob_p->async_proc_mgr_p = NULL;
+
+	hob_p->comp_cb_p = NULL;
+	hob_p->async_cb_p = NULL;
+	hob_p->ca_context = NULL;
+	hob_p->cl_device_h = NULL;
+
+	cl_spinlock_release( &hob_lock );
+
+	if( p_async_proc )
+	{
+		cl_async_proc_destroy( p_async_proc );
+		cl_free( p_async_proc );
+	}
+
+
+
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,("CL: hobs_remove idx %d \n", (int)(hob_p - mlnx_hob_array)));
+}
+
+/////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////
+void
+mthca_port_cap_to_ibal(
+	IN		u32			mthca_port_cap,
+	OUT		ib_port_cap_t		*ibal_port_cap_p)
+{
+#define SET_CAP(flag,cap) if (mthca_port_cap & flag) ibal_port_cap_p->cap = TRUE
+
+	SET_CAP(IB_PORT_CM_SUP,cm);
+	SET_CAP(IB_PORT_SNMP_TUNNEL_SUP,snmp);
+	SET_CAP(IB_PORT_DEVICE_MGMT_SUP,dev_mgmt);
+	SET_CAP(IB_PORT_VENDOR_CLASS_SUP,vend);
+	SET_CAP(IB_PORT_SM_DISABLED,sm_disable);
+	SET_CAP(IB_PORT_SM,sm);
+	SET_CAP(IB_PORT_NOTICE_SUP,notice);
+	SET_CAP(IB_PORT_TRAP_SUP,trap);
+	SET_CAP(IB_PORT_AUTO_MIGR_SUP,apm);
+	SET_CAP(IB_PORT_SL_MAP_SUP,slmap);
+	SET_CAP(IB_PORT_LED_INFO_SUP,ledinfo);
+	SET_CAP(IB_PORT_CAP_MASK_NOTICE_SUP,capm_notice);
+	SET_CAP(IB_PORT_CLIENT_REG_SUP,client_reregister);
+	SET_CAP(IB_PORT_SYS_IMAGE_GUID_SUP,sysguid);
+	SET_CAP(IB_PORT_BOOT_MGMT_SUP,boot_mgmt);
+	SET_CAP(IB_PORT_DR_NOTICE_SUP,dr_notice);
+	SET_CAP(IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP,pkey_switch_ext_port);
+	SET_CAP(IB_PORT_LINK_LATENCY_SUP,link_rtl);
+	SET_CAP(IB_PORT_REINIT_SUP,reinit);
+	SET_CAP(IB_PORT_OPT_IPD_SUP,ipd);
+	SET_CAP(IB_PORT_MKEY_NVRAM,mkey_nvram);
+	SET_CAP(IB_PORT_PKEY_NVRAM,pkey_nvram);
+	// there are no MTHCA flags for the qkey_ctr, pkey_ctr, port_active, bm IBAL capabilities
+}
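+
+/* SET_CAP above expands to a guarded assignment; for example,
+   SET_CAP(IB_PORT_CM_SUP,cm) becomes:
+   if (mthca_port_cap & IB_PORT_CM_SUP) ibal_port_cap_p->cm = TRUE; */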
+
+
+/////////////////////////////////////////////////////////
+void
+mlnx_conv_hca_cap(
+	IN	struct ib_device *ib_dev,
+	IN	struct ib_device_attr *hca_info_p,
+	IN	struct ib_port_attr *hca_ports,
+	OUT	ib_ca_attr_t *ca_attr_p)
+{
+	uint8_t port_num;
+	ib_port_attr_t *ibal_port_p;
+	struct ib_port_attr *mthca_port_p;
+
+	ca_attr_p->vend_id = hca_info_p->vendor_id;
+	ca_attr_p->dev_id = (uint16_t)hca_info_p->vendor_part_id;
+	ca_attr_p->revision = (uint16_t)hca_info_p->hw_ver;
+	ca_attr_p->fw_ver = hca_info_p->fw_ver;
+	ca_attr_p->ca_guid = *(UNALIGNED64 uint64_t *)&ib_dev->node_guid;
+	ca_attr_p->num_ports = ib_dev->phys_port_cnt;
+	ca_attr_p->max_qps = hca_info_p->max_qp;
+	ca_attr_p->max_wrs = hca_info_p->max_qp_wr;
+	ca_attr_p->max_sges = hca_info_p->max_sge;
+	ca_attr_p->max_rd_sges = hca_info_p->max_sge_rd;
+	ca_attr_p->max_cqs = hca_info_p->max_cq;
+	ca_attr_p->max_cqes = hca_info_p->max_cqe;
+	ca_attr_p->max_pds = hca_info_p->max_pd;
+	ca_attr_p->init_regions = hca_info_p->max_mr;
+	ca_attr_p->init_windows = hca_info_p->max_mw;
+	ca_attr_p->init_region_size = hca_info_p->max_mr_size;
+	ca_attr_p->max_addr_handles = hca_info_p->max_ah;
+	ca_attr_p->atomicity = hca_info_p->atomic_cap;
+	ca_attr_p->max_partitions = hca_info_p->max_pkeys;
+	ca_attr_p->max_qp_resp_res = (uint8_t)hca_info_p->max_qp_rd_atom;
+	ca_attr_p->max_resp_res = (uint8_t)hca_info_p->max_res_rd_atom;
+	ca_attr_p->max_qp_init_depth = (uint8_t)hca_info_p->max_qp_init_rd_atom;
+	ca_attr_p->max_ipv6_qps = hca_info_p->max_raw_ipv6_qp;
+	ca_attr_p->max_ether_qps = hca_info_p->max_raw_ethy_qp;
+	ca_attr_p->max_mcast_grps = hca_info_p->max_mcast_grp;
+	ca_attr_p->max_mcast_qps = hca_info_p->max_total_mcast_qp_attach;
+	ca_attr_p->max_qps_per_mcast_grp = hca_info_p->max_mcast_qp_attach;
+	ca_attr_p->max_fmr = hca_info_p->max_fmr;
+	ca_attr_p->max_map_per_fmr = hca_info_p->max_map_per_fmr;
+	ca_attr_p->max_srq = hca_info_p->max_srq;
+	ca_attr_p->max_srq_wrs = hca_info_p->max_srq_wr;
+	ca_attr_p->max_srq_sges = hca_info_p->max_srq_sge;
+
+	ca_attr_p->local_ack_delay = hca_info_p->local_ca_ack_delay;
+	ca_attr_p->bad_pkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_PKEY_CNTR;
+	ca_attr_p->bad_qkey_ctr_support = hca_info_p->device_cap_flags & IB_DEVICE_BAD_QKEY_CNTR;
+	ca_attr_p->raw_mcast_support = hca_info_p->device_cap_flags & IB_DEVICE_RAW_MULTI;
+	ca_attr_p->apm_support = hca_info_p->device_cap_flags & IB_DEVICE_AUTO_PATH_MIG;
+	ca_attr_p->av_port_check = hca_info_p->device_cap_flags & IB_DEVICE_UD_AV_PORT_ENFORCE;
+	ca_attr_p->change_primary_port = hca_info_p->device_cap_flags & IB_DEVICE_CHANGE_PHY_PORT;
+	ca_attr_p->modify_wr_depth = hca_info_p->device_cap_flags & IB_DEVICE_RESIZE_MAX_WR;
+	ca_attr_p->modify_srq_depth = hca_info_p->device_cap_flags & IB_DEVICE_SRQ_RESIZE;
+	ca_attr_p->hw_agents = FALSE; // in the context of IBAL, the agent is implemented on the host
+
+	ca_attr_p->num_page_sizes = 1;
+	ca_attr_p->p_page_size[0] = PAGE_SIZE; // TBD: extract an array of page sizes from HCA cap
+
+	for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num)
+	{
+		// Setup port pointers
+		ibal_port_p = &ca_attr_p->p_port_attr[port_num];
+		mthca_port_p = &hca_ports[port_num];
+
+		// Port Capabilities
+		cl_memclr(&ibal_port_p->cap, sizeof(ib_port_cap_t));
+		mthca_port_cap_to_ibal(mthca_port_p->port_cap_flags, &ibal_port_p->cap);
+
+		// Port Attributes
+		ibal_port_p->port_num = port_num + start_port(ib_dev);
+		ibal_port_p->port_guid = ibal_port_p->p_gid_table[0].unicast.interface_id;
+		ibal_port_p->lid = cl_ntoh16(mthca_port_p->lid);
+		ibal_port_p->lmc = mthca_port_p->lmc;
+		ibal_port_p->max_vls = mthca_port_p->max_vl_num;
+		ibal_port_p->sm_lid = cl_ntoh16(mthca_port_p->sm_lid);
+		ibal_port_p->sm_sl = mthca_port_p->sm_sl;
+		ibal_port_p->link_state = (mthca_port_p->state != 0) ? (uint8_t)mthca_port_p->state : IB_LINK_DOWN;
+		ibal_port_p->num_gids = (uint16_t)mthca_port_p->gid_tbl_len;
+		ibal_port_p->num_pkeys = mthca_port_p->pkey_tbl_len;
+		ibal_port_p->pkey_ctr = (uint16_t)mthca_port_p->bad_pkey_cntr;
+		ibal_port_p->qkey_ctr = (uint16_t)mthca_port_p->qkey_viol_cntr;
+		ibal_port_p->max_msg_size = mthca_port_p->max_msg_sz;
+		ibal_port_p->mtu = (uint8_t)mthca_port_p->max_mtu;
+
+		ibal_port_p->subnet_timeout = mthca_port_p->subnet_timeout;
+		// ibal_port_p->local_ack_timeout = 3; // TBD: currently ~32 usec
+		HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM ,("Port %d port_guid 0x%I64x\n",
+			ibal_port_p->port_num, cl_ntoh64(ibal_port_p->port_guid)));
+	}
+}
+
+void cq_comp_handler(struct ib_cq *cq, void *context)
+{
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)context;
+	struct mthca_cq *mcq =(struct mthca_cq *)cq;
+	HCA_ENTER(HCA_DBG_CQ);
+	if (hob_p && hob_p->comp_cb_p) {
+		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Invoking completion callback\n"));
+		(hob_p->comp_cb_p)(mcq->cq_context);
+	}
+	else {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("Incorrect context.
Completion callback was not invoked\n")); + } + HCA_EXIT(HCA_DBG_CQ); +} + +void ca_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + + // prepare parameters + event_rec.context = (void *)hob_p->ca_context; + event_rec.trap.info.port_num = ev->element.port_num; + event_rec.type = ev->event; + if (event_rec.type > IB_AE_UNKNOWN) { + // CL_ASSERT(0); // This shouldn't happen + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("Unmapped E_EV_CA event of type 0x%x. Replaced by 0x%x (IB_AE_LOCAL_FATAL)\n", + event_rec.type, IB_AE_LOCAL_FATAL)); + event_rec.type = IB_AE_LOCAL_FATAL; + } + + // call the user callback + if (hob_p && hob_p->async_cb_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n")); + } +} + +void srq_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_srq *srq_p; + + // prepare parameters + event_rec.type = ev->event; + event_rec.vendor_specific = ev->vendor_specific; + srq_p = (struct mthca_srq *)ev->element.srq; + event_rec.context = srq_p->srq_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n")); + } +} + + +void qp_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_qp *qp_p; + + // prepare parameters + event_rec.type = ev->event; + event_rec.vendor_specific = ev->vendor_specific; + qp_p = (struct mthca_qp *)ev->element.qp; + event_rec.context = qp_p->qp_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. Async callback was not invoked\n")); + } +} + +void cq_event_handler(struct ib_event *ev, void *context) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)context; + ib_event_rec_t event_rec; + struct mthca_cq *cq_p; + + // prepare parameters + event_rec.type = ev->event; + cq_p = (struct mthca_cq *)ev->element.cq; + event_rec.context = cq_p->cq_context; + + // call the user callback + if (hob_p) + (hob_p->async_cb_p)(&event_rec); + else { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Incorrect context. 
Async callback was not invoked\n")); + } +} + +ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps) +{ +#define MAP_QPS(val1,val2) case val1: ib_qps = val2; break + ib_qp_state_t ib_qps; + switch (qps) { + MAP_QPS( IBQPS_RESET, IB_QPS_RESET ); + MAP_QPS( IBQPS_INIT, IB_QPS_INIT ); + MAP_QPS( IBQPS_RTR, IB_QPS_RTR ); + MAP_QPS( IBQPS_RTS, IB_QPS_RTS ); + MAP_QPS( IBQPS_SQD, IB_QPS_SQD ); + MAP_QPS( IBQPS_SQE, IB_QPS_SQERR ); + MAP_QPS( IBQPS_ERR, IB_QPS_ERROR ); + default: + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped MTHCA qp_state %d\n", qps)); + ib_qps = 0xffffffff; + } + return ib_qps; +} + +enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps) +{ +#define MAP_IBQPS(val1,val2) case val1: qps = val2; break + enum ib_qp_state qps; + switch (ib_qps) { + MAP_IBQPS( IB_QPS_RESET, IBQPS_RESET ); + MAP_IBQPS( IB_QPS_INIT, IBQPS_INIT ); + MAP_IBQPS( IB_QPS_RTR, IBQPS_RTR ); + MAP_IBQPS( IB_QPS_RTS, IBQPS_RTS ); + MAP_IBQPS( IB_QPS_SQD, IBQPS_SQD ); + MAP_IBQPS( IB_QPS_SQD_DRAINING, IBQPS_SQD ); + MAP_IBQPS( IB_QPS_SQD_DRAINED, IBQPS_SQD ); + MAP_IBQPS( IB_QPS_SQERR, IBQPS_SQE ); + MAP_IBQPS( IB_QPS_ERROR, IBQPS_ERR ); + default: + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped IBAL qp_state %d\n", ib_qps)); + qps = 0xffffffff; + } + return qps; +} + +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN const struct ib_qp *ib_qp_p, + IN ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT struct ib_qp_attr *qp_attr_p, + OUT int *qp_attr_mask_p + ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p; + + RtlZeroMemory( qp_attr_p, sizeof *qp_attr_p ); + *qp_attr_mask_p = IB_QP_STATE; + qp_attr_p->qp_state = mlnx_qps_from_ibal( modify_attr_p->req_state ); + + // skipped cases + if (qp_p->state == IBQPS_RESET && modify_attr_p->req_state != IB_QPS_INIT) + return IB_NOT_DONE; + + switch (modify_attr_p->req_state) { + case IB_QPS_RESET: + case IB_QPS_ERROR: + case IB_QPS_SQERR: + case IB_QPS_TIME_WAIT: + break; + + case IB_QPS_INIT: + + switch (qp_type) { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + *qp_attr_mask_p |= IB_QP_PORT | IB_QP_PKEY_INDEX |IB_QP_ACCESS_FLAGS; + qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.init.access_ctrl); + break; + case IB_QPT_UNRELIABLE_DGRM: + case IB_QPT_QP0: + case IB_QPT_QP1: + default: + *qp_attr_mask_p |= IB_QP_PORT | IB_QP_QKEY | IB_QP_PKEY_INDEX ; + qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.init.qkey); + break; + } + + // IB_QP_PORT + qp_attr_p->port_num = modify_attr_p->state.init.primary_port; + + // IB_QP_PKEY_INDEX + qp_attr_p->pkey_index = modify_attr_p->state.init.pkey_index; + + break; + + case IB_QPS_RTR: + /* modifying the WQE depth is not supported */ + if( modify_attr_p->state.rtr.opts & IB_MOD_QP_SQ_DEPTH || + modify_attr_p->state.rtr.opts & IB_MOD_QP_RQ_DEPTH ) { + status = IB_UNSUPPORTED; + break; + } + + switch (qp_type) { + case IB_QPT_RELIABLE_CONN: + *qp_attr_mask_p |= /* required flags */ + IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | + IB_QP_AV |IB_QP_PATH_MTU | IB_QP_MIN_RNR_TIMER; + + // IB_QP_DEST_QPN + qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp); + + // IB_QP_RQ_PSN + qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn); + + // IB_QP_MAX_DEST_RD_ATOMIC + qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rtr.resp_res; + + // IB_QP_AV, IB_QP_PATH_MTU: Convert primary RC AV (mandatory) + err = mlnx_conv_ibal_av(ib_qp_p->device, + 
&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
+			if (err) {
+				status = IB_ERROR;
+				break;
+			}
+			qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu; // MTU
+			qp_attr_p->timeout = modify_attr_p->state.rtr.primary_av.conn.local_ack_timeout; // local ACK timeout
+			qp_attr_p->retry_cnt = modify_attr_p->state.rtr.primary_av.conn.seq_err_retry_cnt; // retry count
+			qp_attr_p->rnr_retry = modify_attr_p->state.rtr.primary_av.conn.rnr_retry_cnt; // RNR retry count
+
+			// IB_QP_MIN_RNR_TIMER, required in RTR, optional in RTS.
+			qp_attr_p->min_rnr_timer = modify_attr_p->state.rtr.rnr_nak_timeout;
+
+			// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
+				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */
+				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
+			}
+
+			// IB_QP_ALT_PATH: Convert alternate RC AV
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
+				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */
+				err = mlnx_conv_ibal_av(ib_qp_p->device,
+					&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
+				if (err) {
+					status = IB_ERROR;
+					break;
+				}
+				qp_attr_p->alt_timeout = modify_attr_p->state.rtr.alternate_av.conn.local_ack_timeout; // XXX: conv
+			}
+
+			// IB_QP_PKEY_INDEX
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
+				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
+				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
+			}
+			break;
+
+		case IB_QPT_UNRELIABLE_CONN:
+			*qp_attr_mask_p |= /* required flags */
+				IB_QP_DEST_QPN |IB_QP_RQ_PSN | IB_QP_AV | IB_QP_PATH_MTU;
+
+			// IB_QP_DEST_QPN
+			qp_attr_p->dest_qp_num = cl_ntoh32 (modify_attr_p->state.rtr.dest_qp);
+
+			// IB_QP_RQ_PSN
+			qp_attr_p->rq_psn = cl_ntoh32 (modify_attr_p->state.rtr.rq_psn);
+
+			// IB_QP_PATH_MTU
+			qp_attr_p->path_mtu = modify_attr_p->state.rtr.primary_av.conn.path_mtu;
+
+			// IB_QP_AV: Convert primary AV (mandatory)
+			err = mlnx_conv_ibal_av(ib_qp_p->device,
+				&modify_attr_p->state.rtr.primary_av, &qp_attr_p->ah_attr);
+			if (err) {
+				status = IB_ERROR;
+				break;
+			}
+
+			// IB_QP_ACCESS_FLAGS: Convert Remote Atomic Flags
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ACCESS_CTRL) {
+				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flag */
+				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rtr.access_ctrl);
+			}
+
+			// IB_QP_ALT_PATH: Convert alternate RC AV
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_ALTERNATE_AV) {
+				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* required flag */
+				err = mlnx_conv_ibal_av(ib_qp_p->device,
+					&modify_attr_p->state.rtr.alternate_av, &qp_attr_p->alt_ah_attr);
+				if (err) {
+					status = IB_ERROR;
+					break;
+				}
+			}
+
+			// IB_QP_PKEY_INDEX
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
+				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
+				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
+			}
+			break;
+
+		case IB_QPT_UNRELIABLE_DGRM:
+		case IB_QPT_QP0:
+		case IB_QPT_QP1:
+		default:
+			// IB_QP_PKEY_INDEX
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_PKEY) {
+				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
+				qp_attr_p->pkey_index = modify_attr_p->state.rtr.pkey_index;
+			}
+
+			// IB_QP_QKEY
+			if (modify_attr_p->state.rtr.opts & IB_MOD_QP_QKEY) {
+				*qp_attr_mask_p |= IB_QP_QKEY;
+				qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rtr.qkey);
+			}
+			break;
+
+		}
+		break;
+
+	case IB_QPS_RTS:
+		/* modifying the WQE depth is not supported */
+		if( modify_attr_p->state.rts.opts & IB_MOD_QP_SQ_DEPTH ||
+			modify_attr_p->state.rts.opts & IB_MOD_QP_RQ_DEPTH )
+		{
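+			/* mthca cannot resize a QP's work queues after creation
+			   (same check as in the RTR arm above), so the transition
+			   is rejected with IB_UNSUPPORTED. */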
status = IB_UNSUPPORTED; + break; + } + + switch (qp_type) { + case IB_QPT_RELIABLE_CONN: + if (qp_p->state != IBQPS_RTS) + *qp_attr_mask_p |= /* required flags */ + IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC | IB_QP_TIMEOUT | + IB_QP_RETRY_CNT |IB_QP_RNR_RETRY; + + // IB_QP_MAX_QP_RD_ATOMIC + qp_attr_p->max_rd_atomic = modify_attr_p->state.rts.init_depth; + + // IB_QP_TIMEOUT + qp_attr_p->timeout = modify_attr_p->state.rts.local_ack_timeout; // XXX: conv + + // IB_QP_RETRY_CNT + qp_attr_p->retry_cnt = modify_attr_p->state.rts.retry_cnt; + + // IB_QP_RNR_RETRY + qp_attr_p->rnr_retry = modify_attr_p->state.rts.rnr_retry_cnt; + + // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS) + if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) { + *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC; + qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res; + } + +#ifdef WIN_TO_BE_REMOVED + //TODO: do we need that ? + // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition. + + // IB_QP_PKEY_INDEX + if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) { + *qp_attr_mask_p |= IB_QP_PKEY_INDEX; + qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index; + } +#endif + + // IB_QP_MIN_RNR_TIMER + if (modify_attr_p->state.rts.opts & IB_MOD_QP_RNR_NAK_TIMEOUT) { + *qp_attr_mask_p |= IB_QP_MIN_RNR_TIMER; + qp_attr_p->min_rnr_timer = modify_attr_p->state.rts.rnr_nak_timeout; + } + + // IB_QP_PATH_MIG_STATE + if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) { + *qp_attr_mask_p |= IB_QP_PATH_MIG_STATE; + qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state; + } + + // IB_QP_ACCESS_FLAGS + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) { + *qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */ + qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl); + } + + // IB_QP_ALT_PATH: Convert alternate RC AV + if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) { + *qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */ + err = mlnx_conv_ibal_av(ib_qp_p->device, + &modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr); + if (err) { + status = IB_ERROR; + break; + } + qp_attr_p->alt_timeout = modify_attr_p->state.rts.alternate_av.conn.local_ack_timeout; // XXX: conv + } + break; + + case IB_QPT_UNRELIABLE_CONN: + if (qp_p->state != IBQPS_RTS) + *qp_attr_mask_p |= /* required flags */ + IB_QP_SQ_PSN; + + // IB_QP_MAX_DEST_RD_ATOMIC: Update the responder resources for RDMA/ATOMIC (optional for SQD->RTS) + if (modify_attr_p->state.rts.opts & IB_MOD_QP_RESP_RES) { + *qp_attr_mask_p |= IB_QP_MAX_DEST_RD_ATOMIC; + qp_attr_p->max_dest_rd_atomic = modify_attr_p->state.rts.resp_res; + } + +#ifdef WIN_TO_BE_REMOVED + //TODO: do we need that ? + // Linux patch 4793: PKEY_INDEX is not a legal parameter in the RTR->RTS transition. 
+
+			// IB_QP_PKEY_INDEX
+			if (modify_attr_p->state.rts.opts & IB_MOD_QP_PKEY) {
+				*qp_attr_mask_p |= IB_QP_PKEY_INDEX;
+				qp_attr_p->pkey_index = modify_attr_p->state.rts.pkey_index;
+			}
+#endif
+
+			// IB_QP_PATH_MIG_STATE
+			if (modify_attr_p->state.rts.opts & IB_MOD_QP_APM_STATE) {
+				*qp_attr_mask_p |= IB_QP_PATH_MIG_STATE;
+				qp_attr_p->path_mig_state = modify_attr_p->state.rts.apm_state;
+			}
+
+			// IB_QP_ACCESS_FLAGS
+			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ACCESS_CTRL) {
+				*qp_attr_mask_p |= IB_QP_ACCESS_FLAGS; /* optional flags */
+				qp_attr_p->qp_access_flags = map_qp_ibal_acl(modify_attr_p->state.rts.access_ctrl);
+			}
+
+			// IB_QP_ALT_PATH: Convert alternate RC AV
+			if (modify_attr_p->state.rts.opts & IB_MOD_QP_ALTERNATE_AV) {
+				*qp_attr_mask_p |= IB_QP_ALT_PATH; /* optional flag */
+				err = mlnx_conv_ibal_av(ib_qp_p->device,
+					&modify_attr_p->state.rts.alternate_av, &qp_attr_p->alt_ah_attr);
+				if (err) {
+					status = IB_ERROR;
+					break;
+				}
+			}
+			break;
+
+		case IB_QPT_UNRELIABLE_DGRM:
+		case IB_QPT_QP0:
+		case IB_QPT_QP1:
+		default:
+			if (qp_p->state != IBQPS_RTS)
+				*qp_attr_mask_p |= /* required flags */
+					IB_QP_SQ_PSN;
+
+			// IB_QP_QKEY
+			if (modify_attr_p->state.rts.opts & IB_MOD_QP_QKEY) {
+				*qp_attr_mask_p |= IB_QP_QKEY;
+				qp_attr_p->qkey = cl_ntoh32 (modify_attr_p->state.rts.qkey);
+			}
+			break;
+
+		}
+
+		// IB_QP_SQ_PSN: common for all
+		qp_attr_p->sq_psn = cl_ntoh32 (modify_attr_p->state.rts.sq_psn);
+		//NB: IB_QP_CUR_STATE flag is not provisioned by IBAL
+		break;
+
+	case IB_QPS_SQD:
+	case IB_QPS_SQD_DRAINING:
+	case IB_QPS_SQD_DRAINED:
+		*qp_attr_mask_p |= IB_QP_EN_SQD_ASYNC_NOTIFY;
+		qp_attr_p->en_sqd_async_notify = (u8)modify_attr_p->state.sqd.sqd_event;
+		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SHIM ,("IB_QP_EN_SQD_ASYNC_NOTIFY appears to be unsupported\n"));
+		break;
+
+	default:
+		//NB: is this an error case that warrants this message? Should we return an error instead?
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Unmapped qp_state %d\n", modify_attr_p->req_state));
+		break;
+
+	}
+
+	return status;
+}
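+
+/* Editor's note: a hedged usage sketch for mlnx_conv_qp_modify_attr() above;
+   the modify verb shown is assumed from the Linux-derived layer this driver
+   wraps and may be named differently in this tree:
+
+	struct ib_qp_attr attr;
+	int attr_mask = 0;
+	if (mlnx_conv_qp_modify_attr(ib_qp_p, qp_type, p_modify_attr,
+			&attr, &attr_mask) == IB_SUCCESS)
+		err = ib_modify_qp(ib_qp_p, &attr, attr_mask);
+*/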
+
+int
+mlnx_conv_ibal_av(
+	IN	const	struct ib_device *ib_dev_p,
+	IN	const	ib_av_attr_t	*ibal_av_p,
+	OUT		struct ib_ah_attr	*ah_attr_p)
+{
+	int err = 0;
+	u8 port_num;
+	u16 gid_index;
+
+	ah_attr_p->port_num = ibal_av_p->port_num;
+	ah_attr_p->sl = ibal_av_p->sl;
+	ah_attr_p->dlid = cl_ntoh16(ibal_av_p->dlid);
+	//TODO: how is static_rate encoded?
+	ah_attr_p->static_rate =
+		(ibal_av_p->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3);
+	ah_attr_p->src_path_bits = ibal_av_p->path_bits; // PATH:
+
+	/* For global destination or Multicast address:*/
+	if (ibal_av_p->grh_valid)
+	{
+		ah_attr_p->ah_flags |= IB_AH_GRH;
+		ah_attr_p->grh.hop_limit = ibal_av_p->grh.hop_limit;
+		ib_grh_get_ver_class_flow( ibal_av_p->grh.ver_class_flow, NULL,
+			&ah_attr_p->grh.traffic_class, &ah_attr_p->grh.flow_label );
+		err = ib_find_cached_gid((struct ib_device *)ib_dev_p,
+			(union ib_gid *)ibal_av_p->grh.src_gid.raw, &port_num, &gid_index);
+		if (err) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid failed %d (%#x). Using default: sgid_index = 0\n", err, err));
+			gid_index = 0;
+		}
+		else if (port_num != ah_attr_p->port_num) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("ib_find_cached_gid returned wrong port_num %u (Expected - %u). Using the expected.\n",
+				(u32)port_num, (u32)ah_attr_p->port_num));
+		}
+		ah_attr_p->grh.sgid_index = (u8)gid_index;
+		RtlCopyMemory(ah_attr_p->grh.dgid.raw, ibal_av_p->grh.dest_gid.raw, sizeof(ah_attr_p->grh.dgid));
+	}
+
+	return err;
+}
+
+int
+mlnx_conv_mthca_av(
+	IN	const	struct ib_ah *ib_ah_p,
+	OUT		ib_av_attr_t	*ibal_av_p)
+{
+	int err = 0;
+	struct ib_ud_header header;
+	struct mthca_ah *ah_p = (struct mthca_ah *)ib_ah_p;
+	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
+	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;
+
+	err = mthca_read_ah( dev_p, ah_p, &header);
+	if (err)
+		goto err_read_ah;
+
+	// common part
+	ibal_av_p->sl = header.lrh.service_level;
+	mthca_get_av_params(ah_p, &ibal_av_p->port_num,
+		&ibal_av_p->dlid, &ibal_av_p->static_rate, &ibal_av_p->path_bits );
+
+	// GRH
+	ibal_av_p->grh_valid = header.grh_present;
+	if (ibal_av_p->grh_valid) {
+		ibal_av_p->grh.ver_class_flow = ib_grh_set_ver_class_flow(
+			header.grh.ip_version, header.grh.traffic_class, header.grh.flow_label );
+		ibal_av_p->grh.hop_limit = header.grh.hop_limit;
+		RtlCopyMemory(ibal_av_p->grh.src_gid.raw,
+			header.grh.source_gid.raw, sizeof(ibal_av_p->grh.src_gid));
+		RtlCopyMemory(ibal_av_p->grh.dest_gid.raw,
+			header.grh.destination_gid.raw, sizeof(ibal_av_p->grh.dest_gid));
+	}
+
+	//TODO: it is not clear how to fill conn; note that the previous version did not fill it either.
+
+err_read_ah:
+	return err;
+}
+
+void
+mlnx_modify_ah(
+	IN	const	struct ib_ah *ib_ah_p,
+	IN	const	struct ib_ah_attr *ah_attr_p)
+{
+	struct ib_device *ib_dev_p = ib_ah_p->pd->device;
+	struct mthca_dev *dev_p = (struct mthca_dev *)ib_dev_p;
+
+	mthca_set_av_params(dev_p, (struct mthca_ah *)ib_ah_p, (struct ib_ah_attr *)ah_attr_p );
+}
+
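/* Editor's note: the handlers and converters in hca_data.c are consumed by
   the verbs wrappers elsewhere in this patch (hca_verbs.c); a hedged sketch
   of the CQ wiring, with the creation-verb name and signature assumed:

	struct ib_cq *cq = ibv_create_cq(ib_dev, cq_comp_handler,
		cq_event_handler, hob_p, cqe);
*/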
+ *
+ * $Id$
+ */
+
+#ifndef __HCA_DATA_H__
+#define __HCA_DATA_H__
+
+
+#include
+#include
+#include
+#include
+
+
+extern char mlnx_uvp_lib_name[];
+extern uint32_t g_skip_tavor_reset;
+extern uint32_t g_disable_tavor_reset;
+extern uint32_t g_tune_pci;
+extern uint32_t g_processor_affinity;
+extern uint32_t g_max_DPC_time_us;
+extern uint32_t g_profile_qp_num;
+extern uint32_t g_profile_rd_out;
+
+
+#define MLNX_MAX_HCA 4
+#define MLNX_NUM_HOBKL MLNX_MAX_HCA
+#define MLNX_NUM_HOBUL MLNX_MAX_HCA
+#define MLNX_NUM_CB_THR 1
+#define MLNX_SIZE_CB_POOL 256
+#define MLNX_UAL_ALLOC_HCA_UL_RES 1
+#define MLNX_UAL_FREE_HCA_UL_RES 2
+
+
+// Defines for QP ops
+#define MLNX_MAX_NUM_SGE 8
+#define MLNX_MAX_WRS_PER_CHAIN 4
+
+#define MLNX_NUM_RESERVED_QPS 16
+
+/*
+ * Completion model.
+ *	0: No DPC processor assignment
+ *	1: DPCs per-CQ, processor affinity set at CQ initialization time.
+ *	2: DPCs per-CQ, processor affinity set at runtime.
+ *	3: DPCs per-CQ, no processor affinity set.
+ */
+#define MLNX_COMP_MODEL 3
+
+#ifdef DBG
+#define VALIDATE_INDEX(index, limit, error, label) \
+	{ \
+		if (index >= limit) \
+		{ \
+			status = error; \
+			HCA_PRINT(TRACE_LEVEL_ERROR , g_mlnx_dbg_lvl ,("file %s line %d\n", __FILE__, __LINE__));\
+			goto label; \
+		} \
+	}
+#else
+#define VALIDATE_INDEX(index, limit, error, label)
+#endif
+
+
+
+// Typedefs
+
+typedef enum {
+	E_EV_CA=1,
+	E_EV_QP,
+	E_EV_CQ,
+	E_EV_LAST
+} ENUM_EVENT_CLASS;
+
+typedef enum {
+	E_MARK_CA=1, // Channel Adapter
+	E_MARK_PD, // Protection Domain
+	E_MARK_CQ, // Completion Queue
+	E_MARK_QP, // Queue Pair
+	E_MARK_AV, // Address Vector (UD)
+	E_MARK_MG, // Multicast Group
+	E_MARK_MR, // Memory Region
+	E_MARK_MW, // Memory Window
+	E_MARK_INVALID,
+} ENUM_MARK;
+
+typedef enum {
+	E_MR_PHYS=1,
+	E_MR_SHARED,
+	E_MR_ANY,
+	E_MR_INVALID
+} ENUM_MR_TYPE;
+
+/*
+ * Attribute cache for port info saved to expedite local MAD processing.
+ * Note that the cache accounts for the worst case GID and PKEY table size
+ * but is allocated from paged pool, so it's nothing to worry about.
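A note on the VALIDATE_INDEX macro above: it expands only on checked (DBG) builds, and it assumes the calling function declares a local `status` variable and provides a goto label, so callers must not rely on it for correctness on free builds. A hypothetical caller, with MY_TBL_SIZE standing in for a real table bound:

/* Illustrative only; MY_TBL_SIZE and example_lookup are not part of this patch. */
static ib_api_status_t example_lookup( uint32_t index )
{
	ib_api_status_t status = IB_SUCCESS;

	/* on DBG builds: bound check that sets 'status' and jumps on failure */
	VALIDATE_INDEX( index, MY_TBL_SIZE, IB_INVALID_PARAMETER, cleanup );

	/* ... safe to use the table entry at 'index' here ... */

cleanup:
	return status;
}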
+ */ + +typedef struct _guid_block +{ + boolean_t valid; + ib_guid_info_t tbl; + +} mlnx_guid_block_t; + +typedef struct _port_info_cache +{ + boolean_t valid; + ib_port_info_t info; + +} mlnx_port_info_cache_t; + +typedef struct _pkey_block +{ + boolean_t valid; + ib_pkey_table_t tbl; + +} mlnx_pkey_block_t; + +typedef struct _sl_vl_cache +{ + boolean_t valid; + ib_slvl_table_t tbl; + +} mlnx_sl_vl_cache_t; + +typedef struct _vl_arb_block +{ + boolean_t valid; + ib_vl_arb_table_t tbl; + +} mlnx_vl_arb_block_t; + +typedef struct _attr_cache +{ + mlnx_guid_block_t guid_block[32]; + mlnx_port_info_cache_t port_info; + mlnx_pkey_block_t pkey_tbl[2048]; + mlnx_sl_vl_cache_t sl_vl; + mlnx_vl_arb_block_t vl_arb[4]; + +} mlnx_cache_t; + +typedef struct _ib_ca { + ENUM_MARK mark; + ci_completion_cb_t comp_cb_p; + ci_async_event_cb_t async_cb_p; + const void *ca_context; + void *cl_device_h; + uint32_t index; + cl_async_proc_t *async_proc_mgr_p; + +} mlnx_hob_t; + +typedef struct HOBUL_t { + int dummy; +#ifdef WIN_TO_BE_REMOVED + pd_info_t *pd_info_tbl; + HH_hca_hndl_t hh_hndl; /* For HH direct access */ + HHUL_hca_hndl_t hhul_hndl; /* user level HCA resources handle for HH */ + uint32_t cq_idx_mask; /* */ + uint32_t qp_idx_mask; /* */ + uint32_t vendor_id; /* \ */ + uint32_t device_id; /* > 3 items needed for initializing user level */ + void *hca_ul_resources_p; /* / */ + MT_size_t cq_ul_resources_sz; /* Needed for allocating user resources for CQs */ + MT_size_t qp_ul_resources_sz; /* Needed for allocating user resources for QPs */ + MT_size_t pd_ul_resources_sz; /* Needed for allocating user resources for PDs */ + uint32_t max_cq; /* Max num. of CQs - size of following table */ + cq_info_t *cq_info_tbl; + uint32_t max_qp; /* Max num. of QPs - size of following table */ + qp_info_t *qp_info_tbl; + uint32_t max_pd; /* Max num. 
of PDs - size of following table */ + uint32_t log2_mpt_size; + atomic32_t count; +#endif +} mlnx_hobul_t, *mlnx_hobul_hndl_t; + +typedef struct _ib_mcast { + ib_gid_t mcast_gid; + struct ib_qp *ib_qp_p; + uint16_t mcast_lid; +} mlnx_mcast_t; + +typedef struct _mlnx_hca_t { + cl_list_item_t list_item; // to include in the HCA chain + net64_t guid; // HCA node Guid + struct mthca_dev *mdev; // VP Driver device + uint32_t hw_ver; // HCA HW version + mlnx_hob_t hob; // HOB - IBAL-related HCA resources + +#ifdef WIN_TO_BE_REMOVED + // removed as it is found in p_ext->cl_ext.p_pdo + const void* __ptr64 p_dev_obj; // Driver PDO +#endif +} mlnx_hca_t; + + +typedef mlnx_hob_t *mlnx_hca_h; + +// Global Variables +//extern mlnx_hca_t mlnx_hca_array[]; +//extern uint32_t mlnx_num_hca; + +extern mlnx_hob_t mlnx_hob_array[]; +extern mlnx_hobul_t *mlnx_hobul_array[]; + +// Functions +void +setup_ci_interface( + IN const ib_net64_t ca_guid, + IN const int is_livefish, + OUT ci_interface_t *p_interface ); + +void +mlnx_hca_insert( + IN mlnx_hca_t *p_hca ); + +void +mlnx_hca_remove( + IN mlnx_hca_t *p_hca ); + +mlnx_hca_t* +mlnx_hca_from_guid( + IN ib_net64_t guid ); + +/* +void +mlnx_names_from_guid( + IN ib_net64_t guid, + OUT char **hca_name_p, + OUT char **dev_name_p); +*/ + +cl_status_t +mlnx_hcas_init( void ); + +cl_status_t +mlnx_hobs_init( void ); + +ib_api_status_t +mlnx_hobs_insert( + IN mlnx_hca_t *p_hca, + OUT mlnx_hob_t **hob_p); + + +ib_api_status_t +mlnx_hobs_set_cb( + IN mlnx_hob_t *hob_p, + IN ci_completion_cb_t comp_cb_p, + IN ci_async_event_cb_t async_cb_p, + IN const void* const ib_context); + +ib_api_status_t +mlnx_hobs_get_context( + IN mlnx_hob_t *hob_p, + OUT void **context_p); + +ib_api_status_t +mlnx_hobs_create_device( + IN mlnx_hob_t *hob_p, + OUT char* dev_name); + +void +mlnx_hobs_remove( + IN mlnx_hob_t *hob_p); + +mlnx_hobul_t * +mlnx_hobs_get_hobul( + IN mlnx_hob_t *hob_p); + +void +mlnx_hobul_get( + IN mlnx_hob_t *hob_p, + OUT void **resources_p ); + +void +mlnx_hobul_delete( + IN mlnx_hob_t *hob_p); + +void +mlnx_conv_hca_cap( + IN struct ib_device *ib_dev, + IN struct ib_device_attr *hca_info_p, + IN struct ib_port_attr *hca_ports, + OUT ib_ca_attr_t *ca_attr_p); + +ib_api_status_t +mlnx_local_mad ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_av_attr_t *p_src_av_attr, + IN const ib_mad_t *p_mad_in, + OUT ib_mad_t *p_mad_out ); + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_memory_if_livefish( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_ecc_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ); + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ); + +ib_api_status_t +fw_access_ctrl( + IN const void* __ptr64 context, + IN const void* __ptr64* const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL); + +void unmap_crspace_for_all( struct ib_ucontext *p_context ); + +void cq_comp_handler(struct ib_cq *cq, void *context); + +void ca_event_handler(struct ib_event *ev, void *context); + +void srq_event_handler(struct ib_event *ev, void *context); + +void qp_event_handler(struct ib_event *ev, void *context); + +void cq_event_handler(struct ib_event *ev, void *context); + +ib_qp_state_t mlnx_qps_to_ibal(enum ib_qp_state qps); + +enum ib_qp_state mlnx_qps_from_ibal(ib_qp_state_t ib_qps); + +ib_api_status_t +mlnx_conv_qp_modify_attr( + IN const struct ib_qp *ib_qp_p, + IN 
ib_qp_type_t qp_type, + IN const ib_qp_mod_t *modify_attr_p, + OUT struct ib_qp_attr *qp_attr_p, + OUT int *qp_attr_mask_p + ); + +int +mlnx_conv_ibal_av( + IN const struct ib_device *ib_dev_p, + IN const ib_av_attr_t *ibal_av_p, + OUT struct ib_ah_attr *ah_attr_p); + +int +mlnx_conv_mthca_av( + IN const struct ib_ah *ib_ah_p, + OUT ib_av_attr_t *ibal_av_p); + +void +mlnx_modify_ah( + IN const struct ib_ah *ib_ah_p, + IN const struct ib_ah_attr *ah_attr_p); + +void set_skip_tavor_reset(); + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/hca_debug.h b/branches/Ndi/hw/mthca/kernel/hca_debug.h new file mode 100644 index 00000000..18dba8f0 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_debug.h @@ -0,0 +1,180 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _HCA_DEBUG_H_ +#define _HCA_DEBUG_H_ + + +extern uint32_t g_mthca_dbg_level; +extern uint32_t g_mthca_dbg_flags; +#define MAX_LOG_BUF_LEN 512 +extern WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; +extern UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ]; + +static void _build_str( const char * format, ... 
) +{ + va_list p_arg; + va_start(p_arg, format); + vsprintf((char *)g_slog_buf , format , p_arg); + swprintf(g_wlog_buf, L"%S", g_slog_buf); + va_end(p_arg); +} + +#define HCA_PRINT_TO_EVENT_LOG(_obj_,_level_,_flag_,_msg_) \ + { \ + NTSTATUS event_id; \ + switch (_level_) { \ + case TRACE_LEVEL_FATAL: case TRACE_LEVEL_ERROR: event_id = EVENT_MTHCA_ANY_ERROR; break; \ + case TRACE_LEVEL_WARNING: event_id = EVENT_MTHCA_ANY_WARN; break; \ + default: event_id = EVENT_MTHCA_ANY_INFO; break; \ + } \ + _build_str _msg_; \ + WriteEventLogEntryStr( _obj_, (ULONG)event_id, 0, 0, g_wlog_buf, 0, 0 ); \ + } + +#define HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \ + HCA_PRINT_TO_EVENT_LOG(mdev->ext->cl_ext.p_self_do,_level_,_flag_,_msg_) + + +#if defined(EVENT_TRACING) +// +// Software Tracing Definitions +// + +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(8BF1F640,63FE,4743,B9EF,FA38C695BFDE), \ + WPP_DEFINE_BIT( HCA_DBG_DEV) \ + WPP_DEFINE_BIT( HCA_DBG_PNP) \ + WPP_DEFINE_BIT( HCA_DBG_INIT) \ + WPP_DEFINE_BIT( HCA_DBG_MAD) \ + WPP_DEFINE_BIT( HCA_DBG_PO) \ + WPP_DEFINE_BIT( HCA_DBG_PD)\ + WPP_DEFINE_BIT( HCA_DBG_CQ) \ + WPP_DEFINE_BIT( HCA_DBG_QP) \ + WPP_DEFINE_BIT( HCA_DBG_MEMORY) \ + WPP_DEFINE_BIT( HCA_DBG_AV) \ + WPP_DEFINE_BIT( HCA_DBG_SRQ) \ + WPP_DEFINE_BIT( HCA_DBG_MCAST) \ + WPP_DEFINE_BIT( HCA_DBG_LOW) \ + WPP_DEFINE_BIT( HCA_DBG_SHIM)) + + +#define WPP_GLOBALLOGGER + + +#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl) +#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags) +#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE) +#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags) + + +// begin_wpp config +// HCA_ENTER(FLAG); +// HCA_EXIT(FLAG); +// USEPREFIX(HCA_PRINT, "%!STDPREFIX! 
[MTHCA] :%!FUNC!() :"); +// USESUFFIX(HCA_ENTER, " [MTHCA] :%!FUNC!()["); +// USESUFFIX(HCA_EXIT, " [MTHCA] :%!FUNC!()]"); +// end_wpp + + + +#define HCA_PRINT_EV(_level_,_flag_,_msg_) \ + { \ + HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \ + } + + +#else + + +#include + +/* + * Debug macros + */ + + +#define HCA_DBG_DEV (1 << 0) +#define HCA_DBG_PNP (1<<1) +#define HCA_DBG_INIT (1 << 2) +#define HCA_DBG_MAD (1 << 3) +#define HCA_DBG_PO (1 << 4) +#define HCA_DBG_PD (1<<5) +#define HCA_DBG_QP (1 << 6) +#define HCA_DBG_CQ (1 << 7) +#define HCA_DBG_MEMORY (1 << 8) +#define HCA_DBG_AV (1<<9) +#define HCA_DBG_SRQ (1 << 10) +#define HCA_DBG_MCAST (1<<11) +#define HCA_DBG_LOW (1 << 12) +#define HCA_DBG_SHIM (1 << 13) + + +#if DBG + +// assignment of _level_ is need to to overcome warning C4127 +#define HCA_PRINT(_level_,_flag_,_msg_) \ + { \ + int __lvl = _level_; \ + if (g_mthca_dbg_level >= (_level_) && \ + (g_mthca_dbg_flags & (_flag_))) { \ + DbgPrint ("~%d:[MTHCA] %s() :", KeGetCurrentProcessorNumber(), __FUNCTION__); \ + if(__lvl == TRACE_LEVEL_ERROR) DbgPrint ("***ERROR*** "); \ + DbgPrint _msg_; \ + } \ + } + +#else + +#define HCA_PRINT(lvl ,flags, msg) + +#endif + +#define HCA_PRINT_EV(_level_,_flag_,_msg_) \ + { \ + HCA_PRINT(_level_,_flag_,_msg_) \ + HCA_PRINT_EV_MDEV(_level_,_flag_,_msg_) \ + } + +#define HCA_ENTER(flags)\ + HCA_PRINT(TRACE_LEVEL_VERBOSE, flags,("[\n")); + +#define HCA_EXIT(flags)\ + HCA_PRINT(TRACE_LEVEL_VERBOSE, flags, ("]\n" )); + + +#endif //EVENT_TRACING + + +#endif /*_HCA_DEBUG_H_ */ + + diff --git a/branches/Ndi/hw/mthca/kernel/hca_direct.c b/branches/Ndi/hw/mthca/kernel/hca_direct.c new file mode 100644 index 00000000..69e91355 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_direct.c @@ -0,0 +1,310 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "hca_driver.h" +#include "hca_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_direct.tmh" +#endif +#include "mthca_dev.h" + + +/* Controls whether to use the VAPI entrypoints in THH, or the IBAL native ones. */ +#define MLNX_SEND_NATIVE 1 +#define MLNX_RECV_NATIVE 1 +#define MLNX_POLL_NATIVE 1 + + +/* +* Work Request Processing Verbs. 
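The debug macros above compile away on free builds without EVENT_TRACING, so call sites must treat them as pure tracing with no side effects. A typical instrumented function, shown only as a usage sketch (example_op is hypothetical):

/* Usage sketch for HCA_ENTER/HCA_PRINT/HCA_EXIT; not part of this patch. */
static NTSTATUS example_op( int arg )
{
	NTSTATUS status = STATUS_SUCCESS;

	HCA_ENTER( HCA_DBG_LOW );	/* prints "[" at verbose level */

	if (arg < 0) {
		/* emitted only when g_mthca_dbg_level and g_mthca_dbg_flags match */
		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW,
			("bad arg %d\n", arg) );
		status = STATUS_INVALID_PARAMETER;
	}

	HCA_EXIT( HCA_DBG_LOW );	/* prints "]" at verbose level */
	return status;
}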
+*/ + + +ib_api_status_t +mlnx_post_send ( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t *p_send_wr, + OUT ib_send_wr_t **pp_failed ) +{ + int err; + ib_api_status_t status; + struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp; + struct ib_device *ib_dev = ib_qp_p->device; + + HCA_ENTER(HCA_DBG_QP); + + err = ib_dev->post_send(ib_qp_p, p_send_wr, pp_failed ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP, + ("post_send failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else + status = errno_to_iberr(err); + goto err_post_send; + } + + status = IB_SUCCESS; + +err_post_send: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_QP); + return status; + +} + + +ib_api_status_t +mlnx_post_recv ( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed OPTIONAL ) +{ + int err; + ib_api_status_t status; + struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp; + struct ib_device *ib_dev = ib_qp_p->device; + + HCA_ENTER(HCA_DBG_QP); + + err = ib_dev->post_recv(ib_qp_p, p_recv_wr, pp_failed ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP, + ("post_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else + status = errno_to_iberr(err); + goto err_post_recv; + } + + status = IB_SUCCESS; + +err_post_recv: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_QP); + return status; + +} + +ib_api_status_t +mlnx_post_srq_recv ( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t *p_recv_wr, + OUT ib_recv_wr_t **pp_failed OPTIONAL ) +{ + int err; + ib_api_status_t status; + struct ib_srq *ib_srq_p = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq_p->device; + + HCA_ENTER(HCA_DBG_SRQ); + + err = ib_dev->post_srq_recv(ib_srq_p, p_recv_wr, pp_failed ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, + ("post_srq_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else + status = errno_to_iberr(err); + goto err_post_recv; + } + + status = IB_SUCCESS; + +err_post_recv: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SRQ); + return status; + +} + +/* +* Completion Processing and Completion Notification Request Verbs. 
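To show how the posting verbs above are meant to be driven, here is a minimal sketch that posts one signaled, single-SGE send. The field names follow the IBAL ib_send_wr_t/ib_local_ds_t definitions, and the QP handle, buffer and lkey are assumed to exist already:

/* Illustrative only; not part of this patch. */
static ib_api_status_t
example_post_one_send(
	IN	const	ib_qp_handle_t	h_qp,
	IN		uint64_t	buf_va,
	IN		uint32_t	len,
	IN		net32_t		lkey )
{
	ib_local_ds_t	ds;
	ib_send_wr_t	wr, *p_failed;

	ds.vaddr = buf_va;
	ds.length = len;
	ds.lkey = lkey;

	cl_memclr( &wr, sizeof(wr) );	/* also NULLs wr.p_next */
	wr.wr_id = 1;
	wr.wr_type = WR_SEND;
	wr.send_opt = IB_SEND_OPT_SIGNALED;
	wr.num_ds = 1;
	wr.ds_array = &ds;

	return mlnx_post_send( h_qp, &wr, &p_failed );
}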
+*/ + +ib_api_status_t +mlnx_peek_cq( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_n_cqes ) +{ + UNREFERENCED_PARAMETER(h_cq); + UNREFERENCED_PARAMETER(p_n_cqes); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("mlnx_peek_cq not implemented\n")); + return IB_INVALID_CA_HANDLE; +} + +ib_api_status_t +mlnx_poll_cq ( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq; + PREP_IBDEV_FOR_PRINT(ib_cq_p->device); + + HCA_ENTER(HCA_DBG_CQ); + + // sanity checks + if (!pp_free_wclist || !pp_done_wclist || !*pp_free_wclist) { + status = IB_INVALID_PARAMETER; + goto err_invalid_params; + } + + // poll CQ + err = mthca_poll_cq_list(ib_cq_p, pp_free_wclist, pp_done_wclist ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ, + ("mthca_poll_cq_list failed (%d)\n", err)); + status = errno_to_iberr(err); + }else if (!*pp_done_wclist) + status = IB_NOT_FOUND; + +err_invalid_params: + if (status != IB_SUCCESS && status != IB_NOT_FOUND) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_CQ); + return status; + +} + +ib_api_status_t +mlnx_enable_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq; + PREP_IBDEV_FOR_PRINT(ib_cq_p->device); + + HCA_ENTER(HCA_DBG_CQ); + + // REARM CQ + err = ib_req_notify_cq(ib_cq_p, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ, + ("ib_req_notify_cq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_CQ); + return status; +} + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const ib_cq_handle_t h_cq, + IN const uint32_t n_cqes ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq; + PREP_IBDEV_FOR_PRINT(ib_cq_p->device); + + HCA_ENTER(HCA_DBG_CQ); + + err = ib_req_ncomp_notif(ib_cq_p, n_cqes ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ, + ("ib_req_ncomp_notif failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_CQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_CQ); + return status; +} + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* const p_mw_bind, + OUT net32_t* const p_rkey ) +{ + UNREFERENCED_PARAMETER(h_mw); + UNREFERENCED_PARAMETER(h_qp); + UNREFERENCED_PARAMETER(p_mw_bind); + UNREFERENCED_PARAMETER(p_rkey); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM,("mlnx_bind_mw not implemented\n")); + return IB_INVALID_CA_HANDLE; +} + + +void +mlnx_direct_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->post_send = mlnx_post_send; + p_interface->post_recv = mlnx_post_recv; + p_interface->post_srq_recv = mlnx_post_srq_recv; + + p_interface->enable_ncomp_cq_notify = mlnx_enable_ncomp_cq_notify; + p_interface->peek_cq = NULL; /* mlnx_peek_cq: Not implemented */ + p_interface->poll_cq = mlnx_poll_cq; + p_interface->enable_cq_notify = mlnx_enable_cq_notify; + + p_interface->bind_mw = mlnx_bind_mw; +} diff --git 
a/branches/Ndi/hw/mthca/kernel/hca_driver.c b/branches/Ndi/hw/mthca/kernel/hca_driver.c new file mode 100644 index 00000000..a91abe59 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_driver.c @@ -0,0 +1,1038 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Provides the driver entry points for the Tavor VPD. + */ + +#include +#include "hca_driver.h" +#include "hca_debug.h" + +#include "mthca_log.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_driver.tmh" +#endif +#include "mthca_dev.h" +#include +#include +#pragma warning( push, 3 ) +//#include "MdCard.h" +#pragma warning( pop ) +#include +#include "mthca/mthca_vc.h" +#include "mt_pa_cash.h" +/* from \inc\platform\evntrace.h +#define TRACE_LEVEL_NONE 0 // Tracing is not on +#define TRACE_LEVEL_FATAL 1 // Abnormal exit or termination +#define TRACE_LEVEL_ERROR 2 // Severe errors that need logging +#define TRACE_LEVEL_WARNING 3 // Warnings such as allocation failure +#define TRACE_LEVEL_INFORMATION 4 // Includes non-error cases(e.g.,Entry-Exit) +#define TRACE_LEVEL_VERBOSE 5 // Detailed traces from intermediate steps +*/ +uint32_t g_mthca_dbg_level = TRACE_LEVEL_INFORMATION; +uint32_t g_mthca_dbg_flags= 0xffff; +WCHAR g_wlog_buf[ MAX_LOG_BUF_LEN ]; +UCHAR g_slog_buf[ MAX_LOG_BUF_LEN ]; +uint32_t g_skip_tavor_reset=0; /* skip reset for Tavor cards */ +uint32_t g_disable_tavor_reset=1; /* disable Tavor reset for the next driver load */ +uint32_t g_tune_pci=0; /* 0 - skip tuning PCI configuration space of HCAs */ +uint32_t g_processor_affinity = 0; +uint32_t g_max_DPC_time_us = 10000; +uint32_t g_profile_qp_num = 0; +uint32_t g_profile_rd_out = 0xffffffff; + +UNICODE_STRING g_param_path; + + +/* + * UVP name does not include file extension. For debug builds, UAL + * will append "d.dll". 
For release builds, UAL will append ".dll" + */ +char mlnx_uvp_lib_name[MAX_LIB_NAME] = {"mthcau"}; + +void reregister_hca( hca_dev_ext_t *p_ext ); + + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ); + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_Param_Path ); + +static void +hca_drv_unload( + IN PDRIVER_OBJECT p_driver_obj ); + +static NTSTATUS +hca_sysctl( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ); + +static NTSTATUS +__pnp_notify_target( + IN TARGET_DEVICE_REMOVAL_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +__pnp_notify_ifc( + IN DEVICE_INTERFACE_CHANGE_NOTIFICATION *p_notify, + IN void *context ); + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_write_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_read_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ); + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data); + +static NTSTATUS +fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len); +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ); + +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ); + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (INIT, DriverEntry) +#pragma alloc_text (INIT, __read_registry) +#pragma alloc_text (PAGE, hca_drv_unload) +#pragma alloc_text (PAGE, hca_sysctl) +#endif + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ) +{ + NTSTATUS status; + cl_status_t cl_status; +#if defined(EVENT_TRACING) + WPP_INIT_TRACING(p_driver_obj ,p_registry_path); +#endif + HCA_ENTER( HCA_DBG_DEV ); + + /* init common mechanisms */ + fill_bit_tbls(); + + status = __read_registry( p_registry_path ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("__read_registry_path returned 0x%X.\n", status)); + return status; + } + + /* Initialize Adapter DB */ + cl_status = mlnx_hcas_init(); + if( cl_status != CL_SUCCESS ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT , + ("mlnx_hcas_init returned %s.\n", cl_status_text[cl_status])); + return cl_to_ntstatus( cl_status ); + } +// cl_memclr( mlnx_hca_array, MLNX_MAX_HCA * sizeof(ci_interface_t) ); + + /* init pa cash */ + status = pa_cash_init(); + if (status) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT , + ("pa_cash_init failed.\n")); + return status; + } + + /*leo: init function table */ + hca_init_vfptr(); + + p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp; + p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power; + p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = hca_sysctl; + p_driver_obj->DriverUnload = hca_drv_unload; + p_driver_obj->DriverExtension->AddDevice = hca_add_device; + + /* init core */ + if (ib_core_init()) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed to init core, aborting.\n")); + return STATUS_UNSUCCESSFUL; + } + + /* init uverbs module */ + if (ib_uverbs_init()) { + HCA_PRINT( 
TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("Failed ib_uverbs_init, aborting.\n")); + return STATUS_UNSUCCESSFUL; + } + HCA_EXIT( HCA_DBG_DEV ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__read_registry( + IN UNICODE_STRING* const p_registry_path ) +{ + NTSTATUS status; + /* Remember the terminating entry in the table below. */ + RTL_QUERY_REGISTRY_TABLE table[10]; + + HCA_ENTER( HCA_DBG_DEV ); + + RtlInitUnicodeString( &g_param_path, NULL ); + g_param_path.MaximumLength = p_registry_path->Length + + sizeof(L"\\Parameters"); + g_param_path.Buffer = cl_zalloc( g_param_path.MaximumLength ); + if( !g_param_path.Buffer ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("Failed to allocate parameters path buffer.\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + RtlAppendUnicodeStringToString( &g_param_path, p_registry_path ); + RtlAppendUnicodeToString( &g_param_path, L"\\Parameters" ); + + /* + * Clear the table. This clears all the query callback pointers, + * and sets up the terminating table entry. + */ + cl_memclr( table, sizeof(table) ); + + /* Setup the table entries. */ + table[0].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[0].Name = L"DebugLevel"; + table[0].EntryContext = &g_mthca_dbg_level; + table[0].DefaultType = REG_DWORD; + table[0].DefaultData = &g_mthca_dbg_level; + table[0].DefaultLength = sizeof(ULONG); + + + table[1].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[1].Name = L"DebugFlags"; + table[1].EntryContext = &g_mthca_dbg_flags; + table[1].DefaultType = REG_DWORD; + table[1].DefaultData = &g_mthca_dbg_flags; + table[1].DefaultLength = sizeof(ULONG); + + table[2].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[2].Name = L"SkipTavorReset"; + table[2].EntryContext = &g_skip_tavor_reset; + table[2].DefaultType = REG_DWORD; + table[2].DefaultData = &g_skip_tavor_reset; + table[2].DefaultLength = sizeof(ULONG); + + table[3].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[3].Name = L"DisableTavorResetOnFailure"; + table[3].EntryContext = &g_disable_tavor_reset; + table[3].DefaultType = REG_DWORD; + table[3].DefaultData = &g_disable_tavor_reset; + table[3].DefaultLength = sizeof(ULONG); + + table[4].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[4].Name = L"TunePci"; + table[4].EntryContext = &g_tune_pci; + table[4].DefaultType = REG_DWORD; + table[4].DefaultData = &g_tune_pci; + table[4].DefaultLength = sizeof(ULONG); + + table[5].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[5].Name = L"ProcessorAffinity"; + table[5].EntryContext = &g_processor_affinity; + table[5].DefaultType = REG_DWORD; + table[5].DefaultData = &g_processor_affinity; + table[5].DefaultLength = sizeof(ULONG); + + table[6].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[6].Name = L"MaxDpcTimeUs"; + table[6].EntryContext = &g_max_DPC_time_us; + table[6].DefaultType = REG_DWORD; + table[6].DefaultData = &g_max_DPC_time_us; + table[6].DefaultLength = sizeof(ULONG); + + table[7].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[7].Name = L"ProfileQpNum"; + table[7].EntryContext = &g_profile_qp_num; + table[7].DefaultType = REG_DWORD; + table[7].DefaultData = &g_profile_qp_num; + table[7].DefaultLength = sizeof(ULONG); + + table[8].Flags = RTL_QUERY_REGISTRY_DIRECT; + table[8].Name = L"ProfileRdOut"; + table[8].EntryContext = &g_profile_rd_out; + table[8].DefaultType = REG_DWORD; + table[8].DefaultData = &g_profile_rd_out; + table[8].DefaultLength = sizeof(ULONG); + + /* Have at it! 
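The table-driven pattern above makes adding a tunable mechanical. A hypothetical ninth parameter would be wired in as below; note that the table would then have to be declared as table[11], since the last entry must stay zeroed to terminate the query:

/* Hypothetical addition; g_my_param and "MyParam" are illustrative only. */
uint32_t g_my_param = 0;

table[9].Flags = RTL_QUERY_REGISTRY_DIRECT;
table[9].Name = L"MyParam";
table[9].EntryContext = &g_my_param;
table[9].DefaultType = REG_DWORD;
table[9].DefaultData = &g_my_param;
table[9].DefaultLength = sizeof(ULONG);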
*/ + status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, + g_param_path.Buffer, table, NULL, NULL ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_INIT, + ("debug level %d debug flags 0x%.8x SkipTavorReset %d DisableTavorReset %d TunePci %d" + "g_processor_affinity %d g_max_DPC_time_us %d g_profile_qp_num %d g_profile_rd_out %d\n", + g_mthca_dbg_level, g_mthca_dbg_flags, + g_skip_tavor_reset, g_disable_tavor_reset, + g_tune_pci, g_processor_affinity, g_max_DPC_time_us, + g_profile_qp_num, g_profile_rd_out )); + + HCA_EXIT( HCA_DBG_DEV ); + return status; +} + +void set_skip_tavor_reset() +{ + NTSTATUS status; + HANDLE key_handle; + UNICODE_STRING key_name; + ULONG val = 1; + OBJECT_ATTRIBUTES oa; + + HCA_ENTER( HCA_DBG_DEV ); + + InitializeObjectAttributes( &oa, &g_param_path, + OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE, NULL, NULL ); + + + status = ZwOpenKey( &key_handle, GENERIC_WRITE, &oa ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW, + ("ZwOpenKey failed (%#x)\n", status)); + goto err_open_key; + } + + RtlInitUnicodeString( &key_name, L"SkipTavorReset" ); + status = ZwSetValueKey( key_handle, &key_name, 0, + REG_DWORD, &val, sizeof(ULONG) ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW, + ("ZwSetValueKey failed (%#x)\n", status)); + } + + ZwClose( key_handle ); + +err_open_key: + HCA_EXIT( HCA_DBG_DEV ); +} + +static void +hca_drv_unload( + IN PDRIVER_OBJECT p_driver_obj ) +{ + HCA_ENTER( HCA_DBG_DEV ); + + UNUSED_PARAM( p_driver_obj ); + + pa_cash_release(); + ib_uverbs_cleanup(); + ib_core_cleanup(); + cl_free( g_param_path.Buffer ); + + HCA_EXIT( HCA_DBG_DEV ); +#if defined(EVENT_TRACING) + WPP_CLEANUP(p_driver_obj); +#endif + +} + + +static NTSTATUS +hca_sysctl( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_DEV ); + + p_ext = p_dev_obj->DeviceExtension; + + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + + HCA_EXIT( HCA_DBG_DEV ); + return status; +} + +typedef struct Primary_Sector{ + uint32_t fi_addr; + uint32_t fi_size; + uint32_t signature; + uint32_t fw_reserved[5]; + uint32_t vsd[56]; + uint32_t branch_to; + uint32_t crc016; +} primary_sector_t; + +static uint32_t old_dir; +static uint32_t old_pol; +static uint32_t old_mod; +static uint32_t old_dat; + +static NTSTATUS +fw_access_pciconf ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN ULONG op_flag, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + + ULONG bytes; + NTSTATUS status = STATUS_SUCCESS; + + PAGED_CODE(); + + if( !p_buffer ) + return STATUS_INVALID_PARAMETER; + + if (p_BusInterface) + { + + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + (PVOID)&offset, + PCI_CONF_ADDR, + sizeof(ULONG) ); + + if( op_flag == 0 ) + { + if ( bytes ) + bytes = p_BusInterface->GetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length ); + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + + else + { + if ( bytes ) + bytes = p_BusInterface->SetBusData( + p_BusInterface->Context, + PCI_WHICHSPACE_CONFIG, + p_buffer, + PCI_CONF_DATA, + length); + + if ( !bytes ) + status = STATUS_NOT_SUPPORTED; + } + } + return status; +} + + +static NTSTATUS +__map_crspace( + IN struct ib_ucontext * p_context, + IN mlnx_hob_t * p_hob, + IN PVOID p_buf, + IN ULONG buf_size + ) +{ + NTSTATUS status; + PMDL p_mdl; + PVOID ua, ka; + ULONG sz; + 
hca_dev_ext_t *p_ext = EXT_FROM_HOB(p_hob); + map_crspace *p_res = (map_crspace *)p_buf; + + HCA_ENTER( HCA_DBG_PNP ); + + // sanity checks + if ( buf_size < sizeof *p_res || !p_buf ) { + status = STATUS_INVALID_PARAMETER; + goto err_invalid_params; + } + + // map memory + sz =(ULONG)p_ext->bar[HCA_BAR_TYPE_HCR].size; + if (!p_ext->bar[HCA_BAR_TYPE_HCR].virt) { + PHYSICAL_ADDRESS pa; + pa.QuadPart = p_ext->bar[HCA_BAR_TYPE_HCR].phys; + ka = MmMapIoSpace( pa, sz, MmNonCached ); + if ( ka == NULL) { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("No kernel mapping of CR space.\n") ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_map_to_kernel; + } + p_ext->bar[HCA_BAR_TYPE_HCR].virt = ka; + } + ka = p_ext->bar[HCA_BAR_TYPE_HCR].virt; + + // prepare for mapping to user space + p_mdl = IoAllocateMdl( ka, sz, FALSE,FALSE,NULL); + if (p_mdl == NULL) { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("IoAllocateMdl failed.\n") ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_alloc_mdl; + } + + // fill MDL + MmBuildMdlForNonPagedPool(p_mdl); + + // map the buffer into user space + __try + { + ua = MmMapLockedPagesSpecifyCache( p_mdl, UserMode, MmNonCached, + NULL, FALSE, NormalPagePriority ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("MmMapLockedPagesSpecifyCache failed.\n") ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto err_map_to_user; + } + + // fill the results + p_res->va = (uint64_t)(ULONG_PTR)ua; + p_res->size = sz; + + // resource tracking + p_context->p_mdl = p_mdl; + p_context->va = ua; + +#if 0 + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM, + ("MTHCA: __map_crspace succeeded with .ka %I64x, size %I64x va %I64x, size %x, pa %I64x \n", + p_ext->bar[HCA_BAR_TYPE_HCR].virt, p_ext->bar[HCA_BAR_TYPE_HCR].size, + p_res->va, p_res->size, p_ext->bar[HCA_BAR_TYPE_HCR].phys )); +#endif + status = STATUS_SUCCESS; + goto out; + +err_map_to_user: + IoFreeMdl( p_mdl ); +err_alloc_mdl: +err_map_to_kernel: +err_invalid_params: +out: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static void +__unmap_crspace( + IN struct ib_ucontext * p_context + ) +{ + HCA_ENTER( HCA_DBG_PNP ); + + if (p_context->va && p_context->p_mdl) { + MmUnmapLockedPages(p_context->va, p_context->p_mdl); + IoFreeMdl( p_context->p_mdl ); + p_context->va = p_context->p_mdl = NULL; + //NB: the unmap of IO space is being done in __UnmapHcaMemoryResources + } + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static void +__open_fw_access( + IN struct ib_ucontext* p_context, + IN PBUS_INTERFACE_STANDARD p_bus_interface ) +{ + if( !p_context->fw_if_open ) + { + p_bus_interface->InterfaceReference( p_bus_interface->Context ); + p_context->fw_if_open = TRUE; + } +} + + +static void +__close_fw_access( + IN struct ib_ucontext * p_context, + IN PBUS_INTERFACE_STANDARD p_bus_interface + ) +{ + if (p_context->fw_if_open ) { + p_bus_interface->InterfaceDereference((PVOID)p_bus_interface->Context); + p_context->fw_if_open = FALSE; + } +} + + +void +unmap_crspace_for_all( struct ib_ucontext *p_context ) +{ + mlnx_hob_t *p_hob = HOB_FROM_IBDEV( p_context->device ); + hca_dev_ext_t *p_ext = EXT_FROM_HOB(p_hob); + PBUS_INTERFACE_STANDARD p_bus_interface = &p_ext->hcaBusIfc; + + HCA_ENTER( HCA_DBG_PNP ); + + down( &p_context->mutex ); + __unmap_crspace( p_context); + __close_fw_access(p_context, p_bus_interface); + up( &p_context->mutex ); + + HCA_EXIT( HCA_DBG_PNP ); +} + +ib_api_status_t +fw_access_ctrl( + IN const ib_ca_handle_t h_ca, + IN const void* __ptr64* const handle_array 
OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + DEVICE_OBJECT *p_dev_obj; + PBUS_INTERFACE_STANDARD p_bus_interface; + NTSTATUS status = STATUS_SUCCESS; + PVOID p_data; + ULONG offset; + ULONG POINTER_ALIGNMENT length; + struct ib_ucontext * p_context; + mlnx_hob_t *p_hob; + hca_dev_ext_t *p_ext; + + UNREFERENCED_PARAMETER(handle_array); + UNREFERENCED_PARAMETER(num_handles); + + if( !p_umv_buf ) + return IB_UNSUPPORTED; + + p_context = (struct ib_ucontext *)h_ca; + p_hob = HOB_FROM_IBDEV( p_context->device ); + p_ext = EXT_FROM_HOB(p_hob); + p_dev_obj = (DEVICE_OBJECT *)p_ext->cl_ext.p_self_do; + p_bus_interface = &p_ext->hcaBusIfc; + + if ( !p_ci_op ) + return IB_INVALID_PARAMETER; + + length = p_ci_op->buf_size; + offset = p_ci_op->buf_info; + p_data = p_ci_op->p_buf; + + down( &p_context->mutex ); + + switch ( p_ci_op->command ) + { + case FW_REREGISTER_HCA: + reregister_hca(p_ext); + break; + + case FW_MAP_CRSPACE: + status = __map_crspace(p_context, p_hob, p_data, length); + break; + + case FW_UNMAP_CRSPACE: + __unmap_crspace(p_context); + break; + + case FW_OPEN_IF: // open BusInterface + __open_fw_access( p_context, p_bus_interface ); + break; + + case FW_READ: // read data from flash + if ( p_context->fw_if_open ) + status = fw_flash_read_data(p_bus_interface, p_data, offset, length); + break; + + case FW_WRITE: // write data to flash + if ( p_context->fw_if_open ) + status = fw_flash_write_data(p_bus_interface, p_data, offset, length); + break; + + case FW_READ_CMD: + if ( p_context->fw_if_open ) + status = fw_access_pciconf(p_bus_interface, 0 , p_data, offset, 4); + break; + + case FW_WRITE_CMD: + if ( p_context->fw_if_open ) + status = fw_access_pciconf(p_bus_interface, 1 , p_data, offset, 4); + break; + + case FW_CLOSE_IF: // close BusInterface + __close_fw_access(p_context, p_bus_interface); + break; + + default: + status = STATUS_INVALID_DEVICE_REQUEST; + } + + if ( status != STATUS_SUCCESS ) { + __close_fw_access(p_context, p_bus_interface); + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("fw_access_ctrl failed, ntstatus: %08x.\n", status)); + } + + up( &p_context->mutex ); + + switch( status ) { + case STATUS_SUCCESS: return IB_SUCCESS; + case STATUS_INVALID_DEVICE_REQUEST: return IB_UNSUPPORTED; + case STATUS_INSUFFICIENT_RESOURCES: return IB_INSUFFICIENT_RESOURCES; + default: return IB_ERROR; + } +} + +static NTSTATUS +fw_flash_write_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status; + uint32_t cnt = 0; + uint32_t lcl_data; + + if (!length) + return IB_INVALID_PARAMETER; + + lcl_data = (*((uint32_t*)p_buffer) << 24); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET+4, length ); + if ( status != STATUS_SUCCESS ) + return status; + lcl_data = ( WRITE_BIT | (offset & ADDR_MSK)); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + lcl_data = 0; + + do + { + if (++cnt > 5000) + { + return STATUS_DEVICE_NOT_READY; + } + + status = fw_access_pciconf(p_BusInterface, FW_READ , &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + } while(lcl_data & CMD_MASK); + + return status; +} + +static NTSTATUS +fw_flash_read_data ( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status = 
STATUS_SUCCESS; + uint32_t cnt = 0; + uint32_t lcl_data = ( READ_BIT | (offset & ADDR_MSK)); + + if (!length) + return IB_INVALID_PARAMETER; + + status = fw_access_pciconf(p_BusInterface, FW_WRITE, &lcl_data, FLASH_OFFSET, 4 ); + if ( status != STATUS_SUCCESS ) + return status; + + lcl_data = 0; + do + { + // Timeout checks + if (++cnt > 5000 ) + { + return STATUS_DEVICE_NOT_READY; + } + + status = fw_access_pciconf(p_BusInterface, FW_READ, &lcl_data, FLASH_OFFSET, 4 ); + + if ( status != STATUS_SUCCESS ) + return status; + + } while(lcl_data & CMD_MASK); + + status = fw_access_pciconf(p_BusInterface, FW_READ, p_buffer, FLASH_OFFSET+4, length ); + return status; +} + +static NTSTATUS +fw_flash_read4( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t addr, + IN OUT uint32_t *p_data) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t lcl_data = 0; + uint32_t bank; + static uint32_t curr_bank = 0xffffffff; + + if (addr & 0x3) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("Invalid address %08x\n", addr) ); + return STATUS_INVALID_PARAMETER; + } + + bank = addr & BANK_MASK; + if (bank != curr_bank) + { + curr_bank = bank; + if ((status = fw_set_bank(p_BusInterface, bank)) != STATUS_SUCCESS ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("fw_set_bank returned %08x\n", status) ); + return STATUS_INVALID_PARAMETER; + } + } + status = fw_flash_read_data(p_BusInterface, &lcl_data, addr, 4); + *p_data = cl_ntoh32(lcl_data); + return STATUS_SUCCESS; +} + +static NTSTATUS +fw_flash_readbuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t offset, + IN OUT void *p_data, + IN uint32_t len) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t *p_lcl_data; + uint32_t i; + + if (offset & 0x3) + { + //Address should be 4-bytes aligned + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("Invalid address %08x\n", offset) ); + return STATUS_INVALID_PARAMETER; + } + if (len & 0x3) + { + //Length should be 4-bytes aligned + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_INIT, + ("Invalid length %d\n", len) ); + return STATUS_INVALID_PARAMETER; + } + p_lcl_data = (uint32_t *)p_data; + + for ( i=0; i < (len >> 2); i++) + { + if ( (status = fw_flash_read_data( p_BusInterface, p_lcl_data, offset, sizeof(uint32_t) )) != STATUS_SUCCESS ) + return status; + offset += 4; + p_lcl_data++; + } + return STATUS_SUCCESS; +} // Flash::flash_read + +static NTSTATUS +fw_flash_writebuf( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN PVOID p_buffer, + IN ULONG offset, + IN ULONG POINTER_ALIGNMENT length ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t i; + uint8_t *p_data = (uint8_t *)p_buffer; + + for ( i = 0; i < length; i++ ) + { + status = fw_flash_write_data (p_BusInterface, p_data, offset, 1 ); + if (status != STATUS_SUCCESS ) + return status; + p_data++; + offset++; + } + return status; +} +static NTSTATUS +fw_flash_init( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t dir; + uint32_t pol; + uint32_t mod; + + uint32_t cnt=0; + uint32_t data; + NTSTATUS status = STATUS_SUCCESS; + uint32_t semaphore = 0; + + while ( !semaphore ) + { + status = fw_access_pciconf(p_BusInterface, FW_READ , &data, SEMAP63, 4); + if ( status != STATUS_SUCCESS ) + break; + if( !data ) + { + semaphore = 1; + break; + } + if (++cnt > 5000 ) + { + break; + } + } + + if ( !semaphore ) + { + return STATUS_NOT_SUPPORTED; + } + + // Save old values + + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , 
&old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_READ , &old_dat,GPIO_DAT_L , 4); + + // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits + dir = old_dir | 0x70; + pol = old_pol & ~0x70; + mod = old_mod & ~0x70; + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + // Set CPUMODE + status = fw_access_pciconf(p_BusInterface, FW_READ , &data, CPUMODE, 4); + if ( status == STATUS_SUCCESS ) + { + data &= ~CPUMODE_MSK; + data |= 1 << CPUMODE_SHIFT; + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, CPUMODE, 4); + } + if ( status == STATUS_SUCCESS ) + { + // Reset flash + data = 0xf0; + status = fw_flash_write_data(p_BusInterface, &data, 0x0, 4); + } + return status; +} + +static NTSTATUS +fw_flash_deinit( + IN BUS_INTERFACE_STANDARD *p_BusInterface ) +{ + uint32_t data = 0; + NTSTATUS status = STATUS_SUCCESS; + + status = fw_set_bank(p_BusInterface, 0); + if ( status == STATUS_SUCCESS ) + // Restore origin values + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dir,GPIO_DIR_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_pol,GPIO_POL_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_mod,GPIO_MOD_L , 4); + if ( status == STATUS_SUCCESS ) + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &old_dat,GPIO_DAT_L , 4); + if ( status == STATUS_SUCCESS ) + // Free GPIO Semaphore + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, SEMAP63, 4); + return status; +} + +static NTSTATUS +fw_set_bank( + IN BUS_INTERFACE_STANDARD *p_BusInterface, + IN uint32_t bank ) +{ + NTSTATUS status = STATUS_SUCCESS; + uint32_t data = ( (uint32_t)0x70 << 24 ); + uint32_t mask = ((bank >> (BANK_SHIFT-4)) << 24 ); + + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATACLEAR_L, 4); + if (status == STATUS_SUCCESS) + { + // A1 + data &= mask; + //data |= mask; // for A0 + status = fw_access_pciconf(p_BusInterface, FW_WRITE , &data, GPIO_DATASET_L, 4); + } + return status; +} diff --git a/branches/Ndi/hw/mthca/kernel/hca_driver.h b/branches/Ndi/hw/mthca/kernel/hca_driver.h new file mode 100644 index 00000000..4e8fc3b6 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_driver.h @@ -0,0 +1,246 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
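Taken together, the flash helpers above imply a fixed calling sequence: acquire the GPIO semaphore and save GPIO state with fw_flash_init(), perform banked reads or writes, then restore state and release the semaphore with fw_flash_deinit(). A sketch of a read, assuming the bus interface was already obtained and that offset and length are 4-byte aligned as fw_flash_readbuf() requires:

/* Illustrative only; not part of this patch. */
static NTSTATUS
example_read_flash_block(
	IN	BUS_INTERFACE_STANDARD	*p_BusInterface,
	OUT	void			*p_buf,
	IN	uint32_t		offset,
	IN	uint32_t		len )
{
	NTSTATUS status;

	status = fw_flash_init( p_BusInterface );	/* semaphore + GPIO setup */
	if ( !NT_SUCCESS( status ) )
		return status;

	status = fw_flash_readbuf( p_BusInterface, offset, p_buf, len );

	/* always restore GPIO state and release the semaphore */
	fw_flash_deinit( p_BusInterface );
	return status;
}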
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined( _HCA_DRIVER_H_ ) +#define _HCA_DRIVER_H_ + + +#include +#include +#include +#include +#include "mthca/mthca_vc.h" +#include "hca_data.h" +#include "mt_l2w.h" +#include "hca_debug.h" + + +#include "hca_pnp.h" +#include "hca_pci.h" + +#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK +#define FILE_DEVICE_INFINIBAND 0x0000003B +#endif + +/****s* HCA/hca_reg_state_t +* NAME +* hca_reg_state_t +* +* DESCRIPTION +* State for tracking registration with AL. This state is independent of the +* device PnP state, and both are used to properly register with AL. +* +* SYNOPSIS +*/ +typedef enum _hca_reg_state +{ + HCA_SHUTDOWN, + HCA_ADDED, + HCA_STARTED, + HCA_IFC_DEREFERENCED, + HCA_REGISTERED + +} hca_reg_state_t; +/* +* VALUES +* HCA_SHUTDOWN +* Cleaning up. +* +* HCA_ADDED +* AddDevice was called and successfully registered for interface +* notifications. +* +* HCA_STARTED +* IRP_MN_START_DEVICE was called. The HCA is fully functional. +* +* HCA_IFC_DEREFERENCED +* DEVICE_QUERY_REMOVE for IBBUS was received. +* +* HCA_REGISTERED +* Fully functional and registered with the bus root. +*********/ + + +typedef enum _hca_bar_type +{ + HCA_BAR_TYPE_HCR, + HCA_BAR_TYPE_UAR, + HCA_BAR_TYPE_DDR, + HCA_BAR_TYPE_MAX + +} hca_bar_type_t; + + +typedef struct _hca_bar +{ + uint64_t phys; + void *virt; + SIZE_T size; + +} hca_bar_t; + + +typedef struct _hca_dev_ext +{ + /* ------------------------------------------------- + * PNP DATA + * ------------------------------------------------ */ + cl_pnp_po_ext_t cl_ext; /* COMPLIB PnP object */ + void * pnp_ifc_entry; /* Notification entry for PnP interface events. */ + void * pnp_target_entry; /* Notification entry for PnP target events. */ + PNP_DEVICE_STATE pnpState; /* state for PnP Manager */ + + /* ------------------------------------------------- + * POWER MANAGER DATA + * ------------------------------------------------ */ + /* Cache of the system to device power states. */ + DEVICE_POWER_STATE DevicePower[PowerSystemMaximum]; + DEVICE_POWER_STATE DevicePowerState; + SYSTEM_POWER_STATE SystemPowerState; + PIO_WORKITEM pPoWorkItem; + + /* ------------------------------------------------- + * IB_AL DATA + * ------------------------------------------------ */ + ib_ci_ifc_t ci_ifc; /* Interface for the lower edge of the IB_AL device. 
*/ + hca_reg_state_t state; /* State for tracking registration with AL */ + DEVICE_OBJECT * p_al_dev; /* IB_AL FDO */ + FILE_OBJECT * p_al_file_obj; /* IB_AL file object */ + UNICODE_STRING al_sym_name; /* IB_AL symbolic name */ + + /* ------------------------------------------------- + * LOW LEVEL DRIVER' DATA + * ------------------------------------------------ */ + mlnx_hca_t hca; + atomic32_t usecnt; /* the number of working applications*/ + cl_spinlock_t uctx_lock; // spinlock for the below chain + cl_qlist_t uctx_list; // chain of user contexts + + /* ------------------------------------------------- + * OS DATA + * ------------------------------------------------ */ + hca_bar_t bar[HCA_BAR_TYPE_MAX]; /* HCA memory bars */ + CM_PARTIAL_RESOURCE_DESCRIPTOR interruptInfo; /* HCA interrupt resources */ + PKINTERRUPT int_obj; /* HCA interrupt object */ + spinlock_t isr_lock; /* lock for the ISR */ + ULONG bus_number; /* HCA's bus number */ + BUS_INTERFACE_STANDARD hcaBusIfc; /* PCI bus interface */ + + /* ------------------------------------------------- + * VARIABLES + * ------------------------------------------------ */ + DMA_ADAPTER * p_dma_adapter; /* HCA adapter object */ + ULONG n_map_regs; /* num of allocated adapter map registers */ + PCI_COMMON_CONFIG hcaConfig; /* saved HCA PCI configuration header */ + int hca_hidden; /* flag: when set - no attached DDR memory */ + +} hca_dev_ext_t; + +#define EXT_FROM_HOB(hob_p) (container_of(hob_p, hca_dev_ext_t, hca.hob)) +#define HCA_FROM_HOB(hob_p) (container_of(hob_p, mlnx_hca_t, hob)) +#define MDEV_FROM_HOB(hob_p) (HCA_FROM_HOB(hob_p)->mdev) +#define IBDEV_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.mdev->ib_dev) +#define HOBUL_FROM_HOB(hob_p) (&EXT_FROM_HOB(hob_p)->hca.hobul) +#define HOB_FROM_IBDEV(dev_p) (mlnx_hob_t *)&dev_p->mdev->ext->hca.hob + + +#define IB_GET_ERR_STR ib_dev->mdev->ext->ci_ifc.get_err_str +#if DBG || defined( EVENT_TRACING ) +#define PREP_IBDEV_FOR_PRINT(val) struct ib_device *ib_dev = val +#else +#define PREP_IBDEV_FOR_PRINT(val) +#endif + +/*********************************** +Firmware Update definitions +***********************************/ +#define PCI_CONF_ADDR (0x00000058) +#define PCI_CONF_DATA (0x0000005c) +#define FLASH_OFFSET (0x000f01a4) +#define READ_BIT (1<<29) +#define WRITE_BIT (2<<29) +#define ADDR_MSK (0x0007ffff) +#define CMD_MASK (0xe0000000) +#define BANK_SHIFT (19) +#define BANK_MASK (0xfff80000) +#define MAX_FLASH_SIZE (0x80000) // 512K + +#define SEMAP63 (0xf03fc) +#define GPIO_DIR_L (0xf008c) +#define GPIO_POL_L (0xf0094) +#define GPIO_MOD_L (0xf009c) +#define GPIO_DAT_L (0xf0084) +#define GPIO_DATACLEAR_L (0xf00d4) +#define GPIO_DATASET_L (0xf00dc) + +#define CPUMODE (0xf0150) +#define CPUMODE_MSK (0xc0000000UL) +#define CPUMODE_SHIFT (30) + +/* Definitions intended to become shared with UM. Later... 
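The container_of-style macros above encode the navigation paths between the IBAL HCA object (mlnx_hob_t), the device extension and the low-level mthca device. A short sketch of each hop (illustrative only):

/* Illustrative only; hob_p is assumed to be a valid pointer from IBAL. */
static void example_navigate( mlnx_hob_t *hob_p )
{
	hca_dev_ext_t		*p_ext = EXT_FROM_HOB( hob_p );	/* PnP/PO extension */
	struct mthca_dev	*mdev = MDEV_FROM_HOB( hob_p );	/* low-level device */

	/* by construction: &p_ext->hca.hob == hob_p and mdev == p_ext->hca.mdev */
	UNUSED_PARAM( p_ext );
	UNUSED_PARAM( mdev );
}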
+ */
+#define FW_READ			0x00
+#define FW_WRITE		0x01
+#define FW_READ_CMD		0x08
+#define FW_WRITE_CMD		0x09
+#define FW_OPEN_IF		0xe7
+#define FW_CLOSE_IF		0x7e
+
+#define FW_SIGNATURE		(0x5a445a44)
+#define FW_SECT_SIZE		(0x10000)
+
+static inline ib_api_status_t errno_to_iberr(int err)
+{
+#define MAP_ERR(err,ibstatus)	case err: ib_status = ibstatus; break
+	ib_api_status_t ib_status = IB_UNKNOWN_ERROR;
+	if (err < 0)
+		err = -err;
+	switch (err) {
+		MAP_ERR( ENOENT, IB_NOT_FOUND );
+		MAP_ERR( EINTR, IB_INTERRUPTED );
+		MAP_ERR( EAGAIN, IB_RESOURCE_BUSY );
+		MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY );
+		MAP_ERR( EACCES, IB_INVALID_PERMISSION );
+		MAP_ERR( EFAULT, IB_ERROR );
+		MAP_ERR( EBUSY, IB_RESOURCE_BUSY );
+		MAP_ERR( ENODEV, IB_UNSUPPORTED );
+		MAP_ERR( EINVAL, IB_INVALID_PARAMETER );
+		MAP_ERR( ENOSYS, IB_UNSUPPORTED );
+		MAP_ERR( ERANGE, IB_INVALID_SETTING );
+		default:
+			//HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM,
+			//	"Unmapped errno (%d)\n", err);
+			break;
+	}
+	return ib_status;
+}
+
+#endif	/* !defined( _HCA_DRIVER_H_ ) */
diff --git a/branches/Ndi/hw/mthca/kernel/hca_mcast.c b/branches/Ndi/hw/mthca/kernel/hca_mcast.c
new file mode 100644
index 00000000..1df61bfa
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/hca_mcast.c
@@ -0,0 +1,202 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include
+#include
+
+#include "hca_driver.h"
+
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "hca_mcast.tmh"
+#endif
+#include "mthca_dev.h"
+
+/*
+* Multicast Support Verbs.
+*/ +ib_api_status_t +mlnx_attach_mcast ( + IN const ib_qp_handle_t h_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status; + struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp; + PREP_IBDEV_FOR_PRINT(ib_qp_p->device); + mlnx_mcast_t *mcast_p; + + HCA_ENTER(HCA_DBG_MCAST); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MCAST, + ("User mode is not supported yet\n")); + status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + if (!p_mcast_gid || !ph_mcast) { + status = IB_INVALID_PARAMETER; + goto err_invalid_param; + } + + // allocate structure + mcast_p = (mlnx_mcast_t*)kmalloc(sizeof *mcast_p, GFP_ATOMIC ); + if (mcast_p == NULL) { + status = IB_INSUFFICIENT_MEMORY; + goto err_no_mem; + } + + // attach to mcast group + if( p_umv_buf && p_umv_buf->command ) { + //TODO: call uverbs + } + else { + err = ibv_attach_mcast(ib_qp_p, (union ib_gid *)p_mcast_gid, (u16)mcast_lid); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MCAST, + ("ibv_attach_mcast failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_attach; + } + } + + // fill the structure + mcast_p->ib_qp_p = ib_qp_p; + mcast_p->mcast_lid = mcast_lid; + RtlCopyMemory(mcast_p->mcast_gid.raw, p_mcast_gid->raw, sizeof *p_mcast_gid); + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_MCAST, + ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", + mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid, + cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[0]), + cl_ntoh64(*(uint64_t*)&mcast_p->mcast_gid.raw[8] ))); + + // return the result + if (ph_mcast) *ph_mcast = (ib_mcast_handle_t)mcast_p; + + status = IB_SUCCESS; + goto end; + +err_attach: + kfree(mcast_p); +err_no_mem: +err_invalid_param: +err_unsupported: +err_user_unsupported: +end: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MCAST, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MCAST); + return status; +} + +ib_api_status_t +mlnx_detach_mcast ( + IN const ib_mcast_handle_t h_mcast) +{ + ib_api_status_t status = IB_INVALID_PARAMETER; + int err; + mlnx_mcast_t *mcast_p = (mlnx_mcast_t*)h_mcast; + struct ib_device *ib_dev; + + + HCA_ENTER(HCA_DBG_MCAST); + // sanity check + if (!mcast_p || !mcast_p->ib_qp_p) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MCAST, + ("completes with ERROR status IB_INVALID_PARAMETER\n")); + status = IB_INVALID_PARAMETER; + goto err_invalid_param; + } + ib_dev = mcast_p->ib_qp_p->device; + + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_MCAST, + ("mcasth %p, qp_p %p, mlid %hx, mgid %I64x`%I64x\n", + mcast_p, mcast_p->ib_qp_p, mcast_p->mcast_lid, + *(uint64_t*)&mcast_p->mcast_gid.raw[0], + *(uint64_t*)&mcast_p->mcast_gid.raw[8] )); + + // detach + err = ibv_detach_mcast( mcast_p->ib_qp_p, + (union ib_gid *)&mcast_p->mcast_gid, mcast_p->mcast_lid ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MCAST, + ("ibv_detach_mcast failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_detach_mcast; + } + + status = IB_SUCCESS; + +err_detach_mcast: + kfree(mcast_p); +err_unsupported: +err_invalid_param: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MCAST, + ("completes with ERROR status %d\n", status)); + } + 
HCA_EXIT(HCA_DBG_MCAST); + return status; +} + + +void +mlnx_mcast_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->attach_mcast = mlnx_attach_mcast; + p_interface->detach_mcast = mlnx_detach_mcast; +} diff --git a/branches/Ndi/hw/mthca/kernel/hca_memory.c b/branches/Ndi/hw/mthca/kernel/hca_memory.c new file mode 100644 index 00000000..6e9e90a0 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_memory.c @@ -0,0 +1,609 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "hca_utils.h" +#include "mthca_dev.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_memory.tmh" +#endif + +/* + * Memory Management Verbs. + */ + +ib_api_status_t +mlnx_register_mr ( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t *p_mr_create, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t *ph_mr, + IN boolean_t um_call ) +{ + ib_api_status_t status; + int err; + struct ib_mr *mr_p; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + PREP_IBDEV_FOR_PRINT(ib_pd_p->device); + + HCA_ENTER(HCA_DBG_MEMORY); + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + if (!p_mr_create || 0 == p_mr_create->length) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY, + ("invalid attributes\n")); + status = IB_INVALID_PARAMETER; + goto err_invalid_parm; + } + /* + * Local write permission is required if remote write or + * remote atomic permission is also requested. 
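+	 *
+	 * Equivalently, as a predicate (a sketch — this helper name is
+	 * ours, not part of the patch):
+	 *
+	 *	static boolean_t __mr_access_ok( ib_access_t acl ) {
+	 *		return !( (acl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC)) &&
+	 *			!(acl & IB_AC_LOCAL_WRITE) );
+	 *	}
+	 *
+	 * which is exactly the check performed below.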
+ */ + if (p_mr_create->access_ctrl & (IB_AC_RDMA_WRITE | IB_AC_ATOMIC) && + !(p_mr_create->access_ctrl & IB_AC_LOCAL_WRITE)) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY, + ("invalid access rights\n")); + status = IB_INVALID_PERMISSION; + goto err_invalid_access; + } + + // register mr + mr_p = ibv_reg_mr(ib_pd_p, map_qp_ibal_acl(p_mr_create->access_ctrl), + p_mr_create->vaddr, p_mr_create->length, + (uint64_t)p_mr_create->vaddr, um_call ); + if (IS_ERR(mr_p)) { + err = PTR_ERR(mr_p); + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, + ("ibv_reg_mr failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_reg_mr; + } + + // results + *p_lkey = mr_p->lkey; + *p_rkey = cl_hton32( mr_p->rkey ); + if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p; + status = IB_SUCCESS; + +err_reg_mr: +err_invalid_access: +err_invalid_parm: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; +} + +ib_api_status_t +mlnx_register_pmr ( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + ib_api_status_t status; + int err; + struct ib_mr *mr_p; + struct ib_phys_buf *buffer_list; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + PREP_IBDEV_FOR_PRINT(ib_pd_p->device); + + UNUSED_PARAM( um_call ); + + HCA_ENTER(HCA_DBG_MEMORY); + + if (mthca_is_livefish(to_mdev(ib_pd_p->device))) { + mr_p = kzalloc(sizeof *mr_p, GFP_KERNEL); + if (!mr_p) { + status = IB_INSUFFICIENT_MEMORY; + goto err_mem; + } + mr_p->device = ib_pd_p->device; + mr_p->pd = ib_pd_p; + goto done; + } + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + if (!p_vaddr || !p_pmr_create || + 0 == p_pmr_create->length ) { + status = IB_INVALID_PARAMETER; + goto err_invalid_parm; + } + + // prepare parameters + buffer_list = (void*)p_pmr_create->range_array; + //NB: p_pmr_create->buf_offset is not used, i.e. supposed that region is page-aligned + //NB: p_pmr_create->hca_page_size is not used, i.e. 
supposed it is always the same + + // register pmr + if (p_pmr_create->length == (uint64_t)-1i64) + { + mr_p = ibv_get_dma_mr( ib_pd_p, + map_qp_ibal_acl(p_pmr_create->access_ctrl) ); + } + else + mr_p = ibv_reg_phys_mr(ib_pd_p, buffer_list, p_pmr_create->num_ranges, + map_qp_ibal_acl(p_pmr_create->access_ctrl), p_vaddr ); + if (IS_ERR(mr_p)) { + err = PTR_ERR(mr_p); + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY, + ("mthca_reg_phys_mr failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_reg_phys_mr; + } + + // results +done: + if (ph_mr) *ph_mr = (ib_mr_handle_t)mr_p; + *p_lkey = mr_p->lkey; + *p_rkey = cl_hton32( mr_p->rkey ); + //NB: p_vaddr was not changed + status = IB_SUCCESS; + +err_reg_phys_mr: +err_invalid_parm: +err_unsupported: +err_mem: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; + +} + +ib_api_status_t +mlnx_query_mr ( + IN const ib_mr_handle_t h_mr, + OUT ib_mr_attr_t *p_mr_query ) +{ + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(p_mr_query); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_query_mr not implemented\n")); + return IB_UNSUPPORTED; +} + + +ib_api_status_t +mlnx_modify_mr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_mr_create_t *p_mr_create, + OUT uint32_t *p_lkey, + OUT uint32_t *p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(mem_modify_req); + UNREFERENCED_PARAMETER(p_mr_create); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(um_call); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_modify_mr not implemented\n")); + return IB_UNSUPPORTED; +} + + +ib_api_status_t +mlnx_modify_pmr ( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mem_modify_req, + IN const ib_phys_create_t* const p_pmr_create, + IN OUT uint64_t* const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL, + IN boolean_t um_call ) +{ + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(mem_modify_req); + UNREFERENCED_PARAMETER(p_pmr_create); + UNREFERENCED_PARAMETER(p_vaddr); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(um_call); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_modify_pmr not implemented\n")); + return IB_UNSUPPORTED; +} + +ib_api_status_t +mlnx_register_smr ( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr, + IN boolean_t um_call ) +{ + UNREFERENCED_PARAMETER(h_mr); + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(access_ctrl); + UNREFERENCED_PARAMETER(p_vaddr); + UNREFERENCED_PARAMETER(p_lkey); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(ph_mr); + UNREFERENCED_PARAMETER(um_call); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_register_smr not implemented\n")); + return IB_UNSUPPORTED; +} + +ib_api_status_t +mlnx_deregister_mr ( + IN const ib_mr_handle_t h_mr) +{ + ib_api_status_t status; + int err; + struct ib_mr *ib_mr = (struct ib_mr *)h_mr; + PREP_IBDEV_FOR_PRINT(ib_mr->device); + + HCA_ENTER(HCA_DBG_SHIM); + + if 
(mthca_is_livefish(to_mdev(ib_mr->device))) { + kfree(ib_mr); + goto done; + } + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + // deregister + err = ibv_dereg_mr((struct ib_mr *)h_mr); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY, + ("mthca_dereg_mr failed (%d)", status)); + goto err_dereg_mr; + } + +done: + status = IB_SUCCESS; + +err_dereg_mr: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; + +} + +ib_api_status_t +mlnx_alloc_fmr( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_create_t* const p_fmr_create, + OUT mlnx_fmr_handle_t* const ph_fmr + ) +{ + ib_api_status_t status; + int err; + struct ib_fmr * fmr_p; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + struct ib_fmr_attr fmr_attr; + PREP_IBDEV_FOR_PRINT(ib_pd_p->device); + + HCA_ENTER(HCA_DBG_MEMORY); + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + if (!p_fmr_create ) { + status = IB_INVALID_PARAMETER; + goto err_invalid_parm; + } + // TODO: check Max remap in AL + + // prepare parameters + RtlZeroMemory(&fmr_attr, sizeof(struct ib_fmr_attr)); + fmr_attr.max_maps = p_fmr_create->max_maps; + fmr_attr.max_pages = p_fmr_create->max_pages; + fmr_attr.page_shift = p_fmr_create->page_size; + + // register mr + fmr_p = ibv_alloc_fmr(ib_pd_p, + map_qp_ibal_acl(p_fmr_create->access_ctrl), &fmr_attr); + if (IS_ERR(fmr_p)) { + err = PTR_ERR(fmr_p); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY , + ("mthca_alloc_fmr failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_fmr; + } + + // results + if (ph_fmr) *ph_fmr = (mlnx_fmr_handle_t)fmr_p; + status = IB_SUCCESS; + +err_alloc_fmr: +err_invalid_parm: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; + +} + +ib_api_status_t +mlnx_map_phys_fmr ( + IN const mlnx_fmr_handle_t h_fmr, + IN const uint64_t* const page_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey + ) +{ + int err; + ib_api_status_t status; + struct ib_fmr *ib_fmr = (struct ib_fmr *)h_fmr; + uint64_t vaddr = (*p_vaddr) & ~(PAGE_SIZE - 1); + PREP_IBDEV_FOR_PRINT(ib_fmr->device); + + HCA_ENTER(HCA_DBG_MEMORY); + + // mapping + err = ibv_map_phys_fmr(ib_fmr, (u64*)page_list, list_len, (uint64_t)(ULONG_PTR)vaddr); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY , + ("ibv_map_phys_fmr failed (%d) for mr %p\n", err, h_fmr)); + goto err_dealloc_fmr; + } + + // return the results + *p_vaddr = vaddr; + *p_lkey = ib_fmr->lkey; + *p_rkey = cl_hton32( ib_fmr->rkey ); + + status = IB_SUCCESS; + +err_dealloc_fmr: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; +} + + + +ib_api_status_t +mlnx_unmap_fmr ( + IN const mlnx_fmr_handle_t *ph_fmr) +{ + ib_api_status_t status; + int err; + struct ib_fmr *ib_fmr = (struct ib_fmr *)*ph_fmr; + struct list_head fmr_list; + PREP_IBDEV_FOR_PRINT(ib_fmr->device); + + HCA_ENTER(HCA_DBG_MEMORY); + + // sanity checks + if( !cl_is_blockable() ) { + 
status = IB_UNSUPPORTED; + goto err_unsupported; + } + + INIT_LIST_HEAD(&fmr_list); + while(*ph_fmr) + { + ib_fmr = (struct ib_fmr*)*ph_fmr; + list_add_tail(&ib_fmr->list, &fmr_list); + ph_fmr ++; + } + + err = ibv_unmap_fmr(&fmr_list); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY , + ("ibv_unmap_fmr failed (%d) \n", err)); + goto err_unmap_fmr; + } + + status = IB_SUCCESS; + +err_unmap_fmr: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; + + +} + + + +ib_api_status_t +mlnx_dealloc_fmr ( + IN const mlnx_fmr_handle_t h_fmr + ) +{ + ib_api_status_t status; + int err; + struct ib_fmr *fmr = (struct ib_fmr *)h_fmr; + PREP_IBDEV_FOR_PRINT(fmr->device); + UNUSED_PARAM_WOWPP(fmr); + + HCA_ENTER(HCA_DBG_MEMORY); + + // sanity checks + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + + // deregister + err = ibv_dealloc_fmr((struct ib_fmr *)h_fmr); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY , + ("ibv_dealloc_fmr failed (%d) for mr %p\n",err, h_fmr)); + goto err_dealloc_fmr; + } + + status = IB_SUCCESS; + +err_dealloc_fmr: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_MEMORY); + return status; + +} + + + +/* +* Memory Window Verbs. +*/ + +ib_api_status_t +mlnx_create_mw ( + IN const ib_pd_handle_t h_pd, + OUT net32_t* const p_rkey, + OUT ib_mw_handle_t *ph_mw, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + UNREFERENCED_PARAMETER(h_pd); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(ph_mw); + UNREFERENCED_PARAMETER(p_umv_buf); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_create_mw not implemented\n")); + return IB_UNSUPPORTED; +} + +ib_api_status_t +mlnx_query_mw ( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t *ph_pd, + OUT net32_t* const p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + UNREFERENCED_PARAMETER(h_mw); + UNREFERENCED_PARAMETER(ph_pd); + UNREFERENCED_PARAMETER(p_rkey); + UNREFERENCED_PARAMETER(p_umv_buf); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_query_mw not implemented\n")); + return IB_UNSUPPORTED; +} + +ib_api_status_t +mlnx_destroy_mw ( + IN const ib_mw_handle_t h_mw) +{ + UNREFERENCED_PARAMETER(h_mw); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_MEMORY ,("mlnx_destroy_mw not implemented\n")); + return IB_UNSUPPORTED; +} + + +void +mlnx_memory_if( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->register_mr = mlnx_register_mr; + p_interface->register_pmr = mlnx_register_pmr; + p_interface->query_mr = mlnx_query_mr; + p_interface->modify_mr = mlnx_modify_mr; + p_interface->modify_pmr = mlnx_modify_pmr; + p_interface->register_smr = mlnx_register_smr; + p_interface->deregister_mr = mlnx_deregister_mr; + + p_interface->alloc_mlnx_fmr = mlnx_alloc_fmr; + p_interface->map_phys_mlnx_fmr = mlnx_map_phys_fmr; + p_interface->unmap_mlnx_fmr = mlnx_unmap_fmr; + p_interface->dealloc_mlnx_fmr = mlnx_dealloc_fmr; + + p_interface->create_mw = mlnx_create_mw; + p_interface->query_mw = mlnx_query_mw; + p_interface->destroy_mw = mlnx_destroy_mw; +} + +void +mlnx_memory_if_livefish( + IN OUT ci_interface_t *p_interface ) +{ + p_interface->register_pmr = mlnx_register_pmr; + p_interface->deregister_mr = mlnx_deregister_mr; +} + + diff --git 
a/branches/Ndi/hw/mthca/kernel/hca_pci.c b/branches/Ndi/hw/mthca/kernel/hca_pci.c
new file mode 100644
index 00000000..7f6afeb2
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/hca_pci.c
@@ -0,0 +1,769 @@
+
+#include "hca_driver.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "hca_pci.tmh"
+#endif
+#include
+#include
+#include
+
+#define HCA_RESET_HCR_OFFSET 0x000F0010
+#define HCA_RESET_TOKEN CL_HTON32(0x00000001)
+
+#define PCI_CAPABILITY_ID_VPD 0x03
+#define PCI_CAPABILITY_ID_PCIX 0x07
+#define PCI_CAPABILITY_ID_PCIEXP 0x10
+
+boolean_t
+FindBridgeIf(
+	IN hca_dev_ext_t *pi_ext,
+	IN PBUS_INTERFACE_STANDARD pi_pInterface
+	);
+
+
+/*
+ * Vital Product Data Capability
+ */
+typedef struct _PCI_VPD_CAPABILITY {
+
+	PCI_CAPABILITIES_HEADER Header;
+
+	USHORT Flags;
+	ULONG Data;
+
+} PCI_VPD_CAPABILITY, *PPCI_VPD_CAPABILITY;
+
+
+/*
+ * PCI-X Capability
+ */
+typedef struct _PCI_PCIX_CAPABILITY {
+
+	PCI_CAPABILITIES_HEADER Header;
+
+	USHORT Command;
+	ULONG Status;
+
+/* for Command: */
+} PCI_PCIX_CAPABILITY, *PPCI_PCIX_CAPABILITY;
+
+#define PCI_X_CMD_MAX_READ 0x000c /* Max Memory Read Byte Count */
+
+/*
+ * PCI-Express Capability
+ */
+typedef struct _PCI_PCIEXP_CAPABILITY {
+
+	PCI_CAPABILITIES_HEADER Header;
+
+	USHORT Flags;
+	ULONG DevCapabilities;
+	USHORT DevControl;
+	USHORT DevStatus;
+	ULONG LinkCapabilities;
+	USHORT LinkControl;
+	USHORT LinkStatus;
+	ULONG SlotCapabilities;
+	USHORT SlotControl;
+	USHORT SlotStatus;
+	USHORT RootControl;
+	USHORT RootCapabilities;
+	USHORT RootStatus;
+} PCI_PCIEXP_CAPABILITY, *PPCI_PCIEXP_CAPABILITY;
+
+/* for DevControl: */
+#define PCI_EXP_DEVCTL_READRQ 0x7000 /* Max_Read_Request_Size */
+
+static NTSTATUS
+__get_bus_ifc(
+	IN DEVICE_OBJECT* const pDevObj,
+	IN const GUID* const pGuid,
+	OUT BUS_INTERFACE_STANDARD *pBusIfc );
+
+static void
+__fixup_pci_capabilities(
+	IN PCI_COMMON_CONFIG* const pConfig );
+
+static NTSTATUS
+__save_pci_config(
+	IN BUS_INTERFACE_STANDARD *pBusIfc,
+	OUT PCI_COMMON_CONFIG* const pConfig );
+
+static NTSTATUS
+__restore_pci_config(
+	IN BUS_INTERFACE_STANDARD *pBusIfc,
+	IN PCI_COMMON_CONFIG* const pConfig,
+	IN const int is_bridge );
+
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, __get_bus_ifc)
+#pragma alloc_text (PAGE, __fixup_pci_capabilities)
+#pragma alloc_text (PAGE, __save_pci_config)
+#pragma alloc_text (PAGE, __restore_pci_config)
+#endif
+
+/*
+ * Returns the offset in configuration space of the requested capability.
+ */
+static ULONG
+__FindCapability(
+	IN PCI_COMMON_CONFIG* const pConfig,
+	IN char cap_id
+	)
+{
+	ULONG offset = 0;
+	PCI_CAPABILITIES_HEADER *pHdr = NULL;
+	UCHAR *pBuf = (UCHAR*)pConfig;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	if ( pConfig->HeaderType == PCI_DEVICE_TYPE ) {
+		if( pConfig->u.type0.CapabilitiesPtr )
+		{
+			pHdr = (PCI_CAPABILITIES_HEADER*)
+				(pBuf + pConfig->u.type0.CapabilitiesPtr);
+		}
+	}
+
+	if ( pConfig->HeaderType == PCI_BRIDGE_TYPE ) {
+		if( pConfig->u.type1.CapabilitiesPtr )
+		{
+			pHdr = (PCI_CAPABILITIES_HEADER*)
+				(pBuf + pConfig->u.type1.CapabilitiesPtr);
+		}
+	}
+
+	/*
+	 * Walk the capability list until a header with the requested
+	 * capability ID is found, or the list ends.
+	 */
+	while( pHdr )
+	{
+		if( pHdr->CapabilityID == cap_id )
+		{
+			offset = (UCHAR)(((ULONG_PTR)pHdr) - ((ULONG_PTR)pConfig));
+			break;
+		}
+
+		if( pHdr->Next )
+			pHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next);
+		else
+			pHdr = NULL;
+	}
+
+	HCA_EXIT( HCA_DBG_PNP );
+	return offset;
+}
+
+/* Forwards the request to the HCA's PDO.
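+ *
+ * Caller sketch — how hca_reset() below consumes this helper; the bus
+ * driver takes a reference on the returned interface, which the caller
+ * must release when done:
+ *
+ *	BUS_INTERFACE_STANDARD busIfc;
+ *	status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &busIfc );
+ *	if( NT_SUCCESS( status ) ) {
+ *		... busIfc.GetBusData() / busIfc.SetBusData() calls ...
+ *		busIfc.InterfaceDereference( busIfc.Context );
+ *	}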
*/ +static NTSTATUS +__get_bus_ifc( + IN DEVICE_OBJECT* const pDevObj, + IN const GUID* const pGuid, + OUT BUS_INTERFACE_STANDARD *pBusIfc ) +{ + NTSTATUS status; + IRP *pIrp; + IO_STATUS_BLOCK ioStatus; + IO_STACK_LOCATION *pIoStack; + DEVICE_OBJECT *pDev; + KEVENT event; + + HCA_ENTER( HCA_DBG_PNP ); + + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + pDev = IoGetAttachedDeviceReference( pDevObj ); + + KeInitializeEvent( &event, NotificationEvent, FALSE ); + + /* Build the IRP for the HCA. */ + pIrp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, pDev, + NULL, 0, NULL, &event, &ioStatus ); + if( !pIrp ) + { + ObDereferenceObject( pDev ); + HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, + ("IoBuildSynchronousFsdRequest failed.\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Copy the request query parameters. */ + pIoStack = IoGetNextIrpStackLocation( pIrp ); + pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + pIoStack->Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + pIoStack->Parameters.QueryInterface.Version = 1; + pIoStack->Parameters.QueryInterface.InterfaceType = pGuid; + pIoStack->Parameters.QueryInterface.Interface = (INTERFACE*)pBusIfc; + pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + + pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( pDev, pIrp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = ioStatus.Status; + } + ObDereferenceObject( pDev ); + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* + * Reads and saves the PCI configuration of the device accessible + * through the provided bus interface. Does not read registers 22 or 23 + * as directed in Tavor PRM 1.0.1, Appendix A. InfiniHost Software Reset. + */ +static NTSTATUS +__save_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + OUT PCI_COMMON_CONFIG* const pConfig ) +{ + ULONG len; + UINT32 *pBuf; + + HCA_ENTER( HCA_DBG_PNP ); + + pBuf = (UINT32*)pConfig; + + /* + * Read the lower portion of the configuration, up to but excluding + * register 22. + */ + len = pBusIfc->GetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[0], 0, 88 ); + if( len != 88 ) + { + HCA_PRINT( TRACE_LEVEL_ERROR , HCA_DBG_PNP ,("Failed to read HCA config.\n")); + return STATUS_DEVICE_NOT_READY; + } + + /* Read the upper portion of the configuration, from register 24. */ + len = pBusIfc->GetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, &pBuf[24], 96, 160 ); + if( len != 160 ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to read HCA config.\n")); + return STATUS_DEVICE_NOT_READY; + } + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static void +__fixup_pci_capabilities( + IN PCI_COMMON_CONFIG* const pConfig ) +{ + UCHAR *pBuf; + PCI_CAPABILITIES_HEADER *pHdr, *pNextHdr; + + HCA_ENTER( HCA_DBG_PNP ); + + pBuf = (UCHAR*)pConfig; + + if( pConfig->HeaderType == PCI_DEVICE_TYPE ) + { + if( pConfig->u.type0.CapabilitiesPtr ) + { + pNextHdr = (PCI_CAPABILITIES_HEADER*) + (pBuf + pConfig->u.type0.CapabilitiesPtr); + } + else + { + pNextHdr = NULL; + } + } + else + { + ASSERT( pConfig->HeaderType == PCI_BRIDGE_TYPE ); + if( pConfig->u.type1.CapabilitiesPtr ) + { + pNextHdr = (PCI_CAPABILITIES_HEADER*) + (pBuf + pConfig->u.type1.CapabilitiesPtr); + } + else + { + pNextHdr = NULL; + } + } + + /* + * Fix up any fields that might cause changes to the + * device - like writing VPD data. 
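+	 *
+	 * (Byte arithmetic used by __save_pci_config above, for reference —
+	 * config space is saved as dword registers:
+	 *	regs  0..21 -> bytes   0.. 87 (88 bytes, first GetBusData call)
+	 *	regs 22..23 -> bytes  88.. 95 (skipped per the Tavor PRM)
+	 *	regs 24..63 -> bytes  96..255 (160 bytes, second call))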
+ */ + while( pNextHdr ) + { + pHdr = pNextHdr; + if( pNextHdr->Next ) + pNextHdr = (PCI_CAPABILITIES_HEADER*)(pBuf + pHdr->Next); + else + pNextHdr = NULL; + + switch( pHdr->CapabilityID ) + { + case PCI_CAPABILITY_ID_VPD: + /* Clear the flags field so we don't cause a write. */ + ((PCI_VPD_CAPABILITY*)pHdr)->Flags = 0; + break; + + default: + break; + } + } + + HCA_EXIT( HCA_DBG_PNP ); +} + + +/* + * Restore saved PCI configuration, skipping registers 22 and 23, as well + * as any registers where writing will have side effects such as the flags + * field of the VPD and vendor specific capabilities. The function also delays + * writing the command register, bridge control register (if applicable), and + * PCIX command register (if present). + */ +static NTSTATUS +__restore_pci_config( + IN BUS_INTERFACE_STANDARD *pBusIfc, + IN PCI_COMMON_CONFIG* const pConfig, + IN const int is_bridge ) +{ + NTSTATUS status = STATUS_SUCCESS; + int i, *pci_hdr = (int*)pConfig; + int hca_pcix_cap = 0; + + HCA_ENTER( HCA_DBG_PNP ); + + /* get capabilities */ + hca_pcix_cap = __FindCapability( pConfig, PCI_CAPABILITY_ID_PCIX ); + + /* restore capabilities*/ + if (is_bridge) { + if ( 4 != pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + &pci_hdr[(hca_pcix_cap + 0x8) / 4], hca_pcix_cap + 0x8, 4) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't restore HCA bridge Upstream split transaction control, aborting.\n")); + status = STATUS_UNSUCCESSFUL; + goto out; + } + if ( 4 != pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + &pci_hdr[(hca_pcix_cap + 0xc) / 4], hca_pcix_cap + 0xc, 4) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't restore HCA bridge Downstream split transaction control, aborting.\n")); + status = STATUS_UNSUCCESSFUL; + goto out; + } + } + else { + int hca_pcie_cap = __FindCapability( pConfig, PCI_CAPABILITY_ID_PCIEXP ); + PCI_PCIEXP_CAPABILITY *pPciExpCap = (PCI_PCIEXP_CAPABILITY*)(((UCHAR*)pConfig) + hca_pcie_cap); + + if (hca_pcix_cap) { + if ( 4 != pBusIfc->SetBusData( pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + &pci_hdr[hca_pcix_cap/4], hca_pcix_cap, 4) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't restore HCA PCI-X command register, aborting.\n")); + status = STATUS_UNSUCCESSFUL; + goto out; + } + } + + if (hca_pcie_cap) { + /* restore HCA PCI Express Device Control register */ + if ( sizeof( pPciExpCap->DevControl ) != pBusIfc->SetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + &pPciExpCap->DevControl, hca_pcie_cap + + offsetof( PCI_PCIEXP_CAPABILITY, DevControl), + sizeof( pPciExpCap->DevControl ) )) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't restore HCA PCI Express Device Control register, aborting.\n")); + status = STATUS_UNSUCCESSFUL; + goto out; + } + /* restore HCA PCI Express Link Control register */ + if ( sizeof( pPciExpCap->LinkControl ) != pBusIfc->SetBusData( + pBusIfc->Context, PCI_WHICHSPACE_CONFIG, + &pPciExpCap->LinkControl, hca_pcie_cap + + offsetof( PCI_PCIEXP_CAPABILITY, LinkControl), + sizeof( pPciExpCap->LinkControl ) )) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't restore HCA PCI Express Link Control register, aborting.\n")); + status = STATUS_UNSUCCESSFUL; + goto out; + } + } + } + + /* write basic part */ + for (i = 0; i < 16; ++i) { + if (i == 1) + continue; + + if (4 != pBusIfc->SetBusData( pBusIfc->Context, + PCI_WHICHSPACE_CONFIG, &pci_hdr[i], i * 4, 4 )) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP , + ("Couldn't restore PCI cfg reg %x, aborting.\n", i)); + 
status = STATUS_DEVICE_NOT_READY; + goto out; + } + } + + /* Write the command register. */ + if (4 != pBusIfc->SetBusData( pBusIfc->Context, + PCI_WHICHSPACE_CONFIG, &pci_hdr[1], 4, 4 )) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Couldn't restore COMMAND.\n")); + status = STATUS_DEVICE_NOT_READY; + } + +out: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +NTSTATUS +hca_reset( DEVICE_OBJECT* const pDevObj, int is_tavor ) +{ + NTSTATUS status = STATUS_SUCCESS; + PCI_COMMON_CONFIG hcaConfig, brConfig; + BUS_INTERFACE_STANDARD hcaBusIfc; + BUS_INTERFACE_STANDARD brBusIfc = {0}; // to bypass C4701 + hca_dev_ext_t *pExt = (hca_dev_ext_t*)pDevObj->DeviceExtension; + + HCA_ENTER( HCA_DBG_PNP ); + + /* sanity check */ + if (is_tavor && g_skip_tavor_reset) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_PNP ,("Card reset is skipped, trying to proceed.\n")); + goto resetExit; + } + + /* get the resources */ + { + /* Get the HCA's bus interface. */ + status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to get HCA bus interface.\n")); + goto resetErr1; + } + + /* Get the HCA Bridge's bus interface, if any */ + if (is_tavor) { + if (!FindBridgeIf( pExt, &brBusIfc )) + goto resetErr2; + } + } + + /* Save the HCA's PCI configuration headers */ + { + status = __save_pci_config( &hcaBusIfc, &hcaConfig ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to save HCA config.\n")); + goto resetErr3; + } + + /* Save the HCA bridge's configuration, if any */ + if (is_tavor) { + int hca_pcix_cap; + status = __save_pci_config( &brBusIfc, &brConfig ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to save bridge config.\n")); + goto resetErr3; + } + hca_pcix_cap = __FindCapability( &brConfig, PCI_CAPABILITY_ID_PCIX ); + if (!hca_pcix_cap) { + status = STATUS_UNSUCCESSFUL; + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Couldn't locate HCA bridge PCI-X capability, aborting.\n")); + goto resetErr3; + } + } + } + + /* reset the card */ + { + PULONG reset_p; + PHYSICAL_ADDRESS pa; + /* map reset register */ + pa.QuadPart = pExt->bar[HCA_BAR_TYPE_HCR].phys + (uint64_t)HCA_RESET_HCR_OFFSET; + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Mapping reset register with address 0x%I64x\n", pa.QuadPart)); + reset_p = MmMapIoSpace( pa, 4, MmNonCached ); + if( !reset_p ) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to map reset register with address 0x%I64x\n", pa.QuadPart)); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } + + /* Issue the reset. */ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Resetting the chip ...\n")); + WRITE_REGISTER_ULONG( reset_p, HCA_RESET_TOKEN ); + + /* unmap the reset register */ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Unmapping reset register \n")); + MmUnmapIoSpace( reset_p, 4 ); + + /* Wait a second. */ + cl_thread_suspend( 1000 ); + } + + /* Read the configuration register until it doesn't return 0xFFFFFFFF */ + { + ULONG data, i, reset_failed = 1; + BUS_INTERFACE_STANDARD *p_ifc = (is_tavor) ? &brBusIfc : &hcaBusIfc; + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Read the configuration register \n")); + for( i = 0; i < 100; i++ ) { + if (4 != p_ifc->GetBusData( p_ifc->Context, + PCI_WHICHSPACE_CONFIG, &data, 0, 4)) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to read device configuration data. 
Card reset failed !\n")); + status = STATUS_UNSUCCESSFUL; + break; + } + /* See if we got valid data. */ + if( data != 0xFFFFFFFF ) { + reset_failed = 0; + break; + } + + cl_thread_suspend( 100 ); + } + + if (reset_failed) { + /* on Tavor reset failure, if configured so, we disable the reset for next time */ + if (is_tavor && g_disable_tavor_reset) + set_skip_tavor_reset(); + + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Doh! PCI device did not come back after reset!\n")); + status = STATUS_UNSUCCESSFUL; + goto resetErr3; + } + } + + /* restore the HCA's PCI configuration headers */ + { + if (is_tavor) { + /* Restore the HCA's bridge configuration. */ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Restoring bridge PCI configuration \n")); + status = __restore_pci_config( &brBusIfc, &brConfig, TRUE ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to restore bridge config. Card reset failed !\n")); + goto resetErr3; + } + } + + /* Restore the HCA's configuration. */ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Restoring HCA PCI configuration \n")); + status = __restore_pci_config( &hcaBusIfc, &hcaConfig, FALSE ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to restore HCA config. Card reset failed !\n")); + } + } + +resetErr3: + if (is_tavor) + brBusIfc.InterfaceDereference( brBusIfc.Context ); + +resetErr2: + hcaBusIfc.InterfaceDereference( hcaBusIfc.Context ); + +resetErr1: +resetExit: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* + * Tunes PCI configuration as described in 13.3.2 in the Tavor PRM. + */ +NTSTATUS +hca_tune_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT uplink_info_t *p_uplink_info ) +{ + NTSTATUS status; + PCI_COMMON_CONFIG hcaConfig; + BUS_INTERFACE_STANDARD hcaBusIfc; + ULONG len; + ULONG capOffset; + PCI_PCIX_CAPABILITY *pPciXCap; + PCI_PCIEXP_CAPABILITY *pPciExpCap; + + HCA_ENTER( HCA_DBG_PNP ); + + /* Get the HCA's bus interface. */ + status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, &hcaBusIfc ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to get HCA bus interface.\n")); + return status; + } + + /* Save the HCA's configuration. */ + status = __save_pci_config( &hcaBusIfc, &hcaConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to save HCA config.\n")); + status = STATUS_UNSUCCESSFUL; + goto tweakErr; + } + status = 0; + + /* + * PCIX Capability + */ + capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIX ); + if( capOffset ) + { + pPciXCap = (PCI_PCIX_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset); + + /* fill uplink features */ + p_uplink_info->bus_type = UPLINK_BUS_PCIX; + if (pPciXCap->Status & (1 << 17)) + p_uplink_info->u.pci_x.capabilities = UPLINK_BUS_PCIX_133; + + /* Update the command field to max the read byte count if needed. 
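+	 * (PCI-X encodes this count in Command bits 3:2 — 0=512B, 1=1KB,
+	 * 2=2KB, 3=4KB — so the (3 << 2) below requests 4KB reads; the
+	 * encoding is from the PCI-X spec, not stated in this patch.)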
*/ + if ( g_tune_pci && (pPciXCap->Command & 0x000C) != 0x000C ) + { + HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP, + ("Updating max recv byte count of PCI-X capability.\n")); + pPciXCap->Command = (pPciXCap->Command & ~PCI_X_CMD_MAX_READ) | (3 << 2); + len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG, + &pPciXCap->Command, + capOffset + offsetof( PCI_PCIX_CAPABILITY, Command), + sizeof( pPciXCap->Command ) ); + if( len != sizeof( pPciXCap->Command ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to update PCI-X maximum read byte count.\n")); + status = STATUS_UNSUCCESSFUL; + goto tweakErr; + } + } + } + + + /* + * PCI Express Capability + */ + capOffset = __FindCapability( &hcaConfig, PCI_CAPABILITY_ID_PCIEXP ); + if( capOffset ) + { + pPciExpCap = (PCI_PCIEXP_CAPABILITY*)(((UCHAR*)&hcaConfig) + capOffset); + + /* fill uplink features */ + p_uplink_info->bus_type = UPLINK_BUS_PCIE; + if ((pPciExpCap->LinkStatus & 15) == 1) + p_uplink_info->u.pci_e.link_speed = UPLINK_BUS_PCIE_SDR; + if ((pPciExpCap->LinkStatus & 15) == 2) + p_uplink_info->u.pci_e.link_speed = UPLINK_BUS_PCIE_DDR; + p_uplink_info->u.pci_e.link_width = (uint8_t)((pPciExpCap->LinkStatus >> 4) & 0x03f); + p_uplink_info->u.pci_e.capabilities = (uint8_t)((pPciExpCap->LinkCapabilities >> 2) & 0xfc); + p_uplink_info->u.pci_e.capabilities |= pPciExpCap->LinkCapabilities & 3; + + if (g_tune_pci) { + /* Update Max_Read_Request_Size. */ + HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP, + ("Updating max recv byte count of PCI-Express capability.\n")); + pPciExpCap->DevControl = (pPciExpCap->DevControl & ~PCI_EXP_DEVCTL_READRQ) | (5 << 12); + len = hcaBusIfc.SetBusData( hcaBusIfc.Context, PCI_WHICHSPACE_CONFIG, + &pPciExpCap->DevControl, + capOffset + offsetof( PCI_PCIEXP_CAPABILITY, DevControl), + sizeof( pPciExpCap->DevControl ) ); + if( len != sizeof( pPciExpCap->DevControl ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to update PCI-Exp maximum read byte count.\n")); + goto tweakErr; + } + } + } + + +tweakErr: + hcaBusIfc.InterfaceDereference( hcaBusIfc.Context ); + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* leo */ + +NTSTATUS +hca_enable_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT PBUS_INTERFACE_STANDARD phcaBusIfc, + OUT PCI_COMMON_CONFIG* pHcaConfig + ) +{ + NTSTATUS status; + ULONG len; + + HCA_ENTER( HCA_DBG_PNP ); + + /* Get the HCA's bus interface. */ + status = __get_bus_ifc( pDevObj, &GUID_BUS_INTERFACE_STANDARD, phcaBusIfc ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR , HCA_DBG_PNP ,("Failed to get HCA bus interface.\n")); + return STATUS_DEVICE_NOT_READY; + } + + /* Save the HCA's configuration. 
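+	 * (For reference: the Device Control update in hca_tune_pci above
+	 * programs PCI-Express Max_Read_Request_Size, DevControl bits 14:12
+	 * — 0=128B up to 5=4096B — so (5 << 12) requests 4KB reads,
+	 * mirroring the PCI-X tuning; the encoding is from the PCIe spec,
+	 * not stated in this patch.)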
*/ + status = __save_pci_config( phcaBusIfc, pHcaConfig ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to save HCA config.\n")); + goto pciErr; + } + + /* fix command register (set PCI Master bit) */ + // NOTE: we change here the saved value of the command register + pHcaConfig->Command |= 7; + len = phcaBusIfc->SetBusData( phcaBusIfc->Context, PCI_WHICHSPACE_CONFIG, + (PVOID)&pHcaConfig->Command , 4, sizeof(ULONG) ); + if( len != sizeof(ULONG) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Failed to write command register.\n")); + status = STATUS_DEVICE_NOT_READY; + goto pciErr; + } + status = STATUS_SUCCESS; + goto out; + + pciErr: + phcaBusIfc->InterfaceDereference( phcaBusIfc->Context ); + phcaBusIfc->InterfaceDereference = NULL; + out: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +void hca_disable_pci(PBUS_INTERFACE_STANDARD phcaBusIfc) +{ + // no need to disable the card, so just release the PCI bus i/f + if (phcaBusIfc->InterfaceDereference) { + phcaBusIfc->InterfaceDereference( phcaBusIfc->Context ); + phcaBusIfc->InterfaceDereference = NULL; + } +} + diff --git a/branches/Ndi/hw/mthca/kernel/hca_pci.h b/branches/Ndi/hw/mthca/kernel/hca_pci.h new file mode 100644 index 00000000..dd8e9c0e --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_pci.h @@ -0,0 +1,24 @@ +#ifndef HCI_PCI_H +#define HCI_PCI_H + + +NTSTATUS +hca_reset( + IN DEVICE_OBJECT* const pDevObj, int is_tavor ); + +NTSTATUS +hca_enable_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT PBUS_INTERFACE_STANDARD phcaBusIfc, + OUT PCI_COMMON_CONFIG* pHcaConfig + ); + +void hca_disable_pci( + IN PBUS_INTERFACE_STANDARD phcaBusIfc); + +NTSTATUS + hca_tune_pci( + IN DEVICE_OBJECT* const pDevObj, + OUT uplink_info_t *p_uplink_info ); + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/hca_pnp.c b/branches/Ndi/hw/mthca/kernel/hca_pnp.c new file mode 100644 index 00000000..fa1c5b71 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_pnp.c @@ -0,0 +1,1765 @@ +/* BEGIN_ICS_COPYRIGHT **************************************** +** END_ICS_COPYRIGHT ****************************************/ + +/* + $Revision: 1.1 $ +*/ + + +/* + * Provides the driver entry points for the Tavor VPD. 
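+ *
+ * (Note on hca_enable_pci in hca_pci.c above: Command |= 7 sets the
+ * three PCI command-register enables — bit 0 I/O space, bit 1 memory
+ * space, bit 2 bus mastering.)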
+ */ + +#include "hca_driver.h" +#include "mthca_dev.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_pnp.tmh" +#endif +#include "mthca.h" +#include +#include + +extern const char *mthca_version; + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_cancel_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + +static NTSTATUS +hca_cancel_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_pnp_state( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_removal_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ); + +static NTSTATUS +__get_ci_interface( + IN DEVICE_OBJECT* const p_dev_obj ); + +static void +__hca_deregister( + IN hca_dev_ext_t *p_ext ); + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *p_dev_obj ); + +static NTSTATUS +__pnp_notify_target( + IN void *pNotifyStruct, + IN void *context ); + +static NTSTATUS +__pnp_notify_ifc( + IN void *pNotifyStruct, + IN void *context ); + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, hca_add_device) +#pragma alloc_text (PAGE, hca_start) +#pragma alloc_text (PAGE, hca_query_stop) +#pragma alloc_text (PAGE, hca_stop) +#pragma alloc_text (PAGE, hca_cancel_stop) +#pragma alloc_text (PAGE, hca_query_remove) +#pragma alloc_text (PAGE, hca_release_resources) +#pragma alloc_text (PAGE, hca_cancel_remove) +#pragma alloc_text (PAGE, hca_surprise_remove) +#pragma alloc_text (PAGE, hca_query_capabilities) +#pragma alloc_text (PAGE, hca_query_pnp_state) +#pragma alloc_text (PAGE, hca_query_bus_relations) +#pragma alloc_text (PAGE, hca_query_removal_relations) +#pragma alloc_text (PAGE, hca_set_power) +#pragma alloc_text (PAGE, __alloc_hca_ifc) +#pragma alloc_text (PAGE, __get_ci_interface) +#pragma alloc_text (PAGE, __hca_register) +#pragma alloc_text (PAGE, __pnp_notify_target) +#pragma alloc_text (PAGE, __pnp_notify_ifc) +#endif + + +static cl_vfptr_pnp_po_t vfptrHcaPnp; + + +void 
+hca_init_vfptr( void )
+{
+	vfptrHcaPnp.identity = "HCA driver";
+	vfptrHcaPnp.pfn_start = hca_start;
+	vfptrHcaPnp.pfn_query_stop = hca_query_stop;
+	vfptrHcaPnp.pfn_stop = hca_stop;
+	vfptrHcaPnp.pfn_cancel_stop = hca_cancel_stop;
+	vfptrHcaPnp.pfn_query_remove = hca_query_remove;
+	vfptrHcaPnp.pfn_release_resources = hca_release_resources;
+	vfptrHcaPnp.pfn_remove = cl_do_remove;
+	vfptrHcaPnp.pfn_cancel_remove = hca_cancel_remove;
+	vfptrHcaPnp.pfn_surprise_remove = hca_surprise_remove;
+	vfptrHcaPnp.pfn_query_capabilities = hca_query_capabilities;
+	vfptrHcaPnp.pfn_query_pnp_state = hca_query_pnp_state;
+	vfptrHcaPnp.pfn_filter_res_req = cl_irp_skip;
+	vfptrHcaPnp.pfn_dev_usage_notification = cl_do_sync_pnp;
+	vfptrHcaPnp.pfn_query_bus_relations = hca_query_bus_relations;
+	vfptrHcaPnp.pfn_query_ejection_relations = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_removal_relations = hca_query_removal_relations;
+	vfptrHcaPnp.pfn_query_target_relations = cl_irp_ignore;
+	vfptrHcaPnp.pfn_unknown = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_resources = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_res_req = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_bus_info = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_interface = cl_irp_ignore;
+	vfptrHcaPnp.pfn_read_config = cl_irp_ignore;
+	vfptrHcaPnp.pfn_write_config = cl_irp_ignore;
+	vfptrHcaPnp.pfn_eject = cl_irp_ignore;
+	vfptrHcaPnp.pfn_set_lock = cl_irp_ignore;
+	vfptrHcaPnp.pfn_query_power = hca_query_power;
+	vfptrHcaPnp.pfn_set_power = hca_set_power;
+	vfptrHcaPnp.pfn_power_sequence = cl_irp_ignore;
+	vfptrHcaPnp.pfn_wait_wake = cl_irp_ignore;
+}
+
+
+NTSTATUS
+hca_add_device(
+	IN PDRIVER_OBJECT pDriverObj,
+	IN PDEVICE_OBJECT pPdo )
+{
+	NTSTATUS status;
+	DEVICE_OBJECT *p_dev_obj, *pNextDevObj;
+	hca_dev_ext_t *p_ext;
+
+	HCA_ENTER(HCA_DBG_PNP);
+
+	/*
+	 * Create the device so that we have a device extension to store stuff in.
+	 */
+	status = IoCreateDevice( pDriverObj, sizeof(hca_dev_ext_t),
+		NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN,
+		FALSE, &p_dev_obj );
+	if( !NT_SUCCESS( status ) )
+	{
+		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,
+			("IoCreateDevice returned 0x%08X.\n", status));
+		return status;
+	}
+
+	p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+	cl_memclr( p_ext, sizeof(hca_dev_ext_t) );
+	cl_spinlock_init( &p_ext->uctx_lock );
+	cl_qlist_init( &p_ext->uctx_list );
+	atomic_set(&p_ext->usecnt, 0);
+
+	/* Attach to the device stack. */
+	pNextDevObj = IoAttachDeviceToDeviceStack( p_dev_obj, pPdo );
+	if( !pNextDevObj )
+	{
+		//cl_event_destroy( &p_ext->mutex );
+		IoDeleteDevice( p_dev_obj );
+		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP,
+			("IoAttachDeviceToDeviceStack failed.\n"));
+		return STATUS_NO_SUCH_DEVICE;
+	}
+
+	/* Initialize the complib extension. */
+	cl_init_pnp_po_ext( p_dev_obj, pNextDevObj, pPdo, 0,
+		&vfptrHcaPnp, NULL );
+
+	p_ext->state = HCA_ADDED;
+
+	HCA_EXIT(HCA_DBG_PNP);
+	return status;
+}
+
+
+static NTSTATUS
+__get_ci_interface(
+	IN DEVICE_OBJECT* const p_dev_obj )
+{
+	NTSTATUS status;
+	IRP *p_irp;
+	hca_dev_ext_t *p_ext;
+	IO_STATUS_BLOCK ioStatus;
+	IO_STACK_LOCATION *pIoStack;
+	KEVENT event;
+
+	HCA_ENTER( HCA_DBG_PNP );
+
+	p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension;
+
+	KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+	/* Query for the verbs interface.
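+	 * (WDM detail: the IRP's IoStatus.Status is preset to
+	 * STATUS_NOT_SUPPORTED below because a lower driver that does not
+	 * handle IRP_MN_QUERY_INTERFACE completes the IRP without touching
+	 * the status, which this caller then correctly sees as failure.)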
*/ + p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_ext->p_al_dev, + NULL, 0, NULL, &event, &ioStatus ); + if( !p_irp ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("IoBuildSynchronousFsdRequest failed.\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Format the IRP. */ + pIoStack = IoGetNextIrpStackLocation( p_irp ); + pIoStack->MinorFunction = IRP_MN_QUERY_INTERFACE; + pIoStack->Parameters.QueryInterface.Version = IB_CI_INTERFACE_VERSION; + pIoStack->Parameters.QueryInterface.Size = sizeof(ib_ci_ifc_t); + pIoStack->Parameters.QueryInterface.Interface = + (INTERFACE*)&p_ext->ci_ifc; + pIoStack->Parameters.QueryInterface.InterfaceSpecificData = NULL; + pIoStack->Parameters.QueryInterface.InterfaceType = + &GUID_IB_CI_INTERFACE; + p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + /* Send the IRP. */ + status = IoCallDriver( p_ext->p_al_dev, p_irp ); + if( status == STATUS_PENDING ) + { + KeWaitForSingleObject( &event, Executive, KernelMode, + FALSE, NULL ); + + status = ioStatus.Status; + } + + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, + ("Query interface for verbs returned %08x.\n", status)); + return status; + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +__pnp_notify_target( + IN void *pNotifyStruct, + IN void *context ) +{ + NTSTATUS status = STATUS_SUCCESS; + DEVICE_OBJECT *p_dev_obj; + hca_dev_ext_t *p_ext; + TARGET_DEVICE_REMOVAL_NOTIFICATION *pNotify; + + HCA_ENTER( HCA_DBG_PNP ); + + pNotify = (TARGET_DEVICE_REMOVAL_NOTIFICATION*)pNotifyStruct; + p_dev_obj = (DEVICE_OBJECT*)context; + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + if( IsEqualGUID( &pNotify->Event, &GUID_TARGET_DEVICE_QUERY_REMOVE ) ) + { + if ( p_ext->state == HCA_REGISTERED) { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_IFC_DEREFERENCED; + } + + /* Release AL's file object so that it can unload. */ + CL_ASSERT( p_ext->p_al_dev ); + CL_ASSERT( p_ext->p_al_file_obj ); + CL_ASSERT( p_ext->p_al_file_obj == pNotify->FileObject ); + if( p_ext->p_al_file_obj ) { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + } + else if( IsEqualGUID( &pNotify->Event, + &GUID_TARGET_DEVICE_REMOVE_COMPLETE ) ) + { + if (p_ext->ci_ifc.deregister_ca) { + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + p_ext->ci_ifc.deregister_ca = NULL; + } + + if ( p_ext->state == HCA_REGISTERED) { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + } + p_ext->state = HCA_STARTED; + + /* Release AL's file object so that it can unload. */ + if( p_ext->p_al_file_obj ) + { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; + } + + /* Cancel our target device change registration. */ + if (p_ext->pnp_target_entry) { + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + } + + } + else if( IsEqualGUID( &pNotify->Event, + &GUID_TARGET_DEVICE_REMOVE_CANCELLED ) ) + { + /* Cancel our target device change registration. */ + if (p_ext->pnp_target_entry) { + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + } + + /* Get the device object pointer for the AL. */ + CL_ASSERT( !p_ext->p_al_file_obj ); + CL_ASSERT( !p_ext->p_al_dev ); + /* Get the AL device object. 
*/ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("Calling IoGetDeviceObjectPointer.\n")); + status = IoGetDeviceObjectPointer( &p_ext->al_sym_name, + FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("IoGetDeviceObjectPointer returned %08x.\n", status )); + return STATUS_SUCCESS; + } + + /* Register for removal notification of the IB Fabric root device. */ + status = IoRegisterPlugPlayNotification( + EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, + p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, + &p_ext->pnp_target_entry ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("IoRegisterPlugPlayNotification returned %08x.\n", status)); + return status; + } + + CL_ASSERT( p_ext->state == HCA_IFC_DEREFERENCED ); + if ( p_ext->state == HCA_IFC_DEREFERENCED) { + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceReference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_REGISTERED; + } + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static ci_interface_t* +__alloc_hca_ifc( + IN hca_dev_ext_t* const p_ext ) +{ + ci_interface_t *pIfc; + + HCA_ENTER( HCA_DBG_PNP ); + + pIfc = + (ci_interface_t*)ExAllocatePool( PagedPool, sizeof(ci_interface_t) ); + if( !pIfc ) + { + HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, + ("Failed to allocate ci_interface_t (%d bytes).\n", + sizeof(ci_interface_t))); + return NULL; + } + + setup_ci_interface( p_ext->hca.guid, + !!mthca_is_livefish(p_ext->hca.mdev), + pIfc ); + + pIfc->p_hca_dev = p_ext->cl_ext.p_pdo; + pIfc->vend_id = (uint32_t)p_ext->hcaConfig.VendorID; + pIfc->dev_id = (uint16_t)p_ext->hcaConfig.DeviceID; + pIfc->dev_revision = (uint16_t)p_ext->hca.hw_ver; + + HCA_EXIT( HCA_DBG_PNP ); + return pIfc; +} + +static void +__hca_deregister( + IN hca_dev_ext_t *p_ext ) +{ + HCA_ENTER( HCA_DBG_PNP ); + + if ( p_ext->state == HCA_REGISTERED) { + if (p_ext->ci_ifc.deregister_ca) { + /* Notify AL that the CA is being removed. */ + p_ext->ci_ifc.deregister_ca( p_ext->hca.guid ); + p_ext->ci_ifc.deregister_ca = NULL; + /* Release AL's CI interface. */ + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + p_ext->state = HCA_STARTED; + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP, + ("***** HCA deregistered \n")); + } + } + + HCA_EXIT( HCA_DBG_PNP ); +} + +static NTSTATUS +__hca_register( + IN DEVICE_OBJECT *p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + NTSTATUS status; + ib_api_status_t ib_status; + ci_interface_t *p_hca_ifc; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + ASSERT( p_ext->state == HCA_STARTED ); + ASSERT( p_ext->p_al_dev ); + + /* Get the AL's lower interface. */ + status = __get_ci_interface( p_dev_obj ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, + ("__get_ci_interface returned %08x.\n", status)); + goto exit; + } + + /* Allocate and populate our HCA interface structure. */ + p_hca_ifc = __alloc_hca_ifc( p_ext ); + if( !p_hca_ifc ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("__alloc_hca_ifc failed.\n")); + status = STATUS_NO_MEMORY; + goto exit; + } + + /* Notify AL that we're available... 
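+	 * (State recap for this file: HCA_ADDED is set by hca_add_device,
+	 * HCA_STARTED by hca_start and again on deregistration,
+	 * HCA_REGISTERED once register_ca below succeeds, and
+	 * HCA_IFC_DEREFERENCED once query-remove has already released AL's
+	 * CI interface.)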
*/ + ib_status = p_ext->ci_ifc.register_ca( p_hca_ifc ); + ExFreePool( p_hca_ifc ); + if( ib_status != IB_SUCCESS ) + { + p_ext->ci_ifc.wdm.InterfaceDereference( p_ext->ci_ifc.wdm.Context ); + status = STATUS_INSUFFICIENT_RESOURCES; + goto exit; + } + + p_ext->state = HCA_REGISTERED; + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP, + ("***** HCA registered \n")); +exit: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +__pnp_notify_ifc( + IN void *pNotifyStruct, + IN void *context ) +{ + NTSTATUS status = STATUS_SUCCESS; + DEVICE_OBJECT *p_dev_obj; + hca_dev_ext_t *p_ext; + DEVICE_INTERFACE_CHANGE_NOTIFICATION *pNotify; + + HCA_ENTER( HCA_DBG_PNP ); + + pNotify = (DEVICE_INTERFACE_CHANGE_NOTIFICATION*)pNotifyStruct; + p_dev_obj = (DEVICE_OBJECT*)context; + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + if( !IsEqualGUID( &pNotify->Event, &GUID_DEVICE_INTERFACE_ARRIVAL ) ) + goto done; + + /* + * Sanity check. We should only be getting notifications of the + * CI interface exported by AL. + */ + ASSERT( + IsEqualGUID( &pNotify->InterfaceClassGuid, &GUID_IB_CI_INTERFACE ) ); + + if( p_ext->state != HCA_STARTED ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("Invalid state: %d\n", p_ext->state)); + goto done; + } + + /* save symbolic name of IBAL for a case of cancelled IBAL removal */ + if (!p_ext->al_sym_name.Buffer) { + p_ext->al_sym_name.Length = pNotify->SymbolicLinkName->Length; + p_ext->al_sym_name.MaximumLength = pNotify->SymbolicLinkName->MaximumLength; + p_ext->al_sym_name.Buffer = ExAllocatePool( NonPagedPool, + p_ext->al_sym_name.MaximumLength * sizeof(wchar_t) ); + if (!p_ext->al_sym_name.Buffer) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_PNP ,("allocation of sym IBAL name failed.\n")); + goto done; + } + RtlCopyUnicodeString( &p_ext->al_sym_name, pNotify->SymbolicLinkName ); + } + + ASSERT( !p_ext->p_al_dev ); + ASSERT( !p_ext->p_al_file_obj ); + + /* Get the AL device object. */ + HCA_PRINT( TRACE_LEVEL_INFORMATION ,HCA_DBG_PNP ,("Calling IoGetDeviceObjectPointer.\n")); + status = IoGetDeviceObjectPointer( pNotify->SymbolicLinkName, + FILE_ALL_ACCESS, &p_ext->p_al_file_obj, &p_ext->p_al_dev ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("IoGetDeviceObjectPointer returned %08x.\n", status )); + goto done; + } + + /* Register for removal notification of the IB Fabric root device. */ + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("Registering for target notifications.\n")); + status = IoRegisterPlugPlayNotification( + EventCategoryTargetDeviceChange, 0, p_ext->p_al_file_obj, + p_dev_obj->DriverObject, __pnp_notify_target, p_dev_obj, + &p_ext->pnp_target_entry ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("IoRegisterPlugPlayNotification returned %08x.\n", status)); + goto err_reg_notify; + } + + status = __hca_register( p_dev_obj ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("__get_ci_interface returned %08x.\n", status)); + goto err_reg_hca; + } + goto done; + +err_reg_hca: + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; +err_reg_notify: + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + p_ext->p_al_dev = NULL; +done: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* + * Walk the resource lists and store the information. 
The write-only + * flag is not set for the UAR region, so it is indistinguishable from the + * DDR region since both are prefetchable. The code here assumes that the + * resources get handed in order - HCR, UAR, DDR. + * - Configuration Space: not prefetchable, read/write + * - UAR space: prefetchable, write only. + * - DDR: prefetchable, read/write. + */ +static NTSTATUS +__SetupHcaResources( + IN DEVICE_OBJECT* const p_dev_obj, + IN CM_RESOURCE_LIST* const pHcaResList, + IN CM_RESOURCE_LIST* const pHostResList ) +{ + NTSTATUS status = STATUS_SUCCESS; + hca_dev_ext_t *p_ext; + USHORT i; + hca_bar_type_t type = HCA_BAR_TYPE_HCR; + + CM_PARTIAL_RESOURCE_DESCRIPTOR *pHcaRes, *pHostRes; + + HCA_ENTER( HCA_DBG_PNP ); + + // there will be no resources for "livefish" (PCI memory controller mode) + if (!pHcaResList || !pHostResList) + goto done; + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + // store the bus number for reset of Tavor + p_ext->bus_number = pHostResList->List[0].BusNumber; + + for( i = 0; i < pHostResList->List[0].PartialResourceList.Count; i++ ) + { + pHcaRes = + &pHcaResList->List[0].PartialResourceList.PartialDescriptors[i]; + pHostRes = + &pHostResList->List[0].PartialResourceList.PartialDescriptors[i]; + + + /* + * Save the interrupt information so that we can power the device + * up and down. Since the device will lose state when powered down + * we have to fully disable it. Note that we can leave memory mapped + * resources in place when powered down as the resource assignments + * won't change. However, we must disconnect our interrupt, and + * reconnect it when powering up. + */ + if( pHcaRes->Type == CmResourceTypeInterrupt ) + { + p_ext->interruptInfo = *pHostRes; + continue; + } + + if( pHcaRes->Type != CmResourceTypeMemory ) + continue; + + /* + * Sanity check that our assumption on how resources + * are reported hold. 
+ */ + if( type == HCA_BAR_TYPE_HCR && + (pHcaRes->Flags & CM_RESOURCE_MEMORY_PREFETCHABLE) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("First memory resource is prefetchable - expected HCR.\n")); + status = STATUS_UNSUCCESSFUL; + break; + } + + p_ext->bar[type].phys = pHcaRes->u.Memory.Start.QuadPart; + p_ext->bar[type].size = pHcaRes->u.Memory.Length; +#ifdef MAP_ALL_HCA_MEMORY + /*leo: no need to map all the resources */ + p_ext->bar[type].virt = MmMapIoSpace( pHostRes->u.Memory.Start, + pHostRes->u.Memory.Length, MmNonCached ); + if( !p_ext->bar[type].virt ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to map memory resource type %d\n", type)); + status = STATUS_UNSUCCESSFUL; + break; + } +#else + p_ext->bar[type].virt = NULL; +#endif + + type++; + } + + if( type == HCA_BAR_TYPE_DDR) + { + p_ext->hca_hidden = 1; + } + else + if( type != HCA_BAR_TYPE_MAX ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("Failed to map all memory resources.\n")); + status = STATUS_UNSUCCESSFUL; + } + + if( p_ext->interruptInfo.Type != CmResourceTypeInterrupt ) + { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("No interrupt resource.\n")); + status = STATUS_UNSUCCESSFUL; + } + +done: + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static void +__UnmapHcaMemoryResources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + USHORT i; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + for( i = 0; i < HCA_BAR_TYPE_MAX; i++ ) + { + if( p_ext->bar[i].virt ) + { + MmUnmapIoSpace( p_ext->bar[i].virt, p_ext->bar[i].size ); + cl_memclr( &p_ext->bar[i], sizeof(hca_bar_t) ); + } + } + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static int mthca_get_livefish_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id) +{ + *node_guid = cl_hton64((uint64_t)(ULONG_PTR)mdev); + mdev->ib_dev.node_guid = *node_guid; + *hw_id = 0; + return 0; +} + +static NTSTATUS +hca_start( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + POWER_STATE powerState; + DEVICE_DESCRIPTION devDesc; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + /* Handled on the way up. */ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Lower drivers failed IRP_MN_START_DEVICE (%#x).\n", status)); + return status; + } + + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + /* + * Walk the resource lists and store the information. The write-only + * flag is not set for the UAR region, so it is indistinguishable from the + * DDR region since both are prefetchable. The code here assumes that the + * resources get handed in order - HCR, UAR, DDR. + * - Configuration Space: not prefetchable, read/write + * - UAR space: prefetchable, write only. + * - DDR: prefetchable, read/write. + */ + status = __SetupHcaResources( p_dev_obj, + pIoStack->Parameters.StartDevice.AllocatedResources, + pIoStack->Parameters.StartDevice.AllocatedResourcesTranslated ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("__ProcessResources returned %08X.\n", status)); + return status; + } + + /* save PCI bus i/f, PCI configuration info and enable device */ + hca_enable_pci( p_dev_obj, &p_ext->hcaBusIfc, &p_ext->hcaConfig ); + + /* + * Get the DMA adapter representing the HCA so we can + * allocate common buffers. 
+ */ + RtlZeroMemory( &devDesc, sizeof(devDesc) ); + devDesc.Version = DEVICE_DESCRIPTION_VERSION2; + devDesc.Master = TRUE; + devDesc.ScatterGather = TRUE; + devDesc.Dma32BitAddresses = TRUE; + devDesc.Dma64BitAddresses = TRUE; + devDesc.InterfaceType = PCIBus; + + // get the adapter object + // 0x80000000 is a threshold, that's why - 1 + devDesc.MaximumLength = 0x80000000 - 1; + p_ext->p_dma_adapter = IoGetDmaAdapter( + p_ext->cl_ext.p_pdo, &devDesc, &p_ext->n_map_regs ); + if( !p_ext->p_dma_adapter ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("Failed to get DMA_ADAPTER for HCA.\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + /* Initialize the HCA now. */ + status = mthca_init_one( p_ext ); + if( !NT_SUCCESS( status ) ) + { + //TODO: no cleanup on error + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("mthca_init_one returned %08X\n", status)); + return status; + } + + /*leo: get node GUID */ + { + int err; + if (mthca_is_livefish(p_ext->hca.mdev)) + err = mthca_get_livefish_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver ); + else + err = mthca_get_dev_info( p_ext->hca.mdev, &p_ext->hca.guid, &p_ext->hca.hw_ver ); + if (err) { + //TODO: no cleanup on error + HCA_PRINT( TRACE_LEVEL_ERROR,HCA_DBG_PNP, + ("can't get guid - mthca_query_port()")); + return STATUS_INSUFFICIENT_RESOURCES; + } + } + + /* queue HCA */ + mlnx_hca_insert( &p_ext->hca ); + + /* + * Change the state since the PnP callback can happen + * before the callback returns. + */ + p_ext->state = HCA_STARTED; + + /* Register for interface arrival of the IB_AL device. */ + status = IoRegisterPlugPlayNotification( + EventCategoryDeviceInterfaceChange, + PNPNOTIFY_DEVICE_INTERFACE_INCLUDE_EXISTING_INTERFACES, + (void*)&GUID_IB_CI_INTERFACE, p_dev_obj->DriverObject, + __pnp_notify_ifc, p_dev_obj, &p_ext->pnp_ifc_entry ); + if( !NT_SUCCESS( status ) ) + { + p_ext->state = HCA_ADDED; + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("IoRegisterPlugPlayNotification returned %08x.\n", status)); + } + + /* We get started fully powered. */ + p_ext->DevicePowerState = PowerDeviceD0; + powerState.DeviceState = PowerDeviceD0; + powerState = PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState ); + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("PoSetPowerState: old state %d, new state to %d\n", + powerState.DeviceState, p_ext->DevicePowerState )); + + + { + struct mthca_dev *mdev = p_ext->hca.mdev; + HCA_PRINT_EV(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , + ("Ven %x Dev %d Hw %x Fw %d.%d.%d Drv %s (%s)", + (unsigned)p_ext->hcaConfig.VendorID, (unsigned)p_ext->hcaConfig.DeviceID, + p_ext->hca.hw_ver, (int) (mdev->fw_ver >> 32), + (int) (mdev->fw_ver >> 16) & 0xffff, (int) (mdev->fw_ver & 0xffff), + DRV_VERSION, DRV_RELDATE + )); + HCA_PRINT_EV(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , + ("Flags %s%s%s%s%s%s%s\n", + (mdev->mthca_flags & MTHCA_FLAG_LIVEFISH) ? "Flash Recovery Mode:" : "", + (mdev->mthca_flags & MTHCA_FLAG_MEMFREE) ? "MemFree:" : "", + (mdev->mthca_flags & MTHCA_FLAG_NO_LAM) ? "NoLam:" : "", + (mdev->mthca_flags & MTHCA_FLAG_FMR) ? "Fmr:" : "", + (mdev->mthca_flags & MTHCA_FLAG_SRQ) ? "Srq:" : "", + (mdev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN) ? "HideDdr:" : "", + (mdev->mthca_flags & MTHCA_FLAG_PCIE) ? 
"PciEx:" : "" + )); + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +/* release the resources, allocated in hca_start */ +static void +__hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + hca_dev_ext_t *p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + HCA_ENTER( HCA_DBG_PNP ); + + switch( p_ext->state ) + { + case HCA_REGISTERED: + __hca_deregister( p_ext ); + + /* Fall through. */ + case HCA_STARTED: + /* dequeue HCA */ + mlnx_hca_remove( &p_ext->hca ); + } + + if (p_ext->al_sym_name.Buffer) { + ExFreePool( p_ext->al_sym_name.Buffer ); + p_ext->al_sym_name.Buffer = NULL; + } + + if( p_ext->pnp_target_entry ) + { + ASSERT( p_ext->pnp_ifc_entry ); + IoUnregisterPlugPlayNotification( p_ext->pnp_target_entry ); + p_ext->pnp_target_entry = NULL; + } + + if( p_ext->pnp_ifc_entry ) { + IoUnregisterPlugPlayNotification( p_ext->pnp_ifc_entry ); + p_ext->pnp_ifc_entry = NULL; + } + + if( p_ext->p_al_file_obj ) { + ObDereferenceObject( p_ext->p_al_file_obj ); + p_ext->p_al_file_obj = NULL; + } + + mthca_remove_one( p_ext ); + + if( p_ext->p_dma_adapter ) { + p_ext->p_dma_adapter->DmaOperations->PutDmaAdapter( p_ext->p_dma_adapter ); + p_ext->p_dma_adapter = NULL; + } + + hca_disable_pci( &p_ext->hcaBusIfc ); + + //cl_event_destroy( &p_ext->mutex ); + __UnmapHcaMemoryResources( p_dev_obj ); + + p_ext->state = HCA_ADDED; + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static void +hca_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + hca_dev_ext_t *p_ext; + POWER_STATE powerState; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + /* release all the resources, allocated in hca_start */ + __hca_release_resources(p_dev_obj); + + /* Notify the power manager that the device is powered down. */ + p_ext->DevicePowerState = PowerDeviceD3; + powerState.DeviceState = PowerDeviceD3; + powerState = PoSetPowerState ( p_ext->cl_ext.p_self_do, DevicePowerState, powerState ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("PoSetPowerState: old state %d, new state to %d\n", + powerState.DeviceState, p_ext->DevicePowerState )); + + + /* Clear the PnP state in case we get restarted. 
*/ + p_ext->pnpState = 0; + + HCA_EXIT( HCA_DBG_PNP ); +} + + +static NTSTATUS +hca_query_removal_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + if( p_ext->state == HCA_REGISTERED ) + { + status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp ); + if( !NT_SUCCESS( status ) ) + { + *p_action = IrpComplete; + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("AL get_relations returned %08x.\n", status)); + return status; + } + } + + *p_action = IrpPassDown; + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_query_bus_relations( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + DEVICE_RELATIONS *p_rel; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = p_dev_obj->DeviceExtension; + + //cl_event_wait_on( &p_ext->mutex, EVENT_NO_TIMEOUT, FALSE ); + if( p_ext->state == HCA_REGISTERED ) + { + status = p_ext->ci_ifc.get_relations( p_ext->hca.guid, p_irp ); + if( !NT_SUCCESS( status ) ) + { + //cl_event_signal( &p_ext->mutex ); + *p_action = IrpComplete; + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("AL get_relations returned %08x.\n", status)); + return status; + } + } + else + { + status = cl_alloc_relations( p_irp, 1 ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("cl_alloc_relations returned %08x.\n", status)); + return status; + } + + p_rel = (DEVICE_RELATIONS*)p_irp->IoStatus.Information; + p_rel->Count = 0; + p_rel->Objects[0] = NULL; + } + + //cl_event_signal( &p_ext->mutex ); + + *p_action = IrpPassDown; + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +hca_query_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + /* All kernel clients will get notified through the device hierarchy. */ + + /* TODO: set a flag to fail creation of any new IB resources. */ + return cl_irp_skip( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + /* + * Must disable everything. Complib framework will + * call ReleaseResources handler. + */ + return cl_irp_skip( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_cancel_stop( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + /* Handled on the way up. */ + return cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_query_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + hca_dev_ext_t*p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + if (atomic_read(&p_ext->usecnt)) { + DbgPrint( "MTHCA: Can't get unloaded. %d applications are still in work\n", p_ext->usecnt); + p_irp->IoStatus.Status = STATUS_UNSUCCESSFUL; + return cl_irp_complete( p_dev_obj, p_irp, p_action ); + } + /* TODO: set a flag to fail creation of any new IB resources. */ + return cl_irp_skip( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_cancel_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + /* Handled on the way up. 
*/ + return cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_surprise_remove( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + /* + * TODO: Set state so that all further requests + * automatically succeed/fail as needed. + */ + return cl_irp_skip( p_dev_obj, p_irp, p_action ); +} + + +static NTSTATUS +hca_query_capabilities( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + DEVICE_CAPABILITIES *pCaps; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + /* Process on the way up. */ + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("cl_do_sync_pnp returned %08X.\n", status)); + return status; + } + + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + pCaps = pIoStack->Parameters.DeviceCapabilities.Capabilities; + + /* + * Store the device power mapping into our extension since we're + * the power policy owner. The mapping is used when handling + * IRP_MN_SET_POWER IRPs. + */ + cl_memcpy( + p_ext->DevicePower, pCaps->DeviceState, sizeof(p_ext->DevicePower) ); + + if( pCaps->DeviceD1 ) + { + HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP, + ("WARNING: Device reports support for DeviceD1 power state.\n")); + pCaps->DeviceD1 = FALSE; + } + + if( pCaps->DeviceD2 ) + { + HCA_PRINT( TRACE_LEVEL_WARNING,HCA_DBG_PNP, + ("WARNING: Device reports support for DeviceD2 power state.\n")); + pCaps->DeviceD2 = FALSE; + } + + if( pCaps->SystemWake != PowerSystemUnspecified ) + { + HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_PNP, + ("WARNING: Device reports support for system wake.\n")); + pCaps->SystemWake = PowerSystemUnspecified; + } + + if( pCaps->DeviceWake != PowerDeviceUnspecified ) + { + HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP, + ("WARNING: Device reports support for device wake.\n")); + pCaps->DeviceWake = PowerDeviceUnspecified; + } + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + + +static NTSTATUS +hca_query_pnp_state( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PNP ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + + p_irp->IoStatus.Information |= p_ext->pnpState; + + *p_action = IrpSkip; + + HCA_EXIT( HCA_DBG_PNP ); + return STATUS_SUCCESS;; +} + +static NTSTATUS +hca_query_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status = STATUS_SUCCESS; + IO_STACK_LOCATION *pIoStack; + + HCA_ENTER(HCA_DBG_PO); + + UNUSED_PARAM( p_dev_obj ); + + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("QUERY_POWER for FDO %p: type %s, state %d, action %d, IRQL %d, IRP %p\n", + p_dev_obj, + (pIoStack->Parameters.Power.Type) ? "DevicePowerState" : "SystemPowerState", + pIoStack->Parameters.Power.State.DeviceState, + pIoStack->Parameters.Power.ShutdownType, KeGetCurrentIrql(), p_irp )); + + switch( pIoStack->Parameters.Power.Type ) + { + case SystemPowerState: + /* Fail any requests to hibernate or sleep the system. 
*/ + switch( pIoStack->Parameters.Power.State.SystemState ) + { + case PowerSystemSleeping1: // STANDBY support + case PowerSystemHibernate: + { + hca_dev_ext_t*p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + if (atomic_read(&p_ext->usecnt)) + status = STATUS_UNSUCCESSFUL; + break; + } + + case PowerSystemWorking: + case PowerSystemShutdown: + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + + case DevicePowerState: + /* Fail any query for low power states. */ + switch( pIoStack->Parameters.Power.State.DeviceState ) + { + case PowerDeviceD0: + case PowerDeviceD3: + /* We only support fully powered or off power states. */ + break; + + default: + status = STATUS_NOT_SUPPORTED; + } + break; + } + + if( status == STATUS_SUCCESS ) + *p_action = IrpSkip; + else + *p_action = IrpComplete; + + HCA_EXIT( HCA_DBG_PO ); + return status; +} + + +static void +__RequestPowerCompletion( + IN DEVICE_OBJECT *p_dev_obj, + IN UCHAR minorFunction, + IN POWER_STATE powerState, + IN void *context, + IN IO_STATUS_BLOCK *pIoStatus ) +{ + IRP *p_irp; + cl_pnp_po_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PO ); + + UNUSED_PARAM( minorFunction ); + UNUSED_PARAM( powerState ); + + p_irp = (IRP*)context; + p_ext = (cl_pnp_po_ext_t*)p_dev_obj->DeviceExtension; + + /* Propagate the device IRP status to the system IRP status. */ + p_irp->IoStatus.Status = pIoStatus->Status; + + /* Continue Power IRP processing. */ + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->remove_lock, p_irp ); + HCA_EXIT( HCA_DBG_PO ); +} + + +/*NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__SystemPowerCompletion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + NTSTATUS status; + POWER_STATE state; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + + HCA_ENTER( HCA_DBG_PO ); + + UNUSED_PARAM( context ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + if( !NT_SUCCESS( p_irp->IoStatus.Status ) ) + { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("IRP_MN_SET_POWER for system failed by lower driver with %08x.\n", + p_irp->IoStatus.Status)); + status = STATUS_SUCCESS; + PoStartNextPowerIrp( p_irp ); + goto release; + } + + state.DeviceState = + p_ext->DevicePower[pIoStack->Parameters.Power.State.SystemState]; + + /* + * Send a device power IRP to our devnode. Using our device object will + * only work on win2k and other NT based systems. + */ + status = PoRequestPowerIrp( p_dev_obj, IRP_MN_SET_POWER, state, + __RequestPowerCompletion, p_irp, NULL ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("PoRequestPowerIrp: SET_POWER 'PowerDeviceD%d', status %#x\n", + state.DeviceState - 1, status )); + + if( status != STATUS_PENDING ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("PoRequestPowerIrp returned %08x.\n", status)); + p_irp->IoStatus.Status = status; /* Propagate the failure. */ + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + goto release; + } + + status = STATUS_MORE_PROCESSING_REQUIRED; + goto exit; + +release: + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); +exit: + HCA_EXIT( HCA_DBG_PO ); + return status; +} + + +/* Work item callback to handle DevicePowerD0 IRPs at passive level. 
*/ +static void +__DevicePowerUpCompletionWorkItem( + IN DEVICE_OBJECT* p_dev_obj, + IN void* context ) +{ + NTSTATUS status; + IO_STACK_LOCATION *pIoStack; + hca_dev_ext_t *p_ext; + IRP *p_irp; + POWER_STATE powerState; + + HCA_ENTER( HCA_DBG_PO ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + p_irp = (IRP*)context; + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + IoFreeWorkItem( p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = NULL; + + /* restart the HCA */ + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("***** Restart the HCA, IRQL %d\n", KeGetCurrentIrql())); + + status = mthca_init_one( p_ext ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("!!! mthca_init_one failed (%#x) \n", status)); + goto err_mthca_init; + } + + if( p_ext->p_al_dev ) { + status = __hca_register( p_dev_obj ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("!!! __hca_register failed (%#x) \n", status)); + goto err_hca_reg; + } + } + + p_ext->DevicePowerState = pIoStack->Parameters.Power.State.DeviceState; + powerState = PoSetPowerState( p_dev_obj, DevicePowerState, + pIoStack->Parameters.Power.State ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("PoSetPowerState: old state %d, new state to %d\n", + powerState.DeviceState, p_ext->DevicePowerState )); + + goto exit; + +err_hca_reg: +err_mthca_init: + /* Flag device as having failed. */ + p_ext->pnpState |= PNP_DEVICE_FAILED; + IoInvalidateDeviceState( p_ext->cl_ext.p_pdo ); +exit: + PoStartNextPowerIrp( p_irp ); + IoCompleteRequest( p_irp, IO_NO_INCREMENT ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + HCA_EXIT( HCA_DBG_PO ); +} + +/*NOTE: Completion routines must NEVER be pageable. */ +static NTSTATUS +__DevicePowerUpCompletion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + NTSTATUS status = STATUS_SUCCESS; + hca_dev_ext_t *p_ext; + IO_STACK_LOCATION *pIoStack; + + HCA_ENTER( HCA_DBG_PO ); + + UNUSED_PARAM( context ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + if( !NT_SUCCESS( p_irp->IoStatus.Status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("IRP_MN_SET_POWER for device failed by lower driver with %08x.\n", + p_irp->IoStatus.Status)); + status = STATUS_SUCCESS; + PoStartNextPowerIrp( p_irp ); + goto release; + } + + /* Process in a work item - mthca_start blocks. */ + ASSERT( !p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj ); + if( !p_ext->pPoWorkItem ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PO, + ("Failed to allocate work item.\n" )); + status = STATUS_SUCCESS; + p_ext->pnpState |= PNP_DEVICE_FAILED; + IoInvalidateDeviceState( p_ext->cl_ext.p_pdo ); + PoStartNextPowerIrp( p_irp ); + goto release; + } + + /* Process in work item callback. 
*/ + IoMarkIrpPending( p_irp ); + IoQueueWorkItem( p_ext->pPoWorkItem, + __DevicePowerUpCompletionWorkItem, DelayedWorkQueue, p_irp ); + status = STATUS_MORE_PROCESSING_REQUIRED; + goto exit; + +release: + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); +exit: + HCA_EXIT( HCA_DBG_PO ); + return status; +} + +static NTSTATUS __DevicePowerDownWorkItemCompletion( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp, + IN void *context ) +{ + hca_dev_ext_t *p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + UNUSED_PARAM( context ); + + HCA_ENTER( HCA_DBG_PO ); + + PoStartNextPowerIrp( p_irp ); + IoReleaseRemoveLock( &p_ext->cl_ext.remove_lock, p_irp ); + + HCA_EXIT( HCA_DBG_PO ); + return STATUS_SUCCESS; +} + +/* Work item callback to handle DevicePowerD3 IRPs at passive level. */ +static void +__DevicePowerDownWorkItem( + IN DEVICE_OBJECT* p_dev_obj, + IN void* context ) +{ + IO_STACK_LOCATION *pIoStack; + hca_dev_ext_t *p_ext; + IRP *p_irp; + POWER_STATE powerState; + + HCA_ENTER( HCA_DBG_PO ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + p_irp = (IRP*)context; + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + IoFreeWorkItem( p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = NULL; + + p_ext->DevicePowerState = pIoStack->Parameters.Power.State.DeviceState; + powerState = PoSetPowerState( p_dev_obj, DevicePowerState, + pIoStack->Parameters.Power.State ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("PoSetPowerState: old state %d, new state to %d, IRQL %d\n", + powerState.DeviceState, p_ext->DevicePowerState, KeGetCurrentIrql() )); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("***** Remove the HCA \n")); + + { + __hca_deregister( p_ext ); + mthca_remove_one( p_ext ); + } + + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __DevicePowerDownWorkItemCompletion, + NULL, TRUE, TRUE, TRUE ); +#pragma warning( pop ) + PoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + + HCA_EXIT( HCA_DBG_PO ); +} + + +static NTSTATUS +hca_set_power( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + IO_STACK_LOCATION *pIoStack; + hca_dev_ext_t *p_ext; + + HCA_ENTER( HCA_DBG_PO ); + + p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + pIoStack = IoGetCurrentIrpStackLocation( p_irp ); + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PO, + ("SET_POWER for FDO %p (ext %p): type %s, state %d, action %d, IRQL %d \n", + p_dev_obj, p_ext, + (pIoStack->Parameters.Power.Type) ? "DevicePowerState" : "SystemPowerState", + pIoStack->Parameters.Power.State.DeviceState, + pIoStack->Parameters.Power.ShutdownType, KeGetCurrentIrql() )); + + switch( pIoStack->Parameters.Power.Type ) + { + case SystemPowerState: + p_ext->SystemPowerState = pIoStack->Parameters.Power.State.SystemState; + + /* + * Process on the way up the stack. We cannot block since the + * power dispatch function can be called at elevated IRQL if the + * device is in a paging/hibernation/crash dump path. 
+ */ + IoMarkIrpPending( p_irp ); + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __SystemPowerCompletion, NULL, + TRUE, TRUE, TRUE ); +#pragma warning( pop ) + PoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + + *p_action = IrpDoNothing; + status = STATUS_PENDING; + break; + + case DevicePowerState: + IoMarkIrpPending( p_irp ); + if( pIoStack->Parameters.Power.State.DeviceState == PowerDeviceD0 && + p_ext->SystemPowerState == PowerSystemWorking) + { /* power up */ + /* If we're already powered up, just pass down. */ + if( p_ext->DevicePowerState == PowerDeviceD0 ) + { + status = STATUS_SUCCESS; + *p_action = IrpIgnore; + break; + } + + /* Process in I/O completion callback. */ + IoCopyCurrentIrpStackLocationToNext( p_irp ); +#pragma warning( push, 3 ) + IoSetCompletionRoutine( p_irp, __DevicePowerUpCompletion, NULL, + TRUE, TRUE, TRUE ); +#pragma warning( pop ) + PoCallDriver( p_ext->cl_ext.p_next_do, p_irp ); + } + else + { /* power down */ + + /* Process in a work item - deregister_ca and HcaDeinit block. */ + ASSERT( !p_ext->pPoWorkItem ); + p_ext->pPoWorkItem = IoAllocateWorkItem( p_dev_obj ); + if( !p_ext->pPoWorkItem ) + { + status = STATUS_INSUFFICIENT_RESOURCES; + break; + } + + /* Process in work item callback. */ + IoQueueWorkItem( + p_ext->pPoWorkItem, __DevicePowerDownWorkItem, DelayedWorkQueue, p_irp ); + } + *p_action = IrpDoNothing; + status = STATUS_PENDING; + break; + + default: + /* Pass down and let the PDO driver handle it. */ + *p_action = IrpIgnore; + status = STATUS_SUCCESS; + break; + } + + if( !NT_SUCCESS( status ) ) + *p_action = IrpComplete; + + HCA_EXIT( HCA_DBG_PNP ); + return status; +} + +static void +__reregister_hca_cb( + IN DEVICE_OBJECT* p_dev_obj, + IN void* context ) +{ +#define SLEEP_TIME 100000 // 100 msec +#define POLL_TRIES 20 // to wait for 2 sec + int i; + NTSTATUS status; + LARGE_INTEGER interval; + hca_dev_ext_t *p_ext = (hca_dev_ext_t*)p_dev_obj->DeviceExtension; + PIO_WORKITEM pPoWorkItem = (PIO_WORKITEM)context; + + HCA_ENTER( HCA_DBG_PO ); + + IoFreeWorkItem( pPoWorkItem ); + + /* wait SLEEP_TIME_USEC usec for application to exit */ + interval.QuadPart = (-10) * SLEEP_TIME; + KeDelayExecutionThread( KernelMode, FALSE, &interval ); + for (i=0; p_ext->usecnt && i < POLL_TRIES; ++i) { + KeDelayExecutionThread( KernelMode, FALSE, &interval ); + } + + if (!p_ext->usecnt) { + /* reregister HCA */ + __hca_deregister( p_ext ); + + if( p_ext->p_al_dev ) { + status = __hca_register( p_dev_obj ); + if( !NT_SUCCESS( status ) ) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_PNP, + ("__hca_register returned 0x%08X.\n", status)); + } + } + } + + HCA_EXIT( HCA_DBG_PO ); +} + + +void reregister_hca( hca_dev_ext_t *p_ext ) +{ + DEVICE_OBJECT *p_dev_obj = (DEVICE_OBJECT *)p_ext->cl_ext.p_self_do; + PIO_WORKITEM pPoWorkItem; + + /* Process in a work item - deregister_ca and HcaDeinit block. */ + pPoWorkItem = IoAllocateWorkItem( p_dev_obj ); + if( pPoWorkItem ) + IoQueueWorkItem( pPoWorkItem, __reregister_hca_cb, + DelayedWorkQueue, pPoWorkItem ); + +} + diff --git a/branches/Ndi/hw/mthca/kernel/hca_pnp.h b/branches/Ndi/hw/mthca/kernel/hca_pnp.h new file mode 100644 index 00000000..bc74c8e1 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_pnp.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _HCA_PNP_H_ +#define _HCA_PNP_H_ + +void hca_init_vfptr( void ); + +NTSTATUS +hca_add_device( + IN PDRIVER_OBJECT pDriverObj, + IN PDEVICE_OBJECT pPdo ); + + +#endif + + diff --git a/branches/Ndi/hw/mthca/kernel/hca_verbs.c b/branches/Ndi/hw/mthca/kernel/hca_verbs.c new file mode 100644 index 00000000..2afd30bd --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/hca_verbs.c @@ -0,0 +1,1665 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "hca_driver.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "hca_verbs.tmh" +#endif +#include "mthca_dev.h" +#include "ib_cache.h" +#include "mx_abi.h" +#include "mt_pa_cash.h" + +#define PTR_ALIGN(size) (((size) + sizeof(void*) - 1) & ~(sizeof(void*) - 1)) + + +// Local declarations +ib_api_status_t +mlnx_query_qp ( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t *p_qp_attr, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* CA Access Verbs +*/ +ib_api_status_t +mlnx_open_ca ( + IN const ib_net64_t ca_guid, // IN const char * ca_name, + IN const ci_completion_cb_t pfn_completion_cb, + IN const ci_async_event_cb_t pfn_async_event_cb, + IN const void*const ca_context, + OUT ib_ca_handle_t *ph_ca) +{ + mlnx_hca_t *p_hca; + ib_api_status_t status = IB_NOT_FOUND; + struct ib_device *ib_dev; + + HCA_ENTER(HCA_DBG_SHIM); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, + ("context 0x%p\n", ca_context)); + + // find CA object + p_hca = mlnx_hca_from_guid( ca_guid ); + if( !p_hca ) { + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("completes with ERROR status IB_NOT_FOUND\n")); + } + HCA_EXIT(HCA_DBG_SHIM); + return IB_NOT_FOUND; + } + + ib_dev = &p_hca->mdev->ib_dev; + + if (mthca_is_livefish(p_hca->mdev)) + goto done; + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM, + ("context 0x%p\n", ca_context)); + status = mlnx_hobs_set_cb(&p_hca->hob, + pfn_completion_cb, + pfn_async_event_cb, + ca_context); + if (IB_SUCCESS != status) { + goto err_set_cb; + } + + + //TODO: do we need something for kernel users ? + + // Return pointer to HOB object +done: + if (ph_ca) *ph_ca = &p_hca->hob; + status = IB_SUCCESS; + +//err_mad_cache: +err_set_cb: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_query_ca ( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t *p_ca_attr, + IN OUT uint32_t *p_byte_count, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + uint32_t size, required_size; + uint8_t port_num, num_ports; + uint32_t num_gids, num_pkeys; + uint32_t num_page_sizes = 1; // TBD: what is actually supported + uint8_t *last_p; + struct ib_device_attr props; + struct ib_port_attr *hca_ports = NULL; + int i; + + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p ); + int err; + + HCA_ENTER(HCA_DBG_SHIM); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM ,("User mode is not supported yet\n")); + p_umv_buf->status = status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + if (NULL == p_byte_count) { + status = IB_INVALID_PARAMETER; + goto err_byte_count; + } + + // query the device + err = mthca_query_device(ib_dev, &props ); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("ib_query_device failed (%d)\n",err)); + status = errno_to_iberr(err); + goto err_query_device; + } + + // alocate arrary for port properties + num_ports = ib_dev->phys_port_cnt; /* Number of physical ports of the HCA */ + if (NULL == (hca_ports = cl_zalloc( num_ports * sizeof *hca_ports))) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("Failed to cl_zalloc ports array\n")); + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_ports; + } + + // start calculation of 
ib_ca_attr_t full size + num_gids = 0; + num_pkeys = 0; + required_size = PTR_ALIGN(sizeof(ib_ca_attr_t)) + + PTR_ALIGN(sizeof(uint32_t) * num_page_sizes) + + PTR_ALIGN(sizeof(ib_port_attr_t) * num_ports)+ + PTR_ALIGN(MTHCA_BOARD_ID_LEN)+ + PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ + + // get port properties + for (port_num = 0; port_num <= end_port(ib_dev) - start_port(ib_dev); ++port_num) { + // request + err = mthca_query_port(ib_dev, port_num + start_port(ib_dev), &hca_ports[port_num]); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_query_port failed(%d) for port %d\n",err, port_num)); + status = errno_to_iberr(err); + goto err_query_port; + } + + // calculate GID table size + num_gids = hca_ports[port_num].gid_tbl_len; + size = PTR_ALIGN(sizeof(ib_gid_t) * num_gids); + required_size += size; + + // calculate pkeys table size + num_pkeys = hca_ports[port_num].pkey_tbl_len; + size = PTR_ALIGN(sizeof(uint16_t) * num_pkeys); + required_size += size; + } + + // resource sufficience check + if (NULL == p_ca_attr || *p_byte_count < required_size) { + *p_byte_count = required_size; + status = IB_INSUFFICIENT_MEMORY; + if ( p_ca_attr != NULL) { + HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("Failed *p_byte_count (%d) < required_size (%d)\n", *p_byte_count, required_size )); + } + goto err_insuff_mem; + } + + // Space is sufficient - setup table pointers + last_p = (uint8_t*)p_ca_attr; + last_p += PTR_ALIGN(sizeof(*p_ca_attr)); + + p_ca_attr->p_page_size = (uint32_t*)last_p; + last_p += PTR_ALIGN(num_page_sizes * sizeof(uint32_t)); + + p_ca_attr->p_port_attr = (ib_port_attr_t *)last_p; + last_p += PTR_ALIGN(num_ports * sizeof(ib_port_attr_t)); + + for (port_num = 0; port_num < num_ports; port_num++) { + p_ca_attr->p_port_attr[port_num].p_gid_table = (ib_gid_t *)last_p; + size = PTR_ALIGN(sizeof(ib_gid_t) * hca_ports[port_num].gid_tbl_len); + last_p += size; + + p_ca_attr->p_port_attr[port_num].p_pkey_table = (uint16_t *)last_p; + size = PTR_ALIGN(sizeof(uint16_t) * hca_ports[port_num].pkey_tbl_len); + last_p += size; + } + + //copy vendor specific data + cl_memcpy(last_p,to_mdev(ib_dev)->board_id, MTHCA_BOARD_ID_LEN); + last_p += PTR_ALIGN(MTHCA_BOARD_ID_LEN); + *(uplink_info_t*)last_p = to_mdev(ib_dev)->uplink_info; + last_p += PTR_ALIGN(sizeof(uplink_info_t)); /* uplink info */ + + // Separate the loops to ensure that table pointers are always setup + for (port_num = 0; port_num < num_ports; port_num++) { + + // get pkeys, using cache + for (i=0; i < hca_ports[port_num].pkey_tbl_len; ++i) { + err = ib_get_cached_pkey( ib_dev, port_num + start_port(ib_dev), i, + &p_ca_attr->p_port_attr[port_num].p_pkey_table[i] ); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("ib_get_cached_pkey failed (%d) for port_num %d, index %d\n", + err, port_num + start_port(ib_dev), i)); + goto err_get_pkey; + } + } + + // get gids, using cache + for (i=0; i < hca_ports[port_num].gid_tbl_len; ++i) { + union ib_gid * __ptr64 gid = (union ib_gid *)&p_ca_attr->p_port_attr[port_num].p_gid_table[i]; + err = ib_get_cached_gid( ib_dev, port_num + start_port(ib_dev), i, (union ib_gid *)gid ); + //TODO: do we need to convert gids to little endian + if (err) { + status = errno_to_iberr(err); + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("ib_get_cached_gid failed (%d) for port_num %d, index %d\n", + err, port_num + start_port(ib_dev), i)); + goto err_get_gid; + } + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM,("port %d gid0:\n", port_num)); + 
HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM, + (" 0x%x%x%x%x%x%x%x%x-0x%x%x%x%x%x%x%x%x\n", + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[0], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[1], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[2], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[3], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[4], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[5], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[6], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[7], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[8], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[9], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[10], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[11], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[12], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[13], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[14], + p_ca_attr->p_port_attr[port_num].p_gid_table[0].raw[15])); + } + + // set result size + p_ca_attr->size = required_size; + CL_ASSERT( required_size == (((uintn_t)last_p) - ((uintn_t)p_ca_attr)) ); + HCA_PRINT(TRACE_LEVEL_VERBOSE, HCA_DBG_SHIM , ("Space required %d used %d\n", + required_size, (int)((uintn_t)last_p - (uintn_t)p_ca_attr) )); + + // !!! GID/PKEY tables must be queried before this call !!! + mlnx_conv_hca_cap(ib_dev, &props, hca_ports, p_ca_attr); + + status = IB_SUCCESS; + +err_get_gid: +err_get_pkey: +err_insuff_mem: +err_query_port: + cl_free(hca_ports); +err_alloc_ports: +err_query_device: +err_byte_count: +err_unsupported: +err_user_unsupported: + if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS ) + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_modify_ca ( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t modca_cmd, + IN const ib_port_attr_mod_t *p_port_attr) +{ +#define SET_CAP_MOD(al_mask, al_fld, ib) \ + if (modca_cmd & al_mask) { \ + if (p_port_attr->cap.##al_fld) \ + props.set_port_cap_mask |= ib; \ + else \ + props.clr_port_cap_mask |= ib; \ + } + + ib_api_status_t status; + int err; + struct ib_port_modify props; + int port_modify_mask = 0; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p ); + + HCA_ENTER(HCA_DBG_SHIM); + + //sanity check + if( !cl_is_blockable() ) { + status = IB_UNSUPPORTED; + goto err_unsupported; + } + + if (port_num < start_port(ib_dev) || port_num > end_port(ib_dev)) { + status = IB_INVALID_PORT; + goto err_port; + } + + // prepare parameters + RtlZeroMemory(&props, sizeof(props)); + SET_CAP_MOD(IB_CA_MOD_IS_SM, sm, IB_PORT_SM); + SET_CAP_MOD(IB_CA_MOD_IS_SNMP_SUPPORTED, snmp, IB_PORT_SNMP_TUNNEL_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_DEV_MGMT_SUPPORTED, dev_mgmt, IB_PORT_DEVICE_MGMT_SUP); + SET_CAP_MOD(IB_CA_MOD_IS_VEND_SUPPORTED, vend, IB_PORT_VENDOR_CLASS_SUP); + if ((modca_cmd & IB_CA_MOD_QKEY_CTR) && (p_port_attr->qkey_ctr == 0)) + port_modify_mask |= IB_PORT_RESET_QKEY_CNTR; + + // modify port + err = mthca_modify_port(ib_dev, port_num, port_modify_mask, &props ); + if (err) { + status = errno_to_iberr(err); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM ,("mthca_modify_port failed (%d) \n",err)); + goto err_modify_port; + } + + status = IB_SUCCESS; + +err_modify_port: +err_port: +err_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, + 
("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_close_ca ( + IN ib_ca_handle_t h_ca) +{ + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + HCA_ENTER(HCA_DBG_SHIM); + + if (mthca_is_livefish(MDEV_FROM_HOB( hob_p ))) + goto done; + + mlnx_hobs_remove(h_ca); + +done: + HCA_EXIT(HCA_DBG_SHIM); + + return IB_SUCCESS; +} + + +static ib_api_status_t +mlnx_um_open( + IN const ib_ca_handle_t h_ca, + IN OUT ci_umv_buf_t* const p_umv_buf, + OUT ib_ca_handle_t* const ph_um_ca ) +{ + int err; + ib_api_status_t status; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p ); + struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p ); + struct ib_ucontext *p_context; + struct ibv_get_context_resp *uresp_p; + struct ibv_alloc_pd_resp resp; + ci_umv_buf_t umv_buf; + + HCA_ENTER(HCA_DBG_SHIM); + + // sanity check + ASSERT( p_umv_buf ); + if( !p_umv_buf->command ) + { + p_context = cl_zalloc( sizeof(struct ib_ucontext) ); + if( !p_context ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_ucontext; + } + /* Copy the dev info. */ + p_context->device = ib_dev; + p_umv_buf->output_size = 0; + goto done; + } + + // create user context in kernel + p_context = mthca_alloc_ucontext(ib_dev, p_umv_buf); + if (IS_ERR(p_context)) { + err = PTR_ERR(p_context); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM, + ("mthca_alloc_ucontext failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_ucontext; + } + + /* allocate pd */ + umv_buf.command = 1; + umv_buf.input_size = umv_buf.status = 0; + umv_buf.output_size = sizeof(struct ibv_alloc_pd_resp); + umv_buf.p_inout_buf = &resp; + //NB: Pay attention ! Ucontext parameter is important here: + // when it is present (i.e. 
- for user space) - mthca_alloc_pd won't create MR + p_context->pd = ibv_alloc_pd(ib_dev, p_context, &umv_buf); + if (IS_ERR(p_context->pd)) { + err = PTR_ERR(p_context->pd); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_SHIM, + ("ibv_alloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_pd; + } + + // fill more parameters for user (sanity checks are in mthca_alloc_ucontext) + uresp_p = (struct ibv_get_context_resp *)(void*)p_umv_buf->p_inout_buf; + uresp_p->uar_addr = (uint64_t)(UINT_PTR)p_context->user_uar; + uresp_p->pd_handle = resp.pd_handle; + uresp_p->pdn = resp.pdn; + uresp_p->vend_id = (uint32_t)ext_p->hcaConfig.VendorID; + uresp_p->dev_id = (uint16_t)ext_p->hcaConfig.DeviceID; + +done: + // some more inits + p_context->va = p_context->p_mdl = NULL; + p_context->fw_if_open = FALSE; + KeInitializeMutex( &p_context->mutex, 0 ); + // chain user context to the device + cl_spinlock_acquire( &ext_p->uctx_lock ); + cl_qlist_insert_tail( &ext_p->uctx_list, &p_context->list_item ); + cl_atomic_inc(&ext_p->usecnt); + cl_spinlock_release( &ext_p->uctx_lock ); + + // return the result + if (ph_um_ca) *ph_um_ca = (ib_ca_handle_t)p_context; + + status = IB_SUCCESS; + goto end; + +err_alloc_pd: + mthca_dealloc_ucontext(p_context); +err_alloc_ucontext: +end: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SHIM); + return status; +} + +static void +mlnx_um_close( + IN ib_ca_handle_t h_ca, + IN ib_ca_handle_t h_um_ca ) +{ + struct ib_ucontext *p_ucontext = (struct ib_ucontext *)h_um_ca; + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + hca_dev_ext_t *ext_p = EXT_FROM_HOB( hob_p ); + + if (mthca_is_livefish(to_mdev(p_ucontext->device))) + goto done; + unmap_crspace_for_all(p_ucontext); +done: + cl_spinlock_acquire( &ext_p->uctx_lock ); + cl_qlist_remove_item( &ext_p->uctx_list, &p_ucontext->list_item ); + cl_atomic_dec(&ext_p->usecnt); + cl_spinlock_release( &ext_p->uctx_lock ); + if( !p_ucontext->pd ) + cl_free( h_um_ca ); + else + ibv_um_close(p_ucontext); + pa_cash_print(); + return; +} + + +/* +* Protection Domain and Reliable Datagram Domain Verbs +*/ + +ib_api_status_t +mlnx_allocate_pd ( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t type, + OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status; + struct ib_device *ib_dev; + struct ib_ucontext *p_context; + struct ib_pd *ib_pd_p; + int err; + + //TODO: how are we use it ? 
+ UNREFERENCED_PARAMETER(type); + + HCA_ENTER(HCA_DBG_PD); + + if( p_umv_buf ) { + p_context = (struct ib_ucontext *)h_ca; + ib_dev = p_context->device; + } + else { + mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca; + p_context = NULL; + ib_dev = IBDEV_FROM_HOB( hob_p ); + } + + // create PD + ib_pd_p = ibv_alloc_pd(ib_dev, p_context, p_umv_buf); + if (IS_ERR(ib_pd_p)) { + err = PTR_ERR(ib_pd_p); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD, + ("ibv_alloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_pd; + } + + // return the result + if (ph_pd) *ph_pd = (ib_pd_handle_t)ib_pd_p; + + status = IB_SUCCESS; + +err_alloc_pd: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_PD); + return status; +} + +ib_api_status_t +mlnx_deallocate_pd ( + IN ib_pd_handle_t h_pd) +{ + ib_api_status_t status; + int err; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + PREP_IBDEV_FOR_PRINT(ib_pd_p->device); + + HCA_ENTER( HCA_DBG_PD); + + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_PD, + ("pcs %p\n", PsGetCurrentProcess())); + + // dealloc pd + err = ibv_dealloc_pd( ib_pd_p ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_PD + ,("ibv_dealloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_dealloc_pd; + } + status = IB_SUCCESS; + +err_dealloc_pd: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_PD + ,("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_PD); + return status; +} + +/* +* Address Vector Management Verbs +*/ +ib_api_status_t +mlnx_create_av ( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t *p_addr_vector, + OUT ib_av_handle_t *ph_av, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err = 0; + ib_api_status_t status = IB_SUCCESS; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + struct ib_device *ib_dev = ib_pd_p->device; + struct ib_ah *ib_av_p; + struct ib_ah_attr ah_attr; + struct ib_ucontext *p_context = NULL; + + HCA_ENTER(HCA_DBG_AV); + + if( p_umv_buf && p_umv_buf->command ) { + // sanity checks + if (p_umv_buf->input_size < sizeof(struct ibv_create_ah) || + p_umv_buf->output_size < sizeof(struct ibv_create_ah_resp) || + !p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto err_inval_params; + } + p_context = ib_pd_p->ucontext; + } + else + p_context = NULL; + + // fill parameters + RtlZeroMemory(&ah_attr, sizeof(ah_attr)); + mlnx_conv_ibal_av( ib_dev, p_addr_vector, &ah_attr ); + + ib_av_p = ibv_create_ah(ib_pd_p, &ah_attr, p_context, p_umv_buf); + if (IS_ERR(ib_av_p)) { + err = PTR_ERR(ib_av_p); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_AV, + ("ibv_create_ah failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_av; + } + + // return the result + if (ph_av) *ph_av = (ib_av_handle_t)ib_av_p; + + status = IB_SUCCESS; + +err_alloc_av: +err_inval_params: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_AV); + return status; +} + +ib_api_status_t +mlnx_query_av ( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t *p_addr_vector, + OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_ah *ib_ah_p = (struct ib_ah *)h_av; + 
PREP_IBDEV_FOR_PRINT(ib_ah_p->device); + + HCA_ENTER(HCA_DBG_AV); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("User mode is not supported yet\n")); + status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + + // query AV +#ifdef WIN_TO_BE_CHANGED + //TODO: not implemented in low-level driver + err = ibv_query_ah(ib_ah_p, &ah_attr) + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_query_ah failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_query_ah; + } + // convert to IBAL structure: something like that + mlnx_conv_mthca_av( p_addr_vector, &ah_attr ); +#else + + err = mlnx_conv_mthca_av( ib_ah_p, p_addr_vector ); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("mlnx_conv_mthca_av failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_conv_mthca_av; + } +#endif + + // results + *ph_pd = (ib_pd_handle_t)ib_ah_p->pd; + +err_conv_mthca_av: +err_user_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_AV); + return status; +} + +ib_api_status_t +mlnx_modify_av ( + IN const ib_av_handle_t h_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ib_ah_attr ah_attr; + ib_api_status_t status = IB_SUCCESS; + struct ib_ah *ib_ah_p = (struct ib_ah *)h_av; + struct ib_device *ib_dev = ib_ah_p->pd->device; + + HCA_ENTER(HCA_DBG_AV); + + // sanity checks + if( p_umv_buf && p_umv_buf->command ) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("User mode is not supported yet\n")); + status = IB_UNSUPPORTED; + goto err_user_unsupported; + } + + // fill parameters + RtlZeroMemory(&ah_attr, sizeof(ah_attr)); + mlnx_conv_ibal_av( ib_dev, p_addr_vector, &ah_attr ); + + // modify AH +#ifdef WIN_TO_BE_CHANGED + //TODO: not implemented in low-level driver + err = ibv_modify_ah(ib_ah_p, &ah_attr) + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("ibv_query_ah failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_query_ah; + } +#else + + mlnx_modify_ah( ib_ah_p, &ah_attr ); +#endif + +err_user_unsupported: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_AV); + return status; +} + +ib_api_status_t +mlnx_destroy_av ( + IN const ib_av_handle_t h_av) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_ah *ib_ah_p = (struct ib_ah *)h_av; + PREP_IBDEV_FOR_PRINT(ib_ah_p->device); + + HCA_ENTER(HCA_DBG_AV); + + // destroy AV + err = ibv_destroy_ah( ib_ah_p ); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_AV, + ("ibv_destroy_ah failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_destroy_ah; + } + +err_destroy_ah: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_AV); + return status; +} + +/* +* Shared Queue Pair Management Verbs +*/ + + +ib_api_status_t +mlnx_create_srq ( + IN const ib_pd_handle_t h_pd, + IN const void *srq_context, + IN const ib_srq_attr_t * const p_srq_attr, + OUT ib_srq_handle_t *ph_srq, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status; + struct ib_srq *ib_srq_p; + struct mthca_srq *srq_p; + struct ib_srq_init_attr srq_init_attr; + struct ib_ucontext *p_context = NULL; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + struct 
ib_device *ib_dev = ib_pd_p->device; + mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev); + + HCA_ENTER(HCA_DBG_SRQ); + + if( p_umv_buf && p_umv_buf->command) { + + // sanity checks + if (p_umv_buf->input_size < sizeof(struct ibv_create_srq) || + p_umv_buf->output_size < sizeof(struct ibv_create_srq_resp) || + !p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto err_inval_params; + } + p_context = ib_pd_p->ucontext; + } + + // prepare the parameters + RtlZeroMemory(&srq_init_attr, sizeof(srq_init_attr)); + srq_init_attr.event_handler = srq_event_handler; + srq_init_attr.srq_context = hob_p; + srq_init_attr.attr = *p_srq_attr; + + // allocate srq + ib_srq_p = ibv_create_srq(ib_pd_p, &srq_init_attr, p_context, p_umv_buf ); + if (IS_ERR(ib_srq_p)) { + err = PTR_ERR(ib_srq_p); + HCA_PRINT (TRACE_LEVEL_ERROR ,HCA_DBG_SRQ, ("ibv_create_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_srq; + } + + // fill the object + srq_p = (struct mthca_srq *)ib_srq_p; + srq_p->srq_context = (void*)srq_context; + + // return the result + if (ph_srq) *ph_srq = (ib_srq_handle_t)srq_p; + + status = IB_SUCCESS; + +err_create_srq: +err_inval_params: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SRQ); + return status; +} + + +ib_api_status_t +mlnx_modify_srq ( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + UNUSED_PARAM(p_umv_buf); + UNUSED_PARAM_WOWPP(ib_dev); + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_modify_srq(ib_srq, (void*)p_srq_attr, srq_attr_mask); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_modify_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SRQ); + return status; +} + +ib_api_status_t +mlnx_query_srq ( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf OPTIONAL ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + UNUSED_PARAM(p_umv_buf); + UNUSED_PARAM_WOWPP(ib_dev); + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_query_srq(ib_srq, p_srq_attr); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_query_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SRQ); + return status; +} + +ib_api_status_t +mlnx_destroy_srq ( + IN const ib_srq_handle_t h_srq ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ib_srq *ib_srq = (struct ib_srq *)h_srq; + struct ib_device *ib_dev = ib_srq->device; + UNUSED_PARAM_WOWPP(ib_dev); + + HCA_ENTER(HCA_DBG_SRQ); + + err = ibv_destroy_srq(ib_srq); + if (err) { + HCA_PRINT (TRACE_LEVEL_ERROR, HCA_DBG_AV, + ("ibv_destroy_srq failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SRQ, 
+ ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SRQ); + return status; +} + +/* +* Queue Pair Management Verbs +*/ + + +static ib_api_status_t +_create_qp ( + IN const ib_pd_handle_t h_pd, + IN const uint8_t port_num, + IN const void *qp_context, + IN const ib_qp_create_t *p_create_attr, + OUT ib_qp_attr_t *p_qp_attr, + OUT ib_qp_handle_t *ph_qp, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status; + struct ib_qp * ib_qp_p; + struct mthca_qp *qp_p; + struct ib_qp_init_attr qp_init_attr; + struct ib_ucontext *p_context = NULL; + struct ib_pd *ib_pd_p = (struct ib_pd *)h_pd; + struct ib_device *ib_dev = ib_pd_p->device; + mlnx_hob_t *hob_p = HOB_FROM_IBDEV(ib_dev); + + HCA_ENTER(HCA_DBG_QP); + + if( p_umv_buf && p_umv_buf->command ) { + // sanity checks + if (p_umv_buf->input_size < sizeof(struct ibv_create_qp) || + p_umv_buf->output_size < sizeof(struct ibv_create_qp_resp) || + !p_umv_buf->p_inout_buf) { + status = IB_INVALID_PARAMETER; + goto err_inval_params; + } + p_context = ib_pd_p->ucontext; + } + + // prepare the parameters + RtlZeroMemory(&qp_init_attr, sizeof(qp_init_attr)); + qp_init_attr.qp_type = p_create_attr->qp_type; + qp_init_attr.event_handler = qp_event_handler; + qp_init_attr.qp_context = hob_p; + qp_init_attr.recv_cq = (struct ib_cq *)p_create_attr->h_rq_cq; + qp_init_attr.send_cq = (struct ib_cq *)p_create_attr->h_sq_cq; + qp_init_attr.srq = (struct ib_srq *)p_create_attr->h_srq; + qp_init_attr.cap.max_recv_sge = p_create_attr->rq_sge; + qp_init_attr.cap.max_send_sge = p_create_attr->sq_sge; + qp_init_attr.cap.max_recv_wr = p_create_attr->rq_depth; + qp_init_attr.cap.max_send_wr = p_create_attr->sq_depth; + qp_init_attr.sq_sig_type = (p_create_attr->sq_signaled) ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR; + qp_init_attr.port_num = port_num; + + + // create qp + ib_qp_p = ibv_create_qp( ib_pd_p, &qp_init_attr, p_context, p_umv_buf ); + if (IS_ERR(ib_qp_p)) { + err = PTR_ERR(ib_qp_p); + HCA_PRINT(TRACE_LEVEL_ERROR , HCA_DBG_QP, + ("ibv_create_qp failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_qp; + } + + // fill the object + qp_p = (struct mthca_qp *)ib_qp_p; + qp_p->qp_context = (void*)qp_context; + qp_p->qp_init_attr = qp_init_attr; + + // Query QP to obtain requested attributes + if (p_qp_attr) { + status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf); + if (status != IB_SUCCESS) + goto err_query_qp; + } + + // return the results + if (ph_qp) *ph_qp = (ib_qp_handle_t)ib_qp_p; + + status = IB_SUCCESS; + goto end; + +err_query_qp: + ibv_destroy_qp( ib_qp_p ); +err_create_qp: +err_inval_params: +end: + if (p_umv_buf && p_umv_buf->command) + p_umv_buf->status = status; + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_QP); + return status; +} + +ib_api_status_t +mlnx_create_spl_qp ( + IN const ib_pd_handle_t h_pd, + IN const uint8_t port_num, + IN const void *qp_context, + IN const ib_qp_create_t *p_create_attr, + OUT ib_qp_attr_t *p_qp_attr, + OUT ib_qp_handle_t *ph_qp ) +{ + ib_api_status_t status; + PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device); + + HCA_ENTER(HCA_DBG_SHIM); + + status = _create_qp( h_pd, port_num, + qp_context, p_create_attr, p_qp_attr, ph_qp, NULL ); + + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_QP); + return status; +} + 
+ib_api_status_t
+mlnx_create_qp (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	void				*qp_context,
+	IN	const	ib_qp_create_t		*p_create_attr,
+	OUT		ib_qp_attr_t			*p_qp_attr,
+	OUT		ib_qp_handle_t		*ph_qp,
+	IN OUT	ci_umv_buf_t			*p_umv_buf )
+{
+	ib_api_status_t status;
+	PREP_IBDEV_FOR_PRINT(((struct ib_pd*)h_pd)->device);
+
+	//NB: the algorithm of mthca_alloc_sqp() requires port_num.
+	// The PRM states that special QPs are created in pairs, so
+	// it looks like we can always use port_num = 1 here.
+	uint8_t port_num = 1;
+
+	HCA_ENTER(HCA_DBG_QP);
+
+	status = _create_qp( h_pd, port_num,
+		qp_context, p_create_attr, p_qp_attr, ph_qp, p_umv_buf );
+
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_QP);
+	return status;
+}
+
+ib_api_status_t
+mlnx_modify_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	ib_qp_mod_t			*p_modify_attr,
+	OUT		ib_qp_attr_t			*p_qp_attr OPTIONAL,
+	IN OUT	ci_umv_buf_t			*p_umv_buf OPTIONAL )
+{
+	ib_api_status_t status;
+	int err;
+	struct ib_qp_attr qp_attr;
+	int qp_attr_mask;
+	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+	PREP_IBDEV_FOR_PRINT(ib_qp_p->device);
+
+	HCA_ENTER(HCA_DBG_QP);
+
+	// sanity checks
+	if( p_umv_buf && p_umv_buf->command ) {
+		if (p_umv_buf->output_size < sizeof(struct ibv_modify_qp_resp) ||
+			!p_umv_buf->p_inout_buf) {
+			status = IB_INVALID_PARAMETER;
+			goto err_inval_params;
+		}
+	}
+
+	// fill the parameters
+	status = mlnx_conv_qp_modify_attr( ib_qp_p, ib_qp_p->qp_type,
+		p_modify_attr, &qp_attr, &qp_attr_mask );
+	if (status == IB_NOT_DONE)
+		goto query_qp;
+	if (status != IB_SUCCESS)
+		goto err_mode_unsupported;
+
+	// modify QP
+	err = ibv_modify_qp(ib_qp_p, &qp_attr, qp_attr_mask);
+	if (err) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
+			("ibv_modify_qp failed (%d)\n", err));
+		status = errno_to_iberr(err);
+		goto err_modify_qp;
+	}
+
+	// query the QP to obtain the requested attributes
+query_qp:
+	if (p_qp_attr) {
+		status = mlnx_query_qp ((ib_qp_handle_t)ib_qp_p, p_qp_attr, p_umv_buf);
+		if (status != IB_SUCCESS)
+			goto err_query_qp;
+	}
+
+	if( p_umv_buf && p_umv_buf->command )
+	{
+		struct ibv_modify_qp_resp resp;
+		resp.attr_mask = qp_attr_mask;
+		resp.qp_state = qp_attr.qp_state;
+		err = ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_modify_qp_resp));
+		if (err) {
+			HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_SHIM, ("ib_copy_to_umv_buf failed (%d)\n", err));
+			status = errno_to_iberr(err);
+			goto err_copy;
+		}
+	}
+
+	status = IB_SUCCESS;
+
+err_copy:
+err_query_qp:
+err_modify_qp:
+err_mode_unsupported:
+err_inval_params:
+	if (p_umv_buf && p_umv_buf->command)
+		p_umv_buf->status = status;
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_QP);
+	return status;
+}
+
+ib_api_status_t
+mlnx_query_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	OUT		ib_qp_attr_t			*p_qp_attr,
+	IN OUT	ci_umv_buf_t			*p_umv_buf )
+{
+	ib_api_status_t status = IB_SUCCESS;
+	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+	struct mthca_qp *qp_p = (struct mthca_qp *)ib_qp_p;
+
+	UNREFERENCED_PARAMETER(p_umv_buf);
+
+	HCA_ENTER( HCA_DBG_QP);
+
+	// clean the structure
+	RtlZeroMemory( p_qp_attr, sizeof *p_qp_attr );
+
+	// fill the structure
+	//TODO: this function is to be implemented via ibv_query_qp, which is not supported yet
+	p_qp_attr->h_pd = (ib_pd_handle_t)qp_p->ibqp.pd;
+	p_qp_attr->qp_type = qp_p->ibqp.qp_type;
+	p_qp_attr->sq_max_inline = qp_p->qp_init_attr.cap.max_inline_data;
+	p_qp_attr->sq_depth = qp_p->qp_init_attr.cap.max_send_wr;
+	p_qp_attr->rq_depth = qp_p->qp_init_attr.cap.max_recv_wr;
+	p_qp_attr->sq_sge = qp_p->qp_init_attr.cap.max_send_sge;
+	p_qp_attr->rq_sge = qp_p->qp_init_attr.cap.max_recv_sge;
+	p_qp_attr->resp_res = qp_p->resp_depth;
+	p_qp_attr->h_sq_cq = (ib_cq_handle_t)qp_p->ibqp.send_cq;
+	p_qp_attr->h_rq_cq = (ib_cq_handle_t)qp_p->ibqp.recv_cq;
+	p_qp_attr->sq_signaled = qp_p->sq_policy == IB_SIGNAL_ALL_WR;
+	p_qp_attr->state = mlnx_qps_to_ibal( qp_p->state );
+	p_qp_attr->num = cl_hton32(qp_p->ibqp.qp_num);
+
+#ifdef WIN_TO_BE_CHANGED
+//TODO: these fields cannot be filled without query_qp support in MTHCA
+	p_qp_attr->access_ctrl = qp_p->
+	p_qp_attr->pkey_index = qp_p->
+	p_qp_attr->dest_num = qp_p-
+	p_qp_attr->init_depth = qp_p-
+	p_qp_attr->qkey = qp_p-
+	p_qp_attr->sq_psn = qp_p-
+	p_qp_attr->rq_psn = qp_p-
+	p_qp_attr->primary_port = qp_p-
+	p_qp_attr->alternate_port = qp_p-
+	p_qp_attr->primary_av = qp_p-
+	p_qp_attr->alternate_av = qp_p-
+	p_qp_attr->apm_state = qp_p-
+#endif
+
+	status = IB_SUCCESS;
+
+	HCA_EXIT(HCA_DBG_QP);
+	return status;
+}
+
+ib_api_status_t
+mlnx_destroy_qp (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	uint64_t				timewait )
+{
+	ib_api_status_t status;
+	int err;
+	struct ib_qp *ib_qp_p = (struct ib_qp *)h_qp;
+	PREP_IBDEV_FOR_PRINT(ib_qp_p->device);
+
+	UNUSED_PARAM( timewait );
+
+	HCA_ENTER( HCA_DBG_QP);
+
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SHIM,
+		("qpnum %#x, pcs %p\n", ib_qp_p->qp_num, PsGetCurrentProcess()) );
+
+	err = ibv_destroy_qp( ib_qp_p );
+	if (err) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
+			("ibv_destroy_qp failed (%d)\n", err));
+		status = errno_to_iberr(err);
+		goto err_destroy_qp;
+	}
+
+	status = IB_SUCCESS;
+
+err_destroy_qp:
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_QP,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_QP);
+	return status;
+}
+
+/*
+* Completion Queue Management Verbs.
+*/
+
+ib_api_status_t
+mlnx_create_cq (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	void				*cq_context,
+	IN OUT	uint32_t				*p_size,
+	OUT		ib_cq_handle_t		*ph_cq,
+	IN OUT	ci_umv_buf_t			*p_umv_buf )
+{
+	int err;
+	ib_api_status_t status;
+	struct ib_cq *ib_cq_p;
+	struct mthca_cq *cq_p;
+	mlnx_hob_t *hob_p;
+	struct ib_device *ib_dev;
+	struct ib_ucontext *p_context;
+
+	HCA_ENTER(HCA_DBG_CQ);
+
+	if( p_umv_buf ) {
+
+		p_context = (struct ib_ucontext *)h_ca;
+		hob_p = HOB_FROM_IBDEV(p_context->device);
+		ib_dev = p_context->device;
+
+		// sanity checks
+		if (p_umv_buf->input_size < sizeof(struct ibv_create_cq) ||
+			p_umv_buf->output_size < sizeof(struct ibv_create_cq_resp) ||
+			!p_umv_buf->p_inout_buf) {
+			status = IB_INVALID_PARAMETER;
+			goto err_inval_params;
+		}
+	}
+	else {
+		hob_p = (mlnx_hob_t *)h_ca;
+		p_context = NULL;
+		ib_dev = IBDEV_FROM_HOB( hob_p );
+	}
+
+	/* sanity check */
+	if (!*p_size || *p_size > (uint32_t)ib_dev->mdev->limits.max_cqes) {
+		status = IB_INVALID_CQ_SIZE;
+		goto err_cqe;
+	}
+
+	// allocate the cq
+	ib_cq_p = ibv_create_cq(ib_dev,
+		cq_comp_handler, cq_event_handler,
+		hob_p, *p_size, p_context, p_umv_buf );
+	if (IS_ERR(ib_cq_p)) {
+		err = PTR_ERR(ib_cq_p);
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ, ("ibv_create_cq failed (%d)\n", err));
+		status = errno_to_iberr(err);
+		goto err_create_cq;
+	}
+
+	// fill the object
+	cq_p = (struct mthca_cq *)ib_cq_p;
+	cq_p->cq_context = (void*)cq_context;
+
+	// return the actual CQ size allocated by the provider
+	*p_size = ib_cq_p->cqe;
+
+	if (ph_cq) *ph_cq = (ib_cq_handle_t)cq_p;
+
+	status = IB_SUCCESS;
+
+err_create_cq:
+err_inval_params:
+err_cqe:
+	if (p_umv_buf && p_umv_buf->command)
+		p_umv_buf->status = status;
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_CQ);
+	return status;
+}
+
+ib_api_status_t
+mlnx_resize_cq (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN OUT	uint32_t				*p_size,
+	IN OUT	ci_umv_buf_t			*p_umv_buf )
+{
+	UNREFERENCED_PARAMETER(h_cq);
+	UNREFERENCED_PARAMETER(p_size);
+	if (p_umv_buf && p_umv_buf->command) {
+		p_umv_buf->status = IB_UNSUPPORTED;
+	}
+	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ, ("mlnx_resize_cq not implemented\n"));
+	return IB_UNSUPPORTED;
+}
+
+ib_api_status_t
+mlnx_query_cq (
+	IN	const	ib_cq_handle_t		h_cq,
+	OUT		uint32_t				*p_size,
+	IN OUT	ci_umv_buf_t			*p_umv_buf )
+{
+	UNREFERENCED_PARAMETER(h_cq);
+	UNREFERENCED_PARAMETER(p_size);
+	if (p_umv_buf && p_umv_buf->command) {
+		p_umv_buf->status = IB_UNSUPPORTED;
+	}
+	HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ, ("mlnx_query_cq not implemented\n"));
+	return IB_UNSUPPORTED;
+}
+
+ib_api_status_t
+mlnx_destroy_cq (
+	IN	const	ib_cq_handle_t		h_cq)
+{
+	ib_api_status_t status;
+	int err;
+	struct ib_cq *ib_cq_p = (struct ib_cq *)h_cq;
+	PREP_IBDEV_FOR_PRINT(ib_cq_p->device);
+
+	HCA_ENTER(HCA_DBG_CQ);
+
+	HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_CQ,
+		("cqn %#x, pcs %p\n", ((struct mthca_cq*)ib_cq_p)->cqn, PsGetCurrentProcess()) );
+
+	// destroy the CQ
+	err = ibv_destroy_cq( ib_cq_p );
+	if (err) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ,
+			("ibv_destroy_cq failed (%d)\n", err));
+		status = errno_to_iberr(err);
+		goto err_destroy_cq;
+	}
+
+	status = IB_SUCCESS;
+
+err_destroy_cq:
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_CQ,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_CQ);
+	return status;
+}
+
+
+ib_api_status_t
+mlnx_local_mad (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	uint8_t				port_num,
+	IN	const	ib_av_attr_t*			p_av_attr,
+	IN	const	ib_mad_t				*p_mad_in,
+	OUT		ib_mad_t				*p_mad_out )
+{
+	int err;
+	ib_api_status_t status = IB_SUCCESS;
+	mlnx_hob_t *hob_p = (mlnx_hob_t *)h_ca;
+	struct ib_device *ib_dev = IBDEV_FROM_HOB( hob_p );
+	//TODO: do we need to use the flags (IB_MAD_IGNORE_MKEY, IB_MAD_IGNORE_BKEY) ?
+	int mad_flags = 0;
+	struct _ib_wc *wc_p = NULL;
+	//TODO: do we need to use the GRH ?
+	struct _ib_grh *grh_p = NULL;
+
+	HCA_ENTER(HCA_DBG_MAD);
+
+	// sanity checks
+	if (port_num > 2) {
+		status = IB_INVALID_PARAMETER;
+		goto err_port_num;
+	}
+
+	if (p_av_attr) {
+		wc_p = cl_zalloc(sizeof(struct _ib_wc));
+		if (!wc_p) {
+			status = IB_INSUFFICIENT_MEMORY;
+			goto err_wc_alloc;
+		}
+		// copy the part of the attributes needed to fill the MAD extended fields on Mellanox devices
+		wc_p->recv.ud.remote_lid = p_av_attr->dlid;
+		wc_p->recv.ud.remote_sl = p_av_attr->sl;
+		wc_p->recv.ud.path_bits = p_av_attr->path_bits;
+		wc_p->recv.ud.recv_opt = p_av_attr->grh_valid ? IB_RECV_OPT_GRH_VALID : 0;
+
+		if (wc_p->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID) {
+			grh_p = cl_zalloc(sizeof(struct _ib_grh));
+			if (!grh_p) {
+				status = IB_INSUFFICIENT_MEMORY;
+				goto err_grh_alloc;
+			}
+			cl_memcpy(grh_p, &p_av_attr->grh, sizeof(ib_grh_t));
+		}
+	}
+
+	HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_MAD,
+		("MAD: Class %02x, Method %02x, Attr %02x, HopPtr %d, HopCnt %d\n",
+		(uint32_t)((ib_smp_t *)p_mad_in)->mgmt_class,
+		(uint32_t)((ib_smp_t *)p_mad_in)->method,
+		(uint32_t)((ib_smp_t *)p_mad_in)->attr_id,
+		(uint32_t)((ib_smp_t *)p_mad_in)->hop_ptr,
+		(uint32_t)((ib_smp_t *)p_mad_in)->hop_count));
+
+	// process the MAD
+	// (mthca_process_mad() returns a non-zero IB_MAD_RESULT_* mask on success,
+	// so a zero return value means the MAD was not processed)
+	err = mthca_process_mad(ib_dev, mad_flags, (uint8_t)port_num,
+		wc_p, grh_p, (struct ib_mad*)p_mad_in, (struct ib_mad*)p_mad_out);
+	if (!err) {
+		HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_MAD,
+			("MAD failed:\n\tClass 0x%x\n\tMethod 0x%x\n\tAttr 0x%x",
+			p_mad_in->mgmt_class, p_mad_in->method, p_mad_in->attr_id ));
+		status = IB_ERROR;
+		goto err_process_mad;
+	}
+
+	if( (p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR ||
+		p_mad_in->mgmt_class == IB_MCLASS_SUBN_LID) &&
+		p_mad_in->attr_id == IB_MAD_ATTR_PORT_INFO )
+	{
+		ib_port_info_t *p_pi_in, *p_pi_out;
+
+		if( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
+		{
+			p_pi_in = (ib_port_info_t*)
+				ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_in );
+			p_pi_out = (ib_port_info_t*)
+				ib_smp_get_payload_ptr( (ib_smp_t*)p_mad_out );
+		}
+		else
+		{
+			p_pi_in = (ib_port_info_t*)(p_mad_in + 1);
+			p_pi_out = (ib_port_info_t*)(p_mad_out + 1);
+		}
+
+		/* Work around FW bug 33958 */
+		p_pi_out->subnet_timeout &= 0x7F;
+		if( p_mad_in->method == IB_MAD_METHOD_SET )
+			p_pi_out->subnet_timeout |= (p_pi_in->subnet_timeout & 0x80);
+	}
+
+	/* Modify direction for Direct MAD */
+	if ( p_mad_in->mgmt_class == IB_MCLASS_SUBN_DIR )
+		p_mad_out->status |= IB_SMP_DIRECTION;
+
+err_process_mad:
+	if (grh_p)
+		cl_free(grh_p);
+err_grh_alloc:
+	if (wc_p)
+		cl_free(wc_p);
+err_wc_alloc:
+err_port_num:
+	if (status != IB_SUCCESS)
+	{
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MAD,
+			("completes with ERROR status %s\n", IB_GET_ERR_STR(status)));
+	}
+	HCA_EXIT(HCA_DBG_MAD);
+	return status;
+}
+
+
+void
+setup_ci_interface(
+	IN	const	ib_net64_t			ca_guid,
+	IN	const	int					is_livefish,
+	IN OUT	ci_interface_t			*p_interface )
+{
+	cl_memclr(p_interface, sizeof(*p_interface));
+
+	/* Guid of the CA. */
+	p_interface->guid = ca_guid;
+
+	/* Version of this interface.
*/ + p_interface->version = VERBS_VERSION; + + /* UVP name */ + cl_memcpy( p_interface->libname, mlnx_uvp_lib_name, MAX_LIB_NAME); + + HCA_PRINT(TRACE_LEVEL_VERBOSE , HCA_DBG_SHIM ,("UVP filename %s\n", p_interface->libname)); + + /* The real interface. */ + p_interface->open_ca = mlnx_open_ca; + p_interface->query_ca = mlnx_query_ca; + p_interface->close_ca = mlnx_close_ca; + p_interface->um_open_ca = mlnx_um_open; + p_interface->um_close_ca = mlnx_um_close; + + p_interface->allocate_pd = mlnx_allocate_pd; + p_interface->deallocate_pd = mlnx_deallocate_pd; + p_interface->vendor_call = fw_access_ctrl; + + if (is_livefish) { + mlnx_memory_if_livefish(p_interface); + } + else { + p_interface->modify_ca = mlnx_modify_ca; + + p_interface->create_av = mlnx_create_av; + p_interface->query_av = mlnx_query_av; + p_interface->modify_av = mlnx_modify_av; + p_interface->destroy_av = mlnx_destroy_av; + + p_interface->create_srq = mlnx_create_srq; + p_interface->modify_srq = mlnx_modify_srq; + p_interface->query_srq = mlnx_query_srq; + p_interface->destroy_srq = mlnx_destroy_srq; + + p_interface->create_qp = mlnx_create_qp; + p_interface->create_spl_qp = mlnx_create_spl_qp; + p_interface->modify_qp = mlnx_modify_qp; + p_interface->query_qp = mlnx_query_qp; + p_interface->destroy_qp = mlnx_destroy_qp; + + p_interface->create_cq = mlnx_create_cq; + p_interface->resize_cq = mlnx_resize_cq; + p_interface->query_cq = mlnx_query_cq; + p_interface->destroy_cq = mlnx_destroy_cq; + + p_interface->local_mad = mlnx_local_mad; + + + mlnx_memory_if(p_interface); + mlnx_direct_if(p_interface); + mlnx_mcast_if(p_interface); + } + + return; +} + diff --git a/branches/Ndi/hw/mthca/kernel/ib_cache.h b/branches/Ndi/hw/mthca/kernel/ib_cache.h new file mode 100644 index 00000000..debfd1f5 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/ib_cache.h @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _IB_CACHE_H +#define _IB_CACHE_H + +#include + +/** + * ib_get_cached_gid - Returns a cached GID table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @index: The index into the cached GID table to query. + * @gid: The GID value found at the specified index. + * + * ib_get_cached_gid() fetches the specified GID table entry stored in + * the local software cache. + */ +int ib_get_cached_gid(struct ib_device *device, + u8 port_num, + int index, + union ib_gid *gid); + +/** + * ib_find_cached_gid - Returns the port number and GID table index where + * a specified GID value occurs. + * @device: The device to query. + * @gid: The GID value to search for. + * @port_num: The port number of the device where the GID value was found. + * @index: The index into the cached GID table where the GID was found. This + * parameter may be NULL. + * + * ib_find_cached_gid() searches for the specified GID value in + * the local software cache. + */ +int ib_find_cached_gid(struct ib_device *device, + union ib_gid *gid, + u8 *port_num, + u16 *index); + +/** + * ib_get_cached_pkey - Returns a cached PKey table entry + * @device: The device to query. + * @port_num: The port number of the device to query. + * @index: The index into the cached PKey table to query. + * @pkey: The PKey value found at the specified index. + * + * ib_get_cached_pkey() fetches the specified PKey table entry stored in + * the local software cache. + */ +int ib_get_cached_pkey(struct ib_device *device_handle, + u8 port_num, + int index, + u16 *pkey); + +/** + * ib_find_cached_pkey - Returns the PKey table index where a specified + * PKey value occurs. + * @device: The device to query. + * @port_num: The port number of the device to search for the PKey. + * @pkey: The PKey value to search for. + * @index: The index into the cached PKey table where the PKey was found. + * + * ib_find_cached_pkey() searches the specified PKey table in + * the local software cache. + */ +int ib_find_cached_pkey(struct ib_device *device, + u8 port_num, + u16 pkey, + u16 *index); + + +int ib_cache_setup(void); +void ib_cache_cleanup(void); + +#endif /* _IB_CACHE_H */ diff --git a/branches/Ndi/hw/mthca/kernel/ib_mad.h b/branches/Ndi/hw/mthca/kernel/ib_mad.h new file mode 100644 index 00000000..e8a80806 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/ib_mad.h @@ -0,0 +1,579 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined( IB_MAD_H ) +#define IB_MAD_H + +#include + +/* Management base version */ +#define IB_MGMT_BASE_VERSION 1 + +/* Management classes */ +#define IB_MGMT_CLASS_SUBN_LID_ROUTED 0x01 +#define IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE 0x81 +#define IB_MGMT_CLASS_SUBN_ADM 0x03 +#define IB_MGMT_CLASS_PERF_MGMT 0x04 +#define IB_MGMT_CLASS_BM 0x05 +#define IB_MGMT_CLASS_DEVICE_MGMT 0x06 +#define IB_MGMT_CLASS_CM 0x07 +#define IB_MGMT_CLASS_SNMP 0x08 +#define IB_MGMT_CLASS_VENDOR_RANGE2_START 0x30 +#define IB_MGMT_CLASS_VENDOR_RANGE2_END 0x4F + +#define IB_OPENIB_OUI (0x001405) + +/* Management methods */ +#define IB_MGMT_METHOD_GET 0x01 +#define IB_MGMT_METHOD_SET 0x02 +#define IB_MGMT_METHOD_GET_RESP 0x81 +#define IB_MGMT_METHOD_SEND 0x03 +#define IB_MGMT_METHOD_TRAP 0x05 +#define IB_MGMT_METHOD_REPORT 0x06 +#define IB_MGMT_METHOD_REPORT_RESP 0x86 +#define IB_MGMT_METHOD_TRAP_REPRESS 0x07 + +#define IB_MGMT_METHOD_RESP 0x80 + +#define IB_MGMT_MAX_METHODS 128 + +/* RMPP information */ +#define IB_MGMT_RMPP_VERSION 1 + +#define IB_MGMT_RMPP_TYPE_DATA 1 +#define IB_MGMT_RMPP_TYPE_ACK 2 +#define IB_MGMT_RMPP_TYPE_STOP 3 +#define IB_MGMT_RMPP_TYPE_ABORT 4 + +#define IB_MGMT_RMPP_FLAG_ACTIVE 1 +#define IB_MGMT_RMPP_FLAG_FIRST (1<<1) +#define IB_MGMT_RMPP_FLAG_LAST (1<<2) + +#define IB_MGMT_RMPP_NO_RESPTIME 0x1F + +#define IB_MGMT_RMPP_STATUS_SUCCESS 0 +#define IB_MGMT_RMPP_STATUS_RESX 1 +#define IB_MGMT_RMPP_STATUS_ABORT_MIN 118 +#define IB_MGMT_RMPP_STATUS_T2L 118 +#define IB_MGMT_RMPP_STATUS_BAD_LEN 119 +#define IB_MGMT_RMPP_STATUS_BAD_SEG 120 +#define IB_MGMT_RMPP_STATUS_BADT 121 +#define IB_MGMT_RMPP_STATUS_W2S 122 +#define IB_MGMT_RMPP_STATUS_S2B 123 +#define IB_MGMT_RMPP_STATUS_BAD_STATUS 124 +#define IB_MGMT_RMPP_STATUS_UNV 125 +#define IB_MGMT_RMPP_STATUS_TMR 126 +#define IB_MGMT_RMPP_STATUS_UNSPEC 127 +#define IB_MGMT_RMPP_STATUS_ABORT_MAX 127 + +#define IB_QP1_QKEY 0x00000180 /* big endian */ +#define IB_QP_SET_QKEY 0x00000080 /* big endian */ + +struct ib_mad_hdr { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + __be16 class_specific; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; +}; + +struct ib_rmpp_hdr { + u8 rmpp_version; + u8 rmpp_type; + u8 rmpp_rtime_flags; + u8 rmpp_status; + __be32 seg_num; + __be32 paylen_newwin; +}; + +typedef u64 ib_sa_comp_mask; + +#define IB_SA_COMP_MASK(n) ((ib_sa_comp_mask) cl_hton64(1ull << n)) + +/* + * ib_sa_hdr and ib_sa_mad structures must be packed because they have + * 64-bit fields that are only 32-bit aligned. 64-bit architectures will + * lay them out wrong otherwise. 
(And unfortunately they are sent on + * the wire so we can't change the layout) + */ +#pragma pack(push,1) +struct ib_sa_hdr { + __be64 sm_key; + __be16 attr_offset; + __be16 reserved; + ib_sa_comp_mask comp_mask; +}; +#pragma pack(pop) + +struct ib_mad { + struct ib_mad_hdr mad_hdr; + u8 data[232]; +}; + +struct ib_rmpp_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 data[220]; +}; + +#pragma pack(push,1) +struct ib_sa_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + struct ib_sa_hdr sa_hdr; + u8 data[200]; +}; +#pragma pack(pop) + +struct ib_vendor_mad { + struct ib_mad_hdr mad_hdr; + struct ib_rmpp_hdr rmpp_hdr; + u8 reserved; + u8 oui[3]; + u8 data[216]; +}; + +/** + * ib_mad_send_buf - MAD data buffer and work request for sends. + * @mad: References an allocated MAD data buffer. The size of the data + * buffer is specified in the @send_wr.length field. + * @mapping: DMA mapping information. + * @mad_agent: MAD agent that allocated the buffer. + * @context: User-controlled context fields. + * @send_wr: An initialized work request structure used when sending the MAD. + * The wr_id field of the work request is initialized to reference this + * data structure. + * @sge: A scatter-gather list referenced by the work request. + * + * Users are responsible for initializing the MAD buffer itself, with the + * exception of specifying the payload length field in any RMPP MAD. + */ +struct ib_mad_send_buf { + struct ib_mad *mad; + dma_addr_t mapping; + struct ib_mad_agent *mad_agent; + void *context[2]; + struct _ib_send_wr send_wr; + struct ib_sge sge; +}; + +/** + * ib_get_rmpp_resptime - Returns the RMPP response time. + * @rmpp_hdr: An RMPP header. + */ +static inline u8 ib_get_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr) +{ + return rmpp_hdr->rmpp_rtime_flags >> 3; +} + +/** + * ib_get_rmpp_flags - Returns the RMPP flags. + * @rmpp_hdr: An RMPP header. + */ +static inline u8 ib_get_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr) +{ + return rmpp_hdr->rmpp_rtime_flags & 0x7; +} + +/** + * ib_set_rmpp_resptime - Sets the response time in an RMPP header. + * @rmpp_hdr: An RMPP header. + * @rtime: The response time to set. + */ +static inline void ib_set_rmpp_resptime(struct ib_rmpp_hdr *rmpp_hdr, u8 rtime) +{ + rmpp_hdr->rmpp_rtime_flags = ib_get_rmpp_flags(rmpp_hdr) | (rtime << 3); +} + +/** + * ib_set_rmpp_flags - Sets the flags in an RMPP header. + * @rmpp_hdr: An RMPP header. + * @flags: The flags to set. + */ +static inline void ib_set_rmpp_flags(struct ib_rmpp_hdr *rmpp_hdr, u8 flags) +{ + rmpp_hdr->rmpp_rtime_flags = (rmpp_hdr->rmpp_rtime_flags & 0xF1) | + (flags & 0x7); +} + +struct ib_mad_agent; +struct ib_mad_send_wc; +struct ib_mad_recv_wc; + +/** + * ib_mad_send_handler - callback handler for a sent MAD. + * @mad_agent: MAD agent that sent the MAD. + * @mad_send_wc: Send work completion information on the sent MAD. + */ +typedef void (*ib_mad_send_handler)(struct ib_mad_agent *mad_agent, + struct ib_mad_send_wc *mad_send_wc); + +/** + * ib_mad_snoop_handler - Callback handler for snooping sent MADs. + * @mad_agent: MAD agent that snooped the MAD. + * @send_wr: Work request information on the sent MAD. + * @mad_send_wc: Work completion information on the sent MAD. Valid + * only for snooping that occurs on a send completion. + * + * Clients snooping MADs should not modify data referenced by the @send_wr + * or @mad_send_wc. 
+ */
+typedef void (*ib_mad_snoop_handler)(struct ib_mad_agent *mad_agent,
+				     struct _ib_send_wr *send_wr,
+				     struct ib_mad_send_wc *mad_send_wc);
+
+/**
+ * ib_mad_recv_handler - callback handler for a received MAD.
+ * @mad_agent: MAD agent requesting the received MAD.
+ * @mad_recv_wc: Received work completion information on the received MAD.
+ *
+ * MADs received in response to a send request operation will be handed to
+ * the user after the send operation completes. All data buffers given
+ * to registered agents through this routine are owned by the receiving
+ * client, except for snooping agents. Clients snooping MADs should not
+ * modify the data referenced by @mad_recv_wc.
+ */
+typedef void (*ib_mad_recv_handler)(struct ib_mad_agent *mad_agent,
+				    struct ib_mad_recv_wc *mad_recv_wc);
+
+/**
+ * ib_mad_agent - Used to track MAD registration with the access layer.
+ * @device: Reference to the device the registration is on.
+ * @qp: Reference to the QP used for sending and receiving MADs.
+ * @mr: Memory region for system memory usable for DMA.
+ * @recv_handler: Callback handler for a received MAD.
+ * @send_handler: Callback handler for a sent MAD.
+ * @snoop_handler: Callback handler for snooped sent MADs.
+ * @context: User-specified context associated with this registration.
+ * @hi_tid: Access layer assigned transaction ID for this client.
+ * Unsolicited MADs sent by this client will have the upper 32-bits
+ * of their TID set to this value.
+ * @port_num: Port number on which the QP is registered.
+ * @rmpp_version: If set, indicates the RMPP version used by this agent.
+ */
+struct ib_mad_agent {
+	struct ib_device	*device;
+	struct ib_qp		*qp;
+	struct ib_mr		*mr;
+	ib_mad_recv_handler	recv_handler;
+	ib_mad_send_handler	send_handler;
+	ib_mad_snoop_handler	snoop_handler;
+	void			*context;
+	u32			hi_tid;
+	u8			port_num;
+	u8			rmpp_version;
+};
+
+/**
+ * ib_mad_send_wc - MAD send completion information.
+ * @wr_id: Work request identifier associated with the send MAD request.
+ * @status: Completion status.
+ * @vendor_err: Optional vendor error information returned with a failed
+ * request.
+ */
+struct ib_mad_send_wc {
+	u64			wr_id;
+	enum ib_wc_status	status;
+	u32			vendor_err;
+};
+
+/**
+ * ib_mad_recv_buf - received MAD buffer information.
+ * @list: Reference to the next data buffer for a received RMPP MAD.
+ * @grh: References a data buffer containing the global route header.
+ * The data referenced by this buffer is only valid if the GRH is
+ * valid.
+ * @mad: References the start of the received MAD.
+ */
+struct ib_mad_recv_buf {
+	struct list_head	list;
+	struct ib_grh		*grh;
+	struct ib_mad		*mad;
+};
+
+/**
+ * ib_mad_recv_wc - received MAD information.
+ * @wc: Completion information for the received data.
+ * @recv_buf: Specifies the location of the received data buffer(s).
+ * @rmpp_list: Specifies a list of RMPP reassembled received MAD buffers.
+ * @mad_len: The length of the received MAD, without duplicated headers.
+ *
+ * For a received response, the wr_id field of the wc is set to the wr_id
+ * of the corresponding send request.
+ */
+struct ib_mad_recv_wc {
+	struct _ib_wc		*wc;
+	struct ib_mad_recv_buf	recv_buf;
+	struct list_head	rmpp_list;
+	int			mad_len;
+};
+
+/**
+ * ib_mad_reg_req - MAD registration request
+ * @mgmt_class: Indicates which management class of MADs should be received
+ * by the caller. This field is only required if the user wishes to
+ * receive unsolicited MADs, otherwise it should be 0.
+ * @mgmt_class_version: Indicates which version of MADs for the given + * management class to receive. + * @oui: Indicates IEEE OUI when mgmt_class is a vendor class + * in the range from 0x30 to 0x4f. Otherwise not used. + * @method_mask: The caller will receive unsolicited MADs for any method + * where @method_mask = 1. + */ +struct ib_mad_reg_req { + u8 mgmt_class; + u8 mgmt_class_version; + u8 oui[3]; + DECLARE_BITMAP(method_mask, IB_MGMT_MAX_METHODS); +}; + +/** + * ib_register_mad_agent - Register to send/receive MADs. + * @device: The device to register with. + * @port_num: The port on the specified device to use. + * @qp_type: Specifies which QP to access. Must be either + * IB_QPT_QP0 or IB_QPT_QP1. + * @mad_reg_req: Specifies which unsolicited MADs should be received + * by the caller. This parameter may be NULL if the caller only + * wishes to receive solicited responses. + * @rmpp_version: If set, indicates that the client will send + * and receive MADs that contain the RMPP header for the given version. + * If set to 0, indicates that RMPP is not used by this client. + * @send_handler: The completion callback routine invoked after a send + * request has completed. + * @recv_handler: The completion callback routine invoked for a received + * MAD. + * @context: User specified context associated with the registration. + */ +struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, + u8 port_num, + enum ib_qp_type_t qp_type, + struct ib_mad_reg_req *mad_reg_req, + u8 rmpp_version, + ib_mad_send_handler send_handler, + ib_mad_recv_handler recv_handler, + void *context); + +enum ib_mad_snoop_flags { + /*IB_MAD_SNOOP_POSTED_SENDS = 1,*/ + /*IB_MAD_SNOOP_RMPP_SENDS = (1<<1),*/ + IB_MAD_SNOOP_SEND_COMPLETIONS = (1<<2), + /*IB_MAD_SNOOP_RMPP_SEND_COMPLETIONS = (1<<3),*/ + IB_MAD_SNOOP_RECVS = (1<<4) + /*IB_MAD_SNOOP_RMPP_RECVS = (1<<5),*/ + /*IB_MAD_SNOOP_REDIRECTED_QPS = (1<<6)*/ +}; + +/** + * ib_register_mad_snoop - Register to snoop sent and received MADs. + * @device: The device to register with. + * @port_num: The port on the specified device to use. + * @qp_type: Specifies which QP traffic to snoop. Must be either + * IB_QPT_QP0 or IB_QPT_QP1. + * @mad_snoop_flags: Specifies information where snooping occurs. + * @send_handler: The callback routine invoked for a snooped send. + * @recv_handler: The callback routine invoked for a snooped receive. + * @context: User specified context associated with the registration. + */ +struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, + u8 port_num, + enum ib_qp_type_t qp_type, + int mad_snoop_flags, + ib_mad_snoop_handler snoop_handler, + ib_mad_recv_handler recv_handler, + void *context); + +/** + * ib_unregister_mad_agent - Unregisters a client from using MAD services. + * @mad_agent: Corresponding MAD registration request to deregister. + * + * After invoking this routine, MAD services are no longer usable by the + * client on the associated QP. + */ +int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent); + +/** + * ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated + * with the registered client. + * @mad_agent: Specifies the associated registration to post the send to. + * @send_wr: Specifies the information needed to send the MAD(s). + * @bad_send_wr: Specifies the MAD on which an error was encountered. + * + * Sent MADs are not guaranteed to complete in the order that they were posted. 
+ * + * If the MAD requires RMPP, the data buffer should contain a single copy + * of the common MAD, RMPP, and class specific headers, followed by the class + * defined data. If the class defined data would not divide evenly into + * RMPP segments, then space must be allocated at the end of the referenced + * buffer for any required padding. To indicate the amount of class defined + * data being transferred, the paylen_newwin field in the RMPP header should + * be set to the size of the class specific header plus the amount of class + * defined data being transferred. The paylen_newwin field should be + * specified in network-byte order. + */ +int ib_post_send_mad(struct ib_mad_agent *mad_agent, + struct _ib_send_wr *send_wr, + struct _ib_send_wr **bad_send_wr); + +/** + * ib_coalesce_recv_mad - Coalesces received MAD data into a single buffer. + * @mad_recv_wc: Work completion information for a received MAD. + * @buf: User-provided data buffer to receive the coalesced buffers. The + * referenced buffer should be at least the size of the mad_len specified + * by @mad_recv_wc. + * + * This call copies a chain of received MAD segments into a single data buffer, + * removing duplicated headers. + */ +void ib_coalesce_recv_mad(struct ib_mad_recv_wc *mad_recv_wc, void *buf); + +/** + * ib_free_recv_mad - Returns data buffers used to receive a MAD. + * @mad_recv_wc: Work completion information for a received MAD. + * + * Clients receiving MADs through their ib_mad_recv_handler must call this + * routine to return the work completion buffers to the access layer. + */ +void ib_free_recv_mad(struct ib_mad_recv_wc *mad_recv_wc); + +/** + * ib_cancel_mad - Cancels an outstanding send MAD operation. + * @mad_agent: Specifies the registration associated with sent MAD. + * @wr_id: Indicates the work request identifier of the MAD to cancel. + * + * MADs will be returned to the user through the corresponding + * ib_mad_send_handler. + */ +void ib_cancel_mad(struct ib_mad_agent *mad_agent, u64 wr_id); + +/** + * ib_modify_mad - Modifies an outstanding send MAD operation. + * @mad_agent: Specifies the registration associated with sent MAD. + * @wr_id: Indicates the work request identifier of the MAD to modify. + * @timeout_ms: New timeout value for sent MAD. + * + * This call will reset the timeout value for a sent MAD to the specified + * value. + */ +int ib_modify_mad(struct ib_mad_agent *mad_agent, u64 wr_id, u32 timeout_ms); + +/** + * ib_redirect_mad_qp - Registers a QP for MAD services. + * @qp: Reference to a QP that requires MAD services. + * @rmpp_version: If set, indicates that the client will send + * and receive MADs that contain the RMPP header for the given version. + * If set to 0, indicates that RMPP is not used by this client. + * @send_handler: The completion callback routine invoked after a send + * request has completed. + * @recv_handler: The completion callback routine invoked for a received + * MAD. + * @context: User specified context associated with the registration. + * + * Use of this call allows clients to use MAD services, such as RMPP, + * on user-owned QPs. After calling this routine, users may send + * MADs on the specified QP by calling ib_mad_post_send. + */ +struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp, + u8 rmpp_version, + ib_mad_send_handler send_handler, + ib_mad_recv_handler recv_handler, + void *context); + +/** + * ib_process_mad_wc - Processes a work completion associated with a + * MAD sent or received on a redirected QP. 
+ * @mad_agent: Specifies the registered MAD service using the redirected QP. + * @wc: References a work completion associated with a sent or received + * MAD segment. + * + * This routine is used to complete or continue processing on a MAD request. + * If the work completion is associated with a send operation, calling + * this routine is required to continue an RMPP transfer or to wait for a + * corresponding response, if it is a request. If the work completion is + * associated with a receive operation, calling this routine is required to + * process an inbound or outbound RMPP transfer, or to match a response MAD + * with its corresponding request. + */ +int ib_process_mad_wc(struct ib_mad_agent *mad_agent, + struct _ib_wc *wc); + +/** + * ib_create_send_mad - Allocate and initialize a data buffer and work request + * for sending a MAD. + * @mad_agent: Specifies the registered MAD service to associate with the MAD. + * @remote_qpn: Specifies the QPN of the receiving node. + * @pkey_index: Specifies which PKey the MAD will be sent using. This field + * is valid only if the remote_qpn is QP 1. + * @ah: References the address handle used to transfer to the remote node. + * @rmpp_active: Indicates if the send will enable RMPP. + * @hdr_len: Indicates the size of the data header of the MAD. This length + * should include the common MAD header, RMPP header, plus any class + * specific header. + * @data_len: Indicates the size of any user-transferred data. The call will + * automatically adjust the allocated buffer size to account for any + * additional padding that may be necessary. + * @gfp_mask: GFP mask used for the memory allocation. + * + * This is a helper routine that may be used to allocate a MAD. Users are + * not required to allocate outbound MADs using this call. The returned + * MAD send buffer will reference a data buffer usable for sending a MAD, along + * with an initialized work request structure. Users may modify the returned + * MAD data buffer or work request before posting the send. + * + * The returned data buffer will be cleared. Users are responsible for + * initializing the common MAD and any class specific headers. If @rmpp_active + * is set, the RMPP header will be initialized for sending. + */ +struct ib_mad_send_buf * ib_create_send_mad(struct ib_mad_agent *mad_agent, + u32 remote_qpn, u16 pkey_index, + struct ib_ah *ah, int rmpp_active, + int hdr_len, int data_len, + unsigned int gfp_mask); + +/** + * ib_free_send_mad - Returns data buffers used to send a MAD. + * @send_buf: Previously allocated send data buffer. + */ +void ib_free_send_mad(struct ib_mad_send_buf *send_buf); + +#endif /* IB_MAD_H */ diff --git a/branches/Ndi/hw/mthca/kernel/ib_pack.h b/branches/Ndi/hw/mthca/kernel/ib_pack.h new file mode 100644 index 00000000..deb42e6c --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/ib_pack.h @@ -0,0 +1,245 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef IB_PACK_H +#define IB_PACK_H + +#include + +enum { + IB_LRH_BYTES = 8, + IB_GRH_BYTES = 40, + IB_BTH_BYTES = 12, + IB_DETH_BYTES = 8 +}; + +struct ib_field { + int struct_offset_bytes; + int struct_size_bytes; + int offset_words; + int offset_bits; + int size_bits; + char *field_name; +}; + +#define RESERVED \ + .field_name = "reserved" + +/* + * This macro cleans up the definitions of constants for BTH opcodes. + * It is used to define constants such as IB_OPCODE_UD_SEND_ONLY, + * which becomes IB_OPCODE_UD + IB_OPCODE_SEND_ONLY, and this gives + * the correct value. + * + * In short, user code should use the constants defined using the + * macro rather than worrying about adding together other constants. 
+*/ +#define IB_OPCODE(transport, op) \ + IB_OPCODE_ ## transport ## _ ## op = \ + IB_OPCODE_ ## transport + IB_OPCODE_ ## op + +enum { + /* transport types -- just used to define real constants */ + IB_OPCODE_RC = 0x00, + IB_OPCODE_UC = 0x20, + IB_OPCODE_RD = 0x40, + IB_OPCODE_UD = 0x60, + + /* operations -- just used to define real constants */ + IB_OPCODE_SEND_FIRST = 0x00, + IB_OPCODE_SEND_MIDDLE = 0x01, + IB_OPCODE_SEND_LAST = 0x02, + IB_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03, + IB_OPCODE_SEND_ONLY = 0x04, + IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05, + IB_OPCODE_RDMA_WRITE_FIRST = 0x06, + IB_OPCODE_RDMA_WRITE_MIDDLE = 0x07, + IB_OPCODE_RDMA_WRITE_LAST = 0x08, + IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09, + IB_OPCODE_RDMA_WRITE_ONLY = 0x0a, + IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b, + IB_OPCODE_RDMA_READ_REQUEST = 0x0c, + IB_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d, + IB_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e, + IB_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f, + IB_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10, + IB_OPCODE_ACKNOWLEDGE = 0x11, + IB_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, + IB_OPCODE_COMPARE_SWAP = 0x13, + IB_OPCODE_FETCH_ADD = 0x14, + + /* real constants follow -- see comment about above IB_OPCODE() + macro for more details */ + + /* RC */ + IB_OPCODE(RC, SEND_FIRST), + IB_OPCODE(RC, SEND_MIDDLE), + IB_OPCODE(RC, SEND_LAST), + IB_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(RC, SEND_ONLY), + IB_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_WRITE_FIRST), + IB_OPCODE(RC, RDMA_WRITE_MIDDLE), + IB_OPCODE(RC, RDMA_WRITE_LAST), + IB_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_WRITE_ONLY), + IB_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RC, RDMA_READ_REQUEST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_FIRST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE), + IB_OPCODE(RC, RDMA_READ_RESPONSE_LAST), + IB_OPCODE(RC, RDMA_READ_RESPONSE_ONLY), + IB_OPCODE(RC, ACKNOWLEDGE), + IB_OPCODE(RC, ATOMIC_ACKNOWLEDGE), + IB_OPCODE(RC, COMPARE_SWAP), + IB_OPCODE(RC, FETCH_ADD), + + /* UC */ + IB_OPCODE(UC, SEND_FIRST), + IB_OPCODE(UC, SEND_MIDDLE), + IB_OPCODE(UC, SEND_LAST), + IB_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(UC, SEND_ONLY), + IB_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(UC, RDMA_WRITE_FIRST), + IB_OPCODE(UC, RDMA_WRITE_MIDDLE), + IB_OPCODE(UC, RDMA_WRITE_LAST), + IB_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(UC, RDMA_WRITE_ONLY), + IB_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + + /* RD */ + IB_OPCODE(RD, SEND_FIRST), + IB_OPCODE(RD, SEND_MIDDLE), + IB_OPCODE(RD, SEND_LAST), + IB_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE), + IB_OPCODE(RD, SEND_ONLY), + IB_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_WRITE_FIRST), + IB_OPCODE(RD, RDMA_WRITE_MIDDLE), + IB_OPCODE(RD, RDMA_WRITE_LAST), + IB_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_WRITE_ONLY), + IB_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IB_OPCODE(RD, RDMA_READ_REQUEST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_FIRST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE), + IB_OPCODE(RD, RDMA_READ_RESPONSE_LAST), + IB_OPCODE(RD, RDMA_READ_RESPONSE_ONLY), + IB_OPCODE(RD, ACKNOWLEDGE), + IB_OPCODE(RD, ATOMIC_ACKNOWLEDGE), + IB_OPCODE(RD, COMPARE_SWAP), + IB_OPCODE(RD, FETCH_ADD), + + /* UD */ + IB_OPCODE(UD, SEND_ONLY), + IB_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE) +}; + +enum { + IB_LNH_RAW = 0, + IB_LNH_IP = 1, + IB_LNH_IBA_LOCAL = 2, + IB_LNH_IBA_GLOBAL = 3 +}; + +struct ib_unpacked_lrh { + u8 virtual_lane; + u8 link_version; + u8 service_level; + u8 
link_next_header; + __be16 destination_lid; + __be16 packet_length; + __be16 source_lid; +}; + +struct ib_unpacked_grh { + u8 ip_version; + u8 traffic_class; + __be32 flow_label; + __be16 payload_length; + u8 next_header; + u8 hop_limit; + union ib_gid source_gid; + union ib_gid destination_gid; +}; + +struct ib_unpacked_bth { + u8 opcode; + u8 solicited_event; + u8 mig_req; + u8 pad_count; + u8 transport_header_version; + __be16 pkey; + __be32 destination_qpn; + u8 ack_req; + __be32 psn; +}; + +struct ib_unpacked_deth { + __be32 qkey; + __be32 source_qpn; +}; + +struct ib_ud_header { + struct ib_unpacked_lrh lrh; + int grh_present; + struct ib_unpacked_grh grh; + struct ib_unpacked_bth bth; + struct ib_unpacked_deth deth; + int immediate_present; + __be32 immediate_data; +}; + +void ib_pack(const struct ib_field *desc, + int desc_len, + void *structure, + void *buf); + +void ib_unpack(const struct ib_field *desc, + int desc_len, + void *buf, + void *structure); + +void ib_ud_header_init(int payload_bytes, + int grh_present, + struct ib_ud_header *header); + +int ib_ud_header_pack(struct ib_ud_header *header, + void *buf); + +int ib_ud_header_unpack(void *buf, + struct ib_ud_header *header); + +#endif /* IB_PACK_H */ diff --git a/branches/Ndi/hw/mthca/kernel/ib_smi.h b/branches/Ndi/hw/mthca/kernel/ib_smi.h new file mode 100644 index 00000000..8cfe1a2a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/ib_smi.h @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined( IB_SMI_H ) +#define IB_SMI_H + +#include + +#define IB_SMP_DATA_SIZE 64 +#define IB_SMP_MAX_PATH_HOPS 64 + +#pragma pack(push,1) +struct ib_smp { + u8 base_version; + u8 mgmt_class; + u8 class_version; + u8 method; + __be16 status; + u8 hop_ptr; + u8 hop_cnt; + __be64 tid; + __be16 attr_id; + __be16 resv; + __be32 attr_mod; + __be64 mkey; + __be16 dr_slid; + __be16 dr_dlid; + u8 reserved[28]; + u8 data[IB_SMP_DATA_SIZE]; + u8 initial_path[IB_SMP_MAX_PATH_HOPS]; + u8 return_path[IB_SMP_MAX_PATH_HOPS]; +}; +#pragma pack(pop) + + +/* Subnet management attributes */ +#define IB_SMP_ATTR_NOTICE cl_hton16(0x0002) +#define IB_SMP_ATTR_NODE_DESC cl_hton16(0x0010) +#define IB_SMP_ATTR_NODE_INFO cl_hton16(0x0011) +#define IB_SMP_ATTR_SWITCH_INFO cl_hton16(0x0012) +#define IB_SMP_ATTR_GUID_INFO cl_hton16(0x0014) +#define IB_SMP_ATTR_PORT_INFO cl_hton16(0x0015) +#define IB_SMP_ATTR_PKEY_TABLE cl_hton16(0x0016) +#define IB_SMP_ATTR_SL_TO_VL_TABLE cl_hton16(0x0017) +#define IB_SMP_ATTR_VL_ARB_TABLE cl_hton16(0x0018) +#define IB_SMP_ATTR_LINEAR_FORWARD_TABLE cl_hton16(0x0019) +#define IB_SMP_ATTR_RANDOM_FORWARD_TABLE cl_hton16(0x001A) +#define IB_SMP_ATTR_MCAST_FORWARD_TABLE cl_hton16(0x001B) +#define IB_SMP_ATTR_SM_INFO cl_hton16(0x0020) +#define IB_SMP_ATTR_VENDOR_DIAG cl_hton16(0x0030) +#define IB_SMP_ATTR_LED_INFO cl_hton16(0x0031) +#define IB_SMP_ATTR_VENDOR_MASK cl_hton16(0xFF00) + +static inline u8 +ib_get_smp_direction(struct ib_smp *smp) +{ + return (u8)((smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION); +} + +#endif /* IB_SMI_H */ diff --git a/branches/Ndi/hw/mthca/kernel/ib_verbs.h b/branches/Ndi/hw/mthca/kernel/ib_verbs.h new file mode 100644 index 00000000..af98d422 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/ib_verbs.h @@ -0,0 +1,1343 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#if !defined(IB_VERBS_H) +#define IB_VERBS_H + +#include +#include +#include + +union ib_gid { + u8 raw[16]; + struct { + __be64 subnet_prefix; + __be64 interface_id; + } global; +}; + +enum ib_node_type { + IB_NODE_CA = 1, + IB_NODE_SWITCH, + IB_NODE_ROUTER +}; + +enum ib_device_cap_flags { + IB_DEVICE_RESIZE_MAX_WR = 1, + IB_DEVICE_BAD_PKEY_CNTR = (1<<1), + IB_DEVICE_BAD_QKEY_CNTR = (1<<2), + IB_DEVICE_RAW_MULTI = (1<<3), + IB_DEVICE_AUTO_PATH_MIG = (1<<4), + IB_DEVICE_CHANGE_PHY_PORT = (1<<5), + IB_DEVICE_UD_AV_PORT_ENFORCE = (1<<6), + IB_DEVICE_CURR_QP_STATE_MOD = (1<<7), + IB_DEVICE_SHUTDOWN_PORT = (1<<8), + IB_DEVICE_INIT_TYPE = (1<<9), + IB_DEVICE_PORT_ACTIVE_EVENT = (1<<10), + IB_DEVICE_SYS_IMAGE_GUID = (1<<11), + IB_DEVICE_RC_RNR_NAK_GEN = (1<<12), + IB_DEVICE_SRQ_RESIZE = (1<<13), + IB_DEVICE_N_NOTIFY_CQ = (1<<14), +}; + +struct ib_device_attr { + u64 fw_ver; + __be64 sys_image_guid; + u64 max_mr_size; + u64 page_size_cap; + u32 vendor_id; + u32 vendor_part_id; + u32 hw_ver; + int max_qp; + int max_qp_wr; + int device_cap_flags; + int max_sge; + int max_sge_rd; + int max_cq; + int max_cqe; + int max_mr; + int max_pd; + int max_qp_rd_atom; + int max_ee_rd_atom; + int max_res_rd_atom; + int max_qp_init_rd_atom; + int max_ee_init_rd_atom; + enum ib_atomic_cap atomic_cap; + int max_ee; + int max_rdd; + int max_mw; + int max_raw_ipv6_qp; + int max_raw_ethy_qp; + int max_mcast_grp; + int max_mcast_qp_attach; + int max_total_mcast_qp_attach; + int max_ah; + int max_fmr; + int max_map_per_fmr; + int max_srq; + int max_srq_wr; + int max_srq_sge; + u16 max_pkeys; + u8 local_ca_ack_delay; +}; + +static inline int ib_mtu_enum_to_int(int mtu) +{ + switch (mtu) { + case IB_MTU_LEN_256: return 256; + case IB_MTU_LEN_512: return 512; + case IB_MTU_LEN_1024: return 1024; + case IB_MTU_LEN_2048: return 2048; + case IB_MTU_LEN_4096: return 4096; + default: return -1; + } +} + +enum ib_port_state { + IB_PORT_NOP = 0, + IB_PORT_DOWN = 1, + IB_PORT_INIT = 2, + IB_PORT_ARMED = 3, + IB_PORT_ACTIVE = 4, + IB_PORT_ACTIVE_DEFER = 5 +}; + +enum ib_port_cap_flags { + IB_PORT_SM = 1 << 1, + IB_PORT_NOTICE_SUP = 1 << 2, + IB_PORT_TRAP_SUP = 1 << 3, + IB_PORT_OPT_IPD_SUP = 1 << 4, + IB_PORT_AUTO_MIGR_SUP = 1 << 5, + IB_PORT_SL_MAP_SUP = 1 << 6, + IB_PORT_MKEY_NVRAM = 1 << 7, + IB_PORT_PKEY_NVRAM = 1 << 8, + IB_PORT_LED_INFO_SUP = 1 << 9, + IB_PORT_SM_DISABLED = 1 << 10, + IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11, + IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12, + IB_PORT_CM_SUP = 1 << 16, + IB_PORT_SNMP_TUNNEL_SUP = 1 << 17, + IB_PORT_REINIT_SUP = 1 << 18, + IB_PORT_DEVICE_MGMT_SUP = 1 << 19, + IB_PORT_VENDOR_CLASS_SUP = 1 << 20, + IB_PORT_DR_NOTICE_SUP = 1 << 21, + IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22, + IB_PORT_BOOT_MGMT_SUP = 1 << 23, + IB_PORT_LINK_LATENCY_SUP = 1 << 24, + IB_PORT_CLIENT_REG_SUP = 1 << 25 +}; + +enum ib_port_width { + IB_WIDTH_1X = 1, + IB_WIDTH_4X = 2, + IB_WIDTH_8X = 4, + IB_WIDTH_12X = 8 +}; + +static inline int ib_width_enum_to_int(enum ib_port_width width) +{ + switch (width) { + case IB_WIDTH_1X: return 1; + case IB_WIDTH_4X: return 4; + case IB_WIDTH_8X: return 8; + case IB_WIDTH_12X: return 12; + default: return -1; + } +} + +struct ib_port_attr { + enum ib_port_state state; + enum ib_mtu max_mtu; 
+ enum ib_mtu active_mtu; + int gid_tbl_len; + u32 port_cap_flags; + u32 max_msg_sz; + u32 bad_pkey_cntr; + u32 qkey_viol_cntr; + u16 pkey_tbl_len; + u16 lid; + u16 sm_lid; + u8 lmc; + u8 max_vl_num; + u8 sm_sl; + u8 subnet_timeout; + u8 init_type_reply; + u8 active_width; + u8 active_speed; + u8 phys_state; +}; + +enum ib_device_modify_flags { + IB_DEVICE_MODIFY_SYS_IMAGE_GUID = 1 +}; + +struct ib_device_modify { + u64 sys_image_guid; +}; + +enum ib_port_modify_flags { + IB_PORT_SHUTDOWN = 1, + IB_PORT_INIT_TYPE = (1<<2), + IB_PORT_RESET_QKEY_CNTR = (1<<3) +}; + +struct ib_port_modify { + u32 set_port_cap_mask; + u32 clr_port_cap_mask; + u8 init_type; +}; + +enum ib_event_type { + IB_EVENT_CQ_ERR = IB_AE_CQ_ERROR, + IB_EVENT_QP_FATAL = IB_AE_QP_FATAL, + IB_EVENT_QP_REQ_ERR = IB_AE_WQ_REQ_ERROR, + IB_EVENT_QP_ACCESS_ERR = IB_AE_WQ_ACCESS_ERROR, + IB_EVENT_COMM_EST = IB_AE_QP_COMM, + IB_EVENT_SQ_DRAINED = IB_AE_SQ_DRAINED, + IB_EVENT_PATH_MIG = IB_AE_QP_APM, + IB_EVENT_PATH_MIG_ERR = IB_AE_QP_APM_ERROR, + IB_EVENT_DEVICE_FATAL = IB_AE_LOCAL_FATAL, + IB_EVENT_PORT_ACTIVE = IB_AE_PORT_ACTIVE, + IB_EVENT_PORT_ERR = IB_AE_PORT_DOWN, + IB_EVENT_SRQ_LIMIT_REACHED = IB_AE_SRQ_LIMIT_REACHED, + IB_EVENT_SRQ_CATAS_ERROR = IB_AE_SRQ_CATAS_ERROR, + IB_EVENT_SRQ_QP_LAST_WQE_REACHED = IB_AE_SRQ_QP_LAST_WQE_REACHED, + IB_EVENT_LID_CHANGE = IB_AE_UNKNOWN + 1, + IB_EVENT_PKEY_CHANGE, + IB_EVENT_SM_CHANGE +}; + +struct ib_event { + struct ib_device *device; + union { + struct ib_cq *cq; + struct ib_qp *qp; + struct ib_srq *srq; + u8 port_num; + } element; + enum ib_event_type event; + uint64_t vendor_specific; +}; + +struct ib_event_handler { + struct ib_device *device; + void (*handler)(struct ib_event_handler *, struct ib_event *); + struct list_head list; +}; + +#define INIT_IB_EVENT_HANDLER(_ptr, _device, _handler) \ + (_ptr)->device = _device; \ + (_ptr)->handler = _handler; \ + INIT_LIST_HEAD(&(_ptr)->list) + +struct ib_global_route { + union ib_gid dgid; + u32 flow_label; + u8 sgid_index; + u8 hop_limit; + u8 traffic_class; +}; + +struct ib_grh { + __be32 version_tclass_flow; + __be16 paylen; + u8 next_hdr; + u8 hop_limit; + union ib_gid sgid; + union ib_gid dgid; +}; + +enum { + IB_MULTICAST_QPN = 0xffffff +}; + +enum ib_ah_flags { + IB_AH_GRH = 1 +}; + +struct ib_ah_attr { + struct ib_global_route grh; + u16 dlid; + u8 sl; + u8 src_path_bits; + u8 static_rate; + u8 ah_flags; + u8 port_num; +}; + +#ifdef WIN_TO_BE_REMOVE +//define in ib_types.h +enum ib_wc_status { + IB_WC_SUCCESS, + IB_WC_LOC_LEN_ERR, + IB_WC_LOC_QP_OP_ERR, + IB_WC_LOC_EEC_OP_ERR, + IB_WC_LOC_PROT_ERR, + IB_WC_WR_FLUSH_ERR, + IB_WC_MW_BIND_ERR, + IB_WC_BAD_RESP_ERR, + IB_WC_LOC_ACCESS_ERR, + IB_WC_REM_INV_REQ_ERR, + IB_WC_REM_ACCESS_ERR, + IB_WC_REM_OP_ERR, + IB_WC_RETRY_EXC_ERR, + IB_WC_RNR_RETRY_EXC_ERR, + IB_WC_LOC_RDD_VIOL_ERR, + IB_WC_REM_INV_RD_REQ_ERR, + IB_WC_REM_ABORT_ERR, + IB_WC_INV_EECN_ERR, + IB_WC_INV_EEC_STATE_ERR, + IB_WC_FATAL_ERR, + IB_WC_RESP_TIMEOUT_ERR, + IB_WC_GENERAL_ERR +}; +#endif + +enum ib_cq_notify { + IB_CQ_SOLICITED, + IB_CQ_NEXT_COMP +}; + +struct ib_srq_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + ib_srq_attr_t attr; +}; + +struct ib_qp_cap { + u32 max_send_wr; + u32 max_recv_wr; + u32 max_send_sge; + u32 max_recv_sge; + u32 max_inline_data; +}; + +enum ib_sig_type { + IB_SIGNAL_ALL_WR, + IB_SIGNAL_REQ_WR +}; + +struct ib_qp_init_attr { + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + 
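/* if non-NULL, the QP takes its receives from this shared receive queue */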
struct ib_srq *srq;
+	struct ib_qp_cap cap;
+	enum ib_sig_type sq_sig_type;
+	enum ib_qp_type_t qp_type;
+	u8 port_num; /* special QP types only */
+};
+
+enum ib_rnr_timeout {
+	IB_RNR_TIMER_655_36 = 0,
+	IB_RNR_TIMER_000_01 = 1,
+	IB_RNR_TIMER_000_02 = 2,
+	IB_RNR_TIMER_000_03 = 3,
+	IB_RNR_TIMER_000_04 = 4,
+	IB_RNR_TIMER_000_06 = 5,
+	IB_RNR_TIMER_000_08 = 6,
+	IB_RNR_TIMER_000_12 = 7,
+	IB_RNR_TIMER_000_16 = 8,
+	IB_RNR_TIMER_000_24 = 9,
+	IB_RNR_TIMER_000_32 = 10,
+	IB_RNR_TIMER_000_48 = 11,
+	IB_RNR_TIMER_000_64 = 12,
+	IB_RNR_TIMER_000_96 = 13,
+	IB_RNR_TIMER_001_28 = 14,
+	IB_RNR_TIMER_001_92 = 15,
+	IB_RNR_TIMER_002_56 = 16,
+	IB_RNR_TIMER_003_84 = 17,
+	IB_RNR_TIMER_005_12 = 18,
+	IB_RNR_TIMER_007_68 = 19,
+	IB_RNR_TIMER_010_24 = 20,
+	IB_RNR_TIMER_015_36 = 21,
+	IB_RNR_TIMER_020_48 = 22,
+	IB_RNR_TIMER_030_72 = 23,
+	IB_RNR_TIMER_040_96 = 24,
+	IB_RNR_TIMER_061_44 = 25,
+	IB_RNR_TIMER_081_92 = 26,
+	IB_RNR_TIMER_122_88 = 27,
+	IB_RNR_TIMER_163_84 = 28,
+	IB_RNR_TIMER_245_76 = 29,
+	IB_RNR_TIMER_327_68 = 30,
+	IB_RNR_TIMER_491_52 = 31
+};
+
+enum ib_qp_attr_mask {
+	IB_QP_STATE = 1,
+	IB_QP_CUR_STATE = (1<<1),
+	IB_QP_EN_SQD_ASYNC_NOTIFY = (1<<2),
+	IB_QP_ACCESS_FLAGS = (1<<3),
+	IB_QP_PKEY_INDEX = (1<<4),
+	IB_QP_PORT = (1<<5),
+	IB_QP_QKEY = (1<<6),
+	IB_QP_AV = (1<<7),
+	IB_QP_PATH_MTU = (1<<8),
+	IB_QP_TIMEOUT = (1<<9),
+	IB_QP_RETRY_CNT = (1<<10),
+	IB_QP_RNR_RETRY = (1<<11),
+	IB_QP_RQ_PSN = (1<<12),
+	IB_QP_MAX_QP_RD_ATOMIC = (1<<13),
+	IB_QP_ALT_PATH = (1<<14),
+	IB_QP_MIN_RNR_TIMER = (1<<15),
+	IB_QP_SQ_PSN = (1<<16),
+	IB_QP_MAX_DEST_RD_ATOMIC = (1<<17),
+	IB_QP_PATH_MIG_STATE = (1<<18),
+	IB_QP_CAP = (1<<19),
+	IB_QP_DEST_QPN = (1<<20)
+};
+
+//TODO: the ib_qp_state literals below are also defined in ib_types.h, where they have DIFFERENT values!
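+
+/* Illustrative sketch only (not from the original patch): a typical
+ * RESET->INIT transition combines several ib_qp_attr_mask bits in one
+ * ibv_modify_qp() call (declared later in this header); "qp" is assumed
+ * to be a valid struct ib_qp pointer:
+ *
+ *	struct ib_qp_attr attr;
+ *	attr.qp_state        = IBQPS_INIT;
+ *	attr.pkey_index      = 0;
+ *	attr.port_num        = 1;
+ *	attr.qp_access_flags = MTHCA_ACCESS_LOCAL_WRITE;
+ *	ibv_modify_qp(qp, &attr,
+ *		IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT | IB_QP_ACCESS_FLAGS);
+ */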
+enum ib_qp_state { + IBQPS_RESET, + IBQPS_INIT, + IBQPS_RTR, + IBQPS_RTS, + IBQPS_SQD, + IBQPS_SQE, + IBQPS_ERR +}; + + +struct ib_qp_attr { + enum ib_qp_state qp_state; + enum ib_qp_state cur_qp_state; + enum ib_mtu path_mtu; + ib_apm_state_t path_mig_state; + u32 qkey; + u32 rq_psn; + u32 sq_psn; + u32 dest_qp_num; + int qp_access_flags; + struct ib_qp_cap cap; + struct ib_ah_attr ah_attr; + struct ib_ah_attr alt_ah_attr; + u16 pkey_index; + u16 alt_pkey_index; + u8 en_sqd_async_notify; + u8 sq_draining; + u8 max_rd_atomic; + u8 max_dest_rd_atomic; + u8 min_rnr_timer; + u8 port_num; + u8 timeout; + u8 retry_cnt; + u8 rnr_retry; + u8 alt_port_num; + u8 alt_timeout; +}; + +struct ib_sge { + u64 addr; + u32 length; + u32 lkey; +}; + + +typedef enum MTHCA_QP_ACCESS_FLAGS { + MTHCA_ACCESS_LOCAL_WRITE = 1, + MTHCA_ACCESS_REMOTE_WRITE = (1<<1), + MTHCA_ACCESS_REMOTE_READ = (1<<2), + MTHCA_ACCESS_REMOTE_ATOMIC = (1<<3), + MTHCA_ACCESS_MW_BIND = (1<<4) +} mthca_qp_access_t; + +struct ib_phys_buf { + u64 addr; + u64 size; +}; + +struct ib_mr_attr { + struct ib_pd *pd; + u64 device_virt_addr; + u64 size; + mthca_qp_access_t mr_access_flags; + u32 lkey; + u32 rkey; +}; + +enum ib_mr_rereg_flags { + IB_MR_REREG_TRANS = 1, + IB_MR_REREG_PD = (1<<1), + IB_MR_REREG_ACCESS = (1<<2) +}; + +struct ib_mw_bind { + struct ib_mr *mr; + u64 wr_id; + u64 addr; + u32 length; + int send_flags; + int mw_access_flags; +}; + +struct ib_fmr_attr { + int max_pages; + int max_maps; + u8 page_shift; +}; + +struct ib_ucontext { + struct ib_device *device; + PVOID user_uar; + struct ib_pd *pd; + atomic_t usecnt; /* count all resources */ + ULONG is_removing; + cl_list_item_t list_item; // chain of user contexts + // for tools support + KMUTEX mutex; + PMDL p_mdl; + PVOID va; + int fw_if_open; +}; + +struct ib_uobject { + u64 user_handle; /* handle given to us by userspace */ + struct ib_ucontext *context; /* associated user context */ + struct list_head list; /* link to context's list */ + u32 id; /* index into kernel idr */ +}; + +struct ib_umem { + u64 user_base; + u64 virt_base; + u64 length; + int offset; + int page_size; + int writable; + struct list_head chunk_list; +}; + +#pragma warning( disable : 4200 ) +struct ib_umem_chunk { + struct list_head list; + int nents; + int nmap; + struct scatterlist page_list[0]; +}; +#pragma warning( default : 4200 ) + +#define IB_UMEM_MAX_PAGE_CHUNK \ + ((PAGE_SIZE - offsetof(struct ib_umem_chunk, page_list)) / \ + ((char *) &((struct ib_umem_chunk *) 0)->page_list[1] - \ + (char *) &((struct ib_umem_chunk *) 0)->page_list[0])) + +struct ib_pd { + struct list_head list; /* for chaining AV MRs (for user mode only) */ + struct ib_device *device; + struct ib_ucontext *ucontext; + atomic_t usecnt; /* count all resources */ + KMUTEX mutex; /* for chaining AV MRs (for user mode only) */ +}; + +struct ib_ah { + struct ib_device *device; + struct ib_pd *pd; + struct ib_ucontext *ucontext; +}; + +typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context); + +struct ib_cq { + struct ib_device *device; + struct ib_ucontext *ucontext; + struct ib_mr *ib_mr; + ib_comp_handler comp_handler; + void (*event_handler)(struct ib_event *, void *); + void * cq_context; + int cqe; + atomic_t usecnt; /* count number of work queues */ +}; + +struct ib_srq { + struct ib_device *device; + struct ib_pd *pd; + struct ib_ucontext *ucontext; + struct ib_mr *ib_mr; + void (*event_handler)(struct ib_event *, void *); + void *srq_context; + atomic_t usecnt; /* count number of work queues */ +}; + +struct 
ib_qp { + struct ib_device *device; + struct ib_pd *pd; + struct ib_cq *send_cq; + struct ib_cq *recv_cq; + struct ib_srq *srq; + struct ib_ucontext *ucontext; + struct ib_mr *ib_mr; + void (*event_handler)(struct ib_event *, void *); + void *qp_context; + u32 qp_num; + enum ib_qp_type_t qp_type; +}; + +struct ib_mr { + struct list_head list; /* for chaining AV MRs (for user mode only) */ + struct ib_device *device; + struct ib_pd *pd; + u32 lkey; + u32 rkey; + atomic_t usecnt; /* count number of MWs */ +}; + +struct ib_mw { + struct ib_device *device; + struct ib_pd *pd; + u32 rkey; +}; + +struct ib_fmr { + struct ib_device *device; + struct ib_pd *pd; + struct list_head list; + u32 lkey; + u32 rkey; +}; + +struct ib_mad; +struct ib_grh; + +enum ib_process_mad_flags { + IB_MAD_IGNORE_MKEY = 1, + IB_MAD_IGNORE_BKEY = 2, + IB_MAD_IGNORE_ALL = IB_MAD_IGNORE_MKEY | IB_MAD_IGNORE_BKEY +}; + +enum ib_mad_result { + IB_MAD_RESULT_FAILURE = 0, /* (!SUCCESS is the important flag) */ + IB_MAD_RESULT_SUCCESS = 1 << 0, /* MAD was successfully processed */ + IB_MAD_RESULT_REPLY = 1 << 1, /* Reply packet needs to be sent */ + IB_MAD_RESULT_CONSUMED = 1 << 2 /* Packet consumed: stop processing */ +}; + +#define IB_DEVICE_NAME_MAX 64 + +struct ib_cache { + rwlock_t lock; + struct ib_event_handler event_handler; + struct ib_pkey_cache **pkey_cache; + struct ib_gid_cache **gid_cache; +}; + +struct mthca_dev; + +struct ib_device { + struct mthca_dev *mdev; + + char name[IB_DEVICE_NAME_MAX]; + + struct list_head event_handler_list; + spinlock_t event_handler_lock; + + struct list_head core_list; + struct list_head client_data_list; + spinlock_t client_data_lock; + + struct ib_cache cache; + + u32 flags; + + int (*query_device)(struct ib_device *device, + struct ib_device_attr *device_attr); + int (*query_port)(struct ib_device *device, + u8 port_num, + struct ib_port_attr *port_attr); + int (*query_gid_chunk)(struct ib_device *device, + u8 port_num, int index, + union ib_gid gid[8]); + int (*query_pkey_chunk)(struct ib_device *device, + u8 port_num, u16 index, u16 pkey[32]); + int (*modify_device)(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify); + int (*modify_port)(struct ib_device *device, + u8 port_num, int port_modify_mask, + struct ib_port_modify *port_modify); + struct ib_ucontext * (*alloc_ucontext)(struct ib_device *device, + ci_umv_buf_t* const p_umv_buf); + int (*dealloc_ucontext)(struct ib_ucontext *context); + struct ib_pd * (*alloc_pd)(struct ib_device *device, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf); + int (*dealloc_pd)(struct ib_pd *pd); + struct ib_ah * (*create_ah)(struct ib_pd *pd, + struct ib_ah_attr *ah_attr); + int (*modify_ah)(struct ib_ah *ah, + struct ib_ah_attr *ah_attr); + int (*query_ah)(struct ib_ah *ah, + struct ib_ah_attr *ah_attr); + int (*destroy_ah)(struct ib_ah *ah); + struct ib_srq * (*create_srq)(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + ci_umv_buf_t* const p_umv_buf); + int (*modify_srq)(struct ib_srq *srq, + ib_srq_attr_t *srq_attr, + ib_srq_attr_mask_t srq_attr_mask); + int (*query_srq)(struct ib_srq *srq, + ib_srq_attr_t *srq_attr); + int (*destroy_srq)(struct ib_srq *srq); + int (*post_srq_recv)(struct ib_srq *srq, + struct _ib_recv_wr *recv_wr, + struct _ib_recv_wr **bad_recv_wr); + struct ib_qp * (*create_qp)(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + ci_umv_buf_t* const p_umv_buf); + int (*modify_qp)(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int 
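/* bitmask of enum ib_qp_attr_mask values */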
qp_attr_mask); + int (*query_qp)(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + int (*destroy_qp)(struct ib_qp *qp); + int (*post_send)(struct ib_qp *qp, + struct _ib_send_wr *send_wr, + struct _ib_send_wr **bad_send_wr); + int (*post_recv)(struct ib_qp *qp, + struct _ib_recv_wr *recv_wr, + struct _ib_recv_wr **bad_recv_wr); + struct ib_cq * (*create_cq)(struct ib_device *device, int cqe, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf); + int (*destroy_cq)(struct ib_cq *cq); + int (*resize_cq)(struct ib_cq *cq, int *cqe); + int (*poll_cq)(struct ib_cq *cq, int num_entries, + struct _ib_wc *wc); + int (*peek_cq)(struct ib_cq *cq, int wc_cnt); + int (*req_notify_cq)(struct ib_cq *cq, + enum ib_cq_notify cq_notify); + int (*req_ncomp_notif)(struct ib_cq *cq, + int wc_cnt); + struct ib_mr * (*get_dma_mr)(struct ib_pd *pd, + mthca_qp_access_t mr_access_flags); + struct ib_mr * (*reg_phys_mr)(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + mthca_qp_access_t mr_access_flags, + u64 *iova_start); + struct ib_mr * (*reg_virt_mr)(struct ib_pd *pd, + void* __ptr64 vaddr, uint64_t length, uint64_t hca_va, + mthca_qp_access_t acc, boolean_t um_call); + int (*query_mr)(struct ib_mr *mr, + struct ib_mr_attr *mr_attr); + int (*dereg_mr)(struct ib_mr *mr); + int (*rereg_phys_mr)(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + mthca_qp_access_t mr_access_flags, + u64 *iova_start); + struct ib_mw * (*alloc_mw)(struct ib_pd *pd); + int (*bind_mw)(struct ib_qp *qp, + struct ib_mw *mw, + struct ib_mw_bind *mw_bind); + int (*dealloc_mw)(struct ib_mw *mw); + struct ib_fmr * (*alloc_fmr)(struct ib_pd *pd, + mthca_qp_access_t mr_access_flags, + struct ib_fmr_attr *fmr_attr); + int (*map_phys_fmr)(struct ib_fmr *fmr, + u64 *page_list, int list_len, + u64 iova); + int (*unmap_fmr)(struct list_head *fmr_list); + int (*dealloc_fmr)(struct ib_fmr *fmr); + int (*attach_mcast)(struct ib_qp *qp, + union ib_gid *gid, + u16 lid); + int (*detach_mcast)(struct ib_qp *qp, + union ib_gid *gid, + u16 lid); + int (*process_mad)(struct ib_device *device, + int process_mad_flags, + u8 port_num, + struct _ib_wc *in_wc, + struct _ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad); + + struct list_head port_list; + + u64 uverbs_cmd_mask; + __be64 node_guid; + u8 node_type; + u8 phys_port_cnt; +}; + +struct ib_client { + char *name; + void (*add) (struct ib_device *); + void (*remove)(struct ib_device *); + + struct list_head list; +}; + +struct ib_device *ib_alloc_device(size_t size); +void ib_dealloc_device(struct ib_device *device); + +int ib_register_device (struct ib_device *device); +void ib_unregister_device(struct ib_device *device); + +int ib_register_client (struct ib_client *client); +void ib_unregister_client(struct ib_client *client); + +void *ib_get_client_data(struct ib_device *device, struct ib_client *client); +void ib_set_client_data(struct ib_device *device, struct ib_client *client, + void *data); + +int ib_core_init(void); + +void ib_core_cleanup(void); + +int ib_register_event_handler (struct ib_event_handler *event_handler); +int ib_unregister_event_handler(struct ib_event_handler *event_handler); +void ib_dispatch_event(struct ib_event *event); + +int ib_query_device(struct ib_device *device, + struct ib_device_attr *device_attr); + +int ib_query_port(struct ib_device *device, + u8 port_num, struct ib_port_attr 
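/* out: filled with the port's attributes on success */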
*port_attr); + +int ib_query_gid_chunk(struct ib_device *device, + u8 port_num, int index, union ib_gid gid[8]); + +int ib_query_pkey_chunk(struct ib_device *device, + u8 port_num, u16 index, u16 pkey[32]); + +int ib_modify_device(struct ib_device *device, + int device_modify_mask, + struct ib_device_modify *device_modify); + +int ib_modify_port(struct ib_device *device, + u8 port_num, int port_modify_mask, + struct ib_port_modify *port_modify); + +/** + * ibv_alloc_pd - Allocates an unused protection domain. + * @device: The device on which to allocate the protection domain. + * @context: user process context (for application calls only) + * @p_umv_buf: parameters structure (for application calls only) + * + * A protection domain object provides an association between QPs, shared + * receive queues, address handles, memory regions, and memory windows. + */ +struct ib_pd *ibv_alloc_pd(struct ib_device *device, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + +/** + * ibv_dealloc_pd - Deallocates a protection domain. + * @pd: The protection domain to deallocate. + */ +int ibv_dealloc_pd(struct ib_pd *pd); + +/** + * ibv_create_ah - Creates an address handle for the given address vector. + * @pd: The protection domain associated with the address handle. + * @ah_attr: The attributes of the address vector. + * @context: user process context (for application calls only) + * @p_umv_buf: parameters structure (for application calls only) + * + * The address handle is used to reference a local or global destination + * in all UD QP post sends. + */ +struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + +/** + * ibv_create_ah_from_wc - Creates an address handle associated with the + * sender of the specified work completion. + * @pd: The protection domain associated with the address handle. + * @wc: Work completion information associated with a received message. + * @grh: References the received global route header. This parameter is + * ignored unless the work completion indicates that the GRH is valid. + * @port_num: The outbound port number to associate with the address. + * + * The address handle is used to reference a local or global destination + * in all UD QP post sends. + */ +struct ib_ah *ibv_create_ah_from_wc(struct ib_pd *pd, struct _ib_wc *wc, + struct ib_grh *grh, u8 port_num); + +/** + * ibv_modify_ah - Modifies the address vector associated with an address + * handle. + * @ah: The address handle to modify. + * @ah_attr: The new address vector attributes to associate with the + * address handle. + */ +int ibv_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +/** + * ibv_query_ah - Queries the address vector associated with an address + * handle. + * @ah: The address handle to query. + * @ah_attr: The address vector attributes associated with the address + * handle. + */ +int ibv_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr); + +/** + * ibv_destroy_ah - Destroys an address handle. + * @ah: The address handle to destroy. + */ +int ibv_destroy_ah(struct ib_ah *ah); + +/** + * ibv_create_srq - Creates a SRQ associated with the specified protection + * domain. + * @pd: The protection domain associated with the SRQ. + * @srq_init_attr: A list of initial attributes required to create the + * SRQ. If SRQ creation succeeds, then the attributes are updated to + * the actual capabilities of the created SRQ. 
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ *
+ * srq_attr->max_wr and srq_attr->max_sge are read to determine the
+ * requested size of the SRQ, and set to the actual values allocated
+ * on return. If ibv_create_srq() succeeds, then max_wr and max_sge
+ * will always be at least as large as the requested values.
+ */
+struct ib_srq *ibv_create_srq(struct ib_pd *pd,
+	struct ib_srq_init_attr *srq_init_attr,
+	struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+
+/**
+ * ibv_modify_srq - Modifies the attributes for the specified SRQ.
+ * @srq: The SRQ to modify.
+ * @srq_attr: On input, specifies the SRQ attributes to modify. On output,
+ *   the current values of selected SRQ attributes are returned.
+ * @srq_attr_mask: A bit-mask used to specify which attributes of the SRQ
+ *   are being modified.
+ *
+ * The mask may contain IB_SRQ_MAX_WR to resize the SRQ and/or
+ * IB_SRQ_LIMIT to set the SRQ's limit and request notification when
+ * the number of receives queued drops below the limit.
+ */
+int ibv_modify_srq(struct ib_srq *srq,
+	ib_srq_attr_t *srq_attr,
+	ib_srq_attr_mask_t srq_attr_mask);
+
+/**
+ * ibv_query_srq - Returns the attribute list and current values for the
+ *   specified SRQ.
+ * @srq: The SRQ to query.
+ * @srq_attr: The attributes of the specified SRQ.
+ */
+int ibv_query_srq(struct ib_srq *srq,
+	ib_srq_attr_t *srq_attr);
+
+/**
+ * ibv_destroy_srq - Destroys the specified SRQ.
+ * @srq: The SRQ to destroy.
+ */
+int ibv_destroy_srq(struct ib_srq *srq);
+
+/**
+ * ibv_post_srq_recv - Posts a list of work requests to the specified SRQ.
+ * @srq: The SRQ to post the work request on.
+ * @recv_wr: A list of work requests to post on the receive queue.
+ * @bad_recv_wr: On an immediate failure, this parameter will reference
+ *   the work request that failed to be posted on the SRQ.
+ */
+static inline int ibv_post_srq_recv(struct ib_srq *srq,
+	struct _ib_recv_wr *recv_wr,
+	struct _ib_recv_wr **bad_recv_wr)
+{
+	return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
+}
+
+/**
+ * ibv_create_qp - Creates a QP associated with the specified protection
+ *   domain.
+ * @pd: The protection domain associated with the QP.
+ * @qp_init_attr: A list of initial attributes required to create the
+ *   QP. If QP creation succeeds, then the attributes are updated to
+ *   the actual capabilities of the created QP.
+ * @context: user process context (for application calls only)
+ * @p_umv_buf: parameters structure (for application calls only)
+ */
+struct ib_qp *ibv_create_qp(struct ib_pd *pd,
+	struct ib_qp_init_attr *qp_init_attr,
+	struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf);
+
+/**
+ * ibv_modify_qp - Modifies the attributes for the specified QP and then
+ *   transitions the QP to the given state.
+ * @qp: The QP to modify.
+ * @qp_attr: On input, specifies the QP attributes to modify. On output,
+ *   the current values of selected QP attributes are returned.
+ * @qp_attr_mask: A bit-mask used to specify which attributes of the QP
+ *   are being modified.
+ */
+int ibv_modify_qp(struct ib_qp *qp,
+	struct ib_qp_attr *qp_attr,
+	int qp_attr_mask);
+
+/**
+ * ibv_query_qp - Returns the attribute list and current values for the
+ *   specified QP.
+ * @qp: The QP to query.
+ * @qp_attr: The attributes of the specified QP.
+ * @qp_attr_mask: A bit-mask used to select specific attributes to query.
+ * @qp_init_attr: Additional attributes of the selected QP.
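+ *
+ * A minimal usage sketch (illustrative only, not part of the original
+ * patch; "qp" is assumed to be a valid struct ib_qp pointer):
+ *
+ *	struct ib_qp_attr attr;
+ *	struct ib_qp_init_attr init_attr;
+ *	if (!ibv_query_qp(qp, &attr, IB_QP_STATE | IB_QP_CAP, &init_attr) &&
+ *	    attr.qp_state == IBQPS_RTS)
+ *		;	/* QP is ready to post sends */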
+ * + * The qp_attr_mask may be used to limit the query to gathering only the + * selected attributes. + */ +int ibv_query_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr); + +/** + * ibv_destroy_qp - Destroys the specified QP. + * @qp: The QP to destroy. + */ +int ibv_destroy_qp(struct ib_qp *qp); + +/** + * ib_post_send - Posts a list of work requests to the send queue of + * the specified QP. + * @qp: The QP to post the work request on. + * @send_wr: A list of work requests to post on the send queue. + * @bad_send_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_send(struct ib_qp *qp, + struct _ib_send_wr *send_wr, + struct _ib_send_wr **bad_send_wr) +{ + return qp->device->post_send(qp, send_wr, bad_send_wr); +} + +/** + * ib_post_recv - Posts a list of work requests to the receive queue of + * the specified QP. + * @qp: The QP to post the work request on. + * @recv_wr: A list of work requests to post on the receive queue. + * @bad_recv_wr: On an immediate failure, this parameter will reference + * the work request that failed to be posted on the QP. + */ +static inline int ib_post_recv(struct ib_qp *qp, + struct _ib_recv_wr *recv_wr, + struct _ib_recv_wr **bad_recv_wr) +{ + return qp->device->post_recv(qp, recv_wr, bad_recv_wr); +} + +/** + * ibv_create_cq - Creates a CQ on the specified device. + * @device: The device on which to create the CQ. + * @comp_handler: A user-specified callback that is invoked when a + * completion event occurs on the CQ. + * @event_handler: A user-specified callback that is invoked when an + * asynchronous event not associated with a completion occurs on the CQ. + * @cq_context: Context associated with the CQ returned to the user via + * the associated completion and event handlers. + * @cqe: The minimum size of the CQ. + * @context: user process context (for application calls only) + * @p_umv_buf: parameters structure (for application calls only) + * + * Users can examine the cq structure to determine the actual CQ size. + */ +struct ib_cq *ibv_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, int cqe, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf); + +/** + * ibv_resize_cq - Modifies the capacity of the CQ. + * @cq: The CQ to resize. + * @cqe: The minimum size of the CQ. + * + * Users can examine the cq structure to determine the actual CQ size. + */ +int ibv_resize_cq(struct ib_cq *cq, int cqe); + +/** + * ibv_destroy_cq - Destroys the specified CQ. + * @cq: The CQ to destroy. + */ +int ibv_destroy_cq(struct ib_cq *cq); + +/** + * ib_poll_cq - poll a CQ for completion(s) + * @cq:the CQ being polled + * @num_entries:maximum number of completions to return + * @wc:array of at least @num_entries &struct _ib_wc where completions + * will be returned + * + * Poll a CQ for (possibly multiple) completions. If the return value + * is < 0, an error occurred. If the return value is >= 0, it is the + * number of completions returned. If the return value is + * non-negative and < num_entries, then the CQ was emptied. + */ +static inline int ib_poll_cq(struct ib_cq *cq, int num_entries, + struct _ib_wc *wc) +{ + return cq->device->poll_cq(cq, num_entries, wc); +} + +/** + * ib_peek_cq - Returns the number of unreaped completions currently + * on the specified CQ. + * @cq: The CQ to peek. 
+ * @wc_cnt: A minimum number of unreaped completions to check for.
+ *
+ * If the number of unreaped completions is greater than or equal to wc_cnt,
+ * this function returns wc_cnt, otherwise, it returns the actual number of
+ * unreaped completions.
+ */
+int ib_peek_cq(struct ib_cq *cq, int wc_cnt);
+
+/**
+ * ib_req_notify_cq - Request completion notification on a CQ.
+ * @cq: The CQ to generate an event for.
+ * @cq_notify: If set to %IB_CQ_SOLICITED, completion notification will
+ *   occur on the next solicited event. If set to %IB_CQ_NEXT_COMP,
+ *   notification will occur on the next completion.
+ */
+static inline int ib_req_notify_cq(struct ib_cq *cq,
+	enum ib_cq_notify cq_notify)
+{
+	return cq->device->req_notify_cq(cq, cq_notify);
+}
+
+/**
+ * ib_req_ncomp_notif - Request completion notification when there are
+ *   at least the specified number of unreaped completions on the CQ.
+ * @cq: The CQ to generate an event for.
+ * @wc_cnt: The number of unreaped completions that should be on the
+ *   CQ before an event is generated.
+ */
+static inline int ib_req_ncomp_notif(struct ib_cq *cq, int wc_cnt)
+{
+	return cq->device->req_ncomp_notif ?
+		cq->device->req_ncomp_notif(cq, wc_cnt) :
+		-ENOSYS;
+}
+
+/**
+ * ibv_reg_mr - Prepares a virtually addressed memory region for use
+ *   by an HCA.
+ * @pd: The protection domain assigned to the registered region.
+ * @vaddr: virtual address of the region
+ * @length: Specifies the size of the region.
+ * @hca_va: virtual address in HCA
+ * @mr_access_flags: Specifies the memory access rights.
+ * @um_call: call from user, when TRUE.
+ */
+struct ib_mr *ibv_reg_mr(struct ib_pd *pd,
+	mthca_qp_access_t mr_access_flags,
+	void* __ptr64 vaddr,
+	uint64_t length,
+	uint64_t hca_va,
+	boolean_t um_call
+	);
+
+/**
+ * ibv_get_dma_mr - Returns a memory region for system memory that is
+ *   usable for DMA.
+ * @pd: The protection domain associated with the memory region.
+ * @mr_access_flags: Specifies the memory access rights.
+ */
+struct ib_mr *ibv_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t mr_access_flags);
+
+/**
+ * ibv_reg_phys_mr - Prepares a virtually addressed memory region for use
+ *   by an HCA.
+ * @pd: The protection domain assigned to the registered region.
+ * @phys_buf_array: Specifies a list of physical buffers to use in the
+ *   memory region.
+ * @num_phys_buf: Specifies the size of the phys_buf_array.
+ * @mr_access_flags: Specifies the memory access rights.
+ * @iova_start: The offset of the region's starting I/O virtual address.
+ */
+struct ib_mr *ibv_reg_phys_mr(struct ib_pd *pd,
+	struct ib_phys_buf *phys_buf_array,
+	int num_phys_buf,
+	mthca_qp_access_t mr_access_flags,
+	u64 *iova_start);
+
+/**
+ * ibv_rereg_phys_mr - Modifies the attributes of an existing memory region.
+ *   Conceptually, this call performs a deregister memory region followed
+ *   by a register physical memory region. Where possible, resources are
+ *   reused instead of deallocated and reallocated.
+ * @mr: The memory region to modify.
+ * @mr_rereg_mask: A bit-mask used to indicate which of the following
+ *   properties of the memory region are being modified.
+ * @pd: If %IB_MR_REREG_PD is set in mr_rereg_mask, this field specifies
+ *   the new protection domain to associate with the memory region,
+ *   otherwise, this parameter is ignored.
+ * @phys_buf_array: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
+ *   field specifies a list of physical buffers to use in the new
+ *   translation, otherwise, this parameter is ignored.
+ * @num_phys_buf: If %IB_MR_REREG_TRANS is set in mr_rereg_mask, this
+ *   field specifies the size of the phys_buf_array, otherwise, this
+ *   parameter is ignored.
+ * @mr_access_flags: If %IB_MR_REREG_ACCESS is set in mr_rereg_mask, this
+ *   field specifies the new memory access rights, otherwise, this
+ *   parameter is ignored.
+ * @iova_start: The offset of the region's starting I/O virtual address.
+ */
+int ibv_rereg_phys_mr(struct ib_mr *mr,
+	int mr_rereg_mask,
+	struct ib_pd *pd,
+	struct ib_phys_buf *phys_buf_array,
+	int num_phys_buf,
+	mthca_qp_access_t mr_access_flags,
+	u64 *iova_start);
+
+/**
+ * ibv_query_mr - Retrieves information about a specific memory region.
+ * @mr: The memory region to retrieve information about.
+ * @mr_attr: The attributes of the specified memory region.
+ */
+int ibv_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);
+
+/**
+ * ibv_dereg_mr - Deregisters a memory region and removes it from the
+ *   HCA translation table.
+ * @mr: The memory region to deregister.
+ */
+int ibv_dereg_mr(struct ib_mr *mr);
+
+/**
+ * ibv_alloc_mw - Allocates a memory window.
+ * @pd: The protection domain associated with the memory window.
+ */
+struct ib_mw *ibv_alloc_mw(struct ib_pd *pd);
+
+/**
+ * ib_bind_mw - Posts a work request to the send queue of the specified
+ *   QP, which binds the memory window to the given address range and
+ *   remote access attributes.
+ * @qp: QP to post the bind work request on.
+ * @mw: The memory window to bind.
+ * @mw_bind: Specifies information about the memory window, including
+ *   its address range, remote access rights, and associated memory region.
+ */
+static inline int ib_bind_mw(struct ib_qp *qp,
+	struct ib_mw *mw,
+	struct ib_mw_bind *mw_bind)
+{
+	/* XXX reference counting in corresponding MR? */
+	return mw->device->bind_mw ?
+		mw->device->bind_mw(qp, mw, mw_bind) :
+		-ENOSYS;
+}
+
+/**
+ * ibv_dealloc_mw - Deallocates a memory window.
+ * @mw: The memory window to deallocate.
+ */
+int ibv_dealloc_mw(struct ib_mw *mw);
+
+/**
+ * ibv_alloc_fmr - Allocates an unmapped fast memory region.
+ * @pd: The protection domain associated with the unmapped region.
+ * @mr_access_flags: Specifies the memory access rights.
+ * @fmr_attr: Attributes of the unmapped region.
+ *
+ * A fast memory region must be mapped before it can be used as part of
+ * a work request.
+ */
+struct ib_fmr *ibv_alloc_fmr(struct ib_pd *pd,
+	mthca_qp_access_t mr_access_flags,
+	struct ib_fmr_attr *fmr_attr);
+
+/**
+ * ibv_map_phys_fmr - Maps a list of physical pages to a fast memory region.
+ * @fmr: The fast memory region to associate with the pages.
+ * @page_list: An array of physical pages to map to the fast memory region.
+ * @list_len: The number of pages in page_list.
+ * @iova: The I/O virtual address to use with the mapped region.
+ */
+int ibv_map_phys_fmr(struct ib_fmr *fmr,
+	u64 *page_list, int list_len,
+	u64 iova);
+
+/**
+ * ibv_unmap_fmr - Removes the mapping from a list of fast memory regions.
+ * @fmr_list: A linked list of fast memory regions to unmap.
+ */
+int ibv_unmap_fmr(struct list_head *fmr_list);
+
+/**
+ * ibv_dealloc_fmr - Deallocates a fast memory region.
+ * @fmr: The fast memory region to deallocate.
+ */
+int ibv_dealloc_fmr(struct ib_fmr *fmr);
+
+/**
+ * ibv_attach_mcast - Attaches the specified QP to a multicast group.
+ * @qp: QP to attach to the multicast group. The QP must be type
+ *   IB_QPT_UNRELIABLE_DGRM.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ *
+ * In order to send and receive multicast packets, subnet
+ * administration must have created the multicast group and configured
+ * the fabric appropriately. The port associated with the specified
+ * QP must also be a member of the multicast group.
+ */
+int ibv_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+/**
+ * ibv_detach_mcast - Detaches the specified QP from a multicast group.
+ * @qp: QP to detach from the multicast group.
+ * @gid: Multicast group GID.
+ * @lid: Multicast group LID in host byte order.
+ */
+int ibv_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid);
+
+/**
+ * ibv_um_close - Releases an application context.
+ * @h_um_ca: application context
+ */
+void ibv_um_close(struct ib_ucontext * h_um_ca);
+
+#endif /* IB_VERBS_H */
diff --git a/branches/Ndi/hw/mthca/kernel/mt_atomic.h b/branches/Ndi/hw/mthca/kernel/mt_atomic.h
new file mode 100644
index 00000000..4d1411e7
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_atomic.h
@@ -0,0 +1,49 @@
+#ifndef MT_ATOMIC_H
+#define MT_ATOMIC_H
+
+#include "complib/cl_atomic.h"
+
+typedef atomic32_t atomic_t;
+
+#define atomic_inc cl_atomic_inc
+#define atomic_dec cl_atomic_dec
+
+static inline atomic_t atomic_read(atomic_t *pval)
+{
+	return *pval;
+}
+
+static inline void atomic_set(atomic_t *pval, long val)
+{
+	*pval = (atomic_t)val;
+}
+
+/**
+* atomic_inc_and_test - increment and test
+* pval: pointer of type atomic_t
+*
+* Atomically increments pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/
+static inline int
+atomic_inc_and_test(atomic_t *pval)
+{
+	return cl_atomic_inc(pval) == 0;
+}
+
+/**
+* atomic_dec_and_test - decrement and test
+* pval: pointer of type atomic_t
+*
+* Atomically decrements pval by 1 and
+* returns true if the result is 0, or false for all other
+* cases.
+*/
+static inline int
+atomic_dec_and_test(atomic_t *pval)
+{
+	return cl_atomic_dec(pval) == 0;
+}
+
+#endif
diff --git a/branches/Ndi/hw/mthca/kernel/mt_bitmap.h b/branches/Ndi/hw/mthca/kernel/mt_bitmap.h
new file mode 100644
index 00000000..550528d3
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_bitmap.h
@@ -0,0 +1,107 @@
+#ifndef MT_BITMAP_H
+#define MT_BITMAP_H
+
+#include
+
+// DECLARE_BITMAP
+#define DECLARE_BITMAP(name,bits) \
+	unsigned long name[BITS_TO_LONGS(bits)]
+
+/**
+* atomic_set_bit - Atomically set a bit in memory
+* @nr: the bit to set
+* @addr: the address to start counting from
+*
+* This function is atomic and may not be reordered. See __set_bit()
+* if you do not require the atomic guarantees.
+*
+* Note: there are no guarantees that this function will not be reordered
+* on non x86 architectures, so if you are writing portable code,
+* make sure not to rely on its reordering guarantees.
+*
+* Note: @nr must select a bit within a single 32-bit word here;
+* use set_bit() below for bit numbers beyond 31.
+*/
+static inline unsigned long atomic_set_bit(int nr, volatile long * addr)
+{
+	return InterlockedOr( addr, (1 << nr) );
+}
+
+/**
+* atomic_clear_bit - Clears a bit in memory
+* @nr: Bit to clear
+* @addr: Address to start counting from
+*
+* atomic_clear_bit() is atomic and may not be reordered. 
However, it does +* not contain a memory barrier, so if it is used for locking purposes, +* you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit() +* in order to ensure changes are visible on other processors. +*/ +static inline unsigned long atomic_clear_bit(int nr, volatile long * addr) +{ + return InterlockedAnd( addr, ~(1 << nr) ); +} + +static inline int set_bit(int nr,long * addr) +{ + addr += nr >> 5; + return atomic_set_bit( nr & 0x1f, (volatile long *)addr ); +} + +static inline int clear_bit(int nr, long * addr) +{ + addr += nr >> 5; + return atomic_clear_bit( nr & 0x1f, (volatile long *)addr ); +} + +static inline int test_bit(int nr, const unsigned long * addr) +{ + int mask; + + addr += nr >> 5; + mask = 1 << (nr & 0x1f); + return ((mask & *addr) != 0); +} + + +/** +* bitmap_zero - clear the bitmap +* @dst: the bitmap address +* @nbits: the bitmap size in bits +* +*/ +static inline void bitmap_zero(unsigned long *dst, int nbits) +{ + if (nbits <= BITS_PER_LONG) + *dst = 0UL; + else { + int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); + RtlZeroMemory(dst, len); + } +} + +#define BITMAP_LAST_WORD_MASK(nbits) \ + ( ((nbits) % BITS_PER_LONG) ? (1UL<<((nbits) % BITS_PER_LONG))-1 : ~0UL ) + +int __bitmap_full(const unsigned long *bitmap, int bits); + +static inline int bitmap_full(const unsigned long *src, int nbits) +{ + if (nbits <= BITS_PER_LONG) + return ! (~(*src) & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_full(src, nbits); +} + +int __bitmap_empty(const unsigned long *bitmap, int bits); + +static inline int bitmap_empty(const unsigned long *src, int nbits) +{ + if (nbits <= BITS_PER_LONG) + return ! (*src & BITMAP_LAST_WORD_MASK(nbits)); + else + return __bitmap_empty(src, nbits); +} + + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_cache.c b/branches/Ndi/hw/mthca/kernel/mt_cache.c new file mode 100644 index 00000000..6dc9ce72 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_cache.c @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_cache.tmh"
+#endif
+#include
+
+#include "ib_cache.h"
+
+#ifdef ALLOC_PRAGMA
+#pragma alloc_text (PAGE, ib_cache_setup)
+#pragma alloc_text (PAGE, ib_cache_cleanup)
+#endif
+
+
+#pragma warning( disable : 4200)
+struct ib_pkey_cache {
+	int table_len;
+	u16 table[0];
+};
+
+struct ib_gid_cache {
+	int table_len;
+	union ib_gid table[0];
+};
+#pragma warning( default : 4200)
+
+struct ib_update_work {
+	PIO_WORKITEM work_item;
+	struct ib_device *device;
+	u8 port_num;
+};
+
+int ib_get_cached_gid(struct ib_device *device,
+	u8 port_num,
+	int index,
+	union ib_gid *gid)
+{
+	struct ib_gid_cache *cache;
+	int ret = 0;
+	SPIN_LOCK_PREP(lh);
+
+	// sanity checks
+	if (port_num < start_port(device) || port_num > end_port(device))
+		return -EINVAL;
+	if (!device->cache.gid_cache)
+		return -EFAULT;
+
+	read_lock_irqsave(&device->cache.lock, &lh);
+
+	cache = device->cache.gid_cache[port_num - start_port(device)];
+
+	if (index < 0 || index >= cache->table_len)
+		ret = -EINVAL;
+	else
+		*gid = cache->table[index];
+
+	read_unlock_irqrestore(&lh);
+
+	return ret;
+}
+
+int ib_find_cached_gid(struct ib_device *device,
+	union ib_gid *gid,
+	u8 *port_num,
+	u16 *index)
+{
+	struct ib_gid_cache *cache;
+	int i;
+	u8 p;
+	int ret = -ENOENT;
+	SPIN_LOCK_PREP(lh);
+
+	*port_num = (u8)-1;
+	if (index)
+		*index = (u16)-1;
+
+	read_lock_irqsave(&device->cache.lock, &lh);
+
+	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
+		cache = device->cache.gid_cache[p];
+		for (i = 0; i < cache->table_len; ++i) {
+			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
+				*port_num = p + start_port(device);
+				if (index)
+					*index = (u16)i;
+				ret = 0;
+				goto found;
+			}
+		}
+	}
+found:
+	read_unlock_irqrestore(&lh);
+
+	return ret;
+}
+
+int ib_get_cached_pkey(struct ib_device *device,
+	u8 port_num,
+	int index,
+	u16 *pkey)
+{
+	struct ib_pkey_cache *cache;
+	int ret = 0;
+	SPIN_LOCK_PREP(lh);
+
+	// sanity checks
+	if (port_num < start_port(device) || port_num > end_port(device))
+		return -EINVAL;
+	if (!device->cache.pkey_cache)
+		return -EFAULT;
+
+	read_lock_irqsave(&device->cache.lock, &lh);
+
+	cache = device->cache.pkey_cache[port_num - start_port(device)];
+
+	if (index < 0 || index >= cache->table_len)
+		ret = -EINVAL;
+	else
+		*pkey = cache->table[index];
+
+	read_unlock_irqrestore(&lh);
+
+	return ret;
+}
+
+int ib_find_cached_pkey(struct ib_device *device,
+	u8 port_num,
+	u16 pkey,
+	u16 *index)
+{
+	struct ib_pkey_cache *cache;
+	int i;
+	int ret = -ENOENT;
+	SPIN_LOCK_PREP(lh);
+
+	if (port_num < start_port(device) || port_num > end_port(device))
+		return -EINVAL;
+
+	read_lock_irqsave(&device->cache.lock, &lh);
+
+	cache = device->cache.pkey_cache[port_num - start_port(device)];
+
+	*index = (u16)-1;
+
+	for (i = 0; i < cache->table_len; ++i)
+		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
+			*index = (u16)i;
+			ret = 0;
+			break;
+		}
+
+	read_unlock_irqrestore(&lh);
+
+	return ret;
+}
+
+static void ib_cache_update(struct ib_device *device,
+	u8 port)
+{
+	struct ib_port_attr *tprops = NULL;
+	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
+	struct ib_gid_cache *gid_cache = 
NULL, *old_gid_cache; + int i; + int ret; + SPIN_LOCK_PREP(lh); + + tprops = kmalloc(sizeof *tprops, GFP_KERNEL); + if (!tprops) + return; + + ret = ib_query_port(device, port, tprops); + if (ret) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("ib_query_port failed (%d) for %s, port %d\n", + ret, device->name, port)); + goto err; + } + + pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len * + sizeof *pkey_cache->table, GFP_KERNEL); + if (!pkey_cache) + goto err; + + pkey_cache->table_len = tprops->pkey_tbl_len; + + gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len * + sizeof *gid_cache->table, GFP_KERNEL); + if (!gid_cache) + goto err; + + gid_cache->table_len = tprops->gid_tbl_len; + + for (i = 0; i < pkey_cache->table_len; i+=32) { + u16 pkey_chunk[32]; + int size; + ret = ib_query_pkey_chunk(device, port, (u16)i, pkey_chunk); + if (ret) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("ib_query_pkey_chunk failed (%d) for %s (index %d)\n", + ret, device->name, i)); + goto err; + } + size = min(32, pkey_cache->table_len - i); + RtlCopyMemory(pkey_cache->table + i, pkey_chunk, size*sizeof(u16)); + } + + for (i = 0; i < gid_cache->table_len; i+=8) { + union ib_gid gid_chunk[8]; + int size; + ret = ib_query_gid_chunk(device, port, i, gid_chunk); + if (ret) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("ib_query_gid_chunk failed (%d) for %s (index %d)\n", + ret, device->name, i)); + goto err; + } + size = min(8, gid_cache->table_len - i); + RtlCopyMemory(gid_cache->table + i, gid_chunk, size*sizeof(union ib_gid)); + } + + write_lock_irq(&device->cache.lock, &lh); + + old_pkey_cache = device->cache.pkey_cache[port - start_port(device)]; + old_gid_cache = device->cache.gid_cache [port - start_port(device)]; + + device->cache.pkey_cache[port - start_port(device)] = pkey_cache; + device->cache.gid_cache [port - start_port(device)] = gid_cache; + + write_unlock_irq(&lh); + + kfree(old_pkey_cache); + kfree(old_gid_cache); + kfree(tprops); + return; + +err: + kfree(pkey_cache); + kfree(gid_cache); + kfree(tprops); +} + +static void ib_cache_task(void *work_ptr) +{ + struct ib_update_work *work = work_ptr; + + ib_cache_update(work->device, work->port_num); +} + +/* leo: wrapper for Linux work_item callback */ +VOID + ib_work_item ( + IN PDEVICE_OBJECT DeviceObject, + IN PVOID Context + ) +{ + struct ib_update_work *work = (struct ib_update_work *)Context; + UNREFERENCED_PARAMETER(DeviceObject); + ib_cache_task(Context); + IoFreeWorkItem(work->work_item); + kfree(Context); +} + +static void ib_cache_event(struct ib_event_handler *handler, + struct ib_event *event) +{ + struct ib_update_work *work; + static int temp_skip = 10; + + if (temp_skip-- <= 0) + return; + + if (event->event == IB_EVENT_PORT_ERR || + event->event == IB_EVENT_PORT_ACTIVE || + event->event == IB_EVENT_LID_CHANGE || + event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_SM_CHANGE) { + work = kmalloc(sizeof *work, GFP_ATOMIC); + //TODO: what will happen on allocation failure ? + if (work) { + work->device = event->device; + work->port_num = event->element.port_num; + + { // schedule a work item to work + // get PDO + PDEVICE_OBJECT pdo = handler->device->mdev->ext->cl_ext.p_self_do; + + // allocate work item + work->work_item = IoAllocateWorkItem(pdo); + if (work->work_item == NULL) { + //TODO: at least - print error. 
Need to return code, but the function is void + } + else { // schedule the work + IoQueueWorkItem( + work->work_item, + ib_work_item, + DelayedWorkQueue, + work + ); + } + } + + } + } +} + +static void ib_cache_setup_one(struct ib_device *device) +{ + u8 p; + + rwlock_init(&device->cache.lock); + + device->cache.pkey_cache = + kmalloc(sizeof *device->cache.pkey_cache * + (end_port(device) - start_port(device) + 1), GFP_KERNEL); + device->cache.gid_cache = + kmalloc(sizeof *device->cache.gid_cache * + (end_port(device) - start_port(device) + 1), GFP_KERNEL); + + if (!device->cache.pkey_cache || !device->cache.gid_cache) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Couldn't allocate cache " + "for %s\n", device->name)); + goto err; + } + + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + device->cache.pkey_cache[p] = NULL; + device->cache.gid_cache [p] = NULL; + ib_cache_update(device, p + start_port(device)); + } + + INIT_IB_EVENT_HANDLER(&device->cache.event_handler, + device, ib_cache_event); + if (ib_register_event_handler(&device->cache.event_handler)) + goto err_cache; + + return; + +err_cache: + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + kfree(device->cache.pkey_cache[p]); + kfree(device->cache.gid_cache[p]); + } + +err: + kfree(device->cache.pkey_cache); + kfree(device->cache.gid_cache); +} + +static void ib_cache_cleanup_one(struct ib_device *device) +{ + int p; + + ib_unregister_event_handler(&device->cache.event_handler); + //TODO: how to do that ? + // LINUX: flush_scheduled_work(); + + for (p = 0; p <= end_port(device) - start_port(device); ++p) { + kfree(device->cache.pkey_cache[p]); + kfree(device->cache.gid_cache[p]); + } + + kfree(device->cache.pkey_cache); + kfree(device->cache.gid_cache); +} + +static struct ib_client cache_client = { "cache", ib_cache_setup_one, ib_cache_cleanup_one }; + +int ib_cache_setup(void) +{ + return ib_register_client(&cache_client); +} + +void ib_cache_cleanup(void) +{ + ib_unregister_client(&cache_client); +} + diff --git a/branches/Ndi/hw/mthca/kernel/mt_device.c b/branches/Ndi/hw/mthca/kernel/mt_device.c new file mode 100644 index 00000000..93504583 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_device.c @@ -0,0 +1,567 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "hca_driver.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_device.tmh" +#endif +#include "ib_verbs.h" +#include "ib_cache.h" + +struct ib_client_data { + struct list_head list; + struct ib_client *client; + void * data; +}; + +static LIST_HEAD(device_list); +static LIST_HEAD(client_list); + +/* + * device_mutex protects access to both device_list and client_list. + * There's no real point to using multiple locks or something fancier + * like an rwsem: we always access both lists, and we're always + * modifying one list or the other list. In any case this is not a + * hot path so there's no point in trying to optimize. + */ +KMUTEX device_mutex; + +static int ib_device_check_mandatory(struct ib_device *device) +{ +#define IB_MANDATORY_FUNC(x) { offsetof(struct ib_device, x), #x } + static const struct { + size_t offset; + char *name; + } mandatory_table[] = { + IB_MANDATORY_FUNC(query_device), + IB_MANDATORY_FUNC(query_port), + IB_MANDATORY_FUNC(query_pkey_chunk), + IB_MANDATORY_FUNC(query_gid_chunk), + IB_MANDATORY_FUNC(alloc_pd), + IB_MANDATORY_FUNC(dealloc_pd), + IB_MANDATORY_FUNC(create_ah), + IB_MANDATORY_FUNC(destroy_ah), + IB_MANDATORY_FUNC(create_qp), + IB_MANDATORY_FUNC(modify_qp), + IB_MANDATORY_FUNC(destroy_qp), + IB_MANDATORY_FUNC(post_send), + IB_MANDATORY_FUNC(post_recv), + IB_MANDATORY_FUNC(create_cq), + IB_MANDATORY_FUNC(destroy_cq), + IB_MANDATORY_FUNC(poll_cq), + IB_MANDATORY_FUNC(req_notify_cq), + IB_MANDATORY_FUNC(get_dma_mr), + IB_MANDATORY_FUNC(dereg_mr) + }; + int i; + + for (i = 0; i < sizeof mandatory_table / sizeof mandatory_table[0]; ++i) { + if (!*(void **) ((u8 *) device + mandatory_table[i].offset)) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Device %s is missing mandatory function %s\n", + device->name, mandatory_table[i].name)); + return -EINVAL; + } + } + + return 0; +} + +static struct ib_device *__ib_device_get_by_name(const char *name) +{ + struct ib_device *device; + + list_for_each_entry(device, &device_list, core_list,struct ib_device) + if (!strncmp(name, device->name, IB_DEVICE_NAME_MAX)) + return device; + + return NULL; +} + +static int __extract_number(char *dest_str, const char *format, int *num) +{ + char *ptr; + UNREFERENCED_PARAMETER(format); + for (ptr = dest_str; *ptr; ptr++) { + if (*ptr >= '0' && *ptr <= '9') { + *num = atoi(ptr); + return 1; + } + } + return 0; +} +static int alloc_name(char *name) +{ + long *inuse; + char buf[IB_DEVICE_NAME_MAX]; + struct ib_device *device; + int i; + + inuse = (long *) get_zeroed_page(GFP_KERNEL); + if (!inuse) + return -ENOMEM; + + list_for_each_entry(device, &device_list, core_list,struct ib_device) { + if (!__extract_number(device->name, name, &i)) + continue; + if (i < 0 || i >= PAGE_SIZE * 8) + continue; + snprintf(buf, sizeof(buf)-1, name, i); + buf[sizeof(buf)-1] = '\0'; + if (!strncmp(buf, device->name, IB_DEVICE_NAME_MAX)) + set_bit(i, inuse); + } + + i = find_first_zero_bit((const unsigned long *)inuse, PAGE_SIZE * 8); + free_page(inuse); + snprintf(buf, sizeof(buf)-1, name, i); + buf[sizeof(buf)-1] = '\0'; + + if (__ib_device_get_by_name(buf)) + return -ENFILE; + + strlcpy(name, buf, IB_DEVICE_NAME_MAX); + return 0; +} + +static int 
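/* returns 0 on success or -ENOMEM if the per-client context cannot be allocated */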
add_client_context(struct ib_device *device, struct ib_client *client) +{ + struct ib_client_data *context; + SPIN_LOCK_PREP(lh); + + context = kmalloc(sizeof *context, GFP_KERNEL); + if (!context) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW,("Couldn't allocate client context for %s/%s\n", + device->name, client->name)); + return -ENOMEM; + } + + context->client = client; + context->data = NULL; + + spin_lock_irqsave(&device->client_data_lock, &lh); + list_add(&context->list, &device->client_data_list); + spin_unlock_irqrestore(&lh); + + return 0; +} + +/** + * ib_register_device - Register an IB device with IB core + * @device:Device to register + * + * Low-level drivers use ib_register_device() to register their + * devices with the IB core. All registered clients will receive a + * callback for each device that is added. @device must be allocated + * with ib_alloc_device(). + */ +int ib_register_device(struct ib_device *device) +{ + int ret = 0; + + down(&device_mutex); + + if (strchr(device->name, '%')) { + ret = alloc_name(device->name); + if (ret) + goto out; + } + + if (ib_device_check_mandatory(device)) { + ret = -EINVAL; + goto out; + } + + INIT_LIST_HEAD(&device->event_handler_list); + INIT_LIST_HEAD(&device->client_data_list); + spin_lock_init(&device->event_handler_lock); + spin_lock_init(&device->client_data_lock); + + list_add_tail(&device->core_list, &device_list); + + { + struct ib_client *client; + + list_for_each_entry(client, &client_list, list,struct ib_client) + if (client->add && !add_client_context(device, client)) + client->add(device); + } + + out: + up(&device_mutex); + return ret; +} + + +/** + * ib_unregister_device - Unregister an IB device + * @device:Device to unregister + * + * Unregister an IB device. All clients will receive a remove callback. + */ +void ib_unregister_device(struct ib_device *device) +{ + struct ib_client *client; + struct ib_client_data *context, *tmp; + SPIN_LOCK_PREP(lh); + + down(&device_mutex); + + list_for_each_entry_reverse(client, &client_list, list,struct ib_client) + if (client->remove) + client->remove(device); + + list_del(&device->core_list); + + up(&device_mutex); + + spin_lock_irqsave(&device->client_data_lock, &lh); + list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data) + kfree(context); + spin_unlock_irqrestore(&lh); + +} + + +/** + * ib_register_client - Register an IB client + * @client:Client to register + * + * Upper level users of the IB drivers can use ib_register_client() to + * register callbacks for IB device addition and removal. When an IB + * device is added, each registered client's add method will be called + * (in the order the clients were registered), and when a device is + * removed, each client's remove method will be called (in the reverse + * order that clients were registered). In addition, when + * ib_register_client() is called, the client will receive an add + * callback for all devices already registered. 
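+ *
+ * An illustrative sketch (not part of the original patch; "my_add" and
+ * "my_remove" are hypothetical callbacks with the signatures above):
+ *
+ *	static struct ib_client my_client = { "my_client", my_add, my_remove };
+ *	...
+ *	ib_register_client(&my_client);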
+ */
+int ib_register_client(struct ib_client *client)
+{
+	struct ib_device *device;
+
+	down(&device_mutex);
+
+	list_add_tail(&client->list, &client_list);
+	list_for_each_entry(device, &device_list, core_list,struct ib_device)
+		if (client->add && !add_client_context(device, client))
+			client->add(device);
+
+	up(&device_mutex);
+
+	return 0;
+}
+
+
+/**
+ * ib_unregister_client - Unregister an IB client
+ * @client:Client to unregister
+ *
+ * Upper level users use ib_unregister_client() to remove their client
+ * registration.  When ib_unregister_client() is called, the client
+ * will receive a remove callback for each IB device still registered.
+ */
+void ib_unregister_client(struct ib_client *client)
+{
+	struct ib_client_data *context, *tmp;
+	struct ib_device *device;
+	SPIN_LOCK_PREP(lh);
+
+	down(&device_mutex);
+
+	list_for_each_entry(device, &device_list, core_list,struct ib_device) {
+		if (client->remove)
+			client->remove(device);
+
+		spin_lock_irqsave(&device->client_data_lock, &lh);
+		list_for_each_entry_safe(context, tmp, &device->client_data_list, list,struct ib_client_data,struct ib_client_data)
+			if (context->client == client) {
+				list_del(&context->list);
+				kfree(context);
+			}
+		spin_unlock_irqrestore(&lh);
+	}
+	list_del(&client->list);
+
+	up(&device_mutex);
+}
+
+
+/**
+ * ib_get_client_data - Get IB client context
+ * @device:Device to get context for
+ * @client:Client to get context for
+ *
+ * ib_get_client_data() returns client context set with
+ * ib_set_client_data().
+ */
+void *ib_get_client_data(struct ib_device *device, struct ib_client *client)
+{
+	struct ib_client_data *context;
+	void *ret = NULL;
+	SPIN_LOCK_PREP(lh);
+
+	spin_lock_irqsave(&device->client_data_lock, &lh);
+	list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data)
+		if (context->client == client) {
+			ret = context->data;
+			break;
+		}
+	spin_unlock_irqrestore(&lh);
+
+	return ret;
+}
+
+
+/**
+ * ib_set_client_data - Set IB client context
+ * @device:Device to set context for
+ * @client:Client to set context for
+ * @data:Context to set
+ *
+ * ib_set_client_data() sets client context that can be retrieved with
+ * ib_get_client_data().
+ */
+void ib_set_client_data(struct ib_device *device, struct ib_client *client,
+			void *data)
+{
+	struct ib_client_data *context;
+	SPIN_LOCK_PREP(lh);
+
+	spin_lock_irqsave(&device->client_data_lock, &lh);
+	list_for_each_entry(context, &device->client_data_list, list,struct ib_client_data)
+		if (context->client == client) {
+			context->data = data;
+			goto out;
+		}
+
+	HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No client context found for %s/%s\n",
+		device->name, client->name));
+
+out:
+	spin_unlock_irqrestore(&lh);
+}
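+
+/*
+ * Usage sketch for the client API above: a hypothetical upper-level module
+ * that keeps per-device state.  All "my_*" names are illustrative only and
+ * are not part of this driver.
+ */
+#if 0
+struct my_ctx {
+	int dummy;	/* per-device state would live here */
+};
+
+static struct ib_client my_client;
+
+static void my_add_one(struct ib_device *device)
+{
+	/* allocate per-device context and attach it to the device */
+	struct my_ctx *ctx = kmalloc(sizeof *ctx, GFP_KERNEL);
+	if (ctx)
+		ib_set_client_data(device, &my_client, ctx);
+}
+
+static void my_remove_one(struct ib_device *device)
+{
+	/* retrieve and free the context set in my_add_one() */
+	struct my_ctx *ctx = ib_get_client_data(device, &my_client);
+	if (ctx)
+		kfree(ctx);
+}
+
+static void my_register(void)
+{
+	my_client.name   = "my_client";
+	my_client.add    = my_add_one;
+	my_client.remove = my_remove_one;
+	/* add() fires immediately for every device already registered;
+	 * ib_unregister_client(&my_client) later replays remove() */
+	ib_register_client(&my_client);
+}
+#endif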
+
+
+/**
+ * ib_register_event_handler - Register an IB event handler
+ * @event_handler:Handler to register
+ *
+ * ib_register_event_handler() registers an event handler that will be
+ * called back when asynchronous IB events occur (as defined in
+ * chapter 11 of the InfiniBand Architecture Specification).  This
+ * callback may occur in interrupt context.
+ */
+int ib_register_event_handler (struct ib_event_handler *event_handler)
+{
+	SPIN_LOCK_PREP(lh);
+
+	spin_lock_irqsave(&event_handler->device->event_handler_lock, &lh);
+	list_add_tail(&event_handler->list,
+		&event_handler->device->event_handler_list);
+	spin_unlock_irqrestore(&lh);
+
+	return 0;
+}
+
+
+/**
+ * ib_unregister_event_handler - Unregister an event handler
+ * @event_handler:Handler to unregister
+ *
+ * Unregister an event handler registered with
+ * ib_register_event_handler().
+ */
+int ib_unregister_event_handler(struct ib_event_handler *event_handler)
+{
+	SPIN_LOCK_PREP(lh);
+	spin_lock_irqsave(&event_handler->device->event_handler_lock, &lh);
+	list_del(&event_handler->list);
+	spin_unlock_irqrestore(&lh);
+
+	return 0;
+}
+
+
+/**
+ * ib_dispatch_event - Dispatch an asynchronous event
+ * @event:Event to dispatch
+ *
+ * Low-level drivers must call ib_dispatch_event() to dispatch the
+ * event to all registered event handlers when an asynchronous event
+ * occurs.
+ */
+void ib_dispatch_event(struct ib_event *event)
+{
+	struct ib_event_handler *handler;
+	SPIN_LOCK_PREP(lh);
+
+	spin_lock_irqsave(&event->device->event_handler_lock, &lh);
+
+	list_for_each_entry(handler, &event->device->event_handler_list, list,struct ib_event_handler)
+		handler->handler(handler, event);
+
+	spin_unlock_irqrestore(&lh);
+}
+
+
+/**
+ * ib_query_device - Query IB device attributes
+ * @device:Device to query
+ * @device_attr:Device attributes
+ *
+ * ib_query_device() returns the attributes of a device through the
+ * @device_attr pointer.
+ */
+int ib_query_device(struct ib_device *device,
+		struct ib_device_attr *device_attr)
+{
+	return device->query_device(device, device_attr);
+}
+
+
+/**
+ * ib_query_port - Query IB port attributes
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @port_attr:Port attributes
+ *
+ * ib_query_port() returns the attributes of a port through the
+ * @port_attr pointer.
+ */
+int ib_query_port(struct ib_device *device,
+		u8 port_num,
+		struct ib_port_attr *port_attr)
+{
+	if (port_num < start_port(device) || port_num > end_port(device))
+		return -EINVAL;
+	return device->query_port(device, port_num, port_attr);
+}
+
+
+/**
+ * ib_query_gid_chunk - Get a chunk of GID table entries
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @index:GID table index to query
+ * @gid:Returned GIDs chunk
+ *
+ * ib_query_gid_chunk() fetches the specified chunk of GID table entries.
+ */
+int ib_query_gid_chunk(struct ib_device *device,
+		u8 port_num, int index, union ib_gid gid[8])
+{
+	return device->query_gid_chunk(device, port_num, index, gid);
+}
+
+
+/**
+ * ib_query_pkey_chunk - Get a chunk of P_Key table entries
+ * @device:Device to query
+ * @port_num:Port number to query
+ * @index:P_Key table index to query
+ * @pkey:Returned P_Keys chunk
+ *
+ * ib_query_pkey_chunk() fetches the specified chunk of P_Key table entries.
+ */
+int ib_query_pkey_chunk(struct ib_device *device,
+		u8 port_num, u16 index, u16 pkey[32])
+{
+	return device->query_pkey_chunk(device, port_num, index, pkey);
+}
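+
+/*
+ * Usage sketch for the chunked query API above: reading a port's P_Key
+ * table 32 entries at a time.  The function and its parameters are
+ * hypothetical, and it assumes @index denotes the first entry of the
+ * requested chunk; a real caller would take pkey_tbl_len from the
+ * attributes returned by ib_query_port().
+ */
+#if 0
+static void my_dump_pkeys(struct ib_device *device, u8 port_num, int pkey_tbl_len)
+{
+	u16 pkeys[32];
+	int i, j;
+
+	for (i = 0; i < pkey_tbl_len; i += 32) {
+		if (ib_query_pkey_chunk(device, port_num, (u16)i, pkeys))
+			break;
+		for (j = 0; j < 32 && i + j < pkey_tbl_len; ++j)
+			HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_LOW,
+				("pkey[%d] = 0x%04x\n", i + j, pkeys[j]));
+	}
+}
+#endif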
+
+
+/**
+ * ib_modify_device - Change IB device attributes
+ * @device:Device to modify
+ * @device_modify_mask:Mask of attributes to change
+ * @device_modify:New attribute values
+ *
+ * ib_modify_device() changes a device's attributes as specified by
+ * the @device_modify_mask and @device_modify structure.
+ */
+int ib_modify_device(struct ib_device *device,
+		int device_modify_mask,
+		struct ib_device_modify *device_modify)
+{
+	return device->modify_device(device, device_modify_mask,
+		device_modify);
+}
+
+
+/**
+ * ib_modify_port - Modifies the attributes for the specified port.
+ * @device: The device to modify.
+ * @port_num: The number of the port to modify.
+ * @port_modify_mask: Mask used to specify which attributes of the port
+ *   to change.
+ * @port_modify: New attribute values for the port.
+ *
+ * ib_modify_port() changes a port's attributes as specified by the
+ * @port_modify_mask and @port_modify structure.
+ */
+int ib_modify_port(struct ib_device *device,
+		u8 port_num, int port_modify_mask,
+		struct ib_port_modify *port_modify)
+{
+	if (port_num < start_port(device) || port_num > end_port(device))
+		return -EINVAL;
+
+	return device->modify_port(device, port_num, port_modify_mask,
+		port_modify);
+}
+
+int ib_core_init(void)
+{
+	int ret;
+
+	/* leo: added because there is no static init of semaphore in Windows */
+	KeInitializeMutex(&device_mutex,0);
+
+	ret = ib_cache_setup();
+	if (ret) {
+		HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Couldn't set up InfiniBand P_Key/GID cache\n"));
+	}
+
+	return ret;
+}
+
+void ib_core_cleanup(void)
+{
+	ib_cache_cleanup();
+}
+
diff --git a/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.c b/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.c
new file mode 100644
index 00000000..13ef9a57
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.c
@@ -0,0 +1,511 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/jhash.h>
+#include <linux/kthread.h>
+
+#include <rdma/ib_fmr_pool.h>
+
+#include "core_priv.h"
+
+enum {
+	IB_FMR_MAX_REMAPS = 32,
+
+	IB_FMR_HASH_BITS  = 8,
+	IB_FMR_HASH_SIZE  = 1 << IB_FMR_HASH_BITS,
+	IB_FMR_HASH_MASK  = IB_FMR_HASH_SIZE - 1
+};
+
+/*
+ * If an FMR is not in use, then the list member will point to either
+ * its pool's free_list (if the FMR can be mapped again; that is,
+ * remap_count < IB_FMR_MAX_REMAPS) or its pool's dirty_list (if the
+ * FMR needs to be unmapped before being remapped).
In either of + * these cases it is a bug if the ref_count is not 0. In other words, + * if ref_count is > 0, then the list member must not be linked into + * either free_list or dirty_list. + * + * The cache_node member is used to link the FMR into a cache bucket + * (if caching is enabled). This is independent of the reference + * count of the FMR. When a valid FMR is released, its ref_count is + * decremented, and if ref_count reaches 0, the FMR is placed in + * either free_list or dirty_list as appropriate. However, it is not + * removed from the cache and may be "revived" if a call to + * ib_fmr_register_physical() occurs before the FMR is remapped. In + * this case we just increment the ref_count and remove the FMR from + * free_list/dirty_list. + * + * Before we remap an FMR from free_list, we remove it from the cache + * (to prevent another user from obtaining a stale FMR). When an FMR + * is released, we add it to the tail of the free list, so that our + * cache eviction policy is "least recently used." + * + * All manipulation of ref_count, list and cache_node is protected by + * pool_lock to maintain consistency. + */ + +struct ib_fmr_pool { + spinlock_t pool_lock; + + int pool_size; + int max_pages; + int dirty_watermark; + int dirty_len; + struct list_head free_list; + struct list_head dirty_list; + struct hlist_head *cache_bucket; + + void (*flush_function)(struct ib_fmr_pool *pool, + void * arg); + void *flush_arg; + + struct task_struct *thread; + + atomic_t req_ser; + atomic_t flush_ser; + + wait_queue_head_t force_wait; +}; + +static inline u32 ib_fmr_hash(u64 first_page) +{ + return jhash_2words((u32) first_page, (u32) (first_page >> 32), 0) & + (IB_FMR_HASH_SIZE - 1); +} + +/* Caller must hold pool_lock */ +static inline struct ib_pool_fmr *ib_fmr_cache_lookup(struct ib_fmr_pool *pool, + u64 *page_list, + int page_list_len, + u64 io_virtual_address) +{ + struct hlist_head *bucket; + struct ib_pool_fmr *fmr; + struct hlist_node *pos; + + if (!pool->cache_bucket) + return NULL; + + bucket = pool->cache_bucket + ib_fmr_hash(*page_list); + + hlist_for_each_entry(fmr, pos, bucket, cache_node) + if (io_virtual_address == fmr->io_virtual_address && + page_list_len == fmr->page_list_len && + !memcmp(page_list, fmr->page_list, + page_list_len * sizeof *page_list)) + return fmr; + + return NULL; +} + +static void ib_fmr_batch_release(struct ib_fmr_pool *pool) +{ + int ret; + struct ib_pool_fmr *fmr; + LIST_HEAD(unmap_list); + LIST_HEAD(fmr_list); + + spin_lock_irq(&pool->pool_lock); + + list_for_each_entry(fmr, &pool->dirty_list, list) { + hlist_del_init(&fmr->cache_node); + fmr->remap_count = 0; + list_add_tail(&fmr->fmr->list, &fmr_list); + +#ifdef DEBUG + if (fmr->ref_count !=0) { + printk(KERN_WARNING "Unmapping FMR 0x%08x with ref count %d", + fmr, fmr->ref_count); + } +#endif + } + + list_splice(&pool->dirty_list, &unmap_list); + INIT_LIST_HEAD(&pool->dirty_list); + pool->dirty_len = 0; + + spin_unlock_irq(&pool->pool_lock); + + if (list_empty(&unmap_list)) { + return; + } + + ret = ib_unmap_fmr(&fmr_list); + if (ret) + printk(KERN_WARNING "ib_unmap_fmr returned %d", ret); + + spin_lock_irq(&pool->pool_lock); + list_splice(&unmap_list, &pool->free_list); + spin_unlock_irq(&pool->pool_lock); +} + +static int ib_fmr_cleanup_thread(void *pool_ptr) +{ + struct ib_fmr_pool *pool = pool_ptr; + + do { + if (pool->dirty_len >= pool->dirty_watermark || + atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) { + ib_fmr_batch_release(pool); + + atomic_inc(&pool->flush_ser); 
+ wake_up_interruptible(&pool->force_wait); + + if (pool->flush_function) + pool->flush_function(pool, pool->flush_arg); + } + + set_current_state(TASK_INTERRUPTIBLE); + if (pool->dirty_len < pool->dirty_watermark && + atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 && + !kthread_should_stop()) + schedule(); + __set_current_state(TASK_RUNNING); + } while (!kthread_should_stop()); + + return 0; +} + +/** + * ib_create_fmr_pool - Create an FMR pool + * @pd:Protection domain for FMRs + * @params:FMR pool parameters + * + * Create a pool of FMRs. Return value is pointer to new pool or + * error code if creation failed. + */ +struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd, + struct ib_fmr_pool_param *params) +{ + struct ib_device *device; + struct ib_fmr_pool *pool; + int i; + int ret; + + if (!params) + return ERR_PTR(-EINVAL); + + device = pd->device; + if (!device->alloc_fmr || !device->dealloc_fmr || + !device->map_phys_fmr || !device->unmap_fmr) { + printk(KERN_WARNING "Device %s does not support fast memory regions", + device->name); + return ERR_PTR(-ENOSYS); + } + + pool = kmalloc(sizeof *pool, GFP_KERNEL); + if (!pool) { + printk(KERN_WARNING "couldn't allocate pool struct"); + return ERR_PTR(-ENOMEM); + } + + pool->cache_bucket = NULL; + + pool->flush_function = params->flush_function; + pool->flush_arg = params->flush_arg; + + INIT_LIST_HEAD(&pool->free_list); + INIT_LIST_HEAD(&pool->dirty_list); + + if (params->cache) { + pool->cache_bucket = + kmalloc(IB_FMR_HASH_SIZE * sizeof *pool->cache_bucket, + GFP_KERNEL); + if (!pool->cache_bucket) { + printk(KERN_WARNING "Failed to allocate cache in pool"); + ret = -ENOMEM; + goto out_free_pool; + } + + for (i = 0; i < IB_FMR_HASH_SIZE; ++i) + INIT_HLIST_HEAD(pool->cache_bucket + i); + } + + pool->pool_size = 0; + pool->max_pages = params->max_pages_per_fmr; + pool->dirty_watermark = params->dirty_watermark; + pool->dirty_len = 0; + spin_lock_init(&pool->pool_lock); + atomic_set(&pool->req_ser, 0); + atomic_set(&pool->flush_ser, 0); + init_waitqueue_head(&pool->force_wait); + + pool->thread = kthread_create(ib_fmr_cleanup_thread, + pool, + "ib_fmr(%s)", + device->name); + if (IS_ERR(pool->thread)) { + printk(KERN_WARNING "couldn't start cleanup thread"); + ret = PTR_ERR(pool->thread); + goto out_free_pool; + } + + { + struct ib_pool_fmr *fmr; + struct ib_fmr_attr attr = { + .max_pages = params->max_pages_per_fmr, + .max_maps = IB_FMR_MAX_REMAPS, + .page_shift = params->page_shift + }; + + for (i = 0; i < params->pool_size; ++i) { + fmr = kmalloc(sizeof *fmr + params->max_pages_per_fmr * sizeof (u64), + GFP_KERNEL); + if (!fmr) { + printk(KERN_WARNING "failed to allocate fmr struct " + "for FMR %d", i); + goto out_fail; + } + + fmr->pool = pool; + fmr->remap_count = 0; + fmr->ref_count = 0; + INIT_HLIST_NODE(&fmr->cache_node); + + fmr->fmr = ib_alloc_fmr(pd, params->access, &attr); + if (IS_ERR(fmr->fmr)) { + printk(KERN_WARNING "fmr_create failed for FMR %d", i); + kfree(fmr); + goto out_fail; + } + + list_add_tail(&fmr->list, &pool->free_list); + ++pool->pool_size; + } + } + + return pool; + + out_free_pool: + kfree(pool->cache_bucket); + kfree(pool); + + return ERR_PTR(ret); + + out_fail: + ib_destroy_fmr_pool(pool); + + return ERR_PTR(-ENOMEM); +} +EXPORT_SYMBOL(ib_create_fmr_pool); + +/** + * ib_destroy_fmr_pool - Free FMR pool + * @pool:FMR pool to free + * + * Destroy an FMR pool and free all associated resources. 
+ */
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool)
+{
+	struct ib_pool_fmr *fmr;
+	struct ib_pool_fmr *tmp;
+	LIST_HEAD(fmr_list);
+	int i;
+
+	kthread_stop(pool->thread);
+	ib_fmr_batch_release(pool);
+
+	i = 0;
+	list_for_each_entry_safe(fmr, tmp, &pool->free_list, list) {
+		if (fmr->remap_count) {
+			INIT_LIST_HEAD(&fmr_list);
+			list_add_tail(&fmr->fmr->list, &fmr_list);
+			ib_unmap_fmr(&fmr_list);
+		}
+		ib_dealloc_fmr(fmr->fmr);
+		list_del(&fmr->list);
+		kfree(fmr);
+		++i;
+	}
+
+	if (i < pool->pool_size)
+		printk(KERN_WARNING "pool still has %d regions registered",
+			pool->pool_size - i);
+
+	kfree(pool->cache_bucket);
+	kfree(pool);
+}
+EXPORT_SYMBOL(ib_destroy_fmr_pool);
+
+/**
+ * ib_flush_fmr_pool - Invalidate all unmapped FMRs
+ * @pool:FMR pool to flush
+ *
+ * Ensure that all unmapped FMRs are fully invalidated.
+ */
+int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
+{
+	int serial;
+
+	atomic_inc(&pool->req_ser);
+	/*
+	 * It's OK if someone else bumps req_ser again here -- we'll
+	 * just wait a little longer.
+	 */
+	serial = atomic_read(&pool->req_ser);
+
+	wake_up_process(pool->thread);
+
+	if (wait_event_interruptible(pool->force_wait,
+			atomic_read(&pool->flush_ser) -
+			atomic_read(&pool->req_ser) >= 0))
+		return -EINTR;
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_flush_fmr_pool);
+
+/**
+ * ib_fmr_pool_map_phys - Map an FMR from an FMR pool
+ * @pool:FMR pool to allocate FMR from
+ * @page_list:List of pages to map
+ * @list_len:Number of pages in @page_list
+ * @io_virtual_address:I/O virtual address for new FMR
+ *
+ * Map an FMR from an FMR pool.
+ */
+struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
+		u64 *page_list,
+		int list_len,
+		u64 *io_virtual_address)
+{
+	struct ib_fmr_pool *pool = pool_handle;
+	struct ib_pool_fmr *fmr;
+	unsigned long flags;
+	int result;
+
+	if (list_len < 1 || list_len > pool->max_pages)
+		return ERR_PTR(-EINVAL);
+
+	spin_lock_irqsave(&pool->pool_lock, flags);
+	fmr = ib_fmr_cache_lookup(pool,
+		page_list,
+		list_len,
+		*io_virtual_address);
+	if (fmr) {
+		/* found in cache */
+		++fmr->ref_count;
+		if (fmr->ref_count == 1) {
+			list_del(&fmr->list);
+		}
+
+		spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+		return fmr;
+	}
+
+	if (list_empty(&pool->free_list)) {
+		spin_unlock_irqrestore(&pool->pool_lock, flags);
+		return ERR_PTR(-EAGAIN);
+	}
+
+	fmr = list_entry(pool->free_list.next, struct ib_pool_fmr, list);
+	list_del(&fmr->list);
+	hlist_del_init(&fmr->cache_node);
+	spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+	result = ib_map_phys_fmr(fmr->fmr, page_list, list_len,
+		*io_virtual_address);
+
+	if (result) {
+		spin_lock_irqsave(&pool->pool_lock, flags);
+		list_add(&fmr->list, &pool->free_list);
+		spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+		printk(KERN_WARNING "fmr_map returns %d\n",
+			result);
+
+		return ERR_PTR(result);
+	}
+
+	++fmr->remap_count;
+	fmr->ref_count = 1;
+
+	if (pool->cache_bucket) {
+		fmr->io_virtual_address = *io_virtual_address;
+		fmr->page_list_len = list_len;
+		memcpy(fmr->page_list, page_list, list_len * sizeof(*page_list));
+
+		spin_lock_irqsave(&pool->pool_lock, flags);
+		hlist_add_head(&fmr->cache_node,
+			pool->cache_bucket + ib_fmr_hash(fmr->page_list[0]));
+		spin_unlock_irqrestore(&pool->pool_lock, flags);
+	}
+
+	return fmr;
+}
+EXPORT_SYMBOL(ib_fmr_pool_map_phys);
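+
+/*
+ * Lifecycle sketch for the pool API above, assuming a valid PD and a page
+ * list already built by the caller.  The "my_*" names are hypothetical, and
+ * IB_ACCESS_LOCAL_WRITE is assumed to be a member of enum ib_access_flags
+ * as in the Linux verbs headers.
+ */
+#if 0
+static void my_fmr_example(struct ib_pd *pd, u64 *pages, int npages, u64 io_va)
+{
+	struct ib_fmr_pool_param param;
+	struct ib_fmr_pool *pool;
+	struct ib_pool_fmr *fmr;
+
+	RtlZeroMemory(&param, sizeof param);
+	param.max_pages_per_fmr = npages;
+	param.page_shift        = PAGE_SHIFT;
+	param.access            = IB_ACCESS_LOCAL_WRITE;
+	param.pool_size         = 32;
+	param.dirty_watermark   = 8;
+	param.cache             = 1;	/* allow "revival" of identical mappings */
+
+	pool = ib_create_fmr_pool(pd, &param);
+	if (IS_ERR(pool))
+		return;
+
+	fmr = ib_fmr_pool_map_phys(pool, pages, npages, &io_va);
+	if (!IS_ERR(fmr)) {
+		/* ... post work requests using the keys of fmr->fmr ... */
+		ib_fmr_pool_unmap(fmr);
+	}
+
+	ib_flush_fmr_pool(pool);	/* force-invalidate unmapped FMRs */
+	ib_destroy_fmr_pool(pool);
+}
+#endif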
+
+/**
+ * ib_fmr_pool_unmap - Unmap FMR
+ * @fmr:FMR to unmap
+ *
+ * Unmap an FMR.  The FMR mapping may remain valid until the FMR is
+ * reused (or until ib_flush_fmr_pool() is called).
+ */
+int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
+{
+	struct ib_fmr_pool *pool;
+	unsigned long flags;
+
+	pool = fmr->pool;
+
+	spin_lock_irqsave(&pool->pool_lock, flags);
+
+	--fmr->ref_count;
+	if (!fmr->ref_count) {
+		if (fmr->remap_count < IB_FMR_MAX_REMAPS) {
+			list_add_tail(&fmr->list, &pool->free_list);
+		} else {
+			list_add_tail(&fmr->list, &pool->dirty_list);
+			++pool->dirty_len;
+			wake_up_process(pool->thread);
+		}
+	}
+
+#ifdef DEBUG
+	if (fmr->ref_count < 0)
+		printk(KERN_WARNING "FMR %p has ref count %d < 0",
+			fmr, fmr->ref_count);
+#endif
+
+	spin_unlock_irqrestore(&pool->pool_lock, flags);
+
+	return 0;
+}
+EXPORT_SYMBOL(ib_fmr_pool_unmap);
diff --git a/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.h b/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.h
new file mode 100644
index 00000000..5cca5cb0
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_fmr_pool.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2004 Topspin Corporation. All rights reserved.
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if !defined(IB_FMR_POOL_H)
+#define IB_FMR_POOL_H
+
+#include <rdma/ib_verbs.h>
+
+struct ib_fmr_pool;
+
+/**
+ * struct ib_fmr_pool_param - Parameters for creating FMR pool
+ * @max_pages_per_fmr:Maximum number of pages per map request.
+ * @page_shift: Log2 of the size of the pages mapped by this FMR.
+ * @access:Access flags for FMRs in pool.
+ * @pool_size:Number of FMRs to allocate for pool.
+ * @dirty_watermark:Flush is triggered when @dirty_watermark dirty
+ *     FMRs are present.
+ * @flush_function:Callback called when unmapped FMRs are flushed and
+ *     more FMRs are possibly available for mapping
+ * @flush_arg:Context passed to user's flush function.
+ * @cache:If set, FMRs may be reused after unmapping for identical map
+ *     requests.
+ */
+struct ib_fmr_pool_param {
+	int     max_pages_per_fmr;
+	int     page_shift;
+	enum ib_access_flags    access;
+	int     pool_size;
+	int     dirty_watermark;
+	void    (*flush_function)(struct ib_fmr_pool *pool,
+			void *arg);
+	void    *flush_arg;
+	unsigned    cache:1;
+};
+
+struct ib_pool_fmr {
+	struct ib_fmr      *fmr;
+	struct ib_fmr_pool *pool;
+	struct list_head    list;
+	struct hlist_node   cache_node;
+	int                 ref_count;
+	int                 remap_count;
+	u64                 io_virtual_address;
+	int                 page_list_len;
+	u64                 page_list[0];
+};
+
+struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
+		struct ib_fmr_pool_param *params);
+
+void ib_destroy_fmr_pool(struct ib_fmr_pool *pool);
+
+int ib_flush_fmr_pool(struct ib_fmr_pool *pool);
+
+struct ib_pool_fmr *ib_fmr_pool_map_phys(struct ib_fmr_pool *pool_handle,
+		u64 *page_list,
+		int list_len,
+		u64 *io_virtual_address);
+
+int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr);
+
+#endif /* IB_FMR_POOL_H */
diff --git a/branches/Ndi/hw/mthca/kernel/mt_l2w.c b/branches/Ndi/hw/mthca/kernel/mt_l2w.c
new file mode 100644
index 00000000..f1b2f027
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_l2w.c
@@ -0,0 +1,132 @@
+#include <mt_l2w.h>
+#include "hca_driver.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mt_l2w.tmh"
+#endif
+
+pci_pool_t *
+pci_pool_create (const char *name, struct mthca_dev *mdev,
+		size_t size, size_t align, size_t allocation)
+{
+	pci_pool_t *pool;
+	UNREFERENCED_PARAMETER(align);
+	UNREFERENCED_PARAMETER(allocation);
+
+	MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// allocation parameter is not handled yet
+	ASSERT(allocation == 0);
+
+	// allocate object
+	pool = (pci_pool_t *)ExAllocatePoolWithTag( NonPagedPool, sizeof(pci_pool_t), MT_TAG_PCIPOOL );
+	if (pool == NULL)
+		return NULL;
+
+	//TODO: not absolutely correct: Linux's pci_pool_alloc provides contiguous physical memory,
+	// while the default alloc function - ExAllocatePoolWithTag - doesn't.
+	// But for now it is used only for elements of size <= PAGE_SIZE.
+	// Anyway - a sanity check:
+	ASSERT(size <= PAGE_SIZE);
+	if (size > PAGE_SIZE) {
+		ExFreePool(pool);
+		return NULL;
+	}
+
+	//TODO: not very efficient: one could supply custom alloc/free functions
+	ExInitializeNPagedLookasideList( &pool->pool_hdr, NULL, NULL, 0, size, MT_TAG_PCIPOOL, 0 );
+
+	// fill the object
+	pool->mdev = mdev;
+	pool->size = size;
+	strncpy( pool->name, name, sizeof pool->name );
+
+	return pool;
+}
+
+// from lib/string.c
+/**
+* strlcpy - Copy a %NUL terminated string into a sized buffer
+* @dest: Where to copy the string to
+* @src: Where to copy the string from
+* @size: size of destination buffer
+*
+* Compatible with *BSD: the result is always a valid
+* NUL-terminated string that fits in the buffer (unless,
+* of course, the buffer size is zero). It does not pad
+* out the result like strncpy() does.
+*/
+SIZE_T strlcpy(char *dest, const char *src, SIZE_T size)
+{
+	SIZE_T ret = strlen(src);
+
+	if (size) {
+		SIZE_T len = (ret >= size) ? size-1 : ret;
+		memcpy(dest, src, len);
+		dest[len] = '\0';
+	}
+	return ret;
+}
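+
+/*
+ * Behavior sketch (illustrative only): unlike strncpy(), strlcpy() always
+ * NUL-terminates and returns the length of the source, so truncation can
+ * be detected by comparing the return value with the buffer size.
+ */
+#if 0
+static void strlcpy_example(void)
+{
+	char buf[8];
+	SIZE_T n = strlcpy(buf, "InfiniBand", sizeof buf);
+	/* buf == "InfiniB" (7 chars + NUL); n == 10, and n >= sizeof buf
+	 * signals that the source was truncated */
+}
+#endif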
+
+
+int __bitmap_full(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (~bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+
+	return 1;
+}
+
+int __bitmap_empty(const unsigned long *bitmap, int bits)
+{
+	int k, lim = bits/BITS_PER_LONG;
+	for (k = 0; k < lim; ++k)
+		if (bitmap[k])
+			return 0;
+
+	if (bits % BITS_PER_LONG)
+		if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
+			return 0;
+
+	return 1;
+}
+
+int request_irq(
+	IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */
+	IN KSPIN_LOCK *isr_lock, /* spin lock for ISR */
+	IN PKSERVICE_ROUTINE isr, /* ISR */
+	IN void *isr_ctx, /* ISR context */
+	OUT PKINTERRUPT *int_obj /* interrupt object */
+	)
+{
+	NTSTATUS status;
+
+	status = IoConnectInterrupt(
+		int_obj, /* InterruptObject */
+		isr, /* ISR */
+		isr_ctx, /* ISR context */
+		isr_lock, /* spinlock */
+		int_info->u.Interrupt.Vector, /* interrupt vector */
+		(KIRQL)int_info->u.Interrupt.Level, /* IRQL */
+		(KIRQL)int_info->u.Interrupt.Level, /* Synchronize IRQL */
+		(BOOLEAN)((int_info->Flags == CM_RESOURCE_INTERRUPT_LATCHED) ?
+			Latched : LevelSensitive), /* interrupt type: LATCHED or LEVEL */
+		(BOOLEAN)(int_info->ShareDisposition == CmResourceShareShared), /* vector shared or not */
+		g_processor_affinity ? g_processor_affinity : (KAFFINITY)int_info->u.Interrupt.Affinity, /* interrupt affinity */
+		FALSE /* whether to save Float registers */
+		);
+
+	if (!NT_SUCCESS(status)) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("IoConnectInterrupt failed status %d (did you change the processor_affinity ? )\n",status));
+		return -EFAULT; /* failed to connect interrupt */
+	}
+	else
+		return 0;
+}
+
diff --git a/branches/Ndi/hw/mthca/kernel/mt_l2w.h b/branches/Ndi/hw/mthca/kernel/mt_l2w.h
new file mode 100644
index 00000000..faf34055
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_l2w.h
@@ -0,0 +1,92 @@
+#ifndef MT_L2W_H
+#define MT_L2W_H
+
+// ===========================================
+// INCLUDES
+// ===========================================
+
+// OS
+#include
+#include
+#include
+#include
+
+// ours - the order is important
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+//#include
+#include
+#include
+#include
+
+
+// ===========================================
+// SUBSTITUTIONS
+// ===========================================
+
+#define BUG_ON(exp)  ASSERT(!(exp)) /* in Linux, panic() follows here! */
+#define WARN_ON(exp) ASSERT(!(exp)) /* in Linux, panic() follows here! */
+#define snprintf  _snprintf
+
+// memory barriers
+#define wmb KeMemoryBarrier
+#define rmb KeMemoryBarrier
+#define mb  KeMemoryBarrier
+
+// ===========================================
+// LITERALS
+// ===========================================
+
+
+
+
+// ===========================================
+// TYPES
+// ===========================================
+
+// rw_lock
+typedef spinlock_t rwlock_t;
+
+// dummy function
+typedef void (*MT_EMPTY_FUNC)();
+
+// ===========================================
+// MACROS
+// ===========================================
+
+// ARRAY_SIZE
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+// ALIGN
+#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1))
+
+// there is a bug in the Microsoft compiler: when _byteswap_uint64() gets an
+// expression, it evaluates the expression but doesn't swap the dwords.
+// So, there's
a workaround +#ifdef BYTESWAP_UINT64_BUG_FIXED +#define CPU_2_BE64_PREP +#define CPU_2_BE64(x) cl_hton64(x) +#else +#define CPU_2_BE64_PREP unsigned __int64 __tmp__ +#define CPU_2_BE64(x) ( __tmp__ = x, cl_hton64(__tmp__) ) +#endif + + +SIZE_T strlcpy(char *dest, const char *src, SIZE_T size); +void MT_time_calibrate(); + +#define ERR_PTR(error) ((void*)(LONG_PTR)(error)) +#define PTR_ERR(ptr) ((long)(LONG_PTR)(void*)(ptr)) +//TODO: there are 2 assumptions here: +// - pointer can't be too big (around -1) +// - error can't be bigger than 1000 +#define IS_ERR(ptr) ((ULONG_PTR)ptr > (ULONG_PTR)-1000L) + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_list.h b/branches/Ndi/hw/mthca/kernel/mt_list.h new file mode 100644 index 00000000..9fa96d8b --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_list.h @@ -0,0 +1,168 @@ +#ifndef MT_LIST_H +#define MT_LIST_H + +// taken from list.h + +/* + * These are non-NULL pointers that will result in page faults + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. + */ +#define LIST_POISON1 ((void *) 0x00100100) +#define LIST_POISON2 ((void *) 0x00200200) + +/* +* Simple doubly linked list implementation. +* +* Some of the internal functions ("__xxx") are useful when +* manipulating whole lists rather than single entries, as +* sometimes we already know the next/prev entries and we can +* generate better code by using them directly rather than +* using the generic single-entry routines. +*/ + +struct list_head { + struct list_head *next, *prev; +}; + +#define LIST_HEAD_INIT(name) { &(name), &(name) } + +#define LIST_HEAD(name) \ + struct list_head name = LIST_HEAD_INIT(name) + +#define INIT_LIST_HEAD(ptr) \ + (ptr)->next = (ptr); (ptr)->prev = (ptr) + + +/* +* Insert a new entry between two known consecutive entries. +* +* This is only for internal list manipulation where we know +* the prev/next entries already! +*/ +static inline void __list_add(struct list_head *new, + struct list_head *prev, + struct list_head *next) +{ + next->prev = new; + new->next = next; + new->prev = prev; + prev->next = new; +} + +/** +* list_add - add a new entry +* @new: new entry to be added +* @head: list head to add it after +* +* Insert a new entry after the specified head. +* This is good for implementing stacks. +*/ +static inline void list_add(struct list_head *new, struct list_head *head) +{ + __list_add(new, head, head->next); +} + +/** +* list_add_tail - add a new entry +* @new: new entry to be added +* @head: list head to add it before +* +* Insert a new entry before the specified head. +* This is useful for implementing queues. +*/ +static inline void list_add_tail(struct list_head *new, struct list_head *head) +{ + __list_add(new, head->prev, head); +} + + /* + * Delete a list entry by making the prev/next entries + * point to each other. + * + * This is only for internal list manipulation where we know + * the prev/next entries already! + */ + static inline void __list_del(struct list_head * prev, struct list_head * next) + { + next->prev = prev; + prev->next = next; + } + + /** + * list_del - deletes entry from list. + * @entry: the element to delete from the list. + * Note: list_empty on entry does not return true after this, the entry is + * in an undefined state. + */ + static inline void list_del(struct list_head *entry) + { + __list_del(entry->prev, entry->next); + entry->next = LIST_POISON1; + entry->prev = LIST_POISON2; + } + +/** +* list_empty - tests whether a list is empty +* @head: the list to test. 
+*/ +static inline int list_empty(const struct list_head *head) +{ + return head->next == head; +} + + /** + * list_entry - get the struct for this entry + * @ptr: the &struct list_head pointer. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + */ +#define list_entry(ptr, type, member) \ + container_of(ptr, type, member) + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry - iterate over list of given type +* @pos: the type * to use as a loop counter. +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +*/ +#define list_for_each_entry(pos, head, member,type) \ + for (pos = list_entry((head)->next, type, member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.next, type, member)) + + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry_reverse - iterate backwards over list of given type. +* @pos: the type * to use as a loop counter. +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +*/ +#define list_for_each_entry_reverse(pos, head, member,type) \ + for (pos = list_entry((head)->prev, type, member); \ + &pos->member != (head); \ + pos = list_entry(pos->member.prev, type, member)) + + +//leo: macro changed out of unportable operator typeof +/** +* list_for_each_entry_safe - iterate over list of given type safe against removal of list entry +* @pos: the type * to use as a loop counter. +* @n: another type * to use as temporary storage +* @head: the head for your list. +* @member: the name of the list_struct within the struct. +* @type: typeof(*pos) +* @type_n: typeof(*n) +*/ +#define list_for_each_entry_safe(pos, n, head, member,type,type_n) \ + for (pos = list_entry((head)->next, type, member), \ + n = list_entry(pos->member.next, type, member); \ + &pos->member != (head); \ + pos = n, n = list_entry(n->member.next, type_n, member)) + + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_memory.c b/branches/Ndi/hw/mthca/kernel/mt_memory.c new file mode 100644 index 00000000..868472a9 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_memory.c @@ -0,0 +1,761 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + #include "hca_driver.h" +#include "mthca_dev.h" +#if defined (EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_memory.tmh" +#endif + +#include "mt_pa_cash.h" + + +/* +* Function: map user buffer to kernel and lock it +* +* Return: +*/ +int get_user_pages( + IN struct mthca_dev *dev, /* device */ + IN u64 start, /* address in user space */ + IN int npages, /* size in pages */ + IN int write_access, /* access rights */ + OUT struct scatterlist *sg /* s/g list */ + ) +{ + PMDL mdl_p; + int size = npages << PAGE_SHIFT; + int access = (write_access) ? IoWriteAccess : IoReadAccess; + int err; + void * kva; /* kernel virtual address */ + + UNREFERENCED_PARAMETER(dev); + + HCA_ENTER(HCA_DBG_MEMORY); + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + + /* allocate MDL */ + mdl_p = IoAllocateMdl( (PVOID)(ULONG_PTR)start, (ULONG)size, + FALSE, + FALSE, /* not charge quota */ + NULL); + if (mdl_p == NULL) { + err = -ENOMEM; + goto err0; + } + + /* lock memory */ + __try { + MmProbeAndLockPages( mdl_p, UserMode, access ); + } + __except (EXCEPTION_EXECUTE_HANDLER) + { + NTSTATUS Status = GetExceptionCode(); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("Exception 0x%x on MmProbeAndLockPages(), addr 0x%I64x, size %d\n", Status, start, size)); + switch(Status){ + case STATUS_WORKING_SET_QUOTA: + err = -ENOMEM;break; + case STATUS_ACCESS_VIOLATION: + err = -EACCES;break; + default : + err = -EINVAL; + } + + goto err1; + } + + /* map it to kernel */ + kva = MmMapLockedPagesSpecifyCache( mdl_p, + KernelMode, MmNonCached, + NULL, FALSE, NormalPagePriority ); + if (kva == NULL) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,("MmMapLockedPagesSpecifyCache failed\n")); + err = -EFAULT; + goto err2; + } + + sg->page = kva; + sg->length = size; + sg->offset = (unsigned int)(start & ~PAGE_MASK); + sg->p_mdl = mdl_p; + sg->dma_address = MmGetPhysicalAddress(kva).QuadPart; + return 0; + +err2: + MmUnlockPages(mdl_p); +err1: + IoFreeMdl(mdl_p); +err0: + HCA_EXIT(HCA_DBG_MEMORY); + return err; + + } + +void put_page(struct scatterlist *sg) +{ + if (sg->p_mdl) { + MmUnmapLockedPages( sg->page, sg->p_mdl ); + MmUnlockPages(sg->p_mdl); + IoFreeMdl(sg->p_mdl); + } +} + +VOID + AdapterListControl( + IN PDEVICE_OBJECT DeviceObject, + IN PIRP Irp, + IN PSCATTER_GATHER_LIST ScatterGather, + IN PVOID Context + ) +{ + struct scatterlist *p_sg = (struct scatterlist *)Context; + + UNREFERENCED_PARAMETER(DeviceObject); + UNREFERENCED_PARAMETER(Irp); + + // sanity checks + if (!ScatterGather || !Context) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("AdapterListControl failed: invalid parameters\n")); + return; + } + if (ScatterGather->NumberOfElements > 1) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("AdapterListControl failed: unexpected sg size; %d elements \n", + ScatterGather->NumberOfElements )); + } + if (ScatterGather->Elements[0].Length != p_sg->length) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("AdapterListControl failed: unexpected buffer size %#x (expected %#x) \n", + ScatterGather->Elements[0].Length, p_sg->length )); + } + + // results + p_sg->dma_address = ScatterGather->Elements[0].Address.QuadPart; // get logical address + p_sg->p_os_sg = ScatterGather; // store sg list address for releasing 
+	//NB: we do not flush the buffers by FlushAdapterBuffers(), because we don't really transfer data
+}
+
+/* Returns: the number of mapped sg elements */
+int pci_map_sg(struct mthca_dev *dev,
+	struct scatterlist *sg, int nents, int direction)
+{
+#ifndef USE_GET_SG_LIST
+
+	UNREFERENCED_PARAMETER(dev);
+	UNREFERENCED_PARAMETER(sg);
+	UNREFERENCED_PARAMETER(direction);
+
+	// mapping was performed in alloc_dma_mem
+	return nents;
+
+#else
+
+	int i;
+	NTSTATUS status;
+	hca_dev_ext_t *p_ext = dev->ext;
+	struct scatterlist *p_sg = sg;
+	KIRQL irql = KeRaiseIrqlToDpcLevel();
+
+	for (i=0; i<nents; i++, p_sg++) {
+		status = p_ext->p_dma_adapter->DmaOperations->GetScatterGatherList(
+			p_ext->p_dma_adapter, p_ext->cl_ext.p_self_do, p_sg->p_mdl, p_sg->page,
+			p_sg->length, AdapterListControl, p_sg, (BOOLEAN)direction );
+		if (!NT_SUCCESS(status)) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("GetScatterGatherList failed %#x\n", status));
+			break;
+		}
+	}
+	KeLowerIrql(irql);
+	return i; /* the number of entries actually mapped */
+
+#endif
+}
+
+/* Returns: the number of unmapped sg elements */
+int pci_unmap_sg(struct mthca_dev *dev,
+	struct scatterlist *sg, int nents, int direction)
+{
+#ifndef USE_GET_SG_LIST
+
+	UNREFERENCED_PARAMETER(dev);
+	UNREFERENCED_PARAMETER(sg);
+	UNREFERENCED_PARAMETER(direction);
+	// mapping was performed in alloc_dma_mem
+	return nents;
+
+#else
+
+	int i;
+	hca_dev_ext_t *p_ext = dev->ext;
+	struct scatterlist *p_sg = sg;
+	KIRQL irql = KeRaiseIrqlToDpcLevel();
+	void *p_os_sg;
+
+	for (i=0; i<nents; i++, p_sg++) {
+		p_os_sg = p_sg->p_os_sg;
+		p_sg->p_os_sg = NULL;
+		p_ext->p_dma_adapter->DmaOperations->PutScatterGatherList(
+			p_ext->p_dma_adapter, p_os_sg, (BOOLEAN)direction );
+	}
+	KeLowerIrql(irql);
+	return i; /* the number of entries actually unmapped */
+
+#endif
+}
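+
+/*
+ * Usage sketch for the two build variants above (the function and its name
+ * are hypothetical): without USE_GET_SG_LIST, alloc_dma_mem()/alloc_dma_zmem_map()
+ * already return a mapped common buffer and pci_map_sg() is a no-op; with it,
+ * the logical address is produced by GetScatterGatherList().
+ */
+#if 0
+static int my_alloc_ring(struct mthca_dev *dev, struct scatterlist *sg)
+{
+	void *va = alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, sg);
+	if (!va)
+		return -ENOMEM;
+	/* ... hand sg_dma_address(sg) to the HCA, use va from the CPU ... */
+	free_dma_mem_map(dev, sg, PCI_DMA_BIDIRECTIONAL);
+	return 0;
+}
+#endif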
+
+/* The function zeroes 'struct scatterlist' and then fills it with values.
+   On error 'struct scatterlist' is returned zeroed */
+void *alloc_dma_mem(
+	IN struct mthca_dev *dev,
+	IN unsigned long size,
+	OUT struct scatterlist *p_sg)
+{
+	void *va;
+	DMA_ADAPTER *p_dma = dev->ext->p_dma_adapter;
+
+#ifndef USE_GET_SG_LIST
+
+	PHYSICAL_ADDRESS pa = {0};
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+
+	RtlZeroMemory(p_sg,sizeof *p_sg);
+	if (!size)
+		return NULL;
+
+	va = p_dma->DmaOperations->AllocateCommonBuffer(
+		p_dma, size, &pa, FALSE );
+	if (va) {
+		p_sg->length = size;
+		p_sg->dma_address = pa.QuadPart;
+		p_sg->page = va;
+	}
+
+#else
+
+	int err;
+	PHYSICAL_ADDRESS la = {0}, ba = {0}, ha = {(u64)(-1I64)};
+	PMDL p_mdl;
+
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	RtlZeroMemory(p_sg,sizeof *p_sg);
+	if (!size)
+		return NULL;
+
+	// allocate memory
+	va = MmAllocateContiguousMemorySpecifyCache(
+		size, la, ha, ba, MmNonCached );
+	if (!va) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MmAllocateContiguousMemorySpecifyCache failed on %#x size\n", size ));
+		goto err_alloc;
+	}
+
+	// allocate MDL
+	p_mdl = IoAllocateMdl( va, size, FALSE, FALSE, NULL );
+	if (!p_mdl) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("IoAllocateMdl failed on %#x size\n", size ));
+		goto err_mdl;
+	}
+	MmBuildMdlForNonPagedPool( p_mdl );
+
+	p_sg->p_mdl = p_mdl;
+	p_sg->length = size;
+	p_sg->page = va;
+
+	goto end;
+
+err_mdl:
+	MmFreeContiguousMemory(va);
+	va = NULL;
+err_alloc:
+end:
+
+#endif
+
+	return va;
+}
+
+void free_dma_mem(
+	IN struct mthca_dev *dev,
+	IN struct scatterlist *p_sg)
+{
+#ifndef USE_GET_SG_LIST
+
+	PHYSICAL_ADDRESS pa;
+	DMA_ADAPTER *p_dma = dev->ext->p_dma_adapter;
+
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+
+	if (p_sg->length) {
+		pa.QuadPart = p_sg->dma_address;
+		p_dma->DmaOperations->FreeCommonBuffer(
+			p_dma, p_sg->length, pa,
+			p_sg->page, FALSE );
+	}
+
+#else
+
+	PMDL p_mdl = p_sg->p_mdl;
+	PVOID page = p_sg->page;
+
+	ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL);
+	if (p_mdl) {
+		p_sg->p_mdl = NULL;
+		IoFreeMdl( p_mdl );
+	}
+	if (page) {
+		p_sg->page = NULL;
+		MmFreeContiguousMemory(page);
+	}
+
+#endif
+}
+
+
+typedef struct _mt_iobuf_seg {
+	LIST_ENTRY link;
+	PMDL   mdl_p;
+	u64 va;  /* virtual address of the buffer */
+	u64 size;     /* size in bytes of the buffer */
+	u32 nr_pages;
+	int is_user;
+} mt_iobuf_seg_t;
+
+// Returns: 0 on success, -ENOMEM or -EACCESS on error
+static int register_segment(
+	IN u64 va,
+	IN u64 size,
+	IN int is_user,
+	IN ib_access_t acc,
+	OUT mt_iobuf_seg_t **iobuf_seg)
+{
+	PMDL mdl_p;
+	int rc;
+	KPROCESSOR_MODE mode;
+	mt_iobuf_seg_t * new_iobuf;
+	static ULONG cnt=0;
+	LOCK_OPERATION Operation;
+
+	// set Operation
+	if (acc & IB_AC_LOCAL_WRITE)
+		Operation = IoModifyAccess;
+	else
+		Operation = IoReadAccess;
+
+	// allocate IOBUF segment object
+	new_iobuf = (mt_iobuf_seg_t *)kmalloc(sizeof(mt_iobuf_seg_t), GFP_KERNEL );
+	if (new_iobuf == NULL) {
+		rc = -ENOMEM;
+		goto err_nomem;
+	}
+
+	// allocate MDL
+	mdl_p = IoAllocateMdl( (PVOID)(ULONG_PTR)va, (ULONG)size, FALSE,FALSE,NULL);
+	if (mdl_p == NULL) {
+		rc = -ENOMEM;
+		goto err_alloc_mdl;
+	}
+
+	// make context-dependent things
+	if (is_user) {
+		ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+		mode = UserMode;
+	}
+	else {  /* Mapping to kernel virtual address */
+		// MmBuildMdlForNonPagedPool(mdl_p);   // fill MDL ??? - should we do that really ?
+		mode = KernelMode;
+	}
+
+	__try { /* try */
+		MmProbeAndLockPages( mdl_p, mode, Operation );   /* lock memory */
+	} /* try */
+
+	__except (EXCEPTION_EXECUTE_HANDLER) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_MEMORY,
+			("MOSAL_iobuf_register: Exception 0x%x on MmProbeAndLockPages(), va %I64d, sz %I64d\n",
+			GetExceptionCode(), va, size));
+		rc = -EACCES;
+		goto err_probe;
+	}
+
+	// fill IOBUF object
+	new_iobuf->va = va;
+	new_iobuf->size= size;
+	new_iobuf->nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+	new_iobuf->mdl_p = mdl_p;
+	new_iobuf->is_user = is_user;
+	*iobuf_seg = new_iobuf;
+	return 0;
+
+err_probe:
+	IoFreeMdl(mdl_p);
+err_alloc_mdl:
+	ExFreePool((PVOID)new_iobuf);
+err_nomem:
+	return rc;
+}
+
+void iobuf_init(
+	IN u64 va,
+	IN u64 size,
+	IN int is_user,
+	IN OUT mt_iobuf_t *iobuf_p)
+{
+	iobuf_p->va = va;
+	iobuf_p->size= size;
+	iobuf_p->is_user = is_user;
+	InitializeListHead( &iobuf_p->seg_que );
+	iobuf_p->seg_num = 0;
+	iobuf_p->nr_pages = 0;
+	iobuf_p->is_cashed = 0;
+}
+
+int iobuf_register(
+	IN u64 va,
+	IN u64 size,
+	IN int is_user,
+	IN ib_access_t acc,
+	IN OUT mt_iobuf_t *iobuf_p)
+{
+	int rc=0;
+	u64 seg_va;	// current segment start
+	u64 seg_size;	// current segment size
+	u64 rdc;	// remaining data counter - what is left to lock
+	u64 delta;	// the size of the last, partial page of the first segment
+	mt_iobuf_seg_t * new_iobuf;
+	unsigned page_size = PAGE_SIZE;
+
+// 32 - for any case
+#define PFNS_IN_PAGE_SIZE_MDL	((PAGE_SIZE - sizeof(struct _MDL) - 32) / sizeof(long))
+#define MIN_IOBUF_SEGMENT_SIZE	(PAGE_SIZE * PFNS_IN_PAGE_SIZE_MDL)	// 4MB
+
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// we'll try to register all at once.
+	seg_va = va;
+	seg_size = rdc = size;
+
+	// allocate segments
+	while (rdc > 0) {
+		// map a segment
+		rc = register_segment(seg_va, seg_size, is_user, acc, &new_iobuf );
+
+		// success - move to another segment
+		if (!rc) {
+			rdc -= seg_size;
+			seg_va += seg_size;
+			InsertTailList( &iobuf_p->seg_que, &new_iobuf->link );
+			iobuf_p->seg_num++;
+			// round the segment size to the next page boundary
+			delta = (seg_va + seg_size) & (page_size - 1);
+			if (delta) {
+				seg_size -= delta;
+				seg_size += page_size;
+			}
+			if (seg_size > rdc)
+				seg_size = rdc;
+			continue;
+		}
+
+		// failure - too large a buffer: lessen it and try once more
+		if (rc == -ENOMEM) {
+			// nowhere to lessen - memory is too low
+			if (seg_size <= MIN_IOBUF_SEGMENT_SIZE)
+				break;
+			// lessen the size
+			seg_size >>= 1;
+			// round the segment size to the next page boundary
+			delta = (seg_va + seg_size) & (page_size - 1);
+			if (delta) {
+				seg_size -= delta;
+				seg_size += page_size;
+			}
+			if (seg_size > rdc)
+				seg_size = rdc;
+			continue;
+		}
+
+		// got an unrecoverable error
+		break;
+	}
+
+	// SUCCESS
+	if (rc)
+		iobuf_deregister( iobuf_p );
+	else
+		iobuf_p->nr_pages += ADDRESS_AND_SIZE_TO_SPAN_PAGES( va, size );
+
+	return rc;
+}
+
+
+static void __iobuf_copy(
+	IN OUT mt_iobuf_t *dst_iobuf_p,
+	IN mt_iobuf_t *src_iobuf_p
+	)
+{
+	int i;
+	mt_iobuf_seg_t *iobuf_seg_p;
+
+	*dst_iobuf_p = *src_iobuf_p;
+	InitializeListHead( &dst_iobuf_p->seg_que );
+	for (i=0; i<src_iobuf_p->seg_num; ++i) {
+		iobuf_seg_p = (mt_iobuf_seg_t *)(PVOID)RemoveHeadList( &src_iobuf_p->seg_que );
+		InsertTailList( &dst_iobuf_p->seg_que, &iobuf_seg_p->link );
+	}
+}
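+
+/*
+ * Usage sketch for the iobuf API above (the function and its name are
+ * hypothetical): locking a user buffer for DMA and walking its page
+ * translations with the iterator defined further below in this file.
+ */
+#if 0
+static int my_lock_user_buf(u64 va, u64 size)
+{
+	mt_iobuf_t iobuf;
+	mt_iobuf_iter_t it;
+	uint64_t pa[16];
+	uint32_t n;
+	int rc;
+
+	iobuf_init(va, size, 1 /* user */, &iobuf);
+	rc = iobuf_register(va, size, 1 /* user */, IB_AC_LOCAL_WRITE, &iobuf);
+	if (rc)
+		return rc;
+
+	iobuf_iter_init(&iobuf, &it);
+	while ((n = iobuf_get_tpt_seg(&iobuf, &it, 16, pa)) != 0) {
+		/* ... program the n page addresses from pa[] into the TPT ... */
+	}
+
+	iobuf_deregister(&iobuf);
+	return 0;
+}
+#endif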
+
+/* If the buffer to be registered overlaps a buffer that is already registered,
+   a race can happen between the HCA, writing to the previously registered
+   buffer, and the probing functions (MmProbeAndLockPages, MmSecureVirtualMemory)
+   used in the algorithm of memory registration.
+   To prevent the race we maintain reference counters for the physical pages
+   being registered, and register every physical page FOR THE WRITE ACCESS
+   only once. */
+
+int iobuf_register_with_cash(
+	IN u64 vaddr,
+	IN u64 size,
+	IN int is_user,
+	IN OUT ib_access_t *acc_p,
+	IN OUT mt_iobuf_t *iobuf_p)
+{
+	int rc, pa_in;
+	mt_iobuf_t sec_iobuf;
+	int i, page_in, page_out, page_in_total;
+	int nr_pages;
+	char *subregion_start, *va;
+	u64 subregion_size;
+	u64 rdc;	// remaining data counter - what is left to lock
+	u64 delta;	// the size of the last, partial page of the first segment
+	ib_access_t acc;
+
+	down(&g_pa_mutex);
+
+	// register memory for read access to bring pages into the memory
+	rc = iobuf_register( vaddr, size, is_user, 0, iobuf_p);
+
+	// on error or read access - exit
+	if (rc || !(*acc_p & IB_AC_LOCAL_WRITE))
+		goto exit;
+
+	// re-register buffer with the correct access rights
+	iobuf_init( (u64)vaddr, size, is_user, &sec_iobuf );
+	nr_pages = ADDRESS_AND_SIZE_TO_SPAN_PAGES( vaddr, size );
+	subregion_start = va = (char*)(ULONG_PTR)vaddr;
+	rdc = size;
+	pa_in = page_in = page_in_total = page_out = 0;
+
+	for (i=0; i<nr_pages; ++i, va+=PAGE_SIZE) {
+		// check whether the phys page is already in the cash, i.e. already
+		// registered for write access by a previous registration
+		pa_in = pa_is_registered( MmGetPhysicalAddress( va ).QuadPart );
+		if (pa_in) {
+			page_in++;
+			page_in_total++;
+		}
+		else
+			page_out++;
+
+		// both counters non-zero - the previous run of pages has ended:
+		// register it as a separate subregion and start a new run
+		if (page_in && page_out) {
+			// prepare to registration of the subregion
+			if (pa_in) {	// the ended run is a SUBREGION WITH WRITE ACCESS
+				acc = IB_AC_LOCAL_WRITE;
+				subregion_size = (u64)page_out * PAGE_SIZE;
+				page_out = 0;
+			}
+			else {	// the ended run is a SUBREGION WITH READ ACCESS
+				acc = 0;
+				subregion_size = (u64)page_in * PAGE_SIZE;
+				page_in = 0;
+			}
+
+			// round the subregion size to the page boundary
+			delta = (u64)(subregion_start + subregion_size) & (PAGE_SIZE - 1);
+			subregion_size -= delta;
+			if (subregion_size > rdc)
+				subregion_size = rdc;
+
+			// register the subregion
+			rc = iobuf_register( (u64)subregion_start, subregion_size, is_user, acc, &sec_iobuf);
+			if (rc)
+				goto cleanup;
+
+			// prepare to the next loop
+			rdc -= subregion_size;
+			subregion_start +=subregion_size;
+		}
+	}
+
+	// prepare to registration of the subregion
+	if (pa_in) {	// SUBREGION WITH READ ACCESS
+		acc = 0;
+		subregion_size = (u64)page_in * PAGE_SIZE;
+	}
+	else {	// SUBREGION WITH WRITE ACCESS
+		acc = IB_AC_LOCAL_WRITE;
+		subregion_size = (u64)page_out * PAGE_SIZE;
+	}
+
+	// round the subregion size to the page boundary
+	delta = (u64)(subregion_start + subregion_size) & (PAGE_SIZE - 1);
+	subregion_size -= delta;
+	if (subregion_size > rdc)
+		subregion_size = rdc;
+
+	// register the subregion
+	rc = iobuf_register( (u64)subregion_start, subregion_size, is_user, acc, &sec_iobuf);
+	if (rc)
+		goto cleanup;
+
+	// cash phys pages
+	rc = pa_register(iobuf_p);
+	if (rc)
+		goto err_pa_reg;
+
+	// replace the iobuf
+	iobuf_deregister( iobuf_p );
+	sec_iobuf.is_cashed = TRUE;
+	__iobuf_copy( iobuf_p, &sec_iobuf );
+
+	// buffer is a part of also registered buffer - change the rights
+	if (page_in_total)
+		*acc_p = MTHCA_ACCESS_REMOTE_READ;
+
+	goto exit;
+
+err_pa_reg:
+	iobuf_deregister( &sec_iobuf );
+cleanup:
+	iobuf_deregister( iobuf_p );
+exit:
+	up(&g_pa_mutex);
+	return rc;
+}
+
+static void deregister_segment(mt_iobuf_seg_t * iobuf_seg_p)
+{
+	MmUnlockPages( iobuf_seg_p->mdl_p );	// unlock the buffer
+	IoFreeMdl( iobuf_seg_p->mdl_p );	// free MDL
+	ExFreePool(iobuf_seg_p);
+}
+
+void iobuf_deregister(mt_iobuf_t *iobuf_p)
+{
+	mt_iobuf_seg_t *iobuf_seg_p;	// pointer to current segment object
+
+	ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
+
+	// release segments
+	while (!IsListEmpty( &iobuf_p->seg_que )) {
+		iobuf_seg_p = (mt_iobuf_seg_t *)(PVOID)RemoveTailList( &iobuf_p->seg_que );
+		deregister_segment(iobuf_seg_p);
+		iobuf_p->seg_num--;
+	}
+	ASSERT(iobuf_p->seg_num == 0);
+}
+
+void iobuf_deregister_with_cash(mt_iobuf_t *iobuf_p)
+{
+	ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
+
+	down(&g_pa_mutex);
+	if (iobuf_p->is_cashed)
+		pa_deregister(iobuf_p);
+	iobuf_deregister(iobuf_p);
+	up(&g_pa_mutex);
+}
+
+void iobuf_iter_init(
+	IN mt_iobuf_t *iobuf_p,
+	IN OUT mt_iobuf_iter_t *iterator_p)
+{
+	iterator_p->seg_p = iobuf_p->seg_que.Flink;
iterator_p->pfn_ix = 0; +} + +// the function returns phys addresses of the pages, also for the first page +// if one wants to get the phys address of the buffer, one has to +// add the offset from the start of the page to the first phys address +// Returns: the number of entries, filled in page_tbl_p +// Returns 0 while at the end of list. +uint32_t iobuf_get_tpt_seg( + IN mt_iobuf_t *iobuf_p, + IN OUT mt_iobuf_iter_t *iterator_p, + IN uint32_t n_pages_in, + IN OUT uint64_t *page_tbl_p ) +{ + uint32_t i=0; // has to be initialized here for a premature exit + mt_iobuf_seg_t *seg_p; // pointer to current segment object + PPFN_NUMBER pfn_p; + uint32_t pfn_ix; // index of PFN in PFN array of the current segment + uint64_t *pa_buf_p = page_tbl_p; + + // prepare to the loop + seg_p = iterator_p->seg_p; // first segment of the first iobuf + pfn_ix= iterator_p->pfn_ix; + + // check, whether we at the end of the list + if ((PVOID)seg_p == (PVOID)&iobuf_p->seg_que) + goto exit; + pfn_p = MmGetMdlPfnArray( seg_p->mdl_p ) + pfn_ix; + + // pass along all the PFN arrays + for (; i < n_pages_in; i++, pa_buf_p++) { + // convert PFN to the physical address + *pa_buf_p = (uint64_t)*pfn_p++ << PAGE_SHIFT; + + // get to the next PFN + if (++pfn_ix >= seg_p->nr_pages) { + seg_p = (mt_iobuf_seg_t*)seg_p->link.Flink; + pfn_ix = 0; + if ((PVOID)seg_p == (PVOID)&iobuf_p->seg_que) { + i++; + break; + } + pfn_p = MmGetMdlPfnArray( seg_p->mdl_p ); + } + } + +exit: + iterator_p->seg_p = seg_p; + iterator_p->pfn_ix = pfn_ix; + return i; +} + + + + diff --git a/branches/Ndi/hw/mthca/kernel/mt_memory.h b/branches/Ndi/hw/mthca/kernel/mt_memory.h new file mode 100644 index 00000000..66be696d --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_memory.h @@ -0,0 +1,307 @@ +#ifndef MT_MEMORY_H +#define MT_MEMORY_H + +#include "iba/ib_types.h" + +// =========================================== +// CONSTANTS +// =========================================== + +#define MT_TAG_ATOMIC 'MOTA' +#define MT_TAG_KERNEL 'LNRK' +#define MT_TAG_HIGH 'HGIH' +#define MT_TAG_PCIPOOL 'PICP' +#define MT_TAG_IOMAP 'PAMI' + +// =========================================== +// SUBSTITUTIONS +// =========================================== + +#define memcpy_toio memcpy + +// =========================================== +// MACROS +// =========================================== + +#define PAGE_MASK (~(PAGE_SIZE-1)) +#define NEXT_PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK) + + +// =========================================== +// SYSTEM MEMORY +// =========================================== + +// memory +#define __GFP_NOWARN 0 /* Suppress page allocation failure warning */ +#define __GFP_HIGHMEM 0 + +#define GFP_ATOMIC 1 /* can't wait (i.e. 
DPC or higher) */ +#define GFP_KERNEL 2 /* can wait (npaged) */ +#define GFP_HIGHUSER 4 /* GFP_KERNEL, that can be in HIGH memory */ + + +#define SLAB_ATOMIC GFP_ATOMIC +#define SLAB_KERNEL GFP_KERNEL + +#if 1 +static inline void * kmalloc( SIZE_T bsize, unsigned int gfp_mask) +{ + void *ptr; + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + switch (gfp_mask) { + case GFP_ATOMIC: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_ATOMIC ); + break; + case GFP_KERNEL: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL ); + break; + case GFP_HIGHUSER: + ptr = ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_HIGH ); + break; + default: + DbgPrint("kmalloc: unsupported flag %d\n", gfp_mask); + ptr = NULL; + break; + } + return ptr; +} +#else +#define kmalloc(bsize,flags) ExAllocatePoolWithTag( NonPagedPool, bsize, MT_TAG_KERNEL ) +#endif + +static inline void * kzalloc( SIZE_T bsize, unsigned int gfp_mask) +{ + void* va = kmalloc(bsize, gfp_mask); + if (va) + RtlZeroMemory(va, bsize); + return va; +} + +static inline void kfree (const void *pobj) +{ + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + if (pobj) + ExFreePool((void *)pobj); +} + +#define get_zeroed_page(mask) kzalloc(PAGE_SIZE, mask) +#define free_page(ptr) kfree(ptr) + + +// =========================================== +// IO SPACE <==> SYSTEM MEMORY +// =========================================== + + +/** +* ioremap - map bus memory into CPU space +* @offset: bus address of the memory +* @size: size of the resource to map +* +* ioremap performs a platform specific sequence of operations to +* make bus memory CPU accessible via the readb/readw/readl/writeb/ +* writew/writel functions and the other mmio helpers. The returned +* address is not guaranteed to be usable directly as a virtual +* address. 
+*/ +static inline void *ioremap(io_addr_t addr, SIZE_T size, SIZE_T* psize) +{ + PHYSICAL_ADDRESS pa; + void *va; + + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + pa.QuadPart = addr; + va = MmMapIoSpace( pa, size, MmNonCached ); + *psize = size; + return va; +} + +static inline void iounmap(void *va, SIZE_T size) +{ + MmUnmapIoSpace( va, size); +} + + // =========================================== + // DMA SUPPORT + // =========================================== + +#define PCI_DMA_BIDIRECTIONAL 0 +#define PCI_DMA_TODEVICE 1 +#define PCI_DMA_FROMDEVICE 2 +#define DMA_TO_DEVICE PCI_DMA_TODEVICE + + struct scatterlist { + dma_addr_t dma_address; /* logical (device) address */ + void * page; /* kernel virtual address */ + PMDL p_mdl; /* MDL, if any (used for user space buffers) */ + PSCATTER_GATHER_LIST p_os_sg; /* adapter scatter-gather list */ + unsigned int offset; /* offset in the first page */ + unsigned int length; /* buffer length */ + }; + + #define sg_dma_address(sg) ((sg)->dma_address) + #define sg_dma_len(sg) ((sg)->length) + + struct mthca_dev; + + int pci_map_sg(struct mthca_dev *dev, + struct scatterlist *sg, int nents, int direction); + + int pci_unmap_sg(struct mthca_dev *dev, + struct scatterlist *sg, int nents, int direction); + + void free_dma_mem( + IN struct mthca_dev *dev, + IN struct scatterlist *p_sg); + + void *alloc_dma_mem( + IN struct mthca_dev *dev, + IN unsigned long size, + OUT struct scatterlist *p_sg); + +static inline void *alloc_dma_zmem( + IN struct mthca_dev *dev, + IN unsigned long size, + OUT struct scatterlist *p_sg) +{ + void *va = alloc_dma_mem( dev, size, p_sg ); + if (va) + RtlZeroMemory(va, size); + return va; +} + +static inline void *alloc_dma_zmem_map( + IN struct mthca_dev *dev, + IN unsigned long size, + IN int direction, + OUT struct scatterlist *p_sg) +{ + void *va = alloc_dma_zmem( dev, size, p_sg ); + if (va) { + if (!pci_map_sg( dev, p_sg, 1, direction )) { + free_dma_mem( dev, p_sg ); + va = NULL; + } + } + return va; +} + +static inline void free_dma_mem_map( + IN struct mthca_dev *dev, + IN struct scatterlist *p_sg, + IN int direction ) +{ + pci_unmap_sg( dev, p_sg, 1, direction ); + free_dma_mem( dev, p_sg ); +} + + static inline dma_addr_t pci_mape_page(struct mthca_dev *dev, + void *va, unsigned long offset, SIZE_T size, int direction) + { + UNREFERENCED_PARAMETER(dev); + UNREFERENCED_PARAMETER(va); + UNREFERENCED_PARAMETER(offset); + UNREFERENCED_PARAMETER(size); + UNREFERENCED_PARAMETER(direction); + /* suppose, that pages where always translated to DMA space */ + return 0; /* i.e., we unmapped all the entries */ + } + + // =========================================== + // HELPERS + // =========================================== + + static inline int get_order(unsigned long size) +{ + int order; + + size = (size-1) >> (PAGE_SHIFT-1); + order = -1; + do { + size >>= 1; + order++; + } while (size); + return order; +} + +static inline int long_log2(unsigned long x) +{ + int r = 0; + for (x >>= 1; x > 0; x >>= 1) + r++; + return r; +} + +static inline unsigned long roundup_pow_of_two(unsigned long x) +{ + return (1UL << fls(x - 1)); +} + +// =========================================== +// PROTOTYPES +// =========================================== + +void put_page(struct scatterlist *sg); +int get_user_pages( + IN struct mthca_dev *dev, /* device */ + IN u64 start, /* address in user space */ + IN int npages, /* size in pages */ + IN int write_access, /* access rights */ + OUT struct scatterlist *sg /* s/g list */ + ); + 
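The scatterlist and the alloc_dma_zmem_map/free_dma_mem_map pair above are thin Linux-style wrappers over the Windows DMA DDIs. As a usage illustration, here is a minimal sketch, not part of the patch, assuming a valid struct mthca_dev *dev; the function name is illustrative only.

/* Minimal usage sketch: allocate a zeroed, DMA-mapped buffer and release it. */
static int dma_round_trip_example(struct mthca_dev *dev)
{
	struct scatterlist sg;
	dma_addr_t dma;
	void *va;

	/* allocate a zeroed buffer of two pages and map it for DMA */
	va = alloc_dma_zmem_map(dev, 2 * PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &sg);
	if (!va)
		return -ENOMEM;

	dma = sg_dma_address(&sg);	/* device-visible address */

	/* ... program the HCA with 'dma', access the buffer through 'va' ... */

	/* unmap and free in one call */
	free_dma_mem_map(dev, &sg, PCI_DMA_BIDIRECTIONAL);
	return 0;
}

Note that alloc_dma_zmem_map already rolls back the allocation if the mapping step fails, so the caller only has to check for a NULL return.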
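The allocation-order helpers are easy to misread, so the expected values are worth spelling out. The sketch below follows from the definitions above, assuming 4KB pages (PAGE_SHIFT == 12) and Linux-style fls() semantics (fls(x) is the 1-based index of the most significant set bit); it is an illustration, not part of the patch.

/* Expected helper values under the stated assumptions. */
static void helper_values_example(void)
{
	MT_ASSERT( get_order(1)             == 0 );	/* fits in one page  */
	MT_ASSERT( get_order(PAGE_SIZE)     == 0 );	/* still one page    */
	MT_ASSERT( get_order(PAGE_SIZE + 1) == 1 );	/* needs two pages   */
	MT_ASSERT( long_log2(16)            == 4 );
	MT_ASSERT( roundup_pow_of_two(100)  == 128 );	/* 1 << fls(99) */
}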
+typedef struct _mt_iobuf { + u64 va; /* virtual address of the buffer */ + u64 size; /* size in bytes of the buffer */ + LIST_ENTRY seg_que; + u32 nr_pages; + int is_user; + int seg_num; + int is_cashed; +} mt_iobuf_t; + +/* iterator for getting segments of tpt */ +typedef struct _mt_iobuf_iter { + void * seg_p; /* the item from where to take the next translations */ + unsigned int pfn_ix; /* index from where to take the next translation */ +} mt_iobuf_iter_t; + +void iobuf_deregister_with_cash(IN mt_iobuf_t *iobuf_p); + +void iobuf_deregister(IN mt_iobuf_t *iobuf_p); + +void iobuf_init( + IN u64 va, + IN u64 size, + IN int is_user, + IN OUT mt_iobuf_t *iobuf_p); + +int iobuf_register_with_cash( + IN u64 vaddr, + IN u64 size, + IN int is_user, + IN OUT ib_access_t *acc_p, + IN OUT mt_iobuf_t *iobuf_p); + +int iobuf_register( + IN u64 va, + IN u64 size, + IN int is_user, + IN ib_access_t acc, + IN OUT mt_iobuf_t *iobuf_p); + +void iobuf_iter_init( + IN mt_iobuf_t *iobuf_p, + IN OUT mt_iobuf_iter_t *iterator_p); + +uint32_t iobuf_get_tpt_seg( + IN mt_iobuf_t *iobuf_p, + IN OUT mt_iobuf_iter_t *iterator_p, + IN uint32_t n_pages_in, + IN OUT uint64_t *page_tbl_p ); + +unsigned long copy_from_user(void *to, const void *from, unsigned long n); +unsigned long copy_to_user(void *to, const void *from, unsigned long n); + + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_pa_cash.c b/branches/Ndi/hw/mthca/kernel/mt_pa_cash.c new file mode 100644 index 00000000..3b0f27bc --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_pa_cash.c @@ -0,0 +1,364 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: mlnx_uvp_cq.c 1611 2006-08-20 14:48:55Z sleybo $ + */ + +#include "mt_pa_cash.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_pa_cash.tmh" +#endif + +/////////////////////////////////////////////////////////////////////////// +// +// RESTRICTIONS +// +/////////////////////////////////////////////////////////////////////////// + +#ifdef _WIN64 +#define MAX_PAGES_SUPPORTED (64 * 1024 * 1024) // 256 GB +#else +#define MAX_PAGES_SUPPORTED (16 * 1024 * 1024) // 64 GB +#endif + +#define FREE_LIST_TRESHOLD 256 // max number of pages in free list + +/////////////////////////////////////////////////////////////////////////// +// +// CONSTANTS +// +/////////////////////////////////////////////////////////////////////////// + +#define PA_TABLE_ENTRY_SIZE sizeof(pa_table_entry_t) +#define PA_TABLE_ENTRY_NUM (PAGE_SIZE / PA_TABLE_ENTRY_SIZE) +#define PA_TABLE_SIZE (PA_TABLE_ENTRY_SIZE * PA_TABLE_ENTRY_NUM) + +#define PA_DIR_ENTRY_SIZE sizeof(pa_dir_entry_t) +#define PA_DIR_ENTRY_NUM (MAX_PAGES_SUPPORTED /PA_TABLE_ENTRY_NUM) +#define PA_DIR_SIZE (PA_DIR_ENTRY_SIZE * PA_DIR_ENTRY_NUM) + + +/////////////////////////////////////////////////////////////////////////// +// +// STRUCTURES +// +/////////////////////////////////////////////////////////////////////////// + +typedef struct { + int ref_cnt; +} pa_table_entry_t; + +typedef struct { + pa_table_entry_t *pa_te; /* pointer to one page of pa_table_entry_t elements */ + int used; /* number of pa_table_entry_t elements, used now. When 0 - the page may be freed */ +} pa_dir_entry_t; + +typedef struct pa_cash_s { + pa_dir_entry_t *pa_dir; + SINGLE_LIST_ENTRY free_list_hdr; + uint32_t free_nr_pages; + uint32_t free_list_threshold; + uint32_t max_nr_pages; + uint32_t cur_nr_pages; +} pa_cash_t; + + + +/////////////////////////////////////////////////////////////////////////// +// +// GLOBALS +// +/////////////////////////////////////////////////////////////////////////// + +KMUTEX g_pa_mutex; +u64 g_pa[1024]; +pa_cash_t g_cash; + + +/////////////////////////////////////////////////////////////////////////// +// +// STATIC FUNCTIONS +// +/////////////////////////////////////////////////////////////////////////// + +static uint32_t __calc_threshold() +{ + // threshold expresses the max length of free pages list, which gets released only at driver unload time + // so it can be calculated to be proportional to the system memory size + return FREE_LIST_TRESHOLD; +} + +static pa_table_entry_t *__alloc_page() +{ + pa_table_entry_t *pa_te; + + /* take from the list of reserved if it is not empty */ + if (g_cash.free_nr_pages) { + pa_te = (pa_table_entry_t *)PopEntryList( &g_cash.free_list_hdr ); + ((SINGLE_LIST_ENTRY*)pa_te)->Next = NULL; + g_cash.free_nr_pages--; + } + else /* allocate new page */ + pa_te = (pa_table_entry_t *)kzalloc( PA_TABLE_SIZE, GFP_KERNEL ); + + return pa_te; +} + +static void __free_page(pa_table_entry_t *pa_te) +{ + if (g_cash.free_nr_pages < g_cash.free_list_threshold) { + PushEntryList( &g_cash.free_list_hdr, (SINGLE_LIST_ENTRY*)pa_te ); + g_cash.free_nr_pages++; + } + else + kfree(pa_te); +} + +static pa_table_entry_t * __get_page(uint32_t ix) +{ + pa_table_entry_t *pa_te = g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te; + + /* no this page_table - add a new one */ + if (!pa_te) { + pa_te = __alloc_page(); + if (!pa_te) + return NULL; + g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te = pa_te; + g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].used = 0; + g_cash.cur_nr_pages++; + } + + return pa_te; +} + 
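The pa_dir/pa_te pair that __get_page manages is effectively a two-level page table keyed by physical page number: with 4-byte pa_table_entry_t entries and 4KB pages, each table page holds PA_TABLE_ENTRY_NUM == 1024 entries, so a page index ix resolves to directory slot ix / PA_TABLE_ENTRY_NUM and entry ix % PA_TABLE_ENTRY_NUM. A sketch of the lookup follows (illustrative function name, not part of the patch):

/* Lookup sketch: resolve a physical address to its reference count.
 * E.g. pa == 0x12345000 gives ix == 0x12345, dir slot 72, entry 837. */
static int lookup_ref_cnt_example(uint64_t pa)
{
	uint32_t ix = (uint32_t)(pa >> PAGE_SHIFT);	/* physical page index */
	pa_table_entry_t *pa_te = g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te;

	if (!pa_te)	/* the second-level page was never allocated */
		return 0;
	return pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt;
}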
+static void __put_page(uint32_t ix)
+{
+	__free_page(g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te);
+	g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te = NULL;
+	g_cash.cur_nr_pages--;
+}
+
+static int __add_pa(uint64_t pa)
+{
+	uint32_t ix = (uint32_t)(pa >> PAGE_SHIFT);
+	pa_table_entry_t *pa_te;
+
+	/* either pa is incorrect or memory that big is not supported */
+	if (ix > g_cash.max_nr_pages) {
+		ASSERT(FALSE);
+		return -EFAULT;
+	}
+
+	/* get the page of table entries */
+	pa_te = __get_page(ix);
+	if (!pa_te)
+		return -ENOMEM;
+
+	/* register the page address */
+	if (!pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt)
+		++g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].used;
+	++pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt;
+
+	return 0;
+}
+
+
+static int __rmv_pa(uint64_t pa)
+{
+	uint32_t ix = (uint32_t)(pa >> PAGE_SHIFT);
+	pa_table_entry_t *pa_te;
+
+	/* either pa is incorrect or memory that big is not supported */
+	if (ix > g_cash.max_nr_pages) {
+		ASSERT(FALSE);
+		return -EFAULT;
+	}
+
+	pa_te = g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te;
+
+	/* this page table does not exist - error */
+	if (!pa_te) {
+		ASSERT(FALSE);
+		return -EFAULT;
+	}
+
+	/* deregister the page address */
+	--pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt;
+	ASSERT(pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt >= 0);
+
+	/* release the page-table page once it is no longer used */
+	if (!pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt)
+		--g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].used;
+	if (!g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].used)
+		__put_page(ix);
+
+	return 0;
+}
+
+
+
+///////////////////////////////////////////////////////////////////////////
+//
+// PUBLIC FUNCTIONS
+//
+///////////////////////////////////////////////////////////////////////////
+
+
+int pa_register(mt_iobuf_t *iobuf_p)
+{
+	int i,j,n;
+	mt_iobuf_iter_t iobuf_iter;
+
+	iobuf_iter_init( iobuf_p, &iobuf_iter );
+	n = 0;
+	for (;;) {
+		i = iobuf_get_tpt_seg( iobuf_p, &iobuf_iter,
+			sizeof(g_pa) / sizeof (u64), g_pa );
+		if (!i)
+			break;
+		for (j=0; j<i; j++)
+			if (__add_pa(g_pa[j]))
+				goto cleanup;
+		n += i;
+	}
+	return 0;
+
+cleanup:
+	/* unwind the pages registered so far */
+	iobuf_iter_init( iobuf_p, &iobuf_iter );
+	while (n > 0) {
+		i = iobuf_get_tpt_seg( iobuf_p, &iobuf_iter,
+			sizeof(g_pa) / sizeof (u64), g_pa );
+		if (!i)
+			break;
+		for (j=0; j<i && n>0; j++, n--)
+			__rmv_pa(g_pa[j]);
+	}
+	return -ENOMEM;
+}
+
+void pa_deregister(mt_iobuf_t *iobuf_p)
+{
+	int i,j;
+	mt_iobuf_iter_t iobuf_iter;
+
+	iobuf_iter_init( iobuf_p, &iobuf_iter );
+	for (;;) {
+		i = iobuf_get_tpt_seg( iobuf_p, &iobuf_iter,
+			sizeof(g_pa) / sizeof (u64), g_pa );
+		if (!i)
+			break;
+		for (j=0; j<i; j++)
+			__rmv_pa(g_pa[j]);
+	}
+}
+
+void pa_cash_release()
+{
+	int i;
+	pa_table_entry_t *pa_te;
+
+	/* free the pages kept in the free list */
+	while (g_cash.free_nr_pages) {
+		pa_te = (pa_table_entry_t *)PopEntryList( &g_cash.free_list_hdr );
+		kfree(pa_te);
+		g_cash.free_nr_pages--;
+	}
+
+	/* free the page-table pages still referenced from the directory */
+	for (i = 0; i < PA_DIR_ENTRY_NUM; ++i)
+		if (g_cash.pa_dir[i].pa_te)
+			kfree(g_cash.pa_dir[i].pa_te);
+
+	kfree(g_cash.pa_dir);
+	g_cash.pa_dir = NULL;
+}
+
+int pa_is_registered(uint64_t pa)
+{
+	uint32_t ix = (uint32_t)(pa >> PAGE_SHIFT);
+	pa_table_entry_t *pa_te;
+
+	/* either pa is incorrect or memory that big is not supported */
+	if (ix > g_cash.max_nr_pages) {
+		ASSERT(FALSE);
+		return -EFAULT;
+	}
+
+	pa_te = g_cash.pa_dir[ix / PA_TABLE_ENTRY_NUM].pa_te;
+
+	/* this page table does not exist */
+	if (!pa_te)
+		return 0;
+
+	return pa_te[ix % PA_TABLE_ENTRY_NUM].ref_cnt;
+}
+
+int pa_cash_init()
+{
+	void *pa_dir;
+	pa_dir = kzalloc(PA_DIR_SIZE, GFP_KERNEL);
+
+	if (!pa_dir)
+		return -ENOMEM;
+	g_cash.pa_dir = pa_dir;
+	g_cash.max_nr_pages = PA_TABLE_ENTRY_NUM * PA_DIR_ENTRY_NUM;
+	g_cash.free_list_hdr.Next = NULL;
+	g_cash.cur_nr_pages = 0;
+	g_cash.free_nr_pages = 0;
+	g_cash.free_list_threshold = __calc_threshold();
+	KeInitializeMutex(&g_pa_mutex, 0);
+	return 0;
+}
+
diff --git a/branches/Ndi/hw/mthca/kernel/mt_pa_cash.h b/branches/Ndi/hw/mthca/kernel/mt_pa_cash.h
new file mode 100644
index 00000000..4ca6eb57
--- /dev/null
+++ b/branches/Ndi/hw/mthca/kernel/mt_pa_cash.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: mlnx_uvp_cq.c 1611 2006-08-20 14:48:55Z sleybo $ + */ + +#include "mthca_dev.h" + +extern KMUTEX g_pa_mutex; + +int pa_cash_init(); + +void pa_cash_release(); + +int pa_is_registered(uint64_t pa); + +int pa_register(mt_iobuf_t *iobuf_p); + +void pa_deregister(mt_iobuf_t *iobuf_p); + +void pa_cash_print(); + diff --git a/branches/Ndi/hw/mthca/kernel/mt_packer.c b/branches/Ndi/hw/mthca/kernel/mt_packer.c new file mode 100644 index 00000000..a61cece8 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_packer.c @@ -0,0 +1,205 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_packer.tmh" +#endif + +static u64 value_read(int offset, int size, u8 *structure) +{ + switch (size) { + case 1: return *(u8 *) (structure + offset); + case 2: return cl_ntoh16(*(__be16 *) (structure + offset)); + case 4: return cl_ntoh32(*(__be32 *) (structure + offset)); + case 8: return cl_ntoh64(*(__be64 *) (structure + offset)); + default: + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Field size %d bits not handled\n", size * 8)); + return 0; + } +} + +/** + * ib_pack - Pack a structure into a buffer + * @desc:Array of structure field descriptions + * @desc_len:Number of entries in @desc + * @structure:Structure to pack from + * @buf:Buffer to pack into + * + * ib_pack() packs a list of structure fields into a buffer, + * controlled by the array of fields in @desc. + */ +void ib_pack(const struct ib_field *desc, + int desc_len, + u8 *structure, + u8 *buf) +{ + int i; + CPU_2_BE64_PREP; + + for (i = 0; i < desc_len; ++i) { + if (desc[i].size_bits <= 32) { + int shift; + u32 val; + __be32 mask; + __be32 *addr; + + shift = 32 - desc[i].offset_bits - desc[i].size_bits; + if (desc[i].struct_size_bytes) + val = (u32)value_read(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + structure) << shift; + else + val = 0; + + mask = cl_hton32(((1Ui64 << desc[i].size_bits) - 1) << shift); + addr = (__be32 *) buf + desc[i].offset_words; + *addr = (*addr & ~mask) | (cl_hton32(val) & mask); + } else if (desc[i].size_bits <= 64) { + int shift; + u64 val; + __be64 mask; + __be64 *addr; + + shift = 64 - desc[i].offset_bits - desc[i].size_bits; + if (desc[i].struct_size_bytes) + val = value_read(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + structure) << shift; + else + val = 0; + + mask = CPU_2_BE64((~0Ui64 >> (64 - desc[i].size_bits)) << shift); + addr = (__be64 *) ((__be32 *) buf + desc[i].offset_words); + *addr = (*addr & ~mask) | (cl_hton64(val) & mask); + } else { + if (desc[i].offset_bits % 8 || + desc[i].size_bits % 8) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Structure field %s of size %d " + "bits is not byte-aligned\n", + desc[i].field_name, desc[i].size_bits)); + } + + if (desc[i].struct_size_bytes) + memcpy(buf + desc[i].offset_words * 4 + + desc[i].offset_bits / 8, + structure + desc[i].struct_offset_bytes, + desc[i].size_bits / 8); + else + RtlZeroMemory(buf + desc[i].offset_words * 4 + desc[i].offset_bits / 8, + desc[i].size_bits / 8); + } + } +} + +static void value_write(int offset, int size, u64 val, u8 *structure) +{ + switch (size * 8) { + case 8: *( u8 *) (structure + offset) = (u8)val; break; + case 16: *(__be16 *) (structure + offset) = cl_hton16((u16)val); break; + case 32: *(__be32 *) (structure + offset) = cl_hton32((u32)val); break; + case 64: *(__be64 *) (structure + offset) = cl_hton64(val); break; + default: + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Field size %d bits not handled\n", size * 8)); + } +} + +/** + * ib_unpack - Unpack a buffer into a structure + * @desc:Array of structure field descriptions + * @desc_len:Number of entries in @desc + * @buf:Buffer to unpack from + * @structure:Structure to unpack into + * + * ib_pack() unpacks a list of structure fields from a buffer, + * controlled by the array of fields in @desc. 
+ */ +void ib_unpack(const struct ib_field *desc, + int desc_len, + u8 *buf, + u8 *structure) +{ + int i; + + for (i = 0; i < desc_len; ++i) { + if (!desc[i].struct_size_bytes) + continue; + + if (desc[i].size_bits <= 32) { + int shift; + u32 val; + u32 mask; + __be32 *addr; + + shift = 32 - desc[i].offset_bits - desc[i].size_bits; + mask = ((1Ui64 << desc[i].size_bits) - 1) << shift; + addr = (__be32 *) buf + desc[i].offset_words; + val = (cl_ntoh32(*addr) & mask) >> shift; + value_write(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + val, + structure); + } else if (desc[i].size_bits <= 64) { + int shift; + u64 val; + u64 mask; + __be64 *addr; + + shift = 64 - desc[i].offset_bits - desc[i].size_bits; + mask = (~0Ui64 >> (64 - desc[i].size_bits)) << shift; + addr = (__be64 *) buf + desc[i].offset_words; + val = (cl_ntoh64(*addr) & mask) >> shift; + value_write(desc[i].struct_offset_bytes, + desc[i].struct_size_bytes, + val, + structure); + } else { + if (desc[i].offset_bits % 8 || + desc[i].size_bits % 8) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Structure field %s of size %d " + "bits is not byte-aligned\n", + desc[i].field_name, desc[i].size_bits)); + } + + memcpy(structure + desc[i].struct_offset_bytes, + buf + desc[i].offset_words * 4 + + desc[i].offset_bits / 8, + desc[i].size_bits / 8); + } + } +} diff --git a/branches/Ndi/hw/mthca/kernel/mt_pci.h b/branches/Ndi/hw/mthca/kernel/mt_pci.h new file mode 100644 index 00000000..3f389ca9 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_pci.h @@ -0,0 +1,131 @@ +#ifndef MT_PCI_H +#define MT_PCI_H + +// =========================================== +// LITERALS +// =========================================== + +#ifndef PCI_VENDOR_ID_MELLANOX +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#endif + +#ifndef PCI_VENDOR_ID_TOPSPIN +#define PCI_VENDOR_ID_TOPSPIN 0x1867 +#endif + +/* live fishes */ +#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD 0x5a45 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD +#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD 0x6279 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD 0x5e8d +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD +#define PCI_DEVICE_ID_MELLANOX_SINAI_BD 0x6275 +#endif + +// =========================================== +// TYPES +// =========================================== + + +// =========================================== +// MACROS/FUNCTIONS +// =========================================== + +// get bar boundaries +#if 1 +#define pci_resource_start(dev,bar_num) ((dev)->ext->bar[bar_num].phys) +#define pci_resource_len(dev,bar_num) ((dev)->ext->bar[bar_num].size) +#else +static inline uint64_t pci_resource_start(struct mthca_dev *dev, int bar_num) +{ + return dev->ext->bar[bar_num].phys; +} +#endif + + +// i/o to registers + +static inline u64 readq(const volatile void __iomem *addr) +{ + //TODO: write atomic implementation of _IO_READ_QWORD and change mthca_doorbell.h + u64 val; + READ_REGISTER_BUFFER_ULONG((PULONG)(addr), 
(PULONG)&val, 2 ); + return val; +} + +static inline u32 readl(const volatile void __iomem *addr) +{ + return READ_REGISTER_ULONG((PULONG)(addr)); +} + +static inline u16 reads(const volatile void __iomem *addr) +{ + return READ_REGISTER_USHORT((PUSHORT)(addr)); +} + +static inline u8 readb(const volatile void __iomem *addr) +{ + return READ_REGISTER_UCHAR((PUCHAR)(addr)); +} + +#define __raw_readq readq +#define __raw_readl readl +#define __raw_reads reads +#define __raw_readb readb + +static inline void writeq(unsigned __int64 val, volatile void __iomem *addr) +{ + //TODO: write atomic implementation of _IO_WRITE_QWORD and change mthca_doorbell.h + WRITE_REGISTER_BUFFER_ULONG( (PULONG)(addr), (PULONG)&val, 2 ); +} + +static inline void writel(unsigned int val, volatile void __iomem *addr) +{ + WRITE_REGISTER_ULONG((PULONG)(addr),val); +} + +static inline void writes(unsigned short val, volatile void __iomem *addr) +{ + WRITE_REGISTER_USHORT((PUSHORT)(addr),val); +} + +static inline void writeb(unsigned char val, volatile void __iomem *addr) +{ + WRITE_REGISTER_UCHAR((PUCHAR)(addr),val); +} + +#define __raw_writeq writeq +#define __raw_writel writel +#define __raw_writes writes +#define __raw_writeb writeb + +#endif + diff --git a/branches/Ndi/hw/mthca/kernel/mt_pcipool.h b/branches/Ndi/hw/mthca/kernel/mt_pcipool.h new file mode 100644 index 00000000..996cb11d --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_pcipool.h @@ -0,0 +1,103 @@ +#ifndef MT_PCIPOOL_H +#define MT_PCIPOOL_H + +typedef struct pci_pool { + size_t size; + struct mthca_dev *mdev; + char name [32]; + NPAGED_LOOKASIDE_LIST pool_hdr; +} pci_pool_t; + +// taken from dmapool.c + +/** +* pci_pool_create - Creates a pool of consistent memory blocks, for dma. +* @name: name of pool, for diagnostics +* @mdev: device that will be doing the DMA +* @size: size of the blocks in this pool. +* @align: alignment requirement for blocks; must be a power of two +* @allocation: returned blocks won't cross this boundary (or zero) +* Context: !in_interrupt() +* +* Returns a dma allocation pool with the requested characteristics, or +* null if one can't be created. Given one of these pools, dma_pool_alloc() +* may be used to allocate memory. Such memory will all have "consistent" +* DMA mappings, accessible by the device and its driver without using +* cache flushing primitives. The actual size of blocks allocated may be +* larger than requested because of alignment. +* +* If allocation is nonzero, objects returned from dma_pool_alloc() won't + * cross that size boundary. This is useful for devices which have + * addressing restrictions on individual DMA transfers, such as not crossing + * boundaries of 4KBytes. + */ + +pci_pool_t * +pci_pool_create (const char *name, struct mthca_dev *mdev, + size_t size, size_t align, size_t allocation); + +/** + * dma_pool_alloc - get a block of consistent memory + * @pool: dma pool that will produce the block + * @mem_flags: GFP_* bitmask + * @handle: pointer to dma address of block + * + * This returns the kernel virtual address of a currently unused block, + * and reports its dma address through the handle. + * If such a memory block can't be allocated, null is returned. 
+ */ +static inline void * +pci_pool_alloc (pci_pool_t *pool, int mem_flags, dma_addr_t *handle) +{ + PHYSICAL_ADDRESS pa; + void * ptr; + UNREFERENCED_PARAMETER(mem_flags); + + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + ptr = ExAllocateFromNPagedLookasideList( &pool->pool_hdr ); + if (ptr != NULL) { + pa = MmGetPhysicalAddress( ptr ); + *handle = pa.QuadPart; + } + return ptr; +} + + +/** +* dma_pool_free - put block back into dma pool +* @pool: the dma pool holding the block +* @vaddr: virtual address of block +* @dma: dma address of block +* +* Caller promises neither device nor driver will again touch this block +* unless it is first re-allocated. +*/ +static inline void +pci_pool_free (pci_pool_t *pool, void *vaddr, dma_addr_t dma) +{ + UNREFERENCED_PARAMETER(dma); + MT_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + ExFreeToNPagedLookasideList( &pool->pool_hdr, vaddr ); +} + + + +/** + * pci_pool_destroy - destroys a pool of dma memory blocks. + * @pool: dma pool that will be destroyed + * Context: !in_interrupt() + * + * Caller guarantees that no more memory from the pool is in use, + * and that nothing will try to use the pool after this call. + */ +static inline void +pci_pool_destroy (pci_pool_t *pool) +{ + ExDeleteNPagedLookasideList( &pool->pool_hdr ); + ExFreePool( pool); +} + + + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_reset_tavor.c b/branches/Ndi/hw/mthca/kernel/mt_reset_tavor.c new file mode 100644 index 00000000..399c2f16 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_reset_tavor.c @@ -0,0 +1,485 @@ +#include +#include +#include "hca_driver.h" +#include "mthca.h" +#include "hca_debug.h" +#include "Mt_l2w.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_reset_tavor.tmh" +#endif + + +#pragma warning(disable : 4996) + +/* limitations */ +#define N_BUSES 16 /* max number of PCI buses */ +#define N_DEVICES 32 /* max number of devices on one bus */ +#define N_FUNCTIONS 8 /* max number of functions on one device */ +#define N_CARDS 8 /* max number of HCA cards */ + +/*----------------------------------------------------------------*/ + +PWCHAR +WcharFindChar( + IN PWCHAR pi_BufStart, + IN PWCHAR pi_BufEnd, + IN WCHAR pi_FromPattern, + IN WCHAR pi_ToPattern + ) +/*++ + +Routine Description: + Converts wide-character string into ASCII + +Arguments: + + pi_BufStart.......... start of the source string + pi_BufEnd............ end of the source string + pi_FromPattern....... start of pattern range to find + pi_ToPattern......... 
end of pattern range to find + +Return Value: + + pointer to the first pattern found or NULL (when reached the end) + +--*/ +{ /* WcharFindChar */ + + PWCHAR l_pResult = pi_BufStart; + + while (l_pResult < pi_BufEnd ) + { + if (*l_pResult >= pi_FromPattern && *l_pResult <= pi_ToPattern) + return l_pResult; + l_pResult++; + } + + return NULL; + +} /* WcharFindChar */ + + +/*----------------------------------------------------------------*/ + +/* + * Function: MdGetDevLocation + * + * Parameters: + * IN pi_pPdo - PDO of a device in question + * OUT po_pBus - pointer to the bus number of the device in question + * OUT po_pDevFunc - pointer to dev/func of the device, if found + * + * Returns: + * not STATUS_SUCCESS - the device location was not found + * STATUS_SUCCESS - the device location was found and returned in OUT parameters + * + * Description: + * The function uses IoGetDeviceProperty to get the location of a device with given PDO + * + */ +static NTSTATUS +MdGetDevLocation( + IN PDEVICE_OBJECT pi_pPdo, + OUT ULONG * po_pBus, + OUT ULONG * po_pDevFunc + ) +{ + ULONG l_BusNumber, l_DevNumber, l_Function, l_ResultLength = 0; + WCHAR l_Buffer[40], *l_pEnd, *l_pBuf = l_Buffer, *l_pBufEnd = l_Buffer + sizeof(l_Buffer); + NTSTATUS l_Status; + UNICODE_STRING l_UnicodeNumber; + + /* prepare */ + l_ResultLength = 0; + RtlZeroMemory( l_Buffer, sizeof(l_Buffer) ); + + /* Get the device number */ + l_Status = IoGetDeviceProperty(pi_pPdo, + DevicePropertyLocationInformation, sizeof(l_Buffer), l_Buffer, &l_ResultLength); + + /* Verify if the function was successful */ + if ( !NT_SUCCESS(l_Status) || !l_ResultLength ) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("(MdGetDevLocation) Unable to get device number: Status 0x%x, ResultSize %d \n", + l_Status, l_ResultLength )); + goto exit; + } + + // ALL THE BELOW CRAP WE DO INSTEAD OF + // sscanf(l_Buffer, "PCI bus %d, device %d, function %d", &l_BusNumber, &l_DevNumber, &l_Function ); + + /* take bus number */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_BusNumber); + + /* take slot number */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, L',', L',' ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_DevNumber); + + /* take function number */ + *(l_Buffer + (l_ResultLength>>1)) = 0; /* set end of string */ + l_pBuf = WcharFindChar( l_pBuf, l_pBufEnd, L'0', L'9' ); + if (l_pBuf == NULL) goto err; + l_pEnd = WcharFindChar( l_pBuf, l_pBufEnd, 0, 0 ); + if (l_pEnd == NULL) goto err; + l_UnicodeNumber.Length = l_UnicodeNumber.MaximumLength = (USHORT)((PCHAR)l_pEnd - (PCHAR)l_pBuf); + l_UnicodeNumber.Buffer = l_pBuf; l_pBuf = l_pEnd; + RtlUnicodeStringToInteger( &l_UnicodeNumber, 10, &l_Function); + + /* return the results */ + *po_pBus = l_BusNumber; + *po_pDevFunc = (l_DevNumber & 0x01f) | ((l_Function & 7) << 5); + + goto exit; + +err: + l_Status = STATUS_UNSUCCESSFUL; +exit: + return l_Status; +} + 
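The dev/func value that MdGetDevLocation returns uses the conventional PCI packing: bits 0-4 hold the device number, bits 5-7 the function number. A small sketch of the encoding and its inverse follows; the helper names are illustrative, not part of the patch.

/* Pack/unpack of the dev/func byte, mirroring the expression above. */
static ULONG pack_dev_func(ULONG dev, ULONG func)
{
	return (dev & 0x01f) | ((func & 7) << 5);
}

static void unpack_dev_func(ULONG dev_func, ULONG *dev, ULONG *func)
{
	*dev  = dev_func & 0x01f;	/* e.g. 0xA9 -> device 9   */
	*func = (dev_func >> 5) & 7;	/*      0xA9 -> function 5 */
}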
+/*----------------------------------------------------------------*/
+
+/* Function: SendAwaitIrpCompletion
+ *
+ * Description:
+ *	IRP completion routine: signals the caller's event and keeps
+ *	the IRP for the caller of SendAwaitIrp
+ *
+ * Returns:
+ *	STATUS_MORE_PROCESSING_REQUIRED
+ *
+*/
+static
+NTSTATUS
+SendAwaitIrpCompletion (
+	IN PDEVICE_OBJECT	DeviceObject,
+	IN PIRP			Irp,
+	IN PVOID		Context
+	)
+{
+	UNREFERENCED_PARAMETER (DeviceObject);
+	UNREFERENCED_PARAMETER (Irp);
+	KeSetEvent ((PKEVENT) Context, IO_NO_INCREMENT, FALSE);
+	return STATUS_MORE_PROCESSING_REQUIRED; // Keep this IRP
+}
+
+/*------------------------------------------------------------------------------------------------------*/
+
+/*
+ * Function: SendAwaitIrp
+ *
+ * Description:
+ *	Build an IRP, send it down the stack and wait for the response (blocking mode)
+ *
+ * Parameters:
+ *	pi_pFdo................ our device object
+ *	pi_pLdo................ lower device object
+ *	pi_MajorCode........... IRP major code
+ *	pi_MinorCode........... IRP minor code
+ *	pi_pBuffer............. parameter buffer
+ *	pi_nSize............... size of the buffer
+ *	po_pInfo............... returned Information field of the IoStatus block
+ *
+ * Returns:
+ *	standard NTSTATUS return codes
+ *
+*/
+static
+NTSTATUS
+SendAwaitIrp(
+	IN PDEVICE_OBJECT	pi_pFdo,
+	IN PDEVICE_OBJECT	pi_pLdo,
+	IN ULONG	pi_MajorCode,
+	IN ULONG	pi_MinorCode,
+	IN PVOID	pi_pBuffer,
+	IN int		pi_nSize,
+	OUT PVOID * po_pInfo
+	)
+/*++
+
+ Routine Description:
+
+	Build an IRP, send it down the stack and wait for the response
+	(blocking mode)
+
+ Arguments:
+
+	pi_pFdo................ our device
+	pi_pLdo................ lower device
+	pi_MajorCode........... IRP major code
+	pi_MinorCode........... IRP minor code
+	pi_pBuffer............. parameter buffer
+	pi_nSize............... size of the buffer
+
+ Returns:
+
+	standard NTSTATUS return codes. 
+ + Notes: + +--*/ +{ /* SendAwaitIrp */ + // Event + KEVENT l_hEvent; + // Pointer to IRP + PIRP l_pIrp; + // Stack location + PIO_STACK_LOCATION l_pStackLocation; + // Returned status + NTSTATUS l_Status; + // when to invoke + BOOLEAN InvokeAlways = TRUE; + + // call validation + if(KeGetCurrentIrql() != PASSIVE_LEVEL) + return STATUS_SUCCESS; + + // create event + KeInitializeEvent(&l_hEvent, NotificationEvent, FALSE); + + // build IRP request to USBD driver + l_pIrp = IoAllocateIrp( pi_pFdo->StackSize, FALSE ); + + // validate request + if (!l_pIrp) + { + //MdKdPrint( DBGLVL_MAXIMUM, ("(SendAwaitIrp) Unable to allocate IRP !\n")); + return STATUS_INSUFFICIENT_RESOURCES; + } + + // fill IRP + l_pIrp->IoStatus.Status = STATUS_NOT_SUPPORTED; + + // set completion routine + IoSetCompletionRoutine(l_pIrp,SendAwaitIrpCompletion, &l_hEvent, InvokeAlways, InvokeAlways, InvokeAlways); + + // fill stack location + l_pStackLocation = IoGetNextIrpStackLocation(l_pIrp); + l_pStackLocation->MajorFunction= (UCHAR)pi_MajorCode; + l_pStackLocation->MinorFunction= (UCHAR)pi_MinorCode; + RtlCopyMemory( &l_pStackLocation->Parameters, pi_pBuffer, pi_nSize ); + + // Call lower driver perform request + l_Status = IoCallDriver( pi_pLdo, l_pIrp ); + + // if the request not performed --> wait + if (l_Status == STATUS_PENDING) + { + // Wait until the IRP will be complete + KeWaitForSingleObject( + &l_hEvent, // event to wait for + Executive, // thread type (to wait into its context) + KernelMode, // mode of work + FALSE, // alertable + NULL // timeout + ); + l_Status = l_pIrp->IoStatus.Status; + } + + if (po_pInfo) + *po_pInfo = (PVOID)l_pIrp->IoStatus.Information; + + IoFreeIrp(l_pIrp); + return l_Status; + +} /* SendAwaitIrp */ + + +/*------------------------------------------------------------------------------------------------------*/ + +/* + * Function: FindBridgeIf_new + * + * Parameters: + * IN pi_pPdo - PDO of HCA's bus device + * IN pi_Bus, pi_DevFunc - bridge location + * OUT po_pPdo - pointer to PDO of the bridge, when found + * + * Returns: + * FALSE - the bridge was not found + * TRUE - a device was found; *po_pPdo contains its PDO + * + * Description: + * The function finds and opens the bus interface for Tavor HCA + * + * Algorithm: + * 1. find all PDOs of PCI.SYS driver and save it into an array; + * 2. For each PDO open its bus i/f and check whether it is our bridge; + * + * Note: + * 1. It is a "hack" algorithm. It uses some fields of system structures and some + * optimistic assumptions - see more below + * 2. We dangerously assume, that during part to of the algoritm no PDO will removed or added ! + * 3. PCI.SYS gives to its child devices names like \Device\NTPNP_PCI00nn. I tried to get Bridge's + * PDO by calling IoGetDeviceObjectPointer with all such names, but it returns STATUS_NO_SUCH_DEVICE + * for the *right* name of Bridge device !(IoGetDeviceObjectPointer really opens the device. 
Maybe Bridge is in exclusive use) + */ +int +FindBridgeIf( + IN hca_dev_ext_t *pi_ext, + OUT PBUS_INTERFACE_STANDARD pi_pInterface + ) +{ + NTSTATUS l_Status; + int rc = FALSE; /* result - "not found" by default */ + int n_pdos = 0; /* number of PCI.SYS's PDOs */ + PDEVICE_OBJECT *pdo; /* an array of PCI.SYS's PDOs */ + PDEVICE_OBJECT l_pHcaPdo; + + { // get HCA's bus PDO + IO_STACK_LOCATION l_Iosl; + PDEVICE_RELATIONS l_pDr; + + // find PDO of our bus driver (bypassing possible low filter drivers) + RtlZeroMemory( &l_Iosl, sizeof(l_Iosl) ); + l_Iosl.Parameters.QueryDeviceRelations.Type = TargetDeviceRelation; + l_Status = SendAwaitIrp( + pi_ext->cl_ext.p_self_do, + pi_ext->cl_ext.p_next_do, + IRP_MJ_PNP, + IRP_MN_QUERY_DEVICE_RELATIONS, + &l_Iosl.Parameters, + sizeof(l_Iosl.Parameters.QueryDeviceRelations), + &l_pDr + ); + + if (!NT_SUCCESS (l_Status)) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("IRP_MN_QUERY_DEVICE_RELATIONS for TargetDeviceRelation failed (%#x);: Fdo %p, Ldo %p \n", + l_Status, pi_ext->cl_ext.p_self_do, pi_ext->cl_ext.p_next_do )); + goto exit; + } + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("IRP_MN_QUERY_DEVICE_RELATIONS for TargetDeviceRelation for Fdo %p, Ldo %p: num_of_PDOs %d, PDO %p \n", + pi_ext->cl_ext.p_self_do, pi_ext->cl_ext.p_next_do, l_pDr->Count, l_pDr->Objects[0] )); + l_pHcaPdo = l_pDr->Objects[0]; + } + + { // allocate and fill an array with all PCI.SYS PDO devices + // suppose that there is no more than N_PCI_DEVICES, belonging to PCI.SYS + #define N_PCI_DEVICES 256 + KIRQL irql; + PDRIVER_OBJECT l_pDrv; + PDEVICE_OBJECT l_pPdo; + int l_all_pdos = 0; + + pdo = (PDEVICE_OBJECT *)ExAllocatePoolWithTag( + NonPagedPool, + N_PCI_DEVICES * sizeof(PDEVICE_OBJECT), + MT_TAG_KERNEL ); + if (!pdo) + goto exit; + + // suppose, that PDOs are added only at PASSIVE_LEVEL + irql = KeRaiseIrqlToDpcLevel(); + + // get to the PCI.SYS driver + l_pDrv = l_pHcaPdo->DriverObject; + + // find and store all bus PDO s (because the bridge is a bus enumerated device) + for ( l_pPdo = l_pDrv->DeviceObject; l_pPdo; l_pPdo = l_pPdo->NextDevice ) { + l_all_pdos++; + if ( l_pPdo->Flags & DO_BUS_ENUMERATED_DEVICE ) { + pdo[n_pdos] = l_pPdo; + if (++n_pdos >= N_PCI_DEVICES) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_SHIM , + ("There are more than %d children of PCI.SYS. 
Skipping the rest \n", N_PCI_DEVICES )); + break; + } + } + } + + // return to previous level + KeLowerIrql(irql); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SHIM ,("Found %d PCI.SYS's PDOs (from %d) \n", n_pdos, l_all_pdos )); + } + + { // Find PDO of the Bridge of our HCA and return open bus interface to it + int i; + ULONG data, l_SecBus; + IO_STACK_LOCATION l_Stack; // parameter buffer for the request + ULONG l_DevId = ((int)(23110) << 16) | PCI_VENDOR_ID_MELLANOX; + + // loop over all the PCI driver devices + for ( i = 0; i < n_pdos; ++i ) { + + // clean interface data + RtlZeroMemory( (PCHAR)pi_pInterface, sizeof(BUS_INTERFACE_STANDARD) ); + + // get Bus Interface for the current PDO + l_Stack.Parameters.QueryInterface.InterfaceType = (LPGUID) &GUID_BUS_INTERFACE_STANDARD; + l_Stack.Parameters.QueryInterface.Size = sizeof(BUS_INTERFACE_STANDARD); + l_Stack.Parameters.QueryInterface.Version = 1; + l_Stack.Parameters.QueryInterface.Interface = (PINTERFACE)pi_pInterface; + l_Stack.Parameters.QueryInterface.InterfaceSpecificData = NULL; + + l_Status =SendAwaitIrp( pi_ext->cl_ext.p_self_do, pdo[i], IRP_MJ_PNP, + IRP_MN_QUERY_INTERFACE, &l_Stack.Parameters, sizeof(l_Stack.Parameters), NULL); + if (!NT_SUCCESS (l_Status)) { + HCA_PRINT( TRACE_LEVEL_WARNING ,HCA_DBG_SHIM , + ("Failed to get bus interface for pdo[%d] %p, error %#x \n", i, pdo[i], l_Status )); + continue; + } + + // Read DevID + data = 0; + if (4 != pi_pInterface->GetBusData( pi_pInterface->Context, + PCI_WHICHSPACE_CONFIG, &data, 0, 4)) { + HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP, + ("Failed to read DevID for pdo[%d] %p, data %#x \n", i, pdo[i], data )); + goto next_loop; + } + + if (data != l_DevId) { + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("Not Tavor bridge: pdo[%d] %p, data %#x \n", i, pdo[i], data )); + goto next_loop; + } + + // Found Tavor Bridge - read its SecondaryBus + data = 0; + if (4 != pi_pInterface->GetBusData( pi_pInterface->Context, + PCI_WHICHSPACE_CONFIG, &data, 24, 4)) { /* 24 - PrimaryBus, 25 - SecondaryBus, 26 - SubordinateBus */ + HCA_PRINT( TRACE_LEVEL_WARNING, HCA_DBG_PNP, + ("Failed to read SecondaryBus for pdo[%d] %p, data %#x \n", i, pdo[i], data )); + goto next_loop; + } + + l_SecBus = (data >> 16) & 255; + if (l_SecBus != pi_ext->bus_number) { + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("Wrong bridge for our HCA: pdo[%d] %p, SecBus %d, HcaBus %d \n", i, pdo[i], l_SecBus, pi_ext->bus_number )); + goto next_loop; + } + else { + ULONG l_DevFunc, l_Bus; + l_Status = MdGetDevLocation( pdo[i], &l_Bus, &l_DevFunc ); + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_PNP, + ("Found bridge for our HCA: pdo[%d] %p (bus %d, dev/func %d, HcaPdo %p), SecBus %d, HcaBus %d \n", + i, pdo[i], l_Bus, l_DevFunc, l_pHcaPdo, l_SecBus, pi_ext->bus_number )); + rc = TRUE; + break; + } + next_loop: + pi_pInterface->InterfaceDereference( pi_pInterface->Context ); + } + } + + ExFreePool(pdo); +exit: + return rc; +} diff --git a/branches/Ndi/hw/mthca/kernel/mt_spinlock.h b/branches/Ndi/hw/mthca/kernel/mt_spinlock.h new file mode 100644 index 00000000..57f3ea5a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_spinlock.h @@ -0,0 +1,143 @@ +#ifndef MT_SPINLOCK_H +#define MT_SPINLOCK_H + +typedef struct spinlock { + KSPIN_LOCK lock; + +#ifdef SUPPORT_SPINLOCK_ISR + PKINTERRUPT p_int_obj; + KIRQL irql; +#endif +} spinlock_t; + +typedef struct { + KLOCK_QUEUE_HANDLE lockh; + KIRQL irql; +} spinlockh_t; + +#ifdef SUPPORT_SPINLOCK_ISR + +static inline void +spin_lock_setint( + IN spinlock_t* const l, + 
IN PKINTERRUPT p_int_obj ) +{ + MT_ASSERT( l ); + l->p_int_obj = p_int_obj; +} + +static inline void spin_lock_irq_init( + IN spinlock_t* const l, + IN PKINTERRUPT int_obj + ) +{ + KeInitializeSpinLock( &l->lock ); + l->p_int_obj = int_obj; +} + +static inline unsigned long +spin_lock_irq( + IN spinlock_t* const l) +{ + MT_ASSERT( l ); + MT_ASSERT( l->p_int_obj ); + return (unsigned long)(l->irql = KeAcquireInterruptSpinLock ( l->p_int_obj )); +} + +static inline void +spin_unlock_irq( + IN spinlock_t* const p_spinlock ) +{ + MT_ASSERT( p_spinlock ); + MT_ASSERT( p_spinlock->p_int_obj ); + KeReleaseInterruptSpinLock ( p_spinlock->p_int_obj, p_spinlock->irql ); +} + +#endif + +#define SPIN_LOCK_PREP(lh) spinlockh_t lh + +static inline void spin_lock_init( + IN spinlock_t* const p_spinlock ) +{ + KeInitializeSpinLock( &p_spinlock->lock ); +} + +static inline void +spin_lock( + IN spinlock_t* const l, + IN spinlockh_t * const lh) +{ + KIRQL irql = KeGetCurrentIrql(); + + MT_ASSERT( l || lh ); + ASSERT(irql <= DISPATCH_LEVEL); + + if (irql == DISPATCH_LEVEL) + KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &lh->lockh ); + else + KeAcquireInStackQueuedSpinLock( &l->lock, &lh->lockh ); + lh->irql = irql; +} + +static inline void +spin_unlock( + IN spinlockh_t * const lh) +{ + MT_ASSERT( lh ); + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + if (lh->irql == DISPATCH_LEVEL) + KeReleaseInStackQueuedSpinLockFromDpcLevel( &lh->lockh ); + else + KeReleaseInStackQueuedSpinLock( &lh->lockh ); +} + +static inline void +spin_lock_sync( + IN spinlock_t* const l ) +{ + KLOCK_QUEUE_HANDLE lockh; + MT_ASSERT( l ); + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeAcquireInStackQueuedSpinLock ( &l->lock, &lockh ); + KeReleaseInStackQueuedSpinLock( &lockh ); +} + +/* to be used only at DPC level */ +static inline void +spin_lock_dpc( + IN spinlock_t* const l, + IN spinlockh_t * const lh) +{ + MT_ASSERT( l || lh ); + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + KeAcquireInStackQueuedSpinLockAtDpcLevel( &l->lock, &lh->lockh ); +} + +/* to be used only at DPC level */ +static inline void +spin_unlock_dpc( + IN spinlockh_t * const lh) +{ + ASSERT(KeGetCurrentIrql() == DISPATCH_LEVEL); + KeReleaseInStackQueuedSpinLockFromDpcLevel( &lh->lockh ); +} + + +/* we are working from DPC level, so we can use usual spinlocks */ +#define spin_lock_irq spin_lock +#define spin_unlock_irq spin_unlock + +/* no diff in Windows */ +#define spin_lock_irqsave spin_lock_irq +#define spin_unlock_irqrestore spin_unlock_irq + +/* Windows doesn't support such kind of spinlocks so far, but may be tomorrow ... 
*/ +#define rwlock_init spin_lock_init +#define read_lock_irqsave spin_lock_irqsave +#define read_unlock_irqrestore spin_unlock_irqrestore +#define write_lock_irq spin_lock_irq +#define write_unlock_irq spin_unlock_irq + +#endif + diff --git a/branches/Ndi/hw/mthca/kernel/mt_sync.h b/branches/Ndi/hw/mthca/kernel/mt_sync.h new file mode 100644 index 00000000..90d3f38c --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_sync.h @@ -0,0 +1,109 @@ +#ifndef MT_SYNC_H +#define MT_SYNC_H + +// literals +#ifndef LONG_MAX +#define LONG_MAX 2147483647L /* maximum (signed) long value */ +#endif + + +// mutex wrapper + +// suitable both for mutexes and semaphores +static inline void down(PRKMUTEX p_mutex) +{ + NTSTATUS status; + int need_to_wait = 1; + + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + while (need_to_wait) { + status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, FALSE, NULL ); + if (status == STATUS_SUCCESS) + break; + } +} + +// suitable both for mutexes and semaphores +static inline int down_interruptible(PRKMUTEX p_mutex) +{ + NTSTATUS status; + + ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + status = KeWaitForSingleObject( p_mutex, Executive, KernelMode, TRUE, NULL ); + if (status == STATUS_SUCCESS) + return 0; + return -EINTR; +} + +#define sem_down(ptr) down((PRKMUTEX)(ptr)) +#define sem_down_interruptible(ptr) down_interruptible((PRKMUTEX)(ptr)) + +static inline void up(PRKMUTEX p_mutex) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeReleaseMutex( p_mutex, FALSE ); +} + +static inline void sem_up(PRKSEMAPHORE p_sem) +{ + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeReleaseSemaphore( p_sem, 0, 1, FALSE ); +} + +static inline void sem_init( + IN PRKSEMAPHORE p_sem, + IN LONG cnt, + IN LONG limit) +{ + ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL); + KeInitializeSemaphore( p_sem, cnt, limit ); +} + + +typedef struct wait_queue_head { + KEVENT event; +} wait_queue_head_t; + +static inline void wait_event(wait_queue_head_t *obj_p, int condition) +{ + NTSTATUS status; + int need_to_wait = 1; + MT_ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL); + if (condition) + return; + while (need_to_wait) { + status = KeWaitForSingleObject( &obj_p->event, Executive, KernelMode, FALSE, NULL ); + if (status == STATUS_SUCCESS) + break; + } +} + +static inline void wake_up(wait_queue_head_t *obj_p) +{ + MT_ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeSetEvent( &obj_p->event, 0, FALSE ); +} + +static inline void init_waitqueue_head(wait_queue_head_t *obj_p) +{ + //TODO: ASSERT is temporary outcommented, because using of fast mutexes in CompLib + // cause working on APC_LEVEL + //ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL); + KeInitializeEvent( &obj_p->event, NotificationEvent , FALSE ); +} + +static inline void free_irq(PKINTERRUPT int_obj) +{ + IoDisconnectInterrupt( int_obj ); +} + +int request_irq( + IN CM_PARTIAL_RESOURCE_DESCRIPTOR *int_info, /* interrupt resources */ + IN KSPIN_LOCK *isr_lock, /* spin lcok for ISR */ + IN PKSERVICE_ROUTINE isr, /* ISR */ + IN void *isr_ctx, /* ISR context */ + OUT PKINTERRUPT *int_obj /* interrupt object */ + ); + + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_types.h b/branches/Ndi/hw/mthca/kernel/mt_types.h new file mode 100644 index 00000000..efe9a857 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_types.h @@ -0,0 +1,60 @@ +#ifndef MT_TYPES_H +#define MT_TYPES_H + +//#include +#pragma warning( push ) +#include + #include +#pragma warning( pop ) + +// =========================================== +// SUBSTITUTES +// 
=========================================== + +// gcc compiler attributes +#define __iomem +#define likely(x) (x) +#define unlikely(x) (x) + +// container_of +#define container_of CONTAINING_RECORD + +// inline +#define inline __inline + +// =========================================== +// TYPES +// =========================================== + +// basic types +typedef unsigned char u8, __u8; +typedef unsigned short int u16, __u16; +typedef unsigned int u32, __u32; +typedef unsigned __int64 u64, __u64; +typedef char s8, __s8; +typedef short int s16, __s16; +typedef int s32, __s32; +typedef __int64 s64, __s64; + +// inherited +typedef u16 __le16; +typedef u16 __be16; +typedef u32 __le32; +typedef u32 __be32; +typedef u64 __le64; +typedef u64 __be64; +typedef u64 dma_addr_t; +typedef u64 io_addr_t; + +// =========================================== +// MACROS +// =========================================== + +// assert +#ifdef _DEBUG_ +#define MT_ASSERT( exp ) (void)(!(exp)?DbgPrint("Assertion Failed:" #exp "\n"),DbgBreakPoint(),FALSE:TRUE) +#else +#define MT_ASSERT( exp ) +#endif /* _DEBUG_ */ + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mt_ud_header.c b/branches/Ndi/hw/mthca/kernel/mt_ud_header.c new file mode 100644 index 00000000..e649c53a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_ud_header.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_ud_header.tmh" +#endif +#include + +#define STRUCT_FIELD_INIT(header, field,ow,ob,sb) \ + offsetof(struct ib_unpacked_ ## header, field), \ + sizeof ((struct ib_unpacked_ ## header *) 0)->field, \ + ow,ob,sb, \ + #header ":" #field + +#define STRUCT_FIELD_INITR(ow,ob,sb) \ + 0, 0, ow, ob, sb, "reserved" + +static const struct ib_field lrh_table[] = { + { STRUCT_FIELD_INIT(lrh, virtual_lane, 0, 0, 4) }, + { STRUCT_FIELD_INIT(lrh, link_version, 0, 4, 4) }, + { STRUCT_FIELD_INIT(lrh, service_level, 0, 8, 4) }, + { STRUCT_FIELD_INITR(0,12,2) }, + { STRUCT_FIELD_INIT(lrh, link_next_header, 0, 14, 2) }, + { STRUCT_FIELD_INIT(lrh, destination_lid, 0, 16, 16) }, + { STRUCT_FIELD_INITR(1,0,5) }, + { STRUCT_FIELD_INIT(lrh, packet_length, 1, 5, 11) }, + { STRUCT_FIELD_INIT(lrh, source_lid, 1, 16, 16) } +}; + +static const struct ib_field grh_table[] = { + { STRUCT_FIELD_INIT(grh, ip_version, 0, 0, 4) }, + { STRUCT_FIELD_INIT(grh, traffic_class, 0, 4, 8) }, + { STRUCT_FIELD_INIT(grh, flow_label, 0, 12, 20) }, + { STRUCT_FIELD_INIT(grh, payload_length, 1, 0, 16) }, + { STRUCT_FIELD_INIT(grh, next_header, 1, 16, 8) }, + { STRUCT_FIELD_INIT(grh, hop_limit, 1, 24, 8) }, + { STRUCT_FIELD_INIT(grh, source_gid, 2, 0, 128) }, + { STRUCT_FIELD_INIT(grh, destination_gid, 6, 0, 128) } +}; + +static const struct ib_field bth_table[] = { + { STRUCT_FIELD_INIT(bth, opcode, 0, 0, 8) }, + { STRUCT_FIELD_INIT(bth, solicited_event, 0, 8, 1) }, + { STRUCT_FIELD_INIT(bth, mig_req, 0, 9, 1) }, + { STRUCT_FIELD_INIT(bth, pad_count, 0, 10, 2) }, + { STRUCT_FIELD_INIT(bth, transport_header_version, 0, 12, 4) }, + { STRUCT_FIELD_INIT(bth, pkey, 0, 16, 16) }, + { STRUCT_FIELD_INITR(1,0,8) }, + { STRUCT_FIELD_INIT(bth, destination_qpn, 1, 8, 24) }, + { STRUCT_FIELD_INIT(bth, ack_req, 2, 0, 1) }, + { STRUCT_FIELD_INITR(2,1,7) }, + { STRUCT_FIELD_INIT(bth, psn, 2, 8, 24) } +}; + +static const struct ib_field deth_table[] = { + { STRUCT_FIELD_INIT(deth, qkey, 0, 0, 32) }, + { STRUCT_FIELD_INITR(1,0,8) }, + { STRUCT_FIELD_INIT(deth, source_qpn, 1, 8, 24) } +}; + + +/** + * ib_ud_header_init - Initialize UD header structure + * @payload_bytes:Length of packet payload + * @grh_present:GRH flag (if non-zero, GRH will be included) + * @header:Structure to initialize + * + * ib_ud_header_init() initializes the lrh.link_version, lrh.link_next_header, + * lrh.packet_length, grh.ip_version, grh.payload_length, + * grh.next_header, bth.opcode, bth.pad_count and + * bth.transport_header_version fields of a &struct ib_ud_header given + * the payload length and whether a GRH will be included. + */ +void ib_ud_header_init(int payload_bytes, + int grh_present, + struct ib_ud_header *header) +{ + int header_len; + u16 packet_length; + + RtlZeroMemory(header, sizeof *header); + + header_len = + IB_LRH_BYTES + + IB_BTH_BYTES + + IB_DETH_BYTES; + if (grh_present) { + header_len += IB_GRH_BYTES; + } + + header->lrh.link_version = 0; + header->lrh.link_next_header = + (u8)(grh_present ? 
IB_LNH_IBA_GLOBAL : IB_LNH_IBA_LOCAL); + packet_length = (u16)((IB_LRH_BYTES + + IB_BTH_BYTES + + IB_DETH_BYTES + + payload_bytes + + 4 + /* ICRC */ + 3) / 4); /* round up */ + + header->grh_present = grh_present; + if (grh_present) { + packet_length += IB_GRH_BYTES / 4; + header->grh.ip_version = 6; + header->grh.payload_length = + cl_hton16((u16)((IB_BTH_BYTES + + IB_DETH_BYTES + + payload_bytes + + 4 + /* ICRC */ + 3) & ~3)); /* round up */ + header->grh.next_header = 0x1b; + } + + header->lrh.packet_length = cl_hton16(packet_length); + + if (header->immediate_present) + header->bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + else + header->bth.opcode = IB_OPCODE_UD_SEND_ONLY; + header->bth.pad_count = (u8)((4 - payload_bytes) & 3); + header->bth.transport_header_version = 0; +} + +/** + * ib_ud_header_pack - Pack UD header struct into wire format + * @header:UD header struct + * @buf:Buffer to pack into + * + * ib_ud_header_pack() packs the UD header structure @header into wire + * format in the buffer @buf. + */ +int ib_ud_header_pack(struct ib_ud_header *header, + u8 *buf) +{ + int len = 0; + + ib_pack(lrh_table, ARRAY_SIZE(lrh_table), + &header->lrh, buf); + len += IB_LRH_BYTES; + + if (header->grh_present) { + ib_pack(grh_table, ARRAY_SIZE(grh_table), + &header->grh, buf + len); + len += IB_GRH_BYTES; + } + + ib_pack(bth_table, ARRAY_SIZE(bth_table), + &header->bth, buf + len); + len += IB_BTH_BYTES; + + ib_pack(deth_table, ARRAY_SIZE(deth_table), + &header->deth, buf + len); + len += IB_DETH_BYTES; + + if (header->immediate_present) { + memcpy(buf + len, &header->immediate_data, sizeof header->immediate_data); + len += sizeof header->immediate_data; + } + + return len; +} + +/** + * ib_ud_header_unpack - Unpack UD header struct from wire format + * @header:UD header struct + * @buf:Buffer to pack into + * + * ib_ud_header_pack() unpacks the UD header structure @header from wire + * format in the buffer @buf. 
+ */ +int ib_ud_header_unpack(u8 *buf, + struct ib_ud_header *header) +{ + ib_unpack(lrh_table, ARRAY_SIZE(lrh_table), + buf, &header->lrh); + buf += IB_LRH_BYTES; + + if (header->lrh.link_version != 0) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid LRH.link_version %d\n", + header->lrh.link_version)); + return -EINVAL; + } + + switch (header->lrh.link_next_header) { + case IB_LNH_IBA_LOCAL: + header->grh_present = 0; + break; + + case IB_LNH_IBA_GLOBAL: + header->grh_present = 1; + ib_unpack(grh_table, ARRAY_SIZE(grh_table), + buf, &header->grh); + buf += IB_GRH_BYTES; + + if (header->grh.ip_version != 6) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid GRH.ip_version %d\n", + header->grh.ip_version)); + return -EINVAL; + } + if (header->grh.next_header != 0x1b) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid GRH.next_header 0x%02x\n", + header->grh.next_header)); + return -EINVAL; + } + break; + + default: + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid LRH.link_next_header %d\n", + header->lrh.link_next_header)); + return -EINVAL; + } + + ib_unpack(bth_table, ARRAY_SIZE(bth_table), + buf, &header->bth); + buf += IB_BTH_BYTES; + + switch (header->bth.opcode) { + case IB_OPCODE_UD_SEND_ONLY: + header->immediate_present = 0; + break; + case IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE: + header->immediate_present = 1; + break; + default: + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid BTH.opcode 0x%02x\n", + header->bth.opcode)); + return -EINVAL; + } + + if (header->bth.transport_header_version != 0) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Invalid BTH.transport_header_version %d\n", + header->bth.transport_header_version)); + return -EINVAL; + } + + ib_unpack(deth_table, ARRAY_SIZE(deth_table), + buf, &header->deth); + buf += IB_DETH_BYTES; + + if (header->immediate_present) + memcpy(&header->immediate_data, buf, sizeof header->immediate_data); + + return 0; +} diff --git a/branches/Ndi/hw/mthca/kernel/mt_uverbs.c b/branches/Ndi/hw/mthca/kernel/mt_uverbs.c new file mode 100644 index 00000000..0e4e5674 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_uverbs.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_uverbs.tmh" +#endif + + +//TODO: all this module is to be eliminated !! + + +static void ib_uverbs_add_one(struct ib_device *device); +static void ib_uverbs_remove_one(struct ib_device *device); + +static struct ib_client uverbs_client = { + "uverbs", + ib_uverbs_add_one, + ib_uverbs_remove_one +}; + +struct ib_uverbs_device { + struct ib_device *ib_dev; +}; + +static void ib_uverbs_add_one(struct ib_device *device) +{ + struct ib_uverbs_device *uverbs_dev; + + if (!device->alloc_ucontext) + return; + + uverbs_dev = kzalloc(sizeof *uverbs_dev, GFP_KERNEL); + if (!uverbs_dev) + return; + + ib_set_client_data(device, &uverbs_client, uverbs_dev); +} + +static void ib_uverbs_remove_one(struct ib_device *device) +{ + struct ib_uverbs_device *uverbs_dev = ib_get_client_data(device, &uverbs_client); + + if (uverbs_dev) + kfree(uverbs_dev); +} + +int ib_uverbs_init(void) +{ + int ret; + + ret = ib_register_client(&uverbs_client); + if (ret) + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("user_verbs: couldn't register client\n")); + + return ret; +} + +void ib_uverbs_cleanup(void) +{ + ib_unregister_client(&uverbs_client); +} + diff --git a/branches/Ndi/hw/mthca/kernel/mt_verbs.c b/branches/Ndi/hw/mthca/kernel/mt_verbs.c new file mode 100644 index 00000000..29955e3c --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mt_verbs.c @@ -0,0 +1,935 @@ +/* + * Copyright (c) 2004 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2004 Infinicon Corporation. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2004 Topspin Corporation. All rights reserved. + * Copyright (c) 2004 Voltaire Corporation. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include "mthca_dev.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mt_verbs.tmh" +#endif + + +void ibv_um_close( struct ib_ucontext * h_um_ca ) +{ + int err; + ib_api_status_t status; + struct ib_ucontext *context_p = (struct ib_ucontext *)h_um_ca; + PREP_IBDEV_FOR_PRINT(context_p->device); + + HCA_ENTER(HCA_DBG_SHIM); + + context_p->is_removing = TRUE; + + if (atomic_read(&context_p->usecnt)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("resources are not released (cnt %d)\n", context_p->usecnt)); + status = IB_RESOURCE_BUSY; + goto err_usage; + } + + err = ibv_dealloc_pd( context_p->pd ); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("ibv_dealloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + } + + err = mthca_dealloc_ucontext(context_p); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("mthca_dealloc_ucontext failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_dealloc_ucontext; + } + + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_SHIM, + ("pcs %p\n", PsGetCurrentProcess()) ); + status = IB_SUCCESS; + goto end; + +err_dealloc_ucontext: +err_usage: +end: + if (status != IB_SUCCESS) + { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_SHIM, + ("completes with ERROR status %s\n", IB_GET_ERR_STR(status))); + } + HCA_EXIT(HCA_DBG_SHIM); + return; +} + +/* Protection domains */ + +struct ib_pd *ibv_alloc_pd(struct ib_device *device, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) +{ + struct ib_pd *pd; + + // direct call is a must, because "lifefish" devices doesn't fill driver i/f table + pd = mthca_alloc_pd(device, context, p_umv_buf); + + if (!IS_ERR(pd)) { + pd->device = device; + pd->ucontext = context; + atomic_set(&pd->usecnt, 0); + KeInitializeMutex( &pd->mutex, 0 ); + INIT_LIST_HEAD( &pd->list ); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + } + + return pd; +} + +int ibv_dealloc_pd(struct ib_pd *pd) +{ + if (mthca_is_livefish(to_mdev(pd->device))) + goto done; + + // we need first to release list of AV MRs to decrease pd->usecnt + if (pd->ucontext) { + struct ib_mr *ib_mr, *tmp; + down(&pd->mutex ); + list_for_each_entry_safe(ib_mr, tmp, &pd->list, list,struct ib_mr,struct ib_mr) { + ibv_dereg_mr( ib_mr ); + } + up(&pd->mutex ); + } + + if (atomic_read(&pd->usecnt)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ,("resources are not released (cnt %d)\n", pd->usecnt)); + return -EBUSY; + } + +done: + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + // direct call is a must, because "lifefish" devices doesn't fill driver i/f table + return mthca_dealloc_pd(pd); +} + +/* Address handles */ + +struct ib_ah *ibv_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_ah *ah; + struct ib_mr *ib_mr = NULL; + u64 start = 0; + u64 user_handle = 0; + struct ibv_create_ah_resp *create_ah_resp = 0; + + // for user call we need also allocate MR + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_ah *create_ah = (struct ibv_create_ah *)(void*)p_umv_buf->p_inout_buf; + + // create region; destroy will be done on dealloc_pd + ib_mr = ibv_reg_mr( + pd, + create_ah->mr.access_flags, + 
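+			/* VA, length and HCA VA come from the umv buffer the
+			 * user-mode library filled in; the region is chained
+			 * to the PD below and only released when the PD is
+			 * deallocated (see ibv_dealloc_pd()). */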
(void*)(ULONG_PTR)create_ah->mr.start, + create_ah->mr.length, create_ah->mr.hca_va, TRUE ); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("ibv_reg_mr failed (%d)\n", err)); + goto err_alloc_mr; + } + + start = create_ah->mr.start; + user_handle = create_ah->user_handle; + + // chain this MR to PD list + down(&pd->mutex ); + list_add_tail(&ib_mr->list, &pd->list); + up(&pd->mutex ); + } + + ah = pd->device->create_ah(pd, ah_attr); + + /* fill obligatory fields */ + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf; + create_ah_resp->user_handle = user_handle; + } + + if (IS_ERR(ah)) { + err = PTR_ERR(ah); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_AV ,("create_ah failed (%d)\n", err)); + goto err_create_ah; + } + + // fill results + ah->device = pd->device; + ah->pd = pd; + ah->ucontext = context; + atomic_inc(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_AV ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + + // fill results for user + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_ah_resp *create_ah_resp = (struct ibv_create_ah_resp *)(void*)p_umv_buf->p_inout_buf; + create_ah_resp->start = start; + create_ah_resp->mr.lkey = ib_mr->lkey; + create_ah_resp->mr.rkey = ib_mr->rkey; + create_ah_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; + p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp); + } + + return ah; + +err_create_ah: + if (ib_mr) + ibv_dereg_mr(ib_mr); +err_alloc_mr: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + return ERR_PTR(ib_mr); +} + +struct ib_ah *ibv_create_ah_from_wc(struct ib_pd *pd, struct _ib_wc *wc, + struct ib_grh *grh, u8 port_num) +{ + struct ib_ah_attr ah_attr; + u32 flow_class; + u16 gid_index; + int ret; + + memset(&ah_attr, 0, sizeof ah_attr); + ah_attr.dlid = wc->recv.ud.remote_lid; + ah_attr.sl = wc->recv.ud.remote_sl; + ah_attr.src_path_bits = wc->recv.ud.path_bits; + ah_attr.port_num = port_num; + + if (wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID) { + ah_attr.ah_flags = IB_AH_GRH; + ah_attr.grh.dgid = grh->dgid; + + ret = ib_find_cached_gid(pd->device, &grh->sgid, &port_num, + &gid_index); + if (ret) + return ERR_PTR(ret); + + ah_attr.grh.sgid_index = (u8) gid_index; + flow_class = cl_ntoh32(grh->version_tclass_flow); + ah_attr.grh.flow_label = flow_class & 0xFFFFF; + ah_attr.grh.traffic_class = (u8)((flow_class >> 20) & 0xFF); + ah_attr.grh.hop_limit = grh->hop_limit; + } + + return ibv_create_ah(pd, &ah_attr, NULL, NULL); +} + +int ibv_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + return ah->device->modify_ah ? + ah->device->modify_ah(ah, ah_attr) : + -ENOSYS; +} + +int ibv_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr) +{ + return ah->device->query_ah ? + ah->device->query_ah(ah, ah_attr) : + -ENOSYS; +} + + +static void release_user_cq_qp_resources( + struct ib_ucontext *ucontext, + struct ib_mr * ib_mr) +{ + if (ucontext) { + ibv_dereg_mr( ib_mr ); + atomic_dec(&ucontext->usecnt); + if (!atomic_read(&ucontext->usecnt) && ucontext->is_removing) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SHIM ,("User resources are released. 
Removing context\n")); + ibv_um_close(ucontext); + } + } +} + +int ibv_destroy_ah(struct ib_ah *ah) +{ + struct ib_pd *pd; + int ret; + + HCA_ENTER(HCA_DBG_AV); + pd = ah->pd; + + ret = ah->device->destroy_ah(ah); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_AV ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + } + HCA_EXIT(HCA_DBG_AV); + return ret; +} + +/* Shared receive queues */ + +struct ib_srq *ibv_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *srq_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_srq *ib_srq; + struct ib_mr *ib_mr = NULL; + u64 user_handle = 0; + struct ibv_create_srq_resp *create_srq_resp = 0; + + // for user call we need also allocate MR + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_srq *create_srp = (struct ibv_create_srq *)(void*)p_umv_buf->p_inout_buf; + + // create region + ib_mr = ibv_reg_mr( + (struct ib_pd *)(ULONG_PTR)create_srp->mr.pd_handle, + create_srp->mr.access_flags, + (void*)(ULONG_PTR)create_srp->mr.start, + create_srp->mr.length, create_srp->mr.hca_va, TRUE ); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err)); + goto err_alloc_mr; + } + create_srp->lkey = ib_mr->lkey; + user_handle = create_srp->user_handle; + } + + ib_srq = pd->device->create_srq(pd, srq_init_attr, p_umv_buf); + + /* fill obligatory fields */ + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + create_srq_resp = (struct ibv_create_srq_resp *)(void*)p_umv_buf->p_inout_buf; + create_srq_resp->user_handle = user_handle; + } + + if (IS_ERR(ib_srq)) { + err = PTR_ERR(ib_srq); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_srq failed (%d)\n", err)); + goto err_create_srq; + } + + // fill results + ib_srq->device = pd->device; + ib_srq->pd = pd; + ib_srq->ucontext = context; + ib_srq->event_handler = srq_init_attr->event_handler; + ib_srq->srq_context = srq_init_attr->srq_context; + atomic_inc(&pd->usecnt); + atomic_set(&ib_srq->usecnt, 0); + if (context) + atomic_inc(&context->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_SRQ , + ("uctx %p, qhndl %p, qnum %#x \n", + pd->ucontext, ib_srq, ((struct mthca_srq*)ib_srq)->srqn ) ); + + // fill results for user + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct mthca_srq *srq = (struct mthca_srq *)ib_srq; + ib_srq->ib_mr = ib_mr; + create_srq_resp->mr.lkey = ib_mr->lkey; + create_srq_resp->mr.rkey = ib_mr->rkey; + create_srq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; + create_srq_resp->srq_handle = (__u64)(ULONG_PTR)srq; + create_srq_resp->max_wr = (mthca_is_memfree(to_mdev(pd->device))) ? 
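+		/* mem-free HCAs allocate one extra SRQ WQE and keep it
+		 * unusable, so the usable depth reported is max - 1 -- an
+		 * assumption drawn from the mem-free SRQ sizing, not stated
+		 * here explicitly. */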
srq->max - 1 : srq->max; + create_srq_resp->max_sge = srq->max_gs; + create_srq_resp->srqn= srq->srqn; + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return ib_srq; + +err_create_srq: + if (ib_mr) + ibv_dereg_mr(ib_mr); +err_alloc_mr: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + HCA_EXIT(HCA_DBG_QP); + return ERR_PTR(err); +} + +int ibv_modify_srq(struct ib_srq *srq, + ib_srq_attr_t *srq_attr, + ib_srq_attr_mask_t srq_attr_mask) +{ + return srq->device->modify_srq(srq, srq_attr, srq_attr_mask); +} + +int ibv_query_srq(struct ib_srq *srq, + ib_srq_attr_t *srq_attr) +{ + return srq->device->query_srq(srq, srq_attr); +} + +int ibv_destroy_srq(struct ib_srq *srq) +{ + int ret; + struct ib_pd *pd = srq->pd; + struct ib_ucontext *ucontext = pd->ucontext; + struct ib_mr * ib_mr = srq->ib_mr; + + ret = srq->device->destroy_srq(srq); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_SRQ ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + release_user_cq_qp_resources(ucontext, ib_mr); + } + + return ret; +} + +/* Queue pairs */ + +struct ib_qp *ibv_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *qp_init_attr, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_qp *ib_qp; + struct ib_mr *ib_mr = NULL; + u64 user_handle = 0; + + HCA_ENTER(HCA_DBG_QP); + + // for user call we need also allocate MR + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_qp *create_qp = (struct ibv_create_qp *)(void*)p_umv_buf->p_inout_buf; + + // create region + ib_mr = ibv_reg_mr( + (struct ib_pd *)(ULONG_PTR)create_qp->mr.pd_handle, + create_qp->mr.access_flags, + (void*)(ULONG_PTR)create_qp->mr.start, + create_qp->mr.length, create_qp->mr.hca_va, TRUE ); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("ibv_reg_mr failed (%d)\n", err)); + goto err_alloc_mr; + } + create_qp->lkey = ib_mr->lkey; + user_handle = create_qp->user_handle; + } + + ib_qp = pd->device->create_qp(pd, qp_init_attr, p_umv_buf); + + if (IS_ERR(ib_qp)) { + err = PTR_ERR(ib_qp); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP ,("create_qp failed (%d)\n", err)); + goto err_create_qp; + } + + // fill results + ib_qp->device = pd->device; + ib_qp->pd = pd; + ib_qp->send_cq = qp_init_attr->send_cq; + ib_qp->recv_cq = qp_init_attr->recv_cq; + ib_qp->srq = qp_init_attr->srq; + ib_qp->ucontext = context; + ib_qp->event_handler = qp_init_attr->event_handler; + ib_qp->qp_context = qp_init_attr->qp_context; + ib_qp->qp_type = qp_init_attr->qp_type; + atomic_inc(&pd->usecnt); + atomic_inc(&qp_init_attr->send_cq->usecnt); + atomic_inc(&qp_init_attr->recv_cq->usecnt); + if (qp_init_attr->srq) + atomic_inc(&qp_init_attr->srq->usecnt); + if (context) + atomic_inc(&context->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_QP , + ("uctx %p, qhndl %p, qnum %#x, q_num %#x, scq %#x:%#x, rcq %#x:%#x \n", + pd->ucontext, ib_qp, ((struct mthca_qp*)ib_qp)->qpn, ib_qp->qp_num, + ((struct mthca_cq*)ib_qp->send_cq)->cqn, ib_qp->send_cq->cqe, + ((struct mthca_cq*)ib_qp->recv_cq)->cqn, ib_qp->recv_cq->cqe ) ); + + // fill results for user + if (context 
&& p_umv_buf && p_umv_buf->p_inout_buf) { + struct mthca_qp *qp = (struct mthca_qp *)ib_qp; + struct ibv_create_qp_resp *create_qp_resp = (struct ibv_create_qp_resp *)(void*)p_umv_buf->p_inout_buf; + ib_qp->ib_mr = ib_mr; + create_qp_resp->qpn = ib_qp->qp_num; + create_qp_resp->user_handle = user_handle; + create_qp_resp->mr.lkey = ib_mr->lkey; + create_qp_resp->mr.rkey = ib_mr->rkey; + create_qp_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; + create_qp_resp->qp_handle = (__u64)(ULONG_PTR)qp; + create_qp_resp->max_send_wr = qp->sq.max; + create_qp_resp->max_recv_wr = qp->rq.max; + create_qp_resp->max_send_sge = qp->sq.max_gs; + create_qp_resp->max_recv_sge = qp->rq.max_gs; + create_qp_resp->max_inline_data = qp->max_inline_data; + p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); + } + + return ib_qp; + +err_create_qp: + if (ib_mr) + ibv_dereg_mr(ib_mr); +err_alloc_mr: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + HCA_EXIT(HCA_DBG_QP); + return ERR_PTR(err); +} + +int ibv_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + return qp->device->modify_qp(qp, qp_attr, qp_attr_mask); +} + +int ibv_query_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_qp_init_attr *qp_init_attr) +{ + return qp->device->query_qp ? + qp->device->query_qp(qp, qp_attr, qp_attr_mask, qp_init_attr) : + -ENOSYS; +} + +int ibv_destroy_qp(struct ib_qp *qp) +{ + struct ib_pd *pd; + struct ib_cq *scq, *rcq; + struct ib_srq *srq; + int ret; + struct ib_ucontext *ucontext; + struct ib_mr * ib_mr; + + pd = qp->pd; + scq = qp->send_cq; + rcq = qp->recv_cq; + srq = qp->srq; + ucontext = pd->ucontext; + ib_mr = qp->ib_mr; + + ret = qp->device->destroy_qp(qp); + if (!ret) { + atomic_dec(&pd->usecnt); + atomic_dec(&scq->usecnt); + atomic_dec(&rcq->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_QP ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + if (srq) + atomic_dec(&srq->usecnt); + release_user_cq_qp_resources(ucontext, ib_mr); + } + + return ret; +} + +/* Completion queues */ + +struct ib_cq *ibv_create_cq(struct ib_device *device, + ib_comp_handler comp_handler, + void (*event_handler)(struct ib_event *, void *), + void *cq_context, int cqe, + struct ib_ucontext *context, ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct ib_cq *cq; + struct ib_mr *ib_mr = NULL; + u64 user_handle = 0; + + // for user call we need also allocate MR + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_cq *create_cq = (struct ibv_create_cq *)(void*)p_umv_buf->p_inout_buf; + + // create region + ib_mr = ibv_reg_mr( + (struct ib_pd *)(ULONG_PTR)create_cq->mr.pd_handle, + create_cq->mr.access_flags, + (void*)(ULONG_PTR)create_cq->mr.start, + create_cq->mr.length, create_cq->mr.hca_va, TRUE ); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("ibv_reg_mr failed (%d)\n", err)); + goto err_alloc_mr; + } + user_handle = create_cq->user_handle; + create_cq->lkey = ib_mr->lkey; + cqe = create_cq->cqe; + } + + // create cq + cq = device->create_cq(device, cqe, context, p_umv_buf); + if (IS_ERR(cq)) { + err = PTR_ERR(cq); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_CQ ,("create_cq failed (%d)\n", err)); + goto err_create_cq; + } + + cq->device = device; + cq->ucontext = context; + cq->comp_handler = comp_handler; + cq->event_handler = event_handler; + cq->cq_context = cq_context; + atomic_set(&cq->usecnt, 0); + if (context) + 
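+		/* like QPs and SRQs above, a user-mode CQ pins its ucontext:
+		 * release_user_cq_qp_resources() drops this reference and
+		 * closes the context once the count reaches zero while
+		 * is_removing is set (see ibv_um_close() above). */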
atomic_inc(&context->usecnt); + + // fill results + if (context && p_umv_buf && p_umv_buf->p_inout_buf) { + struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf; + cq->ib_mr = ib_mr; + create_cq_resp->user_handle = user_handle; + create_cq_resp->mr.lkey = ib_mr->lkey; + create_cq_resp->mr.rkey = ib_mr->rkey; + create_cq_resp->mr.mr_handle = (u64)(ULONG_PTR)ib_mr; + create_cq_resp->cq_handle = (u64)(ULONG_PTR)cq; + create_cq_resp->cqe = cq->cqe; + p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); + } + + return cq; + +err_create_cq: + if (ib_mr) + ibv_dereg_mr(ib_mr); +err_alloc_mr: + if( p_umv_buf && p_umv_buf->command ) + p_umv_buf->status = IB_ERROR; + return ERR_PTR(err); +} + +int ibv_destroy_cq(struct ib_cq *cq) +{ + int ret; + struct ib_ucontext *ucontext = cq->ucontext; + struct ib_mr * ib_mr = cq->ib_mr; + + if (atomic_read(&cq->usecnt)) + return -EBUSY; + + ret = cq->device->destroy_cq(cq); + + release_user_cq_qp_resources(ucontext, ib_mr); + + return ret; +} + +int ibv_resize_cq(struct ib_cq *cq, + int cqe) +{ + int ret; + + if (!cq->device->resize_cq) + return -ENOSYS; + + ret = cq->device->resize_cq(cq, &cqe); + if (!ret) + cq->cqe = cqe; + + return ret; +} + +/* Memory regions */ + +struct ib_mr *ibv_reg_mr(struct ib_pd *pd, + mthca_qp_access_t mr_access_flags, + void* __ptr64 vaddr, + uint64_t length, + uint64_t hca_va, + boolean_t um_call + ) +{ + struct ib_mr *ib_mr; + int err; + HCA_ENTER(HCA_DBG_MEMORY); + + ib_mr = pd->device->reg_virt_mr(pd, vaddr, length, hca_va, mr_access_flags, um_call); + if (IS_ERR(ib_mr)) { + err = PTR_ERR(ib_mr); + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_MEMORY ,("mthca_reg_user_mr failed (%d)\n", err)); + goto err_reg_user_mr; + } + + ib_mr->device = pd->device; + ib_mr->pd = pd; + atomic_inc(&pd->usecnt); + atomic_set(&ib_mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + HCA_EXIT(HCA_DBG_MEMORY); + return ib_mr; + +err_reg_user_mr: + HCA_EXIT(HCA_DBG_MEMORY); + return ERR_PTR(err); +} + +struct ib_mr *ibv_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t mr_access_flags) +{ + struct ib_mr *mr; + + // direct call is a must, because "lifefish" devices doesn't fill driver i/f table + mr = mthca_get_dma_mr(pd, mr_access_flags); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return mr; +} + +struct ib_mr *ibv_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + mthca_qp_access_t mr_access_flags, + u64 *iova_start) +{ + struct ib_mr *mr; + + mr = pd->device->reg_phys_mr(pd, phys_buf_array, num_phys_buf, + mr_access_flags, iova_start); + + if (!IS_ERR(mr)) { + mr->device = pd->device; + mr->pd = pd; + atomic_inc(&pd->usecnt); + atomic_set(&mr->usecnt, 0); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return mr; +} + +int ibv_rereg_phys_mr(struct ib_mr *mr, + int mr_rereg_mask, + struct ib_pd *pd, + struct ib_phys_buf *phys_buf_array, + int num_phys_buf, + mthca_qp_access_t mr_access_flags, + u64 *iova_start) +{ + struct ib_pd *old_pd; + int ret; + + if (!mr->device->rereg_phys_mr) + return -ENOSYS; + + if (atomic_read(&mr->usecnt)) + return 
-EBUSY; + + old_pd = mr->pd; + + ret = mr->device->rereg_phys_mr(mr, mr_rereg_mask, pd, + phys_buf_array, num_phys_buf, + mr_access_flags, iova_start); + + if (!ret && (mr_rereg_mask & IB_MR_REREG_PD)) { + atomic_dec(&old_pd->usecnt); + atomic_inc(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return ret; +} + +int ibv_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr) +{ + return mr->device->query_mr ? + mr->device->query_mr(mr, mr_attr) : -ENOSYS; +} + +int ibv_dereg_mr(struct ib_mr *mr) +{ + int ret; + struct ib_pd *pd; + + if (atomic_read(&mr->usecnt)) + return -EBUSY; + + pd = mr->pd; + // direct call is a must, because "lifefish" devices doesn't fill driver i/f table + ret = mthca_dereg_mr(mr); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d, pd_handle %p, ctx %p \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt, pd, pd->ucontext)); + } + + return ret; +} + +/* Memory windows */ + +struct ib_mw *ibv_alloc_mw(struct ib_pd *pd) +{ + struct ib_mw *mw; + + if (!pd->device->alloc_mw) + return ERR_PTR(-ENOSYS); + + mw = pd->device->alloc_mw(pd); + if (!IS_ERR(mw)) { + mw->device = pd->device; + mw->pd = pd; + atomic_inc(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return mw; +} + +int ibv_dealloc_mw(struct ib_mw *mw) +{ + struct ib_pd *pd; + int ret; + + pd = mw->pd; + ret = mw->device->dealloc_mw(mw); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return ret; +} + +/* "Fast" memory regions */ + +struct ib_fmr *ibv_alloc_fmr(struct ib_pd *pd, + mthca_qp_access_t mr_access_flags, + struct ib_fmr_attr *fmr_attr) +{ + struct ib_fmr *fmr; + + if (!pd->device->alloc_fmr) + return ERR_PTR(-ENOSYS); + + fmr = pd->device->alloc_fmr(pd, mr_access_flags, fmr_attr); + if (!IS_ERR(fmr)) { + fmr->device = pd->device; + fmr->pd = pd; + atomic_inc(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return fmr; +} + +int ibv_map_phys_fmr(struct ib_fmr *fmr, + u64 *page_list, int list_len, + u64 iova) +{ + return fmr->device->map_phys_fmr(fmr, page_list, list_len, iova); +} + +int ibv_unmap_fmr(struct list_head *fmr_list) +{ + struct ib_fmr *fmr; + + if (list_empty(fmr_list)) + return 0; + + fmr = list_entry(fmr_list->next, struct ib_fmr, list); + return fmr->device->unmap_fmr(fmr_list); +} + +int ibv_dealloc_fmr(struct ib_fmr *fmr) +{ + struct ib_pd *pd; + int ret; + + pd = fmr->pd; + ret = fmr->device->dealloc_fmr(fmr); + if (!ret) { + atomic_dec(&pd->usecnt); + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("PD%d use cnt %d \n", + ((struct mthca_pd*)pd)->pd_num, pd->usecnt)); + } + + return ret; +} + +/* Multicast groups */ + +int ibv_attach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) +{ + if (!qp->device->attach_mcast) + return -ENOSYS; + if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM) + return -EINVAL; + + return qp->device->attach_mcast(qp, gid, lid); +} + +int ibv_detach_mcast(struct ib_qp *qp, union ib_gid *gid, u16 lid) +{ + if (!qp->device->detach_mcast) + return -ENOSYS; + if (gid->raw[0] != 0xff || qp->qp_type != IB_QPT_UNRELIABLE_DGRM) + return -EINVAL; + + return 
qp->device->detach_mcast(qp, gid, lid); +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca.h b/branches/Ndi/hw/mthca/kernel/mthca.h new file mode 100644 index 00000000..9570421a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca.h @@ -0,0 +1,9 @@ +#ifndef MTHCA_H +#define MTHCA_H + +NTSTATUS mthca_init_one(hca_dev_ext_t *ext); +void mthca_remove_one(hca_dev_ext_t *ext); +int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id); + +#endif + diff --git a/branches/Ndi/hw/mthca/kernel/mthca.inf b/branches/Ndi/hw/mthca/kernel/mthca.inf new file mode 100644 index 00000000..f995a48a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca.inf @@ -0,0 +1,204 @@ +; Mellanox Technologies InfiniBand HCAs. +; Copyright 2005 Mellanox Technologies all Rights Reserved. + +[Version] +Signature="$Windows NT$" +Class=InfiniBandHca +ClassGUID={58517E00-D3CF-40c9-A679-CEE5752F4491} +Provider=%OPENIB% +; must be synchronized with MTHCA_DEV.H +DriverVer=03/08/2006,1.0.0000.614 + +; ================= Destination directory section ===================== + +[DestinationDirs] +DefaultDestDir=%DIRID_DRIVERS% +ClassCopyFiles=%DIRID_SYSTEM% +MTHCA.UMCopyFiles=%DIRID_SYSTEM% +MTHCA.WOW64CopyFiles=%DIRID_SYSTEM_X86% + +; ================= Class Install section ===================== + +[ClassInstall32] +CopyFiles=ClassCopyFiles +AddReg=ClassAddReg + +[ClassCopyFiles] +IbInstaller.dll + +[ClassAddReg] +HKR,,,,"InfiniBand Host Channel Adapters" +HKR,,Icon,,-5 +HKR,,SilentInstall,,1 +HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \ + %HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller" + +; ================= Device Install section ===================== + +[SourceDisksNames.x86] +1=%DiskId%,,,"" + +[SourceDisksNames.amd64] +1=%DiskId%,,,"" + +[SourceDisksNames.ia64] +1=%DiskId%,,,"" + +[SourceDisksFiles] +IbInstaller.dll=1 +mthca.sys=1 +mthcau.dll=1 +mthcaud.dll=1 + +[SourceDisksFiles.amd64] +IbInstaller.dll=1 +mthca.sys=1 +mthcau.dll=1 +mthcaud.dll=1 +mthca32.dll=1 +mthca32d.dll=1 + +[SourceDisksFiles.ia64] +IbInstaller.dll=1 +mthca.sys=1 +mthcau.dll=1 +mthcaud.dll=1 +mthca32.dll=1 +mthca32d.dll=1 + +[Manufacturer] +%MTL% = HCA.DeviceSection,ntx86,ntamd64,ntia64 + +[HCA.DeviceSection] +; empty since we don't support W9x/Me + +[HCA.DeviceSection.ntx86] +%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 +%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45 +%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279 +%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D +%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 +%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275 + +[HCA.DeviceSection.ntamd64] +%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 +%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45 +%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279 +%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D +%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 +%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275 + +[HCA.DeviceSection.ntia64] +%MT23108.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A44 
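+; one model line per hardware ID: VEN_15B3 is Mellanox's PCI vendor ID,
+; DEV_xxxx selects the HCA silicon, and each %...% token on the left is
+; resolved from the [Strings] section at the end of this INF.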
+%MT23109.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5A45 +%MT25208.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6278 +%MT25209.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6279 +%MT25218.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6282 +%MT24204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8C +%MT24205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_5E8D +%MT25204.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6274 +%MT25205.DeviceDesc%=MTHCA.DDInstall, PCI\VEN_15B3&DEV_6275 + +[MTHCA.DDInstall.ntx86] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles +CopyINF=ib_bus.inf + +[MTHCA.DDInstall.ntamd64] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles +CopyFiles = MTHCA.WOW64CopyFiles +CopyINF=ib_bus.inf + +[MTHCA.DDInstall.ntia64] +CopyFiles = MTHCA.CopyFiles +CopyFiles = MTHCA.UMCopyFiles +CopyFiles = MTHCA.WOW64CopyFiles +CopyINF=ib_bus.inf + +[MTHCA.DDInstall.ntx86.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog + +[MTHCA.DDInstall.ntamd64.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog + +[MTHCA.DDInstall.ntia64.Services] +AddService = mthca,%SPSVCINST_ASSOCSERVICE%,MTHCA.ServiceInstall,MTHCA.EventLog + +[MTHCA.CopyFiles] +mthca.sys + +[MTHCA.UMCopyFiles] +mthcau.dll,,,2 +mthcaud.dll,,,2 + +[MTHCA.WOW64CopyFiles] +mthcau.dll,mthca32.dll,,2 +mthcaud.dll,mthca32d.dll,,2 + +; +; ============= Service Install section ============== +; + +[MTHCA.ServiceInstall] +DisplayName = %MTHCA.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\mthca.sys +LoadOrderGroup = extended base +AddReg = MTHCA.ParamsReg + + +[MTHCA.EventLog] +AddReg = MTHCA.AddEventLogReg + +[MTHCA.AddEventLogReg] +HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\IoLogMsg.dll;%%SystemRoot%%\System32\drivers\mthca.sys" +HKR, , TypesSupported, 0x00010001, 7 + +[MTHCA.ParamsReg] +HKR,"Parameters","DebugLevel",%REG_DWORD%,0x00000003 +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x0000ffff +HKR,"Parameters","SkipTavorReset",%REG_DWORD%,0 +HKR,"Parameters","DisableTavorResetOnFailure",%REG_DWORD%,1 +HKR,"Parameters","TunePci",%REG_DWORD%,0 +HKR,"Parameters","ProcessorAffinity",%REG_DWORD%,0 +HKR,"Parameters","MaxDpcTimeUs",%REG_DWORD%,10000 +HKR,"Parameters","ProfileQpNum",%REG_DWORD%,0 +HKR,"Parameters","ProfileRdOut",%REG_DWORD%,0xffffffff +HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Flags",%REG_DWORD%,0xffff +HKLM,"System\CurrentControlSet\Control\WMI\GlobalLogger\8bf1f640-63fe-4743-b9ef-fa38c695bfde","Level",%REG_DWORD%,0x3 + +[Strings] +HcaClassGuid = "{58517E00-D3CF-40c9-A679-CEE5752F4491}" +OPENIB = "OpenIB Alliance" +MTL="Mellanox Technologies Ltd." 
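+; every %token% used above resolves to one of the quoted values in this
+; section; e.g. the [Manufacturer] line '%MTL% = HCA.DeviceSection,...'
+; expands to "Mellanox Technologies Ltd.".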
+MTHCA.ServiceDesc = "Driver for Mellanox InfiniHost Devices" +MT23108.DeviceDesc="InfiniHost (MT23108) - Mellanox InfiniBand HCA" +MT23109.DeviceDesc="InfiniHost (MT23109) - Mellanox InfiniBand HCA (burner device)" +MT25208.DeviceDesc="InfiniHost (MT25208) - Mellanox InfiniBand HCA for PCI Express" +MT25209.DeviceDesc="InfiniHost (MT25209) - Mellanox InfiniBand HCA for PCI Express (burner device)" +MT25218.DeviceDesc="InfiniHost III Ex (MT25218) - Mellanox InfiniBand HCA for PCI Express" +MT24204.DeviceDesc="InfiniHost III Lx (MT24204) - Mellanox InfiniBand HCA for PCI Express" +MT24205.DeviceDesc="InfiniHost III Lx (MT24205) - Mellanox InfiniBand HCA for PCI Express (burner device)" +MT25204.DeviceDesc="InfiniHost III Lx (MT25204) - Mellanox InfiniBand HCA for PCI Express" +MT25205.DeviceDesc="InfiniHost III Lx (MT25205) - Mellanox InfiniBand HCA for PCI Express (burner device)" +DiskId = "Mellanox InfiniBand HCA installation disk" +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_MULTI_SZ_APPEND = 0x00010008 +DIRID_SYSTEM = 11 +DIRID_DRIVERS = 12 +DIRID_SYSTEM_X86 = 16425 + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_allocator.c b/branches/Ndi/hw/mthca/kernel/mthca_allocator.c new file mode 100644 index 00000000..28dd974f --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_allocator.c @@ -0,0 +1,294 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_allocator.tmh" +#endif + +/* Trivial bitmap-based allocator */ +u32 mthca_alloc(struct mthca_alloc *alloc) +{ + u32 obj; + SPIN_LOCK_PREP(lh); + + spin_lock(&alloc->lock, &lh); + obj = find_next_zero_bit(alloc->table, alloc->max, alloc->last); + if (obj >= alloc->max) { + alloc->top = (alloc->top + alloc->max) & alloc->mask; + obj = find_first_zero_bit(alloc->table, alloc->max); + } + + if (obj < alloc->max) { + set_bit(obj, (long*)alloc->table); + obj |= alloc->top; + } else + obj = (u32)-1; + + spin_unlock(&lh); + + return obj; +} + +void mthca_free(struct mthca_alloc *alloc, u32 obj) +{ + SPIN_LOCK_PREP(lh); + + obj &= alloc->max - 1; + spin_lock(&alloc->lock, &lh); + clear_bit(obj, (long *)alloc->table); + alloc->last = MIN(alloc->last, obj); + alloc->top = (alloc->top + alloc->max) & alloc->mask; + spin_unlock(&lh); +} + +int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, + u32 reserved) +{ + int i; + HCA_ENTER(HCA_DBG_INIT); + /* num must be a power of 2 */ + if ((int)num != 1 << (ffs(num) - 1)) + return -EINVAL; + + alloc->last = 0; + alloc->top = 0; + alloc->max = num; + alloc->mask = mask; + spin_lock_init(&alloc->lock); + alloc->table = kmalloc(BITS_TO_LONGS(num) * sizeof (long), + GFP_KERNEL); + if (!alloc->table) + return -ENOMEM; + + bitmap_zero(alloc->table, num); + for (i = 0; i < (int)reserved; ++i) + set_bit(i, (long *)alloc->table); + + return 0; +} + +void mthca_alloc_cleanup(struct mthca_alloc *alloc) +{ + kfree(alloc->table); +} + +/* + * Array of pointers with lazy allocation of leaf pages. Callers of + * _get, _set and _clear methods must use a lock or otherwise + * serialize access to the array. + */ + +#define MTHCA_ARRAY_MASK (PAGE_SIZE / sizeof (void *) - 1) + +void *mthca_array_get(struct mthca_array *array, int index) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + if (array->page_list[p].page) + return array->page_list[p].page[index & MTHCA_ARRAY_MASK]; + else + return NULL; +} + +int mthca_array_set(struct mthca_array *array, int index, void *value) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + /* Allocate with GFP_ATOMIC because we'll be called with locks held. 
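+	 *
+	 * Leaf lookup is pure arithmetic: (index * sizeof(void *)) >>
+	 * PAGE_SHIFT picks the lazily allocated page and
+	 * index & MTHCA_ARRAY_MASK picks the slot within it; with 4 KB
+	 * pages and 8-byte pointers, index 513 lands in page 1, slot 1.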
*/ + if (!array->page_list[p].page) + array->page_list[p].page = (void **) get_zeroed_page(GFP_ATOMIC); + + if (!array->page_list[p].page) + return -ENOMEM; + + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = value; + ++array->page_list[p].used; + + return 0; +} + +void mthca_array_clear(struct mthca_array *array, int index) +{ + int p = (index * sizeof (void *)) >> PAGE_SHIFT; + + if (array->page_list[p].used <= 0) { + HCA_PRINT(TRACE_LEVEL_INFORMATION, HCA_DBG_LOW,("Array %p index %d page %d with ref count %d < 0\n", + array, index, p, array->page_list[p].used)); + return; + } + + if (--array->page_list[p].used == 0) { + free_page((void*) array->page_list[p].page); + array->page_list[p].page = NULL; + } + else + array->page_list[p].page[index & MTHCA_ARRAY_MASK] = NULL; +} + +int mthca_array_init(struct mthca_array *array, int nent) +{ + int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; + int i; + + array->page_list = kmalloc(npage * sizeof *array->page_list, GFP_KERNEL); + if (!array->page_list) + return -ENOMEM; + + for (i = 0; i < npage; ++i) { + array->page_list[i].page = NULL; + array->page_list[i].used = 0; + } + + return 0; +} + +void mthca_array_cleanup(struct mthca_array *array, int nent) +{ + int i; + + for (i = 0; i < (int)((nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE); ++i) + free_page((void*) array->page_list[i].page); + + kfree(array->page_list); +} + +/* + * Handling for queue buffers -- we allocate a bunch of memory and + * register it in a memory region at HCA virtual address 0. If the + * requested size is > max_direct, we split the allocation into + * multiple pages, so we don't require too much contiguous memory. + */ + +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr) +{ + int err = -ENOMEM; + int npages, shift; + u64 *dma_list = NULL; + dma_addr_t t; + int i; + + HCA_ENTER(HCA_DBG_MEMORY); + if (size <= max_direct) { + *is_direct = 1; + npages = 1; + shift = get_order(size) + PAGE_SHIFT; + + alloc_dma_zmem_map(dev, size, PCI_DMA_BIDIRECTIONAL, &buf->direct); + if (!buf->direct.page) + return -ENOMEM; + t = buf->direct.dma_address; /* shorten the code below */ + + while (t & ((1 << shift) - 1)) { + --shift; + npages *= 2; + } + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_free; + + for (i = 0; i < npages; ++i) + dma_list[i] = t + i * (1 << shift); + } else { + *is_direct = 0; + npages = (size + PAGE_SIZE - 1) / PAGE_SIZE; + shift = PAGE_SHIFT; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + return -ENOMEM; + + buf->page_list = kmalloc(npages * sizeof *buf->page_list, + GFP_KERNEL); + if (!buf->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + buf->page_list[i].page = NULL; + + for (i = 0; i < npages; ++i) { + alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &buf->page_list[i]); + if (!buf->page_list[i].page) + goto err_free; + dma_list[i] = buf->page_list[i].dma_address; + } + } + + err = mthca_mr_alloc_phys(dev, pd->pd_num, + dma_list, shift, npages, + 0, size, + MTHCA_MPT_FLAG_LOCAL_READ | + (hca_write ? 
MTHCA_MPT_FLAG_LOCAL_WRITE : 0), + mr); + if (err) + goto err_free; + + kfree(dma_list); + + HCA_EXIT(HCA_DBG_MEMORY); + return 0; + +err_free: + mthca_buf_free(dev, size, buf, *is_direct, NULL); + +err_out: + kfree(dma_list); + + return err; +} + +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr) +{ + int i; + + if (mr) + mthca_free_mr(dev, mr); + + if (is_direct) { + free_dma_mem_map(dev, &buf->direct, PCI_DMA_BIDIRECTIONAL); + } + else { + for (i = 0; i < (size + PAGE_SIZE - 1) / PAGE_SIZE; ++i) { + free_dma_mem_map(dev, &buf->page_list[i], PCI_DMA_BIDIRECTIONAL); + } + kfree(buf->page_list); + } +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_av.c b/branches/Ndi/hw/mthca/kernel/mthca_av.c new file mode 100644 index 00000000..ba029d05 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_av.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_av.tmh" +#endif + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_av_table) +#pragma alloc_text (PAGE, mthca_cleanup_av_table) +#endif + + +struct mthca_av { + __be32 port_pd; + u8 reserved1; + u8 g_slid; + __be16 dlid; + u8 reserved2; + u8 gid_index; + u8 msg_sr; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + __be32 dgid[4]; +}; + +int mthca_create_ah(struct mthca_dev *dev, + struct mthca_pd *pd, + struct ib_ah_attr *ah_attr, + struct mthca_ah *ah) +{ + u32 index = (u32)-1; + struct mthca_av *av = NULL; + + ah->type = MTHCA_AH_PCI_POOL; + + if (mthca_is_memfree(dev)) { + ah->av = kmalloc(sizeof *ah->av, GFP_ATOMIC); + if (!ah->av) + return -ENOMEM; + + ah->type = MTHCA_AH_KMALLOC; + av = ah->av; + } else if (!atomic_read(&pd->sqp_count) && + !(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + index = mthca_alloc(&dev->av_table.alloc); + + /* fall back to allocate in host memory */ + if (index == -1) + goto on_hca_fail; + + av = kmalloc(sizeof *av, GFP_ATOMIC); + if (!av) + goto on_hca_fail; + + ah->type = MTHCA_AH_ON_HCA; + ah->avdma = dev->av_table.ddr_av_base + + index * MTHCA_AV_SIZE; + } + +on_hca_fail: + if (ah->type == MTHCA_AH_PCI_POOL) { + ah->av = pci_pool_alloc(dev->av_table.pool, + SLAB_ATOMIC, &ah->avdma); + if (!ah->av) + return -ENOMEM; + + av = ah->av; + } + + ah->key = pd->ntmr.ibmr.lkey; + + RtlZeroMemory(av, MTHCA_AV_SIZE); + + av->port_pd = cl_hton32(pd->pd_num | (ah_attr->port_num << 24)); + av->g_slid = ah_attr->src_path_bits; + av->dlid = cl_hton16(ah_attr->dlid); + av->msg_sr = (3 << 4) | /* 2K message */ + ah_attr->static_rate; + av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28); + if (ah_attr->ah_flags & IB_AH_GRH) { + av->g_slid |= 0x80; + av->gid_index = (u8)((ah_attr->port_num - 1) * dev->limits.gid_table_len + + ah_attr->grh.sgid_index); + av->hop_limit = ah_attr->grh.hop_limit; + av->sl_tclass_flowlabel |= + cl_hton32((ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label); + memcpy(av->dgid, ah_attr->grh.dgid.raw, 16); + } else { + /* Arbel workaround -- low byte of GID must be 2 */ + av->dgid[3] = cl_hton32(2); + } + + { // debug print + int j; + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Created UDAV at %p/%08lx:\n", + av, (unsigned long) ah->avdma)); + for (j = 0; j < 8; ++j) + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_AV ,(" [%2x] %08x\n", + j * 4, cl_ntoh32(((__be32 *) av)[j]))); + } + + if (ah->type == MTHCA_AH_ON_HCA) { + memcpy_toio((u8*)dev->av_table.av_map + index * MTHCA_AV_SIZE, + av, MTHCA_AV_SIZE); + ah->av = (struct mthca_av *)( (u8*)( dev->av_table.av_map) + index *MTHCA_AV_SIZE ); + kfree(av); + } + return 0; +} + +int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah) +{ + HCA_ENTER(HCA_DBG_AV); + + switch (ah->type) { + case MTHCA_AH_ON_HCA: + mthca_free(&dev->av_table.alloc, + (u32)( (ah->avdma - dev->av_table.ddr_av_base) /MTHCA_AV_SIZE)); + break; + + case MTHCA_AH_PCI_POOL: + pci_pool_free(dev->av_table.pool, ah->av, ah->avdma); + break; + + case MTHCA_AH_KMALLOC: + kfree(ah->av); + break; + } + + HCA_EXIT(HCA_DBG_AV); + return 0; +} + +int mthca_ah_grh_present(struct mthca_ah *ah) +{ + return !!(ah->av->g_slid & 0x80); +} + +int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, + struct ib_ud_header *header) +{ + if (ah->type == MTHCA_AH_ON_HCA) + return -ENOSYS; + + header->lrh.service_level = (u8)(cl_ntoh32(ah->av->sl_tclass_flowlabel) >> 28); + 
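+	/* g_slid carries two fields: bit 7 is the GRH-present flag set by
+	 * mthca_create_ah() (tested in mthca_ah_grh_present() above) and
+	 * bits 0-6 are the source path bits, hence the & 0x7f when the
+	 * source LID is recovered below. */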
header->lrh.destination_lid = ah->av->dlid; + header->lrh.source_lid = cl_hton16(ah->av->g_slid & 0x7f); + header->grh_present = mthca_ah_grh_present(ah); + if (header->grh_present) { + header->grh.traffic_class = + (u8)((cl_ntoh32(ah->av->sl_tclass_flowlabel) >> 20) & 0xff); + header->grh.flow_label = + (u8)(ah->av->sl_tclass_flowlabel & cl_hton32(0xfffff)); + ib_get_cached_gid(&dev->ib_dev, + (u8) (cl_ntoh32(ah->av->port_pd) >> 24), + ah->av->gid_index % dev->limits.gid_table_len, + &header->grh.source_gid); + memcpy(header->grh.destination_gid.raw, + ah->av->dgid, 16); + } + + return 0; +} + +int mthca_init_av_table(struct mthca_dev *dev) +{ + int err; + + if (mthca_is_memfree(dev)) + return 0; + + err = mthca_alloc_init(&dev->av_table.alloc, + dev->av_table.num_ddr_avs, + dev->av_table.num_ddr_avs - 1, + 0); + if (err) + return err; + + dev->av_table.pool = pci_pool_create("mthca_av", dev, + MTHCA_AV_SIZE, + MTHCA_AV_SIZE, 0); + if (!dev->av_table.pool) + goto out_free_alloc; + + if (!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + dev->av_table.av_map = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_DDR) + + dev->av_table.ddr_av_base - + dev->ddr_start, + dev->av_table.num_ddr_avs * + MTHCA_AV_SIZE, + &dev->av_table.av_map_size); + if (!dev->av_table.av_map) + goto out_free_pool; + } else + dev->av_table.av_map = NULL; + + return 0; + + out_free_pool: + pci_pool_destroy(dev->av_table.pool); + + out_free_alloc: + mthca_alloc_cleanup(&dev->av_table.alloc); + return -ENOMEM; +} + +void mthca_cleanup_av_table(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) + return; + + if (dev->av_table.av_map) + iounmap(dev->av_table.av_map, dev->av_table.av_map_size); + pci_pool_destroy(dev->av_table.pool); + mthca_alloc_cleanup(&dev->av_table.alloc); +} + +//NB: temporary, for support of query_qp +void mthca_get_av_params( struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits ) +{ + struct mthca_av *av_p = ah_p->av; + *port_num = (u8) (cl_ntoh32(av_p->port_pd) >> 24); + *dlid = av_p->dlid; + *sr = av_p->msg_sr & 0x0f; + *path_bits = av_p->g_slid & 0x7f; +} + +//NB: temporary, for support of modify_qp +void mthca_set_av_params( struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr ) +{ + struct mthca_av *av = ah_p->av; + struct ib_ah *ib_ah_p = (struct ib_ah *)ah_p; + struct mthca_pd *pd = (struct mthca_pd *)ib_ah_p->pd; + + // taken from mthca_create_av + av->port_pd = cl_hton32(pd->pd_num | (ah_attr->port_num << 24)); + av->g_slid = ah_attr->src_path_bits; + av->dlid = cl_hton16(ah_attr->dlid); + av->msg_sr = (3 << 4) | /* 2K message */ + ah_attr->static_rate; + av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28); + if (ah_attr->ah_flags & IB_AH_GRH) { + av->g_slid |= 0x80; + av->gid_index = (u8)((ah_attr->port_num - 1) * dev->limits.gid_table_len + + ah_attr->grh.sgid_index); + av->hop_limit = ah_attr->grh.hop_limit; + av->sl_tclass_flowlabel |= + cl_hton32((ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label); + memcpy(av->dgid, ah_attr->grh.dgid.raw, 16); + } else { + /* Arbel workaround -- low byte of GID must be 2 */ + av->dgid[3] = cl_hton32(2); + } +} + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_catas.c b/branches/Ndi/hw/mthca/kernel/mthca_catas.c new file mode 100644 index 00000000..0c91518f --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_catas.c @@ -0,0 +1,166 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_catas.tmh" +#endif + +enum { + MTHCA_CATAS_POLL_INTERVAL = 5 * HZ, + + MTHCA_CATAS_TYPE_INTERNAL = 0, + MTHCA_CATAS_TYPE_UPLINK = 3, + MTHCA_CATAS_TYPE_DDR = 4, + MTHCA_CATAS_TYPE_PARITY = 5, +}; + +static spinlock_t catas_lock; + +static void handle_catas(struct mthca_dev *dev) +{ + struct ib_event event; + const char *type; + int i; + + event.device = &dev->ib_dev; + event.event = IB_EVENT_DEVICE_FATAL; + event.element.port_num = 0; + + ib_dispatch_event(&event); + + switch (_byteswap_ulong(readl(dev->catas_err.map)) >> 24) { + case MTHCA_CATAS_TYPE_INTERNAL: + type = "internal error"; + break; + case MTHCA_CATAS_TYPE_UPLINK: + type = "uplink bus error"; + break; + case MTHCA_CATAS_TYPE_DDR: + type = "DDR data error"; + break; + case MTHCA_CATAS_TYPE_PARITY: + type = "internal parity error"; + break; + default: + type = "unknown error"; + break; + } + + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Catastrophic error detected: %s\n", type)); + for (i = 0; i < (int)dev->catas_err.size; ++i) + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,(" buf[%02x]: %08x\n", + i, _byteswap_ulong(readl(dev->catas_err.map + i)))); +} + +static void poll_catas(struct mthca_dev *dev) +{ + int i; + SPIN_LOCK_PREP(lh); + + for (i = 0; i < (int)dev->catas_err.size; ++i) + if (readl(dev->catas_err.map + i)) { + handle_catas(dev); + return; + } + + spin_lock_dpc(&catas_lock, &lh); + if (!dev->catas_err.stop) { + KeSetTimerEx( &dev->catas_err.timer, dev->catas_err.interval, + 0, &dev->catas_err.timer_dpc ); + } + spin_unlock_dpc(&lh); + + return; +} + +static void timer_dpc( + IN struct _KDPC *Dpc, + IN PVOID DeferredContext, + IN PVOID SystemArgument1, + IN PVOID SystemArgument2 + ) +{ + struct mthca_dev *dev = (struct mthca_dev *)DeferredContext; + UNREFERENCED_PARAMETER(Dpc); + UNREFERENCED_PARAMETER(SystemArgument1); + UNREFERENCED_PARAMETER(SystemArgument2); + poll_catas( dev ); +} + + +void mthca_start_catas_poll(struct mthca_dev *dev) +{ + u64 addr; + + dev->catas_err.stop = 0; + dev->catas_err.map = NULL; + + addr = pci_resource_start(dev, HCA_BAR_TYPE_HCR) + + ((pci_resource_len(dev, HCA_BAR_TYPE_HCR) - 1) & + dev->catas_err.addr); + + dev->catas_err.map = 
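+	/* map the catastrophic-error buffer that sits behind the HCR BAR;
+	 * poll_catas() then samples it from a timer DPC.  (The negative
+	 * QuadPart set below is the Windows convention for a relative due
+	 * time in 100 ns units.) */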
ioremap(addr, dev->catas_err.size * 4, &dev->catas_err.map_size ); + if (!dev->catas_err.map) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("couldn't map catastrophic error region " + "at 0x%I64x/0x%x\n", addr, dev->catas_err.size * 4)); + return; + } + + spin_lock_init( &catas_lock ); + KeInitializeDpc( &dev->catas_err.timer_dpc, timer_dpc, dev ); + KeInitializeTimer( &dev->catas_err.timer ); + dev->catas_err.interval.QuadPart = (-10)* (__int64)MTHCA_CATAS_POLL_INTERVAL; + KeSetTimerEx( &dev->catas_err.timer, dev->catas_err.interval, + 0, &dev->catas_err.timer_dpc ); +} + +void mthca_stop_catas_poll(struct mthca_dev *dev) +{ + SPIN_LOCK_PREP(lh); + + spin_lock_irq(&catas_lock, &lh); + dev->catas_err.stop = 1; + spin_unlock_irq(&lh); + + KeCancelTimer(&dev->catas_err.timer); + KeFlushQueuedDpcs(); + + if (dev->catas_err.map) { + iounmap(dev->catas_err.map, dev->catas_err.map_size); + } +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_cmd.c b/branches/Ndi/hw/mthca/kernel/mthca_cmd.c new file mode 100644 index 00000000..2ea169d4 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_cmd.c @@ -0,0 +1,1830 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_cmd.tmh" +#endif +#include "mthca_config_reg.h" +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +#define CMD_POLL_TOKEN 0xffff + +enum { + HCR_IN_PARAM_OFFSET = 0x00, + HCR_IN_MODIFIER_OFFSET = 0x08, + HCR_OUT_PARAM_OFFSET = 0x0c, + HCR_TOKEN_OFFSET = 0x14, + HCR_STATUS_OFFSET = 0x18, + + HCR_OPMOD_SHIFT = 12, + HCA_E_BIT = 22, + HCR_GO_BIT = 23 +}; + +enum { + /* initialization and general commands */ + CMD_SYS_EN = 0x1, + CMD_SYS_DIS = 0x2, + CMD_MAP_FA = 0xfff, + CMD_UNMAP_FA = 0xffe, + CMD_RUN_FW = 0xff6, + CMD_MOD_STAT_CFG = 0x34, + CMD_QUERY_DEV_LIM = 0x3, + CMD_QUERY_FW = 0x4, + CMD_ENABLE_LAM = 0xff8, + CMD_DISABLE_LAM = 0xff7, + CMD_QUERY_DDR = 0x5, + CMD_QUERY_ADAPTER = 0x6, + CMD_INIT_HCA = 0x7, + CMD_CLOSE_HCA = 0x8, + CMD_INIT_IB = 0x9, + CMD_CLOSE_IB = 0xa, + CMD_QUERY_HCA = 0xb, + CMD_SET_IB = 0xc, + CMD_ACCESS_DDR = 0x2e, + CMD_MAP_ICM = 0xffa, + CMD_UNMAP_ICM = 0xff9, + CMD_MAP_ICM_AUX = 0xffc, + CMD_UNMAP_ICM_AUX = 0xffb, + CMD_SET_ICM_SIZE = 0xffd, + + /* TPT commands */ + CMD_SW2HW_MPT = 0xd, + CMD_QUERY_MPT = 0xe, + CMD_HW2SW_MPT = 0xf, + CMD_READ_MTT = 0x10, + CMD_WRITE_MTT = 0x11, + CMD_SYNC_TPT = 0x2f, + + /* EQ commands */ + CMD_MAP_EQ = 0x12, + CMD_SW2HW_EQ = 0x13, + CMD_HW2SW_EQ = 0x14, + CMD_QUERY_EQ = 0x15, + + /* CQ commands */ + CMD_SW2HW_CQ = 0x16, + CMD_HW2SW_CQ = 0x17, + CMD_QUERY_CQ = 0x18, + CMD_RESIZE_CQ = 0x2c, + + /* SRQ commands */ + CMD_SW2HW_SRQ = 0x35, + CMD_HW2SW_SRQ = 0x36, + CMD_QUERY_SRQ = 0x37, + CMD_ARM_SRQ = 0x40, + + /* QP/EE commands */ + CMD_RST2INIT_QPEE = 0x19, + CMD_INIT2RTR_QPEE = 0x1a, + CMD_RTR2RTS_QPEE = 0x1b, + CMD_RTS2RTS_QPEE = 0x1c, + CMD_SQERR2RTS_QPEE = 0x1d, + CMD_2ERR_QPEE = 0x1e, + CMD_RTS2SQD_QPEE = 0x1f, + CMD_SQD2SQD_QPEE = 0x38, + CMD_SQD2RTS_QPEE = 0x20, + CMD_ERR2RST_QPEE = 0x21, + CMD_QUERY_QPEE = 0x22, + CMD_INIT2INIT_QPEE = 0x2d, + CMD_SUSPEND_QPEE = 0x32, + CMD_UNSUSPEND_QPEE = 0x33, + /* special QPs and management commands */ + CMD_CONF_SPECIAL_QP = 0x23, + CMD_MAD_IFC = 0x24, + + /* multicast commands */ + CMD_READ_MGM = 0x25, + CMD_WRITE_MGM = 0x26, + CMD_MGID_HASH = 0x27, + + /* miscellaneous commands */ + CMD_DIAG_RPRT = 0x30, + CMD_NOP = 0x31, + + /* debug commands */ + CMD_QUERY_DEBUG_MSG = 0x2a, + CMD_SET_DEBUG_MSG = 0x2b, +}; + +/* + * According to Mellanox code, FW may be starved and never complete + * commands. So we can't use strict timeouts described in PRM -- we + * just arbitrarily select 60 seconds for now. 
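+ *
+ * A sketch of where these tick counts go, assuming this port defines
+ * HZ as 1,000,000 so that CMD_TIME_CLASS_* values are microseconds
+ * (an assumption -- HZ comes from a portability header outside this
+ * hunk). The waiters below convert a timeout into the negative,
+ * i.e. relative, 100ns units that KeWaitForSingleObject() and
+ * KeSetTimerEx() expect; ctx is illustrative:
+ *
+ *	LARGE_INTEGER interval;
+ *	interval.QuadPart = (-10) * (__int64)(60 * HZ);	// 60 seconds
+ *	KeWaitForSingleObject(&ctx->event, Executive, KernelMode,
+ *			      FALSE, &interval);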
+ */ +#define CMD_POLL_N_TRIES 60 + +enum { + CMD_TIME_CLASS_A = 60 * HZ, + CMD_TIME_CLASS_B = 60 * HZ, + CMD_TIME_CLASS_C = 60 * HZ +}; + +enum { + GO_BIT_TIMEOUT = 10 * HZ +}; + +#define GO_BIT_N_TRIES 5 +#define GO_BIT_STALL_TIMEOUT ((GO_BIT_TIMEOUT/HZ)/GO_BIT_N_TRIES) /* usecs */ + +struct mthca_cmd_context { + KEVENT event; + int result; + int next; + u64 out_param; + u16 token; + u8 status; +}; + +static inline int go_bit(struct mthca_dev *dev) +{ + return readl(dev->hcr + HCR_STATUS_OFFSET) & + _byteswap_ulong(1 << HCR_GO_BIT); +} + +/* +* Function: performs busy-wait loop, while polling GO bit +* Return: 0 when GO bit was extinguished in time +*/ +static int poll_go_bit(struct mthca_dev *dev) +{ + int i=0; /* init must be here !*/ + + if (!go_bit(dev)) + return 0; + + for (; i= N_POLL_TRIES) { + if ( (__int64)interval.QuadPart > (__int64)MAX_POLL_INTERVAL) + interval.QuadPart += POLL_INTERVAL_DELTA; + i = 0; + } +#endif + } + + if (!go_bit(dev)) return 0; + return 1; +} + + +static int mthca_cmd_post(struct mthca_dev *dev, + u64 in_param, + u64 out_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + u16 token, + int event) +{ + int err = 0; + + down(&dev->cmd.hcr_mutex); + + if (event && wait_go_bit(dev,GO_BIT_TIMEOUT)) { + err = -EAGAIN; + goto out; + } + + /* + * We use writel (instead of something like memcpy_toio) + * because writes of less than 32 bits to the HCR don't work + * (and some architectures such as ia64 implement memcpy_toio + * in terms of writeb). + */ + __raw_writel((u32) cl_hton32((u32)(in_param >> 32)), (u8 *)dev->hcr + 0 * 4); + __raw_writel((u32) cl_hton32((u32)(in_param & 0xfffffffful)), (u8 *) dev->hcr + 1 * 4); + __raw_writel((u32) cl_hton32(in_modifier), (u8 *)dev->hcr + 2 * 4); + __raw_writel((u32) cl_hton32((u32)(out_param >> 32)), (u8 *)dev->hcr + 3 * 4); + __raw_writel((u32) cl_hton32((u32)(out_param & 0xfffffffful)), (u8 *)dev->hcr + 4 * 4); + __raw_writel((u32) cl_hton32(token << 16), (u8 *)dev->hcr + 5 * 4); + + /* __raw_writel may not order writes. */ + wmb(); + + __raw_writel((u32) cl_hton32((1 << HCR_GO_BIT) | + (event ? (1 << HCA_E_BIT) : 0) | + (op_modifier << HCR_OPMOD_SHIFT) | + op), (u8 *)dev->hcr + 6 * 4); + +out: + up(&dev->cmd.hcr_mutex); + return err; +} + + +static int mthca_cmd_poll(struct mthca_dev *dev, + u64 in_param, + u64 *out_param, + int out_is_imm, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + int err = 0; + + sem_down(&dev->cmd.poll_sem); + + err = mthca_cmd_post(dev, in_param, + out_param ? 
*out_param : 0, + in_modifier, op_modifier, + op, CMD_POLL_TOKEN, 0); + if (err) + goto out; + + if (wait_go_bit(dev,timeout)) { + err = -EBUSY; + goto out; + } + + if (out_is_imm) + *out_param = + (u64) cl_ntoh32((__be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET)) << 32 | + (u64) cl_ntoh32((__be32) + __raw_readl(dev->hcr + HCR_OUT_PARAM_OFFSET + 4)); + + *status = (u8)(cl_ntoh32((__be32) __raw_readl(dev->hcr + HCR_STATUS_OFFSET)) >> 24); + if (*status) + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n", + op, *status)); + +out: + sem_up(&dev->cmd.poll_sem); + return err; +} + +void mthca_cmd_event(struct mthca_dev *dev, + u16 token, + u8 status, + u64 out_param) +{ + struct mthca_cmd_context *context = + &dev->cmd.context[token & dev->cmd.token_mask]; + + /* previously timed out command completing at long last */ + if (token != context->token) + return; + + context->result = 0; + context->status = status; + context->out_param = out_param; + + context->token += dev->cmd.token_mask + 1; + + ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL); + KeSetEvent( &context->event, 0, FALSE ); +} + +static int mthca_cmd_wait(struct mthca_dev *dev, + u64 in_param, + u64 *out_param, + int out_is_imm, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + int err = 0; + struct mthca_cmd_context *context; + SPIN_LOCK_PREP(lh); + + sem_down(&dev->cmd.event_sem); + + spin_lock( &dev->cmd.context_lock, &lh ); + BUG_ON(dev->cmd.free_head < 0); + context = &dev->cmd.context[dev->cmd.free_head]; + dev->cmd.free_head = context->next; + spin_unlock( &lh ); + + KeClearEvent( &context->event ); + err = mthca_cmd_post(dev, in_param, + out_param ? *out_param : 0, + in_modifier, op_modifier, + op, context->token, 1); + if (err) { + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW, + ("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err)); + goto out; + } + + { + NTSTATUS res; + LARGE_INTEGER interval; + interval.QuadPart = (-10)* (__int64)timeout; + res = KeWaitForSingleObject( &context->event, Executive, KernelMode, FALSE, &interval ); + if (res != STATUS_SUCCESS) { + err = -EBUSY; + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW, + ("mthca_cmd_wait: Command %02x completed with err %02x\n", op, err)); + goto out; + } + } + + *status = context->status; + if (*status) + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_LOW,("mthca_cmd_wait: Command %02x completed with status %02x\n", + op, *status)); + + if (out_is_imm) + *out_param = context->out_param; + +out: + spin_lock(&dev->cmd.context_lock, &lh); + context->next = dev->cmd.free_head; + dev->cmd.free_head = (int)(context - dev->cmd.context); + spin_unlock(&lh); + + sem_up( &dev->cmd.event_sem ); + + return err; +} + +/* Invoke a command with an output mailbox */ +static int mthca_cmd_box(struct mthca_dev *dev, + u64 in_param, + u64 out_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + if (dev->cmd.use_events) + return mthca_cmd_wait(dev, in_param, &out_param, 0, + in_modifier, op_modifier, op, + timeout, status); + else + return mthca_cmd_poll(dev, in_param, &out_param, 0, + in_modifier, op_modifier, op, + timeout, status); +} + +/* Invoke a command with no output parameter */ +static int mthca_cmd(struct mthca_dev *dev, + u64 in_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + return mthca_cmd_box(dev, in_param, 0, in_modifier, + op_modifier, op, timeout, status); +} + +/* + * 
Invoke a command with an immediate output parameter (and copy the + * output into the caller's out_param pointer after the command + * executes). + */ +static int mthca_cmd_imm(struct mthca_dev *dev, + u64 in_param, + u64 *out_param, + u32 in_modifier, + u8 op_modifier, + u16 op, + unsigned long timeout, + u8 *status) +{ + if (dev->cmd.use_events) + return mthca_cmd_wait(dev, in_param, out_param, 1, + in_modifier, op_modifier, op, + timeout, status); + else + return mthca_cmd_poll(dev, in_param, out_param, 1, + in_modifier, op_modifier, op, + timeout, status); +} + +int mthca_cmd_init(struct mthca_dev *dev) +{ + KeInitializeMutex(&dev->cmd.hcr_mutex, 0); + sem_init(&dev->cmd.poll_sem, 1, 1); + dev->cmd.use_events = 0; + + dev->hcr = ioremap(pci_resource_start(dev, HCA_BAR_TYPE_HCR) + MTHCA_HCR_BASE, + MTHCA_HCR_SIZE, &dev->hcr_size); + if (!dev->hcr) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Couldn't map command register.")); + return -ENOMEM; + } + + dev->cmd.pool = pci_pool_create("mthca_cmd", dev, + MTHCA_MAILBOX_SIZE, + MTHCA_MAILBOX_SIZE, 0); + if (!dev->cmd.pool) { + iounmap(dev->hcr, dev->hcr_size); + return -ENOMEM; + } + + return 0; +} + +void mthca_cmd_cleanup(struct mthca_dev *dev) +{ + pci_pool_destroy(dev->cmd.pool); + iounmap(dev->hcr, dev->hcr_size); +} + +/* + * Switch to using events to issue FW commands (should be called after + * event queue to command events has been initialized). + */ +int mthca_cmd_use_events(struct mthca_dev *dev) +{ + int i; + + dev->cmd.context = kmalloc(dev->cmd.max_cmds * + sizeof (struct mthca_cmd_context), + GFP_KERNEL); + if (!dev->cmd.context) + return -ENOMEM; + + for (i = 0; i < dev->cmd.max_cmds; ++i) { + dev->cmd.context[i].token = (u16)i; + dev->cmd.context[i].next = i + 1; + KeInitializeEvent( &dev->cmd.context[i].event, NotificationEvent , FALSE ); + } + + dev->cmd.context[dev->cmd.max_cmds - 1].next = -1; + dev->cmd.free_head = 0; + + sem_init(&dev->cmd.event_sem, dev->cmd.max_cmds, LONG_MAX); + spin_lock_init(&dev->cmd.context_lock); + + for (dev->cmd.token_mask = 1; + dev->cmd.token_mask < dev->cmd.max_cmds; + dev->cmd.token_mask <<= 1) + ; /* nothing */ + --dev->cmd.token_mask; + + dev->cmd.use_events = 1; + sem_down(&dev->cmd.poll_sem); + + return 0; +} + +/* + * Switch back to polling (used when shutting down the device) + */ +void mthca_cmd_use_polling(struct mthca_dev *dev) +{ + int i; + + dev->cmd.use_events = 0; + + for (i = 0; i < dev->cmd.max_cmds; ++i) + sem_down(&dev->cmd.event_sem); + + kfree(dev->cmd.context); + + sem_up(&dev->cmd.poll_sem); +} + +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask) +{ + struct mthca_mailbox *mailbox; + + mailbox = kmalloc(sizeof *mailbox, gfp_mask); + if (!mailbox) + return ERR_PTR(-ENOMEM); + + mailbox->buf = pci_pool_alloc(dev->cmd.pool, gfp_mask, &mailbox->dma); + if (!mailbox->buf) { + kfree(mailbox); + return ERR_PTR(-ENOMEM); + } + + return mailbox; +} + +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox) +{ + if (!mailbox) + return; + + pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma); + kfree(mailbox); +} + +int mthca_SYS_EN(struct mthca_dev *dev, u8 *status) +{ + u64 out; + int ret; + + ret = mthca_cmd_imm(dev, 0, &out, 0, 0, CMD_SYS_EN, HZ, status); + + if (*status == MTHCA_CMD_STAT_DDR_MEM_ERR) + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SYS_EN DDR error: syn=%x, sock=%d, " + "sladdr=%d, SPD source=%s\n", + (int) (out >> 6) & 0xf, (int) (out >> 4) & 3, + (int) (out >> 1) & 7, (int) out & 1 ? 
"NVMEM" : "DIMM")); + + return ret; +} + +int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, HZ, status); +} + +static int mthca_map_cmd(struct mthca_dev *dev, u16 op, struct mthca_icm *icm, + u64 virt, u8 *status) +{ + struct mthca_mailbox *mailbox; + struct mthca_icm_iter iter; + __be64 *pages; + int lg; + int nent = 0; + unsigned long i; + int err = 0; + int ts = 0, tc = 0; + CPU_2_BE64_PREP; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + RtlZeroMemory(mailbox->buf, MTHCA_MAILBOX_SIZE); + pages = mailbox->buf; + + for (mthca_icm_first(icm, &iter); + !mthca_icm_last(&iter); + mthca_icm_next(&iter)) { + /* + * We have to pass pages that are aligned to their + * size, so find the least significant 1 in the + * address or size and use that as our log2 size. + */ + i = (u32)mthca_icm_addr(&iter) | mthca_icm_size(&iter); + lg = ffs(i) - 1; + if (lg < 12) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Got FW area not aligned to 4K (%I64x/%lx).\n", + (u64) mthca_icm_addr(&iter), + mthca_icm_size(&iter))); + err = -EINVAL; + goto out; + } + for (i = 0; i < mthca_icm_size(&iter) >> lg; ++i) { + if (virt != -1) { + pages[nent * 2] = cl_hton64(virt); + virt += 1Ui64 << lg; + } + pages[nent * 2 + 1] = CPU_2_BE64((mthca_icm_addr(&iter) + + (i << lg)) | (lg - 12)); + ts += 1 << (lg - 10); + ++tc; + + if (++nent == MTHCA_MAILBOX_SIZE / 16) { + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, + CMD_TIME_CLASS_B, status); + if (err || *status) + goto out; + nent = 0; + } + } + } + + if (nent) + err = mthca_cmd(dev, mailbox->dma, nent, 0, op, + CMD_TIME_CLASS_B, status); + + switch (op) { + case CMD_MAP_FA: + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Mapped %d chunks/%d KB for FW.\n", tc, ts)); + break; + case CMD_MAP_ICM_AUX: + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Mapped %d chunks/%d KB for ICM aux.\n", tc, ts)); + break; + case CMD_MAP_ICM: + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped %d chunks/%d KB at %I64x for ICM.\n", + tc, ts, (u64) virt - (ts << 10))); + break; + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) +{ + return mthca_map_cmd(dev, CMD_MAP_FA, icm, (u64)-1, status); +} + +int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_FA, CMD_TIME_CLASS_B, status); +} + +int mthca_RUN_FW(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_RUN_FW, CMD_TIME_CLASS_A, status); +} + +int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *outbox; + int err = 0; + u8 lg; + +#define QUERY_FW_OUT_SIZE 0x100 +#define QUERY_FW_VER_OFFSET 0x00 +#define QUERY_FW_MAX_CMD_OFFSET 0x0f +#define QUERY_FW_ERR_START_OFFSET 0x30 +#define QUERY_FW_ERR_SIZE_OFFSET 0x38 + +#define QUERY_FW_START_OFFSET 0x20 +#define QUERY_FW_END_OFFSET 0x28 + +#define QUERY_FW_SIZE_OFFSET 0x00 +#define QUERY_FW_CLR_INT_BASE_OFFSET 0x20 +#define QUERY_FW_EQ_ARM_BASE_OFFSET 0x40 +#define QUERY_FW_EQ_SET_CI_BASE_OFFSET 0x48 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_FW, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(dev->fw_ver, outbox, QUERY_FW_VER_OFFSET); + /* + * FW subSIZE_Tor version is at more signifant bits than minor + * version, so swap 
here. + */ + dev->fw_ver = (dev->fw_ver & 0xffff00000000Ui64) | + ((dev->fw_ver & 0xffff0000Ui64) >> 16) | + ((dev->fw_ver & 0x0000ffffUi64) << 16); + + MTHCA_GET(lg, outbox, QUERY_FW_MAX_CMD_OFFSET); + dev->cmd.max_cmds = 1 << lg; + MTHCA_GET(dev->catas_err.addr, outbox, QUERY_FW_ERR_START_OFFSET); + MTHCA_GET(dev->catas_err.size, outbox, QUERY_FW_ERR_SIZE_OFFSET); + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW version %012I64x, max commands %d\n", + (u64) dev->fw_ver, dev->cmd.max_cmds)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Catastrophic error buffer at 0x%I64x, size 0x%x\n", + (u64) dev->catas_err.addr, dev->catas_err.size)); + + + if (mthca_is_memfree(dev)) { + MTHCA_GET(dev->fw.arbel.fw_pages, outbox, QUERY_FW_SIZE_OFFSET); + MTHCA_GET(dev->fw.arbel.clr_int_base, outbox, QUERY_FW_CLR_INT_BASE_OFFSET); + MTHCA_GET(dev->fw.arbel.eq_arm_base, outbox, QUERY_FW_EQ_ARM_BASE_OFFSET); + MTHCA_GET(dev->fw.arbel.eq_set_ci_base, outbox, QUERY_FW_EQ_SET_CI_BASE_OFFSET); + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("FW size %d KB\n", dev->fw.arbel.fw_pages << 2)); + + /* + * Arbel page size is always 4 KB; round up number of + * system pages needed. + */ + dev->fw.arbel.fw_pages = + ALIGN(dev->fw.arbel.fw_pages, PAGE_SIZE >> 12) >> + (PAGE_SHIFT - 12); + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Clear int @ %I64x, EQ arm @ %I64x, EQ set CI @ %I64x\n", + (u64) dev->fw.arbel.clr_int_base, + (u64) dev->fw.arbel.eq_arm_base, + (u64) dev->fw.arbel.eq_set_ci_base)); + } else { + MTHCA_GET(dev->fw.tavor.fw_start, outbox, QUERY_FW_START_OFFSET); + MTHCA_GET(dev->fw.tavor.fw_end, outbox, QUERY_FW_END_OFFSET); + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("FW size %d KB (start %I64x, end %I64x)\n", + (int) ((dev->fw.tavor.fw_end - dev->fw.tavor.fw_start) >> 10), + (u64) dev->fw.tavor.fw_start, + (u64) dev->fw.tavor.fw_end)); + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status) +{ + struct mthca_mailbox *mailbox; + u8 info; + u32 *outbox; + int err = 0; + +#define ENABLE_LAM_OUT_SIZE 0x100 +#define ENABLE_LAM_START_OFFSET 0x00 +#define ENABLE_LAM_END_OFFSET 0x08 +#define ENABLE_LAM_INFO_OFFSET 0x13 + +#define ENABLE_LAM_INFO_HIDDEN_FLAG (1 << 4) +#define ENABLE_LAM_INFO_ECC_MASK 0x3 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_ENABLE_LAM, + CMD_TIME_CLASS_C, status); + + if (err) + goto out; + + if (*status == MTHCA_CMD_STAT_LAM_NOT_PRE) + goto out; + + MTHCA_GET(dev->ddr_start, outbox, ENABLE_LAM_START_OFFSET); + MTHCA_GET(dev->ddr_end, outbox, ENABLE_LAM_END_OFFSET); + MTHCA_GET(info, outbox, ENABLE_LAM_INFO_OFFSET); + + if (!!(info & ENABLE_LAM_INFO_HIDDEN_FLAG) != + !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory " + "is %s hidden; does not match PCI config\n", + (info & ENABLE_LAM_INFO_HIDDEN_FLAG)? 
+ "" : "not")); + } + if (info & ENABLE_LAM_INFO_HIDDEN_FLAG) + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("HCA-attached memory is hidden.\n")); + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n", + (int) ((dev->ddr_end - dev->ddr_start) >> 10), + (u64) dev->ddr_start, + (u64) dev->ddr_end)); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_SYS_DIS, CMD_TIME_CLASS_C, status); +} + +int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status) +{ + struct mthca_mailbox *mailbox; + u8 info; + u32 *outbox; + int err = 0; + +#define QUERY_DDR_OUT_SIZE 0x100 +#define QUERY_DDR_START_OFFSET 0x00 +#define QUERY_DDR_END_OFFSET 0x08 +#define QUERY_DDR_INFO_OFFSET 0x13 + +#define QUERY_DDR_INFO_HIDDEN_FLAG (1 << 4) +#define QUERY_DDR_INFO_ECC_MASK 0x3 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DDR, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(dev->ddr_start, outbox, QUERY_DDR_START_OFFSET); + MTHCA_GET(dev->ddr_end, outbox, QUERY_DDR_END_OFFSET); + MTHCA_GET(info, outbox, QUERY_DDR_INFO_OFFSET); + + if (!!(info & QUERY_DDR_INFO_HIDDEN_FLAG) != + !!(dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) { + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("FW reports that HCA-attached memory " + "is %s hidden; does not match PCI config\n", + (info & QUERY_DDR_INFO_HIDDEN_FLAG) ? + "" : "not")); + } + if (info & QUERY_DDR_INFO_HIDDEN_FLAG) + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("HCA-attached memory is hidden.\n")); + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory size %d KB (start %I64x, end %I64x)\n", + (int) ((dev->ddr_end - dev->ddr_start) >> 10), + (u64) dev->ddr_start, + (u64) dev->ddr_end)); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, + struct mthca_dev_lim *dev_lim, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *outbox; + u8 field; + u16 size; + int err; + +#define QUERY_DEV_LIM_OUT_SIZE 0x100 +#define QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET 0x10 +#define QUERY_DEV_LIM_MAX_QP_SZ_OFFSET 0x11 +#define QUERY_DEV_LIM_RSVD_QP_OFFSET 0x12 +#define QUERY_DEV_LIM_MAX_QP_OFFSET 0x13 +#define QUERY_DEV_LIM_RSVD_SRQ_OFFSET 0x14 +#define QUERY_DEV_LIM_MAX_SRQ_OFFSET 0x15 +#define QUERY_DEV_LIM_RSVD_EEC_OFFSET 0x16 +#define QUERY_DEV_LIM_MAX_EEC_OFFSET 0x17 +#define QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET 0x19 +#define QUERY_DEV_LIM_RSVD_CQ_OFFSET 0x1a +#define QUERY_DEV_LIM_MAX_CQ_OFFSET 0x1b +#define QUERY_DEV_LIM_MAX_MPT_OFFSET 0x1d +#define QUERY_DEV_LIM_RSVD_EQ_OFFSET 0x1e +#define QUERY_DEV_LIM_MAX_EQ_OFFSET 0x1f +#define QUERY_DEV_LIM_RSVD_MTT_OFFSET 0x20 +#define QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET 0x21 +#define QUERY_DEV_LIM_RSVD_MRW_OFFSET 0x22 +#define QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET 0x23 +#define QUERY_DEV_LIM_MAX_AV_OFFSET 0x27 +#define QUERY_DEV_LIM_MAX_REQ_QP_OFFSET 0x29 +#define QUERY_DEV_LIM_MAX_RES_QP_OFFSET 0x2b +#define QUERY_DEV_LIM_MAX_RDMA_OFFSET 0x2f +#define QUERY_DEV_LIM_RSZ_SRQ_OFFSET 0x33 +#define QUERY_DEV_LIM_ACK_DELAY_OFFSET 0x35 +#define QUERY_DEV_LIM_MTU_WIDTH_OFFSET 0x36 +#define QUERY_DEV_LIM_VL_PORT_OFFSET 0x37 +#define QUERY_DEV_LIM_MAX_GID_OFFSET 0x3b +#define QUERY_DEV_LIM_MAX_PKEY_OFFSET 0x3f +#define QUERY_DEV_LIM_FLAGS_OFFSET 0x44 +#define QUERY_DEV_LIM_RSVD_UAR_OFFSET 0x48 +#define 
QUERY_DEV_LIM_UAR_SZ_OFFSET 0x49 +#define QUERY_DEV_LIM_PAGE_SZ_OFFSET 0x4b +#define QUERY_DEV_LIM_MAX_SG_OFFSET 0x51 +#define QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET 0x52 +#define QUERY_DEV_LIM_MAX_SG_RQ_OFFSET 0x55 +#define QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET 0x56 +#define QUERY_DEV_LIM_MAX_QP_MCG_OFFSET 0x61 +#define QUERY_DEV_LIM_RSVD_MCG_OFFSET 0x62 +#define QUERY_DEV_LIM_MAX_MCG_OFFSET 0x63 +#define QUERY_DEV_LIM_RSVD_PD_OFFSET 0x64 +#define QUERY_DEV_LIM_MAX_PD_OFFSET 0x65 +#define QUERY_DEV_LIM_RSVD_RDD_OFFSET 0x66 +#define QUERY_DEV_LIM_MAX_RDD_OFFSET 0x67 +#define QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET 0x80 +#define QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET 0x82 +#define QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET 0x84 +#define QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET 0x86 +#define QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET 0x88 +#define QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET 0x8a +#define QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET 0x8c +#define QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET 0x8e +#define QUERY_DEV_LIM_MTT_ENTRY_SZ_OFFSET 0x90 +#define QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET 0x92 +#define QUERY_DEV_LIM_PBL_SZ_OFFSET 0x96 +#define QUERY_DEV_LIM_BMME_FLAGS_OFFSET 0x97 +#define QUERY_DEV_LIM_RSVD_LKEY_OFFSET 0x98 +#define QUERY_DEV_LIM_LAMR_OFFSET 0x9f +#define QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET 0xa0 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_DEV_LIM, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_QP_OFFSET); + dev_lim->reserved_qps = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_OFFSET); + dev_lim->max_qps = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_SRQ_OFFSET); + dev_lim->reserved_srqs = 1 << (field >> 4); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_OFFSET); + dev_lim->max_srqs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EEC_OFFSET); + dev_lim->reserved_eecs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EEC_OFFSET); + dev_lim->max_eecs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_SZ_OFFSET); + dev_lim->max_cq_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_CQ_OFFSET); + dev_lim->reserved_cqs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_CQ_OFFSET); + dev_lim->max_cqs = 1 << (field & 0x1f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MPT_OFFSET); + dev_lim->max_mpts = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_EQ_OFFSET); + dev_lim->reserved_eqs = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_EQ_OFFSET); + dev_lim->max_eqs = 1 << (field & 0x7); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MTT_OFFSET); + dev_lim->reserved_mtts = 1 << (field >> 4); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MRW_SZ_OFFSET); + dev_lim->max_mrw_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MRW_OFFSET); + dev_lim->reserved_mrws = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MTT_SEG_OFFSET); + dev_lim->max_mtt_seg = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_REQ_QP_OFFSET); + dev_lim->max_requester_per_qp = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RES_QP_OFFSET); + dev_lim->max_responder_per_qp = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDMA_OFFSET); + dev_lim->max_rdma_global = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_ACK_DELAY_OFFSET); 
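+	/*
+	 * A note on the accessor used throughout this function: MTHCA_GET
+	 * (defined in a header outside this hunk) is assumed to read a
+	 * big-endian field of sizeof(dest) bytes at the given byte offset
+	 * into the mailbox, so the one-byte reads here reduce to roughly
+	 *
+	 *	field = *((u8 *)outbox + QUERY_DEV_LIM_ACK_DELAY_OFFSET);
+	 *
+	 * with a cl_ntoh16()/cl_ntoh32() conversion for the wider fields.
+	 */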
+ dev_lim->local_ca_ack_delay = field & 0x1f; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MTU_WIDTH_OFFSET); + dev_lim->max_mtu = field >> 4; + dev_lim->max_port_width = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_VL_PORT_OFFSET); + dev_lim->max_vl = field >> 4; + dev_lim->num_ports = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_GID_OFFSET); + dev_lim->max_gids = 1 << (field & 0xf); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PKEY_OFFSET); + dev_lim->max_pkeys = 1 << (field & 0xf); + MTHCA_GET(dev_lim->flags, outbox, QUERY_DEV_LIM_FLAGS_OFFSET); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_UAR_OFFSET); + dev_lim->reserved_uars = field >> 4; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_UAR_SZ_OFFSET); + dev_lim->uar_size = 1 << ((field & 0x3f) + 20); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_PAGE_SZ_OFFSET); + dev_lim->min_page_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_OFFSET); + dev_lim->max_sg = field; + + MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_OFFSET); + dev_lim->max_desc_sz = size; + + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_MCG_OFFSET); + dev_lim->max_qp_per_mcg = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_MCG_OFFSET); + dev_lim->reserved_mgms = field & 0xf; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_MCG_OFFSET); + dev_lim->max_mcgs = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_PD_OFFSET); + dev_lim->reserved_pds = field >> 4; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_PD_OFFSET); + dev_lim->max_pds = 1 << (field & 0x3f); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSVD_RDD_OFFSET); + dev_lim->reserved_rdds = field >> 4; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_RDD_OFFSET); + dev_lim->max_rdds = 1 << (field & 0x3f); + + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEC_ENTRY_SZ_OFFSET); + dev_lim->eec_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_QPC_ENTRY_SZ_OFFSET); + dev_lim->qpc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EEEC_ENTRY_SZ_OFFSET); + dev_lim->eeec_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQPC_ENTRY_SZ_OFFSET); + dev_lim->eqpc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_EQC_ENTRY_SZ_OFFSET); + dev_lim->eqc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_CQC_ENTRY_SZ_OFFSET); + dev_lim->cqc_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_SRQ_ENTRY_SZ_OFFSET); + dev_lim->srq_entry_sz = size; + MTHCA_GET(size, outbox, QUERY_DEV_LIM_UAR_ENTRY_SZ_OFFSET); + dev_lim->uar_scratch_entry_sz = size; + + if (mthca_is_memfree(dev)) { + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); + dev_lim->max_srq_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); + dev_lim->max_qp_sz = 1 << field; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_RSZ_SRQ_OFFSET); + dev_lim->hca.arbel.resize_srq = field & 1; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET); + dev_lim->max_sg = min(field, dev_lim->max_sg); + MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET); + dev_lim->max_desc_sz = min((int)size, dev_lim->max_desc_sz); + MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET); + dev_lim->mpt_entry_sz = size; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET); + dev_lim->hca.arbel.max_pbl_sz = 1 << (field & 0x3f); + MTHCA_GET(dev_lim->hca.arbel.bmme_flags, outbox, + QUERY_DEV_LIM_BMME_FLAGS_OFFSET); + MTHCA_GET(dev_lim->hca.arbel.reserved_lkey, outbox, + QUERY_DEV_LIM_RSVD_LKEY_OFFSET); + MTHCA_GET(field, outbox, QUERY_DEV_LIM_LAMR_OFFSET); + dev_lim->hca.arbel.lam_required = field & 1; + 
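+	/*
+	 * Most limits in this block arrive log2-encoded in a few bits of a
+	 * byte and are decoded as "1 << (field & mask)". Illustration with
+	 * a made-up value, not one read from a real HCA:
+	 *
+	 *	u8 f = 0x17;			// log2 count in the low 5 bits
+	 *	int max = 1 << (f & 0x1f);	// 1 << 23, i.e. 8M entries
+	 */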
MTHCA_GET(dev_lim->hca.arbel.max_icm_sz, outbox, + QUERY_DEV_LIM_MAX_ICM_SZ_OFFSET); + + if (dev_lim->hca.arbel.bmme_flags & 1){ + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Base MM extensions: yes " + "(flags %d, max PBL %d, rsvd L_Key %08x)\n", + dev_lim->hca.arbel.bmme_flags, + dev_lim->hca.arbel.max_pbl_sz, + dev_lim->hca.arbel.reserved_lkey)); + }else{ + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Base MM extensions: no\n")); + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max ICM size %I64d MB\n", + (u64) dev_lim->hca.arbel.max_icm_sz >> 20)); + } + else { + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SRQ_SZ_OFFSET); + dev_lim->max_srq_sz = (1 << field) - 1; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_QP_SZ_OFFSET); + dev_lim->max_qp_sz = (1 << field) - 1; + MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_AV_OFFSET); + dev_lim->hca.tavor.max_avs = 1 << (field & 0x3f); + dev_lim->mpt_entry_sz = MTHCA_MPT_ENTRY_SIZE; + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QPs: %d, reserved QPs: %d, entry size: %d\n", + dev_lim->max_qps, dev_lim->reserved_qps, dev_lim->qpc_entry_sz)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max SRQs: %d, reserved SRQs: %d, entry size: %d\n", + dev_lim->max_srqs, dev_lim->reserved_srqs, dev_lim->srq_entry_sz)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQs: %d, reserved CQs: %d, entry size: %d\n", + dev_lim->max_cqs, dev_lim->reserved_cqs, dev_lim->cqc_entry_sz)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max EQs: %d, reserved EQs: %d, entry size: %d\n", + dev_lim->max_eqs, dev_lim->reserved_eqs, dev_lim->eqc_entry_sz)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("reserved MPTs: %d, reserved MTTs: %d\n", + dev_lim->reserved_mrws, dev_lim->reserved_mtts)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max PDs: %d, reserved PDs: %d, reserved UARs: %d\n", + dev_lim->max_pds, dev_lim->reserved_pds, dev_lim->reserved_uars)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max QP/MCG: %d, reserved MGMs: %d\n", + dev_lim->max_pds, dev_lim->reserved_mgms)); + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Max CQEs: %d, max WQEs: %d, max SRQ WQEs: %d\n", + dev_lim->max_cq_sz, dev_lim->max_qp_sz, dev_lim->max_srq_sz)); + + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Flags: %08x\n", dev_lim->flags)); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +static void get_board_id(u8 *vsd, char *board_id) +{ + int i; + +#define VSD_OFFSET_SIG1 0x00 +#define VSD_OFFSET_SIG2 0xde +#define VSD_OFFSET_MLX_BOARD_ID 0xd0 +#define VSD_OFFSET_TS_BOARD_ID 0x20 + +#define VSD_SIGNATURE_TOPSPIN 0x5ad + + RtlZeroMemory(board_id, MTHCA_BOARD_ID_LEN); + + if (cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG1)) == VSD_SIGNATURE_TOPSPIN && + cl_ntoh16(*(u16*)(vsd + VSD_OFFSET_SIG2)) == VSD_SIGNATURE_TOPSPIN) { + strlcpy(board_id, (const char *)(vsd + VSD_OFFSET_TS_BOARD_ID), MTHCA_BOARD_ID_LEN); + } else { + /* + * The board ID is a string but the firmware byte + * swaps each 4-byte word before passing it back to + * us. Therefore we need to swab it before printing. 
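+ *
+ * Illustration with made-up bytes: if the wire holds "DCBA" "HGFE",
+ * each _byteswap_ulong() of a 32-bit word puts the bytes back in
+ * string order, leaving "ABCDEFGH" in board_id.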
+ */ + for (i = 0; i < 4; ++i) + ((u32 *) board_id)[i] = + _byteswap_ulong(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4)); + } +} + +int mthca_QUERY_ADAPTER(struct mthca_dev *dev, + struct mthca_adapter *adapter, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *outbox; + int err; + +#define QUERY_ADAPTER_OUT_SIZE 0x100 +#define QUERY_ADAPTER_VENDOR_ID_OFFSET 0x00 +#define QUERY_ADAPTER_DEVICE_ID_OFFSET 0x04 +#define QUERY_ADAPTER_REVISION_ID_OFFSET 0x08 +#define QUERY_ADAPTER_INTA_PIN_OFFSET 0x10 +#define QUERY_ADAPTER_VSD_OFFSET 0x20 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + outbox = mailbox->buf; + + err = mthca_cmd_box(dev, 0, mailbox->dma, 0, 0, CMD_QUERY_ADAPTER, + CMD_TIME_CLASS_A, status); + + if (err) + goto out; + + MTHCA_GET(adapter->vendor_id, outbox, QUERY_ADAPTER_VENDOR_ID_OFFSET); + MTHCA_GET(adapter->device_id, outbox, QUERY_ADAPTER_DEVICE_ID_OFFSET); + MTHCA_GET(adapter->revision_id, outbox, QUERY_ADAPTER_REVISION_ID_OFFSET); + MTHCA_GET(adapter->inta_pin, outbox, QUERY_ADAPTER_INTA_PIN_OFFSET); + + get_board_id((u8*)outbox + QUERY_ADAPTER_VSD_OFFSET, + adapter->board_id); + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_INIT_HCA(struct mthca_dev *dev, + struct mthca_init_hca_param *param, + u8 *status) +{ + struct mthca_mailbox *mailbox; + __be32 *inbox; + int err; + +#define INIT_HCA_IN_SIZE 0x200 +#define INIT_HCA_FLAGS_OFFSET 0x014 +#define INIT_HCA_QPC_OFFSET 0x020 +#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) +#define INIT_HCA_LOG_QP_OFFSET (INIT_HCA_QPC_OFFSET + 0x17) +#define INIT_HCA_EEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x20) +#define INIT_HCA_LOG_EEC_OFFSET (INIT_HCA_QPC_OFFSET + 0x27) +#define INIT_HCA_SRQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x28) +#define INIT_HCA_LOG_SRQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x2f) +#define INIT_HCA_CQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x30) +#define INIT_HCA_LOG_CQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x37) +#define INIT_HCA_EQPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x40) +#define INIT_HCA_EEEC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x50) +#define INIT_HCA_EQC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x60) +#define INIT_HCA_LOG_EQ_OFFSET (INIT_HCA_QPC_OFFSET + 0x67) +#define INIT_HCA_RDB_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x70) +#define INIT_HCA_UDAV_OFFSET 0x0b0 +#define INIT_HCA_UDAV_LKEY_OFFSET (INIT_HCA_UDAV_OFFSET + 0x0) +#define INIT_HCA_UDAV_PD_OFFSET (INIT_HCA_UDAV_OFFSET + 0x4) +#define INIT_HCA_MCAST_OFFSET 0x0c0 +#define INIT_HCA_MC_BASE_OFFSET (INIT_HCA_MCAST_OFFSET + 0x00) +#define INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x12) +#define INIT_HCA_MC_HASH_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x16) +#define INIT_HCA_LOG_MC_TABLE_SZ_OFFSET (INIT_HCA_MCAST_OFFSET + 0x1b) +#define INIT_HCA_TPT_OFFSET 0x0f0 +#define INIT_HCA_MPT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x00) +#define INIT_HCA_MTT_SEG_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x09) +#define INIT_HCA_LOG_MPT_SZ_OFFSET (INIT_HCA_TPT_OFFSET + 0x0b) +#define INIT_HCA_MTT_BASE_OFFSET (INIT_HCA_TPT_OFFSET + 0x10) +#define INIT_HCA_UAR_OFFSET 0x120 +#define INIT_HCA_UAR_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x00) +#define INIT_HCA_UARC_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x09) +#define INIT_HCA_LOG_UAR_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0a) +#define INIT_HCA_UAR_PAGE_SZ_OFFSET (INIT_HCA_UAR_OFFSET + 0x0b) +#define INIT_HCA_UAR_SCATCH_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x10) +#define INIT_HCA_UAR_CTX_BASE_OFFSET (INIT_HCA_UAR_OFFSET + 0x18) + + mailbox = mthca_alloc_mailbox(dev, 
GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, INIT_HCA_IN_SIZE); + +#if defined(__LITTLE_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cl_hton32(1 << 1); +#elif defined(__BIG_ENDIAN) + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1 << 1); +#else +#error Host endianness not defined +#endif + /* Check port for UD address vector: */ + *(inbox + INIT_HCA_FLAGS_OFFSET / 4) |= cl_hton32(1); + + /* We leave wqe_quota, responder_exu, etc as 0 (default) */ + + /* QPC/EEC/CQC/EQC/RDB attributes */ + + MTHCA_PUT(inbox, param->qpc_base, INIT_HCA_QPC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_qps, INIT_HCA_LOG_QP_OFFSET); + MTHCA_PUT(inbox, param->eec_base, INIT_HCA_EEC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_eecs, INIT_HCA_LOG_EEC_OFFSET); + MTHCA_PUT(inbox, param->srqc_base, INIT_HCA_SRQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_srqs, INIT_HCA_LOG_SRQ_OFFSET); + MTHCA_PUT(inbox, param->cqc_base, INIT_HCA_CQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_cqs, INIT_HCA_LOG_CQ_OFFSET); + MTHCA_PUT(inbox, param->eqpc_base, INIT_HCA_EQPC_BASE_OFFSET); + MTHCA_PUT(inbox, param->eeec_base, INIT_HCA_EEEC_BASE_OFFSET); + MTHCA_PUT(inbox, param->eqc_base, INIT_HCA_EQC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_num_eqs, INIT_HCA_LOG_EQ_OFFSET); + MTHCA_PUT(inbox, param->rdb_base, INIT_HCA_RDB_BASE_OFFSET); + + /* UD AV attributes */ + + /* multicast attributes */ + + MTHCA_PUT(inbox, param->mc_base, INIT_HCA_MC_BASE_OFFSET); + MTHCA_PUT(inbox, param->log_mc_entry_sz, INIT_HCA_LOG_MC_ENTRY_SZ_OFFSET); + MTHCA_PUT(inbox, param->mc_hash_sz, INIT_HCA_MC_HASH_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_mc_table_sz, INIT_HCA_LOG_MC_TABLE_SZ_OFFSET); + + /* TPT attributes */ + + MTHCA_PUT(inbox, param->mpt_base, INIT_HCA_MPT_BASE_OFFSET); + if (!mthca_is_memfree(dev)) + MTHCA_PUT(inbox, param->mtt_seg_sz, INIT_HCA_MTT_SEG_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_mpt_sz, INIT_HCA_LOG_MPT_SZ_OFFSET); + MTHCA_PUT(inbox, param->mtt_base, INIT_HCA_MTT_BASE_OFFSET); + + /* UAR attributes */ + { + u8 uar_page_sz = PAGE_SHIFT - 12; + MTHCA_PUT(inbox, uar_page_sz, INIT_HCA_UAR_PAGE_SZ_OFFSET); + } + + MTHCA_PUT(inbox, param->uar_scratch_base, INIT_HCA_UAR_SCATCH_BASE_OFFSET); + + if (mthca_is_memfree(dev)) { + MTHCA_PUT(inbox, param->log_uarc_sz, INIT_HCA_UARC_SZ_OFFSET); + MTHCA_PUT(inbox, param->log_uar_sz, INIT_HCA_LOG_UAR_SZ_OFFSET); + MTHCA_PUT(inbox, param->uarc_base, INIT_HCA_UAR_CTX_BASE_OFFSET); + } + + err = mthca_cmd(dev, mailbox->dma, 0, 0, CMD_INIT_HCA, HZ, status); + + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_INIT_IB(struct mthca_dev *dev, + struct mthca_init_ib_param *param, + int port, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags; + +#define INIT_IB_IN_SIZE 56 +#define INIT_IB_FLAGS_OFFSET 0x00 +#define INIT_IB_FLAG_SIG (1 << 18) +#define INIT_IB_FLAG_NG (1 << 17) +#define INIT_IB_FLAG_G0 (1 << 16) +#define INIT_IB_VL_SHIFT 4 +#define INIT_IB_PORT_WIDTH_SHIFT 8 +#define INIT_IB_MTU_SHIFT 12 +#define INIT_IB_MAX_GID_OFFSET 0x06 +#define INIT_IB_MAX_PKEY_OFFSET 0x0a +#define INIT_IB_GUID0_OFFSET 0x10 +#define INIT_IB_NODE_GUID_OFFSET 0x18 +#define INIT_IB_SI_GUID_OFFSET 0x20 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, INIT_IB_IN_SIZE); + + flags = 0; + flags |= param->set_guid0 ? INIT_IB_FLAG_G0 : 0; + flags |= param->set_node_guid ? 
INIT_IB_FLAG_NG : 0; + flags |= param->set_si_guid ? INIT_IB_FLAG_SIG : 0; + flags |= param->vl_cap << INIT_IB_VL_SHIFT; + flags |= param->port_width << INIT_IB_PORT_WIDTH_SHIFT; + flags |= param->mtu_cap << INIT_IB_MTU_SHIFT; + MTHCA_PUT(inbox, flags, INIT_IB_FLAGS_OFFSET); + + MTHCA_PUT(inbox, param->gid_cap, INIT_IB_MAX_GID_OFFSET); + MTHCA_PUT(inbox, param->pkey_cap, INIT_IB_MAX_PKEY_OFFSET); + MTHCA_PUT(inbox, param->guid0, INIT_IB_GUID0_OFFSET); + MTHCA_PUT(inbox, param->node_guid, INIT_IB_NODE_GUID_OFFSET); + MTHCA_PUT(inbox, param->si_guid, INIT_IB_SI_GUID_OFFSET); + + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_INIT_IB, + CMD_TIME_CLASS_A, status); + + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status) +{ + return mthca_cmd(dev, 0, port, 0, CMD_CLOSE_IB, HZ, status); +} + +int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status) +{ + return mthca_cmd(dev, 0, 0, (u8)panic, CMD_CLOSE_HCA, HZ, status); +} + +int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, + int port, u8 *status) +{ + struct mthca_mailbox *mailbox; + u32 *inbox; + int err; + u32 flags = 0; + +#define SET_IB_IN_SIZE 0x40 +#define SET_IB_FLAGS_OFFSET 0x00 +#define SET_IB_FLAG_SIG (1 << 18) +#define SET_IB_FLAG_RQK (1 << 0) +#define SET_IB_CAP_MASK_OFFSET 0x04 +#define SET_IB_SI_GUID_OFFSET 0x08 + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + RtlZeroMemory(inbox, SET_IB_IN_SIZE); + + flags |= param->set_si_guid ? SET_IB_FLAG_SIG : 0; + flags |= param->reset_qkey_viol ? SET_IB_FLAG_RQK : 0; + MTHCA_PUT(inbox, flags, SET_IB_FLAGS_OFFSET); + + MTHCA_PUT(inbox, param->cap_mask, SET_IB_CAP_MASK_OFFSET); + MTHCA_PUT(inbox, param->si_guid, SET_IB_SI_GUID_OFFSET); + + err = mthca_cmd(dev, mailbox->dma, port, 0, CMD_SET_IB, + CMD_TIME_CLASS_B, status); + + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_MAP_ICM(struct mthca_dev *dev, struct mthca_icm *icm, u64 virt, u8 *status) +{ + return mthca_map_cmd(dev, CMD_MAP_ICM, icm, virt, status); +} + +int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status) +{ + struct mthca_mailbox *mailbox; + __be64 *inbox; + int err; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + inbox = mailbox->buf; + + inbox[0] = cl_hton64(virt); + inbox[1] = cl_hton64(dma_addr); + + err = mthca_cmd(dev, mailbox->dma, 1, 0, CMD_MAP_ICM, + CMD_TIME_CLASS_B, status); + + mthca_free_mailbox(dev, mailbox); + + if (!err) + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Mapped page at %I64x to %I64x for ICM.\n", + (u64) dma_addr, (u64) virt)); + + return err; +} + +int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status) +{ + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Unmapping %d pages at %I64x from ICM.\n", + page_count, (u64) virt)); + + return mthca_cmd(dev, virt, page_count, 0, CMD_UNMAP_ICM, CMD_TIME_CLASS_B, status); +} + +int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status) +{ + return mthca_map_cmd(dev, CMD_MAP_ICM_AUX, icm, (u64)-1, status); +} + +int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_UNMAP_ICM_AUX, CMD_TIME_CLASS_B, status); +} + +int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, + u8 *status) +{ + int ret = mthca_cmd_imm(dev, icm_size, aux_pages, 0, 0, CMD_SET_ICM_SIZE, + CMD_TIME_CLASS_A, 
status); + + if (ret || *status) + return ret; + + /* + * Arbel page size is always 4 KB; round up number of system + * pages needed. + */ + *aux_pages = (*aux_pages + (1 << (PAGE_SHIFT - 12)) - 1) >> (PAGE_SHIFT - 12); + *aux_pages = ALIGN(*aux_pages, PAGE_SIZE >> 12) >> (PAGE_SHIFT - 12); + + return 0; +} + +int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, mpt_index, 0, CMD_SW2HW_MPT, + CMD_TIME_CLASS_B, status); +} + +int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, mpt_index, + (u8)!mailbox, CMD_HW2SW_MPT, + CMD_TIME_CLASS_B, status); +} + +int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int num_mtt, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, num_mtt, 0, CMD_WRITE_MTT, + CMD_TIME_CLASS_B, status); +} + +int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0, 0, CMD_SYNC_TPT, CMD_TIME_CLASS_B, status); +} + +int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, + int eq_num, u8 *status) +{ + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("%s mask %016I64x for eqn %d\n", + unmap ? "Clearing" : "Setting", + (u64) event_mask, eq_num)); + return mthca_cmd(dev, event_mask, (unmap << 31) | eq_num, + 0, CMD_MAP_EQ, CMD_TIME_CLASS_B, status); +} + +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, eq_num, 0, CMD_SW2HW_EQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, eq_num, 0, + CMD_HW2SW_EQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, cq_num, 0, CMD_SW2HW_CQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, cq_num, 0, + CMD_HW2SW_CQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, srq_num, 0, CMD_SW2HW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, srq_num, 0, + CMD_HW2SW_SRQ, + CMD_TIME_CLASS_A, status); +} + +int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, + struct mthca_mailbox *mailbox, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, num, 0, + CMD_QUERY_SRQ, CMD_TIME_CLASS_A, status); +} + +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status) +{ + return mthca_cmd(dev, limit, srq_num, 0, CMD_ARM_SRQ, + CMD_TIME_CLASS_B, status); +} + +int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, + u8 *status) +{ + enum { + MTHCA_TRANS_INVALID = 0, + MTHCA_TRANS_RST2INIT, + MTHCA_TRANS_INIT2INIT, + MTHCA_TRANS_INIT2RTR, + MTHCA_TRANS_RTR2RTS, + MTHCA_TRANS_RTS2RTS, + MTHCA_TRANS_SQERR2RTS, + MTHCA_TRANS_ANY2ERR, + MTHCA_TRANS_RTS2SQD, + MTHCA_TRANS_SQD2SQD, + MTHCA_TRANS_SQD2RTS, + MTHCA_TRANS_ANY2RST, + }; + static const u16 op[] = { + 0, /* MTHCA_TRANS_INVALID */ 
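+		/*
+		 * This table pairs each MTHCA_TRANS_* index with its FW
+		 * opcode. A typical connected-QP bring-up issues RST2INIT,
+		 * INIT2RTR and RTR2RTS in order, each through a call shaped
+		 * like the sketch below (qpn, mailbox and status are the
+		 * caller's; the mailbox carries the QP context dumped
+		 * later in this function):
+		 *
+		 *	err = mthca_MODIFY_QP(dev, MTHCA_TRANS_RST2INIT,
+		 *			      qpn, 0, mailbox, 0, &status);
+		 */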
+ CMD_RST2INIT_QPEE, /* MTHCA_TRANS_RST2INIT */ + CMD_INIT2INIT_QPEE, /* MTHCA_TRANS_INIT2INIT */ + CMD_INIT2RTR_QPEE, /* MTHCA_TRANS_INIT2RTR */ + CMD_RTR2RTS_QPEE, /* MTHCA_TRANS_RTR2RTS */ + CMD_RTS2RTS_QPEE, /* MTHCA_TRANS_RTS2RTS */ + CMD_SQERR2RTS_QPEE, /* MTHCA_TRANS_SQERR2RTS */ + CMD_2ERR_QPEE, /* MTHCA_TRANS_ANY2ERR */ + CMD_RTS2SQD_QPEE, /* MTHCA_TRANS_RTS2SQD */ + CMD_SQD2SQD_QPEE, /* MTHCA_TRANS_SQD2SQD */ + CMD_SQD2RTS_QPEE, /* MTHCA_TRANS_SQD2RTS */ + CMD_ERR2RST_QPEE /* MTHCA_TRANS_ANY2RST */ + }; + u8 op_mod = 0; + int my_mailbox = 0; + int err; + + UNREFERENCED_PARAMETER(optmask); + + if (trans < 0 || trans >= ARRAY_SIZE(op)) + return -EINVAL; + + if (trans == MTHCA_TRANS_ANY2RST) { + op_mod = 3; /* don't write outbox, any->reset */ + + /* For debugging */ + if (!mailbox) { + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (!IS_ERR(mailbox)) { + my_mailbox = 1; + op_mod = 2; /* write outbox, any->reset */ + } else + mailbox = NULL; + } + } else { + { // debug print + int i; + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n")); + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" opt param mask: %08x\n", cl_ntoh32(*(__be32 *)mailbox->buf))); + for (i = 2; i < 0x100 / 4; i=i+4) { + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" [%02x] %08x %08x %08x %08x\n",i-2, + cl_ntoh32(((__be32 *) mailbox->buf)[i ]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 3]))); + } + } + } + + if (trans == MTHCA_TRANS_ANY2RST) { + err = mthca_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, + (!!is_ee << 24) | num, op_mod, + op[trans], CMD_TIME_CLASS_C, status); + + if (mailbox) { // debug print + int i; + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("Dumping QP context:\n")); + for (i = 2; i < 0x100 / 4; i=i+4) { + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,(" [%02x] %08x %08x %08x %08x\n",i-2, + cl_ntoh32(((__be32 *) mailbox->buf)[i ]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 1]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 2]), + cl_ntoh32(((__be32 *) mailbox->buf)[i + 3]))); + } + } + } else + err = mthca_cmd(dev, mailbox->dma, optmask | (!!is_ee << 24) | num, + op_mod, op[trans], CMD_TIME_CLASS_C, status); + + if (my_mailbox) + mthca_free_mailbox(dev, mailbox); + + return err; +} + +int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, + struct mthca_mailbox *mailbox, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, (!!is_ee << 24) | num, 0, + CMD_QUERY_QPEE, CMD_TIME_CLASS_A, status); +} + +int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, + u8 *status) +{ + u8 op_mod; + + switch (type) { + case IB_QPT_QP0: + op_mod = 0; + break; + case IB_QPT_QP1: + op_mod = 1; + break; + case IB_QPT_RAW_IPV6: + op_mod = 2; + break; + case IB_QPT_RAW_ETHER: + op_mod = 3; + break; + default: + return -EINVAL; + } + + return mthca_cmd(dev, 0, qpn, op_mod, CMD_CONF_SPECIAL_QP, + CMD_TIME_CLASS_B, status); +} + +int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh, + void *in_mad, void *response_mad, u8 *status) +{ + struct mthca_mailbox *inmailbox, *outmailbox; + u8 *inbox; + int err; + u32 in_modifier = port; + u8 op_modifier = 0; + + +#define MAD_IFC_BOX_SIZE 0x400 +#define MAD_IFC_MY_QPN_OFFSET 0x100 +#define MAD_IFC_RQPN_OFFSET 0x108 +#define MAD_IFC_SL_OFFSET 0x10c +#define MAD_IFC_G_PATH_OFFSET 0x10d +#define MAD_IFC_RLID_OFFSET 0x10e +#define MAD_IFC_PKEY_OFFSET 0x112 +#define 
MAD_IFC_GRH_OFFSET 0x140 + + inmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(inmailbox)) + return PTR_ERR(inmailbox); + inbox = inmailbox->buf; + + outmailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(outmailbox)) { + mthca_free_mailbox(dev, inmailbox); + return PTR_ERR(outmailbox); + } + + memcpy(inbox, in_mad, 256); + + /* + * Key check traps can't be generated unless we have in_wc to + * tell us where to send the trap. + */ + if (ignore_mkey || !in_wc) + op_modifier |= 0x1; + if (ignore_bkey || !in_wc) + op_modifier |= 0x2; + + if (in_wc) { + u8 val; + + memset(inbox + 256, 0, 256); + + + MTHCA_PUT(inbox, 0, MAD_IFC_MY_QPN_OFFSET); + MTHCA_PUT(inbox, cl_ntoh32(in_wc->recv.ud.remote_qp), MAD_IFC_RQPN_OFFSET); + val = in_wc->recv.ud.remote_sl << 4; + MTHCA_PUT(inbox, val, MAD_IFC_SL_OFFSET); + + val = in_wc->recv.ud.path_bits | + (in_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ? 0x80 : 0); + MTHCA_PUT(inbox, val, MAD_IFC_G_PATH_OFFSET); + + MTHCA_PUT(inbox, cl_ntoh16(in_wc->recv.ud.remote_lid), MAD_IFC_RLID_OFFSET); + MTHCA_PUT(inbox, in_wc->recv.ud.pkey_index, MAD_IFC_PKEY_OFFSET); + + if (in_grh) + memcpy(inbox + MAD_IFC_GRH_OFFSET, in_grh, 40); + + op_modifier |= 0x4; + + in_modifier |= cl_ntoh16(in_wc->recv.ud.remote_lid) << 16; + + } + + err = mthca_cmd_box(dev, inmailbox->dma, outmailbox->dma, + in_modifier, op_modifier, + CMD_MAD_IFC, CMD_TIME_CLASS_C, status); + + if (!err && !*status) + memcpy(response_mad, outmailbox->buf, 256); + + mthca_free_mailbox(dev, inmailbox); + mthca_free_mailbox(dev, outmailbox); + return err; +} + +int mthca_READ_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status) +{ + return mthca_cmd_box(dev, 0, mailbox->dma, index, 0, + CMD_READ_MGM, CMD_TIME_CLASS_A, status); +} + +int mthca_WRITE_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status) +{ + return mthca_cmd(dev, mailbox->dma, index, 0, CMD_WRITE_MGM, + CMD_TIME_CLASS_A, status); +} + +int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + u16 *hash, u8 *status) +{ + u64 imm; + int err; + + err = mthca_cmd_imm(dev, mailbox->dma, &imm, 0, 0, CMD_MGID_HASH, + CMD_TIME_CLASS_A, status); + + *hash = (u16)imm; + return err; +} + +int mthca_NOP(struct mthca_dev *dev, u8 *status) +{ + return mthca_cmd(dev, 0, 0x1f, 0, CMD_NOP, 100000, status); /* 100 msecs */ +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_cmd.h b/branches/Ndi/hw/mthca/kernel/mthca_cmd.h new file mode 100644 index 00000000..fdeef839 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_cmd.h @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_CMD_H +#define MTHCA_CMD_H + +#include + +#define MTHCA_MAILBOX_SIZE 4096 + +enum { + /* command completed successfully: */ + MTHCA_CMD_STAT_OK = 0x00, + /* Internal error (such as a bus error) occurred while processing command: */ + MTHCA_CMD_STAT_INTERNAL_ERR = 0x01, + /* Operation/command not supported or opcode modifier not supported: */ + MTHCA_CMD_STAT_BAD_OP = 0x02, + /* Parameter not supported or parameter out of range: */ + MTHCA_CMD_STAT_BAD_PARAM = 0x03, + /* System not enabled or bad system state: */ + MTHCA_CMD_STAT_BAD_SYS_STATE = 0x04, + /* Attempt to access reserved or unallocated resource: */ + MTHCA_CMD_STAT_BAD_RESOURCE = 0x05, + /* Requested resource is currently executing a command, or is otherwise busy: */ + MTHCA_CMD_STAT_RESOURCE_BUSY = 0x06, + /* Memory error: */ + MTHCA_CMD_STAT_DDR_MEM_ERR = 0x07, + /* Required capability exceeds device limits: */ + MTHCA_CMD_STAT_EXCEED_LIM = 0x08, + /* Resource is not in the appropriate state or ownership: */ + MTHCA_CMD_STAT_BAD_RES_STATE = 0x09, + /* Index out of range: */ + MTHCA_CMD_STAT_BAD_INDEX = 0x0a, + /* FW image corrupted: */ + MTHCA_CMD_STAT_BAD_NVMEM = 0x0b, + /* Attempt to modify a QP/EE which is not in the presumed state: */ + MTHCA_CMD_STAT_BAD_QPEE_STATE = 0x10, + /* Bad segment parameters (Address/Size): */ + MTHCA_CMD_STAT_BAD_SEG_PARAM = 0x20, + /* Memory Region has Memory Windows bound to it: */ + MTHCA_CMD_STAT_REG_BOUND = 0x21, + /* HCA local attached memory not present: */ + MTHCA_CMD_STAT_LAM_NOT_PRE = 0x22, + /* Bad management packet (silently discarded): */ + MTHCA_CMD_STAT_BAD_PKT = 0x30, + /* More outstanding CQEs in CQ than new CQ size: */ + MTHCA_CMD_STAT_BAD_SIZE = 0x40 +}; + +enum { + MTHCA_TRANS_INVALID = 0, + MTHCA_TRANS_RST2INIT, + MTHCA_TRANS_INIT2INIT, + MTHCA_TRANS_INIT2RTR, + MTHCA_TRANS_RTR2RTS, + MTHCA_TRANS_RTS2RTS, + MTHCA_TRANS_SQERR2RTS, + MTHCA_TRANS_ANY2ERR, + MTHCA_TRANS_RTS2SQD, + MTHCA_TRANS_SQD2SQD, + MTHCA_TRANS_SQD2RTS, + MTHCA_TRANS_ANY2RST, +}; + +enum { + DEV_LIM_FLAG_RC = 1 << 0, + DEV_LIM_FLAG_UC = 1 << 1, + DEV_LIM_FLAG_UD = 1 << 2, + DEV_LIM_FLAG_RD = 1 << 3, + DEV_LIM_FLAG_RAW_IPV6 = 1 << 4, + DEV_LIM_FLAG_RAW_ETHER = 1 << 5, + DEV_LIM_FLAG_SRQ = 1 << 6, + DEV_LIM_FLAG_BAD_PKEY_CNTR = 1 << 8, + DEV_LIM_FLAG_BAD_QKEY_CNTR = 1 << 9, + DEV_LIM_FLAG_MW = 1 << 16, + DEV_LIM_FLAG_AUTO_PATH_MIG = 1 << 17, + DEV_LIM_FLAG_ATOMIC = 1 << 18, + DEV_LIM_FLAG_RAW_MULTI = 1 << 19, + DEV_LIM_FLAG_UD_AV_PORT_ENFORCE = 1 << 20, + DEV_LIM_FLAG_UD_MULTI = 1 << 21, +}; + +struct mthca_mailbox { + dma_addr_t dma; + void *buf; +}; + +struct mthca_dev_lim { + int max_srq_sz; + int max_qp_sz; + int reserved_qps; + int max_qps; + int reserved_srqs; + int max_srqs; + int reserved_eecs; + int max_eecs; + int max_cq_sz; + int reserved_cqs; + int max_cqs; 
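+	/*
+	 * Filled by mthca_QUERY_DEV_LIM(); a caller-side sketch (error
+	 * handling elided, the assignment target is illustrative):
+	 *
+	 *	struct mthca_dev_lim lim;
+	 *	u8 st;
+	 *	if (!mthca_QUERY_DEV_LIM(dev, &lim, &st) && !st)
+	 *		dev->limits.num_ports = lim.num_ports;
+	 */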
+ int max_mpts; + int reserved_eqs; + int max_eqs; + int reserved_mtts; + int max_mrw_sz; + int reserved_mrws; + int max_mtt_seg; + int max_requester_per_qp; + int max_responder_per_qp; + int max_rdma_global; + int local_ca_ack_delay; + int max_mtu; + int max_port_width; + int max_vl; + int num_ports; + int max_gids; + int max_pkeys; + u32 flags; + int reserved_uars; + int uar_size; + int min_page_sz; + int max_sg; + int max_desc_sz; + int max_qp_per_mcg; + int reserved_mgms; + int max_mcgs; + int reserved_pds; + int max_pds; + int reserved_rdds; + int max_rdds; + int eec_entry_sz; + int qpc_entry_sz; + int eeec_entry_sz; + int eqpc_entry_sz; + int eqc_entry_sz; + int cqc_entry_sz; + int srq_entry_sz; + int uar_scratch_entry_sz; + int mpt_entry_sz; + union { + struct { + int max_avs; + } tavor; + struct { + int resize_srq; + int max_pbl_sz; + u8 bmme_flags; + u32 reserved_lkey; + int lam_required; + u64 max_icm_sz; + } arbel; + } hca; +}; + +struct mthca_adapter { + u32 vendor_id; + u32 device_id; + u32 revision_id; + char board_id[MTHCA_BOARD_ID_LEN]; + u8 inta_pin; +}; + +struct mthca_init_hca_param { + u64 qpc_base; + u64 eec_base; + u64 srqc_base; + u64 cqc_base; + u64 eqpc_base; + u64 eeec_base; + u64 eqc_base; + u64 rdb_base; + u64 mc_base; + u64 mpt_base; + u64 mtt_base; + u64 uar_scratch_base; + u64 uarc_base; + u16 log_mc_entry_sz; + u16 mc_hash_sz; + u8 log_num_qps; + u8 log_num_eecs; + u8 log_num_srqs; + u8 log_num_cqs; + u8 log_num_eqs; + u8 log_mc_table_sz; + u8 mtt_seg_sz; + u8 log_mpt_sz; + u8 log_uar_sz; + u8 log_uarc_sz; +}; + +struct mthca_init_ib_param { + int port_width; + int vl_cap; + int mtu_cap; + u16 gid_cap; + u16 pkey_cap; + int set_guid0; + u64 guid0; + int set_node_guid; + u64 node_guid; + int set_si_guid; + u64 si_guid; +}; + +struct mthca_set_ib_param { + int set_si_guid; + int reset_qkey_viol; + u64 si_guid; + u32 cap_mask; +}; + +int mthca_cmd_init(struct mthca_dev *dev); +void mthca_cmd_cleanup(struct mthca_dev *dev); +int mthca_cmd_use_events(struct mthca_dev *dev); +void mthca_cmd_use_polling(struct mthca_dev *dev); +void mthca_cmd_event(struct mthca_dev *dev, u16 token, + u8 status, u64 out_param); + +struct mthca_mailbox *mthca_alloc_mailbox(struct mthca_dev *dev, + unsigned int gfp_mask); +void mthca_free_mailbox(struct mthca_dev *dev, struct mthca_mailbox *mailbox); + +int mthca_SYS_EN(struct mthca_dev *dev, u8 *status); +int mthca_SYS_DIS(struct mthca_dev *dev, u8 *status); +int mthca_MAP_FA(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); +int mthca_UNMAP_FA(struct mthca_dev *dev, u8 *status); +int mthca_RUN_FW(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_FW(struct mthca_dev *dev, u8 *status); +int mthca_ENABLE_LAM(struct mthca_dev *dev, u8 *status); +int mthca_DISABLE_LAM(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_DDR(struct mthca_dev *dev, u8 *status); +int mthca_QUERY_DEV_LIM(struct mthca_dev *dev, + struct mthca_dev_lim *dev_lim, u8 *status); +int mthca_QUERY_ADAPTER(struct mthca_dev *dev, + struct mthca_adapter *adapter, u8 *status); +int mthca_INIT_HCA(struct mthca_dev *dev, + struct mthca_init_hca_param *param, + u8 *status); +int mthca_INIT_IB(struct mthca_dev *dev, + struct mthca_init_ib_param *param, + int port, u8 *status); +int mthca_CLOSE_IB(struct mthca_dev *dev, int port, u8 *status); +int mthca_CLOSE_HCA(struct mthca_dev *dev, int panic, u8 *status); +int mthca_SET_IB(struct mthca_dev *dev, struct mthca_set_ib_param *param, + int port, u8 *status); +int mthca_MAP_ICM(struct mthca_dev *dev, struct 
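+/*
+ * [Editor's sketch -- illustrative only.] Every command wrapper below
+ * follows the same calling convention: allocate a DMA-able mailbox,
+ * fill mailbox->buf, issue the command, then check both the transport
+ * return code and the firmware status byte. Assuming a caller that
+ * already holds a valid dev and mpt_index, the pattern is roughly:
+ *
+ *	struct mthca_mailbox *mailbox;
+ *	u8 status;
+ *	int err;
+ *
+ *	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
+ *	if (IS_ERR(mailbox))
+ *		return -ENOMEM;
+ *	// ... build the MPT entry in mailbox->buf ...
+ *	err = mthca_SW2HW_MPT(dev, mailbox, mpt_index, &status);
+ *	if (!err && status)	// delivered, but firmware refused it
+ *		err = -EINVAL;
+ *	mthca_free_mailbox(dev, mailbox);
+ *	return err;
+ */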
mthca_icm *icm, u64 virt, u8 *status); +int mthca_MAP_ICM_page(struct mthca_dev *dev, u64 dma_addr, u64 virt, u8 *status); +int mthca_UNMAP_ICM(struct mthca_dev *dev, u64 virt, u32 page_count, u8 *status); +int mthca_MAP_ICM_AUX(struct mthca_dev *dev, struct mthca_icm *icm, u8 *status); +int mthca_UNMAP_ICM_AUX(struct mthca_dev *dev, u8 *status); +int mthca_SET_ICM_SIZE(struct mthca_dev *dev, u64 icm_size, u64 *aux_pages, + u8 *status); +int mthca_SW2HW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status); +int mthca_HW2SW_MPT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int mpt_index, u8 *status); +int mthca_WRITE_MTT(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int num_mtt, u8 *status); +int mthca_SYNC_TPT(struct mthca_dev *dev, u8 *status); +int mthca_MAP_EQ(struct mthca_dev *dev, u64 event_mask, int unmap, + int eq_num, u8 *status); +int mthca_SW2HW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status); +int mthca_HW2SW_EQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int eq_num, u8 *status); +int mthca_SW2HW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status); +int mthca_HW2SW_CQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int cq_num, u8 *status); +int mthca_SW2HW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_HW2SW_SRQ(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + int srq_num, u8 *status); +int mthca_QUERY_SRQ(struct mthca_dev *dev, u32 num, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_ARM_SRQ(struct mthca_dev *dev, int srq_num, int limit, u8 *status); +int mthca_MODIFY_QP(struct mthca_dev *dev, int trans, u32 num, + int is_ee, struct mthca_mailbox *mailbox, u32 optmask, + u8 *status); +int mthca_QUERY_QP(struct mthca_dev *dev, u32 num, int is_ee, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_CONF_SPECIAL_QP(struct mthca_dev *dev, int type, u32 qpn, + u8 *status); +int mthca_MAD_IFC(struct mthca_dev *dev, int ignore_mkey, int ignore_bkey, + int port, struct _ib_wc *in_wc, struct _ib_grh *in_grh, + void *in_mad, void *response_mad, u8 *status); +int mthca_READ_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_WRITE_MGM(struct mthca_dev *dev, int index, + struct mthca_mailbox *mailbox, u8 *status); +int mthca_MGID_HASH(struct mthca_dev *dev, struct mthca_mailbox *mailbox, + u16 *hash, u8 *status); +int mthca_NOP(struct mthca_dev *dev, u8 *status); + +#endif /* MTHCA_CMD_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_config_reg.h b/branches/Ndi/hw/mthca/kernel/mthca_config_reg.h new file mode 100644 index 00000000..9ff4a97a --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_config_reg.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. 
You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_CONFIG_REG_H +#define MTHCA_CONFIG_REG_H + +#define MTHCA_HCR_BASE 0x80680 +#define MTHCA_HCR_SIZE 0x0001c +#define MTHCA_ECR_BASE 0x80700 +#define MTHCA_ECR_SIZE 0x00008 +#define MTHCA_ECR_CLR_BASE 0x80708 +#define MTHCA_ECR_CLR_SIZE 0x00008 +#define MTHCA_MAP_ECR_SIZE (MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE) +#define MTHCA_CLR_INT_BASE 0xf00d8 +#define MTHCA_CLR_INT_SIZE 0x00008 +#define MTHCA_EQ_SET_CI_SIZE (8 * 32) + +#endif /* MTHCA_CONFIG_REG_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_cq.c b/branches/Ndi/hw/mthca/kernel/mthca_cq.c new file mode 100644 index 00000000..c64bc0b9 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_cq.c @@ -0,0 +1,963 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_cq.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_cq_table) +#pragma alloc_text (PAGE, mthca_cleanup_cq_table) +#endif + +enum { + MTHCA_MAX_DIRECT_CQ_SIZE = 4 * PAGE_SIZE +}; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. + */ +#pragma pack(push,1) +struct mthca_cq_context { + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 error_eqn; /* Tavor only */ + __be32 comp_eqn; + __be32 pd; + __be32 lkey; + __be32 last_notified_index; + __be32 solicit_producer_index; + __be32 consumer_index; + __be32 producer_index; + __be32 cqn; + __be32 ci_db; /* Arbel only */ + __be32 state_db; /* Arbel only */ + u32 reserved; +}; +#pragma pack(pop) + +#define MTHCA_CQ_STATUS_OK ( 0 << 28) +#define MTHCA_CQ_STATUS_OVERFLOW ( 9 << 28) +#define MTHCA_CQ_STATUS_WRITE_FAIL (10 << 28) +#define MTHCA_CQ_FLAG_TR ( 1 << 18) +#define MTHCA_CQ_FLAG_OI ( 1 << 17) +#define MTHCA_CQ_STATE_DISARMED ( 0 << 8) +#define MTHCA_CQ_STATE_ARMED ( 1 << 8) +#define MTHCA_CQ_STATE_ARMED_SOL ( 4 << 8) +#define MTHCA_EQ_STATE_FIRED (10 << 8) + +enum { + MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe +}; + +enum { + SYNDROME_LOCAL_LENGTH_ERR = 0x01, + SYNDROME_LOCAL_QP_OP_ERR = 0x02, + SYNDROME_LOCAL_EEC_OP_ERR = 0x03, + SYNDROME_LOCAL_PROT_ERR = 0x04, + SYNDROME_WR_FLUSH_ERR = 0x05, + SYNDROME_MW_BIND_ERR = 0x06, + SYNDROME_BAD_RESP_ERR = 0x10, + SYNDROME_LOCAL_ACCESS_ERR = 0x11, + SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + SYNDROME_REMOTE_ACCESS_ERR = 0x13, + SYNDROME_REMOTE_OP_ERR = 0x14, + SYNDROME_RETRY_EXC_ERR = 0x15, + SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + SYNDROME_LOCAL_RDD_VIOL_ERR = 0x20, + SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21, + SYNDROME_REMOTE_ABORTED_ERR = 0x22, + SYNDROME_INVAL_EECN_ERR = 0x23, + SYNDROME_INVAL_EEC_STATE_ERR = 0x24 +}; + +struct mthca_cqe { + __be32 my_qpn; + __be32 my_ee; + __be32 rqpn; + __be16 sl_g_mlpath; + __be16 rlid; + __be32 imm_etype_pkey_eec; + __be32 byte_cnt; + __be32 wqe; + u8 opcode; + u8 is_send; + u8 reserved; + u8 owner; +}; + +struct mthca_err_cqe { + __be32 my_qpn; + u32 reserved1[3]; + u8 syndrome; + u8 vendor_err; + __be16 db_cnt; + u32 reserved2; + __be32 wqe; + u8 opcode; + u8 reserved3[2]; + u8 owner; +}; + +#define MTHCA_CQ_ENTRY_OWNER_SW (0 << 7) +#define MTHCA_CQ_ENTRY_OWNER_HW (1 << 7) + +#define MTHCA_TAVOR_CQ_DB_INC_CI (1 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL (3 << 24) +#define MTHCA_TAVOR_CQ_DB_SET_CI (4 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24) + +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL (1 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24) + +static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) +{ + if (cq->is_direct) + return (struct mthca_cqe *)((u8*)cq->queue.direct.page + (entry * MTHCA_CQ_ENTRY_SIZE)); + else + return (struct mthca_cqe *)((u8*)cq->queue.page_list[entry * MTHCA_CQ_ENTRY_SIZE / PAGE_SIZE].page + + (entry * MTHCA_CQ_ENTRY_SIZE) % PAGE_SIZE); +} + +static inline struct 
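+/*
+ * [Editor's note.] Hardware and software trade each CQE back and forth
+ * with the single MTHCA_CQ_ENTRY_OWNER_* bit in cqe->owner: cqe_sw()
+ * below yields the entry only while the bit reads "software", and
+ * set_cqe_hw() hands the slot back once it has been consumed. A polling
+ * loop over the ring therefore looks like (illustrative only):
+ *
+ *	struct mthca_cqe *cqe;
+ *	while ((cqe = next_cqe_sw(cq)) != NULL) {
+ *		rmb();			// read contents only after the owner bit
+ *		// ... decode cqe ...
+ *		set_cqe_hw(cqe);	// return the slot to hardware
+ *		++cq->cons_index;
+ *	}
+ */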
mthca_cqe *cqe_sw(struct mthca_cq *cq, int i) +{ + struct mthca_cqe *cqe = get_cqe(cq, i); + return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; +} + +static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) +{ + return cqe_sw(cq, cq->cons_index & cq->ibcq.cqe); +} + +static inline void set_cqe_hw(struct mthca_cqe *cqe) +{ + cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; +} + +static void dump_cqe(u32 print_lvl, struct mthca_dev *dev, void *cqe_ptr) +{ + __be32 *cqe = cqe_ptr; + UNREFERENCED_PARAMETER(dev); + UNUSED_PARAM_WOWPP(print_lvl); + + (void) cqe; /* avoid warning if mthca_dbg compiled away... */ + HCA_PRINT(print_lvl,HCA_DBG_CQ,("CQE contents \n")); + HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x\n",0, + cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3]))); + HCA_PRINT(print_lvl,HCA_DBG_CQ,("\t[%2x] %08x %08x %08x %08x \n",16, + cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7]))); +} + +/* + * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index + * should be correct before calling update_cons_index(). + */ +static inline void update_cons_index(struct mthca_dev *dev, struct mthca_cq *cq, + int incr) +{ + __be32 doorbell[2]; + + if (mthca_is_memfree(dev)) { + *cq->set_ci_db = cl_hton32(cq->cons_index); + wmb(); + } else { + doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn); + doorbell[1] = cl_hton32(incr - 1); + + mthca_write64(doorbell, + dev->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } +} + +void mthca_cq_completion(struct mthca_dev *dev, u32 cqn) +{ + struct mthca_cq *cq; + + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + + if (!cq) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Completion event for bogus CQ %08x\n", cqn)); + return; + } + + if (mthca_is_memfree(dev)) { + if (cq->ibcq.ucontext) + ++*cq->p_u_arm_sn; + else + ++cq->arm_sn; + } + + cq->ibcq.comp_handler(&cq->ibcq, cq->ibcq.cq_context); +} + +void mthca_cq_event(struct mthca_dev *dev, u32 cqn, + enum ib_event_type event_type) +{ + struct mthca_cq *cq; + struct ib_event event; + SPIN_LOCK_PREP(lh); + + spin_lock(&dev->cq_table.lock, &lh); + + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + + if (cq) + atomic_inc(&cq->refcount); + spin_unlock(&lh); + + if (!cq) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Async event for bogus CQ %08x\n", cqn)); + return; + } + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.cq = &cq->ibcq; + if (cq->ibcq.event_handler) + cq->ibcq.event_handler(&event, cq->ibcq.cq_context); + + if (atomic_dec_and_test(&cq->refcount)) + wake_up(&cq->wait); +} + +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq) +{ + struct mthca_cq *cq; + struct mthca_cqe *cqe; + u32 prod_index; + int nfreed = 0; + SPIN_LOCK_PREP(lht); + SPIN_LOCK_PREP(lh); + + spin_lock_irq(&dev->cq_table.lock, &lht); + cq = mthca_array_get(&dev->cq_table.cq, cqn & (dev->limits.num_cqs - 1)); + if (cq) + atomic_inc(&cq->refcount); + spin_unlock_irq(&lht); + + if (!cq) + return; + + spin_lock_irq(&cq->lock, &lh); + + /* + * First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. 
+	 */
+	for (prod_index = cq->cons_index;
+	     cqe_sw(cq, prod_index & cq->ibcq.cqe);
+	     ++prod_index) {
+		if (prod_index == cq->cons_index + cq->ibcq.cqe)
+			break;
+	}
+
+	HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Cleaning QPN %06x from CQN %06x; ci %d, pi %d\n",
+		qpn, cqn, cq->cons_index, prod_index));
+
+	/*
+	 * Now sweep backwards through the CQ, removing CQ entries
+	 * that match our QP by copying older entries on top of them.
+	 */
+	while ((int) --prod_index - (int) cq->cons_index >= 0) {
+		cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
+		if (cqe->my_qpn == cl_hton32(qpn)) {
+			if (srq)
+				mthca_free_srq_wqe(srq, cl_ntoh32(cqe->wqe));
+			++nfreed;
+		}
+		else
+		if (nfreed) {
+			memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
+			       cqe, MTHCA_CQ_ENTRY_SIZE);
+		}
+	}
+
+	if (nfreed) {
+		wmb();
+		cq->cons_index += nfreed;
+		update_cons_index(dev, cq, nfreed);
+	}
+
+	spin_unlock_irq(&lh);
+	if (atomic_dec_and_test(&cq->refcount))
+		wake_up(&cq->wait);
+}
+
+static void handle_error_cqe(struct mthca_dev *dev, struct mthca_cq *cq,
+			     struct mthca_qp *qp, int wqe_index, int is_send,
+			     struct mthca_err_cqe *cqe,
+			     struct _ib_wc *entry, int *free_cqe)
+{
+	int dbd;
+	__be32 new_wqe;
+
+	UNREFERENCED_PARAMETER(cq);
+
+	if (cqe->syndrome != SYNDROME_WR_FLUSH_ERR) {
+		HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_CQ ,("Completion with error "
+			"(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n",
+			cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe),
+			cq->cqn, cq->cons_index));
+		dump_cqe(TRACE_LEVEL_INFORMATION, dev, cqe);
+	}
+
+
+	/*
+	 * For completions in error, only work request ID, status, vendor error
+	 * (and freed resource count for RD) have to be set.
+	 */
+	switch (cqe->syndrome) {
+	case SYNDROME_LOCAL_LENGTH_ERR:
+		entry->status = IB_WCS_LOCAL_LEN_ERR;
+		break;
+	case SYNDROME_LOCAL_QP_OP_ERR:
+		entry->status = IB_WCS_LOCAL_OP_ERR;
+		break;
+	case SYNDROME_LOCAL_PROT_ERR:
+		entry->status = IB_WCS_LOCAL_PROTECTION_ERR;
+		break;
+	case SYNDROME_WR_FLUSH_ERR:
+		entry->status = IB_WCS_WR_FLUSHED_ERR;
+		break;
+	case SYNDROME_MW_BIND_ERR:
+		entry->status = IB_WCS_MEM_WINDOW_BIND_ERR;
+		break;
+	case SYNDROME_BAD_RESP_ERR:
+		entry->status = IB_WCS_BAD_RESP_ERR;
+		break;
+	case SYNDROME_LOCAL_ACCESS_ERR:
+		entry->status = IB_WCS_LOCAL_ACCESS_ERR;
+		break;
+	case SYNDROME_REMOTE_INVAL_REQ_ERR:
+		entry->status = IB_WCS_REM_INVALID_REQ_ERR;
+		break;
+	case SYNDROME_REMOTE_ACCESS_ERR:
+		entry->status = IB_WCS_REM_ACCESS_ERR;
+		break;
+	case SYNDROME_REMOTE_OP_ERR:
+		entry->status = IB_WCS_REM_OP_ERR;
+		break;
+	case SYNDROME_RETRY_EXC_ERR:
+		entry->status = IB_WCS_TIMEOUT_RETRY_ERR;
+		break;
+	case SYNDROME_RNR_RETRY_EXC_ERR:
+		entry->status = IB_WCS_RNR_RETRY_ERR;
+		break;
+	case SYNDROME_REMOTE_INVAL_RD_REQ_ERR:
+		entry->status = IB_WCS_REM_INVALID_REQ_ERR;
+		break;
+	case SYNDROME_REMOTE_ABORTED_ERR:
+	case SYNDROME_LOCAL_EEC_OP_ERR:
+	case SYNDROME_LOCAL_RDD_VIOL_ERR:
+	case SYNDROME_INVAL_EECN_ERR:
+	case SYNDROME_INVAL_EEC_STATE_ERR:
+	default:
+		entry->status = IB_WCS_GENERAL_ERR;
+		break;
+	}
+
+	entry->vendor_specific = cqe->vendor_err;
+
+	/*
+	 * Mem-free HCAs always generate one CQE per WQE, even in the
+	 * error case, so we don't have to check the doorbell count, etc.
+	 */
+	if (mthca_is_memfree(dev))
+		return;
+
+	mthca_free_err_wqe(dev, qp, is_send, wqe_index, &dbd, &new_wqe);
+
+	/*
+	 * If we're at the end of the WQE chain, or we've used up our
+	 * doorbell count, free the CQE. Otherwise just update it for
+	 * the next poll operation.
+ */ + if (!(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd)) + return; + + cqe->db_cnt = cl_hton16(cl_ntoh16(cqe->db_cnt) - (u16)dbd); + cqe->wqe = new_wqe; + cqe->syndrome = SYNDROME_WR_FLUSH_ERR; + + *free_cqe = 0; +} + +static inline int mthca_poll_one(struct mthca_dev *dev, + struct mthca_cq *cq, + struct mthca_qp **cur_qp, + int *freed, + struct _ib_wc *entry) +{ + struct mthca_wq *wq; + struct mthca_cqe *cqe; + unsigned wqe_index; + int is_error; + int is_send; + int free_cqe = 1; + int err = 0; + + HCA_ENTER(HCA_DBG_CQ); + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + /* + * Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + { // debug print + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_CQ,("CQ: 0x%06x/%d: CQE -> QPN 0x%06x, WQE @ 0x%08x\n", + cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn), + cl_ntoh32(cqe->wqe))); + dump_cqe(TRACE_LEVEL_VERBOSE, dev, cqe); + } + + is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == + MTHCA_ERROR_CQE_OPCODE_MASK; + is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; + + if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->qpn) { + /* + * We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + *cur_qp = mthca_array_get(&dev->qp_table.qp, + cl_ntoh32(cqe->my_qpn) & + (dev->limits.num_qps - 1)); + if (!*cur_qp) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_CQ, ("CQ entry for unknown QP %06x\n", + cl_ntoh32(cqe->my_qpn) & 0xffffff)); + err = -EINVAL; + goto out; + } + } + + if (is_send) { + wq = &(*cur_qp)->sq; + wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset) + >> wq->wqe_shift); + entry->wr_id = (*cur_qp)->wrid[wqe_index + + (*cur_qp)->rq.max]; + } else if ((*cur_qp)->ibqp.srq) { + struct mthca_srq *srq = to_msrq((*cur_qp)->ibqp.srq); + u32 wqe = cl_ntoh32(cqe->wqe); + wq = NULL; + wqe_index = wqe >> srq->wqe_shift; + entry->wr_id = srq->wrid[wqe_index]; + mthca_free_srq_wqe(srq, wqe); + } else { + wq = &(*cur_qp)->rq; + wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift; + entry->wr_id = (*cur_qp)->wrid[wqe_index]; + } + + if (wq) { + if (wq->last_comp < wqe_index) + wq->tail += wqe_index - wq->last_comp; + else + wq->tail += wqe_index + wq->max - wq->last_comp; + + wq->last_comp = wqe_index; + } + + if (is_send) { + entry->recv.ud.recv_opt = 0; + switch (cqe->opcode) { + case MTHCA_OPCODE_RDMA_WRITE: + entry->wc_type = IB_WC_RDMA_WRITE; + break; + case MTHCA_OPCODE_RDMA_WRITE_IMM: + entry->wc_type = IB_WC_RDMA_WRITE; + entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE; + break; + case MTHCA_OPCODE_SEND: + entry->wc_type = IB_WC_SEND; + break; + case MTHCA_OPCODE_SEND_IMM: + entry->wc_type = IB_WC_SEND; + entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE; + break; + case MTHCA_OPCODE_RDMA_READ: + entry->wc_type = IB_WC_RDMA_READ; + entry->length = cl_ntoh32(cqe->byte_cnt); + break; + case MTHCA_OPCODE_ATOMIC_CS: + entry->wc_type = IB_WC_COMPARE_SWAP; + entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL; + break; + case MTHCA_OPCODE_ATOMIC_FA: + entry->wc_type = IB_WC_FETCH_ADD; + entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL; + break; + case MTHCA_OPCODE_BIND_MW: + entry->wc_type = IB_WC_MW_BIND; + break; + default: + entry->wc_type = IB_WC_SEND; + break; + } + } else { + entry->length = cl_ntoh32(cqe->byte_cnt); + switch (cqe->opcode & 0x1f) { + case IB_OPCODE_SEND_LAST_WITH_IMMEDIATE: + case IB_OPCODE_SEND_ONLY_WITH_IMMEDIATE: + entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE; + 
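+/*
+ * [Editor's note on the wq->tail update earlier in this function.] The
+ * tail advances by the ring distance from the last completed WQE to
+ * this one, modulo wq->max. Worked example: with wq->max = 64,
+ * last_comp = 60 and wqe_index = 2, the wrap branch computes
+ * 2 + 64 - 60 = 6, i.e. entries 61, 62, 63, 0, 1, 2 are retired.
+ */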
entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec; + entry->wc_type = IB_WC_RECV; + break; + case IB_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: + case IB_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: + entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE; + entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec; + entry->wc_type = IB_WC_RECV_RDMA_WRITE; + break; + default: + entry->recv.ud.recv_opt = 0; + entry->wc_type = IB_WC_RECV; + break; + } + entry->recv.ud.remote_lid = cqe->rlid; + entry->recv.ud.remote_qp = cqe->rqpn & 0xffffff00; + entry->recv.ud.pkey_index = (u16)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16); + entry->recv.ud.remote_sl = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) >> 12); + entry->recv.ud.path_bits = (uint8_t)(cl_ntoh16(cqe->sl_g_mlpath) & 0x7f); + entry->recv.ud.recv_opt |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ? + IB_RECV_OPT_GRH_VALID : 0; + } + if (!is_send && cqe->rlid == 0){ + HCA_PRINT(TRACE_LEVEL_INFORMATION,HCA_DBG_CQ,("found rlid == 0 \n ")); + entry->recv.ud.recv_opt |= IB_RECV_OPT_FORWARD; + + } + if (is_error) { + handle_error_cqe(dev, cq, *cur_qp, wqe_index, is_send, + (struct mthca_err_cqe *) cqe, entry, &free_cqe); + } + else + entry->status = IB_WCS_SUCCESS; + + out: + if (likely(free_cqe)) { + set_cqe_hw(cqe); + ++(*freed); + ++cq->cons_index; + } + HCA_EXIT(HCA_DBG_CQ); + return err; +} + +int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, + struct _ib_wc *entry) +{ + struct mthca_dev *dev = to_mdev(ibcq->device); + struct mthca_cq *cq = to_mcq(ibcq); + struct mthca_qp *qp = NULL; + int err = 0; + int freed = 0; + int npolled; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&cq->lock, &lh); + + for (npolled = 0; npolled < num_entries; ++npolled) { + err = mthca_poll_one(dev, cq, &qp, + &freed, entry + npolled); + if (err) + break; + } + + if (freed) { + wmb(); + update_cons_index(dev, cq, freed); + } + + spin_unlock_irqrestore(&lh); + + return (err == 0 || err == -EAGAIN) ? npolled : err; +} + +int mthca_poll_cq_list( + IN struct ib_cq *ibcq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + struct mthca_dev *dev = to_mdev(ibcq->device); + struct mthca_cq *cq = to_mcq(ibcq); + struct mthca_qp *qp = NULL; + int err = 0; + int freed = 0; + ib_wc_t *wc_p, **next_pp; + SPIN_LOCK_PREP(lh); + + HCA_ENTER(HCA_DBG_CQ); + + spin_lock_irqsave(&cq->lock, &lh); + + // loop through CQ + next_pp = pp_done_wclist; + wc_p = *pp_free_wclist; + while( wc_p ) { + // poll one CQE + err = mthca_poll_one(dev, cq, &qp, &freed, wc_p); + if (err) + break; + + // prepare for the next loop + *next_pp = wc_p; + next_pp = &wc_p->p_next; + wc_p = wc_p->p_next; + } + + // prepare the results + *pp_free_wclist = wc_p; /* Set the head of the free list. */ + *next_pp = NULL; /* Clear the tail of the done list. */ + + // update consumer index + if (freed) { + wmb(); + update_cons_index(dev, cq, freed); + } + + spin_unlock_irqrestore(&lh); + HCA_EXIT(HCA_DBG_CQ); + return (err == 0 || err == -EAGAIN)? 0 : err; +} + + +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify) +{ + __be32 doorbell[2]; + + doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ? 
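+/*
+ * [Editor's note.] Arming a Tavor CQ is a single doorbell write that
+ * selects "notify on solicited completion" or "notify on any
+ * completion", OR-ed with the CQN. Because a completion can land just
+ * before the doorbell, consumers conventionally poll, arm, then poll
+ * once more (sketch; assumes the usual IB_CQ_NEXT_COMP counterpart to
+ * IB_CQ_SOLICITED and a hypothetical process() consumer):
+ *
+ *	while (mthca_poll_cq(ibcq, 1, &wc) > 0)
+ *		process(&wc);
+ *	mthca_tavor_arm_cq(ibcq, IB_CQ_NEXT_COMP);
+ *	while (mthca_poll_cq(ibcq, 1, &wc) > 0)
+ *		process(&wc);	// close the arm/completion race window
+ */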
+ MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : + MTHCA_TAVOR_CQ_DB_REQ_NOT) | + to_mcq(cq)->cqn); + doorbell[1] = (__be32) 0xffffffff; + + mthca_write64(doorbell, + to_mdev(cq->device)->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&to_mdev(cq->device)->doorbell_lock)); + + return 0; +} + +int mthca_arbel_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify notify) +{ + struct mthca_cq *cq = to_mcq(ibcq); + __be32 doorbell[2]; + u32 sn; + __be32 ci; + + sn = cq->arm_sn & 3; + ci = cl_hton32(cq->cons_index); + + doorbell[0] = ci; + doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) | + (notify == IB_CQ_SOLICITED ? 1 : 2)); + + mthca_write_db_rec(doorbell, cq->arm_db); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cl_hton32((sn << 28) | + (notify == IB_CQ_SOLICITED ? + MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : + MTHCA_ARBEL_CQ_DB_REQ_NOT) | + cq->cqn); + doorbell[1] = ci; + + mthca_write64(doorbell, + to_mdev(ibcq->device)->kar + MTHCA_CQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->doorbell_lock)); + + return 0; +} + +static void mthca_free_cq_buf(struct mthca_dev *dev, struct mthca_cq *cq) +{ + mthca_buf_free(dev, (cq->ibcq.cqe + 1) * MTHCA_CQ_ENTRY_SIZE, + &cq->queue, cq->is_direct, &cq->mr); +} + +int mthca_init_cq(struct mthca_dev *dev, int nent, + struct mthca_ucontext *ctx, u32 pdn, + struct mthca_cq *cq) +{ + int size = NEXT_PAGE_ALIGN(nent * MTHCA_CQ_ENTRY_SIZE ); + struct mthca_mailbox *mailbox; + struct mthca_cq_context *cq_context; + int err = -ENOMEM; + u8 status; + int i; + SPIN_LOCK_PREP(lh); + + cq->ibcq.cqe = nent - 1; + cq->is_kernel = !ctx; + + cq->cqn = mthca_alloc(&dev->cq_table.alloc); + if (cq->cqn == -1) + return -ENOMEM; + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->cq_table.table, cq->cqn); + if (err) + goto err_out; + + if (cq->is_kernel) { + cq->arm_sn = 1; + + err = -ENOMEM; + + cq->set_ci_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, + cq->cqn, &cq->set_ci_db); + if (cq->set_ci_db_index < 0) + goto err_out_icm; + + cq->arm_db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_CQ_ARM, + cq->cqn, &cq->arm_db); + if (cq->arm_db_index < 0) + goto err_out_ci; + } + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_arm; + + cq_context = mailbox->buf; + + if (cq->is_kernel) { + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_CQ_SIZE, + &cq->queue, &cq->is_direct, + &dev->driver_pd, 1, &cq->mr); + if (err) + goto err_out_mailbox; + + for (i = 0; i < nent; ++i) + set_cqe_hw(get_cqe(cq, i)); + } + + spin_lock_init(&cq->lock); + atomic_set(&cq->refcount, 1); + init_waitqueue_head(&cq->wait); + KeInitializeMutex(&cq->mutex, 0); + + RtlZeroMemory(cq_context, sizeof *cq_context); + cq_context->flags = cl_hton32(MTHCA_CQ_STATUS_OK | + MTHCA_CQ_STATE_DISARMED | + MTHCA_CQ_FLAG_TR); + cq_context->logsize_usrpage = cl_hton32((ffs(nent) - 1) << 24); + if (ctx) + cq_context->logsize_usrpage |= cl_hton32(ctx->uar.index); + else + cq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index); + cq_context->error_eqn = cl_hton32(dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn); + cq_context->comp_eqn = cl_hton32(dev->eq_table.eq[MTHCA_EQ_COMP].eqn); + cq_context->pd = cl_hton32(pdn); + cq_context->lkey = cl_hton32(cq->mr.ibmr.lkey); + cq_context->cqn = cl_hton32(cq->cqn); + + if (mthca_is_memfree(dev)) { + cq_context->ci_db = cl_hton32(cq->set_ci_db_index); + cq_context->state_db = cl_hton32(cq->arm_db_index); + } + + err = 
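+/*
+ * [Editor's note.] SW2HW_CQ transfers ownership of the context built in
+ * the mailbox to firmware; every failure from here on must unwind the
+ * steps already taken, which the err_out_* goto ladder below does in
+ * strict reverse order of acquisition -- each label releases exactly
+ * one resource and falls through to the labels for everything acquired
+ * before it.
+ */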
mthca_SW2HW_CQ(dev, mailbox, cq->cqn, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_CQ failed (%d)\n", err)); + goto err_out_free_mr; + } + + if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_CQ returned status 0x%02x\n", + status)); + err = -EINVAL; + goto err_out_free_mr; + } + + spin_lock_irq(&dev->cq_table.lock, &lh); + if (mthca_array_set(&dev->cq_table.cq, + cq->cqn & (dev->limits.num_cqs - 1), + cq)) { + spin_unlock_irq(&lh); + goto err_out_free_mr; + } + spin_unlock_irq(&lh); + + cq->cons_index = 0; + + mthca_free_mailbox(dev, mailbox); + + return 0; + +err_out_free_mr: + if (cq->is_kernel) + mthca_free_cq_buf(dev, cq); + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_arm: + if (cq->is_kernel && mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); + +err_out_ci: + if (cq->is_kernel && mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); + +err_out_icm: + mthca_table_put(dev, dev->cq_table.table, cq->cqn); + +err_out: + mthca_free(&dev->cq_table.alloc, cq->cqn); + + return err; +} + +void mthca_free_cq(struct mthca_dev *dev, + struct mthca_cq *cq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + SPIN_LOCK_PREP(lh); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("No memory for mailbox to free CQ.\n")); + return; + } + + err = mthca_HW2SW_CQ(dev, mailbox, cq->cqn, &status); + if (err){ + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_CQ failed (%d)\n", err)); + } + else if (status){ + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_CQ returned status 0x%02x\n", status)); + } + { // debug print + __be32 *ctx = mailbox->buf; + int j; + UNUSED_PARAM_WOWPP(ctx); + UNUSED_PARAM_WOWPP(j); + + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("context for CQN %x (cons index %x, next sw %d)\n", + cq->cqn, cq->cons_index, + cq->is_kernel ? 
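+/*
+ * [Editor's note on the teardown sequence below.] The async event path
+ * (mthca_cq_event) takes a reference before touching the CQ, so
+ * destruction drops the creation reference and sleeps until all such
+ * users have drained:
+ *
+ *	atomic_dec(&cq->refcount);
+ *	wait_event(&cq->wait, !atomic_read(&cq->refcount));
+ *
+ * The preceding KeFlushQueuedDpcs() guarantees no DPC on another CPU is
+ * still running with a CQ pointer fetched before the array was cleared.
+ */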
!!next_cqe_sw(cq) : 0)); + for (j = 0; j < 16; ++j) + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%2x] %08x\n", j * 4, cl_ntoh32(ctx[j]))); + } + spin_lock_irq(&dev->cq_table.lock, &lh); + mthca_array_clear(&dev->cq_table.cq, + cq->cqn & (dev->limits.num_cqs - 1)); + spin_unlock_irq(&lh); + + /* wait for all RUNNING DPCs on that EQ to complete */ + ASSERT(KeGetCurrentIrql() == PASSIVE_LEVEL); + KeFlushQueuedDpcs(); + + atomic_dec(&cq->refcount); + wait_event(&cq->wait, !atomic_read(&cq->refcount)); + + if (cq->is_kernel) { + mthca_free_cq_buf(dev, cq); + if (mthca_is_memfree(dev)) { + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_ARM, cq->arm_db_index); + mthca_free_db(dev, MTHCA_DB_TYPE_CQ_SET_CI, cq->set_ci_db_index); + } + } + + mthca_table_put(dev, dev->cq_table.table, cq->cqn); + mthca_free(&dev->cq_table.alloc, cq->cqn); + mthca_free_mailbox(dev, mailbox); +} + +int mthca_init_cq_table(struct mthca_dev *dev) +{ + int err; + + spin_lock_init(&dev->cq_table.lock); + + err = mthca_alloc_init(&dev->cq_table.alloc, + dev->limits.num_cqs, + (1 << 24) - 1, + dev->limits.reserved_cqs); + if (err) + return err; + + err = mthca_array_init(&dev->cq_table.cq, + dev->limits.num_cqs); + if (err) + mthca_alloc_cleanup(&dev->cq_table.alloc); + + return err; +} + +void mthca_cleanup_cq_table(struct mthca_dev *dev) +{ + mthca_array_cleanup(&dev->cq_table.cq, dev->limits.num_cqs); + mthca_alloc_cleanup(&dev->cq_table.alloc); +} + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_dev.h b/branches/Ndi/hw/mthca/kernel/mthca_dev.h new file mode 100644 index 00000000..86153f0e --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_dev.h @@ -0,0 +1,605 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * Copyright (c) 2006 SilverStorm Technologies, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef MTHCA_DEV_H +#define MTHCA_DEV_H + +#include "hca_driver.h" +#include "mthca_log.h" +#include "mthca_provider.h" +#include "mthca_doorbell.h" + +// must be synchronized with MTHCA.INF +#define DRV_NAME "mthca" +#define PFX DRV_NAME ": " +#define DRV_VERSION "1.0.0000.614" +#define DRV_RELDATE "08/03/2006" + +#define HZ 1000000 /* 1 sec in usecs */ + +enum { + MTHCA_FLAG_DDR_HIDDEN = 1 << 1, + MTHCA_FLAG_SRQ = 1 << 2, + MTHCA_FLAG_MSI = 1 << 3, + MTHCA_FLAG_MSI_X = 1 << 4, + MTHCA_FLAG_NO_LAM = 1 << 5, + MTHCA_FLAG_FMR = 1 << 6, + MTHCA_FLAG_MEMFREE = 1 << 7, + MTHCA_FLAG_PCIE = 1 << 8, + MTHCA_FLAG_SINAI_OPT = 1 << 9, + MTHCA_FLAG_LIVEFISH = 1 << 10 +}; + +enum { + MTHCA_MAX_PORTS = 2 +}; + +enum { + MTHCA_BOARD_ID_LEN = 64 +}; + +enum { + MTHCA_EQ_CONTEXT_SIZE = 0x40, + MTHCA_CQ_CONTEXT_SIZE = 0x40, + MTHCA_QP_CONTEXT_SIZE = 0x200, + MTHCA_RDB_ENTRY_SIZE = 0x20, + MTHCA_AV_SIZE = 0x20, + MTHCA_MGM_ENTRY_SIZE = 0x40, + + /* Arbel FW gives us these, but we need them for Tavor */ + MTHCA_MPT_ENTRY_SIZE = 0x40, + MTHCA_MTT_SEG_SIZE = 0x40, + + MTHCA_QP_PER_MGM = 4 * (MTHCA_MGM_ENTRY_SIZE / 16 - 2) +}; + +enum { + MTHCA_EQ_CMD, + MTHCA_EQ_ASYNC, + MTHCA_EQ_COMP, + MTHCA_NUM_EQ +}; + +enum { + MTHCA_BYTES_PER_ATOMIC_COMPL = 8 +}; + +enum mthca_wr_opcode{ + MTHCA_OPCODE_NOP = 0x00, + MTHCA_OPCODE_RDMA_WRITE = 0x08, + MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09, + MTHCA_OPCODE_SEND = 0x0a, + MTHCA_OPCODE_SEND_IMM = 0x0b, + MTHCA_OPCODE_RDMA_READ = 0x10, + MTHCA_OPCODE_ATOMIC_CS = 0x11, + MTHCA_OPCODE_ATOMIC_FA = 0x12, + MTHCA_OPCODE_BIND_MW = 0x18, + MTHCA_OPCODE_INVALID = 0xff +}; + +struct mthca_cmd { + struct pci_pool *pool; + int use_events; + KMUTEX hcr_mutex; + KSEMAPHORE poll_sem; + KSEMAPHORE event_sem; + int max_cmds; + spinlock_t context_lock; + int free_head; + struct mthca_cmd_context *context; + u16 token_mask; +}; + +struct mthca_limits { + int num_ports; + int vl_cap; + int mtu_cap; + int gid_table_len; + int pkey_table_len; + int local_ca_ack_delay; + int num_uars; + int max_sg; + int num_qps; + int max_wqes; + int max_desc_sz; + int max_qp_init_rdma; + int reserved_qps; + int num_srqs; + int max_srq_wqes; + int max_srq_sge; + int reserved_srqs; + int num_eecs; + int reserved_eecs; + int num_cqs; + int max_cqes; + int reserved_cqs; + int num_eqs; + int reserved_eqs; + int num_mpts; + int num_mtt_segs; + int fmr_reserved_mtts; + int reserved_mtts; + int reserved_mrws; + int reserved_uars; + int num_mgms; + int num_amgms; + int reserved_mcgs; + int num_pds; + int reserved_pds; + u32 page_size_cap; + u32 flags; + u8 port_width_cap; +}; + +struct mthca_alloc { + u32 last; + u32 top; + u32 max; + u32 mask; + spinlock_t lock; + unsigned long *table; +}; + +struct mthca_array { + struct { + void **page; + int used; + } *page_list; +}; + +struct mthca_uar_table { + struct mthca_alloc alloc; + u64 uarc_base; + int uarc_size; +}; + +struct mthca_pd_table { + struct mthca_alloc alloc; +}; + +struct mthca_buddy { + unsigned long **bits; + int max_order; + spinlock_t lock; +}; + +struct mthca_mr_table { + struct mthca_alloc mpt_alloc; + struct mthca_buddy mtt_buddy; + struct mthca_buddy *fmr_mtt_buddy; + u64 mtt_base; + u64 mpt_base; + struct mthca_icm_table *mtt_table; + struct mthca_icm_table *mpt_table; + struct { + void __iomem *mpt_base; + SIZE_T mpt_base_size; + void __iomem *mtt_base; + SIZE_T mtt_base_size; + struct mthca_buddy mtt_buddy; + } tavor_fmr; +}; + +struct mthca_eq_table { + struct mthca_alloc alloc; + void __iomem *clr_int; + u32 clr_mask; + u32 
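+/*
+ * [Editor's note.] struct mthca_array above is a small two-level page
+ * table mapping a dense object number to a pointer; the CQ/SRQ/QP
+ * tables below embed one and index it with the number masked to the
+ * table size, as in mthca_cq.c:
+ *
+ *	cq = mthca_array_get(&dev->cq_table.cq,
+ *	                     cqn & (dev->limits.num_cqs - 1));
+ *
+ * num_cqs is sized as a power of two, so the mask cheaply folds the
+ * 24-bit CQN into the array's range.
+ */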
arm_mask; + struct mthca_eq eq[MTHCA_NUM_EQ]; + u64 icm_virt; + struct scatterlist sg; + int have_irq; + u8 inta_pin; + KLOCK_QUEUE_HANDLE lockh; +}; + +struct mthca_cq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array cq; + struct mthca_icm_table *table; +}; + +struct mthca_srq_table { + struct mthca_alloc alloc; + spinlock_t lock; + struct mthca_array srq; + struct mthca_icm_table *table; +}; + +struct mthca_qp_table { + struct mthca_alloc alloc; + u32 rdb_base; + int rdb_shift; + int sqp_start; + spinlock_t lock; + struct mthca_array qp; + struct mthca_icm_table *qp_table; + struct mthca_icm_table *eqp_table; + struct mthca_icm_table *rdb_table; +}; + +struct mthca_av_table { + struct pci_pool *pool; + int num_ddr_avs; + u64 ddr_av_base; + void __iomem *av_map; + SIZE_T av_map_size; + struct mthca_alloc alloc; +}; + +struct mthca_mcg_table { + KMUTEX mutex; + struct mthca_alloc alloc; + struct mthca_icm_table *table; +}; + +struct mthca_catas_err { + u64 addr; + u32 __iomem *map; + SIZE_T map_size; + unsigned long stop; + u32 size; + KTIMER timer; + KDPC timer_dpc; + LARGE_INTEGER interval; +}; + +struct mthca_dev { + struct ib_device ib_dev; + hca_dev_ext_t *ext; + uplink_info_t uplink_info; + volatile long dpc_lock; + + int hca_type; + unsigned long mthca_flags; + unsigned long device_cap_flags; + + u32 rev_id; + char board_id[MTHCA_BOARD_ID_LEN]; + + /* firmware info */ + u64 fw_ver; + union { + struct { + u64 fw_start; + u64 fw_end; + } tavor; + struct { + u64 clr_int_base; + u64 eq_arm_base; + u64 eq_set_ci_base; + struct mthca_icm *fw_icm; + struct mthca_icm *aux_icm; + u16 fw_pages; + } arbel; + } fw; + + u64 ddr_start; + u64 ddr_end; + + MTHCA_DECLARE_DOORBELL_LOCK(doorbell_lock) + KMUTEX cap_mask_mutex; + + u8 __iomem *hcr; + SIZE_T hcr_size; + u8 __iomem *kar; + SIZE_T kar_size; + u8 __iomem *clr_base; + SIZE_T clr_base_size; + union { + struct { + void __iomem *ecr_base; + SIZE_T ecr_base_size; + } tavor; + struct { + void __iomem *eq_arm; + SIZE_T eq_arm_size; + void __iomem *eq_set_ci_base; + SIZE_T eq_set_ci_base_size; + } arbel; + } eq_regs; + + struct mthca_cmd cmd; + struct mthca_limits limits; + + struct mthca_uar_table uar_table; + struct mthca_pd_table pd_table; + struct mthca_mr_table mr_table; + struct mthca_eq_table eq_table; + struct mthca_cq_table cq_table; + struct mthca_srq_table srq_table; + struct mthca_qp_table qp_table; + struct mthca_av_table av_table; + struct mthca_mcg_table mcg_table; + struct mthca_catas_err catas_err; + struct mthca_uar driver_uar; + struct mthca_db_table *db_tab; + struct mthca_pd driver_pd; + struct mthca_mr driver_mr; + + struct ib_mad_agent *send_agent[MTHCA_MAX_PORTS][2]; + struct ib_ah *sm_ah[MTHCA_MAX_PORTS]; + spinlock_t sm_lock; + u32 state; +}; + +// mthca_dev states +enum { + MTHCA_DEV_UNINITIALIZED, + MTHCA_DEV_INITIALIZED, + MTHCA_DEV_FAILED +}; + +enum { + MTHCA_CQ_ENTRY_SIZE = 0x20 +}; + + + +#define MTHCA_GET(dest, source, offset) \ + { \ + void *__p = (char *) (source) + (offset); \ + void *__q = &(dest); \ + switch (sizeof (dest)) { \ + case 1: *(u8 *)__q = *(u8 *) __p; break; \ + case 2: *(u16 *)__q = (u16)cl_ntoh16(*(u16 *)__p); break; \ + case 4: *(u32 *)__q = (u32)cl_ntoh32(*(u32 *)__p); break; \ + case 8: *(u64 *)__q = (u64)cl_ntoh64(*(u64 *)__p); break; \ + default: ASSERT(0); \ + } \ + } + + +#define MTHCA_PUT(dest, source, offset) \ + { \ + void *__d = ((char *) (dest) + (offset)); \ + switch (sizeof(source)) { \ + case 1: *(u8 *) __d = (u8)(source); break; \ + case 2: *(__be16 *) 
__d = cl_hton16((u16)source); break; \ + case 4: *(__be32 *) __d = cl_hton32((u32)source); break; \ + case 8: *(__be64 *) __d = cl_hton64((u64)source); break; \ + default: ASSERT(0); \ + } \ + } + +NTSTATUS mthca_reset(struct mthca_dev *mdev); + +u32 mthca_alloc(struct mthca_alloc *alloc); +void mthca_free(struct mthca_alloc *alloc, u32 obj); +int mthca_alloc_init(struct mthca_alloc *alloc, u32 num, u32 mask, + u32 reserved); +void mthca_alloc_cleanup(struct mthca_alloc *alloc); +void *mthca_array_get(struct mthca_array *array, int index); +int mthca_array_set(struct mthca_array *array, int index, void *value); +void mthca_array_clear(struct mthca_array *array, int index); +int mthca_array_init(struct mthca_array *array, int nent); +void mthca_array_cleanup(struct mthca_array *array, int nent); +int mthca_buf_alloc(struct mthca_dev *dev, int size, int max_direct, + union mthca_buf *buf, int *is_direct, struct mthca_pd *pd, + int hca_write, struct mthca_mr *mr); +void mthca_buf_free(struct mthca_dev *dev, int size, union mthca_buf *buf, + int is_direct, struct mthca_mr *mr); + +int mthca_init_uar_table(struct mthca_dev *dev); +int mthca_init_pd_table(struct mthca_dev *dev); +int mthca_init_mr_table(struct mthca_dev *dev); +int mthca_init_eq_table(struct mthca_dev *dev); +int mthca_init_cq_table(struct mthca_dev *dev); +int mthca_init_srq_table(struct mthca_dev *dev); +int mthca_init_qp_table(struct mthca_dev *dev); +int mthca_init_av_table(struct mthca_dev *dev); +int mthca_init_mcg_table(struct mthca_dev *dev); + +void mthca_cleanup_uar_table(struct mthca_dev *dev); +void mthca_cleanup_pd_table(struct mthca_dev *dev); +void mthca_cleanup_mr_table(struct mthca_dev *dev); +void mthca_cleanup_eq_table(struct mthca_dev *dev); +void mthca_cleanup_cq_table(struct mthca_dev *dev); +void mthca_cleanup_srq_table(struct mthca_dev *dev); +void mthca_cleanup_qp_table(struct mthca_dev *dev); +void mthca_cleanup_av_table(struct mthca_dev *dev); +void mthca_cleanup_mcg_table(struct mthca_dev *dev); + +int mthca_register_device(struct mthca_dev *dev); +void mthca_unregister_device(struct mthca_dev *dev); + +void mthca_start_catas_poll(struct mthca_dev *dev); +void mthca_stop_catas_poll(struct mthca_dev *dev); + +int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar); +void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar); + +int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd); +void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd); + +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size); +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt); +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len); +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, mthca_mpt_access_t access, struct mthca_mr *mr); +int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, + mthca_mpt_access_t access, struct mthca_mr *mr); +int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + mthca_mpt_access_t access, struct mthca_mr *mr); +void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr); + +int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, + mthca_mpt_access_t access, struct mthca_fmr *fmr); +int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova); +void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct 
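+/*
+ * [Editor's sketch -- the offsets are hypothetical.] MTHCA_GET and
+ * MTHCA_PUT above move fields between host structures and the
+ * big-endian firmware mailbox layouts, byte-swapping according to the
+ * field width. Typical use when parsing a QUERY_DEV_LIM reply and
+ * building an INIT_HCA request:
+ *
+ *	u8 *outbox = mailbox->buf;
+ *	u16 max_qp_sz;
+ *	MTHCA_GET(max_qp_sz, outbox, 0x00);		// 2-byte field: cl_ntoh16
+ *	MTHCA_PUT(inbox, param->qpc_base, 0x10);	// 8-byte field: cl_hton64
+ */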
mthca_fmr *fmr); +int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova); +void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr); +int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr); + +int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt); +void mthca_unmap_eq_icm(struct mthca_dev *dev); + +int mthca_poll_cq(struct ib_cq *ibcq, int num_entries, + struct _ib_wc *entry); +int mthca_tavor_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_arbel_arm_cq(struct ib_cq *cq, enum ib_cq_notify notify); +int mthca_init_cq(struct mthca_dev *dev, int nent, + struct mthca_ucontext *ctx, u32 pdn, + struct mthca_cq *cq); +void mthca_free_cq(struct mthca_dev *dev, + struct mthca_cq *cq); +void mthca_cq_completion(struct mthca_dev *dev, u32 cqn); +void mthca_cq_event(struct mthca_dev *dev, u32 cqn, + enum ib_event_type event_type); +void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn, + struct mthca_srq *srq); + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + ib_srq_attr_t *attr, struct mthca_srq *srq); +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq); +int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr, + ib_srq_attr_mask_t attr_mask); +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type, u8 vendor_code); +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr); +int mthca_tavor_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +int mthca_arbel_post_srq_recv(struct ib_srq *srq, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); + +void mthca_qp_event(struct mthca_dev *dev, u32 qpn, + enum ib_event_type event_type, u8 vendor_code); +int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask); +int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr); +int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +int mthca_arbel_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr); +int mthca_arbel_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, + int index, int *dbd, __be32 *new_wqe); +int mthca_alloc_qp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_qp_type_t type, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + struct mthca_qp *qp); +int mthca_alloc_sqp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + int qpn, + int port, + struct mthca_sqp *sqp); +void mthca_free_qp(struct mthca_dev *dev, struct mthca_qp *qp); +int mthca_create_ah(struct mthca_dev *dev, + struct mthca_pd *pd, + struct ib_ah_attr *ah_attr, + struct mthca_ah *ah); +int mthca_destroy_ah(struct mthca_dev *dev, struct mthca_ah *ah); +int mthca_read_ah(struct mthca_dev *dev, struct mthca_ah *ah, + struct ib_ud_header *header); + +int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); +int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid); + +int mthca_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct _ib_wc *in_wc, + struct _ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad); + +static inline 
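+/*
+ * [Editor's note.] to_mdev() below is the standard container_of
+ * downcast: struct mthca_dev embeds its ib_device as the member ib_dev,
+ * so any ibdev pointer the verbs layer hands back converts to the
+ * driver object without a lookup, e.g.:
+ *
+ *	struct mthca_dev *mdev = to_mdev(ibqp->device);
+ *	if (mthca_is_memfree(mdev))	// Arbel (mem-free) vs. Tavor path
+ *		...
+ */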
struct mthca_dev *to_mdev(struct ib_device *ibdev) +{ + return container_of(ibdev, struct mthca_dev, ib_dev); +} + +static inline int mthca_is_memfree(struct mthca_dev *dev) +{ + return dev->mthca_flags & MTHCA_FLAG_MEMFREE; +} + +VOID +WriteEventLogEntry( + PVOID pi_pIoObject, + ULONG pi_ErrorCode, + ULONG pi_UniqueErrorCode, + ULONG pi_FinalStatus, + ULONG pi_nDataItems, + ... + ); + +VOID +WriteEventLogEntryStr( + PVOID pi_pIoObject, + ULONG pi_ErrorCode, + ULONG pi_UniqueErrorCode, + ULONG pi_FinalStatus, + PWCHAR pi_InsertionStr, + ULONG pi_nDataItems, + ... + ); + + +static inline int mthca_is_livefish(struct mthca_dev *mdev) +{ + return mdev->mthca_flags & MTHCA_FLAG_LIVEFISH; +} + +void mthca_get_av_params( struct mthca_ah *ah_p, u8 *port_num, __be16 *dlid, u8 *sr, u8 *path_bits ); + +void mthca_set_av_params( struct mthca_dev *dev, struct mthca_ah *ah_p, struct ib_ah_attr *ah_attr ); + +int ib_uverbs_init(void); +void ib_uverbs_cleanup(void); +int mthca_ah_grh_present(struct mthca_ah *ah); + +int mthca_max_srq_sge(struct mthca_dev *dev); + + +#endif /* MTHCA_DEV_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_doorbell.h b/branches/Ndi/hw/mthca/kernel/mthca_doorbell.h new file mode 100644 index 00000000..92cfb3fd --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_doorbell.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +enum { + MTHCA_SEND_DOORBELL_FENCE = 1 << 5 +}; + +#ifdef _WIN64 +/* + * Assume that we can just write a 64-bit doorbell atomically. s390 + * actually doesn't have writeq() but S/390 systems don't even have + * PCI so we won't worry about it. 
+ */ + + + +#define MTHCA_DECLARE_DOORBELL_LOCK(name) +#define MTHCA_INIT_DOORBELL_LOCK(ptr) +#define MTHCA_GET_DOORBELL_LOCK(ptr) (NULL) + +static inline void mthca_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writeq((u64) val, dest); +} + +static inline void mthca_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + UNUSED_PARAM(doorbell_lock); + *(volatile u64 *)dest = *(volatile u64 *)val; +} + +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) +{ + *(volatile u64 *) db = *(volatile u64 *) val; +} + +#else + +/* + * Just fall back to a spinlock to protect the doorbell if + * BITS_PER_LONG is 32 -- there's no portable way to do atomic 64-bit + * MMIO writes. + */ + +#define MTHCA_DECLARE_DOORBELL_LOCK(name) spinlock_t name; +#define MTHCA_INIT_DOORBELL_LOCK(ptr) spin_lock_init(ptr) +#define MTHCA_GET_DOORBELL_LOCK(ptr) (ptr) + +static inline void mthca_write64_raw(__be64 val, void __iomem *dest) +{ + __raw_writel(((u32 *) &val)[0], dest); + __raw_writel(((u32 *) &val)[1], (u8*)dest + 4); +} + +static inline void mthca_write64(__be32 val[2], void __iomem *dest, + spinlock_t *doorbell_lock) +{ + SPIN_LOCK_PREP(lh); + spin_lock_irqsave(doorbell_lock, &lh); + __raw_writel((u32) val[0], dest); + __raw_writel((u32) val[1], (u8*)dest + 4); + spin_unlock_irqrestore(&lh); +} + +static inline void mthca_write_db_rec(__be32 val[2], __be32 *db) +{ + db[0] = val[0]; + wmb(); + db[1] = val[1]; +} + +#endif diff --git a/branches/Ndi/hw/mthca/kernel/mthca_eq.c b/branches/Ndi/hw/mthca/kernel/mthca_eq.c new file mode 100644 index 00000000..111dea71 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_eq.c @@ -0,0 +1,1106 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_eq.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_config_reg.h" +#include "mthca_wqe.h" + +static int mthca_map_reg(struct mthca_dev *dev, + u64 offset, unsigned long size, + void __iomem **map, SIZE_T *map_size); +static int mthca_map_eq_regs(struct mthca_dev *dev); +static void mthca_unmap_eq_regs(struct mthca_dev *dev); +static int mthca_create_eq(struct mthca_dev *dev, + int nent, + u8 intr, + struct mthca_eq *eq); + + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_map_reg) +#pragma alloc_text (PAGE, mthca_map_eq_regs) +#pragma alloc_text (PAGE, mthca_init_eq_table) +#pragma alloc_text (PAGE, mthca_unmap_eq_regs) +#pragma alloc_text (PAGE, mthca_map_eq_icm) +#pragma alloc_text (PAGE, mthca_unmap_eq_icm) +#pragma alloc_text (PAGE, mthca_create_eq) +#pragma alloc_text (PAGE, mthca_cleanup_eq_table) +#endif + +enum { + MTHCA_NUM_ASYNC_EQE = 0x80, + MTHCA_NUM_CMD_EQE = 0x80, + MTHCA_NUM_SPARE_EQE = 0x80, + MTHCA_EQ_ENTRY_SIZE = 0x20 +}; + +/* + * Must be packed because start is 64 bits but only aligned to 32 bits. + */ +#pragma pack(push,1) +struct mthca_eq_context { + __be32 flags; + __be64 start; + __be32 logsize_usrpage; + __be32 tavor_pd; /* reserved for Arbel */ + u8 reserved1[3]; + u8 intr; + __be32 arbel_pd; /* lost_count for Tavor */ + __be32 lkey; + u32 reserved2[2]; + __be32 consumer_index; + __be32 producer_index; + u32 reserved3[4]; +}; +#pragma pack(pop) + +#define MTHCA_EQ_STATUS_OK ( 0 << 28) +#define MTHCA_EQ_STATUS_OVERFLOW ( 9 << 28) +#define MTHCA_EQ_STATUS_WRITE_FAIL (10 << 28) +#define MTHCA_EQ_OWNER_SW ( 0 << 24) +#define MTHCA_EQ_OWNER_HW ( 1 << 24) +#define MTHCA_EQ_FLAG_TR ( 1 << 18) +#define MTHCA_EQ_FLAG_OI ( 1 << 17) +#define MTHCA_EQ_STATE_ARMED ( 1 << 8) +#define MTHCA_EQ_STATE_FIRED ( 2 << 8) +#define MTHCA_EQ_STATE_ALWAYS_ARMED ( 3 << 8) +#define MTHCA_EQ_STATE_ARBEL ( 8 << 8) + +enum { + MTHCA_EVENT_TYPE_COMP = 0x00, + MTHCA_EVENT_TYPE_PATH_MIG = 0x01, + MTHCA_EVENT_TYPE_COMM_EST = 0x02, + MTHCA_EVENT_TYPE_SQ_DRAINED = 0x03, + MTHCA_EVENT_TYPE_CQ_ERROR = 0x04, + MTHCA_EVENT_TYPE_WQ_CATAS_ERROR = 0x05, + MTHCA_EVENT_TYPE_EEC_CATAS_ERROR = 0x06, + MTHCA_EVENT_TYPE_PATH_MIG_FAILED = 0x07, + MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR = 0x08, + MTHCA_EVENT_TYPE_PORT_CHANGE = 0x09, + MTHCA_EVENT_TYPE_CMD = 0x0a, + MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR = 0x10, + MTHCA_EVENT_TYPE_ECC_DETECT = 0x0e, + MTHCA_EVENT_TYPE_EQ_OVERFLOW = 0x0f, + MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR = 0x11, + MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR = 0x12, + MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE = 0x13, + MTHCA_EVENT_TYPE_SRQ_LIMIT = 0x14 +}; + +#define MTHCA_ASYNC_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG) | \ + (1Ui64 << MTHCA_EVENT_TYPE_COMM_EST) | \ + (1Ui64 << MTHCA_EVENT_TYPE_SQ_DRAINED) | \ + (1Ui64 << MTHCA_EVENT_TYPE_CQ_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_WQ_CATAS_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_EEC_CATAS_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_PATH_MIG_FAILED) | \ + (1Ui64 << MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_PORT_CHANGE) | \ + (1Ui64 << MTHCA_EVENT_TYPE_ECC_DETECT)) +#define MTHCA_SRQ_EVENT_MASK ((1Ui64 << MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR) | \ + (1Ui64 << MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE) | \ + (1Ui64 << MTHCA_EVENT_TYPE_SRQ_LIMIT)) + +#define MTHCA_CMD_EVENT_MASK (1Ui64 << MTHCA_EVENT_TYPE_CMD) + +#define 
MTHCA_EQ_DB_INC_CI (1 << 24) +#define MTHCA_EQ_DB_REQ_NOT (2 << 24) +#define MTHCA_EQ_DB_DISARM_CQ (3 << 24) +#define MTHCA_EQ_DB_SET_CI (4 << 24) +#define MTHCA_EQ_DB_ALWAYS_ARM (5 << 24) + +#pragma pack(push,1) +struct mthca_eqe { + u8 reserved1; + u8 type; + u8 reserved2; + u8 subtype; + union { + u32 raw[6]; + struct { + __be32 cqn; + } comp; + struct { + u16 reserved1; + __be16 token; + u32 reserved2; + u8 reserved3[3]; + u8 status; + __be64 out_param; + } cmd; + struct { + __be32 qpn; + u32 reserved1; + u32 reserved2; + u8 reserved3[1]; + u8 vendor_code; + u8 reserved4[2]; + } qp; + struct { + __be32 srqn; + u32 reserved1; + u32 reserved2; + u8 reserved3[1]; + u8 vendor_code; + u8 reserved4[2]; + } srq; + struct { + __be32 cqn; + u32 reserved1; + u8 reserved2[3]; + u8 syndrome; + } cq_err; + struct { + u32 reserved1[2]; + __be32 port; + } port_change; + } event; + u8 reserved3[3]; + u8 owner; +} ; +#pragma pack(pop) + +#define MTHCA_EQ_ENTRY_OWNER_SW (0 << 7) +#define MTHCA_EQ_ENTRY_OWNER_HW (1 << 7) + +static inline u64 async_mask(struct mthca_dev *dev) +{ + return dev->mthca_flags & MTHCA_FLAG_SRQ ? + MTHCA_ASYNC_EVENT_MASK | MTHCA_SRQ_EVENT_MASK : + MTHCA_ASYNC_EVENT_MASK; +} + +static inline void tavor_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + __be32 doorbell[2]; + + doorbell[0] = cl_hton32(MTHCA_EQ_DB_SET_CI | eq->eqn); + doorbell[1] = cl_hton32(ci & (eq->nent - 1)); + + /* + * This barrier makes sure that all updates to ownership bits + * done by set_eqe_hw() hit memory before the consumer index + * is updated. set_eq_ci() allows the HCA to possibly write + * more EQ entries, and we want to avoid the exceedingly + * unlikely possibility of the HCA writing an entry and then + * having set_eqe_hw() overwrite the owner field. + */ + wmb(); + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); +} + +static inline void arbel_set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + /* See comment in tavor_set_eq_ci() above. 
*/ + wmb(); + __raw_writel((u32) cl_hton32(ci), + (u8*)dev->eq_regs.arbel.eq_set_ci_base + eq->eqn * 8); + /* We still want ordering, just not swabbing, so add a barrier */ + mb(); +} + +static inline void set_eq_ci(struct mthca_dev *dev, struct mthca_eq *eq, u32 ci) +{ + if (mthca_is_memfree(dev)) + arbel_set_eq_ci(dev, eq, ci); + else + tavor_set_eq_ci(dev, eq, ci); +} + +static inline void tavor_eq_req_not(struct mthca_dev *dev, int eqn) +{ + __be32 doorbell[2]; + + doorbell[0] = cl_hton32(MTHCA_EQ_DB_REQ_NOT | eqn); + doorbell[1] = 0; + + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); +} + +static inline void arbel_eq_req_not(struct mthca_dev *dev, u32 eqn_mask) +{ + writel(eqn_mask, dev->eq_regs.arbel.eq_arm); +} + +static inline void disarm_cq(struct mthca_dev *dev, int eqn, int cqn) +{ + if (!mthca_is_memfree(dev)) { + __be32 doorbell[2]; + + doorbell[0] = cl_hton32(MTHCA_EQ_DB_DISARM_CQ | eqn); + doorbell[1] = cl_hton32(cqn); + + mthca_write64(doorbell, + dev->kar + MTHCA_EQ_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } +} + +static inline struct mthca_eqe *get_eqe(struct mthca_eq *eq, u32 entry) +{ + unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; + return (struct mthca_eqe *)((u8*)eq->page_list[off / PAGE_SIZE].page + off % PAGE_SIZE); +} + +static inline struct mthca_eqe* next_eqe_sw(struct mthca_eq *eq) +{ + struct mthca_eqe* eqe; + eqe = get_eqe(eq, eq->cons_index); + return (MTHCA_EQ_ENTRY_OWNER_HW & eqe->owner) ? NULL : eqe; +} + +static inline void set_eqe_hw(struct mthca_eqe *eqe) +{ + eqe->owner = MTHCA_EQ_ENTRY_OWNER_HW; +} + +static void port_change(struct mthca_dev *dev, int port, int active) +{ + struct ib_event record; + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Port change to %s for port %d\n", + active ? "active" : "down", port)); + + record.device = &dev->ib_dev; + record.event = active ? IB_EVENT_PORT_ACTIVE : IB_EVENT_PORT_ERR; + record.element.port_num = (u8)port; + // Gen2 ib_core mechanism + ib_dispatch_event(&record); + // our callback + ca_event_handler( &record, &dev->ext->hca.hob ); +} + +static int mthca_eq_int(struct mthca_dev *dev, struct mthca_eq *eq) +{ + int disarm_cqn; + int eqes_found = 0; + int set_ci = 0; + struct mthca_eqe *eqe = next_eqe_sw(eq); + uint64_t start = cl_get_time_stamp(); + int loops = 0; + + while (eqe) { + + /* + * Make sure we read EQ entry contents after we've + * checked the ownership bit. 
+ */
+ rmb();
+
+ switch (eqe->type) {
+ case MTHCA_EVENT_TYPE_COMP:
+ disarm_cqn = cl_ntoh32(eqe->event.comp.cqn) & 0xffffff;
+ disarm_cq(dev, eq->eqn, disarm_cqn);
+ mthca_cq_completion(dev, disarm_cqn);
+ break;
+
+ case MTHCA_EVENT_TYPE_PATH_MIG:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_PATH_MIG, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_COMM_EST:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_COMM_EST, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_SQ_DRAINED:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_SQ_DRAINED, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_SRQ_QP_LAST_WQE:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_SRQ_QP_LAST_WQE_REACHED, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_SRQ_CATAS_ERROR:
+ mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
+ IB_EVENT_SRQ_ERR, eqe->event.srq.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_SRQ_LIMIT:
+ mthca_srq_event(dev, cl_ntoh32(eqe->event.srq.srqn) & 0xffffff,
+ IB_EVENT_SRQ_LIMIT_REACHED, eqe->event.srq.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_WQ_CATAS_ERROR:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_QP_FATAL, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_PATH_MIG_FAILED:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_PATH_MIG_ERR, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_QP_REQ_ERR, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_WQ_ACCESS_ERROR:
+ mthca_qp_event(dev, cl_ntoh32(eqe->event.qp.qpn) & 0xffffff,
+ IB_EVENT_QP_ACCESS_ERR, eqe->event.qp.vendor_code);
+ break;
+
+ case MTHCA_EVENT_TYPE_CMD:
+ mthca_cmd_event(dev,
+ cl_ntoh16(eqe->event.cmd.token),
+ eqe->event.cmd.status,
+ cl_ntoh64(eqe->event.cmd.out_param));
+ break;
+
+ case MTHCA_EVENT_TYPE_PORT_CHANGE:
+ port_change(dev,
+ (cl_ntoh32(eqe->event.port_change.port) >> 28) & 3,
+ eqe->subtype == 0x4);
+ break;
+
+ case MTHCA_EVENT_TYPE_CQ_ERROR:
+ HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("CQ %s on CQN %06x (syndrome %d)\n",
+ eqe->event.cq_err.syndrome == 1 ?
+ "overrun" : "access violation",
+ cl_ntoh32(eqe->event.cq_err.cqn) & 0xffffff, eqe->event.cq_err.syndrome));
+ mthca_cq_event(dev, cl_ntoh32(eqe->event.cq_err.cqn),
+ IB_EVENT_CQ_ERR);
+ break;
+
+ case MTHCA_EVENT_TYPE_EQ_OVERFLOW:
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("EQ overrun on EQN %d\n", eq->eqn));
+ break;
+
+ case MTHCA_EVENT_TYPE_EEC_CATAS_ERROR:
+ case MTHCA_EVENT_TYPE_LOCAL_CATAS_ERROR:
+ case MTHCA_EVENT_TYPE_ECC_DETECT:
+ default:
+ HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW, ("Unhandled event %02x(%02x) on EQ %d\n",
+ eqe->type, eqe->subtype, eq->eqn));
+ break;
+ }
+
+ set_eqe_hw(eqe);
+ ++eq->cons_index;
+ eqes_found += 1;
+ ++set_ci;
+
+ /*
+ * The HCA will think the queue has overflowed if we
+ * don't tell it we've been processing events. We
+ * create our EQs with MTHCA_NUM_SPARE_EQE extra
+ * entries, so we must update our consumer index at
+ * least that often.
+ */
+ if (unlikely(set_ci >= MTHCA_NUM_SPARE_EQE)) {
+ /*
+ * Conditional on hca_type is OK here because
+ * this is a rare case, not the fast path.
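 * (Illustration: with MTHCA_NUM_SPARE_EQE = 0x80, the consumer index
 * reaches the HCA at least once every 0x80 handled events, so the
 * ring is never more than its spare headroom behind the hardware.)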
+ */
+ set_eq_ci(dev, eq, eq->cons_index);
+ set_ci = 0;
+ }
+ loops++;
+ if (cl_get_time_stamp() - start > g_max_DPC_time_us ) {
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("EQ processing took too long; requeueing DPC after %d loops\n", loops));
+ KeInsertQueueDpc(&dev->eq_table.eq[eq->eq_num].dpc, NULL, NULL);
+ break;
+ }
+ eqe = next_eqe_sw(eq);
+ }
+
+ /*
+ * Rely on caller to set consumer index so that we don't have
+ * to test hca_type in our interrupt handling fast path.
+ */
+ return eqes_found;
+}
+
+static void mthca_tavor_dpc( PRKDPC dpc,
+ PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+ struct mthca_eq *eq = ctx;
+ struct mthca_dev *dev = eq->dev;
+ SPIN_LOCK_PREP(lh);
+
+ UNREFERENCED_PARAMETER(dpc);
+ UNREFERENCED_PARAMETER(arg1);
+ UNREFERENCED_PARAMETER(arg2);
+
+ spin_lock_dpc(&eq->lock, &lh);
+
+ /* the 'if' guards the case where two DPCs were queued for the same EQ */
+ if (mthca_eq_int(dev, eq)) {
+ tavor_set_eq_ci(dev, eq, eq->cons_index);
+ tavor_eq_req_not(dev, eq->eqn);
+ }
+
+ spin_unlock_dpc(&lh);
+}
+
+static BOOLEAN mthca_tavor_interrupt(
+ PKINTERRUPT int_obj,
+ PVOID ctx
+ )
+{
+ struct mthca_dev *dev = ctx;
+ u32 ecr;
+ int i;
+
+ UNREFERENCED_PARAMETER(int_obj);
+
+ if (dev->eq_table.clr_mask)
+ writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+ ecr = readl((u8*)dev->eq_regs.tavor.ecr_base + 4);
+ if (!ecr)
+ return FALSE;
+
+ writel(ecr, (u8*)dev->eq_regs.tavor.ecr_base +
+ MTHCA_ECR_CLR_BASE - MTHCA_ECR_BASE + 4);
+
+ for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+ if (ecr & dev->eq_table.eq[i].eqn_mask &&
+ next_eqe_sw(&dev->eq_table.eq[i])) {
+ KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+ }
+ }
+
+ return TRUE;
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_tavor_msi_x_interrupt(int irq, void *eq_ptr,
+ struct pt_regs *regs)
+{
+ struct mthca_eq *eq = eq_ptr;
+ struct mthca_dev *dev = eq->dev;
+
+ mthca_eq_int(dev, eq);
+ tavor_set_eq_ci(dev, eq, eq->cons_index);
+ tavor_eq_req_not(dev, eq->eqn);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED;
+}
+#endif
+
+static void mthca_arbel_dpc( PRKDPC dpc,
+ PVOID ctx, PVOID arg1, PVOID arg2 )
+{
+ struct mthca_eq *eq = ctx;
+ struct mthca_dev *dev = eq->dev;
+ SPIN_LOCK_PREP(lh);
+
+ UNREFERENCED_PARAMETER(dpc);
+ UNREFERENCED_PARAMETER(arg1);
+ UNREFERENCED_PARAMETER(arg2);
+
+ spin_lock_dpc(&eq->lock, &lh);
+
+ /* the 'if' guards the case where two DPCs were queued for the same EQ;
+ the EQ is re-armed in either case */
+ if (mthca_eq_int(dev, eq))
+ arbel_set_eq_ci(dev, eq, eq->cons_index);
+ arbel_eq_req_not(dev, eq->eqn_mask);
+
+ spin_unlock_dpc(&lh);
+}
+
+static BOOLEAN mthca_arbel_interrupt(
+ PKINTERRUPT int_obj,
+ PVOID ctx
+ )
+{
+ struct mthca_dev *dev = ctx;
+ int work = 0;
+ int i;
+
+ UNREFERENCED_PARAMETER(int_obj);
+
+ if (dev->eq_table.clr_mask)
+ writel(dev->eq_table.clr_mask, dev->eq_table.clr_int);
+
+ for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+ if (next_eqe_sw( &dev->eq_table.eq[i]) ) {
+ work = 1;
+ while(InterlockedCompareExchange(&dev->dpc_lock, 1, 0));
+
+ KeInsertQueueDpc(&dev->eq_table.eq[i].dpc, NULL, NULL);
+ InterlockedCompareExchange(&dev->dpc_lock, 0, 1);
+ } else {
+ arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask);
+ }
+ }
+
+ return (BOOLEAN)work;
+}
+
+#ifdef MSI_SUPPORT
+static irqreturn_t mthca_arbel_msi_x_interrupt(int irq, void *eq_ptr,
+ struct pt_regs *regs)
+{
+ struct mthca_eq *eq = eq_ptr;
+ struct mthca_dev *dev = eq->dev;
+
+ mthca_eq_int(dev, eq);
+ arbel_set_eq_ci(dev, eq, eq->cons_index);
+ arbel_eq_req_not(dev, eq->eqn_mask);
+
+ /* MSI-X vectors always belong to us */
+ return IRQ_HANDLED; +} +#endif + +static int mthca_create_eq(struct mthca_dev *dev, + int nent, + u8 intr, + struct mthca_eq *eq) +{ + int npages; + u64 *dma_list = NULL; + struct mthca_mailbox *mailbox; + struct mthca_eq_context *eq_context; + int err = -ENOMEM; + int i; + u8 status; + + HCA_ENTER(HCA_DBG_INIT); + eq->dev = dev; + eq->nent = roundup_pow_of_two(max(nent, 2)); + npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; + + eq->page_list = kmalloc(npages * sizeof *eq->page_list, + GFP_KERNEL); + if (!eq->page_list) + goto err_out; + + for (i = 0; i < npages; ++i) + eq->page_list[i].page = NULL; + + dma_list = kmalloc(npages * sizeof *dma_list, GFP_KERNEL); + if (!dma_list) + goto err_out_free; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_free; + eq_context = mailbox->buf; + + for (i = 0; i < npages; ++i) { + alloc_dma_zmem_map(dev, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &eq->page_list[i]); + if (!eq->page_list[i].page) + goto err_out_free_pages; + dma_list[i] = eq->page_list[i].dma_address; + } + + for (i = 0; i < eq->nent; ++i) + set_eqe_hw(get_eqe(eq, i)); + + eq->eqn = mthca_alloc(&dev->eq_table.alloc); + if (eq->eqn == -1) + goto err_out_free_pages; + + err = mthca_mr_alloc_phys(dev, dev->driver_pd.pd_num, + dma_list, PAGE_SHIFT, npages, + 0, npages * PAGE_SIZE, + MTHCA_MPT_FLAG_LOCAL_WRITE | + MTHCA_MPT_FLAG_LOCAL_READ, + &eq->mr); + if (err) + goto err_out_free_eq; + + RtlZeroMemory(eq_context, sizeof *eq_context); + eq_context->flags = cl_hton32(MTHCA_EQ_STATUS_OK | + MTHCA_EQ_OWNER_HW | + MTHCA_EQ_STATE_ARMED | + MTHCA_EQ_FLAG_TR); + if (mthca_is_memfree(dev)) + eq_context->flags |= cl_hton32(MTHCA_EQ_STATE_ARBEL); + + eq_context->logsize_usrpage = cl_hton32((ffs(eq->nent) - 1) << 24); + if (mthca_is_memfree(dev)) { + eq_context->arbel_pd = cl_hton32(dev->driver_pd.pd_num); + } else { + eq_context->logsize_usrpage |= cl_hton32(dev->driver_uar.index); + eq_context->tavor_pd = cl_hton32(dev->driver_pd.pd_num); + } + eq_context->intr = intr; + eq_context->lkey = cl_hton32(eq->mr.ibmr.lkey); + + err = mthca_SW2HW_EQ(dev, mailbox, eq->eqn, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("SW2HW_EQ failed (%d)\n", err)); + goto err_out_free_mr; + } + if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_LOW,("SW2HW_EQ returned status 0x%02x\n", + status)); + err = -EINVAL; + goto err_out_free_mr; + } + + kfree(dma_list); + mthca_free_mailbox(dev, mailbox); + + eq->eqn_mask = _byteswap_ulong(1 << eq->eqn); + eq->cons_index = 0; + + dev->eq_table.arm_mask |= eq->eqn_mask; + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_INIT ,("Allocated EQ %d with %d entries\n", + eq->eqn, eq->nent)); + + HCA_EXIT(HCA_DBG_INIT); + return err; + + err_out_free_mr: + mthca_free_mr(dev, &eq->mr); + + err_out_free_eq: + mthca_free(&dev->eq_table.alloc, eq->eqn); + + err_out_free_pages: + for (i = 0; i < npages; ++i) { + if (eq->page_list[i].page) { + free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL); + } + } + mthca_free_mailbox(dev, mailbox); + + err_out_free: + kfree(eq->page_list); + kfree(dma_list); + + err_out: + HCA_EXIT(HCA_DBG_INIT); + return err; +} + +static void mthca_free_eq(struct mthca_dev *dev, + struct mthca_eq *eq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / + PAGE_SIZE; + int i; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return; + + err = mthca_HW2SW_EQ(dev, mailbox, eq->eqn, 
&status);
+ if (err)
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_EQ failed (%d)\n", err));
+ if (status)
+ HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("HW2SW_EQ returned status 0x%02x\n", status));
+
+ dev->eq_table.arm_mask &= ~eq->eqn_mask;
+
+ { // debug print
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Dumping EQ context %02x:\n", eq->eqn));
+ for (i = 0; i < sizeof (struct mthca_eq_context) / 4; i=i+4) {
+ HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("[%02x] %08x %08x %08x %08x\n", i,
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + i * 4)),
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+1)*4)),
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+2)*4)),
+ cl_ntoh32(*(u32*)((u8*)mailbox->buf + (i+3)*4))));
+
+ }
+ }
+
+ mthca_free_mr(dev, &eq->mr);
+ for (i = 0; i < npages; ++i) {
+ free_dma_mem_map(dev, &eq->page_list[i], PCI_DMA_BIDIRECTIONAL);
+ }
+
+ kfree(eq->page_list);
+ mthca_free_mailbox(dev, mailbox);
+}
+
+static void mthca_free_irqs(struct mthca_dev *dev)
+{
+ if (dev->eq_table.have_irq)
+ free_irq(dev->ext->int_obj);
+#ifdef MSI_SUPPORT
+ {
+ int i;
+ for (i = 0; i < MTHCA_NUM_EQ; ++i)
+ if (dev->eq_table.eq[i].have_irq)
+ free_irq(dev->eq_table.eq[i].msi_x_vector,
+ dev->eq_table.eq + i);
+ }
+#endif
+}
+
+static int mthca_map_reg(struct mthca_dev *dev,
+ u64 offset, unsigned long size,
+ void __iomem **map, SIZE_T *map_size)
+{
+ u64 base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
+ *map = ioremap(base + offset, size, map_size);
+ if (!*map)
+ return -ENOMEM;
+ return 0;
+}
+
+static void mthca_unmap_reg(struct mthca_dev *dev, u64 offset,
+ unsigned long size, void __iomem *map, SIZE_T map_size)
+{
+ UNREFERENCED_PARAMETER(dev);
+ UNREFERENCED_PARAMETER(size);
+ UNREFERENCED_PARAMETER(offset);
+ iounmap(map, map_size);
+}
+
+static int mthca_map_eq_regs(struct mthca_dev *dev)
+{
+ u64 mthca_base;
+
+ mthca_base = pci_resource_start(dev, HCA_BAR_TYPE_HCR);
+
+ if (mthca_is_memfree(dev)) {
+ /*
+ * We assume that the EQ arm and EQ set CI registers
+ * fall within the first BAR. We can't trust the
+ * values firmware gives us, since those addresses are
+ * valid on the HCA's side of the PCI bus but not
+ * necessarily the host side.
+ */
+ if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) &
+ dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE,
+ &dev->clr_base, &dev->clr_base_size)) {
+ HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, "
+ "aborting.\n"));
+ return -ENOMEM;
+ }
+
+ /*
+ * Add 4 because we limit ourselves to EQs 0 ... 31,
+ * so we only need the low word of the register.
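 * (Illustration, assuming the usual big-endian register layout on
 * these HCAs: the EQ arm register is 64 bits wide, and for EQN masks
 * 0..31 only its low 32 bits matter; in big-endian byte order those
 * sit at byte offset 4.)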
+ */ + if (mthca_map_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + &dev->eq_regs.arbel.eq_arm, &dev->eq_regs.arbel.eq_arm_size)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Couldn't map EQ arm register, aborting.\n")); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + + if (mthca_map_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_set_ci_base, + MTHCA_EQ_SET_CI_SIZE, + &dev->eq_regs.arbel.eq_set_ci_base, + &dev->eq_regs.arbel.eq_set_ci_base_size + )) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Couldn't map EQ CI register, aborting.\n")); + mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + dev->eq_regs.arbel.eq_arm, dev->eq_regs.arbel.eq_arm_size); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + } else { + if (mthca_map_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + &dev->clr_base, &dev->clr_base_size)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map interrupt clear register, " + "aborting.\n")); + return -ENOMEM; + } + + if (mthca_map_reg(dev, MTHCA_ECR_BASE, + MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, + &dev->eq_regs.tavor.ecr_base, &dev->eq_regs.tavor.ecr_base_size)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map ecr register, " + "aborting.\n")); + mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + return -ENOMEM; + } + } + + return 0; + +} + +static void mthca_unmap_eq_regs(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) { + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_set_ci_base, + MTHCA_EQ_SET_CI_SIZE, + dev->eq_regs.arbel.eq_set_ci_base, + dev->eq_regs.arbel.eq_set_ci_base_size); + mthca_unmap_reg(dev, ((pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.eq_arm_base) + 4, 4, + dev->eq_regs.arbel.eq_arm, + dev->eq_regs.arbel.eq_arm_size); + mthca_unmap_reg(dev, (pci_resource_len(dev, 0) - 1) & + dev->fw.arbel.clr_int_base, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + } else { + mthca_unmap_reg(dev, MTHCA_ECR_BASE, + MTHCA_ECR_SIZE + MTHCA_ECR_CLR_SIZE, + dev->eq_regs.tavor.ecr_base, + dev->eq_regs.tavor.ecr_base_size); + mthca_unmap_reg(dev, MTHCA_CLR_INT_BASE, MTHCA_CLR_INT_SIZE, + dev->clr_base, dev->clr_base_size); + } +} + +int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt) +{ + int ret; + u8 status; + + /* + * We assume that mapping one page is enough for the whole EQ + * context table. This is fine with all current HCAs, because + * we only use 32 EQs and each EQ uses 32 bytes of context + * memory, or 1 KB total. 
+ */ + dev->eq_table.icm_virt = icm_virt; + alloc_dma_zmem_map(dev,PAGE_SIZE, PCI_DMA_BIDIRECTIONAL, &dev->eq_table.sg); + if (!dev->eq_table.sg.page) + return -ENOMEM; + + ret = mthca_MAP_ICM_page(dev, dev->eq_table.sg.dma_address, icm_virt, &status); + if (!ret && status) + ret = -EINVAL; + if (ret) + free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL ); + + return ret; +} + +void mthca_unmap_eq_icm(struct mthca_dev *dev) +{ + u8 status; + + mthca_UNMAP_ICM(dev, dev->eq_table.icm_virt, PAGE_SIZE / 4096, &status); + free_dma_mem_map(dev, &dev->eq_table.sg, PCI_DMA_BIDIRECTIONAL ); +} + +int mthca_init_eq_table(struct mthca_dev *dev) +{ + int err; + u8 status; + u8 intr; + int i; + + HCA_ENTER(HCA_DBG_INIT); + err = mthca_alloc_init(&dev->eq_table.alloc, + dev->limits.num_eqs, + dev->limits.num_eqs - 1, + dev->limits.reserved_eqs); + if (err) + return err; + + err = mthca_map_eq_regs(dev); + if (err) + goto err_out_free; + +#ifdef MSI_SUPPORT + if (dev->mthca_flags & MTHCA_FLAG_MSI || + dev->mthca_flags & MTHCA_FLAG_MSI_X) { + dev->eq_table.clr_mask = 0; + } else +#endif + { + dev->eq_table.clr_mask = + _byteswap_ulong(1 << (dev->eq_table.inta_pin & 31)); + dev->eq_table.clr_int = dev->clr_base + + (dev->eq_table.inta_pin < 32 ? 4 : 0); + } + + dev->eq_table.arm_mask = 0; + + intr = (dev->mthca_flags & MTHCA_FLAG_MSI) ? + 128 : dev->eq_table.inta_pin; + + err = mthca_create_eq(dev, dev->limits.num_cqs + MTHCA_NUM_SPARE_EQE, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 128 : intr, + &dev->eq_table.eq[MTHCA_EQ_COMP]); + if (err) + goto err_out_unmap; + + err = mthca_create_eq(dev, MTHCA_NUM_ASYNC_EQE + MTHCA_NUM_SPARE_EQE, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 129 : intr, + &dev->eq_table.eq[MTHCA_EQ_ASYNC]); + if (err) + goto err_out_comp; + + err = mthca_create_eq(dev, MTHCA_NUM_CMD_EQE + MTHCA_NUM_SPARE_EQE, + (dev->mthca_flags & MTHCA_FLAG_MSI_X) ? 130 : intr, + &dev->eq_table.eq[MTHCA_EQ_CMD]); + if (err) + goto err_out_async; + +#ifdef MSI_SUPPORT + if (dev->mthca_flags & MTHCA_FLAG_MSI_X) { + static const char *eq_name[] = { + [MTHCA_EQ_COMP] = DRV_NAME " (comp)", + [MTHCA_EQ_ASYNC] = DRV_NAME " (async)", + [MTHCA_EQ_CMD] = DRV_NAME " (cmd)" + }; + + for (i = 0; i < MTHCA_NUM_EQ; ++i) { + err = request_irq(dev->eq_table.eq[i].msi_x_vector, + mthca_is_memfree(dev) ? + mthca_arbel_msi_x_interrupt : + mthca_tavor_msi_x_interrupt, + 0, eq_name[i], dev->eq_table.eq + i); + if (err) + goto err_out_cmd; + dev->eq_table.eq[i].have_irq = 1; + /* init DPC stuff something like that */ + spin_lock_init( &dev->eq_table.eq[i].lock ); + dev->dpc_lock = 0; + KeInitializeDpc( + &dev->eq_table.eq[i].dpc, + mthca_is_memfree(dev) ? + mthca_arbel_msi_x_dpc : + mthca_tavor_msi_x_dpc, + dev->eq_table.eq + i); + } + } else +#endif + { + spin_lock_init( &dev->ext->isr_lock ); + err = request_irq( + &dev->ext->interruptInfo, + &dev->ext->isr_lock.lock , + mthca_is_memfree(dev) ? mthca_arbel_interrupt : mthca_tavor_interrupt, + dev, + &dev->ext->int_obj + ); + if (err) + goto err_out_cmd; + dev->eq_table.have_irq = 1; + + /* init DPC stuff */ + for (i = 0; i < MTHCA_NUM_EQ; ++i) { + spin_lock_init( &dev->eq_table.eq[i].lock ); + KeInitializeDpc( + &dev->eq_table.eq[i].dpc, + mthca_is_memfree(dev) ? 
+ mthca_arbel_dpc : + mthca_tavor_dpc, + dev->eq_table.eq + i); + dev->eq_table.eq[i].eq_num = i; + } + } + + err = mthca_MAP_EQ(dev, async_mask(dev), + 0, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); + if (err) + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for async EQ %d failed (%d)\n", + dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, err)); + if (status) + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for async EQ %d returned status 0x%02x\n", + dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, status)); + err = mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, + 0, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); + if (err) + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT, ("MAP_EQ for cmd EQ %d failed (%d)\n", + dev->eq_table.eq[MTHCA_EQ_CMD].eqn, err)); + if (status) + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_INIT,("MAP_EQ for cmd EQ %d returned status 0x%02x\n", + dev->eq_table.eq[MTHCA_EQ_CMD].eqn, status)); + + for (i = 0; i < MTHCA_NUM_EQ; ++i) + if (mthca_is_memfree(dev)) + arbel_eq_req_not(dev, dev->eq_table.eq[i].eqn_mask); + else + tavor_eq_req_not(dev, dev->eq_table.eq[i].eqn); + + return 0; + +err_out_cmd: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_CMD]); + +err_out_async: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_ASYNC]); + +err_out_comp: + mthca_free_eq(dev, &dev->eq_table.eq[MTHCA_EQ_COMP]); + +err_out_unmap: + mthca_unmap_eq_regs(dev); + +err_out_free: + mthca_alloc_cleanup(&dev->eq_table.alloc); + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_INIT ,("mthca_init_eq failed %d\n",err)); + return err; +} + +void mthca_cleanup_eq_table(struct mthca_dev *dev) +{ + u8 status; + int i; + + mthca_free_irqs(dev); + + mthca_MAP_EQ(dev, async_mask(dev), + 1, dev->eq_table.eq[MTHCA_EQ_ASYNC].eqn, &status); + mthca_MAP_EQ(dev, MTHCA_CMD_EVENT_MASK, + 1, dev->eq_table.eq[MTHCA_EQ_CMD].eqn, &status); + + for (i = 0; i < MTHCA_NUM_EQ; ++i) + mthca_free_eq(dev, &dev->eq_table.eq[i]); + + mthca_unmap_eq_regs(dev); + + mthca_alloc_cleanup(&dev->eq_table.alloc); +} + + + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_log.c b/branches/Ndi/hw/mthca/kernel/mthca_log.c new file mode 100644 index 00000000..52024600 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_log.c @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2005 Mellanox Technologies LTD. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ */
+
+// Author: Yossi Leybovich
+
+#include "hca_driver.h"
+
+
+VOID
+WriteEventLogEntry(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ ULONG pi_nDataItems,
+ ...
+ )
+/*++
+
+Routine Description:
+ Writes an event log entry to the event log.
+
+Arguments:
+
+ pi_pIoObject......... The IO object ( driver object or device object ).
+ pi_ErrorCode......... The error code.
+ pi_UniqueErrorCode... A specific error code.
+ pi_FinalStatus....... The final status.
+ pi_nDataItems........ Number of data items.
+ .
+ . data items values
+ .
+
+Return Value:
+
+ None.
+
+--*/
+{ /* WriteEventLogEntry */
+
+ /* Variable argument list */
+ va_list l_Argptr;
+ /* Pointer to an error log entry */
+ PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
+
+ /* Init the variable argument list */
+ va_start(l_Argptr, pi_nDataItems);
+
+ /* Allocate an error log entry */
+ l_pErrorLogEntry =
+ (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+ pi_pIoObject,
+ (UCHAR)(sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG))
+ );
+ /* Check allocation */
+ if ( l_pErrorLogEntry != NULL)
+ { /* OK */
+
+ /* Data item index */
+ USHORT l_nDataItem ;
+
+ /* Set the error log entry header */
+ l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
+ l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
+ l_pErrorLogEntry->SequenceNumber = 0;
+ l_pErrorLogEntry->MajorFunctionCode = 0;
+ l_pErrorLogEntry->IoControlCode = 0;
+ l_pErrorLogEntry->RetryCount = 0;
+ l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
+ l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
+
+ /* Insert the data items */
+ for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
+ { /* Insert a data item */
+
+ /* Current data item */
+ int l_CurDataItem ;
+
+ /* Get next data item */
+ l_CurDataItem = va_arg( l_Argptr, int);
+
+ /* Put it into the data array */
+ l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+ } /* Insert a data item */
+
+ /* Write the packet */
+ IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+ } /* OK */
+
+ /* Term the variable argument list */
+ va_end(l_Argptr);
+
+} /* WriteEventLogEntry */
+
+/*------------------------------------------------------------------------------------------------------*/
+
+VOID
+WriteEventLogEntryStr(
+ PVOID pi_pIoObject,
+ ULONG pi_ErrorCode,
+ ULONG pi_UniqueErrorCode,
+ ULONG pi_FinalStatus,
+ PWCHAR pi_InsertionStr,
+ ULONG pi_nDataItems,
+ ...
+ )
+/*++
+
+Routine Description:
+ Writes an event log entry to the event log.
+
+Arguments:
+
+ pi_pIoObject......... The IO object ( driver object or device object ).
+ pi_ErrorCode......... The error code.
+ pi_UniqueErrorCode... A specific error code.
+ pi_FinalStatus....... The final status.
+ pi_InsertionStr...... The insertion string to append to the entry.
+ pi_nDataItems........ Number of data items.
+ .
+ . data items values
+ .
+
+Return Value:
+
+ None.
+
+--*/
+{ /* WriteEventLogEntryStr */
+
+ /* Variable argument list */
+ va_list l_Argptr;
+ /* Pointer to an error log entry */
+ PIO_ERROR_LOG_PACKET l_pErrorLogEntry;
+ /* sizeof insertion string */
+ int l_Size = (int)((pi_InsertionStr) ?
((wcslen(pi_InsertionStr) + 1) * sizeof( WCHAR )) : 0);
+ int l_PktSize =sizeof(IO_ERROR_LOG_PACKET)+pi_nDataItems*sizeof(ULONG);
+ int l_TotalSize =l_PktSize +l_Size;
+
+ /* Init the variable argument list */
+ va_start(l_Argptr, pi_nDataItems);
+
+ /* Allocate an error log entry */
+ if (l_TotalSize >= ERROR_LOG_MAXIMUM_SIZE - 2)
+ l_TotalSize = ERROR_LOG_MAXIMUM_SIZE - 2;
+ l_pErrorLogEntry = (PIO_ERROR_LOG_PACKET)IoAllocateErrorLogEntry(
+ pi_pIoObject, (UCHAR)l_TotalSize );
+
+ /* Check allocation */
+ if ( l_pErrorLogEntry != NULL)
+ { /* OK */
+
+ /* Data item index */
+ USHORT l_nDataItem ;
+
+ /* Set the error log entry header */
+ l_pErrorLogEntry->ErrorCode = pi_ErrorCode;
+ l_pErrorLogEntry->DumpDataSize = (USHORT) (pi_nDataItems*sizeof(ULONG));
+ l_pErrorLogEntry->SequenceNumber = 0;
+ l_pErrorLogEntry->MajorFunctionCode = 0;
+ l_pErrorLogEntry->IoControlCode = 0;
+ l_pErrorLogEntry->RetryCount = 0;
+ l_pErrorLogEntry->UniqueErrorValue = pi_UniqueErrorCode;
+ l_pErrorLogEntry->FinalStatus = pi_FinalStatus;
+
+ /* Insert the data items */
+ for (l_nDataItem = 0; l_nDataItem < pi_nDataItems; l_nDataItem++)
+ { /* Insert a data item */
+
+ /* Current data item */
+ int l_CurDataItem ;
+
+ /* Get next data item */
+ l_CurDataItem = va_arg( l_Argptr, int);
+
+ /* Put it into the data array */
+ l_pErrorLogEntry->DumpData[l_nDataItem] = l_CurDataItem ;
+
+ } /* Insert a data item */
+
+ /* add insertion string */
+ if (pi_InsertionStr) {
+ char *ptr;
+ int sz = min( l_TotalSize - l_PktSize, l_Size );
+ l_pErrorLogEntry->NumberOfStrings = 1;
+ l_pErrorLogEntry->StringOffset = sizeof(IO_ERROR_LOG_PACKET) + l_pErrorLogEntry->DumpDataSize;
+ ptr = (char*)l_pErrorLogEntry + l_pErrorLogEntry->StringOffset;
+ memcpy( ptr, pi_InsertionStr, sz );
+ *(WCHAR*)&ptr[sz - 2] = (WCHAR)0;
+ }
+
+ /* Write the packet */
+ IoWriteErrorLogEntry(l_pErrorLogEntry);
+
+ } /* OK */
+
+ /* Term the variable argument list */
+ va_end(l_Argptr);
+
+} /* WriteEventLogEntryStr */
+
+
+
+
+
 diff --git a/branches/Ndi/hw/mthca/kernel/mthca_log.mc b/branches/Ndi/hw/mthca/kernel/mthca_log.mc new file mode 100644 index 00000000..08cbddae --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_log.mc @@ -0,0 +1,56 @@
+;/*++
+;=============================================================================
+;Copyright (c) 2001 Mellanox Technologies
+;
+;Module Name:
+;
+; mthcalog.mc
+;
+;Abstract:
+;
+; MTHCA Driver event log messages
+;
+;Authors:
+;
+; Yossi Leybovich
+;
+;Environment:
+;
+; Kernel Mode .
+;
+;=============================================================================
+;--*/
+;
+MessageIdTypedef = NTSTATUS
+
+SeverityNames = (
+ Success = 0x0:STATUS_SEVERITY_SUCCESS
+ Informational = 0x1:STATUS_SEVERITY_INFORMATIONAL
+ Warning = 0x2:STATUS_SEVERITY_WARNING
+ Error = 0x3:STATUS_SEVERITY_ERROR
+ )
+
+FacilityNames = (
+ System = 0x0
+ RpcRuntime = 0x2:FACILITY_RPC_RUNTIME
+ RpcStubs = 0x3:FACILITY_RPC_STUBS
+ Io = 0x4:FACILITY_IO_ERROR_CODE
+ MTHCA = 0x7:FACILITY_MTHCA_ERROR_CODE
+ )
+
+
+MessageId=0x0001 Facility=MTHCA Severity=Informational SymbolicName=EVENT_MTHCA_ANY_INFO
+Language=English
+%2
+.
+
+MessageId=0x0002 Facility=MTHCA Severity=Warning SymbolicName=EVENT_MTHCA_ANY_WARN
+Language=English
+%2
+.
+
+MessageId=0x0003 Facility=MTHCA Severity=Error SymbolicName=EVENT_MTHCA_ANY_ERROR
+Language=English
+%2
+.
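A minimal usage sketch of the logging facility above, assuming the EVENT_MTHCA_ANY_ERROR constant generated from mthca_log.mc and the WriteEventLogEntryStr helper from mthca_log.c; the wrapper name report_fw_error and its insertion text are hypothetical, not taken from the driver. The message bodies above are just "%2" because the event viewer substitutes the reporting device for %1 and the packet's insertion strings starting at %2.

/* Hypothetical caller: log a firmware failure with one dump-data item. */
static VOID report_fw_error( PDEVICE_OBJECT p_dev_obj, NTSTATUS final_status )
{
	WriteEventLogEntryStr( p_dev_obj, (ULONG)EVENT_MTHCA_ANY_ERROR,
		0, (ULONG)final_status, L"QUERY_FW command failed",
		1, (int)final_status );
}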
+
 diff --git a/branches/Ndi/hw/mthca/kernel/mthca_log.rc b/branches/Ndi/hw/mthca/kernel/mthca_log.rc new file mode 100644 index 00000000..116522b7 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_log.rc @@ -0,0 +1,2 @@
+LANGUAGE 0x9,0x1
+1 11 MSG00001.bin
 diff --git a/branches/Ndi/hw/mthca/kernel/mthca_mad.c b/branches/Ndi/hw/mthca/kernel/mthca_mad.c new file mode 100644 index 00000000..07dee658 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_mad.c @@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2004 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include <ib_verbs.h>
+#include <ib_mad.h>
+#include <ib_smi.h>
+
+#include "mthca_dev.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "mthca_mad.tmh"
+#endif
+#include "mthca_cmd.h"
+
+enum {
+ MTHCA_VENDOR_CLASS1 = 0x9,
+ MTHCA_VENDOR_CLASS2 = 0xa
+};
+
+struct mthca_trap_mad {
+ struct scatterlist sg;
+};
+
+static void update_sm_ah(struct mthca_dev *dev,
+ u8 port_num, u16 lid, u8 sl)
+{
+ struct ib_ah *new_ah;
+ struct ib_ah_attr ah_attr;
+ SPIN_LOCK_PREP(lh);
+
+ if (!dev->send_agent[port_num - 1][0])
+ return;
+
+ RtlZeroMemory(&ah_attr, sizeof ah_attr);
+ ah_attr.dlid = lid;
+ ah_attr.sl = sl;
+ ah_attr.port_num = port_num;
+
+ new_ah = ibv_create_ah(dev->send_agent[port_num - 1][0]->qp->pd,
+ &ah_attr, NULL, NULL);
+ if (IS_ERR(new_ah))
+ return;
+
+ spin_lock_irqsave(&dev->sm_lock, &lh);
+ if (dev->sm_ah[port_num - 1]) {
+ ibv_destroy_ah(dev->sm_ah[port_num - 1]);
+ }
+ dev->sm_ah[port_num - 1] = new_ah;
+ spin_unlock_irqrestore(&lh);
+}
+
+/*
+ * Snoop SM MADs for port info and P_Key table sets, so we can
+ * synthesize LID change and P_Key change events.
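 * (For orientation: in smp_snoop() below, mad->data + 58 is the
 * Set(PortInfo) SM_LID field and the low nibble of mad->data + 76 is
 * the SM SL; both feed update_sm_ah() above.)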
+ */ +static void smp_snoop(struct ib_device *ibdev, + u8 port_num, + struct ib_mad *mad) +{ + struct ib_event event; + + if ((mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) && + mad->mad_hdr.method == IB_MGMT_METHOD_SET) { + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO) { + update_sm_ah(to_mdev(ibdev), port_num, + cl_ntoh16(*(__be16 *) (mad->data + 58)), + (*(u8 *) (mad->data + 76)) & 0xf); + + event.device = ibdev; + event.event = IB_EVENT_LID_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + + if (mad->mad_hdr.attr_id == IB_SMP_ATTR_PKEY_TABLE) { + event.device = ibdev; + event.event = IB_EVENT_PKEY_CHANGE; + event.element.port_num = port_num; + ib_dispatch_event(&event); + } + } +} + +static void forward_trap(struct mthca_dev *dev, + u8 port_num, + struct ib_mad *mad) +{ + int qpn = mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_SUBN_LID_ROUTED; + struct mthca_trap_mad *tmad; + struct ib_sge gather_list; + struct _ib_send_wr wr; + struct ib_mad_agent *agent = dev->send_agent[port_num - 1][qpn]; + int ret; + SPIN_LOCK_PREP(lh); + + /* fill the template */ + wr.ds_array = (ib_local_ds_t* __ptr64)(void*)&gather_list; + wr.num_ds = 1; + wr.wr_type = WR_SEND; + wr.send_opt = IB_SEND_OPT_SIGNALED; + wr.dgrm.ud.remote_qp = cl_hton32(qpn); + wr.dgrm.ud.remote_qkey = qpn ? IB_QP1_QKEY : 0; + + if (agent) { + tmad = kmalloc(sizeof *tmad, GFP_KERNEL); + if (!tmad) + return; + + alloc_dma_zmem(dev, sizeof *mad, &tmad->sg); + if (!tmad->sg.page) { + kfree(tmad); + return; + } + + memcpy(tmad->sg.page, mad, sizeof *mad); + + wr.dgrm.ud.rsvd = (void* __ptr64)&((struct ib_mad *)tmad->sg.page)->mad_hdr; + wr.wr_id = (u64)(ULONG_PTR)tmad; + gather_list.addr = tmad->sg.dma_address; + gather_list.length = tmad->sg.length; + gather_list.lkey = to_mpd(agent->qp->pd)->ntmr.ibmr.lkey; + + /* + * We rely here on the fact that MLX QPs don't use the + * address handle after the send is posted (this is + * wrong following the IB spec strictly, but we know + * it's OK for our devices). + */ + spin_lock_irqsave(&dev->sm_lock, &lh); + wr.dgrm.ud.h_av = (ib_av_handle_t)dev->sm_ah[port_num - 1]; + if (wr.dgrm.ud.h_av) { + HCA_PRINT( TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,(" ib_post_send_mad not ported \n" )); + ret = -EINVAL; + } + else + ret = -EINVAL; + spin_unlock_irqrestore(&lh); + + if (ret) { + free_dma_mem_map(dev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); + kfree(tmad); + } + } +} + +int mthca_process_mad(struct ib_device *ibdev, + int mad_flags, + u8 port_num, + struct _ib_wc *in_wc, + struct _ib_grh *in_grh, + struct ib_mad *in_mad, + struct ib_mad *out_mad) +{ + int err; + u8 status; + u16 slid = in_wc ? 
in_wc->recv.ud.remote_lid : cl_ntoh16(IB_LID_PERMISSIVE); + + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("in: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x\n", + (u32)in_mad->mad_hdr.mgmt_class, (u32)in_mad->mad_hdr.method, + (u32)in_mad->mad_hdr.attr_id, in_mad->mad_hdr.attr_mod, + (u32)in_mad->mad_hdr.class_specific, in_mad->mad_hdr.tid )); + + /* Forward locally generated traps to the SM */ + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP && + slid == 0) { + forward_trap(to_mdev(ibdev), port_num, in_mad); + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Not sent, but locally forwarded\n")); + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + /* + * Only handle SM gets, sets and trap represses for SM class + * + * Only handle PMA and Mellanox vendor-specific class gets and + * sets for other classes. + */ + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_LID_ROUTED || + in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { + + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_TRAP_REPRESS) { + HCA_PRINT( TRACE_LEVEL_VERBOSE,HCA_DBG_MAD,(" Skip some methods. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + + /* + * Don't process SMInfo queries or vendor-specific + * MADs -- the SMA can't handle them. + */ + if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_SM_INFO || + ((in_mad->mad_hdr.attr_id & IB_SMP_ATTR_VENDOR_MASK) == + IB_SMP_ATTR_VENDOR_MASK)) { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip SMInfo queries or vendor-specific MADs. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + else { + if (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS1 || + in_mad->mad_hdr.mgmt_class == MTHCA_VENDOR_CLASS2) { + + if (in_mad->mad_hdr.method != IB_MGMT_METHOD_GET && + in_mad->mad_hdr.method != IB_MGMT_METHOD_SET) { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip some management methods. Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + else { + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD ,("Skip IB_MGMT_CLASS_PERF_MGMT et al. 
Nothing done !\n")); + return IB_MAD_RESULT_SUCCESS; + } + } + + // send MAD + err = mthca_MAD_IFC(to_mdev(ibdev), + mad_flags & IB_MAD_IGNORE_MKEY, + mad_flags & IB_MAD_IGNORE_BKEY, + port_num, in_wc, in_grh, in_mad, out_mad, + &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC failed\n")); + return IB_MAD_RESULT_FAILURE; + } + if (status == MTHCA_CMD_STAT_BAD_PKT) + return IB_MAD_RESULT_SUCCESS; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MAD ,("MAD_IFC returned status %02x\n", status)); + return IB_MAD_RESULT_FAILURE; + } + + if (!out_mad->mad_hdr.status) + smp_snoop(ibdev, port_num, in_mad); + + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_MAD,("out: Class %02x, Method %02x, AttrId %x, AttrMod %x, ClSpec %x, Tid %I64x, Status %x\n", + (u32)out_mad->mad_hdr.mgmt_class, (u32)out_mad->mad_hdr.method, + (u32)out_mad->mad_hdr.attr_id, out_mad->mad_hdr.attr_mod, + (u32)out_mad->mad_hdr.class_specific, out_mad->mad_hdr.tid, + (u32)out_mad->mad_hdr.status )); + + if (in_mad->mad_hdr.method == IB_MGMT_METHOD_TRAP_REPRESS) { + /* no response for trap repress */ + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; + } + + return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; +} + +static void send_handler(struct ib_mad_agent *agent, + struct ib_mad_send_wc *mad_send_wc) +{ + struct mthca_trap_mad *tmad = + (void *) (ULONG_PTR) mad_send_wc->wr_id; + + free_dma_mem_map(agent->device->mdev, &tmad->sg, PCI_DMA_BIDIRECTIONAL ); + kfree(tmad); +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_main.c b/branches/Ndi/hw/mthca/kernel/mthca_main.c new file mode 100644 index 00000000..3137a6b2 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_main.c @@ -0,0 +1,1108 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_main.tmh" +#endif +#include "mthca_config_reg.h" +#include "mthca_cmd.h" +#include "mthca_profile.h" +#include "mthca_memfree.h" + +static const char mthca_version[] = + DRV_NAME ": HCA Driver v" + DRV_VERSION " (" DRV_RELDATE ")"; + +static struct mthca_profile default_profile = { + 1 << 16, // num_qp + 4, // rdb_per_qp + 0, // num_srq + 1 << 16, // num_cq + 1 << 13, // num_mcg + 1 << 17, // num_mpt + 1 << 20, // num_mtt + 1 << 15, // num_udav (Tavor only) + 0, // num_uar + 1 << 18, // uarc_size (Arbel only) + 1 << 18, // fmr_reserved_mtts (Tavor only) +}; + +/* Types of supported HCA */ +enum __hca_type { + TAVOR, /* MT23108 */ + ARBEL_COMPAT, /* MT25208 in Tavor compat mode */ + ARBEL_NATIVE, /* MT25218 with extended features */ + SINAI, /* MT25204 */ + LIVEFISH /* a burning device */ +}; + +#define MTHCA_FW_VER(major, minor, subminor) \ + (((u64) (major) << 32) | ((u64) (minor) << 16) | (u64) (subminor)) + +static struct { + u64 max_unsupported_fw; + u64 min_supported_fw; + int is_memfree; + int is_pcie; +} mthca_hca_table[] = { + { MTHCA_FW_VER(3, 3, 2), MTHCA_FW_VER(3, 4, 0), 0, 0 }, /* TAVOR */ + { MTHCA_FW_VER(4, 7, 0), MTHCA_FW_VER(4, 7, 400), 0, 1 }, /* ARBEL_COMPAT */ + { MTHCA_FW_VER(5, 1, 0), MTHCA_FW_VER(5, 1, 400), 1, 1 }, /* ARBEL_NATIVE */ + { MTHCA_FW_VER(1, 0, 800), MTHCA_FW_VER(1, 1, 0), 1, 1 }, /* SINAI */ + { MTHCA_FW_VER(0, 0, 0), MTHCA_FW_VER(0, 0, 0), 0, 0 } /* LIVEFISH */ +}; + + +#define HCA(v, d, t) \ + { PCI_VENDOR_ID_##v, PCI_DEVICE_ID_MELLANOX_##d, t } + +static struct pci_device_id { + unsigned vendor; + unsigned device; + enum __hca_type driver_data; +} mthca_pci_table[] = { + HCA(MELLANOX, TAVOR, TAVOR), + HCA(MELLANOX, ARBEL_COMPAT, ARBEL_COMPAT), + HCA(MELLANOX, ARBEL, ARBEL_NATIVE), + HCA(MELLANOX, SINAI_OLD, SINAI), + HCA(MELLANOX, SINAI, SINAI), + HCA(TOPSPIN, TAVOR, TAVOR), + HCA(TOPSPIN, ARBEL_COMPAT, TAVOR), + HCA(TOPSPIN, ARBEL, ARBEL_NATIVE), + HCA(TOPSPIN, SINAI_OLD, SINAI), + HCA(TOPSPIN, SINAI, SINAI), + // live fishes + HCA(MELLANOX, TAVOR_BD, LIVEFISH), + HCA(MELLANOX, ARBEL_BD, LIVEFISH), + HCA(MELLANOX, SINAI_OLD_BD, LIVEFISH), + HCA(MELLANOX, SINAI_BD, LIVEFISH), + HCA(TOPSPIN, TAVOR_BD, LIVEFISH), + HCA(TOPSPIN, ARBEL_BD, LIVEFISH), + HCA(TOPSPIN, SINAI_OLD_BD, LIVEFISH), + HCA(TOPSPIN, SINAI_BD, LIVEFISH), +}; +#define MTHCA_PCI_TABLE_SIZE (sizeof(mthca_pci_table)/sizeof(struct pci_device_id)) + +// wrapper to driver's hca_tune_pci +static NTSTATUS mthca_tune_pci(struct mthca_dev *mdev) +{ + PDEVICE_OBJECT pdo = mdev->ext->cl_ext.p_self_do; + return hca_tune_pci(pdo, &mdev->uplink_info); +} + +int mthca_get_dev_info(struct mthca_dev *mdev, __be64 *node_guid, u32 *hw_id) +{ + struct ib_device_attr props; + struct ib_device *ib_dev = &mdev->ib_dev; + int err = (ib_dev->query_device )(ib_dev, &props ); + + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("can't get guid - mthca_query_port() failed (%08X)\n", err )); + return err; + } + + //TODO: do we need to convert GUID to LE by cl_ntoh64(x) ? 
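/*
 * (Worked example of the MTHCA_FW_VER encoding used by mthca_hca_table
 * above, for illustration only: MTHCA_FW_VER(3, 4, 0) packs to
 * ((u64)3 << 32) | ((u64)4 << 16) | 0 = 0x0000000300040000, so plain
 * u64 comparisons against max_unsupported_fw / min_supported_fw order
 * firmware versions by major, then minor, then subminor.)
 */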
+ *node_guid = ib_dev->node_guid;
+ *hw_id = props.hw_ver;
+ return 0;
+}
+
+static struct pci_device_id * mthca_find_pci_dev(unsigned ven_id, unsigned dev_id)
+{
+ struct pci_device_id *p_id = mthca_pci_table;
+ int i;
+
+ // find p_id (appropriate line in mthca_pci_table)
+ for (i = 0; i < MTHCA_PCI_TABLE_SIZE; ++i, ++p_id) {
+ if (p_id->device == dev_id && p_id->vendor == ven_id)
+ return p_id;
+ }
+ return NULL;
+}
+
+
+static int mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim *dev_lim)
+{
+ int err;
+ u8 status;
+
+ err = mthca_QUERY_DEV_LIM(mdev, dev_lim, &status);
+ if (err) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_DEV_LIM command failed, aborting.\n"));
+ return err;
+ }
+ if (status) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_DEV_LIM returned status 0x%02x, "
+ "aborting.\n", status));
+ return -EINVAL;
+ }
+ if (dev_lim->min_page_sz > PAGE_SIZE) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA minimum page size of %d bigger than "
+ "kernel PAGE_SIZE of %ld, aborting.\n",
+ dev_lim->min_page_sz, PAGE_SIZE));
+ return -ENODEV;
+ }
+ if (dev_lim->num_ports > MTHCA_MAX_PORTS) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA has %d ports, but we only support %d, "
+ "aborting.\n",
+ dev_lim->num_ports, MTHCA_MAX_PORTS));
+ return -ENODEV;
+ }
+
+ if (dev_lim->uar_size > (int)pci_resource_len(mdev, HCA_BAR_TYPE_UAR)) {
+ HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW , ("HCA reported UAR size of 0x%x bigger than "
+ "Bar%d size of 0x%lx, aborting.\n",
+ dev_lim->uar_size, HCA_BAR_TYPE_UAR,
+ (unsigned long)pci_resource_len(mdev, HCA_BAR_TYPE_UAR)));
+ return -ENODEV;
+ }
+
+
+ mdev->limits.num_ports = dev_lim->num_ports;
+ mdev->limits.vl_cap = dev_lim->max_vl;
+ mdev->limits.mtu_cap = dev_lim->max_mtu;
+ mdev->limits.gid_table_len = dev_lim->max_gids;
+ mdev->limits.pkey_table_len = dev_lim->max_pkeys;
+ mdev->limits.local_ca_ack_delay = dev_lim->local_ca_ack_delay;
+ mdev->limits.max_sg = dev_lim->max_sg;
+ mdev->limits.max_wqes = dev_lim->max_qp_sz;
+ mdev->limits.max_qp_init_rdma = dev_lim->max_requester_per_qp;
+ mdev->limits.reserved_qps = dev_lim->reserved_qps;
+ mdev->limits.max_srq_wqes = dev_lim->max_srq_sz;
+ mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
+ mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
+ mdev->limits.max_desc_sz = dev_lim->max_desc_sz;
+ mdev->limits.max_srq_sge = mthca_max_srq_sge(mdev);
+ /*
+ * Subtract 1 from the limit because we need to allocate a
+ * spare CQE so the HCA HW can tell the difference between an
+ * empty CQ and a full CQ.
+ */
+ mdev->limits.max_cqes = dev_lim->max_cq_sz - 1;
+ mdev->limits.reserved_cqs = dev_lim->reserved_cqs;
+ mdev->limits.reserved_eqs = dev_lim->reserved_eqs;
+ mdev->limits.reserved_mtts = dev_lim->reserved_mtts;
+ mdev->limits.reserved_mrws = dev_lim->reserved_mrws;
+ mdev->limits.reserved_uars = dev_lim->reserved_uars;
+ mdev->limits.reserved_pds = dev_lim->reserved_pds;
+ mdev->limits.port_width_cap = (u8)dev_lim->max_port_width;
+ mdev->limits.page_size_cap = ~(u32)(dev_lim->min_page_sz - 1);
+ mdev->limits.flags = dev_lim->flags;
+
+ /* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
+ May be doable since hardware supports it for SRQ.
+
+ IB_DEVICE_N_NOTIFY_CQ is supported by hardware but not by driver.
+
+ IB_DEVICE_SRQ_RESIZE is supported by hardware but SRQ is not
+ supported by driver.
*/ + mdev->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT | + IB_DEVICE_PORT_ACTIVE_EVENT | + IB_DEVICE_SYS_IMAGE_GUID | + IB_DEVICE_RC_RNR_NAK_GEN; + + if (dev_lim->flags & DEV_LIM_FLAG_BAD_PKEY_CNTR) + mdev->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR; + + if (dev_lim->flags & DEV_LIM_FLAG_BAD_QKEY_CNTR) + mdev->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR; + + if (dev_lim->flags & DEV_LIM_FLAG_RAW_MULTI) + mdev->device_cap_flags |= IB_DEVICE_RAW_MULTI; + + if (dev_lim->flags & DEV_LIM_FLAG_AUTO_PATH_MIG) + mdev->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG; + + if (dev_lim->flags & DEV_LIM_FLAG_UD_AV_PORT_ENFORCE) + mdev->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE; + + if (dev_lim->flags & DEV_LIM_FLAG_SRQ) + mdev->mthca_flags |= MTHCA_FLAG_SRQ; + + return 0; +} + +static int mthca_init_tavor(struct mthca_dev *mdev) +{ + u8 status; + int err; + struct mthca_dev_lim dev_lim; + struct mthca_profile profile; + struct mthca_init_hca_param init_hca; + + err = mthca_SYS_EN(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SYS_EN command failed, aborting.\n")); + return err; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SYS_EN returned status 0x%02x, " + "aborting.\n", status)); + return -EINVAL; + } + + err = mthca_QUERY_FW(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW command failed, aborting.\n")); + goto err_disable; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW returned status 0x%02x, " + "aborting.\n", status)); + err = -EINVAL; + goto err_disable; + } + err = mthca_QUERY_DDR(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_DDR command failed, aborting.\n")); + goto err_disable; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,( "QUERY_DDR returned status 0x%02x, " + "aborting.\n", status)); + err = -EINVAL; + goto err_disable; + } + + err = mthca_dev_lim(mdev, &dev_lim); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,( "QUERY_DEV_LIM command failed, aborting.\n")); + goto err_disable; + } + + profile = default_profile; + profile.num_uar = dev_lim.uar_size / PAGE_SIZE; + profile.uarc_size = 0; + + /* correct default profile */ + if ( g_profile_qp_num != 0 ) + profile.num_qp = g_profile_qp_num; + + if ( g_profile_rd_out != 0xffffffff ) + profile.rdb_per_qp = g_profile_rd_out; + + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; + + err = (int)mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); + if (err < 0) + goto err_disable; + + err = (int)mthca_INIT_HCA(mdev, &init_hca, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA command failed, aborting.\n")); + goto err_disable; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA returned status 0x%02x, " + "aborting.\n", status)); + err = -EINVAL; + goto err_disable; + } + + return 0; + +err_disable: + mthca_SYS_DIS(mdev, &status); + + return err; +} + +static int mthca_load_fw(struct mthca_dev *mdev) +{ + u8 status; + int err; + + /* FIXME: use HCA-attached memory for FW if present */ + + mdev->fw.arbel.fw_icm = + mthca_alloc_icm(mdev, mdev->fw.arbel.fw_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!mdev->fw.arbel.fw_icm) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Couldn't allocate FW area, aborting.\n")); + return -ENOMEM; + } + + err = mthca_MAP_FA(mdev, mdev->fw.arbel.fw_icm, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MAP_FA 
command failed, aborting.\n")); + goto err_free; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MAP_FA returned status 0x%02x, aborting.\n", status)); + err = -EINVAL; + goto err_free; + } + err = mthca_RUN_FW(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("RUN_FW command failed, aborting.\n")); + goto err_unmap_fa; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("RUN_FW returned status 0x%02x, aborting.\n", status)); + err = -EINVAL; + goto err_unmap_fa; + } + + return 0; + +err_unmap_fa: + mthca_UNMAP_FA(mdev, &status); + +err_free: + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + return err; +} + +static int mthca_init_icm(struct mthca_dev *mdev, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca, + u64 icm_size) +{ + u64 aux_pages; + u8 status; + int err; + + err = mthca_SET_ICM_SIZE(mdev, icm_size, &aux_pages, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SET_ICM_SIZE command failed, aborting.\n")); + return err; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("SET_ICM_SIZE returned status 0x%02x, " + "aborting.\n", status)); + return -EINVAL; + } + + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW , ("%I64d KB of HCA context requires %I64d KB aux memory.\n", + (u64) icm_size >> 10, + (u64) aux_pages << 2)); + + mdev->fw.arbel.aux_icm = mthca_alloc_icm(mdev, (int)aux_pages, + GFP_HIGHUSER | __GFP_NOWARN); + if (!mdev->fw.arbel.aux_icm) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Couldn't allocate aux memory, aborting.\n")); + return -ENOMEM; + } + + err = mthca_MAP_ICM_AUX(mdev, mdev->fw.arbel.aux_icm, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MAP_ICM_AUX command failed, aborting.\n")); + goto err_free_aux; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MAP_ICM_AUX returned status 0x%02x, aborting.\n", status)); + err = -EINVAL; + goto err_free_aux; + } + + err = mthca_map_eq_icm(mdev, init_hca->eqc_base); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map EQ context memory, aborting.\n")); + goto err_unmap_aux; + } + + mdev->mr_table.mtt_table = mthca_alloc_icm_table(mdev, init_hca->mtt_base, + MTHCA_MTT_SEG_SIZE, + mdev->limits.num_mtt_segs, + mdev->limits.reserved_mtts, 1); + if (!mdev->mr_table.mtt_table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map MTT context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_eq; + } + + mdev->mr_table.mpt_table = mthca_alloc_icm_table(mdev, init_hca->mpt_base, + dev_lim->mpt_entry_sz, + mdev->limits.num_mpts, + mdev->limits.reserved_mrws, 1); + if (!mdev->mr_table.mpt_table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map MPT context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_mtt; + } + + mdev->qp_table.qp_table = mthca_alloc_icm_table(mdev, init_hca->qpc_base, + dev_lim->qpc_entry_sz, + mdev->limits.num_qps, + mdev->limits.reserved_qps, 0); + if (!mdev->qp_table.qp_table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map QP context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_mpt; + } + + mdev->qp_table.eqp_table = mthca_alloc_icm_table(mdev, init_hca->eqpc_base, + dev_lim->eqpc_entry_sz, + mdev->limits.num_qps, + mdev->limits.reserved_qps, 0); + if (!mdev->qp_table.eqp_table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map EQP context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_qp; + } + + 
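/*
 * (Aside on the pattern above and below, not from the original source:
 * each mthca_alloc_icm_table() call describes one ICM-backed table by
 * its virtual base address (laid out by mthca_make_profile() via
 * init_hca), per-entry size, total object count, and the number of
 * reserved entries mapped up front; the error labels then free the
 * tables in exactly the reverse order of allocation.)
 */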
mdev->qp_table.rdb_table = mthca_alloc_icm_table(mdev, init_hca->rdb_base, + MTHCA_RDB_ENTRY_SIZE, + mdev->limits.num_qps << + mdev->qp_table.rdb_shift, + 0, 0); + if (!mdev->qp_table.rdb_table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map RDB context memory, aborting\n")); + err = -ENOMEM; + goto err_unmap_eqp; + } + + mdev->cq_table.table = mthca_alloc_icm_table(mdev, init_hca->cqc_base, + dev_lim->cqc_entry_sz, + mdev->limits.num_cqs, + mdev->limits.reserved_cqs, 0); + if (!mdev->cq_table.table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map CQ context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_rdb; + } + + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) { + mdev->srq_table.table = + mthca_alloc_icm_table(mdev, init_hca->srqc_base, + dev_lim->srq_entry_sz, + mdev->limits.num_srqs, + mdev->limits.reserved_srqs, 0); + if (!mdev->srq_table.table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map SRQ context memory, " + "aborting.\n")); + err = -ENOMEM; + goto err_unmap_cq; + } + } + + /* + * It's not strictly required, but for simplicity just map the + * whole multicast group table now. The table isn't very big + * and it's a lot easier than trying to track ref counts. + */ + mdev->mcg_table.table = mthca_alloc_icm_table(mdev, init_hca->mc_base, + MTHCA_MGM_ENTRY_SIZE, + mdev->limits.num_mgms + + mdev->limits.num_amgms, + mdev->limits.num_mgms + + mdev->limits.num_amgms, + 0); + if (!mdev->mcg_table.table) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to map MCG context memory, aborting.\n")); + err = -ENOMEM; + goto err_unmap_srq; + } + + return 0; + +err_unmap_srq: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + +err_unmap_cq: + mthca_free_icm_table(mdev, mdev->cq_table.table); + +err_unmap_rdb: + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + +err_unmap_eqp: + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + +err_unmap_qp: + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + +err_unmap_mpt: + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + +err_unmap_mtt: + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + +err_unmap_eq: + mthca_unmap_eq_icm(mdev); + +err_unmap_aux: + mthca_UNMAP_ICM_AUX(mdev, &status); + +err_free_aux: + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + return err; +} + +static int mthca_init_arbel(struct mthca_dev *mdev) +{ + struct mthca_dev_lim dev_lim; + struct mthca_profile profile; + struct mthca_init_hca_param init_hca; + u64 icm_size; + u8 status; + int err; + + err = mthca_QUERY_FW(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW command failed, aborting.\n")); + return err; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_FW returned status 0x%02x, " + "aborting.\n", status)); + return -EINVAL; + } + + err = mthca_ENABLE_LAM(mdev, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("ENABLE_LAM command failed, aborting.\n")); + return err; + } + if (status == MTHCA_CMD_STAT_LAM_NOT_PRE) { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("No HCA-attached memory (running in MemFree mode)\n")); + mdev->mthca_flags |= MTHCA_FLAG_NO_LAM; + } else if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("ENABLE_LAM returned status 0x%02x, " + "aborting.\n", status)); + return -EINVAL; + } + + err = mthca_load_fw(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to start FW, aborting.\n")); + goto 
err_disable; + } + + err = mthca_dev_lim(mdev, &dev_lim); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_DEV_LIM command failed, aborting.\n")); + goto err_stop_fw; + } + + profile = default_profile; + profile.num_uar = dev_lim.uar_size / PAGE_SIZE; + profile.num_udav = 0; + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + profile.num_srq = dev_lim.max_srqs; + + /* correct default profile */ + if ( g_profile_qp_num != 0 ) + profile.num_qp = g_profile_qp_num; + + if ( g_profile_rd_out != 0xffffffff ) + profile.rdb_per_qp = g_profile_rd_out; + + RtlZeroMemory( &init_hca, sizeof(init_hca)); + icm_size = mthca_make_profile(mdev, &profile, &dev_lim, &init_hca); + if ((int) icm_size < 0) { + err = (int)icm_size; + goto err_stop_fw; + } + + err = mthca_init_icm(mdev, &dev_lim, &init_hca, icm_size); + if (err) + goto err_stop_fw; + + err = mthca_INIT_HCA(mdev, &init_hca, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA command failed, aborting.\n")); + goto err_free_icm; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("INIT_HCA returned status 0x%02x, " + "aborting.\n", status)); + err = -EINVAL; + goto err_free_icm; + } + + return 0; + +err_free_icm: + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + +err_stop_fw: + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + +err_disable: + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + + return err; +} + +static void mthca_close_hca(struct mthca_dev *mdev) +{ + u8 status; + + mthca_CLOSE_HCA(mdev, 0, &status); + + if (mthca_is_memfree(mdev)) { + if (mdev->mthca_flags & MTHCA_FLAG_SRQ) + mthca_free_icm_table(mdev, mdev->srq_table.table); + mthca_free_icm_table(mdev, mdev->cq_table.table); + mthca_free_icm_table(mdev, mdev->qp_table.rdb_table); + mthca_free_icm_table(mdev, mdev->qp_table.eqp_table); + mthca_free_icm_table(mdev, mdev->qp_table.qp_table); + mthca_free_icm_table(mdev, mdev->mr_table.mpt_table); + mthca_free_icm_table(mdev, mdev->mr_table.mtt_table); + mthca_free_icm_table(mdev, mdev->mcg_table.table); + mthca_unmap_eq_icm(mdev); + + mthca_UNMAP_ICM_AUX(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.aux_icm); + + mthca_UNMAP_FA(mdev, &status); + mthca_free_icm(mdev, mdev->fw.arbel.fw_icm); + + if (!(mdev->mthca_flags & MTHCA_FLAG_NO_LAM)) + mthca_DISABLE_LAM(mdev, &status); + } else + mthca_SYS_DIS(mdev, &status); +} + +static int mthca_init_hca(struct mthca_dev *mdev) +{ + u8 status; + int err; + struct mthca_adapter adapter; + + if (mthca_is_memfree(mdev)) + err = mthca_init_arbel(mdev); + else + err = mthca_init_tavor(mdev); + + if (err) + return err; + + err = mthca_QUERY_ADAPTER(mdev, &adapter, &status); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_ADAPTER command failed, aborting.\n")); + goto err_close; + } + if (status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QUERY_ADAPTER returned status 0x%02x, " + "aborting.\n", status)); + err = -EINVAL; + goto err_close; + } + + 
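+	/*
+	 * QUERY_ADAPTER succeeded - cache what the firmware reported: the
+	 * INTA pin is used later when programming interrupt clearing for the
+	 * EQs, and the revision and board id identify the exact adapter model.
+	 */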
mdev->eq_table.inta_pin = adapter.inta_pin; + mdev->rev_id = adapter.revision_id; + memcpy(mdev->board_id, adapter.board_id, sizeof mdev->board_id); + + return 0; + +err_close: + mthca_close_hca(mdev); + return err; +} + +static int mthca_setup_hca(struct mthca_dev *mdev) +{ + int err; + u8 status; + + MTHCA_INIT_DOORBELL_LOCK(&mdev->doorbell_lock); + + err = mthca_init_uar_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "user access region table, aborting.\n")); + return err; + } + + err = mthca_uar_alloc(mdev, &mdev->driver_uar); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to allocate driver access region, " + "aborting.\n")); + goto err_uar_table_free; + } + + mdev->kar = ioremap((io_addr_t)mdev->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE,&mdev->kar_size); + if (!mdev->kar) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Couldn't map kernel access region, " + "aborting.\n")); + err = -ENOMEM; + goto err_uar_free; + } + + err = mthca_init_pd_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "protection domain table, aborting.\n")); + goto err_kar_unmap; + } + + err = mthca_init_mr_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "memory region table, aborting.\n")); + goto err_pd_table_free; + } + + err = mthca_pd_alloc(mdev, 1, &mdev->driver_pd); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to create driver PD, " + "aborting.\n")); + goto err_mr_table_free; + } + + err = mthca_init_eq_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("Failed to initialize " + "event queue table, aborting.\n")); + goto err_pd_free; + } + + err = mthca_cmd_use_events(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to switch to event-driven " + "firmware commands, aborting.\n")); + goto err_eq_table_free; + } + + err = mthca_NOP(mdev, &status); + if (err || status) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("NOP command failed to generate interrupt, aborting.\n")); + if (mdev->mthca_flags & (MTHCA_FLAG_MSI | MTHCA_FLAG_MSI_X)){ + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Try again with MSI/MSI-X disabled.\n")); + }else{ + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("BIOS or ACPI interrupt routing problem?\n")); + } + + goto err_cmd_poll; + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("NOP command IRQ test passed\n")); + + err = mthca_init_cq_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "completion queue table, aborting.\n")); + goto err_cmd_poll; + } + + err = mthca_init_srq_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "shared receive queue table, aborting.\n")); + goto err_cq_table_free; + } + + err = mthca_init_qp_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("Failed to initialize " + "queue pair table, aborting.\n")); + goto err_srq_table_free; + } + + err = mthca_init_av_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "address vector table, aborting.\n")); + goto err_qp_table_free; + } + + err = mthca_init_mcg_table(mdev); + if (err) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Failed to initialize " + "multicast group table, aborting.\n")); + goto err_av_table_free; + } + + return 0; + +err_av_table_free: + mthca_cleanup_av_table(mdev); + +err_qp_table_free: + 
mthca_cleanup_qp_table(mdev); + +err_srq_table_free: + mthca_cleanup_srq_table(mdev); + +err_cq_table_free: + mthca_cleanup_cq_table(mdev); + +err_cmd_poll: + mthca_cmd_use_polling(mdev); + +err_eq_table_free: + mthca_cleanup_eq_table(mdev); + +err_pd_free: + mthca_pd_free(mdev, &mdev->driver_pd); + +err_mr_table_free: + mthca_cleanup_mr_table(mdev); + +err_pd_table_free: + mthca_cleanup_pd_table(mdev); + +err_kar_unmap: + iounmap(mdev->kar, mdev->kar_size); + +err_uar_free: + mthca_uar_free(mdev, &mdev->driver_uar); + +err_uar_table_free: + mthca_cleanup_uar_table(mdev); + return err; +} + + +static int mthca_check_fw(struct mthca_dev *mdev, struct pci_device_id *p_id) +{ + int err = 0; + + if (mdev->fw_ver < mthca_hca_table[p_id->driver_data].max_unsupported_fw) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("HCA FW version %d.%d.%d is not supported. Use %d.%d.%d or higher.\n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff, + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff))); + err = -EINVAL; + } + else + if (mdev->fw_ver < mthca_hca_table[p_id->driver_data].min_supported_fw) { + HCA_PRINT_EV(TRACE_LEVEL_WARNING ,HCA_DBG_LOW , + ("The HCA FW version is %d.%d.%d, which is not the latest one. \n" + "If you meet any issues with the HCA please first try to upgrade the FW to version %d.%d.%d or higher.\n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 32), + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw >> 16) & 0xffff, + (int) (mthca_hca_table[p_id->driver_data].min_supported_fw & 0xffff))); + } + else { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("Current HCA FW version is %d.%d.%d. \n", + (int) (mdev->fw_ver >> 32), (int) (mdev->fw_ver >> 16) & 0xffff, + (int) (mdev->fw_ver & 0xffff))); + } + + return err; +} + +NTSTATUS mthca_init_one(hca_dev_ext_t *ext) +{ + static int mthca_version_printed = 0; + int err; + NTSTATUS status; + struct mthca_dev *mdev; + struct pci_device_id *p_id; + + /* print version */ + if (!mthca_version_printed) { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_LOW ,("%s\n", mthca_version)); + ++mthca_version_printed; + } + + /* find the type of device */ +find_pci_dev: + p_id = mthca_find_pci_dev( + (unsigned)ext->hcaConfig.VendorID, + (unsigned)ext->hcaConfig.DeviceID); + if (p_id == NULL) { + status = STATUS_NO_SUCH_DEVICE; + goto end; + } + + /* allocate mdev structure */ + mdev = kzalloc(sizeof *mdev, GFP_KERNEL); + if (!mdev) { + // can't use HCA_PRINT_EV here ! 
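+		// (the event-log variant presumably needs the very device structure whose allocation just failed)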
+ HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Device struct alloc failed, " + "aborting.\n")); + status = STATUS_INSUFFICIENT_RESOURCES; + goto end; + } + + /* set some fields */ + mdev->ext = ext; /* pointer to DEVICE OBJECT extension */ + mdev->hca_type = p_id->driver_data; + mdev->ib_dev.mdev = mdev; + if (p_id->driver_data == LIVEFISH) + mdev->mthca_flags |= MTHCA_FLAG_LIVEFISH; + if (mthca_is_livefish(mdev)) + goto done; + if (ext->hca_hidden) + mdev->mthca_flags |= MTHCA_FLAG_DDR_HIDDEN; + if (mthca_hca_table[p_id->driver_data].is_memfree) + mdev->mthca_flags |= MTHCA_FLAG_MEMFREE; + if (mthca_hca_table[p_id->driver_data].is_pcie) + mdev->mthca_flags |= MTHCA_FLAG_PCIE; + +//TODO: after we have a FW, capable of reset, +// write a routine, that only presses the button + + /* + * Now reset the HCA before we touch the PCI capabilities or + * attempt a firmware command, since a boot ROM may have left + * the HCA in an undefined state. + */ + status = hca_reset( mdev->ext->cl_ext.p_self_do, p_id->driver_data == TAVOR ); + if ( !NT_SUCCESS( status ) ) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to reset HCA, aborting.\n")); + goto err_free_dev; + } + + if (mthca_cmd_init(mdev)) { + HCA_PRINT_EV(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Failed to init command interface, aborting.\n")); + status = STATUS_DEVICE_DATA_ERROR; + goto err_free_dev; + } + + status = mthca_tune_pci(mdev); + if ( !NT_SUCCESS( status ) ) { + goto err_cmd; + } + + err = mthca_init_hca(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_cmd; + } + + err = mthca_check_fw(mdev, p_id); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_close; + } + + err = mthca_setup_hca(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_close; + } + + err = mthca_register_device(mdev); + if (err) { + status = STATUS_UNSUCCESSFUL; + goto err_cleanup; + } + + done: + ext->hca.mdev = mdev; + mdev->state = MTHCA_DEV_INITIALIZED; + return 0; + +err_cleanup: + mthca_cleanup_mcg_table(mdev); + mthca_cleanup_av_table(mdev); + mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); + mthca_cleanup_cq_table(mdev); + mthca_cmd_use_polling(mdev); + mthca_cleanup_eq_table(mdev); + + mthca_pd_free(mdev, &mdev->driver_pd); + + mthca_cleanup_mr_table(mdev); + mthca_cleanup_pd_table(mdev); + mthca_cleanup_uar_table(mdev); + +err_close: + mthca_close_hca(mdev); + +err_cmd: + mthca_cmd_cleanup(mdev); + +err_free_dev: + kfree(mdev); + + /* we failed device initialization - try to simulate "livefish" device to facilitate using FW burning tools */ + if (ext->hcaConfig.DeviceID == PCI_DEVICE_ID_MELLANOX_ARBEL) + ext->hcaConfig.DeviceID = PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT; + ext->hcaConfig.DeviceID += 1; /* generate appropriate "livefish" DevId */ + goto find_pci_dev; + +end: + return status; +} + +void mthca_remove_one(hca_dev_ext_t *ext) +{ + struct mthca_dev *mdev = ext->hca.mdev; + u8 status; + int p; + + ext->hca.mdev = NULL; + if (mdev) { + mdev->state = MTHCA_DEV_UNINITIALIZED; + if (mthca_is_livefish(mdev)) + goto done; + mthca_unregister_device(mdev); + + for (p = 1; p <= mdev->limits.num_ports; ++p) + mthca_CLOSE_IB(mdev, p, &status); + + mthca_cleanup_mcg_table(mdev); + mthca_cleanup_av_table(mdev); + mthca_cleanup_qp_table(mdev); + mthca_cleanup_srq_table(mdev); + mthca_cleanup_cq_table(mdev); + mthca_cmd_use_polling(mdev); + mthca_cleanup_eq_table(mdev); + mthca_pd_free(mdev, &mdev->driver_pd); + mthca_cleanup_mr_table(mdev); + mthca_cleanup_pd_table(mdev); + iounmap(mdev->kar, mdev->kar_size); + 
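+		/* the teardown above and below undoes mthca_setup_hca() in exact reverse order */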
mthca_uar_free(mdev, &mdev->driver_uar); + mthca_cleanup_uar_table(mdev); + mthca_close_hca(mdev); + mthca_cmd_cleanup(mdev); +done: + kfree(mdev); + } +} + + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_mcg.c b/branches/Ndi/hw/mthca/kernel/mthca_mcg.c new file mode 100644 index 00000000..ec477a9e --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_mcg.c @@ -0,0 +1,408 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_mcg.tmh" +#endif +#include "mthca_cmd.h" + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_mcg_table) +#pragma alloc_text (PAGE, mthca_cleanup_mcg_table) +#endif + +struct mthca_mgm { + __be32 next_gid_index; + u32 reserved[3]; + u8 gid[16]; + __be32 qp[MTHCA_QP_PER_MGM]; +}; + +static const u8 zero_gid[16] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + +/* + * Caller must hold MCG table semaphore. gid and mgm parameters must + * be properly aligned for command interface. + * + * Returns 0 unless a firmware command error occurs. + * + * If GID is found in MGM or MGM is empty, *index = *hash, *prev = -1 + * and *mgm holds MGM entry. + * + * if GID is found in AMGM, *index = index in AMGM, *prev = index of + * previous entry in hash chain and *mgm holds AMGM entry. + * + * If no AMGM exists for given gid, *index = -1, *prev = index of last + * entry in hash chain and *mgm holds end of hash chain. 
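+ *
+ * The chain links themselves live in next_gid_index, which stores the
+ * AMGM index shifted left by 6 bits; a stored value of 0 terminates the
+ * chain (e.g. a stored 0x1c0 links to AMGM entry 7).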
+ */ +static int find_mgm(struct mthca_dev *dev, + u8 *gid, struct mthca_mailbox *mgm_mailbox, + u16 *hash, int *prev, int *index) +{ + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm = mgm_mailbox->buf; + u8 *mgid; + int err; + u8 status; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return -ENOMEM; + mgid = mailbox->buf; + + memcpy(mgid, gid, 16); + + err = mthca_MGID_HASH(dev, mailbox, hash, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MGID_HASH returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("Hash for %04x:%04x:%04x:%04x:" + "%04x:%04x:%04x:%04x is %04x\n", + cl_ntoh16(((__be16 *) gid)[0]), + cl_ntoh16(((__be16 *) gid)[1]), + cl_ntoh16(((__be16 *) gid)[2]), + cl_ntoh16(((__be16 *) gid)[3]), + cl_ntoh16(((__be16 *) gid)[4]), + cl_ntoh16(((__be16 *) gid)[5]), + cl_ntoh16(((__be16 *) gid)[6]), + cl_ntoh16(((__be16 *) gid)[7]), + *hash)); + + *index = *hash; + *prev = -1; + + do { + err = mthca_READ_MGM(dev, *index, mgm_mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("READ_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + if (!memcmp(mgm->gid, zero_gid, 16)) { + if (*index != *hash) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("Found zero MGID in AMGM.\n")); + err = -EINVAL; + } + goto out; + } + + if (!memcmp(mgm->gid, gid, 16)) + goto out; + + *prev = *index; + *index = cl_ntoh32(mgm->next_gid_index) >> 6; + } while (*index); + + *index = -1; + + out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_multicast_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm; + u16 hash; + int index, prev; + int link = 0; + int i; + int err; + u8 status; + + UNREFERENCED_PARAMETER(lid); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + down(&dev->mcg_table.mutex); + + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); + if (err) + goto out; + + if (index != -1) { + if (!memcmp(mgm->gid, zero_gid, 16)) + memcpy(mgm->gid, gid->raw, 16); + } else { + link = 1; + + index = mthca_alloc(&dev->mcg_table.alloc); + if (index == -1) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("No AMGM entries left\n")); + err = -ENOMEM; + goto out; + } + + err = mthca_READ_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("READ_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + memset(mgm, 0, sizeof *mgm); + memcpy(mgm->gid, gid->raw, 16); + mgm->next_gid_index = 0; + } + + for (i = 0; i < MTHCA_QP_PER_MGM; ++i) + if (mgm->qp[i] == cl_hton32(ibqp->qp_num | (1 << 31))) { + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("QP %06x already a member of MGM\n", + ibqp->qp_num)); + err = 0; + goto out; + } else if (!(mgm->qp[i] & cl_hton32(1UL << 31))) { + mgm->qp[i] = cl_hton32(ibqp->qp_num | (1 << 31)); + break; + } + + if (i == MTHCA_QP_PER_MGM) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("MGM at index %x is full.\n", index)); + err = -ENOMEM; + goto out; + } + + err = mthca_WRITE_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("WRITE_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + if (!link) + 
goto out; + + err = mthca_READ_MGM(dev, prev, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("READ_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + mgm->next_gid_index = cl_hton32(index << 6); + + err = mthca_WRITE_MGM(dev, prev, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("WRITE_MGM returned status %02x\n", status)); + err = -EINVAL; + } + +out: + if (err && link && index != -1) { + BUG_ON(index < dev->limits.num_mgms); + mthca_free(&dev->mcg_table.alloc, index); + } + KeReleaseMutex(&dev->mcg_table.mutex,FALSE); + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_multicast_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_mailbox *mailbox; + struct mthca_mgm *mgm; + u16 hash; + int prev, index; + int i, loc; + int err; + u8 status; + + UNREFERENCED_PARAMETER(lid); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mgm = mailbox->buf; + + down(&dev->mcg_table.mutex); + + err = find_mgm(dev, gid->raw, mailbox, &hash, &prev, &index); + if (err) + goto out; + + if (index == -1) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW, ("MGID %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x " + "not found\n", + cl_ntoh16(((__be16 *) gid->raw)[0]), + cl_ntoh16(((__be16 *) gid->raw)[1]), + cl_ntoh16(((__be16 *) gid->raw)[2]), + cl_ntoh16(((__be16 *) gid->raw)[3]), + cl_ntoh16(((__be16 *) gid->raw)[4]), + cl_ntoh16(((__be16 *) gid->raw)[5]), + cl_ntoh16(((__be16 *) gid->raw)[6]), + cl_ntoh16(((__be16 *) gid->raw)[7]))); + err = -EINVAL; + goto out; + } + + for (loc = -1, i = 0; i < MTHCA_QP_PER_MGM; ++i) { + if (mgm->qp[i] == cl_hton32(ibqp->qp_num | (1 << 31))) + loc = i; + if (!(mgm->qp[i] & cl_hton32(1UL << 31))) + break; + } + + if (loc == -1) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("QP %06x not found in MGM\n", ibqp->qp_num)); + err = -EINVAL; + goto out; + } + + mgm->qp[loc] = mgm->qp[i - 1]; + mgm->qp[i - 1] = 0; + + err = mthca_WRITE_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("WRITE_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + if (i != 1) + goto out; + + if (prev == -1) { + /* Remove entry from MGM */ + int amgm_index_to_free = cl_ntoh32(mgm->next_gid_index) >> 6; + if (amgm_index_to_free) { + err = mthca_READ_MGM(dev, amgm_index_to_free, + mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("READ_MGM returned status %02x\n", + status)); + err = -EINVAL; + goto out; + } + } else + RtlZeroMemory(mgm->gid, 16); + + err = mthca_WRITE_MGM(dev, index, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("WRITE_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + if (amgm_index_to_free) { + BUG_ON(amgm_index_to_free < dev->limits.num_mgms); + mthca_free(&dev->mcg_table.alloc, amgm_index_to_free); + } + } else { + /* Remove entry from AMGM */ + int curr_next_index = cl_ntoh32(mgm->next_gid_index) >> 6; + err = mthca_READ_MGM(dev, prev, mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("READ_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + + mgm->next_gid_index = cl_hton32(curr_next_index << 6); + + err = mthca_WRITE_MGM(dev, prev, 
mailbox, &status); + if (err) + goto out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_LOW ,("WRITE_MGM returned status %02x\n", status)); + err = -EINVAL; + goto out; + } + BUG_ON(index < dev->limits.num_mgms); + mthca_free(&dev->mcg_table.alloc, index); + } + + out: + KeReleaseMutex(&dev->mcg_table.mutex, FALSE); + mthca_free_mailbox(dev, mailbox); + return err; +} + +int mthca_init_mcg_table(struct mthca_dev *dev) +{ + int err; + int table_size = dev->limits.num_mgms + dev->limits.num_amgms; + + err = mthca_alloc_init(&dev->mcg_table.alloc, + table_size, + table_size - 1, + dev->limits.num_mgms); + + if (err) + return err; + + KeInitializeMutex(&dev->mcg_table.mutex,0); + + return 0; +} + +void mthca_cleanup_mcg_table(struct mthca_dev *dev) +{ + mthca_alloc_cleanup(&dev->mcg_table.alloc); +} + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_memfree.c b/branches/Ndi/hw/mthca/kernel/mthca_memfree.c new file mode 100644 index 00000000..975ce6ab --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_memfree.c @@ -0,0 +1,729 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "hca_driver.h" +#include "mthca_memfree.h" +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_memfree.tmh" +#endif +#include "mthca_cmd.h" + +/* + * We allocate in as big chunks as we can, up to a maximum of 256 KB + * per chunk. 
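+ * 256 KB is 1 << 18, i.e. 64 pages of 4 KB; MTHCA_ICM_ALLOC_SIZE and
+ * MTHCA_TABLE_CHUNK_SIZE below both encode this same limit.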
+ */ +enum { + MTHCA_ICM_ALLOC_SIZE = 1 << 18, + MTHCA_TABLE_CHUNK_SIZE = 1 << 18 +}; + +#pragma warning( disable : 4200) +struct mthca_user_db_table { + KMUTEX mutex; + struct { + u64 uvirt; + struct scatterlist mem; + int refcount; + } page[0]; +}; +#pragma warning( default : 4200) + +void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm) +{ + struct mthca_icm_chunk *chunk, *tmp; + int i; + + if (!icm) + return; + + list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list,struct mthca_icm_chunk,struct mthca_icm_chunk) { + if (chunk->nsg > 0) + pci_unmap_sg(dev, chunk->mem, chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + for (i = 0; i < chunk->npages; ++i) + free_dma_mem_map(dev, &chunk->mem[i], PCI_DMA_BIDIRECTIONAL ); + + kfree(chunk); + } + + kfree(icm); +} + +/* Allocate device memory of 'npages' pages as a list of chunks, each containing an array of + contiguous buffers. Physical pages are allocated first and then mapped to bus space. */ +struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, + unsigned int gfp_mask) +{ + struct mthca_icm *icm; + struct mthca_icm_chunk *chunk = NULL; + int cur_order; + + icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!icm) + return icm; + + icm->refcount = 0; + INIT_LIST_HEAD(&icm->chunk_list); + + cur_order = get_order(MTHCA_ICM_ALLOC_SIZE); + + while (npages > 0) { + /* allocate a new chunk */ + if (!chunk) { + chunk = kmalloc(sizeof *chunk, + gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN)); + if (!chunk) + goto fail; + + RtlZeroMemory( chunk, sizeof *chunk ); + list_add_tail(&chunk->list, &icm->chunk_list); + } + + /* fill the chunk with consistent (DMA-coherent) areas of whole pages each */ + while (1 << cur_order > npages) + /* don't allocate more pages than are still required */ + --cur_order; + + /* try to allocate a contiguous PHYSICAL buffer */ + alloc_dma_zmem( dev, PAGE_SIZE << cur_order, + &chunk->mem[chunk->npages] ); + + /* if it succeeded - proceed handling */ + if (chunk->mem[chunk->npages].page) { + + /* check whether the chunk is full */ + if (++chunk->npages == MTHCA_ICM_CHUNK_LEN) { + /* it's full --> map physical addresses to bus ones */ + chunk->nsg = pci_map_sg(dev, chunk->mem, + chunk->npages, PCI_DMA_BIDIRECTIONAL ); + + if (chunk->nsg <= 0) + goto fail; + + chunk = NULL; + } + + /* calculate the remaining memory to be allocated */ + npages -= 1 << cur_order; + } + /* failed to allocate - let's halve the buffer size and try once more */ + else { + --cur_order; + if (cur_order < 0) + goto fail; + } + } + + /* the last, not yet full chunk: map physical addresses to bus ones */ + if (chunk) { + chunk->nsg = pci_map_sg(dev, chunk->mem, + chunk->npages, + PCI_DMA_BIDIRECTIONAL); + + if (chunk->nsg <= 0) + goto fail; + } + + return icm; + +fail: + mthca_free_icm(dev, icm); + return NULL; +} + +int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) +{ + int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; + int ret = 0; + u8 status; + + down(&table->mutex); + + if (table->icm[i]) { + ++table->icm[i]->refcount; + goto out; + } + + table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT, + (table->lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) { + ret = -ENOMEM; + goto out; + } + + if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + &status) || status) { + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + ret = -ENOMEM; + goto out; + } + + ++table->icm[i]->refcount; + +out: + up(&table->mutex); + return ret; +} + +void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE; + + down(&table->mutex); + + if (--table->icm[i]->refcount == 0) { + mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + } + + up(&table->mutex); +} + +void *mthca_table_find(struct mthca_icm_table *table, int obj) +{ + int idx, offset, i; + struct mthca_icm_chunk *chunk; + struct mthca_icm *icm; + struct page *page = NULL; + + if (!table->lowmem) + return NULL; + + down(&table->mutex); + + idx = (obj & (table->num_obj - 1)) * table->obj_size; + icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE]; + offset = idx % MTHCA_TABLE_CHUNK_SIZE; + + if (!icm) + goto out; + + list_for_each_entry(chunk, &icm->chunk_list, list,struct mthca_icm_chunk) { + for (i = 0; i < chunk->npages; ++i) { + if ((int)chunk->mem[i].length >= offset) { + page = chunk->mem[i].page; + goto out; + } + offset -= chunk->mem[i].length; + } + } + +out: + up(&table->mutex); + return page ? (char*)page + offset : NULL; +} + +int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end) +{ + int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size; + int i, err; + + for (i = start; i <= end; i += inc) { + err = mthca_table_get(dev, table, i); + if (err) + goto fail; + } + + return 0; + +fail: + while (i > start) { + i -= inc; + mthca_table_put(dev, table, i); + } + + return err; +} + +void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end) +{ + int i; + + if (!mthca_is_memfree(dev)) + return; + + for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size) + mthca_table_put(dev, table, i); +} + +struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, + u64 virt, int obj_size, + int nobj, int reserved, + int use_lowmem) +{ + struct mthca_icm_table *table; + int num_icm; + unsigned chunk_size; + int i; + u8 status; + + num_icm = (obj_size * nobj + MTHCA_TABLE_CHUNK_SIZE -1) / MTHCA_TABLE_CHUNK_SIZE; + + table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL); + if (!table) + return NULL; + + table->virt = virt; + table->num_icm = num_icm; + table->num_obj = nobj; + table->obj_size = obj_size; + table->lowmem = use_lowmem; + KeInitializeMutex( &table->mutex, 0 ); + + for (i = 0; i < num_icm; ++i) + table->icm[i] = NULL; + + for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) { + chunk_size = MTHCA_TABLE_CHUNK_SIZE; + if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size) + chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE; + + table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT, + (use_lowmem ? 
GFP_KERNEL : GFP_HIGHUSER) | + __GFP_NOWARN); + if (!table->icm[i]) + goto err; + if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE, + &status) || status) { + mthca_free_icm(dev, table->icm[i]); + table->icm[i] = NULL; + goto err; + } + + /* + * Add a reference to this ICM chunk so that it never + * gets freed (since it contains reserved firmware objects). + */ + ++table->icm[i]->refcount; + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW, + ("Allocated/max chunks %d:%d, reserved/max objects %#x:%#x, one/total size %#x:%#x at %I64x \n", + i, num_icm, reserved, nobj, obj_size, nobj * obj_size, (u64) virt)); + + return table; + +err: + for (i = 0; i < num_icm; ++i) + if (table->icm[i]) { + mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + } + + kfree(table); + + return NULL; +} + +void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table) +{ + int i; + u8 status; + + for (i = 0; i < table->num_icm; ++i) + if (table->icm[i]) { + mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE, + MTHCA_TABLE_CHUNK_SIZE >> 12, &status); + mthca_free_icm(dev, table->icm[i]); + } + + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW, + ( "Released chunks %d, objects %#x, one/total size %#x:%#x at %I64x \n", + table->num_icm, table->num_obj, table->obj_size, + table->num_obj * table->obj_size, (u64) table->virt)); + kfree(table); +} + +static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page) +{ + return dev->uar_table.uarc_base + + uar->index * dev->uar_table.uarc_size + + page * 4096; +} + +int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index, u64 uaddr, void **kva) +{ + int ret = 0; + u8 status; + int i; + + if (!mthca_is_memfree(dev)) + return 0; + + if (index < 0 || index > dev->uar_table.uarc_size / 8) + return -EINVAL; + + down(&db_tab->mutex); + + i = index / MTHCA_DB_REC_PER_PAGE; + + if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) || + (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) || + (uaddr & 4095)) { + ret = -EINVAL; + goto out; + } + + if (db_tab->page[i].refcount) { + ++db_tab->page[i].refcount; + goto done; + } + + ret = get_user_pages(dev, uaddr & PAGE_MASK, 1, 1, + &db_tab->page[i].mem); + if (ret < 0) + goto out; + + db_tab->page[i].mem.length = 4096; + db_tab->page[i].mem.offset = (unsigned)(uaddr & ~PAGE_MASK); + + ret = pci_map_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + if (ret <= 0) { + put_page(&db_tab->page[i].mem); + goto out; + } + + ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem), + mthca_uarc_virt(dev, uar, i), &status); + if (!ret && status) + ret = -EINVAL; + if (ret) { + pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + put_page(&db_tab->page[i].mem); + goto out; + } + + db_tab->page[i].uvirt = uaddr; + db_tab->page[i].refcount = 1; + +done: + if (kva) + *kva = db_tab->page[i].mem.page; + +out: + up(&db_tab->mutex); + return ret; +} + +void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index) +{ + u8 status; + int ix = index / MTHCA_DB_REC_PER_PAGE; + UNREFERENCED_PARAMETER(uar); + + if (!mthca_is_memfree(dev)) + return; + + /* + * To make our bookkeeping simpler, we don't unmap DB + * pages until we clean up the whole db table. 
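+ * (In this port a page actually is released as soon as its refcount
+ * reaches zero - see below; mthca_cleanup_user_db_tab() sweeps up
+ * whatever is left.)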
+ */ + + down(&db_tab->mutex); + + if (!--db_tab->page[ix].refcount) { + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, ix), 1, &status); + pci_unmap_sg(dev, &db_tab->page[ix].mem, 1, PCI_DMA_TODEVICE); + put_page(&db_tab->page[ix].mem); + db_tab->page[ix].uvirt = 0; + } + + up(&db_tab->mutex); +} + +struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev) +{ + struct mthca_user_db_table *db_tab; + int npages; + int i; + + if (!mthca_is_memfree(dev)) + return NULL; + + npages = dev->uar_table.uarc_size / 4096; + db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL); + if (!db_tab) + return ERR_PTR(-ENOMEM); + + KeInitializeMutex(&db_tab->mutex,0); + for (i = 0; i < npages; ++i) { + db_tab->page[i].refcount = 0; + db_tab->page[i].uvirt = 0; + } + + return db_tab; +} + +void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + for (i = 0; i < dev->uar_table.uarc_size / 4096; ++i) { + if (db_tab->page[i].uvirt) { + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status); + pci_unmap_sg(dev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE); + put_page(&db_tab->page[i].mem); + } + } + + kfree(db_tab); +} + +int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db) +{ + int group; + int start, end, dir; + int i, j; + struct mthca_db_page *page; + int ret = 0; + u8 status; + CPU_2_BE64_PREP; + + down(&dev->db_tab->mutex); + switch (type) { + case MTHCA_DB_TYPE_CQ_ARM: + case MTHCA_DB_TYPE_SQ: + group = 0; + start = 0; + end = dev->db_tab->max_group1; + dir = 1; + break; + + case MTHCA_DB_TYPE_CQ_SET_CI: + case MTHCA_DB_TYPE_RQ: + case MTHCA_DB_TYPE_SRQ: + group = 1; + start = dev->db_tab->npages - 1; + end = dev->db_tab->min_group2; + dir = -1; + break; + + default: + ret = -EINVAL; + goto out; + } + + /* first try to find a free doorbell record slot on an already allocated page (in the bitmap) */ + for (i = start; i != end; i += dir) + if (dev->db_tab->page[i].db_rec && + !bitmap_full(dev->db_tab->page[i].used, + MTHCA_DB_REC_PER_PAGE)) { + page = dev->db_tab->page + i; + goto found; + } + + for (i = start; i != end; i += dir) { + if (!dev->db_tab->page[i].db_rec) { + page = dev->db_tab->page + i; + goto alloc; + } + } + + /* no room left for doorbell pages - get out */ + if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) { + ret = -ENOMEM; + goto out; + } + + /* adjust the group limit indices */ + if (group == 0) + ++dev->db_tab->max_group1; + else + --dev->db_tab->min_group2; + + /* allocate page */ + page = dev->db_tab->page + end; + +alloc: + alloc_dma_zmem_map(dev, 4096, PCI_DMA_BIDIRECTIONAL, &page->sg); + if (!page->sg.page) { + ret = -ENOMEM; + goto out; + } + page->db_rec = (__be64*)page->sg.page; + + ret = mthca_MAP_ICM_page(dev, page->sg.dma_address, + mthca_uarc_virt(dev, &dev->driver_uar, i), &status); + if (!ret && status) + ret = -EINVAL; + if (ret) { + free_dma_mem_map(dev, &page->sg, PCI_DMA_BIDIRECTIONAL); + goto out; + } + + bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE); + +found: + j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE); + set_bit(j, (long*)page->used); + + if (group == 1) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + + ret = i * MTHCA_DB_REC_PER_PAGE + j; + + page->db_rec[j] = CPU_2_BE64((((ULONGLONG)qn << 8) | (type << 5))); + + *db = (__be32 *) &page->db_rec[j]; +out: + up(&dev->db_tab->mutex); + + return ret; +} + +void mthca_free_db(struct mthca_dev *dev, int type, int db_index) +{ + int i, j;
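+	/*
+	 * db_index was produced by mthca_alloc_db() as
+	 * page * MTHCA_DB_REC_PER_PAGE + slot; group-2 records (CQ set_ci,
+	 * RQ, SRQ) are handed out from the top of their page, so the bit
+	 * position is mirrored back below before being cleared in 'used'.
+	 */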
+ struct mthca_db_page *page; + u8 status; + + UNREFERENCED_PARAMETER(type); + + i = db_index / MTHCA_DB_REC_PER_PAGE; + j = db_index % MTHCA_DB_REC_PER_PAGE; + + page = dev->db_tab->page + i; + + down(&dev->db_tab->mutex); + + page->db_rec[j] = 0; + if (i >= dev->db_tab->min_group2) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + clear_bit(j, (long*)page->used); + + if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) && + i >= dev->db_tab->max_group1 - 1) { + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); + + free_dma_mem_map(dev, &page->sg, PCI_DMA_BIDIRECTIONAL); + page->db_rec = NULL; + + if (i == dev->db_tab->max_group1) { + --dev->db_tab->max_group1; + /* XXX may be able to unmap more pages now */ + } + if (i == dev->db_tab->min_group2) + ++dev->db_tab->min_group2; + } + + up(&dev->db_tab->mutex); +} + +int mthca_init_db_tab(struct mthca_dev *dev) +{ + int i; + + if (!mthca_is_memfree(dev)) + return 0; + + dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL); + if (!dev->db_tab) + return -ENOMEM; + + KeInitializeMutex(&dev->db_tab->mutex, 0); + /* number of pages needed for the UAR context table */ + dev->db_tab->npages = dev->uar_table.uarc_size / 4096; + dev->db_tab->max_group1 = 0; + dev->db_tab->min_group2 = dev->db_tab->npages - 1; + /* allocate an array of structures describing the UARC pages */ + dev->db_tab->page = kmalloc(dev->db_tab->npages * + sizeof *dev->db_tab->page, + GFP_KERNEL); + if (!dev->db_tab->page) { + kfree(dev->db_tab); + return -ENOMEM; + } + + for (i = 0; i < dev->db_tab->npages; ++i) + dev->db_tab->page[i].db_rec = NULL; + + return 0; +} + +void mthca_cleanup_db_tab(struct mthca_dev *dev) +{ + int i; + u8 status; + + if (!mthca_is_memfree(dev)) + return; + + /* + * Because, to keep mthca_free_db() simple, we don't always free + * our UARC pages when they become empty, we need to make a sweep + * through the doorbell pages and free any leftover pages now. + */ + for (i = 0; i < dev->db_tab->npages; ++i) { + if (!dev->db_tab->page[i].db_rec) + continue; + + if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE)) + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_LOW ,("Kernel UARC page %d not empty\n", i)); + + mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status); + + free_dma_mem_map(dev, &dev->db_tab->page[i].sg, PCI_DMA_BIDIRECTIONAL); + } + + kfree(dev->db_tab->page); + kfree(dev->db_tab); +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_memfree.h b/branches/Ndi/hw/mthca/kernel/mthca_memfree.h new file mode 100644 index 00000000..184a3577 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_memfree.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_MEMFREE_H +#define MTHCA_MEMFREE_H + + +#define MTHCA_ICM_CHUNK_LEN \ + ((256 - sizeof (struct list_head) - 2 * sizeof (int)) / \ + (sizeof (struct scatterlist))) + +struct mthca_icm_chunk { + struct list_head list; + int npages; + int nsg; + struct scatterlist mem[MTHCA_ICM_CHUNK_LEN]; +}; + +struct mthca_icm { + struct list_head chunk_list; + int refcount; +}; + +#pragma warning( disable : 4200) +struct mthca_icm_table { + u64 virt; + int num_icm; + int num_obj; + int obj_size; + int lowmem; + KMUTEX mutex; + struct mthca_icm *icm[0]; +}; +#pragma warning( default : 4200) + +struct mthca_icm_iter { + struct mthca_icm *icm; + struct mthca_icm_chunk *chunk; + int page_idx; +}; + +struct mthca_dev; + +struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages, + unsigned int gfp_mask); +void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm); + +struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev, + u64 virt, int obj_size, + int nobj, int reserved, + int use_lowmem); +void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table); +int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); +void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj); +void *mthca_table_find(struct mthca_icm_table *table, int obj); +int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end); +void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table, + int start, int end); + +static inline void mthca_icm_first(struct mthca_icm *icm, + struct mthca_icm_iter *iter) +{ + iter->icm = icm; + iter->chunk = list_empty(&icm->chunk_list) ? 
+ NULL : list_entry(icm->chunk_list.next, + struct mthca_icm_chunk, list); + iter->page_idx = 0; +} + +static inline int mthca_icm_last(struct mthca_icm_iter *iter) +{ + return !iter->chunk; +} + +static inline void mthca_icm_next(struct mthca_icm_iter *iter) +{ + if (++iter->page_idx >= iter->chunk->nsg) { + if (iter->chunk->list.next == &iter->icm->chunk_list) { + iter->chunk = NULL; + return; + } + + iter->chunk = list_entry(iter->chunk->list.next, + struct mthca_icm_chunk, list); + iter->page_idx = 0; + } +} + +static inline dma_addr_t mthca_icm_addr(struct mthca_icm_iter *iter) +{ + return sg_dma_address(&iter->chunk->mem[iter->page_idx]); +} + +static inline unsigned long mthca_icm_size(struct mthca_icm_iter *iter) +{ + return sg_dma_len(&iter->chunk->mem[iter->page_idx]); +} + +enum { + MTHCA_DB_REC_PER_PAGE = 4096 / 8 +}; + +struct mthca_db_page { + DECLARE_BITMAP(used, MTHCA_DB_REC_PER_PAGE); + __be64 *db_rec; + struct scatterlist sg; +}; + +struct mthca_db_table { + int npages; + int max_group1; + int min_group2; + struct mthca_db_page *page; + KMUTEX mutex; +}; + +enum mthca_db_type { + MTHCA_DB_TYPE_INVALID = 0x0, + MTHCA_DB_TYPE_CQ_SET_CI = 0x1, + MTHCA_DB_TYPE_CQ_ARM = 0x2, + MTHCA_DB_TYPE_SQ = 0x3, + MTHCA_DB_TYPE_RQ = 0x4, + MTHCA_DB_TYPE_SRQ = 0x5, + MTHCA_DB_TYPE_GROUP_SEP = 0x7 +}; + +struct mthca_user_db_table; +struct mthca_uar; + +int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index, u64 uaddr, void **kva); +void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab, int index); +struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev); +void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar, + struct mthca_user_db_table *db_tab); + +int mthca_init_db_tab(struct mthca_dev *dev); +void mthca_cleanup_db_tab(struct mthca_dev *dev); +int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type, u32 qn, __be32 **db); +void mthca_free_db(struct mthca_dev *dev, int type, int db_index); + +#endif /* MTHCA_MEMFREE_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_mr.c b/branches/Ndi/hw/mthca/kernel/mthca_mr.c new file mode 100644 index 00000000..f76b4b0c --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_mr.c @@ -0,0 +1,970 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_mr.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order); +static void mthca_buddy_cleanup(struct mthca_buddy *buddy); + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_buddy_init) +#pragma alloc_text (PAGE, mthca_buddy_cleanup) +#pragma alloc_text (PAGE, mthca_init_mr_table) +#pragma alloc_text (PAGE, mthca_cleanup_mr_table) +#endif + +struct mthca_mtt { + struct mthca_buddy *buddy; + int order; + u32 first_seg; +}; + +/* + * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits. + */ +#pragma pack(push,1) +struct mthca_mpt_entry { + __be32 flags; + __be32 page_size; + __be32 key; + __be32 pd; + __be64 start; + __be64 length; + __be32 lkey; + __be32 window_count; + __be32 window_count_limit; + __be64 mtt_seg; + __be32 mtt_sz; /* Arbel only */ + u32 reserved[2]; +} ; +#pragma pack(pop) + +#define MTHCA_MPT_FLAG_SW_OWNS (0xfUL << 28) +#define MTHCA_MPT_FLAG_MIO (1 << 17) +#define MTHCA_MPT_FLAG_BIND_ENABLE (1 << 15) +#define MTHCA_MPT_FLAG_PHYSICAL (1 << 9) +#define MTHCA_MPT_FLAG_REGION (1 << 8) + +#define MTHCA_MTT_FLAG_PRESENT 1 + +#define MTHCA_MPT_STATUS_SW 0xF0 +#define MTHCA_MPT_STATUS_HW 0x00 + +#define SINAI_FMR_KEY_INC 0x1000000 + +static void dump_mtt(u32 print_lvl, __be64 *mtt_entry ,int list_len) +{ + int i; + UNUSED_PARAM_WOWPP(mtt_entry); // for release version + UNUSED_PARAM_WOWPP(print_lvl); + HCA_PRINT(print_lvl ,HCA_DBG_MEMORY ,("Dumping MTT entry len %d :\n",list_len)); + for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; i=i+4) { + HCA_PRINT(print_lvl ,HCA_DBG_MEMORY ,("[%02x] %016I64x %016I64x %016I64x %016I64x\n",i, + cl_ntoh64(mtt_entry[i]), + cl_ntoh64(mtt_entry[i+1]), + cl_ntoh64(mtt_entry[i+2]), + cl_ntoh64(mtt_entry[i+3]))); + } +} + + +static void dump_mpt(u32 print_lvl, struct mthca_mpt_entry *mpt_entry ) +{ + int i; + UNUSED_PARAM_WOWPP(mpt_entry); // for release version + UNUSED_PARAM_WOWPP(print_lvl); + HCA_PRINT(print_lvl ,HCA_DBG_MEMORY ,("Dumping MPT entry %08x :\n", mpt_entry->key)); + for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; i=i+4) { + HCA_PRINT(print_lvl ,HCA_DBG_MEMORY ,("[%02x] %08x %08x %08x %08x \n",i, + cl_ntoh32(((__be32 *) mpt_entry)[i]), + cl_ntoh32(((__be32 *) mpt_entry)[i+1]), + cl_ntoh32(((__be32 *) mpt_entry)[i+2]), + cl_ntoh32(((__be32 *) mpt_entry)[i+3]))); + } +} + + + + + + + + +/* + * Buddy allocator for MTT segments (currently not very efficient + * since it doesn't keep a free list and just searches linearly + * through the bitmaps) + */ + +static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order) +{ + int o; + u32 m; + u32 seg; + SPIN_LOCK_PREP(lh); + + spin_lock(&buddy->lock, &lh); + + for (o = order; o <= buddy->max_order; ++o) { + m = 1 << (buddy->max_order - o); + seg = find_first_bit(buddy->bits[o], m); + if (seg < m) + goto found; + } + + spin_unlock(&lh); + return (u32)-1; + + found: + clear_bit(seg, (long*)buddy->bits[o]); + + while (o > order) { + --o; + seg <<= 1; + set_bit(seg ^ 1, (long*)buddy->bits[o]); + } + + spin_unlock(&lh); + + seg <<= order; + + return seg; +} + +static void 
mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order) +{ + SPIN_LOCK_PREP(lh); + + seg >>= order; + + spin_lock(&buddy->lock, &lh); + + while (test_bit(seg ^ 1, buddy->bits[order])) { + clear_bit(seg ^ 1, (long*)buddy->bits[order]); + seg >>= 1; + ++order; + } + + set_bit(seg, (long*)buddy->bits[order]); + + spin_unlock(&lh); +} + +static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) +{ + int i, s; + + buddy->max_order = max_order; + spin_lock_init(&buddy->lock); + + buddy->bits = kmalloc((buddy->max_order + 1) * sizeof (long *), + GFP_KERNEL); + if (!buddy->bits) + goto err_out; + + RtlZeroMemory(buddy->bits, (buddy->max_order + 1) * sizeof (long *)); + + for (i = 0; i <= buddy->max_order; ++i) { + s = BITS_TO_LONGS(1 << (buddy->max_order - i)); + buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL); + if (!buddy->bits[i]) + goto err_out_free; + bitmap_zero(buddy->bits[i], + 1 << (buddy->max_order - i)); + } + + set_bit(0, (long*)buddy->bits[buddy->max_order]); + + return 0; + +err_out_free: + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); + +err_out: + return -ENOMEM; +} + +static void mthca_buddy_cleanup(struct mthca_buddy *buddy) +{ + int i; + + for (i = 0; i <= buddy->max_order; ++i) + kfree(buddy->bits[i]); + + kfree(buddy->bits); +} + +static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order, + struct mthca_buddy *buddy) +{ + u32 seg = mthca_buddy_alloc(buddy, order); + + if (seg == -1) + return (u32)-1; + + if (mthca_is_memfree(dev)) + if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg, + seg + (1 << order) - 1)) { + mthca_buddy_free(buddy, seg, order); + seg = (u32)-1; + } + + return seg; +} + +static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size, + struct mthca_buddy *buddy) +{ + struct mthca_mtt *mtt; + int i; + HCA_ENTER(HCA_DBG_MEMORY); + if (size <= 0) + return ERR_PTR(-EINVAL); + + mtt = kmalloc(sizeof *mtt, GFP_KERNEL); + if (!mtt) + return ERR_PTR(-ENOMEM); + + mtt->buddy = buddy; + mtt->order = 0; + for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1) + ++mtt->order; + + mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); + if (mtt->first_seg == -1) { + kfree(mtt); + return ERR_PTR(-ENOMEM); + } + HCA_EXIT(HCA_DBG_MEMORY); + return mtt; +} + +struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size) +{ + return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy); +} + +void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt) +{ + if (!mtt) + return; + + mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order); + + mthca_table_put_range(dev, dev->mr_table.mtt_table, + mtt->first_seg, + mtt->first_seg + (1 << mtt->order) - 1); + + kfree(mtt); +} + +int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt, + int start_index, u64 *buffer_list, int list_len) +{ + struct mthca_mailbox *mailbox; + __be64 *mtt_entry; + int err = 0; + u8 status; + int i; + u64 val = 1; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + mtt_entry = mailbox->buf; + + while (list_len > 0) { + val = dev->mr_table.mtt_base + + mtt->first_seg * MTHCA_MTT_SEG_SIZE + start_index * 8; + //TODO: a workaround of bug in _byteswap_uint64 + // in release version optimizer puts the above expression into the function call and generates incorrect code + // so we call the macro to work around that + mtt_entry[0] = CL_HTON64(val); + mtt_entry[1] = 0; + for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; 
++i) { + val = buffer_list[i]; + // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + mtt_entry[i + 2] = cl_hton64(val); + } + + /* + * If we have an odd number of entries to write, add + * one more dummy entry for firmware efficiency. + */ + if (i & 1) + mtt_entry[i + 2] = 0; + + dump_mtt(TRACE_LEVEL_VERBOSE, mtt_entry ,i); + + err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("WRITE_MTT failed (%d)\n", err)); + goto out; + } + if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("WRITE_MTT returned status 0x%02x\n", + status)); + err = -EINVAL; + goto out; + } + + list_len -= i; + start_index += i; + buffer_list += i; + } + +out: + mthca_free_mailbox(dev, mailbox); + return err; +} + +static inline u32 tavor_hw_index_to_key(u32 ind) +{ + return ind; +} + +static inline u32 tavor_key_to_hw_index(u32 key) +{ + return key; +} + +static inline u32 arbel_hw_index_to_key(u32 ind) +{ + return (ind >> 24) | (ind << 8); +} + +static inline u32 arbel_key_to_hw_index(u32 key) +{ + return (key << 24) | (key >> 8); +} + +static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind) +{ + if (mthca_is_memfree(dev)) + return arbel_hw_index_to_key(ind); + else + return tavor_hw_index_to_key(ind); +} + +static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key) +{ + if (mthca_is_memfree(dev)) + return arbel_key_to_hw_index(key); + else + return tavor_key_to_hw_index(key); +} + + +static inline u32 adjust_key(struct mthca_dev *dev, u32 key) +{ + if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) + return ((key << 20) & 0x800000) | (key & 0x7fffff); + else + return key; +} + +int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift, + u64 iova, u64 total_size, mthca_mpt_access_t access, struct mthca_mr *mr) +{ + struct mthca_mailbox *mailbox; + struct mthca_mpt_entry *mpt_entry; + u32 key; + int err; + u8 status; + CPU_2_BE64_PREP; + + WARN_ON(buffer_size_shift >= 32); + + key = mthca_alloc(&dev->mr_table.mpt_alloc); + if (key == -1) + return -ENOMEM; + mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key); + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->mr_table.mpt_table, key); + if (err) + goto err_out_mpt_free; + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_out_table; + } + mpt_entry = mailbox->buf; + + mpt_entry->flags = cl_hton32(MTHCA_MPT_FLAG_SW_OWNS | + MTHCA_MPT_FLAG_MIO | + MTHCA_MPT_FLAG_REGION | + access); + if (!mr->mtt) + mpt_entry->flags |= cl_hton32(MTHCA_MPT_FLAG_PHYSICAL); + + mpt_entry->page_size = cl_hton32(buffer_size_shift - 12); + mpt_entry->key = cl_hton32(key); + mpt_entry->pd = cl_hton32(pd); + mpt_entry->start = cl_hton64(iova); + mpt_entry->length = cl_hton64(total_size); + + RtlZeroMemory(&mpt_entry->lkey, + sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey)); + + if (mr->mtt) + mpt_entry->mtt_seg = + CPU_2_BE64(dev->mr_table.mtt_base + + mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE); + + { + dump_mpt(TRACE_LEVEL_VERBOSE, mpt_entry); + } + + err = mthca_SW2HW_MPT(dev, mailbox, + key & (dev->limits.num_mpts - 1), + &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("SW2HW_MPT failed (%d)\n", err)); + goto err_out_mailbox; + } else if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("SW2HW_MPT returned status 0x%02x\n", + status)); + err = -EINVAL; + goto err_out_mailbox; + } + + 
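+ /* SW2HW_MPT succeeded: ownership of the MPT entry has passed from software to the HCA, so the mailbox copy can be released. */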
mthca_free_mailbox(dev, mailbox); + return err; + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_table: + mthca_table_put(dev, dev->mr_table.mpt_table, key); + +err_out_mpt_free: + mthca_free(&dev->mr_table.mpt_alloc, key); + return err; +} + +int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd, + mthca_mpt_access_t access, struct mthca_mr *mr) +{ + mr->mtt = NULL; + return mthca_mr_alloc(dev, pd, 12, 0, ~0Ui64, access, mr); +} + +int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd, + u64 *buffer_list, int buffer_size_shift, + int list_len, u64 iova, u64 total_size, + mthca_mpt_access_t access, struct mthca_mr *mr) +{ + int err; + HCA_ENTER(HCA_DBG_MEMORY); + mr->mtt = mthca_alloc_mtt(dev, list_len); + if (IS_ERR(mr->mtt)){ + err= PTR_ERR(mr->mtt); + goto out; + } + + err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len); + if (err) { + mthca_free_mtt(dev, mr->mtt); + goto out; + } + + err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova, + total_size, access, mr); + if (err) + mthca_free_mtt(dev, mr->mtt); + +out: + HCA_EXIT(HCA_DBG_MEMORY); + return err; +} + +/* Free mr or fmr */ +static void mthca_free_region(struct mthca_dev *dev, u32 lkey) +{ + mthca_table_put(dev, dev->mr_table.mpt_table, key_to_hw_index(dev, lkey)); + mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey)); +} + +void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr) +{ + int err; + u8 status; + + err = mthca_HW2SW_MPT(dev, NULL, + key_to_hw_index(dev, mr->ibmr.lkey) & + (dev->limits.num_mpts - 1), + &status); + if (err){ + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("HW2SW_MPT failed (%d)\n", err)); + }else if (status){ + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("HW2SW_MPT returned status 0x%02x\n", + status)); + } + + mthca_free_region(dev, mr->ibmr.lkey); + mthca_free_mtt(dev, mr->mtt); +} + +int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd, + mthca_mpt_access_t access, struct mthca_fmr *fmr) +{ + struct mthca_mpt_entry *mpt_entry; + struct mthca_mailbox *mailbox; + u64 mtt_seg; + u32 key, idx; + u8 status; + int list_len = fmr->attr.max_pages; + int err = -ENOMEM; + int i; + CPU_2_BE64_PREP; + + if (fmr->attr.page_shift < 12 || fmr->attr.page_shift >= 32) + return -EINVAL; + + /* For Arbel, all MTTs must fit in the same page. 
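(Illustrative arithmetic, assuming 4 KB pages and 8-byte MTT entries: attr.max_pages may not exceed PAGE_SIZE / 8 = 512.)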
*/ + if (mthca_is_memfree(dev) && + fmr->attr.max_pages * sizeof *fmr->mem.arbel.mtts > PAGE_SIZE) + return -EINVAL; + + fmr->maps = 0; + + key = mthca_alloc(&dev->mr_table.mpt_alloc); + if (key == -1) + return -ENOMEM; + key = adjust_key(dev, key); + + idx = key & (dev->limits.num_mpts - 1); + fmr->ibfmr.rkey = fmr->ibfmr.lkey = hw_index_to_key(dev, key); + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->mr_table.mpt_table, key); + if (err) + goto err_out_mpt_free; + + fmr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key); + BUG_ON(!fmr->mem.arbel.mpt); + } else + fmr->mem.tavor.mpt = (struct mthca_mpt_entry*)((u8*)dev->mr_table.tavor_fmr.mpt_base + + sizeof *(fmr->mem.tavor.mpt) * idx); + + fmr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy); + if (IS_ERR(fmr->mtt)) + goto err_out_table; + + mtt_seg =fmr->mtt->first_seg * MTHCA_MTT_SEG_SIZE; + + if (mthca_is_memfree(dev)) { + fmr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table, + fmr->mtt->first_seg); + BUG_ON(!fmr->mem.arbel.mtts); + } else + fmr->mem.tavor.mtts = (u64*)((u8*)dev->mr_table.tavor_fmr.mtt_base + mtt_seg); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + goto err_out_free_mtt; + + mpt_entry = mailbox->buf; + + mpt_entry->flags = cl_hton32(MTHCA_MPT_FLAG_SW_OWNS | + MTHCA_MPT_FLAG_MIO | + MTHCA_MPT_FLAG_REGION | + access); + + mpt_entry->page_size = cl_hton32(fmr->attr.page_shift - 12); + mpt_entry->key = cl_hton32(key); + mpt_entry->pd = cl_hton32(pd); + RtlZeroMemory(&mpt_entry->start, + sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start)); + mpt_entry->mtt_seg = CPU_2_BE64(dev->mr_table.mtt_base + mtt_seg); + + { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Dumping MPT entry %08x:\n", fmr->ibfmr.lkey)); + for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; i=i+4) { + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("[%02x] %08x %08x %08x %08x \n",i, + cl_ntoh32(((__be32 *) mpt_entry)[i]), + cl_ntoh32(((__be32 *) mpt_entry)[i+1]), + cl_ntoh32(((__be32 *) mpt_entry)[i+2]), + cl_ntoh32(((__be32 *) mpt_entry)[i+3]))); + } + } + + err = mthca_SW2HW_MPT(dev, mailbox, + key & (dev->limits.num_mpts - 1), + &status); + + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("SW2HW_MPT failed (%d)\n", err)); + goto err_out_mailbox_free; + } + if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("SW2HW_MPT returned status 0x%02x\n", + status)); + err = -EINVAL; + goto err_out_mailbox_free; + } + + mthca_free_mailbox(dev, mailbox); + return 0; + +err_out_mailbox_free: + mthca_free_mailbox(dev, mailbox); + +err_out_free_mtt: + mthca_free_mtt(dev, fmr->mtt); + +err_out_table: + mthca_table_put(dev, dev->mr_table.mpt_table, key); + +err_out_mpt_free: + mthca_free(&dev->mr_table.mpt_alloc, fmr->ibfmr.lkey); + return err; +} + + +int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + if (fmr->maps) + return -EBUSY; + + mthca_free_region(dev, fmr->ibfmr.lkey); + mthca_free_mtt(dev, fmr->mtt); + + return 0; +} + + +static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list, + int list_len, u64 iova) +{ + int page_mask; + UNREFERENCED_PARAMETER(page_list); + + if (list_len > fmr->attr.max_pages) + return -EINVAL; + + page_mask = (1 << fmr->attr.page_shift) - 1; + + /* We are getting page lists, so va must be page aligned. 
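(For example, with page_shift == 12 the page_mask computed above is 0xfff, so an iova of 0x12345678 fails the check below while 0x12345000 passes.)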
*/ + if (iova & page_mask) + return -EINVAL; + + /* Trust the user not to pass misaligned data in page_list */ + #if 0 + for (i = 0; i < list_len; ++i) { + if (page_list[i] & ~page_mask) + return -EINVAL; + } + #endif + + if (fmr->maps >= fmr->attr.max_maps) + return -EINVAL; + + return 0; +} + + +int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova) +{ + struct mthca_fmr *fmr = to_mfmr(ibfmr); + struct mthca_dev *dev = to_mdev(ibfmr->device); + struct mthca_mpt_entry mpt_entry; + u32 key; + int i, err; + CPU_2_BE64_PREP; + + err = mthca_check_fmr(fmr, page_list, list_len, iova); + if (err) + return err; + + ++fmr->maps; + + key = tavor_key_to_hw_index(fmr->ibfmr.lkey); + key += dev->limits.num_mpts; + fmr->ibfmr.lkey = fmr->ibfmr.rkey = tavor_hw_index_to_key(key); + + writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); + + for (i = 0; i < list_len; ++i) { + __be64 mtt_entry; + u64 val = page_list[i]; + // BUG in compiler: it can't perform OR on u64 !!! We perform OR on the low dword + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + mtt_entry = cl_hton64(val); + mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i); + } + + mpt_entry.lkey = cl_hton32(key); + mpt_entry.length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift)); + mpt_entry.start = cl_hton64(iova); + + __raw_writel((u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key); + memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start, + offsetof(struct mthca_mpt_entry, window_count) - + offsetof(struct mthca_mpt_entry, start)); + + writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt); + + return 0; +} + +int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list, + int list_len, u64 iova) +{ + struct mthca_fmr *fmr = to_mfmr(ibfmr); + struct mthca_dev *dev = to_mdev(ibfmr->device); + u32 key; + int i, err; + CPU_2_BE64_PREP; + + err = mthca_check_fmr(fmr, page_list, list_len, iova); + if (err) + return err; + + ++fmr->maps; + + key = arbel_key_to_hw_index(fmr->ibfmr.lkey); + if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) + key += SINAI_FMR_KEY_INC; + else + key += dev->limits.num_mpts; + fmr->ibfmr.lkey = fmr->ibfmr.rkey = arbel_hw_index_to_key(key); + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; + + wmb(); + + for (i = 0; i < list_len; ++i) { + // BUG in compiler: it can't perform OR on u64 !!! 
We perform OR on the low dword + u64 val = page_list[i]; + *(PULONG)&val |= MTHCA_MTT_FLAG_PRESENT; + fmr->mem.arbel.mtts[i] = cl_hton64(val); + } + + fmr->mem.arbel.mpt->key = cl_hton32(key); + fmr->mem.arbel.mpt->lkey = cl_hton32(key); + fmr->mem.arbel.mpt->length = CPU_2_BE64(list_len * (1Ui64 << fmr->attr.page_shift)); + fmr->mem.arbel.mpt->start = cl_hton64(iova); + + wmb(); + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW; + + wmb(); + + return 0; +} + + +void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + u32 key; + + if (!fmr->maps) + return; + + key = tavor_key_to_hw_index(fmr->ibfmr.lkey); + key &= dev->limits.num_mpts - 1; + fmr->ibfmr.lkey = fmr->ibfmr.rkey = tavor_hw_index_to_key(key); + + fmr->maps = 0; + + writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt); +} + + +void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr) +{ + u32 key; + + if (!fmr->maps) + return; + + key = arbel_key_to_hw_index(fmr->ibfmr.lkey); + key &= dev->limits.num_mpts - 1; + fmr->ibfmr.lkey = fmr->ibfmr.rkey = arbel_hw_index_to_key(key); + + fmr->maps = 0; + + *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW; +} + +int mthca_init_mr_table(struct mthca_dev *dev) +{ + int err, i; + + err = mthca_alloc_init(&dev->mr_table.mpt_alloc, + (u32)dev->limits.num_mpts, + (u32)~0, (u32)dev->limits.reserved_mrws); + if (err) + return err; + + if (!mthca_is_memfree(dev) && + (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN)) + dev->limits.fmr_reserved_mtts = 0; + else + dev->mthca_flags |= MTHCA_FLAG_FMR; + + if (dev->mthca_flags & MTHCA_FLAG_SINAI_OPT) + HCA_PRINT(TRACE_LEVEL_INFORMATION ,HCA_DBG_MEMORY ,("Memory key throughput optimization activated.\n")); + + err = mthca_buddy_init(&dev->mr_table.mtt_buddy, + fls(dev->limits.num_mtt_segs - 1)); + + if (err) + goto err_mtt_buddy; + + dev->mr_table.tavor_fmr.mpt_base = NULL; + dev->mr_table.tavor_fmr.mtt_base = NULL; + + if (dev->limits.fmr_reserved_mtts) { + i = fls(dev->limits.fmr_reserved_mtts - 1); + + if (i >= 31) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("Unable to reserve 2^31 FMR MTTs.\n")); + err = -EINVAL; + goto err_fmr_mpt; + } + + dev->mr_table.tavor_fmr.mpt_base = + ioremap(dev->mr_table.mpt_base, + (1 << i) * sizeof (struct mthca_mpt_entry), + &dev->mr_table.tavor_fmr.mpt_base_size); + + if (!dev->mr_table.tavor_fmr.mpt_base) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("MPT ioremap for FMR failed.\n")); + err = -ENOMEM; + goto err_fmr_mpt; + } + + dev->mr_table.tavor_fmr.mtt_base = + ioremap(dev->mr_table.mtt_base, + (1 << i) * MTHCA_MTT_SEG_SIZE, + &dev->mr_table.tavor_fmr.mtt_base_size ); + + if (!dev->mr_table.tavor_fmr.mtt_base) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_MEMORY ,("MTT ioremap for FMR failed.\n")); + err = -ENOMEM; + goto err_fmr_mtt; + } + + err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i); + if (err) + goto err_fmr_mtt_buddy; + + /* Prevent regular MRs from using FMR keys */ + err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i); + if (err) + goto err_reserve_fmr; + + dev->mr_table.fmr_mtt_buddy = + &dev->mr_table.tavor_fmr.mtt_buddy; + } else + dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy; + + /* FMR table is always the first, take reserved MTTs out of there */ + if (dev->limits.reserved_mtts) { + i = fls(dev->limits.reserved_mtts - 1); + + if (mthca_alloc_mtt_range(dev, i, + dev->mr_table.fmr_mtt_buddy) == -1) { + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_MEMORY,("MTT table of order %d is too small.\n", + dev->mr_table.fmr_mtt_buddy->max_order)); + err 
= -ENOMEM; + goto err_reserve_mtts; + } + } + + return 0; + +err_reserve_mtts: +err_reserve_fmr: + if (dev->limits.fmr_reserved_mtts) + mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); + +err_fmr_mtt_buddy: + if (dev->mr_table.tavor_fmr.mtt_base) + iounmap(dev->mr_table.tavor_fmr.mtt_base, + dev->mr_table.tavor_fmr.mtt_base_size); + +err_fmr_mtt: + if (dev->mr_table.tavor_fmr.mpt_base) + iounmap(dev->mr_table.tavor_fmr.mpt_base, + dev->mr_table.tavor_fmr.mpt_base_size); + +err_fmr_mpt: + mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); + +err_mtt_buddy: + mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); + + return err; +} + +void mthca_cleanup_mr_table(struct mthca_dev *dev) +{ + /* XXX check if any MRs are still allocated? */ + if (dev->limits.fmr_reserved_mtts) + mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy); + + mthca_buddy_cleanup(&dev->mr_table.mtt_buddy); + + if (dev->mr_table.tavor_fmr.mtt_base) + iounmap(dev->mr_table.tavor_fmr.mtt_base, + dev->mr_table.tavor_fmr.mtt_base_size); + if (dev->mr_table.tavor_fmr.mpt_base) + iounmap(dev->mr_table.tavor_fmr.mpt_base, + dev->mr_table.tavor_fmr.mpt_base_size); + + mthca_alloc_cleanup(&dev->mr_table.mpt_alloc); +} + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_pd.c b/branches/Ndi/hw/mthca/kernel/mthca_pd.c new file mode 100644 index 00000000..a39dc401 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_pd.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mthca_dev.h" + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_pd_table) +#pragma alloc_text (PAGE, mthca_cleanup_pd_table) +#endif + +int mthca_pd_alloc(struct mthca_dev *dev, int privileged, struct mthca_pd *pd) +{ + int err = 0; + + pd->privileged = privileged; + + atomic_set(&pd->sqp_count, 0); + pd->pd_num = mthca_alloc(&dev->pd_table.alloc); + if (pd->pd_num == -1) + return -ENOMEM; + + if (privileged) { + err = mthca_mr_alloc_notrans(dev, pd->pd_num, + MTHCA_MPT_FLAG_LOCAL_READ | + MTHCA_MPT_FLAG_LOCAL_WRITE, + &pd->ntmr); + if (err) + mthca_free(&dev->pd_table.alloc, pd->pd_num); + } + + return err; +} + +void mthca_pd_free(struct mthca_dev *dev, struct mthca_pd *pd) +{ + if (pd->privileged) + mthca_free_mr(dev, &pd->ntmr); + mthca_free(&dev->pd_table.alloc, pd->pd_num); +} + +int mthca_init_pd_table(struct mthca_dev *dev) +{ + return mthca_alloc_init(&dev->pd_table.alloc, + dev->limits.num_pds, + (1 << 24) - 1, + dev->limits.reserved_pds); +} + +void mthca_cleanup_pd_table(struct mthca_dev *dev) +{ + /* XXX check if any PDs are still allocated? */ + mthca_alloc_cleanup(&dev->pd_table.alloc); +} + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_profile.c b/branches/Ndi/hw/mthca/kernel/mthca_profile.c new file mode 100644 index 00000000..873c1e51 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_profile.c @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "mthca_profile.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_profile.tmh" +#endif + +enum { + MTHCA_RES_QP, + MTHCA_RES_EEC, + MTHCA_RES_SRQ, + MTHCA_RES_CQ, + MTHCA_RES_EQP, + MTHCA_RES_EEEC, + MTHCA_RES_EQ, + MTHCA_RES_RDB, + MTHCA_RES_MCG, + MTHCA_RES_MPT, + MTHCA_RES_MTT, + MTHCA_RES_UAR, + MTHCA_RES_UDAV, + MTHCA_RES_UARC, + MTHCA_RES_NUM +}; + +enum { + MTHCA_NUM_EQS = 32, + MTHCA_NUM_PDS = 1 << 15 +}; + +u64 mthca_make_profile(struct mthca_dev *dev, + struct mthca_profile *request, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca) +{ + struct mthca_resource { + u64 size; + u64 start; + int type; + int num; + int log_num; + }; + + u64 mem_base, mem_avail; + u64 total_size = 0; + struct mthca_resource *profile; + struct mthca_resource tmp; + int i, j; + + profile = kmalloc(MTHCA_RES_NUM * sizeof *profile, GFP_KERNEL); + if (!profile) + return (u64)-ENOMEM; + + RtlZeroMemory(profile, MTHCA_RES_NUM * sizeof *profile); + + profile[MTHCA_RES_QP].size = dev_lim->qpc_entry_sz; + profile[MTHCA_RES_EEC].size = dev_lim->eec_entry_sz; + profile[MTHCA_RES_SRQ].size = dev_lim->srq_entry_sz; + profile[MTHCA_RES_CQ].size = dev_lim->cqc_entry_sz; + profile[MTHCA_RES_EQP].size = dev_lim->eqpc_entry_sz; + profile[MTHCA_RES_EEEC].size = dev_lim->eeec_entry_sz; + profile[MTHCA_RES_EQ].size = dev_lim->eqc_entry_sz; + profile[MTHCA_RES_RDB].size = MTHCA_RDB_ENTRY_SIZE; + profile[MTHCA_RES_MCG].size = MTHCA_MGM_ENTRY_SIZE; + profile[MTHCA_RES_MPT].size = dev_lim->mpt_entry_sz; + profile[MTHCA_RES_MTT].size = MTHCA_MTT_SEG_SIZE; + profile[MTHCA_RES_UAR].size = dev_lim->uar_scratch_entry_sz; + profile[MTHCA_RES_UDAV].size = MTHCA_AV_SIZE; + profile[MTHCA_RES_UARC].size = request->uarc_size; + + profile[MTHCA_RES_QP].num = request->num_qp; + profile[MTHCA_RES_SRQ].num = request->num_srq; + profile[MTHCA_RES_EQP].num = request->num_qp; + profile[MTHCA_RES_RDB].num = request->num_qp * request->rdb_per_qp; + profile[MTHCA_RES_CQ].num = request->num_cq; + profile[MTHCA_RES_EQ].num = MTHCA_NUM_EQS; + profile[MTHCA_RES_MCG].num = request->num_mcg; + profile[MTHCA_RES_MPT].num = request->num_mpt; + profile[MTHCA_RES_MTT].num = request->num_mtt; + profile[MTHCA_RES_UAR].num = request->num_uar; + profile[MTHCA_RES_UARC].num = request->num_uar; + profile[MTHCA_RES_UDAV].num = request->num_udav; + + for (i = 0; i < MTHCA_RES_NUM; ++i) { + profile[i].type = i; + profile[i].log_num = max(ffs(profile[i].num) - 1, 0); + profile[i].size *= profile[i].num; + if (mthca_is_memfree(dev)) + profile[i].size = max(profile[i].size, (u64) PAGE_SIZE); + } + + if (mthca_is_memfree(dev)) { + mem_base = 0; + mem_avail = dev_lim->hca.arbel.max_icm_sz; + } else { + mem_base = dev->ddr_start; + mem_avail = dev->fw.tavor.fw_start - dev->ddr_start; + } + + /* + * Sort the resources in decreasing order of size. Since they + * all have sizes that are powers of 2, we'll be able to keep + * resources aligned to their size and pack them without gaps + * using the sorted order. 
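+ * (For example, power-of-2 sizes 64 KB, 16 KB and 4 KB packed in that order start at offsets 0x0, 0x10000 and 0x14000; each start is a multiple of its own size because every size placed before it is at least as large.)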
*/ + for (i = MTHCA_RES_NUM; i > 0; --i) + for (j = 1; j < i; ++j) { + if (profile[j].size > profile[j - 1].size) { + tmp = profile[j]; + profile[j] = profile[j - 1]; + profile[j - 1] = tmp; + } + } + + for (i = 0; i < MTHCA_RES_NUM; ++i) { + if (profile[i].size) { + profile[i].start = mem_base + total_size; + total_size += profile[i].size; + } + if (total_size > mem_avail) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_LOW,("Profile requires 0x%I64x bytes; " + "won't fit in 0x%I64x bytes of context memory.\n", + (u64) total_size, + (u64) mem_avail)); + kfree(profile); + return (u64)-ENOMEM; + } + + if (profile[i].size) + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("profile[%2d]--%2d/%2d @ 0x%16I64x " + "(size 0x%8I64x)\n", + i, profile[i].type, profile[i].log_num, + (u64) profile[i].start, + (u64) profile[i].size)); + } + + if (mthca_is_memfree(dev)){ + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA context memory: reserving %d KB\n", + (int) (total_size >> 10))); + }else{ + HCA_PRINT(TRACE_LEVEL_VERBOSE,HCA_DBG_LOW,("HCA memory: allocated %d KB/%d KB (%d KB free)\n", + (int) (total_size >> 10), (int) (mem_avail >> 10), + (int) ((mem_avail - total_size) >> 10))); + } + for (i = 0; i < MTHCA_RES_NUM; ++i) { + int mc_entry_sz = MTHCA_MGM_ENTRY_SIZE; + int mtt_seg_sz = MTHCA_MTT_SEG_SIZE; + + switch (profile[i].type) { + case MTHCA_RES_QP: + dev->limits.num_qps = profile[i].num; + init_hca->qpc_base = profile[i].start; + init_hca->log_num_qps = (u8)profile[i].log_num; + break; + case MTHCA_RES_EEC: + dev->limits.num_eecs = profile[i].num; + init_hca->eec_base = profile[i].start; + init_hca->log_num_eecs = (u8)profile[i].log_num; + break; + case MTHCA_RES_SRQ: + dev->limits.num_srqs = profile[i].num; + init_hca->srqc_base = profile[i].start; + init_hca->log_num_srqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_CQ: + dev->limits.num_cqs = profile[i].num; + init_hca->cqc_base = profile[i].start; + init_hca->log_num_cqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_EQP: + init_hca->eqpc_base = profile[i].start; + break; + case MTHCA_RES_EEEC: + init_hca->eeec_base = profile[i].start; + break; + case MTHCA_RES_EQ: + dev->limits.num_eqs = profile[i].num; + init_hca->eqc_base = profile[i].start; + init_hca->log_num_eqs = (u8)profile[i].log_num; + break; + case MTHCA_RES_RDB: + for (dev->qp_table.rdb_shift = 0; + request->num_qp << dev->qp_table.rdb_shift < profile[i].num; + ++dev->qp_table.rdb_shift) + ; /* nothing */ + dev->qp_table.rdb_base = (u32) profile[i].start; + init_hca->rdb_base = profile[i].start; + break; + case MTHCA_RES_MCG: + dev->limits.num_mgms = profile[i].num >> 1; + dev->limits.num_amgms = profile[i].num >> 1; + init_hca->mc_base = profile[i].start; + init_hca->log_mc_entry_sz = (u16)(ffs(mc_entry_sz) - 1); + init_hca->log_mc_table_sz = (u8)profile[i].log_num; + init_hca->mc_hash_sz = (u16)(1 << (profile[i].log_num - 1)); + break; + case MTHCA_RES_MPT: + dev->limits.num_mpts = profile[i].num; + dev->mr_table.mpt_base = profile[i].start; + init_hca->mpt_base = profile[i].start; + init_hca->log_mpt_sz = (u8)profile[i].log_num; + break; + case MTHCA_RES_MTT: + dev->limits.num_mtt_segs = profile[i].num; + dev->mr_table.mtt_base = profile[i].start; + init_hca->mtt_base = profile[i].start; + init_hca->mtt_seg_sz = (u8)(ffs(mtt_seg_sz) - 7); + break; + case MTHCA_RES_UAR: + dev->limits.num_uars = profile[i].num; + init_hca->uar_scratch_base = profile[i].start; + break; + case MTHCA_RES_UDAV: + dev->av_table.ddr_av_base = profile[i].start; + dev->av_table.num_ddr_avs = 
profile[i].num; + break; + case MTHCA_RES_UARC: + dev->uar_table.uarc_size = request->uarc_size; + dev->uar_table.uarc_base = profile[i].start; + init_hca->uarc_base = profile[i].start; + init_hca->log_uarc_sz = (u8)(ffs(request->uarc_size) - 13); + init_hca->log_uar_sz = (u8)(ffs(request->num_uar) - 1); + break; + default: + break; + } + } + + /* + * PDs don't take any HCA memory, but we assign them as part + * of the HCA profile anyway. + */ + dev->limits.num_pds = MTHCA_NUM_PDS; + + /* + * For Tavor, FMRs use ioremapped PCI memory. For 32 bit + * systems it may use too much vmalloc space to map all MTT + * memory, so we reserve some MTTs for FMR access, taking them + * out of the MR pool. They don't use additional memory, but + * we assign them as part of the HCA profile anyway. + */ + if (mthca_is_memfree(dev)) + dev->limits.fmr_reserved_mtts = 0; + else + dev->limits.fmr_reserved_mtts = request->fmr_reserved_mtts; + + kfree(profile); + return total_size; +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_profile.h b/branches/Ndi/hw/mthca/kernel/mthca_profile.h new file mode 100644 index 00000000..f1887c58 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_profile.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_PROFILE_H +#define MTHCA_PROFILE_H + +#include "mthca_dev.h" +#include "mthca_cmd.h" + +struct mthca_profile { + int num_qp; + int rdb_per_qp; + int num_srq; + int num_cq; + int num_mcg; + int num_mpt; + int num_mtt; + int num_udav; + int num_uar; + int uarc_size; + int fmr_reserved_mtts; +}; + +u64 mthca_make_profile(struct mthca_dev *mdev, + struct mthca_profile *request, + struct mthca_dev_lim *dev_lim, + struct mthca_init_hca_param *init_hca); + +#endif /* MTHCA_PROFILE_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_provider.c b/branches/Ndi/hw/mthca/kernel/mthca_provider.c new file mode 100644 index 00000000..90979cd8 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_provider.c @@ -0,0 +1,1327 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. 
+ * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include <ib_smi.h> + +#include "mx_abi.h" +#include "mthca_dev.h" +#include "mt_pa_cash.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_provider.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_memfree.h" + +static void init_query_mad(struct ib_smp *mad) +{ + mad->base_version = 1; + mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; + mad->class_version = 1; + mad->method = IB_MGMT_METHOD_GET; +} + +int mthca_query_device(struct ib_device *ibdev, + struct ib_device_attr *props) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + struct mthca_dev* mdev = to_mdev(ibdev); + + u8 status; + + RtlZeroMemory(props, sizeof *props); + + if (mthca_is_livefish(mdev)) { + props->max_pd = 1; + props->vendor_id = mdev->ext->hcaConfig.VendorID; + props->vendor_part_id = mdev->ext->hcaConfig.DeviceID; + return 0; + } + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mthca_MAD_IFC(mdev, 1, 1, + 1, NULL, NULL, in_mad, out_mad, &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + props->fw_ver = mdev->fw_ver; + props->device_cap_flags = mdev->device_cap_flags; + props->vendor_id = cl_ntoh32(*(__be32 *) (out_mad->data + 36)) & + 0xffffff; + props->vendor_part_id = cl_ntoh16(*(__be16 *) (out_mad->data + 30)); + props->hw_ver = cl_ntoh32(*(__be32 *) (out_mad->data + 32)); + memcpy(&props->sys_image_guid, out_mad->data + 4, 8); + props->max_mr_size = ~0Ui64; + props->page_size_cap = mdev->limits.page_size_cap; + props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps; + props->max_qp_wr = mdev->limits.max_wqes; + props->max_sge = mdev->limits.max_sg; + props->max_cq = mdev->limits.num_cqs - mdev->limits.reserved_cqs; + props->max_cqe = 
mdev->limits.max_cqes; + props->max_mr = mdev->limits.num_mpts - mdev->limits.reserved_mrws; + props->max_pd = mdev->limits.num_pds - mdev->limits.reserved_pds; + props->max_qp_rd_atom = 1 << mdev->qp_table.rdb_shift; + props->max_qp_init_rd_atom = mdev->limits.max_qp_init_rdma; + props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp; + props->max_srq = mdev->limits.num_srqs - mdev->limits.reserved_srqs; + props->max_srq_wr = mdev->limits.max_srq_wqes; + if (mthca_is_memfree(mdev)) + --props->max_srq_wr; + props->max_srq_sge = mdev->limits.max_srq_sge; + props->local_ca_ack_delay = (u8)mdev->limits.local_ca_ack_delay; + props->atomic_cap = mdev->limits.flags & DEV_LIM_FLAG_ATOMIC ? + IB_ATOMIC_LOCAL : IB_ATOMIC_NONE; + props->max_pkeys = (u16)mdev->limits.pkey_table_len; + props->max_mcast_grp = mdev->limits.num_mgms + mdev->limits.num_amgms; + props->max_mcast_qp_attach = MTHCA_QP_PER_MGM; + props->max_total_mcast_qp_attach = props->max_mcast_qp_attach * + props->max_mcast_grp; + + /* + * If Sinai memory key optimization is being used, then only + * the 8-bit key portion will change. For other HCAs, the + * unused index bits will also be used for FMR remapping. + */ + if (mdev->mthca_flags & MTHCA_FLAG_SINAI_OPT) + props->max_map_per_fmr = 255; + else + props->max_map_per_fmr = + (1 << (32 - long_log2(mdev->limits.num_mpts))) - 1; + + err = 0; + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mthca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cl_hton32(port); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + RtlZeroMemory(props, sizeof *props); + props->lid = cl_ntoh16(*(__be16 *) (out_mad->data + 16)); + props->lmc = out_mad->data[34] & 0x7; + props->sm_lid = cl_ntoh16(*(__be16 *) (out_mad->data + 18)); + props->sm_sl = out_mad->data[36] & 0xf; + props->state = out_mad->data[32] & 0xf; + props->phys_state = out_mad->data[33] >> 4; + props->port_cap_flags = cl_ntoh32(*(__be32 *) (out_mad->data + 20)); + props->gid_tbl_len = to_mdev(ibdev)->limits.gid_table_len; + props->max_msg_sz = 0x80000000; + props->pkey_tbl_len = (u16)to_mdev(ibdev)->limits.pkey_table_len; + props->bad_pkey_cntr = cl_ntoh16(*(__be16 *) (out_mad->data + 46)); + props->qkey_viol_cntr = cl_ntoh16(*(__be16 *) (out_mad->data + 48)); + props->active_width = out_mad->data[31] & 0xf; + props->active_speed = out_mad->data[35] >> 4; + props->max_mtu = out_mad->data[41] & 0xf; + props->active_mtu = out_mad->data[36] >> 4; + props->subnet_timeout = out_mad->data[51] & 0x1f; + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mthca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props) +{ + struct mthca_set_ib_param set_ib; + struct ib_port_attr attr; + int err; + u8 status; + + if (down_interruptible(&to_mdev(ibdev)->cap_mask_mutex)) + return -EFAULT; + + err = mthca_query_port(ibdev, port, &attr); + if (err) + goto out; + + set_ib.set_si_guid = 0; + set_ib.reset_qkey_viol = !!(port_modify_mask & IB_PORT_RESET_QKEY_CNTR); + + set_ib.cap_mask = (attr.port_cap_flags | 
props->set_port_cap_mask) & + ~props->clr_port_cap_mask; + + err = mthca_SET_IB(to_mdev(ibdev), &set_ib, port, &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + +out: + up(&to_mdev(ibdev)->cap_mask_mutex); + return err; +} + +static int mthca_query_pkey_chunk(struct ib_device *ibdev, + u8 port, u16 index, u16 pkey[32]) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE; + in_mad->attr_mod = cl_hton32(index / 32); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + { // copy the results + int i; + __be16 *pkey_chunk = (__be16 *)out_mad->data; + for (i=0; i<32; ++i) + pkey[i] = cl_ntoh16(pkey_chunk[i]); + } + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +static int mthca_query_gid_chunk(struct ib_device *ibdev, u8 port, + int index, union ib_gid gid[8]) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + __be64 subnet_prefix; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_PORT_INFO; + in_mad->attr_mod = cl_hton32(port); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + memcpy(&subnet_prefix, out_mad->data + 8, 8); + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_GUID_INFO; + in_mad->attr_mod = cl_hton32(index / 8); + + err = mthca_MAD_IFC(to_mdev(ibdev), 1, 1, + port, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + { // copy the results + int i; + __be64 *guid = (__be64 *)out_mad->data; + for (i=0; i<8; ++i) { + gid[i].global.subnet_prefix = subnet_prefix; + gid[i].global.interface_id = guid[i]; + } + } + + out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, + ci_umv_buf_t* const p_umv_buf) +{ + struct ibv_get_context_resp uresp; + struct mthca_ucontext *context; + int err; + + RtlZeroMemory(&uresp, sizeof uresp); + + uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps; + if (mthca_is_memfree(to_mdev(ibdev))) + uresp.uarc_size = to_mdev(ibdev)->uar_table.uarc_size; + else + uresp.uarc_size = 0; + + context = kzalloc(sizeof *context, GFP_KERNEL); + if (!context) { + err = -ENOMEM; + goto err_nomem; + } + + if (mthca_is_livefish(to_mdev(ibdev))) + goto done; + + err = mthca_uar_alloc(to_mdev(ibdev), &context->uar); + if (err) + goto err_uar_alloc; + + /* + * map uar to user space + */ + + /* map UAR to kernel */ + context->kva = ioremap((io_addr_t)context->uar.pfn << PAGE_SHIFT, PAGE_SIZE,&context->uar_size); + if (!context->kva) { + HCA_PRINT( TRACE_LEVEL_ERROR, HCA_DBG_LOW ,("Couldn't map kernel access region, aborting.\n") ); + err = -ENOMEM; + goto err_ioremap; + } + + /* build MDL */ + context->mdl = IoAllocateMdl( context->kva, (ULONG)context->uar_size, + FALSE, TRUE, NULL ); + if( !context->mdl ) { + err = -ENOMEM; + goto err_alloc_mdl; + } + MmBuildMdlForNonPagedPool( context->mdl ); + + /* Map 
the memory into the calling process's address space. */ + __try { + context->ibucontext.user_uar = MmMapLockedPagesSpecifyCache( context->mdl, + UserMode, MmNonCached, NULL, FALSE, NormalPagePriority ); + } + __except(EXCEPTION_EXECUTE_HANDLER) { + err = -EACCES; + goto err_map; + } + + /* user_db_tab */ + context->db_tab = mthca_init_user_db_tab(to_mdev(ibdev)); + if (IS_ERR(context->db_tab)) { + err = PTR_ERR(context->db_tab); + goto err_init_user; + } + +done: + err = ib_copy_to_umv_buf(p_umv_buf, &uresp, sizeof uresp); + if (err) + goto err_copy_to_umv_buf; + + context->ibucontext.device = ibdev; + + atomic_set(&context->ibucontext.usecnt, 0); + return &context->ibucontext; + +err_copy_to_umv_buf: + mthca_cleanup_user_db_tab(to_mdev(ibdev), &context->uar, + context->db_tab); +err_init_user: + MmUnmapLockedPages( context->ibucontext.user_uar, context->mdl ); +err_map: + IoFreeMdl(context->mdl); +err_alloc_mdl: + iounmap(context->kva, PAGE_SIZE); +err_ioremap: + mthca_uar_free(to_mdev(ibdev), &context->uar); +err_uar_alloc: + kfree(context); +err_nomem: + return ERR_PTR(err); +} + + int mthca_dealloc_ucontext(struct ib_ucontext *context) +{ + struct mthca_ucontext *mucontext = to_mucontext(context); + + if (mthca_is_livefish(to_mdev(context->device))) + goto done; + mthca_cleanup_user_db_tab(to_mdev(context->device), &mucontext->uar, + mucontext->db_tab); + MmUnmapLockedPages( mucontext->ibucontext.user_uar, mucontext->mdl ); + IoFreeMdl(mucontext->mdl); + iounmap(mucontext->kva, PAGE_SIZE); + mthca_uar_free(to_mdev(context->device), &mucontext->uar); +done: + kfree(mucontext); + + return 0; +} + +struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf) +{ + int err; + struct mthca_pd *pd; + struct ibv_alloc_pd_resp resp; + + /* sanity check */ + if (p_umv_buf && p_umv_buf->command) { + if (p_umv_buf->output_size < sizeof(struct ibv_alloc_pd_resp)) { + err = -EINVAL; + goto err_param; + } + } + + pd = kmalloc(sizeof *pd, GFP_KERNEL); + if (!pd) { + err = -ENOMEM; + goto err_mem; + } + + if (mthca_is_livefish(to_mdev(ibdev))) + goto done; + + err = mthca_pd_alloc(to_mdev(ibdev), !context, pd); + if (err) { + goto err_pd_alloc; + } + +done: + if (p_umv_buf && p_umv_buf->command) { + resp.pd_handle = (u64)(UINT_PTR)pd; + resp.pdn = pd->pd_num; + if (ib_copy_to_umv_buf(p_umv_buf, &resp, sizeof(struct ibv_alloc_pd_resp))) { + err = -EFAULT; + goto err_copy; + } + } + + return &pd->ibpd; + +err_copy: + mthca_pd_free(to_mdev(ibdev), pd); +err_pd_alloc: + kfree(pd); +err_mem: +err_param: + return ERR_PTR(err); +} + +int mthca_dealloc_pd(struct ib_pd *pd) +{ + if (mthca_is_livefish(to_mdev(pd->device))) + goto done; + + mthca_pd_free(to_mdev(pd->device), to_mpd(pd)); + +done: + kfree(pd); + return 0; +} + +static struct ib_ah *mthca_ah_create(struct ib_pd *pd, + struct ib_ah_attr *ah_attr) +{ + int err; + struct mthca_ah *ah; + + ah = kzalloc(sizeof *ah, GFP_ATOMIC); + if (!ah) + return ERR_PTR(-ENOMEM); + + err = mthca_create_ah(to_mdev(pd->device), to_mpd(pd), ah_attr, ah); + if (err) { + kfree(ah); + return ERR_PTR(err); + } + + return &ah->ibah; +} + +static int mthca_ah_destroy(struct ib_ah *ah) +{ + mthca_destroy_ah(to_mdev(ah->device), to_mah(ah)); + kfree(ah); + + return 0; +} + +static struct ib_srq *mthca_create_srq(struct ib_pd *pd, + struct ib_srq_init_attr *init_attr, + ci_umv_buf_t* const p_umv_buf) +{ + struct ibv_create_srq ucmd = { 0 }; + struct mthca_ucontext *context = NULL; + struct mthca_srq *srq; + int err; + + srq 
= kzalloc(sizeof *srq, GFP_KERNEL); + if (!srq) + return ERR_PTR(-ENOMEM); + + if (pd->ucontext) { + context = to_mucontext(pd->ucontext); + + if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd)) { + err = -EFAULT; + goto err_free; + } + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index, + ucmd.db_page, NULL); + + if (err) + goto err_free; + + srq->mr.ibmr.lkey = ucmd.lkey; + srq->db_index = ucmd.db_index; + } + + err = mthca_alloc_srq(to_mdev(pd->device), to_mpd(pd), + &init_attr->attr, srq); + + if (err && pd->ucontext) + mthca_unmap_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, ucmd.db_index); + + if (err) + goto err_free; + + if (context && ib_copy_to_umv_buf(p_umv_buf, &srq->srqn, sizeof (u32))) { + mthca_free_srq(to_mdev(pd->device), srq); + err = -EFAULT; + goto err_free; + } + + return &srq->ibsrq; + +err_free: + kfree(srq); + + return ERR_PTR(err); +} + +static int mthca_destroy_srq(struct ib_srq *srq) +{ + struct mthca_ucontext *context; + + if (srq->ucontext) { + context = to_mucontext(srq->ucontext); + + mthca_unmap_user_db(to_mdev(srq->device), &context->uar, + context->db_tab, to_msrq(srq)->db_index); + } + + mthca_free_srq(to_mdev(srq->device), to_msrq(srq)); + kfree(srq); + + return 0; +} + +static struct ib_qp *mthca_create_qp(struct ib_pd *pd, + struct ib_qp_init_attr *init_attr, + ci_umv_buf_t* const p_umv_buf) +{ + struct ibv_create_qp ucmd = {0}; + struct mthca_qp *qp = NULL; + struct mthca_ucontext *context = NULL; + int err; + + switch (init_attr->qp_type) { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + case IB_QPT_UNRELIABLE_DGRM: + { + + qp = kmalloc(sizeof *qp, GFP_KERNEL); + if (!qp) { + err = -ENOMEM; + goto err_mem; + } + + if (pd->ucontext) { + context = to_mucontext(pd->ucontext); + + if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd)) { + err = -EFAULT; + goto err_copy; + } + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, + ucmd.sq_db_index, ucmd.sq_db_page, NULL); + if (err) + goto err_map1; + + err = mthca_map_user_db(to_mdev(pd->device), &context->uar, + context->db_tab, + ucmd.rq_db_index, ucmd.rq_db_page, NULL); + if (err) + goto err_map2; + + qp->mr.ibmr.lkey = ucmd.lkey; + qp->sq.db_index = ucmd.sq_db_index; + qp->rq.db_index = ucmd.rq_db_index; + } + + err = mthca_alloc_qp(to_mdev(pd->device), to_mpd(pd), + to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq), + init_attr->qp_type, init_attr->sq_sig_type, + &init_attr->cap, qp); + + if (err) + if (pd->ucontext) + goto err_alloc_qp_user; + else + goto err_copy; + + qp->ibqp.qp_num = qp->qpn; + break; + } + case IB_QPT_QP0: + case IB_QPT_QP1: + { + /* Don't allow userspace to create special QPs */ + if (pd->ucontext) { + err = -EINVAL; + goto err_inval; + } + + qp = kmalloc(sizeof (struct mthca_sqp), GFP_KERNEL); + if (!qp) { + err = -ENOMEM; + goto err_mem; + } + + qp->ibqp.qp_num = init_attr->qp_type == IB_QPT_QP0 ? 
0 : 1; + + err = mthca_alloc_sqp(to_mdev(pd->device), to_mpd(pd), + to_mcq(init_attr->send_cq), + to_mcq(init_attr->recv_cq), + init_attr->sq_sig_type, &init_attr->cap, + qp->ibqp.qp_num, init_attr->port_num, + to_msqp(qp)); + if (err) + goto err_alloc_sqp; + + break; + } + default: + /* Don't support raw QPs */ + err = -ENOSYS; + goto err_unsupported; + } + + init_attr->cap.max_send_wr = qp->sq.max; + init_attr->cap.max_recv_wr = qp->rq.max; + init_attr->cap.max_send_sge = qp->sq.max_gs; + init_attr->cap.max_recv_sge = qp->rq.max_gs; + init_attr->cap.max_inline_data = qp->max_inline_data; + + return &qp->ibqp; + + +err_alloc_qp_user: + if (pd->ucontext) + mthca_unmap_user_db(to_mdev(pd->device), + &context->uar, context->db_tab, ucmd.rq_db_index); +err_map2: + if (pd->ucontext) + mthca_unmap_user_db(to_mdev(pd->device), + &context->uar, context->db_tab, ucmd.sq_db_index); +err_map1: err_copy: err_alloc_sqp: + if (qp) + kfree(qp); +err_mem: err_inval: err_unsupported: + return ERR_PTR(err); +} + +static int mthca_destroy_qp(struct ib_qp *qp) +{ + if (qp->ucontext) { + mthca_unmap_user_db(to_mdev(qp->device), + &to_mucontext(qp->ucontext)->uar, + to_mucontext(qp->ucontext)->db_tab, + to_mqp(qp)->sq.db_index); + mthca_unmap_user_db(to_mdev(qp->device), + &to_mucontext(qp->ucontext)->uar, + to_mucontext(qp->ucontext)->db_tab, + to_mqp(qp)->rq.db_index); + } + mthca_free_qp(to_mdev(qp->device), to_mqp(qp)); + kfree(qp); + return 0; +} + +static struct ib_cq *mthca_create_cq(struct ib_device *ibdev, int entries, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf) +{ + struct ibv_create_cq ucmd = {0}; + struct mthca_cq *cq; + int nent; + int err; + void *u_arm_db_page = 0; + + if (entries < 1 || entries > to_mdev(ibdev)->limits.max_cqes) + return ERR_PTR(-EINVAL); + + if (context) { + if (ib_copy_from_umv_buf(&ucmd, p_umv_buf, sizeof ucmd)) + return ERR_PTR(-EFAULT); + + err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, + ucmd.set_db_index, ucmd.set_db_page, NULL); + if (err) + return ERR_PTR(err); + + err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, + ucmd.arm_db_index, ucmd.arm_db_page, NULL); + if (err) + goto err_unmap_set; + + err = mthca_map_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, + ucmd.u_arm_db_index, + (u64)(ULONG_PTR)PAGE_ALIGN(ucmd.u_arm_db_page), + &u_arm_db_page); + if (err) + goto err_unmap_arm; + } + + cq = kmalloc(sizeof *cq, GFP_KERNEL); + if (!cq) { + err = -ENOMEM; + goto err_unmap_ev; + } + + if (context) { + cq->mr.ibmr.lkey = ucmd.lkey; + cq->set_ci_db_index = ucmd.set_db_index; + cq->arm_db_index = ucmd.arm_db_index; + cq->u_arm_db_index = ucmd.u_arm_db_index; + cq->p_u_arm_sn = (int*)((char*)u_arm_db_page + BYTE_OFFSET(ucmd.u_arm_db_page)); + } + + for (nent = 1; nent <= entries; nent <<= 1) + ; /* nothing */ + + err = mthca_init_cq(to_mdev(ibdev), nent, + context ? to_mucontext(context) : NULL, + context ? 
ucmd.mr.pdn : to_mdev(ibdev)->driver_pd.pd_num, + cq); + if (err) + goto err_free; + + if (context ) { + struct ibv_create_cq_resp *create_cq_resp = (struct ibv_create_cq_resp *)(void*)p_umv_buf->p_inout_buf; + create_cq_resp->cqn = cq->cqn; + } + + HCA_PRINT( TRACE_LEVEL_INFORMATION, HCA_DBG_LOW , + ("uctx %p, cq_hndl %p, cq_num %#x, cqe %#x\n", + context, &cq->ibcq, cq->cqn, cq->ibcq.cqe ) ); + + return &cq->ibcq; + +err_free: + kfree(cq); + +err_unmap_ev: + if (context) + mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, ucmd.u_arm_db_index); + +err_unmap_arm: + if (context) + mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, ucmd.arm_db_index); + +err_unmap_set: + if (context) + mthca_unmap_user_db(to_mdev(ibdev), &to_mucontext(context)->uar, + to_mucontext(context)->db_tab, ucmd.set_db_index); + + return ERR_PTR(err); +} + +static int mthca_destroy_cq(struct ib_cq *cq) +{ + if (cq->ucontext) { + mthca_unmap_user_db(to_mdev(cq->device), + &to_mucontext(cq->ucontext)->uar, + to_mucontext(cq->ucontext)->db_tab, + to_mcq(cq)->u_arm_db_index); + mthca_unmap_user_db(to_mdev(cq->device), + &to_mucontext(cq->ucontext)->uar, + to_mucontext(cq->ucontext)->db_tab, + to_mcq(cq)->arm_db_index); + mthca_unmap_user_db(to_mdev(cq->device), + &to_mucontext(cq->ucontext)->uar, + to_mucontext(cq->ucontext)->db_tab, + to_mcq(cq)->set_ci_db_index); + } + mthca_free_cq(to_mdev(cq->device), to_mcq(cq)); + kfree(cq); + + return 0; +} + +static +mthca_mpt_access_t +map_qp_mpt( + IN mthca_qp_access_t qp_acl) +{ +#define ACL_MTHCA(mfl,ifl) if (qp_acl & mfl) mpt_acl |= ifl + mthca_mpt_access_t mpt_acl = 0; + + ACL_MTHCA(MTHCA_ACCESS_REMOTE_READ,MTHCA_MPT_FLAG_REMOTE_READ); + ACL_MTHCA(MTHCA_ACCESS_REMOTE_WRITE,MTHCA_MPT_FLAG_REMOTE_WRITE); + ACL_MTHCA(MTHCA_ACCESS_REMOTE_ATOMIC,MTHCA_MPT_FLAG_ATOMIC); + ACL_MTHCA(MTHCA_ACCESS_LOCAL_WRITE,MTHCA_MPT_FLAG_LOCAL_WRITE); + + return (mpt_acl | MTHCA_MPT_FLAG_LOCAL_READ); +} + +struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc) +{ + struct mthca_mr *mr; + int err; + + mr = kzalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + err = mthca_mr_alloc_notrans(to_mdev(pd->device), + to_mpd(pd)->pd_num, + map_qp_mpt(acc), mr); + + if (err) { + kfree(mr); + return ERR_PTR(err); + } + + return &mr->ibmr; +} + +static struct ib_mr *mthca_reg_phys_mr(struct ib_pd *pd, + struct ib_phys_buf *buffer_list, + int num_phys_buf, + mthca_qp_access_t acc, + u64 *iova_start) +{ + struct mthca_mr *mr; + u64 *page_list; + u64 total_size; + u64 mask; + int shift; + int npages; + int err; + int i, j, n; + + /* First check that we have enough alignment */ + if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) + return ERR_PTR(-EINVAL); + + if (num_phys_buf > 1 && + ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) + return ERR_PTR(-EINVAL); + + mask = 0; + total_size = 0; + for (i = 0; i < num_phys_buf; ++i) { + if (i != 0) + mask |= buffer_list[i].addr; + if (i != num_phys_buf - 1) + mask |= buffer_list[i].addr + buffer_list[i].size; + + total_size += buffer_list[i].size; + } + + if (mask & ~PAGE_MASK) + return ERR_PTR(-EINVAL); + + /* Find largest page shift we can use to cover buffers */ + for (shift = PAGE_SHIFT; shift < 31; ++shift) + if (num_phys_buf > 1) { + if ((1Ui64 << shift) & mask) + break; + } else { + if (1Ui64 << shift >= + buffer_list[0].size + + (buffer_list[0].addr & ((1Ui64 << shift) - 1))) + break; + } + + 
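+ /* Worked example, assuming PAGE_SHIFT == 12: a single 12 KB buffer at address 0x1000 fails shift 12 and 13 but satisfies 1Ui64 << 14 >= 0x3000 + 0x1000, so shift == 14; the adjustment below then widens the entry to one 16 KB page starting at address 0. */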
buffer_list[0].size += buffer_list[0].addr & ((1Ui64 << shift) - 1); + buffer_list[0].addr &= ~0Ui64 << shift; + + mr = kzalloc(sizeof *mr, GFP_KERNEL); + if (!mr) + return ERR_PTR(-ENOMEM); + + npages = 0; + for (i = 0; i < num_phys_buf; ++i) + npages += (int)((buffer_list[i].size + (1Ui64 << shift) - 1) >> shift); + + if (!npages) + return &mr->ibmr; + + page_list = kmalloc(npages * sizeof *page_list, GFP_KERNEL); + if (!page_list) { + kfree(mr); + return ERR_PTR(-ENOMEM); + } + + n = 0; + for (i = 0; i < num_phys_buf; ++i) + for (j = 0; + j < (buffer_list[i].size + (1Ui64 << shift) - 1) >> shift; + ++j) + page_list[n++] = buffer_list[i].addr + ((u64) j << shift); + + HCA_PRINT( TRACE_LEVEL_VERBOSE ,HCA_DBG_LOW ,("Registering memory at %I64x (iova %I64x) " + "in PD %x; shift %d, npages %d.\n", + (u64) buffer_list[0].addr, + (u64) *iova_start, + to_mpd(pd)->pd_num, + shift, npages)); + + err = mthca_mr_alloc_phys(to_mdev(pd->device), + to_mpd(pd)->pd_num, + page_list, shift, npages, + *iova_start, total_size, + map_qp_mpt(acc), mr); + + if (err) { + kfree(page_list); + kfree(mr); + return ERR_PTR(err); + } + + kfree(page_list); + return &mr->ibmr; +} + +static struct ib_mr *mthca_reg_virt_mr(struct ib_pd *pd, + void* __ptr64 vaddr, uint64_t length, uint64_t hca_va, + mthca_qp_access_t acc, boolean_t um_call) +{ + struct mthca_dev *dev = to_mdev(pd->device); + struct mthca_mr *mr; + u64 *pages; + int err = 0; + uint32_t i, n; + mt_iobuf_t *iobuf_p; + mt_iobuf_iter_t iobuf_iter; + ib_access_t ib_acc; + + /* + * Be friendly to WRITE_MTT command and leave two + * empty slots for the index and reserved fields of the mailbox. + */ + int max_buf_list_size = PAGE_SIZE / sizeof (u64) - 2; + + HCA_ENTER(HCA_DBG_MEMORY); + + mr = kzalloc(sizeof *mr, GFP_KERNEL); + if (!mr) { + err = -ENOMEM; + goto err_nomem; + } + + /* + * We ask for writable memory if any access flags other than + * "remote read" are set. "Local write" and "remote write" + * obviously require write access. "Remote atomic" can do + * things like fetch and add, which will modify memory, and + * "MW bind" can change permissions by binding a window. + */ + + // try register the buffer + iobuf_p = &mr->iobuf; + iobuf_init( (u64)vaddr, length, um_call, iobuf_p); + ib_acc = (acc & ~MTHCA_ACCESS_REMOTE_READ) ? 
IB_AC_LOCAL_WRITE : 0;
+	err = iobuf_register_with_cash( (u64)vaddr, length, um_call,
+		&ib_acc, iobuf_p );
+	if (err)
+		goto err_reg_mem;
+	mr->iobuf_used = TRUE;
+
+	// allocate MTTs
+	mr->mtt = mthca_alloc_mtt(dev, iobuf_p->nr_pages);
+	if (IS_ERR(mr->mtt)) {
+		err = PTR_ERR(mr->mtt);
+		goto err_alloc_mtt;
+	}
+
+	// allocate buffer_list for writing MTTs
+	pages = (u64 *) kmalloc(PAGE_SIZE,GFP_KERNEL);
+	if (!pages) {
+		err = -ENOMEM;
+		goto err_pages;
+	}
+
+	// write MTTs
+	iobuf_iter_init( iobuf_p, &iobuf_iter );
+	n = 0;
+	for (;;) {
+		// get up to max_buf_list_size page physical addresses
+		i = iobuf_get_tpt_seg( iobuf_p, &iobuf_iter, max_buf_list_size, pages );
+		if (!i)
+			break;
+
+		//TODO: convert physical addresses to DMA ones
+
+		// write 'i' dma addresses
+		err = mthca_write_mtt(dev, mr->mtt, n, pages, i);
+		if (err)
+			goto err_write_mtt;
+		n += i;
+		if (n >= iobuf_p->nr_pages)
+			break;
+	}
+
+	CL_ASSERT(n == iobuf_p->nr_pages);
+
+	// write MPT
+	err = mthca_mr_alloc(dev, to_mpd(pd)->pd_num, PAGE_SHIFT, hca_va,
+		length, map_qp_mpt(acc), mr);
+	if (err)
+		goto err_mt_alloc;
+
+	// secure memory
+	if (!pd->ucontext)
+		goto done;
+	__try {
+		mr->secure_handle = MmSecureVirtualMemory ( vaddr, (SIZE_T)length,
+			(ib_acc & IB_AC_LOCAL_WRITE) ? PAGE_READWRITE : PAGE_READONLY );
+		if (mr->secure_handle == NULL) {
+			err = -EFAULT;
+			goto err_secure;
+		}
+	}
+	__except (EXCEPTION_EXECUTE_HANDLER) {
+		NTSTATUS Status = GetExceptionCode();
+		UNUSED_PARAM_WOWPP(Status);
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_MEMORY ,
+			("Exception 0x%x on MmSecureVirtualMemory(), addr %p, size %I64d, access %#x\n",
+			Status, vaddr, length, acc ));
+		err = -EFAULT;
+		goto err_secure;
+	}
+
+done:
+	free_page((void*) pages);
+
+	HCA_EXIT(HCA_DBG_MEMORY);
+	return &mr->ibmr;
+
+err_secure:
+err_mt_alloc:
+err_write_mtt:
+	free_page((void*) pages);
+err_pages:
+	mthca_free_mtt(dev, mr->mtt);
+err_alloc_mtt:
+	iobuf_deregister(iobuf_p);
+err_reg_mem:
+	kfree(mr);
+err_nomem:
+
+	HCA_EXIT(HCA_DBG_MEMORY);
+	return ERR_PTR(err);
+}
+
+int mthca_dereg_mr(struct ib_mr *mr)
+{
+	struct mthca_mr *mmr = to_mmr(mr);
+	struct mthca_dev* dev = to_mdev(mr->device);
+
+	if (mmr->secure_handle)
+		MmUnsecureVirtualMemory ( mmr->secure_handle );
+	mthca_free_mr(dev, mmr);
+	if (mmr->iobuf_used)
+		iobuf_deregister_with_cash(&mmr->iobuf);
+	kfree(mmr);
+	return 0;
+}
+
+static struct ib_fmr *mthca_alloc_fmr(struct ib_pd *pd, mthca_qp_access_t acc,
+				      struct ib_fmr_attr *fmr_attr)
+{
+	struct mthca_fmr *fmr;
+	int err;
+
+	fmr = kzalloc(sizeof *fmr, GFP_KERNEL);
+	if (!fmr)
+		return ERR_PTR(-ENOMEM);
+
+	RtlCopyMemory(&fmr->attr, fmr_attr, sizeof *fmr_attr);
+	err = mthca_fmr_alloc(to_mdev(pd->device), to_mpd(pd)->pd_num,
+			     map_qp_mpt(acc), fmr);
+
+	if (err) {
+		kfree(fmr);
+		return ERR_PTR(err);
+	}
+
+	return &fmr->ibfmr;
+}
+
+static int mthca_dealloc_fmr(struct ib_fmr *fmr)
+{
+	struct mthca_fmr *mfmr = to_mfmr(fmr);
+	int err;
+
+	err = mthca_free_fmr(to_mdev(fmr->device), mfmr);
+	if (err)
+		return err;
+
+	kfree(mfmr);
+	return 0;
+}
+
+static int mthca_unmap_fmr(struct list_head *fmr_list)
+{
+	struct ib_fmr *fmr;
+	int err;
+	u8 status;
+	struct mthca_dev *mdev = NULL;
+
+	list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) {
+		if (mdev && to_mdev(fmr->device) != mdev)
+			return -EINVAL;
+		mdev = to_mdev(fmr->device);
+	}
+
+	if (!mdev)
+		return 0;
+
+	if (mthca_is_memfree(mdev)) {
+		list_for_each_entry(fmr, fmr_list, list,struct ib_fmr)
+			mthca_arbel_fmr_unmap(mdev, to_mfmr(fmr));
+
+		wmb();
+	} else
list_for_each_entry(fmr, fmr_list, list,struct ib_fmr) + mthca_tavor_fmr_unmap(mdev, to_mfmr(fmr)); + + err = mthca_SYNC_TPT(mdev, &status); + if (err) + return err; + if (status) + return -EINVAL; + return 0; +} + +static int mthca_init_node_data(struct mthca_dev *dev) +{ + struct ib_smp *in_mad = NULL; + struct ib_smp *out_mad = NULL; + int err = -ENOMEM; + u8 status; + + in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL); + out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL); + if (!in_mad || !out_mad) + goto out; + + init_query_mad(in_mad); + in_mad->attr_id = IB_SMP_ATTR_NODE_INFO; + + err = mthca_MAD_IFC(dev, 1, 1, + 1, NULL, NULL, in_mad, out_mad, + &status); + if (err) + goto out; + if (status) { + err = -EINVAL; + goto out; + } + + memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8); + +out: + kfree(in_mad); + kfree(out_mad); + return err; +} + +int mthca_register_device(struct mthca_dev *dev) +{ + int ret; + + ret = mthca_init_node_data(dev); + if (ret) + return ret; + + strlcpy(dev->ib_dev.name, "mthca%d", IB_DEVICE_NAME_MAX); + dev->ib_dev.node_type = IB_NODE_CA; + dev->ib_dev.phys_port_cnt = (u8)dev->limits.num_ports; + dev->ib_dev.mdev = dev; + dev->ib_dev.query_device = mthca_query_device; + dev->ib_dev.query_port = mthca_query_port; + dev->ib_dev.modify_port = mthca_modify_port; + dev->ib_dev.query_pkey_chunk = mthca_query_pkey_chunk; + dev->ib_dev.query_gid_chunk = mthca_query_gid_chunk; + dev->ib_dev.alloc_ucontext = mthca_alloc_ucontext; + dev->ib_dev.dealloc_ucontext = mthca_dealloc_ucontext; + dev->ib_dev.alloc_pd = mthca_alloc_pd; + dev->ib_dev.dealloc_pd = mthca_dealloc_pd; + dev->ib_dev.create_ah = mthca_ah_create; + dev->ib_dev.destroy_ah = mthca_ah_destroy; + + if (dev->mthca_flags & MTHCA_FLAG_SRQ) { + dev->ib_dev.create_srq = mthca_create_srq; + dev->ib_dev.modify_srq = mthca_modify_srq; + dev->ib_dev.query_srq = mthca_query_srq; + dev->ib_dev.destroy_srq = mthca_destroy_srq; + + if (mthca_is_memfree(dev)) + dev->ib_dev.post_srq_recv = mthca_arbel_post_srq_recv; + else + dev->ib_dev.post_srq_recv = mthca_tavor_post_srq_recv; + } + + dev->ib_dev.create_qp = mthca_create_qp; + dev->ib_dev.modify_qp = mthca_modify_qp; + dev->ib_dev.destroy_qp = mthca_destroy_qp; + dev->ib_dev.create_cq = mthca_create_cq; + dev->ib_dev.destroy_cq = mthca_destroy_cq; + dev->ib_dev.poll_cq = mthca_poll_cq; + dev->ib_dev.get_dma_mr = mthca_get_dma_mr; + dev->ib_dev.reg_phys_mr = mthca_reg_phys_mr; + dev->ib_dev.reg_virt_mr = mthca_reg_virt_mr; + dev->ib_dev.dereg_mr = mthca_dereg_mr; + + if (dev->mthca_flags & MTHCA_FLAG_FMR) { + dev->ib_dev.alloc_fmr = mthca_alloc_fmr; + dev->ib_dev.unmap_fmr = mthca_unmap_fmr; + dev->ib_dev.dealloc_fmr = mthca_dealloc_fmr; + if (mthca_is_memfree(dev)) + dev->ib_dev.map_phys_fmr = mthca_arbel_map_phys_fmr; + else + dev->ib_dev.map_phys_fmr = mthca_tavor_map_phys_fmr; + } + + dev->ib_dev.attach_mcast = mthca_multicast_attach; + dev->ib_dev.detach_mcast = mthca_multicast_detach; + dev->ib_dev.process_mad = mthca_process_mad; + + if (mthca_is_memfree(dev)) { + dev->ib_dev.req_notify_cq = mthca_arbel_arm_cq; + dev->ib_dev.post_send = mthca_arbel_post_send; + dev->ib_dev.post_recv = mthca_arbel_post_recv; + } else { + dev->ib_dev.req_notify_cq = mthca_tavor_arm_cq; + dev->ib_dev.post_send = mthca_tavor_post_send; + dev->ib_dev.post_recv = mthca_tavor_post_recv; + } + + KeInitializeMutex(&dev->cap_mask_mutex, 0); + + ret = ib_register_device(&dev->ib_dev); + if (ret) + return ret; + + mthca_start_catas_poll(dev); + + return 0; +} + +void 
mthca_unregister_device(struct mthca_dev *dev) +{ + mthca_stop_catas_poll(dev); + ib_unregister_device(&dev->ib_dev); +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_provider.h b/branches/Ndi/hw/mthca/kernel/mthca_provider.h new file mode 100644 index 00000000..7395f0fe --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_provider.h @@ -0,0 +1,404 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_PROVIDER_H +#define MTHCA_PROVIDER_H + +#include +#include +#include + +typedef uint32_t mthca_mpt_access_t; +#define MTHCA_MPT_FLAG_ATOMIC (1 << 14) +#define MTHCA_MPT_FLAG_REMOTE_WRITE (1 << 13) +#define MTHCA_MPT_FLAG_REMOTE_READ (1 << 12) +#define MTHCA_MPT_FLAG_LOCAL_WRITE (1 << 11) +#define MTHCA_MPT_FLAG_LOCAL_READ (1 << 10) + +union mthca_buf { + struct scatterlist direct; + struct scatterlist *page_list; +}; + +struct mthca_uar { + PFN_NUMBER pfn; + int index; +}; + +struct mthca_user_db_table; + +struct mthca_ucontext { + struct ib_ucontext ibucontext; + struct mthca_uar uar; + struct mthca_user_db_table *db_tab; + // for user UAR + PMDL mdl; + PVOID kva; + SIZE_T uar_size; +}; + +struct mthca_mtt; + +struct mthca_mr { + //NB: the start of this structure is to be equal to mlnx_mro_t ! 
+	//NB: mlnx_mro_t itself is not embedded here, so as not to mix driver and provider structures
+	struct ib_mr ibmr;
+	struct mthca_mtt *mtt;
+	int iobuf_used;
+	mt_iobuf_t iobuf;
+	void *secure_handle;
+};
+
+struct mthca_fmr {
+	struct ib_fmr ibfmr;
+	struct ib_fmr_attr attr;
+	struct mthca_mtt *mtt;
+	int maps;
+	union {
+		struct {
+			struct mthca_mpt_entry __iomem *mpt;
+			u64 __iomem *mtts;
+		} tavor;
+		struct {
+			struct mthca_mpt_entry *mpt;
+			__be64 *mtts;
+		} arbel;
+	} mem;
+};
+
+struct mthca_pd {
+	struct ib_pd ibpd;
+	u32 pd_num;
+	atomic_t sqp_count;
+	struct mthca_mr ntmr;
+	int privileged;
+};
+
+struct mthca_eq {
+	struct mthca_dev *dev;
+	int eqn;
+	int eq_num;
+	u32 eqn_mask;
+	u32 cons_index;
+	u16 msi_x_vector;
+	u16 msi_x_entry;
+	int have_irq;
+	int nent;
+	struct scatterlist *page_list;
+	struct mthca_mr mr;
+	KDPC dpc;		/* DPC for MSI-X interrupts */
+	spinlock_t lock;	/* spinlock for simultaneous DPCs */
+};
+
+struct mthca_av;
+
+enum mthca_ah_type {
+	MTHCA_AH_ON_HCA,
+	MTHCA_AH_PCI_POOL,
+	MTHCA_AH_KMALLOC
+};
+
+struct mthca_ah {
+	struct ib_ah ibah;
+	enum mthca_ah_type type;
+	u32 key;
+	struct mthca_av *av;
+	dma_addr_t avdma;
+};
+
+/*
+ * Quick description of our CQ/QP locking scheme:
+ *
+ * We have one global lock that protects dev->cq/qp_table.  Each
+ * struct mthca_cq/qp also has its own lock.  An individual qp lock
+ * may be taken inside of an individual cq lock.  Both cqs attached to
+ * a qp may be locked, with the send cq locked first.  No other
+ * nesting should be done.
+ *
+ * Each struct mthca_cq/qp also has an atomic_t ref count.  The
+ * pointer from the cq/qp_table to the struct counts as one reference.
+ * This reference also is good for access through the consumer API, so
+ * modifying the CQ/QP etc doesn't need to take another reference.
+ * Access because of a completion being polled does need a reference.
+ *
+ * Finally, each struct mthca_cq/qp has a wait_queue_head_t for the
+ * destroy function to sleep on.
+ *
+ * This means that access from the consumer API requires nothing but
+ * taking the struct's lock.
+ *
+ * Access because of a completion event should go as follows:
+ * - lock cq/qp_table and look up struct
+ * - increment ref count in struct
+ * - drop cq/qp_table lock
+ * - lock struct, do your thing, and unlock struct
+ * - decrement ref count; if zero, wake up waiters
+ *
+ * To destroy a CQ/QP, we can do the following:
+ * - lock cq/qp_table, remove pointer, unlock cq/qp_table lock
+ * - decrement ref count
+ * - wait_event until ref count is zero
+ *
+ * It is the consumer's responsibility to make sure that no QP
+ * operations (WQE posting or state modification) are pending when the
+ * QP is destroyed.  Also, the consumer must make sure that calls to
+ * qp_modify are serialized.
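+ *
+ * As an illustrative sketch of the completion-event rules above (this
+ * paraphrases mthca_qp_event() in mthca_qp.c, it is not extra code):
+ *
+ *	spin_lock(&dev->qp_table.lock, &lh);
+ *	qp = mthca_array_get(&dev->qp_table.qp, qpn & (num_qps - 1));
+ *	if (qp)
+ *		atomic_inc(&qp->refcount);
+ *	spin_unlock(&lh);
+ *	... use *qp under its own lock ...
+ *	if (atomic_dec_and_test(&qp->refcount))
+ *		wake_up(&qp->wait);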
+ *
+ * Possible optimizations (wait for profile data to see if/where we
+ * have locks bouncing between CPUs):
+ * - split cq/qp table lock into n separate (cache-aligned) locks,
+ *   indexed (say) by the page in the table
+ * - split QP struct lock into three (one for common info, one for the
+ *   send queue and one for the receive queue)
+ */
+//TODO: check correctness of the above requirement: "It is the consumer's responsibility to make sure that no QP
+// operations (WQE posting or state modification) are pending when the QP is destroyed"
+
+struct mthca_cq {
+	struct ib_cq ibcq;
+	void *cq_context;	// leo: for IBAL shim
+	spinlock_t lock;
+	atomic_t refcount;
+	int cqn;
+	u32 cons_index;
+	int is_direct;
+	int is_kernel;
+
+	/* Next fields are Arbel only */
+	int set_ci_db_index;
+	__be32 *set_ci_db;
+	int arm_db_index;
+	__be32 *arm_db;
+	int arm_sn;
+	int u_arm_db_index;
+	int *p_u_arm_sn;
+
+	union mthca_buf queue;
+	struct mthca_mr mr;
+	wait_queue_head_t wait;
+	KMUTEX mutex;
+};
+
+struct mthca_srq {
+	struct ib_srq ibsrq;
+	spinlock_t lock;
+	atomic_t refcount;
+	int srqn;
+	int max;
+	int max_gs;
+	int wqe_shift;
+	int first_free;
+	int last_free;
+	u16 counter;	/* Arbel only */
+	int db_index;	/* Arbel only */
+	__be32 *db;	/* Arbel only */
+	void *last;
+
+	int is_direct;
+	u64 *wrid;
+	union mthca_buf queue;
+	struct mthca_mr mr;
+
+	wait_queue_head_t wait;
+	KMUTEX mutex;
+	void *srq_context;
+};
+
+struct mthca_wq {
+	spinlock_t lock;
+	int max;
+	unsigned next_ind;
+	unsigned last_comp;
+	unsigned head;
+	unsigned tail;
+	void *last;
+	int max_gs;
+	int wqe_shift;
+
+	int db_index;	/* Arbel only */
+	__be32 *db;
+};
+
+struct mthca_qp {
+	struct ib_qp ibqp;
+	void *qp_context;	// leo: for IBAL shim
+	//TODO: added only because ibv_query_qp is absent;
+	// later it may be worth replacing this with struct ib_qp_attr qp_attr;
+	struct ib_qp_init_attr qp_init_attr;	// leo: for query_qp
+	atomic_t refcount;
+	u32 qpn;
+	int is_direct;
+	u8 transport;
+	u8 state;
+	u8 atomic_rd_en;
+	u8 resp_depth;
+
+	struct mthca_mr mr;
+
+	struct mthca_wq rq;
+	struct mthca_wq sq;
+	enum ib_sig_type sq_policy;
+	int send_wqe_offset;
+	int max_inline_data;
+
+	u64 *wrid;
+	union mthca_buf queue;
+
+	wait_queue_head_t wait;
+	KMUTEX mutex;
+};
+
+struct mthca_sqp {
+	struct mthca_qp qp;
+	int port;
+	int pkey_index;
+	u32 qkey;
+	u32 send_psn;
+	struct ib_ud_header ud_header;
+	struct scatterlist sg;
+};
+
+static inline struct mthca_ucontext *to_mucontext(struct ib_ucontext *ibucontext)
+{
+	return container_of(ibucontext, struct mthca_ucontext, ibucontext);
+}
+
+static inline struct mthca_fmr *to_mfmr(struct ib_fmr *ibfmr)
+{
+	return container_of(ibfmr, struct mthca_fmr, ibfmr);
+}
+
+static inline struct mthca_mr *to_mmr(struct ib_mr *ibmr)
+{
+	return container_of(ibmr, struct mthca_mr, ibmr);
+}
+
+static inline struct mthca_pd *to_mpd(struct ib_pd *ibpd)
+{
+	return container_of(ibpd, struct mthca_pd, ibpd);
+}
+
+static inline struct mthca_ah *to_mah(struct ib_ah *ibah)
+{
+	return container_of(ibah, struct mthca_ah, ibah);
+}
+
+static inline struct mthca_cq *to_mcq(struct ib_cq *ibcq)
+{
+	return container_of(ibcq, struct mthca_cq, ibcq);
+}
+
+static inline struct mthca_srq *to_msrq(struct ib_srq *ibsrq)
+{
+	return container_of(ibsrq, struct mthca_srq, ibsrq);
+}
+
+static inline struct mthca_qp *to_mqp(struct ib_qp *ibqp)
+{
+	return container_of(ibqp, struct mthca_qp, ibqp);
+}
+
+static inline struct mthca_sqp *to_msqp(struct mthca_qp *qp)
+{
+	return container_of(qp, struct
mthca_sqp, qp); +} + +static inline uint8_t start_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 0 : 1; +} + +static inline uint8_t end_port(struct ib_device *device) +{ + return device->node_type == IB_NODE_SWITCH ? 0 : device->phys_port_cnt; +} + +static inline int ib_copy_from_umv_buf(void *dest, ci_umv_buf_t* const p_umv_buf, size_t len) +{ + RtlCopyMemory(dest, p_umv_buf->p_inout_buf, len); + return 0; +} + +static inline int ib_copy_to_umv_buf(ci_umv_buf_t* const p_umv_buf, void *src, size_t len) +{ + if (p_umv_buf->output_size < len) { + p_umv_buf->status = IB_INSUFFICIENT_MEMORY; + p_umv_buf->output_size = 0; + return -EFAULT; + } + RtlCopyMemory(p_umv_buf->p_inout_buf, src, len); + p_umv_buf->status = IB_SUCCESS; + p_umv_buf->output_size = (uint32_t)len; + return 0; +} + + + +// API +int mthca_query_device(struct ib_device *ibdev, + struct ib_device_attr *props); + +int mthca_query_port(struct ib_device *ibdev, + u8 port, struct ib_port_attr *props); + +int mthca_modify_port(struct ib_device *ibdev, + u8 port, int port_modify_mask, + struct ib_port_modify *props); + +struct ib_pd *mthca_alloc_pd(struct ib_device *ibdev, + struct ib_ucontext *context, + ci_umv_buf_t* const p_umv_buf); + +int mthca_dealloc_pd(struct ib_pd *pd); + +int mthca_dereg_mr(struct ib_mr *mr); + +int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr); + +struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev, + ci_umv_buf_t* const p_umv_buf); + +int mthca_dealloc_ucontext(struct ib_ucontext *context); + +struct ib_mr *mthca_get_dma_mr(struct ib_pd *pd, mthca_qp_access_t acc); + +int mthca_poll_cq_list( + IN struct ib_cq *ibcq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + + +#endif /* MTHCA_PROVIDER_H */ diff --git a/branches/Ndi/hw/mthca/kernel/mthca_qp.c b/branches/Ndi/hw/mthca/kernel/mthca_qp.c new file mode 100644 index 00000000..0326def3 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_qp.c @@ -0,0 +1,2369 @@ +/* + * Copyright (c) 2004 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2004 Voltaire, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include + +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_qp.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_qp_table) +#pragma alloc_text (PAGE, mthca_cleanup_qp_table) +#endif + +enum { + MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE, + MTHCA_ACK_REQ_FREQ = 10, + MTHCA_FLIGHT_LIMIT = 9, + MTHCA_UD_HEADER_SIZE = 72, /* largest UD header possible */ + MTHCA_INLINE_HEADER_SIZE = 4, /* data segment overhead for inline */ + MTHCA_INLINE_CHUNK_SIZE = 16 /* inline data segment chunk */ +}; + +enum { + MTHCA_QP_STATE_RST = 0, + MTHCA_QP_STATE_INIT = 1, + MTHCA_QP_STATE_RTR = 2, + MTHCA_QP_STATE_RTS = 3, + MTHCA_QP_STATE_SQE = 4, + MTHCA_QP_STATE_SQD = 5, + MTHCA_QP_STATE_ERR = 6, + MTHCA_QP_STATE_DRAINING = 7 +}; + +enum { + MTHCA_QP_ST_RC = 0x0, + MTHCA_QP_ST_UC = 0x1, + MTHCA_QP_ST_RD = 0x2, + MTHCA_QP_ST_UD = 0x3, + MTHCA_QP_ST_MLX = 0x7 +}; + +enum { + MTHCA_QP_PM_MIGRATED = 0x3, + MTHCA_QP_PM_ARMED = 0x0, + MTHCA_QP_PM_REARM = 0x1 +}; + +enum { + /* qp_context flags */ + MTHCA_QP_BIT_DE = 1 << 8, + /* params1 */ + MTHCA_QP_BIT_SRE = 1 << 15, + MTHCA_QP_BIT_SWE = 1 << 14, + MTHCA_QP_BIT_SAE = 1 << 13, + MTHCA_QP_BIT_SIC = 1 << 4, + MTHCA_QP_BIT_SSC = 1 << 3, + /* params2 */ + MTHCA_QP_BIT_RRE = 1 << 15, + MTHCA_QP_BIT_RWE = 1 << 14, + MTHCA_QP_BIT_RAE = 1 << 13, + MTHCA_QP_BIT_RIC = 1 << 4, + MTHCA_QP_BIT_RSC = 1 << 3 +}; + +#pragma pack(push,1) +struct mthca_qp_path { + __be32 port_pkey; + u8 rnr_retry; + u8 g_mylmc; + __be16 rlid; + u8 ackto; + u8 mgid_index; + u8 static_rate; + u8 hop_limit; + __be32 sl_tclass_flowlabel; + u8 rgid[16]; +} ; + +struct mthca_qp_context { + __be32 flags; + __be32 tavor_sched_queue; /* Reserved on Arbel */ + u8 mtu_msgmax; + u8 rq_size_stride; /* Reserved on Tavor */ + u8 sq_size_stride; /* Reserved on Tavor */ + u8 rlkey_arbel_sched_queue; /* Reserved on Tavor */ + __be32 usr_page; + __be32 local_qpn; + __be32 remote_qpn; + u32 reserved1[2]; + struct mthca_qp_path pri_path; + struct mthca_qp_path alt_path; + __be32 rdd; + __be32 pd; + __be32 wqe_base; + __be32 wqe_lkey; + __be32 params1; + __be32 reserved2; + __be32 next_send_psn; + __be32 cqn_snd; + __be32 snd_wqe_base_l; /* Next send WQE on Tavor */ + __be32 snd_db_index; /* (debugging only entries) */ + __be32 last_acked_psn; + __be32 ssn; + __be32 params2; + __be32 rnr_nextrecvpsn; + __be32 ra_buff_indx; + __be32 cqn_rcv; + __be32 rcv_wqe_base_l; /* Next recv WQE on Tavor */ + __be32 rcv_db_index; /* (debugging only entries) */ + __be32 qkey; + __be32 srqn; + __be32 rmsn; + __be16 rq_wqe_counter; /* reserved on Tavor */ + __be16 sq_wqe_counter; /* reserved on Tavor */ + u32 reserved3[18]; +} ; + +struct mthca_qp_param { + __be32 opt_param_mask; + u32 reserved1; + struct mthca_qp_context context; + u32 reserved2[62]; +} ; +#pragma pack(pop) + +enum { + MTHCA_QP_OPTPAR_ALT_ADDR_PATH = 1 << 0, + MTHCA_QP_OPTPAR_RRE = 1 << 1, + MTHCA_QP_OPTPAR_RAE = 1 << 2, + MTHCA_QP_OPTPAR_RWE = 1 << 3, + MTHCA_QP_OPTPAR_PKEY_INDEX = 1 << 4, + MTHCA_QP_OPTPAR_Q_KEY = 1 << 5, + MTHCA_QP_OPTPAR_RNR_TIMEOUT = 1 << 6, + MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 
<< 7, + MTHCA_QP_OPTPAR_SRA_MAX = 1 << 8, + MTHCA_QP_OPTPAR_RRA_MAX = 1 << 9, + MTHCA_QP_OPTPAR_PM_STATE = 1 << 10, + MTHCA_QP_OPTPAR_PORT_NUM = 1 << 11, + MTHCA_QP_OPTPAR_RETRY_COUNT = 1 << 12, + MTHCA_QP_OPTPAR_ALT_RNR_RETRY = 1 << 13, + MTHCA_QP_OPTPAR_ACK_TIMEOUT = 1 << 14, + MTHCA_QP_OPTPAR_RNR_RETRY = 1 << 15, + MTHCA_QP_OPTPAR_SCHED_QUEUE = 1 << 16 +}; + +static const u8 mthca_opcode[] = { + MTHCA_OPCODE_RDMA_WRITE, + MTHCA_OPCODE_RDMA_WRITE_IMM, + MTHCA_OPCODE_SEND, + MTHCA_OPCODE_SEND_IMM, + MTHCA_OPCODE_RDMA_READ, + MTHCA_OPCODE_ATOMIC_CS, + MTHCA_OPCODE_ATOMIC_FA +}; + + +enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS }; + +static struct _state_table { + int trans; + u32 req_param[NUM_TRANS]; + u32 opt_param[NUM_TRANS]; +} state_table[IBQPS_ERR + 1][IBQPS_ERR + 1]= {0}; + +static void fill_state_table() +{ + struct _state_table *t; + RtlZeroMemory( state_table, sizeof(state_table) ); + + /* IBQPS_RESET */ + t = &state_table[IBQPS_RESET][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_INIT].trans = MTHCA_TRANS_RST2INIT; + t[IBQPS_INIT].req_param[UD] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY; + t[IBQPS_INIT].req_param[UC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS; + t[IBQPS_INIT].req_param[RC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS; + t[IBQPS_INIT].req_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + t[IBQPS_INIT].opt_param[MLX] = IB_QP_PORT; + + /* IBQPS_INIT */ + t = &state_table[IBQPS_INIT][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_INIT].trans = MTHCA_TRANS_INIT2INIT; + t[IBQPS_INIT].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_QKEY; + t[IBQPS_INIT].opt_param[UC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS; + t[IBQPS_INIT].opt_param[RC] = IB_QP_PKEY_INDEX |IB_QP_PORT |IB_QP_ACCESS_FLAGS; + t[IBQPS_INIT].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + + t[IBQPS_RTR].trans = MTHCA_TRANS_INIT2RTR; + t[IBQPS_RTR].req_param[UC] = + IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN; + t[IBQPS_RTR].req_param[RC] = + IB_QP_AV |IB_QP_PATH_MTU |IB_QP_DEST_QPN |IB_QP_RQ_PSN |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_MIN_RNR_TIMER; + t[IBQPS_RTR].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + t[IBQPS_RTR].opt_param[UC] = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS; + t[IBQPS_RTR].opt_param[RC] = IB_QP_PKEY_INDEX |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS; + t[IBQPS_RTR].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + +/* IBQPS_RTR */ + t = &state_table[IBQPS_RTR][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_RTR2RTS; + t[IBQPS_RTS].req_param[UD] = IB_QP_SQ_PSN; + t[IBQPS_RTS].req_param[UC] = IB_QP_SQ_PSN; + t[IBQPS_RTS].req_param[RC] = + IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY |IB_QP_SQ_PSN |IB_QP_MAX_QP_RD_ATOMIC; + t[IBQPS_RTS].req_param[MLX] = IB_QP_SQ_PSN; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = + IB_QP_CUR_STATE |IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + /* IBQPS_RTS */ + t = &state_table[IBQPS_RTS][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_RTS2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE 
|IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_ACCESS_FLAGS |IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_ACCESS_FLAGS | + IB_QP_ALT_PATH |IB_QP_PATH_MIG_STATE |IB_QP_MIN_RNR_TIMER; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + t[IBQPS_SQD].trans = MTHCA_TRANS_RTS2SQD; + t[IBQPS_SQD].opt_param[UD] = IB_QP_EN_SQD_ASYNC_NOTIFY; + t[IBQPS_SQD].opt_param[UC] = IB_QP_EN_SQD_ASYNC_NOTIFY; + t[IBQPS_SQD].opt_param[RC] = IB_QP_EN_SQD_ASYNC_NOTIFY; + t[IBQPS_SQD].opt_param[MLX] = IB_QP_EN_SQD_ASYNC_NOTIFY; + + /* IBQPS_SQD */ + t = &state_table[IBQPS_SQD][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_SQD2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_CUR_STATE | + IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[RC] = IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + t[IBQPS_SQD].trans = MTHCA_TRANS_SQD2SQD; + t[IBQPS_SQD].opt_param[UD] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + t[IBQPS_SQD].opt_param[UC] = IB_QP_AV | IB_QP_CUR_STATE | + IB_QP_ALT_PATH |IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_PATH_MIG_STATE; + t[IBQPS_SQD].opt_param[RC] = IB_QP_AV |IB_QP_TIMEOUT |IB_QP_RETRY_CNT |IB_QP_RNR_RETRY | + IB_QP_MAX_QP_RD_ATOMIC |IB_QP_MAX_DEST_RD_ATOMIC |IB_QP_CUR_STATE |IB_QP_ALT_PATH | + IB_QP_ACCESS_FLAGS |IB_QP_PKEY_INDEX |IB_QP_MIN_RNR_TIMER |IB_QP_PATH_MIG_STATE; + t[IBQPS_SQD].opt_param[MLX] = IB_QP_PKEY_INDEX |IB_QP_QKEY; + + /* IBQPS_SQE */ + t = &state_table[IBQPS_SQE][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + + t[IBQPS_RTS].trans = MTHCA_TRANS_SQERR2RTS; + t[IBQPS_RTS].opt_param[UD] = IB_QP_CUR_STATE |IB_QP_QKEY; + t[IBQPS_RTS].opt_param[UC] = IB_QP_CUR_STATE | IB_QP_ACCESS_FLAGS; +// t[IBQPS_RTS].opt_param[RC] = IB_QP_CUR_STATE |IB_QP_MIN_RNR_TIMER; + t[IBQPS_RTS].opt_param[MLX] = IB_QP_CUR_STATE |IB_QP_QKEY; + + /* IBQPS_ERR */ + t = &state_table[IBQPS_ERR][0]; + t[IBQPS_RESET].trans = MTHCA_TRANS_ANY2RST; + t[IBQPS_ERR].trans = MTHCA_TRANS_ANY2ERR; + +}; + + +static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp) +{ + return qp->qpn >= (u32)dev->qp_table.sqp_start && + qp->qpn <= (u32)dev->qp_table.sqp_start + 3; +} + +static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp) +{ + return qp->qpn >= (u32)dev->qp_table.sqp_start && + qp->qpn <= (u32)(dev->qp_table.sqp_start + 1); +} + + +static void dump_wqe(u32 print_lvl, u32 *wqe_ptr , struct mthca_qp *qp_ptr) +{ + __be32 *wqe = wqe_ptr; + + UNUSED_PARAM_WOWPP(qp_ptr); + UNUSED_PARAM_WOWPP(print_lvl); + + (void) wqe; /* avoid warning if mthca_dbg compiled away... 
*/
+	HCA_PRINT(print_lvl,HCA_DBG_QP,("WQE contents QPN 0x%06x \n",qp_ptr->qpn));
+	HCA_PRINT(print_lvl,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",0
+		, cl_ntoh32(wqe[0]), cl_ntoh32(wqe[1]), cl_ntoh32(wqe[2]), cl_ntoh32(wqe[3])));
+	HCA_PRINT(print_lvl,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",4
+		, cl_ntoh32(wqe[4]), cl_ntoh32(wqe[5]), cl_ntoh32(wqe[6]), cl_ntoh32(wqe[7])));
+	HCA_PRINT(print_lvl,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",8
+		, cl_ntoh32(wqe[8]), cl_ntoh32(wqe[9]), cl_ntoh32(wqe[10]), cl_ntoh32(wqe[11])));
+	HCA_PRINT(print_lvl,HCA_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",12
+		, cl_ntoh32(wqe[12]), cl_ntoh32(wqe[13]), cl_ntoh32(wqe[14]), cl_ntoh32(wqe[15])));
+
+}
+
+
+static void *get_recv_wqe(struct mthca_qp *qp, int n)
+{
+	if (qp->is_direct)
+		return (u8*)qp->queue.direct.page + (n << qp->rq.wqe_shift);
+	else
+		return (u8*)qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].page +
+			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
+}
+
+static void *get_send_wqe(struct mthca_qp *qp, int n)
+{
+	if (qp->is_direct)
+		return (u8*)qp->queue.direct.page + qp->send_wqe_offset +
+			(n << qp->sq.wqe_shift);
+	else
+		return (u8*)qp->queue.page_list[(qp->send_wqe_offset +
+			(n << qp->sq.wqe_shift)) >>
+			PAGE_SHIFT].page +
+			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
+			(PAGE_SIZE - 1));
+}
+
+static void mthca_wq_init(struct mthca_wq *wq)
+{
+	spin_lock_init(&wq->lock);
+	wq->next_ind  = 0;
+	wq->last_comp = wq->max - 1;
+	wq->head      = 0;
+	wq->tail      = 0;
+}
+
+void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
+		    enum ib_event_type event_type, u8 vendor_code)
+{
+	struct mthca_qp *qp;
+	struct ib_event event;
+	SPIN_LOCK_PREP(lh);
+
+	spin_lock(&dev->qp_table.lock, &lh);
+	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
+	if (qp)
+		atomic_inc(&qp->refcount);
+	spin_unlock(&lh);
+
+	if (!qp) {
+		HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_QP,("Async event for bogus QP %06x\n", qpn));
+		return;
+	}
+
+	event.device = &dev->ib_dev;
+	event.event = event_type;
+	event.element.qp = &qp->ibqp;
+	event.vendor_specific = vendor_code;
+	HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_QP,("QP %06x Async event event_type 0x%x vendor_code 0x%x\n",
+		qpn,event_type,vendor_code));
+	if (qp->ibqp.event_handler)
+		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);
+
+	if (atomic_dec_and_test(&qp->refcount))
+		wake_up(&qp->wait);
+}
+
+static int to_mthca_state(enum ib_qp_state ib_state)
+{
+	switch (ib_state) {
+	case IBQPS_RESET: return MTHCA_QP_STATE_RST;
+	case IBQPS_INIT:  return MTHCA_QP_STATE_INIT;
+	case IBQPS_RTR:   return MTHCA_QP_STATE_RTR;
+	case IBQPS_RTS:   return MTHCA_QP_STATE_RTS;
+	case IBQPS_SQD:   return MTHCA_QP_STATE_SQD;
+	case IBQPS_SQE:   return MTHCA_QP_STATE_SQE;
+	case IBQPS_ERR:   return MTHCA_QP_STATE_ERR;
+	default:          return -1;
+	}
+}
+
+static int to_mthca_st(int transport)
+{
+	switch (transport) {
+	case RC:  return MTHCA_QP_ST_RC;
+	case UC:  return MTHCA_QP_ST_UC;
+	case UD:  return MTHCA_QP_ST_UD;
+	case RD:  return MTHCA_QP_ST_RD;
+	case MLX: return MTHCA_QP_ST_MLX;
+	default:  return -1;
+	}
+}
+
+static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
+			int attr_mask)
+{
+	if (attr_mask & IB_QP_PKEY_INDEX)
+		sqp->pkey_index = attr->pkey_index;
+	if (attr_mask & IB_QP_QKEY)
+		sqp->qkey = attr->qkey;
+	if (attr_mask & IB_QP_SQ_PSN)
+		sqp->send_psn = attr->sq_psn;
+}
+
+static void init_port(struct mthca_dev *dev, int port)
+{
+	int err;
+	u8 status;
+	struct mthca_init_ib_param param;
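+	/*
+	 * Note (added): this is reached from mthca_modify_qp() when QP0
+	 * moves to RTR -- it brings the IB link up by pushing the port
+	 * limits (link width, VL/MTU caps, GID and PKey table sizes)
+	 * to firmware through the INIT_IB command below.
+	 */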
+
+	RtlZeroMemory(&param, sizeof param);
+
+	param.port_width = dev->limits.port_width_cap;
+	param.vl_cap     = dev->limits.vl_cap;
+	param.mtu_cap    = dev->limits.mtu_cap;
+	param.gid_cap    = (u16)dev->limits.gid_table_len;
+	param.pkey_cap   = (u16)dev->limits.pkey_table_len;
+
+	err = mthca_INIT_IB(dev, &param, port, &status);
+	if (err)
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("INIT_IB failed, return code %d.\n", err));
+	if (status)
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("INIT_IB returned status %02x.\n", status));
+}
+
+
+static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
+				  int attr_mask)
+{
+	u8 dest_rd_atomic;
+	u32 access_flags;
+	u32 hw_access_flags = 0;
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+		dest_rd_atomic = attr->max_dest_rd_atomic;
+	else
+		dest_rd_atomic = qp->resp_depth;
+
+	if (attr_mask & IB_QP_ACCESS_FLAGS)
+		access_flags = attr->qp_access_flags;
+	else
+		access_flags = qp->atomic_rd_en;
+
+	if (!dest_rd_atomic)
+		access_flags &= MTHCA_ACCESS_REMOTE_WRITE;
+
+	if (access_flags & MTHCA_ACCESS_REMOTE_READ)
+		hw_access_flags |= MTHCA_QP_BIT_RRE;
+	if (access_flags & MTHCA_ACCESS_REMOTE_ATOMIC)
+		hw_access_flags |= MTHCA_QP_BIT_RAE;
+	if (access_flags & MTHCA_ACCESS_REMOTE_WRITE)
+		hw_access_flags |= MTHCA_QP_BIT_RWE;
+
+	return cl_hton32(hw_access_flags);
+}
+
+int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
+{
+	struct mthca_dev *dev = to_mdev(ibqp->device);
+	struct mthca_qp *qp = to_mqp(ibqp);
+	enum ib_qp_state cur_state, new_state;
+	struct mthca_mailbox *mailbox;
+	struct mthca_qp_param *qp_param;
+	struct mthca_qp_context *qp_context;
+	u32 req_param, opt_param;
+	u32 sqd_event = 0;
+	u8 status;
+	int err = -EINVAL;
+	SPIN_LOCK_PREP(lhs);
+	SPIN_LOCK_PREP(lhr);
+
+	down( &qp->mutex );
+
+	if (attr_mask & IB_QP_CUR_STATE) {
+		if (attr->cur_qp_state != IBQPS_RTR &&
+		    attr->cur_qp_state != IBQPS_RTS &&
+		    attr->cur_qp_state != IBQPS_SQD &&
+		    attr->cur_qp_state != IBQPS_SQE)
+			goto out;
+		else
+			cur_state = attr->cur_qp_state;
+	} else {
+		spin_lock_irq(&qp->sq.lock, &lhs);
+		spin_lock(&qp->rq.lock, &lhr);
+		cur_state = qp->state;
+		spin_unlock(&lhr);
+		spin_unlock_irq(&lhs);
+	}
+
+	if (attr_mask & IB_QP_STATE) {
+		if (attr->qp_state < 0 || attr->qp_state > IBQPS_ERR)
+			goto out;
+		new_state = attr->qp_state;
+	} else
+		new_state = cur_state;
+
+	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Illegal QP transition "
+			"%d->%d\n", cur_state, new_state));
+		goto out;
+	}
+
+	req_param = state_table[cur_state][new_state].req_param[qp->transport];
+	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];
+
+	if ((req_param & attr_mask) != req_param) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition "
+			"%d->%d missing req attr 0x%08x\n",
+			cur_state, new_state,
+			req_param & ~attr_mask));
+		//NB: IBAL doesn't use all the fields, so some mandatory flags may be missing
+		goto out;
+	}
+
+	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("QP transition (transport %d) "
+			"%d->%d has extra attr 0x%08x\n",
+			qp->transport,
+			cur_state, new_state,
+			attr_mask & ~(req_param | opt_param |
+				IB_QP_STATE)));
+		//NB: the old code sometimes sets flags that this code does not treat as optional
+		goto out;
+	}
+
+	if ((attr_mask & IB_QP_PKEY_INDEX) &&
+	     attr->pkey_index >= dev->limits.pkey_table_len) {
+		HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("PKey index (%u) too large. 
max is %d\n", + attr->pkey_index,dev->limits.pkey_table_len-1)); + goto out; + } + + if ((attr_mask & IB_QP_PORT) && + (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Port number (%u) is invalid\n", attr->port_num)); + goto out; + } + + if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC && + attr->max_rd_atomic > dev->limits.max_qp_init_rdma) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as initiator %u too large (max is %d)\n", + attr->max_rd_atomic, dev->limits.max_qp_init_rdma)); + goto out; + } + + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC && + attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("Max rdma_atomic as responder %u too large (max %d)\n", + attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift)); + goto out; + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto out; + } + qp_param = mailbox->buf; + qp_context = &qp_param->context; + RtlZeroMemory(qp_param, sizeof *qp_param); + + qp_context->flags = cl_hton32((to_mthca_state(new_state) << 28) | + (to_mthca_st(qp->transport) << 16)); + qp_context->flags |= cl_hton32(MTHCA_QP_BIT_DE); + if (!(attr_mask & IB_QP_PATH_MIG_STATE)) + qp_context->flags |= cl_hton32(MTHCA_QP_PM_MIGRATED << 11); + else { + qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PM_STATE); + switch (attr->path_mig_state) { + case IB_APM_MIGRATED: + qp_context->flags |= cl_hton32(MTHCA_QP_PM_MIGRATED << 11); + break; + case IB_APM_REARM: + qp_context->flags |= cl_hton32(MTHCA_QP_PM_REARM << 11); + break; + case IB_APM_ARMED: + qp_context->flags |= cl_hton32(MTHCA_QP_PM_ARMED << 11); + break; + } + } + + /* leave tavor_sched_queue as 0 */ + + if (qp->transport == MLX || qp->transport == UD) + qp_context->mtu_msgmax = (IB_MTU_LEN_2048 << 5) | 11; + else if (attr_mask & IB_QP_PATH_MTU) { + if (attr->path_mtu < IB_MTU_LEN_256 || attr->path_mtu > IB_MTU_LEN_2048) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP, + ("path MTU (%u) is invalid\n", attr->path_mtu)); + goto out_mailbox; + } + qp_context->mtu_msgmax = (u8)((attr->path_mtu << 5) | 31); + } + + if (mthca_is_memfree(dev)) { + if (qp->rq.max) + qp_context->rq_size_stride = (u8)(long_log2(qp->rq.max) << 3); + qp_context->rq_size_stride |= qp->rq.wqe_shift - 4; + + if (qp->sq.max) + qp_context->sq_size_stride = (u8)(long_log2(qp->sq.max) << 3); + qp_context->sq_size_stride |= qp->sq.wqe_shift - 4; + } + + /* leave arbel_sched_queue as 0 */ + + if (qp->ibqp.ucontext) + qp_context->usr_page = + cl_hton32(to_mucontext(qp->ibqp.ucontext)->uar.index); + else + qp_context->usr_page = cl_hton32(dev->driver_uar.index); + qp_context->local_qpn = cl_hton32(qp->qpn); + if (attr_mask & IB_QP_DEST_QPN) { + qp_context->remote_qpn = cl_hton32(attr->dest_qp_num); + } + + if (qp->transport == MLX) + qp_context->pri_path.port_pkey |= + cl_hton32(to_msqp(qp)->port << 24); + else { + if (attr_mask & IB_QP_PORT) { + qp_context->pri_path.port_pkey |= + cl_hton32(attr->port_num << 24); + qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PORT_NUM); + } + } + + if (attr_mask & IB_QP_PKEY_INDEX) { + qp_context->pri_path.port_pkey |= + cl_hton32(attr->pkey_index); + qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PKEY_INDEX); + } + + if (attr_mask & IB_QP_RNR_RETRY) { + qp_context->pri_path.rnr_retry = attr->rnr_retry << 5; + qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RNR_RETRY); + } + + if (attr_mask & IB_QP_AV) { + 
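+		/*
+		 * Note (added): the address vector is packed into pri_path
+		 * below: the low 7 bits of g_mylmc carry the source path
+		 * bits and bit 7 flags a GRH, while sl_tclass_flowlabel
+		 * packs (SL << 28) | (traffic_class << 20) | flow_label.
+		 */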
qp_context->pri_path.g_mylmc = attr->ah_attr.src_path_bits & 0x7f;
+		qp_context->pri_path.rlid = cl_hton16(attr->ah_attr.dlid);
+		//TODO: workaround: always set full speed - really, it is much more complicated
+		qp_context->pri_path.static_rate = 0;
+		if (attr->ah_attr.ah_flags & IB_AH_GRH) {
+			qp_context->pri_path.g_mylmc |= 1 << 7;
+			qp_context->pri_path.mgid_index = attr->ah_attr.grh.sgid_index;
+			qp_context->pri_path.hop_limit = attr->ah_attr.grh.hop_limit;
+			qp_context->pri_path.sl_tclass_flowlabel =
+				cl_hton32((attr->ah_attr.sl << 28) |
+					(attr->ah_attr.grh.traffic_class << 20) |
+					(attr->ah_attr.grh.flow_label));
+			memcpy(qp_context->pri_path.rgid,
+				attr->ah_attr.grh.dgid.raw, 16);
+		} else {
+			qp_context->pri_path.sl_tclass_flowlabel =
+				cl_hton32(attr->ah_attr.sl << 28);
+		}
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
+	}
+
+	if (attr_mask & IB_QP_TIMEOUT) {
+		qp_context->pri_path.ackto = attr->timeout << 3;
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
+	}
+
+	/* XXX alt_path */
+
+	/* leave rdd as 0 */
+	qp_context->pd = cl_hton32(to_mpd(ibqp->pd)->pd_num);
+	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
+	qp_context->wqe_lkey = cl_hton32(qp->mr.ibmr.lkey);
+	qp_context->params1 = cl_hton32((unsigned long)(
+		(MTHCA_ACK_REQ_FREQ << 28) |
+		(MTHCA_FLIGHT_LIMIT << 24) |
+		MTHCA_QP_BIT_SWE));
+	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
+		qp_context->params1 |= cl_hton32(MTHCA_QP_BIT_SSC);
+	if (attr_mask & IB_QP_RETRY_CNT) {
+		qp_context->params1 |= cl_hton32(attr->retry_cnt << 16);
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RETRY_COUNT);
+	}
+
+	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
+		if (attr->max_rd_atomic) {
+			qp_context->params1 |=
+				cl_hton32(MTHCA_QP_BIT_SRE |
+					  MTHCA_QP_BIT_SAE);
+			qp_context->params1 |=
+				cl_hton32(fls(attr->max_rd_atomic - 1) << 21);
+		}
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_SRA_MAX);
+	}
+
+	if (attr_mask & IB_QP_SQ_PSN)
+		qp_context->next_send_psn = cl_hton32(attr->sq_psn);
+	qp_context->cqn_snd = cl_hton32(to_mcq(ibqp->send_cq)->cqn);
+
+	if (mthca_is_memfree(dev)) {
+		qp_context->snd_wqe_base_l = cl_hton32(qp->send_wqe_offset);
+		qp_context->snd_db_index = cl_hton32(qp->sq.db_index);
+	}
+
+	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+
+		if (attr->max_dest_rd_atomic)
+			qp_context->params2 |=
+				cl_hton32(fls(attr->max_dest_rd_atomic - 1) << 21);
+
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RRA_MAX);
+
+	}
+
+	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
+		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RWE |
+						      MTHCA_QP_OPTPAR_RRE |
+						      MTHCA_QP_OPTPAR_RAE);
+	}
+
+	qp_context->params2 |= cl_hton32(MTHCA_QP_BIT_RSC);
+
+	if (ibqp->srq)
+		qp_context->params2 |= cl_hton32(MTHCA_QP_BIT_RIC);
+
+	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
+		qp_context->rnr_nextrecvpsn |= cl_hton32(attr->min_rnr_timer << 24);
+		qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
+	}
+	if (attr_mask & IB_QP_RQ_PSN)
+		qp_context->rnr_nextrecvpsn |= cl_hton32(attr->rq_psn);
+
+	qp_context->ra_buff_indx =
+		cl_hton32(dev->qp_table.rdb_base +
+			  ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
+			   dev->qp_table.rdb_shift));
+
+	qp_context->cqn_rcv = cl_hton32(to_mcq(ibqp->recv_cq)->cqn);
+
+	if (mthca_is_memfree(dev))
+		qp_context->rcv_db_index = cl_hton32(qp->rq.db_index);
+
+	if (attr_mask & IB_QP_QKEY) {
+		qp_context->qkey = 
cl_hton32(attr->qkey); + qp_param->opt_param_mask |= cl_hton32(MTHCA_QP_OPTPAR_Q_KEY); + } + + if (ibqp->srq) + qp_context->srqn = cl_hton32(1 << 24 | + to_msrq(ibqp->srq)->srqn); + + if (cur_state == IBQPS_RTS && new_state == IBQPS_SQD && + attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY && + attr->en_sqd_async_notify) + sqd_event = (u32)(1 << 31); + + err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans, + qp->qpn, 0, mailbox, sqd_event, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP returned error (qp-num = 0x%x) returned status %02x " + "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n", + ibqp->qp_num, status, cur_state, new_state, + attr_mask, req_param, opt_param)); + goto out_mailbox; + } + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("mthca_MODIFY_QP bad status(qp-num = 0x%x) returned status %02x " + "cur_state = %d new_state = %d attr_mask = %d req_param = %d opt_param = %d\n", + ibqp->qp_num, status, cur_state, new_state, + attr_mask, req_param, opt_param)); + err = -EINVAL; + goto out_mailbox; + } + + qp->state = new_state; + if (attr_mask & IB_QP_ACCESS_FLAGS) + qp->atomic_rd_en = (u8)attr->qp_access_flags; + if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) + qp->resp_depth = attr->max_dest_rd_atomic; + + if (is_sqp(dev, qp)) + store_attrs(to_msqp(qp), attr, attr_mask); + + /* + * If we moved QP0 to RTR, bring the IB link up; if we moved + * QP0 to RESET or ERROR, bring the link back down. + */ + if (is_qp0(dev, qp)) { + if (cur_state != IBQPS_RTR && + new_state == IBQPS_RTR) + init_port(dev, to_msqp(qp)->port); + + if (cur_state != IBQPS_RESET && + cur_state != IBQPS_ERR && + (new_state == IBQPS_RESET || + new_state == IBQPS_ERR)) + mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status); + } + + /* + * If we moved a kernel QP to RESET, clean up all old CQ + * entries and reinitialize the QP. + */ + if (new_state == IBQPS_RESET && !qp->ibqp.ucontext) { + mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); + if (qp->ibqp.send_cq != qp->ibqp.recv_cq) + mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn, + qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL); + + mthca_wq_init(&qp->sq); + qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); + mthca_wq_init(&qp->rq); + qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); + + if (mthca_is_memfree(dev)) { + *qp->sq.db = 0; + *qp->rq.db = 0; + } + } + +out_mailbox: + mthca_free_mailbox(dev, mailbox); + +out: + up( &qp->mutex ); + return err; +} + +static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz) +{ + + /* + * Calculate the maximum size of WQE s/g segments, excluding + * the next segment and other non-data segments. 
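+ *
+ * Worked example (hypothetical numbers, added for illustration):
+ * for an RC QP with a 256-byte descriptor, assuming the usual
+ * 16-byte mthca_next_seg and mthca_raddr_seg, this leaves
+ * 256 - 16 - 16 = 224 bytes, i.e. room for 14 16-byte
+ * scatter/gather entries.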
+ */ + int max_data_size = desc_sz - sizeof (struct mthca_next_seg); + + switch (qp->transport) { + case MLX: + max_data_size -= 2 * sizeof (struct mthca_data_seg); + break; + + case UD: + if (mthca_is_memfree(dev)) + max_data_size -= sizeof (struct mthca_arbel_ud_seg); + else + max_data_size -= sizeof (struct mthca_tavor_ud_seg); + break; + + default: + max_data_size -= sizeof (struct mthca_raddr_seg); + break; + } + return max_data_size; +} + +static inline int mthca_max_inline_data(int max_data_size) +{ + return max_data_size - MTHCA_INLINE_HEADER_SIZE ; +} + +static void mthca_adjust_qp_caps(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + int max_data_size = mthca_max_data_size(dev, qp, + min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift)); + + qp->max_inline_data = mthca_max_inline_data( max_data_size); + + qp->sq.max_gs = min(dev->limits.max_sg, + (int)(max_data_size / sizeof (struct mthca_data_seg))); + qp->rq.max_gs = min(dev->limits.max_sg, + (int)((min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) - + sizeof (struct mthca_next_seg)) / sizeof (struct mthca_data_seg))); +} + +/* + * Allocate and register buffer for WQEs. qp->rq.max, sq.max, + * rq.max_gs and sq.max_gs must all be assigned. + * mthca_alloc_wqe_buf will calculate rq.wqe_shift and + * sq.wqe_shift (as well as send_wqe_offset, is_direct, and + * queue) + */ +static int mthca_alloc_wqe_buf(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_qp *qp) +{ + int size; + int err = -ENOMEM; + + HCA_ENTER(HCA_DBG_QP); + size = sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg); + + if (size > dev->limits.max_desc_sz) + return -EINVAL; + + for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; + qp->rq.wqe_shift++) + ; /* nothing */ + + size = qp->sq.max_gs * sizeof (struct mthca_data_seg); + switch (qp->transport) { + case MLX: + size += 2 * sizeof (struct mthca_data_seg); + break; + + case UD: + size += mthca_is_memfree(dev) ? + sizeof (struct mthca_arbel_ud_seg) : + sizeof (struct mthca_tavor_ud_seg); + break; + + case UC: + size += sizeof (struct mthca_raddr_seg); + break; + + case RC: + size += sizeof (struct mthca_raddr_seg); + /* + * An atomic op will require an atomic segment, a + * remote address segment and one scatter entry. + */ + size = max(size, + sizeof (struct mthca_atomic_seg) + + sizeof (struct mthca_raddr_seg) + + sizeof (struct mthca_data_seg)); + break; + + default: + break; + } + + /* Make sure that we have enough space for a bind request */ + size = max(size, sizeof (struct mthca_bind_seg)); + + size += sizeof (struct mthca_next_seg); + + if (size > dev->limits.max_desc_sz) + return -EINVAL; + + for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; + qp->sq.wqe_shift++) + ; /* nothing */ + + qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift, + 1 << qp->sq.wqe_shift); + + /* + * If this is a userspace QP, we don't actually have to + * allocate anything. All we need is to calculate the WQE + * sizes and the send_wqe_offset, so we're done now. 
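+ *
+ * Worked example of the sizing above (hypothetical numbers, added
+ * for illustration): with rq.max_gs = 4 the receive descriptor is
+ * 16 + 4 * 16 = 80 bytes, so the loop settles on rq.wqe_shift = 7,
+ * i.e. a 128-byte stride, the smallest power of two >= 80.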
+ */ + if (pd->ibpd.ucontext) + return 0; + + size = (int)(LONG_PTR)NEXT_PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)); + + qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64), + GFP_KERNEL); + if (!qp->wrid) + goto err_out; + + err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE, + &qp->queue, &qp->is_direct, pd, 0, &qp->mr); + if (err) + goto err_out; + + HCA_EXIT(HCA_DBG_QP); + return 0; + +err_out: + kfree(qp->wrid); + return err; +} + +static void mthca_free_wqe_buf(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + mthca_buf_free(dev, (int)(LONG_PTR)NEXT_PAGE_ALIGN(qp->send_wqe_offset + + (qp->sq.max << qp->sq.wqe_shift)), + &qp->queue, qp->is_direct, &qp->mr); + kfree(qp->wrid); +} + +static int mthca_map_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + int ret; + + if (mthca_is_memfree(dev)) { + ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn); + if (ret) + return ret; + + ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn); + if (ret) + goto err_qpc; + + ret = mthca_table_get(dev, dev->qp_table.rdb_table, + qp->qpn << dev->qp_table.rdb_shift); + if (ret) + goto err_eqpc; + + } + + return 0; + +err_eqpc: + mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); + +err_qpc: + mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); + + return ret; +} + +static void mthca_unmap_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + mthca_table_put(dev, dev->qp_table.rdb_table, + qp->qpn << dev->qp_table.rdb_shift); + mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn); + mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn); +} + +static int mthca_alloc_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + int ret = 0; + + if (mthca_is_memfree(dev)) { + qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ, + qp->qpn, &qp->rq.db); + if (qp->rq.db_index < 0) + return qp->rq.db_index; + + qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ, + qp->qpn, &qp->sq.db); + if (qp->sq.db_index < 0){ + mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); + return qp->sq.db_index; + } + + } + + return ret; +} + +static void mthca_free_memfree(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + if (mthca_is_memfree(dev)) { + mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index); + mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index); + } +} + +static int mthca_alloc_qp_common(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct mthca_qp *qp) +{ + int ret; + int i; + + atomic_set(&qp->refcount, 1); + init_waitqueue_head(&qp->wait); + KeInitializeMutex(&qp->mutex, 0); + + qp->state = IBQPS_RESET; + qp->atomic_rd_en = 0; + qp->resp_depth = 0; + qp->sq_policy = send_policy; + mthca_wq_init(&qp->sq); + mthca_wq_init(&qp->rq); + + UNREFERENCED_PARAMETER(send_cq); + UNREFERENCED_PARAMETER(recv_cq); + + ret = mthca_map_memfree(dev, qp); + if (ret) + return ret; + + ret = mthca_alloc_wqe_buf(dev, pd, qp); + if (ret) { + mthca_unmap_memfree(dev, qp); + return ret; + } + + mthca_adjust_qp_caps(dev, qp); + + /* + * If this is a userspace QP, we're done now. The doorbells + * will be allocated and buffers will be initialized in + * userspace. 
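+ *
+ * For kernel QPs the mem-free initialization below chains the WQEs:
+ * each nda_op points at the next WQE index modulo the queue size
+ * (e.g., with rq.max = 8 and rq.wqe_shift = 7, WQE i gets
+ * nda_op = ((i + 1) & 7) << 7, so WQE 7 wraps back to WQE 0), and
+ * every receive scatter entry is stamped with MTHCA_INVAL_LKEY.
+ * (Comment added; the numbers are illustrative.)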
+ */ + if (pd->ibpd.ucontext) + return 0; + + ret = mthca_alloc_memfree(dev, qp); + if (ret) { + mthca_free_wqe_buf(dev, qp); + mthca_unmap_memfree(dev, qp); + return ret; + } + + if (mthca_is_memfree(dev)) { + struct mthca_next_seg *next; + struct mthca_data_seg *scatter; + int size = (sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16; + + for (i = 0; i < qp->rq.max; ++i) { + next = get_recv_wqe(qp, i); + next->nda_op = cl_hton32(((i + 1) & (qp->rq.max - 1)) << + qp->rq.wqe_shift); + next->ee_nds = cl_hton32(size); + + for (scatter = (void *) (next + 1); + (void *) scatter < (void *) ((u8*)next + (1 << qp->rq.wqe_shift)); + ++scatter) + scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY); + } + + for (i = 0; i < qp->sq.max; ++i) { + next = get_send_wqe(qp, i); + next->nda_op = cl_hton32((((i + 1) & (qp->sq.max - 1)) << + qp->sq.wqe_shift) + + qp->send_wqe_offset); + } + } + + qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); + qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); + + return 0; +} + +static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap, + struct mthca_qp *qp) +{ + int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz); + + /* Sanity check QP size before proceeding */ + if (cap->max_send_wr > (u32)dev->limits.max_wqes || + cap->max_recv_wr > (u32)dev->limits.max_wqes || + cap->max_send_sge > (u32)dev->limits.max_sg || + cap->max_recv_sge > (u32)dev->limits.max_sg || + cap->max_inline_data > (u32)mthca_max_inline_data(max_data_size)) + return -EINVAL; + + /* + * For MLX transport we need 2 extra S/G entries: + * one for the header and one for the checksum at the end + */ + if (qp->transport == MLX && cap->max_recv_sge + 2 > (u32)dev->limits.max_sg) + return -EINVAL; + + if (mthca_is_memfree(dev)) { + qp->rq.max = cap->max_recv_wr ? + roundup_pow_of_two(cap->max_recv_wr) : 0; + qp->sq.max = cap->max_send_wr ? 
+ roundup_pow_of_two(cap->max_send_wr) : 0; + } else { + qp->rq.max = cap->max_recv_wr; + qp->sq.max = cap->max_send_wr; + } + + qp->rq.max_gs = cap->max_recv_sge; + qp->sq.max_gs = MAX(cap->max_send_sge, + ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE, + MTHCA_INLINE_CHUNK_SIZE) / + (int)sizeof (struct mthca_data_seg)); + + return 0; +} + +int mthca_alloc_qp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_qp_type_t type, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + struct mthca_qp *qp) +{ + int err; + SPIN_LOCK_PREP(lh); + + switch (type) { + case IB_QPT_RELIABLE_CONN: qp->transport = RC; break; + case IB_QPT_UNRELIABLE_CONN: qp->transport = UC; break; + case IB_QPT_UNRELIABLE_DGRM: qp->transport = UD; break; + default: return -EINVAL; + } + + err = mthca_set_qp_size(dev, cap, qp); + if (err) + return err; + + qp->qpn = mthca_alloc(&dev->qp_table.alloc); + if (qp->qpn == -1) + return -ENOMEM; + + err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, + send_policy, qp); + if (err) { + mthca_free(&dev->qp_table.alloc, qp->qpn); + return err; + } + + spin_lock_irq(&dev->qp_table.lock, &lh); + mthca_array_set(&dev->qp_table.qp, + qp->qpn & (dev->limits.num_qps - 1), qp); + spin_unlock_irq(&lh); + + return 0; +} + +int mthca_alloc_sqp(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_cq *send_cq, + struct mthca_cq *recv_cq, + enum ib_sig_type send_policy, + struct ib_qp_cap *cap, + int qpn, + int port, + struct mthca_sqp *sqp) +{ + u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1; + int err; + SPIN_LOCK_PREP(lhs); + SPIN_LOCK_PREP(lhr); + SPIN_LOCK_PREP(lht); + + err = mthca_set_qp_size(dev, cap, &sqp->qp); + if (err) + return err; + + alloc_dma_zmem_map(dev, + sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE, + PCI_DMA_BIDIRECTIONAL, + &sqp->sg); + if (!sqp->sg.page) + return -ENOMEM; + + spin_lock_irq(&dev->qp_table.lock, &lht); + if (mthca_array_get(&dev->qp_table.qp, mqpn)) + err = -EBUSY; + else + mthca_array_set(&dev->qp_table.qp, mqpn, sqp); + spin_unlock_irq(&lht); + + if (err) + goto err_out; + + sqp->port = port; + sqp->qp.qpn = mqpn; + sqp->qp.transport = MLX; + + err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq, + send_policy, &sqp->qp); + if (err) + goto err_out_free; + + atomic_inc(&pd->sqp_count); + + return 0; + + err_out_free: + /* + * Lock CQs here, so that CQ polling code can do QP lookup + * without taking a lock. + */ + spin_lock_irq(&send_cq->lock, &lhs); + if (send_cq != recv_cq) + spin_lock(&recv_cq->lock, &lhr); + + spin_lock(&dev->qp_table.lock, &lht); + mthca_array_clear(&dev->qp_table.qp, mqpn); + spin_unlock(&lht); + + if (send_cq != recv_cq) + spin_unlock(&lhr); + spin_unlock_irq(&lhs); + + err_out: + free_dma_mem_map(dev, &sqp->sg, PCI_DMA_BIDIRECTIONAL); + + return err; +} + +void mthca_free_qp(struct mthca_dev *dev, + struct mthca_qp *qp) +{ + u8 status; + struct mthca_cq *send_cq; + struct mthca_cq *recv_cq; + SPIN_LOCK_PREP(lhs); + SPIN_LOCK_PREP(lhr); + SPIN_LOCK_PREP(lht); + + send_cq = to_mcq(qp->ibqp.send_cq); + recv_cq = to_mcq(qp->ibqp.recv_cq); + + /* + * Lock CQs here, so that CQ polling code can do QP lookup + * without taking a lock. 
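+ * The lock order here follows the scheme documented in
+ * mthca_provider.h: the send CQ lock is taken first, then the
+ * receive CQ lock (only when it is a different CQ), with the
+ * qp_table lock innermost.  (Comment added for clarity.)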
+ */
+	spin_lock_irq(&send_cq->lock, &lhs);
+	if (send_cq != recv_cq)
+		spin_lock(&recv_cq->lock, &lhr);
+
+	spin_lock(&dev->qp_table.lock, &lht);
+	mthca_array_clear(&dev->qp_table.qp,
+		qp->qpn & (dev->limits.num_qps - 1));
+	spin_unlock(&lht);
+
+	if (send_cq != recv_cq)
+		spin_unlock(&lhr);
+	spin_unlock_irq(&lhs);
+
+	atomic_dec(&qp->refcount);
+	wait_event(&qp->wait, !atomic_read(&qp->refcount));
+
+	if (qp->state != IBQPS_RESET) {
+		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);
+	}
+
+	/*
+	 * If this is a userspace QP, the buffers, MR, CQs and so on
+	 * will be cleaned up in userspace, so all we have to do is
+	 * unref the mem-free tables and free the QPN in our table.
+	 */
+	if (!qp->ibqp.ucontext) {
+		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
+			qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
+			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
+				qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
+
+		mthca_free_memfree(dev, qp);
+		mthca_free_wqe_buf(dev, qp);
+	}
+
+	mthca_unmap_memfree(dev, qp);
+
+	if (is_sqp(dev, qp)) {
+		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
+		free_dma_mem_map(dev, &to_msqp(qp)->sg, PCI_DMA_BIDIRECTIONAL);
+	} else
+		mthca_free(&dev->qp_table.alloc, qp->qpn);
+}
+
+static enum mthca_wr_opcode conv_ibal_wr_opcode(struct _ib_send_wr *wr)
+{
+	enum mthca_wr_opcode opcode;
+
+	switch (wr->wr_type) {
+	case WR_SEND:
+		opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_SEND_IMM : MTHCA_OPCODE_SEND;
+		break;
+	case WR_RDMA_WRITE:
+		opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_RDMA_WRITE_IMM : MTHCA_OPCODE_RDMA_WRITE;
+		break;
+	case WR_RDMA_READ:	opcode = MTHCA_OPCODE_RDMA_READ; break;
+	case WR_COMPARE_SWAP:	opcode = MTHCA_OPCODE_ATOMIC_CS; break;
+	case WR_FETCH_ADD:	opcode = MTHCA_OPCODE_ATOMIC_FA; break;
+	default:		opcode = MTHCA_OPCODE_INVALID; break;
+	}
+	return opcode;
+}
+
+/* Create UD header for an MLX send and build a data segment for it */
+static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
+	int ind, struct _ib_send_wr *wr,
+	struct mthca_mlx_seg *mlx,
+	struct mthca_data_seg *data)
+{
+	enum mthca_wr_opcode opcode = conv_ibal_wr_opcode(wr);
+	int header_size;
+	int err;
+	u16 pkey;
+	CPU_2_BE64_PREP;
+
+	if (!wr->dgrm.ud.h_av) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV,
+			("absent AV in send wr %p\n", wr));
+		return -EINVAL;
+	}
+
+	ib_ud_header_init(256, /* assume a MAD */
+		mthca_ah_grh_present(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)),
+		&sqp->ud_header);
+
+	err = mthca_read_ah(dev, to_mah((struct ib_ah *)wr->dgrm.ud.h_av), &sqp->ud_header);
+	if (err) {
+		HCA_PRINT(TRACE_LEVEL_ERROR, HCA_DBG_AV, ("read av error %p\n",
+			to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->av));
+		return err;
+	}
+	mlx->flags &= ~cl_hton32(MTHCA_NEXT_SOLICIT | 1);
+	mlx->flags |= cl_hton32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
+		(sqp->ud_header.lrh.destination_lid ==
+		IB_LID_PERMISSIVE ?
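/*
 * Context for the MLX path: special QPs (QP0/QP1) transmit raw IB
 * packets, so the driver builds LRH/BTH/DETH itself via
 * ib_ud_header_init()/ib_ud_header_pack() into the per-SQP scratch
 * page allocated in mthca_alloc_sqp(). The flag word being assembled
 * here selects VL15 for QP0 traffic and the source-LID-replace bit
 * for permissively addressed packets.
 */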
MTHCA_MLX_SLR : 0) | + (sqp->ud_header.lrh.service_level << 8)); + mlx->rlid = sqp->ud_header.lrh.destination_lid; + mlx->vcrc = 0; + + switch (opcode) { + case MTHCA_OPCODE_SEND: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY; + sqp->ud_header.immediate_present = 0; + break; + case MTHCA_OPCODE_SEND_IMM: + sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE; + sqp->ud_header.immediate_present = 1; + sqp->ud_header.immediate_data = wr->immediate_data; + break; + default: + return -EINVAL; + } + + sqp->ud_header.lrh.virtual_lane = !sqp->qp.ibqp.qp_num ? 15 : 0; + if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE) + sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE; + sqp->ud_header.bth.solicited_event = (u8)!!(wr->send_opt & IB_SEND_OPT_SOLICITED); + if (!sqp->qp.ibqp.qp_num) + ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port, + sqp->pkey_index, &pkey); + else + ib_get_cached_pkey(&dev->ib_dev, (u8)sqp->port, + wr->dgrm.ud.pkey_index, &pkey); + sqp->ud_header.bth.pkey = cl_hton16(pkey); + sqp->ud_header.bth.destination_qpn = wr->dgrm.ud.remote_qp; + sqp->ud_header.bth.psn = cl_hton32((sqp->send_psn++) & ((1 << 24) - 1)); + sqp->ud_header.deth.qkey = wr->dgrm.ud.remote_qkey & 0x00000080 ? + cl_hton32(sqp->qkey) : wr->dgrm.ud.remote_qkey; + sqp->ud_header.deth.source_qpn = cl_hton32(sqp->qp.ibqp.qp_num); + + header_size = ib_ud_header_pack(&sqp->ud_header, + (u8*)sqp->sg.page + + ind * MTHCA_UD_HEADER_SIZE); + + data->byte_count = cl_hton32(header_size); + data->lkey = cl_hton32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey); + data->addr = CPU_2_BE64(sqp->sg.dma_address + + ind * MTHCA_UD_HEADER_SIZE); + + return 0; +} + +static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, + struct ib_cq *ib_cq) +{ + unsigned cur; + struct mthca_cq *cq; + SPIN_LOCK_PREP(lh); + + cur = wq->head - wq->tail; + if (likely((int)cur + nreq < wq->max)) + return 0; + + cq = to_mcq(ib_cq); + spin_lock_dpc(&cq->lock, &lh); + cur = wq->head - wq->tail; + spin_unlock_dpc(&lh); + + return (int)cur + nreq >= wq->max; +} + +int mthca_tavor_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + u8 *wqe; + u8 *prev_wqe; + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + u32 f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + int ind; + u8 op0 = 0; + enum ib_wr_opcode opcode; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&qp->sq.lock, &lh); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.next_ind; + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq)); + err = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + opcode = conv_ibal_wr_opcode(wr); + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_opt & IB_SEND_OPT_SIGNALED) ? + cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) | + ((wr->send_opt & IB_SEND_OPT_SOLICITED) ? 
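/*
 * Aside on mthca_wq_overflow() above: head and tail are free-running
 * unsigned counters, so cur = wq->head - wq->tail stays correct even
 * across 32-bit wraparound. The second read under the CQ lock matters
 * because only the CQ-polling path advances wq->tail, and a stale
 * first read can report "full" for a queue that polling just drained.
 */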
+			cl_hton32(MTHCA_NEXT_SOLICIT) : 0) |
+			cl_hton32(1);
+		if (opcode == MTHCA_OPCODE_SEND_IMM ||
+			opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+			((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+		wqe += sizeof (struct mthca_next_seg);
+		size = sizeof (struct mthca_next_seg) / 16;
+
+		switch (qp->transport) {
+		case RC:
+			switch (opcode) {
+			case MTHCA_OPCODE_ATOMIC_CS:
+			case MTHCA_OPCODE_ATOMIC_FA:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+				wqe += sizeof (struct mthca_raddr_seg);
+
+				if (opcode == MTHCA_OPCODE_ATOMIC_CS) {
+					((struct mthca_atomic_seg *) wqe)->swap_add =
+						cl_hton64(wr->remote_ops.atomic2);
+					((struct mthca_atomic_seg *) wqe)->compare =
+						cl_hton64(wr->remote_ops.atomic1);
+				} else {
+					((struct mthca_atomic_seg *) wqe)->swap_add =
+						cl_hton64(wr->remote_ops.atomic1);
+					((struct mthca_atomic_seg *) wqe)->compare = 0;
+				}
+
+				wqe += sizeof (struct mthca_atomic_seg);
+				size += (sizeof (struct mthca_raddr_seg) +
+					sizeof (struct mthca_atomic_seg)) / 16;
+				break;
+
+			case MTHCA_OPCODE_RDMA_READ:
+			case MTHCA_OPCODE_RDMA_WRITE:
+			case MTHCA_OPCODE_RDMA_WRITE_IMM:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				wqe += sizeof (struct mthca_raddr_seg);
+				size += sizeof (struct mthca_raddr_seg) / 16;
+				break;
+
+			default:
+				/* No extra segments required for sends */
+				break;
+			}
+
+			break;
+
+		case UC:
+			switch (opcode) {
+			case MTHCA_OPCODE_RDMA_WRITE:
+			case MTHCA_OPCODE_RDMA_WRITE_IMM:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				wqe += sizeof (struct mthca_raddr_seg);
+				size += sizeof (struct mthca_raddr_seg) / 16;
+				break;
+
+			default:
+				/* No extra segments required for sends */
+				break;
+			}
+
+			break;
+
+		case UD:
+			((struct mthca_tavor_ud_seg *) wqe)->lkey =
+				cl_hton32(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->key);
+			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
+				cl_hton64(to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->avdma);
+			((struct mthca_tavor_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+			((struct mthca_tavor_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+			wqe += sizeof (struct mthca_tavor_ud_seg);
+			size += sizeof (struct mthca_tavor_ud_seg) / 16;
+			break;
+
+		case MLX:
+			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+				(void*)(wqe - sizeof (struct mthca_next_seg)),
+				(void*)wqe);
+			if (err) {
+				if (bad_wr)
+					*bad_wr = wr;
+				goto out;
+			}
+			wqe += sizeof (struct mthca_data_seg);
+			size += sizeof (struct mthca_data_seg) / 16;
+			break;
+		}
+
+		if ((int)wr->num_ds > qp->sq.max_gs) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SQ %06x too many gathers\n",qp->qpn));
+			err = -EINVAL;
+			if (bad_wr)
+				*bad_wr = wr;
+			goto out;
+		}
+		if (wr->send_opt & IB_SEND_OPT_INLINE) {
+			if (wr->num_ds) {
+				struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe;
+				uint32_t s = 0;
+
+				wqe += sizeof *seg;
+				for (i = 0; i < (int)wr->num_ds; ++i) {
+					struct _ib_local_ds *sge = &wr->ds_array[i];
+
+					s += sge->length;
+
+					if (s > (uint32_t)qp->max_inline_data) {
+						err = -EINVAL;
+						if (bad_wr)
+							*bad_wr = wr;
+						goto out;
+					}
+
+					memcpy(wqe, (void *) (ULONG_PTR) sge->vaddr,
+						sge->length);
+					wqe += sge->length;
+				}
+
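/*
 * Inline-data accounting sketch (sizes assumed, not verified against
 * mthca_wqe.h): the gather buffers were copied straight into the WQE
 * above, and the header written next tags them with MTHCA_INLINE_SEG
 * in the high bit of byte_count. If s = 24 payload bytes and the
 * inline header is one u32, align(24 + 4, 16) = 32, i.e. two 16-byte
 * chunks are added to "size".
 */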
seg->byte_count = cl_hton32(MTHCA_INLINE_SEG | s); + size += align(s + sizeof *seg, 16) / 16; + } + } else { + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + HCA_PRINT(TRACE_LEVEL_VERBOSE ,HCA_DBG_QP ,("SQ %06x [%02x] lkey 0x%08x vaddr 0x%I64x 0x%x\n",qp->qpn,i, + (wr->ds_array[i].lkey),(wr->ds_array[i].vaddr),wr->ds_array[i].length)); + } + } + + /* Add one more inline data segment for ICRC */ + if (qp->transport == MLX) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32((unsigned long)((1 << 31) | 4)); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + if (opcode == MTHCA_OPCODE_INVALID) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SQ %06x opcode invalid\n",qp->qpn)); + err = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) |opcode); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size | + ((wr->send_opt & IB_SEND_OPT_FENCE) ? + MTHCA_NEXT_FENCE : 0)); + + if (!size0) { + size0 = size; + op0 = opcode; + } + + dump_wqe( TRACE_LEVEL_VERBOSE, (u32*)qp->sq.last,qp); + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + } + +out: + if (likely(nreq)) { + __be32 doorbell[2]; + + doorbell[0] = cl_hton32(((qp->sq.next_ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) | f0 | op0); + doorbell[1] = cl_hton32((qp->qpn << 8) | size0); + + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_SEND_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + qp->sq.next_ind = ind; + qp->sq.head += nreq; + + spin_unlock_irqrestore(&lh); + return err; +} + +int mthca_tavor_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + __be32 doorbell[2]; + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + int ind; + u8 *wqe; + u8 *prev_wqe; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&qp->rq.lock, &lh); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.next_ind; + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { + nreq = 0; + + doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cl_hton32(qp->qpn << 8); + + wmb(); + + mthca_write64(doorbell, dev->kar + MTHCA_RECV_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + + qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; + size0 = 0; + } + if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq)); + err = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + prev_wqe = qp->rq.last; + qp->rq.last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD); + ((struct mthca_next_seg *) wqe)->flags = 0; + + wqe += sizeof 
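/*
 * Tavor receive doorbells encode the WQE count in a single byte (the
 * final ring in the "out:" block below masks with (nreq & 255)), so
 * the loop flushes early whenever nreq reaches
 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB: doorbell[0] carries the first
 * fresh WQE offset plus size0, doorbell[1] carries (qpn << 8) | count.
 */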
(struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + if (unlikely((int)wr->num_ds > qp->rq.max_gs)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("RQ %06x too many gathers\n",qp->qpn)); + err = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; +// HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("RQ %06x [%02x] lkey 0x%08x vaddr 0x%I64x 0x %x 0x%08x\n",i,qp->qpn, +// (wr->ds_array[i].lkey),(wr->ds_array[i].vaddr),wr->ds_array[i].length, wr->wr_id)); + } + + qp->wrid[ind] = wr->wr_id; + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32((ind << qp->rq.wqe_shift) | 1); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD | size); + + if (!size0) + size0 = size; + + dump_wqe(TRACE_LEVEL_VERBOSE, (u32*)wqe ,qp); + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } + +out: + if (likely(nreq)) { + doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cl_hton32((qp->qpn << 8) | (nreq & 255)); + + wmb(); + + mthca_write64(doorbell, dev->kar + MTHCA_RECV_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + qp->rq.next_ind = ind; + qp->rq.head += nreq; + + spin_unlock_irqrestore(&lh); + return err; +} + +int mthca_arbel_post_send(struct ib_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibqp->device); + struct mthca_qp *qp = to_mqp(ibqp); + __be32 doorbell[2]; + u8 *wqe; + u8 *prev_wqe; + int err = 0; + int nreq; + int i; + int size; + int size0 = 0; + u32 f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + int ind; + u8 op0 = 0; + enum ib_wr_opcode opcode; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&qp->sq.lock, &lh); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.head & (qp->sq.max - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { + nreq = 0; + doorbell[0] = cl_hton32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | + ((qp->sq.head & 0xffff) << 8) |f0 | op0); + doorbell[1] = cl_hton32((qp->qpn << 8) | size0); + qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; + size0 = 0; + f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->sq.db = cl_hton32(qp->sq.head & 0xffff); + + /* + * Make sure doorbell record is written before we + * write MMIO send doorbell. + */ + wmb(); + mthca_write64(doorbell, dev->kar + MTHCA_SEND_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq)); + err = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + opcode = conv_ibal_wr_opcode(wr); + + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_opt & IB_SEND_OPT_SIGNALED) ? 
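/*
 * Arbel (mem-free) posting differs from the Tavor path above: the
 * doorbell record in coherent memory (*qp->sq.db) is updated with the
 * new head first, and only then is the MMIO doorbell written. The
 * batch limit MTHCA_ARBEL_MAX_WQES_PER_SEND_DB exists because the
 * MMIO word encodes the WQE count in its top byte (nreq << 24).
 */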
+			cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) |
+			((wr->send_opt & IB_SEND_OPT_SOLICITED) ?
+			cl_hton32(MTHCA_NEXT_SOLICIT) : 0) |
+			cl_hton32(1);
+		if (opcode == MTHCA_OPCODE_SEND_IMM ||
+			opcode == MTHCA_OPCODE_RDMA_WRITE_IMM)
+			((struct mthca_next_seg *) wqe)->imm = wr->immediate_data;
+
+		wqe += sizeof (struct mthca_next_seg);
+		size = sizeof (struct mthca_next_seg) / 16;
+
+		switch (qp->transport) {
+		case RC:
+			switch (opcode) {
+			case MTHCA_OPCODE_ATOMIC_CS:
+			case MTHCA_OPCODE_ATOMIC_FA:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+
+				wqe += sizeof (struct mthca_raddr_seg);
+
+				if (opcode == MTHCA_OPCODE_ATOMIC_CS) {
+					((struct mthca_atomic_seg *) wqe)->swap_add =
+						cl_hton64(wr->remote_ops.atomic2);
+					((struct mthca_atomic_seg *) wqe)->compare =
+						cl_hton64(wr->remote_ops.atomic1);
+				} else {
+					((struct mthca_atomic_seg *) wqe)->swap_add =
+						cl_hton64(wr->remote_ops.atomic1);
+					((struct mthca_atomic_seg *) wqe)->compare = 0;
+				}
+
+				wqe += sizeof (struct mthca_atomic_seg);
+				size += (sizeof (struct mthca_raddr_seg) +
+					sizeof (struct mthca_atomic_seg)) / 16;
+				break;
+
+			case MTHCA_OPCODE_RDMA_READ:
+			case MTHCA_OPCODE_RDMA_WRITE:
+			case MTHCA_OPCODE_RDMA_WRITE_IMM:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				wqe += sizeof (struct mthca_raddr_seg);
+				size += sizeof (struct mthca_raddr_seg) / 16;
+				break;
+
+			default:
+				/* No extra segments required for sends */
+				break;
+			}
+
+			break;
+
+		case UC:
+			switch (opcode) {
+			case MTHCA_OPCODE_RDMA_WRITE:
+			case MTHCA_OPCODE_RDMA_WRITE_IMM:
+				((struct mthca_raddr_seg *) wqe)->raddr =
+					cl_hton64(wr->remote_ops.vaddr);
+				((struct mthca_raddr_seg *) wqe)->rkey =
+					wr->remote_ops.rkey;
+				((struct mthca_raddr_seg *) wqe)->reserved = 0;
+				wqe += sizeof (struct mthca_raddr_seg);
+				size += sizeof (struct mthca_raddr_seg) / 16;
+				break;
+
+			default:
+				/* No extra segments required for sends */
+				break;
+			}
+
+			break;
+
+		case UD:
+			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
+				to_mah((struct ib_ah *)wr->dgrm.ud.h_av)->av, MTHCA_AV_SIZE);
+			((struct mthca_arbel_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp;
+			((struct mthca_arbel_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey;
+
+			wqe += sizeof (struct mthca_arbel_ud_seg);
+			size += sizeof (struct mthca_arbel_ud_seg) / 16;
+			break;
+
+		case MLX:
+			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
+				(void*)(wqe - sizeof (struct mthca_next_seg)),
+				(void*)wqe);
+			if (err) {
+				if (bad_wr)
+					*bad_wr = wr;
+				goto out;
+			}
+			wqe += sizeof (struct mthca_data_seg);
+			size += sizeof (struct mthca_data_seg) / 16;
+			break;
+		}
+
+		if ((int)wr->num_ds > qp->sq.max_gs) {
+			HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SQ %06x too many gathers\n",qp->qpn));
+			err = -EINVAL;
+			if (bad_wr)
+				*bad_wr = wr;
+			goto out;
+		}
+		if (wr->send_opt & IB_SEND_OPT_INLINE) {
+			if (wr->num_ds) {
+				struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe;
+				uint32_t s = 0;
+
+				wqe += sizeof *seg;
+				for (i = 0; i < (int)wr->num_ds; ++i) {
+					struct _ib_local_ds *sge = &wr->ds_array[i];
+
+					s += sge->length;
+
+					if (s > (uint32_t)qp->max_inline_data) {
+						err = -EINVAL;
+						if (bad_wr)
+							*bad_wr = wr;
+						goto out;
+					}
+
+					memcpy(wqe, (void *) (uintptr_t) sge->vaddr,
+						sge->length);
+					wqe += sge->length;
+				}
+
+				seg->byte_count =
cl_hton32(MTHCA_INLINE_SEG | s); + size += align(s + sizeof *seg, 16) / 16; + } + } else { + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + } + + /* Add one more inline data segment for ICRC */ + if (qp->transport == MLX) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32((unsigned long)((1 << 31) | 4)); + ((u32 *) wqe)[1] = 0; + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + if (opcode == MTHCA_OPCODE_INVALID) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("SQ %06x opcode invalid\n",qp->qpn)); + err = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) |opcode); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD | size | + ((wr->send_opt & IB_SEND_OPT_FENCE) ? + MTHCA_NEXT_FENCE : 0)); + + if (!size0) { + size0 = size; + op0 = opcode; + } + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + } + +out: + if (likely(nreq)) { + doorbell[0] = cl_hton32((nreq << 24) | + ((qp->sq.head & 0xffff) << 8) |f0 | op0); + doorbell[1] = cl_hton32((qp->qpn << 8) | size0); + qp->sq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->sq.db = cl_hton32(qp->sq.head & 0xffff); + + /* + * Make sure doorbell record is written before we + * write MMIO send doorbell. 
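 * (Two barriers are deliberate: the wmb() above orders the WQE
 * stores before the doorbell-record store, since the HCA may sample
 * the record on its own; the wmb() below orders the record before
 * the MMIO kick, so the HCA can never act on the doorbell while
 * still seeing a stale record.)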
+ */ + wmb(); + mthca_write64(doorbell, + dev->kar + MTHCA_SEND_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + spin_unlock_irqrestore(&lh); + return err; +} + +int mthca_arbel_post_recv(struct ib_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_qp *qp = to_mqp(ibqp); + int err = 0; + int nreq; + int ind; + int i; + u8 *wqe; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&qp->rq.lock, &lh); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.head & (qp->rq.max - 1); + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) { + HCA_PRINT(TRACE_LEVEL_ERROR,HCA_DBG_QP,("RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", qp->qpn, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq)); + err = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + + ((struct mthca_next_seg *) wqe)->flags = 0; + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > qp->rq.max_gs)) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP ,("RQ %06x full too many scatter\n",qp->qpn)); + err = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < qp->rq.max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + qp->wrid[ind] = wr->wr_id; + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->rq.db = cl_hton32(qp->rq.head & 0xffff); + } + + spin_unlock_irqrestore(&lh); + return err; +} + +void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send, + int index, int *dbd, __be32 *new_wqe) +{ + struct mthca_next_seg *next; + + UNREFERENCED_PARAMETER(dev); + + /* + * For SRQs, all WQEs generate a CQE, so we're always at the + * end of the doorbell chain. + */ + if (qp->ibqp.srq) { + *new_wqe = 0; + return; + } + + if (is_send) + next = get_send_wqe(qp, index); + else + next = get_recv_wqe(qp, index); + + *dbd = !!(next->ee_nds & cl_hton32(MTHCA_NEXT_DBD)); + if (next->ee_nds & cl_hton32(0x3f)) + *new_wqe = (next->nda_op & cl_hton32((unsigned long)~0x3f)) | + (next->ee_nds & cl_hton32(0x3f)); + else + *new_wqe = 0; +} + +int mthca_init_qp_table(struct mthca_dev *dev) +{ + int err; + u8 status; + int i; + + spin_lock_init(&dev->qp_table.lock); + fill_state_table(); + + /* + * We reserve 2 extra QPs per port for the special QPs. The + * special QP for port 1 has to be even, so round up. + */ + dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL; + err = mthca_alloc_init(&dev->qp_table.alloc, + dev->limits.num_qps, + (1 << 24) - 1, + dev->qp_table.sqp_start + + MTHCA_MAX_PORTS * 2); + if (err) + return err; + + err = mthca_array_init(&dev->qp_table.qp, + dev->limits.num_qps); + if (err) { + mthca_alloc_cleanup(&dev->qp_table.alloc); + return err; + } + + for (i = 0; i < 2; ++i) { + err = mthca_CONF_SPECIAL_QP(dev, i ? 
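/*
 * Special-QP numbering sketch: sqp_start is rounded up to an even
 * QPN, and mthca_alloc_sqp() computes mqpn = qpn * 2 + sqp_start +
 * port - 1, giving QP0/port1 = sqp_start, QP0/port2 = sqp_start + 1,
 * QP1/port1 = sqp_start + 2, QP1/port2 = sqp_start + 3, which matches
 * the per-type base (sqp_start + i * 2) programmed here.
 */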
IB_QPT_QP1 : IB_QPT_QP0, + dev->qp_table.sqp_start + i * 2, + &status); + if (err) + goto err_out; + if (status) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_QP,("CONF_SPECIAL_QP returned " + "status %02x, aborting.\n", + status)); + err = -EINVAL; + goto err_out; + } + } + return 0; + + err_out: + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status); + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status); + + mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); + mthca_alloc_cleanup(&dev->qp_table.alloc); + + return err; +} + +void mthca_cleanup_qp_table(struct mthca_dev *dev) +{ + u8 status; + + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP1, 0, &status); + mthca_CONF_SPECIAL_QP(dev, IB_QPT_QP0, 0, &status); + + mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps); + mthca_alloc_cleanup(&dev->qp_table.alloc); +} + + + diff --git a/branches/Ndi/hw/mthca/kernel/mthca_srq.c b/branches/Ndi/hw/mthca/kernel/mthca_srq.c new file mode 100644 index 00000000..784d5f49 --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_srq.c @@ -0,0 +1,751 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mthca_dev.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "mthca_srq.tmh" +#endif +#include "mthca_cmd.h" +#include "mthca_memfree.h" +#include "mthca_wqe.h" + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (PAGE, mthca_init_srq_table) +#pragma alloc_text (PAGE, mthca_cleanup_srq_table) +#endif + + +enum { + MTHCA_MAX_DIRECT_SRQ_SIZE = 4 * PAGE_SIZE +}; + +struct mthca_tavor_srq_context { + __be64 wqe_base_ds; /* low 6 bits is descriptor size */ + __be32 state_pd; + __be32 lkey; + __be32 uar; + __be16 limit_watermark; + __be16 wqe_cnt; + u32 reserved[2]; +}; + +struct mthca_arbel_srq_context { + __be32 state_logsize_srqn; + __be32 lkey; + __be32 db_index; + __be32 logstride_usrpage; + __be64 wqe_base; + __be32 eq_pd; + __be16 limit_watermark; + __be16 wqe_cnt; + u16 reserved1; + __be16 wqe_counter; + u32 reserved2[3]; +}; + +static void *get_wqe(struct mthca_srq *srq, int n) +{ + if (srq->is_direct) + return (u8*)srq->queue.direct.page + (n << srq->wqe_shift); + else + return (u8*)srq->queue.page_list[(n << srq->wqe_shift) >> PAGE_SHIFT].page + + ((n << srq->wqe_shift) & (PAGE_SIZE - 1)); +} + +/* + * Return a pointer to the location within a WQE that we're using as a + * link when the WQE is in the free list. We use the imm field + * because in the Tavor case, posting a WQE may overwrite the next + * segment of the previous WQE, but a receive WQE will never touch the + * imm field. This avoids corrupting our free list if the previous + * WQE has already completed and been put on the free list when we + * post the next WQE. + */ +static inline int *wqe_to_link(void *wqe) +{ + return (int *) ((u8*)wqe + offsetof(struct mthca_next_seg, imm)); +} + +static void mthca_tavor_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_tavor_srq_context *context) +{ + CPU_2_BE64_PREP; + + RtlZeroMemory(context, sizeof *context); + + context->wqe_base_ds = CPU_2_BE64(1Ui64 << (srq->wqe_shift - 4)); + context->state_pd = cl_hton32(pd->pd_num); + context->lkey = cl_hton32(srq->mr.ibmr.lkey); + + if (pd->ibpd.ucontext) + context->uar = + cl_hton32(to_mucontext(pd->ibpd.ucontext)->uar.index); + else + context->uar = cl_hton32(dev->driver_uar.index); +} + +static void mthca_arbel_init_srq_context(struct mthca_dev *dev, + struct mthca_pd *pd, + struct mthca_srq *srq, + struct mthca_arbel_srq_context *context) +{ + int logsize; + + RtlZeroMemory(context, sizeof *context); + + logsize = long_log2(srq->max); + context->state_logsize_srqn = cl_hton32(logsize << 24 | srq->srqn); + context->lkey = cl_hton32(srq->mr.ibmr.lkey); + context->db_index = cl_hton32(srq->db_index); + context->logstride_usrpage = cl_hton32((srq->wqe_shift - 4) << 29); + if (pd->ibpd.ucontext) + context->logstride_usrpage |= + cl_hton32(to_mucontext(pd->ibpd.ucontext)->uar.index); + else + context->logstride_usrpage |= cl_hton32(dev->driver_uar.index); + context->eq_pd = cl_hton32(MTHCA_EQ_ASYNC << 24 | pd->pd_num); +} + +static void mthca_free_srq_buf(struct mthca_dev *dev, struct mthca_srq *srq) +{ + mthca_buf_free(dev, srq->max << srq->wqe_shift, &srq->queue, + srq->is_direct, &srq->mr); + kfree(srq->wrid); +} + +static int mthca_alloc_srq_buf(struct mthca_dev *dev, struct mthca_pd *pd, + struct mthca_srq *srq) +{ + struct mthca_data_seg *scatter; + u8 *wqe; + int err; + int i; + + if (pd->ibpd.ucontext) + return 0; + + srq->wrid = kmalloc(srq->max * sizeof (u64), GFP_KERNEL); + if (!srq->wrid) + return 
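/*
 * get_wqe() above handles both buffer shapes mthca_buf_alloc() can
 * return: one direct allocation (at most MTHCA_MAX_DIRECT_SRQ_SIZE,
 * i.e. four pages) indexed as page + (n << wqe_shift), or a page
 * list where n << wqe_shift splits into a page index and an in-page
 * offset. With a hypothetical wqe_shift of 6 and 4KB pages, WQE 70
 * sits at byte 4480: page 1, offset 384.
 */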
-ENOMEM; + + err = mthca_buf_alloc(dev, srq->max << srq->wqe_shift, + MTHCA_MAX_DIRECT_SRQ_SIZE, + &srq->queue, &srq->is_direct, pd, 1, &srq->mr); + if (err) { + kfree(srq->wrid); + return err; + } + + /* + * Now initialize the SRQ buffer so that all of the WQEs are + * linked into the list of free WQEs. In addition, set the + * scatter list L_Keys to the sentry value of 0x100. + */ + for (i = 0; i < srq->max; ++i) { + wqe = get_wqe(srq, i); + + *wqe_to_link(wqe) = i < srq->max - 1 ? i + 1 : -1; + + for (scatter = (struct mthca_data_seg *)(wqe + sizeof (struct mthca_next_seg)); + (void *) scatter < (void*)(wqe + (1 << srq->wqe_shift)); + ++scatter) + scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY); + } + + srq->last = get_wqe(srq, srq->max - 1); + + return 0; +} + +int mthca_alloc_srq(struct mthca_dev *dev, struct mthca_pd *pd, + ib_srq_attr_t *attr, struct mthca_srq *srq) +{ + struct mthca_mailbox *mailbox; + u8 status; + int ds; + int err; + SPIN_LOCK_PREP(lh); + + /* Sanity check SRQ size before proceeding */ + if ((int)attr->max_wr > dev->limits.max_srq_wqes || + (int)attr->max_sge > dev->limits.max_srq_sge) + return -EINVAL; + + srq->max = attr->max_wr; + srq->max_gs = attr->max_sge; + srq->counter = 0; + + if (mthca_is_memfree(dev)) + srq->max = roundup_pow_of_two(srq->max + 1); + + ds = max(64UL, + roundup_pow_of_two(sizeof (struct mthca_next_seg) + + srq->max_gs * sizeof (struct mthca_data_seg))); + + if (!mthca_is_memfree(dev) && (ds > dev->limits.max_desc_sz)) + return -EINVAL; + + srq->wqe_shift = long_log2(ds); + + srq->srqn = mthca_alloc(&dev->srq_table.alloc); + if (srq->srqn == -1) + return -ENOMEM; + + if (mthca_is_memfree(dev)) { + err = mthca_table_get(dev, dev->srq_table.table, srq->srqn); + if (err) + goto err_out; + + if (!pd->ibpd.ucontext) { + srq->db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SRQ, + srq->srqn, &srq->db); + if (srq->db_index < 0) { + err = -ENOMEM; + goto err_out_icm; + } + } + } + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + err = PTR_ERR(mailbox); + goto err_out_db; + } + + err = mthca_alloc_srq_buf(dev, pd, srq); + if (err) + goto err_out_mailbox; + + spin_lock_init(&srq->lock); + atomic_set(&srq->refcount, 1); + init_waitqueue_head(&srq->wait); + KeInitializeMutex(&srq->mutex, 0); + + if (mthca_is_memfree(dev)) + mthca_arbel_init_srq_context(dev, pd, srq, mailbox->buf); + else + mthca_tavor_init_srq_context(dev, pd, srq, mailbox->buf); + + err = mthca_SW2HW_SRQ(dev, mailbox, srq->srqn, &status); + + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ failed (%d)\n", err)); + goto err_out_free_buf; + } + if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "SW2HW_SRQ returned status 0x%02x\n", + status)); + err = -EINVAL; + goto err_out_free_buf; + } + + spin_lock_irq(&dev->srq_table.lock, &lh); + if (mthca_array_set(&dev->srq_table.srq, + srq->srqn & (dev->limits.num_srqs - 1), + srq)) { + spin_unlock_irq(&lh); + goto err_out_free_srq; + } + spin_unlock_irq(&lh); + + mthca_free_mailbox(dev, mailbox); + + srq->first_free = 0; + srq->last_free = srq->max - 1; + + attr->max_wr = (mthca_is_memfree(dev)) ? 
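/*
 * Sizing recap with illustrative numbers: a request for max_wr = 100
 * became srq->max = roundup_pow_of_two(101) = 128 above, and one slot
 * stays reserved, so the capability reported back for mem-free HCAs
 * is srq->max - 1 = 127 rather than the raw ring size.
 */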
srq->max - 1 : srq->max; + attr->max_sge = srq->max_gs; + + return 0; + +err_out_free_srq: + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err)); + } else if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status)); + } + +err_out_free_buf: + if (!pd->ibpd.ucontext) + mthca_free_srq_buf(dev, srq); + +err_out_mailbox: + mthca_free_mailbox(dev, mailbox); + +err_out_db: + if (!pd->ibpd.ucontext && mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); + +err_out_icm: + mthca_table_put(dev, dev->srq_table.table, srq->srqn); + +err_out: + mthca_free(&dev->srq_table.alloc, srq->srqn); + + return err; +} + +void mthca_free_srq(struct mthca_dev *dev, struct mthca_srq *srq) +{ + struct mthca_mailbox *mailbox; + int err; + u8 status; + SPIN_LOCK_PREP(lh); + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "No memory for mailbox to free SRQ.\n")); + return; + } + + err = mthca_HW2SW_SRQ(dev, mailbox, srq->srqn, &status); + if (err) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ failed (%d)\n", err)); + } else if (status) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "HW2SW_SRQ returned status 0x%02x\n", status)); + } + + spin_lock_irq(&dev->srq_table.lock, &lh); + mthca_array_clear(&dev->srq_table.srq, + srq->srqn & (dev->limits.num_srqs - 1)); + atomic_dec(&srq->refcount); + spin_unlock_irq(&lh); + + wait_event(&srq->wait, !atomic_read(&srq->refcount)); + + if (!srq->ibsrq.ucontext) { + mthca_free_srq_buf(dev, srq); + if (mthca_is_memfree(dev)) + mthca_free_db(dev, MTHCA_DB_TYPE_SRQ, srq->db_index); + } + + mthca_table_put(dev, dev->srq_table.table, srq->srqn); + mthca_free(&dev->srq_table.alloc, srq->srqn); + mthca_free_mailbox(dev, mailbox); +} + +int mthca_modify_srq(struct ib_srq *ibsrq, ib_srq_attr_t *attr, + ib_srq_attr_mask_t attr_mask) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + int ret; + u8 status; + + /* We don't support resizing SRQs (yet?) */ + if (attr_mask & IB_SRQ_MAX_WR) + return -ENOSYS; + + if (attr_mask & IB_SRQ_LIMIT) { + u32 max_wr = mthca_is_memfree(dev) ? srq->max - 1 : srq->max; + if (attr->srq_limit > max_wr) + return -ERANGE; + + down(&srq->mutex); + ret = mthca_ARM_SRQ(dev, srq->srqn, attr->srq_limit, &status); + up(&srq->mutex); + + if (ret) + return ret; + if (status) + return -EINVAL; + } + + return 0; +} + +int mthca_query_srq(struct ib_srq *ibsrq, ib_srq_attr_t *srq_attr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + struct mthca_mailbox *mailbox; + struct mthca_arbel_srq_context *arbel_ctx; + struct mthca_tavor_srq_context *tavor_ctx; + u8 status; + int err; + + mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL); + if (IS_ERR(mailbox)) + return PTR_ERR(mailbox); + + err = mthca_QUERY_SRQ(dev, srq->srqn, mailbox, &status); + if (err) + goto out; + + if (mthca_is_memfree(dev)) { + arbel_ctx = mailbox->buf; + srq_attr->srq_limit = cl_ntoh16(arbel_ctx->limit_watermark); + } else { + tavor_ctx = mailbox->buf; + srq_attr->srq_limit = cl_ntoh16(tavor_ctx->limit_watermark); + } + + srq_attr->max_wr = (mthca_is_memfree(dev)) ? 
srq->max - 1 : srq->max; + srq_attr->max_sge = srq->max_gs; + +out: + mthca_free_mailbox(dev, mailbox); + + return err; +} + +void mthca_srq_event(struct mthca_dev *dev, u32 srqn, + enum ib_event_type event_type, u8 vendor_code) +{ + struct mthca_srq *srq; + struct ib_event event; + SPIN_LOCK_PREP(lh); + + spin_lock(&dev->srq_table.lock, &lh); + srq = mthca_array_get(&dev->srq_table.srq, srqn & (dev->limits.num_srqs - 1)); + if (srq) + atomic_inc(&srq->refcount); + spin_unlock(&lh); + + if (!srq) { + HCA_PRINT(TRACE_LEVEL_WARNING ,HCA_DBG_SRQ ,( "Async event for bogus SRQ %08x\n", srqn)); + return; + } + + if (!srq->ibsrq.event_handler) + goto out; + + event.device = &dev->ib_dev; + event.event = event_type; + event.element.srq = &srq->ibsrq; + event.vendor_specific = vendor_code; + HCA_PRINT(TRACE_LEVEL_WARNING,HCA_DBG_SRQ, + ("SRQ %06x Async event event_type 0x%x vendor_code 0x%x\n", + srqn,event_type,vendor_code)); + if (srq->ibsrq.event_handler) + srq->ibsrq.event_handler(&event, srq->ibsrq.srq_context); + +out: + if (atomic_dec_and_test(&srq->refcount)) + wake_up(&srq->wait); +} + +/* + * This function must be called with IRQs disabled. + */ +void mthca_free_srq_wqe(struct mthca_srq *srq, u32 wqe_addr) +{ + int ind; + SPIN_LOCK_PREP(lh); + + ind = wqe_addr >> srq->wqe_shift; + + spin_lock(&srq->lock, &lh); + + if (likely(srq->first_free >= 0)) + *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; + else + srq->first_free = ind; + + *wqe_to_link(get_wqe(srq, ind)) = -1; + srq->last_free = ind; + + spin_unlock(&lh); +} + +int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_dev *dev = to_mdev(ibsrq->device); + struct mthca_srq *srq = to_msrq(ibsrq); + __be32 doorbell[2]; + int err = 0; + int first_ind; + int ind; + int next_ind; + int nreq; + int i; + u8 *wqe; + u8 *prev_wqe; + CPU_2_BE64_PREP; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&srq->lock, &lh); + + first_ind = srq->first_free; + + for (nreq = 0; wr; wr = wr->p_next) { + ind = srq->first_free; + + if (ind < 0) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + + if (next_ind < 0) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + prev_wqe = srq->last; + srq->last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > srq->max_gs)) { + err = -EINVAL; + *bad_wr = wr; + srq->last = prev_wqe; + break; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + CPU_2_BE64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32((ind << srq->wqe_shift) | 1); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD); + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + + ++nreq; + if 
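/*
 * Unlike a QP receive ring, SRQ slots come off the first_free list
 * threaded through wqe_to_link(), so "full" here means the free list
 * ran out, not that head caught tail. The early flush that follows
 * mirrors mthca_tavor_post_recv(): the doorbell count field is
 * narrow, so at most MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs are
 * announced per ring.
 */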
(unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { + nreq = 0; + + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32(srq->srqn << 8); + + /* + * Make sure that descriptors are written + * before doorbell is rung. + */ + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECV_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + + first_ind = srq->first_free; + } + } + + if (likely(nreq)) { + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32((srq->srqn << 8) | nreq); + + /* + * Make sure that descriptors are written before + * doorbell is rung. + */ + wmb(); + + mthca_write64(doorbell, + dev->kar + MTHCA_RECV_DOORBELL, + MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock)); + } + + spin_unlock_irqrestore(&lh); + return err; +} + +int mthca_arbel_post_srq_recv(struct ib_srq *ibsrq, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_srq *srq = to_msrq(ibsrq); + int err = 0; + int ind; + int next_ind; + int nreq; + int i; + u8 *wqe; + CPU_2_BE64_PREP; + SPIN_LOCK_PREP(lh); + + spin_lock_irqsave(&srq->lock, &lh); + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + ind = srq->first_free; + + if (ind < 0) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + + if (next_ind < 0) { + HCA_PRINT(TRACE_LEVEL_ERROR ,HCA_DBG_SRQ ,( "SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + ((struct mthca_next_seg *) wqe)->nda_op = + cl_hton32((next_ind << srq->wqe_shift) | 1); + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > srq->max_gs)) { + err = -EINVAL; + *bad_wr = wr; + break; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + CPU_2_BE64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + } + + if (likely(nreq)) { + srq->counter = (u16)(srq->counter + nreq); + + /* + * Make sure that descriptors are written before + * we write doorbell record. + */ + wmb(); + *srq->db = cl_hton32(srq->counter); + } + + spin_unlock_irqrestore(&lh); + return err; +} + +int mthca_max_srq_sge(struct mthca_dev *dev) +{ + if (mthca_is_memfree(dev)) + return dev->limits.max_sg; + + /* + * SRQ allocations are based on powers of 2 for Tavor, + * (although they only need to be multiples of 16 bytes). + * + * Therefore, we need to base the max number of sg entries on + * the largest power of 2 descriptor size that is <= to the + * actual max WQE descriptor size, rather than return the + * max_sg value given by the firmware (which is based on WQE + * sizes as multiples of 16, not powers of 2). + * + * If SRQ implementation is changed for Tavor to be based on + * multiples of 16, the calculation below can be deleted and + * the FW max_sg value returned. 
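 *
 * A worked example, taking mthca_next_seg and mthca_data_seg as 16
 * bytes each: max_desc_sz = 512 is already a power of 2, so the
 * bound is (512 - 16) / 16 = 31 scatter entries per SRQ WQE.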
+ */ + return min( (uint32_t)dev->limits.max_sg, + ((1 << (fls(dev->limits.max_desc_sz) - 1)) - + sizeof (struct mthca_next_seg)) / + sizeof (struct mthca_data_seg)); +} + +int mthca_init_srq_table(struct mthca_dev *dev) +{ + int err; + + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) + return 0; + + spin_lock_init(&dev->srq_table.lock); + + err = mthca_alloc_init(&dev->srq_table.alloc, + dev->limits.num_srqs, + dev->limits.num_srqs - 1, + dev->limits.reserved_srqs); + if (err) + return err; + + err = mthca_array_init(&dev->srq_table.srq, + dev->limits.num_srqs); + if (err) + mthca_alloc_cleanup(&dev->srq_table.alloc); + + return err; +} + +void mthca_cleanup_srq_table(struct mthca_dev *dev) +{ + if (!(dev->mthca_flags & MTHCA_FLAG_SRQ)) + return; + + mthca_array_cleanup(&dev->srq_table.srq, dev->limits.num_srqs); + mthca_alloc_cleanup(&dev->srq_table.alloc); +} diff --git a/branches/Ndi/hw/mthca/kernel/mthca_uar.c b/branches/Ndi/hw/mthca/kernel/mthca_uar.c new file mode 100644 index 00000000..b5bb7b3b --- /dev/null +++ b/branches/Ndi/hw/mthca/kernel/mthca_uar.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mthca_dev.h" +#include "mthca_memfree.h" + +int mthca_uar_alloc(struct mthca_dev *dev, struct mthca_uar *uar) +{ + uar->index = mthca_alloc(&dev->uar_table.alloc); + if (uar->index == -1) + return -ENOMEM; + + uar->pfn = (unsigned long)(pci_resource_start(dev, HCA_BAR_TYPE_UAR) >> PAGE_SHIFT) + uar->index; + + return 0; +} + +void mthca_uar_free(struct mthca_dev *dev, struct mthca_uar *uar) +{ + mthca_free(&dev->uar_table.alloc, uar->index); +} + +int mthca_init_uar_table(struct mthca_dev *dev) +{ + int ret; + + ret = mthca_alloc_init(&dev->uar_table.alloc, + dev->limits.num_uars, + dev->limits.num_uars - 1, + dev->limits.reserved_uars); + if (ret) + return ret; + + ret = mthca_init_db_tab(dev); + if (ret) + mthca_alloc_cleanup(&dev->uar_table.alloc); + + return ret; +} + +void mthca_cleanup_uar_table(struct mthca_dev *dev) +{ + mthca_cleanup_db_tab(dev); + + /* XXX check if any UARs are still allocated? 
*/ + mthca_alloc_cleanup(&dev->uar_table.alloc); +} diff --git a/branches/Ndi/hw/mthca/mt_utils.c b/branches/Ndi/hw/mthca/mt_utils.c new file mode 100644 index 00000000..3d2124a8 --- /dev/null +++ b/branches/Ndi/hw/mthca/mt_utils.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +/* Nth element of the table contains the index of the first set bit of N; 8 - for N=0 */ +char g_set_bit_tbl[256]; + +/* Nth element of the table contains the index of the first 0 bit of N; 8 - for N=255 */ +char g_clr_bit_tbl[256]; + +void fill_bit_tbls() +{ + unsigned long i; + for (i=0; i<256; ++i) { + g_set_bit_tbl[i] = (char)(_ffs_raw(&i,0) - 1); + g_clr_bit_tbl[i] = (char)(_ffz_raw(&i,0) - 1); + } + g_set_bit_tbl[0] = g_clr_bit_tbl[255] = 8; +} + + diff --git a/branches/Ndi/hw/mthca/mt_utils.h b/branches/Ndi/hw/mthca/mt_utils.h new file mode 100644 index 00000000..ddbcf389 --- /dev/null +++ b/branches/Ndi/hw/mthca/mt_utils.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef MT_UTILS_H +#define MT_UTILS_H + +// Nth element of the table contains the index of the first set bit of N; 8 - for N=0 +extern char g_set_bit_tbl[256]; +// Nth element of the table contains the index of the first cleared bit of N; 8 - for N=0 +extern char g_clr_bit_tbl[256]; + +// DECLARE_BITMAP +#define BITS_PER_LONG 32 +#define BITS_TO_LONGS(bits) \ + (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) + +/* +* fls: find last bit set. +* returns: 0 - if not found or N+1, if found Nth bit +*/ + +static __inline int fls(int x) +{ + int r = 32; + + if (!x) + return 0; + if (!(x & 0xffff0000u)) { + x <<= 16; + r -= 16; + } + if (!(x & 0xff000000u)) { + x <<= 8; + r -= 8; + } + if (!(x & 0xf0000000u)) { + x <<= 4; + r -= 4; + } + if (!(x & 0xc0000000u)) { + x <<= 2; + r -= 2; + } + if (!(x & 0x80000000u)) { + x <<= 1; + r -= 1; + } + return r; +} + +/** +* _ffs_raw - find the first one bit in a word +* @addr: The address to start the search at +* @offset: The bitnumber to start searching at +* +* returns: 0 - if not found or N+1, if found Nth bit +*/ +static __inline int _ffs_raw(const unsigned long *addr, int offset) +{ + //TODO: not an effective code - is better in Assembler + int mask; + int rbc; + int ix; + if (!*addr) return 0; + mask = 1 << offset; + rbc = BITS_PER_LONG - offset; + for (ix=0; ix + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * Specifically: + * - Do not use pointer types -- pass pointers in uint64_t instead. + * - Make sure that any structure larger than 4 bytes is padded to a + * multiple of 8 bytes. Otherwise the structure size will be + * different between 32-bit and 64-bit architectures. 
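 *   For instance, struct ibv_alloc_pd_resp below carries a trailing
 *   "reserved" uint32_t purely to pad sizeof to 16 on both ABIs, and
 *   struct ibv_reg_mr passes its registration range as uint64_t
 *   start/length values rather than as pointers.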
+ */ + +struct ibv_get_context_resp { + uint64_t uar_addr; + uint64_t pd_handle; + uint32_t pdn; + uint32_t qp_tab_size; + uint32_t uarc_size; + uint32_t vend_id; + uint16_t dev_id; + uint16_t reserved[3]; +}; + +struct ibv_alloc_pd_resp { + uint64_t pd_handle; + uint32_t pdn; + uint32_t reserved; +}; + +struct ibv_reg_mr { + uint64_t start; + uint64_t length; + uint64_t hca_va; + uint32_t access_flags; + uint32_t pdn; + uint64_t pd_handle; +}; + +struct ibv_reg_mr_resp { + uint64_t mr_handle; + uint32_t lkey; + uint32_t rkey; +}; + +struct ibv_create_cq { + struct ibv_reg_mr mr; + uint64_t arm_db_page; + uint64_t set_db_page; + uint64_t u_arm_db_page; + uint64_t user_handle; + uint32_t arm_db_index; + uint32_t set_db_index; + uint32_t u_arm_db_index; + uint32_t cqe; + uint32_t lkey; /* used only by kernel */ + uint32_t reserved; +}; + +struct ibv_create_cq_resp { + uint64_t user_handle; + uint64_t cq_handle; + struct ibv_reg_mr_resp mr; + uint32_t cqe; + uint32_t cqn; +}; + +struct ibv_create_srq { + uint64_t user_handle; + struct ibv_reg_mr mr; + uint32_t lkey; /* used only in kernel */ + uint32_t db_index; + uint64_t db_page; +}; + +struct ibv_create_srq_resp { + struct ibv_reg_mr_resp mr; + uint64_t srq_handle; + uint64_t user_handle; + uint32_t max_wr; + uint32_t max_sge; + uint32_t srqn; + uint32_t reserved; +}; + +struct ibv_create_qp { + uint64_t sq_db_page; + uint64_t rq_db_page; + uint32_t sq_db_index; + uint32_t rq_db_index; + struct ibv_reg_mr mr; + uint64_t user_handle; + uint64_t send_cq_handle; + uint64_t recv_cq_handle; + uint64_t srq_handle; + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; + uint32_t lkey; /* used only in kernel */ + uint8_t sq_sig_all; + uint8_t qp_type; + uint8_t is_srq; + uint8_t reserved[5]; +}; + +struct ibv_create_qp_resp { + struct ibv_reg_mr_resp mr; + uint64_t user_handle; + uint64_t qp_handle; + uint32_t qpn; + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; +}; + +struct ibv_modify_qp_resp { + enum ibv_qp_attr_mask attr_mask; + uint8_t qp_state; + uint8_t reserved[3]; +}; + +struct ibv_create_ah { + uint64_t user_handle; + struct ibv_reg_mr mr; +}; + +struct ibv_create_ah_resp { + uint64_t user_handle; + uint64_t start; + struct ibv_reg_mr_resp mr; +}; + + +#endif /* MX_ABI_H */ + diff --git a/branches/Ndi/hw/mthca/user/Makefile b/branches/Ndi/hw/mthca/user/Makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/hw/mthca/user/SOURCES b/branches/Ndi/hw/mthca/user/SOURCES new file mode 100644 index 00000000..92f6c653 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/SOURCES @@ -0,0 +1,80 @@ +TRUNK=..\..\.. 
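# TRUNK resolves to the repository root; TARGETPATH and INCLUDES below
# are expressed relative to it, and the free/checked builds that follow
# produce mthcau.dll and mthcaud.dll against the matching complib/ibal
# import libraries.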
+ +!if $(FREEBUILD) +TARGETNAME=mthcau +!else +TARGETNAME=mthcaud +!endif + +TARGETPATH=$(TRUNK)\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=DYNLINK +DLLDEF=$(O)\mlnx_uvp.def +USE_MSVCRT=1 +DLLENTRY=DllMain + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + +SOURCES= \ + \ + ..\mt_utils.c \ + \ + mlnx_uvp.rc \ + mlnx_ual_av.c \ + mlnx_ual_ca.c \ + mlnx_ual_cq.c \ + mlnx_ual_main.c \ + mlnx_ual_mcast.c \ + mlnx_ual_mrw.c \ + mlnx_ual_osbypass.c \ + mlnx_ual_pd.c \ + mlnx_ual_qp.c \ + mlnx_ual_srq.c \ + \ + mlnx_uvp_debug.c \ + mlnx_uvp.c \ + mlnx_uvp_ah.c \ + mlnx_uvp_cq.c \ + mlnx_uvp_memfree.c \ + mlnx_uvp_qp.c \ + mlnx_uvp_srq.c \ + mlnx_uvp_verbs.c + +INCLUDES= \ + ..; \ + $(TRUNK)\inc\user; \ + $(TRUNK)\inc\complib; \ + $(TRUNK)\inc\user\complib; \ + $(TRUNK)\inc; \ + +USER_C_FLAGS=$(USER_C_FLAGS) /DCL_NO_TRACK_MEM + +TARGETLIBS=\ + $(SDK_LIB_PATH)\user32.lib \ + $(SDK_LIB_PATH)\kernel32.lib \ + $(SDK_LIB_PATH)\Advapi32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +#LINKER_FLAGS=/MAP /MAPINFO:LINES + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING -DWPP_OLDCC + + +RUN_WPP= $(SOURCES) -ext:.c.h -dll\ + -scan:mlnx_uvp_debug.h \ + -func:UVP_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:UVP_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) +!ENDIF + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/hw/mthca/user/arch.h b/branches/Ndi/hw/mthca/user/arch.h new file mode 100644 index 00000000..9f23be4b --- /dev/null +++ b/branches/Ndi/hw/mthca/user/arch.h @@ -0,0 +1,53 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef INFINIBAND_ARCH_H +#define INFINIBAND_ARCH_H + +#define htonll cl_hton64 +#define ntohll cl_ntoh64 + +/* + * Architecture-specific defines. Currently, an architecture is + * required to implement the following operations: + * + * mb() - memory barrier. No loads or stores may be reordered across + * this macro by either the compiler or the CPU. 
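+ * wmb() - write memory barrier: orders stores only.
+ * rmb() - read memory barrier: orders loads only.
+ *
+ * In this Windows port all three map to MemoryBarrier(), a full
+ * barrier -- correct, though stronger than wmb()/rmb() strictly
+ * require.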
+ */
+
+#define mb MemoryBarrier
+#define wmb MemoryBarrier
+#define rmb MemoryBarrier
+
+#endif /* INFINIBAND_ARCH_H */
diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_av.c b/branches/Ndi/hw/mthca/user/mlnx_ual_av.c
new file mode 100644
index 00000000..8bc46a57
--- /dev/null
+++ b/branches/Ndi/hw/mthca/user/mlnx_ual_av.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "mt_l2w.h"
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+
+#include "mlnx_ual_main.h"
+#if defined(EVENT_TRACING)
+#include "mlnx_ual_av.tmh"
+#endif
+
+void
+mlnx_get_av_interface (
+    IN OUT uvp_interface_t *p_uvp )
+{
+    CL_ASSERT(p_uvp);
+
+    /*
+     * Address Vector Management Verbs
+     */
+    p_uvp->pre_create_av = mlnx_pre_create_av;
+    p_uvp->post_create_av = mlnx_post_create_av;
+    p_uvp->pre_query_av = mlnx_pre_query_av;
+    p_uvp->post_query_av = mlnx_post_query_av;
+    p_uvp->pre_modify_av = mlnx_pre_modify_av;
+    p_uvp->post_modify_av = mlnx_post_modify_av;
+    p_uvp->pre_destroy_av = mlnx_pre_destroy_av;
+    p_uvp->post_destroy_av = mlnx_post_destroy_av;
+}
+
+
+uint8_t
+gid_to_index_lookup (
+    IN ib_ca_attr_t *p_ca_attr,
+    IN uint8_t port_num,
+    IN uint8_t *raw_gid)
+{
+    ib_gid_t *p_gid_table = NULL;
+    uint8_t i, index = 0;
+    uint16_t num_gids;
+
+    p_gid_table = p_ca_attr->p_port_attr[port_num-1].p_gid_table;
+    CL_ASSERT (p_gid_table);
+
+    num_gids = p_ca_attr->p_port_attr[port_num-1].num_gids;
+    UVP_PRINT(TRACE_LEVEL_INFORMATION, UVP_DBG_AV,
+        ("Port %d has %d gids\n", port_num, num_gids));
+
+    for (i = 0; i < num_gids; i++)
+    {
+        /* cl_memcmp() returns 0 on a match, so it must be negated here. */
+        if (!cl_memcmp (raw_gid, p_gid_table[i].raw, sizeof (ib_gid_t)))
+        {
+            UVP_PRINT(TRACE_LEVEL_INFORMATION, UVP_DBG_AV,
+                ("found GID at index %d\n", i));
+            index = i;
+            break;
+        }
+    }
+    return index;
+}
+
+ib_api_status_t
+map_itom_av_attr (
+    IN ib_ca_attr_t *p_ca_attr,
+    IN const ib_av_attr_t *p_av_attr,
+    OUT struct ibv_ah_attr *p_attr)
+{
+    ib_api_status_t status = IB_SUCCESS;
+
+    if (p_av_attr->port_num == 0 ||
+        p_av_attr->port_num > p_ca_attr->num_ports) {
+        UVP_PRINT(TRACE_LEVEL_WARNING, UVP_DBG_AV,
+            ("invalid port number specified (%d)\n", p_av_attr->port_num));
+        return IB_INVALID_PORT;
+    }
+
+    p_attr->sl = p_av_attr->sl;
+    p_attr->port_num = p_av_attr->port_num;
+    p_attr->dlid = CL_NTOH16 (p_av_attr->dlid);
+    p_attr->src_path_bits =
p_av_attr->path_bits; // PATH: + + //TODO: how static_rate is coded ? + p_attr->static_rate = + (p_av_attr->static_rate == IB_PATH_RECORD_RATE_10_GBS ? 0 : 3); + + /* For global destination or Multicast address:*/ + if (p_av_attr->grh_valid) { + p_attr->is_global = TRUE; + p_attr->grh.hop_limit = p_av_attr->grh.hop_limit; + ib_grh_get_ver_class_flow( p_av_attr->grh.ver_class_flow, NULL, + &p_attr->grh.traffic_class, &p_attr->grh.flow_label ); + p_attr->grh.sgid_index = gid_to_index_lookup (p_ca_attr, + p_av_attr->port_num, (uint8_t *) p_av_attr->grh.src_gid.raw); + cl_memcpy (p_attr->grh.dgid.raw, p_av_attr->grh.dest_gid.raw, + sizeof (IB_gid_t)); + }else{ + p_attr->is_global = FALSE; + } + + return status; +} + +ib_api_status_t +mlnx_pre_create_av ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_av_attr_t *p_av_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + int err; + struct mthca_ah *ah; + struct ibv_ah_attr attr; + struct ibv_create_ah *p_create_av; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_ah), sizeof(struct ibv_create_ah_resp) ); + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + mlnx_ual_hobul_t *p_hobul = p_pd->p_hobul; + + UVP_ENTER(UVP_DBG_AV); + + CL_ASSERT(p_umv_buf); + + // sanity check + if (p_av_attr->port_num == 0 || p_av_attr->port_num > p_hobul->p_hca_attr->num_ports) { + UVP_PRINT(TRACE_LEVEL_WARNING ,UVP_DBG_AV , + (" invalid port number specified (%d)\n",p_av_attr->port_num)); + status = IB_INVALID_PORT; + goto end; + } + + // convert parameters + cl_memset( &attr, 0, sizeof(attr)); + status = map_itom_av_attr (p_hobul->p_hca_attr, p_av_attr, &attr); + if(status != IB_SUCCESS ) + goto end; + + // allocate Ah object + ah = cl_zalloc( sizeof *ah ); + if( !ah ) { + status = IB_INSUFFICIENT_MEMORY; + goto end; + } + + // fill AH partly + ah->h_uvp_pd = h_uvp_pd; + cl_memcpy( &ah->av_attr, p_av_attr, sizeof(ah->av_attr) ); + + // try to create AV + err = mthca_alloc_av(to_mpd(p_pd->ibv_pd), &attr, ah, NULL); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_AV , ("mthca_alloc_av failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_av; + } + + // allocate parameters + if( !p_umv_buf->p_inout_buf ) { + p_umv_buf->p_inout_buf = cl_zalloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_mem; + } + } + + // fill the parameters + p_umv_buf->input_size = sizeof(struct ibv_create_ah); + p_umv_buf->output_size = sizeof(struct ibv_create_ah_resp); + p_umv_buf->command = TRUE; + p_create_av = (struct ibv_create_ah *)p_umv_buf->p_inout_buf; + p_create_av->user_handle = (uint64_t)(ULONG_PTR)ah; + if (ah->in_kernel) { + struct mthca_ah_page *page = ah->page; + p_create_av->mr.start = (uint64_t)(ULONG_PTR)page->buf; + p_create_av->mr.length = g_page_size; + p_create_av->mr.hca_va = (uint64_t)(ULONG_PTR)page->buf; + p_create_av->mr.pd_handle = p_pd->ibv_pd->handle; + p_create_av->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn; + p_create_av->mr.access_flags = 0; //local read + status = IB_SUCCESS; + } + else + status = IB_VERBS_PROCESSING_DONE; + + goto end; + +err_mem: + mthca_free_av(ah); +err_alloc_av: + cl_free(ah); +end: + UVP_EXIT(UVP_DBG_AV); + return status; +} + + +void +mlnx_post_create_av ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_av_handle_t *ph_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + int err; + struct mthca_ah *ah; + struct mthca_ah_page *page; + struct ibv_create_ah_resp *p_resp; + ib_api_status_t status = IB_SUCCESS; + 
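+    // Note: mlnx_pre_create_av stored the mthca_ah pointer in
+    // p_create_av->user_handle; the kernel returns it in
+    // ibv_create_ah_resp.user_handle, where it is recovered below.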
mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + + UVP_ENTER(UVP_DBG_AV); + + CL_ASSERT(p_umv_buf); + + p_resp = (struct ibv_create_ah_resp *)p_umv_buf->p_inout_buf; + ah = (struct mthca_ah *)(ULONG_PTR)p_resp->user_handle; + + if (IB_SUCCESS == ioctl_status) { + + if (!mthca_is_memfree(p_pd->ibv_pd->context)) { + page = ah->page; + if (ah->in_kernel) { + // fill mr parameters + page->mr.handle = p_resp->mr.mr_handle; + page->mr.lkey = p_resp->mr.lkey; + page->mr.rkey = p_resp->mr.rkey; + page->mr.pd = p_pd->ibv_pd; + page->mr.context = p_pd->ibv_pd->context; + } + ah->key = page->mr.lkey; + } + *ph_uvp_av = (ib_av_handle_t)ah; + } + else { + mthca_free_av(ah); + cl_free(ah); + } + goto end; + +end: + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_AV); +} + +ib_api_status_t +mlnx_pre_query_av ( + IN const ib_av_handle_t h_uvp_av, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + UNREFERENCED_PARAMETER(h_uvp_av); + UNREFERENCED_PARAMETER(p_umv_buf); + UVP_ENTER(UVP_DBG_AV); + UVP_EXIT(UVP_DBG_AV); + return IB_VERBS_PROCESSING_DONE; +} + + +void +mlnx_post_query_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ib_av_attr_t *p_addr_vector, + IN OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + struct mthca_ah *ah = (struct mthca_ah *)h_uvp_av; + UNREFERENCED_PARAMETER(p_umv_buf); + + UVP_ENTER(UVP_DBG_AV); + CL_ASSERT(p_umv_buf); + CL_ASSERT(p_addr_vector); + + if (ioctl_status == IB_SUCCESS) + { + cl_memcpy (p_addr_vector, &ah->av_attr, sizeof (ib_av_attr_t)); + if (ph_pd) + *ph_pd = (ib_pd_handle_t)ah->h_uvp_pd; + } + + UVP_EXIT(UVP_DBG_AV); +} + +void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr ); + +ib_api_status_t +mlnx_pre_modify_av ( + IN const ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status ; + struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av; + mlnx_ual_pd_info_t *p_pd_info; + mlnx_ual_hobul_t *p_hobul; + struct ibv_ah_attr attr; + + UNREFERENCED_PARAMETER(p_umv_buf); + + UVP_ENTER(UVP_DBG_AV); + + CL_ASSERT(p_umv_buf); + + p_pd_info = mthca_ah->h_uvp_pd; + CL_ASSERT (p_pd_info); + + p_hobul = p_pd_info->p_hobul; + CL_ASSERT (p_hobul); + + status = map_itom_av_attr (p_hobul->p_hca_attr, p_addr_vector, &attr); + if(status != IB_SUCCESS) return status; + + mthca_set_av_params( mthca_ah, &attr); + cl_memcpy (&mthca_ah->av_attr, p_addr_vector, sizeof(ib_av_attr_t)); + + UVP_EXIT(UVP_DBG_AV); + + return IB_VERBS_PROCESSING_DONE; +} + +void +mlnx_post_modify_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_AV); + UVP_EXIT(UVP_DBG_AV); +} + + +ib_api_status_t +mlnx_pre_destroy_av ( + IN const ib_av_handle_t h_uvp_av) +{ + ib_api_status_t status ; + struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av; + UVP_ENTER(UVP_DBG_AV); + if (mthca_ah->in_kernel) + status = IB_SUCCESS; + else + status = IB_VERBS_PROCESSING_DONE; + UVP_EXIT(UVP_DBG_AV); + return status; +} + +void +mlnx_post_destroy_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status) +{ + struct mthca_ah *mthca_ah = (struct mthca_ah *)h_uvp_av; + + UVP_ENTER(UVP_DBG_AV); + CL_ASSERT (h_uvp_av); + + if (IB_SUCCESS == ioctl_status) { + mthca_free_av(mthca_ah); + cl_free(mthca_ah); + } + + UVP_EXIT(UVP_DBG_AV); + return; +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_ca.c b/branches/Ndi/hw/mthca/user/mlnx_ual_ca.c new file mode 100644 index 
00000000..29b09812 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_ca.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mlnx_ual_main.h" +#include "mt_l2w.h" +#include "mlnx_uvp.h" +#include "mlnx_uvp_verbs.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_ca.tmh" +#endif + +extern uint32_t mlnx_dbg_lvl; + +void +mlnx_get_ca_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + CL_ASSERT(p_uvp); + + /* + * HCA Access Verbs + */ + p_uvp->pre_open_ca = mlnx_pre_open_ca; + p_uvp->post_open_ca = mlnx_post_open_ca; + + + p_uvp->pre_query_ca = mlnx_pre_query_ca; + p_uvp->post_query_ca = mlnx_post_query_ca; + + p_uvp->pre_modify_ca = NULL; + p_uvp->post_modify_ca = NULL; + + p_uvp->pre_close_ca = mlnx_pre_close_ca; + p_uvp->post_close_ca = mlnx_post_close_ca; + +} + + + +ib_api_status_t +mlnx_pre_open_ca ( + IN const ib_net64_t ca_guid, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status = IB_SUCCESS; + + UVP_ENTER(UVP_DBG_SHIM); + if( p_umv_buf ) + { + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_get_context_resp) ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = p_umv_buf->output_size = sizeof(struct ibv_get_context_resp); + p_umv_buf->command = TRUE; + } +err_memory: + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +ib_api_status_t +mlnx_post_open_ca ( + IN const ib_net64_t ca_guid, + IN ib_api_status_t ioctl_status, + OUT ib_ca_handle_t *ph_uvp_ca, + IN ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status = ioctl_status; + mlnx_ual_hobul_t *new_ca; + struct ibv_get_context_resp *p_resp; + struct ibv_context * ibvcontext; + int err; + + UVP_ENTER(UVP_DBG_SHIM); + + p_resp = (struct ibv_get_context_resp *)p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == status) { + /* allocate ibv context */ + ibvcontext = mthca_alloc_context(p_resp); + if (IS_ERR(ibvcontext)) { + err = PTR_ERR(ibvcontext); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM ,("mthca_alloc_context failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_context; + } + + /* allocate mthca context */ + new_ca = (mlnx_ual_hobul_t *)cl_zalloc( sizeof(mlnx_ual_hobul_t) ); + if( !new_ca ) { + status = IB_INSUFFICIENT_MEMORY; + 
goto err_memory; + } + + /* return results */ + new_ca->ibv_ctx = ibvcontext; + new_ca->p_hca_attr = NULL; + *ph_uvp_ca = (ib_ca_handle_t)new_ca; + } + +err_memory: +err_alloc_context: + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_SHIM); + return status; +} + +ib_api_status_t +mlnx_pre_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ) +{ + ib_api_status_t status = IB_SUCCESS; + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(h_uvp_ca); + + /* + * First time call query_ca - populate our internal cached attributes + * so we can access the GID table. Note that query_ca calls *always* + * get their attributes from the kernel. + */ + if ( !h_uvp_ca->p_hca_attr ) + { + /* + * Assume if user buffer is valid then byte_cnt is valid too + * so we can preallocate ca attr buffer for post ioctl data saving + * + * Note that we squirel the buffer away into the umv_buf and only + * set it into the HCA if the query is successful. + */ + if ( p_ca_attr != NULL ) + { + p_umv_buf->p_inout_buf = cl_zalloc(byte_count); + if ( !p_umv_buf->p_inout_buf ) + { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , + ("Failed to alloc new_ca\n")); + status = IB_INSUFFICIENT_RESOURCES; + return status; + } + } + p_umv_buf->input_size = p_umv_buf->output_size = 0; + } + + UVP_EXIT(UVP_DBG_SHIM); + return status; +} + + +void +mlnx_post_query_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN ib_ca_attr_t *p_ca_attr, + IN size_t byte_count, + IN ci_umv_buf_t *p_umv_buf ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(h_uvp_ca); + CL_ASSERT(p_umv_buf); + + if ( ioctl_status == IB_SUCCESS && p_ca_attr && + byte_count && !h_uvp_ca->p_hca_attr ) + { + CL_ASSERT( byte_count >= p_ca_attr->size ); + h_uvp_ca->p_hca_attr = p_umv_buf->p_inout_buf; + ib_copy_ca_attr( h_uvp_ca->p_hca_attr, p_ca_attr ); + } + else if (p_umv_buf->p_inout_buf) + { + cl_free (p_umv_buf->p_inout_buf); + } + + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + +ib_api_status_t +mlnx_pre_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN uint8_t port_num, + IN ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* p_port_attr_mod) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_modify_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); +} + + +ib_api_status_t +mlnx_pre_close_ca ( + IN ib_ca_handle_t h_uvp_ca) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +ib_api_status_t +mlnx_post_close_ca ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ) +{ + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void*)h_uvp_ca); + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_hobul); + + if (IB_SUCCESS == ioctl_status) { + if (p_hobul->ibv_ctx) { + mthca_free_context(p_hobul->ibv_ctx); + p_hobul->ibv_ctx = NULL; + } + + if (p_hobul->p_hca_attr) { + cl_free( p_hobul->p_hca_attr); + p_hobul->p_hca_attr = NULL; + } + + cl_free(p_hobul); + } + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_cq.c b/branches/Ndi/hw/mthca/user/mlnx_ual_cq.c new file mode 100644 index 00000000..ef704152 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_cq.c @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mlnx_ual_main.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_cq.tmh" +#endif + + +extern uint32_t mlnx_dbg_lvl; + +void +mlnx_get_cq_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_DEV); + + CL_ASSERT(p_uvp); + + /* + * Completion Queue Management Verbs + */ + p_uvp->pre_create_cq = mlnx_pre_create_cq; + p_uvp->post_create_cq = mlnx_post_create_cq; + + p_uvp->pre_query_cq = mlnx_pre_query_cq; + p_uvp->post_query_cq = NULL; + + p_uvp->pre_resize_cq = NULL; /* mlnx_pre_resize_cq: not supported in kernel */ + p_uvp->post_resize_cq = NULL; /* mlnx_post_resize_cq:not supported in kernel */ + + p_uvp->pre_destroy_cq = mlnx_pre_destroy_cq; + p_uvp->post_destroy_cq = mlnx_post_destroy_cq; + + UVP_EXIT(UVP_DBG_DEV); +} + +ib_api_status_t + mlnx_pre_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ) +{ + struct ibv_cq *ibv_cq; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_cq), sizeof(struct ibv_create_cq_resp) ); + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca); + struct ibv_create_cq *p_create_cq; + int err; + + UVP_ENTER(UVP_DBG_CQ); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_cq); + p_umv_buf->output_size = sizeof(struct ibv_create_cq_resp); + p_umv_buf->command = TRUE; + + /* allocate ibv_cq */ + p_create_cq = (struct ibv_create_cq *)p_umv_buf->p_inout_buf; + ibv_cq = p_hobul->ibv_ctx->ops.create_cq_pre(p_hobul->ibv_ctx, p_size, p_create_cq); + if (IS_ERR(ibv_cq)) { + err = PTR_ERR(ibv_cq); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_alloc_cq_pre failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_cq; + } + + goto end; + +err_alloc_cq: + cl_free(p_umv_buf->p_inout_buf); +err_memory: +end: + UVP_EXIT(UVP_DBG_CQ); + return status; +} + + +void +mlnx_post_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + OUT ib_cq_handle_t *ph_uvp_cq, + IN ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ibv_create_cq_resp *p_resp; + struct ibv_cq 
*ibv_cq; + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca); + + + UVP_ENTER(UVP_DBG_CQ); + + CL_ASSERT(p_hobul); + CL_ASSERT(p_umv_buf); + p_resp = (struct ibv_create_cq_resp *)p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) { + + /* allocate ibv_cq */ + ibv_cq = p_hobul->ibv_ctx->ops.create_cq_post(p_hobul->ibv_ctx, p_resp); + if (IS_ERR(ibv_cq)) { + err = PTR_ERR(ibv_cq); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_create_cq failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_cq; + } + + *ph_uvp_cq = (ib_cq_handle_t)ibv_cq; + } + goto end; + + p_hobul->ibv_ctx->ops.destroy_cq(ibv_cq); +err_create_cq: +end: + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_CQ); + return; +} + + +ib_api_status_t +mlnx_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + struct ibv_cq *ibv_cq = (struct ibv_cq *)h_uvp_cq; + + UVP_ENTER(UVP_DBG_CQ); + + *p_size = ibv_cq->cqe; + + UVP_EXIT(UVP_DBG_CQ); + return IB_VERBS_PROCESSING_DONE; +} + + +ib_api_status_t +mlnx_pre_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq) +{ + UVP_ENTER(UVP_DBG_CQ); + UVP_EXIT(UVP_DBG_CQ); + return IB_SUCCESS; +} + +void +mlnx_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status) +{ + int err; + struct ibv_cq *ibv_cq = (struct ibv_cq *)h_uvp_cq; + UNREFERENCED_PARAMETER(ioctl_status); + + UVP_ENTER(UVP_DBG_CQ); + + CL_ASSERT(ibv_cq); + + if (IB_SUCCESS == ioctl_status) { + err = ibv_cq->context->ops.destroy_cq( ibv_cq ); + if (err) + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ, ("mthca_destroy_cq failed (%d)\n", err)); + //cl_free (p_cq_info); + } + + UVP_EXIT(UVP_DBG_CQ); +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_data.h b/branches/Ndi/hw/mthca/user/mlnx_ual_data.h new file mode 100644 index 00000000..fa201d97 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_data.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ +#include +#include + +// taken from ib_defs.h +typedef uint32_t IB_wqpn_t; /* Work QP number: Only 24 LSbits */ +typedef uint8_t IB_port_t; +typedef uint8_t IB_gid_t[16]; /* GID (aka IPv6) H-to-L (big) (network) endianess */ +typedef uint32_t IB_ts_t; + +typedef struct _ib_ca +{ + struct ibv_context *ibv_ctx; + ib_ca_attr_t *p_hca_attr; +} mlnx_ual_hobul_t; + + +typedef struct _ib_pd +{ + struct ibv_pd *ibv_pd; + mlnx_ual_hobul_t *p_hobul; +} mlnx_ual_pd_info_t; + +typedef struct _ib_mw +{ + ib_pd_handle_t h_uvp_pd; + uint32_t rkey; +} mlnx_ual_mw_info_t; + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_main.c b/branches/Ndi/hw/mthca/user/mlnx_ual_main.c new file mode 100644 index 00000000..03d90576 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_main.c @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include "mlnx_ual_main.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_main.tmh" +#endif + + +uint32_t mlnx_dbg_lvl = 0; // MLNX_TRACE_LVL_8; + +static void uvp_init(); + +extern BOOL APIENTRY +_DllMainCRTStartupForGS( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ); + + +BOOL APIENTRY +DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: +#if defined(EVENT_TRACING) +#if DBG + WPP_INIT_TRACING(L"mthcaud.dll"); +#else + WPP_INIT_TRACING(L"mthcau.dll"); +#endif +#endif + if( !_DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ) ) + { + return FALSE; + } + + fill_bit_tbls(); + uvp_init(); + break; + + case DLL_PROCESS_DETACH: + // The calling process is detaching + // the DLL from its address space. + // + // Note that lpvReserved will be NULL if the detach is due to + // a FreeLibrary() call, and non-NULL if the detach is due to + // process cleanup. 
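+ // Control then falls through to the default case so that the CRT
+ // handler also runs for DLL_PROCESS_DETACH.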
+ // +#if defined(EVENT_TRACING) + WPP_CLEANUP(); +#endif + + default: + return _DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ); + } + return TRUE; +} + + +/* + * UVP Shared Library Init routine +*/ + +static void +uvp_init() +{ + +#if !defined(EVENT_TRACING) +#if DBG +#define ENV_BUFSIZE 16 + TCHAR dbg_lvl_str[ENV_BUFSIZE]; + DWORD i; + + + i = GetEnvironmentVariable( "UVP_DBG_LEVEL", dbg_lvl_str, ENV_BUFSIZE ); + if( i && i <= 16 ) + { + g_mlnx_dbg_level = _tcstoul( dbg_lvl_str, NULL, ENV_BUFSIZE ); + } + + i = GetEnvironmentVariable( "UVP_DBG_FLAGS", dbg_lvl_str, ENV_BUFSIZE ); + if( i && i <= 16 ) + { + g_mlnx_dbg_flags = _tcstoul( dbg_lvl_str, NULL, ENV_BUFSIZE ); + } + + + UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_DEV , + ("Given UVP_DBG debug level:%d debug flags 0x%x\n", + g_mlnx_dbg_level ,g_mlnx_dbg_flags) ); + +#endif +#endif +} + +__declspec(dllexport) ib_api_status_t +uvp_get_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_uvp); + /* + * Version of the header file this interface export can handle + */ + p_uvp->version = 0x100; + p_uvp->guid = 0x12345678; + + /* + * CA Management + */ + mlnx_get_ca_interface (p_uvp); + + /* + * Protection Domain + */ + mlnx_get_pd_interface (p_uvp); + + /* + * SRQ Management Verbs + */ + mlnx_get_srq_interface (p_uvp); + + /* + * QP Management Verbs + */ + mlnx_get_qp_interface (p_uvp); + + /* + * Completion Queue Management Verbs + */ + mlnx_get_cq_interface (p_uvp); + + /* + * AV Management + */ + mlnx_get_av_interface(p_uvp); + + /* + * Memory Region / Window Management Verbs + */ + mlnx_get_mrw_interface (p_uvp); + + /* + * Multicast Support Verbs + */ + mlnx_get_mcast_interface (p_uvp); + + /* + * OS bypass (send, receive, poll/notify cq) + */ + mlnx_get_osbypass_interface(p_uvp); + + + /* + * Local MAD support, for HCA's that do not support + * Agents in the HW. + * ??? Do we need this for user-mode ??? + */ + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_main.h b/branches/Ndi/hw/mthca/user/mlnx_ual_main.h new file mode 100644 index 00000000..bbc5fdc0 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_main.h @@ -0,0 +1,576 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#ifndef __UAL_MAIN_H__
+#define __UAL_MAIN_H__
+
+#include
+#include
+#include
+
+//#include
+#include "mlnx_ual_data.h"
+#include "mlnx_uvp_debug.h"
+#include
+#include
+//#include
+
+
+#define MAX_WRS_PER_CHAIN 16
+#define MAX_NUM_SGE 32
+
+#define MLNX_SGE_SIZE 16
+#define MLNX_UAL_ALLOC_HCA_UL_RES 1
+#define MLNX_UAL_FREE_HCA_UL_RES 2
+
+typedef unsigned __int3264 cl_dev_handle_t;
+
+extern uint32_t mlnx_dbg_lvl;
+
+/* Map a (possibly negated) errno value to the closest IBAL status code. */
+static inline ib_api_status_t errno_to_iberr(int err)
+{
+#define MAP_ERR(err,ibstatus) case err: ib_status = ibstatus; break
+    ib_api_status_t ib_status = IB_UNKNOWN_ERROR;
+    if (err < 0)
+        err = -err;
+    switch (err) {
+        MAP_ERR( ENOENT, IB_NOT_FOUND );
+        MAP_ERR( EINTR, IB_INTERRUPTED );
+        MAP_ERR( EAGAIN, IB_RESOURCE_BUSY );
+        MAP_ERR( ENOMEM, IB_INSUFFICIENT_MEMORY );
+        MAP_ERR( EACCES, IB_INVALID_PERMISSION );
+        MAP_ERR( EFAULT, IB_ERROR );
+        MAP_ERR( EBUSY, IB_RESOURCE_BUSY );
+        MAP_ERR( ENODEV, IB_UNSUPPORTED );
+        MAP_ERR( EINVAL, IB_INVALID_PARAMETER );
+        MAP_ERR( ENOSYS, IB_UNSUPPORTED );
+        default:
+            CL_TRACE (CL_DBG_ERROR, mlnx_dbg_lvl, ("Unmapped errno (%d)\n", err));
+            break;
+    }
+    return ib_status;
+}
+
+
+/*
+ * PROTOTYPES
+ */
+
+/************* CA operations *************************/
+void
+mlnx_get_ca_interface (
+    IN OUT uvp_interface_t *p_uvp );
+
+
+ib_api_status_t
+mlnx_pre_open_ca (
+    IN const ib_net64_t ca_guid,
+    IN OUT ci_umv_buf_t *p_umv_buf);
+
+
+ib_api_status_t
+mlnx_post_open_ca (
+    IN const ib_net64_t ca_guid,
+    IN ib_api_status_t ioctl_status,
+    OUT ib_ca_handle_t *ph_uvp_ca,
+    IN ci_umv_buf_t *p_umv_buf );
+
+
+ib_api_status_t
+mlnx_pre_query_ca (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN ib_ca_attr_t *p_ca_attr,
+    IN size_t byte_count,
+    IN ci_umv_buf_t *p_umv_buf );
+
+void
+mlnx_post_query_ca (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN ib_api_status_t ioctl_status,
+    IN ib_ca_attr_t *p_ca_attr,
+    IN size_t byte_count,
+    IN ci_umv_buf_t *p_umv_buf );
+
+ib_api_status_t
+mlnx_pre_modify_ca (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN uint8_t port_num,
+    IN ib_ca_mod_t modca_cmd,
+    IN const ib_port_attr_mod_t* p_port_attr_mod );
+
+void
+mlnx_post_modify_ca (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN ib_api_status_t ioctl_status);
+
+ib_api_status_t
+mlnx_pre_close_ca (
+    IN ib_ca_handle_t h_uvp_ca );
+
+ib_api_status_t
+mlnx_post_close_ca (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN ib_api_status_t ioctl_status );
+
+
+/************* PD Management *************************/
+void
+mlnx_get_pd_interface (
+    IN OUT uvp_interface_t *p_uvp );
+
+ib_api_status_t
+mlnx_pre_allocate_pd (
+    IN const ib_ca_handle_t h_uvp_ca,
+    IN OUT ci_umv_buf_t *p_umv_buf);
+
+void
+mlnx_post_allocate_pd (
+    IN ib_ca_handle_t h_uvp_ca,
+    IN ib_api_status_t ioctl_status,
+    OUT ib_pd_handle_t *ph_uvp_pd,
+    IN ci_umv_buf_t *p_umv_buf );
+
+ib_api_status_t
+mlnx_pre_deallocate_pd (
+    IN const ib_pd_handle_t h_uvp_pd);
+
+void
+mlnx_post_deallocate_pd (
+    IN const ib_pd_handle_t h_uvp_pd,
+    IN ib_api_status_t ioctl_status );
+
+
+/************* AV Management *************************/
+void
+mlnx_get_av_interface (
+    IN OUT uvp_interface_t *p_uvp );
+
+ib_api_status_t
+mlnx_pre_create_av (
+    IN const ib_pd_handle_t h_uvp_pd,
+    IN const ib_av_attr_t *p_addr_vector,
+    IN OUT ci_umv_buf_t *p_umv_buf);
+
+
+void
+mlnx_post_create_av (
+    IN const ib_pd_handle_t h_uvp_pd,
+    IN ib_api_status_t ioctl_status,
+    OUT ib_av_handle_t *ph_uvp_av,
+    IN OUT ci_umv_buf_t *p_umv_buf);
+
+ib_api_status_t
+mlnx_pre_query_av (
+    IN const ib_av_handle_t h_uvp_av,
+    IN OUT ci_umv_buf_t *p_umv_buf );
+
+void
+mlnx_post_query_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ib_av_attr_t *p_addr_vector, + IN OUT ib_pd_handle_t *ph_pd, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_modify_av ( + IN const ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_av ( + IN const ib_av_handle_t h_uvp_av); + +void +mlnx_post_destroy_av ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status); + + +/************* CQ Management *************************/ +void +mlnx_get_cq_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_create_cq ( + IN const ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + OUT ib_cq_handle_t *ph_uvp_cq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_resize_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf ); + +void +mlnx_post_resize_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status, + IN const uint32_t size, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_cq ( + IN const ib_cq_handle_t h_uvp_cq, + OUT uint32_t* const p_size, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq); + +void +mlnx_post_destroy_cq ( + IN const ib_cq_handle_t h_uvp_cq, + IN ib_api_status_t ioctl_status); + +/************* SRQ Management *************************/ +void +mlnx_get_srq_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_modify_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN const ib_srq_attr_mask_t srq_attr_attr, // Fixme + IN const ib_srq_attr_t *p_srq_attr, // Fixme + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_srq ( + IN ib_srq_handle_t h_uvp_srq, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_srq ( + IN ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN ib_srq_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq); + +void +mlnx_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status ); + + +/************* QP Management *************************/ +void +mlnx_get_qp_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t 
+mlnx_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, // Fixme + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp); + +void +mlnx_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status ); + +/************* MR/MW Management *************************/ +void +mlnx_get_mrw_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_pre_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_create_t *p_mr_create, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const ib_mr_attr_t *p_mr_query, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t *p_mr_create OPTIONAL, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_access_t access_ctrl, + IN void *p_vaddr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const void *p_vaddr, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_smr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_create_mw ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_mw_handle_t *ph_uvp_mw, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_query_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status, + IN net32_t rkey, + OUT ib_pd_handle_t *ph_pd, + IN ci_umv_buf_t *p_umv_buf ); + +ib_api_status_t +mlnx_pre_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw); + // IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_destroy_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN ib_api_status_t ioctl_status); 
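+
+/*
+ * Typical calling sequence for the pre-/post- verb pairs declared
+ * above (an illustrative sketch; ual_ioctl_to_kernel is a hypothetical
+ * stand-in for IBAL's real kernel transition):
+ *
+ *    status = p_uvp->pre_create_mw( h_uvp_pd, &umv_buf );
+ *    if( status == IB_SUCCESS )
+ *        status = ual_ioctl_to_kernel( &umv_buf );
+ *    p_uvp->post_create_mw( h_uvp_pd, status, rkey, &h_uvp_mw, &umv_buf );
+ *
+ * A pre- verb may instead return IB_VERBS_PROCESSING_DONE, meaning the
+ * verb completed entirely in user mode and the kernel transition is
+ * skipped.
+ */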
+ + +/************* MCAST Management *************************/ +void +mlnx_get_mcast_interface ( + IN OUT uvp_interface_t *p_uvp ); + + +ib_api_status_t +mlnx_pre_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf); + +ib_api_status_t +mlnx_pre_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN OUT ci_umv_buf_t *p_umv_buf); + +void +mlnx_post_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf); + + +/************* OS BYPASS Management *************************/ +void +mlnx_get_osbypass_interface ( + IN OUT uvp_interface_t *p_uvp ); + +ib_api_status_t +mlnx_post_send ( + IN const void* __ptr64 h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t** pp_send_failure ); + +ib_api_status_t +mlnx_post_recv ( + IN const void* __ptr64 h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ); + +ib_api_status_t +mlnx_post_srq_recv ( + IN const void* __ptr64 h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ); + +ib_api_status_t +mlnx_bind_mw ( + IN const ib_mw_handle_t h_uvp_mw, + IN const ib_qp_handle_t h_uvp_qp, + IN ib_bind_wr_t *p_mw_bind, + OUT net32_t* const p_rkey ); + +ib_api_status_t +mlnx_poll_cq ( + IN const void* __ptr64 h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + +ib_api_status_t +mlnx_enable_cq_notify ( + IN const void* __ptr64 h_cq, + IN const boolean_t solicited ); + +ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const void* __ptr64 h_cq, + IN const uint32_t n_cqes ); + +ib_api_status_t +mlnx_peek_cq ( + IN const void* __ptr64 h_cq, + OUT uint32_t* const p_n_cqes ); + +#endif diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_mcast.c b/branches/Ndi/hw/mthca/user/mlnx_ual_mcast.c new file mode 100644 index 00000000..0b4cdac1 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_mcast.c @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mlnx_ual_main.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_mcast.tmh" +#endif + +void +mlnx_get_mcast_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_uvp); + + /* + * Multicast Support Verbs + */ + p_uvp->pre_attach_mcast = NULL; + p_uvp->post_attach_mcast = NULL; + p_uvp->pre_detach_mcast = NULL; + p_uvp->post_detach_mcast = NULL; + + UVP_EXIT(UVP_DBG_SHIM); +} + + + +ib_api_status_t +mlnx_pre_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_gid_t *p_mcast_gid, + IN const uint16_t mcast_lid, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + p_umv_buf->command = TRUE; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + + +void +mlnx_post_attach_mcast ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + OUT ib_mcast_handle_t *ph_mcast, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); +} + + + +ib_api_status_t +mlnx_pre_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_detach_mcast ( + IN ib_mcast_handle_t h_uvp_mcast, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); +} diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_mrw.c b/branches/Ndi/hw/mthca/user/mlnx_ual_mrw.c new file mode 100644 index 00000000..c02f9891 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_mrw.c @@ -0,0 +1,229 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mlnx_ual_main.h" +#if defined(EVENT_TRACING) +#include "mlnx_ual_mrw.tmh" +#endif + +void +mlnx_get_mrw_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_uvp); + + /* + * Memory Management Verbs + */ +// p_uvp->pre_register_mr = NULL; +// p_uvp->post_register_mr = NULL; +// p_uvp->pre_query_mr = NULL; +// p_uvp->post_query_mr = NULL; +// p_uvp->pre_deregister_mr = NULL; +// p_uvp->post_deregister_mr = NULL; +// p_uvp->pre_modify_mr = NULL; +// p_uvp->post_modify_mr = NULL; +// p_uvp->pre_register_smr = NULL; +// p_uvp->post_register_smr = NULL; + + /* + * Memory Window Verbs + */ + p_uvp->pre_create_mw = NULL; // mlnx_pre_create_mw + p_uvp->post_create_mw = NULL; // mlnx_post_create_mw + p_uvp->pre_query_mw = NULL; // mlnx_pre_query_mw + p_uvp->post_query_mw = NULL; // mlnx_post_query_mw + p_uvp->pre_destroy_mw = NULL; // mlnx_pre_destroy_mw + p_uvp->post_destroy_mw = NULL; // mlnx_post_destroy_mw + + /* register_pmr is not supported in user-mode */ + + UVP_EXIT(UVP_DBG_SHIM); +} + + + +ib_api_status_t +mlnx_pre_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_create_t *p_mr_create, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_register_mr ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + +ib_api_status_t +mlnx_pre_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_query_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN ib_api_status_t ioctl_status, + IN const ib_mr_attr_t *p_mr_query, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + +ib_api_status_t +mlnx_pre_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t *p_mr_create OPTIONAL, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_modify_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_pd_handle_t h_uvp_pd OPTIONAL, + IN ib_api_status_t ioctl_status, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + +ib_api_status_t +mlnx_pre_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + IN const ib_access_t access_ctrl, + IN void *p_vaddr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + p_umv_buf->p_inout_buf = NULL;; + p_umv_buf->input_size = 0; + p_umv_buf->output_size = 0; + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_register_smr ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_mr_handle_t h_uvp_mr, + 
IN ib_api_status_t ioctl_status, + IN const void *p_vaddr, + IN const uint32_t *p_lkey, + IN const uint32_t *p_rkey, + OUT const ib_mr_handle_t *ph_uvp_smr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + +ib_api_status_t +mlnx_pre_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_deregister_mr ( + IN const ib_mr_handle_t h_uvp_mr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UVP_ENTER(UVP_DBG_SHIM); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_osbypass.c b/branches/Ndi/hw/mthca/user/mlnx_ual_osbypass.c new file mode 100644 index 00000000..d216922d --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_osbypass.c @@ -0,0 +1,258 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#include "mlnx_ual_main.h" +#if defined(EVENT_TRACING) +#include "mlnx_ual_osbypass.tmh" +#endif + + +void +mlnx_get_osbypass_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + + CL_ASSERT(p_uvp); + + /* + * Work Request Processing Verbs + * Should the types be same as Verbs? + */ + p_uvp->post_send = mlnx_post_send; + p_uvp->post_recv = mlnx_post_recv; + p_uvp->post_srq_recv = mlnx_post_srq_recv; + + /* + * Completion Processing and + * Completion Notification Request Verbs. + * Should the types be same as Verbs? 
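+ *
+ * (rearm_cq maps to mlnx_enable_cq_notify; rearm_n_cq, peek_cq and
+ * bind_mw stay NULL below because this library does not implement
+ * them.)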
+ */ + p_uvp->poll_cq = mlnx_poll_cq; + p_uvp->rearm_cq = mlnx_enable_cq_notify; + p_uvp->rearm_n_cq = NULL; /* mlnx_enable_ncomp_cq_notify: Not implemented */ + p_uvp->peek_cq = NULL; /* mlnx_peek_cq: Not implemented */ + + /* Memory window bind */ + p_uvp->bind_mw = NULL; /* mlnx_bind_mw: Not implemented */ +} + + 
+ib_api_status_t +mlnx_post_send ( + IN const void* __ptr64 h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t** pp_send_failure ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_qp *qp = (struct mthca_qp *) ((void*)h_qp); + + UVP_ENTER(UVP_DBG_QP); + + CL_ASSERT (qp); + + CL_ASSERT( p_send_wr ); + + err = qp->ibv_qp.context->ops.post_send(&qp->ibv_qp, p_send_wr, pp_send_failure ); + + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_post_send failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else if (err == -EINVAL) + status = IB_INVALID_WR_TYPE; + else if (err == -ERANGE) + status = IB_INVALID_MAX_SGE; + else if (err == -EBUSY) + status = IB_INVALID_QP_STATE; + else + status = errno_to_iberr(err); + } + + UVP_EXIT(UVP_DBG_QP); + return status; +} + 
+ib_api_status_t +mlnx_post_recv ( + IN const void* __ptr64 h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_qp *qp = (struct mthca_qp *) ((void*)h_qp); + + UVP_ENTER(UVP_DBG_QP); + + CL_ASSERT (qp); + + CL_ASSERT( p_recv_wr ); + + err = qp->ibv_qp.context->ops.post_recv(&qp->ibv_qp, p_recv_wr, pp_recv_failure ); + + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else if (err == -EINVAL) + status = IB_INVALID_WR_TYPE; + else if (err == -ERANGE) + status = IB_INVALID_MAX_SGE; + else if (err == -EBUSY) + status = IB_INVALID_QP_STATE; + else + status = errno_to_iberr(err); + } + + UVP_EXIT(UVP_DBG_QP); + return status; +} + + 
+ib_api_status_t +mlnx_post_srq_recv ( + IN const void* __ptr64 h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t** pp_recv_failure ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_srq); + + UVP_ENTER(UVP_DBG_QP); + + CL_ASSERT (srq); + + CL_ASSERT( p_recv_wr ); + + err = srq->ibv_srq.context->ops.post_srq_recv(&srq->ibv_srq, p_recv_wr, pp_recv_failure ); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP, ("mthca_post_srq_recv failed (%d)\n", err)); + if (err == -ENOMEM) + status = IB_INSUFFICIENT_RESOURCES; + else if (err == -EINVAL) + status = IB_INVALID_WR_TYPE; + else if (err == -ERANGE) + status = IB_INVALID_MAX_SGE; + else if (err == -EBUSY) + status = IB_INVALID_QP_STATE; + else + status = errno_to_iberr(err); + } + + UVP_EXIT(UVP_DBG_QP); + return status; +} + + 
+ib_api_status_t +mlnx_poll_cq ( + IN const void* __ptr64 h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_cq *cq = (struct mthca_cq *) ((void*)h_cq); + + UVP_ENTER(UVP_DBG_CQ); + CL_ASSERT (cq); + + if (!pp_free_wclist || !*pp_free_wclist || !pp_done_wclist) + { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ ,("Passed in bad params\n")); + status = IB_INVALID_PARAMETER; + goto err_invalid_params; + } + + err = cq->ibv_cq.context->ops.poll_cq_list(&cq->ibv_cq, pp_free_wclist, pp_done_wclist ); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ , ("mthca_poll_cq failed (%d)\n", err)); + 
status = errno_to_iberr(err); + } else if (!*pp_done_wclist) + status = IB_NOT_FOUND; + + +err_invalid_params: + + if (status != IB_SUCCESS && status != IB_NOT_FOUND) { + UVP_PRINT_EXIT(TRACE_LEVEL_ERROR ,UVP_DBG_CQ ,("completes with ERROR status %s\n", ib_get_err_str(status))); + } else + UVP_EXIT(UVP_DBG_CQ); + + return status; +} + + 
+ib_api_status_t +mlnx_enable_cq_notify ( + IN const void* __ptr64 h_cq, + IN const boolean_t solicited ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct mthca_cq *cq = (struct mthca_cq *) ((void*)h_cq); + + UVP_ENTER(UVP_DBG_CQ); + CL_ASSERT (cq); + + err = cq->ibv_cq.context->ops.req_notify_cq(&cq->ibv_cq, (solicited) ? IB_CQ_SOLICITED : IB_CQ_NEXT_COMP ); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_enable_cq_notify failed (%d)\n", err)); + status = errno_to_iberr(err); + goto exit; + } + +exit: + UVP_EXIT(UVP_DBG_CQ); + return status; +} + + 
+ib_api_status_t +mlnx_enable_ncomp_cq_notify ( + IN const void* __ptr64 h_cq, + IN const uint32_t n_cqes ) +{ + // Not yet implemented + ib_api_status_t status = IB_UNSUPPORTED; + UVP_ENTER(UVP_DBG_SHIM); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mlnx_enable_ncomp_cq_notify is not implemented yet\n")); + UVP_EXIT(UVP_DBG_SHIM); + return status; +} + 
diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_pd.c b/branches/Ndi/hw/mthca/user/mlnx_ual_pd.c new file mode 100644 index 00000000..bf8a5489 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_pd.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + + +#include "mt_l2w.h" +#include "mlnx_ual_main.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_pd.tmh" +#endif + 
+void +mlnx_get_pd_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_uvp); + + /* + * Protection Domain + */ + p_uvp->pre_allocate_pd = mlnx_pre_allocate_pd; + p_uvp->post_allocate_pd = mlnx_post_allocate_pd; + p_uvp->pre_deallocate_pd = mlnx_pre_deallocate_pd; + p_uvp->post_deallocate_pd = mlnx_post_deallocate_pd; + + UVP_EXIT(UVP_DBG_SHIM); +} + 
+ib_api_status_t +mlnx_pre_allocate_pd ( + IN const ib_ca_handle_t h_uvp_ca, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_alloc_pd_resp) ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = p_umv_buf->output_size = sizeof(struct ibv_alloc_pd_resp); + p_umv_buf->command = TRUE; + +err_memory: + UVP_EXIT(UVP_DBG_SHIM); + return status; +} + + 
+void +mlnx_post_allocate_pd ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status, + OUT ib_pd_handle_t *ph_uvp_pd, + IN ci_umv_buf_t *p_umv_buf ) +{ + int err; + ib_api_status_t status = IB_SUCCESS; + struct ibv_alloc_pd_resp *p_resp; + struct ibv_pd *ibv_pd; + mlnx_ual_hobul_t *p_hobul = (mlnx_ual_hobul_t *)((void *)h_uvp_ca); + mlnx_ual_pd_info_t *p_new_pd; + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_hobul); + CL_ASSERT(p_umv_buf); + p_resp = (struct ibv_alloc_pd_resp *)p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) { + + /* allocate ibv_pd */ + ibv_pd = p_hobul->ibv_ctx->ops.alloc_pd(p_hobul->ibv_ctx, p_resp); + if (IS_ERR(ibv_pd)) { + err = PTR_ERR(ibv_pd); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_alloc_pd failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_alloc_pd; + } + + /* allocate pd */ + p_new_pd = (mlnx_ual_pd_info_t *)cl_zalloc( sizeof(mlnx_ual_pd_info_t) ); + if( !p_new_pd ) { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + + /* return results */ + p_new_pd->ibv_pd = ibv_pd; + p_new_pd->p_hobul = p_hobul; + *ph_uvp_pd = (ib_pd_handle_t)p_new_pd; + } + goto end; + +err_memory: + p_hobul->ibv_ctx->ops.dealloc_pd(ibv_pd); +err_alloc_pd: +end: + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + 
+ib_api_status_t +mlnx_pre_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd) +{ + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd); + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_pd_info); + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + 
+void +mlnx_post_deallocate_pd ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status ) +{ + int err; + mlnx_ual_pd_info_t *p_pd_info = (mlnx_ual_pd_info_t *)((void *)h_uvp_pd); + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_pd_info && p_pd_info->ibv_pd); + + if (IB_SUCCESS == ioctl_status) { + err = p_pd_info->p_hobul->ibv_ctx->ops.dealloc_pd( p_pd_info->ibv_pd ); + if (err) + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_dealloc_pd failed (%d)\n", err)); + + cl_free (p_pd_info); + } + UVP_EXIT(UVP_DBG_SHIM); +} 
diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_qp.c b/branches/Ndi/hw/mthca/user/mlnx_ual_qp.c new file mode 100644 index 00000000..6581a44c --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_qp.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2005 SilverStorm
Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#include "mlnx_ual_main.h" +#if defined(EVENT_TRACING) +#include "mlnx_ual_qp.tmh" +#endif + + +void +mlnx_get_qp_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_uvp); + + /* + * QP Management Verbs + */ + p_uvp->pre_create_qp = mlnx_pre_create_qp; + p_uvp->post_create_qp = mlnx_post_create_qp; + + // !!! none for create_spl_qp, UAL will return error !!! + + p_uvp->pre_modify_qp = mlnx_pre_modify_qp; + p_uvp->post_modify_qp = mlnx_post_modify_qp; + p_uvp->pre_query_qp = NULL; + p_uvp->post_query_qp = mlnx_post_query_qp; + p_uvp->pre_destroy_qp = mlnx_pre_destroy_qp; + p_uvp->post_destroy_qp = mlnx_post_destroy_qp; + + UVP_EXIT(UVP_DBG_SHIM); +} + +ib_api_status_t + mlnx_pre_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN const ib_qp_create_t *p_create_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + int err; + struct ibv_qp *ibv_qp; + struct ibv_qp_init_attr attr; + struct ibv_create_qp *p_create_qp; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_qp), sizeof(struct ibv_create_qp_resp) ); + struct ibv_pd *ibv_pd = h_uvp_pd->ibv_pd; + + UVP_ENTER(UVP_DBG_QP); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_qp); + p_umv_buf->output_size = sizeof(struct ibv_create_qp_resp); + p_umv_buf->command = TRUE; + + /* convert attributes */ + attr.send_cq = (struct ibv_cq *)p_create_attr->h_sq_cq; + attr.recv_cq = (struct ibv_cq *)p_create_attr->h_rq_cq; + attr.srq = (struct ibv_srq*)p_create_attr->h_srq; + attr.cap.max_send_wr = p_create_attr->sq_depth; + attr.cap.max_recv_wr = p_create_attr->rq_depth; + attr.cap.max_send_sge = p_create_attr->sq_sge; + attr.cap.max_recv_sge = p_create_attr->rq_sge; + attr.cap.max_inline_data = 0; /* absent in IBAL */ + attr.qp_type = p_create_attr->qp_type; + attr.sq_sig_all = p_create_attr->sq_signaled; + + /* allocate ibv_qp */ + p_create_qp = (struct ibv_create_qp *)p_umv_buf->p_inout_buf; + ibv_qp = ibv_pd->context->ops.create_qp_pre(ibv_pd, &attr, p_create_qp); + if (IS_ERR(ibv_qp)) { + err 
= PTR_ERR(ibv_qp); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_create_qp_pre failed (%d)\n", err)); + if (err == -ENOMEM && (attr.cap.max_send_sge == 0 || attr.cap.max_recv_sge == 0 || + attr.cap.max_send_wr == 0 || attr.cap.max_recv_wr == 0)) + status = IB_INVALID_SETTING; + else + status = errno_to_iberr(err); + goto err_alloc_qp; + } + + goto end; + +err_alloc_qp: + cl_free(p_umv_buf->p_inout_buf); +err_memory: +end: + UVP_EXIT(UVP_DBG_QP); + return status; +} + 
+void +mlnx_post_create_qp ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_qp_handle_t *ph_uvp_qp, + IN ci_umv_buf_t *p_umv_buf ) +{ + int err; + struct ibv_qp *ibv_qp; + struct ibv_create_qp_resp *p_resp; + ib_api_status_t status = IB_SUCCESS; + struct ibv_pd *ibv_pd = h_uvp_pd->ibv_pd; + + UVP_ENTER(UVP_DBG_QP); + + + CL_ASSERT(p_umv_buf); + p_resp = (struct ibv_create_qp_resp *)p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) { + + /* allocate ibv_qp */ + ibv_qp = ibv_pd->context->ops.create_qp_post(ibv_pd, p_resp); + if (IS_ERR(ibv_qp)) { + err = PTR_ERR(ibv_qp); + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP , ("mthca_create_qp_post failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_create_qp; + } + + *ph_uvp_qp = (ib_qp_handle_t)ibv_qp; + } + goto end; + +err_create_qp: +end: + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_QP); + return; +} + 
+ib_api_status_t +mlnx_pre_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN const ib_qp_mod_t *p_modify_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + ib_api_status_t status = IB_SUCCESS; + UNREFERENCED_PARAMETER(h_uvp_qp); + UNREFERENCED_PARAMETER(p_modify_attr); + + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(p_umv_buf); + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( sizeof(struct ibv_modify_qp_resp) ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = 0; + p_umv_buf->output_size = sizeof(struct ibv_modify_qp_resp); + p_umv_buf->command = TRUE; + +err_memory: + UVP_EXIT(UVP_DBG_SHIM); + return status; +} + + 
+void +mlnx_post_modify_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + int err; + ib_api_status_t status; + struct ibv_modify_qp_resp *p_resp; + struct ibv_qp_attr attr; + struct ibv_qp *ibv_qp = (struct ibv_qp *)h_uvp_qp; + + UVP_ENTER(UVP_DBG_SHIM); + CL_ASSERT(p_umv_buf); + + p_resp = (struct ibv_modify_qp_resp *)p_umv_buf->p_inout_buf; + + if (IB_SUCCESS == ioctl_status) + { + memset( &attr, 0, sizeof(attr)); + attr.qp_state = p_resp->qp_state; + if (ibv_qp) { + err = ibv_qp->context->ops.modify_qp( ibv_qp, + &attr, p_resp->attr_mask); + if (err) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_modify_qp failed (%d)\n", err)); + status = errno_to_iberr(err); + goto err_modify_qp; + } + } + UVP_PRINT(TRACE_LEVEL_INFORMATION ,UVP_DBG_SHIM , + ("Committed to modify QP to state %d\n", p_resp->qp_state)); + } + + +err_modify_qp: + if (p_resp) + cl_free (p_resp); + UVP_EXIT(UVP_DBG_SHIM); + return; +} + + 
+ib_api_status_t +mlnx_pre_query_qp ( + IN ib_qp_handle_t h_uvp_qp, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + UNREFERENCED_PARAMETER(h_uvp_qp); + UVP_ENTER(UVP_DBG_SHIM); + p_umv_buf->input_size = p_umv_buf->output_size = 0; + p_umv_buf->command = FALSE; + p_umv_buf->status = IB_SUCCESS; + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + + +void +mlnx_post_query_qp ( 
+ IN ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status, + IN OUT ib_qp_attr_t *p_query_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + struct mthca_qp *p_mthca_qp = (struct mthca_qp *)h_uvp_qp; + UVP_ENTER(UVP_DBG_SHIM); + + UNREFERENCED_PARAMETER(p_umv_buf); + if (IB_SUCCESS == ioctl_status) + { + p_query_attr->sq_max_inline = p_mthca_qp->max_inline_data; + p_query_attr->sq_sge = p_mthca_qp->sq.max_gs; + p_query_attr->sq_depth = p_mthca_qp->sq.max; + p_query_attr->rq_sge = p_mthca_qp->rq.max_gs; + p_query_attr->rq_depth = p_mthca_qp->rq.max; + } + UVP_EXIT(UVP_DBG_SHIM); +} + + 
+ib_api_status_t +mlnx_pre_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp) +{ + UVP_ENTER(UVP_DBG_SHIM); + + mthca_destroy_qp_pre((struct ibv_qp*)h_uvp_qp); + + UVP_EXIT(UVP_DBG_SHIM); + return IB_SUCCESS; +} + 
+void +mlnx_post_destroy_qp ( + IN const ib_qp_handle_t h_uvp_qp, + IN ib_api_status_t ioctl_status) +{ + UVP_ENTER(UVP_DBG_SHIM); + + CL_ASSERT(h_uvp_qp); + + mthca_destroy_qp_post((struct ibv_qp*)h_uvp_qp, (int)ioctl_status); + if (ioctl_status != IB_SUCCESS) + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_SHIM , ("mthca_destroy_qp_post failed (%d)\n", ioctl_status)); + + UVP_EXIT(UVP_DBG_SHIM); + return; +} + 
diff --git a/branches/Ndi/hw/mthca/user/mlnx_ual_srq.c b/branches/Ndi/hw/mthca/user/mlnx_ual_srq.c new file mode 100644 index 00000000..196da791 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_ual_srq.c @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id: mlnx_ual_srq.c 1611 2006-08-20 14:48:55Z leonid $ + */ + +#include "mt_l2w.h" +#include "mlnx_ual_main.h" +#include "mlnx_uvp.h" +#include "mx_abi.h" + +#if defined(EVENT_TRACING) +#include "mlnx_ual_srq.tmh" +#endif + + +extern uint32_t mlnx_dbg_lvl; + +void +mlnx_get_srq_interface ( + IN OUT uvp_interface_t *p_uvp ) +{ + UVP_ENTER(UVP_DBG_DEV); + + CL_ASSERT(p_uvp); + + /* + * Completion Queue Management Verbs + */ + p_uvp->pre_create_srq = mlnx_pre_create_srq; + p_uvp->post_create_srq = mlnx_post_create_srq; + + p_uvp->pre_query_srq = NULL; /* mlnx_pre_query_srq; */ + p_uvp->post_query_srq = NULL; /*mlnx_post_query_srq;*/ + + p_uvp->pre_modify_srq = NULL; /* mlnx_modify_srq;*/ + p_uvp->post_modify_srq = NULL; /*mlnx_post_modify_srq;*/ + + p_uvp->pre_destroy_srq = NULL; /* mlnx_pre_destroy_srq; */ + p_uvp->post_destroy_srq = mlnx_post_destroy_srq; + + UVP_EXIT(UVP_DBG_DEV); +} + +static void __free_srq(struct mthca_srq *srq) +{ + /* srq may be NULL, when ioctl returned with some kind of error, e.g. IB_INVALID_PARAM */ + if (!srq) + return; + + if (mthca_is_memfree(srq->ibv_srq.context)) { + mthca_free_db(to_mctx(srq->ibv_srq.context)->db_tab, MTHCA_DB_TYPE_SRQ, + srq->db_index); + } + + if (srq->buf) { +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(srq->buf); +#else + VirtualFree( srq->buf, 0, MEM_RELEASE); +#endif + } + + if (srq->wrid) + cl_free(srq->wrid); + + cl_spinlock_destroy(&srq->lock); + cl_free (srq); +} + +ib_api_status_t +mlnx_pre_create_srq ( + IN const ib_pd_handle_t h_uvp_pd,// Fix me: if needed + IN const ib_srq_attr_t *p_srq_attr, + IN OUT ci_umv_buf_t *p_umv_buf) +{ + struct mthca_srq *srq; + ib_api_status_t status = IB_SUCCESS; + size_t size = max( sizeof(struct ibv_create_srq), sizeof(struct ibv_create_srq_resp) ); + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + struct ibv_pd *ibv_pd = p_pd->ibv_pd; + struct ibv_create_srq *p_create_srq; + int err; + + UVP_ENTER(UVP_DBG_SRQ); + + CL_ASSERT(p_umv_buf); + + /* Sanity check SRQ size before proceeding */ + if (p_srq_attr->max_wr > 1 << 16 || p_srq_attr->max_sge > 64) + { + status = IB_INVALID_PARAMETER; + goto err_params; + } + + if( !p_umv_buf->p_inout_buf ) + { + p_umv_buf->p_inout_buf = cl_zalloc( size ); + if( !p_umv_buf->p_inout_buf ) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_memory; + } + } + p_umv_buf->input_size = sizeof(struct ibv_create_srq); + p_umv_buf->output_size = sizeof(struct ibv_create_srq_resp); + p_umv_buf->command = TRUE; + + /* allocate srq */ + srq = cl_zalloc(sizeof *srq); + if (!srq) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_srq; + } + + /* init fields */ + cl_spinlock_construct(&srq->lock); + if (cl_spinlock_init(&srq->lock)) + goto err_lock; + + srq->ibv_srq.pd = ibv_pd; + srq->ibv_srq.context = ibv_pd->context; + srq->max = align_queue_size(ibv_pd->context, p_srq_attr->max_wr, 1); + srq->max_gs = p_srq_attr->max_sge; + srq->counter = 0; + + if (mthca_alloc_srq_buf(ibv_pd, (void*)p_srq_attr, srq)) + { + status = IB_INSUFFICIENT_MEMORY; + goto err_alloc_buf; + } + + // fill the parameters for ioctl + p_create_srq = (struct ibv_create_srq *)p_umv_buf->p_inout_buf; + p_create_srq->user_handle = (uint64_t)(ULONG_PTR)srq; + p_create_srq->mr.start = (uint64_t)(ULONG_PTR)srq->buf; + p_create_srq->mr.length = srq->buf_size; + p_create_srq->mr.hca_va = 0; + p_create_srq->mr.pd_handle = p_pd->ibv_pd->handle; + p_create_srq->mr.pdn = to_mpd(p_pd->ibv_pd)->pdn; + p_create_srq->mr.access_flags = 0; //local read + + if (mthca_is_memfree(ibv_pd->context)) { + 
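+		/* Mem-free (Arbel) HCAs keep doorbell records in host memory rather
+		 * than in on-device memory, so a doorbell record is allocated here
+		 * and its page address and index are passed down to the kernel as
+		 * part of the create request. */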
srq->db_index = mthca_alloc_db(to_mctx(ibv_pd->context)->db_tab, + MTHCA_DB_TYPE_SRQ, &srq->db); + if (srq->db_index < 0) + goto err_alloc_db; + + p_create_srq->db_page = db_align(srq->db); + p_create_srq->db_index = srq->db_index; + } + + status = IB_SUCCESS; + goto end; + +err_alloc_db: +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(srq->buf); +#else + VirtualFree( srq->buf, 0, MEM_RELEASE); +#endif + cl_free(srq->wrid); +err_alloc_buf: + cl_spinlock_destroy(&srq->lock); +err_lock: + cl_free(srq); +err_alloc_srq: + cl_free(p_umv_buf->p_inout_buf); +err_memory: +err_params: +end: + UVP_EXIT(UVP_DBG_SRQ); + return status; +} + + +void +mlnx_post_create_srq ( + IN const ib_pd_handle_t h_uvp_pd, + IN ib_api_status_t ioctl_status, + OUT ib_srq_handle_t *ph_uvp_srq, + IN ci_umv_buf_t *p_umv_buf ) +{ + int err; + struct mthca_srq *srq; + struct ibv_create_srq_resp *p_resp; + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)h_uvp_pd; + struct ibv_pd *ibv_pd = p_pd->ibv_pd; + ib_api_status_t status = IB_SUCCESS; + + UVP_ENTER(UVP_DBG_SRQ); + + CL_ASSERT(p_umv_buf); + p_resp = (struct ibv_create_srq_resp *)p_umv_buf->p_inout_buf; + srq = (struct mthca_srq *)(ULONG_PTR)p_resp->user_handle; + + if (IB_SUCCESS == ioctl_status) { + + /* complete filling SRQ object */ + srq->ibv_srq.handle = p_resp->srq_handle; + srq->srqn = p_resp->srqn; + srq->max = p_resp->max_wr; + srq->max_gs = p_resp->max_sge; + srq->mr.handle = p_resp->mr.mr_handle; + srq->mr.lkey = p_resp->mr.lkey; + srq->mr.rkey = p_resp->mr.rkey; + srq->mr.pd = ibv_pd; + srq->mr.context = ibv_pd->context; + + if (mthca_is_memfree(ibv_pd->context)) + mthca_set_db_qn(srq->db, MTHCA_DB_TYPE_SRQ, srq->srqn); + + *ph_uvp_srq = (ib_srq_handle_t)srq; + } + else + __free_srq(srq); + + if (p_resp) + cl_free( p_resp ); + UVP_EXIT(UVP_DBG_SRQ); + return; +} + +void +mlnx_post_destroy_srq ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status) +{ + int err; + struct mthca_srq *srq = (struct mthca_srq *) ((void*)h_uvp_srq); + + UVP_ENTER(UVP_DBG_CQ); + + CL_ASSERT(srq); + + if (IB_SUCCESS == ioctl_status) + __free_srq(srq); + + UVP_EXIT(UVP_DBG_CQ); +} + + diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp.c b/branches/Ndi/hw/mthca/user/mlnx_uvp.c new file mode 100644 index 00000000..2e36bb62 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp.c @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mlnx_uvp.h" + +#if defined(EVENT_TRACING) +#include "mlnx_uvp.tmh" +#endif + +#include "mx_abi.h" + +size_t g_page_size = 0; + +#ifndef PCI_VENDOR_ID_MELLANOX +#define PCI_VENDOR_ID_MELLANOX 0x15b3 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR +#define PCI_DEVICE_ID_MELLANOX_TAVOR 0x5a44 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT +#define PCI_DEVICE_ID_MELLANOX_ARBEL_COMPAT 0x6278 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL +#define PCI_DEVICE_ID_MELLANOX_ARBEL 0x6282 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD 0x5e8c +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI +#define PCI_DEVICE_ID_MELLANOX_SINAI 0x6274 +#endif + +#ifndef PCI_VENDOR_ID_TOPSPIN +#define PCI_VENDOR_ID_TOPSPIN 0x1867 +#endif + +/* live fishes */ +#ifndef PCI_DEVICE_ID_MELLANOX_TAVOR_BD +#define PCI_DEVICE_ID_MELLANOX_TAVOR_BD 0x5a45 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_ARBEL_BD +#define PCI_DEVICE_ID_MELLANOX_ARBEL_BD 0x6279 +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD +#define PCI_DEVICE_ID_MELLANOX_SINAI_OLD_BD 0x5e8d +#endif + +#ifndef PCI_DEVICE_ID_MELLANOX_SINAI_BD +#define PCI_DEVICE_ID_MELLANOX_SINAI_BD 0x6275 +#endif + + +#define HCA(v, d, t) \ + { PCI_VENDOR_ID_##v, PCI_DEVICE_ID_MELLANOX_##d, MTHCA_##t } + +static struct pci_device_id { + unsigned vendor; + unsigned device; + enum mthca_hca_type type; +} mthca_pci_table[] = { + HCA( MELLANOX, TAVOR, TAVOR), + HCA( MELLANOX, ARBEL_COMPAT, TAVOR), + HCA( MELLANOX, ARBEL, ARBEL), + HCA( MELLANOX, SINAI_OLD, ARBEL), + HCA( MELLANOX, SINAI, ARBEL), + HCA( TOPSPIN, TAVOR, TAVOR), + HCA( TOPSPIN, ARBEL_COMPAT, TAVOR), + HCA( TOPSPIN, ARBEL, ARBEL), + HCA( TOPSPIN, SINAI_OLD, ARBEL), + HCA( TOPSPIN, SINAI, ARBEL), + // live fishes + HCA(MELLANOX, TAVOR_BD, LIVEFISH), + HCA(MELLANOX, ARBEL_BD, LIVEFISH), + HCA(MELLANOX, SINAI_OLD_BD, LIVEFISH), + HCA(MELLANOX, SINAI_BD, LIVEFISH), + HCA(TOPSPIN, TAVOR_BD, LIVEFISH), + HCA(TOPSPIN, ARBEL_BD, LIVEFISH), + HCA(TOPSPIN, SINAI_OLD_BD, LIVEFISH), + HCA(TOPSPIN, SINAI_BD, LIVEFISH), +}; + +static struct ibv_context_ops mthca_ctx_ops = { + NULL, // mthca_query_device, + NULL, // mthca_query_port, + mthca_alloc_pd, + mthca_free_pd, + NULL, // mthca_reg_mr, + NULL, // mthca_dereg_mr, + mthca_create_cq_pre, + mthca_create_cq_post, + mthca_poll_cq, + mthca_poll_cq_list, + NULL, /* req_notify_cq */ + mthca_destroy_cq, + NULL, // mthca_create_srq, + NULL, // mthca_modify_srq, + NULL, // mthca_destroy_srq, + NULL, /* post_srq_recv */ + mthca_create_qp_pre, + mthca_create_qp_post, + mthca_modify_qp, + NULL, + NULL, /* post_send */ + NULL, /* post_recv */ + mthca_attach_mcast, + mthca_detach_mcast +}; + +struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p) +{ + struct mthca_context * context; + struct ibv_alloc_pd_resp pd_resp; + int i; + + /* allocate context */ + context = cl_zalloc(sizeof *context); + if (!context) + return NULL; + + /* find page size */ + if (!g_page_size) { + SYSTEM_INFO sys_info; + GetSystemInfo(&sys_info); + g_page_size = sys_info.dwPageSize; + } + + /* calculate device type */ + for (i = 0; i < sizeof mthca_pci_table / sizeof mthca_pci_table[0]; ++i) + if (resp_p->vend_id == 
mthca_pci_table[i].vendor && + resp_p->dev_id == mthca_pci_table[i].device) + goto found; + goto err_dev_type; + +found: + context->hca_type = mthca_pci_table[i].type; + context->uar = (void*)(UINT_PTR)resp_p->uar_addr; + context->num_qps = resp_p->qp_tab_size; + context->qp_table_shift = ffs(context->num_qps) - 1 - MTHCA_QP_TABLE_BITS; + context->qp_table_mask = (1 << context->qp_table_shift) - 1; + + if (mthca_is_memfree(&context->ibv_ctx)) { + context->db_tab = mthca_alloc_db_tab(resp_p->uarc_size); + if (!context->db_tab) + goto err_alloc_db_tab; + } else + context->db_tab = NULL; + + context->qp_table_mutex = CreateMutex( NULL, FALSE, NULL ); + if (!context->qp_table_mutex) + goto err_mutex; + for (i = 0; i < MTHCA_QP_TABLE_SIZE; ++i) + context->qp_table[i].refcnt = 0; + + cl_spinlock_construct(&context->uar_lock); + if (cl_spinlock_init(&context->uar_lock)) + goto err_spinlock; + + pd_resp.pd_handle = resp_p->pd_handle; + pd_resp.pdn = resp_p->pdn; + context->pd = mthca_alloc_pd(&context->ibv_ctx, &pd_resp); + if (!context->pd) + goto err_unmap; + + context->ibv_ctx.ops = mthca_ctx_ops; + + if (mthca_is_memfree(&context->ibv_ctx)) { + context->ibv_ctx.ops.req_notify_cq = mthca_arbel_arm_cq; + context->ibv_ctx.ops.post_send = mthca_arbel_post_send; + context->ibv_ctx.ops.post_recv = mthca_arbel_post_recv; + context->ibv_ctx.ops.post_srq_recv = mthca_arbel_post_srq_recv; + } else { + context->ibv_ctx.ops.req_notify_cq = mthca_tavor_arm_cq; + context->ibv_ctx.ops.post_send = mthca_tavor_post_send; + context->ibv_ctx.ops.post_recv = mthca_tavor_post_recv; + context->ibv_ctx.ops.post_srq_recv = mthca_tavor_post_srq_recv; + } + + return &context->ibv_ctx; + +err_unmap: +err_spinlock: +err_mutex: + mthca_free_db_tab(context->db_tab); + +err_alloc_db_tab: +err_dev_type: + cl_free(context); + return NULL; +} + +void mthca_free_context(struct ibv_context *ibctx) +{ + struct mthca_context *context = to_mctx(ibctx); + + cl_spinlock_destroy(&context->uar_lock); + mthca_free_pd(context->pd); + mthca_free_db_tab(context->db_tab); + cl_free(context); +} diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp.def b/branches/Ndi/hw/mthca/user/mlnx_uvp.def new file mode 100644 index 00000000..55f97537 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp.def @@ -0,0 +1,10 @@ +#if DBG +LIBRARY mthcaud.dll +#else +LIBRARY mthcau.dll +#endif + +#ifndef _WIN64 +EXPORTS +uvp_get_interface +#endif diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp.h b/branches/Ndi/hw/mthca/user/mlnx_uvp.h new file mode 100644 index 00000000..00e18b6e --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp.h @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MTHCA_H +#define MTHCA_H + +#include +#include +#include +#include "mlnx_uvp_debug.h" + +#define PFX "mthca: " + +enum mthca_hca_type { + MTHCA_TAVOR, + MTHCA_ARBEL, + MTHCA_LIVEFISH +}; + +enum { + MTHCA_CQ_ENTRY_SIZE = 0x20, + MTHCA_BYTES_PER_ATOMIC_COMPL = 0x8 +}; + +enum { + MTHCA_QP_TABLE_BITS = 8, + MTHCA_QP_TABLE_SIZE = 1 << MTHCA_QP_TABLE_BITS, + MTHCA_QP_TABLE_MASK = MTHCA_QP_TABLE_SIZE - 1 +}; + +enum { + MTHCA_DB_REC_PAGE_SIZE = 4096, + MTHCA_DB_REC_PER_PAGE = MTHCA_DB_REC_PAGE_SIZE / 8 +}; + +enum mthca_db_type { + MTHCA_DB_TYPE_INVALID = 0x0, + MTHCA_DB_TYPE_CQ_SET_CI = 0x1, + MTHCA_DB_TYPE_CQ_ARM = 0x2, + MTHCA_DB_TYPE_SQ = 0x3, + MTHCA_DB_TYPE_RQ = 0x4, + MTHCA_DB_TYPE_SRQ = 0x5, + MTHCA_DB_TYPE_GROUP_SEP = 0x7 +}; + +enum { + MTHCA_OPCODE_NOP = 0x00, + MTHCA_OPCODE_RDMA_WRITE = 0x08, + MTHCA_OPCODE_RDMA_WRITE_IMM = 0x09, + MTHCA_OPCODE_SEND = 0x0a, + MTHCA_OPCODE_SEND_IMM = 0x0b, + MTHCA_OPCODE_RDMA_READ = 0x10, + MTHCA_OPCODE_ATOMIC_CS = 0x11, + MTHCA_OPCODE_ATOMIC_FA = 0x12, + MTHCA_OPCODE_BIND_MW = 0x18, + MTHCA_OPCODE_INVALID = 0xff +}; + +struct mthca_ah_page; + +struct mthca_db_table; + +struct mthca_context { + struct ibv_context ibv_ctx; + void *uar; + cl_spinlock_t uar_lock; + struct mthca_db_table *db_tab; + struct ibv_pd *pd; + struct { + struct mthca_qp **table; + int refcnt; + } qp_table[MTHCA_QP_TABLE_SIZE]; + HANDLE qp_table_mutex; + int num_qps; + int qp_table_shift; + int qp_table_mask; + enum mthca_hca_type hca_type; +}; + +struct mthca_pd { + struct ibv_pd ibv_pd; + struct mthca_ah_page *ah_list; + HANDLE ah_mutex; + uint32_t pdn; +}; + +struct mthca_cq { + struct ibv_cq ibv_cq; + void *buf; + cl_spinlock_t lock; + struct ibv_mr mr; + uint32_t cqn; + uint32_t cons_index; + + /* Next fields are mem-free only */ + int set_ci_db_index; + uint32_t *set_ci_db; + int arm_db_index; + uint32_t *arm_db; + int u_arm_db_index; + uint32_t *p_u_arm_sn; +}; + +struct mthca_srq { + struct ibv_srq ibv_srq; + void *buf; + void *last; + cl_spinlock_t lock; + struct ibv_mr mr; + uint64_t *wrid; + uint32_t srqn; + int max; + int max_gs; + int wqe_shift; + int first_free; + int last_free; + int buf_size; + + /* Next fields are mem-free only */ + int db_index; + uint32_t *db; + uint16_t counter; +}; + +struct mthca_wq { + cl_spinlock_t lock; + int max; + unsigned next_ind; + unsigned last_comp; + unsigned head; + unsigned tail; + void *last; + int max_gs; + int wqe_shift; + + /* Next fields are mem-free only */ + int db_index; + uint32_t *db; +}; + +struct mthca_qp { + struct ibv_qp ibv_qp; + uint8_t *buf; + uint64_t *wrid; + int send_wqe_offset; + int max_inline_data; + int buf_size; + struct mthca_wq sq; + struct mthca_wq rq; + struct ibv_mr mr; + int sq_sig_all; +}; + +struct mthca_av { + uint32_t port_pd; + uint8_t reserved1; + uint8_t 
g_slid; + uint16_t dlid; + uint8_t reserved2; + uint8_t gid_index; + uint8_t msg_sr; + uint8_t hop_limit; + uint32_t sl_tclass_flowlabel; + uint32_t dgid[4]; +}; + +struct mthca_ah { + struct mthca_av *av; + ib_av_attr_t av_attr; + ib_pd_handle_t h_uvp_pd; + struct mthca_ah_page *page; + uint32_t key; + int in_kernel; +}; + +struct mthca_ah_page { + struct mthca_ah_page *prev, *next; + void *buf; + struct ibv_mr mr; + int use_cnt; + unsigned free[0]; +}; + + +static inline uintptr_t db_align(uint32_t *db) +{ + return (uintptr_t) db & ~((uintptr_t) MTHCA_DB_REC_PAGE_SIZE - 1); +} + +#define to_mxxx(xxx, type) \ + ((struct mthca_##type *) \ + ((uint8_t *) ib##xxx - offsetof(struct mthca_##type, ibv_##xxx))) + +static inline struct mthca_context *to_mctx(struct ibv_context *ibctx) +{ + return to_mxxx(ctx, context); +} + +static inline struct mthca_pd *to_mpd(struct ibv_pd *ibpd) +{ + return to_mxxx(pd, pd); +} + +static inline struct mthca_cq *to_mcq(struct ibv_cq *ibcq) +{ + return to_mxxx(cq, cq); +} + +static inline struct mthca_srq *to_msrq(struct ibv_srq *ibsrq) +{ + return to_mxxx(srq, srq); +} + +static inline struct mthca_qp *to_mqp(struct ibv_qp *ibqp) +{ + return to_mxxx(qp, qp); +} + +static inline int mthca_is_memfree(struct ibv_context *ibctx) +{ + return to_mctx(ibctx)->hca_type == MTHCA_ARBEL; +} + +int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type, + uint32_t **db); +void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn); +void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index); +struct mthca_db_table *mthca_alloc_db_tab(int uarc_size); +void mthca_free_db_tab(struct mthca_db_table *db_tab); + +int mthca_query_device(struct ibv_context *context, + struct ibv_device_attr *attr); +int mthca_query_port(struct ibv_context *context, uint8_t port, + struct ibv_port_attr *attr); + + struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, + struct ibv_alloc_pd_resp *resp_p); + +int mthca_free_pd(struct ibv_pd *pd); + +struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *cqe, + struct ibv_create_cq *req); +struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, + struct ibv_create_cq_resp *resp); +int mthca_destroy_cq(struct ibv_cq *cq); +int mthca_poll_cq(struct ibv_cq *cq, int ne, struct _ib_wc *wc); +int mthca_poll_cq_list(struct ibv_cq *ibcq, + struct _ib_wc** const pp_free_wclist, + struct _ib_wc** const pp_done_wclist ); +int mthca_tavor_arm_cq(struct ibv_cq *cq, int solicited); +int mthca_arbel_arm_cq(struct ibv_cq *cq, int solicited); +void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn, + struct mthca_srq *srq); +void mthca_init_cq_buf(struct mthca_cq *cq, int nent); + +struct ibv_srq *mthca_create_srq(struct ibv_pd *pd, + struct ibv_srq_init_attr *attr); +int mthca_modify_srq(struct ibv_srq *srq, + struct ibv_srq_attr *attr, + enum ibv_srq_attr_mask mask); +int mthca_destroy_srq(struct ibv_srq *srq); +int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr, + struct mthca_srq *srq); +void mthca_free_srq_wqe(struct mthca_srq *srq, int ind); +int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq, + struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq, + struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, + struct ibv_qp_init_attr *attr, struct ibv_create_qp *req); +struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, + struct ibv_create_qp_resp 
*resp); +int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, + enum ibv_qp_attr_mask attr_mask); +void mthca_destroy_qp_pre(struct ibv_qp *qp); +void mthca_destroy_qp_post(struct ibv_qp *qp, int ret); +void mthca_init_qp_indices(struct mthca_qp *qp); +int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr); +int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr); +int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); +int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap, + ib_qp_type_t type, struct mthca_qp *qp); +struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn); +int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp); +void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn); +int mthca_free_err_wqe(struct mthca_qp *qp, int is_send, + int index, int *dbd, uint32_t *new_wqe); +int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr, + struct mthca_ah *ah, struct ibv_create_ah_resp *resp); +void mthca_free_av(struct mthca_ah *ah); +int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid); +int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid); +struct ibv_context *mthca_alloc_context(struct ibv_get_context_resp *resp_p); +void mthca_free_context(struct ibv_context *ibctx); + +#endif /* MTHCA_H */ diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp.rc b/branches/Ndi/hw/mthca/user/mlnx_uvp.rc new file mode 100644 index 00000000..f3d2e34a --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp.rc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef DBG +#define VER_FILEDESCRIPTION_STR "HCA User Mode Verb Provider (checked)" +#define VER_INTERNALNAME_STR "mthcaud.dll" +#define VER_ORIGINALFILENAME_STR "mthcaud.dll" +#else +#define VER_FILEDESCRIPTION_STR "HCA User Mode Verb Provider" +#define VER_INTERNALNAME_STR "mthcau.dll" +#define VER_ORIGINALFILENAME_STR "mthcau.dll" +#endif + +#include diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_ah.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_ah.c new file mode 100644 index 00000000..be1eb898 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_ah.c @@ -0,0 +1,190 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "mt_l2w.h" +#include "mlnx_uvp.h" +#include "mlnx_ual_data.h" +#include "mx_abi.h" + +static struct mthca_ah_page *__add_page( + struct mthca_pd *pd, int page_size, int per_page) +{ + struct mthca_ah_page *page; + int i; + + page = cl_malloc(sizeof *page + per_page * sizeof (int)); + if (!page) + return NULL; + + if (posix_memalign(&page->buf, page_size, page_size)) { + cl_free(page); + return NULL; + } + + page->use_cnt = 0; + for (i = 0; i < per_page; ++i) + page->free[i] = ~0; + + page->prev = NULL; + page->next = pd->ah_list; + pd->ah_list = page; + if (page->next) + page->next->prev = page; + + return page; +} + +int mthca_alloc_av(struct mthca_pd *pd, struct ibv_ah_attr *attr, + struct mthca_ah *ah, struct ibv_create_ah_resp *resp) +{ + if (mthca_is_memfree(pd->ibv_pd.context)) { + ah->av = cl_malloc(sizeof *ah->av); + if (!ah->av) + return -ENOMEM; + } else { + struct mthca_ah_page *page; + int ps; + int pp; + int i, j; + + ps = g_page_size; + pp = ps / (sizeof *ah->av * 8 * sizeof (int)); + + WaitForSingleObject( pd->ah_mutex, INFINITE ); + for (page = pd->ah_list; page; page = page->next) + if (page->use_cnt < ps / (int)(sizeof *ah->av)) + for (i = 0; i < pp; ++i) + if (page->free[i]) + goto found; + + page = __add_page(pd, ps, pp); + if (!page) { + ReleaseMutex( pd->ah_mutex ); + return -ENOMEM; + } + ah->in_kernel = TRUE; + + found: + ++page->use_cnt; + + for (i = 0, j = -1; i < pp; ++i) + if (page->free[i]) { + j = ffs(page->free[i]); + page->free[i] &= ~(1 << (j - 1)); + ah->av = (struct mthca_av *)((uint8_t*)page->buf + + (i * 8 * sizeof (int) + (j - 1)) * sizeof *ah->av); + break; + } + + ah->page = page; + + ReleaseMutex( pd->ah_mutex ); + } + + memset(ah->av, 0, sizeof *ah->av); + + ah->av->port_pd = cl_hton32(pd->pdn | (attr->port_num << 24)); + ah->av->g_slid = attr->src_path_bits; + ah->av->dlid = cl_hton16(attr->dlid); + ah->av->msg_sr = (3 << 4) | /* 2K message */ + attr->static_rate; + ah->av->sl_tclass_flowlabel = cl_hton32(attr->sl << 28); + if (attr->is_global) { + ah->av->g_slid |= 0x80; + /* XXX get gid_table length */ + ah->av->gid_index = (attr->port_num - 1) * 32 + + attr->grh.sgid_index; + ah->av->hop_limit = attr->grh.hop_limit; + ah->av->sl_tclass_flowlabel |= + cl_hton32((attr->grh.traffic_class << 20) | + attr->grh.flow_label); + memcpy(ah->av->dgid, attr->grh.dgid.raw, 16); + } else { + /* Arbel workaround -- low byte of GID must be 2 */ + ah->av->dgid[3] = cl_hton32(2); + } + return 0; +} + +void mthca_free_av(struct mthca_ah *ah) +{ + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah->h_uvp_pd; + if (mthca_is_memfree(p_pd->ibv_pd->context)) { + cl_free(ah->av); + } else { + struct mthca_pd *pd = to_mpd(p_pd->ibv_pd); + struct mthca_ah_page *page; + int i; + + WaitForSingleObject( pd->ah_mutex, INFINITE ); + page = ah->page; + i = ((uint8_t *)ah->av - (uint8_t *)page->buf) / sizeof *ah->av; + page->free[i / (8 * sizeof (int))] |= 1 << (i % (8 * sizeof (int))); + --page->use_cnt; + ReleaseMutex( pd->ah_mutex ); + } +} + +//NB: temporary, for support of modify_qp +void mthca_set_av_params( struct mthca_ah *ah_p, struct ibv_ah_attr *ah_attr ) +{ + struct mthca_av *av = ah_p->av; + mlnx_ual_pd_info_t *p_pd = (mlnx_ual_pd_info_t *)ah_p->h_uvp_pd; + struct mthca_pd *pd =to_mpd(p_pd->ibv_pd); + + // taken from mthca_alloc_av + //TODO: why cl_hton32 ? + av->port_pd = cl_hton32(pd->pdn | (ah_attr->port_num << 24)); + av->g_slid = ah_attr->src_path_bits; + //TODO: why cl_hton16 ? 
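+	// (most likely because the HCA reads the AV as a big-endian structure,
+	// so each multi-byte field is byte-swapped with cl_hton* before being
+	// handed to hardware -- the same reasoning applies to the other
+	// cl_hton16/cl_hton32 conversions in this function)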
+ av->dlid = cl_hton16(ah_attr->dlid); + av->msg_sr = (3 << 4) | /* 2K message */ + ah_attr->static_rate; + //TODO: why cl_hton32 ? + av->sl_tclass_flowlabel = cl_hton32(ah_attr->sl << 28); + if (ah_attr->is_global) { + av->g_slid |= 0x80; + av->gid_index = (ah_attr->port_num - 1) * 32 + + ah_attr->grh.sgid_index; + av->hop_limit = ah_attr->grh.hop_limit; + av->sl_tclass_flowlabel |= cl_hton32((ah_attr->grh.traffic_class << 20) | + ah_attr->grh.flow_label); + memcpy(av->dgid, ah_attr->grh.dgid.raw, 16); + } else { + /* Arbel workaround -- low byte of GID must be 2 */ + //TODO: why cl_hton32 ? + av->dgid[3] = cl_hton32(2); + } +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_cq.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_cq.c new file mode 100644 index 00000000..ebbf426d --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_cq.c @@ -0,0 +1,626 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include "mlnx_uvp.h" +#include "mlnx_uvp_doorbell.h" + +#if defined(EVENT_TRACING) +#include "mlnx_uvp_cq.tmh" +#endif + + +enum { + MTHCA_CQ_DOORBELL = 0x20 +}; + +enum { + CQ_OK = 0, + CQ_EMPTY = -1, + CQ_POLL_ERR = -2 +}; + +#define MTHCA_TAVOR_CQ_DB_INC_CI (1 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL (3 << 24) +#define MTHCA_TAVOR_CQ_DB_SET_CI (4 << 24) +#define MTHCA_TAVOR_CQ_DB_REQ_NOT_MULT (5 << 24) + +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL (1 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT (2 << 24) +#define MTHCA_ARBEL_CQ_DB_REQ_NOT_MULT (3 << 24) + +enum { + MTHCA_CQ_ENTRY_OWNER_SW = 0x00, + MTHCA_CQ_ENTRY_OWNER_HW = 0x80, + MTHCA_ERROR_CQE_OPCODE_MASK = 0xfe +}; + +enum { + SYNDROME_LOCAL_LENGTH_ERR = 0x01, + SYNDROME_LOCAL_QP_OP_ERR = 0x02, + SYNDROME_LOCAL_EEC_OP_ERR = 0x03, + SYNDROME_LOCAL_PROT_ERR = 0x04, + SYNDROME_WR_FLUSH_ERR = 0x05, + SYNDROME_MW_BIND_ERR = 0x06, + SYNDROME_BAD_RESP_ERR = 0x10, + SYNDROME_LOCAL_ACCESS_ERR = 0x11, + SYNDROME_REMOTE_INVAL_REQ_ERR = 0x12, + SYNDROME_REMOTE_ACCESS_ERR = 0x13, + SYNDROME_REMOTE_OP_ERR = 0x14, + SYNDROME_RETRY_EXC_ERR = 0x15, + SYNDROME_RNR_RETRY_EXC_ERR = 0x16, + SYNDROME_LOCAL_RDD_VIOL_ERR = 0x20, + SYNDROME_REMOTE_INVAL_RD_REQ_ERR = 0x21, + SYNDROME_REMOTE_ABORTED_ERR = 0x22, + SYNDROME_INVAL_EECN_ERR = 0x23, + SYNDROME_INVAL_EEC_STATE_ERR = 0x24 +}; + +struct mthca_cqe { + uint32_t my_qpn; + uint32_t my_ee; + uint32_t rqpn; + uint16_t sl_g_mlpath; + uint16_t rlid; + uint32_t imm_etype_pkey_eec; + uint32_t byte_cnt; + uint32_t wqe; + uint8_t opcode; + uint8_t is_send; + uint8_t reserved; + uint8_t owner; +}; + +struct mthca_err_cqe { + uint32_t my_qpn; + uint32_t reserved1[3]; + uint8_t syndrome; + uint8_t vendor_err; + uint16_t db_cnt; + uint32_t reserved2; + uint32_t wqe; + uint8_t opcode; + uint8_t reserved3[2]; + uint8_t owner; +}; + +static inline struct mthca_cqe *get_cqe(struct mthca_cq *cq, int entry) +{ + return (struct mthca_cqe *)((uint8_t*)cq->buf + entry * MTHCA_CQ_ENTRY_SIZE); +} + +static inline struct mthca_cqe *cqe_sw(struct mthca_cq *cq, int i) +{ + struct mthca_cqe *cqe = get_cqe(cq, i); + return MTHCA_CQ_ENTRY_OWNER_HW & cqe->owner ? NULL : cqe; +} + +static inline struct mthca_cqe *next_cqe_sw(struct mthca_cq *cq) +{ + return cqe_sw(cq, cq->cons_index & cq->ibv_cq.cqe); +} + +static inline void set_cqe_hw(struct mthca_cqe *cqe) +{ + cqe->owner = MTHCA_CQ_ENTRY_OWNER_HW; +} + +/* + * incr is ignored in native Arbel (mem-free) mode, so cq->cons_index + * should be correct before calling update_cons_index(). + */ +static inline void update_cons_index(struct mthca_cq *cq, int incr) +{ + uint32_t doorbell[2]; + + if (mthca_is_memfree(cq->ibv_cq.context)) { + *cq->set_ci_db = cl_hton32(cq->cons_index); + mb(); + } else { + doorbell[0] = cl_hton32(MTHCA_TAVOR_CQ_DB_INC_CI | cq->cqn); + doorbell[1] = cl_hton32(incr - 1); + + mthca_write64(doorbell, to_mctx(cq->ibv_cq.context), MTHCA_CQ_DOORBELL); + } +} + + +static void dump_cqe(uint32_t print_lvl, void *cqe_ptr) +{ + uint32_t *cqe = cqe_ptr; + int i; + (void) cqe; /* avoid warning if mthca_dbg compiled away... 
*/ + + UVP_PRINT(print_lvl,UVP_DBG_CQ,("CQE content \n")); + UVP_PRINT(print_lvl,UVP_DBG_CQ,(" [%2x] %08x %08x %08x %08x \n",0 + , cl_ntoh32(cqe[0]), cl_ntoh32(cqe[1]), cl_ntoh32(cqe[2]), cl_ntoh32(cqe[3]))); + UVP_PRINT(print_lvl,UVP_DBG_CQ,(" [%2x] %08x %08x %08x %08x\n",16 + , cl_ntoh32(cqe[4]), cl_ntoh32(cqe[5]), cl_ntoh32(cqe[6]), cl_ntoh32(cqe[7]))); + +} + +static int handle_error_cqe(struct mthca_cq *cq, + struct mthca_qp *qp, int wqe_index, int is_send, + struct mthca_err_cqe *cqe, + struct _ib_wc *entry, int *free_cqe) +{ + int err; + int dbd; + uint32_t new_wqe; + + if (cqe->syndrome == SYNDROME_LOCAL_QP_OP_ERR) { + UVP_PRINT(TRACE_LEVEL_ERROR , UVP_DBG_CQ,("local QP operation err " + "(QPN %06x, WQE @ %08x, CQN %06x, index %d)\n", + cl_ntoh32(cqe->my_qpn), cl_ntoh32(cqe->wqe), + cq->cqn, cq->cons_index)); + dump_cqe(TRACE_LEVEL_VERBOSE, cqe); + } + + /* + * For completions in error, only work request ID, status, vendor error + * (and freed resource count for RD) have to be set. + */ + switch (cqe->syndrome) { + case SYNDROME_LOCAL_LENGTH_ERR: + entry->status = IB_WCS_LOCAL_LEN_ERR; + break; + case SYNDROME_LOCAL_QP_OP_ERR: + entry->status = IB_WCS_LOCAL_OP_ERR; + break; + case SYNDROME_LOCAL_PROT_ERR: + entry->status = IB_WCS_LOCAL_PROTECTION_ERR; + break; + case SYNDROME_WR_FLUSH_ERR: + entry->status = IB_WCS_WR_FLUSHED_ERR; + break; + case SYNDROME_MW_BIND_ERR: + entry->status = IB_WCS_MEM_WINDOW_BIND_ERR; + break; + case SYNDROME_BAD_RESP_ERR: + entry->status = IB_WCS_BAD_RESP_ERR; + break; + case SYNDROME_LOCAL_ACCESS_ERR: + entry->status = IB_WCS_LOCAL_ACCESS_ERR; + break; + case SYNDROME_REMOTE_INVAL_REQ_ERR: + entry->status = IB_WCS_REM_INVALID_REQ_ERR; + break; + case SYNDROME_REMOTE_ACCESS_ERR: + entry->status = IB_WCS_REM_ACCESS_ERR; + break; + case SYNDROME_REMOTE_OP_ERR: + entry->status = IB_WCS_REM_OP_ERR; + break; + case SYNDROME_RETRY_EXC_ERR: + entry->status = IB_WCS_TIMEOUT_RETRY_ERR; + break; + case SYNDROME_RNR_RETRY_EXC_ERR: + entry->status = IB_WCS_RNR_RETRY_ERR; + break; + case SYNDROME_LOCAL_EEC_OP_ERR: + case SYNDROME_LOCAL_RDD_VIOL_ERR: + case SYNDROME_REMOTE_INVAL_RD_REQ_ERR: + case SYNDROME_REMOTE_ABORTED_ERR: + case SYNDROME_INVAL_EECN_ERR: + case SYNDROME_INVAL_EEC_STATE_ERR: + default: + entry->status = IB_WCS_GENERAL_ERR; + break; + } + + entry->vendor_specific = cqe->vendor_err; + + /* + * Mem-free HCAs always generate one CQE per WQE, even in the + * error case, so we don't have to check the doorbell count, etc. + */ + if (mthca_is_memfree(cq->ibv_cq.context)) + return 0; + + err = mthca_free_err_wqe(qp, is_send, wqe_index, &dbd, &new_wqe); + if (err) + return err; + + /* + * If we're at the end of the WQE chain, or we've used up our + * doorbell count, free the CQE. Otherwise just update it for + * the next poll operation. + * + * This doesn't apply to mem-free HCAs, which never use the + * doorbell count field. In that case we always free the CQE. 
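+ * When the CQE is kept, the code below decrements its doorbell count,
+ * points it at the next WQE and rewrites the syndrome as a flush error,
+ * so subsequent polls drain the rest of the chain as flushed requests.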
+ */ + if (mthca_is_memfree(cq->ibv_cq.context) || + !(new_wqe & cl_hton32(0x3f)) || (!cqe->db_cnt && dbd)) + return 0; + + cqe->db_cnt = cl_hton16(cl_ntoh16(cqe->db_cnt) - dbd); + cqe->wqe = new_wqe; + cqe->syndrome = SYNDROME_WR_FLUSH_ERR; + + *free_cqe = 0; + + return 0; +} + +static inline int mthca_poll_one(struct mthca_cq *cq, + struct mthca_qp **cur_qp, + int *freed, + struct _ib_wc *entry) +{ + struct mthca_wq *wq; + struct mthca_cqe *cqe; + uint32_t qpn; + int wqe_index; + int is_error; + int is_send; + int free_cqe = 1; + int err = 0; + + UVP_ENTER(UVP_DBG_CQ); + + cqe = next_cqe_sw(cq); + if (!cqe) + return -EAGAIN; + + /* + * Make sure we read CQ entry contents after we've checked the + * ownership bit. + */ + rmb(); + + { // debug print + UVP_PRINT(TRACE_LEVEL_VERBOSE,UVP_DBG_CQ,("%x/%d: CQE -> QPN %06x, WQE @ %08x\n", + cq->cqn, cq->cons_index, cl_ntoh32(cqe->my_qpn), + cl_ntoh32(cqe->wqe))); + dump_cqe(TRACE_LEVEL_VERBOSE,cqe); + } + + qpn = cl_ntoh32(cqe->my_qpn); + + is_error = (cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == + MTHCA_ERROR_CQE_OPCODE_MASK; + is_send = is_error ? cqe->opcode & 0x01 : cqe->is_send & 0x80; + + if (!*cur_qp || cl_ntoh32(cqe->my_qpn) != (*cur_qp)->ibv_qp.qp_num) { + /* + * We do not have to take the QP table lock here, + * because CQs will be locked while QPs are removed + * from the table. + */ + *cur_qp = mthca_find_qp(to_mctx(cq->ibv_cq.context), cl_ntoh32(cqe->my_qpn)); + if (!*cur_qp) { + UVP_PRINT(TRACE_LEVEL_WARNING,UVP_DBG_CQ, ("CQ entry for unknown QP %06x\n", + cl_ntoh32(cqe->my_qpn) & 0xffffff)); + err = -EINVAL; + goto out; + } + } + + if (is_send) { + wq = &(*cur_qp)->sq; + wqe_index = ((cl_ntoh32(cqe->wqe) - (*cur_qp)->send_wqe_offset) >> wq->wqe_shift); + entry->wr_id = (*cur_qp)->wrid[wqe_index + (*cur_qp)->rq.max]; + } else if ((*cur_qp)->ibv_qp.srq) { + struct mthca_srq * srq = to_msrq((*cur_qp)->ibv_qp.srq); + uint32_t wqe = cl_hton32(cqe->wqe); + wq = NULL; + wqe_index = wqe >> srq->wqe_shift; + entry->wr_id = srq->wrid[wqe_index]; + mthca_free_srq_wqe(srq, wqe_index); + } else { + wq = &(*cur_qp)->rq; + wqe_index = cl_ntoh32(cqe->wqe) >> wq->wqe_shift; + entry->wr_id = (*cur_qp)->wrid[wqe_index]; + } + + if (wq) { + if ((int)wq->last_comp < wqe_index) + wq->tail += wqe_index - wq->last_comp; + else + wq->tail += wqe_index + wq->max - wq->last_comp; + + wq->last_comp = wqe_index; + } + + if (is_send) { + entry->recv.ud.recv_opt = 0; + switch (cqe->opcode) { + case MTHCA_OPCODE_RDMA_WRITE: + entry->wc_type = IB_WC_RDMA_WRITE; + break; + case MTHCA_OPCODE_RDMA_WRITE_IMM: + entry->wc_type = IB_WC_RDMA_WRITE; + entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE; + break; + case MTHCA_OPCODE_SEND: + entry->wc_type = IB_WC_SEND; + break; + case MTHCA_OPCODE_SEND_IMM: + entry->wc_type = IB_WC_SEND; + entry->recv.ud.recv_opt |= IB_RECV_OPT_IMMEDIATE; + break; + case MTHCA_OPCODE_RDMA_READ: + entry->wc_type = IB_WC_RDMA_READ; + entry->length = cl_ntoh32(cqe->byte_cnt); + break; + case MTHCA_OPCODE_ATOMIC_CS: + entry->wc_type = IB_WC_COMPARE_SWAP; + entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL; + break; + case MTHCA_OPCODE_ATOMIC_FA: + entry->wc_type = IB_WC_FETCH_ADD; + entry->length = MTHCA_BYTES_PER_ATOMIC_COMPL; + break; + case MTHCA_OPCODE_BIND_MW: + entry->wc_type = IB_WC_MW_BIND; + break; + default: + /* assume it's a send completion */ + entry->wc_type = IB_WC_SEND; + break; + } + } else { + entry->length = cl_ntoh32(cqe->byte_cnt); + switch (cqe->opcode & 0x1f) { + case IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE: + case 
IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE: + entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE; + entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec; + entry->wc_type = IB_WC_RECV; + break; + case IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE: + case IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE: + entry->recv.ud.recv_opt = IB_RECV_OPT_IMMEDIATE; + entry->recv.ud.immediate_data = cqe->imm_etype_pkey_eec; + entry->wc_type = IB_WC_RECV; + break; + default: + entry->recv.ud.recv_opt = 0; + entry->wc_type = IB_WC_RECV; + break; + } + entry->recv.ud.remote_lid = cqe->rlid; + entry->recv.ud.remote_qp = cqe->rqpn & 0xffffff00; + entry->recv.ud.pkey_index = (uint16_t)(cl_ntoh32(cqe->imm_etype_pkey_eec) >> 16); + entry->recv.ud.remote_sl = cl_ntoh16(cqe->sl_g_mlpath) >> 12; + entry->recv.ud.path_bits = cl_ntoh16(cqe->sl_g_mlpath) & 0x7f; + entry->recv.ud.recv_opt |= cl_ntoh16(cqe->sl_g_mlpath) & 0x80 ? + IB_RECV_OPT_GRH_VALID : 0; + } + + + if (is_error) { + err = handle_error_cqe(cq, *cur_qp, wqe_index, is_send, + (struct mthca_err_cqe *) cqe, + entry, &free_cqe); + } + else + entry->status = IB_WCS_SUCCESS; + +out: + if (likely(free_cqe)) { + set_cqe_hw(cqe); + ++(*freed); + ++cq->cons_index; + } + + UVP_EXIT(UVP_DBG_CQ); + return err; +} + +int mthca_poll_cq(struct ibv_cq *ibcq, int num_entries, struct _ib_wc *entry) +{ + struct mthca_cq *cq = to_mcq(ibcq); + struct mthca_qp *qp = NULL; + int err = CQ_OK; + int freed = 0; + int npolled; + + cl_spinlock_acquire(&cq->lock); + + for (npolled = 0; npolled < num_entries; ++npolled) { + err = mthca_poll_one(cq, &qp, &freed, entry + npolled); + if (err) + break; + } + + if (freed) { + wmb(); + update_cons_index(cq, freed); + } + + cl_spinlock_release(&cq->lock); + + return (err == 0 || err == -EAGAIN) ? npolled : err; +} + +int mthca_poll_cq_list( + IN struct ibv_cq *ibcq, + IN OUT struct _ib_wc** const pp_free_wclist, + OUT struct _ib_wc** const pp_done_wclist ) +{ + struct mthca_cq *cq = to_mcq(ibcq); + struct mthca_qp *qp = NULL; + int err = CQ_OK; + int freed = 0; + ib_wc_t *wc_p, **next_pp; + uint32_t wc_cnt = 0; + + cl_spinlock_acquire(&cq->lock); + + // loop through CQ + next_pp = pp_done_wclist; + wc_p = *pp_free_wclist; + while( wc_p ) { + // poll one CQE + err = mthca_poll_one(cq, &qp, &freed, wc_p); + if (err) + break; + + // prepare for the next loop + *next_pp = wc_p; + next_pp = &wc_p->p_next; + wc_p = wc_p->p_next; + } + + // prepare the results + *pp_free_wclist = wc_p; /* Set the head of the free list. */ + *next_pp = NULL; /* Clear the tail of the done list. */ + + // update consumer index + if (freed) { + wmb(); + update_cons_index(cq, freed); + } + + cl_spinlock_release(&cq->lock); + return (err == 0 || err == -EAGAIN)? 0 : err; +} + +int mthca_tavor_arm_cq(struct ibv_cq *cq, enum ib_cq_notify notify) +{ + uint32_t doorbell[2]; + + doorbell[0] = cl_hton32((notify == IB_CQ_SOLICITED ? + MTHCA_TAVOR_CQ_DB_REQ_NOT_SOL : + MTHCA_TAVOR_CQ_DB_REQ_NOT) | + to_mcq(cq)->cqn); + doorbell[1] = 0xffffffff; + + mthca_write64(doorbell, to_mctx(cq->context), MTHCA_CQ_DOORBELL); + + return 0; +} + +int mthca_arbel_arm_cq(struct ibv_cq *ibvcq, enum ib_cq_notify notify) +{ + struct mthca_cq *cq = to_mcq(ibvcq); + uint32_t doorbell[2]; + uint32_t sn; + uint32_t ci; + + sn = *cq->p_u_arm_sn & 3; + ci = cl_hton32(cq->cons_index); + + doorbell[0] = ci; + doorbell[1] = cl_hton32((cq->cqn << 8) | (2 << 5) | (sn << 3) | + (notify == IB_CQ_SOLICITED ? 
1 : 2)); + + mthca_write_db_rec(doorbell, cq->arm_db); + + /* + * Make sure that the doorbell record in host memory is + * written before ringing the doorbell via PCI MMIO. + */ + wmb(); + + doorbell[0] = cl_hton32((sn << 28) | + (notify == IB_CQ_SOLICITED ? + MTHCA_ARBEL_CQ_DB_REQ_NOT_SOL : + MTHCA_ARBEL_CQ_DB_REQ_NOT) | + cq->cqn); + doorbell[1] = ci; + + mthca_write64(doorbell, to_mctx(ibvcq->context), MTHCA_CQ_DOORBELL); + + return 0; +} + +static inline int is_recv_cqe(struct mthca_cqe *cqe) +{ + if ((cqe->opcode & MTHCA_ERROR_CQE_OPCODE_MASK) == + MTHCA_ERROR_CQE_OPCODE_MASK) + return !(cqe->opcode & 0x01); + else + return !(cqe->is_send & 0x80); +} + +void mthca_cq_clean(struct mthca_cq *cq, uint32_t qpn, struct mthca_srq *srq) +{ + struct mthca_cqe *cqe; + uint32_t prod_index; + int nfreed = 0; + + cl_spinlock_acquire(&cq->lock); + + /* + * First we need to find the current producer index, so we + * know where to start cleaning from. It doesn't matter if HW + * adds new entries after this loop -- the QP we're worried + * about is already in RESET, so the new entries won't come + * from our QP and therefore don't need to be checked. + */ + for (prod_index = cq->cons_index; + cqe_sw(cq, prod_index & cq->ibv_cq.cqe); + ++prod_index) + if (prod_index == cq->cons_index + cq->ibv_cq.cqe) + break; + + /* + * Now sweep backwards through the CQ, removing CQ entries + * that match our QP by copying older entries on top of them. + */ + while ((int) --prod_index - (int) cq->cons_index >= 0) { + cqe = get_cqe(cq, prod_index & cq->ibv_cq.cqe); + if (cqe->my_qpn == cl_hton32(qpn)) { + if (srq && is_recv_cqe(cqe)) + mthca_free_srq_wqe(srq, + cl_ntoh32(cqe->wqe) >> srq->wqe_shift); + ++nfreed; + } else if (nfreed) + memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibv_cq.cqe), + cqe, MTHCA_CQ_ENTRY_SIZE); + } + + if (nfreed) { + mb(); + cq->cons_index += nfreed; + update_cons_index(cq, nfreed); + } + + cl_spinlock_release(&cq->lock); +} + +void mthca_init_cq_buf(struct mthca_cq *cq, int nent) +{ + int i; + + for (i = 0; i < nent; ++i) + set_cqe_hw(get_cqe(cq, i)); + + cq->cons_index = 0; +} diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.c new file mode 100644 index 00000000..3fc71134 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.c @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2005 Mellanox Technologies LTD. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ */
+
+// Author: Yossi Leybovich
+
+#include "mlnx_uvp_debug.h"
+/* headers needed for va_list, StringCbVPrintfA and OutputDebugString */
+#include <windows.h>
+#include <stdarg.h>
+#include <strsafe.h>
+
+#if !defined(EVENT_TRACING)
+
+#if DBG
+uint32_t g_mlnx_dbg_level = TRACE_LEVEL_WARNING;
+uint32_t g_mlnx_dbg_flags = UVP_DBG_QP | UVP_DBG_CQ | UVP_DBG_MEMORY;
+#endif
+
+VOID
+_UVP_PRINT(
+	IN char* msg,
+	...
+	)
+{
+#if DBG
+#define TEMP_BUFFER_SIZE 1024
+	va_list list;
+	char debugMessageBuffer[TEMP_BUFFER_SIZE];
+	HRESULT result;
+
+	va_start(list, msg);
+
+	if (msg) {
+
+		//
+		// Using the safe string functions instead of _vsnprintf.  This
+		// function takes care of NULL terminating if the message is
+		// longer than the buffer.
+		//
+
+		result = StringCbVPrintfA(debugMessageBuffer, sizeof(debugMessageBuffer),
+			msg, list);
+		if (FAILED(result)) {
+			OutputDebugString(": StringCbVPrintfA failed \n");
+			va_end(list);
+			return;
+		}
+		OutputDebugString(debugMessageBuffer);
+	}
+	va_end(list);
+#endif //DBG
+}
+
+#endif //EVENT_TRACING
+
diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.h b/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.h
new file mode 100644
index 00000000..2a9cbc5b
--- /dev/null
+++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_debug.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + +#ifndef _MLNX_UVP_DEBUG_H_ +#define _MLNX_UVP_DEBUG_H_ + +#include + +extern uint32_t g_mlnx_dbg_level; +extern uint32_t g_mlnx_dbg_flags; + + +#if defined(EVENT_TRACING) +// +// Software Tracing Definitions +// +// + +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID(HCACtlGuid,(2C718E52,0D36,4bda,9E58,0FC601818D8F), \ + WPP_DEFINE_BIT( UVP_DBG_DEV) \ + WPP_DEFINE_BIT( UVP_DBG_PNP) \ + WPP_DEFINE_BIT( UVP_DBG_MAD) \ + WPP_DEFINE_BIT( UVP_DBG_PO) \ + WPP_DEFINE_BIT( UVP_DBG_CQ) \ + WPP_DEFINE_BIT( UVP_DBG_QP) \ + WPP_DEFINE_BIT( UVP_DBG_MEMORY) \ + WPP_DEFINE_BIT( UVP_DBG_SRQ) \ + WPP_DEFINE_BIT( UVP_DBG_AV) \ + WPP_DEFINE_BIT( UVP_DBG_SEND) \ + WPP_DEFINE_BIT( UVP_DBG_RECV) \ + WPP_DEFINE_BIT( UVP_DBG_LOW) \ + WPP_DEFINE_BIT( UVP_DBG_SHIM)) + + +#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl) +#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags) +#define WPP_FLAG_ENABLED(flags)(WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE) +#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags) + + +// begin_wpp config +// UVP_ENTER(FLAG); +// UVP_EXIT(FLAG); +// USEPREFIX(UVP_PRINT, "%!FUNC!() "); +// USESUFFIX(UVP_ENTER, "%!FUNC!===>"); +// USESUFFIX(UVP_EXIT, "%!FUNC!<==="); +// end_wpp + + +#else + +#include +#include + +/* + * Debug macros + */ + + +#define UVP_DBG_DEV (1 << 0) +#define UVP_DBG_PNP (1 << 1) +#define UVP_DBG_MAD (1 << 2) +#define UVP_DBG_PO (1 << 3) +#define UVP_DBG_QP (1 << 4) +#define UVP_DBG_CQ (1 << 5) +#define UVP_DBG_MEMORY (1 << 6) +#define UVP_DBG_SRQ (1 << 7) +#define UVP_DBG_AV (1 << 8) +#define UVP_DBG_SEND (1 << 9) +#define UVP_DBG_RECV (1 << 10) +#define UVP_DBG_LOW (1 << 11) +#define UVP_DBG_SHIM (1 << 12) + + +VOID + _UVP_PRINT( + IN char* msg, + ...); + +#if DBG + +#define UVP_PRINT(_level_,_flags_,_msg_) \ + if ((_level_) <= g_mlnx_dbg_level && (_flags_) & g_mlnx_dbg_flags) {\ + _UVP_PRINT("[UVP] %s():",__FUNCTION__);\ + if((_level_) == TRACE_LEVEL_ERROR) _UVP_PRINT ("***ERROR*** ");\ + _UVP_PRINT _msg_ ; \ + } + + +// +#else + +#define UVP_PRINT(lvl ,flags, msg) + +#endif + + +#define UVP_ENTER(flags)\ + UVP_PRINT(TRACE_LEVEL_VERBOSE, flags,("===>\n")); + +#define UVP_EXIT(flags)\ + UVP_PRINT(TRACE_LEVEL_VERBOSE, flags,("<===\n")); + +#define UVP_PRINT_EXIT(_level_,_flag_,_msg_) \ + {\ + if (status != IB_SUCCESS) {\ + UVP_PRINT(_level_,_flag_,_msg_);\ + }\ + UVP_EXIT(_flag_);\ + } + +#endif //EVENT_TRACING + +#endif /*_MLNX_UVP_DEBUG_H_ */ + diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_doorbell.h b/branches/Ndi/hw/mthca/user/mlnx_uvp_doorbell.h new file mode 100644 index 00000000..7928eceb --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_doorbell.h @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef DOORBELL_H
+#define DOORBELL_H
+
+enum {
+	MTHCA_SEND_DOORBELL_FENCE = 1 << 5
+};
+
+#if defined(_WIN64)
+
+static inline void mthca_write64(uint32_t val[2], struct mthca_context *ctx, int offset)
+{
+	*(volatile uint64_t *) ((char *)ctx->uar + offset) = *(volatile uint64_t*)val;
+}
+
+static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
+{
+	*(volatile uint64_t *) db = *(volatile uint64_t*)val;
+}
+
+#elif defined(_WIN32)
+
+static inline void mthca_write64(uint32_t val[2], struct mthca_context *ctx, int offset)
+{
+	volatile uint64_t *target_p = (volatile uint64_t*)((uint8_t*)ctx->uar + offset);
+
+	cl_spinlock_acquire(&ctx->uar_lock);
+	*(volatile uint32_t *) ((uint8_t*)ctx->uar + offset) = val[0];
+	*(volatile uint32_t *) ((uint8_t*)ctx->uar + offset + 4) = val[1];
+	cl_spinlock_release(&ctx->uar_lock);
+
+	//TODO: can we save mm0 and avoid emms, as Linux does?
+	//__asm movq mm0,val
+	//__asm movq target_p,mm0
+	//__asm emms
+}
+
+static inline void mthca_write_db_rec(uint32_t val[2], uint32_t *db)
+{
+	db[0] = val[0];
+	wmb();
+	db[1] = val[1];
+}
+
+#endif
+
+#endif /* DOORBELL_H */
diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_kern_abi.h b/branches/Ndi/hw/mthca/user/mlnx_uvp_kern_abi.h
new file mode 100644
index 00000000..093c8b0c
--- /dev/null
+++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_kern_abi.h
@@ -0,0 +1,644 @@
+/*
+ * Copyright (c) 2005 Topspin Communications. All rights reserved.
+ * Copyright (c) 2005 Cisco Systems. All rights reserved.
+ * Copyright (c) 2005 PathScale, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef KERN_ABI_H +#define KERN_ABI_H + +/* + * This file must be kept in sync with the kernel's version of + * drivers/infiniband/include/ib_user_verbs.h + */ + +/* + * Make sure that all structs defined in this file remain laid out so + * that they pack the same way on 32-bit and 64-bit architectures (to + * avoid incompatibility between 32-bit userspace and 64-bit kernels). + * Specifically: + * - Do not use pointer types -- pass pointers in uint64_t instead. + * - Make sure that any structure larger than 4 bytes is padded to a + * multiple of 8 bytes. Otherwise the structure size will be + * different between 32-bit and 64-bit architectures. + */ + +struct ibv_kern_async_event { + uint64_t element; + uint32_t event_type; + uint32_t reserved; +}; + +struct ibv_comp_event { + uint64_t cq_handle; +}; + +/* + * All commands from userspace should start with a uint32_t command field + * followed by uint16_t in_words and out_words fields (which give the + * length of the command block and response buffer if any in 32-bit + * words). The kernel driver will read these fields first and read + * the rest of the command struct based on these value. + */ + +struct ibv_query_params { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; +}; + +struct ibv_query_params_resp { + uint32_t num_cq_events; +}; + +struct ibv_get_context_resp { + uint64_t uar_addr; + uint64_t pd_handle; + uint32_t pdn; + uint32_t qp_tab_size; + uint32_t uarc_size; + uint32_t vend_id; + uint16_t dev_id; +}; + +struct ibv_query_device { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint64_t driver_data[0]; +}; + +struct ibv_query_device_resp { + uint64_t fw_ver; + uint64_t node_guid; + uint64_t sys_image_guid; + uint64_t max_mr_size; + uint64_t page_size_cap; + uint32_t vendor_id; + uint32_t vendor_part_id; + uint32_t hw_ver; + uint32_t max_qp; + uint32_t max_qp_wr; + uint32_t device_cap_flags; + uint32_t max_sge; + uint32_t max_sge_rd; + uint32_t max_cq; + uint32_t max_cqe; + uint32_t max_mr; + uint32_t max_pd; + uint32_t max_qp_rd_atom; + uint32_t max_ee_rd_atom; + uint32_t max_res_rd_atom; + uint32_t max_qp_init_rd_atom; + uint32_t max_ee_init_rd_atom; + uint32_t atomic_cap; + uint32_t max_ee; + uint32_t max_rdd; + uint32_t max_mw; + uint32_t max_raw_ipv6_qp; + uint32_t max_raw_ethy_qp; + uint32_t max_mcast_grp; + uint32_t max_mcast_qp_attach; + uint32_t max_total_mcast_qp_attach; + uint32_t max_ah; + uint32_t max_fmr; + uint32_t max_map_per_fmr; + uint32_t max_srq; + uint32_t max_srq_wr; + uint32_t max_srq_sge; + uint16_t max_pkeys; + uint8_t local_ca_ack_delay; + uint8_t phys_port_cnt; + uint8_t reserved[4]; +}; + +struct ibv_query_port { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint8_t port_num; + uint8_t reserved[7]; + uint64_t driver_data[0]; +}; + +struct ibv_query_port_resp { + uint32_t port_cap_flags; + uint32_t max_msg_sz; + uint32_t bad_pkey_cntr; + uint32_t qkey_viol_cntr; + uint32_t gid_tbl_len; + uint16_t pkey_tbl_len; + uint16_t lid; + uint16_t sm_lid; + uint8_t state; + uint8_t max_mtu; + uint8_t active_mtu; + uint8_t lmc; + uint8_t max_vl_num; + uint8_t sm_sl; + uint8_t subnet_timeout; + uint8_t 
init_type_reply; + uint8_t active_width; + uint8_t active_speed; + uint8_t phys_state; + uint8_t reserved[3]; +}; + +struct ibv_alloc_pd_resp { + uint64_t pd_handle; + uint32_t pdn; + uint32_t reserved; +}; + +struct ibv_reg_mr { + uint64_t start; + uint64_t length; + uint64_t hca_va; + uint32_t access_flags; + uint32_t pdn; + uint64_t pd_handle; +}; + +struct ibv_reg_mr_resp { + uint64_t mr_handle; + uint32_t lkey; + uint32_t rkey; +}; + +struct ibv_create_cq { + struct ibv_reg_mr mr; + uint64_t arm_db_page; + uint64_t set_db_page; + uint32_t arm_db_index; + uint32_t set_db_index; + uint64_t user_handle; + uint32_t cqe; + uint32_t lkey; /* used only by kernel */ +}; +struct ibv_create_cq_resp { + uint64_t user_handle; + uint64_t cq_handle; + struct ibv_reg_mr_resp mr; + uint32_t cqe; + uint32_t cqn; +}; + +struct ibv_kern_wc { + uint64_t wr_id; + uint32_t status; + uint32_t opcode; + uint32_t vendor_err; + uint32_t byte_len; + uint32_t imm_data; + uint32_t qp_num; + uint32_t src_qp; + uint32_t wc_flags; + uint16_t pkey_index; + uint16_t slid; + uint8_t sl; + uint8_t dlid_path_bits; + uint8_t port_num; + uint8_t reserved; +}; + +struct ibv_poll_cq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t cq_handle; + uint32_t ne; +}; + +struct ibv_poll_cq_resp { + uint32_t count; + uint32_t reserved; + struct ibv_kern_wc wc[]; +}; + +struct ibv_req_notify_cq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t cq_handle; + uint32_t solicited; +}; + +struct ibv_destroy_cq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t cq_handle; + uint32_t reserved; +}; + +struct ibv_destroy_cq_resp { + uint32_t comp_events_reported; + uint32_t async_events_reported; +}; + +struct ibv_kern_global_route { + uint8_t dgid[16]; + uint32_t flow_label; + uint8_t sgid_index; + uint8_t hop_limit; + uint8_t traffic_class; + uint8_t reserved; +}; + +struct ibv_kern_ah_attr { + struct ibv_kern_global_route grh; + uint16_t dlid; + uint8_t sl; + uint8_t src_path_bits; + uint8_t static_rate; + uint8_t is_global; + uint8_t port_num; + uint8_t reserved; +}; + +struct ibv_kern_qp_attr { + uint32_t qp_attr_mask; + uint32_t qp_state; + uint32_t cur_qp_state; + uint32_t path_mtu; + uint32_t path_mig_state; + uint32_t qkey; + uint32_t rq_psn; + uint32_t sq_psn; + uint32_t dest_qp_num; + uint32_t qp_access_flags; + + struct ibv_kern_ah_attr ah_attr; + struct ibv_kern_ah_attr alt_ah_attr; + + /* ib_qp_cap */ + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; + + uint16_t pkey_index; + uint16_t alt_pkey_index; + uint8_t en_sqd_async_notify; + uint8_t sq_draining; + uint8_t max_rd_atomic; + uint8_t max_dest_rd_atomic; + uint8_t min_rnr_timer; + uint8_t port_num; + uint8_t timeout; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t alt_port_num; + uint8_t alt_timeout; + uint8_t reserved[5]; +}; + +struct ibv_create_qp { + uint64_t sq_db_page; + uint64_t rq_db_page; + uint32_t sq_db_index; + uint32_t rq_db_index; + struct ibv_reg_mr mr; + uint64_t user_handle; + uint64_t send_cq_handle; + uint64_t recv_cq_handle; + uint64_t srq_handle; + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; + uint32_t lkey; /* used only in kernel */ + uint8_t sq_sig_all; + uint8_t qp_type; + uint8_t is_srq; + uint8_t reserved[5]; +}; + +struct ibv_create_qp_resp { + struct ibv_reg_mr_resp mr; + 
uint64_t user_handle; + uint64_t qp_handle; + uint32_t qpn; + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; +}; + +struct ibv_modify_qp_resp { + enum ibv_qp_attr_mask attr_mask; + uint8_t qp_state; + uint8_t reserved[3]; +}; + +struct ibv_kern_send_wr { + uint64_t wr_id; + uint32_t num_sge; + uint32_t opcode; + uint32_t send_flags; + uint32_t imm_data; + union { + struct { + uint64_t remote_addr; + uint32_t rkey; + uint32_t reserved; + } rdma; + struct { + uint64_t remote_addr; + uint64_t compare_add; + uint64_t swap; + uint32_t rkey; + uint32_t reserved; + } atomic; + struct { + uint32_t ah; + uint32_t remote_qpn; + uint32_t remote_qkey; + uint32_t reserved; + } ud; + } wr; +}; + +struct ibv_post_send { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t qp_handle; + uint32_t wr_count; + uint32_t sge_count; + uint32_t wqe_size; + struct ibv_kern_send_wr send_wr[]; +}; + +struct ibv_post_send_resp { + uint32_t bad_wr; +}; + +struct ibv_kern_recv_wr { + uint64_t wr_id; + uint32_t num_sge; + uint32_t reserved; +}; + +struct ibv_post_recv { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t qp_handle; + uint32_t wr_count; + uint32_t sge_count; + uint32_t wqe_size; + struct ibv_kern_recv_wr recv_wr[]; +}; + +struct ibv_post_recv_resp { + uint32_t bad_wr; +}; + +struct ibv_post_srq_recv { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t srq_handle; + uint32_t wr_count; + uint32_t sge_count; + uint32_t wqe_size; + struct ibv_kern_recv_wr recv_wr[]; +}; + +struct ibv_post_srq_recv_resp { + uint32_t bad_wr; +}; + +struct ibv_create_ah { + struct ibv_reg_mr mr; +}; + +struct ibv_create_ah_resp { + uint64_t start; + struct ibv_reg_mr_resp mr; + ib_av_attr_t av_attr; +}; + +struct ibv_destroy_ah { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t ah_handle; +}; + +struct ibv_attach_mcast { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint8_t gid[16]; + uint32_t qp_handle; + uint16_t mlid; + uint16_t reserved; + uint64_t driver_data[]; +}; + +struct ibv_detach_mcast { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint8_t gid[16]; + uint32_t qp_handle; + uint16_t mlid; + uint16_t reserved; + uint64_t driver_data[]; +}; + +struct ibv_create_srq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint64_t user_handle; + uint32_t pd_handle; + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; + uint64_t driver_data[]; +}; + +struct ibv_create_srq_resp { + uint32_t srq_handle; +}; + +struct ibv_modify_srq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t srq_handle; + uint32_t attr_mask; + uint32_t max_wr; + uint32_t srq_limit; + uint64_t driver_data[]; +}; + +struct ibv_destroy_srq { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint32_t srq_handle; + uint32_t reserved; +}; + +struct ibv_destroy_srq_resp { + uint32_t events_reported; +}; + +/* + * Compatibility with older ABI versions + */ + +enum { + IB_USER_VERBS_CMD_QUERY_PARAMS_V2, + IB_USER_VERBS_CMD_GET_CONTEXT_V2, + IB_USER_VERBS_CMD_QUERY_DEVICE_V2, + IB_USER_VERBS_CMD_QUERY_PORT_V2, + IB_USER_VERBS_CMD_QUERY_GID_V2, + IB_USER_VERBS_CMD_QUERY_PKEY_V2, + IB_USER_VERBS_CMD_ALLOC_PD_V2, + IB_USER_VERBS_CMD_DEALLOC_PD_V2, + IB_USER_VERBS_CMD_CREATE_AH_V2, + 
IB_USER_VERBS_CMD_MODIFY_AH_V2, + IB_USER_VERBS_CMD_QUERY_AH_V2, + IB_USER_VERBS_CMD_DESTROY_AH_V2, + IB_USER_VERBS_CMD_REG_MR_V2, + IB_USER_VERBS_CMD_REG_SMR_V2, + IB_USER_VERBS_CMD_REREG_MR_V2, + IB_USER_VERBS_CMD_QUERY_MR_V2, + IB_USER_VERBS_CMD_DEREG_MR_V2, + IB_USER_VERBS_CMD_ALLOC_MW_V2, + IB_USER_VERBS_CMD_BIND_MW_V2, + IB_USER_VERBS_CMD_DEALLOC_MW_V2, + IB_USER_VERBS_CMD_CREATE_CQ_V2, + IB_USER_VERBS_CMD_RESIZE_CQ_V2, + IB_USER_VERBS_CMD_DESTROY_CQ_V2, + IB_USER_VERBS_CMD_POLL_CQ_V2, + IB_USER_VERBS_CMD_PEEK_CQ_V2, + IB_USER_VERBS_CMD_REQ_NOTIFY_CQ_V2, + IB_USER_VERBS_CMD_CREATE_QP_V2, + IB_USER_VERBS_CMD_QUERY_QP_V2, + IB_USER_VERBS_CMD_MODIFY_QP_V2, + IB_USER_VERBS_CMD_DESTROY_QP_V2, + IB_USER_VERBS_CMD_POST_SEND_V2, + IB_USER_VERBS_CMD_POST_RECV_V2, + IB_USER_VERBS_CMD_ATTACH_MCAST_V2, + IB_USER_VERBS_CMD_DETACH_MCAST_V2, + IB_USER_VERBS_CMD_CREATE_SRQ_V2, + IB_USER_VERBS_CMD_MODIFY_SRQ_V2, + IB_USER_VERBS_CMD_QUERY_SRQ_V2, + IB_USER_VERBS_CMD_DESTROY_SRQ_V2, + IB_USER_VERBS_CMD_POST_SRQ_RECV_V2, + /* + * Set commands that didn't exist to -1 so our compile-time + * trick opcodes in IBV_INIT_CMD() doesn't break. + */ + IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL_V2 = -1, +}; + +struct ibv_destroy_cq_v1 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t cq_handle; +}; + +struct ibv_destroy_qp_v1 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t qp_handle; +}; + +struct ibv_destroy_srq_v1 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t srq_handle; +}; + +struct ibv_get_context_v2 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint64_t cq_fd_tab; + uint64_t driver_data[]; +}; + +struct ibv_create_cq_v2 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint64_t response; + uint64_t user_handle; + uint32_t cqe; + uint32_t event_handler; + uint64_t driver_data[]; +}; + +struct ibv_modify_srq_v3 { + uint32_t command; + uint16_t in_words; + uint16_t out_words; + uint32_t srq_handle; + uint32_t attr_mask; + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; + uint32_t reserved; + uint64_t driver_data[]; +}; + +struct ibv_create_qp_resp_v3 { + uint32_t qp_handle; + uint32_t qpn; +}; + +#endif /* KERN_ABI_H */ diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_memfree.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_memfree.c new file mode 100644 index 00000000..f08d5e44 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_memfree.c @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "mlnx_uvp.h" + +#define MTHCA_FREE_MAP_SIZE (MTHCA_DB_REC_PER_PAGE / BITS_PER_LONG) + +struct mthca_db_page { + unsigned long free[MTHCA_FREE_MAP_SIZE]; + uint64_t *db_rec; +}; + +struct mthca_db_table { + int npages; + int max_group1; + int min_group2; + HANDLE mutex; + struct mthca_db_page page[]; +}; + +int mthca_alloc_db(struct mthca_db_table *db_tab, enum mthca_db_type type, + uint32_t **db) +{ + int i, j, k; + int group, start, end, dir; + int ret = 0; + + WaitForSingleObject( db_tab->mutex, INFINITE ); + + switch (type) { + case MTHCA_DB_TYPE_CQ_ARM: + case MTHCA_DB_TYPE_SQ: + group = 0; + start = 0; + end = db_tab->max_group1; + dir = 1; + break; + + case MTHCA_DB_TYPE_CQ_SET_CI: + case MTHCA_DB_TYPE_RQ: + case MTHCA_DB_TYPE_SRQ: + group = 1; + start = db_tab->npages - 1; + end = db_tab->min_group2; + dir = -1; + break; + + default: + ret = -1; + goto out; + } + + for (i = start; i != end; i += dir) + if (db_tab->page[i].db_rec) + for (j = 0; j < MTHCA_FREE_MAP_SIZE; ++j) + if (db_tab->page[i].free[j]) + goto found; + + if (db_tab->max_group1 >= db_tab->min_group2 - 1) { + ret = -1; + goto out; + } + + if (posix_memalign((void **) &db_tab->page[i].db_rec, MTHCA_DB_REC_PAGE_SIZE, + MTHCA_DB_REC_PAGE_SIZE)) { + ret = -1; + goto out; + } + + memset(db_tab->page[i].db_rec, 0, MTHCA_DB_REC_PAGE_SIZE); + memset(db_tab->page[i].free, 0xff, sizeof db_tab->page[i].free); + + if (group == 0) + ++db_tab->max_group1; + else + --db_tab->min_group2; + +found: + for (j = 0; j < MTHCA_FREE_MAP_SIZE; ++j) { + k = ffsl(db_tab->page[i].free[j]); + if (k) + break; + } + + if (!k) { + ret = -1; + goto out; + } + + --k; + db_tab->page[i].free[j] &= ~(1UL << k); + + j = j * BITS_PER_LONG + k; + if (group == 1) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + + ret = i * MTHCA_DB_REC_PER_PAGE + j; + *db = (uint32_t *) &db_tab->page[i].db_rec[j]; + +out: + ReleaseMutex( db_tab->mutex ); + return ret; +} + +void mthca_set_db_qn(uint32_t *db, enum mthca_db_type type, uint32_t qn) +{ + db[1] = cl_hton32((qn << 8) | (type << 5)); +} + +void mthca_free_db(struct mthca_db_table *db_tab, enum mthca_db_type type, int db_index) +{ + int i, j; + struct mthca_db_page *page; + + i = db_index / MTHCA_DB_REC_PER_PAGE; + j = db_index % MTHCA_DB_REC_PER_PAGE; + + page = db_tab->page + i; + + WaitForSingleObject( db_tab->mutex, INFINITE ); + page->db_rec[j] = 0; + + if (i >= db_tab->min_group2) + j = MTHCA_DB_REC_PER_PAGE - 1 - j; + + page->free[j / BITS_PER_LONG] |= 1UL << (j % BITS_PER_LONG); + + ReleaseMutex( db_tab->mutex ); +} + +struct mthca_db_table *mthca_alloc_db_tab(int uarc_size) +{ + struct mthca_db_table *db_tab; + int npages; + int i; + + npages = uarc_size / MTHCA_DB_REC_PAGE_SIZE; + db_tab = cl_malloc(sizeof (struct mthca_db_table) + + npages * sizeof (struct mthca_db_page)); + if (!db_tab) + goto err_malloc; + + db_tab->mutex = CreateMutex( NULL, FALSE, NULL ); + if (!db_tab->mutex) + goto err_mutex; + db_tab->npages = npages; + db_tab->max_group1 = 0; + db_tab->min_group2 = npages - 1; 
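+
+	/*
+	 * Group 1 pages (CQ arm and SQ doorbells) are allocated upward
+	 * from page 0; group 2 pages (CQ set_ci, RQ and SRQ doorbells)
+	 * grow downward from the last page, matching the search
+	 * directions in mthca_alloc_db() above.
+	 */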
+ + for (i = 0; i < npages; ++i) + db_tab->page[i].db_rec = NULL; + + goto end; + +err_mutex: + cl_free(db_tab); +err_malloc: +end: + return db_tab; +} + +void mthca_free_db_tab(struct mthca_db_table *db_tab) +{ + int i; + + if (!db_tab) + return; + + for (i = 0; i < db_tab->npages; ++i) + if (db_tab->page[i].db_rec) +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(db_tab->page[i].db_rec); +#else + VirtualFree( db_tab->page[i].db_rec, 0, MEM_RELEASE); +#endif + + cl_free(db_tab); +} diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_qp.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_qp.c new file mode 100644 index 00000000..4cbca73a --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_qp.c @@ -0,0 +1,1085 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include "mlnx_uvp.h" +#include "mlnx_uvp_doorbell.h" +#include "mthca_wqe.h" +#include "mlnx_ual_data.h" + +#if defined(EVENT_TRACING) +#include "mlnx_uvp_qp.tmh" +#endif + +static const uint8_t mthca_opcode[] = { + MTHCA_OPCODE_RDMA_WRITE, + MTHCA_OPCODE_RDMA_WRITE_IMM, + MTHCA_OPCODE_SEND, + MTHCA_OPCODE_SEND_IMM, + MTHCA_OPCODE_RDMA_READ, + MTHCA_OPCODE_ATOMIC_CS, + MTHCA_OPCODE_ATOMIC_FA +}; + +static enum mthca_wr_opcode conv_ibal_wr_opcode(struct _ib_send_wr *wr) +{ + enum mthca_wr_opcode opcode = -1; //= wr->wr_type; + + switch (wr->wr_type) { + case WR_SEND: + opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_SEND_IMM : MTHCA_OPCODE_SEND; + break; + case WR_RDMA_WRITE: + opcode = (wr->send_opt & IB_SEND_OPT_IMMEDIATE) ? MTHCA_OPCODE_RDMA_WRITE_IMM : MTHCA_OPCODE_RDMA_WRITE; + break; + case WR_RDMA_READ: opcode = MTHCA_OPCODE_RDMA_READ; break; + case WR_COMPARE_SWAP: opcode = MTHCA_OPCODE_ATOMIC_CS; break; + case WR_FETCH_ADD: opcode = MTHCA_OPCODE_ATOMIC_FA; break; + default: opcode = MTHCA_OPCODE_INVALID;break; + } + return opcode; +} + + +static void dump_wqe(uint32_t print_lvl, uint32_t *wqe_ptr , struct mthca_qp *qp_ptr) +{ + net32_t *wqe = wqe_ptr; + + (void) wqe; /* avoid warning if mthca_dbg compiled away... 
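+	   (as with dump_cqe: UVP_PRINT may expand to nothing, which
+	   would otherwise leave wqe unreferenced)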
*/ + UVP_PRINT(print_lvl,UVP_DBG_QP,("WQE contents QPN 0x%06x \n",qp_ptr->ibv_qp.qp_num)); + UVP_PRINT(print_lvl,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",0 + , cl_ntoh32(wqe[0]), cl_ntoh32(wqe[1]), cl_ntoh32(wqe[2]), cl_ntoh32(wqe[3]))); + UVP_PRINT(print_lvl,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",4 + , cl_ntoh32(wqe[4]), cl_ntoh32(wqe[5]), cl_ntoh32(wqe[6]), cl_ntoh32(wqe[7]))); + UVP_PRINT(print_lvl,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",8 + , cl_ntoh32(wqe[8]), cl_ntoh32(wqe[9]), cl_ntoh32(wqe[10]), cl_ntoh32(wqe[11]))); + UVP_PRINT(print_lvl,UVP_DBG_QP,("WQE contents [%02x] %08x %08x %08x %08x \n",12 + , cl_ntoh32(wqe[12]), cl_ntoh32(wqe[13]), cl_ntoh32(wqe[14]), cl_ntoh32(wqe[15]))); + +} +static void *get_recv_wqe(struct mthca_qp *qp, int n) +{ + return qp->buf + (n << qp->rq.wqe_shift); +} + +static void *get_send_wqe(struct mthca_qp *qp, int n) +{ + void *wqe_addr = qp->buf + qp->send_wqe_offset + (n << qp->sq.wqe_shift); + UVP_PRINT(TRACE_LEVEL_INFORMATION,UVP_DBG_QP, + ("wqe %p, qp_buf %p, offset %#x, index %d, shift %d \n", + wqe_addr, qp->buf, qp->send_wqe_offset, n, + qp->sq.wqe_shift)); + + return wqe_addr; +} + +void mthca_init_qp_indices(struct mthca_qp *qp) +{ + qp->sq.next_ind = 0; + qp->sq.last_comp = qp->sq.max - 1; + qp->sq.head = 0; + qp->sq.tail = 0; + qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); + + qp->rq.next_ind = 0; + qp->rq.last_comp = qp->rq.max - 1; + qp->rq.head = 0; + qp->rq.tail = 0; + qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); +} + +static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq, struct mthca_cq *cq) +{ + unsigned cur; + + cur = wq->head - wq->tail; + if ((int)(cur + nreq) < wq->max) + return 0; + + cl_spinlock_acquire(&cq->lock); + cur = wq->head - wq->tail; + cl_spinlock_release(&cq->lock); + + return (int)(cur + nreq) >= wq->max; +} + + +int mthca_tavor_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr) +{ + struct mthca_qp *qp = to_mqp(ibqp); + uint8_t *wqe; + uint8_t *prev_wqe; + int ret = 0; + int nreq; + int i; + int size; + int size0 = 0; + uint32_t f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + int ind; + int op0 = 0; + enum ib_wr_opcode opcode; + + UVP_ENTER(UVP_DBG_QP); + cl_spinlock_acquire(&qp->sq.lock); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.next_ind; + + if(ibqp->state == IBV_QPS_RESET) { + ret = -EBUSY; + if (bad_wr) + *bad_wr = wr; + goto err_busy; + } + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + + if (mthca_wq_overflow(&qp->sq, nreq, to_mcq(qp->ibv_qp.send_cq))) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", ibqp->qp_num, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq)); + ret = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + opcode = conv_ibal_wr_opcode(wr); + if (opcode == MTHCA_OPCODE_INVALID) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num)); + ret = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_opt & IB_SEND_OPT_SIGNALED) ? + cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) | + ((wr->send_opt & IB_SEND_OPT_SOLICITED) ? 
+ cl_hton32(MTHCA_NEXT_SOLICIT) : 0) | + cl_hton32(1); + if (opcode == MTHCA_OPCODE_SEND_IMM|| + opcode == MTHCA_OPCODE_RDMA_WRITE_IMM) + ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data; + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + + switch (ibqp->qp_type) { + case IB_QPT_RELIABLE_CONN: + switch (opcode) { + case MTHCA_OPCODE_ATOMIC_CS: + case MTHCA_OPCODE_ATOMIC_FA: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mthca_raddr_seg); + + if (opcode == MTHCA_OPCODE_ATOMIC_CS) { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cl_hton64(wr->remote_ops.atomic2); + ((struct mthca_atomic_seg *) wqe)->compare = + cl_hton64(wr->remote_ops.atomic1); + } else { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cl_hton64(wr->remote_ops.atomic1); + ((struct mthca_atomic_seg *) wqe)->compare = 0; + } + + wqe += sizeof (struct mthca_atomic_seg); + size += (sizeof (struct mthca_raddr_seg) + + sizeof (struct mthca_atomic_seg)) / 16; + break; + + case MTHCA_OPCODE_RDMA_WRITE: + case MTHCA_OPCODE_RDMA_WRITE_IMM: + case MTHCA_OPCODE_RDMA_READ: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case IB_QPT_UNRELIABLE_CONN: + switch (opcode) { + case MTHCA_OPCODE_RDMA_WRITE: + case MTHCA_OPCODE_RDMA_WRITE_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case IB_QPT_UNRELIABLE_DGRM: + { + struct mthca_ah *ah = ((struct mthca_ah *)wr->dgrm.ud.h_av); + ((struct mthca_tavor_ud_seg *) wqe)->lkey = + cl_hton32(ah->key); + ((struct mthca_tavor_ud_seg *) wqe)->av_addr = + cl_hton64((uint64_t)ah->av); + ((struct mthca_tavor_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp; + ((struct mthca_tavor_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey; + + wqe += sizeof (struct mthca_tavor_ud_seg); + size += sizeof (struct mthca_tavor_ud_seg) / 16; + break; + } + + default: + break; + } + + if ((int)(int)wr->num_ds > qp->sq.max_gs) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x too many gathers\n",ibqp->qp_num)); + ret = -ERANGE; + if (bad_wr) + *bad_wr = wr; + goto out; + } +//TODO sleybo: + if (wr->send_opt & IB_SEND_OPT_INLINE) { + if (wr->num_ds) { + struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe; + uint32_t s = 0; + + wqe += sizeof *seg; + for (i = 0; i < (int)wr->num_ds; ++i) { + struct _ib_local_ds *sge = &wr->ds_array[i]; + + s += sge->length; + + if (s > (uint32_t)qp->max_inline_data) { + ret = -1; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + memcpy(wqe, (void *) (ULONG_PTR) sge->vaddr, + sge->length); + wqe += sge->length; + } + + seg->byte_count = cl_hton32(MTHCA_INLINE_SEG | s); + size += align(s + sizeof *seg, 16) / 16; + } + } else { + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) 
wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) |opcode); + + wmb(); + + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32((size0 ? 0 : MTHCA_NEXT_DBD) | size | + ((wr->send_opt& IB_SEND_OPT_FENCE) ? + MTHCA_NEXT_FENCE : 0)); + + if (!size0) { + size0 = size; + op0 = opcode; + } + + dump_wqe( TRACE_LEVEL_VERBOSE, (uint32_t*)qp->sq.last,qp); + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + + } + +out: + if (likely(nreq)) { + uint32_t doorbell[2]; + + doorbell[0] = cl_hton32(((qp->sq.next_ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) | f0 | op0); + doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0); + + wmb(); + + mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL); + } + + qp->sq.next_ind = ind; + qp->sq.head += nreq; + +err_busy: + cl_spinlock_release(&qp->sq.lock); + + UVP_EXIT(UVP_DBG_QP); + return ret; +} + + +int mthca_tavor_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_qp *qp = to_mqp(ibqp); + uint32_t doorbell[2]; + int ret = 0; + int nreq; + int i; + int size; + int size0 = 0; + int ind; + uint8_t *wqe; + uint8_t *prev_wqe; + + UVP_ENTER(UVP_DBG_QP); + + cl_spinlock_acquire(&qp->rq.lock); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.next_ind; + if(ibqp->state == IBV_QPS_RESET) { + ret = -EBUSY; + if (bad_wr) + *bad_wr = wr; + goto err_busy; + } + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) { + nreq = 0; + + doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cl_hton32(ibqp->qp_num << 8); //TODO sleybo: add qpn to qp struct + + /* + * Make sure that descriptors are written + * before doorbell is rung. 
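+			 * (This is the batch flush inside the posting loop:
+			 * a single receive doorbell can only account for a
+			 * limited number of WQEs, so it is rung every
+			 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB requests and nreq
+			 * restarts from zero.)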
+ */ + mb(); + + mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_RECV_DOORBELL); + + qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB; + size0 = 0; + } + + if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.recv_cq))) { + UVP_PRINT(TRACE_LEVEL_ERROR,UVP_DBG_QP,("RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", ibqp->qp_num, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq)); + ret = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + prev_wqe = qp->rq.last; + qp->rq.last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD); + ((struct mthca_next_seg *) wqe)->flags = + cl_hton32(MTHCA_NEXT_CQ_UPDATE); + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + if (unlikely((int)wr->num_ds > qp->rq.max_gs)) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x too many gathers\n",ibqp->qp_num)); + ret = -ERANGE; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } + + qp->wrid[ind] = wr->wr_id; + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32((ind << qp->rq.wqe_shift) | 1); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD | size); + + if (!size0) + size0 = size; + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } + +out: + if (likely(nreq)) { + doorbell[0] = cl_hton32((qp->rq.next_ind << qp->rq.wqe_shift) | size0); + doorbell[1] = cl_hton32((ibqp->qp_num << 8) | (nreq & 255)); + + /* + * Make sure that descriptors are written before + * doorbell is rung. + */ + mb(); + + mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_RECV_DOORBELL); + } + + qp->rq.next_ind = ind; + qp->rq.head += nreq; + +err_busy: + cl_spinlock_release(&qp->rq.lock); + UVP_EXIT(UVP_DBG_QP); + return ret; +} + +int mthca_arbel_post_send(struct ibv_qp *ibqp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr) +{ + struct mthca_qp *qp = to_mqp(ibqp); + uint32_t doorbell[2]; + uint8_t *wqe; + uint8_t *prev_wqe; + int ret = 0; + int nreq; + int i; + int size; + int size0 = 0; + uint32_t f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + int ind; + uint8_t op0 = 0; + enum ib_wr_opcode opcode; + + UVP_ENTER(UVP_DBG_QP); + + cl_spinlock_acquire(&qp->sq.lock); + + /* XXX check that state is OK to post send */ + + ind = qp->sq.head & (qp->sq.max - 1); + if(ibqp->state == IBV_QPS_RESET) { + ret = -EBUSY; + if (bad_wr) + *bad_wr = wr; + goto err_busy; + } + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) { + nreq = 0; + + doorbell[0] = cl_hton32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) | + ((qp->sq.head & 0xffff) << 8) | f0 | op0); + doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0); + qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB; + size0 = 0; + f0 = unlikely(wr->send_opt & IB_SEND_OPT_FENCE) ? MTHCA_SEND_DOORBELL_FENCE : 0; + + /* + * Make sure that descriptors are written before + * doorbell record. 
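+			 * (First of two barriers: this one orders the WQE
+			 * writes against the doorbell record update; the
+			 * wmb() after the record update below orders the
+			 * record against the MMIO doorbell write.)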
+ */ + wmb(); + *qp->sq.db = cl_hton32(qp->sq.head & 0xffff); + + /* + * Make sure doorbell record is written before we + * write MMIO send doorbell. + */ + wmb(); + mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL); + + } + + if (mthca_wq_overflow(&qp->sq, nreq, to_mcq(qp->ibv_qp.send_cq))) { + UVP_PRINT(TRACE_LEVEL_ERROR,UVP_DBG_QP,("SQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", ibqp->qp_num, + qp->sq.head, qp->sq.tail, + qp->sq.max, nreq)); + ret = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_send_wqe(qp, ind); + prev_wqe = qp->sq.last; + qp->sq.last = wqe; + opcode = conv_ibal_wr_opcode(wr); + + ((struct mthca_next_seg *) wqe)->flags = + ((wr->send_opt & IB_SEND_OPT_SIGNALED) ? + cl_hton32(MTHCA_NEXT_CQ_UPDATE) : 0) | + ((wr->send_opt & IB_SEND_OPT_SOLICITED) ? + cl_hton32(MTHCA_NEXT_SOLICIT) : 0) | + cl_hton32(1); + if (opcode == MTHCA_OPCODE_SEND_IMM|| + opcode == MTHCA_OPCODE_RDMA_WRITE_IMM) + ((struct mthca_next_seg *) wqe)->imm = wr->immediate_data; + + wqe += sizeof (struct mthca_next_seg); + size = sizeof (struct mthca_next_seg) / 16; + + switch (ibqp->qp_type) { + case IB_QPT_RELIABLE_CONN: + switch (opcode) { + case MTHCA_OPCODE_ATOMIC_CS: + case MTHCA_OPCODE_ATOMIC_FA: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + + wqe += sizeof (struct mthca_raddr_seg); + + if (opcode == MTHCA_OPCODE_ATOMIC_CS) { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cl_hton64(wr->remote_ops.atomic2); + ((struct mthca_atomic_seg *) wqe)->compare = + cl_hton64(wr->remote_ops.atomic1); + } else { + ((struct mthca_atomic_seg *) wqe)->swap_add = + cl_hton64(wr->remote_ops.atomic1); + ((struct mthca_atomic_seg *) wqe)->compare = 0; + } + + wqe += sizeof (struct mthca_atomic_seg); + size += (sizeof (struct mthca_raddr_seg) + + sizeof (struct mthca_atomic_seg)) / 16; + break; + + case MTHCA_OPCODE_RDMA_READ: + case MTHCA_OPCODE_RDMA_WRITE: + case MTHCA_OPCODE_RDMA_WRITE_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case IB_QPT_UNRELIABLE_CONN: + switch (opcode) { + case MTHCA_OPCODE_RDMA_WRITE: + case MTHCA_OPCODE_RDMA_WRITE_IMM: + ((struct mthca_raddr_seg *) wqe)->raddr = + cl_hton64(wr->remote_ops.vaddr); + ((struct mthca_raddr_seg *) wqe)->rkey = + wr->remote_ops.rkey; + ((struct mthca_raddr_seg *) wqe)->reserved = 0; + wqe += sizeof (struct mthca_raddr_seg); + size += sizeof (struct mthca_raddr_seg) / 16; + break; + + default: + /* No extra segments required for sends */ + break; + } + + break; + + case IB_QPT_UNRELIABLE_DGRM: + { + struct mthca_ah *ah = ((struct mthca_ah *)wr->dgrm.ud.h_av); + memcpy(((struct mthca_arbel_ud_seg *) wqe)->av, + ah->av, sizeof ( struct mthca_av)); + ((struct mthca_arbel_ud_seg *) wqe)->dqpn = wr->dgrm.ud.remote_qp; + ((struct mthca_arbel_ud_seg *) wqe)->qkey = wr->dgrm.ud.remote_qkey; + + + wqe += sizeof (struct mthca_arbel_ud_seg); + size += sizeof (struct mthca_arbel_ud_seg) / 16; + break; + } + + default: + break; + } + + if ((int)wr->num_ds > qp->sq.max_gs) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x full too many 
gathers\n",ibqp->qp_num)); + ret = -ERANGE; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + if (wr->send_opt & IB_SEND_OPT_INLINE) { + if (wr->num_ds) { + struct mthca_inline_seg *seg = (struct mthca_inline_seg *)wqe; + uint32_t s = 0; + + wqe += sizeof *seg; + for (i = 0; i < (int)wr->num_ds; ++i) { + struct _ib_local_ds *sge = &wr->ds_array[i]; + + s += sge->length; + + if (s > (uint32_t)qp->max_inline_data) { + ret = -1; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + memcpy(wqe, (void *) (uintptr_t) sge->vaddr, + sge->length); + wqe += sge->length; + } + + seg->byte_count = cl_hton32(MTHCA_INLINE_SEG | s); + size += align(s + sizeof *seg, 16) / 16; + } + } else { + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + size += sizeof (struct mthca_data_seg) / 16; + } +//TODO do this also in kernel +// size += wr->num_ds * (sizeof *seg / 16); + } + + qp->wrid[ind + qp->rq.max] = wr->wr_id; + + if (opcode == MTHCA_OPCODE_INVALID) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SQ %06x opcode invalid\n",ibqp->qp_num)); + ret = -EINVAL; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32(((ind << qp->sq.wqe_shift) + + qp->send_wqe_offset) | + opcode); + wmb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD | size | + ((wr->send_opt & IB_SEND_OPT_FENCE) ? + MTHCA_NEXT_FENCE : 0)); + + if (!size0) { + size0 = size; + op0 = opcode; + } + + ++ind; + if (unlikely(ind >= qp->sq.max)) + ind -= qp->sq.max; + } + +out: + if (likely(nreq)) { + doorbell[0] = cl_hton32((nreq << 24) | + ((qp->sq.head & 0xffff) << 8) | f0 | op0); + doorbell[1] = cl_hton32((ibqp->qp_num << 8) | size0); + + qp->sq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + wmb(); + *qp->sq.db = cl_hton32(qp->sq.head & 0xffff); + + /* + * Make sure doorbell record is written before we + * write MMIO send doorbell. 
+ */ + wmb(); + mthca_write64(doorbell, to_mctx(ibqp->pd->context), MTHCA_SEND_DOORBELL); + } + +err_busy: + cl_spinlock_release(&qp->sq.lock); + + UVP_EXIT(UVP_DBG_QP); + + return ret; +} + +int mthca_arbel_post_recv(struct ibv_qp *ibqp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_qp *qp = to_mqp(ibqp); + int ret = 0; + int nreq; + int ind; + int i; + uint8_t *wqe; + + UVP_ENTER(UVP_DBG_QP); + + cl_spinlock_acquire(&qp->rq.lock); + + /* XXX check that state is OK to post receive */ + + ind = qp->rq.head & (qp->rq.max - 1); + if(ibqp->state == IBV_QPS_RESET) { + ret = -EBUSY; + if (bad_wr) + *bad_wr = wr; + goto err_busy; + } + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + if (mthca_wq_overflow(&qp->rq, nreq, to_mcq(qp->ibv_qp.recv_cq))) {//TODO sleybo: check the cq + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x full (%u head, %u tail," + " %d max, %d nreq)\n", ibqp->qp_num, + qp->rq.head, qp->rq.tail, + qp->rq.max, nreq)); + ret = -ENOMEM; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + wqe = get_recv_wqe(qp, ind); + + ((struct mthca_next_seg *) wqe)->flags = 0; + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > qp->rq.max_gs)) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("RQ %06x full too many scatter\n",ibqp->qp_num)); + ret = -ERANGE; + if (bad_wr) + *bad_wr = wr; + goto out; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + cl_hton64(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < qp->rq.max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + qp->wrid[ind] = wr->wr_id; + + ++ind; + if (unlikely(ind >= qp->rq.max)) + ind -= qp->rq.max; + } +out: + if (likely(nreq)) { + qp->rq.head += nreq; + + /* + * Make sure that descriptors are written before + * doorbell record. + */ + mb(); + *qp->rq.db = cl_hton32(qp->rq.head & 0xffff); + } + +err_busy: + cl_spinlock_release(&qp->rq.lock); + + UVP_EXIT(UVP_DBG_QP); + + return ret; +} + +int mthca_alloc_qp_buf(struct ibv_pd *pd, struct ibv_qp_cap *cap, + ib_qp_type_t type, struct mthca_qp *qp) +{ + int size; + int max_sq_sge; + + qp->rq.max_gs = cap->max_recv_sge; + qp->sq.max_gs = cap->max_send_sge; + max_sq_sge = align(cap->max_inline_data + sizeof (struct mthca_inline_seg), + sizeof (struct mthca_data_seg)) / sizeof (struct mthca_data_seg); + if (max_sq_sge < (int)cap->max_send_sge) + max_sq_sge = cap->max_send_sge; + + qp->wrid = cl_malloc((qp->rq.max + qp->sq.max) * sizeof (uint64_t)); + if (!qp->wrid) + return -1; + + size = sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg); + + for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size; + qp->rq.wqe_shift++) + ; /* nothing */ + + size = max_sq_sge * sizeof (struct mthca_data_seg); + switch (type) { + case IB_QPT_UNRELIABLE_DGRM: + size += mthca_is_memfree(pd->context) ? + sizeof (struct mthca_arbel_ud_seg) : + sizeof (struct mthca_tavor_ud_seg); + break; + + case IB_QPT_UNRELIABLE_CONN: + size += sizeof (struct mthca_raddr_seg); + break; + + case IB_QPT_RELIABLE_CONN: + size += sizeof (struct mthca_raddr_seg); + /* + * An atomic op will require an atomic segment, a + * remote address segment and one scatter entry. 
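+		 *
+		 * Worked example, assuming the usual 16-byte mthca segments:
+		 * with max_sq_sge == 1, size is 16 (one data segment) plus 16
+		 * (remote address segment) = 32, below the 48-byte atomic
+		 * floor computed here, so size becomes 48.  Adding the
+		 * 16-byte next segment later yields 64, and the shift loop
+		 * below then settles on sq.wqe_shift == 6 (64-byte stride).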
+ */ + if (size < (sizeof (struct mthca_atomic_seg) + + sizeof (struct mthca_raddr_seg) + + sizeof (struct mthca_data_seg))) + size = (sizeof (struct mthca_atomic_seg) + + sizeof (struct mthca_raddr_seg) + + sizeof (struct mthca_data_seg)); + break; + + default: + break; + } + + /* Make sure that we have enough space for a bind request */ + if (size < sizeof (struct mthca_bind_seg)) + size = sizeof (struct mthca_bind_seg); + + size += sizeof (struct mthca_next_seg); + + for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size; + qp->sq.wqe_shift++) + ; /* nothing */ + + qp->send_wqe_offset = align(qp->rq.max << qp->rq.wqe_shift, + 1 << qp->sq.wqe_shift); + + qp->buf_size = qp->send_wqe_offset + (qp->sq.max << qp->sq.wqe_shift); + + if (posix_memalign(&qp->buf, g_page_size, + align(qp->buf_size, g_page_size))) { + cl_free(qp->wrid); + return -1; + } + + memset(qp->buf, 0, qp->buf_size); + + if (mthca_is_memfree(pd->context)) { + struct mthca_next_seg *next; + struct mthca_data_seg *scatter; + int i; + uint32_t sz; + + sz = cl_hton32((sizeof (struct mthca_next_seg) + + qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16); + + for (i = 0; i < qp->rq.max; ++i) { + next = get_recv_wqe(qp, i); + next->nda_op = cl_hton32(((i + 1) & (qp->rq.max - 1)) << + qp->rq.wqe_shift); + next->ee_nds = sz; + + for (scatter = (void *) (next + 1); + (void *) scatter < (void *) ((char *)next + (1 << qp->rq.wqe_shift)); + ++scatter) + scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY); + } + + for (i = 0; i < qp->sq.max; ++i) { + next = get_send_wqe(qp, i); + next->nda_op = cl_hton32((((i + 1) & (qp->sq.max - 1)) << + qp->sq.wqe_shift) + + qp->send_wqe_offset); + } + } + + qp->sq.last = get_send_wqe(qp, qp->sq.max - 1); + qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1); + + return 0; +} + +struct mthca_qp *mthca_find_qp(struct mthca_context *ctx, uint32_t qpn) +{ + int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift; + + if (ctx->qp_table[tind].refcnt) + return ctx->qp_table[tind].table[qpn & ctx->qp_table_mask]; + else + return NULL; +} + +int mthca_store_qp(struct mthca_context *ctx, uint32_t qpn, struct mthca_qp *qp) +{ + int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift; + int ret = 0; + + WaitForSingleObject( ctx->qp_table_mutex, INFINITE ); + + if (!ctx->qp_table[tind].refcnt) { + ctx->qp_table[tind].table = cl_malloc( + (ctx->qp_table_mask + 1) * sizeof (struct mthca_qp *)); + if (!ctx->qp_table[tind].table) { + ret = -1; + goto out; + } + } + ++ctx->qp_table[tind].refcnt; + ctx->qp_table[tind].table[qpn & ctx->qp_table_mask] = qp; + +out: + ReleaseMutex( ctx->qp_table_mutex ); + return ret; +} + +void mthca_clear_qp(struct mthca_context *ctx, uint32_t qpn) +{ + int tind = (qpn & (ctx->num_qps - 1)) >> ctx->qp_table_shift; + + WaitForSingleObject( ctx->qp_table_mutex, INFINITE ); + + if (!--ctx->qp_table[tind].refcnt) + cl_free(ctx->qp_table[tind].table); + else + ctx->qp_table[tind].table[qpn & ctx->qp_table_mask] = NULL; + + ReleaseMutex( ctx->qp_table_mutex ); +} + +int mthca_free_err_wqe(struct mthca_qp *qp, int is_send, + int index, int *dbd, uint32_t *new_wqe) +{ + struct mthca_next_seg *next; + + /* + * For SRQs, all WQEs generate a CQE, so we're always at the + * end of the doorbell chain. 
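+	 *
+	 * For the non-SRQ path below, the two masked fields pack as follows
+	 * (WQEs are at least 64-byte aligned, so the low six address bits
+	 * are free):
+	 *
+	 *	nda_op:	next-WQE offset in the high bits, opcode (on send
+	 *		queues) in bits 5:0
+	 *	ee_nds:	WQE size, in 16-byte chunks, in bits 5:0
+	 *
+	 * so (nda_op & ~0x3f) | (ee_nds & 0x3f) hands back both the next
+	 * descriptor and its size in a single word.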
+ */ + if (qp->ibv_qp.srq) { + *new_wqe = 0; + return 0; + } + + if (is_send) + next = get_send_wqe(qp, index); + else + next = get_recv_wqe(qp, index); + + *dbd = !!(next->ee_nds & cl_hton32(MTHCA_NEXT_DBD)); + if (next->ee_nds & cl_hton32(0x3f)) + *new_wqe = (next->nda_op & cl_hton32(~0x3f)) | + (next->ee_nds & cl_hton32(0x3f)); + else + *new_wqe = 0; + + return 0; +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_srq.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_srq.c new file mode 100644 index 00000000..e29bbd3c --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_srq.c @@ -0,0 +1,326 @@ +/* + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include + +#include "mlnx_uvp.h" +#include "mlnx_uvp_doorbell.h" +#include "mthca_wqe.h" + +#if defined(EVENT_TRACING) +#include "mlnx_uvp_srq.tmh" +#endif + +static void *get_wqe(struct mthca_srq *srq, int n) +{ + return (uint8_t*)srq->buf + (n << srq->wqe_shift); +} + +/* + * Return a pointer to the location within a WQE that we're using as a + * link when the WQE is in the free list. We use the imm field at an + * offset of 12 bytes because in the Tavor case, posting a WQE may + * overwrite the next segment of the previous WQE, but a receive WQE + * will never touch the imm field. This avoids corrupting our free + * list if the previous WQE has already completed and been put on the + * free list when we post the next WQE. 
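+ *
+ * Sketch of the resulting WQE head layout (struct mthca_next_seg is
+ * four 32-bit words):
+ *
+ *	byte  0	nda_op
+ *	byte  4	ee_nds
+ *	byte  8	flags
+ *	byte 12	imm	- doubles as the free-list "next index";
+ *			  -1 terminates the list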
+ */ +static inline int *wqe_to_link(void *wqe) +{ + return (int *) ((uint8_t*)wqe + 12); +} + +void mthca_free_srq_wqe(struct mthca_srq *srq, int ind) +{ + cl_spinlock_acquire(&srq->lock); + + if (srq->first_free >= 0) + *wqe_to_link(get_wqe(srq, srq->last_free)) = ind; + else + srq->first_free = ind; + + *wqe_to_link(get_wqe(srq, ind)) = -1; + srq->last_free = ind; + + cl_spinlock_release(&srq->lock); +} + +int mthca_tavor_post_srq_recv(struct ibv_srq *ibsrq, + struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_srq *srq = to_msrq(ibsrq); + uint32_t doorbell[2]; + int err = 0; + int first_ind; + int ind; + int next_ind; + int nreq; + int i; + uint8_t *wqe; + uint8_t *prev_wqe; + + cl_spinlock_acquire(&srq->lock); + + first_ind = srq->first_free; + + for (nreq = 0; wr; wr = wr->p_next) { + ind = srq->first_free; + + if (ind < 0) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + err = -1; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + + if (next_ind < 0) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + prev_wqe = srq->last; + srq->last = wqe; + + ((struct mthca_next_seg *) wqe)->nda_op = 0; + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > srq->max_gs)) { + err = -1; + *bad_wr = wr; + srq->last = prev_wqe; + break; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + htonll(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + ((struct mthca_next_seg *) prev_wqe)->nda_op = + cl_hton32((ind << srq->wqe_shift) | 1); + mb(); + ((struct mthca_next_seg *) prev_wqe)->ee_nds = + cl_hton32(MTHCA_NEXT_DBD); + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + + if (++nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) { + nreq = 0; + + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32(srq->srqn << 8); + + /* + * Make sure that descriptors are written + * before doorbell is rung. + */ + wmb(); + + mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL); + + first_ind = srq->first_free; + } + } + + if (nreq) { + doorbell[0] = cl_hton32(first_ind << srq->wqe_shift); + doorbell[1] = cl_hton32((srq->srqn << 8) | nreq); + + /* + * Make sure that descriptors are written before + * doorbell is rung. 
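+	 *
+	 * A note on the doorbell layout used here (as read from the code
+	 * above, not from a hardware spec): dword 0 carries the byte offset
+	 * of the first WQE of the batch, and dword 1 carries
+	 * (srqn << 8) | nreq.  The count lives in the low 8 bits of dword 1,
+	 * which is why the loop flushes a batch whenever nreq reaches
+	 * MTHCA_TAVOR_MAX_WQES_PER_RECV_DB instead of letting it grow.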
+ */ + wmb(); + + mthca_write64(doorbell, to_mctx(ibsrq->context), MTHCA_RECV_DOORBELL); + } + + cl_spinlock_release(&srq->lock); + return err; +} + +int mthca_arbel_post_srq_recv(struct ibv_srq *ibsrq, + struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr) +{ + struct mthca_srq *srq = to_msrq(ibsrq); + int err = 0; + int ind; + int next_ind; + int nreq; + int i; + uint8_t *wqe; + + cl_spinlock_acquire(&srq->lock); + + for (nreq = 0; wr; ++nreq, wr = wr->p_next) { + ind = srq->first_free; + + if (ind < 0) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + wqe = get_wqe(srq, ind); + next_ind = *wqe_to_link(wqe); + + if (next_ind < 0) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_LOW ,("SRQ %06x full\n", srq->srqn)); + err = -ENOMEM; + *bad_wr = wr; + break; + } + + ((struct mthca_next_seg *) wqe)->nda_op = + cl_hton32((next_ind << srq->wqe_shift) | 1); + ((struct mthca_next_seg *) wqe)->ee_nds = 0; + /* flags field will always remain 0 */ + + wqe += sizeof (struct mthca_next_seg); + + if (unlikely((int)wr->num_ds > srq->max_gs)) { + err = -1; + *bad_wr = wr; + break; + } + + for (i = 0; i < (int)wr->num_ds; ++i) { + ((struct mthca_data_seg *) wqe)->byte_count = + cl_hton32(wr->ds_array[i].length); + ((struct mthca_data_seg *) wqe)->lkey = + cl_hton32(wr->ds_array[i].lkey); + ((struct mthca_data_seg *) wqe)->addr = + htonll(wr->ds_array[i].vaddr); + wqe += sizeof (struct mthca_data_seg); + } + + if (i < srq->max_gs) { + ((struct mthca_data_seg *) wqe)->byte_count = 0; + ((struct mthca_data_seg *) wqe)->lkey = cl_hton32(MTHCA_INVAL_LKEY); + ((struct mthca_data_seg *) wqe)->addr = 0; + } + + srq->wrid[ind] = wr->wr_id; + srq->first_free = next_ind; + } + + if (likely(nreq)) { + srq->counter += (uint16_t)nreq; + + /* + * Make sure that descriptors are written before + * we write doorbell record. + */ + wmb(); + *srq->db = cl_hton32(srq->counter); + } + + cl_spinlock_release(&srq->lock); + return err; +} + +int mthca_alloc_srq_buf(struct ibv_pd *pd, struct ibv_srq_attr *attr, + struct mthca_srq *srq) +{ + struct mthca_data_seg *scatter; + uint8_t *wqe; + int size; + int i; + + srq->wrid = cl_malloc(srq->max * sizeof (uint64_t)); + if (!srq->wrid) + return -1; + + size = sizeof (struct mthca_next_seg) + + srq->max_gs * sizeof (struct mthca_data_seg); + + for (srq->wqe_shift = 6; 1 << srq->wqe_shift < size; ++srq->wqe_shift) + ; /* nothing */ + + srq->buf_size = srq->max << srq->wqe_shift; + + if (posix_memalign(&srq->buf, g_page_size, + align(srq->buf_size, g_page_size))) { + cl_free(srq->wrid); + return -1; + } + + cl_memclr(srq->buf, srq->buf_size); + + /* + * Now initialize the SRQ buffer so that all of the WQEs are + * linked into the list of free WQEs. In addition, set the + * scatter list L_Keys to the sentry value of 0x100. + */ + + for (i = 0; i < srq->max; ++i) { + wqe = get_wqe(srq, i); + + *wqe_to_link(wqe) = i < srq->max - 1 ? 
i + 1 : -1; + + for (scatter = (struct mthca_data_seg *)(wqe + sizeof (struct mthca_next_seg)); + (void *) scatter < (void*)(wqe + (1 << srq->wqe_shift)); + ++scatter) + scatter->lkey = cl_hton32(MTHCA_INVAL_LKEY); + } + + srq->first_free = 0; + srq->last_free = srq->max - 1; + srq->last = get_wqe(srq, srq->max - 1); + + return 0; +} diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.c b/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.c new file mode 100644 index 00000000..c9468ee8 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.c @@ -0,0 +1,532 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+#include
+
+#include "mlnx_uvp.h"
+#include "mx_abi.h"
+#include "mthca_wqe.h"
+
+
+#if defined(EVENT_TRACING)
+#include "mlnx_uvp_verbs.tmh"
+#endif
+
+struct ibv_pd *mthca_alloc_pd(struct ibv_context *context, struct ibv_alloc_pd_resp *resp)
+{
+	struct mthca_pd *pd;
+
+	pd = cl_zalloc(sizeof *pd);
+	if (!pd)
+		goto err_malloc;
+
+	if (!mthca_is_memfree(context)) {
+		pd->ah_list = NULL;
+		pd->ah_mutex = CreateMutex( NULL, FALSE, NULL );
+		if (!pd->ah_mutex)
+			goto err_mutex;
+	}
+
+	/* fill response fields */
+	pd->ibv_pd.context = context;
+	pd->ibv_pd.handle = resp->pd_handle;
+	pd->pdn = resp->pdn;
+
+	return &pd->ibv_pd;
+
+err_mutex:
+	cl_free(pd);
+err_malloc:
+	return NULL;
+}
+
+int mthca_free_pd(struct ibv_pd *ibv_pd)
+{
+	struct mthca_pd *pd = to_mpd(ibv_pd);
+	if (!mthca_is_memfree(ibv_pd->context)) {
+		struct mthca_ah_page *page, *next_page;
+		WaitForSingleObject( pd->ah_mutex, INFINITE );
+		for (page = pd->ah_list; page; page = next_page) {
+			next_page = page->next;
+			#ifdef NOT_USE_VIRTUAL_ALLOC
+				cl_free(page->buf);
+			#else
+				VirtualFree( page->buf, 0, MEM_RELEASE);
+			#endif
+			cl_free(page);
+		}
+		ReleaseMutex( pd->ah_mutex );
+		CloseHandle(pd->ah_mutex);
+	}
+	cl_free(pd);
+	return 0;
+}
+
+/* allocate create_cq infrastructure and fill its request parameters structure */
+struct ibv_cq *mthca_create_cq_pre(struct ibv_context *context, int *p_cqe,
+				   struct ibv_create_cq *req)
+{
+	struct mthca_cq *cq;
+	int nent;
+
+	/* Sanity check CQ size before proceeding */
+	if (*p_cqe > 131072)
+		goto exit;
+
+	cq = cl_zalloc(sizeof *cq);
+	if (!cq)
+		goto exit;
+
+	cl_spinlock_construct(&cq->lock);
+	if (cl_spinlock_init(&cq->lock))
+		goto err;
+
+	for (nent = 1; nent <= *p_cqe; nent <<= 1)
+		; /* nothing */
+
+	if (posix_memalign(&cq->buf, g_page_size,
+			   align(nent * MTHCA_CQ_ENTRY_SIZE, g_page_size)))
+		goto err_memalign;
+
+	mthca_init_cq_buf(cq, nent);
+
+	if (mthca_is_memfree(context)) {
+		cq->set_ci_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+						     MTHCA_DB_TYPE_CQ_SET_CI,
+						     &cq->set_ci_db);
+		if (cq->set_ci_db_index < 0)
+			goto err_unreg;
+
+		cq->arm_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+						  MTHCA_DB_TYPE_CQ_ARM,
+						  &cq->arm_db);
+		if (cq->arm_db_index < 0)
+			goto err_set_db;
+
+		cq->u_arm_db_index = mthca_alloc_db(to_mctx(context)->db_tab,
+						    MTHCA_DB_TYPE_CQ_ARM,
+						    &cq->p_u_arm_sn);
+		if (cq->u_arm_db_index < 0)
+			goto err_arm_db;
+
+		*cq->p_u_arm_sn = 1;
+
+		req->arm_db_page = db_align(cq->arm_db);
+		req->set_db_page = db_align(cq->set_ci_db);
+		req->u_arm_db_page = (uint64_t)(ULONG_PTR)cq->p_u_arm_sn;
+		req->arm_db_index = cq->arm_db_index;
+		req->set_db_index = cq->set_ci_db_index;
+		req->u_arm_db_index = cq->u_arm_db_index;
+	}
+
+	req->mr.start = (uint64_t)(ULONG_PTR)cq->buf;
+	req->mr.length = nent * MTHCA_CQ_ENTRY_SIZE;
+	req->mr.hca_va = 0;
+	req->mr.pd_handle = to_mctx(context)->pd->handle;
+	req->mr.pdn = to_mpd(to_mctx(context)->pd)->pdn;
+	req->mr.access_flags = MTHCA_ACCESS_LOCAL_WRITE;
+	req->user_handle = (uint64_t)(ULONG_PTR)cq;
+	req->cqe = *p_cqe;
+	*p_cqe = nent - 1;
+
+	return &cq->ibv_cq;
+
+err_arm_db:
+	if (mthca_is_memfree(context))
+		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
+			      cq->arm_db_index);
+
+err_set_db:
+	if (mthca_is_memfree(context))
+		mthca_free_db(to_mctx(context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI,
cq->set_ci_db_index); + +err_unreg: + cl_free(cq->buf); + +err_memalign: + cl_spinlock_destroy(&cq->lock); + +err: + cl_free(cq); + +exit: + return ERR_PTR(-ENOMEM); +} + +struct ibv_cq *mthca_create_cq_post(struct ibv_context *context, + struct ibv_create_cq_resp *resp) +{ + struct mthca_cq *cq; + int ret; + + cq = (struct mthca_cq *)(ULONG_PTR)resp->user_handle; + + cq->cqn = resp->cqn; + cq->mr.handle = resp->mr.mr_handle; + cq->mr.lkey = resp->mr.lkey; + cq->mr.rkey = resp->mr.rkey; + cq->mr.pd = to_mctx(context)->pd; + cq->mr.context = context; + cq->ibv_cq.cqe = resp->cqe; + cq->ibv_cq.handle = resp->cq_handle; + cq->ibv_cq.context = context; + + if (mthca_is_memfree(context)) { + mthca_set_db_qn(cq->set_ci_db, MTHCA_DB_TYPE_CQ_SET_CI, cq->cqn); + mthca_set_db_qn(cq->arm_db, MTHCA_DB_TYPE_CQ_ARM, cq->cqn); + } + + return &cq->ibv_cq; + +} + +int mthca_destroy_cq(struct ibv_cq *cq) +{ + int ret; + + if (mthca_is_memfree(cq->context)) { + mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI, + to_mcq(cq)->u_arm_db_index); + mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_SET_CI, + to_mcq(cq)->set_ci_db_index); + mthca_free_db(to_mctx(cq->context)->db_tab, MTHCA_DB_TYPE_CQ_ARM, + to_mcq(cq)->arm_db_index); + } + +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(to_mcq(cq)->buf); +#else + VirtualFree( to_mcq(cq)->buf, 0, MEM_RELEASE); +#endif + + + cl_spinlock_destroy(&((struct mthca_cq *)cq)->lock); + cl_free(to_mcq(cq)); + + return 0; +} + +int align_queue_size(struct ibv_context *context, int size, int spare) +{ + int ret; + + /* + * If someone asks for a 0-sized queue, presumably they're not + * going to use it. So don't mess with their size. + */ + if (!size) + return 0; + + if (mthca_is_memfree(context)) { + for (ret = 1; ret < size + spare; ret <<= 1) + ; /* nothing */ + + return ret; + } else + return size + spare; +} + +struct ibv_qp *mthca_create_qp_pre(struct ibv_pd *pd, + struct ibv_qp_init_attr *attr, struct ibv_create_qp *req) +{ + struct mthca_qp *qp; + struct ibv_context *context = pd->context; + int ret = -ENOMEM; + + UVP_ENTER(UVP_DBG_QP); + /* Sanity check QP size before proceeding */ + if (attr->cap.max_send_wr > 65536 || + attr->cap.max_recv_wr > 65536 || + attr->cap.max_send_sge > 64 || + attr->cap.max_recv_sge > 64 || + attr->cap.max_inline_data > 1024) { + ret = -EINVAL; + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("sanity checks failed (%d)\n",ret)); + goto exit; + } + + qp = cl_zalloc(sizeof *qp); + if (!qp) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_malloc failed (%d)\n",ret)); + goto err_nomem; + } + + qp->sq.max = align_queue_size(context, attr->cap.max_send_wr, 0); + qp->rq.max = align_queue_size(context, attr->cap.max_recv_wr, 0); + + if (mthca_alloc_qp_buf(pd, &attr->cap, attr->qp_type, qp)) { + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("mthca_alloc_qp_buf failed (%d)\n",ret)); + goto err_nomem; + } + + mthca_init_qp_indices(qp); + + cl_spinlock_construct(&qp->sq.lock); + if (cl_spinlock_init(&qp->sq.lock)) { + ret = -EFAULT; + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for sq (%d)\n",ret)); + goto err_spinlock_sq; + } + + cl_spinlock_construct(&qp->rq.lock); + if (cl_spinlock_init(&qp->rq.lock)) { + ret = -EFAULT; + UVP_PRINT(TRACE_LEVEL_ERROR ,UVP_DBG_QP ,("cl_spinlock_init failed for rq (%d)\n",ret)); + goto err_spinlock_rq; + } + + if (mthca_is_memfree(context)) { + qp->sq.db_index = mthca_alloc_db(to_mctx(context)->db_tab, + MTHCA_DB_TYPE_SQ, + &qp->sq.db); + if (qp->sq.db_index < 0) + goto err_sq_db; + 
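+		/*
+		 * Mem-free HCAs keep their doorbell records in host memory,
+		 * one per work queue.  The page/index pairs stored into req
+		 * below are how the kernel learns where these records live,
+		 * so both sides work against the same doorbell state.
+		 */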
+ qp->rq.db_index = mthca_alloc_db(to_mctx(context)->db_tab, + MTHCA_DB_TYPE_RQ, + &qp->rq.db); + if (qp->rq.db_index < 0) + goto err_rq_db; + + req->sq_db_page = db_align(qp->sq.db); + req->rq_db_page = db_align(qp->rq.db); + req->sq_db_index = qp->sq.db_index; + req->rq_db_index = qp->rq.db_index; + } + + // fill the rest qp fields + qp->ibv_qp.pd = pd; + qp->ibv_qp.context= pd->context; + qp->ibv_qp.send_cq = attr->send_cq; + qp->ibv_qp.recv_cq = attr->recv_cq; + qp->ibv_qp.srq = attr->srq; + qp->ibv_qp.state = IBV_QPS_RESET; + qp->ibv_qp.qp_type = attr->qp_type; + + // fill the rest request fields + req->mr.start = (uint64_t)(ULONG_PTR)qp->buf; + req->mr.length = qp->buf_size; + req->mr.hca_va = 0; + req->mr.pd_handle = pd->handle; + req->mr.pdn = to_mpd(pd)->pdn; + req->mr.access_flags = 0; //local read + req->user_handle = (uint64_t)(ULONG_PTR)qp; + req->send_cq_handle = attr->send_cq->handle; + req->recv_cq_handle = attr->recv_cq->handle; + req->srq_handle = (attr->srq) ? attr->srq->handle : 0; + req->max_send_wr = attr->cap.max_send_wr; + req->max_recv_wr = attr->cap.max_recv_wr; + req->max_send_sge = attr->cap.max_send_sge; + req->max_recv_sge = attr->cap.max_recv_sge; + req->max_inline_data = attr->cap.max_inline_data; + req->sq_sig_all = (uint8_t)attr->sq_sig_all; + req->qp_type = attr->qp_type; + req->is_srq = !!attr->srq; + + + UVP_EXIT(UVP_DBG_QP); + return &qp->ibv_qp; + +err_rq_db: + if (mthca_is_memfree(context)) + mthca_free_db(to_mctx(context)->db_tab, + MTHCA_DB_TYPE_SQ, qp->sq.db_index); + +err_sq_db: + cl_spinlock_destroy(&qp->rq.lock); + +err_spinlock_rq: + cl_spinlock_destroy(&qp->sq.lock); + +err_spinlock_sq: + cl_free(qp->wrid); +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(qp->buf); +#else + VirtualFree( qp->buf, 0, MEM_RELEASE); +#endif + +err_nomem: + cl_free(qp); + +exit: + + UVP_EXIT(UVP_DBG_QP); + return ERR_PTR(ret); +} + +struct ibv_qp *mthca_create_qp_post(struct ibv_pd *pd, + struct ibv_create_qp_resp *resp) +{ + struct mthca_qp *qp; + int ret; + UVP_ENTER(UVP_DBG_QP); + qp = (struct mthca_qp *)(ULONG_PTR)resp->user_handle; + + qp->ibv_qp.handle = resp->qp_handle; + qp->ibv_qp.qp_num = resp->qpn; + qp->sq.max = resp->max_send_wr; + qp->rq.max = resp->max_recv_wr; + qp->sq.max_gs = resp->max_send_sge; + qp->rq.max_gs = resp->max_recv_sge; + qp->max_inline_data = resp->max_inline_data; + qp->mr.handle = resp->mr.mr_handle; + qp->mr.lkey = resp->mr.lkey; + qp->mr.rkey = resp->mr.rkey; + qp->mr.pd = pd; + qp->mr.context = pd->context; + + if (mthca_is_memfree(pd->context)) { + mthca_set_db_qn(qp->sq.db, MTHCA_DB_TYPE_SQ, qp->ibv_qp.qp_num); + mthca_set_db_qn(qp->rq.db, MTHCA_DB_TYPE_RQ, qp->ibv_qp.qp_num); + } + + ret = mthca_store_qp(to_mctx(pd->context), qp->ibv_qp.qp_num, qp); + if (ret) + goto err_store_qp; + + UVP_EXIT(UVP_DBG_QP); + return &qp->ibv_qp; + +err_store_qp: + UVP_EXIT(UVP_DBG_QP); + return ERR_PTR(ret); +} + + +int mthca_modify_qp(struct ibv_qp *qp, struct ibv_qp_attr *attr, + enum ibv_qp_attr_mask attr_mask) +{ + int ret = 0; + + if (attr_mask & IBV_QP_STATE) + qp->state = attr->qp_state; + + if ((attr_mask & IBV_QP_STATE) && + (attr->qp_state == IBV_QPS_RESET)) { + mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, + qp->srq ? 
to_msrq(qp->srq) : NULL); + if (qp->send_cq != qp->recv_cq) + mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); + + mthca_init_qp_indices(to_mqp(qp)); + + if (mthca_is_memfree(qp->pd->context)) { + *to_mqp(qp)->sq.db = 0; + *to_mqp(qp)->rq.db = 0; + } + } + + return ret; +} + + +void mthca_destroy_qp_pre(struct ibv_qp *qp) +{ + int ret; + + mthca_cq_clean(to_mcq(qp->recv_cq), qp->qp_num, + qp->srq ? to_msrq(qp->srq) : NULL); + if (qp->send_cq != qp->recv_cq) + mthca_cq_clean(to_mcq(qp->send_cq), qp->qp_num, NULL); + + cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock); + if (qp->send_cq != qp->recv_cq) + cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock); + mthca_clear_qp(to_mctx(qp->pd->context), qp->qp_num); + if (qp->send_cq != qp->recv_cq) + cl_spinlock_release(&to_mcq(qp->recv_cq)->lock); + cl_spinlock_release(&to_mcq(qp->send_cq)->lock); +} + +void mthca_destroy_qp_post(struct ibv_qp *qp, int ret) +{ + if (ret) { + cl_spinlock_acquire(&to_mcq(qp->send_cq)->lock); + if (qp->send_cq != qp->recv_cq) + cl_spinlock_acquire(&to_mcq(qp->recv_cq)->lock); + mthca_store_qp(to_mctx(qp->pd->context), qp->qp_num, to_mqp(qp)); + if (qp->send_cq != qp->recv_cq) + cl_spinlock_release(&to_mcq(qp->recv_cq)->lock); + cl_spinlock_release(&to_mcq(qp->send_cq)->lock); + } + else { + if (mthca_is_memfree(qp->pd->context)) { + mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_RQ, + to_mqp(qp)->rq.db_index); + mthca_free_db(to_mctx(qp->pd->context)->db_tab, MTHCA_DB_TYPE_SQ, + to_mqp(qp)->sq.db_index); + } + + cl_spinlock_destroy(&((struct mthca_qp *)qp)->sq.lock); + cl_spinlock_destroy(&((struct mthca_qp *)qp)->rq.lock); + +#ifdef NOT_USE_VIRTUAL_ALLOC + cl_free(to_mqp(qp)->buf); +#else + VirtualFree( to_mqp(qp)->buf, 0, MEM_RELEASE); +#endif + cl_free(to_mqp(qp)->wrid); + cl_free(to_mqp(qp)); + } + +} + +int mthca_attach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid) +{ +#ifdef WIN_TO_BE_CHANGED + return ibv_cmd_attach_mcast(qp, gid, lid); +#else + return -ENOSYS; +#endif +} + +int mthca_detach_mcast(struct ibv_qp *qp, union ibv_gid *gid, uint16_t lid) +{ +#ifdef WIN_TO_BE_CHANGED + return ibv_cmd_detach_mcast(qp, gid, lid); +#else + return -ENOSYS; +#endif +} + diff --git a/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.h b/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.h new file mode 100644 index 00000000..5ea2dabb --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mlnx_uvp_verbs.h @@ -0,0 +1,490 @@ +/* + * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2004 Intel Corporation. All rights reserved. + * Copyright (c) 2005 Cisco Systems. All rights reserved. + * Copyright (c) 2005 PathScale, Inc. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef MLNX_UVP_VERBS_H +#define MLNX_UVP_VERBS_H + +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +union ibv_gid { + uint8_t raw[16]; + struct { + uint64_t subnet_prefix; + uint64_t interface_id; + } global; +}; + +enum ibv_node_type { + IBV_NODE_CA = 1, + IBV_NODE_SWITCH, + IBV_NODE_ROUTER +}; + +enum ibv_device_cap_flags { + IBV_DEVICE_RESIZE_MAX_WR = 1, + IBV_DEVICE_BAD_PKEY_CNTR = 1 << 1, + IBV_DEVICE_BAD_QKEY_CNTR = 1 << 2, + IBV_DEVICE_RAW_MULTI = 1 << 3, + IBV_DEVICE_AUTO_PATH_MIG = 1 << 4, + IBV_DEVICE_CHANGE_PHY_PORT = 1 << 5, + IBV_DEVICE_UD_AV_PORT_ENFORCE = 1 << 6, + IBV_DEVICE_CURR_QP_STATE_MOD = 1 << 7, + IBV_DEVICE_SHUTDOWN_PORT = 1 << 8, + IBV_DEVICE_INIT_TYPE = 1 << 9, + IBV_DEVICE_PORT_ACTIVE_EVENT = 1 << 10, + IBV_DEVICE_SYS_IMAGE_GUID = 1 << 11, + IBV_DEVICE_RC_RNR_NAK_GEN = 1 << 12, + IBV_DEVICE_SRQ_RESIZE = 1 << 13, + IBV_DEVICE_N_NOTIFY_CQ = 1 << 14, +}; + +enum ibv_atomic_cap { + IBV_ATOMIC_NONE, + IBV_ATOMIC_HCA, + IBV_ATOMIC_GLOB +}; + +struct ibv_device_attr { + char fw_ver[64]; + uint64_t node_guid; + uint64_t sys_image_guid; + uint64_t max_mr_size; + uint64_t page_size_cap; + uint32_t vendor_id; + uint32_t vendor_part_id; + uint32_t hw_ver; + int max_qp; + int max_qp_wr; + int device_cap_flags; + int max_sge; + int max_sge_rd; + int max_cq; + int max_cqe; + int max_mr; + int max_pd; + int max_qp_rd_atom; + int max_ee_rd_atom; + int max_res_rd_atom; + int max_qp_init_rd_atom; + int max_ee_init_rd_atom; + enum ibv_atomic_cap atomic_cap; + int max_ee; + int max_rdd; + int max_mw; + int max_raw_ipv6_qp; + int max_raw_ethy_qp; + int max_mcast_grp; + int max_mcast_qp_attach; + int max_total_mcast_qp_attach; + int max_ah; + int max_fmr; + int max_map_per_fmr; + int max_srq; + int max_srq_wr; + int max_srq_sge; + uint16_t max_pkeys; + uint8_t local_ca_ack_delay; + uint8_t phys_port_cnt; +}; + +enum ibv_mtu { + IBV_MTU_256 = 1, + IBV_MTU_512 = 2, + IBV_MTU_1024 = 3, + IBV_MTU_2048 = 4, + IBV_MTU_4096 = 5 +}; + +enum ibv_port_state { + IBV_PORT_NOP = 0, + IBV_PORT_DOWN = 1, + IBV_PORT_INIT = 2, + IBV_PORT_ARMED = 3, + IBV_PORT_ACTIVE = 4, + IBV_PORT_ACTIVE_DEFER = 5 +}; + +struct ibv_port_attr { + enum ibv_port_state state; + enum ibv_mtu max_mtu; + enum ibv_mtu active_mtu; + int gid_tbl_len; + uint32_t port_cap_flags; + uint32_t max_msg_sz; + uint32_t bad_pkey_cntr; + uint32_t qkey_viol_cntr; + uint16_t pkey_tbl_len; + uint16_t lid; + uint16_t sm_lid; + uint8_t lmc; + uint8_t max_vl_num; + uint8_t sm_sl; + uint8_t subnet_timeout; + uint8_t init_type_reply; + uint8_t active_width; + uint8_t active_speed; + uint8_t phys_state; +}; + +enum ibv_event_type { + IBV_EVENT_CQ_ERR, + IBV_EVENT_QP_FATAL, + IBV_EVENT_QP_REQ_ERR, + IBV_EVENT_QP_ACCESS_ERR, + IBV_EVENT_COMM_EST, + IBV_EVENT_SQ_DRAINED, + IBV_EVENT_PATH_MIG, + IBV_EVENT_PATH_MIG_ERR, + IBV_EVENT_DEVICE_FATAL, + IBV_EVENT_PORT_ACTIVE, + 
IBV_EVENT_PORT_ERR, + IBV_EVENT_LID_CHANGE, + IBV_EVENT_PKEY_CHANGE, + IBV_EVENT_SM_CHANGE, + IBV_EVENT_SRQ_ERR, + IBV_EVENT_SRQ_LIMIT_REACHED, + IBV_EVENT_QP_LAST_WQE_REACHED +}; + +struct ibv_async_event { + union { + struct ibv_cq *cq; + struct ibv_qp *qp; + struct ibv_srq *srq; + int port_num; + } element; + enum ibv_event_type event_type; +}; + +enum ibv_access_flags { + IBV_ACCESS_LOCAL_WRITE = 1, + IBV_ACCESS_REMOTE_WRITE = (1<<1), + IBV_ACCESS_REMOTE_READ = (1<<2), + IBV_ACCESS_REMOTE_ATOMIC = (1<<3), + IBV_ACCESS_MW_BIND = (1<<4) +}; + +struct ibv_pd { + struct ibv_context *context; + uint64_t handle; +}; + +struct ibv_mr { + struct ibv_context *context; + struct ibv_pd *pd; + uint64_t handle; + uint32_t lkey; + uint32_t rkey; +}; + +struct ibv_global_route { + ib_gid_t dgid; + uint32_t flow_label; + uint8_t sgid_index; + uint8_t hop_limit; + uint8_t traffic_class; +}; + +struct ibv_ah_attr { + struct ibv_global_route grh; + uint16_t dlid; + uint8_t sl; + uint8_t src_path_bits; + uint8_t static_rate; + uint8_t is_global; + uint8_t port_num; +}; + + +enum ib_cq_notify { + IB_CQ_SOLICITED, + IB_CQ_NEXT_COMP +}; + +enum ibv_srq_attr_mask { + IBV_SRQ_MAX_WR = 1 << 0, + IBV_SRQ_LIMIT = 1 << 1, +}; + +struct ibv_srq_attr { + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; +}; + +struct ibv_srq_init_attr { + void *srq_context; + struct ibv_srq_attr attr; +}; + +struct ibv_qp_cap { + uint32_t max_send_wr; + uint32_t max_recv_wr; + uint32_t max_send_sge; + uint32_t max_recv_sge; + uint32_t max_inline_data; +}; + +struct ibv_qp_init_attr { + void *qp_context; + struct ibv_cq *send_cq; + struct ibv_cq *recv_cq; + struct ibv_srq *srq; + struct ibv_qp_cap cap; + ib_qp_type_t qp_type; + int sq_sig_all; +}; + +enum ibv_qp_attr_mask { + IBV_QP_STATE = 1 << 0, + IBV_QP_CUR_STATE = 1 << 1, + IBV_QP_EN_SQD_ASYNC_NOTIFY = 1 << 2, + IBV_QP_ACCESS_FLAGS = 1 << 3, + IBV_QP_PKEY_INDEX = 1 << 4, + IBV_QP_PORT = 1 << 5, + IBV_QP_QKEY = 1 << 6, + IBV_QP_AV = 1 << 7, + IBV_QP_PATH_MTU = 1 << 8, + IBV_QP_TIMEOUT = 1 << 9, + IBV_QP_RETRY_CNT = 1 << 10, + IBV_QP_RNR_RETRY = 1 << 11, + IBV_QP_RQ_PSN = 1 << 12, + IBV_QP_MAX_QP_RD_ATOMIC = 1 << 13, + IBV_QP_ALT_PATH = 1 << 14, + IBV_QP_MIN_RNR_TIMER = 1 << 15, + IBV_QP_SQ_PSN = 1 << 16, + IBV_QP_MAX_DEST_RD_ATOMIC = 1 << 17, + IBV_QP_PATH_MIG_STATE = 1 << 18, + IBV_QP_CAP = 1 << 19, + IBV_QP_DEST_QPN = 1 << 20 +}; + +enum ibv_qp_state { + IBV_QPS_RESET, + IBV_QPS_INIT, + IBV_QPS_RTR, + IBV_QPS_RTS, + IBV_QPS_SQD, + IBV_QPS_SQE, + IBV_QPS_ERR +}; + +enum ibv_mig_state { + IBV_MIG_MIGRATED, + IBV_MIG_REARM, + IBV_MIG_ARMED +}; + +struct ibv_qp_attr { + enum ibv_qp_state qp_state; + enum ibv_qp_state cur_qp_state; + enum ibv_mtu path_mtu; + enum ibv_mig_state path_mig_state; + uint32_t qkey; + uint32_t rq_psn; + uint32_t sq_psn; + uint32_t dest_qp_num; + int qp_access_flags; + struct ibv_qp_cap cap; + struct ibv_ah_attr ah_attr; + struct ibv_ah_attr alt_ah_attr; + uint16_t pkey_index; + uint16_t alt_pkey_index; + uint8_t en_sqd_async_notify; + uint8_t sq_draining; + uint8_t max_rd_atomic; + uint8_t max_dest_rd_atomic; + uint8_t min_rnr_timer; + uint8_t port_num; + uint8_t timeout; + uint8_t retry_cnt; + uint8_t rnr_retry; + uint8_t alt_port_num; + uint8_t alt_timeout; +}; + + +enum ibv_send_flags { + IBV_SEND_FENCE = 1 << 0, + IBV_SEND_SIGNALED = 1 << 1, + IBV_SEND_SOLICITED = 1 << 2, + IBV_SEND_INLINE = 1 << 3 +}; + +struct ibv_sge { + uint64_t addr; + uint32_t length; + uint32_t lkey; +}; + +struct ibv_send_wr { + struct ibv_send_wr *next; + uint64_t 
wr_id; + struct ibv_sge *sg_list; + int num_sge; + enum ibv_wr_opcode opcode; + enum ibv_send_flags send_flags; + uint32_t imm_data; /* in network byte order */ + union { + struct { + uint64_t remote_addr; + uint32_t rkey; + } rdma; + struct { + uint64_t remote_addr; + uint64_t compare_add; + uint64_t swap; + uint32_t rkey; + } atomic; + struct { + struct mthca_ah *ah; + uint32_t remote_qpn; + uint32_t remote_qkey; + } ud; + } wr; +}; + +struct ibv_recv_wr { + struct ibv_recv_wr *next; + uint64_t wr_id; + struct ibv_sge *sg_list; + int num_sge; +}; + +typedef enum MTHCA_QP_ACCESS_FLAGS { + MTHCA_ACCESS_LOCAL_WRITE = 1, + MTHCA_ACCESS_REMOTE_WRITE = (1<<1), + MTHCA_ACCESS_REMOTE_READ = (1<<2), + MTHCA_ACCESS_REMOTE_ATOMIC = (1<<3), + MTHCA_ACCESS_MW_BIND = (1<<4) +} mthca_qp_access_t; + + +struct ibv_srq { + struct ibv_pd *pd; + uint64_t handle; + struct ibv_context *context; +}; + +struct ibv_qp { + struct ibv_pd *pd; + struct ibv_cq *send_cq; + struct ibv_cq *recv_cq; + struct ibv_srq *srq; + uint64_t handle; + uint32_t qp_num; + enum ibv_qp_state state; + ib_qp_type_t qp_type; + struct ibv_context *context; +}; + +struct ibv_cq { + uint64_t handle; + int cqe; + struct ibv_context *context; +}; + +struct ibv_ah { + struct ibv_pd *pd; +}; + +struct ibv_context_ops { + int (*query_device)(struct ibv_context *context, + struct ibv_device_attr *device_attr); + int (*query_port)(struct ibv_context *context, uint8_t port_num, + struct ibv_port_attr *port_attr); + struct ibv_pd * (*alloc_pd)(struct ibv_context *context, struct ibv_alloc_pd_resp *resp_p); + int (*dealloc_pd)(struct ibv_pd *pd); + struct ibv_mr * (*reg_mr)(struct ibv_pd *pd, void *addr, size_t length, + enum ibv_access_flags access); + int (*dereg_mr)(struct ibv_mr *mr); + struct ibv_cq * (*create_cq_pre)(struct ibv_context *context, int *cqe, + struct ibv_create_cq *req); + struct ibv_cq * (*create_cq_post)(struct ibv_context *context, + struct ibv_create_cq_resp *resp); + int (*poll_cq)(struct ibv_cq *cq, int num_entries, struct _ib_wc *wc); + int (*poll_cq_list)( struct ibv_cq *ibcq, + struct _ib_wc** const pp_free_wclist, + struct _ib_wc** const pp_done_wclist ); + int (*req_notify_cq)(struct ibv_cq *cq, int solicited_only); + int (*destroy_cq)(struct ibv_cq *cq); + struct ibv_srq * (*create_srq)(struct ibv_pd *pd, + struct ibv_srq_init_attr *srq_init_attr); + int (*modify_srq)(struct ibv_srq *srq, + struct ibv_srq_attr *srq_attr, + enum ibv_srq_attr_mask srq_attr_mask); + int (*destroy_srq)(struct ibv_srq *srq); + int (*post_srq_recv)(struct ibv_srq *srq, + struct _ib_recv_wr *recv_wr, + struct _ib_recv_wr **bad_recv_wr); + struct ibv_qp *(*create_qp_pre)(struct ibv_pd *pd, + struct ibv_qp_init_attr *attr, struct ibv_create_qp *req); + struct ibv_qp *(*create_qp_post)(struct ibv_pd *pd, + struct ibv_create_qp_resp *resp); + int (*modify_qp)(struct ibv_qp *qp, struct ibv_qp_attr *attr, + enum ibv_qp_attr_mask attr_mask); + int (*destroy_qp)(struct ibv_qp *qp); + int (*post_send)(struct ibv_qp *qp, struct _ib_send_wr *wr, + struct _ib_send_wr **bad_wr); + int (*post_recv)(struct ibv_qp *qp, struct _ib_recv_wr *wr, + struct _ib_recv_wr **bad_wr); + int (*attach_mcast)(struct ibv_qp *qp, union ibv_gid *gid, + uint16_t lid); + int (*detach_mcast)(struct ibv_qp *qp, union ibv_gid *gid, + uint16_t lid); +}; + +struct ibv_context { + struct ibv_context_ops ops; + void *abi_compat; +}; + +int align_queue_size(struct ibv_context *context, int size, int spare); + +END_C_DECLS + +#endif /* INFINIBAND_VERBS_H */ diff --git 
a/branches/Ndi/hw/mthca/user/mt_l2w.h b/branches/Ndi/hw/mthca/user/mt_l2w.h new file mode 100644 index 00000000..9f204d22 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/mt_l2w.h @@ -0,0 +1,87 @@ +#ifndef UMT_L2W_H +#define UMT_L2W_H + +// =========================================== +// INCLUDES +// =========================================== + +// OS +#include +#include +#include +//#include +#include +#include +//#include +#include + + +// =========================================== +// SUBSTITUTIONS +// =========================================== + +#define inline __inline +#define likely(x) (x) +#define unlikely(x) (x) + +// =========================================== +// LITERALS +// =========================================== + + + +// =========================================== +// TYPES +// =========================================== + + +// =========================================== +// MACROS +// =========================================== + +// nullifying macros + +#define ERR_PTR(error) ((void*)(LONG_PTR)(error)) +#define PTR_ERR(ptr) ((long)(LONG_PTR)(void*)(ptr)) +//TODO: there are 2 assumptions here: +// - pointer can't be too big (around -1) +// - error can't be bigger than 1000 +#define IS_ERR(ptr) ((ULONG_PTR)ptr > (ULONG_PTR)-1000L) + +#define ffsl(val) ffs(val) + +extern size_t g_page_size; + +static inline int posix_memalign(void **memptr, size_t alignment, size_t size) +{ +#ifdef NOT_USE_VIRTUAL_ALLOC + // sanity checks + if (alignment % sizeof(void*)) + return EINVAL; + if (alignment < g_page_size) { + fprintf(stderr, "mthca: Fatal (posix_memalign): alignment too small - %d \n", alignment ); + return EINVAL; + } + + // allocation + *memptr = cl_malloc(size); + if (*memptr) + return 0; + else + return ENOMEM; +#else + *memptr = VirtualAlloc( NULL, size, MEM_COMMIT | MEM_RESERVE, PAGE_READWRITE ); + if (*memptr) + return 0; + else + return ENOMEM; +#endif +} + +// =========================================== +// FUNCTIONS +// =========================================== + + +#endif + diff --git a/branches/Ndi/hw/mthca/user/opcode.h b/branches/Ndi/hw/mthca/user/opcode.h new file mode 100644 index 00000000..cf2598b6 --- /dev/null +++ b/branches/Ndi/hw/mthca/user/opcode.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef INFINIBAND_OPCODE_H +#define INFINIBAND_OPCODE_H + +/* + * This macro cleans up the definitions of constants for BTH opcodes. + * It is used to define constants such as IBV_OPCODE_UD_SEND_ONLY, + * which becomes IBV_OPCODE_UD + IBV_OPCODE_SEND_ONLY, and this gives + * the correct value. + * + * In short, user code should use the constants defined using the + * macro rather than worrying about adding together other constants. +*/ +#define IBV_OPCODE(transport, op) \ + IBV_OPCODE_ ## transport ## _ ## op = \ + IBV_OPCODE_ ## transport + IBV_OPCODE_ ## op + +enum { + /* transport types -- just used to define real constants */ + IBV_OPCODE_RC = 0x00, + IBV_OPCODE_UC = 0x20, + IBV_OPCODE_RD = 0x40, + IBV_OPCODE_UD = 0x60, + + /* operations -- just used to define real constants */ + IBV_OPCODE_SEND_FIRST = 0x00, + IBV_OPCODE_SEND_MIDDLE = 0x01, + IBV_OPCODE_SEND_LAST = 0x02, + IBV_OPCODE_SEND_LAST_WITH_IMMEDIATE = 0x03, + IBV_OPCODE_SEND_ONLY = 0x04, + IBV_OPCODE_SEND_ONLY_WITH_IMMEDIATE = 0x05, + IBV_OPCODE_RDMA_WRITE_FIRST = 0x06, + IBV_OPCODE_RDMA_WRITE_MIDDLE = 0x07, + IBV_OPCODE_RDMA_WRITE_LAST = 0x08, + IBV_OPCODE_RDMA_WRITE_LAST_WITH_IMMEDIATE = 0x09, + IBV_OPCODE_RDMA_WRITE_ONLY = 0x0a, + IBV_OPCODE_RDMA_WRITE_ONLY_WITH_IMMEDIATE = 0x0b, + IBV_OPCODE_RDMA_READ_REQUEST = 0x0c, + IBV_OPCODE_RDMA_READ_RESPONSE_FIRST = 0x0d, + IBV_OPCODE_RDMA_READ_RESPONSE_MIDDLE = 0x0e, + IBV_OPCODE_RDMA_READ_RESPONSE_LAST = 0x0f, + IBV_OPCODE_RDMA_READ_RESPONSE_ONLY = 0x10, + IBV_OPCODE_ACKNOWLEDGE = 0x11, + IBV_OPCODE_ATOMIC_ACKNOWLEDGE = 0x12, + IBV_OPCODE_COMPARE_SWAP = 0x13, + IBV_OPCODE_FETCH_ADD = 0x14, + + /* real constants follow -- see comment about above IBV_OPCODE() + macro for more details */ + + /* RC */ + IBV_OPCODE(RC, SEND_FIRST), + IBV_OPCODE(RC, SEND_MIDDLE), + IBV_OPCODE(RC, SEND_LAST), + IBV_OPCODE(RC, SEND_LAST_WITH_IMMEDIATE), + IBV_OPCODE(RC, SEND_ONLY), + IBV_OPCODE(RC, SEND_ONLY_WITH_IMMEDIATE), + IBV_OPCODE(RC, RDMA_WRITE_FIRST), + IBV_OPCODE(RC, RDMA_WRITE_MIDDLE), + IBV_OPCODE(RC, RDMA_WRITE_LAST), + IBV_OPCODE(RC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IBV_OPCODE(RC, RDMA_WRITE_ONLY), + IBV_OPCODE(RC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IBV_OPCODE(RC, RDMA_READ_REQUEST), + IBV_OPCODE(RC, RDMA_READ_RESPONSE_FIRST), + IBV_OPCODE(RC, RDMA_READ_RESPONSE_MIDDLE), + IBV_OPCODE(RC, RDMA_READ_RESPONSE_LAST), + IBV_OPCODE(RC, RDMA_READ_RESPONSE_ONLY), + IBV_OPCODE(RC, ACKNOWLEDGE), + IBV_OPCODE(RC, ATOMIC_ACKNOWLEDGE), + IBV_OPCODE(RC, COMPARE_SWAP), + IBV_OPCODE(RC, FETCH_ADD), + + /* UC */ + IBV_OPCODE(UC, SEND_FIRST), + IBV_OPCODE(UC, SEND_MIDDLE), + IBV_OPCODE(UC, SEND_LAST), + IBV_OPCODE(UC, SEND_LAST_WITH_IMMEDIATE), + IBV_OPCODE(UC, SEND_ONLY), + IBV_OPCODE(UC, SEND_ONLY_WITH_IMMEDIATE), + IBV_OPCODE(UC, RDMA_WRITE_FIRST), + IBV_OPCODE(UC, RDMA_WRITE_MIDDLE), + IBV_OPCODE(UC, RDMA_WRITE_LAST), + IBV_OPCODE(UC, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IBV_OPCODE(UC, RDMA_WRITE_ONLY), + IBV_OPCODE(UC, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + + /* RD */ + IBV_OPCODE(RD, SEND_FIRST), + IBV_OPCODE(RD, SEND_MIDDLE), + IBV_OPCODE(RD, SEND_LAST), + IBV_OPCODE(RD, SEND_LAST_WITH_IMMEDIATE), + IBV_OPCODE(RD, SEND_ONLY), + IBV_OPCODE(RD, SEND_ONLY_WITH_IMMEDIATE), + IBV_OPCODE(RD, RDMA_WRITE_FIRST), + 
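+	/* Worked example of the macro arithmetic above:
+	   IBV_OPCODE(RD, RDMA_WRITE_MIDDLE) defines
+	   IBV_OPCODE_RD_RDMA_WRITE_MIDDLE = 0x40 + 0x07 = 0x47. */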
IBV_OPCODE(RD, RDMA_WRITE_MIDDLE), + IBV_OPCODE(RD, RDMA_WRITE_LAST), + IBV_OPCODE(RD, RDMA_WRITE_LAST_WITH_IMMEDIATE), + IBV_OPCODE(RD, RDMA_WRITE_ONLY), + IBV_OPCODE(RD, RDMA_WRITE_ONLY_WITH_IMMEDIATE), + IBV_OPCODE(RD, RDMA_READ_REQUEST), + IBV_OPCODE(RD, RDMA_READ_RESPONSE_FIRST), + IBV_OPCODE(RD, RDMA_READ_RESPONSE_MIDDLE), + IBV_OPCODE(RD, RDMA_READ_RESPONSE_LAST), + IBV_OPCODE(RD, RDMA_READ_RESPONSE_ONLY), + IBV_OPCODE(RD, ACKNOWLEDGE), + IBV_OPCODE(RD, ATOMIC_ACKNOWLEDGE), + IBV_OPCODE(RD, COMPARE_SWAP), + IBV_OPCODE(RD, FETCH_ADD), + + /* UD */ + IBV_OPCODE(UD, SEND_ONLY), + IBV_OPCODE(UD, SEND_ONLY_WITH_IMMEDIATE) +}; + +#endif /* INFINIBAND_OPCODE_H */ diff --git a/branches/Ndi/inc/complib/cl_async_proc.h b/branches/Ndi/inc/complib/cl_async_proc.h new file mode 100644 index 00000000..083571a4 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_async_proc.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of the asynchronous processing module. + * + * Environment: + * All + */ + + +#ifndef _CL_ASYNC_PROC_H_ +#define _CL_ASYNC_PROC_H_ + + +#include +#include +#include +#include + + +/****h* Component Library/Asynchronous Processor +* NAME +* Asynchronous Processor +* +* DESCRIPTION +* The asynchronous processor provides threads for executing queued callbacks. +* +* The threads in the asynchronous processor wait for callbacks to be queued. +* +* The asynchronous processor functions operate on a cl_async_proc_t structure +* which should be treated as opaque and manipulated only through the provided +* functions. +* +* SEE ALSO +* Structures: +* cl_async_proc_t, cl_async_proc_item_t +* +* Initialization: +* cl_async_proc_construct, cl_async_proc_init, cl_async_proc_destroy +* +* Manipulation: +* cl_async_proc_queue +*********/ + + +/****s* Component Library: Asynchronous Processor/cl_async_proc_t +* NAME +* cl_async_proc_t +* +* DESCRIPTION +* Asynchronous processor structure. +* +* The cl_async_proc_t structure should be treated as opaque, and should be +* manipulated only through the provided functions. 
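+*
+*	The dispatch model: pool threads dequeue items from item_queue and
+*	invoke each item's pfn_callback.  A callback may queue further items,
+*	so the queue lock is presumably not held across the callback.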
+*
+* SYNOPSIS
+*/
+typedef struct _cl_async_proc
+{
+	cl_thread_pool_t	thread_pool;
+	cl_qlist_t			item_queue;
+	cl_spinlock_t		lock;
+	cl_state_t			state;
+
+} cl_async_proc_t;
+/*
+* FIELDS
+*	thread_pool
+*		Thread pool that will invoke the callbacks.
+*
+*	item_queue
+*		Queue of items that the threads should process.
+*
+*	lock
+*		Lock used to synchronize access to the item queue.
+*
+*	state
+*		State of the asynchronous processor, used to validate that it
+*		is initialized before items are queued.
+*
+* SEE ALSO
+*	Asynchronous Processor
+*********/
+
+
+/*
+ * Declare the structure so we can reference it in the following function
+ * prototype.
+ */
+typedef struct _cl_async_proc_item *__p_cl_async_proc_item_t;
+
+
+/****d* Component Library: Asynchronous Processor/cl_pfn_async_proc_cb_t
+* NAME
+*	cl_pfn_async_proc_cb_t
+*
+* DESCRIPTION
+*	The cl_pfn_async_proc_cb_t function type defines the prototype for
+*	callbacks queued to and invoked by the asynchronous processor.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_async_proc_cb_t)(
+	IN	struct _cl_async_proc_item	*p_item );
+/*
+* PARAMETERS
+*	p_item
+*		Pointer to the cl_async_proc_item_t structure that was queued in
+*		a call to cl_async_proc_queue.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_async_proc_queue
+*	function.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_item_t
+*********/
+
+
+/****s* Component Library: Asynchronous Processor/cl_async_proc_item_t
+* NAME
+*	cl_async_proc_item_t
+*
+* DESCRIPTION
+*	Asynchronous processor item structure passed to the cl_async_proc_queue
+*	function to queue a callback for execution.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_async_proc_item
+{
+	cl_pool_item_t			pool_item;
+	cl_pfn_async_proc_cb_t	pfn_callback;
+
+} cl_async_proc_item_t;
+/*
+* FIELDS
+*	pool_item
+*		Pool item for queuing the item to be invoked by the asynchronous
+*		processor's threads.  This field is defined as a pool item to
+*		allow items to be managed by a pool.
+*
+*	pfn_callback
+*		Pointer to a callback function to invoke when the item is dequeued.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_queue, cl_pfn_async_proc_cb_t
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Asynchronous Processor/cl_async_proc_construct
+* NAME
+*	cl_async_proc_construct
+*
+* DESCRIPTION
+*	The cl_async_proc_construct function initializes the state of an
+*	asynchronous processor.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_async_proc_construct(
+	IN	cl_async_proc_t* const	p_async_proc );
+/*
+* PARAMETERS
+*	p_async_proc
+*		[in] Pointer to an asynchronous processor structure.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_async_proc_destroy without first calling
+*	cl_async_proc_init.
+*
+*	Calling cl_async_proc_construct is a prerequisite to calling any other
+*	asynchronous processor function except cl_async_proc_init.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_init, cl_async_proc_destroy
+*********/
+
+
+/****f* Component Library: Asynchronous Processor/cl_async_proc_init
+* NAME
+*	cl_async_proc_init
+*
+* DESCRIPTION
+*	The cl_async_proc_init function initializes an asynchronous processor
+*	for use.
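+*
+*	A minimal usage sketch (my_job_t, my_job_cb, do_work, g_async and
+*	g_job are hypothetical names, not part of this header):
+*
+*		typedef struct _my_job
+*		{
+*			cl_async_proc_item_t	item;
+*			int						arg;
+*		} my_job_t;
+*
+*		static void CL_API
+*		my_job_cb(
+*			IN	cl_async_proc_item_t	*p_item )
+*		{
+*			my_job_t *p_job = PARENT_STRUCT( p_item, my_job_t, item );
+*			do_work( p_job->arg );
+*		}
+*
+*		cl_async_proc_construct( &g_async );
+*		if( cl_async_proc_init( &g_async, 1, "my_async" ) == CL_SUCCESS )
+*		{
+*			g_job.item.pfn_callback = my_job_cb;
+*			cl_async_proc_queue( &g_async, &g_job.item );
+*		}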
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_async_proc_init(
+	IN	cl_async_proc_t* const	p_async_proc,
+	IN	const uint32_t			thread_count,
+	IN	const char* const		name );
+/*
+* PARAMETERS
+*	p_async_proc
+*		[in] Pointer to an asynchronous processor structure to initialize.
+*
+*	thread_count
+*		[in] Number of threads to be managed by the asynchronous processor.
+*
+*	name
+*		[in] Name to associate with the threads.  The name may be up to 16
+*		characters, including a terminating null character.  All threads
+*		created in the asynchronous processor have the same name.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the asynchronous processor creation succeeded.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+*	the asynchronous processor.
+*
+*	CL_ERROR if the threads could not be created.
+*
+* NOTES
+*	cl_async_proc_init creates and starts the specified number of threads.
+*	If thread_count is zero, the asynchronous processor creates as many
+*	threads as there are processors in the system.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_construct, cl_async_proc_destroy,
+*	cl_async_proc_queue
+*********/
+
+
+/****f* Component Library: Asynchronous Processor/cl_async_proc_destroy
+* NAME
+*	cl_async_proc_destroy
+*
+* DESCRIPTION
+*	The cl_async_proc_destroy function performs any necessary cleanup
+*	for an asynchronous processor.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_async_proc_destroy(
+	IN	cl_async_proc_t* const	p_async_proc );
+/*
+* PARAMETERS
+*	p_async_proc
+*		[in] Pointer to an asynchronous processor structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function blocks until all threads exit, and must therefore not
+*	be called from any of the asynchronous processor's threads.  Because of
+*	its blocking nature, callers of cl_async_proc_destroy must ensure that
+*	entering a wait state is valid from the calling thread context.
+*
+*	This function should only be called after a call to
+*	cl_async_proc_construct or cl_async_proc_init.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_construct, cl_async_proc_init
+*********/
+
+
+/****f* Component Library: Asynchronous Processor/cl_async_proc_queue
+* NAME
+*	cl_async_proc_queue
+*
+* DESCRIPTION
+*	The cl_async_proc_queue function queues a callback to an asynchronous
+*	processor.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_async_proc_queue(
+	IN	cl_async_proc_t* const		p_async_proc,
+	IN	cl_async_proc_item_t* const	p_item );
+/*
+* PARAMETERS
+*	p_async_proc
+*		[in] Pointer to the asynchronous processor structure to which to
+*		queue the callback.
+*
+*	p_item
+*		[in] Pointer to an asynchronous processor item to queue for execution.
+*		The pfn_callback field of the item must be valid.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Asynchronous Processor, cl_async_proc_init, cl_pfn_async_proc_cb_t
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+
+#endif	/* !defined(_CL_ASYNC_PROC_H_) */
diff --git a/branches/Ndi/inc/complib/cl_atomic.h b/branches/Ndi/inc/complib/cl_atomic.h
new file mode 100644
index 00000000..11cce604
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_atomic.h
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of atomic manipulation functions. + * + * Environment: + * All + */ + + +#ifndef _CL_ATOMIC_H_ +#define _CL_ATOMIC_H_ + + +#include + + +/****h* Component Library/Atomic Operations +* NAME +* Atomic Operations +* +* DESCRIPTION +* The Atomic Operations functions allow callers to operate on +* 32-bit signed integers in an atomic fashion. +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****f* Component Library: Atomic Operations/cl_atomic_inc +* NAME +* cl_atomic_inc +* +* DESCRIPTION +* The cl_atomic_inc function atomically increments a 32-bit signed +* integer and returns the incremented value. +* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_inc( + IN atomic32_t* const p_value ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer to increment. +* +* RETURN VALUE +* Returns the incremented value pointed to by p_value. +* +* NOTES +* The provided value is incremented and its value returned in one atomic +* operation. +* +* cl_atomic_inc maintains data consistency without requiring additional +* synchronization mechanisms in multi-threaded environments. +* +* SEE ALSO +* Atomic Operations, cl_atomic_dec, cl_atomic_add, cl_atomic_sub, +* cl_atomic_xchg, cl_atomic_comp_xchg +*********/ + + +/****f* Component Library: Atomic Operations/cl_atomic_dec +* NAME +* cl_atomic_dec +* +* DESCRIPTION +* The cl_atomic_dec function atomically decrements a 32-bit signed +* integer and returns the decremented value. +* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_dec( + IN atomic32_t* const p_value ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer to decrement. +* +* RETURN VALUE +* Returns the decremented value pointed to by p_value. +* +* NOTES +* The provided value is decremented and its value returned in one atomic +* operation. +* +* cl_atomic_dec maintains data consistency without requiring additional +* synchronization mechanisms in multi-threaded environments. +* +* SEE ALSO +* Atomic Operations, cl_atomic_inc, cl_atomic_add, cl_atomic_sub, +* cl_atomic_xchg, cl_atomic_comp_xchg +*********/ + + +/****f* Component Library: Atomic Operations/cl_atomic_add +* NAME +* cl_atomic_add +* +* DESCRIPTION +* The cl_atomic_add function atomically adds a value to a +* 32-bit signed integer and returns the resulting value. 
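+*
+*	For example (an illustrative sketch, not part of the original notes;
+*	bytes_sent and account_send are assumed names), a statistics counter
+*	can be updated safely from several threads:
+*
+*		static atomic32_t	bytes_sent = 0;
+*
+*		/* Returns the running total including this send. */
+*		int32_t
+*		account_send(
+*			IN	const int32_t	byte_count )
+*		{
+*			return cl_atomic_add( &bytes_sent, byte_count );
+*		}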
+* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_add( + IN atomic32_t* const p_value, + IN const int32_t increment ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer that will be added to. +* +* increment +* [in] Value by which to increment the integer pointed to by p_value. +* +* RETURN VALUE +* Returns the value pointed to by p_value after the addition. +* +* NOTES +* The provided increment is added to the value and the result returned in +* one atomic operation. +* +* cl_atomic_add maintains data consistency without requiring additional +* synchronization mechanisms in multi-threaded environments. +* +* SEE ALSO +* Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_sub, +* cl_atomic_xchg, cl_atomic_comp_xchg +*********/ + + +/****f* Component Library: Atomic Operations/cl_atomic_sub +* NAME +* cl_atomic_sub +* +* DESCRIPTION +* The cl_atomic_sub function atomically subtracts a value from a +* 32-bit signed integer and returns the resulting value. +* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_sub( + IN atomic32_t* const p_value, + IN const int32_t decrement ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer that will be subtracted from. +* +* decrement +* [in] Value by which to decrement the integer pointed to by p_value. +* +* RETURN VALUE +* Returns the value pointed to by p_value after the subtraction. +* +* NOTES +* The provided decrement is subtracted from the value and the result +* returned in one atomic operation. +* +* cl_atomic_sub maintains data consistency without requiring additional +* synchronization mechanisms in multi-threaded environments. +* +* SEE ALSO +* Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add, +* cl_atomic_xchg, cl_atomic_comp_xchg +*********/ + + +/****f* Component Library: Atomic Operations/cl_atomic_xchg +* NAME +* cl_atomic_xchg +* +* DESCRIPTION +* The cl_atomic_xchg function atomically sets a value of a +* 32-bit signed integer and returns the initial value. +* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_xchg( + IN atomic32_t* const p_value, + IN const int32_t new_value ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer to exchange with new_value. +* +* new_value +* [in] Value to assign. +* +* RETURN VALUE +* Returns the initial value pointed to by p_value. +* +* NOTES +* The provided value is exchanged with new_value and its initial value +* returned in one atomic operation. +* +* cl_atomic_xchg maintains data consistency without requiring additional +* synchronization mechanisms in multi-threaded environments. +* +* SEE ALSO +* Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add, +* cl_atomic_sub, cl_atomic_comp_xchg +*********/ + + +/****f* Component Library: Atomic Operations/cl_atomic_comp_xchg +* NAME +* cl_atomic_comp_xchg +* +* DESCRIPTION +* The cl_atomic_comp_xchg function atomically compares a 32-bit signed +* integer to a desired value, sets that integer to the +* specified value if equal, and returns the initial value. +* +* SYNOPSIS +*/ +CL_EXPORT int32_t CL_API +cl_atomic_comp_xchg( + IN atomic32_t* const p_value, + IN const int32_t compare, + IN const int32_t new_value ); +/* +* PARAMETERS +* p_value +* [in] Pointer to a 32-bit integer to exchange with new_value. +* +* compare +* [in] Value to compare to the value pointed to by p_value. +* +* new_value +* [in] Value to assign if the value pointed to by p_value is equal to +* the value specified by the compare parameter. 
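+*
+*	As an illustration (a sketch, not part of the original notes; init_state
+*	and init_once are assumed names), a one-time initialization guard can be
+*	built on this routine:
+*
+*		static atomic32_t	init_state = 0;
+*
+*		void
+*		init_once( void )
+*		{
+*			/* Only the caller that wins the 0 -> 1 transition initializes. */
+*			if( cl_atomic_comp_xchg( &init_state, 0, 1 ) == 0 )
+*			{
+*				/* ...perform one-time setup... */
+*			}
+*		}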
+*
+* RETURN VALUE
+*	Returns the initial value of the variable pointed to by p_value.
+*
+* NOTES
+*	The value pointed to by p_value is compared to the value specified by the
+*	compare parameter.  If the two values are equal, the p_value variable is
+*	set to new_value.  The initial value pointed to by p_value is returned.
+*
+*	cl_atomic_comp_xchg maintains data consistency without requiring additional
+*	synchronization mechanisms in multi-threaded environments.
+*
+* SEE ALSO
+*	Atomic Operations, cl_atomic_inc, cl_atomic_dec, cl_atomic_add,
+*	cl_atomic_sub, cl_atomic_xchg
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_ATOMIC_H_ */
diff --git a/branches/Ndi/inc/complib/cl_byteswap.h b/branches/Ndi/inc/complib/cl_byteswap.h
new file mode 100644
index 00000000..60fd59ae
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_byteswap.h
@@ -0,0 +1,539 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Provides byte swapping utilities.  Basic functions are obtained from
+ *	platform-specific implementations in ibyteswap_osd.h.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_BYTESWAP_H_
+#define _CL_BYTESWAP_H_
+
+
+#include
+#include
+
+
+/****h* Component Library/Byte Swapping
+* NAME
+*	Byte Swapping
+*
+* DESCRIPTION
+*	The byte swapping functions and macros allow swapping bytes from network
+*	byte order to host byte order.
+*
+*	All data transmitted between systems should be in network byte order.
+*	In order to utilize such data, it must be converted to host byte order
+*	before use.
+*
+* SEE ALSO
+*	Functions:
+*		cl_ntoh16, cl_hton16, cl_ntoh32, cl_hton32, cl_ntoh64, cl_hton64,
+*		cl_ntoh
+*
+*	Macros:
+*		CL_NTOH16, CL_HTON16, CL_NTOH32, CL_HTON32, CL_NTOH64, CL_HTON64
+*********/
+
+
+/*
+ * The ibyteswap_osd.h provides the following macros.
+ *		__LITTLE_ENDIAN
+ *		__BIG_ENDIAN
+ *		__BYTE_ORDER
+ *
+ * If the platform provides byte swapping functions, ibyteswap_osd.h also
+ * provides the following macros.
+ *		ntoh16, hton16
+ *		ntoh32, hton32
+ *		ntoh64, hton64
+ */
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****d* Component Library: Byte Swapping/CL_NTOH16
+* NAME
+*	CL_NTOH16
+*
+* DESCRIPTION
+*	The CL_NTOH16 macro converts a 16-bit value from network byte order to
+*	host byte order.
The CL_NTOH16 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_NTOH16 is less efficient +* than the cl_ntoh16 function. +* +* SYNOPSIS +* CL_NTOH16( val ); +* +* PARAMETERS +* val +* [in] 16-bit value to swap from network byte order to host byte order. +* +* RESULT +* Value of val converted to host byte order. +* +* NOTES +* This macro is analogous to CL_HTON16. +* +* SEE ALSO +* Byte Swapping, CL_HTON16, CL_NTOH32, CL_NTOH64, +* cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****d* Component Library: Byte Swapping/CL_HTON16 +* NAME +* CL_HTON16 +* +* DESCRIPTION +* The CL_HTON16 macro converts a 16-bit value from host byte order to +* network byte order. The CL_HTON16 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_HTON16 is less efficient +* than the cl_hton16 function. +* +* SYNOPSIS +* CL_HTON16( val ); +* +* PARAMETERS +* val +* [in] 16-bit value to swap from host byte order to network byte order. +* +* RESULT +* Value of val converted to network byte order. +* +* NOTES +* This macro is analogous to CL_NTOH16. +* +* SEE ALSO +* Byte Swapping, CL_NTOH16, CL_HTON32, CL_HTON64, +* cl_hton16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#if CPU_LE + #define CL_NTOH16( x ) (uint16_t)( \ + (((uint16_t)(x) & 0x00FF) << 8) | \ + (((uint16_t)(x) & 0xFF00) >> 8) ) +#else + #define CL_NTOH16( x ) (x) +#endif +#define CL_HTON16 CL_NTOH16 + + +/****f* Component Library: Byte Swapping/cl_ntoh16 +* NAME +* cl_ntoh16 +* +* DESCRIPTION +* The cl_ntoh16 function converts a 16-bit value from network byte order to +* host byte order. +* +* SYNOPSIS +* uint16_t +* cl_ntoh16( +* IN const uint16_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from network byte order to host byte order. +* +* RETURN VALUE +* Value of val converted to host byte order. +* +* NOTES +* This function is analogous to cl_hton16. +* +* SEE ALSO +* Byte Swapping, cl_hton16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****f* Component Library: Byte Swapping/cl_hton16 +* NAME +* cl_hton16 +* +* DESCRIPTION +* The cl_hton16 function converts a 16-bit value from host byte order to +* network byte order. +* +* SYNOPSIS +* uint16_t +* cl_hton16( +* IN const uint16_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from host byte order to network byte order . +* +* RETURN VALUE +* Value of val converted to network byte order. +* +* NOTES +* This function is analogous to cl_ntoh16. +* +* SEE ALSO +* Byte Swapping, cl_ntoh16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#ifndef cl_ntoh16 + #define cl_ntoh16 CL_NTOH16 + #define cl_hton16 CL_HTON16 +#endif + + +/****d* Component Library: Byte Swapping/CL_NTOH32 +* NAME +* CL_NTOH32 +* +* DESCRIPTION +* The CL_NTOH32 macro converts a 32-bit value from network byte order to +* host byte order. The CL_NTOH32 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_NTOH32 is less efficient +* than the cl_ntoh32 function. +* +* SYNOPSIS +* CL_NTOH32( val ); +* +* PARAMETERS +* val +* [in] 32-bit value to swap from network byte order to host byte order. +* +* RESULT +* Value of val converted to host byte order. +* +* NOTES +* This macro is analogous to CL_HTON32. +* +* SEE ALSO +* Byte Swapping, CL_HTON32, CL_NTOH16, CL_NTOH64, +* cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****d* Component Library: Byte Swapping/CL_HTON32 +* NAME +* CL_HTON32 +* +* DESCRIPTION +* The CL_HTON32 macro converts a 32-bit value from host byte order to +* network byte order. 
The CL_HTON32 macro will cause constant values to be
+*	swapped by the pre-processor.  For variables, CL_HTON32 is less efficient
+*	than the cl_hton32 function.
+*
+* SYNOPSIS
+*	CL_HTON32( val );
+*
+* PARAMETERS
+*	val
+*		[in] 32-bit value to swap from host byte order to network byte order.
+*
+* RESULT
+*	Value of val converted to network byte order.
+*
+* NOTES
+*	This macro is analogous to CL_NTOH32.
+*
+* SEE ALSO
+*	Byte Swapping, CL_NTOH32, CL_HTON16, CL_HTON64,
+*	cl_hton16, cl_hton32, cl_hton64, cl_ntoh
+*********/
+#if CPU_LE
+	#define CL_NTOH32( x )		(uint32_t)(		\
+		(((uint32_t)(x) & 0x000000FF) << 24) |	\
+		(((uint32_t)(x) & 0x0000FF00) << 8) |	\
+		(((uint32_t)(x) & 0x00FF0000) >> 8) |	\
+		(((uint32_t)(x) & 0xFF000000) >> 24) )
+#else
+	#define CL_NTOH32( x )		(x)
+#endif
+#define CL_HTON32	CL_NTOH32
+
+
+/****f* Component Library: Byte Swapping/cl_ntoh32
+* NAME
+*	cl_ntoh32
+*
+* DESCRIPTION
+*	The cl_ntoh32 function converts a 32-bit value from network byte order to
+*	host byte order.
+*
+* SYNOPSIS
+*	uint32_t
+*	cl_ntoh32(
+*		IN	const uint32_t	val );
+*
+* PARAMETERS
+*	val
+*		[in] Value to swap from network byte order to host byte order.
+*
+* RETURN VALUE
+*	Value of val converted to host byte order.
+*
+* NOTES
+*	This function is analogous to cl_hton32.
+*
+* SEE ALSO
+*	Byte Swapping, cl_hton32, cl_ntoh16, cl_ntoh64, cl_ntoh
+*********/
+/****f* Component Library: Byte Swapping/cl_hton32
+* NAME
+*	cl_hton32
+*
+* DESCRIPTION
+*	The cl_hton32 function converts a 32-bit value from host byte order to
+*	network byte order.
+*
+* SYNOPSIS
+*	uint32_t
+*	cl_hton32(
+*		IN	const uint32_t	val );
+*
+* PARAMETERS
+*	val
+*		[in] Value to swap from host byte order to network byte order.
+*
+* RETURN VALUE
+*	Value of val converted to network byte order.
+*
+* NOTES
+*	This function is analogous to cl_ntoh32.
+*
+* SEE ALSO
+*	Byte Swapping, cl_ntoh32, cl_hton16, cl_hton64, cl_ntoh
+*********/
+#ifndef cl_ntoh32
+	#define cl_ntoh32	CL_NTOH32
+	#define cl_hton32	CL_HTON32
+#endif
+
+
+/****d* Component Library: Byte Swapping/CL_NTOH64
+* NAME
+*	CL_NTOH64
+*
+* DESCRIPTION
+*	The CL_NTOH64 macro converts a 64-bit value from network byte order to
+*	host byte order.  The CL_NTOH64 macro will cause constant values to be
+*	swapped by the pre-processor.  For variables, CL_NTOH64 is less efficient
+*	than the cl_ntoh64 function.
+*
+* SYNOPSIS
+*	CL_NTOH64( val );
+*
+* PARAMETERS
+*	val
+*		[in] 64-bit value to swap from network byte order to host byte order.
+*
+* RESULT
+*	Value of val converted to host byte order.
+*
+* NOTES
+*	This macro is analogous to CL_HTON64.
+*
+* SEE ALSO
+*	Byte Swapping, CL_HTON64, CL_NTOH16, CL_NTOH32,
+*	cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh
+*********/
+/****d* Component Library: Byte Swapping/CL_HTON64
+* NAME
+*	CL_HTON64
+*
+* DESCRIPTION
+*	The CL_HTON64 macro converts a 64-bit value from host byte order to
+*	network byte order.  The CL_HTON64 macro will cause constant values to be
+*	swapped by the pre-processor.  For variables, CL_HTON64 is less efficient
+*	than the cl_hton64 function.
+*
+* SYNOPSIS
+*	CL_HTON64( val );
+*
+* PARAMETERS
+*	val
+*		[in] 64-bit value to swap from host byte order to network byte order.
+*
+* RESULT
+*	Value of val converted to network byte order.
+*
+* NOTES
+*	This macro is analogous to CL_NTOH64.
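+*
+*	As an illustrative sketch (not part of the original notes; my_hdr_t,
+*	prepare_hdr and their fields are assumed names), constants can use the
+*	macro forms while variables use the function forms:
+*
+*		typedef struct _my_hdr
+*		{
+*			uint16_t	version;
+*			uint32_t	length;
+*			uint64_t	id;
+*		} my_hdr_t;
+*
+*		void
+*		prepare_hdr(
+*			IN OUT	my_hdr_t* const	p_hdr,
+*			IN	const uint32_t		length,
+*			IN	const uint64_t		id )
+*		{
+*			/* Constant: swapped by the pre-processor. */
+*			p_hdr->version = CL_HTON16( 1 );
+*			/* Variables: swapped at run time. */
+*			p_hdr->length = cl_hton32( length );
+*			p_hdr->id = cl_hton64( id );
+*		}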
+*
+* SEE ALSO
+*	Byte Swapping, CL_NTOH64, CL_HTON16, CL_HTON32,
+*	cl_hton16, cl_hton32, cl_hton64, cl_ntoh
+*********/
+#if CPU_LE
+	#define CL_NTOH64( x )		(uint64_t)(						\
+		(((uint64_t)(x) & CL_CONST64(0x00000000000000FF)) << 56) |	\
+		(((uint64_t)(x) & CL_CONST64(0x000000000000FF00)) << 40) |	\
+		(((uint64_t)(x) & CL_CONST64(0x0000000000FF0000)) << 24) |	\
+		(((uint64_t)(x) & CL_CONST64(0x00000000FF000000)) << 8 ) |	\
+		(((uint64_t)(x) & CL_CONST64(0x000000FF00000000)) >> 8 ) |	\
+		(((uint64_t)(x) & CL_CONST64(0x0000FF0000000000)) >> 24) |	\
+		(((uint64_t)(x) & CL_CONST64(0x00FF000000000000)) >> 40) |	\
+		(((uint64_t)(x) & CL_CONST64(0xFF00000000000000)) >> 56) )
+#else
+	#define CL_NTOH64( x )		(x)
+#endif
+#define CL_HTON64	CL_NTOH64
+
+
+/****f* Component Library: Byte Swapping/cl_ntoh64
+* NAME
+*	cl_ntoh64
+*
+* DESCRIPTION
+*	The cl_ntoh64 function converts a 64-bit value from network byte order to
+*	host byte order.
+*
+* SYNOPSIS
+*	uint64_t
+*	cl_ntoh64(
+*		IN	const uint64_t	val );
+*
+* PARAMETERS
+*	val
+*		[in] Value to swap from network byte order to host byte order.
+*
+* RETURN VALUE
+*	Value of val converted to host byte order.
+*
+* NOTES
+*	This function is analogous to cl_hton64.
+*
+* SEE ALSO
+*	Byte Swapping, cl_hton64, cl_ntoh16, cl_ntoh32, cl_ntoh
+*********/
+/****f* Component Library: Byte Swapping/cl_hton64
+* NAME
+*	cl_hton64
+*
+* DESCRIPTION
+*	The cl_hton64 function converts a 64-bit value from host byte order to
+*	network byte order.
+*
+* SYNOPSIS
+*	uint64_t
+*	cl_hton64(
+*		IN	const uint64_t	val );
+*
+* PARAMETERS
+*	val
+*		[in] Value to swap from host byte order to network byte order.
+*
+* RETURN VALUE
+*	Value of val converted to network byte order.
+*
+* NOTES
+*	This function is analogous to cl_ntoh64.
+*
+* SEE ALSO
+*	Byte Swapping, cl_ntoh64, cl_hton16, cl_hton32, cl_ntoh
+*********/
+#ifndef cl_ntoh64
+	#define cl_ntoh64	CL_NTOH64
+	#define cl_hton64	CL_HTON64
+#endif
+
+
+/****f* Component Library: Byte Swapping/cl_ntoh
+* NAME
+*	cl_ntoh
+*
+* DESCRIPTION
+*	The cl_ntoh function converts a value from network byte order to
+*	host byte order.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_ntoh(
+	OUT	char* const			p_dest,
+	IN	const char* const	p_src,
+	IN	const uint8_t		size )
+{
+#if CPU_LE
+	uint8_t	i;
+	char	temp;
+
+	if( p_src == p_dest )
+	{
+		/* Swap in place if source and destination are the same. */
+		for( i = 0; i < size / 2; i++ )
+		{
+			temp = p_dest[i];
+			p_dest[i] = p_src[size - 1 - i];
+			p_dest[size - 1 - i] = temp;
+		}
+	}
+	else
+	{
+		for( i = 0; i < size; i++ )
+			p_dest[i] = p_src[size - 1 - i];
+	}
+#else
+	/*
+	 * If the source and destination are not the same, copy the source to
+	 * the destination.
+	 */
+	if( p_src != p_dest )
+		cl_memcpy( p_dest, p_src, size );
+#endif
+}
+/*
+* PARAMETERS
+*	p_dest
+*		[out] Pointer to a byte array to contain the converted value of p_src.
+*
+*	p_src
+*		[in] Pointer to a byte array to be converted from network byte
+*		ordering.
+*
+*	size
+*		[in] Number of bytes to swap.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_ntoh can perform in place swapping if both p_src and p_dest point to
+*	the same buffer.
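+*
+*	For example (an illustrative sketch; raw_gid is an assumed name), a
+*	16-byte value with no integral type can be swapped in place:
+*
+*		char	raw_gid[16];
+*
+*		/* Convert the 128-bit value from network to host byte order. */
+*		cl_ntoh( raw_gid, raw_gid, sizeof(raw_gid) );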
+*
+* SEE ALSO
+*	Byte Swapping, cl_ntoh16, cl_ntoh32, cl_ntoh64
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_BYTESWAP_H_ */
diff --git a/branches/Ndi/inc/complib/cl_comppool.h b/branches/Ndi/inc/complib/cl_comppool.h
new file mode 100644
index 00000000..19050918
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_comppool.h
@@ -0,0 +1,619 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of the composite pool.  The composite pool manages a pool
+ *	of composite objects.  A composite object is an object that is made of
+ *	multiple sub-objects.  The pool can grow to meet demand, limited only
+ *	by system memory.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_COMP_POOL_H_
+#define _CL_COMP_POOL_H_
+
+
+#include
+
+
+/****h* Component Library/Composite Pool
+* NAME
+*	Composite Pool
+*
+* DESCRIPTION
+*	The Composite Pool provides a self-contained and self-sustaining pool of
+*	user-defined composite objects.
+*
+*	A composite object is an object that is composed of one or more
+*	sub-objects, each of which needs to be treated separately for
+*	initialization.  Objects can be retrieved from the pool as long as there
+*	is memory in the system.
+*
+*	To aid in object oriented design, the composite pool provides the user
+*	the ability to specify callbacks that are invoked for each object for
+*	construction, initialization, and destruction.  Constructor and destructor
+*	callback functions may not fail.
+*
+*	A composite pool does not return memory to the system as the user returns
+*	objects to the pool.  The only method of returning memory to the system is
+*	to destroy the pool.
+*
+*	The composite pool functions operate on a cl_cpool_t structure which
+*	should be treated as opaque and should be manipulated only through the
+*	provided functions.
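+*
+*	The following sketch (illustrative only, not part of the original notes;
+*	my_hdr_t, my_data_t, my_init and my_pool are assumed names) builds a pool
+*	of two-component objects and chains the components in the initializer:
+*
+*		typedef struct _my_data
+*		{
+*			uint32_t	payload;
+*		} my_data_t;
+*
+*		typedef struct _my_hdr
+*		{
+*			my_data_t	*p_data;	/* Chains to the second component. */
+*		} my_hdr_t;
+*
+*		static cl_status_t CL_API
+*		my_init(
+*			IN	void** const	p_comp_array,
+*			IN	const uint32_t	num_components,
+*			IN	void*			context )
+*		{
+*			my_hdr_t	*p_hdr = (my_hdr_t*)p_comp_array[0];
+*
+*			UNUSED_PARAM( context );
+*			CL_ASSERT( num_components == 2 );
+*			/* Chain the second component to the first. */
+*			p_hdr->p_data = (my_data_t*)p_comp_array[1];
+*			return CL_SUCCESS;
+*		}
+*
+*		cl_cpool_t	my_pool;
+*		size_t		sizes[2] = { sizeof(my_hdr_t), sizeof(my_data_t) };
+*
+*		cl_cpool_construct( &my_pool );
+*		if( cl_cpool_init( &my_pool, 16, 0, 4, sizes, 2,
+*			my_init, NULL, NULL ) == CL_SUCCESS )
+*		{
+*			my_hdr_t	*p_hdr = (my_hdr_t*)cl_cpool_get( &my_pool );
+*			if( p_hdr )
+*				cl_cpool_put( &my_pool, p_hdr );
+*			cl_cpool_destroy( &my_pool );
+*		}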
+*
+* SEE ALSO
+*	Structures:
+*		cl_cpool_t
+*
+*	Callbacks:
+*		cl_pfn_cpool_init_t, cl_pfn_cpool_dtor_t
+*
+*	Initialization/Destruction:
+*		cl_cpool_construct, cl_cpool_init, cl_cpool_destroy
+*
+*	Manipulation:
+*		cl_cpool_get, cl_cpool_put, cl_cpool_grow
+*
+*	Attributes:
+*		cl_is_cpool_inited, cl_cpool_count
+*********/
+
+
+/****d* Component Library: Composite Pool/cl_pfn_cpool_init_t
+* NAME
+*	cl_pfn_cpool_init_t
+*
+* DESCRIPTION
+*	The cl_pfn_cpool_init_t function type defines the prototype for
+*	functions used as initializers for objects being allocated by a
+*	composite pool.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_cpool_init_t)(
+	IN	void** const	p_comp_array,
+	IN	const uint32_t	num_components,
+	IN	void*			context );
+/*
+* PARAMETERS
+*	p_comp_array
+*		[in] Pointer to the first entry in an array of pointers, each of
+*		which points to a component of the object to initialize.
+*
+*	num_components
+*		[in] Number of components pointed to by p_comp_array.
+*
+*	context
+*		[in] Context provided in a call to cl_cpool_init.
+*
+* RETURN VALUES
+*	Return CL_SUCCESS to indicate that initialization of the object
+*	was successful and that initialization of further objects may continue.
+*
+*	Other cl_status_t values will be returned by cl_cpool_init
+*	and cl_cpool_grow.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as an optional parameter to the
+*	cl_cpool_init function.
+*
+*	The initializer is invoked once per allocated object, allowing the user
+*	to chain components to form a composite object and perform any necessary
+*	initialization.  Returning a status other than CL_SUCCESS aborts a grow
+*	operation, initiated either through cl_cpool_init or cl_cpool_grow, and
+*	causes the initiating function to fail.  Any non-CL_SUCCESS status will
+*	be returned by the function that initiated the grow operation.
+*
+*	All memory for the requested number of components is pre-allocated.
+*
+*	When later performing a cl_cpool_get call, the return value is a pointer
+*	to the first component.
+*
+* SEE ALSO
+*	Composite Pool, cl_cpool_init, cl_cpool_grow
+*********/
+
+
+/****d* Component Library: Composite Pool/cl_pfn_cpool_dtor_t
+* NAME
+*	cl_pfn_cpool_dtor_t
+*
+* DESCRIPTION
+*	The cl_pfn_cpool_dtor_t function type defines the prototype for
+*	functions used as destructors for objects being deallocated by a
+*	composite pool.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_cpool_dtor_t)(
+	IN	void* const	p_object,
+	IN	void*		context );
+/*
+* PARAMETERS
+*	p_object
+*		[in] Pointer to an object to destruct.
+*
+*	context
+*		[in] Context provided in the call to cl_cpool_init.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as an optional parameter to the
+*	cl_cpool_init function.
+*
+*	The destructor is invoked once per allocated object, allowing the user
+*	to perform any necessary cleanup.  Users should not attempt to deallocate
+*	the memory for the composite object, as the composite pool manages
+*	object allocation and deallocation.
+*
+* SEE ALSO
+*	Composite Pool, cl_cpool_init
+*********/
+
+
+/****s* Component Library: Composite Pool/cl_cpool_t
+* NAME
+*	cl_cpool_t
+*
+* DESCRIPTION
+*	Composite pool structure.
+*
+*	The cl_cpool_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_cpool
+{
+	cl_qcpool_t			qcpool;
+	cl_pfn_cpool_init_t	pfn_init;
+	cl_pfn_cpool_dtor_t	pfn_dtor;
+	const void			*context;
+
+} cl_cpool_t;
+/*
+* FIELDS
+*	qcpool
+*		Quick composite pool that manages all objects.
+*
+*	pfn_init
+*		Pointer to the user's initializer callback, used by the pool
+*		to translate the quick composite pool's initializer callback to
+*		a composite pool initializer callback.
+*
+*	pfn_dtor
+*		Pointer to the user's destructor callback, used by the pool
+*		to translate the quick composite pool's destructor callback to
+*		a composite pool destructor callback.
+*
+*	context
+*		User's provided context for callback functions, used by the pool
+*		when invoking callbacks.
+*
+* SEE ALSO
+*	Composite Pool
+*********/
+
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****f* Component Library: Composite Pool/cl_cpool_construct
+* NAME
+*	cl_cpool_construct
+*
+* DESCRIPTION
+*	The cl_cpool_construct function constructs a composite pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_cpool_construct(
+	IN	cl_cpool_t* const	p_pool );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_cpool_t structure whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_cpool_init, cl_cpool_destroy, cl_is_cpool_inited.
+*
+*	Calling cl_cpool_construct is a prerequisite to calling any other
+*	composite pool function except cl_cpool_init.
+*
+* SEE ALSO
+*	Composite Pool, cl_cpool_init, cl_cpool_destroy, cl_is_cpool_inited
+*********/
+
+
+/****f* Component Library: Composite Pool/cl_is_cpool_inited
+* NAME
+*	cl_is_cpool_inited
+*
+* DESCRIPTION
+*	The cl_is_cpool_inited function returns whether a composite pool was
+*	successfully initialized.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_cpool_inited(
+	IN	const cl_cpool_t* const	p_pool )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_pool );
+	return( cl_is_qcpool_inited( &p_pool->qcpool ) );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_cpool_t structure whose initialization state
+*		to check.
+*
+* RETURN VALUES
+*	TRUE if the composite pool was initialized successfully.
+*
+*	FALSE otherwise.
+*
+* NOTES
+*	Allows checking the state of a composite pool to determine if invoking
+*	member functions is appropriate.
+*
+* SEE ALSO
+*	Composite Pool
+*********/
+
+
+/****f* Component Library: Composite Pool/cl_cpool_init
+* NAME
+*	cl_cpool_init
+*
+* DESCRIPTION
+*	The cl_cpool_init function initializes a composite pool for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_cpool_init(
+	IN	cl_cpool_t* const	p_pool,
+	IN	const size_t		min_size,
+	IN	const size_t		max_size,
+	IN	const size_t		grow_size,
+	IN	size_t* const		component_sizes,
+	IN	const uint32_t		num_components,
+	IN	cl_pfn_cpool_init_t	pfn_initializer OPTIONAL,
+	IN	cl_pfn_cpool_dtor_t	pfn_destructor OPTIONAL,
+	IN	const void* const	context );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_cpool_t structure to initialize.
+*
+*	min_size
+*		[in] Minimum number of objects that the pool should support.  All
+*		necessary allocations to allow storing the minimum number of items
+*		are performed at initialization time, and all necessary callbacks
+*		successfully invoked.
+*
+*	max_size
+*		[in] Maximum number of objects to which the pool is allowed to grow.
+*		A value of zero specifies no maximum.
+*
+*	grow_size
+*		[in] Number of objects to allocate when incrementally growing the pool.
+* A value of zero disables automatic growth. +* +* component_sizes +* [in] Pointer to the first entry in an array of sizes describing, +* in order, the sizes of the components that make up a composite object. +* +* num_components +* [in] Number of components that make up a composite object. +* +* pfn_initializer +* [in] Initialization callback to invoke for every new object when +* growing the pool. This parameter may be NULL only if the objects +* stored in the composite pool consist of only one component. +* See the cl_pfn_cpool_init function type declaration for details +* about the callback function. +* +* pfn_destructor +* [in] Destructor callback to invoke for every object before memory for +* that object is freed. This parameter is optional and may be NULL. +* See the cl_pfn_cpool_dtor function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context. +* +* RETURN VALUES +* CL_SUCCESS if the composite pool was initialized successfully. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the +* composite pool. +* +* CL_INVALID_SETTING if a NULL constructor was provided for composite objects +* consisting of more than one component. Also returns CL_INVALID_SETTING if +* the maximum size is non-zero and less than the minimum size. +* +* Other cl_status_t value returned by optional initialization callback function +* specified by the pfn_initializer parameter. +* +* NOTES +* cl_cpool_init initializes, and if necessary, grows the pool to +* the capacity desired. +* +* SEE ALSO +* Composite Pool, cl_cpool_construct, cl_cpool_destroy, +* cl_cpool_get, cl_cpool_put, cl_cpool_grow, +* cl_cpool_count, cl_pfn_cpool_ctor_t, cl_pfn_cpool_init_t, +* cl_pfn_cpool_dtor_t +*********/ + + +/****f* Component Library: Composite Pool/cl_cpool_destroy +* NAME +* cl_cpool_destroy +* +* DESCRIPTION +* The cl_cpool_destroy function destroys a composite pool. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_cpool_destroy( + IN cl_cpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + cl_qcpool_destroy( &p_pool->qcpool ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_cpool_t structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* All memory allocated for composite objects is freed. The destructor +* callback, if any, will be invoked for every allocated object. Further +* operations on the composite pool should not be attempted after +* cl_cpool_destroy is invoked. +* +* This function should only be called after a call to cl_cpool_construct. +* +* In a debug build, cl_cpool_destroy asserts that all objects are in +* the pool. +* +* SEE ALSO +* Composite Pool, cl_cpool_construct, cl_cpool_init +*********/ + + +/****f* Component Library: Composite Pool/cl_cpool_count +* NAME +* cl_cpool_count +* +* DESCRIPTION +* The cl_cpool_count function returns the number of available objects +* in a composite pool. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_cpool_count( + IN cl_cpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + return( cl_qcpool_count( &p_pool->qcpool ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_cpool_t structure for which the number of +* available objects is requested. +* +* RETURN VALUE +* Returns the number of objects available in the specified +* composite pool. 
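+*
+*	For example (an illustrative note; my_pool and expected_capacity are
+*	assumed names), since cl_cpool_destroy expects all objects back in the
+*	pool, the count can be checked before teardown:
+*
+*		size_t	expected_capacity = 16;	/* min_size used at init time. */
+*
+*		/* Every object is back in the pool before tearing it down. */
+*		CL_ASSERT( cl_cpool_count( &my_pool ) == expected_capacity );
+*		cl_cpool_destroy( &my_pool );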
+* +* SEE ALSO +* Composite Pool +*********/ + + +/****f* Component Library: Composite Pool/cl_cpool_get +* NAME +* cl_cpool_get +* +* DESCRIPTION +* The cl_cpool_get function retrieves an object from a +* composite pool. +* +* SYNOPSIS +*/ +CL_INLINE void* CL_API +cl_cpool_get( + IN cl_cpool_t* const p_pool ) +{ + cl_pool_obj_t *p_pool_obj; + + CL_ASSERT( p_pool ); + + p_pool_obj = (cl_pool_obj_t*)cl_qcpool_get( &p_pool->qcpool ); + if( !p_pool_obj ) + return( NULL ); + + CL_ASSERT( p_pool_obj->list_obj.p_object ); + return( (void*)p_pool_obj->list_obj.p_object ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_cpool_t structure from which to retrieve +* an object. +* +* RETURN VALUES +* Returns a pointer to the first component of a composite object. +* +* Returns NULL if the pool is empty and can not be grown automatically. +* +* NOTES +* cl_cpool_get returns the object at the head of the pool. If the pool is +* empty, it is automatically grown to accommodate this request unless the +* grow_size parameter passed to the cl_cpool_init function was zero. +* +* SEE ALSO +* Composite Pool, cl_cpool_get_tail, cl_cpool_put, cl_cpool_grow, +* cl_cpool_count +*********/ + + +/****f* Component Library: Composite Pool/cl_cpool_put +* NAME +* cl_cpool_put +* +* DESCRIPTION +* The cl_cpool_put function returns an object to a composite pool. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_cpool_put( + IN cl_cpool_t* const p_pool, + IN void* const p_object ) +{ + cl_pool_obj_t *p_pool_obj; + + CL_ASSERT( p_pool ); + CL_ASSERT( p_object ); + + /* Calculate the offset to the list object representing this object. */ + p_pool_obj = (cl_pool_obj_t*) + (((uint8_t*)p_object) - sizeof(cl_pool_obj_t)); + + /* good sanity check */ + CL_ASSERT( p_pool_obj->list_obj.p_object == p_object ); + + cl_qcpool_put( &p_pool->qcpool, (cl_pool_item_t*)p_pool_obj ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_cpool_t structure to which to return +* an object. +* +* p_object +* [in] Pointer to the first component of an object to return to the pool. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_cpool_put places the returned object at the head of the pool. +* +* The object specified by the p_object parameter must have been +* retrieved from the pool by a previous call to cl_cpool_get. +* +* SEE ALSO +* Composite Pool, cl_cpool_put_tail, cl_cpool_get +*********/ + + +/****f* Component Library: Composite Pool/cl_cpool_grow +* NAME +* cl_cpool_grow +* +* DESCRIPTION +* The cl_cpool_grow function grows a composite pool by +* the specified number of objects. +* +* SYNOPSIS +*/ +CL_INLINE cl_status_t CL_API +cl_cpool_grow( + IN cl_cpool_t* const p_pool, + IN const size_t obj_count ) +{ + CL_ASSERT( p_pool ); + return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_cpool_t structure whose capacity to grow. +* +* obj_count +* [in] Number of objects by which to grow the pool. +* +* RETURN VALUES +* CL_SUCCESS if the composite pool grew successfully. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the +* composite pool. +* +* cl_status_t value returned by optional initialization callback function +* specified by the pfn_initializer parameter passed to the +* cl_cpool_init function. +* +* NOTES +* It is not necessary to call cl_cpool_grow if the pool is +* configured to grow automatically. 
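+*
+*	For example (an illustrative sketch; my_pool is assumed to have been
+*	initialized with a grow_size of zero), a pool can be grown explicitly
+*	ahead of a burst of allocations:
+*
+*		/* With grow_size zero, cl_cpool_get never grows the pool itself. */
+*		if( cl_cpool_grow( &my_pool, 32 ) == CL_SUCCESS )
+*		{
+*			/* Up to 32 additional objects can now be retrieved. */
+*		}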
+* +* SEE ALSO +* Composite Pool +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + + +#endif /* _CL_COMP_POOL_H_ */ diff --git a/branches/Ndi/inc/complib/cl_debug.h b/branches/Ndi/inc/complib/cl_debug.h new file mode 100644 index 00000000..36f72653 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_debug.h @@ -0,0 +1,597 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of functions for reporting debug output. + * + * Environment: + * All + */ + + +#ifndef _CL_DEBUG_H_ +#define _CL_DEBUG_H_ + + +#include + + +/****h* Component Library/Debug Output +* NAME +* Debug Output +* +* DESCRIPTION +* The debug output functions and macros send debug messages to the current +* debug target. +*********/ + + +/****f* Component Library: Debug Output/cl_break +* NAME +* cl_break +* +* DESCRIPTION +* The cl_break function halts execution. +* +* SYNOPSIS +* void +* cl_break(); +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* In a release build, cl_break has no effect. +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + +#ifndef cl_dbg_out +#if defined( _DEBUG_ ) +/****f* Component Library: Debug Output/cl_dbg_out +* NAME +* cl_dbg_out +* +* DESCRIPTION +* The cl_dbg_out function sends a debug message to the debug target in +* debug builds only. +* +* SYNOPSIS +*/ +CL_EXPORT void +cl_dbg_out( + IN const char* const debug_message, + IN ... ); +/* +* PARAMETERS +* debug_message +* [in] ANSI string formatted identically as for a call to the standard C +* function printf. +* +* ... +* [in] Extra parameters for string formatting, as defined for the +* standard C function printf. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* In a release build, cl_dbg_out has no effect. +* +* The formatting of the debug_message string is the same as for printf +* +* cl_dbg_out sends the debug message to the current debug target. +* +* SEE ALSO +* Debug Output, cl_msg_out +*********/ +#else +CL_INLINE void +cl_dbg_out( + IN const char* const debug_message, + IN ... 
)
+{
+	UNUSED_PARAM( debug_message );
+}
+#endif	/* defined( _DEBUG_ ) */
+#endif	/* !defined( cl_dbg_out ) */
+
+#ifndef cl_msg_out
+/****f* Component Library: Debug Output/cl_msg_out
+* NAME
+*	cl_msg_out
+*
+* DESCRIPTION
+*	The cl_msg_out function sends a debug message to the message log target.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void
+cl_msg_out(
+	IN	const char* const	message,
+	IN	... );
+/*
+* PARAMETERS
+*	message
+*		[in] ANSI string formatted identically as for a call to the standard C
+*		function printf.
+*
+*	...
+*		[in] Extra parameters for string formatting, as defined for the
+*		standard C function printf.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_msg_out is available in both debug and release builds.
+*
+*	The formatting of the message string is the same as for printf.
+*
+*	cl_msg_out sends the message to the current message logging target.
+*
+* SEE ALSO
+*	Debug Output, cl_dbg_out
+*********/
+#endif
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+
+/****d* Component Library: Debug Output/Debug Levels
+* NAME
+*	Debug Levels
+*
+* DESCRIPTION
+*	The debug output macros reserve the upper bit of the debug level to
+*	convey an error.
+*
+* SYNOPSIS
+*/
+#define	CL_DBG_DISABLE		0
+#define	CL_DBG_ERROR		0x80000000
+#define	CL_DBG_ALL			0xFFFFFFFF
+/*
+* VALUES
+*	CL_DBG_DISABLE
+*		Disable all debug output, including errors.
+*
+*	CL_DBG_ERROR
+*		Enable error debug output.
+*
+*	CL_DBG_ALL
+*		Enable all debug output.
+*
+* NOTES
+*	Users can define custom debug levels using the lower 31 bits of their
+*	debug level to control non-error debug output.  Error messages are
+*	always displayed, regardless of the lower bit definition.
+*
+*	When specifying the debug output desired for non-error messages
+*	(the CHK_LVL parameter in the debug output macros), users must define
+*	all bits whose output they are interested in.
+*
+* SEE ALSO
+*	Debug Output, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+*********/
+
+
+#if defined(_DEBUG_)
+
+/****d* Component Library: Debug Output/CL_PRINT
+* NAME
+*	CL_PRINT
+*
+* DESCRIPTION
+*	The CL_PRINT macro sends a string to the current debug target if
+*	the requested debug level matches the current debug level.
+*
+* SYNOPSIS
+*	CL_PRINT( DBG_LVL, CHK_LVL, STRING );
+*
+* PARAMETERS
+*	DBG_LVL
+*		[in] Debug level for the string to output
+*
+*	CHK_LVL
+*		[in] Current debug level against which to check DBG_LVL
+*
+*	STRING
+*		[in] String to send to the current debug target.  The string includes
+*		parentheses in order to allow additional parameters.
+*
+* RETURN VALUE
+*	This macro does not return a value.
+*
+* EXAMPLE
+*	#define MY_FUNC_DBG_LVL	1
+*
+*	uint32_t	my_dbg_lvl = CL_DBG_ALL;
+*
+*	void
+*	my_func()
+*	{
+*		CL_PRINT( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+*	}
+*
+* RESULT
+*	Hello world!
+*
+* NOTES
+*	The requested string is printed only if all bits set in DBG_LVL are also
+*	set in CHK_LVL unless the most significant bit is set (indicating an
+*	error), in which case the lower bits are ignored.  CHK_LVL may have
+*	additional bits set.
+*
+*	In multi-processor environments where the current processor can be
+*	determined, the zero-based number of the processor on which the output
+*	is generated is prepended to the output.
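+*
+*	As a further illustration (status and my_dbg_lvl are assumed names),
+*	error output is requested by setting the most significant bit of the
+*	debug level, and is displayed regardless of the lower bit definition:
+*
+*		CL_PRINT( CL_DBG_ERROR, my_dbg_lvl,
+*			("Allocation failed: status %d\n", status) );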
+*
+* SEE ALSO
+*	Debug Output, Debug Levels, CL_ENTER, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+*********/
+#define CL_PRINT( DBG_LVL, CHK_LVL, STRING )		\
+	do{												\
+		if( DBG_LVL & CHK_LVL & CL_DBG_ERROR )		\
+			cl_dbg_out STRING;						\
+		else if( (DBG_LVL & CHK_LVL) == DBG_LVL )	\
+			cl_dbg_out STRING;						\
+	} while(CHK_LVL^CHK_LVL)
+
+
+/****d* Component Library: Debug Output/CL_ENTER
+* NAME
+*	CL_ENTER
+*
+* DESCRIPTION
+*	The CL_ENTER macro marks the entrance into a function by sending a
+*	string to the current debug target if the requested debug level matches
+*	the current debug level.
+*
+* SYNOPSIS
+*	CL_ENTER( DBG_LVL, CHK_LVL );
+*
+* PARAMETERS
+*	DBG_LVL
+*		[in] Debug level for the string to output
+*
+*	CHK_LVL
+*		[in] Current debug level against which to check DBG_LVL
+*
+* RETURN VALUE
+*	This macro does not return a value.
+*
+* EXAMPLE
+*	#define __MODULE__	"my_module"
+*	#define MY_FUNC_DBG_LVL	1
+*
+*	uint32_t	my_dbg_lvl = CL_DBG_ALL;
+*
+*	void
+*	my_func()
+*	{
+*		CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*		CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*	}
+*
+* RESULT
+*	my_module:my_func() [
+*	my_module:my_func() ]
+*
+* NOTES
+*	The function entrance notification is printed only if all bits set
+*	in DBG_LVL are also set in CHK_LVL.  CHK_LVL may have additional bits set.
+*
+*	If the __MODULE__ preprocessor keyword is defined, that keyword will be
+*	prepended to the function name, separated with a colon.
+*
+*	In multi-processor environments where the current processor can be
+*	determined, the zero-based number of the processor on which the output
+*	is generated is prepended to the output.
+*
+* SEE ALSO
+*	Debug Output, Debug Levels, CL_PRINT, CL_EXIT, CL_TRACE, CL_TRACE_EXIT
+*********/
+#define CL_ENTER( DBG_LVL, CHK_LVL )				\
+	do{												\
+		CL_CHK_STK;									\
+		CL_PRINT( DBG_LVL, CHK_LVL, _CL_DBG_ENTER );	\
+	} while(CHK_LVL^CHK_LVL)
+
+
+/****d* Component Library: Debug Output/CL_EXIT
+* NAME
+*	CL_EXIT
+*
+* DESCRIPTION
+*	The CL_EXIT macro marks the exit from a function by sending a string
+*	to the current debug target if the requested debug level matches the
+*	current debug level.
+*
+* SYNOPSIS
+*	CL_EXIT( DBG_LVL, CHK_LVL );
+*
+* PARAMETERS
+*	DBG_LVL
+*		[in] Debug level for the string to output
+*
+*	CHK_LVL
+*		[in] Current debug level against which to check DBG_LVL
+*
+* RETURN VALUE
+*	This macro does not return a value.
+*
+* EXAMPLE
+*	#define __MODULE__	"my_module"
+*	#define MY_FUNC_DBG_LVL	1
+*
+*	uint32_t	my_dbg_lvl = CL_DBG_ALL;
+*
+*	void
+*	my_func()
+*	{
+*		CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*		CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*	}
+*
+* RESULT
+*	my_module:my_func() [
+*	my_module:my_func() ]
+*
+* NOTES
+*	The exit notification is printed only if all bits set in DBG_LVL are also
+*	set in CHK_LVL.  CHK_LVL may have additional bits set.
+*
+*	The CL_EXIT macro must only be used after the CL_ENTER macro as it
+*	depends on that macro's implementation.
+*
+*	If the __MODULE__ preprocessor keyword is defined, that keyword will be
+*	prepended to the function name, separated with a colon.
+*
+*	In multi-processor environments where the current processor can be
+*	determined, the zero-based number of the processor on which the output
+*	is generated is prepended to the output.
+*
+* SEE ALSO
+*	Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_TRACE, CL_TRACE_EXIT
+*********/
+#define CL_EXIT( DBG_LVL, CHK_LVL )				\
+	CL_PRINT( DBG_LVL, CHK_LVL, _CL_DBG_EXIT )
+
+
+/****d* Component Library: Debug Output/CL_TRACE
+* NAME
+*	CL_TRACE
+*
+* DESCRIPTION
+*	The CL_TRACE macro sends a string to the current debug target if
+*	the requested debug level matches the current debug level.  The
+*	output is prepended with the function name and, depending on the
+*	debug level requested, an indication of the severity of the message.
+*
+* SYNOPSIS
+*	CL_TRACE( DBG_LVL, CHK_LVL, STRING );
+*
+* PARAMETERS
+*	DBG_LVL
+*		[in] Debug level for the string to output
+*
+*	CHK_LVL
+*		[in] Current debug level against which to check DBG_LVL
+*
+*	STRING
+*		[in] String to send to the current debug target.  The string includes
+*		parentheses in order to allow additional parameters.
+*
+* RETURN VALUE
+*	This macro does not return a value.
+*
+* EXAMPLE
+*	#define __MODULE__	"my_module"
+*	#define MY_FUNC_DBG_LVL	1
+*
+*	uint32_t	my_dbg_lvl = CL_DBG_ALL;
+*
+*	void
+*	my_func()
+*	{
+*		CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*		CL_TRACE( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+*		CL_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*	}
+*
+* RESULT
+*	my_module:my_func() [
+*	my_module:my_func(): Hello world!
+*	my_module:my_func() ]
+*
+* NOTES
+*	The requested string is printed only if all bits set in DBG_LVL are also
+*	set in CHK_LVL.  CHK_LVL may have additional bits set.
+*
+*	The CL_TRACE macro must only be used after the CL_ENTER macro as it
+*	depends on that macro's implementation.
+*
+*	If the DBG_LVL has the upper bit set, the output will contain
+*	an "!ERROR!" statement between the function name and STRING.
+*
+*	If the __MODULE__ preprocessor keyword is defined, that keyword will be
+*	prepended to the function name, separated with a colon.
+*
+*	In multi-processor environments where the current processor can be
+*	determined, the zero-based number of the processor on which the output
+*	is generated is prepended to the output.
+*
+* SEE ALSO
+*	Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE_EXIT
+*********/
+#define CL_TRACE( DBG_LVL, CHK_LVL, STRING )			\
+do{														\
+switch( DBG_LVL & CL_DBG_ERROR )						\
+{														\
+	case CL_DBG_ERROR:									\
+		CL_PRINT( DBG_LVL, CHK_LVL, _CL_DBG_ERROR );	\
+		break;											\
+	default:											\
+		CL_PRINT( DBG_LVL, CHK_LVL, _CL_DBG_INFO );		\
+		break;											\
+}														\
+CL_PRINT( DBG_LVL, CHK_LVL, STRING );					\
+} while(CHK_LVL^CHK_LVL)
+
+
+/****d* Component Library: Debug Output/CL_TRACE_EXIT
+* NAME
+*	CL_TRACE_EXIT
+*
+* DESCRIPTION
+*	The CL_TRACE_EXIT macro combines the functionality of the CL_TRACE and
+*	CL_EXIT macros, in that order.
+*
+* SYNOPSIS
+*	CL_TRACE_EXIT( DBG_LVL, CHK_LVL, STRING );
+*
+* PARAMETERS
+*	DBG_LVL
+*		[in] Debug level for the string to output
+*
+*	CHK_LVL
+*		[in] Current debug level against which to check DBG_LVL
+*
+*	STRING
+*		[in] String to send to the current debug target.  The string includes
+*		parentheses in order to allow additional parameters.
+*
+* RETURN VALUE
+*	This macro does not return a value.
+*
+* EXAMPLE
+*	#define __MODULE__	"my_module"
+*	#define MY_FUNC_DBG_LVL	1
+*
+*	uint32_t	my_dbg_lvl = CL_DBG_ALL;
+*
+*	void
+*	my_func()
+*	{
+*		CL_ENTER( MY_FUNC_DBG_LVL, my_dbg_lvl );
+*		CL_TRACE_EXIT( MY_FUNC_DBG_LVL, my_dbg_lvl, ("Hello %s!\n", "world") );
+*	}
+*
+* RESULT
+*	my_module:my_func() [
+*	my_module:my_func(): Hello world!
+*	my_module:my_func() ]
+*
+* NOTES
+*	The requested string is printed only if all bits set in DBG_LVL are also
+*	set in CHK_LVL.  CHK_LVL may have additional bits set.
+*
+*	The CL_TRACE_EXIT macro must only be used after the CL_ENTER macro as it
+*	depends on that macro's implementation.
+*
+*	If the DBG_LVL has the upper bit set, the output will contain
+*	an "!ERROR!" statement between the function name and STRING.
+*
+*	If the __MODULE__ preprocessor keyword is defined, that keyword will be
+*	prepended to the function name, separated with a colon.
+*
+*	In multi-processor environments where the current processor can be
+*	determined, the zero-based number of the processor on which the output
+*	is generated is prepended to the output.
+*
+* SEE ALSO
+*	Debug Output, Debug Levels, CL_PRINT, CL_ENTER, CL_EXIT, CL_TRACE
+*********/
+#define CL_TRACE_EXIT( DBG_LVL, CHK_LVL, STRING )	\
+	do{												\
+		CL_TRACE( DBG_LVL, CHK_LVL, STRING );		\
+		CL_EXIT( DBG_LVL, CHK_LVL );				\
+	} while(CHK_LVL^CHK_LVL)
+
+#else	/* defined(_DEBUG_) */
+
+/* Define as NULL macros in a free build. */
+#define CL_PRINT( DBG_LVL, CHK_LVL, STRING )
+#define CL_ENTER( DBG_LVL, CHK_LVL )
+#define CL_EXIT( DBG_LVL, CHK_LVL )
+#define CL_TRACE( DBG_LVL, CHK_LVL, STRING )
+#define CL_TRACE_EXIT( DBG_LVL, CHK_LVL, STRING )
+
+#endif	/* defined(_DEBUG_) */
+
+
+/****d* Component Library: Debug Output/64-bit Print Format
+* NAME
+*	64-bit Print Format
+*
+* DESCRIPTION
+*	The 64-bit print keywords allow users to use 64-bit values in debug or
+*	console output.
+*
+*	Different platforms define 64-bit print formats differently.  The 64-bit
+*	print formats exposed by the component library are supported in all
+*	platforms.
+*
+* VALUES
+*	PRId64
+*		Print a 64-bit integer in signed decimal format.
+*	PRIx64
+*		Print a 64-bit integer in hexadecimal format.
+*	PRIo64
+*		Print a 64-bit integer in octal format.
+*	PRIu64
+*		Print a 64-bit integer in unsigned decimal format.
+*
+* EXAMPLE
+*	uint64_t MyVal = 2;
+*	// Print a 64-bit integer in hexadecimal format.
+*	cl_dbg_out( "MyVal: 0x%" PRIx64 "\n", MyVal );
+*
+* NOTES
+*	Standard print flags to specify padding and precision can still be used
+*	following the '%' sign in the string preceding the 64-bit print keyword.
+*
+*	The above keywords are strings and make use of compilers' string
+*	concatenation ability.
+*********/
+
+
+#endif	/* _CL_DEBUG_H_ */
diff --git a/branches/Ndi/inc/complib/cl_event.h b/branches/Ndi/inc/complib/cl_event.h
new file mode 100644
index 00000000..8fed078f
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_event.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of event abstraction.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_EVENT_H_
+#define _CL_EVENT_H_
+
+
+/* Indicates that waiting on an event should never time out */
+#define EVENT_NO_TIMEOUT	0xFFFFFFFF
+
+
+#include <complib/cl_event_osd.h>
+
+
+/****h* Component Library/Event
+* NAME
+*	Event
+*
+* DESCRIPTION
+*	The Event provides the ability to suspend and wake up a thread.
+*
+*	The event functions operate on a cl_event_t structure which should be
+*	treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_event_t
+*
+*	Initialization/Destruction:
+*		cl_event_construct, cl_event_init, cl_event_destroy
+*
+*	Manipulation:
+*		cl_event_signal, cl_event_reset, cl_event_wait_on
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Event/cl_event_construct
+* NAME
+*	cl_event_construct
+*
+* DESCRIPTION
+*	The cl_event_construct function constructs an event.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_event_construct(
+	IN	cl_event_t* const	p_event );
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_event_destroy without first calling cl_event_init.
+*
+*	Calling cl_event_construct is a prerequisite to calling any other event
+*	function except cl_event_init.
+*
+* SEE ALSO
+*	Event, cl_event_init, cl_event_destroy
+*********/
+
+
+/****f* Component Library: Event/cl_event_init
+* NAME
+*	cl_event_init
+*
+* DESCRIPTION
+*	The cl_event_init function initializes an event for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_event_init(
+	IN	cl_event_t* const	p_event,
+	IN	const boolean_t		manual_reset );
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure to initialize.
+*
+*	manual_reset
+*		[in] If FALSE, indicates that the event resets itself after releasing
+*		a single waiter.  If TRUE, the event remains in the signalled state
+*		until explicitly reset by a call to cl_event_reset.
+*
+* RETURN VALUES
+*	CL_SUCCESS if event initialization succeeded.
+*
+*	CL_ERROR otherwise.
+*
+* NOTES
+*	Allows calling event manipulation functions, such as cl_event_signal,
+*	cl_event_reset, and cl_event_wait_on.
+*
+*	The event is initially in a reset state.
+*
+* SEE ALSO
+*	Event, cl_event_construct, cl_event_destroy, cl_event_signal,
+*	cl_event_reset, cl_event_wait_on
+*********/
+
+
+/****f* Component Library: Event/cl_event_destroy
+* NAME
+*	cl_event_destroy
+*
+* DESCRIPTION
+*	The cl_event_destroy function performs any necessary cleanup of an event.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_event_destroy(
+	IN	cl_event_t* const	p_event );
+
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function should only be called after a call to cl_event_construct
+*	or cl_event_init.
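+*
+* EXAMPLE
+*	A minimal lifecycle sketch.  The signalling thread is illustrative;
+*	any other thread holding a pointer to the event could call
+*	cl_event_signal.
+*
+*	cl_event_t	event;
+*
+*	cl_event_construct( &event );
+*	if( cl_event_init( &event, FALSE ) == CL_SUCCESS )
+*	{
+*		/* ... another thread eventually calls cl_event_signal( &event ) ... */
+*		cl_event_wait_on( &event, EVENT_NO_TIMEOUT, FALSE );
+*	}
+*	cl_event_destroy( &event );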
+*
+* SEE ALSO
+*	Event, cl_event_construct, cl_event_init
+*********/
+
+
+/****f* Component Library: Event/cl_event_signal
+* NAME
+*	cl_event_signal
+*
+* DESCRIPTION
+*	The cl_event_signal function sets an event to the signalled state and
+*	releases one or more waiting threads.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_event_signal(
+	IN	cl_event_t* const	p_event );
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure to set.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the event was successfully signalled.
+*
+*	CL_ERROR otherwise.
+*
+* NOTES
+*	For auto-reset events, the event is reset automatically once a wait
+*	operation is satisfied.
+*
+*	Triggering the event multiple times does not guarantee that the same
+*	number of wait operations are satisfied.  This is because events are
+*	either in a signalled or non-signalled state, and triggering an event
+*	that is already in the signalled state has no effect.
+*
+* SEE ALSO
+*	Event, cl_event_reset, cl_event_wait_on
+*********/
+
+
+/****f* Component Library: Event/cl_event_reset
+* NAME
+*	cl_event_reset
+*
+* DESCRIPTION
+*	The cl_event_reset function sets an event to the non-signalled state.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_event_reset(
+	IN	cl_event_t* const	p_event );
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure to reset.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the event was successfully reset.
+*
+*	CL_ERROR otherwise.
+*
+* SEE ALSO
+*	Event, cl_event_signal, cl_event_wait_on
+*********/
+
+
+/****f* Component Library: Event/cl_event_wait_on
+* NAME
+*	cl_event_wait_on
+*
+* DESCRIPTION
+*	The cl_event_wait_on function waits for the specified event to be
+*	triggered for a minimum amount of time.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_event_wait_on(
+	IN	cl_event_t* const	p_event,
+	IN	const uint32_t		wait_us,
+	IN	const boolean_t		interruptible );
+/*
+* PARAMETERS
+*	p_event
+*		[in] Pointer to a cl_event_t structure on which to wait.
+*
+*	wait_us
+*		[in] Number of microseconds to wait.
+*
+*	interruptible
+*		[in] Indicates whether the wait operation can be interrupted
+*		by external signals.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the wait operation succeeded in response to the event
+*	being set.
+*
+*	CL_TIMEOUT if the specified time period elapses.
+*
+*	CL_NOT_DONE if the wait was interrupted by an external signal.
+*
+*	CL_ERROR if the wait operation failed.
+*
+* NOTES
+*	If wait_us is set to EVENT_NO_TIMEOUT, the function will wait until the
+*	event is triggered and never time out.
+*
+*	If the timeout value is zero, this function simply tests the state of
+*	the event.
+*
+*	If the event is already in the signalled state at the time of the call
+*	to cl_event_wait_on, the call completes immediately with CL_SUCCESS.
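+*
+* EXAMPLE
+*	A timed-wait sketch; the one-second timeout is illustrative:
+*
+*	cl_status_t	status;
+*
+*	status = cl_event_wait_on( &event, 1000000, FALSE );
+*	if( status == CL_TIMEOUT )
+*	{
+*		/* The event was not signalled within one second. */
+*	}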
+ *
+ * SEE ALSO
+ *	Event, cl_event_signal, cl_event_reset
+ *********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_EVENT_H_ */
diff --git a/branches/Ndi/inc/complib/cl_fleximap.h b/branches/Ndi/inc/complib/cl_fleximap.h
new file mode 100644
index 00000000..dde21357
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_fleximap.h
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of flexi map, a binary tree where the caller always provides
+ *	all necessary storage.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_FLEXIMAP_H_
+#define _CL_FLEXIMAP_H_
+
+
+#include <complib/cl_qmap.h>
+
+
+/****h* Component Library/Flexi Map
+* NAME
+*	Flexi Map
+*
+* DESCRIPTION
+*	Flexi map implements a binary tree that stores user provided cl_fmap_item_t
+*	structures.  Each item stored in a flexi map has a unique user defined key
+*	(duplicates are not allowed).  Flexi map provides the ability to
+*	efficiently search for an item given a key.  Flexi map allows user defined
+*	keys of any size.  Storage for keys and a comparison function are provided
+*	by users to allow flexi map to store items with arbitrary key values.
+*
+*	Flexi map does not allocate any memory, and therefore cannot fail
+*	any operation due to insufficient memory.  Flexi map can thus be useful
+*	in minimizing the error paths in code.
+*
+*	Flexi map is not thread safe, and users must provide serialization when
+*	adding and removing items from the map.
+*
+*	The flexi map functions operate on a cl_fmap_t structure which should
+*	be treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_fmap_t, cl_fmap_item_t
+*
+*	Callbacks:
+*		cl_pfn_fmap_apply_t
+*
+*	Item Manipulation:
+*		cl_fmap_key
+*
+*	Initialization:
+*		cl_fmap_init
+*
+*	Iteration:
+*		cl_fmap_end, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_prev
+*
+*	Manipulation:
+*		cl_fmap_insert, cl_fmap_get, cl_fmap_remove_item, cl_fmap_remove,
+*		cl_fmap_remove_all, cl_fmap_merge, cl_fmap_delta
+*
+*	Search:
+*		cl_fmap_apply_func
+*
+*	Attributes:
+*		cl_fmap_count, cl_is_fmap_empty
+*********/
+
+
+/****s* Component Library: Flexi Map/cl_fmap_item_t
+* NAME
+*	cl_fmap_item_t
+*
+* DESCRIPTION
+*	The cl_fmap_item_t structure is used by maps to store objects.
+*
+*	The cl_fmap_item_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_fmap_item
+{
+	/* Must be first to allow casting. */
+	cl_pool_item_t		pool_item;
+	struct _cl_fmap_item	*p_left;
+	struct _cl_fmap_item	*p_right;
+	struct _cl_fmap_item	*p_up;
+	cl_map_color_t		color;
+	const void* __ptr64	p_key;
+#ifdef _DEBUG_
+	struct _cl_fmap		*p_map;
+#endif
+
+} cl_fmap_item_t;
+/*
+* FIELDS
+*	pool_item
+*		Used to store the item in a doubly linked list, allowing more
+*		efficient map traversal.
+*
+*	p_left
+*		Pointer to the map item that is a child to the left of the node.
+*
+*	p_right
+*		Pointer to the map item that is a child to the right of the node.
+*
+*	p_up
+*		Pointer to the map item that is the parent of the node.
+*
+*	p_nil
+*		Pointer to the map's NIL item, used as a terminator for leaves.
+*		The NIL sentinel is in the cl_fmap_t structure.
+*
+*	color
+*		Indicates whether a node is red or black in the map.
+*
+*	p_key
+*		Pointer to the value that uniquely represents a node in a map.  This
+*		pointer is set by calling cl_fmap_insert and can be retrieved by
+*		calling cl_fmap_key.
+*
+* NOTES
+*	None of the fields of this structure should be manipulated by users, as
+*	they are critical to the proper operation of the map in which they
+*	are stored.
+*
+*	To allow storing items in either a quick list, a quick pool, or a flexi
+*	map, the map implementation guarantees that the map item can be safely
+*	cast to a pool item used for storing an object in a quick pool, or cast to
+*	a list item used for storing an object in a quick list.  This removes the
+*	need to embed a flexi map item, a list item, and a pool item in objects
+*	that need to be stored in a quick list, a quick pool, and a flexi map.
+*
+*	The flexi map item is defined to be identical in layout to a map item.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_insert, cl_fmap_key, cl_pool_item_t, cl_list_item_t
+*********/
+
+
+/****d* Component Library: Flexi Map/cl_pfn_fmap_cmp_t
+* NAME
+*	cl_pfn_fmap_cmp_t
+*
+* DESCRIPTION
+*	The cl_pfn_fmap_cmp_t function type defines the prototype for functions
+*	used to compare item keys in a flexi map.
+*
+* SYNOPSIS
+*/
+typedef intn_t
+(CL_API *cl_pfn_fmap_cmp_t)(
+	IN	const void* const	p_key1,
+	IN	const void* const	p_key2 );
+/*
+* PARAMETERS
+*	p_key1
+*		[in] Pointer to the first of two keys to compare.
+*
+*	p_key2
+*		[in] Pointer to the second of two keys to compare.
+*
+* RETURN VALUE
+*	Returns 0 if the keys match.
+*	Returns less than 0 if p_key1 is less than p_key2.
+*	Returns greater than 0 if p_key1 is greater than p_key2.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_fmap_init function.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_init
+*********/
+
+
+/****s* Component Library: Flexi Map/cl_fmap_t
+* NAME
+*	cl_fmap_t
+*
+* DESCRIPTION
+*	Flexi map structure.
+*
+*	The cl_fmap_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_fmap
+{
+	cl_fmap_item_t		root;
+	cl_fmap_item_t		nil;
+	cl_state_t		state;
+	size_t			count;
+	cl_pfn_fmap_cmp_t	pfn_compare;
+
+} cl_fmap_t;
+/*
+* FIELDS
+*	root
+*		Map item that serves as root of the map.  The root is set up to
+*		always have itself as parent.  The left pointer is set to point to
+*		the item at the root.
+*
+*	nil
+*		Map item that serves as terminator for all leaves, as well as providing
+*		the list item used as quick list for storing map items in a list for
+*		faster traversal.
+*
+*	state
+*		State of the map, used to verify that operations are permitted.
+*
+*	count
+*		Number of items in the map.
+*
+*	pfn_compare
+*		Pointer to a compare function to invoke to compare the keys of
+*		items in the map.
+*
+* SEE ALSO
+*	Flexi Map, cl_pfn_fmap_cmp_t
+*********/
+
+
+/****d* Component Library: Flexi Map/cl_pfn_fmap_apply_t
+* NAME
+*	cl_pfn_fmap_apply_t
+*
+* DESCRIPTION
+*	The cl_pfn_fmap_apply_t function type defines the prototype for functions
+*	used to iterate items in a flexi map.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_fmap_apply_t)(
+	IN	cl_fmap_item_t* const	p_map_item,
+	IN	void*			context );
+/*
+* PARAMETERS
+*	p_map_item
+*		[in] Pointer to a cl_fmap_item_t structure.
+*
+*	context
+*		[in] Value passed to the callback function.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_fmap_apply_func
+*	function.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_apply_func
+*********/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****f* Component Library: Flexi Map/cl_fmap_count
+* NAME
+*	cl_fmap_count
+*
+* DESCRIPTION
+*	The cl_fmap_count function returns the number of items stored
+*	in a flexi map.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_fmap_count(
+	IN	const cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	return( p_map->count );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure whose item count to return.
+*
+* RETURN VALUE
+*	Returns the number of items stored in the map.
+*
+* SEE ALSO
+*	Flexi Map, cl_is_fmap_empty
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_is_fmap_empty
+* NAME
+*	cl_is_fmap_empty
+*
+* DESCRIPTION
+*	The cl_is_fmap_empty function returns whether a flexi map is empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_fmap_empty(
+	IN	const cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+	return( p_map->count == 0 );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure to test for emptiness.
+*
+* RETURN VALUES
+*	TRUE if the flexi map is empty.
+*
+*	FALSE otherwise.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_count, cl_fmap_remove_all
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_key
+* NAME
+*	cl_fmap_key
+*
+* DESCRIPTION
+*	The cl_fmap_key function retrieves the key value of a map item.
+*
+* SYNOPSIS
+*/
+#pragma warning (push)
+#pragma warning (disable :4244)
+CL_INLINE const void* CL_API
+cl_fmap_key(
+	IN	const cl_fmap_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+	return( p_item->p_key );
+}
+#pragma warning (pop )
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose key value to return.
+*
+* RETURN VALUE
+*	Returns a pointer to the key value for the specified map item.
+*	The key value should not be modified, to ensure proper flexi map
+*	operation.
+*
+* NOTES
+*	The key value is set in a call to cl_fmap_insert.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_insert
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_init
+* NAME
+*	cl_fmap_init
+*
+* DESCRIPTION
+*	The cl_fmap_init function initializes a flexi map for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_fmap_init(
+	IN	cl_fmap_t* const	p_map,
+	IN	cl_pfn_fmap_cmp_t	pfn_compare );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure to initialize.
+*
+*	pfn_compare
+*		[in] Pointer to the compare function used to compare keys.
+*		See the cl_pfn_fmap_cmp_t function type declaration for details
+*		about the callback function.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling flexi map manipulation functions.
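+*
+* EXAMPLE
+*	A minimal sketch; my_obj_t and my_key_cmp are illustrative.  The map
+*	stores items keyed by a 64-bit value embedded in the caller's object,
+*	so the key storage persists for the life of the item.
+*
+*	typedef struct _my_obj
+*	{
+*		cl_fmap_item_t	map_item;
+*		uint64_t	key;
+*	} my_obj_t;
+*
+*	static intn_t CL_API
+*	my_key_cmp(
+*		IN	const void* const	p_key1,
+*		IN	const void* const	p_key2 )
+*	{
+*		const uint64_t	k1 = *(const uint64_t*)p_key1;
+*		const uint64_t	k2 = *(const uint64_t*)p_key2;
+*
+*		if( k1 < k2 )
+*			return -1;
+*		return( k1 > k2 );
+*	}
+*
+*	cl_fmap_t	my_map;
+*	my_obj_t	obj;
+*
+*	cl_fmap_init( &my_map, my_key_cmp );
+*	obj.key = 42;
+*	cl_fmap_insert( &my_map, &obj.key, &obj.map_item );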
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_insert, cl_fmap_remove
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_end
+* NAME
+*	cl_fmap_end
+*
+* DESCRIPTION
+*	The cl_fmap_end function returns the end of a flexi map.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_fmap_item_t* const CL_API
+cl_fmap_end(
+	IN	const cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	/* Nil is the end of the map. */
+	return( &p_map->nil );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure whose end to return.
+*
+* RETURN VALUE
+*	Pointer to the end of the map.
+*
+* NOTES
+*	cl_fmap_end is useful for determining the validity of map items returned
+*	by cl_fmap_head, cl_fmap_tail, cl_fmap_next, or cl_fmap_prev.  If the map
+*	item pointer returned by any of these functions compares equal to the end,
+*	the end of the map was encountered.
+*	When using cl_fmap_head or cl_fmap_tail, this condition indicates that
+*	the map is empty.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_prev
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_head
+* NAME
+*	cl_fmap_head
+*
+* DESCRIPTION
+*	The cl_fmap_head function returns the map item with the lowest key
+*	value stored in a flexi map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_head(
+	IN	const cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	return( (cl_fmap_item_t*)p_map->nil.pool_item.list_item.p_next );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure whose item with the lowest key
+*		is returned.
+*
+* RETURN VALUES
+*	Pointer to the map item with the lowest key in the flexi map.
+*
+*	Pointer to the map end if the flexi map was empty.
+*
+* NOTES
+*	cl_fmap_head does not remove the item from the map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_tail, cl_fmap_next, cl_fmap_prev, cl_fmap_end,
+*	cl_fmap_item_t
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_tail
+* NAME
+*	cl_fmap_tail
+*
+* DESCRIPTION
+*	The cl_fmap_tail function returns the map item with the highest key
+*	value stored in a flexi map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_tail(
+	IN	const cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	return( (cl_fmap_item_t*)p_map->nil.pool_item.list_item.p_prev );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure whose item with the highest key
+*		is returned.
+*
+* RETURN VALUES
+*	Pointer to the map item with the highest key in the flexi map.
+*
+*	Pointer to the map end if the flexi map was empty.
+*
+* NOTES
+*	cl_fmap_tail does not remove the item from the map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_head, cl_fmap_next, cl_fmap_prev, cl_fmap_end,
+*	cl_fmap_item_t
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_next
+* NAME
+*	cl_fmap_next
+*
+* DESCRIPTION
+*	The cl_fmap_next function returns the map item with the next higher
+*	key value than a specified map item.
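+*
+*	Combined with cl_fmap_head and cl_fmap_end, this enables in-order
+*	traversal of a map, as in the following sketch (my_map is assumed to
+*	be an initialized cl_fmap_t):
+*
+*	cl_fmap_item_t	*p_item;
+*
+*	for( p_item = cl_fmap_head( &my_map );
+*		p_item != cl_fmap_end( &my_map );
+*		p_item = cl_fmap_next( p_item ) )
+*	{
+*		/* Process the item here. */
+*	}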
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_next(
+	IN	const cl_fmap_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+	return( (cl_fmap_item_t*)p_item->pool_item.list_item.p_next );
+}
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose successor to return.
+*
+* RETURN VALUES
+*	Pointer to the map item with the next higher key value in a flexi map.
+*
+*	Pointer to the map end if the specified item was the last item in
+*	the flexi map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_prev, cl_fmap_end,
+*	cl_fmap_item_t
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_prev
+* NAME
+*	cl_fmap_prev
+*
+* DESCRIPTION
+*	The cl_fmap_prev function returns the map item with the next lower
+*	key value than a specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_fmap_item_t* CL_API
+cl_fmap_prev(
+	IN	const cl_fmap_item_t* const	p_item )
+{
+	CL_ASSERT( p_item );
+	return( (cl_fmap_item_t*)p_item->pool_item.list_item.p_prev );
+}
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose predecessor to return.
+*
+* RETURN VALUES
+*	Pointer to the map item with the next lower key value in a flexi map.
+*
+*	Pointer to the map end if the specified item was the first item in
+*	the flexi map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_head, cl_fmap_tail, cl_fmap_next, cl_fmap_end,
+*	cl_fmap_item_t
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_insert
+* NAME
+*	cl_fmap_insert
+*
+* DESCRIPTION
+*	The cl_fmap_insert function inserts a map item into a flexi map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_insert(
+	IN	cl_fmap_t* const	p_map,
+	IN	const void* const	p_key,
+	IN	cl_fmap_item_t* const	p_item );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure into which to add the item.
+*
+*	p_key
+*		[in] Pointer to the key value to assign to the item.  Storage for
+*		the key must be persistent, as only the pointer is stored.  Users
+*		are responsible for maintaining the validity of key pointers while
+*		they are in use.
+*
+*	p_item
+*		[in] Pointer to a cl_fmap_item_t structure to insert into the flexi map.
+*
+* RETURN VALUE
+*	Pointer to the item in the map with the specified key.  If insertion
+*	was successful, this is the pointer to the item.  If an item with the
+*	specified key already exists in the map, the pointer to that item is
+*	returned.
+*
+* NOTES
+*	Insertion operations may cause the flexi map to rebalance.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_remove, cl_fmap_item_t
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_get
+* NAME
+*	cl_fmap_get
+*
+* DESCRIPTION
+*	The cl_fmap_get function returns the map item associated with a key.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_get(
+	IN	const cl_fmap_t* const	p_map,
+	IN	const void* const	p_key );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure from which to retrieve the
+*		item with the specified key.
+*
+*	p_key
+*		[in] Pointer to a key value used to search for the desired map item.
+*
+* RETURN VALUES
+*	Pointer to the map item with the desired key value.
+*
+*	Pointer to the map end if there was no item with the desired key value
+*	stored in the flexi map.
+*
+* NOTES
+*	cl_fmap_get does not remove the item from the flexi map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_remove
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_remove_item
+* NAME
+*	cl_fmap_remove_item
+*
+* DESCRIPTION
+*	The cl_fmap_remove_item function removes the specified map item
+*	from a flexi map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_fmap_remove_item(
+	IN	cl_fmap_t* const	p_map,
+	IN	cl_fmap_item_t* const	p_item );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure from which to remove the item.
+*
+*	p_item
+*		[in] Pointer to a map item to remove from its flexi map.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Removes the map item pointed to by p_item from its flexi map.
+*
+*	In a debug build, cl_fmap_remove_item asserts that the item being removed
+*	is in the specified map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_remove, cl_fmap_remove_all, cl_fmap_insert
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_remove
+* NAME
+*	cl_fmap_remove
+*
+* DESCRIPTION
+*	The cl_fmap_remove function removes the map item with the specified key
+*	from a flexi map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_fmap_item_t* CL_API
+cl_fmap_remove(
+	IN	cl_fmap_t* const	p_map,
+	IN	const void* const	p_key );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure from which to remove the item
+*		with the specified key.
+*
+*	p_key
+*		[in] Pointer to the key value used to search for the map item
+*		to remove.
+*
+* RETURN VALUES
+*	Pointer to the removed map item if it was found.
+*
+*	Pointer to the map end if no item with the specified key exists in the
+*	flexi map.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_remove_item, cl_fmap_remove_all, cl_fmap_insert
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_remove_all
+* NAME
+*	cl_fmap_remove_all
+*
+* DESCRIPTION
+*	The cl_fmap_remove_all function removes all items in a flexi map,
+*	leaving it empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_fmap_remove_all(
+	IN	cl_fmap_t* const	p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+	p_map->root.p_left = &p_map->nil;
+	p_map->nil.pool_item.list_item.p_next = &p_map->nil.pool_item.list_item;
+	p_map->nil.pool_item.list_item.p_prev = &p_map->nil.pool_item.list_item;
+	p_map->count = 0;
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure to empty.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_remove, cl_fmap_remove_item
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_merge
+* NAME
+*	cl_fmap_merge
+*
+* DESCRIPTION
+*	The cl_fmap_merge function moves all items from one map to another,
+*	excluding duplicates.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_fmap_merge(
+	OUT		cl_fmap_t* const	p_dest_map,
+	IN OUT	cl_fmap_t* const	p_src_map );
+/*
+* PARAMETERS
+*	p_dest_map
+*		[out] Pointer to a cl_fmap_t structure to which items should be added.
+*
+*	p_src_map
+*		[in/out] Pointer to a cl_fmap_t structure whose items to add
+*		to p_dest_map.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Items are evaluated based on their keys only.
+*
+*	Upon return from cl_fmap_merge, the flexi map referenced by p_src_map
+*	contains all duplicate items.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_delta
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_delta
+* NAME
+*	cl_fmap_delta
+*
+* DESCRIPTION
+*	The cl_fmap_delta function computes the differences between two maps.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_fmap_delta(
+	IN OUT	cl_fmap_t* const	p_map1,
+	IN OUT	cl_fmap_t* const	p_map2,
+	OUT		cl_fmap_t* const	p_new,
+	OUT		cl_fmap_t* const	p_old );
+/*
+* PARAMETERS
+*	p_map1
+*		[in/out] Pointer to the first of two cl_fmap_t structures whose
+*		differences to compute.
+*
+*	p_map2
+*		[in/out] Pointer to the second of two cl_fmap_t structures whose
+*		differences to compute.
+*
+*	p_new
+*		[out] Pointer to an empty cl_fmap_t structure that contains the items
+*		unique to p_map2 upon return from the function.
+*
+*	p_old
+*		[out] Pointer to an empty cl_fmap_t structure that contains the items
+*		unique to p_map1 upon return from the function.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Items are evaluated based on their keys.  Items that exist in both
+*	p_map1 and p_map2 remain in their respective maps.  Items that
+*	exist only in p_map1 are moved to p_old.  Likewise, items that exist only
+*	in p_map2 are moved to p_new.  This function can be useful in evaluating
+*	changes between two maps.
+*
+*	Both maps pointed to by p_new and p_old must be empty on input.  This
+*	requirement removes the possibility of failures.
+*
+* SEE ALSO
+*	Flexi Map, cl_fmap_merge
+*********/
+
+
+/****f* Component Library: Flexi Map/cl_fmap_apply_func
+* NAME
+*	cl_fmap_apply_func
+*
+* DESCRIPTION
+*	The cl_fmap_apply_func function executes a specified function
+*	for every item stored in a flexi map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_fmap_apply_func(
+	IN	const cl_fmap_t* const	p_map,
+	IN	cl_pfn_fmap_apply_t	pfn_func,
+	IN	const void* const	context );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_fmap_t structure.
+*
+*	pfn_func
+*		[in] Function invoked for every item in the flexi map.
+*		See the cl_pfn_fmap_apply_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	The function provided must not perform any map operations, as these
+*	would corrupt the flexi map.
+*
+* SEE ALSO
+*	Flexi Map, cl_pfn_fmap_apply_t
+*********/
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif	/* _CL_FLEXIMAP_H_ */
diff --git a/branches/Ndi/inc/complib/cl_ioctl.h b/branches/Ndi/inc/complib/cl_ioctl.h
new file mode 100644
index 00000000..93998a48
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_ioctl.h
@@ -0,0 +1,626 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of IOCTL object
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_IOCTL_H_
+#define _CL_IOCTL_H_
+
+
+#include <complib/cl_types.h>
+#include <complib/cl_ioctl_osd.h>
+
+
+/****h* Component Library/IOCTL Object
+* NAME
+*	IOCTL Object
+*
+* DESCRIPTION
+*	The IOCTL object provides functionality for handling IOCTL requests.
+*
+*	The IOCTL object is only available in kernel mode and provides
+*	functionality for accessing information about IO requests initiated
+*	by a user-mode application.  The IOCTL_CODE macro is used in both
+*	user and kernel mode to initiate and dispatch IOCTL requests,
+*	respectively.
+*
+*	In Linux, in order for the IOCTL object to be used, requests must be
+*	initiated and handled using the Device Framework abstraction.
+*
+* SEE ALSO
+*	Structures:
+*		cl_ioctl_handle_t
+*
+*	Callbacks:
+*		cl_pfn_ioctl_handler_t
+*
+*	Control Code Generation
+*		IOCTL_CODE
+*
+*	Kernel Mode Access
+*		cl_ioctl_process
+*		cl_ioctl_complete
+*		cl_ioctl_type
+*		cl_ioctl_cmd
+*		cl_ioctl_ctl_code
+*		cl_ioctl_in_buf
+*		cl_ioctl_in_size
+*		cl_ioctl_out_buf
+*		cl_ioctl_out_size
+*
+*	User Mode Access
+*		cl_ioctl_request
+*		cl_ioctl_result
+*********/
+
+
+/****d* Component Library: IOCTL Object/IOCTL_CODE
+* NAME
+*	IOCTL_CODE
+*
+* DESCRIPTION
+*	Macro for defining IO control command codes.
+*
+* SYNOPSIS
+*	uint32_t IOCTL_CODE( uint16_t type, uint16_t cmd )
+*
+* PARAMETERS
+*	type
+*		[in] User-defined type representing the type of command.  For Linux,
+*		the type is truncated to 8 bits.  For Windows, the type is a 16-bit
+*		value, as described in "Specifying Device Types" in the DDK docs.
+*
+*	cmd
+*		[in] User-defined command.  For Linux, the command field is truncated
+*		to 8 bits.  For Windows, the command can be 12 bits, with values
+*		below 0x800 reserved by Microsoft for system defined commands.
+*
+* RETURN VALUE
+*	A 32-bit control code.  User-mode clients use the control code to initiate
+*	requests.  Kernel-mode clients use the control code to distinguish between
+*	different requests.
+*
+* NOTES
+*	In Windows, all IOCTL command codes defined with the IOCTL_CODE macro
+*	result in FILE_ANY_ACCESS and METHOD_BUFFERED being specified.
+*
+* SEE ALSO
+*	IOCTL Object, cl_dev_ioctl, cl_ioctl_type, cl_ioctl_cmd
+*********/
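+
+/*
+ * Example (illustrative values only): a control code shared by the
+ * user-mode initiator and the kernel-mode dispatcher could be defined
+ * as follows.
+ *
+ *	#define MY_DEV_TYPE		0x9100
+ *	#define MY_CMD_QUERY	0x801
+ *	#define MY_IOCTL_QUERY	IOCTL_CODE( MY_DEV_TYPE, MY_CMD_QUERY )
+ */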
+
+
+#ifdef CL_KERNEL
+
+/****d* Component Library: IOCTL Object/cl_ioctl_handle_t
+* NAME
+*	cl_ioctl_handle_t
+*
+* DESCRIPTION
+*	Opaque handle representing an IO request.
+*
+* NOTES
+*	The cl_ioctl_handle_t type is only available in the kernel.
+*	The cl_ioctl_handle_t type should be treated as opaque, as it
+*	varies from environment to environment.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_type, cl_ioctl_cmd, cl_ioctl_in_buf,
+*	cl_ioctl_in_size, cl_ioctl_out_buf, cl_ioctl_out_size,
+*	cl_ioctl_set_status, cl_ioctl_set_ret_bytes
+*********/
+
+
+/****d* Component Library: IOCTL Object/cl_pfn_ioctl_handler_t
+* NAME
+*	cl_pfn_ioctl_handler_t
+*
+* DESCRIPTION
+*	The cl_pfn_ioctl_handler_t function type defines the prototype for
+*	IOCTL handlers used when handling IOCTL requests initiated by
+*	cl_ioctl_request.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_ioctl_handler_t)(
+	IN	cl_ioctl_handle_t	h_ioctl,
+	IN	void			*context_1,
+	IN	void			*context_2 );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to the IOCTL request.
+*
+*	context_1
+*		[in] First context parameter, as provided to cl_ioctl_process.
+*
+*	context_2
+*		[in] Second context parameter, as provided to cl_ioctl_process.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the IOCTL was completed successfully.
+*
+*	CL_PENDING if the IOCTL is being processed asynchronously.
+*
+*	Other return values in case of errors.
+*
+* NOTES
+*	It is acceptable to complete the IOCTL successfully to report an error
+*	status in the output buffer.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_process
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_process
+* NAME
+*	cl_ioctl_process
+*
+* DESCRIPTION
+*	The cl_ioctl_process function unpacks information from an IOCTL request
+*	initiated by a call to the cl_ioctl_request function and invokes a
+*	user-supplied callback.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ioctl_process(
+	IN	void			*p_ioctl,
+	IN	cl_pfn_ioctl_handler_t	pfn_ioctl_handler,
+	IN	void			*context_1,
+	IN	void			*context_2 );
+/*
+* PARAMETERS
+*	p_ioctl
+*		[in] Pointer to an OS specific IOCTL information.  In Linux,
+*		this parameter depends on whether the IOCTL is handled synchronously
+*		or asynchronously.  See the notes for further detail.
+*		In Windows, this is a pointer to an IRP.
+*
+*	pfn_ioctl_handler
+*		[in] Pointer to the callback function to invoke for handling the IOCTL.
+*		This callback is independent of the IOCTL command.
+*
+*	context_1
+*		[in] First of two context parameters to pass to the handler.
+*
+*	context_2
+*		[in] Second of two context parameters to pass to the handler.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the IOCTL was processed successfully.
+*
+*	Other values to indicate various failures.
+*
+* NOTES
+*	Users must call cl_ioctl_complete from within the handler if completing
+*	the IOCTL request synchronously.  If the IOCTL request's control code is
+*	invalid, the handler should return CL_INVALID_REQUEST.
+*
+*	In Linux, the p_ioctl parameter is a copy of the argp parameter on input,
+*	and on output points to the IOCTL request object passed to the IOCTL
+*	handler if and only if the IOCTL handler returned CL_PENDING.
+*	This allows the user to cancel the request by passing the same
+*	handle to the cancel routine that was passed to the IOCTL handler.
+*	If all IOCTLs are handled synchronously, it is acceptable to pass the argp
+*	parameter of the IOCTL entry point instead of a copy.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_pfn_ioctl_handler_t, cl_ioctl_complete
+*********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_complete
+* NAME
+*	cl_ioctl_complete
+*
+* DESCRIPTION
+*	Fills in completion information for an IOCTL and releases the IOCTL request
+*	for completion.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_ioctl_complete(
+	IN	cl_ioctl_handle_t	h_ioctl,
+	IN	cl_status_t		io_status,
+	IN	size_t			ret_bytes );
+/*
+* PARAMETERS
+*	h_ioctl
+*		Handle to the IOCTL being completed.  This handle was provided to
+*		the IOCTL handler.
+*
+*	io_status
+*		Status of the IOCTL request.
+*
+*	ret_bytes
+*		Number of bytes written to the output buffer.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_process
+*********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_type
+* NAME
+*	cl_ioctl_type
+*
+* DESCRIPTION
+*	Returns the type of an IOCTL.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint16_t CL_API
+cl_ioctl_type(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the type of the specified IOCTL request, as defined using
+*	the IOCTL_CODE macro.
+*
+* NOTES
+*	The cl_ioctl_type function is only available in the kernel.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_cmd, cl_ioctl_ctl_code
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_cmd
+* NAME
+*	cl_ioctl_cmd
+*
+* DESCRIPTION
+*	Returns the command of an IOCTL
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint16_t CL_API
+cl_ioctl_cmd(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the command of the specified IOCTL request, as defined using
+*	the IOCTL_CODE macro.
+*
+* NOTES
+*	The cl_ioctl_cmd function is only available in the kernel.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_type, cl_ioctl_ctl_code
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_ctl_code
+* NAME
+*	cl_ioctl_ctl_code
+*
+* DESCRIPTION
+*	Returns the 32-bit control code of an IOCTL
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint32_t CL_API
+cl_ioctl_ctl_code(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the 32-bit control code of the specified IOCTL request,
+*	as defined using the IOCTL_CODE macro.
+*
+* NOTES
+*	The cl_ioctl_ctl_code function is only available in the kernel.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_type, cl_ioctl_cmd
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_in_buf
+* NAME
+*	cl_ioctl_in_buf
+*
+* DESCRIPTION
+*	Returns a pointer to the input buffer of an IOCTL.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_ioctl_in_buf(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the input buffer of the specified IOCTL request.
+*
+* NOTES
+*	The cl_ioctl_in_buf function is only available in the kernel.
+*
+*	In Windows, for IOCTL operations defined as METHOD_IN_DIRECT, the
+*	returned pointer points to the MDL describing the input buffer.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_in_size,
+*	cl_ioctl_out_buf, cl_ioctl_out_size
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_in_size
+* NAME
+*	cl_ioctl_in_size
+*
+* DESCRIPTION
+*	Returns the size of the input buffer of an IOCTL.
+*
+* SYNOPSIS
+*/
+CL_EXPORT ULONG CL_API
+cl_ioctl_in_size(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the size, in bytes, of the input buffer of the specified
+*	IOCTL request.
+*
+* NOTES
+*	The cl_ioctl_in_size function is only available in the kernel.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_in_buf,
+*	cl_ioctl_out_buf, cl_ioctl_out_size
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_out_buf
+* NAME
+*	cl_ioctl_out_buf
+*
+* DESCRIPTION
+*	Returns a pointer to the output buffer of an IOCTL.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_ioctl_out_buf(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns a pointer to the output buffer of the specified IOCTL request.
+*
+* NOTES
+*	The cl_ioctl_out_buf function is only available in the kernel.
+*
+*	In Windows, for IOCTL operations defined as METHOD_IN_DIRECT or
+*	METHOD_OUT_DIRECT, the returned pointer points to the MDL describing
+*	the output buffer.
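+*
+* EXAMPLE
+*	A sketch of a METHOD_BUFFERED handler; my_query_t and the validation
+*	are illustrative:
+*
+*	static cl_status_t CL_API
+*	my_ioctl_handler(
+*		IN	cl_ioctl_handle_t	h_ioctl,
+*		IN	void			*context_1,
+*		IN	void			*context_2 )
+*	{
+*		if( cl_ioctl_out_size( h_ioctl ) < sizeof(my_query_t) )
+*		{
+*			cl_ioctl_complete( h_ioctl, CL_INVALID_PARAMETER, 0 );
+*			return CL_INVALID_PARAMETER;
+*		}
+*		/* Fill the output buffer here, then complete the request. */
+*		cl_memclr( cl_ioctl_out_buf( h_ioctl ), sizeof(my_query_t) );
+*		cl_ioctl_complete( h_ioctl, CL_SUCCESS, sizeof(my_query_t) );
+*		return CL_SUCCESS;
+*	}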
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_out_size,
+*	cl_ioctl_in_buf, cl_ioctl_in_size
+********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_out_size
+* NAME
+*	cl_ioctl_out_size
+*
+* DESCRIPTION
+*	Returns the size of the output buffer of an IOCTL.
+*
+* SYNOPSIS
+*/
+CL_EXPORT ULONG CL_API
+cl_ioctl_out_size(
+	IN	cl_ioctl_handle_t	h_ioctl );
+/*
+* PARAMETERS
+*	h_ioctl
+*		[in] Handle to an IOCTL
+*
+* RETURN VALUE
+*	Returns the size, in bytes, of the output buffer of the specified
+*	IOCTL request.
+*
+* NOTES
+*	The cl_ioctl_out_size function is only available in the kernel.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_handle_t, cl_ioctl_out_buf,
+*	cl_ioctl_in_buf, cl_ioctl_in_size
+********/
+
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+
+#else	/* CL_KERNEL */
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_request
+* NAME
+*	cl_ioctl_request
+*
+* DESCRIPTION
+*	The cl_ioctl_request is used by user-mode clients to initiate IOCTL
+*	requests to a device.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ioctl_request(
+	IN	void		*h_dev,
+	IN	uint32_t	ioctl_code,
+	IN	void		*p_in_buf,
+	IN	size_t		in_size,
+	OUT	void		*p_out_buf,
+	IN	size_t		out_size,
+	OUT	size_t		*p_ret_bytes OPTIONAL,
+	IN	void		*p_async_info OPTIONAL );
+/*
+* PARAMETERS
+*	h_dev
+*		[in] Handle to the device to which the IOCTL request is targeted.
+*		In Linux, this is a file descriptor.  In Windows, this is a file
+*		handle.
+*
+*	ioctl_code
+*		[in] Control code for the IOCTL request.
+*
+*	p_in_buf
+*		[in] Pointer to the input buffer.
+*
+*	in_size
+*		[in] Size, in bytes, of the input buffer.
+*
+*	p_out_buf
+*		[out] Pointer to the output buffer.
+*
+*	out_size
+*		[in] Size, in bytes, of the output buffer.
+*
+*	p_ret_bytes
+*		[out] Number of bytes written to the output buffer.  This parameter is
+*		mutually exclusive with the p_async_info parameter.
+*
+*	p_async_info
+*		[in] For platforms that support asynchronous I/O, supplies a pointer
+*		to that platform's async I/O structure, if any.  For Windows, this
+*		is a pointer to an OVERLAPPED structure.  This parameter is mutually
+*		exclusive with the p_ret_bytes parameter.
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_result
+*********/
+
+
+/****f* Component Library: IOCTL Object/cl_ioctl_result
+* NAME
+*	cl_ioctl_result
+*
+* DESCRIPTION
+*	Checks the status of an asynchronous IOCTL request.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ioctl_result(
+	IN	void		*h_dev,
+	IN	void		*p_async_info,
+	OUT	size_t		*p_ret_bytes,
+	IN	boolean_t	blocking );
+/*
+* PARAMETERS
+*	h_dev
+*		[in] Handle to the device to which the IOCTL request is targeted.
+*		In Linux, this is a file descriptor.  In Windows, this is a file
+*		handle.
+*
+*	p_async_info
+*		[in] For platforms that support asynchronous I/O, supplies a pointer
+*		to that platform's async I/O structure, if any.  For Windows, this
+*		is a pointer to an OVERLAPPED structure.  This must be the same
+*		as that provided in the cl_ioctl_request function.
+*
+*	p_ret_bytes
+*		[out] Number of bytes written to the output buffer.
+*
+*	blocking
+*		[in] If TRUE, indicates that the call should wait until the
+*		specified IOCTL request is complete.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the IOCTL request was successful.  p_ret_bytes contains
+*	the number of bytes written to the output buffer.
+*
+*	CL_PENDING if the IOCTL request is not yet complete.
+*
+*	Other status values to indicate errors.
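+*
+* EXAMPLE
+*	A Windows-flavored sketch of an asynchronous request; h_dev, the
+*	MY_IOCTL_QUERY control code, the buffers, and the assumption that an
+*	asynchronous cl_ioctl_request returns CL_PENDING are all illustrative:
+*
+*	OVERLAPPED	ov;
+*	size_t		ret_bytes;
+*	cl_status_t	status;
+*
+*	cl_memclr( &ov, sizeof(ov) );
+*	status = cl_ioctl_request( h_dev, MY_IOCTL_QUERY,
+*		&in_buf, sizeof(in_buf), &out_buf, sizeof(out_buf), NULL, &ov );
+*	if( status == CL_PENDING )
+*		status = cl_ioctl_result( h_dev, &ov, &ret_bytes, TRUE );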
+*
+* SEE ALSO
+*	IOCTL Object, cl_ioctl_request
+*********/
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+
+#endif	/* CL_KERNEL */
+
+#endif	/* _CL_IOCTL_H_ */
diff --git a/branches/Ndi/inc/complib/cl_irqlock.h b/branches/Ndi/inc/complib/cl_irqlock.h
new file mode 100644
index 00000000..ea139e69
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_irqlock.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of interrupt level IRQ lock object.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_IRQLOCK_H_
+#define _CL_IRQLOCK_H_
+
+
+#include <complib/cl_irqlock_osd.h>
+
+
+/****h* Component Library/Irqlock
+* NAME
+*	Irqlock
+*
+* DESCRIPTION
+*	Irqlock provides synchronization at interrupt level between threads for
+*	exclusive access to a resource.
+*
+*	The irqlock functions manipulate a cl_irqlock_t structure which should
+*	be treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_irqlock_t
+*
+*	Initialization:
+*		cl_irqlock_construct, cl_irqlock_init, cl_irqlock_destroy
+*
+*	Manipulation
+*		cl_irqlock_acquire, cl_irqlock_release
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Irqlock/cl_irqlock_construct
+* NAME
+*	cl_irqlock_construct
+*
+* DESCRIPTION
+*	The cl_irqlock_construct function initializes the state of an
+*	IRQ lock.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_irqlock_construct(
+	IN	cl_irqlock_t* const	p_irqlock );
+/*
+* PARAMETERS
+*	p_irqlock
+*		[in] Pointer to an IRQ lock structure whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_irqlock_destroy without first calling
+*	cl_irqlock_init.
+*
+*	Calling cl_irqlock_construct is a prerequisite to calling any other
+*	IRQ lock function except cl_irqlock_init.
+*
+* SEE ALSO
+*	Irqlock, cl_irqlock_init, cl_irqlock_destroy
+*********/
+
+
+/****f* Component Library: Irqlock/cl_irqlock_init
+* NAME
+*	cl_irqlock_init
+*
+* DESCRIPTION
+*	The cl_irqlock_init function initializes an IRQ lock for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_irqlock_init(
+	IN	cl_irqlock_t* const	p_irqlock,
+	IN	cl_interrupt_t* const	p_interrupt );
+/*
+* PARAMETERS
+*	p_irqlock
+*		[in] Pointer to an IRQ lock structure to initialize.
+*
+*	p_interrupt
+*		[in] Platform specific pointer conveying information about the
+*		interrupt vector and level with which to synchronize.
+*
+* RETURN VALUES
+*	CL_SUCCESS if initialization succeeded.
+*
+*	CL_ERROR if initialization failed.  Callers should call
+*	cl_irqlock_destroy to clean up any resources allocated during
+*	initialization.
+*
+* NOTES
+*	Initialize the IRQ lock structure.  Allows calling cl_irqlock_acquire
+*	and cl_irqlock_release.
+*
+*	In Linux, the p_interrupt parameter is currently ignored.
+*
+*	In Windows, the p_interrupt parameter is a pointer to a KINTERRUPT object,
+*	the value of which is supplied by a call to IoConnectInterrupt.
+*
+* SEE ALSO
+*	Irqlock, cl_irqlock_construct, cl_irqlock_destroy,
+*	cl_irqlock_acquire, cl_irqlock_release
+*********/
+
+
+/****f* Component Library: Irqlock/cl_irqlock_destroy
+* NAME
+*	cl_irqlock_destroy
+*
+* DESCRIPTION
+*	The cl_irqlock_destroy function performs all necessary cleanup of an
+*	IRQ lock.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_irqlock_destroy(
+	IN	cl_irqlock_t* const	p_irqlock );
+/*
+* PARAMETERS
+*	p_irqlock
+*		[in] Pointer to an IRQ lock structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of an IRQ lock.  This function must only
+*	be called if either cl_irqlock_construct or cl_irqlock_init has been
+*	called.
+*
+* SEE ALSO
+*	Irqlock, cl_irqlock_construct, cl_irqlock_init
+*********/
+
+
+/****f* Component Library: Irqlock/cl_irqlock_acquire
+* NAME
+*	cl_irqlock_acquire
+*
+* DESCRIPTION
+*	The cl_irqlock_acquire function acquires an IRQ lock, synchronizing
+*	execution with the interrupt specified when the lock was initialized.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_irqlock_acquire(
+	IN	cl_irqlock_t* const	p_irqlock );
+/*
+* PARAMETERS
+*	p_irqlock
+*		[in] Pointer to an IRQ lock structure to acquire.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Irqlock, cl_irqlock_release
+*********/
+
+
+/****f* Component Library: Irqlock/cl_irqlock_release
+* NAME
+*	cl_irqlock_release
+*
+* DESCRIPTION
+*	The cl_irqlock_release function releases an IRQ lock object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_irqlock_release(
+	IN	cl_irqlock_t* const	p_irqlock );
+/*
+* PARAMETERS
+*	p_irqlock
+*		[in] Pointer to an IRQ lock structure to release.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Releases an IRQ lock after a call to cl_irqlock_acquire.
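+*
+* EXAMPLE
+*	A sketch of protecting a resource shared with an interrupt service
+*	routine; my_lock is assumed to have been initialized with the
+*	interrupt object of that ISR:
+*
+*	cl_irqlock_acquire( &my_lock );
+*	/* Touch the shared resource here. */
+*	cl_irqlock_release( &my_lock );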
+*
+* SEE ALSO
+*	Irqlock, cl_irqlock_acquire
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_IRQLOCK_H_ */
diff --git a/branches/Ndi/inc/complib/cl_list.h b/branches/Ndi/inc/complib/cl_list.h
new file mode 100644
index 00000000..c05dcdef
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_list.h
@@ -0,0 +1,1364 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of list.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_LIST_H_
+#define _CL_LIST_H_
+
+
+#include <complib/cl_qlist.h>
+#include <complib/cl_qpool.h>
+
+
+/****h* Component Library/List
+* NAME
+*	List
+*
+* DESCRIPTION
+*	List stores objects in a doubly linked list.
+*
+*	Unlike quick list, users pass pointers to the object being stored, rather
+*	than to a cl_list_item_t structure.  Insertion operations on a list can
+*	fail, and callers should trap for such failures.
+*
+*	Use quick list in situations where insertion failures cannot be tolerated.
+*
+*	List is not thread safe, and users must provide serialization.
+*
+*	The list functions operate on a cl_list_t structure which should be
+*	treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Types:
+*		cl_list_iterator_t
+*
+*	Structures:
+*		cl_list_t
+*
+*	Callbacks:
+*		cl_pfn_list_apply_t, cl_pfn_list_find_t
+*
+*	Initialization/Destruction:
+*		cl_list_construct, cl_list_init, cl_list_destroy
+*
+*	Iteration:
+*		cl_list_next, cl_list_prev, cl_list_head, cl_list_tail,
+*		cl_list_end
+*
+*	Manipulation:
+*		cl_list_insert_head, cl_list_insert_tail,
+*		cl_list_insert_array_head, cl_list_insert_array_tail,
+*		cl_list_insert_prev, cl_list_insert_next,
+*		cl_list_remove_head, cl_list_remove_tail,
+*		cl_list_remove_object, cl_list_remove_item, cl_list_remove_all
+*
+*	Search:
+*		cl_is_object_in_list, cl_list_find_from_head, cl_list_find_from_tail,
+*		cl_list_apply_func
+*
+*	Attributes:
+*		cl_list_count, cl_is_list_empty, cl_is_list_inited
+*********/
+
+
+/****s* Component Library: List/cl_list_t
+* NAME
+*	cl_list_t
+*
+* DESCRIPTION
+*	List structure.
+*
+*	The cl_list_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_list
+{
+	cl_qlist_t	list;
+	cl_qpool_t	list_item_pool;
+
+} cl_list_t;
+/*
+* FIELDS
+*	list
+*		Quick list of items stored in the list.
+*
+*	list_item_pool
+*		Quick pool of list objects for storing objects in the quick list.
+*
+* SEE ALSO
+*	List
+*********/
+
+
+/****d* Component Library: List/cl_list_iterator_t
+* NAME
+*	cl_list_iterator_t
+*
+* DESCRIPTION
+*	Iterator type used to walk a list.
+*
+* SYNOPSIS
+*/
+typedef const cl_list_item_t *cl_list_iterator_t;
+/*
+* NOTES
+*	The iterator should be treated as opaque to prevent corrupting the list.
+*
+* SEE ALSO
+*	List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev,
+*	cl_list_obj
+*********/
+
+
+/****d* Component Library: List/cl_pfn_list_apply_t
+* NAME
+*	cl_pfn_list_apply_t
+*
+* DESCRIPTION
+*	The cl_pfn_list_apply_t function type defines the prototype for functions
+*	used to iterate objects in a list.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_list_apply_t)(
+	IN	void* const	p_object,
+	IN	void*		context );
+/*
+* PARAMETERS
+*	p_object
+*		[in] Pointer to an object stored in a list.
+*
+*	context
+*		[in] Context provided in a call to cl_list_apply_func.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_list_apply_func
+*	function.
+*
+* SEE ALSO
+*	List, cl_list_apply_func
+*********/
+
+
+/****d* Component Library: List/cl_pfn_list_find_t
+* NAME
+*	cl_pfn_list_find_t
+*
+* DESCRIPTION
+*	The cl_pfn_list_find_t function type defines the prototype for functions
+*	used to find objects in a list.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_list_find_t)(
+	IN	const void* const	p_object,
+	IN	void*			context );
+/*
+* PARAMETERS
+*	p_object
+*		[in] Pointer to an object stored in a list.
+*
+*	context
+*		[in] Context provided in a call to cl_list_find_from_head or
+*		cl_list_find_from_tail.
+*
+* RETURN VALUES
+*	Return CL_SUCCESS if the desired item was found.  This stops list iteration.
+*
+*	Return CL_NOT_FOUND to continue the list iteration.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_list_find_from_head
+*	and cl_list_find_from_tail functions.
+*
+* SEE ALSO
+*	List, cl_list_find_from_head, cl_list_find_from_tail
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: List/cl_list_construct
+* NAME
+*	cl_list_construct
+*
+* DESCRIPTION
+*	The cl_list_construct function constructs a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_list_construct(
+	IN	cl_list_t* const	p_list );
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to cl_list_t object whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_list_init, cl_list_destroy and cl_is_list_inited.
+*
+*	Calling cl_list_construct is a prerequisite to calling any other
+*	list function except cl_list_init.
+*
+* SEE ALSO
+*	List, cl_list_init, cl_list_destroy, cl_is_list_inited
+*********/
+
+
+/****f* Component Library: List/cl_is_list_inited
+* NAME
+*	cl_is_list_inited
+*
+* DESCRIPTION
+*	The cl_is_list_inited function returns whether a list was
+*	initialized successfully.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_list_inited(
+	IN	const cl_list_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/*
+	 * The pool is the last thing initialized.  If it is initialized, the
+	 * list is initialized too.
+	 */
+	return( cl_is_qpool_inited( &p_list->list_item_pool ) );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_list_t structure whose initialization state
+*		to check.
+*
+* RETURN VALUES
+*	TRUE if the list was initialized successfully.
+*
+*	FALSE otherwise.
+*
+* NOTES
+*	Allows checking the state of a list to determine if invoking
+*	member functions is appropriate.
+*
+* SEE ALSO
+*	List
+*********/
+
+
+/****f* Component Library: List/cl_list_init
+* NAME
+*	cl_list_init
+*
+* DESCRIPTION
+*	The cl_list_init function initializes a list for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_list_init(
+	IN	cl_list_t* const	p_list,
+	IN	const size_t		min_items );
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to cl_list_t structure to initialize.
+*
+*	min_items
+*		[in] Minimum number of items that can be stored.
+* allocations to allow storing the minimum number of items are performed
+* at initialization time.
+*
+* RETURN VALUES
+* CL_SUCCESS if the list was initialized successfully.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for initialization.
+*
+* NOTES
+* The list will always be able to store at least as many items as specified
+* by the min_items parameter.
+*
+* SEE ALSO
+* List, cl_list_construct, cl_list_destroy, cl_list_insert_head,
+* cl_list_insert_tail, cl_list_remove_head, cl_list_remove_tail
+*********/
+
+
+/****f* Component Library: List/cl_list_destroy
+* NAME
+* cl_list_destroy
+*
+* DESCRIPTION
+* The cl_list_destroy function destroys a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_list_destroy(
+ IN cl_list_t* const p_list );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to cl_list_t structure to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_list_destroy does not affect any of the objects stored in the list,
+* but does release all memory allocated internally. Further operations
+* should not be attempted on the list after cl_list_destroy is invoked.
+*
+* This function should only be called after a call to cl_list_construct
+* or cl_list_init.
+*
+* In debug builds, cl_list_destroy asserts if the list is not empty.
+*
+* SEE ALSO
+* List, cl_list_construct, cl_list_init
+*********/
+
+
+/****f* Component Library: List/cl_is_list_empty
+* NAME
+* cl_is_list_empty
+*
+* DESCRIPTION
+* The cl_is_list_empty function returns whether a list is empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_list_empty(
+ IN const cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+ return( cl_is_qlist_empty( &p_list->list ) );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure.
+*
+* RETURN VALUES
+* TRUE if the specified list is empty.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* List, cl_list_count, cl_list_remove_all
+*********/
+
+
+/****f* Component Library: List/cl_list_insert_head
+* NAME
+* cl_list_insert_head
+*
+* DESCRIPTION
+* The cl_list_insert_head function inserts an object at the head of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_status_t CL_API
+cl_list_insert_head(
+ IN cl_list_t* const p_list,
+ IN const void* const p_object )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* Get a list item to add to the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+ if( !p_pool_obj )
+ return( CL_INSUFFICIENT_MEMORY );
+
+ p_pool_obj->list_obj.p_object = p_object;
+ cl_qlist_insert_head( &p_list->list, &p_pool_obj->list_obj.list_item );
+ return( CL_SUCCESS );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the object.
+*
+* p_object
+* [in] Pointer to an object to insert into the list.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* NOTES
+* Inserts the specified object at the head of the list. List insertion
+* operations are guaranteed to work for the minimum number of items as
+* specified in cl_list_init by the min_items parameter.
+*
+* SEE ALSO
+* List, cl_list_insert_tail, cl_list_insert_array_head,
+* cl_list_insert_array_tail, cl_list_insert_prev, cl_list_insert_next,
+* cl_list_remove_head
+*********/
+
+
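A minimal user-mode sketch of the lifecycle described above, assuming the branches/Ndi/inc include layout; the 8-item minimum and the stored int are arbitrary choices:

    #include <complib/cl_list.h>

    static void
    list_basics( void )
    {
        cl_list_t list;
        int val = 42;
        int *p_val;

        cl_list_construct( &list );
        /* Pre-allocate room for 8 items; insertions beyond that may fail. */
        if( cl_list_init( &list, 8 ) != CL_SUCCESS )
            return;

        /* Insertions can fail, so always check the status. */
        if( cl_list_insert_tail( &list, &val ) == CL_SUCCESS )
        {
            p_val = (int*)cl_list_remove_head( &list );
            CL_ASSERT( p_val == &val );
        }

        /* The list must be empty before destruction (debug builds assert). */
        cl_list_destroy( &list );
    }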
+/****f* Component Library: List/cl_list_insert_tail
+* NAME
+* cl_list_insert_tail
+*
+* DESCRIPTION
+* The cl_list_insert_tail function inserts an object at the tail of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_status_t CL_API
+cl_list_insert_tail(
+ IN cl_list_t* const p_list,
+ IN const void* const p_object )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* Get a list item to add to the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+ if( !p_pool_obj )
+ return( CL_INSUFFICIENT_MEMORY );
+
+ p_pool_obj->list_obj.p_object = p_object;
+ cl_qlist_insert_tail( &p_list->list, &p_pool_obj->list_obj.list_item );
+ return( CL_SUCCESS );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the object.
+*
+* p_object
+* [in] Pointer to an object to insert into the list.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* NOTES
+* Inserts the specified object at the tail of the list. List insertion
+* operations are guaranteed to work for the minimum number of items as
+* specified in cl_list_init by the min_items parameter.
+*
+* SEE ALSO
+* List, cl_list_insert_head, cl_list_insert_array_head,
+* cl_list_insert_array_tail, cl_list_insert_prev, cl_list_insert_next,
+* cl_list_remove_tail
+*********/
+
+
+/****f* Component Library: List/cl_list_insert_array_head
+* NAME
+* cl_list_insert_array_head
+*
+* DESCRIPTION
+* The cl_list_insert_array_head function inserts an array of objects
+* at the head of a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_list_insert_array_head(
+ IN cl_list_t* const p_list,
+ IN const void* const p_array,
+ IN uint32_t item_count,
+ IN const uint32_t item_size );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the objects.
+*
+* p_array
+* [in] Pointer to the first object in an array.
+*
+* item_count
+* [in] Number of objects in the array.
+*
+* item_size
+* [in] Size of the objects added to the list. This is the stride in the
+* array from one object to the next.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* NOTES
+* Inserts all objects in the array to the head of the list, preserving the
+* ordering of the objects. If not successful, no items are added.
+* List insertion operations are guaranteed to work for the minimum number
+* of items as specified in cl_list_init by the min_items parameter.
+*
+* SEE ALSO
+* List, cl_list_insert_array_tail, cl_list_insert_head, cl_list_insert_tail,
+* cl_list_insert_prev, cl_list_insert_next
+*********/
+
+
+/****f* Component Library: List/cl_list_insert_array_tail
+* NAME
+* cl_list_insert_array_tail
+*
+* DESCRIPTION
+* The cl_list_insert_array_tail function inserts an array of objects
+* at the tail of a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_list_insert_array_tail(
+ IN cl_list_t* const p_list,
+ IN const void* const p_array,
+ IN uint32_t item_count,
+ IN const uint32_t item_size);
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the objects.
+*
+* p_array
+* [in] Pointer to the first object in an array.
+*
+* item_count
+* [in] Number of objects in the array.
+*
+* item_size
+* [in] Size of the objects added to the list. This is the stride in the
+* array from one object to the next.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* NOTES
+* Inserts all objects in the array to the tail of the list, preserving the
+* ordering of the objects. If not successful, no items are added.
+* List insertion operations are guaranteed to work for the minimum number
+* of items as specified in cl_list_init by the min_items parameter.
+*
+* SEE ALSO
+* List, cl_list_insert_array_head, cl_list_insert_head, cl_list_insert_tail,
+* cl_list_insert_prev, cl_list_insert_next
+*********/
+
+
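A sketch of the array variants; because item_size is the stride from one object to the next, an array of structures can be inserted directly. The port_info_t type and its fields are illustrative, not part of the library:

    typedef struct _port_info
    {
        uint32_t lid;    /* illustrative fields */
        uint32_t state;

    } port_info_t;

    static cl_status_t
    queue_ports(
        IN cl_list_t* const p_list,
        IN port_info_t* const ports,
        IN const uint32_t count )
    {
        /* Inserts ports[0]..ports[count-1] at the tail, preserving order.
         * On failure nothing is inserted, so no cleanup is needed. */
        return cl_list_insert_array_tail( p_list, ports, count,
            sizeof(port_info_t) );
    }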
+/****f* Component Library: List/cl_list_insert_next
+* NAME
+* cl_list_insert_next
+*
+* DESCRIPTION
+* The cl_list_insert_next function inserts an object in a list after
+* the object associated with a given iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_status_t CL_API
+cl_list_insert_next(
+ IN cl_list_t* const p_list,
+ IN const cl_list_iterator_t iterator,
+ IN const void* const p_object )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* Get a list item to add to the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+ if( !p_pool_obj )
+ return( CL_INSUFFICIENT_MEMORY );
+
+ p_pool_obj->list_obj.p_object = p_object;
+ cl_qlist_insert_next( &p_list->list, (cl_list_item_t*)iterator,
+ &p_pool_obj->list_obj.list_item );
+ return( CL_SUCCESS );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the object.
+*
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev.
+*
+* p_object
+* [in] Pointer to an object to insert into the list.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* SEE ALSO
+* List, cl_list_insert_prev, cl_list_insert_head, cl_list_insert_tail,
+* cl_list_insert_array_head, cl_list_insert_array_tail
+*********/
+
+
+/****f* Component Library: List/cl_list_insert_prev
+* NAME
+* cl_list_insert_prev
+*
+* DESCRIPTION
+* The cl_list_insert_prev function inserts an object in a list before
+* the object associated with a given iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_status_t CL_API
+cl_list_insert_prev(
+ IN cl_list_t* const p_list,
+ IN const cl_list_iterator_t iterator,
+ IN const void* const p_object )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* Get a list item to add to the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qpool_get( &p_list->list_item_pool );
+ if( !p_pool_obj )
+ return( CL_INSUFFICIENT_MEMORY );
+
+ p_pool_obj->list_obj.p_object = p_object;
+ cl_qlist_insert_prev( &p_list->list, (cl_list_item_t*)iterator,
+ &p_pool_obj->list_obj.list_item );
+ return( CL_SUCCESS );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure into which to insert the object.
+*
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev.
+*
+* p_object
+* [in] Pointer to an object to insert into the list.
+*
+* RETURN VALUES
+* CL_SUCCESS if the insertion was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the insertion.
+*
+* SEE ALSO
+* List, cl_list_insert_next, cl_list_insert_head, cl_list_insert_tail,
+* cl_list_insert_array_head, cl_list_insert_array_tail
+*********/
+
+
+/****f* Component Library: List/cl_list_remove_head
+* NAME
+* cl_list_remove_head
+*
+* DESCRIPTION
+* The cl_list_remove_head function removes an object from the head of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_list_remove_head(
+ IN cl_list_t* const p_list )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* See if the list is empty. */
+ if( cl_is_qlist_empty( &p_list->list ) )
+ return( NULL );
+
+ /* Get the item at the head of the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qlist_remove_head( &p_list->list );
+
+ /* Place the pool item back into the pool. */
+ cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)p_pool_obj );
+
+ return( (void*)p_pool_obj->list_obj.p_object );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure from which to remove an object.
+*
+* RETURN VALUES
+* Returns the pointer to the object formerly at the head of the list.
+*
+* NULL if the list was empty.
+*
+* SEE ALSO
+* List, cl_list_remove_tail, cl_list_remove_all, cl_list_remove_object,
+* cl_list_remove_item, cl_list_insert_head
+*********/
+
+
+/****f* Component Library: List/cl_list_remove_tail
+* NAME
+* cl_list_remove_tail
+*
+* DESCRIPTION
+* The cl_list_remove_tail function removes an object from the tail of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_list_remove_tail(
+ IN cl_list_t* const p_list )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* See if the list is empty. */
+ if( cl_is_qlist_empty( &p_list->list ) )
+ return( NULL );
+
+ /* Get the item at the tail of the list. */
+ p_pool_obj = (cl_pool_obj_t*)cl_qlist_remove_tail( &p_list->list );
+
+ /* Place the list item back into the pool. */
+ cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)p_pool_obj );
+
+ return( (void*)p_pool_obj->list_obj.p_object );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure from which to remove an object.
+*
+* RETURN VALUES
+* Returns the pointer to the object formerly at the tail of the list.
+*
+* NULL if the list was empty.
+*
+* SEE ALSO
+* List, cl_list_remove_head, cl_list_remove_all, cl_list_remove_object,
+* cl_list_remove_item, cl_list_insert_tail
+*********/
+
+
+/****f* Component Library: List/cl_list_remove_all
+* NAME
+* cl_list_remove_all
+*
+* DESCRIPTION
+* The cl_list_remove_all function removes all objects from a list,
+* leaving it empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_list_remove_all(
+ IN cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ /* Return all the list items to the pool. */
+ cl_qpool_put_list( &p_list->list_item_pool, &p_list->list );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure from which to remove all objects.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* List, cl_list_remove_head, cl_list_remove_tail, cl_list_remove_object,
+* cl_list_remove_item
+*********/
+
+
+/****f* Component Library: List/cl_list_remove_object
+* NAME
+* cl_list_remove_object
+*
+* DESCRIPTION
+* The cl_list_remove_object function removes a specific object from a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_list_remove_object(
+ IN cl_list_t* const p_list,
+ IN const void* const p_object );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure from which to remove the object.
+*
+* p_object
+* [in] Pointer to an object to remove from the list.
+*
+* RETURN VALUES
+* CL_SUCCESS if the object was removed.
+*
+* CL_NOT_FOUND if the object was not found in the list.
+*
+* NOTES
+* Removes the first occurrence of an object from a list.
+*
+* SEE ALSO
+* List, cl_list_remove_item, cl_list_remove_head, cl_list_remove_tail,
+* cl_list_remove_all
+*********/
+
+
+/****f* Component Library: List/cl_list_remove_item
+* NAME
+* cl_list_remove_item
+*
+* DESCRIPTION
+* The cl_list_remove_item function removes from a list the object
+* associated with a given iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_list_remove_item(
+ IN cl_list_t* const p_list,
+ IN const cl_list_iterator_t iterator )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ cl_qlist_remove_item( &p_list->list, (cl_list_item_t*)iterator );
+
+ /* Place the list item back into the pool. */
+ cl_qpool_put( &p_list->list_item_pool, (cl_pool_item_t*)iterator );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure from which to remove the item.
+*
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* List, cl_list_remove_object, cl_list_remove_head, cl_list_remove_tail,
+* cl_list_remove_all
+*********/
+
+
+/****f* Component Library: List/cl_is_object_in_list
+* NAME
+* cl_is_object_in_list
+*
+* DESCRIPTION
+* The cl_is_object_in_list function returns whether an object
+* is stored in a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT boolean_t CL_API
+cl_is_object_in_list(
+ IN const cl_list_t* const p_list,
+ IN const void* const p_object );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure in which to look for the object.
+*
+* p_object
+* [in] Pointer to an object stored in a list.
+*
+* RETURN VALUES
+* TRUE if p_object was found in the list.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* List
+*********/
+
+
+/****f* Component Library: List/cl_list_end
+* NAME
+* cl_list_end
+*
+* DESCRIPTION
+* The cl_list_end function returns the list iterator for
+* the end of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_iterator_t CL_API
+cl_list_end(
+ IN const cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ return( cl_qlist_end( &p_list->list ) );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure for which the end iterator
+* is to be returned.
+*
+* RETURN VALUE
+* cl_list_iterator_t for the end of the list.
+*
+* NOTES
+* The end iterator does not correspond to a stored object. It is useful
+* for testing the iterators returned by cl_list_head, cl_list_tail,
+* cl_list_next, and cl_list_prev.
+*
+* SEE ALSO
+* List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev,
+* cl_list_obj
+*********/
+
+
+/****f* Component Library: List/cl_list_head
+* NAME
+* cl_list_head
+*
+* DESCRIPTION
+* The cl_list_head function returns a list iterator for
+* the head of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_iterator_t CL_API
+cl_list_head(
+ IN const cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ return( cl_qlist_head( &p_list->list ) );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure for which the iterator for the
+* object at the head is to be returned.
+*
+* RETURN VALUES
+* cl_list_iterator_t for the head of the list.
+*
+* cl_list_iterator_t for the end of the list if the list is empty.
+*
+* NOTES
+* Use cl_list_obj to retrieve the object associated with the
+* returned cl_list_iterator_t.
+*
+* SEE ALSO
+* List, cl_list_tail, cl_list_next, cl_list_prev, cl_list_end,
+* cl_list_obj
+*********/
+
+
+/****f* Component Library: List/cl_list_tail
+* NAME
+* cl_list_tail
+*
+* DESCRIPTION
+* The cl_list_tail function returns a list iterator for
+* the tail of a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_iterator_t CL_API
+cl_list_tail(
+ IN const cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ return( cl_qlist_tail( &p_list->list ) );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure for which the iterator for the
+* object at the tail is to be returned.
+*
+* RETURN VALUES
+* cl_list_iterator_t for the tail of the list.
+*
+* cl_list_iterator_t for the end of the list if the list is empty.
+*
+* NOTES
+* Use cl_list_obj to retrieve the object associated with the
+* returned cl_list_iterator_t.
+*
+* SEE ALSO
+* List, cl_list_head, cl_list_next, cl_list_prev, cl_list_end,
+* cl_list_obj
+*********/
+
+
+/****f* Component Library: List/cl_list_next
+* NAME
+* cl_list_next
+*
+* DESCRIPTION
+* The cl_list_next function returns a list iterator for the object stored
+* in a list after the object associated with a given list iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_iterator_t CL_API
+cl_list_next(
+ IN const cl_list_iterator_t iterator )
+{
+ CL_ASSERT( iterator );
+
+ return( cl_qlist_next( iterator ) );
+}
+/*
+* PARAMETERS
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev.
+*
+* RETURN VALUES
+* cl_list_iterator_t for the object following the object associated with
+* the list iterator specified by the iterator parameter.
+*
+* cl_list_iterator_t for the end of the list if no object follows the
+* specified object.
+*
+* NOTES
+* Use cl_list_obj to retrieve the object associated with the
+* returned cl_list_iterator_t.
+*
+* SEE ALSO
+* List, cl_list_prev, cl_list_head, cl_list_tail, cl_list_end,
+* cl_list_obj
+*********/
+
+
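The iterator functions combine as in this minimal sketch, which walks a list of int pointers from head to end using cl_list_obj (documented below) to recover each stored pointer:

    static int
    sum_list( const cl_list_t* const p_list )
    {
        cl_list_iterator_t itor;
        int total = 0;

        for( itor = cl_list_head( p_list );
            itor != cl_list_end( p_list );
            itor = cl_list_next( itor ) )
        {
            /* cl_list_obj recovers the user pointer stored at this position. */
            total += *(int*)cl_list_obj( itor );
        }
        return total;
    }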
+/****f* Component Library: List/cl_list_prev
+* NAME
+* cl_list_prev
+*
+* DESCRIPTION
+* The cl_list_prev function returns a list iterator for the object stored
+* in a list before the object associated with a given list iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_iterator_t CL_API
+cl_list_prev(
+ IN const cl_list_iterator_t iterator )
+{
+ CL_ASSERT( iterator );
+
+ return( cl_qlist_prev( iterator ) );
+}
+/*
+* PARAMETERS
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev.
+*
+* RETURN VALUES
+* cl_list_iterator_t for the object preceding the object associated with
+* the list iterator specified by the iterator parameter.
+*
+* cl_list_iterator_t for the end of the list if no object precedes the
+* specified object.
+*
+* NOTES
+* Use cl_list_obj to retrieve the object associated with the
+* returned cl_list_iterator_t.
+*
+* SEE ALSO
+* List, cl_list_next, cl_list_head, cl_list_tail, cl_list_end,
+* cl_list_obj
+*********/
+
+
+/****f* Component Library: List/cl_list_obj
+* NAME
+* cl_list_obj
+*
+* DESCRIPTION
+* The cl_list_obj function returns the object associated
+* with a list iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_list_obj(
+ IN const cl_list_iterator_t iterator )
+{
+ CL_ASSERT( iterator );
+
+ return( (void*)((cl_pool_obj_t*)iterator)->list_obj.p_object );
+}
+/*
+* PARAMETERS
+* iterator
+* [in] cl_list_iterator_t returned by a previous call to cl_list_head,
+* cl_list_tail, cl_list_next, or cl_list_prev whose object is requested.
+*
+* RETURN VALUE
+* Pointer to the object associated with the list iterator specified
+* by the iterator parameter.
+*
+* SEE ALSO
+* List, cl_list_head, cl_list_tail, cl_list_next, cl_list_prev
+*********/
+
+
+/****f* Component Library: List/cl_list_find_from_head
+* NAME
+* cl_list_find_from_head
+*
+* DESCRIPTION
+* The cl_list_find_from_head function uses a specified function
+* to search for an object starting from the head of a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT const cl_list_iterator_t CL_API
+cl_list_find_from_head(
+ IN const cl_list_t* const p_list,
+ IN cl_pfn_list_find_t pfn_func,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure to search.
+*
+* pfn_func
+* [in] Function invoked to determine if a match was found.
+* See the cl_pfn_list_find_t function type declaration for details
+* about the callback function.
+*
+* context
+* [in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+* Returns the iterator for the object if found.
+*
+* Returns the iterator for the list end otherwise.
+*
+* NOTES
+* cl_list_find_from_head does not remove the found object from
+* the list. The iterator for the object is returned when the function
+* provided by the pfn_func parameter returns CL_SUCCESS. The function
+* specified by the pfn_func parameter must not perform any list
+* operations as these would corrupt the list.
+*
+* SEE ALSO
+* List, cl_list_find_from_tail, cl_list_apply_func,
+* cl_pfn_list_find_t
+*********/
+
+
+/****f* Component Library: List/cl_list_find_from_tail
+* NAME
+* cl_list_find_from_tail
+*
+* DESCRIPTION
+* The cl_list_find_from_tail function uses a specified function
+* to search for an object starting from the tail of a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT const cl_list_iterator_t CL_API
+cl_list_find_from_tail(
+ IN const cl_list_t* const p_list,
+ IN cl_pfn_list_find_t pfn_func,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure to search.
+*
+* pfn_func
+* [in] Function invoked to determine if a match was found.
+* See the cl_pfn_list_find_t function type declaration for details
+* about the callback function.
+*
+* context
+* [in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+* Returns the iterator for the object if found.
+*
+* Returns the iterator for the list end otherwise.
+*
+* NOTES
+* cl_list_find_from_tail does not remove the found object from
+* the list. The iterator for the object is returned when the function
+* provided by the pfn_func parameter returns CL_SUCCESS. The function
+* specified by the pfn_func parameter must not perform any list
+* operations as these would corrupt the list.
+*
+* SEE ALSO
+* List, cl_list_find_from_head, cl_list_apply_func,
+* cl_pfn_list_find_t
+*********/
+
+
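A sketch tying the find callback to removal: the callback returns CL_SUCCESS to stop the search, and the returned iterator can then be handed to cl_list_remove_item (documented earlier). The helper names are illustrative:

    static cl_status_t
    match_ptr(
        IN const void* const p_object,
        IN void* context )
    {
        /* Stop the search when the stored pointer equals the target. */
        return( (p_object == context) ? CL_SUCCESS : CL_NOT_FOUND );
    }

    static boolean_t
    find_and_remove(
        IN cl_list_t* const p_list,
        IN void* const p_target )
    {
        cl_list_iterator_t itor =
            cl_list_find_from_head( p_list, match_ptr, p_target );

        if( itor == cl_list_end( p_list ) )
            return FALSE;   /* not stored in the list */

        cl_list_remove_item( p_list, itor );
        return TRUE;
    }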
+/****f* Component Library: List/cl_list_apply_func
+* NAME
+* cl_list_apply_func
+*
+* DESCRIPTION
+* The cl_list_apply_func function executes a specified function for every
+* object stored in a list.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_list_apply_func(
+ IN const cl_list_t* const p_list,
+ IN cl_pfn_list_apply_t pfn_func,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure to iterate.
+*
+* pfn_func
+* [in] Function invoked for every item in a list.
+* See the cl_pfn_list_apply_t function type declaration for details
+* about the callback function.
+*
+* context
+* [in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_list_apply_func invokes the specified callback function for every
+* object stored in the list, starting from the head. The function specified
+* by the pfn_func parameter must not perform any list operations as these
+* would corrupt the list.
+*
+* SEE ALSO
+* List, cl_list_find_from_head, cl_list_find_from_tail,
+* cl_pfn_list_apply_t
+*********/
+
+
+/****f* Component Library: List/cl_list_count
+* NAME
+* cl_list_count
+*
+* DESCRIPTION
+* The cl_list_count function returns the number of objects stored in a list.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_list_count(
+ IN const cl_list_t* const p_list )
+{
+ CL_ASSERT( p_list );
+ CL_ASSERT( cl_is_qpool_inited( &p_list->list_item_pool ) );
+
+ return( cl_qlist_count( &p_list->list ) );
+}
+/*
+* PARAMETERS
+* p_list
+* [in] Pointer to a cl_list_t structure whose objects to count.
+*
+* RETURN VALUES
+* Number of objects stored in the specified list.
+*
+* SEE ALSO
+* List
+*********/
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+#endif /* _CL_LIST_H_ */
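Rounding out the list API above, a sketch of cl_list_apply_func: unlike the find functions, the callback returns nothing and every object is always visited. The counter logic is illustrative:

    static void
    count_nonzero(
        IN void* const p_object,
        IN void* context )
    {
        /* No list operations are allowed inside the callback. */
        if( *(int*)p_object )
            (*(size_t*)context)++;
    }

    static size_t
    nonzero_items(
        IN const cl_list_t* const p_list )
    {
        size_t count = 0;

        cl_list_apply_func( p_list, count_nonzero, &count );
        return count;
    }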
diff --git a/branches/Ndi/inc/complib/cl_log.h b/branches/Ndi/inc/complib/cl_log.h
new file mode 100644
index 00000000..389c6c6a
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_log.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of logging mechanisms.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_LOG_H_
+#define _CL_LOG_H_
+
+
+#include <complib/cl_types.h>
+
+
+/****h* Component Library/Log Provider
+* NAME
+* Log Provider
+*
+* DESCRIPTION
+* The log provider allows users to log information in a system log instead of
+* the console or debugger target.
+**********/
+
+
+/****d* Component Library: Log Provider/cl_log_type_t
+* NAME
+* cl_log_type_t
+*
+* DESCRIPTION
+* The cl_log_type_t enumerated type is used to differentiate between
+* different types of log entries.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_log_type
+{
+ CL_LOG_INFO,
+ CL_LOG_WARN,
+ CL_LOG_ERROR
+
+} cl_log_type_t;
+/*
+* VALUES
+* CL_LOG_INFO
+* Indicates a log entry is purely informational.
+*
+* CL_LOG_WARN
+* Indicates a log entry is a warning but non-fatal.
+*
+* CL_LOG_ERROR
+* Indicates a log entry is a fatal error.
+*
+* SEE ALSO
+* Log Provider, cl_log_event
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Log Provider/cl_log_event
+* NAME
+* cl_log_event
+*
+* DESCRIPTION
+* The cl_log_event function adds a new entry to the system log.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_log_event(
+ IN const char* const name,
+ IN const cl_log_type_t type,
+ IN const char* const message,
+ IN const void* const p_data OPTIONAL,
+ IN const uint32_t data_len );
+/*
+* PARAMETERS
+* name
+* [in] Pointer to an ANSI string containing the name of the source for
+* the log entry.
+*
+* type
+* [in] Defines the type of log entry to add to the system log.
+* See the definition of cl_log_type_t for acceptable values.
+*
+* message
+* [in] Pointer to an ANSI string containing the text for the log entry.
+* The message should not be terminated with a new line, as the log
+* provider appends a new line to all log entries.
+*
+* p_data
+* [in] Optional pointer to data providing context for the log entry.
+* At most 256 bytes of data can be successfully logged.
+*
+* data_len
+* [in] Length of the buffer pointed to by the p_data parameter. Ignored
+* if p_data is NULL.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* If the data length exceeds the maximum supported, the event is logged
+* without its accompanying data.
+*
+* SEE ALSO
+* Log Provider, cl_log_type_t
+*********/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _CL_LOG_H_ */
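A usage sketch for the log provider; the source name "ibal" and the message are illustrative:

    static void
    report_fatal( const uint32_t err_code )
    {
        /* Log an error with 4 bytes of context data, well under the
         * 256-byte limit; note there is no trailing newline. */
        cl_log_event( "ibal", CL_LOG_ERROR,
            "Resource allocation failed", &err_code, sizeof(err_code) );
    }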
diff --git a/branches/Ndi/inc/complib/cl_map.h b/branches/Ndi/inc/complib/cl_map.h
new file mode 100644
index 00000000..d7732988
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_map.h
@@ -0,0 +1,875 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of map, a binary tree.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_MAP_H_
+#define _CL_MAP_H_
+
+
+#include <complib/cl_qmap.h>
+#include <complib/cl_qpool.h>
+
+
+/****h* Component Library/Map
+* NAME
+* Map
+*
+* DESCRIPTION
+* Map implements a binary tree that stores user objects. Each item stored
+* in a map has a unique 64-bit key (duplicates are not allowed). Map
+* provides the ability to efficiently search for an item given a key.
+*
+* Map may allocate memory when inserting objects, and can therefore fail
+* operations due to insufficient memory. Use quick map in situations where
+* such insertion failures cannot be tolerated.
+*
+* Map is not thread safe, and users must provide serialization when adding
+* and removing items from the map.
+*
+* The map functions operate on a cl_map_t structure which should be treated
+* as opaque and should be manipulated only through the provided functions.
+*
+* SEE ALSO
+* Types:
+* cl_map_iterator_t
+*
+* Structures:
+* cl_map_t, cl_map_item_t, cl_map_obj_t
+*
+* Item Manipulation:
+* cl_map_obj, cl_map_key
+*
+* Initialization:
+* cl_map_construct, cl_map_init, cl_map_destroy
+*
+* Iteration:
+* cl_map_end, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+*
+* Manipulation
+* cl_map_insert, cl_map_get, cl_map_remove_item, cl_map_remove,
+* cl_map_remove_all, cl_map_merge, cl_map_delta
+*
+* Attributes:
+* cl_map_count, cl_is_map_empty, cl_is_map_inited
+*********/
+
+
+/****s* Component Library: Map/cl_map_t
+* NAME
+* cl_map_t
+*
+* DESCRIPTION
+* Map structure.
+*
+* The cl_map_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_map
+{
+ cl_qmap_t qmap;
+ cl_qpool_t pool;
+
+} cl_map_t;
+/*
+* FIELDS
+* qmap
+* Quick map object that maintains the map.
+*
+* pool
+* Pool of cl_map_obj_t structures used to store user objects
+* in the map.
+*
+* SEE ALSO
+* Map, cl_map_obj_t
+*********/
+
+
+/****d* Component Library: Map/cl_map_iterator_t
+* NAME
+* cl_map_iterator_t
+*
+* DESCRIPTION
+* Iterator type used to walk a map.
+*
+* SYNOPSIS
+*/
+typedef const cl_map_item_t *cl_map_iterator_t;
+/*
+* NOTES
+* The iterator should be treated as opaque to prevent corrupting the map.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev, cl_map_key
+*********/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****f* Component Library: Map/cl_map_count
+* NAME
+* cl_map_count
+*
+* DESCRIPTION
+* The cl_map_count function returns the number of items stored
+* in a map.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_map_count(
+ IN const cl_map_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( cl_qmap_count( &p_map->qmap ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map whose item count to return.
+*
+* RETURN VALUE
+* Returns the number of items stored in the map.
+*
+* SEE ALSO
+* Map, cl_is_map_empty
+*********/
+
+
+/****f* Component Library: Map/cl_is_map_empty
+* NAME
+* cl_is_map_empty
+*
+* DESCRIPTION
+* The cl_is_map_empty function returns whether a map is empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_map_empty(
+ IN const cl_map_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( cl_is_qmap_empty( &p_map->qmap ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map to test for emptiness.
+*
+* RETURN VALUES
+* TRUE if the map is empty.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* Map, cl_map_count, cl_map_remove_all
+*********/
+
+
+/****f* Component Library: Map/cl_map_key
+* NAME
+* cl_map_key
+*
+* DESCRIPTION
+* The cl_map_key function retrieves the key value of a map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint64_t CL_API
+cl_map_key(
+ IN const cl_map_iterator_t itor )
+{
+ return( cl_qmap_key( itor ) );
+}
+/*
+* PARAMETERS
+* itor
+* [in] Iterator for the item whose key to return.
+*
+* RETURN VALUE
+* Returns the 64-bit key value for the specified iterator.
+*
+* NOTES
+* The iterator specified by the itor parameter must have been retrieved by
+* a previous call to cl_map_head, cl_map_tail, cl_map_next, or cl_map_prev.
+*
+* The key value is set in a call to cl_map_insert.
+*
+* SEE ALSO
+* Map, cl_map_insert, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+*********/
+
+
+/****f* Component Library: Map/cl_map_construct
+* NAME
+* cl_map_construct
+*
+* DESCRIPTION
+* The cl_map_construct function constructs a map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_map_construct(
+ IN cl_map_t* const p_map );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_map_t structure to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling cl_map_init, cl_map_destroy, and cl_is_map_inited.
+*
+* Calling cl_map_construct is a prerequisite to calling any other
+* map function except cl_map_init.
+*
+* SEE ALSO
+* Map, cl_map_init, cl_map_destroy, cl_is_map_inited
+*********/
+
+
+/****f* Component Library: Map/cl_is_map_inited
+* NAME
+* cl_is_map_inited
+*
+* DESCRIPTION
+* The cl_is_map_inited function returns whether a map was
+* successfully initialized.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_map_inited(
+ IN const cl_map_t* const p_map )
+{
+ /*
+ * The map's pool of map items is the last thing initialized.
+ * We can therefore use it to test for initialization.
+ */
+ return( cl_is_qpool_inited( &p_map->pool ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_map_t structure whose initialization state
+* to check.
+*
+* RETURN VALUES
+* TRUE if the map was initialized successfully.
+*
+* FALSE otherwise.
+*
+* NOTES
+* Allows checking the state of a map to determine if invoking
+* member functions is appropriate.
+*
+* SEE ALSO
+* Map
+*********/
+
+
+/****f* Component Library: Map/cl_map_init
+* NAME
+* cl_map_init
+*
+* DESCRIPTION
+* The cl_map_init function initializes a map for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_map_init(
+ IN cl_map_t* const p_map,
+ IN const size_t min_items );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_map_t structure to initialize.
+*
+* min_items
+* [in] Minimum number of items that can be stored. All necessary
+* allocations to allow storing the minimum number of items are performed
+* at initialization time.
+*
+* RETURN VALUES
+* CL_SUCCESS if the map was initialized successfully.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for initialization.
+*
+* NOTES
+* Allows calling map manipulation functions.
+*
+* SEE ALSO
+* Map, cl_map_destroy, cl_map_insert, cl_map_remove
+*********/
+
+
+/****f* Component Library: Map/cl_map_destroy
+* NAME
+* cl_map_destroy
+*
+* DESCRIPTION
+* The cl_map_destroy function destroys a map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_map_destroy(
+ IN cl_map_t* const p_map );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified map. Further
+* operations should not be attempted on the map. cl_map_destroy does
+* not affect any of the objects stored in the map.
+* This function should only be called after a call to cl_map_construct.
+*
+* In debug builds, cl_map_destroy asserts that the map is empty.
+*
+* SEE ALSO
+* Map, cl_map_construct, cl_map_init
+*********/
+
+
+/****f* Component Library: Map/cl_map_end
+* NAME
+* cl_map_end
+*
+* DESCRIPTION
+* The cl_map_end function returns the iterator for the end of a map.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_map_iterator_t CL_API
+cl_map_end(
+ IN const cl_map_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( cl_qmap_end( &p_map->qmap ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_map_t structure whose end to return.
+*
+* RETURN VALUE
+* Iterator for the end of the map.
+*
+* NOTES
+* cl_map_end is useful for determining the validity of map items returned
+* by cl_map_head, cl_map_tail, cl_map_next, and cl_map_prev. If the iterator
+* returned by any of these functions compares equal to the end iterator,
+* the end of the map was encountered.
+* When using cl_map_head or cl_map_tail, this condition indicates that
+* the map is empty.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+*********/
+
+
+/****f* Component Library: Map/cl_map_head
+* NAME
+* cl_map_head
+*
+* DESCRIPTION
+* The cl_map_head function returns the map item with the lowest key
+* value stored in a map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_iterator_t CL_API
+cl_map_head(
+ IN const cl_map_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( cl_qmap_head( &p_map->qmap ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map whose item with the lowest key is returned.
+*
+* RETURN VALUES
+* Iterator for the object with the lowest key in the map.
+*
+* Iterator for the map end if the map was empty.
+*
+* NOTES
+* cl_map_head does not remove the object from the map.
+*
+* SEE ALSO
+* Map, cl_map_tail, cl_map_next, cl_map_prev, cl_map_end
+*********/
+
+
+/****f* Component Library: Map/cl_map_tail
+* NAME
+* cl_map_tail
+*
+* DESCRIPTION
+* The cl_map_tail function returns the map item with the highest key
+* value stored in a map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_iterator_t CL_API
+cl_map_tail(
+ IN const cl_map_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( cl_qmap_tail( &p_map->qmap ) );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map whose item with the highest key
+* is returned.
+*
+* RETURN VALUES
+* Iterator for the object with the highest key in the map.
+*
+* Iterator for the map end if the map was empty.
+*
+* NOTES
+* cl_map_tail does not remove the object from the map.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_next, cl_map_prev, cl_map_end
+*********/
+
+
+/****f* Component Library: Map/cl_map_next
+* NAME
+* cl_map_next
+*
+* DESCRIPTION
+* The cl_map_next function returns the map item with the next higher
+* key value than a specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_iterator_t CL_API
+cl_map_next(
+ IN const cl_map_iterator_t itor )
+{
+ CL_ASSERT( itor );
+ return( cl_qmap_next( itor ) );
+}
+/*
+* PARAMETERS
+* itor
+* [in] Iterator for an object in a map whose successor to return.
+*
+* RETURN VALUES
+* Iterator for the object with the next higher key value in a map.
+*
+* Iterator for the map end if the specified object was the last item in
+* the map.
+*
+* NOTES
+* The iterator must have been retrieved by a previous call to cl_map_head,
+* cl_map_tail, cl_map_next, or cl_map_prev.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_tail, cl_map_prev, cl_map_end
+*********/
+
+
+/****f* Component Library: Map/cl_map_prev
+* NAME
+* cl_map_prev
+*
+* DESCRIPTION
+* The cl_map_prev function returns the map item with the next lower
+* key value than a specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_iterator_t CL_API
+cl_map_prev(
+ IN const cl_map_iterator_t itor )
+{
+ CL_ASSERT( itor );
+ return( cl_qmap_prev( itor ) );
+}
+/*
+* PARAMETERS
+* itor
+* [in] Iterator for an object in a map whose predecessor to return.
+*
+* RETURN VALUES
+* Iterator for the object with the next lower key value in a map.
+*
+* Iterator for the map end if the specified object was the first item in
+* the map.
+*
+* NOTES
+* The iterator must have been retrieved by a previous call to cl_map_head,
+* cl_map_tail, cl_map_next, or cl_map_prev.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_end
+*********/
+
+
+/****f* Component Library: Map/cl_map_insert
+* NAME
+* cl_map_insert
+*
+* DESCRIPTION
+* The cl_map_insert function inserts a map item into a map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_map_insert(
+ IN cl_map_t* const p_map,
+ IN const uint64_t key,
+ IN const void* const p_object );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map into which to add the item.
+*
+* key
+* [in] Value to associate with the object.
+*
+* p_object
+* [in] Pointer to an object to insert into the map.
+*
+* RETURN VALUES
+* Pointer to the object in the map with the specified key after the call
+* completes.
+*
+* NULL if there was not enough memory to insert the desired item.
+*
+* NOTES
+* Insertion operations may cause the map to rebalance.
+*
+* If the map already contains an object with the specified key,
+* that object will not be replaced and the pointer to that object is
+* returned.
+*
+* SEE ALSO
+* Map, cl_map_remove, cl_map_item_t
+*********/
+
+
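A lifecycle sketch exercising cl_map_insert together with cl_map_get and cl_map_remove (both documented below); the key value 5 and the stored ints are arbitrary. Note the duplicate-key behavior, where the existing object is returned instead of being replaced:

    static void
    map_basics( void )
    {
        cl_map_t map;
        int a = 1, b = 2;
        void *p_obj;

        cl_map_construct( &map );
        if( cl_map_init( &map, 8 ) != CL_SUCCESS )
            return;

        p_obj = cl_map_insert( &map, 5, &a );  /* first insert: returns &a */
        p_obj = cl_map_insert( &map, 5, &b );  /* duplicate key: returns &a,
                                                  &b is not stored */
        if( p_obj == &a )
        {
            p_obj = cl_map_get( &map, 5 );     /* lookup without removal: &a */
            p_obj = cl_map_remove( &map, 5 );  /* removal: &a, map now empty */
        }

        cl_map_destroy( &map );  /* debug builds assert that the map is empty */
    }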
+/****f* Component Library: Map/cl_map_get
+* NAME
+* cl_map_get
+*
+* DESCRIPTION
+* The cl_map_get function returns the object associated with a key.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_map_get(
+ IN const cl_map_t* const p_map,
+ IN const uint64_t key );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map from which to retrieve the object with
+* the specified key.
+*
+* key
+* [in] Key value used to search for the desired object.
+*
+* RETURN VALUES
+* Pointer to the object with the desired key value.
+*
+* NULL if there was no item with the desired key value stored in
+* the map.
+*
+* NOTES
+* cl_map_get does not remove the item from the map.
+*
+* SEE ALSO
+* Map, cl_map_remove
+*********/
+
+
+/****f* Component Library: Map/cl_map_remove_item
+* NAME
+* cl_map_remove_item
+*
+* DESCRIPTION
+* The cl_map_remove_item function removes the specified map item
+* from a map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_map_remove_item(
+ IN cl_map_t* const p_map,
+ IN const cl_map_iterator_t itor );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map from which to remove the object associated with
+* the specified iterator.
+*
+* itor
+* [in] Iterator for an object to remove from its map.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Removes the object associated with the specified iterator from its map.
+*
+* The specified iterator is no longer valid after the call completes.
+*
+* The iterator must have been retrieved by a previous call to cl_map_head,
+* cl_map_tail, cl_map_next, or cl_map_prev.
+*
+* SEE ALSO
+* Map, cl_map_remove, cl_map_remove_all, cl_map_insert, cl_map_head,
+* cl_map_tail, cl_map_next, cl_map_prev
+*********/
+
+
+/****f* Component Library: Map/cl_map_remove
+* NAME
+* cl_map_remove
+*
+* DESCRIPTION
+* The cl_map_remove function removes the map item with the specified key
+* from a map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_map_remove(
+ IN cl_map_t* const p_map,
+ IN const uint64_t key );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_map_t structure from which to remove the item
+* with the specified key.
+*
+* key
+* [in] Key value used to search for the object to remove.
+*
+* RETURN VALUES
+* Pointer to the object associated with the specified key if
+* it was found and removed.
+*
+* NULL if no object with the specified key exists in the map.
+*
+* SEE ALSO
+* Map, cl_map_remove_item, cl_map_remove_all, cl_map_insert
+*********/
+
+
+/****f* Component Library: Map/cl_map_remove_all
+* NAME
+* cl_map_remove_all
+*
+* DESCRIPTION
+* The cl_map_remove_all function removes all objects from a map,
+* leaving it empty.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_map_remove_all(
+ IN cl_map_t* const p_map );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a map to empty.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* Map, cl_map_remove, cl_map_remove_item
+*********/
+
+
+/****f* Component Library: Map/cl_map_obj
+* NAME
+* cl_map_obj
+*
+* DESCRIPTION
+* The cl_map_obj function returns the object associated with an iterator.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_map_obj(
+ IN const cl_map_iterator_t itor )
+{
+ return( cl_qmap_obj( PARENT_STRUCT( itor, cl_map_obj_t, item ) ) );
+}
+/*
+* PARAMETERS
+* itor
+* [in] Iterator whose object to return.
+*
+* RETURN VALUES
+* Returns the value of the object pointer associated with the iterator.
+*
+* NOTES
+* The iterator must have been retrieved by a previous call to cl_map_head,
+* cl_map_tail, cl_map_next, or cl_map_prev.
+*
+* SEE ALSO
+* Map, cl_map_head, cl_map_tail, cl_map_next, cl_map_prev
+*********/
+
+
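The iterator accessors combine as in this sketch, which walks a map in ascending key order; cl_map_obj could be used the same way to recover the stored pointer at each position:

    static size_t
    count_keys_below(
        IN const cl_map_t* const p_map,
        IN const uint64_t limit )
    {
        cl_map_iterator_t itor;
        size_t count = 0;

        /* Items are visited in ascending key order. */
        for( itor = cl_map_head( p_map );
            itor != cl_map_end( p_map );
            itor = cl_map_next( itor ) )
        {
            if( cl_map_key( itor ) >= limit )
                break;
            count++;
        }
        return count;
    }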
+/****f* Component Library: Map/cl_map_merge
+* NAME
+* cl_map_merge
+*
+* DESCRIPTION
+* The cl_map_merge function moves all items from one map to another,
+* excluding duplicates.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_map_merge(
+ OUT cl_map_t* const p_dest_map,
+ IN OUT cl_map_t* const p_src_map );
+/*
+* PARAMETERS
+* p_dest_map
+* [out] Pointer to a cl_map_t structure to which items should be added.
+*
+* p_src_map
+* [in/out] Pointer to a cl_map_t structure whose items to add
+* to p_dest_map.
+*
+* RETURN VALUES
+* CL_SUCCESS if the operation succeeded.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the operation
+* to succeed.
+*
+* NOTES
+* Items are evaluated based on their keys only.
+*
+* Upon return from cl_map_merge, the map referenced by p_src_map contains
+* all duplicate items.
+*
+* SEE ALSO
+* Map, cl_map_delta
+*********/
+
+
+/****f* Component Library: Map/cl_map_delta
+* NAME
+* cl_map_delta
+*
+* DESCRIPTION
+* The cl_map_delta function computes the differences between two maps.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_map_delta(
+ IN OUT cl_map_t* const p_map1,
+ IN OUT cl_map_t* const p_map2,
+ OUT cl_map_t* const p_new,
+ OUT cl_map_t* const p_old );
+/*
+* PARAMETERS
+* p_map1
+* [in/out] Pointer to the first of two cl_map_t structures whose
+* differences to compute.
+*
+* p_map2
+* [in/out] Pointer to the second of two cl_map_t structures whose
+* differences to compute.
+*
+* p_new
+* [out] Pointer to an empty cl_map_t structure that contains the items
+* unique to p_map2 upon return from the function.
+*
+* p_old
+* [out] Pointer to an empty cl_map_t structure that contains the items
+* unique to p_map1 upon return from the function.
+*
+* RETURN VALUES
+* CL_SUCCESS if the operation succeeded.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory for the operation
+* to succeed.
+*
+* NOTES
+* Items are evaluated based on their keys. Items that exist in both
+* p_map1 and p_map2 remain in their respective maps. Items that
+* exist only in p_map1 are moved to p_old. Likewise, items that exist only
+* in p_map2 are moved to p_new. This function can be useful in evaluating
+* changes between two maps.
+*
+* Both maps pointed to by p_new and p_old must be empty on input.
+*
+* Upon failure, all input maps are restored to their original state.
+*
+* SEE ALSO
+* Map, cl_map_merge
+*********/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _CL_MAP_H_ */
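A sketch of how merge and delta compose; the wrapper names and the defaults/overrides framing are illustrative:

    /* Fold overrides into defaults; items whose keys already exist in
     * p_defaults stay behind in p_overrides instead of replacing them. */
    static cl_status_t
    apply_overrides(
        IN OUT cl_map_t* const p_defaults,
        IN OUT cl_map_t* const p_overrides )
    {
        return cl_map_merge( p_defaults, p_overrides );
    }

    /* Compute what changed between two scans; p_added and p_removed must
     * be initialized, empty maps on input, per the cl_map_delta notes. */
    static cl_status_t
    diff_scans(
        IN OUT cl_map_t* const p_prev,
        IN OUT cl_map_t* const p_curr,
        OUT cl_map_t* const p_added,
        OUT cl_map_t* const p_removed )
    {
        return cl_map_delta( p_prev, p_curr, p_added, p_removed );
    }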
diff --git a/branches/Ndi/inc/complib/cl_math.h b/branches/Ndi/inc/complib/cl_math.h
new file mode 100644
index 00000000..8e8af960
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_math.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Defines standard math related macros and functions.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_MATH_H_
+#define _CL_MATH_H_
+
+
+#include <complib/cl_types.h>
+
+
+/****d* Component Library: Math/MAX
+* NAME
+* MAX
+*
+* DESCRIPTION
+* The MAX macro returns the greater of two values.
+*
+* SYNOPSIS
+* MAX( x, y );
+*
+* PARAMETERS
+* x
+* [in] First of two values to compare.
+*
+* y
+* [in] Second of two values to compare.
+*
+* RETURN VALUE
+* Returns the greater of the x and y parameters.
+*
+* SEE ALSO
+* MIN, ROUNDUP
+*********/
+#ifndef MAX
+#define MAX(x,y) ((x) > (y) ? (x) : (y))
+#endif
+
+
+/****d* Component Library: Math/MIN
+* NAME
+* MIN
+*
+* DESCRIPTION
+* The MIN macro returns the lesser of two values.
+*
+* SYNOPSIS
+* MIN( x, y );
+*
+* PARAMETERS
+* x
+* [in] First of two values to compare.
+*
+* y
+* [in] Second of two values to compare.
+*
+* RETURN VALUE
+* Returns the lesser of the x and y parameters.
+*
+* SEE ALSO
+* MAX, ROUNDUP
+*********/
+#ifndef MIN
+#define MIN(x,y) ((x) < (y) ? (x) : (y))
+#endif
+
+
+/****d* Component Library: Math/ROUNDUP
+* NAME
+* ROUNDUP
+*
+* DESCRIPTION
+* The ROUNDUP macro rounds a value up to a given multiple.
+*
+* SYNOPSIS
+* ROUNDUP( val, align );
+*
+* PARAMETERS
+* val
+* [in] Value that is to be rounded up. The type of the value is
+* indeterminate, but must be at most the size of a natural integer
+* for the platform.
+*
+* align
+* [in] Multiple to which the val parameter must be rounded up.
+*
+* RETURN VALUE
+* Returns a value that is the input value specified by val rounded up to
+* the nearest multiple of align.
+*
+* NOTES
+* The value provided must be of a type at most the size of a natural integer.
+*********/
+#ifndef ROUNDUP
+#define ROUNDUP(val, align) \
+ ((((val) / (align))*(align)) + (((val) % (align)) ? (align) : 0))
+#endif
+
+
+#endif /* _CL_MATH_H_ */
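A worked sketch of the math macros; note that, as simple textual macros, they evaluate their arguments more than once, so arguments with side effects should be avoided. The 256-byte alignment and the clamp bounds are arbitrary:

    #include <complib/cl_math.h>

    static size_t
    padded_len( const size_t len )
    {
        /* For len = 1000: ROUNDUP(1000, 256) = 1024, since 1000/256 = 3
         * (integer division) gives 768, and the nonzero remainder adds
         * one more multiple of 256. */
        size_t padded = ROUNDUP( len, 256 );

        /* Clamp the result to [64, 4096]. */
        return MAX( 64, MIN( padded, 4096 ) );
    }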
diff --git a/branches/Ndi/inc/complib/cl_memory.h b/branches/Ndi/inc/complib/cl_memory.h
new file mode 100644
index 00000000..b66f22cc
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_memory.h
@@ -0,0 +1,963 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of generic memory allocation calls.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_MEMORY_H_
+#define _CL_MEMORY_H_
+
+
+#include <complib/cl_types.h>
+#include <complib/cl_memory_osd.h>
+
+
+/****h* Public/Memory Management
+* NAME
+* Memory Management
+*
+* DESCRIPTION
+* The memory management functionality provides memory manipulation
+* functions as well as powerful debugging tools.
+*
+* The Allocation Tracking functionality provides a means for tracking memory
+* allocations in order to detect memory leaks.
+*
+* Memory allocation tracking stores the file name and line number where
+* allocations occur. Gathering this information does have an adverse impact
+* on performance, and memory tracking should therefore not be enabled in
+* release builds of software.
+*
+* Memory tracking is compiled into the debug version of the library,
+* and can be enabled for the release version as well. To enable memory
+* tracking in a release build of the public layer, users should define
+* the MEM_TRACK_ON keyword for compilation.
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****i* Component Library: Memory Management/__cl_mem_track
+* NAME
+* __cl_mem_track
+*
+* DESCRIPTION
+* The __cl_mem_track function enables or disables memory allocation tracking.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+__cl_mem_track(
+ IN const boolean_t start );
+/*
+* PARAMETERS
+* start
+* [in] Specifies whether to start or stop memory tracking.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function performs all necessary initialization for tracking
+* allocations. Users should never call this function, as it is called by
+* the component library framework.
+*
+* If the start parameter is set to TRUE, the function starts tracking memory
+* usage if not already started. When set to FALSE, memory tracking is stopped
+* and all remaining allocations are displayed to the applicable debugger, if
+* any.
+*
+* Starting memory tracking when it is already started has no effect.
+* Likewise, stopping memory tracking when it is already stopped has no effect.
+*
+* SEE ALSO
+* Memory Management, cl_mem_display
+**********/
+
+
+/****f* Component Library: Memory Management/cl_mem_display
+* NAME
+* cl_mem_display
+*
+* DESCRIPTION
+* The cl_mem_display function displays all tracked memory allocations to
+* the applicable debugger.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_mem_display( void );
+/*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Each tracked memory allocation is displayed along with the file name and
+* line number that allocated it.
+*
+* Output is sent to the platform's debugging target, which may be the
+* system log file.
+*
+* SEE ALSO
+* Memory Management
+**********/
+
+
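A sketch of leak checking with tracking enabled (MEM_TRACK_ON, or a debug build); cl_zalloc, cl_free, and cl_mem_display are all declared in this header, and the 128-byte size is arbitrary:

    static void
    leak_check_demo( void )
    {
        void *p_buf = cl_zalloc( 128 );

        if( !p_buf )
            return;

        /* Omitting this cl_free would make the allocation show up, with
         * its file name and line number, in the cl_mem_display output. */
        cl_free( p_buf );

        cl_mem_display();
    }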
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+__cl_malloc_trk(
+	IN	const char* const	p_file_name,
+	IN	const int32_t		line_num,
+	IN	const size_t		bytes,
+	IN	const boolean_t		pageable );
+/*
+* PARAMETERS
+*	p_file_name
+*		[in] Name of the source file initiating the allocation.
+*
+*	line_num
+*		[in] Line number in the specified file where the allocation is
+*		initiated.
+*
+*	bytes
+*		[in] Size of the requested allocation.
+*
+*	pageable
+*		[in] On operating systems that support pageable vs. non-pageable
+*		memory in the kernel, set to TRUE to allocate memory from paged pool.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*	This function should not be called directly. The cl_malloc macro will
+*	redirect users to this function when memory tracking is enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_malloc_ntrk, __cl_zalloc_trk, __cl_free_trk
+**********/
+
+
+/****i* Component Library: Memory Management/__cl_zalloc_trk
+* NAME
+*	__cl_zalloc_trk
+*
+* DESCRIPTION
+*	The __cl_zalloc_trk function allocates and tracks a block of memory
+*	initialized to zero.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+__cl_zalloc_trk(
+	IN	const char* const	p_file_name,
+	IN	const int32_t		line_num,
+	IN	const size_t		bytes,
+	IN	const boolean_t		pageable );
+/*
+* PARAMETERS
+*	p_file_name
+*		[in] Name of the source file initiating the allocation.
+*
+*	line_num
+*		[in] Line number in the specified file where the allocation is
+*		initiated.
+*
+*	bytes
+*		[in] Size of the requested allocation.
+*
+*	pageable
+*		[in] On operating systems that support pageable vs. non-pageable
+*		memory in the kernel, set to TRUE to allocate memory from paged pool.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*	This function should not be called directly. The cl_zalloc macro will
+*	redirect users to this function when memory tracking is enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_zalloc_ntrk, __cl_malloc_trk, __cl_free_trk
+**********/
+
+
+/****i* Component Library: Memory Management/__cl_malloc_ntrk
+* NAME
+*	__cl_malloc_ntrk
+*
+* DESCRIPTION
+*	The __cl_malloc_ntrk function allocates a block of memory.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+__cl_malloc_ntrk(
+	IN	const size_t	size,
+	IN	const boolean_t	pageable );
+/*
+* PARAMETERS
+*	size
+*		[in] Size of the requested allocation.
+*
+*	pageable
+*		[in] On operating systems that support pageable vs. non-pageable
+*		memory in the kernel, set to TRUE to allocate memory from paged pool.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*	This function should not be called directly. The cl_malloc macro will
+*	redirect users to this function when memory tracking is not enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_malloc_trk, __cl_zalloc_ntrk, __cl_free_ntrk
+**********/
+
+
+/****i* Component Library: Memory Management/__cl_zalloc_ntrk
+* NAME
+*	__cl_zalloc_ntrk
+*
+* DESCRIPTION
+*	The __cl_zalloc_ntrk function allocates a block of memory
+*	initialized to zero.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+__cl_zalloc_ntrk(
+	IN	const size_t	bytes,
+	IN	const boolean_t	pageable );
+/*
+* PARAMETERS
+*	bytes
+*		[in] Size of the requested allocation.
+*
+*	pageable
+*		[in] On operating systems that support pageable vs. non-pageable
+*		memory in the kernel, set to TRUE to allocate memory from paged pool.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*	This function should not be called directly. The cl_zalloc macro will
+*	redirect users to this function when memory tracking is not enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_zalloc_trk, __cl_malloc_ntrk, __cl_free_ntrk
+**********/
+
+
+/****i* Component Library: Memory Management/__cl_free_trk
+* NAME
+*	__cl_free_trk
+*
+* DESCRIPTION
+*	The __cl_free_trk function deallocates a block of tracked memory.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+__cl_free_trk(
+	IN	void* const	p_memory );
+/*
+* PARAMETERS
+*	p_memory
+*		[in] Pointer to a memory block.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	The p_memory parameter is the pointer returned by a previous call to
+*	__cl_malloc_trk or __cl_zalloc_trk.
+*
+*	__cl_free_trk has no effect if p_memory is NULL.
+*
+*	This function should not be called directly. The cl_free macro will
+*	redirect users to this function when memory tracking is enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_free_ntrk, __cl_malloc_trk, __cl_zalloc_trk
+**********/
+
+
+/****i* Component Library: Memory Management/__cl_free_ntrk
+* NAME
+*	__cl_free_ntrk
+*
+* DESCRIPTION
+*	The __cl_free_ntrk function deallocates a block of memory.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+__cl_free_ntrk(
+	IN	void* const	p_memory );
+/*
+* PARAMETERS
+*	p_memory
+*		[in] Pointer to a memory block.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	The p_memory parameter is the pointer returned by a previous call to
+*	__cl_malloc_ntrk or __cl_zalloc_ntrk.
+*
+*	__cl_free_ntrk has no effect if p_memory is NULL.
+*
+*	This function should not be called directly. The cl_free macro will
+*	redirect users to this function when memory tracking is not enabled.
+*
+* SEE ALSO
+*	Memory Management, __cl_free_trk, __cl_malloc_ntrk, __cl_zalloc_ntrk
+**********/
+
+
+/****f* Component Library: Memory Management/cl_malloc
+* NAME
+*	cl_malloc
+*
+* DESCRIPTION
+*	The cl_malloc function allocates a block of memory.
+*
+* SYNOPSIS
+*/
+void*
+cl_malloc(
+	IN	const size_t	size );
+/*
+* PARAMETERS
+*	size
+*		[in] Size of the requested allocation.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*
+* SEE ALSO
+*	Memory Management, cl_free, cl_zalloc, cl_palloc, cl_pzalloc,
+*	cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+**********/
+
+
+/****f* Component Library: Memory Management/cl_zalloc
+* NAME
+*	cl_zalloc
+*
+* DESCRIPTION
+*	The cl_zalloc function allocates a block of memory initialized to zero.
+*
+* SYNOPSIS
+*/
+void*
+cl_zalloc(
+	IN	const size_t	size );
+/*
+* PARAMETERS
+*	size
+*		[in] Size of the requested allocation.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
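+*
+*	As a brief illustration (a hedged sketch, not part of this header), a
+*	caller might pair cl_zalloc with cl_free as follows:
+*
+*		uint8_t	*p_buf;
+*
+*		p_buf = (uint8_t*)cl_zalloc( 256 );
+*		if( !p_buf )
+*			return CL_INSUFFICIENT_MEMORY;
+*		... use the zero-filled buffer ...
+*		cl_free( p_buf );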
+*
+* SEE ALSO
+*	Memory Management, cl_free, cl_malloc, cl_palloc, cl_pzalloc,
+*	cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+**********/
+
+
+/****f* Component Library: Memory Management/cl_palloc
+* NAME
+*	cl_palloc
+*
+* DESCRIPTION
+*	The cl_palloc function allocates a block of memory from paged pool if the
+*	operating system supports it. If the operating system does not distinguish
+*	between pool types, cl_palloc is identical to cl_malloc.
+*
+* SYNOPSIS
+*/
+void*
+cl_palloc(
+	IN	const size_t	size );
+/*
+* PARAMETERS
+*	size
+*		[in] Size of the requested allocation.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*
+* SEE ALSO
+*	Memory Management, cl_free, cl_malloc, cl_zalloc, cl_pzalloc,
+*	cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+**********/
+
+
+/****f* Component Library: Memory Management/cl_pzalloc
+* NAME
+*	cl_pzalloc
+*
+* DESCRIPTION
+*	The cl_pzalloc function allocates a block of memory from paged pool if the
+*	operating system supports it and initializes it to zero. If the operating
+*	system does not distinguish between pool types, cl_pzalloc is identical
+*	to cl_zalloc.
+*
+* SYNOPSIS
+*/
+void*
+cl_pzalloc(
+	IN	const size_t	size );
+/*
+* PARAMETERS
+*	size
+*		[in] Size of the requested allocation.
+*
+* RETURN VALUES
+*	Pointer to allocated memory if successful.
+*
+*	NULL otherwise.
+*
+* NOTES
+*	Allocated memory follows alignment rules specific to the different
+*	environments.
+*
+* SEE ALSO
+*	Memory Management, cl_free, cl_malloc, cl_zalloc, cl_palloc,
+*	cl_memset, cl_memclr, cl_memcpy, cl_memcmp
+**********/
+
+
+/****f* Component Library: Memory Management/cl_free
+* NAME
+*	cl_free
+*
+* DESCRIPTION
+*	The cl_free function deallocates a block of memory.
+*
+* SYNOPSIS
+*/
+void
+cl_free(
+	IN	void* const	p_memory );
+/*
+* PARAMETERS
+*	p_memory
+*		[in] Pointer to a memory block.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	The p_memory parameter is the pointer returned by a previous call to
+*	cl_malloc or cl_zalloc.
+*
+*	cl_free has no effect if p_memory is NULL.
+*
+* SEE ALSO
+*	Memory Management, cl_malloc, cl_zalloc
+**********/
+
+
+/****f* Component Library: Memory Management/cl_memset
+* NAME
+*	cl_memset
+*
+* DESCRIPTION
+*	The cl_memset function sets every byte in a memory range to a given value.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_memset(
+	IN	void* const		p_memory,
+	IN	const uint8_t	fill,
+	IN	const size_t	count );
+/*
+* PARAMETERS
+*	p_memory
+*		[in] Pointer to a memory block.
+*
+*	fill
+*		[in] Byte value with which to fill the memory.
+*
+*	count
+*		[in] Number of bytes to set.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Memory Management, cl_memclr, cl_memcpy, cl_memcmp
+**********/
+
+
+#ifndef _CL_MEMCLR_DEFINED_
+/****f* Component Library: Memory Management/cl_memclr
+* NAME
+*	cl_memclr
+*
+* DESCRIPTION
+*	The cl_memclr function sets every byte in a memory range to zero.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_memclr(
+	IN	void* const		p_memory,
+	IN	const size_t	count )
+{
+	cl_memset( p_memory, 0, count );
+}
+/*
+* PARAMETERS
+*	p_memory
+*		[in] Pointer to a memory block.
+*
+*	count
+*		[in] Number of bytes to set.
+*
+* RETURN VALUE
+*	This function does not return a value.
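+*
+* NOTES
+*	A common use, shown here as a hedged sketch, is clearing a structure
+*	before selectively initializing its fields (my_ctx_t is a hypothetical
+*	caller-defined type, not part of this header):
+*
+*		my_ctx_t	ctx;
+*
+*		cl_memclr( &ctx, sizeof(my_ctx_t) );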
+*
+* SEE ALSO
+*	Memory Management, cl_memset, cl_memcpy, cl_memcmp
+**********/
+#endif
+
+
+/****f* Component Library: Memory Management/cl_memcpy
+* NAME
+*	cl_memcpy
+*
+* DESCRIPTION
+*	The cl_memcpy function copies a given number of bytes from
+*	one buffer to another.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_memcpy(
+	IN	void* const			p_dest,
+	IN	const void* const	p_src,
+	IN	const size_t		count );
+/*
+* PARAMETERS
+*	p_dest
+*		[in] Pointer to the buffer being copied to.
+*
+*	p_src
+*		[in] Pointer to the buffer being copied from.
+*
+*	count
+*		[in] Number of bytes to copy from the source buffer to the
+*		destination buffer.
+*
+* RETURN VALUE
+*	Returns a pointer to the destination buffer.
+*
+* SEE ALSO
+*	Memory Management, cl_memset, cl_memclr, cl_memcmp
+**********/
+
+
+/****f* Component Library: Memory Management/cl_memcmp
+* NAME
+*	cl_memcmp
+*
+* DESCRIPTION
+*	The cl_memcmp function compares two memory buffers.
+*
+* SYNOPSIS
+*/
+CL_EXPORT int32_t CL_API
+cl_memcmp(
+	IN	const void* const	p_mem,
+	IN	const void* const	p_ref,
+	IN	const size_t		count );
+/*
+* PARAMETERS
+*	p_mem
+*		[in] Pointer to a memory block being compared.
+*
+*	p_ref
+*		[in] Pointer to the reference memory block to compare against.
+*
+*	count
+*		[in] Number of bytes to compare.
+*
+* RETURN VALUES
+*	Returns less than zero if p_mem is less than p_ref.
+*
+*	Returns greater than zero if p_mem is greater than p_ref.
+*
+*	Returns zero if the two memory regions are identical.
+*
+* SEE ALSO
+*	Memory Management, cl_memset, cl_memclr, cl_memcpy
+**********/
+
+
+#ifdef CL_KERNEL
+
+/****f* Component Library: Memory Management/cl_get_pagesize
+* NAME
+*	cl_get_pagesize
+*
+* DESCRIPTION
+*	Returns the number of bytes in an OS-defined page.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint32_t CL_API
+cl_get_pagesize( void );
+/*
+* PARAMETERS
+*	NONE
+*
+* RETURN VALUES
+*	Returns the number of bytes in a page as defined by the operating
+*	system.
+*
+* SEE ALSO
+*	Memory Management
+**********/
+
+
+/****f* Component Library: Memory Management/cl_get_physaddr
+* NAME
+*	cl_get_physaddr
+*
+* DESCRIPTION
+*	Returns the physical address for a kernel virtual address.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint64_t CL_API
+cl_get_physaddr(
+	IN	void	*vaddr );
+/*
+* PARAMETERS
+*	vaddr
+*		[in] Virtual address for which to return the physical address.
+*
+* RETURN VALUES
+*	Returns the physical address for a virtual address.
+*
+* NOTES
+*	This call is only available in kernel mode.
+*
+* SEE ALSO
+*	Memory Management
+**********/
+
+
+/****f* Component Library: Memory Management/cl_check_for_read
+* NAME
+*	cl_check_for_read
+*
+* DESCRIPTION
+*	Checks a user-mode virtual address for read access.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_check_for_read(
+	IN	const void* const	vaddr,
+	IN	const size_t		count );
+/*
+* PARAMETERS
+*	vaddr
+*		[in] Virtual address to check for read access.
+*
+*	count
+*		[in] Number of bytes of the buffer at the specified address
+*		to validate.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the virtual address is valid for a read of the specified
+*	size.
+*
+*	CL_INVALID_PERMISSION if the virtual address or the size is not valid.
+*
+* NOTES
+*	This call is only available in the kernel. The buffer can only be accessed
+*	in the context of the application thread (i.e. in the path of an IOCTL
+*	request). Callers cannot be holding a spinlock when calling this function.
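+*
+*	For illustration only, a hedged sketch of an IOCTL path validating a
+*	user buffer before copying from it (p_user_buf and ioctl_in_t are
+*	hypothetical names, not part of this library):
+*
+*		ioctl_in_t	in;
+*
+*		if( cl_check_for_read( p_user_buf, sizeof(ioctl_in_t) ) != CL_SUCCESS )
+*			return CL_INVALID_PERMISSION;
+*		if( cl_copy_from_user( &in, p_user_buf, sizeof(ioctl_in_t) ) != CL_SUCCESS )
+*			return CL_INVALID_PERMISSION;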
+* +* SEE ALSO +* Memory Management, cl_check_for_write, cl_copy_to_user, cl_copy_from_user +*********/ + + +/****f* Component Library: Memory Management/cl_check_for_write +* NAME +* cl_check_for_write +* +* DESCRIPTION +* Checks a user-mode virtual address for write access. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_check_for_write( + IN void* const vaddr, + IN const size_t count ); +/* +* PARAMETERS +* vaddr +* [in] Virtual address to check for write access. +* +* count +* [in] Number of bytes of the buffer at the specified +* address to validate. +* +* RETURN VALUES +* CL_SUCCESS if the virtual address is valid for a write of the specified +* size. +* +* CL_INVALID_PERMISSION if the virtual address or the size is not valid. +* +* NOTES +* This call is only available in the kernel. The buffer can only be accessed +* in the context of the application thread (i.e. in the path of an IOCTL +* request). Callers cannot be holding a spinlock when calling this function. +* +* SEE ALSO +* Memory Management, cl_check_for_read, cl_copy_to_user, cl_copy_from_user +*********/ + + +/****f* Component Library: Memory Management/cl_copy_to_user +* NAME +* cl_copy_to_user +* +* DESCRIPTION +* Copies data into a user-mode buffer, performing access checks. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_copy_to_user( + IN void* const p_dest, + IN const void* const p_src, + IN const size_t count ); +/* +* PARAMETERS +* p_dest +* [in] User-mode virtual address to which to copy data. +* +* p_src +* [in] Pointer to the buffer being copied from. +* +* count +* [in] Number of bytes to copy from the source buffer to the +* destination buffer. +* +* RETURN VALUES +* CL_SUCCESS if the user-mode buffer virtual address is valid as the +* destination of the copy. +* +* CL_INVALID_PERMISSION if the virtual address or the count is not valid. +* +* NOTES +* This call is only available in the kernel. The buffer can only be accessed +* in the context of the application thread (i.e. in the path of an IOCTL +* request). Callers cannot be holding a spinlock when calling this function. +* +* SEE ALSO +* Memory Management, cl_check_for_read, cl_check_for_write, cl_copy_from_user +*********/ + + +/****f* Component Library: Memory Management/cl_copy_from_user +* NAME +* cl_copy_from_user +* +* DESCRIPTION +* Copies data from a user-mode buffer, performing access checks. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_copy_from_user( + IN void* const p_dest, + IN const void* const p_src, + IN const size_t count ); +/* +* PARAMETERS +* p_dest +* [in] Pointer to the buffer being copied to. +* +* p_src +* [in] User-mode virtual address from which to copy data. +* +* count +* [in] Number of bytes to copy from the source buffer to the +* destination buffer. +* +* RETURN VALUES +* CL_SUCCESS if the user-mode buffer virtual address is valid as the +* source of the copy. +* +* CL_INVALID_PERMISSION if the virtual address or the count is not valid. +* +* NOTES +* This call is only available in the kernel. The buffer can only be accessed +* in the context of the application thread (i.e. in the path of an IOCTL +* request). Callers cannot be holding a spinlock when calling this function. +* +* SEE ALSO +* Memory Management, cl_check_for_read, cl_check_for_write, cl_copy_to_user +*********/ + +#endif /* CL_KERNEL */ + +#if defined( CL_NO_TRACK_MEM ) && defined( CL_TRACK_MEM ) + #error Conflict: Cannot define both CL_NO_TRACK_MEM and CL_TRACK_MEM. 
+#endif + +/* + * Turn on memory allocation tracking in debug builds if not explicitly + * disabled or already turned on. + */ +#if defined( _DEBUG_ ) && \ + !defined( CL_NO_TRACK_MEM ) && \ + !defined( CL_TRACK_MEM ) + #define CL_TRACK_MEM +#endif + + +/* + * Define allocation macro. + */ +#if defined( CL_TRACK_MEM ) + +#define cl_malloc( a ) \ + __cl_malloc_trk( __FILE__, __LINE__, a, FALSE ) + +#define cl_zalloc( a ) \ + __cl_zalloc_trk( __FILE__, __LINE__, a, FALSE ) + +#define cl_palloc( a ) \ + __cl_malloc_trk( __FILE__, __LINE__, a, TRUE ) + +#define cl_pzalloc( a ) \ + __cl_zalloc_trk( __FILE__, __LINE__, a, TRUE ) + +#define cl_free( a ) \ + __cl_free_trk( a ) + +#else /* !defined( CL_TRACK_MEM ) */ + +#define cl_malloc( a ) \ + __cl_malloc_ntrk( a, FALSE ) + +#define cl_zalloc( a ) \ + __cl_zalloc_ntrk( a, FALSE ) + +#define cl_palloc( a ) \ + __cl_malloc_ntrk( a, TRUE ) + +#define cl_pzalloc( a ) \ + __cl_zalloc_ntrk( a, TRUE ) + +#define cl_free( a ) \ + __cl_free_ntrk( a ) + +#endif /* defined( CL_TRACK_MEM ) */ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_MEMORY_H_ */ diff --git a/branches/Ndi/inc/complib/cl_mutex.h b/branches/Ndi/inc/complib/cl_mutex.h new file mode 100644 index 00000000..2ae4de31 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_mutex.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of mutex object. + * + * Environment: + * All + */ + + +#ifndef _CL_MUTEX_H_ +#define _CL_MUTEX_H_ + + +#include + + +/****h* complib/Mutex +* NAME +* Mutex +* +* DESCRIPTION +* Mutex provides synchronization between threads for exclusive access to +* a resource. +* +* The Mutex functions manipulate a cl_mutex_t structure which should +* be treated as opaque and should be manipulated only through the provided +* functions. +* +* SEE ALSO +* Structures: +* cl_mutex_t +* +* Initialization: +* cl_mutex_construct, cl_mutex_init, cl_mutex_destroy +* +* Manipulation +* cl_mutex_acquire, cl_mutex_release +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****f* Component Library: Mutex/cl_mutex_construct +* NAME +* cl_mutex_construct +* +* DESCRIPTION +* The cl_mutex_construct function initializes the state of a +* mutex. 
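+*
+*	A typical lifecycle, shown as a hedged sketch (error handling
+*	abbreviated), combines the functions declared in this header:
+*
+*		cl_mutex_t	mutex;
+*
+*		cl_mutex_construct( &mutex );
+*		if( cl_mutex_init( &mutex ) == CL_SUCCESS )
+*		{
+*			cl_mutex_acquire( &mutex );
+*			... access the shared resource ...
+*			cl_mutex_release( &mutex );
+*		}
+*		cl_mutex_destroy( &mutex );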
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_mutex_construct(
+	IN	cl_mutex_t* const	p_mutex );
+/*
+* PARAMETERS
+*	p_mutex
+*		[in] Pointer to a mutex structure whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_mutex_destroy without first calling
+*	cl_mutex_init.
+*
+*	Calling cl_mutex_construct is a prerequisite to calling any other
+*	mutex function except cl_mutex_init.
+*
+* SEE ALSO
+*	Mutex, cl_mutex_init, cl_mutex_destroy
+*********/
+
+
+/****f* Component Library: Mutex/cl_mutex_init
+* NAME
+*	cl_mutex_init
+*
+* DESCRIPTION
+*	The cl_mutex_init function initializes a mutex for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_mutex_init(
+	IN	cl_mutex_t* const	p_mutex );
+/*
+* PARAMETERS
+*	p_mutex
+*		[in] Pointer to a mutex structure to initialize.
+*
+* RETURN VALUES
+*	CL_SUCCESS if initialization succeeded.
+*
+*	CL_ERROR if initialization failed. Callers should call
+*	cl_mutex_destroy to clean up any resources allocated during
+*	initialization.
+*
+* NOTES
+*	Initializes the mutex structure. Allows calling cl_mutex_acquire
+*	and cl_mutex_release. The mutex is always created in the unlocked state.
+*
+* SEE ALSO
+*	Mutex, cl_mutex_construct, cl_mutex_destroy,
+*	cl_mutex_acquire, cl_mutex_release
+*********/
+
+
+/****f* Component Library: Mutex/cl_mutex_destroy
+* NAME
+*	cl_mutex_destroy
+*
+* DESCRIPTION
+*	The cl_mutex_destroy function performs all necessary cleanup of a
+*	mutex.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_mutex_destroy(
+	IN	cl_mutex_t* const	p_mutex );
+/*
+* PARAMETERS
+*	p_mutex
+*		[in] Pointer to a mutex structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of a mutex. This function must only
+*	be called if either cl_mutex_construct or cl_mutex_init has been
+*	called.
+*
+* SEE ALSO
+*	Mutex, cl_mutex_construct, cl_mutex_init
+*********/
+
+
+/****f* Component Library: Mutex/cl_mutex_acquire
+* NAME
+*	cl_mutex_acquire
+*
+* DESCRIPTION
+*	The cl_mutex_acquire function acquires a mutex.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_mutex_acquire(
+	IN	cl_mutex_t* const	p_mutex );
+/*
+* PARAMETERS
+*	p_mutex
+*		[in] Pointer to a mutex structure to acquire.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Mutex, cl_mutex_release
+*********/
+
+
+/****f* Component Library: Mutex/cl_mutex_release
+* NAME
+*	cl_mutex_release
+*
+* DESCRIPTION
+*	The cl_mutex_release function releases a mutex object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_mutex_release(
+	IN	cl_mutex_t* const	p_mutex );
+/*
+* PARAMETERS
+*	p_mutex
+*		[in] Pointer to a mutex structure to release.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Releases a mutex after a call to cl_mutex_acquire.
+*
+* SEE ALSO
+*	Mutex, cl_mutex_acquire
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_MUTEX_H_ */
diff --git a/branches/Ndi/inc/complib/cl_obj.h b/branches/Ndi/inc/complib/cl_obj.h
new file mode 100644
index 00000000..e691d101
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_obj.h
@@ -0,0 +1,998 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of basic objects and relationships. + * + * Environment: + * All + */ + + +#if !defined(__CL_OBJ_H__) +#define __CL_OBJ_H__ + +#include +#include +#include +#include +#include +#include + + +/****h* Component Library/Object +* NAME +* Object +* +* DESCRIPTION +* Object describes a basic class that can be used to track accesses to an +* object and provides automatic cleanup of an object that is dependent +* on another object. +* +* Dependencies between objects are described using a relationship. A +* child object is considered dependent on a parent object. Destruction of +* a parent object automatically results in the destruction of any child +* objects associated with the parent. +* +* The relationship between parent and child objects is many to many. +* Parents can have multiple child objects, and a child can be dependent on +* multiple parent objects. In the latter case, destruction of any parent +* object results in the destruction of the child object. +* +* Other relationships between objects are described using references. An +* object that takes a reference on a second object prevents the second object +* from being deallocated as long as the reference is held. +* +* SEE ALSO +* Types +* cl_destroy_type_t +* +* Structures: +* cl_obj_t, cl_obj_rel_t +* +* Callbacks: +* cl_pfn_obj_call_t +* +* Initialization/Destruction: +* cl_obj_mgr_create, cl_obj_mgr_destroy, +* cl_obj_construct, cl_obj_init, cl_obj_destroy, cl_obj_deinit +* +* Object Relationships: +* cl_obj_ref, cl_obj_deref, +* cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel +* +* Object Manipulation: +* cl_obj_reset +*********/ + + + +/* Forward declaration. */ +typedef struct _cl_obj *__p_cl_obj_t; + + + +/****s* Component Library: Object/cl_obj_mgr_t +* NAME +* cl_obj_mgr_t +* +* DESCRIPTION +* The global object manager. +* +* The manager must be created before constructing any other objects, and all +* objects must be destroyed before the object manager is destroyed. +* +* The manager is used to maintain the list of all objects currently active +* in the system. It provides a pool of relationship items used to +* describe parent-child, or dependent, relationships between two objects. +* The manager contains an asynchronous processing thread that is used to +* support asynchronous object destruction. 
+*
+* SYNOPSIS
+*/
+typedef struct _cl_obj_mgr
+{
+	cl_qlist_t		obj_list;
+	cl_spinlock_t	lock;
+
+	cl_async_proc_t	async_proc_mgr;
+
+	cl_qpool_t		rel_pool;
+
+} cl_obj_mgr_t;
+/*
+* FIELDS
+*	obj_list
+*		List of all objects in the system. Objects are inserted into this
+*		list when constructed and removed when freed.
+*
+*	lock
+*		A lock used by the object manager to synchronize access to the
+*		obj_list.
+*
+*	async_proc_mgr
+*		An asynchronous processing manager used to process asynchronous
+*		destruction requests. Users wishing to synchronize the execution of
+*		specific routines with object destruction may queue work requests to
+*		this processing manager.
+*
+*	rel_pool
+*		Pool of items used to describe dependent relationships. Users may
+*		obtain relationship objects from this pool when forming relationships,
+*		but are not required to do so.
+*
+* SEE ALSO
+*	Object, cl_obj_mgr_create, cl_obj_mgr_destroy,
+*	cl_obj_construct, cl_obj_deinit,
+*	cl_qlist_t, cl_spinlock_t, cl_async_proc_t, cl_qpool_t
+*********/
+
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+
+/****f* Component Library: Object/cl_obj_mgr_create
+* NAME
+*	cl_obj_mgr_create
+*
+* DESCRIPTION
+*	This routine creates an object manager used to track all objects by
+*	the user. The object manager assists with debugging efforts by identifying
+*	objects that are not destroyed properly.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_obj_mgr_create(void);
+/*
+* PARAMETERS
+*	None.
+*
+* RETURN VALUE
+*	CL_SUCCESS
+*		The object manager was successfully created.
+*
+*	CL_INSUFFICIENT_MEMORY
+*		The object manager could not be allocated.
+*
+* NOTES
+*	This call must succeed before invoking any other object-related function.
+*
+* SEE ALSO
+*	Object, cl_obj_mgr_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_mgr_destroy
+* NAME
+*	cl_obj_mgr_destroy
+*
+* DESCRIPTION
+*	This routine destroys the object manager created through cl_obj_mgr_create.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_mgr_destroy(void);
+/*
+* PARAMETERS
+*	None.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	When the object manager is destroyed, it will display information about all
+*	objects that have not yet been destroyed.
+*
+* SEE ALSO
+*	Object, cl_obj_mgr_create
+*********/
+
+
+/****d* Component Library: Object/cl_pfn_obj_call_t
+* NAME
+*	cl_pfn_obj_call_t
+*
+* DESCRIPTION
+*	The cl_pfn_obj_call_t function type defines the prototype for functions
+*	used to return objects to the user.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_obj_call_t)(
+	IN	struct _cl_obj	*p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] Pointer to a cl_obj_t. This is the object being returned to
+*		the user.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*	This function type is provided as a prototype for functions provided
+*	by users as parameters to the cl_obj_init function.
+*
+* SEE ALSO
+*	Object, cl_obj_init, cl_obj_t
+*********/
+
+
+/****d* Component Library: Object/cl_destroy_type_t
+* NAME
+*	cl_destroy_type_t
+*
+* DESCRIPTION
+*	Indicates the type of destruction to perform on an object.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_destroy_type
+{
+	CL_DESTROY_ASYNC,
+	CL_DESTROY_SYNC
+
+} cl_destroy_type_t;
+/*
+* VALUES
+*	CL_DESTROY_ASYNC
+*		Indicates that the object should be destroyed asynchronously. Objects
+*		destroyed asynchronously complete initial destruction processing, then
+*		return control to the calling thread. Once their reference count goes
+*		to zero, they are queued onto an asynchronous thread to complete
+*		destruction processing.
+*
+*	CL_DESTROY_SYNC
+*		Indicates that the object should be destroyed synchronously. Objects
+*		destroyed synchronously wait (block) until their reference count goes
+*		to zero. Once their reference count goes to zero, destruction
+*		processing is completed by the calling thread.
+*
+* SEE ALSO
+*	Object, cl_obj_init, cl_obj_destroy, cl_obj_deinit, cl_obj_t
+*********/
+
+
+
+/****s* Component Library: Object/cl_obj_t
+* NAME
+*	cl_obj_t
+*
+* DESCRIPTION
+*	Object structure.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_obj
+{
+	cl_pool_item_t			pool_item;	/* Must be first. */
+	uint32_t				type;
+	cl_state_t				state;
+	cl_destroy_type_t		destroy_type;
+
+	cl_async_proc_item_t	async_item;
+	cl_event_t				event;
+
+	cl_pfn_obj_call_t		pfn_destroying;
+	cl_pfn_obj_call_t		pfn_cleanup;
+	cl_pfn_obj_call_t		pfn_free;
+
+	cl_spinlock_t			lock;
+
+	cl_qlist_t				parent_list;
+	cl_qlist_t				child_list;
+
+	atomic32_t				ref_cnt;
+
+} cl_obj_t;
+/*
+* FIELDS
+*	pool_item
+*		Used to track the object with the global object manager. We use
+*		a pool item, rather than a list item, to let users store the object
+*		in a pool.
+*
+*	type
+*		Stores a user-specified object type.
+*
+*	state
+*		Records the current state of the object, such as initialized,
+*		destroying, etc.
+*
+*	destroy_type
+*		Specifies the type of destruction, synchronous or asynchronous, to
+*		perform on this object.
+*
+*	async_item
+*		Asynchronous item used when destroying the object asynchronously.
+*		This item is queued to an asynchronous thread to complete destruction
+*		processing.
+*
+*	event
+*		Event used when destroying the object synchronously. A call to destroy
+*		the object will wait on this event until the destruction has completed.
+*
+*	pfn_destroying
+*		User-specified callback invoked to notify a user that an object has
+*		been marked for destruction. This callback is invoked directly from
+*		the thread destroying the object and is used to notify a user that
+*		a parent object has invoked a child object's destructor.
+*
+*	pfn_cleanup
+*		User-specified callback invoked as an object is undergoing destruction.
+*		For objects destroyed asynchronously, this callback is invoked from
+*		the context of the asynchronous destruction thread. Users may block
+*		in the context of this thread; however, further destruction processing
+*		will not continue until this callback returns.
+*
+*	pfn_free
+*		User-specified callback invoked to notify a user that an object has
+*		been destroyed and is ready for deallocation. Users should either
+*		call cl_obj_deinit or cl_obj_reset from within this callback.
+*
+*	lock
+*		A lock provided by the object.
+*
+*	parent_list
+*		A list of relationships to parent objects that an object is dependent
+*		on.
+*
+*	child_list
+*		A list of all child objects that are dependent on this object.
+*		Destroying this object will result in all related objects maintained
+*		in the child list also being destroyed.
+*
+*	ref_cnt
+*		A count of the number of objects still referencing this object.
+*
+* SEE ALSO
+*	Object, cl_obj_construct, cl_obj_init, cl_obj_destroy,
+*	cl_obj_deinit, cl_pfn_obj_call_t, cl_destroy_type_t,
+*	cl_pool_item_t, cl_state_t, cl_async_proc_item_t,
+*	cl_event_t, cl_spinlock_t, cl_qlist_t, atomic32_t
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_construct
+* NAME
+*	cl_obj_construct
+*
+* DESCRIPTION
+*	This routine prepares an object for use. The object must be successfully
+*	initialized before being used.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_construct(
+	IN	cl_obj_t * const	p_obj,
+	IN	const uint32_t		obj_type );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to construct.
+*
+*	obj_type
+*		[in] A user-specified type associated with the object. This type
+*		is recorded by the object for debugging purposes and may be accessed
+*		by the user.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	This call must precede any other call on an object.
+*
+* SEE ALSO
+*	Object, cl_obj_init, cl_obj_destroy, cl_obj_deinit
+*********/
+
+
+/****f* Component Library: Object/cl_obj_init
+* NAME
+*	cl_obj_init
+*
+* DESCRIPTION
+*	This routine initializes an object for use. Upon the successful completion
+*	of this call, the object is ready for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_obj_init(
+	IN	cl_obj_t * const		p_obj,
+	IN	cl_destroy_type_t		destroy_type,
+	IN	const cl_pfn_obj_call_t	pfn_destroying OPTIONAL,
+	IN	const cl_pfn_obj_call_t	pfn_cleanup OPTIONAL,
+	IN	const cl_pfn_obj_call_t	pfn_free );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to initialize.
+*
+*	destroy_type
+*		[in] Specifies the destruction model used by this object.
+*
+*	pfn_destroying
+*		[in] User-specified callback invoked to notify a user that an object
+*		has been marked for destruction. This callback is invoked directly
+*		from the thread destroying the object and is used to notify a user
+*		that a parent object has invoked a child object's destructor.
+*
+*	pfn_cleanup
+*		[in] User-specified callback invoked as an object is undergoing
+*		destruction. For objects destroyed asynchronously, this callback
+*		is invoked from the context of the asynchronous destruction thread.
+*		Users may block in the context of this thread; however, further
+*		destruction processing will not continue until this callback returns.
+*
+*	pfn_free
+*		[in] User-specified callback invoked to notify a user that an object
+*		has been destroyed and is ready for deallocation. Users should either
+*		call cl_obj_deinit or cl_obj_reset from within this callback.
+*
+* RETURN VALUE
+*	CL_SUCCESS
+*		The object was successfully initialized.
+*
+*	CL_INSUFFICIENT_MEMORY
+*		The object could not allocate the necessary memory resources to
+*		complete initialization.
+*
+* NOTES
+*	The three destruction callbacks are used to notify the user of the progress
+*	of the destruction, permitting the user to perform additional processing.
+*	Pfn_destroying is used to notify the user that the object is being
+*	destroyed. It is called after an object has removed itself from
+*	relationships with its parents, but before it destroys any child objects
+*	that it might have.
+*
+*	Pfn_cleanup is invoked after all child objects have been destroyed, and
+*	there are no more references on the object itself. For objects destroyed
+*	asynchronously, pfn_cleanup is invoked from an asynchronous destruction
+*	thread.
+*
+*	Pfn_free is called to notify the user that the destruction of the object
+*	has completed. All relationships have been removed, and all child objects
+*	have been destroyed. Relationship items (cl_obj_rel_t) that were used to
+*	identify parent objects are returned to the user through the parent_list
+*	field of the cl_obj_t structure.
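+*
+*	As a hedged sketch (my_obj_t, my_free_cb, and MY_OBJ_TYPE are
+*	hypothetical user-defined names), an object embedding cl_obj_t might be
+*	set up as follows:
+*
+*		typedef struct _my_obj
+*		{
+*			cl_obj_t	obj;
+*		}	my_obj_t;
+*
+*		static void CL_API my_free_cb( IN cl_obj_t *p_obj )
+*		{
+*			my_obj_t	*p_my_obj = PARENT_STRUCT( p_obj, my_obj_t, obj );
+*			cl_obj_deinit( p_obj );
+*			cl_free( p_my_obj );
+*		}
+*
+*		cl_obj_construct( &p_my_obj->obj, MY_OBJ_TYPE );
+*		status = cl_obj_init( &p_my_obj->obj, CL_DESTROY_SYNC,
+*			NULL, NULL, my_free_cb );
+*
+*	PARENT_STRUCT is assumed here to be the complib container-of helper; if
+*	unavailable, an equivalent cast applies.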
+*
+* SEE ALSO
+*	Object, cl_obj_construct, cl_obj_destroy, cl_obj_deinit,
+*	cl_obj_t, cl_destroy_type_t, cl_pfn_obj_call_t
+*********/
+
+
+/****f* Component Library: Object/cl_obj_destroy
+* NAME
+*	cl_obj_destroy
+*
+* DESCRIPTION
+*	This routine destroys the specified object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_destroy(
+	IN	cl_obj_t *	p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to destroy.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	This routine starts the destruction process for the specified object. For
+*	additional information regarding destruction callbacks, see the following
+*	fields in cl_obj_t and parameters in cl_obj_init: pfn_destroying,
+*	pfn_cleanup, and pfn_free.
+*
+*	In most cases, after calling this routine, users should call cl_obj_deinit
+*	from within their pfn_free callback routine.
+*
+* SEE ALSO
+*	Object, cl_obj_construct, cl_obj_init, cl_obj_deinit,
+*	cl_obj_t, cl_destroy_type_t, cl_pfn_obj_call_t
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_deinit
+* NAME
+*	cl_obj_deinit
+*
+* DESCRIPTION
+*	Release all resources allocated by an object. This routine should
+*	typically be called from a user's pfn_free routine.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_deinit(
+	IN	cl_obj_t * const	p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to free.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	This call must be invoked to release the object from the global object
+*	manager.
+*
+* SEE ALSO
+*	Object, cl_obj_construct, cl_obj_init, cl_obj_destroy, cl_obj_t
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_reset
+* NAME
+*	cl_obj_reset
+*
+* DESCRIPTION
+*	Reset an object's state. This is called after cl_obj_destroy has
+*	been called on an object, but before cl_obj_deinit has been invoked.
+*	After an object has been reset, it is ready for re-use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_reset(
+	IN	cl_obj_t * const	p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to reset.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	This routine allows an object to be initialized once, then destroyed
+*	and re-used multiple times. This permits the user to allocate and
+*	maintain a pool of objects. The objects may be reset and returned to
+*	the pool, rather than freed, after being destroyed. The objects would
+*	not be freed until the pool itself was destroyed.
+*
+* SEE ALSO
+*	Object, cl_obj_destroy, cl_obj_deinit, cl_obj_t
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_ref
+* NAME
+*	cl_obj_ref
+*
+* DESCRIPTION
+*	Increments the reference count on an object and returns the updated count.
+*	This routine is thread safe, but does not result in locking the object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT int32_t CL_API
+cl_obj_ref(
+	IN	cl_obj_t * const	p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to reference.
+*
+* RETURN VALUE
+*	The updated reference count.
+*
+* SEE ALSO
+*	Object, cl_obj_t, cl_obj_deref
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_deref
+* NAME
+*	cl_obj_deref
+*
+* DESCRIPTION
+*	Decrements the reference count on an object and returns the updated count.
+*	This routine is thread safe, but results in locking the object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT int32_t CL_API
+cl_obj_deref(
+	IN	cl_obj_t * const	p_obj );
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object to dereference.
+*
+* RETURN VALUE
+*	The updated reference count.
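+*
+* NOTES
+*	As a hedged illustration, a caller handing an object to deferred work
+*	might hold a reference across that work (queue_work is a hypothetical
+*	placeholder, not part of this library):
+*
+*		cl_obj_ref( &p_my_obj->obj );
+*		queue_work( p_my_obj );
+*		... later, in the completion path ...
+*		cl_obj_deref( &p_my_obj->obj );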
+*
+* SEE ALSO
+*	Object, cl_obj_t, cl_obj_ref
+*********/
+
+
+/****f* Component Library: Object/cl_obj_type
+* NAME
+*	cl_obj_type
+*
+* DESCRIPTION
+*	Returns the type of an object.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint32_t CL_API
+cl_obj_type(
+	IN	cl_obj_t * const	p_obj )
+{
+	return p_obj->type;
+}
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object whose type to return.
+*
+* RETURN VALUE
+*	The type of the object, as specified in the call to cl_obj_construct.
+*
+* SEE ALSO
+*	Object, cl_obj_t, cl_obj_construct
+*********/
+
+
+/****f* Component Library: Object/cl_obj_lock
+* NAME
+*	cl_obj_lock
+*
+* DESCRIPTION
+*	Acquires an object's lock.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_obj_lock(
+	IN	cl_obj_t * const	p_obj )
+{
+	CL_ASSERT( p_obj->state == CL_INITIALIZED ||
+		p_obj->state == CL_DESTROYING );
+	cl_spinlock_acquire( &p_obj->lock );
+}
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object whose lock to acquire.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Object, cl_obj_t, cl_obj_unlock
+*********/
+
+
+/****f* Component Library: Object/cl_obj_unlock
+* NAME
+*	cl_obj_unlock
+*
+* DESCRIPTION
+*	Releases an object's lock previously acquired by a call to cl_obj_lock.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_obj_unlock(
+	IN	cl_obj_t * const	p_obj )
+{
+	CL_ASSERT( p_obj->state == CL_INITIALIZED ||
+		p_obj->state == CL_DESTROYING );
+	cl_spinlock_release( &p_obj->lock );
+}
+/*
+* PARAMETERS
+*	p_obj
+*		[in] A pointer to the object whose lock to release.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Object, cl_obj_t, cl_obj_lock
+*********/
+
+
+/****s* Component Library: Object/cl_obj_rel_t
+* NAME
+*	cl_obj_rel_t
+*
+* DESCRIPTION
+*	Identifies a dependent relationship between two objects.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_obj_rel
+{
+	cl_pool_item_t	pool_item;	/* Must be first. */
+	struct _cl_obj	*p_parent_obj;
+
+	cl_list_item_t	list_item;
+	struct _cl_obj	*p_child_obj;
+
+} cl_obj_rel_t;
+/*
+* FIELDS
+*	pool_item
+*		An item used to store the relationship in a free pool maintained
+*		by the object manager. This field is also used by the parent object
+*		to store the relationship in its child_list.
+*
+*	p_parent_obj
+*		A reference to the parent object for the relationship.
+*
+*	list_item
+*		This field is used by the child object to store the relationship in
+*		its parent_list.
+*
+*	p_child_obj
+*		A reference to the child object for the relationship.
+*
+* NOTES
+*	This structure is used to define all dependent relationships. Dependent
+*	relationships are those where the destruction of a parent object results
+*	in the destruction of child objects. For other types of relationships,
+*	simple references between objects may be used.
+*
+*	Relationship items are stored in lists maintained by both the parent
+*	and child objects. References to both objects exist while the
+*	relationship is maintained. Typically, relationships are defined by
+*	the user by calling cl_obj_insert_rel, but are destroyed automatically
+*	via an object's destruction process.
+*
+* SEE ALSO
+*	Object, cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel,
+*	cl_obj_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_rel_alloc
+* NAME
+*	cl_rel_alloc
+*
+* DESCRIPTION
+*	Retrieves an object relationship item from the object manager.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_obj_rel_t* CL_API
+cl_rel_alloc(void);
+/*
+* PARAMETERS
+*	None.
+*
+* RETURN VALUE
+*	A reference to an allocated relationship object, or NULL if no relationship
+*	object could be allocated.
+*
+* NOTES
+*	This routine retrieves a cl_obj_rel_t structure from a pool maintained
+*	by the object manager. The pool automatically grows as needed.
+*
+*	Relationship items are used to describe a dependent relationship between
+*	a parent and child object. In cases where a child has a fixed number of
+*	relationships, the user may be able to allocate and manage the cl_obj_rel_t
+*	structures more efficiently than obtaining the structures through this call.
+*
+* SEE ALSO
+*	Object, cl_rel_free, cl_obj_insert_rel, cl_obj_remove_rel, cl_obj_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_rel_free
+* NAME
+*	cl_rel_free
+*
+* DESCRIPTION
+*	Return a relationship object to the global object manager.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_rel_free(
+	IN	cl_obj_rel_t * const	p_rel );
+/*
+* PARAMETERS
+*	p_rel
+*		[in] A reference to the relationship item to free.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	Relationship items must not be freed until both the parent and child
+*	object have removed their references to one another. Relationship items
+*	may be freed after calling cl_obj_remove_rel or after the associated
+*	child object's free callback has been invoked. In the latter case, the
+*	invalid relationship items are referenced by the child object's
+*	parent_list.
+*
+* SEE ALSO
+*	Object, cl_rel_alloc, cl_obj_insert_rel, cl_obj_remove_rel, cl_obj_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_insert_rel
+* NAME
+*	cl_obj_insert_rel
+*
+* DESCRIPTION
+*	Forms a relationship between two objects, with the existence of the child
+*	object dependent on the parent.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_obj_insert_rel(
+	IN	cl_obj_rel_t * const	p_rel,
+	IN	cl_obj_t * const		p_parent_obj,
+	IN	cl_obj_t * const		p_child_obj );
+/*
+* PARAMETERS
+*	p_rel
+*		[in] A reference to an unused relationship item.
+*
+*	p_parent_obj
+*		[in] A reference to the parent object.
+*
+*	p_child_obj
+*		[in] A reference to the child object.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the relationship was created successfully; an error status
+*	otherwise.
+*
+* NOTES
+*	This call inserts a relationship between the parent and child object.
+*	The relationship allows for the automatic destruction of the child object
+*	if the parent is destroyed.
+*
+*	A given object can have multiple parent and child objects, but the
+*	relationships must form into an object tree. That is, there cannot be any
+*	cycles formed through the parent-child relationships. (For example, an
+*	object cannot be both the parent and a child of a second object.)
+*
+* SEE ALSO
+*	Object, cl_rel_alloc, cl_rel_free, cl_obj_remove_rel, cl_obj_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_insert_rel_parent_locked
+* NAME
+*	cl_obj_insert_rel_parent_locked
+*
+* DESCRIPTION
+*	Forms a relationship between two objects, with the existence of the child
+*	object dependent on the parent. The parent's object lock is held.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_obj_insert_rel_parent_locked(
+	IN	cl_obj_rel_t * const	p_rel,
+	IN	cl_obj_t * const		p_parent_obj,
+	IN	cl_obj_t * const		p_child_obj );
+/*
+* PARAMETERS
+*	p_rel
+*		[in] A reference to an unused relationship item.
+*
+*	p_parent_obj
+*		[in] A reference to the parent object.
+*
+*	p_child_obj
+*		[in] A reference to the child object.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the relationship was created successfully; an error status
+*	otherwise.
+*
+* NOTES
+*	This call inserts a relationship between the parent and child object.
+*	The relationship allows for the automatic destruction of the child object
+*	if the parent is destroyed.
+*
+*	A given object can have multiple parent and child objects, but the
+*	relationships must form into an object tree. That is, there cannot be any
+*	cycles formed through the parent-child relationships. (For example, an
+*	object cannot be both the parent and a child of a second object.)
+*
+*	This call requires the caller to already hold the parent object's lock.
+*
+* SEE ALSO
+*	Object, cl_rel_alloc, cl_rel_free, cl_obj_remove_rel, cl_obj_destroy
+*********/
+
+
+
+/****f* Component Library: Object/cl_obj_remove_rel
+* NAME
+*	cl_obj_remove_rel
+*
+* DESCRIPTION
+*	Manually removes a relationship between two objects.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_obj_remove_rel(
+	IN	cl_obj_rel_t * const	p_rel );
+/*
+* PARAMETERS
+*	p_rel
+*		[in] A reference to the relationship to remove.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	This routine permits a user to manually remove a dependent relationship
+*	between two objects. When removing a relationship using this call, the
+*	user must ensure that objects referenced by the relationship are not
+*	destroyed, either directly or indirectly via a parent.
+*
+* SEE ALSO
+*	Object, cl_rel_alloc, cl_rel_free, cl_obj_insert_rel, cl_obj_destroy
+*********/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* __CL_OBJ_H__ */
diff --git a/branches/Ndi/inc/complib/cl_passivelock.h b/branches/Ndi/inc/complib/cl_passivelock.h
new file mode 100644
index 00000000..a1a5d117
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_passivelock.h
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	This file contains the passive lock, which synchronizes passive threads.
+ *	The passive lock allows multiple readers to access a resource
+ *	simultaneously, exclusive from a single thread writing. It is similar
+ *	in behavior to a reader/writer lock, but may only be used at passive
+ *	level.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_PASSIVE_LOCK_H_
+#define _CL_PASSIVE_LOCK_H_
+
+
+#include
+#include
+
+
+/****h* Component Library/Passive Lock
+* NAME
+*	Passive Lock
+*
+* DESCRIPTION
+*	The Passive Lock provides synchronization between multiple threads that
+*	are sharing the lock with a single thread holding the lock exclusively.
+*
+*	Passive lock works exclusively between threads and cannot be used in
+*	situations where the caller cannot be put into a waiting state.
+*
+*	The passive lock functions operate on a cl_plock_t structure which should
+*	be treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_plock_t
+*
+*	Initialization:
+*		cl_plock_construct, cl_plock_init, cl_plock_destroy
+*
+*	Manipulation
+*		cl_plock_acquire, cl_plock_excl_acquire, cl_plock_release
+*********/
+
+
+/****s* Component Library: Passive Lock/cl_plock_t
+* NAME
+*	cl_plock_t
+*
+* DESCRIPTION
+*	Passive Lock structure.
+*
+*	The cl_plock_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_plock
+{
+	cl_event_t	reader_event;
+	cl_event_t	writer_event;
+	atomic32_t	reader_count;
+
+} cl_plock_t;
+/*
+* FIELDS
+*	reader_event
+*		Event used to synchronize shared access to the lock.
+*
+*	writer_event
+*		Event used to synchronize exclusive access to the lock.
+*
+*	reader_count
+*		Number of threads holding the lock for shared access.
+*
+* SEE ALSO
+*	Passive Lock
+*********/
+
+
+/****f* Component Library: Passive Lock/cl_plock_construct
+* NAME
+*	cl_plock_construct
+*
+* DESCRIPTION
+*	The cl_plock_construct function initializes the state of a
+*	passive lock.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_plock_construct(
+	IN	cl_plock_t* const	p_lock )
+{
+	CL_ASSERT( p_lock );
+
+	p_lock->reader_count = 0;
+	cl_event_construct( &p_lock->reader_event );
+	cl_event_construct( &p_lock->writer_event );
+}
+/*
+* PARAMETERS
+*	p_lock
+*		[in] Pointer to a cl_plock_t structure whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_plock_destroy without first calling cl_plock_init.
+*
+*	Calling cl_plock_construct is a prerequisite to calling any other
+*	passive lock function except cl_plock_init.
+*
+* SEE ALSO
+*	Passive Lock, cl_plock_init, cl_plock_destroy
+*********/
+
+
+/****f* Component Library: Passive Lock/cl_plock_destroy
+* NAME
+*	cl_plock_destroy
+*
+* DESCRIPTION
+*	The cl_plock_destroy function performs any necessary cleanup
+*	of a passive lock.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_plock_destroy(
+	IN	cl_plock_t* const	p_lock )
+{
+	CL_ASSERT( p_lock );
+	CL_ASSERT( p_lock->reader_count == 0 );
+
+	cl_event_destroy( &p_lock->writer_event );
+	cl_event_destroy( &p_lock->reader_event );
+}
+/*
+* PARAMETERS
+*	p_lock
+*		[in] Pointer to a cl_plock_t structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_plock_destroy performs any necessary cleanup of the specified
+*	passive lock.
+*
+*	This function must only be called if cl_plock_construct or
+*	cl_plock_init has been called. The passive lock must not be held
+*	when calling this function.
+*
+* SEE ALSO
+*	Passive Lock, cl_plock_construct, cl_plock_init
+*********/
+
+
+/****f* Component Library: Passive Lock/cl_plock_init
+* NAME
+*	cl_plock_init
+*
+* DESCRIPTION
+*	The cl_plock_init function initializes a passive lock.
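+*
+*	A typical usage pattern across the functions in this header, as a
+*	hedged sketch:
+*
+*		cl_plock_t	lock;
+*
+*		cl_plock_construct( &lock );
+*		if( cl_plock_init( &lock ) == CL_SUCCESS )
+*		{
+*			cl_plock_acquire( &lock );		(shared access)
+*			... read the protected resource ...
+*			cl_plock_release( &lock );
+*
+*			cl_plock_excl_acquire( &lock );	(exclusive access)
+*			... modify the protected resource ...
+*			cl_plock_release( &lock );
+*		}
+*		cl_plock_destroy( &lock );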
+* +* SYNOPSIS +*/ +CL_INLINE cl_status_t CL_API +cl_plock_init( + IN cl_plock_t* const p_lock ) +{ + cl_status_t status; + + CL_ASSERT( p_lock ); + + cl_plock_construct( p_lock ); + + status = cl_event_init( &p_lock->writer_event, FALSE ); + if( status != CL_SUCCESS ) + { + cl_plock_destroy( p_lock ); + return( status ); + } + + status = cl_event_init( &p_lock->reader_event, FALSE ); + if( status != CL_SUCCESS ) + { + cl_plock_destroy( p_lock ); + return( status ); + } + + /* + * Set the writer event to signalled so that the first + * wait operation succeeds. + */ + status = cl_event_signal( &p_lock->writer_event ); + if( status != CL_SUCCESS ) + { + cl_plock_destroy( p_lock ); + return( status ); + } + + /* + * Set the reader event to signalled so that the first + * wait operation succeeds. + */ + status = cl_event_signal( &p_lock->reader_event ); + if( status != CL_SUCCESS ) + { + cl_plock_destroy( p_lock ); + return( status ); + } + + return( CL_SUCCESS ); +} +/* +* PARAMETERS +* p_lock +* [in] Pointer to a cl_plock_t structure to initialize. +* +* RETURN VALUES +* CL_SUCCESS if the passive lock was initialized successfully. +* +* CL_ERROR otherwise. +* +* NOTES +* Allows calling cl_plock_acquire, cl_plock_release, +* cl_plock_excl_acquire, and cl_plock_excl_release. +* +* SEE ALSO +* Passive Lock, cl_plock_construct, cl_plock_destroy, +* cl_plock_excl_acquire, cl_plock_excl_release, +* cl_plock_acquire, cl_plock_release +*********/ + + +/****f* Component Library: Passive Lock/cl_plock_acquire +* NAME +* cl_plock_acquire +* +* DESCRIPTION +* The cl_plock_acquire function acquires a passive lock for +* shared access. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_plock_acquire( + IN cl_plock_t* const p_lock ) +{ + cl_status_t status; + + CL_ASSERT( p_lock ); + + status = + cl_event_wait_on( &p_lock->reader_event, EVENT_NO_TIMEOUT, FALSE ); + CL_ASSERT( status == CL_SUCCESS ); + + /* + * Increment the reader count to block a thread trying for exclusive + * access. + */ + cl_atomic_inc( &p_lock->reader_count ); +#ifdef DBG_PASSIVE_LOCKS + cl_dbg_out( "cl_plock_acquire: ReaderCount = %u\n", + p_lock->reader_count ); +#endif + /* + * Release the reader event to satisfy the wait of another reader + * or a writer. + */ + cl_event_signal( &p_lock->reader_event ); +} +/* +* PARAMETERS +* p_lock +* [in] Pointer to a cl_plock_t structure to acquire. +* +* RETURN VALUE +* This function does not return a value. +* +* SEE ALSO +* Passive Lock, cl_plock_release, cl_plock_excl_acquire +*********/ + + +/****f* Component Library: Passive Lock/cl_plock_excl_acquire +* NAME +* cl_plock_excl_acquire +* +* DESCRIPTION +* The cl_plock_excl_acquire function acquires exclusive access +* to a passive lock. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_plock_excl_acquire( + IN cl_plock_t* const p_lock ) +{ + cl_status_t status; + + CL_ASSERT( p_lock ); + + /* Acquire the reader event. This will block new readers. */ + status = + cl_event_wait_on( &p_lock->reader_event, EVENT_NO_TIMEOUT, FALSE ); + CL_ASSERT( status == CL_SUCCESS ); + + /* Wait for the writer event until all readers have exited. 
*/ + while( p_lock->reader_count ) + { +#ifdef DBG_PASSIVE_LOCKS + cl_dbg_out( "cl_plock_excl_acquire: ReaderCount = %u\n", + p_lock->reader_count ); +#endif + status = + cl_event_wait_on( &p_lock->writer_event, EVENT_NO_TIMEOUT, FALSE ); + CL_ASSERT( status == CL_SUCCESS ); + } + +#ifdef DBG_PASSIVE_LOCKS + cl_dbg_out( "cl_plock_excl_acquire: Exit\n" ); +#endif +} +/* +* PARAMETERS +* p_lock +* [in] Pointer to a cl_plock_t structure to acquire exclusively. +* +* RETURN VALUE +* This function does not return a value. +* +* SEE ALSO +* Passive Lock, cl_plock_release, cl_plock_acquire +*********/ + + +/****f* Component Library: Passive Lock/cl_plock_release +* NAME +* cl_plock_release +* +* DESCRIPTION +* The cl_plock_release function releases a passive lock from +* shared or exclusive access. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_plock_release( + IN cl_plock_t* const p_lock ) +{ + CL_ASSERT( p_lock ); + + if( p_lock->reader_count ) + { + + /* + * Decrement the count to allow a thread waiting for exclusive + * access to continue. + */ + cl_atomic_dec( &p_lock->reader_count ); + + #ifdef DBG_PASSIVE_LOCKS + cl_dbg_out( "cl_plock_release: ReaderCount = %u\n", + p_lock->reader_count ); + #endif + + /* Release a writer, if any. */ + cl_event_signal( &p_lock->writer_event ); + } + else + { + /* Release threads waiting to acquire the lock. */ + cl_event_signal( &p_lock->reader_event ); + cl_event_signal( &p_lock->writer_event ); + + #ifdef DBG_PASSIVE_LOCKS + cl_dbg_out( "cl_plock_release: Exit\n" ); + #endif + } +} +/* +* PARAMETERS +* p_lock +* [in] Pointer to a cl_plock_t structure to release. +* +* RETURN VALUE +* This function does not return a value. +* +* SEE ALSO +* Passive Lock, cl_plock_acquire, cl_plock_excl_acquire +*********/ + + +#endif /* _CL_PASSIVE_LOCK_H_ */ diff --git a/branches/Ndi/inc/complib/cl_perf.h b/branches/Ndi/inc/complib/cl_perf.h new file mode 100644 index 00000000..58c4d634 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_perf.h @@ -0,0 +1,807 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of performance tracking. 
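A minimal caller sketch of the passive-lock API declared above (hypothetical application code, not part of this patch; all functions shown are the ones defined in cl_passive_lock.h):

    cl_plock_t lock;

    if( cl_plock_init( &lock ) != CL_SUCCESS )
        return;

    /* Reader path: any number of threads may hold the lock concurrently. */
    cl_plock_acquire( &lock );
    /* ... read shared state ... */
    cl_plock_release( &lock );

    /* Writer path: blocks new readers, then waits for existing readers. */
    cl_plock_excl_acquire( &lock );
    /* ... modify shared state ... */
    cl_plock_release( &lock );

    cl_plock_destroy( &lock );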
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_PERF_H_
+#define _CL_PERF_H_
+
+
+#include
+#include
+#include
+
+
+/****h* Component Library/Performance Counters
+* NAME
+* Performance Counters
+*
+* DESCRIPTION
+* The performance counters allow timing operations to benchmark
+* software performance and help identify potential bottlenecks.
+*
+* All performance counters are NULL macros when disabled, preventing them
+* from adversely affecting performance in builds where the counters are not
+* used.
+*
+* Each counter records elapsed time in microseconds, minimum time elapsed,
+* and total number of samples.
+*
+* Each counter is independently protected by a spinlock, allowing use of
+* the counters in multi-processor environments.
+*
+* The impact of serializing access to performance counters is measured,
+* allowing measurements to be corrected as necessary.
+*
+* NOTES
+* Performance counters do impact performance, and should only be enabled
+* when gathering data. Counters can be enabled or disabled on a per-user
+* basis at compile time. To enable the counters, users should define
+* the PERF_TRACK_ON keyword before including the cl_perf.h file.
+* Undefining the PERF_TRACK_ON keyword disables the performance counters.
+* When disabled, all performance tracking calls resolve to no-ops.
+*
+* When using performance counters, it is the user's responsibility to
+* maintain the counter indexes. It is recommended that users define an
+* enumerated type to use for counter indexes. It improves readability
+* and simplifies maintenance by reducing the work necessary in managing
+* the counter indexes.
+*
+* SEE ALSO
+* Structures:
+* cl_perf_t
+*
+* Initialization:
+* cl_perf_construct, cl_perf_init, cl_perf_destroy
+*
+* Manipulation:
+* cl_perf_reset, cl_perf_display, cl_perf_start, cl_perf_update,
+* cl_perf_log, cl_perf_stop
+*
+* Macros:
+* PERF_DECLARE, PERF_DECLARE_START
+*********/
+
+
+/*
+ * Number of times the counter calibration test is executed. This is used
+ * to determine the average time to use a performance counter.
+ */
+#define PERF_CALIBRATION_TESTS 100000
+
+
+/****i* Component Library: Performance Counters/cl_perf_data_t
+* NAME
+* cl_perf_data_t
+*
+* DESCRIPTION
+* The cl_perf_data_t structure is used to track information
+* for a single counter.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_perf_data
+{
+ uint64_t count;
+ uint64_t total_time;
+ uint64_t min_time;
+ cl_spinlock_t lock;
+
+} cl_perf_data_t;
+/*
+* FIELDS
+* count
+* Number of samples in the counter.
+*
+* total_time
+* Total time for all samples, in microseconds.
+*
+* min_time
+* Minimum time for any sample in the counter, in microseconds.
+*
+* lock
+* Spinlock to serialize counter updates.
+*
+* SEE ALSO
+* Performance Counters
+*********/
+
+
+/****i* Component Library: Performance Counters/cl_perf_t
+* NAME
+* cl_perf_t
+*
+* DESCRIPTION
+* The cl_perf_t structure serves as a container for a group of performance
+* counters and related calibration data.
+*
+* This structure should be treated as opaque and be manipulated only through
+* the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_perf
+{
+ cl_perf_data_t *data_array;
+ uintn_t size;
+ uint64_t locked_calibration_time;
+ uint64_t normal_calibration_time;
+ cl_state_t state;
+
+} cl_perf_t;
+/*
+* FIELDS
+* data_array
+* Pointer to the array of performance counters.
+*
+* size
+* Number of counters in the counter array.
+*
+* locked_calibration_time
+* Time needed to update counters while holding a spinlock.
+*
+* normal_calibration_time
+* Time needed to update counters while not holding a spinlock.
+*
+* state
+* State of the performance counter provider.
+*
+* SEE ALSO
+* Performance Counters, cl_perf_data_t
+*********/
+
+
+/****f* Component Library: Performance Counters/cl_perf_construct
+* NAME
+* cl_perf_construct
+*
+* DESCRIPTION
+* The cl_perf_construct macro constructs a performance
+* tracking container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_construct(
+ IN cl_perf_t* const p_perf );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_perf_construct allows calling cl_perf_destroy without first calling
+* cl_perf_init.
+*
+* Calling cl_perf_construct is a prerequisite to calling any other
+* performance counter function except cl_perf_init.
+*
+* This function is implemented as a macro and has no effect when
+* performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, cl_perf_init, cl_perf_destroy
+*********/
+
+
+/****f* Component Library: Performance Counters/cl_perf_init
+* NAME
+* cl_perf_init
+*
+* DESCRIPTION
+* The cl_perf_init function initializes a performance counter container
+* for use.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_perf_init(
+ IN cl_perf_t* const p_perf,
+ IN const uintn_t num_counters );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to initialize.
+*
+* num_counters
+* [in] Number of counters to allocate in the container.
+*
+* RETURN VALUES
+* CL_SUCCESS if initialization was successful.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+* the container.
+*
+* CL_ERROR if an error was encountered initializing the locks for the
+* performance counters.
+*
+* NOTES
+* This function allocates all memory required for the requested number of
+* counters and initializes all locks protecting those counters. After a
+* successful initialization, cl_perf_init calibrates the counters and
+* resets their value.
+*
+* This function is implemented as a macro and has no effect when
+* performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, cl_perf_construct, cl_perf_destroy, cl_perf_display
+*********/
+
+
+/****f* Component Library: Performance Counters/cl_perf_destroy
+* NAME
+* cl_perf_destroy
+*
+* DESCRIPTION
+* The cl_perf_destroy function destroys a performance tracking container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_destroy(
+ IN cl_perf_t* const p_perf,
+ IN const boolean_t display );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to destroy.
+*
+* display
+* [in] If TRUE, causes the performance counters to be displayed.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_perf_destroy frees all resources allocated in a call to cl_perf_init.
+* If the display parameter is set to TRUE, displays all counter values
+* before deallocating resources.
+*
+* This function should only be called after a call to cl_perf_construct
+* or cl_perf_init.
+*
+* This function is implemented as a macro and has no effect when
+* performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, cl_perf_construct, cl_perf_init
+*********/
+
+
+/****f* Component Library: Performance Counters/cl_perf_reset
+* NAME
+* cl_perf_reset
+*
+* DESCRIPTION
+* The cl_perf_reset function resets the counters contained in
+* a performance tracking container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_reset(
+ IN cl_perf_t* const p_perf );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container whose counters
+* to reset.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function is implemented as a macro and has no effect when
+* performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters
+*********/
+
+
+/****f* Component Library: Performance Counters/cl_perf_display
+* NAME
+* cl_perf_display
+*
+* DESCRIPTION
+* The cl_perf_display function displays the current performance
+* counter values.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_display(
+ IN const cl_perf_t* const p_perf );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container whose counter
+* values to display.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function is implemented as a macro and has no effect when
+* performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, cl_perf_init
+*********/
+
+
+/****d* Component Library: Performance Counters/PERF_DECLARE
+* NAME
+* PERF_DECLARE
+*
+* DESCRIPTION
+* The PERF_DECLARE macro declares a performance counter variable used
+* to store the starting time of a timing sequence.
+*
+* SYNOPSIS
+* PERF_DECLARE( index )
+*
+* PARAMETERS
+* index
+* [in] Index of the performance counter for which to use this
+* variable.
+*
+* NOTES
+* Variables should generally be declared on the stack to support
+* multi-threading. In cases where a counter needs to be used to
+* time operations across multiple functions, care must be taken to
+* ensure that the start time stored in this variable is not overwritten
+* before the related performance counter has been updated.
+*
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE_START, cl_perf_start, cl_perf_log,
+* cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/PERF_DECLARE_START
+* NAME
+* PERF_DECLARE_START
+*
+* DESCRIPTION
+* The PERF_DECLARE_START macro declares a performance counter variable
+* and sets it to the starting time of a timed sequence.
+*
+* SYNOPSIS
+* PERF_DECLARE_START( index )
+*
+* PARAMETERS
+* index
+* [in] Index of the performance counter for which to use this
+* variable.
+*
+* NOTES
+* Variables should generally be declared on the stack to support
+* multi-threading.
+*
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, cl_perf_start, cl_perf_log,
+* cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_start
+* NAME
+* cl_perf_start
+*
+* DESCRIPTION
+* The cl_perf_start macro sets the starting value of a timed sequence.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_start(
+ IN const uintn_t index );
+/*
+* PARAMETERS
+* index
+* [in] Index of the performance counter to set.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+* cl_perf_update, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_clr
+* NAME
+* cl_perf_clr
+*
+* DESCRIPTION
+* The cl_perf_clr macro clears a counter variable.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_clr(
+ IN const uintn_t index );
+/*
+* PARAMETERS
+* index
+* [in] Index of the performance counter to clear.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+* cl_perf_update, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_inc
+* NAME
+* cl_perf_inc
+*
+* DESCRIPTION
+* The cl_perf_inc macro increments a counter variable by one.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_inc(
+ IN const uintn_t index );
+/*
+* PARAMETERS
+* index
+* [in] Index of the performance counter to increment.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_log,
+* cl_perf_update, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_update
+* NAME
+* cl_perf_update
+*
+* DESCRIPTION
+* The cl_perf_update macro adds a timing sample based on a provided start
+* time to a counter in a performance counter container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_update(
+ IN cl_perf_t* const p_perf,
+ IN const uintn_t index,
+ IN const uint64_t start_time );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to whose counter
+* the sample should be added.
+*
+* index
+* [in] Number of the performance counter to update with a new sample.
+*
+* start_time
+* [in] Timestamp to use as the start time for the timing sample.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+* cl_perf_log, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_update_ctr
+* NAME
+* cl_perf_update_ctr
+*
+* DESCRIPTION
+* The cl_perf_update_ctr macro updates a counter in a performance
+* counter container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_update_ctr(
+ IN cl_perf_t* const p_perf,
+ IN const uintn_t index );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to whose counter
+* the sample should be added.
+*
+* index
+* [in] Number of the performance counter to update with a new sample.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+* cl_perf_log, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_log
+* NAME
+* cl_perf_log
+*
+* DESCRIPTION
+* The cl_perf_log macro adds a given timing sample to a
+* counter in a performance counter container.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_log(
+ IN cl_perf_t* const p_perf,
+ IN const uintn_t index,
+ IN const uint64_t pc_total_time );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to whose counter
+* the sample should be added.
+*
+* index
+* [in] Number of the performance counter to update with a new sample.
+*
+* pc_total_time
+* [in] Total elapsed time for the sample being added.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+* cl_perf_update, cl_perf_stop
+*********/
+
+
+/****d* Component Library: Performance Counters/cl_perf_stop
+* NAME
+* cl_perf_stop
+*
+* DESCRIPTION
+* The cl_perf_stop macro updates a counter in a performance counter
+* container with a new timing sample.
+*
+* SYNOPSIS
+*/
+void
+cl_perf_stop(
+ IN cl_perf_t* const p_perf,
+ IN const uintn_t index );
+/*
+* PARAMETERS
+* p_perf
+* [in] Pointer to a performance counter container to whose counter
+* a sample should be added.
+*
+* index
+* [in] Number of the performance counter to update with a new sample.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* The ending time stamp is taken and elapsed time calculated before updating
+* the specified counter.
+*
+* This macro has no effect when performance counters are disabled.
+*
+* SEE ALSO
+* Performance Counters, PERF_DECLARE, PERF_DECLARE_START, cl_perf_start,
+* cl_perf_log
+*********/
+
+
+/*
+ * PERF_TRACK_ON must be defined by the user before including this file to
+ * enable performance tracking. To disable tracking, users should undefine
+ * PERF_TRACK_ON.
+ */
+#if defined( PERF_TRACK_ON )
+/*
+ * Enable performance tracking.
+ */
+
+#define cl_perf_construct( p_perf ) \
+ __cl_perf_construct( p_perf )
+#define cl_perf_init( p_perf, num_counters ) \
+ __cl_perf_init( p_perf, num_counters )
+#define cl_perf_destroy( p_perf, display ) \
+ __cl_perf_destroy( p_perf, display )
+#define cl_perf_reset( p_perf ) \
+ __cl_perf_reset( p_perf )
+#define cl_perf_display( p_perf ) \
+ __cl_perf_display( p_perf )
+#define PERF_DECLARE( index ) \
+ uint64_t Pc##index
+#define PERF_DECLARE_START( index ) \
+ uint64_t Pc##index = cl_get_time_stamp()
+#define cl_perf_start( index ) \
+ (Pc##index = cl_get_time_stamp())
+#define cl_perf_clr( index ) \
+ (Pc##index = 0)
+#define cl_perf_inc( index ) \
+ (Pc##index++)
+#define cl_perf_log( p_perf, index, pc_total_time ) \
+{\
+ /* Update the performance data. This requires synchronization. */ \
+ cl_spinlock_acquire( &((cl_perf_t*)p_perf)->data_array[index].lock ); \
+ \
+ ((cl_perf_t*)p_perf)->data_array[index].total_time += pc_total_time; \
+ ((cl_perf_t*)p_perf)->data_array[index].count++; \
+ if( pc_total_time < ((cl_perf_t*)p_perf)->data_array[index].min_time ) \
+ ((cl_perf_t*)p_perf)->data_array[index].min_time = pc_total_time; \
+ \
+ cl_spinlock_release( &((cl_perf_t*)p_perf)->data_array[index].lock ); \
+}
+#define cl_perf_update( p_perf, index, start_time ) \
+{\
+ /* Get the ending time stamp, and calculate the total time. */ \
+ uint64_t pc_total_time = cl_get_time_stamp() - start_time;\
+ /* Using stack variable for start time, stop and log */ \
+ cl_perf_log( p_perf, index, pc_total_time ); \
+}
+#define cl_perf_update_ctr( p_perf, index ) \
+ cl_perf_log( p_perf, index, Pc##index )
+#define cl_perf_stop( p_perf, index ) \
+{\
+ cl_perf_update( p_perf, index, Pc##index );\
+}
+
+#define cl_get_perf_values( p_perf, index, p_total, p_min, p_count ) \
+{\
+ *p_total = p_perf->data_array[index].total_time; \
+ *p_min = p_perf->data_array[index].min_time; \
+ *p_count = p_perf->data_array[index].count; \
+}
+
+#define cl_get_perf_calibration( p_perf, p_locked_time, p_normal_time ) \
+{\
+ *p_locked_time = p_perf->locked_calibration_time; \
+ *p_normal_time = p_perf->normal_calibration_time; \
+}
+
+#define cl_get_perf_string( p_perf, i ) \
+"CL Perf:\t%lu\t%"PRIu64"\t%"PRIu64"\t%"PRIu64"\n", \
+ i, p_perf->data_array[i].total_time, \
+ p_perf->data_array[i].min_time, p_perf->data_array[i].count
+
+#else /* PERF_TRACK_ON */
+/*
+ * Disable performance tracking.
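To make the macro flow above concrete, a usage sketch following the stated convention of an enumerated index type (hypothetical names; assumes PERF_TRACK_ON is defined before including cl_perf.h, per the NOTES earlier in this header):

    #define PERF_TRACK_ON
    #include <complib/cl_perf.h>

    /* Hypothetical counter indexes; the last value doubles as the count. */
    typedef enum _my_perf_index { MY_PERF_SEND = 0, MY_PERF_NUM_COUNTERS } my_perf_index_t;

    static cl_perf_t g_perf;

    void my_send( void )
    {
        /* Declares a stack variable PcMY_PERF_SEND holding the start timestamp. */
        PERF_DECLARE_START( MY_PERF_SEND );

        /* ... work being timed ... */

        /* Takes the end timestamp and logs the sample under the counter's lock. */
        cl_perf_stop( &g_perf, MY_PERF_SEND );
    }

Setup and teardown bracket all sampling: cl_perf_construct( &g_perf ), cl_perf_init( &g_perf, MY_PERF_NUM_COUNTERS ), and finally cl_perf_destroy( &g_perf, TRUE ) to display the counters while freeing them.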
+ */ + +#define cl_perf_construct( p_perf ) +#define cl_perf_init( p_perf, num_cntrs ) CL_SUCCESS +#define cl_perf_destroy( p_perf, display ) +#define cl_perf_reset( p_perf ) +#define cl_perf_display( p_perf ) +#define PERF_DECLARE( index ) +#define PERF_DECLARE_START( index ) +#define cl_perf_start( index ) +#define cl_perf_clr( index ) +#define cl_perf_inc( index ) +#define cl_perf_log( p_perf, index, pc_total_time ) +#define cl_perf_update( p_perf, index, start_time ) +#define cl_perf_update_ctr( p_perf, index ) +#define cl_perf_stop( p_perf, index ) +#define cl_get_perf_values( p_perf, index, p_total, p_min, p_count ) +#define cl_get_perf_calibration( p_perf, p_locked_time, p_normal_time ) +#endif /* PERF_TRACK_ON */ + + +/* + * Internal performance tracking functions. Users should never call these + * functions directly. Instead, use the macros defined above to resolve + * to these functions when PERF_TRACK_ON is defined, which allows disabling + * performance tracking. + */ + + +/* + * Initialize the state of the performance tracking structure. + */ +CL_EXPORT void CL_API +__cl_perf_construct( + IN cl_perf_t* const p_perf ); + +/* + * Size the performance tracking information and initialize all + * related structures. + */ +CL_EXPORT cl_status_t CL_API +__cl_perf_init( + IN cl_perf_t* const p_perf, + IN const uintn_t num_counters ); + +/* + * Destroy the performance tracking data. + */ +CL_EXPORT void CL_API +__cl_perf_destroy( + IN cl_perf_t* const p_perf, + IN const boolean_t display ); + +/* + * Reset the performance tracking data. + */ +CL_EXPORT void CL_API +__cl_perf_reset( + IN cl_perf_t* const p_perf ); + +/* + * Display the current performance tracking data. + */ +CL_EXPORT void CL_API +__cl_perf_display( + IN const cl_perf_t* const p_perf ); + + +#endif /* _CL_PERF_H_ */ diff --git a/branches/Ndi/inc/complib/cl_pool.h b/branches/Ndi/inc/complib/cl_pool.h new file mode 100644 index 00000000..df09b500 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_pool.h @@ -0,0 +1,594 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of the pool. + * The pool manages a pool of objects. + * The pool can grow to meet demand, limited only by system memory. 
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_POOL_H_
+#define _CL_POOL_H_
+
+
+#include
+
+
+/****h* Component Library/Pool
+* NAME
+* Pool
+*
+* DESCRIPTION
+* The pool provides a self-contained and self-sustaining pool
+* of user-defined objects.
+*
+* To aid in object-oriented design, the pool provides the user
+* the ability to specify callbacks that are invoked for each object for
+* construction, initialization, and destruction. Constructor and destructor
+* callback functions may not fail.
+*
+* A pool does not return memory to the system as the user returns
+* objects to the pool. The only method of returning memory to the system is
+* to destroy the pool.
+*
+* The pool functions operate on a cl_pool_t structure which should be treated
+* as opaque and should be manipulated only through the provided functions.
+*
+* SEE ALSO
+* Structures:
+* cl_pool_t
+*
+* Callbacks:
+* cl_pfn_pool_init_t, cl_pfn_pool_dtor_t
+*
+* Initialization/Destruction:
+* cl_pool_construct, cl_pool_init, cl_pool_destroy
+*
+* Manipulation:
+* cl_pool_get, cl_pool_put, cl_pool_grow
+*
+* Attributes:
+* cl_is_pool_inited, cl_pool_count
+*********/
+
+
+/****d* Component Library: Pool/cl_pfn_pool_init_t
+* NAME
+* cl_pfn_pool_init_t
+*
+* DESCRIPTION
+* The cl_pfn_pool_init_t function type defines the prototype for
+* functions used as initializers for objects being allocated by a
+* pool.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_pool_init_t)(
+ IN void* const p_object,
+ IN void* context );
+/*
+* PARAMETERS
+* p_object
+* [in] Pointer to an object to initialize.
+*
+* context
+* [in] Context provided in a call to cl_pool_init.
+*
+* RETURN VALUES
+* Return CL_SUCCESS to indicate that initialization of the object
+* was successful and that initialization of further objects may continue.
+*
+* Other cl_status_t values cause the grow operation to fail and are
+* returned by cl_pool_init or cl_pool_grow.
+*
+* NOTES
+* This function type is provided as function prototype reference for
+* the function provided by the user as an optional parameter to the
+* cl_pool_init function.
+*
+* The initializer is invoked once per allocated object, allowing the user
+* to trap initialization failures. Returning a status other than CL_SUCCESS
+* aborts a grow operation, initiated either through cl_pool_init or
+* cl_pool_grow, and causes the initiating function to fail.
+* Any non-CL_SUCCESS status will be returned by the function that initiated
+* the grow operation.
+*
+* SEE ALSO
+* Pool, cl_pool_init, cl_pool_grow
+*********/
+
+
+/****d* Component Library: Pool/cl_pfn_pool_dtor_t
+* NAME
+* cl_pfn_pool_dtor_t
+*
+* DESCRIPTION
+* The cl_pfn_pool_dtor_t function type defines the prototype for
+* functions used as destructors for objects being deallocated by a
+* pool.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_pool_dtor_t)(
+ IN void* const p_object,
+ IN void* context );
+/*
+* PARAMETERS
+* p_object
+* [in] Pointer to an object to destruct.
+*
+* context
+* [in] Context provided in the call to cl_pool_init.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function type is provided as function prototype reference for
+* the function provided by the user as an optional parameter to the
+* cl_pool_init function.
+*
+* The destructor is invoked once per allocated object, allowing the user
+* to perform any necessary cleanup. Users should not attempt to deallocate
+* the memory for the object, as the pool manages object
+* allocation and deallocation.
+*
+* SEE ALSO
+* Pool, cl_pool_init
+*********/
+
+
+/****s* Component Library: Pool/cl_pool_t
+* NAME
+* cl_pool_t
+*
+* DESCRIPTION
+* Pool structure.
+*
+* The cl_pool_t structure should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_pool
+{
+ cl_qcpool_t qcpool;
+ cl_pfn_pool_init_t pfn_init;
+ cl_pfn_pool_dtor_t pfn_dtor;
+ const void *context;
+
+} cl_pool_t;
+/*
+* FIELDS
+* qcpool
+* Quick composite pool that manages all objects.
+*
+* pfn_init
+* Pointer to the user's initializer callback, used by the pool
+* to translate the quick composite pool's initializer callback to
+* a pool initializer callback.
+*
+* pfn_dtor
+* Pointer to the user's destructor callback, used by the pool
+* to translate the quick composite pool's destructor callback to
+* a pool destructor callback.
+*
+* context
+* User's provided context for callback functions, used by the pool
+* when invoking callbacks.
+*
+* SEE ALSO
+* Pool
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif /* __cplusplus */
+
+
+/****f* Component Library: Pool/cl_pool_construct
+* NAME
+* cl_pool_construct
+*
+* DESCRIPTION
+* The cl_pool_construct function constructs a pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_pool_construct(
+ IN cl_pool_t* const p_pool );
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure whose state to initialize.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling cl_pool_init, cl_pool_destroy, and cl_is_pool_inited.
+*
+* Calling cl_pool_construct is a prerequisite to calling any other
+* pool function except cl_pool_init.
+*
+* SEE ALSO
+* Pool, cl_pool_init, cl_pool_destroy, cl_is_pool_inited
+*********/
+
+
+/****f* Component Library: Pool/cl_is_pool_inited
+* NAME
+* cl_is_pool_inited
+*
+* DESCRIPTION
+* The cl_is_pool_inited function returns whether a pool was successfully
+* initialized.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint32_t CL_API
+cl_is_pool_inited(
+ IN const cl_pool_t* const p_pool )
+{
+ /* CL_ASSERT that a non-null pointer is provided. */
+ CL_ASSERT( p_pool );
+ return( cl_is_qcpool_inited( &p_pool->qcpool ) );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure whose initialization state
+* to check.
+*
+* RETURN VALUES
+* TRUE if the pool was initialized successfully.
+*
+* FALSE otherwise.
+*
+* NOTES
+* Allows checking the state of a pool to determine if invoking member
+* functions is appropriate.
+*
+* SEE ALSO
+* Pool
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_init
+* NAME
+* cl_pool_init
+*
+* DESCRIPTION
+* The cl_pool_init function initializes a pool for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_pool_init(
+ IN cl_pool_t* const p_pool,
+ IN const size_t min_count,
+ IN const size_t max_count,
+ IN const size_t grow_size,
+ IN const size_t object_size,
+ IN cl_pfn_pool_init_t pfn_initializer OPTIONAL,
+ IN cl_pfn_pool_dtor_t pfn_destructor OPTIONAL,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure to initialize.
+*
+* min_count
+* [in] Minimum number of objects that the pool should support. All
+* necessary allocations to allow storing the minimum number of items
+* are performed at initialization time, and all necessary callbacks
+* invoked.
+*
+* max_count
+* [in] Maximum number of objects to which the pool is allowed to grow.
+* A value of zero specifies no maximum.
+*
+* grow_size
+* [in] Number of objects to allocate when incrementally growing the pool.
+* A value of zero disables automatic growth.
+*
+* object_size
+* [in] Size, in bytes, of each object.
+*
+* pfn_initializer
+* [in] Initialization callback to invoke for every new object when
+* growing the pool. This parameter is optional and may be NULL.
+* See the cl_pfn_pool_init_t function type declaration for details
+* about the callback function.
+*
+* pfn_destructor
+* [in] Destructor callback to invoke for every object before memory for
+* that object is freed. This parameter is optional and may be NULL.
+* See the cl_pfn_pool_dtor_t function type declaration for details
+* about the callback function.
+*
+* context
+* [in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+* CL_SUCCESS if the pool was initialized successfully.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+* pool.
+*
+* CL_INVALID_SETTING if the maximum size is non-zero and less than the
+* minimum size.
+*
+* Other cl_status_t values returned by the optional initialization callback
+* function specified by the pfn_initializer parameter.
+*
+* NOTES
+* cl_pool_init initializes, and if necessary, grows the pool to
+* the capacity desired.
+*
+* SEE ALSO
+* Pool, cl_pool_construct, cl_pool_destroy,
+* cl_pool_get, cl_pool_put, cl_pool_grow,
+* cl_pool_count, cl_pfn_pool_init_t, cl_pfn_pool_dtor_t
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_destroy
+* NAME
+* cl_pool_destroy
+*
+* DESCRIPTION
+* The cl_pool_destroy function destroys a pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_pool_destroy(
+ IN cl_pool_t* const p_pool )
+{
+ CL_ASSERT( p_pool );
+ cl_qcpool_destroy( &p_pool->qcpool );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* All memory allocated for objects is freed. The destructor callback,
+* if any, will be invoked for every allocated object. Further operations
+* on the pool should not be attempted after cl_pool_destroy
+* is invoked.
+*
+* This function should only be called after a call to
+* cl_pool_construct or cl_pool_init.
+*
+* In a debug build, cl_pool_destroy asserts that all objects are in
+* the pool.
+*
+* SEE ALSO
+* Pool, cl_pool_construct, cl_pool_init
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_count
+* NAME
+* cl_pool_count
+*
+* DESCRIPTION
+* The cl_pool_count function returns the number of available objects
+* in a pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_pool_count(
+ IN cl_pool_t* const p_pool )
+{
+ CL_ASSERT( p_pool );
+ return( cl_qcpool_count( &p_pool->qcpool ) );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure for which the number of
+* available objects is requested.
+*
+* RETURN VALUE
+* Returns the number of objects available in the specified pool.
+*
+* SEE ALSO
+* Pool
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_get
+* NAME
+* cl_pool_get
+*
+* DESCRIPTION
+* The cl_pool_get function retrieves an object from a pool.
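Taken together with cl_pool_get and cl_pool_put below, the initialization API above supports the following usage sketch (hypothetical object type and initializer, not part of the patch):

    typedef struct _my_obj { uint32_t id; } my_obj_t;

    /* Optional per-object initializer, invoked once per allocated object. */
    static cl_status_t CL_API my_obj_init( IN void* const p_object, IN void* context )
    {
        UNUSED_PARAM( context );
        ((my_obj_t*)p_object)->id = 0;
        return CL_SUCCESS;
    }

    cl_pool_t pool;
    cl_pool_construct( &pool );
    /* 16 objects up front, no maximum, grow by 4, no destructor, no context. */
    if( cl_pool_init( &pool, 16, 0, 4, sizeof(my_obj_t),
                      my_obj_init, NULL, NULL ) == CL_SUCCESS )
    {
        my_obj_t *p_obj = (my_obj_t*)cl_pool_get( &pool );
        if( p_obj )
            cl_pool_put( &pool, p_obj );
        cl_pool_destroy( &pool );
    }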
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_pool_get(
+ IN cl_pool_t* const p_pool )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_pool );
+
+ p_pool_obj = (cl_pool_obj_t*)cl_qcpool_get( &p_pool->qcpool );
+ if( !p_pool_obj )
+ return( NULL );
+
+ CL_ASSERT( p_pool_obj->list_obj.p_object );
+ return( (void*)p_pool_obj->list_obj.p_object );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure from which to retrieve
+* an object.
+*
+* RETURN VALUES
+* Returns a pointer to an object.
+*
+* Returns NULL if the pool is empty and cannot be grown automatically.
+*
+* NOTES
+* cl_pool_get returns the object at the head of the pool. If the pool is
+* empty, it is automatically grown to accommodate this request unless the
+* grow_size parameter passed to the cl_pool_init function was zero.
+*
+* SEE ALSO
+* Pool, cl_pool_put, cl_pool_grow, cl_pool_count
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_put
+* NAME
+* cl_pool_put
+*
+* DESCRIPTION
+* The cl_pool_put function returns an object to a pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_pool_put(
+ IN cl_pool_t* const p_pool,
+ IN void* const p_object )
+{
+ cl_pool_obj_t *p_pool_obj;
+
+ CL_ASSERT( p_pool );
+ CL_ASSERT( p_object );
+
+ /* Calculate the offset to the list object representing this object. */
+ p_pool_obj = (cl_pool_obj_t*)
+ (((uint8_t*)p_object) - sizeof(cl_pool_obj_t));
+
+ /* Sanity check: the list object must point back to this object. */
+ CL_ASSERT( p_pool_obj->list_obj.p_object == p_object );
+
+ cl_qcpool_put( &p_pool->qcpool, (cl_pool_item_t*)p_pool_obj );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure to which to return
+* an object.
+*
+* p_object
+* [in] Pointer to an object to return to the pool.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_pool_put places the returned object at the head of the pool.
+*
+* The object specified by the p_object parameter must have been
+* retrieved from the pool by a previous call to cl_pool_get.
+*
+* SEE ALSO
+* Pool, cl_pool_get
+*********/
+
+
+/****f* Component Library: Pool/cl_pool_grow
+* NAME
+* cl_pool_grow
+*
+* DESCRIPTION
+* The cl_pool_grow function grows a pool by
+* the specified number of objects.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_status_t CL_API
+cl_pool_grow(
+ IN cl_pool_t* const p_pool,
+ IN const size_t obj_count )
+{
+ CL_ASSERT( p_pool );
+ return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) );
+}
+/*
+* PARAMETERS
+* p_pool
+* [in] Pointer to a cl_pool_t structure whose capacity to grow.
+*
+* obj_count
+* [in] Number of objects by which to grow the pool.
+*
+* RETURN VALUES
+* CL_SUCCESS if the pool grew successfully.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+* pool.
+*
+* cl_status_t value returned by optional initialization callback function
+* specified by the pfn_initializer parameter passed to the
+* cl_pool_init function.
+*
+* NOTES
+* It is not necessary to call cl_pool_grow if the pool is
+* configured to grow automatically.
+*
+* SEE ALSO
+* Pool
+*********/
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+
+#endif /* _CL_POOL_H_ */
diff --git a/branches/Ndi/inc/complib/cl_ptr_vector.h b/branches/Ndi/inc/complib/cl_ptr_vector.h
new file mode 100644
index 00000000..bfba4f73
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_ptr_vector.h
@@ -0,0 +1,878 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * This file contains pointer vector definitions. Pointer Vector provides
+ * dynamically resizable array functionality.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_PTR_VECTOR_H_
+#define _CL_PTR_VECTOR_H_
+
+
+#include
+
+
+/****h* Component Library/Pointer Vector
+* NAME
+* Pointer Vector
+*
+* DESCRIPTION
+* The Pointer Vector is a self-sizing array of pointers. Like a traditional
+* array, a pointer vector allows efficient constant time access to elements
+* with a specified index. A pointer vector grows transparently as the
+* user adds elements to the array.
+*
+* The cl_ptr_vector_t structure should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* SEE ALSO
+* Structures:
+* cl_ptr_vector_t
+*
+* Callbacks:
+* cl_pfn_ptr_vec_apply_t, cl_pfn_ptr_vec_find_t
+*
+* Item Manipulation:
+* cl_ptr_vector_set_obj, cl_ptr_vector_obj
+*
+* Initialization:
+* cl_ptr_vector_construct, cl_ptr_vector_init, cl_ptr_vector_destroy
+*
+* Manipulation:
+* cl_ptr_vector_get_capacity, cl_ptr_vector_set_capacity,
+* cl_ptr_vector_get_size, cl_ptr_vector_set_size, cl_ptr_vector_set_min_size,
+* cl_ptr_vector_get_ptr, cl_ptr_vector_get, cl_ptr_vector_at, cl_ptr_vector_set
+*
+* Search:
+* cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end,
+* cl_ptr_vector_apply_func
+*********/
+
+
+/****d* Component Library: Pointer Vector/cl_pfn_ptr_vec_apply_t
+* NAME
+* cl_pfn_ptr_vec_apply_t
+*
+* DESCRIPTION
+* The cl_pfn_ptr_vec_apply_t function type defines the prototype for
+* functions used to iterate elements in a pointer vector.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_ptr_vec_apply_t)(
+ IN const size_t index,
+ IN void* const element,
+ IN void* context );
+/*
+* PARAMETERS
+* index
+* [in] Index of the element.
+*
+* element
+* [in] Pointer to an element at the specified index in the pointer vector.
+*
+* context
+* [in] Context provided in a call to cl_ptr_vector_apply_func.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function type is provided as function prototype reference for
+* the function passed by users as a parameter to the cl_ptr_vector_apply_func
+* function.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_apply_func
+*********/
+
+
+/****d* Component Library: Pointer Vector/cl_pfn_ptr_vec_find_t
+* NAME
+* cl_pfn_ptr_vec_find_t
+*
+* DESCRIPTION
+* The cl_pfn_ptr_vec_find_t function type defines the prototype for
+* functions used to find elements in a pointer vector.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_ptr_vec_find_t)(
+ IN const size_t index,
+ IN const void* const element,
+ IN void* context );
+/*
+* PARAMETERS
+* index
+* [in] Index of the element.
+*
+* element
+* [in] Pointer to an element at the specified index in the
+* pointer vector.
+*
+* context
+* [in] Context provided in a call to cl_ptr_vector_find_from_start or
+* cl_ptr_vector_find_from_end.
+*
+* RETURN VALUES
+* Return CL_SUCCESS if the element was found. This stops pointer vector
+* iteration.
+*
+* CL_NOT_FOUND to continue the pointer vector iteration.
+*
+* NOTES
+* This function type is provided as function prototype reference for the
+* function provided by users as a parameter to the
+* cl_ptr_vector_find_from_start and cl_ptr_vector_find_from_end functions.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end
+*********/
+
+
+/****s* Component Library: Pointer Vector/cl_ptr_vector_t
+* NAME
+* cl_ptr_vector_t
+*
+* DESCRIPTION
+* Pointer Vector structure.
+*
+* The cl_ptr_vector_t structure should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_ptr_vector
+{
+ size_t size;
+ size_t grow_size;
+ size_t capacity;
+ const void **p_ptr_array;
+ cl_state_t state;
+
+} cl_ptr_vector_t;
+/*
+* FIELDS
+* size
+* Number of elements successfully initialized in the pointer vector.
+*
+* grow_size
+* Number of elements to allocate when growing.
+*
+* capacity
+* Total number of elements allocated.
+*
+* p_ptr_array
+* Internal array of pointers to elements.
+*
+* state
+* State of the pointer vector.
+*
+* SEE ALSO
+* Pointer Vector
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_construct
+* NAME
+* cl_ptr_vector_construct
+*
+* DESCRIPTION
+* The cl_ptr_vector_construct function constructs a pointer vector.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_ptr_vector_construct(
+ IN cl_ptr_vector_t* const p_vector );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling cl_ptr_vector_destroy without first calling
+* cl_ptr_vector_init.
+*
+* Calling cl_ptr_vector_construct is a prerequisite to calling any other
+* pointer vector function except cl_ptr_vector_init.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_init, cl_ptr_vector_destroy
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_init
+* NAME
+* cl_ptr_vector_init
+*
+* DESCRIPTION
+* The cl_ptr_vector_init function initializes a pointer vector for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_init(
+ IN cl_ptr_vector_t* const p_vector,
+ IN const size_t min_cap,
+ IN const size_t grow_size );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure to initialize.
+*
+* min_cap
+* [in] Initial number of elements the vector will support.
+* The vector is always initialized with a size of zero.
+* +* grow_size +* [in] Number of elements to allocate when incrementally growing +* the pointer vector. A value of zero disables automatic growth. +* +* RETURN VALUES +* CL_SUCCESS if the pointer vector was initialized successfully. +* +* CL_INSUFFICIENT_MEMORY if the initialization failed. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_construct, cl_ptr_vector_destroy, +* cl_ptr_vector_set, cl_ptr_vector_get, cl_ptr_vector_at +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_destroy +* NAME +* cl_ptr_vector_destroy +* +* DESCRIPTION +* The cl_ptr_vector_destroy function destroys a pointer vector. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_ptr_vector_destroy( + IN cl_ptr_vector_t* const p_vector ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_ptr_vector_destroy frees all memory allocated for the pointer vector. +* +* This function should only be called after a call to cl_ptr_vector_construct +* or cl_ptr_vector_init. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_construct, cl_ptr_vector_init +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_get_capacity +* NAME +* cl_ptr_vector_get_capacity +* +* DESCRIPTION +* The cl_ptr_vector_get_capacity function returns the capacity of +* a pointer vector. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_ptr_vector_get_capacity( + IN const cl_ptr_vector_t* const p_vector ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + return( p_vector->capacity ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure whose capacity to return. +* +* RETURN VALUE +* Capacity, in elements, of the pointer vector. +* +* NOTES +* The capacity is the number of elements that the pointer vector can store, +* and can be greater than the number of elements stored. To get the number +* of elements stored in the pointer vector, use cl_ptr_vector_get_size. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_set_capacity, cl_ptr_vector_get_size +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_get_size +* NAME +* cl_ptr_vector_get_size +* +* DESCRIPTION +* The cl_ptr_vector_get_size function returns the size of a pointer vector. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_ptr_vector_get_size( + IN const cl_ptr_vector_t* const p_vector ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_UNINITIALIZED || + p_vector->state == CL_INITIALIZED ); + + return( p_vector->size ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure whose size to return. +* +* RETURN VALUE +* Size, in elements, of the pointer vector. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_set_size, cl_ptr_vector_get_capacity +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_get +* NAME +* cl_ptr_vector_get +* +* DESCRIPTION +* The cl_ptr_vector_get function returns the pointer stored in a +* pointer vector at a specified index. +* +* SYNOPSIS +*/ +CL_INLINE void* CL_API +cl_ptr_vector_get( + IN const cl_ptr_vector_t* const p_vector, + IN const size_t index ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + CL_ASSERT( p_vector->size > index ); + + return( (void*)p_vector->p_ptr_array[index] ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure from which to get an +* element. +* +* index +* [in] Index of the element. 
+* +* RETURN VALUE +* Value of the pointer stored at the specified index. +* +* NOTES +* cl_ptr_vector_get provides constant access times regardless of the index. +* +* cl_ptr_vector_get does not perform boundary checking. Callers are +* responsible for providing an index that is within the range of the pointer +* vector. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_at, cl_ptr_vector_set, cl_ptr_vector_get_size +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_at +* NAME +* cl_ptr_vector_at +* +* DESCRIPTION +* The cl_ptr_vector_at function copies an element stored in a pointer +* vector at a specified index, performing boundary checks. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_ptr_vector_at( + IN const cl_ptr_vector_t* const p_vector, + IN const size_t index, + OUT void** const p_element ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure from which to get a copy of +* an element. +* +* index +* [in] Index of the element. +* +* p_element +* [out] Pointer to storage for the pointer element. Contains a copy of +* the desired pointer upon successful completion of the call. +* +* RETURN VALUES +* CL_SUCCESS if an element was found at the specified index. +* +* CL_INVALID_SETTING if the index was out of range. +* +* NOTES +* cl_ptr_vector_at provides constant time access regardless of +* the index, and performs boundary checking on the pointer vector. +* +* Upon success, the p_element parameter contains a copy of the +* desired element. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_get +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_set +* NAME +* cl_ptr_vector_set +* +* DESCRIPTION +* The cl_ptr_vector_set function sets the element at the specified index. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_ptr_vector_set( + IN cl_ptr_vector_t* const p_vector, + IN const size_t index, + IN const void* const element ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure into which to store +* an element. +* +* index +* [in] Index of the element. +* +* element +* [in] Pointer to store in the pointer vector. +* +* RETURN VALUES +* CL_SUCCESS if the element was successfully set. +* +* CL_INSUFFICIENT_MEMORY if the pointer vector could not be resized to +* accommodate the new element. +* +* NOTES +* cl_ptr_vector_set grows the pointer vector as needed to accommodate +* the new element, unless the grow_size parameter passed into the +* cl_ptr_vector_init function was zero. +* +* SEE ALSO +* Pointer Vector, cl_ptr_vector_get +*********/ + + +/****f* Component Library: Pointer Vector/cl_ptr_vector_insert +* NAME +* cl_ptr_vector_insert +* +* DESCRIPTION +* The cl_ptr_vector_insert function inserts an element into a pointer vector. +* +* SYNOPSIS +*/ +CL_INLINE cl_status_t CL_API +cl_ptr_vector_insert( + IN cl_ptr_vector_t* const p_vector, + IN const void* const element, + OUT size_t* const p_index OPTIONAL ) +{ + cl_status_t status; + + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + status = cl_ptr_vector_set( p_vector, p_vector->size, element ); + if( status == CL_SUCCESS && p_index ) + *p_index = p_vector->size - 1; + + return( status ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_ptr_vector_t structure into which to store +* an element. +* +* element +* [in] Pointer to store in the pointer vector. +* +* p_index +* [out] Pointer to the index of the element. Valid only if +* insertion was successful. 
+*
+* RETURN VALUES
+* CL_SUCCESS if the element was successfully inserted.
+*
+* CL_INSUFFICIENT_MEMORY if the pointer vector could not be resized to
+* accommodate the new element.
+*
+* NOTES
+* cl_ptr_vector_insert places the new element at the end of
+* the pointer vector.
+*
+* cl_ptr_vector_insert grows the pointer vector as needed to accommodate
+* the new element, unless the grow_size parameter passed into the
+* cl_ptr_vector_init function was zero.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_remove, cl_ptr_vector_set
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_remove
+* NAME
+* cl_ptr_vector_remove
+*
+* DESCRIPTION
+* The cl_ptr_vector_remove function removes and returns the pointer stored
+* in a pointer vector at a specified index. Items beyond the removed item
+* are shifted down and the size of the pointer vector is decremented.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void* CL_API
+cl_ptr_vector_remove(
+ IN cl_ptr_vector_t* const p_vector,
+ IN const size_t index );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure from which to get an
+* element.
+*
+* index
+* [in] Index of the element.
+*
+* RETURN VALUE
+* Value of the pointer stored at the specified index.
+*
+* NOTES
+* cl_ptr_vector_remove does not perform boundary checking. Callers are
+* responsible for providing an index that is within the range of the pointer
+* vector.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_insert, cl_ptr_vector_get_size
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_set_capacity
+* NAME
+* cl_ptr_vector_set_capacity
+*
+* DESCRIPTION
+* The cl_ptr_vector_set_capacity function reserves memory in a
+* pointer vector for a specified number of pointers.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_capacity(
+ IN cl_ptr_vector_t* const p_vector,
+ IN const size_t new_capacity );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure whose capacity to set.
+*
+* new_capacity
+* [in] Total number of elements for which the pointer vector should
+* allocate memory.
+*
+* RETURN VALUES
+* CL_SUCCESS if the capacity was successfully set.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to satisfy the
+* operation. The pointer vector is left unchanged.
+*
+* NOTES
+* cl_ptr_vector_set_capacity increases the capacity of the pointer vector.
+* It does not change the size of the pointer vector. If the requested
+* capacity is less than the current capacity, the pointer vector is left
+* unchanged.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_get_capacity, cl_ptr_vector_set_size,
+* cl_ptr_vector_set_min_size
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_set_size
+* NAME
+* cl_ptr_vector_set_size
+*
+* DESCRIPTION
+* The cl_ptr_vector_set_size function resizes a pointer vector, either
+* increasing or decreasing its size.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_size(
+ IN cl_ptr_vector_t* const p_vector,
+ IN const size_t size );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure whose size to set.
+*
+* size
+* [in] Number of elements desired in the pointer vector.
+*
+* RETURN VALUES
+* CL_SUCCESS if the size of the pointer vector was set successfully.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to complete the
+* operation. The pointer vector is left unchanged.
+*
+* NOTES
+* cl_ptr_vector_set_size sets the pointer vector to the specified size.
+* If size is smaller than the current size of the pointer vector, the size
+* is reduced.
+*
+* This function can only fail if size is larger than the current capacity.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_get_size, cl_ptr_vector_set_min_size,
+* cl_ptr_vector_set_capacity
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_set_min_size
+* NAME
+* cl_ptr_vector_set_min_size
+*
+* DESCRIPTION
+* The cl_ptr_vector_set_min_size function resizes a pointer vector to a
+* specified size if the pointer vector is smaller than the specified size.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_ptr_vector_set_min_size(
+ IN cl_ptr_vector_t* const p_vector,
+ IN const size_t min_size );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure whose minimum size to set.
+*
+* min_size
+* [in] Minimum number of elements that the pointer vector should contain.
+*
+* RETURN VALUES
+* CL_SUCCESS if the pointer vector size is greater than or equal to min_size.
+* This could indicate that the pointer vector's capacity was increased to
+* min_size or that the pointer vector was already of sufficient size.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to resize the
+* pointer vector. The pointer vector is left unchanged.
+*
+* NOTES
+* If min_size is smaller than the current size of the pointer vector,
+* the pointer vector is unchanged. The pointer vector is unchanged if the
+* size could not be changed due to insufficient memory being available to
+* perform the operation.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_get_size, cl_ptr_vector_set_size,
+* cl_ptr_vector_set_capacity
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_apply_func
+* NAME
+* cl_ptr_vector_apply_func
+*
+* DESCRIPTION
+* The cl_ptr_vector_apply_func function invokes a specified function for
+* every element in a pointer vector.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_ptr_vector_apply_func(
+ IN const cl_ptr_vector_t* const p_vector,
+ IN cl_pfn_ptr_vec_apply_t pfn_callback,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure whose elements to iterate.
+*
+* pfn_callback
+* [in] Function invoked for every element in the array.
+* See the cl_pfn_ptr_vec_apply_t function type declaration for details
+* about the callback function.
+*
+* context
+* [in] Value to pass to the callback function.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_ptr_vector_apply_func invokes the specified function for every element
+* in the pointer vector, starting from the beginning of the pointer vector.
+*
+* SEE ALSO
+* Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_find_from_end,
+* cl_pfn_ptr_vec_apply_t
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_find_from_start
+* NAME
+* cl_ptr_vector_find_from_start
+*
+* DESCRIPTION
+* The cl_ptr_vector_find_from_start function uses a specified function to
+* search for elements in a pointer vector starting from the lowest index.
+*
+* SYNOPSIS
+*/
+CL_EXPORT size_t CL_API
+cl_ptr_vector_find_from_start(
+ IN const cl_ptr_vector_t* const p_vector,
+ IN cl_pfn_ptr_vec_find_t pfn_callback,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_vector
+* [in] Pointer to a cl_ptr_vector_t structure to search.
+*
+* pfn_callback
+* [in] Function invoked to determine if a match was found.
+*		See the cl_pfn_ptr_vec_find_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+* RETURN VALUES
+*	Index of the element, if found.
+*
+*	Size of the pointer vector if the element was not found.
+*
+* NOTES
+*	cl_ptr_vector_find_from_start does not remove the found element from
+*	the pointer vector. The index of the element is returned when the function
+*	provided by the pfn_callback parameter returns CL_SUCCESS.
+*
+* SEE ALSO
+*	Pointer Vector, cl_ptr_vector_find_from_end, cl_ptr_vector_apply_func,
+*	cl_pfn_ptr_vec_find_t
+*********/
+
+
+/****f* Component Library: Pointer Vector/cl_ptr_vector_find_from_end
+* NAME
+*	cl_ptr_vector_find_from_end
+*
+* DESCRIPTION
+*	The cl_ptr_vector_find_from_end function uses a specified function to
+*	search for elements in a pointer vector starting from the highest index.
+*
+* SYNOPSIS
+*/
+CL_EXPORT size_t CL_API
+cl_ptr_vector_find_from_end(
+	IN	const cl_ptr_vector_t* const	p_vector,
+	IN	cl_pfn_ptr_vec_find_t			pfn_callback,
+	IN	const void* const				context );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_ptr_vector_t structure in which to search.
+*
+*	pfn_callback
+*		[in] Function invoked to determine if a match was found.
+*		See the cl_pfn_ptr_vec_find_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+* RETURN VALUES
+*	Index of the element, if found.
+*
+*	Size of the pointer vector if the element was not found.
+*
+* NOTES
+*	cl_ptr_vector_find_from_end does not remove the found element from
+*	the pointer vector. The index of the element is returned when the function
+*	provided by the pfn_callback parameter returns CL_SUCCESS.
+*
+* SEE ALSO
+*	Pointer Vector, cl_ptr_vector_find_from_start, cl_ptr_vector_apply_func,
+*	cl_pfn_ptr_vec_find_t
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+
+#endif	/* _CL_PTR_VECTOR_H_ */
diff --git a/branches/Ndi/inc/complib/cl_qcomppool.h b/branches/Ndi/inc/complib/cl_qcomppool.h
new file mode 100644
index 00000000..8cdbb55f
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_qcomppool.h
@@ -0,0 +1,785 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of the quick composite pool. The quick composite pool
+ *	manages a pool of composite objects. A composite object is an object
+ *	that is made of multiple sub-objects.
+ *	It can grow to meet demand, limited only by system memory.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_QUICK_COMPOSITE_POOL_H_
+#define _CL_QUICK_COMPOSITE_POOL_H_
+
+
+#include <complib/cl_types.h>
+#include <complib/cl_qlist.h>
+
+
+/****h* Component Library/Quick Composite Pool
+* NAME
+*	Quick Composite Pool
+*
+* DESCRIPTION
+*	The Quick Composite Pool provides a self-contained and self-sustaining
+*	pool of user defined composite objects.
+*
+*	A composite object is an object that is composed of one or more
+*	sub-objects, each of which needs to be treated separately for
+*	initialization. Objects can be retrieved from the pool as long as there
+*	is memory in the system.
+*
+*	To aid in object oriented design, the Quick Composite Pool provides users
+*	the ability to specify callbacks that are invoked for each object for
+*	construction, initialization, and destruction. Constructor and destructor
+*	callback functions may not fail.
+*
+*	A Quick Composite Pool does not return memory to the system as the user
+*	returns objects to the pool. The only method of returning memory to the
+*	system is to destroy the pool.
+*
+*	The Quick Composite Pool operates on cl_pool_item_t structures that
+*	describe composite objects. This provides for more efficient memory use.
+*	If using a cl_pool_item_t is not desired, the Composite Pool provides
+*	similar functionality but operates on opaque objects.
+*
+*	The Quick Composite Pool functions operate on a cl_qcpool_t structure
+*	which should be treated as opaque and should be manipulated only through
+*	the provided functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_qcpool_t, cl_pool_item_t
+*
+*	Callbacks:
+*		cl_pfn_qcpool_init_t, cl_pfn_qcpool_dtor_t
+*
+*	Initialization/Destruction:
+*		cl_qcpool_construct, cl_qcpool_init, cl_qcpool_destroy
+*
+*	Manipulation:
+*		cl_qcpool_get, cl_qcpool_put, cl_qcpool_put_list, cl_qcpool_grow
+*
+*	Attributes:
+*		cl_is_qcpool_inited, cl_qcpool_count
+*********/
+
+
+/****s* Component Library: Quick Composite Pool/cl_pool_item_t
+* NAME
+*	cl_pool_item_t
+*
+* DESCRIPTION
+*	The cl_pool_item_t structure is used by pools to store objects.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_pool_item
+{
+	cl_list_item_t		list_item;
+#ifdef _DEBUG_
+	/* Pad to make the cl_pool_obj structure line up properly */
+	void				*pad;
+	/* Pointer to the owner pool used for sanity checks. */
+	struct _cl_qcpool	*p_pool;
+#endif
+
+} cl_pool_item_t;
+/*
+* FIELDS
+*	list_item
+*		Used internally by the pool. Users should not use this field.
+*
+*	p_pool
+*		Used internally by the pool in debug builds to check for consistency.
+*
+* NOTES
+*	The pool item structure is defined in such a way as to safely allow
+*	users to cast from a pool item to a list item for storing items
+*	retrieved from a quick pool in a quick list.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_list_item_t
+*********/
+
+
+/****i* Component Library: Quick Composite Pool/cl_pool_obj_t
+* NAME
+*	cl_pool_obj_t
+*
+* DESCRIPTION
+*	The cl_pool_obj_t structure is used by pools to store objects.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_pool_obj
+{
+	/* The list object must be the first item to allow casting. */
+	cl_list_obj_t		list_obj;
+#ifdef _DEBUG_
+	/* Pointer to the owner pool used for sanity checks. */
+	struct _cl_qcpool	*p_pool;
+#endif
+
+} cl_pool_obj_t;
+/*
+* FIELDS
+*	list_obj
+*		Used internally by the pool. Users should not use this field.
+*
+*	p_pool
+*		Used internally by the pool in debug builds to check for consistency.
+*
+* NOTES
+*	The pool object structure is used by non-quick pools to store objects.
+*
+* SEE ALSO
+*	cl_pool_item_t
+*********/
+
+
+/****d* Component Library: Quick Composite Pool/cl_pfn_qcpool_init_t
+* NAME
+*	cl_pfn_qcpool_init_t
+*
+* DESCRIPTION
+*	The cl_pfn_qcpool_init_t function type defines the prototype for
+*	functions used as initializers for objects being allocated by a
+*	quick composite pool.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_qcpool_init_t)(
+	IN	void** const			p_comp_array,
+	IN	const uint32_t			num_components,
+	IN	void*					context,
+	OUT	cl_pool_item_t** const	pp_pool_item );
+/*
+* PARAMETERS
+*	p_comp_array
+*		[in] Pointer to the first entry in an array of pointers, each of
+*		which points to a component that makes up a composite object.
+*
+*	num_components
+*		[in] Number of components in the component array.
+*
+*	context
+*		[in] Context provided in a call to cl_qcpool_init.
+*
+*	pp_pool_item
+*		[out] Users should set this pointer to reference the cl_pool_item_t
+*		structure that represents the composite object. This pointer must
+*		not be NULL if the function returns CL_SUCCESS.
+*
+* RETURN VALUE
+*	Return CL_SUCCESS to indicate that initialization of the object
+*	was successful and that initialization of further objects may continue.
+*
+*	Other cl_status_t values will be returned by cl_qcpool_init
+*	and cl_qcpool_grow.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as a parameter to the
+*	cl_qcpool_init function.
+*
+*	The initializer is invoked once per allocated object, allowing the user
+*	to chain components to form a composite object and perform any necessary
+*	initialization. Returning a status other than CL_SUCCESS aborts a grow
+*	operation, initiated either through cl_qcpool_init or cl_qcpool_grow,
+*	and causes the initiating function to fail. Any non-CL_SUCCESS status
+*	will be returned by the function that initiated the grow operation.
+*
+*	All memory for the requested number of components is pre-allocated. Users
+*	should include space in one of their components for the cl_pool_item_t
+*	structure that will represent the composite object to avoid having to
+*	allocate that structure in the initialization callback. Alternatively,
+*	users may specify an additional component for the cl_pool_item_t structure.
+*
+*	When later performing a cl_qcpool_get call, the return value is a pointer
+*	to the cl_pool_item_t returned by this function in the pp_pool_item
+*	parameter. Users must set pp_pool_item to a valid pointer to the
+*	cl_pool_item_t representing the object if they return CL_SUCCESS.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_init
+*********/
+
+
+/****d* Component Library: Quick Composite Pool/cl_pfn_qcpool_dtor_t
+* NAME
+*	cl_pfn_qcpool_dtor_t
+*
+* DESCRIPTION
+*	The cl_pfn_qcpool_dtor_t function type defines the prototype for
+*	functions used as destructors for objects being deallocated by a
+*	quick composite pool.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_qcpool_dtor_t)(
+	IN	const cl_pool_item_t* const	p_pool_item,
+	IN	void*						context );
+/*
+* PARAMETERS
+*	p_pool_item
+*		[in] Pointer to a cl_pool_item_t structure representing an object.
+*
+*	context
+*		[in] Context provided in a call to cl_qcpool_init.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as an optional parameter to the
+*	cl_qcpool_init function.
+*
+*	The destructor is invoked once per allocated object, allowing the user
+*	to perform any necessary cleanup. Users should not attempt to deallocate
+*	the memory for the composite object, as the quick composite pool manages
+*	object allocation and deallocation.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_init
+*********/
+
+
+/****s* Component Library: Quick Composite Pool/cl_qcpool_t
+* NAME
+*	cl_qcpool_t
+*
+* DESCRIPTION
+*	Quick composite pool structure.
+*
+*	The cl_qcpool_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_qcpool
+{
+	uint32_t				num_components;
+	size_t					*component_sizes;
+	void					**p_components;
+	size_t					num_objects;
+	size_t					max_objects;
+	size_t					grow_size;
+	cl_pfn_qcpool_init_t	pfn_init;
+	cl_pfn_qcpool_dtor_t	pfn_dtor;
+	const void				*context;
+	cl_qlist_t				free_list;
+	cl_qlist_t				alloc_list;
+	cl_state_t				state;
+
+} cl_qcpool_t;
+/*
+* FIELDS
+*	num_components
+*		Number of components per object.
+*
+*	component_sizes
+*		Array of sizes, one for each component.
+*
+*	p_components
+*		Array of pointers to components, used for the initializer callback.
+*
+*	num_objects
+*		Number of objects managed by the pool.
+*
+*	max_objects
+*		Maximum number of objects to which the pool is allowed to grow.
+*
+*	grow_size
+*		Number of objects to add when automatically growing the pool.
+*
+*	pfn_init
+*		Pointer to the user's initializer callback to invoke when initializing
+*		new objects.
+*
+*	pfn_dtor
+*		Pointer to the user's destructor callback to invoke before deallocating
+*		memory allocated for objects.
+*
+*	context
+*		User-provided context for callback functions, used by the pool
+*		when invoking callbacks.
+*
+*	free_list
+*		Quick list of objects available.
+*
+*	alloc_list
+*		Quick list used to store information about allocations.
+*
+*	state
+*		State of the pool.
+*
+* SEE ALSO
+*	Quick Composite Pool
+*********/
+
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_construct
+* NAME
+*	cl_qcpool_construct
+*
+* DESCRIPTION
+*	The cl_qcpool_construct function constructs a quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_qcpool_construct(
+	IN	cl_qcpool_t* const	p_pool );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure whose state to initialize.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_qcpool_init, cl_qcpool_destroy, and cl_is_qcpool_inited.
+*
+*	Calling cl_qcpool_construct is a prerequisite to calling any other
+*	quick composite pool function except cl_qcpool_init.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_init, cl_qcpool_destroy,
+*	cl_is_qcpool_inited
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_is_qcpool_inited
+* NAME
+*	cl_is_qcpool_inited
+*
+* DESCRIPTION
+*	The cl_is_qcpool_inited function returns whether a quick composite pool was
+*	successfully initialized.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint32_t CL_API
+cl_is_qcpool_inited(
+	IN	const cl_qcpool_t* const	p_pool )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_pool );
+	/* CL_ASSERT that the pool is not in some invalid state. */
+	CL_ASSERT( cl_is_state_valid( p_pool->state ) );
+
+	return( p_pool->state == CL_INITIALIZED );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure to check.
+*
+* RETURN VALUES
+*	TRUE if the quick composite pool was initialized successfully.
+*
+*	FALSE otherwise.
+*
+* NOTES
+*	Allows checking the state of a quick composite pool to determine if
+*	invoking member functions is appropriate.
+*
+* SEE ALSO
+*	Quick Composite Pool
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_init
+* NAME
+*	cl_qcpool_init
+*
+* DESCRIPTION
+*	The cl_qcpool_init function initializes a quick composite pool for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_qcpool_init(
+	IN	cl_qcpool_t* const		p_pool,
+	IN	const size_t			min_size,
+	IN	const size_t			max_size,
+	IN	const size_t			grow_size,
+	IN	const size_t* const		component_sizes,
+	IN	const uint32_t			num_components,
+	IN	cl_pfn_qcpool_init_t	pfn_initializer OPTIONAL,
+	IN	cl_pfn_qcpool_dtor_t	pfn_destructor OPTIONAL,
+	IN	const void* const		context );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure to initialize.
+*
+*	min_size
+*		[in] Minimum number of objects that the pool should support. All
+*		necessary allocations to allow storing the minimum number of items
+*		are performed at initialization time, and all necessary callbacks
+*		are successfully invoked.
+*
+*	max_size
+*		[in] Maximum number of objects to which the pool is allowed to grow.
+*		A value of zero specifies no maximum.
+*
+*	grow_size
+*		[in] Number of objects to allocate when incrementally growing the pool.
+*		A value of zero disables automatic growth.
+*
+*	component_sizes
+*		[in] Pointer to the first entry in an array of sizes describing,
+*		in order, the sizes of the components that make up a composite object.
+*
+*	num_components
+*		[in] Number of components that make up a composite object.
+*
+*	pfn_initializer
+*		[in] Initializer callback to invoke for every new object when growing
+*		the pool. This parameter may be NULL only if the objects stored in
+*		the quick composite pool consist of only one component. If NULL, the
+*		pool assumes the cl_pool_item_t structure describing objects is
+*		located at the head of each object. See the cl_pfn_qcpool_init_t
+*		function type declaration for details about the callback function.
+*
+*	pfn_destructor
+*		[in] Destructor callback to invoke for every object before memory for
+*		that object is freed. This parameter is optional and may be NULL.
+*		See the cl_pfn_qcpool_dtor_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the quick composite pool was initialized successfully.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+*	quick composite pool.
+*
+*	CL_INVALID_SETTING if a NULL initializer was provided for composite objects
+*	consisting of more than one component. Also returns CL_INVALID_SETTING if
+*	the maximum size is non-zero and less than the minimum size.
+*
+*	Any other cl_status_t value returned by the optional initialization
+*	callback function specified by the pfn_initializer parameter.
+*
+*	If initialization fails, the pool is left in a destroyed state. Callers
+*	may still safely call cl_qcpool_destroy.
+*
+* NOTES
+*	cl_qcpool_init initializes and, if necessary, grows the pool to
+*	the desired capacity.
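+*
+* EXAMPLE
+*	A minimal usage sketch. The my_obj_t type, its fields, and the sizes
+*	chosen below are illustrative assumptions, not part of the library.
+*	Because the cl_pool_item_t is the first member of the single component,
+*	a NULL initializer may be used:
+*
+*		typedef struct _my_obj
+*		{
+*			cl_pool_item_t	pool_item;
+*			uint32_t		id;
+*		} my_obj_t;
+*
+*		cl_qcpool_t	pool;
+*		size_t		size = sizeof(my_obj_t);
+*		cl_status_t	status;
+*
+*		cl_qcpool_construct( &pool );
+*		status = cl_qcpool_init( &pool, 16, 0, 8, &size, 1,
+*			NULL, NULL, NULL );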
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_construct, cl_qcpool_destroy,
+*	cl_qcpool_get, cl_qcpool_put, cl_qcpool_grow,
+*	cl_qcpool_count, cl_pfn_qcpool_init_t, cl_pfn_qcpool_dtor_t
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_destroy
+* NAME
+*	cl_qcpool_destroy
+*
+* DESCRIPTION
+*	The cl_qcpool_destroy function destroys a quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_qcpool_destroy(
+	IN	cl_qcpool_t* const	p_pool );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	All memory allocated for composite objects is freed. The destructor
+*	callback, if any, will be invoked for every allocated object. Further
+*	operations on the composite pool should not be attempted after
+*	cl_qcpool_destroy is invoked.
+*
+*	This function should only be called after a call to
+*	cl_qcpool_construct or cl_qcpool_init.
+*
+*	In a debug build, cl_qcpool_destroy asserts that all objects are in
+*	the pool.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_construct, cl_qcpool_init
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_count
+* NAME
+*	cl_qcpool_count
+*
+* DESCRIPTION
+*	The cl_qcpool_count function returns the number of available objects
+*	in a quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_qcpool_count(
+	IN	cl_qcpool_t* const	p_pool )
+{
+	CL_ASSERT( p_pool );
+	CL_ASSERT( p_pool->state == CL_INITIALIZED );
+
+	return( cl_qlist_count( &p_pool->free_list ) );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure for which the number of
+*		available objects is requested.
+*
+* RETURN VALUE
+*	Returns the number of objects available in the specified
+*	quick composite pool.
+*
+* SEE ALSO
+*	Quick Composite Pool
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_get
+* NAME
+*	cl_qcpool_get
+*
+* DESCRIPTION
+*	The cl_qcpool_get function retrieves an object from a
+*	quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_pool_item_t* CL_API
+cl_qcpool_get(
+	IN	cl_qcpool_t* const	p_pool );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure from which to retrieve
+*		an object.
+*
+* RETURN VALUES
+*	Returns a pointer to a cl_pool_item_t for a composite object.
+*
+*	Returns NULL if the pool is empty and cannot be grown automatically.
+*
+* NOTES
+*	cl_qcpool_get returns the object at the head of the pool. If the pool is
+*	empty, it is automatically grown to accommodate this request unless the
+*	grow_size parameter passed to the cl_qcpool_init function was zero.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_put,
+*	cl_qcpool_grow, cl_qcpool_count
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_put
+* NAME
+*	cl_qcpool_put
+*
+* DESCRIPTION
+*	The cl_qcpool_put function returns an object to a quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qcpool_put(
+	IN	cl_qcpool_t* const		p_pool,
+	IN	cl_pool_item_t* const	p_pool_item )
+{
+	CL_ASSERT( p_pool );
+	CL_ASSERT( p_pool->state == CL_INITIALIZED );
+	CL_ASSERT( p_pool_item );
+	/* Make sure items being returned came from the specified pool. */
+	CL_ASSERT( p_pool_item->p_pool == p_pool );
+
+	/* return this lil' doggy to the pool */
+	cl_qlist_insert_head( &p_pool->free_list, &p_pool_item->list_item );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure to which to return
+*		an object.
+*
+*	p_pool_item
+*		[in] Pointer to a cl_pool_item_t structure for the object
+*		being returned.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_qcpool_put places the returned object at the head of the pool.
+*
+*	The object specified by the p_pool_item parameter must have been
+*	retrieved from the pool by a previous call to cl_qcpool_get.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_get
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_put_list
+* NAME
+*	cl_qcpool_put_list
+*
+* DESCRIPTION
+*	The cl_qcpool_put_list function returns a list of objects to the head of
+*	a quick composite pool.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qcpool_put_list(
+	IN	cl_qcpool_t* const	p_pool,
+	IN	cl_qlist_t* const	p_list )
+{
+#ifdef _DEBUG_
+	cl_list_item_t	*p_item;
+#endif
+
+	CL_ASSERT( p_pool );
+	CL_ASSERT( p_pool->state == CL_INITIALIZED );
+	CL_ASSERT( p_list );
+
+#ifdef _DEBUG_
+	/* Check that all items in the list came from this pool. */
+	p_item = cl_qlist_head( p_list );
+	while( p_item != cl_qlist_end( p_list ) )
+	{
+		CL_ASSERT( ((cl_pool_item_t*)p_item)->p_pool == p_pool );
+		p_item = cl_qlist_next( p_item );
+	}
+#endif
+
+	/* return these lil' doggies to the pool */
+	cl_qlist_insert_list_head( &p_pool->free_list, p_list );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure to which to return
+*		a list of objects.
+*
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure for the list of objects
+*		being returned.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_qcpool_put_list places the returned objects at the head of the pool.
+*
+*	The objects in the list specified by the p_list parameter must have been
+*	retrieved from the pool by a previous call to cl_qcpool_get.
+*
+* SEE ALSO
+*	Quick Composite Pool, cl_qcpool_put, cl_qcpool_get
+*********/
+
+
+/****f* Component Library: Quick Composite Pool/cl_qcpool_grow
+* NAME
+*	cl_qcpool_grow
+*
+* DESCRIPTION
+*	The cl_qcpool_grow function grows a quick composite pool by
+*	the specified number of objects.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_qcpool_grow(
+	IN	cl_qcpool_t* const	p_pool,
+	IN	size_t				obj_count );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qcpool_t structure whose capacity to grow.
+*
+*	obj_count
+*		[in] Number of objects by which to grow the pool.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the quick composite pool grew successfully.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the
+*	quick composite pool.
+*
+*	Any cl_status_t value returned by the optional initialization callback
+*	function specified by the pfn_initializer parameter passed to the
+*	cl_qcpool_init function.
+*
+* NOTES
+*	It is not necessary to call cl_qcpool_grow if the pool is
+*	configured to grow automatically.
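+*
+* EXAMPLE
+*	A sketch of the typical get/put cycle, continuing the hypothetical
+*	my_obj_t pool from the cl_qcpool_init example above. cl_qcpool_get
+*	grows the pool automatically when the free list is empty and
+*	grow_size is non-zero, so an explicit cl_qcpool_grow call is not
+*	needed here:
+*
+*		cl_pool_item_t	*p_item;
+*		my_obj_t		*p_obj;
+*
+*		p_item = cl_qcpool_get( &pool );
+*		if( p_item )
+*		{
+*			p_obj = (my_obj_t*)p_item;
+*			p_obj->id = 42;
+*			cl_qcpool_put( &pool, p_item );
+*		}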
+*
+* SEE ALSO
+*	Quick Composite Pool
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif	/* __cplusplus */
+
+
+#endif	/* _CL_QUICK_COMPOSITE_POOL_H_ */
diff --git a/branches/Ndi/inc/complib/cl_qlist.h b/branches/Ndi/inc/complib/cl_qlist.h
new file mode 100644
index 00000000..cd8065ed
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_qlist.h
@@ -0,0 +1,1770 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of quick list.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_QUICK_LIST_H_
+#define _CL_QUICK_LIST_H_
+
+
+#include <complib/cl_types.h>
+
+
+/****h* Component Library/Quick List
+* NAME
+*	Quick List
+*
+* DESCRIPTION
+*	Quick list implements a doubly linked list that stores user provided
+*	cl_list_item_t structures.
+*	Quick list does not allocate any memory, so its operations cannot fail.
+*	This makes quick list useful for minimizing the error paths in code.
+*
+*	Quick list is not thread safe, and users must provide serialization when
+*	adding and removing items from the list. Note that it is possible to
+*	walk a quick list while simultaneously adding to it.
+*
+*	The Quick List functions operate on a cl_qlist_t structure which should be
+*	treated as opaque and should be manipulated only through the provided
+*	functions.
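+*
+* EXAMPLE
+*	A minimal usage sketch. The my_node_t type and its fields are
+*	illustrative assumptions, not part of the library. Because the
+*	embedded cl_list_item_t is the first member, a simple cast recovers
+*	the enclosing node while walking the list:
+*
+*		typedef struct _my_node
+*		{
+*			cl_list_item_t	list_item;
+*			int				value;
+*		} my_node_t;
+*
+*		cl_qlist_t		list;
+*		my_node_t		node;
+*		cl_list_item_t	*p_item;
+*		my_node_t		*p_node;
+*
+*		cl_qlist_init( &list );
+*		node.value = 7;
+*		cl_qlist_insert_tail( &list, &node.list_item );
+*
+*		for( p_item = cl_qlist_head( &list );
+*			p_item != cl_qlist_end( &list );
+*			p_item = cl_qlist_next( p_item ) )
+*		{
+*			p_node = (my_node_t*)p_item;
+*		}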
+* +* SEE ALSO +* Structures: +* cl_qlist_t, cl_list_item_t, cl_list_obj_t +* +* Callbacks: +* cl_pfn_qlist_apply_t, cl_pfn_qlist_find_t +* +* Item Manipulation: +* cl_qlist_set_obj, cl_qlist_obj +* +* Initialization: +* cl_qlist_init +* +* Iteration: +* cl_qlist_next, cl_qlist_prev, cl_qlist_head, cl_qlist_tail, +* cl_qlist_end +* +* Manipulation: +* cl_qlist_insert_head, cl_qlist_insert_tail, +* cl_qlist_insert_list_head, cl_qlist_insert_list_tail, +* cl_qlist_insert_array_head, cl_qlist_insert_array_tail, +* cl_qlist_insert_prev, cl_qlist_insert_next, +* cl_qlist_remove_head, cl_qlist_remove_tail, +* cl_qlist_remove_item, cl_qlist_remove_all +* +* Search: +* cl_is_item_in_qlist, cl_qlist_find_next, cl_qlist_find_prev, +* cl_qlist_find_from_head, cl_qlist_find_from_tail +* cl_qlist_apply_func, cl_qlist_move_items +* +* Attributes: +* cl_qlist_count, cl_is_qlist_empty +*********/ + + +/****s* Component Library: Quick List/cl_list_item_t +* NAME +* cl_list_item_t +* +* DESCRIPTION +* The cl_list_item_t structure is used by lists to store objects. +* +* SYNOPSIS +*/ +typedef struct _cl_list_item +{ + struct _cl_list_item *p_next; + struct _cl_list_item *p_prev; +#ifdef _DEBUG_ + struct _cl_qlist *p_list; +#endif + +} cl_list_item_t; +/* +* FIELDS +* p_next +* Used internally by the list. Users should not use this field. +* +* p_prev +* Used internally by the list. Users should not use this field. +* +* SEE ALSO +* Quick List +*********/ + + +/****s* Component Library: Quick List/cl_list_obj_t +* NAME +* cl_list_obj_t +* +* DESCRIPTION +* The cl_list_obj_t structure is used by lists to store objects. +* +* SYNOPSIS +*/ +typedef struct _cl_list_obj +{ + cl_list_item_t list_item; + const void *p_object; /* User's context */ + +} cl_list_obj_t; +/* +* FIELDS +* list_item +* Used internally by the list. Users should not use this field. +* +* p_object +* User defined context. Users should not access this field directly. +* Use cl_qlist_set_obj and cl_qlist_obj to set and retrieve the value +* of this field. +* +* NOTES +* Users can use the cl_qlist_set_obj and cl_qlist_obj functions to store +* and retrieve context information in the list item. +* +* SEE ALSO +* Quick List, cl_qlist_set_obj, cl_qlist_obj, cl_list_item_t +*********/ + + +/****s* Component Library: Quick List/cl_qlist_t +* NAME +* cl_qlist_t +* +* DESCRIPTION +* Quick list structure. +* +* The cl_qlist_t structure should be treated as opaque and should be +* manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_qlist +{ + cl_list_item_t end; + size_t count; + cl_state_t state; + +} cl_qlist_t; +/* +* FIELDS +* end +* List item used to mark the end of the list. +* +* count +* Number of items in the list. +* +* state +* State of the quick list. +* +* SEE ALSO +* Quick List +*********/ + + +/****d* Component Library: Quick List/cl_pfn_qlist_apply_t +* NAME +* cl_pfn_qlist_apply_t +* +* DESCRIPTION +* The cl_pfn_qlist_apply_t function type defines the prototype for functions +* used to iterate items in a quick list. +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_qlist_apply_t)( + IN cl_list_item_t* const p_list_item, + IN void* context ); +/* +* PARAMETERS +* p_list_item +* [in] Pointer to a cl_list_item_t structure. +* +* context +* [in] Value passed to the callback function. +* +* RETURN VALUE +* This function does not return a value. 
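+*
+* EXAMPLE
+*	A sketch of an apply callback and its use with cl_qlist_apply_func,
+*	assuming an initialized list as in the Quick List overview example.
+*	The count_items function is an illustrative assumption, not part of
+*	the library:
+*
+*		static void CL_API
+*		count_items(
+*			IN	cl_list_item_t* const	p_list_item,
+*			IN	void*					context )
+*		{
+*			size_t *p_count = (size_t*)context;
+*
+*			(void)p_list_item;
+*			(*p_count)++;
+*		}
+*
+*		size_t count = 0;
+*		cl_qlist_apply_func( &list, count_items, &count );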
+* +* NOTES +* This function type is provided as function prototype reference for the +* function provided by users as a parameter to the cl_qlist_apply_func +* function. +* +* SEE ALSO +* Quick List, cl_qlist_apply_func +*********/ + + +/****d* Component Library: Quick List/cl_pfn_qlist_find_t +* NAME +* cl_pfn_qlist_find_t +* +* DESCRIPTION +* The cl_pfn_qlist_find_t function type defines the prototype for functions +* used to find items in a quick list. +* +* SYNOPSIS +*/ +typedef cl_status_t +(CL_API *cl_pfn_qlist_find_t)( + IN const cl_list_item_t* const p_list_item, + IN void* context ); +/* +* PARAMETERS +* p_list_item +* [in] Pointer to a cl_list_item_t. +* +* context +* [in] Value passed to the callback function. +* +* RETURN VALUES +* Return CL_SUCCESS if the desired item was found. This stops list iteration. +* +* Return CL_NOT_FOUND to continue list iteration. +* +* NOTES +* This function type is provided as function prototype reference for the +* function provided by users as a parameter to the cl_qlist_find_from_head, +* cl_qlist_find_from_tail, cl_qlist_find_next, and cl_qlist_find_prev +* functions. +* +* SEE ALSO +* Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail, +* cl_qlist_find_next, cl_qlist_find_prev +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****i* Component Library: Quick List/__cl_primitive_insert +* NAME +* __cl_primitive_insert +* +* DESCRIPTION +* Add a new item in front of the specified item. This is a low level +* function for use internally by the queuing routines. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +__cl_primitive_insert( + IN cl_list_item_t* const p_list_item, + IN cl_list_item_t* const p_new_item ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_new_item ); + + p_new_item->p_next = p_list_item; + p_new_item->p_prev = p_list_item->p_prev; + p_list_item->p_prev = p_new_item; + p_new_item->p_prev->p_next = p_new_item; +} +/* +* PARAMETERS +* p_list_item +* [in] Pointer to cl_list_item_t to insert in front of +* +* p_new_item +* [in] Pointer to cl_list_item_t to add +* +* RETURN VALUE +* This function does not return a value. +*********/ + + +/****i* Component Library: Quick List/__cl_primitive_remove +* NAME +* __cl_primitive_remove +* +* DESCRIPTION +* Remove an item from a list. This is a low level routine +* for use internally by the queuing routines. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +__cl_primitive_remove( + IN cl_list_item_t* const p_list_item ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + CL_ASSERT( p_list_item->p_next ); + CL_ASSERT( p_list_item->p_prev ); + + /* set the back pointer */ + p_list_item->p_next->p_prev= p_list_item->p_prev; + /* set the next pointer */ + p_list_item->p_prev->p_next= p_list_item->p_next; + + /* if we're debugging, spruce up the pointers to help find bugs */ +#if defined( _DEBUG_ ) + if( p_list_item != p_list_item->p_next ) + { + p_list_item->p_next = NULL; + p_list_item->p_prev = NULL; + } +#endif /* defined( _DEBUG_ ) */ +} +/* +* PARAMETERS +* p_list_item +* [in] Pointer to cl_list_item_t to remove +* +* RETURN VALUE +* This function does not return a value. +*********/ + + +/* + * Declaration of quick list functions + */ + +/****f* Component Library: Quick List/cl_qlist_set_obj +* NAME +* cl_qlist_set_obj +* +* DESCRIPTION +* The cl_qlist_set_obj function sets the object stored in a list object. 
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qlist_set_obj(
+	IN	cl_list_obj_t* const	p_list_obj,
+	IN	const void* const		p_object )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list_obj );
+	p_list_obj->p_object = p_object;
+}
+/*
+* PARAMETERS
+*	p_list_obj
+*		[in] Pointer to a cl_list_obj_t structure.
+*
+*	p_object
+*		[in] User defined context.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_obj
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_obj
+* NAME
+*	cl_qlist_obj
+*
+* DESCRIPTION
+*	The cl_qlist_obj function returns the object stored in a list object.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_qlist_obj(
+	IN	const cl_list_obj_t* const	p_list_obj )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list_obj );
+
+	return( (void*)p_list_obj->p_object );
+}
+/*
+* PARAMETERS
+*	p_list_obj
+*		[in] Pointer to a cl_list_obj_t structure.
+*
+* RETURN VALUE
+*	Returns the value of the object pointer stored in the list object.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_set_obj
+*********/
+
+
+CL_INLINE void CL_API
+__cl_qlist_reset(
+	IN	cl_qlist_t* const	p_list )
+{
+	/* Point the end item to itself. */
+	p_list->end.p_next = &p_list->end;
+	p_list->end.p_prev = &p_list->end;
+#if defined( _DEBUG_ )
+	p_list->end.p_list = p_list;
+#endif
+
+	/* Clear the count. */
+	p_list->count = 0;
+}
+
+
+/****f* Component Library: Quick List/cl_qlist_init
+* NAME
+*	cl_qlist_init
+*
+* DESCRIPTION
+*	The cl_qlist_init function initializes a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qlist_init(
+	IN	cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+
+	p_list->state = CL_INITIALIZED;
+
+	/* Reset the quick list data structure. */
+	__cl_qlist_reset( p_list );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure to initialize.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling quick list manipulation functions.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_insert_head, cl_qlist_insert_tail,
+*	cl_qlist_remove_head, cl_qlist_remove_tail
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_count
+* NAME
+*	cl_qlist_count
+*
+* DESCRIPTION
+*	The cl_qlist_count function returns the number of list items stored
+*	in a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_qlist_count(
+	IN	const cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	return( p_list->count );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure.
+*
+* RETURN VALUE
+*	Number of items in the list. The item count is maintained by the list,
+*	so this function returns in constant time without iterating the list.
+*
+* SEE ALSO
+*	Quick List, cl_is_qlist_empty
+*********/
+
+
+/****f* Component Library: Quick List/cl_is_qlist_empty
+* NAME
+*	cl_is_qlist_empty
+*
+* DESCRIPTION
+*	The cl_is_qlist_empty function returns whether a quick list is empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_qlist_empty(
+	IN	const cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	return( !cl_qlist_count( p_list ) );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure.
+*
+* RETURN VALUES
+*	TRUE if the specified quick list is empty.
+*
+*	FALSE otherwise.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_count, cl_qlist_remove_all
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_next
+* NAME
+*	cl_qlist_next
+*
+* DESCRIPTION
+*	The cl_qlist_next function returns a pointer to the list item following
+*	a given list item in a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_list_item_t* CL_API
+cl_qlist_next(
+	IN	const cl_list_item_t* const	p_list_item )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list_item );
+
+	/* Return the next item. */
+	return( p_list_item->p_next );
+}
+/*
+* PARAMETERS
+*	p_list_item
+*		[in] Pointer to the cl_list_item_t whose successor to return.
+*
+* Returns:
+*	Pointer to the list item following the list item specified by
+*	the p_list_item parameter in the quick list.
+*
+*	Pointer to the list end if p_list_item was at the tail of the list.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_prev, cl_qlist_end,
+*	cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_prev
+* NAME
+*	cl_qlist_prev
+*
+* DESCRIPTION
+*	The cl_qlist_prev function returns a pointer to the list item preceding
+*	a given list item in a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_list_item_t* CL_API
+cl_qlist_prev(
+	IN	const cl_list_item_t* const	p_list_item )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list_item );
+
+	/* Return the previous item. */
+	return( p_list_item->p_prev );
+}
+/*
+* PARAMETERS
+*	p_list_item
+*		[in] Pointer to the cl_list_item_t whose predecessor to return.
+*
+* Returns:
+*	Pointer to the list item preceding the list item specified by
+*	the p_list_item parameter in the quick list.
+*
+*	Pointer to the list end if p_list_item was at the head of the list.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_end,
+*	cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_head
+* NAME
+*	cl_qlist_head
+*
+* DESCRIPTION
+*	The cl_qlist_head function returns the list item at
+*	the head of a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_list_item_t* CL_API
+cl_qlist_head(
+	IN	const cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	return( cl_qlist_next( &p_list->end ) );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure.
+*
+* RETURN VALUES
+*	Pointer to the list item at the head of the quick list.
+*
+*	Pointer to the list end if the list was empty.
+*
+* NOTES
+*	cl_qlist_head does not remove the item from the list.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_tail, cl_qlist_next, cl_qlist_prev, cl_qlist_end,
+*	cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_tail
+* NAME
+*	cl_qlist_tail
+*
+* DESCRIPTION
+*	The cl_qlist_tail function returns the list item at
+*	the tail of a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_list_item_t* CL_API
+cl_qlist_tail(
+	IN	const cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	return( cl_qlist_prev( &p_list->end ) );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure.
+*
+* RETURN VALUES
+*	Pointer to the list item at the tail of the quick list.
+*
+*	Pointer to the list end if the list was empty.
+*
+* NOTES
+*	cl_qlist_tail does not remove the item from the list.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_head, cl_qlist_next, cl_qlist_prev, cl_qlist_end,
+*	cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_end
+* NAME
+*	cl_qlist_end
+*
+* DESCRIPTION
+*	The cl_qlist_end function returns the end of a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_list_item_t* const CL_API
+cl_qlist_end(
+	IN	const cl_qlist_t* const	p_list )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	return( &p_list->end );
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure.
+*
+* RETURN VALUE
+*	Pointer to the end of the list.
+*
+* NOTES
+*	cl_qlist_end is useful for determining the validity of list items returned
+*	by cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_prev, as well as
+*	the cl_qlist_find functions. If the list item pointer returned by any of
+*	these functions compares equal to the end, the end of the list was
+*	encountered. When using cl_qlist_head or cl_qlist_tail, this condition
+*	indicates that the list is empty.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_head, cl_qlist_tail, cl_qlist_next, cl_qlist_prev,
+*	cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_insert_head
+* NAME
+*	cl_qlist_insert_head
+*
+* DESCRIPTION
+*	The cl_qlist_insert_head function inserts a list item at the
+*	head of a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qlist_insert_head(
+	IN	cl_qlist_t* const		p_list,
+	IN	cl_list_item_t* const	p_list_item )
+{
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list );
+	/* CL_ASSERT that a non-null pointer is provided. */
+	CL_ASSERT( p_list_item );
+	/* CL_ASSERT that the list was initialized. */
+	CL_ASSERT( p_list->state == CL_INITIALIZED );
+
+	/*
+	 * The list item must not already be part of the list. Note that this
+	 * assertion may fail if an uninitialized list item happens to have its
+	 * list pointer equal to the specified list. The chances of this
+	 * happening are acceptable in light of the value of this check.
+	 */
+	CL_ASSERT( p_list_item->p_list != p_list );
+
+#if defined( _DEBUG_ )
+	p_list_item->p_list = p_list;
+#endif
+
+	/* Insert before the head. */
+	__cl_primitive_insert( cl_qlist_head( p_list ), p_list_item );
+
+	p_list->count++;
+}
+/*
+* PARAMETERS
+*	p_list
+*		[in] Pointer to a cl_qlist_t structure into which to insert the object.
+*
+*	p_list_item
+*		[in] Pointer to a cl_list_item_t structure to add.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	In debug builds, cl_qlist_insert_head asserts that the specified list item
+*	is not already in the list.
+*
+* SEE ALSO
+*	Quick List, cl_qlist_insert_tail, cl_qlist_insert_list_head,
+*	cl_qlist_insert_list_tail, cl_qlist_insert_array_head,
+*	cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next,
+*	cl_qlist_remove_head, cl_list_item_t
+*********/
+
+
+/****f* Component Library: Quick List/cl_qlist_insert_tail
+* NAME
+*	cl_qlist_insert_tail
+*
+* DESCRIPTION
+*	The cl_qlist_insert_tail function inserts a list item at the tail
+*	of a quick list.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_qlist_insert_tail(
+	IN	cl_qlist_t* const		p_list,
+	IN	cl_list_item_t* const	p_list_item )
+{
+	/* CL_ASSERT that a non-null pointer is provided.
*/ + CL_ASSERT( p_list ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + /* + * The list item must not already be part of the list. Note that this + * assertion may fail if an uninitialized list item happens to have its + * list pointer equal to the specified list. The chances of this + * happening are acceptable in light of the value of this check. + */ + CL_ASSERT( p_list_item->p_list != p_list ); + +#if defined( _DEBUG_ ) + p_list_item->p_list = p_list; +#endif + + /* + * Put the new element in front of the end which is the same + * as being at the tail + */ + __cl_primitive_insert( &p_list->end, p_list_item ); + + p_list->count++; +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure into which to insert the object. +* +* p_list_item +* [in] Pointer to cl_list_item_t structure to add. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* In debug builds, cl_qlist_insert_tail asserts that the specified list item +* is not already in the list. +* +* SEE ALSO +* Quick List, cl_qlist_insert_head, cl_qlist_insert_list_head, +* cl_qlist_insert_list_tail, cl_qlist_insert_array_head, +* cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next, +* cl_qlist_remove_tail, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_list_head +* NAME +* cl_qlist_insert_list_head +* +* DESCRIPTION +* The cl_qlist_insert_list_head function merges two quick lists by +* inserting one at the head of the other. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_insert_list_head( + IN cl_qlist_t* const p_dest_list, + IN cl_qlist_t* const p_src_list ); +/* +* PARAMETERS +* p_dest_list +* [in] Pointer to destination quicklist object. +* +* p_src_list +* [in] Pointer to quicklist to add. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts all list items in the source list to the head of the +* destination list. The ordering of the list items is preserved. +* +* The list pointed to by the p_src_list parameter is empty when +* the call returns. +* +* SEE ALSO +* Quick List, cl_qlist_insert_list_tail, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_array_head, +* cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next, +* cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_list_tail +* NAME +* cl_qlist_insert_list_tail +* +* DESCRIPTION +* The cl_qlist_insert_list_tail function merges two quick lists by +* inserting one at the tail of the other. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_insert_list_tail( + IN cl_qlist_t* const p_dest_list, + IN cl_qlist_t* const p_src_list ); +/* +* PARAMETERS +* p_dest_list +* [in] Pointer to destination quicklist object +* +* p_src_list +* [in] Pointer to quicklist to add +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts all list items in the source list to the tail of the +* destination list. The ordering of the list items is preserved. +* +* The list pointed to by the p_src_list parameter is empty when +* the call returns. 
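+*
+* EXAMPLE
+*	A sketch of merging two initialized lists (items elided). After the
+*	call, src is empty and its items follow dst's items in order:
+*
+*		cl_qlist_t	dst, src;
+*
+*		cl_qlist_init( &dst );
+*		cl_qlist_init( &src );
+*		cl_qlist_insert_list_tail( &dst, &src );
+*		CL_ASSERT( cl_is_qlist_empty( &src ) );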
+* +* SEE ALSO +* Quick List, cl_qlist_insert_list_head, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_array_head, +* cl_qlist_insert_array_tail, cl_qlist_insert_prev, cl_qlist_insert_next, +* cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_array_head +* NAME +* cl_qlist_insert_array_head +* +* DESCRIPTION +* The cl_qlist_insert_array_head function inserts an array of list items +* at the head of a quick list. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_insert_array_head( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_array, + IN size_t item_count, + IN const size_t item_size ); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure into which to insert +* the objects. +* +* p_array +* [in] Pointer to the first list item in an array of cl_list_item_t +* structures. +* +* item_count +* [in] Number of cl_list_item_t structures in the array. +* +* item_size +* [in] Size of the items added to the list. This is the stride in the +* array from one cl_list_item_t structure to the next. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts all the list items in the array specified by the p_array parameter +* to the head of the quick list specified by the p_list parameter, +* preserving ordering of the list items. +* +* The array pointer passed into the function points to the cl_list_item_t +* in the first element of the caller's element array. There is no +* restriction on where the element is stored in the parent structure. +* +* SEE ALSO +* Quick List, cl_qlist_insert_array_tail, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail, +* cl_qlist_insert_prev, cl_qlist_insert_next, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_array_tail +* NAME +* cl_qlist_insert_array_tail +* +* DESCRIPTION +* The cl_qlist_insert_array_tail function inserts an array of list items +* at the tail of a quick list. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_insert_array_tail( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_array, + IN size_t item_count, + IN const size_t item_size); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure into which to insert +* the objects. +* +* p_array +* [in] Pointer to the first list item in an array of cl_list_item_t +* structures. +* +* item_count +* [in] Number of cl_list_item_t structures in the array. +* +* item_size +* [in] Size of the items added to the list. This is the stride in the +* array from one cl_list_item_t structure to the next. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts all the list items in the array specified by the p_array parameter +* to the tail of the quick list specified by the p_list parameter, +* preserving ordering of the list items. +* +* The array pointer passed into the function points to the cl_list_item_t +* in the first element of the caller's element array. There is no +* restriction on where the element is stored in the parent structure. 
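+*
+* EXAMPLE
+*	A sketch of inserting an array of enclosing structures. The my_node_t
+*	type is the illustrative assumption from the Quick List overview
+*	example; the stride is the size of the enclosing structure, and the
+*	item pointer references the embedded cl_list_item_t of the first
+*	array element:
+*
+*		my_node_t	nodes[4];
+*
+*		cl_qlist_insert_array_tail( &list, &nodes[0].list_item,
+*			4, sizeof(my_node_t) );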
+* +* SEE ALSO +* Quick List, cl_qlist_insert_array_head, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail, +* cl_qlist_insert_prev, cl_qlist_insert_next, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_prev +* NAME +* cl_qlist_insert_prev +* +* DESCRIPTION +* The cl_qlist_insert_prev function inserts a list item before a +* specified list item in a quick list. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qlist_insert_prev( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_list_item, + IN cl_list_item_t* const p_new_item ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_new_item ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + /* + * The list item must not already be part of the list. Note that this + * assertion may fail if an uninitialized list item happens to have its + * list pointer equal to the specified list. The chances of this + * happening are acceptable in light of the value of this check. + */ + CL_ASSERT( p_new_item->p_list != p_list ); + +#if defined( _DEBUG_ ) + p_new_item->p_list = p_list; +#endif + + __cl_primitive_insert( p_list_item, p_new_item ); + + p_list->count++; +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure into which to add the new item. +* +* p_list_item +* [in] Pointer to a cl_list_item_t structure. +* +* p_new_item +* [in] Pointer to a cl_list_item_t structure to add to the quick list. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts the new list item before the list item specified by p_list_item. +* +* SEE ALSO +* Quick List, cl_qlist_insert_next, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail, +* cl_qlist_insert_array_head, cl_qlist_insert_array_tail, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_insert_next +* NAME +* cl_qlist_insert_next +* +* DESCRIPTION +* The cl_qlist_insert_next function inserts a list item after a specified +* list item in a quick list. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qlist_insert_next( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_list_item, + IN cl_list_item_t* const p_new_item ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_new_item ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + /* + * The list item must not already be part of the list. Note that this + * assertion may fail if an uninitialized list item happens to have its + * list pointer equal to the specified list. The chances of this + * happening are acceptable in light of the value of this check. + */ + CL_ASSERT( p_new_item->p_list != p_list ); + +#if defined( _DEBUG_ ) + p_new_item->p_list = p_list; +#endif + + __cl_primitive_insert( cl_qlist_next( p_list_item ), p_new_item ); + + p_list->count++; +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure into which to add the new item. +* +* p_list_item +* [in] Pointer to a cl_list_item_t structure. 
+* +* p_new_item +* [in] Pointer to a cl_list_item_t structure to add to the quick list. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Inserts the new list item after the list item specified by p_list_item. +* The list item specified by p_list_item must be in the quick list. +* +* SEE ALSO +* Quick List, cl_qlist_insert_prev, cl_qlist_insert_head, +* cl_qlist_insert_tail, cl_qlist_insert_list_head, cl_qlist_insert_list_tail, +* cl_qlist_insert_array_head, cl_qlist_insert_array_tail, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_remove_head +* NAME +* cl_qlist_remove_head +* +* DESCRIPTION +* The cl_qlist_remove_head function removes and returns the list item +* at the head of a quick list. +* +* SYNOPSIS +*/ +CL_INLINE cl_list_item_t* CL_API +cl_qlist_remove_head( + IN cl_qlist_t* const p_list ) +{ + cl_list_item_t *p_item; + + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + p_item = cl_qlist_head( p_list ); + /* CL_ASSERT that the list item is part of the list. */ + CL_ASSERT( p_item->p_list == p_list ); + + if( p_item == cl_qlist_end( p_list ) ) + return( p_item ); + +#if defined( _DEBUG_ ) + /* Clear the item's link to the list. */ + p_item->p_list = NULL; +#endif + + __cl_primitive_remove( p_item ); + + p_list->count--; + + return( p_item ); +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* RETURN VALUES +* Returns a pointer to the list item formerly at the head of the quick list. +* +* Pointer to the list end if the list was empty. +* +* SEE ALSO +* Quick List, cl_qlist_remove_tail, cl_qlist_remove_all, cl_qlist_remove_item, +* cl_qlist_end, cl_qlist_head, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_remove_tail +* NAME +* cl_qlist_remove_tail +* +* DESCRIPTION +* The cl_qlist_remove_tail function removes and returns the list item +* at the tail of a quick list. +* +* SYNOPSIS +*/ +CL_INLINE cl_list_item_t* CL_API +cl_qlist_remove_tail( + IN cl_qlist_t* const p_list ) +{ + cl_list_item_t *p_item; + + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + + p_item = cl_qlist_tail( p_list ); + /* CL_ASSERT that the list item is part of the list. */ + CL_ASSERT( p_item->p_list == p_list ); + + if( p_item == cl_qlist_end( p_list ) ) + return( p_item ); + +#if defined( _DEBUG_ ) + /* Clear the item's link to the list. */ + p_item->p_list = NULL; +#endif + + __cl_primitive_remove( p_item ); + + p_list->count--; + + return( p_item ); +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* RETURN VALUES +* Returns a pointer to the list item formerly at the tail of the quick list. +* +* Pointer to the list end if the list was empty. +* +* SEE ALSO +* Quick List, cl_qlist_remove_head, cl_qlist_remove_all, cl_qlist_remove_item, +* cl_qlist_end, cl_qlist_tail, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_remove_item +* NAME +* cl_qlist_remove_item +* +* DESCRIPTION +* The cl_qlist_remove_item function removes a specific list item from a quick list. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qlist_remove_item( + IN cl_qlist_t* const p_list, + IN cl_list_item_t* const p_list_item ) +{ + /* CL_ASSERT that a non-null pointer is provided. 
*/ + CL_ASSERT( p_list ); + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list_item ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + /* CL_ASSERT that the list item is part of the list. */ + CL_ASSERT( p_list_item->p_list == p_list ); + + if( p_list_item == cl_qlist_end( p_list ) ) + return; + +#if defined( _DEBUG_ ) + /* Clear the item's link to the list. */ + p_list_item->p_list = NULL; +#endif + + __cl_primitive_remove( p_list_item ); + + p_list->count--; +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure from which to remove the item. +* +* p_list_item +* [in] Pointer to a cl_list_item_t structure to remove. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Removes the list item pointed to by the p_list_item parameter from +* its list. +* +* SEE ALSO +* Quick List, cl_qlist_remove_head, cl_qlist_remove_tail, cl_qlist_remove_all, +* cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_remove_all +* NAME +* cl_qlist_remove_all +* +* DESCRIPTION +* The cl_qlist_remove_all function removes all items from a quick list. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qlist_remove_all( + IN cl_qlist_t* const p_list ) +{ +#if defined( _DEBUG_ ) + cl_list_item_t *p_list_item; + + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + p_list_item = cl_qlist_head( p_list ); + while( p_list_item != cl_qlist_end( p_list ) ) + { + p_list_item = cl_qlist_next( p_list_item ); + cl_qlist_prev( p_list_item )->p_list = NULL; + } +#endif + + __cl_qlist_reset( p_list ); +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* RETURN VALUE +* This function does not return a value. +* +* SEE ALSO +* Quick List, cl_qlist_remove_head, cl_qlist_remove_tail, +* cl_qlist_remove_item, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_is_item_in_qlist +* NAME +* cl_is_item_in_qlist +* +* DESCRIPTION +* The cl_is_item_in_qlist function checks for the presence of a +* list item in a quick list. +* +* SYNOPSIS +*/ +CL_EXPORT boolean_t CL_API +cl_is_item_in_qlist( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item ); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* p_list_item +* [in] Pointer to the cl_list_item_t to find. +* +* RETURN VALUES +* TRUE if the list item was found in the quick list. +* +* FALSE otherwise. +* +* SEE ALSO +* Quick List, cl_qlist_remove_item, cl_list_item_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_find_next +* NAME +* cl_qlist_find_next +* +* DESCRIPTION +* The cl_qlist_find_next function invokes a specified function to +* search for an item, starting from a given list item. +* +* SYNOPSIS +*/ +CL_EXPORT cl_list_item_t* CL_API +cl_qlist_find_next( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure in which to search. +* +* p_list_item +* [in] Pointer to a cl_list_item_t structure from which to start the search. +* +* pfn_func +* [in] Function invoked to determine if a match was found. +* See the cl_pfn_qlist_find_t function type declaration for details +* about the callback function. 
+* +* context +* [in] Value to pass to the callback functions to provide context if a +* callback function is provided, or value compared to the quick list's +* list items. +* +* Returns: +* Pointer to the list item, if found. +* +* p_list_item if not found. +* +* NOTES +* cl_qlist_find_next does not remove list items from the list. +* The list item is returned when the function specified by the pfn_func +* parameter returns CL_SUCCESS. The list item from which the search starts is +* excluded from the search. +* +* The function provided by the pfn_func must not perform any list operations, +* as these would corrupt the list. +* +* SEE ALSO +* Quick List, cl_qlist_find_prev, cl_qlist_find_from_head, +* cl_qlist_find_from_tail, cl_qlist_end, cl_qlist_apply_func, +* cl_qlist_move_items, cl_list_item_t, cl_pfn_qlist_find_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_find_prev +* NAME +* cl_qlist_find_prev +* +* DESCRIPTION +* The cl_qlist_find_prev function invokes a specified function to +* search backward for an item, starting from a given list item. +* +* SYNOPSIS +*/ +CL_EXPORT cl_list_item_t* CL_API +cl_qlist_find_prev( + IN const cl_qlist_t* const p_list, + IN const cl_list_item_t* const p_list_item, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure in which to search. +* +* p_list_item +* [in] Pointer to a cl_list_item_t structure from which to start the search. +* +* pfn_func +* [in] Function invoked to determine if a match was found. +* See the cl_pfn_qlist_find_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context if a +* callback function is provided, or value compared to the quick list's +* list items. +* +* Returns: +* Pointer to the list item, if found. +* +* p_list_item if not found. +* +* NOTES +* cl_qlist_find_prev does not remove list items from the list. +* The list item is returned when the function specified by the pfn_func +* parameter returns CL_SUCCESS. The list item from which the search starts is +* excluded from the search. +* +* The function provided by the pfn_func must not perform any list operations, +* as these would corrupt the list. +* +* SEE ALSO +* Quick List, cl_qlist_find_next, cl_qlist_find_from_head, +* cl_qlist_find_from_tail, cl_qlist_end, cl_qlist_apply_func, +* cl_qlist_move_items, cl_list_item_t, cl_pfn_qlist_find_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_find_from_head +* NAME +* cl_qlist_find_from_head +* +* DESCRIPTION +* The cl_qlist_find_from_head function invokes a specified function to +* search for an item, starting at the head of a quick list. +* +* SYNOPSIS +*/ +CL_INLINE cl_list_item_t* CL_API +cl_qlist_find_from_head( + IN const cl_qlist_t* const p_list, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + /* CL_ASSERT that a find function is provided. */ + CL_ASSERT( pfn_func ); + + return( cl_qlist_find_next( p_list, cl_qlist_end( p_list ), pfn_func, + context ) ); +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* pfn_func +* [in] Function invoked to determine if a match was found. +* See the cl_pfn_qlist_find_t function type declaration for details +* about the callback function. 
+* +* context +* [in] Value to pass to the callback functions to provide context if a +* callback function is provided, or value compared to the quick list's +* list items. +* +* Returns: +* Pointer to the list item, if found. +* +* Pointer to the list end otherwise +* +* NOTES +* cl_qlist_find_from_head does not remove list items from the list. +* The list item is returned when the function specified by the pfn_func +* parameter returns CL_SUCCESS. +* +* The function provided by the pfn_func parameter must not perform any list +* operations, as these would corrupt the list. +* +* SEE ALSO +* Quick List, cl_qlist_find_from_tail, cl_qlist_find_next, cl_qlist_find_prev, +* cl_qlist_end, cl_qlist_apply_func, cl_qlist_move_items, cl_list_item_t, +* cl_pfn_qlist_find_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_find_from_tail +* NAME +* cl_qlist_find_from_tail +* +* DESCRIPTION +* The cl_qlist_find_from_tail function invokes a specified function to +* search for an item, starting at the tail of a quick list. +* +* SYNOPSIS +*/ +CL_INLINE cl_list_item_t* CL_API +cl_qlist_find_from_tail( + IN const cl_qlist_t* const p_list, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_list ); + /* CL_ASSERT that the list was initialized. */ + CL_ASSERT( p_list->state == CL_INITIALIZED ); + /* CL_ASSERT that a find function is provided. */ + CL_ASSERT( pfn_func ); + + return( cl_qlist_find_prev( p_list, cl_qlist_end( p_list ), pfn_func, + context ) ); +} +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* pfn_func +* [in] Function invoked to determine if a match was found. +* See the cl_pfn_qlist_find_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context if a +* callback function is provided, or value compared to the quick list's +* list items. +* +* Returns: +* Pointer to the list item, if found. +* +* Pointer to the list end otherwise +* +* NOTES +* cl_qlist_find_from_tail does not remove list items from the list. +* The list item is returned when the function specified by the pfn_func +* parameter returns CL_SUCCESS. +* +* The function provided by the pfn_func parameter must not perform any list +* operations, as these would corrupt the list. +* +* SEE ALSO +* Quick List, cl_qlist_find_from_head, cl_qlist_find_next, cl_qlist_find_prev, +* cl_qlist_apply_func, cl_qlist_end, cl_qlist_move_items, cl_list_item_t, +* cl_pfn_qlist_find_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_apply_func +* NAME +* cl_qlist_apply_func +* +* DESCRIPTION +* The cl_qlist_apply_func function executes a specified function +* for every list item stored in a quick list. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_apply_func( + IN const cl_qlist_t* const p_list, + IN cl_pfn_qlist_apply_t pfn_func, + IN const void* const context ); +/* +* PARAMETERS +* p_list +* [in] Pointer to a cl_qlist_t structure. +* +* pfn_func +* [in] Function invoked for every item in the quick list. +* See the cl_pfn_qlist_apply_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* The function provided must not perform any list operations, as these +* would corrupt the quick list. 
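[Editorial aside — an illustrative sketch, not part of the patched header. It shows the find-callback pattern the surrounding NOTES describe: my_obj_t, my_match_id, and find_by_id are invented names, the callback signature is inferred from the cl_pfn_qlist_find_t references above, and PARENT_STRUCT is assumed to be complib's usual container-of macro.]

    typedef struct _my_obj
    {
        cl_list_item_t  list_item;  /* embedded link used by the quick list */
        uint32_t        id;
    } my_obj_t;

    /* Match callback: return CL_SUCCESS to stop the search at this item. */
    static cl_status_t CL_API
    my_match_id(
        IN const cl_list_item_t* const p_list_item,
        IN void* context )
    {
        my_obj_t *p_obj = PARENT_STRUCT( p_list_item, my_obj_t, list_item );
        return( p_obj->id == *(uint32_t*)context ? CL_SUCCESS : CL_NOT_FOUND );
    }

    static my_obj_t*
    find_by_id(
        IN cl_qlist_t* const p_list,
        IN uint32_t id )
    {
        cl_list_item_t *p_item =
            cl_qlist_find_from_head( p_list, my_match_id, &id );

        /* The find functions return the list end, not NULL, on failure. */
        if( p_item == cl_qlist_end( p_list ) )
            return NULL;
        return PARENT_STRUCT( p_item, my_obj_t, list_item );
    }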
+* +* SEE ALSO +* Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail, +* cl_qlist_move_items, cl_pfn_qlist_apply_t +*********/ + + +/****f* Component Library: Quick List/cl_qlist_move_items +* NAME +* cl_qlist_move_items +* +* DESCRIPTION +* The cl_qlist_move_items function moves list items from one list to +* another based on the return value of a user supplied function. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qlist_move_items( + IN cl_qlist_t* const p_src_list, + IN cl_qlist_t* const p_dest_list, + IN cl_pfn_qlist_find_t pfn_func, + IN const void* const context ); +/* +* PARAMETERS +* p_src_list +* [in] Pointer to a cl_qlist_t structure from which +* list items are removed. +* +* p_dest_list +* [in] Pointer to a cl_qlist_t structure to which the source +* list items are added. +* +* pfn_func +* [in] Function invoked to determine if a match was found. +* See the cl_pfn_qlist_find_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* If the function specified by the pfn_func parameter returns CL_SUCCESS, +* the related list item is removed from p_src_list and inserted at the tail +* of the p_dest_list. +* +* The cl_qlist_move_items function continues iterating through p_src_list +* from the last item moved, allowing multiple items to be located and moved +* in a single list iteration. +* +* The function specified by pfn_func must not perform any list operations, +* as these would corrupt the list. +* +* SEE ALSO +* Quick List, cl_qlist_find_from_head, cl_qlist_find_from_tail, +* cl_qlist_apply_func, cl_pfn_qlist_find_t +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_QUICK_LIST_H_ */ diff --git a/branches/Ndi/inc/complib/cl_qlockpool.h b/branches/Ndi/inc/complib/cl_qlockpool.h new file mode 100644 index 00000000..3187dcb6 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_qlockpool.h @@ -0,0 +1,369 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of cl_qlock_pool_t. + * This object represents a threadsafe quick-pool of objects. 
+ *
+ * Environment:
+ *	All
+ */
+
+#ifndef _CL_QLOCKPOOL_H_
+#define _CL_QLOCKPOOL_H_
+
+
+#include <complib/cl_qpool.h>
+#include <complib/cl_spinlock.h>
+
+
+/****h* Component Library/Quick Locking Pool
+* NAME
+*	Quick Locking Pool
+*
+* DESCRIPTION
+*	The Quick Locking Pool represents a thread-safe quick pool.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_qlock_pool_t
+*
+*	Initialization:
+*		cl_qlock_pool_construct, cl_qlock_pool_init, cl_qlock_pool_destroy
+*
+*	Manipulation:
+*		cl_qlock_pool_get, cl_qlock_pool_put
+*********/
+
+
+/****s* Component Library: Quick Locking Pool/cl_qlock_pool_t
+* NAME
+*	cl_qlock_pool_t
+*
+* DESCRIPTION
+*	Quick Locking Pool structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_qlock_pool
+{
+	cl_spinlock_t	lock;
+	cl_qpool_t		pool;
+
+} cl_qlock_pool_t;
+/*
+* FIELDS
+*	lock
+*		Spinlock guarding the pool.
+*
+*	pool
+*		Quick pool of user objects.
+*
+* SEE ALSO
+*	Quick Locking Pool
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Quick Locking Pool/cl_qlock_pool_construct
+* NAME
+*	cl_qlock_pool_construct
+*
+* DESCRIPTION
+*	This function constructs a Quick Locking Pool.
+*
+* SYNOPSIS
+*/
+static inline void
+cl_qlock_pool_construct(
+	IN cl_qlock_pool_t* const p_pool )
+{
+	cl_qpool_construct( &p_pool->pool );
+	cl_spinlock_construct( &p_pool->lock );
+}
+
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a Quick Locking Pool to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_qlock_pool_init and cl_qlock_pool_destroy.
+*
+*	Calling cl_qlock_pool_construct is a prerequisite to calling any other
+*	method except cl_qlock_pool_init.
+*
+* SEE ALSO
+*	Quick Locking Pool, cl_qlock_pool_init, cl_qlock_pool_destroy
+*********/
+
+
+/****f* Component Library: Quick Locking Pool/cl_qlock_pool_destroy
+* NAME
+*	cl_qlock_pool_destroy
+*
+* DESCRIPTION
+*	The cl_qlock_pool_destroy function destroys a Quick Locking Pool,
+*	releasing all resources.
+*
+* SYNOPSIS
+*/
+static inline void
+cl_qlock_pool_destroy(
+	IN cl_qlock_pool_t* const p_pool )
+{
+	/*
+		If the pool has already been put into use, grab the lock
+		to sync with other threads before we blow everything away.
+	*/
+	if( cl_is_qpool_inited( &p_pool->pool ) )
+	{
+		cl_spinlock_acquire( &p_pool->lock );
+		cl_qpool_destroy( &p_pool->pool );
+		cl_spinlock_release( &p_pool->lock );
+	}
+	else
+		cl_qpool_destroy( &p_pool->pool );
+
+	cl_spinlock_destroy( &p_pool->lock );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a Quick Locking Pool to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Quick Locking Pool.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	cl_qlock_pool_construct or cl_qlock_pool_init.
+*
+* SEE ALSO
+*	Quick Locking Pool, cl_qlock_pool_construct, cl_qlock_pool_init
+*********/
+
+
+/****f* Component Library: Quick Locking Pool/cl_qlock_pool_init
+* NAME
+*	cl_qlock_pool_init
+*
+* DESCRIPTION
+*	The cl_qlock_pool_init function initializes a Quick Locking Pool for use.
+*
+* SYNOPSIS
+*/
+static inline cl_status_t
+cl_qlock_pool_init(
+	IN cl_qlock_pool_t* const p_pool,
+	IN const size_t min_size,
+	IN const size_t max_size,
+	IN const size_t grow_size,
+	IN const size_t object_size,
+	IN cl_pfn_qpool_init_t pfn_initializer OPTIONAL,
+	IN cl_pfn_qpool_dtor_t pfn_destructor OPTIONAL,
+	IN const void* const context )
+{
+	cl_status_t status;
+
+	cl_qlock_pool_construct( p_pool );
+
+	status = cl_spinlock_init( &p_pool->lock );
+	if( status )
+		return( status );
+
+	status = cl_qpool_init( &p_pool->pool, min_size, max_size, grow_size,
+		object_size, pfn_initializer, pfn_destructor, context );
+
+	return( status );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qlock_pool_t object to initialize.
+*
+*	min_size
+*		[in] Minimum number of objects that the pool should support. All
+*		necessary allocations to allow storing the minimum number of items
+*		are performed at initialization time, and all necessary callbacks
+*		successfully invoked.
+*
+*	max_size
+*		[in] Maximum number of objects to which the pool is allowed to grow.
+*		A value of zero specifies no maximum.
+*
+*	grow_size
+*		[in] Number of objects to allocate when incrementally growing the pool.
+*		A value of zero disables automatic growth.
+*
+*	object_size
+*		[in] Size, in bytes, of each object.
+*
+*	pfn_initializer
+*		[in] Initialization callback to invoke for every new object when
+*		growing the pool. This parameter is optional and may be NULL. If NULL,
+*		the pool assumes the cl_pool_item_t structure describing objects is
+*		located at the head of each object. See the cl_pfn_qpool_init_t
+*		function type declaration for details about the callback function.
+*
+*	pfn_destructor
+*		[in] Destructor callback to invoke for every object before memory for
+*		that object is freed. This parameter is optional and may be NULL.
+*		See the cl_pfn_qpool_dtor_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the quick pool was initialized successfully.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the
+*	quick pool.
+*
+*	CL_INVALID_SETTING if the maximum size is non-zero and less than the
+*	minimum size.
+*
+*	Other cl_status_t values returned by the optional initialization callback
+*	function specified by the pfn_initializer parameter.
+*
+* NOTES
+*	Allows calling other Quick Locking Pool methods.
+*
+* SEE ALSO
+*	Quick Locking Pool, cl_qlock_pool_construct, cl_qlock_pool_destroy
+*********/
+
+
+/****f* Component Library: Quick Locking Pool/cl_qlock_pool_get
+* NAME
+*	cl_qlock_pool_get
+*
+* DESCRIPTION
+*	The cl_qlock_pool_get function retrieves an object from the pool.
+*
+* SYNOPSIS
+*/
+static inline cl_pool_item_t*
+cl_qlock_pool_get(
+	IN cl_qlock_pool_t* const p_pool )
+{
+	cl_pool_item_t* p_item;
+	cl_spinlock_acquire( &p_pool->lock );
+	p_item = cl_qpool_get( &p_pool->pool );
+	cl_spinlock_release( &p_pool->lock );
+	return( p_item );
+}
+
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to a cl_qlock_pool_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to a cl_pool_item_t contained in the user object.
+*
+* NOTES
+*	The object must eventually be returned to the pool with a call to
+*	cl_qlock_pool_put.
+*
+*	cl_qlock_pool_construct or cl_qlock_pool_init must be called before
+*	using this function.
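[Editorial aside — an assumed usage sketch, not part of the patched header. my_buf_t and its sizes are invented. Because cl_qlock_pool_init constructs the pool itself, no separate construct call is needed, and placing cl_pool_item_t at the head of the object lets the NULL-initializer default apply.]

    typedef struct _my_buf
    {
        cl_pool_item_t  pool_item;  /* at the head: default initializer works */
        uint8_t         data[64];
    } my_buf_t;

    static cl_status_t
    my_buf_pool_example( void )
    {
        cl_qlock_pool_t pool;
        cl_pool_item_t  *p_item;
        cl_status_t     status;

        /* 16 objects up front, unbounded growth in steps of 16. */
        status = cl_qlock_pool_init( &pool, 16, 0, 16, sizeof(my_buf_t),
            NULL, NULL, NULL );
        if( status != CL_SUCCESS )
            return( status );

        p_item = cl_qlock_pool_get( &pool );    /* thread-safe acquire */
        if( p_item )
            cl_qlock_pool_put( &pool, p_item ); /* thread-safe release */

        cl_qlock_pool_destroy( &pool );
        return( CL_SUCCESS );
    }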
+* +* SEE ALSO +* Quick Locking Pool, cl_qlock_pool_put +*********/ + + +/****f* Component Library: Quick Locking Pool/cl_qlock_pool_put +* NAME +* cl_qlock_pool_put +* +* DESCRIPTION +* Returns an object to the pool. +* +* SYNOPSIS +*/ +static inline void +cl_qlock_pool_put( + IN cl_qlock_pool_t* const p_pool, + IN cl_pool_item_t* const p_item ) +{ + cl_spinlock_acquire( &p_pool->lock ); + cl_qpool_put( &p_pool->pool, p_item ); + cl_spinlock_release( &p_pool->lock ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to an cl_qlock_pool_t object. +* +* p_item +* [in] Pointer to the cl_pool_item_t in an object that was previously +* retrieved from the pool. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* The cl_qlock_pool_construct or cl_qlock_pool_init must be called before +* using this function. +* +* SEE ALSO +* Quick Locking Pool, cl_qlock_pool_get +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_QLOCKPOOL_H_ */ diff --git a/branches/Ndi/inc/complib/cl_qmap.h b/branches/Ndi/inc/complib/cl_qmap.h new file mode 100644 index 00000000..4981b4e9 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_qmap.h @@ -0,0 +1,973 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of quick map, a binary tree where the caller always provides + * all necessary storage. + * + * Environment: + * All + */ + + +#ifndef _CL_QMAP_H_ +#define _CL_QMAP_H_ + + +#include +#include + + +/****h* Component Library/Quick Map +* NAME +* Quick Map +* +* DESCRIPTION +* Quick map implements a binary tree that stores user provided cl_map_item_t +* structures. Each item stored in a quick map has a unique 64-bit key +* (duplicates are not allowed). Quick map provides the ability to +* efficiently search for an item given a key. +* +* Quick map does not allocate any memory, and can therefore not fail +* any operations due to insufficient memory. Quick map can thus be useful +* in minimizing the error paths in code. +* +* Quick map is not thread safe, and users must provide serialization when +* adding and removing items from the map. +* +* The quick map functions operate on a cl_qmap_t structure which should be +* treated as opaque and should be manipulated only through the provided +* functions. 
+*
+* SEE ALSO
+*	Structures:
+*		cl_qmap_t, cl_map_item_t, cl_map_obj_t
+*
+*	Callbacks:
+*		cl_pfn_qmap_apply_t
+*
+*	Item Manipulation:
+*		cl_qmap_set_obj, cl_qmap_obj, cl_qmap_key
+*
+*	Initialization:
+*		cl_qmap_init
+*
+*	Iteration:
+*		cl_qmap_end, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+*
+*	Manipulation:
+*		cl_qmap_insert, cl_qmap_get, cl_qmap_remove_item, cl_qmap_remove,
+*		cl_qmap_remove_all, cl_qmap_merge, cl_qmap_delta
+*
+*	Search:
+*		cl_qmap_apply_func
+*
+*	Attributes:
+*		cl_qmap_count, cl_is_qmap_empty
+*********/
+
+
+/****s* Component Library: Quick Map/cl_map_item_t
+* NAME
+*	cl_map_item_t
+*
+* DESCRIPTION
+*	The cl_map_item_t structure is used by maps to store objects.
+*
+*	The cl_map_item_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_map_item
+{
+	/* Must be first to allow casting. */
+	cl_pool_item_t		pool_item;
+	struct _cl_map_item	*p_left;
+	struct _cl_map_item	*p_right;
+	struct _cl_map_item	*p_up;
+	cl_map_color_t		color;
+	uint64_t			key;
+#ifdef _DEBUG_
+	struct _cl_qmap		*p_map;
+#endif
+
+} cl_map_item_t;
+/*
+* FIELDS
+*	pool_item
+*		Used to store the item in a doubly linked list, allowing more
+*		efficient map traversal.
+*
+*	p_left
+*		Pointer to the map item that is a child to the left of the node.
+*
+*	p_right
+*		Pointer to the map item that is a child to the right of the node.
+*
+*	p_up
+*		Pointer to the map item that is the parent of the node. Leaves are
+*		terminated by the map's NIL sentinel, which is stored in the
+*		cl_qmap_t structure.
+*
+*	color
+*		Indicates whether a node is red or black in the map.
+*
+*	key
+*		Value that uniquely represents a node in a map. This value is set by
+*		calling cl_qmap_insert and can be retrieved by calling cl_qmap_key.
+*
+* NOTES
+*	None of the fields of this structure should be manipulated by users, as
+*	they are critical to the proper operation of the map in which they
+*	are stored.
+*
+*	To allow storing items in either a quick list, a quick pool, or a quick
+*	map, the map implementation guarantees that the map item can be safely
+*	cast to a pool item used for storing an object in a quick pool, or cast to
+*	a list item used for storing an object in a quick list. This removes the
+*	need to embed a map item, a list item, and a pool item in objects that need
+*	to be stored in a quick list, a quick pool, and a quick map.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_insert, cl_qmap_key, cl_pool_item_t, cl_list_item_t
+*********/
+
+
+/****s* Component Library: Quick Map/cl_map_obj_t
+* NAME
+*	cl_map_obj_t
+*
+* DESCRIPTION
+*	The cl_map_obj_t structure is used to store objects in maps.
+*
+*	The cl_map_obj_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_map_obj
+{
+	cl_map_item_t	item;
+	const void		*p_object;
+
+} cl_map_obj_t;
+/*
+* FIELDS
+*	item
+*		Map item used internally by the map to store an object.
+*
+*	p_object
+*		User defined context. Users should not access this field directly.
+*		Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the value
+*		of this field.
+*
+* NOTES
+*	None of the fields of this structure should be manipulated by users, as
+*	they are critical to the proper operation of the map in which they
+*	are stored.
+*
+*	Use cl_qmap_set_obj and cl_qmap_obj to set and retrieve the object
+*	stored in a map item, respectively.
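[Editorial aside — a short, hypothetical illustration of the embedding scheme these notes describe, not part of the patched header. my_rec_t is invented and PARENT_STRUCT is assumed to be complib's container-of macro.]

    typedef struct _my_rec
    {
        cl_map_item_t   map_item;   /* storage the map uses; no allocation */
        char            name[32];
    } my_rec_t;

    /* Recover the containing record from a map item returned by the map. */
    static my_rec_t*
    rec_from_item(
        IN cl_map_item_t* const p_item )
    {
        return PARENT_STRUCT( p_item, my_rec_t, map_item );
    }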
+* +* SEE ALSO +* Quick Map, cl_qmap_set_obj, cl_qmap_obj, cl_map_item_t +*********/ + + +/****s* Component Library: Quick Map/cl_qmap_t +* NAME +* cl_qmap_t +* +* DESCRIPTION +* Quick map structure. +* +* The cl_qmap_t structure should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_qmap +{ + cl_map_item_t root; + cl_map_item_t nil; + cl_state_t state; + size_t count; + +} cl_qmap_t; +/* +* PARAMETERS +* root +* Map item that serves as root of the map. The root is set up to +* always have itself as parent. The left pointer is set to point to +* the item at the root. +* +* nil +* Map item that serves as terminator for all leaves, as well as providing +* the list item used as quick list for storing map items in a list for +* faster traversal. +* +* state +* State of the map, used to verify that operations are permitted. +* +* count +* Number of items in the map. +* +* SEE ALSO +* Quick Map +*********/ + + +/****d* Component Library: Quick Map/cl_pfn_qmap_apply_t +* NAME +* cl_pfn_qmap_apply_t +* +* DESCRIPTION +* The cl_pfn_qmap_apply_t function type defines the prototype for functions +* used to iterate items in a quick map. +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_qmap_apply_t)( + IN cl_map_item_t* const p_map_item, + IN void* context ); +/* +* PARAMETERS +* p_map_item +* [in] Pointer to a cl_map_item_t structure. +* +* context +* [in] Value passed to the callback function. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function type is provided as function prototype reference for the +* function provided by users as a parameter to the cl_qmap_apply_func +* function. +* +* SEE ALSO +* Quick Map, cl_qmap_apply_func +*********/ + + +#ifdef __cplusplus +extern "C" { +#endif + + +/****f* Component Library: Quick Map/cl_qmap_count +* NAME +* cl_qmap_count +* +* DESCRIPTION +* The cl_qmap_count function returns the number of items stored +* in a quick map. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_qmap_count( + IN const cl_qmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + return( p_map->count ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure whose item count to return. +* +* RETURN VALUE +* Returns the number of items stored in the map. +* +* SEE ALSO +* Quick Map, cl_is_qmap_empty +*********/ + + +/****f* Component Library: Quick Map/cl_is_qmap_empty +* NAME +* cl_is_qmap_empty +* +* DESCRIPTION +* The cl_is_qmap_empty function returns whether a quick map is empty. +* +* SYNOPSIS +*/ +CL_INLINE boolean_t CL_API +cl_is_qmap_empty( + IN const cl_qmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + return( p_map->count == 0 ); +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure to test for emptiness. +* +* RETURN VALUES +* TRUE if the quick map is empty. +* +* FALSE otherwise. +* +* SEE ALSO +* Quick Map, cl_qmap_count, cl_qmap_remove_all +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_set_obj +* NAME +* cl_qmap_set_obj +* +* DESCRIPTION +* The cl_qmap_set_obj function sets the object stored in a map object. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qmap_set_obj( + IN cl_map_obj_t* const p_map_obj, + IN const void* const p_object ) +{ + CL_ASSERT( p_map_obj ); + p_map_obj->p_object = p_object; +} +/* +* PARAMETERS +* p_map_obj +* [in] Pointer to a map object stucture whose object pointer +* is to be set. 
+*
+*	p_object
+*		[in] User defined context.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_obj
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_obj
+* NAME
+*	cl_qmap_obj
+*
+* DESCRIPTION
+*	The cl_qmap_obj function returns the object stored in a map object.
+*
+* SYNOPSIS
+*/
+CL_INLINE void* CL_API
+cl_qmap_obj(
+	IN const cl_map_obj_t* const p_map_obj )
+{
+	CL_ASSERT( p_map_obj );
+	return( (void*)p_map_obj->p_object );
+}
+/*
+* PARAMETERS
+*	p_map_obj
+*		[in] Pointer to a map object structure whose object pointer to return.
+*
+* RETURN VALUE
+*	Returns the value of the object pointer stored in the map object.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_set_obj
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_key
+* NAME
+*	cl_qmap_key
+*
+* DESCRIPTION
+*	The cl_qmap_key function retrieves the key value of a map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint64_t CL_API
+cl_qmap_key(
+	IN const cl_map_item_t* const p_item )
+{
+	CL_ASSERT( p_item );
+	return( p_item->key );
+}
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose key value to return.
+*
+* RETURN VALUE
+*	Returns the 64-bit key value for the specified map item.
+*
+* NOTES
+*	The key value is set in a call to cl_qmap_insert.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_insert
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_init
+* NAME
+*	cl_qmap_init
+*
+* DESCRIPTION
+*	The cl_qmap_init function initializes a quick map for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_qmap_init(
+	IN cl_qmap_t* const p_map );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_qmap_t structure to initialize.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling quick map manipulation functions.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_insert, cl_qmap_remove
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_end
+* NAME
+*	cl_qmap_end
+*
+* DESCRIPTION
+*	The cl_qmap_end function returns the end of a quick map.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_map_item_t* const CL_API
+cl_qmap_end(
+	IN const cl_qmap_t* const p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	/* Nil is the end of the map. */
+	return( &p_map->nil );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_qmap_t structure whose end to return.
+*
+* RETURN VALUE
+*	Pointer to the end of the map.
+*
+* NOTES
+*	cl_qmap_end is useful for determining the validity of map items returned
+*	by cl_qmap_head, cl_qmap_tail, cl_qmap_next, or cl_qmap_prev. If the map
+*	item pointer returned by any of these functions compares to the end, the
+*	end of the map was encountered.
+*	When using cl_qmap_head or cl_qmap_tail, this condition indicates that
+*	the map is empty.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_prev
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_head
+* NAME
+*	cl_qmap_head
+*
+* DESCRIPTION
+*	The cl_qmap_head function returns the map item with the lowest key
+*	value stored in a quick map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_item_t* CL_API
+cl_qmap_head(
+	IN const cl_qmap_t* const p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	return( (cl_map_item_t*)p_map->nil.pool_item.list_item.p_next );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_qmap_t structure whose item with the lowest key
+*		is returned.
+*
+* RETURN VALUES
+*	Pointer to the map item with the lowest key in the quick map.
+*
+*	Pointer to the map end if the quick map was empty.
+*
+* NOTES
+*	cl_qmap_head does not remove the item from the map.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_tail, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+*	cl_map_item_t
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_tail
+* NAME
+*	cl_qmap_tail
+*
+* DESCRIPTION
+*	The cl_qmap_tail function returns the map item with the highest key
+*	value stored in a quick map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_item_t* CL_API
+cl_qmap_tail(
+	IN const cl_qmap_t* const p_map )
+{
+	CL_ASSERT( p_map );
+	CL_ASSERT( p_map->state == CL_INITIALIZED );
+	return( (cl_map_item_t*)p_map->nil.pool_item.list_item.p_prev );
+}
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_qmap_t structure whose item with the highest key
+*		is returned.
+*
+* RETURN VALUES
+*	Pointer to the map item with the highest key in the quick map.
+*
+*	Pointer to the map end if the quick map was empty.
+*
+* NOTES
+*	cl_qmap_tail does not remove the item from the map.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_head, cl_qmap_next, cl_qmap_prev, cl_qmap_end,
+*	cl_map_item_t
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_next
+* NAME
+*	cl_qmap_next
+*
+* DESCRIPTION
+*	The cl_qmap_next function returns the map item with the next higher
+*	key value than a specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_item_t* CL_API
+cl_qmap_next(
+	IN const cl_map_item_t* const p_item )
+{
+	CL_ASSERT( p_item );
+	return( (cl_map_item_t*)p_item->pool_item.list_item.p_next );
+}
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose successor to return.
+*
+* RETURN VALUES
+*	Pointer to the map item with the next higher key value in a quick map.
+*
+*	Pointer to the map end if the specified item was the last item in
+*	the quick map.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_prev, cl_qmap_end,
+*	cl_map_item_t
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_prev
+* NAME
+*	cl_qmap_prev
+*
+* DESCRIPTION
+*	The cl_qmap_prev function returns the map item with the next lower
+*	key value than a specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_map_item_t* CL_API
+cl_qmap_prev(
+	IN const cl_map_item_t* const p_item )
+{
+	CL_ASSERT( p_item );
+	return( (cl_map_item_t*)p_item->pool_item.list_item.p_prev );
+}
+/*
+* PARAMETERS
+*	p_item
+*		[in] Pointer to a map item whose predecessor to return.
+*
+* RETURN VALUES
+*	Pointer to the map item with the next lower key value in a quick map.
+*
+*	Pointer to the map end if the specified item was the first item in
+*	the quick map.
+*
+* SEE ALSO
+*	Quick Map, cl_qmap_head, cl_qmap_tail, cl_qmap_next, cl_qmap_end,
+*	cl_map_item_t
+*********/
+
+
+/****f* Component Library: Quick Map/cl_qmap_insert
+* NAME
+*	cl_qmap_insert
+*
+* DESCRIPTION
+*	The cl_qmap_insert function inserts a map item into a quick map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_map_item_t* CL_API
+cl_qmap_insert(
+	IN cl_qmap_t* const p_map,
+	IN const uint64_t key,
+	IN cl_map_item_t* const p_item );
+/*
+* PARAMETERS
+*	p_map
+*		[in] Pointer to a cl_qmap_t structure into which to add the item.
+*
+*	key
+*		[in] Value to assign to the item.
+*
+*	p_item
+*		[in] Pointer to a cl_map_item_t structure to insert into the quick map.
+*
+* RETURN VALUE
+*	Pointer to the item in the map with the specified key. If insertion
+*	was successful, this is the pointer to the item. If an item with the
+*	specified key already exists in the map, the pointer to that item is
+*	returned.
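[Editorial aside — the return-value convention above makes duplicate detection a single call. A minimal sketch, not part of the patched header; qmap_insert_unique is an invented helper built only from the documented API.]

    /* Returns TRUE if p_item now owns the key, FALSE if a duplicate existed. */
    static boolean_t
    qmap_insert_unique(
        IN cl_qmap_t* const p_map,
        IN const uint64_t key,
        IN cl_map_item_t* const p_item )
    {
        /* cl_qmap_insert leaves the map unchanged on a duplicate key and
         * returns the item already holding that key. */
        return( cl_qmap_insert( p_map, key, p_item ) == p_item ? TRUE : FALSE );
    }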
+* +* NOTES +* Insertion operations may cause the quick map to rebalance. +* +* SEE ALSO +* Quick Map, cl_qmap_remove, cl_map_item_t +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_get +* NAME +* cl_qmap_get +* +* DESCRIPTION +* The cl_qmap_get function returns the map item associated with a key. +* +* SYNOPSIS +*/ +CL_EXPORT cl_map_item_t* CL_API +cl_qmap_get( + IN const cl_qmap_t* const p_map, + IN const uint64_t key ); +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure from which to retrieve the +* item with the specified key. +* +* key +* [in] Key value used to search for the desired map item. +* +* RETURN VALUES +* Pointer to the map item with the desired key value. +* +* Pointer to the map end if there was no item with the desired key value +* stored in the quick map. +* +* NOTES +* cl_qmap_get does not remove the item from the quick map. +* +* SEE ALSO +* Quick Map, cl_qmap_remove +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_remove_item +* NAME +* cl_qmap_remove_item +* +* DESCRIPTION +* The cl_qmap_remove_item function removes the specified map item +* from a quick map. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qmap_remove_item( + IN cl_qmap_t* const p_map, + IN cl_map_item_t* const p_item ); +/* +* PARAMETERS +* p_item +* [in] Pointer to a map item to remove from its quick map. +* +* RETURN VALUES +* This function does not return a value. +* +* In a debug build, cl_qmap_remove_item asserts that the item being removed +* is in the specified map. +* +* NOTES +* Removes the map item pointed to by p_item from its quick map. +* +* SEE ALSO +* Quick Map, cl_qmap_remove, cl_qmap_remove_all, cl_qmap_insert +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_remove +* NAME +* cl_qmap_remove +* +* DESCRIPTION +* The cl_qmap_remove function removes the map item with the specified key +* from a quick map. +* +* SYNOPSIS +*/ +CL_EXPORT cl_map_item_t* CL_API +cl_qmap_remove( + IN cl_qmap_t* const p_map, + IN const uint64_t key ); +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure from which to remove the item +* with the specified key. +* +* key +* [in] Key value used to search for the map item to remove. +* +* RETURN VALUES +* Pointer to the removed map item if it was found. +* +* Pointer to the map end if no item with the specified key exists in the +* quick map. +* +* SEE ALSO +* Quick Map, cl_qmap_remove_item, cl_qmap_remove_all, cl_qmap_insert +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_remove_all +* NAME +* cl_qmap_remove_all +* +* DESCRIPTION +* The cl_qmap_remove_all function removes all items in a quick map, +* leaving it empty. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qmap_remove_all( + IN cl_qmap_t* const p_map ) +{ + CL_ASSERT( p_map ); + CL_ASSERT( p_map->state == CL_INITIALIZED ); + + p_map->root.p_left = &p_map->nil; + p_map->nil.pool_item.list_item.p_next = &p_map->nil.pool_item.list_item; + p_map->nil.pool_item.list_item.p_prev = &p_map->nil.pool_item.list_item; + p_map->count = 0; +} +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure to empty. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* Quick Map, cl_qmap_remove, cl_qmap_remove_item +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_merge +* NAME +* cl_qmap_merge +* +* DESCRIPTION +* The cl_qmap_merge function moves all items from one map to another, +* excluding duplicates. 
+* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qmap_merge( + OUT cl_qmap_t* const p_dest_map, + IN OUT cl_qmap_t* const p_src_map ); +/* +* PARAMETERS +* p_dest_map +* [out] Pointer to a cl_qmap_t structure to which items should be added. +* +* p_src_map +* [in/out] Pointer to a cl_qmap_t structure whose items to add +* to p_dest_map. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* Items are evaluated based on their keys only. +* +* Upon return from cl_qmap_merge, the quick map referenced by p_src_map +* contains all duplicate items. +* +* SEE ALSO +* Quick Map, cl_qmap_delta +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_delta +* NAME +* cl_qmap_delta +* +* DESCRIPTION +* The cl_qmap_delta function computes the differences between two maps. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qmap_delta( + IN OUT cl_qmap_t* const p_map1, + IN OUT cl_qmap_t* const p_map2, + OUT cl_qmap_t* const p_new, + OUT cl_qmap_t* const p_old ); +/* +* PARAMETERS +* p_map1 +* [in/out] Pointer to the first of two cl_qmap_t structures whose +* differences to compute. +* +* p_map2 +* [in/out] Pointer to the second of two cl_qmap_t structures whose +* differences to compute. +* +* p_new +* [out] Pointer to an empty cl_qmap_t structure that contains the items +* unique to p_map2 upon return from the function. +* +* p_old +* [out] Pointer to an empty cl_qmap_t structure that contains the items +* unique to p_map1 upon return from the function. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* Items are evaluated based on their keys. Items that exist in both +* p_map1 and p_map2 remain in their respective maps. Items that +* exist only p_map1 are moved to p_old. Likewise, items that exist only +* in p_map2 are moved to p_new. This function can be usefull in evaluating +* changes between two maps. +* +* Both maps pointed to by p_new and p_old must be empty on input. This +* requirement removes the possibility of failures. +* +* SEE ALSO +* Quick Map, cl_qmap_merge +*********/ + + +/****f* Component Library: Quick Map/cl_qmap_apply_func +* NAME +* cl_qmap_apply_func +* +* DESCRIPTION +* The cl_qmap_apply_func function executes a specified function +* for every item stored in a quick map. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qmap_apply_func( + IN const cl_qmap_t* const p_map, + IN cl_pfn_qmap_apply_t pfn_func, + IN const void* const context ); +/* +* PARAMETERS +* p_map +* [in] Pointer to a cl_qmap_t structure. +* +* pfn_func +* [in] Function invoked for every item in the quick map. +* See the cl_pfn_qmap_apply_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* The function provided must not perform any map operations, as these +* would corrupt the quick map. +* +* SEE ALSO +* Quick Map, cl_pfn_qmap_apply_t +*********/ + +#ifdef __cplusplus +} +#endif + + +#endif /* _CL_QMAP_H_ */ diff --git a/branches/Ndi/inc/complib/cl_qpool.h b/branches/Ndi/inc/complib/cl_qpool.h new file mode 100644 index 00000000..03b27376 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_qpool.h @@ -0,0 +1,639 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of the quick pool.
+ *	The quick pool manages a pool of objects.
+ *	The pool can grow to meet demand, limited only by system memory.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_QUICK_POOL_H_
+#define _CL_QUICK_POOL_H_
+
+
+#include <complib/cl_qcomppool.h>
+
+
+/****h* Component Library/Quick Pool
+* NAME
+*	Quick Pool
+*
+* DESCRIPTION
+*	The quick pool provides a self-contained and self-sustaining pool
+*	of user defined objects.
+*
+*	To aid in object oriented design, the quick pool provides the user
+*	the ability to specify callbacks that are invoked for each object for
+*	construction, initialization, and destruction. Constructor and destructor
+*	callback functions may not fail.
+*
+*	A quick pool does not return memory to the system as the user returns
+*	objects to the pool. The only method of returning memory to the system is
+*	to destroy the pool.
+*
+*	The quick pool operates on cl_pool_item_t structures that describe
+*	objects. This provides for more efficient memory use and operation.
+*	If using a cl_pool_item_t is not desired, the Pool provides similar
+*	functionality but operates on opaque objects.
+*
+*	The quick pool functions operate on a cl_qpool_t structure which should
+*	be treated as opaque and should be manipulated only through the provided
+*	functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_qpool_t, cl_pool_item_t
+*
+*	Callbacks:
+*		cl_pfn_qpool_init_t, cl_pfn_qpool_dtor_t
+*
+*	Initialization/Destruction:
+*		cl_qpool_construct, cl_qpool_init, cl_qpool_destroy
+*
+*	Manipulation:
+*		cl_qpool_get, cl_qpool_put, cl_qpool_put_list, cl_qpool_grow
+*
+*	Attributes:
+*		cl_is_qpool_inited, cl_qpool_count
+*********/
+
+
+/****d* Component Library: Quick Pool/cl_pfn_qpool_init_t
+* NAME
+*	cl_pfn_qpool_init_t
+*
+* DESCRIPTION
+*	The cl_pfn_qpool_init_t function type defines the prototype for
+*	functions used as constructors for objects being allocated by a
+*	quick pool.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_qpool_init_t)(
+	IN void* const p_object,
+	IN void* context,
+	OUT cl_pool_item_t** const pp_pool_item );
+/*
+* PARAMETERS
+*	p_object
+*		[in] Pointer to an object to initialize.
+*
+*	context
+*		[in] Context provided in a call to cl_qpool_init.
+*
+*	pp_pool_item
+*		[out] On success, set by the callback to point to the cl_pool_item_t
+*		structure that represents the object.
+*
+* RETURN VALUES
+*	Return CL_SUCCESS to indicate that initialization of the object
+*	was successful and that initialization of further objects may continue.
+*
+*	Other cl_status_t values will be returned by cl_qpool_init
+*	and cl_qpool_grow.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as an optional parameter to the
+*	cl_qpool_init function.
+*
+*	The initializer is invoked once per allocated object, allowing the user
+*	to perform any necessary initialization. Returning a status other than
+*	CL_SUCCESS aborts a grow operation, initiated either through cl_qpool_init
+*	or cl_qpool_grow, causing the initiating function to fail.
+*	Any non-CL_SUCCESS status will be returned by the function that initiated
+*	the grow operation.
+*
+*	All memory for the object is pre-allocated. Users should include space in
+*	their objects for the cl_pool_item_t structure that will represent the
+*	object to avoid having to allocate that structure in the initialization
+*	callback.
+*
+*	When later performing a cl_qpool_get call, the return value is a pointer
+*	to the cl_pool_item_t returned by this function in the pp_pool_item
+*	parameter. Users must set pp_pool_item to a valid pointer to the
+*	cl_pool_item_t representing the object if they return CL_SUCCESS.
+*
+* SEE ALSO
+*	Quick Pool, cl_qpool_init
+*********/
+
+
+/****d* Component Library: Quick Pool/cl_pfn_qpool_dtor_t
+* NAME
+*	cl_pfn_qpool_dtor_t
+*
+* DESCRIPTION
+*	The cl_pfn_qpool_dtor_t function type defines the prototype for
+*	functions used as destructors for objects being deallocated by a
+*	quick pool.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_qpool_dtor_t)(
+	IN const cl_pool_item_t* const p_pool_item,
+	IN void* context );
+/*
+* PARAMETERS
+*	p_pool_item
+*		[in] Pointer to a cl_pool_item_t structure representing an object.
+*
+*	context
+*		[in] Context provided in a call to cl_qpool_init.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function provided by the user as an optional parameter to the
+*	cl_qpool_init function.
+*
+*	The destructor is invoked once per allocated object, allowing the user
+*	to perform any necessary cleanup. Users should not attempt to deallocate
+*	the memory for the object, as the quick pool manages object
+*	allocation and deallocation.
+*
+* SEE ALSO
+*	Quick Pool, cl_qpool_init
+*********/
+
+
+/****s* Component Library: Quick Pool/cl_qpool_t
+* NAME
+*	cl_qpool_t
+*
+* DESCRIPTION
+*	Quick pool structure.
+*
+*	The cl_qpool_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_qpool
+{
+	cl_qcpool_t				qcpool;
+	cl_pfn_qpool_init_t		pfn_init;
+	cl_pfn_qpool_dtor_t		pfn_dtor;
+	const void				*context;
+
+} cl_qpool_t;
+/*
+* FIELDS
+*	qcpool
+*		Quick composite pool that manages all objects.
+*
+*	pfn_init
+*		Pointer to the user's initializer callback, used by the pool
+*		to translate the quick composite pool's initializer callback to
+*		a quick pool initializer callback.
+*
+*	pfn_dtor
+*		Pointer to the user's destructor callback, used by the pool
+*		to translate the quick composite pool's destructor callback to
+*		a quick pool destructor callback.
+*
+*	context
+*		User's provided context for callback functions, used by the pool
+*		when invoking callbacks.
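[Editorial aside — a hypothetical initializer of type cl_pfn_qpool_init_t, not part of the patched header. It shows the one case the default cannot handle: an object whose cl_pool_item_t is not the first member, so the callback must report the item's location through pp_pool_item. my_ctx_obj_t and the signature value are invented.]

    typedef struct _my_ctx_obj
    {
        uint32_t        signature;  /* deliberately placed before the item */
        cl_pool_item_t  pool_item;
    } my_ctx_obj_t;

    static cl_status_t CL_API
    my_ctx_obj_init(
        IN void* const p_object,
        IN void* context,
        OUT cl_pool_item_t** const pp_pool_item )
    {
        my_ctx_obj_t *p_obj = (my_ctx_obj_t*)p_object;

        (void)context;  /* unused in this sketch */
        p_obj->signature = 0xC0FFEE00;

        /* Hand the pool the cl_pool_item_t that represents this object. */
        *pp_pool_item = &p_obj->pool_item;
        return( CL_SUCCESS );
    }

Passed as the pfn_initializer argument to cl_qpool_init, a callback like this runs once per object when the pool allocates it.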
+* +* SEE ALSO +* Quick Pool +*********/ + + + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + + +/****f* Component Library: Quick Pool/cl_qpool_construct +* NAME +* cl_qpool_construct +* +* DESCRIPTION +* The cl_qpool_construct function constructs a quick pool. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_qpool_construct( + IN cl_qpool_t* const p_pool ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure whose state to initialize. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling cl_qpool_init, cl_qpool_destroy, cl_is_qpool_inited. +* +* Calling cl_qpool_construct is a prerequisite to calling any other +* quick pool function except cl_pool_init. +* +* SEE ALSO +* Quick Pool, cl_qpool_init, cl_qpool_destroy, cl_is_qpool_inited. +*********/ + + +/****f* Component Library: Quick Pool/cl_is_qpool_inited +* NAME +* cl_is_qpool_inited +* +* DESCRIPTION +* The cl_is_qpool_inited function returns whether a quick pool was +* successfully initialized. +* +* SYNOPSIS +*/ +CL_INLINE uint32_t CL_API +cl_is_qpool_inited( + IN const cl_qpool_t* const p_pool ) +{ + /* CL_ASSERT that a non-null pointer is provided. */ + CL_ASSERT( p_pool ); + return( cl_is_qcpool_inited( &p_pool->qcpool ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure whose initialization state +* to check. +* +* RETURN VALUES +* TRUE if the quick pool was initialized successfully. +* +* FALSE otherwise. +* +* NOTES +* Allows checking the state of a quick pool to determine if +* invoking member functions is appropriate. +* +* SEE ALSO +* Quick Pool +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_init +* NAME +* cl_qpool_init +* +* DESCRIPTION +* The cl_qpool_init function initializes a quick pool for use. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_qpool_init( + IN cl_qpool_t* const p_pool, + IN const size_t min_size, + IN const size_t max_size, + IN const size_t grow_size, + IN const size_t object_size, + IN cl_pfn_qpool_init_t pfn_initializer OPTIONAL, + IN cl_pfn_qpool_dtor_t pfn_destructor OPTIONAL, + IN const void* const context ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure to initialize. +* +* min_size +* [in] Minimum number of objects that the pool should support. All +* necessary allocations to allow storing the minimum number of items +* are performed at initialization time, and all necessary callbacks +* successfully invoked. +* +* max_size +* [in] Maximum number of objects to which the pool is allowed to grow. +* A value of zero specifies no maximum. +* +* grow_size +* [in] Number of objects to allocate when incrementally growing the pool. +* A value of zero disables automatic growth. +* +* object_size +* [in] Size, in bytes, of each object. +* +* pfn_initializer +* [in] Initialization callback to invoke for every new object when +* growing the pool. This parameter is optional and may be NULL. If NULL, +* the pool assumes the cl_pool_item_t structure describing objects is +* located at the head of each object. See the cl_pfn_qpool_init_t +* function type declaration for details about the callback function. +* +* pfn_destructor +* [in] Destructor callback to invoke for every object before memory for +* that object is freed. This parameter is optional and may be NULL. +* See the cl_pfn_qpool_dtor_t function type declaration for details +* about the callback function. +* +* context +* [in] Value to pass to the callback functions to provide context. 
+* +* RETURN VALUES +* CL_SUCCESS if the quick pool was initialized successfully. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize the +* quick pool. +* +* CL_INVALID_SETTING if a the maximum size is non-zero and less than the +* minimum size. +* +* Other cl_status_t value returned by optional initialization callback function +* specified by the pfn_initializer parameter. +* +* NOTES +* cl_qpool_init initializes, and if necessary, grows the pool to +* the capacity desired. +* +* SEE ALSO +* Quick Pool, cl_qpool_construct, cl_qpool_destroy, +* cl_qpool_get, cl_qpool_put, cl_qpool_grow, +* cl_qpool_count, cl_pfn_qpool_init_t, cl_pfn_qpool_init_t, +* cl_pfn_qpool_dtor_t +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_destroy +* NAME +* cl_qpool_destroy +* +* DESCRIPTION +* The cl_qpool_destroy function destroys a quick pool. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qpool_destroy( + IN cl_qpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + cl_qcpool_destroy( &p_pool->qcpool ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* All memory allocated for objects is freed. The destructor callback, +* if any, will be invoked for every allocated object. Further operations +* on the pool should not be attempted after cl_qpool_destroy +* is invoked. +* +* This function should only be called after a call to +* cl_qpool_construct or cl_qpool_init. +* +* In a debug build, cl_qpool_destroy asserts that all objects are in +* the pool. +* +* SEE ALSO +* Quick Pool, cl_qpool_construct, cl_qpool_init +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_count +* NAME +* cl_qpool_count +* +* DESCRIPTION +* The cl_qpool_count function returns the number of available objects +* in a quick pool. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_qpool_count( + IN cl_qpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + return( cl_qcpool_count( &p_pool->qcpool ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure for which the number of +* available objects is requested. +* +* RETURN VALUE +* Returns the number of objects available in the specified quick pool. +* +* SEE ALSO +* Quick Pool +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_get +* NAME +* cl_qpool_get +* +* DESCRIPTION +* The cl_qpool_get function retrieves an object from a +* quick pool. +* +* SYNOPSIS +*/ +CL_INLINE cl_pool_item_t* CL_API +cl_qpool_get( + IN cl_qpool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + return( cl_qcpool_get( &p_pool->qcpool ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure from which to retrieve +* an object. +* +* RETURN VALUES +* Returns a pointer to a cl_pool_item_t for an object. +* +* Returns NULL if the pool is empty and can not be grown automatically. +* +* NOTES +* cl_qpool_get returns the object at the head of the pool. If the pool is +* empty, it is automatically grown to accommodate this request unless the +* grow_size parameter passed to the cl_qpool_init function was zero. +* +* SEE ALSO +* Quick Pool, cl_qpool_get_tail, cl_qpool_put, cl_qpool_grow, cl_qpool_count +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_put +* NAME +* cl_qpool_put +* +* DESCRIPTION +* The cl_qpool_put function returns an object to the head of a quick pool. 
+* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qpool_put( + IN cl_qpool_t* const p_pool, + IN cl_pool_item_t* const p_pool_item ) +{ + CL_ASSERT( p_pool ); + cl_qcpool_put( &p_pool->qcpool, p_pool_item ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure to which to return +* an object. +* +* p_pool_item +* [in] Pointer to a cl_pool_item_t structure for the object +* being returned. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_qpool_put places the returned object at the head of the pool. +* +* The object specified by the p_pool_item parameter must have been +* retrieved from the pool by a previous call to cl_qpool_get. +* +* SEE ALSO +* Quick Pool, cl_qpool_put_tail, cl_qpool_get +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_put_list +* NAME +* cl_qpool_put_list +* +* DESCRIPTION +* The cl_qpool_put_list function returns a list of objects to the head +* of a quick pool. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_qpool_put_list( + IN cl_qpool_t* const p_pool, + IN cl_qlist_t* const p_list ) +{ + CL_ASSERT( p_pool ); + cl_qcpool_put_list( &p_pool->qcpool, p_list ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure to which to return +* a list of objects. +* +* p_list +* [in] Pointer to a cl_qlist_t structure for the list of objects +* being returned. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_qpool_put_list places the returned objects at the head of the pool. +* +* The objects in the list specified by the p_list parameter must have been +* retrieved from the pool by a previous call to cl_qpool_get. +* +* SEE ALSO +* Quick Pool, cl_qpool_put, cl_qpool_put_tail, cl_qpool_get +*********/ + + +/****f* Component Library: Quick Pool/cl_qpool_grow +* NAME +* cl_qpool_grow +* +* DESCRIPTION +* The cl_qpool_grow function grows a quick pool by +* the specified number of objects. +* +* SYNOPSIS +*/ +CL_INLINE cl_status_t CL_API +cl_qpool_grow( + IN cl_qpool_t* const p_pool, + IN const size_t obj_count ) +{ + CL_ASSERT( p_pool ); + return( cl_qcpool_grow( &p_pool->qcpool, obj_count ) ); +} +/* +* PARAMETERS +* p_pool +* [in] Pointer to a cl_qpool_t structure whose capacity to grow. +* +* obj_count +* [in] Number of objects by which to grow the pool. +* +* RETURN VALUES +* CL_SUCCESS if the quick pool grew successfully. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to grow the +* quick pool. +* +* cl_status_t value returned by optional initialization callback function +* specified by the pfn_initializer parameter passed to the +* cl_qpool_init function. +* +* NOTES +* It is not necessary to call cl_qpool_grow if the pool is +* configured to grow automatically. +* +* SEE ALSO +* Quick Pool +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + + +#endif /* _CL_QUICK_POOL_H_ */ diff --git a/branches/Ndi/inc/complib/cl_rbmap.h b/branches/Ndi/inc/complib/cl_rbmap.h new file mode 100644 index 00000000..7e73fb42 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_rbmap.h @@ -0,0 +1,593 @@ +/*++ +Copyright © InfiniCon Systems, Inc. All rights reserved. + +THIS SOFTWARE IS PROVIDED BY INFINICON SYSTEMS, INC. ("INFINICON") TO EACH +PERSON OR COMPANY ("RECIPIENT") ON AN "AS IS" BASIS. ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+IN NO EVENT SHALL INFINICON BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; +OR BUSINESS INTERRUPTION) HOWEVER CAUSED OR ON ANY THEORY OF LIABILITY, +WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED +OF THE POSSIBILITY OF SUCH DAMAGE. + +Any agreements between InfiniCon and the Recipient shall apply to Recipient's +use of the Software. +--*/ + + +/* + * Abstract: + * Declaration of primitive red/black map, a red/black tree where the caller + * always provides all necessary storage. + * + * This tree implementation exposes functions required for the client to + * manually walk the map, allowing clients to implement various methods + * of comparisson. + * + * Environment: + * All + * + * $Revision$ + */ + + +#ifndef _CL_RBMAP_H_ +#define _CL_RBMAP_H_ + + +#include + + +/****h* Component Library/RB Map +* NAME +* RB Map +* +* DESCRIPTION +* RB map implements a binary tree that stores user provided cl_rbmap_item_t +* structures. Each item stored in a RB map has a unique key +* (duplicates are not allowed). RB map provides the ability to +* efficiently search for an item given a key. +* +* RB map does not allocate any memory, and can therefore not fail +* any operations due to insufficient memory. RB map can thus be useful +* in minimizing the error paths in code. +* +* RB map is not thread safe, and users must provide serialization when +* adding and removing items from the map. +* +* The RB map functions operate on a cl_rbmap_t structure which should be +* treated as opaque and should be manipulated only through the provided +* functions. +* +* SEE ALSO +* Structures: +* cl_rbmap_t, cl_rbmap_item_t +* +* Initialization: +* cl_rbmap_init +* +* Iteration: +* cl_rbmap_root, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up +* +* Manipulation: +* cl_rbmap_insert, cl_rbmap_get, cl_rbmap_remove_item, cl_rbmap_remove, +* cl_rbmap_reset, cl_rbmap_merge, cl_rbmap_delta +* +* Search: +* cl_rbmap_apply_func +* +* Attributes: +* cl_rbmap_count, cl_is_rbmap_empty, +*********/ + + +/****i* Component Library: RB Map/cl_map_color_t +* NAME +* cl_map_color_t +* +* DESCRIPTION +* The cl_map_color_t enumerated type is used to note the color of +* nodes in a map. +* +* SYNOPSIS +*/ +typedef enum _cl_map_color +{ + CL_MAP_RED, + CL_MAP_BLACK + +} cl_map_color_t; +/* +* VALUES +* CL_MAP_RED +* The node in the map is red. +* +* CL_MAP_BLACK +* The node in the map is black. +* +* SEE ALSO +* RB Map, cl_rbmap_item_t +*********/ + + +/****s* Component Library: RB Map/cl_rbmap_item_t +* NAME +* cl_rbmap_item_t +* +* DESCRIPTION +* The cl_rbmap_item_t structure is used by maps to store objects. +* +* The cl_rbmap_item_t structure should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_rbmap_item +{ + struct _cl_rbmap_item *p_left; + struct _cl_rbmap_item *p_right; + struct _cl_rbmap_item *p_up; + cl_map_color_t color; +#ifdef _DEBUG_ + struct _cl_rbmap *p_map; +#endif + +} cl_rbmap_item_t; +/* +* FIELDS +* p_left +* Pointer to the map item that is a child to the left of the node. +* +* p_right +* Pointer to the map item that is a child to the right of the node. +* +* p_up +* Pointer to the map item that is the parent of the node. 
+*
+* color
+* Indicates whether a node is red or black in the map.
+*
+* NOTES
+* None of the fields of this structure should be manipulated by users, as
+* they are critical to the proper operation of the map in which they
+* are stored.
+*
+* To allow storing items in either a quick list, a quick pool, or a quick
+* map, the map implementation guarantees that the map item can be safely
+* cast to a pool item used for storing an object in a quick pool, or cast to
+* a list item used for storing an object in a quick list. This removes the
+* need to embed a map item, a list item, and a pool item in objects that need
+* to be stored in a quick list, a quick pool, and an RB map.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_insert, cl_pool_item_t, cl_list_item_t
+*********/
+
+
+/****s* Component Library: RB Map/cl_rbmap_t
+* NAME
+* cl_rbmap_t
+*
+* DESCRIPTION
+* RB map structure.
+*
+* The cl_rbmap_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_rbmap
+{
+ cl_rbmap_item_t root;
+ cl_rbmap_item_t nil;
+ cl_state_t state;
+ size_t count;
+
+} cl_rbmap_t;
+/*
+* FIELDS
+* root
+* Map item that serves as root of the map. The root is set up to
+* always have itself as parent. The left pointer is set to point to
+* the item at the root.
+*
+* nil
+* Map item that serves as terminator for all leaves.
+*
+* state
+* State of the map, used to verify that operations are permitted.
+*
+* count
+* Number of items in the map.
+*
+* SEE ALSO
+* RB Map
+*********/
+
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/****f* Component Library: RB Map/cl_rbmap_count
+* NAME
+* cl_rbmap_count
+*
+* DESCRIPTION
+* The cl_rbmap_count function returns the number of items stored
+* in an RB map.
+*
+* SYNOPSIS
+*/
+CL_INLINE size_t CL_API
+cl_rbmap_count(
+ IN const cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ CL_ASSERT( p_map->state == CL_INITIALIZED );
+ return( p_map->count );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure whose item count to return.
+*
+* RETURN VALUE
+* Returns the number of items stored in the map.
+*
+* SEE ALSO
+* RB Map, cl_is_rbmap_empty
+*********/
+
+
+/****f* Component Library: RB Map/cl_is_rbmap_empty
+* NAME
+* cl_is_rbmap_empty
+*
+* DESCRIPTION
+* The cl_is_rbmap_empty function returns whether an RB map is empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_rbmap_empty(
+ IN const cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+ return( p_map->count == 0 );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure to test for emptiness.
+*
+* RETURN VALUES
+* TRUE if the RB map is empty.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_count, cl_rbmap_reset
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_reset
+* NAME
+* cl_rbmap_reset
+*
+* DESCRIPTION
+* The cl_rbmap_reset function removes all items in an RB map,
+* leaving it empty.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_rbmap_reset(
+ IN cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ CL_ASSERT( p_map->state == CL_INITIALIZED );
+
+ p_map->root.p_left = &p_map->nil;
+ p_map->count = 0;
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure to empty.
+*
+* RETURN VALUE
+* This function does not return a value.
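+*
+* NOTES
+* Because an RB map never allocates memory, resetting simply unlinks all
+* items from the map; the items themselves are not freed and remain owned
+* by the caller. A minimal sketch (my_map is illustrative, not part of
+* this header):
+*
+* cl_rbmap_t my_map;
+*
+* cl_rbmap_init( &my_map );
+* ...
+* cl_rbmap_reset( &my_map );
+* CL_ASSERT( cl_is_rbmap_empty( &my_map ) );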
+*
+* SEE ALSO
+* RB Map, cl_rbmap_remove, cl_rbmap_remove_item
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_init
+* NAME
+* cl_rbmap_init
+*
+* DESCRIPTION
+* The cl_rbmap_init function initializes an RB map for use.
+*
+* SYNOPSIS
+*/
+CL_INLINE void CL_API
+cl_rbmap_init(
+ IN cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+
+ /* special setup for the root node */
+ p_map->root.p_left = &p_map->nil;
+ p_map->root.p_right = &p_map->nil;
+ p_map->root.p_up = &p_map->root;
+ p_map->root.color = CL_MAP_BLACK;
+
+ /* Setup the node used as terminator for all leaves. */
+ p_map->nil.p_left = &p_map->nil;
+ p_map->nil.p_right = &p_map->nil;
+ p_map->nil.p_up = &p_map->nil;
+ p_map->nil.color = CL_MAP_BLACK;
+
+#ifdef _DEBUG_
+ p_map->root.p_map = p_map;
+ p_map->nil.p_map = p_map;
+#endif
+
+ p_map->state = CL_INITIALIZED;
+
+ p_map->count = 0;
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure to initialize.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling RB map manipulation functions.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_insert, cl_rbmap_remove
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_root
+* NAME
+* cl_rbmap_root
+*
+* DESCRIPTION
+* The cl_rbmap_root function returns the root of an RB map.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_rbmap_item_t* const CL_API
+cl_rbmap_root(
+ IN const cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ return( p_map->root.p_left );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure whose root to return.
+*
+* RETURN VALUES
+* Pointer to the map item at the root of the map.
+*
+* Pointer to the map end if the map is empty.
+*
+* NOTES
+* cl_rbmap_root returns the item at the root of the map. If the map is
+* empty, the returned pointer compares equal to the pointer returned by
+* cl_rbmap_end.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_end, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_end
+* NAME
+* cl_rbmap_end
+*
+* DESCRIPTION
+* The cl_rbmap_end function returns the end of an RB map.
+*
+* SYNOPSIS
+*/
+CL_INLINE const cl_rbmap_item_t* const CL_API
+cl_rbmap_end(
+ IN const cl_rbmap_t* const p_map )
+{
+ CL_ASSERT( p_map );
+ CL_ASSERT( p_map->state == CL_INITIALIZED );
+ /* Nil is the end of the map. */
+ return( &p_map->nil );
+}
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure whose end to return.
+*
+* RETURN VALUE
+* Pointer to the end of the map.
+*
+* NOTES
+* cl_rbmap_end is useful for determining the validity of map items returned
+* by cl_rbmap_root, cl_rbmap_left, or cl_rbmap_right. If the map item
+* pointer returned by any of these functions compares equal to the end of
+* the map, the end of the map was encountered. When using cl_rbmap_root,
+* this condition indicates that the map is empty.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_root, cl_rbmap_left, cl_rbmap_right, cl_rbmap_up
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_left
+* NAME
+* cl_rbmap_left
+*
+* DESCRIPTION
+* The cl_rbmap_left function returns the map item to the left
+* of the specified map item.
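+*
+* Together with cl_rbmap_root, cl_rbmap_end, cl_rbmap_right, and
+* cl_rbmap_insert, this lets callers walk the tree using their own
+* comparison logic. A minimal keyed-insertion sketch follows; my_node_t
+* and its uint64_t key are illustrative and not part of this header. The
+* map item is placed first so that the cast from cl_rbmap_item_t is valid,
+* and duplicate keys are not handled:
+*
+* typedef struct _my_node
+* {
+*     cl_rbmap_item_t item;
+*     uint64_t        key;
+*
+* } my_node_t;
+*
+* void
+* my_insert(
+*     IN cl_rbmap_t* const p_map,
+*     IN my_node_t* const p_node )
+* {
+*     cl_rbmap_item_t *p_item = cl_rbmap_root( p_map );
+*     cl_rbmap_item_t *p_insert_at = p_item;
+*     boolean_t left = TRUE;
+*
+*     while( p_item != cl_rbmap_end( p_map ) )
+*     {
+*         p_insert_at = p_item;
+*         left = (boolean_t)
+*             (p_node->key < ((my_node_t*)p_item)->key);
+*         if( left )
+*             p_item = cl_rbmap_left( p_item );
+*         else
+*             p_item = cl_rbmap_right( p_item );
+*     }
+*     cl_rbmap_insert( p_map, p_insert_at, &p_node->item, left );
+* }
+*
+* The loop descends until it falls off the tree; p_insert_at is then the
+* parent under which cl_rbmap_insert links the new item on the chosen side.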
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_rbmap_item_t* CL_API
+cl_rbmap_left(
+ IN const cl_rbmap_item_t* const p_item )
+{
+ CL_ASSERT( p_item );
+ return( (cl_rbmap_item_t*)p_item->p_left );
+}
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a map item whose left child to return.
+*
+* RETURN VALUES
+* Pointer to the map item to the left in an RB map.
+*
+* Pointer to the map end if no item is to the left.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_root, cl_rbmap_right, cl_rbmap_end,
+* cl_rbmap_item_t
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_right
+* NAME
+* cl_rbmap_right
+*
+* DESCRIPTION
+* The cl_rbmap_right function returns the map item to the right
+* of the specified map item.
+*
+* SYNOPSIS
+*/
+CL_INLINE cl_rbmap_item_t* CL_API
+cl_rbmap_right(
+ IN const cl_rbmap_item_t* const p_item )
+{
+ CL_ASSERT( p_item );
+ return( (cl_rbmap_item_t*)p_item->p_right );
+}
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a map item whose right child to return.
+*
+* RETURN VALUES
+* Pointer to the map item to the right in an RB map.
+*
+* Pointer to the map end if no item is to the right.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_root, cl_rbmap_left, cl_rbmap_end,
+* cl_rbmap_item_t
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_insert
+* NAME
+* cl_rbmap_insert
+*
+* DESCRIPTION
+* The cl_rbmap_insert function inserts a map item into an RB map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_rbmap_insert(
+ IN cl_rbmap_t* const p_map,
+ IN cl_rbmap_item_t* const p_insert_at,
+ IN cl_rbmap_item_t* const p_item,
+ IN boolean_t left );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure into which to add the item.
+*
+* p_insert_at
+* [in] Pointer to a cl_rbmap_item_t structure to serve as parent
+* to p_item.
+*
+* p_item
+* [in] Pointer to a cl_rbmap_item_t structure to insert into the RB map.
+*
+* left
+* [in] TRUE if p_item should be inserted to the left of p_insert_at;
+* FALSE if it should be inserted to the right.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* The map performs no key comparisons of its own; callers must locate the
+* correct parent item and insertion side using their own comparison logic
+* before calling cl_rbmap_insert.
+*
+* Insertion operations may cause the RB map to rebalance.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_remove, cl_rbmap_item_t
+*********/
+
+
+/****f* Component Library: RB Map/cl_rbmap_remove_item
+* NAME
+* cl_rbmap_remove_item
+*
+* DESCRIPTION
+* The cl_rbmap_remove_item function removes the specified map item
+* from an RB map.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_rbmap_remove_item(
+ IN cl_rbmap_t* const p_map,
+ IN cl_rbmap_item_t* const p_item );
+/*
+* PARAMETERS
+* p_map
+* [in] Pointer to a cl_rbmap_t structure from which to remove the item.
+*
+* p_item
+* [in] Pointer to a map item to remove from its RB map.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Removes the map item pointed to by p_item from its RB map.
+*
+* In a debug build, cl_rbmap_remove_item asserts that the item being removed
+* is in the specified map.
+*
+* SEE ALSO
+* RB Map, cl_rbmap_remove, cl_rbmap_reset, cl_rbmap_insert
+*********/
+
+
+#ifdef __cplusplus
+}
+#endif
+
+
+#endif /* _CL_RBMAP_H_ */
diff --git a/branches/Ndi/inc/complib/cl_reqmgr.h b/branches/Ndi/inc/complib/cl_reqmgr.h
new file mode 100644
index 00000000..7077e13b
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_reqmgr.h
@@ -0,0 +1,481 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of asynchronous request manager. The request manager does + * not return resources, only notifies the user when resources are available. + * + * Environment: + * All + */ + + +#ifndef _CL_REQ_MGR_H_ +#define _CL_REQ_MGR_H_ + + +#include + + +/****h* Component Library/Request Manager +* NAME +* Request Manager +* +* DESCRIPTION +* The Request Manager manages synchronous as well as asynchronous +* requests for objects. +* +* Request manager does not supply the objects, but merely returns whether +* objects are available to satisfy requests. This allows users to use +* various sources for objects. +* +* While the request manager manages synchronous and asynchronous requests +* for objects, it does not itself operate asynchronously. Instead, the +* cl_req_mgr_resume function returns information for resuming asynchronous +* requests. If a call to cl_req_mgr_resume returns CL_SUCCESS, additional +* requests may be able to resume. It is recommended that users flush +* pending requests by calling cl_req_mgr_resume while CL_SUCCESS is returned. +* +* The request manager functions operates on a cl_req_mgr_t structure which +* should be treated as opaque and should be manipulated only through the +* provided functions. +* +* SEE ALSO +* Types: +* cl_req_type_t +* +* Structures: +* cl_req_mgr_t +* +* Callbacks: +* cl_pfn_req_cb_t, cl_pfn_reqmgr_get_count_t +* +* Initialization/Destruction: +* cl_req_mgr_construct, cl_req_mgr_init, cl_req_mgr_destroy +* +* Manipulation: +* cl_req_mgr_get, cl_req_mgr_resume +* +* Attributes: +* cl_is_req_mgr_inited, cl_req_mgr_count +*********/ + + +/****d* Component Library: Request Manager/cl_pfn_req_cb_t +* NAME +* cl_pfn_req_cb_t +* +* DESCRIPTION +* The cl_pfn_req_cb_t function type defines the prototype for functions +* used to store a function pointer to a user defined function. +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_req_cb_t)( void ); +/* +* PARAMETERS +* This function does not take parameters. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Function pointers specified by this parameter do not have to match the +* defined syntax, as these callbacks are never invoked directly by the +* request manager. When specifying a function with a different prototype, +* cast the function pointer to this type. 
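+*
+* For example (a sketch; my_resume_handler, my_obj_t, p_obj, req_mgr, and
+* count are illustrative, not part of this header), a callback with its own
+* prototype is stored through a cast when an asynchronous request is
+* queued, then recovered from cl_req_mgr_resume and invoked by the caller:
+*
+* static void
+* my_resume_handler(
+*     IN my_obj_t* p_obj );
+*
+* cl_req_mgr_get( &req_mgr, &count, REQ_GET_ASYNC,
+*     (cl_pfn_req_cb_t)my_resume_handler, p_obj, NULL );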
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_get, cl_req_mgr_resume
+*********/
+
+
+/****d* Component Library: Request Manager/cl_req_type_t
+* NAME
+* cl_req_type_t
+*
+* DESCRIPTION
+* The cl_req_type_t enumerated type describes the type of request.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_req_type
+{
+ REQ_GET_SYNC,
+ REQ_GET_ASYNC,
+ REQ_GET_PARTIAL_OK
+
+} cl_req_type_t;
+/*
+* VALUES
+* REQ_GET_SYNC
+* Synchronous request.
+*
+* REQ_GET_ASYNC
+* Asynchronous requests for which all objects are required at once.
+*
+* REQ_GET_PARTIAL_OK
+* Asynchronous requests that may be broken into multiple smaller requests.
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_get
+*********/
+
+
+/****d* Component Library: Request Manager/cl_pfn_reqmgr_get_count_t
+* NAME
+* cl_pfn_reqmgr_get_count_t
+*
+* DESCRIPTION
+* The cl_pfn_reqmgr_get_count_t function type defines the prototype for
+* functions used to retrieve the number of available objects in a pool.
+*
+* SYNOPSIS
+*/
+typedef size_t
+(CL_API *cl_pfn_reqmgr_get_count_t)(
+ IN void* context );
+/*
+* PARAMETERS
+* context
+* [in] Context value provided in the get_context parameter of a call
+* to cl_req_mgr_init.
+*
+* RETURN VALUE
+* Returns the number of objects available in an object pool for which
+* requests are managed by a request manager.
+*
+* NOTES
+* This function type is provided as function prototype reference for the
+* function passed into cl_req_mgr_init. This function is invoked by the
+* request manager when trying to fulfill requests for resources, either
+* through a call to cl_req_mgr_get or cl_req_mgr_resume.
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_init, cl_req_mgr_get, cl_req_mgr_resume
+*********/
+
+
+/****s* Component Library: Request Manager/cl_req_mgr_t
+* NAME
+* cl_req_mgr_t
+*
+* DESCRIPTION
+* Request manager structure.
+*
+* The cl_req_mgr_t structure should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_req_mgr
+{
+ cl_pfn_reqmgr_get_count_t pfn_get_count;
+ const void *get_context;
+ cl_qlist_t request_queue;
+ cl_qpool_t request_pool;
+
+} cl_req_mgr_t;
+/*
+* FIELDS
+* pfn_get_count
+* Pointer to the count callback function.
+*
+* get_context
+* Context to pass as the single parameter to the count callback.
+*
+* request_queue
+* Pending requests for elements.
+*
+* request_pool
+* Pool of request structures for storing requests in the request queue.
+*
+* SEE ALSO
+* Request Manager
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Request Manager/cl_req_mgr_construct
+* NAME
+* cl_req_mgr_construct
+*
+* DESCRIPTION
+* The cl_req_mgr_construct function constructs a request manager.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_req_mgr_construct(
+ IN cl_req_mgr_t* const p_req_mgr );
+/*
+* PARAMETERS
+* p_req_mgr
+* [in] Pointer to a cl_req_mgr_t structure to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_req_mgr_construct allows calling cl_req_mgr_destroy without first
+* calling cl_req_mgr_init.
+*
+* Calling cl_req_mgr_construct is a prerequisite to calling any other
+* request manager function except cl_req_mgr_init.
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_init, cl_req_mgr_destroy
+*********/
+
+
+/****f* Component Library: Request Manager/cl_req_mgr_init
+* NAME
+* cl_req_mgr_init
+*
+* DESCRIPTION
+* The cl_req_mgr_init function initializes a request manager for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_req_mgr_init(
+ IN cl_req_mgr_t* const p_req_mgr,
+ IN cl_pfn_reqmgr_get_count_t pfn_get_count,
+ IN const void* const get_context );
+/*
+* PARAMETERS
+* p_req_mgr
+* [in] Pointer to a cl_req_mgr_t structure to initialize.
+*
+* pfn_get_count
+* [in] Callback function invoked by the request manager to get the
+* number of objects available in a pool of objects for which the
+* request manager is managing requests.
+* See the cl_pfn_reqmgr_get_count_t function type declaration for
+* details about the callback function.
+*
+* get_context
+* [in] Context to pass into the function specified by the
+* pfn_get_count parameter.
+*
+* RETURN VALUES
+* CL_SUCCESS if the request manager was successfully initialized.
+*
+* CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+* the request manager.
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_construct, cl_req_mgr_destroy, cl_req_mgr_get,
+* cl_req_mgr_resume, cl_pfn_reqmgr_get_count_t
+*********/
+
+
+/****f* Component Library: Request Manager/cl_req_mgr_destroy
+* NAME
+* cl_req_mgr_destroy
+*
+* DESCRIPTION
+* The cl_req_mgr_destroy function destroys a request manager.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_req_mgr_destroy(
+ IN cl_req_mgr_t* const p_req_mgr );
+/*
+* PARAMETERS
+* p_req_mgr
+* [in] Pointer to a cl_req_mgr_t structure to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* cl_req_mgr_destroy frees all memory allocated by the request manager.
+* Further operations on the request manager should not be attempted.
+*
+* This function should only be called after a call to cl_req_mgr_construct
+* or cl_req_mgr_init.
+*
+* SEE ALSO
+* Request Manager, cl_req_mgr_construct, cl_req_mgr_init
+*********/
+
+
+/****f* Component Library: Request Manager/cl_req_mgr_get
+* NAME
+* cl_req_mgr_get
+*
+* DESCRIPTION
+* The cl_req_mgr_get function handles synchronous and asynchronous
+* requests for objects.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_req_mgr_get(
+ IN cl_req_mgr_t* const p_req_mgr,
+ IN OUT size_t* const p_count,
+ IN const cl_req_type_t req_type,
+ IN cl_pfn_req_cb_t pfn_callback,
+ IN const void* const context1,
+ IN const void* const context2 );
+/*
+* PARAMETERS
+* p_req_mgr
+* [in] Pointer to a cl_req_mgr_t structure from which to check
+* for resources.
+*
+* p_count
+* [in/out] On input, contains the number of objects requested.
+* On output, contains the number of objects available.
+*
+* req_type
+* [in] Enumerated type describing the type of request. Valid values are:
+* REQ_GET_SYNC
+* Synchronous request.
+* REQ_GET_ASYNC
+* Asynchronous requests for which all objects are required at
+* once.
+* REQ_GET_PARTIAL_OK
+* Asynchronous requests that may be broken into multiple smaller
+* requests.
+*
+* pfn_callback
+* [in] Pointer to a callback function for use by the caller. This
+* callback function is never invoked by the request manager.
+*
+* context1
+* [in] First of two contexts for a resource request.
+*
+* context2
+* [in] Second of two contexts for a resource request.
+*
+* RETURN VALUES
+* CL_SUCCESS if all objects requested are available.
+*
+* CL_PENDING if the request could not be completed in its entirety.
+* The p_count parameter contains the number of objects immediately available.
+*
+* CL_INSUFFICIENT_RESOURCES if the request could not be completed due to
+* insufficient objects being available.
+*
+* CL_INSUFFICIENT_MEMORY if the request failed due to a lack of system memory.
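+*
+* For example, a caller might first attempt a synchronous request and fall
+* back to queuing an asynchronous one. This is a minimal sketch; req_mgr,
+* my_resume_cb, and p_my_obj are illustrative and not part of this header:
+*
+* size_t      count = 4;
+* cl_status_t status;
+*
+* status = cl_req_mgr_get( &req_mgr, &count, REQ_GET_SYNC,
+*     NULL, NULL, NULL );
+* if( status != CL_SUCCESS )
+* {
+*     count = 4;
+*     status = cl_req_mgr_get( &req_mgr, &count, REQ_GET_ASYNC,
+*         (cl_pfn_req_cb_t)my_resume_cb, p_my_obj, NULL );
+* }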
+* +* NOTES +* Upon successful completion of this function, the p_count parameter contains +* the number of objects available. +* +* Synchronous requests fail if there are any asynchronous requests pending, +* or if there are not enough resources to immediately satisfy the request in +* its entirety . +* +* Asynchronous requests fail if there is insufficient system memory to +* queue them. +* +* Once an asynchronous request is queued, use cl_req_mgr_resume to retrieve +* information for resuming queued requests. +* +* SEE ALSO +* Request Manager, cl_req_mgr_resume +*********/ + + +/****f* Component Library: Request Manager/cl_req_mgr_resume +* NAME +* cl_req_mgr_resume +* +* DESCRIPTION +* The cl_req_mgr_resume function attempts to resume queued requests. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_req_mgr_resume( + IN cl_req_mgr_t* const p_req_mgr, + OUT size_t* const p_count, + OUT cl_pfn_req_cb_t* const ppfn_callback, + OUT const void** const p_context1, + OUT const void** const p_context2 ); +/* +* PARAMETERS +* p_req_mgr +* [in] Pointer to a cl_req_mgr_t structure from which to resume requests. +* +* p_count +* [out] Contains the number of objects available for a resuming request. +* +* ppfn_callback +* [out] Contains the pfn_callback value for the resuming request, as +* provided to the call to the cl_req_mgr_get function. +* +* p_context1 +* [out] Contains the context1 value for the resuming request, as provided +* to the call to the cl_req_mgr_get function. +* +* p_context2 +* [out] Contains the context2 value for the resuming request, as provided +* to the call to the cl_req_mgr_get function. +* +* RETURN VALUES +* CL_SUCCESS if a request was completed. +* +* CL_PENDING if a request was continued, but not completed. +* +* CL_INSUFFICIENT_RESOURCES if a request could not be continued due to +* a lack of resources. +* +* CL_NOT_DONE if there were no pending requests. +* +* NOTES +* cl_req_mgr_resume resumes at most one request. Further requests may be +* able to be resumed if this call returns CL_SUCCESS. +* +* SEE ALSO +* Request Manager, cl_req_mgr_get +*********/ + + +#ifdef __cplusplus +} +#endif + + +#endif /* _CL_REQ_MGR_H_ */ diff --git a/branches/Ndi/inc/complib/cl_spinlock.h b/branches/Ndi/inc/complib/cl_spinlock.h new file mode 100644 index 00000000..8dd45e1e --- /dev/null +++ b/branches/Ndi/inc/complib/cl_spinlock.h @@ -0,0 +1,238 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of spin lock object. + * + * Environment: + * All + */ + + +#ifndef _CL_SPINLOCK_H_ +#define _CL_SPINLOCK_H_ + + +#include + + +/****h* Component Library/Spinlock +* NAME +* Spinlock +* +* DESCRIPTION +* Spinlock provides synchronization between threads for exclusive access to +* a resource. +* +* The spinlock functions manipulate a cl_spinlock_t structure which should +* be treated as opaque and should be manipulated only through the provided +* functions. +* +* SEE ALSO +* Structures: +* cl_spinlock_t +* +* Initialization: +* cl_spinlock_construct, cl_spinlock_init, cl_spinlock_destroy +* +* Manipulation +* cl_spinlock_acquire, cl_spinlock_release +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****f* Component Library: Spinlock/cl_spinlock_construct +* NAME +* cl_spinlock_construct +* +* DESCRIPTION +* The cl_spinlock_construct function initializes the state of a +* spin lock. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_spinlock_construct( + IN cl_spinlock_t* const p_spinlock ); +/* +* PARAMETERS +* p_spinlock +* [in] Pointer to a spin lock structure whose state to initialize. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling cl_spinlock_destroy without first calling +* cl_spinlock_init. +* +* Calling cl_spinlock_construct is a prerequisite to calling any other +* spin lock function except cl_spinlock_init. +* +* SEE ALSO +* Spinlock, cl_spinlock_init, cl_spinlock_destroy +*********/ + + +/****f* Component Library: Spinlock/cl_spinlock_init +* NAME +* cl_spinlock_init +* +* DESCRIPTION +* The cl_spinlock_init function initializes a spin lock for use. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_spinlock_init( + IN cl_spinlock_t* const p_spinlock ); +/* +* PARAMETERS +* p_spinlock +* [in] Pointer to a spin lock structure to initialize. +* +* RETURN VALUES +* CL_SUCCESS if initialization succeeded. +* +* CL_ERROR if initialization failed. Callers should call +* cl_spinlock_destroy to clean up any resources allocated during +* initialization. +* +* NOTES +* Initialize the spin lock structure. Allows calling cl_spinlock_aquire +* and cl_spinlock_release. +* +* SEE ALSO +* Spinlock, cl_spinlock_construct, cl_spinlock_destroy, +* cl_spinlock_acquire, cl_spinlock_release +*********/ + + +/****f* Component Library: Spinlock/cl_spinlock_destroy +* NAME +* cl_spinlock_destroy +* +* DESCRIPTION +* The cl_spinlock_destroy function performs all necessary cleanup of a +* spin lock. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_spinlock_destroy( + IN cl_spinlock_t* const p_spinlock ); +/* +* PARAMETERS +* p_spinlock +* [in] Pointer to a spin lock structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of a spin lock. This function must only +* be called if either cl_spinlock_construct or cl_spinlock_init has been +* called. +* +* SEE ALSO +* Spinlock, cl_spinlock_construct, cl_spinlock_init +*********/ + + +/****f* Component Library: Spinlock/cl_spinlock_acquire +* NAME +* cl_spinlock_acquire +* +* DESCRIPTION +* The cl_spinlock_acquire function acquires a spin lock. 
+* This version of lock does not prevent an interrupt from +* occuring on the processor on which the code is being +* executed. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_spinlock_acquire( + IN cl_spinlock_t* const p_spinlock ); +/* +* PARAMETERS +* p_spinlock +* [in] Pointer to a spin lock structure to acquire. +* +* RETURN VALUE +* This function does not return a value. +* +* SEE ALSO +* Spinlock, cl_spinlock_release +*********/ + + +/****f* Component Library: Spinlock/cl_spinlock_release +* NAME +* cl_spinlock_release +* +* DESCRIPTION +* The cl_spinlock_release function releases a spin lock object. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_spinlock_release( + IN cl_spinlock_t* const p_spinlock ); +/* +* PARAMETERS +* p_spinlock +* [in] Pointer to a spin lock structure to release. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Releases a spin lock after a call to cl_spinlock_acquire. +* +* SEE ALSO +* Spinlock, cl_spinlock_acquire +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_SPINLOCK_H_ */ diff --git a/branches/Ndi/inc/complib/cl_syscallback.h b/branches/Ndi/inc/complib/cl_syscallback.h new file mode 100644 index 00000000..09678969 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_syscallback.h @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * System Callback abstractions. + * + * Environment: + * All + */ + + +#ifndef _CL_SYS_CALLBACK_H_ +#define _CL_SYS_CALLBACK_H_ + + +#include + +/****h* Component Library/System Callback +* NAME +* System Callback +* +* DESCRIPTION +* The System Callback provider uses threads from a system thread-pool to +* invoke specified callback functions. +* +* Callbacks can be queued in a low- or high-priority queue for processing. +* +* cl_thread_suspend and cl_thread_stall can be used to delay or stall the +* callback thread. +* +* Environments that do not have a native system thread-pool emulate this +* functionality to provide cross-environment support. +* +* The cl_sys_callback_item_t structure should be treated as opaque and be +* manipulated only through the provided functions. 
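+*
+* A typical sequence gets an item, queues it, and releases it from within
+* the callback, which cl_sys_callback_put explicitly permits. This is a
+* minimal sketch; my_work_func, my_process, my_obj_t, and p_my_obj are
+* illustrative and not part of this header. The item itself is passed as
+* the queue context so the callback can return it to the pool:
+*
+* static void
+* my_work_func(
+*     IN void* get_context,
+*     IN void* queue_context )
+* {
+*     my_process( (my_obj_t*)get_context );
+*     cl_sys_callback_put( (cl_sys_callback_item_t*)queue_context );
+* }
+*
+* cl_sys_callback_item_t *p_item;
+*
+* p_item = cl_sys_callback_get( p_my_obj );
+* if( p_item )
+*     cl_sys_callback_queue( p_item, my_work_func, p_item, FALSE );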
+*********/ + + +/****d* Component Library: System Callback/cl_pfn_sys_callback_t +* NAME +* cl_pfn_sys_callback_t +* +* DESCRIPTION +* The cl_pfn_sys_callback_t function type defines the prototype for +* functions invoked by the system callback provider. +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_sys_callback_t)( + IN void* get_context, + IN void* queue_context ); +/* +* PARAMETERS +* get_context +* [in] Value of the get_context parameter specified in a call +* to cl_sys_callback_get. +* +* queue_context +* [in] Value of the queue_context parameter specified in a call +* to cl_sys_callback_queue. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function type is provided as function prototype reference for +* the function provided by users as a parameter to the +* cl_sys_callback_queue function. +* +* SEE ALSO +* System Callback, cl_sys_callback_queue +*********/ + + +/* Include platform specific system callback support. */ +#include + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****i* Component Library: System Callback/cl_sys_callback_construct +* NAME +* cl_sys_callback_construct +* +* DESCRIPTION +* The cl_sys_callback_construct function is called to initialize the state +* of the system callback provider. +* +* SYNOPSIS +*/ +void +__cl_sys_callback_construct( void ); +/* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function is called internally when initializing the component +* library for use. Users should never call this function directly. +* +* Calling cl_sys_callback_construct is a prerequisite to calling any other +* system callback function. +* +* Allows calling cl_sys_callback_init, cl_sys_callback_destroy, and +* cl_is_sys_callback_inited. +* +* SEE ALSO +* System Callback, cl_sys_callback_init, cl_sys_callback_destroy, +* cl_is_sys_callback_inited +*********/ + + +/****f* Component Library: System Callback/cl_is_sys_callback_inited +* NAME +* cl_is_sys_callback_inited +* +* DESCRIPTION +* The cl_is_sys_callback_inited function returns whether the system +* callback provider was initialized successfully +* +* SYNOPSIS +*/ +boolean_t +__cl_is_sys_callback_inited( void ); +/* +* RETURN VALUES +* TRUE if the system callback provider was initialized successfully. +* +* FALSE otherwise. +* +* NOTES +* Allows checking the state of the system callback provider to determine +* if invoking member functions is appropriate. +* +* SEE ALSO +* System Callback +*********/ + + +/****i* Component Library: System Callback/cl_sys_callback_init +* NAME +* cl_sys_callback_init +* +* DESCRIPTION +* The cl_sys_callback_init function is called to initialize the system +* callback provider. +* +* SYNOPSIS +*/ +cl_status_t +__cl_sys_callback_init( void ); +/* +* RETURN VALUES +* CL_SUCCESS if the system callback provider was initialized successfully. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to inititalize +* the system callback provider. +* +* CL_ERROR if the system callback provider's threads could not be created. +* +* NOTES +* This function is called internally when initializing the component +* library for use. Users should never call this function directly. +* +* SEE ALSO +* System Callback, cl_sys_callback_construct, cl_sys_callback_destroy +*********/ + + +/****i* Component Library: System Callback/cl_sys_callback_destroy +* NAME +* cl_sys_callback_destroy +* +* DESCRIPTION +* The cl_sys_callback_destroy function is called to destroy the system +* callback provider. 
+*
+* SYNOPSIS
+*/
+void
+__cl_sys_callback_destroy( void );
+/*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function is called internally when destroying the component
+* library after use. Users should never call this function directly.
+*
+* All threads and resources allocated by the system callback provider
+* are freed.
+*
+* This function should only be called after calling either
+* cl_sys_callback_construct or cl_sys_callback_init.
+*
+* SEE ALSO
+* System Callback, cl_sys_callback_construct, cl_sys_callback_init
+*********/
+
+
+/****f* Component Library: System Callback/cl_sys_callback_get
+* NAME
+* cl_sys_callback_get
+*
+* DESCRIPTION
+* The cl_sys_callback_get function retrieves a system callback item.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_sys_callback_item_t* CL_API
+cl_sys_callback_get(
+ IN const void* const get_context );
+/*
+* PARAMETERS
+* get_context
+* [in] Context value to pass into the callback function.
+*
+* RETURN VALUES
+* Returns a pointer to a system callback item if successful.
+*
+* Returns NULL if the call fails.
+*
+* NOTES
+* A system callback item must be released with a call to cl_sys_callback_put.
+*
+* Care must be taken to prevent a system callback item from being returned
+* to the pool while it is queued. Callers of cl_sys_callback_queue must not
+* return the system callback item to the pool until their callback has been
+* invoked.
+*
+* In Windows 2000 Kernel Mode, the get_context is a pointer to the device
+* object for which the system callback is being used.
+*
+* SEE ALSO
+* System Callback, cl_sys_callback_put, cl_sys_callback_queue
+*********/
+
+
+/****f* Component Library: System Callback/cl_sys_callback_put
+* NAME
+* cl_sys_callback_put
+*
+* DESCRIPTION
+* The cl_sys_callback_put function releases the specified
+* system callback item.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_sys_callback_put(
+ IN cl_sys_callback_item_t* const p_item );
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a system callback item to release.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* The p_item parameter points to a system callback item returned by
+* a previous call to cl_sys_callback_get.
+*
+* The specified system callback item must not be queued when making
+* a call to this function. This function can, however, be called
+* from the callback function.
+*
+* SEE ALSO
+* System Callback, cl_sys_callback_get, cl_sys_callback_queue
+*********/
+
+
+/****f* Component Library: System Callback/cl_sys_callback_queue
+* NAME
+* cl_sys_callback_queue
+*
+* DESCRIPTION
+* The cl_sys_callback_queue function queues the specified system callback item
+* for execution.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_sys_callback_queue(
+ IN cl_sys_callback_item_t* const p_item,
+ IN cl_pfn_sys_callback_t pfn_callback,
+ IN const void* const queue_context,
+ IN const boolean_t high_priority );
+/*
+* PARAMETERS
+* p_item
+* [in] Pointer to a system callback item.
+*
+* pfn_callback
+* [in] Pointer to a function to be invoked by the system callback module.
+* See the cl_pfn_sys_callback_t function type definition for details
+* about the callback function.
+*
+* queue_context
+* [in] Value passed to the system callback function.
+*
+* high_priority
+* [in] Specifies whether the request should be queued in the high- or
+* low-priority queue.
+*
+* RETURN VALUES
+* CL_SUCCESS if the system callback item was successfully queued.
+*
+* CL_ERROR otherwise.
+* +* NOTES +* A thread from the system thread pool will invoke the specified callback +* function with the get_context value specified in the call to +* cl_sys_callback_get and the specified context as parameters. +* +* The high priority queue is processed before the low priority queue. There +* is no fairness algorithm implemented for removing items from the queues. +* +* Care should be taken to only queue a given system callback item once +* at a time. +* +* SEE ALSO +* System Callback, cl_sys_callback_get, cl_pfn_sys_callback_t +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* _CL_SYS_CALLBACK_H_ */ diff --git a/branches/Ndi/inc/complib/cl_thread.h b/branches/Ndi/inc/complib/cl_thread.h new file mode 100644 index 00000000..aef5c9c6 --- /dev/null +++ b/branches/Ndi/inc/complib/cl_thread.h @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of thread abstraction and thread related operations. + * + * Environment: + * All + */ + + +#ifndef _CL_THREAD_H_ +#define _CL_THREAD_H_ + + +#include + + +/****i* Component Library/Thread +* NAME +* Thread +* +* DESCRIPTION +* The Thread provides a separate thread of execution. +* +* The cl_thread_t structure should be treated as opaque and should be +* manipulated only through the provided functions. +*********/ + + +/****d* Component Library: Thread/cl_pfn_thread_callback_t +* NAME +* cl_pfn_thread_callback_t +* +* DESCRIPTION +* The cl_pfn_thread_callback_t function type defines the prototype +* for functions invoked by thread objects +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_thread_callback_t)( + IN void* context ); +/* +* PARAMETERS +* context +* [in] Value specified in a call to cl_thread_init or +* cl_thread_pool_create. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function type is provided as function prototype reference for +* the function provided by users as a parameter to the cl_thread_init +* and cl_thread_pool_create functions. +* +* SEE ALSO +* Thread Pool +*********/ + + +/****i* Component Library: Thread/cl_thread_t +* NAME +* cl_thread_t +* +* DESCRIPTION +* Thread structure. +* +* The cl_thread_t structure should be treated as opaque and should be +* manipulated only through the provided functions. 
+* +* SYNOPSIS +*/ +typedef struct _cl_thread +{ + cl_thread_osd_t osd; + cl_pfn_thread_callback_t pfn_callback; + const void *context; + char name[16]; + +} cl_thread_t; +/* +* FIELDS +* osd +* Implementation specific structure for managing thread information. +* +* pfn_callback +* Callback function for the thread to invoke. +* +* context +* Context to pass to the thread callback function. +* +* name +* Name to assign to the thread. +* +* SEE ALSO +* Thread +*********/ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****i* Component Library: Thread/cl_thread_construct +* NAME +* cl_thread_construct +* +* DESCRIPTION +* The cl_thread_construct function initializes the state of a thread. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_thread_construct( + IN cl_thread_t* const p_thread ); +/* +* PARAMETERS +* p_thread +* [in] Pointer to a cl_thread_t structure whose state to initialize. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling cl_thread_destroy without first calling cl_thread_init. +* +* Calling cl_thread_construct is a prerequisite to calling any other +* thread function except cl_thread_init. +* +* SEE ALSO +* Thread, cl_thread_init, cl_thread_destroy +*********/ + + +/****i* Component Library: Thread/cl_thread_init +* NAME +* cl_thread_init +* +* DESCRIPTION +* The cl_thread_init function creates a new thread of execution. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_thread_init( + IN cl_thread_t* const p_thread, + IN cl_pfn_thread_callback_t pfn_callback, + IN const void* const context, + IN const char* const name ); +/* +* PARAMETERS +* p_thread +* [in] Pointer to a cl_thread_t structure to initialize. +* +* pfn_callback +* [in] Address of a function to be invoked by a thread. +* See the cl_pfn_thread_callback_t function type definition for +* details about the callback function. +* +* context +* [in] Value to pass to the callback function. +* +* name +* [in] Name to associate with the thread. The name may be up to 16 +* characters, including a terminating null character. +* +* RETURN VALUES +* CL_SUCCESS if thread creation succeeded. +* +* CL_ERROR if thread creation failed. +* +* NOTES +* The thread created with cl_thread_init will invoke the callback +* specified by the callback parameter with context as single parameter. +* +* The callback function is invoked once, and the thread exits when the +* callback returns. +* +* It is invalid to call cl_thread_destroy from the callback function, +* as doing so will result in a deadlock. +* +* SEE ALSO +* Thread, cl_thread_construct, cl_thread_destroy, cl_thread_suspend, +* cl_thread_stall, cl_pfn_thread_callback_t +*********/ + + +/****i* Component Library: Thread/cl_thread_destroy +* NAME +* cl_thread_destroy +* +* DESCRIPTION +* The cl_thread_destroy function performs any necessary cleanup to free +* resources associated with the specified thread. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_thread_destroy( + IN cl_thread_t* const p_thread ); +/* +* PARAMETERS +* p_thread +* [in] Pointer to a cl_thread_t structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function blocks until the thread exits and must not be called by the +* thread itself. Callers must therefore ensure that such a blocking call is +* possible from the context of the call. +* +* This function must only be called after a call to cl_thread_construct or +* cl_thread_init. 
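+*
+* A typical lifecycle is sketched below; my_thread_func and do_work are
+* illustrative and not part of this header. The callback runs once and
+* the thread exits when it returns:
+*
+* static void
+* my_thread_func(
+*     IN void* context )
+* {
+*     do_work( context );
+* }
+*
+* cl_thread_t thread;
+*
+* cl_thread_construct( &thread );
+* if( cl_thread_init( &thread, my_thread_func, NULL,
+*     "my_thread" ) == CL_SUCCESS )
+* {
+*     cl_thread_destroy( &thread );
+* }
+*
+* The cl_thread_destroy call blocks until my_thread_func returns.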
+*
+* SEE ALSO
+* Thread, cl_thread_construct, cl_thread_init
+*********/
+
+
+/****f* Component Library: Thread/cl_thread_suspend
+* NAME
+* cl_thread_suspend
+*
+* DESCRIPTION
+* The cl_thread_suspend function suspends the calling thread for a minimum
+* of the specified number of milliseconds.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_thread_suspend(
+ IN const uint32_t pause_ms );
+/*
+* PARAMETERS
+* pause_ms
+* [in] Number of milliseconds to suspend the calling thread.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function should only be called if it is valid for the caller's thread
+* to enter a wait state. For stalling a thread that cannot enter a wait
+* state, callers should use cl_thread_stall.
+*
+* SEE ALSO
+* Thread, cl_thread_stall
+*********/
+
+
+/****f* Component Library: Thread/cl_thread_stall
+* NAME
+* cl_thread_stall
+*
+* DESCRIPTION
+* The cl_thread_stall function stalls the calling thread for a minimum of
+* the specified number of microseconds.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_thread_stall(
+ IN const uint32_t pause_us );
+/*
+* PARAMETERS
+* pause_us
+* [in] Number of microseconds to stall the calling thread.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* The cl_thread_stall function performs a busy wait for the specified
+* number of microseconds. Care should be taken when using this function as
+* it does not relinquish its quantum of operation. For longer wait
+* operations, users should call cl_thread_suspend if possible.
+*
+* SEE ALSO
+* Thread, cl_thread_suspend
+*********/
+
+
+/****f* Component Library: Thread/cl_proc_count
+* NAME
+* cl_proc_count
+*
+* DESCRIPTION
+* The cl_proc_count function returns the number of processors in the system.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint32_t CL_API
+cl_proc_count( void );
+/*
+* RETURN VALUE
+* Returns the number of processors in the system.
+*********/
+
+
+/****i* Component Library: Thread/cl_is_current_thread
+* NAME
+* cl_is_current_thread
+*
+* DESCRIPTION
+* The cl_is_current_thread function compares the calling thread to the
+* specified thread and returns whether they are the same.
+*
+* SYNOPSIS
+*/
+CL_EXPORT boolean_t CL_API
+cl_is_current_thread(
+ IN const cl_thread_t* const p_thread );
+/*
+* PARAMETERS
+* p_thread
+* [in] Pointer to a cl_thread_t structure to compare to the
+* caller's thread.
+*
+* RETURN VALUES
+* TRUE if the thread specified by the p_thread parameter is the
+* calling thread.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* Thread, cl_thread_init
+*********/
+
+
+/****f* Component Library: Thread/cl_is_blockable
+* NAME
+* cl_is_blockable
+*
+* DESCRIPTION
+* The cl_is_blockable function indicates whether the current caller context
+* is blockable.
+*
+* SYNOPSIS
+*/
+CL_EXPORT boolean_t CL_API
+cl_is_blockable( void );
+/*
+* RETURN VALUES
+* TRUE if the caller's thread context can be blocked, i.e., it is safe
+* to sleep or to acquire a semaphore.
+*
+* FALSE otherwise.
+*
+* SEE ALSO
+* Thread
+*********/
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif
+
+
+#endif /* _CL_THREAD_H_ */
diff --git a/branches/Ndi/inc/complib/cl_threadpool.h b/branches/Ndi/inc/complib/cl_threadpool.h
new file mode 100644
index 00000000..b4c0089b
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_threadpool.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of thread pool.
+ *
+ * Environment:
+ * All
+ */
+
+
+#ifndef _CL_THREAD_POOL_H_
+#define _CL_THREAD_POOL_H_
+
+
+#include
+#include
+#include
+
+
+/****h* Component Library/Thread Pool
+* NAME
+* Thread Pool
+*
+* DESCRIPTION
+* The Thread Pool manages a user-specified number of threads.
+*
+* Each thread in the thread pool waits for a user-initiated signal before
+* invoking a user-specified callback function. All threads in the thread
+* pool invoke the same callback function.
+*
+* The thread pool functions operate on a cl_thread_pool_t structure which
+* should be treated as opaque, and should be manipulated only through the
+* provided functions.
+*
+* SEE ALSO
+* Structures:
+* cl_thread_pool_t
+*
+* Initialization:
+* cl_thread_pool_construct, cl_thread_pool_init, cl_thread_pool_destroy
+*
+* Manipulation:
+* cl_thread_pool_signal
+*********/
+
+
+/****s* Component Library: Thread Pool/cl_thread_pool_t
+* NAME
+* cl_thread_pool_t
+*
+* DESCRIPTION
+* Thread pool structure.
+*
+* The cl_thread_pool_t structure should be treated as opaque, and should be
+* manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_thread_pool
+{
+ cl_pfn_thread_callback_t pfn_callback;
+ const void *context;
+ cl_list_t thread_list;
+ cl_event_t wakeup_event;
+ cl_event_t destroy_event;
+ boolean_t exit;
+ cl_state_t state;
+ atomic32_t running_count;
+
+} cl_thread_pool_t;
+/*
+* FIELDS
+* pfn_callback
+* Callback function for the thread to invoke.
+*
+* context
+* Context to pass to the thread callback function.
+*
+* thread_list
+* List of threads managed by the thread pool.
+*
+* wakeup_event
+* Event used to signal threads to wake up and do work.
+*
+* destroy_event
+* Event used to signal threads to exit.
+*
+* exit
+* Flag used to indicate to the threads that they should exit.
+*
+* state
+* State of the thread pool.
+*
+* running_count
+* Number of threads running.
+*
+* SEE ALSO
+* Thread Pool
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Thread Pool/cl_thread_pool_construct
+* NAME
+* cl_thread_pool_construct
+*
+* DESCRIPTION
+* The cl_thread_pool_construct function initializes the state of a
+* thread pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_thread_pool_construct(
+ IN cl_thread_pool_t* const p_thread_pool );
+/*
+* PARAMETERS
+* p_thread_pool
+* [in] Pointer to a thread pool structure.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_thread_pool_destroy without first calling
+*	cl_thread_pool_init.
+*
+*	Calling cl_thread_pool_construct is a prerequisite to calling any other
+*	thread pool function except cl_thread_pool_init.
+*
+* SEE ALSO
+*	Thread Pool, cl_thread_pool_init, cl_thread_pool_destroy
+*********/
+
+
+/****f* Component Library: Thread Pool/cl_thread_pool_init
+* NAME
+*	cl_thread_pool_init
+*
+* DESCRIPTION
+*	The cl_thread_pool_init function creates the threads to be
+*	managed by a thread pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_thread_pool_init(
+	IN	cl_thread_pool_t* const		p_thread_pool,
+	IN	uint32_t					thread_count,
+	IN	cl_pfn_thread_callback_t	pfn_callback,
+	IN	const void* const			context,
+	IN	const char* const			name );
+/*
+* PARAMETERS
+*	p_thread_pool
+*		[in] Pointer to a thread pool structure to initialize.
+*
+*	thread_count
+*		[in] Number of threads to be managed by the thread pool.
+*
+*	pfn_callback
+*		[in] Address of a function to be invoked by a thread.
+*		See the cl_pfn_thread_callback_t function type definition for
+*		details about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+*	name
+*		[in] Name to associate with the threads. The name may be up to 16
+*		characters, including a terminating null character. All threads
+*		created in the pool have the same name.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the thread pool creation succeeded.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to initialize
+*	the thread pool.
+*
+*	CL_ERROR if the threads could not be created.
+*
+* NOTES
+*	cl_thread_pool_init creates and starts the specified number of threads.
+*	If thread_count is zero, the thread pool creates as many threads as there
+*	are processors in the system.
+*
+* SEE ALSO
+*	Thread Pool, cl_thread_pool_construct, cl_thread_pool_destroy,
+*	cl_thread_pool_signal, cl_pfn_thread_callback_t
+*********/
+
+
+/****f* Component Library: Thread Pool/cl_thread_pool_destroy
+* NAME
+*	cl_thread_pool_destroy
+*
+* DESCRIPTION
+*	The cl_thread_pool_destroy function performs any necessary cleanup
+*	for a thread pool.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_thread_pool_destroy(
+	IN	cl_thread_pool_t* const	p_thread_pool );
+/*
+* PARAMETERS
+*	p_thread_pool
+*		[in] Pointer to a thread pool structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function blocks until all threads exit, and must therefore not
+*	be called from any of the thread pool's threads. Because of its blocking
+*	nature, callers of cl_thread_pool_destroy must ensure that entering a wait
+*	state is valid from the calling thread context.
+*
+*	This function should only be called after a call to
+*	cl_thread_pool_construct or cl_thread_pool_init.
+*
+* SEE ALSO
+*	Thread Pool, cl_thread_pool_construct, cl_thread_pool_init
+*********/
+
+
+/****f* Component Library: Thread Pool/cl_thread_pool_signal
+* NAME
+*	cl_thread_pool_signal
+*
+* DESCRIPTION
+*	The cl_thread_pool_signal function signals a single thread of
+*	the thread pool to invoke the thread pool's callback function.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_thread_pool_signal(
+	IN	cl_thread_pool_t* const	p_thread_pool );
+/*
+* PARAMETERS
+*	p_thread_pool
+*		[in] Pointer to a thread pool structure to signal.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the thread pool was successfully signalled.
+*
+*	CL_ERROR otherwise.
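+*
+* EXAMPLE
+*	// A hedged usage sketch, not part of the original header: my_worker
+*	// and g_pool are hypothetical names supplied for illustration.
+*	static void CL_API
+*	my_worker(
+*		IN	void*	context )
+*	{
+*		// Invoked each time this thread is signalled; process one
+*		// queued work item here, then return to the wait state.
+*	}
+*
+*	// Create the pool (zero threads means one per processor), then
+*	// wake a single worker after queuing a work item:
+*	//	cl_thread_pool_init( &g_pool, 0, my_worker, NULL, "my_pool" );
+*	//	...
+*	//	cl_thread_pool_signal( &g_pool );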
+* +* NOTES +* Each call to this function wakes up at most one waiting thread in +* the thread pool. +* +* If all threads are running, cl_thread_pool_signal has no effect. +* +* SEE ALSO +* Thread Pool +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* _CL_THREAD_POOL_H_ */ diff --git a/branches/Ndi/inc/complib/cl_timer.h b/branches/Ndi/inc/complib/cl_timer.h new file mode 100644 index 00000000..8df14d8b --- /dev/null +++ b/branches/Ndi/inc/complib/cl_timer.h @@ -0,0 +1,446 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of timer abstraction. + * + * Environment: + * All + */ + + +#ifndef _CL_TIMER_H_ +#define _CL_TIMER_H_ + + +#include + + +/****h* Component Library/Timer +* NAME +* Timer +* +* DESCRIPTION +* The Timer provides the ability to schedule a function to be invoked at +* a given time in the future. +* +* The timer callback function must not perform any blocking operations. +* +* The timer functions operate on a cl_timer_t structure which should be +* treated as opaque and should be manipulated only through the provided +* functions. +* +* SEE ALSO +* Structures: +* cl_timer_t +* +* Callbacks: +* cl_pfn_timer_callback_t +* +* Initialization: +* cl_timer_construct, cl_timer_init, cl_timer_destroy +* +* Manipulation: +* cl_timer_start, cl_timer_stop +*********/ + + +/****d* Component Library: Timer/cl_pfn_timer_callback_t +* NAME +* cl_pfn_timer_callback_t +* +* DESCRIPTION +* The cl_pfn_timer_callback_t function type defines the prototype for +* functions used to notify users of a timer expiration. +* +* SYNOPSIS +*/ +typedef void +(CL_API *cl_pfn_timer_callback_t)( + IN void* context ); +/* +* PARAMETERS +* context +* [in] Value specified in a previous call to cl_timer_init. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function type is provided as function prototype reference for the +* function provided by users as a parameter to the cl_timer_init function. +* +* SEE ALSO +* Timer, cl_timer_init +*********/ + + +/* + * This include file defines the timer structure, and depends on the timer + * callback definition. 
+ */ +#include + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****f* Component Library: Timer/cl_timer_construct +* NAME +* cl_timer_construct +* +* DESCRIPTION +* The cl_timer_construct function initializes the state of a timer. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_timer_construct( + IN cl_timer_t* const p_timer ); +/* +* PARAMETERS +* p_timer +* [in] Pointer to a cl_timer_t structure whose state to initialize. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling cl_timer_destroy without first calling cl_timer_init. +* +* Calling cl_timer_construct is a prerequisite to calling any other +* timer function except cl_timer_init. +* +* SEE ALSO +* Timer, cl_timer_init, cl_timer_destroy +*********/ + + +/****f* Component Library: Timer/cl_timer_init +* NAME +* cl_timer_init +* +* DESCRIPTION +* The cl_timer_init function initializes a timer for use. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_timer_init( + IN cl_timer_t* const p_timer, + IN cl_pfn_timer_callback_t pfn_callback, + IN const void* const context ); +/* +* PARAMETERS +* p_timer +* [in] Pointer to a cl_timer_t structure to initialize. +* +* pfn_callback +* [in] Address of a callback function to be invoked when a timer expires. +* See the cl_pfn_timer_callback_t function type definition for details +* about the callback function. +* +* context +* [in] Value to pass to the callback function. +* +* RETURN VALUES +* CL_SUCCESS if the timer was successfully initialized. +* +* CL_ERROR otherwise. +* +* NOTES +* Allows calling cl_timer_start and cl_timer_stop. +* +* SEE ALSO +* Timer, cl_timer_construct, cl_timer_destroy, cl_timer_start, +* cl_timer_stop, cl_pfn_timer_callback_t +*********/ + + +/****f* Component Library: Timer/cl_timer_destroy +* NAME +* cl_timer_destroy +* +* DESCRIPTION +* The cl_timer_destroy function performs any necessary cleanup of a timer. +* +* SYNOPSIS +*/ +CL_EXPORT void CL_API +cl_timer_destroy( + IN cl_timer_t* const p_timer ); +/* +* PARAMETERS +* p_timer +* [in] Pointer to a cl_timer_t structure to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_timer_destroy cancels any pending callbacks. +* +* This function should only be called after a call to cl_timer_construct +* or cl_timer_init. +* +* SEE ALSO +* Timer, cl_timer_construct, cl_timer_init +*********/ + + +/****f* Component Library: Timer/cl_timer_start +* NAME +* cl_timer_start +* +* DESCRIPTION +* The cl_timer_start function sets a timer to expire after a given interval. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_timer_start( + IN cl_timer_t* const p_timer, + IN const uint32_t time_ms ); +/* +* PARAMETERS +* p_timer +* [in] Pointer to a cl_timer_t structure to schedule. +* +* time_ms +* [in] Time, in milliseconds, before the timer should expire. +* +* RETURN VALUES +* CL_SUCCESS if the timer was successfully scheduled. +* +* CL_ERROR otherwise. +* +* NOTES +* cl_timer_start implicitly stops the timer before being scheduled. +* +* The interval specified by the time_ms parameter is a minimum interval. +* The timer is guaranteed to expire no sooner than the desired interval, but +* may take longer to expire. +* +* SEE ALSO +* Timer, cl_timer_stop, cl_timer_trim +*********/ + + +/****f* Component Library: Timer/cl_timer_stop +* NAME +* cl_timer_stop +* +* DESCRIPTION +* The cl_timer_stop function stops a pending timer from expiring. 
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_timer_stop(
+	IN	cl_timer_t* const	p_timer );
+/*
+* PARAMETERS
+*	p_timer
+*		[in] Pointer to a cl_timer_t structure.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Timer, cl_timer_start, cl_timer_trim
+*********/
+
+
+/****f* Component Library: Timer/cl_timer_trim
+* NAME
+*	cl_timer_trim
+*
+* DESCRIPTION
+*	The cl_timer_trim function pulls in the absolute expiration time of a
+*	timer if the current expiration time exceeds the specified interval.
+*	In other words, it sets the timer to expire after the given interval
+*	if that interval is less than the time remaining before the current
+*	expiration.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_timer_trim(
+	IN	cl_timer_t* const	p_timer,
+	IN	const uint32_t		time_ms );
+/*
+* PARAMETERS
+*	p_timer
+*		[in] Pointer to a cl_timer_t structure to schedule.
+*
+*	time_ms
+*		[in] Maximum time, in milliseconds, before the timer should expire.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the timer was successfully scheduled.
+*
+*	CL_ERROR otherwise.
+*
+* NOTES
+*	cl_timer_trim has no effect if the time interval is greater than the
+*	remaining time when the timer is set.
+*
+*	If the new interval time is less than the remaining time, cl_timer_trim
+*	implicitly stops the timer before resetting it.
+*
+*	If the timer is reset, it is guaranteed to expire no sooner than the
+*	new interval, but may take longer to expire.
+*
+* SEE ALSO
+*	Timer, cl_timer_start, cl_timer_stop
+*********/
+
+
+/****f* Component Library: Time Stamp/cl_get_time_stamp
+* NAME
+*	cl_get_time_stamp
+*
+* DESCRIPTION
+*	The cl_get_time_stamp function returns the current time stamp in
+*	microseconds since the system was booted.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint64_t CL_API
+cl_get_time_stamp( void );
+/*
+* RETURN VALUE
+*	Time elapsed, in microseconds, since the system was booted.
+*
+* SEE ALSO
+*	Timer, cl_get_time_stamp_usec, cl_get_time_stamp_sec
+*********/
+
+
+/****f* Component Library: Time Stamp/cl_get_time_stamp_usec
+* NAME
+*	cl_get_time_stamp_usec
+*
+* DESCRIPTION
+*	The cl_get_time_stamp_usec function returns the current time stamp in
+*	microseconds since the system was booted.
+*
+* SYNOPSIS
+*/
+CL_INLINE uint64_t CL_API
+cl_get_time_stamp_usec( void )
+{
+	return cl_get_time_stamp();
+}
+/*
+* RETURN VALUE
+*	Time elapsed, in microseconds, since the system was booted.
+*
+* SEE ALSO
+*	Timer, cl_get_time_stamp, cl_get_time_stamp_sec
+*********/
+
+
+/****f* Component Library: Time Stamp/cl_get_time_stamp_sec
+* NAME
+*	cl_get_time_stamp_sec
+*
+* DESCRIPTION
+*	The cl_get_time_stamp_sec function returns the current time stamp in
+*	seconds since the system was booted.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint32_t CL_API
+cl_get_time_stamp_sec( void );
+/*
+* RETURN VALUE
+*	Time elapsed, in seconds, since the system was booted.
+*
+* SEE ALSO
+*	Timer, cl_get_time_stamp
+*********/
+
+
+/****f* Component Library: Time Stamp/cl_get_tick_count
+* NAME
+*	cl_get_tick_count
+*
+* DESCRIPTION
+*	The cl_get_tick_count function returns the raw high-resolution
+*	performance counter value.
+*
+* SYNOPSIS
+*/
+CL_EXPORT uint64_t CL_API
+cl_get_tick_count( void );
+/*
+* RETURN VALUE
+*	Value of the high-resolution performance counter.
+*
+* SEE ALSO
+*	Timer, cl_get_time_stamp, cl_get_tick_freq
+*********/
+
+
+/****f* Component Library: Time Stamp/cl_get_tick_freq
+* NAME
+*	cl_get_tick_freq
+*
+* DESCRIPTION
+*	The cl_get_tick_freq function returns the frequency of the
+*	high-resolution performance counter.
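+*
+*	The frequency is in ticks per second, so an elapsed time can be
+*	derived from two cl_get_tick_count samples. For example, assuming
+*	the multiplication does not overflow a uint64_t:
+*
+*		elapsed_us = (end_ticks - start_ticks) * 1000000 / cl_get_tick_freq();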
+* +* SYNOPSIS +*/ +CL_EXPORT uint64_t CL_API +cl_get_tick_freq( void ); +/* +* RETURN VALUE +* The frequency of the high-resolution performance counter. +* +* SEE ALSO +* Timer, cl_get_time_stamp, cl_get_tick_count +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_TIMER_H_ */ diff --git a/branches/Ndi/inc/complib/cl_types.h b/branches/Ndi/inc/complib/cl_types.h new file mode 100644 index 00000000..5e43f54d --- /dev/null +++ b/branches/Ndi/inc/complib/cl_types.h @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Defines standard return codes, keywords, macros, and debug levels. + * + * Environment: + * All supported platforms + */ + + +#ifndef _CL_TYPES_H_ +#define _CL_TYPES_H_ + + +#include + + +typedef uint16_t net16_t; +typedef uint32_t net32_t; +typedef uint64_t net64_t; + + + +/****d* Component Library: Pointer Manipulation/offsetof +* NAME +* offsetof +* +* DESCRIPTION +* The offsetof macro returns the offset of a member within a structure. +* +* SYNOPSIS +* uintn_t +* offsetof( +* IN TYPE, +* IN MEMBER ); +* +* PARAMETERS +* TYPE +* [in] Name of the structure containing the specified member. +* +* MEMBER +* [in] Name of the member whose offset in the specified structure +* is to be returned. +* +* RETURN VALUE +* Number of bytes from the beginning of the structure to the +* specified member. +* +* SEE ALSO +* PARENT_STRUCT +*********/ +#ifndef offsetof +#define offsetof(TYPE, MEMBER) ((uintn_t) &((TYPE *)0)->MEMBER) +#endif + + +/****d* Component Library: Pointer Manipulation/PARENT_STRUCT +* NAME +* PARENT_STRUCT +* +* DESCRIPTION +* The PARENT_STRUCT macro returns a pointer to a structure +* given a name and pointer to one of its members. +* +* SYNOPSIS +* PARENT_TYPE* +* PARENT_STRUCT( +* IN void* const p_member, +* IN PARENT_TYPE, +* IN MEMBER_NAME ); +* +* PARAMETERS +* p_member +* [in] Pointer to the MEMBER_NAME member of a PARENT_TYPE structure. +* +* PARENT_TYPE +* [in] Name of the structure containing the specified member. +* +* MEMBER_NAME +* [in] Name of the member whose address is passed in the p_member +* parameter. +* +* RETURN VALUE +* Pointer to a structure of type PARENT_TYPE whose MEMBER_NAME member is +* located at p_member. 
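+*
+* EXAMPLE
+*	// A hedged sketch, not part of the original header: my_obj_t and
+*	// its list_item member are hypothetical names used for illustration.
+*	typedef struct _my_obj
+*	{
+*		cl_list_item_t	list_item;
+*		uint32_t		value;
+*
+*	} my_obj_t;
+*
+*	// Recover the containing my_obj_t from a pointer to its member.
+*	my_obj_t*
+*	my_obj_from_item(
+*		IN cl_list_item_t* const p_item )
+*	{
+*		return PARENT_STRUCT( p_item, my_obj_t, list_item );
+*	}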
+*
+* SEE ALSO
+*	offsetof
+*********/
+#ifndef PARENT_STRUCT
+#define PARENT_STRUCT(p_member, PARENT_TYPE, MEMBER_NAME) \
+	((PARENT_TYPE*)((uint8_t*)(p_member) - offsetof(PARENT_TYPE, MEMBER_NAME)))
+#endif
+
+/****d* Component Library/Parameter Keywords
+* NAME
+*	Parameter Keywords
+*
+* DESCRIPTION
+*	The Parameter Keywords can be used to clarify the usage of function
+*	parameters to users.
+*
+* VALUES
+*	IN
+*		Designates that the parameter is used as input to a function.
+*
+*	OUT
+*		Designates that the parameter's value will be set by the function.
+*
+*	OPTIONAL
+*		Designates that the parameter is optional, and may be NULL.
+*		The OPTIONAL keyword, if used, follows the parameter name.
+*
+* EXAMPLE
+*	// Function declaration.
+*	void*
+*	my_func(
+*		IN void* const p_param1,
+*		OUT void** const p_handle OPTIONAL );
+*
+* NOTES
+*	Multiple keywords can apply to a single parameter. The IN and OUT
+*	keywords precede the parameter type. The OPTIONAL
+*	keyword, if used, follows the parameter name.
+*********/
+#ifndef IN
+#define IN		/* Function input parameter */
+#endif
+#ifndef OUT
+#define OUT		/* Function output parameter */
+#endif
+#ifndef OPTIONAL
+#define OPTIONAL	/* Optional function parameter - NULL if not used */
+#endif
+
+
+/*%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Function Returns And Completion Codes                                     %%
+%%                                                                            %%
+%% The text for any addition to this enumerated type must be added to the    %%
+%% string array defined in cl_statustext.c.                                   %%
+%%                                                                            %%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%*/
+
+
+/****d* Component Library/Data Types
+* NAME
+*	Data Types
+*
+* DESCRIPTION
+*	The component library provides and uses explicitly sized types.
+*
+* VALUES
+*	char
+*		8-bit, defined by compiler.
+*
+*	void
+*		0-bit, defined by compiler.
+*
+*	int8_t
+*		8-bit signed integer.
+*
+*	uint8_t
+*		8-bit unsigned integer.
+*
+*	int16_t
+*		16-bit signed integer.
+*
+*	uint16_t
+*		16-bit unsigned integer.
+*
+*	net16_t
+*		16-bit network byte order value.
+*
+*	int32_t
+*		32-bit signed integer.
+*
+*	uint32_t
+*		32-bit unsigned integer.
+*
+*	net32_t
+*		32-bit network byte order value.
+*
+*	int64_t
+*		64-bit signed integer.
+*
+*	uint64_t
+*		64-bit unsigned integer.
+*
+*	net64_t
+*		64-bit network byte order value.
+*
+*	intn_t
+*		Signed natural sized integer. 32-bit on a 32-bit platform, 64-bit on
+*		a 64-bit platform.
+*
+*	uintn_t
+*		Unsigned natural sized integer. 32-bit on a 32-bit platform, 64-bit on
+*		a 64-bit platform.
+*
+*	boolean_t
+*		Integral sized. Set to TRUE or FALSE and used in logical expressions.
+*
+* NOTES
+*	Pointer types are not defined as these provide no value and can potentially
+*	lead to naming confusion.
+*********/
+
+
+/****d* Component Library: Data Types/cl_status_t
+* NAME
+*	cl_status_t
+*
+* DESCRIPTION
+*	The cl_status_t return types are used by the component library to
+*	provide detailed function return values.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_status
+{
+	CL_SUCCESS = 0,
+	CL_ERROR,
+	CL_INVALID_STATE,
+	CL_INVALID_OPERATION,
+	CL_INVALID_SETTING,
+	CL_INVALID_PARAMETER,
+	CL_INSUFFICIENT_RESOURCES,
+	CL_INSUFFICIENT_MEMORY,
+	CL_INVALID_PERMISSION,
+	CL_COMPLETED,
+	CL_NOT_DONE,
+	CL_PENDING,
+	CL_TIMEOUT,
+	CL_CANCELED,
+	CL_REJECT,
+	CL_OVERRUN,
+	CL_NOT_FOUND,
+	CL_UNAVAILABLE,
+	CL_BUSY,
+	CL_DISCONNECT,
+	CL_DUPLICATE,
+	CL_INVALID_REQUEST,
+
+	CL_STATUS_COUNT	/* should be the last value */
+
+} cl_status_t;
+/*
+* SEE ALSO
+*	Data Types, CL_STATUS_MSG
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/* Status values above converted to text for easier printing. */
+CL_EXPORT const char* cl_status_text[CL_STATUS_COUNT];
+
+
+#ifndef cl_panic
+/****f* Component Library: Error Trapping/cl_panic
+* NAME
+*	cl_panic
+*
+* DESCRIPTION
+*	Halts execution of the current process. Halts the system if called
+*	from the kernel.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_panic(
+	IN	const char* const	message,
+	IN	... );
+/*
+* PARAMETERS
+*	message
+*		[in] ANSI string formatted identically as for a call to the standard C
+*		function printf describing the cause for the panic.
+*
+*	...
+*		[in] Extra parameters for string formatting, as defined for the
+*		standard C function printf.
+*
+* RETURN VALUE
+*	This function does not return.
+*
+* NOTES
+*	The formatting of the message string is the same as for printf.
+*
+*	cl_panic sends the message to the current message logging target.
+*********/
+#endif	/* cl_panic */
+
+
+/****d* Component Library: Data Types/CL_STATUS_MSG
+* NAME
+*	CL_STATUS_MSG
+*
+* DESCRIPTION
+*	The CL_STATUS_MSG macro returns a textual representation of
+*	a cl_status_t code.
+*
+* SYNOPSIS
+*	const char*
+*	CL_STATUS_MSG(
+*		IN cl_status_t errcode );
+*
+* PARAMETERS
+*	errcode
+*		[in] cl_status_t code for which to return a text representation.
+*
+* RETURN VALUE
+*	Pointer to a string containing a textual representation of the errcode
+*	parameter.
+*
+* NOTES
+*	This macro performs boundary checking on the cl_status_t value. If the
+*	value is out of bounds, the string "invalid status code" is returned.
+*
+* SEE ALSO
+*	cl_status_t
+*********/
+#define CL_STATUS_MSG( errcode ) \
+	((errcode < CL_STATUS_COUNT)?cl_status_text[errcode]:"invalid status code")
+
+
+#if !defined( FALSE )
+#define FALSE	0
+#endif	/* !defined( FALSE ) */
+
+
+#if !defined( TRUE )
+#define TRUE	(!FALSE)
+#endif	/* !defined( TRUE ) */
+
+
+/****d* Component Library: Unreferenced Parameters/UNUSED_PARAM
+* NAME
+*	UNUSED_PARAM
+*
+* DESCRIPTION
+*	The UNUSED_PARAM macro can be used to eliminate compiler warnings related
+*	to intentionally unused formal parameters in function implementations.
+*
+* SYNOPSIS
+*	UNUSED_PARAM( P )
+*
+* EXAMPLE
+*	void my_func( int32_t value )
+*	{
+*		UNUSED_PARAM( value );
+*	}
+*********/
+
+
+/****d* Component Library/Object States
+* NAME
+*	Object States
+*
+* DESCRIPTION
+*	The object states enumerated type defines the valid states of components.
+*
+* SYNOPSIS
+*/
+typedef enum _cl_state
+{
+	CL_UNINITIALIZED = 1,
+	CL_INITIALIZED,
+	CL_DESTROYING,
+	CL_DESTROYED
+
+} cl_state_t;
+/*
+* VALUES
+*	CL_UNINITIALIZED
+*		Indicates that initialization was not invoked successfully.
+*
+*	CL_INITIALIZED
+*		Indicates initialization was successful.
+*
+*	CL_DESTROYING
+*		Indicates that the object is undergoing destruction.
+*
+*	CL_DESTROYED
+*		Indicates that the object's destructor has already been called. Most
+*		objects set their final state to CL_DESTROYED before freeing the
+*		memory associated with the object.
+*********/
+
+
+/****d* Component Library: Object States/cl_is_state_valid
+* NAME
+*	cl_is_state_valid
+*
+* DESCRIPTION
+*	The cl_is_state_valid function returns whether a state has a valid value.
+*
+* SYNOPSIS
+*/
+CL_INLINE boolean_t CL_API
+cl_is_state_valid(
+	IN	const cl_state_t	state )
+{
+	return( (state == CL_UNINITIALIZED) || (state == CL_INITIALIZED) ||
+		(state == CL_DESTROYING) || (state == CL_DESTROYED) );
+}
+/*
+* PARAMETERS
+*	state
+*		State whose value to validate.
+*
+* RETURN VALUES
+*	TRUE if the specified state has a valid value.
+*
+*	FALSE otherwise.
+*
+* NOTES
+*	This function is used in debug builds to check for valid states. If an
+*	uninitialized object is passed, the memory for the state may cause the
+*	state to have an invalid value.
+*
+* SEE ALSO
+*	Object States
+*********/
+
+
+#ifdef __cplusplus
+}
+#endif	/* __cplusplus */
+
+
+#endif	/* _CL_TYPES_H_ */
diff --git a/branches/Ndi/inc/complib/cl_vector.h b/branches/Ndi/inc/complib/cl_vector.h
new file mode 100644
index 00000000..86dad3f2
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_vector.h
@@ -0,0 +1,1004 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *	- Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *	- Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	This file contains vector definitions. Vector provides dynamically
+ *	resizable array functionality. Objects in a Vector are not relocated
+ *	when the array is resized.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_VECTOR_H_
+#define _CL_VECTOR_H_
+
+
+#include <complib/cl_qlist.h>
+
+
+/****h* Component Library/Vector
+* NAME
+*	Vector
+*
+* DESCRIPTION
+*	The Vector is a self-sizing array. Like a traditional array, a vector
+*	allows efficient constant time access to elements with a specified index.
+*	A vector grows transparently as the user adds elements to the array.
+*
+*	As the vector grows in size, it does not relocate existing elements in
+*	memory. This allows using pointers to elements stored in a Vector.
+*
+*	Users can supply an initializer function that allows a vector to ensure
+*	that new items added to the vector are properly initialized. A vector
+*	calls the initializer function on a per-object basis when growing the
+*	array. The initializer is optional.
+*
+*	The initializer function can fail, and returns a cl_status_t. The vector
+*	will call the destructor function, if provided, for an element that
+*	failed initialization. If an initializer fails, a vector does not call
+*	the initializer for objects in the remainder of the new memory allocation.
+*
+*	The cl_vector_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_vector_t
+*
+*	Callbacks:
+*		cl_pfn_vec_init_t, cl_pfn_vec_dtor_t, cl_pfn_vec_apply_t,
+*		cl_pfn_vec_find_t
+*
+*	Item Manipulation:
+*		cl_vector_set_obj, cl_vector_obj
+*
+*	Initialization:
+*		cl_vector_construct, cl_vector_init, cl_vector_destroy
+*
+*	Manipulation:
+*		cl_vector_get_capacity, cl_vector_set_capacity,
+*		cl_vector_get_size, cl_vector_set_size, cl_vector_set_min_size,
+*		cl_vector_get_ptr, cl_vector_get, cl_vector_at, cl_vector_set
+*
+*	Search:
+*		cl_vector_find_from_start, cl_vector_find_from_end,
+*		cl_vector_apply_func
+*********/
+
+
+/****d* Component Library: Vector/cl_pfn_vec_init_t
+* NAME
+*	cl_pfn_vec_init_t
+*
+* DESCRIPTION
+*	The cl_pfn_vec_init_t function type defines the prototype for functions
+*	used as initializers for elements being allocated by a vector.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_vec_init_t)(
+	IN	void* const	p_element,
+	IN	void*		context );
+/*
+* PARAMETERS
+*	p_element
+*		[in] Pointer to an element being added to a vector.
+*
+*	context
+*		[in] Context provided in a call to cl_vector_init.
+*
+* RETURN VALUES
+*	Return CL_SUCCESS to indicate that the element was initialized
+*	successfully.
+*
+*	Other cl_status_t values are returned to callers by the cl_vector_init,
+*	cl_vector_set_size, and cl_vector_set_min_size functions.
+*
+*	In situations where the vector's size needs to grow in order to satisfy
+*	a call to cl_vector_set, a non-successful status returned by the
+*	initializer callback causes the growth to stop.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the initializer function provided by users as an optional parameter to
+*	the cl_vector_init function.
+*
+* SEE ALSO
+*	Vector, cl_vector_init
+*********/
+
+
+/****d* Component Library: Vector/cl_pfn_vec_dtor_t
+* NAME
+*	cl_pfn_vec_dtor_t
+*
+* DESCRIPTION
+*	The cl_pfn_vec_dtor_t function type defines the prototype for functions
+*	used as destructors for elements being deallocated from a vector.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_vec_dtor_t)(
+	IN	void* const	p_element,
+	IN	void*		context );
+/*
+* PARAMETERS
+*	p_element
+*		[in] Pointer to an element being deallocated from a vector.
+*
+*	context
+*		[in] Context provided in a call to cl_vector_init.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the destructor function provided by users as an optional parameter to
+*	the cl_vector_init function.
+*
+* SEE ALSO
+*	Vector, cl_vector_init
+*********/
+
+
+/****d* Component Library: Vector/cl_pfn_vec_apply_t
+* NAME
+*	cl_pfn_vec_apply_t
+*
+* DESCRIPTION
+*	The cl_pfn_vec_apply_t function type defines the prototype for functions
+*	used to iterate elements in a vector.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_vec_apply_t)(
+	IN	const size_t	index,
+	IN	void* const		p_element,
+	IN	void*			context );
+/*
+* PARAMETERS
+*	index
+*		[in] Index of the element.
+*
+*	p_element
+*		[in] Pointer to an element at the specified index in the vector.
+*
+*	context
+*		[in] Context provided in a call to cl_vector_apply_func.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function type is provided as function prototype reference for
+*	the function passed by users as a parameter to the cl_vector_apply_func
+*	function.
+*
+* SEE ALSO
+*	Vector, cl_vector_apply_func
+*********/
+
+
+/****d* Component Library: Vector/cl_pfn_vec_find_t
+* NAME
+*	cl_pfn_vec_find_t
+*
+* DESCRIPTION
+*	The cl_pfn_vec_find_t function type defines the prototype for functions
+*	used to find elements in a vector.
+*
+* SYNOPSIS
+*/
+typedef cl_status_t
+(CL_API *cl_pfn_vec_find_t)(
+	IN	const size_t		index,
+	IN	const void* const	p_element,
+	IN	void*				context );
+/*
+* PARAMETERS
+*	index
+*		[in] Index of the element.
+*
+*	p_element
+*		[in] Pointer to an element at the specified index in the vector.
+*
+*	context
+*		[in] Context provided in a call to cl_vector_find_from_start or
+*		cl_vector_find_from_end.
+*
+* RETURN VALUES
+*	Return CL_SUCCESS if the element was found. This stops vector iteration.
+*
+*	CL_NOT_FOUND to continue the vector iteration.
+*
+* NOTES
+*	This function type is provided as function prototype reference for the
+*	function provided by users as a parameter to the cl_vector_find_from_start
+*	and cl_vector_find_from_end functions.
+*
+* SEE ALSO
+*	Vector, cl_vector_find_from_start, cl_vector_find_from_end
+*********/
+
+
+/****i* Component Library: Vector/cl_pfn_vec_copy_t
+* NAME
+*	cl_pfn_vec_copy_t
+*
+* DESCRIPTION
+*	The cl_pfn_vec_copy_t function type defines the prototype for functions
+*	used to copy elements in a vector.
+*
+* SYNOPSIS
+*/
+typedef void
+(CL_API *cl_pfn_vec_copy_t)(
+	IN	void* const			p_dest,
+	IN	const void* const	p_src,
+	IN	const size_t		size );
+/*
+* PARAMETERS
+*	p_dest
+*		[in] Pointer to the destination buffer into which to copy p_src.
+*
+*	p_src
+*		[in] Pointer to the source buffer from which to copy.
+*
+*	size
+*		[in] Number of bytes to copy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Vector
+*********/
+
+
+/****s* Component Library: Vector/cl_vector_t
+* NAME
+*	cl_vector_t
+*
+* DESCRIPTION
+*	Vector structure.
+*
+*	The cl_vector_t structure should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_vector
+{
+	size_t				size;
+	size_t				grow_size;
+	size_t				capacity;
+	size_t				element_size;
+	cl_pfn_vec_init_t	pfn_init;
+	cl_pfn_vec_dtor_t	pfn_dtor;
+	cl_pfn_vec_copy_t	pfn_copy;
+	const void			*context;
+	cl_qlist_t			alloc_list;
+	void				**p_ptr_array;
+	cl_state_t			state;
+
+} cl_vector_t;
+/*
+* FIELDS
+*	size
+*		Number of elements successfully initialized in the vector.
+*
+*	grow_size
+*		Number of elements to allocate when growing.
+*
+*	capacity
+*		Total number of elements allocated.
+*
+*	element_size
+*		Size of each element.
+*
+*	pfn_init
+*		User-supplied element initializer.
+*
+*	pfn_dtor
+*		User-supplied element destructor.
+*
+*	pfn_copy
+*		Copy operator.
+*
+*	context
+*		User context for callbacks.
+*
+*	alloc_list
+*		List of allocations.
+*
+*	p_ptr_array
+*		Internal array of pointers to elements.
+*
+*	state
+*		State of the vector.
+*
+* SEE ALSO
+*	Vector
+*********/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+/****f* Component Library: Vector/cl_vector_construct
+* NAME
+*	cl_vector_construct
+*
+* DESCRIPTION
+*	The cl_vector_construct function constructs a vector.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_vector_construct(
+	IN	cl_vector_t* const	p_vector );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_vector_destroy without first calling cl_vector_init.
+*
+*	Calling cl_vector_construct is a prerequisite to calling any other
+*	vector function except cl_vector_init.
+*
+* SEE ALSO
+*	Vector, cl_vector_init, cl_vector_destroy
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_init
+* NAME
+*	cl_vector_init
+*
+* DESCRIPTION
+*	The cl_vector_init function initializes a vector for use.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_vector_init(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		min_size,
+	IN	const size_t		grow_size,
+	IN	const size_t		element_size,
+	IN	cl_pfn_vec_init_t	pfn_init OPTIONAL,
+	IN	cl_pfn_vec_dtor_t	pfn_dtor OPTIONAL,
+	IN	const void* const	context );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure to initialize.
+*
+*	min_size
+*		[in] Initial number of elements.
+*
+*	grow_size
+*		[in] Number of elements to allocate when incrementally growing
+*		the vector. A value of zero disables automatic growth.
+*
+*	element_size
+*		[in] Size of each element.
+*
+*	pfn_init
+*		[in] Initializer callback to invoke for every new element.
+*		See the cl_pfn_vec_init_t function type declaration for details about
+*		the callback function.
+*
+*	pfn_dtor
+*		[in] Destructor callback to invoke for elements being deallocated.
+*		See the cl_pfn_vec_dtor_t function type declaration for details about
+*		the callback function.
+*
+*	context
+*		[in] Value to pass to the callback functions to provide context.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the vector was initialized successfully.
+*
+*	CL_INSUFFICIENT_MEMORY if the initialization failed.
+*
+*	cl_status_t value returned by the optional initializer function specified
+*	by the pfn_init parameter.
+*
+* NOTES
+*	The initializer function, if any, is invoked for every new element in
+*	the array.
+*
+* SEE ALSO
+*	Vector, cl_vector_construct, cl_vector_destroy, cl_vector_set,
+*	cl_vector_get, cl_vector_get_ptr, cl_vector_at
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_destroy
+* NAME
+*	cl_vector_destroy
+*
+* DESCRIPTION
+*	The cl_vector_destroy function destroys a vector.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_vector_destroy(
+	IN	cl_vector_t* const	p_vector );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_vector_destroy frees all memory allocated for the vector. The vector
+*	is left initialized to a zero capacity and size.
+*
+*	This function should only be called after a call to cl_vector_construct
+*	or cl_vector_init.
+*
+* SEE ALSO
+*	Vector, cl_vector_construct, cl_vector_init
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_get_capacity
+* NAME
+*	cl_vector_get_capacity
+*
+* DESCRIPTION
+*	The cl_vector_get_capacity function returns the capacity of a vector.
+* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_vector_get_capacity( + IN const cl_vector_t* const p_vector ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + return( p_vector->capacity ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure whose capacity to return. +* +* RETURN VALUE +* Capacity, in elements, of the vector. +* +* NOTES +* The capacity is the number of elements that the vector can store, and +* can be greater than the number of elements stored. To get the number of +* elements stored in the vector, use cl_vector_get_size. +* +* SEE ALSO +* Vector, cl_vector_set_capacity, cl_vector_get_size +*********/ + + +/****f* Component Library: Vector/cl_vector_get_size +* NAME +* cl_vector_get_size +* +* DESCRIPTION +* The cl_vector_get_size function returns the size of a vector. +* +* SYNOPSIS +*/ +CL_INLINE size_t CL_API +cl_vector_get_size( + IN const cl_vector_t* const p_vector ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + return( p_vector->size ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure whose size to return. +* +* RETURN VALUE +* Size, in elements, of the vector. +* +* SEE ALSO +* Vector, cl_vector_set_size, cl_vector_get_capacity +*********/ + + +/****f* Component Library: Vector/cl_vector_get_ptr +* NAME +* cl_vector_get_ptr +* +* DESCRIPTION +* The cl_vector_get_ptr function returns a pointer to an element +* stored in a vector at a specified index. +* +* SYNOPSIS +*/ +CL_INLINE void* CL_API +cl_vector_get_ptr( + IN const cl_vector_t* const p_vector, + IN const size_t index ) +{ + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + + return( p_vector->p_ptr_array[index] ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure from which to get a +* pointer to an element. +* +* index +* [in] Index of the element. +* +* RETURN VALUE +* Pointer to the element stored at specified index. +* +* NOTES +* cl_vector_get_ptr provides constant access times regardless of the index. +* +* cl_vector_get_ptr does not perform boundary checking. Callers are +* responsible for providing an index that is within the range of the vector. +* +* SEE ALSO +* Vector, cl_vector_get, cl_vector_at, cl_vector_set, cl_vector_get_size +*********/ + + +/****f* Component Library: Vector/cl_vector_get +* NAME +* cl_vector_get +* +* DESCRIPTION +* The cl_vector_get function copies an element stored in a vector at a +* specified index. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_vector_get( + IN const cl_vector_t* const p_vector, + IN const size_t index, + OUT void* const p_element ) +{ + void *p_src; + + CL_ASSERT( p_vector ); + CL_ASSERT( p_vector->state == CL_INITIALIZED ); + CL_ASSERT( p_element ); + + /* Get a pointer to the element. */ + p_src = cl_vector_get_ptr( p_vector, index ); + p_vector->pfn_copy( p_element, p_src, p_vector->element_size ); +} +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure from which to get a copy of +* an element. +* +* index +* [in] Index of the element. +* +* p_element +* [out] Pointer to storage for the element. Contains a copy of the +* desired element upon successful completion of the call. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_vector_get provides constant time access regardless of the index. 
+* +* cl_vector_get does not perform boundary checking on the vector, and +* callers are responsible for providing an index that is within the range +* of the vector. To access elements after performing boundary checks, +* use cl_vector_at. +* +* The p_element parameter contains a copy of the desired element upon +* return from this function. +* +* SEE ALSO +* Vector, cl_vector_get_ptr, cl_vector_at +*********/ + + +/****f* Component Library: Vector/cl_vector_at +* NAME +* cl_vector_at +* +* DESCRIPTION +* The cl_vector_at function copies an element stored in a vector at a +* specified index, performing boundary checks. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_vector_at( + IN const cl_vector_t* const p_vector, + IN const size_t index, + OUT void* const p_element ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure from which to get a copy of +* an element. +* +* index +* [in] Index of the element. +* +* p_element +* [out] Pointer to storage for the element. Contains a copy of the +* desired element upon successful completion of the call. +* +* RETURN VALUES +* CL_SUCCESS if an element was found at the specified index. +* +* CL_INVALID_SETTING if the index was out of range. +* +* NOTES +* cl_vector_at provides constant time access regardless of the index, and +* performs boundary checking on the vector. +* +* Upon success, the p_element parameter contains a copy of the desired element. +* +* SEE ALSO +* Vector, cl_vector_get, cl_vector_get_ptr +*********/ + + +/****f* Component Library: Vector/cl_vector_set +* NAME +* cl_vector_set +* +* DESCRIPTION +* The cl_vector_set function sets the element at the specified index. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_vector_set( + IN cl_vector_t* const p_vector, + IN const size_t index, + IN void* const p_element ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure into which to store +* an element. +* +* index +* [in] Index of the element. +* +* p_element +* [in] Pointer to an element to store in the vector. +* +* RETURN VALUES +* CL_SUCCESS if the element was successfully set. +* +* CL_INSUFFICIENT_MEMORY if the vector could not be resized to accommodate +* the new element. +* +* NOTES +* cl_vector_set grows the vector as needed to accommodate the new element, +* unless the grow_size parameter passed into the cl_vector_init function +* was zero. +* +* SEE ALSO +* Vector, cl_vector_get +*********/ + + +/****f* Component Library: Vector/cl_vector_set_capacity +* NAME +* cl_vector_set_capacity +* +* DESCRIPTION +* The cl_vector_set_capacity function reserves memory in a vector for a +* specified number of elements. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_vector_set_capacity( + IN cl_vector_t* const p_vector, + IN const size_t new_capacity ); +/* +* PARAMETERS +* p_vector +* [in] Pointer to a cl_vector_t structure whose capacity to set. +* +* new_capacity +* [in] Total number of elements for which the vector should +* allocate memory. +* +* RETURN VALUES +* CL_SUCCESS if the capacity was successfully set. +* +* CL_INSUFFICIENT_MEMORY if there was not enough memory to satisfy the +* operation. The vector is left unchanged. +* +* NOTES +* cl_vector_set_capacity increases the capacity of the vector. It does +* not change the size of the vector. If the requested capacity is less +* than the current capacity, the vector is left unchanged. 
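+*
+* EXAMPLE
+*	// A hedged usage sketch, not part of the original header: my_vec is
+*	// a hypothetical, already initialized cl_vector_t.
+*	cl_status_t	status;
+*
+*	// Reserve room for 64 elements up front so that subsequent
+*	// cl_vector_set calls do not trigger incremental growth.
+*	status = cl_vector_set_capacity( &my_vec, 64 );
+*	if( status != CL_SUCCESS )
+*		return status;	// propagate the allocation failure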
+*
+* SEE ALSO
+*	Vector, cl_vector_get_capacity, cl_vector_set_size,
+*	cl_vector_set_min_size
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_set_size
+* NAME
+*	cl_vector_set_size
+*
+* DESCRIPTION
+*	The cl_vector_set_size function resizes a vector, either increasing or
+*	decreasing its size.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_vector_set_size(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		size );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure whose size to set.
+*
+*	size
+*		[in] Number of elements desired in the vector.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the size of the vector was set successfully.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to complete the
+*	operation. The vector is left unchanged.
+*
+* NOTES
+*	cl_vector_set_size sets the vector to the specified size. If size is
+*	smaller than the current size of the vector, the size is reduced.
+*	The destructor function, if any, will be invoked for all elements that
+*	are above size. Likewise, the initializer, if any, will be invoked for
+*	all new elements.
+*
+*	This function can only fail if size is larger than the current capacity.
+*
+* SEE ALSO
+*	Vector, cl_vector_get_size, cl_vector_set_min_size,
+*	cl_vector_set_capacity
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_set_min_size
+* NAME
+*	cl_vector_set_min_size
+*
+* DESCRIPTION
+*	The cl_vector_set_min_size function resizes a vector to a specified size
+*	if the vector is smaller than the specified size.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_vector_set_min_size(
+	IN	cl_vector_t* const	p_vector,
+	IN	const size_t		min_size );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure whose minimum size to set.
+*
+*	min_size
+*		[in] Minimum number of elements that the vector should contain.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the vector size is greater than or equal to min_size. This
+*	could indicate that the vector's size was increased to min_size or that
+*	the vector was already of sufficient size.
+*
+*	CL_INSUFFICIENT_MEMORY if there was not enough memory to resize the
+*	vector. The vector is left unchanged.
+*
+* NOTES
+*	If min_size is smaller than the current size of the vector, the vector is
+*	unchanged. The vector is unchanged if the size could not be changed due
+*	to insufficient memory being available to perform the operation.
+*
+* SEE ALSO
+*	Vector, cl_vector_get_size, cl_vector_set_size, cl_vector_set_capacity
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_apply_func
+* NAME
+*	cl_vector_apply_func
+*
+* DESCRIPTION
+*	The cl_vector_apply_func function invokes a specified function for every
+*	element in a vector.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_vector_apply_func(
+	IN	const cl_vector_t* const	p_vector,
+	IN	cl_pfn_vec_apply_t			pfn_callback,
+	IN	const void* const			context );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure whose elements to iterate.
+*
+*	pfn_callback
+*		[in] Function invoked for every element in the array.
+*		See the cl_pfn_vec_apply_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	cl_vector_apply_func invokes the specified function for every element
+*	in the vector, starting from the beginning of the vector.
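+*
+* EXAMPLE
+*	// A hedged sketch, not part of the original header: zero_cb is a
+*	// hypothetical callback that clears vector elements assumed to be
+*	// of type uint32_t.
+*	static void CL_API
+*	zero_cb(
+*		IN	const size_t	index,
+*		IN	void* const		p_element,
+*		IN	void*			context )
+*	{
+*		UNUSED_PARAM( index );
+*		UNUSED_PARAM( context );
+*		*(uint32_t*)p_element = 0;
+*	}
+*
+*	// Clear every element currently stored in the vector:
+*	//	cl_vector_apply_func( &my_vec, zero_cb, NULL );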
+*
+* SEE ALSO
+*	Vector, cl_vector_find_from_start, cl_vector_find_from_end,
+*	cl_pfn_vec_apply_t
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_find_from_start
+* NAME
+*	cl_vector_find_from_start
+*
+* DESCRIPTION
+*	The cl_vector_find_from_start function uses a specified function to
+*	search for elements in a vector starting from the lowest index.
+*
+* SYNOPSIS
+*/
+CL_EXPORT size_t CL_API
+cl_vector_find_from_start(
+	IN	const cl_vector_t* const	p_vector,
+	IN	cl_pfn_vec_find_t			pfn_callback,
+	IN	const void* const			context );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure to search.
+*
+*	pfn_callback
+*		[in] Function invoked to determine if a match was found.
+*		See the cl_pfn_vec_find_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+* RETURN VALUES
+*	Index of the element, if found.
+*
+*	Size of the vector if the element was not found.
+*
+* NOTES
+*	cl_vector_find_from_start does not remove the found element from
+*	the vector. The index of the element is returned when the function
+*	provided by the pfn_callback parameter returns CL_SUCCESS.
+*
+* SEE ALSO
+*	Vector, cl_vector_find_from_end, cl_vector_apply_func, cl_pfn_vec_find_t
+*********/
+
+
+/****f* Component Library: Vector/cl_vector_find_from_end
+* NAME
+*	cl_vector_find_from_end
+*
+* DESCRIPTION
+*	The cl_vector_find_from_end function uses a specified function to search
+*	for elements in a vector starting from the highest index.
+*
+* SYNOPSIS
+*/
+CL_EXPORT size_t CL_API
+cl_vector_find_from_end(
+	IN	const cl_vector_t* const	p_vector,
+	IN	cl_pfn_vec_find_t			pfn_callback,
+	IN	const void* const			context );
+/*
+* PARAMETERS
+*	p_vector
+*		[in] Pointer to a cl_vector_t structure to search.
+*
+*	pfn_callback
+*		[in] Function invoked to determine if a match was found.
+*		See the cl_pfn_vec_find_t function type declaration for details
+*		about the callback function.
+*
+*	context
+*		[in] Value to pass to the callback function.
+*
+* RETURN VALUES
+*	Index of the element, if found.
+*
+*	Size of the vector if the element was not found.
+*
+* NOTES
+*	cl_vector_find_from_end does not remove the found element from
+*	the vector. The index of the element is returned when the function
+*	provided by the pfn_callback parameter returns CL_SUCCESS.
+*
+* SEE ALSO
+*	Vector, cl_vector_find_from_start, cl_vector_apply_func,
+*	cl_pfn_vec_find_t
+*********/
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+
+#endif	/* _CL_VECTOR_H_ */
diff --git a/branches/Ndi/inc/complib/cl_waitobj.h b/branches/Ndi/inc/complib/cl_waitobj.h
new file mode 100644
index 00000000..7138b1bb
--- /dev/null
+++ b/branches/Ndi/inc/complib/cl_waitobj.h
@@ -0,0 +1,377 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *	- Redistributions of source code must retain the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer.
+ *
+ *	- Redistributions in binary form must reproduce the above
+ *	  copyright notice, this list of conditions and the following
+ *	  disclaimer in the documentation and/or other materials
+ *	  provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of wait object.
+ *
+ * Environment:
+ *	All
+ */
+
+
+#ifndef _CL_WAITOBJ_H_
+#define _CL_WAITOBJ_H_
+
+
+#include
+
+
+/****h* Component Library/Wait Object
+* NAME
+*	Wait Object
+*
+* DESCRIPTION
+*	The Wait Object provides the capability for a user mode process to
+*	create and manipulate a wait object that can also be manipulated from
+*	kernel mode.
+*
+* SEE ALSO
+*	Structures:
+*		cl_waitobj_handle_t
+*
+*	User Mode Initialization/Destruction:
+*		cl_waitobj_create
+*		cl_waitobj_destroy
+*
+*	Kernel Mode Access:
+*		cl_waitobj_ref
+*		cl_waitobj_deref
+*
+*	Manipulation:
+*		cl_waitobj_signal
+*		cl_waitobj_reset
+*		cl_waitobj_wait_on
+******/
+
+
+/****d* Component Library: Wait Object/cl_waitobj_handle_t
+* NAME
+*	cl_waitobj_handle_t
+*
+* DESCRIPTION
+*	Defines the handle for an OS wait object.
+*
+* NOTES
+*	The wait object handle should be treated as opaque and is defined
+*	differently depending on the target environment.
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+*	cl_waitobj_ref, cl_waitobj_deref, cl_waitobj_signal,
+*	cl_waitobj_reset, cl_waitobj_wait_on
+******/
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+#if defined(CL_KERNEL)
+
+/****f* Component Library: Wait Object/cl_waitobj_ref
+* NAME
+*	cl_waitobj_ref
+*
+* DESCRIPTION
+*	The cl_waitobj_ref function validates a user mode wait object handle
+*	and returns a kernel mode wait object handle. A reference is taken
+*	on the object to prevent its destruction even if the user mode
+*	application destroys it.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_waitobj_handle_t CL_API
+cl_waitobj_ref(
+	IN	void	*h_user_wait_obj );
+/*
+* PARAMETERS
+*	h_user_wait_obj
+*		[in] A wait object handle passed from user mode.
+*
+* RETURN VALUES
+*	Returns a kernel wait object handle upon success. The returned handle
+*	should only be used as a parameter to kernel mode calls.
+*
+*	Returns NULL in case of failure.
+*
+* NOTES
+*	This function is only available in kernel mode.
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_handle_t, cl_waitobj_deref,
+*	cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+******/
+
+
+/****f* Component Library: Wait Object/cl_waitobj_deref
+* NAME
+*	cl_waitobj_deref
+*
+* DESCRIPTION
+*	The cl_waitobj_deref function releases a reference on a kernel mode
+*	wait object handle and allows the wait object to be destroyed.
+*
+* SYNOPSIS
+*/
+CL_EXPORT void CL_API
+cl_waitobj_deref(
+	IN	cl_waitobj_handle_t	h_kernel_wait_obj );
+/*
+* PARAMETERS
+*	h_kernel_wait_obj
+*		[in] A wait object handle returned by a previous call to
+*		cl_waitobj_ref.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function is only available in kernel mode.
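+*
+* EXAMPLE
+*	// A hedged kernel-mode sketch, not part of the original header:
+*	// take a reference on a handle passed up from user mode, signal
+*	// the object, then release the reference.
+*	cl_waitobj_handle_t	h_kobj;
+*
+*	h_kobj = cl_waitobj_ref( h_user_wait_obj );
+*	if( !h_kobj )
+*		return CL_INVALID_PARAMETER;
+*
+*	cl_waitobj_signal( h_kobj );
+*	cl_waitobj_deref( h_kobj );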
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_handle_t, cl_waitobj_ref,
+*	cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+******/
+
+#else	/* CL_KERNEL */
+
+/****f* Component Library: Wait Object/cl_waitobj_create
+* NAME
+*	cl_waitobj_create
+*
+* DESCRIPTION
+*	The cl_waitobj_create function creates a wait object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_waitobj_create(
+	IN	const boolean_t				manual_reset,
+	OUT	cl_waitobj_handle_t* const	ph_wait_obj );
+/*
+* PARAMETERS
+*	manual_reset
+*		[in] If FALSE, indicates that the event resets itself after releasing
+*		a single waiter. If TRUE, the event remains in the signalled state
+*		until explicitly reset by a call to cl_waitobj_reset.
+*
+*	ph_wait_obj
+*		[out] Pointer to a wait object handle set upon successful creation.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the wait object was created successfully.
+*
+*	CL_ERROR if the wait object creation failed.
+*
+* NOTES
+*	This function is only available in user mode.
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_handle_t, cl_waitobj_destroy,
+*	cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+******/
+
+
+/****f* Component Library: Wait Object/cl_waitobj_destroy
+* NAME
+*	cl_waitobj_destroy
+*
+* DESCRIPTION
+*	The cl_waitobj_destroy function destroys a wait object.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_waitobj_destroy(
+	IN	cl_waitobj_handle_t	h_wait_obj );
+/*
+* PARAMETERS
+*	h_wait_obj
+*		[in] A handle to the wait object to destroy, obtained by a previous
+*		call to cl_waitobj_create.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the wait object handle is destroyed.
+*
+*	CL_INVALID_PARAMETER if the wait object handle is invalid.
+*
+* NOTES
+*	This function is only available in user mode.
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_handle_t, cl_waitobj_create,
+*	cl_waitobj_signal, cl_waitobj_reset, cl_waitobj_wait_on
+*********/
+
+#endif	/* CL_KERNEL */
+
+/****f* Component Library: Wait Object/cl_waitobj_signal
+* NAME
+*	cl_waitobj_signal
+*
+* DESCRIPTION
+*	The cl_waitobj_signal function sets a wait object to the signalled
+*	state and releases one or more waiting threads.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_waitobj_signal(
+	IN	cl_waitobj_handle_t	h_wait_obj );
+/*
+* PARAMETERS
+*	h_wait_obj
+*		[in] A handle to the wait object that needs to be signaled.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the event was successfully signalled.
+*
+*	CL_ERROR otherwise.
+*
+* NOTES
+*	For auto-reset wait objects, the wait object is reset automatically once
+*	a wait operation is satisfied.
+*
+*	Triggering the wait object multiple times does not guarantee that the same
+*	number of wait operations are satisfied. This is because wait objects are
+*	either in a signalled or non-signalled state, and triggering a wait object
+*	that is already in the signalled state has no effect.
+*
+*	In kernel mode, a pointer to a cl_event_t can safely be used instead of
+*	a wait object handle.
+*
+* SEE ALSO
+*	Wait Object, cl_waitobj_create, cl_waitobj_destroy,
+*	cl_waitobj_ref, cl_waitobj_deref,
+*	cl_waitobj_reset, cl_waitobj_wait_on
+*********/
+
+
+/****f* Component Library: Wait Object/cl_waitobj_reset
+* NAME
+*	cl_waitobj_reset
+*
+* DESCRIPTION
+*	The cl_waitobj_reset function sets a wait object to the non-signalled
+*	state.
+*
+* SYNOPSIS
+*/
+CL_EXPORT cl_status_t CL_API
+cl_waitobj_reset(
+	IN	cl_waitobj_handle_t	h_wait_obj );
+/*
+* PARAMETERS
+*	h_wait_obj
+*		[in] A handle to the wait object that needs to be reset.
+* +* RETURN VALUES +* CL_SUCCESS if the wait object was successfully reset. +* +* CL_ERROR otherwise. +* +* NOTES +* In kernel mode, a pointer to a cl_event_t can safely be used instead of +* a wait object handle. +* +* SEE ALSO +* Wait Object, cl_waitobj_create, cl_waitobj_destroy, +* cl_waitobj_ref, cl_waitobj_deref, +* cl_waitobj_signal, cl_waitobj_wait_on +*********/ + + +/****f* Component Library: Wait Object/cl_waitobj_wait_on +* NAME +* cl_waitobj_wait_on +* +* DESCRIPTION +* The cl_waitobj_wait_on function waits for the specified wait object to be +* triggered for a minimum amount of time. +* +* SYNOPSIS +*/ +CL_EXPORT cl_status_t CL_API +cl_waitobj_wait_on( + IN cl_waitobj_handle_t h_wait_obj, + IN const uint32_t wait_us, + IN const boolean_t interruptible ); +/* +* PARAMETERS +* h_wait_obj +* [in] A handle to the wait object on which to wait. +* +* wait_us +* [in] Number of microseconds to wait. +* +* interruptible +* [in] Indicates whether the wait operation can be interrupted +* by external signals. +* +* RETURN VALUES +* CL_SUCCESS if the wait operation succeeded in response to the wait object +* being set. +* +* CL_TIMEOUT if the specified time period elapses. +* +* CL_NOT_DONE if the wait was interrupted by an external signal. +* +* CL_ERROR if the wait operation failed. +* +* NOTES +* If wait_us is set to EVENT_NO_TIMEOUT, the function will wait until the +* wait object is triggered and never timeout. +* +* If the timeout value is zero, this function simply tests the state of +* the wait object. +* +* If the wait object is already in the signalled state at the time of the call +* to cl_waitobj_wait_on, the call completes immediately with CL_SUCCESS. +* +* In kernel mode, a pointer to a cl_event_t can safely be used instead of +* a wait object handle. +* +* SEE ALSO +* Wait Object, cl_waitobj_create, cl_waitobj_destroy, +* cl_waitobj_ref, cl_waitobj_deref, +* cl_waitobj_signal, cl_waitobj_reset +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + + +#endif /* _CL_WAITOBJ_H_ */ diff --git a/branches/Ndi/inc/complib/comp_lib.h b/branches/Ndi/inc/complib/comp_lib.h new file mode 100644 index 00000000..39f5e0cb --- /dev/null +++ b/branches/Ndi/inc/complib/comp_lib.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +/* + * Abstract: + * One stop shopping for component library headers. + * + * Environment: + * All + */ + + +#ifndef _CL_LIB_H_ +#define _CL_LIB_H_ + + +/****h* Component Library/Component Library +* NAME +* component library +* +* DESCRIPTION +* The component library is a collection of components that can be used to +* create complex projects quickly and reliably. +* +* The component library simplifies development by eliminating the need to +* re-implement existing functionality. This contributes to shorter +* development cycles as well as smaller code bases, helping reduce the +* number of bugs by leveraging tried and tested code. +* +* The component library also provides the same interface in multiple +* environments, such as kernel mode and user mode, allowing code to be used +* in both, again reducing code duplication and development life cycles. +* +* Components of the library all follow the same usage model, as follows: +* - The constructor for all components should be called before any other +* function for that component. It is acceptable to call the initializer +* without first calling the constructor. +* +* - The initializer for all components must be called successfully +* before any function manipulating that component is called. +* +* - The destructor for all components must be called if the initializer +* was called. +* +* In a debug build, the components assert that the proper sequence is +* followed. +*********/ + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +#endif /* _CL_LIB_H_ */ diff --git a/branches/Ndi/inc/iba/ib_al.h b/branches/Ndi/inc/iba/ib_al.h new file mode 100644 index 00000000..0fb3354f --- /dev/null +++ b/branches/Ndi/inc/iba/ib_al.h @@ -0,0 +1,10157 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#if !defined(__IB_AL_H__) +#define __IB_AL_H__ + +#include +#include +#include + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + +/****h* IB Access Layer API/Access Layer +* NAME +* InfiniBand Access Layer +* COPYRIGHT +* Copyright (c) 2003 Intel Corporation - All Rights Reserved. +* DESCRIPTION +* The access layer provides transport level access to an InfiniBand fabric. +* It supplies a foundation upon which a channel driver may be built. The +* access layer exposes the capabilities of the InfiniBand architecture and +* adds support for higher-level functionality required by most users of an +* InfiniBand fabric. Users define the protocols and policies used by the +* access layer, and the access layer implements them under the direction +* of a user. +****/ + + +typedef struct _ib_al* __ptr64 ib_al_handle_t; +typedef struct _al_pnp* __ptr64 ib_pnp_handle_t; +typedef struct _al_reg_svc* __ptr64 ib_reg_svc_handle_t; +typedef struct _al_mad_send* __ptr64 ib_mad_send_handle_t; +typedef struct _al_mad_svc* __ptr64 ib_mad_svc_handle_t; +typedef struct _al_query* __ptr64 ib_query_handle_t; +typedef struct _al_sub* __ptr64 ib_sub_handle_t; +typedef struct _al_listen* __ptr64 ib_listen_handle_t; +typedef struct _al_ioc* __ptr64 ib_ioc_handle_t; +typedef struct _al_svc_entry* __ptr64 ib_svc_handle_t; +typedef struct _al_pool_key* __ptr64 ib_pool_key_t; +typedef struct _al_pool* __ptr64 ib_pool_handle_t; +typedef struct _mlnx_fmr_pool_element* __ptr64 mlnx_fmr_pool_el_t; + +typedef struct _ib_cm_handle +{ + ib_al_handle_t h_al; + ib_qp_handle_t h_qp; + net32_t cid; + +} ib_cm_handle_t; + + +/****s* Access Layer/ib_shmid_t +* NAME +* ib_shmid_t +* +* DESCRIPTION +* Shared Memory Identifier, used to uniquely identify a shared memory region. +* +* SYNOPSIS +*/ +typedef uint8_t ib_shmid_t[64]; +/* +* SEE ALSO +* ib_reg_shmid +*********/ + + +/****d* Access Layer/ATS +* NAME +* DAPL Address Translation Service +* +* DESCRIPTION +* ATS service ID, service name, and IPv4 offset for DAPL-compliant +* ATS service records. +*/ +#define ATS_SERVICE_ID CL_NTOH64( 0x10000CE100415453 ) +#define ATS_NAME "DAPL Address Translation Service" +#define ATS_IPV4_OFFSET 12 +/**********/ + + +/****s* Access Layer/ib_mad_element_t +* NAME +* ib_mad_element_t +* +* DESCRIPTION +* Information used to submit a work request to a management datagram (MAD) +* queue pair. +* +* SYNOPSIS +*/ +typedef struct _ib_mad_element +{ + struct _ib_mad_element* __ptr64 p_next; + const void* __ptr64 context1; + const void* __ptr64 context2; + + /* Request/completion data. */ + ib_mad_t* __ptr64 p_mad_buf; + uint32_t size; + uint32_t immediate_data; + ib_net32_t remote_qp; + + /* Send request information. */ + ib_av_handle_t h_av; + ib_send_opt_t send_opt; + ib_net32_t remote_qkey; + boolean_t resp_expected; + uint32_t timeout_ms; + uint32_t retry_cnt; + uint8_t rmpp_version; + + /* Completion information. */ + ib_wc_status_t status; + boolean_t grh_valid; + ib_grh_t* __ptr64 p_grh; + + /* Completed receive data or send request information if h_av is NULL. */ + uint32_t recv_opt; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint16_t pkey_index; + uint8_t path_bits; + + /* Transaction completion data. */ + void* __ptr64 send_context1; + void* __ptr64 send_context2; + +} ib_mad_element_t; +/* +* FIELDS +* p_next +* A pointer used to chain MAD elements together. This value is +* set to NULL to mark the end of the chain. +* +* context1 +* User-defined context information associated with the datagram. 
+*
+*	context2
+*		User-defined context information associated with the datagram.
+*
+*	p_mad_buf
+*		The local data buffer containing the MAD.
+*
+*	size
+*		The size of the MAD referenced by p_mad_buf.
+*
+*	immediate_data
+*		32-bit field sent or received as part of a datagram message.
+*		This field is valid for send operations if the send_opt
+*		IB_SEND_OPT_IMMEDIATE flag has been set.  This field is valid
+*		on received datagram completions if the recv_opt
+*		IB_RECV_OPT_IMMEDIATE flag is set.
+*
+*	remote_qp
+*		Identifies the destination queue pair of a datagram send operation or
+*		the source queue pair of a received datagram.
+*
+*	h_av
+*		An address vector that specifies the path information used to route
+*		the outbound datagram to the destination queue pair.  This handle may
+*		be NULL when sending a directed route SMP or if the access layer
+*		should create the address vector for the user.
+*
+*	send_opt
+*		Optional send control parameters.  The following options are valid:
+*		IB_SEND_OPT_IMMEDIATE and IB_SEND_OPT_SOLICITED.  IB_SEND_OPT_FENCE
+*		is only valid on MAD QPs.
+*
+*	remote_qkey
+*		The qkey for the destination queue pair.
+*
+*	resp_expected
+*		This field is used to indicate that the submitted operation expects
+*		a response.  When set, the access layer will retry this send operation
+*		until the corresponding response is successfully received, or the
+*		request times out.  Send operations for which a response is expected
+*		will always be completed by the access layer before the corresponding
+*		received response.
+*
+*	timeout_ms
+*		Specifies the number of milliseconds to wait for a response to
+*		a request until retrying or timing out the request.  This field is
+*		ignored if resp_expected is set to FALSE.
+*
+*	retry_cnt
+*		Specifies the number of times that the request will be retried
+*		before failing the request.  This field is ignored if resp_expected
+*		is set to FALSE.
+*
+*	rmpp_version
+*		Indicates the version of the RMPP protocol to use when sending this
+*		MAD.  For MADs posted to MAD services of type IB_MAD_SVC_DEFAULT,
+*		setting this field to 0 disables RMPP on user-defined management
+*		classes or invokes the default RMPP version for well-defined management
+*		classes, if appropriate.  For MADs posted to MAD services of type
+*		IB_MAD_SVC_RMPP, setting this field to 0 disables RMPP on the sent
+*		MAD.  Note that if the RMPP header exists, but the RMPP protocol is
+*		not activated for this MAD, the user must ensure that the RMPP header
+*		has been zeroed.  This field is intended to help support backwards
+*		compatibility.
+*
+*	status
+*		The result of the MAD work request.
+*
+*	grh_valid
+*		A flag indicating whether the p_grh reference is valid.
+*
+*	p_grh
+*		A reference to the global route header information.
+*
+*	recv_opt
+*		Indicates optional fields valid as part of a work request that
+*		completed on an unreliable datagram queue pair.
+*
+*	remote_lid
+*		The source LID of the received datagram.
+*
+*	remote_sl
+*		The service level used by the source of the received datagram.
+*
+*	pkey_index
+*		This is valid only for IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.
+*		For received datagrams, this field contains the pkey index for
+*		the source queue pair.  For send operations, this field contains
+*		the pkey index to use when posting the send work request.
+*
+*	path_bits
+*		The portion of the remote_lid that may be changed to vary the path
+*		through the subnet to the remote port.
+*
+*	send_context1
+*		If this datagram was received as a response to a sent datagram, this
+*		field contains the context1 value of the send operation.  If this is
+*		an unsolicited receive, this field will be 0.
+*
+*	send_context2
+*		If this datagram was received as a response to a sent datagram, this
+*		field contains the context2 value of the send operation.  If this is
+*		an unsolicited receive, this field will be 0.
+*
+*	remote_qp
+*		Identifies the source queue pair of a received datagram.
+*
+* NOTES
+*	The format of data sent over the fabric is expected to be in the form
+*	of a MAD.  MADs are expected to match the format defined by the
+*	InfiniBand specification and must be in network-byte order when posted
+*	to a MAD service.
+*
+*	This structure is received to notify a user that a datagram has been
+*	received for a registered management class.  Information about the source
+*	of the data is provided, along with the data buffer.
+*
+*	The MAD element structure is defined such that a received MAD element
+*	may be re-used as a sent response.  In such cases, the h_av field may be
+*	NULL.  The address vector will be created and destroyed by the access
+*	layer.
+*
+* SEE ALSO
+*	ib_get_mad, ib_put_mad, ib_send_mad, ib_local_ds_t, ib_send_opt_t,
+*	ib_pfn_mad_recv_cb_t, ib_get_mad_buf
+*****/
+
+
+/****f* Access Layer/ib_get_mad_buf
+* NAME
+*	ib_get_mad_buf
+*
+* DESCRIPTION
+*	Returns a pointer to the MAD buffer associated with a MAD element.
+*
+* SYNOPSIS
+*/
+#pragma warning(push)
+#pragma warning(disable: 4244 )
+AL_INLINE void* AL_API
+ib_get_mad_buf(
+	IN	const ib_mad_element_t* const	p_mad_element )
+{
+	CL_ASSERT( p_mad_element );
+	return( p_mad_element->p_mad_buf );
+}
+#pragma warning (pop)
+/*
+* PARAMETERS
+*	p_mad_element
+*		[in] A pointer to a MAD element.
+*
+* NOTES
+*	Returns a pointer to the MAD buffer associated with a MAD element.
+*
+* SEE ALSO
+*	ib_mad_element_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_comp_cb_t
+* NAME
+*	ib_pfn_comp_cb_t
+*
+* DESCRIPTION
+*	Completion callback provided by a client.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_comp_cb_t)(
+	IN	const	ib_cq_handle_t	h_cq,
+	IN	void					*cq_context );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Handle for the completion queue on which the completion occurred.
+*
+*	cq_context
+*		[in] User-specified context for the completion queue on which the
+*		completion occurred.
+*
+* NOTES
+*	This function is invoked upon completion of a work request on a queue pair
+*	associated with the completion queue.  The context associated with the
+*	completion queue on which the completion occurred is returned to the client
+*	through the callback.
+*
+*	In the kernel, this callback is usually invoked using a tasklet, dependent
+*	on the implementation of the underlying verbs provider driver.
+*****/
+
+
+/****d* Access Layer/ib_al_flags_t
+* NAME
+*	ib_al_flags_t
+*
+* DESCRIPTION
+*	Access layer flags used to direct the operation of various calls.
+*
+* SYNOPSIS
+*/
+typedef uint32_t	ib_al_flags_t;
+#define IB_FLAGS_SYNC	0x00000001
+/*
+* VALUES
+*	IB_FLAGS_SYNC
+*		Indicates that the given operation should be performed synchronously.
+*		The call will block until it completes.  Callbacks will still be
+*		invoked.
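+*
+* EXAMPLE
+*	A hedged sketch of requesting synchronous operation (not part of the
+*	original header).  It assumes ib_query_req_t, declared later in this
+*	file, carries an ib_al_flags_t flags field, and uses the complib
+*	cl_memclr helper:
+*
+*		ib_query_req_t	query_req;
+*
+*		cl_memclr( &query_req, sizeof( query_req ) );
+*		/* Block inside the query call until it completes. */
+*		query_req.flags = IB_FLAGS_SYNC;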
+* +* SEE ALSO +* ib_cm_req_t, ib_cm_rep_t, ib_cm_dreq_t, ib_cm_lap_t, +* ib_reg_svc_req_t, ib_mcast_req_t, ib_query_req_t, ib_sub_req_t +*****/ + + +/****f* Access Layer/ib_pfn_destroy_cb_t +* NAME +* ib_pfn_destroy_cb_t +* +* DESCRIPTION +* Asynchronous callback invoked after a resource has been successfully +* destroyed. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_destroy_cb_t)( + IN void *context ); +/* +* PARAMETERS +* context +* [in] User-specified context associated with the resource being +* destroyed. The context for the resource is usually set during the +* object's creation. +* +* NOTES +* This callback notifies a client that a resource has been successfully +* destroyed. It is used to indicate that all pending callbacks associated +* with the resource have completed, and no additional events will be +* generated for that resource. +* +* This callback is invoked within a system thread context in the kernel. +* +* If the user specifies ib_sync_destroy as the asynchronous callback, then +* the object being destroyed will be destroyed synchronously. This may +* result in the calling thread blocking while outstanding callbacks complete. +* +* SEE ALSO +* ib_sync_destroy +*****/ + + + +/****f* Access Layer/ib_sync_destroy +* NAME +* ib_sync_destroy +* +* DESCRIPTION +* Access layer routine used to indicate synchronous destruction of an +* object. +* +* SYNOPSIS +*/ +static const ib_pfn_destroy_cb_t ib_sync_destroy = (ib_pfn_destroy_cb_t)-1i64; +/* +* PARAMETERS +* Not Applicable. +* +* NOTES +* Users specify ib_sync_destroy as the ib_pfn_destroy_cb_t callback in order +* to force synchronous object destruction. This may result in the calling +* thread blocking while outstanding callbacks complete. +* +* SEE ALSO +* ib_pfn_destroy_cb_t +*****/ + + +/****s* Access Layer/ib_async_event_rec_t +* NAME +* ib_async_event_rec_t +* +* DESCRIPTION +* Information returned when an asynchronous event occurs on an allocated +* resource. +* +* SYNOPSIS +*/ +typedef struct _ib_async_event_rec +{ + ib_async_event_t code; + uint64_t vendor_specific; + + void* __ptr64 context; + union _handle_t + { + ib_ca_handle_t h_ca; + ib_cq_handle_t h_cq; + ib_qp_handle_t h_qp; + ib_srq_handle_t h_srq; + + } handle; + +} ib_async_event_rec_t; +/* +* FIELDS +* code +* A code that identifies the type of event being reported. +* +* vendor_specific +* A field containing optional vendor specific information. +* +* context +* User-defined context information associated with the resource on +* which the error occurred. +* +* handle +* A handle to the resource for which this event record was generated. +* This handle will match the handle returned during the creation of +* resource. It is provided in case an event occurs before a client's +* call to create a resource can return. +* +* SEE ALSO +* ib_async_event_t, ib_pfn_event_cb_t +*****/ + + +/****f* Access Layer/ib_pfn_event_cb_t +* NAME +* ib_pfn_event_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after an asynchronous event +* has occurred on an allocated resource. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_event_cb_t)( + IN ib_async_event_rec_t *p_event_rec ); +/* +* PARAMETERS +* p_event_rec +* [in] Information returned to the user, indicating the type of +* event and the associated user context. +* +* NOTES +* This callback is invoked within a system thread context in the kernel. 
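+*
+* EXAMPLE
+*	A hedged sketch of an event callback (not part of the original header;
+*	my_ca_context_t and the specific event code checked are illustrative,
+*	and event codes are defined in ib_types.h):
+*
+*		static void AL_API
+*		my_ca_event_cb(
+*			IN	ib_async_event_rec_t	*p_event_rec )
+*		{
+*			/* Recover the context supplied when the CA was opened. */
+*			my_ca_context_t	*p_ctx =
+*				(my_ca_context_t*)p_event_rec->context;
+*
+*			if( p_event_rec->code == IB_AE_PORT_ACTIVE )
+*			{
+*				/* React to the port becoming active using p_ctx. */
+*			}
+*		}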
+* +* SEE ALSO +* ib_async_event_rec_t +*****/ + + +/****f* Access Layer/ib_open_ca +* NAME +* ib_open_ca +* +* DESCRIPTION +* Opens a channel adapter for additional access. A channel adapter must +* be opened before consuming resources on that adapter. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_open_ca( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + IN const ib_pfn_event_cb_t pfn_ca_event_cb OPTIONAL, + IN const void* const ca_context, + OUT ib_ca_handle_t* const ph_ca ); +/* +* PARAMETERS +* h_al +* [in] The handle to an open instance of AL. +* +* ca_guid +* [in] The GUID of the channel adapter to open. +* +* pfn_ca_event_cb +* [in] A user-specified callback that is invoked after an +* asynchronous event has occurred on the channel adapter. +* +* ca_context +* [in] A client-specified context to associate with this opened instance +* of the channel adapter. This context is returned to the user when +* invoking asynchronous callbacks referencing this channel adapter. +* +* ph_ca +* [out] Upon successful completion of this call, this references a +* handle to the opened channel adapter. +* +* RETURN VALUES +* IB_SUCCESS +* The operation was successful. +* +* IB_INVALID_AL_HANDLE +* The access layer handle was invalid. +* +* IB_INVALID_GUID +* No channel adapter in the system was found for the specified ca_guid. +* +* IB_INVALID_PARAMETER +* A reference to the CA handle was not provided. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to open the channel adapter. +* +* NOTES +* When successful, this routine returns a handle to an open instance of a CA. +* +* SEE ALSO +* ib_query_ca, ib_modify_ca, ib_close_ca, ib_pfn_event_cb_t +*****/ + + +/****f* Access Layer/ib_query_ca +* NAME +* ib_query_ca +* +* DESCRIPTION +* Queries the attributes of an opened channel adapter. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_query_ca( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ); +/* +* PARAMETERS +* h_ca +* [in] The handle to an open channel adapter. +* +* p_ca_attr +* [out] A reference to a buffer where the channel adapter attributes, +* including port attribute information will be copied. If this parameter +* is NULL, then the required buffer size needed to return all of the CA +* attribute information is returned through the p_size parameter. The +* ib_ca_attr_t structure for the specified channel adapter is stored +* at the top of the buffer. +* +* p_size +* [in/out] On input, this references the size of the data buffer +* referenced by the p_ca_attr parameter. +* +* On output, the number of bytes used or needed to copy all CA +* attribute information. +* +* RETURN VALUES +* IB_SUCCESS +* The attributes were returned successfully. +* +* IB_INVALID_CA_HANDLE +* The channel adapter handle was invalid. +* +* IB_INSUFFICIENT_MEMORY +* The size of the p_ca_attr buffer, specified through p_size, is +* insufficient to store all of the CA attribute information. +* +* IB_INVALID_PARAMETER +* A reference to the size was not provided. +* +* NOTES +* This routine returns information about the specified channel adapter, +* including port attributes. The amount of information returned through +* this call is variable sized. Users may obtain the size of the data +* buffer required to obtain the CA attributes by calling this function +* with p_ca_attr set to NULL. The access layer will then return the +* necessary size in the variable referenced by the p_size parameter. 
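+*
+* EXAMPLE
+*	A sketch of the two-call pattern described above (not part of the
+*	original header; cl_zalloc and cl_free are the complib allocators,
+*	and error handling is trimmed):
+*
+*		uint32_t		size = 0;
+*		ib_ca_attr_t	*p_ca_attr;
+*
+*		/* First call: p_ca_attr is NULL, so only the size is returned. */
+*		ib_query_ca( h_ca, NULL, &size );
+*		p_ca_attr = (ib_ca_attr_t*)cl_zalloc( size );
+*		if( p_ca_attr )
+*		{
+*			if( ib_query_ca( h_ca, p_ca_attr, &size ) == IB_SUCCESS )
+*			{
+*				/* Use the attributes, e.g. p_ca_attr->num_ports. */
+*			}
+*			cl_free( p_ca_attr );
+*		}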
+* +* SEE ALSO +* ib_open_ca, ib_query_ca_by_guid, ib_modify_ca, ib_close_ca, ib_ca_attr_t +*****/ + + +/****f* Access Layer/ib_query_ca_by_guid +* NAME +* ib_query_ca_by_guid +* +* DESCRIPTION +* Queries the attributes of an opened channel adapter. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_query_ca_by_guid( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ); +/* +* PARAMETERS +* h_al +* [in] The handle to an open instance of AL. +* +* ca_guid +* [in] The GUID of the channel adapter to query. +* +* p_ca_attr +* [out] A reference to a buffer where the channel adapter attributes, +* including port attribute information will be copied. If this parameter +* is NULL, then the required buffer size needed to return all of the CA +* attribute information is returned through the p_size parameter. The +* ib_ca_attr_t structure for the specified channel adapter is stored +* at the top of the buffer. +* +* p_size +* [in/out] On input, this references the size of the data buffer +* referenced by the p_ca_attr parameter. +* +* On output, the number of bytes used or needed to copy all CA +* attribute information. +* +* RETURN VALUES +* IB_SUCCESS +* The attributes were returned successfully. +* +* IB_INVALID_AL_HANDLE +* The access layer handle was invalid. +* +* IB_INVALID_GUID +* No channel adapter in the system was found for the specified ca_guid. +* +* IB_INSUFFICIENT_MEMORY +* The size of the p_ca_attr buffer, specified through p_size, is +* insufficient to store all of the CA attribute information. +* +* IB_INVALID_PARAMETER +* A reference to the size was not provided. +* +* NOTES +* This routine returns information about the specified channel adapter, +* including port attributes. The amount of information returned through +* this call is variable sized. Users may obtain the size of the data +* buffer required to obtain the CA attributes by calling this function +* with p_ca_attr set to NULL. The access layer will then return the +* necessary size in the variable referenced by the p_size parameter. +* +* SEE ALSO +* ib_open_ca, ib_query_ca, ib_modify_ca, ib_close_ca, ib_ca_attr_t +*****/ + + +/****f* Access Layer/ib_modify_ca +* NAME +* ib_modify_ca +* +* DESCRIPTION +* Modifies the attributes and violation counters associated with a port. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_modify_ca( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* const p_port_attr_mod ); +/* +* PARAMETERS +* h_ca +* [in] A handle to an opened channel adapter. +* +* port_num +* [in] An index to the port that is being modified. The port_num matches +* the index of the port as returned through the ib_query_ca call. +* +* ca_mod +* [in] A mask of the attributes and counters to modify. +* +* p_port_attr_mod +* [in] A list of the specific port attribute information to modify. For +* the access layer to modify an attribute, its corresponding bit must be +* set in the ca_mod parameter. +* +* RETURN VALUES +* IB_SUCCESS +* The attributes were successfully modified. +* +* IB_INVALID_CA_HANDLE +* The channel adapter handle was invalid. +* +* IB_INVALID_PORT +* The port number supplied was invalid for the given channel adapter. +* +* IB_INVALID_PARAMETER +* The supplied ca_mod mask is invalid or a reference to the port +* attribute information was not provided. 
+* +* IB_UNSUPPORTED +* The optional qkey and pkey violation counters are not supported by +* this channel adapter, but an attempt was made to modify them. +* +* NOTES +* This call sets the attributes for a port in its associated PORT_INFO +* structure. It will also reset pkey and qkey violation counters. +* +* SEE ALSO +* ib_open_ca, ib_query_ca, ib_close_ca, ib_ca_mod_t, ib_port_attr_mod_t +*****/ + + +/****f* Access Layer/ib_close_ca +* NAME +* ib_close_ca +* +* DESCRIPTION +* Closes an opened channel adapter. Once closed, no further access to this +* channel adapter is possible. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_close_ca( + IN const ib_ca_handle_t h_ca, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ); +/* +* PARAMETERS +* h_ca +* [in] A handle to an opened channel adapter. +* +* pfn_destroy_cb +* [in] A user-specified callback that is invoked after the channel +* adapter has been successfully destroyed. +* +* RETURN VALUES +* IB_SUCCESS +* The close request was registered. +* +* IB_INVALID_CA_HANDLE +* The channel adapter handle was invalid. +* +* NOTES +* This call closes the opened channel adapter and frees all associated +* resources, such as queue pairs, protection domains, and completion +* queues. Since callbacks may be outstanding against the channel adapter +* or one of its resources at the time the close operation is invoked, this +* call operates asynchronously. The user will be notified through a callback +* once the close operation completes, indicating that no additional callbacks +* will be invoked for the specified channel adapter or a related resource. +* +* SEE ALSO +* ib_open_ca +*****/ + + +/****f* Access Layer/ib_alloc_pd +* NAME +* ib_alloc_pd +* +* DESCRIPTION +* Allocates a protection domain on the specified channel adapter. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_alloc_pd( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN const void* const pd_context, + OUT ib_pd_handle_t* const ph_pd ); +/* +* PARAMETERS +* h_ca +* [in] A handle to an opened channel adapter. +* +* pd_type +* [in] Indicates the type of protection domain being created. +* +* pd_context +* [in] A client-specified context to associate with this allocated +* protection domain. This context is returned to the user when +* invoking asynchronous callbacks referencing this protection domain. +* +* ph_pd +* [out] Upon successful completion of this call, this references a +* handle to the allocated protection domain. +* +* RETURN VALUES +* IB_SUCCESS +* The operation was successful. +* +* IB_INVALID_CA_HANDLE +* The channel adapter handle was invalid. +* +* IB_INVALID_PARAMETER +* The supplied pd_type value is invalid or a reference to the protection +* domain handle was not provided. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to allocate the protection domain. +* +* IB_INSUFFICIENT_RESOURCES +* There were insufficient resources currently available on the channel +* adapter to create the protection domain. +* +* NOTES +* When successful, this routine returns a handle to a newly allocated +* protection domain. +* +* SEE ALSO +* ib_dealloc_pd, ib_pd_type_t +*****/ + + +/****f* Access Layer/ib_dealloc_pd +* NAME +* ib_dealloc_pd +* +* DESCRIPTION +* Deallocates a protection domain. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_dealloc_pd( + IN const ib_pd_handle_t h_pd, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ); +/* +* PARAMETERS +* h_pd +* [in] A handle to an allocated protection domain. 
+* +* pfn_destroy_cb +* [in] A user-specified callback that is invoked after the protection +* domain has been successfully destroyed. +* +* RETURN VALUES +* IB_SUCCESS +* The operation was successful. +* +* IB_INVALID_PD_HANDLE +* The protection domain handle was invalid. +* +* NOTES +* This call deallocates a protection domain and releases all associated +* resources, including queue pairs and registered memory regions. Since +* callbacks may be outstanding against one of protection domain's related +* resources at the time the deallocation call is invoked, this call operates +* asynchronously. The user will be notified through a callback once the +* deallocation call completes, indicating that no additional callbacks +* will be invoked for a related resource. +* +* SEE ALSO +* ib_alloc_pd +*****/ + + +/****f* Access Layer/ib_create_av +* NAME +* ib_create_av +* +* DESCRIPTION +* Creates an address vector. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_create_av( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + OUT ib_av_handle_t* const ph_av ); +/* +* PARAMETERS +* h_pd +* [in] A handle to an allocated protection domain that the address +* vector will be associated with. +* +* p_av_attr +* [in] Attributes for the newly created address vector. +* +* ph_av +* [out] Upon successful completion of this call, this references a +* handle to the newly created address vector. +* +* RETURN VALUES +* IB_SUCCESS +* The operation was successful. +* +* IB_INVALID_PD_HANDLE +* The protection domain handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the address vector attributes or handle was not +* provided. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to create the address vector. +* +* IB_INVALID_PORT +* The port number supplied, through the address vector attributes, +* was invalid for the given channel adapter. +* +* IB_INSUFFICIENT_RESOURCES +* There were insufficient resources currently available on the channel +* adapter to create the address vector. +* +* NOTES +* This routine creates an address vector. Clients specify the attributes +* of the address vector through the p_av_attr parameter. +* +* SEE ALSO +* ib_query_av, ib_modify_av, ib_destroy_av +*****/ + + +/****f* Access Layer/ib_query_av +* NAME +* ib_query_av +* +* DESCRIPTION +* Returns the attributes of an address vector. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_query_av( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd ); +/* +* PARAMETERS +* h_av +* [in] A handle to an existing address vector. +* +* p_av_attr +* [out] Upon successful completion, the structure referenced by this +* parameter contains the attributes of the specified address vector. +* +* ph_pd +* [out] Upon successful completion, this references a handle to the +* protection domain associated with the address vector. +* +* RETURN VALUES +* IB_SUCCESS +* The attributes were returned successfully. +* +* IB_INVALID_AV_HANDLE +* The address vector handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the address vector attributes structure or protection +* domain handle was not provided. +* +* SEE ALSO +* ib_create_av, ib_modify_av, ib_destroy_av, ib_av_attr_t +*****/ + + +/****f* Access Layer/ib_modify_av +* NAME +* ib_modify_av +* +* DESCRIPTION +* Modifies the attributes of an existing address vector. 
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_modify_av(
+	IN	const	ib_av_handle_t		h_av,
+	IN	const	ib_av_attr_t* const	p_av_attr );
+/*
+* PARAMETERS
+*	h_av
+*		[in] A handle to an existing address vector.
+*
+*	p_av_attr
+*		[in] The new attributes to use when modifying the address vector.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The address vector was successfully modified.
+*
+*	IB_INVALID_AV_HANDLE
+*		The address vector handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the address vector attributes structure was not
+*		provided.
+*
+*	IB_INVALID_PORT
+*		The port number supplied, through the address vector attributes,
+*		was invalid for the given channel adapter.
+*
+* NOTES
+*	This routine modifies the attributes of an existing address vector.
+*	The new attributes are specified through the p_av_attr parameter.
+*
+* SEE ALSO
+*	ib_create_av, ib_destroy_av
+*****/
+
+
+/****f* Access Layer/ib_destroy_av
+* NAME
+*	ib_destroy_av
+*
+* DESCRIPTION
+*	Destroys an existing address vector.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_av(
+	IN	const	ib_av_handle_t	h_av );
+/*
+* PARAMETERS
+*	h_av
+*		[in] A handle to an existing address vector.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The address vector was successfully destroyed.
+*
+*	IB_INVALID_AV_HANDLE
+*		The address vector handle was invalid.
+*
+* NOTES
+*	This routine destroys an existing address vector.
+*
+* SEE ALSO
+*	ib_create_av
+*****/
+
+
+/****f* Access Layer/ib_create_srq
+* NAME
+*	ib_create_srq
+*
+* DESCRIPTION
+*	Creates a shared receive queue and returns its handle to the user.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_srq(
+	IN	const	ib_pd_handle_t			h_pd,
+	IN	const	ib_srq_attr_t* const	p_srq_attr,
+	IN	const	void* const				srq_context,
+	IN	const	ib_pfn_event_cb_t		pfn_srq_event_cb OPTIONAL,
+	OUT			ib_srq_handle_t* const	ph_srq );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] This is a handle to a protection domain associated with the
+*		shared receive queue.
+*
+*	p_srq_attr
+*		[in] Attributes necessary to allocate and initialize a shared
+*		receive queue.
+*
+*	srq_context
+*		[in] User-specified context information associated with the shared
+*		receive queue.
+*
+*	pfn_srq_event_cb
+*		[in] User-specified error callback routine invoked after an
+*		asynchronous event has occurred on the shared receive queue.
+*
+*	ph_srq
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created shared receive queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The receive queue was successfully created.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain to associate with the shared receive queue was
+*		invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes or handle was not
+*		provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to create the shared receive queue.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the shared receive queue.
+*
+*	IB_INVALID_SETTING
+*		The specified shared receive queue creation attributes are invalid.
+*
+*	IB_INVALID_MAX_WRS
+*		The requested maximum send or receive work request depth could not be
+*		supported.
+*
+*	IB_INVALID_MAX_SGE
+*		The requested maximum number of scatter-gather entries for the send or
+*		receive queue could not be supported.
+*
+* NOTES
+*	This routine allocates a shared receive queue with the specified
+*	attributes.  If the shared receive queue cannot be allocated, an error
+*	is returned.
+*	When creating the shared receive queue, users associate a context with
+*	the shared receive queue.  This context is returned to the user through
+*	the asynchronous event callback if an event occurs.
+*
+*	This routine is used to create receive queues, which work with QPs of
+*	type:
+*
+*	IB_QPT_RELIABLE_CONN
+*	IB_QPT_UNRELIABLE_CONN
+*	IB_QPT_UNRELIABLE_DGRM
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_query_srq
+* NAME
+*	ib_query_srq
+*
+* DESCRIPTION
+*	Query the current attributes of the shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_srq(
+	IN	const	ib_srq_handle_t		h_srq,
+	OUT			ib_srq_attr_t* const	p_srq_attr );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	p_srq_attr
+*		[out] Upon successful completion of this call, the structure
+*		referenced by this parameter contains the attributes of the specified
+*		shared receive queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared receive queue attributes were returned successfully.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes structure was not
+*		provided.
+*
+* NOTES
+*	This routine returns information about the specified shared receive
+*	queue.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_modify_srq
+* NAME
+*	ib_modify_srq
+*
+* DESCRIPTION
+*	Modifies the attributes of an existing shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_modify_srq(
+	IN	const	ib_srq_handle_t			h_srq,
+	IN	const	ib_srq_attr_t* const	p_srq_attr,
+	IN	const	ib_srq_attr_mask_t		srq_attr_mask );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	p_srq_attr
+*		[in] Attributes necessary to allocate and initialize a shared
+*		receive queue.
+*
+*	srq_attr_mask
+*		[in] Flags indicating which fields in the previous structure are
+*		valid.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared receive queue was successfully modified.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the shared receive queue attributes was not provided.
+*
+*	IB_INVALID_SETTING
+*		The specified shared receive queue attributes were invalid.
+*
+*	IB_UNSUPPORTED
+*		The required action is not supported yet.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to modify the shared receive queue.
+*
+* NOTES
+*	This routine modifies the attributes of an existing shared receive
+*	queue.  The new attributes are specified through the p_srq_attr
+*	parameter; srq_attr_mask indicates which of those attributes to apply.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_destroy_srq
+* NAME
+*	ib_destroy_srq
+*
+* DESCRIPTION
+*	Release a shared receive queue.  Once destroyed, no further access to
+*	this shared receive queue is possible.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_srq(
+	IN	const	ib_srq_handle_t			h_srq,
+	IN	const	ib_pfn_destroy_cb_t		pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] A handle to an existing shared receive queue.
+*
+*	pfn_destroy_cb
+*		[in] A user-specified callback that is invoked after the shared
+*		receive queue has been successfully destroyed.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The destroy request was registered.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_RESOURCE_BUSY
+*		There are queue pairs bound to the shared receive queue.
+*
+* NOTES
+*	This call destroys an existing shared receive queue.  Since callbacks
+*	may be outstanding against the shared receive queue at the time the
+*	destroy operation is invoked, this call operates asynchronously.  The
+*	user will be notified through a callback once the destroy operation
+*	completes, indicating that no additional callbacks will be invoked for
+*	the specified shared receive queue.
+*
+* SEE ALSO
+*	ib_query_srq, ib_modify_srq, ib_destroy_srq, ib_srq_attr_t,
+*	ib_srq_attr_mask_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_post_srq_recv
+* NAME
+*	ib_post_srq_recv
+*
+* DESCRIPTION
+*	This routine posts a work request to a shared receive queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_post_srq_recv(
+	IN	const	ib_srq_handle_t		h_srq,
+	IN			ib_recv_wr_t* const	p_recv_wr,
+	OUT			ib_recv_wr_t		**pp_recv_failure OPTIONAL );
+/*
+* PARAMETERS
+*	h_srq
+*		[in] The shared receive queue to which this work request is being
+*		submitted.
+*
+*	p_recv_wr
+*		[in] A reference to the head of the work request list.
+*
+*	pp_recv_failure
+*		[out] If the post receive operation failed, this references the work
+*		request in the p_recv_wr list where the first failure occurred.
+*		This parameter may be NULL if only a single work request is being
+*		posted to the shared receive queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		All work requests were successfully posted.
+*
+*	IB_INVALID_SRQ_HANDLE
+*		The shared receive queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the receive work request list was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		The number of posted work requests exceeds the current depth available
+*		on the receive queue.
+*
+*	IB_INVALID_WR_TYPE
+*		The work request type was invalid.
+*
+*	IB_INVALID_QP_STATE
+*		The current shared receive queue state does not allow posting
+*		receives.
+*
+* NOTES
+*	This routine posts a work request to the shared receive queue.  The type
+*	of work to perform is defined by the p_recv_wr parameter.  This call is
+*	used to post data buffers to receive incoming message sends.
+*
+* SEE ALSO
+*	ib_recv_wr_t
+*****/
+
+
+/****f* Access Layer/ib_create_qp
+* NAME
+*	ib_create_qp
+*
+* DESCRIPTION
+*	Creates a queue pair and returns its handle to the user.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_qp(
+	IN	const	ib_pd_handle_t			h_pd,
+	IN	const	ib_qp_create_t* const	p_qp_create,
+	IN	const	void* const				qp_context,
+	IN	const	ib_pfn_event_cb_t		pfn_qp_event_cb OPTIONAL,
+	OUT			ib_qp_handle_t* const	ph_qp );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] This is a handle to a protection domain associated with the
+*		queue pair.
+*
+*	p_qp_create
+*		[in] Attributes necessary to allocate and initialize the queue pair.
+*
+*	qp_context
+*		[in] User-specified context information associated with the queue
+*		pair.
+* +* pfn_qp_event_cb +* [in] User-specified error callback routine invoked after an +* asynchronous event has occurred on the queue pair. +* +* ph_qp +* [out] Upon successful completion of this call, this references a +* handle to the newly created queue pair. +* +* RETURN VALUES +* IB_SUCCESS +* The queue pair was successfully created. +* +* IB_INVALID_PD_HANDLE +* The protection domain to associate with the queue pair was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the queue pair attributes or handle was not provided. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to create the queue pair. +* +* IB_INSUFFICIENT_RESOURCES +* There were insufficient resources currently available on the channel +* adapter to create the queue pair. +* +* IB_INVALID_CQ_HANDLE +* The send or receive completion queue to associate with the queue pair +* was invalid. +* +* IB_INVALID_SRQ_HANDLE +* The shared receive queue to be associated with the queue pair +* was invalid. +* +* IB_INVALID_SETTING +* The specified queue pair creation attributes are invalid. +* +* IB_UNSUPPORTED +* The specified queue pair type was not supported by the channel adapter. +* +* IB_INVALID_MAX_WRS +* The requested maximum send or receive work request depth could not be +* supported. +* +* IB_INVALID_MAX_SGE +* The requested maximum number of scatter-gather entries for the send or +* receive queue could not be supported. +* +* NOTES +* 1. This routine allocates a queue pair with the specified attributes. If +* the queue pair cannot be allocated, an error is returned. When creating +* the queue pair, users associate a context with the queue pair. This +* context is returned to the user through the asynchronous event callback +* if an event occurs. +* +* 2. For QPs that are associated with an SRQ, the Consumer should take +* the QP through the Error State before invoking a Destroy QP or a Modify +* QP to the Reset State. The Consumer may invoke the Destroy QP without +* first performing a Modify QP to the Error State and waiting for the Affiliated +* Asynchronous Last WQE Reached Event. However, if the Consumer +* does not wait for the Affiliated Asynchronous Last WQE Reached Event, +* then WQE and Data Segment leakage may occur. +* +* 3. This routine is used to create queue pairs of type: +* IB_QPT_RELIABLE_CONN +* IB_QPT_UNRELIABLE_CONN +* IB_QPT_UNRELIABLE_DGRM +* IB_QPT_MAD +* +* 4. Callers of ib_create_qp should call ib_init_dgrm_svc if the queue pair +* is of type IB_QPT_UNRELIABLE_DGRM or IB_QPT_MAD before sending or +* receiving data. IB_QPT_RELIABLE_CONN, IB_QPT_UNRELIABLE_CONN type +* queue pairs should be used by the connection establishment process +* before data may be sent or received on the QP. +* +* This call does not return the QP attributes as MAD QPs do not support +* such an operation. This is a minor specification deviation. +* +* SEE ALSO +* ib_query_qp, ib_modify_qp, ib_destroy_qp, ib_cm_req, ib_cm_rep, ib_cm_rtu +* ib_init_dgrm_svc, ib_qp_create_t, ib_pfn_event_cb_t, ib_qp_attr_t +*****/ + + +/****f* Access Layer/ib_get_spl_qp +* NAME +* ib_get_spl_qp +* +* DESCRIPTION +* Create a special QP or QP alias. This call provides access to queue +* pairs 0 and 1, and the raw queue pair types. 
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_spl_qp(
+	IN	const	ib_pd_handle_t			h_pd,
+	IN	const	ib_net64_t				port_guid,
+	IN	const	ib_qp_create_t* const	p_qp_create,
+	IN	const	void* const				qp_context,
+	IN	const	ib_pfn_event_cb_t		pfn_qp_event_cb OPTIONAL,
+	OUT			ib_pool_key_t* const	p_pool_key OPTIONAL,
+	OUT			ib_qp_handle_t* const	ph_qp );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] This is a handle to a protection domain associated with the
+*		queue pair.  This must be a protection domain alias for aliased QP
+*		types.
+*
+*	port_guid
+*		[in] The port GUID that the special QP will be associated with.
+*
+*	p_qp_create
+*		[in] Attributes necessary to allocate and initialize the queue pair.
+*
+*	qp_context
+*		[in] User-specified context information associated with the queue
+*		pair.
+*
+*	pfn_qp_event_cb
+*		[in] User-specified error callback routine invoked after an
+*		asynchronous event has occurred on the queue pair.
+*
+*	p_pool_key
+*		[out] A key to a pool of MAD elements that are used to send MADs.
+*		This key is only valid for aliased QP types.
+*
+*	ph_qp
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created queue pair.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The queue pair was successfully created.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain to associate with the queue pair was invalid.
+*
+*	IB_INVALID_PORT
+*		The port number supplied was invalid for the given channel adapter.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the queue pair attributes or handle was not provided.
+*
+*	IB_INVALID_PERMISSION
+*		The calling process does not have sufficient privilege to create the
+*		requested queue pair type.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to create the queue pair.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the queue pair.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The send or receive completion queue to associate with the queue pair
+*		was invalid.
+*
+*	IB_INVALID_SETTING
+*		The specified queue pair type was invalid.
+*
+*	IB_UNSUPPORTED
+*		The specified queue pair type was not supported by the channel adapter.
+*
+*	IB_INVALID_MAX_WRS
+*		The requested maximum send or receive work request depth could not be
+*		supported.
+*
+*	IB_INVALID_MAX_SGE
+*		The requested maximum number of scatter-gather entries for the send or
+*		receive queue could not be supported.
+*
+* NOTES
+*	This routine allocates a queue pair with the specified attributes.  If
+*	the queue pair cannot be allocated, an error is returned.  When creating
+*	the queue pair, users associate a context with the queue pair.  This
+*	context is returned to the user through the asynchronous event callback
+*	if an event occurs.
+*
+*	This routine is used to create queue pairs of type:
+*
+*	IB_QPT_QP0
+*	IB_QPT_QP1
+*	IB_QPT_RAW_IPV6
+*	IB_QPT_RAW_ETHER
+*	IB_QPT_QP0_ALIAS
+*	IB_QPT_QP1_ALIAS
+*
+*	Callers of ib_get_spl_qp should call ib_init_dgrm_svc if the queue pair is
+*	of type IB_QPT_QP0, IB_QPT_QP1, IB_QPT_RAW_IPV6, IB_QPT_RAW_ETHER before
+*	sending or receiving data.  MADs may be sent on aliased QPs on the
+*	successful return of this routine.
+*
+* SEE ALSO
+*	ib_query_qp, ib_modify_qp, ib_destroy_qp, ib_get_mad
+*	ib_init_dgrm_svc, ib_qp_create_t, ib_pfn_event_cb_t, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_query_qp
+* NAME
+*	ib_query_qp
+*
+* DESCRIPTION
+*	Query the current attributes of the queue pair.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_qp(
+	IN	const	ib_qp_handle_t		h_qp,
+	OUT			ib_qp_attr_t* const	p_qp_attr );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] A handle to an existing queue pair.
+*
+*	p_qp_attr
+*		[out] Upon successful completion of this call, the structure
+*		referenced by this parameter contains the attributes of the specified
+*		queue pair.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The queue pair attributes were returned successfully.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the queue pair attributes structure was not provided.
+*
+* NOTES
+*	This routine returns information about the specified queue pair.
+*
+* SEE ALSO
+*	ib_create_qp, ib_modify_qp, ib_qp_attr_t
+*****/
+
+
+/****f* Access Layer/ib_modify_qp
+* NAME
+*	ib_modify_qp
+*
+* DESCRIPTION
+*	Modifies the attributes of an existing queue pair.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_modify_qp(
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	ib_qp_mod_t* const	p_qp_mod );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] A handle to an existing queue pair.
+*
+*	p_qp_mod
+*		[in] The new attributes to use when modifying the queue pair.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The queue pair was successfully modified.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the queue pair attributes was not provided.
+*
+*	IB_INVALID_SETTING
+*		The specified queue pair attributes were invalid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to modify the queue pair.
+*
+*	IB_UNSUPPORTED
+*		The requested modification was not supported.
+*
+*	IB_INVALID_QP_STATE
+*		The queue pair was in an invalid state for the requested operation.
+*
+*	IB_INVALID_PKEY
+*		The specified pkey was not valid.
+*
+*	IB_INVALID_APM_STATE
+*		The specified automatic path migration state was not valid.
+*
+* NOTES
+*	This routine modifies the attributes of an existing queue pair and
+*	transitions it to a new state.  The new state and attributes are
+*	specified through the p_qp_mod parameter.  Upon successful completion,
+*	the queue pair is in the requested state.
+*
+* SEE ALSO
+*	ib_create_qp, ib_destroy_qp, ib_qp_mod_t
+*****/
+
+
+/****f* Access Layer/ib_destroy_qp
+* NAME
+*	ib_destroy_qp
+*
+* DESCRIPTION
+*	Release a queue pair.  Once destroyed, no further access to this
+*	queue pair is possible.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_qp(
+	IN	const	ib_qp_handle_t			h_qp,
+	IN	const	ib_pfn_destroy_cb_t		pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] A handle to an existing queue pair.
+*
+*	pfn_destroy_cb
+*		[in] A user-specified callback that is invoked after the queue pair
+*		has been successfully destroyed.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The destroy request was registered.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+* NOTES
+*	This call destroys an existing queue pair.  Since callbacks may be
+*	outstanding against the queue pair at the time the destroy operation is
+*	invoked, this call operates asynchronously.  The user will be notified
+*	through a callback once the destroy operation completes, indicating that
+*	no additional callbacks will be invoked for the specified queue pair.
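+*
+* EXAMPLE
+*	A sketch of forcing synchronous destruction (not part of the original
+*	header; ib_sync_destroy is declared earlier in this file):
+*
+*		/* Blocks until all outstanding callbacks for h_qp complete. */
+*		ib_destroy_qp( h_qp, ib_sync_destroy );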
+*
+* SEE ALSO
+*	ib_create_qp
+*****/
+
+
+/****s* Access Layer/ib_cq_create_t
+* NAME
+*	ib_cq_create_t
+*
+* DESCRIPTION
+*	Attributes used to initialize a completion queue at creation time.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cq_create
+{
+	uint32_t				size;
+	ib_pfn_comp_cb_t		pfn_comp_cb;
+	cl_waitobj_handle_t		h_wait_obj;
+
+}	ib_cq_create_t;
+/*
+* FIELDS
+*	size
+*		Specifies the maximum number of work completions that may be on the
+*		completion queue.  If the creation call is successful, the actual
+*		size of the completion queue will be returned.  The actual size of
+*		the CQ will be greater than or equal to the requested size.
+*
+*	pfn_comp_cb
+*		A callback that is invoked whenever a signaled completion occurs on
+*		the completion queue.  This field is mutually exclusive with the
+*		h_wait_obj field.
+*
+*	h_wait_obj
+*		A wait object that is triggered whenever a signaled completion occurs
+*		on the completion queue.  This field is mutually exclusive with the
+*		pfn_comp_cb field and is only valid for user-mode clients.  The wait
+*		object must be ready for use when the call to ib_create_cq is invoked.
+*
+* NOTES
+*	Clients must specify either a wait object or a callback when creating a
+*	completion queue.  When a signaled completion occurs on the completion
+*	queue, the client will be notified through the callback or by
+*	signaling the specified wait object.
+*
+* SEE ALSO
+*	ib_create_cq, ib_pfn_comp_cb_t
+*****/
+
+
+/****f* Access Layer/ib_create_cq
+* NAME
+*	ib_create_cq
+*
+* DESCRIPTION
+*	Creates a completion queue and returns its handle to the user.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_cq(
+	IN	const	ib_ca_handle_t			h_ca,
+	IN OUT		ib_cq_create_t* const	p_cq_create,
+	IN	const	void* const				cq_context,
+	IN	const	ib_pfn_event_cb_t		pfn_cq_event_cb OPTIONAL,
+	OUT			ib_cq_handle_t* const	ph_cq );
+/*
+* PARAMETERS
+*	h_ca
+*		[in] A handle to an open channel adapter.
+*
+*	p_cq_create
+*		[in] Attributes necessary to allocate and initialize the
+*		completion queue.
+*
+*	cq_context
+*		[in] A user-specified context associated with the completion queue.
+*
+*	pfn_cq_event_cb
+*		[in] User-specified error callback routine invoked after an
+*		asynchronous event has occurred on the completion queue.
+*
+*	ph_cq
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The completion queue was successfully created.
+*
+*	IB_INVALID_CA_HANDLE
+*		The channel adapter handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue attributes or handle was not
+*		provided.
+*
+*	IB_INVALID_SETTING
+*		The specified attributes that should be used to create the completion
+*		queue are invalid.  Both completion callback and wait object
+*		information were supplied or are missing.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to create the completion queue.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the completion queue.
+*
+*	IB_INVALID_CQ_SIZE
+*		The requested size of the completion queue was larger than the
+*		maximum supported by the associated channel adapter.
+*
+* NOTES
+*	This routine allocates a completion queue on the specified channel
+*	adapter.  If the completion queue cannot be allocated, an error is
+*	returned.  When creating the completion queue, users associate a context
+*	with the completion queue.
+*
+* SEE ALSO
+*	ib_query_cq, ib_modify_cq, ib_destroy_cq, ib_cq_create_t, ib_pfn_event_cb_t
+*****/
+
+
+/****f* Access Layer/ib_modify_cq
+* NAME
+*	ib_modify_cq
+*
+* DESCRIPTION
+*	Modifies the attributes associated with a completion queue, allowing the
+*	completion queue to be resized.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_modify_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN	OUT			uint32_t* const				p_size );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] A handle to an existing completion queue.
+*
+*	p_size
+*		[in/out] Specifies the new size of the completion queue.  If the
+*		modify call is successful, the actual size of the completion queue
+*		will be returned.  The actual size of the CQ will be greater than or
+*		equal to the requested size.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The completion queue was successfully modified.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue size was not provided.
+*
+*	IB_INVALID_CQ_SIZE
+*		The requested size of the completion queue was larger than the
+*		maximum supported by the associated channel adapter.
+*
+*	IB_OVERFLOW
+*		The specified size of the completion queue is smaller than the number
+*		of work completions currently on the completion queue.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to modify the completion queue.
+*
+* NOTES
+*	This routine allows a client to modify the size of a completion queue.
+*	If the new size is larger than what the associated channel adapter can
+*	support, an error is returned.  If the completion queue has valid
+*	completion entries on it and the requested size is smaller than the
+*	number of entries, an overflow error is returned and the modify
+*	operation is aborted.
+*
+* SEE ALSO
+*	ib_create_cq
+*****/
+
+
+/****f* Access Layer/ib_query_cq
+* NAME
+*	ib_query_cq
+*
+* DESCRIPTION
+*	Returns information about the specified completion queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+		OUT			uint32_t* const				p_size );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] A handle to an existing completion queue.
+*
+*	p_size
+*		[out] Upon successful completion of this call, contains the actual
+*		size of the completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The completion queue was successfully queried.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue size was not provided.
+*
+* SEE ALSO
+*	ib_create_cq
+*****/
+
+
+/****f* Access Layer/ib_destroy_cq
+* NAME
+*	ib_destroy_cq
+*
+* DESCRIPTION
+*	Destroys a completion queue.  Once destroyed, no further access to the
+*	completion queue is possible.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN		const	ib_pfn_destroy_cb_t			pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] A handle to an existing completion queue.
+*
+*	pfn_destroy_cb
+*		[in] A user-provided callback that is invoked after the
+*		completion queue has been successfully destroyed.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The destroy request was registered.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+* NOTES
+*	This call destroys an existing completion queue.  Since callbacks may be
+*	outstanding against the completion queue at the time the destroy operation
+*	is invoked, this call operates asynchronously.  The user will be
+*	notified through a callback once the destroy operation completes,
+*	indicating that no additional callbacks will be invoked for the specified
+*	completion queue.
+*
+*	If there are still queue pairs associated with the completion queue when
+*	this function is invoked, the destroy operation will fail with status
+*	IB_RESOURCE_BUSY.
+*
+* SEE ALSO
+*	ib_create_cq, ib_pfn_destroy_cb_t
+*****/
+
+
+/****f* Access Layer/ib_reg_mem
+* NAME
+*	ib_reg_mem
+*
+* DESCRIPTION
+*	Registers a virtual memory region with a channel adapter.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_mem(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	ib_mr_create_t* const		p_mr_create,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+		OUT			ib_mr_handle_t* const		ph_mr );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to an existing protection domain that the memory
+*		should be registered with.
+*
+*	p_mr_create
+*		[in] Information describing the memory region to register.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing
+*		RDMA or atomic operations to this registered memory region.
+*
+*	ph_mr
+*		[out] Upon successful completion of this call, this references a
+*		handle to the registered memory region.  This handle is used when
+*		performing data transfers and to deregister the memory.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory region was successfully registered.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain to associate with the memory region was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the memory region information, lkey, rkey, or handle
+*		was not provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to register the memory region.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to register the memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+* NOTES
+*	This routine registers a virtual memory region with a channel adapter.
+*	Memory must be registered before being used in a data transfer operation.
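+*
+* EXAMPLE
+*	A minimal sketch of registering a user buffer.  It assumes h_pd was
+*	returned by ib_alloc_pd; buf and len are a hypothetical caller-supplied
+*	buffer and its length:
+*
+*		ib_mr_create_t	mr_create;
+*		ib_mr_handle_t	h_mr;
+*		net32_t			lkey, rkey;
+*		ib_api_status_t	status;
+*
+*		cl_memclr( &mr_create, sizeof( mr_create ) );
+*		mr_create.vaddr = buf;
+*		mr_create.length = len;
+*		mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
+*
+*		status = ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &h_mr );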
+*
+* SEE ALSO
+*	ib_dereg_mr, ib_reg_phys, ib_reg_shared, ib_mr_create_t
+*****/
+
+
+/****f* Access Layer/ib_reg_phys
+* NAME
+*	ib_reg_phys
+*
+* DESCRIPTION
+*	Registers a physical memory region with a channel adapter.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_phys(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	ib_phys_create_t* const		p_phys_create,
+	IN	OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+		OUT			ib_mr_handle_t* const		ph_mr );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to an existing protection domain that the memory
+*		should be registered with.
+*
+*	p_phys_create
+*		[in] Information describing the memory region to register.
+*
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for the
+*		start of the physical region.  On output, references the actual
+*		virtual address assigned to the registered region.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing
+*		RDMA or atomic operations to this registered memory region.
+*
+*	ph_mr
+*		[out] Upon successful completion of this call, this references a
+*		handle to the registered memory region.  This handle is used when
+*		performing data transfers and to deregister the memory.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The physical memory region was successfully registered.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain to associate with the physical memory region
+*		was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the physical memory region information, virtual address,
+*		lkey, rkey, or handle was not provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to register the physical memory region.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to register the physical memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+* NOTES
+*	This routine registers an array of physical pages as a single virtually
+*	contiguous region with a channel adapter.  Memory must be registered
+*	before being used in a data transfer operation.
+*
+* SEE ALSO
+*	ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_phys_create_t
+*****/
+
+
+/****f* Access Layer/ib_query_mr
+* NAME
+*	ib_query_mr
+*
+* DESCRIPTION
+*	Query the current attributes of a memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_mr(
+	IN		const	ib_mr_handle_t				h_mr,
+		OUT			ib_mr_attr_t* const			p_mr_attr );
+/*
+* PARAMETERS
+*	h_mr
+*		[in] A handle to a registered memory region.
+*
+*	p_mr_attr
+*		[out] A reference to a structure where the registered memory attributes
+*		will be copied.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory region attributes were returned successfully.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the memory region attributes was not provided.
+*
+* NOTES
+*	This routine returns information about the specified registered memory
+*	region.
+*
+* SEE ALSO
+*	ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_mr_attr_t
+*****/
+
+
+/****f* Access Layer/ib_rereg_mem
+* NAME
+*	ib_rereg_mem
+*
+* DESCRIPTION
+*	Modifies the attributes of an existing memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_rereg_mem(
+	IN		const	ib_mr_handle_t				h_mr,
+	IN		const	ib_mr_mod_t					mr_mod_mask,
+	IN		const	ib_mr_create_t* const		p_mr_create OPTIONAL,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+	IN		const	ib_pd_handle_t				h_pd OPTIONAL );
+/*
+* PARAMETERS
+*	h_mr
+*		[in] A handle to the registered memory region being modified.
+*
+*	mr_mod_mask
+*		[in] A mask used to specify which attributes of the memory region are
+*		being modified.
+*
+*	p_mr_create
+*		[in] This references information needed to perform the modification on
+*		the registered memory region.  This parameter may be NULL if only the
+*		protection domain will be modified.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing RDMA
+*		or atomic operations to this registered memory region.
+*
+*	h_pd
+*		[in] An optional protection domain handle used to change the
+*		protection domain of the registered region.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory region attributes were modified successfully.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the lkey or rkey was not provided or the specified
+*		modify mask is invalid.
+*
+*	IB_INVALID_SETTING
+*		The specified memory region attributes are invalid.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to modify the memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+*	IB_RESOURCE_BUSY
+*		The memory region has windows bound to it.
+*
+* NOTES
+*	This routine modifies the attributes of the specified memory region.
+*	The memory being modified may have been registered using either virtual
+*	or physical registration.  Conceptually, this routine is equivalent to
+*	calling ib_dereg_mr, followed by ib_reg_mem, but may be higher
+*	performing.
+*
+* SEE ALSO
+*	ib_reg_mem, ib_reg_phys, ib_dereg_mr, ib_mr_mod_t, ib_mr_create_t
+*****/
+
+
+/****f* Access Layer/ib_rereg_phys
+* NAME
+*	ib_rereg_phys
+*
+* DESCRIPTION
+*	Modifies the attributes of an existing memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_rereg_phys(
+	IN		const	ib_mr_handle_t				h_mr,
+	IN		const	ib_mr_mod_t					mr_mod_mask,
+	IN		const	ib_phys_create_t* const		p_phys_create OPTIONAL,
+	IN	OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+	IN		const	ib_pd_handle_t				h_pd OPTIONAL );
+/*
+* PARAMETERS
+*	h_mr
+*		[in] A handle to the registered memory region being modified.
+*
+*	mr_mod_mask
+*		[in] A mask used to specify which attributes of the memory region are
+*		being modified.
+*
+*	p_phys_create
+*		[in] This references information needed to perform the modification on
+*		the registered memory region.  This parameter may be NULL if
+*		only the protection domain will be modified.
+*
+*	p_vaddr
+*		[in/out] On input, this specifies the requested virtual address for the
+*		start of the physical region.  On output, this references the actual
+*		virtual address assigned to the registered region.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing RDMA
+*		or atomic operations to this registered memory region.
+*
+*	h_pd
+*		[in] An optional protection domain handle used to change the
+*		protection domain of the registered region.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory region attributes were modified successfully.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the virtual address, lkey, or rkey was not provided or
+*		the specified modify mask is invalid.
+*
+*	IB_INVALID_SETTING
+*		The specified memory region attributes are invalid.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to modify the memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+*	IB_RESOURCE_BUSY
+*		The memory region has windows bound to it.
+*
+* NOTES
+*	This routine modifies the attributes of the specified memory region.
+*	The memory being modified may have been registered using either virtual
+*	or physical registration.  Conceptually, this routine is equivalent to
+*	calling ib_dereg_mr, followed by ib_reg_phys, but may be higher
+*	performing.
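+*
+* EXAMPLE
+*	A minimal sketch of moving a physically registered region to another
+*	protection domain.  It assumes h_mr and h_new_pd are valid handles and
+*	that only the PD is being changed:
+*
+*		uint64_t		vaddr = 0;
+*		net32_t			lkey, rkey;
+*		ib_api_status_t	status;
+*
+*		status = ib_rereg_phys( h_mr, IB_MR_MOD_PD, NULL,
+*			&vaddr, &lkey, &rkey, h_new_pd );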
+*
+* SEE ALSO
+*	ib_reg_mem, ib_reg_phys, ib_dereg_mr, ib_mr_mod_t, ib_mr_create_t
+*****/
+
+
+/****f* Access Layer/ib_reg_shared
+* NAME
+*	ib_reg_shared
+*
+* DESCRIPTION
+*	Registers a memory region that has the same physical pages as an
+*	existing registered memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_shared(
+	IN		const	ib_mr_handle_t				h_mr,
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	ib_access_t					access_ctrl,
+	IN	OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+		OUT			ib_mr_handle_t* const		ph_mr );
+/*
+* PARAMETERS
+*	h_mr
+*		[in] A handle to an existing registered memory region that this
+*		registration should share physical pages with.
+*
+*	h_pd
+*		[in] Handle to the PD on which memory is being registered.
+*
+*	access_ctrl
+*		[in] Access rights of the registered region.
+*
+*	p_vaddr
+*		[in/out] On input, this specifies the requested virtual address for the
+*		start of the physical region.  On output, this references the actual
+*		virtual address assigned to the registered region.  This is always a
+*		64-bit quantity to support registering more than 4GB of memory on
+*		32-bit systems with PAE.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing RDMA
+*		or atomic operations to this registered memory region.
+*
+*	ph_mr
+*		[out] Upon successful completion of this call, this references a handle
+*		to the registered memory region.  This handle is used when performing
+*		data transfers and to deregister the memory.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared memory region was successfully registered.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the virtual address, lkey, rkey, or handle was not
+*		provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to register the shared memory region.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to register the shared memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+* NOTES
+*	This routine registers a memory region that shares the same set of
+*	physical pages associated with an existing registered memory region.
+*
+* SEE ALSO
+*	ib_dereg_mr, ib_reg_mem, ib_reg_phys, ib_mr_create_t
+*****/
+
+
+/****f* Access Layer/ib_reg_shmid
+* NAME
+*	ib_reg_shmid
+*
+* DESCRIPTION
+*	Registers a memory region to be shared across multiple processes.
+*	The memory is referenced by a shared memory identifier.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_shmid(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	ib_shmid_t					shmid,
+	IN		const	ib_mr_create_t* const		p_mr_create,
+		OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+		OUT			ib_mr_handle_t* const		ph_mr );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to an existing protection domain that the memory
+*		should be registered with.
+*
+*	shmid
+*		[in] An identifier for the shared memory region.
+*
+*	p_mr_create
+*		[in] Information describing the attributes of the memory region to
+*		register.
+*
+*	p_vaddr
+*		[out] The HCA assigned, HCA relative virtual address for the
+*		memory region.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing RDMA
+*		or atomic operations to this registered memory region.
+*
+*	ph_mr
+*		[out] Upon successful completion of this call, this references a handle
+*		to the registered memory region.  This handle is used when performing
+*		data transfers and to deregister the memory.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The shared memory region was successfully registered.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the memory region information, lkey, rkey, or handle
+*		was not provided.
+*
+*	IB_INVALID_SETTING
+*		The length and page mapping for the memory region do not match those
+*		of the region identified by the provided SHMID.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to register the shared memory region.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to register the shared memory region.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+* NOTES
+*	This routine registers a memory region that is shared between processes.
+*	The region being registered is identified through a shared memory
+*	identifier.  The registered region shares hardware resources as much
+*	as possible.
+*
+* SEE ALSO
+*	ib_dereg_mr, ib_reg_mem, ib_reg_shared, ib_mr_create_t
+*****/
+
+
+/****f* Access Layer/ib_dereg_mr
+* NAME
+*	ib_dereg_mr
+*
+* DESCRIPTION
+*	Deregisters a registered memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_dereg_mr(
+	IN		const	ib_mr_handle_t				h_mr );
+/*
+* PARAMETERS
+*	h_mr
+*		[in] A handle to a registered memory region that will be unregistered.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory region was successfully deregistered.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_RESOURCE_BUSY
+*		The memory region has memory windows bound to it.
+*
+* NOTES
+*	This routine deregisters a memory region with a channel adapter.  The
+*	region may be deregistered only if there are no memory windows or
+*	existing shared memory regions currently bound to the region.  Work
+*	requests referencing this region when it is deregistered will fail
+*	with an IB_WCS_LOCAL_PROTECTION_ERR error.
+*
+* SEE ALSO
+*	ib_reg_mem, ib_reg_phys, ib_reg_shared
+*****/
+
+
+#ifdef CL_KERNEL
+
+/****f* Access Layer/mlnx_create_fmr
+* NAME
+*	mlnx_create_fmr
+*
+* DESCRIPTION
+*	Creates a Mellanox fast memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_create_fmr(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	mlnx_fmr_create_t* const	p_fmr_create,
+		OUT			mlnx_fmr_handle_t* const	ph_fmr );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to an existing protection domain that the fast memory
+*		region should be associated with.
+*
+*	p_fmr_create
+*		[in] Attributes necessary to allocate and initialize the fast
+*		memory region.
+*
+*	ph_fmr
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created fast memory region.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The fast memory region was successfully created.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the fast memory region attributes or handle was not
+*		provided.
+*
+*	IB_INVALID_SETTING
+*		The specified fast memory region attributes were invalid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the fast memory region.
+*
+*	IB_UNSUPPORTED
+*		Fast memory regions are not supported by the channel adapter.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	mlnx_destroy_fmr, mlnx_fmr_create_t
+*****/
+
+
+/****f* Access Layer/mlnx_map_phys_fmr
+* NAME
+*	mlnx_map_phys_fmr
+*
+* DESCRIPTION
+*	Maps a list of physical pages to a Mellanox fast memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_map_phys_fmr(
+	IN		const	mlnx_fmr_handle_t			h_fmr,
+	IN		const	uint64_t* const				paddr_list,
+	IN		const	int							list_len,
+	IN	OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey );
+/*
+* PARAMETERS
+*	h_fmr
+*		[in] A handle to the fast memory region that these pages map to.
+*
+*	paddr_list
+*		[in] An array of physical addresses to map.
+*
+*	list_len
+*		[in] The number of physical addresses in the paddr_list array.
+*
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for the
+*		start of the FMR.  On output, references the actual
+*		virtual address assigned to the FMR.
+*
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing
+*		RDMA or atomic operations to this registered memory region.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The physical pages were successfully mapped to the fast memory
+*		region.
+*
+*	IB_INVALID_MR_HANDLE
+*		The fast memory region handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the physical address list, virtual address, lkey, or
+*		rkey was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to map the fast memory region.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
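+*
+* EXAMPLE
+*	A minimal sketch of mapping two pages to an FMR.  It assumes h_fmr was
+*	returned by mlnx_create_fmr; page0 and page1 are hypothetical
+*	page-aligned physical addresses:
+*
+*		uint64_t		paddr_list[2];
+*		uint64_t		vaddr = 0;
+*		net32_t			lkey, rkey;
+*		ib_api_status_t	status;
+*
+*		paddr_list[0] = page0;
+*		paddr_list[1] = page1;
+*
+*		status = mlnx_map_phys_fmr( h_fmr, paddr_list, 2, &vaddr,
+*			&lkey, &rkey );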
+*
+* SEE ALSO
+*	mlnx_destroy_fmr, mlnx_fmr_create_t
+*****/
+
+
+/****f* Access Layer/mlnx_unmap_fmr
+* NAME
+*	mlnx_unmap_fmr
+*
+* DESCRIPTION
+*	Unmaps the physical pages previously mapped to a Mellanox fast memory
+*	region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_unmap_fmr(
+	IN		const	mlnx_fmr_handle_t			h_fmr );
+/*
+* PARAMETERS
+*	h_fmr
+*		[in] A handle to the fast memory region to unmap.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The fast memory region was successfully unmapped.
+*
+*	IB_INVALID_MR_HANDLE
+*		The fast memory region handle was invalid.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	mlnx_map_phys_fmr, mlnx_destroy_fmr
+*****/
+
+
+/****f* Access Layer/mlnx_destroy_fmr
+* NAME
+*	mlnx_destroy_fmr
+*
+* DESCRIPTION
+*	Destroys an existing Mellanox fast memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_destroy_fmr(
+	IN		const	mlnx_fmr_handle_t			h_fmr );
+/*
+* PARAMETERS
+*	h_fmr
+*		[in] A handle to the fast memory region to destroy.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The fast memory region was successfully destroyed.
+*
+*	IB_INVALID_MR_HANDLE
+*		The fast memory region handle was invalid.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	mlnx_create_fmr, mlnx_fmr_create_t
+*****/
+
+
+/****f* Access Layer/mlnx_create_fmr_pool
+* NAME
+*	mlnx_create_fmr_pool
+*
+* DESCRIPTION
+*	Creates a pool of fast memory region elements.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_create_fmr_pool(
+	IN		const	ib_pd_handle_t				h_pd,
+	IN		const	mlnx_fmr_pool_create_t		*p_fmr_pool_attr,
+		OUT			mlnx_fmr_pool_handle_t* const	ph_pool );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to the protection domain with which the pool's fast
+*		memory regions are associated.
+*
+*	p_fmr_pool_attr
+*		[in] Attributes necessary to allocate and initialize the FMR pool.
+*
+*	ph_pool
+*		[out] Upon successful completion of this call, this references a
+*		handle to the newly created FMR pool.
+*
+* RETURN VALUES
+TODO
+*
+* NOTES
+TODO
+*
+* SEE ALSO
+*	mlnx_destroy_fmr_pool
+*****/
+
+
+/****f* Access Layer/mlnx_destroy_fmr_pool
+* NAME
+*	mlnx_destroy_fmr_pool
+*
+* DESCRIPTION
+*	Destroys an FMR pool and all associated resources.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_destroy_fmr_pool(
+	IN		const	mlnx_fmr_pool_handle_t		h_pool );
+/*
+* PARAMETERS
+*	h_pool
+*		[in] A handle to the FMR pool to destroy.
+*
+* RETURN VALUES
+TODO
+*
+* NOTES
+TODO
+*
+* SEE ALSO
+*	mlnx_create_fmr_pool
+*****/
+
+
+/****f* Access Layer/mlnx_map_phys_fmr_pool
+* NAME
+*	mlnx_map_phys_fmr_pool
+*
+* DESCRIPTION
+*	Maps a list of physical pages to a fast memory region taken from the
+*	pool.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_map_phys_fmr_pool(
+	IN		const	mlnx_fmr_pool_handle_t		h_pool,
+	IN		const	uint64_t* const				paddr_list,
+	IN		const	int							list_len,
+	IN	OUT			uint64_t* const				p_vaddr,
+		OUT			net32_t* const				p_lkey,
+		OUT			net32_t* const				p_rkey,
+		OUT			mlnx_fmr_pool_el_t			*pp_fmr_el );
+/*
+* PARAMETERS
+*	h_pool
+*		[in] A handle to an existing FMR pool.
+*
+*	paddr_list
+*		[in] An array of physical addresses to map.
+*
+*	list_len
+*		[in] The number of physical addresses in the paddr_list array.
+*
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for the
+*		start of the mapping.  On output, references the actual virtual
+*		address assigned to the mapping.
+*
+*	p_lkey
+*		[out] The local access key associated with the mapping.
+*
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing
+*		RDMA or atomic operations to this mapping.
+*
+*	pp_fmr_el
+*		[out] Upon successful completion of this call, references the pool
+*		element used for the mapping.  This element is passed to
+*		mlnx_unmap_fmr_pool to release the mapping.
+*
+* RETURN VALUES
+TODO
+*
+* NOTES
+TODO
+*
+* SEE ALSO
+*	mlnx_create_fmr_pool, mlnx_unmap_fmr_pool
+*****/
+
+
+/****f* Access Layer/mlnx_unmap_fmr_pool
+* NAME
+*	mlnx_unmap_fmr_pool
+*
+* DESCRIPTION
+*	Unmaps a pool element, returning its fast memory region to the pool.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+mlnx_unmap_fmr_pool(
+	IN				mlnx_fmr_pool_el_t			p_fmr_el );
+/*
+* PARAMETERS
+*	p_fmr_el
+*		[in] The pool element returned by mlnx_map_phys_fmr_pool.
+*
+* RETURN VALUES
+TODO
+*
+* NOTES
+TODO
+*
+* SEE ALSO
+*	mlnx_map_phys_fmr_pool
+*****/
+
+
+/****f* Access Layer/mlnx_flush_fmr_pool
+* NAME
+*	mlnx_flush_fmr_pool
+*
+* DESCRIPTION
+*	Flushes the pool, releasing the cached mappings of unmapped fast
+*	memory regions.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+mlnx_flush_fmr_pool(
+	IN		const	mlnx_fmr_pool_handle_t		h_pool );
+/*
+* PARAMETERS
+*	h_pool
+*		[in] A handle to the FMR pool to flush.
+*
+* RETURN VALUES
+TODO
+*
+* NOTES
+TODO
+*
+* SEE ALSO
+*	mlnx_create_fmr_pool
+*****/
+#endif	/* CL_KERNEL */
+
+/****f* Access Layer/ib_create_mw
+* NAME
+*	ib_create_mw
+*
+* DESCRIPTION
+*	Creates a memory window associated with the specified protection domain.
+*	Newly created windows are not bound to any specific memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_mw(
+	IN		const	ib_pd_handle_t				h_pd,
+		OUT			net32_t* const				p_rkey,
+		OUT			ib_mw_handle_t* const		ph_mw );
+/*
+* PARAMETERS
+*	h_pd
+*		[in] A handle to an existing protection domain that the memory window
+*		should be created within.
+*
+*	p_rkey
+*		[out] The current rkey associated with the memory window.  This key is
+*		used to bind the window to a registered memory region.
+*
+*	ph_mw
+*		[out] Upon successful completion of this call, this references a handle
+*		to the memory window.  This handle is used to bind and destroy
+*		the window.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory window was successfully created.
+*
+*	IB_INVALID_PD_HANDLE
+*		The protection domain handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the memory window rkey or handle was not provided.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to create the memory window.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to create the memory window.
+*
+* NOTES
+*	This routine creates an unbound memory window associated with a specified
+*	protection domain.  The memory window cannot be used for data transfer
+*	operations until being bound to a registered memory region.
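+*
+* EXAMPLE
+*	A minimal sketch of creating a memory window.  It assumes h_pd was
+*	returned by ib_alloc_pd:
+*
+*		ib_mw_handle_t	h_mw;
+*		net32_t			rkey;
+*		ib_api_status_t	status;
+*
+*		status = ib_create_mw( h_pd, &rkey, &h_mw );
+*		/* The window must be bound via ib_bind_mw before use. */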
+*
+* SEE ALSO
+*	ib_destroy_mw, ib_query_mw, ib_bind_mw
+*****/
+
+
+/****f* Access Layer/ib_query_mw
+* NAME
+*	ib_query_mw
+*
+* DESCRIPTION
+*	Query the current attributes of a memory window.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_query_mw(
+	IN		const	ib_mw_handle_t				h_mw,
+		OUT			ib_pd_handle_t* const		ph_pd,
+		OUT			net32_t* const				p_rkey );
+/*
+* PARAMETERS
+*	h_mw
+*		[in] A handle to an existing memory window.
+*
+*	ph_pd
+*		[out] Upon successful completion of this call, this will reference
+*		the protection domain associated with this memory window.
+*
+*	p_rkey
+*		[out] Upon successful completion of this call, this will reference
+*		the current rkey associated with this memory window.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory window attributes were returned successfully.
+*
+*	IB_INVALID_MW_HANDLE
+*		The memory window handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the protection domain handle or rkey was not provided.
+*
+* NOTES
+*	This routine returns information about the specified memory window.
+*
+* SEE ALSO
+*	ib_create_mw
+*****/
+
+
+/****f* Access Layer/ib_bind_mw
+* NAME
+*	ib_bind_mw
+*
+* DESCRIPTION
+*	Binds a memory window to a registered memory region.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_bind_mw(
+	IN		const	ib_mw_handle_t				h_mw,
+	IN		const	ib_qp_handle_t				h_qp,
+	IN				ib_bind_wr_t* const			p_mw_bind,
+		OUT			net32_t* const				p_rkey );
+/*
+* PARAMETERS
+*	h_mw
+*		[in] A handle to an existing memory window.
+*
+*	h_qp
+*		[in] A handle to a queue pair that the bind request will be posted to.
+*
+*	p_mw_bind
+*		[in] Describes the memory window bind request.
+*
+*	p_rkey
+*		[out] The new rkey for the memory window that may be used by a remote
+*		end-point when performing RDMA or atomic operations to this memory
+*		region.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory window bind operation was successfully posted.
+*
+*	IB_INVALID_MW_HANDLE
+*		The memory window handle was invalid.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the memory window bind work request or rkey was not
+*		provided.
+*
+*	IB_INVALID_SERVICE_TYPE
+*		The queue pair configuration does not support this type of service.
+*
+*	IB_INVALID_MR_HANDLE
+*		The memory region handle was invalid.
+*
+*	IB_INVALID_RKEY
+*		The rkey is invalid for the memory region being bound.
+*
+*	IB_UNSUPPORTED
+*		The requested access rights are not supported by the channel adapter.
+*
+*	IB_INVALID_PERMISSION
+*		The requested access rights are invalid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to bind the memory window.
+*
+* NOTES
+*	This routine posts a request to bind a memory window to a registered
+*	memory region.  The bind operation occurs on the specified queue pair,
+*	but the bound region is usable across all queue pairs within the same
+*	protection domain.
+*
+* SEE ALSO
+*	ib_create_mw, ib_bind_wr_t
+*****/
+
+
+/****f* Access Layer/ib_destroy_mw
+* NAME
+*	ib_destroy_mw
+*
+* DESCRIPTION
+*	Destroys a memory window.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_mw(
+	IN		const	ib_mw_handle_t				h_mw );
+/*
+* PARAMETERS
+*	h_mw
+*		[in] A handle to an existing memory window.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The memory window was successfully destroyed.
+*
+*	IB_INVALID_MW_HANDLE
+*		The memory window handle was invalid.
+*
+* NOTES
+*	This routine deallocates a window entry created via ib_create_mw.
+*	Once this operation is complete, future accesses to the window will fail.
+*
+* SEE ALSO
+*	ib_create_mw
+*****/
+
+
+/****f* Access Layer/ib_post_send
+* NAME
+*	ib_post_send
+*
+* DESCRIPTION
+*	This routine posts a work request to the send queue of a queue pair.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_post_send(
+	IN		const	ib_qp_handle_t				h_qp,
+	IN				ib_send_wr_t* const			p_send_wr,
+		OUT			ib_send_wr_t				**pp_send_failure OPTIONAL );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] The queue pair to which this work request is being submitted.
+*
+*	p_send_wr
+*		[in] A reference to the head of the work request list.
+*
+*	pp_send_failure
+*		[out] If the post send operation failed, this references the work
+*		request in the p_send_wr list where the first failure occurred.
+*		This parameter may be NULL if only a single work request is being
+*		posted to the QP.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		All work requests were successfully posted.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the send work request list was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		The number of posted work requests exceeds the current depth available
+*		on the send queue.
+*
+*	IB_INVALID_WR_TYPE
+*		The work request type was invalid.
+*
+*	IB_INVALID_QP_STATE
+*		The current queue pair state does not allow posting sends.
+*
+*	IB_INVALID_MAX_SGE
+*		The number of work request scatter gather elements exceeds the queue
+*		pair configuration.
+*
+*	IB_UNSUPPORTED
+*		The requested operation is not supported by the channel adapter.
+*
+* NOTES
+*	This routine posts a work request to the send queue of a queue pair.
+*	The type of work to perform is defined by the p_send_wr parameter.
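+*
+* EXAMPLE
+*	A minimal sketch of posting a single signaled send.  It assumes h_qp is
+*	a connected QP in the RTS state; buf, len, and lkey describe a
+*	registered buffer:
+*
+*		ib_local_ds_t	ds;
+*		ib_send_wr_t	wr;
+*		ib_api_status_t	status;
+*
+*		ds.vaddr = (uintn_t)buf;
+*		ds.length = len;
+*		ds.lkey = lkey;
+*
+*		cl_memclr( &wr, sizeof( wr ) );
+*		wr.wr_type = WR_SEND;
+*		wr.send_opt = IB_SEND_OPT_SIGNALED;
+*		wr.num_ds = 1;
+*		wr.ds_array = &ds;
+*
+*		status = ib_post_send( h_qp, &wr, NULL );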
+*
+* SEE ALSO
+*	ib_send_wr_t
+*****/
+
+
+/****f* Access Layer/ib_post_recv
+* NAME
+*	ib_post_recv
+*
+* DESCRIPTION
+*	This routine posts a work request to the receive queue of a queue pair.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_post_recv(
+	IN		const	ib_qp_handle_t				h_qp,
+	IN				ib_recv_wr_t* const			p_recv_wr,
+		OUT			ib_recv_wr_t				**pp_recv_failure OPTIONAL );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] The queue pair to which this work request is being submitted.
+*
+*	p_recv_wr
+*		[in] A reference to the head of the work request list.
+*
+*	pp_recv_failure
+*		[out] If the post receive operation failed, this references the work
+*		request in the p_recv_wr list where the first failure occurred.
+*		This parameter may be NULL if only a single work request is being
+*		posted to the QP.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		All work requests were successfully posted.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the receive work request list was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		The number of posted work requests exceeds the current depth available
+*		on the receive queue.
+*
+*	IB_INVALID_WR_TYPE
+*		The work request type was invalid.
+*
+*	IB_INVALID_QP_STATE
+*		The current queue pair state does not allow posting receives.
+*
+* NOTES
+*	This routine posts a work request to the receive queue of a queue pair.
+*	The type of work to perform is defined by the p_recv_wr parameter.  This
+*	call is used to post data buffers to receive incoming message sends.
+*
+* SEE ALSO
+*	ib_recv_wr_t
+*****/
+
+
+/****f* Access Layer/ib_send_mad
+* NAME
+*	ib_send_mad
+*
+* DESCRIPTION
+*	This routine posts a MAD work request to the specified MAD service.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_send_mad(
+	IN		const	ib_mad_svc_handle_t			h_mad_svc,
+	IN				ib_mad_element_t* const		p_mad_element_list,
+		OUT			ib_mad_element_t			**pp_mad_failure OPTIONAL );
+/*
+* PARAMETERS
+*	h_mad_svc
+*		[in] The MAD service to which this work request is being submitted.
+*
+*	p_mad_element_list
+*		[in] A list of MAD elements that will be posted to the send queue.
+*
+*	pp_mad_failure
+*		[out] If the send MAD operation failed, this references the MAD
+*		element in the p_mad_element_list where the first failure occurred.
+*		This parameter is optional if p_mad_element_list contains a single
+*		MAD.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The MAD element list was successfully posted.
+*
+*	IB_INVALID_HANDLE
+*		The MAD service handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the MAD element list was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available to complete
+*		the request.
+*
+*	IB_INVALID_SETTING
+*		The MAD element RMPP version is not supported by the access layer.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to complete the request.
+*
+* NOTES
+*	This routine posts a work request to send a MAD on a MAD service.  All
+*	MAD elements successfully posted by this call are under the control of
+*	the access layer and should not be accessed until the send operation
+*	completes.
+*
+*	In order to guarantee that MADs sent by separate clients do not use the
+*	same transaction ID, the access layer reserves the upper 32-bits of the
+*	TID on all unsolicited MADs.  MADs sent with the response bit set will
+*	not have their transaction IDs modified.  Unsolicited MADs will have the
+*	upper 32-bits of their TID set to an access layer generated client ID.
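+*
+* EXAMPLE
+*	A minimal sketch of sending a single MAD.  It assumes p_mad_element was
+*	obtained from a MAD pool via ib_get_mad and fully initialized by the
+*	caller:
+*
+*		ib_api_status_t	status;
+*
+*		/* A single MAD is posted, so no failure pointer is needed. */
+*		status = ib_send_mad( h_mad_svc, p_mad_element, NULL );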
+*
+* SEE ALSO
+*	ib_mad_element_t, ib_cancel_mad
+*****/
+
+
+/****f* Access Layer/ib_cancel_mad
+* NAME
+*	ib_cancel_mad
+*
+* DESCRIPTION
+*	This routine cancels a pending send transaction to a MAD service.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cancel_mad(
+	IN		const	ib_mad_svc_handle_t			h_mad_svc,
+	IN				ib_mad_element_t* const		p_mad_element );
+/*
+* PARAMETERS
+*	h_mad_svc
+*		[in] The MAD service to which the send operation was directed.
+*
+*	p_mad_element
+*		[in] A handle to a sent MAD element.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The requested MAD transaction was located and canceled.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the MAD element was not provided.
+*
+*	IB_NOT_FOUND
+*		The requested transaction was not located or had already completed.
+*
+* NOTES
+*	This routine cancels a pending send transaction to a MAD service.  If
+*	the request is successfully located and has not yet completed, it will
+*	be completed with its status set to IB_CANCELED.  The canceled operation
+*	will be returned to the user through the normal MAD completion callback.
+*	If the send transaction has already completed, this call will return
+*	IB_NOT_FOUND.
+*
+* SEE ALSO
+*	ib_send_mad
+*****/
+
+
+/****f* Access Layer/ib_peek_cq
+* NAME
+*	ib_peek_cq
+*
+* DESCRIPTION
+*	Returns the number of entries currently on the completion queue.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_peek_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+		OUT			uint32_t* const				p_n_cqes );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue to peek.
+*
+*	p_n_cqes
+*		[out] Upon successful completion of this call, contains the number
+*		of completion queue entries currently on the completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The peek operation completed successfully.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue entry count was not provided.
+*
+*	IB_UNSUPPORTED
+*		This operation is not supported by the channel adapter.
+*
+* NOTES
+*	The value returned is a snapshot of the number of completion queue
+*	entries currently on the completion queue.  Support for this operation
+*	by a channel adapter vendor is optional.
+*
+* SEE ALSO
+*	ib_create_cq, ib_poll_cq, ib_rearm_cq, ib_rearm_n_cq
+*****/
+
+
+/****f* Access Layer/ib_poll_cq
+* NAME
+*	ib_poll_cq
+*
+* DESCRIPTION
+*	Checks a completion queue for completed work requests.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_poll_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN	OUT			ib_wc_t** const				pp_free_wclist,
+		OUT			ib_wc_t** const				pp_done_wclist );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] A handle to a completion queue to check for completions on.
+*
+*	pp_free_wclist
+*		[in/out] On input, a list of work completion structures provided by
+*		the client.  These are used to report completed work requests through
+*		the pp_done_wclist.
+*
+*		On output, this contains the list of work completions structures for
+*		which no work completion was found.
+*
+*	pp_done_wclist
+*		[out] A list of work completions retrieved from the completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The poll operation completed successfully.  If the list referenced by
+*		pp_free_wclist is empty on return, there are potentially more
+*		completions available to retrieve.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the free or done work completion list was not provided.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_NOT_FOUND
+*		No completed work requests were removed from the completion queue.
+*
+* NOTES
+*	This routine retrieves completed work requests from the specified
+*	completion queue.  This call will retrieve all completed requests,
+*	up to the number of work completion structures referenced by the
+*	pp_free_wclist.  Completed requests will be returned through the
+*	pp_done_wclist parameter.
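+*
+* EXAMPLE
+*	A minimal sketch of draining a CQ.  It assumes free_list is the head of
+*	a caller-allocated chain of ib_wc_t structures linked via p_next:
+*
+*		ib_wc_t		*p_free = free_list;
+*		ib_wc_t		*p_done;
+*
+*		while( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
+*		{
+*			/* Process the completions on p_done, then chain them
+*			 * back onto p_free before polling again.
+*			 */
+*		}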
+*
+* SEE ALSO
+*	ib_create_cq, ib_post_send, ib_post_recv, ib_bind_mw, ib_wc_t
+*****/
+
+
+/****f* Access Layer/ib_rearm_cq
+* NAME
+*	ib_rearm_cq
+*
+* DESCRIPTION
+*	This indicates that the completion queue should notify the client when
+*	the next completion is added.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_rearm_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN		const	boolean_t					solicited );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue to rearm.
+*
+*	solicited
+*		[in] A flag indicating whether the request is to generate a
+*		notification on the next entry, if set to FALSE, or on the next
+*		solicited entry being added to the completion queue, if set to TRUE.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The completion queue rearm request was registered successfully.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+* NOTES
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next completion queue entry is added to this CQ.
+*
+* SEE ALSO
+*	ib_create_cq, ib_peek_cq, ib_poll_cq, ib_rearm_n_cq
+*****/
+
+
+/****f* Access Layer/ib_rearm_n_cq
+* NAME
+*	ib_rearm_n_cq
+*
+* DESCRIPTION
+*	This indicates that the completion queue should notify the client when
+*	the next N completions have been added to this CQ.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_rearm_n_cq(
+	IN		const	ib_cq_handle_t				h_cq,
+	IN		const	uint32_t					n_cqes );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue to rearm.
+*
+*	n_cqes
+*		[in] The number of completion queue entries to be added to the
+*		completion queue before notifying the client.  This value must be
+*		greater than or equal to one and less than or equal to the size
+*		of the completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The completion queue rearm request was registered successfully.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		The requested number of completion queue entries was invalid.
+*
+*	IB_UNSUPPORTED
+*		This operation is not supported by the channel adapter.
+*
+* NOTES
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next N completions have been added to this CQ regardless
+*	of the completion type (solicited or unsolicited).  Any CQ entries that
+*	existed before the rearm is enabled will not result in a call to the
+*	handler.  Support for this operation by a channel adapter vendor is
+*	optional.
+*
+* SEE ALSO
+*	ib_create_cq, ib_peek_cq, ib_poll_cq, ib_rearm_cq
+*****/
+
+
+/****s* Access Layer/ib_mcast_rec_t
+* NAME
+*	ib_mcast_rec_t
+*
+* DESCRIPTION
+*	Information returned as a result of joining a multicast group.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mcast_rec
+{
+	const void* __ptr64			mcast_context;
+	ib_api_status_t				status;
+	ib_net16_t					error_status;
+
+	ib_mcast_handle_t			h_mcast;
+	ib_member_rec_t* __ptr64	p_member_rec;
+
+}	ib_mcast_rec_t;
+/*
+* FIELDS
+*	mcast_context
+*		User-defined context information associated with the multicast join
+*		request.
+*
+*	status
+*		Indicates the success of the multicast group join operation.
+*
+*	error_status
+*		Provides additional error information that was returned by the SA.
+*		This field is only valid if status is set to IB_REMOTE_ERROR.
+*
+*	h_mcast
+*		Upon successful completion of a multicast join, this references a
+*		handle to the multicast group.  This handle is used to leave the
+*		multicast group.
+*
+*	p_member_rec
+*		References a member record that provides information about the
+*		multicast group.
+*
+* NOTES
+*	This structure is returned to a client through a callback to notify them
+*	of the result of a multicast join operation.
+*
+* SEE ALSO
+*	ib_join_mcast, ib_pfn_mcast_cb_t, ib_leave_mcast
+*****/
+
+
+/****f* Access Layer/ib_pfn_mcast_cb_t
+* NAME
+*	ib_pfn_mcast_cb_t
+*
+* DESCRIPTION
+*	User-defined callback invoked on completion of a multicast join request.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_mcast_cb_t)(
+	IN				ib_mcast_rec_t				*p_mcast_rec );
+/*
+* PARAMETERS
+*	p_mcast_rec
+*		[in] References the result of the join operation.
+*
+* NOTES
+*	The callback is used to notify a client of the result of a multicast
+*	join request.
+*
+*	This callback is invoked within a system thread context in the kernel.
+*
+* SEE ALSO
+*	ib_join_mcast, ib_mcast_rec_t
+*****/
+
+
+/****s* Access Layer/ib_mcast_req_t
+* NAME
+*	ib_mcast_req_t
+*
+* DESCRIPTION
+*	Information used to join a multicast group.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mcast_req
+{
+	boolean_t					create;
+	ib_member_rec_t				member_rec;
+
+	const void* __ptr64			mcast_context;
+	ib_pfn_mcast_cb_t			pfn_mcast_cb;
+
+	uint32_t					timeout_ms;
+	uint32_t					retry_cnt;
+	ib_al_flags_t				flags;
+
+	ib_net64_t					port_guid;
+	uint16_t					pkey_index;
+
+}	ib_mcast_req_t;
+/*
+* FIELDS
+*	create
+*		Indicates that the multicast group should be created if it does not
+*		already exist.
+*
+*	member_rec
+*		Specifies the membership information of the multicast group to join
+*		or create.  The mgid and join state (scope_state) fields of the
+*		member record must be set.  In addition, if create is set to TRUE,
+*		the following fields must also be set: qkey, tclass, service level
+*		and flow label (sl_flow_hop), and pkey.  All other fields are ignored
+*		by the access layer.
+*
+*	mcast_context
+*		User-defined context information associated with the join request.
+*		This context is returned to the user through the function specified
+*		by the pfn_mcast_cb field.
+*
+*	pfn_mcast_cb
+*		A user-defined callback that is invoked upon completion of the
+*		join request.
+*
+*	timeout_ms
+*		Specifies the number of milliseconds to wait for a response for
+*		the join request until retrying or timing out the request.
+*
+*	retry_cnt
+*		Specifies the number of times that the join request will be retried
+*		before failing the request.
+*
+*	flags
+*		Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+*		process the called routine synchronously.
+*
+*	port_guid
+*		Indicates the port that will join the multicast group.  The QP
+*		specified as part of the ib_join_mcast call will bind to this port.
+*
+*	pkey_index
+*		Specifies the pkey associated with this queue pair.
+*
+* NOTES
+*	This structure is used when joining an existing multicast group or
+*	creating a new multicast group.
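+*
+* EXAMPLE
+*	A minimal sketch of requesting a join to an existing group.  It assumes
+*	mgid, port_guid, and my_mcast_cb are caller-supplied values; the QP is
+*	bound via a subsequent call to ib_join_mcast:
+*
+*		ib_mcast_req_t	mcast_req;
+*
+*		cl_memclr( &mcast_req, sizeof( mcast_req ) );
+*		mcast_req.create = FALSE;
+*		mcast_req.member_rec.mgid = mgid;
+*		mcast_req.member_rec.scope_state = IB_MC_REC_STATE_FULL_MEMBER;
+*		mcast_req.pfn_mcast_cb = my_mcast_cb;
+*		mcast_req.timeout_ms = 1000;
+*		mcast_req.retry_cnt = 3;
+*		mcast_req.port_guid = port_guid;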
+*
+* SEE ALSO
+*	ib_join_mcast, ib_pfn_mcast_cb_t, ib_gid_t
+*****/
+
+
+/****f* Access Layer/ib_join_mcast
+* NAME
+*	ib_join_mcast
+*
+* DESCRIPTION
+*	Attaches a queue pair to a multicast group.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_join_mcast(
+	IN		const	ib_qp_handle_t				h_qp,
+	IN		const	ib_mcast_req_t* const		p_mcast_req );
+/*
+* PARAMETERS
+*	h_qp
+*		[in] A handle to an unreliable datagram queue pair that will join the
+*		multicast group.
+*
+*	p_mcast_req
+*		[in] Specifies the multicast group to join.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The join multicast group request has been initiated.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the multicast group request information was not
+*		provided.
+*
+*	IB_INVALID_SERVICE_TYPE
+*		The queue pair configuration does not support this type of service.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to join the multicast group.
+*
+*	IB_INVALID_GUID
+*		No port was found for the port_guid specified in the request.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to perform the operation.
+*
+*	IB_INVALID_PKEY
+*		The pkey specified in the multicast join request does not match the
+*		pkey of the queue pair.
+*
+*	IB_INVALID_PORT
+*		The port GUID specified in the multicast join request does not match
+*		the port of the queue pair.
+*
+*	IB_ERROR
+*		An error occurred while performing the multicast group join operation.
+*
+* NOTES
+*	This routine results in the specified queue pair joining a multicast
+*	group.  If the multicast group does not already exist, it will be created
+*	at the user's option.  Information about the multicast group is returned
+*	to the user through a callback specified through the p_mcast_req
+*	parameter.
+*
+*	If the specified queue pair is already a member of a multicast group when
+*	this call is invoked, an error will occur if there are conflicting
+*	membership requirements.  The QP is restricted to being bound to a single
+*	port_guid and using a single pkey.
+*
+* SEE ALSO
+*	ib_leave_mcast, ib_mcast_req_t, ib_create_qp, ib_init_dgrm_svc
+*****/
+
+
+/****f* Access Layer/ib_leave_mcast
+* NAME
+*	ib_leave_mcast
+*
+* DESCRIPTION
+*	Removes a queue pair from a multicast group.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_leave_mcast(
+	IN		const	ib_mcast_handle_t			h_mcast,
+	IN		const	ib_pfn_destroy_cb_t			pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+*	h_mcast
+*		[in] A handle to a joined multicast group.
+*
+*	pfn_destroy_cb
+*		[in] An optional user-specified callback that is invoked after the
+*		leave request has completed.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The leave multicast group request has been initiated.
+*
+*	IB_INVALID_MCAST_HANDLE
+*		The multicast group handle was invalid.
+*
+*	IB_ERROR
+*		An error occurred while performing the multicast group leave operation.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		There were insufficient resources currently available on the channel
+*		adapter to perform the operation.
+*
+* NOTES
+*	This routine detaches a queue pair from a multicast group and removes
+*	it as a member of the group with the subnet administrator.
+*
+* SEE ALSO
+*	ib_join_mcast, ib_pfn_destroy_cb_t
+*****/
+
+
+/****f* Access Layer/ib_local_mad
+* NAME
+*	ib_local_mad
+*
+* DESCRIPTION
+*	Request that a locally received MAD be processed by the channel adapter
+*	on which it was received.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_local_mad(
+	IN		const	ib_ca_handle_t				h_ca,
+	IN		const	uint8_t						port_num,
+	IN		const	void* const					p_mad_in,
+		OUT			void*						p_mad_out );
+/*
+* PARAMETERS
+*	h_ca
+*		[in] A handle to the channel adapter that should process the MAD.
+*		This must be the same adapter that the MAD was received on.
+*
+*	port_num
+*		[in] The port number to which this request is directed.
+*
+*	p_mad_in
+*		[in] Pointer to a management datagram (MAD) structure containing
+*		the command to be processed.
+*
+*	p_mad_out
+*		[out] References a MAD that should contain the response to the
+*		received input MAD specified through the p_mad_in parameter.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The local MAD was processed successfully.
+*
+*	IB_INVALID_CA_HANDLE
+*		The channel adapter handle was invalid.
+*
+*	IB_INVALID_PORT
+*		The port number was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the MAD input or MAD output buffer was not provided.
+*
+* NOTES
+*	This call is provided to support SMA and GSA implementations above the
+*	verbs interface on ports that the access layer has disabled.  This routine
+*	is used to perform local operations by the channel adapter.  On successful
+*	return, the provided output MAD should be used when sending a response.
+*
+* SEE ALSO
+*	ib_query_ca, ib_ca_attr_t
+*****/
+
+
+/****s* Access Layer/ib_req_pdata_t
+* NAME
+*	ib_req_pdata_t
+*
+* DESCRIPTION
+*	User data sent as part of a request for communication.
+*
+* SYNOPSIS
+*/
+typedef union _ib_req_pdata
+{
+	uint8_t						data[IB_REQ_PDATA_SIZE];
+
+}	ib_req_pdata_t;
+/*
+* SEE ALSO
+*	ib_cm_data_sizes_t
+*****/
+
+
+/****s* Access Layer/ib_rep_pdata_t
+* NAME
+*	ib_rep_pdata_t
+*
+* DESCRIPTION
+*	User data sent as part of a reply to a request for communication.
+*
+* SYNOPSIS
+*/
+typedef union _ib_rep_pdata
+{
+	uint8_t						data[IB_REP_PDATA_SIZE];
+
+}	ib_rep_pdata_t;
+/*
+* SEE ALSO
+*	ib_cm_data_sizes_t
+*****/
+
+
+/****s* Access Layer/ib_rtu_pdata_t
+* NAME
+*	ib_rtu_pdata_t
+*
+* DESCRIPTION
+*	User data sent as part of a ready to use message.
+*
+* SYNOPSIS
+*/
+typedef union _ib_rtu_pdata
+{
+	uint8_t						data[IB_RTU_PDATA_SIZE];
+
+}	ib_rtu_pdata_t;
+/*
+* SEE ALSO
+*	ib_cm_data_sizes_t
+*****/
+
+
+/****s* Access Layer/ib_rej_pdata_t
+* NAME
+*	ib_rej_pdata_t
+*
+* DESCRIPTION
+*	User data sent as part of a connection reject message.
+* +* SYNOPSIS +*/ +typedef union _ib_rej_pdata +{ + uint8_t data[IB_REJ_PDATA_SIZE]; + +} ib_rej_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_mra_pdata_t +* NAME +* ib_mra_pdata_t +* +* DESCRIPTION +* User data sent as part of a message receipt acknowledgement. +* +* SYNOPSIS +*/ +typedef union _ib_mra_pdata +{ + uint8_t data[IB_MRA_PDATA_SIZE]; + +} ib_mra_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_lap_pdata_t +* NAME +* ib_lap_pdata_t +* +* DESCRIPTION +* User data sent as part of a load alternate path message. +* +* SYNOPSIS +*/ +typedef union _ib_lap_pdata +{ + uint8_t data[IB_LAP_PDATA_SIZE]; + +} ib_lap_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_apr_pdata_t +* NAME +* ib_apr_pdata_t +* +* DESCRIPTION +* User data sent as part of an alternate path response. +* +* SYNOPSIS +*/ +typedef union _ib_apr_pdata +{ + uint8_t data[IB_APR_PDATA_SIZE]; + +} ib_apr_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_dreq_pdata_t +* NAME +* ib_dreq_pdata_t +* +* DESCRIPTION +* User data sent as part of a disconnection request. +* +* SYNOPSIS +*/ +typedef union _ib_dreq_pdata +{ + uint8_t data[IB_DREQ_PDATA_SIZE]; + +} ib_dreq_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_drep_pdata_t +* NAME +* ib_drep_pdata_t +* +* DESCRIPTION +* User data sent as part of a reply to a disconnection request. +* +* SYNOPSIS +*/ +typedef union _ib_drep_pdata +{ + uint8_t data[IB_DREP_PDATA_SIZE]; + +} ib_drep_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_sidr_req_pdata_t +* NAME +* ib_sidr_req_pdata_t +* +* DESCRIPTION +* User data sent as part of a service ID resolution request. +* +* SYNOPSIS +*/ +typedef union _ib_sidr_req_pdata +{ + uint8_t data[IB_SIDR_REQ_PDATA_SIZE]; + +} ib_sidr_req_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_sidr_rep_pdata_t +* NAME +* ib_sidr_rep_pdata_t +* +* DESCRIPTION +* User data sent as part of a service ID resolution reply. +* +* SYNOPSIS +*/ +typedef union _ib_sidr_rep_pdata +{ + uint8_t data[IB_SIDR_REP_PDATA_SIZE]; + +} ib_sidr_rep_pdata_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_ari_t +* NAME +* ib_ari_t +* +* DESCRIPTION +* Infiniband-defined additional rejection information. +* +* SYNOPSIS +*/ +typedef struct _ib_ari +{ + uint8_t data[IB_ARI_SIZE]; + +} ib_ari_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_apr_info_t +* NAME +* ib_apr_info_t +* +* DESCRIPTION +* Infiniband-defined additional rejection information. +* +* SYNOPSIS +*/ +typedef struct _ib_apr_info +{ + uint8_t data[IB_APR_INFO_SIZE]; + +} ib_apr_info_t; +/* +* SEE ALSO +* ib_cm_data_sizes_t +*****/ + + +/****s* Access Layer/ib_cm_req_rec_t +* NAME +* ib_cm_req_rec_t +* +* DESCRIPTION +* Connection request information returned to the user through their +* connection request callback. 
+*
+* SYNOPSIS
+*/
+#pragma warning(disable:4324)
+typedef struct _ib_cm_req_rec
+{
+	const void* __ptr64			context;
+	ib_cm_handle_t				h_cm_req;
+	ib_listen_handle_t			h_cm_listen;
+
+	const uint8_t* __ptr64		p_req_pdata;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	uint8_t						resp_res;
+	boolean_t					flow_ctrl;
+	uint8_t						rnr_retry_cnt;
+	ib_path_rec_t				primary_path;
+	ib_path_rec_t				alt_path;
+
+	/* valid for ud qp_type only */
+	ib_net16_t					pkey;
+	const void* __ptr64			sidr_context;
+
+} ib_cm_req_rec_t;
+#pragma warning(default:4324)
+/*
+* FIELDS
+*	context
+*		For peer-to-peer connections, this is the queue pair context associated
+*		with a connection request.  For listens, this is the listen context
+*		specified through the ib_cm_listen routine.
+*
+*	h_cm_req
+*		The handle to the communication manager request.  This handle is used
+*		to reply to or reject the connection.
+*
+*	h_cm_listen
+*		For connection request callbacks initiated in response to an
+*		ib_cm_listen call, this is a handle to the listen request.  This
+*		handle is provided to the user to avoid a race condition between
+*		the return of the ib_cm_listen routine and the notification of a
+*		connection request.
+*
+*	p_req_pdata
+*		A reference to user-defined private data sent as part of the connection
+*		request.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	resp_res
+*		The maximum number of RDMA read/atomic operations from the recipient
+*		that the requestor supports on the connection.  The init_depth
+*		specified in the call to ib_cm_rep must be less than or equal to
+*		this value.
+*
+*	flow_ctrl
+*		Indicates if the remote CA implements hardware end-to-end flow control.
+*
+*	rnr_retry_cnt
+*		Requested number of RNR NAK retries to perform before generating a
+*		local error.
+*
+*	primary_path
+*		The path record to use for the primary connection.
+*
+*	alt_path
+*		The path record to use for the alternate connection.
+*
+*	pkey
+*		The pkey used in the user's request.
+*
+*	sidr_context
+*		The sidr_context used in ib_cm_listen.
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_listen, ib_pfn_cm_req_cb_t,
+*	ib_access_t, ib_path_rec_t, ib_req_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rep_rec_t
+* NAME
+*	ib_cm_rep_rec_t
+*
+* DESCRIPTION
+*	Connection request reply information returned to the user through their
+*	connection reply callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rep_rec
+{
+	const uint8_t* __ptr64		p_rep_pdata;
+
+	ib_qp_type_t				qp_type;
+
+	ib_cm_handle_t				h_cm_rep;
+	/* valid for rc, uc & rd qp_type only */
+	const void* __ptr64			qp_context;
+	uint8_t						resp_res;
+	boolean_t					flow_ctrl;
+	ib_apr_status_t				apr_status;
+
+	/* valid for ud qp_type only */
+	const void* __ptr64			sidr_context;
+	ib_sidr_status_t			status;
+	ib_net32_t					remote_qp;
+	ib_net32_t					remote_qkey;
+	ib_class_port_info_t		class_info;
+
+} ib_cm_rep_rec_t;
+/*
+* FIELDS
+*	p_rep_pdata
+*		A reference to user-defined private data sent as part of the connection
+*		request reply.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_cm_rep
+*		The handle to the communication manager reply.  This handle is used
+*		to issue a ready to use message or to reject the connection.
+*
+*	qp_context
+*		The queue pair context associated with a connection request.
+*
+*	resp_res
+*		The maximum number of RDMA read/atomic operations from the recipient
+*		that the requestor supports on the connection.  This may be less than
+*		the init_depth specified in the call to ib_cm_req.  The local queue
+*		pair will be configured with this value unless the connection is
+*		rejected.
+*
+*	flow_ctrl
+*		Indicates if the remote CA implements hardware end-to-end flow control.
+*
+*	apr_status
+*		Indicates whether the alternate path information was accepted.
+*
+*	sidr_context
+*		The sidr_context used in ib_cm_req.
+*
+*	status
+*		Status of the request made previously using ib_cm_req.
+*
+*	remote_qp
+*		Identifies the destination queue pair number.
+*
+*	remote_qkey
+*		Identifies the destination qkey.
+*
+*	class_info
+*		Identifies the class_port_info returned if status was not successful.
+*		This field has no value if status is successful.
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_rep, ib_pfn_cm_rep_cb_t, ib_cm_status_t, ib_rep_pdata_t,
+*	ib_qp_type_t, ib_sidr_status_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rtu_rec_t
+* NAME
+*	ib_cm_rtu_rec_t
+*
+* DESCRIPTION
+*	Connection ready to use message information returned to the user through
+*	their ready to use callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rtu_rec
+{
+	const uint8_t* __ptr64		p_rtu_pdata;
+
+	ib_qp_handle_t				h_qp;
+	const void* __ptr64			qp_context;
+
+} ib_cm_rtu_rec_t;
+/*
+* FIELDS
+*	p_rtu_pdata
+*		A reference to user-defined private data sent as part of the ready
+*		to use message.
+*
+*	h_qp
+*		The queue pair handle associated with the connection request.
+*
+*	qp_context
+*		The queue pair context associated with the connection request.
+*
+* SEE ALSO
+*	ib_cm_rtu, ib_pfn_cm_rtu_cb_t, ib_cm_status_t, ib_rtu_pdata_t,
+*	ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rej_rec_t
+* NAME
+*	ib_cm_rej_rec_t
+*
+* DESCRIPTION
+*	Connection rejection information returned to the user through their
+*	rejection callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rej_rec
+{
+	ib_rej_status_t				rej_status;
+	const uint8_t* __ptr64		p_ari;
+	uint8_t						ari_length;
+
+	const uint8_t* __ptr64		p_rej_pdata;
+
+	ib_qp_handle_t				h_qp;
+	const void* __ptr64			qp_context;
+
+} ib_cm_rej_rec_t;
+/*
+* FIELDS
+*	rej_status
+*		The reason for the connection rejection.
+*
+*	p_ari
+*		Additional rejection information.  The data referenced by this field
+*		is dependent on the rej_status and is defined by the InfiniBand
+*		specification.
+*
+*	ari_length
+*		Length of valid data provided in the p_ari buffer.
+*
+*	p_rej_pdata
+*		A reference to user-defined private data sent as part of the
+*		reject message.
+*
+*	h_qp
+*		The queue pair handle associated with a connection request.
+*
+*	qp_context
+*		The queue pair context associated with a connection request.
+*
+* SEE ALSO
+*	ib_cm_rej, ib_pfn_cm_rej_cb_t, ib_rej_status_t, ib_ari_t, ib_rej_pdata_t,
+*	ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_mra_rec_t
+* NAME
+*	ib_cm_mra_rec_t
+*
+* DESCRIPTION
+*	Message received acknowledgement information returned to the user through
+*	a callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_mra_rec
+{
+	const uint8_t* __ptr64		p_mra_pdata;
+
+	ib_qp_handle_t				h_qp;
+	const void* __ptr64			qp_context;
+
+} ib_cm_mra_rec_t;
+/*
+* FIELDS
+*	p_mra_pdata
+*		A reference to user-defined private data sent as part of the MRA.
+*
+*	h_qp
+*		The queue pair handle associated with a connection request.
+*
+*	qp_context
+*		The queue pair context associated with a connection request.
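+*
+* EXAMPLE
+*	A minimal callback sketch, for illustration only; my_conn_t and its
+*	mra_received flag are hypothetical and not part of this header:
+*
+*	static void AL_API
+*	my_mra_cb(
+*		IN	ib_cm_mra_rec_t		*p_cm_mra_rec )
+*	{
+*		my_conn_t	*p_conn;
+*
+*		// The remote side needs more processing time; note this and
+*		// wait for the eventual REP, RTU, or REJ callback.  qp_context
+*		// was supplied by this client, so the cast is safe.
+*		p_conn = (my_conn_t*)p_cm_mra_rec->qp_context;
+*		p_conn->mra_received = TRUE;
+*	}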
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_mra, ib_pfn_cm_mra_cb_t, ib_mra_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_lap_rec_t
+* NAME
+*	ib_cm_lap_rec_t
+*
+* DESCRIPTION
+*	Load alternate path request information returned to the user through
+*	a callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_lap_rec
+{
+	ib_cm_handle_t				h_cm_lap;
+	ib_path_rec_t				alt_path;
+
+	const uint8_t* __ptr64		p_lap_pdata;
+
+	const void* __ptr64			qp_context;
+
+} ib_cm_lap_rec_t;
+/*
+* FIELDS
+*	p_lap_pdata
+*		A reference to user-defined private data sent as part of the load
+*		alternate path request.
+*
+*	qp_context
+*		The queue pair context associated with a connection request.
+*
+*	h_cm_lap
+*		A handle to the load alternate path request.  This handle is used
+*		to reply to the load request.
+*
+*	alt_path
+*		Requested alternate path.  Users must accept or reject the path by
+*		calling ib_cm_apr.
+*
+* SEE ALSO
+*	ib_cm_lap, ib_pfn_cm_lap_cb_t, ib_lap_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_apr_rec_t
+* NAME
+*	ib_cm_apr_rec_t
+*
+* DESCRIPTION
+*	Load alternate path response information returned to the user through
+*	a callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_apr_rec
+{
+	ib_api_status_t				cm_status;
+	ib_apr_status_t				apr_status;
+
+	const uint8_t* __ptr64		p_info;
+	uint8_t						info_length;
+
+	const uint8_t* __ptr64		p_apr_pdata;
+
+	ib_qp_handle_t				h_qp;
+	const void* __ptr64			qp_context;
+
+} ib_cm_apr_rec_t;
+/*
+* FIELDS
+*	cm_status
+*		The status of the alternate path response.  IB_SUCCESS indicates that
+*		the alternate path was loaded successfully.  IB_TIMEOUT indicates that
+*		a reply was not received within the specified timeout and retry count.
+*		Other error values indicate that the alternate path was not loaded:
+*		if apr_status is IB_AP_SUCCESS, the QP failed to load the path;
+*		other apr_status values indicate that the request was rejected for
+*		some reason.
+*
+*	apr_status
+*		The alternate path response status.  This indicates additional failure
+*		information to a load alternate path request and is defined by the
+*		InfiniBand specification.
+*
+*	info_length
+*		Length of valid data in the APR additional information buffer.
+*
+*	p_info
+*		APR additional information.
+*
+*	p_apr_pdata
+*		A reference to user-defined private data sent as part of the alternate
+*		path response.
+*
+*	h_qp
+*		The queue pair handle associated with the alternate path response.
+*
+*	qp_context
+*		The queue pair context associated with the alternate path response.
+*
+* SEE ALSO
+*	ib_cm_lap, ib_pfn_cm_apr_cb_t, ib_apr_status_t, ib_apr_info_t,
+*	ib_apr_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_dreq_rec_t
+* NAME
+*	ib_cm_dreq_rec_t
+*
+* DESCRIPTION
+*	Disconnection request information returned to the user through their
+*	disconnection callback.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_dreq_rec
+{
+	ib_cm_handle_t				h_cm_dreq;
+
+	const uint8_t* __ptr64		p_dreq_pdata;
+
+	const void* __ptr64			qp_context;
+
+} ib_cm_dreq_rec_t;
+/*
+* FIELDS
+*	h_cm_dreq
+*		A handle to the disconnection request.  This handle is used to reply
+*		to the disconnection request.
+*
+*	p_dreq_pdata
+*		A reference to user-defined private data sent as part of the
+*		disconnect request.
+*
+*	qp_context
+*		The queue pair context associated with the disconnect request.
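+*
+* EXAMPLE
+*	A minimal disconnect callback sketch, for illustration only:
+*
+*	static void AL_API
+*	my_dreq_cb(
+*		IN	ib_cm_dreq_rec_t	*p_cm_dreq_rec )
+*	{
+*		ib_cm_drep_t	cm_drep;
+*
+*		// Reply to the disconnect request; no private data is returned.
+*		cl_memclr( &cm_drep, sizeof(cm_drep) );
+*		ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &cm_drep );
+*	}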
+* +* SEE ALSO +* ib_cm_dreq, ib_pfn_cm_dreq_cb_t, ib_dreq_pdata_t, ib_qp_type_t +*****/ + + +/****s* Access Layer/ib_cm_drep_rec_t +* NAME +* ib_cm_drep_rec_t +* +* DESCRIPTION +* Disconnection reply information returned to the user through their +* disconnect reply callback. +* +* SYNOPSIS +*/ +typedef struct _ib_cm_drep_rec +{ + ib_api_status_t cm_status; + + const uint8_t* __ptr64 p_drep_pdata; + + ib_qp_handle_t h_qp; + const void* __ptr64 qp_context; + +} ib_cm_drep_rec_t; +/* +* FIELDS +* cm_status +* The status of the disconnect request. Valid values are IB_SUCCESS +* and IB_TIMEOUT. IB_TIMEOUT indicates that a reply was not received +* within the specified timeout and retry count. +* +* p_drep_pdata +* A reference to user-defined private data sent as part of the +* disconnect reply. +* +* h_qp +* The queue pair handle associated with the disconnect reply. +* +* qp_context +* The queue pair context associated with the disconnect reply. +* +* SEE ALSO +* ib_cm_drep, ib_pfn_cm_drep_cb_t, ib_drep_pdata_t, ib_qp_type_t +*****/ + + +/****f* Access Layer/ib_pfn_cm_req_cb_t +* NAME +* ib_pfn_cm_req_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after receiving a connection +* request message. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_cm_req_cb_t)( + IN ib_cm_req_rec_t *p_cm_req_rec ); +/* +* PARAMETERS +* p_cm_req_rec +* [in] Connection request information returned to the user, indicating +* the parameters for the connection. +* +* NOTES +* This callback is invoked to notify the user of a connection request. This +* routine is invoked for peer to peer connection request calls to ib_cm_req +* and for calls to ib_cm_listen. Users must call ib_cm_rep to accept the +* connection or ib_cm_rej to reject the connection from the callback. +* +* Users may also call ib_cm_mra to acknowledge the connection request and +* prevent the remote side from timing out the connection request. The +* ib_cm_mra routine should be invoked if the user requires substantial +* processing time to process the connection request. +* +* In the kernel, this callback is typically invoked from within a tasklet, +* depending on the implementation of the verbs provider driver. +* +* SEE ALSO +* ib_cm_req, ib_cm_listen, ib_cm_rep, ib_cm_mra, ib_cm_rej, ib_cm_req_rec_t +*****/ + + +/****f* Access Layer/ib_pfn_cm_rep_cb_t +* NAME +* ib_pfn_cm_rep_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after receiving a connection +* request reply message. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_cm_rep_cb_t)( + IN ib_cm_rep_rec_t *p_cm_rep_rec ); +/* +* PARAMETERS +* p_cm_rep_rec +* [in] Connection request reply information returned to the user, +* indicating the remote connection data. +* +* NOTES +* This callback is invoked to notify the user of a connection request reply. +* This routine is invoked after calling ib_cm_req. Users must call +* ib_cm_rtu to accept the connection or ib_cm_rej to reject the connection +* from the callback. +* +* Users may also call ib_cm_mra to acknowledge the connection request reply +* and prevent the remote side from timing out the connection request. The +* ib_cm_mra routine should be invoked if the user requires substantial +* processing time to process the connection request reply. +* +* If a reply is not received within the specified timeout period, +* this callback will be invoked with the status set to IB_CM_TIMEOUT. 
Users
+*	may call ib_cm_rej to notify the remote side that the connection request
+*	is being rejected due to a timeout.
+*
+*	In the kernel, this callback is typically invoked from within a tasklet,
+*	depending on the implementation of the verbs provider driver.
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_listen, ib_cm_rep, ib_cm_mra, ib_cm_rej,
+*	ib_cm_rep_rec_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_cm_rtu_cb_t
+* NAME
+*	ib_pfn_cm_rtu_cb_t
+*
+* DESCRIPTION
+*	A user-specified callback that is invoked after receiving a connection
+*	ready to use message.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_cm_rtu_cb_t)(
+	IN				ib_cm_rtu_rec_t				*p_cm_rtu_rec );
+/*
+* PARAMETERS
+*	p_cm_rtu_rec
+*		[in] Connection ready to use information returned to the user.
+*
+* NOTES
+*	This callback is invoked to notify the user that a connection is ready
+*	to use.  This routine is invoked after calling ib_cm_rep.  If a ready to
+*	use message is not received within the specified timeout period, this
+*	callback will be invoked with the status set to IB_CM_TIMEOUT.
+*
+*	This callback will be invoked before a user is notified of any completions
+*	that have occurred on the associated queue pair.
+*
+*	In the kernel, this callback is typically invoked from within a tasklet,
+*	depending on the implementation of the verbs provider driver.
+*
+* SEE ALSO
+*	ib_cm_rep, ib_cm_rtu_rec_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_cm_rej_cb_t
+* NAME
+*	ib_pfn_cm_rej_cb_t
+*
+* DESCRIPTION
+*	A user-specified callback that is invoked after receiving a connection
+*	rejection message.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_cm_rej_cb_t)(
+	IN				ib_cm_rej_rec_t				*p_cm_rej_rec );
+/*
+* PARAMETERS
+*	p_cm_rej_rec
+*		[in] Connection rejection information returned to the user.
+*
+* NOTES
+*	This callback is invoked to notify the user that a connection has been
+*	rejected.  This routine may be invoked after calling ib_cm_req or
+*	ib_cm_rep.
+*
+*	In the kernel, this callback is typically invoked from within a tasklet,
+*	depending on the implementation of the verbs provider driver.
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_rep, ib_cm_rtu, ib_cm_rej, ib_cm_rej_rec_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_cm_mra_cb_t
+* NAME
+*	ib_pfn_cm_mra_cb_t
+*
+* DESCRIPTION
+*	A user-specified callback that is invoked after receiving a message
+*	received acknowledgement.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_cm_mra_cb_t)(
+	IN				ib_cm_mra_rec_t				*p_cm_mra_rec );
+/*
+* PARAMETERS
+*	p_cm_mra_rec
+*		[in] Message received acknowledgement information received from the
+*		remote side.
+*
+* NOTES
+*	This callback is invoked to notify the user that their request was
+*	successfully received, but additional processing is required.  This
+*	callback may be invoked after calling ib_cm_req or ib_cm_rep.
+*
+*	In the kernel, this callback is typically invoked from within a tasklet,
+*	depending on the implementation of the verbs provider driver.
+*
+* SEE ALSO
+*	ib_cm_req, ib_cm_rep, ib_cm_mra_rec_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_cm_lap_cb_t
+* NAME
+*	ib_pfn_cm_lap_cb_t
+*
+* DESCRIPTION
+*	A user-specified callback that is invoked after receiving a load
+*	alternate path message.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_cm_lap_cb_t)(
+	IN				ib_cm_lap_rec_t				*p_cm_lap_rec );
+/*
+* PARAMETERS
+*	p_cm_lap_rec
+*		[in] Load alternate path information sent by the remote side.
+*
+* NOTES
+*	This callback is invoked to notify the user of a load alternate path
+*	request.
Users must call ib_cm_apr to respond to the load alternate +* path request from within this callback. The ib_cm_apr call is used +* to accept or reject the load alternate path request. +* +* In the kernel, this callback is typically invoked from within a +* tasklet, depending on the implementation of the verbs provider driver. +* +* SEE ALSO +* ib_cm_lap, ib_cm_apr, ib_cm_lap_rec_t +*****/ + + +/****f* Access Layer/ib_pfn_cm_apr_cb_t +* NAME +* ib_pfn_cm_apr_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after receiving a load +* alternate path response message. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_cm_apr_cb_t)( + IN ib_cm_apr_rec_t *p_cm_apr_rec ); +/* +* PARAMETERS +* p_cm_apr_rec +* [in] Load alternate path response information sent by the remote side. +* +* NOTES +* This callback is invoked to notify the user of a load alternate path +* response. If a response is not received within the specified timeout +* period, this callback will be invoked with the status set to IB_CM_TIMEOUT. +* +* In the kernel, this callback is typically invoked from within a tasklet, +* depending on the implementation of the verbs provider driver. +* +* SEE ALSO +* ib_cm_lap, ib_cm_apr, ib_cm_apr_rec_t +*****/ + + +/****f* Access Layer/ib_pfn_cm_dreq_cb_t +* NAME +* ib_pfn_cm_dreq_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after receiving a disconnect +* request message. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_cm_dreq_cb_t)( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ); +/* +* PARAMETERS +* p_cm_dreq_rec +* [in] Disconnect request information returned to the user. +* +* NOTES +* This callback is invoked to notify the user of a disconnect request. +* Users must call ib_cm_drep to respond to the disconnect request. After +* this callback returns, the queue pair associated with the connection is +* transitioned to the time-wait state and is no longer usable for sending +* and receiving data. +* +* In the kernel, this callback is typically invoked from within a tasklet, +* depending on the implementation of the verbs provider driver. +* +* SEE ALSO +* ib_cm_req, ib_cm_listen, ib_cm_drep, ib_cm_dreq_rec_t +*****/ + + +/****f* Access Layer/ib_pfn_cm_drep_cb_t +* NAME +* ib_pfn_cm_drep_cb_t +* +* DESCRIPTION +* A user-specified callback that is invoked after receiving a disconnect +* reply message. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_cm_drep_cb_t)( + IN ib_cm_drep_rec_t *p_cm_drep_rec ); +/* +* PARAMETERS +* p_cm_drep_rec +* [in] Disconnect reply information returned to the user. +* +* NOTES +* This callback is invoked to notify the user of a disconnect reply. If +* no reply was received within the specified timeout period, this callback +* will be invoked with the status set to IB_CM_TIMEOUT. +* +* In the kernel, this callback is typically invoked from within a +* tasklet, depending on the implementation of the verbs provider driver. +* +* SEE ALSO +* ib_cm_dreq, ib_cm_drep, ib_cm_drep_rec_t +*****/ + + +/****d* Access Layer/ib_listen_info_t +* NAME +* ib_listen_info_t +* +* DESCRIPTION +* Constants used to specify directed listen requests. +* +* SYNOPSIS +*/ +#define IB_ALL_CAS 0 +#define IB_ALL_PORTS 0 +#define IB_ALL_LIDS 0 +#define IB_ALL_PKEYS 0 +/* +* SEE ALSO +* ib_cm_listen, ib_cm_listen_t +*****/ + + +/****s* Access Layer/ib_cm_listen_t +* NAME +* ib_cm_listen_t +* +* DESCRIPTION +* Request to listen for incoming connection attempts. 
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_listen
+{
+	ib_net64_t					svc_id;
+
+	ib_net64_t					ca_guid;
+	ib_net64_t					port_guid;
+	ib_net16_t					lid;
+	ib_net16_t					pkey;
+
+	uint8_t* __ptr64			p_compare_buffer;
+	uint8_t						compare_offset;
+	uint8_t						compare_length;
+
+	ib_pfn_cm_req_cb_t			pfn_cm_req_cb;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for ud qp_type only */
+	const void* __ptr64			sidr_context;
+
+} ib_cm_listen_t;
+/*
+* FIELDS
+*	svc_id
+*		The identifier of the service to register for incoming connection
+*		requests.
+*
+*	ca_guid
+*		Directs the communication manager to register the listen only
+*		with the specified channel adapter.  This should be set to IB_ALL_CAS
+*		if the listen is not directed to a particular channel adapter.
+*
+*	port_guid
+*		Directs the communication manager to register the listen only
+*		with the specified port.  This should be set to IB_ALL_PORTS
+*		if the listen is not directed to a particular port.
+*
+*	lid
+*		Directs the communication manager to register the listen only
+*		with the specified LID.  This should be set to IB_ALL_LIDS
+*		if the listen is not directed to a particular LID.
+*
+*	pkey
+*		Directs the communication manager to register the listen only with
+*		the specified pkey value.  This should be set to IB_ALL_PKEYS
+*		if the listen is not directed to a particular partition.
+*
+*	p_compare_buffer
+*		An optionally provided buffer that will be used to match incoming
+*		connection requests with a registered service.  Use of this buffer
+*		permits multiple services to listen on the same service ID as long as
+*		they provide different compare buffers.  Incoming requests will
+*		be matched against the compare buffer.
+*
+*	compare_offset
+*		An offset into the user-defined data area of a connection request
+*		which contains the start of the data that will be compared against.
+*		The offset must be the same for all requests using the same service ID.
+*
+*	compare_length
+*		Specifies the size of the compare buffer in bytes.  The length must
+*		be the same for all requests using the same service ID.
+*
+*	pfn_cm_req_cb
+*		References a user-provided callback that will be invoked whenever a
+*		connection request is received.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	sidr_context
+*		SIDR-specific context for listens.  This context is passed back in
+*		the ib_pfn_cm_req_cb_t callback.
+*
+* NOTES
+*	Users fill out this structure when listening on a service ID with the
+*	local communication manager.  The communication manager will use the given
+*	service ID and compare buffer to route connection requests to the
+*	appropriate client.  Users may direct listen requests to a particular
+*	channel adapter, port, or LID.
+*
+*	Message received acknowledgement (MRA) callbacks will not be invoked
+*	until a connection request has been replied to.
+*
+* SEE ALSO
+*	ib_listen_info_t, ib_pfn_cm_req_cb_t, ib_pfn_cm_mra_cb_t,
+*	ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_req_t
+* NAME
+*	ib_cm_req_t
+*
+* DESCRIPTION
+*	Connection request information used to establish a new connection.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_req
+{
+	ib_net64_t					svc_id;
+
+	ib_al_flags_t				flags;
+	uint8_t						max_cm_retries;
+
+	ib_path_rec_t* __ptr64		p_primary_path;
+
+	ib_pfn_cm_rep_cb_t			pfn_cm_rep_cb;
+
+	const uint8_t* __ptr64		p_req_pdata;
+	uint8_t						req_length;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	ib_qp_handle_t				h_qp;
+
+	uint8_t* __ptr64			p_compare_buffer;
+	uint8_t						compare_offset;
+	uint8_t						compare_length;
+
+	uint8_t						resp_res;
+	uint8_t						init_depth;
+	uint8_t						remote_resp_timeout;
+	boolean_t					flow_ctrl;
+	uint8_t						local_resp_timeout;
+	uint8_t						rnr_nak_timeout;
+	uint8_t						rnr_retry_cnt;
+	uint8_t						retry_cnt;
+
+	ib_path_rec_t* __ptr64		p_alt_path OPTIONAL;
+
+	ib_pfn_cm_req_cb_t			pfn_cm_req_cb;
+	ib_pfn_cm_mra_cb_t			pfn_cm_mra_cb;
+	ib_pfn_cm_rej_cb_t			pfn_cm_rej_cb;
+
+	/* valid for ud qp_type only */
+	ib_al_handle_t				h_al;
+	const void* __ptr64			sidr_context;
+	uint32_t					timeout_ms;
+	ib_net16_t					pkey;
+
+} ib_cm_req_t;
+/*
+* FIELDS
+*	svc_id
+*		The ID of the remote service to which the connection request is
+*		being made.
+*
+*	flags
+*		Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+*		process the called routine synchronously.
+*
+*	max_cm_retries
+*		The maximum number of times that either CM should resend a connection
+*		establishment message.
+*
+*	p_primary_path
+*		Path information over which to establish the primary connection.
+*
+*	pfn_cm_rep_cb
+*		References a user-provided callback that will be invoked when
+*		a reply to the connection request is received.
+*
+*	p_req_pdata
+*		Optional user-defined private data sent as part of the connection
+*		request.
+*
+*	req_length
+*		Defines the size of the user-defined private data.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_qp
+*		A handle to the queue pair to use in the connection.
+*
+*	p_compare_buffer
+*		An optionally provided buffer that will be used to match incoming
+*		connection requests with a registered service.  Use of this buffer
+*		permits multiple services to connect using the same service ID as
+*		long as they provide different compare buffers.  Incoming requests
+*		will be matched against the compare buffer.  Valid for peer-to-peer
+*		connection requests only.
+*
+*	compare_offset
+*		An offset into the user-defined data area of a connection request
+*		which contains the start of the data that will be compared against.
+*		The offset must be the same for all requests using the same service ID.
+*		Valid for peer-to-peer connection requests only.
+*
+*	compare_length
+*		Specifies the size of the compare buffer in bytes.  The length must
+*		be the same for all requests using the same service ID.  Valid for
+*		peer-to-peer connection requests only.
+*
+*	resp_res
+*		The maximum number of outstanding RDMA read/atomic operations the
+*		requestor supports from the remote QP.
+*
+*	init_depth
+*		The maximum number of outstanding RDMA read/atomic operations the
+*		requestor will have outstanding to the remote QP.
+*
+*	remote_resp_timeout
+*		The time within which the remote CM should transmit a response to
+*		the sender.  This value is expressed as
+*		4.096 * (2 ^ remote_resp_timeout) microseconds.
+*
+*	flow_ctrl
+*		Indicates whether the local CA supports end-to-end flow control.
+*
+*	local_resp_timeout
+*		The time that the remote CM should wait to receive a response from
+*		the local CM.  This value is expressed as
+*		4.096 * (2 ^ local_resp_timeout) microseconds.
+*
+*	rnr_nak_timeout
+*		The time to wait before retrying a packet after receiving a RNR NAK.
+*		This value is defined in section 9.7.5.2.8 of the IB Spec, table 45.
+*
+*	rnr_retry_cnt
+*		The number of times that the local QP should retry a send operation
+*		after receiving an RNR NAK before reporting an error.
+*
+*	retry_cnt
+*		The number of times that a QP should retry a send operation before
+*		reporting an error.
+*
+*	p_alt_path
+*		Optional path information that will be used as the alternate
+*		connection path in the case of failure.
+*
+*	pfn_cm_req_cb
+*		References a user-provided callback that will be invoked when
+*		a request for a connection is received.  This is required for
+*		peer-to-peer connection requests, and must be NULL for client/server
+*		connection requests.
+*
+*	pfn_cm_mra_cb
+*		References a user-provided callback that will be invoked when
+*		a message received acknowledgement is received.
+*
+*	pfn_cm_rej_cb
+*		References a user-provided callback that will be invoked if the
+*		connection is rejected by the remote end-point.
+*
+*	sidr_context
+*		The user-defined sidr context information that will be passed back
+*		through the ib_pfn_cm_rep_cb_t callback.
+*
+*	timeout_ms
+*		Timeout value in milliseconds for the REQ to expire.  The CM will add
+*		twice packet lifetime to this value to determine the actual timeout
+*		value used.
+*
+*	pkey
+*		pkey to be used as part of the request.  This field is only valid for
+*		IB_MCLASS_CM_VER_2 clients.
+*
+* SEE ALSO
+*	ib_cm_req, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_mra_cb_t,
+*	ib_pfn_cm_rej_cb_t, ib_path_rec_t, ib_req_pdata_t, ib_qp_type_t
+*****/
+
+
+/****d* Access Layer/ib_cm_failover_t
+* NAME
+*	ib_cm_failover_t
+*
+* DESCRIPTION
+*	Failover acceptance status returned as part of a connection reply.
+*
+* SYNOPSIS
+*/
+typedef uint8_t ib_cm_failover_t;
+#define IB_FAILOVER_ACCEPT_SUCCESS		0
+#define IB_FAILOVER_ACCEPT_UNSUPPORTED	1
+#define IB_FAILOVER_ACCEPT_ERROR		2
+/*
+* NOTES
+*	These values and their use are defined in the InfiniBand specification.
+*
+* SEE ALSO
+*	ib_cm_rep, ib_cm_rep_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rep_t
+* NAME
+*	ib_cm_rep_t
+*
+* DESCRIPTION
+*	Connection reply information used when establishing a connection.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rep
+{
+	ib_al_flags_t				flags;
+
+	const uint8_t* __ptr64		p_rep_pdata;
+	uint8_t						rep_length;
+
+	ib_qp_handle_t				h_qp;
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	ib_access_t					access_ctrl;
+	uint32_t					sq_depth;
+	uint32_t					rq_depth;
+
+	uint8_t						init_depth;
+	uint8_t						target_ack_delay;
+	ib_cm_failover_t			failover_accepted;
+	boolean_t					flow_ctrl;
+	uint8_t						rnr_nak_timeout;
+	uint8_t						rnr_retry_cnt;
+
+	ib_pfn_cm_rej_cb_t			pfn_cm_rej_cb;
+	ib_pfn_cm_mra_cb_t			pfn_cm_mra_cb;
+	ib_pfn_cm_rtu_cb_t			pfn_cm_rtu_cb;
+	ib_pfn_cm_lap_cb_t			pfn_cm_lap_cb;
+	ib_pfn_cm_dreq_cb_t			pfn_cm_dreq_cb;
+
+	ib_recv_wr_t* __ptr64		p_recv_wr;
+	ib_recv_wr_t* __ptr64 *__ptr64	pp_recv_failure;
+
+	/* valid for ud qp_type only */
+	ib_sidr_status_t			status;
+	ib_class_port_info_t		class_info;
+
+} ib_cm_rep_t;
+/*
+* FIELDS
+*	flags
+*		Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+*		process the called routine synchronously.
+*
+*	p_rep_pdata
+*		Optional user-defined private data sent as part of the connection
+*		reply.
+*
+*	rep_length
+*		Defines the size of the user-defined private data.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_qp
+*		A handle to the queue pair to use in the connection.  For SIDR, h_qp
+*		is valid only if sidr status is IB_SIDR_SUCCESS.
+*
+*	access_ctrl
+*		Indicates the type of access permitted on the local QP.
+*
+*	sq_depth
+*		The maximum number of outstanding send operations that the local
+*		QP needs to support.
+*
+*	rq_depth
+*		The maximum number of outstanding receive operations that the local
+*		QP needs to support.
+*
+*	init_depth
+*		The maximum number of outstanding RDMA read/atomic operations the
+*		sender of the reply will have outstanding to the remote QP.
+*
+*	target_ack_delay
+*		The time that the remote QP should wait to receive an ACK from the
+*		local QP.
+*
+*	failover_accepted
+*		Status indicating if the failover path was accepted by the sender
+*		of the reply.
+*
+*	flow_ctrl
+*		Indicates whether the local CA supports end-to-end flow control.
+*
+*	rnr_nak_timeout
+*		The time to wait before retrying a packet after receiving a RNR NAK.
+*
+*	rnr_retry_cnt
+*		The number of times that the local QP should retry a send operation
+*		after receiving an RNR NAK before reporting an error.
+*
+*	pfn_cm_rtu_cb
+*		References a user-defined callback that will be invoked when
+*		a connection is ready to use for send operations.
+*
+*	pfn_cm_lap_cb
+*		References a user-defined callback that will be invoked when
+*		a load alternate path request is received for the connecting
+*		queue pair or end-to-end context.
+*
+*	pfn_cm_dreq_cb
+*		References a user-defined callback that will be invoked when
+*		a disconnect request is received for the connecting
+*		queue pair or end-to-end context.
+*
+*	p_recv_wr
+*		A reference to the head of the work request list to be initially
+*		posted to the receive queue.  Providing this list closes a potential
+*		race condition between sending a CM REP message and posting receives.
+*		Use of this field is optional.
+*
+*	pp_recv_failure
+*		If the post receive operation failed, this references the work
+*		request in the p_recv_wr list where the first failure occurred.
+*		This field is required only if p_recv_wr is used.
+*
+*	status
+*		sidr status value returned in response to a previously received REQ.
+*
+*	class_info
+*		The contents of this field are valid only if status is IB_SIDR_REDIRECT.
+*
+* SEE ALSO
+*	ib_cm_rep, ib_access_t, ib_cm_failover_t, ib_rep_pdata_t,
+*	ib_pfn_cm_rtu_cb_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_dreq_cb_t,
+*	ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rtu_t
+* NAME
+*	ib_cm_rtu_t
+*
+* DESCRIPTION
+*	Connection ready to use information used when establishing a connection.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rtu
+{
+	ib_access_t					access_ctrl;
+	uint32_t					sq_depth;
+	uint32_t					rq_depth;
+
+	const uint8_t* __ptr64		p_rtu_pdata;
+	uint8_t						rtu_length;
+
+	ib_pfn_cm_apr_cb_t			pfn_cm_apr_cb;
+	ib_pfn_cm_dreq_cb_t			pfn_cm_dreq_cb;
+
+} ib_cm_rtu_t;
+/*
+* FIELDS
+*	access_ctrl
+*		Indicates the type of access permitted on the local QP.
+*
+*	sq_depth
+*		The maximum number of outstanding send operations that the local
+*		QP needs to support.  This field should be set to zero if the CA
+*		does not support changing the work request depth after the QP is
+*		created.
+*
+*	rq_depth
+*		The maximum number of outstanding receive operations that the local
+*		QP needs to support.  This field should be set to zero if the CA
+*		does not support changing the work request depth after the QP is
+*		created.
+*
+*	p_rtu_pdata
+*		Optional user-defined private data sent as part of the connection
+*		ready to use message.
+*
+*	rtu_length
+*		Defines the size of the user-defined private data.
+*
+*	pfn_cm_apr_cb
+*		References a user-defined callback that will be invoked when an
+*		alternate path response is received for the connecting queue pair
+*		or end-to-end context.
+*
+*	pfn_cm_dreq_cb
+*		References a user-defined callback that will be invoked when a
+*		disconnect request is received for the connecting queue pair
+*		or end-to-end context.
+*
+* SEE ALSO
+*	ib_cm_rtu, ib_access_t, ib_rtu_pdata_t
+*****/
+
+
+/****s* Access Layer/ib_cm_rej_t
+* NAME
+*	ib_cm_rej_t
+*
+* DESCRIPTION
+*	Information used to reject a connection request.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_rej
+{
+	ib_rej_status_t				rej_status;
+
+	ib_ari_t* __ptr64			p_ari;
+	uint8_t						ari_length;
+	const uint8_t* __ptr64		p_rej_pdata;
+	uint8_t						rej_length;
+
+} ib_cm_rej_t;
+/*
+* FIELDS
+*	rej_status
+*		The reason for the connection rejection.
+*
+*	p_ari
+*		Additional rejection information.  The data referenced by this field
+*		is dependent on the rej_status and is defined by the InfiniBand
+*		specification.
+*
+*	ari_length
+*		Length of valid data provided in the p_ari buffer.
+*
+*	p_rej_pdata
+*		A reference to user-defined private data sent as part of the
+*		reject message.
+*
+*	rej_length
+*		Defines the size of the user-defined private data.
+*
+* SEE ALSO
+*	ib_cm_rej, ib_pfn_cm_rej_cb_t, ib_rej_status_t, ib_ari_t, ib_rej_pdata_t
+*****/
+
+
+/****s* Access Layer/ib_cm_mra_t
+* NAME
+*	ib_cm_mra_t
+*
+* DESCRIPTION
+*	Connection message received acknowledgement information used to
+*	indicate that a connection request, reply, or load alternate path
+*	has been received.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_mra
+{
+	uint8_t						svc_timeout;
+
+	const uint8_t* __ptr64		p_mra_pdata;
+	uint8_t						mra_length;
+
+} ib_cm_mra_t;
+/*
+* FIELDS
+*	svc_timeout
+*		Indicates the amount of time that the local service requires to
+*		complete processing of the previously received message.
+*
+*	p_mra_pdata
+*		Optional user-defined private data sent as part of the message
+*		received acknowledgement.
+*
+*	mra_length
+*		Defines the size of the user-defined private data.
+*
+* SEE ALSO
+*	ib_cm_mra, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_lap_cb_t,
+*	ib_mra_pdata_t
+*****/
+
+
+/****s* Access Layer/ib_cm_lap_t
+* NAME
+*	ib_cm_lap_t
+*
+* DESCRIPTION
+*	Load alternate path information used to configure a queue pair with an
+*	alternate path.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_lap
+{
+	ib_al_flags_t				flags;
+
+	const uint8_t* __ptr64		p_lap_pdata;
+	uint8_t						lap_length;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	ib_qp_handle_t				h_qp;
+
+	uint8_t						remote_resp_timeout;
+	ib_path_rec_t* __ptr64		p_alt_path;
+	ib_pfn_cm_apr_cb_t			pfn_cm_apr_cb;
+
+} ib_cm_lap_t;
+/*
+* FIELDS
+*	flags
+*		Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+*		process the called routine synchronously.
+*
+*	p_lap_pdata
+*		Optional user-defined private data sent as part of the load alternate
+*		path message.
+*
+*	lap_length
+*		Defines the size of the user-defined private data.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_qp
+*		A handle to the queue pair that should receive the alternate path.
+*
+*	remote_resp_timeout
+*		The time within which the remote CM should transmit a response to
+*		the sender.  This value is expressed as
+*		4.096 * (2 ^ remote_resp_timeout) microseconds.
+*
+*	p_alt_path
+*		The path record to use for the alternate connection.
+*
+*	pfn_cm_apr_cb
+*		References a user-defined callback that will be invoked when the
+*		response to the load request is received.
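+*
+* EXAMPLE
+*	A minimal sketch of loading an alternate path, for illustration only;
+*	h_qp, alt_path and my_apr_cb are assumed to exist in the caller:
+*
+*	ib_cm_lap_t		cm_lap;
+*	ib_api_status_t	status;
+*
+*	cl_memclr( &cm_lap, sizeof(cm_lap) );
+*	cm_lap.qp_type = IB_QPT_RELIABLE_CONN;
+*	cm_lap.h_qp = h_qp;
+*	cm_lap.remote_resp_timeout = 16;	// illustrative timeout exponent
+*	cm_lap.p_alt_path = &alt_path;
+*	cm_lap.pfn_cm_apr_cb = my_apr_cb;
+*	status = ib_cm_lap( &cm_lap );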
+*
+* SEE ALSO
+*	ib_cm_lap, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t, ib_path_rec_t,
+*	ib_lap_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_apr_t
+* NAME
+*	ib_cm_apr_t
+*
+* DESCRIPTION
+*	Alternate path response information used to accept or reject a load
+*	alternate path request.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_apr
+{
+	const uint8_t* __ptr64		p_apr_pdata;
+	uint8_t						apr_length;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	ib_qp_handle_t				h_qp;
+
+	ib_apr_status_t				apr_status;
+	uint8_t						info_length;
+	const ib_apr_info_t* __ptr64	p_info;
+
+} ib_cm_apr_t;
+/*
+* FIELDS
+*	p_apr_pdata
+*		Optional user-defined private data sent as part of the alternate
+*		path response message.
+*
+*	apr_length
+*		Defines the size of the user-defined private data.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_qp
+*		A handle to the queue pair that should receive the alternate path.
+*
+*	apr_status
+*		The alternate path response status.  This indicates additional failure
+*		information to a load alternate path request and is defined by the
+*		InfiniBand specification.
+*
+*	info_length
+*		Length of valid data in the APR additional information buffer.
+*
+*	p_info
+*		APR additional information.
+*
+* SEE ALSO
+*	ib_cm_apr, ib_pfn_cm_apr_cb_t, ib_apr_pdata_t, ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_dreq_t
+* NAME
+*	ib_cm_dreq_t
+*
+* DESCRIPTION
+*	Disconnection request information used to tear down a connection.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_dreq
+{
+	ib_al_flags_t				flags;
+
+	uint8_t* __ptr64			p_dreq_pdata;
+	uint8_t						dreq_length;
+
+	ib_qp_type_t				qp_type;
+
+	/* valid for rc, uc & rd qp_type only */
+	ib_qp_handle_t				h_qp;
+	ib_pfn_cm_drep_cb_t			pfn_cm_drep_cb;
+
+} ib_cm_dreq_t;
+/*
+* FIELDS
+*	flags
+*		Used to describe the mode of operation.  Set to IB_FLAGS_SYNC to
+*		process the called routine synchronously.
+*
+*	p_dreq_pdata
+*		A reference to user-defined private data sent as part of the
+*		disconnection request.
+*
+*	dreq_length
+*		Defines the size of the user-defined private data.
+*
+*	qp_type
+*		Indicates the CM service type.
+*
+*	h_qp
+*		A handle to the queue pair to disconnect.
+*
+*	pfn_cm_drep_cb
+*		References a user-defined callback that will be invoked when
+*		the reply to the disconnect is received.
+*
+* NOTES
+*	Users submit this structure to disconnect a queue pair or end-to-end
+*	context.  A single disconnect call disconnects either a queue pair or
+*	an end-to-end context, but not both.
+*
+* SEE ALSO
+*	ib_cm_dreq, ib_cm_drep, ib_dreq_pdata_t, ib_al_flags_t,
+*	ib_qp_type_t
+*****/
+
+
+/****s* Access Layer/ib_cm_drep_t
+* NAME
+*	ib_cm_drep_t
+*
+* DESCRIPTION
+*	Disconnection reply information used when tearing down a connection.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cm_drep
+{
+	uint8_t* __ptr64			p_drep_pdata;
+	uint8_t						drep_length;
+
+} ib_cm_drep_t;
+/*
+* FIELDS
+*	p_drep_pdata
+*		A reference to user-defined private data sent as part of the
+*		disconnection reply.
+*
+*	drep_length
+*		Defines the size of the user-defined private data.
+*
+* SEE ALSO
+*	ib_cm_drep, ib_drep_pdata_t
+*****/
+
+
+/****s* Access Layer/ib_listen_err_rec_t
+* NAME
+*	ib_listen_err_rec_t
+*
+* DESCRIPTION
+*	Information returned to the user when an error occurs on a listen request.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_listen_err_rec
+{
+	void* __ptr64				listen_context;
+	ib_api_status_t				reason;
+	ib_listen_handle_t			h_cm_listen;
+
+} ib_listen_err_rec_t;
+/*
+* FIELDS
+*	listen_context
+*		User-defined context information associated with the listen request
+*		through the ib_cm_listen call.
+*
+*	reason
+*		A status that identifies the reason for the error being reported.
+*
+*	h_cm_listen
+*		The handle for the listen request.  This handle will match the handle
+*		returned by the ib_cm_listen call.  It is provided in case an error
+*		event occurs before a client's call to ib_cm_listen can return.
+*
+* SEE ALSO
+*	ib_pfn_listen_err_cb_t, ib_api_status_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_listen_err_cb_t
+* NAME
+*	ib_pfn_listen_err_cb_t
+*
+* DESCRIPTION
+*	A user-specified callback that is invoked after an error has occurred on
+*	a listen request.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_listen_err_cb_t)(
+	IN				ib_listen_err_rec_t			*p_listen_err_rec );
+/*
+* PARAMETERS
+*	p_listen_err_rec
+*		[in] Error information returned to the user, indicating the reason
+*		for the error and associated context information.
+*
+* NOTES
+*	This callback is invoked within a system thread context in the kernel.
+*
+* SEE ALSO
+*	ib_listen_err_rec_t
+*****/
+
+
+/****f* Access Layer/ib_cm_listen
+* NAME
+*	ib_cm_listen
+*
+* DESCRIPTION
+*	Issues a request to the local communication manager to listen for
+*	incoming connection requests.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_listen(
+	IN	const	ib_al_handle_t				h_al,
+	IN	const	ib_cm_listen_t* const		p_cm_listen,
+	IN	const	ib_pfn_listen_err_cb_t		pfn_listen_err_cb,
+	IN	const	void* const					listen_context,
+	OUT			ib_listen_handle_t* const	ph_cm_listen );
+/*
+* PARAMETERS
+*	h_al
+*		[in] A handle to an opened instance of the access layer.
+*
+*	p_cm_listen
+*		[in] Information used to direct the listen request to match incoming
+*		connection requests.
+*
+*	pfn_listen_err_cb
+*		[in] User-specified error callback routine to invoke if an error
+*		occurs while listening.
+*
+*	listen_context
+*		[in] User-specified context information that is returned as a part
+*		of all connection requests through the pfn_cm_req_cb routine.  The
+*		context is also returned through the error and destroy callbacks.
+*
+*	ph_cm_listen
+*		[out] Upon successful completion of this call, this references a handle
+*		to the listen request.  This handle may be used to cancel the listen
+*		operation.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The listen request was successfully registered with the connection
+*		manager.
+*
+*	IB_INVALID_AL_HANDLE
+*		The access layer handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the listen request information, error callback function,
+*		or listen handle was not provided.
+*
+*	IB_INVALID_SETTING
+*		The class version specified in the listen request is not supported by
+*		the connection manager, or the listen request is not unique.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to register the listen request.
+*
+*	IB_INVALID_GUID
+*		A channel adapter or port GUID is not wildcarded and no channel adapter
+*		or port in the system was found for the specified GUID.
+*
+*	IB_INVALID_LID
+*		The lid is not wildcarded and is not within the lid range for the port
+*		specified in the listen request information.
+*
+*	IB_INVALID_PKEY
+*		The pkey is not wildcarded and is not a valid pkey for the port
+*		specified in the listen request information.
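+*
+* EXAMPLE
+*	A minimal sketch of a wildcarded listen, for illustration only; h_al,
+*	MY_SVC_ID, my_req_cb, my_listen_err_cb and my_ctx are hypothetical:
+*
+*	ib_cm_listen_t		cm_listen;
+*	ib_listen_handle_t	h_listen;
+*	ib_api_status_t		status;
+*
+*	cl_memclr( &cm_listen, sizeof(cm_listen) );
+*	cm_listen.svc_id = MY_SVC_ID;
+*	cm_listen.ca_guid = IB_ALL_CAS;
+*	cm_listen.port_guid = IB_ALL_PORTS;
+*	cm_listen.lid = IB_ALL_LIDS;
+*	cm_listen.pkey = IB_ALL_PKEYS;
+*	cm_listen.pfn_cm_req_cb = my_req_cb;
+*	cm_listen.qp_type = IB_QPT_RELIABLE_CONN;
+*	status = ib_cm_listen( h_al, &cm_listen, my_listen_err_cb,
+*		my_ctx, &h_listen );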
+* +* NOTES +* This routine directs the access layer to route connection requests +* matching the specified connection parameters to the client. Clients +* listen for connections matching a particular service ID, and may optionally +* direct their listen request towards a specific channel adapter, port, or +* LID. +* +* If local configuration changes occur that invalidate a listen request, the +* specified error callback will be invoked. Invalidated listen requests +* should be canceled by the user. An example of a configuration change that +* invalidates listen requests is a LID change for directed listens. The +* listen error callback will be invoked within the context of a system +* thread. +* +* SEE ALSO +* ib_cm_listen_t, ib_pfn_listen_err_cb_t +*****/ + + +/****f* Access Layer/ib_cm_cancel +* NAME +* ib_cm_cancel +* +* DESCRIPTION +* Routine used to cancel listening for connection requests. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_cancel( + IN const ib_listen_handle_t h_cm_listen, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ); +/* +* PARAMETERS +* h_cm_listen +* [in] A handle to an existing listen request. +* +* pfn_destroy_cb +* [in] A user-specified callback that is invoked after the listen +* request has been successfully canceled. +* +* RETURN VALUES +* IB_SUCCESS +* The cancel listen operation was initiated. +* +* IB_INVALID_HANDLE +* The connection manager handle was invalid. +* +* NOTES +* This routine cancels a listen request. To avoid a race condition +* canceling a request at the same time a connection callback is in +* progress, the cancel operation operates asynchronously. For +* additional details see ib_pfn_destroy_cb_t. +* +* SEE ALSO +* ib_cm_listen, ib_pfn_destroy_cb_t +*****/ + + +/****f* Access Layer/ib_cm_req +* NAME +* ib_cm_req +* +* DESCRIPTION +* Issues a connection request to a specified end-point. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_req( + IN const ib_cm_req_t* const p_cm_req ); +/* +* PARAMETERS +* p_cm_req +* [in] Information describing the type of connection and the remote +* endpoint for the connection. +* +* RETURN VALUES +* IB_SUCCESS +* The connection request was initiated. +* +* IB_INVALID_PARAMETER +* A reference to the connect request information was not provided. +* +* IB_INVALID_SETTING +* The connect request information contains one or more of the following +* errors: +* - The class version, queue pair type, or path is not supported by +* connection manager. +* - The private data length exceeds the value allowed by the specified +* connection class version. +* - The primary path is not on the same channel adapter as the queue +* pair. +* - The primary and alternate paths are on different channel adapters. +* - The primary and alternate paths specify different MTUs. +* - A primary or alternate path record packet lifetime is out of range. +* - A primary or alternate path record pkey is out of range. +* +* IB_INVALID_QP_HANDLE +* The queue pair handle specified in the connect request was invalid. +* +* IB_INVALID_AL_HANDLE +* The access layer handle was invalid. +* +* IB_INVALID_STATE +* The queue pair or end-to-end context is already connected. +* +* IB_INVALID_QP_STATE +* The queue pair was in an invalid state to perform the operation. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to initiate the connect request. +* +* NOTES +* This routine issues a connection request through the communication +* manager to a specified end-point. 
The p_cm_req parameter contains +* details needed to form the connection. The connection request will +* match with a remote ib_cm_listen or ib_cm_req connection request. +* +* SEE ALSO +* ib_cm_req_t, ib_cm_listen, ib_pfn_cm_req_cb_t +*****/ + + +/****f* Access Layer/ib_cm_rep +* NAME +* ib_cm_rep +* +* DESCRIPTION +* Sends a reply to a connection request, indicating that the connection +* has been accepted. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_rep( + IN const ib_cm_handle_t h_cm_req, + IN const ib_cm_rep_t* const p_cm_rep ); +/* +* PARAMETERS +* h_cm_req +* [in] A handle to the connection request being replied to. This handle +* is provided by the access layer through the ib_pfn_cm_req_cb_t +* callback. +* +* p_cm_rep +* [in] Contains reply information to return to the initiator of the +* connection request. +* +* RETURN VALUES +* IB_SUCCESS +* The connection reply was initiated. +* +* IB_INVALID_HANDLE +* The connection manager request handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the reply information was not provided. +* +* IB_INVALID_STATE +* The current connection state does not allow sending this message. +* +* IB_INVALID_SETTING +* The connect reply information contains one or more of the following +* errors: +* - The class version, queue pair type, or path is not supported by +* connection manager. +* - The private data length exceeds the value allowed by the connection +* class version. +* - The primary path is not on the same channel adapter as the queue +* pair. +* - The primary and alternate paths are on different channel adapters. +* - The primary and alternate paths specify different MTUs. +* - A primary or alternate path record packet lifetime is out of range. +* - A primary or alternate path record pkey is out of range. +* - The specified private data length is invalid. +* +* IB_INVALID_QP_HANDLE +* The queue pair handle specified in the reply was invalid. +* +* IB_INVALID_QP_STATE +* The queue pair was in an invalid state to perform the operation. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to send the connect reply. +* +* NOTES +* This routine results in the access layer replying to a connection +* request from a remote node. This call results in sending a response +* to the requesting node that the request has been accepted. +* +* SEE ALSO +* ib_cm_rep_t, ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t +*****/ + + +/****f* Access Layer/ib_cm_rtu +* NAME +* ib_cm_rtu +* +* DESCRIPTION +* Sends a ready to use message for a connection request, indicating that +* the connection has been accepted and is ready for use. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_rtu( + IN const ib_cm_handle_t h_cm_rep, + IN const ib_cm_rtu_t* const p_cm_rtu ); +/* +* PARAMETERS +* h_cm_rep +* [in] A handle to the connection reply being responded to. This handle +* is provided by the access layer through the ib_pfn_cm_rep_cb_t +* callback. +* +* p_cm_rtu +* [in] Contains ready to use information to return to the sender of the +* connection reply. +* +* RETURN VALUES +* IB_SUCCESS +* The connection ready to use was initiated. +* +* IB_INVALID_HANDLE +* The connection manager reply handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the ready to use information was not provided. +* +* IB_INVALID_STATE +* The current connection state does not allow sending this message. 
+* +* IB_INVALID_SETTING +* The specified queue pair attributes were invalid or the private data +* length exceeds the value allowed by the specified connection class +* version. +* +* IB_UNSUPPORTED +* The specified queue pair access control was not supported. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to send the ready to use response. +* +* NOTES +* This routine results in the access layer marking a connection as ready +* to use and notifying the remote end-point. +* +* SEE ALSO +* ib_cm_rep_t, ib_pfn_cm_rep_cb_t, ib_cm_rtu_t +*****/ + + +/****f* Access Layer/ib_cm_rej +* NAME +* ib_cm_rej +* +* DESCRIPTION +* Rejects a connection request from a remote end-point. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_rej( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_rej_t* const p_cm_rej ); +/* +* PARAMETERS +* h_cm +* [in] A handle to the connection request or reply being rejected. +* This is the h_cm_req or h_cm_rep handle provided through the +* ib_pfn_cm_req_cb_t or ib_pfn_cm_rep_cb_t callback, respectively. +* +* p_cm_rej +* [in] Contains the connection rejection information to return to the +* connecting end-point. +* +* RETURN VALUES +* IB_SUCCESS +* The connection reject was initiated. +* +* IB_INVALID_HANDLE +* The connection manager handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the reject information was not provided. +* +* NOTES +* This routine results in the access layer rejecting a connection +* and notifying the remote end-point. +* +* SEE ALSO +* ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_cm_rej_t +*****/ + + +/****f* Access Layer/ib_cm_mra +* NAME +* ib_cm_mra +* +* DESCRIPTION +* Notifies the remote end-point of a connection or load alternate path +* request that the request message has been received, but additional +* processing is required. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_cm_mra( + IN const ib_cm_handle_t h_cm, + IN const ib_cm_mra_t* const p_cm_mra ); +/* +* PARAMETERS +* h_cm +* [in] A handle to the connection request, connection reply, or load +* alternate path request that should receive the message received +* acknowledgement message. This is the h_cm_req, h_cm_rep, or +* h_cm_lap handle provided through the ib_pfn_cm_req_cb_t, +* ib_pfn_cm_rep_cb_t, or ib_pfn_cm_lap_cb_t callback, respectively. +* +* p_cm_mra +* [in] Contains the message received acknowledgement data to return to +* the requesting end-point. +* +* RETURN VALUES +* IB_SUCCESS +* The message receive acknowledge was sent successfully. +* +* IB_INVALID_HANDLE +* The connection manager reply handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the message receive acknowledge information was not +* provided. +* +* IB_INVALID_STATE +* The current connection state does not allow sending this message. +* +* IB_INVALID_SETTING +* The class version is not supported by connection manager or the +* specified private data length is invalid. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to send the message receive acknowledge. +* +* NOTES +* This routine results in the access layer acknowledging a connection or +* load alternate path message. It should be invoked by a client if the +* client is unable to respond to a request within a specified timeout, +* in order to prevent the remote end-point from timing out. 
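+*
+* EXAMPLE
+*	A minimal sketch of acknowledging a connection request from within an
+*	ib_pfn_cm_req_cb_t callback, for illustration only; the service timeout
+*	value shown is arbitrary:
+*
+*	ib_cm_mra_t		cm_mra;
+*	ib_api_status_t	status;
+*
+*	cl_memclr( &cm_mra, sizeof(cm_mra) );
+*	cm_mra.svc_timeout = 21;	// illustrative service timeout value
+*	status = ib_cm_mra( p_cm_req_rec->h_cm_req, &cm_mra );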
+*
+* SEE ALSO
+*	ib_pfn_cm_req_cb_t, ib_pfn_cm_rep_cb_t, ib_pfn_cm_lap_cb_t, ib_cm_mra_t
+*****/
+
+
+/****f* Access Layer/ib_cm_lap
+* NAME
+*	ib_cm_lap
+*
+* DESCRIPTION
+*	Issues a load alternate path request to a specified end-point.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_lap(
+	IN	const	ib_cm_lap_t* const			p_cm_lap );
+/*
+* PARAMETERS
+*	p_cm_lap
+*		[in] Information describing the alternate path to load and the remote
+*		endpoint for the connection.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The load alternate path request was sent successfully.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the load alternate path information was not provided.
+*
+*	IB_UNSUPPORTED
+*		The passive side of the connection attempted to load an alternate path.
+*
+*	IB_INVALID_STATE
+*		The current connection state does not allow sending this message.
+*
+*	IB_INVALID_SETTING
+*		The load alternate path information contains one or more of the
+*		following errors:
+*		  - The class version, queue pair type, or path is not supported by
+*		    connection manager.
+*		  - The primary path is not on the same channel adapter as the queue
+*		    pair.
+*		  - The primary and alternate paths are on different channel adapters.
+*		  - The primary and alternate paths specify different MTUs.
+*		  - The alternate path record packet lifetime is out of range.
+*		  - The alternate path record pkey is out of range.
+*		  - The specified private data length is invalid.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle specified in the load alternate path information
+*		was invalid.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to send the load alternate path request.
+*
+* NOTES
+*	This routine initiates loading an alternate path on an existing
+*	connected queue pair or end-to-end context.  If the request is successful,
+*	the alternate path will be loaded and armed for path migration.
+*
+*	The p_cm_lap parameter describes the alternate path to load and indicates
+*	the remote endpoint of an existing connection that will receive the load
+*	request.
+*
+* SEE ALSO
+*	ib_cm_apr, ib_cm_lap_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t
+*****/
+
+
+/****f* Access Layer/ib_cm_apr
+* NAME
+*	ib_cm_apr
+*
+* DESCRIPTION
+*	Responds to a load alternate path request, to accept or reject the
+*	proposed alternate path.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_apr(
+	IN	const	ib_cm_handle_t				h_cm_lap,
+	IN	const	ib_cm_apr_t* const			p_cm_apr );
+/*
+* PARAMETERS
+*	h_cm_lap
+*		[in] A handle to a load alternate path request corresponding to the
+*		response.  This handle is provided through the ib_pfn_cm_lap_cb_t
+*		callback.
+*
+*	p_cm_apr
+*		[in] Information describing the alternate path response.  The response
+*		will accept or reject the load request.  If the request is rejected
+*		this parameter will reference additional rejection information.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The load alternate path response was sent successfully.
+*
+*	IB_INVALID_HANDLE
+*		The connection manager load alternate path handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the alternate path information was not provided.
+*
+*	IB_INVALID_STATE
+*		The current connection state does not allow sending this message.
+*
+*	IB_INVALID_SETTING
+*		The private data length specified in alternate path information is
+*		invalid.
+*
+*	IB_INVALID_QP_HANDLE
+*		The queue pair handle specified in the alternate path information
+*		was invalid.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to send the alternate path response.
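+*
+* EXAMPLE
+*	A minimal sketch of accepting a proposed alternate path from within an
+*	ib_pfn_cm_lap_cb_t callback, for illustration only; h_qp is assumed to
+*	be tracked by the caller, for example through qp_context:
+*
+*	ib_cm_apr_t		cm_apr;
+*	ib_api_status_t	status;
+*
+*	cl_memclr( &cm_apr, sizeof(cm_apr) );
+*	cm_apr.qp_type = IB_QPT_RELIABLE_CONN;
+*	cm_apr.h_qp = h_qp;
+*	cm_apr.apr_status = IB_AP_SUCCESS;
+*	status = ib_cm_apr( p_cm_lap_rec->h_cm_lap, &cm_apr );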
+*
+* SEE ALSO
+* ib_cm_lap, ib_cm_apr_t, ib_pfn_cm_lap_cb_t, ib_pfn_cm_apr_cb_t
+*****/
+
+
+/****f* Access Layer/ib_force_apm
+* NAME
+* ib_force_apm
+*
+* DESCRIPTION
+* This routine indicates that a queue pair should immediately migrate to its
+* alternate path. All future data transfers will occur over the new path.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_force_apm(
+ IN const ib_qp_handle_t h_qp );
+/*
+* PARAMETERS
+* h_qp
+* [in] A handle to the queue pair to migrate.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The queue pair or end-to-end context was successfully modified.
+*
+* IB_INVALID_PARAMETER
+* Neither or both of the queue pair and end-to-end context handles
+* were valid.
+*
+* IB_INVALID_QP_HANDLE
+* The queue pair handle was invalid.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to modify the queue pair or end-to-end context.
+*
+* IB_UNSUPPORTED
+* The requested modification was not supported.
+*
+* IB_INVALID_QP_STATE
+* The queue pair was in an invalid state for the requested operation.
+*
+* NOTES
+* For this routine to operate correctly, the specified queue pair must have
+* an existing alternate path loaded. If an alternate path is not loaded, or
+* has not yet been armed, this call will fail.
+*
+* Use of this call results in additional data transfers on the given
+* queue pair occurring over the alternate path. Once this call completes, a
+* new alternate path may be loaded using the ib_cm_lap call.
+*
+* SEE ALSO
+* ib_cm_lap
+*****/
+
+
+/****f* Access Layer/ib_cm_dreq
+* NAME
+* ib_cm_dreq
+*
+* DESCRIPTION
+* This routine disconnects a queue pair or end-to-end context.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_dreq(
+ IN const ib_cm_dreq_t* const p_cm_dreq );
+/*
+* PARAMETERS
+* p_cm_dreq
+* [in] Information that describes the connection being disconnected.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The disconnect request was sent successfully.
+*
+* IB_INVALID_PARAMETER
+* A reference to the disconnect request information was not provided.
+*
+* IB_INVALID_STATE
+* The current connection state does not allow sending this message.
+*
+* IB_INVALID_SETTING
+* The private data length specified in the disconnect request information
+* is invalid.
+*
+* IB_INVALID_QP_HANDLE
+* The queue pair handle specified in the disconnect request information
+* was invalid.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to send the disconnect request.
+*
+* NOTES
+* This function will disconnect a queue pair or end-to-end context.
+* It results in sending a disconnection request message to the remote
+* end-point. After calling this routine, data transfers on the specified
+* queue pair or end-to-end context will fail.
+*
+* SEE ALSO
+* ib_cm_drep, ib_pfn_cm_dreq_cb_t, ib_cm_dreq_t
+*****/
+
+
+/****f* Access Layer/ib_cm_drep
+* NAME
+* ib_cm_drep
+*
+* DESCRIPTION
+* This routine replies to a disconnection request and disconnects
+* a queue pair or end-to-end context.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_drep(
+ IN const ib_cm_handle_t h_cm_dreq,
+ IN const ib_cm_drep_t* const p_cm_drep );
+/*
+* PARAMETERS
+* h_cm_dreq
+* [in] A handle to a disconnection request being replied to. This
+* handle is provided through the ib_pfn_cm_dreq_cb_t callback.
+*
+* p_cm_drep
+* [in] Reply information used to respond to the disconnection request.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The disconnect reply was sent successfully.
+*
+* IB_INVALID_HANDLE
+* The connection manager disconnect request handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the disconnect reply information was not provided.
+*
+* IB_INVALID_STATE
+* The current connection state does not allow sending this message.
+*
+* IB_INVALID_SETTING
+* The private data length specified in the disconnect reply information
+* is invalid.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to send the disconnect reply.
+*
+* NOTES
+* This function will disconnect a queue pair or end-to-end context. It
+* results in sending a disconnection reply message to the remote end-point.
+* After calling this routine, data transfers on the specified queue pair or
+* end-to-end context will fail.
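+*
+* A minimal sketch of replying from within an ib_pfn_cm_dreq_cb_t
+* callback, where p_cm_dreq_rec denotes the record passed to the
+* callback; its h_cm_dreq member and the zero-filled ib_cm_drep_t are
+* illustrative assumptions:
+*
+*	ib_cm_drep_t drep;
+*
+*	cl_memclr( &drep, sizeof( drep ) );
+*	ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &drep );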
+*
+* SEE ALSO
+* ib_cm_dreq, ib_pfn_cm_dreq_cb_t, ib_cm_drep_t
+*****/
+
+
+/****f* Access Layer/ib_cm_handoff
+* NAME
+* ib_cm_handoff
+*
+* DESCRIPTION
+* Hands off the received REQ information to svc_id.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_cm_handoff(
+ IN const ib_cm_handle_t h_cm_req,
+ IN const ib_net64_t svc_id );
+/*
+* PARAMETERS
+* h_cm_req
+* [in] A handle to the connection request being handed off.
+* This is the h_cm_req handle provided through the ib_pfn_cm_req_cb_t
+* callback.
+*
+* svc_id
+* [in] The service ID to which this connection request is handed off.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The handoff was initiated.
+*
+* IB_INVALID_HANDLE
+* The connection manager handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A valid service ID was not provided.
+*
+* IB_INVALID_STATE
+* The current connection state does not allow this transfer.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to complete the request.
+*
+* NOTES
+* This routine results in the access layer handing off the connection
+* to the service ID as a new incoming connection.
+*
+* SEE ALSO
+* ib_pfn_cm_req_cb_t, ib_cm_rej_t, ib_cm_listen
+*****/
+
+
+/****s* Access Layer/ib_cep_listen_t
+* NAME
+* ib_cep_listen_t
+*
+* DESCRIPTION
+* Request to listen for incoming connection attempts.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_cep_listen
+{
+ net64_t svc_id;
+
+ net64_t port_guid;
+
+ uint8_t* __ptr64 p_cmp_buf;
+ uint8_t cmp_len;
+ uint8_t cmp_offset;
+
+} ib_cep_listen_t;
+/*
+* FIELDS
+* svc_id
+* The identifier of the service to register for incoming connection
+* requests.
+*
+* port_guid
+* Directs the communication manager to register the listen only
+* with the specified port. This should be set to IB_ALL_PORTS
+* if the listen is not directed to a particular port.
+*
+* p_cmp_buf
+* An optionally provided buffer that will be used to match incoming
+* connection requests with a registered service. Use of this buffer
+* permits multiple services to listen on the same service ID as long as
+* they provide different compare buffers. Incoming requests will
+* be matched against the compare buffer.
+*
+* cmp_len
+* Specifies the size of the compare buffer in bytes. The length must
+* be the same for all requests using the same service ID.
+*
+* cmp_offset
+* An offset into the user-defined data area of a connection request
+* which contains the start of the data that will be compared against.
+* The offset must be the same for all requests using the same service ID.
+*
+* NOTES
+* Users fill out this structure when listening on a service ID with the
+* local communication manager. The communication manager will use the given
+* service ID and compare buffer to route connection requests to the
+* appropriate client. Users may direct listen requests to a particular
+* channel adapter, port, or LID.
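+*
+* A minimal sketch of filling out the structure to listen on all ports
+* without a compare buffer; MY_SVC_ID is a placeholder for a real
+* service identifier:
+*
+*	ib_cep_listen_t cep_listen;
+*
+*	cl_memclr( &cep_listen, sizeof( cep_listen ) );
+*	cep_listen.svc_id = MY_SVC_ID;
+*	cep_listen.port_guid = IB_ALL_PORTS;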
+*****/
+
+
+/****f* Access Layer/ib_create_ioc
+* NAME
+* ib_create_ioc
+*
+* DESCRIPTION
+* Creates an instance of an I/O controller.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_ioc(
+ IN const ib_ca_handle_t h_ca,
+ IN const ib_ioc_profile_t* const p_ioc_profile,
+ OUT ib_ioc_handle_t* const ph_ioc );
+/*
+* PARAMETERS
+* h_ca
+* [in] A handle to an opened channel adapter. The controller will be
+* created to be exposed through the given adapter.
+*
+* p_ioc_profile
+* [in] I/O controller profile information.
+*
+* ph_ioc
+* [out] Upon successful completion of this call, this references a
+* handle to the created I/O controller. This handle may be used to
+* add service entries to the controller and register it.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The I/O controller was successfully created.
+*
+* IB_INVALID_CA_HANDLE
+* The channel adapter handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the I/O controller profile information or handle
+* was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to create the I/O controller.
+*
+* NOTES
+* This routine creates an I/O controller. Once created, services may be
+* added to the controller before being registered with the local device
+* manager.
+*
+* SEE ALSO
+* ib_destroy_ioc, ib_add_svc_entry, ib_reg_ioc, ib_ioc_profile_t
+*****/
+
+
+/****f* Access Layer/ib_destroy_ioc
+* NAME
+* ib_destroy_ioc
+*
+* DESCRIPTION
+* Destroys an instance of an I/O controller.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_ioc(
+ IN const ib_ioc_handle_t h_ioc );
+/*
+* PARAMETERS
+* h_ioc
+* [in] A handle to an existing I/O controller.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The I/O controller was successfully destroyed.
+*
+* IB_INVALID_HANDLE
+* The I/O controller handle was invalid.
+*
+* NOTES
+* Once an I/O controller is destroyed, it is no longer reported by the
+* local device manager as an exported device. This routine automatically
+* removes all services associated with the controller.
+*
+* SEE ALSO
+* ib_create_ioc
+*****/
+
+
+/****f* Access Layer/ib_reg_ioc
+* NAME
+* ib_reg_ioc
+*
+* DESCRIPTION
+* Registers an I/O controller with the local device manager, which will
+* export the controller to the fabric.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_ioc(
+ IN const ib_ioc_handle_t h_ioc );
+/*
+* PARAMETERS
+* h_ioc
+* [in] A handle to the controller being registered.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The I/O controller was successfully registered.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to register the I/O controller.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the I/O
+* unit to register the I/O controller.
+*
+* IB_INVALID_HANDLE
+* The I/O controller handle was invalid.
+*
+* NOTES
+* This routine registers an I/O controller with the local device manager.
+* The device manager exports the controller to the fabric as part of an
+* I/O unit. Typically, clients will call ib_add_svc_entry to add services
+* to the controller before registering it with the device manager.
+*
+* SEE ALSO
+* ib_create_ioc, ib_destroy_ioc, ib_add_svc_entry
+*****/
+
+
+/****f* Access Layer/ib_add_svc_entry
+* NAME
+* ib_add_svc_entry
+*
+* DESCRIPTION
+* Adds a new service entry to an existing I/O controller.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_add_svc_entry(
+ IN const ib_ioc_handle_t h_ioc,
+ IN const ib_svc_entry_t* const p_svc_entry,
+ OUT ib_svc_handle_t* const ph_svc );
+/*
+* PARAMETERS
+* h_ioc
+* [in] A handle to an existing I/O controller that will support the
+* added service.
+*
+* p_svc_entry
+* [in] Service entry information that will be reported as part of the
+* controller's service profile.
+*
+* ph_svc
+* [out] Upon successful completion of this call, this references a handle
+* to the added service. This handle may be used to remove the service
+* entry.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The service entry was successfully added.
+*
+* IB_INVALID_HANDLE
+* The I/O controller handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the service entry information or handle was not
+* provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to register the service entry.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the I/O
+* controller to register the service entry.
+*
+* NOTES
+* This routine adds a new service to an I/O controller. Once added, the
+* service will be reported with the controller profile, provided that the
+* controller is registered with the local device manager.
+*
+* SEE ALSO
+* ib_create_ioc, ib_remove_svc_entry, ib_reg_ioc, ib_svc_entry_t
+*****/
+
+
+/****f* Access Layer/ib_remove_svc_entry
+* NAME
+* ib_remove_svc_entry
+*
+* DESCRIPTION
+* This removes a service entry from an I/O controller.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_remove_svc_entry(
+ IN const ib_svc_handle_t h_svc );
+/*
+* PARAMETERS
+* h_svc
+* [in] A handle to an existing service entry.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The service entry was successfully removed.
+*
+* IB_INVALID_HANDLE
+* The service entry handle was invalid.
+*
+* NOTES
+* This routine removes the specified service from its associated I/O
+* controller. Once removed, the service information will no longer be
+* exported along with the controller.
+*
+* SEE ALSO
+* ib_add_svc_entry
+*****/
+
+
+/****f* Access Layer/ib_get_ca_guids
+* NAME
+* ib_get_ca_guids
+*
+* DESCRIPTION
+* Returns a list of GUIDs for all channel adapters currently available in
+* the system.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_ca_guids(
+ IN ib_al_handle_t h_al,
+ OUT ib_net64_t* const p_guid_array OPTIONAL,
+ IN OUT size_t* const p_guid_cnt );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an opened instance of the access layer.
+*
+* p_guid_array
+* [out] An array of GUIDs provided by the user and filled out by the
+* access layer. If this parameter is NULL, the access layer will return
+* the number of entries in the array necessary to retrieve the GUID list.
+*
+* p_guid_cnt
+* [in/out] On input, this specifies the number of entries in the
+* GUID array.
+*
+* On output, the access layer will set this to the number of valid
+* entries in the p_guid_array or the minimum number of entries needed
+* in the GUID array in order to return all channel adapter GUIDs.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The operation was successful.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the GUID count was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* NOTES
+* This routine returns a list of GUIDs for all available channel adapters.
+* When called, the access layer will examine p_guid_cnt to determine the
+* number of entries available in the p_guid_array. If the count is too
+* small, the function will return IB_INSUFFICIENT_MEMORY, and set p_guid_cnt
+* to the number of needed entries.
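+*
+* A minimal sketch of the resulting two-call pattern; error handling is
+* omitted, and the complib cl_malloc/cl_free routines are used purely
+* for illustration:
+*
+*	size_t guid_cnt = 0;
+*	ib_net64_t *p_guids;
+*
+*	ib_get_ca_guids( h_al, NULL, &guid_cnt );
+*	p_guids = (ib_net64_t*)cl_malloc( guid_cnt * sizeof( ib_net64_t ) );
+*	ib_get_ca_guids( h_al, p_guids, &guid_cnt );
+*	cl_free( p_guids );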
+*
+* SEE ALSO
+* ib_open_al, ib_open_ca
+*****/
+
+
+/****f* Access Layer/ib_get_ca_by_gid
+* NAME
+* ib_get_ca_by_gid
+*
+* DESCRIPTION
+* Returns the GUID of a channel adapter containing the given port GID.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_ca_by_gid(
+ IN ib_al_handle_t h_al,
+ IN const ib_gid_t* const p_gid,
+ OUT ib_net64_t* const p_ca_guid );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an opened instance of the access layer.
+*
+* p_gid
+* [in] A port GID.
+*
+* p_ca_guid
+* [out] A GUID to the CA that contains the port matching the user-
+* specified GID.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The operation was successful.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the port GID or CA GUID was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_NOT_FOUND
+* No channel adapters in the system contain the specified port GID.
+*
+* NOTES
+* This routine returns a CA GUID that contains the user-specified port GID.
+* If no channel adapters in the system contain the port GID, the call will
+* return IB_NOT_FOUND.
+*
+* SEE ALSO
+* ib_open_al, ib_open_ca, ib_get_ca_guids
+*****/
+
+
+/****f* Access Layer/ib_get_port_by_gid
+* NAME
+* ib_get_port_by_gid
+*
+* DESCRIPTION
+* Returns the GUID of a port that contains the given port GID.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_port_by_gid(
+ IN ib_al_handle_t h_al,
+ IN const ib_gid_t* const p_gid,
+ OUT ib_net64_t* const p_port_guid );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an opened instance of the access layer.
+*
+* p_gid
+* [in] A port GID.
+*
+* p_port_guid
+* [out] A GUID to the port that contains the matching user-
+* specified GID.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The operation was successful.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the port GID or port GUID was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_NOT_FOUND
+* No channel adapters in the system contain the specified port GID.
+*
+* NOTES
+* This routine returns a port GUID that contains the user-specified port GID.
+* If no channel adapters in the system contain the port GID, the call will
+* return IB_NOT_FOUND.
+*
+* SEE ALSO
+* ib_open_al, ib_open_ca, ib_get_ca_guids
+*****/
+
+
+/****f* Access Layer/ib_pfn_mad_comp_cb_t
+* NAME
+* ib_pfn_mad_comp_cb_t
+*
+* DESCRIPTION
+* User-defined callback used to notify the user of a completion for a
+* sent or received datagram.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_mad_comp_cb_t)(
+ IN const ib_mad_svc_handle_t h_mad_svc,
+ IN void *mad_svc_context,
+ IN ib_mad_element_t *p_mad_element );
+/*
+* PARAMETERS
+* h_mad_svc
+* [in] Handle to the MAD service on which the completion occurred.
+*
+* mad_svc_context
+* [in] User-defined context information associated with the MAD service
+* on which the completion occurred.
+*
+* p_mad_element
+* [in] References information on the completed MAD request.
+*
+* NOTES
+* This function is invoked upon completion of a sent or received MAD.
+* It is separate from the normal completion callbacks in order to allow
+* the access layer to perform post processing on the MAD, such as
+* segmentation and reassembly, and retransmissions if a response was
+* expected.
+*
+* The MAD element returned through this call should be returned to its MAD
+* pool after completion processing on the MAD has concluded. Completed
+* receive MAD elements should not be reposted to the receive queue of a
+* MAD QP.
+*
+* In the kernel, this callback is typically invoked from within a
+* tasklet, depending on the implementation of the verbs provider driver.
+*
+* SEE ALSO
+* ib_send_mad, ib_reg_mad_svc
+*****/
+
+
+/****f* Access Layer/ib_create_mad_pool
+* NAME
+* ib_create_mad_pool
+*
+* DESCRIPTION
+* Creates a pool of MAD elements for use in sending and receiving
+* management datagrams.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_create_mad_pool(
+ IN const ib_al_handle_t h_al,
+ IN const size_t min,
+ IN const size_t max,
+ IN const size_t grow_size,
+ OUT ib_pool_handle_t* const ph_pool );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* min
+* [in] The minimum number of MAD elements to create in the pool.
+*
+* max
+* [in] The maximum number of MAD elements that will be created by the
+* pool. If max is set to 0, the pool will continue to grow as long
+* as system resources are available.
+*
+* grow_size
+* [in] The number of MAD elements to add to the pool when growing it.
+* If set to 0, the pool will not grow beyond the number specified
+* at creation. This value must be greater than 0 if min is set to 0.
+*
+* ph_pool
+* [out] On successful completion of this call, this returns a handle to
+* the newly created pool.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The MAD pool was created successfully.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the pool handle was not provided.
+*
+* IB_INVALID_SETTING
+* The maximum number of MAD elements was non-zero and less than the
+* minimum number of MAD elements.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to create the MAD pool.
+*
+* NOTES
+* This routine creates a pool of MAD elements. The elements may be used
+* to send and receive MADs on alias and MAD type QPs.
+*
+* SEE ALSO
+* ib_destroy_mad_pool, ib_get_mad, ib_put_mad, ib_reg_mad_pool,
+* ib_dereg_mad_pool
+*****/
+
+
+/****f* Access Layer/ib_destroy_mad_pool
+* NAME
+* ib_destroy_mad_pool
+*
+* DESCRIPTION
+* Destroys a MAD pool and all associated resources.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_destroy_mad_pool(
+ IN const ib_pool_handle_t h_pool );
+/*
+* PARAMETERS
+* h_pool
+* [in] A handle to a MAD pool allocated through the ib_create_mad_pool
+* routine.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The MAD pool was successfully destroyed.
+*
+* IB_INVALID_HANDLE
+* The MAD pool handle was invalid.
+*
+* IB_RESOURCE_BUSY
+* One or more MAD elements have not been returned to the MAD pool.
+*
+* NOTES
+* This call destroys a MAD pool and all resources allocated by the pool.
+*
+* SEE ALSO
+* ib_create_mad_pool, ib_get_mad, ib_put_mad
+*****/
+
+
+/****f* Access Layer/ib_reg_mad_pool
+* NAME
+* ib_reg_mad_pool
+*
+* DESCRIPTION
+* Registers a MAD pool for use with a protection domain.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_mad_pool(
+ IN const ib_pool_handle_t h_pool,
+ IN const ib_pd_handle_t h_pd,
+ OUT ib_pool_key_t* const p_pool_key );
+/*
+* PARAMETERS
+* h_pool
+* [in] A handle to a MAD pool.
+*
+* h_pd
+* [in] A handle to a protection domain.
+*
+* p_pool_key
+* [out] A key associated with registering the MAD pool with the
+* protection domain. This key is returned to the user and is used
+* when retrieving MADs from the pool.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The MAD pool was successfully registered with the protection domain.
+*
+* IB_INVALID_HANDLE
+* The MAD pool handle was invalid.
+*
+* IB_INVALID_PD_HANDLE
+* The protection domain handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the pool key was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to register the MAD pool.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to register the MAD pool.
+*
+* NOTES
+* This function registers a MAD pool with a protection domain. After
+* successful completion of this call, the MAD elements of the associated
+* pool are usable on any queue pairs associated with the given protection
+* domain.
+*
+* SEE ALSO
+* ib_create_mad_pool, ib_destroy_mad_pool, ib_dereg_mad_pool, ib_get_mad
+*****/
+
+
+/****f* Access Layer/ib_dereg_mad_pool
+* NAME
+* ib_dereg_mad_pool
+*
+* DESCRIPTION
+* Deregisters a MAD pool from a protection domain.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_dereg_mad_pool(
+ IN const ib_pool_key_t pool_key );
+/*
+* PARAMETERS
+* pool_key
+* [in] Key to the MAD pool to deregister. The specified pool must
+* have been registered with a protection domain through a call to
+* ib_reg_mad_pool.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The MAD pool was successfully deregistered from the protection domain.
+*
+* IB_INVALID_PARAMETER
+* The MAD pool key was invalid.
+*
+* IB_RESOURCE_BUSY
+* One or more MAD elements were removed from the MAD pool using the
+* specified pool key, and were not returned.
+*
+* NOTES
+* This function deregisters a MAD pool from a protection domain. After
+* successful completion of this call, the MAD elements of the associated
+* pool are no longer usable on the protection domain.
+*
+* SEE ALSO
+* ib_create_mad_pool, ib_destroy_mad_pool, ib_reg_mad_pool
+*****/
+
+
+/****f* Access Layer/ib_get_mad
+* NAME
+* ib_get_mad
+*
+* DESCRIPTION
+* Obtains a MAD element from the pool.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_mad(
+ IN const ib_pool_key_t pool_key,
+ IN const size_t buf_size,
+ OUT ib_mad_element_t **pp_mad_element );
+/*
+* PARAMETERS
+* pool_key
+* [in] Key for the pool to obtain a MAD element for the desired
+* protection domain.
+*
+* buf_size
+* [in] The size of the buffer referenced by the MAD element.
+*
+* pp_mad_element
+* [out] Upon successful completion of this call, this references
+* the returned MAD element.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The MAD element was successfully retrieved from the MAD pool.
+*
+* IB_INVALID_PARAMETER
+* The MAD pool key was invalid or a reference to the MAD element
+* pointer was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to obtain the MAD element.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to grow and register the MAD pool.
+*
+* NOTES
+* This function obtains a MAD element containing a data segment
+* that references a data buffer for the given pool key. The data buffer
+* referenced by the MAD element is zeroed before being returned to the
+* user.
+*
+* It is recommended that elements retrieved from a MAD pool for use on
+* the receive queue of a MAD QP have a buffer size of 256 bytes.
+*
+* For MADs being sent, buf_size should be set to the size of the relevant
+* data sent as part of the MAD, and should not include any padding needed
+* to make the MAD size a multiple of 256 bytes. For most MADs, buf_size
+* may be set equal to the size of the MAD header plus the amount of user
+* data transferred as part of the MAD.
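+*
+* A minimal sketch of the pool calls working together; h_al and h_pd
+* are assumed to be a previously opened access layer instance and
+* protection domain, and error checking is omitted:
+*
+*	ib_pool_handle_t h_pool;
+*	ib_pool_key_t pool_key;
+*	ib_mad_element_t *p_mad;
+*
+*	ib_create_mad_pool( h_al, 32, 0, 16, &h_pool );
+*	ib_reg_mad_pool( h_pool, h_pd, &pool_key );
+*	ib_get_mad( pool_key, 256, &p_mad );
+*	ib_put_mad( p_mad );
+*	ib_dereg_mad_pool( pool_key );
+*	ib_destroy_mad_pool( h_pool );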
+*
+* SEE ALSO
+* ib_put_mad, ib_send_mad, ib_mad_element_t
+*****/
+
+
+/****f* Access Layer/ib_put_mad
+* NAME
+* ib_put_mad
+*
+* DESCRIPTION
+* Returns a list of MAD elements to the pool.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_put_mad(
+ IN const ib_mad_element_t* p_mad_element_list );
+/*
+* PARAMETERS
+* p_mad_element_list
+* [in] A pointer to a list of MAD elements.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The list of MAD elements was successfully returned to the MAD pool.
+*
+* IB_INVALID_PARAMETER
+* A reference to the MAD element list was not provided.
+*
+* NOTES
+* This function returns a list of MAD elements to the pool.
+*
+* SEE ALSO
+* ib_get_mad, ib_mad_element_t
+*****/
+
+
+/****s* Access Layer/ib_dgrm_info_t
+* NAME
+* ib_dgrm_info_t
+*
+* DESCRIPTION
+* Information specified when initializing a datagram queue pair before its
+* first use.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_dgrm_info
+{
+ ib_net64_t port_guid;
+ uint32_t qkey;
+ uint16_t pkey_index;
+
+} ib_dgrm_info_t;
+/*
+* FIELDS
+* port_guid
+* Specifies the port that the datagram service will use. This field
+* applies only to IB_QPT_UNRELIABLE_DGRM and IB_QPT_MAD QP types.
+*
+* qkey
+* Specifies the qkey that the queue pair will use. Incoming messages
+* must have a matching qkey for the message to be accepted by the
+* receiving QP.
+*
+* pkey_index
+* Specifies the pkey associated with this queue pair.
+*
+* SEE ALSO
+* ib_init_dgrm_svc
+*****/
+
+
+/****f* Access Layer/ib_init_dgrm_svc
+* NAME
+* ib_init_dgrm_svc
+*
+* DESCRIPTION
+* Initializes a datagram queue pair for use.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_init_dgrm_svc(
+ IN const ib_qp_handle_t h_qp,
+ IN const ib_dgrm_info_t* const p_dgrm_info OPTIONAL );
+/*
+* PARAMETERS
+* h_qp
+* [in] A handle to an existing queue pair.
+*
+* p_dgrm_info
+* [in] References information needed to configure the queue pair for
+* use in sending and receiving datagrams. This field is optional for
+* the IB_QPT_QP0 and IB_QPT_QP1 queue pair types, and is not used for
+* the IB_QPT_RAW_IPV6 and IB_QPT_RAW_ETHER queue pair types.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The datagram queue pair was initialized successfully.
+*
+* IB_INVALID_QP_HANDLE
+* The datagram queue pair handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* The queue pair handle was not created as a datagram queue pair type
+* or a reference to the datagram service information was not provided.
+*
+* IB_INVALID_QP_STATE
+* The queue pair was in an invalid state for the requested operation.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to initialize the datagram queue pair.
+*
+* NOTES
+* This call binds the queue pair to a given port and transitions its state
+* to ready to send and receive data. A queue pair must be initialized
+* before it can be used to send and receive datagrams.
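+*
+* A minimal sketch for an IB_QPT_UNRELIABLE_DGRM queue pair; the port
+* GUID and qkey values are placeholders:
+*
+*	ib_dgrm_info_t dgrm_info;
+*
+*	dgrm_info.port_guid = my_port_guid;
+*	dgrm_info.qkey = my_qkey;
+*	dgrm_info.pkey_index = 0;
+*	ib_init_dgrm_svc( h_qp, &dgrm_info );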
+*
+* This routine is used to initialize queue pairs of type:
+*
+* IB_QPT_QP0
+* IB_QPT_QP1
+* IB_QPT_MAD
+* IB_QPT_RAW_IPV6
+* IB_QPT_RAW_ETHER
+* IB_QPT_UNRELIABLE_DGRM
+*
+* For IB_QPT_MAD type queue pairs, receive buffers are automatically posted
+* by the access layer; however, users must call ib_reg_mad_svc to receive
+* MADs. Received MAD elements must be returned to the access layer through
+* the ib_put_mad() call.
+*
+* SEE ALSO
+* ib_create_qp, ib_get_spl_qp, ib_dgrm_info_t, ib_reg_mad_svc
+*****/
+
+
+/****d* Access Layer/ib_mad_svc_type_t
+* NAME
+* ib_mad_svc_type_t
+*
+* DESCRIPTION
+* Indicates the type of services provided by a MAD service.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_mad_svc_type
+{
+ IB_MAD_SVC_DEFAULT = 0,
+ IB_MAD_SVC_RMPP,
+ IB_MAD_SVC_RAW
+
+} ib_mad_svc_type_t;
+/*
+* VALUES
+* IB_MAD_SVC_DEFAULT
+* Indicates that the access layer will provide all necessary services,
+* including retransmissions and RMPP for well-defined management classes.
+*
+* IB_MAD_SVC_RMPP
+* Indicates that the MAD service requires retransmissions and the RMPP
+* header is available on all MADs. (The RMPP protocol will be activated
+* on a per-send basis.) This service type should be used for
+* user-defined management classes requiring RMPP.
+*
+* IB_MAD_SVC_RAW
+* Specifies that the MAD service will not perform retransmissions or
+* perform RMPP. All MADs received or sent on a MAD service of this type
+* are passed between the client and the queue pair without additional
+* processing by the access layer.
+*
+* NOTES
+* This enum is used to define the types of MAD services available to users.
+*
+* SEE ALSO
+* ib_mad_svc_t, ib_reg_mad_svc
+*****/
+
+
+/****s* Access Layer/ib_mad_svc_t
+* NAME
+* ib_mad_svc_t
+*
+* DESCRIPTION
+* Information used to request management datagram support with a queue pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mad_svc
+{
+ void *mad_svc_context;
+ ib_pfn_mad_comp_cb_t pfn_mad_send_cb;
+ ib_pfn_mad_comp_cb_t pfn_mad_recv_cb;
+
+ boolean_t support_unsol;
+ uint8_t mgmt_class;
+ uint8_t mgmt_version;
+ boolean_t method_array[IB_MAX_METHODS];
+
+ ib_mad_svc_type_t svc_type;
+
+} ib_mad_svc_t;
+/*
+* FIELDS
+* mad_svc_context
+* User-defined context that is returned by the access layer through
+* the pfn_mad_send_cb and pfn_mad_recv_cb.
+*
+* pfn_mad_send_cb
+* A send callback that is invoked to notify the user that a send
+* operation has completed for a sent MAD.
+*
+* pfn_mad_recv_cb
+* A receive callback that is invoked to notify the user that a MAD
+* has been received.
+*
+* support_unsol
+* If set to TRUE, this field indicates that the registering client
+* supports processing unsolicited MADs. Unsolicited MADs are
+* received MADs that do not have the response bit set. If set to TRUE,
+* the following fields are required (must be non-zero): mgmt_class,
+* mgmt_version, and method_array.
+*
+* mgmt_version
+* Indicates which version of a management class the client requires
+* support for. The access layer distinguishes between clients
+* requiring different versions of the same management class.
+* This field is ignored if the support_unsol field is set to FALSE.
+*
+* mgmt_class
+* Indicates the management class that should be supported by the
+* access layer. This field is ignored if the support_unsol field is
+* set to FALSE.
+*
+* method_array
+* An array of 127 entries specifying which methods are supported by
+* a client when receiving unsolicited MADs. Each index corresponds to
+* a single method, and each entry in the array indicates if the method
+* is supported by the client. This field is ignored if the
+* support_unsol field is set to FALSE.
+*
+* svc_type
+* Indicates the type of services that should be provided by the MAD
+* service.
+*
+* NOTES
+* Clients use this structure to define which management datagram methods
+* they support, and the type of support required for each. A received MAD
+* is distinguished by the access layer based on the following three fields:
+* management class, management version, and method.
+*
+* Specific combinations of class, version, and method may be registered
+* for unsolicited MADs only once. The access layer supports multiple
+* clients registering for unsolicited MADs as long as they do not share the
+* same methods, class, or version.
+*
+* The svc_type field can be set by a client to indicate that the access
+* layer should invoke RMPP for the specified management class of MADs. If
+* set to IB_MAD_SVC_DEFAULT, the access layer will automatically invoke RMPP
+* for well known MAD classes (those defined by the 1.1 version of the
+* InfiniBand specification). The svc_type field is intended to be used by
+* clients sending and receiving vendor-specific management classes requiring
+* RMPP and clients providing their own MAD services.
+*
+* SEE ALSO
+* ib_reg_mad_svc, ib_pfn_mad_comp_cb_t, ib_mad_svc_type_t
+*****/
+
+
+/****f* Access Layer/ib_reg_mad_svc
+* NAME
+* ib_reg_mad_svc
+*
+* DESCRIPTION
+* Requests management datagram support for a specified class with a
+* queue pair.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_mad_svc(
+ IN const ib_qp_handle_t h_qp,
+ IN const ib_mad_svc_t* const p_mad_svc,
+ OUT ib_mad_svc_handle_t* const ph_mad_svc );
+/*
+* PARAMETERS
+* h_qp
+* [in] A handle to a queue pair. The queue pair must have been created
+* as one of the following types: IB_QPT_QP0, IB_QPT_QP0_ALIAS,
+* IB_QPT_QP1, IB_QPT_QP1_ALIAS, or IB_QPT_MAD.
+*
+* p_mad_svc
+* [in] A reference to the management class and methods supported by
+* this queue pair.
+*
+* ph_mad_svc
+* [out] On successful completion of this call, this references a
+* handle to the newly created MAD service.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The queue pair was registered successfully.
+*
+* IB_INVALID_QP_HANDLE
+* The queue pair handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* The queue pair handle was not created with the proper queue pair
+* type or a reference to the MAD service information or handle was
+* not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to register the queue pair.
+*
+* NOTES
+* This routine registers a queue pair as using a particular management
+* class. This indicates that the access layer should perform additional
+* processing on MADs sent and received by this queue pair. Queue pairs
+* registered for MAD support receive access layer SAR and retransmission
+* services. A queue pair may be registered for multiple management classes.
+*
+* SEE ALSO
+* ib_create_qp, ib_mad_svc_t
+*****/
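+
+/*
+* A minimal sketch of registering a MAD service for unsolicited MADs of
+* a vendor-defined class; the class value, method index, and callback
+* names are placeholders:
+*
+*	ib_mad_svc_t mad_svc;
+*	ib_mad_svc_handle_t h_mad_svc;
+*
+*	cl_memclr( &mad_svc, sizeof( mad_svc ) );
+*	mad_svc.pfn_mad_send_cb = my_send_cb;
+*	mad_svc.pfn_mad_recv_cb = my_recv_cb;
+*	mad_svc.support_unsol = TRUE;
+*	mad_svc.mgmt_class = MY_VENDOR_CLASS;
+*	mad_svc.mgmt_version = 1;
+*	mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE;
+*	mad_svc.svc_type = IB_MAD_SVC_RMPP;
+*	ib_reg_mad_svc( h_qp, &mad_svc, &h_mad_svc );
+*/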
+
+/****s* Access Layer/ib_reg_svc_rec_t
+* NAME
+* ib_reg_svc_rec_t
+*
+* DESCRIPTION
+* Information returned as a result of registering a service with the subnet
+* administrator. This includes name service registration.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_reg_svc_rec
+{
+ const void* __ptr64 svc_context;
+ ib_reg_svc_handle_t h_reg_svc;
+ ib_api_status_t req_status;
+ ib_net16_t resp_status;
+ ib_service_record_t svc_rec;
+
+} ib_reg_svc_rec_t;
+/*
+* FIELDS
+* svc_context
+* User-defined context information associated with the registration
+* through the ib_reg_svc call.
+*
+* req_status
+* Indicates the success of the registration operation.
+*
+* resp_status
+* Indicates the status of the response from the SA.
+*
+* h_reg_svc
+* A handle to the registered service. This handle may be used to
+* deregister the service through the ib_dereg_svc call.
+*
+* svc_rec
+* The service record returned by the SA for the registered service.
+*
+* NOTES
+* A registration result structure is returned to a client through the
+* ib_pfn_reg_svc_cb_t routine to notify them of the result of a service
+* registration attempt with the subnet administrator.
+*
+* SEE ALSO
+* ib_reg_svc, ib_pfn_reg_svc_cb_t, ib_reg_svc_status_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_reg_svc_cb_t
+* NAME
+* ib_pfn_reg_svc_cb_t
+*
+* DESCRIPTION
+* User-defined callback that is invoked to notify a client of the result
+* of a service registration attempt.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_reg_svc_cb_t)(
+ IN ib_reg_svc_rec_t *p_reg_svc_rec );
+/*
+* PARAMETERS
+* p_reg_svc_rec
+* [in] References the result of the service registration attempt.
+*
+* NOTES
+* The callback is used to notify a client of the result of a service
+* registration attempt with the subnet administrator.
+*
+* In the kernel, this callback is usually invoked using a tasklet, dependent
+* on the implementation of the underlying verbs provider driver.
+*
+* SEE ALSO
+* ib_reg_svc, ib_reg_svc_rec_t
+*****/
+
+
+/****s* Access Layer/ib_reg_svc_req_t
+* NAME
+* ib_reg_svc_req_t
+*
+* DESCRIPTION
+* Information used to request that a service be registered with the subnet
+* administrator.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_reg_svc_req
+{
+ ib_service_record_t svc_rec;
+ ib_net64_t port_guid;
+
+ uint32_t timeout_ms;
+ uint32_t retry_cnt;
+ ib_al_flags_t flags;
+
+ const void *svc_context;
+ ib_net64_t svc_data_mask;
+
+ ib_pfn_reg_svc_cb_t pfn_reg_svc_cb;
+
+} ib_reg_svc_req_t;
+/*
+* FIELDS
+* svc_rec
+* Service record that describes the service being registered.
+*
+* port_guid
+* Directs the registration to use the specified port. The request will
+* contact the management entity reachable through the given port.
+*
+* timeout_ms
+* Specifies the number of milliseconds to wait for a response for
+* the registration until retrying or timing out the request.
+*
+* retry_cnt
+* Specifies the number of times that the registration will be retried
+* before failing the request.
+*
+* flags
+* Used to describe the mode of operation. Set to IB_FLAGS_SYNC to
+* process the called routine synchronously.
+*
+* svc_context
+* User-defined context information associated with this registration
+* request. This context is returned to the user through the function
+* specified by the pfn_reg_svc_cb field.
+*
+* svc_data_mask
+* User-defined component mask indicating which parts of the private
+* data are populated. This is used as an extension to the svc_id
+* for data compares, and also provides a simple way to communicate
+* data to all clients for this service.
+*
+* pfn_reg_svc_cb
+* A user-defined callback that is invoked upon completion of the
+* registration request.
+*
+* NOTES
+* This structure is used to register a service with the subnet administrator.
+* The registration call operates asynchronously unless the flags field is
+* set to IB_FLAGS_SYNC. If synchronous operation is indicated, the client
+* will receive a callback with the results of the registration attempt
+* before the ib_reg_svc call returns. Synchronous operation results in
+* the calling thread blocking.
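+*
+* A minimal synchronous-registration sketch; the service record, port
+* GUID, and callback name are placeholders filled in elsewhere:
+*
+*	ib_reg_svc_req_t reg_req;
+*	ib_reg_svc_handle_t h_reg_svc;
+*
+*	cl_memclr( &reg_req, sizeof( reg_req ) );
+*	reg_req.svc_rec = my_svc_rec;
+*	reg_req.port_guid = my_port_guid;
+*	reg_req.timeout_ms = 1000;
+*	reg_req.retry_cnt = 3;
+*	reg_req.flags = IB_FLAGS_SYNC;
+*	reg_req.pfn_reg_svc_cb = my_reg_svc_cb;
+*	ib_reg_svc( h_al, &reg_req, &h_reg_svc );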
+*
+* SEE ALSO
+* ib_reg_svc, ib_svc_rec_t, ib_pfn_reg_svc_cb_t
+*****/
+
+
+/****f* Access Layer/ib_reg_svc
+* NAME
+* ib_reg_svc
+*
+* DESCRIPTION
+* Routine used to register for a service with the subnet administrator.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_svc(
+ IN const ib_al_handle_t h_al,
+ IN const ib_reg_svc_req_t* const p_reg_svc_req,
+ OUT ib_reg_svc_handle_t* const ph_reg_svc );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* p_reg_svc_req
+* [in] Describes the service to register with the subnet administrator.
+*
+* ph_reg_svc
+* [out] Pointer to a service registration handle, used to deregister
+* the service. Set upon successful completion of the function.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The service registration was initiated.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the service registration request was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_NOT_FOUND
+* No channel adapters in the system contain the GID specified in the
+* service record.
+*
+* IB_INVALID_GID
+* No port was found matching the GID specified in the service record.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to perform the operation.
+*
+* NOTES
+* This routine registers a service with the subnet administrator. Registered
+* services are reported by the subnet administrator to clients querying the
+* subnet administrator for service information.
+*
+* Once registered, a client will receive notification, via a callback,
+* that a service has been successfully registered.
+*
+* SEE ALSO
+* ib_dereg_svc, ib_reg_svc_req_t
+*****/
+
+
+/****f* Access Layer/ib_dereg_svc
+* NAME
+* ib_dereg_svc
+*
+* DESCRIPTION
+* Removes the registration of a service with the subnet administrator.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_dereg_svc(
+ IN const ib_reg_svc_handle_t h_reg_svc,
+ IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+* h_reg_svc
+* [in] A handle to a registered service.
+*
+* pfn_destroy_cb
+* [in] A user-specified callback that is invoked after the service
+* has been deregistered.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The service deregistration was initiated.
+*
+* IB_INVALID_HANDLE
+* The registered service handle was invalid.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to perform the operation.
+*
+* NOTES
+* This routine deregisters a service with the subnet administrator.
+* To avoid a race condition between deregistering a service and a
+* registration completion callback that is in progress, the deregister
+* operation operates asynchronously. For additional details see
+* ib_pfn_destroy_cb_t.
+* +* SEE ALSO +* ib_reg_svc, ib_pfn_destroy_cb_t +*****/ + + +/****d* Access Layer/ib_query_type_t +* NAME +* ib_query_type_t +* +* DESCRIPTION +* Abstracted queries supported by the access layer. +* +* SYNOPSIS +*/ +typedef enum _ib_query_type +{ + IB_QUERY_USER_DEFINED, + + IB_QUERY_ALL_SVC_RECS, + IB_QUERY_SVC_REC_BY_NAME, + IB_QUERY_SVC_REC_BY_ID, + + IB_QUERY_CLASS_PORT_INFO, + + IB_QUERY_NODE_REC_BY_NODE_GUID, + IB_QUERY_PORT_REC_BY_LID, + + IB_QUERY_VLARB_BY_LID_PORT_BLOCK, + IB_QUERY_SLVL_BY_LID_AND_PORTS, + + IB_QUERY_PATH_REC_BY_PORT_GUIDS, + IB_QUERY_PATH_REC_BY_GIDS, + IB_QUERY_PATH_REC_BY_LIDS, + +} ib_query_type_t; +/* +* VALUES +* IB_QUERY_USER_DEFINED +* Query the SA based on user-defined input. Queries of this type +* should reference an ib_user_query_t structure as input into the +* query. +* +* IB_QUERY_SVC_REC_BY_NAME +* Query for service records based on the service name. Queries of +* this type should reference an ib_svc_name_t structure as input +* into the query. +* +* IB_QUERY_SVC_REC_BY_ID +* Query for service records based on the service ID. Queries of +* this type should reference an ib_net64_t value that indicates the +* ID of the service being requested. +* +* IB_QUERY_NODE_REC_BY_NODE_GUID +* Query for node information based on the node's GUID. Queries of +* this type should reference an ib_net64_t value that indicates the +* GUID of the node being requested. +* +* IB_QUERY_PORT_REC_BY_LID +* Query for port information based on the port's base LID. Queries of +* this type should reference an ib_net16_t value that indicates the +* base LID of the port being requested. +* +* IB_QUERY_PATH_REC_BY_PORT_GUIDS +* Query for path records between the specified pair of port GUIDs. +* Queries of this type should reference an ib_guid_pair_t structure +* that indicates the GUIDs of the path being requested. +* +* IB_QUERY_PATH_REC_BY_GIDS +* Query for path records between the specified pair of port GIDs. +* Queries of this type should reference an ib_gid_pair_t structure +* that indicates the GIDs of the path being requested. +* +* IB_QUERY_PATH_REC_BY_LIDS +* Query for path records between the specified pair of port LIDs. +* Queries of this type should reference an ib_lid_pair_t structure +* that indicates the LIDs of the path being requested. +* +* NOTES +* This enum is used to define abstracted queries provided by the access +* layer. Users may issue queries not listed here by sending MADs directly +* to the subnet administrator or a class manager. These queries are +* intended to represent those most often used by clients. +* +* SEE ALSO +* ib_query, ib_query_req_t, ib_user_query_t, ib_gid_pair_t, ib_lid_pair_t +* ib_guid_pair_t +*****/ + + +/****s* Access Layer/ib_user_query_t +* NAME +* ib_user_query_t +* +* DESCRIPTION +* User-defined query information. +* +* SYNOPSIS +*/ +typedef struct _ib_user_query +{ + uint8_t method; + ib_net16_t attr_id; + uint32_t attr_size; + ib_net64_t comp_mask; + void* __ptr64 p_attr; + +} ib_user_query_t; +/* +* FIELDS +* +* method +* Method to be run +* +* attr_id +* Attribute identifier of query data. +* +* attr_size +* Size of the query attribute in bytes. This is translated into the +* attr_offset field of the SA MAD by the ib_query call. +* +* comp_mask +* Indicates the attribute components that are specified for the query. +* +* p_attr +* References the attribute structure used as input into the query. +* This field is ignored if comp_mask is set to 0. +* +* NOTES +* This structure is used to describe a user-defined query. 
The attribute
+* ID, attribute offset, component mask, and attribute structure must match
+* those defined by the IBA specification. Users should refer to chapter 15
+* of the IBA specification for additional details.
+*
+* SEE ALSO
+* ib_query_type_t, ib_query, ib_get_attr_offset, ib_get_attr_size
+*****/
+
+
+/****s* Access Layer/ib_gid_pair_t
+* NAME
+* ib_gid_pair_t
+*
+* DESCRIPTION
+* Source and destination GIDs.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_gid_pair
+{
+ ib_gid_t src_gid;
+ ib_gid_t dest_gid;
+
+} ib_gid_pair_t;
+/*
+* FIELDS
+* src_gid
+* Source GID of a path.
+*
+* dest_gid
+* Destination GID of a path.
+*
+* NOTES
+* This structure is used to describe the endpoints of a path.
+*
+* SEE ALSO
+* ib_gid_t
+*****/
+
+
+/****s* Access Layer/ib_lid_pair_t
+* NAME
+* ib_lid_pair_t
+*
+* DESCRIPTION
+* Source and destination LIDs.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_lid_pair
+{
+ ib_net16_t src_lid;
+ ib_net16_t dest_lid;
+
+} ib_lid_pair_t;
+/*
+* FIELDS
+* src_lid
+* Source LID of a path.
+*
+* dest_lid
+* Destination LID of a path.
+*
+* NOTES
+* This structure is used to describe the endpoints of a path.
+*****/
+
+
+/****s* Access Layer/ib_guid_pair_t
+* NAME
+* ib_guid_pair_t
+*
+* DESCRIPTION
+* Source and destination GUIDs. These may be port or channel adapter
+* GUIDs, depending on the context in which this structure is used.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_guid_pair
+{
+ ib_net64_t src_guid;
+ ib_net64_t dest_guid;
+
+} ib_guid_pair_t;
+/*
+* FIELDS
+* src_guid
+* Source GUID of a path.
+*
+* dest_guid
+* Destination GUID of a path.
+*
+* NOTES
+* This structure is used to describe the endpoints of a path. The given
+* GUID pair may belong to either ports or channel adapters.
+*
+* SEE ALSO
+* ib_guid_t
+*****/
+
+
+/****s* Access Layer/ib_query_rec_t
+* NAME
+* ib_query_rec_t
+*
+* DESCRIPTION
+* Contains the results of a subnet administration query.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_query_rec
+{
+ const void* __ptr64 query_context;
+ ib_api_status_t status;
+
+ ib_query_type_t query_type;
+ uint32_t result_cnt;
+ ib_mad_element_t* __ptr64 p_result_mad;
+
+} ib_query_rec_t;
+/*
+* FIELDS
+* query_context
+* User-defined context information associated with the query through
+* the ib_query call.
+*
+* status
+* Indicates the success of the query operation.
+*
+* query_type
+* Indicates the type of query for which the results are being returned.
+* This matches the query_type specified through the ib_query call.
+*
+* result_cnt
+* The number of result structures that were returned by the query.
+*
+* p_result_mad
+* For queries returning IB_SUCCESS or IB_REMOTE_ERROR, this references
+* the MAD returned by the subnet administrator containing the list
+* of results or the returned error code.
+*
+* NOTES
+* A query result structure is returned to a client through their
+* ib_pfn_query_cb_t routine to notify them of the results of a subnet
+* administration query. If the query was successful or received an error
+* from the subnet administrator, p_result_mad will reference a MAD element
+* containing the results. The MAD referenced by p_result_mad is owned by
+* the user and remains available even after their callback returns. Users
+* must call ib_put_mad() to return the MAD element back to the access layer
+* when they are done accessing the results.
+*
+* To retrieve individual result structures from the p_result_mad, users
+* may call ib_get_query_result().
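+*
+* A minimal callback sketch that reads the first path record of a
+* query issued with IB_QUERY_PATH_REC_BY_GIDS and then returns the
+* result MAD; my_query_cb is a placeholder name:
+*
+*	static void AL_API
+*	my_query_cb( ib_query_rec_t *p_query_rec )
+*	{
+*		ib_path_rec_t *p_path;
+*
+*		if( p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt )
+*		{
+*			p_path = ib_get_query_path_rec( p_query_rec->p_result_mad, 0 );
+*		}
+*		if( p_query_rec->p_result_mad )
+*			ib_put_mad( p_query_rec->p_result_mad );
+*	}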
+* +* SEE ALSO +* ib_query, ib_pfn_query_cb_t, ib_api_status_t, ib_put_mad, ib_mad_element_t +* ib_query_status_t, ib_query_type_t, ib_get_query_result +*****/ + + +/****f* Access Layer/ib_get_query_result +* NAME +* ib_get_query_result +* +* DESCRIPTION +* Retrieves a result structure from a MAD returned by a call to ib_query(). +* +* SYNOPSIS +*/ +AL_INLINE void* AL_API +ib_get_query_result( + IN ib_mad_element_t *p_result_mad, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_mad ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad ); + CL_ASSERT( p_sa_mad ); + CL_ASSERT( ib_get_attr_size( p_sa_mad->attr_offset ) * (result_index + 1) + + IB_SA_MAD_HDR_SIZE <= p_result_mad->size ); + + return( p_sa_mad->data + + (ib_get_attr_size( p_sa_mad->attr_offset ) * result_index) ); +} +/* +* PARAMETERS +* p_result_mad +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a result structure from a call +* to ib_query(). The type of result structure must be known to the user +* either through the user's context or the query_type returned as part of +* the ib_query_rec_t structure. +* +* SEE ALSO +* ib_query_rec_t, ib_mad_element_t +*****/ + + +/****f* Access Layer/ib_get_query_path_rec +* NAME +* ib_get_query_path_rec +* +* DESCRIPTION +* Retrieves a path record result from a MAD returned by a call to +* ib_query(). +* +* SYNOPSIS +*/ +AL_INLINE ib_path_rec_t* AL_API +ib_get_query_path_rec( + IN ib_mad_element_t *p_result_mad, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_mad ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PATH_RECORD ); + + return( (ib_path_rec_t*)ib_get_query_result( p_result_mad, result_index ) ); +} +/* +* PARAMETERS +* p_result_mad +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a path record result from +* a call to ib_query(). +* +* SEE ALSO +* ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_path_rec_t +*****/ + + +/****f* Access Layer/ib_get_query_portinfo_rec +* NAME +* ib_get_query_portinfo_rec +* +* DESCRIPTION +* Retrieves a port info record result from a MAD returned by a call to +* ib_query(). +* +* SYNOPSIS +*/ +AL_INLINE ib_portinfo_record_t* AL_API +ib_get_query_portinfo_rec( + IN ib_mad_element_t *p_result_mad, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_mad ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PORTINFO_RECORD ); + + return( (ib_portinfo_record_t*)ib_get_query_result( p_result_mad, + result_index ) ); +} +/* +* PARAMETERS +* p_result_mad +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a port info record result from +* a call to ib_query(). 
+* +* SEE ALSO +* ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_portinfo_record_t +*****/ + + +/****f* Access Layer/ib_get_query_node_rec +* NAME +* ib_get_query_node_rec +* +* DESCRIPTION +* Retrieves a node record result from a MAD returned by a call to +* ib_query(). +* +* SYNOPSIS +*/ +AL_INLINE ib_node_record_t* AL_API +ib_get_query_node_rec( + IN ib_mad_element_t *p_result_mad, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_mad ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_NODE_RECORD ); + + return( (ib_node_record_t*)ib_get_query_result( p_result_mad, + result_index ) ); +} +/* +* PARAMETERS +* p_result_mad +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a node record result from +* a call to ib_query(). +* +* SEE ALSO +* ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_node_record_t +*****/ + + +/****f* Access Layer/ib_get_query_svc_rec +* NAME +* ib_get_query_svc_rec +* +* DESCRIPTION +* Retrieves a service record result from a MAD returned by a call to +* ib_query(). +* +* SYNOPSIS +*/ +AL_INLINE ib_service_record_t* AL_API +ib_get_query_svc_rec( + IN ib_mad_element_t *p_result_mad, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_mad ); + p_sa_mad = (ib_sa_mad_t*)ib_get_mad_buf( p_result_mad ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_SERVICE_RECORD ); + + return( (ib_service_record_t*)ib_get_query_result( p_result_mad, + result_index ) ); +} +/* +* PARAMETERS +* p_result_mad +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a service record result from +* a call to ib_query(). +* +* SEE ALSO +* ib_query_rec_t, ib_mad_element_t, ib_get_query_result, ib_service_record_t +*****/ + + +/****f* Access Layer/ib_pfn_query_cb_t +* NAME +* ib_pfn_query_cb_t +* +* DESCRIPTION +* User-defined callback invoked on completion of a subnet administrator +* query. +* +* SYNOPSIS +*/ +typedef void +(AL_API * __ptr64 ib_pfn_query_cb_t)( + IN ib_query_rec_t *p_query_rec ); +/* +* PARAMETERS +* p_query_rec +* [in] This is a reference to a structure containing the result of the +* query. +* +* NOTES +* This routine is invoked to notify a client of the result of a subnet +* administration query. The p_query_rec parameter references the result +* of the query and, in the case of a successful query, any information +* returned by the subnet administrator. +* +* In the kernel, this callback is usually invoked using a tasklet, dependent +* on the implementation of the underlying verbs provider driver. +* +* SEE ALSO +* ib_query_rec_t +*****/ + + +/****s* Access Layer/ib_query_req_t +* NAME +* ib_query_req_t +* +* DESCRIPTION +* Information used to request an access layer provided query of the subnet +* administrator. 
+* +* SYNOPSIS +*/ +typedef struct _ib_query_req +{ + ib_query_type_t query_type; + const void* __ptr64 p_query_input; + ib_net64_t port_guid; + + uint32_t timeout_ms; + uint32_t retry_cnt; + ib_al_flags_t flags; + + const void* __ptr64 query_context; + ib_pfn_query_cb_t pfn_query_cb; + +} ib_query_req_t; +/* +* FIELDS +* query_type +* Indicates the type of query that the access layer should perform. +* +* p_query_input +* A pointer to the input for the query. The data referenced by this +* structure is dependent on the type of query being requested and is +* determined by the specified query_type. +* +* port_guid +* Directs the query to use the specified port. The request will +* contact the management entity reachable through the given port. +* +* timeout_ms +* Specifies the number of milliseconds to wait for a response for +* this query until retrying or timing out the request. +* +* retry_cnt +* Specifies the number of times that the query will be retried before +* failing the request. +* +* flags +* Used to describe the mode of operation. Set to IB_FLAGS_SYNC to +* process the called routine synchronously. +* +* query_context +* User-defined context information associated with this query. The +* context data is returned to the user as a part of their query +* callback. +* +* pfn_query_cb +* A user-defined callback that is invoked upon completion of the query. +* +* NOTES +* This structure is used when requesting an access layer provided query +* of the subnet administrator. Clients specify the type of query through +* the query_type field. Based on the type of query, the p_query_input +* field is set to reference the appropriate data structure. +* +* The information referenced by the p_query_input field is one of the +* following: +* +* -- a NULL terminated service name +* -- a service id +* -- a single GUID +* -- a pair of GUIDs specified through an ib_guid_pair_t structure +* -- a pair of GIDs specified through an ib_gid_pair_t structure +* +* SEE ALSO +* ib_query_type_t, ib_pfn_query_cb_t, ib_guid_pair_t, +* ib_gid_pair_t +*****/ + + +/****f* Access Layer/ib_query +* NAME +* ib_query +* +* DESCRIPTION +* Routine used to request an access layer provided query of the subnet +* administrator. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_query( + IN const ib_al_handle_t h_al, + IN const ib_query_req_t* const p_query_req, + OUT ib_query_handle_t* const ph_query OPTIONAL ); +/* +* PARAMETERS +* h_al +* [in] A handle to an open instance of the access layer. +* +* p_query_req +* [in] Specifies the type of query that the access layer should perform, +* along with information needed to process the completed query. +* +* ph_query +* [out] Pointer to a query handle that can be used to cancel the query. +* +* RETURN VALUES +* IB_SUCCESS +* The subnet administrator query was initiated. +* +* IB_INVALID_AL_HANDLE +* The access layer handle was invalid. +* +* IB_INVALID_PARAMETER +* A reference to the query request was not provided. +* +* IB_INSUFFICIENT_MEMORY +* There was insufficient memory to perform the operation. +* +* IB_INVALID_GUID +* No port was found for the port_guid specified in the request. +* +* IB_ERROR +* An invalid query_type was specified in the request. +* +* IB_INSUFFICIENT_RESOURCES +* There were insufficient resources currently available on the channel +* adapter to perform the operation. +* +* NOTES +* This routine directs the access layer to initiate a query to the subnet +* administrator for desired information. 
+*
+* The access layer will issue the query, collect the results, and report
+* them to the client through a user-specified callback. The access layer
+* is responsible for retrying the operation as directed by the client.
+*
+* SEE ALSO
+* ib_cancel_query, ib_query_req_t
+*****/
+
+
+/****f* Access Layer/ib_cancel_query
+* NAME
+* ib_cancel_query
+*
+* DESCRIPTION
+* Routine used to cancel a query of the subnet administrator.
+*
+* SYNOPSIS
+*/
+AL_EXPORT void AL_API
+ib_cancel_query(
+ IN const ib_al_handle_t h_al,
+ IN const ib_query_handle_t h_query );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* h_query
+* [in] Query handle returned by a previous call to ib_query().
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* This routine directs the access layer to cancel a query to the subnet
+* administrator. The access layer will notify the user of the
+* final status of the query through the query callback specified in the
+* call to ib_query().
+*
+* SEE ALSO
+* ib_query
+*****/
+
+
+/****d* Access Layer/ib_pnp_class_t
+* NAME
+* ib_pnp_class_t
+*
+* DESCRIPTION
+* Specifies the class of plug and play events that are being subscribed for.
+*
+* SYNOPSIS
+*/
+#define IB_PNP_CA 0x00000001
+#define IB_PNP_PORT 0x00000002
+#define IB_PNP_IOU 0x00000004
+#define IB_PNP_IOC 0x00000008
+
+#define IB_PNP_FLAG_REG_SYNC 0x40000000
+#define IB_PNP_FLAG_REG_COMPLETE 0x80000000
+#define IB_PNP_FLAG_MASK 0xF0000000
+#define IB_PNP_CLASS_MASK 0x000000FF
+
+typedef uint32_t ib_pnp_class_t;
+/*
+* VALUES
+* IB_PNP_CA
+* Value used to register for local channel adapter events. These
+* events include the addition or removal of a local channel adapter.
+*
+* IB_PNP_PORT
+* Value used to register for local port events. These events include
+* local port up or down events and port LID or Pkey changes.
+*
+* IB_PNP_IOU
+* Value used to register for I/O unit PnP events. I/O unit events
+* include notifications of I/O unit assignment to and dissociation from
+* the local host.
+*
+* IB_PNP_IOC
+* Value used to register for an I/O controller PnP event. I/O controller
+* events include notification of an I/O controller assignment to a local
+* port and indication that an I/O controller dissociation has occurred.
+*
+* IB_PNP_FLAG_REG_SYNC
+* Flag that is ORed with the PnP Class to control behavior of the
+* ib_reg_pnp call. When set, ib_reg_pnp returns after the client has
+* received all events for the current state of the system.
+*
+* IB_PNP_FLAG_REG_COMPLETE
+* Flag that is ORed with the PnP Class to control whether an event
+* is generated to indicate that a client has received all events for the
+* current state of the system.
+*
+* NOTES
+* When registering for PnP notification, a client specifies the class of
+* local events that the client wishes to be notified of. For example, to
+* request notification of events on a port, a client would use IB_PNP_PORT.
+* To be notified of the assignment of an I/O controller, a client would use
+* IB_PNP_IOC.
+*
+* The PnP APIs do not support registration for multiple event classes at
+* a time.
+*
+* SEE ALSO
+* ib_pfn_pnp_cb_t, ib_pfn_report_cb_t, ib_pnp_rec_t, ib_pnp_event_t
+*****/
+
+
+/****d* Access Layer/ib_pnp_event_t
+* NAME
+* ib_pnp_event_t
+*
+* DESCRIPTION
+* Indicates the type of plug and play event that has occurred.
+*
+* SYNOPSIS
+*/
+#define IB_PNP_EVENT_PATH 0x00000800
+#define IB_PNP_EVENT_ADD 0x00001000
+#define IB_PNP_EVENT_REMOVE 0x00002000
+#define IB_PNP_EVENT_CHANGE 0x00004000
+#define IB_PNP_EVENT_INIT 0x00008000
+#define IB_PNP_EVENT_ARMED 0x00010000
+#define IB_PNP_EVENT_ACTIVE 0x00020000
+#define IB_PNP_EVENT_DOWN 0x00040000
+#define IB_PNP_EVENT_PKEY 0x00080000
+#define IB_PNP_EVENT_SM 0x00100000
+#define IB_PNP_EVENT_GID 0x00200000
+#define IB_PNP_EVENT_LID 0x00400000
+#define IB_PNP_EVENT_SUBNET 0x00800000
+
+#define IB_PNP_CA_ADD (IB_PNP_CA | IB_PNP_EVENT_ADD)
+#define IB_PNP_CA_REMOVE (IB_PNP_CA | IB_PNP_EVENT_REMOVE)
+
+#define IB_PNP_PORT_ADD (IB_PNP_PORT | IB_PNP_EVENT_ADD)
+#define IB_PNP_PORT_REMOVE (IB_PNP_PORT | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_PORT_INIT (IB_PNP_PORT | IB_PNP_EVENT_INIT)
+#define IB_PNP_PORT_ARMED (IB_PNP_PORT | IB_PNP_EVENT_ARMED)
+#define IB_PNP_PORT_ACTIVE (IB_PNP_PORT | IB_PNP_EVENT_ACTIVE)
+#define IB_PNP_PORT_DOWN (IB_PNP_PORT | IB_PNP_EVENT_DOWN)
+#define IB_PNP_PKEY_CHANGE (IB_PNP_PORT | IB_PNP_EVENT_PKEY)
+#define IB_PNP_SM_CHANGE (IB_PNP_PORT | IB_PNP_EVENT_SM)
+#define IB_PNP_GID_CHANGE (IB_PNP_PORT | IB_PNP_EVENT_GID)
+#define IB_PNP_LID_CHANGE (IB_PNP_PORT | IB_PNP_EVENT_LID)
+#define IB_PNP_SUBNET_TIMEOUT_CHANGE (IB_PNP_PORT | IB_PNP_EVENT_SUBNET)
+
+#define IB_PNP_IOU_ADD (IB_PNP_IOU | IB_PNP_EVENT_ADD)
+#define IB_PNP_IOU_REMOVE (IB_PNP_IOU | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_IOC_ADD (IB_PNP_IOC | IB_PNP_EVENT_ADD)
+#define IB_PNP_IOC_REMOVE (IB_PNP_IOC | IB_PNP_EVENT_REMOVE)
+#define IB_PNP_IOC_PATH_ADD (IB_PNP_IOC | IB_PNP_EVENT_PATH | \
+ IB_PNP_EVENT_ADD)
+#define IB_PNP_IOC_PATH_REMOVE (IB_PNP_IOC | IB_PNP_EVENT_PATH | \
+ IB_PNP_EVENT_REMOVE)
+
+#define IB_PNP_REG_COMPLETE IB_PNP_FLAG_REG_COMPLETE
+
+typedef uint32_t ib_pnp_event_t;
+/*
+* VALUES
+* IB_PNP_CA_ADD
+* Indicates that a new channel adapter has been added.
+*
+* IB_PNP_CA_REMOVE
+* Indicates that a channel adapter has been removed.
+*
+* IB_PNP_PORT_ADD
+* Indicates that a new port has been added. This callback will always
+* be followed by a callback to indicate the actual port state to allow
+* clients to use the PnP callbacks to drive their state machine.
+*
+* IB_PNP_PORT_REMOVE
+* Indicates that a port has been removed.
+* A CA remove event will trigger this event first.
+*
+* IB_PNP_PORT_INIT
+* Indicates that a port is in the IB_LINK_INIT state.
+*
+* IB_PNP_PORT_ARMED
+* Indicates that a port is in the IB_LINK_ARMED state.
+*
+* IB_PNP_PORT_ACTIVE
+* Indicates that a port is in the IB_LINK_ACTIVE state.
+*
+* IB_PNP_PORT_DOWN
+* Indicates that a port down event has occurred.
+*
+* IB_PNP_PKEY_CHANGE
+* Indicates that a port Pkey change has occurred.
+*
+* IB_PNP_SM_CHANGE
+* Indicates that the SM assignment for a port has changed.
+*
+* IB_PNP_GID_CHANGE
+* Indicates that the GID assignment for a port has changed.
+*
+* IB_PNP_LID_CHANGE
+* Indicates that the LID or LMC assignment for a port has changed.
+*
+* IB_PNP_SUBNET_TIMEOUT_CHANGE
+* Indicates that the subnet timeout assignment for a port has changed.
+*
+* IB_PNP_IOU_ADD
+* Indicates that an I/O unit assignment has occurred.
+*
+* IB_PNP_IOU_REMOVE
+* Indicates that an I/O unit dissociation has occurred.
+*
+* IB_PNP_IOC_ADD
+* Indicates that an I/O controller assignment has occurred.
+*
+* IB_PNP_IOC_REMOVE
+* Indicates that an I/O controller dissociation has occurred.
+* A port down event will trigger this event first.
+*
+* IB_PNP_IOC_PATH_ADD
+* Indicates that a new path to an I/O controller is available.
+*
+* IB_PNP_IOC_PATH_REMOVE
+* Indicates that a path to an I/O controller is no longer available.
+*
+* IB_PNP_REG_COMPLETE
+* Indicates that all events associated with an ib_reg_pnp call have been
+* reported to the user. The user's view of the system state is now in
+* sync with that of the access layer.
+*
+* NOTES
+* The Access Layer maintains a queue of client PnP registrations.
+* Using this queue, PnP events are reported to clients in a specific
+* order. CA add, port add, and IOC add events are reported from the
+* head of the queue, while CA remove, port remove, and IOC remove events
+* are reported from the tail. Clients are responsible for performing
+* registrations in the proper sequence to ensure that PnP event
+* notifications occur in the desired order.
+*
+* SEE ALSO
+* ib_pfn_pnp_cb_t, ib_pfn_report_cb_t, ib_pnp_rec_t, ib_pnp_class_t
+*****/
+
+
+AL_INLINE const char* AL_API
+ib_get_pnp_event_str(
+ IN ib_pnp_event_t event )
+{
+ switch( event )
+ {
+ case IB_PNP_CA_ADD : return "IB_PNP_CA_ADD";
+ case IB_PNP_CA_REMOVE : return "IB_PNP_CA_REMOVE";
+ case IB_PNP_PORT_ADD : return "IB_PNP_PORT_ADD";
+ case IB_PNP_PORT_REMOVE : return "IB_PNP_PORT_REMOVE";
+ case IB_PNP_PORT_INIT : return "IB_PNP_PORT_INIT";
+ case IB_PNP_PORT_ARMED : return "IB_PNP_PORT_ARMED";
+ case IB_PNP_PORT_ACTIVE : return "IB_PNP_PORT_ACTIVE";
+ case IB_PNP_PORT_DOWN : return "IB_PNP_PORT_DOWN";
+ case IB_PNP_PKEY_CHANGE : return "IB_PNP_PKEY_CHANGE";
+ case IB_PNP_SM_CHANGE : return "IB_PNP_SM_CHANGE";
+ case IB_PNP_GID_CHANGE : return "IB_PNP_GID_CHANGE";
+ case IB_PNP_LID_CHANGE : return "IB_PNP_LID_CHANGE";
+ case IB_PNP_SUBNET_TIMEOUT_CHANGE : return "IB_PNP_SUBNET_TIMEOUT_CHANGE";
+ case IB_PNP_IOU_ADD : return "IB_PNP_IOU_ADD";
+ case IB_PNP_IOU_REMOVE : return "IB_PNP_IOU_REMOVE";
+ case IB_PNP_IOC_ADD : return "IB_PNP_IOC_ADD";
+ case IB_PNP_IOC_REMOVE : return "IB_PNP_IOC_REMOVE";
+ case IB_PNP_IOC_PATH_ADD : return "IB_PNP_IOC_PATH_ADD";
+ case IB_PNP_IOC_PATH_REMOVE : return "IB_PNP_IOC_PATH_REMOVE";
+ case IB_PNP_REG_COMPLETE : return "IB_PNP_REG_COMPLETE";
+ }
+ return "Unknown";
+}
+
+
+/****s* Access Layer/ib_pnp_rec_t
+* NAME
+* ib_pnp_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local channel adapter, port,
+* and I/O controller events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_rec
+{
+ ib_pnp_event_t pnp_event;
+
+ ib_pnp_handle_t h_pnp;
+ ib_pnp_handle_t h_ioc_event;
+
+ void* __ptr64 pnp_context;
+ void* __ptr64 context;
+ // NOTE: guid and ca_guid are used as the key to a flexi map; these
+ // fields must be kept together.
+ ib_net64_t guid;
+ ib_net64_t ca_guid;
+
+} ib_pnp_rec_t;
+/*
+* FIELDS
+* pnp_event
+* Describes the type of plug and play event that is being reported.
+*
+* h_pnp
+* A handle to the notification registration for which this PnP record
+* was generated. This handle will match the handle returned through
+* an ib_reg_pnp call. It is provided in case a PnP notification event
+* occurs before a client's call to ib_reg_pnp can return. This handle
+* may be used to cancel further notification of PnP events.
+*
+* h_ioc_event
+* A handle that is unique to an I/O controller assignment event.
+* This handle is used to reject the assignment of an I/O controller
+* from within the ib_pfn_pnp_cb_t callback. Valid for IB_PNP_IOC_ADD
+* events only.
+*
+* pnp_context
+* User-defined context information specified when registering for
+* notification of the event.
+* See the notes section below for more details.
+*
+* context
+* This field references a user-specified context on which the event
+* occurred. See the notes section below for more details.
+*
+* guid
+* The GUID of the adapter, port, IOU, or IOC for which
+* the PnP event occurred.
+*
+* ca_guid
+* The GUID of the HCA.
+*
+* NOTES
+* This structure is returned to the user to notify them of the addition
+* of a channel adapter, the removal of a channel adapter, a port up or down
+* event, a port Pkey change, and I/O controller addition and removal events.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* The context value can be changed by updating its field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost. Context is maintained between port down and subsequent port up
+* events provided that the channel adapter is not removed.
+*
+* I/O controller path notifications are only delivered with respect to a
+* previously reported I/O controller.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****s* Access Layer/ib_pnp_ca_rec_t
+* NAME
+* ib_pnp_ca_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local channel adapter events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_ca_rec
+{
+ ib_pnp_rec_t pnp_rec;
+ ib_ca_attr_t* __ptr64 p_ca_attr;
+
+} ib_pnp_ca_rec_t;
+/*
+* FIELDS
+* pnp_rec
+* Structure describing the plug and play event being reported.
+*
+* p_ca_attr
+* Attributes of the channel adapter that has experienced the event.
+* NULL for IB_PNP_CA_REMOVE, IB_PNP_PORT_REMOVE, and IB_PNP_IOC_REMOVE
+* events.
+*
+* NOTES
+* This structure is returned to the user to notify them of the addition
+* or the removal of a channel adapter.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* Context values can be changed by updating the appropriate field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost.
+*
+* Recipients of CA-related PnP events should cast the ib_pnp_rec_t structure
+* returned in the PnP callback to this type to access CA-specific information.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****s* Access Layer/ib_pnp_port_rec_t
+* NAME
+* ib_pnp_port_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local port events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_port_rec
+{
+ ib_pnp_rec_t pnp_rec;
+ ib_ca_attr_t* __ptr64 p_ca_attr;
+ ib_port_attr_t* __ptr64 p_port_attr;
+
+} ib_pnp_port_rec_t;
+/*
+* FIELDS
+* pnp_rec
+* Structure describing the plug and play event being reported.
+*
+* p_ca_attr
+* Attributes of the channel adapter that has experienced the event.
+* NULL for IB_PNP_CA_REMOVE, IB_PNP_PORT_REMOVE, and IB_PNP_IOC_REMOVE
+* events.
+*
+* p_port_attr
+* Attributes of the port that has experienced the event. Valid only
+* for IB_PNP_PORT_UP, IB_PNP_PORT_DOWN, IB_PNP_PKEY_CHANGE, and
+* IB_PNP_IOC_ADD events.
+*
+* NOTES
+* This structure is returned to the user to notify them of port events.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* Context values can be changed by updating the appropriate field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost. Context is maintained between port down and subsequent port up
+* events provided that the channel adapter is not removed.
+*
+* Recipients of port-related PnP events should cast the ib_pnp_rec_t structure
+* returned in the PnP callback to this type to access port-specific information.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****s* Access Layer/ib_pnp_iou_rec_t
+* NAME
+* ib_pnp_iou_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local I/O unit events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_iou_rec
+{
+ ib_pnp_rec_t pnp_rec;
+ net64_t guid;
+ net64_t ca_guid;
+ net64_t chassis_guid;
+ uint8_t slot;
+ net32_t vend_id;
+ net16_t dev_id;
+ net32_t revision;
+ char desc[IB_NODE_DESCRIPTION_SIZE + 1];
+
+} ib_pnp_iou_rec_t;
+/*
+* FIELDS
+* pnp_rec
+* Structure describing the plug and play event being reported.
+*
+* ca_guid
+* GUID of the local HCA through which the I/O unit is accessible. Valid
+* only for IB_PNP_IOU_ADD events.
+*
+* chassis_guid
+* GUID of the chassis in which an I/O unit is installed. Valid only for
+* IB_PNP_IOU_ADD events.
+*
+* slot
+* Chassis slot number in which an I/O unit is installed. Valid only for
+* IB_PNP_IOU_ADD events.
+*
+* guid
+* GUID of an I/O unit from which one or more I/O controllers are assigned
+* to this host. Valid only for IB_PNP_IOU_ADD events.
+*
+* vend_id
+* Vendor ID of an I/O unit from which one or more I/O controllers are
+* assigned to this host. Valid only for IB_PNP_IOU_ADD events.
+*
+* dev_id
+* Device ID of an I/O unit from which one or more I/O controllers are
+* assigned to this host. Valid only for IB_PNP_IOU_ADD events.
+*
+* revision
+* Revision of an I/O unit from which one or more I/O controllers are
+* assigned to this host. Valid only for IB_PNP_IOU_ADD events.
+*
+* desc
+* Node description string for an I/O unit from which one or more I/O
+* controllers are assigned to this host. Valid only for IB_PNP_IOU_ADD
+* events.
+*
+* NOTES
+* This structure is returned to the user to notify them of the addition
+* and removal of an I/O Unit.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* Context values can be changed by updating the appropriate field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost. Context is maintained between port down and subsequent port up
+* events provided that the channel adapter is not removed.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****s* Access Layer/ib_pnp_ioc_rec_t
+* NAME
+* ib_pnp_ioc_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local channel adapter, port,
+* and I/O controller events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_ioc_rec
+{
+ ib_pnp_rec_t pnp_rec;
+ net64_t ca_guid;
+ ib_ioc_info_t info;
+ ib_svc_entry_t svc_entry_array[1];
+
+} ib_pnp_ioc_rec_t;
+/*
+* FIELDS
+* pnp_rec
+* Structure describing the plug and play event being reported.
+*
+* ca_guid
+* GUID of the local HCA through which the I/O controller is accessible.
+* Valid only for IB_PNP_IOC_ADD events.
+*
+* info
+* The I/O controller information for an assigned controller, including
+* information for the I/O unit. Valid only for IB_PNP_IOC_ADD events.
+*
+* svc_entry_array
+* If an I/O controller is being reported, this will reference an array
+* of service entries associated with the I/O controller. The actual
+* number of entries in the array may be determined by examining the
+* svc_entries field in the I/O controller profile. Valid only for
+* IB_PNP_IOC_ADD events.
+*
+* NOTES
+* This structure is returned to the user to notify them of the addition
+* and removal of an I/O controller.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* Context values can be changed by updating the appropriate field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost. Context is maintained between port down and subsequent port up
+* events provided that the channel adapter is not removed.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****s* Access Layer/ib_pnp_ioc_path_rec_t
+* NAME
+* ib_pnp_ioc_path_rec_t
+*
+* DESCRIPTION
+* Notification information used to describe local channel adapter, port,
+* and I/O controller events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_ioc_path_rec
+{
+ ib_pnp_rec_t pnp_rec;
+ net64_t ca_guid;
+ net64_t port_guid;
+ ib_path_rec_t path;
+
+} ib_pnp_ioc_path_rec_t;
+/*
+* FIELDS
+* pnp_rec
+* Structure describing the plug and play event being reported.
+*
+* ca_guid
+* GUID of the local HCA through which the I/O controller is accessible.
+* Valid only for IB_PNP_IOC_PATH_ADD and IB_PNP_IOC_PATH_REMOVE events.
+*
+* port_guid
+* GUID of the local HCA port through which the I/O controller is
+* accessible. Valid only for IB_PNP_IOC_PATH_ADD and
+* IB_PNP_IOC_PATH_REMOVE events.
+*
+* path
+* Path record that provides connectivity with a given I/O controller.
+* Valid only for IB_PNP_IOC_PATH_ADD and IB_PNP_IOC_PATH_REMOVE events.
+*
+* NOTES
+* This structure is returned to the user to notify them of the addition
+* and removal of a path to an I/O controller. I/O controller path
+* notifications are only delivered with respect to a previously reported
+* I/O controller.
+*
+* The context field is NULL unless a context value has already been set
+* by the user.
+*
+* Context values can be changed by updating the appropriate field
+* and will take effect once the notification callback returns.
+*
+* Once a device has been removed, all context associated with that device
+* is lost. Context is maintained between port down and subsequent port up
+* events provided that the channel adapter is not removed.
+*
+* SEE ALSO
+* ib_open_al, ib_ca_attr_t, ib_reg_pnp, ib_dereg_pnp, ib_pfn_pnp_cb_t,
+* ib_ioc_info_t, ib_reject_ioc, ib_pnp_event_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_pnp_cb_t
+* NAME
+* ib_pfn_pnp_cb_t
+*
+* DESCRIPTION
+* User-defined callback that is invoked to notify a client of the addition
+* or removal of a channel adapter, a port up or down event, port changes,
+* and the assignment of an I/O controller to a local port.
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(AL_API * __ptr64 ib_pfn_pnp_cb_t)(
+ IN ib_pnp_rec_t *p_pnp_rec );
+/*
+* PARAMETERS
+* p_pnp_rec
+* [in] A reference to a plug and play record.
+* The plug and play record contains details about the type of local
+* event that has occurred, along with the relevant device information.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* Indicates to the PnP manager that the callback client requires it
+* to maintain a context for this event.
+*
+* Other
+* Indicates to the PnP manager that the callback client does not need
+* a context for this event.
+*
+* NOTES
+* The callback is used to notify users of local events that have occurred
+* on a given channel adapter. Information about the type of event that
+* occurred along with the associated device is returned to the user through
+* the p_pnp_rec parameter.
+*
+* Users register for plug and play changes by requesting notification from
+* the access layer. Users may register for notifications either by directly
+* invoking the appropriate function in the access layer, or indirectly by
+* adding the necessary registration data to the access layer device file.
+*
+* This callback is invoked from within a system thread context.
+*
+* If the callback returns a status other than IB_SUCCESS, no further
+* callback for related events will be delivered.
+*
+* SEE ALSO
+* ib_pnp_rec_t, ib_reg_pnp, ib_dereg_pnp, ib_reject_ioc
+*****/
+
+
+/****s* Access Layer/ib_pnp_req_t
+* NAME
+* ib_pnp_req_t
+*
+* DESCRIPTION
+* Information used to register for notification of local and I/O
+* controller assignment events.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_pnp_req
+{
+ ib_pnp_class_t pnp_class;
+ const void *pnp_context;
+ ib_pfn_pnp_cb_t pfn_pnp_cb;
+
+} ib_pnp_req_t;
+/*
+* FIELDS
+* pnp_class
+* Specifies the class of PnP events that the client wishes to be
+* notified of.
+*
+* pnp_context
+* User-defined context information associated with this notification.
+* The context data is returned to the user as a part of their PnP
+* notification callback.
+*
+* pfn_pnp_cb
+* User-defined callback function that is invoked to notify the user of
+* the occurrence of a plug and play event.
+*
+* NOTES
+* This structure is used when requesting notification of local events from
+* the access layer. The class of PnP events to be notified of is
+* specified through the pnp_class field.
+*
+* SEE ALSO
+* ib_pnp_class_t, ib_pfn_pnp_cb_t, ib_reg_pnp, ib_pnp_rec_t
+*****/
+
+
+/****f* Access Layer/ib_reg_pnp
+* NAME
+* ib_reg_pnp
+*
+* DESCRIPTION
+* Routine used to register for notification of local and I/O controller
+* assignment events.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reg_pnp(
+ IN const ib_al_handle_t h_al,
+ IN const ib_pnp_req_t* const p_pnp_req,
+ OUT ib_pnp_handle_t* const ph_pnp );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* p_pnp_req
+* [in] Specifies the type of events that the user wishes to be notified
+* of, along with information needed to process the completed query.
+*
+* ph_pnp
+* [out] Upon successful completion of this call, this references a handle
+* to the PnP notification request. This handle may be used to cancel the
+* notification registration.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The PnP registration was successful.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the PnP request information or handle was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to register for PnP notification.
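+*
+* A minimal registration sketch is shown below (my_pnp_cb, my_context, and
+* h_al are hypothetical client-supplied names). The callback returns
+* IB_SUCCESS to ask the PnP manager to maintain a context for the event,
+* and port events are cast to the port-specific record:
+*
+* static ib_api_status_t AL_API
+* my_pnp_cb(
+* IN ib_pnp_rec_t *p_pnp_rec )
+* {
+* if( p_pnp_rec->pnp_event == IB_PNP_PORT_ACTIVE )
+* {
+* // Port events may be cast to the port-specific record.
+* ib_pnp_port_rec_t *p_port_rec = (ib_pnp_port_rec_t*)p_pnp_rec;
+* // ... examine p_port_rec->p_port_attr ...
+* }
+* return IB_SUCCESS;
+* }
+*
+* ib_pnp_req_t pnp_req;
+* ib_pnp_handle_t h_pnp;
+* ib_api_status_t status;
+*
+* cl_memclr( &pnp_req, sizeof(pnp_req) );
+* pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_COMPLETE;
+* pnp_req.pnp_context = my_context;
+* pnp_req.pfn_pnp_cb = my_pnp_cb;
+*
+* status = ib_reg_pnp( h_al, &pnp_req, &h_pnp );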
+* +* NOTES +* This routine registers the calling client with the access layer for +* notification of locally occurring events, or the assignment of I/O +* controllers to a local device. Once registered, a client will receive +* notification, via a callback, that a given event has occurred on a +* local device. Clients may restrict the types of events and devices +* that are reported. The p_pnp_req parameter is used to indicate which +* device events to report to the user. +* +* Upon invoking this routine, the client may receive a callback through +* the ib_pfn_pnp_cb_t routine to notify them of the current system state. +* For example, if a client registers for notification of port up events, +* then the access layer will notify the client of all available ports when +* this routine is first invoked. +* +* SEE ALSO +* ib_dereg_pnp, ib_pnp_req_t, ib_pnp_rec_t, ib_pfn_pnp_cb_t +*****/ + + +/****f* Access Layer/ib_dereg_pnp +* NAME +* ib_dereg_pnp +* +* DESCRIPTION +* Routine used to cancel notification of local events or I/O controller +* assignments. +* +* SYNOPSIS +*/ +AL_EXPORT ib_api_status_t AL_API +ib_dereg_pnp( + IN const ib_pnp_handle_t h_pnp, + IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL ); +/* +* PARAMETERS +* h_pnp +* [in] A handle returned as a result of an ib_reg_pnp operation. +* +* pfn_destroy_cb +* [in] A user-specified callback that is invoked after the PnP +* registration has been successfully deregistered. +* +* NOTES +* This routine cancels a pending PnP operation. To avoid a race condition +* canceling a request at the same time a notification callback is in +* progress, the cancel operation operates asynchronously. For additional +* details see ib_pfn_destroy_cb_t. +* +* RETURN VALUES +* IB_SUCCESS +* The PnP deregistration was initiated. +* +* IB_INVALID_HANDLE +* The PnP handle was invalid. +* +* SEE ALSO +* ib_reg_pnp, ib_pfn_destroy_cb_t +*****/ + + +/****s* Access Layer/ib_sub_rec_t +* NAME +* ib_sub_rec_t +* +* DESCRIPTION +* Information returned to a user that indicates the result of a subscription +* request. +* +* SYNOPSIS +*/ +typedef struct _ib_sub_rec +{ + const void* __ptr64 sub_context; + ib_api_status_t status; + ib_sub_handle_t h_sub; + +} ib_sub_rec_t; +/* +* FIELDS +* sub_context +* References user-defined context information associated with the +* subscription request. This field is set by the user through the +* ib_subscribe routine. +* +* status +* Indicates the success of the subscription request. +* +* h_sub +* The handle to the subscription request that was returned to the user +* from the ib_subscribe call. This handle is provided to the user to +* avoid a race condition between the return of the ib_subscribe routine +* and the notification of an event. +* +* NOTES +* This structure is returned to the user to notify them of the results +* of a subscription request. After successfully subscribing with a +* class manager for an event, this structure will be returned to the user +* with the status set to IB_SUCCESS. The sub_context field will be set +* to the context specified through the p_sub_req parameter in the +* ib_subscribe routine. +* +* SEE ALSO +* ib_subscribe +*****/ + + +/****f* Access Layer/ib_pfn_sub_cb_t +* NAME +* ib_pfn_sub_cb_t +* +* DESCRIPTION +* User-defined callback invoked on completion of a subscription request. 
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_sub_cb_t)(
+ IN ib_sub_rec_t *p_sub_rec );
+/*
+* PARAMETERS
+* p_sub_rec
+* [in] This is a reference to a structure containing the result of the
+* subscription request.
+*
+* NOTES
+* This routine is invoked to notify a client of the result of a
+* subscription request with a class manager. If the subscription request
+* was successful, the client will receive future notifications of the
+* subscribed event from the class manager.
+*
+* This callback will always be invoked before a client receives information
+* reported on a subscribed event that has occurred.
+*
+* In the kernel, this callback is usually invoked using a tasklet, dependent
+* on the implementation of the underlying verbs provider driver.
+*
+* SEE ALSO
+* ib_subscribe, ib_sub_rec_t
+*****/
+
+
+/****s* Access Layer/ib_report_rec_t
+* NAME
+* ib_report_rec_t
+*
+* DESCRIPTION
+* Reported event information returned to the user when a subscribed-for
+* event occurs.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_report_rec
+{
+ const void* __ptr64 report_context;
+ ib_mad_notice_attr_t* __ptr64 p_notice;
+
+} ib_report_rec_t;
+/*
+* FIELDS
+* report_context
+* Client-defined context information specified when registering for
+* the report.
+*
+* p_notice
+* Reported information that describes the event that has occurred.
+*
+* NOTES
+* Subscription for reported events is done through a class manager. When
+* a class manager detects such an event, it will generate a
+* report to the subscribed client. The reported information is referenced
+* through the p_notice field.
+*
+* SEE ALSO
+* ib_mad_notice_attr_t
+*****/
+
+
+/****f* Access Layer/ib_pfn_report_cb_t
+* NAME
+* ib_pfn_report_cb_t
+*
+* DESCRIPTION
+* User-defined callback that is invoked to notify a client of an event
+* that has occurred on the fabric.
+*
+* SYNOPSIS
+*/
+typedef void
+(AL_API * __ptr64 ib_pfn_report_cb_t)(
+ IN ib_report_rec_t *p_report_rec );
+/*
+* PARAMETERS
+* p_report_rec
+* [in] A reference to an event report. The report contains
+* details about the type of event that has occurred, along with the
+* relevant device information.
+*
+* NOTES
+* The callback is used to notify users of remote events that have been seen
+* by a specified class manager. Information about the type of event that
+* occurred along with the associated device is returned to the user through
+* the p_report_rec parameter.
+*
+* Users register for device changes by subscribing with a class manager.
+* Users may subscribe for events either by directly invoking the
+* appropriate function in the access layer, or indirectly by adding the
+* necessary registration data to the access layer device file.
+*
+* This callback is invoked from within a system thread context.
+*
+* SEE ALSO
+* ib_report_rec_t, ib_subscribe, ib_unsubscribe
+*****/
+
+
+/****s* Access Layer/ib_sub_req_t
+* NAME
+* ib_sub_req_t
+*
+* DESCRIPTION
+* Information used to subscribe for event notification from a class
+* manager.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_sub_req
+{
+ ib_svc_name_t* __ptr64 p_class_mgr_name;
+ ib_inform_info_t* __ptr64 p_inform_info;
+ ib_net64_t port_guid;
+
+ uint32_t timeout_ms;
+ uint32_t retry_cnt;
+ ib_al_flags_t flags;
+
+ const void* __ptr64 sub_context;
+ ib_pfn_sub_cb_t pfn_sub_cb;
+
+ const void* __ptr64 report_context;
+ ib_pfn_report_cb_t pfn_report_cb;
+
+} ib_sub_req_t;
+/*
+* FIELDS
+* p_class_mgr_name
+* The service name of the class manager to subscribe for events with.
+*
+* p_inform_info
+* Information describing the type of event being subscribed to.
+*
+* port_guid
+* Directs the subscription request to use the specified port. The
+* request will contact the subnet administrator reachable through the
+* given port.
+*
+* timeout_ms
+* Specifies the number of milliseconds to wait for a response for
+* this subscription before retrying or timing out the request.
+*
+* retry_cnt
+* Specifies the number of times that the request will be retried before
+* failing the request.
+*
+* flags
+* Used to describe the mode of operation. Set to IB_FLAGS_SYNC to
+* process the called routine synchronously.
+*
+* sub_context
+* User-defined context information associated with this subscription
+* request. This context is returned to the user through the function
+* specified by the pfn_sub_cb field.
+*
+* pfn_sub_cb
+* A user-defined callback that is invoked upon completion of the
+* subscription request. This is used to notify a client of the
+* result of their subscription request.
+*
+* report_context
+* User-defined context information associated with this subscription.
+* This context is returned to the user through the client's
+* ib_pfn_report_cb_t callback routine specified by the pfn_report_cb
+* field.
+*
+* pfn_report_cb
+* A user-defined callback that is invoked to notify the user that an
+* event report has been received.
+*
+* NOTES
+* This structure is used to subscribe for events with a class manager. Both
+* the subscription request and any corresponding event notifications operate
+* asynchronously. Clients will be notified of the result of their
+* subscription request before receiving notification of associated events.
+*
+* SEE ALSO
+* ib_subscribe, ib_svc_name_t, ib_inform_info_t, ib_pfn_sub_cb_t,
+* ib_pfn_report_cb_t, ib_open_al
+*****/
+
+
+/****f* Access Layer/ib_subscribe
+* NAME
+* ib_subscribe
+*
+* DESCRIPTION
+* Subscribe with a class manager for event notification.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_subscribe(
+ IN const ib_al_handle_t h_al,
+ IN const ib_sub_req_t* const p_sub_req,
+ OUT ib_sub_handle_t* const ph_sub );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* p_sub_req
+* [in] Specifies the type of events that the user wishes to be
+* notified of, along with information needed to process the completed
+* subscription.
+*
+* ph_sub
+* [out] Upon successful completion of this call, this references a handle
+* to the subscription request. This handle may be used to unsubscribe
+* from the events.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The subscription request was initiated.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the subscription request or handle was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_INVALID_GUID
+* No port was found for the port_guid specified in the request.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to perform the operation.
+*
+* NOTES
+* This routine registers the calling client with a class manager for
+* notification of events. Once registered, a client will receive
+* notification, via a callback, that a given event has occurred on
+* a device managed by the class manager.
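+*
+* As a rough sketch (my_inform_info, my_port_guid, my_sub_cb, my_report_cb,
+* and h_al are hypothetical client-supplied names):
+*
+* ib_sub_req_t sub_req;
+* ib_sub_handle_t h_sub;
+* ib_api_status_t status;
+*
+* cl_memclr( &sub_req, sizeof(sub_req) );
+* sub_req.p_inform_info = &my_inform_info;
+* sub_req.port_guid = my_port_guid;
+* sub_req.timeout_ms = 1000;
+* sub_req.retry_cnt = 3;
+* sub_req.pfn_sub_cb = my_sub_cb;
+* sub_req.pfn_report_cb = my_report_cb;
+*
+* status = ib_subscribe( h_al, &sub_req, &h_sub );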
+*
+* SEE ALSO
+* ib_unsubscribe, ib_sub_req_t, ib_pfn_sub_cb_t, ib_pfn_report_cb_t
+*****/
+
+
+/****f* Access Layer/ib_unsubscribe
+* NAME
+* ib_unsubscribe
+*
+* DESCRIPTION
+* Unsubscribe with a class manager for event notification.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_unsubscribe(
+ IN const ib_sub_handle_t h_sub,
+ IN const ib_pfn_destroy_cb_t pfn_destroy_cb OPTIONAL );
+/*
+* PARAMETERS
+* h_sub
+* [in] A handle to a subscribed event.
+*
+* pfn_destroy_cb
+* [in] A user-specified callback that is invoked after the subscription
+* request has been successfully canceled.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The unsubscribe request was initiated.
+*
+* IB_INVALID_HANDLE
+* The subscription handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the subscription request or handle was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to perform the operation.
+*
+* NOTES
+* This routine cancels an active or pending event subscription with a class
+* manager. To avoid a race condition canceling a subscription at the same
+* time an event notification callback is in progress, the unsubscribe
+* operation operates asynchronously. For additional details see
+* ib_pfn_destroy_cb_t.
+*
+* SEE ALSO
+* ib_subscribe, ib_pfn_destroy_cb_t
+*****/
+
+
+/****f* Access Layer/ib_reject_ioc
+* NAME
+* ib_reject_ioc
+*
+* DESCRIPTION
+* Rejects an I/O controller assignment to a host.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_reject_ioc(
+ IN const ib_al_handle_t h_al,
+ IN const ib_pnp_handle_t h_ioc_event );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an open instance of the access layer.
+*
+* h_ioc_event
+* [in] A handle provided as part of the notification of an I/O controller
+* being assigned. This handle is obtained through the ib_pnp_rec_t
+* structure given to a client through their ib_pfn_pnp_cb_t callback.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The I/O controller reject request was initiated.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_HANDLE
+* The I/O controller handle was invalid.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_INSUFFICIENT_RESOURCES
+* There were insufficient resources currently available on the channel
+* adapter to perform the operation.
+*
+* NOTES
+* This routine rejects an I/O controller assigned by the configuration
+* manager to the local host. The access layer sends a rejection notification
+* to the configuration manager and disables access to the controller from
+* the local host. This routine must be called from a client's
+* ib_pfn_pnp_cb_t callback to reject a newly assigned I/O controller.
+*
+* SEE ALSO
+* ib_pfn_pnp_cb_t, ib_pnp_rec_t
+*****/
+
+
+#define IB_ANY_INDEX -1
+/****d* Access Layer/ib_device_attr_mask_t
+* NAME
+* ib_device_attr_mask_t
+*
+* DESCRIPTION
+* Used to specify desired attributes of a device or port.
+*
+* SYNOPSIS
+*/
+#define IB_DEV_PORT_ACTIVE 0x1
+/*
+* VALUES
+* IB_DEV_PORT_ACTIVE
+* Specifies that a port state should be active. Applies only to port
+* GUIDs.
+*
+* SEE ALSO
+* ib_get_guid
+*****/
+
+
+/****f* Access Layer/ib_get_guid
+* NAME
+* ib_get_guid
+*
+* DESCRIPTION
+* Returns a GUID for a device or port that matches the user-specified
+* attributes.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_get_guid(
+ IN ib_al_handle_t h_al,
+ IN const uint32_t index,
+ IN const ib_pnp_class_t device_type,
+ IN const uint64_t attr_mask,
+ OUT ib_net64_t* const p_guid );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an opened instance of the access layer.
+*
+* index
+* [in] Specifies the location of the device or port. Users specify this
+* value to iterate through all devices or ports on the system. If set
+* to IB_ANY_INDEX, then the first device or port matching the given
+* attributes will be returned.
+*
+* device_type
+* [in] Indicates the type of device to retrieve the GUID for.
+*
+* attr_mask
+* [in] Specifies a set of attributes that the given device or port
+* must have for a successful match to occur.
+*
+* p_guid
+* [out] On successful return, this parameter will reference the GUID
+* of the device or port that contains the specified attributes.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The operation was successful.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* IB_INVALID_SETTING
+* The specified device type is invalid.
+*
+* IB_INVALID_PARAMETER
+* No p_guid parameter was specified.
+*
+* IB_NO_MATCH
+* The device or port at the specified index does not have the given
+* attributes.
+*
+* IB_INVALID_INDEX
+* No device or port exists for the specified index.
+*
+* NOTES
+* This routine returns a GUID for a device or port that matches the
+* user-specified attributes. If index is IB_ANY_INDEX, then the first
+* device or port matching the given attributes is returned if a match is
+* found. If no match is found, the call will return IB_NO_MATCH. If a
+* valid index is specified, then the device or port located at that index
+* will be examined to see if it has the given attributes. If the device
+* or port with those attributes is found, its GUID is returned.
+*
+* This routine may be used to locate a device or port with a given set
+* of attributes, or iterate through all devices or ports on the system.
+* The specified index values are set by the access layer, but the index
+* associated with a GUID may change if devices are removed from the system.
+*
+* SEE ALSO
+* ib_open_al, ib_pnp_class_t, ib_get_ca_guids, ib_query_ca_by_guid
+*****/
+
+
+/****f* Access Layer/ib_ci_call
+* NAME
+* ib_ci_call
+*
+* DESCRIPTION
+* Performs a vendor specific CA interface function call.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_ci_call(
+ IN ib_ca_handle_t h_ca,
+ IN const void* __ptr64 * const handle_array OPTIONAL,
+ IN uint32_t num_handles,
+ IN ib_ci_op_t* const p_ci_op );
+/*
+* PARAMETERS
+* h_ca
+* [in] An opened instance of a channel adapter.
+*
+* handle_array
+* [in] This parameter references an array containing handles of
+* existing CA resources. This array should contain all of the
+* handles specified in the vendor specific data provided with this
+* call. All handles specified through this array are validated by
+* the access layer as existing and belonging to the calling process.
+* The verbs provider driver is responsible for verifying that the
+* number and type of handles are correct for the requested operation.
+*
+* num_handles
+* [in] The number of handles in the handle array. This count is
+* verified by the access layer.
+*
+* p_ci_op
+* [in] A reference to the vendor specific CA interface data
+* structure containing the operation parameters.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The operation was successful.
+*
+* IB_INVALID_CA_HANDLE
+* The specified CA handle was invalid.
+*
+* IB_INVALID_PARAMETER
+* A reference to the vendor specific data was not provided.
+*
+* IB_INVALID_HANDLE
+* A handle specified in the handle array was invalid.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* IB_ERROR
+* An error occurred while processing the command. Additional
+* error information is provided in the p_ci_op status field.
+*
+* NOTES
+* This routine performs a vendor specific CA interface function call.
+* The optional p_ci_op structure provides a means to pass vendor
+* specific parameters and data to the verbs provider driver. If the
+* vendor specific data contains handles, the client should provide the
+* optional handle array that lists all of the handles specified in the
+* vendor specific data. The handles in the handle array are restricted
+* to the following types: ib_pd_handle_t, ib_cq_handle_t,
+* ib_av_handle_t, ib_qp_handle_t, ib_mr_handle_t, or ib_mw_handle_t.
+* The contents of the handle array are verified by the access layer and
+* the verbs provider driver. This call cannot be used to allocate private
+* handles that are passed as parameters in access layer calls.
+*
+* SEE ALSO
+* ib_open_ca, ib_alloc_pd, ib_create_av, ib_create_cq,
+* ib_create_qp, ib_reg_mr, ib_reg_phys, ib_reg_shared,
+* ib_create_mw, ib_ci_op_t
+*****/
+
+
+/****f* Access Layer/ib_open_al
+* NAME
+* ib_open_al
+*
+* DESCRIPTION
+* This routine opens an instance of the access layer for the user and
+* returns its handle.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_open_al(
+ OUT ib_al_handle_t* const ph_al );
+/*
+* PARAMETERS
+* ph_al
+* [out] Upon successful completion of this call, this parameter will
+* reference a handle to the access layer.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The access layer was opened successfully.
+*
+* IB_INVALID_PARAMETER
+* A reference to the access layer handle was not provided.
+*
+* IB_INSUFFICIENT_MEMORY
+* There was insufficient memory to perform the operation.
+*
+* NOTES
+* This function opens an instance of the access layer. An instance of the
+* access layer is required before allocating additional resources from the
+* access layer or a channel adapter. If successful, a handle to the access
+* layer is returned. User-mode clients should not call ib_open_al from the
+* module initialization routine.
+*
+* SEE ALSO
+* ib_close_al
+*****/
+
+
+/****f* Access Layer/ib_close_al
+* NAME
+* ib_close_al
+*
+* DESCRIPTION
+* Deregisters a channel driver with the access layer and releases all
+* associated resources, including queue pairs, connection requests,
+* and completion queues.
+*
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t AL_API
+ib_close_al(
+ IN const ib_al_handle_t h_al );
+/*
+* PARAMETERS
+* h_al
+* [in] A handle to an instance of the access layer.
+*
+* RETURN VALUES
+* IB_SUCCESS
+* The access layer was closed successfully.
+*
+* IB_INVALID_AL_HANDLE
+* The access layer handle was invalid.
+*
+* NOTES
+* This call destroys an existing instance of the access layer. Since
+* callbacks may be outstanding against the resources managed by this
+* access layer instance when the destroy operation is invoked, this
+* call may block until all outstanding callbacks complete. This
+* routine may not be called from a callback invoked by the access layer.
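+*
+* A typical open/close sequence, as a sketch:
+*
+* ib_al_handle_t h_al;
+* ib_api_status_t status;
+*
+* status = ib_open_al( &h_al );
+* if( status == IB_SUCCESS )
+* {
+* // ... allocate and use access layer and channel adapter resources ...
+* ib_close_al( h_al );
+* }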
+*
+* SEE ALSO
+* ib_open_al
+*****/
+
+
+#ifdef __cplusplus
+} /* extern "C" */
+#endif /* __cplusplus */
+
+
+#endif /* __IB_AL_H__ */
diff --git a/branches/Ndi/inc/iba/ib_al_ioctl.h b/branches/Ndi/inc/iba/ib_al_ioctl.h
new file mode 100644
index 00000000..c006a623
--- /dev/null
+++ b/branches/Ndi/inc/iba/ib_al_ioctl.h
@@ -0,0 +1,3402 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#ifndef __IB_UAL_IOCTL_H__
+#define __IB_UAL_IOCTL_H__
+
+#include
+#include
+#include
+#include
+
+
+
+/*
+* Typedefs
+*
+*/
+/*
+* ual_close_ca_ioctl:
+* NOTES:
+* It is sufficient to pass the CA handle to the kernel proxy on close_ca.
+* The UAL context for this CA instance maintains the application callback,
+* so when the proxy notifies for a close_ca_callback, we know which
+* app callback to call.
+*
+*/
+
+
+/****s* User-mode Access Layer/ual_bind_file_ioctl_t
+* NAME
+* ual_bind_file_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* binding a file handle to an existing proxy context.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_bind_file_ioctl
+{
+ void* __ptr64 h_file;
+
+} ual_bind_file_ioctl_t;
+/*
+* FIELDS
+* h_file
+* File handle from the user-mode process intended for asynchronous requests.
+* The IOCTL code will specify the type of asynchronous requests to be
+* performed on this file handle.
+*
+* SEE ALSO
+*
+* NOTES
+*****/
+
+
+/****s* User-mode Access Layer/ual_get_uvp_name_t
+* NAME
+* ual_get_uvp_name_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* getting the user library information.
+*
+* SYNOPSIS
+*/
+typedef union _ual_get_uvp_name
+{
+ struct _ual_get_uvp_name_in
+ {
+ ib_net64_t ca_guid;
+
+ } in;
+ struct _ual_get_uvp_name_out
+ {
+ ib_api_status_t status;
+ char uvp_lib_name[MAX_LIB_NAME];
+
+ } out;
+
+} ual_get_uvp_name_ioctl_t;
+/*
+* FIELDS
+* in.ca_guid
+* The GUID of the channel adapter
+*
+* out.status
+* Status of the operation
+*
+* out.uvp_lib_name
+* The vendor's library name associated with the CA
+*
+* SEE ALSO
+*
+*
+* NOTES
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_open_ca_ioctl_t
+* NAME
+* ual_open_ca_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_open_ca
+*
+* SYNOPSIS
+*/
+typedef union _ual_open_ca_ioctl
+{
+ struct _ual_open_ca_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ ib_net64_t guid;
+ void* __ptr64 context;
+
+ } in;
+
+ struct _ual_open_ca_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_api_status_t status;
+ uint64_t h_ca;
+
+ } out;
+
+} ual_open_ca_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.guid
+* The GUID of the channel adapter to open.
+*
+* in.context
+* A caller-specified context to associate with this opened instance
+* of the channel adapter. This context is returned to the user when
+* invoking asynchronous callbacks referencing this channel adapter.
+*
+* out.umv_buf
+* Returns the status from the HCA driver to the user-mode HCA library,
+* along with any vendor specific output information.
+*
+* out.status
+* Status of the operation
+*
+* out.h_ca
+* On return from the IOCTL, contains the CA handle from AL.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_query_ca_ioctl_t
+* NAME
+* ual_query_ca_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_query_ca
+*
+* SYNOPSIS
+*/
+typedef union _ual_query_ca_ioctl
+{
+ struct _ual_query_ca_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ uint64_t h_ca;
+ uint32_t byte_cnt;
+ ib_ca_attr_t* __ptr64 p_ca_attr;
+
+ } in;
+ struct _ual_query_ca_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_api_status_t status;
+ uint32_t byte_cnt;
+
+ } out;
+
+} ual_query_ca_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.h_ca
+* The handle to an open instance of CA returned via a
+* ual_open_ca_ioctl structure.
+*
+* in.byte_cnt
+* Specifies the size of the data buffer referenced by the p_ca_attr
+* parameter.
+*
+* in.p_ca_attr
+* A reference to a buffer where the channel adapter attributes,
+* including port attribute information will be copied. If this parameter
+* is NULL, then the required buffer size needed to return all of the CA
+* attribute information is returned through the out.byte_cnt parameter.
+* The ib_ca_attr_t structure for the specified channel adapter is stored
+* at the top of this buffer.
+*
+* out.umv_buf
+* Returns the status from the HCA driver to the user-mode HCA library,
+* along with any vendor specific output information.
+*
+* out.status
+* Status of the operation
+*
+* out.byte_cnt
+* Contains the number of bytes used or needed to copy all CA attributes.
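+*
+* NOTES
+* A typical usage, implied by the in.p_ca_attr description above, is to
+* issue this IOCTL twice: first with in.p_ca_attr set to NULL to learn the
+* required buffer size from out.byte_cnt, and then again with a buffer of
+* that size to retrieve the attributes.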
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_modify_ca_ioctl_t
+* NAME
+* ual_modify_ca_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_modify_ca
+*
+* SYNOPSIS
+*/
+typedef union _ual_modify_ca_ioctl
+{
+ struct _ual_modify_ca_ioctl_in
+ {
+ uint64_t h_ca;
+ uint8_t port_num;
+ ib_ca_mod_t ca_mod;
+ ib_port_attr_mod_t port_attr_mod;
+
+ } in;
+ struct _ual_modify_ca_ioctl_out
+ {
+ ib_api_status_t status;
+
+ } out;
+
+} ual_modify_ca_ioctl_t;
+/*
+* FIELDS
+* in.h_ca
+* The handle to an open instance of CA (in KAL space).
+*
+* in.port_num
+* An index of the port that is being modified.
+*
+* in.ca_mod
+* The mask of the attributes and counters to modify.
+*
+* in.port_attr_mod
+* List of port attribute information to modify.
+*
+* out.status
+* Status of the operation
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_close_ca_ioctl_t
+* NAME
+* ual_close_ca_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_close_ca
+*
+* SYNOPSIS
+*/
+typedef union _ual_close_ca_ioctl
+{
+ struct _ual_close_ca_ioctl_in
+ {
+ uint64_t h_ca;
+
+ } in;
+ struct _ual_close_ca_ioctl_out
+ {
+ ib_api_status_t status;
+
+ } out;
+
+} ual_close_ca_ioctl_t;
+/*
+* FIELDS
+* in.h_ca
+* The handle to an open instance of CA (in KAL space).
+*
+* out.status
+* Status of the operation
+*****/
+
+/****s* User-mode Access Layer/ual_ci_call_ioctl_t
+* NAME
+* ual_ci_call_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_ci_call
+*
+* SYNOPSIS
+*/
+typedef union _ual_ci_call_ioctl
+{
+ struct _ual_ci_call_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ ib_ci_op_t ci_op;
+ uint64_t h_ca;
+ uint32_t num_handles;
+ uint64_t handle_array[1];
+
+ } in;
+ struct _ual_ci_call_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_ci_op_t ci_op;
+ ib_api_status_t status;
+
+ } out;
+
+} ual_ci_call_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.ci_op
+* Contains information on the operation that needs to be performed
+* by the verbs provider. The proxy marshals the data buffer between
+* user mode and kernel space.
+*
+* in.h_ca
+* The handle to an open instance of CA returned by a ual_open_ca_ioctl.
+*
+* in.num_handles
+* The number of handles in the in.handle_array array.
+*
+* in.handle_array
+* First entry in an array of handles used for this operation. Ignored if
+* in.num_handles is zero.
+*
+* out.umv_buf
+* Returns the status from the HCA driver to the user-mode HCA library,
+* along with any vendor specific output information.
+*
+* out.status
+* Status of the operation
+*
+* out.ci_op
+* Contains information on the operation that needs to be performed
+* by the verbs provider. The proxy marshals the data buffer between
+* user mode and kernel space.
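+*
+* NOTES
+* The input structure is variable length: handle_array declares a single
+* entry, so the IOCTL input buffer presumably must be sized to hold
+* in.num_handles entries, along the lines of this sketch (for
+* num_handles >= 1):
+*
+* size_t cb = sizeof(ual_ci_call_ioctl_t) +
+* (sizeof(uint64_t) * (num_handles - 1));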
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_alloc_pd_ioctl_t
+* NAME
+* ual_alloc_pd_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_alloc_pd
+*
+* SYNOPSIS
+*/
+typedef union _ual_alloc_pd_ioctl
+{
+ struct _ual_alloc_pd_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ uint64_t h_ca;
+ ib_pd_type_t type;
+ void* __ptr64 context;
+
+ } in;
+ struct _ual_alloc_pd_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_api_status_t status;
+ uint64_t h_pd;
+
+ } out;
+
+} ual_alloc_pd_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.h_ca
+* The handle to an open instance of CA returned in a ual_open_ca_ioctl.
+*
+* in.type
+* The type of the protection domain to allocate.
+*
+* in.context
+* UAL's pd context. This context will be provided in the destroy callback.
+*
+* out.umv_buf
+* Returns the status from the HCA driver to the user-mode HCA library,
+* along with any vendor specific output information.
+*
+* out.status
+* Status of the operation
+*
+* out.h_pd
+* The handle to the PD to use in further PD-related IOCTLs.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_dealloc_pd_ioctl_t
+* NAME
+* ual_dealloc_pd_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_dealloc_pd
+*
+* SYNOPSIS
+*/
+typedef union _ual_dealloc_pd_ioctl
+{
+ struct _ual_dealloc_pd_ioctl_in
+ {
+ uint64_t h_pd;
+
+ } in;
+ struct _ual_dealloc_pd_ioctl_out
+ {
+ ib_api_status_t status;
+
+ } out;
+
+} ual_dealloc_pd_ioctl_t;
+/*
+* FIELDS
+* in.h_pd
+* The handle of the PD that is going to be deallocated.
+*
+* out.status
+* Status of the operation
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_create_av_ioctl_t
+* NAME
+* ual_create_av_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_create_av
+*
+* SYNOPSIS
+*/
+typedef union _ual_create_av_ioctl
+{
+ struct _ual_create_av_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ uint64_t h_pd;
+ ib_av_attr_t attr;
+
+ } in;
+ struct _ual_create_av_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_api_status_t status;
+ uint64_t h_av;
+
+ } out;
+
+} ual_create_av_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.h_pd
+* The handle to an already allocated PD (in KAL space).
+*
+* in.attr
+* Attributes of the address vector that needs to be created.
+*
+* out.umv_buf
+* Returns the status from the HCA driver to the user-mode HCA library,
+* along with any vendor specific output information.
+*
+* out.status
+* Status of the operation.
+*
+* out.h_av
+* Handle to the newly created address vector.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_query_av_ioctl_t
+* NAME
+* ual_query_av_ioctl_t
+*
+* DESCRIPTION
+* IOCTL structure containing the input and output parameters for
+* ib_query_av
+*
+* SYNOPSIS
+*/
+typedef union _ual_query_av_ioctl
+{
+ struct _ual_query_av_ioctl_in
+ {
+ ci_umv_buf_t umv_buf;
+ uint64_t h_av;
+
+ } in;
+ struct _ual_query_av_ioctl_out
+ {
+ ci_umv_buf_t umv_buf;
+ ib_api_status_t status;
+ ib_av_attr_t attr;
+ void* __ptr64 pd_context;
+
+ } out;
+
+} ual_query_av_ioctl_t;
+/*
+* FIELDS
+* in.umv_buf
+* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+* exchange private information with the kernel-mode HCA driver.
+*
+* in.h_av
+* A handle to an existing address vector.
+* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +* out.attr +* Attributes of the address vector. +* +* pd_context +* Context associated with the PD when created. +*****/ + + + +/****s* User-mode Access Layer/ual_modify_av_ioctl_t +* NAME +* ual_modify_av_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_modify_av +* +* SYNOPSIS +*/ +typedef union _ual_modify_av_ioctl +{ + struct _ual_modify_av_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_av; + ib_av_attr_t attr; + + } in; + struct _ual_modify_av_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + + } out; + +} ual_modify_av_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_av +* A handle to an existing address vector. +* +* in.attr +* The requested attributes to be used for modifying the address vector. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation +*****/ + + + +/****s* User-mode Access Layer/ual_destroy_av_ioctl_t +* NAME +* ual_destroy_av_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_destroy_av +* +* SYNOPSIS +*/ +typedef union _ual_destroy_av_ioctl +{ + struct _ual_destroy_av_ioctl_in + { + uint64_t h_av; + + } in; + struct _ual_destroy_av_ioctl_out + { + ib_api_status_t status; + + } out; + +} ual_destroy_av_ioctl_t; +/* +* FIELDS +* in.h_av +* A handle to an existing address vector. +* +* out.status +* Status of the operation. +*****/ + +/****s* User-mode Access Layer/ual_create_srq_ioctl_t +* NAME +* ual_create_srq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_create_srq +* +* SYNOPSIS +*/ +typedef union _ual_create_srq_ioctl +{ + struct _ual_create_srq_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_pd; + ib_srq_attr_t srq_attr; + void* __ptr64 context; + boolean_t ev_notify; + + } in; + struct _ual_create_srq_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + uint64_t h_srq; + + } out; + +} ual_create_srq_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_pd +* Protection domain on which to create the srq. +* +* in.srq_attr +* Attributes necessary for creating the srq. +* +* in.context +* UAL's srq context that needs to be returned on a callback. +* +* in.ev_notify +* Boolean indicating whether asynchronous events should be +* forwarded to user-mode. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +* out.h_srq +* Handle for the newly created srq. 
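+*
+* EXAMPLE
+*	A calling sketch (an editorial illustration; the IOCTL transport is
+*	assumed, and p_srq_attr and srq_context are caller-supplied values).
+*	The union carries input before the call and output after it:
+*
+*		ual_create_srq_ioctl_t ioctl_buf;
+*
+*		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+*		ioctl_buf.in.h_pd = h_pd;
+*		ioctl_buf.in.srq_attr = *p_srq_attr;
+*		ioctl_buf.in.context = srq_context;
+*		ioctl_buf.in.ev_notify = TRUE;
+*		// ... issue the IOCTL, then on success:
+*		if( ioctl_buf.out.status == IB_SUCCESS )
+*			h_srq = ioctl_buf.out.h_srq;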
+*****/
+
+
+/****s* User-mode Access Layer/ual_modify_srq_ioctl_t
+* NAME
+*	ual_modify_srq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_modify_srq
+*
+* SYNOPSIS
+*/
+typedef union _ual_modify_srq_ioctl
+{
+	struct _ual_modify_srq_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_srq;
+		ib_srq_attr_mask_t	srq_attr_mask;
+		ib_srq_attr_t		srq_attr;
+
+	}	in;
+	struct _ual_modify_srq_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_modify_srq_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_srq
+*		A handle to an existing srq.
+*
+*	in.srq_attr_mask
+*		Mask of the srq attributes to modify.
+*
+*	in.srq_attr
+*		Attributes used for modifying the srq.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_query_srq_ioctl_t
+* NAME
+*	ual_query_srq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_query_srq
+*
+* SYNOPSIS
+*/
+typedef union _ual_query_srq_ioctl
+{
+	struct _ual_query_srq_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_srq;
+
+	}	in;
+	struct _ual_query_srq_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		ib_srq_attr_t		srq_attr;
+
+	}	out;
+
+}	ual_query_srq_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_srq
+*		Handle to the srq whose attributes to query.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.srq_attr
+*		Attributes of the srq.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_destroy_srq_ioctl_t
+* NAME
+*	ual_destroy_srq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_destroy_srq
+*
+* SYNOPSIS
+*/
+typedef union _ual_destroy_srq_ioctl
+{
+	struct _ual_destroy_srq_ioctl_in
+	{
+		uint64_t			h_srq;
+
+	}	in;
+	struct _ual_destroy_srq_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_destroy_srq_ioctl_t;
+/*
+* FIELDS
+*	in.h_srq
+*		Handle of the srq that needs to be destroyed.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_create_qp_ioctl_t
+* NAME
+*	ual_create_qp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_create_qp
+*
+* SYNOPSIS
+*/
+typedef union _ual_create_qp_ioctl
+{
+	struct _ual_create_qp_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_pd;
+		ib_qp_create_t		qp_create;
+		void* __ptr64		context;
+		boolean_t			ev_notify;
+
+	}	in;
+	struct _ual_create_qp_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		ib_qp_attr_t		attr;
+		uint64_t			h_qp;
+
+	}	out;
+
+}	ual_create_qp_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_pd
+*		Protection domain on which to create the QP.
+*
+*	in.qp_create
+*		Attributes necessary for creating the QP.
+*
+*	in.context
+*		UAL's qp context that needs to be returned on a callback.
+*
+*	in.ev_notify
+*		Boolean indicating whether asynchronous events should be
+*		forwarded to user-mode.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.attr
+*		Actual attributes of the newly created QP.
+*
+*	out.h_qp
+*		Handle for the newly created QP.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_modify_qp_ioctl_t
+* NAME
+*	ual_modify_qp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_modify_qp
+*
+* SYNOPSIS
+*/
+typedef union _ual_modify_qp_ioctl
+{
+	struct _ual_modify_qp_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_qp;
+		ib_qp_mod_t			modify_attr;
+
+	}	in;
+	struct _ual_modify_qp_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		//ib_qp_query_t	query_attr;	// Not returned by AL
+
+	}	out;
+
+}	ual_modify_qp_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_qp
+*		A handle to an existing Queue Pair.
+*
+*	in.modify_attr
+*		Attributes used for modifying the QP.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_query_qp_ioctl_t
+* NAME
+*	ual_query_qp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_query_qp
+*
+* SYNOPSIS
+*/
+typedef union _ual_query_qp_ioctl
+{
+	struct _ual_query_qp_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_qp;
+
+	}	in;
+	struct _ual_query_qp_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		ib_qp_attr_t		attr;
+
+	}	out;
+
+}	ual_query_qp_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_qp
+*		Handle to the QP whose attributes to query.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.attr
+*		Attributes of the QP.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_destroy_qp_ioctl_t
+* NAME
+*	ual_destroy_qp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_destroy_qp
+*
+* SYNOPSIS
+*/
+typedef union _ual_destroy_qp_ioctl
+{
+	struct _ual_destroy_qp_ioctl_in
+	{
+		uint64_t			h_qp;
+
+	}	in;
+	struct _ual_destroy_qp_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_destroy_qp_ioctl_t;
+/*
+* FIELDS
+*	in.h_qp
+*		Handle of the QP that needs to be destroyed.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_create_cq_ioctl_t
+* NAME
+*	ual_create_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_create_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_create_cq_ioctl
+{
+	struct _ual_create_cq_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_ca;
+		void* __ptr64		h_wait_obj;
+		void* __ptr64		context;
+		uint32_t			size;
+		boolean_t			ev_notify;
+
+	}	in;
+	struct _ual_create_cq_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		uint64_t			h_cq;
+		uint32_t			size;
+
+	}	out;
+
+}	ual_create_cq_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_ca
+*		CA handle on which to create the CQ.
+*
+*	in.h_wait_obj
+*		Handle to a user-mode wait object to signal for completion
+*		notifications, if used.
+*
+*	in.context
+*		UAL's cq context that needs to be returned on a callback.
+*
+*	in.size
+*		The requested size of the CQ.
+*
+*	in.ev_notify
+*		Boolean indicating whether asynchronous events should be
+*		forwarded to user-mode.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.h_cq
+*		Handle to the newly created CQ.
+*
+*	out.size
+*		Actual size of the newly created CQ.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_modify_cq_ioctl_t
+* NAME
+*	ual_modify_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_modify_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_modify_cq_ioctl
+{
+	struct _ual_modify_cq_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_cq;
+		uint32_t			size;
+
+	}	in;
+	struct _ual_modify_cq_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		uint32_t			size;
+
+	}	out;
+
+}	ual_modify_cq_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_cq
+*		A handle to the CQ to modify.
+*
+*	in.size
+*		The requested new size of the CQ.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.size
+*		The actual size of the CQ.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_query_cq_ioctl_t
+* NAME
+*	ual_query_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_query_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_query_cq_ioctl
+{
+	struct _ual_query_cq_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_cq;
+
+	}	in;
+	struct _ual_query_cq_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		uint32_t			size;
+
+	}	out;
+
+}	ual_query_cq_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_cq
+*		A handle to an existing CQ.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.size
+*		The size of the CQ.
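+*
+* EXAMPLE
+*	A resize sketch using ual_modify_cq_ioctl_t (an editorial
+*	illustration; the IOCTL transport is assumed). Since the provider
+*	may return a larger actual size than requested, the caller should
+*	adopt out.size rather than the value it asked for:
+*
+*		ual_modify_cq_ioctl_t ioctl_buf;
+*
+*		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+*		ioctl_buf.in.h_cq = h_cq;
+*		ioctl_buf.in.size = desired_size;
+*		// ... issue the IOCTL, then on success:
+*		if( ioctl_buf.out.status == IB_SUCCESS )
+*			cq_size = ioctl_buf.out.size;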
+*****/ + + + +/****s* User-mode Access Layer/ual_destroy_cq_ioctl_t +* NAME +* ual_destroy_cq_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_destroy_cq +* +* SYNOPSIS +*/ +typedef union _ual_destroy_cq_ioctl +{ + struct _ual_destroy_cq_ioctl_in + { + uint64_t h_cq; + + } in; + struct _ual_destroy_cq_ioctl_out + { + ib_api_status_t status; + + } out; + +} ual_destroy_cq_ioctl_t; +/* +* FIELDS +* in.h_cq +* Handle of the cq that needs to be destroyed. +* +* out.status +* Status of the operation. +*****/ + + + +/****s* User-mode Access Layer/ual_reg_mem_ioctl_t +* NAME +* ual_reg_mem_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_reg_mem +* +* SYNOPSIS +*/ +typedef union _ual_reg_mem_ioctl +{ + struct _ual_reg_mem_ioctl_in + { + uint64_t h_pd; + ib_mr_create_t mem_create; + + } in; + struct _ual_reg_mem_ioctl_out + { + ib_api_status_t status; + net32_t lkey; + net32_t rkey; + uint64_t h_mr; + + } out; + +} ual_reg_mem_ioctl_t; +/* +* FIELDS +* in.h_pd +* Handle to the protection domain on which to register the memory. +* +* in.mem_create +* Information for registering the memory region. +* +* out.status +* Status of the operation. +* +* out.lkey +* LKey value returned by verb. +* +* out.rkey +* RKey value returned by verb. +* +* h_mr +* Handle to the registered memory region. +*****/ + + + +/****s* User-mode Access Layer/ual_query_mr_ioctl_t +* NAME +* ual_query_mr_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_query_mr +* +* SYNOPSIS +*/ +typedef union _ual_query_mr_ioctl +{ + struct _ual_query_mr_ioctl_in + { + uint64_t h_mr; + + } in; + struct _ual_query_mr_ioctl_out + { + ib_api_status_t status; + ib_mr_attr_t attr; + + } out; + +} ual_query_mr_ioctl_t; +/* +* FIELDS +* in.h_mr +* A handle to a registered memory region. +* +* out.status +* Status of the operation. +* +* out.attr +* Attributes of the registered memory region. +*****/ + + + +/****s* User-mode Access Layer/ual_rereg_mem_ioctl_t +* NAME +* ual_rereg_mem_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_modify_mr +* +* SYNOPSIS +*/ +typedef union _ual_rereg_mem_ioctl +{ + struct _ual_rereg_mem_ioctl_in + { + uint64_t h_mr; + ib_mr_mod_t mem_mod_mask; + ib_mr_create_t mem_create; + uint64_t h_pd; + + } in; + struct _ual_rereg_mem_ioctl_out + { + ib_api_status_t status; + net32_t lkey; + net32_t rkey; + + } out; + +} ual_rereg_mem_ioctl_t; +/* +* FIELDS +* in.h_mr +* A handle to a registered memory region that is being modified. +* +* in.mem_mod_mask +* The attributes to use when modifying the memory region. +* +* in.mem_create +* Information to use for modifying the memory region. Required only +* for changes other than the PD. +* +* in.h_pd +* PD Handle for changing protection domains. +* +* out.status +* Status of the operation. +* +* out.l_key +* LKey of the memory region. +* +* out.rkey +* RKey of the memory region. 
+*****/ + + + +/****s* User-mode Access Layer/ual_reg_shared_ioctl_t +* NAME +* ual_reg_shared_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_reg_shared +* +* SYNOPSIS +*/ +typedef union _ual_reg_shared_ioctl +{ + struct _ual_reg_shared_ioctl_in + { + uint64_t h_mr; + uint64_t h_pd; + ib_access_t access_ctrl; + uint64_t vaddr; + + } in; + struct _ual_reg_shared_ioctl_out + { + ib_api_status_t status; + uint64_t vaddr; + net32_t lkey; + net32_t rkey; + uint64_t h_new_mr; + + } out; + +} ual_reg_shared_ioctl_t; +/* +* FIELDS +* in.h_mr +* A handle to the existing registered memory region. +* +* in.h_pd +* A handle to the PD on which memory is being registered. +* +* in.access_ctrl +* Access control for the new memory region. +* +* in.vaddr +* Requested virtual address. +* +* out.status +* Status of the operation. +* +* out.vaddr +* Actual virtual address of the registered region. +* +* out.l_key +* LKey of the memory region. +* +* out.rkey +* RKey of the memory region. +* +* h_new_mr +* Handle to the registered memory region. +*****/ + + + +/****s* User-mode Access Layer/ual_dereg_mr_ioctl_t +* NAME +* ual_dereg_mr_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_dereg_mr +* +* SYNOPSIS +*/ +typedef union _ual_dereg_mr_ioctl +{ + struct _ual_dereg_mr_ioctl_in + { + uint64_t h_mr; + + } in; + struct _ual_dereg_mr_ioctl_out + { + ib_api_status_t status; + + } out; + +} ual_dereg_mr_ioctl_t; +/* +* FIELDS +* in.h_mr +* A handle to a registered memory region. +* +* out.status +* Status of the operation. +*****/ + + + +/****s* User-mode Access Layer/ual_create_mw_ioctl_t +* NAME +* ual_create_mw_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_create_mw +* +* SYNOPSIS +*/ +typedef union _ual_create_mw_ioctl +{ + struct _ual_create_mw_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_pd; + + } in; + struct _ual_create_mw_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + net32_t rkey; + uint64_t h_mw; + + } out; + +} ual_create_mw_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_pd +* A handle to the protection domain on which the memory window should +* be created. +* +* out.umv_buf +* Returns the status from the HCA driver to the user-mode HCA library, +* along with any vendor specific output information. +* +* out.status +* Status of the operation. +* +* out.rkey +* RKey associated with the memory window. +* +* h_mw +* Handle to the newly created MW. +*****/ + + + +/****s* User-mode Access Layer/ual_query_mw_ioctl_t +* NAME +* ual_query_mw_ioctl_t +* +* DESCRIPTION +* IOCTL structure containing the input and output parameters for +* ib_query_mw +* +* SYNOPSIS +*/ +typedef union _ual_query_mw_ioctl +{ + struct _ual_query_mw_ioctl_in + { + ci_umv_buf_t umv_buf; + uint64_t h_mw; + + } in; + struct _ual_query_mw_ioctl_out + { + ci_umv_buf_t umv_buf; + ib_api_status_t status; + void* __ptr64 pd_context; + net32_t rkey; + + } out; + +} ual_query_mw_ioctl_t; +/* +* FIELDS +* in.umv_buf +* Opaque to IBAL buffer descriptor to allow the user-mode HCA library to +* exchange private information with the kernel-mode HCA driver. +* +* in.h_mw +* A handle to an existing memory window. 
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.pd_context
+*		The user-mode context for the protection domain
+*		associated with the memory window.
+*
+*	out.rkey
+*		RKey associated with the memory window.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_bind_mw_ioctl_t
+* NAME
+*	ual_bind_mw_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_bind_mw
+*
+* SYNOPSIS
+*/
+typedef union _ual_bind_mw_ioctl
+{
+	struct _ual_bind_mw_ioctl_in
+	{
+		uint64_t			h_mw;
+		uint64_t			h_qp;
+		ib_bind_wr_t		mw_bind;
+
+	}	in;
+	struct _ual_bind_mw_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			r_key;
+
+	}	out;
+
+}	ual_bind_mw_ioctl_t;
+/*
+* FIELDS
+*	in.h_mw
+*		A handle to an existing memory window.
+*
+*	in.h_qp
+*		Handle to the QP on which to post the bind request.
+*
+*	in.mw_bind
+*		Bind request.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.r_key
+*		RKey for the memory window.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_destroy_mw_ioctl_t
+* NAME
+*	ual_destroy_mw_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_destroy_mw
+*
+* SYNOPSIS
+*/
+typedef union _ual_destroy_mw_ioctl
+{
+	struct _ual_destroy_mw_ioctl_in
+	{
+		uint64_t			h_mw;
+
+	}	in;
+	struct _ual_destroy_mw_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_destroy_mw_ioctl_t;
+/*
+* FIELDS
+*	in.h_mw
+*		A handle to an existing memory window.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_post_send_ioctl_t
+* NAME
+*	ual_post_send_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_post_send
+*
+* SYNOPSIS
+*/
+typedef union _ual_post_send_ioctl
+{
+	struct _ual_post_send_ioctl_in
+	{
+		uint64_t			h_qp;
+		uint32_t			num_wr;
+		uint32_t			num_ds;
+		ib_send_wr_t		send_wr[1];
+		/* Additional work requests follow, followed by data segments. */
+
+	}	in;
+	struct _ual_post_send_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			failed_cnt;
+
+	}	out;
+
+}	ual_post_send_ioctl_t;
+/*
+* FIELDS
+*	in.h_qp
+*		A handle to the QP where the work request is being posted.
+*
+*	in.num_wr
+*		Number of work request items in the array of work requests.
+*
+*	in.num_ds
+*		Number of data segments following the array of work requests.
+*
+*	in.send_wr
+*		First work request in the array of work requests being posted.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.failed_cnt
+*		Number of work requests that failed.
+*****/
+
+
+/****s* User-mode Access Layer/ual_post_srq_recv_ioctl_t
+* NAME
+*	ual_post_srq_recv_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_post_srq_recv
+*
+* SYNOPSIS
+*/
+typedef union _ual_post_srq_recv_ioctl
+{
+	struct _ual_post_srq_recv_ioctl_in
+	{
+		uint64_t			h_srq;
+		uint32_t			num_wr;
+		uint32_t			num_ds;
+		ib_recv_wr_t		recv_wr[1];
+		/* Additional work requests follow, followed by data segments. */
+
+	}	in;
+	struct _ual_post_srq_recv_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			failed_cnt;
+
+	}	out;
+
+}	ual_post_srq_recv_ioctl_t;
+/*
+* FIELDS
+*	in.h_srq
+*		A handle to the SRQ where the work request is being posted.
+*
+*	in.num_wr
+*		Number of work request items in the array of work requests.
+*
+*	in.num_ds
+*		Number of data segments following the array of work requests.
+*
+*	in.recv_wr
+*		First work request in the array of work requests being posted.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.failed_cnt
+*		Number of work requests that failed.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_post_recv_ioctl_t
+* NAME
+*	ual_post_recv_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_post_recv
+*
+* SYNOPSIS
+*/
+typedef union _ual_post_recv_ioctl
+{
+	struct _ual_post_recv_ioctl_in
+	{
+		uint64_t			h_qp;
+		uint32_t			num_wr;
+		uint32_t			num_ds;
+		ib_recv_wr_t		recv_wr[1];
+		/* Additional work requests follow, followed by data segments. */
+
+	}	in;
+	struct _ual_post_recv_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			failed_cnt;
+
+	}	out;
+
+}	ual_post_recv_ioctl_t;
+/*
+* FIELDS
+*	in.h_qp
+*		A handle to the QP where the work request is being posted.
+*
+*	in.num_wr
+*		Number of work request items in the array of work requests.
+*
+*	in.num_ds
+*		Number of data segments following the array of work requests.
+*
+*	in.recv_wr
+*		First work request in the array of work requests being posted.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.failed_cnt
+*		Number of work requests that failed.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_peek_cq_ioctl_t
+* NAME
+*	ual_peek_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_peek_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_peek_cq_ioctl
+{
+	struct _ual_peek_cq_ioctl_in
+	{
+		uint64_t			h_cq;
+
+	}	in;
+	struct _ual_peek_cq_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			n_cqes;
+
+	}	out;
+
+}	ual_peek_cq_ioctl_t;
+/*
+* FIELDS
+*	in.h_cq
+*		A handle to a CQ.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.n_cqes
+*		The number of completion queue entries currently on the CQ.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_poll_cq_ioctl_t
+* NAME
+*	ual_poll_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_poll_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_poll_cq_ioctl
+{
+	struct _ual_poll_cq_ioctl_in
+	{
+		uint64_t			h_cq;
+		uint32_t			num_wc;
+
+	}	in;
+	struct _ual_poll_cq_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint32_t			num_wc;
+		ib_wc_t				wc[1];
+		/* Additional WC's follow. */
+	}	out;
+
+}	ual_poll_cq_ioctl_t;
+/*
+* FIELDS
+*	in.h_cq
+*		A handle to the CQ that is going to be polled for completions.
+*
+*	in.num_wc
+*		Number of work completions in the output array.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.num_wc
+*		Number of work completions polled.
+*
+*	out.wc
+*		First work completion in the array to use for polling.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_rearm_cq_ioctl_t
+* NAME
+*	ual_rearm_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_rearm_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_rearm_cq_ioctl
+{
+	struct _ual_rearm_cq_ioctl_in
+	{
+		uint64_t			h_cq;
+		boolean_t			solicited;
+
+	}	in;
+	struct _ual_rearm_cq_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_rearm_cq_ioctl_t;
+/*
+* FIELDS
+*	in.h_cq
+*		A handle to a CQ.
+*
+*	in.solicited
+*		Flag indicating whether to rearm the CQ for solicited
+*		completions only.
+*
+*	out.status
+*		Status of the operation.
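+*
+* EXAMPLE
+*	These two IOCTLs back the usual completion-handling loop: drain the
+*	CQ with ib_poll_cq until it is empty, rearm it, then poll once more
+*	to catch completions that raced with the rearm. An editorial sketch
+*	in terms of the user-mode API (process_wc_list is a consumer-defined
+*	helper):
+*
+*		ib_wc_t		wc, *p_free_wc, *p_done_wc;
+*
+*		wc.p_next = NULL;
+*		for( ;; )
+*		{
+*			p_free_wc = &wc;
+*			while( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
+*			{
+*				process_wc_list( p_done_wc );
+*				p_free_wc = &wc;
+*			}
+*			if( ib_rearm_cq( h_cq, FALSE ) != IB_SUCCESS )
+*				break;
+*			p_free_wc = &wc;
+*			if( ib_poll_cq( h_cq, &p_free_wc, &p_done_wc ) != IB_SUCCESS )
+*				break;	// CQ is armed and empty; wait for the callback.
+*			process_wc_list( p_done_wc );
+*		}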
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_rearm_n_cq_ioctl_t
+* NAME
+*	ual_rearm_n_cq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_rearm_n_cq
+*
+* SYNOPSIS
+*/
+typedef union _ual_rearm_n_cq_ioctl
+{
+	struct _ual_rearm_n_cq_ioctl_in
+	{
+		uint64_t			h_cq;
+		uint32_t			n_cqes;
+
+	}	in;
+	struct _ual_rearm_n_cq_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_rearm_n_cq_ioctl_t;
+/*
+* FIELDS
+*	in.h_cq
+*		A handle to a CQ.
+*
+*	in.n_cqes
+*		Rearm the CQ to signal when the next N completions are added.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_attach_mcast_ioctl_t
+* NAME
+*	ual_attach_mcast_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	verbs attach multicast call.
+*
+* SYNOPSIS
+*/
+typedef union _ual_attach_mcast_ioctl
+{
+	struct _ual_attach_mcast_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_qp;
+		ib_gid_t			mgid;
+		ib_net16_t			mlid;
+
+	}	in;
+	struct _ual_attach_mcast_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		uint64_t			h_attach;
+
+	}	out;
+
+}	ual_attach_mcast_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_qp
+*		Handle to the QP that is joining the multicast group.
+*
+*	in.mgid
+*		Multicast GID address for this multicast group.
+*
+*	in.mlid
+*		Multicast LID for this multicast group.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.h_attach
+*		Multicast group handle.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_detach_mcast_ioctl_t
+* NAME
+*	ual_detach_mcast_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	verbs detach call.
+*
+* SYNOPSIS
+*/
+typedef union _ual_detach_mcast_ioctl
+{
+	struct _ual_detach_mcast_ioctl_in
+	{
+		uint64_t			h_attach;
+
+	}	in;
+	struct _ual_detach_mcast_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_detach_mcast_ioctl_t;
+/*
+* FIELDS
+*	in.h_attach
+*		A handle to the multicast group.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_reg_mad_svc_ioctl_t
+* NAME
+*	ual_reg_mad_svc_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_reg_mad_svc
+*
+* SYNOPSIS
+*/
+typedef union _ual_reg_mad_svc_ioctl
+{
+	struct _ual_reg_mad_svc_ioctl_in
+	{
+		uint64_t			h_qp;
+		ib_mad_svc_t		mad_svc;
+
+	}	in;
+	struct _ual_reg_mad_svc_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint64_t			h_mad_svc;
+
+	}	out;
+
+}	ual_reg_mad_svc_ioctl_t;
+/*
+* FIELDS
+*	in.h_qp
+*		Handle to the special QP or MAD QP.
+*
+*	in.mad_svc
+*		MAD service definition.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.h_mad_svc
+*		Handle to the MAD service.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_dereg_mad_svc_ioctl_t
+* NAME
+*	ual_dereg_mad_svc_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_dereg_mad_svc
+*
+* SYNOPSIS
+*/
+typedef union _ual_dereg_mad_svc_ioctl
+{
+	struct _ual_dereg_mad_svc_ioctl_in
+	{
+		uint64_t			h_mad_svc;
+
+	}	in;
+	struct _ual_dereg_mad_svc_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_dereg_mad_svc_ioctl_t;
+/*
+* FIELDS
+*	in.h_mad_svc
+*		Handle to the MAD service.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_reg_mad_pool_ioctl_t
+* NAME
+*	ual_reg_mad_pool_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	registering a MAD pool to be used with a special QP.  This IOCTL
+*	registers the user-mode pool.  Additionally, the kernel proxy
+*	allocates and registers a kernel MAD pool so that later MAD sends
+*	have the appropriate pool in the kernel.
+*
+* SYNOPSIS
+*/
+typedef union _ual_reg_mad_pool_ioctl
+{
+	struct _ual_reg_mad_pool_ioctl_in
+	{
+		uint64_t			h_pd;
+
+	}	in;
+	struct _ual_reg_mad_pool_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint64_t			pool_key;
+
+	}	out;
+
+}	ual_reg_mad_pool_ioctl_t;
+/*
+* FIELDS
+*	in.h_pd
+*		PD associated with the pool.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.pool_key
+*		Pool key to the MAD pool in kernel space.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_dereg_mad_pool_ioctl_t
+* NAME
+*	ual_dereg_mad_pool_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	deregistering a MAD pool previously registered for use with a
+*	special QP.  The kernel proxy will deregister and destroy the MAD
+*	pool created on behalf of the user process.
+*
+* SYNOPSIS
+*/
+typedef union _ual_dereg_mad_pool_ioctl
+{
+	struct _ual_dereg_mad_pool_ioctl_in
+	{
+		uint64_t			pool_key;
+
+	}	in;
+	struct _ual_dereg_mad_pool_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_dereg_mad_pool_ioctl_t;
+/*
+* FIELDS
+*	in.pool_key
+*		Pool key to the MAD pool in kernel space.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_send_mad_ioctl_t
+* NAME
+*	ual_send_mad_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_send_mad
+*
+* SYNOPSIS
+*/
+typedef union _ual_send_mad_ioctl
+{
+	struct _ual_send_mad_ioctl_in
+	{
+		uint64_t					h_mad_svc;
+		uint64_t					pool_key;
+		uint64_t					h_av;
+		ib_mad_element_t* __ptr64	p_mad_element;
+		uint32_t					size;
+		void* __ptr64* __ptr64		ph_proxy;
+
+	}	in;
+	struct _ual_send_mad_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_send_mad_ioctl_t;
+/*
+* FIELDS
+*	in.h_mad_svc
+*		Handle to the MAD service.
+*
+*	in.pool_key
+*		Pool key associated with the pool in kernel space.
+*
+*	in.h_av
+*		Handle to the address vector of the MAD.
+*
+*	in.p_mad_element
+*		Pointer to the user-mode MAD element.  The proxy marshals this data.
+*
+*	in.size
+*		Size of the MAD buffer to send.
+*
+*	in.ph_proxy
+*		Location to which to write the context used to cancel the MAD.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cancel_mad_ioctl_t
+* NAME
+*	ual_cancel_mad_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_cancel_mad
+*
+* SYNOPSIS
+*/
+typedef union _ual_cancel_mad_ioctl
+{
+	struct _ual_cancel_mad_ioctl_in
+	{
+		uint64_t			h_mad_svc;
+		void* __ptr64		h_proxy_element;
+
+	}	in;
+	struct _ual_cancel_mad_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_cancel_mad_ioctl_t;
+/*
+* FIELDS
+*	in.h_mad_svc
+*		Handle to the MAD service.
+*
+*	in.h_proxy_element
+*		Context referencing the sent MAD operation to cancel, as returned
+*		by the send MAD IOCTL.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_spl_qp_ioctl_t
+* NAME
+*	ual_spl_qp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters to get
+*	the alias QP from KAL.
+*
+* SYNOPSIS
+*/
+typedef union _ual_spl_qp_ioctl
+{
+	struct _ual_spl_qp_ioctl_in
+	{
+		ci_umv_buf_t		umv_buf;
+		uint64_t			h_pd;
+		ib_net64_t			port_guid;
+		ib_qp_create_t		qp_create;
+		void* __ptr64		context;
+
+	}	in;
+	struct _ual_spl_qp_ioctl_out
+	{
+		ci_umv_buf_t		umv_buf;
+		ib_api_status_t		status;
+		uint64_t			h_qp;
+
+	}	out;
+
+}	ual_spl_qp_ioctl_t;
+/*
+* FIELDS
+*	in.umv_buf
+*		Opaque to IBAL buffer descriptor to allow the user-mode HCA library to
+*		exchange private information with the kernel-mode HCA driver.
+*
+*	in.h_pd
+*		Protection domain for the special QP.
+*
+*	in.port_guid
+*		Port GUID on which to allocate the special QP.
+*
+*	in.qp_create
+*		Special QP creation parameters.
+*
+*	in.context
+*		Context to associate with the QP, to be used in any notifications.
+*
+*	out.umv_buf
+*		Returns the status from the HCA driver to the user-mode HCA library,
+*		along with any vendor specific output information.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.h_qp
+*		Handle to the special QP.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_mad_recv_ioctl_t
+* NAME
+*	ual_mad_recv_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters to get
+*	the mad_element information upon receiving a MAD.
+*
+* SYNOPSIS
+*/
+typedef union _ual_mad_recv_ioctl
+{
+	struct _ual_mad_recv_comp_ioctl_in
+	{
+		uint64_t					h_mad;
+		ib_mad_element_t* __ptr64	p_user_mad;
+		ib_mad_t* __ptr64			p_mad_buf;
+		ib_grh_t* __ptr64			p_grh;
+
+	}	in;
+	struct _ual_mad_recv_comp_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_mad_recv_ioctl_t;
+/*
+* FIELDS
+*	in.h_mad
+*		Received MAD handle handed to user-mode in the MAD receive
+*		notification.
+*
+*	in.p_user_mad
+*		Pointer to a user-mode MAD element.
+*
+*	in.p_mad_buf
+*		Pointer to the MAD element's user-mode buffer.
+*
+*	in.p_grh
+*		Pointer to the MAD element's user-mode GRH buffer.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_local_mad_ioctl_t
+* NAME
+*	ual_local_mad_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_local_mad
+*
+* SYNOPSIS
+*/
+typedef union _ual_local_mad_ioctl
+{
+	struct _ual_local_mad_ioctl_in
+	{
+		uint64_t			h_ca;
+		uint8_t				port_num;
+		uint8_t				mad_in[MAD_BLOCK_SIZE];
+
+	}	in;
+	struct _ual_local_mad_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint8_t				mad_out[MAD_BLOCK_SIZE];
+
+	}	out;
+
+}	ual_local_mad_ioctl_t;
+/*
+* FIELDS
+*	in.h_ca
+*		The handle to an open instance of CA returned via a
+*		ual_open_ca_ioctl structure.
+*
+*	in.port_num
+*		Port number this MAD refers to.
+*
+*	in.mad_in
+*		MAD structure from user-mode to forward to the local HCA.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.mad_out
+*		MAD structure returned by the local HCA to user-mode.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_create_cep_ioctl_t
+* NAME
+*	ual_create_cep_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters to
+*	create a CEP.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_create_cep_ioctl
+{
+	ib_api_status_t		status;
+	net32_t				cid;
+
+}	ual_create_cep_ioctl_t;
+/*
+* FIELDS
+*	status
+*		Status of the operation.
+*
+*	cid
+*		CID of the created CEP.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_listen_ioctl_t
+* NAME
+*	ual_cep_listen_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters to
+*	perform a CM listen request.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_listen_ioctl
+{
+	net32_t				cid;
+	ib_cep_listen_t		cep_listen;
+	uint8_t				compare[IB_REQ_PDATA_SIZE];
+
+}	ual_cep_listen_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		CID of an existing CEP.
+*
+*	in.cep_listen
+*		Information used to direct the listen request to match incoming
+*		connection requests.
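+*
+* EXAMPLE
+*	A listen setup sketch (an editorial illustration; my_svc_id and
+*	port_guid are caller-supplied values, and the member names of
+*	ib_cep_listen_t follow its definition elsewhere in this tree):
+*
+*		ual_cep_listen_ioctl_t ioctl_buf;
+*
+*		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+*		ioctl_buf.cid = cid;		// from a prior create-CEP IOCTL
+*		ioctl_buf.cep_listen.svc_id = my_svc_id;
+*		ioctl_buf.cep_listen.port_guid = port_guid;
+*		// To match on private data, fill ioctl_buf.compare and set the
+*		// comparison members of cep_listen accordingly.
+*		// ... issue the IOCTL; matching REQs are then reported as new
+*		// CIDs through the CEP poll IOCTL.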
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_req_ioctl_t
+* NAME
+*	ual_cep_req_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for the
+*	al_cep_pre_req call.
+*
+* SYNOPSIS
+*/
+typedef union _ual_cep_req_ioctl
+{
+	struct _ual_cep_req_ioctl_in
+	{
+		net32_t				cid;
+		ib_cm_req_t			cm_req;
+		ib_path_rec_t		paths[2];
+		uint8_t				pdata[IB_REQ_PDATA_SIZE];
+		uint8_t				compare[IB_REQ_PDATA_SIZE];
+
+	}	in;
+	struct _ual_cep_req_ioctl_out
+	{
+		ib_api_status_t		status;
+		ib_qp_mod_t			init;
+
+	}	out;
+
+}	ual_cep_req_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		CID of the target CEP.
+*
+*	in.cm_req
+*		CM REQ parameters.
+*
+*	in.paths
+*		Array of paths, the first being the primary path to use for the REQ.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.init
+*		QP modify parameters for the INIT state transition.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_rep_ioctl_t
+* NAME
+*	ual_cep_rep_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for the
+*	al_cep_pre_rep call.
+*
+* SYNOPSIS
+*/
+typedef union _ual_cep_rep_ioctl
+{
+	struct _ual_cep_rep_ioctl_in
+	{
+		void* __ptr64		context;
+		net32_t				cid;
+		ib_cm_rep_t			cm_rep;
+		uint8_t				pdata[IB_REP_PDATA_SIZE];
+
+	}	in;
+	struct _ual_cep_rep_ioctl_out
+	{
+		ib_api_status_t		status;
+		ib_qp_mod_t			init;
+
+	}	out;
+
+}	ual_cep_rep_ioctl_t;
+/*
+* FIELDS
+*	in.context
+*		Context to associate with the CEP.
+*
+*	in.cid
+*		CID of the CEP, as reported in the REQ callback.
+*
+*	in.cm_rep
+*		CM REP parameters.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.init
+*		QP modify parameters for the INIT state transition.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_get_rtr_ioctl_t
+* NAME
+*	ual_cep_get_rtr_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters for the
+*	al_cep_get_rtr_attr call.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_get_rtr_ioctl
+{
+	ib_api_status_t		status;
+	ib_qp_mod_t			rtr;
+
+}	ual_cep_get_rtr_ioctl_t;
+/*
+* FIELDS
+*	out.status
+*		Status of the operation.
+*
+*	out.rtr
+*		QP modify parameters for the RTR state transition.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_get_rts_ioctl_t
+* NAME
+*	ual_cep_get_rts_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters for the
+*	al_cep_get_rts_attr call.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_get_rts_ioctl
+{
+	ib_api_status_t		status;
+	ib_qp_mod_t			rts;
+
+}	ual_cep_get_rts_ioctl_t;
+/*
+* FIELDS
+*	out.status
+*		Status of the operation.
+*
+*	out.rts
+*		QP modify parameters for the RTS state transition.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_rtu_ioctl_t
+* NAME
+*	ual_cep_rtu_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for the
+*	al_cep_rtu call.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_rtu_ioctl
+{
+	net32_t				cid;
+	uint8_t				pdata_len;
+	uint8_t				pdata[IB_RTU_PDATA_SIZE];
+
+}	ual_cep_rtu_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		CID of the CEP, as reported in the REP callback.
+*
+*	in.pdata_len
+*		Length of private data.
+*
+*	in.pdata
+*		Private data.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_rej_ioctl_t
+* NAME
+*	ual_cep_rej_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for
+*	al_cep_rej
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_rej_ioctl
+{
+	net32_t				cid;
+
+	ib_rej_status_t		rej_status;
+	uint8_t				ari_len;
+	uint8_t				pdata_len;
+	uint8_t				ari[IB_ARI_SIZE];
+	uint8_t				pdata[IB_REJ_PDATA_SIZE];
+
+}	ual_cep_rej_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP.
+*
+*	in.rej_status
+*		Rejection status as defined in the IB spec.
+*
+*	in.ari_len
+*		Length of the ARI data.
+*
+*	in.pdata_len
+*		Length of the private data.
+*
+*	in.ari
+*		ARI data.
+*
+*	in.pdata
+*		Private data.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_handoff_ioctl_t
+* NAME
+*	ual_cep_handoff_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_cm_handoff
+*
+* SYNOPSIS
+*/
+typedef union _ual_cep_handoff_ioctl
+{
+	struct _ual_cep_handoff_ioctl_in
+	{
+		uint64_t			h_cm;
+		net64_t				sid;
+
+	}	in;
+	struct _ual_cep_handoff_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_cep_handoff_ioctl_t;
+/*
+* FIELDS
+*	in.h_cm
+*		The connection handle received in the callback.
+*
+*	in.sid
+*		Service ID to which to handoff the listen.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_mra_ioctl_t
+* NAME
+*	ual_cep_mra_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for
+*	ib_cm_mra
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_mra_ioctl
+{
+	net32_t				cid;
+	ib_cm_mra_t			cm_mra;
+	uint8_t				pdata[IB_MRA_PDATA_SIZE];
+
+}	ual_cep_mra_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP.
+*
+*	in.cm_mra
+*		CM MRA parameters.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_lap_ioctl_t
+* NAME
+*	ual_cep_lap_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for
+*	ib_cm_lap
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_lap_ioctl
+{
+	net32_t				cid;
+	ib_cm_lap_t			cm_lap;
+	ib_path_rec_t		alt_path;
+	uint8_t				pdata[IB_LAP_PDATA_SIZE];
+
+}	ual_cep_lap_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP.
+*
+*	in.cm_lap
+*		CM LAP parameters.
+*
+*	in.alt_path
+*		Alternate path information.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_apr_ioctl_t
+* NAME
+*	ual_cep_apr_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for
+*	ib_cep_apr
+*
+* SYNOPSIS
+*/
+typedef union _ual_cep_apr_ioctl
+{
+	struct _ual_cep_apr_ioctl_in
+	{
+		net32_t				cid;
+		ib_cm_apr_t			cm_apr;
+		uint8_t				apr_info[IB_APR_INFO_SIZE];
+		uint8_t				pdata[IB_APR_PDATA_SIZE];
+
+	}	in;
+
+	struct _ual_cep_apr_ioctl_out
+	{
+		ib_api_status_t		status;
+		ib_qp_mod_t			apr;
+
+	}	out;
+
+}	ual_cep_apr_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP, as reported in the LAP callback.
+*
+*	in.cm_apr
+*		CM APR parameters.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_force_apm_ioctl_t
+* NAME
+*	ual_force_apm_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_force_apm
+*
+* SYNOPSIS
+*/
+typedef union _ual_force_apm_ioctl
+{
+	struct _ual_force_apm_ioctl_in
+	{
+		uint64_t			h_qp;
+
+	}	in;
+	struct _ual_force_apm_ioctl_out
+	{
+		ib_api_status_t		status;
+
+	}	out;
+
+}	ual_force_apm_ioctl_t;
+/*
+* FIELDS
+*	in.h_qp
+*		A handle to the QP to migrate.
+*
+*	out.status
+*		Status of the operation.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_dreq_ioctl_t
+* NAME
+*	ual_cep_dreq_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for
+*	ib_cm_dreq
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_dreq_ioctl
+{
+	net32_t				cid;
+	uint8_t				pdata_len;
+	uint8_t				pdata[IB_DREQ_PDATA_SIZE];
+
+}	ual_cep_dreq_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP.
+*
+*	in.pdata_len
+*		Length of the private data.
+*
+*	in.pdata
+*		Private data sent with the DREQ.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_drep_ioctl_t
+* NAME
+*	ual_cep_drep_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_cm_drep
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_drep_ioctl
+{
+	net32_t				cid;
+	ib_cm_drep_t		cm_drep;
+	uint8_t				pdata[IB_DREP_PDATA_SIZE];
+
+}	ual_cep_drep_ioctl_t;
+/*
+* FIELDS
+*	in.cid
+*		The CID of the target CEP, as reported in the DREQ callback.
+*
+*	in.cm_drep
+*		CM DREP parameters.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_get_timewait_ioctl_t
+* NAME
+*	ual_cep_get_timewait_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters for
+*	ib_cep_get_timewait
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_get_timewait_ioctl
+{
+	ib_api_status_t		status;
+	uint64_t			timewait_us;
+
+}	ual_cep_get_timewait_ioctl_t;
+/*
+* FIELDS
+*	status
+*		Status of the request.
+*
+*	timewait_us
+*		Timewait value, in microseconds.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cep_poll_ioctl_t
+* NAME
+*	ual_cep_poll_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters to
+*	poll for incoming events on a CEP.  The input parameter is the CID.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cep_poll_ioctl
+{
+	ib_api_status_t		status;
+	void* __ptr64		context;
+	net32_t				new_cid;
+	ib_mad_element_t	element;
+	ib_grh_t			grh;
+	uint8_t				mad_buf[MAD_BLOCK_SIZE];
+
+}	ual_cep_poll_ioctl_t;
+/*
+* FIELDS
+*	status
+*		Status of the operation.
+*
+*	context
+*		Context associated with the CEP.
+*
+*	new_cid
+*		For listen requests, the CID of a CEP created in response
+*		to an incoming REQ.
+*
+*	mad_buf
+*		Payload of a received MAD (or failed send).
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_reg_shmid_ioctl_t
+* NAME
+*	ual_reg_shmid_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	ib_create_shmid
+*
+* SYNOPSIS
+*/
+typedef union _ual_reg_shmid_ioctl
+{
+	struct _ual_reg_shmid_ioctl_in
+	{
+		uint64_t			h_pd;
+		ib_shmid_t			shmid;
+		ib_mr_create_t		mr_create;
+
+	}	in;
+	struct _ual_reg_shmid_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint64_t			vaddr;
+		net32_t				lkey;
+		net32_t				rkey;
+		uint64_t			h_mr;
+
+	}	out;
+
+}	ual_reg_shmid_ioctl_t;
+/*
+* PARAMETERS
+*	in.h_pd
+*		A handle to an existing protection domain that the memory
+*		should be registered with.
+*
+*	in.shmid
+*		An identifier to the shared memory region.
+*
+*	in.mr_create
+*		Information describing the attributes of the memory region to
+*		register.
+*
+*	out.status
+*		Status of the operation.
+*
+*	out.vaddr
+*		Assigned I/O virtual address for the memory region.
+*
+*	out.lkey
+*		The local access key associated with this registered memory
+*		region.
+*
+*	out.rkey
+*		A key that may be used by a remote end-point when performing RDMA
+*		or atomic operations to this registered memory region.
+*
+*	out.h_mr
+*		Upon successful completion of this call, this references a handle
+*		to the registered memory region.  This handle is used when performing
+*		data transfers and to deregister the memory.
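+*
+* EXAMPLE
+*	A registration sketch (an editorial illustration; the IOCTL
+*	transport is assumed, and p_buf/buf_len are caller-supplied):
+*
+*		ual_reg_shmid_ioctl_t ioctl_buf;
+*
+*		cl_memclr( &ioctl_buf, sizeof(ioctl_buf) );
+*		ioctl_buf.in.h_pd = h_pd;
+*		ioctl_buf.in.shmid = shmid;
+*		ioctl_buf.in.mr_create.vaddr = p_buf;
+*		ioctl_buf.in.mr_create.length = buf_len;
+*		ioctl_buf.in.mr_create.access_ctrl =
+*			IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
+*		// ... issue the IOCTL; on success use out.vaddr, out.lkey,
+*		// out.rkey and out.h_mr.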
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_send_sa_req_t
+* NAME
+*	ual_send_sa_req_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input and output parameters for
+*	sending a request to the SA.
+*
+* SYNOPSIS
+*/
+typedef union _ual_send_sa_req_ioctl
+{
+	struct _ual_send_sa_req_ioctl_in
+	{
+		net64_t						port_guid;
+		uint32_t					timeout_ms;
+		uint32_t					retry_cnt;
+		ib_user_query_t				sa_req;
+		uint8_t						attr[IB_SA_DATA_SIZE];
+		uint64_t* __ptr64			ph_sa_req;
+		ib_api_status_t* __ptr64	p_status;
+
+	}	in;
+	struct _ual_send_sa_req_ioctl_out
+	{
+		ib_api_status_t		status;
+		uint64_t			h_resp;
+		uint32_t			resp_size;
+
+	}	out;
+
+}	ual_send_sa_req_ioctl_t;
+/*
+* PARAMETERS
+*	in.port_guid
+*		GUID of the port on which to send the SA request.
+*
+*	in.timeout_ms
+*		Timeout, in milliseconds, to wait for a response.
+*
+*	in.retry_cnt
+*		Number of times to retry the request.
+*
+*	in.sa_req
+*		The SA request to send.
+*
+*	in.attr
+*		The SA attribute data to send.
+*
+*	in.ph_sa_req
+*		Pointer to UAL's query handle.  The proxy fills this in
+*		before returning from the IOCTL handler to allow cancellation.
+*
+*	in.p_status
+*		Pointer to the status of the query.
+*
+*	out.status
+*		Status of the query if it was initiated successfully.
+*
+*	out.h_resp
+*		Handle to a response MAD.
+*
+*	out.resp_size
+*		Size, in bytes, of the response MAD's buffer.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_cancel_sa_req_ioctl_t
+* NAME
+*	ual_cancel_sa_req_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for cancelling an
+*	SA request.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_cancel_sa_req_ioctl
+{
+	uint64_t			h_sa_req;
+
+}	ual_cancel_sa_req_ioctl_t;
+/*
+* PARAMETERS
+*	h_sa_req
+*		Handle to the query to cancel.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_reg_pnp_ioctl_in_t
+* NAME
+*	ual_reg_pnp_ioctl_in_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for registering
+*	for PnP events.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_reg_pnp_ioctl_in
+{
+	ib_pnp_class_t				pnp_class;
+	void* __ptr64				sync_event;
+	ib_api_status_t* __ptr64	p_status;
+	uint64_t* __ptr64			p_hdl;
+
+}	ual_reg_pnp_ioctl_in_t;
+/*
+* NOTES
+*	This is an asynchronous IOCTL.
+*
+*	The output parameters are a ual_rearm_pnp_ioctl_out_t.
+*
+* PARAMETERS
+*	pnp_class
+*		Class of PnP events for which to register.
+*
+*	p_status
+*		Pointer to user-mode status variable to set in failure case.
+*
+*	p_hdl
+*		Pointer to user-mode handle variable to set in success case.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_poll_pnp_ioctl_t
+* NAME
+*	ual_poll_pnp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the parameters for retrieving data for
+*	a PnP event.
+*
+* SYNOPSIS
+*/
+typedef union _ual_poll_pnp_ioctl
+{
+	struct _ual_poll_pnp_ioctl_in
+	{
+		uint64_t			evt_hdl;
+
+	}	in;
+	struct _ual_poll_pnp_ioctl_out
+	{
+		ib_pnp_rec_t		pnp_rec;
+
+	}	out;
+
+}	ual_poll_pnp_ioctl_t;
+/*
+* NOTES
+*	This is a synchronous IOCTL.
+*
+* PARAMETERS
+*	in.evt_hdl
+*		Handle to a new PnP event.
+*
+*	out.pnp_rec
+*		Buffer for the new PnP event.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_rearm_pnp_ioctl_in_t
+* NAME
+*	ual_rearm_pnp_ioctl_in_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for requesting
+*	notification of the next PnP event.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_rearm_pnp_ioctl_in
+{
+	uint64_t			pnp_hdl;
+	uint64_t			last_evt_hdl;
+	void* __ptr64		last_evt_context;
+	ib_api_status_t		last_evt_status;
+
+}	ual_rearm_pnp_ioctl_in_t;
+/*
+* NOTES
+*	This is an asynchronous IOCTL.
+*
+*	The output parameters are a ual_rearm_pnp_ioctl_out_t.
+*
+* PARAMETERS
+*	pnp_hdl
+*		Handle to the PnP registration to rearm.
+*
+*	last_evt_hdl
+*		Handle to the last PnP event processed.
+*
+*	last_evt_context
+*		Context value to set for the last reported PnP event.
+*
+*	last_evt_status
+*		Status value to return for the last reported PnP event.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_rearm_pnp_ioctl_out_t
+* NAME
+*	ual_rearm_pnp_ioctl_out_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the output parameters for a PnP event.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_rearm_pnp_ioctl_out
+{
+	uint64_t			evt_hdl;
+	uint32_t			evt_size;
+
+}	ual_rearm_pnp_ioctl_out_t;
+/*
+* NOTES
+*	This is an asynchronous IOCTL.
+*
+*	The output parameters are identical to that of ual_reg_pnp_ioctl_t.
+*
+* PARAMETERS
+*	evt_hdl
+*		Handle to a new PnP event.
+*
+*	evt_size
+*		Buffer size needed to poll the new PnP event.
+*****/
+
+
+
+/****s* User-mode Access Layer/ual_dereg_pnp_ioctl_t
+* NAME
+*	ual_dereg_pnp_ioctl_t
+*
+* DESCRIPTION
+*	IOCTL structure containing the input parameters for deregistering
+*	from PnP events.
+*
+* SYNOPSIS
+*/
+typedef struct _ual_dereg_pnp_ioctl
+{
+	uint64_t			h_pnp;
+
+}	ual_dereg_pnp_ioctl_t;
+/*
+* NOTES
+*	This is an asynchronous IOCTL.
+*
+* PARAMETERS
+*	h_pnp
+*		Handle to the PnP registration to deregister.
+*****/
+#endif	/* __IB_UAL_IOCTL_H__ */
diff --git a/branches/Ndi/inc/iba/ib_at_ioctl.h b/branches/Ndi/inc/iba/ib_at_ioctl.h
new file mode 100644
index 00000000..f7d4b852
--- /dev/null
+++ b/branches/Ndi/inc/iba/ib_at_ioctl.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/* This file is shared between user- and kernel-mode */
+
+#include
+
+
+#ifndef _IB_AT_IOCTL_H_
+#define _IB_AT_IOCTL_H_
+
+
+#define IBAT_IOCTL_VERSION		4
+
+#define IBAT_MAC_LEN			6
+
+
+#define IOCTL_IBAT( n )		\
+	CTL_CODE( FILE_DEVICE_UNKNOWN, (0x800 + n),	\
+	METHOD_BUFFERED, FILE_ANY_ACCESS )
+
+/** This IRP is used to return the port numbers and port GUIDs
+ * of all available CAs */
+#define IOCTL_IBAT_PORTS		IOCTL_IBAT( 1 )
+
+typedef struct _IBAT_PORT_RECORD
+{
+	UINT64		CaGuid;
+	UINT64		PortGuid;
+	UINT8		PortNum;
+
+} IBAT_PORT_RECORD;
+
+typedef struct _IOCTL_IBAT_PORTS_IN
+{
+	ULONG		Version;
+
+} IOCTL_IBAT_PORTS_IN;
+
+typedef struct _IOCTL_IBAT_PORTS_OUT
+{
+	/** Total size of the output buffer needed if the
+	 * supplied buffer wasn't large enough */
+	ULONG				Size;
+	LONG				NumPorts;
+	IBAT_PORT_RECORD	Ports[1];
+
+} IOCTL_IBAT_PORTS_OUT;
+
+
+/** This IRP is used to return all the IP addresses that
+ * are assigned to a port */
+#define IOCTL_IBAT_IP_ADDRESSES	IOCTL_IBAT( 2 )
+
+typedef struct _IOCTL_IBAT_IP_ADDRESSES_IN
+{
+	ULONG		Version;
+	/** The GUID of the port that we are querying.  May be
+	 * zero to query the IP addresses of all ports. */
+	UINT64		PortGuid;
+
+} IOCTL_IBAT_IP_ADDRESSES_IN;
+
+typedef struct _IP_ADDRESS
+{
+	/** Can only be 4 or 6 */
+	CHAR		IpVersion;
+	/** Sized to support both IPv4 and IPv6 */
+	UCHAR		Address[16];
+
+} IP_ADDRESS;
+
+typedef struct _IOCTL_IBAT_IP_ADDRESSES_OUT
+{
+	/** Total size of the output buffer needed if the
+	 * supplied buffer wasn't large enough */
+	ULONG		Size;
+	LONG		AddressCount;
+	IP_ADDRESS	Address[1];
+
+} IOCTL_IBAT_IP_ADDRESSES_OUT;
+
+
+/** This IRP is used to convert a remote MAC address to a remote GID */
+#define IOCTL_IBAT_MAC_TO_GID	IOCTL_IBAT( 3 )
+
+typedef struct _IOCTL_IBAT_MAC_TO_GID_IN
+{
+	ULONG		Version;
+	UINT64		PortGuid;
+	UCHAR		DestMac[IBAT_MAC_LEN];
+
+} IOCTL_IBAT_MAC_TO_GID_IN;
+
+typedef struct _IOCTL_IBAT_MAC_TO_GID_OUT
+{
+	ib_gid_t	DestGid;
+
+} IOCTL_IBAT_MAC_TO_GID_OUT;
+
+
+#define IBAT_DEV_NAME		L"\\Device\\ibat"
+#define IBAT_DOS_DEV_NAME	L"\\DosDevices\\Global\\ibat"
+#define IBAT_WIN32_NAME		L"\\\\.\\ibat"
+
+#endif	/* _IB_AT_IOCTL_H_ */
diff --git a/branches/Ndi/inc/iba/ib_ci.h b/branches/Ndi/inc/iba/ib_ci.h
new file mode 100644
index 00000000..8e7b2057
--- /dev/null
+++ b/branches/Ndi/inc/iba/ib_ci.h
@@ -0,0 +1,2917 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#if !defined(__IB_CI_H__)
+#define __IB_CI_H__
+
+#include
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****h* IB_API/Verbs
+* NAME
+*	Verbs -- Verbs implements the hardware and software glue to the OS layer.
+* COPYRIGHT
+*	Copyright © 2001 Intel Corporation - All Rights Reserved.
+* DESCRIPTION
+*	The Verbs API defines the interface mechanism between the access layer
+*	and an IHV-supplied driver component.  It implements verbs functionality
+*	as defined in Volume 1 of the InfiniBand(tm) specification.
+* AUTHOR
+*	Intel Corporation
+* CREATION DATE
+*	XX.XX.XX
+* NOTES
+*	Evolving Spec!!
+*	Invalid handle checks in kernel mode are mere signature checks, so
+*	passing an invalid pointer can lead to a kernel panic.  In user mode,
+*	handles are verified for most verbs that take a kernel transition.
+*	Verbs that execute entirely in user mode do not perform consistency
+*	checks, since these would affect the speed path, so invalid pointers
+*	can crash the application with a core dump.
+***********
+*/
+
+
+/*
+ * Version that identifies this version of the header file for interface
+ * definition.
+ */
+#define VERBS_MAJOR_VER			(0x0001)
+#define VERBS_MINOR_VER			(0x0004)
+
+#define VERBS_VERSION			(((VERBS_MAJOR_VER) << 16) | (VERBS_MINOR_VER))
+#define MK_VERBS_VERSION(maj,min)	((((maj) & 0xFFFF) << 16) | \
+									((min) & 0xFFFF))
+
+/*
+ * TODO: The in and out buffers should be separated (usage can still make
+ * both point to the same actual memory region).
+ */
+/****s* Verbs/ci_umv_buf_t
+* NAME
+*	ci_umv_buf_t -- Vendor specific structure to facilitate user mode IO
+* DESCRIPTION
+*	This structure is provided to assist the vendor specific user mode
+*	library to exchange information with its kernel mode driver.  The
+*	user mode InfiniBand(tm) Access Layer will call the vendor specific
+*	module before a call is made to the kernel mode driver.  The kernel mode
+*	driver is expected to know the format and data in the p_inout_buf,
+*	and copy any necessary data that must be handed to the user mode
+*	vendor library.
+* PURPOSE
+*	command
+*		A command code that is understood by the vendor specific kernel
+*		mode driver.
+*	p_inout_buf
+*		The user mode component of the vendor specific library allocates
+*		this memory and passes information in this buffer.  The vendor is
+*		expected to set both the input and output buffer sizes appropriately.
+*		This information is required since the kernel mode proxy that passes
+*		this buffer to the kernel mode vendor specific library will copy the
+*		content of this buffer to a kernel mode buffer.  The kernel mode
+*		vendor specific driver would copy the data that needs to be returned
+*		to the user mode component, and set the output size appropriately
+*		so that the proxy can now copy the data back to the user mode buffer.
+*
+*		In the InfiniBand Access Layer, it is important to know the
+*		usage of umv_buf and whether the contents of the p_inout_buf
+*		can have embedded user-mode pointers.  When invoked from an
+*		arbitrary thread context, the vendor driver can NOT access the
+*		user-mode pointers of a user process.
+*	input_size
+*		Size of the input buffer; must be set by the user mode vendor
+*		specific library.
+*	output_size
+*		Size of the output buffer.  Must be set by the user mode component
+*		to specify the maximum size of the data expected from its kernel
+*		mode driver.  The kernel mode driver would set the size to the exact
+*		size that needs to be returned to its user mode counterpart.
+*	status
+*		Indicates the status of the operation from the kernel mode vendor
+*		specific driver.  The caller is supposed to initialize it
+*		appropriately to identify whether an operation succeeded or failed.
+*		For example, when the user mode library is called after a resource
+*		creation, the user mode vendor specific code must be able to
+*		identify if there is post processing required, or if any resource
+*		allocation failed.
+* SOURCE
+*/
+typedef struct _umv_buf
+{
+	uint32_t		command;
+	uint32_t		status;
+	uint32_t		input_size;
+	uint32_t		output_size;
+	void* __ptr64	p_inout_buf;
+} ci_umv_buf_t;
+/******/
+
+/****f* Verbs/ci_completion_cb_t
+* NAME
+*	ci_completion_cb_t -- Completion Notification callback.
+* SYNOPSIS
+*/
+
+typedef void
+(*ci_completion_cb_t)(
+	IN	void	*cq_context );
+
+/*
+* DESCRIPTION
+*	This function prototype indicates the parameter passed to ci_open_ca()
+*	to receive completion callbacks.
+* PARAMETERS
+*	cq_context
+*		[in] Completion queue context passed during the ci_create_cq
+* RETURN VALUE
+*	None
+* NOTES
+*	The consumer only gets the cq_context and ca_context.  It is the
+*	client's responsibility to store the cq_handle in the context at
+*	creation time, so that it can call ci_poll_cq() after the arrival of
+*	the notification.
+* SEE ALSO
+*	ci_open_ca, ci_create_cq
+******
+*/
+
+
+/****f* Verbs/ci_async_event_cb_t
+* NAME
+*	ci_async_event_cb_t
+*
+* DESCRIPTION
+*	Asynchronous event notification routine.
+*
+* SYNOPSIS
+*/
+typedef void
+(*ci_async_event_cb_t)(
+	IN	const	ib_event_rec_t* const	p_event_record );
+/*
+* PARAMETERS
+*	p_event_record
+*		[in] Information describing the type of event that has occurred.
+*
+* NOTES
+*	This routine is called when an asynchronous event is generated by a
+*	channel adapter.  The event notification record passed has relevant
+*	information on the type of the event, the source that caused the event,
+*	and the context associated.
+*****/
+
+
+/****f* Verbs/ci_open_ca
+* NAME
+*	ci_open_ca -- open and possibly obtain a handle to access the HCA.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_open_ca) (
+	IN	const	ib_net64_t			ca_guid,
+	IN	const	ci_completion_cb_t	pfn_completion_cb,
+	IN	const	ci_async_event_cb_t	pfn_async_event_cb,
+	IN	const	void* const			ca_context,
+	OUT		ib_ca_handle_t			*ph_ca );
+/*
+* DESCRIPTION
+*	This routine returns a handle to an open instance of an HCA.  A client
+*	can call this routine to retrieve a new open instance.  Only one
+*	instance of the open call is active at any time.  If a duplicate open
+*	is called by the same or any other consumer, an IB_RESOURCE_BUSY error
+*	status is returned.
+* PARAMETERS
+*	ca_guid
+*		[in] The HCA adapter's EUI64 identifier.  Clients would use other
+*		enumeration API's to locate all available adapters and their
+*		guids in a system, e.g. GetCaGuids(), maintained by the IB
+*		Access Layer.  User mode consumers also have the same mechanism
+*		to retrieve this information.
+*	pfn_completion_cb
+*		[in] Completion Handler, one per open instance.
+*	pfn_async_event_cb
+*		[in] Asynchronous event handler, one per open instance.
+*	ca_context
+*		[in] Verbs consumer supplied value, which is returned on calls to
+*		handlers and in turn is used by clients to identify the
+*		open instance.
+*	ph_ca
+*		[out] Pointer to a handle to the newly open instance of the CA
+*		returned by the Verbs Provider.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The HCA is successfully opened and the returned handle is valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		Callback routines are not provided, the GUID value is zero, or
+*		ph_ca is NULL.
+*	IB_RESOURCE_BUSY
+*		The interface is already open by another consumer.
+*	IB_NOT_FOUND
+*		ca_guid passed is not valid
+*
+* SEE ALSO
+*	ci_query_ca, ci_modify_ca, ci_close_ca
+*********/
+
+
+/****f* Verbs/ci_um_open_ca
+* NAME
+*	ci_um_open_ca -- Create a CA context for use by user-mode processes.
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_um_open_ca_t) (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN OUT		ci_umv_buf_t* const	p_umv_buf,
+	OUT		ib_ca_handle_t* const	ph_um_ca );
+/*
+* DESCRIPTION
+*	This routine is called on behalf of a user-mode application to
+*	establish a per-CA context in user-mode.
+*
+* PARAMETERS
+*	h_ca
+*		[in] Handle returned by an earlier call to ci_open_ca()
+*	p_umv_buf
+*		[in/out] Vendor specific parameter to support user mode IO.
+*	ph_um_ca
+*		[out] Handle to pass into the ci_um_close_ca call to free any
+*		kernel resources allocated for the user-mode application.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The user-mode context information is returned successfully.
+*	IB_INSUFFICIENT_MEMORY
+*		Insufficient memory to satisfy the request.
+*	IB_INVALID_CA_HANDLE
+*		h_ca is invalid
+*	IB_INVALID_PARAMETER
+*		The p_umv_buf parameters are insufficient to complete the request.
+*
+* SEE ALSO
+*	ci_query_ca, ci_modify_ca, ci_close_ca
+*********/
+
+
+/****f* Verbs/ci_query_ca
+* NAME
+*	ci_query_ca -- Query the attributes of the HCA
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_ca) (
+	IN	const	ib_ca_handle_t	h_ca,
+	OUT		ib_ca_attr_t	*p_ca_attr OPTIONAL,
+	IN OUT		uint32_t	*p_size,
+	IN OUT		ci_umv_buf_t	*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine retrieves vital information about this HCA.  It returns
+*	the necessary information about the HCA guid, port guids, and the LIDs
+*	assigned by the master SM.  Clients can use this information to
+*	communicate with the Master SM node to perform path queries etc.
+* PARAMETERS
+*	h_ca
+*		[in] Handle returned by an earlier call to ci_open_ca()
+*	p_ca_attr
+*		[out] CA attribute of this Host Channel adapter
+*	p_size
+*		[in/out] On input, this references the size of the data buffer
+*		referenced by the p_ca_attr parameter.
+*		On output, the number of bytes used or needed to copy all CA
+*		attribute information.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The attribute structure is returned completely.
+*	IB_INSUFFICIENT_MEMORY
+*		The size of the p_ca_attr buffer, specified through p_size, is
+*		insufficient to store all of the CA attribute information.
+*	IB_INVALID_CA_HANDLE
+*		h_ca is invalid
+*	IB_INVALID_PARAMETER
+*		p_size is NULL.
+* NOTES
+*	Users may obtain the size of the data buffer required to obtain the
+*	CA attributes by calling this function with p_ca_attr set to NULL.
+*	The channel interface will then return the necessary size in the
+*	variable referenced by the p_size parameter.  The caller can then
+*	allocate a buffer of the exact size and call this routine again.  No
+*	partial information is returned if the size is not sufficient.
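+* EXAMPLE
+*	A minimal sketch (added for illustration; not part of the original
+*	interface) of the two-call sizing pattern described in the NOTES
+*	above.  The ci_query_ca-typed pointer pfn_query_ca and the use of
+*	malloc() are assumptions; a real consumer would use whatever dispatch
+*	table and allocator its environment provides.
+*
+*		ib_ca_attr_t	*p_ca_attr = NULL;
+*		uint32_t	size = 0;
+*		ib_api_status_t	status;
+*
+*		/* First call with a NULL buffer: the CI reports the
+*		 * required size in 'size'. */
+*		status = pfn_query_ca( h_ca, NULL, &size, NULL );
+*		p_ca_attr = (ib_ca_attr_t*)malloc( size );
+*		if( p_ca_attr )
+*		{
+*			/* Second call with a buffer of the exact size. */
+*			status = pfn_query_ca( h_ca, p_ca_attr, &size, NULL );
+*		}
+*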
+* SEE ALSO
+*	ci_open_ca, ci_modify_ca
+********/
+
+/****f* Verbs/ci_modify_ca
+* NAME
+*	ci_modify_ca -- Modify port attributes and error counters
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_ca) (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	uint8_t			port_num,
+	IN	const	ib_ca_mod_t		ca_mod,
+	IN	const	ib_port_attr_mod_t	*p_port_attr_mod );
+/*
+* DESCRIPTION
+*	Modifies either the P_KEY/Q_KEY violation counters, or sets the
+*	capability mask in the port attributes.  This is effectively reflected
+*	in the PortInfo values returned later when a MAD from the SM or another
+*	node arrives to retrieve port related attributes.
+*
+* PARAMETERS
+*	h_ca
+*		[in] Handle returned by previous call to ci_open_ca()
+*	port_num
+*		[in] Port number which needs to be modified.
+*	ca_mod
+*		[in] Command mask to perform operations on.
+*	p_port_attr_mod
+*		[in] Port attributes for which this change is to be performed.
+*		If a capability bit is set, then the corresponding port
+*		capability is turned on.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Modify port attributes was performed
+*	IB_INVALID_PORT
+*		Invalid port number supplied in port_num.
+*	IB_INVALID_PARAMETER
+*		Unknown command supplied in ca_mod.
+*	IB_UNSUPPORTED
+*		Optional Q_KEY and P_KEY violation counters are not supported.
+*	IB_INVALID_CA_HANDLE
+*		h_ca is invalid
+* NOTES
+*	No ownership checks are performed in the Verbs Provider Driver.
+*	All such permission checks are to be performed by the IB access layer
+*	before passing requests down to the HCA driver.  These operations can
+*	be performed only by the special QP owner, either QP0 or QP1: port
+*	attributes are really maintained by QP0 so that the SMA can respond
+*	with correct values, while the agent capability is really QP1
+*	functionality.
+* SEE ALSO
+*	ci_open_ca, ci_query_ca, ci_close_ca
+*/
+/********/
+
+/****f* Verbs/ci_close_ca
+* NAME
+*	ci_close_ca -- Close access to adapter via this h_ca
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_close_ca) (
+	IN		ib_ca_handle_t		h_ca );
+/*
+* DESCRIPTION
+*	This routine is called when the client no longer wishes to use HCA
+*	resources obtained via this h_ca.  All resources allocated as part of
+*	this handle during the ci_open_ca are destroyed.
+* PARAMETERS
+*	h_ca
+*		[in] CA handle returned via the ci_open_ca() call.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The intent to destroy is registered.  No further calls for
+*		completion or async events will be sent to this instance.  When it
+*		is appropriate to destroy this instance, the event h_kevent is
+*		signaled.
+*	IB_RESOURCE_BUSY
+*		Some resource allocated via this handle is not freed.
+*	IB_INVALID_CA_HANDLE
+*		h_ca is invalid
+* NOTES
+*	This call cannot be made from any of the notification functions invoked
+*	by the Verbs driver, for example, the completion handler or the async
+*	error callback provided during the ci_open_ca() call.  The call will
+*	block until all references to this adapter object are released, which
+*	includes all the pending callbacks returning back to the verbs provider
+*	driver.
+*
+*	Resources allocated during ci_open_ca() are deallocated.  Other
+*	resource cleanup is the responsibility of the consumer.
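+* EXAMPLE
+*	A hedged sketch (illustrative only) of the expected open/close
+*	pairing.  pfn_open_ca and pfn_close_ca stand in for the provider's
+*	entry points; my_comp_cb, my_event_cb, and my_ctx are hypothetical
+*	consumer-supplied items.
+*
+*		ib_ca_handle_t	h_ca;
+*		ib_api_status_t	status;
+*
+*		status = pfn_open_ca( ca_guid, my_comp_cb, my_event_cb,
+*			&my_ctx, &h_ca );
+*		if( status == IB_SUCCESS )
+*		{
+*			/* ... allocate PDs, CQs, QPs; transfer data ... */
+*
+*			/* Must not be called from a completion or async event
+*			 * callback; blocks until all callbacks drain. */
+*			status = pfn_close_ca( h_ca );
+*		}
+*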
+* SEE ALSO
+*	ci_open_ca
+********/
+
+/****f* Verbs/ci_um_close_ca_t
+* NAME
+*	ci_um_close_ca_t -- Close user-mode access to adapter via this h_ca
+* SYNOPSIS
+*/
+typedef void
+(*ci_um_close_ca_t) (
+	IN		ib_ca_handle_t		h_ca,
+	IN		ib_ca_handle_t		h_um_ca );
+/*
+* DESCRIPTION
+*	This routine is called when the client no longer wishes to use HCA
+*	resources obtained via this h_ca.  All resources allocated as part of
+*	this handle during the ci_um_open_ca are destroyed.
+* PARAMETERS
+*	h_ca
+*		[in] CA handle returned via the ci_open_ca() call.
+*	h_um_ca
+*		[in] CA handle returned via the ci_um_open_ca() call.
+*
+* RETURN VALUE
+*	This function does not return a value.
+* NOTES
+*	This call is invoked from the context of a UM application when such an
+*	application closes the HCA in user-mode.
+*
+*	Resources allocated during the ci_um_open_ca() are deallocated.
+*
+* SEE ALSO
+*	ci_um_open_ca
+********/
+
+/****f* Verbs/ci_allocate_pd
+* NAME
+*	ci_allocate_pd -- Allocate a protection domain for this adapter.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_allocate_pd) (
+	IN	const	ib_ca_handle_t	h_ca,
+	IN	const	ib_pd_type_t	type,
+	OUT		ib_pd_handle_t	*ph_pd,
+	IN OUT		ci_umv_buf_t	*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine allocates a protection domain handle, which is later used
+*	to create QPs, register memory regions, bind memory windows, and create
+*	address vectors.  A protection domain has no InfiniBand architectural
+*	attributes, but the OS implements policy on its usage and allocation.
+* PARAMETERS
+*	h_ca
+*		[in] Handle returned by ci_open_ca()
+*
+*	type
+*		[in] Type of the protection domain.  CA vendors may use this
+*		information to optimize how the PD is allocated.
+*
+*	ph_pd
+*		[out] The handle to the newly created protection domain
+*
+*	p_umv_buf
+*		[in/out] Vendor specific parameter to support user mode IO.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		PD is successfully allocated and ph_pd is valid.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		No more PDs available for this adapter.
+*
+*	IB_INVALID_CA_HANDLE
+*		HCA handle is not valid
+*
+* SEE ALSO
+*	ci_deallocate_pd
+*******/
+
+/****f* Verbs/ci_deallocate_pd
+* NAME
+*	ci_deallocate_pd -- Deallocate a protection domain object.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_deallocate_pd) (
+	IN		ib_pd_handle_t		h_pd );
+
+/*
+* DESCRIPTION
+*	This routine deallocates a PD that was allocated via the
+*	ci_allocate_pd() call.  The PD cannot be deallocated if it is still
+*	bound to a QP, any memory region, memory window, or address vector.
+* PARAMETERS
+*	h_pd
+*		[in] Handle allocated via the ci_allocate_pd()
+* RETURN VALUE
+*	IB_SUCCESS
+*		PD is freed successfully
+*	IB_INVALID_PD_HANDLE
+*		pd_handle is invalid
+*	IB_RESOURCE_BUSY
+*		PD is probably still bound to some resource
+* SEE ALSO
+*	ci_allocate_pd
+*******
+*/
+
+
+/****f* Verbs/ci_create_av
+* NAME
+*	ci_create_av -- Create an address vector for use in UD.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_av) (
+	IN	const	ib_pd_handle_t	h_pd,
+	IN	const	ib_av_attr_t	*p_av_attr,
+	OUT		ib_av_handle_t	*ph_av,
+	IN OUT		ci_umv_buf_t	*p_umv_buf OPTIONAL );
+
+/*
+* DESCRIPTION
+*	This routine creates an address vector for use in unreliable datagram
+*	queue pairs.  The information necessary to create the address vector
+*	handle is supplied in the ib_av_attr_t parameter.
+* PARAMETERS
+*	h_pd
+*		[in] Protection domain to which this av is associated.
+*	p_av_attr
+*		[in] Parameters to create the address vector handle
+*	ph_av
+*		[out] Handle to use for datagram sends.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The create operation was successful
+*	IB_INSUFFICIENT_RESOURCES
+*		No more address handles are available
+*	IB_INVALID_PD_HANDLE
+*		The specified protection domain handle is invalid
+*	IB_INVALID_PORT
+*		Invalid port number supplied.
+*	IB_INVALID_PARAMETER
+*		One of the p_av_attr or ph_av parameters was NULL.
+* NOTES
+*	The values in p_av_attr are not validated for correctness here.  The
+*	values in the attribute, such as port number, protection domain etc.,
+*	are validated during processing by the channel adapter.  If the
+*	attribute validation fails, a processing error of IB_WCS_LOCAL_OP_ERR
+*	results.
+* SEE ALSO
+*	ci_allocate_pd
+********/
+
+/****f* Verbs/ci_query_av
+* NAME
+*	ci_query_av -- Obtain the address vector associated with the handle
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_av) (
+	IN	const	ib_av_handle_t	h_av,
+	OUT		ib_av_attr_t	*p_av_attr,
+	OUT		ib_pd_handle_t	*ph_pd,
+	IN OUT		ci_umv_buf_t	*p_umv_buf OPTIONAL );
+
+/*
+* DESCRIPTION
+*	This routine returns the address vector and pd_handle associated with
+*	the av_handle.
+* PARAMETERS
+*	h_av
+*		[in] Handle to the address vector
+*	p_av_attr
+*		[out] address vector data referred to by the av_handle
+*	ph_pd
+*		[out] pd handle associated with the av_handle
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		returned values are valid
+*	IB_INVALID_AV_HANDLE
+*		The address vector handle was invalid
+*	IB_INVALID_PARAMETER
+*		One of the p_av_attr or ph_pd parameters was NULL.
+*	IB_INVALID_PORT
+*		Invalid port number passed in the Address Vector.
+* SEE ALSO
+*	ci_create_av, ci_modify_av
+*******
+*/
+
+/****f* Verbs/ci_modify_av
+* NAME
+*	ci_modify_av -- Change the address vector referred to by the av_handle
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_av) (
+	IN	const	ib_av_handle_t	h_av,
+	IN	const	ib_av_attr_t	*p_av_attr,
+	IN OUT		ci_umv_buf_t	*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine allows a consumer to modify the address information
+*	associated with the handle.
+* PARAMETERS
+*	h_av
+*		[in] Address handle that needs to be updated with new info.
+*	p_av_attr
+*		[in] New address vector to associate with the addr_handle.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Operation was successful
+*	IB_INVALID_AV_HANDLE
+*		The address vector handle was invalid
+*	IB_INVALID_PORT
+*		Invalid port number passed in the Address Vector.
+*	IB_INVALID_PARAMETER
+*		The parameter p_av_attr is not valid.
+* NOTES
+*	The values in p_av_attr are not validated for correctness here.  The
+*	values in the attribute, such as port number, protection domain etc.,
+*	are validated during processing by the channel adapter.  If the
+*	attribute validation fails, a processing error of IB_WCS_LOCAL_OP_ERR
+*	results.
+* SEE ALSO
+*	ci_create_av, ci_query_av
+*********
+*/
+
+/****f* Verbs/ci_destroy_av
+* NAME
+*	ci_destroy_av -- Destroy the address vector
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_av) (
+	IN	const	ib_av_handle_t	h_av );
+/*
+* DESCRIPTION
+*	This routine destroys the specified address handle.  After the routine
+*	returns, this address handle cannot be used to reference the
+*	destination.
+* PARAMETERS
+*	h_av
+*		[in] Handle that needs to be destroyed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Operation was successful.
+*	IB_INVALID_AV_HANDLE
+*		The address vector handle was invalid
+* SEE ALSO
+*	ci_create_av
+*********
+*/
+
+/****f* Verbs/ci_create_srq
+* NAME
+*	ci_create_srq -- Create a shared receive queue (SRQ) for the specified HCA
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_srq) (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	void			*srq_context,
+	IN	const	ib_srq_attr_t * const	p_srq_attr,
+	OUT		ib_srq_handle_t		*ph_srq,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	A new shared receive queue is created on the specified HCA.  The
+*	initial set of attributes is provided by the p_srq_attr parameter.
+*	A handle to the newly created SRQ is returned in ph_srq.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to Protection Domain
+*	srq_context
+*		[in] A user specified context passed in an asynchronous error
+*		callback.
+*	p_srq_attr
+*		[in] Initial attributes with which the srq must be created.
+*	ph_srq
+*		[out] Handle to the newly created SRQ.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The SRQ is successfully created with the provided initial
+*		attributes.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete request.
+*	IB_INVALID_PD_HANDLE
+*		The supplied pd_handle is invalid.
+*	IB_INVALID_SERVICE_TYPE
+*		Invalid service type.
+*	IB_INVALID_MAX_WRS
+*		Max WRS capacity exceeded
+*	IB_INVALID_MAX_SGE
+*		Max scatter gather element request exceeds HCA capability
+*	IB_UNSUPPORTED
+*		Shared receive queues are not supported.
+*	IB_INVALID_PARAMETER
+*		The parameter p_srq_attr is invalid.
+* NOTES
+*	If any of the initial attributes is not valid, the SRQ is not created.
+*	If the routine call is not successful, the contents of ph_srq are
+*	undefined.
+* SEE ALSO
+*	ci_query_srq, ci_modify_srq, ci_destroy_srq
+******
+*/
+
+
+/****f* Verbs/ci_modify_srq
+* NAME
+*	ci_modify_srq -- Modify attributes of the specified SRQ.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_srq) (
+	IN	const	ib_srq_handle_t		h_srq,
+	IN	const	ib_srq_attr_t* const	p_srq_attr,
+	IN	const	ib_srq_attr_mask_t	srq_attr_mask,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine is used to modify the attributes of the SRQ.  On
+*	successful completion, the requested changes are performed and the
+*	SRQ is transitioned to the required state.
+* PARAMETERS
+*	h_srq
+*		[in] Handle to the SRQ whose attributes are to be modified.
+*	p_srq_attr
+*		[in] Attributes to apply to the SRQ.
+*	srq_attr_mask
+*		[in] Flags specifying valid fields in the ib_srq_attr_t structure.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful and the SRQ attributes are modified
+*		as requested.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the requested operation.
+*	IB_INVALID_QP_HANDLE
+*		Invalid SRQ handle was passed.
+*	IB_UNSUPPORTED
+*		Requested operation is not supported.
+*	IB_QP_INVALID_STATE
+*		Invalid state transition request.  The current SRQ state does not
+*		allow the requested change.
+*	IB_INVALID_PARAMETER
+*		The parameter p_srq_attr is not valid.
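+* EXAMPLE
+*	A minimal sketch (illustrative only) of growing an SRQ's work-request
+*	capacity.  pfn_modify_srq stands in for the provider's entry point;
+*	the ib_srq_attr_t field max_wr and the IB_SRQ_MAX_WR mask bit are
+*	assumed to be as defined in ib_types.h.
+*
+*		ib_srq_attr_t	srq_attr;
+*		ib_api_status_t	status;
+*
+*		memset( &srq_attr, 0, sizeof(srq_attr) );
+*		srq_attr.max_wr = 1024;		/* new maximum outstanding WRs */
+*
+*		/* Only the fields selected by the mask are examined. */
+*		status = pfn_modify_srq( h_srq, &srq_attr, IB_SRQ_MAX_WR, NULL );
+*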
+* SEE ALSO
+*	ci_create_srq, ci_destroy_srq, ci_query_srq
+******
+*/
+
+
+/****f* Verbs/ci_query_srq
+* NAME
+*	ci_query_srq -- Query the current SRQ attributes
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_srq) (
+	IN	const	ib_srq_handle_t		h_srq,
+	OUT		ib_srq_attr_t* const	p_srq_attr,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine queries the current attributes for the SRQ corresponding
+*	to h_srq.  The attributes are returned in p_srq_attr.  Depending on the
+*	current state of the SRQ, some of the fields in the attribute structure
+*	may not be valid.
+* PARAMETERS
+*	h_srq
+*		[in] Handle to the SRQ for which the attributes are being
+*		retrieved
+*	p_srq_attr
+*		[out] Pointer to the ib_srq_attr_t structure where the current
+*		attributes of the SRQ are returned.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The values returned in p_srq_attr are valid.
+*	IB_INVALID_QP_HANDLE
+*		The h_srq supplied is not a valid handle.
+*	IB_INVALID_PARAMETER
+*		Parameter p_srq_attr is not valid.
+* SEE ALSO
+*	ci_create_srq, ci_destroy_srq, ci_modify_srq
+*****
+*/
+
+
+/****f* Verbs/ci_destroy_srq
+* NAME
+*	ci_destroy_srq -- Destroy the specified shared receive queue.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_srq) (
+	IN	const	ib_srq_handle_t		h_srq );
+/*
+* DESCRIPTION
+*	Destroys the associated SRQ.  The SRQ could have outstanding work
+*	requests when this call is made.  Any outstanding work requests *SHALL
+*	NOT* be completed after this routine returns.
+* PARAMETERS
+*	h_srq
+*		[in] Handle to the SRQ that needs to be destroyed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The intent to destroy this SRQ is registered and no further
+*		work requests will be processed.  When no pending callbacks are in
+*		progress, the destroy_callback function is invoked, which marks
+*		the destruction of the resource.  The consumer can be guaranteed
+*		that no future callbacks will be propagated on behalf of this
+*		resource.
+*	IB_INVALID_QP_HANDLE
+*		The handle passed is invalid.
+*	IB_RESOURCE_BUSY
+*		The SRQ is still bound to one or more queue pairs.
+* NOTES
+*	This call cannot be made from any of the notification functions invoked
+*	by the Verbs driver, for example, the completion handler or the async
+*	error callback provided during the ci_open_ca() call.  The call will
+*	block until all references to this object are released, which includes
+*	all the pending callbacks returning back to the verbs provider driver.
+* SEE ALSO
+*	ci_create_srq, ci_query_srq, ci_modify_srq
+******
+*/
+
+
+/****f* Verbs/ci_create_qp
+* NAME
+*	ci_create_qp -- Create a Queue Pair for the specified HCA
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_qp) (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	void			*qp_context,
+	IN	const	ib_qp_create_t		*p_create_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr,
+	OUT		ib_qp_handle_t		*ph_qp,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	A new queue pair is created on the specified HCA.  The initial set of
+*	parameters is provided by the p_create_attr parameter.  The newly
+*	created queue pair, with its attributes such as the qp number, is
+*	returned in the p_qp_attr structure.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to Protection Domain
+*	qp_context
+*		[in] A user specified context passed in an asynchronous error
+*		callback.
+*	p_create_attr
+*		[in] Initial attributes with which the qp must be created.
+*	p_qp_attr
+*		[out] Attributes of the newly created queue pair.
+*	ph_qp
+*		[out] Handle to the newly created queue pair.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The queue pair is successfully created with the provided initial
+*		attributes.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete request.
+*	IB_INVALID_PD_HANDLE
+*		pd_handle supplied in the qp_create_attr is invalid
+*	IB_INVALID_CQ_HANDLE
+*		cq_handle supplied for send/receive is invalid.
+*	IB_INVALID_SERVICE_TYPE
+*		Invalid service type.
+*	IB_INVALID_MAX_WRS
+*		Max WRS capacity exceeded
+*	IB_INVALID_MAX_SGE
+*		Max scatter gather element request exceeds HCA capability
+*	IB_UNSUPPORTED
+*		Unreliable datagram not supported
+*	IB_INVALID_PARAMETER
+*		The parameter p_create_attr is invalid.
+* NOTES
+*	If any of the initial parameters is not valid, the queue pair is not
+*	created.  If the routine call is not successful, the contents of
+*	p_qp_attr and ph_qp are undefined.
+* SEE ALSO
+*	ci_create_spl_qp, ci_query_qp, ci_modify_qp, ci_destroy_qp
+******
+*/
+
+/****f* Verbs/ci_create_spl_qp
+* NAME
+*	ci_create_spl_qp -- Create a special queue pair.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_spl_qp) (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	uint8_t			port_num,
+	IN	const	void			*qp_context,
+	IN	const	ib_qp_create_t		*p_create_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr,
+	OUT		ib_qp_handle_t		*ph_qp );
+/*
+* DESCRIPTION
+*	Create and return a handle to a queue pair of the indicated service
+*	type on the specified port.  QP service types can be one of SMI, GSI,
+*	Raw IPv6, or Raw Ethertype, as specified in qp_type_t.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to the PD on which the special queue pair is to be
+*		created.
+*	port_num
+*		[in] Port number for which this special queue pair is created.
+*	qp_context
+*		[in] User specified context passed during the async error callback
+*		routine.
+*	p_create_attr
+*		[in] Initial set of attributes with which the queue pair is to be
+*		created.
+*	p_qp_attr
+*		[out] QP attributes after the qp is successfully created.
+*
+*	ph_qp
+*		[out] Handle to the special qp after its creation.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The special queue pair of the requested service type is created.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy the request.
+*	IB_NOT_FOUND
+*		Indicated port guid is not found on this HCA.
+*	IB_INVALID_CQ_HANDLE
+*		Invalid cq handle passed to send/receive side.
+*	IB_INVALID_MAX_WRS
+*		Max WRS capacity exceeded
+*	IB_INVALID_MAX_SGE
+*		Max scatter gather element request exceeds HCA capability
+*	IB_RESOURCE_BUSY
+*		Applicable to SMI/GSI qp's.  This return code means that the
+*		SMI/GSI QP is already allocated.
+*	IB_INVALID_PD
+*		Invalid protection domain supplied.
+*	IB_INVALID_PORT
+*		Invalid port number supplied.
+*	IB_UNSUPPORTED
+*		Raw datagram unsupported.
+*	IB_INVALID_PARAMETER
+*		The parameter p_create_attr is not valid.
+* NOTES
+*	This verb is privileged and only available in kernel mode.  User mode
+*	clients that need access to SMI/GSI QPs should gain it via a higher
+*	level of abstraction.
+* SEE ALSO
+*	ci_create_qp, ci_query_qp, ci_modify_qp, ci_destroy_qp
+******
+*/
+
+/****f* Verbs/ci_modify_qp
+* NAME
+*	ci_modify_qp -- Modify attributes of the specified QP.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_qp) (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	ib_qp_mod_t		*p_modify_attr,
+	OUT		ib_qp_attr_t		*p_qp_attr OPTIONAL,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine is used to modify the qp states or other attributes of
+*	the QP.  On successful completion, the requested state transition is
+*	performed and the QP is transitioned to the required state.
+* PARAMETERS
+*	h_qp
+*		[in] Handle to the queue pair whose state is to be modified.
+*	p_modify_attr
+*		[in] Specifies what attributes need to be modified in the qp.
+*	p_qp_attr
+*		[out] QP attributes after the qp is successfully modified.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful and the QP attributes are modified
+*		to the requested state.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the requested operation.
+*	IB_INVALID_QP_HANDLE
+*		Invalid QP handle was passed.
+*	IB_UNSUPPORTED
+*		Requested operation is not supported, e.g. atomic operations.
+*	IB_QP_INVALID_STATE
+*		Invalid state transition request.  Current QP state not in
+*		allowable state.
+*	IB_INVALID_PKEY
+*		The P_KEY specified in the modify request is not a valid entry in
+*		the P_KEY table, or the index is out of range.
+*	IB_INVALID_APM_STATE
+*		Invalid automatic path migration state specified in the request.
+*	IB_INVALID_PARAMETER
+*		The parameter p_modify_attr is not valid.
+* NOTES
+*	Refer to Table 79 in chapter 11, Volume 1 of the InfiniBand
+*	Specifications.
+* SEE ALSO
+*	ci_create_qp, ci_create_spl_qp, ci_query_qp
+******
+*/
+
+/****f* Verbs/ci_query_qp
+* NAME
+*	ci_query_qp -- Query the current QP attributes
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_qp) (
+	IN	const	ib_qp_handle_t		h_qp,
+	OUT		ib_qp_attr_t* const	p_qp_attr,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine queries the current attributes for the QP corresponding
+*	to h_qp.  The attributes are returned in p_qp_attr.  Depending on the
+*	current state of the QP, some of the fields in the attribute structure
+*	may not be valid.
+* PARAMETERS
+*	h_qp
+*		[in] Handle to the QP for which the attributes are being retrieved
+*	p_qp_attr
+*		[out] Pointer to the ib_qp_attr_t structure where the current
+*		attributes of the QP are returned.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The values returned in p_qp_attr are valid.
+*	IB_INVALID_QP_HANDLE
+*		The h_qp supplied is not a valid handle.
+*	IB_INVALID_PARAMETER
+*		Parameter p_qp_attr is not valid.
+* SEE ALSO
+*	ci_create_qp
+*****
+*/
+
+/****f* Verbs/ci_destroy_qp
+* NAME
+*	ci_destroy_qp -- Destroy the specified Queue Pair.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_qp) (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	uint64_t		timewait );
+/*
+* DESCRIPTION
+*	Destroys the associated QP.  The QP could have outstanding work
+*	requests when this call is made.  Any outstanding work requests *SHALL
+*	NOT* be completed after this routine returns.
+* PARAMETERS
+*	h_qp
+*		[in] Handle to the qp that needs to be destroyed.
+*	timewait
+*		[in] Time (in microseconds) at which the QP should leave
+*		the timewait state and can be reused.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The intent to destroy this queue pair is registered and no further
+*		work requests will be processed.  When no pending callbacks are in
+*		progress, the destroy_callback function is invoked, which marks
+*		the destruction of the resource.  The consumer can be guaranteed
+*		that no future callbacks will be propagated on behalf of this
+*		resource.
+*	IB_INVALID_QP_HANDLE
+*		The handle passed is invalid.
+*	IB_RESOURCE_BUSY
+*		If the queue pair is an unreliable datagram service type and
+*		is still bound to a multicast group.
+* NOTES
+*	This call cannot be made from any of the notification functions invoked
+*	by the Verbs driver, for example, the completion handler or the async
+*	error callback provided during the ci_open_ca() call.  The call will
+*	block until all references to this object are released, which includes
+*	all the pending callbacks returning back to the verbs provider driver.
+*
+*	If the CQ associated with this QP is still not destroyed, the
+*	completions on behalf of this QP can still be pulled via the
+*	ci_poll_cq() call.  Any resources allocated by the Channel Interface
+*	must be deallocated as part of this call.
+* SEE ALSO
+*	ci_create_qp, ci_create_spl_qp
+******
+*/
+
+/****f* Verbs/ci_create_cq
+* NAME
+*	ci_create_cq -- Create a completion queue (CQ) on the specified HCA.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_cq) (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	void			*cq_context,
+	IN OUT		uint32_t* const		p_size,
+	OUT		ib_cq_handle_t		*ph_cq,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	The consumer must specify the minimum number of entries in the CQ.  The
+*	exact number of entries the Channel Interface created is returned to
+*	the client.  If the requested number of entries is larger than what
+*	this HCA can support, an error is returned.
+* PARAMETERS
+*	h_ca
+*		[in] A handle to the open HCA
+*	cq_context
+*		[in] The context that is passed during the completion callbacks.
+*	p_size
+*		[in out] Points to a variable containing the number of CQ entries
+*		requested by the consumer.  On completion points to the size of
+*		the CQ that was created by the provider.
+*	ph_cq
+*		[out] Handle to the newly created CQ on successful creation.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful.
+*	IB_INVALID_CA_HANDLE
+*		The h_ca passed is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete request.
+*	IB_INVALID_CQ_SIZE
+*		Requested CQ size is not supported.
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+* NOTES
+*	The consumer would need a way to retrieve the cq_handle associated with
+*	the context being returned, so it can perform ci_poll_cq() to retrieve
+*	completion queue entries.  The handle as such is not being passed,
+*	since there is no information in the handle that is visible to the
+*	consumer.  Passing a context directly helps avoid any reverse lookup
+*	that the consumer would need to perform in order to identify its own
+*	internal data structures needed to process this completion completely.
+* SEE ALSO
+*	ci_destroy_cq, ci_query_cq, ci_resize_cq
+******
+*/
+
+/****f* Verbs/ci_resize_cq
+* NAME
+*	ci_resize_cq -- Modify the maximum number of entries the CQ could hold.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_resize_cq) (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN OUT		uint32_t* const		p_size,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine allows the caller to modify the maximum number of entries
+*	the CQ could hold.  It is possible to resize the CQ while there are
+*	entries in the CQ, and with outstanding work requests that will
+*	generate completions.  If the CQ contains more entries than the newly
+*	requested size, an error is returned.
+* PARAMETERS
+*	h_cq
+*		[in] Completion Queue handle
+*	p_size
+*		[in out] This parameter indicates the requested size of the CQ.
+*		On successful completion, the current size allocated is returned
+*		in this same parameter.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The resize operation was successful.
+*	IB_INVALID_CQ_HANDLE
+*		The CQ handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete request.
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+*	IB_INVALID_CQ_SIZE
+*		Requested CQ size is not supported.
+*	IB_OVERFLOW
+*		The CQ has more entries than the resize request.  The CQ is not
+*		modified, and old entries still exist.
+* NOTES
+*	If the consumer wishes to resize the CQ smaller than originally
+*	created, it is recommended to retrieve all entries before performing a
+*	CQ resize operation.  On resize operations it is left to the verb
+*	implementer to actually reduce the entries or leave them as is.  The
+*	consumer must make no assumptions about successful completion and
+*	should rely only on the size returned in p_size.
+* SEE ALSO
+*	ci_create_cq
+******
+*/
+
+/****f* Verbs/ci_query_cq
+* NAME
+*	ci_query_cq -- Query the number of entries configured for the CQ.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_cq) (
+	IN	const	ib_cq_handle_t		h_cq,
+	OUT		uint32_t		*p_size,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine returns the maximum number of entries this completion
+*	queue is configured to hold.
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue
+*	p_size
+*		[out] The number of entries the completion queue is configured to
+*		hold
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The call completed successfully, and the returned values are valid
+*	IB_INVALID_CQ_HANDLE
+*		The cq_handle passed is invalid.
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+* SEE ALSO
+*	ci_create_cq, ci_resize_cq
+******
+*/
+
+/****f* Verbs/ci_destroy_cq
+* NAME
+*	ci_destroy_cq -- Destroy a completion queue.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_cq) (
+	IN	const	ib_cq_handle_t		h_cq );
+/*
+* DESCRIPTION
+*	Destroys a completion queue.  If any queue pairs are still bound
+*	to this CQ, the attempt to destroy will fail, and the CQ and associated
+*	resources are *NOT* destroyed.
+* PARAMETERS
+*	cq_handle
+*		[in] Handle to the cq that is to be destroyed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The intent to destroy the completion queue is registered
+*		successfully.  The destroy_callback function will be invoked when
+*		it is safe, and guarantees that no more completion callbacks will
+*		be invoked for this CQ.  Any pending CQ notifications are
+*		discarded.
+*	IB_INVALID_CQ_HANDLE
+*		The CQ handle is invalid.
+*	IB_RESOURCE_BUSY
+*		Queue pairs may still be bound to this completion queue.
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+* SEE ALSO
+*	ci_create_cq
+* NOTES
+*	This call cannot be made from any of the notification functions invoked
+*	by the Verbs driver, for example, the completion handler or the async
+*	error callback provided during the ci_open_ca() call.  The call will
+*	block until all references to this object are released, which includes
+*	all the pending callbacks returning back to the verbs provider driver.
+******
+*/
+
+
+
+/****f* Verbs/ci_register_mr
+* NAME
+*	ci_register_mr -- Register a memory region with the HCA.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_register_mr) (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	ib_mr_create_t* const	p_mr_create,
+	OUT		net32_t* const		p_lkey,
+	OUT		net32_t* const		p_rkey,
+	OUT		ib_mr_handle_t* const	ph_mr,
+	IN		boolean_t		um_call );
+/*
+* DESCRIPTION
+*	This routine registers a virtually contiguous region of memory with the
+*	HCA.  All memory regions that need to be used by the HCA must be
+*	registered prior to use in data transfer operations.  On successful
+*	completion, the region handle and lkey are returned.  If remote access
+*	rights are specified, then the rkey is also returned.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to the PD on which memory is being registered
+*	p_mr_create
+*		[in] Holds attributes for the region being registered.  Look at
+*		ib_mr_create_t for more details.
+*	p_lkey
+*		[out] Local key attributes of the registered memory region
+*	p_rkey
+*		[out] Remote key of the registered memory region.  The verbs
+*		provider is required to give this in the expected ordering on the
+*		wire.  When rkeys are exchanged between remote nodes, no swapping
+*		of this data will be performed.
+*	ph_mr
+*		[out] Handle to the registered memory region.  This handle is used
+*		when submitting work requests to refer to this region of memory.
+*	um_call
+*		[in] Boolean indicating whether the registration originated in
+*		user-mode.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Registration with the adapter was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+*	IB_INVALID_PD_HANDLE
+*		Invalid mr_pdhandle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+* NOTES
+*	In addition to registration, the routine also pins memory, so that the
+*	physical pages associated with the virtual addresses do not get swapped
+*	out during the time the HCA is attempting to transfer data to these
+*	addresses.  If the memory is not pinned, this could lead to data
+*	corruption and unpredictable behavior by the operating environment.
+*
+* SEE ALSO
+*	ci_deregister_mr, ci_query_mr, ci_register_pmr, ci_modify_mr,
+*	ci_register_smr
+******
+*/
+
+/****f* Verbs/ci_register_pmr
+* NAME
+*	ci_register_pmr -- Register a physical memory region with the HCA.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_register_pmr) (
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	ib_phys_create_t* const	p_pmr_create,
+	IN OUT		uint64_t* const		p_vaddr,
+	OUT		net32_t* const		p_lkey,
+	OUT		net32_t* const		p_rkey,
+	OUT		ib_mr_handle_t* const	ph_mr,
+	IN		boolean_t		um_call );
+/*
+* DESCRIPTION
+*	This routine registers an array of physical pages as a single virtually
+*	contiguous region with the HCA.  All memory regions that need to be
+*	used by the HCA must be registered prior to use in data transfer
+*	operations.  On successful completion, the region handle, lkey and rkey
+*	used for local and remote access authentication are returned.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to the PD on which memory is being registered
+*	p_pmr_create
+*		[in] Holds attributes for the region being registered.
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for
+*		the start of the physical region.  On output, references the
+*		actual virtual address assigned to the registered region.
+*	p_lkey
+*		[out] Local key of the registered memory region
+*	p_rkey
+*		[out] Remote key of the registered memory region.  The verbs
+*		provider is required to give this in the expected ordering on the
+*		wire.  When rkeys are exchanged between remote nodes, no swapping
+*		of this data will be performed.
+*	ph_mr
+*		[out] Handle to the registered memory region.  This handle is used
+*		when submitting work requests to refer to this region of memory.
+*	um_call
+*		[in] Boolean indicating whether the registration originated in
+*		user-mode.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Registration with the adapter was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		Invalid length or address in p_pmr_create.  Also returned if the
+*		page_size passed is not one of the page sizes supported by the
+*		HCA.
+*	IB_INVALID_PD_HANDLE
+*		Invalid mr_pdhandle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+* PORTABILITY
+*	Kernel Mode only
+* NOTES
+*	Remote and atomic access settings in ib_access_ctrl_t require local
+*	write access to be enabled; otherwise IB_INVALID_PERMISSION is
+*	returned.  The p_vaddr returned could be different from the vaddr
+*	specified in p_pmr_create.  If the requested virtual address offset in
+*	a page does not match, the channel interface is free to pick and assign
+*	a pseudo virtual address.  The address constructed is not a system
+*	virtual address, and is only meaningful to the adapter to which this
+*	registration is targeted.
+* SEE ALSO
+*	ci_deregister_mr, ci_query_mr, ci_register_mr, ci_modify_mr,
+*	ci_register_smr
+******
+*/
+
+/****f* Verbs/ci_query_mr
+* NAME
+*	ci_query_mr -- Query attributes of a registered memory region
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_mr) (
+	IN	const	ib_mr_handle_t		h_mr,
+	OUT		ib_mr_attr_t* const	p_mr_query );
+/*
+* DESCRIPTION
+*	This routine retrieves the attributes of a registered memory region.
+*	The memory handle could have been obtained via ci_register_mr or
+*	ci_register_pmr.
+* PARAMETERS
+*	h_mr
+*		[in] Memory handle for which the attributes need to be retrieved.
+*	p_mr_query
+*		[out] Attributes of the memory region associated with the memory
+*		handle.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The routine completed successfully and the attributes returned
+*		are valid.
+*	IB_INVALID_MR_HANDLE
+*		The memory handle is not valid.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+* NOTES
+*	Invalid handle checks are mere signature checks in kernel mode.
+*	Drivers in kernel are expected to be good corporate citizens.
+*	In user mode, proper ownership is determined before passing handles
+*	down to kernel to protect from rogue applications.
+* SEE ALSO
+*	ci_register_mr, ci_register_pmr
+******
+*/
+
+/****f* Verbs/ci_modify_mr
+* NAME
+*	ci_modify_mr -- Modify some or all parameters of a memory region.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_mr) (
+	IN	const	ib_mr_handle_t		h_mr,
+	IN	const	ib_mr_mod_t		mr_modify_mask,
+	IN	const	ib_mr_create_t* const	p_mr_create OPTIONAL,
+	OUT		net32_t* const		p_lkey,
+	OUT		net32_t* const		p_rkey,
+	IN	const	ib_pd_handle_t		h_pd OPTIONAL,
+	IN		boolean_t		um_call );
+/*
+* DESCRIPTION
+*	This routine modifies attributes of the specified memory region
+*	irrespective of whether the handle was obtained via ci_register_mr
+*	or ci_register_pmr.  This verb conceptually performs a de-registration
+*	followed by a ci_register_mr.
+* PARAMETERS
+*	h_mr
+*		[in] Handle to the memory region whose attributes are to be
+*		modified.
+*	mr_modify_mask
+*		[in] Command specifying which parts of the mem_region are valid.
+*		The command is specified as a bit mask.
+*	p_mr_create
+*		[in] Desired attributes that need to be modified for mem_handle.
+*		This is an optional parameter which can be NULL if mr_modify_mask
+*		is set to IB_MR_MOD_PD.
+*	p_lkey
+*		[out] The new l_key for this newly registered memory region.
+*	p_rkey
+*		[out] The new r_key for this newly registered memory region.
+*		The verbs provider is required to give this in the expected
+*		ordering on the wire.  When rkeys are exchanged between remote
+*		nodes, no swapping of this data will be performed.
+*	h_pd
+*		[in] This parameter is valid only if the IB_MR_MOD_PD flag is set
+*		in the mr_modify_mask parameter.  This field supplies the new
+*		protection domain with which the modified region should be
+*		associated.
+*	um_call
+*		[in] Boolean indicating whether the registration originated in
+*		user-mode.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The modify memory region request completed successfully.
+*	IB_RESOURCE_BUSY
+*		The memory region has windows bound to it.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*	IB_INVALID_MR_HANDLE
+*		The memory handle supplied is not a valid memory region handle.
+*	IB_INVALID_PERMISSION
+*		Invalid access rights specified.
+*	IB_INVALID_PARAMETER
+*		A reference to the lkey or rkey was not provided, or the specified
+*		modify mask is invalid.
+*	IB_INVALID_SETTING
+*		The specified memory region attributes are invalid.
+*	IB_INVALID_PD_HANDLE
+*		Protection domain handle supplied is not valid.
+* NOTES
+*	Remote and atomic access settings in ib_access_ctrl_t require local
+*	write access to be enabled.
+*	TBD: How to handle a shared memory region being passed to modify_mem?
+* SEE ALSO
+*	ci_register_mr, ci_register_pmr, ci_register_smr
+*******
+*/
+
+/****f* Verbs/ci_modify_pmr
+* NAME
+*	ci_modify_pmr -- Modify some or all parameters of a memory region.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_modify_pmr) (
+	IN	const	ib_mr_handle_t			h_mr,
+	IN	const	ib_mr_mod_t			mr_modify_mask,
+	IN	const	ib_phys_create_t* const		p_pmr_create,
+	IN OUT		uint64_t* const			p_vaddr,
+	OUT		net32_t* const			p_lkey,
+	OUT		net32_t* const			p_rkey,
+	IN	const	ib_pd_handle_t			h_pd OPTIONAL,
+	IN		boolean_t			um_call );
+/*
+* DESCRIPTION
+*	This routine modifies attributes of the specified memory region
+*	irrespective of whether the handle was obtained via ci_register_mr
+*	or ci_register_pmr.  This verb conceptually performs a de-registration
+*	followed by a ci_register_pmr.
+* PARAMETERS
+*	h_mr
+*		[in] Handle to the memory region whose attributes are to be
+*		modified.
+*	mr_modify_mask
+*		[in] Command specifying which parts of the mem_region are valid.
+*		The command is specified as a bit mask.
+*	p_pmr_create
+*		[in] Desired attributes that need to be modified for mem_handle.
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for
+*		the start of the physical region.  On output, references the
+*		actual virtual address assigned to the registered region.
+*	p_lkey
+*		[out] The new l_key for this newly registered physical memory
+*		region.
+*	p_rkey
+*		[out] The new r_key for this newly registered physical memory
+*		region.  The VPD is required to give this in the expected ordering
+*		on the wire.  When rkeys are exchanged between remote nodes, no
+*		swapping of this data will be performed.
+*	h_pd
+*		[in] This parameter is valid only if the IB_MR_MOD_PD flag is set
+*		in the mr_modify_mask parameter.  This field supplies the new
+*		protection domain with which the modified region should be
+*		associated.
+*	um_call
+*		[in] Boolean indicating whether the registration originated in
+*		user-mode.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The modify memory region request completed successfully.
+*	IB_RESOURCE_BUSY
+*		The memory region has windows bound to it.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*	IB_INVALID_MR_HANDLE
+*		The memory handle supplied is not a valid memory region handle.
+*	IB_INVALID_PERMISSION
+*		Invalid access rights specified.
+*	IB_INVALID_PARAMETER
+*		A reference to the virtual address, lkey, or rkey was not
+*		provided, or the specified modify mask is invalid.
+*	IB_INVALID_SETTING
+*		The specified memory region attributes are invalid.
+* PORTABILITY
+*	Kernel Mode only
+* NOTES
+*	Remote and atomic access settings in ib_access_ctrl_t require local
+*	write access to be enabled.
+* SEE ALSO
+*	ci_register_mr, ci_register_pmr, ci_register_smr
+*********
+*/
+
+/****f* Verbs/ci_register_smr
+* NAME
+*	ci_register_smr -- Register a memory region using the same physical
+*	pages as an existing memory region.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_register_smr) (
+	IN	const	ib_mr_handle_t		h_mr,
+	IN	const	ib_pd_handle_t		h_pd,
+	IN	const	ib_access_t		access_ctrl,
+	IN OUT		uint64_t* const		p_vaddr,
+	OUT		net32_t* const		p_lkey,
+	OUT		net32_t* const		p_rkey,
+	OUT		ib_mr_handle_t* const	ph_mr,
+	IN		boolean_t		um_call );
+/*
+* DESCRIPTION
+*	This routine registers a new memory region that shares the same set of
+*	physical pages associated with an existing memory handle.  For user
+*	mode applications, the process *must* own the memory handle for this
+*	call to be successful.
+* PARAMETERS
+*	h_mr
+*		[in] Handle to the memory region whose physical pages are being
+*		registered by this shared registration.
+*	h_pd
+*		[in] Handle to the PD on which memory is being registered
+*	access_ctrl
+*		[in] Memory access restrictions on the registered memory.
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for
+*		the start of the physical region.  On output, references the
+*		actual virtual address assigned to the registered region.
+*	p_lkey
+*		[out] L_KEY for this memory region.
+*	p_rkey
+*		[out] R_KEY for this memory region.  This is valid only when
+*		remote access is enabled for this region.  The verbs provider is
+*		required to give this in the expected ordering on the wire.  When
+*		rkeys are exchanged between remote nodes, no swapping of this data
+*		will be performed.
+*	ph_mr
+*		[out] Handle to the registered memory region.  This handle is used
+*		when submitting work requests to refer to this region of memory.
+*	um_call
+*		[in] Boolean indicating whether the registration originated in
+*		user-mode.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The call is successful and the new region handle returned is
+*		valid.
+*	IB_INVALID_MR_HANDLE
+*		mr_handle is invalid.
+*	IB_INVALID_PD_HANDLE
+*		mr_pdhandle supplied is invalid.
+*	IB_INVALID_PERMISSION
+*		Invalid access rights passed in mr_access.
+* NOTES
+*	ISSUE: how to deal with ci_deregister_mr, ci_modify_mr, ci_modify_pmr:
+*	should we treat them as memory windows and fail those if a shared
+*	region was registered?
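+* EXAMPLE
+*	A hedged sketch (illustrative only) of sharing the physical pages of
+*	an existing region under different access rights.  pfn_register_smr
+*	stands in for the provider's entry point, and the IB_AC_* flags are
+*	assumed to be the ib_access_t bits defined in ib_types.h.
+*
+*		uint64_t	vaddr = 0;	/* let the CI assign the address */
+*		net32_t		lkey, rkey;
+*		ib_mr_handle_t	h_new_mr;
+*		ib_api_status_t	status;
+*
+*		status = pfn_register_smr( h_mr, h_pd,
+*			IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE,
+*			&vaddr, &lkey, &rkey, &h_new_mr, TRUE );
+*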
+* SEE ALSO
+*	ci_register_mr, ci_register_pmr, ci_modify_mr, ci_modify_pmr
+******
+*/
+
+/****f* Verbs/ci_deregister_mr
+* NAME
+*	ci_deregister_mr -- Deregister a memory region
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_deregister_mr) (
+	IN	const	ib_mr_handle_t		h_mr );
+/*
+* DESCRIPTION
+*	This routine deregisters a memory region from the HCA.  The region can
+*	be de-registered only if there are no existing memory windows bound to
+*	this region, and if no existing shared memory regions were registered
+*	that refer to the same set of physical pages associated with the memory
+*	handle.  If there are outstanding work requests referring to this
+*	memory region, then after this call is successful, those work requests
+*	will complete with WRS_LOCAL_PROTECTION_ERR.
+* PARAMETERS
+*	h_mr
+*		[in] Memory handle that is being de-registered.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory de-registration was successful
+*	IB_INVALID_MR_HANDLE
+*		The memory handle supplied is not a valid memory handle.
+*	IB_RESOURCE_BUSY
+*		The memory region has active windows bound.
+* NOTES
+* SEE ALSO
+*	ci_register_mr, ci_register_pmr, ci_register_smr
+******
+*/
+
+
+#ifdef CL_KERNEL
+/****f* Verbs/ci_alloc_mlnx_fmr
+* NAME
+*	ci_alloc_mlnx_fmr -- Allocate a Mellanox fast memory region with the
+*	HCA.
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_alloc_mlnx_fmr) (
+	IN	const	ib_pd_handle_t			h_pd,
+	IN		mlnx_fmr_create_t const		*p_fmr_create,
+	OUT		mlnx_fmr_handle_t* const	ph_fmr );
+/*
+* DESCRIPTION
+*	This routine allocates a Mellanox fast memory region (FMR) on the
+*	specified protection domain.
+* PARAMETERS
+*	h_pd
+*		[in] Handle to the PD on which the fast memory region is being
+*		allocated.
+*	p_fmr_create
+*		[in] Attributes of the FMR to allocate.
+*	ph_fmr
+*		[out] Handle to the fast memory region.  This handle is used when
+*		mapping and unmapping the FMR.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		Allocation with the adapter was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+*	IB_INVALID_PD_HANDLE
+*		Invalid pd handle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+*
+* NOTES
+*	The alloc operation does not map or pin any memory.  In order to use
+*	the FMR, the user needs to call the map routine.
+*
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	ci_dealloc_mlnx_fmr, ci_map_phys_mlnx_fmr, ci_unmap_mlnx_fmr
+******
+*/
+
+
+/****f* Verbs/ci_map_phys_mlnx_fmr
+* NAME
+*	ci_map_phys_mlnx_fmr -- Map a Mellanox fast memory region to a
+*	given page list.
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_map_phys_mlnx_fmr) (
+	IN	const	mlnx_fmr_handle_t	h_fmr,
+	IN	const	uint64_t* const		paddr_list,
+	IN	const	int			list_len,
+	IN OUT		uint64_t* const		p_vaddr,
+	OUT		net32_t* const		p_lkey,
+	OUT		net32_t* const		p_rkey );
+/*
+* DESCRIPTION
+*	This routine maps a previously allocated fast memory region to the
+*	supplied list of physical pages.
+*
+* PARAMETERS
+*	h_fmr
+*		[in] Handle to the fast memory region that these pages map to
+*	paddr_list
+*		[in] Array of physical addresses
+*	list_len
+*		[in] Number of pages in the list
+*	p_vaddr
+*		[in/out] On input, references the requested virtual address for
+*		the start of the FMR.  On output, references the actual virtual
+*		address assigned to the FMR.
+*	p_lkey
+*		[out] The local access key associated with this registered memory
+*		region.
+*	p_rkey
+*		[out] A key that may be used by a remote end-point when performing
+*		RDMA or atomic operations to this registered memory region.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The mapping was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+*	IB_INVALID_PD_HANDLE
+*		Invalid pd handle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+*
+* NOTES
+*	The alloc operation does not map or pin any memory.  In order to use
+*	the FMR, the user needs to call the map routine.
+*
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	ci_dealloc_mlnx_fmr, ci_alloc_mlnx_fmr, ci_unmap_mlnx_fmr
+******
+*/
+
+
+/****f* Verbs/ci_unmap_mlnx_fmr
+* NAME
+*	ci_unmap_mlnx_fmr -- Unmap a Mellanox fast memory region.
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_unmap_mlnx_fmr) (
+	IN	const	mlnx_fmr_handle_t	*ph_fmr );
+/*
+* DESCRIPTION
+*	This routine unmaps a previously mapped fast memory region.
+*
+* PARAMETERS
+*	ph_fmr
+*		[in] Handle to the fast memory region to unmap.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The unmap operation was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+*	IB_INVALID_PD_HANDLE
+*		Invalid pd handle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	ci_dealloc_mlnx_fmr, ci_alloc_mlnx_fmr, ci_map_phys_mlnx_fmr
+******
+*/
+
+
+/****f* Verbs/ci_dealloc_mlnx_fmr
+* NAME
+*	ci_dealloc_mlnx_fmr -- Deallocate a Mellanox fast memory region.
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_dealloc_mlnx_fmr) (
+	IN		mlnx_fmr_handle_t const	h_fmr );
+/*
+* DESCRIPTION
+*	This routine deallocates a previously allocated fast memory region.
+*
+* PARAMETERS
+*	h_fmr
+*		[in] Handle to the fast memory region to deallocate.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		Deallocation was successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		One of the input pointers was NULL.
+*	IB_INVALID_PD_HANDLE
+*		Invalid pd handle
+*	IB_INVALID_PERMISSION
+*		Invalid access rights.
+*
+* NOTES
+*	This is a Mellanox specific extension to verbs.
+*
+* SEE ALSO
+*	ci_alloc_mlnx_fmr, ci_map_phys_mlnx_fmr, ci_unmap_mlnx_fmr
+******
+*/
+#endif
+
+
+/****f* Verbs/ci_create_mw
+* NAME
+*	ci_create_mw -- Create a memory window entry for later use
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_mw) (
+	IN	const	ib_pd_handle_t		h_pd,
+	OUT		net32_t* const		p_rkey,
+	OUT		ib_mw_handle_t		*ph_mw,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine allocates a memory window.  This window entry cannot be
+*	used for remote access unless the window is bound to a memory region
+*	via the ci_bind_mw call.
+* PARAMETERS
+*	h_pd
+*		[in] Protection domain handle to use for this memory window
+*	p_rkey
+*		[out] Remote access key that can be exchanged with a remote node
+*		to perform RDMA transactions on this memory window.  This R_KEY is
+*		not bound to any memory regions until a successful call to
+*		ci_bind_mw.  The VPD is required to give this in the expected
+*		ordering on the wire.  When rkeys are exchanged between remote
+*		nodes, no swapping of this data will be performed.
+*	ph_mw
+*		[out] Handle to the newly created memory window.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory window allocation completed successfully.
+*	IB_INSUFFICIENT_RESOURCES
+*		Not enough resources to complete the request.
+
+
+/****f* Verbs/ci_create_mw
+* NAME
+*	ci_create_mw -- Create a memory window entry for later use
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_create_mw) (
+	IN	const	ib_pd_handle_t		h_pd,
+	OUT		net32_t* const		p_rkey,
+	OUT		ib_mw_handle_t		*ph_mw,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine allocates a memory window. This window entry cannot be used
+*	for remote access unless this window is bound to a memory region
+*	via the ci_bind_mw call.
+* PARAMETERS
+*	h_pd
+*		[in] Protection domain handle to use for this memory window
+*	p_rkey
+*		[out] Remote access key that can be exchanged with a remote node to
+*		perform RDMA transactions on this memory window. This R_KEY is not
+*		bound to any memory region until a successful call to ci_bind_mw.
+*		The verbs provider is required to give this in the expected ordering
+*		on the wire; when R_KEYs are exchanged between remote nodes, no
+*		swapping of this data will be performed.
+*	ph_mw
+*		[out] Handle to the newly created memory window.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory window allocation completed successfully.
+*	IB_INSUFFICIENT_RESOURCES
+*		Not enough resources to complete the request.
+*	IB_INVALID_PD_HANDLE
+*		pd_handle supplied is invalid.
+*	IB_INVALID_PARAMETER
+*		One of the pointers was not valid.
+* SEE ALSO
+*	ci_destroy_mw, ci_query_mw, ci_bind_mw
+******
+*/
+
+/****f* Verbs/ci_query_mw
+* NAME
+*	ci_query_mw -- Query memory window attributes for memory window handle
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_query_mw) (
+	IN	const	ib_mw_handle_t		h_mw,
+	OUT		ib_pd_handle_t		*ph_pd,
+	OUT		net32_t* const		p_rkey,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine retrieves the current R_KEY and protection domain
+*	handle associated with this mw_handle.
+* PARAMETERS
+*	h_mw
+*		[in] Memory window handle whose attributes are being retrieved.
+*	ph_pd
+*		[out] Protection domain handle associated with this mw_handle
+*	p_rkey
+*		[out] Current R_KEY associated with this mw_handle. The verbs
+*		provider is required to give this in the expected ordering on the
+*		wire; when R_KEYs are exchanged between remote nodes, no swapping
+*		of this data will be performed.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The query operation completed successfully.
+*	IB_INVALID_MW_HANDLE
+*		mw_handle supplied is an invalid handle
+*	IB_INVALID_PARAMETER
+*		One of the pointers was not valid.
+* SEE ALSO
+*	ci_create_mw, ci_bind_mw
+******
+*/
+
+/****f* Verbs/ci_bind_mw
+* NAME
+*	ci_bind_mw -- Bind a memory window to a memory region.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_bind_mw) (
+	IN	const	ib_mw_handle_t		h_mw,
+	IN	const	ib_qp_handle_t		h_qp,
+	IN		ib_bind_wr_t* const	p_mw_bind,
+	OUT		net32_t* const		p_rkey );
+/*
+* DESCRIPTION
+*	This routine posts a request to bind a memory window to a registered
+*	memory region. If the queue pair was created with selectable signaling,
+*	once the operation completes successfully a completion queue entry is
+*	generated indicating the bind operation has completed. The IB_POST_FENCE
+*	option can be specified to cause the requestor to wait until outstanding
+*	RDMA operations are completed.
+* PARAMETERS
+*	h_mw
+*		[in] Handle to memory window that needs to be bound to a memory region.
+*	h_qp
+*		[in] Queue Pair to which this bind request is to be posted.
+*	p_mw_bind
+*		[in] Input parameters for this bind request, consisting of the
+*		virtual address range of the bind request etc.
+*	p_rkey
+*		[out] On successful completion, the new R_KEY is returned.
+*		The verbs provider is required to give this in the expected ordering
+*		on the wire; when R_KEYs are exchanged between remote nodes, no
+*		swapping of this data will be performed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory bind operation was posted successfully.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*		No more free WQEs to post this request.
+*	IB_INVALID_MW_HANDLE
+*		memw_handle supplied is an invalid memory window handle.
+*	IB_INVALID_PERMISSION
+*		Invalid access rights specified in request
+*	IB_INVALID_SERVICE_TYPE
+*		Invalid service type for this qp_handle.
+*	IB_INVALID_PARAMETER
+*		One of the pointers was not valid.
+*	IB_INVALID_RKEY
+*		R_KEY specified is invalid for the memory region being bound.
+*	IB_INVALID_QP_HANDLE
+*		h_qp supplied was an invalid QP handle.
+* NOTES
+*	- A previously bound memory window can be bound to the same or a
+*	different memory region.
+*
+*	- A bind operation with a length of 0 invalidates any previous binding
+*	and returns an R_KEY in the unbound state.
+* SEE ALSO
+*	ci_create_mw
+******
+*/
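+
+/*
+* EXAMPLE
+*	A minimal sketch of typical memory window usage: create the window on
+*	a PD, bind it to a registered region through a QP, then hand the
+*	returned R_KEY to the remote peer. p_ifc, h_pd, h_qp and the
+*	ib_bind_wr_t contents (declared in ib_types.h) are assumed to exist.
+*
+*		ib_mw_handle_t	h_mw;
+*		net32_t		rkey, bound_rkey;
+*		ib_bind_wr_t	bind_wr;	// address range, length, access rights
+*		ib_api_status_t	status;
+*
+*		status = p_ifc->create_mw( h_pd, &rkey, &h_mw, NULL );
+*		if( status != IB_SUCCESS )
+*			return status;
+*
+*		// ... fill bind_wr with the target region and access rights ...
+*		status = p_ifc->bind_mw( h_mw, h_qp, &bind_wr, &bound_rkey );
+*
+*		// bound_rkey is already wire-ordered and may be sent as-is.
+*
+*		p_ifc->destroy_mw( h_mw );	// implicitly unbinds the window
+*/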
+/****f* Verbs/ci_destroy_mw
+* NAME
+*	ci_destroy_mw -- Destroy a memory window.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_destroy_mw) (
+	IN	const	ib_mw_handle_t		h_mw );
+/*
+* DESCRIPTION
+*	This routine deallocates a window entry created via ci_create_mw.
+*	Once this operation is complete, the channel interface guarantees that
+*	no future remote accesses will be permitted to this window entry.
+* PARAMETERS
+*	h_mw
+*		[in] Handle to the memory window that is being destroyed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The destroy window request completed successfully.
+*	IB_INVALID_MW_HANDLE
+*		memw_handle supplied is invalid.
+* NOTES
+*	Deallocating a memory window implicitly unbinds it once the call
+*	completes successfully. Any future remote access with the same R_KEY
+*	fails with a protection violation.
+* SEE ALSO
+*	ci_create_mw
+******
+*/
+
+/****f* Verbs/ci_post_send
+* NAME
+*	ci_post_send -- Post a work request to the send side of a queue pair.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_post_send) (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN		ib_send_wr_t* const	p_send_wr,
+	OUT		ib_send_wr_t		**pp_failed );
+/*
+* DESCRIPTION
+*	This routine posts a work request to the send side of the queue pair.
+*	The different types of work request that can be posted are explained in
+*	the ib_send_wr_t structure. For exact details on ordering rules please
+*	consult Volume 1 of the InfiniBand Architecture Specification. If more
+*	requests are posted than the queue is configured for, an immediate
+*	error is returned.
+* PARAMETERS
+*	h_qp
+*		[in] The queue pair to which this work request is being submitted.
+*	p_send_wr
+*		[in] A pointer to the head of the list that must be posted to the
+*		Send Queue.
+*	pp_failed
+*		[out] A pointer to the head of the list that holds the failed WRs.
+*		If all the entries provided are posted with the CI, then this
+*		parameter would be set to NULL.
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		All the work requests were posted successfully.
+*	IB_INVALID_QP_HANDLE
+*		The qp_handle supplied is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*		There are no more work elements in the channel interface to
+*		process this request, and the total number of outstanding work
+*		requests has been exceeded.
+*	IB_INVALID_WR_TYPE
+*		The work request type was not valid.
+*	IB_INVALID_QP_STATE
+*		The queue pair is either in Reset, Init, RTR or Error state.
+*	IB_INVALID_MAX_SGE
+*		The work request has more scatter/gather elements than the QP is
+*		configured for.
+*	IB_UNSUPPORTED
+*		Atomic or reliable datagram requests are not supported by this HCA.
+*	IB_INVALID_ADDR_HANDLE
+*		Address handle supplied in the work request is invalid.
+* NOTES
+*	Please refer to Table 81 and Table 82 for allowed operation types
+*	on different types of queue pairs, and the different modifiers
+*	acceptable for the work request for different QP service types.
+* SEE ALSO
+*	ci_post_recv, ci_poll_cq
+******
+*/
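+
+/*
+* EXAMPLE
+*	A minimal sketch of posting a chain of send work requests and
+*	recovering from a partial post, assuming the p_next chaining field of
+*	ib_send_wr_t declared in ib_types.h; p_ifc, h_qp and the WR list are
+*	assumed to exist, and consumer_requeue is a hypothetical helper.
+*
+*		ib_send_wr_t	*p_wr_list;	// head of a consumer-built WR chain
+*		ib_send_wr_t	*p_failed;
+*		ib_api_status_t	status;
+*
+*		status = p_ifc->post_send( h_qp, p_wr_list, &p_failed );
+*		if( status != IB_SUCCESS )
+*		{
+*			// p_failed references the first WR that was not posted;
+*			// everything before it is already on the send queue.
+*			while( p_failed )
+*			{
+*				consumer_requeue( p_failed );	// hypothetical helper
+*				p_failed = p_failed->p_next;
+*			}
+*		}
+*/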
+/****f* Verbs/ci_post_srq_recv
+* NAME
+*	ci_post_srq_recv -- Post a work request to a shared receive queue.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_post_srq_recv) (
+	IN	const	ib_srq_handle_t		h_srq,
+	IN		ib_recv_wr_t* const	p_recv_wr,
+	OUT		ib_recv_wr_t		**pp_failed );
+/*
+* DESCRIPTION
+*	This routine queues a work request to a shared receive queue. The
+*	work request holds the necessary data to satisfy an incoming receive
+*	message. If an attempt is made to queue more work requests than the
+*	queue has room for, an error is returned.
+* PARAMETERS
+*	h_srq
+*		[in] Handle to the shared receive queue to which the receive work
+*		request is being posted.
+*	p_recv_wr
+*		[in] Holds the WRs to be posted to the receive queue.
+*	pp_failed
+*		[out] If any entry could not be posted with the CI, then this points
+*		to the first WR that completed unsuccessfully. If all entries are
+*		posted, then this field is set to NULL on successful exit.
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		The work request was successfully queued to the SRQ.
+*	IB_INVALID_SRQ_HANDLE
+*		srq_handle supplied is not valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		The SRQ has exceeded its configured receive queue depth.
+*	IB_INVALID_WR_TYPE
+*		Invalid work request type found in the request.
+* SEE ALSO
+*	ci_create_srq, ci_post_recv
+******
+*/
+
+
+
+/****f* Verbs/ci_post_recv
+* NAME
+*	ci_post_recv -- Post a work request to the receive queue of a queue pair.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_post_recv) (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN		ib_recv_wr_t* const	p_recv_wr,
+	OUT		ib_recv_wr_t		**pp_failed );
+/*
+* DESCRIPTION
+*	This routine queues a work request to the receive side of a queue
+*	pair. The work request holds the necessary data to satisfy an incoming
+*	receive message. If an attempt is made to queue more work requests than
+*	the queue has room for, an error is returned.
+* PARAMETERS
+*	h_qp
+*		[in] Handle to the queue pair to which the receive work request is
+*		being posted.
+*	p_recv_wr
+*		[in] Holds the WRs to be posted to the receive queue.
+*	pp_failed
+*		[out] If any entry could not be posted with the CI, then this points
+*		to the first WR that completed unsuccessfully. If all entries are
+*		posted, then this field is set to NULL on successful exit.
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		The work request was successfully queued to the receive side of
+*		the QP.
+*	IB_INVALID_QP_HANDLE
+*		qp_handle supplied is not valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		The QP has exceeded its configured receive queue depth.
+*	IB_INVALID_WR_TYPE
+*		Invalid work request type found in the request.
+*	IB_INVALID_QP_STATE
+*		QP was in reset or init state.
+*		(TBD: there may be an errata that allows posting in init state)
+* SEE ALSO
+*	ci_post_send, ci_poll_cq.
+******
+*/
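+
+/*
+* EXAMPLE
+*	A minimal sketch of replenishing the receive queue after completions
+*	have been consumed. The same pattern applies to ci_post_srq_recv with
+*	an SRQ handle. p_ifc, h_qp and the consumer-built receive WR chain
+*	are assumed to exist.
+*
+*		ib_recv_wr_t	*p_recv_list;	// chain of receive WRs
+*		ib_recv_wr_t	*p_failed;
+*		ib_api_status_t	status;
+*
+*		status = p_ifc->post_recv( h_qp, p_recv_list, &p_failed );
+*		if( status == IB_INSUFFICIENT_RESOURCES )
+*		{
+*			// The receive queue is full; p_failed holds the WRs
+*			// that must be reposted once entries complete.
+*		}
+*/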
+/****f* Verbs/ci_peek_cq
+* NAME
+*	ci_peek_cq
+*
+* DESCRIPTION
+*	Returns the number of entries currently on the completion queue.
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_peek_cq) (
+	IN	const	ib_cq_handle_t		h_cq,
+	OUT		uint32_t* const		p_n_cqes );
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue to peek.
+*
+*	p_n_cqes
+*		[out] The number of completion entries on the CQ.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The peek operation completed successfully.
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue entry count was not provided.
+*	IB_UNSUPPORTED
+*		This operation is not supported by the channel adapter.
+*
+* NOTES
+*	The value returned is a snapshot of the number of completion queue
+*	entries currently on the completion queue. Support for this operation
+*	by a channel adapter vendor is optional.
+*
+* SEE ALSO
+*	ci_create_cq, ci_poll_cq, ci_enable_cq_notify, ci_enable_ncomp_cq_notify
+*****/
+
+/****f* Verbs/ci_poll_cq
+* NAME
+*	ci_poll_cq -- Retrieve a work completion record from a completion queue
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_poll_cq) (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN OUT		ib_wc_t** const		pp_free_wclist,
+	OUT		ib_wc_t** const		pp_done_wclist );
+/*
+* DESCRIPTION
+*	This routine retrieves a work completion entry from the specified
+*	completion queue. The contents of the data returned in a work completion
+*	is specified in ib_wc_t.
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the completion queue being polled.
+*	pp_free_wclist
+*		[in out] A list of work completion structures provided by the
+*		consumer for the channel interface to return completed Completion
+*		Queue entries. If not all the entries are consumed, this list holds
+*		the unused completion entries provided back to the consumer.
+*	pp_done_wclist
+*		[out] A list of work completions retrieved from the completion queue
+*		and successfully processed.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Poll completed successfully and found one or more entries. If on
+*		completion the pp_free_wclist is empty, then there are potentially
+*		more entries and the consumer must continue to retrieve entries.
+*	IB_INVALID_CQ_HANDLE
+*		The cq_handle supplied is not valid.
+*	IB_NOT_FOUND
+*		There were no completion entries found in the specified CQ.
+* SEE ALSO
+*	ci_create_cq, ci_post_send, ci_post_recv, ci_bind_mw
+******
+*/
+
+/****f* Verbs/ci_enable_cq_notify
+* NAME
+*	ci_enable_cq_notify -- Invoke the Completion handler, on next entry added.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_enable_cq_notify) (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN	const	boolean_t		solicited );
+/*
+* DESCRIPTION
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next completion queue entry is added to this CQ.
+*	Please refer to Volume 1 of the InfiniBand Architecture Specification
+*	for a complete description.
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the CQ on which the notification is being enabled.
+*	solicited
+*		[in] A boolean flag indicating whether the request is to generate a
+*		notification on the next entry or on the next solicited entry
+*		being added to the completion queue.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The notification request was registered successfully.
+*	IB_INVALID_CQ_HANDLE
+*		cq_handle supplied is not a valid handle.
+* NOTES
+*	The consumer cannot rely on a notification request alone without
+*	emptying entries from the CQ: if a consumer registers for notification
+*	in the completion callback before pulling entries from the CQ via
+*	ci_poll_cq, no notification is generated for completions already in
+*	the CQ. In the example below, if there are no calls to ci_poll_cq()
+*	after the ci_enable_cq_notify(), the consumer does not get a completion
+*	notification callback for any CQ entries added before the
+*	ci_enable_cq_notify() call. To comply with the verbs specification, the
+*	consumer is therefore supposed to perform a ci_poll_cq() after the
+*	ci_enable_cq_notify() is made, to retrieve any entries that might have
+*	been added to the CQ before the CI registered the notification enable.
+*
+*	while( (ret_val = ci_poll_cq( cq_handle, &free_list, &done_list ))
+*		== IB_SUCCESS )
+*	{
+*		process entries;
+*	}
+*	if( ret_val == IB_NOT_FOUND )
+*		ci_enable_cq_notify( cq_handle, FALSE );
+*	// Need to perform a ci_poll_cq()
+*	// after the enable.
+* SEE ALSO
+*	ci_create_cq, ci_peek_cq, ci_poll_cq, ci_enable_ncomp_cq_notify
+******
+*/
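+
+/*
+* EXAMPLE
+*	A minimal sketch of draining a CQ with ci_poll_cq. The consumer
+*	supplies a free list of ib_wc_t structures (assumed chained through
+*	the p_next field declared in ib_types.h) and processes the returned
+*	done list; process_wc is a hypothetical consumer helper.
+*
+*		ib_wc_t		wc[4];
+*		ib_wc_t		*p_free, *p_done, *p_wc;
+*		ib_api_status_t	status;
+*		int		i;
+*
+*		do
+*		{
+*			// (Re)build the free list before each poll.
+*			for( i = 0; i < 3; i++ )
+*				wc[i].p_next = &wc[i + 1];
+*			wc[3].p_next = NULL;
+*			p_free = wc;
+*
+*			status = p_ifc->poll_cq( h_cq, &p_free, &p_done );
+*			if( status == IB_SUCCESS )
+*			{
+*				for( p_wc = p_done; p_wc; p_wc = p_wc->p_next )
+*					process_wc( p_wc );
+*			}
+*		} while( status == IB_SUCCESS );
+*		// IB_NOT_FOUND means the CQ is empty; rearm via
+*		// ci_enable_cq_notify and poll once more (see NOTES above).
+*/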
+/****f* Verbs/ci_enable_ncomp_cq_notify
+* NAME
+*	ci_enable_ncomp_cq_notify -- Invoke the Completion handler when the next
+*	N completions are added.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_enable_ncomp_cq_notify) (
+	IN	const	ib_cq_handle_t		h_cq,
+	IN	const	uint32_t		n_cqes );
+/*
+* DESCRIPTION
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next N completions have been added to this CQ.
+* PARAMETERS
+*	h_cq
+*		[in] Handle to the CQ on which the notification is being enabled.
+*	n_cqes
+*		[in] The number of completion queue entries to be added to the
+*		completion queue before notifying the client. This value must be
+*		greater than or equal to one and less than or equal to the size
+*		of the completion queue.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The notification request was registered successfully.
+*	IB_INVALID_CQ_HANDLE
+*		cq_handle supplied is not a valid handle.
+*	IB_INVALID_PARAMETER
+*		The requested number of completion queue entries was invalid.
+*	IB_UNSUPPORTED
+*		This operation is not supported by the channel adapter.
+*
+* NOTES
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next N completions have been added to this CQ regardless
+*	of the completion type (solicited or unsolicited). Any CQ entries that
+*	existed before the rearm is enabled will not result in a call to the
+*	handler. Support for this operation by a channel adapter vendor is
+*	optional.
+*
+* SEE ALSO
+*	ci_create_cq, ci_peek_cq, ci_poll_cq, ci_enable_cq_notify
+******
+*/
+
+/****f* Verbs/ci_attach_mcast
+* NAME
+*	ci_attach_mcast -- Attach a queue pair to a multicast group
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_attach_mcast) (
+	IN	const	ib_qp_handle_t		h_qp,
+	IN	const	ib_gid_t		*p_mcast_gid,
+	IN	const	ib_net16_t		mcast_lid,
+	OUT		ib_mcast_handle_t	*ph_mcast,
+	IN OUT		ci_umv_buf_t		*p_umv_buf OPTIONAL );
+/*
+* DESCRIPTION
+*	This routine attaches the given qp_handle to a multicast gid as specified
+*	by the mcast_gid parameter.
+* PARAMETERS
+*	h_qp
+*		[in] Queue pair handle which needs to be added to the multicast group
+*		on the adapter.
+*	p_mcast_gid
+*		[in] IPv6 address associated with this multicast group.
+*	mcast_lid
+*		[in] The multicast group LID value.
+*	ph_mcast
+*		[out] Multicast handle holding the association of this queue pair
+*		to the multicast group.
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The queue pair handle was successfully added to the multicast
+*		group.
+*	IB_INVALID_QP_HANDLE
+*		qp_handle supplied is invalid.
+*	IB_INVALID_SERVICE_TYPE
+*		Queue pair handle supplied is not of unreliable datagram type.
+*	IB_INVALID_GID
+*		The supplied address is not a valid multicast IPv6 address.
+*	IB_INVALID_LID
+*		The supplied LID is not a valid multicast LID.
+*	IB_UNSUPPORTED
+*		Multicast is not supported by this HCA.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete request.
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+* SEE ALSO
+*	ci_create_qp, ci_detach_mcast
+******
+*/
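+
+/*
+* EXAMPLE
+*	A minimal sketch of attaching an unreliable datagram QP to a multicast
+*	group and detaching it again (ci_detach_mcast is documented below).
+*	The MGID and MLID would normally come from a completed MCMemberRecord
+*	join; p_ifc and h_qp are assumed to exist.
+*
+*		ib_gid_t		mgid;	// group GID from the join response
+*		ib_net16_t		mlid;	// group LID from the join response
+*		ib_mcast_handle_t	h_mcast;
+*		ib_api_status_t		status;
+*
+*		status = p_ifc->attach_mcast( h_qp, &mgid, mlid, &h_mcast, NULL );
+*		if( status == IB_SUCCESS )
+*		{
+*			// ... receive multicast traffic on h_qp ...
+*			p_ifc->detach_mcast( h_mcast );
+*		}
+*/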
+
+
+/****f* Verbs/ci_detach_mcast
+* NAME
+*	ci_detach_mcast -- Detach a queue pair from a multicast group
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_detach_mcast) (
+	IN	const	ib_mcast_handle_t	h_mcast );
+/*
+* DESCRIPTION
+*	This routine detaches a queue pair from its previously associated
+*	multicast group.
+* PARAMETERS
+*	h_mcast
+*		[in] The multicast handle passed back to the consumer after the
+*		ci_attach_mcast call.
+* RETURN VALUE
+*	IB_SUCCESS
+*		The qp was successfully detached from the multicast group.
+*	IB_INVALID_MCAST_HANDLE
+*		mcast_handle supplied is an invalid handle
+*	IB_INVALID_PARAMETER
+*		One of the parameters was NULL.
+* SEE ALSO
+*	ci_attach_mcast
+******
+*/
+
+/****f* Verbs/ci_local_mad
+* NAME
+*	ci_local_mad -- Request a MAD to be processed by the local adapter.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(*ci_local_mad) (
+	IN	const	ib_ca_handle_t		h_ca,
+	IN	const	uint8_t			port_num,
+	IN	const	ib_av_attr_t		*p_src_av_attr,
+	IN	const	ib_mad_t		*p_mad_in,
+	OUT		ib_mad_t		*p_mad_out );
+/*
+* DESCRIPTION
+*	This routine is OPTIONAL for the channel interface. It is required for
+*	adapters that do not implement agents such as the Subnet Management
+*	Agent (SMA) or the GSA in the verbs provider driver or hardware; for
+*	all such adapters the management of the special queue pairs happens
+*	above the channel interface. This routine is used to perform local
+*	operations, since there is no agent below the channel interface. For
+*	example, if a Subnet Management Packet (SMP) to set PORT_STATE is
+*	received, the reception is processed above the channel interface, then
+*	this call is made to set the port state on the local adapter. On
+*	successful return, the response is generated and sent to the Subnet
+*	Manager.
+* PARAMETERS
+*	h_ca
+*		[in] A handle to the channel adapter that should process the MAD.
+*		This must be the same adapter that the MAD was received on.
+*	port_num
+*		[in] Port number to which this request is directed.
+*	p_src_av_attr
+*		[in] Address vector attributes of the source of the MAD.
+*	p_mad_in
+*		[in] Pointer to a management datagram (MAD) structure containing
+*		the command to be processed.
+*	p_mad_out
+*		[out] Response packet after processing the command. The storage for
+*		this must be allocated by the consumer.
+* RETURN VALUE
+*	IB_SUCCESS
+*		Command processed successfully.
+*	IB_INVALID_CA_HANDLE
+*		The HCA handle supplied is not valid.
+*	IB_INVALID_PORT
+*		The port number supplied is invalid.
+* PORTABILITY
+*	Kernel Mode only
+* NOTES
+*	This call is provided to aid adapters that do not have agent
+*	functionality built into the channel interface. Some adapters do have
+*	a local processor to process these packets, hence even for local port
+*	management we can use the same mechanism used to configure external
+*	nodes, with a hop counter = 1 in the MAD packets. If the CA indicates
+*	that it has a local SMA in ib_ca_attr_t, then the packets are posted
+*	to the adapter instead of making a private call to the adapter.
+* SEE ALSO
+*	ci_query_ca, ci_ca_attr_t
+******
+*/
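+
+/*
+* EXAMPLE
+*	A minimal sketch of forwarding a received SMP to the local adapter for
+*	processing. The MAD buffers are MAD_BLOCK_SIZE bytes and owned by the
+*	caller; p_ifc, h_ca, port_num and the source AV attributes are assumed
+*	to come from the receive path.
+*
+*		ib_av_attr_t	src_av_attr;	// from the incoming MAD
+*		ib_mad_t	*p_mad_in;	// received MAD
+*		ib_mad_t	*p_mad_out;	// caller-allocated response buffer
+*		ib_api_status_t	status;
+*
+*		status = p_ifc->local_mad( h_ca, port_num, &src_av_attr,
+*			p_mad_in, p_mad_out );
+*		if( status == IB_SUCCESS )
+*		{
+*			// p_mad_out now holds the response to return to the
+*			// requester (e.g. the Subnet Manager).
+*		}
+*/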
+
+
+/****f* Verbs/ci_vendor_call
+* NAME
+*	ci_vendor_call
+*
+* DESCRIPTION
+*	Performs a vendor specific CA interface function call.
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(*ci_vendor_call)(
+	IN	const	ib_ca_handle_t			h_ca,
+	IN	const	void* __ptr64*	const		handle_array	OPTIONAL,
+	IN		uint32_t			num_handles,
+	IN		ib_ci_op_t*	const		p_ci_op,
+	IN OUT		ci_umv_buf_t			*p_umv_buf OPTIONAL );
+/*
+* PARAMETERS
+*	h_ca
+*		[in] A handle to an opened CA.
+*
+*	handle_array
+*		[in] This parameter references an array containing handles of
+*		existing CA resources. This array should contain all of the
+*		handles specified in the vendor specific data provided with this
+*		call. All handles specified through this array are validated by
+*		the verbs provider driver to ensure that the number and type of
+*		handles are correct for the requested operation.
+*
+*	num_handles
+*		[in] The number of the handles in handle array. This count is
+*		verified by the access layer.
+*
+*	p_ci_op
+*		[in] A reference to the vendor specific CA interface data
+*		structure containing the operation parameters.
+*
+*	p_umv_buf
+*		[in out] Vendor specific parameter to support user mode IO.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The operation was successful.
+*
+*	IB_INVALID_CA_HANDLE
+*		The CA handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the vendor specific data was not provided.
+*
+*	IB_INVALID_HANDLE
+*		A handle specified in the handle array was invalid.
+*
+*	IB_INSUFFICIENT_MEMORY
+*		There was insufficient memory to perform the operation.
+*
+*	IB_ERROR
+*		An error occurred while processing the command. Additional
+*		error information is provided in the p_ci_op status field.
+*
+* NOTES
+*	This routine performs a vendor specific CA interface function call.
+*	The p_ci_op structure provides a means to pass vendor specific data to
+*	the verbs provider driver. If the vendor specific data contains handles,
+*	the client should provide the optional handle array that lists all of
+*	the handles specified in the vendor specific data. The handles in the
+*	handle array are restricted to the following types: ib_ca_handle_t,
+*	ib_pd_handle_t, ib_cq_handle_t, ib_av_handle_t, ib_qp_handle_t,
+*	ib_mr_handle_t, or ib_mw_handle_t.
+*	The contents of the handle array are verified by the access layer and
+*	the verbs provider driver.
+*
+* SEE ALSO
+*	ci_open_ca, ci_allocate_pd, ci_create_av, ci_create_cq,
+*	ci_create_qp, ci_register_mr, ci_register_pmr,
+*	ci_register_smr, ci_create_mw, ib_ci_op_t
+*****/
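+
+/*
+* EXAMPLE
+*	A minimal sketch of issuing a vendor specific operation against an
+*	opened CA. The layout of ib_ci_op_t is declared in ib_types.h; the
+*	handle array carries any resource handles referenced by the vendor
+*	data so the verbs provider can validate them. p_ifc, h_ca and h_qp
+*	are assumed to exist.
+*
+*		ib_ci_op_t	ci_op;		// vendor-defined operation block
+*		const void	*handles[1];
+*		ib_api_status_t	status;
+*
+*		// ... fill ci_op with the vendor specific command and buffers ...
+*		handles[0] = h_qp;		// handle referenced by the command
+*		status = p_ifc->vendor_call( h_ca, handles, 1, &ci_op, NULL );
+*		if( status == IB_ERROR )
+*		{
+*			// Vendor specific failure details are in ci_op's
+*			// status field.
+*		}
+*/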
+
+
+#define MAX_LIB_NAME	32
+
+#ifdef CL_KERNEL
+
+/****s* Verbs/ci_interface_t
+* NAME
+*	ci_interface_t -- Interface holding Channel Interface API's
+* PURPOSE
+*	The following structure is supplied by a Channel Interface
+*	providing verbs functionality.
+* SOURCE
+*/
+
+typedef struct _ci_interface
+{
+	net64_t			guid;
+
+	/*
+	 * Device object of the HCA. In Windows, this is a pointer to the PDO
+	 * for the HCA device.
+	 */
+	void			*p_hca_dev;
+
+	/*
+	 * Vendor ID, Device ID, Device Revision of the HCA.
+	 * libname refers to the user mode library to load to support direct
+	 * user mode IO. If the vendor does not support one, then the fields
+	 * must be initialized to all zeros.
+	 */
+	uint32_t		vend_id;
+	uint16_t		dev_id;
+	uint16_t		dev_revision;
+	char			libname[MAX_LIB_NAME];
+	/*
+	 * Version of the header file this interface export can handle
+	 */
+	uint32_t		version;
+
+	/*
+	 * HCA Access Verbs
+	 */
+	ci_open_ca		open_ca;
+	ci_um_open_ca_t		um_open_ca;
+	ci_query_ca		query_ca;
+	ci_modify_ca		modify_ca;
+	ci_close_ca		close_ca;
+	ci_um_close_ca_t	um_close_ca;
+
+	ci_vendor_call		vendor_call;
+
+	/*
+	 * Protection Domain
+	 */
+	ci_allocate_pd		allocate_pd;
+	ci_deallocate_pd	deallocate_pd;
+
+	/*
+	 * Address Vector Management Verbs
+	 */
+
+	ci_create_av		create_av;
+	ci_query_av		query_av;
+	ci_modify_av		modify_av;
+	ci_destroy_av		destroy_av;
+
+	/*
+	 * SRQ Management Verbs
+	 */
+	ci_create_srq		create_srq;
+	ci_modify_srq		modify_srq;
+	ci_query_srq		query_srq;
+	ci_destroy_srq		destroy_srq;
+
+	/*
+	 * QP Management Verbs
+	 */
+	ci_create_qp		create_qp;
+	ci_create_spl_qp	create_spl_qp;
+	ci_modify_qp		modify_qp;
+	ci_query_qp		query_qp;
+	ci_destroy_qp		destroy_qp;
+
+	/*
+	 * Completion Queue Management Verbs
+	 */
+	ci_create_cq		create_cq;
+	ci_resize_cq		resize_cq;
+	ci_query_cq		query_cq;
+	ci_destroy_cq		destroy_cq;
+
+	/*
+	 * Memory Management Verbs
+	 */
+	ci_register_mr		register_mr;
+	ci_register_pmr		register_pmr;
+	ci_query_mr		query_mr;
+	ci_modify_mr		modify_mr;
+	ci_modify_pmr		modify_pmr;
+	ci_register_smr		register_smr;
+	ci_deregister_mr	deregister_mr;
+
+	/*
+	 * Mellanox Fast Memory Management Verbs
+	 */
+	ci_alloc_mlnx_fmr	alloc_mlnx_fmr;
+	ci_map_phys_mlnx_fmr	map_phys_mlnx_fmr;
+	ci_unmap_mlnx_fmr	unmap_mlnx_fmr;
+	ci_dealloc_mlnx_fmr	dealloc_mlnx_fmr;
+
+
+	/*
+	 * Memory Window Verbs
+	 */
+	ci_create_mw		create_mw;
+	ci_query_mw		query_mw;
+	ci_bind_mw		bind_mw;
+	ci_destroy_mw		destroy_mw;
+
+	/*
+	 * Work Request Processing Verbs
+	 */
+	ci_post_send		post_send;
+	ci_post_recv		post_recv;
+	ci_post_srq_recv	post_srq_recv;
+
+	/*
+	 * Completion Processing and
+	 * Completion Notification Request Verbs.
+	 */
+	ci_peek_cq		peek_cq;		/* Optional */
+	ci_poll_cq		poll_cq;
+	ci_enable_cq_notify	enable_cq_notify;
+	ci_enable_ncomp_cq_notify	enable_ncomp_cq_notify;	/* Optional */
+
+	/*
+	 * Multicast Support Verbs
+	 */
+	ci_attach_mcast		attach_mcast;
+	ci_detach_mcast		detach_mcast;
+
+	/*
+	 * Local MAD support, for HCA's that do not support
+	 * Agents in the HW.
+	 */
+	ci_local_mad		local_mad;
+
+} ci_interface_t;
+/********/
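+
+/*
+* EXAMPLE
+*	A minimal sketch of how a verbs provider driver might fill one
+*	ci_interface_t per HCA and hand it to the access layer via
+*	ib_register_ca (documented below). The my_* entry points are
+*	hypothetical driver functions; every non-optional member must be
+*	valid before registering.
+*
+*		static ci_interface_t	ifc;
+*
+*		ifc.guid = ca_guid;		// node GUID of this HCA
+*		ifc.p_hca_dev = p_pdo;		// PDO of the HCA device
+*		ifc.version = verbs_version;	// header version compiled against
+*		ifc.open_ca = my_open_ca;
+*		ifc.query_ca = my_query_ca;
+*		// ... assign the remaining mandatory verbs the same way ...
+*		ifc.peek_cq = NULL;		// optional verbs presumably left
+*						// NULL when not implemented
+*
+*		status = ib_register_ca( &ifc );
+*/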
+
+
+/****f* Verbs/ib_register_ca
+* NAME
+*	ib_register_ca -- Inform the IB Access Layer about a new HCA
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t
+ib_register_ca (
+	IN	const	ci_interface_t		*p_ci );
+/*
+* DESCRIPTION
+*	This routine is called by an HCA kernel mode driver to inform the
+*	IB Access Layer about a new HCA that is ready for use. It is expected
+*	that the Access Layer could immediately turn around and call for
+*	services even before the call returns back to the HCA driver code.
+*	The HCA driver must initialize all resources and be ready to service
+*	any calls before adding its services to the IB Access Layer.
+*
+* PARAMETERS
+*	p_ci
+*		[in] Pointer to the ci_interface_t structure that has the function
+*		vector to support verbs functionality.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The registration is successful.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the CI interface structure was not provided.
+*
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient memory to satisfy resource requirements.
+*
+*	IB_DUPLICATE_CA
+*		HCA GUID is already registered with the IB Access Layer
+*
+* PORTABILITY
+*	Kernel Mode only
+*
+* SEE ALSO
+*	ib_deregister_ca, ci_interface_t
+*******/
+
+/****f* Verbs/ib_deregister_ca
+* NAME
+*	ib_deregister_ca -- Inform the IB Access Layer that this HCA is no
+*	longer available
+* SYNOPSIS
+*/
+AL_EXPORT ib_api_status_t
+ib_deregister_ca (
+	IN	const	net64_t			ca_guid );
+/*
+* DESCRIPTION
+*	This routine is called by the HCA driver when the HCA will no longer
+*	be available for services. The access layer is expected to return all
+*	resources back to the HCA driver and perform a ci_close_ca on this
+*	interface.
+*
+* PARAMETERS
+*	ca_guid
+*		[in] GUID of the HCA that is being removed.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The deregistration is successful.
+*
+*	IB_NOT_FOUND
+*		No HCA with the specified GUID is registered.
+*
+*	IB_BUSY
+*		The HCA is still in use and cannot be released.
+*
+* PORTABILITY
+*	Kernel Mode only
+*
+* SEE ALSO
+*	ib_register_ca, ci_interface_t
+*******/
+#endif /* CL_KERNEL */
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif	/* __cplusplus */
+
+#endif	// __IB_CI_H__
diff --git a/branches/Ndi/inc/iba/ib_types.h b/branches/Ndi/inc/iba/ib_types.h
new file mode 100644
index 00000000..e84252b1
--- /dev/null
+++ b/branches/Ndi/inc/iba/ib_types.h
@@ -0,0 +1,11333 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#if !defined(__IB_TYPES_H__)
+#define __IB_TYPES_H__
+
+#include
+#include
+
+#ifdef CL_KERNEL
+	#define AL_EXPORT
+	#define AL_API
+	#define AL_INLINE	static inline
+#else
+	#if defined( EXPORT_AL_SYMBOLS )
+		#define AL_EXPORT	__declspec(dllexport)
+	#else
+		#define AL_EXPORT	__declspec(dllimport)
+	#endif
+	#define AL_API		__stdcall
+	#define AL_INLINE	AL_EXPORT inline
+#endif	/* CL_KERNEL */
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+/****h* IBA Base/Constants
+* NAME
+*	Constants
+*
+* DESCRIPTION
+*	The following constants are used throughout the IBA code base.
+* +* Definitions are from the InfiniBand Architecture Specification v1.2 +* +*********/ + +/****d* IBA Base: Constants/MAD_BLOCK_SIZE +* NAME +* MAD_BLOCK_SIZE +* +* DESCRIPTION +* Size of a non-RMPP MAD datagram. +* +* SOURCE +*/ +#define MAD_BLOCK_SIZE 256 +/**********/ + +/****d* IBA Base: Constants/MAD_RMPP_HDR_SIZE +* NAME +* MAD_RMPP_HDR_SIZE +* +* DESCRIPTION +* Size of an RMPP header, including the common MAD header. +* +* SOURCE +*/ +#define MAD_RMPP_HDR_SIZE 36 +/**********/ + +/****d* IBA Base: Constants/MAD_RMPP_DATA_SIZE +* NAME +* MAD_RMPP_DATA_SIZE +* +* DESCRIPTION +* Size of an RMPP transaction data section. +* +* SOURCE +*/ +#define MAD_RMPP_DATA_SIZE (MAD_BLOCK_SIZE - MAD_RMPP_HDR_SIZE) +/**********/ + +/****d* IBA Base: Constants/MAD_BLOCK_GRH_SIZE +* NAME +* MAD_BLOCK_GRH_SIZE +* +* DESCRIPTION +* Size of a MAD datagram, including the GRH. +* +* SOURCE +*/ +#define MAD_BLOCK_GRH_SIZE 296 +/**********/ + +/****d* IBA Base: Constants/IB_LID_PERMISSIVE +* NAME +* IB_LID_PERMISSIVE +* +* DESCRIPTION +* Permissive LID +* +* SOURCE +*/ +#define IB_LID_PERMISSIVE 0xFFFF +/**********/ + +/****d* IBA Base: Constants/IB_DEFAULT_PKEY +* NAME +* IB_DEFAULT_PKEY +* +* DESCRIPTION +* P_Key value for the default partition. +* +* SOURCE +*/ +#define IB_DEFAULT_PKEY 0xFFFF +/**********/ + +/****d* IBA Base: Constants/IB_QP1_WELL_KNOWN_Q_KEY +* NAME +* IB_QP1_WELL_KNOWN_Q_KEY +* +* DESCRIPTION +* Well-known Q_Key for QP1 privileged mode access (15.4.2). +* +* SOURCE +*/ +#define IB_QP1_WELL_KNOWN_Q_KEY CL_NTOH32(0x80010000) +/*********/ + +#define IB_QP0 0 +#define IB_QP1 CL_NTOH32(1) + +#define IB_QP_PRIVILEGED_Q_KEY CL_NTOH32(0x80000000) + +/****d* IBA Base: Constants/IB_LID_UCAST_START +* NAME +* IB_LID_UCAST_START +* +* DESCRIPTION +* Lowest valid unicast LID value. +* +* SOURCE +*/ +#define IB_LID_UCAST_START_HO 0x0001 +#define IB_LID_UCAST_START (CL_HTON16(IB_LID_UCAST_START_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_UCAST_END +* NAME +* IB_LID_UCAST_END +* +* DESCRIPTION +* Highest valid unicast LID value. +* +* SOURCE +*/ +#define IB_LID_UCAST_END_HO 0xBFFF +#define IB_LID_UCAST_END (CL_HTON16(IB_LID_UCAST_END_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_MCAST_START +* NAME +* IB_LID_MCAST_START +* +* DESCRIPTION +* Lowest valid multicast LID value. +* +* SOURCE +*/ +#define IB_LID_MCAST_START_HO 0xC000 +#define IB_LID_MCAST_START (CL_HTON16(IB_LID_MCAST_START_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_MCAST_END +* NAME +* IB_LID_MCAST_END +* +* DESCRIPTION +* Highest valid multicast LID value. +* +* SOURCE +*/ +#define IB_LID_MCAST_END_HO 0xFFFE +#define IB_LID_MCAST_END (CL_HTON16(IB_LID_MCAST_END_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_DEFAULT_SUBNET_PREFIX +* NAME +* IB_DEFAULT_SUBNET_PREFIX +* +* DESCRIPTION +* Default subnet GID prefix. +* +* SOURCE +*/ +#define IB_DEFAULT_SUBNET_PREFIX (CL_HTON64(0xFE80000000000000ULL)) +/**********/ + +/****d* IBA Base: Constants/IB_NODE_NUM_PORTS_MAX +* NAME +* IB_NODE_NUM_PORTS_MAX +* +* DESCRIPTION +* Maximum number of ports in a single node (14.2.5.7). +* SOURCE +*/ +#define IB_NODE_NUM_PORTS_MAX 0xFE +/**********/ + +/****d* IBA Base: Constants/IB_INVALID_PORT_NUM +* NAME +* IB_INVALID_PORT_NUM +* +* DESCRIPTION +* Value used to indicate an invalid port number (14.2.5.10). 
+* +* SOURCE +*/ +#define IB_INVALID_PORT_NUM 0xFF +/*********/ + +/****d* IBA Base: Constants/IB_SUBNET_PATH_HOPS_MAX +* NAME +* IB_SUBNET_PATH_HOPS_MAX +* +* DESCRIPTION +* Maximum number of directed route switch hops in a subnet (14.2.1.2). +* +* SOURCE +*/ +#define IB_SUBNET_PATH_HOPS_MAX 64 +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_MAX_BLOCKS +* NAME +* IB_PKEY_MAX_BLOCKS +* +* DESCRIPTION +* Maximum number of PKEY blocks (14.2.5.7). +* +* SOURCE +*/ +#define IB_PKEY_MAX_BLOCKS 2048 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_MAX_BLOCK_ID +* NAME +* IB_MCAST_MAX_BLOCK_ID +* +* DESCRIPTION +* Maximum number of Multicast port mask blocks +* +* SOURCE +*/ +#define IB_MCAST_MAX_BLOCK_ID 511 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_BLOCK_ID_MASK_HO +* NAME +* IB_MCAST_BLOCK_ID_MASK_HO +* +* DESCRIPTION +* Mask (host order) to recover the Multicast block ID. +* +* SOURCE +*/ +#define IB_MCAST_BLOCK_ID_MASK_HO 0x000001FF +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_BLOCK_SIZE +* NAME +* IB_MCAST_BLOCK_SIZE +* +* DESCRIPTION +* Number of port mask entries in a multicast forwarding table block. +* +* SOURCE +*/ +#define IB_MCAST_BLOCK_SIZE 32 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_MASK_SIZE +* NAME +* IB_MCAST_MASK_SIZE +* +* DESCRIPTION +* Number of port mask bits in each entry in the multicast forwarding table. +* +* SOURCE +*/ +#define IB_MCAST_MASK_SIZE 16 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_MASK_HO +* NAME +* IB_MCAST_POSITION_MASK_HO +* +* DESCRIPTION +* Mask (host order) to recover the multicast block position. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_MASK_HO 0xF0000000 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_MAX +* NAME +* IB_MCAST_POSITION_MAX +* +* DESCRIPTION +* Maximum value for the multicast block position. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_MAX 0xF +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_SHIFT +* NAME +* IB_MCAST_POSITION_SHIFT +* +* DESCRIPTION +* Shift value to normalize the multicast block position value. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_SHIFT 28 +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_ENTRIES_MAX +* NAME +* IB_PKEY_ENTRIES_MAX +* +* DESCRIPTION +* Maximum number of PKEY entries per port (14.2.5.7). +* +* SOURCE +*/ +#define IB_PKEY_ENTRIES_MAX (IB_PKEY_MAX_BLOCKS * IB_PKEY_BLOCK_SIZE) +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_BASE_MASK +* NAME +* IB_PKEY_BASE_MASK +* +* DESCRIPTION +* Masks for the base P_Key value given a P_Key Entry. +* +* SOURCE +*/ +#define IB_PKEY_BASE_MASK (CL_HTON16(0x7FFF)) +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_TYPE_MASK +* NAME +* IB_PKEY_TYPE_MASK +* +* DESCRIPTION +* Masks for the P_Key membership type given a P_Key Entry. 
+* +* SOURCE +*/ +#define IB_PKEY_TYPE_MASK (CL_NTOH16(0x8000)) +/*********/ + +/****d* IBA Base: Constants/IB_DEFAULT_PARTIAL_PKEY +* NAME +* IB_DEFAULT_PARTIAL_PKEY +* +* DESCRIPTION +* 0x7FFF in network order +* +* SOURCE +*/ +#define IB_DEFAULT_PARTIAL_PKEY (CL_HTON16(0x7FFF)) +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_LID +* NAME +* IB_MCLASS_SUBN_LID +* +* DESCRIPTION +* Subnet Management Class, Subnet Manager LID routed (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_LID 0x01 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_DIR +* NAME +* IB_MCLASS_SUBN_DIR +* +* DESCRIPTION +* Subnet Management Class, Subnet Manager directed route (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_DIR 0x81 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_ADM +* NAME +* IB_MCLASS_SUBN_ADM +* +* DESCRIPTION +* Subnet Management Class, Subnet Administration (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_ADM 0x03 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_PERF +* NAME +* IB_MCLASS_PERF +* +* DESCRIPTION +* Subnet Management Class, Performance Manager (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_PERF 0x04 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_BM +* NAME +* IB_MCLASS_BM +* +* DESCRIPTION +* Subnet Management Class, Baseboard Manager (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_BM 0x05 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_DEV_MGMT +* NAME +* IB_MCLASS_DEV_MGMT +* +* DESCRIPTION +* Subnet Management Class, Device Management (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_DEV_MGMT 0x06 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_COMM_MGMT +* NAME +* IB_MCLASS_COMM_MGMT +* +* DESCRIPTION +* Subnet Management Class, Communication Management (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_COMM_MGMT 0x07 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SNMP +* NAME +* IB_MCLASS_SNMP +* +* DESCRIPTION +* Subnet Management Class, SNMP Tunneling (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SNMP 0x08 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MIN +* NAME +* IB_MCLASS_VENDOR_LOW_RANGE_MIN +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific Low Range Start +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_LOW_RANGE_MIN 0x09 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MAX +* NAME +* IB_MCLASS_VENDOR_LOW_RANGE_MAX +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific Low Range End +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_LOW_RANGE_MAX 0x0f +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_DEV_ADM +* NAME +* IB_MCLASS_DEV_ADM +* +* DESCRIPTION +* Subnet Management Class, Device Administration +* +* SOURCE +*/ +#define IB_MCLASS_DEV_ADM 0x10 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_BIS +* NAME +* IB_MCLASS_BIS +* +* DESCRIPTION +* Subnet Management Class, BIS +* +* SOURCE +*/ +#define IB_MCLASS_BIS 0x12 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MIN +* NAME +* IB_MCLASS_VENDOR_HIGH_RANGE_MIN +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific High Range Start +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_HIGH_RANGE_MIN 0x30 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MAX +* NAME +* IB_MCLASS_VENDOR_HIGH_RANGE_MAX +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific High Range End +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_HIGH_RANGE_MAX 0x4f +/**********/ + +/****f* IBA Base: Types/ib_class_is_vendor_specific_low +* NAME +* ib_class_is_vendor_specific_low +* +* 
DESCRIPTION
+*	Indicates whether the Class Code is a vendor specific class from
+*	the low range
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_class_is_vendor_specific_low(
+	IN	const	uint8_t	class_code )
+{
+	return( (class_code >= IB_MCLASS_VENDOR_LOW_RANGE_MIN) &&
+		(class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX)) ;
+}
+/*
+* PARAMETERS
+*	class_code
+*		[in] The Management Datagram Class Code
+*
+* RETURN VALUE
+*	TRUE if the class is in the Low range of Vendor Specific MADs
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	IB_MCLASS_VENDOR_LOW_RANGE_MIN, IB_MCLASS_VENDOR_LOW_RANGE_MAX
+*********/
+
+/****f* IBA Base: Types/ib_class_is_vendor_specific_high
+* NAME
+*	ib_class_is_vendor_specific_high
+*
+* DESCRIPTION
+*	Indicates whether the Class Code is a vendor specific class from
+*	the high range
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_class_is_vendor_specific_high(
+	IN	const	uint8_t	class_code )
+{
+	return( (class_code >= IB_MCLASS_VENDOR_HIGH_RANGE_MIN) &&
+		(class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX)) ;
+}
+/*
+* PARAMETERS
+*	class_code
+*		[in] The Management Datagram Class Code
+*
+* RETURN VALUE
+*	TRUE if the class is in the High range of Vendor Specific MADs
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	IB_MCLASS_VENDOR_HIGH_RANGE_MIN, IB_MCLASS_VENDOR_HIGH_RANGE_MAX
+*********/
+
+
+/****f* IBA Base: Types/ib_class_is_vendor_specific
+* NAME
+*	ib_class_is_vendor_specific
+*
+* DESCRIPTION
+*	Indicates whether the Class Code is a vendor specific class
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_class_is_vendor_specific(
+	IN	const	uint8_t	class_code )
+{
+	return( ib_class_is_vendor_specific_low(class_code) ||
+		ib_class_is_vendor_specific_high(class_code) );
+}
+/*
+* PARAMETERS
+*	class_code
+*		[in] The Management Datagram Class Code
+*
+* RETURN VALUE
+*	TRUE if the class is a Vendor Specific MAD
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_class_is_vendor_specific_low, ib_class_is_vendor_specific_high
+*********/
+
+/****f* IBA Base: Types/ib_class_is_rmpp
+* NAME
+*	ib_class_is_rmpp
+*
+* DESCRIPTION
+*	Indicates whether the Class Code supports RMPP
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_class_is_rmpp(
+	IN	const	uint8_t	class_code )
+{
+	return( (class_code == IB_MCLASS_SUBN_ADM) ||
+		(class_code == IB_MCLASS_DEV_MGMT) ||
+		(class_code == IB_MCLASS_DEV_ADM) ||
+		(class_code == IB_MCLASS_BIS) ||
+		ib_class_is_vendor_specific_high( class_code ) );
+}
+/*
+* PARAMETERS
+*	class_code
+*		[in] The Management Datagram Class Code
+*
+* RETURN VALUE
+*	TRUE if the class supports RMPP
+*	FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/*
+ *	MAD methods
+ */
+
+/****d* IBA Base: Constants/IB_MAX_METHODS
+* NAME
+*	IB_MAX_METHODS
+*
+* DESCRIPTION
+*	Total number of methods available to a class, not including the R-bit.
+*
+* SOURCE
+*/
+#define IB_MAX_METHODS						128
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_METHOD_RESP_MASK
+* NAME
+*	IB_MAD_METHOD_RESP_MASK
+*
+* DESCRIPTION
+*	Response mask to extract 'R' bit from the method field.
(13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_RESP_MASK 0x80 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GET +* NAME +* IB_MAD_METHOD_GET +* +* DESCRIPTION +* Get() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GET 0x01 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_SET +* NAME +* IB_MAD_METHOD_SET +* +* DESCRIPTION +* Set() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_SET 0x02 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GET_RESP +* NAME +* IB_MAD_METHOD_GET_RESP +* +* DESCRIPTION +* GetResp() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GET_RESP 0x81 +/**********/ + +#define IB_MAD_METHOD_DELETE 0x15 + +/****d* IBA Base: Constants/IB_MAD_METHOD_GETTABLE +* NAME +* IB_MAD_METHOD_GETTABLE +* +* DESCRIPTION +* SubnAdmGetTable() Method (15.2.2) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GETTABLE 0x12 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GETTABLE_RESP +* NAME +* IB_MAD_METHOD_GETTABLE_RESP +* +* DESCRIPTION +* SubnAdmGetTableResp() Method (15.2.2) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GETTABLE_RESP 0x92 + +/**********/ + +#define IB_MAD_METHOD_GETTRACETABLE 0x13 +#define IB_MAD_METHOD_GETMULTI 0x14 +#define IB_MAD_METHOD_GETMULTI_RESP 0x94 + + +/****d* IBA Base: Constants/IB_MAD_METHOD_SEND +* NAME +* IB_MAD_METHOD_SEND +* +* DESCRIPTION +* Send() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_SEND 0x03 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP +* NAME +* IB_MAD_METHOD_TRAP +* +* DESCRIPTION +* Trap() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_TRAP 0x05 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT +* NAME +* IB_MAD_METHOD_REPORT +* +* DESCRIPTION +* Report() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_REPORT 0x06 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP +* NAME +* IB_MAD_METHOD_REPORT_RESP +* +* DESCRIPTION +* ReportResp() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_REPORT_RESP 0x86 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS +* NAME +* IB_MAD_METHOD_TRAP_REPRESS +* +* DESCRIPTION +* TrapRepress() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_TRAP_REPRESS 0x07 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_BUSY +* NAME +* IB_MAD_STATUS_BUSY +* +* DESCRIPTION +* Temporarily busy, MAD discarded (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_REDIRECT +* NAME +* IB_MAD_STATUS_REDIRECT +* +* DESCRIPTION +* QP Redirection required (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_REDIRECT (CL_HTON16(0x0002)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER +* NAME +* IB_MAD_STATUS_UNSUP_CLASS_VER +* +* DESCRIPTION +* Unsupported class version (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD +* NAME +* IB_MAD_STATUS_UNSUP_METHOD +* +* DESCRIPTION +* Unsupported method (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_METHOD (CL_HTON16(0x0008)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR +* NAME +* IB_MAD_STATUS_UNSUP_METHOD_ATTR +* +* DESCRIPTION +* Unsupported method/attribute combination (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD +* NAME +* IB_MAD_STATUS_INVALID_FIELD 
+*
+* DESCRIPTION
+*	Attribute contains one or more invalid fields (13.4.7)
+*
+* SOURCE
+*/
+#define IB_MAD_STATUS_INVALID_FIELD			(CL_HTON16(0x001C))
+/**********/
+
+#define IB_MAD_STATUS_CLASS_MASK			(CL_HTON16(0xFF00))
+
+#define IB_SA_MAD_STATUS_SUCCESS			(CL_HTON16(0x0000))
+#define IB_SA_MAD_STATUS_NO_RESOURCES		(CL_HTON16(0x0100))
+#define IB_SA_MAD_STATUS_REQ_INVALID		(CL_HTON16(0x0200))
+#define IB_SA_MAD_STATUS_NO_RECORDS			(CL_HTON16(0x0300))
+#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS	(CL_HTON16(0x0400))
+#define IB_SA_MAD_STATUS_INVALID_GID		(CL_HTON16(0x0500))
+#define IB_SA_MAD_STATUS_INSUF_COMPS		(CL_HTON16(0x0600))
+
+#define IB_DM_MAD_STATUS_NO_IOC_RESP		(CL_HTON16(0x0100))
+#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES		(CL_HTON16(0x0200))
+#define IB_DM_MAD_STATUS_IOC_FAILURE		(CL_HTON16(0x8000))
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_CLASS_PORT_INFO
+* NAME
+*	IB_MAD_ATTR_CLASS_PORT_INFO
+*
+* DESCRIPTION
+*	ClassPortInfo attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_CLASS_PORT_INFO			(CL_NTOH16(0x0001))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NOTICE
+* NAME
+*	IB_MAD_ATTR_NOTICE
+*
+* DESCRIPTION
+*	Notice attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NOTICE					(CL_NTOH16(0x0002))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO
+* NAME
+*	IB_MAD_ATTR_INFORM_INFO
+*
+* DESCRIPTION
+*	InformInfo attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_INFORM_INFO				(CL_NTOH16(0x0003))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_DESC
+* NAME
+*	IB_MAD_ATTR_NODE_DESC
+*
+* DESCRIPTION
+*	NodeDescription attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NODE_DESC				(CL_NTOH16(0x0010))
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL
+* NAME
+*	IB_MAD_ATTR_PORT_SMPL_CTRL
+*
+* DESCRIPTION
+*	PortSamplesControl attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_SMPL_CTRL			(CL_NTOH16(0x0010))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_INFO
+* NAME
+*	IB_MAD_ATTR_NODE_INFO
+*
+* DESCRIPTION
+*	NodeInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NODE_INFO				(CL_NTOH16(0x0011))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT
+* NAME
+*	IB_MAD_ATTR_PORT_SMPL_RSLT
+*
+* DESCRIPTION
+*	PortSamplesResult attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_SMPL_RSLT			(CL_NTOH16(0x0011))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO
+* NAME
+*	IB_MAD_ATTR_SWITCH_INFO
+*
+* DESCRIPTION
+*	SwitchInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_SWITCH_INFO				(CL_NTOH16(0x0012))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS
+* NAME
+*	IB_MAD_ATTR_PORT_CNTRS
+*
+* DESCRIPTION
+*	PortCounters attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_CNTRS				(CL_NTOH16(0x0012))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_GUID_INFO
+* NAME
+*	IB_MAD_ATTR_GUID_INFO
+*
+* DESCRIPTION
+*	GUIDInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_GUID_INFO				(CL_NTOH16(0x0014))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_INFO
+* NAME
+*	IB_MAD_ATTR_PORT_INFO
+*
+* DESCRIPTION
+*	PortInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_INFO				(CL_NTOH16(0x0015))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE
+* NAME
+*	IB_MAD_ATTR_P_KEY_TABLE
+*
+* DESCRIPTION
+*	PartitionTable attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_P_KEY_TABLE				(CL_NTOH16(0x0016))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE
+* NAME
+*
IB_MAD_ATTR_SLVL_TABLE +* +* DESCRIPTION +* SL VL Mapping Table attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SLVL_TABLE (CL_NTOH16(0x0017)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION +* NAME +* IB_MAD_ATTR_VL_ARBITRATION +* +* DESCRIPTION +* VL Arbitration Table attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VL_ARBITRATION (CL_NTOH16(0x0018)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL +* NAME +* IB_MAD_ATTR_LIN_FWD_TBL +* +* DESCRIPTION +* Switch linear forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_LIN_FWD_TBL (CL_NTOH16(0x0019)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL +* NAME +* IB_MAD_ATTR_RND_FWD_TBL +* +* DESCRIPTION +* Switch random forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_RND_FWD_TBL (CL_NTOH16(0x001A)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL +* NAME +* IB_MAD_ATTR_MCAST_FWD_TBL +* +* DESCRIPTION +* Switch multicast forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_MCAST_FWD_TBL (CL_NTOH16(0x001B)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_RECORD +* NAME +* IB_MAD_ATTR_NODE_RECORD +* +* DESCRIPTION +* NodeRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_NODE_RECORD (CL_NTOH16(0x0011)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD +* NAME +* IB_MAD_ATTR_PORTINFO_RECORD +* +* DESCRIPTION +* PortInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PORTINFO_RECORD (CL_NTOH16(0x0012)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO_RECORD +* NAME +* IB_MAD_ATTR_SWITCH_INFO_RECORD +* +* DESCRIPTION +* SwitchInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SWITCH_INFO_RECORD (CL_NTOH16(0x0014)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD +* NAME +* IB_MAD_ATTR_LINK_RECORD +* +* DESCRIPTION +* LinkRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LINK_RECORD (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SM_INFO +* NAME +* IB_MAD_ATTR_SM_INFO +* +* DESCRIPTION +* SMInfo attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SMINFO_RECORD +* NAME +* IB_MAD_ATTR_SMINFO_RECORD +* +* DESCRIPTION +* SMInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SMINFO_RECORD (CL_NTOH16(0x0018)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_GUIDINFO_RECORD +* NAME +* IB_MAD_ATTR_GUIDINFO_RECORD +* +* DESCRIPTION +* GuidInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_GUIDINFO_RECORD (CL_NTOH16(0x0030)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VENDOR_DIAG +* NAME +* IB_MAD_ATTR_VENDOR_DIAG +* +* DESCRIPTION +* VendorDiag attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VENDOR_DIAG (CL_NTOH16(0x0030)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LED_INFO +* NAME +* IB_MAD_ATTR_LED_INFO +* +* DESCRIPTION +* LedInfo attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LED_INFO (CL_NTOH16(0x0031)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_RECORD +* NAME +* IB_MAD_ATTR_SERVICE_RECORD +* +* DESCRIPTION +* ServiceRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SERVICE_RECORD (CL_NTOH16(0x0031)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LFT_RECORD +* NAME +* IB_MAD_ATTR_LFT_RECORD +* +* DESCRIPTION +* LinearForwardingTableRecord attribute 
(15.2.5.6) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MFT_RECORD +* NAME +* IB_MAD_ATTR_MFT_RECORD +* +* DESCRIPTION +* MulticastForwardingTableRecord attribute (15.2.5.8) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MFT_RECORD (CL_NTOH16(0x0017)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD +* NAME +* IB_MAD_ATTR_PKEYTBL_RECORD +* +* DESCRIPTION +* PKEY Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PKEY_TBL_RECORD (CL_NTOH16(0x0033)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PATH_RECORD +* NAME +* IB_MAD_ATTR_PATH_RECORD +* +* DESCRIPTION +* PathRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PATH_RECORD (CL_NTOH16(0x0035)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VLARB_RECORD +* NAME +* IB_MAD_ATTR_VLARB_RECORD +* +* DESCRIPTION +* VL Arbitration Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VLARB_RECORD (CL_NTOH16(0x0036)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_RECORD +* NAME +* IB_MAD_ATTR_SLVL_RECORD +* +* DESCRIPTION +* SLtoVL Mapping Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SLVL_RECORD (CL_NTOH16(0x0013)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MCMEMBER_RECORD +* NAME +* IB_MAD_ATTR_MCMEMBER_RECORD +* +* DESCRIPTION +* MCMemberRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MCMEMBER_RECORD (CL_NTOH16(0x0038)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_TRACE_RECORD +* NAME +* IB_MAD_ATTR_TRACE_RECORD +* +* DESCRIPTION +* TraceRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_TRACE_RECORD (CL_NTOH16(0x0039)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MULTIPATH_RECORD +* NAME +* IB_MAD_ATTR_MULTIPATH_RECORD +* +* DESCRIPTION +* MultiPathRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MULTIPATH_RECORD (CL_NTOH16(0x003A)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SVC_ASSOCIATION_RECORD +* NAME +* IB_MAD_ATTR_SVC_ASSOCIATION_RECORD +* +* DESCRIPTION +* Service Association Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SVC_ASSOCIATION_RECORD (CL_NTOH16(0x003B)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO_RECORD +* NAME +* IB_MAD_ATTR_INFORM_INFO_RECORD +* +* DESCRIPTION +* InformInfo Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_INFORM_INFO_RECORD (CL_NTOH16(0x00F3)) + +/****d* IBA Base: Constants/IB_MAD_ATTR_IO_UNIT_INFO +* NAME +* IB_MAD_ATTR_IO_UNIT_INFO +* +* DESCRIPTION +* IOUnitInfo attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_IO_UNIT_INFO (CL_NTOH16(0x0010)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_IO_CONTROLLER_PROFILE +* NAME +* IB_MAD_ATTR_IO_CONTROLLER_PROFILE +* +* DESCRIPTION +* IOControllerProfile attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_IO_CONTROLLER_PROFILE (CL_NTOH16(0x0011)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_ENTRIES +* NAME +* IB_MAD_ATTR_SERVICE_ENTRIES +* +* DESCRIPTION +* ServiceEntries attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SERVICE_ENTRIES (CL_NTOH16(0x0012)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT +* NAME +* IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT +* +* DESCRIPTION +* DiagnosticTimeout attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: 
Constants/IB_MAD_ATTR_PREPARE_TO_TEST +* NAME +* IB_MAD_ATTR_PREPARE_TO_TEST +* +* DESCRIPTION +* PrepareToTest attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PREPARE_TO_TEST (CL_NTOH16(0x0021)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_ONCE +* NAME +* IB_MAD_ATTR_TEST_DEVICE_ONCE +* +* DESCRIPTION +* TestDeviceOnce attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_TEST_DEVICE_ONCE (CL_NTOH16(0x0022)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_LOOP +* NAME +* IB_MAD_ATTR_TEST_DEVICE_LOOP +* +* DESCRIPTION +* TestDeviceLoop attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_TEST_DEVICE_LOOP (CL_NTOH16(0x0023)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_DIAG_CODE +* NAME +* IB_MAD_ATTR_DIAG_CODE +* +* DESCRIPTION +* DiagCode attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_DIAG_CODE (CL_NTOH16(0x0024)) +/**********/ + +/****d* IBA Base: Constants/IB_NODE_TYPE_CA +* NAME +* IB_NODE_TYPE_CA +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NODE_TYPE_CA 0x01 +/**********/ + +/****d* IBA Base: Constants/IB_NODE_TYPE_SWITCH +* NAME +* IB_NODE_TYPE_SWITCH +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NODE_TYPE_SWITCH 0x02 +/**********/ + +/****d* IBA Base: Constants/IB_NODE_TYPE_ROUTER +* NAME +* IB_NODE_TYPE_ROUTER +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NODE_TYPE_ROUTER 0x03 +/**********/ + +/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_CA +* NAME +* IB_NOTICE_NODE_TYPE_CA +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NOTICE_NODE_TYPE_CA (CL_NTOH32(0x000001)) +/**********/ + +/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SWITCH +* NAME +* IB_NOTICE_NODE_TYPE_SWITCH +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NOTICE_NODE_TYPE_SWITCH (CL_NTOH32(0x000002)) +/**********/ + +/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_ROUTER +* NAME +* IB_NOTICE_NODE_TYPE_ROUTER +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2) +* +* SOURCE +*/ +#define IB_NOTICE_NODE_TYPE_ROUTER (CL_NTOH32(0x000003)) +/**********/ + +/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SUBN_MGMT +* NAME +* IB_NOTICE_NODE_TYPE_SUBN_MGMT +* +* DESCRIPTION +* Encoded generic node type used in MAD attributes (13.4.8.2). +* Note that this value is not defined for the NodeType field +* of the NodeInfo attribute (14.2.5.3). +* +* SOURCE +*/ +#define IB_NOTICE_NODE_TYPE_SUBN_MGMT (CL_NTOH32(0x000004)) +/**********/ + +/****d* IBA Base: Constants/IB_MTU_LEN_TYPE +* NAME +* IB_MTU_LEN_TYPE +* +* DESCRIPTION +* Encoded path MTU. +* 1: 256 +* 2: 512 +* 3: 1024 +* 4: 2048 +* 5: 4096 +* others: reserved +* +* SOURCE +*/ +#define IB_MTU_LEN_256 1 +#define IB_MTU_LEN_512 2 +#define IB_MTU_LEN_1024 3 +#define IB_MTU_LEN_2048 4 +#define IB_MTU_LEN_4096 5 + +#define IB_MIN_MTU IB_MTU_LEN_256 +#define IB_MAX_MTU IB_MTU_LEN_4096 + +/**********/ + +/****d* IBA Base: Constants/IB_PATH_SELECTOR_TYPE +* NAME +* IB_PATH_SELECTOR_TYPE +* +* DESCRIPTION +* Path selector. 
+* 0: greater than specified
+* 1: less than specified
+* 2: exactly the specified
+* 3: largest available
+*
+* SOURCE
+*/
+#define IB_PATH_SELECTOR_GREATER_THAN		0
+#define IB_PATH_SELECTOR_LESS_THAN		1
+#define IB_PATH_SELECTOR_EXACTLY		2
+#define IB_PATH_SELECTOR_LARGEST		3
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_NOTACTIVE
+* NAME
+*	IB_SMINFO_STATE_NOTACTIVE
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_NOTACTIVE		0
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING
+* NAME
+*	IB_SMINFO_STATE_DISCOVERING
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_DISCOVERING		1
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_STANDBY
+* NAME
+*	IB_SMINFO_STATE_STANDBY
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_STANDBY			2
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_MASTER
+* NAME
+*	IB_SMINFO_STATE_MASTER
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_MASTER			3
+/**********/
+
+/****d* IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK
+* NAME
+*	IB_PATH_REC_SELECTOR_MASK
+*
+* DESCRIPTION
+*	Mask for the selector field for path record MTU, rate,
+*	and packet lifetime.
+*
+* SOURCE
+*/
+#define IB_PATH_REC_SELECTOR_MASK		0xC0
+/**********/
+
+/****d* IBA Base: Constants/IB_MULTIPATH_REC_SELECTOR_MASK
+* NAME
+*	IB_MULTIPATH_REC_SELECTOR_MASK
+*
+* DESCRIPTION
+*	Mask for the selector field for multipath record MTU, rate,
+*	and packet lifetime.
+*
+* SOURCE
+*/
+#define IB_MULTIPATH_REC_SELECTOR_MASK		0xC0
+/**********/
+
+/****d* IBA Base: Constants/IB_PATH_REC_BASE_MASK
+* NAME
+*	IB_PATH_REC_BASE_MASK
+*
+* DESCRIPTION
+*	Mask for the base value field for path record MTU, rate,
+*	and packet lifetime.
+*
+* SOURCE
+*/
+#define IB_PATH_REC_BASE_MASK			0x3F
+/**********/
+
+/****d* IBA Base: Constants/IB_MULTIPATH_REC_BASE_MASK
+* NAME
+*	IB_MULTIPATH_REC_BASE_MASK
+*
+* DESCRIPTION
+*	Mask for the base value field for multipath record MTU, rate,
+*	and packet lifetime.
+*
+* SOURCE
+*/
+#define IB_MULTIPATH_REC_BASE_MASK		0x3F
+/**********/
+
+/****h* IBA Base/Type Definitions
+* NAME
+*	Type Definitions
+*
+* DESCRIPTION
+*	Definitions are from the InfiniBand Architecture Specification v1.2
+*
+*********/
+
+/****d* IBA Base: Types/ib_net16_t
+* NAME
+*	ib_net16_t
+*
+* DESCRIPTION
+*	Defines the network ordered type for 16-bit values.
+*
+* SOURCE
+*/
+typedef uint16_t	ib_net16_t;
+/**********/
+
+/****d* IBA Base: Types/ib_net32_t
+* NAME
+*	ib_net32_t
+*
+* DESCRIPTION
+*	Defines the network ordered type for 32-bit values.
+*
+* SOURCE
+*/
+typedef uint32_t	ib_net32_t;
+/**********/
+
+/****d* IBA Base: Types/ib_net64_t
+* NAME
+*	ib_net64_t
+*
+* DESCRIPTION
+*	Defines the network ordered type for 64-bit values.
+*
+* SOURCE
+*/
+typedef uint64_t	ib_net64_t;
+/**********/
+
+/****d* IBA Base: Types/ib_gid_prefix_t
+* NAME
+*	ib_gid_prefix_t
+*
+* DESCRIPTION
+*	Defines the network ordered type for the 64-bit GID prefix.
+*
+* SOURCE
+*/
+typedef ib_net64_t	ib_gid_prefix_t;
+/**********/
+
+/****d* IBA Base: Constants/ib_link_states_t
+* NAME
+*	ib_link_states_t
+*
+* DESCRIPTION
+*	Defines the link states of a port.
+*
+* SOURCE
+*/
+#define IB_LINK_NO_CHANGE	0
+#define IB_LINK_DOWN		1
+#define IB_LINK_INIT		2
+#define IB_LINK_ARMED		3
+#define IB_LINK_ACTIVE		4
+#define IB_LINK_ACT_DEFER	5
+/**********/
+
+static const char* const __ib_node_type_str[] =
+{
+	"UNKNOWN",
+	"Channel Adapter",
+	"Switch",
+	"Router",
+	"Subnet Management"
+};
+
+/****f* IBA Base: Types/ib_get_node_type_str
+* NAME
+*	ib_get_node_type_str
+*
+* DESCRIPTION
+*	Returns a string for the specified node type.
+*
+* SYNOPSIS
+*/
+AL_INLINE const char* AL_API
+ib_get_node_type_str(
+	IN	uint8_t		node_type )
+{
+	if( node_type > IB_NODE_TYPE_ROUTER )
+		node_type = 0;
+	return( __ib_node_type_str[node_type] );
+}
+/*
+* PARAMETERS
+*	node_type
+*		[in] Encoded node type as returned in the NodeInfo attribute.
+
+* RETURN VALUES
+*	Pointer to the node type string.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_node_info_t
+*********/
+
+static const char* const __ib_port_state_str[] =
+{
+	"No State Change (NOP)",
+	"DOWN",
+	"INIT",
+	"ARMED",
+	"ACTIVE",
+	"ACTDEFER",
+	"UNKNOWN"
+};
+
+/****f* IBA Base: Types/ib_get_port_state_str
+* NAME
+*	ib_get_port_state_str
+*
+* DESCRIPTION
+*	Returns a string for the specified port state.
+*
+* SYNOPSIS
+*/
+AL_INLINE const char* AL_API
+ib_get_port_state_str(
+	IN	uint8_t		port_state )
+{
+	if( port_state > IB_LINK_ACTIVE )
+		port_state = IB_LINK_ACTIVE + 1;
+	return( __ib_port_state_str[port_state] );
+}
+/*
+* PARAMETERS
+*	port_state
+*		[in] Encoded port state as returned in the PortInfo attribute.
+
+* RETURN VALUES
+*	Pointer to the port state string.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_port_info_t
+*********/
+
+/****f* IBA Base: Types/ib_get_port_state_from_str
+* NAME
+*	ib_get_port_state_from_str
+*
+* DESCRIPTION
+*	Returns the port state code matching a port state string.
+*
+* SYNOPSIS
+*/
+AL_INLINE const uint8_t AL_API
+ib_get_port_state_from_str(
+	IN	char*	p_port_state_str )
+{
+	if( !strncmp(p_port_state_str,"No State Change (NOP)",12) )
+		return(0);
+	else if( !strncmp(p_port_state_str, "DOWN",4) )
+		return(1);
+	else if( !strncmp(p_port_state_str, "INIT", 4) )
+		return(2);
+	else if( !strncmp(p_port_state_str,"ARMED", 5) )
+		return(3);
+	else if( !strncmp(p_port_state_str, "ACTIVE", 6) )
+		return(4);
+	else if( !strncmp(p_port_state_str, "ACTDEFER", 8) )
+		return(5);
+	return(6);
+}
+/*
+* PARAMETERS
+*	p_port_state_str
+*		[in] A string matching one returned by ib_get_port_state_str
+*
+* RETURN VALUES
+*	The matching port state code.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_port_info_t
+*********/
+
+/****d* IBA Base: Constants/Join States
+* NAME
+*	Join States
+*
+* DESCRIPTION
+*	Defines the join state flags for multicast group management.
+*
+* SOURCE
+*/
+#define IB_JOIN_STATE_FULL		1
+#define IB_JOIN_STATE_NON		2
+#define IB_JOIN_STATE_SEND_ONLY	4
+/**********/
+
+/****f* IBA Base: Types/ib_pkey_get_base
+* NAME
+*	ib_pkey_get_base
+*
+* DESCRIPTION
+*	Returns the base P_Key value with the membership bit stripped.
+*
+* SYNOPSIS
+*/
+AL_INLINE ib_net16_t AL_API
+ib_pkey_get_base(
+	IN	const	ib_net16_t	pkey )
+{
+	return( (ib_net16_t)(pkey & IB_PKEY_BASE_MASK) );
+}
+/*
+* PARAMETERS
+*	pkey
+*		[in] P_Key value
+*
+* RETURN VALUE
+*	Returns the base P_Key value with the membership bit stripped.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_pkey_is_full_member
+* NAME
+*	ib_pkey_is_full_member
+*
+* DESCRIPTION
+*	Indicates if the port is a full member of the partition.
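+*
+*	(A short usage sketch follows; it is an editor's addition and not
+*	part of the original header.)
+*/
+
+/*
+ * Editor's illustration only -- not in the original IBAL sources.  Shows
+ * how the base P_Key and the membership bit combine; the helper name is
+ * hypothetical, while ib_pkey_get_base and IB_PKEY_TYPE_MASK come from
+ * this header.
+ */
+AL_INLINE ib_net16_t AL_API
+ib_pkey_example_make_full_member(
+	IN	const	ib_net16_t	pkey )
+{
+	/* Strip any membership bit, then set it to mark full membership. */
+	return( (ib_net16_t)(ib_pkey_get_base( pkey ) | IB_PKEY_TYPE_MASK) );
+}
+
+/*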
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_pkey_is_full_member(
+	IN	const	ib_net16_t	pkey )
+{
+	return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK );
+}
+/*
+* PARAMETERS
+*	pkey
+*		[in] P_Key value
+*
+* RETURN VALUE
+*	TRUE if the port is a full member of the partition.
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_pkey_get_base, ib_net16_t
+*********/
+
+/****f* IBA Base: Types/ib_pkey_is_invalid
+* NAME
+*	ib_pkey_is_invalid
+*
+* DESCRIPTION
+*	Returns TRUE if the given P_Key is an invalid P_Key
+*	C10-116: the CI shall regard a P_Key as invalid if its low-order
+*	15 bits are all zero...
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_pkey_is_invalid(
+	IN	const	ib_net16_t	pkey )
+{
+	if (ib_pkey_get_base(pkey) == 0x0000)
+		return TRUE;
+
+	return FALSE;
+}
+/*
+* PARAMETERS
+*	pkey
+*		[in] P_Key value
+*
+* RETURN VALUE
+*	TRUE if the P_Key is invalid, FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****d* IBA Base: Types/ib_gid_t
+* NAME
+*	ib_gid_t
+*
+* DESCRIPTION
+*	A Global Identifier (GID): a 128-bit address formed from a 64-bit
+*	subnet prefix and a 64-bit GUID.
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef union _ib_gid
+{
+	uint8_t		raw[16];
+	struct _ib_gid_unicast
+	{
+		ib_gid_prefix_t	prefix;
+		ib_net64_t	interface_id;
+
+	} PACK_SUFFIX unicast;
+
+	struct _ib_gid_multicast
+	{
+		uint8_t		header[2];
+		uint8_t		raw_group_id[14];
+
+	} PACK_SUFFIX multicast;
+
+} PACK_SUFFIX ib_gid_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	raw
+*		GID represented as an unformatted byte array.
+*
+*	unicast
+*		Typical unicast representation with subnet prefix and
+*		port GUID.
+*
+*	multicast
+*		Representation for multicast use.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_gid_is_multicast
+* NAME
+*	ib_gid_is_multicast
+*
+* DESCRIPTION
+*	Returns a boolean indicating whether a GID is a multicast GID.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_gid_is_multicast(
+	IN	const	ib_gid_t*	p_gid )
+{
+	return( p_gid->raw[0] == 0xFF );
+}
+
+/****f* IBA Base: Types/ib_mgid_get_scope
+* NAME
+*	ib_mgid_get_scope
+*
+* DESCRIPTION
+*	Returns scope of (assumed) multicast GID.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_mgid_get_scope(
+	IN	const	ib_gid_t*	p_gid )
+{
+	return( p_gid->raw[1] & 0x0F );
+}
+
+/****f* IBA Base: Types/ib_mgid_set_scope
+* NAME
+*	ib_mgid_set_scope
+*
+* DESCRIPTION
+*	Sets scope of (assumed) multicast GID.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_mgid_set_scope(
+	IN	ib_gid_t* const		p_gid,
+	IN	const	uint8_t		scope )
+{
+	p_gid->raw[1] &= 0xF0;
+	p_gid->raw[1] |= scope & 0x0F;
+}
+
+/****f* IBA Base: Types/ib_gid_set_default
+* NAME
+*	ib_gid_set_default
+*
+* DESCRIPTION
+*	Sets a GID to the default value.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_gid_set_default(
+	IN	ib_gid_t* const		p_gid,
+	IN	const	ib_net64_t	interface_id )
+{
+	p_gid->unicast.prefix = IB_DEFAULT_SUBNET_PREFIX;
+	p_gid->unicast.interface_id = interface_id;
+}
+/*
+* PARAMETERS
+*	p_gid
+*		[in] Pointer to the GID object.
+*
+*	interface_id
+*		[in] Manufacturer assigned EUI64 value of a port.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_gid_t
+*********/
+
+/****f* IBA Base: Types/ib_gid_get_subnet_prefix
+* NAME
+*	ib_gid_get_subnet_prefix
+*
+* DESCRIPTION
+*	Gets the subnet prefix from a GID.
+*
+* SYNOPSIS
+*/
+AL_INLINE ib_net64_t AL_API
+ib_gid_get_subnet_prefix(
+	IN	const	ib_gid_t* const	p_gid )
+{
+	return( p_gid->unicast.prefix );
+}
+/*
+* PARAMETERS
+*	p_gid
+*		[in] Pointer to the GID object.
+*
+* RETURN VALUES
+*	64-bit subnet prefix value.
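+*
+*	(A usage sketch follows; an editor's addition, not part of the
+*	original header.)
+*/
+
+/*
+ * Editor's illustration only: builds a unicast GID from a port GUID with
+ * ib_gid_set_default and sanity-checks it with ib_gid_is_multicast.  The
+ * helper name is hypothetical.
+ */
+AL_INLINE boolean_t AL_API
+ib_gid_example_init_unicast(
+	IN	ib_gid_t* const		p_gid,
+	IN	const	ib_net64_t	port_guid )
+{
+	/* Default subnet prefix plus the port GUID. */
+	ib_gid_set_default( p_gid, port_guid );
+	/* A unicast GID must not carry the 0xFF multicast marker. */
+	return( (boolean_t)!ib_gid_is_multicast( p_gid ) );
+}
+
+/*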
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_gid_t
+*********/
+
+/****f* IBA Base: Types/ib_gid_is_link_local
+* NAME
+*	ib_gid_is_link_local
+*
+* DESCRIPTION
+*	Returns TRUE if the unicast GID scoping indicates link local,
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_gid_is_link_local(
+	IN	const	ib_gid_t* const	p_gid )
+{
+	return( ib_gid_get_subnet_prefix( p_gid ) == IB_DEFAULT_SUBNET_PREFIX );
+}
+/*
+* PARAMETERS
+*	p_gid
+*		[in] Pointer to the GID object.
+*
+* RETURN VALUES
+*	Returns TRUE if the unicast GID scoping indicates link local,
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_gid_t
+*********/
+
+/****f* IBA Base: Types/ib_gid_is_site_local
+* NAME
+*	ib_gid_is_site_local
+*
+* DESCRIPTION
+*	Returns TRUE if the unicast GID scoping indicates site local,
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+ib_gid_is_site_local(
+	IN	const	ib_gid_t* const	p_gid )
+{
+	return( ( ib_gid_get_subnet_prefix( p_gid ) &
+		CL_HTON64( 0xFFFFFFFFFFFF0000ULL ) ) == CL_HTON64( 0xFEC0000000000000ULL ) );
+}
+/*
+* PARAMETERS
+*	p_gid
+*		[in] Pointer to the GID object.
+*
+* RETURN VALUES
+*	Returns TRUE if the unicast GID scoping indicates site local,
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_gid_t
+*********/
+
+/****f* IBA Base: Types/ib_gid_get_guid
+* NAME
+*	ib_gid_get_guid
+*
+* DESCRIPTION
+*	Gets the guid from a GID.
+*
+* SYNOPSIS
+*/
+AL_INLINE ib_net64_t AL_API
+ib_gid_get_guid(
+	IN	const	ib_gid_t* const	p_gid )
+{
+	return( p_gid->unicast.interface_id );
+}
+/*
+* PARAMETERS
+*	p_gid
+*		[in] Pointer to the GID object.
+*
+* RETURN VALUES
+*	64-bit GUID value.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_gid_t
+*********/
+
+/****s* IBA Base: Types/ib_field32_t
+* NAME
+*	ib_field32_t
+*
+* DESCRIPTION
+*	Represents a 32-bit field, and allows access as a 32-bit network byte
+*	ordered or a 4-byte array.
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef union _ib_field32_t
+{
+	net32_t		val;
+	uint8_t		bytes[4];
+
+} PACK_SUFFIX ib_field32_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	val
+*		Full field value.
+*
+*	bytes
+*		Byte array representing the field.  The byte array provides identical
+*		access independently from CPU byte-ordering.
+*********/
+
+/****s* IBA Base: Types/ib_path_rec_t
+* NAME
+*	ib_path_rec_t
+*
+* DESCRIPTION
+*	Path records encapsulate the properties of a given
+*	route between two end-points on a subnet.
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef __declspec(align(8)) struct _ib_path_rec
+{
+	uint64_t	resv0;
+	ib_gid_t	dgid;
+	ib_gid_t	sgid;
+	ib_net16_t	dlid;
+	ib_net16_t	slid;
+	ib_field32_t	hop_flow_raw;
+	uint8_t		tclass;
+	uint8_t		num_path;
+	ib_net16_t	pkey;
+	ib_net16_t	sl;
+	uint8_t		mtu;
+	uint8_t		rate;
+	uint8_t		pkt_life;
+	uint8_t		preference;
+	uint16_t	resv1;
+	uint32_t	resv2;
+
+} PACK_SUFFIX ib_path_rec_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	resv0
+*		Reserved bytes.
+*
+*	dgid
+*		GID of destination port.
+*
+*	sgid
+*		GID of source port.
+*
+*	dlid
+*		LID of destination port.
+*
+*	slid
+*		LID of source port.
+*
+*	hop_flow_raw
+*		Global routing parameters: hop count, flow label and raw bit.
+*
+*	tclass
+*		Traffic class, another global routing parameter.
+*
+*	num_path
+*		Reversible path - 1 bit to say if path is reversible.
+*		num_path [6:0] In queries, maximum number of paths to return.
+*		In responses, undefined.
+*
+*	pkey
+*		Partition key (P_Key) to use on this path.
+*
+*	sl
+*		Service level to use on this path.
+* +* mtu +* MTU and MTU selector fields to use on this path +* +* rate +* Rate and rate selector fields to use on this path. +* +* pkt_life +* Packet lifetime +* +* preference +* Indicates the relative merit of this path versus other path +* records returned from the SA. Lower numbers are better. +* +* resv1 +* Reserved bytes. +* +* resv2 +* Reserved bytes. +* SEE ALSO +*********/ + +/* Path Record Component Masks */ +#define IB_PR_COMPMASK_DGID (CL_HTON64(((uint64_t)1)<<2)) +#define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4)) +#define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5)) +#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8)) +#define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9)) +#define IB_PR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<10)) +#define IB_PR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<11)) +#define IB_PR_COMPMASK_NUM_PATH (CL_HTON64(((uint64_t)1)<<12)) +#define IB_PR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<13)) +#define IB_PR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<14)) +#define IB_PR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<15)) +#define IB_PR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<16)) +#define IB_PR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<17)) +#define IB_PR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<18)) +#define IB_PR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<19)) +#define IB_PR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<20)) +#define IB_PR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<21)) + +/* Link Record Component Masks */ +#define IB_LR_COMPMASK_FROM_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_LR_COMPMASK_FROM_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_LR_COMPMASK_TO_PORT (CL_HTON64(((uint64_t)1)<<2)) +#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3)) + +/* VL Arbitration Record Masks */ +#define IB_VLA_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_VLA_COMPMASK_OUT_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_VLA_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<2)) + +/* SLtoVL Mapping Record Masks */ +#define IB_SLVL_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SLVL_COMPMASK_IN_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SLVL_COMPMASK_OUT_PORT (CL_HTON64(((uint64_t)1)<<2)) + +/* P_Key Table Record Masks */ +#define IB_PKEY_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_PKEY_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1)) +#define IB_PKEY_COMPMASK_PORT (CL_HTON64(((uint64_t)1)<<2)) + +/* Switch Info Record Masks */ +#define IB_SWIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SWIR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1)) + +/* LFT Record Masks */ +#define IB_LFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_LFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1)) + +/* MFT Record Masks */ +#define IB_MFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MFTR_COMPMASK_POSITION (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MFTR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MFTR_COMPMASK_RESERVED2 (CL_HTON64(((uint64_t)1)<<4)) + +/* NodeInfo Record Masks */ +#define IB_NR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_NR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_NR_COMPMASK_BASEVERSION (CL_HTON64(((uint64_t)1)<<2)) +#define IB_NR_COMPMASK_CLASSVERSION 
(CL_HTON64(((uint64_t)1)<<3)) +#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5)) +#define IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7)) +#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8)) +#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9)) +#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10)) +#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11)) +#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12)) +#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13)) +#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14)) + +/* Service Record Component Masks Sec 15.2.5.14 Ver 1.1*/ +#define IB_SR_COMPMASK_SID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SR_COMPMASK_SPKEY (CL_HTON64(((uint64_t)1)<<2)) +#define IB_SR_COMPMASK_RES1 (CL_HTON64(((uint64_t)1)<<3)) +#define IB_SR_COMPMASK_SLEASE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_SR_COMPMASK_SKEY (CL_HTON64(((uint64_t)1)<<5)) +#define IB_SR_COMPMASK_SNAME (CL_HTON64(((uint64_t)1)<<6)) +#define IB_SR_COMPMASK_SDATA8_0 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_SR_COMPMASK_SDATA8_1 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_SR_COMPMASK_SDATA8_2 (CL_HTON64(((uint64_t)1)<<9)) +#define IB_SR_COMPMASK_SDATA8_3 (CL_HTON64(((uint64_t)1)<<10)) +#define IB_SR_COMPMASK_SDATA8_4 (CL_HTON64(((uint64_t)1)<<11)) +#define IB_SR_COMPMASK_SDATA8_5 (CL_HTON64(((uint64_t)1)<<12)) +#define IB_SR_COMPMASK_SDATA8_6 (CL_HTON64(((uint64_t)1)<<13)) +#define IB_SR_COMPMASK_SDATA8_7 (CL_HTON64(((uint64_t)1)<<14)) +#define IB_SR_COMPMASK_SDATA8_8 (CL_HTON64(((uint64_t)1)<<15)) +#define IB_SR_COMPMASK_SDATA8_9 (CL_HTON64(((uint64_t)1)<<16)) +#define IB_SR_COMPMASK_SDATA8_10 (CL_HTON64(((uint64_t)1)<<17)) +#define IB_SR_COMPMASK_SDATA8_11 (CL_HTON64(((uint64_t)1)<<18)) +#define IB_SR_COMPMASK_SDATA8_12 (CL_HTON64(((uint64_t)1)<<19)) +#define IB_SR_COMPMASK_SDATA8_13 (CL_HTON64(((uint64_t)1)<<20)) +#define IB_SR_COMPMASK_SDATA8_14 (CL_HTON64(((uint64_t)1)<<21)) +#define IB_SR_COMPMASK_SDATA8_15 (CL_HTON64(((uint64_t)1)<<22)) +#define IB_SR_COMPMASK_SDATA16_0 (CL_HTON64(((uint64_t)1)<<23)) +#define IB_SR_COMPMASK_SDATA16_1 (CL_HTON64(((uint64_t)1)<<24)) +#define IB_SR_COMPMASK_SDATA16_2 (CL_HTON64(((uint64_t)1)<<25)) +#define IB_SR_COMPMASK_SDATA16_3 (CL_HTON64(((uint64_t)1)<<26)) +#define IB_SR_COMPMASK_SDATA16_4 (CL_HTON64(((uint64_t)1)<<27)) +#define IB_SR_COMPMASK_SDATA16_5 (CL_HTON64(((uint64_t)1)<<28)) +#define IB_SR_COMPMASK_SDATA16_6 (CL_HTON64(((uint64_t)1)<<29)) +#define IB_SR_COMPMASK_SDATA16_7 (CL_HTON64(((uint64_t)1)<<30)) +#define IB_SR_COMPMASK_SDATA32_0 (CL_HTON64(((uint64_t)1)<<31)) +#define IB_SR_COMPMASK_SDATA32_1 (CL_HTON64(((uint64_t)1)<<32)) +#define IB_SR_COMPMASK_SDATA32_2 (CL_HTON64(((uint64_t)1)<<33)) +#define IB_SR_COMPMASK_SDATA32_3 (CL_HTON64(((uint64_t)1)<<34)) +#define IB_SR_COMPMASK_SDATA64_0 (CL_HTON64(((uint64_t)1)<<35)) +#define IB_SR_COMPMASK_SDATA64_1 (CL_HTON64(((uint64_t)1)<<36)) + +/* Port Info Record Component Masks */ +#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3)) +#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5)) +#define 
IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7)) +#define IB_PIR_COMPMASK_DIAGCODE (CL_HTON64(((uint64_t)1)<<8)) +#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9)) +#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10)) +#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11)) +#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12)) +#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13)) +#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14)) +#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16)) +#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17)) +#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18)) +#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19)) +#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20)) +#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21)) +#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22)) +#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23)) +#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24)) +#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25)) +#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26)) +#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27)) +#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28)) +#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29)) +#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30)) +#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31)) +#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32)) +#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33)) +#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34)) +#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35)) +#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36)) +#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37)) +#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38)) +#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39)) +#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40)) +#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41)) +#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42)) +#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43)) +#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44)) +#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45)) +#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46)) +#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47)) +#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48)) + +/* Multicast Member Record Component Masks */ +#define IB_MCR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MCR_COMPMASK_MGID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MCR_COMPMASK_PORT_GID (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MCR_COMPMASK_QKEY (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MCR_COMPMASK_MLID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MCR_COMPMASK_MTU_SEL (CL_HTON64(((uint64_t)1)<<4)) +#define IB_MCR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<5)) +#define IB_MCR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<6)) +#define IB_MCR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<7)) +#define IB_MCR_COMPMASK_RATE_SEL (CL_HTON64(((uint64_t)1)<<8)) +#define IB_MCR_COMPMASK_RATE 
(CL_HTON64(((uint64_t)1)<<9)) +#define IB_MCR_COMPMASK_LIFE_SEL (CL_HTON64(((uint64_t)1)<<10)) +#define IB_MCR_COMPMASK_LIFE (CL_HTON64(((uint64_t)1)<<11)) +#define IB_MCR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<12)) +#define IB_MCR_COMPMASK_FLOW (CL_HTON64(((uint64_t)1)<<13)) +#define IB_MCR_COMPMASK_HOP (CL_HTON64(((uint64_t)1)<<14)) +#define IB_MCR_COMPMASK_SCOPE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_MCR_COMPMASK_JOIN_STATE (CL_HTON64(((uint64_t)1)<<16)) +#define IB_MCR_COMPMASK_PROXY (CL_HTON64(((uint64_t)1)<<17)) + +/* GUID Info Record Component Masks */ +#define IB_GIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_GIR_COMPMASK_BLOCKNUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_GIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_GIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<3)) +#define IB_GIR_COMPMASK_GID0 (CL_HTON64(((uint64_t)1)<<4)) +#define IB_GIR_COMPMASK_GID1 (CL_HTON64(((uint64_t)1)<<5)) +#define IB_GIR_COMPMASK_GID2 (CL_HTON64(((uint64_t)1)<<6)) +#define IB_GIR_COMPMASK_GID3 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_GIR_COMPMASK_GID4 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_GIR_COMPMASK_GID5 (CL_HTON64(((uint64_t)1)<<9)) +#define IB_GIR_COMPMASK_GID6 (CL_HTON64(((uint64_t)1)<<10)) +#define IB_GIR_COMPMASK_GID7 (CL_HTON64(((uint64_t)1)<<11)) + +/* MultiPath Record Component Masks */ +#define IB_MPR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MPR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MPR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MPR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MPR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<4)) +#define IB_MPR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<5)) +#define IB_MPR_COMPMASK_NUMBPATH (CL_HTON64(((uint64_t)1)<<6)) +#define IB_MPR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<7)) +#define IB_MPR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_MPR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<9)) +#define IB_MPR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<10)) +#define IB_MPR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<11)) +#define IB_MPR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<12)) +#define IB_MPR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<13)) +#define IB_MPR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<14)) +#define IB_MPR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<15)) +#define IB_MPR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<16)) +#define IB_MPR_COMPMASK_INDEPSELEC (CL_HTON64(((uint64_t)1)<<17)) +#define IB_MPR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<18)) +#define IB_MPR_COMPMASK_SGIDCOUNT (CL_HTON64(((uint64_t)1)<<19)) +#define IB_MPR_COMPMASK_DGIDCOUNT (CL_HTON64(((uint64_t)1)<<20)) +#define IB_MPR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<21)) + +/* SMInfo Record Component Masks */ +#define IB_SMIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SMIR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SMIR_COMPMASK_GUID (CL_HTON64(((uint64_t)1)<<2)) +#define IB_SMIR_COMPMASK_SMKEY (CL_HTON64(((uint64_t)1)<<3)) +#define IB_SMIR_COMPMASK_ACTCOUNT (CL_HTON64(((uint64_t)1)<<4)) +#define IB_SMIR_COMPMASK_PRIORITY (CL_HTON64(((uint64_t)1)<<5)) +#define IB_SMIR_COMPMASK_SMSTATE (CL_HTON64(((uint64_t)1)<<6)) + +/* InformInfo Record Component Masks */ +#define IB_IIR_COMPMASK_SUBSCRIBERGID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_IIR_COMPMASK_ENUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_IIR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_IIR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_IIR_COMPMASK_LIDRANGEBEGIN 
(CL_HTON64(((uint64_t)1)<<4))
+#define IB_IIR_COMPMASK_LIDRANGEEND	(CL_HTON64(((uint64_t)1)<<5))
+#define IB_IIR_COMPMASK_RESV1		(CL_HTON64(((uint64_t)1)<<6))
+#define IB_IIR_COMPMASK_ISGENERIC	(CL_HTON64(((uint64_t)1)<<7))
+#define IB_IIR_COMPMASK_SUBSCRIBE	(CL_HTON64(((uint64_t)1)<<8))
+#define IB_IIR_COMPMASK_TYPE		(CL_HTON64(((uint64_t)1)<<9))
+#define IB_IIR_COMPMASK_TRAPNUMB	(CL_HTON64(((uint64_t)1)<<10))
+#define IB_IIR_COMPMASK_DEVICEID	(CL_HTON64(((uint64_t)1)<<10))
+#define IB_IIR_COMPMASK_QPN		(CL_HTON64(((uint64_t)1)<<11))
+#define IB_IIR_COMPMASK_RESV2		(CL_HTON64(((uint64_t)1)<<12))
+#define IB_IIR_COMPMASK_RESPTIME	(CL_HTON64(((uint64_t)1)<<13))
+#define IB_IIR_COMPMASK_RESV3		(CL_HTON64(((uint64_t)1)<<14))
+#define IB_IIR_COMPMASK_PRODTYPE	(CL_HTON64(((uint64_t)1)<<15))
+#define IB_IIR_COMPMASK_VENDID		(CL_HTON64(((uint64_t)1)<<15))
+
+/****f* IBA Base: Types/ib_path_rec_init_local
+* NAME
+*	ib_path_rec_init_local
+*
+* DESCRIPTION
+*	Initializes a subnet local path record.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_path_rec_init_local(
+	IN	ib_path_rec_t* const	p_rec,
+	IN	const	ib_gid_t* const	p_dgid,
+	IN	const	ib_gid_t* const	p_sgid,
+	IN	const	ib_net16_t	dlid,
+	IN	const	ib_net16_t	slid,
+	IN	const	uint8_t		num_path,
+	IN	const	ib_net16_t	pkey,
+	IN	const	uint8_t		sl,
+	IN	const	uint8_t		mtu_selector,
+	IN	const	uint8_t		mtu,
+	IN	const	uint8_t		rate_selector,
+	IN	const	uint8_t		rate,
+	IN	const	uint8_t		pkt_life_selector,
+	IN	const	uint8_t		pkt_life,
+	IN	const	uint8_t		preference )
+{
+	p_rec->dgid = *p_dgid;
+	p_rec->sgid = *p_sgid;
+	p_rec->dlid = dlid;
+	p_rec->slid = slid;
+	p_rec->num_path = num_path;
+	p_rec->pkey = pkey;
+	/* Only the low 4 bits of the 16-bit SL field hold the SL;
+	 * the upper 12 bits are reserved. */
+	p_rec->sl = cl_hton16( sl );
+	p_rec->mtu = (uint8_t)((mtu & IB_PATH_REC_BASE_MASK) |
+			(uint8_t)(mtu_selector << 6));
+	p_rec->rate = (uint8_t)((rate & IB_PATH_REC_BASE_MASK) |
+			(uint8_t)(rate_selector << 6));
+	p_rec->pkt_life = (uint8_t)((pkt_life & IB_PATH_REC_BASE_MASK) |
+			(uint8_t)(pkt_life_selector << 6));
+	p_rec->preference = preference;
+
+	/* Clear global routing fields for local path records */
+	p_rec->hop_flow_raw.val = 0;
+	p_rec->tclass = 0;
+
+	p_rec->resv0 = 0;
+	p_rec->resv1 = 0;
+	p_rec->resv2 = 0;
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the path record object.
+*
+*	p_dgid
+*		[in] GID of destination port.
+*
+*	p_sgid
+*		[in] GID of source port.
+*
+*	dlid
+*		[in] LID of destination port.
+*
+*	slid
+*		[in] LID of source port.
+*
+*	num_path
+*		[in] Reversible path - 1 bit to say if path is reversible.
+*		num_path [6:0] In queries, maximum number of paths to return.
+*		In responses, undefined.
+*
+*	pkey
+*		[in] Partition key (P_Key) to use on this path.
+*
+*	sl
+*		[in] Service level to use on this path.  Lower 4-bits are valid.
+*
+*	mtu_selector
+*		[in] Encoded MTU selector value to use on this path
+*
+*	mtu
+*		[in] Encoded MTU to use on this path
+*
+*	rate_selector
+*		[in] Encoded rate selector value to use on this path.
+*
+*	rate
+*		[in] Encoded rate to use on this path.
+*
+*	pkt_life_selector
+*		[in] Encoded packet lifetime selector value for this path.
+*
+*	pkt_life
+*		[in] Encoded packet lifetime for this path.
+*
+*	preference
+*		[in] Indicates the relative merit of this path versus other path
+*		records returned from the SA.  Lower numbers are better.
+*
+* RETURN VALUES
+*	None.
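+*
+*	(A usage sketch follows; an editor's addition, not part of the
+*	original header.)
+*/
+
+/*
+ * Editor's illustration only: fills in a reversible, single-path local
+ * path record with an exact 2K MTU and no rate or lifetime constraint.
+ * The helper name and the 0xFFFF default P_Key value are illustrative
+ * assumptions, not taken from this header.
+ */
+AL_INLINE void AL_API
+ib_path_rec_example_build(
+	IN	ib_path_rec_t* const	p_rec,
+	IN	const	ib_gid_t* const	p_dgid,
+	IN	const	ib_gid_t* const	p_sgid,
+	IN	const	ib_net16_t	dlid,
+	IN	const	ib_net16_t	slid )
+{
+	ib_path_rec_init_local( p_rec, p_dgid, p_sgid, dlid, slid,
+		0x81,			/* reversible, num_path = 1 */
+		cl_hton16( 0xFFFF ),	/* default full-member P_Key */
+		0,			/* SL 0 */
+		IB_PATH_SELECTOR_EXACTLY, IB_MTU_LEN_2048,
+		IB_PATH_SELECTOR_LARGEST, 0,	/* largest rate available */
+		IB_PATH_SELECTOR_LARGEST, 0,	/* smallest lifetime available */
+		0 );			/* best preference */
+}
+
+/*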
+* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_num_path +* NAME +* ib_path_rec_num_path +* +* DESCRIPTION +* Get max number of paths to return. +* +* SYNOPSIS +*/ +static inline uint8_t +ib_path_rec_num_path( + IN const ib_path_rec_t* const p_rec ) +{ + return( p_rec->num_path &0x7F ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Maximum number of paths to return for each unique SGID_DGID combination. +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_sl +* NAME +* ib_path_rec_sl +* +* DESCRIPTION +* Get path service level. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_rec_sl( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* SL. +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_mtu +* NAME +* ib_path_rec_mtu +* +* DESCRIPTION +* Get encoded path MTU. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_rec_mtu( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->mtu & IB_PATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path MTU. +* 1: 256 +* 2: 512 +* 3: 1024 +* 4: 2048 +* 5: 4096 +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_mtu_sel +* NAME +* ib_path_rec_mtu_sel +* +* DESCRIPTION +* Get encoded path MTU selector. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_rec_mtu_sel( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->mtu & IB_PATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path MTU selector value (for queries). +* 0: greater than MTU specified +* 1: less than MTU specified +* 2: exactly the MTU specified +* 3: largest MTU available +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_rate +* NAME +* ib_path_rec_rate +* +* DESCRIPTION +* Get encoded path rate. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_rec_rate( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->rate & IB_PATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path rate. +* 2: 2.5 Gb/sec. +* 3: 10 Gb/sec. +* 4: 30 Gb/sec. +* 5: 5 Gb/sec. +* 6: 20 Gb/sec. +* 7: 40 Gb/sec. +* 8: 60 Gb/sec. +* 9: 80 Gb/sec. +* 10: 120 Gb/sec. +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_rate_sel +* NAME +* ib_path_rec_rate_sel +* +* DESCRIPTION +* Get encoded path rate selector. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_rec_rate_sel( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->rate & IB_PATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path rate selector value (for queries). +* 0: greater than rate specified +* 1: less than rate specified +* 2: exactly the rate specified +* 3: largest rate available +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_pkt_life +* NAME +* ib_path_rec_pkt_life +* +* DESCRIPTION +* Get encoded path pkt_life. 
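+*
+*	(Before the accessor itself, a decoding sketch; an editor's
+*	addition, not part of the original header.)
+*/
+
+/*
+ * Editor's illustration only: the mtu, rate and pkt_life fields each pack
+ * a 2-bit selector (bits [7:6]) over a 6-bit base value (bits [5:0]).
+ * This hypothetical helper checks a record for an exact 2K MTU using the
+ * accessors defined above.
+ */
+AL_INLINE boolean_t AL_API
+ib_path_rec_example_is_exact_2k_mtu(
+	IN	const	ib_path_rec_t* const	p_rec )
+{
+	return( (boolean_t)(
+		(ib_path_rec_mtu_sel( p_rec ) == IB_PATH_SELECTOR_EXACTLY) &&
+		(ib_path_rec_mtu( p_rec ) == IB_MTU_LEN_2048)) );
+}
+
+/*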
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_path_rec_pkt_life(
+	IN	const	ib_path_rec_t* const	p_rec )
+{
+	return( (uint8_t)(p_rec->pkt_life & IB_PATH_REC_BASE_MASK) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the path record object.
+*
+* RETURN VALUES
+*	Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_pkt_life_sel
+* NAME
+*	ib_path_rec_pkt_life_sel
+*
+* DESCRIPTION
+*	Get encoded path pkt_lifetime selector.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_path_rec_pkt_life_sel(
+	IN	const	ib_path_rec_t* const	p_rec )
+{
+	return( (uint8_t)((p_rec->pkt_life & IB_PATH_REC_SELECTOR_MASK) >> 6 ));
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the path record object.
+*
+* RETURN VALUES
+*	Encoded path pkt_lifetime selector value (for queries).
+*		0: greater than the lifetime specified
+*		1: less than the lifetime specified
+*		2: exactly the lifetime specified
+*		3: smallest packet lifetime available
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_flow_lbl
+* NAME
+*	ib_path_rec_flow_lbl
+*
+* DESCRIPTION
+*	Get flow label.
+*
+* SYNOPSIS
+*/
+AL_INLINE net32_t AL_API
+ib_path_rec_flow_lbl(
+	IN	const	ib_path_rec_t* const	p_rec )
+{
+	return( cl_hton32( (cl_ntoh32(p_rec->hop_flow_raw.val) >> 8) & 0x000FFFFF ) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the path record object.
+*
+* RETURN VALUES
+*	Flow label of the path record.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_hop_limit
+* NAME
+*	ib_path_rec_hop_limit
+*
+* DESCRIPTION
+*	Get hop limit.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_path_rec_hop_limit(
+	IN	const	ib_path_rec_t* const	p_rec )
+{
+	return( p_rec->hop_flow_raw.bytes[3] );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the path record object.
+*
+* RETURN VALUES
+*	Hop limit of the path record.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_set_hop_flow_raw
+* NAME
+*	ib_path_rec_set_hop_flow_raw
+*
+* DESCRIPTION
+*	Sets the hop limit, flow label, and raw traffic bits of a path record.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_path_rec_set_hop_flow_raw(
+	OUT	ib_path_rec_t* const	p_rec,
+	IN	const	uint8_t		hop_limit,
+	IN	const	net32_t		flow_lbl,
+	IN	const	boolean_t	raw )
+{
+	p_rec->hop_flow_raw.val = (cl_ntoh32( flow_lbl ) & 0x000FFFFF) << 8;
+	if( raw )
+		p_rec->hop_flow_raw.val |= 0x80000000;
+	p_rec->hop_flow_raw.val = cl_hton32( p_rec->hop_flow_raw.val );
+	p_rec->hop_flow_raw.bytes[3] = hop_limit;
+}
+/*
+* PARAMETERS
+*	p_rec
+*		Pointer to the path record whose hop limit, flow label, and raw
+*		traffic fields to set.
+*
+*	hop_limit
+*		Hop limit to set in the path record.
+*
+*	flow_lbl
+*		Flow label, in network byte order, to set in the path record.
+*
+*	raw
+*		Boolean flag to indicate whether the path record is for raw traffic.
+*
+* SEE ALSO
+*	ib_path_rec_t
+*********/
+
+
+/****s* IBA Base: Constants/IB_CLASS_CAP_TRAP
+* NAME
+*	IB_CLASS_CAP_TRAP
+*
+* DESCRIPTION
+*	ClassPortInfo CapabilityMask bits.  This bit will be set
+*	if the class supports Trap() MADs (13.4.8.1).
+*
+* SEE ALSO
+*	ib_class_port_info_t, IB_CLASS_CAP_GETSET
+*
+* SOURCE
+*/
+#define IB_CLASS_CAP_TRAP	0x0001
+/*********/
+
+/****s* IBA Base: Constants/IB_CLASS_CAP_GETSET
+* NAME
+*	IB_CLASS_CAP_GETSET
+*
+* DESCRIPTION
+*	ClassPortInfo CapabilityMask bits.  This bit will be set
+*	if the class supports Get(Notice) and Set(Notice) MADs (13.4.8.1).
+*
+* SEE ALSO
+*	ib_class_port_info_t, IB_CLASS_CAP_TRAP
+*
+* SOURCE
+*/
+#define IB_CLASS_CAP_GETSET	0x0002
+/*********/
+
+/****s* IBA Base: Constants/IB_CLASS_RESP_TIME_MASK
+* NAME
+*	IB_CLASS_RESP_TIME_MASK
+*
+* DESCRIPTION
+*	Mask bits to extract the response time value from the
+*	resp_time_val field of ib_class_port_info_t.
+*
+* SEE ALSO
+*	ib_class_port_info_t
+*
+* SOURCE
+*/
+#define IB_CLASS_RESP_TIME_MASK	0x1F
+/*********/
+
+/****s* IBA Base: Types/ib_class_port_info_t
+* NAME
+*	ib_class_port_info_t
+*
+* DESCRIPTION
+*	IBA defined ClassPortInfo attribute (13.4.8.1)
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_class_port_info
+{
+	uint8_t		base_ver;
+	uint8_t		class_ver;
+	ib_net16_t	cap_mask;
+	ib_net32_t	resp_time_val;
+	ib_gid_t	redir_gid;
+	ib_net32_t	redir_tc_sl_fl;
+	ib_net16_t	redir_lid;
+	ib_net16_t	redir_pkey;
+	ib_net32_t	redir_qp;
+	ib_net32_t	redir_qkey;
+	ib_gid_t	trap_gid;
+	ib_net32_t	trap_tc_sl_fl;
+	ib_net16_t	trap_lid;
+	ib_net16_t	trap_pkey;
+	ib_net32_t	trap_hop_qp;
+	ib_net32_t	trap_qkey;
+
+} PACK_SUFFIX ib_class_port_info_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	base_ver
+*		Maximum supported MAD Base Version.
+*
+*	class_ver
+*		Maximum supported management class version.
+*
+*	cap_mask
+*		Supported capabilities of this management class.
+*
+*	resp_time_val
+*		Maximum expected response time.
+*
+*	redir_gid
+*		GID to use for redirection, or zero
+*
+*	redir_tc_sl_fl
+*		Traffic class, service level and flow label the requester
+*		should use if the service is redirected.
+*
+*	redir_lid
+*		LID used for redirection, or zero
+*
+*	redir_pkey
+*		P_Key used for redirection
+*
+*	redir_qp
+*		QP number used for redirection
+*
+*	redir_qkey
+*		Q_Key associated with the redirected QP.  This shall be the
+*		well known Q_Key value.
+*
+*	trap_gid
+*		GID value used for trap messages from this service.
+*
+*	trap_tc_sl_fl
+*		Traffic class, service level and flow label used for
+*		trap messages originated by this service.
+*
+*	trap_lid
+*		LID used for trap messages, or zero
+*
+*	trap_pkey
+*		P_Key used for trap messages
+*
+*	trap_hop_qp
+*		Hop limit (upper 8 bits) and QP number used for trap messages
+*
+*	trap_qkey
+*		Q_Key associated with the trap messages QP.
+*
+* SEE ALSO
+*	IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP
+*
+*********/
+
+/****s* IBA Base: Types/ib_sm_info_t
+* NAME
+*	ib_sm_info_t
+*
+* DESCRIPTION
+*	SMInfo structure (14.2.5.13).
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_sm_info
+{
+	ib_net64_t	guid;
+	ib_net64_t	sm_key;
+	ib_net32_t	act_count;
+	uint8_t		pri_state;
+
+} PACK_SUFFIX ib_sm_info_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	guid
+*		Port GUID for this SM.
+*
+*	sm_key
+*		SM_Key of this SM.
+*
+*	act_count
+*		Activity counter used as a heartbeat.
+*
+*	pri_state
+*		Priority and State information.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_sminfo_get_priority
+* NAME
+*	ib_sminfo_get_priority
+*
+* DESCRIPTION
+*	Returns the priority value.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_sminfo_get_priority(
+	IN	const	ib_sm_info_t* const	p_smi )
+{
+	return( (uint8_t)((p_smi->pri_state & 0xF0)>>4) );
+}
+/*
+* PARAMETERS
+*	p_smi
+*		[in] Pointer to the SMInfo Attribute.
+*
+* RETURN VALUES
+*	Returns the priority value.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_sminfo_get_state
+* NAME
+*	ib_sminfo_get_state
+*
+* DESCRIPTION
+*	Returns the state value.
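+*
+*	(A usage sketch follows; an editor's addition, not part of the
+*	original header.)
+*/
+
+/*
+ * Editor's illustration only: the low nibble of pri_state holds the SM
+ * state, the high nibble its priority.  The helper name is hypothetical;
+ * IB_SMINFO_STATE_MASTER is defined earlier in this header.
+ */
+AL_INLINE boolean_t AL_API
+ib_sminfo_example_is_master(
+	IN	const	ib_sm_info_t* const	p_smi )
+{
+	return( (boolean_t)
+		((p_smi->pri_state & 0x0F) == IB_SMINFO_STATE_MASTER) );
+}
+
+/*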
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_sminfo_get_state(
+	IN	const	ib_sm_info_t* const	p_smi )
+{
+	return( (uint8_t)(p_smi->pri_state & 0x0F) );
+}
+/*
+* PARAMETERS
+*	p_smi
+*		[in] Pointer to the SMInfo Attribute.
+*
+* RETURN VALUES
+*	Returns the state value.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****s* IBA Base: Types/ib_mad_t
+* NAME
+*	ib_mad_t
+*
+* DESCRIPTION
+*	IBA defined MAD header (13.4.3)
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_mad
+{
+	uint8_t		base_ver;
+	uint8_t		mgmt_class;
+	uint8_t		class_ver;
+	uint8_t		method;
+	ib_net16_t	status;
+	ib_net16_t	class_spec;
+	ib_net64_t	trans_id;
+	ib_net16_t	attr_id;
+	ib_net16_t	resv;
+	ib_net32_t	attr_mod;
+} PACK_SUFFIX ib_mad_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	base_ver
+*		MAD base format.
+*
+*	mgmt_class
+*		Class of operation.
+*
+*	class_ver
+*		Version of MAD class-specific format.
+*
+*	method
+*		Method to perform, including 'R' bit.
+*
+*	status
+*		Status of operation.
+*
+*	class_spec
+*		Reserved for subnet management.
+*
+*	trans_id
+*		Transaction ID.
+*
+*	attr_id
+*		Attribute ID.
+*
+*	resv
+*		Reserved field.
+*
+*	attr_mod
+*		Attribute modifier.
+*
+* SEE ALSO
+*********/
+
+
+/****s* IBA Base: Types/ib_rmpp_mad_t
+* NAME
+*	ib_rmpp_mad_t
+*
+* DESCRIPTION
+*	IBA defined MAD RMPP header (13.6.2.1)
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_rmpp_mad
+{
+	ib_mad_t	common_hdr;
+
+	uint8_t		rmpp_version;
+	uint8_t		rmpp_type;
+	uint8_t		rmpp_flags;
+	uint8_t		rmpp_status;
+
+	ib_net32_t	seg_num;
+	ib_net32_t	paylen_newwin;
+
+} PACK_SUFFIX ib_rmpp_mad_t;
+#include <complib/cl_packoff.h>
+/*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+
+/****f* IBA Base: Types/ib_mad_init_new
+* NAME
+*	ib_mad_init_new
+*
+* DESCRIPTION
+*	Initializes a MAD common header.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_mad_init_new(
+	IN	ib_mad_t* const		p_mad,
+	IN	const	uint8_t		mgmt_class,
+	IN	const	uint8_t		class_ver,
+	IN	const	uint8_t		method,
+	IN	const	ib_net64_t	trans_id,
+	IN	const	ib_net16_t	attr_id,
+	IN	const	ib_net32_t	attr_mod )
+{
+	CL_ASSERT( p_mad );
+	p_mad->base_ver = 1;
+	p_mad->mgmt_class = mgmt_class;
+	p_mad->class_ver = class_ver;
+	p_mad->method = method;
+	p_mad->status = 0;
+	p_mad->class_spec = 0;
+	p_mad->trans_id = trans_id;
+	p_mad->attr_id = attr_id;
+	p_mad->resv = 0;
+	p_mad->attr_mod = attr_mod;
+}
+/*
+* PARAMETERS
+*	p_mad
+*		[in] Pointer to the MAD common header.
+*
+*	mgmt_class
+*		[in] Class of operation.
+*
+*	class_ver
+*		[in] Version of MAD class-specific format.
+*
+*	method
+*		[in] Method to perform, including 'R' bit.
+*
+*	trans_id
+*		[in] Transaction ID.
+*
+*	attr_id
+*		[in] Attribute ID.
+*
+*	attr_mod
+*		[in] Attribute modifier.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+/****f* IBA Base: Types/ib_mad_init_response
+* NAME
+*	ib_mad_init_response
+*
+* DESCRIPTION
+*	Initializes a MAD common header as a response.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_mad_init_response(
+	IN	const	ib_mad_t* const	p_req_mad,
+	IN	ib_mad_t* const		p_mad,
+	IN	const	ib_net16_t	status )
+{
+	CL_ASSERT( p_req_mad );
+	CL_ASSERT( p_mad );
+	*p_mad = *p_req_mad;
+	p_mad->status = status;
+	if( p_mad->method == IB_MAD_METHOD_SET )
+		p_mad->method = IB_MAD_METHOD_GET;
+	p_mad->method |= IB_MAD_METHOD_RESP_MASK;
+}
+/*
+* PARAMETERS
+*	p_req_mad
+*		[in] Pointer to the MAD common header in the original request MAD.
+*
+*	p_mad
+*		[in] Pointer to the MAD common header to initialize.
+*
+*	status
+*		[in] MAD Status value to return.
+*
+* RETURN VALUES
+*	None.
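+*
+*	(A usage sketch follows; an editor's addition, not part of the
+*	original header.)
+*/
+
+/*
+ * Editor's illustration only: turns a received request into a successful
+ * response in place, relying on the NOTES below that p_req_mad and p_mad
+ * may alias.  The helper name is hypothetical.
+ */
+AL_INLINE void AL_API
+ib_mad_example_reply_in_place(
+	IN	ib_mad_t* const		p_mad )
+{
+	/* Status 0 reports success; Set() becomes Get() with the R bit set. */
+	ib_mad_init_response( p_mad, p_mad, 0 );
+}
+
+/*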
+*
+* NOTES
+*	p_req_mad and p_mad may point to the same MAD.
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+/****f* IBA Base: Types/ib_mad_is_response
+* NAME
+*	ib_mad_is_response
+*
+* DESCRIPTION
+*	Returns TRUE if the MAD is a response ('R' bit set),
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_mad_is_response(
+	IN	const	ib_mad_t* const	p_mad )
+{
+	CL_ASSERT( p_mad );
+	return( (p_mad->method & IB_MAD_METHOD_RESP_MASK) ==
+		IB_MAD_METHOD_RESP_MASK );
+}
+/*
+* PARAMETERS
+*	p_mad
+*		[in] Pointer to the MAD.
+*
+* RETURN VALUES
+*	Returns TRUE if the MAD is a response ('R' bit set),
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+#define IB_RMPP_TYPE_DATA		1
+#define IB_RMPP_TYPE_ACK		2
+#define IB_RMPP_TYPE_STOP		3
+#define IB_RMPP_TYPE_ABORT		4
+
+#define IB_RMPP_NO_RESP_TIME		0x1F
+#define IB_RMPP_FLAG_ACTIVE		0x01
+#define IB_RMPP_FLAG_FIRST		0x02
+#define IB_RMPP_FLAG_LAST		0x04
+
+#define IB_RMPP_STATUS_SUCCESS		0
+#define IB_RMPP_STATUS_RESX		1	/* resources exhausted */
+#define IB_RMPP_STATUS_T2L		118	/* time too long */
+#define IB_RMPP_STATUS_BAD_LEN		119	/* incon. last and payload len */
+#define IB_RMPP_STATUS_BAD_SEG		120	/* incon. first and segment no */
+#define IB_RMPP_STATUS_BADT		121	/* bad rmpp type */
+#define IB_RMPP_STATUS_W2S		122	/* newwindowlast too small */
+#define IB_RMPP_STATUS_S2B		123	/* segment no too big */
+#define IB_RMPP_STATUS_BAD_STATUS	124	/* illegal status */
+#define IB_RMPP_STATUS_UNV		125	/* unsupported version */
+#define IB_RMPP_STATUS_TMR		126	/* too many retries */
+#define IB_RMPP_STATUS_UNSPEC		127	/* unspecified */
+
+/****f* IBA Base: Types/ib_rmpp_is_flag_set
+* NAME
+*	ib_rmpp_is_flag_set
+*
+* DESCRIPTION
+*	Returns TRUE if the MAD has the given RMPP flag set.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_rmpp_is_flag_set(
+	IN	const	ib_rmpp_mad_t* const	p_rmpp_mad,
+	IN	const	uint8_t			flag )
+{
+	CL_ASSERT( p_rmpp_mad );
+	return( (p_rmpp_mad->rmpp_flags & flag) == flag );
+}
+/*
+* PARAMETERS
+*	p_rmpp_mad
+*		[in] Pointer to a MAD with an RMPP header.
+*
+*	flag
+*		[in] The RMPP flag being examined.
+*
+* RETURN VALUES
+*	Returns TRUE if the MAD has the given RMPP flag set.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_mad_t, ib_rmpp_mad_t
+*********/
+
+/* Store the response time value in bits [7:3] of the RMPP flags. */
+AL_INLINE void AL_API
+ib_rmpp_set_resp_time(
+	IN	ib_rmpp_mad_t* const	p_rmpp_mad,
+	IN	const	uint8_t		resp_time )
+{
+	CL_ASSERT( p_rmpp_mad );
+	p_rmpp_mad->rmpp_flags |= (resp_time << 3);
+}
+
+
+/* Extract the response time value from bits [7:3] of the RMPP flags. */
+AL_INLINE uint8_t AL_API
+ib_rmpp_get_resp_time(
+	IN	const	ib_rmpp_mad_t* const	p_rmpp_mad )
+{
+	CL_ASSERT( p_rmpp_mad );
+	return( (uint8_t)(p_rmpp_mad->rmpp_flags >> 3) );
+}
+
+/****d* IBA Base: Constants/IB_SMP_DIRECTION
+* NAME
+*	IB_SMP_DIRECTION
+*
+* DESCRIPTION
+*	The Direction bit for directed route SMPs.
+*
+* SOURCE
+*/
+#define IB_SMP_DIRECTION_HO	0x8000
+#define IB_SMP_DIRECTION	(CL_HTON16(IB_SMP_DIRECTION_HO))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMP_STATUS_MASK
+* NAME
+*	IB_SMP_STATUS_MASK
+*
+* DESCRIPTION
+*	Mask value for extracting status from a directed route SMP.
+*
+* SOURCE
+*/
+#define IB_SMP_STATUS_MASK_HO	0x7FFF
+#define IB_SMP_STATUS_MASK	(CL_HTON16(IB_SMP_STATUS_MASK_HO))
+/**********/
+
+/****s* IBA Base: Types/ib_smp_t
+* NAME
+*	ib_smp_t
+*
+* DESCRIPTION
+*	IBA defined SMP (14.2.1.2).
+*
+* SYNOPSIS
+*/
+#define IB_SMP_DATA_SIZE 64
+#include <complib/cl_packon.h>
+typedef struct _ib_smp
+{
+	uint8_t		base_ver;
+	uint8_t		mgmt_class;
+	uint8_t		class_ver;
+	uint8_t		method;
+	ib_net16_t	status;
+	uint8_t		hop_ptr;
+	uint8_t		hop_count;
+	ib_net64_t	trans_id;
+	ib_net16_t	attr_id;
+	ib_net16_t	resv;
+	ib_net32_t	attr_mod;
+	ib_net64_t	m_key;
+	ib_net16_t	dr_slid;
+	ib_net16_t	dr_dlid;
+	uint32_t	resv1[7];
+	uint8_t		data[IB_SMP_DATA_SIZE];
+	uint8_t		initial_path[IB_SUBNET_PATH_HOPS_MAX];
+	uint8_t		return_path[IB_SUBNET_PATH_HOPS_MAX];
+
+} PACK_SUFFIX ib_smp_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	base_ver
+*		MAD base format.
+*
+*	mgmt_class
+*		Class of operation.
+*
+*	class_ver
+*		Version of MAD class-specific format.
+*
+*	method
+*		Method to perform, including 'R' bit.
+*
+*	status
+*		Status of operation.
+*
+*	hop_ptr
+*		Hop pointer for directed route MADs.
+*
+*	hop_count
+*		Hop count for directed route MADs.
+*
+*	trans_id
+*		Transaction ID.
+*
+*	attr_id
+*		Attribute ID.
+*
+*	resv
+*		Reserved field.
+*
+*	attr_mod
+*		Attribute modifier.
+*
+*	m_key
+*		Management key value.
+*
+*	dr_slid
+*		Directed route source LID.
+*
+*	dr_dlid
+*		Directed route destination LID.
+*
+*	resv1
+*		Reserved for 64 byte alignment.
+*
+*	data
+*		MAD data payload.
+*
+*	initial_path
+*		Outbound port list.
+*
+*	return_path
+*		Inbound port list.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_smp_get_status
+* NAME
+*	ib_smp_get_status
+*
+* DESCRIPTION
+*	Returns the SMP status value in network order.
+*
+* SYNOPSIS
+*/
+AL_INLINE ib_net16_t AL_API
+ib_smp_get_status(
+	IN	const	ib_smp_t* const	p_smp )
+{
+	return( (ib_net16_t)(p_smp->status & IB_SMP_STATUS_MASK) );
+}
+/*
+* PARAMETERS
+*	p_smp
+*		[in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+*	Returns the SMP status value in network order.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_is_response
+* NAME
+*	ib_smp_is_response
+*
+* DESCRIPTION
+*	Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_smp_is_response(
+	IN	const	ib_smp_t* const	p_smp )
+{
+	return( ib_mad_is_response( (const ib_mad_t*)p_smp ) );
+}
+/*
+* PARAMETERS
+*	p_smp
+*		[in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+*	Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_is_d
+* NAME
+*	ib_smp_is_d
+*
+* DESCRIPTION
+*	Returns TRUE if the SMP 'D' (direction) bit is set.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_smp_is_d(
+	IN	const	ib_smp_t* const	p_smp )
+{
+	return( (p_smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION );
+}
+/*
+* PARAMETERS
+*	p_smp
+*		[in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+*	Returns TRUE if the SMP 'D' (direction) bit is set.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_init_new
+* NAME
+*	ib_smp_init_new
+*
+* DESCRIPTION
+*	Initializes a MAD common header.
+*
+* TODO
+*	This is too big for inlining, but leave it here for now
+*	since there is not yet another convenient spot.
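+*
+*	(A checking sketch follows; an editor's addition, not part of the
+*	original header.)
+*/
+
+/*
+ * Editor's illustration only: a directed-route SMP reply is healthy when
+ * the R and D bits are set and the status bits are clear.  The helper
+ * name is hypothetical; the accessors are defined above.
+ */
+AL_INLINE boolean_t AL_API
+ib_smp_example_reply_ok(
+	IN	const	ib_smp_t* const	p_smp )
+{
+	return( (boolean_t)(ib_smp_is_response( p_smp ) &&
+		ib_smp_is_d( p_smp ) &&
+		(ib_smp_get_status( p_smp ) == 0)) );
+}
+
+/*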
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_smp_init_new(
+	IN	ib_smp_t* const		p_smp,
+	IN	const	uint8_t		method,
+	IN	const	ib_net64_t	trans_id,
+	IN	const	ib_net16_t	attr_id,
+	IN	const	ib_net32_t	attr_mod,
+	IN	const	uint8_t		hop_count,
+	IN	const	ib_net64_t	m_key,
+	IN	const	uint8_t*	path_out,
+	IN	const	ib_net16_t	dr_slid,
+	IN	const	ib_net16_t	dr_dlid )
+{
+	CL_ASSERT( p_smp );
+	CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX );
+	p_smp->base_ver = 1;
+	p_smp->mgmt_class = IB_MCLASS_SUBN_DIR;
+	p_smp->class_ver = 1;
+	p_smp->method = method;
+	p_smp->status = 0;
+	p_smp->hop_ptr = 0;
+	p_smp->hop_count = hop_count;
+	p_smp->trans_id = trans_id;
+	p_smp->attr_id = attr_id;
+	p_smp->resv = 0;
+	p_smp->attr_mod = attr_mod;
+	p_smp->m_key = m_key;
+	p_smp->dr_slid = dr_slid;
+	p_smp->dr_dlid = dr_dlid;
+
+	cl_memclr( p_smp->resv1,
+		sizeof(p_smp->resv1) +
+		sizeof(p_smp->data) +
+		sizeof(p_smp->initial_path) +
+		sizeof(p_smp->return_path) );
+
+	/* copy the path */
+	cl_memcpy( &p_smp->initial_path, path_out,
+		sizeof( p_smp->initial_path ) );
+}
+/*
+* PARAMETERS
+*	p_smp
+*		[in] Pointer to the SMP packet.
+*
+*	method
+*		[in] Method to perform, including 'R' bit.
+*
+*	trans_id
+*		[in] Transaction ID.
+*
+*	attr_id
+*		[in] Attribute ID.
+*
+*	attr_mod
+*		[in] Attribute modifier.
+*
+*	hop_count
+*		[in] Number of hops in the path.
+*
+*	m_key
+*		[in] Management key for this SMP.
+*
+*	path_out
+*		[in] Port array for outbound path.
+*
+*	dr_slid
+*		[in] Directed route source LID.
+*
+*	dr_dlid
+*		[in] Directed route destination LID.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*	Payload area is initialized to zero.
+*
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_get_payload_ptr
+* NAME
+*	ib_smp_get_payload_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the SMP payload area.
+*
+* SYNOPSIS
+*/
+AL_INLINE void* AL_API
+ib_smp_get_payload_ptr(
+	IN	const	ib_smp_t* const	p_smp )
+{
+	return( (void*)p_smp->data );
+}
+/*
+* PARAMETERS
+*	p_smp
+*		[in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+*	Pointer to SMP payload area.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+/****s* IBA Base: Types/ib_node_info_t
+* NAME
+*	ib_node_info_t
+*
+* DESCRIPTION
+*	IBA defined NodeInfo. (14.2.5.3)
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_node_info
+{
+	uint8_t		base_version;
+	uint8_t		class_version;
+	uint8_t		node_type;
+	uint8_t		num_ports;
+	ib_net64_t	sys_guid;
+	ib_net64_t	node_guid;
+	ib_net64_t	port_guid;
+	ib_net16_t	partition_cap;
+	ib_net16_t	device_id;
+	ib_net32_t	revision;
+	ib_net32_t	port_num_vendor_id;
+
+} PACK_SUFFIX ib_node_info_t;
+#include <complib/cl_packoff.h>
+/************/
+
+/****s* IBA Base: Types/ib_sa_mad_t
+* NAME
+*	ib_sa_mad_t
+*
+* DESCRIPTION
+*	IBA defined SA MAD format (15.2.1).
(15.2.1) +* +* SYNOPSIS +*/ +#define IB_SA_DATA_SIZE 200 + +#include +typedef struct _ib_sa_mad +{ + uint8_t base_ver; + uint8_t mgmt_class; + uint8_t class_ver; + uint8_t method; + ib_net16_t status; + ib_net16_t resv; + ib_net64_t trans_id; + ib_net16_t attr_id; + ib_net16_t resv1; + ib_net32_t attr_mod; + + uint8_t rmpp_version; + uint8_t rmpp_type; + uint8_t rmpp_flags; + uint8_t rmpp_status; + + ib_net32_t seg_num; + ib_net32_t paylen_newwin; + + ib_net64_t sm_key; + + ib_net16_t attr_offset; + ib_net16_t resv3; + + ib_net64_t comp_mask; + + uint8_t data[IB_SA_DATA_SIZE]; +} PACK_SUFFIX ib_sa_mad_t; +#include +/**********/ +#define IB_SA_MAD_HDR_SIZE (sizeof(ib_sa_mad_t) - IB_SA_DATA_SIZE) + + + +AL_INLINE uint32_t AL_API +ib_get_attr_size( + IN const ib_net16_t attr_offset ) +{ + return( ((uint32_t)cl_ntoh16( attr_offset )) << 3 ); +} + +AL_INLINE ib_net16_t AL_API +ib_get_attr_offset( + IN const uint32_t attr_size ) +{ + if( attr_size & 0x07 ) + return( cl_hton16( (uint16_t)(attr_size >> 3) + 1 ) ); + else + return( cl_hton16( (uint16_t)(attr_size >> 3) ) ); +} + +/****f* IBA Base: Types/ib_sa_mad_get_payload_ptr +* NAME +* ib_sa_mad_get_payload_ptr +* +* DESCRIPTION +* Gets a pointer to the SA MAD's payload area. +* +* SYNOPSIS +*/ +AL_INLINE void* AL_API +ib_sa_mad_get_payload_ptr( + IN const ib_sa_mad_t* const p_sa_mad ) +{ + return( (void*)p_sa_mad->data ); +} +/* +* PARAMETERS +* p_smp +* [in] Pointer to the SA MAD packet. +* +* RETURN VALUES +* Pointer to SA MAD payload area. +* +* NOTES +* +* SEE ALSO +* ib_mad_t +*********/ + +#define IB_NODE_INFO_PORT_NUM_MASK (CL_NTOH32(0xFF000000)) +#define IB_NODE_INFO_VEND_ID_MASK (CL_NTOH32(0x00FFFFFF)) +#if CPU_LE + #define IB_NODE_INFO_PORT_NUM_SHIFT 0 +#else + #define IB_NODE_INFO_PORT_NUM_SHIFT 24 +#endif + +/****f* IBA Base: Types/ib_node_info_get_local_port_num +* NAME +* ib_node_info_get_local_port_num +* +* DESCRIPTION +* Gets a the local port number from the NodeInfo attribute. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_node_info_get_local_port_num( + IN const ib_node_info_t* const p_ni ) +{ + return( (uint8_t)(( p_ni->port_num_vendor_id & + IB_NODE_INFO_PORT_NUM_MASK ) + >> IB_NODE_INFO_PORT_NUM_SHIFT )); +} +/* +* PARAMETERS +* p_ni +* [in] Pointer to a NodeInfo attribute. +* +* RETURN VALUES +* Local port number that returned the attribute. +* +* NOTES +* +* SEE ALSO +* ib_node_info_t +*********/ + +/****f* IBA Base: Types/ib_node_info_get_vendor_id +* NAME +* ib_node_info_get_vendor_id +* +* DESCRIPTION +* Gets the VendorID from the NodeInfo attribute. +* +* SYNOPSIS +*/ +AL_INLINE ib_net32_t AL_API +ib_node_info_get_vendor_id( + IN const ib_node_info_t* const p_ni ) +{ + return( (ib_net32_t)( p_ni->port_num_vendor_id & + IB_NODE_INFO_VEND_ID_MASK ) ); +} +/* +* PARAMETERS +* p_ni +* [in] Pointer to a NodeInfo attribute. +* +* RETURN VALUES +* VendorID that returned the attribute. +* +* NOTES +* +* SEE ALSO +* ib_node_info_t +*********/ + +#define IB_NODE_DESCRIPTION_SIZE 64 + +#include +typedef struct _ib_node_desc +{ + // Node String is an array of UTF-8 character that + // describes the node in text format + // Note that this string is NOT NULL TERMINATED! 
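+	// Consumers must bound any copy by IB_NODE_DESCRIPTION_SIZE and
+	// append their own terminator before treating it as a C string.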
+ uint8_t description[IB_NODE_DESCRIPTION_SIZE]; + +} PACK_SUFFIX ib_node_desc_t; +#include + +#include +typedef struct _ib_node_record_t +{ + ib_net16_t lid; + ib_net16_t resv; + ib_node_info_t node_info; + ib_node_desc_t node_desc; + uint8_t pad[4]; + +} PACK_SUFFIX ib_node_record_t; +#include + +/****s* IBA Base: Types/ib_port_info_t +* NAME +* ib_port_info_t +* +* DESCRIPTION +* IBA defined PortInfo. (14.2.5.6) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_port_info +{ + ib_net64_t m_key; + ib_net64_t subnet_prefix; + ib_net16_t base_lid; + ib_net16_t master_sm_base_lid; + ib_net32_t capability_mask; + ib_net16_t diag_code; + ib_net16_t m_key_lease_period; + uint8_t local_port_num; + uint8_t link_width_enabled; + uint8_t link_width_supported; + uint8_t link_width_active; + uint8_t state_info1; /* LinkSpeedSupported and PortState */ + uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */ + uint8_t mkey_lmc; + uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */ + uint8_t mtu_smsl; + uint8_t vl_cap; /* VLCap and InitType */ + uint8_t vl_high_limit; + uint8_t vl_arb_high_cap; + uint8_t vl_arb_low_cap; + uint8_t mtu_cap; + uint8_t vl_stall_life; + uint8_t vl_enforce; + ib_net16_t m_key_violations; + ib_net16_t p_key_violations; + ib_net16_t q_key_violations; + uint8_t guid_cap; + uint8_t subnet_timeout; /* cli_rereg(1b), resrv( +2b), timeout(5b) */ + uint8_t resp_time_value; + uint8_t error_threshold; + +} PACK_SUFFIX ib_port_info_t; +#include +/************/ + +#define IB_PORT_STATE_MASK 0x0F +#define IB_PORT_LMC_MASK 0x07 +#define IB_PORT_LMC_MAX 0x07 +#define IB_PORT_MPB_MASK 0xC0 +#define IB_PORT_MPB_SHIFT 6 +#define IB_PORT_LINK_SPEED_SHIFT 4 +#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F +#define IB_PORT_PHYS_STATE_MASK 0xF0 +#define IB_PORT_PHYS_STATE_SHIFT 4 +#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F + +#define IB_PORT_CAP_RESV0 (CL_NTOH32(0x00000001)) +#define IB_PORT_CAP_IS_SM (CL_NTOH32(0x00000002)) +#define IB_PORT_CAP_HAS_NOTICE (CL_NTOH32(0x00000004)) +#define IB_PORT_CAP_HAS_TRAP (CL_NTOH32(0x00000008)) +#define IB_PORT_CAP_HAS_IPD (CL_NTOH32(0x00000010)) +#define IB_PORT_CAP_HAS_AUTO_MIG (CL_NTOH32(0x00000020)) +#define IB_PORT_CAP_HAS_SL_MAP (CL_NTOH32(0x00000040)) +#define IB_PORT_CAP_HAS_NV_MKEY (CL_NTOH32(0x00000080)) +#define IB_PORT_CAP_HAS_NV_PKEY (CL_NTOH32(0x00000100)) +#define IB_PORT_CAP_HAS_LED_INFO (CL_NTOH32(0x00000200)) +#define IB_PORT_CAP_SM_DISAB (CL_NTOH32(0x00000400)) +#define IB_PORT_CAP_HAS_SYS_IMG_GUID (CL_NTOH32(0x00000800)) +#define IB_PORT_CAP_HAS_PKEY_SW_EXT_PORT_TRAP (CL_NTOH32(0x00001000)) +#define IB_PORT_CAP_RESV13 (CL_NTOH32(0x00002000)) +#define IB_PORT_CAP_RESV14 (CL_NTOH32(0x00004000)) +#define IB_PORT_CAP_RESV15 (CL_NTOH32(0x00008000)) +#define IB_PORT_CAP_HAS_COM_MGT (CL_NTOH32(0x00010000)) +#define IB_PORT_CAP_HAS_SNMP (CL_NTOH32(0x00020000)) +#define IB_PORT_CAP_REINIT (CL_NTOH32(0x00040000)) +#define IB_PORT_CAP_HAS_DEV_MGT (CL_NTOH32(0x00080000)) +#define IB_PORT_CAP_HAS_VEND_CLS (CL_NTOH32(0x00100000)) +#define IB_PORT_CAP_HAS_DR_NTC (CL_NTOH32(0x00200000)) +#define IB_PORT_CAP_HAS_CAP_NTC (CL_NTOH32(0x00400000)) +#define IB_PORT_CAP_HAS_BM (CL_NTOH32(0x00800000)) +#define IB_PORT_CAP_HAS_LINK_RT_LATENCY (CL_NTOH32(0x01000000)) +#define IB_PORT_CAP_HAS_CLIENT_REREG (CL_NTOH32(0x02000000)) +#define IB_PORT_CAP_RESV26 (CL_NTOH32(0x04000000)) +#define IB_PORT_CAP_RESV27 (CL_NTOH32(0x08000000)) +#define IB_PORT_CAP_RESV28 
(CL_NTOH32(0x10000000)) +#define IB_PORT_CAP_RESV29 (CL_NTOH32(0x20000000)) +#define IB_PORT_CAP_RESV30 (CL_NTOH32(0x40000000)) +#define IB_PORT_CAP_RESV31 (CL_NTOH32(0x80000000)) + +/****f* IBA Base: Types/ib_port_info_get_port_state +* NAME +* ib_port_info_get_port_state +* +* DESCRIPTION +* Returns the port state. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_port_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->state_info1 & IB_PORT_STATE_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Port state. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_port_state +* NAME +* ib_port_info_set_port_state +* +* DESCRIPTION +* Sets the port state. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_port_state( + IN ib_port_info_t* const p_pi, + IN const uint8_t port_state ) +{ + p_pi->state_info1 = (uint8_t)((p_pi->state_info1 & 0xF0) | port_state ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* port_state +* [in] Port state value to set. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_vl_cap +* NAME +* ib_port_info_get_vl_cap +* +* DESCRIPTION +* Gets the VL Capability of a port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_vl_cap( + IN const ib_port_info_t* const p_pi) +{ + return((p_pi->vl_cap >> 4) & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* VL_CAP field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_init_type +* NAME +* ib_port_info_get_init_type +* +* DESCRIPTION +* Gets the init type of a port. +* +* SYNOPSIS +*/ +static inline uint8_t +ib_port_info_get_init_type( + IN const ib_port_info_t* const p_pi) +{ + return (uint8_t) (p_pi->vl_cap & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* InitType field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_op_vls +* NAME +* ib_port_info_get_op_vls +* +* DESCRIPTION +* Gets the operational VLs on a port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_op_vls( + IN const ib_port_info_t* const p_pi) +{ + return((p_pi->vl_enforce >> 4) & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* OP_VLS field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_op_vls +* NAME +* ib_port_info_set_op_vls +* +* DESCRIPTION +* Sets the operational VLs on a port. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_op_vls( + IN ib_port_info_t* const p_pi, + IN const uint8_t op_vls ) +{ + p_pi->vl_enforce = (uint8_t)((p_pi->vl_enforce & 0x0F) | (op_vls << 4) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* op_vls +* [in] Encoded operation VLs value. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_state_no_change +* NAME +* ib_port_info_set_state_no_change +* +* DESCRIPTION +* Sets the port state fields to the value for "no change". +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_state_no_change( + IN ib_port_info_t* const p_pi ) +{ + ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE ); + p_pi->state_info2 = 0; +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* None. 
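+*	(A value of zero in state_info2 encodes "no change" for both the
+*	port physical state and the link down default state.)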
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_speed_sup +* NAME +* ib_port_info_get_link_speed_sup +* +* DESCRIPTION +* Returns the encoded value for the link speed supported. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_link_speed_sup( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->state_info1 & + IB_PORT_LINK_SPEED_SUPPORTED_MASK) >> + IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the link speed supported. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_link_speed_sup +* NAME +* ib_port_info_set_link_speed_sup +* +* DESCRIPTION +* Given an integer of the supported link speed supported. +* Set the appropriate bits in state_info1 +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_link_speed_sup( + IN uint8_t const speed, + IN ib_port_info_t* p_pi ) +{ + p_pi->state_info1 = + ( ~IB_PORT_LINK_SPEED_SUPPORTED_MASK & p_pi->state_info1 ) | + ( IB_PORT_LINK_SPEED_SUPPORTED_MASK & + (speed << IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* speed +* [in] Supported Speeds Code. +* +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_port_phys_state +* NAME +* ib_port_info_get_port_phys_state +* +* DESCRIPTION +* Returns the encoded value for the port physical state. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_port_phys_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->state_info2 & + IB_PORT_PHYS_STATE_MASK) >> + IB_PORT_PHYS_STATE_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the port physical state. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_port_phys_state +* NAME +* ib_port_info_set_port_phys_state +* +* DESCRIPTION +* Given an integer of the port physical state, +* Set the appropriate bits in state_info2 +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_port_phys_state( + IN uint8_t const phys_state, + IN ib_port_info_t* p_pi ) +{ + p_pi->state_info2 = + ( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) | + ( IB_PORT_PHYS_STATE_MASK & + (phys_state << IB_PORT_PHYS_STATE_SHIFT) ); +} +/* +* PARAMETERS +* phys_state +* [in] port physical state. +* +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_down_def_state +* NAME +* ib_port_info_get_link_down_def_state +* +* DESCRIPTION +* Returns the link down default state. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_link_down_def_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* link down default state of the port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_link_down_def_state +* NAME +* ib_port_info_set_link_down_def_state +* +* DESCRIPTION +* Sets the link down default state of the port. 
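+*	Encodings follow the IBA LinkDownDefaultState field (14.2.5.6):
+*	0 = no change on Set, 1 = Sleep, 2 = Polling; remaining values
+*	are reserved.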
+* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_link_down_def_state( + IN ib_port_info_t* const p_pi, + IN const uint8_t link_dwn_state ) +{ + p_pi->state_info2 = (uint8_t)((p_pi->state_info2 & 0xF0) | link_dwn_state ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* link_dwn_state +* [in] Link down default state of the port. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_speed_active +* NAME +* ib_port_info_get_link_speed_active +* +* DESCRIPTION +* Returns the Link Speed Active value assigned to this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_link_speed_active( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->link_speed & + IB_PORT_LINK_SPEED_ACTIVE_MASK) >> + IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the link speed active value assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +#define IB_LINK_WIDTH_ACTIVE_1X 1 +#define IB_LINK_WIDTH_ACTIVE_4X 2 +#define IB_LINK_WIDTH_ACTIVE_12X 8 +#define IB_LINK_SPEED_ACTIVE_2_5 1 +#define IB_LINK_SPEED_ACTIVE_5 2 +#define IB_LINK_SPEED_ACTIVE_10 4 + +/* following v1 ver1.2 p901 */ +#define IB_PATH_RECORD_RATE_2_5_GBS 2 +#define IB_PATH_RECORD_RATE_10_GBS 3 +#define IB_PATH_RECORD_RATE_30_GBS 4 +#define IB_PATH_RECORD_RATE_5_GBS 5 +#define IB_PATH_RECORD_RATE_20_GBS 6 +#define IB_PATH_RECORD_RATE_40_GBS 7 +#define IB_PATH_RECORD_RATE_60_GBS 8 +#define IB_PATH_RECORD_RATE_80_GBS 9 +#define IB_PATH_RECORD_RATE_120_GBS 10 + +#define IB_MIN_RATE IB_PATH_RECORD_RATE_2_5_GBS +#define IB_MAX_RATE IB_PATH_RECORD_RATE_120_GBS + +/****f* IBA Base: Types/ib_port_info_compute_rate +* NAME +* ib_port_info_compute_rate +* +* DESCRIPTION +* Returns the encoded value for the path rate. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_compute_rate( + IN const ib_port_info_t* const p_pi ) +{ + uint8_t rate = 0; + + switch (ib_port_info_get_link_speed_active(p_pi)) + { + case IB_LINK_SPEED_ACTIVE_2_5: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate = IB_PATH_RECORD_RATE_30_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + } + break; + case IB_LINK_SPEED_ACTIVE_5: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_5_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_20_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate = IB_PATH_RECORD_RATE_60_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_5_GBS; + break; + } + break; + case IB_LINK_SPEED_ACTIVE_10: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_40_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate =IB_PATH_RECORD_RATE_120_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + } + break; + default: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + } + + return rate; +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the link speed supported. 
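+*	(For example, an active link width of 4X at an active speed of
+*	5.0 Gb/s yields IB_PATH_RECORD_RATE_20_GBS.)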
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_path_get_ipd +* NAME +* ib_path_get_ipd +* +* DESCRIPTION +* Returns the encoded value for the inter packet delay. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_path_get_ipd( + IN uint8_t local_link_width_supported, + IN uint8_t path_rec_rate ) +{ + uint8_t ipd = 0; + + switch(local_link_width_supported) + { + /* link_width_supported = 1: 1x */ + case 1: + break; + + /* link_width_supported = 3: 1x or 4x */ + case 3: + switch(path_rec_rate & 0x3F) + { + case IB_PATH_RECORD_RATE_2_5_GBS: + ipd = 3; + break; + default: + break; + } + break; + + /* link_width_supported = 11: 1x or 4x or 12x */ + case 11: + switch(path_rec_rate & 0x3F) + { + case IB_PATH_RECORD_RATE_2_5_GBS: + ipd = 11; + break; + case IB_PATH_RECORD_RATE_10_GBS: + ipd = 2; + break; + default: + break; + } + break; + + default: + break; + } + + return ipd; +} +/* +* PARAMETERS +* local_link_width_supported +* [in] link with supported for this port +* +* path_rec_rate +* [in] rate field of the path record +* +* RETURN VALUES +* Returns the ipd +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_mtu_cap +* NAME +* ib_port_info_get_mtu_cap +* +* DESCRIPTION +* Returns the encoded value for the maximum MTU supported by this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_mtu_cap( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->mtu_cap & 0x0F) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the LMC value assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_neighbor_mtu +* NAME +* ib_port_info_get_neighbor_mtu +* +* DESCRIPTION +* Returns the encoded value for the neighbor MTU at this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_neighbor_mtu( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->mtu_smsl & 0xF0) >> 4) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the neighbor MTU at this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_neighbor_mtu +* NAME +* ib_port_info_set_neighbor_mtu +* +* DESCRIPTION +* Sets the Neighbor MTU value in the PortInfo attribute. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_neighbor_mtu( + IN ib_port_info_t* const p_pi, + IN const uint8_t mtu ) +{ + CL_ASSERT( mtu <= 5 ); + CL_ASSERT( mtu != 0 ); + p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0x0F) | (mtu << 4)); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* mtu +* [in] Encoded MTU value to set +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + + +/****f* IBA Base: Types/ib_port_info_get_sm_sl +* NAME +* ib_port_info_get_sm_sl +* +* DESCRIPTION +* Returns the encoded value for the SM sl at this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_sm_sl( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->mtu_smsl & 0x0F) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the neighbor MTU at this port. +* +* NOTES +* +* SEE ALSO +*********/ +/****f* IBA Base: Types/ib_port_info_set_sm_sl +* NAME +* ib_port_info_set_sm_sl +* +* DESCRIPTION +* Sets the SM sl value in the PortInfo attribute. 
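+*	The SM SL occupies the low 4 bits of the mtu_smsl field, so valid
+*	values are 0-15.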
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_port_info_set_sm_sl(
+	IN		ib_port_info_t* const	p_pi,
+	IN	const	uint8_t			sm_sl )
+{
+	CL_ASSERT( sm_sl <= 0x0F );
+	p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0xF0) | sm_sl );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	sm_sl
+*		[in] Encoded SM SL value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_timeout
+* NAME
+*	ib_port_info_set_timeout
+*
+* DESCRIPTION
+*	Sets the encoded subnet timeout value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_port_info_set_timeout(
+	IN		ib_port_info_t* const	p_pi,
+	IN	const	uint8_t			timeout )
+{
+	CL_ASSERT( timeout <= 0x1F );
+	p_pi->subnet_timeout =
+		(uint8_t)(
+		(p_pi->subnet_timeout & 0x80) | (timeout & 0x1F));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	timeout
+*		[in] Encoded timeout value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_client_rereg
+* NAME
+*	ib_port_info_set_client_rereg
+*
+* DESCRIPTION
+*	Sets the encoded client reregistration bit value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_port_info_set_client_rereg(
+	IN		ib_port_info_t* const	p_pi,
+	IN	const	uint8_t			client_rereg )
+{
+	CL_ASSERT( client_rereg <= 0x1 );
+	p_pi->subnet_timeout =
+		(uint8_t)(
+		(p_pi->subnet_timeout & 0x1F) | ((client_rereg << 7) & 0x80));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	client_rereg
+*		[in] Client reregistration value to set (either 1 or 0).
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_timeout
+* NAME
+*	ib_port_info_get_timeout
+*
+* DESCRIPTION
+*	Gets the encoded subnet timeout value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_port_info_get_timeout(
+	IN	ib_port_info_t const*	p_pi )
+{
+	return(p_pi->subnet_timeout & 0x1F );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	The encoded timeout value
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_client_rereg
+* NAME
+*	ib_port_info_get_client_rereg
+*
+* DESCRIPTION
+*	Gets the encoded client reregistration bit value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_port_info_get_client_rereg(
+	IN	ib_port_info_t const*	p_pi )
+{
+	return ( (p_pi->subnet_timeout & 0x80 ) >> 7);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Client reregistration value (either 1 or 0).
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_hoq_lifetime
+* NAME
+*	ib_port_info_set_hoq_lifetime
+*
+* DESCRIPTION
+*	Sets the Head of Queue Lifetime for which a packet can live in the head
+*	of VL queue
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_port_info_set_hoq_lifetime(
+	IN		ib_port_info_t* const	p_pi,
+	IN	const	uint8_t			hoq_life )
+{
+	p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) |
+		(p_pi->vl_stall_life & 0xe0));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	hoq_life
+*		[in] Encoded lifetime value to set
+*
+* RETURN VALUES
+*	None.
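+*	(The lifetime uses the low 5 bits of vl_stall_life; the top 3 bits
+*	hold the VL stall count.)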
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_hoq_lifetime +* NAME +* ib_port_info_get_hoq_lifetime +* +* DESCRIPTION +* Gets the Head of Queue Lifetime for which a packet can live in the head +* of VL queue +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_hoq_lifetime( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->vl_stall_life & 0x1f) ); +} + +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Encoded lifetime value +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_vl_stall_count +* NAME +* ib_port_info_set_vl_stall_count +* +* DESCRIPTION +* Sets the VL Stall Count which define the number of contiguous +* HLL (hoq) drops that will put the VL into stalled mode. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_vl_stall_count( + IN ib_port_info_t* const p_pi, + IN const uint8_t vl_stall_count ) +{ + p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) | + ((vl_stall_count << 5) & 0xe0)); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* vl_stall_count +* [in] value to set +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_vl_stall_count +* NAME +* ib_port_info_get_vl_stall_count +* +* DESCRIPTION +* Gets the VL Stall Count which define the number of contiguous +* HLL (hoq) drops that will put the VL into stalled mode +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_vl_stall_count( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->vl_stall_life & 0xe0) >> 5); +} + +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* vl stall count +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_lmc +* NAME +* ib_port_info_get_lmc +* +* DESCRIPTION +* Returns the LMC value assigned to this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_lmc( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the LMC value assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_lmc +* NAME +* ib_port_info_set_lmc +* +* DESCRIPTION +* Sets the LMC value in the PortInfo attribute. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_lmc( + IN ib_port_info_t* const p_pi, + IN const uint8_t lmc ) +{ + CL_ASSERT( lmc <= IB_PORT_LMC_MAX ); + p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* lmc +* [in] LMC value to set, must be less than 7. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_speed_enabled +* NAME +* ib_port_info_get_link_speed_enabled +* +* DESCRIPTION +* Returns the link speed enabled value assigned to this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_link_speed_enabled( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Port state. 
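+*	(The value returned is the LinkSpeedEnabled field, taken from the
+*	low 4 bits of link_speed.)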
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_link_speed_enabled +* NAME +* ib_port_info_set_link_speed_enabled +* +* DESCRIPTION +* Sets the link speed enabled value in the PortInfo attribute. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_link_speed_enabled( + IN ib_port_info_t* const p_pi, + IN const uint8_t link_speed_enabled ) +{ + p_pi->link_speed = (uint8_t)((p_pi->link_speed & 0xF0) | link_speed_enabled ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* link_speed_enabled +* [in] link speed enabled value to set. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_mpb +* NAME +* ib_port_info_get_mpb +* +* DESCRIPTION +* Returns the M_Key protect bits assigned to this port. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_mpb( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->mkey_lmc & IB_PORT_MPB_MASK) >> + IB_PORT_MPB_SHIFT) ); +} +/* +* PARAMETERS +* p_ni +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the M_Key protect bits assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_mpb +* NAME +* ib_port_info_set_mpb +* +* DESCRIPTION +* Set the M_Key protect bits of this port. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_mpb( + IN ib_port_info_t* p_pi, + IN uint8_t mpb ) +{ + p_pi->mkey_lmc = + (~IB_PORT_MPB_MASK & p_pi->mkey_lmc) | + ( IB_PORT_MPB_MASK & (mpb << IB_PORT_MPB_SHIFT) ); +} +/* +* PARAMETERS +* mpb +* [in] M_Key protect bits +* p_ni +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd +* NAME +* ib_port_info_get_local_phy_err_thd +* +* DESCRIPTION +* Returns the Phy Link Threshold +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_local_phy_err_thd( + IN const ib_port_info_t* const p_pi ) +{ + return (uint8_t)( (p_pi->error_threshold & 0xF0) >> 4); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the Phy Link error threshold assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_overrun_err_thd +* NAME +* ib_port_info_get_local_overrun_err_thd +* +* DESCRIPTION +* Returns the Credits Overrun Errors Threshold +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_port_info_get_overrun_err_thd( + IN const ib_port_info_t* const p_pi ) +{ + return (uint8_t)(p_pi->error_threshold & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the Credits Overrun errors threshold assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_phy_and_overrun_err_thd +* NAME +* ib_port_info_set_phy_and_overrun_err_thd +* +* DESCRIPTION +* Sets the Phy Link and Credits Overrun Errors Threshold +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_port_info_set_phy_and_overrun_err_thd( + IN ib_port_info_t* const p_pi, + IN uint8_t phy_threshold, + IN uint8_t overrun_threshold ) +{ + p_pi->error_threshold = + (uint8_t)( ((phy_threshold & 0x0F) << 4) | (overrun_threshold & 0x0F) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. 
+* +* phy_threshold +* [in] Physical Link Errors Threshold above which Trap 129 is generated +* +* overrun_threshold +* [in] Credits overrun Errors Threshold above which Trap 129 is generated +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +typedef uint8_t ib_svc_name_t[64]; + +#include +typedef struct _ib_service_record +{ + ib_net64_t service_id; + ib_gid_t service_gid; + ib_net16_t service_pkey; + ib_net16_t resv; + ib_net32_t service_lease; + uint8_t service_key[16]; + ib_svc_name_t service_name; + uint8_t service_data8[16]; + ib_net16_t service_data16[8]; + ib_net32_t service_data32[4]; + ib_net64_t service_data64[2]; + +} PACK_SUFFIX ib_service_record_t; +#include + +#include +typedef struct _ib_portinfo_record +{ + ib_net16_t lid; + uint8_t port_num; + uint8_t resv; + ib_port_info_t port_info; + uint8_t pad[6]; + +} PACK_SUFFIX ib_portinfo_record_t; +#include + +#include +typedef struct _ib_link_record +{ + ib_net16_t from_lid; + uint8_t from_port_num; + uint8_t to_port_num; + ib_net16_t to_lid; + uint8_t pad[2]; + +} PACK_SUFFIX ib_link_record_t; +#include + +#include +typedef struct _ib_sminfo_record +{ + ib_net16_t lid; + uint16_t resv0; + ib_sm_info_t sm_info; + uint8_t pad[7]; + +} PACK_SUFFIX ib_sminfo_record_t; +#include + +/****s* IBA Base: Types/ib_lft_record_t +* NAME +* ib_lft_record_t +* +* DESCRIPTION +* IBA defined LinearForwardingTableRecord (15.2.5.6) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_lft_record +{ + ib_net16_t lid; + ib_net16_t block_num; + uint32_t resv0; + uint8_t lft[64]; +} PACK_SUFFIX ib_lft_record_t; +#include +/************/ + +/****s* IBA Base: Types/ib_mft_record_t +* NAME +* ib_mft_record_t +* +* DESCRIPTION +* IBA defined MulticastForwardingTableRecord (15.2.5.8) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_mft_record +{ + ib_net16_t lid; + ib_net16_t position_block_num; + uint32_t resv0; + ib_net16_t mft[IB_MCAST_BLOCK_SIZE]; +} PACK_SUFFIX ib_mft_record_t; +#include +/************/ + +/****s* IBA Base: Types/ib_switch_info_t +* NAME +* ib_switch_info_t +* +* DESCRIPTION +* IBA defined SwitchInfo. (14.2.5.4) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_switch_info +{ + ib_net16_t lin_cap; + ib_net16_t rand_cap; + ib_net16_t mcast_cap; + ib_net16_t lin_top; + uint8_t def_port; + uint8_t def_mcast_pri_port; + uint8_t def_mcast_not_port; + uint8_t life_state; + ib_net16_t lids_per_port; + ib_net16_t enforce_cap; + uint8_t flags; + +} PACK_SUFFIX ib_switch_info_t; +#include +/************/ + +#include +typedef struct _ib_switch_info_record +{ + ib_net16_t lid; + uint16_t resv0; + ib_switch_info_t switch_info; + uint8_t pad[3]; + +} PACK_SUFFIX ib_switch_info_record_t; +#include + +#define IB_SWITCH_PSC 0x04 + +/****f* IBA Base: Types/ib_switch_info_get_state_change +* NAME +* ib_switch_info_get_state_change +* +* DESCRIPTION +* Returns the value of the state change flag. +* +* SYNOPSIS +*/ +AL_INLINE boolean_t AL_API +ib_switch_info_get_state_change( + IN const ib_switch_info_t* const p_si ) +{ + return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC ); +} +/* +* PARAMETERS +* p_si +* [in] Pointer to a SwitchInfo attribute. +* +* RETURN VALUES +* Returns the value of the state change flag. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_switch_info_clear_state_change +* NAME +* ib_switch_info_clear_state_change +* +* DESCRIPTION +* Clears the switch's state change bit. 
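+*	Note that this only updates the in-memory attribute; the change
+*	must still be written back to the switch to take effect.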
+* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_switch_info_clear_state_change( + IN ib_switch_info_t* const p_si ) +{ + p_si->life_state = (uint8_t)(p_si->life_state & 0xFB); +} +/* +* PARAMETERS +* p_ni +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the LMC value assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_switch_info_is_enhanced_port0 +* NAME +* ib_switch_info_is_enhanced_port0 +* +* DESCRIPTION +* Returns TRUE if the enhancedPort0 bit is on (meaning the switch +* port zero supports enhanced functions). +* Returns FALSE otherwise. +* +* SYNOPSIS +*/ +AL_INLINE boolean_t AL_API +ib_switch_info_is_enhanced_port0( + IN const ib_switch_info_t* const p_si ) +{ + return( (p_si->flags & 0x08) == 0x08 ); +} +/* +* PARAMETERS +* p_si +* [in] Pointer to a SwitchInfo attribute. +* +* RETURN VALUES +* Returns TRUE if the switch supports enhanced port 0. FALSE otherwise. +* +* NOTES +* +* SEE ALSO +*********/ + +/****s* IBA Base: Types/ib_guid_info_t +* NAME +* ib_guid_info_t +* +* DESCRIPTION +* IBA defined GuidInfo. (14.2.5.5) +* +* SYNOPSIS +*/ +#define GUID_TABLE_MAX_ENTRIES 8 + +#include +typedef struct _ib_guid_info +{ + ib_net64_t guid[GUID_TABLE_MAX_ENTRIES]; + +} PACK_SUFFIX ib_guid_info_t; +#include +/************/ + +#include +typedef struct _ib_guidinfo_record +{ + ib_net16_t lid; + uint8_t block_num; + uint8_t resv; + uint32_t reserved; + ib_guid_info_t guid_info; +} PACK_SUFFIX ib_guidinfo_record_t; +#include + +#define IB_MULTIPATH_MAX_GIDS 11 /* Support max that can fit into first MAD (for now) */ + +#include +typedef struct _ib_multipath_rec_t +{ + ib_net32_t hop_flow_raw; + uint8_t tclass; + uint8_t num_path; + ib_net16_t pkey; + uint8_t resv0; + uint8_t sl; + uint8_t mtu; + uint8_t rate; + uint8_t pkt_life; + uint8_t resv1; + uint8_t independence; /* formerly resv2 */ + uint8_t sgid_count; + uint8_t dgid_count; + uint8_t resv3[7]; + ib_gid_t gids[IB_MULTIPATH_MAX_GIDS]; +} PACK_SUFFIX ib_multipath_rec_t; +#include +/* +* FIELDS +* hop_flow_raw +* Global routing parameters: hop count, flow label and raw bit. +* +* tclass +* Another global routing parameter. +* +* num_path +* Reversible path - 1 bit to say if path is reversible. +* num_path [6:0] In queries, maximum number of paths to return. +* In responses, undefined. +* +* pkey +* Partition key (P_Key) to use on this path. +* +* sl +* Service level to use on this path. +* +* mtu +* MTU and MTU selector fields to use on this path +* rate +* Rate and rate selector fields to use on this path. +* +* pkt_life +* Packet lifetime +* +* preference +* Indicates the relative merit of this path versus other path +* records returned from the SA. Lower numbers are better. +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_num_path +* NAME +* ib_multipath_rec_num_path +* +* DESCRIPTION +* Get max number of paths to return. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_num_path( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( p_rec->num_path &0x7F ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Maximum number of paths to return for each unique SGID_DGID combination. +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_sl +* NAME +* ib_multipath_rec_sl +* +* DESCRIPTION +* Get multipath service level. 
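+*	The SL is carried in the low 4 bits of the record's sl byte.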
+* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_sl( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* SL. +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_mtu +* NAME +* ib_multipath_rec_mtu +* +* DESCRIPTION +* Get encoded path MTU. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_mtu( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->mtu & IB_MULTIPATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded path MTU. +* 1: 256 +* 2: 512 +* 3: 1024 +* 4: 2048 +* 5: 4096 +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_mtu_sel +* NAME +* ib_multipath_rec_mtu_sel +* +* DESCRIPTION +* Get encoded multipath MTU selector. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_mtu_sel( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->mtu & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded path MTU selector value (for queries). +* 0: greater than MTU specified +* 1: less than MTU specified +* 2: exactly the MTU specified +* 3: largest MTU available +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_rate +* NAME +* ib_multipath_rec_rate +* +* DESCRIPTION +* Get encoded multipath rate. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_rate( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->rate & IB_MULTIPATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded multipath rate. +* 2: 2.5 Gb/sec. +* 3: 10 Gb/sec. +* 4: 30 Gb/sec. +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_rate_sel +* NAME +* ib_multipath_rec_rate_sel +* +* DESCRIPTION +* Get encoded multipath rate selector. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_rate_sel( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->rate & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded path rate selector value (for queries). +* 0: greater than rate specified +* 1: less than rate specified +* 2: exactly the rate specified +* 3: largest rate available +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_pkt_life +* NAME +* ib_multipath_rec_pkt_life +* +* DESCRIPTION +* Get encoded multipath pkt_life. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_pkt_life( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->pkt_life & IB_MULTIPATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded multipath pkt_life = 4.096 µsec * 2 ** PacketLifeTime. +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_pkt_life_sel +* NAME +* ib_multipath_rec_pkt_life_sel +* +* DESCRIPTION +* Get encoded multipath pkt_lifetime selector. 
+* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_multipath_rec_pkt_life_sel( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->pkt_life & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6 )); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded path pkt_lifetime selector value (for queries). +* 0: greater than rate specified +* 1: less than rate specified +* 2: exactly the rate specified +* 3: smallest packet lifetime available +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +#define IB_NUM_PKEY_ELEMENTS_IN_BLOCK 32 +/****s* IBA Base: Types/ib_pkey_table_t +* NAME +* ib_pkey_table_t +* +* DESCRIPTION +* IBA defined PKey table. (14.2.5.7) +* +* SYNOPSIS +*/ + +#include +typedef struct _ib_pkey_table +{ + ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK]; + +} PACK_SUFFIX ib_pkey_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_pkey_table_record_t +* NAME +* ib_pkey_table_record_t +* +* DESCRIPTION +* IBA defined P_Key Table Record for SA Query. (15.2.5.11) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_pkey_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint16_t block_num; + uint8_t port_num; // for switch: port number, for CA: reserved + uint8_t reserved1; + uint16_t reserved2; + ib_pkey_table_t pkey_tbl; + +} PACK_SUFFIX ib_pkey_table_record_t; +#include +/************/ + +#define IB_DROP_VL 15 +#define IB_MAX_NUM_VLS 16 +/****s* IBA Base: Types/ib_slvl_table_t +* NAME +* ib_slvl_table_t +* +* DESCRIPTION +* IBA defined SL2VL Mapping Table Attribute. (14.2.5.8) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_slvl_table +{ + uint8_t vl_table[IB_MAX_NUM_VLS/2]; + +} PACK_SUFFIX ib_slvl_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_slvl_table_record_t +* NAME +* ib_slvl_table_record_t +* +* DESCRIPTION +* IBA defined SL to VL Mapping Table Record for SA Query. (15.2.5.4) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_slvl_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint8_t in_port_num; // reserved for CAs + uint8_t out_port_num; // reserved for CAs + uint32_t resv; + ib_slvl_table_t slvl_tbl; + +} PACK_SUFFIX ib_slvl_table_record_t; +#include +/************/ + +/****f* IBA Base: Types/ib_slvl_table_set +* NAME +* ib_slvl_table_set +* +* DESCRIPTION +* Set slvl table entry. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_slvl_table_set( + IN ib_slvl_table_t* p_slvl_tbl, + IN const uint8_t sl_index, + IN const uint8_t vl ) + { + uint8_t idx = sl_index/2; + CL_ASSERT(vl <= 15); + CL_ASSERT(sl_index <= 15); + + if (sl_index%2) + { + /* this is an odd sl. Need to update the ls bits */ + p_slvl_tbl->vl_table[idx] = ( p_slvl_tbl->vl_table[idx] & 0xF0 ) | vl ; + } + else + { + /* this is an even sl. Need to update the ms bits */ + p_slvl_tbl->vl_table[idx] = ( vl << 4 ) | ( p_slvl_tbl->vl_table[idx] & 0x0F ); + } +} +/* +* PARAMETERS +* p_slvl_tbl +* [in] pointer to ib_slvl_table_t object. +* +* sl_index +* [in] the sl index in the table to be updated. +* +* vl +* [in] the vl value to update for that sl. +* +* RETURN VALUES +* None +* +* NOTES +* +* SEE ALSO +* ib_slvl_table_t +*********/ + +/****f* IBA Base: Types/ib_slvl_table_get +* NAME +* ib_slvl_table_get +* +* DESCRIPTION +* Get slvl table entry. 
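+*	Entries are packed two per byte: each even SL occupies the high
+*	nibble and the following odd SL the low nibble of the same byte.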
+* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_slvl_table_get( +IN const ib_slvl_table_t* p_slvl_tbl, +IN const uint8_t sl_index ) +{ + uint8_t idx = sl_index/2; + CL_ASSERT(sl_index <= 15); + + if (sl_index%2) + { + /* this is an odd sl. Need to return the ls bits. */ + return ( p_slvl_tbl->vl_table[idx] & 0x0F ); + } + else + { + /* this is an even sl. Need to return the ms bits. */ + return ( (p_slvl_tbl->vl_table[idx] & 0xF0) >> 4 ); + } +} +/* +* PARAMETERS +* p_slvl_tbl +* [in] pointer to ib_slvl_table_t object. +* +* sl_index +* [in] the sl index in the table whose value should be returned. +* +* RETURN VALUES +* vl for the requested sl_index. +* +* NOTES +* +* SEE ALSO +* ib_slvl_table_t +*********/ + +/****s* IBA Base: Types/ib_vl_arb_element_t +* NAME +* ib_vl_arb_element_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table Element. (14.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_element +{ + uint8_t res_vl; + uint8_t weight; +} PACK_SUFFIX ib_vl_arb_element_t; +#include +/************/ + +/****f* IBA Base: Types/ib_vl_arb_element_get_vl +* NAME +* ib_vl_arb_element_get_vl +* +* DESCRIPTION +* Retrieves the VL from a VL arbitration table element. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_vl_arb_element_get_vl( + IN const ib_vl_arb_element_t vl_arb_element ) +{ + return (vl_arb_element.res_vl >> 4); +} +/* +* PARAMETERS +* vl_arb_element +* [in] VL arbitration table element from which to return the VL. +* +* RETURN VALUES +* Returns the VL value for the specified VL arbitration table element. +* +* SEE ALSO +* vl_arb_element, ib_vl_arb_element_set_vl +*********/ + +/****f* IBA Base: Types/ib_vl_arb_element_set_vl +* NAME +* ib_vl_arb_element_set_vl +* +* DESCRIPTION +* Retrieves the VL from a VL arbitration table element. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_vl_arb_element_set_vl( + IN OUT ib_vl_arb_element_t* const p_vl_arb_element, + IN const uint8_t vl ) +{ + p_vl_arb_element->res_vl = vl << 4; +} +/* +* PARAMETERS +* vl_arb_element +* [in/out] VL arbitration table element in which to store the VL. +* +* vl +* [in] VL to store in the specified element. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* vl_arb_element, ib_vl_arb_element_get_vl +*********/ + +#define IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK 32 + +/****s* IBA Base: Types/ib_vl_arb_table_t +* NAME +* ib_vl_arb_table_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table. (14.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_table +{ + ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]; +} PACK_SUFFIX ib_vl_arb_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_vl_arb_table_record_t +* NAME +* ib_vl_arb_table_record_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table Record for SA Query. 
(15.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint8_t port_num; + uint8_t block_num; + uint32_t reserved; + ib_vl_arb_table_t vl_arb_tbl; +} PACK_SUFFIX ib_vl_arb_table_record_t; +#include +/************/ + +/****s* IBA Base: Types/ib_grh_t +* NAME +* ib_grh_t +* +* DESCRIPTION +* Global route header information received with unreliable datagram messages +* +* SYNOPSIS +*/ +#include +typedef struct _ib_grh +{ + ib_net32_t ver_class_flow; + ib_net16_t resv1; + uint8_t resv2; + uint8_t hop_limit; + ib_gid_t src_gid; + ib_gid_t dest_gid; +} PACK_SUFFIX ib_grh_t; +#include +/************/ + +/****f* IBA Base: Types/ib_grh_get_ver_class_flow +* NAME +* ib_grh_get_ver_class_flow +* +* DESCRIPTION +* Get encoded version, traffic class and flow label in grh +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_grh_get_ver_class_flow( + IN const ib_net32_t ver_class_flow, + OUT uint8_t* const p_ver OPTIONAL, + OUT uint8_t* const p_tclass OPTIONAL, + OUT net32_t* const p_flow_lbl OPTIONAL ) +{ + ib_net32_t tmp_ver_class_flow; + + tmp_ver_class_flow = cl_ntoh32( ver_class_flow ); + + if (p_ver) + *p_ver = (uint8_t)(tmp_ver_class_flow >> 28); + + if (p_tclass) + *p_tclass = (uint8_t)(tmp_ver_class_flow >> 20); + + if (p_flow_lbl) + *p_flow_lbl = (ver_class_flow & CL_HTON32( 0x000FFFFF )); +} +/* +* PARAMETERS +* ver_class_flow +* [in] the version, traffic class and flow label info. +* +* RETURN VALUES +* p_ver +* [out] pointer to the version info. +* +* p_tclass +* [out] pointer to the traffic class info. +* +* p_flow_lbl +* [out] pointer to the flow label info +* +* NOTES +* +* SEE ALSO +* ib_grh_t +*********/ + +/****f* IBA Base: Types/ib_grh_set_ver_class_flow +* NAME +* ib_grh_set_ver_class_flow +* +* DESCRIPTION +* Set encoded version, traffic class and flow label in grh +* +* SYNOPSIS +*/ +AL_INLINE ib_net32_t AL_API +ib_grh_set_ver_class_flow( + IN const uint8_t ver, + IN const uint8_t tclass, + IN const net32_t flow_lbl ) +{ + ib_net32_t ver_class_flow; + + ver_class_flow = cl_hton32( (ver << 28) | (tclass << 20) ); + ver_class_flow |= (flow_lbl & CL_HTON32( 0x000FFFFF )); + return (ver_class_flow); +} +/* +* PARAMETERS +* ver +* [in] the version info. +* +* tclass +* [in] the traffic class info. +* +* flow_lbl +* [in] the flow label info +* +* RETURN VALUES +* ver_class_flow +* [out] the version, traffic class and flow label info. +* +* NOTES +* +* SEE ALSO +* ib_grh_t +*********/ + +/****s* IBA Base: Types/ib_member_rec_t +* NAME +* ib_member_rec_t +* +* DESCRIPTION +* Multicast member record, used to create, join, and leave multicast +* groups. +* +* SYNOPSIS +*/ +#include +typedef struct _ib_member_rec +{ + ib_gid_t mgid; + ib_gid_t port_gid; + ib_net32_t qkey; + ib_net16_t mlid; + uint8_t mtu; + uint8_t tclass; + ib_net16_t pkey; + uint8_t rate; + uint8_t pkt_life; + ib_net32_t sl_flow_hop; + uint8_t scope_state; + uint8_t proxy_join; + uint8_t reserved[2]; + uint8_t pad[4]; + +} PACK_SUFFIX ib_member_rec_t; +#include +/* +* FIELDS +* mgid +* Multicast GID address for this multicast group. +* +* port_gid +* Valid GID of the endpoint joining this multicast group. +* +* requestor_gid +* GID of the endpoint making this request on hehave of port_gid. +* +* qkey +* Q_Key to be used by this multicast group. +* +* mlid +* Multicast LID for this multicast group. +* +* mtu +* MTU and MTU selector fields to use on this path +* +* tclass +* Another global routing parameter. 
+* +* pkey +* Partition key (P_Key) to use for this member. +* +* rate +* Rate and rate selector fields to use on this path. +* +* pkt_life +* Packet lifetime +* +* sl_flow_hop +* Global routing parameters: service level, hop count, and flow label. +* +* scope_state +* MGID scope and JoinState of multicast request. +* +* proxy_join +* Enables others in the Partition to proxy add/remove from the group +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_member_get_sl_flow_hop +* NAME +* ib_member_get_sl_flow_hop +* +* DESCRIPTION +* Get encoded sl, flow label, and hop limit +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_member_get_sl_flow_hop( + IN const ib_net32_t sl_flow_hop, + OUT uint8_t* const p_sl OPTIONAL, + OUT net32_t* const p_flow_lbl OPTIONAL, + OUT uint8_t* const p_hop OPTIONAL ) +{ + ib_net32_t tmp_sl_flow_hop; + + if (p_sl) + *p_sl = (uint8_t)(sl_flow_hop & 0x0f); + + tmp_sl_flow_hop = sl_flow_hop >> 4; + + if (p_flow_lbl) + *p_flow_lbl = (uint32_t)(tmp_sl_flow_hop & 0xffffff); + + tmp_sl_flow_hop = tmp_sl_flow_hop >> 20; + + if (p_hop) + *p_hop = (uint8_t)(tmp_sl_flow_hop & 0xff); +} +/* +* PARAMETERS +* sl_flow_hop +* [in] the sl, flow label, and hop limit of MC Group +* +* RETURN VALUES +* p_sl +* [out] pointer to the service level +* +* p_flow_lbl +* [out] pointer to the flow label info +* +* p_hop +* [out] pointer to the hop count limit. +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_sl_flow_hop +* NAME +* ib_member_set_sl_flow_hop +* +* DESCRIPTION +* Set encoded sl, flow label, and hop limit +* +* SYNOPSIS +*/ +AL_INLINE ib_net32_t AL_API +ib_member_set_sl_flow_hop( + IN const uint8_t sl, + IN const net32_t flow_lbl, + IN const uint8_t hop_limit ) +{ + ib_net32_t sl_flow_hop; + + sl_flow_hop = sl; + sl_flow_hop <<= 20; + sl_flow_hop |= (cl_ntoh32( flow_lbl ) & 0x000FFFFF); + sl_flow_hop <<= 8; + sl_flow_hop |= hop_limit; + return cl_hton32(sl_flow_hop); +} +/* +* PARAMETERS +* sl +* [in] the service level. +* +* flow_lbl +* [in] the flow label info +* +* hop_limit +* [in] the hop limit. +* +* RETURN VALUES +* sl_flow_hop +* [out] the encoded sl, flow label, and hop limit +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_get_scope +* NAME +* ib_member_get_scope +* +* DESCRIPTION +* Get encoded MGID scope +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_member_get_scope( + IN const uint8_t scope_state ) +{ + return (scope_state >> 4); +} +/* +* PARAMETERS +* scope_state +* [in] the scope and state +* +* RETURN VALUES +* Encoded scope. 
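+*	(The scope is the upper 4 bits of scope_state; the JoinState
+*	occupies the lower 4 bits.)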
+*
+* SEE ALSO
+*	ib_member_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_member_get_state
+* NAME
+*	ib_member_get_state
+*
+* DESCRIPTION
+*	Get encoded MGID JoinState
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_member_get_state(
+	IN	const	uint8_t		scope_state )
+{
+	return (scope_state & 0x0f);
+}
+/*
+* PARAMETERS
+*	scope_state
+*		[in] the scope and state
+*
+* RETURN VALUES
+*	Encoded JoinState
+*
+* SEE ALSO
+*	ib_member_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_member_get_scope_state
+* NAME
+*	ib_member_get_scope_state
+*
+* DESCRIPTION
+*	Get encoded MGID scope and JoinState
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_member_get_scope_state(
+	IN	const	uint8_t		scope_state,
+	OUT	uint8_t* const		p_scope,
+	OUT	uint8_t* const		p_state )
+{
+	if (p_scope)
+		*p_scope = ib_member_get_scope( scope_state );
+
+	if (p_state)
+		*p_state = ib_member_get_state( scope_state );
+}
+/*
+* PARAMETERS
+*	scope_state
+*		[in] the scope and state
+*
+* RETURN VALUES
+*	p_scope
+*		[out] pointer to the MGID scope
+*
+*	p_state
+*		[out] pointer to the join state
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_member_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_member_set_scope
+* NAME
+*	ib_member_set_scope
+*
+* DESCRIPTION
+*	Set encoded scope of a MCR.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_member_set_scope(
+	IN OUT	uint8_t* const		p_scope_state,
+	IN	const	uint8_t		scope )
+{
+	CL_ASSERT( scope <= 0x0F );
+	/* Scope is the MS 4-bits; clear them before setting the new value. */
+	*p_scope_state &= 0x0F;
+	*p_scope_state |= (scope << 4);
+}
+/*
+* PARAMETERS
+*	p_scope_state
+*		[in/out] Pointer to the MCR scope_state field.
+*
+*	scope
+*		[in] The desired scope.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_member_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_member_set_state
+* NAME
+*	ib_member_set_state
+*
+* DESCRIPTION
+*	Set encoded JoinState of a MCR.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_member_set_state(
+	IN OUT	uint8_t* const		p_scope_state,
+	IN	const	uint8_t		state )
+{
+	CL_ASSERT( state <= 0x0F );
+	/* State is the LS 4-bits; clear them before setting the new value. */
+	*p_scope_state &= 0xF0;
+	*p_scope_state |= (state & 0x0F);
+}
+/*
+* PARAMETERS
+*	p_scope_state
+*		[in/out] Pointer to the MCR scope_state field to modify.
+*
+*	state
+*		[in] the JoinState
+*
+* RETURN VALUES
+*	This function does not return a value.
+* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_scope_state +* NAME +* ib_member_set_scope_state +* +* DESCRIPTION +* Set encoded version, MGID scope and JoinState +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_member_set_scope_state( + IN const uint8_t scope, + IN const uint8_t state ) +{ + /* Scope is MS 4-bits, state is LS 4-bits */ + return ((scope << 4) | (state & 0xF)); +} +/* +* PARAMETERS +* scope +* [in] the MGID scope +* +* state +* [in] the JoinState +* +* RETURN VALUES +* scope_state +* [out] the encoded one +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_join_state +* NAME +* ib_member_set_join_state +* +* DESCRIPTION +* Set JoinState +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_member_set_join_state( + IN OUT ib_member_rec_t *p_mc_rec, + IN const uint8_t state ) +{ + p_mc_rec->scope_state &= 0xF0; + p_mc_rec->scope_state |= (state & 0x0F); +} +/* +* PARAMETERS +* p_mc_rec +* [in] pointer to the member record +* +* state +* [in] the JoinState +* +* RETURN VALUES +* NONE +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/* + * Join State Codes: + */ +#define IB_MC_REC_STATE_FULL_MEMBER 0x01 +#define IB_MC_REC_STATE_NON_MEMBER 0x02 +#define IB_MC_REC_STATE_SEND_ONLY_MEMBER 0x04 + +/* + * Generic MAD notice types + */ +#define IB_NOTICE_TYPE_FATAL 0x00 +#define IB_NOTICE_TYPE_URGENT 0x01 +#define IB_NOTICE_TYPE_SECURITY 0x02 +#define IB_NOTICE_TYPE_SUBN_MGMT 0x03 +#define IB_NOTICE_TYPE_INFO 0x04 +#define IB_NOTICE_TYPE_EMPTY 0x7F + +#include +typedef struct _ib_mad_notice_attr +{ + /* is_generic:1, type:7, producer type or vendor id:24 */ + net32_t combo1; + /* trap number or device id, depending on is_generic. */ + net16_t combo2; + + net16_t issuer_lid; + /* notice toggle:1, notice_count:15 */ + net16_t combo3; + + uint8_t data_details[54]; + ib_gid_t issuer_gid; + +} PACK_SUFFIX ib_mad_notice_attr_t; +#include + +/****f* IBA Base: Types/ib_notice_get_generic +* NAME +* ib_notice_get_generic +* +* DESCRIPTION +* Retrieves whether a notice trap is generic. +* +* SYNOPSIS +*/ +AL_INLINE boolean_t AL_API +ib_notice_get_generic( + IN const ib_mad_notice_attr_t* const p_notice_attr ) +{ + if( cl_ntoh32( p_notice_attr->combo1 ) & 0x00000001 ) + return TRUE; + return FALSE; +} +/* +* PARAMETERS +* p_notice_attr +* [in] Pointer to the notice attribute structure for which to return +* whether it is generic or not. +* +* RETURN VALUES +* Returns TRUE if the notice is generic. +* +* Returns FALSE if the notice is vendor specific. +* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_set_generic +*********/ + +/****f* IBA Base: Types/ib_notice_set_generic +* NAME +* ib_notice_set_generic +* +* DESCRIPTION +* Sets whether a notice trap is generic. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_notice_set_generic( + IN OUT ib_mad_notice_attr_t* const p_notice_attr, + IN const boolean_t is_generic ) +{ + uint32_t val; + + val = cl_ntoh32( p_notice_attr->combo1 ); + if( is_generic ) + val |= 0x00000001; + else + val &= 0xFFFFFFFE; + p_notice_attr->combo1 = cl_hton32( val ); +} +/* +* PARAMETERS +* p_notice_attr +* [in/out] Pointer to the notice attribute structure for which to set +* the generic bit. +* +* is_generic +* [in] TRUE if the notice is generic, FALSE if vendor specific. +* +* RETURN VALUES +* This function does not return a value. 
+* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_get_generic +*********/ + +/****f* IBA Base: Types/ib_notice_get_type +* NAME +* ib_notice_get_type +* +* DESCRIPTION +* Retrieves the type of a notice trap. +* +* SYNOPSIS +*/ +AL_INLINE uint8_t AL_API +ib_notice_get_type( + IN const ib_mad_notice_attr_t* const p_notice_attr ) +{ + return (uint8_t)((cl_ntoh32( p_notice_attr->combo1 ) >> 1) & 0x0000007F); +} +/* +* PARAMETERS +* p_notice_attr +* [in] Pointer to the notice attribute structure whose type to return. +* +* RETURN VALUES +* Returns the type of the notice. +* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_set_type +*********/ + +/****f* IBA Base: Types/ib_notice_set_type +* NAME +* ib_notice_set_type +* +* DESCRIPTION +* Sets the type of a notice trap. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_notice_set_type( + IN OUT ib_mad_notice_attr_t* const p_notice_attr, + IN const uint8_t type ) +{ + uint32_t val; + + val = cl_ntoh32( p_notice_attr->combo1 ); + /* Clear the type. */ + val &= 0xFFFFFF01; + /* Set new value. */ + val |= (((uint32_t)(type & 0x7F)) << 1); + p_notice_attr->combo1 = cl_hton32( val ); +} +/* +* PARAMETERS +* p_notice_attr +* [in/out] Pointer to the notice attribute structure whose type to set. +* +* type +* [in] Type of notice trap. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_get_type +*********/ + +/****f* IBA Base: Types/ib_notice_get_prod_type +* NAME +* ib_notice_get_prod_type +* +* DESCRIPTION +* Retrieves the producer type from a generic notice trap. +* +* SYNOPSIS +*/ +AL_INLINE uint32_t AL_API +ib_notice_get_prod_type( + IN const ib_mad_notice_attr_t* const p_notice_attr ) +{ + return (cl_ntoh32( p_notice_attr->combo1 ) >> 8); +} +/* +* PARAMETERS +* p_notice_attr +* [in] Pointer to the notice attribute structure whose +* prducer type to return. +* +* RETURN VALUES +* Returns the producer type of the notice, in host byte order. +* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_set_prod_type +*********/ + +/****f* IBA Base: Types/ib_notice_set_prod_type +* NAME +* ib_notice_set_prod_type +* +* DESCRIPTION +* Sets the producer type of a generic notice trap. +* +* SYNOPSIS +*/ +AL_INLINE void AL_API +ib_notice_set_prod_type( + IN OUT ib_mad_notice_attr_t* const p_notice_attr, + IN const uint32_t prod_type ) +{ + uint32_t val; + + val = cl_ntoh32( p_notice_attr->combo1 ); + /* Clear the type. */ + val &= 0x000000FF; + /* Set new value. */ + val |= (prod_type << 8); + p_notice_attr->combo1 = cl_hton32( val ); +} +/* +* PARAMETERS +* p_notice_attr +* [in/out] Pointer to the notice attribute structure +* whose producer type to set. +* +* prod_type +* [in] Producer type of notice trap. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* ib_mad_notice_attr_t, ib_notice_get_prod_type +*********/ + +/****f* IBA Base: Types/ib_notice_get_vend_id +* NAME +* ib_notice_get_vend_id +* +* DESCRIPTION +* Retrieves the vendor ID from a vendor specific notice trap. +* +* SYNOPSIS +*/ +AL_INLINE uint32_t AL_API +ib_notice_get_vend_id( + IN const ib_mad_notice_attr_t* const p_notice_attr ) +{ + return ib_notice_get_prod_type( p_notice_attr ); +} +/* +* PARAMETERS +* p_notice_attr +* [in] Pointer to the notice attribute structure whose +* vendor ID to return. +* +* RETURN VALUES +* Returns the vendor ID of the notice, in host byte order. 
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_set_vend_id
+*********/
+
+/****f* IBA Base: Types/ib_notice_set_vend_id
+* NAME
+*	ib_notice_set_vend_id
+*
+* DESCRIPTION
+*	Sets the vendor ID of a vendor specific notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_notice_set_vend_id(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr,
+	IN	const uint32_t			vend_id )
+{
+	ib_notice_set_prod_type( p_notice_attr, vend_id );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in/out] Pointer to the notice attribute structure
+*		whose vendor ID to set.
+*
+*	vend_id
+*		[in] Vendor ID of notice trap.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_get_vend_id
+*********/
+
+/****f* IBA Base: Types/ib_notice_get_trap_num
+* NAME
+*	ib_notice_get_trap_num
+*
+* DESCRIPTION
+*	Retrieves the trap number from a generic notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint16_t AL_API
+ib_notice_get_trap_num(
+	IN	const ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	return cl_ntoh16( p_notice_attr->combo2 );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in] Pointer to the notice attribute structure whose
+*		trap number to return.
+*
+* RETURN VALUES
+*	Returns the trap number of the notice, in host byte order.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_set_trap_num
+*********/
+
+/****f* IBA Base: Types/ib_notice_set_trap_num
+* NAME
+*	ib_notice_set_trap_num
+*
+* DESCRIPTION
+*	Sets the trap number of a generic notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_notice_set_trap_num(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr,
+	IN	const uint16_t			trap_num )
+{
+	p_notice_attr->combo2 = cl_hton16( trap_num );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in/out] Pointer to the notice attribute structure
+*		whose trap number to set.
+*
+*	trap_num
+*		[in] Trap number to set.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_get_trap_num
+*********/
+
+/****f* IBA Base: Types/ib_notice_get_dev_id
+* NAME
+*	ib_notice_get_dev_id
+*
+* DESCRIPTION
+*	Retrieves the device ID from a vendor specific notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint16_t AL_API
+ib_notice_get_dev_id(
+	IN	const ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	return ib_notice_get_trap_num( p_notice_attr );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in] Pointer to the notice attribute structure whose
+*		device ID to return.
+*
+* RETURN VALUES
+*	Returns the device ID of the notice, in host byte order.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_set_dev_id
+*********/
+
+/****f* IBA Base: Types/ib_notice_set_dev_id
+* NAME
+*	ib_notice_set_dev_id
+*
+* DESCRIPTION
+*	Sets the device ID of a vendor specific notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_notice_set_dev_id(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr,
+	IN	const uint16_t			dev_id )
+{
+	ib_notice_set_trap_num( p_notice_attr, dev_id );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in/out] Pointer to the notice attribute structure
+*		whose device ID to set.
+*
+*	dev_id
+*		[in] Device ID of notice trap.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_get_dev_id
+*********/
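+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical): combo2 is overloaded, so a consumer
+ * should test the generic bit before deciding whether the field holds a
+ * trap number or a device ID.
+ */
+AL_INLINE uint16_t AL_API
+ib_notice_example_get_combo2(
+	IN	const ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	/* Generic notices carry a trap number in combo2... */
+	if( ib_notice_get_generic( p_notice_attr ) )
+		return ib_notice_get_trap_num( p_notice_attr );
+
+	/* ...vendor specific notices carry a device ID there. */
+	return ib_notice_get_dev_id( p_notice_attr );
+}
+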
+/****f* IBA Base: Types/ib_notice_get_toggle
+* NAME
+*	ib_notice_get_toggle
+*
+* DESCRIPTION
+*	Retrieves the notice toggle bit from a notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE boolean_t AL_API
+ib_notice_get_toggle(
+	IN	const ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	return (cl_ntoh16( p_notice_attr->combo3 ) & 0x0001);
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in] Pointer to the notice attribute structure whose
+*		notice toggle bit value to return.
+*
+* RETURN VALUES
+*	Returns TRUE if the notice toggle bit of the notice is set.
+*
+*	Returns FALSE otherwise.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_set_toggle
+*********/
+
+/****f* IBA Base: Types/ib_notice_set_toggle
+* NAME
+*	ib_notice_set_toggle
+*
+* DESCRIPTION
+*	Sets the notice toggle bit of a notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_notice_set_toggle(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr,
+	IN	const boolean_t			toggle_val )
+{
+	uint16_t	val;
+	val = cl_ntoh16( p_notice_attr->combo3 );
+	if( toggle_val )
+		val |= 0x0001;
+	else
+		val &= 0xFFFE;
+	p_notice_attr->combo3 = cl_hton16( val );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in/out] Pointer to the notice attribute structure
+*		whose notice toggle bit to set or clear.
+*
+*	toggle_val
+*		[in] Boolean value indicating whether the toggle bit of the notice
+*		should be set or cleared.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_get_toggle
+*********/
+
+/****f* IBA Base: Types/ib_notice_get_count
+* NAME
+*	ib_notice_get_count
+*
+* DESCRIPTION
+*	Retrieves the notice toggle count from a notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint16_t AL_API
+ib_notice_get_count(
+	IN	const ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	return ((cl_ntoh16( p_notice_attr->combo3 ) & 0xFFFE) >> 1);
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in] Pointer to the notice attribute structure whose
+*		notice toggle count to return.
+*
+* RETURN VALUES
+*	Returns the notice toggle count of the notice.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_set_count
+*********/
+
+/****f* IBA Base: Types/ib_notice_set_count
+* NAME
+*	ib_notice_set_count
+*
+* DESCRIPTION
+*	Sets the toggle count of a notice trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_notice_set_count(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr,
+	IN	const uint16_t			toggle_cnt )
+{
+	uint16_t	val;
+	val = cl_ntoh16( p_notice_attr->combo3 );
+	val &= 0x0001;
+	val |= (toggle_cnt << 1);
+	p_notice_attr->combo3 = cl_hton16( val );
+}
+/*
+* PARAMETERS
+*	p_notice_attr
+*		[in/out] Pointer to the notice attribute structure
+*		whose toggle count to set.
+*
+*	toggle_cnt
+*		[in] Toggle count value of the notice.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t, ib_notice_get_count
+*********/
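+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical): a trap producer flips the toggle bit and
+ * advances the 15-bit count each time it reissues the same notice.
+ */
+AL_INLINE void AL_API
+ib_notice_example_advance(
+	IN OUT	ib_mad_notice_attr_t* const	p_notice_attr )
+{
+	/* Flip the toggle bit. */
+	ib_notice_set_toggle( p_notice_attr,
+		!ib_notice_get_toggle( p_notice_attr ) );
+
+	/* Advance the count; it wraps naturally at 15 bits. */
+	ib_notice_set_count( p_notice_attr,
+		(uint16_t)(ib_notice_get_count( p_notice_attr ) + 1) );
+}
+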
+#include
+typedef struct _ib_inform_info
+{
+	ib_gid_t	gid;
+
+	ib_net16_t	lid_range_begin;
+	ib_net16_t	lid_range_end;
+	// EZ: not v1.1	ib_net16_t	pkey;
+	ib_net16_t	reserved;
+	uint8_t		is_generic;
+	uint8_t		subscribe;
+	// EZ: not v1.1	ib_net16_t	class_range;
+	ib_net16_t	trap_type;
+
+	/* trap num or dev_id */
+	ib_net16_t	combo1;
+	/* QPN:24, resv:3, resp_time_val:5 */
+	ib_net32_t	combo2;
+	/* resv:8, producer type or vendor id:24 */
+	ib_net32_t	combo3;
+
+} PACK_SUFFIX ib_inform_info_t;
+#include
+
+/****f* IBA Base: Types/ib_inform_get_trap_num
+* NAME
+*	ib_inform_get_trap_num
+*
+* DESCRIPTION
+*	Retrieves the trap number from an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint16_t AL_API
+ib_inform_get_trap_num(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return cl_ntoh16( p_inform_info->combo1 );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		trap number to return.
+*
+* RETURN VALUES
+*	Returns the trap number of the inform info, in host byte order.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_trap_num
+*********/
+
+/****f* IBA Base: Types/ib_inform_set_trap_num
+* NAME
+*	ib_inform_set_trap_num
+*
+* DESCRIPTION
+*	Sets the trap number of an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_trap_num(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint16_t			trap_num )
+{
+	p_inform_info->combo1 = cl_hton16( trap_num );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose trap number to set.
+*
+*	trap_num
+*		[in] Trap number to set.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_trap_num
+*********/
+
+/****f* IBA Base: Types/ib_inform_get_dev_id
+* NAME
+*	ib_inform_get_dev_id
+*
+* DESCRIPTION
+*	Retrieves the device ID from a vendor specific inform trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint16_t AL_API
+ib_inform_get_dev_id(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return ib_inform_get_trap_num( p_inform_info );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		device ID to return.
+*
+* RETURN VALUES
+*	Returns the device ID of the inform info, in host byte order.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_dev_id
+*********/
+
+/****f* IBA Base: Types/ib_inform_set_dev_id
+* NAME
+*	ib_inform_set_dev_id
+*
+* DESCRIPTION
+*	Sets the device ID of a vendor specific inform trap.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_dev_id(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint16_t			dev_id )
+{
+	ib_inform_set_trap_num( p_inform_info, dev_id );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose device ID to set.
+*
+*	dev_id
+*		[in] Device ID of inform trap.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_dev_id
+*********/
+
+/****f* IBA Base: Types/ib_inform_get_qpn
+* NAME
+*	ib_inform_get_qpn
+*
+* DESCRIPTION
+*	Retrieves the QPN from an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE net32_t AL_API
+ib_inform_get_qpn(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return (p_inform_info->combo2 & CL_NTOH32( 0x00FFFFFF ));
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		QPN to return.
+*
+* RETURN VALUES
+*	Returns the QPN of the inform info.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_qpn
+*********/
+
+/****f* IBA Base: Types/ib_inform_set_qpn
+* NAME
+*	ib_inform_set_qpn
+*
+* DESCRIPTION
+*	Sets the QPN of an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_qpn(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const net32_t			qpn )
+{
+	p_inform_info->combo2 &= CL_NTOH32( 0xFF000000 );
+	p_inform_info->combo2 |= (qpn & CL_NTOH32( 0x00FFFFFF ));
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose QPN to set.
+*
+*	qpn
+*		[in] QPN of the inform info.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_qpn
+*********/
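+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical): combo2 keeps the QPN in network byte
+ * order, so the round trip below compares masked network-order values.
+ */
+AL_INLINE void AL_API
+ib_inform_example_set_qpn(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const net32_t			qpn )
+{
+	/* Program the 24-bit QPN without touching resp_time_val. */
+	ib_inform_set_qpn( p_inform_info, qpn );
+
+	/* The getter hands back the same masked network-order value. */
+	CL_ASSERT( ib_inform_get_qpn( p_inform_info ) ==
+		(qpn & CL_NTOH32( 0x00FFFFFF )) );
+}
+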
+/****f* IBA Base: Types/ib_inform_get_resp_time_val
+* NAME
+*	ib_inform_get_resp_time_val
+*
+* DESCRIPTION
+*	Retrieves the response time value from an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_inform_get_resp_time_val(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return (uint8_t)(cl_ntoh32( p_inform_info->combo2 ) >> 27);
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		response time value to return.
+*
+* RETURN VALUES
+*	Returns the response time value of the inform info.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_resp_time_val
+*********/
+
+/****f* IBA Base: Types/ib_inform_set_resp_time_val
+* NAME
+*	ib_inform_set_resp_time_val
+*
+* DESCRIPTION
+*	Sets the response time value of an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_resp_time_val(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint8_t			resp_time_val )
+{
+	uint32_t	val;
+
+	val = cl_ntoh32( p_inform_info->combo2 );
+	val &= 0x07FFFFFF;
+	val |= ((uint32_t)resp_time_val << 27);
+	p_inform_info->combo2 = cl_hton32( val );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose response time value to set.
+*
+*	resp_time_val
+*		[in] Response time value of the inform info.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_resp_time_val
+*********/
+
+/****f* IBA Base: Types/ib_inform_get_prod_type
+* NAME
+*	ib_inform_get_prod_type
+*
+* DESCRIPTION
+*	Retrieves the producer type from an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint32_t AL_API
+ib_inform_get_prod_type(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return (cl_ntoh32( p_inform_info->combo3 ) >> 8);
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		producer type to return.
+*
+* RETURN VALUES
+*	Returns the producer type of the inform info, in host byte order.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_prod_type
+*********/
+
+/****f* IBA Base: Types/ib_inform_set_prod_type
+* NAME
+*	ib_inform_set_prod_type
+*
+* DESCRIPTION
+*	Sets the producer type of an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_prod_type(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint32_t			prod_type )
+{
+	p_inform_info->combo3 = cl_hton32( prod_type << 8 );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose producer type to set.
+*
+*	prod_type
+*		[in] Producer type of inform trap.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_prod_type
+*********/
+
+/****f* IBA Base: Types/ib_inform_get_vend_id
+* NAME
+*	ib_inform_get_vend_id
+*
+* DESCRIPTION
+*	Retrieves the vendor ID from an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint32_t AL_API
+ib_inform_get_vend_id(
+	IN	const ib_inform_info_t* const	p_inform_info )
+{
+	return ib_inform_get_prod_type( p_inform_info );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in] Pointer to the inform info structure whose
+*		vendor ID to return.
+*
+* RETURN VALUES
+*	Returns the vendor ID of the inform info, in host byte order.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_set_vend_id
+*********/
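+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical and cl_memclr is assumed to be the complib
+ * memory-clear routine): filling an ib_inform_info_t to subscribe for a
+ * generic trap, with reports directed to the given QPN.  LIDRangeBegin
+ * of 0xFFFF is assumed to mean "any LID" per the InformInfo definition.
+ */
+AL_INLINE void AL_API
+ib_inform_example_subscribe(
+	OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint16_t			trap_num,
+	IN	const net32_t			qpn )
+{
+	cl_memclr( p_inform_info, sizeof(ib_inform_info_t) );
+
+	/* Subscribe for a generic trap from any LID. */
+	p_inform_info->lid_range_begin = CL_HTON16( 0xFFFF );
+	p_inform_info->is_generic = 1;
+	p_inform_info->subscribe = 1;
+
+	ib_inform_set_trap_num( p_inform_info, trap_num );
+	ib_inform_set_qpn( p_inform_info, qpn );
+	ib_inform_set_resp_time_val( p_inform_info, 0x1F );
+}
+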
+/****f* IBA Base: Types/ib_inform_set_vend_id
+* NAME
+*	ib_inform_set_vend_id
+*
+* DESCRIPTION
+*	Sets the vendor ID of an inform info structure.
+*
+* SYNOPSIS
+*/
+AL_INLINE void AL_API
+ib_inform_set_vend_id(
+	IN OUT	ib_inform_info_t* const		p_inform_info,
+	IN	const uint32_t			vend_id )
+{
+	ib_inform_set_prod_type( p_inform_info, vend_id );
+}
+/*
+* PARAMETERS
+*	p_inform_info
+*		[in/out] Pointer to the inform info structure
+*		whose vendor ID to set.
+*
+*	vend_id
+*		[in] Vendor ID of inform trap.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* SEE ALSO
+*	ib_inform_info_t, ib_inform_get_vend_id
+*********/
+
+/****s* IBA Base: Types/ib_inform_info_record_t
+* NAME
+*	ib_inform_info_record_t
+*
+* DESCRIPTION
+*	IBA defined InformInfo Record. (15.2.5.12)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_inform_info_record
+{
+	ib_gid_t		subscriber_gid;
+	net16_t			subscriber_enum;
+	uint16_t		reserved[3];
+	ib_inform_info_t	inform_info;
+
+} PACK_SUFFIX ib_inform_info_record_t;
+#include
+/********/
+/****d* IBA Base: Types/DM_SVC_NAME
+* NAME
+*	DM_SVC_NAME
+*
+* DESCRIPTION
+*	IBA defined Device Management service name (16.3)
+*
+* SYNOPSIS
+*/
+#define DM_SVC_NAME	"DeviceManager.IBTA"
+/*
+* SEE ALSO
+*********/
+
+/****s* IBA Base: Types/ib_dm_mad_t
+* NAME
+*	ib_dm_mad_t
+*
+* DESCRIPTION
+*	IBA defined Device Management MAD (16.3.1)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_dm_mad
+{
+	ib_mad_t	hdr;
+	uint8_t		resv[40];
+
+#define IB_DM_DATA_SIZE		192
+	uint8_t		data[IB_DM_DATA_SIZE];
+
+} PACK_SUFFIX ib_dm_mad_t;
+#include
+/*
+* FIELDS
+*	hdr
+*		Common MAD header.
+*
+*	resv
+*		Reserved.
+*
+*	data
+*		Device Management payload.  The structure and content of this field
+*		depend upon the method, attr_id, and attr_mod fields in the header.
+*
+* SEE ALSO
+*	ib_mad_t
+*********/
+
+/****s* IBA Base: Types/ib_iou_info_t
+* NAME
+*	ib_iou_info_t
+*
+* DESCRIPTION
+*	IBA defined IO Unit information structure (16.3.3.3)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_iou_info
+{
+	ib_net16_t	change_id;
+	uint8_t		max_controllers;
+	uint8_t		diag_rom;
+
+#define IB_DM_CTRL_LIST_SIZE	128
+#define IB_DM_MAX_CTRL		0xFF
+
+	uint8_t		controller_list[IB_DM_CTRL_LIST_SIZE];
+#define IOC_NOT_INSTALLED	0x0
+#define IOC_INSTALLED		0x1
+// Reserved values		0x02-0xE
+#define SLOT_DOES_NOT_EXIST	0xF
+
+} PACK_SUFFIX ib_iou_info_t;
+#include
+/*
+* FIELDS
+*	change_id
+*		Value incremented, with rollover, by any change to the controller_list.
+*
+*	max_controllers
+*		Number of slots in controller_list.
+*
+*	diag_rom
+*		A byte containing two fields: DiagDeviceID and OptionROM.
+*		These fields may be read using the ib_iou_info_diag_dev_id
+*		and ib_iou_info_option_rom functions.
+*
+*	controller_list
+*		A series of 4-bit nibbles, with each nibble representing a slot
+*		in the IO Unit.  Individual nibbles may be read using the
+*		ioc_at_slot function.
+*
+* SEE ALSO
+*	ib_dm_mad_t, ib_iou_info_diag_dev_id, ib_iou_info_option_rom, ioc_at_slot
+*********/
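+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical): an IOUnitInfo response arrives as the
+ * payload of an ib_dm_mad_t, so the data area can be viewed directly as
+ * an ib_iou_info_t once the caller has checked the attribute ID.
+ */
+AL_INLINE const ib_iou_info_t* AL_API
+ib_dm_mad_example_iou_info(
+	IN	const ib_dm_mad_t* const	p_dm_mad )
+{
+	/* ib_iou_info_t (132 bytes) fits within IB_DM_DATA_SIZE (192). */
+	return (const ib_iou_info_t*)p_dm_mad->data;
+}
+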
+/****f* IBA Base: Types/ib_iou_info_diag_dev_id
+* NAME
+*	ib_iou_info_diag_dev_id
+*
+* DESCRIPTION
+*	Returns the DiagDeviceID.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_iou_info_diag_dev_id(
+	IN	const ib_iou_info_t* const	p_iou_info )
+{
+	return( (uint8_t)(p_iou_info->diag_rom >> 6 & 1) );
+}
+/*
+* PARAMETERS
+*	p_iou_info
+*		[in] Pointer to the IO Unit information structure.
+*
+* RETURN VALUES
+*	DiagDeviceID field of the IO Unit information.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_iou_info_t
+*********/
+
+/****f* IBA Base: Types/ib_iou_info_option_rom
+* NAME
+*	ib_iou_info_option_rom
+*
+* DESCRIPTION
+*	Returns the OptionROM.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ib_iou_info_option_rom(
+	IN	const ib_iou_info_t* const	p_iou_info )
+{
+	return( (uint8_t)(p_iou_info->diag_rom >> 7) );
+}
+/*
+* PARAMETERS
+*	p_iou_info
+*		[in] Pointer to the IO Unit information structure.
+*
+* RETURN VALUES
+*	OptionROM field of the IO Unit information.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_iou_info_t
+*********/
+
+/****f* IBA Base: Types/ioc_at_slot
+* NAME
+*	ioc_at_slot
+*
+* DESCRIPTION
+*	Returns the IOC value at the specified slot.
+*
+* SYNOPSIS
+*/
+AL_INLINE uint8_t AL_API
+ioc_at_slot(
+	IN	const ib_iou_info_t* const	p_iou_info,
+	IN	uint8_t				slot )
+{
+	if( !slot )
+		return SLOT_DOES_NOT_EXIST;
+	else if( slot-- & 0x01 )
+		return (p_iou_info->controller_list[slot >> 1] >> 4);
+	else
+		return (p_iou_info->controller_list[slot >> 1] & 0x0F);
+}
+/*
+* PARAMETERS
+*	p_iou_info
+*		[in] Pointer to the IO Unit information structure.
+*
+*	slot
+*		[in] 1-based slot number of the IOC slot to check.
+*
+* RETURN VALUES
+*	Returns the encoded value for the desired slot.  Possible values are
+*	SLOT_DOES_NOT_EXIST, IOC_NOT_INSTALLED, and IOC_INSTALLED.
+*
+* NOTES
+*	The input slot number is 1-based, not zero based.
+*
+* SEE ALSO
+*	ib_iou_info_t
+*********/
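+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * helper name is hypothetical): walking the nibble-packed controller
+ * list with ioc_at_slot to count the installed IOCs in an IO Unit.
+ */
+AL_INLINE uint8_t AL_API
+ib_iou_example_count_iocs(
+	IN	const ib_iou_info_t* const	p_iou_info )
+{
+	uint16_t	slot;
+	uint8_t		count = 0;
+
+	/* Slot numbers are 1-based, as ioc_at_slot expects. */
+	for( slot = 1; slot <= p_iou_info->max_controllers; slot++ )
+	{
+		if( ioc_at_slot( p_iou_info, (uint8_t)slot ) == IOC_INSTALLED )
+			count++;
+	}
+	return count;
+}
+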
+/****s* IBA Base: Types/ib_ioc_profile_t
+* NAME
+*	ib_ioc_profile_t
+*
+* DESCRIPTION
+*	IBA defined IO Controller profile structure (16.3.3.4)
+*
+* SYNOPSIS
+*/
+#include
+typedef __declspec(align(8)) struct _ib_ioc_profile
+{
+	ib_net64_t	ioc_guid;
+
+	ib_net32_t	vend_id;
+
+	ib_net32_t	dev_id;
+	ib_net16_t	dev_ver;
+	ib_net16_t	resv2;
+
+	ib_net32_t	subsys_vend_id;
+	ib_net32_t	subsys_id;
+
+	ib_net16_t	io_class;
+	ib_net16_t	io_subclass;
+	ib_net16_t	protocol;
+	ib_net16_t	protocol_ver;
+
+	ib_net32_t	resv3;
+	ib_net16_t	send_msg_depth;
+	uint8_t		resv4;
+	uint8_t		rdma_read_depth;
+	ib_net32_t	send_msg_size;
+	ib_net32_t	rdma_size;
+
+	uint8_t		ctrl_ops_cap;
+#define	CTRL_OPS_CAP_ST		0x01
+#define	CTRL_OPS_CAP_SF		0x02
+#define	CTRL_OPS_CAP_RT		0x04
+#define	CTRL_OPS_CAP_RF		0x08
+#define	CTRL_OPS_CAP_WT		0x10
+#define	CTRL_OPS_CAP_WF		0x20
+#define	CTRL_OPS_CAP_AT		0x40
+#define	CTRL_OPS_CAP_AF		0x80
+
+	uint8_t		resv5;
+
+	uint8_t		num_svc_entries;
+#define	MAX_NUM_SVC_ENTRIES	0xff
+
+	uint8_t		resv6[9];
+
+#define	CTRL_ID_STRING_LEN	64
+	char		id_string[CTRL_ID_STRING_LEN];
+
+} PACK_SUFFIX ib_ioc_profile_t;
+#include
+/*
+* FIELDS
+*	ioc_guid
+*		An EUI-64 GUID used to uniquely identify the IO controller.
+*
+*	vend_id
+*		IO controller vendor ID, IEEE format.
+*
+*	dev_id
+*		A number assigned by the vendor to identify the type of controller.
+*
+*	dev_ver
+*		A number assigned by the vendor to identify the device version.
+*
+*	subsys_vend_id
+*		ID of the vendor of the enclosure, if any, in which the IO controller
+*		resides in IEEE format; otherwise zero.
+*
+*	subsys_id
+*		A number identifying the subsystem where the controller resides.
+*
+*	io_class
+*		0x0000 - 0xfffe = reserved for IO classes encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.
+*
+*	io_subclass
+*		0x0000 - 0xfffe = reserved for IO subclasses encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+*		if the io_class component is 0xffff.
+*
+*	protocol
+*		0x0000 - 0xfffe = reserved for IO subclasses encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+*		if the io_class component is 0xffff.
+*
+*	protocol_ver
+*		Protocol specific.
+*
+*	send_msg_depth
+*		Maximum depth of the send message queue.
+*
+*	rdma_read_depth
+*		Maximum depth of the per-channel RDMA read queue.
+*
+*	send_msg_size
+*		Maximum size of send messages.
+*
+*	ctrl_ops_cap
+*		Supported operation types of this IO controller.  A bit set to one
+*		for affirmation of supported capability.
+*
+*	num_svc_entries
+*		Number of entries in the service entries table.
+*
+*	id_string
+*		UTF-8 encoded string for identifying the controller to an operator.
+*
+* SEE ALSO
+*	ib_dm_mad_t
+*********/
+
+
+AL_INLINE uint32_t AL_API
+ib_ioc_profile_get_vend_id(
+	IN	const ib_ioc_profile_t* const	p_ioc_profile )
+{
+	return( cl_ntoh32(p_ioc_profile->vend_id) >> 8 );
+}
+
+
+AL_INLINE void AL_API
+ib_ioc_profile_set_vend_id(
+	IN	ib_ioc_profile_t* const		p_ioc_profile,
+	IN	const uint32_t			vend_id )
+{
+	/* The 24-bit vendor ID occupies the upper three bytes on the wire. */
+	p_ioc_profile->vend_id = cl_hton32( vend_id << 8 );
+}
+
+
+AL_INLINE uint32_t AL_API
+ib_ioc_profile_get_subsys_vend_id(
+	IN	const ib_ioc_profile_t* const	p_ioc_profile )
+{
+	return( cl_ntoh32(p_ioc_profile->subsys_vend_id) >> 8 );
+}
+
+
+AL_INLINE void AL_API
+ib_ioc_profile_set_subsys_vend_id(
+	IN	ib_ioc_profile_t* const		p_ioc_profile,
+	IN	const uint32_t			subsys_vend_id )
+{
+	p_ioc_profile->subsys_vend_id = cl_hton32( subsys_vend_id << 8 );
+}
+
+/****s* IBA Base: Types/ib_svc_entry_t
+* NAME
+*	ib_svc_entry_t
+*
+* DESCRIPTION
+*	IBA defined IO Controller service entry structure (16.3.3.5)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_svc_entry
+{
+#define	MAX_SVC_ENTRY_NAME_LEN	40
+	char		name[MAX_SVC_ENTRY_NAME_LEN];
+
+	ib_net64_t	id;
+
+} PACK_SUFFIX ib_svc_entry_t;
+#include
+/*
+* FIELDS
+*	name
+*		UTF-8 encoded, null-terminated name of the service.
+*
+*	id
+*		An identifier of the associated Service.
+*
+* SEE ALSO
+*	ib_svc_entries_t
+*********/
+
+/****s* IBA Base: Types/ib_svc_entries_t
+* NAME
+*	ib_svc_entries_t
+*
+* DESCRIPTION
+*	IBA defined IO Controller service entry array (16.3.3.5)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_svc_entries
+{
+#define	SVC_ENTRY_COUNT		4
+	ib_svc_entry_t	service_entry[SVC_ENTRY_COUNT];
+
+} PACK_SUFFIX ib_svc_entries_t;
+#include
+/*
+* FIELDS
+*	service_entry
+*		An array of IO controller service entries.
+*
+* SEE ALSO
+*	ib_dm_mad_t, ib_svc_entry_t
+*********/
+
+
+/****f* IBA Base: Types/ib_dm_get_slot_lo_hi
+* DESCRIPTION
+*	Returns the IOC slot number, and the lower and upper bound of the
+*	service entries given the attribute modifier of ServiceEntries response.
+*/
+AL_INLINE void AL_API
+ib_dm_get_slot_lo_hi(
+	IN	const ib_net32_t	slot_lo_hi,
+	OUT	uint8_t	*const		p_slot OPTIONAL,
+	OUT	uint8_t	*const		p_lo OPTIONAL,
+	OUT	uint8_t	*const		p_hi OPTIONAL )
+{
+	ib_net32_t	tmp_slot_lo_hi = CL_NTOH32( slot_lo_hi );
+
+	if( p_slot )
+		*p_slot = (uint8_t)( ( tmp_slot_lo_hi >> 16 ) );
+
+	if( p_hi )
+		*p_hi = (uint8_t)( ( tmp_slot_lo_hi >> 8 ) );
+
+	if( p_lo )
+		*p_lo = (uint8_t)( ( tmp_slot_lo_hi >> 0 ) );
+}
+/* SEE ALSO
+* ib_dm_set_slot_lo_hi
+********/
+
+/****f* IBA Base: Types/ib_dm_set_slot_lo_hi
+* DESCRIPTION
+*	Joins the IOC slot number, and the lower and upper bound of the service
+*	entries and returns it.
+*/ +AL_INLINE net32_t AL_API +ib_dm_set_slot_lo_hi( + IN const uint8_t slot, + IN const uint8_t lo, + IN const uint8_t hi ) +{ + uint32_t tmp; + + tmp = slot << 16; + tmp |= hi << 8; + tmp |= lo; + return cl_hton32( tmp ); +} +/* SEE ALSO +* ib_dm_get_slot_lo_hi +********/ + +/* + * Information describing an I/O controller + */ +#pragma warning(disable:4324) +typedef struct _ib_ioc_info +{ + net64_t chassis_guid; + uint8_t chassis_slot; + net64_t iou_guid; + uint8_t iou_slot; + ib_ioc_profile_t profile; + +} ib_ioc_info_t; +#pragma warning(default:4324) + + +/* + * Defines known Communication management class versions + */ +#define IB_MCLASS_CM_VER_2 2 +#define IB_MCLASS_CM_VER_1 1 + +/* + * Defines the size of user available data in communication management MADs + */ +#define IB_REQ_PDATA_SIZE 92 +#define IB_MRA_PDATA_SIZE 222 +#define IB_REJ_PDATA_SIZE 148 +#define IB_REP_PDATA_SIZE 196 +#define IB_RTU_PDATA_SIZE 224 +#define IB_LAP_PDATA_SIZE 168 +#define IB_APR_PDATA_SIZE 148 +#define IB_DREQ_PDATA_SIZE 220 +#define IB_DREP_PDATA_SIZE 224 +#define IB_SIDR_REQ_PDATA_SIZE 216 +#define IB_SIDR_REP_PDATA_SIZE 136 + +#define IB_ARI_SIZE 72 // redefine +#define IB_APR_INFO_SIZE 72 + +/****d* Access Layer/ib_rej_status_t +* NAME +* ib_rej_status_t +* +* DESCRIPTION +* Rejection reasons. +* +* SYNOPSIS +*/ +typedef ib_net16_t ib_rej_status_t; +/* +* SEE ALSO +* ib_cm_rej, ib_cm_rej_rec_t +* +* SOURCE + */ +#define IB_REJ_INSUF_QP CL_HTON16(1) +#define IB_REJ_INSUF_EEC CL_HTON16(2) +#define IB_REJ_INSUF_RESOURCES CL_HTON16(3) +#define IB_REJ_TIMEOUT CL_HTON16(4) +#define IB_REJ_UNSUPPORTED CL_HTON16(5) +#define IB_REJ_INVALID_COMM_ID CL_HTON16(6) +#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7) +#define IB_REJ_INVALID_SID CL_HTON16(8) +#define IB_REJ_INVALID_XPORT CL_HTON16(9) +#define IB_REJ_STALE_CONN CL_HTON16(10) +#define IB_REJ_RDC_NOT_EXIST CL_HTON16(11) +#define IB_REJ_INVALID_GID CL_HTON16(12) +#define IB_REJ_INVALID_LID CL_HTON16(13) +#define IB_REJ_INVALID_SL CL_HTON16(14) +#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15) +#define IB_REJ_INVALID_HOP_LIMIT CL_HTON16(16) +#define IB_REJ_INVALID_PKT_RATE CL_HTON16(17) +#define IB_REJ_INVALID_ALT_GID CL_HTON16(18) +#define IB_REJ_INVALID_ALT_LID CL_HTON16(19) +#define IB_REJ_INVALID_ALT_SL CL_HTON16(20) +#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21) +#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22) +#define IB_REJ_INVALID_ALT_PKT_RATE CL_HTON16(23) +#define IB_REJ_PORT_REDIRECT CL_HTON16(24) +#define IB_REJ_INVALID_MTU CL_HTON16(26) +#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27) +#define IB_REJ_USER_DEFINED CL_HTON16(28) +#define IB_REJ_INVALID_RNR_RETRY CL_HTON16(29) +#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30) +#define IB_REJ_INVALID_CLASS_VER CL_HTON16(31) +#define IB_REJ_INVALID_FLOW_LBL CL_HTON16(32) +#define IB_REJ_INVALID_ALT_FLOW_LBL CL_HTON16(33) + +#define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535) +/******/ + +/****d* Access Layer/ib_apr_status_t +* NAME +* ib_apr_status_t +* +* DESCRIPTION +* Automatic path migration status information. 
+*
+* SYNOPSIS
+*/
+typedef uint8_t ib_apr_status_t;
+/*
+* SEE ALSO
+*	ib_cm_apr, ib_cm_apr_rec_t
+*
+* SOURCE
+ */
+#define IB_AP_SUCCESS			0
+#define IB_AP_INVALID_COMM_ID		1
+#define IB_AP_UNSUPPORTED		2
+#define IB_AP_REJECT			3
+#define IB_AP_REDIRECT			4
+#define IB_AP_IS_CURRENT		5
+#define IB_AP_INVALID_QPN		6
+#define IB_AP_INVALID_LID		7
+#define IB_AP_INVALID_GID		8
+#define IB_AP_INVALID_FLOW_LBL		9
+#define IB_AP_INVALID_TCLASS		10
+#define IB_AP_INVALID_HOP_LIMIT		11
+#define IB_AP_INVALID_PKT_RATE		12
+#define IB_AP_INVALID_SL		13
+/******/
+
+/****d* Access Layer/ib_cm_cap_mask_t
+* NAME
+*	ib_cm_cap_mask_t
+*
+* DESCRIPTION
+*	Capability mask values in ClassPortInfo.
+*
+* SYNOPSIS
+*/
+#define IB_CM_RELIABLE_CONN_CAPABLE	CL_HTON16(9)
+#define IB_CM_RELIABLE_DGRM_CAPABLE	CL_HTON16(10)
+#define IB_CM_RDGRM_CAPABLE		CL_HTON16(11)
+#define IB_CM_UNRELIABLE_CONN_CAPABLE	CL_HTON16(12)
+#define IB_CM_SIDR_CAPABLE		CL_HTON16(13)
+/*
+* SEE ALSO
+*	ib_cm_rep, ib_class_port_info_t
+*
+* SOURCE
+*
+*******/
+
+/*
+ * Service ID resolution status
+ */
+typedef uint8_t ib_sidr_status_t;
+#define IB_SIDR_SUCCESS			0
+#define IB_SIDR_UNSUPPORTED		1
+#define IB_SIDR_REJECT			2
+#define IB_SIDR_NO_QP			3
+#define IB_SIDR_REDIRECT		4
+#define IB_SIDR_UNSUPPORTED_VER		5
+
+
+/************/
+/****s* IBA Base: Types/ib_gmp_t
+* NAME
+*	ib_gmp_t
+*
+* DESCRIPTION
+*	IBA defined GMP MAD format. (16.1.1)
+*
+* SYNOPSIS
+*/
+#define IB_GMP_DATA_SIZE		200
+
+#include
+typedef struct _ib_gmp
+{
+	uint8_t		base_ver;
+	uint8_t		mgmt_class;
+	uint8_t		class_ver;
+	uint8_t		method;
+	ib_net16_t	status;
+	ib_net16_t	resv;
+	ib_net64_t	trans_id;
+	ib_net16_t	attr_id;
+	ib_net16_t	resv1;
+	ib_net32_t	attr_mod;
+	uint8_t		resv2[40];
+	uint8_t		data[IB_GMP_DATA_SIZE];
+
+} PACK_SUFFIX ib_gmp_t;
+#include
+/**********/
+#define IB_GMP_MAD_HDR_SIZE (sizeof(ib_gmp_t) - IB_GMP_DATA_SIZE)
+
+
+
+/************/
+/****s* IBA Base: Types/ib_port_counters_t
+* NAME
+*	ib_port_counters_t
+*
+* DESCRIPTION
+*	IBA defined PortCounters MAD format.
(16.1.3.5) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_port_counters +{ + uint8_t reserved0; + uint8_t port_select; + ib_net16_t counter_select; + ib_net16_t symbol_error_counter; + uint8_t link_error_recovery_counter; + uint8_t link_down_counter; + ib_net16_t port_rcv_errors; + ib_net16_t port_rcv_remote_physical_errors; + ib_net16_t port_rcv_switch_relay_errors; + ib_net16_t port_xmit_discard; + uint8_t port_xmit_constraint_errors; + uint8_t port_rcv_constraint_errors; + uint8_t reserved1; + /* uint4_t excessive_buffer_overrun_errors; + uint4_t local_link_integrity_errors; */ + uint8_t lli_errors_exc_buf_errors; + ib_net16_t reserved2; + ib_net16_t vl15_dropped; + ib_net32_t port_xmit_data; + ib_net32_t port_rcv_data; + ib_net32_t port_xmit_pkts; + ib_net32_t port_rcv_pkts; + +} PACK_SUFFIX ib_port_counters_t; +#include +/**********/ + +#define IB_COUNTER_SYMBOL_ERROR CL_NTOH16(1<<0) +#define IB_COUNTER_LINK_RECOVERY_ERROR CL_NTOH16(1<<1) +#define IB_COUNTER_LINK_DOWN CL_NTOH16(1<<2) +#define IB_COUNTER_RCV_ERROR CL_NTOH16(1<<3) +#define IB_COUNTERT_RCV_RMT_PHY_ERROR CL_NTOH16(1<<4) +#define IB_COUNTER_RCV_SWITCH_RELAY_ERROR CL_NTOH16(1<<5) +#define IB_COUNTER_XMIT_DISCARD CL_NTOH16(1<<6) +#define IB_COUNTER_XMIT_CONSTRAIN CL_NTOH16(1<<7) +#define IB_COUNTER_RCV_CONSTRAIN CL_NTOH16(1<<8) +#define IB_COUNTER_LINK_INTEG_ERROR CL_NTOH16(1<<9) +#define IB_COUNTER_EXECE_BUF_ERROR CL_NTOH16(1<<10) +#define IB_COUNTER_VL15_DROP CL_NTOH16(1<<11) +#define IB_COUNTER_XMIT_DATA CL_NTOH16(1<<12) +#define IB_COUNTER_XMIT_PKT CL_NTOH16(1<<13) +#define IB_COUNTER_RCV_DATA CL_NTOH16(1<<14) +#define IB_COUNTER_RCV_PKT CL_NTOH16(1<<15) +#define IB_COUNTER_ALL 0xff + + +/* + * The following definitions are shared between the Access Layer and VPD + */ + + +typedef struct _ib_ca* __ptr64 ib_ca_handle_t; +typedef struct _ib_pd* __ptr64 ib_pd_handle_t; +typedef struct _ib_mr* __ptr64 ib_mr_handle_t; +typedef struct _ib_mw* __ptr64 ib_mw_handle_t; +typedef struct _ib_qp* __ptr64 ib_qp_handle_t; +typedef struct _ib_srq* __ptr64 ib_srq_handle_t; +typedef struct _ib_cq* __ptr64 ib_cq_handle_t; +typedef struct _ib_av* __ptr64 ib_av_handle_t; +typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t; +typedef struct _mlnx_fmr* __ptr64 mlnx_fmr_handle_t; +typedef struct _mlnx_fmr_pool* __ptr64 mlnx_fmr_pool_handle_t; + + +/****d* Access Layer/ib_api_status_t +* NAME +* ib_api_status_t +* +* DESCRIPTION +* Function return codes indicating the success or failure of an API call. +* Note that success is indicated by the return value IB_SUCCESS, which +* is always zero. +* +* NOTES +* IB_VERBS_PROCESSING_DONE is used by UVP library to terminate a verbs call +* in the pre-ioctl step itself. 
+* +* SYNOPSIS +*/ +typedef enum _ib_api_status_t +{ + IB_SUCCESS, + IB_INSUFFICIENT_RESOURCES, + IB_INSUFFICIENT_MEMORY, + IB_INVALID_PARAMETER, + IB_INVALID_SETTING, + IB_NOT_FOUND, + IB_TIMEOUT, + IB_CANCELED, + IB_INTERRUPTED, + IB_INVALID_PERMISSION, + IB_UNSUPPORTED, + IB_OVERFLOW, + IB_MAX_MCAST_QPS_REACHED, + IB_INVALID_QP_STATE, + IB_INVALID_APM_STATE, + IB_INVALID_PORT_STATE, + IB_INVALID_STATE, + IB_RESOURCE_BUSY, + IB_INVALID_PKEY, + IB_INVALID_LKEY, + IB_INVALID_RKEY, + IB_INVALID_MAX_WRS, + IB_INVALID_MAX_SGE, + IB_INVALID_CQ_SIZE, + IB_INVALID_SRQ_SIZE, + IB_INVALID_SERVICE_TYPE, + IB_INVALID_GID, + IB_INVALID_LID, + IB_INVALID_GUID, + IB_INVALID_CA_HANDLE, + IB_INVALID_AV_HANDLE, + IB_INVALID_CQ_HANDLE, + IB_INVALID_QP_HANDLE, + IB_INVALID_SRQ_HANDLE, + IB_INVALID_PD_HANDLE, + IB_INVALID_MR_HANDLE, + IB_INVALID_FMR_HANDLE, + IB_INVALID_MW_HANDLE, + IB_INVALID_MCAST_HANDLE, + IB_INVALID_CALLBACK, + IB_INVALID_AL_HANDLE, /* InfiniBand Access Layer */ + IB_INVALID_HANDLE, /* InfiniBand Access Layer */ + IB_ERROR, /* InfiniBand Access Layer */ + IB_REMOTE_ERROR, /* Infiniband Access Layer */ + IB_VERBS_PROCESSING_DONE, /* See Notes above */ + IB_INVALID_WR_TYPE, + IB_QP_IN_TIMEWAIT, + IB_EE_IN_TIMEWAIT, + IB_INVALID_PORT, + IB_NOT_DONE, + IB_INVALID_INDEX, + IB_NO_MATCH, + IB_PENDING, + IB_UNKNOWN_ERROR /* ALWAYS LAST ENUM VALUE! */ + +} ib_api_status_t; +/*****/ + + + +/****f* IBA Base: Types/ib_get_err_str +* NAME +* ib_get_err_str +* +* DESCRIPTION +* Returns a string for the specified status value. +* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_err_str( + IN ib_api_status_t status ); +/* +* PARAMETERS +* status +* [in] status value +* +* RETURN VALUES +* Pointer to the status description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****d* Verbs/ib_async_event_t +* NAME +* ib_async_event_t -- Async event types +* +* DESCRIPTION +* This type indicates the reason the async callback was called. +* The context in the ib_event_rec_t indicates the resource context +* that associated with the callback. For example, for IB_AE_CQ_ERROR +* the context provided during the ib_create_cq is returned in the event. +* +* SYNOPSIS +*/ +typedef enum _ib_async_event_t +{ + IB_AE_SQ_ERROR = 1, + IB_AE_SQ_DRAINED, + IB_AE_RQ_ERROR, + IB_AE_CQ_ERROR, + IB_AE_QP_FATAL, + IB_AE_QP_COMM, + IB_AE_QP_APM, + IB_AE_LOCAL_FATAL, + IB_AE_PKEY_TRAP, + IB_AE_QKEY_TRAP, + IB_AE_MKEY_TRAP, + IB_AE_PORT_TRAP, + IB_AE_SYSIMG_GUID_TRAP, + IB_AE_BUF_OVERRUN, + IB_AE_LINK_INTEGRITY, + IB_AE_FLOW_CTRL_ERROR, + IB_AE_BKEY_TRAP, + IB_AE_QP_APM_ERROR, + IB_AE_WQ_REQ_ERROR, + IB_AE_WQ_ACCESS_ERROR, + IB_AE_PORT_ACTIVE, + IB_AE_PORT_DOWN, + IB_AE_CLIENT_REREGISTER, + IB_AE_SRQ_LIMIT_REACHED, + IB_AE_SRQ_CATAS_ERROR, + IB_AE_SRQ_QP_LAST_WQE_REACHED, + IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */ + +} ib_async_event_t; +/* +* VALUES +* IB_AE_SQ_ERROR +* An error occurred when accessing the send queue of the QP or EEC. +* This event is optional. +* +* IB_AE_SQ_DRAINED +* The send queue of the specified QP has completed the outstanding +* messages in progress when the state change was requested and, if +* applicable, has received all acknowledgements for those messages. +* +* IB_AE_RQ_ERROR +* An error occurred when accessing the receive queue of the QP or EEC. +* This event is optional. +* +* IB_AE_CQ_ERROR +* An error occurred when writing an entry to the CQ. +* +* IB_AE_QP_FATAL +* A catastrophic error occurred while accessing or processing the +* work queue that prevents reporting of completions. 
+*
+*	IB_AE_QP_COMM
+*		The first packet has arrived for the receive work queue where the
+*		QP is still in the RTR state.
+*
+*	IB_AE_QP_APM
+*		If alternate path migration is supported, this event indicates that
+*		the QP connection has migrated to the alternate path.
+*
+*	IB_AE_LOCAL_FATAL
+*		A catastrophic HCA error occurred which cannot be attributed to any
+*		resource; behavior is indeterminate.
+*
+*	IB_AE_PKEY_TRAP
+*		A PKEY violation was detected.  This event is optional.
+*
+*	IB_AE_QKEY_TRAP
+*		A QKEY violation was detected.  This event is optional.
+*
+*	IB_AE_MKEY_TRAP
+*		An MKEY violation was detected.  This event is optional.
+*
+*	IB_AE_PORT_TRAP
+*		A port capability change was detected.  This event is optional.
+*
+*	IB_AE_SYSIMG_GUID_TRAP
+*		If the system image GUID is supported, this event indicates that the
+*		system image GUID of this HCA has been changed.  This event is
+*		optional.
+*
+*	IB_AE_BUF_OVERRUN
+*		The number of consecutive flow control update periods with at least
+*		one overrun error in each period has exceeded the threshold specified
+*		in the port info attributes.  This event is optional.
+*
+*	IB_AE_LINK_INTEGRITY
+*		The detection of excessively frequent local physical errors has
+*		exceeded the threshold specified in the port info attributes.  This
+*		event is optional.
+*
+*	IB_AE_FLOW_CTRL_ERROR
+*		An HCA watchdog timer monitoring the arrival of flow control updates
+*		has expired without receiving an update.  This event is optional.
+*
+*	IB_AE_BKEY_TRAP
+*		A BKEY violation was detected.  This event is optional.
+*
+*	IB_AE_QP_APM_ERROR
+*		If alternate path migration is supported, this event indicates that
+*		an incoming path migration request to this QP was not accepted.
+*
+*	IB_AE_WQ_REQ_ERROR
+*		An OpCode violation was detected at the responder.
+*
+*	IB_AE_WQ_ACCESS_ERROR
+*		An access violation was detected at the responder.
+*
+*	IB_AE_PORT_ACTIVE
+*		If the port active event is supported, this event is generated
+*		when the link becomes active: IB_LINK_ACTIVE.
+*
+*	IB_AE_PORT_DOWN
+*		The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED,
+*		IB_LINK_DOWN.
+*
+*	IB_AE_CLIENT_REREGISTER
+*		The SM has requested that the client reregister its SA records.
+*
+*	IB_AE_SRQ_LIMIT_REACHED
+*		The SRQ low watermark has been reached.
+*
+*	IB_AE_SRQ_CATAS_ERROR
+*		An error occurred while processing or accessing the SRQ that prevents
+*		dequeuing a WQE from the SRQ and reporting of receive completions.
+*
+*	IB_AE_SRQ_QP_LAST_WQE_REACHED
+*		An event issued for a QP associated with a shared receive queue when
+*		a CQE is generated for the last WQE, or when the QP enters the Error
+*		state and there are no more WQEs on the RQ.
+*
+*	IB_AE_UNKNOWN
+*		An unknown error occurred which cannot be attributed to any
+*		resource; behavior is indeterminate.
+*
+*****/
+
+
+
+/****f* IBA Base: Types/ib_get_async_event_str
+* NAME
+*	ib_get_async_event_str
+*
+* DESCRIPTION
+*	Returns a string for the specified asynchronous event.
+*
+* SYNOPSIS
+*/
+AL_EXPORT const char* AL_API
+ib_get_async_event_str(
+	IN	ib_async_event_t	event );
+/*
+* PARAMETERS
+*	event
+*		[in] event value
+*
+* RETURN VALUES
+*	Pointer to the asynchronous event description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****s* Verbs/ib_event_rec_t
+* NAME
+*	ib_event_rec_t -- Async event notification record
+*
+* DESCRIPTION
+*	When an async event callback is made, this structure is passed to indicate
+*	the type of event, the source of the event that caused it, and the context
+*	associated with this event.
+* +* context -- Context of the resource that caused the event. +* -- ca_context if this is a port/adapter event. +* -- qp_context if the source is a QP event +* -- cq_context if the source is a CQ event. +* -- ee_context if the source is an EE event. +* +* SYNOPSIS +*/ +typedef struct _ib_event_rec +{ + void* __ptr64 context; + ib_async_event_t type; + + /* HCA vendor specific event information. */ + uint64_t vendor_specific; + + /* The following structures are valid only for trap types. */ + union _trap + { + struct + { + uint16_t lid; + ib_net64_t port_guid; + uint8_t port_num; + + /* + * The following structure is valid only for + * P_KEY, Q_KEY, and M_KEY violation traps. + */ + struct + { + uint8_t sl; + uint16_t src_lid; + uint16_t dest_lid; + union _key + { + uint16_t pkey; + uint32_t qkey; + uint64_t mkey; + } key; + uint32_t src_qp; + uint32_t dest_qp; + ib_gid_t src_gid; + ib_gid_t dest_gid; + + } violation; + + } info; + + ib_net64_t sysimg_guid; + + } trap; + +} ib_event_rec_t; +/*******/ + +/****d* Access Layer/ib_atomic_t +* NAME +* ib_atomic_t +* +* DESCRIPTION +* Indicates atomicity levels supported by an adapter. +* +* SYNOPSIS +*/ +typedef enum _ib_atomic_t +{ + IB_ATOMIC_NONE, + IB_ATOMIC_LOCAL, + IB_ATOMIC_GLOBAL + +} ib_atomic_t; +/* +* VALUES +* IB_ATOMIC_NONE +* Atomic operations not supported. +* +* IB_ATOMIC_LOCAL +* Atomic operations guaranteed between QPs of a single CA. +* +* IB_ATOMIC_GLOBAL +* Atomic operations are guaranteed between CA and any other entity +* in the system. +*****/ + +/****s* Access Layer/ib_port_cap_t +* NAME +* ib_port_cap_t +* +* DESCRIPTION +* Indicates which management agents are currently available on the specified +* port. +* +* SYNOPSIS +*/ +typedef struct _ib_port_cap +{ + boolean_t cm; + boolean_t snmp; + boolean_t dev_mgmt; + boolean_t vend; + boolean_t sm; + boolean_t sm_disable; + boolean_t qkey_ctr; + boolean_t pkey_ctr; + boolean_t notice; + boolean_t trap; + boolean_t apm; + boolean_t slmap; + boolean_t pkey_nvram; + boolean_t mkey_nvram; + boolean_t sysguid; + boolean_t dr_notice; + boolean_t boot_mgmt; + boolean_t capm_notice; + boolean_t reinit; + boolean_t ledinfo; + boolean_t port_active; + boolean_t ipd; + boolean_t pkey_switch_ext_port; + boolean_t bm; + boolean_t link_rtl; + boolean_t client_reregister; + +} ib_port_cap_t; +/*****/ + +/****d* Access Layer/ib_init_type_t +* NAME +* ib_init_type_t +* +* DESCRIPTION +* If supported by the HCA, the type of initialization requested by +* this port before SM moves it to the active or armed state. If the +* SM implements reinitialization, it shall set these bits to indicate +* the type of initialization performed prior to activating the port. +* Otherwise, these bits shall be set to 0. +* +* SYNOPSIS +*/ +typedef uint8_t ib_init_type_t; +#define IB_INIT_TYPE_NO_LOAD 0x01 +#define IB_INIT_TYPE_PRESERVE_CONTENT 0x02 +#define IB_INIT_TYPE_PRESERVE_PRESENCE 0x04 +#define IB_INIT_TYPE_DO_NOT_RESUSCITATE 0x08 +/*****/ + +/****s* Access Layer/ib_port_attr_mod_t +* NAME +* ib_port_attr_mod_t +* +* DESCRIPTION +* Port attributes that may be modified. +* +* SYNOPSIS +*/ +typedef struct _ib_port_attr_mod +{ + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + ib_init_type_t init_type; + ib_net64_t system_image_guid; + +} ib_port_attr_mod_t; +/* +* SEE ALSO +* ib_port_cap_t +*****/ + +/****s* Access Layer/ib_port_attr_t +* NAME +* ib_port_attr_t +* +* DESCRIPTION +* Information about a port on a given channel adapter. 
+* +* SYNOPSIS +*/ +typedef struct _ib_port_attr +{ + ib_net64_t port_guid; + uint8_t port_num; + uint8_t mtu; + uint64_t max_msg_size; + ib_net16_t lid; + uint8_t lmc; + + /* + * LinkWidthSupported as defined in PortInfo. Required to calculate + * inter-packet delay (a.k.a. static rate). + */ + uint8_t link_width_supported; + + uint16_t max_vls; + + ib_net16_t sm_lid; + uint8_t sm_sl; + uint8_t link_state; + + ib_init_type_t init_type_reply; /* Optional */ + + /* + * subnet_timeout: + * The maximum expected subnet propagation delay to reach any port on + * the subnet. This value also determines the rate at which traps can + * be generated from this node. + * + * timeout = 4.096 microseconds * 2^subnet_timeout + */ + uint8_t subnet_timeout; + + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + uint16_t num_gids; + uint16_t num_pkeys; + /* + * Pointers at the end of the structure to allow doing a simple + * memory comparison of contents up to the first pointer. + */ + ib_gid_t* __ptr64 p_gid_table; + ib_net16_t* __ptr64 p_pkey_table; + +} ib_port_attr_t; +/* +* SEE ALSO +* uint8_t, ib_port_cap_t, ib_link_states_t +*****/ + +/****s* Access Layer/ib_ca_attr_t +* NAME +* ib_ca_attr_t +* +* DESCRIPTION +* Information about a channel adapter. +* +* SYNOPSIS +*/ +typedef struct _ib_ca_attr +{ + ib_net64_t ca_guid; + + uint32_t vend_id; + uint16_t dev_id; + uint16_t revision; + uint64_t fw_ver; + + /* + * Total size of the ca attributes in bytes + */ + uint32_t size; + uint32_t max_qps; + uint32_t max_wrs; + + uint32_t max_sges; + uint32_t max_rd_sges; + + uint32_t max_cqs; + uint32_t max_cqes; + + uint32_t max_pds; + + uint32_t init_regions; + uint64_t init_region_size; + + uint32_t init_windows; + uint32_t max_addr_handles; + + uint32_t max_partitions; + + ib_atomic_t atomicity; + + uint8_t max_qp_resp_res; + uint8_t max_resp_res; + + uint8_t max_qp_init_depth; + + uint32_t max_ipv6_qps; + uint32_t max_ether_qps; + + uint32_t max_mcast_grps; + uint32_t max_mcast_qps; + uint32_t max_qps_per_mcast_grp; + uint32_t max_fmr; + uint32_t max_map_per_fmr; + uint32_t max_srq; + uint32_t max_srq_wrs; + uint32_t max_srq_sges; + + /* + * local_ack_delay: + * Specifies the maximum time interval between the local CA receiving + * a message and the transmission of the associated ACK or NAK. + * + * timeout = 4.096 microseconds * 2^local_ack_delay + */ + uint8_t local_ack_delay; + + boolean_t bad_pkey_ctr_support; + boolean_t bad_qkey_ctr_support; + boolean_t raw_mcast_support; + boolean_t apm_support; + boolean_t av_port_check; + boolean_t change_primary_port; + boolean_t modify_wr_depth; + boolean_t modify_srq_depth; + boolean_t current_qp_state_support; + boolean_t shutdown_port_capability; + boolean_t init_type_support; + boolean_t port_active_event_support; + boolean_t system_image_guid_support; + boolean_t hw_agents; + + ib_net64_t system_image_guid; + + uint32_t num_page_sizes; + uint8_t num_ports; + + uint32_t* __ptr64 p_page_size; + ib_port_attr_t* __ptr64 p_port_attr; + +} ib_ca_attr_t; +/* +* FIELDS +* ca_guid +* GUID for this adapter. +* +* vend_id +* IEEE vendor ID for this adapter +* +* dev_id +* Device ID of this adapter. (typically from PCI device ID) +* +* revision +* Revision ID of this adapter +* +* fw_ver +* Device Firmware version. +* +* size +* Total size in bytes for the HCA attributes. This size includes total +* size required for all the variable members of the structure. 
If a +* vendor requires to pass vendor specific fields beyond this structure, +* the HCA vendor can choose to report a larger size. If a vendor is +* reporting extended vendor specific features, they should also provide +* appropriate access functions to aid with the required interpretation. +* +* max_qps +* Maximum number of QP's supported by this HCA. +* +* max_wrs +* Maximum number of work requests supported by this HCA. +* +* max_sges +* Maximum number of scatter gather elements supported per work request. +* +* max_rd_sges +* Maximum number of scatter gather elements supported for READ work +* requests for a Reliable Datagram QP. This value must be zero if RD +* service is not supported. +* +* max_cqs +* Maximum number of Completion Queues supported. +* +* max_cqes +* Maximum number of CQ elements supported per CQ. +* +* max_pds +* Maximum number of protection domains supported. +* +* init_regions +* Initial number of memory regions supported. These are only informative +* values. HCA vendors can extended and grow these limits on demand. +* +* init_region_size +* Initial limit on the size of the registered memory region. +* +* init_windows +* Initial number of window entries supported. +* +* max_addr_handles +* Maximum number of address handles supported. +* +* max_partitions +* Maximum number of partitions supported. +* +* atomicity +* Indicates level of atomic operations supported by this HCA. +* +* max_qp_resp_res +* Maximum limit on number of responder resources for incomming RDMA +* operations on QPs. +* +* max_fmr +* Maximum number of Fast Memory Regions supported. +* +* max_map_per_fmr +* Maximum number of mappings, supported by a Fast Memory Region. +* +* max_srq +* Maximum number of Shared Receive Queues supported. +* +* max_srq_wrs +* Maximum number of work requests supported by this SRQ. +* +* max_srq_sges +* Maximum number of scatter gather elements supported per work request on SRQ. +* +* max_resp_res +* Maximum number of responder resources per HCA, with this HCA used as +* the target. +* +* max_qp_init_depth +* Maximimum initiator depth per QP for initiating RDMA reads and +* atomic operations. +* +* max_ipv6_qps +* max_ether_qps +* Maximum number of IPV6 and raw ether QP's supported by this HCA. +* +* max_mcast_grps +* Maximum number of multicast groups supported. +* +* max_mcast_qps +* Maximum number of QP's that can support multicast operations. +* +* max_qps_per_mcast_grp +* Maximum number of multicast QP's per multicast group. +* +* local_ack_delay +* Specifies the maximum time interval between the local CA receiving +* a message and the transmission of the associated ACK or NAK. +* timeout = 4.096 microseconds * 2^local_ack_delay +* +* bad_pkey_ctr_support +* bad_qkey_ctr_support +* Indicates support for the bad pkey and qkey counters. +* +* raw_mcast_support +* Indicates support for raw packet multicast. +* +* apm_support +* Indicates support for Automatic Path Migration. +* +* av_port_check +* Indicates ability to check port number in address handles. +* +* change_primary_port +* Indicates ability to change primary port for a QP or EEC during a +* SQD->RTS transition. +* +* modify_wr_depth +* Indicates ability to modify QP depth during a modify QP operation. +* Check the verb specification for permitted states. +* +* modify_srq_depth +* Indicates ability to modify SRQ depth during a modify SRQ operation. +* Check the verb specification for permitted states. 
+* +* current_qp_state_support +* Indicates ability of the HCA to support the current QP state modifier +* during a modify QP operation. +* +* shutdown_port_capability +* Shutdown port capability support indicator. +* +* init_type_support +* Indicates init_type_reply and ability to set init_type is supported. +* +* port_active_event_support +* Port active event support indicator. +* +* system_image_guid_support +* System image GUID support indicator. +* +* hw_agents +* Indicates SMA is implemented in HW. +* +* system_image_guid +* Optional system image GUID. This field is valid only if the +* system_image_guid_support flag is set. +* +* num_page_sizes +* Indicates support for different page sizes supported by the HCA. +* The variable size array can be obtained from p_page_size. +* +* num_ports +* Number of physical ports supported on this HCA. +* +* p_page_size +* Array holding different page size supported. +* +* p_port_attr +* Array holding port attributes. +* +* NOTES +* This structure contains the attributes of a channel adapter. Users must +* call ib_copy_ca_attr to copy the contents of this structure to a new +* memory region. +* +* SEE ALSO +* ib_port_attr_t, ib_atomic_t, ib_copy_ca_attr +*****/ + +/****f* Access layer/ib_copy_ca_attr +* NAME +* ib_copy_ca_attr +* +* DESCRIPTION +* Copies CA attributes. +* +* SYNOPSIS +*/ +AL_EXPORT ib_ca_attr_t* AL_API +ib_copy_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ); +/* +* PARAMETERS +* p_dest +* Pointer to the buffer that is the destination of the copy. +* +* p_src +* Pointer to the CA attributes to copy. +* +* RETURN VALUE +* Pointer to the copied CA attributes. +* +* NOTES +* The buffer pointed to by the p_dest parameter must be at least the size +* specified in the size field of the buffer pointed to by p_src. +* +* SEE ALSO +* ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr +*****/ + + +/****d* Access Layer/ib_pd_type_t +* NAME +* ib_pd_type_t +* +* DESCRIPTION +* Indicates the type of protection domain being allocated. +* +* SYNOPSIS +*/ +typedef enum _ib_pd_type +{ + IB_PDT_NORMAL, + IB_PDT_ALIAS, + IB_PDT_SQP, + IB_PDT_UD + +} ib_pd_type_t; +/* +* VALUES +* IB_PDT_NORMAL +* Protection domain for all non-aliased QPs. +* +* IB_PDT_ALIAS +* Protection domain for IB_QPT_QP0_ALIAS and IB_QPT_QP1_ALIAS QPs. +* +* IB_PDT_SQP +* Protection domain for special queue pair usage. +* +* IB_PDT_UD +* Protection domain for UD queue pair usage. +*****/ + + +/****s* Access Layer/ib_av_attr_t +* NAME +* ib_av_attr_t +* +* DESCRIPTION +* IBA address vector. +* +* SYNOPSIS +*/ +typedef struct _ib_av_attr +{ + uint8_t port_num; + + uint8_t sl; + ib_net16_t dlid; + + boolean_t grh_valid; + ib_grh_t grh; + uint8_t static_rate; + uint8_t path_bits; + + struct _av_conn + { + uint8_t path_mtu; + uint8_t local_ack_timeout; + uint8_t seq_err_retry_cnt; + uint8_t rnr_retry_cnt; + + } conn; + +} ib_av_attr_t; +/* +* SEE ALSO +* ib_gid_t +*****/ + +/****d* Access Layer/ib_qp_type_t +* NAME +* ib_qp_type_t +* +* DESCRIPTION +* Indicates the type of queue pair being created. 
+*
+* SYNOPSIS
+*/
+typedef enum _ib_qp_type
+{
+	IB_QPT_RELIABLE_CONN	= 0,	/* Matches CM REQ transport type */
+	IB_QPT_UNRELIABLE_CONN	= 1,	/* Matches CM REQ transport type */
+	IB_QPT_RELIABLE_DGRM	= 2,	/* Matches CM REQ transport type */
+	IB_QPT_UNRELIABLE_DGRM,
+	IB_QPT_QP0,
+	IB_QPT_QP1,
+	IB_QPT_RAW_IPV6,
+	IB_QPT_RAW_ETHER,
+	IB_QPT_MAD,			/* InfiniBand Access Layer */
+	IB_QPT_QP0_ALIAS,		/* InfiniBand Access Layer */
+	IB_QPT_QP1_ALIAS,		/* InfiniBand Access Layer */
+	IB_QPT_UNKNOWN
+} ib_qp_type_t;
+/*
+* VALUES
+*	IB_QPT_RELIABLE_CONN
+*		Reliable, connected queue pair.
+*
+*	IB_QPT_UNRELIABLE_CONN
+*		Unreliable, connected queue pair.
+*
+*	IB_QPT_RELIABLE_DGRM
+*		Reliable, datagram queue pair.
+*
+*	IB_QPT_UNRELIABLE_DGRM
+*		Unreliable, datagram queue pair.
+*
+*	IB_QPT_QP0
+*		Queue pair 0.
+*
+*	IB_QPT_QP1
+*		Queue pair 1.
+*
+*	IB_QPT_RAW_IPV6
+*		Raw IP version 6 queue pair.
+*
+*	IB_QPT_RAW_ETHER
+*		Raw Ethernet queue pair.
+*
+*	IB_QPT_MAD
+*		Unreliable, datagram queue pair that will send and receive management
+*		datagrams with assistance from the access layer.
+*
+*	IB_QPT_QP0_ALIAS
+*		Alias to queue pair 0.  Aliased QPs can only be created on an aliased
+*		protection domain.
+*
+*	IB_QPT_QP1_ALIAS
+*		Alias to queue pair 1.  Aliased QPs can only be created on an aliased
+*		protection domain.
+*****/
+
+
+/****f* IBA Base: Types/ib_get_qp_type_str
+* NAME
+*	ib_get_qp_type_str
+*
+* DESCRIPTION
+*	Returns a string for the specified QP type
+*
+* SYNOPSIS
+*/
+AL_EXPORT const char* AL_API
+ib_get_qp_type_str(
+	IN	uint8_t		qp_type );
+
+/*
+* PARAMETERS
+*	qp_type
+*		[in] Encoded QP type as defined in the QP attribute.
+*
+* RETURN VALUES
+*	Pointer to the QP type string.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_qp_type_t
+*********/
+
+/****d* Access Layer/ib_access_t
+* NAME
+*	ib_access_t
+*
+* DESCRIPTION
+*	Indicates the type of access is permitted on resources such as QPs,
+*	memory regions and memory windows.
+*
+* SYNOPSIS
+*/
+typedef uint32_t ib_access_t;
+#define IB_AC_RDMA_READ		0x00000001
+#define IB_AC_RDMA_WRITE	0x00000002
+#define IB_AC_ATOMIC		0x00000004
+#define IB_AC_LOCAL_WRITE	0x00000008
+#define IB_AC_MW_BIND		0x00000010
+/*
+* NOTES
+*	Users may combine access rights using a bit-wise or operation to specify
+*	additional access.  For example: IB_AC_RDMA_READ | IB_AC_RDMA_WRITE grants
+*	RDMA read and write access.
+*****/
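+
+/*
+ * Example (an illustrative sketch, not part of the original header; the
+ * macro name is hypothetical): a memory region meant to be the target of
+ * remote RDMA operations also needs local write access so the HCA can
+ * place inbound data.
+ */
+#define IB_AC_EXAMPLE_RDMA_TARGET	\
+	(IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE)
+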
+*
+* SYNOPSIS
+*/
+typedef uint32_t			ib_qp_state_t;
+#define IB_QPS_RESET		0x00000001
+#define IB_QPS_INIT			0x00000002
+#define IB_QPS_RTR			0x00000004
+#define IB_QPS_RTS			0x00000008
+#define IB_QPS_SQD			0x00000010
+#define IB_QPS_SQD_DRAINING	0x00000030
+#define IB_QPS_SQD_DRAINED	0x00000050
+#define IB_QPS_SQERR		0x00000080
+#define IB_QPS_ERROR		0x00000100
+#define IB_QPS_TIME_WAIT	0xDEAD0000	/* InfiniBand Access Layer */
+/*****/
+
+/****d* Access Layer/ib_apm_state_t
+* NAME
+*	ib_apm_state_t
+*
+* DESCRIPTION
+*	The current automatic path migration state of a queue pair.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_apm_state
+{
+	IB_APM_MIGRATED = 1,
+	IB_APM_REARM,
+	IB_APM_ARMED
+
+} ib_apm_state_t;
+/*****/
+
+/****d* Access Layer/ib_srq_attr_mask_t
+* NAME
+*	ib_srq_attr_mask_t
+*
+* DESCRIPTION
+*	Indicates the valid fields in an ib_srq_attr_t structure.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_srq_attr_mask {
+	IB_SRQ_MAX_WR	= 1 << 0,
+	IB_SRQ_LIMIT	= 1 << 1,
+} ib_srq_attr_mask_t;
+/*****/
+
+
+/****s* Access Layer/ib_srq_attr_t
+* NAME
+*	ib_srq_attr_t
+*
+* DESCRIPTION
+*	Attributes used to initialize a shared receive queue at creation time.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_srq_attr {
+	uint32_t	max_wr;
+	uint32_t	max_sge;
+	uint32_t	srq_limit;
+} ib_srq_attr_t;
+/*
+* FIELDS
+*	max_wr
+*		Specifies the maximum number of outstanding work requests on the SRQ.
+*
+*	max_sge
+*		Specifies the maximum number of scatter/gather elements in one work
+*		request.
+*
+*	srq_limit
+*		Specifies the low-water mark for the SRQ.
+*
+* SEE ALSO
+*	ib_qp_type_t, ib_srq_attr_mask_t
+*****/
+
+
+/****s* Access Layer/ib_qp_create_t
+* NAME
+*	ib_qp_create_t
+*
+* DESCRIPTION
+*	Attributes used to initialize a queue pair at creation time.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_qp_create
+{
+	ib_qp_type_t	qp_type;
+
+	uint32_t		sq_depth;
+	uint32_t		rq_depth;
+	uint32_t		sq_sge;
+	uint32_t		rq_sge;
+
+	ib_cq_handle_t	h_sq_cq;
+	ib_cq_handle_t	h_rq_cq;
+	ib_srq_handle_t	h_srq;
+
+	boolean_t		sq_signaled;
+
+} ib_qp_create_t;
+/*
+* FIELDS
+*	qp_type
+*		Specifies the type of queue pair to create.
+*
+*	sq_depth
+*		Indicates the requested maximum number of work requests that may be
+*		outstanding on the queue pair's send queue. This value must be less
+*		than or equal to the maximum reported by the channel adapter associated
+*		with the queue pair.
+*
+*	rq_depth
+*		Indicates the requested maximum number of work requests that may be
+*		outstanding on the queue pair's receive queue. This value must be less
+*		than or equal to the maximum reported by the channel adapter associated
+*		with the queue pair.
+*
+*	sq_sge
+*		Indicates the maximum number of scatter/gather elements that may be
+*		given in a send work request. This value must be less
+*		than or equal to the maximum reported by the channel adapter associated
+*		with the queue pair.
+*
+*	rq_sge
+*		Indicates the maximum number of scatter/gather elements that may be
+*		given in a receive work request. This value must be less
+*		than or equal to the maximum reported by the channel adapter associated
+*		with the queue pair.
+*
+*	h_sq_cq
+*		A handle to the completion queue that will be used to report send work
+*		request completions. This handle must be NULL if the type is
+*		IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
+*
+*	h_rq_cq
+*		A handle to the completion queue that will be used to report receive
+*		work request completions. This handle must be NULL if the type is
+*		IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS.
+*
+*	h_srq
+*		A handle to an SRQ to get receive completions via.
Must be coded NULL +* when QP is not associated with SRQ +* +* sq_signaled +* A flag that is used to indicate whether the queue pair will signal +* an event upon completion of a send work request. If set to +* TRUE, send work requests will always generate a completion +* event. If set to FALSE, a completion event will only be +* generated if the send_opt field of the send work request has the +* IB_SEND_OPT_SIGNALED flag set. +* +* SEE ALSO +* ib_qp_type_t, ib_qp_attr_t +*****/ + +/****s* Access Layer/ib_qp_attr_t +* NAME +* ib_qp_attr_t +* +* DESCRIPTION +* Queue pair attributes returned through ib_query_qp. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_attr +{ + ib_pd_handle_t h_pd; + ib_qp_type_t qp_type; + ib_access_t access_ctrl; + uint16_t pkey_index; + + uint32_t sq_max_inline; + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; + uint8_t init_depth; + uint8_t resp_res; + + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; + + boolean_t sq_signaled; + + ib_qp_state_t state; + ib_net32_t num; + ib_net32_t dest_num; + ib_net32_t qkey; + + ib_net32_t sq_psn; + ib_net32_t rq_psn; + + uint8_t primary_port; + uint8_t alternate_port; + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; + +} ib_qp_attr_t; +/* +* FIELDS +* h_pd +* This is a handle to a protection domain associated with the QP. +* +* sq_max_inline +* Maximum payload that can be inlined directly in a WQE, eliminating +* protection checks and additional DMA operations. +* +* NOTES +* Other fields are defined by the Infiniband specification. +* +* SEE ALSO +* ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t +*****/ + +/****d* Access Layer/ib_qp_opts_t +* NAME +* ib_qp_opts_t +* +* DESCRIPTION +* Optional fields supplied in the modify QP operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_qp_opts_t; +#define IB_MOD_QP_ALTERNATE_AV 0x00000001 +#define IB_MOD_QP_PKEY 0x00000002 +#define IB_MOD_QP_APM_STATE 0x00000004 +#define IB_MOD_QP_PRIMARY_AV 0x00000008 +#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010 +#define IB_MOD_QP_RESP_RES 0x00000020 +#define IB_MOD_QP_INIT_DEPTH 0x00000040 +#define IB_MOD_QP_PRIMARY_PORT 0x00000080 +#define IB_MOD_QP_ACCESS_CTRL 0x00000100 +#define IB_MOD_QP_QKEY 0x00000200 +#define IB_MOD_QP_SQ_DEPTH 0x00000400 +#define IB_MOD_QP_RQ_DEPTH 0x00000800 +#define IB_MOD_QP_CURRENT_STATE 0x00001000 +#define IB_MOD_QP_RETRY_CNT 0x00002000 +#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000 +#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000 +/* +* SEE ALSO +* ib_qp_mod_t +*****/ + +/****s* Access Layer/ib_qp_mod_t +* NAME +* ib_qp_mod_t +* +* DESCRIPTION +* Information needed to change the state of a queue pair through the +* ib_modify_qp call. 
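Tying the creation and attribute structures together, the sketch below fills ib_qp_create_t for a signaled RC QP with no SRQ; h_pd, h_cq, qp_context, and qp_event_cb are assumed to exist already, and the ib_create_qp entry point is assumed from the access layer API.

    ib_qp_create_t  qp_create;
    ib_qp_handle_t  h_qp;
    ib_api_status_t status;

    cl_memclr( &qp_create, sizeof(qp_create) );
    qp_create.qp_type = IB_QPT_RELIABLE_CONN;
    qp_create.sq_depth = 128;       /* must not exceed the CA maximum */
    qp_create.rq_depth = 128;
    qp_create.sq_sge = 4;
    qp_create.rq_sge = 4;
    qp_create.h_sq_cq = h_cq;       /* one CQ may serve both queues */
    qp_create.h_rq_cq = h_cq;
    qp_create.h_srq = NULL;         /* no SRQ for this QP */
    qp_create.sq_signaled = TRUE;   /* every send WR generates a completion */

    status = ib_create_qp( h_pd, &qp_create, qp_context, qp_event_cb, &h_qp );
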
+* +* SYNOPSIS +*/ +typedef struct _ib_qp_mod +{ + ib_qp_state_t req_state; + + union _qp_state + { + struct _qp_init + { + uint8_t primary_port; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + + } init; + + struct _qp_rtr + { + ib_net32_t rq_psn; + ib_net32_t dest_qp; + ib_av_attr_t primary_av; + uint8_t resp_res; + uint8_t rnr_nak_timeout; + + ib_qp_opts_t opts; + ib_av_attr_t alternate_av; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + uint32_t sq_depth; + uint32_t rq_depth; + + } rtr; + + struct _qp_rts + { + ib_net32_t sq_psn; + uint8_t retry_cnt; + uint8_t rnr_retry_cnt; + uint8_t local_ack_timeout; + uint8_t init_depth; + + ib_qp_opts_t opts; + uint8_t rnr_nak_timeout; + ib_qp_state_t current_state; + ib_net32_t qkey; + ib_access_t access_ctrl; + uint8_t resp_res; + + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + + uint32_t sq_depth; + uint32_t rq_depth; + + ib_apm_state_t apm_state; + uint8_t primary_port; + uint16_t pkey_index; + + } rts; + + struct _qp_sqd + { + boolean_t sqd_event; + + } sqd; + + } state; + +} ib_qp_mod_t; +/* +* SEE ALSO +* ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t +*****/ + + +/****d* Access Layer/ib_wr_type_t +* NAME +* ib_wr_type_t +* +* DESCRIPTION +* Identifies the type of work request posted to a queue pair. +* +* SYNOPSIS +*/ +typedef enum _ib_wr_type_t +{ + WR_SEND = 1, + WR_RDMA_WRITE, + WR_RDMA_READ, + WR_COMPARE_SWAP, + WR_FETCH_ADD, + WR_UNKNOWN + +} ib_wr_type_t; +/*****/ + + +/****f* IBA Base: Types/ib_get_wr_type_str +* NAME +* ib_get_wr_type_str +* +* DESCRIPTION +* Returns a string for the specified work request type +* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_wr_type_str( + IN uint8_t wr_type ); + +/* +* PARAMETERS +* wr_type +* [in] Encoded work request type as defined in the +work request attribute. + +* RETURN VALUES +* Pointer to the work request type string. +* +* NOTES +* +* SEE ALSO +* ib_wr_type_t +*********/ + + +/****s* Access Layer/ib_local_ds_t +* NAME +* ib_local_ds_t +* +* DESCRIPTION +* Local data segment information referenced by send and receive work +* requests. This is used to specify local data buffers used as part of a +* work request. +* +* SYNOPSIS +*/ +typedef struct _ib_local_ds +{ + uint64_t vaddr; + uint32_t length; + uint32_t lkey; + +} ib_local_ds_t; +/*****/ + +/****d* Access Layer/ib_send_opt_t +* NAME +* ib_send_opt_t +* +* DESCRIPTION +* Optional flags used when posting send work requests. These flags +* indicate specific processing for the send operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_send_opt_t; +#define IB_SEND_OPT_IMMEDIATE 0x00000001 +#define IB_SEND_OPT_FENCE 0x00000002 +#define IB_SEND_OPT_SIGNALED 0x00000004 +#define IB_SEND_OPT_SOLICITED 0x00000008 +#define IB_SEND_OPT_INLINE 0x00000010 +#define IB_SEND_OPT_LOCAL 0x00000020 +#define IB_SEND_OPT_VEND_MASK 0xFFFF0000 +/* +* VALUES +* The following flags determine the behavior of a work request when +* posted to the send side. +* +* IB_SEND_OPT_IMMEDIATE +* Send immediate data with the given request. +* +* IB_SEND_OPT_FENCE +* The operation is fenced. Complete all pending send operations +* before processing this request. +* +* IB_SEND_OPT_SIGNALED +* If the queue pair is configured for signaled completion, then +* generate a completion queue entry when this request completes. +* +* IB_SEND_OPT_SOLICITED +* Set the solicited bit on the last packet of this request. 
+*
+* IB_SEND_OPT_INLINE
+*		Indicates that the requested send data should be copied into a
+*		VPD-owned data buffer. This flag permits the user to issue send
+*		operations without first needing to register the buffer(s) associated
+*		with the send operation. Verb providers that support this operation
+*		may place vendor-specific restrictions on the size of a send operation
+*		that may be performed inline.
+*
+* IB_SEND_OPT_LOCAL
+*		Indicates that a sent MAD request should be given to the local VPD for
+*		processing. MADs sent using this option are not placed on the wire.
+*		This send option is only valid for MAD send operations.
+*
+* IB_SEND_OPT_VEND_MASK
+*		This mask indicates bits reserved in the send options that may be used
+*		by the verbs provider to indicate vendor-specific options. Bits set
+*		in this area of the send options are ignored by the Access Layer, but
+*		may have specific meaning to the underlying VPD.
+*
+*****/
+
+/****s* Access Layer/ib_send_wr_t
+* NAME
+*	ib_send_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a work request to the send queue of a queue
+*	pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_send_wr
+{
+	struct _ib_send_wr* __ptr64	p_next;
+	uint64_t					wr_id;
+	ib_wr_type_t				wr_type;
+	ib_send_opt_t				send_opt;
+	uint32_t					num_ds;
+	ib_local_ds_t* __ptr64		ds_array;
+	ib_net32_t					immediate_data;
+
+	union _send_dgrm
+	{
+		struct _send_ud
+		{
+			ib_net32_t		remote_qp;
+			ib_net32_t		remote_qkey;
+			ib_av_handle_t	h_av;
+			uint16_t		pkey_index;
+			void* __ptr64	rsvd;
+
+		} ud;
+
+		struct _send_rd
+		{
+			ib_net32_t	remote_qp;
+			ib_net32_t	remote_qkey;
+			ib_net32_t	eecn;
+
+		} rd;
+
+		struct _send_raw_ether
+		{
+			ib_net16_t	dest_lid;
+			uint8_t		path_bits;
+			uint8_t		sl;
+			uint8_t		max_static_rate;
+			ib_net16_t	ether_type;
+
+		} raw_ether;
+
+		struct _send_raw_ipv6
+		{
+			ib_net16_t	dest_lid;
+			uint8_t		path_bits;
+			uint8_t		sl;
+			uint8_t		max_static_rate;
+
+		} raw_ipv6;
+
+	} dgrm;
+
+	struct _send_remote_ops
+	{
+		uint64_t	vaddr;
+		net32_t		rkey;
+
+		ib_net64_t	atomic1;
+		ib_net64_t	atomic2;
+
+	} remote_ops;
+
+} ib_send_wr_t;
+/*
+* FIELDS
+*	p_next
+*		A pointer used to chain work requests together. This permits multiple
+*		work requests to be posted to a queue pair through a single function
+*		call. This value is set to NULL to mark the end of the chain.
+*
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	wr_type
+*		The type of work request being submitted to the send queue.
+*
+*	send_opt
+*		Optional send control parameters.
+*
+*	num_ds
+*		Number of local data segments specified by this work request.
+*
+*	ds_array
+*		A reference to an array of local data segments used by the send
+*		operation.
+*
+*	immediate_data
+*		32-bit field sent as part of a message send or RDMA write operation.
+*		This field is only valid if the send_opt flag IB_SEND_OPT_IMMEDIATE
+*		has been set.
+*
+*	dgrm.ud.remote_qp
+*		Identifies the destination queue pair of an unreliable datagram send
+*		operation.
+*
+*	dgrm.ud.remote_qkey
+*		The qkey for the destination queue pair.
+*
+*	dgrm.ud.h_av
+*		An address vector that specifies the path information used to route
+*		the outbound datagram to the destination queue pair.
+*
+*	dgrm.ud.pkey_index
+*		The pkey index for this send work request. This is valid only
+*		for IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types. The work request
+*		is posted using this pkey index to build the GMP's BTH, rather
+*		than using the QP's pkey.
+*
+*	dgrm.ud.rsvd
+*		Reserved for use by the Access Layer.
+*
+*	dgrm.raw_ether.dest_lid
+*		The destination LID that will receive this raw Ethernet send.
+*
+*	dgrm.raw_ether.path_bits
+*		path bits...
+*
+*	dgrm.raw_ether.sl
+*		service level...
+*
+*	dgrm.raw_ether.max_static_rate
+*		static rate...
+*
+*	dgrm.raw_ether.ether_type
+*		ether type...
+*
+*	dgrm.raw_ipv6.dest_lid
+*		The destination LID that will receive this raw IPv6 send.
+*
+*	dgrm.raw_ipv6.path_bits
+*		path bits...
+*
+*	dgrm.raw_ipv6.sl
+*		service level...
+*
+*	dgrm.raw_ipv6.max_static_rate
+*		static rate...
+*
+*	remote_ops.vaddr
+*		The registered virtual memory address of the remote memory to access
+*		with an RDMA or atomic operation.
+*
+*	remote_ops.rkey
+*		The rkey associated with the specified remote vaddr. This data must
+*		be presented exactly as obtained from the remote node. No swapping
+*		of data may be performed.
+*
+*	remote_ops.atomic1
+*		The first operand for an atomic operation.
+*
+*	remote_ops.atomic2
+*		The second operand for an atomic operation.
+*
+* NOTES
+*	The format of data sent over the fabric is user-defined and is considered
+*	opaque to the access layer. The sole exception to this is MADs posted
+*	to a MAD QP service. MADs are expected to match the format defined by
+*	the InfiniBand specification and must be in network-byte order when posted
+*	to the MAD QP service.
+*
+* SEE ALSO
+*	ib_wr_type_t, ib_local_ds_t, ib_send_opt_t
+*****/
+
+/****s* Access Layer/ib_recv_wr_t
+* NAME
+*	ib_recv_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a work request to the receive queue of a queue
+*	pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_recv_wr
+{
+	struct _ib_recv_wr* __ptr64	p_next;
+	uint64_t					wr_id;
+	uint32_t					num_ds;
+	ib_local_ds_t* __ptr64		ds_array;
+
+} ib_recv_wr_t;
+/*
+* FIELDS
+*	p_next
+*		A pointer used to chain work requests together. This permits multiple
+*		work requests to be posted to a queue pair through a single function
+*		call. This value is set to NULL to mark the end of the chain.
+*
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	num_ds
+*		Number of local data segments specified by this work request.
+*
+*	ds_array
+*		A reference to an array of local data segments used by the receive
+*		operation.
+*
+* SEE ALSO
+*	ib_local_ds_t
+*****/
+
+/****s* Access Layer/ib_bind_wr_t
+* NAME
+*	ib_bind_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a memory window bind work request to the send
+*	queue of a queue pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_bind_wr
+{
+	uint64_t		wr_id;
+	ib_send_opt_t	send_opt;
+
+	ib_mr_handle_t	h_mr;
+	ib_access_t		access_ctrl;
+	net32_t			current_rkey;
+
+	ib_local_ds_t	local_ds;
+
+} ib_bind_wr_t;
+/*
+* FIELDS
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	send_opt
+*		Optional send control parameters.
+*
+*	h_mr
+*		Handle to the memory region to which this window is being bound.
+*
+*	access_ctrl
+*		Access rights for this memory window.
+*
+*	current_rkey
+*		The current rkey assigned to this window for remote access.
+*
+*	local_ds
+*		A reference to a local data segment used by the bind operation.
+*
+* SEE ALSO
+*	ib_send_opt_t, ib_access_t, ib_local_ds_t
+*****/
+
+/****d* Access Layer/ib_wc_status_t
+* NAME
+*	ib_wc_status_t
+*
+* DESCRIPTION
+*	Indicates the status of a completed work request. These VALUES are
+*	returned to the user when retrieving completions. Note that success is
+*	identified as IB_WCS_SUCCESS, which is always zero.
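Putting the work request structures to use, a single-segment signaled RDMA write could be posted as sketched below; lkey, remote_vaddr, and remote_rkey are assumed to come from prior local registration and the peer's advertisement, and ib_post_send is assumed from the access layer API.

    ib_local_ds_t   ds;
    ib_send_wr_t    wr, *p_failed = NULL;
    ib_api_status_t status;

    ds.vaddr = (uint64_t)(uintn_t)p_buf;    /* registered local buffer */
    ds.length = buf_len;
    ds.lkey = lkey;

    cl_memclr( &wr, sizeof(wr) );
    wr.p_next = NULL;                       /* single request: end of chain */
    wr.wr_id = (uint64_t)(uintn_t)my_context; /* echoed in the completion */
    wr.wr_type = WR_RDMA_WRITE;
    wr.send_opt = IB_SEND_OPT_SIGNALED;
    wr.num_ds = 1;
    wr.ds_array = &ds;
    wr.remote_ops.vaddr = remote_vaddr;     /* from the peer */
    wr.remote_ops.rkey = remote_rkey;       /* exactly as received, no byte swapping */

    status = ib_post_send( h_qp, &wr, &p_failed );
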
+*
+* SYNOPSIS
+*/
+typedef enum _ib_wc_status_t
+{
+	IB_WCS_SUCCESS,
+	IB_WCS_LOCAL_LEN_ERR,
+	IB_WCS_LOCAL_OP_ERR,
+	IB_WCS_LOCAL_PROTECTION_ERR,
+	IB_WCS_WR_FLUSHED_ERR,
+	IB_WCS_MEM_WINDOW_BIND_ERR,
+	IB_WCS_REM_ACCESS_ERR,
+	IB_WCS_REM_OP_ERR,
+	IB_WCS_RNR_RETRY_ERR,
+	IB_WCS_TIMEOUT_RETRY_ERR,
+	IB_WCS_REM_INVALID_REQ_ERR,
+	IB_WCS_BAD_RESP_ERR,
+	IB_WCS_LOCAL_ACCESS_ERR,
+	IB_WCS_GENERAL_ERR,
+	IB_WCS_UNMATCHED_RESPONSE,	/* InfiniBand Access Layer */
+	IB_WCS_CANCELED,			/* InfiniBand Access Layer */
+	IB_WCS_UNKNOWN				/* Must be last. */
+
+} ib_wc_status_t;
+/*
+* VALUES
+*	IB_WCS_SUCCESS
+*		Work request completed successfully.
+*
+*	IB_WCS_LOCAL_LEN_ERR
+*		Generated for a work request posted to the send queue when the
+*		total of the data segment lengths exceeds the message length of the
+*		channel. Generated for a work request posted to the receive queue when
+*		the total of the data segment lengths is too small for a
+*		valid incoming message.
+*
+*	IB_WCS_LOCAL_OP_ERR
+*		An internal QP consistency error was generated while processing this
+*		work request. This may indicate that the QP was in an incorrect state
+*		for the requested operation.
+*
+*	IB_WCS_LOCAL_PROTECTION_ERR
+*		The data segments of the locally posted work request did not refer to
+*		a valid memory region. The memory may not have been properly
+*		registered for the requested operation.
+*
+*	IB_WCS_WR_FLUSHED_ERR
+*		The work request was flushed from the QP before being completed.
+*
+*	IB_WCS_MEM_WINDOW_BIND_ERR
+*		A memory window bind operation failed due to insufficient access
+*		rights.
+*
+*	IB_WCS_REM_ACCESS_ERR
+*		A protection error was detected at the remote node for an RDMA or
+*		atomic operation.
+*
+*	IB_WCS_REM_OP_ERR
+*		The operation could not be successfully completed at the remote node.
+*		This may indicate that the remote QP was in an invalid state or
+*		contained an invalid work request.
+*
+*	IB_WCS_RNR_RETRY_ERR
+*		The RNR retry count was exceeded while trying to send this message.
+*
+*	IB_WCS_TIMEOUT_RETRY_ERR
+*		The local transport timeout counter expired while trying to send this
+*		message.
+*
+*	IB_WCS_REM_INVALID_REQ_ERR
+*		The remote node detected an invalid message on the channel. This error
+*		is usually a result of one of the following:
+*		- The operation was not supported on the receive queue.
+*		- There were insufficient buffers to receive a new RDMA request.
+*		- There were insufficient buffers to receive a new atomic operation.
+*		- An RDMA request was larger than 2^31 bytes.
+*
+*	IB_WCS_BAD_RESP_ERR
+*		An unexpected transport layer opcode was returned
+*		by the responder.
+*
+*	IB_WCS_LOCAL_ACCESS_ERR
+*		A protection error occurred on a local data buffer
+*		during the processing of an RDMA Write with Immediate Data
+*		operation sent from the remote node.
+*
+*	IB_WCS_GENERAL_ERR
+*		Any other error.
+*
+*	IB_WCS_UNMATCHED_RESPONSE
+*		A response MAD was received for which there was no matching send. The
+*		send operation may have been canceled by the user or may have timed
+*		out.
+*
+*	IB_WCS_CANCELED
+*		The completed work request was canceled by the user.
+*****/
+
+
+
+/****f* IBA Base: Types/ib_get_wc_status_str
+* NAME
+*	ib_get_wc_status_str
+*
+* DESCRIPTION
+*	Returns a string for the specified work completion status.
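A typical consumer of these status codes is a polling loop; the sketch below assumes the access layer's free-list/done-list form of ib_poll_cq and uses ib_get_wc_status_str (declared next) only for logging.

    ib_wc_t     wc[8], *p_free, *p_done, *p_wc;
    int         i;

    /* Chain the array into the free list that ib_poll_cq consumes. */
    for( i = 0; i < 7; i++ )
        wc[i].p_next = &wc[i + 1];
    wc[7].p_next = NULL;
    p_free = &wc[0];

    if( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
    {
        for( p_wc = p_done; p_wc; p_wc = p_wc->p_next )
        {
            if( p_wc->status != IB_WCS_SUCCESS )
            {
                /* On error, only wr_id and status are meaningful. */
                cl_dbg_out( "wr_id 0x%I64x failed: %s\n", p_wc->wr_id,
                    ib_get_wc_status_str( p_wc->status ) );
            }
        }
    }
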
+* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_wc_status_str( + IN ib_wc_status_t wc_status ); +/* +* PARAMETERS +* wc_status +* [in] work completion status value +* +* RETURN VALUES +* Pointer to the work completion status description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****d* Access Layer/ib_wc_type_t +* NAME +* ib_wc_type_t +* +* DESCRIPTION +* Indicates the type of work completion. +* +* SYNOPSIS +*/ +typedef enum _ib_wc_type_t +{ + IB_WC_SEND, + IB_WC_RDMA_WRITE, + IB_WC_RECV, + IB_WC_RDMA_READ, + IB_WC_MW_BIND, + IB_WC_FETCH_ADD, + IB_WC_COMPARE_SWAP, + IB_WC_RECV_RDMA_WRITE, + IB_WC_UNKNOWN + +} ib_wc_type_t; +/*****/ + + +/****f* IBA Base: Types/ib_get_wc_type_str +* NAME +* ib_get_wc_type_str +* +* DESCRIPTION +* Returns a string for the specified work completion type. +* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_wc_type_str( + IN ib_wc_type_t wc_type ); +/* +* PARAMETERS +* wc_type +* [in] work completion type value +* +* RETURN VALUES +* Pointer to the work completion type description string. +* +* NOTES +* +* SEE ALSO +*********/ + + +/****d* Access Layer/ib_recv_opt_t +* NAME +* ib_recv_opt_t +* +* DESCRIPTION +* Indicates optional fields valid in a receive work completion. +* +* SYNOPSIS +*/ +typedef uint32_t ib_recv_opt_t; +#define IB_RECV_OPT_IMMEDIATE 0x00000001 +#define IB_RECV_OPT_FORWARD 0x00000002 +#define IB_RECV_OPT_GRH_VALID 0x00000004 +#define IB_RECV_OPT_VEND_MASK 0xFFFF0000 +/* +* VALUES +* IB_RECV_OPT_IMMEDIATE +* Indicates that immediate data is valid for this work completion. +* +* IB_RECV_OPT_FORWARD +* Indicates that the received trap should be forwarded to the SM. +* +* IB_RECV_OPT_GRH_VALID +* Indicates presence of the global route header. When set, the first +* 40 bytes received are the GRH. +* +* IB_RECV_OPT_VEND_MASK +* This mask indicates bits reserved in the receive options that may be +* used by the verbs provider to indicate vendor specific options. Bits +* set in this area of the receive options are ignored by the Access Layer, +* but may have specific meaning to the underlying VPD. +*****/ + +/****s* Access Layer/ib_wc_t +* NAME +* ib_wc_t +* +* DESCRIPTION +* Work completion information. +* +* SYNOPSIS +*/ +typedef struct _ib_wc +{ + struct _ib_wc* __ptr64 p_next; + uint64_t wr_id; + ib_wc_type_t wc_type; + + uint32_t length; + ib_wc_status_t status; + uint64_t vendor_specific; + + union _wc_recv + { + struct _wc_conn + { + ib_recv_opt_t recv_opt; + ib_net32_t immediate_data; + + } conn; + + struct _wc_ud + { + ib_recv_opt_t recv_opt; + ib_net32_t immediate_data; + ib_net32_t remote_qp; + uint16_t pkey_index; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + + } ud; + + struct _wc_rd + { + ib_net32_t remote_eecn; + ib_net32_t remote_qp; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint32_t free_cnt; + + } rd; + + struct _wc_raw_ipv6 + { + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + + } raw_ipv6; + + struct _wc_raw_ether + { + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + ib_net16_t ether_type; + + } raw_ether; + + } recv; + +} ib_wc_t; +/* +* FIELDS +* p_next +* A pointer used to chain work completions. This permits multiple +* work completions to be retrieved from a completion queue through a +* single function call. This value is set to NULL to mark the end of +* the chain. +* +* wr_id +* The 64-bit work request identifier that was specified when posting the +* work request. +* +* wc_type +* Indicates the type of work completion. 
+* +* +* length +* The total length of the data sent or received with the work request. +* +* status +* The result of the work request. +* +* vendor_specific +* HCA vendor specific information returned as part of the completion. +* +* recv.conn.recv_opt +* Indicates optional fields valid as part of a work request that +* completed on a connected (reliable or unreliable) queue pair. +* +* recv.conn.immediate_data +* 32-bit field received as part of an inbound message on a connected +* queue pair. This field is only valid if the recv_opt flag +* IB_RECV_OPT_IMMEDIATE has been set. +* +* recv.ud.recv_opt +* Indicates optional fields valid as part of a work request that +* completed on an unreliable datagram queue pair. +* +* recv.ud.immediate_data +* 32-bit field received as part of an inbound message on a unreliable +* datagram queue pair. This field is only valid if the recv_opt flag +* IB_RECV_OPT_IMMEDIATE has been set. +* +* recv.ud.remote_qp +* Identifies the source queue pair of a received datagram. +* +* recv.ud.pkey_index +* The pkey index for the source queue pair. This is valid only for +* GSI type QP's. +* +* recv.ud.remote_lid +* The source LID of the received datagram. +* +* recv.ud.remote_sl +* The service level used by the source of the received datagram. +* +* recv.ud.path_bits +* path bits... +* +* recv.rd.remote_eecn +* The remote end-to-end context number that sent the received message. +* +* recv.rd.remote_qp +* Identifies the source queue pair of a received message. +* +* recv.rd.remote_lid +* The source LID of the received message. +* +* recv.rd.remote_sl +* The service level used by the source of the received message. +* +* recv.rd.free_cnt +* The number of available entries in the completion queue. Reliable +* datagrams may complete out of order, so this field may be used to +* determine the number of additional completions that may occur. +* +* recv.raw_ipv6.remote_lid +* The source LID of the received message. +* +* recv.raw_ipv6.remote_sl +* The service level used by the source of the received message. +* +* recv.raw_ipv6.path_bits +* path bits... +* +* recv.raw_ether.remote_lid +* The source LID of the received message. +* +* recv.raw_ether.remote_sl +* The service level used by the source of the received message. +* +* recv.raw_ether.path_bits +* path bits... +* +* recv.raw_ether.ether_type +* ether type... +* NOTES +* When the work request completes with error, the only values that the +* consumer can depend on are the wr_id field, and the status of the +* operation. +* +* If the consumer is using the same CQ for completions from more than +* one type of QP (i.e Reliable Connected, Datagram etc), then the consumer +* must have additional information to decide what fields of the union are +* valid. +* SEE ALSO +* ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t +*****/ + +/****s* Access Layer/ib_mr_create_t +* NAME +* ib_mr_create_t +* +* DESCRIPTION +* Information required to create a registered memory region. +* +* SYNOPSIS +*/ +typedef struct _ib_mr_create +{ + void* __ptr64 vaddr; + uint64_t length; + ib_access_t access_ctrl; +} ib_mr_create_t; +/* +* FIELDS +* vaddr +* Starting virtual address of the region being registered. +* +* length +* Length of the buffer to register. +* +* access_ctrl +* Access rights of the registered region. +* +* SEE ALSO +* ib_access_t +*****/ + +#ifdef CL_KERNEL + +/****s* Access Layer/mlnx_fmr_create_t +* NAME +* mlnx_fmr_create_t +* +* DESCRIPTION +* Information required to create a Mellanox fast memory region. 
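For illustration, registering a buffer described by ib_mr_create_t might look like the sketch below; the ib_reg_mem entry point is assumed from the access layer API, and the returned lkey/rkey are the values later placed in data segments and remote operations.

    ib_mr_create_t  mr_create;
    ib_mr_handle_t  h_mr;
    net32_t         lkey, rkey;
    ib_api_status_t status;

    mr_create.vaddr = p_buf;
    mr_create.length = buf_len;
    mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;

    status = ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &h_mr );
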
+*
+* SYNOPSIS
+*/
+typedef struct _mlnx_fmr_create
+{
+	int			max_pages;
+	int			max_maps;
+	uint8_t		page_size;
+	ib_access_t	access_ctrl;
+
+} mlnx_fmr_create_t;
+/*
+* FIELDS
+*	max_pages
+*		Maximum number of pages in the region.
+*
+*	max_maps
+*		Maximum number of times the region can be mapped before remapping.
+*
+*	page_size
+*		log2 of the page size (e.g. 12 for 4KB).
+*
+*	access_ctrl
+*		Access rights of the registered region.
+*
+* NOTES
+*	This is a Mellanox-specific extension to verbs.
+*
+* SEE ALSO
+*	ib_access_t
+*****/
+
+
+/****s* Access Layer/mlnx_fmr_pool_create_t
+* NAME
+*	mlnx_fmr_pool_create_t
+*
+* DESCRIPTION
+*	Information required to create a Mellanox fast memory region pool.
+*
+* SYNOPSIS
+*/
+typedef struct _mlnx_fmr_pool_create
+{
+	int						max_pages_per_fmr;
+	uint8_t					page_size;	/* really - page_shift, log2 of page_size */
+	enum ib_access_flags	access_ctrl;
+	int						pool_size;
+	int						dirty_watermark;
+	void					(*flush_function)(mlnx_fmr_pool_handle_t h_pool, void *arg);
+	void					*flush_arg;
+	boolean_t				cache;
+} mlnx_fmr_pool_create_t;
+/*
+* FIELDS
+*	max_pages_per_fmr
+*		Maximum number of pages that each FMR in the pool can map.
+*
+*	page_size
+*		log2 of the page size (e.g. 12 for 4KB).
+*
+*	access_ctrl
+*		Access rights of the registered regions.
+*
+*	pool_size
+*		Number of FMRs to preallocate for the pool.
+*
+*	dirty_watermark
+*		Number of dirty (unmapped) FMRs that triggers an automatic flush.
+*
+*	flush_function
+*		Optional callback invoked when the pool is flushed.
+*
+*	flush_arg
+*		User-defined context passed to flush_function.
+*
+*	cache
+*		Indicates whether unmapped FMRs should be cached for remapping.
+*
+* NOTES
+*	This is a Mellanox-specific extension to verbs.
+*
+* SEE ALSO
+*	ib_access_t
+*****/
+#endif
+
+/****s* Access Layer/ib_phys_range_t
+* NAME
+*	ib_phys_range_t
+*
+* DESCRIPTION
+*	Information describing a physical memory range.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_phys_range
+{
+	uint64_t	base_addr;
+	uint64_t	size;
+
+} ib_phys_range_t;
+/*
+* FIELDS
+*	base_addr
+*		Physical address of the base of the memory range.
+*
+*	size
+*		Size, in bytes, of the memory range.
+*
+* NOTES
+*	The memory range must start and end on an HCA-supported page boundary.
+*
+* SEE ALSO
+*	ib_phys_create_t
+*********/
+
+
+/****s* Access Layer/ib_phys_create_t
+* NAME
+*	ib_phys_create_t
+*
+* DESCRIPTION
+*	Information required to create a physical memory region.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_phys_create
+{
+	uint64_t					length;
+	uint32_t					num_ranges;
+	ib_phys_range_t* __ptr64	range_array;
+	uint32_t					buf_offset;
+	uint32_t					hca_page_size;
+	ib_access_t					access_ctrl;
+} ib_phys_create_t;
+/*
+* FIELDS
+*	length
+*		The length of the memory region in bytes.
+*
+*	num_ranges
+*		Number of ib_phys_range structures listed in the specified range array.
+*
+*	range_array
+*		An array of ib_phys_range structures to be registered as a single
+*		memory region.
+*
+*	buf_offset
+*		The offset into the first physical memory range of the specified
+*		memory region on which to start the virtual address.
+*
+*	hca_page_size
+*		The HCA page size to use to register the memory.
+*
+*	access_ctrl
+*		Access rights of the registered region.
+*
+* SEE ALSO
+*	ib_access_t
+*****/
+
+/****s* Access Layer/ib_mr_attr_t
+* NAME
+*	ib_mr_attr_t
+*
+* DESCRIPTION
+*	Attributes of a registered memory region.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mr_attr
+{
+	ib_pd_handle_t	h_pd;
+	uint64_t		local_lb;
+	uint64_t		local_ub;
+	uint64_t		remote_lb;
+	uint64_t		remote_ub;
+	ib_access_t		access_ctrl;
+	net32_t			lkey;
+	net32_t			rkey;
+
+} ib_mr_attr_t;
+/*
+* FIELDS
+*	h_pd
+*		Handle to the protection domain for this memory region.
+*
+*	local_lb
+*		The virtual address of the lower bound of protection for local
+*		memory access. This is always a 64-bit quantity to support registering
+*		more than 4GB of memory on 32-bit systems with PAE.
+* +* local_ub +* The virtual address of the upper bound of protection for local +* memory access. This is always a 64-bit quantity to support registering +* more than 4GB of memory on 32-bit systems with PAE. +* +* remote_lb +* The virtual address of the lower bound of protection for remote +* memory access. This is always a 64-bit quantity to support registering +* more than 4GB of memory on 32-bit systems with PAE. +* +* remote_ub +* The virtual address of the upper bound of protection for remote +* memory access. This is always a 64-bit quantity to support registering +* more than 4GB of memory on 32-bit systems with PAE. +* +* access_ctrl +* Access rights for the specified memory region. +* +* lkey +* The lkey associated with this memory region. +* +* rkey +* The rkey associated with this memory region. +* +* NOTES +* The remote_lb, remote_ub, and rkey are only valid if remote memory access +* is enabled for this memory region. +* +* SEE ALSO +* ib_access_t +*****/ + +/****d* Access Layer/ib_ca_mod_t +* NAME +* ib_ca_mod_t -- Modify port attributes and error counters +* +* DESCRIPTION +* Specifies modifications to the port attributes of a channel adapter. +* +* SYNOPSIS +*/ +typedef uint32_t ib_ca_mod_t; +#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001 +#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002 +#define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED 0x00000004 +#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008 +#define IB_CA_MOD_IS_SM 0x00000010 +#define IB_CA_MOD_IS_SM_DISABLED 0x00000020 +#define IB_CA_MOD_QKEY_CTR 0x00000040 +#define IB_CA_MOD_PKEY_CTR 0x00000080 +#define IB_CA_MOD_IS_NOTICE_SUPPORTED 0x00000100 +#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200 +#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400 +#define IB_CA_MOD_IS_SLMAP_SUPPORTED 0x00000800 +#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED 0x00001000 +#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED 0x00002000 +#define IB_CA_MOD_IS_SYSGUID_SUPPORTED 0x00004000 +#define IB_CA_MOD_IS_DR_NOTICE_SUPPORTED 0x00008000 +#define IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED 0x00010000 +#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED 0x00020000 +#define IB_CA_MOD_IS_REINIT_SUPORTED 0x00040000 +#define IB_CA_MOD_IS_LEDINFO_SUPPORTED 0x00080000 +#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000 +#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000 +#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000 +#define IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED 0x00800000 +#define IB_CA_MOD_RESERVED_MASK 0xFF000000 +/* +* VALUES +* IB_CA_MOD_IS_CM_SUPPORTED +* Indicates if there is a communication manager accessible through +* the port. +* +* IB_CA_MOD_IS_SNMP_SUPPORTED +* Indicates if there is an SNMP agent accessible through the port. +* +* IB_CA_MOD_IS_DEV_MGMT_SUPPORTED +* Indicates if there is a device management agent accessible +* through the port. +* +* IB_CA_MOD_IS_VEND_SUPPORTED +* Indicates if there is a vendor supported agent accessible +* through the port. +* +* IB_CA_MOD_IS_SM +* Indicates if there is a subnet manager accessible through +* the port. +* +* IB_CA_MOD_IS_SM_DISABLED +* Indicates if the port has been disabled for configuration by the +* subnet manager. +* +* IB_CA_MOD_QKEY_CTR +* Used to reset the qkey violation counter associated with the +* port. +* +* IB_CA_MOD_PKEY_CTR +* Used to reset the pkey violation counter associated with the +* port. +* +* IB_CA_MOD_IS_NOTICE_SUPPORTED +* Indicates that this CA supports ability to generate Notices for +* Port State changes. 
(only applicable to switches)
+*
+*	IB_CA_MOD_IS_TRAP_SUPPORTED
+*		Indicates that this management port supports the ability to generate
+*		trap messages. (only applicable to switches)
+*
+*	IB_CA_MOD_IS_APM_SUPPORTED
+*		Indicates that this port is capable of performing Automatic
+*		Path Migration.
+*
+*	IB_CA_MOD_IS_SLMAP_SUPPORTED
+*		Indicates this port supports the SLMAP capability.
+*
+*	IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED
+*		Indicates that PKEY is supported in NVRAM.
+*
+*	IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED
+*		Indicates that MKEY is supported in NVRAM.
+*
+*	IB_CA_MOD_IS_SYSGUID_SUPPORTED
+*		Indicates System Image GUID support.
+*
+*	IB_CA_MOD_IS_DR_NOTICE_SUPPORTED
+*		Indicates support for generating Direct Routed Notices.
+*
+*	IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED
+*		Indicates support for Boot Management.
+*
+*	IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED
+*		Indicates the capability to generate notices for changes to CAPMASK.
+*
+*	IB_CA_MOD_IS_REINIT_SUPORTED
+*		Indicates the type of node init supported. Refer to Chapter 14 for
+*		initialization actions.
+*
+*	IB_CA_MOD_IS_LEDINFO_SUPPORTED
+*		Indicates support for LED info.
+*
+*	IB_CA_MOD_SHUTDOWN_PORT
+*		Used to modify the port active indicator.
+*
+*	IB_CA_MOD_INIT_TYPE_VALUE
+*		Used to modify the init_type value for the port.
+*
+*	IB_CA_MOD_SYSTEM_IMAGE_GUID
+*		Used to modify the system image GUID for the port.
+*
+*	IB_CA_MOD_IS_CLIENT_REREGISTER_SUPPORTED
+*		Indicates support for client reregistration.
+*
+*	IB_CA_MOD_RESERVED_MASK
+*		Mask of all the reserved bits. If any of these bits are set
+*		ib_modify_ca will return IB_INVALID_PARAMETER.
+*****/
+
+/****d* Access Layer/ib_mr_mod_t
+* NAME
+*	ib_mr_mod_t
+*
+* DESCRIPTION
+*	Mask used to specify which attributes of a registered memory region are
+*	being modified.
+*
+* SYNOPSIS
+*/
+typedef uint32_t			ib_mr_mod_t;
+#define IB_MR_MOD_ADDR		0x00000001
+#define IB_MR_MOD_PD		0x00000002
+#define IB_MR_MOD_ACCESS	0x00000004
+/*
+* VALUES
+*	IB_MR_MOD_ADDR
+*		The address of the memory region is being modified.
+*
+*	IB_MR_MOD_PD
+*		The protection domain associated with the memory region is being
+*		modified.
+*
+*	IB_MR_MOD_ACCESS
+*		The access rights of the memory region are being modified.
+*****/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_INIT
+* NAME
+*	IB_SMINFO_STATE_INIT
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_INIT			4
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_HANDOVER
+* NAME
+*	IB_SMINFO_ATTR_MOD_HANDOVER
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_HANDOVER		(CL_NTOH32(0x000001))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+* NAME
+*	IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_ACKNOWLEDGE	(CL_NTOH32(0x000002))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISABLE
+* NAME
+*	IB_SMINFO_ATTR_MOD_DISABLE
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_DISABLE		(CL_NTOH32(0x000003))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_STANDBY
+* NAME
+*	IB_SMINFO_ATTR_MOD_STANDBY
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
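As an example of using the modification masks above, resetting both violation counters on port 1 might look like the sketch below; ib_modify_ca and its ib_port_attr_mod_t companion structure are assumed from the access layer API.

    ib_port_attr_mod_t  port_mod;
    ib_api_status_t     status;

    /* Only the fields selected by the ca_mod bits are applied. */
    cl_memclr( &port_mod, sizeof(port_mod) );
    status = ib_modify_ca( h_ca, 1,
        IB_CA_MOD_QKEY_CTR | IB_CA_MOD_PKEY_CTR, &port_mod );
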
+* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_STANDBY (CL_NTOH32(0x000004)) +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISCOVER +* NAME +* IB_SMINFO_ATTR_MOD_DISCOVER +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_DISCOVER (CL_NTOH32(0x000005)) +/**********/ + +/****s* Access Layer/ib_ci_op_t +* NAME +* ib_ci_op_t +* +* DESCRIPTION +* A structure used for vendor specific CA interface communication. +* +* SYNOPSIS +*/ +typedef struct _ib_ci_op +{ + IN uint32_t command; + IN uint32_t buf_size; + IN uint32_t buf_info; + IN OUT int32_t status; + OUT uint32_t num_bytes_ret; + IN OUT void* __ptr64 p_buf OPTIONAL; + +} ib_ci_op_t; +/* +* FIELDS +* command +* A command code that is understood by the verbs provider. +* +* status +* The completion status from the verbs provider. This field should be +* initialize to indicate an error to allow detection and cleanup in +* case a communication error occurs between user-mode and kernel-mode. +* +* buf_size +* The size of the buffer in bytes. +* +* buf_info +* Additional buffer information +* +* p_buf +* A reference to a buffer containing vendor specific data. The verbs +* provider must not access pointers in the p_buf between user-mode and +* kernel-mode. Any pointers embedded in the p_buf are invalidated by +* the user-mode/kernel-mode transition. +* +* num_bytes_ret +* The size in bytes of the vendor specific data returned in the buffer. +* This field is set by the verbs provider. The verbs provider should +* verify that the buffer size is sufficient to hold the data being +* returned. +* +* NOTES +* This structure is provided to allow the exchange of vendor specific +* data between the originator and the verbs provider. Users of this +* structure are expected to know the format of data in the p_buf based +* on the structure command field or the usage context. +*****/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + + +#endif /* __IB_TYPES_H__ */ + + diff --git a/branches/Ndi/inc/kernel/complib/cl_atomic_osd.h b/branches/Ndi/inc/kernel/complib/cl_atomic_osd.h new file mode 100644 index 00000000..90289d59 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_atomic_osd.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + + + +#ifndef _CL_ATOMIC_OSD_H_ +#define _CL_ATOMIC_OSD_H_ + + +#include "complib/cl_types.h" + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE int32_t +cl_atomic_inc( + IN atomic32_t* const p_value ) +{ + return( InterlockedIncrement( (LONG*)p_value ) ); +} + + +CL_INLINE int32_t +cl_atomic_dec( + IN atomic32_t* const p_value ) +{ + return( InterlockedDecrement( (LONG*)p_value ) ); +} + + +CL_INLINE int32_t +cl_atomic_add( + IN atomic32_t* const p_value, + IN const int32_t increment ) +{ + /* Return the incremented value. */ + return( InterlockedExchangeAdd( (long*)p_value, increment ) + increment ); +} + + +CL_INLINE int32_t +cl_atomic_sub( + IN atomic32_t* const p_value, + IN const int32_t decrement ) +{ + /* Return the decremented value. */ + return( InterlockedExchangeAdd( (long*)p_value, -decrement ) - decrement ); +} + + +CL_INLINE int32_t +cl_atomic_xchg( + IN atomic32_t* const p_value, + IN const int32_t new_value ) +{ + return( InterlockedExchange( (long*)p_value, new_value ) ); +} + + +CL_INLINE int32_t +cl_atomic_comp_xchg( + IN atomic32_t* const p_value, + IN const int32_t compare, + IN const int32_t new_value ) +{ + return( InterlockedCompareExchange( (long*)p_value, new_value, compare ) ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_ATOMIC_OSD_H_ diff --git a/branches/Ndi/inc/kernel/complib/cl_bus_ifc.h b/branches/Ndi/inc/kernel/complib/cl_bus_ifc.h new file mode 100644 index 00000000..9ec8e1b3 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_bus_ifc.h @@ -0,0 +1,75 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_BUS_IFC_H_ +#define _CL_BUS_IFC_H_ + + +#include + + +/****f* Component Library: Plug and Play/cl_fwd_query_ifc +* NAME +* cl_fwd_query_ifc +* +* DESCRIPTION +* Forwards a IRP_MN_QUERY_INTERFACE request to the device stack +* represented by the input device object. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_fwd_query_ifc( + IN DEVICE_OBJECT* const p_dev_obj, + IN IO_STACK_LOCATION* const p_io_stack ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_io_stack +* Pointer to the original IRP's I/O stack location, used to format +* the forwarded IRP. +* +* RETURN VALUES +* IRP status value. +* +* NOTES +* The IRP forwarded is synchronous, so this call must be invoked at PASSIVE. 
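A common use of these atomic wrappers is an object reference count; the sketch below is illustrative only, and obj_t and obj_destroy are hypothetical names.

    typedef struct _obj
    {
        atomic32_t  ref_cnt;

    } obj_t;

    static void
    obj_addref(
        IN obj_t* const p_obj )
    {
        cl_atomic_inc( &p_obj->ref_cnt );
    }

    static void
    obj_release(
        IN obj_t* const p_obj )
    {
        /* cl_atomic_dec returns the decremented value, so zero
         * means this was the last reference. */
        if( !cl_atomic_dec( &p_obj->ref_cnt ) )
            obj_destroy( p_obj );   /* hypothetical cleanup routine */
    }
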
+* +* SEE ALSO +* Plug and Play +*********/ + +#endif /* _CL_BUS_IFC_H_ */ \ No newline at end of file diff --git a/branches/Ndi/inc/kernel/complib/cl_byteswap_osd.h b/branches/Ndi/inc/kernel/complib/cl_byteswap_osd.h new file mode 100644 index 00000000..dfa29043 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_byteswap_osd.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _CL_BYTESWAP_OSD_H_ +#define _CL_BYTESWAP_OSD_H_ + + +#include "complib/cl_types.h" + + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define CPU_LE 1 +#define CPU_BE 0 + +#define cl_ntoh16 _byteswap_ushort +#define cl_hton16 _byteswap_ushort + +#define cl_ntoh32 _byteswap_ulong +#define cl_hton32 _byteswap_ulong + +#define cl_ntoh64 _byteswap_uint64 +#define cl_hton64 _byteswap_uint64 + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_BYTESWAP_OSD_H_ + + + + + diff --git a/branches/Ndi/inc/kernel/complib/cl_debug_osd.h b/branches/Ndi/inc/kernel/complib/cl_debug_osd.h new file mode 100644 index 00000000..7a3d2f95 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_debug_osd.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + + + +#ifndef _CL_DEBUG_OSD_H_ +#define _CL_DEBUG_OSD_H_ + + +#include "complib/cl_types.h" + + +#if !defined(__MODULE__) +#define __MODULE__ "" +#define __MOD_DELIMITER__ "" +#else /* !defined(__MODULE__) */ +#define __MOD_DELIMITER__ ":" +#endif /* !defined(__MODULE__) */ + + +#if defined( _WIN64 ) +#define PRIdSIZE_T "I64d" +#else +#define PRIdSIZE_T "d" +#endif +#define PRId64 "I64d" +#define PRIx64 "I64x" +#define PRIo64 "I64o" +#define PRIu64 "I64u" + + +#if defined( _DEBUG_ ) +#define cl_dbg_out DbgPrint +#else +#define cl_dbg_out __noop +#endif /* defined( _DEBUG_ ) */ + +#define cl_msg_out DbgPrint + + +/* + * The following macros are used internally by the CL_ENTER, CL_TRACE, + * CL_TRACE_EXIT, and CL_EXIT macros. + */ +#if defined( _WDMDDK_ ) +/* wdm.h does not provide for a way to get the current processor number. */ +#define _CL_DBG_ENTER \ + ("%s%s%s() [\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_EXIT \ + ("%s%s%s() ]\n", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_INFO \ + ("%s%s%s(): ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_ERROR \ + ("%s%s%s() !ERROR!: ", __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#else // !defined( _WDMDDK_ ) + +#define _CL_DBG_ENTER \ + ("~%d:(%x)%s%s%s() [\n", KeGetCurrentProcessorNumber(), PsGetCurrentThread(), __MODULE__, \ + __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_EXIT \ + ("~%d:%s%s%s() ]\n", KeGetCurrentProcessorNumber(), __MODULE__, \ + __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_INFO \ + ("~%d:%s%s%s(): ", KeGetCurrentProcessorNumber(), __MODULE__, \ + __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_ERROR \ + ("~%d:%s%s%s() !ERROR!: ", KeGetCurrentProcessorNumber(), __MODULE__, \ + __MOD_DELIMITER__, __FUNCTION__) + +#endif // !defined( _WDMDDK_ ) + +#define CL_CHK_STK + + +#endif /* _CL_DEBUG_OSD_H_ */ + diff --git a/branches/Ndi/inc/kernel/complib/cl_event_osd.h b/branches/Ndi/inc/kernel/complib/cl_event_osd.h new file mode 100644 index 00000000..9e5ea3f9 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_event_osd.h @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_EVENT_OSD_H_ +#define _CL_EVENT_OSD_H_ + + +#include "complib/cl_types.h" +#include "complib/cl_memory.h" + + +/* Simple definition, eh? 
*/ +typedef KEVENT cl_event_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void +cl_event_construct( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + + cl_memclr( p_event, sizeof(cl_event_t) ); +} + + +CL_INLINE cl_status_t +cl_event_init( + IN cl_event_t* const p_event, + IN const boolean_t manual_reset ) +{ + CL_ASSERT( p_event ); + + cl_event_construct( p_event ); + + if( manual_reset ) + KeInitializeEvent( p_event, NotificationEvent, FALSE ); + else + KeInitializeEvent( p_event, SynchronizationEvent, FALSE ); + + return( CL_SUCCESS ); +} + + +CL_INLINE void +cl_event_destroy( + IN cl_event_t* const p_event ) +{ + UNUSED_PARAM( p_event ); +} + + +CL_INLINE cl_status_t +cl_event_signal( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + KeSetEvent( p_event, 0, FALSE ); + + return( CL_SUCCESS ); +} + + +CL_INLINE cl_status_t +cl_event_reset( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + KeClearEvent( p_event ); + + return( CL_SUCCESS ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_EVENT_OSD_H_ \ No newline at end of file diff --git a/branches/Ndi/inc/kernel/complib/cl_init.h b/branches/Ndi/inc/kernel/complib/cl_init.h new file mode 100644 index 00000000..206e6308 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_init.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
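Usage follows the usual construct/init/signal pattern; the waiting side below assumes the portable cl_event_wait_on prototype and the EVENT_NO_TIMEOUT constant from complib's cl_event.h, which is not part of this hunk.

    cl_event_t  event;

    cl_event_construct( &event );
    cl_event_init( &event, FALSE );     /* auto-reset (synchronization) event */

    /* Producer, at IRQL <= DISPATCH_LEVEL: */
    cl_event_signal( &event );

    /* Consumer, in a PASSIVE_LEVEL thread: */
    cl_event_wait_on( &event, EVENT_NO_TIMEOUT, FALSE );
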
+ * + * $Id$ + */ + + +#ifndef _CL_INIT_H_ +#define _CL_INIT_H_ + + +#include +#include + + +#ifdef CL_TRACK_MEM +#ifdef NEED_CL_OBJ +#define CL_INIT (__cl_mem_track( TRUE ), cl_obj_mgr_create()) +#define CL_DEINIT (cl_obj_mgr_destroy(), __cl_mem_track( FALSE )) +#else /* NEED_CL_OBJ */ +#define CL_INIT (__cl_mem_track( TRUE ), STATUS_SUCCESS) +#define CL_DEINIT (__cl_mem_track( FALSE )) +#endif /* NEED_CL_OBJ */ +#else /* CL_TRACK_MEM */ +#ifdef NEED_CL_OBJ +#define CL_INIT cl_obj_mgr_create() +#define CL_DEINIT cl_obj_mgr_destroy() +#else /* NEED_CL_OBJ */ +#define CL_INIT STATUS_SUCCESS +#define CL_DEINIT +#endif /* NEED_CL_OBJ */ +#endif /* CL_TRACK_MEM */ + +#endif // _CL_INIT_H_ diff --git a/branches/Ndi/inc/kernel/complib/cl_ioctl_osd.h b/branches/Ndi/inc/kernel/complib/cl_ioctl_osd.h new file mode 100644 index 00000000..450f3ef9 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_ioctl_osd.h @@ -0,0 +1,203 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
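The CL_INIT/CL_DEINIT macros above are intended to bracket driver load and unload; a sketch of the call sites, assuming a conventional WDM DriverEntry/unload pair and that the macro's result is NTSTATUS-compatible as complib's kernel build expects:

    static void drv_unload( IN DRIVER_OBJECT *p_drv_obj );

    NTSTATUS
    DriverEntry(
        IN DRIVER_OBJECT    *p_drv_obj,
        IN UNICODE_STRING   *p_registry_path )
    {
        NTSTATUS    status;

        UNREFERENCED_PARAMETER( p_registry_path );

        /* Starts memory tracking and/or the object manager, depending
         * on the CL_TRACK_MEM and NEED_CL_OBJ configuration. */
        status = CL_INIT;
        if( !NT_SUCCESS( status ) )
            return status;

        p_drv_obj->DriverUnload = drv_unload;
        return STATUS_SUCCESS;
    }

    static void
    drv_unload(
        IN DRIVER_OBJECT    *p_drv_obj )
    {
        UNREFERENCED_PARAMETER( p_drv_obj );
        CL_DEINIT;
    }
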
+ * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of IOCTL object + * + * Environment: + * Windows Kernel Mode + */ + + +#ifndef _CL_IOCTL_OSD_H_ +#define _CL_IOCTL_OSD_H_ + + +#include + + +#define IOCTL_CODE( type, cmd ) \ + CTL_CODE( type, (cmd & 0x0FFF), METHOD_BUFFERED, FILE_ANY_ACCESS) + + +typedef PIRP cl_ioctl_handle_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + + +CL_INLINE cl_status_t +cl_ioctl_process( + IN void *p_ioctl, + IN cl_status_t (pfn_ioctl_handler( cl_ioctl_handle_t, void*, void* ) ), + IN void *context_1, + IN void *context_2 ) +{ + return pfn_ioctl_handler( ((PIRP)p_ioctl), context_1, context_2 ); +} + + +CL_INLINE uint16_t +cl_ioctl_type( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + return (uint16_t)DEVICE_TYPE_FROM_CTL_CODE( + p_io_stack->Parameters.DeviceIoControl.IoControlCode ); +} + + +CL_INLINE uint16_t +cl_ioctl_cmd( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + return (uint16_t) + ((p_io_stack->Parameters.DeviceIoControl.IoControlCode >> 2) & 0x0FFF); +} + + +CL_INLINE uint32_t +cl_ioctl_ctl_code( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + return p_io_stack->Parameters.DeviceIoControl.IoControlCode; +} + + +CL_INLINE void* +cl_ioctl_in_buf( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + switch( p_io_stack->Parameters.DeviceIoControl.IoControlCode & 0x03 ) + { + case METHOD_BUFFERED: + case METHOD_OUT_DIRECT: + return (h_ioctl)->AssociatedIrp.SystemBuffer; + + case METHOD_IN_DIRECT: + return (h_ioctl)->MdlAddress; + + case METHOD_NEITHER: + return p_io_stack->Parameters.DeviceIoControl.Type3InputBuffer; + } + return NULL; +} + + +CL_INLINE ULONG +cl_ioctl_in_size( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + switch( p_io_stack->Parameters.DeviceIoControl.IoControlCode & 0x03 ) + { + case METHOD_BUFFERED: + case METHOD_OUT_DIRECT: + case METHOD_NEITHER: + return p_io_stack->Parameters.DeviceIoControl.InputBufferLength; + + case METHOD_IN_DIRECT: + return p_io_stack->Parameters.DeviceIoControl.OutputBufferLength; + } + return 0; +} + + +CL_INLINE void* +cl_ioctl_out_buf( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + switch( p_io_stack->Parameters.DeviceIoControl.IoControlCode & 0x03 ) + { + case METHOD_BUFFERED: + return (h_ioctl)->AssociatedIrp.SystemBuffer; + + case METHOD_IN_DIRECT: + case METHOD_OUT_DIRECT: + return (h_ioctl)->MdlAddress; + + case METHOD_NEITHER: + return (h_ioctl)->UserBuffer; + } + return NULL; +} + + +CL_INLINE ULONG +cl_ioctl_out_size( + IN cl_ioctl_handle_t h_ioctl ) +{ + IO_STACK_LOCATION *p_io_stack; + + p_io_stack = IoGetCurrentIrpStackLocation( h_ioctl ); + return p_io_stack->Parameters.DeviceIoControl.OutputBufferLength; +} + + +CL_INLINE void +cl_ioctl_complete( + IN cl_ioctl_handle_t h_ioctl, + IN cl_status_t io_status, + IN size_t ret_bytes ) +{ + h_ioctl->IoStatus.Status = cl_to_ntstatus( io_status ); + h_ioctl->IoStatus.Information = ret_bytes; + IoCompleteRequest( h_ioctl, IO_NO_INCREMENT ); +} + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif // _CL_IOCTL_OSD_H_ diff --git 
a/branches/Ndi/inc/kernel/complib/cl_irqlock_osd.h b/branches/Ndi/inc/kernel/complib/cl_irqlock_osd.h new file mode 100644 index 00000000..4398b2c3 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_irqlock_osd.h @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of IRQ lock object. + * + * Environment: + * Windows Kernel Mode + */ + + +#ifndef _CL_IRQLOCK_OSD_H_ +#define _CL_IRQLOCK_OSD_H_ + + +#include + + +typedef struct _cl_irqlock +{ + PKINTERRUPT p_interrupt; + KIRQL irql; + +} cl_irqlock_t; + + +typedef struct _KINTERRUPT cl_interrupt_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void +cl_irqlock_construct( + IN cl_irqlock_t* const p_irqlock ) +{ + p_irqlock->p_interrupt = NULL; +} + + +CL_INLINE cl_status_t +cl_irqlock_init( + IN cl_irqlock_t* const p_irqlock, + IN cl_interrupt_t* const p_interrupt ) +{ + cl_irqlock_construct( p_irqlock ); + p_irqlock->p_interrupt = p_interrupt; + return CL_SUCCESS; +} + + +CL_INLINE void +cl_irqlock_destroy( + IN cl_irqlock_t* const p_irqlock ) +{ + p_irqlock->p_interrupt = NULL; +} + + +CL_INLINE void +cl_irqlock_acquire( + IN cl_irqlock_t* const p_irqlock ) +{ +#if WINVER > 0x500 + p_irqlock->irql = KeAcquireInterruptSpinLock( p_irqlock->p_interrupt ); +#else + UNUSED_PARAM( p_irqlock ); +#pragma warning( push, 3 ) + ASSERT( 0 ); +#pragma warning( pop ) +#endif +} + + +CL_INLINE void +cl_irqlock_release( + IN cl_irqlock_t* const p_irqlock ) +{ +#if WINVER > 0x500 + KeReleaseInterruptSpinLock( p_irqlock->p_interrupt, p_irqlock->irql ); +#else + UNUSED_PARAM( p_irqlock ); +#pragma warning( push, 3 ) + ASSERT( 0 ); +#pragma warning( pop ) +#endif +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + + +#endif /* _CL_IRQLOCK_OSD_H_ */ diff --git a/branches/Ndi/inc/kernel/complib/cl_memory_osd.h b/branches/Ndi/inc/kernel/complib/cl_memory_osd.h new file mode 100644 index 00000000..20584619 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_memory_osd.h @@ -0,0 +1,216 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Defines kernel-only memory related functions. + * + * Environment: + * Windows Kernel Mode + */ + + +#ifndef _CL_MEMORY_OSD_H_ +#define _CL_MEMORY_OSD_H_ + + +#include + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void +cl_memset( + IN void* const p_memory, + IN const uint8_t fill, + IN const size_t count ) +{ + RtlFillMemory( p_memory, count, fill ); +} + + +CL_INLINE void* +cl_memcpy( + IN void* const p_dest, + IN const void* const p_src, + IN const size_t count ) +{ + RtlCopyMemory( p_dest, p_src, count ); + return p_dest; +} + + +CL_INLINE int32_t +cl_memcmp( + IN const void* const p_memory1, + IN const void* const p_memory2, + IN const size_t count ) +{ + return( memcmp( p_memory1, p_memory2, count ) ); +} + + +CL_INLINE uint32_t +cl_get_pagesize( void ) +{ + return (PAGE_SIZE); +} + + +#ifdef CL_NTDDK +CL_INLINE uint64_t +cl_get_physaddr( + IN void *vaddr ) +{ + return MmGetPhysicalAddress( vaddr ).QuadPart; +} +#endif + + +CL_INLINE cl_status_t +cl_check_for_read( + IN const void* const vaddr, + IN const size_t count ) +{ + __try + { + ProbeForRead( ( void *)vaddr, count, sizeof(void*) ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + return CL_INVALID_PERMISSION; + } + return CL_SUCCESS; +} + + +CL_INLINE cl_status_t +cl_check_for_write( + IN void* const vaddr, + IN const size_t count ) +{ + __try + { + /* + * We use ProbeForRead instead of ProbeForWrite because + * the additional checks to make sure the pages can be written + * are not guaranteed to still hold by the time we copy into + * the buffer. + * + * Furthermore, even if the pages don't change, ProbeForWrite will + * cause all pages to be paged in, and these pages could be paged out + * before the copy, requiring the copy to page them in once again. + * + * Micky Snir (mailto:mickys@microsoft.com) recommended *not* using + * ProbeForWrite because the page validity/permissions can change after + * the call and the actual access. + */ + ProbeForRead( vaddr, count, sizeof(void*) ); + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + return CL_INVALID_PERMISSION; + } + return CL_SUCCESS; +} + + +CL_INLINE cl_status_t +cl_copy_to_user( + IN void* const p_dest, + IN const void* const p_src, + IN const size_t count ) +{ + /* + * The memory copy must be done within a try/except block as the + * memory could be changing while the buffer is copied. 
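 *
 * For example (hypothetical usage, not part of this patch), an IOCTL
 * handler could copy results back to the caller with:
 *   status = cl_copy_to_user( cl_ioctl_out_buf( h_ioctl ), &resp, sizeof(resp) );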
+ */ + __try + { + /* + * We use ProbeForRead instead of ProbeForWrite because + * the additional checks to make sure the pages can be written + * are not guaranteed to still hold by the time we copy into + * the buffer. + * + * Furthermore, even if the pages don't change, ProbeForWrite will + * cause all pages to be paged in, and these pages could be paged out + * before the copy, requiring the copy to page them in once again. + * + * Micky Snir (mailto:mickys@microsoft.com) recommended *not* using + * ProbeForWrite because the page validity/permissions can change after + * the call and the actual access. + */ + ProbeForRead( p_dest, count, 1 ); + cl_memcpy( p_dest, p_src, count ); + return CL_SUCCESS; + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + return CL_INVALID_PERMISSION; + } +} + + +CL_INLINE cl_status_t +cl_copy_from_user( + IN void* const p_dest, + IN const void* const p_src, + IN const size_t count ) +{ + /* + * The memory copy must be done within a try/except block as the + * memory could be changing while the buffer is copied. + */ + __try + { + ProbeForRead( (void*)p_src, count, 1 ); + cl_memcpy( p_dest, p_src, count ); + return CL_SUCCESS; + } + __except(EXCEPTION_EXECUTE_HANDLER) + { + return CL_INVALID_PERMISSION; + } +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_MEMORY_OSD_H_ */ + diff --git a/branches/Ndi/inc/kernel/complib/cl_mutex_osd.h b/branches/Ndi/inc/kernel/complib/cl_mutex_osd.h new file mode 100644 index 00000000..5b607c1d --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_mutex_osd.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of mutex object. 
+ * + * Environment: + * Windows Kernel Mode + */ + + +#ifndef _CL_MUTEX_OSD_H_ +#define _CL_MUTEX_OSD_H_ + + +#include + + +typedef FAST_MUTEX cl_mutex_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void +cl_mutex_construct( + IN cl_mutex_t* const p_mutex ) +{ + UNUSED_PARAM( p_mutex ); +} + + +CL_INLINE cl_status_t +cl_mutex_init( + IN cl_mutex_t* const p_mutex ) +{ + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + ExInitializeFastMutex( p_mutex ); + return CL_SUCCESS; +} + + +CL_INLINE void +cl_mutex_destroy( + IN cl_mutex_t* const p_mutex ) +{ + UNUSED_PARAM( p_mutex ); +} + + +CL_INLINE void +cl_mutex_acquire( + IN cl_mutex_t* const p_mutex ) +{ + CL_ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + ExAcquireFastMutex( p_mutex ); +} + + +CL_INLINE void +cl_mutex_release( + IN cl_mutex_t* const p_mutex ) +{ + CL_ASSERT( KeGetCurrentIrql() == APC_LEVEL ); + ExReleaseFastMutex( p_mutex ); +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_MUTEX_OSD_H_ */ diff --git a/branches/Ndi/inc/kernel/complib/cl_packoff.h b/branches/Ndi/inc/kernel/complib/cl_packoff.h new file mode 100644 index 00000000..2117feb7 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_packoff.h @@ -0,0 +1,36 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +/* Note: The lack of conditional inclusion is intentional. */ +#include diff --git a/branches/Ndi/inc/kernel/complib/cl_packon.h b/branches/Ndi/inc/kernel/complib/cl_packon.h new file mode 100644 index 00000000..648b239e --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_packon.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +/* Note: The lack of conditional inclusion is intentional. */ +#include + + +/* + * Abstract: + * Turns on byte packing, which is necessary when passing structures from + * system to system over a network, to ensure that the compiler has + * introduced no padding. + * + * Note: + * Unlike other headers, this file carries no Robodoc documentation. + */ + +#ifndef PACK_SUFFIX +#define PACK_SUFFIX +#endif diff --git a/branches/Ndi/inc/kernel/complib/cl_pnp_po.h b/branches/Ndi/inc/kernel/complib/cl_pnp_po.h new file mode 100644 index 00000000..073b57b1 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_pnp_po.h @@ -0,0 +1,994 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_PNP_PO_H_ +#define _CL_PNP_PO_H_ + + +#include "complib/cl_types.h" + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****h* Component Library/Plug and Play +* NAME +* Plug and Play +* +* DESCRIPTION +* Provides plug and play support for kernel drivers. +* +* SEE ALSO +* Callback Types: +* cl_pfn_pnp_po_t +* +* Structures: +* cl_vfptr_pnp_po_t +*********/ + + +#define CL_DBG_PNP (1 << 1) + + +/****d* Component Library: Plug and Play/cl_irp_action_t +* NAME +* cl_irp_action_t +* +* DESCRIPTION +* Indicates what kind of action to take in response to an IRP. Used +* when processing PnP IRPs. +* +* SYNOPSIS +*/ +typedef enum _cl_irp_action +{ + IrpPassDown, + IrpSkip, + IrpIgnore, + IrpComplete, + IrpDoNothing + +} cl_irp_action_t; +/* +* VALUES +* IrpPassDown +* Pass the IRP down to the next driver. The IRP's current stack location +* has been set up properly. +* +* IrpSkip +* Skip the current IRP stack location but set the IRP's status. +* +* IrpIgnore +* Skip the current IRP stack location without setting its status value, +* since the IRP is not interesting. +* +* IrpComplete +* Complete the IRP: set the status in the IoStatus block, complete the +* IRP, and return the status.
+* +* IrpDoNothing +* The routine has assumed control of the IRP's completion. No further +* processing is required in the dispatch routine, and the remove lock +* should not be released. +*********/ + + +/****d* Component Library: Plug and Play/cl_pfn_pnp_po_t +* NAME +* cl_pfn_pnp_po_t +* +* DESCRIPTION +* Function prototype for PnP and Power Management IRP handlers. +* +* SYNOPSIS +*/ +typedef NTSTATUS +(*cl_pfn_pnp_po_t)( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value. +* +* SEE ALSO +* Plug and Play, cl_irp_action_t, cl_vfptr_pnp_po_t +*********/ + + +/****d* Component Library: Plug and Play/cl_pfn_release_resources_t +* NAME +* cl_pfn_release_resources_t +* +* DESCRIPTION +* Function prototype for releasing resources associated with a device. +* Called from either the remove handler or the surprise remove handler. +* +* SYNOPSIS +*/ +typedef void +(*cl_pfn_release_resources_t)( + IN DEVICE_OBJECT* const p_dev_obj ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object whose resources to release. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* Plug and Play, cl_vfptr_pnp_po_t +*********/ + + +/****d* Component Library: Plug and Play/cl_vfptr_pnp_po_t +* NAME +* cl_vfptr_pnp_po_t +* +* DESCRIPTION +* Virtual function pointer table for PnP and Power Management IRPs. +* +* SYNOPSIS +*/ +typedef struct _cl_vfptr_pnp_po +{ + const char* identity; + cl_pfn_pnp_po_t pfn_start; + cl_pfn_pnp_po_t pfn_query_stop; + cl_pfn_pnp_po_t pfn_stop; + cl_pfn_pnp_po_t pfn_cancel_stop; + cl_pfn_pnp_po_t pfn_query_remove; + cl_pfn_release_resources_t pfn_release_resources; + cl_pfn_pnp_po_t pfn_remove; + cl_pfn_pnp_po_t pfn_cancel_remove; + cl_pfn_pnp_po_t pfn_surprise_remove; + cl_pfn_pnp_po_t pfn_query_capabilities; + cl_pfn_pnp_po_t pfn_query_pnp_state; + cl_pfn_pnp_po_t pfn_filter_res_req; + cl_pfn_pnp_po_t pfn_dev_usage_notification; + cl_pfn_pnp_po_t pfn_query_bus_relations; + cl_pfn_pnp_po_t pfn_query_ejection_relations; + cl_pfn_pnp_po_t pfn_query_removal_relations; + cl_pfn_pnp_po_t pfn_query_target_relations; + cl_pfn_pnp_po_t pfn_unknown; + cl_pfn_pnp_po_t pfn_query_resources; + cl_pfn_pnp_po_t pfn_query_res_req; + cl_pfn_pnp_po_t pfn_query_bus_info; + cl_pfn_pnp_po_t pfn_query_interface; + cl_pfn_pnp_po_t pfn_read_config; + cl_pfn_pnp_po_t pfn_write_config; + cl_pfn_pnp_po_t pfn_eject; + cl_pfn_pnp_po_t pfn_set_lock; + cl_pfn_pnp_po_t pfn_query_power; + cl_pfn_pnp_po_t pfn_set_power; + cl_pfn_pnp_po_t pfn_power_sequence; + cl_pfn_pnp_po_t pfn_wait_wake; + +} cl_vfptr_pnp_po_t; +/* +* FIELDS +* identity +* String identifying the target; used when generating debug output. +* +* pfn_start +* IRP_MN_START_DEVICE handler. Users must forward the IRP to lower +* devices as required for their driver type. +* +* pfn_query_stop +* IRP_MN_QUERY_STOP_DEVICE handler. +* +* pfn_stop +* IRP_MN_STOP_DEVICE handler. +* +* pfn_cancel_stop +* IRP_MN_CANCEL_STOP_DEVICE handler. Users must forward the IRP to +* lower devices as required for their driver type. +* +* pfn_query_remove +* IRP_MN_QUERY_REMOVE_DEVICE handler. +* +* pfn_release_resources +* Called to release resources allocated for the device. +* +* pfn_remove +* IRP_MN_REMOVE_DEVICE handler.
+* +* pfn_cancel_remove +* IRP_MN_CANCEL_REMOVE_DEVICE handler. Users must forward the IRP +* to lower devices as required for their driver type. +* +* pfn_surprise_remove +* IRP_MN_SURPRISE_REMOVE_DEVICE handler. +* +* pfn_query_capabilities +* IRP_MN_QUERY_DEVICE_CAPABILITIES handler. +* +* pfn_query_pnp_state +* IRP_MN_QUERY_PNP_STATE handler. +* +* pfn_filter_res_req +* IRP_MN_FILTER_RESOURCE_REQUIREMENTS handler. +* +* pfn_dev_usage_notification +* IRP_MN_DEVICE_USAGE_NOTIFICATION handler. +* +* pfn_query_bus_relations +* IRP_MN_QUERY_BUS_RELATIONS handler. +* +* pfn_query_ejection_relations +* IRP_MN_QUERY_EJECTION_RELATIONS handler. +* +* pfn_query_removal_relations +* IRP_MN_QUERY_REMOVAL_RELATIONS handler. +* +* pfn_query_target_relations +* IRP_MN_QUERY_TARGET_RELATIONS handler. +* +* pfn_unknown +* FDO and Filter drivers should pass this IRP down. Bus drivers should +* complete the request for their PDOs without modifying the status. +* The component library provides the cl_irp_skip and cl_irp_complete +* functions to skip and complete an IRP, respectively. These functions +* can be used to perform the required action. This handler is invoked +* when an undocumented PnP IRP is received. +* +* pfn_query_resources +* IRP_MN_QUERY_RESOURCES handler. +* +* pfn_query_res_req +* IRP_MN_QUERY_RESOURCE_REQUIREMENTS handler. +* +* pfn_query_bus_info +* IRP_MN_QUERY_BUS_INFORMATION handler. +* +* pfn_query_interface +* IRP_MN_QUERY_INTERFACE handler. +* +* pfn_read_config +* IRP_MN_READ_CONFIG handler. +* +* pfn_write_config +* IRP_MN_WRITE_CONFIG handler. +* +* pfn_eject +* IRP_MN_EJECT handler. +* +* pfn_set_lock +* IRP_MN_SET_LOCK handler. +* +* pfn_query_power +* IRP_MN_QUERY_POWER handler. +* +* pfn_set_power +* IRP_MN_SET_POWER handler. +* +* pfn_power_sequence +* IRP_MN_POWER_SEQUENCE handler. +* +* pfn_wait_wake +* IRP_MN_WAIT_WAKE handler. +* +* NOTES +* The component library provides default handlers for skipping the IRP, +* completing the IRP without changing the status, and processing the IRP +* on the way up the device stack. Users can set the handler to +* cl_irp_skip, cl_irp_complete, or cl_do_sync_pnp. +* +* The handlers specified in pfn_query_power, pfn_set_power, +* pfn_power_sequence, and pfn_wait_wake, if implemented as pageable code, +* should be marked in a single pageable code section. The component library +* will use the first function that is not a component library handler to +* lock down the user's power management code. +* +* SEE ALSO +* Plug and Play, cl_pfn_pnp_po_t, cl_pfn_release_resources_t, cl_do_sync_pnp, +* cl_irp_skip, cl_irp_complete, cl_irp_unsupported, cl_irp_fail +***********/ + + +/****d* Component Library: Plug and Play/cl_pfn_query_text_t +* NAME +* cl_pfn_query_text_t +* +* DESCRIPTION +* Function pointer type for handling IRP_MN_QUERY_DEVICE_TEXT IRPs. +* +* SYNOPSIS +*/ +typedef NTSTATUS +(*cl_pfn_query_text_t)( + IN DEVICE_OBJECT* const p_dev_obj, + OUT IRP* const p_irp ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* [out] Pointer to the request IRP. Sets the requested device text in +* p_irp->IoStatus.Information. +* +* RETURN VALUES +* NTSTATUS value indicating the result of the query. +* +* NOTES +* Only bus drivers handle the IRP_MN_QUERY_DEVICE_TEXT IRP for their PDOs.
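+*
+* As an illustration only (names and IDs below are hypothetical, not part
+* of this patch), a bus driver's device-ID handler could be shaped like:
+*
+*	NTSTATUS my_query_device_id( DEVICE_OBJECT* const p_dev_obj, IRP* const p_irp )
+*	{
+*		WCHAR *p_id;
+*		UNUSED_PARAM( p_dev_obj );
+*		/* The PnP manager frees the returned buffer, so allocate from pool. */
+*		p_id = ExAllocatePool( PagedPool, sizeof(L"MY\\Device0001") );
+*		if( !p_id )
+*			return STATUS_INSUFFICIENT_RESOURCES;
+*		RtlCopyMemory( p_id, L"MY\\Device0001", sizeof(L"MY\\Device0001") );
+*		p_irp->IoStatus.Information = (ULONG_PTR)p_id;
+*		return STATUS_SUCCESS;
+*	}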
+* +* SEE ALSO +* Plug and Play +*********/ + + +/****d* Component Library: Plug and Play/cl_vfptr_query_txt +* NAME +* cl_vfptr_query_txt +* +* DESCRIPTION +* Function pointer table for the various types of text requested by +* IRP_MN_QUERY_DEVICE_TEXT IRPs. +* +* SYNOPSIS +*/ +typedef struct _cl_vfptr_query_txt +{ + cl_pfn_query_text_t pfn_query_device_id; + cl_pfn_query_text_t pfn_query_hardware_id; + cl_pfn_query_text_t pfn_query_compatible_id; + cl_pfn_query_text_t pfn_query_unique_id; + cl_pfn_query_text_t pfn_query_description; + cl_pfn_query_text_t pfn_query_location; + +} cl_vfptr_query_txt_t; +/* +* FIELDS +* pfn_query_device_id +* The request is for the target device's device ID. +* +* pfn_query_hardware_id +* The request is for the target device's hardware IDs. +* +* pfn_query_compatible_id +* The request is for the target device's compatible IDs. +* +* pfn_query_unique_id +* The request is for the target device's unique ID. +* +* pfn_query_description +* The request is for the target device's description. +* +* pfn_query_location +* The request is for the target device's location text. +* +* NOTES +* Hardware and compatible IDs should be returned in the most specific to +* most general order. The IDs are used to match drivers to devices. +* +* The query text function pointer table is maintained separately from the +* PnP function pointer table to allow FDO and filter drivers to not define +* the table since they typically do not handle these requests. +* +* SEE ALSO +* Plug and Play, cl_pfn_query_text_t +*********/ + + +/****d* Component Library: Plug and Play/cl_pnp_state_t +* NAME +* cl_pnp_state_t +* +* DESCRIPTION +* PnP States for device objects managed by this driver. +* +* SYNOPSIS +*/ +typedef enum _cl_pnp_state +{ + NotStarted = 0, + Started, + StopPending, + Stopped, + RemovePending, + SurpriseRemoved, + Deleted, + UnKnown + +} cl_pnp_state_t; +/* +* VALUES +* NotStarted +* Not started yet. +* +* Started +* Device has received the IRP_MN_START_DEVICE IRP. +* +* StopPending +* Device has received the IRP_MN_QUERY_STOP_DEVICE IRP. +* +* Stopped +* Device has received the IRP_MN_STOP_DEVICE IRP. +* +* RemovePending +* Device has received the IRP_MN_QUERY_REMOVE_DEVICE IRP. +* +* SurpriseRemoved +* Device has received the IRP_MN_SURPRISE_REMOVE_DEVICE IRP. +* +* Deleted +* Device has received the IRP_MN_REMOVE_DEVICE IRP. +* +* UnKnown +* Unknown state. +* +* SEE ALSO +* Plug and Play, cl_pnp_po_ext_t +*********/ + + +/****d* Component Library: Plug and Play/cl_pnp_po_ext_t +* NAME +* cl_pnp_po_ext_t +* +* DESCRIPTION +* Device extension structure required for using the component library +* plug and play helper routines. +* +* SYNOPSIS +*/ +typedef struct _cl_pnp_po_ext +{ + cl_pnp_state_t pnp_state; + cl_pnp_state_t last_pnp_state; + + DEVICE_OBJECT *p_self_do; + DEVICE_OBJECT *p_next_do; + DEVICE_OBJECT *p_pdo; + + IO_REMOVE_LOCK remove_lock; + IO_REMOVE_LOCK stop_lock; + + atomic32_t n_paging_files; + atomic32_t n_crash_files; + atomic32_t n_hibernate_files; + + const cl_vfptr_pnp_po_t *vfptr_pnp_po; + const cl_vfptr_query_txt_t *vfptr_query_txt; + + void *h_cl_locked_section; + void *h_user_locked_section; + + atomic32_t n_ifc_ref; + + uint32_t dbg_lvl; + +} cl_pnp_po_ext_t; +/* +* FIELDS +* pnp_state +* Current PnP device state. +* +* last_pnp_state +* Previous PnP device state, to restore in case a query is cancelled. +* +* p_self_do +* Pointer to the device's own device object. +* +* p_next_do +* Pointer to the next device object. Null if the device is a PDO.
+* +* p_pdo +* The pointer to the PDO for the device node. +* +* remove_lock +* Remove lock used to synchronize access to the device when handling +* PnP IRPs. +* +* stop_lock +* Lock used to track non-PnP and non-Power IO operations. +* +* n_paging_files +* Number of times the device is in a paging file path. +* +* n_crash_files +* Number of times the device is in a dump file path. +* +* n_hibernate_files +* Number of times the device is in a hibernation file path. +* +* vfptr_pnp_po +* Function pointer table for the PnP and Power Management handlers. +* +* vfptr_query_txt +* Function pointer table for IRP_MN_QUERY_DEVICE_TEXT handlers. +* +* NOTES +* This structure must be first in the device extension so that the device +* extension can successfully be cast to a cl_pnp_po_ext_t pointer by the +* IRP handler routines. +* +* When performing I/O operations, users should acquire the stop lock and +* check status before starting an I/O operation, and release the stop lock +* after completing an I/O operation using the cl_start_io and cl_end_io +* functions, respectively. +* +* SEE ALSO +* Plug and Play, cl_vfptr_pnp_po_t, cl_pnp_state_t, cl_start_io, cl_end_io +*********/ + + +/****f* Component Library: Plug and Play/cl_init_pnp_po_ext +* NAME +* cl_init_pnp_po_ext +* +* DESCRIPTION +* Initializes the component library device extension for use. +* +* SYNOPSIS +*/ +CL_EXPORT void +cl_init_pnp_po_ext( + IN OUT DEVICE_OBJECT* const p_dev_obj, + IN DEVICE_OBJECT* const p_next_do, + IN DEVICE_OBJECT* const p_pdo, + IN const uint32_t pnp_po_dbg_lvl, + IN const cl_vfptr_pnp_po_t* const vfptr_pnp, + IN const cl_vfptr_query_txt_t* const vfptr_query_txt OPTIONAL ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object for the device. +* +* p_next_do +* Pointer to the next device object in the device stack. Must be NULL +* for PDO objects. +* +* p_pdo +* Pointer to the PDO for the device node. +* +* pnp_po_dbg_lvl +* Debug level to control flow of debug messages. If the bit for +* CL_DBG_PNP is set, verbose debug messages are generated. +* +* vfptr_pnp +* Pointer to the function table of PnP and Power Management handlers. +* +* vfptr_query_txt +* Pointer to the function table for IRP_MN_QUERY_DEVICE_TEXT handlers. +* +* SEE ALSO +* Plug and Play, cl_pnp_po_ext_t, cl_vfptr_pnp_po_t, cl_vfptr_query_txt_t +*********/ + + +/****f* Component Library: Plug and Play/cl_set_pnp_state +* NAME +* cl_set_pnp_state +* +* DESCRIPTION +* Sets the PnP state stored in the common device extension to the desired +* state. Stores the previous state for rollback purposes. +* +* SYNOPSIS +*/ +CL_INLINE void +cl_set_pnp_state( + OUT cl_pnp_po_ext_t* const p_ext, + IN const cl_pnp_state_t new_state ) +{ + p_ext->last_pnp_state = p_ext->pnp_state; + p_ext->pnp_state = new_state; +} +/* +* PARAMETERS +* p_ext +* Pointer to the device extension whose PnP state to set. +* +* new_state +* New PnP state to store in the device extension. +* +* RETURN VALUES +* This function does not return a value. +* +* SEE ALSO +* Plug and Play, cl_pnp_po_ext_t, cl_pnp_state_t +*********/ + + +/****f* Component Library: Plug and Play/cl_rollback_pnp_state +* NAME +* cl_rollback_pnp_state +* +* DESCRIPTION +* Rolls back a PnP state change. +* +* SYNOPSIS +*/ +CL_INLINE void +cl_rollback_pnp_state( + OUT cl_pnp_po_ext_t* const p_ext ) +{ + p_ext->pnp_state = p_ext->last_pnp_state; +} +/* +* PARAMETERS +* p_ext +* Pointer to the device extension whose PnP state to roll back. +* +* RETURN VALUES +* This function does not return a value.
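+*
+* NOTES
+* A typical (hypothetical) use is restoring state when a query is vetoed;
+* e.g. an IRP_MN_CANCEL_REMOVE_DEVICE handler would call
+* cl_rollback_pnp_state( p_ext ) to undo the RemovePending transition made
+* when IRP_MN_QUERY_REMOVE_DEVICE was received.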
+* +* SEE ALSO +* Plug and Play, cl_pnp_po_ext_t +*********/ + + +/****f* Component Library: Plug and Play:/cl_pnp +* NAME +* cl_pnp +* +* DESCRIPTION +* Main PnP entry point for the driver. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_pnp( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the target of the PnP IRP. +* +* p_irp +* Pointer to the PnP IRP to perform on the specified device. +* +* RETURN VALUES +* STATUS_SUCCESS if the operation is successful. +* +* Other NTSTATUS values in case of error. +* +* SEE ALSO +* Plug and Play +**********/ + + +/****f* Component Library: Plug and Play:/cl_power +* NAME +* cl_power +* +* DESCRIPTION +* Main Power Management entry point for the driver. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_power( + IN PDEVICE_OBJECT p_dev_obj, + IN PIRP p_irp ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the target of the power IRP. +* +* p_irp +* Pointer to the power IRP to perform on the specified device. +* +* RETURN VALUES +* STATUS_SUCCESS if the operation is successful. +* +* Other NTSTATUS values in case of error. +* +* SEE ALSO +* Plug and Play +**********/ + + +/****f* Component Library: Plug and Play/cl_do_sync_pnp +* NAME +* cl_do_sync_pnp +* +* DESCRIPTION +* Sends an IRP to the next driver synchronously. Returns when the lower +* drivers have completed the IRP. Used to process IRPs on the way back up +* the device stack. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_do_sync_pnp( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value returned by lower driver. +*********/ + + +/****f* Component Library: Plug and Play/cl_alloc_relations +* NAME +* cl_alloc_relations +* +* DESCRIPTION +* Allocates device relations and copies existing device relations, if any. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_alloc_relations( + IN IRP* const p_irp, + IN const size_t n_devs ); +/* +* PARAMETERS +* p_irp +* Pointer to the IRP_MN_QUERY_DEVICE_RELATIONS IRP for which the +* relations are being allocated. +* +* n_devs +* Number of devices the caller will report in the DEVICE_RELATIONS +* structure stored at p_irp->IoStatus.Information upon success. +* +* RETURN VALUES +* STATUS_SUCCESS if the DEVICE_RELATIONS structure was allocated successfully. +* +* STATUS_INSUFFICIENT_RESOURCES if there was not enough memory to complete +* the operation. +* +* NOTES +* Upon failure, any original relations buffer is freed. Users should fail +* the IRP with the returned status value. +* +* SEE ALSO +* Plug and Play +**********/ + + +/****f* Component Library: Plug and Play/cl_do_remove +* NAME +* cl_do_remove +* +* DESCRIPTION +* Propagates an IRP_MN_REMOVE_DEVICE IRP, detaches, and deletes the +* device object. Useable by function and filter drivers only. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_do_remove( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp, + OUT cl_irp_action_t* const pAction ); +/**********/ + + +/****f* Component Library: Plug and Play:/cl_irp_skip +* NAME +* cl_irp_skip +* +* DESCRIPTION +* Default function for skipping a PnP IRP. Sets the IRP's status value. +* Useable only by function and filter drivers.
Bus drivers should use +* cl_irp_complete for their PDOs to complete an IRP with no change in status. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_irp_skip( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value. +* +* SEE ALSO +* Plug and Play, cl_irp_action_t +*********/ + + +/****f* Component Library: Plug and Play:/cl_irp_ignore +* NAME +* cl_irp_ignore +* +* DESCRIPTION +* Default function for skipping a PnP IRP without setting the IRP's status. +* Useable only by function and filter drivers. Bus drivers should use +* cl_irp_complete for their PDOs to complete an IRP with no change in status. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_irp_ignore( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value. +* +* SEE ALSO +* Plug and Play, cl_irp_action_t +*********/ + + +/****f* Component Library: Plug and Play:/cl_irp_complete +* NAME +* cl_irp_complete +* +* DESCRIPTION +* Default handler for completing a PnP or Power Management IRP with no +* action. Should only be used by bus drivers for their PDOs to complete +* an IRP with no change in status. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_irp_complete( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value. +* +* SEE ALSO +* Plug and Play, cl_irp_action_t, cl_vfptr_pnp_po_t +*********/ + + +/****f* Component Library: Plug and Play:/cl_irp_succeed +* NAME +* cl_irp_succeed +* +* DESCRIPTION +* Default handler for succeeding an IRP with STATUS_SUCCESS. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_irp_succeed( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value. +* +* SEE ALSO +* Plug and Play, cl_irp_action_t +*********/ + + +/****f* Component Library: Plug and Play:/cl_irp_unsupported +* NAME +* cl_irp_unsupported +* +* DESCRIPTION +* Default handler for failing an IRP with STATUS_NOT_SUPPORTED. +* +* SYNOPSIS +*/ +CL_EXPORT NTSTATUS +cl_irp_unsupported( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); +/* +* PARAMETERS +* p_dev_obj +* Pointer to the device object that is the IRP target. +* +* p_irp +* Pointer to the request IRP. +* +* p_action +* Action to take for propagating the IRP. +* +* RETURN VALUES +* IRP status value.
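+*
+* NOTES
+* A driver typically points the unsupported minor-code slots of its
+* cl_vfptr_pnp_po_t at this routine. For example (hypothetical usage, not
+* part of this patch), a function driver that does not support ejection
+* might set pfn_eject = cl_irp_unsupported when building its handler table.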
+* +* SEE ALSO +* Plug and Play, cl_irp_action_t +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_PNP_PO_H_ */ diff --git a/branches/Ndi/inc/kernel/complib/cl_spinlock_osd.h b/branches/Ndi/inc/kernel/complib/cl_spinlock_osd.h new file mode 100644 index 00000000..bf6a4196 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_spinlock_osd.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_SPINLOCK_OSD_H_ +#define _CL_SPINLOCK_OSD_H_ + + +#include "complib/cl_types.h" +#include "complib/cl_memory.h" + + +/* Spinlock object definition. */ +typedef struct _cl_spinlock +{ + KSPIN_LOCK lock; + KIRQL irql; + +} cl_spinlock_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void +cl_spinlock_construct( + IN cl_spinlock_t* const p_spinlock ) +{ + cl_memclr( p_spinlock, sizeof(cl_spinlock_t) ); +} + + +CL_INLINE cl_status_t +cl_spinlock_init( + IN cl_spinlock_t* const p_spinlock ) +{ + CL_ASSERT( p_spinlock ); + + cl_spinlock_construct( p_spinlock ); + + KeInitializeSpinLock( &p_spinlock->lock ); + return( CL_SUCCESS ); +} + + +CL_INLINE void +cl_spinlock_destroy( + IN cl_spinlock_t* const p_spinlock ) +{ + UNUSED_PARAM( p_spinlock ); +} + + +CL_INLINE void +cl_spinlock_acquire( + IN cl_spinlock_t* const p_spinlock ) +{ + KIRQL irql = KeGetCurrentIrql(); + CL_ASSERT( p_spinlock ); + + if (irql == DISPATCH_LEVEL) { + KeAcquireSpinLockAtDpcLevel( &p_spinlock->lock ); + p_spinlock->irql = irql; + } + else + KeAcquireSpinLock( &p_spinlock->lock, &p_spinlock->irql ); +} + + +CL_INLINE void +cl_spinlock_release( + IN cl_spinlock_t* const p_spinlock ) +{ + CL_ASSERT( p_spinlock ); + + if (p_spinlock->irql == DISPATCH_LEVEL) + KeReleaseSpinLockFromDpcLevel( &p_spinlock->lock ); + else + KeReleaseSpinLock( &p_spinlock->lock, p_spinlock->irql ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_SPINLOCK_OSD_H_ diff --git a/branches/Ndi/inc/kernel/complib/cl_syscallback_osd.h b/branches/Ndi/inc/kernel/complib/cl_syscallback_osd.h new file mode 100644 index 00000000..280835c8 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_syscallback_osd.h @@ -0,0 +1,45 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_SYS_CALLBACK_OSD_H_ +#define _CL_SYS_CALLBACK_OSD_H_ + + +#include "complib/cl_types.h" + + +typedef struct _IO_WORKITEM cl_sys_callback_item_t; + + +#endif // _CL_SYS_CALLBACK_OSD_H_ \ No newline at end of file diff --git a/branches/Ndi/inc/kernel/complib/cl_thread_osd.h b/branches/Ndi/inc/kernel/complib/cl_thread_osd.h new file mode 100644 index 00000000..14b501ae --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_thread_osd.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_THREAD_OSD_H_ +#define _CL_THREAD_OSD_H_ + + +#include "complib/cl_types.h" +#include "complib/cl_timer.h" + + +/* OS specific information about the thread. */ +typedef struct _cl_thread_osd +{ + HANDLE h_thread; + PKTHREAD p_thread; + +} cl_thread_osd_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + +CL_INLINE void +cl_thread_suspend( + IN const uint32_t pause_ms ) +{ + LARGE_INTEGER interval; + + // Convert the delay in milliseconds to 100 nanosecond intervals. 
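+	// A negative value requests a relative wait: e.g. pause_ms = 50 yields
+	// interval.QuadPart = -500000, i.e. 50 ms expressed in 100 ns units.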
+ interval.QuadPart = -(int64_t)(((uint64_t)pause_ms * 10000)); + KeDelayExecutionThread( KernelMode, FALSE, &interval ); +} + + +CL_INLINE void +cl_thread_stall( + IN const uint32_t pause_us ) +{ + KeStallExecutionProcessor( pause_us ); +} + + +CL_INLINE boolean_t +cl_is_blockable( void ) +{ + return ( KeGetCurrentIrql() < DISPATCH_LEVEL ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + + +#endif // _CL_THREAD_OSD_H_ diff --git a/branches/Ndi/inc/kernel/complib/cl_timer_osd.h b/branches/Ndi/inc/kernel/complib/cl_timer_osd.h new file mode 100644 index 00000000..bd3af5e8 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_timer_osd.h @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_TIMER_OSD_H_ +#define _CL_TIMER_OSD_H_ + + +#include "complib/cl_types.h" + + +/* Timer object definition. */ +typedef struct _cl_timer +{ + KTIMER timer; + KDPC dpc; + cl_pfn_timer_callback_t pfn_callback; + const void *context; + uint64_t timeout_time; + +} cl_timer_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* 100ns to s conversion */ +#define HUNDREDNS_TO_SEC CL_CONST64(10000000) +/* s to µs conversion */ +#define SEC_TO_MICRO CL_CONST64(1000000) + +CL_INLINE uint64_t CL_API +cl_get_time_stamp( void ) +{ + LARGE_INTEGER tick_count, frequency; + + tick_count = KeQueryPerformanceCounter( &frequency ); + return( tick_count.QuadPart / (frequency.QuadPart / SEC_TO_MICRO) ); +} + +CL_INLINE uint32_t CL_API +cl_get_time_stamp_sec( void ) +{ + return( (uint32_t)(KeQueryInterruptTime() / HUNDREDNS_TO_SEC) ); +} + +CL_INLINE uint64_t CL_API +cl_get_tick_count( void ) +{ + LARGE_INTEGER tick_count; + + tick_count = KeQueryPerformanceCounter( NULL ); + return tick_count.QuadPart; +} + +CL_INLINE uint64_t CL_API +cl_get_tick_freq( void ) +{ + LARGE_INTEGER frequency; + + KeQueryPerformanceCounter( &frequency ); + return frequency.QuadPart; +} + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif // _CL_TIMER_OSD_H_ \ No newline at end of file diff --git a/branches/Ndi/inc/kernel/complib/cl_types_osd.h b/branches/Ndi/inc/kernel/complib/cl_types_osd.h new file mode 100644 index 00000000..7a6dd643 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_types_osd.h @@ -0,0 +1,138 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_TYPES_OSD_H_ +#define _CL_TYPES_OSD_H_ + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +#pragma warning( push ) +#include +#if defined( NDIS_MINIPORT_DRIVER ) +#include +#if NDIS_WDM +#define CL_NTDDK +#endif /* NDIS_WDM */ +#elif !defined( _MINIPORT_ ) +#include +#define CL_NTDDK +#endif /* defined( NDIS_MINIPORT_DRIVER ) */ +#pragma warning( pop ) +/* + * Disable warnings related to taking the address of a dllimport function. + * This is needed to allow users of the PnP/PO abstraction to use the default + * handlers. + */ +#pragma warning( disable:4232 ) + +/* For DECLSPEC_EXPORT and DECLSPEC_IMPORT */ +#include + +#define CL_KERNEL + +#if DBG + #define _DEBUG_ +#else + #undef _DEBUG_ +#endif + +typedef __int8 int8_t; +typedef unsigned __int8 uint8_t; +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +/* boolean_t must be integral sized to avoid a bunch of warnings. */ +typedef int boolean_t; +typedef unsigned char uchar_t; +typedef _W64 __int3264 intn_t; +typedef _W64 unsigned __int3264 uintn_t; +typedef volatile __int32 atomic32_t; + + +#ifndef CL_ASSERT +#define CL_ASSERT ASSERT +#endif + + +#define UNUSED_PARAM UNREFERENCED_PARAMETER +#if defined(EVENT_TRACING) +#define UNUSED_PARAM_WOWPP(a) +#else +#define UNUSED_PARAM_WOWPP(a) UNREFERENCED_PARAMETER(a) +#endif + + +#define CL_EXPORT + +#if !defined( __cplusplus ) +#define inline __inline +#endif + +#define CL_INLINE CL_EXPORT inline + +#define CL_API + +#define cl_panic DbgBreakPoint + +#ifndef offsetof +#define offsetof FIELD_OFFSET +#endif + +#define PARENT_STRUCT( P, T, M ) CONTAINING_RECORD( (void*)P, T, M ) + +typedef enum _cl_status cl_status_t; + +#define CL_CONST64( x ) x##ui64 + +NTSTATUS +cl_to_ntstatus( + IN enum _cl_status status ); + +enum _cl_status +cl_from_ntstatus( + IN NTSTATUS status ); + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif // _CL_TYPES_OSD_H_ diff --git a/branches/Ndi/inc/kernel/complib/cl_waitobj_osd.h b/branches/Ndi/inc/kernel/complib/cl_waitobj_osd.h new file mode 100644 index 00000000..9320cba9 --- /dev/null +++ b/branches/Ndi/inc/kernel/complib/cl_waitobj_osd.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_WAITOBJ_OSD_H_ +#define _CL_WAITOBJ_OSD_H_ + + +#include +#include + + +typedef PKEVENT cl_waitobj_handle_t; + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + + +CL_INLINE cl_waitobj_handle_t +cl_waitobj_ref( + IN void *h_user_wait_obj ) +{ + cl_waitobj_handle_t h_kevent = NULL; + + /* + * Assumption that if the call fails, the h_kevent parameter is unchanged, + * or set to NULL. + */ + ObReferenceObjectByHandle( h_user_wait_obj, STANDARD_RIGHTS_ALL, + *ExEventObjectType, UserMode, (PVOID*)&h_kevent, NULL ); + + return h_kevent; +} + + +CL_INLINE void +cl_waitobj_deref( + IN cl_waitobj_handle_t h_kernel_wait_obj ) +{ + ObDereferenceObject( h_kernel_wait_obj ); +} + + +CL_INLINE cl_status_t +cl_waitobj_signal( + IN cl_waitobj_handle_t h_wait_obj ) +{ + return cl_event_signal( (cl_event_t*)h_wait_obj ); +} + + +CL_INLINE cl_status_t +cl_waitobj_reset( + IN cl_waitobj_handle_t h_wait_obj ) +{ + return cl_event_reset( (cl_event_t*)h_wait_obj ); +} + + +CL_INLINE cl_status_t +cl_waitobj_wait_on( + IN cl_waitobj_handle_t h_wait_obj, + IN const uint32_t wait_us, + IN const boolean_t interruptible ) +{ + return cl_event_wait_on( (cl_event_t*)h_wait_obj, wait_us, interruptible ); +} + + +#ifdef __cplusplus +} /* extern "C" */ +#endif /* __cplusplus */ + + +#endif /* _CL_WAITOBJ_OSD_H_ */ diff --git a/branches/Ndi/inc/kernel/iba/ib_al_ifc.h b/branches/Ndi/inc/kernel/iba/ib_al_ifc.h new file mode 100644 index 00000000..de4ce45b --- /dev/null +++ b/branches/Ndi/inc/kernel/iba/ib_al_ifc.h @@ -0,0 +1,775 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if !defined _IB_AL_IFC_H_ +#define _IB_AL_IFC_H_ + + +#include + + +/****h* Access Layer/AL Interface +* NAME +* AL Interface +* +* DESCRIPTION +* Header file for the interface exported to ICT client drivers for access to +* IB resources provided by HCAs. +*********/ + +#define AL_INTERFACE_VERSION (11) + + + +/* Function prototypes. Match definitions in ib_al.h. */ +typedef void +(*ib_pfn_sync_destroy_t)( + IN void *context ); + +typedef ib_api_status_t +(*ib_pfn_open_ca_t)( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + IN const ib_pfn_event_cb_t ca_event_cb OPTIONAL, + IN const void* const ca_context, + OUT ib_ca_handle_t* const ph_ca ); + +typedef ib_api_status_t +(*ib_pfn_query_ca_t)( + IN const ib_ca_handle_t h_ca, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT uint32_t* const p_size ); + +typedef DEVICE_OBJECT* +(*ib_pfn_get_ca_dev_t)( + IN const ib_ca_handle_t h_ca ); + +typedef ib_api_status_t +(*ib_pfn_query_ca_by_guid_t)( + IN const ib_al_handle_t h_al, + IN const ib_net64_t ca_guid, + OUT ib_ca_attr_t* const p_ca_attr OPTIONAL, + IN OUT size_t* const p_size ); + +typedef ib_api_status_t +(*ib_pfn_modify_ca_t)( + IN const ib_ca_handle_t h_ca, + IN const uint8_t port_num, + IN const ib_ca_mod_t ca_mod, + IN const ib_port_attr_mod_t* const p_port_attr_mod ); + +typedef ib_api_status_t +(*ib_pfn_close_ca_t)( + IN const ib_ca_handle_t h_ca, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_alloc_pd_t)( + IN const ib_ca_handle_t h_ca, + IN const ib_pd_type_t pd_type, + IN const void* const pd_context, + OUT ib_pd_handle_t* const ph_pd ); + +typedef ib_api_status_t +(*ib_pfn_dealloc_pd_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_create_av_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_av_attr_t* const p_av_attr, + OUT ib_av_handle_t* const ph_av ); + +typedef ib_api_status_t +(*ib_pfn_query_av_t)( + IN const ib_av_handle_t h_av, + OUT ib_av_attr_t* const p_av_attr, + OUT ib_pd_handle_t* const ph_pd ); + +typedef ib_api_status_t +(*ib_pfn_modify_av_t)( + IN const ib_av_handle_t h_av, + IN const ib_av_attr_t* const p_av_attr ); + +typedef ib_api_status_t +(*ib_pfn_destroy_av_t)( + IN const ib_av_handle_t h_av ); + +typedef ib_api_status_t +(*ib_pfn_create_srq_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_srq_attr_t* const p_srq_attr, + IN const void* const srq_context, + IN const ib_pfn_event_cb_t srq_event_cb OPTIONAL, + OUT ib_srq_handle_t* const ph_srq ); + +typedef ib_api_status_t +(*ib_pfn_query_srq_t)( + IN const ib_srq_handle_t h_srq, + OUT ib_srq_attr_t* const p_srq_attr ); + +typedef ib_api_status_t +(*ib_pfn_modify_srq_t)( + IN const ib_srq_handle_t h_srq, + IN const ib_srq_attr_t* const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask ); + +typedef ib_api_status_t +(*ib_pfn_destroy_srq_t)( + IN const ib_srq_handle_t h_srq, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t 
+(*ib_pfn_post_srq_recv_t)( + IN const ib_srq_handle_t h_srq, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_create_qp_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t qp_event_cb OPTIONAL, + OUT ib_qp_handle_t* const ph_qp ); + +typedef ib_api_status_t +(*ib_pfn_get_spl_qp_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_net64_t port_guid, + IN const ib_qp_create_t* const p_qp_create, + IN const void* const qp_context, + IN const ib_pfn_event_cb_t qp_event_cb OPTIONAL, + OUT ib_pool_key_t* const p_pool_key OPTIONAL, + OUT ib_qp_handle_t* const ph_qp ); + +typedef ib_api_status_t +(*ib_pfn_query_qp_t)( + IN const ib_qp_handle_t h_qp, + OUT ib_qp_attr_t* const p_qp_attr ); + +typedef ib_api_status_t +(*ib_pfn_modify_qp_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_qp_mod_t* const p_qp_mod ); + +typedef ib_api_status_t +(*ib_pfn_destroy_qp_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_create_cq_t)( + IN const ib_ca_handle_t h_ca, + IN OUT ib_cq_create_t* const p_cq_create, + IN const void* const cq_context, + IN const ib_pfn_event_cb_t cq_event_cb OPTIONAL, + OUT ib_cq_handle_t* const ph_cq ); + +typedef ib_api_status_t +(*ib_pfn_modify_cq_t)( + IN const ib_cq_handle_t h_cq, + IN OUT uint32_t* const p_size ); + +typedef ib_api_status_t +(*ib_pfn_query_cq_t)( + IN const ib_cq_handle_t h_cq, + OUT uint32_t* const p_size ); + +typedef ib_api_status_t +(*ib_pfn_destroy_cq_t)( + IN const ib_cq_handle_t h_cq, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_reg_mem_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_mr_create_t* const p_mr_create, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + +typedef ib_api_status_t +(*ib_pfn_reg_phys_t)( + IN const ib_pd_handle_t h_pd, + IN const ib_phys_create_t* const p_phys_create, + IN OUT uint64_t* const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + +typedef ib_api_status_t +(*ib_pfn_query_mr_t)( + IN const ib_mr_handle_t h_mr, + OUT ib_mr_attr_t* const p_mr_attr ); + +typedef ib_api_status_t +(*ib_pfn_rereg_mem_t)( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_mr_create_t* const p_mr_create OPTIONAL, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_rereg_phys_t)( + IN const ib_mr_handle_t h_mr, + IN const ib_mr_mod_t mr_mod_mask, + IN const ib_phys_create_t* const p_phys_create OPTIONAL, + IN OUT void** const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + IN const ib_pd_handle_t h_pd OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_reg_shared_t)( + IN const ib_mr_handle_t h_mr, + IN const ib_pd_handle_t h_pd, + IN const ib_access_t access_ctrl, + IN OUT void** const p_vaddr, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + +typedef ib_api_status_t +(*ib_pfn_create_shmid_t)( + IN const ib_pd_handle_t h_pd, + IN const int shmid, + IN const ib_mr_create_t* const p_mr_create, + OUT uint32_t* const p_lkey, + OUT uint32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + +typedef ib_api_status_t +(*ib_pfn_reg_shmid_t)( + IN const ib_pd_handle_t h_pd, + IN 
const ib_shmid_t shmid, + IN const ib_mr_create_t* const p_mr_create, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT ib_mr_handle_t* const ph_mr ); + +typedef ib_api_status_t +(*ib_pfn_dereg_mr_t)( + IN const ib_mr_handle_t h_mr ); + +typedef ib_api_status_t +(*mlnx_pfn_create_fmr_t)( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_create_t* const p_fmr_create, + OUT mlnx_fmr_handle_t* const ph_fmr ); + +typedef ib_api_status_t +(*mlnx_pfn_map_phys_fmr_t)( + IN const mlnx_fmr_handle_t h_fmr, + IN const uint64_t* const paddr_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey ); + +typedef ib_api_status_t +(*mlnx_pfn_unmap_fmr_t)( + IN const mlnx_fmr_handle_t h_fmr ); + +typedef ib_api_status_t +(*mlnx_pfn_destroy_fmr_t)( + IN mlnx_fmr_handle_t const h_fmr ); + + +typedef ib_api_status_t +(*mlnx_pfn_create_fmr_pool_t)( + IN const ib_pd_handle_t h_pd, + IN const mlnx_fmr_pool_create_t *p_fmr_pool_attr, + OUT mlnx_fmr_pool_handle_t* const ph_pool ); + + +typedef ib_api_status_t +(*mlnx_pfn_destroy_fmr_pool_t)( + IN const mlnx_fmr_pool_handle_t h_pool ); + + +typedef ib_api_status_t +(*mlnx_pfn_map_phys_fmr_pool_t)( + IN const mlnx_fmr_pool_handle_t h_pool , + IN const uint64_t* const paddr_list, + IN const int list_len, + IN OUT uint64_t* const p_vaddr, + OUT net32_t* const p_lkey, + OUT net32_t* const p_rkey, + OUT mlnx_fmr_pool_el_t *p_fmr_el); + +typedef ib_api_status_t +(*mlnx_pfn_unmap_fmr_pool_t)( + IN mlnx_fmr_pool_el_t p_fmr_el ); + +typedef ib_api_status_t +(*mlnx_pfn_flush_fmr_pool_t)( + IN const mlnx_fmr_pool_handle_t h_pool ); + + +typedef ib_api_status_t +(*ib_pfn_create_mw_t)( + IN const ib_pd_handle_t h_pd, + OUT uint32_t* const p_rkey, + OUT ib_mw_handle_t* const ph_mw ); + +typedef ib_api_status_t +(*ib_pfn_query_mw_t)( + IN const ib_mw_handle_t h_mw, + OUT ib_pd_handle_t* const ph_pd, + OUT uint32_t* const p_rkey ); + +typedef ib_api_status_t +(*ib_pfn_bind_mw_t)( + IN const ib_mw_handle_t h_mw, + IN const ib_qp_handle_t h_qp, + IN ib_bind_wr_t* const p_mw_bind, + OUT uint32_t* const p_rkey ); + +typedef ib_api_status_t +(*ib_pfn_destroy_mw_t)( + IN const ib_mw_handle_t h_mw ); + +typedef ib_api_status_t +(*ib_pfn_post_send_t)( + IN const ib_qp_handle_t h_qp, + IN ib_send_wr_t* const p_send_wr, + OUT ib_send_wr_t **pp_send_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_post_recv_t)( + IN const ib_qp_handle_t h_qp, + IN ib_recv_wr_t* const p_recv_wr, + OUT ib_recv_wr_t **pp_recv_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_send_mad_t)( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element_list, + OUT ib_mad_element_t **pp_mad_failure OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_cancel_mad_t)( + IN const ib_mad_svc_handle_t h_mad_svc, + IN ib_mad_element_t* const p_mad_element ); + +typedef ib_api_status_t +(*ib_pfn_poll_cq_t)( + IN const ib_cq_handle_t h_cq, + IN OUT ib_wc_t** const pp_free_wclist, + OUT ib_wc_t** const pp_done_wclist ); + +typedef ib_api_status_t +(*ib_pfn_rearm_cq_t)( + IN const ib_cq_handle_t h_cq, + IN const boolean_t solicited ); + +typedef ib_api_status_t +(*ib_pfn_join_mcast_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_mcast_req_t* const p_mcast_req ); + +typedef ib_api_status_t +(*ib_pfn_leave_mcast_t)( + IN const ib_mcast_handle_t h_mcast, + IN const ib_pfn_destroy_cb_t destroy_cb ); + +typedef ib_api_status_t +(*ib_pfn_local_mad_t)( + IN const ib_ca_handle_t h_ca, + 
IN const uint8_t port_num,
+	IN const void* const p_mad_in,
+	OUT void* p_mad_out );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_listen_t)(
+	IN const ib_al_handle_t h_al,
+	IN const ib_cm_listen_t* const p_cm_listen,
+	IN const ib_pfn_listen_err_cb_t listen_err_cb,
+	IN const void* const listen_context,
+	OUT ib_listen_handle_t* const ph_cm_listen );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_cancel_t)(
+	IN const ib_listen_handle_t h_cm_listen,
+	IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_req_t)(
+	IN const ib_cm_req_t* const p_cm_req );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_rep_t)(
+	IN const ib_cm_handle_t h_cm_req,
+	IN const ib_cm_rep_t* const p_cm_rep );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_rtu_t)(
+	IN const ib_cm_handle_t h_cm_rep,
+	IN const ib_cm_rtu_t* const p_cm_rtu );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_rej_t)(
+	IN const ib_cm_handle_t h_cm,
+	IN const ib_cm_rej_t* const p_cm_rej );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_mra_t)(
+	IN const ib_cm_handle_t h_cm,
+	IN const ib_cm_mra_t* const p_cm_mra );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_lap_t)(
+	IN const ib_cm_lap_t* const p_cm_lap );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_apr_t)(
+	IN const ib_cm_handle_t h_cm_lap,
+	IN const ib_cm_apr_t* const p_cm_apr );
+
+typedef ib_api_status_t
+(*ib_pfn_force_apm_t)(
+	IN const ib_qp_handle_t h_qp );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_dreq_t)(
+	IN const ib_cm_dreq_t* const p_cm_dreq );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_drep_t)(
+	IN const ib_cm_handle_t h_cm_dreq,
+	IN const ib_cm_drep_t* const p_cm_drep );
+
+typedef ib_api_status_t
+(*ib_pfn_cm_handoff_t)(
+	IN const ib_cm_handle_t h_cm_req,
+	IN const ib_net64_t svc_id );
+
+typedef ib_api_status_t
+(*ib_pfn_create_ioc_t)(
+	IN const ib_ca_handle_t h_ca,
+	IN const ib_ioc_profile_t* const p_ioc_profile,
+	OUT ib_ioc_handle_t* const ph_ioc );
+
+typedef ib_api_status_t
+(*ib_pfn_destroy_ioc_t)(
+	IN const ib_ioc_handle_t h_ioc );
+
+typedef ib_api_status_t
+(*ib_pfn_reg_ioc_t)(
+	IN const ib_ioc_handle_t h_ioc );
+
+typedef ib_api_status_t
+(*ib_pfn_add_svc_entry_t)(
+	IN const ib_ioc_handle_t h_ioc,
+	IN const ib_svc_entry_t* const p_svc_entry,
+	OUT ib_svc_handle_t* const ph_svc );
+
+typedef ib_api_status_t
+(*ib_pfn_remove_svc_entry_t)(
+	IN const ib_svc_handle_t h_svc );
+
+typedef ib_api_status_t
+(*ib_pfn_get_ca_guids_t)(
+	IN ib_al_handle_t h_al,
+	OUT ib_net64_t* const p_guid_array OPTIONAL,
+	IN OUT uintn_t* const p_guid_cnt );
+
+typedef ib_api_status_t
+(*ib_pfn_get_ca_by_gid_t)(
+	IN ib_al_handle_t h_al,
+	IN const ib_gid_t* const p_gid,
+	OUT ib_net64_t* const p_ca_guid );
+
+typedef ib_api_status_t
+(*ib_pfn_get_port_by_gid_t)(
+	IN ib_al_handle_t h_al,
+	IN const ib_gid_t* const p_gid,
+	OUT ib_net64_t* const p_port_guid );
+
+typedef ib_api_status_t
+(*ib_pfn_create_mad_pool_t)(
+	IN const ib_al_handle_t h_al,
+	IN const size_t min,
+	IN const size_t max,
+	IN const size_t grow_size,
+	OUT ib_pool_handle_t* const ph_pool );
+
+typedef ib_api_status_t
+(*ib_pfn_destroy_mad_pool_t)(
+	IN const ib_pool_handle_t h_pool );
+
+typedef ib_api_status_t
+(*ib_pfn_reg_mad_pool_t)(
+	IN const ib_pool_handle_t h_pool,
+	IN const ib_pd_handle_t h_pd,
+	OUT ib_pool_key_t* const p_pool_key );
+
+typedef ib_api_status_t
+(*ib_pfn_dereg_mad_pool_t)(
+	IN const ib_pool_key_t pool_key );
+
+typedef ib_api_status_t
+(*ib_pfn_get_mad_t)( + IN const ib_pool_key_t pool_key, + IN const size_t buf_size, + OUT ib_mad_element_t **pp_mad_element ); + +typedef ib_api_status_t +(*ib_pfn_put_mad_t)( + IN const ib_mad_element_t* p_mad_element_list ); + +typedef ib_api_status_t +(*ib_pfn_init_dgrm_svc_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_dgrm_info_t* const p_dgrm_info OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_reg_mad_svc_t)( + IN const ib_qp_handle_t h_qp, + IN const ib_mad_svc_t* const p_mad_svc, + OUT ib_mad_svc_handle_t* const ph_mad_svc ); + +typedef ib_api_status_t +(*ib_pfn_reg_svc_t)( + IN const ib_al_handle_t h_al, + IN const ib_reg_svc_req_t* const p_reg_svc_req, + OUT ib_reg_svc_handle_t* const ph_reg_svc ); + +typedef ib_api_status_t +(*ib_pfn_dereg_svc_t)( + IN const ib_reg_svc_handle_t h_reg_svc, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_query_t)( + IN const ib_al_handle_t h_al, + IN const ib_query_req_t* const p_query_req, + OUT ib_query_handle_t* const ph_query OPTIONAL ); + +typedef void +(*ib_pfn_cancel_query_t)( + IN const ib_al_handle_t h_al, + IN const ib_query_handle_t query_hndl ); + +typedef ib_api_status_t +(*ib_pfn_reg_pnp_t)( + IN const ib_al_handle_t h_al, + IN const ib_pnp_req_t* const p_pnp_req, + OUT ib_pnp_handle_t* const ph_pnp ); + +typedef ib_api_status_t +(*ib_pfn_dereg_pnp_t)( + IN const ib_pnp_handle_t h_pnp, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_subscribe_t)( + IN const ib_al_handle_t h_al, + IN const ib_sub_req_t* const p_sub_req, + OUT ib_sub_handle_t* const ph_sub ); + +typedef ib_api_status_t +(*ib_pfn_unsubscribe_t)( + IN const ib_sub_handle_t h_sub, + IN const ib_pfn_destroy_cb_t destroy_cb OPTIONAL ); + +typedef ib_api_status_t +(*ib_pfn_reject_ioc_t)( + IN const ib_al_handle_t h_al, + IN const ib_pnp_handle_t h_ioc_event ); + +typedef ib_api_status_t +(*ib_pfn_ci_call_t)( + IN ib_ca_handle_t h_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op ); + +typedef ib_api_status_t +(*ib_pfn_open_al_t)( + OUT ib_al_handle_t* const ph_al ); + +typedef ib_api_status_t +(*ib_pfn_close_al_t)( + IN const ib_al_handle_t h_al ); + +typedef const char* +(*ib_pfn_get_err_str_t)( + IN ib_api_status_t status ); + +typedef const char* +(*ib_pfn_get_wc_status_str_t)( + IN ib_wc_status_t wc_status ); + + +/* Interface specific data header. */ +typedef struct _ib_al_ifc_data +{ + const GUID *type; + USHORT size; + USHORT version; + void *p_data; + +} ib_al_ifc_data_t; + + +/* Interface definitions */ +typedef struct _ib_al_ifc +{ + /* Standard interface header. */ + INTERFACE wdm; + + /* AL entry points. 
*/ + ib_pfn_sync_destroy_t sync_destroy; + ib_pfn_open_ca_t open_ca; + ib_pfn_query_ca_t query_ca; + ib_pfn_get_ca_dev_t get_dev; + ib_pfn_close_ca_t close_ca; + ib_pfn_alloc_pd_t alloc_pd; + ib_pfn_dealloc_pd_t dealloc_pd; + ib_pfn_create_av_t create_av; + ib_pfn_query_av_t query_av; + ib_pfn_modify_av_t modify_av; + ib_pfn_destroy_av_t destroy_av; + ib_pfn_create_qp_t create_qp; + ib_pfn_get_spl_qp_t get_spl_qp; + ib_pfn_query_qp_t query_qp; + ib_pfn_modify_qp_t modify_qp; + ib_pfn_destroy_qp_t destroy_qp; + ib_pfn_create_cq_t create_cq; + ib_pfn_modify_cq_t modify_cq; + ib_pfn_query_cq_t query_cq; + ib_pfn_destroy_cq_t destroy_cq; + ib_pfn_reg_mem_t reg_mem; + ib_pfn_reg_phys_t reg_phys; + ib_pfn_query_mr_t query_mr; + ib_pfn_rereg_mem_t rereg_mem; + ib_pfn_create_shmid_t create_shmid; + ib_pfn_reg_shmid_t reg_shmid; + ib_pfn_dereg_mr_t dereg_mr; + ib_pfn_create_mw_t create_mw; + ib_pfn_query_mw_t query_mw; + ib_pfn_bind_mw_t bind_mw; + ib_pfn_destroy_mw_t destroy_mw; + ib_pfn_post_send_t post_send; + ib_pfn_post_recv_t post_recv; + ib_pfn_send_mad_t send_mad; + ib_pfn_cancel_mad_t cancel_mad; + ib_pfn_poll_cq_t poll_cq; + ib_pfn_rearm_cq_t rearm_cq; + ib_pfn_join_mcast_t join_mcast; + ib_pfn_leave_mcast_t leave_mcast; + ib_pfn_local_mad_t local_mad; + ib_pfn_cm_listen_t cm_listen; + ib_pfn_cm_cancel_t cm_cancel; + ib_pfn_cm_req_t cm_req; + ib_pfn_cm_rep_t cm_rep; + ib_pfn_cm_rtu_t cm_rtu; + ib_pfn_cm_rej_t cm_rej; + ib_pfn_cm_mra_t cm_mra; + ib_pfn_cm_lap_t cm_lap; + ib_pfn_cm_apr_t cm_apr; + ib_pfn_force_apm_t force_apm; + ib_pfn_cm_dreq_t cm_dreq; + ib_pfn_cm_drep_t cm_drep; + ib_pfn_cm_handoff_t cm_handoff; + ib_pfn_create_ioc_t create_ioc; + ib_pfn_destroy_ioc_t destroy_ioc; + ib_pfn_reg_ioc_t reg_ioc; + ib_pfn_add_svc_entry_t add_svc_entry; + ib_pfn_remove_svc_entry_t remove_svc_entry; + ib_pfn_get_ca_guids_t get_ca_guids; + ib_pfn_get_ca_by_gid_t get_ca_by_gid; + ib_pfn_get_port_by_gid_t get_port_by_gid; + ib_pfn_create_mad_pool_t create_mad_pool; + ib_pfn_destroy_mad_pool_t destroy_mad_pool; + ib_pfn_reg_mad_pool_t reg_mad_pool; + ib_pfn_dereg_mad_pool_t dereg_mad_pool; + ib_pfn_get_mad_t get_mad; + ib_pfn_put_mad_t put_mad; + ib_pfn_init_dgrm_svc_t init_dgrm_svc; + ib_pfn_reg_mad_svc_t reg_mad_svc; + ib_pfn_reg_svc_t reg_svc; + ib_pfn_dereg_svc_t dereg_svc; + ib_pfn_query_t query; + ib_pfn_cancel_query_t cancel_query; + ib_pfn_reg_pnp_t reg_pnp; + ib_pfn_dereg_pnp_t dereg_pnp; + ib_pfn_subscribe_t subscribe; + ib_pfn_unsubscribe_t unsubscribe; + ib_pfn_reject_ioc_t reject_ioc; + ib_pfn_ci_call_t ci_call; + ib_pfn_open_al_t open_al; + ib_pfn_close_al_t close_al; + ib_pfn_get_err_str_t get_err_str; + ib_pfn_get_wc_status_str_t get_wc_status_str; + mlnx_pfn_create_fmr_t create_mlnx_fmr; + mlnx_pfn_map_phys_fmr_t map_phys_mlnx_fmr; + mlnx_pfn_unmap_fmr_t unmap_mlnx_fmr; + mlnx_pfn_destroy_fmr_t destroy_mlnx_fmr; + mlnx_pfn_create_fmr_pool_t create_mlnx_fmr_pool; + mlnx_pfn_destroy_fmr_pool_t destroy_mlnx_fmr_pool; + mlnx_pfn_map_phys_fmr_pool_t map_phys_mlnx_fmr_pool; + mlnx_pfn_unmap_fmr_pool_t unmap_mlnx_fmr_pool; + mlnx_pfn_flush_fmr_pool_t flush_mlnx_fmr_pool; + + ib_pfn_create_srq_t create_srq; + ib_pfn_query_srq_t query_srq; + ib_pfn_modify_srq_t modify_srq; + ib_pfn_destroy_srq_t destroy_srq; + ib_pfn_post_srq_recv_t post_srq_recv; + +} ib_al_ifc_t; + + +#endif /* !defined _IB_AL_IFC_H_ */ + +/* + * AL interface GUID. The GUID is defined outside the conditional include + * on purpose so that it can be instantiated only once where it is actually + * needed. 
See the DDK docs section "Using GUIDs in Drivers" for more info.
+ */
+/* {707A1BDE-BF9F-4565-8FDD-144EF6514FE8} */
+DEFINE_GUID(GUID_IB_AL_INTERFACE,
+0x707a1bde, 0xbf9f, 0x4565, 0x8f, 0xdd, 0x14, 0x4e, 0xf6, 0x51, 0x4f, 0xe8);
diff --git a/branches/Ndi/inc/kernel/iba/ib_ci_ifc.h b/branches/Ndi/inc/kernel/iba/ib_ci_ifc.h
new file mode 100644
index 00000000..00ceaf94
--- /dev/null
+++ b/branches/Ndi/inc/kernel/iba/ib_ci_ifc.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#if !defined _IB_CI_IFC_H_
+#define _IB_CI_IFC_H_
+
+
+#include "iba/ib_ci.h"
+
+
+/****h* Access Layer/AL Channel Interface
+* NAME
+*	AL Channel Interface
+*
+* DESCRIPTION
+*	Header file for the interface exported to HCA drivers to allow them
+*	to register with AL for use by AL clients.
+*********/
+
+
+#define IB_CI_INTERFACE_VERSION		(3)
+
+
+/* Function type for ib_register_ca.  Matches the definition in ib_ci.h */
+typedef ib_api_status_t
+(*ib_pfn_register_ca_t)(
+	IN const ci_interface_t* const p_ci );
+
+
+/* Function type for ib_deregister_ca.  Matches the definition in ib_ci.h */
+typedef ib_api_status_t
+(*ib_pfn_deregister_ca_t)(
+	IN const net64_t ca_guid );
+
+
+/*
+ * Appends a CA's device relations given a CA guid to an existing list
+ * of relations.
+ */
+typedef NTSTATUS
+(*ib_pfn_get_relations_t)(
+	IN net64_t ca_guid,
+	IN OUT IRP* const p_irp );
+
+
+typedef const char*
+(*ib_pfn_get_err_str_t)(
+	IN ib_api_status_t status );
+
+
+
+/****s* Access Layer: AL Channel Interface/ib_ci_ifc_t
+* NAME
+*	ib_ci_ifc_t
+*
+* DESCRIPTION
+*	Interface exported to HCA drivers to allow them to register their
+*	CAs with AL.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_ci_ifc
+{
+
+	INTERFACE				wdm;
+
+	ib_pfn_register_ca_t	register_ca;
+	ib_pfn_deregister_ca_t	deregister_ca;
+	ib_pfn_get_relations_t	get_relations;
+	ib_pfn_get_err_str_t	get_err_str;
+
+} ib_ci_ifc_t;
+/*
+* FIELDS
+*	wdm
+*		Standard interface header.
+*
+*	register_ca
+*		Pointer to ib_pfn_register_ca_t function
+*
+*	deregister_ca
+*		Pointer to ib_pfn_deregister_ca_t function
+*
+*	get_relations
+*		Pointer to ib_pfn_get_relations_t function
+*
+*	get_err_str
+*		Pointer to ib_pfn_get_err_str_t function
+*
+* SEE ALSO
+*	ib_register_ca, ib_deregister_ca
+*********/
+
+
+#endif	/* !defined _IB_CI_IFC_H_ */
+
+/*
+ * CI interface GUID.  The GUID is defined outside the conditional include
+ * on purpose so that it can be instantiated only once where it is actually
+ * needed.  See the DDK docs section "Using GUIDs in Drivers" for more info.
+ */
+/* {9B617D6B-65AB-4fe5-9601-555271F7534D} */
+DEFINE_GUID(GUID_IB_CI_INTERFACE,
+0x9b617d6b, 0x65ab, 0x4fe5, 0x96, 0x1, 0x55, 0x52, 0x71, 0xf7, 0x53, 0x4d);
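/*
 * Illustrative sketch, not part of the patch: how an HCA driver might use
 * ib_ci_ifc_t once it has obtained the interface, e.g. by sending an
 * IRP_MN_QUERY_INTERFACE request for GUID_IB_CI_INTERFACE to the bus driver.
 * The interface query itself and the ci_interface_t setup are assumed to
 * happen elsewhere; only the fields and constants used below come from this
 * header.  deregister_ca( ca_guid ) undoes the registration at remove time.
 */
static ib_api_status_t
hca_register_with_al(
	IN ib_ci_ifc_t* const p_ifc,			/* returned by the interface query */
	IN const ci_interface_t* const p_ci )	/* verbs filled in by the HCA driver */
{
	ib_api_status_t		status;

	/* Assumes the bus driver reports IB_CI_INTERFACE_VERSION in the wdm header. */
	if( p_ifc->wdm.Version != IB_CI_INTERFACE_VERSION )
		return IB_UNSUPPORTED;

	status = p_ifc->register_ca( p_ci );
	if( status != IB_SUCCESS )
	{
		/* get_err_str converts the status code to a printable string. */
		cl_dbg_out( "register_ca failed: %s\n", p_ifc->get_err_str( status ) );
	}
	return status;
}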
diff --git a/branches/Ndi/inc/kernel/iba/ioc_ifc.h b/branches/Ndi/inc/kernel/iba/ioc_ifc.h
new file mode 100644
index 00000000..6ea9328b
--- /dev/null
+++ b/branches/Ndi/inc/kernel/iba/ioc_ifc.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include
+
+
+#if !defined _IOC_IFC_H_
+#define _IOC_IFC_H_
+
+
+/****h* Access Layer/IO Controller Interface
+* NAME
+*	IO Controller Interface
+*
+* DESCRIPTION
+*	Header file for the interface exported to IOC client drivers to retrieve
+*	IOC device information.
+*
+*	The interface contains information about the particular instance of an IOC.
+*********/
+
+
+#define IOC_INTERFACE_DATA_VERSION		(3)
+
+
+
+/****s* Access Layer: IO Controller Interface/ioc_ifc_data_t
+* NAME
+*	ioc_ifc_data_t
+*
+* DESCRIPTION
+*	I/O Controller interface data.
+*
+*	The ioc_ifc_data_t structure identifies the particular IOC instance
+*	to the client driver.
+*
+* SYNOPSIS
+*/
+typedef struct _ioc_ifc_data
+{
+	net64_t		ca_guid;
+	net64_t		guid;
+
+} ioc_ifc_data_t;
+/*
+* FIELDS
+*	ca_guid
+*		Local CA GUID through which IOC is accessible.
+*
+*	guid
+*		IOC GUID.
+*
+* SEE ALSO
+*
+*********/
+
+
+#endif	/* !defined _IOC_IFC_H_ */
+
+/*
+ * IOC interface GUID.  The GUID is defined outside the conditional include
+ * on purpose so that it can be instantiated only once where it is actually
+ * needed.  See the DDK docs section "Using GUIDs in Drivers" for more info.
+ */
+/* {20883ACD-57F3-4da3-AD03-73D0178ACAE9} */
+DEFINE_GUID(GUID_IOC_INTERFACE_DATA,
+0x20883acd, 0x57f3, 0x4da3, 0xad, 0x3, 0x73, 0xd0, 0x17, 0x8a, 0xca, 0xe9);
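/*
 * Illustrative sketch, not part of the patch: how an IOC client driver might
 * validate the generic interface data header (ib_al_ifc_data_t, declared in
 * ib_al_ifc.h above) before using the IOC-specific payload.  IsEqualGUID is
 * the standard WDM GUID comparison; everything else comes from these headers.
 */
static ioc_ifc_data_t*
ioc_get_ifc_data(
	IN ib_al_ifc_data_t* const p_ifc_data )
{
	/* Not IOC interface data at all. */
	if( !IsEqualGUID( p_ifc_data->type, &GUID_IOC_INTERFACE_DATA ) )
		return NULL;

	/* Version and size must match what this header declares. */
	if( p_ifc_data->version != IOC_INTERFACE_DATA_VERSION ||
		p_ifc_data->size < sizeof(ioc_ifc_data_t) )
	{
		return NULL;
	}

	return (ioc_ifc_data_t*)p_ifc_data->p_data;
}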
diff --git a/branches/Ndi/inc/kernel/iba/iou_ifc.h b/branches/Ndi/inc/kernel/iba/iou_ifc.h
new file mode 100644
index 00000000..2741397a
--- /dev/null
+++ b/branches/Ndi/inc/kernel/iba/iou_ifc.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "iba/ib_al_ifc.h"
+
+
+#if !defined _IOU_IFC_H_
+#define _IOU_IFC_H_
+
+
+/****h* Access Layer/IO Unit Interface
+* NAME
+*	IO Unit Interface
+*
+* DESCRIPTION
+*	Header file for the interface exported to IOU client drivers to retrieve
+*	IOU device information.
+*
+*	The interface contains information about the particular instance of an IOU.
+*********/
+
+
+#define IOU_INTERFACE_DATA_VERSION		(4)
+
+
+
+/****s* Access Layer: IO Unit Interface/iou_ifc_data_t
+* NAME
+*	iou_ifc_data_t
+*
+* DESCRIPTION
+*	I/O Unit interface data.
+*
+*	The iou_ifc_data_t structure identifies the particular IOU instance
+*	to the client driver.
+*
+* SYNOPSIS
+*/
+typedef struct _iou_ifc_data
+{
+	net64_t		ca_guid;
+	net64_t		chassis_guid;
+	uint8_t		slot;
+	net64_t		guid;
+
+} iou_ifc_data_t;
+/*
+* FIELDS
+*	ca_guid
+*		Local CA GUID through which IOU is accessible.
+*
+*	chassis_guid
+*		IOU chassis GUID.
+*
+*	slot
+*		IOU slot.
+*
+*	guid
+*		IOU GUID.
+*
+* SEE ALSO
+*
+*********/
+
+#endif	/* !defined _IOU_IFC_H_ */
+
+/*
+ * IOU interface GUID.  The GUID is defined outside the conditional include
+ * on purpose so that it can be instantiated only once where it is actually
+ * needed.  See the DDK docs section "Using GUIDs in Drivers" for more info.
 */
+/* {C78F0228-E564-4d46-8A4B-295030414C1A} */
+DEFINE_GUID(GUID_IOU_INTERFACE_DATA,
+0xc78f0228, 0xe564, 0x4d46, 0x8a, 0x4b, 0x29, 0x50, 0x30, 0x41, 0x4c, 0x1a);
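/*
 * Illustrative sketch, not part of the patch: the producer side of the check
 * shown after ioc_ifc.h -- how a bus driver might wrap iou_ifc_data_t in the
 * generic ib_al_ifc_data_t header when answering a client's interface query.
 * The caller owns the storage for both structures; p_data keeps pointing at
 * p_iou_data, so that storage must outlive the interface it describes.
 */
static void
iou_fill_ifc_data(
	IN const net64_t ca_guid,
	IN const net64_t chassis_guid,
	IN const uint8_t slot,
	IN const net64_t iou_guid,
	OUT iou_ifc_data_t* const p_iou_data,
	OUT ib_al_ifc_data_t* const p_ifc_data )
{
	p_iou_data->ca_guid = ca_guid;
	p_iou_data->chassis_guid = chassis_guid;
	p_iou_data->slot = slot;
	p_iou_data->guid = iou_guid;

	p_ifc_data->type = &GUID_IOU_INTERFACE_DATA;
	p_ifc_data->size = (USHORT)sizeof(iou_ifc_data_t);
	p_ifc_data->version = IOU_INTERFACE_DATA_VERSION;
	p_ifc_data->p_data = p_iou_data;
}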
diff --git a/branches/Ndi/inc/kernel/iba/ipoib_ifc.h b/branches/Ndi/inc/kernel/iba/ipoib_ifc.h
new file mode 100644
index 00000000..c7ab0156
--- /dev/null
+++ b/branches/Ndi/inc/kernel/iba/ipoib_ifc.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "iba/ib_al_ifc.h"
+
+
+#if !defined _IPOIB_IFC_H_
+#define _IPOIB_IFC_H_
+
+
+/****h* Access Layer/IPoIB Interface
+* NAME
+*	IPoIB Interface
+*
+* DESCRIPTION
+*	Header file for the interface exported to IPoIB client drivers for access
+*	to IB resources provided by HCAs.
+*
+*	The actual interface returned contains information about the
+*	particular instance of an IPoIB device.
+*********/
+
+
+#define IPOIB_INTERFACE_DATA_VERSION	(5)
+
+
+/****s* Access Layer: IPoIB Interface/ipoib_ifc_data_t
+* NAME
+*	ipoib_ifc_data_t
+*
+* DESCRIPTION
+*	IPoIB interface data.
+*
+*	The ipoib_ifc_data_t structure identifies the HCA port used by a
+*	particular IPoIB device instance.
+*
+* SYNOPSIS
+*/
+typedef struct _ipoib_ifc_data
+{
+	net64_t		ca_guid;
+	net64_t		port_guid;
+	uint8_t		port_num;
+
+} ipoib_ifc_data_t;
+/*
+* FIELDS
+*	ca_guid
+*		HCA GUID for this IPoIB interface
+*
+*	port_guid
+*		Port GUID for this IPoIB interface
+*
+*	port_num
+*		Port number for this IPoIB interface
+*
+* SEE ALSO
+*
+*********/
+
+
+
+#endif	/* !defined _IPOIB_IFC_H_ */
+
+/*
+ * IPOIB interface GUID.  The GUID is defined outside the conditional include
+ * on purpose so that it can be instantiated only once where it is actually
+ * needed.  See the DDK docs section "Using GUIDs in Drivers" for more info.
+ */
+/* {B40DDB48-5710-487a-B812-6DAF56C7F423} */
+DEFINE_GUID(GUID_IPOIB_INTERFACE_DATA,
+0xb40ddb48, 0x5710, 0x487a, 0xb8, 0x12, 0x6d, 0xaf, 0x56, 0xc7, 0xf4, 0x23);
diff --git a/branches/Ndi/inc/kernel/ip_packet.h b/branches/Ndi/inc/kernel/ip_packet.h
new file mode 100644
index 00000000..60421edb
--- /dev/null
+++ b/branches/Ndi/inc/kernel/ip_packet.h
@@ -0,0 +1,459 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _IP_PACKET_H_
+#define _IP_PACKET_H_
+
+
+#include
+#include
+
+
+#ifndef HW_ADDR_LEN
+#define HW_ADDR_LEN 6
+#endif	/* HW_ADDR_LEN */
+
+
+#define ETH_PROT_TYPE_IP	CL_HTON16(0x800)
+#define ETH_PROT_TYPE_ARP	CL_HTON16(0x806)
+
+
+#define ETH_IS_LOCALLY_ADMINISTERED(addr) \
+	(BOOLEAN)(((PUCHAR)(addr))[0] & ((UCHAR)0x02))
+
+
+#include
+/****s* IB Network Drivers/mac_addr_t
+* NAME
+*	mac_addr_t
+*
+* DESCRIPTION
+*	Defines the ethernet MAC address.
+*
+* SYNOPSIS
+*/
+typedef struct _mac_addr
+{
+	uint8_t		addr[HW_ADDR_LEN];
+
+} PACK_SUFFIX mac_addr_t;
+/*
+* FIELDS
+*	addr
+*		Byte array representing the MAC address.
+*
+* NOTES
+*	The HW_ADDR_LEN value must be defined before including this header in order
+*	to support various address lengths.  If not defined, the default length for
+*	ethernet (6 bytes) is used.
+*
+*	addr[0] & 0x1 indicates multicast
+*	addr[0] & 0x2 indicates LAA if not multicast
+*
+* SEE ALSO
+*	IB Network Drivers, arp_pkt_t, ip_hdr_t, tcp_hdr_t, udp_hdr_t
+*********/
+#include
+
+
+#include
+/****s* IB Network Drivers/eth_hdr_t
+* NAME
+*	eth_hdr_t
+*
+* DESCRIPTION
+*	Defines the ethernet header for IP packets.
+*
+* SYNOPSIS
+*/
+typedef struct _eth_hdr
+{
+	mac_addr_t	dst;
+	mac_addr_t	src;
+	net16_t		type;
+
+} PACK_SUFFIX eth_hdr_t;
+/*
+* FIELDS
+*	dst
+*		Destination address
+*
+*	src
+*		Source address
+*
+*	type
+*		Ethernet protocol type
+*
+* NOTES
+*	The HW_ADDR_LEN value must be defined before including this header in order
+*	to support various address lengths.
+*
+* SEE ALSO
+*	IB Network Drivers, arp_pkt_t, ip_hdr_t, tcp_hdr_t, udp_hdr_t
+*********/
+#include
+
+
+#define ARP_HW_TYPE_ETH		CL_HTON16(1)
+#define ARP_HW_TYPE_IB		CL_HTON16(32)
+
+#define ARP_OP_REQ		CL_HTON16(1)
+#define ARP_OP_REP		CL_HTON16(2)
+
+
+#include
+/****s* IB Network Drivers/arp_pkt_t
+* NAME
+*	arp_pkt_t
+*
+* DESCRIPTION
+*	Defines the ARP packet for IP packets.
+*
+* SYNOPSIS
+*/
+typedef struct _arp_pkt
+{
+	net16_t		hw_type;
+	net16_t		prot_type;
+	uint8_t		hw_size;
+	uint8_t		prot_size;
+	net16_t		op;
+	mac_addr_t	src_hw;
+	net32_t		src_ip;
+	mac_addr_t	dst_hw;
+	net32_t		dst_ip;
+
+} PACK_SUFFIX arp_pkt_t;
+/*
+* FIELDS
+*	hw_type
+*		Hardware type
+*
+*	prot_type
+*		Protocol type.  See ETH_PROT_TYPE_XXX definitions.
+*
+*	hw_size
+*		Size of the hardware address
+*
+*	prot_size
+*		Size of the protocol address
+*
+*	op
+*		ARP operation
+*
+*	src_hw
+*		Source HW (MAC) address
+*
+*	src_ip
+*		Source IP address
+*
+*	dst_hw
+*		Destination HW (MAC) address
+*
+*	dst_ip
+*		Destination IP address
+*
+* NOTES
+*	The HW_ADDR_LEN value must be defined before including this header in order
+*	to support various MAC address lengths.
+*
+* SEE ALSO
+*	IB Network Drivers, eth_hdr_t, ip_hdr_t, tcp_hdr_t, udp_hdr_t
+*********/
+#include
+
+
+#define IP_PROT_IP		4
+#define IP_PROT_TCP		6
+#define IP_PROT_UDP		17
+
+
+#include
+/****s* IB Network Drivers/ip_hdr_t
+* NAME
+*	ip_hdr_t
+*
+* DESCRIPTION
+*	Defines the IP header for IP packets.
+*
+* SYNOPSIS
+*/
+typedef struct _ip_hdr
+{
+	uint8_t		ver_hl;
+	uint8_t		svc_type;
+	net16_t		length;
+	net16_t		id;
+	net16_t		offset;
+	uint8_t		ttl;
+	uint8_t		prot;
+	net16_t		chksum;
+	net32_t		src_ip;
+	net32_t		dst_ip;
+
+} PACK_SUFFIX ip_hdr_t;
+/*
+* FIELDS
+*	ver_hl
+*		Header version and length.
+*
+*	svc_type
+*		Service type.
+*
+*	length
+*		Total length.
+*
+*	id
+*		Packet identification.
+*
+*	offset
+*		Fragment offset.
+*
+*	ttl
+*		Time to live.
+*
+*	prot
+*		Protocol.
+*
+*	chksum
+*		Checksum.
+*
+*	src_ip
+*		Source IP address
+*
+*	dst_ip
+*		Destination IP address
+*
+* SEE ALSO
+*	IB Network Drivers, eth_hdr_t, arp_pkt_t, tcp_hdr_t, udp_hdr_t
+*********/
+#include
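/*
 * Illustrative helpers, not part of the patch: ver_hl above packs the IP
 * version in the high nibble and the header length, counted in 32-bit words,
 * in the low nibble.  The checksum is the usual 16-bit one's complement sum
 * over the header, computed with the chksum field set to zero.
 */
static __inline uint8_t
ip_version(
	IN const ip_hdr_t* const p_ip_hdr )
{
	return (uint8_t)(p_ip_hdr->ver_hl >> 4);
}

static __inline uint32_t
ip_hdr_len(
	IN const ip_hdr_t* const p_ip_hdr )
{
	/* Low nibble counts 32-bit words; 5 (20 bytes) if there are no options. */
	return (uint32_t)(p_ip_hdr->ver_hl & 0x0F) * 4;
}

static __inline net16_t
ip_checksum(
	IN const ip_hdr_t* const p_ip_hdr )
{
	const uint16_t	*p_word = (const uint16_t*)p_ip_hdr;
	uint32_t		sum = 0;
	uint32_t		i;

	/* Sum the header as 16-bit words (chksum field must be zero). */
	for( i = 0; i < ip_hdr_len( p_ip_hdr ) / 2; i++ )
		sum += p_word[i];

	/* Fold the carries, then return the one's complement. */
	while( sum >> 16 )
		sum = (sum & 0xFFFF) + (sum >> 16);

	return (net16_t)~sum;
}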
+
+
+#include
+/****s* IB Network Drivers/tcp_hdr_t
+* NAME
+*	tcp_hdr_t
+*
+* DESCRIPTION
+*	Defines the TCP header for IP packets.
+*
+* SYNOPSIS
+*/
+typedef struct _tcp_hdr
+{
+	net16_t		src_port;
+	net16_t		dst_port;
+	net32_t		seq_num;
+	net32_t		ack_num;
+	uint8_t		offset;
+	uint8_t		flags;
+	net16_t		window;
+	net16_t		chksum;
+	net16_t		urp;
+
+} PACK_SUFFIX tcp_hdr_t;
+/*
+* FIELDS
+*	src_port
+*		Source port.
+*
+*	dst_port
+*		Destination port.
+*
+*	seq_num
+*		Sequence number.
+*
+*	ack_num
+*		Acknowledge number.
+*
+*	offset
+*		Data offset.
+*
+*	flags
+*		TCP flags.
+*
+*	window
+*		Window size.
+*
+*	chksum
+*		Checksum.
+*
+*	urp
+*		Urgent pointer.
+*
+* SEE ALSO
+*	IB Network Drivers, eth_hdr_t, arp_pkt_t, ip_hdr_t, udp_hdr_t
+*********/
+#include
+
+
+#include
+/****s* IB Network Drivers/udp_hdr_t
+* NAME
+*	udp_hdr_t
+*
+* DESCRIPTION
+*	Defines the UDP header for IP packets.
+*
+* SYNOPSIS
+*/
+typedef struct _udp_hdr
+{
+	net16_t		src_port;
+	net16_t		dst_port;
+	net16_t		length;
+	net16_t		chksum;
+
+} PACK_SUFFIX udp_hdr_t;
+/*
+* FIELDS
+*	src_port
+*		Source port.
+*
+*	dst_port
+*		Destination port.
+*
+*	length
+*		Length of datagram.
+*
+*	chksum
+*		Checksum.
+*
+* SEE ALSO
+*	IB Network Drivers, eth_hdr_t, arp_pkt_t, ip_hdr_t, tcp_hdr_t
+*********/
+#include
+
+
+#define DHCP_PORT_SERVER	CL_HTON16(67)
+#define DHCP_PORT_CLIENT	CL_HTON16(68)
+
+#define DHCP_REQUEST		1
+#define DHCP_REPLY		2
+#define DHCP_HW_TYPE_ETH	1
+#define DHCP_HW_TYPE_IB		32
+#define DHCP_OPT_PAD		0
+#define DHCP_OPT_END		255
+#define DHCP_OPT_MSG		53
+#define DHCP_OPT_CLIENT_ID	61
+
+#define DHCPDISCOVER		1
+#define DHCPOFFER		2
+#define DHCPREQUEST		3
+#define DHCPDECLINE		4
+#define DHCPACK			5
+#define DHCPNAK			6
+#define DHCPRELEASE		7
+#define DHCPINFORM		8
+
+#define DHCP_FLAGS_BROADCAST	CL_HTON16(0x8000)
+#define DHCP_COOKIE		0x63538263
+#define DHCP_OPTIONS_SIZE	312
+#define DHCP_COOKIE_SIZE	4
+
+
+/* Minimum DHCP size is without options (but with 4-byte magic cookie).
*/ +#define DHCP_MIN_SIZE (sizeof(dhcp_pkt_t) + DHCP_COOKIE_SIZE - DHCP_OPTIONS_SIZE ) + +#include +/****s* IB Network Drivers/dhcp_pkt_t +* NAME +* dhcp_pkt_t +* +* DESCRIPTION +* Defines the DHCP packet format as documented in RFC 2131 +* http://www.zvon.org/tmRFC/RFC2131/Output/index.html +* +* SYNOPSIS +*/ +typedef struct _dhcp_pkt +{ + uint8_t op; + uint8_t htype; + uint8_t hlen; + uint8_t hops; + net32_t xid; + net16_t secs; + net16_t flags; + net32_t ciaddr; + net32_t yiaddr; + net32_t siaddr; + net32_t giaddr; + uint8_t chaddr[16]; + uint8_t sname[64]; + uint8_t file[128]; + uint8_t options[312]; + +} PACK_SUFFIX dhcp_pkt_t; +/* +* SEE ALSO +* IB Network Drivers, eth_hdr_t, arp_pkt_t, ip_hdr_t, udp_hdr_t +*********/ +#include + + +#include +typedef struct _udp_pkt +{ + udp_hdr_t hdr; + dhcp_pkt_t dhcp; + +} PACK_SUFFIX udp_pkt_t; + +typedef struct _ip_pkt +{ + ip_hdr_t hdr; + union _ip_payload + { + tcp_hdr_t tcp; + udp_pkt_t udp; + + } PACK_SUFFIX prot; + +} PACK_SUFFIX ip_pkt_t; + +typedef struct _eth_pkt +{ + eth_hdr_t hdr; + union _eth_payload + { + arp_pkt_t arp; + ip_pkt_t ip; + + } PACK_SUFFIX type; + +} PACK_SUFFIX eth_pkt_t; +#include + + +#endif /* _IP_PACKET_H_ */ diff --git a/branches/Ndi/inc/mthca/mthca_vc.h b/branches/Ndi/inc/mthca/mthca_vc.h new file mode 100644 index 00000000..3fdf7fff --- /dev/null +++ b/branches/Ndi/inc/mthca/mthca_vc.h @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies, Inc. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef MTHCA_VC_H +#define MTHCA_VC_H + +typedef +struct _map_crspace { + unsigned __int64 va; /* address of CRSPACE, mapped to user space */ + unsigned long size; /* size of CRSPACE, mapped to user space */ + unsigned long reserved; /* to align on quadword boundary */ +} map_crspace; + +/* Definitions for hca_driver commands*/ +#define FW_READ 0x00 +#define FW_WRITE 0x01 +#define FW_READ_CMD 0x08 +#define FW_WRITE_CMD 0x09 +#define FW_MAP_CRSPACE 0x0A +#define FW_UNMAP_CRSPACE 0x0B +#define FW_REREGISTER_HCA 0x0c +#define FW_OPEN_IF 0xe7 +#define FW_CLOSE_IF 0x7e + +/* uplink info */ +typedef struct { + uint8_t bus_type; /* 1 - PCI, 2 - PCI-X, 3 - PCI_E */ +#define UPLINK_BUS_PCI 1 +#define UPLINK_BUS_PCIX 2 +#define UPLINK_BUS_PCIE 3 + union { + struct { + uint8_t capabilities; +#define UPLINK_BUS_PCIX_133 2 /* 133 MHz capable */ + uint16_t frequency; /* in MHz */ + } pci_x; + struct { + uint8_t capabilities; + uint8_t link_speed; /* 1X link speed */ +#define UPLINK_BUS_PCIE_SDR 1 /* 2.5 Gbps */ +#define UPLINK_BUS_PCIE_DDR 2 /* 5 Gbps */ + uint8_t link_width; /* x1, x2, x4, x8, x12, x16, x32 */ + } pci_e; + } u; +} uplink_info_t; + +/* Defines for get data for vendor specific */ +#define MTHCA_BRD_ID_LEN 64 + +inline char* mthca_get_board_id(ib_ca_attr_t *ca_attr) +{ + return (char*)(ca_attr)+(ca_attr->size - MTHCA_BRD_ID_LEN - sizeof(uplink_info_t)); +} + +inline void* mthca_get_uplink_info(ib_ca_attr_t *ca_attr) +{ + return (char*)(ca_attr)+(ca_attr->size - sizeof(uplink_info_t)); +} + +#endif diff --git a/branches/Ndi/inc/oib_ver.h b/branches/Ndi/inc/oib_ver.h new file mode 100644 index 00000000..59a29d3f --- /dev/null +++ b/branches/Ndi/inc/oib_ver.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include + +#if (VER_FILEBUILD < 10) +#define VER_FILEBPAD "000" +#elif (VER_FILEBUILD < 100) +#define VER_FILEBPAD "00" +#elif (VER_FILEBUILD < 1000) +#define VER_FILEBPAD "0" +#else +#define VER_FILEBPAD +#endif + +#define VER_FILEVERSION VER_FILEMAJORVERSION,\ + VER_FILEMINORVERSION,\ + VER_FILEBUILD,\ + VER_FILEREV + +#define VER_FILEVERSION_STR2(M,m,b,r) #M "." #m "." VER_FILEBPAD #b "." 
#r +#define VER_FILEVERSION_STR1(M,m,b,r) VER_FILEVERSION_STR2(M,m,b,r) +#define VER_FILEVERSION_STR VER_FILEVERSION_STR1( VER_FILEMAJORVERSION, \ + VER_FILEMINORVERSION, \ + VER_FILEBUILD, \ + VER_FILEREV ) + +#undef __BUILDMACHINE__ + +#ifdef VER_COMPANYNAME_STR +#undef VER_COMPANYNAME_STR +#endif +#define VER_COMPANYNAME_STR IB_COMPANYNAME + +#ifdef VER_PRODUCTNAME_STR +#undef VER_PRODUCTNAME_STR +#endif +#define VER_PRODUCTNAME_STR IB_PRODUCTNAME + +#define VER_LEGALCOPYRIGHT_STR "Copyright\xa9 2005 OpenIB Alliance" diff --git a/branches/Ndi/inc/openib.def b/branches/Ndi/inc/openib.def new file mode 100644 index 00000000..58a14e23 --- /dev/null +++ b/branches/Ndi/inc/openib.def @@ -0,0 +1,34 @@ +!INCLUDE $(NTMAKEENV)\makefile.def + +# Allow overriding the company name. +!IF !DEFINED(IB_COMPANYNAME) +IB_COMPANYNAME="""OpenIB\x20Alliance""" +!ENDIF + +# Allow overriding the product name. +!IF !DEFINED(IB_PRODUCTNAME) +IB_PRODUCTNAME="""OpenIB\x20Windows""" +!ENDIF + +!IF !DEFINED(IB_MAJORVERSION) +IB_MAJORVERSION=1 +!endif + +!IF !DEFINED(IB_MINORVERSION) +IB_MINORVERSION=0 +!endif + +!IF !DEFINED(IB_FILEBUILD) +IB_FILEBUILD=0 +!endif + +!IF !DEFINED(IB_FILEREV) +IB_FILEREV=$(OPENIB_REV) +!endif + +C_DEFINES=$(C_DEFINES) -DIB_COMPANYNAME=$(IB_COMPANYNAME) \ + -DIB_PRODUCTNAME=$(IB_PRODUCTNAME) \ + -DVER_FILEMAJORVERSION=$(IB_MAJORVERSION) \ + -DVER_FILEMINORVERSION=$(IB_MINORVERSION) \ + -DVER_FILEBUILD=$(IB_FILEBUILD) \ + -DVER_FILEREV=$(IB_FILEREV) diff --git a/branches/Ndi/inc/user/complib/cl_atomic_osd.h b/branches/Ndi/inc/user/complib/cl_atomic_osd.h new file mode 100644 index 00000000..062177d8 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_atomic_osd.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_ATOMIC_OSD_H_ +#define _CL_ATOMIC_OSD_H_ + + +#include "cl_types.h" + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE int32_t CL_API +cl_atomic_inc( + IN atomic32_t* const p_value ) +{ + return( InterlockedIncrement( (LONG*)p_value ) ); +} + + +CL_INLINE int32_t CL_API +cl_atomic_dec( + IN atomic32_t* const p_value ) +{ + return( InterlockedDecrement( (LONG*)p_value ) ); +} + + +CL_INLINE int32_t CL_API +cl_atomic_add( + IN atomic32_t* const p_value, + IN const int32_t increment ) +{ + /* Return the incremented value. 
*/ + return( InterlockedExchangeAdd( (long*)p_value, increment ) + increment ); +} + + +CL_INLINE int32_t CL_API +cl_atomic_sub( + IN atomic32_t* const p_value, + IN const int32_t decrement ) +{ + /* Return the decremented value. */ + return( InterlockedExchangeAdd( (long*)p_value, -decrement ) - decrement ); +} + + +CL_INLINE int32_t CL_API +cl_atomic_xchg( + IN atomic32_t* const p_value, + IN const int32_t new_value ) +{ + return( InterlockedExchange( (long*)p_value, new_value ) ); +} + + +CL_INLINE int32_t CL_API +cl_atomic_comp_xchg( + IN atomic32_t* const p_value, + IN const int32_t compare, + IN const int32_t new_value ) +{ + return( InterlockedCompareExchange( (long*)p_value, new_value, compare ) ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_ATOMIC_OSD_H_ diff --git a/branches/Ndi/inc/user/complib/cl_byteswap_osd.h b/branches/Ndi/inc/user/complib/cl_byteswap_osd.h new file mode 100644 index 00000000..dfa29043 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_byteswap_osd.h @@ -0,0 +1,67 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _CL_BYTESWAP_OSD_H_ +#define _CL_BYTESWAP_OSD_H_ + + +#include "complib/cl_types.h" + + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define CPU_LE 1 +#define CPU_BE 0 + +#define cl_ntoh16 _byteswap_ushort +#define cl_hton16 _byteswap_ushort + +#define cl_ntoh32 _byteswap_ulong +#define cl_hton32 _byteswap_ulong + +#define cl_ntoh64 _byteswap_uint64 +#define cl_hton64 _byteswap_uint64 + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_BYTESWAP_OSD_H_ + + + + + diff --git a/branches/Ndi/inc/user/complib/cl_debug_osd.h b/branches/Ndi/inc/user/complib/cl_debug_osd.h new file mode 100644 index 00000000..3bf6208d --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_debug_osd.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _CL_DEBUG_OSD_H_ +#define _CL_DEBUG_OSD_H_ + + +#include "cl_types.h" +#include +#include + + +#if !defined(__MODULE__) +#define __MODULE__ "" +#define __MOD_DELIMITER__ "" +#else /* !defined(__MODULE__) */ +#define __MOD_DELIMITER__ ":" +#endif /* !defined(__MODULE__) */ + + +#if defined( _WIN64 ) +#define PRIdSIZE_T "I64d" +#else +#define PRIdSIZE_T "d" +#endif +#define PRId64 "I64d" +#define PRIx64 "I64x" +#define PRIo64 "I64o" +#define PRIu64 "I64u" + + +#if defined( _DEBUG_ ) +#define cl_dbg_out cl_msg_out +#else +#define cl_dbg_out __noop +#endif /* defined( _DEBUG_ ) */ + + +/* + * The following macros are used internally by the CL_ENTER, CL_TRACE, + * CL_TRACE_EXIT, and CL_EXIT macros. + */ +#define _CL_DBG_ENTER \ + ("0x%x:%s%s%s() [\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_EXIT \ + ("0x%x:%s%s%s() ]\n", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_INFO \ + ("0x%x:%s%s%s(): ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define _CL_DBG_ERROR \ + ("0x%x:%s%s%s() !ERROR!: ", GetCurrentThreadId(), __MODULE__, __MOD_DELIMITER__, __FUNCTION__) + +#define CL_CHK_STK + +#endif /* _CL_DEBUG_OSD_H_ */ + diff --git a/branches/Ndi/inc/user/complib/cl_event_osd.h b/branches/Ndi/inc/user/complib/cl_event_osd.h new file mode 100644 index 00000000..44fad4ba --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_event_osd.h @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_EVENT_OSD_H_ +#define _CL_EVENT_OSD_H_ + + +#include "cl_types.h" + + +/* Simple definition, eh? 
*/ +typedef HANDLE cl_event_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +CL_INLINE void CL_API +cl_event_construct( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + + *p_event = NULL; +} + + +CL_INLINE cl_status_t CL_API +cl_event_init( + IN cl_event_t* const p_event, + IN const boolean_t manual_reset ) +{ + CL_ASSERT( p_event ); + + *p_event = CreateEvent( NULL, manual_reset, FALSE, NULL ); + if( !*p_event ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} + + +CL_INLINE void CL_API +cl_event_destroy( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + + if( *p_event ) + CloseHandle( *p_event ); +} + + +CL_INLINE cl_status_t CL_API +cl_event_signal( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + CL_ASSERT( *p_event ); + + if( !SetEvent( *p_event ) ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} + + +CL_INLINE cl_status_t CL_API +cl_event_reset( + IN cl_event_t* const p_event ) +{ + CL_ASSERT( p_event ); + CL_ASSERT( *p_event ); + + if( !ResetEvent( *p_event ) ) + return( CL_ERROR ); + + return( CL_SUCCESS ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_EVENT_OSD_H_ \ No newline at end of file diff --git a/branches/Ndi/inc/user/complib/cl_ioctl_osd.h b/branches/Ndi/inc/user/complib/cl_ioctl_osd.h new file mode 100644 index 00000000..2c5b9e41 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_ioctl_osd.h @@ -0,0 +1,118 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of IOCTL object + * + * Environment: + * Windows User Mode + */ + + +#ifndef _CL_IOCTL_OSD_H_ +#define _CL_IOCTL_OSD_H_ + + +#include +#pragma warning(push, 3) +#include +#pragma warning(pop) + + +#define IOCTL_CODE( type, cmd ) \ + CTL_CODE( type, (cmd & 0x0FFF), METHOD_BUFFERED, FILE_ANY_ACCESS) + + +#ifdef __cplusplus +extern "C" +{ +#endif /* __cplusplus */ + + +CL_INLINE cl_status_t CL_API +cl_ioctl_request( + IN void *h_dev, + IN uint32_t ioctl_code, + IN void *p_in_buf, + IN size_t in_size, + OUT void *p_out_buf, + IN size_t out_size, + OUT size_t *p_ret_bytes OPTIONAL, + IN void *p_async_info OPTIONAL ) +{ + DWORD bytes_ret; + + if( !DeviceIoControl( h_dev, ioctl_code, p_in_buf, (DWORD)in_size, + p_out_buf, (DWORD)out_size, &bytes_ret, (LPOVERLAPPED)p_async_info ) ) + { + if( GetLastError() == ERROR_IO_PENDING ) + return CL_PENDING; + else + return CL_ERROR; + } + + *p_ret_bytes = bytes_ret; + return CL_SUCCESS; +} + + +CL_INLINE cl_status_t CL_API +cl_ioctl_result( + IN void *h_dev, + IN void *p_async_info, + OUT size_t *p_ret_bytes, + IN boolean_t blocking ) +{ + DWORD bytes_ret; + + if( !GetOverlappedResult( + h_dev, (LPOVERLAPPED)p_async_info, &bytes_ret, blocking ) ) + { + if( GetLastError() == ERROR_IO_INCOMPLETE ) + return CL_NOT_DONE; + else + return CL_ERROR; + } + + *p_ret_bytes = bytes_ret; + return CL_SUCCESS; +} + + + +#ifdef __cplusplus +} +#endif /* __cplusplus */ + + +#endif // _CL_IOCTL_OSD_H_ diff --git a/branches/Ndi/inc/user/complib/cl_memory_osd.h b/branches/Ndi/inc/user/complib/cl_memory_osd.h new file mode 100644 index 00000000..f3cc5db3 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_memory_osd.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Defines platform specific memory related functions. 
diff --git a/branches/Ndi/inc/user/complib/cl_memory_osd.h b/branches/Ndi/inc/user/complib/cl_memory_osd.h
new file mode 100644
index 00000000..f3cc5db3
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_memory_osd.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Defines platform specific memory related functions.
+ *
+ * Environment:
+ *	Windows User Mode
+ */
+
+
+#ifndef _CL_MEMORY_OSD_H_
+#define _CL_MEMORY_OSD_H_
+
+
+#include <complib/cl_types.h>
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+CL_INLINE void CL_API
+cl_memset(
+	IN	void* const	p_memory,
+	IN	const uint8_t	fill,
+	IN	const size_t	count )
+{
+	RtlFillMemory( p_memory, count, fill );
+}
+
+
+CL_INLINE void* CL_API
+cl_memcpy(
+	IN	void* const		p_dest,
+	IN	const void* const	p_src,
+	IN	const size_t		count )
+{
+	RtlCopyMemory( p_dest, p_src, count );
+	return p_dest;
+}
+
+
+CL_INLINE int32_t CL_API
+cl_memcmp(
+	IN	const void* const	p_memory1,
+	IN	const void* const	p_memory2,
+	IN	const size_t		count )
+{
+	return( memcmp( p_memory1, p_memory2, count ) );
+}
+
+
+#define _CL_MEMCLR_DEFINED_
+CL_INLINE void CL_API
+cl_memclr(
+	IN	void* const	p_memory,
+	IN	const size_t	count )
+{
+	RtlSecureZeroMemory( p_memory, count );
+}
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_MEMORY_OSD_H_ */
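Worth noting about cl_memclr above: it maps to RtlSecureZeroMemory, which the compiler is not allowed to optimize away, unlike a plain memset of a buffer about to go out of scope. A minimal sketch of the intended use (the key buffer is hypothetical):

uint8_t key[32];
/* ... use the key material ... */
cl_memclr( key, sizeof(key) );	/* guaranteed wipe, not elided by the optimizer */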
diff --git a/branches/Ndi/inc/user/complib/cl_mutex_osd.h b/branches/Ndi/inc/user/complib/cl_mutex_osd.h
new file mode 100644
index 00000000..4fd9415f
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_mutex_osd.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of mutex object.
+ *
+ * Environment:
+ *	Windows User Mode
+ */
+
+
+#ifndef _CL_MUTEX_OSD_H_
+#define _CL_MUTEX_OSD_H_
+
+
+#include <complib/cl_types.h>
+
+
+typedef HANDLE	cl_mutex_t;
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+CL_INLINE void CL_API
+cl_mutex_construct(
+	IN	cl_mutex_t* const	p_mutex )
+{
+	*p_mutex = NULL;
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_mutex_init(
+	IN	cl_mutex_t* const	p_mutex )
+{
+	*p_mutex = CreateMutex( NULL, FALSE, NULL );
+	if( *p_mutex )
+		return CL_SUCCESS;
+	else
+		return CL_ERROR;
+}
+
+
+CL_INLINE void CL_API
+cl_mutex_destroy(
+	IN	cl_mutex_t* const	p_mutex )
+{
+	CloseHandle( *p_mutex );
+}
+
+
+CL_INLINE void CL_API
+cl_mutex_acquire(
+	IN	cl_mutex_t* const	p_mutex )
+{
+	WaitForSingleObject( *p_mutex, INFINITE );
+}
+
+
+CL_INLINE void CL_API
+cl_mutex_release(
+	IN	cl_mutex_t* const	p_mutex )
+{
+	ReleaseMutex( *p_mutex );
+}
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif
+
+#endif	/* _CL_MUTEX_OSD_H_ */
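The cl_mutex wrappers above follow the same construct/init/destroy lifecycle as the other complib primitives; a minimal usage sketch (illustrative only, not part of the patch):

cl_mutex_t mutex;

cl_mutex_construct( &mutex );
if( cl_mutex_init( &mutex ) == CL_SUCCESS )
{
	cl_mutex_acquire( &mutex );	/* WaitForSingleObject, INFINITE */
	/* ... critical section ... */
	cl_mutex_release( &mutex );
	cl_mutex_destroy( &mutex );
}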
diff --git a/branches/Ndi/inc/user/complib/cl_packoff.h b/branches/Ndi/inc/user/complib/cl_packoff.h
new file mode 100644
index 00000000..2117feb7
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_packoff.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/* Note: The lack of conditional inclusion is intentional. */
+#include
diff --git a/branches/Ndi/inc/user/complib/cl_packon.h b/branches/Ndi/inc/user/complib/cl_packon.h
new file mode 100644
index 00000000..648b239e
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_packon.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/* Note: The lack of conditional inclusion is intentional. */
+#include
+
+
+/*
+ * Abstract:
+ *	Turns on byte packing, which is necessary for passing information from
+ *	system to system over a network to ensure no padding by the compiler has
+ *	taken place.
+ *
+ * Note:
+ *	No Robodoc documentation as with other headers.
+ */
+
+#ifndef PACK_SUFFIX
+#define PACK_SUFFIX
+#endif
diff --git a/branches/Ndi/inc/user/complib/cl_spinlock_osd.h b/branches/Ndi/inc/user/complib/cl_spinlock_osd.h
new file mode 100644
index 00000000..eb07caed
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_spinlock_osd.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _CL_SPINLOCK_OSD_H_
+#define _CL_SPINLOCK_OSD_H_
+
+
+#include "cl_types.h"
+
+
+/* Spinlock object definition. */
+typedef struct _cl_spinlock
+{
+	CRITICAL_SECTION	crit_sec;
+	/* The flag is necessary to conditionally destroy the critical section. */
+	boolean_t		initialized;
+
+} cl_spinlock_t;
+
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+
+CL_INLINE void CL_API
+cl_spinlock_construct(
+	IN	cl_spinlock_t* const	p_spinlock )
+{
+	p_spinlock->initialized = FALSE;
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_spinlock_init(
+	IN	cl_spinlock_t* const	p_spinlock )
+{
+	CL_ASSERT( p_spinlock );
+
+	cl_spinlock_construct( p_spinlock );
+
+	/*
+	 * Documentation recommends a spin count ~4K.
+	 * High bit set to force pre-allocation of event.
+ */ + if( !InitializeCriticalSectionAndSpinCount( + &p_spinlock->crit_sec, 0x80001000 ) ) + { + return( CL_ERROR ); + } + + p_spinlock->initialized = TRUE; + return( CL_SUCCESS ); +} + + +CL_INLINE void CL_API +cl_spinlock_destroy( + IN cl_spinlock_t* const p_spinlock ) +{ + CL_ASSERT( p_spinlock ); + + if( p_spinlock->initialized ) + { + p_spinlock->initialized = FALSE; + DeleteCriticalSection( &p_spinlock->crit_sec ); + } +} + + +CL_INLINE void CL_API +cl_spinlock_acquire( + IN cl_spinlock_t* const p_spinlock ) +{ + CL_ASSERT( p_spinlock && p_spinlock->initialized ); + + EnterCriticalSection( &p_spinlock->crit_sec ); +} + + +CL_INLINE void CL_API +cl_spinlock_release( + IN cl_spinlock_t* const p_spinlock ) +{ + CL_ASSERT( p_spinlock && p_spinlock->initialized ); + + LeaveCriticalSection( &p_spinlock->crit_sec ); +} + + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif // _CL_SPINLOCK_OSD_H_ diff --git a/branches/Ndi/inc/user/complib/cl_syscallback_osd.h b/branches/Ndi/inc/user/complib/cl_syscallback_osd.h new file mode 100644 index 00000000..142eaec9 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_syscallback_osd.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_SYS_CALLBACK_OSD_H_ +#define _CL_SYS_CALLBACK_OSD_H_ + + +#include "cl_types.h" + + +typedef struct _cl_sys_callback_item +{ + cl_pfn_sys_callback_t pfn_callback; + const void* get_context; + const void* queue_context; + +} cl_sys_callback_item_t; + + +#endif // _CL_SYS_CALLBACK_OSD_H_ \ No newline at end of file diff --git a/branches/Ndi/inc/user/complib/cl_thread_osd.h b/branches/Ndi/inc/user/complib/cl_thread_osd.h new file mode 100644 index 00000000..23705ed8 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_thread_osd.h @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_THREAD_OSD_H_ +#define _CL_THREAD_OSD_H_ + + +#include "cl_types.h" +#include "cl_timer.h" + + +/* OS specific information about the thread. */ +typedef struct _cl_thread_osd +{ + HANDLE h_thread; + DWORD thread_id; + +} cl_thread_osd_t; + + +#ifdef __cplusplus +extern "C" +{ +#endif + +CL_INLINE void CL_API +cl_thread_suspend( + IN const uint32_t pause_ms ) +{ + Sleep( pause_ms ); +} + + +CL_INLINE void CL_API +cl_thread_stall( + IN const uint32_t pause_us ) +{ + uint64_t end_time; + + end_time = cl_get_time_stamp() + pause_us; + + /* Spin. */ + while( cl_get_time_stamp() < end_time ) + ; +} + + +CL_INLINE boolean_t CL_API +cl_is_blockable( void ) +{ + return TRUE; +} + + +#ifdef __cplusplus +} // extern "C" +#endif + + +#endif // _CL_THREAD_OSD_H_ diff --git a/branches/Ndi/inc/user/complib/cl_timer_osd.h b/branches/Ndi/inc/user/complib/cl_timer_osd.h new file mode 100644 index 00000000..0a023ba6 --- /dev/null +++ b/branches/Ndi/inc/user/complib/cl_timer_osd.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + + +#ifndef _CL_TIMER_OSD_H_ +#define _CL_TIMER_OSD_H_ + + +#include "cl_types.h" + + +/* Timer object definition. 
 */
+typedef struct _cl_timer
+{
+	HANDLE			h_timer;
+	cl_pfn_timer_callback_t	pfn_callback;
+	const void		*context;
+	uint64_t		timeout_time;
+	DWORD			thread_id;
+
+} cl_timer_t;
+
+
+#endif	// _CL_TIMER_OSD_H_
\ No newline at end of file
diff --git a/branches/Ndi/inc/user/complib/cl_types_osd.h b/branches/Ndi/inc/user/complib/cl_types_osd.h
new file mode 100644
index 00000000..16e91046
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_types_osd.h
@@ -0,0 +1,146 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _CL_TYPES_OSD_H_
+#define _CL_TYPES_OSD_H_
+
+
+#if !defined( WINVER )
+	// Enable Windows 2000 features.
+	#define _WIN32_WINNT	0x0500
+	#define WINVER			0x0500
+#else	// !defined( WINVER )
+	#if(WINVER < 0x0500)
+		// Force a compile error.
+		#error The component library requires Windows 2000 features. \
+			WINVER and _WIN32_WINNT must be set to 0x0500 or greater.
+	#elif !defined(_WIN32_WINNT)
+		#define _WIN32_WINNT	WINVER
+	#endif
+#endif	// !defined( WINVER )
+
+#if _WIN32_WINNT != WINVER && _WIN32_WINNT != 0x0500 && WINVER != 0x501
+	#error _WIN32_WINNT does not match WINVER.
+#endif	// _WIN32_WINNT != WINVER
+
+#if !defined( _WINDOWS_ )
+	// Include the windows header file.
+	#include <windows.h>
+#endif	// !defined( _WINDOWS_ )
+
+#if defined( _DEBUG ) || DBG
+	#define _DEBUG_
+#else
+	#undef _DEBUG_
+#endif
+
+#ifdef __GNUC__
+
+#include <stdint.h>
+typedef intptr_t		intn_t;
+typedef uintptr_t		uintn_t;
+typedef volatile uint32_t	atomic32_t;
+
+#else	/* __GNUC__ */
+
+typedef __int8			int8_t;
+typedef unsigned __int8		uint8_t;
+typedef __int16			int16_t;
+typedef unsigned __int16	uint16_t;
+typedef __int32			int32_t;
+typedef unsigned __int32	uint32_t;
+typedef __int64			int64_t;
+typedef unsigned __int64	uint64_t;
+typedef unsigned char		uchar_t;
+typedef _W64 __int3264		intn_t;
+typedef _W64 unsigned __int3264	uintn_t;
+typedef volatile __int32	atomic32_t;
+
+#endif	/* __GNUC__ */
+
+/* boolean_t must be integral sized to avoid a bunch of warnings. */
+typedef int			boolean_t;
+
+
+#ifndef CL_ASSERT
+#ifdef _DEBUG_
+#define CL_ASSERT( exp )	(void)(!(exp)?OutputDebugString("Assertion Failed:" #exp "\n"),DebugBreak(),FALSE:TRUE)
+#else
+#define CL_ASSERT( exp )
+#endif	/* _DEBUG_ */
+#endif	/* CL_ASSERT */
+
+
+#define UNUSED_PARAM		UNREFERENCED_PARAMETER
+#if defined(EVENT_TRACING)
+#define UNUSED_PARAM_WOWPP(a)
+#else
+#define UNUSED_PARAM_WOWPP(a)	UNREFERENCED_PARAMETER(a)
+#endif
+
+#if !defined(EXPORT_CL_SYMBOLS)
+#define CL_EXPORT		DECLSPEC_IMPORT
+#else
+#define CL_EXPORT		__declspec(dllexport)
+#endif
+
+#if !defined( __cplusplus )
+#define inline	__inline
+#endif
+
+#define CL_INLINE		CL_EXPORT inline
+
+#define CL_API			__stdcall
+
+#define cl_panic		abort
+
+#ifndef offsetof
+#define offsetof		FIELD_OFFSET
+#endif
+
+#define PARENT_STRUCT( P, T, M )	CONTAINING_RECORD( (void*)P, T, M )
+
+#ifdef __GNUC__
+#define CL_CONST64( x )		x##LLU
+#else
+#define CL_CONST64( x )		x##ui64
+#endif
+
+
+#endif	// _CL_TYPES_OSD_H_
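Two of the macros above deserve a short sketch (illustrative only; the structure and p_list_item pointer are hypothetical): PARENT_STRUCT recovers a containing structure from a pointer to one of its members (the CONTAINING_RECORD idiom), and CL_CONST64 spells a 64-bit literal portably across MSVC and GCC.

typedef struct _my_item
{
	cl_list_item_t	list_item;	/* embedded link */
	uint64_t	id;
} my_item_t;

/* Given a pointer to the embedded member, recover the parent object. */
my_item_t *p_item = PARENT_STRUCT( p_list_item, my_item_t, list_item );

/* 64-bit constant without suffix warnings on either compiler. */
uint64_t mask = CL_CONST64( 0xFFFFFFFF00000000 );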
diff --git a/branches/Ndi/inc/user/complib/cl_waitobj_osd.h b/branches/Ndi/inc/user/complib/cl_waitobj_osd.h
new file mode 100644
index 00000000..b9f4eee8
--- /dev/null
+++ b/branches/Ndi/inc/user/complib/cl_waitobj_osd.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _CL_WAITOBJ_OSD_H_
+#define _CL_WAITOBJ_OSD_H_
+
+
+#include <complib/cl_types.h>
+#include <complib/cl_event.h>
+
+typedef cl_event_t	cl_waitobj_handle_t;
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif	/* __cplusplus */
+
+
+CL_INLINE cl_status_t CL_API
+cl_waitobj_create(
+	IN	const boolean_t			manual_reset,
+	OUT	cl_waitobj_handle_t* const	ph_wait_obj )
+{
+	cl_event_construct( ph_wait_obj );
+	return cl_event_init( ph_wait_obj, manual_reset );
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_waitobj_destroy(
+	IN	cl_waitobj_handle_t	h_wait_obj )
+{
+	/*
+	 * Note that we can take the address of the function parameter *only*
+	 * because the wait object (and cl_event_t) is just a HANDLE, so
+	 * copying it works.
+	 */
+	cl_event_destroy( &h_wait_obj );
+	return CL_SUCCESS;
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_waitobj_signal(
+	IN	cl_waitobj_handle_t	h_wait_obj )
+{
+	/*
+	 * Note that we can take the address of the function parameter *only*
+	 * because the wait object (and cl_event_t) is just a HANDLE, so
+	 * copying it works.
+	 */
+	return cl_event_signal( &h_wait_obj );
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_waitobj_reset(
+	IN	cl_waitobj_handle_t	h_wait_obj )
+{
+	/*
+	 * Note that we can take the address of the function parameter *only*
+	 * because the wait object (and cl_event_t) is just a HANDLE, so
+	 * copying it works.
+	 */
+	return cl_event_reset( &h_wait_obj );
+}
+
+
+CL_INLINE cl_status_t CL_API
+cl_waitobj_wait_on(
+	IN	cl_waitobj_handle_t	h_wait_obj,
+	IN	const uint32_t		wait_us,
+	IN	const boolean_t		interruptible )
+{
+	/*
+	 * Note that we can take the address of the function parameter *only*
+	 * because the wait object (and cl_event_t) is just a HANDLE, so
+	 * copying it works.
+	 */
+	return cl_event_wait_on( &h_wait_obj, wait_us, interruptible );
+}
+
+
+#ifdef __cplusplus
+}	/* extern "C" */
+#endif	/* __cplusplus */
+
+
+#endif	/* _CL_WAITOBJ_OSD_H_ */
diff --git a/branches/Ndi/inc/user/iba/ib_uvp.h b/branches/Ndi/inc/user/iba/ib_uvp.h
new file mode 100644
index 00000000..33be7520
--- /dev/null
+++ b/branches/Ndi/inc/user/iba/ib_uvp.h
@@ -0,0 +1,3370 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#ifndef __IB_UAL_UVP_H__
+#define __IB_UAL_UVP_H__
+
+#include
+#include
+#include
+
+/****h* UAL_UVP_Interface/user-mode Verbs
+* NAME
+*	User-mode Verbs -- User-mode Verbs implements the HCA-specific
+*	user-mode functions that plug into the User-mode Access Layer
+*	architecture (UAL).
+*
+* COPYRIGHT
+*	Copyright © 2001 Intel Corporation - All Rights Reserved.
+* DESCRIPTION
+*	The user-mode Verbs interface defines the mechanism for an HCA vendor
+*	to plug into the User-mode Access Layer (UAL) architecture.
+*	The Access Layer API is what is exposed to user-mode applications;
+*	the interface described here is not the Verbs API itself. In this
+*	interface model, UAL provides a generic mechanism to exchange
+*	vendor-specific info in the implementation of verbs within the UAL
+*	architecture. UAL provides the support for callback processing. For
+*	instance, AL provides a QP error callback when a QP incurs an error.
+*	Such asynchronous events are handled with the support of UAL and not
+*	by the vendor interface described here.
+*
+*	For verbs-related AL APIs, UAL packages the parameters in an IOCTL
+*	and sends it to the kernel AL. In the UAL design, this is broken down
+*	into 3 steps.
+*
+*	a. Pre-ioctl step
+*	A vendor-specified pre-ioctl function is called with the relevant
+*	input parameters, including a private buffer template (ci_umv_buf_t)
+*	for the vendor to communicate with the corresponding HCA driver.
+*	For calls that do not go to the HCA driver (e.g. ib_open_ca()),
+*	no private buffer is passed.
+*	b. Sending the IOCTL to kernel AL
+*	Following step (a), UAL prepares an IOCTL with the relevant
+*	parameters, including the vendor's private buffer. The UAL/user-mode
+*	proxy does not interpret the contents of the private buffer.
+*	UAL sends the IOCTL to the user-mode proxy in the kernel. The proxy
+*	interfaces with kernel AL to act on behalf of the user. AL passes the
+*	parameters to the Verbs Provider Driver and the results are returned
+*	to UAL.
+*	c. Post-ioctl step
+*	Following the return from the IOCTL in step (b), UAL calls a
+*	vendor-specified post-ioctl function with the relevant parameters.
+*	UAL calls the post-ioctl function whether or not step (b) succeeded:
+*	the ioctl itself can return successfully while a vendor-specific
+*	status in ci_umv_buf_t indicates a failure. UAL also passes the ioctl
+*	status to the vendor library so that the appropriate action can be
+*	taken in the post call.
+*
+*	Use of ci_umv_buf_t and pre/post return values
+*
+*	1. ci_umv_buf is provided by UAL as a unique buffer template for
+*	a given verbs call. The vendor can keep any info relevant to
+*	the specific verbs call in this buffer. The buffer is sufficient
+*	for uniquely identifying which call it is intended for. For instance,
+*	the umv buffer set up by the vendor in uvp_pre_create_qp() can later
+*	tell the uvp_post_create_qp_t() which QP it is intended for.
+*
+*	2. To UAL, success of a pre/post-ioctl step means IB_SUCCESS.
+*	Any value other than IB_SUCCESS is treated as failure.
+*
+*	3. The vendor can exchange a status in ci_umv_buf_t. This interface
+*	does not enumerate the possible status values in ci_umv_buf_t;
+*	the vendor is expected to check the status in ci_umv_buf_t returned
+*	from the pre-ioctl step and act accordingly.
+*
+* AUTHOR
+*	Intel Corporation
+* CREATION DATE
+*	XX.XX.XX
+* NOTES
+*	1. For user-mode verbs that require a kernel transition, handles
+*	passed to the kernel are validated in the user-mode proxy running in
+*	the kernel. Verbs that are performed entirely in user mode, where
+*	they would affect the speed path, do not perform consistency checks;
+*	invalid pointers will therefore crash the application with core dumps.
+*
+*******
+*
+*/
+
+/******/
+
+/******/
+
+/*
+*
+* Vendor-specific handles
+*
+*/
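A sketch of the three-step flow described above, as UAL might drive it for ib_open_ca() (illustrative only; everything except the uvp entry-point signatures -- p_uvp, ual_send_open_ca_ioctl, umv_buf -- is a hypothetical name):

/* Step (a): vendor pre-ioctl fills the private umv buffer. */
status = p_uvp->pre_open_ca( ca_guid, &umv_buf );
if( status != IB_SUCCESS )
	return status;

/* Step (b): UAL ships the parameters, umv buffer included, to kernel AL;
 * the proxy does not interpret the buffer's contents. */
ioctl_status = ual_send_open_ca_ioctl( ca_guid, &umv_buf );

/* Step (c): vendor post-ioctl runs whether or not (b) succeeded, and
 * inspects both the ioctl status and any vendor status in the umv buffer. */
status = p_uvp->post_open_ca( ca_guid, ioctl_status, &h_uvp_ca, &umv_buf );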
+/****f* user-mode Verbs/unsupported functions
+* NAME
+*	1. Register physical memory region with HCA (ci_register_pmr)
+*	2. Modify physical memory region with HCA (ci_modify_pmr)
+*	3. Create Special QP (ci_create_spl_qp)
+*
+*	For all these functions, the vendor does NOT provide support
+*	and UAL will return IB_UNSUPPORTED to the caller of the Access Layer.
+*
+* SYNOPSIS
+*/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_open_ca_t
+* NAME
+*	uvp_pre_open_ca_t -- Pre-ioctl operation for user-mode ib_open_ca()
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_open_ca_t) (
+	IN	const ib_net64_t	ca_guid,
+	IN OUT	ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_open_ca_t() is implemented by vendor. It is the pre-ioctl routine
+*	for the AL call ib_open_ca() in user-mode.
+*
+*
+* PARAMETERS
+*	ca_guid
+*		[in] The HCA adapter's EUI64 identifier. Clients use other
+*		enumeration APIs to locate all available adapters and their
+*		guids in a system, e.g. GetCaGuids(), maintained by the IB
+*		Access Layer.
+*
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains any vendor-specific
+*		record to be exchanged with the vendor's HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*	IB_INVALID_PARAMETER
+*		Invalid GUID.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t, uvp_pre_modify_ca,
+*	uvp_post_modify_ca_t, uvp_pre_close_ca_t, uvp_post_close_ca_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_open_ca_t
+* NAME
+*	uvp_post_open_ca_t -- Post-ioctl operation for user-mode ib_open_ca()
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_post_open_ca_t) (
+	IN	const ib_net64_t	ca_guid,
+	IN	ib_api_status_t		ioctl_status,
+	OUT	ib_ca_handle_t		*ph_uvp_ca,
+	IN	ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_post_open_ca_t() is implemented by vendor. It is the post-ioctl routine
+*	for the AL call ib_open_ca() in user-mode.
+*
+* PARAMETERS
+*	ca_guid
+*		[in] The HCA adapter's EUI64 identifier.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	ph_uvp_ca
+*		[out] Pointer to vendor's handle to the newly opened instance of
+*		the CA.
+*	p_umv_buf
+*		[in] This contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_open_ca).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The returned HCA handle is valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t, uvp_pre_modify_ca,
+*	uvp_post_modify_ca_t, uvp_pre_close_ca_t, uvp_post_close_ca_t
+*
+*********/
+
+/********/
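A sketch of how a vendor library might pair these two entry points (illustrative only; the vendor-side names and the ci_umv_buf_t field names are assumptions, not part of this patch):

/* Hypothetical vendor-side pairing for ib_open_ca(). */
static ib_api_status_t AL_API
my_pre_open_ca(
	IN	const ib_net64_t	ca_guid,
	IN OUT	ci_umv_buf_t		*p_umv_buf )
{
	UNUSED_PARAM( ca_guid );
	/* Point the umv buffer at a driver-private record (assumed fields). */
	p_umv_buf->p_inout_buf = (ULONG_PTR)&my_private_rec;
	p_umv_buf->input_size = p_umv_buf->output_size = sizeof(my_private_rec);
	return IB_SUCCESS;
}

static ib_api_status_t AL_API
my_post_open_ca(
	IN	const ib_net64_t	ca_guid,
	IN	ib_api_status_t		ioctl_status,
	OUT	ib_ca_handle_t		*ph_uvp_ca,
	IN	ci_umv_buf_t		*p_umv_buf )
{
	UNUSED_PARAM( ca_guid );
	/* Honor the ioctl status before trusting anything in the umv buffer. */
	if( ioctl_status != IB_SUCCESS )
		return ioctl_status;
	*ph_uvp_ca = make_vendor_ca_handle( p_umv_buf );	/* hypothetical helper */
	return IB_SUCCESS;
}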
+/****f* user-mode Verbs/uvp_pre_query_ca
+* NAME
+*	uvp_pre_query_ca -- Pre-ioctl operation for user-mode ib_query_ca()
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_ca) (
+	IN	ib_ca_handle_t	h_uvp_ca,
+	IN	ib_ca_attr_t	*p_ca_attr,
+	IN	size_t		byte_count,
+	IN	ci_umv_buf_t	*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_pre_query_ca() is implemented by vendor. It is the pre-ioctl routine
+*	for the AL call ib_query_ca() in user-mode.
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library handle to the open instance of the CA
+*	p_ca_attr
+*		[in] Pointer to the user's CA attribute buffer.
+*	byte_count
+*		[in] User-supplied size of the CA attribute buffer.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains any vendor-specific
+*		record to be exchanged with the vendor's HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INVALID_CA_HANDLE
+*		CA handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to satisfy request.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_post_query_ca_t, uvp_pre_modify_ca,
+*	uvp_post_modify_ca_t, uvp_pre_close_ca_t, uvp_post_close_ca_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_query_ca_t
+* NAME
+*	uvp_post_query_ca_t -- Post-ioctl operation for user-mode ib_query_ca()
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_ca_t) (
+	IN	ib_ca_handle_t		h_uvp_ca,
+	IN	ib_api_status_t		ioctl_status,
+	IN	ib_ca_attr_t		*p_ca_attr,
+	IN	size_t			byte_count,
+	IN	ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_post_query_ca_t() is implemented by vendor. It is the post-ioctl routine
+*	for the AL call ib_query_ca() in user-mode. UAL provides the results
+*	of querying the CA attributes to the vendor's post-ioctl routine.
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library handle to the open instance of the CA
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	p_ca_attr
+*		[in] CA attribute of this Host Channel adapter (as returned
+*		from the ioctl to kernel AL).
+*	byte_count
+*		[in] Number of bytes in ca_attr buffer.
+*	p_umv_buf
+*		[in] This contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_ca).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_pre_query_ca, uvp_pre_modify_ca,
+*	uvp_post_modify_ca_t, uvp_pre_close_ca_t, uvp_post_close_ca_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_modify_ca
+* NAME
+*	uvp_pre_modify_ca -- Pre-ioctl operation for user-mode ib_modify_ca()
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_modify_ca) (
+	IN	ib_ca_handle_t			h_uvp_ca,
+	IN	uint8_t				port_num,
+	IN	ib_ca_mod_t			ca_mod,
+	IN	const ib_port_attr_mod_t* const	p_port_attr_mod );
+
+/*
+* DESCRIPTION
+*	uvp_pre_modify_ca() is implemented by vendor. It is the pre-ioctl routine
+*	for the AL call ib_modify_ca() in user-mode.
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library handle to the open instance of the CA
+*	port_num
+*		[in] An index to the port that is being modified. The port_num matches
+*		the index of the port as returned through the ib_query_ca call.
+*	ca_mod
+*		[in] A mask of the attributes and counters to modify.
+*	p_port_attr_mod
+*		[in] A list of the specific port attribute information to modify. For
+*		the access layer to modify an attribute, its corresponding bit must be
+*		set in the ca_mod parameter.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INVALID_CA_HANDLE
+*		CA handle is invalid.
+*	IB_INVALID_PARAMETER
+*		One or more parameters is invalid.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t,
+*	uvp_post_modify_ca_t, uvp_pre_close_ca_t, uvp_post_close_ca_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_modify_ca_t
+* NAME
+*	uvp_post_modify_ca_t -- Post-ioctl operation for user-mode ib_modify_ca()
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_modify_ca_t) (
+	IN	ib_ca_handle_t		h_uvp_ca,
+	IN	ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_modify_ca_t() is implemented by vendor. It is the post-ioctl routine
+*	for the AL call ib_modify_ca() in user-mode.
+* +* PARAMETERS +* h_uvp_ca +* [in] Vendor's user-mode library handle to the open instance of the CA +* ioctl_status +* [in] The ioctl status of the AL API. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t, +* uvp_pre_modify_ca, uvp_pre_close_ca_t, uvp_post_close_ca_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_close_ca_t +* NAME +* uvp_pre_close_ca_t -- Pre-ioctl operation for user-mode ib_close_ca(). +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_close_ca_t) ( + IN ib_ca_handle_t h_uvp_ca ); + +/* +* DESCRIPTION +* uvp_pre_close_ca_t() is implemented by vendor. It is the pre-ioctl routine +* for the AL call ib_close_ca() in user-mode. +* +* PARAMETERS +* h_uvp_ca +* [in] Vendor's user-mode library handle to the open instance of the CA +* +* RETURN VALUE +* IB_SUCCESS +* Successfully completed the pre-ioctl. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t, +* uvp_pre_modify_ca, uvp_post_modify_ca_t, uvp_post_close_ca_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_close_ca_t +* NAME +* uvp_post_close_ca_t -- Post-ioctl operation for user-mode ib_close_ca(). +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_post_close_ca_t) ( + IN ib_ca_handle_t h_uvp_ca, + IN ib_api_status_t ioctl_status ); + +/* +* DESCRIPTION +* uvp_post_close_ca_t() is implemented by vendor. It is the post-ioctl routine +* for the AL call ib_close_ca(). +* UAL calls this function in the context of the asynchronous callback +* from AL notifying the successful destruction of CA. +* +* PARAMETERS +* h_uvp_ca +* [in] Vendor's user-mode library handle to the open instance of the CA +* +* RETURN VALUE +* IB_SUCCESS +* The post-ioctl for ib_close_ca() successfully completed. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_open_ca_t, uvp_post_open_ca_t, uvp_pre_query_ca, uvp_post_query_ca_t, +* uvp_pre_modify_ca, uvp_post_modify_ca_t, uvp_pre_close_ca_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_ci_call +* NAME +* uvp_pre_ci_call -- Pre-ioctl function to ib_ci_call +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_ci_call) ( + IN const ib_ca_handle_t h_uvp_ca, + IN const void* __ptr64 * const handle_array OPTIONAL, + IN uint32_t num_handles, + IN ib_ci_op_t* const p_ci_op, + IN OUT ci_umv_buf_t *p_umv_buf); +/* +* DESCRIPTION +* uvp_pre_ci_call() is implemented by vendor. It is the pre-ioctl +* routine for ib_ci_call(). +* +* PARAMETERS +* h_uvp_ca +* [in] Vendor's user-mode library handle to the CA +* handle_array +* [in] An array of uvp handles. For valid types, refer to ib_ci.h or +* ib_al.h. This is an optional parameter. +* num_handles +* [in] The number of handles in the array. +* p_ci_op +* [in] The operation that is requested by the client. For more info, +* refer ib_types.h +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl is successful. 
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in Vendor library to complete the call
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_ci_call
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_ci_call
+* NAME
+*	uvp_post_ci_call -- Post-ioctl function to ib_ci_call
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_ci_call) (
+	IN	const ib_ca_handle_t		h_uvp_ca,
+	IN	ib_api_status_t			ioctl_status,
+	IN	const void* __ptr64 * const	handle_array OPTIONAL,
+	IN	uint32_t			num_handles,
+	IN	ib_ci_op_t* const		p_ci_op,
+	IN OUT	ci_umv_buf_t			*p_umv_buf);
+/*
+* DESCRIPTION
+*	uvp_post_ci_call() is implemented by vendor. It is the post-ioctl
+*	routine for ib_ci_call().
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library handle to the CA
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	handle_array
+*		[in] An array of uvp handles. For valid types, refer to ib_ci.h or
+*		ib_al.h. This is an optional parameter.
+*	num_handles
+*		[in] The number of handles in the array.
+*	p_ci_op
+*		[in] The operation that is requested by the client. For more info,
+*		refer to ib_types.h
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_ci_call
+*
+********/
+
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_allocate_pd
+* NAME
+*	uvp_pre_allocate_pd -- Pre-ioctl function to allocate PD
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_allocate_pd) (
+	IN	ib_ca_handle_t	h_uvp_ca,
+	IN OUT	ci_umv_buf_t	*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_allocate_pd() is implemented by vendor. It is the pre-ioctl routine
+*	for the AL call ib_alloc_pd() in user-mode.
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library handle to the open instance of the CA
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*	IB_INVALID_CA_HANDLE
+*		CA handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in Vendor library to complete the call
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_allocate_pd_t, uvp_pre_deallocate_pd, uvp_post_deallocate_pd_t
+*
+*******/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_allocate_pd_t
+* NAME
+*	uvp_post_allocate_pd_t -- Post-ioctl function to allocate PD
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_allocate_pd_t) (
+	IN	ib_ca_handle_t		h_uvp_ca,
+	IN	ib_api_status_t		ioctl_status,
+	OUT	ib_pd_handle_t		*ph_uvp_pd,
+	IN	ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_allocate_pd_t() is implemented by vendor. It is the post-ioctl
+*	routine for the AL call ib_alloc_pd().
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's user-mode library CA handle.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	ph_uvp_pd
+*		[out] The vendor library handle to the newly created protection domain.
+*	p_umv_buf
+*		[in] This contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_allocate_pd).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_allocate_pd, uvp_pre_deallocate_pd, uvp_post_deallocate_pd_t
+*
+*******/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_deallocate_pd
+* NAME
+*	uvp_pre_deallocate_pd -- Pre-ioctl function to deallocate PD
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_deallocate_pd) (
+	IN	const ib_pd_handle_t	h_uvp_pd );
+
+/*
+* DESCRIPTION
+*	uvp_pre_deallocate_pd() is implemented by vendor. It is the pre-ioctl
+*	routine for the AL call ib_dealloc_pd().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's user-mode library PD handle.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_allocate_pd, uvp_post_allocate_pd_t, uvp_post_deallocate_pd_t
+*
+*******/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_deallocate_pd_t
+* NAME
+*	uvp_post_deallocate_pd_t -- Post-ioctl function to deallocate PD
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_deallocate_pd_t) (
+	IN	const ib_pd_handle_t	h_uvp_pd,
+	IN	ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_deallocate_pd_t() is implemented by the vendor. It is the
+*	post-ioctl routine for the AL call ib_dealloc_pd().
+*
+*	When all the resources associated with a PD are destroyed,
+*	UAL invokes this post-ioctl routine to deallocate PD. Since the
+*	completion of the resource deallocation (e.g. QP/CQ) is asynchronous,
+*	this function is called from a UAL asynchronous callback
+*	processing thread.
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's user-mode library PD handle.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_allocate_pd, uvp_post_allocate_pd_t, uvp_pre_deallocate_pd
+*
+*******/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_create_av
+* NAME
+*	uvp_pre_create_av -- Pre-ioctl function to create AV
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_create_av) (
+	IN	const ib_pd_handle_t	h_uvp_pd,
+	IN	const ib_av_attr_t	*p_addr_vector,
+	IN OUT	ci_umv_buf_t		*p_umv_buf);
+/*
+* DESCRIPTION
+*	uvp_pre_create_av() is implemented by vendor. It is the pre-ioctl
+*	routine for ib_create_av().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's user-mode library handle to the Protection domain
+*		to which this AV is associated.
+*	p_addr_vector
+*		[in] Parameters to create the address vector.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl is successful.
+*	IB_INVALID_SETTING
+*		Values in the vector are not valid.
+*	IB_INVALID_PD_HANDLE
+*		The PD handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in Vendor library to complete the call
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_create_av_t, uvp_pre_query_av, uvp_post_query_av_t, uvp_pre_modify_av,
+*	uvp_post_modify_av_t, uvp_pre_destroy_av, uvp_post_destroy_av_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_create_av_t
+* NAME
+*	uvp_post_create_av_t -- Post-ioctl function to create AV
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_create_av_t) (
+	IN	const ib_pd_handle_t	h_uvp_pd,
+	IN	ib_api_status_t		ioctl_status,
+	OUT	ib_av_handle_t		*ph_uvp_av,
+	IN	ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_post_create_av_t() is implemented by vendor. It is the post-ioctl routine
+*	for ib_create_av().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's user-mode library handle to the Protection domain
+*		to which this AV is associated
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	ph_uvp_av
+*		[out] Vendor's address vector handle.
+*	p_umv_buf
+*		[in] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_av).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_av, uvp_pre_query_av, uvp_post_query_av_t, uvp_pre_modify_av,
+*	uvp_post_modify_av_t, uvp_pre_destroy_av, uvp_post_destroy_av_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_query_av
+* NAME
+*	uvp_pre_query_av -- Pre-ioctl operation for ib_query_av()
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_av) (
+	IN	const ib_av_handle_t	h_uvp_av,
+	IN OUT	ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_query_av() is implemented by vendor. It is the pre-ioctl routine
+*	for the AL call ib_query_av() in user-mode.
+*
+* PARAMETERS
+*	h_uvp_av
+*		[in] Vendor's handle to the address vector in user-mode library
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INVALID_AV_HANDLE
+*		AV handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in Vendor library to complete the call.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_av, uvp_post_create_av_t, uvp_post_query_av_t, uvp_pre_modify_av,
+*	uvp_post_modify_av_t, uvp_pre_destroy_av, uvp_post_destroy_av_t
+*
+*********/
+
+/********/
+/****f* user-mode Verbs/uvp_post_query_av_t
+* NAME
+*	uvp_post_query_av_t -- Post-ioctl operation for user-mode ib_query_av()
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_av_t) (
+	IN	const ib_av_handle_t	h_uvp_av,
+	IN	ib_api_status_t		ioctl_status,
+	IN OUT	ib_av_attr_t		*p_addr_vector,
+	IN OUT	ib_pd_handle_t		*ph_pd,
+	IN	ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_query_av_t() is implemented by vendor. It is the post-ioctl routine
+*	for the AL call ib_query_av() in user-mode.
+*	UAL provides the results of the query to the vendor library in this
+*	post-ioctl routine.
+*
+* PARAMETERS
+*	h_uvp_av
+*		[in] Vendor's handle to the address vector in user-mode library
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	p_addr_vector
+*		[in out] AV attribute (as returned by the ioctl).
+*	ph_pd
+*		[out] The vendor library PD handle associated with this AV.
+*	p_umv_buf
+*		[in]
+*		On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_av).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_av, uvp_post_create_av_t, uvp_pre_query_av, uvp_pre_modify_av, +* uvp_post_modify_av_t, uvp_pre_destroy_av, uvp_post_destroy_av_t +* +*********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_modify_av +* NAME +* uvp_pre_modify_av -- Pre-ioctl function to modify AV +* +* SYNOPSIS +*/ +typedef ib_api_status_t +(AL_API *uvp_pre_modify_av) ( + IN const ib_av_handle_t h_uvp_av, + IN const ib_av_attr_t *p_addr_vector, + IN OUT ci_umv_buf_t *p_umv_buf ); +/* +* DESCRIPTION +* uvp_pre_modify_av() is implemented by vendor. It is the pre-ioctl routine +* for ib_modify_av(). +* +* PARAMETERS +* h_uvp_av +* [in] Vendor's AV handle in user-mode library. +* p_addr_vector +* [in] Parameters to modify the address vector handle +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl is successful. +* IB_INVALID_SETTING +* Values in the vector is not valid. +* IB_INVALID_AV_HANDLE +* The AV handle is invalid. +* IB_INSUFFICIENT_RESOURCES +* Insufficient resources in Vendor library to complete the call. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_av, uvp_post_create_av_t, uvp_pre_query_av, uvp_post_query_av_t, +* uvp_post_modify_av_t, uvp_pre_destroy_av, uvp_post_destroy_av_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_modify_av_t +* NAME +* uvp_post_modify_av_t -- Post-ioctl function to modify AV +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_modify_av_t) ( + IN const ib_av_handle_t h_uvp_av, + IN ib_api_status_t ioctl_status, + IN ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_post_modify_av_t() is implemented by vendor to modify the attributes +* of AV. It is the post-ioctl routine for ib_modify_av(). +* +* PARAMETERS +* h_uvp_av +* [in] Vendor's av handle in user-mode library. +* ioctl_status +* [in] The ioctl status of the AL API. +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_modify_av). +* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_av, uvp_post_create_av_t, uvp_pre_query_av, uvp_post_query_av_t, +* uvp_pre_modify_av, uvp_pre_destroy_av, uvp_post_destroy_av_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_destroy_av +* NAME +* uvp_pre_destroy_av -- Pre-ioctl function to destroy AV +* +* SYNOPSIS +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_destroy_av) ( + IN const ib_av_handle_t h_uvp_av ); + +/* +* DESCRIPTION +* uvp_pre_destroy_av() is implemented by vendor to destroy the AV. +* It is the pre-ioctl routine for ib_destroy_av(). +* +* PARAMETERS +* h_uvp_av +* [in] Vendor's AV handle in user-mode library. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl is successful. 
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_av, uvp_post_create_av_t, uvp_pre_query_av, uvp_post_query_av_t,
+*	uvp_pre_modify_av, uvp_post_modify_av_t, uvp_post_destroy_av_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_destroy_av_t
+* NAME
+*	uvp_post_destroy_av_t -- Post-ioctl function to destroy AV
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_destroy_av_t) (
+	IN	const ib_av_handle_t	h_uvp_av,
+	IN	ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_destroy_av_t() is implemented by vendor. It is the post-ioctl
+*	routine for ib_destroy_av().
+*
+* PARAMETERS
+*	h_uvp_av
+*		[in] Vendor's AV handle in user-mode library.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_av, uvp_post_create_av_t, uvp_pre_query_av, uvp_post_query_av_t,
+*	uvp_pre_modify_av, uvp_post_modify_av_t, uvp_pre_destroy_av
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_create_srq
+* NAME
+*	uvp_pre_create_srq -- Pre-ioctl function to Create a Shared Receive Queue.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_create_srq) (
+	IN	const ib_pd_handle_t		h_uvp_pd,
+	IN	const ib_srq_attr_t* const	p_srq_attr,
+	IN OUT	ci_umv_buf_t			*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_create_srq() is implemented by vendor. It is the pre-ioctl routine
+*	for ib_create_srq().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle in user-mode library.
+*	p_srq_attr
+*		[in] Initial attributes with which the srq must be created.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*	IB_INVALID_PD_HANDLE
+*		The PD handle is invalid.
+*	IB_UNSUPPORTED
+*		SRQs are not supported by the channel adapter.
+*	IB_INVALID_MAX_WRS
+*		The requested maximum send or receive work request depth could not be
+*		supported.
+*	IB_INVALID_MAX_SGE
+*		The requested maximum number of scatter-gather entries for the send or
+*		receive queue could not be supported.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in Vendor library to complete the call.
+*	IB_INVALID_PARAMETER
+*		At least one parameter is invalid.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq,
+*	uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_create_srq_t
+* NAME
+*	uvp_post_create_srq_t -- Post-ioctl function to Create a Shared Receive Queue.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_create_srq_t) (
+	IN	const ib_pd_handle_t	h_uvp_pd,
+	IN	ib_api_status_t		ioctl_status,
+	OUT	ib_srq_handle_t		*ph_uvp_srq,
+	IN	ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_create_srq_t() is implemented by vendor. It is the post-ioctl routine
+*	for ib_create_srq().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle in user-mode library.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+* ph_uvp_srq +* [out] Vendor's srq handle for the newly created srq (in user-mode +* library). +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_srq). +* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. +* +* PORTABILITY +* User Mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_pre_query_srq, uvp_post_query_srq_t, uvp_pre_modify_srq, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_pre_modify_srq +* NAME +* uvp_pre_modify_srq -- Pre-ioctl function to Modify attributes of the +* specified srq. +* +* SYNOPSIS +* +*/ + +typedef ib_api_status_t +(AL_API *uvp_pre_modify_srq) ( + IN const ib_srq_handle_t h_uvp_srq, + IN const ib_srq_attr_t * const p_srq_attr, + IN const ib_srq_attr_mask_t srq_attr_mask, + IN OUT ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_pre_modify_srq() is implemented by vendor to modify the attributes of a +* srq. It is the pre-ioctl routine for ib_modify_srq(). +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's srq Handle to the queue pair (in user-mode library) +* whose state is to be modified. +* p_srq_attr +* [in] Specifies what attributes need to be modified in the srq. +* srq_attr_mask +* [in] Specifies which fields of ib_srq_attr_t are valid. +* p_umv_buf +* [in out] On input, UAL provides this buffer template. +* On return from this function, p_umv_buf contains +* any vendor-specific record to be exchanged with the vendor's +* HCA driver. +* +* RETURN VALUE +* IB_SUCCESS +* The pre-ioctl call is successful. +* IB_INSUFFICIENT_RESOURCES +* Insufficient resources to complete the requested operation. +* IB_INVALID_SRQ_HANDLE +* Invalid srq handle. +* IB_UNSUPPORTED +* Requested operation is not supported, for e.g. Atomic operations. +* +* PORTABILITY +* User mode +* +* SEE ALSO +* uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t, +* uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t +* +********/ + +/********/ + +/****f* user-mode Verbs/uvp_post_modify_srq_t +* NAME +* uvp_post_modify_srq_t -- Post-ioctl function to Modify attributes of +* the specified srq. +* +* SYNOPSIS +*/ + +typedef void +(AL_API *uvp_post_modify_srq_t) ( + IN const ib_srq_handle_t h_uvp_srq, + IN ib_api_status_t ioctl_status, + IN ci_umv_buf_t *p_umv_buf ); + +/* +* DESCRIPTION +* uvp_post_modify_srq_t() is implemented by vendor to modify the srq attributes. +* It is the post-ioctl routine for ib_modify_srq(). +* +* PARAMETERS +* h_uvp_srq +* [in] Vendor's srq Handle to the queue pair (in user-mode library) +* whose state is modified. +* ioctl_status +* [in] The ioctl status of the AL API. +* p_umv_buf +* [in out] On input, it contains any vendor-specific private information +* exchanged with the vendor's Verbs Provider Driver (uvp_pre_modify_srq). +* Vendor is expected to check vendor-specific status in +* umv_buf as appropriate. +* +* RETURN VALUE +* This function does not return a value. 
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,
+*	uvp_pre_modify_srq, uvp_pre_destroy_srq, uvp_post_destroy_srq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_query_srq
+* NAME
+*	uvp_pre_query_srq -- Pre-ioctl function to Query the attributes of the SRQ
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_srq) (
+	IN		ib_srq_handle_t		h_uvp_srq,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_query_srq() is implemented by the vendor.  It is the pre-ioctl
+*	routine for the AL call ib_query_srq().
+*
+* PARAMETERS
+*	h_uvp_srq
+*		[in] Vendor's handle to the SRQ (in user-mode library).
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INVALID_SRQ_HANDLE
+*		The SRQ handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in the vendor library to complete the call.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_create_srq_t, uvp_post_query_srq_t, uvp_pre_modify_srq,
+*	uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_query_srq_t
+* NAME
+*	uvp_post_query_srq_t -- Post-ioctl operation for user-mode ib_query_srq()
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_srq_t) (
+	IN		ib_srq_handle_t		h_uvp_srq,
+	IN		ib_api_status_t		ioctl_status,
+	IN		ib_srq_attr_t		*p_query_attr,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_query_srq_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_query_srq().
+*	UAL provides the results of the query to the vendor library in this
+*	post-ioctl routine.
+*
+* PARAMETERS
+*	h_uvp_srq
+*		[in] Vendor's handle to the SRQ (in user-mode library).
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	p_query_attr
+*		[in] SRQ attributes as returned by the ioctl.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_srq).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_pre_modify_srq,
+*	uvp_post_modify_srq_t, uvp_pre_destroy_srq, uvp_post_destroy_srq_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_destroy_srq
+* NAME
+*	uvp_pre_destroy_srq -- Pre-ioctl function to Destroy a Shared Receive Queue (SRQ).
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_destroy_srq) (
+	IN	const	ib_srq_handle_t		h_uvp_srq );
+
+/*
+* DESCRIPTION
+*	uvp_pre_destroy_srq() is the pre-ioctl routine implemented by the vendor
+*	to destroy an SRQ.
+*	UAL invokes this pre-ioctl routine to destroy an SRQ.
+*	The vendor is expected to perform any preliminary steps in preparation
+*	for destroying the SRQ and perform any book-keeping.
+*
+* PARAMETERS
+*	h_uvp_srq
+*		[in] Vendor's handle to the SRQ (in user-mode library)
+*		that needs to be destroyed.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,
+*	uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_post_destroy_srq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_destroy_srq_t
+* NAME
+*	uvp_post_destroy_srq_t -- Post-ioctl function to Destroy a Shared Receive Queue (SRQ).
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_destroy_srq_t) (
+	IN	const	ib_srq_handle_t		h_uvp_srq,
+	IN		ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_destroy_srq_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_destroy_srq().
+*	UAL invokes this post-ioctl routine to destroy the SRQ when it receives
+*	asynchronous notification from the user-mode proxy in the kernel.
+*
+* PARAMETERS
+*	h_uvp_srq
+*		[in] Vendor's handle to the SRQ (in user-mode library)
+*		that needs to be destroyed.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_create_srq_t, uvp_pre_query_srq, uvp_post_query_srq_t,
+*	uvp_pre_modify_srq, uvp_post_modify_srq_t, uvp_pre_destroy_srq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_create_qp
+* NAME
+*	uvp_pre_create_qp -- Pre-ioctl function to Create a Queue Pair.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_create_qp) (
+	IN	const	ib_pd_handle_t		h_uvp_pd,
+	IN	const	ib_qp_create_t		*p_create_attr,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_create_qp() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_create_qp().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle in user-mode library.
+*	p_create_attr
+*		[in] Initial attributes with which the QP must be created.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*	IB_INVALID_PD_HANDLE
+*		The PD handle is invalid.
+*	IB_INVALID_CQ_HANDLE
+*		The send or receive completion queue to associate with the queue pair
+*		is invalid.
+*	IB_UNSUPPORTED
+*		The specified queue pair type was not supported by the channel adapter.
+*	IB_INVALID_MAX_WRS
+*		The requested maximum send or receive work request depth could not be
+*		supported.
+*	IB_INVALID_MAX_SGE
+*		The requested maximum number of scatter-gather entries for the send or
+*		receive queue could not be supported.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in the vendor library to complete the call.
+*	IB_INVALID_PARAMETER
+*		At least one parameter is invalid.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_post_create_qp_t, uvp_pre_query_qp, uvp_post_query_qp_t, uvp_pre_modify_qp,
+*	uvp_post_modify_qp_t, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_create_qp_t
+* NAME
+*	uvp_post_create_qp_t -- Post-ioctl function to Create a Queue Pair.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_create_qp_t) (
+	IN	const	ib_pd_handle_t		h_uvp_pd,
+	IN		ib_api_status_t		ioctl_status,
+	OUT		ib_qp_handle_t		*ph_uvp_qp,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_create_qp_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_create_qp().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle in user-mode library.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	ph_uvp_qp
+*		[out] Vendor's QP handle for the newly created QP (in user-mode
+*		library).
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_qp).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_pre_query_qp, uvp_post_query_qp_t, uvp_pre_modify_qp,
+*	uvp_post_modify_qp_t, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_modify_qp
+* NAME
+*	uvp_pre_modify_qp -- Pre-ioctl function to Modify attributes of the
+*	specified QP.
+*
+* SYNOPSIS
+*
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_modify_qp) (
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN	const	ib_qp_mod_t		*p_modify_attr,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_modify_qp() is implemented by the vendor to modify the attributes
+*	of a QP.  It is the pre-ioctl routine for ib_modify_qp().
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle (in user-mode library) to the queue pair
+*		whose state is to be modified.
+*	p_modify_attr
+*		[in] Specifies which attributes need to be modified in the QP.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the requested operation.
+*	IB_INVALID_QP_HANDLE
+*		Invalid QP handle.
+*	IB_UNSUPPORTED
+*		The requested operation is not supported (e.g., atomic operations).
+*	IB_QP_INVALID_STATE
+*		Invalid state transition request; the current QP state does not
+*		allow the requested transition.
+*	IB_INVALID_PKEY
+*		The P_Key index specified in the modify request is not a valid entry
+*		in the P_Key table, or the index is out of range.
+*	IB_INVALID_PMIG_STATE
+*		Invalid path migration state specified in the request.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_query_qp, uvp_post_query_qp_t,
+*	uvp_post_modify_qp_t, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_modify_qp_t
+* NAME
+*	uvp_post_modify_qp_t -- Post-ioctl function to Modify attributes of
+*	the specified QP.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_modify_qp_t) (
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN		ib_api_status_t		ioctl_status,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_modify_qp_t() is implemented by the vendor to modify the QP
+*	attributes.  It is the post-ioctl routine for ib_modify_qp().
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle (in user-mode library) to the queue pair
+*		whose state has been modified.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_modify_qp).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_query_qp, uvp_post_query_qp_t,
+*	uvp_pre_modify_qp, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+********/
+
+/********/
+
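+/*
+* EXAMPLE (illustrative only)
+*	Every pre/post pair in this header is driven by UAL in the same
+*	three-phase sequence: vendor pre-ioctl, kernel transition, vendor
+*	post-ioctl.  A simplified sketch of the flow for ib_modify_qp();
+*	al_do_modify_qp_ioctl() is a hypothetical stand-in for the real
+*	UAL ioctl plumbing, and uvp is the provider's uvp_interface_t
+*	(defined later in this header):
+*
+*	ib_api_status_t
+*	ual_modify_qp(
+*		IN	const	ib_qp_handle_t	h_uvp_qp,
+*		IN	const	ib_qp_mod_t	*p_qp_mod )
+*	{
+*		ci_umv_buf_t	umv_buf;
+*		ib_api_status_t	status;
+*
+*		cl_memclr( &umv_buf, sizeof(umv_buf) );
+*
+*		// Phase 1: let the vendor library prepare its private record.
+*		status = uvp.pre_modify_qp( h_uvp_qp, p_qp_mod, &umv_buf );
+*		if( status != IB_SUCCESS )
+*			return status;
+*
+*		// Phase 2: carry the request (and umv_buf) to the kernel.
+*		status = al_do_modify_qp_ioctl( h_uvp_qp, p_qp_mod, &umv_buf );
+*
+*		// Phase 3: the vendor inspects the result unconditionally.
+*		uvp.post_modify_qp( h_uvp_qp, status, &umv_buf );
+*		return status;
+*	}
+*/
+
+/********/
+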
+/****f* user-mode Verbs/uvp_pre_query_qp
+* NAME
+*	uvp_pre_query_qp -- Pre-ioctl function to Query the attributes of the QP
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_qp) (
+	IN		ib_qp_handle_t		h_uvp_qp,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_query_qp() is implemented by the vendor.  It is the pre-ioctl
+*	routine for the AL call ib_query_qp().
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle to the QP (in user-mode library).
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl function succeeded.
+*	IB_INVALID_QP_HANDLE
+*		The QP handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in the vendor library to complete the call.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_post_query_qp_t, uvp_pre_modify_qp,
+*	uvp_post_modify_qp_t, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_query_qp_t
+* NAME
+*	uvp_post_query_qp_t -- Post-ioctl operation for user-mode ib_query_qp()
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_qp_t) (
+	IN		ib_qp_handle_t		h_uvp_qp,
+	IN		ib_api_status_t		ioctl_status,
+	IN		ib_qp_attr_t		*p_query_attr,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_query_qp_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_query_qp().
+*	UAL provides the results of the query to the vendor library in this
+*	post-ioctl routine.
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle to the QP (in user-mode library).
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	p_query_attr
+*		[in] QP attributes as returned by the ioctl.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_qp).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User Mode
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_query_qp, uvp_pre_modify_qp,
+*	uvp_post_modify_qp_t, uvp_pre_destroy_qp, uvp_post_destroy_qp_t
+*
+*********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_destroy_qp
+* NAME
+*	uvp_pre_destroy_qp -- Pre-ioctl function to Destroy a Queue Pair.
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_destroy_qp) (
+	IN	const	ib_qp_handle_t		h_uvp_qp );
+
+/*
+* DESCRIPTION
+*	uvp_pre_destroy_qp() is the pre-ioctl routine implemented by the vendor
+*	to destroy a QP.
+*	UAL invokes this pre-ioctl routine to destroy the QP.
+*	The vendor is expected to perform any preliminary steps in preparation
+*	for destroying the QP and perform any book-keeping.
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle to the QP (in user-mode library)
+*		that needs to be destroyed.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_query_qp, uvp_post_query_qp_t,
+*	uvp_pre_modify_qp, uvp_post_modify_qp_t, uvp_post_destroy_qp_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_destroy_qp_t
+* NAME
+*	uvp_post_destroy_qp_t -- Post-ioctl function to Destroy a Queue Pair.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_destroy_qp_t) (
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN		ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_destroy_qp_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_destroy_qp().
+*	UAL invokes this post-ioctl routine to destroy the QP when it receives
+*	asynchronous notification from the user-mode proxy in the kernel.
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's handle to the QP (in user-mode library)
+*		that needs to be destroyed.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_query_qp, uvp_post_query_qp_t,
+*	uvp_pre_modify_qp, uvp_post_modify_qp_t, uvp_pre_destroy_qp
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_create_cq
+* NAME
+*	uvp_pre_create_cq -- Pre-ioctl function to Create a completion queue (CQ)
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_create_cq) (
+	IN	const	ib_ca_handle_t		h_uvp_ca,
+	IN OUT		uint32_t* const		p_size,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_pre_create_cq() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_create_cq().
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's handle to an open CA (in user-mode library).
+*	p_size
+*		[in out] Points to a variable containing the number of CQ entries
+*		requested by the consumer.
+*		On return, points to the size of the CQ that was created
+*		by the provider.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful.
+*	IB_INVALID_CA_HANDLE
+*		The h_uvp_ca passed is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*	IB_INVALID_CQ_SIZE
+*		The requested CQ size is not supported.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_post_create_cq_t, uvp_pre_resize_cq, uvp_post_resize_cq_t,
+*	uvp_pre_query_cq, uvp_post_query_cq_t, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_create_cq_t
+* NAME
+*	uvp_post_create_cq_t -- Post-ioctl function to Create a completion queue (CQ)
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_create_cq_t) (
+	IN	const	ib_ca_handle_t		h_uvp_ca,
+	IN		ib_api_status_t		ioctl_status,
+	IN	const	uint32_t		size,
+	OUT		ib_cq_handle_t		*ph_uvp_cq,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_create_cq_t() is implemented by the vendor to create a CQ.
+*	It is the post-ioctl routine for ib_create_cq().
+*
+* PARAMETERS
+*	h_uvp_ca
+*		[in] Vendor's handle to an open CA (in user-mode library).
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	size
+*		[in] Size of the CQ that was created by the provider.
+*		If the VPD created the CQ in the kernel, this is the value as set by
+*		the VPD.  If the UVP creates the CQ in user mode, then the UVP already
+*		knows the size of the CQ from the pre-ioctl.
+*	ph_uvp_cq
+*		[out] Vendor's handle to the newly created CQ (in user-mode library).
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_cq).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_pre_resize_cq, uvp_post_resize_cq_t,
+*	uvp_pre_query_cq, uvp_post_query_cq_t, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
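+/*
+* EXAMPLE (illustrative only)
+*	p_size is an in/out negotiation: the consumer asks for a minimum
+*	number of entries and the provider may round the value up to what
+*	the hardware actually allocates.  A hedged sketch of the size
+*	handling a vendor's uvp_pre_create_cq might perform; MY_MAX_CQ_SIZE
+*	is hypothetical:
+*
+*	static ib_api_status_t AL_API
+*	my_pre_create_cq(
+*		IN	const	ib_ca_handle_t	h_uvp_ca,
+*		IN OUT		uint32_t* const	p_size,
+*		IN OUT		ci_umv_buf_t	*p_umv_buf )
+*	{
+*		uint32_t	size = *p_size;
+*		uint32_t	n = 1;
+*
+*		if( !size || size > MY_MAX_CQ_SIZE )
+*			return IB_INVALID_CQ_SIZE;
+*
+*		// Round up to the next power of two, as many HCAs require.
+*		while( n < size )
+*			n <<= 1;
+*
+*		*p_size = n;	// report the size actually created
+*		return IB_SUCCESS;
+*	}
+*/
+
+/********/
+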
+/****f* user-mode Verbs/uvp_pre_resize_cq
+* NAME
+*	uvp_pre_resize_cq -- Pre-ioctl function to resize a CQ.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_resize_cq) (
+	IN	const	ib_cq_handle_t		h_uvp_cq,
+	IN OUT		uint32_t* const		p_size,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_resize_cq() is implemented by the vendor to resize the CQ.
+*	It is the pre-ioctl routine for ib_resize_cq().
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the already created CQ (in user-mode library).
+*	p_size
+*		[in out] On input, points to a variable containing the number
+*		of CQ entries requested by the consumer.
+*		On completion, points to the size of the CQ that was resized by
+*		the provider.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful.
+*	IB_INVALID_CQ_HANDLE
+*		The CQ handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*	IB_INVALID_CQ_SIZE
+*		The requested CQ size is not supported.
+*	IB_OVERFLOW
+*		The CQ has more entries than the resize request.  The CQ is not
+*		modified, and old entries still exist.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_post_resize_cq_t,
+*	uvp_pre_query_cq, uvp_post_query_cq_t, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_resize_cq_t
+* NAME
+*	uvp_post_resize_cq_t -- Post-ioctl function to resize a CQ.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_resize_cq_t) (
+	IN	const	ib_cq_handle_t		h_uvp_cq,
+	IN		ib_api_status_t		ioctl_status,
+	IN	const	uint32_t		size,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_resize_cq_t() is implemented by the vendor to resize the CQ.
+*	It is the post-ioctl routine for ib_resize_cq().
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the already created CQ (in user-mode library).
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	size
+*		[in] Size of the CQ that was resized by the provider.
+*		If the VPD resized the CQ in the kernel, this is the value as set by
+*		the VPD.  If the UVP resizes the CQ in user mode, then the UVP already
+*		knows the size of the CQ from the pre-ioctl.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_resize_cq).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_pre_resize_cq,
+*	uvp_pre_query_cq, uvp_post_query_cq_t, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_query_cq
+* NAME
+*	uvp_pre_query_cq -- Pre-ioctl to Query the number of entries
+*	configured for the CQ.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_cq) (
+	IN	const	ib_cq_handle_t		h_uvp_cq,
+	IN OUT		uint32_t* const		p_size,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_query_cq() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_query_cq().
+*	If the CQ was created entirely in the vendor's user-mode library, the
+*	vendor may complete the query here and return IB_VERBS_PROCESSING_DONE,
+*	in which case the kernel is not queried and the post_query_cq handler
+*	is not invoked.
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the already created CQ (in user-mode library).
+*
+*	p_size
+*		[out] Size of the CQ if processing ends in user mode.
+*
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The operation was successful.
+*	IB_INVALID_CQ_HANDLE
+*		The CQ handle is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources in the vendor library to complete the call.
+*	IB_VERBS_PROCESSING_DONE
+*		The UVP fully processed the request.  The post_query_cq handler
+*		will not be invoked.
+*
+* PORTABILITY
+*	User mode.
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_pre_resize_cq,
+*	uvp_post_resize_cq_t, uvp_post_query_cq_t, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_query_cq_t
+* NAME
+*	uvp_post_query_cq_t -- Post-ioctl to Query the number of entries
+*	configured for the CQ.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_cq_t) (
+	IN	const	ib_cq_handle_t		h_uvp_cq,
+	IN		ib_api_status_t		ioctl_status,
+	IN	const	uint32_t		size,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_query_cq_t() is implemented by the vendor to query a CQ.
+*	It is the post-ioctl routine for ib_query_cq().
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the already created CQ (in user-mode library).
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	size
+*		[in] The size of the CQ returned by the IOCTL.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_cq).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_pre_resize_cq,
+*	uvp_post_resize_cq_t, uvp_pre_query_cq, uvp_pre_destroy_cq,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
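+/*
+* EXAMPLE (illustrative only)
+*	When the UVP itself tracks the CQ size, the query can complete
+*	entirely in user mode by returning IB_VERBS_PROCESSING_DONE from
+*	the pre-ioctl above, skipping both the kernel transition and the
+*	post handler.  A sketch, assuming a hypothetical my_cq_t that
+*	records the size at creation time:
+*
+*	static ib_api_status_t AL_API
+*	my_pre_query_cq(
+*		IN	const	ib_cq_handle_t	h_uvp_cq,
+*		IN OUT		uint32_t* const	p_size,
+*		IN OUT		ci_umv_buf_t	*p_umv_buf )
+*	{
+*		my_cq_t	*p_cq = (my_cq_t*)h_uvp_cq;
+*
+*		*p_size = p_cq->size;		// answered from the user-mode object
+*		return IB_VERBS_PROCESSING_DONE;	// no ioctl, no post handler
+*	}
+*/
+
+/********/
+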
+/****f* user-mode Verbs/uvp_pre_destroy_cq
+* NAME
+*	uvp_pre_destroy_cq -- Pre-ioctl function to Destroy a CQ.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_destroy_cq) (
+	IN	const	ib_cq_handle_t		h_uvp_cq );
+
+/*
+* DESCRIPTION
+*	uvp_pre_destroy_cq() is implemented by the vendor to destroy a CQ.
+*	It is the pre-ioctl routine for ib_destroy_cq().
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the CQ (in user-mode library)
+*		that needs to be destroyed.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call is successful.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_pre_resize_cq,
+*	uvp_post_resize_cq_t, uvp_pre_query_cq, uvp_post_query_cq_t,
+*	uvp_post_destroy_cq_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_destroy_cq_t
+* NAME
+*	uvp_post_destroy_cq_t -- Post-ioctl function to Destroy a CQ.
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_destroy_cq_t) (
+	IN	const	ib_cq_handle_t		h_uvp_cq,
+	IN		ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_destroy_cq_t() is implemented by the vendor to destroy a CQ.
+*	It is the post-ioctl routine for ib_destroy_cq().
+*	UAL invokes this post-ioctl routine to destroy the CQ when it receives
+*	asynchronous notification from the user-mode proxy.
+*
+* PARAMETERS
+*	h_uvp_cq
+*		[in] Vendor's handle to the CQ (in user-mode library)
+*		that needs to be destroyed.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_pre_resize_cq,
+*	uvp_post_resize_cq_t, uvp_pre_query_cq, uvp_post_query_cq_t,
+*	uvp_pre_destroy_cq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_create_mw
+* NAME
+*	uvp_pre_create_mw -- Pre-ioctl function to create a memory window
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_create_mw) (
+	IN	const	ib_pd_handle_t		h_uvp_pd,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_pre_create_mw() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_create_mw().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle (in user-mode library)
+*		to use for this memory window.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory window allocation completed successfully.
+*	IB_INSUFFICIENT_RESOURCES
+*		Not enough resources to complete the request.
+*	IB_INVALID_PD_HANDLE
+*		The PD handle supplied is invalid.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_post_create_mw_t, uvp_pre_query_mw, uvp_post_query_mw_t,
+*	uvp_bind_mw, uvp_pre_destroy_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_create_mw_t
+* NAME
+*	uvp_post_create_mw_t -- Post-ioctl function to create a memory window
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_create_mw_t) (
+	IN	const	ib_pd_handle_t		h_uvp_pd,
+	IN		ib_api_status_t		ioctl_status,
+	IN		net32_t			rkey,
+	OUT		ib_mw_handle_t		*ph_uvp_mw,
+	IN		ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_post_create_mw_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_create_mw().
+*
+* PARAMETERS
+*	h_uvp_pd
+*		[in] Vendor's Protection domain handle (in user-mode library)
+*		to use for this memory window.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	rkey
+*		[in] Remote access key that can be exchanged with a remote node to
+*		perform RDMA transactions on this memory window.
+*	ph_uvp_mw
+*		[out] Vendor's handle (in user-mode library) to the newly created
+*		memory window.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_create_mw).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_pre_query_mw, uvp_post_query_mw_t,
+*	uvp_bind_mw, uvp_pre_destroy_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_query_mw
+* NAME
+*	uvp_pre_query_mw -- Pre-ioctl to Query a memory window
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_query_mw) (
+	IN	const	ib_mw_handle_t		h_uvp_mw,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_query_mw() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_query_mw().
+*
+* PARAMETERS
+*	h_uvp_mw
+*		[in] Vendor's memory window handle (in user-mode library)
+*		whose attributes are being retrieved.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call completed successfully.
+*	IB_INVALID_MW_HANDLE
+*		The memory window handle supplied is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Not enough resources to complete the request.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_post_create_mw_t, uvp_post_query_mw_t,
+*	uvp_bind_mw, uvp_pre_destroy_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_query_mw_t
+* NAME
+*	uvp_post_query_mw_t -- Post-ioctl to Query a memory window
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_query_mw_t) (
+	IN	const	ib_mw_handle_t		h_uvp_mw,
+	IN		ib_api_status_t		ioctl_status,
+	IN		net32_t			rkey,
+	OUT		ib_pd_handle_t		*ph_pd,
+	IN		ci_umv_buf_t		*p_umv_buf );
+/*
+* DESCRIPTION
+*	uvp_post_query_mw_t() is implemented by the vendor.  It is the post-ioctl
+*	routine for ib_query_mw().
+*
+* PARAMETERS
+*	h_uvp_mw
+*		[in] Vendor's memory window handle (in user-mode library)
+*		whose attributes are being retrieved.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	rkey
+*		[in] Current R_KEY associated with this mw_handle.
+*	ph_pd
+*		[out] Protection domain handle associated with this mw_handle,
+*		as returned by the ioctl.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_query_mw).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_post_create_mw_t, uvp_pre_query_mw,
+*	uvp_bind_mw, uvp_pre_destroy_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_bind_mw
+* NAME
+*	uvp_bind_mw -- Bind a memory window to a memory region.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_bind_mw) (
+	IN	const	ib_mw_handle_t		h_uvp_mw,
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN		ib_bind_wr_t		*p_mw_bind,
+	OUT		net32_t* const		p_rkey );
+/*
+* DESCRIPTION
+*	This routine posts a request to bind a memory window to a registered
+*	memory region.  If the queue pair was created with selectable signaling,
+*	a completion queue entry is generated once the operation completes
+*	successfully, indicating that the bind operation has completed.  The
+*	IB_POST_FENCE option can be specified to cause the requestor to wait
+*	until outstanding RDMA operations have completed.
+*
+* PARAMETERS
+*	h_uvp_mw
+*		[in] Vendor's handle (in user-mode library) to the memory window
+*		that needs to be bound to a memory region.
+*	h_uvp_qp
+*		[in] Vendor's QP handle (in user-mode library) to which
+*		this bind request is to be posted.
+*	p_mw_bind
+*		[in] Input parameters for this bind request, consisting of the
+*		virtual address range of the bind request, access rights, etc.
+*	p_rkey
+*		[out] On successful completion, the new R_KEY associated with
+*		this memory window.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The memory bind operation was posted successfully.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request; there are no more
+*		free WQEs to post this request.
+*	IB_INVALID_MW_HANDLE
+*		The memory window handle supplied is invalid.
+*	IB_INVALID_PERMISSION
+*		Invalid access rights specified in the request.
+*	IB_INVALID_SERVICE_TYPE
+*		Invalid service type for this qp_handle.
+*	IB_INVALID_PARAMETER
+*		The address or length parameter specified is invalid.
+*	IB_INVALID_RKEY
+*		The R_KEY specified is invalid for the memory region being bound.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_post_create_mw_t, uvp_pre_query_mw,
+*	uvp_post_query_mw_t, uvp_pre_destroy_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
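+/*
+* EXAMPLE (illustrative only)
+*	Consumer-side sketch of binding a window to a registered region
+*	and using the returned R_KEY.  Population of ib_bind_wr_t is
+*	elided because its members are defined elsewhere in this header
+*	set; h_mw, h_qp and uvp (the provider's uvp_interface_t) are
+*	assumed to exist:
+*
+*	ib_bind_wr_t	bind_wr;
+*	net32_t		rkey;
+*	ib_api_status_t	status;
+*
+*	cl_memclr( &bind_wr, sizeof(bind_wr) );
+*	// ... fill in the virtual address range, length and access
+*	// rights of the bind request here ...
+*
+*	status = uvp.bind_mw( h_mw, h_qp, &bind_wr, &rkey );
+*	if( status == IB_SUCCESS )
+*	{
+*		// rkey may now be sent to the remote peer for RDMA access
+*		// limited to the bound window.
+*	}
+*/
+
+/********/
+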
+/****f* user-mode Verbs/uvp_pre_destroy_mw
+* NAME
+*	uvp_pre_destroy_mw -- Pre-ioctl function to destroy a memory window
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_destroy_mw) (
+	IN	const	ib_mw_handle_t		h_uvp_mw );
+
+/*
+* DESCRIPTION
+*	uvp_pre_destroy_mw() is implemented by the vendor.  It is the pre-ioctl
+*	routine for ib_destroy_mw().
+*
+* PARAMETERS
+*	h_uvp_mw
+*		[in] Vendor's handle (in user-mode library) to the memory window.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl succeeded.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_post_create_mw_t, uvp_pre_query_mw,
+*	uvp_post_query_mw_t, uvp_bind_mw, uvp_post_destroy_mw_t
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_destroy_mw_t
+* NAME
+*	uvp_post_destroy_mw_t -- Post-ioctl function to destroy a memory window
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_destroy_mw_t) (
+	IN	const	ib_mw_handle_t		h_uvp_mw,
+	IN		ib_api_status_t		ioctl_status );
+/*
+* DESCRIPTION
+*	uvp_post_destroy_mw_t() is implemented by the vendor.  It is the post-ioctl
+*	routine to destroy a memory window.
+*
+* PARAMETERS
+*	h_uvp_mw
+*		[in] Vendor's handle to the memory window.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_pre_create_mw, uvp_post_create_mw_t, uvp_pre_query_mw,
+*	uvp_post_query_mw_t, uvp_bind_mw, uvp_pre_destroy_mw
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_send
+* NAME
+*	uvp_post_send -- Post a work request to the send side of a queue pair.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_post_send) (
+	IN	const	void* __ptr64			h_qp,
+	IN		ib_send_wr_t*	const	p_send_wr,
+	OUT		ib_send_wr_t**		pp_send_failure );
+/*
+* DESCRIPTION
+*	This routine posts a work request to the send side of the queue pair.
+*	The different types of work request that can be posted are explained in
+*	the ib_send_wr_t structure.  For exact details on ordering rules, please
+*	consult Volume 1 of the InfiniBand specification.  If more requests are
+*	posted than the queue is configured for, an immediate error is returned.
+*
+* PARAMETERS
+*	h_qp
+*		[in] Type-cast as appropriate for user/kernel mode, this is
+*		the queue pair handle to which the send work request is being
+*		posted.
+*	p_send_wr
+*		[in] List of work requests that need to be sent.
+*	pp_send_failure
+*		[out] The first work request that failed.
+*
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		All the work requests were posted successfully.
+*	IB_INVALID_QP_HANDLE
+*		The qp_handle supplied is invalid.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*		There are no more work elements in the channel interface to
+*		process this request, and the total number of outstanding work
+*		requests has been exceeded.
+*	IB_INVALID_WR_TYPE
+*		The work request type was not valid.
+*	IB_INVALID_QP_STATE
+*		The queue pair is in the Reset, Init, RTR or Error state.
+*	IB_INVALID_MAX_SGE
+*		The work request has more scatter-gather elements than the QP is
+*		configured to support.
+*	IB_UNSUPPORTED
+*		Atomic or reliable datagram requests are not supported by this HCA.
+*	IB_INVALID_ADDR_HANDLE
+*		The address handle supplied in the work request is invalid.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* NOTES
+*	Please refer to Table 81 and Table 82 for allowed operation types
+*	on different types of queue pairs, and the different modifiers
+*	acceptable for the work request for different QP service types.
+*
+* SEE ALSO
+*	uvp_post_recv, uvp_poll_cq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_recv
+* NAME
+*	uvp_post_recv -- Post a work request to the receive queue of a queue pair.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_post_recv) (
+	IN	const	void* __ptr64			h_qp,
+	IN		ib_recv_wr_t*	const	p_recv_wr,
+	OUT		ib_recv_wr_t**		pp_recv_failure );
+
+/*
+* DESCRIPTION
+*	This routine queues a work request to the receive side of a queue pair.
+*	The work request holds the data necessary to satisfy an incoming receive
+*	message.  If an attempt is made to queue more work requests than the
+*	queue has room for, an error is returned.
+*
+* PARAMETERS
+*	h_qp
+*		[in] Type-cast as appropriate for user/kernel mode, this is
+*		the queue pair handle to which the receive work request is being
+*		posted.
+*	p_recv_wr
+*		[in] List of receive work requests that need to be posted.
+*	pp_recv_failure
+*		[out] The first work request that failed.
+*
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		The work requests were successfully queued to the receive side of the QP.
+*	IB_INVALID_QP_HANDLE
+*		The qp_handle supplied is not valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		The QP has exceeded its configured receive queue depth.
+*	IB_INVALID_WR_TYPE
+*		An invalid work request type was found in the request.
+*	IB_INVALID_QP_STATE
+*		The QP was in the reset or init state.
+*		(TBD: there may be an errata that allows posting in init state)
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_post_send, uvp_poll_cq
+*
+********/
+
+/********/
+
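+/*
+* EXAMPLE (illustrative only)
+*	Work requests are submitted as a linked list, and on failure the
+*	out parameter points at the first request that was not accepted,
+*	so the consumer can tell which requests were queued.  A sketch of
+*	posting a two-element send list; h_qp, the buffers and uvp (the
+*	provider's uvp_interface_t) are assumed to exist:
+*
+*	ib_send_wr_t	wr[2];
+*	ib_send_wr_t	*p_failed = NULL;
+*	ib_api_status_t	status;
+*
+*	cl_memclr( wr, sizeof(wr) );
+*	wr[0].p_next = &wr[1];		// chain the requests
+*	wr[1].p_next = NULL;
+*	// ... fill in wr_id, wr_type and the scatter-gather lists ...
+*
+*	status = uvp.post_send( h_qp, wr, &p_failed );
+*	if( status != IB_SUCCESS )
+*	{
+*		// p_failed addresses the first rejected request; everything
+*		// before it in the chain was queued successfully.
+*	}
+*/
+
+/********/
+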
+/****f* user-mode Verbs/uvp_post_srq_recv
+* NAME
+*	uvp_post_srq_recv -- Post a work request to a shared receive queue (SRQ).
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_post_srq_recv) (
+	IN	const	void* __ptr64			h_srq,
+	IN		ib_recv_wr_t*	const	p_recv_wr,
+	OUT		ib_recv_wr_t**		pp_recv_failure );
+
+/*
+* DESCRIPTION
+*	This routine queues a work request to a shared receive queue.  The work
+*	request holds the data necessary to satisfy an incoming receive message.
+*	If an attempt is made to queue more work requests than the queue has
+*	room for, an error is returned.
+*
+* PARAMETERS
+*	h_srq
+*		[in] Type-cast as appropriate for user/kernel mode, this is
+*		the SRQ handle to which the receive work request is being
+*		posted.
+*	p_recv_wr
+*		[in] List of receive work requests that need to be posted.
+*	pp_recv_failure
+*		[out] The first work request that failed.
+*
+* RETURN VALUE
+*	Any unsuccessful status indicates the status of the first failed request.
+*
+*	IB_SUCCESS
+*		The work requests were successfully queued to the SRQ.
+*	IB_INVALID_SRQ_HANDLE
+*		The srq_handle supplied is not valid.
+*	IB_INSUFFICIENT_RESOURCES
+*		The SRQ has exceeded its configured receive queue depth.
+*	IB_INVALID_WR_TYPE
+*		An invalid work request type was found in the request.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_srq, uvp_post_recv, uvp_poll_cq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_peek_cq
+* NAME
+*	uvp_peek_cq
+*
+* DESCRIPTION
+*	Returns the number of entries currently on the completion queue.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_peek_cq) (
+	IN	const	void* __ptr64		h_cq,
+	OUT		uint32_t* const		p_n_cqes );
+
+/*
+* PARAMETERS
+*	h_cq
+*		[in] Type-cast as appropriate for user/kernel mode, this is the
+*		CQ handle for the completion queue being peeked.
+*
+*	p_n_cqes
+*		[out] Upon successful completion of this call, contains the number
+*		of completion queue entries currently on the completion queue.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The peek operation completed successfully.
+*
+*	IB_INVALID_CQ_HANDLE
+*		The completion queue handle was invalid.
+*
+*	IB_INVALID_PARAMETER
+*		A reference to the completion queue entry count was not provided.
+*
+* PORTABILITY
+*	Kernel and User mode
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_poll_cq, uvp_rearm_cq,
+*	uvp_rearm_n_cq
+*****/
+
+/********/
+
+/****f* user-mode Verbs/uvp_poll_cq
+* NAME
+*	uvp_poll_cq -- Retrieve a work completion record from a completion queue
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_poll_cq) (
+	IN	const	void* __ptr64		h_cq,
+	IN OUT		ib_wc_t** const		pp_free_wclist,
+	OUT		ib_wc_t** const		pp_done_wclist );
+
+/*
+* DESCRIPTION
+*	This routine retrieves work completion entries from the specified
+*	completion queue.  The contents of the data returned in a work completion
+*	are specified in ib_wc_t.
+*
+* PARAMETERS
+*	h_cq
+*		[in] Type-cast as appropriate for user/kernel mode, this is
+*		the CQ handle for the completion queue being polled.
+*	pp_free_wclist
+*		[in out] A list of work completion structures provided by the consumer
+*		for the channel interface to return completed Completion Queue
+*		entries.  If not all the entries are consumed, this list holds the
+*		unused completion entries provided back to the consumer.
+*	pp_done_wclist
+*		[out] A list of work completions retrieved from the completion queue
+*		and successfully processed.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The poll completed successfully.  If on completion the free list is
+*		empty, there are potentially more entries, and the consumer must be
+*		prepared to poll again to retrieve them.
+*	IB_INVALID_CQ_HANDLE
+*		The cq_handle supplied is not valid.
+*	IB_NOT_FOUND
+*		No more entries were found in the specified CQ.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_rearm_cq,
+*	uvp_rearm_n_cq, uvp_post_send, uvp_post_recv
+*
+********/
+
+/********/
+
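+/*
+* EXAMPLE (illustrative only)
+*	The usual completion loop drains the CQ with poll_cq and only then
+*	rearms it; because an entry can arrive between the final poll and
+*	the rearm, the CQ should be polled once more after rearming.  A
+*	sketch; h_cq, uvp (the provider's uvp_interface_t) and the
+*	hypothetical process_wc() are assumed to exist:
+*
+*	ib_wc_t		wc[8], *p_free, *p_done;
+*	ib_api_status_t	status;
+*	uint32_t	i;
+*
+*	do
+*	{
+*		// (Re)build the free list the provider will consume.
+*		for( i = 0; i < 7; i++ )
+*			wc[i].p_next = &wc[i + 1];
+*		wc[7].p_next = NULL;
+*		p_free = wc;
+*
+*		status = uvp.poll_cq( h_cq, &p_free, &p_done );
+*		while( p_done )
+*		{
+*			process_wc( p_done );
+*			p_done = p_done->p_next;
+*		}
+*	} while( status == IB_SUCCESS );
+*
+*	// Request a callback for the next completion, then poll once more
+*	// (not shown) to close the race window.
+*	uvp.rearm_cq( h_cq, FALSE );
+*/
+
+/********/
+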
+/****f* user-mode Verbs/uvp_rearm_cq
+* NAME
+*	uvp_rearm_cq -- Invoke the Completion handler on the next entry added.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_rearm_cq) (
+	IN	const	void* __ptr64		h_cq,
+	IN	const	boolean_t		solicited );
+
+/*
+* DESCRIPTION
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next completion queue entry is added to this CQ.
+*	Please refer to Volume 1 of the InfiniBand specification for a complete
+*	description.
+*
+* PARAMETERS
+*	h_cq
+*		[in] Type-cast as appropriate for user/kernel mode, this is the
+*		CQ handle for the completion queue being armed.
+*	solicited
+*		[in] A boolean flag indicating whether the request is to generate a
+*		notification on the next entry or on the next solicited entry
+*		being added to the completion queue.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The notification request was registered successfully.
+*	IB_INVALID_CQ_HANDLE
+*		The cq_handle supplied is not a valid handle.
+*
+* PORTABILITY
+*	Kernel and User mode
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_peek_cq, uvp_poll_cq,
+*	uvp_rearm_n_cq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_rearm_n_cq
+* NAME
+*	uvp_rearm_n_cq -- Invoke the Completion handler when the next
+*	N completions have been added to this CQ.
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_rearm_n_cq) (
+	IN	const	void* __ptr64		h_cq,
+	IN	const	uint32_t		n_cqes );
+
+/*
+* DESCRIPTION
+*	This routine instructs the channel interface to invoke the completion
+*	handler when the next N completions have been added to this CQ.
+*
+* PARAMETERS
+*	h_cq
+*		[in] Type-cast as appropriate for user/kernel mode, this is the
+*		CQ handle for the completion queue being armed.
+*	n_cqes
+*		[in] The number of completion queue entries to be added to the
+*		completion queue before notifying the client.  This value must be
+*		greater than or equal to one and less than or equal to the size
+*		of the completion queue.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The notification request was registered successfully.
+*	IB_INVALID_CQ_HANDLE
+*		The cq_handle supplied is not a valid handle.
+*	IB_INVALID_PARAMETER
+*		The requested number of completion queue entries was invalid.
+*
+* PORTABILITY
+*	Kernel and User mode
+*
+* SEE ALSO
+*	uvp_pre_create_cq, uvp_post_create_cq_t, uvp_peek_cq, uvp_poll_cq,
+*	uvp_rearm_cq
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_attach_mcast
+* NAME
+*	uvp_pre_attach_mcast -- Pre-ioctl function to Attach a queue pair
+*	to a multicast group
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_attach_mcast) (
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN	const	ib_gid_t		*p_mcast_gid,
+	IN	const	uint16_t		mcast_lid,
+	IN OUT		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_pre_attach_mcast() is the pre-ioctl routine implemented by the vendor
+*	to attach a queue pair to a multicast group.
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's queue pair handle (in user-mode library)
+*		which needs to be added to the multicast group on the adapter.
+*	p_mcast_gid
+*		[in] Multicast GID (formatted like an IPv6 address) associated
+*		with this multicast group.
+*	mcast_lid
+*		[in] The multicast group LID value.
+*	p_umv_buf
+*		[in out] On input, UAL provides this buffer template.
+*		On return from this function, p_umv_buf contains
+*		any vendor-specific record to be exchanged with the vendor's
+*		HCA driver.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The queue pair handle was successfully added to the multicast
+*		group.
+*	IB_INVALID_QP_HANDLE
+*		The qp_handle supplied is invalid.
+*	IB_INVALID_SERVICE_TYPE
+*		The queue pair handle supplied is not of the unreliable datagram type.
+*	IB_INVALID_GID
+*		The supplied GID is not a valid multicast GID.
+*	IB_INSUFFICIENT_RESOURCES
+*		Insufficient resources to complete the request.
+*
+* PORTABILITY
+*	User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_post_attach_mcast,
+*	uvp_pre_detach_mcast, uvp_post_detach_mcast
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_attach_mcast
+* NAME
+*	uvp_post_attach_mcast -- Post-ioctl function to Attach a queue pair
+*	to a multicast group
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_attach_mcast) (
+	IN	const	ib_qp_handle_t		h_uvp_qp,
+	IN		ib_api_status_t		ioctl_status,
+	OUT		ib_mcast_handle_t	*ph_mcast,
+	IN		ci_umv_buf_t		*p_umv_buf );
+
+/*
+* DESCRIPTION
+*	uvp_post_attach_mcast() is the post-ioctl routine implemented by the vendor
+*	to attach a queue pair to a multicast group.
+*
+* PARAMETERS
+*	h_uvp_qp
+*		[in] Vendor's queue pair handle (in user-mode library)
+*		which needs to be added to the multicast group on the adapter.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*	ph_mcast
+*		[out] Vendor's multicast handle (in user-mode library)
+*		holding the association of this queue pair to the multicast group.
+*	p_umv_buf
+*		[in out] On input, it contains any vendor-specific private information
+*		exchanged with the vendor's Verbs Provider Driver (uvp_pre_attach_mcast).
+*		Vendor is expected to check vendor-specific status in
+*		umv_buf as appropriate.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_attach_mcast,
+*	uvp_pre_detach_mcast, uvp_post_detach_mcast
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_pre_detach_mcast
+* NAME
+*	uvp_pre_detach_mcast -- Pre-ioctl function to detach a queue pair
+*	from a multicast group
+*
+* SYNOPSIS
+*/
+
+typedef ib_api_status_t
+(AL_API *uvp_pre_detach_mcast) (
+	IN		ib_mcast_handle_t	h_uvp_mcast );
+
+/*
+* DESCRIPTION
+*	uvp_pre_detach_mcast() is the pre-ioctl routine implemented by the vendor
+*	to detach a queue pair from a multicast group.
+*	Upon return from the pre-ioctl function, UAL packages up the UMV buffer
+*	in an IOCTL and passes it on to the user-mode proxy.  UAL informs the
+*	user-mode proxy that it no longer wishes to receive callbacks for the
+*	mcast join for the caller.  Note that UAL takes care of handling the
+*	callbacks.
+*
+* PARAMETERS
+*	h_uvp_mcast
+*		[in] Vendor's multicast handle (in user-mode library)
+*		holding the association of this queue pair to the multicast group.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The pre-ioctl call completed successfully.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_attach_mcast,
+*	uvp_post_attach_mcast, uvp_post_detach_mcast
+*
+********/
+
+/********/
+
+/****f* user-mode Verbs/uvp_post_detach_mcast
+* NAME
+*	uvp_post_detach_mcast -- Post-ioctl function to detach a queue pair
+*	from a multicast group
+*
+* SYNOPSIS
+*/
+
+typedef void
+(AL_API *uvp_post_detach_mcast) (
+	IN		ib_mcast_handle_t	h_uvp_mcast,
+	IN		ib_api_status_t		ioctl_status );
+
+/*
+* DESCRIPTION
+*	uvp_post_detach_mcast() is the post-ioctl routine implemented by the vendor
+*	to detach a queue pair from a multicast group.
+*
+* PARAMETERS
+*	h_uvp_mcast
+*		[in] Vendor's multicast handle holding the association of this
+*		queue pair to the multicast group.
+*	ioctl_status
+*		[in] The ioctl status of the AL API.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* PORTABILITY
+*	Kernel & User mode.
+*
+* SEE ALSO
+*	uvp_pre_create_qp, uvp_post_create_qp_t, uvp_pre_attach_mcast,
+*	uvp_post_attach_mcast, uvp_pre_detach_mcast
+*
+********/
+
+/********/
+
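+/*
+* EXAMPLE (illustrative only)
+*	A sketch of the validation a vendor's uvp_pre_attach_mcast might
+*	perform before packaging the request: multicast GIDs carry the
+*	0xFF prefix in their first byte, and only UD QPs may be attached.
+*	my_qp_t and its type field are hypothetical:
+*
+*	static ib_api_status_t AL_API
+*	my_pre_attach_mcast(
+*		IN	const	ib_qp_handle_t	h_uvp_qp,
+*		IN	const	ib_gid_t	*p_mcast_gid,
+*		IN	const	uint16_t	mcast_lid,
+*		IN OUT		ci_umv_buf_t	*p_umv_buf )
+*	{
+*		my_qp_t	*p_qp = (my_qp_t*)h_uvp_qp;
+*
+*		// Only unreliable datagram QPs may join a multicast group.
+*		if( p_qp->type != IB_QPT_UNRELIABLE_DGRM )
+*			return IB_INVALID_SERVICE_TYPE;
+*
+*		// Multicast GIDs begin with the 0xFF prefix byte.
+*		if( p_mcast_gid->raw[0] != 0xFF )
+*			return IB_INVALID_GID;
+*
+*		return IB_SUCCESS;
+*	}
+*/
+
+/********/
+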
+/****s* user-mode Verbs/uvp_interface_t
+* NAME
+*	uvp_interface_t -- Interface holding supported Vendor APIs
+*
+* PURPOSE
+*	The following structure is supplied by a Vendor library
+*	providing verbs functionality.
+*
+* SOURCE
+*/
+typedef struct _uvp_interface
+{
+	ib_net64_t			guid;
+	/*
+	 * Version of the header file this interface export can handle
+	 */
+	uint32_t			version;
+
+	/*
+	 * HCA Access Verbs
+	 */
+	uvp_pre_open_ca_t		pre_open_ca;
+	uvp_post_open_ca_t		post_open_ca;
+
+	uvp_pre_query_ca		pre_query_ca;
+	uvp_post_query_ca_t		post_query_ca;
+
+	uvp_pre_modify_ca		pre_modify_ca;
+	uvp_post_modify_ca_t		post_modify_ca;
+
+	uvp_pre_close_ca_t		pre_close_ca;
+	uvp_post_close_ca_t		post_close_ca;
+
+	uvp_pre_ci_call			pre_ci_call;
+	uvp_post_ci_call		post_ci_call;
+
+
+	/*
+	 * Protection Domain
+	 */
+	uvp_pre_allocate_pd		pre_allocate_pd;
+	uvp_post_allocate_pd_t		post_allocate_pd;
+	uvp_pre_deallocate_pd		pre_deallocate_pd;
+	uvp_post_deallocate_pd_t	post_deallocate_pd;
+
+	/*
+	 * Address Vector Management Verbs
+	 */
+
+	uvp_pre_create_av		pre_create_av;
+	uvp_post_create_av_t		post_create_av;
+
+	uvp_pre_query_av		pre_query_av;
+	uvp_post_query_av_t		post_query_av;
+
+	uvp_pre_modify_av		pre_modify_av;
+	uvp_post_modify_av_t		post_modify_av;
+	uvp_pre_destroy_av		pre_destroy_av;
+	uvp_post_destroy_av_t		post_destroy_av;
+
+	/*
+	 * SRQ Management Verbs
+	 */
+	uvp_pre_create_srq		pre_create_srq;
+	uvp_post_create_srq_t		post_create_srq;
+
+	uvp_pre_modify_srq		pre_modify_srq;
+	uvp_post_modify_srq_t		post_modify_srq;
+
+	uvp_pre_query_srq		pre_query_srq;
+	uvp_post_query_srq_t		post_query_srq;
+
+	uvp_pre_destroy_srq		pre_destroy_srq;
+	uvp_post_destroy_srq_t		post_destroy_srq;
+
+
+	/*
+	 * QP Management Verbs
+	 */
+	uvp_pre_create_qp		pre_create_qp;
+	uvp_post_create_qp_t		post_create_qp;
+
+	/* No support for create_spl_qp, UAL will return error */
+
+	uvp_pre_modify_qp		pre_modify_qp;
+	uvp_post_modify_qp_t		post_modify_qp;
+
+	uvp_pre_query_qp		pre_query_qp;
+	uvp_post_query_qp_t		post_query_qp;
+
+	uvp_pre_destroy_qp		pre_destroy_qp;
+	uvp_post_destroy_qp_t		post_destroy_qp;
+
+	/*
+	 * Completion Queue Management Verbs
+	 */
+	uvp_pre_create_cq		pre_create_cq;
+	uvp_post_create_cq_t		post_create_cq;
+
+	uvp_pre_query_cq		pre_query_cq;
+	uvp_post_query_cq_t		post_query_cq;
+
+	uvp_pre_resize_cq		pre_resize_cq;
+	uvp_post_resize_cq_t		post_resize_cq;
+
+	uvp_pre_destroy_cq		pre_destroy_cq;
+	uvp_post_destroy_cq_t		post_destroy_cq;
+
+	/*
+	 * Memory Window Verbs
+	 */
+	uvp_pre_create_mw		pre_create_mw;
+	uvp_post_create_mw_t		post_create_mw;
+	uvp_pre_query_mw		pre_query_mw;
+	uvp_post_query_mw_t		post_query_mw;
+	uvp_pre_destroy_mw		pre_destroy_mw;
+	uvp_post_destroy_mw_t		post_destroy_mw;
+
+	/* No pre/post functions for bind */
+	uvp_bind_mw			bind_mw;
+
+	/*
+	 * Work Request Processing Verbs
+	 * Should the types be same as Verbs?
+	 */
+	uvp_post_send			post_send;
+	uvp_post_recv			post_recv;
+	uvp_post_srq_recv		post_srq_recv;
+
+	/*
+	 * Completion Processing and
+	 * Completion Notification Request Verbs.
+	 * Should the types be same as Verbs?
+	 */
+	uvp_peek_cq			peek_cq;
+	uvp_poll_cq			poll_cq;
+	uvp_rearm_cq			rearm_cq;
+	uvp_rearm_n_cq			rearm_n_cq;
+
+	/*
+	 * Multicast Support Verbs
+	 */
+	uvp_pre_attach_mcast		pre_attach_mcast;
+	uvp_post_attach_mcast		post_attach_mcast;
+	uvp_pre_detach_mcast		pre_detach_mcast;
+	uvp_post_detach_mcast		post_detach_mcast;
+
+} uvp_interface_t;
+
+/********/
+
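+/*
+* EXAMPLE (illustrative only)
+*	A vendor library typically exports a single entry point (see
+*	uvp_get_interface below) that fills this table with its handlers;
+*	entries the vendor does not support are left NULL.  The my_*
+*	handler names are hypothetical:
+*
+*	ib_api_status_t AL_API
+*	my_uvp_get_interface(
+*		IN OUT	uvp_interface_t* const	p_uvp )
+*	{
+*		cl_memclr( p_uvp, sizeof(*p_uvp) );	// unsupported => NULL
+*
+*		p_uvp->pre_create_qp = my_pre_create_qp;
+*		p_uvp->post_create_qp = my_post_create_qp;
+*		p_uvp->post_send = my_post_send;
+*		p_uvp->post_recv = my_post_recv;
+*		p_uvp->poll_cq = my_poll_cq;
+*		// ... remaining handlers ...
+*		return IB_SUCCESS;
+*	}
+*/
+
+/********/
+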
+/****f* user-mode Verbs/uvp_get_interface
+* NAME
+*	uvp_get_interface -- Get the Vendor's supported Verbs calls
+*
+* SYNOPSIS
+*/
+typedef ib_api_status_t
+(AL_API *uvp_get_interface_t)(
+	IN OUT		uvp_interface_t* const	p_uvp );
+/*
+* DESCRIPTION
+*	This routine is called by UAL to get the functions supported by
+*	a vendor's library.  Upon discovering a new CA, UAL will look for
+*	the appropriate vendor library, load the library and query using
+*	this function to get the supported interfaces.
+*
+*	If the vendor does not support an interface function, it should be
+*	set to NULL in the interface structure returned.
+*
+* PARAMETERS
+*	p_uvp
+*		[in out] Pointer to the uvp_interface_t structure that has the function
+*		vector to support verbs functionality.
+*
+* RETURN VALUE
+*	IB_SUCCESS
+*		The registration is successful.
+*	IB_INSUFFICIENT_MEMORY
+*		Insufficient memory to satisfy the request.
+*
+* PORTABILITY
+*	User mode
+*
+* SEE ALSO
+*	uvp_interface_t
+*
+********/
+
+/********/
+
+#endif	// __IB_UAL_UVP_H__
diff --git a/branches/Ndi/inc/user/wsd/ibsp_regpath.h b/branches/Ndi/inc/user/wsd/ibsp_regpath.h
new file mode 100644
index 00000000..90661199
--- /dev/null
+++ b/branches/Ndi/inc/user/wsd/ibsp_regpath.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef _IBSP_REGPATH_H_ +#define _IBSP_REGPATH_H_ + +/* these definitions are common for installSP and WSD projects */ +#define IBSP_PM_REGISTRY_PATH \ + TEXT("SYSTEM\\CurrentControlSet\\Services\\IBWSD\\") +#define IBSP_PM_EVENTLOG_PATH \ + TEXT("SYSTEM\\CurrentControlSet\\Services\\EventLog\\Application\\IBWSD") +#define IBSP_PM_SUBKEY_NAME TEXT("IBWSD") +#define IBSP_PM_SUBKEY_PERF TEXT("Performance") +#define IBSP_PM_INI_FILE "ibsp_perfcounters.ini" +#define IBSP_PM_SYM_H_FILE "ibsp_perfini.h" + + +enum IBSP_PM_COUNTERS +{ + BYTES_SEND = 0, + BYTES_RECV, + BYTES_WRITE, + BYTES_READ, + BYTES_TOTAL, + COMP_SEND, + COMP_RECV, + COMP_TOTAL, + INTR_TOTAL, + IBSP_PM_NUM_COUNTERS + +}; + + +/* counter symbol names */ +#define IBSP_PM_OBJ 0 +#define IBSP_PM_COUNTER( X ) ((X + 1) * 2) + +#endif /* _IBSP_REGPATH_H_ */ diff --git a/branches/Ndi/tests/alts/allocdeallocpd.c b/branches/Ndi/tests/alts/allocdeallocpd.c new file mode 100644 index 00000000..c0fe9705 --- /dev/null +++ b/branches/Ndi/tests/alts/allocdeallocpd.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include +#include +#include + +/* + * Function prototypes + */ + + + +/* + * Test Case AllocDeallocPD + */ + +ib_api_status_t +al_test_alloc_dealloc_pd(void) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_ca); + + /* + * Allocate a PD here + */ + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_alloc_pd failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + ALTS_PRINT( ALTS_DBG_INFO, + ("\tib_alloc_pd passed with handle = %p\n",h_pd)); + + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + + if(ib_status == IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_INFO, + ("\tib_dealloc_pd passed\n")); + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, + ("\tib_dealloc_pd failed with status = %s\n",ib_get_err_str(ib_status))); + } + + break; //End of while + } + /* Close AL */ + if(h_al) + alts_close_al(h_al); + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + diff --git a/branches/Ndi/tests/alts/alts_common.h b/branches/Ndi/tests/alts/alts_common.h new file mode 100644 index 00000000..57baa91c --- /dev/null +++ b/branches/Ndi/tests/alts/alts_common.h @@ -0,0 +1,301 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include + +#if !defined(__ALTS_COMMON_H__) +#define __ALTS_COMMON_H__ + + +typedef struct _mem_region +{ + void *buffer; + ib_mr_handle_t mr_h; + uint32_t lkey; + uint32_t rkey; + + uint16_t my_lid; + +} mem_region_t; + + +#ifndef __MODULE__ +#define __MODULE__ "ALTS:" +#endif + + +typedef struct cmd_line_struct_t_ +{ + /* + * TBD + */ + uint32_t pgm_to_run; + boolean_t um; + uint32_t iteration; + ib_api_status_t status; + +} cmd_line_arg_t; + + +/* + * Device Name of this driver + */ +#define ALTS_DEVICE_NAME "/dev/al_test" + +/* + * Define a magic number + */ +#define ALTS_DEV_KEY 'T' + +#define ALTS_MAX_CA 4 + +/* + * List all the supported test cases here. + */ +typedef enum alts_dev_ops +{ + OpenClose = 1, + QueryCAAttribute, + ModifyCAAttribute, + AllocDeallocPD, + CreateDestroyAV, + QueryAndModifyAV, + CreateDestroyQP, + QueryAndModifyQP, + CreateAndDestroyCQ, + QueryAndModifyCQ, + AttachMultiCast, + RegisterMemRegion, + RegisterVarMemRegions, + ReregisterHca, + RegisterPhyMemRegion, + CreateMemWindow, + RegisterSharedMemRegion, + MultiSend, + RegisterPnP, + MadTests, + MadQuery, + CmTests, + MaxTestCase +} alts_dev_ops_t; + +/* + * Define all the IOCTL CMD CODES Here + */ +#define ALTS_OpenClose \ + IOCTL_CODE(ALDEV_KEY, OpenClose) +#define ALTS_QueryCAAttribute \ + IOCTL_CODE(ALDEV_KEY, QueryCAAttribute) +#define ALTS_ModifyCAAttribute \ + IOCTL_CODE(ALDEV_KEY, ModifyCAAttribute) +#define ALTS_AllocDeallocPD \ + IOCTL_CODE(ALDEV_KEY, AllocDeallocPD) +#define ALTS_AllocDeallocRDD \ + IOCTL_CODE(ALDEV_KEY, AllocDeallocRDD) +#define ALTS_CreateDestroyAV \ + IOCTL_CODE(ALDEV_KEY, CreateDestroyAV) +#define ALTS_QueryAndModifyAV \ + IOCTL_CODE(ALDEV_KEY, QueryAndModifyAV) +#define ALTS_CreateDestroyQP \ + IOCTL_CODE(ALDEV_KEY, CreateDestroyQP) +#define ALTS_QueryAndModifyQP \ + IOCTL_CODE(ALDEV_KEY, QueryAndModifyQP) +#define ALTS_CreateAndDestroyCQ \ + IOCTL_CODE(ALDEV_KEY, CreateAndDestroyCQ) +#define ALTS_QueryAndModifyCQ \ + IOCTL_CODE(ALDEV_KEY, QueryAndModifyCQ) +#define ALTS_CreateAndDestroyEEC \ + IOCTL_CODE(ALDEV_KEY, CreateAndDestroyEEC) +#define ALTS_QueryAndModifyEEC \ + IOCTL_CODE(ALDEV_KEY, QueryAndModifyEEC) +#define ALTS_AttachMultiCast \ + IOCTL_CODE(ALDEV_KEY, AttachMultiCast) +#define ALTS_RegisterMemRegion \ + IOCTL_CODE(ALDEV_KEY, RegisterMemRegion) +#define ALTS_RegisterPhyMemRegion \ + IOCTL_CODE(ALDEV_KEY, RegisterPhyMemRegion) +#define ALTS_CreateMemWindow \ + IOCTL_CODE(ALDEV_KEY, CreateMemWindow) +#define ALTS_RegisterSharedMemRegion \ + IOCTL_CODE(ALDEV_KEY, RegisterSharedMemRegion) +#define ALTS_MultiSend \ + IOCTL_CODE(ALDEV_KEY, MultiSend) +#define ALTS_MadTests \ + IOCTL_CODE(ALDEV_KEY, MadTests) + +#define ALTS_CmTests \ + IOCTL_CODE(ALDEV_KEY, CmTests) + + +#define ALTS_CQ_SIZE 0x50 + + +/* + * Function Prototypes for the above test cases + */ +ib_api_status_t +al_test_openclose( void ); + +ib_api_status_t +al_test_modifycaattr( void ); + +ib_api_status_t +al_test_querycaattr( void ); + +ib_api_status_t +al_test_alloc_dealloc_pd( void ); + +ib_api_status_t +al_test_alloc_dealloc_rdd( void ); + +ib_api_status_t +al_test_create_destroy_av( void ); + +ib_api_status_t +al_test_query_modify_av( void ); + +ib_api_status_t +al_test_create_destroy_cq( void ); + +ib_api_status_t +al_test_query_modify_cq( void ); + + +ib_api_status_t +al_test_create_destroy_qp( void ); + +ib_api_status_t +al_test_query_modify_qp( void ); + + +ib_api_status_t +al_test_create_destroy_eec( void ); + +ib_api_status_t 
+al_test_query_modify_eec( void ); + +ib_api_status_t +al_test_register_mem( void ); + +ib_api_status_t +al_test_create_mem_window( void ); + +ib_api_status_t +al_test_register_var_mem( void ); + +ib_api_status_t +al_test_reregister_hca( void ); + +ib_api_status_t +al_test_multi_send_recv( void ); + +ib_api_status_t +al_test_register_pnp( void ); + +ib_api_status_t +al_test_mad( void ); + +ib_api_status_t +al_test_query( void ); + +ib_api_status_t +al_test_cm( void ); + +ib_api_status_t +al_test_register_phys_mem( void ); + +ib_api_status_t +al_test_register_shared_mem( void ); + + +/* + * Misc function prototypes + */ + +ib_api_status_t +alts_open_al( ib_al_handle_t *ph_al ); + +ib_api_status_t +alts_close_al( ib_al_handle_t ph_al ); + +ib_api_status_t +alts_open_ca( + IN ib_al_handle_t h_al, + OUT ib_ca_handle_t *p_alts_ca_h + ); + +ib_api_status_t +alts_close_ca( IN ib_ca_handle_t alts_ca_h ); + +void +alts_ca_err_cb( ib_async_event_rec_t *p_err_rec); + +void +alts_ca_destroy_cb( void *context); + +void +alts_pd_destroy_cb( void *context ); + +void +alts_print_ca_attr( ib_ca_attr_t *alts_ca_attr ); + + +void +alts_qp_err_cb( + ib_async_event_rec_t *p_err_rec ); + +void +alts_qp_destroy_cb( + void *context ); + + + +void +alts_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +alts_cq_err_cb( + ib_async_event_rec_t *p_err_rec ); + +void +alts_cq_destroy_cb( + void *context ); + + + + +#endif // __ALTS_COMMON_H__ diff --git a/branches/Ndi/tests/alts/alts_debug.h b/branches/Ndi/tests/alts/alts_debug.h new file mode 100644 index 00000000..9167c963 --- /dev/null +++ b/branches/Ndi/tests/alts/alts_debug.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined(__ALTS_DEBUG_H__) +#define __ALTS_DEBUG_H__ + +#ifndef __MODULE__ +#define __MODULE__ "alts" +#endif + +#include + +#define ALTS_DBG_NORMAL (1 << 0) +#define ALTS_DBG_PNP (1 << 1) +#define ALTS_DBG_INFO (1 << 2) +#define ALTS_DBG_VERBOSE (1 << 3) +#define ALTS_DBG_DEV (1 << 4) +#define ALTS_DBG_STATUS (1 << 5) +#define ALTS_DBG_ERROR CL_DBG_ERROR + +#define ALTS_DBG_NONE CL_DBG_DISABLE +#define ALTS_DBG_FULL CL_DBG_ALL + +extern uint32_t alts_dbg_lvl; + +/* Macros for simplifying CL_ENTER, CL_TRACE, etc. 
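+ * ALTS_PRINT expands to a plain printf in user-mode builds (the msg_lvl is
+ * ignored there) and to CL_PRINT, gated by alts_dbg_lvl, in kernel builds.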
*/ +#define ALTS_ENTER( msg_lvl ) \ + CL_ENTER( msg_lvl, alts_dbg_lvl ) + +#define ALTS_EXIT( msg_lvl ) \ + CL_EXIT( msg_lvl, alts_dbg_lvl ) + +#define ALTS_TRACE( msg_lvl, msg ) \ + CL_TRACE( msg_lvl, alts_dbg_lvl, msg ) + +#define ALTS_TRACE_EXIT( msg_lvl, msg ) \ + CL_TRACE_EXIT( msg_lvl, alts_dbg_lvl, msg ) + +#ifndef CL_KERNEL + +#define ALTS_PRINT( msg_lvl, msg ) \ + printf msg +#else + +#define ALTS_PRINT( msg_lvl, msg ) \ + CL_PRINT( msg_lvl, alts_dbg_lvl, msg ) +#endif + +#endif // __ALTS_DEBUG_H__ diff --git a/branches/Ndi/tests/alts/alts_misc.c b/branches/Ndi/tests/alts/alts_misc.c new file mode 100644 index 00000000..1ccde244 --- /dev/null +++ b/branches/Ndi/tests/alts/alts_misc.c @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +/* + * Abstract: + * This module defines the basic AL test suite functions + * + * Environment: + * Kernel Mode and User Mode + */ + + +#include +#include +#include +#include +#include +#include + + +ib_api_status_t +alts_open_al( + ib_al_handle_t *ph_al ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + ib_status = ib_open_al(ph_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_open_al failed status = %d\n", ib_status) ); + + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, ("ib_open_al PASSED.\n") ); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_close_al( + ib_al_handle_t h_al ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + ib_status = ib_close_al(h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_close_al failed status = %d\n", ib_status)); + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, ("ib_close_al PASSED.\n") ); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_open_ca( + IN ib_al_handle_t h_al, + OUT ib_ca_handle_t *p_alts_ca_h ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t ca_guid_array[ALTS_MAX_CA]; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + do + { + ib_status = ib_get_ca_guids(h_al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_get_ca_guids failed status = %d\n", (uint32_t)ib_status) ); + break; + + } + + ALTS_PRINT(ALTS_DBG_INFO, \ + ("Total number of CAs in the system is %d\n",(uint32_t)guid_count)); + + if(guid_count == 0) + { + ib_status = IB_ERROR; + break; + } + + if (guid_count > ALTS_MAX_CA) + { + guid_count = ALTS_MAX_CA; + + ALTS_PRINT(ALTS_DBG_INFO, \ + ("Resetting guid_count to %d\n",ALTS_MAX_CA)); + } + + ib_status = ib_get_ca_guids(h_al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_get_ca_guids failed with status = %d\n", ib_status) ); + break; + } + + ib_status = ib_open_ca( + h_al, + ca_guid_array[0], // open the first CA by default + alts_ca_err_cb, + NULL, //ca_context + p_alts_ca_h); + + ALTS_PRINT(ALTS_DBG_INFO, + ("GUID = %" PRIx64"\n", ca_guid_array[0])); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_open_ca failed with status = %d\n", ib_status) ); + break; + } + + } while (0); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_close_ca( + IN ib_ca_handle_t alts_ca_h + ) +{ + ib_api_status_t ib_status; + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + ib_status = ib_close_ca(alts_ca_h, alts_ca_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_close_ca failed status = %d\n", ib_status)); + } + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +alts_ca_destroy_cb( + void *context) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE ); +} + +void +alts_pd_destroy_cb( + void *context + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if(context != NULL) + ALTS_PRINT(ALTS_DBG_INFO, + ("Context is %"PRIdSIZE_T"\n", (size_t)context)); + + + ALTS_EXIT( ALTS_DBG_VERBOSE ); +} + + + +void +alts_ca_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + ALTS_PRINT( ALTS_DBG_INFO, + ("p_err_rec->code is %d\n",p_err_rec->code)); + + ALTS_EXIT( ALTS_DBG_VERBOSE ); +} + + + +void +alts_print_ca_attr( + ib_ca_attr_t *alts_ca_attr + ) +{ + uint32_t i; + 
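+ /* Print the device ID and, for each port, its GID prefix, GUID, link state, and LID. */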
ib_port_attr_t *p_port; + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("\tdev_id is <0x%x>\n",alts_ca_attr->dev_id)); + + for (i = 0; i < alts_ca_attr->num_ports; i ++) + { + p_port = (alts_ca_attr->p_port_attr +i); + ALTS_PRINT( ALTS_DBG_INFO, + ("Port %d\tPrefix: %#16I64x \tGUID: %#16I64x\nLink Status: %s\tLID %#x \n",i, + cl_ntoh64(p_port->p_gid_table->unicast.prefix), + cl_ntoh64(p_port->p_gid_table->unicast.interface_id), + ib_get_port_state_str(p_port->link_state), + cl_ntoh16(p_port->lid)) ); + } +} + diff --git a/branches/Ndi/tests/alts/alts_readme.txt b/branches/Ndi/tests/alts/alts_readme.txt new file mode 100644 index 00000000..c5976dfc --- /dev/null +++ b/branches/Ndi/tests/alts/alts_readme.txt @@ -0,0 +1,119 @@ +README: + +The AL Test Suite consists of a set of test cases to test the AL functionality. +The test cases focus on AL bring-up and include some data transfer tests. +The AL test suite can be used to test individual AL-exposed APIs and can also +be used to debug and bring up the SHIM. + +The AL Test Suite consists of a Kernel Mode component and a user mode component. +All the test cases are under the shared/alts directory. These test cases +can be compiled for both user mode as well as for kernel mode. + +1)AL Test Suite for User mode + This consists of a user mode AL test application which contains all the test + cases. No kernel mode component is required here. However, this AL test suite + needs the user mode component library. + + Compiling user mode AL test suite: + a) First compile the User mode Component Library. + + >cd ~/linuxuser/iba/complib + >make + + b) To compile the AL test suite for User mode, run the make command with + BUILD_USER set to 1 as shown below. + + >cd ~/linuxuser/iba/alts + >make BUILD_USER=1 + +2)AL Test Suite for Kernel mode + This consists of both a user mode component and a kernel mode component. The + user mode component is needed to drive the test. The kernel mode component is + a driver with all the test cases compiled and linked into the driver. + + Compiling kernel mode AL test suite: + a) Compile the User mode Component Library. + + >cd ~/linuxuser/iba/complib + >make + + b) Compile the Kernel mode Component Library + + >cd ~/linux/drivers/iba/complib + >make + + c) Compile the user mode AL test suite + + >cd ~/linuxuser/iba/alts + >make + + d) Compile the kernel mode AL test Driver + + >cd ~/linux/drivers/iba/alts + >make + + +3)Running the test: +If you would like to test KAL, then you need to install the kernel mode +component of the AL test suite as shown below. + +>cd ~/linux/drivers/iba/alts +>insmod altsdrv.o + +Run specific tests as shown below. + >./alts -tc=XXXXX [-um|-km] + + tc -> stands for the test case to run. + -um -> user mode test + -km -> kernel mode test + + XXXXX can be any one of the following. + OpenClose + QueryCAAttribute + ModifyCAAttribute + AllocDeallocPD + AllocDeallocRDD + CreateDestroyAV + QueryAndModifyAV + CreateDestroyQP + QueryAndModifyQP + CreateAndDestroyCQ + QueryAndModifyCQ + CreateAndDestroyEEC + QueryAndModifyEEC + AttachMultiCast + RegisterMemRegion + RegisterVarMemRegions + ReregisterHca + RegisterPhyMemRegion + CreateMemWindow + RegisterSharedMemRegion + MultiSend + RegisterPnP + MadTests + MadQuery + CmTests + + To run the OpenClose test case in user mode + >./alts -tc=OpenClose -um + + To run the OpenClose test case in kernel mode + >./alts -tc=OpenClose -km + + OR + >./alts -tc=OpenClose + + The default is kernel mode. + + +To see the results: +All the log messages go to the /var/log/messages file by default.
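+For example, a quick check (assuming the default syslog location):
+ >grep failed /var/log/messages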
+If any test case has failed, you will see it flagged there. + + + + + + + + diff --git a/branches/Ndi/tests/alts/cmtests.c b/branches/Ndi/tests/alts/cmtests.c new file mode 100644 index 00000000..7e7661ac --- /dev/null +++ b/branches/Ndi/tests/alts/cmtests.c @@ -0,0 +1,4273 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * the cm test does a data transfer between two queue pairs, one created + * on each port of the hca. In order for this test to work, two ports of the hca + * should be connected in a loopback and must be configured to ACTIVE PORT STATE.
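+ * The connection is set up through the CM REQ/REP/RTU handshake and torn
+ * down with DREQ/DREP; the test variants below also exercise REJ, MRA,
+ * listen handoff, UC, and SIDR.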
+ * + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + +/* Parameters */ +#define MAX_QPS 8 +#define SRC_QP 0 +#define DEST_QP 1 + + +#pragma warning(disable:4324) +typedef struct _alts_cm_ca_obj +{ + ib_api_status_t status; + ib_qp_type_t test_type; + ib_pfn_comp_cb_t pfn_comp_cb; + + ib_ca_handle_t h_ca; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr; + ib_port_attr_t *p_dest_port_attr; + + ib_net32_t src_qp_num; + ib_net32_t dest_qp_num; + + ib_net64_t src_portguid; + uint8_t src_port_num; + + ib_net64_t dest_portguid; + uint8_t dest_port_num; + + ib_net16_t slid; + ib_net16_t dlid; + + ib_pool_key_t h_src_pool; + ib_pool_key_t h_dest_pool; + + ib_mad_svc_handle_t h_src_mad_svc; + ib_mad_svc_handle_t h_dest_mad_svc; + + uint32_t num_cq; + ib_cq_handle_t h_cq; + ib_cq_handle_t h_cq_alt; + uint32_t cq_size; + + ib_pd_handle_t h_pd; + + ib_qp_handle_t h_qp[MAX_QPS]; + uint32_t qkey; + + ib_qp_attr_t qp_attr[MAX_QPS]; + + ib_send_wr_t *p_send_wr; + ib_recv_wr_t *p_recv_wr; + size_t wr_send_size; + size_t wr_recv_size; + uint32_t num_wrs; + uint32_t ds_list_depth; + uint32_t msg_size; // Initialize this field + + ib_av_handle_t h_av_src; + ib_av_handle_t h_av_dest; + + uint32_t send_done; + uint32_t recv_done; + uint32_t cq_done; // total completions + boolean_t is_src; + + boolean_t is_loopback; + boolean_t reply_requested; + boolean_t rdma_enabled; + + // cm stuff + ib_path_rec_t path_src; + ib_path_rec_t path_dest; + + ib_cm_req_t req_src; + ib_cm_req_t req_dest; + + ib_cm_listen_t listen; + ib_listen_handle_t h_cm_listen; + + ib_cm_rep_t rep_dest; + + ib_cm_rtu_t rtu_src; + + uint32_t cm_cbs; + uint32_t cm_errs; + + ib_cm_dreq_t dreq_src; + ib_cm_drep_t drep_dest; + + cl_event_t mra_event; + boolean_t mra_test; + boolean_t rej_test; + + ib_cm_handle_t h_cm_req; + + cl_event_t destroy_event; + + boolean_t handoff; + ib_listen_handle_t h_cm_listen_handoff; + ib_net64_t handoff_svc_id; + + mem_region_t mem_region[10]; + +} alts_cm_ca_obj_t; +#pragma warning(default:4324) + +#define MAX_SERVER 500 + +typedef struct _alts_serv_object +{ + alts_cm_ca_obj_t alts_obj; + + ib_cq_handle_t h_cq[MAX_SERVER]; + ib_qp_handle_t h_qp[MAX_SERVER]; + +} alts_serv_object_t; + + +typedef struct _alts_rdma +{ + char msg_type; + uint64_t vaddr; + ib_net32_t rkey; + char msg[32]; +} alts_rdma_t; + +/* + * Function Prototypes + */ +ib_api_status_t +alts_cm_activate_qp( + alts_cm_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp ); + +ib_api_status_t +alts_cm_check_active_ports( + alts_cm_ca_obj_t *p_ca_obj ); + +ib_api_status_t +alts_cm_destroy_resources( + alts_cm_ca_obj_t *p_ca_obj ); + +ib_api_status_t +alts_rc_deregister_mem( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index ); + +ib_api_status_t +cm_post_sends( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ); + +void +__mra_thread( + IN void* context ); + +/* + * Test case entry points + */ +ib_api_status_t +alts_cm_rc_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_rc_rej_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_handoff_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_rc_flush_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_rc_no_cm_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_rc_rdma_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_rc_mra_test ( 
+ ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_uc_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_cm_sidr_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +#define ALTS_TEST_MGMT_CLASS 0x56 +#define ALTS_TEST_MGMT_CLASS_VER 1 + +/* + * Global Variables + */ +ib_al_handle_t h_al; +ib_dgrm_info_t dgrm_info; +ib_mad_svc_t mad_svc; +ib_send_wr_t send_wr; +ib_local_ds_t send_ds; +ib_recv_wr_t recv_wr; +ib_local_ds_t recv_ds; +alts_cm_ca_obj_t *gp_ca_obj; + +extern ib_cq_create_t cq_create_attr; +extern ib_qp_create_t qp_create_attr; +extern ib_av_attr_t av_attr; +extern ib_wc_t free_wclist; +extern ib_wc_t free_wcl; + +ib_api_status_t cm_client_server=IB_NOT_FOUND; +ib_api_status_t cm_client_server_rej=IB_NOT_FOUND; +ib_api_status_t cm_client_server_flush=IB_NOT_FOUND; +ib_api_status_t cm_client_server_no_cm=IB_NOT_FOUND; +ib_api_status_t cm_rdma=IB_NOT_FOUND; +ib_api_status_t cm_mra=IB_NOT_FOUND; +ib_api_status_t cm_uc=IB_NOT_FOUND; +ib_api_status_t cm_sidr=IB_NOT_FOUND; +ib_api_status_t cm_handoff=IB_NOT_FOUND; + +/* This test case assumes that the HCA has 2 ports connected + * through the switch. Sends packets from the lower port number to the higher + * port number. + */ +ib_api_status_t +al_test_cm(void) +{ + ib_api_status_t ib_status = IB_ERROR; + ib_ca_handle_t h_ca = NULL; + uint32_t bsize; + ib_ca_attr_t *p_ca_attr = NULL; + //alts_cm_ca_obj_t ca_obj; // for testing stack + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + do + { + /* + * Open the AL interface + */ + h_al = NULL; + ib_status = alts_open_al(&h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_al failed status = %s\n", + ib_get_err_str( ib_status )) ); + break; + } + + /* + * Default opens the first CA + */ + ib_status = alts_open_ca(h_al, &h_ca); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_ca failed status = %s\n", + ib_get_err_str( ib_status )) ); + break; + } + + /* + * Get the CA Attributes + * Check for two active ports + */ + + /* + * Query the CA + */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %s\n", + ib_get_err_str( ib_status )) ); + break; + } + + CL_ASSERT(bsize); + + cm_client_server = alts_cm_rc_tests(h_ca, bsize); + if(cm_client_server != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_rc_tests() failed with status %s\n", + ib_get_err_str( cm_client_server )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_rc_tests() passed\n")); + + cm_client_server_rej = alts_cm_rc_rej_test(h_ca, bsize); + if(cm_client_server_rej != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_rc_rej_test() failed with status %s\n", + ib_get_err_str( cm_client_server_rej )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_rc_rej_test() passed\n")); + + cm_client_server_flush = alts_cm_rc_flush_test(h_ca, bsize); + if(cm_client_server_flush != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_rc_flush_test() failed with status %s\n", + ib_get_err_str( cm_client_server_flush )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_rc_flush_test() passed\n")); + + cm_client_server_no_cm = alts_rc_no_cm_test(h_ca, bsize); + if(cm_client_server_no_cm != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_rc_no_cm_test() failed with status %s\n", + ib_get_err_str( cm_client_server_no_cm )) ); + break; + } + ALTS_PRINT( 
ALTS_DBG_VERBOSE, + ("alts_rc_no_cm_test() passed\n")); + + cm_rdma = alts_cm_rc_rdma_tests(h_ca, bsize); + if(cm_rdma != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_rc_rdma_tests() failed with status %s\n", + ib_get_err_str( cm_rdma )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_rc_rdma_tests() passed\n")); + + cm_mra = alts_rc_mra_test(h_ca, bsize); + if(cm_mra != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_rc_mra_test() failed with status %s\n", + ib_get_err_str( cm_mra )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_rc_mra_test() passed\n")); + + cm_handoff = alts_cm_handoff_test(h_ca, bsize); + if(cm_handoff != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_handoff_test() failed with status %s\n", + ib_get_err_str( cm_handoff )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_handoff_test() passed\n")); + + cm_uc = alts_cm_uc_test(h_ca, bsize); + if(cm_uc != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_uc_test() failed with status %s\n", + ib_get_err_str( cm_uc )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_uc_test() passed\n")); + + cm_sidr = alts_cm_sidr_tests(h_ca, bsize); + if(cm_sidr != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_sidr_tests() failed with status %s\n", + ib_get_err_str( cm_sidr )) ); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_cm_sidr_tests() passed\n")); + + } while (0); + + /* + * Destroy the resources + */ + if (p_ca_attr) + cl_free(p_ca_attr); + + ALTS_PRINT(ALTS_DBG_STATUS, + ("Test results (cm):\n" + "\trc client server......: %s\n" + "\trc reject.............: %s\n" + "\tqp flush on disconnect: %s\n" + "\trc no cm..............: %s\n" + "\trdma..................: %s\n" + "\tmra...................: %s\n" + "\thandoff...............: %s\n" + "\tuc....................: %s\n" + "\tsidr..................: %s\n", + ib_get_err_str(cm_client_server), + ib_get_err_str(cm_client_server_rej), + ib_get_err_str(cm_client_server_flush), + ib_get_err_str(cm_client_server_no_cm), + ib_get_err_str(cm_rdma), + ib_get_err_str(cm_mra), + ib_get_err_str(cm_handoff), + ib_get_err_str(cm_uc), + ib_get_err_str(cm_sidr) + )); + + if( h_al ) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; + +} + +static void +__alts_cm_destroy_pd_cb( + IN void *context ) +{ + cl_event_signal( &((alts_cm_ca_obj_t*)context)->destroy_event ); +} + +ib_api_status_t +alts_cm_destroy_resources( + alts_cm_ca_obj_t *p_ca_obj) +{ + uint32_t i, j; + + /* + * Destroy Send QP, Recv QP, CQ and PD + */ + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if (p_ca_obj->h_qp[SRC_QP]) + { + ib_status = ib_destroy_qp(p_ca_obj->h_qp[SRC_QP], NULL); + } + + if (p_ca_obj->is_loopback != TRUE) + { + if (p_ca_obj->h_qp[DEST_QP]) + { + ib_status = ib_destroy_qp(p_ca_obj->h_qp[DEST_QP], NULL); + } + } + + if (p_ca_obj->h_cq) + ib_status = ib_destroy_cq(p_ca_obj->h_cq, NULL); + if (p_ca_obj->h_cq_alt) + ib_status = ib_destroy_cq(p_ca_obj->h_cq_alt, NULL); + + // deregister the receive buffers + for (i=0; i < p_ca_obj->num_wrs; i++) + { + alts_rc_deregister_mem(p_ca_obj, i); + } + + // then the send buffers + for (j=i; j < i + p_ca_obj->num_wrs; j++) + { + ib_status = alts_rc_deregister_mem(p_ca_obj, j); + } + + if (p_ca_obj->h_pd) + { + ib_status = ib_dealloc_pd(p_ca_obj->h_pd,__alts_cm_destroy_pd_cb); + cl_event_wait_on( &p_ca_obj->destroy_event, EVENT_NO_TIMEOUT, FALSE ); + } + + //cl_thread_suspend( 1000 ); + cl_event_destroy( &p_ca_obj->destroy_event ); + + ALTS_EXIT( 
ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_sidr_message_passing( + alts_cm_ca_obj_t *p_ca_obj) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_mad_element_t *p_mad_element; + ib_mad_t *p_mad; + char *p_buf; + uint32_t i; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + //Create an Address vector + av_attr.dlid = p_ca_obj->dlid; + av_attr.port_num = p_ca_obj->src_port_num; + av_attr.sl = 0; + av_attr.path_bits = 0; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + av_attr.grh_valid = FALSE; + + ib_status = ib_create_av(p_ca_obj->h_pd,&av_attr,&p_ca_obj->h_av_src); + if(ib_status != IB_SUCCESS) + return ib_status; + + p_ca_obj->send_done = 0; + p_ca_obj->recv_done = 0; + p_ca_obj->cq_done = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("++++++ dlid(x%x) src_port(%d) ====\n", + av_attr.dlid, av_attr.port_num)); + + for (i=0; i<p_ca_obj->num_wrs; i++) + { + p_mad_element = NULL; + ib_status = ib_get_mad( + p_ca_obj->h_src_pool, + MAD_BLOCK_SIZE, + &p_mad_element ); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_get_mad()! %s\n", ib_get_err_str(ib_status))); + return (ib_status); + } + + // format mad + p_mad_element->context1 = (void *)1; + p_mad_element->context2 = p_ca_obj; + + /* Send request information. */ + p_mad_element->h_av = p_ca_obj->h_av_src; + p_mad_element->send_opt = IB_SEND_OPT_SIGNALED; + + + if (p_ca_obj->reply_requested == TRUE) + p_mad_element->resp_expected = TRUE; + else + p_mad_element->resp_expected = FALSE; //TRUE; + + p_mad_element->remote_qp = p_ca_obj->qp_attr[DEST_QP].num; + + p_mad_element->remote_qkey = p_ca_obj->qkey; + p_mad_element->timeout_ms = 10; + p_mad_element->retry_cnt = 1; + + /* Completion information. */ + p_mad_element->status = 0; + + // format mad + p_mad = p_mad_element->p_mad_buf; + + p_buf = (char *)p_mad; + cl_memset(p_buf, 0x66, 256); // set pattern in buffer + + + switch (p_ca_obj->qp_attr[SRC_QP].num) + { + case IB_QP0: + ib_mad_init_new( + p_mad, + IB_MCLASS_SUBN_LID, + ALTS_TEST_MGMT_CLASS_VER, + IB_MAD_METHOD_GET, + (ib_net64_t) CL_NTOH64(0x666), + IB_MAD_ATTR_SM_INFO, + 0 ); + break; + + case IB_QP1: + default: + ib_mad_init_new( + p_mad, + ALTS_TEST_MGMT_CLASS, + ALTS_TEST_MGMT_CLASS_VER, + IB_MAD_METHOD_GET, + (ib_net64_t) CL_NTOH64(0x666), + IB_MAD_ATTR_CLASS_PORT_INFO, + 0 ); + break; + } + + // send + ib_status = ib_send_mad( + p_ca_obj->h_src_mad_svc, + p_mad_element, + NULL ); + + if(ib_status != IB_SUCCESS) + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("ib_send_mad failed\n")); + + //cl_thread_suspend(10); // 10 usec + } + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + + cl_thread_suspend(10000); // 10 seconds + + if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) || + (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + cl_thread_suspend(10000); // 10 seconds + } + + + if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) || + (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + cl_thread_suspend(10000); // 10 seconds + } + + if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) || + (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + cl_thread_suspend(10000); // 10 seconds + } + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +cm_post_sends( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr;
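+ /* Build a single send WR and post it num_posts times on the source QP. */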
+ uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if (p_ca_obj->test_type == IB_QPT_UNRELIABLE_DGRM) + msg_size = p_ca_obj->msg_size - sizeof(ib_grh_t); + else + msg_size = 64; + + //msg_size = p_ca_obj->msg_size; + + msg_size = 64; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_SEND; + + if (p_ca_obj->test_type == IB_QPT_UNRELIABLE_DGRM) + { + p_s_wr->dgrm.ud.h_av = p_ca_obj->h_av_src; + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED; +// p_s_wr->send_opt = IB_SEND_OPT_IMMEDIATE | + + p_s_wr->dgrm.ud.remote_qkey = p_ca_obj->qkey; + p_s_wr->dgrm.ud.remote_qp = p_ca_obj->qp_attr[DEST_QP].num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("======= qkey(x%x) qp_num(x%x) ========\n", + p_s_wr->dgrm.ud.remote_qkey, + p_s_wr->dgrm.ud.remote_qp)); + + } + else if ( (p_ca_obj->test_type == IB_QPT_RELIABLE_CONN) || + (p_ca_obj->test_type == IB_QPT_UNRELIABLE_CONN) ) + { + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED; +// p_s_wr->send_opt = IB_SEND_OPT_IMMEDIATE | + + /* + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \ + IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ;*/ + + } + + + for (i = 0; i < num_posts; i++) + { + p_s_wr->wr_id = i+reg_index; + p_s_wr->immediate_data = 0xfeedde00 + i; + + p_s_wr->remote_ops.vaddr = 0; + p_s_wr->remote_ops.rkey = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[SRC_QP], + p_s_wr, + &p_send_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_cm_check_active_ports(alts_cm_ca_obj_t *p_ca_obj) +{ + ib_api_status_t ib_status; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr = NULL; + ib_port_attr_t *p_dest_port_attr = NULL; + uint32_t i; + ib_port_attr_t *p_port_attr; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(p_ca_obj); + + p_ca_attr = p_ca_obj->p_ca_attr; + + CL_ASSERT(p_ca_attr); + + for(i=0; i< p_ca_attr->num_ports; i++) + { + p_port_attr = &p_ca_attr->p_port_attr[i]; + + if (p_port_attr->link_state == IB_LINK_ACTIVE) + { + if (p_src_port_attr == NULL) + p_src_port_attr = p_port_attr; + else + if(p_dest_port_attr == NULL) + p_dest_port_attr = p_port_attr; + else + break; + } + } + + // handle loopback case + if (p_ca_obj->is_loopback == TRUE) + p_dest_port_attr = p_src_port_attr; + + if (p_src_port_attr && p_dest_port_attr) + { + p_ca_obj->p_dest_port_attr = p_dest_port_attr; + p_ca_obj->p_src_port_attr = p_src_port_attr; + + p_ca_obj->dlid = p_dest_port_attr->lid; + p_ca_obj->slid = p_src_port_attr->lid; + + p_ca_obj->dest_portguid = p_dest_port_attr->port_guid; + p_ca_obj->src_portguid = p_src_port_attr->port_guid; + + p_ca_obj->dest_port_num = p_dest_port_attr->port_num; + p_ca_obj->src_port_num = p_src_port_attr->port_num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("**** slid = x%x (x%x) ***dlid = x%x (x%x) ***************\n", + p_ca_obj->slid, + CL_NTOH16(p_ca_obj->slid), + p_ca_obj->dlid, + CL_NTOH16(p_ca_obj->dlid) )); + + ib_status = IB_SUCCESS; + + } + else + { + + ib_status = IB_ERROR; + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +rc_cm_cq_comp_cb( + void *cq_context, + ib_qp_type_t qp_type + ) +{ + ib_api_status_t ib_status; + uint32_t i = 0, id; + ib_wc_t *p_free_wcl, *p_done_cl= NULL; + 
alts_cm_ca_obj_t *p_ca_obj; + ib_cq_handle_t h_cq; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(cq_context); + + h_cq = *((ib_cq_handle_t*)cq_context); + p_ca_obj = gp_ca_obj; + + ib_status = ib_rearm_cq(h_cq, FALSE); + + p_free_wcl = &free_wcl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + + ib_status = ib_poll_cq(h_cq, &p_free_wcl, &p_done_cl); + + while(p_done_cl) + { + + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n" + "status....:%s\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id, + ib_get_wc_status_str(p_done_cl->status) )); + + + if (p_done_cl->wc_type == IB_WC_RECV) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("message length..:%d bytes\n", + p_done_cl->length )); + + id = (uint32_t)p_done_cl->wr_id; + + if (qp_type == IB_QPT_UNRELIABLE_DGRM) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvUD info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n" + "\tremote_qp..:x%x\n" + "\tpkey_index.:%d\n" + "\tremote_lid.:x%x\n" + "\tremote_sl..:x%x\n" + "\tpath_bits..:x%x\n", + p_done_cl->recv.ud.recv_opt, + p_done_cl->recv.ud.immediate_data, + CL_NTOH32(p_done_cl->recv.ud.remote_qp), + p_done_cl->recv.ud.pkey_index, + CL_NTOH16(p_done_cl->recv.ud.remote_lid), + p_done_cl->recv.ud.remote_sl, + p_done_cl->recv.ud.path_bits)); + } + else + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + } + + } + + p_free_wcl = p_done_cl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + i++; + + ib_status = ib_poll_cq(h_cq, &p_free_wcl, &p_done_cl); + } + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (total=%d)\n", + i, + p_ca_obj->cq_done) ); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +ib_api_status_t +rc_rdma_write_send( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr; + uint32_t msg_size; + ib_api_status_t ib_status = IB_SUCCESS; + alts_rdma_t *p_rdma_req; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_RDMA_WRITE; + + p_s_wr->send_opt = IB_SEND_OPT_SOLICITED ; + //IB_SEND_OPT_IMMEDIATE | + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED; + + p_s_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[reg_index].buffer; + p_s_wr->ds_array[0].lkey = p_ca_obj->mem_region[reg_index].lkey; + + p_s_wr->wr_id = reg_index; + p_s_wr->immediate_data = 0xfeedde00 + reg_index; + + p_rdma_req = (alts_rdma_t*)p_ca_obj->mem_region[reg_index].buffer; + p_s_wr->remote_ops.vaddr = p_rdma_req->vaddr; + p_s_wr->remote_ops.rkey = p_rdma_req->rkey; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******remote:vaddr(x%"PRIx64") rkey(x%x) len(%d)*****\n", + p_s_wr->remote_ops.vaddr, + p_s_wr->remote_ops.rkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[DEST_QP], + p_s_wr, + &p_send_failure_wr); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +rc_rdma_read_send( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr; + uint32_t 
msg_size; + ib_api_status_t ib_status = IB_SUCCESS; + alts_rdma_t *p_rdma_req; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_RDMA_READ; + + //p_s_wr->send_opt = IB_SEND_OPT_SOLICITED ; + //IB_SEND_OPT_IMMEDIATE | + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED; + + p_s_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[reg_index].buffer; + p_s_wr->ds_array[0].lkey = p_ca_obj->mem_region[reg_index].lkey; + + p_s_wr->wr_id = reg_index; + p_s_wr->immediate_data = 0xfeedde00 + reg_index; + + p_rdma_req = (alts_rdma_t*)p_ca_obj->mem_region[reg_index].buffer; + cl_memclr( p_rdma_req->msg, sizeof(p_rdma_req->msg) ); /* clear the 32-byte message area */ + p_s_wr->remote_ops.vaddr = p_rdma_req->vaddr; + p_s_wr->remote_ops.rkey = p_rdma_req->rkey; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******remote:vaddr(x%"PRIx64") rkey(x%x) len(%d)*****\n", + p_s_wr->remote_ops.vaddr, + p_s_wr->remote_ops.rkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[DEST_QP], + p_s_wr, + &p_send_failure_wr); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +process_response( + IN alts_cm_ca_obj_t *p_ca_obj, + IN alts_rdma_t *p_data, + IN uint32_t reg_index ) +{ + + switch(p_data->msg_type) + { + case 'W': + ALTS_PRINT( ALTS_DBG_INFO, ("RDMA Write requested\n" ) ); + p_data->msg_type = 'C'; + rc_rdma_write_send( p_ca_obj, reg_index ); + break; + case 'R': + ALTS_PRINT( ALTS_DBG_INFO, ("RDMA Read requested\n" ) ); + p_data->msg_type = 'C'; + rc_rdma_read_send( p_ca_obj, reg_index ); + break; + + case 'C': + ALTS_PRINT( ALTS_DBG_INFO, ("Msg completed. 
[%s]\n", + p_data->msg ) ); + break; + + default: + ALTS_PRINT(ALTS_DBG_ERROR, ("Bad RDMA msg!!!\n")); + break; + } + + +} + +void +rdma_cq_comp_cb( + void *cq_context, + ib_qp_type_t qp_type + ) +{ + ib_api_status_t ib_status; + uint32_t i = 0, id; + ib_wc_t *p_free_wcl, *p_done_cl= NULL; + alts_cm_ca_obj_t *p_ca_obj; + ib_cq_handle_t h_cq; + alts_rdma_t *p_data; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( qp_type ); + + CL_ASSERT(cq_context); + + h_cq = *((ib_cq_handle_t*)cq_context); + p_ca_obj = gp_ca_obj; + + ib_status = ib_rearm_cq(h_cq, FALSE); + + p_free_wcl = &free_wcl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + + ib_status = ib_poll_cq(h_cq, &p_free_wcl, &p_done_cl); + + while(p_done_cl) + { + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n" + "status....:%s\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id, + ib_get_wc_status_str(p_done_cl->status) )); + + if( p_done_cl->status == IB_WCS_SUCCESS ) + { + if (p_done_cl->wc_type == IB_WC_RECV) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("message length..:%d bytes\n", + p_done_cl->length )); + + id = (uint32_t)p_done_cl->wr_id; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + + if( p_ca_obj->rdma_enabled == TRUE ) + { + process_response( p_ca_obj, + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, + (uint32_t)p_done_cl->wr_id ); + } + } + else + if (p_done_cl->wc_type == IB_WC_RDMA_WRITE) + { + // convert request to read now + p_data = + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer; + p_data->msg_type = 'R'; + process_response( p_ca_obj, + p_data, + (uint32_t)p_done_cl->wr_id ); + } + else + if (p_done_cl->wc_type == IB_WC_RDMA_READ) + { + id = (uint32_t)p_done_cl->wr_id; + process_response( p_ca_obj, + (alts_rdma_t*)p_ca_obj->mem_region[p_done_cl->wr_id].buffer, + (uint32_t)p_done_cl->wr_id ); + } + } + + p_free_wcl = p_done_cl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + i++; + + ib_status = ib_poll_cq(h_cq, &p_free_wcl, &p_done_cl); + } + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (total=%d)\n", + i, + p_ca_obj->cq_done) ); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +cm_rc_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + rc_cm_cq_comp_cb (cq_context, IB_QPT_RELIABLE_CONN); +} + +void +cm_ud_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + rc_cm_cq_comp_cb(cq_context, IB_QPT_UNRELIABLE_DGRM); +} + +void +cm_rdma_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + rdma_cq_comp_cb(cq_context, IB_QPT_RELIABLE_CONN); +} + +void +rc_cm_cq_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +rc_cm_qp_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +rc_cm_qp_destroy_cb( + void *context + ) +{ + /* + * QP destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +ib_api_status_t +alts_create_test_resources( alts_cm_ca_obj_t *p_ca_obj ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( 
ALTS_DBG_VERBOSE ); + + gp_ca_obj = p_ca_obj; + + cl_event_init( &p_ca_obj->destroy_event, FALSE ); + + cl_memclr(&qp_create_attr, sizeof(ib_qp_create_t)); + + /* + * Allocate a PD + */ + ib_status = ib_alloc_pd( + p_ca_obj->h_ca, + IB_PDT_NORMAL, + p_ca_obj, //pd_context + &p_ca_obj->h_pd); + + CL_ASSERT(ib_status == IB_SUCCESS); + + /* + * Create CQ Attributes + */ + cq_create_attr.size = p_ca_obj->cq_size; + cq_create_attr.pfn_comp_cb = p_ca_obj->pfn_comp_cb; + cq_create_attr.h_wait_obj = NULL; + + if( p_ca_obj->rdma_enabled ) + { + cq_create_attr.pfn_comp_cb = cm_rdma_cq_comp_cb; + } + + ib_status = ib_create_cq( + p_ca_obj->h_ca, + &cq_create_attr, + &p_ca_obj->h_cq, + rc_cm_cq_err_cb, + &p_ca_obj->h_cq ); + CL_ASSERT(ib_status == IB_SUCCESS); + + + p_ca_obj->cq_size = cq_create_attr.size; + + if( p_ca_obj->num_cq > 1 ) + { + ib_status = ib_create_cq( + p_ca_obj->h_ca, + &cq_create_attr, + &p_ca_obj->h_cq_alt, + rc_cm_cq_err_cb, + &p_ca_obj->h_cq_alt ); + + CL_ASSERT(ib_status == IB_SUCCESS); + CL_ASSERT(p_ca_obj->cq_size == cq_create_attr.size); + } + + /* + * Create QP Attributes + */ + qp_create_attr.sq_depth = p_ca_obj->num_wrs; + qp_create_attr.rq_depth = p_ca_obj->num_wrs; + qp_create_attr.sq_sge = 1; + qp_create_attr.rq_sge = 1; + + if( p_ca_obj->num_cq > 1 ) + qp_create_attr.h_sq_cq = p_ca_obj->h_cq_alt; + else + qp_create_attr.h_sq_cq = p_ca_obj->h_cq; + + qp_create_attr.h_rq_cq = p_ca_obj->h_cq; + + qp_create_attr.sq_signaled = TRUE; + //qp_create_attr.sq_signaled = FALSE; + + qp_create_attr.qp_type = p_ca_obj->test_type; + + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + rc_cm_qp_err_cb, + &p_ca_obj->h_qp[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],rc_cm_qp_destroy_cb); + return (ib_status); + } + + if (p_ca_obj->is_loopback == TRUE) + { + // do loopback on same QP + p_ca_obj->h_qp[DEST_QP] = p_ca_obj->h_qp[SRC_QP]; + } + else + { + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + rc_cm_qp_err_cb, + &p_ca_obj->h_qp[DEST_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],rc_cm_qp_destroy_cb); + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + + //CL_ASSERT(ib_status == IB_SUCCESS); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! 
%s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[DEST_QP],rc_cm_qp_destroy_cb); + return (ib_status); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_rc_register_mem( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t size ) +{ + ib_mr_create_t mr_create = {0}; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + cl_memclr(&mr_create, sizeof(ib_mr_create_t)); + p_ca_obj->mem_region[reg_index].buffer = cl_zalloc(size); + CL_ASSERT (p_ca_obj->mem_region[reg_index].buffer); + + mr_create.vaddr = p_ca_obj->mem_region[reg_index].buffer; + mr_create.length = size; + mr_create.access_ctrl = + IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE; + + ib_status = ib_reg_mem( + p_ca_obj->h_pd, + &mr_create, + &p_ca_obj->mem_region[reg_index].lkey, + &p_ca_obj->mem_region[reg_index].rkey, + &p_ca_obj->mem_region[reg_index].mr_h); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; +} + +ib_api_status_t +alts_rc_deregister_mem( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index + ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if ( p_ca_obj->mem_region[reg_index].buffer != NULL ) + { + /* + * At times the buffer may have been allocated without a register + */ + if (p_ca_obj->mem_region[reg_index].mr_h) + ib_status = ib_dereg_mr(p_ca_obj->mem_region[reg_index].mr_h); + else + ib_status = IB_ERROR; + + CL_ASSERT(ib_status == IB_SUCCESS); + + if ( ib_status != IB_SUCCESS ) + { + //PRINT the error msg + } + + cl_free(p_ca_obj->mem_region[reg_index].buffer); + + p_ca_obj->mem_region[reg_index].buffer = NULL; + } + else + { + ib_status = IB_ERROR; + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +rc_multisend_post_sends( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_SEND; + + if( num_posts > 1 ) + { + p_s_wr->send_opt = IB_SEND_OPT_SOLICITED ; + } + else + { + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \ + IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ; + } + + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \ + IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ; + + + for (i = 0; i < num_posts; i++) + { + sprintf((char *)p_ca_obj->mem_region[i+reg_index].buffer,"hello %d", i); + + p_s_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_s_wr->ds_array[0].lkey = p_ca_obj->mem_region[i+reg_index].lkey; + + p_s_wr->wr_id = i+reg_index; + p_s_wr->immediate_data = 0xfeedde00 + i; + + p_s_wr->remote_ops.vaddr = 0; + p_s_wr->remote_ops.rkey = 0; + + //p_s_wr->dgrm.ud.h_av + if( (i > 0) && (i == ( num_posts - 1 ))) + { + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \ + IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ; + } + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d) send_opts(x%x)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length, + p_s_wr->send_opt )); + + ib_status = ib_post_send( + p_ca_obj->h_qp[SRC_QP], + p_s_wr, + &p_send_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +rc_multisend_post_recvs( + 
alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_recv_wr_t *p_r_wr, *p_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_r_wr = p_ca_obj->p_recv_wr; + + p_r_wr->p_next = NULL; + p_r_wr->ds_array[0].length = msg_size; + p_r_wr->num_ds = 1; + + for (i = 0; i < num_posts; i++) + { + p_r_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_r_wr->ds_array[0].lkey = p_ca_obj->mem_region[i+reg_index].lkey; + + p_r_wr->wr_id = i+reg_index; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_r_wr->ds_array[0].vaddr, + p_r_wr->ds_array[0].lkey, + p_r_wr->ds_array[0].length)); + + if (p_ca_obj->is_loopback == TRUE) + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[SRC_QP], + p_r_wr, + &p_failure_wr); + } + else + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[DEST_QP], + p_r_wr, + &p_failure_wr); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_rc_message_passing( + alts_cm_ca_obj_t *p_ca_obj, + ib_qp_type_t qp_type) +{ + uint32_t i,j, k; + ib_api_status_t ib_status = IB_SUCCESS; + ib_wc_t *p_free_wclist; + ib_wc_t *p_done_cl; + uint32_t id; + char *buff; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + p_ca_obj->wr_send_size = sizeof(ib_send_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + p_ca_obj->wr_recv_size = sizeof(ib_recv_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + + p_ca_obj->p_send_wr = &send_wr; + p_ca_obj->p_recv_wr = &recv_wr; + + p_ca_obj->p_send_wr->ds_array = &send_ds; + p_ca_obj->p_recv_wr->ds_array = &recv_ds; + + // receive + for (i=0; i < p_ca_obj->num_wrs; i++) + { + ib_status = alts_rc_register_mem( p_ca_obj, i, 4096); + + if ( ib_status != IB_SUCCESS ) + { + while( i-- ) + alts_rc_deregister_mem(p_ca_obj, i); + + return ib_status; + } + else + { + p_ca_obj->mem_region[i].my_lid = p_ca_obj->dlid; + } + } + + // send + for (k=0; k < p_ca_obj->num_wrs; k++) + { + ib_status = + alts_rc_register_mem( p_ca_obj, k + p_ca_obj->num_wrs, 4096); + + if ( ib_status != IB_SUCCESS ) + { + while( k-- ) + alts_rc_deregister_mem(p_ca_obj, k + p_ca_obj->num_wrs); + + while( i-- ) + alts_rc_deregister_mem(p_ca_obj, i); + + return ib_status; + } + else + { + p_ca_obj->mem_region[k].my_lid = p_ca_obj->slid; + } + } + + p_ca_obj->cq_done = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("++++++ dlid(x%x) src_port(%d) ====\n", + p_ca_obj->dlid, p_ca_obj->src_port_num)); + + if(ib_status == IB_SUCCESS) + { + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE); + rc_multisend_post_recvs( p_ca_obj, 0, p_ca_obj->num_wrs ); + + if( p_ca_obj->num_cq > 1 ) + ib_status = ib_rearm_cq(p_ca_obj->h_cq_alt, FALSE); + else + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE); + rc_multisend_post_sends( p_ca_obj, p_ca_obj->num_wrs, p_ca_obj->num_wrs ); + } + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + + cl_thread_suspend(3000); // 3 seconds + +//#if 0 + + if (!p_ca_obj->cq_done) + { + p_free_wclist = &free_wclist; + p_free_wclist->p_next = NULL; + p_done_cl = NULL; + j = 0; + + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl); + + while(p_done_cl) + { + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n" + "\tstatus..:%s\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id, + 
ib_get_wc_status_str(p_done_cl->status)));
+
+            if (p_done_cl->wc_type == IB_WC_RECV)
+            {
+                id = (uint32_t)p_done_cl->wr_id;
+                buff = (char *)p_ca_obj->mem_region[id].buffer;
+                if (qp_type == IB_QPT_UNRELIABLE_DGRM)
+                {
+                    ALTS_PRINT(ALTS_DBG_VERBOSE,
+                        ("---MSG--->%s\n",&buff[40]));
+                    ALTS_PRINT(ALTS_DBG_VERBOSE,
+                        ("RecvUD info:\n"
+                        "\trecv_opt...:x%x\n"
+                        "\timm_data...:x%x\n"
+                        "\tremote_qp..:x%x\n"
+                        "\tpkey_index.:%d\n"
+                        "\tremote_lid.:x%x\n"
+                        "\tremote_sl..:x%x\n"
+                        "\tpath_bits..:x%x\n"
+                        "\tsrc_lid....:x%x\n",
+                        p_done_cl->recv.ud.recv_opt,
+                        p_done_cl->recv.ud.immediate_data,
+                        CL_NTOH32(p_done_cl->recv.ud.remote_qp),
+                        p_done_cl->recv.ud.pkey_index,
+                        CL_NTOH16(p_done_cl->recv.ud.remote_lid),
+                        p_done_cl->recv.ud.remote_sl,
+                        p_done_cl->recv.ud.path_bits,
+                        CL_NTOH16(p_ca_obj->mem_region[id].my_lid)));
+                }
+                else
+                {
+                    ALTS_PRINT(ALTS_DBG_VERBOSE,
+                        ("RecvRC info:\n"
+                        "\trecv_opt...:x%x\n"
+                        "\timm_data...:x%x\n",
+                        p_done_cl->recv.conn.recv_opt,
+                        p_done_cl->recv.conn.immediate_data ));
+                }
+            }
+
+            /* Return the polled entry to the free list and poll again. */
+            p_free_wclist = p_done_cl;
+            p_free_wclist->p_next = NULL;
+            p_done_cl = NULL;
+            j++;
+            ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl);
+        }
+
+        ALTS_PRINT( ALTS_DBG_INFO,
+            ("Number of items polled from CQ is = %d\n", j) );
+
+        p_ca_obj->cq_done += j;
+
+        ib_status = IB_SUCCESS;
+    }
+//#endif
+
+    while( i-- )
+        alts_rc_deregister_mem(p_ca_obj, i);
+
+    while( k-- )
+        alts_rc_deregister_mem(p_ca_obj, k + p_ca_obj->num_wrs);
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+    return ib_status;
+}
+
+
+// cm cbs
+void
+alts_cm_apr_cb(
+    IN ib_cm_apr_rec_t *p_cm_apr_rec )
+{
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    UNUSED_PARAM( p_cm_apr_rec );
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_dreq_cb(
+    IN ib_cm_dreq_rec_t *p_cm_dreq_rec )
+{
+    ib_api_status_t ib_status;
+    alts_cm_ca_obj_t *p_ca_obj;
+    ib_cm_drep_t drep;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    CL_ASSERT( p_cm_dreq_rec );
+
+    p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_dreq_rec->qp_context;
+    CL_ASSERT( p_ca_obj );
+
+    p_ca_obj->cm_cbs++; // count crows
+
+    // send a drep
+    cl_memclr(&drep, sizeof(ib_cm_drep_t));
+
+    ib_status = ib_cm_drep(p_cm_dreq_rec->h_cm_dreq, &drep);
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_rep_cb(
+    IN ib_cm_rep_rec_t *p_cm_rep_rec )
+{
+    ib_api_status_t ib_status;
+    alts_cm_ca_obj_t *p_ca_obj;
+    ib_cm_rtu_t *p_cm_rtu;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    CL_ASSERT( p_cm_rep_rec );
+
+    if(( p_cm_rep_rec->qp_type == IB_QPT_RELIABLE_CONN ) ||
+        ( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_CONN ))
+    {
+        p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_rep_rec->qp_context;
+        CL_ASSERT( p_ca_obj );
+
+        p_ca_obj->cm_cbs++; // count crows
+
+        p_cm_rtu = &p_ca_obj->rtu_src;
+
+        cl_memclr( p_cm_rtu, sizeof(ib_cm_rtu_t) );
+
+        p_cm_rtu->access_ctrl = IB_AC_LOCAL_WRITE;
+
+        if( p_ca_obj->rdma_enabled == TRUE )
+        {
+            p_cm_rtu->access_ctrl |= IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;
+        }
+
+        if( p_ca_obj->p_ca_attr->modify_wr_depth )
+        {
+            p_cm_rtu->sq_depth = 16;
+            p_cm_rtu->rq_depth = 16;
+        }
+        p_cm_rtu->pfn_cm_apr_cb = alts_cm_apr_cb;
+        p_cm_rtu->pfn_cm_dreq_cb = alts_cm_dreq_cb;
+
+        ib_status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, p_cm_rtu );
+
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("ib_cm_rtu returned %s\n", ib_get_err_str( ib_status )) );
+    }
+    else if ( p_cm_rep_rec->qp_type == IB_QPT_UNRELIABLE_DGRM )
+    {
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("sidr rep in\n"
+            "\tstatus........:x%x\n"
+            "\tremote_qp.....:x%x\n"
+            "\tremote_qkey...:x%x\n",
+            p_cm_rep_rec->status,
+            p_cm_rep_rec->remote_qp,
+            p_cm_rep_rec->remote_qkey ));
+
+        p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_rep_rec->sidr_context;
+        CL_ASSERT( p_ca_obj );
+
+        p_ca_obj->cm_cbs++; // count crows
+    }
+    else
+    {
+        // unexpected QP type
+        return;
+    }
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
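+/*
+ * CM callback chain exercised by these tests: the client's ib_cm_req()
+ * fires alts_cm_req_cb on the listening side, which answers with
+ * ib_cm_rep(), ib_cm_mra(), ib_cm_rej() or ib_cm_handoff(); the REP
+ * fires alts_cm_rep_cb above, which answers with ib_cm_rtu(); the RTU
+ * fires alts_cm_rtu_cb, completing the connection.  Teardown runs
+ * ib_cm_dreq() -> alts_cm_dreq_cb -> ib_cm_drep().  Each callback
+ * increments p_ca_obj->cm_cbs, which alts_cm_client_server later
+ * compares against the expected count (3 for RC/UC, 2 for SIDR).
+ */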
+
+void
+alts_cm_rej_cb(
+    IN ib_cm_rej_rec_t *p_cm_rej_rec )
+{
+    alts_cm_ca_obj_t *p_ca_obj;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_rej_rec->qp_context;
+
+    // only use context if qp was set up
+    if( p_ca_obj )
+    {
+        p_ca_obj->cm_errs++; // count crows
+    }
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_mra_cb(
+    IN ib_cm_mra_rec_t *p_cm_mra_rec )
+{
+    alts_cm_ca_obj_t *p_ca_obj;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_mra_rec->qp_context;
+    CL_ASSERT( p_ca_obj );
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_err_cb(
+    IN ib_listen_err_rec_t *p_err_rec )
+{
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    UNUSED_PARAM( p_err_rec );
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_rtu_cb(
+    IN ib_cm_rtu_rec_t *p_cm_rtu_rec )
+{
+    alts_cm_ca_obj_t *p_ca_obj;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    CL_ASSERT( p_cm_rtu_rec );
+
+    p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_rtu_rec->qp_context;
+    CL_ASSERT( p_ca_obj );
+
+    p_ca_obj->cm_cbs++; // count crows
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_lap_cb(
+    IN ib_cm_lap_rec_t *p_cm_lap_rec )
+{
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    UNUSED_PARAM( p_cm_lap_rec );
+
+    ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+void
+alts_cm_req_cb(
+    IN ib_cm_req_rec_t *p_cm_req_rec )
+{
+    ib_api_status_t ib_status;
+    alts_cm_ca_obj_t *p_ca_obj;
+    ib_cm_rep_t *p_cm_rep;
+    ib_cm_mra_t cm_mra;
+    ib_cm_rej_t cm_rej;
+
+    ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+    CL_ASSERT( p_cm_req_rec );
+
+    p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_req_rec->context;
+    CL_ASSERT( p_ca_obj );
+
+    if ( p_cm_req_rec->qp_type == IB_QPT_RELIABLE_CONN )
+    {
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("rc connect request in\n"));
+    }
+    else if ( p_cm_req_rec->qp_type == IB_QPT_UNRELIABLE_DGRM )
+    {
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("sidr connect request in\n"));
+    }
+    else if ( p_cm_req_rec->qp_type == IB_QPT_UNRELIABLE_CONN )
+    {
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("unreliable connect request in\n"));
+    }
+    else
+    {
+        return;
+    }
+
+    if( p_ca_obj->rej_test )
+    {
+        ALTS_PRINT( ALTS_DBG_VERBOSE,
+            ("rejecting request\n"));
+        cl_memclr( &cm_rej, sizeof( ib_cm_rej_t ) );
+        cm_rej.rej_status = IB_REJ_USER_DEFINED;
+        ib_cm_rej( p_cm_req_rec->h_cm_req, &cm_rej );
+        return;
+    }
+
+    /* initiate handoff process */
+    if( p_ca_obj->handoff == TRUE )
+    {
+        /* set it to false to stop all other transactions that happen
+           in the same cb */
+        p_ca_obj->handoff = FALSE;
+
+        ib_status = ib_cm_handoff( p_cm_req_rec->h_cm_req,
+            p_ca_obj->handoff_svc_id );
+        if( ib_status != IB_SUCCESS )
+        {
+            ALTS_PRINT( ALTS_DBG_ERROR,
+                ("handoff failed with %s!\n", ib_get_err_str(ib_status)) );
+        }
+        else
+        {
+            ALTS_PRINT( ALTS_DBG_VERBOSE,
+                ("CM handoff successful\n") );
+        }
+        return;
+    }
+
+    p_ca_obj->cm_cbs++; // count crows
+
+    p_cm_rep = &p_ca_obj->rep_dest;
+    cl_memclr( p_cm_rep, sizeof(ib_cm_rep_t));
+
+    p_ca_obj->h_cm_req = p_cm_req_rec->h_cm_req;
+    p_cm_rep->qp_type = p_cm_req_rec->qp_type;
+    p_cm_rep->h_qp = p_ca_obj->h_qp[DEST_QP];
+
+    // class specific
+    if (( p_cm_req_rec->qp_type == IB_QPT_RELIABLE_CONN ) ||
+        ( p_cm_req_rec->qp_type == IB_QPT_UNRELIABLE_CONN ))
+    {
+        // rc, uc & rd
+        p_cm_rep->access_ctrl = IB_AC_LOCAL_WRITE; // | IB_AC_RDMA_READ |
IB_AC_RDMA_WRITE; + + /* Verify that the CA supports modify_wr_depth after QP creation. */ + if( p_ca_obj->p_ca_attr->modify_wr_depth ) + { + p_cm_rep->sq_depth = p_ca_obj->num_wrs; + p_cm_rep->rq_depth = p_ca_obj->num_wrs; + } + + p_cm_rep->init_depth = 1; + p_cm_rep->target_ack_delay = 10; + p_cm_rep->failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED; + p_cm_rep->flow_ctrl = TRUE; + p_cm_rep->rnr_nak_timeout = 7; + p_cm_rep->rnr_retry_cnt = 7; + p_cm_rep->pfn_cm_rej_cb = alts_cm_rej_cb; + p_cm_rep->pfn_cm_mra_cb = alts_cm_mra_cb; + p_cm_rep->pfn_cm_rtu_cb = alts_cm_rtu_cb; + p_cm_rep->pfn_cm_lap_cb = alts_cm_lap_cb; + p_cm_rep->pfn_cm_dreq_cb = alts_cm_dreq_cb; + + if( p_ca_obj->mra_test == TRUE ) + { + // send a MRA to test + cm_mra.mra_length = 0; + cm_mra.p_mra_pdata = NULL; + cm_mra.svc_timeout = 21; // equals 8.5 sec wait + packet lifetime + + ib_status = ib_cm_mra( p_cm_req_rec->h_cm_req, &cm_mra ); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_mra returned %s\n", ib_get_err_str( ib_status )) ); + } + else + { + ib_status = ib_cm_rep( p_cm_req_rec->h_cm_req, p_cm_rep ); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_rep returned %s\n", ib_get_err_str( ib_status )) ); + } + } + else + { + // ud + if( p_cm_req_rec->pkey != p_ca_obj->p_dest_port_attr->p_pkey_table[0]) + p_cm_rep->status = IB_SIDR_REJECT; + else + p_cm_rep->status = IB_SIDR_SUCCESS; + + ib_status = ib_cm_rep( p_cm_req_rec->h_cm_req, p_cm_rep ); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +ib_api_status_t +alts_cm_client_server( + alts_cm_ca_obj_t *p_ca_obj ) +{ + ib_api_status_t ib_status = IB_ERROR; + ib_cm_req_t *p_req_server, *p_req_client; + ib_path_rec_t *p_path_server, *p_path_client; + ib_cm_listen_t *p_listen; + cl_status_t cl_status; + cl_thread_t mra_thread; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if( p_ca_obj->mra_test == TRUE ) + { + // create a thread to process MRA + cl_status = cl_event_init( &p_ca_obj->mra_event, TRUE ); + if( cl_status != CL_SUCCESS ) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("cl_event_init failed !\n") ); + return IB_ERROR; + } + cl_memclr( &mra_thread, sizeof(cl_thread_t) ); + cl_status = cl_thread_init( &mra_thread, __mra_thread, p_ca_obj, "cm_altsTH" ); + if( cl_status != CL_SUCCESS ) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("cl_thread_init failed !\n") ); + return IB_ERROR; + } + } + + // setup data pointers + p_req_server = &p_ca_obj->req_dest; + p_req_client = &p_ca_obj->req_src; + + p_path_server = &p_ca_obj->path_dest; + p_path_client = &p_ca_obj->path_src; + + p_listen = &p_ca_obj->listen; + + p_ca_obj->cm_cbs = 0; + + // setup server + p_req_server->h_qp = p_ca_obj->h_qp[DEST_QP]; + + cl_memclr( p_listen, sizeof(ib_cm_listen_t) ); + + p_listen->qp_type = p_ca_obj->test_type; + p_listen->svc_id = 1; + p_listen->ca_guid = p_ca_obj->p_ca_attr->ca_guid; + p_listen->port_guid = p_ca_obj->p_dest_port_attr->port_guid; + p_listen->lid = p_ca_obj->dlid; + p_listen->pkey = p_ca_obj->p_dest_port_attr->p_pkey_table[0]; + p_listen->pfn_cm_req_cb = alts_cm_req_cb; + + ib_status = ib_cm_listen(h_al, p_listen, alts_cm_err_cb, + p_ca_obj, &p_ca_obj->h_cm_listen ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_listen failed with status = %d\n", ib_status) ); + goto cm_end; + } + + // setup handoff server if requested + if( p_ca_obj->handoff == TRUE ) + { + p_listen->svc_id = 2; + p_ca_obj->handoff_svc_id = 2; + + ib_status = ib_cm_listen(h_al, p_listen, alts_cm_err_cb, + p_ca_obj, &p_ca_obj->h_cm_listen_handoff ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + 
("ib_cm_listen failed for handoff with status = %d\n", + ib_status) ); + goto cm_end; + } + } + + // setup client + cl_memclr( p_path_client, sizeof(ib_path_rec_t) ); + p_path_client->sgid.unicast.interface_id = + p_ca_obj->p_src_port_attr->p_gid_table->unicast.interface_id; + p_path_client->sgid.unicast.prefix = + p_ca_obj->p_src_port_attr->p_gid_table->unicast.prefix; + + p_path_client->dgid.unicast.interface_id = + p_ca_obj->p_dest_port_attr->p_gid_table->unicast.interface_id; + p_path_client->dgid.unicast.prefix = + p_ca_obj->p_dest_port_attr->p_gid_table->unicast.prefix; + + p_path_client->slid = p_ca_obj->slid; + p_path_client->dlid = p_ca_obj->dlid; + p_path_client->num_path = 1; + p_path_client->pkey = p_ca_obj->p_src_port_attr->p_pkey_table[0]; + p_path_client->mtu = IB_MTU_LEN_256; + p_path_client->pkt_life = 10; + + cl_memclr( p_req_client, sizeof(ib_cm_req_t) ); + + p_req_client->qp_type = p_ca_obj->test_type; + p_req_client->svc_id = 1; + + p_req_client->max_cm_retries = 3; + p_req_client->p_primary_path = p_path_client; + p_req_client->pfn_cm_rep_cb = alts_cm_rep_cb; + + if( p_req_client->qp_type == IB_QPT_UNRELIABLE_DGRM ) + { + p_req_client->h_al = h_al; + p_req_client->sidr_context = p_ca_obj; + p_req_client->timeout_ms = 1000; /* 1 sec */ + p_req_client->pkey = p_ca_obj->p_dest_port_attr->p_pkey_table[0]; + } + else + { + p_req_client->resp_res = 3; + p_req_client->init_depth = 1; + p_req_client->remote_resp_timeout = 11; + p_req_client->retry_cnt = 3; + p_req_client->rnr_nak_timeout = 7; + p_req_client->rnr_retry_cnt = 7; + p_req_client->pfn_cm_rej_cb = alts_cm_rej_cb; + p_req_client->pfn_cm_mra_cb = alts_cm_mra_cb; + p_req_client->h_qp = p_ca_obj->h_qp[SRC_QP]; + p_req_client->local_resp_timeout = 12; + } + + ib_status = ib_cm_req(p_req_client); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_req failed with status = %d\n", ib_status) ); + goto cm_end; + } + + if( p_ca_obj->mra_test == TRUE ) + cl_thread_suspend( 10000 ); + else + cl_thread_suspend( 3000 ); + + + switch( p_req_client->qp_type ) + { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + if( p_ca_obj->rej_test == TRUE ) + { + if (!p_ca_obj->cm_errs) + ib_status = IB_ERROR; + } + else if( p_ca_obj->cm_cbs != 3 ) + ib_status = IB_ERROR; + break; + + case IB_QPT_UNRELIABLE_DGRM: + if( p_ca_obj->cm_cbs != 2 ) + ib_status = IB_ERROR; + break; + + default: + ib_status = IB_ERROR; + break; + } + + if( ib_status == IB_SUCCESS ) + { + // query QPs + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("Src qp_state(%d) dest_qp(x%x) dlid(x%x)\n", + p_ca_obj->qp_attr[SRC_QP].state, + p_ca_obj->qp_attr[SRC_QP].dest_num, + p_ca_obj->qp_attr[SRC_QP].primary_av.dlid ) ); + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("Dest qp_state(%d) dest_qp(%x) dlid(x%x)\n", + p_ca_obj->qp_attr[DEST_QP].state, + p_ca_obj->qp_attr[DEST_QP].dest_num, + p_ca_obj->qp_attr[DEST_QP].primary_av.dlid ) ); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("Src sq_psn(x%x) rq_psn(x%x)\n" + "Dest sq_psn(x%x) rq_psn(x%x)\n", + p_ca_obj->qp_attr[SRC_QP].sq_psn, p_ca_obj->qp_attr[SRC_QP].rq_psn, + p_ca_obj->qp_attr[DEST_QP].sq_psn, + p_ca_obj->qp_attr[DEST_QP].rq_psn ) ); + + // return status + ib_status = IB_SUCCESS; + } + + +cm_end: + if( p_ca_obj->mra_test == TRUE ) + { + cl_thread_destroy( &mra_thread ); + cl_event_destroy( &p_ca_obj->mra_event ); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return 
ib_status; +} + +void +alts_cm_drep_cb( + IN ib_cm_drep_rec_t *p_cm_drep_rec ) +{ + alts_cm_ca_obj_t *p_ca_obj; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT( p_cm_drep_rec ); + + p_ca_obj = (alts_cm_ca_obj_t* __ptr64)p_cm_drep_rec->qp_context; + CL_ASSERT( p_ca_obj ); + + p_ca_obj->cm_cbs++; // count crows + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +alts_listen_destroy_cb( + IN void *context ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT( context ); + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +ib_api_status_t +alts_cm_destroy( + alts_cm_ca_obj_t *p_ca_obj ) +{ + ib_api_status_t ib_status = IB_ERROR; + ib_cm_dreq_t *p_dreq_client; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + p_ca_obj->cm_cbs = 0; + + // only dreq for connected types + if( p_ca_obj->test_type != IB_QPT_UNRELIABLE_DGRM ) + { + // setup data pointers + p_dreq_client = &p_ca_obj->dreq_src; + + cl_memclr(p_dreq_client, sizeof(ib_cm_dreq_t)); + p_dreq_client->h_qp = p_ca_obj->h_qp[SRC_QP]; + p_dreq_client->pfn_cm_drep_cb = alts_cm_drep_cb; + + ib_status = ib_cm_dreq(p_dreq_client); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_dreq failed with status = %d\n", ib_status) ); + goto cm_destroy_end; + } + + cl_thread_suspend( 1000 ); + + if (p_ca_obj->cm_cbs) + { + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("ib_cm_dreq successful\n") ); + } + + p_ca_obj->cm_cbs = 0; + } + + ib_status = ib_cm_cancel(p_ca_obj->h_cm_listen, alts_listen_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_cancel failed with status = %d\n", ib_status) ); + } + +cm_destroy_end: + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_cm_rc_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + 
("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // run the test + ib_status = alts_rc_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + cl_thread_suspend(1000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +ib_api_status_t +alts_cm_rc_rej_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + p_ca_obj->rej_test = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // destroy connection + //ib_status = alts_cm_destroy(p_ca_obj); + ib_status = ib_cm_cancel(p_ca_obj->h_cm_listen, alts_listen_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_cancel failed with status = %d\n", ib_status) ); + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + 
cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +ib_api_status_t +alts_cm_handoff_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + p_ca_obj->rej_test = FALSE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Create handoff service + */ + p_ca_obj->handoff = TRUE; + + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + // destroy handoff listen + if( p_ca_obj->h_cm_listen_handoff ) + { + ib_status = ib_cm_cancel(p_ca_obj->h_cm_listen_handoff, + alts_listen_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_cm_cancel failed with status = %d\n", ib_status) ); + } + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_cm_rc_flush_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* 
Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 4; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // run the test + ib_status = alts_rc_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + cl_thread_suspend(3000); /* 1 sec */ + + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE); + if (p_ca_obj->cq_done == 8) + rc_multisend_post_recvs( p_ca_obj, 0, p_ca_obj->num_wrs ); + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + cl_thread_suspend(4000); /* 1 sec */ + + /* force a cq completion callback to overcome interrupt issue */ + /* Intel Gen1 hardware does not generate an interrupt cb for a + qp set to error state */ + /* + if(p_ca_obj->cq_done == 8) + rc_cm_cq_comp_cb(p_ca_obj); + */ + + if (p_ca_obj->cq_done == 12) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_cm_activate_qp( + alts_cm_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp + ) +{ + + ib_qp_mod_t qp_mod_attr = {0}; + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if(p_ca_obj->is_src == 1) + qp_mod_attr.state.init.primary_port = p_ca_obj->src_port_num; + else + qp_mod_attr.state.init.primary_port = p_ca_obj->dest_port_num; + + qp_mod_attr.state.init.qkey = p_ca_obj->qkey; + qp_mod_attr.state.init.pkey_index = 0x0; + qp_mod_attr.state.init.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_MW_BIND; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******** port num = %d ***************\n", + qp_mod_attr.state.init.primary_port)); + + qp_mod_attr.req_state = IB_QPS_INIT; + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + // Time to query the QP + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + 
&p_ca_obj->qp_attr[SRC_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + + + // transition to RTR + cl_memclr(&qp_mod_attr, sizeof(ib_qp_mod_t)); + + qp_mod_attr.state.rtr.opts = 0; + qp_mod_attr.state.rtr.rq_psn = CL_NTOH32(0x00000001); + + switch ( p_ca_obj->test_type ) + { + case IB_QPT_RELIABLE_CONN: + case IB_QPT_UNRELIABLE_CONN: + qp_mod_attr.state.rtr.opts = IB_MOD_QP_PRIMARY_AV; + break; + default: + break; + } + + if (p_ca_obj->is_src == 1) + { + if (p_ca_obj->is_loopback == TRUE) + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[SRC_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->src_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->slid; + } + else + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[DEST_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->src_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->dlid; + } + } + else + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[SRC_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->dest_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->slid; + } + + qp_mod_attr.state.rtr.primary_av.sl = 0; + qp_mod_attr.state.rtr.primary_av.grh_valid = 0; //Set to false + + qp_mod_attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + qp_mod_attr.state.rtr.primary_av.path_bits = 0; + + qp_mod_attr.state.rtr.primary_av.conn.path_mtu = 1; + qp_mod_attr.state.rtr.primary_av.conn.local_ack_timeout = 7; + qp_mod_attr.state.rtr.primary_av.conn.seq_err_retry_cnt = 7; + qp_mod_attr.state.rtr.primary_av.conn.rnr_retry_cnt = 7; + qp_mod_attr.state.rtr.rq_psn = CL_NTOH32(0x00000001); + qp_mod_attr.state.rtr.resp_res = 7; //32; + qp_mod_attr.state.rtr.rnr_nak_timeout = 7; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("****RTR***** dlid = x%x (x%x) *port_num = %d *dest_qp = %d ***\n", + qp_mod_attr.state.rtr.primary_av.dlid, + CL_NTOH16(qp_mod_attr.state.rtr.primary_av.dlid), + qp_mod_attr.state.rtr.primary_av.port_num, + CL_NTOH32(qp_mod_attr.state.rtr.dest_qp) )); + + qp_mod_attr.req_state = IB_QPS_RTR; + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[SRC_QP]); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + } + + cl_memclr(&qp_mod_attr, sizeof(ib_qp_mod_t)); + + qp_mod_attr.state.rts.sq_psn = CL_NTOH32(0x00000001); + + // NOTENOTE: Confirm the below time out settings + qp_mod_attr.state.rts.retry_cnt = 7; + qp_mod_attr.state.rts.rnr_retry_cnt = 7; + qp_mod_attr.state.rts.rnr_nak_timeout = 7; + qp_mod_attr.state.rts.local_ack_timeout = 7; + qp_mod_attr.state.rts.init_depth = 3; //3; + + qp_mod_attr.req_state = IB_QPS_RTS; + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[SRC_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + + if (p_ca_obj->is_loopback == TRUE) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return IB_SUCCESS; +} + + +ib_api_status_t +alts_rc_no_cm_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; 
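+    /* Unlike the other RC tests, this one brings the QPs to RTS by hand
+     * through alts_cm_activate_qp() (INIT -> RTR -> RTS modifies) rather
+     * than connecting them with the CM REQ/REP/RTU exchange. */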
+ alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_cm_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + p_ca_obj->is_src = 0; + ib_status = alts_cm_activate_qp(p_ca_obj, p_ca_obj->h_qp[DEST_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + + // run the test + ib_status = alts_rc_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + cl_thread_suspend(3000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + + +ib_api_status_t +rc_rdma_post_recvs( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_recv_wr_t *p_r_wr, *p_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_r_wr = p_ca_obj->p_recv_wr; + + p_r_wr->p_next = NULL; + p_r_wr->ds_array[0].length = msg_size; + p_r_wr->num_ds = 1; + + // post on recv and send side + for (i = 0; i < num_posts; i++) + { + p_r_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_r_wr->ds_array[0].lkey = 
p_ca_obj->mem_region[i+reg_index].lkey; + + p_r_wr->wr_id = i+reg_index; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_r_wr->ds_array[0].vaddr, + p_r_wr->ds_array[0].lkey, + p_r_wr->ds_array[0].length)); + + ib_status = ib_post_recv( + p_ca_obj->h_qp[DEST_QP], + p_r_wr, + &p_failure_wr); + + + p_r_wr->ds_array[0].vaddr = (uintn_t) + p_ca_obj->mem_region[i+reg_index+num_posts].buffer; + p_r_wr->ds_array[0].lkey = + p_ca_obj->mem_region[i+reg_index+num_posts].lkey; + + p_r_wr->wr_id = i+reg_index+num_posts; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_r_wr->ds_array[0].vaddr, + p_r_wr->ds_array[0].lkey, + p_r_wr->ds_array[0].length)); + + ib_status = ib_post_recv( + p_ca_obj->h_qp[SRC_QP], + p_r_wr, + &p_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +rc_rdma_post_sends( + alts_cm_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts, + uint32_t rdma_index, + char rdma_type ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + alts_rdma_t *p_rdma_req; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_SEND; + + p_s_wr->send_opt = IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ; + + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | + IB_SEND_OPT_SOLICITED ; + + + for (i = 0; i < num_posts; i++) + { + p_rdma_req = (alts_rdma_t*)p_ca_obj->mem_region[i+reg_index].buffer; + p_rdma_req->msg_type = rdma_type; // write or read + + p_rdma_req->vaddr = (uintn_t)(p_ca_obj->mem_region[i+rdma_index].buffer); + p_rdma_req->rkey = p_ca_obj->mem_region[i+rdma_index].rkey; + sprintf((char *)p_rdma_req->msg,"hello %d", i); + + p_s_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_s_wr->ds_array[0].lkey = p_ca_obj->mem_region[i+reg_index].lkey; + + p_s_wr->wr_id = i+reg_index; + p_s_wr->immediate_data = 0xfeedde00 + i; + + p_s_wr->remote_ops.vaddr = 0; + p_s_wr->remote_ops.rkey = 0; + + //p_s_wr->dgrm.ud.h_av + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[SRC_QP], + p_s_wr, + &p_send_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_rc_rdma_message_passing( + alts_cm_ca_obj_t *p_ca_obj, + ib_qp_type_t qp_type) +{ + uint32_t i, j, k; + ib_api_status_t ib_status = IB_SUCCESS; + ib_wc_t *p_free_wclist; + ib_wc_t *p_done_cl; + uint32_t id; + char *buff; + alts_rdma_t *p_rdma_req; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + p_ca_obj->wr_send_size = sizeof(ib_send_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + p_ca_obj->wr_recv_size = sizeof(ib_recv_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + + p_ca_obj->p_send_wr = &send_wr; + p_ca_obj->p_recv_wr = &recv_wr; + + p_ca_obj->p_send_wr->ds_array = &send_ds; + p_ca_obj->p_recv_wr->ds_array = &recv_ds; + + // receive + for (i=0; i < p_ca_obj->num_wrs * 2; i++) + { + ib_status = alts_rc_register_mem( p_ca_obj, i, 4096); + + if ( ib_status != IB_SUCCESS ) + { + while( i-- ) + alts_rc_deregister_mem(p_ca_obj, i); + + return ib_status; + } + else + 
{ + p_ca_obj->mem_region[i].my_lid = p_ca_obj->dlid; + } + } + + // send + for (k=0; k < p_ca_obj->num_wrs * 2; k++) + { + ib_status = + alts_rc_register_mem( p_ca_obj, k + (p_ca_obj->num_wrs * 2), 4096); + + if ( ib_status != IB_SUCCESS ) + { + while( k-- ) + alts_rc_deregister_mem(p_ca_obj, k + (p_ca_obj->num_wrs * 2) ); + + while( i-- ) + alts_rc_deregister_mem(p_ca_obj, i); + + return ib_status; + } + else + { + p_ca_obj->mem_region[k].my_lid = p_ca_obj->slid; + } + } + + p_ca_obj->cq_done = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("++++++ dlid(x%x) src_port(%d) ====\n", + p_ca_obj->dlid, p_ca_obj->src_port_num)); + + if(ib_status == IB_SUCCESS) + { + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE); + rc_rdma_post_recvs( p_ca_obj, 0, 1 ); // p_ca_obj->num_wrs + + if( p_ca_obj->num_cq > 1 ) + ib_status = ib_rearm_cq(p_ca_obj->h_cq_alt, FALSE); + else + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE); + + rc_rdma_post_sends( p_ca_obj, p_ca_obj->num_wrs * 2, 1, + p_ca_obj->num_wrs, 'W' ); + // send only one for now + //p_ca_obj->num_wrs ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + + cl_thread_suspend(1000); // 10 seconds + + // check for rdma recv completion + p_rdma_req = (alts_rdma_t*)p_ca_obj->mem_region[p_ca_obj->num_wrs].buffer; + if( p_rdma_req->msg_type != 'C') // write completed + { + ib_status = IB_ERROR; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RDMA_Write failed\n")); + } + else + { + p_ca_obj->cq_done++; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RDMA_Write success\n")); + } + } + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + + cl_thread_suspend(3000); // 10 seconds + +//#if 0 + + if (!p_ca_obj->cq_done) + { + p_free_wclist = &free_wclist; + p_free_wclist->p_next = NULL; + p_done_cl = NULL; + j = 0; + + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl); + + while(p_done_cl) + { + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n" + "\tstatus..:%s\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id, + ib_get_wc_status_str(p_done_cl->status))); + + if (p_done_cl->wc_type == IB_WC_RECV) + { + id = (uint32_t)p_done_cl->wr_id; + buff = (char *)p_ca_obj->mem_region[id].buffer; + if (qp_type == IB_QPT_UNRELIABLE_DGRM) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("---MSG--->%s\n",&buff[40])); + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvUD info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n" + "\tremote_qp..:x%x\n" + "\tpkey_index.:%d\n" + "\tremote_lid.:x%x\n" + "\tremote_sl..:x%x\n" + "\tpath_bits..:x%x\n" + "\tsrc_lid....:x%x\n", + p_done_cl->recv.ud.recv_opt, + p_done_cl->recv.ud.immediate_data, + CL_NTOH32(p_done_cl->recv.ud.remote_qp), + p_done_cl->recv.ud.pkey_index, + CL_NTOH16(p_done_cl->recv.ud.remote_lid), + p_done_cl->recv.ud.remote_sl, + p_done_cl->recv.ud.path_bits, + CL_NTOH16(p_ca_obj->mem_region[id].my_lid))); + } + else + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + } + + } + + p_free_wclist = p_done_cl; + p_free_wclist->p_next = NULL; + p_done_cl = NULL; + j++; + p_done_cl = NULL; + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl); + } + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ is = %d\n", j) ); + + p_ca_obj->cq_done += j; + + ib_status = IB_SUCCESS; + } + + + while( i-- ) + alts_rc_deregister_mem(p_ca_obj, i); + + while( k-- ) + alts_rc_deregister_mem(p_ca_obj, k + 
(p_ca_obj->num_wrs * 2)); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_cm_rc_rdma_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 2; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->rdma_enabled = TRUE; + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + //cl_thread_suspend( 30000 ); + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // query qp_info + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("SRC QP Info\n" + "\tstate.........:%d\n" + "\tpri_port......:%d\n" + "\tqp_num........:x%x\n" + "\tdest_qp_num...:x%x\n" + "\taccess_ctl....:x%x\n" + "\tsq_signalled..:%d\n" + "\tsq_psn........:x%x\n" + "\trq_psn........:x%x\n" + "\tresp_res......:x%x\n" + "\tinit_depth....:x%x\n" + "\tsq_depth......:x%x\n" + "\trq_depth......:x%x\n", + p_ca_obj->qp_attr[SRC_QP].state, + p_ca_obj->qp_attr[SRC_QP].primary_port, + p_ca_obj->qp_attr[SRC_QP].num, + p_ca_obj->qp_attr[SRC_QP].dest_num, + p_ca_obj->qp_attr[SRC_QP].access_ctrl, + p_ca_obj->qp_attr[SRC_QP].sq_signaled, + p_ca_obj->qp_attr[SRC_QP].sq_psn, + p_ca_obj->qp_attr[SRC_QP].rq_psn, + p_ca_obj->qp_attr[SRC_QP].resp_res, + p_ca_obj->qp_attr[SRC_QP].init_depth, + p_ca_obj->qp_attr[SRC_QP].sq_depth, + p_ca_obj->qp_attr[SRC_QP].rq_depth )); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("DEST QP Info\n" + "\tstate.........:%d\n" + "\tpri_port......:%d\n" + "\tqp_num........:x%x\n" + "\tdest_qp_num...:x%x\n" + "\taccess_ctl....:x%x\n" + 
"\tsq_signalled..:%d\n" + "\tsq_psn........:x%x\n" + "\trq_psn........:x%x\n" + "\tresp_res......:x%x\n" + "\tinit_depth....:x%x\n" + "\tsq_depth......:x%x\n" + "\trq_depth......:x%x\n", + p_ca_obj->qp_attr[DEST_QP].state, + p_ca_obj->qp_attr[DEST_QP].primary_port, + p_ca_obj->qp_attr[DEST_QP].num, + p_ca_obj->qp_attr[DEST_QP].dest_num, + p_ca_obj->qp_attr[DEST_QP].access_ctrl, + p_ca_obj->qp_attr[DEST_QP].sq_signaled, + p_ca_obj->qp_attr[DEST_QP].sq_psn, + p_ca_obj->qp_attr[DEST_QP].rq_psn, + p_ca_obj->qp_attr[DEST_QP].resp_res, + p_ca_obj->qp_attr[DEST_QP].init_depth, + p_ca_obj->qp_attr[DEST_QP].sq_depth, + p_ca_obj->qp_attr[DEST_QP].rq_depth )); + + //cl_thread_suspend( 30000 ); + + // run the test + ib_status = alts_rc_rdma_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("sleep for 3 seconds...\n" )); + cl_thread_suspend(3000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + if (p_ca_obj->cq_done >= 4) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +void +__mra_thread( + IN void* context ) +{ + ib_api_status_t ib_status; + alts_cm_ca_obj_t *p_ca_obj; + ib_cm_rep_t *p_cm_rep; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + p_ca_obj = (alts_cm_ca_obj_t*)context; + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("mra_event sleep (30 secs)...\n") ); + + cl_event_wait_on( &p_ca_obj->mra_event, 8 *1000 * 1000, TRUE ); + + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("mra_event triggered...\n") ); + + p_cm_rep = &p_ca_obj->rep_dest; + + ib_status = ib_cm_rep( p_ca_obj->h_cm_req, p_cm_rep ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + +ib_api_status_t +alts_rc_mra_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_RELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + p_ca_obj->mra_test = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active 
port on the 1st hca\n")); + break; + } + + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // run the test + ib_status = alts_rc_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + cl_thread_suspend(1000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_cm_uc_test ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_UNRELIABLE_CONN; + p_ca_obj->pfn_comp_cb = cm_rc_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + + /* + * Create the necessary resource PD/QP/QP + */ + p_ca_obj->num_cq = 2; + + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // run the test + ib_status = alts_rc_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + + cl_thread_suspend(1000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + if (p_ca_obj->cq_done == 
2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_cm_sidr_tests ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_cm_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_cm_ca_obj_t*)cl_zalloc(sizeof(alts_cm_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_cm_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x1; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->test_type = IB_QPT_UNRELIABLE_DGRM; + p_ca_obj->pfn_comp_cb = cm_ud_cq_comp_cb; // set your cq handler + + p_ca_obj->reply_requested = TRUE; + + /* + * get an active port + */ + ib_status = alts_cm_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = alts_create_test_resources( p_ca_obj ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_create_test_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_cm_client_server(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_cm_client_server failed with status %d\n", ib_status)); + break; + } + + // run the test + //ib_status = alts_rc_message_passing(p_ca_obj,IB_QPT_UNRELIABLE_DGRM); + + cl_thread_suspend(1000); /* 1 sec */ + + // destroy connection + ib_status = alts_cm_destroy(p_ca_obj); + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_cm_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} diff --git a/branches/Ndi/tests/alts/createanddestroycq.c b/branches/Ndi/tests/alts/createanddestroycq.c new file mode 100644 index 00000000..778033c1 --- /dev/null +++ b/branches/Ndi/tests/alts/createanddestroycq.c @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include + + + + +/* + * Function prototypes + */ +ib_api_status_t +alts_cq( + boolean_t modify_cq_attr + ); + +/* + * Test Case CrateDestroyCQ + */ + + +ib_api_status_t +al_test_create_destroy_cq(void) +{ + boolean_t modify_cq_attr = FALSE; + + return alts_cq(modify_cq_attr); +} + +ib_api_status_t +al_test_query_modify_cq(void) +{ + boolean_t modify_cq_attr = TRUE; + + return alts_cq(modify_cq_attr); +} + + +/* Internal Functions */ + +ib_api_status_t +alts_cq( + boolean_t modify_cq_attr + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca; + ib_cq_handle_t h_cq = NULL; + cl_status_t cl_status; + ib_cq_create_t cq_create; + ib_pd_handle_t h_pd; + int iteration = 0; +#ifdef CL_KERNEL + cl_event_t cq_event; + + cl_event_construct( &cq_event ); +#endif + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_ca); + + /* + * Allocate a PD required for CQ + */ + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("\tib_alloc_pd failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + ALTS_PRINT( ALTS_DBG_ERROR, + ("\tib_pd handle = %p\n",h_pd)); + + /* 1st pass: event callback, 2nd pass: wait object. */ + do + { + iteration++; + + /* + * Initialize the CQ creation attributes. + */ + cl_memclr( &cq_create, sizeof( ib_cq_create_t ) ); + if( iteration == 1 ) + { + /* Event callback */ + cq_create.pfn_comp_cb = alts_cq_comp_cb; + } + else if( iteration == 2 ) +#ifdef CL_KERNEL + { + cl_status = cl_event_init( &cq_event, FALSE ); + if( cl_status != CL_SUCCESS ) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("cl_event_init returned status %s\n", + cl_status_text[cl_status]) ); + break; + } + cq_create.h_wait_obj = &cq_event; + } +#else + { + /* Wait Object */ + cl_status = + cl_waitobj_create( TRUE, &cq_create.h_wait_obj ); + if( cl_status != CL_SUCCESS ) + { + CL_PRINT( ALTS_DBG_ERROR, alts_dbg_lvl, + ("cl_create_wait_object failed status = 0x%x\n", + cl_status) ); + break; + } + } + else + { + /* Bogus wait object. 
*/ + cq_create.h_wait_obj = (void*)(uintn_t)0xCDEF0000; + } +#endif + + cq_create.size = ALTS_CQ_SIZE; //Size of the CQ // NOTENOTE + + /* Create CQ here */ + ib_status = ib_create_cq(h_ca, &cq_create, + NULL,alts_cq_err_cb, &h_cq); + + /* Trap expected failure. */ + if( iteration == 3 && ib_status != IB_SUCCESS ) + break; + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_create_cq failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + CL_ASSERT(h_cq); + ALTS_PRINT( ALTS_DBG_INFO,\ + ("ib_create_cq successful size = 0x%x status = %s\n", + cq_create.size, ib_get_err_str(ib_status)) ); + + while( modify_cq_attr == TRUE ) + { + /* + * Query and Modify CQ ATTR + */ + uint32_t cq_size; + + ib_status = ib_query_cq(h_cq,&cq_size); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_cq failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + if(cq_size != cq_create.size) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_cq failed cq_size=0x%x cq_create.cq_size=0x%x\n", + cq_size,cq_create.size)); + ib_status = IB_INVALID_CQ_SIZE; + break; + } + + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_query_cq cq_size = 0x%x\n", cq_size) ); + + cq_size = 0x90; + + ib_status = ib_modify_cq(h_cq,&cq_size); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_modify_cq failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_modify_cq passed for cq_size = 0x%x\n", cq_size) ); + + break; //Break for the while + } + + ib_status = ib_destroy_cq(h_cq, alts_cq_destroy_cb); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_destroy_cq failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + ALTS_PRINT( ALTS_DBG_INFO,\ + ("ib_destroy_cq successful status = %s\n", + ib_get_err_str(ib_status))); + + +#ifdef CL_KERNEL + } while( iteration < 2 ); + + cl_event_destroy( &cq_event ); +#else + if( cq_create.h_wait_obj ) + { + cl_status = cl_waitobj_destroy( cq_create.h_wait_obj ); + if( cl_status != CL_SUCCESS ) + { + CL_PRINT( ALTS_DBG_ERROR, alts_dbg_lvl, + ("cl_destroy_wait_object failed status = 0x%x", + cl_status) ); + } + } + + } while( iteration < 3 ); +#endif + + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dealloc_pd failed status = %s\n", + ib_get_err_str(ib_status)) ); + } + + alts_close_ca(h_ca); + + break; //End of while + } + + /* Close AL */ + if(h_al) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +alts_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + +void +alts_cq_err_cb( + ib_async_event_rec_t *p_err_rec ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +alts_cq_destroy_cb( + void *context + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} diff --git a/branches/Ndi/tests/alts/createanddestroyqp.c b/branches/Ndi/tests/alts/createanddestroyqp.c new file mode 100644 index 00000000..0e7e9795 --- /dev/null +++ b/branches/Ndi/tests/alts/createanddestroyqp.c @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include + + + + +/* + * Function prototypes + */ +ib_api_status_t +alts_qp( + boolean_t modify_cq_attr + ); + + + +/* + * Test Case CrateDestroyQP + */ + + +ib_api_status_t +al_test_create_destroy_qp(void) +{ + boolean_t modify_qp_attr = FALSE; + + return alts_qp(modify_qp_attr); +} + +ib_api_status_t +al_test_query_modify_qp(void) +{ + boolean_t modify_qp_attr = TRUE; + + return alts_qp(modify_qp_attr); +} + + +/* Internal Functions */ + +ib_api_status_t +alts_qp( + boolean_t modify_cq_attr + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca; + ib_cq_handle_t h_cq = NULL; + ib_qp_handle_t h_qp = NULL; + ib_pd_handle_t h_pd; + cl_status_t cl_status; + ib_cq_create_t cq_create; + ib_qp_create_t qp_create; + uint32_t bsize; + ib_ca_attr_t *p_ca_attr = NULL; + ib_qp_attr_t p_qp_attr; + + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_ca); + + /* + * Allocate a PD required for CQ + */ + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("\tib_alloc_pd failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + ALTS_PRINT( ALTS_DBG_ERROR, + ("\tib_pd handle = %p\n",h_pd)); + + /* + * Get the CA Attributest + * Check for two active ports + */ + + /* + * Query the CA + */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + CL_ASSERT(bsize); + + + + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + + + + /* + * Create CQ Attributes + */ + cq_create.size = ALTS_CQ_SIZE; + cq_create.pfn_comp_cb = alts_cq_comp_cb; + cq_create.h_wait_obj = NULL; + + ib_status = ib_create_cq( + h_ca, + &cq_create, + NULL, 
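+			/* cq_context is NULL: the test callbacks never look at it */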
+ alts_cq_err_cb, + &h_cq ); + CL_ASSERT(ib_status == IB_SUCCESS); + + + + /* + * Create QP Attributes + */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); + qp_create.sq_depth= 1; + qp_create.rq_depth= 1; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + qp_create.h_sq_cq = h_cq; //NULL + qp_create.h_rq_cq = h_cq; + + qp_create.sq_signaled = TRUE; + + qp_create.qp_type = IB_QPT_RELIABLE_CONN; + + + ib_status = ib_create_qp( + h_pd, + &qp_create, + NULL, + alts_qp_err_cb, + &h_qp); + + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + ALTS_EXIT(ALTS_DBG_VERBOSE); + return (ib_status); + } + + ib_status = ib_query_qp(h_qp, + &p_qp_attr); + + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("Error in query_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(h_qp,alts_qp_destroy_cb); + ALTS_EXIT(ALTS_DBG_VERBOSE); + return (ib_status); + } + + ib_status = ib_destroy_qp(h_qp, alts_qp_destroy_cb); + + if (h_cq) + ib_status = ib_destroy_cq(h_cq, alts_qp_destroy_cb); + + + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dealloc_pd failed status = %s\n", + ib_get_err_str(ib_status)) ); + } + + alts_close_ca(h_ca); + + break; //End of while + } + + /* Close AL */ + if(h_al) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +alts_qp_err_cb( + ib_async_event_rec_t *p_err_rec ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +alts_qp_destroy_cb( + void *context + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + + + diff --git a/branches/Ndi/tests/alts/createdestroyav.c b/branches/Ndi/tests/alts/createdestroyav.c new file mode 100644 index 00000000..8099c718 --- /dev/null +++ b/branches/Ndi/tests/alts/createdestroyav.c @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include +#include +#include +#include +#include + +/* + * Function prototypes + */ +ib_api_status_t +alts_av( + boolean_t modify_av_attr + ); + + +/* + * Test Case CrateDestroyAV + */ + +ib_api_status_t +al_test_create_destroy_av(void) +{ + boolean_t modify_av_attr = FALSE; + + return alts_av(modify_av_attr); +} + +ib_api_status_t +al_test_query_modify_av(void) +{ + boolean_t modify_av_attr = TRUE; + + return alts_av(modify_av_attr); +} + + +/* Internal Functions */ + +ib_api_status_t +alts_av( + boolean_t modify_av_attr + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + size_t guid_count; + ib_port_attr_t *p_alts_port_attr; + ib_av_attr_t alts_av_attr, query_av_attr; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd, h_pd1; + ib_av_handle_t h_av; + ib_net64_t ca_guid_array[ALTS_MAX_CA]; + uint32_t bsize; + ib_ca_attr_t *alts_ca_attr = NULL; + uint8_t i; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Step 1: Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, \ + ("Open AL failed\n")); + break; + } + + CL_ASSERT(h_al); + + /* + * Step 2: Open the first available CA + */ + + ib_status = ib_get_ca_guids(h_al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_get_ca_guids failed status = %s\n",ib_get_err_str(ib_status)) ); + break; + + } + + ALTS_PRINT(ALTS_DBG_INFO, \ + ("Total number of CA in the sytem is %d\n",(uint32_t)guid_count)); + + if(guid_count == 0) + { + ib_status = IB_ERROR; + break; + } + + ib_status = ib_get_ca_guids(h_al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_get_ca_guids failed with status = %s\n", ib_get_err_str(ib_status)) ); + break; + } + + ib_status = ib_open_ca(h_al, + ca_guid_array[0], //Default open the first CA + alts_ca_err_cb, + (void *)1234, //ca_context + &h_ca); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_open_ca failed with status = %s\n", ib_get_err_str(ib_status)) ); + break; + + } + CL_ASSERT(h_ca); + + /* + * Step 3: Query for the CA Attributes + */ + + /* Query the CA */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %s\n",ib_get_err_str(ib_status)) ); + + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + CL_ASSERT(bsize); + + /* Allocate the memory needed for query_ca */ + + alts_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + CL_ASSERT(alts_ca_attr); + + ib_status = ib_query_ca(h_ca, alts_ca_attr, &bsize); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %s\n", ib_get_err_str(ib_status)) ); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + + p_alts_port_attr = alts_ca_attr->p_port_attr; + + /* + * Step 4: Get the active port + */ + ALTS_PRINT( ALTS_DBG_INFO, \ + ("Get the active Port\n")); +//#if 0 + for(i=0;i < alts_ca_attr->num_ports; i++) + { + p_alts_port_attr = &alts_ca_attr->p_port_attr[i]; + if(p_alts_port_attr->link_state == IB_LINK_ACTIVE) + break; + } + + if(p_alts_port_attr->link_state != IB_LINK_ACTIVE) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("port attribute link state is not active\n") ); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + + + ALTS_PRINT(ALTS_DBG_INFO, \ + ("Active port number is %d\n",p_alts_port_attr->port_num)); +//#endif + + /* + * Step 5: Construct the AV structure + */ + + 
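+		/*
+		 * The destination LID is the SM LID because, on an active port,
+		 * the subnet manager is the one peer known to be reachable; the
+		 * AV is never used to post traffic in this test. grh_valid is
+		 * set so that create/query/modify also exercise the GRH members,
+		 * with both GIDs faked from the local CA GUID.
+		 */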
alts_av_attr.port_num = p_alts_port_attr->port_num; + //DLID is SM LID + alts_av_attr.dlid = p_alts_port_attr->sm_lid; + + alts_av_attr.sl = 0; + alts_av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + alts_av_attr.path_bits = 0; + alts_av_attr.grh_valid = TRUE; + + alts_av_attr.grh.dest_gid.unicast.interface_id= ca_guid_array[0]; + alts_av_attr.grh.src_gid.unicast.interface_id = ca_guid_array[0]; + alts_av_attr.grh.hop_limit = 0; + alts_av_attr.grh.ver_class_flow = 0; + /* + * step 6: Create a PD + */ + + /* NOTE Try creating PD for IB_PDT_ALIAS type */ + + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, (void *)1234, &h_pd); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_alloc_pd failed with status = %s\n",ib_get_err_str(ib_status)) ); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + + + + /* + * Step 7: Create the Address Vector + */ + ib_status = ib_create_av(h_pd, &alts_av_attr, &h_av); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_create_av failed with status = %s\n",ib_get_err_str(ib_status)) ); + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + if(modify_av_attr == TRUE) + { + /* + * Query the AV fromt the handle + */ + ib_status = ib_query_av(h_av, &query_av_attr, &h_pd1); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_av failed with status = %s\n",ib_get_err_str(ib_status)) ); + + ib_destroy_av(h_av); + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + + query_av_attr.dlid = p_alts_port_attr->lid; //DLID is local lid; + + ib_status = ib_modify_av(h_av, &query_av_attr); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_modify_av failed with status = %s\n",ib_get_err_str(ib_status)) ); + ib_destroy_av(h_av); + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + /* Again query the AV to verify the modified value*/ + ib_status = ib_query_av(h_av, &query_av_attr, &h_pd1); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_av failed with status = %s\n", ib_get_err_str(ib_status)) ); + + ib_destroy_av(h_av); + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + CL_ASSERT(query_av_attr.dlid == p_alts_port_attr->lid); + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_modify_av PASSED\n") ); + + + + } + + + /* + * Destroy the address Vector + */ + ib_status = ib_destroy_av(h_av); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_destroy_av failed with status = %s\n", ib_get_err_str(ib_status)) ); + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; + } + + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + ib_close_ca(h_ca, alts_ca_destroy_cb); + break; //End of while + } + + /* Close AL */ + if(h_al) + alts_close_al(h_al); + if(alts_ca_attr) + cl_free(alts_ca_attr); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} diff --git a/branches/Ndi/tests/alts/creatememwindow.c b/branches/Ndi/tests/alts/creatememwindow.c new file mode 100644 index 00000000..5b3ad046 --- /dev/null +++ b/branches/Ndi/tests/alts/creatememwindow.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include +#include + + +/* Test case PARAMETERS */ + +#define MEM_ALLIGN 32 +#define MEM_SIZE 1024 + + +/* + * Function prototypes + */ + + +/* + * Test Case RegisterMemRegion + */ + +ib_api_status_t +al_test_create_mem_window( + void + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca = NULL; + ib_pd_handle_t h_pd = NULL; + + ib_mr_create_t virt_mem; + char *ptr = NULL, *ptr_align; + size_t mask; + uint32_t lkey; + uint32_t rkey; + ib_mr_handle_t h_mr = NULL; + ib_mr_attr_t alts_mr_attr; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_ca); + + /* + * Allocate a PD here + */ + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_alloc_pd failed status = %d", ib_status) ); + alts_close_ca(h_ca); + break; + } + + /* + * Allocate the virtual memory which needs to be registered + */ + + mask = MEM_ALLIGN - 1; + + ptr = cl_malloc(MEM_SIZE + MEM_ALLIGN - 1); + + CL_ASSERT(ptr); + + ptr_align = ptr; + + if(((size_t)ptr & mask) != 0) + ptr_align = (char *)(((size_t)ptr+mask)& ~mask); + + virt_mem.vaddr = ptr_align; + virt_mem.length = MEM_SIZE; + virt_mem.access_ctrl = (IB_AC_LOCAL_WRITE | IB_AC_MW_BIND); + + /* + * Register the memory region + */ + + ib_status = ib_reg_mem(h_pd, &virt_mem, &lkey, &rkey, &h_mr); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_reg_mem failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + /* + * Query the memory region + */ + ib_status = ib_query_mr(h_mr, &alts_mr_attr); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_mr failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + /* + * Re-register the memeory region + */ + virt_mem.access_ctrl |= (IB_AC_RDMA_WRITE ); + + ib_status = ib_rereg_mem(h_mr,IB_MR_MOD_ACCESS, + &virt_mem,&lkey,&rkey,NULL); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_rereg_mem failed status = %s\n", ib_get_err_str(ib_status)) ); + } + /* + * Create, Query and Destroy the memory 
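+	 * (aside: a memory window is a lighter-weight object than a memory
+	 * region; ib_create_mw returns just a handle and an rkey, and the
+	 * window grants no remote access until bound to a registered MR,
+	 * hence the IB_AC_MW_BIND access flag on the registration above)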
window + */ + + { + + uint32_t rkey_mw; + ib_mw_handle_t h_mw; + + ib_pd_handle_t h_pd_query; + uint32_t rkey_mw_query; + + ib_status = ib_create_mw(h_pd,&rkey_mw,&h_mw); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_create_mw failed status = %s\n",ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + ib_status = ib_query_mw(h_mw,&h_pd_query,&rkey_mw_query); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_mw failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + ib_status = ib_destroy_mw(h_mw); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_destroy_mw failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + } + + /* + * De-register the memory region + */ + ib_status = ib_dereg_mr(h_mr); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dereg_mr failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + /* + * Deallocate the PD + */ + + ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dealloc_pd failed status = %s\n", ib_get_err_str(ib_status)) ); + alts_close_ca(h_ca); + break; + } + + break; //End of while + } + + if( ptr ) + cl_free( ptr ); + + /* Close AL */ + if(h_al) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} diff --git a/branches/Ndi/tests/alts/dirs b/branches/Ndi/tests/alts/dirs new file mode 100644 index 00000000..d1917e8a --- /dev/null +++ b/branches/Ndi/tests/alts/dirs @@ -0,0 +1,3 @@ +DIRS=\ + user \ +# kernel diff --git a/branches/Ndi/tests/alts/ibquery.c b/branches/Ndi/tests/alts/ibquery.c new file mode 100644 index 00000000..4e40b270 --- /dev/null +++ b/branches/Ndi/tests/alts/ibquery.c @@ -0,0 +1,582 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * This test validates the ib_query API. ib_query access layer api queries + * subnet administrator on behalf of clients. 
+ * + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + +/* + * Function prototypes + */ + +void +alts_query_cb( +IN ib_query_rec_t *p_query_rec +); +void +alts_reg_svc_cb( +IN ib_reg_svc_rec_t *p_reg_svc_rec +); +void +alts_print_port_guid( +IN ib_port_info_t *p_port_info +); +void +alts_print_node_info( + ib_node_info_t *p_node_info +); + +/* + * Globals + */ +ib_net64_t query_portguid; + +/* This test case assumes that the HCA has been configured by running + * SM. + */ +ib_api_status_t +al_test_query(void) +{ + ib_api_status_t ib_status = IB_ERROR; + ib_al_handle_t h_al = NULL; + + ib_ca_handle_t h_ca = NULL; + uint32_t bsize; + uint32_t i; + ib_ca_attr_t *p_ca_attr = NULL; + //alts_ca_object_t ca_obj; // for testing stack + ib_query_req_t query_req; + ib_port_attr_t *p_port_attr = NULL; + ib_reg_svc_req_t reg_svc_req; + ib_gid_t port_gid = {0}; + ib_net16_t pkey=0; + ib_lid_pair_t lid_pair; + ib_guid_pair_t guid_pair; + ib_gid_pair_t gid_pair; + ib_user_query_t info; + ib_path_rec_t path; + ib_reg_svc_handle_t h_reg_svc; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + do + { + /* + * Open the AL interface + */ + ib_status = alts_open_al(&h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_al failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + /* + * Default opens the first CA + */ + ib_status = alts_open_ca(h_al, &h_ca); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_ca failed status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + /* + * Query the CA + */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %s\n", + ib_get_err_str(ib_status)) ); + ib_status = IB_ERROR; + break; + } + + CL_ASSERT(bsize); + + + /* Allocate the memory needed for query_ca */ + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + ib_status = IB_INSUFFICIENT_MEMORY; + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + //Get the Active port GUID + query_portguid = 0x0; + for(i=0; i< p_ca_attr->num_ports; i++) + { + p_port_attr = &p_ca_attr->p_port_attr[i]; + + if (p_port_attr->link_state == IB_LINK_ACTIVE) + { + query_portguid = p_port_attr->port_guid; + port_gid = p_port_attr->p_gid_table[0]; + pkey = p_port_attr->p_pkey_table[0]; + break; + } + } + + if(query_portguid == 0x0) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Atlease one port need to be active\n") ); + ib_status = IB_ERROR; + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("calling ib_query api\n")); + + +#if 1 + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sending IB_QUERY_NODE_REC_BY_NODE_GUID\n")); + + query_req.query_type = IB_QUERY_NODE_REC_BY_NODE_GUID; + query_req.p_query_input = &p_ca_attr->ca_guid; //Node GUID + query_req.port_guid = query_portguid; + query_req.timeout_ms = 10000; //milliseconds + query_req.retry_cnt = 3; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = NULL; + query_req.pfn_query_cb = alts_query_cb; + + ib_status = ib_query( h_al, &query_req, NULL ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + cl_thread_suspend( 
1000 ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sending IB_QUERY_PATH_REC_BY_PORT_GUIDS\n")); + + query_req.query_type = IB_QUERY_PATH_REC_BY_PORT_GUIDS; + query_req.p_query_input = &guid_pair; //Node GUID + query_req.port_guid = query_portguid; + query_req.timeout_ms = 10000; //milliseconds + query_req.retry_cnt = 3; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = NULL; + guid_pair.src_guid = query_portguid; + guid_pair.dest_guid = query_portguid; + query_req.pfn_query_cb = alts_query_cb; + + ib_status = ib_query( h_al, &query_req, NULL ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + cl_thread_suspend( 1000 ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sending IB_QUERY_PATH_REC_BY_GIDS\n")); + + query_req.query_type = IB_QUERY_PATH_REC_BY_GIDS; + query_req.p_query_input = &gid_pair; //Node GUID + query_req.port_guid = query_portguid; + query_req.timeout_ms = 10000; //milliseconds + query_req.retry_cnt = 3; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = NULL; + ib_gid_set_default( &gid_pair.src_gid, query_portguid ); + ib_gid_set_default( &gid_pair.dest_gid, query_portguid ); + query_req.pfn_query_cb = alts_query_cb; + + ib_status = ib_query( h_al, &query_req, NULL ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + cl_thread_suspend( 1000 ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sending IB_QUERY_PATH_REC_BY_LIDS\n")); + + query_req.query_type = IB_QUERY_PATH_REC_BY_LIDS; + query_req.p_query_input = &lid_pair; //Node GUID + query_req.port_guid = query_portguid; + query_req.timeout_ms = 10000; //milliseconds + query_req.retry_cnt = 3; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = NULL; + lid_pair.src_lid = p_port_attr->lid; + lid_pair.dest_lid = p_port_attr->lid; + query_req.pfn_query_cb = alts_query_cb; + + ib_status = ib_query( h_al, &query_req, NULL ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + cl_thread_suspend( 1000 ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sending IB_QUERY_USER_DEFINED\n")); + + query_req.query_type = IB_QUERY_USER_DEFINED; + query_req.p_query_input = &info; //Node GUID + query_req.port_guid = query_portguid; + query_req.timeout_ms = 10000; //milliseconds + query_req.retry_cnt = 3; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = NULL; + info.method = IB_MAD_METHOD_GET; + info.attr_id = IB_MAD_ATTR_PATH_RECORD; + info.attr_size = sizeof(ib_path_rec_t); + info.comp_mask = IB_PR_COMPMASK_DLID | IB_PR_COMPMASK_SLID;// | IB_PR_COMPMASK_NUM_PATH; + info.p_attr = &path; + + cl_memclr( &path, sizeof(ib_path_rec_t) ); + path.dlid = p_port_attr->lid; + path.slid = p_port_attr->lid; + path.num_path = 0x1; + query_req.pfn_query_cb = alts_query_cb; + + ib_status = ib_query( h_al, &query_req, NULL ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + cl_thread_suspend( 1000 ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("registering a service with the SA\n")); + + cl_memclr( ®_svc_req, sizeof( ib_reg_svc_req_t ) ); + + reg_svc_req.svc_rec.service_id = 0x52413; + reg_svc_req.svc_rec.service_gid = port_gid; + reg_svc_req.svc_rec.service_pkey = pkey; + reg_svc_req.svc_rec.service_lease = 0xFFFFFFFF; + 
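+		/*
+		 * A service_lease of 0xFFFFFFFF requests an indefinite lease;
+		 * svc_data_mask below is a component mask naming the svc_rec
+		 * fields the SA must store (ID, GID, PKey, lease, key, name).
+		 * The 16-byte service key itself is left zeroed:
+		 */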
//reg_svc_req.svc_rec.service_key[16]; + strcpy( (char*)reg_svc_req.svc_rec.service_name, "alts" ); + reg_svc_req.svc_data_mask = IB_SR_COMPMASK_SID | + IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SKEY | + IB_SR_COMPMASK_SNAME; + + reg_svc_req.port_guid = query_portguid; + reg_svc_req.timeout_ms = 10000; + reg_svc_req.retry_cnt = 3; + reg_svc_req.flags = IB_FLAGS_SYNC; + reg_svc_req.svc_context = NULL; + reg_svc_req.pfn_reg_svc_cb = alts_reg_svc_cb; + + ib_status = ib_reg_svc( h_al, ®_svc_req, &h_reg_svc ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_reg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + /* + * Note we leave this registration registered + * and let ib_close_al clean it up + */ + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("registering a service with the SA\n")); + + cl_memclr( ®_svc_req, sizeof( ib_reg_svc_req_t ) ); + + reg_svc_req.svc_rec.service_id = 0x52413; + reg_svc_req.svc_rec.service_gid = port_gid; + reg_svc_req.svc_rec.service_pkey = pkey; + reg_svc_req.svc_rec.service_lease = 0xFFFFFFFF; + //reg_svc_req.svc_rec.service_key[16]; + strcpy( (char*)reg_svc_req.svc_rec.service_name, "alts" ); + reg_svc_req.svc_data_mask = IB_SR_COMPMASK_SID | + IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SKEY | + IB_SR_COMPMASK_SNAME; + + reg_svc_req.port_guid = query_portguid; + reg_svc_req.timeout_ms = 10000; + reg_svc_req.retry_cnt = 3; + reg_svc_req.flags = IB_FLAGS_SYNC; + reg_svc_req.svc_context = NULL; + reg_svc_req.pfn_reg_svc_cb = alts_reg_svc_cb; + + ib_status = ib_reg_svc( h_al, ®_svc_req, &h_reg_svc ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_reg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } + + ib_status = ib_dereg_svc( h_reg_svc, ib_sync_destroy ); + if( ib_status != IB_SUCCESS ) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dereg_svc api failed with status %s\n", + ib_get_err_str(ib_status)) ); + break; + } +#endif + + }while (0); + + if( p_ca_attr ) + cl_free( p_ca_attr ); + + if( h_al ) + alts_close_al( h_al ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; + +} + + +void +alts_print_port_guid( +ib_port_info_t *p_port_info) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_port_info ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("ib_port_attr_t info:\n" + "\tsubnet_timeout...:x%x\n" + "\tlocal_port_num....:x%x\n" + "\tmtu_smsl.........:x%x\n" + "\tbase_lid.........:x%x\n", + p_port_info->subnet_timeout, + p_port_info->local_port_num, + p_port_info->mtu_smsl, + p_port_info->base_lid + )); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + +} + +void +alts_print_node_info( + ib_node_info_t *p_node_info +) +{ + UNUSED_PARAM( p_node_info ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_print_node_info info:\n" + "\tnode_type...:x%x\n" + "\tnum_ports....:x%x\n" + "\tnode_guid.........:x%"PRIx64"\n" + "\tport_guid.........:x%"PRIx64"\n", + p_node_info->node_type, + p_node_info->num_ports, + p_node_info->node_guid, + p_node_info->port_guid + )); +} + +void +alts_query_cb( +IN ib_query_rec_t *p_query_rec +) +{ + uint32_t i; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("ib_query_rec_t info:\n" + "\tstatus...:x%x\n" + "\tquery_type...:x%x\n", + p_query_rec->status, + p_query_rec->query_type + )); + + if(p_query_rec->status == IB_SUCCESS) + { + ib_node_record_t * p_node_rec; + + switch(p_query_rec->query_type) + { +#if 1 + case IB_QUERY_NODE_REC_BY_NODE_GUID: + 
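+			/* A single SA response MAD can carry several records;
+			 * result_cnt says how many, and ib_get_query_result()
+			 * extracts the i-th ib_node_record_t from the MAD. */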
ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("returning IB_QUERY_NODE_REC_BY_NODE_GUID\n"));
+			for( i = 0; i < p_query_rec->result_cnt; i++ )
+			{
+				p_node_rec = (ib_node_record_t *)ib_get_query_result(
+					p_query_rec->p_result_mad, i );
+				alts_print_node_info(&p_node_rec->node_info);
+			}
+			break;
+
+		case IB_QUERY_PATH_REC_BY_PORT_GUIDS:
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("returning IB_QUERY_PATH_REC_BY_PORT_GUIDS\n"));
+			break;
+
+		case IB_QUERY_PATH_REC_BY_GIDS:
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("returning IB_QUERY_PATH_REC_BY_GIDS\n"));
+			break;
+
+		case IB_QUERY_PATH_REC_BY_LIDS:
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("returning IB_QUERY_PATH_REC_BY_LIDS\n"));
+			break;
+
+		case IB_QUERY_USER_DEFINED:
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("returning IB_QUERY_USER_DEFINED\n"));
+			break;
+
+
+#endif
+			break;
+
+		default:
+			break;
+
+		}
+	}
+	else
+	{
+		ALTS_PRINT(ALTS_DBG_ERROR,
+			("p_query_rec->status failed\n"));
+	}
+
+	if( p_query_rec->p_result_mad )
+		ib_put_mad( p_query_rec->p_result_mad );
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+
+
+void
+alts_dereg_svc_cb(
+	IN void *context )
+{
+	UNUSED_PARAM( context );
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+	ALTS_PRINT( ALTS_DBG_VERBOSE, ("ib_dereg_svc done\n") );
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
+
+
+void
+alts_reg_svc_cb(
+	IN ib_reg_svc_rec_t *p_reg_svc_rec )
+{
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+#if defined( CL_KERNEL ) && !defined( _DEBUG_ )
+	UNUSED_PARAM( p_reg_svc_rec );
+#endif
+
+	ALTS_PRINT(ALTS_DBG_VERBOSE,
+		("ib_reg_svc_rec_t info:\n"
+		"\treq_status...:x%x\n"
+		"\tresp_status...:x%x\n",
+		p_reg_svc_rec->req_status,
+		p_reg_svc_rec->resp_status
+		));
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+}
diff --git a/branches/Ndi/tests/alts/kernel/SOURCES b/branches/Ndi/tests/alts/kernel/SOURCES
new file mode 100644
index 00000000..e78ba363
--- /dev/null
+++ b/branches/Ndi/tests/alts/kernel/SOURCES
@@ -0,0 +1,38 @@
+TARGETNAME=alts
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DRIVER
+
+SOURCES= alts.rc \
+	alts_driver.c \
+	..\allocdeallocpd.c \
+	..\alts_misc.c \
+	..\cmtests.c \
+	..\createanddestroycq.c \
+	..\createdestroyav.c \
+	..\creatememwindow.c \
+	..\ibquery.c \
+	..\madtests.c \
+	..\multisendrecv.c \
+	..\openclose.c \
+	..\querycaattr.c \
+	..\registermemregion.c \
+	..\registerpnp.c \
+	..\smatests.c
+
+INCLUDES=..\..\..\inc;..\..\..\inc\kernel;..
+
+C_DEFINES=$(C_DEFINES) -DDRIVER -DDEPRECATE_DDK_FUNCTIONS
+
+TARGETLIBS= \
+	$(TARGETPATH)\*\complib.lib \
+	$(TARGETPATH)\*\ibal.lib
+
+!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K"
+#
+# The driver is built in the Win2K build environment
+# - use the library version of safe strings
+#
+TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib
+!endif
+
+MSC_WARNING_LEVEL= /W4 /wd4127
diff --git a/branches/Ndi/tests/alts/kernel/alts.inf b/branches/Ndi/tests/alts/kernel/alts.inf
new file mode 100644
index 00000000..84b7cffc
--- /dev/null
+++ b/branches/Ndi/tests/alts/kernel/alts.inf
@@ -0,0 +1,169 @@
+;/*++
+;
+;Copyright 2004 InfiniCon Systems, Inc. All Rights Reserved.
+;
+;Module Name:
+;
+;    infinihost.inf
+;
+;Abstract:
+;
+;    INF file for installing the InfiniCon InfiniBand HCAs.
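+;    (alts.sys installs as an UpperFilters driver on the HCA device
+;    stack, alongside the thca VPD driver, so the test suite runs
+;    automatically whenever the adapter starts; see MT23108.FiltersReg
+;    below.)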
+; +;Author: +; +; InfiniCon Systems, Inc +; +;REVISION: +; +; $Revision$ +; +;--*/ + +[Version] +Signature="$Windows NT$" +Class=InfiniBandHca +ClassGUID=%HcaClassGuid% +Provider=%Vendor% +CatalogFile=infiniserv.cat +DriverVer=03/08/2006,1.0.0000.614 + +; ================= Destination directory section ===================== + +[DestinationDirs] +DefaultDestDir=12 +ClassCopyFiles=11 +;MT23108UMCopyFiles=11 +; add additional HCA user-mode section names here. + +; ================= Class Install section ===================== + +[ClassInstall32] +CopyFiles=ClassCopyFiles +AddReg=ClassAddReg + +[ClassCopyFiles] +IbInstaller.dll + +[ClassAddReg] +HKR,,,,"InfiniBand Host Channel Adapters" +HKR,,Icon,,-5 +HKLM,"System\CurrentControlSet\Control\CoDeviceInstallers", \ + %HcaClassGuid%,%REG_MULTI_SZ_APPEND%, "IbInstaller.dll,IbCoInstaller" + +; ================= Device Install section ===================== + +[SourceDisksNames] +1=%DiskId% + +[SourceDisksFiles.x86] +IbInstaller.dll=1 +complib.sys=1 +ibal.sys=1 +mt23108.sys=1 +thca.sys=1 +alts.sys=1 + +[Manufacturer] +%Vendor% = INFINICON_SYS,nt + +[INFINICON_SYS] +; empty since we don't support W9x/Me + +[INFINICON_SYS.nt] +%MT23108.DeviceDesc% = MT23108.Install.nt,PCI\VEN_15B3&DEV_5A44 +; add additional devices here. + +[MT23108.Install.nt] +CopyFiles = MT23108.CopyFiles + +[MT23108.Install.nt.HW] +AddReg = MT23108.FiltersReg + +[MT23108.Install.nt.Services] +AddService = thca,%SPSVCINST_NULL%,THCA.ServiceInstall +AddService = alts,%SPSVCINST_NULL%,ALTS.ServiceInstall +AddService = mt23108,%SPSVCINST_ASSOCSERVICE%,MT23108.ServiceInstall +AddService = ibal,%SPSVCINST_NULL%,Ibal.ServiceInstall + +[MT23108.CopyFiles] +complib.sys +ibal.sys +mt23108.sys +thca.sys +alts.sys + +; +; ============= Service Install section ============== +; + +[MT23108.ServiceInstall] +DisplayName = %MT23108.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\mt23108.sys +LoadOrderGroup = extended base +AddReg = MT23108.ParamsReg + +[THCA.ServiceInstall] +DisplayName = %THCA.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\thca.sys +LoadOrderGroup = extended base +AddReg = THCA.ParamsReg + +[ALTS.ServiceInstall] +DisplayName = %ALTS.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\alts.sys +LoadOrderGroup = extended base + +[Ibal.ServiceInstall] +DisplayName = %Ibal.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ibal.sys +AddReg = Ibal.ParamsReg + +[MT23108.FiltersReg] +HKR,,"UpperFilters", 0x00010000,"thca","alts" + +[MT23108.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD%,0xFFFFFFFF +HKR,"Parameters","ConfAddr",%REG_DWORD%,88 +HKR,"Parameters","ConfData",%REG_DWORD%,92 +HKR,"Parameters","DdrMapOffset",%REG_DWORD%,0x100000 +HKR,"Parameters","DdrMapSize",%REG_DWORD%,0x1600000 +HKR,"Parameters","UseIbMgt",%REG_DWORD%,1 +HKR,"Parameters","ThhLegacySqp",%REG_DWORD%,0 +HKR,"Parameters","ResetCard",%REG_DWORD%,0 + +[THCA.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x80000000 + +[Ibal.ParamsReg] +HKR,"Parameters","DebugFlags",%REG_DWORD_NO_CLOBBER%,0x80000000 + +[Strings] +HcaClassGuid = "{58517E00-D3CF-40c9-A679-CEE5752F4491}" +Vendor = "InfiniCon Systems, Inc." 
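+; The numeric flags below use the standard SetupAPI AddReg encoding:
+; REG_DWORD_NO_CLOBBER (0x00010003) writes a value only if it does not
+; already exist, and REG_MULTI_SZ_APPEND (0x00010008) appends to an
+; existing multi-string value rather than replacing it.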
+MT23108.DeviceDesc = "InfiniCon MT23108 InfiniBand HCA Test" +MT23108.ServiceDesc = "InfiniCon MT23108 InfiniBand HCA Driver" +THCA.ServiceDesc = "InfiniCon MT23108 HCA VPD for IBAL" +ALTS.ServiceDesc = "InfiniCon Access Layer Test Suite Driver" +Ibal.ServiceDesc = "InfiniCon InfiniBand Access Layer" +DiskId = "InfiniCon InfiniBand HCA installation disk" +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_DWORD_NO_CLOBBER = 0x00010003 +REG_MULTI_SZ_APPEND = 0x00010008 diff --git a/branches/Ndi/tests/alts/kernel/alts.rc b/branches/Ndi/tests/alts/kernel/alts.rc new file mode 100644 index 00000000..0b0ca481 --- /dev/null +++ b/branches/Ndi/tests/alts/kernel/alts.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer Test Driver (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "InfiniBand Access Layer Test Driver" +#endif + +#define VER_INTERNALNAME_STR "alts.sys" +#define VER_ORIGINALFILENAME_STR "alts.sys" + +#include diff --git a/branches/Ndi/tests/alts/kernel/alts_driver.c b/branches/Ndi/tests/alts/kernel/alts_driver.c new file mode 100644 index 00000000..43ff0882 --- /dev/null +++ b/branches/Ndi/tests/alts/kernel/alts_driver.c @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Provides the driver entry points for the ALTS kernel driver. + */ + + +#include +#include +#include +#include "alts_common.h" +#include "alts_debug.h" +#include + + +#if !defined(FILE_DEVICE_INFINIBAND) // Not defined in WXP DDK +#define FILE_DEVICE_INFINIBAND 0x0000003B +#endif + +uint32_t alts_dbg_lvl = ALTS_DBG_ERROR | ALTS_DBG_STATUS; + + +NTSTATUS +DriverEntry( + IN DRIVER_OBJECT *p_driver_obj, + IN UNICODE_STRING *p_registry_path ); + +static void +alts_drv_unload( + IN DRIVER_OBJECT *p_driver_obj ); + +//static NTSTATUS +//alts_ioctl( +// IN DEVICE_OBJECT *p_dev_obj, +// IN IRP *p_irp ); + +static NTSTATUS +alts_sysctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ); + +static NTSTATUS +alts_add_device( + IN DRIVER_OBJECT *p_driver_obj, + IN DEVICE_OBJECT *p_pdo ); + +static NTSTATUS +alts_start_tests( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ); + +static void +alts_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ); + + +#ifdef ALLOC_PRAGMA +#pragma alloc_text (INIT, DriverEntry) +#pragma alloc_text (PAGE, alts_drv_unload) +//#pragma alloc_text (PAGE, alts_ioctl) +#pragma alloc_text (PAGE, alts_sysctl) +#endif + + +static const cl_vfptr_pnp_po_t alts_vfptr_pnp = { + "ALTS", + alts_start_tests, // StartDevice + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, // QueryRemove + alts_release_resources, + cl_do_remove, // Remove + cl_irp_skip, // CancelRemove + cl_irp_skip, // SurpriseRemove + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, + cl_irp_skip, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, + cl_irp_ignore, // QueryPower + cl_irp_ignore, // SetPower + cl_irp_ignore, // PowerSequence + cl_irp_ignore // WaitWake +}; + + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_driver_obj, + IN PUNICODE_STRING p_registry_path ) +{ + NTSTATUS status; +#ifdef _DEBUG_ + static boolean_t exit = FALSE; +#endif + + ALTS_ENTER( ALTS_DBG_DEV ); + + UNUSED_PARAM( p_registry_path ); + +#ifdef _DEBUG_ + DbgBreakPoint(); + if( exit ) + { + ALTS_TRACE_EXIT( ALTS_DBG_DEV, ("Load aborted.\n") ); + return STATUS_DRIVER_INTERNAL_ERROR; + } +#endif + + status = CL_INIT; + if( !NT_SUCCESS(status) ) + { + ALTS_TRACE_EXIT( ALTS_DBG_ERROR, + ("cl_init returned %08X.\n", status) ); + return status; + } + + p_driver_obj->MajorFunction[IRP_MJ_PNP] = cl_pnp; + p_driver_obj->MajorFunction[IRP_MJ_POWER] = cl_power; +// p_driver_obj->MajorFunction[IRP_MJ_DEVICE_CONTROL] = alts_ioctl; + p_driver_obj->MajorFunction[IRP_MJ_SYSTEM_CONTROL] = alts_sysctl; + p_driver_obj->DriverUnload = alts_drv_unload; + p_driver_obj->DriverExtension->AddDevice = alts_add_device; + + ALTS_EXIT( ALTS_DBG_DEV ); + return STATUS_SUCCESS; +} + + +static void +alts_drv_unload( + IN PDRIVER_OBJECT p_driver_obj ) +{ + ALTS_ENTER( ALTS_DBG_DEV ); + + UNUSED_PARAM( p_driver_obj ); + + CL_DEINIT; + + ALTS_EXIT( ALTS_DBG_DEV ); +} + + +//static NTSTATUS +//alts_ioctl( +// IN DEVICE_OBJECT *p_dev_obj, +// IN IRP *p_irp ) +//{ +// +//} + + +static NTSTATUS +alts_sysctl( + IN DEVICE_OBJECT *p_dev_obj, + IN IRP *p_irp ) 
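+/*
+ * WMI/system-control IRPs are of no interest to this test driver; like
+ * any well-behaved filter it forwards them untouched to the next device
+ * object (IoSkipCurrentIrpStackLocation followed by IoCallDriver).
+ */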
+{ + NTSTATUS status; + cl_pnp_po_ext_t *p_ext; + + ALTS_ENTER( ALTS_DBG_DEV ); + + p_ext = p_dev_obj->DeviceExtension; + + IoSkipCurrentIrpStackLocation( p_irp ); + status = IoCallDriver( p_ext->p_next_do, p_irp ); + + ALTS_EXIT( ALTS_DBG_DEV ); + return status; +} + + +static NTSTATUS +alts_add_device( + IN DRIVER_OBJECT *p_driver_obj, + IN DEVICE_OBJECT *p_pdo ) +{ + NTSTATUS status; + DEVICE_OBJECT *p_dev_obj, *p_next_do; + + ALTS_ENTER( ALTS_DBG_PNP ); + + /* + * Create the device so that we have a device extension to store stuff in. + */ + status = IoCreateDevice( p_driver_obj, sizeof(cl_pnp_po_ext_t), + NULL, FILE_DEVICE_INFINIBAND, FILE_DEVICE_SECURE_OPEN, + FALSE, &p_dev_obj ); + if( !NT_SUCCESS( status ) ) + { + ALTS_TRACE_EXIT( ALTS_DBG_ERROR, + ("IoCreateDevice returned 0x%08X.\n", status) ); + return status; + } + + /* Attach to the device stack. */ + p_next_do = IoAttachDeviceToDeviceStack( p_dev_obj, p_pdo ); + if( !p_next_do ) + { + IoDeleteDevice( p_dev_obj ); + ALTS_TRACE_EXIT( ALTS_DBG_ERROR, + ("IoAttachDeviceToDeviceStack failed.\n") ); + return STATUS_NO_SUCH_DEVICE; + } + + /* Inititalize the complib extension. */ + cl_init_pnp_po_ext( p_dev_obj, p_next_do, p_pdo, alts_dbg_lvl, + &alts_vfptr_pnp, NULL ); + + ALTS_EXIT( ALTS_DBG_PNP ); + return status; +} + + +static NTSTATUS +alts_start_tests( + IN DEVICE_OBJECT* const p_dev_obj, + IN IRP* const p_irp, + OUT cl_irp_action_t* const p_action ) +{ + NTSTATUS status; + ib_api_status_t ib_status; + + status = cl_do_sync_pnp( p_dev_obj, p_irp, p_action ); + if( !NT_SUCCESS( status ) ) + return status; + + /* Wait 10 seconds for LIDs to get assigned. */ + cl_thread_suspend( 60000 ); + + /* We're started. Launch the tests. */ + ib_status = al_test_openclose(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nOpenClose returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_querycaattr(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nQueryCAAttribute returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_modifycaattr(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nModifyCAAttribute returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_alloc_dealloc_pd(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nAllocDeallocPD returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_create_destroy_av(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nCreateDestroyAV returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_query_modify_av(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nQueryAndModifyAV returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_create_destroy_cq(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nCreateAndDestroyCQ returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_query_modify_cq(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nQueryAndModifyCQ returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_register_mem(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nRegisterMemRegion returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_register_phys_mem(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nRegisterPhyMemRegion returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_create_mem_window(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nCreateMemWindow returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_register_shared_mem(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nRegisterSharedMemRegion returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_multi_send_recv(); + ALTS_TRACE( 
ALTS_DBG_STATUS, + ("\nMultiSend returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_register_pnp(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nRegisterPnP returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_mad(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nMadTests returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_query(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nMadQuery returned %s\n\n", ib_get_err_str( ib_status )) ); + + ib_status = al_test_cm(); + ALTS_TRACE( ALTS_DBG_STATUS, + ("\nCmTests returned %s\n\n", ib_get_err_str( ib_status )) ); + + return status; +} + + +static void +alts_release_resources( + IN DEVICE_OBJECT* const p_dev_obj ) +{ + UNUSED_PARAM( p_dev_obj ); +} diff --git a/branches/Ndi/tests/alts/kernel/alts_driver.h b/branches/Ndi/tests/alts/kernel/alts_driver.h new file mode 100644 index 00000000..1b097345 --- /dev/null +++ b/branches/Ndi/tests/alts/kernel/alts_driver.h @@ -0,0 +1,23 @@ +/* BEGIN_ICS_COPYRIGHT **************************************** +** END_ICS_COPYRIGHT ****************************************/ + + + +#if !defined( _ALTS_DRIVER_H_ ) +#define _ALTS_DRIVER_H_ + + +#include +#include +#include +#include "alts_debug.h" + + +typedef struct _alts_dev_ext +{ + cl_pnp_po_ext_t cl_ext; + +} alts_dev_ext_t; + + +#endif /* !defined( _ALTS_DRIVER_H_ ) */ diff --git a/branches/Ndi/tests/alts/kernel/makefile b/branches/Ndi/tests/alts/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/tests/alts/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tests/alts/madtests.c b/branches/Ndi/tests/alts/madtests.c new file mode 100644 index 00000000..bd1c1928 --- /dev/null +++ b/branches/Ndi/tests/alts/madtests.c @@ -0,0 +1,3037 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * mad test does a data transfer between two queue pairs created one + * on each port of the hca. 
In order for this test to work, two ports of the hca + * should be connected in a loop back and must be configured to ACTIVE PORT STATE. + * + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + +/* Parameters */ + +#define TestUD1 1 +#define TestRC1 2 + +#define MAX_QPS 8 +#define SRC_QP 0 +#define DEST_QP 1 + + +typedef struct _alts_mad_ca_obj +{ + ib_api_status_t status; + uint32_t test_type; + + ib_ca_handle_t h_ca; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr; + ib_port_attr_t *p_dest_port_attr; + + ib_net32_t src_qp_num; + ib_net32_t dest_qp_num; + + ib_net64_t src_portguid; + uint8_t src_port_num; + + ib_net64_t dest_portguid; + uint8_t dest_port_num; + + ib_net16_t slid; + ib_net16_t dlid; + + ib_pool_key_t h_src_pool; + ib_pool_key_t h_dest_pool; + + ib_mad_svc_handle_t h_src_mad_svc; + ib_mad_svc_handle_t h_dest_mad_svc; + + ib_cq_handle_t h_cq; + uint32_t cq_size; + + ib_pd_handle_t h_pd; + + ib_qp_handle_t h_qp[MAX_QPS]; + uint32_t qkey; + + ib_qp_attr_t qp_attr[MAX_QPS]; + + + ib_send_wr_t *p_send_wr; + ib_recv_wr_t *p_recv_wr; + size_t wr_send_size; + size_t wr_recv_size; + uint32_t num_wrs; + uint32_t ds_list_depth; + uint32_t msg_size; // Initialize this field + + ib_av_handle_t h_av_src; + ib_av_handle_t h_av_dest; + + uint32_t send_done; + uint32_t send_done_error; + uint32_t recv_done; + uint32_t recv_done_error; + uint32_t cq_done; // total completions + boolean_t is_src; + + boolean_t is_loopback; + boolean_t reply_requested; + +} alts_mad_ca_obj_t; + + + +/* + * Function Prototypes + */ + +ib_api_status_t +alts_mad_check_active_ports( + alts_mad_ca_obj_t *p_ca_obj ); + +ib_api_status_t +mad_create_resources( + alts_mad_ca_obj_t *p_ca_obj ); + +ib_api_status_t +mad_activate_svc( + alts_mad_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp ); + +ib_api_status_t +alts_spl_destroy_resources( + alts_mad_ca_obj_t *p_ca_obj ); + +ib_api_status_t +mad_post_sends( + alts_mad_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_post ); + +ib_api_status_t +mad_post_recvs( + alts_mad_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_post ); + +void +mad_cq_destroy_cb( + void *context ); + +void +mad_pd_destroy_cb( + void *context ); + +void +mad_qp_destroy_cb( + void *context ); + +/* + * CQ completion callback function + */ + +void +mad_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +/* + * CQ Error callback function + */ + +void +mad_cq_err_cb( + ib_async_event_rec_t *p_err_rec ); + +/* + * QP Error callback function + */ +void +mad_qp_err_cb( + ib_async_event_rec_t *p_err_rec ); + +void +mad_svc_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ); + +void +mad_svc_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ); + +void +mad_svc_qp0_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ); + +ib_api_status_t +alts_qp1_loopback( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_qp1_2_ports( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_qp1_2_ports_100( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_qp1_pingpong( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_qp0_loopback( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_qp0_2_ports( + ib_ca_handle_t h_ca, + 
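+	/* ca_attr_size is the byte count previously returned by
+	   ib_query_ca( h_ca, NULL, &size ) */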
	uint32_t ca_attr_size );
+
+ib_api_status_t
+alts_qp0_2_ports_100(
+	ib_ca_handle_t h_ca,
+	uint32_t ca_attr_size );
+
+ib_api_status_t
+alts_qp0_pingpong(
+	ib_ca_handle_t h_ca,
+	uint32_t ca_attr_size );
+
+ib_api_status_t
+alts_qp0_ping_switch (
+	ib_ca_handle_t h_ca,
+	uint32_t ca_attr_size );
+
+#define ALTS_TEST_MGMT_CLASS 0x56
+#define ALTS_TEST_MGMT_CLASS_VER 1
+#define ALTS_TEST_MGMT_METHOD 0x56
+
+/*
+ * Global Variables
+ */
+ib_al_handle_t h_al;
+ib_dgrm_info_t dgrm_info;
+ib_mad_svc_t mad_svc;
+ib_send_wr_t send_wr;
+ib_recv_wr_t recv_wr;
+
+extern ib_cq_create_t cq_create_attr;
+extern ib_qp_create_t qp_create_attr;
+extern ib_av_attr_t av_attr;
+extern ib_wc_t free_wclist;
+extern ib_wc_t free_wcl;
+
+ib_api_status_t qp1_loopback=IB_NOT_FOUND, qp1_2_ports=IB_NOT_FOUND;
+ib_api_status_t qp1_2_ports_100=IB_NOT_FOUND, qp1_pingpong=IB_NOT_FOUND;
+
+ib_api_status_t qp0_loopback=IB_NOT_FOUND, qp0_2_ports=IB_NOT_FOUND;
+ib_api_status_t qp0_2_ports_100=IB_NOT_FOUND, qp0_pingpong=IB_NOT_FOUND;
+
+ib_api_status_t qp0_ping_switch=IB_NOT_FOUND;
+
+
+/* This test case assumes that the HCA has 2 ports connected
+ * through the switch. Sends packets from the lower port number to the
+ * higher port number.
+ */
+ib_api_status_t
+al_test_mad(void)
+{
+	ib_api_status_t ib_status = IB_ERROR;
+	ib_ca_handle_t h_ca = NULL;
+	uint32_t bsize;
+	ib_ca_attr_t *p_ca_attr = NULL;
+	//alts_mad_ca_obj_t ca_obj; // for testing stack
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	do
+	{
+		/*
+		 * Open the AL interface
+		 */
+		h_al = NULL;
+		ib_status = alts_open_al(&h_al);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_open_al failed status = %d", ib_status) );
+			break;
+		}
+
+		/*
+		 * Default opens the first CA
+		 */
+		ib_status = alts_open_ca(h_al, &h_ca);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_open_ca failed status = %d", ib_status) );
+			break;
+		}
+
+		/*
+		 * Get the CA attributes.
+		 * Check for two active ports.
+		 */
+
+		/*
+		 * Query the CA
+		 */
+		bsize = 0;
+		ib_status = ib_query_ca(h_ca, NULL, &bsize);
+		if(ib_status != IB_INSUFFICIENT_MEMORY)
+		{
+			ALTS_PRINT(ALTS_DBG_ERROR,
+				("ib_query_ca failed with status = %d\n", ib_status) );
+			break;
+		}
+
+		CL_ASSERT(bsize);
+
+		// run tests
+		qp1_loopback = alts_qp1_loopback(h_ca, bsize);
+		if(qp1_loopback != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("qp1_loopback() failed with status %d\n", qp1_loopback));
+			break;
+		}
+		ALTS_PRINT( ALTS_DBG_VERBOSE,
+			("qp1_loopback() passed\n"));
+
+		qp1_2_ports = alts_qp1_2_ports(h_ca, bsize);
+		if(qp1_2_ports != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_qp1_2_ports() failed with status %d\n", qp1_2_ports));
+			break;
+		}
+		ALTS_PRINT( ALTS_DBG_VERBOSE,
+			("alts_qp1_2_ports() passed\n"));
+
+		qp1_2_ports_100 = alts_qp1_2_ports_100(h_ca, bsize);
+		if(qp1_2_ports_100 != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_qp1_2_ports_100() failed with status %d\n",
+				qp1_2_ports_100));
+			break;
+		}
+		ALTS_PRINT( ALTS_DBG_VERBOSE,
+			("alts_qp1_2_ports_100() passed\n"));
+
+		qp1_pingpong = alts_qp1_pingpong(h_ca, bsize);
+		if(qp1_pingpong != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_qp1_pingpong() failed with status %d\n",
+				qp1_pingpong));
+			//break;
+		}
+		else
+		{
+			ALTS_PRINT( ALTS_DBG_VERBOSE,
+				("alts_qp1_pingpong() passed\n"));
+		}
+
+		// run tests
+		qp0_loopback = alts_qp0_loopback(h_ca, bsize);
+		if(qp0_loopback != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("qp0_loopback() failed with status %d\n", qp0_loopback));
+			break;
+		}
+
ALTS_PRINT( ALTS_DBG_VERBOSE, + ("qp0_loopback() passed\n")); + + qp0_2_ports = alts_qp0_2_ports(h_ca, bsize); + if(qp0_2_ports != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_qp0_2_ports() failed with status %d\n", qp0_2_ports)); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_qp0_2_ports() passed\n")); + + qp0_2_ports_100 = alts_qp0_2_ports_100(h_ca, bsize); + if(qp0_2_ports_100 != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_qp0_2_ports_100() failed with status %d\n", + qp0_2_ports_100)); + break; + } + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_qp0_2_ports_100() passed\n")); + + qp0_pingpong = alts_qp0_pingpong(h_ca, bsize); + if(qp0_pingpong != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_qp0_pingpong() failed with status %d\n", + qp0_pingpong)); + //break; + } + else + { + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_qp0_pingpong() passed\n")); + } + + qp0_ping_switch = alts_qp0_ping_switch(h_ca, bsize); + if(qp0_ping_switch != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_qp0_ping_switch() failed with status %d\n", + qp0_ping_switch)); + //break; + } + else + { + ALTS_PRINT( ALTS_DBG_VERBOSE, + ("alts_qp0_ping_switch() passed\n")); + } + + } while (0); + + /* + * Destroy the resources + */ + if (p_ca_attr) + cl_free(p_ca_attr); + + ALTS_PRINT(ALTS_DBG_STATUS, + ("Test results (mad):\n" + "\tqp1_loopback..........: %s\n" + "\tqp1_2_ports...........: %s\n" + "\tqp1_2_ports_100_msgs..: %s\n" + "\tqp1_pingpong..........: %s\n", + ib_get_err_str(qp1_loopback), + ib_get_err_str(qp1_2_ports), + ib_get_err_str(qp1_2_ports_100), + ib_get_err_str(qp1_pingpong) + )); + + ALTS_PRINT(ALTS_DBG_STATUS, + ( + "\tqp0_loopback..........: %s\n" + "\tqp0_2_ports...........: %s\n" + "\tqp0_2_ports_100_msgs..: %s\n" + "\tqp0_pingpong..........: %s\n" + "\tqp0_ping_switch.......: %s\n", + ib_get_err_str(qp0_loopback), + ib_get_err_str(qp0_2_ports), + ib_get_err_str(qp0_2_ports_100), + ib_get_err_str(qp0_pingpong), + ib_get_err_str(qp0_ping_switch) + )); + + if( h_al ) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; + +} + +ib_api_status_t +alts_spl_destroy_resources( + alts_mad_ca_obj_t *p_ca_obj) +{ + /* + * Destroy Send QP, Recv QP, CQ and PD + */ + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if (p_ca_obj->h_qp[SRC_QP]) + { + ib_status = ib_destroy_qp(p_ca_obj->h_qp[SRC_QP], NULL); + } + + if (p_ca_obj->is_loopback != TRUE) + { + if (p_ca_obj->h_qp[DEST_QP]) + { + ib_status = ib_destroy_qp(p_ca_obj->h_qp[DEST_QP], NULL); + } + } + + if (p_ca_obj->h_cq) + ib_status = ib_destroy_cq(p_ca_obj->h_cq, NULL); + + if (p_ca_obj->h_pd) + ib_status = ib_dealloc_pd(p_ca_obj->h_pd,NULL); + + cl_thread_suspend( 1000 ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_spl_message_passing( + alts_mad_ca_obj_t *p_ca_obj) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_mad_element_t *p_mad_element; + ib_mad_t *p_mad; + char *p_buf; + uint32_t i; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + //Create an Address vector + av_attr.dlid = p_ca_obj->dlid; + av_attr.port_num = p_ca_obj->src_port_num; + av_attr.sl = 0; + av_attr.path_bits = 0; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + av_attr.grh_valid = FALSE; + + ib_status = ib_create_av(p_ca_obj->h_pd,&av_attr,&p_ca_obj->h_av_src); + if(ib_status != IB_SUCCESS) + return ib_status; + + p_ca_obj->send_done = 0; + p_ca_obj->send_done_error = 0; + p_ca_obj->recv_done = 0; + p_ca_obj->recv_done_error = 0; + p_ca_obj->cq_done = 0; + + 
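+	/*
+	 * Editorial sketch (not part of the original test): every request
+	 * posted below is expected to produce one send completion, and its
+	 * delivery one receive completion, so a clean run ends with
+	 * cq_done == num_wrs * 2.  The repeated sleep blocks further down
+	 * poll for that condition; a bounded helper (hypothetical name,
+	 * assuming cl_thread_suspend() takes milliseconds, as it is used
+	 * elsewhere in this file) could express the same idea:
+	 *
+	 *	static boolean_t
+	 *	alts_wait_for_completions(
+	 *		alts_mad_ca_obj_t *p_obj,
+	 *		uint32_t retries )
+	 *	{
+	 *		while( retries-- )
+	 *		{
+	 *			if( p_obj->cq_done == p_obj->num_wrs * 2 )
+	 *				return TRUE;	// all sends and receives completed
+	 *			cl_thread_suspend( 10000 );	// wait 10 seconds per retry
+	 *		}
+	 *		return FALSE;	// caller treats this as IB_ERROR
+	 *	}
+	 */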
+	ALTS_PRINT(ALTS_DBG_VERBOSE,
+		("++++++ dlid(x%x) src_port(%d) ====\n",
+		av_attr.dlid, av_attr.port_num));
+
+	for (i=0; i < p_ca_obj->num_wrs; i++)
+	{
+		p_mad_element = NULL;
+		ib_status = ib_get_mad(
+			p_ca_obj->h_src_pool,
+			MAD_BLOCK_SIZE,
+			&p_mad_element );
+		if (ib_status != IB_SUCCESS)
+		{
+			ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE,
+				("Error in ib_get_mad()! %s\n", ib_get_err_str(ib_status)));
+			return (ib_status);
+		}
+
+		// format mad
+		p_mad_element->context1 = (void *)1;
+		p_mad_element->context2 = p_ca_obj;
+
+		/* Send request information. */
+		p_mad_element->h_av = p_ca_obj->h_av_src;
+		p_mad_element->send_opt = IB_SEND_OPT_SIGNALED;
+
+		if (p_ca_obj->reply_requested == TRUE)
+			p_mad_element->resp_expected = TRUE;
+		else
+			p_mad_element->resp_expected = FALSE; //TRUE;
+
+		p_mad_element->remote_qp = p_ca_obj->qp_attr[DEST_QP].num;
+
+		p_mad_element->remote_qkey = p_ca_obj->qkey;
+		p_mad_element->timeout_ms = 20;
+		p_mad_element->retry_cnt = 1;
+
+		/* Completion information. */
+		p_mad_element->status = 0;
+
+		// format mad
+		p_mad = p_mad_element->p_mad_buf;
+
+		p_buf = (char *)p_mad;
+		cl_memset(p_buf, 0x66, 256); // set pattern in buffer
+
+		switch (p_ca_obj->qp_attr[SRC_QP].num)
+		{
+		case IB_QP0:
+			ib_mad_init_new(
+				p_mad,
+				IB_MCLASS_SUBN_LID,
+				ALTS_TEST_MGMT_CLASS_VER,
+				ALTS_TEST_MGMT_METHOD,
+				(ib_net64_t) CL_NTOH64(0x666),
+				IB_MAD_ATTR_SM_INFO,
+				0 );
+			break;
+
+		case IB_QP1:
+		default:
+			ib_mad_init_new(
+				p_mad,
+				ALTS_TEST_MGMT_CLASS,
+				ALTS_TEST_MGMT_CLASS_VER,
+				ALTS_TEST_MGMT_METHOD,
+				(ib_net64_t) CL_NTOH64(0x666),
+				IB_MAD_ATTR_CLASS_PORT_INFO,
+				0 );
+			break;
+		}
+
+		// send
+		ib_status = ib_send_mad(
+			p_ca_obj->h_src_mad_svc,
+			p_mad_element,
+			NULL );
+
+		if(ib_status != IB_SUCCESS)
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("ib_send_mad failed"));
+
+		//cl_thread_suspend(10); // 10 usec
+	}
+
+	ALTS_PRINT(ALTS_DBG_VERBOSE,
+		("sleeping for a while ...\n"));
+
+	cl_thread_suspend(10000); // 10 seconds
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+	return ib_status;
+}
+
+ib_api_status_t
+alts_qp0_msg_at_hc (
+	alts_mad_ca_obj_t *p_ca_obj,
+	IN const uint8_t hop_count )
+{
+	ib_api_status_t ib_status = IB_SUCCESS;
+	ib_mad_element_t *p_mad_element;
+	ib_mad_t *p_mad;
+	char *p_buf;
+	uint32_t i;
+	uint8_t path_out[IB_SUBNET_PATH_HOPS_MAX];
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	//Create an Address vector
+	av_attr.dlid = IB_LID_PERMISSIVE;
+	av_attr.port_num = p_ca_obj->src_port_num;
+	av_attr.sl = 0;
+	av_attr.path_bits = 0;
+	av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS;
+	av_attr.grh_valid = FALSE;
+
+	ib_status = ib_create_av(p_ca_obj->h_pd,&av_attr,&p_ca_obj->h_av_src);
+	if(ib_status != IB_SUCCESS)
+		return ib_status;
+
+	p_ca_obj->send_done = 0;
+	p_ca_obj->send_done_error = 0;
+	p_ca_obj->recv_done = 0;
+	p_ca_obj->recv_done_error = 0;
+	p_ca_obj->cq_done = 0;
+
+	ALTS_PRINT(ALTS_DBG_VERBOSE,
+		("++++++ dlid(x%x) src_port(%d) ====\n",
+		av_attr.dlid, av_attr.port_num));
+
+	for (i=0; i < p_ca_obj->num_wrs; i++)
+	{
+		p_mad_element = NULL;
+		ib_status = ib_get_mad(
+			p_ca_obj->h_src_pool,
+			MAD_BLOCK_SIZE,
+			&p_mad_element );
+		if (ib_status != IB_SUCCESS)
+		{
+			ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE,
+				("Error in ib_get_mad()! %s\n", ib_get_err_str(ib_status)));
+			return (ib_status);
+		}
+
+		// format mad
+		p_mad_element->context1 = (void *)1;
+		p_mad_element->context2 = p_ca_obj;
+
+		/* Send request information. */
+		p_mad_element->h_av = p_ca_obj->h_av_src;
+		p_mad_element->send_opt = IB_SEND_OPT_SIGNALED;
+
+		if (p_ca_obj->reply_requested == TRUE)
+			p_mad_element->resp_expected = TRUE;
+		else
+			p_mad_element->resp_expected = FALSE; //TRUE;
+
+		p_mad_element->remote_qp = p_ca_obj->qp_attr[DEST_QP].num;
+
+		p_mad_element->remote_qkey = p_ca_obj->qkey;
+		p_mad_element->timeout_ms = 20;
+		p_mad_element->retry_cnt = 1;
+
+		/* Completion information. */
+		p_mad_element->status = 0;
+
+		// format mad
+		p_mad = p_mad_element->p_mad_buf;
+
+		p_buf = (char *)p_mad;
+		cl_memset(p_buf, 0x66, 256); // set pattern in buffer
+
+		path_out[1] = p_ca_obj->src_port_num; // directed-route hops are indexed from 1
+
+		ib_smp_init_new(
+			(ib_smp_t *)p_mad,
+			ALTS_TEST_MGMT_METHOD,
+			(ib_net64_t) CL_NTOH64(0x666),
+			IB_MAD_ATTR_NODE_DESC,
+			0,
+			hop_count,
+			0,
+			(const uint8_t *)&path_out,
+			IB_LID_PERMISSIVE,
+			IB_LID_PERMISSIVE );
+
+		// send
+		ib_status = ib_send_mad(
+			p_ca_obj->h_src_mad_svc,
+			p_mad_element,
+			NULL );
+
+		if(ib_status != IB_SUCCESS)
+			ALTS_PRINT(ALTS_DBG_VERBOSE,
+				("ib_send_mad failed"));
+	}
+
+	ALTS_PRINT(ALTS_DBG_VERBOSE,
+		("sleeping for a while ...\n"));
+
+	cl_thread_suspend(10000); // 10 seconds
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs > 2)) ||
+		(p_ca_obj->cq_done != p_ca_obj->num_wrs*2))
+	{
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("sleeping for a while ...\n"));
+		cl_thread_suspend(10000); // 10 seconds
+	}
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+	return ib_status;
+}
+
+ib_api_status_t
+mad_post_sends(
+	alts_mad_ca_obj_t *p_ca_obj,
+	uint32_t reg_index,
+	uint32_t num_posts )
+{
+	ib_send_wr_t *p_s_wr, *p_send_failure_wr;
+	uint32_t msg_size, i;
+	ib_api_status_t ib_status = IB_SUCCESS;
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	if (p_ca_obj->test_type == TestUD1)
+		msg_size = p_ca_obj->msg_size - sizeof(ib_grh_t);
+	else
+		msg_size = 64;
+
+	//msg_size = p_ca_obj->msg_size;
+
+	msg_size = 64; // note: forces 64-byte messages, overriding the computation above
+
+	p_s_wr = p_ca_obj->p_send_wr;
+
+	p_s_wr->p_next = NULL;
+	p_s_wr->ds_array[0].length = msg_size;
+	p_s_wr->num_ds = 1;
+
+	p_s_wr->wr_type = WR_SEND;
+
+	if (p_ca_obj->test_type == TestUD1)
+	{
+		p_s_wr->dgrm.ud.h_av = p_ca_obj->h_av_src;
+		p_s_wr->send_opt = IB_SEND_OPT_IMMEDIATE | \
+			IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED;
+
+		p_s_wr->dgrm.ud.remote_qkey = p_ca_obj->qkey;
+		p_s_wr->dgrm.ud.remote_qp = p_ca_obj->qp_attr[DEST_QP].num;
+
+		ALTS_PRINT(ALTS_DBG_VERBOSE,
+			("======= qkey(x%x) qp_num(x%x) ========\n",
+			p_s_wr->dgrm.ud.remote_qkey,
+			p_s_wr->dgrm.ud.remote_qp));
+
+	}
+	else if(p_ca_obj->test_type == TestRC1)
+	{
+		p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \
+			IB_SEND_OPT_IMMEDIATE | \
+
IB_SEND_OPT_SOLICITED ; + + } + + + for (i = 0; i < num_posts; i++) + { + p_s_wr->wr_id = i+reg_index; + p_s_wr->immediate_data = 0xfeedde00 + i; + + p_s_wr->remote_ops.vaddr = 0; + p_s_wr->remote_ops.rkey = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[SRC_QP], + p_s_wr, + &p_send_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +mad_post_recvs( + alts_mad_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_recv_wr_t *p_r_wr, *p_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + if (p_ca_obj->test_type == TestUD1) + msg_size = p_ca_obj->msg_size; + else + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + if (p_ca_obj->test_type == TestUD1) + msg_size = 64 + sizeof(ib_grh_t); + else + msg_size = 64; + + p_r_wr = p_ca_obj->p_recv_wr; + + p_r_wr->p_next = NULL; + p_r_wr->ds_array[0].length = msg_size; + p_r_wr->num_ds = 1; + + for (i = 0; i < num_posts; i++) + { + + p_r_wr->wr_id = i+reg_index; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******vaddr(x%"PRIx64") lkey(x%x) len(%d)*****\n", + (void*)(uintn_t)p_r_wr->ds_array[0].vaddr, + p_r_wr->ds_array[0].lkey, + p_r_wr->ds_array[0].length)); + + if (p_ca_obj->is_loopback == TRUE) + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[SRC_QP], + p_r_wr, + &p_failure_wr); + } + else + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[DEST_QP], + p_r_wr, + &p_failure_wr); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +mad_activate_svc( + alts_mad_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp ) +{ + ib_api_status_t ib_status; + ib_mad_svc_handle_t h_mad_svc; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + // init dgrm svc + if(p_ca_obj->is_src == 1) + dgrm_info.port_guid = p_ca_obj->src_portguid; + else + dgrm_info.port_guid = p_ca_obj->dest_portguid; + + dgrm_info.qkey = p_ca_obj->qkey; + dgrm_info.pkey_index = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("******** port num = %d ***************\n", + p_ca_obj->src_port_num)); + + ib_status = ib_init_dgrm_svc( + h_qp, + &dgrm_info ); + + if (ib_status != IB_SUCCESS) + return ib_status; + + // create svc + cl_memclr(&mad_svc, sizeof(ib_mad_svc_t)); + + mad_svc.mad_svc_context = p_ca_obj; + mad_svc.pfn_mad_send_cb = mad_svc_send_cb; + mad_svc.pfn_mad_recv_cb = mad_svc_recv_cb; + + mad_svc.support_unsol = TRUE; + + mad_svc.mgmt_class = ALTS_TEST_MGMT_CLASS; + mad_svc.mgmt_version = ALTS_TEST_MGMT_CLASS_VER; + + // fill in methods supported + mad_svc.method_array[ALTS_TEST_MGMT_METHOD] = TRUE; + + ib_status = ib_reg_mad_svc( + h_qp, + &mad_svc, + &h_mad_svc ); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return IB_SUCCESS; +} + +ib_api_status_t +alts_mad_check_active_ports(alts_mad_ca_obj_t *p_ca_obj) +{ + ib_api_status_t ib_status; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr = NULL; + ib_port_attr_t *p_dest_port_attr = NULL; + uint32_t i; + ib_port_attr_t *p_port_attr; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(p_ca_obj); + + p_ca_attr = p_ca_obj->p_ca_attr; + + CL_ASSERT(p_ca_attr); + + for(i=0; i< p_ca_attr->num_ports; i++) + { + p_port_attr = &p_ca_attr->p_port_attr[i]; + + if (p_port_attr->link_state == IB_LINK_ACTIVE) + { + if (p_src_port_attr == NULL) + p_src_port_attr = p_port_attr; + else + if(p_dest_port_attr == NULL) + p_dest_port_attr 
= p_port_attr; + else + break; + } + } + + // handle loopback case + if (p_ca_obj->is_loopback == TRUE) + p_dest_port_attr = p_src_port_attr; + + if (p_src_port_attr && p_dest_port_attr) + { + p_ca_obj->p_dest_port_attr = p_dest_port_attr; + p_ca_obj->p_src_port_attr = p_src_port_attr; + + p_ca_obj->dlid = p_dest_port_attr->lid; + p_ca_obj->slid = p_src_port_attr->lid; + + p_ca_obj->dest_portguid = p_dest_port_attr->port_guid; + p_ca_obj->src_portguid = p_src_port_attr->port_guid; + + p_ca_obj->dest_port_num = p_dest_port_attr->port_num; + p_ca_obj->src_port_num = p_src_port_attr->port_num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("**** slid = x%x (x%x) ***dlid = x%x (x%x) ***************\n", + p_ca_obj->slid, + CL_NTOH16(p_ca_obj->slid), + p_ca_obj->dlid, + CL_NTOH16(p_ca_obj->dlid) )); + + ib_status = IB_SUCCESS; + + } + else + { + + ib_status = IB_ERROR; + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +/* + * Create the CQ, PD and QP + */ +ib_api_status_t +mad_create_resources( alts_mad_ca_obj_t *p_ca_obj ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + cl_memclr(&qp_create_attr, sizeof(ib_qp_create_t)); + + /* + * Allocate a PD + */ + ib_status = ib_alloc_pd( + p_ca_obj->h_ca, + IB_PDT_NORMAL, + p_ca_obj, //pd_context + &p_ca_obj->h_pd); + + CL_ASSERT(ib_status == IB_SUCCESS); + + /* + * Create CQ Attributes + */ + cq_create_attr.size = p_ca_obj->cq_size; + cq_create_attr.pfn_comp_cb = mad_cq_comp_cb; + cq_create_attr.h_wait_obj = NULL; + + ib_status = ib_create_cq( + p_ca_obj->h_ca, + &cq_create_attr, + p_ca_obj, + mad_cq_err_cb, + &p_ca_obj->h_cq ); + CL_ASSERT(ib_status == IB_SUCCESS); + + p_ca_obj->cq_size = cq_create_attr.size; + + /* + * Create QP Attributes + */ + qp_create_attr.sq_depth = p_ca_obj->num_wrs; + qp_create_attr.rq_depth = p_ca_obj->num_wrs; + qp_create_attr.sq_sge = 1; + qp_create_attr.rq_sge = 1; + qp_create_attr.h_sq_cq = p_ca_obj->h_cq; + qp_create_attr.h_rq_cq = p_ca_obj->h_cq; + + qp_create_attr.sq_signaled = TRUE; + + qp_create_attr.qp_type = IB_QPT_MAD; + + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + mad_qp_err_cb, + &p_ca_obj->h_qp[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],mad_qp_destroy_cb); + return (ib_status); + } + + if (p_ca_obj->is_loopback == TRUE) + { + // do loopback on same QP + p_ca_obj->h_qp[DEST_QP] = p_ca_obj->h_qp[SRC_QP]; + p_ca_obj->qp_attr[DEST_QP] = p_ca_obj->qp_attr[SRC_QP]; + } + else + { + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + mad_qp_err_cb, + &p_ca_obj->h_qp[DEST_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],mad_qp_destroy_cb); + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + + //CL_ASSERT(ib_status == IB_SUCCESS); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! 
%s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[DEST_QP],mad_qp_destroy_cb); + return (ib_status); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +void +alts_send_mad_resp( + alts_mad_ca_obj_t *p_ca_obj, + ib_mad_element_t *p_gmp ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_mad_element_t *p_resp; + ib_mad_t *p_mad; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + //Create an Address vector + av_attr.dlid = p_gmp->remote_lid; + av_attr.port_num = p_ca_obj->dest_port_num; + av_attr.sl = p_gmp->remote_sl; + av_attr.path_bits = p_gmp->path_bits; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + + av_attr.grh_valid = p_gmp->grh_valid; + if (p_gmp->grh_valid == TRUE) + av_attr.grh = *p_gmp->p_grh; + + ib_status = ib_create_av(p_ca_obj->h_pd,&av_attr,&p_ca_obj->h_av_dest); + if(ib_status != IB_SUCCESS) + return; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("++++++ dlid(x%x) src_port(%d) ====\n", + av_attr.dlid, av_attr.port_num)); + + ib_status = ib_get_mad( + p_ca_obj->h_dest_pool, + MAD_BLOCK_SIZE, + &p_resp ); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_get_mad()! %s\n", ib_get_err_str(ib_status))); + + return; + } + + // format mad + p_resp->context1 = (void *)1; + p_resp->context2 = p_ca_obj; + + /* Send request information. */ + p_resp->h_av = p_ca_obj->h_av_dest; + p_resp->send_opt = IB_SEND_OPT_SIGNALED; + p_resp->resp_expected = FALSE; //TRUE; + + p_resp->remote_qp = p_gmp->remote_qp; + p_resp->remote_qkey = p_ca_obj->qkey; + p_resp->timeout_ms = 0; + p_resp->retry_cnt = 0; + + /* Completion information. */ + p_resp->status = 0; + + // format mad + p_mad = p_resp->p_mad_buf; + + // copy msg received as response + ib_mad_init_response( + p_gmp->p_mad_buf, + p_mad, + 0 ); + + // send + ib_status = ib_send_mad( + p_ca_obj->h_dest_mad_svc, + p_resp, + NULL ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ; +} + +void +mad_svc_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + ib_mad_element_t *p_gmp; + uint32_t i = 0; + alts_mad_ca_obj_t *p_ca_obj; + ib_mad_t *p_mad; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( h_mad_svc ); + + CL_ASSERT (mad_svc_context); + CL_ASSERT (p_mad_element); + + p_gmp = p_mad_element; + + p_ca_obj = (alts_mad_ca_obj_t*)mad_svc_context; + + do + { + p_mad = p_gmp->p_mad_buf; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Send completed:\n" + "\tstatus......:%s\n" + "\tremote_qp...:x%x\n" + "\tremote_qkey.:x%x\n" + "\ttid.........:x%"PRIx64"\n", + ib_get_wc_status_str(p_gmp->status), + CL_NTOH32(p_gmp->remote_qp), + CL_NTOH32(p_gmp->remote_qkey), + p_mad->trans_id + )); + + if( p_gmp->status == IB_WCS_SUCCESS ) + { + i++; + p_ca_obj->send_done++; + } + else + { + p_ca_obj->send_done_error++; + } + + // loop + p_gmp = p_gmp->p_next; + + } while (p_gmp); + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (send=%d) (total=%d)\n", + i, + p_ca_obj->send_done, + p_ca_obj->cq_done) ); + + // put it back in the mad owner's pool + ib_put_mad(p_mad_element); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_svc_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + ib_mad_element_t *p_gmp; + uint32_t i = 0; + alts_mad_ca_obj_t *p_ca_obj; + ib_mad_t *p_mad; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( h_mad_svc ); + + CL_ASSERT (mad_svc_context); + CL_ASSERT (p_mad_element); + + p_gmp = 
p_mad_element; + p_ca_obj = (alts_mad_ca_obj_t*)mad_svc_context; + + do + { + p_mad = p_gmp->p_mad_buf; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Recv completed:\n" + "\tstatus......:%s\n" + "\tremote_qp...:x%x\n" + "\tremote_lid..:x%x\n" + "\ttid.........:x%"PRIx64"\n", + ib_get_wc_status_str(p_gmp->status), + CL_NTOH32(p_gmp->remote_qp), + p_gmp->remote_lid, + p_mad->trans_id + )); + + if( p_gmp->status == IB_WCS_SUCCESS ) + { + i++; + p_ca_obj->recv_done++; + + // process received mad + if (p_ca_obj->reply_requested == TRUE) + { + // is it a request? + if (ib_mad_is_response(p_mad) != TRUE) + alts_send_mad_resp(p_ca_obj, p_gmp); + } + } + else + { + p_ca_obj->recv_done_error++; + } + + + // loop + p_gmp = p_gmp->p_next; + + } while (p_gmp); + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (recv=%d) (total=%d)\n", + i, + p_ca_obj->recv_done, + p_ca_obj->cq_done) ); + + // put it back in the mad owner's pool + ib_put_mad(p_mad_element); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_svc_qp0_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + ib_mad_element_t *p_gmp; + uint32_t i = 0; + alts_mad_ca_obj_t *p_ca_obj; + ib_mad_t *p_mad; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( h_mad_svc ); + + CL_ASSERT (mad_svc_context); + CL_ASSERT (p_mad_element); + + p_gmp = p_mad_element; + p_ca_obj = (alts_mad_ca_obj_t*)mad_svc_context; + + do + { + p_mad = p_gmp->p_mad_buf; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Recv completed:\n" + "\tstatus......:%s\n" + "\tremote_qp...:x%x\n" + "\tremote_lid..:x%x\n" + "\ttid.........:x%"PRIx64"\n", + ib_get_wc_status_str(p_gmp->status), + CL_NTOH32(p_gmp->remote_qp), + p_gmp->remote_lid, + p_mad->trans_id + )); + + if( p_gmp->status == IB_WCS_SUCCESS ) + { + i++; + p_ca_obj->recv_done++; + + // process received mad + if (p_ca_obj->reply_requested == TRUE) + { + // is it a request? 
+ //if (ib_mad_is_response(p_mad) != TRUE) + // alts_send_mad_resp(p_ca_obj, p_gmp); + } + } + else + { + p_ca_obj->recv_done_error++; + } + + + + // loop + p_gmp = p_gmp->p_next; + + } while (p_gmp); + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (recv=%d) (total=%d)\n", + i, + p_ca_obj->recv_done, + p_ca_obj->cq_done) ); + + // put it back in the mad owner's pool + ib_put_mad(p_mad_element); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +/* + * Create the Spl PD and QP + */ +ib_api_status_t +mad_create_spl_resources( + alts_mad_ca_obj_t *p_ca_obj, + ib_qp_type_t qp_type, + uint8_t mgmt_class, + uint8_t class_ver, + ib_pfn_mad_comp_cb_t pfn_mad_svc_send_cb, + ib_pfn_mad_comp_cb_t pfn_mad_svc_recv_cb ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + /* + * Allocate a PD + */ + ib_status = ib_alloc_pd( + p_ca_obj->h_ca, + IB_PDT_ALIAS, + p_ca_obj, //pd_context + &p_ca_obj->h_pd); + + CL_ASSERT(ib_status == IB_SUCCESS); + + /* + * Create QP Attributes + */ + cl_memclr(&qp_create_attr, sizeof(ib_qp_create_t)); + + qp_create_attr.sq_depth = p_ca_obj->num_wrs; + qp_create_attr.rq_depth = p_ca_obj->num_wrs; + qp_create_attr.sq_sge = 1; + qp_create_attr.rq_sge = 1; + qp_create_attr.h_sq_cq = NULL; + qp_create_attr.h_rq_cq = NULL; + + qp_create_attr.sq_signaled = TRUE; + + qp_create_attr.qp_type = qp_type; // IB_QPT_QP1_ALIAS or IB_QPT_QP0_ALIAS; + + ib_status = ib_get_spl_qp( + p_ca_obj->h_pd, + p_ca_obj->src_portguid, + &qp_create_attr, + p_ca_obj, // context + mad_qp_err_cb, + &p_ca_obj->h_src_pool, + &p_ca_obj->h_qp[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_get_spl_qp()! %s\n", ib_get_err_str(ib_status))); + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + + // create svc + cl_memclr(&mad_svc, sizeof(ib_mad_svc_t)); + + mad_svc.mad_svc_context = p_ca_obj; + mad_svc.pfn_mad_send_cb = pfn_mad_svc_send_cb; + mad_svc.pfn_mad_recv_cb = pfn_mad_svc_recv_cb; + + mad_svc.support_unsol = TRUE; + + mad_svc.mgmt_class = mgmt_class; + mad_svc.mgmt_version = class_ver; + + + // fill in methods supported + mad_svc.method_array[ALTS_TEST_MGMT_METHOD] = TRUE; + + ib_status = ib_reg_mad_svc( + p_ca_obj->h_qp[SRC_QP], + &mad_svc, + &p_ca_obj->h_src_mad_svc ); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_reg_mad_svc()! %s\n", ib_get_err_str(ib_status))); + return (ib_status); + } + + // do the server side too if we are not doing loopback + if (p_ca_obj->is_loopback == TRUE) + { + // do loopback on same QP + p_ca_obj->h_qp[DEST_QP] = p_ca_obj->h_qp[SRC_QP]; + p_ca_obj->qp_attr[DEST_QP] = p_ca_obj->qp_attr[SRC_QP]; + } + else + { + ib_status = ib_get_spl_qp( + p_ca_obj->h_pd, + p_ca_obj->dest_portguid, + &qp_create_attr, + p_ca_obj, // context + mad_qp_err_cb, + &p_ca_obj->h_dest_pool, + &p_ca_obj->h_qp[DEST_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_get_spl_qp()! 
%s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],mad_qp_destroy_cb); + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + + // create svc + cl_memclr(&mad_svc, sizeof(ib_mad_svc_t)); + + mad_svc.mad_svc_context = p_ca_obj; + mad_svc.pfn_mad_send_cb = pfn_mad_svc_send_cb; + mad_svc.pfn_mad_recv_cb = pfn_mad_svc_recv_cb; + + mad_svc.support_unsol = TRUE; + + if (qp_type == IB_QPT_QP0_ALIAS) + { + mad_svc.mgmt_class = IB_MCLASS_SUBN_LID; + mad_svc.mgmt_version = ALTS_TEST_MGMT_CLASS_VER; + } + else + { + mad_svc.mgmt_class = ALTS_TEST_MGMT_CLASS; + mad_svc.mgmt_version = ALTS_TEST_MGMT_CLASS_VER; + } + + // fill in methods supported + mad_svc.method_array[ALTS_TEST_MGMT_METHOD] = TRUE; + + ib_status = ib_reg_mad_svc( + p_ca_obj->h_qp[DEST_QP], + &mad_svc, + &p_ca_obj->h_dest_mad_svc ); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_reg_mad_svc()! %s\n", ib_get_err_str(ib_status))); + return (ib_status); + } + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +void +mad_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + ib_api_status_t ib_status; + uint32_t i = 0, id; + ib_wc_t *p_free_wcl, *p_done_cl= NULL; + alts_mad_ca_obj_t *p_ca_obj; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( h_cq ); + + CL_ASSERT(cq_context); + + p_ca_obj = (alts_mad_ca_obj_t *)cq_context; + + + ib_status = ib_rearm_cq(p_ca_obj->h_cq, TRUE); + + p_free_wcl = &free_wcl; + p_free_wcl->p_next = NULL; + + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wcl, &p_done_cl); + + while(p_done_cl) + { + + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id )); + + + if (p_done_cl->wc_type == IB_WC_RECV) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("message length..:%d bytes\n", + p_done_cl->length )); + + id = (uint32_t)p_done_cl->wr_id; + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvUD info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + } + + p_free_wcl = p_done_cl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + i++; + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wcl, &p_done_cl); + } + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (total=%d)\n", + i, + p_ca_obj->cq_done) ); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_cq_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_qp_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_pd_destroy_cb( + void *context + ) +{ +/* + * PD destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + +void +mad_qp_destroy_cb( + void *context + ) +{ +/* + * QP destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +mad_cq_destroy_cb( + void *context + ) +{ +/* + * CQ destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +ib_api_status_t +alts_qp1_loopback( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + 
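+	/*
+	 * Editorial sketch: all of the alts_qp*_ test wrappers that follow
+	 * share this skeleton (shown here once, in simplified form, so each
+	 * body below can be read against it):
+	 *
+	 *	p_ca_obj = cl_zalloc( sizeof(alts_mad_ca_obj_t) );	// per-test state
+	 *	ib_query_ca( h_ca, p_ca_attr, &bsize );		// full CA/port attributes
+	 *	alts_mad_check_active_ports( p_ca_obj );	// choose SLID/DLID pair
+	 *	mad_create_spl_resources( p_ca_obj, ... );	// PD, QP alias, MAD service
+	 *	alts_spl_message_passing( p_ca_obj );		// post requests, wait
+	 *	// pass criterion: cq_done equals the expected completion count
+	 *	alts_spl_destroy_resources( p_ca_obj );		// teardown, even on failure
+	 */
+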
ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = TRUE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP1_ALIAS, + ALTS_TEST_MGMT_CLASS, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp1_2_ports( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * 
Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP1_ALIAS, + ALTS_TEST_MGMT_CLASS, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp1_2_ports_100( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 100; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP1_ALIAS, + ALTS_TEST_MGMT_CLASS, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( 
ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 200) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp1_pingpong( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = IB_QP1_WELL_KNOWN_Q_KEY; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP1; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->reply_requested = TRUE; // we need a reply + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP1_ALIAS, + ALTS_TEST_MGMT_CLASS, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 4) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + 
cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_qp0_loopback( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP0; + p_ca_obj->is_loopback = TRUE; + + p_ca_obj->reply_requested = FALSE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP0_ALIAS, + IB_MCLASS_SUBN_LID, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp0_2_ports( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for 
p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP0; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->reply_requested = FALSE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP0_ALIAS, + IB_MCLASS_SUBN_LID, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp0_2_ports_100( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 100; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP0; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->reply_requested = FALSE; + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * 
Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP0_ALIAS, + IB_MCLASS_SUBN_LID, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 200) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_qp0_pingpong( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP0; + p_ca_obj->is_loopback = FALSE; + + p_ca_obj->reply_requested = TRUE; // we need a reply + + /* + * get an active port + */ + ib_status = alts_mad_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP0_ALIAS, + IB_MCLASS_SUBN_LID, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_spl_message_passing(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 4) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, 
p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +ib_api_status_t +alts_qp0_ping_switch ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_mad_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ib_port_attr_t *p_src_port_attr = NULL; + ib_port_attr_t *p_dest_port_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_mad_ca_obj_t*)cl_zalloc(sizeof(alts_mad_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_mad_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->src_qp_num = IB_QP0; + p_ca_obj->is_loopback = TRUE; + + p_ca_obj->reply_requested = TRUE; + + // set the out port to be the last port + p_src_port_attr = &p_ca_attr->p_port_attr[p_ca_attr->num_ports-1]; + + if (p_src_port_attr->link_state == 0) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires the last port of HCA connected to a switch.\n")); + + ib_status = IB_ERROR; + break; + } + + // reset src and dest + p_dest_port_attr = p_src_port_attr; + + p_ca_obj->p_dest_port_attr = p_dest_port_attr; + p_ca_obj->p_src_port_attr = p_src_port_attr; + + p_ca_obj->dlid = p_dest_port_attr->lid; + p_ca_obj->slid = p_src_port_attr->lid; + + p_ca_obj->dest_portguid = p_dest_port_attr->port_guid; + p_ca_obj->src_portguid = p_src_port_attr->port_guid; + + p_ca_obj->dest_port_num = p_dest_port_attr->port_num; + p_ca_obj->src_port_num = p_src_port_attr->port_num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("**** slid = x%x (x%x) ***dlid = x%x (x%x) ***************\n", + p_ca_obj->slid, + CL_NTOH16(p_ca_obj->slid), + p_ca_obj->dlid, + CL_NTOH16(p_ca_obj->dlid) )); + + + /* + * Create the necessary resource PD/QP/QP + */ + ib_status = mad_create_spl_resources( + p_ca_obj, + IB_QPT_QP0_ALIAS, + IB_MCLASS_SUBN_DIR, + ALTS_TEST_MGMT_CLASS_VER, + mad_svc_send_cb, + mad_svc_recv_cb ); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("mad_create_spl_resources() failed with status %d\n", ib_status)); + break; + } + + /* + * Start Message passing activity + */ + ib_status = alts_qp0_msg_at_hc(p_ca_obj, 1); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_spl_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("errors: send(%d) recv(%d)\n", + p_ca_obj->send_done_error, 
p_ca_obj->recv_done_error)); + + ib_status = IB_ERROR; + } + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_spl_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} diff --git a/branches/Ndi/tests/alts/multisendrecv.c b/branches/Ndi/tests/alts/multisendrecv.c new file mode 100644 index 00000000..6c1ca0dd --- /dev/null +++ b/branches/Ndi/tests/alts/multisendrecv.c @@ -0,0 +1,2371 @@ +/* +* Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Multisendrecv test does a data transfer between two queue pairs created one + * on each port of the hca. In order for this test to work, two ports of the hca + * should be connected in a loop back and must be configured to ACTIVE PORT STATE. 
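+ * Each message is expected to complete twice on the shared CQ (one send
+ * completion and one receive completion), so the single-message tests below
+ * wait for cq_done == 2 and the 100-message variants for cq_done == 200.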
+ * + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + +/* Parameters */ + +#define TestUD1 1 +#define TestRC1 2 + +#define MAX_QPS 8 +#define SRC_QP 0 +#define DEST_QP 1 + + +typedef struct _alts_multisr_ca_obj +{ + ib_api_status_t status; + uint32_t test_type; + + ib_ca_handle_t h_ca; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr; + ib_port_attr_t *p_dest_port_attr; + + ib_net64_t src_portguid; + uint8_t src_port_num; + + ib_net64_t dest_portguid; + uint8_t dest_port_num; + + ib_net16_t slid; + ib_net16_t dlid; + + ib_cq_handle_t h_cq; + uint32_t cq_size; + + ib_pd_handle_t h_pd; + + ib_qp_handle_t h_qp[MAX_QPS]; + ib_net32_t qkey; + + ib_qp_attr_t qp_attr[MAX_QPS]; + + + ib_send_wr_t *p_send_wr; + ib_recv_wr_t *p_recv_wr; + size_t wr_send_size; + size_t wr_recv_size; + uint32_t num_wrs; + uint32_t ds_list_depth; + uint32_t msg_size; // Initialize this field + + ib_av_handle_t h_av_src; + mem_region_t mem_region[200]; + + uint32_t cq_done; // total completions + boolean_t is_src; + boolean_t is_loopback; + +} alts_multisr_ca_obj_t; + + + +/* + * Function Prototypes + */ + +ib_api_status_t +alts_check_active_ports( + alts_multisr_ca_obj_t *p_ca_obj ); + +ib_api_status_t +alts_create_resources( + alts_multisr_ca_obj_t *p_ca_obj ); + +ib_api_status_t +alts_activate_qp( + alts_multisr_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp ); + +ib_api_status_t +alts_destroy_resources( + alts_multisr_ca_obj_t *p_ca_obj ); + +ib_api_status_t +alts_register_mem( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t size ); + +ib_api_status_t +alts_deregister_mem( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index ); + +ib_api_status_t +multisend_post_sends( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_post ); + +ib_api_status_t +multisend_post_recvs( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_post ); + +void +multisend_cq_destroy_cb( + void *context ); + +void +multisend_pd_destroy_cb( + void *context ); + +void +multisend_qp_destroy_cb( + void *context ); + +/* + * CQ completion callback function + */ + +void +multisend_cq_comp_cb( + void *cq_context, + ib_qp_type_t qp_type); + +void +ud_multisend_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +void +rc_multisend_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); +/* + * CQ Error callback function + */ + +void +multisend_cq_err_cb( + ib_async_event_rec_t *p_err_rec ); + +/* + * QP Error callback function + */ +void +multisend_qp_err_cb( + ib_async_event_rec_t *p_err_rec ); + +ib_api_status_t +alts_ud_loopback ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_ud_2_ports ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_ud_2_ports_100_msgs ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_rc_loopback ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_rc_2_ports ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +ib_api_status_t +alts_rc_2_ports_100_msgs ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ); + +/* + * Gloabal Variables + */ +ib_send_wr_t send_wr; +ib_recv_wr_t recv_wr; +ib_local_ds_t send_ds; +ib_local_ds_t recv_ds; +ib_cq_create_t cq_create_attr; +ib_qp_create_t qp_create_attr; +ib_av_attr_t av_attr; +ib_mr_create_t mr_create = {0}; +ib_wc_t free_wclist; +ib_wc_t free_wcl; + +ib_api_status_t ud_loopback=IB_NOT_FOUND, ud_2_ports=IB_NOT_FOUND; +ib_api_status_t 
rc_loopback=IB_NOT_FOUND, rc_2_ports=IB_NOT_FOUND; +ib_api_status_t ud_2_ports_100=IB_NOT_FOUND, rc_2_ports_100=IB_NOT_FOUND; + +//extern uint32_t g_al_dbg_lvl; + +/* This test case assumes that the HCA has 2 port connected + * through the switch. Sends packets from lower port number to higher + * port number. + */ +ib_api_status_t +al_test_multi_send_recv(void) +{ + ib_api_status_t ib_status = IB_ERROR; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca = NULL; + uint32_t bsize; + ib_ca_attr_t *p_ca_attr = NULL; + //alts_multisr_ca_obj_t ca_obj; // for testing stack + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + do + { + /* + * Open the AL interface + */ + ib_status = alts_open_al(&h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_al failed status = %d", ib_status) ); + break; + } + + /* + * Default opens the first CA + */ + ib_status = alts_open_ca(h_al, &h_ca); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_ca failed status = %d", ib_status) ); + break; + } + + /* + * Get the CA Attributest + * Check for two active ports + */ + + /* + * Query the CA + */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + CL_ASSERT(bsize); + + /* run all tests in succession */ + ib_status = alts_ud_loopback(h_ca, bsize); + ud_loopback = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_ud_loopback failed with status = %d\n", ib_status) ); + + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_ud_loopback passed.\n") ); + + ib_status = alts_ud_2_ports(h_ca, bsize); + ud_2_ports = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_ud_2_ports failed with status = %d\n", ib_status) ); + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_ud_2_ports passed.\n") ); + + ib_status = alts_ud_2_ports_100_msgs(h_ca, bsize); + ud_2_ports_100 = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_ud_2_ports_100_msgs failed with status = %d\n", + ib_status) ); + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_ud_2_ports_100_msgs passed.\n") ); + +//#if 0 + /********* + ********* + Note for Mellanox & other HCA's: + + enable this test to test rc loopback on the same QP + ********/ + + ib_status = alts_rc_loopback(h_ca, bsize); + rc_loopback = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_rc_loopback failed with status = %d\n", ib_status) ); + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_rc_loopback passed.\n") ); +//#endif + + ib_status = alts_rc_2_ports(h_ca, bsize); + rc_2_ports = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_rc_2_ports failed with status = %d\n", ib_status) ); + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_rc_2_ports passed.\n") ); + + ib_status = alts_rc_2_ports_100_msgs(h_ca, bsize); + rc_2_ports_100 = ib_status; + if (ib_status != IB_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_ERROR, + ("alts_rc_2_ports_100_msgs failed with status = %d\n", + ib_status) ); + break; + } + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("alts_rc_2_ports_100_msgs passed.\n") ); + + } while (0); + + /* Destroy the resources*/ + if (p_ca_attr) + cl_free(p_ca_attr); + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Test results (MultiSend):\n" + "\tud_loopback..........: %s\n" + "\tud_2_ports...........: %s\n" + "\tud_2_ports_100_msgs..: %s\n" + 
"\trc_loopback..........: %s\n" + "\trc_2_ports...........: %s\n" + "\trc_2_ports_100_msgs..: %s\n", + ib_get_err_str(ud_loopback), + ib_get_err_str(ud_2_ports), + ib_get_err_str(ud_2_ports_100), + ib_get_err_str(rc_loopback), + ib_get_err_str(rc_2_ports), + ib_get_err_str(rc_2_ports_100) + )); + + alts_close_ca(h_ca); + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; + +} + +ib_api_status_t +alts_destroy_resources( + alts_multisr_ca_obj_t *p_ca_obj) +{ + uint32_t j; + + /* + * Destroy Send QP, Recv QP, CQ and PD + */ + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if (p_ca_obj->h_qp[SRC_QP]) + { + ib_status = ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],multisend_qp_destroy_cb); + } + + if (p_ca_obj->is_loopback != TRUE) + { + if (p_ca_obj->h_qp[DEST_QP]) + { + ib_status = ib_destroy_qp( + p_ca_obj->h_qp[DEST_QP], + multisend_qp_destroy_cb); + } + } + + if (p_ca_obj->h_cq) + ib_status = ib_destroy_cq(p_ca_obj->h_cq,multisend_cq_destroy_cb); + + /* + * Deregister the Memeory + */ + for(j=0; j < p_ca_obj->num_wrs * 2; j++) + { + if(p_ca_obj->mem_region[j].buffer != NULL) + ib_status = alts_deregister_mem(p_ca_obj, j); + } + + if (p_ca_obj->h_pd) + ib_status = ib_dealloc_pd(p_ca_obj->h_pd,multisend_pd_destroy_cb); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_message_passing( + alts_multisr_ca_obj_t *p_ca_obj, + ib_qp_type_t qp_type ) +{ + uint32_t i,j, k; + ib_api_status_t ib_status = IB_SUCCESS; +//#if 0 + ib_wc_t *p_free_wclist; + ib_wc_t *p_done_cl; + uint32_t id; + char *buff; +//#endif + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + p_ca_obj->wr_send_size = sizeof(ib_send_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + p_ca_obj->wr_recv_size = sizeof(ib_recv_wr_t) + \ + (sizeof(ib_local_ds_t) * p_ca_obj->ds_list_depth); + + p_ca_obj->p_send_wr = &send_wr; + p_ca_obj->p_recv_wr = &recv_wr; + + p_ca_obj->p_send_wr->ds_array = &send_ds; + p_ca_obj->p_recv_wr->ds_array = &recv_ds; + + // receive + for (i=0; i < p_ca_obj->num_wrs; i++) + { + ib_status = alts_register_mem( p_ca_obj, i, 4096); + + if ( ib_status != IB_SUCCESS ) + { + for(j=0; jmem_region[i].my_lid = p_ca_obj->dlid; + } + } + + if(ib_status != IB_SUCCESS) + return ib_status; + + // send + for (k=i; k < i + p_ca_obj->num_wrs; k++) + { + ib_status = alts_register_mem( p_ca_obj, k, 4096); + + if ( ib_status != IB_SUCCESS ) + { + for(j=i; jmem_region[k].my_lid = p_ca_obj->slid; + } + } + + if(ib_status != IB_SUCCESS) + return ib_status; + + + //Create an Address vector + av_attr.dlid = p_ca_obj->dlid; + av_attr.port_num = p_ca_obj->src_port_num; + av_attr.sl = 0; + av_attr.path_bits = 0; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + av_attr.grh_valid = FALSE; + + ib_status = ib_create_av(p_ca_obj->h_pd,&av_attr,&p_ca_obj->h_av_src); + if(ib_status != IB_SUCCESS) + return ib_status; + + p_ca_obj->cq_done = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("++++++ dlid(x%x) src_port(%d) ====\n", + av_attr.dlid, av_attr.port_num)); + + if(ib_status == IB_SUCCESS) + { + multisend_post_recvs( p_ca_obj, 0, p_ca_obj->num_wrs ); + multisend_post_sends( p_ca_obj, p_ca_obj->num_wrs, p_ca_obj->num_wrs ); +#if 0 + for ( i = 0 ; i < p_ca_obj->num_wrs; ++i) + { + multisend_post_recvs( p_ca_obj, i, p_ca_obj->num_wrs ); + } + + for ( k = i ; k < i + p_ca_obj->num_wrs; ++k) + { + multisend_post_sends( p_ca_obj, k, p_ca_obj->num_wrs ); + } +#endif + } + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + +// cl_thread_suspend(10000); // 
10 seconds + + while (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs> 2)) || + (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("sleeping for awhile ...\n")); + cl_thread_suspend(0); // 10 seconds + } + + + //if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs> 2)) || + // (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + //{ + // ALTS_PRINT(ALTS_DBG_VERBOSE, + // ("sleeping for awhile ...\n")); + // cl_thread_suspend(10000); // 10 seconds + //} + + //if (((!p_ca_obj->cq_done) && (p_ca_obj->num_wrs> 2)) || + // (p_ca_obj->cq_done != p_ca_obj->num_wrs*2)) + //{ + // ALTS_PRINT(ALTS_DBG_VERBOSE, + // ("sleeping for awhile ...\n")); + // cl_thread_suspend(10000); // 10 seconds + //} + +//#if 0 + + if (!p_ca_obj->cq_done) + { + p_free_wclist = &free_wclist; + p_free_wclist->p_next = NULL; + p_done_cl = NULL; + i = 0; + + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl); + + while(p_done_cl) + { + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id )); + + if (p_done_cl->wc_type == IB_WC_RECV) + { + id = (uint32_t)p_done_cl->wr_id; + buff = (char *)p_ca_obj->mem_region[id].buffer; + if (qp_type == IB_QPT_UNRELIABLE_DGRM) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("---MSG--->%s\n",&buff[40])); + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvUD info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n" + "\tremote_qp..:x%x\n" + "\tpkey_index.:%d\n" + "\tremote_lid.:x%x\n" + "\tremote_sl..:x%x\n" + "\tpath_bits..:x%x\n" + "\tsrc_lid....:x%x\n", + p_done_cl->recv.ud.recv_opt, + p_done_cl->recv.ud.immediate_data, + CL_NTOH32(p_done_cl->recv.ud.remote_qp), + p_done_cl->recv.ud.pkey_index, + CL_NTOH16(p_done_cl->recv.ud.remote_lid), + p_done_cl->recv.ud.remote_sl, + p_done_cl->recv.ud.path_bits, + CL_NTOH16(p_ca_obj->mem_region[id].my_lid))); + } + else + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + } + + } + + p_free_wclist = p_done_cl; + p_free_wclist->p_next = NULL; + p_done_cl = NULL; + i++; + p_done_cl = NULL; + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wclist, &p_done_cl); + } + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ is = %d\n", i) ); + + p_ca_obj->cq_done += i; + + ib_status = IB_SUCCESS; + } +//#endif + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +multisend_post_sends( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_send_wr_t *p_s_wr, *p_send_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if (p_ca_obj->test_type == TestUD1) + msg_size = p_ca_obj->msg_size - sizeof(ib_grh_t); + else + msg_size = 64; + + //msg_size = p_ca_obj->msg_size; + + msg_size = 64; + + p_s_wr = p_ca_obj->p_send_wr; + + p_s_wr->p_next = NULL; + p_s_wr->ds_array[0].length = msg_size; + p_s_wr->num_ds = 1; + + p_s_wr->wr_type = WR_SEND; + + if (p_ca_obj->test_type == TestUD1) + { + p_s_wr->dgrm.ud.h_av = p_ca_obj->h_av_src; + p_s_wr->send_opt = IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED; + + p_s_wr->dgrm.ud.remote_qkey = p_ca_obj->qkey; + p_s_wr->dgrm.ud.remote_qp = p_ca_obj->qp_attr[DEST_QP].num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("======= qkey(x%x) qp_num(x%x) ========\n", + p_s_wr->dgrm.ud.remote_qkey, + p_s_wr->dgrm.ud.remote_qp)); 
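+
+		/* Reviewer sketch (added, not part of the original test): a UD
+		 * receiver silently drops datagrams whose qkey does not match its
+		 * own, so a bad qkey here would only surface later as a cq_done
+		 * mismatch.  Fail loudly instead: */
+		CL_ASSERT( p_s_wr->dgrm.ud.remote_qkey == p_ca_obj->qkey );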
+ + } + else if(p_ca_obj->test_type == TestRC1) + { + p_s_wr->send_opt = IB_SEND_OPT_SIGNALED | \ + IB_SEND_OPT_IMMEDIATE | \ + IB_SEND_OPT_SOLICITED ; + + } + + + for (i = 0; i < num_posts; i++) + { + sprintf((char *)p_ca_obj->mem_region[i+reg_index].buffer,"hello %d", i); + + p_s_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_s_wr->ds_array[0].lkey = p_ca_obj->mem_region[i+reg_index].lkey; + + p_s_wr->wr_id = i+reg_index; + p_s_wr->immediate_data = 0xfeedde00 + i; + + p_s_wr->remote_ops.vaddr = 0; + p_s_wr->remote_ops.rkey = 0; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("***** Send ******vaddr(0x%"PRIx64") lkey(0x%x) len(%d)*****\n", + (void*)(uintn_t)p_s_wr->ds_array[0].vaddr, + p_s_wr->ds_array[0].lkey, + p_s_wr->ds_array[0].length)); + + ib_status = ib_post_send( + p_ca_obj->h_qp[SRC_QP], + p_s_wr, + &p_send_failure_wr); + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +multisend_post_recvs( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t num_posts ) +{ + ib_recv_wr_t *p_r_wr, *p_failure_wr; + uint32_t msg_size, i; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + + if (p_ca_obj->test_type == TestUD1) + msg_size = p_ca_obj->msg_size; + else + msg_size = 64; + //msg_size = p_ca_obj->msg_size; + + if (p_ca_obj->test_type == TestUD1) + msg_size = 64 + sizeof(ib_grh_t); + else + msg_size = 64; + + p_r_wr = p_ca_obj->p_recv_wr; + + p_r_wr->p_next = NULL; + p_r_wr->ds_array[0].length = msg_size; + p_r_wr->num_ds = 1; + + for (i = 0; i < num_posts; i++) + { + p_r_wr->ds_array[0].vaddr = + (uintn_t)p_ca_obj->mem_region[i+reg_index].buffer; + p_r_wr->ds_array[0].lkey = p_ca_obj->mem_region[i+reg_index].lkey; + + p_r_wr->wr_id = i+reg_index; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("***** Recv ******vaddr(0x%"PRIx64") lkey(0x%x) len(%d)*****\n", + (void*)(uintn_t)p_r_wr->ds_array[0].vaddr, + p_r_wr->ds_array[0].lkey, + p_r_wr->ds_array[0].length)); + + if (p_ca_obj->is_loopback == TRUE) + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[SRC_QP], + p_r_wr, + &p_failure_wr); + } + else + { + ib_status = ib_post_recv( + p_ca_obj->h_qp[DEST_QP], + p_r_wr, + &p_failure_wr); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_register_mem( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index, + uint32_t size ) +{ + ib_mr_create_t mr_create = {0}; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + cl_memclr(&mr_create, sizeof(ib_mr_create_t)); + p_ca_obj->mem_region[reg_index].buffer = cl_zalloc(size); + CL_ASSERT (p_ca_obj->mem_region[reg_index].buffer); + + mr_create.vaddr = p_ca_obj->mem_region[reg_index].buffer; + mr_create.length = size; + mr_create.access_ctrl = IB_AC_LOCAL_WRITE; + + ib_status = ib_reg_mem( + p_ca_obj->h_pd, + &mr_create, + &p_ca_obj->mem_region[reg_index].lkey, + &p_ca_obj->mem_region[reg_index].rkey, + &p_ca_obj->mem_region[reg_index].mr_h); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + + return ib_status; +} + +ib_api_status_t +alts_deregister_mem( + alts_multisr_ca_obj_t *p_ca_obj, + uint32_t reg_index + ) +{ + + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if ( p_ca_obj->mem_region[reg_index].buffer != NULL ) + { + ib_status = ib_dereg_mr(p_ca_obj->mem_region[reg_index].mr_h); + + CL_ASSERT(ib_status == IB_SUCCESS); + + if ( ib_status != IB_SUCCESS ) + { + //PRINT the error msg + } + + cl_free(p_ca_obj->mem_region[reg_index].buffer); + + p_ca_obj->mem_region[reg_index].buffer = NULL; + } 
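+	/* Reviewer note (added): the else branch below reports IB_ERROR for a
+	 * slot that was never registered or was already freed; the cleanup loop
+	 * in alts_destroy_resources() avoids this path by checking buffer for
+	 * NULL before calling in. */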
+ else + { + ib_status = IB_ERROR; + + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +ib_api_status_t +alts_activate_qp( + alts_multisr_ca_obj_t *p_ca_obj, + ib_qp_handle_t h_qp + ) +{ + + ib_qp_mod_t qp_mod_attr = {0}; + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + if(p_ca_obj->is_src == 1) + qp_mod_attr.state.init.primary_port = p_ca_obj->src_port_num; + else + qp_mod_attr.state.init.primary_port = p_ca_obj->dest_port_num; + + qp_mod_attr.state.init.qkey = p_ca_obj->qkey; + qp_mod_attr.state.init.pkey_index = 0x0; + qp_mod_attr.state.init.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_MW_BIND; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("****INIT***** port num = %d \n", + qp_mod_attr.state.init.primary_port)); + + qp_mod_attr.req_state = IB_QPS_INIT; + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + // Time to query the QP + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[SRC_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + + + // transition to RTR + cl_memclr(&qp_mod_attr, sizeof(ib_qp_mod_t)); + + qp_mod_attr.state.rtr.opts = 0; + qp_mod_attr.state.rtr.rq_psn = CL_NTOH32(0x00000001); + + switch ( p_ca_obj->test_type ) + { + case TestRC1: + qp_mod_attr.state.rtr.opts = IB_MOD_QP_PRIMARY_AV; + break; + default: + break; + } + + if (p_ca_obj->is_src == 1) + { + if (p_ca_obj->is_loopback == TRUE) + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[SRC_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->src_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->slid; + } + else + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[DEST_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->src_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->dlid; + } + } + else + { + qp_mod_attr.state.rtr.dest_qp = p_ca_obj->qp_attr[SRC_QP].num; + qp_mod_attr.state.rtr.primary_av.port_num = p_ca_obj->dest_port_num; + qp_mod_attr.state.rtr.primary_av.dlid = p_ca_obj->slid; + } + + qp_mod_attr.state.rtr.primary_av.sl = 0; + qp_mod_attr.state.rtr.primary_av.grh_valid = 0; //Set to false + + qp_mod_attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + qp_mod_attr.state.rtr.primary_av.path_bits = 0; + + qp_mod_attr.state.rtr.primary_av.conn.path_mtu = 1; + qp_mod_attr.state.rtr.primary_av.conn.local_ack_timeout = 7; + qp_mod_attr.state.rtr.primary_av.conn.seq_err_retry_cnt = 7; + qp_mod_attr.state.rtr.primary_av.conn.rnr_retry_cnt = 7; + qp_mod_attr.state.rtr.rq_psn = CL_NTOH32(0x00000001); + qp_mod_attr.state.rtr.resp_res = 7; //32; + qp_mod_attr.state.rtr.rnr_nak_timeout = 7; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("****RTR***** dlid = x%x (x%x) port_num = %d dest_qp = %d \n", + qp_mod_attr.state.rtr.primary_av.dlid, + CL_NTOH16(qp_mod_attr.state.rtr.primary_av.dlid), + qp_mod_attr.state.rtr.primary_av.port_num, + CL_NTOH32(qp_mod_attr.state.rtr.dest_qp) )); + + qp_mod_attr.req_state = IB_QPS_RTR; + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[SRC_QP]); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + } + + cl_memclr(&qp_mod_attr, sizeof(ib_qp_mod_t)); + + qp_mod_attr.state.rts.sq_psn = CL_NTOH32(0x00000001); + + // NOTENOTE: Confirm the below time out settings + 
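+	// Reviewer note (added): these are IBTA-encoded fields, not milliseconds.
+	// local_ack_timeout is a 5-bit exponent (wait ~= 4.096us * 2^n, so 7 is
+	// roughly 524us), and retry_cnt/rnr_retry_cnt are 3-bit counts, where an
+	// rnr_retry_cnt of 7 means "retry indefinitely" per the spec.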
qp_mod_attr.state.rts.retry_cnt = 7; + qp_mod_attr.state.rts.rnr_retry_cnt = 7; + qp_mod_attr.state.rts.rnr_nak_timeout = 7; + qp_mod_attr.state.rts.local_ack_timeout = 7; + qp_mod_attr.state.rts.init_depth = 3; //3; + + qp_mod_attr.req_state = IB_QPS_RTS; + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("****RTS***** \n")); + ib_status = ib_modify_qp(h_qp, &qp_mod_attr); + + CL_ASSERT(ib_status == IB_SUCCESS); + + if(p_ca_obj->is_src == 1) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[SRC_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + else + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + CL_ASSERT(ib_status == IB_SUCCESS); + } + + if (p_ca_obj->is_loopback == TRUE) + { + ib_status = ib_query_qp(h_qp, + &p_ca_obj->qp_attr[DEST_QP]); + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return IB_SUCCESS; +} + + +ib_api_status_t +alts_check_active_ports(alts_multisr_ca_obj_t *p_ca_obj) +{ + ib_api_status_t ib_status; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_src_port_attr = NULL; + ib_port_attr_t *p_dest_port_attr = NULL; + uint32_t i; + ib_port_attr_t *p_port_attr; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(p_ca_obj); + + p_ca_attr = p_ca_obj->p_ca_attr; + + CL_ASSERT(p_ca_attr); + + for(i=0; i< p_ca_attr->num_ports; i++) + { + p_port_attr = &p_ca_attr->p_port_attr[i]; + + if (p_port_attr->link_state == IB_LINK_ACTIVE) + { + if (p_src_port_attr == NULL) + p_src_port_attr = p_port_attr; + else + if(p_dest_port_attr == NULL) + p_dest_port_attr = p_port_attr; + else + break; + } + } + + // handle loopback case + if (p_ca_obj->is_loopback == TRUE) + p_dest_port_attr = p_src_port_attr; + + if (p_src_port_attr && p_dest_port_attr) + { + p_ca_obj->p_dest_port_attr = p_dest_port_attr; + p_ca_obj->p_src_port_attr = p_src_port_attr; + + p_ca_obj->dlid = p_dest_port_attr->lid; + p_ca_obj->slid = p_src_port_attr->lid; + + p_ca_obj->dest_portguid = p_dest_port_attr->port_guid; + p_ca_obj->src_portguid = p_src_port_attr->port_guid; + + p_ca_obj->dest_port_num = p_dest_port_attr->port_num; + p_ca_obj->src_port_num = p_src_port_attr->port_num; + + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("****** slid = x%x (x%x) ***dlid = x%x (x%x) ***************\n", + p_ca_obj->slid, + CL_NTOH16(p_ca_obj->slid), + p_ca_obj->dlid, + CL_NTOH16(p_ca_obj->dlid) )); + + ib_status = IB_SUCCESS; + + } + else + { + + ib_status = IB_ERROR; + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +/* + * Create the CQ, PD and QP + */ + +ib_api_status_t +alts_create_resources( alts_multisr_ca_obj_t *p_ca_obj ) +{ + ib_api_status_t ib_status; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + cl_memclr(&qp_create_attr, sizeof(ib_qp_create_t)); + + /* + * Allocate a PD + */ + ib_status = ib_alloc_pd( + p_ca_obj->h_ca, + IB_PDT_NORMAL, + p_ca_obj, //pd_context + &p_ca_obj->h_pd); + + CL_ASSERT(ib_status == IB_SUCCESS); + + /* + * Create CQ Attributes + */ + cq_create_attr.size = p_ca_obj->cq_size; + //cq_create_attr.pfn_comp_cb = multisend_cq_comp_cb; + switch ( p_ca_obj->test_type ) + { + case TestUD1: + cq_create_attr.pfn_comp_cb = ud_multisend_cq_comp_cb; + break; + case TestRC1: + cq_create_attr.pfn_comp_cb = rc_multisend_cq_comp_cb; + break; + } + cq_create_attr.h_wait_obj = NULL; + + ib_status = ib_create_cq( + p_ca_obj->h_ca, + &cq_create_attr, + p_ca_obj, + multisend_cq_err_cb, + &p_ca_obj->h_cq ); + CL_ASSERT(ib_status == IB_SUCCESS); + + + p_ca_obj->cq_size = cq_create_attr.size; + + /* + * Create QP Attributes + */ + qp_create_attr.sq_depth = p_ca_obj->num_wrs; + qp_create_attr.rq_depth = p_ca_obj->num_wrs; + 
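+
+	/* Reviewer sketch (added, not in the original): both QPs created below
+	 * attach to the single CQ created above, so up to num_wrs send plus
+	 * num_wrs receive completions can be outstanding at once.  The callers
+	 * in this file size the CQ at 255*2, which covers num_wrs up to 100;
+	 * assert the invariant rather than assume it: */
+	CL_ASSERT( p_ca_obj->cq_size >= p_ca_obj->num_wrs * 2 );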
qp_create_attr.sq_sge = 1; + qp_create_attr.rq_sge = 1; + qp_create_attr.h_sq_cq = p_ca_obj->h_cq; + qp_create_attr.h_rq_cq = p_ca_obj->h_cq; + + qp_create_attr.sq_signaled = TRUE; + //qp_create_attr.sq_signaled = FALSE; + + + switch ( p_ca_obj->test_type ) + { + case TestUD1: + qp_create_attr.qp_type = IB_QPT_UNRELIABLE_DGRM; + break; + + case TestRC1: + qp_create_attr.qp_type = IB_QPT_RELIABLE_CONN; + break; + + default: + break; + } + + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + multisend_qp_err_cb, + &p_ca_obj->h_qp[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[SRC_QP], + &p_ca_obj->qp_attr[SRC_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],multisend_qp_destroy_cb); + return (ib_status); + } + + if (p_ca_obj->is_loopback == TRUE) + { + // do loopback on same QP + p_ca_obj->h_qp[DEST_QP] = p_ca_obj->h_qp[SRC_QP]; + } + else + { + ib_status = ib_create_qp( + p_ca_obj->h_pd, + &qp_create_attr, + p_ca_obj, + multisend_qp_err_cb, + &p_ca_obj->h_qp[DEST_QP]); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_create_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[SRC_QP],multisend_qp_destroy_cb); + return (ib_status); + } + + ib_status = ib_query_qp(p_ca_obj->h_qp[DEST_QP], + &p_ca_obj->qp_attr[DEST_QP]); + + //CL_ASSERT(ib_status == IB_SUCCESS); + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in query_qp()! %s\n", + ib_get_err_str(ib_status))); + + ib_destroy_qp(p_ca_obj->h_qp[DEST_QP],multisend_qp_destroy_cb); + return (ib_status); + } + } + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +void +multisend_cq_comp_cb( + void *cq_context, + ib_qp_type_t qp_type + ) +{ + ib_api_status_t ib_status; + uint32_t i = 0, id; + char *buff; + ib_wc_t *p_free_wcl, *p_done_cl= NULL; + alts_multisr_ca_obj_t *p_ca_obj; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(cq_context); + + p_ca_obj = (alts_multisr_ca_obj_t *)cq_context; + + + p_free_wcl = &free_wcl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wcl, &p_done_cl); + +poll_loop: + while(p_done_cl) + { + + if(p_done_cl->status != IB_WCS_SUCCESS) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion with error !!!!!!!! 
status = %s type=%s\n", + ib_get_wc_status_str(p_done_cl->status), + ib_get_wc_type_str( p_done_cl->wc_type))); + + } + else + { + /* + * print output + */ + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("Got a completion:\n" + "\ttype....:%s\n" + "\twr_id...:%"PRIx64"\n", + ib_get_wc_type_str(p_done_cl->wc_type), + p_done_cl->wr_id )); + + + + if (p_done_cl->wc_type == IB_WC_RECV) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("message length..:%d bytes\n", + p_done_cl->length )); + + id = (uint32_t)p_done_cl->wr_id; + buff = (char *)p_ca_obj->mem_region[id].buffer; + if (qp_type == IB_QPT_UNRELIABLE_DGRM) + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("---MSG--->%s\n",&buff[40])); + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvUD info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n" + "\tremote_qp..:x%x\n" + "\tpkey_index.:%d\n" + "\tremote_lid.:x%x\n" + "\tremote_sl..:x%x\n" + "\tpath_bits..:x%x\n" + "\tsrc_lid....:x%x\n", + p_done_cl->recv.ud.recv_opt, + p_done_cl->recv.ud.immediate_data, + CL_NTOH32(p_done_cl->recv.ud.remote_qp), + p_done_cl->recv.ud.pkey_index, + CL_NTOH16(p_done_cl->recv.ud.remote_lid), + p_done_cl->recv.ud.remote_sl, + p_done_cl->recv.ud.path_bits, + CL_NTOH16(p_ca_obj->mem_region[id].my_lid))); + } + else + { + ALTS_PRINT(ALTS_DBG_VERBOSE, + ("RecvRC info:\n" + "\trecv_opt...:x%x\n" + "\timm_data...:x%x\n", + p_done_cl->recv.conn.recv_opt, + p_done_cl->recv.ud.immediate_data )); + } + + } + } + + p_free_wcl = p_done_cl; + p_free_wcl->p_next = NULL; + p_done_cl = NULL; + i++; + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wcl, &p_done_cl); + } + + /* poll one more time to avoid a race with hw */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, TRUE); //TRUE); + if( IB_SUCCESS == ib_status ) + { + ib_status = ib_poll_cq(p_ca_obj->h_cq, &p_free_wcl, &p_done_cl); + if( p_done_cl ) + goto poll_loop; + } + + p_free_wcl = &free_wcl; + + p_ca_obj->cq_done += i; + + ALTS_PRINT( ALTS_DBG_INFO, + ("Number of items polled from CQ (in callback=%d) (total=%d)\n", + i, + p_ca_obj->cq_done) ); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +ud_multisend_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + multisend_cq_comp_cb (cq_context, IB_QPT_UNRELIABLE_DGRM ); +} + +void +rc_multisend_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + multisend_cq_comp_cb (cq_context, IB_QPT_RELIABLE_CONN ); +} + +void +multisend_cq_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_PRINT(ALTS_DBG_VERBOSE,("ERROR: Async CQ error !!!!!!!!!\n")); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +multisend_qp_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_PRINT(ALTS_DBG_VERBOSE,("ERROR: Async QP error !!!!!!!!!\n")); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + +void +multisend_pd_destroy_cb( + void *context + ) +{ +/* + * PD destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + + +void +multisend_qp_destroy_cb( + void *context + ) +{ +/* + * QP destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +void +multisend_cq_destroy_cb( + void *context + ) +{ +/* + * CQ destroy call back + */ + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( context ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} + +/* + * The tests + */ +ib_api_status_t +alts_ud_loopback ( + ib_ca_handle_t h_ca, + 
uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestUD1; + p_ca_obj->is_loopback = TRUE; + + /* + * get an active port + */ + ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_UNRELIABLE_DGRM); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + +// cl_thread_suspend(1000 ); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_ud_2_ports ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, 
&bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestUD1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires atleast 2 active ports on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + p_ca_obj->is_src = 0; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[DEST_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_UNRELIABLE_DGRM); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000 ); + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_ud_2_ports_100_msgs ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 100; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestUD1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + 
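+	/* Reviewer note (added): alts_check_active_ports() walks p_port_attr[]
+	 * and picks the first two ACTIVE ports as source and destination, so
+	 * this 100-message variant needs the same two-port cabling as
+	 * alts_ud_2_ports() above. */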
ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires atleast 2 active ports on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + p_ca_obj->is_src = 0; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[DEST_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_UNRELIABLE_DGRM); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 200) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_rc_loopback ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * check for Intel early ref hardware + */ + if ((p_ca_attr->vend_id == 0x00d0b7) && + (p_ca_attr->dev_id == 0x3101) && + (p_ca_attr->revision < 2)) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test cannot run on this revision of the HCA hardware!!!\n")); + ib_status = IB_SUCCESS; + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestRC1; + p_ca_obj->is_loopback = TRUE; + + /* + * get an active port + */ + ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test routing atleast 1 active port on the 1st hca\n")); + break; + } + + /* + * Create the 
necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + +ib_api_status_t +alts_rc_2_ports ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 1; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestRC1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires atleast 2 active ports on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + p_ca_obj->is_src = 0; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[DEST_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + 
*/ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 2) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + +ib_api_status_t +alts_rc_2_ports_100_msgs ( + ib_ca_handle_t h_ca, + uint32_t ca_attr_size ) +{ + ib_api_status_t ib_status = IB_ERROR, ib_status2; + uint32_t bsize; + alts_multisr_ca_obj_t *p_ca_obj = NULL; + ib_ca_attr_t *p_ca_attr = NULL; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT (h_ca); + CL_ASSERT (ca_attr_size); + + do + { + p_ca_obj = (alts_multisr_ca_obj_t*)cl_zalloc(sizeof(alts_multisr_ca_obj_t)); + if (!p_ca_obj) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for alts_multisr_ca_obj_t!\n") ); + break; + } + + /* Allocate the memory needed for query_ca */ + bsize = ca_attr_size; + p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + if (!p_ca_attr) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("zalloc() failed for p_ca_attr!\n") ); + break; + } + + ib_status = ib_query_ca(h_ca, p_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_query_ca failed with status = %d\n", ib_status) ); + break; + } + + /* + * Initialize the CA Object + */ + p_ca_obj->h_ca = h_ca; + p_ca_obj->p_ca_attr = p_ca_attr; + p_ca_obj->status = IB_SUCCESS; + p_ca_obj->cq_size = 255*2; + p_ca_obj->qkey = 0x66; + p_ca_obj->ds_list_depth = 1; + p_ca_obj->num_wrs = 100; + p_ca_obj->msg_size = 256; + + p_ca_obj->test_type = TestRC1; + p_ca_obj->is_loopback = FALSE; + + /* + * get an active port + */ + ib_status = alts_check_active_ports(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("This test requires atleast 2 active ports on the 1st hca\n")); + break; + } + + /* + * Create the necessary resource PD/CQ/QP/QP + */ + ib_status = alts_create_resources(p_ca_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("Create necessary resource failed with status %d\n", ib_status)); + break; + } + + /* + * Time to Activate the QP + */ + p_ca_obj->is_src = 1; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[SRC_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + p_ca_obj->is_src = 0; + ib_status = alts_activate_qp(p_ca_obj, p_ca_obj->h_qp[DEST_QP]); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_activate_qp failed with status %d\n", ib_status)); + break; + } + + /* + * Rearm Q + */ + ib_status = ib_rearm_cq(p_ca_obj->h_cq, FALSE);//TRUE); + + /* + * Start Message passing activity + */ + ib_status = alts_message_passing(p_ca_obj, IB_QPT_RELIABLE_CONN); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_message_passing failed with status %d\n", ib_status)); + break; + } + + cl_thread_suspend(1000); /* 1 sec */ + + if (p_ca_obj->cq_done == 200) + ib_status = IB_SUCCESS; + else + ib_status = IB_ERROR; + + } while (0); + + /* + * Destroy the resources + */ + 
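+	/* Reviewer note (added): cleanup below runs even when the test body broke
+	 * out of the do/while early, and ib_status2 only replaces ib_status when
+	 * the body itself succeeded, so the first failure is the one reported. */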
ib_status2 = alts_destroy_resources(p_ca_obj); + if (ib_status == IB_SUCCESS) + ib_status = ib_status2; + + if (p_ca_attr) + cl_free(p_ca_attr); + + if (p_ca_obj) + cl_free(p_ca_obj); + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} diff --git a/branches/Ndi/tests/alts/openclose.c b/branches/Ndi/tests/alts/openclose.c new file mode 100644 index 00000000..7d4e5619 --- /dev/null +++ b/branches/Ndi/tests/alts/openclose.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include +#include +#include +#include + +ib_api_status_t +al_test_openclose(void) +{ + ib_al_handle_t ph_al; + ib_api_status_t ib_status = IB_SUCCESS; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + + ib_status = ib_open_al(&ph_al); + if(ib_status != IB_SUCCESS) + { + ALTS_TRACE( ALTS_DBG_ERROR, + ("ib_open_al failed status = %s\n", ib_get_err_str(ib_status)) ); + break; + } + + ALTS_PRINT( ALTS_DBG_INFO, ("ib_open_al PASSED!!!\n") ); + + cl_thread_suspend( 1000 ); + + ib_status = ib_close_al(ph_al); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_close_al failed status = %s\n",ib_get_err_str(ib_status))); + + break; + } + ALTS_PRINT( ALTS_DBG_INFO, ("ib_close_al PASSED!!!") ); + + break; //Break from while + } + + ALTS_EXIT( ALTS_DBG_VERBOSE ); + return ib_status; +} + + diff --git a/branches/Ndi/tests/alts/querycaattr.c b/branches/Ndi/tests/alts/querycaattr.c new file mode 100644 index 00000000..2771a1e8 --- /dev/null +++ b/branches/Ndi/tests/alts/querycaattr.c @@ -0,0 +1,270 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * Function prototypes
+ */
+
+ib_api_status_t
+alts_ca_attr(
+	boolean_t modify_attr
+	);
+
+/*
+ * Test Case QueryCaAttributes
+ */
+
+ib_api_status_t
+al_test_modifycaattr(void)
+{
+	boolean_t modify_ca_attr = TRUE;
+
+	return alts_ca_attr(modify_ca_attr);
+}
+
+ib_api_status_t
+al_test_querycaattr(void)
+{
+	boolean_t modify_ca_attr = FALSE;
+
+	return alts_ca_attr(modify_ca_attr);
+}
+
+
+/* Internal Functions */
+
+ib_api_status_t
+alts_ca_attr(
+	boolean_t modify_attr
+	)
+{
+	ib_al_handle_t h_al = NULL;
+	ib_api_status_t ib_status = IB_SUCCESS;
+	ib_api_status_t ret_status = IB_SUCCESS;
+	size_t guid_count;
+	ib_net64_t ca_guid_array[ALTS_MAX_CA];
+	ib_ca_attr_t *alts_ca_attr;
+	uintn_t i;
+	ib_ca_handle_t h_ca = NULL;
+	uint32_t bsize;
+	ib_port_attr_mod_t port_attr_mod;
+
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	while(1)
+	{
+		/*
+		 * Open the AL instance
+		 */
+		ib_status = ib_open_al(&h_al);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_open_al failed status = %d\n", ib_status) );
+			ret_status = ib_status;
+			break;
+		}
+
+		ALTS_PRINT( ALTS_DBG_INFO, ("ib_open_al PASSED.\n") );
+		CL_ASSERT(h_al);
+
+		/*
+		 * Get the Local CA Guids
+		 */
+		ib_status = ib_get_ca_guids(h_al, NULL, &guid_count);
+		if(ib_status != IB_INSUFFICIENT_MEMORY)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_get_ca_guids failed status = %d\n", (uint32_t)ib_status) );
+			ret_status = ib_status;
+			goto Cleanup1;
+		}
+
+		ALTS_PRINT(ALTS_DBG_INFO,
+			("Total number of CAs in the system is %d\n",(uint32_t)guid_count));
+
+		/*
+		 * If no CAs present then return
+		 */
+
+		if(guid_count == 0)
+			goto Cleanup1;
+
+		// ca_guid_array holds at most ALTS_MAX_CA entries
+		ib_status = ib_get_ca_guids(h_al, ca_guid_array, &guid_count);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_get_ca_guids failed with status = %d\n", ib_status) );
+			ret_status = ib_status;
+			goto Cleanup1;
+		}
+
+
+
+		/*
+		 * For each CA GUID found, open the CA,
+		 * query the CA attributes and close the CA.
+		 */
+		for(i=0; i < guid_count; i++)
+		{
+			ALTS_PRINT(ALTS_DBG_INFO,
+				("CA[%d] GUID IS 0x%" PRIx64 "\n",i,_byteswap_uint64(ca_guid_array[i])) );
+
+			/* Open the CA */
+			ib_status = ib_open_ca(h_al,
+				ca_guid_array[i],
+				alts_ca_err_cb,
+				NULL,	//ca_context
+				&h_ca);
+
+			if(ib_status != IB_SUCCESS)
+			{
+				ALTS_PRINT( ALTS_DBG_ERROR, ("ib_open_ca failed with status = %d\n", ib_status) );
+				ret_status = ib_status;
+				goto Cleanup1;
+			}
+			ALTS_PRINT(ALTS_DBG_INFO,
+				("ib_open_ca passed\n"));
+
+
+			/* Query the CA */
+			bsize = 0;
+			ib_status = ib_query_ca(h_ca, NULL, &bsize);
+			if(ib_status != IB_INSUFFICIENT_MEMORY)
+			{
+				ALTS_PRINT(ALTS_DBG_ERROR, ("ib_query_ca failed with status = %d\n", ib_status) );
+				ret_status = ib_status;
+				goto Cleanup2;
+			}
+			CL_ASSERT(bsize);
+
+			/* Allocate the memory needed for query_ca */
+
+			alts_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize);
+			CL_ASSERT(alts_ca_attr);
+
+			ib_status = ib_query_ca(h_ca, alts_ca_attr, &bsize);
+			if(ib_status != IB_SUCCESS)
+			{
+				ALTS_PRINT( ALTS_DBG_ERROR,
+					("ib_query_ca failed with status = %d\n", ib_status) );
+				ret_status = ib_status;
+				goto Cleanup2;
+			}
+
+			/* Print the CA attributes */
+
+			alts_print_ca_attr(alts_ca_attr);
+
+			if(modify_attr)
+			{
+				port_attr_mod.pkey_ctr = 10;
+				port_attr_mod.qkey_ctr = 10;
+
+				ib_status = ib_modify_ca(h_ca,alts_ca_attr->p_port_attr->port_num,
+					IB_CA_MOD_QKEY_CTR | IB_CA_MOD_PKEY_CTR,
+					&port_attr_mod);
+
+				if(ib_status != IB_SUCCESS)
+				{
+					ALTS_PRINT( ALTS_DBG_ERROR,
+						("ib_modify_ca failed with status = %d\n", ib_status) );
+					ret_status = ib_status;
+				}
+
+				ib_status = ib_query_ca(h_ca, alts_ca_attr, &bsize);
+
+				if(ib_status != IB_SUCCESS)
+				{
+					ALTS_PRINT( ALTS_DBG_ERROR,
+						("ib_query_ca failed with status = %d\n", ib_status) );
+					goto Cleanup2;
+				}
+
+				CL_ASSERT(port_attr_mod.pkey_ctr != \
+					alts_ca_attr->p_port_attr->pkey_ctr);
+				CL_ASSERT(port_attr_mod.qkey_ctr != \
+					alts_ca_attr->p_port_attr->qkey_ctr);
+
+			}
+
+			/* Free the memory */
+			cl_free(alts_ca_attr);
+			alts_ca_attr = NULL;
+			/* Close the current open CA */
+			ib_status = ib_close_ca(h_ca, alts_ca_destroy_cb);
+			if(ib_status != IB_SUCCESS)
+			{
+				ALTS_PRINT( ALTS_DBG_ERROR,
+					("ib_close_ca failed status = %d\n", ib_status));
+			}
+			h_ca = NULL;
+
+		}
+
+Cleanup2:
+		if(h_ca != NULL)
+		{
+			ib_status = ib_close_ca(h_ca, alts_ca_destroy_cb);
+			if(ib_status != IB_SUCCESS)
+			{
+				ALTS_PRINT( ALTS_DBG_ERROR,
+					("ib_close_ca failed status = %d\n", ib_status));
+			}
+		}
+
+Cleanup1:
+		ib_status = ib_close_al(h_al);
+
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_close_al failed status = %d\n", ib_status));
+		}
+
+		break;
+
+	} //End of while(1)
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE );
+	return ret_status;
+}
+
diff --git a/branches/Ndi/tests/alts/registermemregion.c b/branches/Ndi/tests/alts/registermemregion.c
new file mode 100644
index 00000000..96eaff6e
--- /dev/null
+++ b/branches/Ndi/tests/alts/registermemregion.c
@@ -0,0 +1,723 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/* Test case PARAMETERS */
+
+#define MEM_ALLIGN 32
+#define MEM_SIZE 1024
+
+
+/*
+ * Function prototypes
+ */
+
+
+/*
+ * Test Case RegisterMemRegion
+ */
+ib_api_status_t
+al_test_register_mem(
+	void
+	)
+{
+	ib_api_status_t ib_status = IB_SUCCESS;
+	ib_al_handle_t h_al = NULL;
+	ib_ca_handle_t h_ca = NULL;
+	ib_pd_handle_t h_pd = NULL;
+
+	ib_mr_create_t virt_mem;
+	char *ptr = NULL, *ptr_align;
+	size_t mask;
+	uint32_t lkey;
+	uint32_t rkey;
+	ib_mr_handle_t h_mr = NULL;
+	ib_mr_attr_t alts_mr_attr;
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	while(1)
+	{
+		/* Open AL */
+		ib_status = alts_open_al(&h_al);
+
+		if(ib_status != IB_SUCCESS)
+			break;
+
+		CL_ASSERT(h_al);
+
+		/* Open CA */
+		ib_status = alts_open_ca(h_al,&h_ca);
+		if(ib_status != IB_SUCCESS)
+			break;
+
+		CL_ASSERT(h_ca);
+
+		/*
+		 * Allocate a PD here
+		 */
+		ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context
+
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_alloc_pd failed status = %s\n", ib_get_err_str(ib_status)) );
+			alts_close_ca(h_ca);
+			break;
+		}
+
+		/*
+		 * Allocate the virtual memory which needs to be registered
+		 */
+
+		mask = MEM_ALLIGN - 1;
+
+		ptr = cl_malloc(MEM_SIZE + MEM_ALLIGN - 1);
+
+		CL_ASSERT(ptr);
+
+		ptr_align = ptr;
+
+		if(((size_t)ptr & mask) != 0)
+			ptr_align = (char *)(((size_t)ptr+mask)& ~mask);
+
+		virt_mem.vaddr = ptr_align;
+		virt_mem.length = MEM_SIZE;
+		virt_mem.access_ctrl = (IB_AC_LOCAL_WRITE | IB_AC_MW_BIND);
+
+		/*
+		 * Register the memory region
+		 */
+
+		ib_status = ib_reg_mem(h_pd, &virt_mem, &lkey, &rkey, &h_mr);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_reg_mem failed status = %s\n", ib_get_err_str(ib_status)) );
+			alts_close_ca(h_ca);
+			break;
+		}
+
+		/*
+		 * Query the memory region
+		 */
+		ib_status = ib_query_mr(h_mr, &alts_mr_attr);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_query_mr failed status = %s\n", ib_get_err_str(ib_status)) );
+			alts_close_ca(h_ca);
+			break;
+		}
+
+		if(alts_mr_attr.lkey != lkey || alts_mr_attr.rkey != rkey)
+		{
+
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_query_mr failed lkey rkey different from reg\n"));
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("\t\t reg-lkey = %x query-lkey %x reg-rkey %x query-rkey %x\n" ,
+				alts_mr_attr.lkey , lkey , alts_mr_attr.rkey , rkey));
+			alts_close_ca(h_ca);
+			ib_status = IB_INVALID_LKEY;
+			break;
+
+		}
+
+		ALTS_PRINT( ALTS_DBG_INFO,
+			("ib_query_mr passed\n"
+			"\t\t lkey = %x rkey %x query-rkey %x\n" ,
+			lkey, rkey, alts_mr_attr.rkey) );
+		/*
+		 * Re-register the memory region
+		 */
+		virt_mem.access_ctrl |= (IB_AC_RDMA_WRITE );
+
+		ib_status = ib_rereg_mem(h_mr,IB_MR_MOD_ACCESS,
+			&virt_mem,&lkey,&rkey,NULL);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_rereg_mem failed status = %s\n", ib_get_err_str(ib_status)) );
+			alts_close_ca(h_ca);
+			break;
+		}
+
+		ALTS_PRINT( ALTS_DBG_INFO,
+			("ib_rereg_mem passed with status = %s\n",ib_get_err_str(ib_status)));
+
+		/*
+		 * De-register the memory region
+		 */
+		ib_status = ib_dereg_mr(h_mr);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_dereg_mr failed status = %s\n", ib_get_err_str(ib_status)) );
+			alts_close_ca(h_ca);
+			break;
+		}
+
+		/*
+		 * Deallocate the PD
+		 */
+
+		ib_status = ib_dealloc_pd(h_pd,alts_pd_destroy_cb);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("ib_dealloc_pd failed status = %s\n",ib_get_err_str(ib_status)) );
+
alts_close_ca(h_ca); + break; + } + + break; //End of while + } + + if ( ptr ) + cl_free ( ptr ); + /* Close AL */ + if(h_al) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + + + +/* + * Test Case RegisterVarMemRegions + */ + +#define MIN_MEM_SIZE 1 // size of the first region +#define N_SIZES 27 // number of regions, each next one is twice the size of the previous one +#define ITER_NUM 3 // each region will be re-/deregistered ITER_NUM times + +ib_api_status_t +al_test_register_var_mem( + void + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca = NULL; + ib_pd_handle_t h_pd = NULL; + + ib_mr_create_t virt_mem; + char *ptr = NULL; + uint32_t lkey; + uint32_t rkey; + ib_mr_handle_t h_mr = NULL; + ib_mr_attr_t alts_mr_attr; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + goto done; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + goto err_open_ca; + + CL_ASSERT(h_ca); + + /* + * Allocate a PD here + */ + ib_status = ib_alloc_pd(h_ca, IB_PDT_NORMAL, NULL, &h_pd); //passing null context + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_alloc_pd failed status = %s\n", ib_get_err_str(ib_status)) ); + goto err_alloc_pd;; + } + + /* + * Register the memory region + */ + { + #define MAX_MEM_SIZE (MIN_MEM_SIZE << N_SIZES) // 1GB + #define MEM_OFFSET 1 + #define PAGE_SIZE 4096 + #define PAGE_MASK (PAGE_SIZE - 1) + unsigned i, j, offset; + unsigned __int64 size; + unsigned __int64 sizea; + int reg_time[N_SIZES], dereg_time[N_SIZES]; + int reg_tries[N_SIZES], dereg_tries[N_SIZES]; + unsigned __int64 Start, End; + + ALTS_PRINT( ALTS_DBG_ERROR, ("***** min_size %#x, max_size %#x, n_sizes %d \n", + MIN_MEM_SIZE, MAX_MEM_SIZE, N_SIZES )); + + + for (size = MIN_MEM_SIZE, j=0; size < MAX_MEM_SIZE; size <<= 1, ++j) + { + + /* Allocate the virtual memory which needs to be registered */ + sizea = size + MEM_OFFSET - 1; + ptr = cl_malloc((size_t)sizea); + if (!ptr) { + ALTS_PRINT( ALTS_DBG_ERROR, + ("cl_malloc failed on %#x bytes\n", sizea) ); + continue; + } + offset = (int)((ULONG_PTR)ptr & PAGE_MASK); + virt_mem.vaddr = ptr - offset; + virt_mem.length = sizea + offset; + virt_mem.access_ctrl = IB_AC_LOCAL_WRITE; + + reg_time[j] =dereg_time[j] =reg_tries[j] =dereg_tries[j] =0; + for (i=0; i +#include +#include +#include + +ib_api_status_t +al_test_pnp_callback( + IN ib_pnp_rec_t* notify ); + +/* + * al_test_register_pnp test case. 
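+ * (The request below sets IB_PNP_FLAG_REG_SYNC, so ib_reg_pnp delivers
+ * the initial CA/port notifications before it returns, and
+ * IB_PNP_FLAG_REG_COMPLETE asks for an IB_PNP_REG_COMPLETE event once
+ * that initial sweep is finished.)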
+ * This test case test the ib_reg_pnp and ib_dereg_pnp calls of AL + */ +ib_api_status_t +al_test_register_pnp(void) +{ + ib_al_handle_t h_al; + ib_api_status_t ib_status = IB_SUCCESS; + ib_pnp_req_t pnp_req; + ib_pnp_handle_t h_pnp; + + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + ib_status = ib_open_al(&h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_open_al failed status = %d", ib_status) ); + break; + } + + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_open_al PASSED!!!\n") ); + + cl_memclr(&pnp_req,sizeof(ib_pnp_req_t)); + + pnp_req.pnp_context = (void*)(uintn_t)0xdeadbeef; + pnp_req.pfn_pnp_cb = al_test_pnp_callback; + pnp_req.pnp_class = IB_PNP_CA | IB_PNP_FLAG_REG_COMPLETE | IB_PNP_FLAG_REG_SYNC; + + h_pnp = NULL; + ib_status = ib_reg_pnp(h_al, &pnp_req, &h_pnp ); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_reg_pnp failed status = %s\n", ib_get_err_str(ib_status))); + + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_reg_pnp PASSED!!\n")); + } + + ALTS_PRINT( ALTS_DBG_INFO, ("h_pnp = (0x%p)\n", h_pnp) ); + + if(h_pnp) + { + //ib_status = ib_dereg_pnp(h_pnp, al_test_pnp_destroy_cb); + ib_status = ib_dereg_pnp(h_pnp, NULL); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_dereg_pnp failed status = %s\n", ib_get_err_str(ib_status))); + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_dereg_pnp PASSED!!\n")); + } + } + + + ib_status = ib_close_al(h_al); + + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_close_al failed status = %s\n", ib_get_err_str(ib_status))); + } + else + { + ALTS_PRINT( ALTS_DBG_INFO, ("ib_close_al PASSED!!!\n") ); + } + + break; //Break from while + } + + ALTS_EXIT( ALTS_DBG_VERBOSE ); + return ib_status; +} + +/* + * This is a pnp callback function call by AL + */ + +ib_api_status_t +al_test_pnp_callback( + IN ib_pnp_rec_t* notify ) +{ + void *context = (void*)0x1234; + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + CL_ASSERT(notify); + + ALTS_PRINT( ALTS_DBG_INFO, + ("AL pnp event (0x%x)\n", notify->pnp_event) ); + + CL_ASSERT( notify->pnp_context == (void*)(uintn_t)0xdeadbeef ); + + switch ( notify->pnp_event ) + { + /* + * Deal with additions + */ + case IB_PNP_CA_ADD: + CL_ASSERT( ((ib_pnp_ca_rec_t*)notify)->p_ca_attr != NULL ); + notify->context = context; + break; + case IB_PNP_PORT_ADD: + CL_ASSERT( ((ib_pnp_port_rec_t*)notify)->p_ca_attr != NULL ); + CL_ASSERT( ((ib_pnp_port_rec_t*)notify)->p_port_attr != NULL ); + notify->context = context; + break; + /* + * Deal with removals + */ + case IB_PNP_CA_REMOVE: + CL_ASSERT( notify->context == context); + break; + case IB_PNP_PORT_REMOVE: + CL_ASSERT( notify->context == context ); + break; + /* + * Deal with link state + */ + case IB_PNP_PORT_ACTIVE: + CL_ASSERT( ((ib_pnp_port_rec_t*)notify)->p_port_attr != NULL ); + CL_ASSERT( notify->context == context ); + /* + * we treat a port up event like a pkey change event + */ + break; + case IB_PNP_PORT_DOWN: + CL_ASSERT( notify->context == context ); + break; + /* + * Deal with PKey changes + */ + case IB_PNP_PKEY_CHANGE: + CL_ASSERT( ((ib_pnp_port_rec_t*)notify)->p_port_attr != NULL ); + CL_ASSERT( notify->context == context ); + break; + case IB_PNP_REG_COMPLETE: + break; + /* + * Deal with unknown/unhandled + */ + default: + ALTS_PRINT( ALTS_DBG_ERROR, + ("Unknown/unhandled AL event (0x%x)\n", notify->pnp_event) ); + + } + ALTS_EXIT( ALTS_DBG_VERBOSE ); + return IB_SUCCESS; +} + + + + diff --git a/branches/Ndi/tests/alts/reregister_hca.c 
b/branches/Ndi/tests/alts/reregister_hca.c new file mode 100644 index 00000000..2a1790c3 --- /dev/null +++ b/branches/Ndi/tests/alts/reregister_hca.c @@ -0,0 +1,104 @@ + +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include +#include +#include + + +/* + * Function prototypes + */ + + +/* + * Test Case RegisterMemRegion + */ +ib_api_status_t +al_test_reregister_hca( + void + ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_handle_t h_al = NULL; + ib_ca_handle_t h_ca = NULL; + ib_ci_op_t ci_op; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + while(1) + { + /* Open AL */ + ib_status = alts_open_al(&h_al); + + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_al); + + /* Open CA */ + ib_status = alts_open_ca(h_al,&h_ca); + if(ib_status != IB_SUCCESS) + break; + + CL_ASSERT(h_ca); + + /* send ioctl */ + memset( &ci_op, 0, sizeof(ci_op) ); + ci_op.command = FW_REREGISTER_HCA; + + ib_status = ib_ci_call (h_ca, NULL, 0, &ci_op); + if(ib_status != IB_SUCCESS) + ALTS_PRINT( ALTS_DBG_ERROR, + ("ib_ci_call failed status = %s\n", + ib_get_err_str(ib_status)) ); + else + ALTS_PRINT( ALTS_DBG_INFO, + ("ib_ci_call PASSED.\n") ); + break; + } + + if(h_ca) + alts_close_ca(h_ca); + + if(h_al) + alts_close_al(h_al); + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return ib_status; +} + diff --git a/branches/Ndi/tests/alts/smatests.c b/branches/Ndi/tests/alts/smatests.c new file mode 100644 index 00000000..c3186eb4 --- /dev/null +++ b/branches/Ndi/tests/alts/smatests.c @@ -0,0 +1,438 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * mad test does a data transfer between two queue pairs created one + * on each port of the hca. In order for this test to work, two ports of the hca + * should be connected in a loop back and must be configured to ACTIVE PORT STATE. + * + * + * Environment: + * All + */ + + +#include +#include +#include +#include +#include +#include + +extern ib_cq_create_t cq_create_attr; +extern ib_qp_create_t qp_create_attr; +extern ib_av_attr_t av_attr; +extern ib_mad_svc_t mad_svc; + + +typedef struct _alts_sma_object +{ + ib_api_status_t status; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + ib_ca_attr_t *p_ca_attr; + ib_port_attr_t *p_send_port_attr; + + ib_pd_handle_t h_pd; + + ib_cq_handle_t h_cq; + uint32_t cq_size; + + ib_pool_key_t h_mad_pool; + ib_qp_handle_t h_qp0; + + ib_mad_svc_handle_t h_sma_mad_svc; + +} alts_sma_object_t; + + +/* Function Prototype */ +ib_api_status_t +alts_get_al_resource( + alts_sma_object_t *p_alts_sma_obj +); + +ib_api_status_t +alts_init_tst_resource_sma( + alts_sma_object_t *p_alts_sma_obj +); +ib_api_status_t +alts_get_send_port( + alts_sma_object_t *p_alts_sma_obj +); + +void +sma_mad_qp_err_cb( + ib_async_event_rec_t *p_err_rec +); +void +alts_sma_mad_svc_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element +); + +void +alts_sma_mad_svc_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element +); + +ib_api_status_t +alts_discover_fabric( + alts_sma_object_t *p_alts_sma_obj +); + + + +/********************************************************** +***********************************************************/ +ib_api_status_t +al_test_sma(void) +{ + alts_sma_object_t *p_alts_sma_obj; + ib_api_status_t ib_status = IB_ERROR; + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + /* Allocate Memory for the alts_sma;*/ + p_alts_sma_obj = (alts_sma_object_t *)cl_zalloc(sizeof(alts_sma_object_t)); + + if(p_alts_sma_obj == NULL) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("cl_zalloc failed\n") ); + ALTS_EXIT( ALTS_DBG_VERBOSE); + return IB_ERROR; + } + + p_alts_sma_obj->cq_size = 255*2; + + do + { + /* Initialize the AL resources */ + ib_status = alts_open_al(&p_alts_sma_obj->h_al); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_al failed status = %d\n", ib_status) ); + break; + } + + /* + * Default opens the first CA + */ + + ib_status = alts_open_ca(p_alts_sma_obj->h_al, &p_alts_sma_obj->h_ca); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_open_ca failed status = %d\n", ib_status) ); + break; + } + + ib_status = alts_get_al_resource(p_alts_sma_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_get_al_resource failed status = %d\n", ib_status) ); + break; + } + + + ib_status = alts_init_tst_resource_sma(p_alts_sma_obj); + if(ib_status != IB_SUCCESS) + { + ALTS_PRINT( ALTS_DBG_ERROR, + ("alts_init_tst_resource_sma failed status = %d\n", ib_status) ); + break; 
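+			/* The break above drops out of the do/while(0) into the
+			 * common CA/AL cleanup at the bottom of the function. */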
+		}
+
+		ib_status = alts_discover_fabric(p_alts_sma_obj);
+		if(ib_status != IB_SUCCESS)
+		{
+			ALTS_PRINT( ALTS_DBG_ERROR,
+				("alts_discover_fabric failed status = %d\n", ib_status) );
+			break;
+		}
+	}while(0);
+
+	if(p_alts_sma_obj->h_ca)
+		alts_close_ca(p_alts_sma_obj->h_ca);
+	if(p_alts_sma_obj->h_al)
+		alts_close_al(p_alts_sma_obj->h_al);
+
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+	return ib_status;
+}
+/*********************************************************
+**********************************************************/
+ib_api_status_t
+alts_discover_fabric(alts_sma_object_t *p_alts_sma_obj)
+{
+	UNUSED_PARAM( p_alts_sma_obj );
+	return IB_SUCCESS;
+}
+/*********************************************************
+**********************************************************/
+ib_api_status_t
+alts_get_al_resource(alts_sma_object_t *p_alts_sma_obj)
+{
+	uint32_t bsize;
+	ib_api_status_t ib_status;
+	ib_ca_attr_t *p_ca_attr;
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	bsize = 0;
+	ib_status = ib_query_ca(p_alts_sma_obj->h_ca, NULL, &bsize);
+	if(ib_status != IB_INSUFFICIENT_MEMORY)
+	{
+		ALTS_PRINT(ALTS_DBG_ERROR,
+			("ib_query_ca failed with status = %d\n", ib_status) );
+		ALTS_EXIT( ALTS_DBG_VERBOSE);
+		return ib_status;
+	}
+	CL_ASSERT(bsize);
+
+	p_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize);
+	if (!p_ca_attr)
+	{
+		ALTS_PRINT( ALTS_DBG_ERROR,
+			("zalloc() failed for p_ca_attr!\n") );
+		ALTS_EXIT( ALTS_DBG_VERBOSE);
+		return IB_ERROR;
+	}
+
+	ib_status = ib_query_ca(p_alts_sma_obj->h_ca, p_ca_attr, &bsize);
+	if(ib_status != IB_SUCCESS)
+	{
+		ALTS_PRINT( ALTS_DBG_ERROR,
+			("ib_query_ca failed with status = %d\n", ib_status) );
+		cl_free(p_ca_attr);
+		ALTS_EXIT( ALTS_DBG_VERBOSE);
+		return IB_ERROR;
+	}
+	p_alts_sma_obj->p_ca_attr = p_ca_attr;
+
+	ib_status = alts_get_send_port(p_alts_sma_obj);
+
+	if(ib_status != IB_SUCCESS)
+	{
+		ALTS_PRINT( ALTS_DBG_ERROR,
+			("alts_get_send_port failed with status = %d\n", ib_status) );
+		ALTS_EXIT( ALTS_DBG_VERBOSE);
+		return IB_ERROR;
+	}
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+	return ib_status;
+}
+/*********************************************************
+**********************************************************/
+
+ib_api_status_t
+alts_get_send_port(alts_sma_object_t *p_alts_sma_obj)
+{
+	ib_ca_attr_t *p_ca_attr;
+	ib_port_attr_t *p_send_port_attr = NULL;
+	ib_port_attr_t *p_port_attr;
+	uint32_t i;
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	p_ca_attr = p_alts_sma_obj->p_ca_attr;
+
+	for(i=0; i < p_ca_attr->num_ports; i++)
+	{
+		p_port_attr = &p_ca_attr->p_port_attr[i];
+		if ((p_port_attr->link_state == IB_LINK_ACTIVE) ||
+			(p_port_attr->link_state == IB_LINK_INIT))
+		{
+			if (p_send_port_attr == NULL)
+			{
+				p_send_port_attr = p_port_attr;
+				break;
+			}
+		}
+	} //end of for
+
+	if(p_send_port_attr == NULL)
+	{
+		/* No port is connected */
+		ALTS_EXIT( ALTS_DBG_VERBOSE);
+		return IB_ERROR;
+	}
+
+	p_alts_sma_obj->p_send_port_attr = p_send_port_attr;
+
+	ALTS_EXIT( ALTS_DBG_VERBOSE);
+	return IB_SUCCESS;
+}
+/*********************************************************
+**********************************************************/
+
+ib_api_status_t
+alts_init_tst_resource_sma(alts_sma_object_t *p_alts_sma_obj)
+{
+	ib_api_status_t ib_status;
+
+	ALTS_ENTER( ALTS_DBG_VERBOSE );
+
+	/*
+	 * Create the necessary resources: PD, QP0 alias and MAD service
+	 */
+
+	/*
+	 * Allocate a PD
+	 */
+	ib_status = ib_alloc_pd(
+		p_alts_sma_obj->h_ca,
+		IB_PDT_ALIAS,
+		p_alts_sma_obj, //pd_context
+		&p_alts_sma_obj->h_pd);
+
+	CL_ASSERT(ib_status == IB_SUCCESS);
+
+	/*
+	 * Create QP Attributes
+	 */
+	cl_memclr(&qp_create_attr,
sizeof(ib_qp_create_t)); + + qp_create_attr.sq_depth = 10; + qp_create_attr.rq_depth = 10; + qp_create_attr.sq_sge = 1; + qp_create_attr.rq_sge = 1; + qp_create_attr.h_sq_cq = NULL; + qp_create_attr.h_rq_cq = NULL; + + qp_create_attr.sq_signaled = TRUE; + + qp_create_attr.qp_type = IB_QPT_QP0_ALIAS; + + ib_status = ib_get_spl_qp( + p_alts_sma_obj->h_pd, + p_alts_sma_obj->p_send_port_attr->port_guid, + &qp_create_attr, + p_alts_sma_obj, // context + sma_mad_qp_err_cb, + &p_alts_sma_obj->h_mad_pool, + &p_alts_sma_obj->h_qp0); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_get_spl_qp()! %s\n", ib_get_err_str(ib_status))); + ALTS_EXIT( ALTS_DBG_VERBOSE); + return (ib_status); + } + + // create svc + cl_memclr(&mad_svc, sizeof(ib_mad_svc_t)); + + mad_svc.mad_svc_context = p_alts_sma_obj; + mad_svc.pfn_mad_send_cb = alts_sma_mad_svc_send_cb; + mad_svc.pfn_mad_recv_cb = alts_sma_mad_svc_recv_cb; + + mad_svc.support_unsol = TRUE; + + + mad_svc.mgmt_class = IB_MCLASS_SUBN_DIR; + mad_svc.mgmt_version = 0x01; + + // fill in methods supported + mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_SET] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_TRAP] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_REPORT] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_TRAP_REPRESS] = TRUE; + + ib_status = ib_reg_mad_svc( + p_alts_sma_obj->h_qp0, + &mad_svc, + &p_alts_sma_obj->h_sma_mad_svc ); + + if (ib_status != IB_SUCCESS) + { + ALTS_TRACE_EXIT(ALTS_DBG_VERBOSE, + ("Error in ib_reg_mad_svc()! %s\n", ib_get_err_str(ib_status))); + ALTS_EXIT( ALTS_DBG_VERBOSE); + return (ib_status); + } + + + ALTS_EXIT( ALTS_DBG_VERBOSE); + return IB_SUCCESS; +} +/********************************************************* +**********************************************************/ + +void +alts_sma_mad_svc_send_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + UNUSED_PARAM( p_mad_element ); +} +/********************************************************* +**********************************************************/ + +void +alts_sma_mad_svc_recv_cb( + IN const ib_mad_svc_handle_t h_mad_svc, + IN void *mad_svc_context, + IN ib_mad_element_t *p_mad_element ) +{ + UNUSED_PARAM( h_mad_svc ); + UNUSED_PARAM( mad_svc_context ); + UNUSED_PARAM( p_mad_element ); +} +/********************************************************* +**********************************************************/ +void +sma_mad_qp_err_cb( + ib_async_event_rec_t *p_err_rec + ) +{ + + ALTS_ENTER( ALTS_DBG_VERBOSE ); + + UNUSED_PARAM( p_err_rec ); + + ALTS_EXIT( ALTS_DBG_VERBOSE); +} diff --git a/branches/Ndi/tests/alts/user/SOURCES b/branches/Ndi/tests/alts/user/SOURCES new file mode 100644 index 00000000..934a86e5 --- /dev/null +++ b/branches/Ndi/tests/alts/user/SOURCES @@ -0,0 +1,36 @@ +TARGETNAME=alts +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=alts_main.c \ + ..\allocdeallocpd.c \ + ..\alts_misc.c \ + ..\cmtests.c \ + ..\createanddestroycq.c \ + ..\createanddestroyqp.c \ + ..\createdestroyav.c \ + ..\creatememwindow.c \ + ..\ibquery.c \ + ..\madtests.c \ + ..\multisendrecv.c \ + ..\openclose.c \ + ..\querycaattr.c \ + ..\registermemregion.c \ + ..\reregister_hca.c \ + ..\registerpnp.c \ + ..\smatests.c + +INCLUDES=..\..\..\inc;..\..\..\inc\user;..; + +TARGETLIBS= \ +!if $(FREEBUILD) + 
$(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tests/alts/user/alts_main.c b/branches/Ndi/tests/alts/user/alts_main.c new file mode 100644 index 00000000..929c8d8c --- /dev/null +++ b/branches/Ndi/tests/alts/user/alts_main.c @@ -0,0 +1,507 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * This is the main c file for the AL test suite application + * + * Environment: + * User Mode + */ + + +#include "stdio.h" +#include "string.h" +#include "stdlib.h" + + +#include +#include "alts_debug.h" +#include "alts_common.h" + +//#include +//#include + + +//#define COMPILE_USER_MODE +#define strcasecmp lstrcmpi +#define strncasecmp( s1, s2, l ) CompareString( LOCALE_USER_DEFAULT, NORM_IGNORECASE, s1, strlen(s1), s2, l ) + +#if !defined( FALSE ) +#define FALSE 0 +#endif /* !defined( FALSE ) */ + +#if !defined( TRUE ) +#define TRUE 1 +#endif /* !defined( TRUE ) */ + +/* + * Global Varables + */ + +//Global Debug level +uint32_t alts_dbg_lvl = ALTS_DBG_FULL; + +/* + * Data structure + */ + + +/* + * Function Prototype + */ +boolean_t +parse_cmd_line( + cmd_line_arg_t *input_arg, + int argc, + char **argv + ); + +void +usage( + void); + + +#ifndef CL_KERNEL +void +run_ual_test( + cmd_line_arg_t *cmd_line_arg + ); +#endif + +void +run_kal_test( + cmd_line_arg_t *cmd_line_arg + ); + + + +/******************************************************************* +*******************************************************************/ + + +int32_t __cdecl +main( + int32_t argc, + char* argv[]) +{ + boolean_t isvalid = FALSE; + cmd_line_arg_t cmd_line_arg={0}; + + CL_ENTER( ALTS_DBG_VERBOSE, alts_dbg_lvl ); + + cl_memclr(&cmd_line_arg,sizeof(cmd_line_arg)); + + isvalid = parse_cmd_line(&cmd_line_arg, + argc, argv); + + if(cmd_line_arg.pgm_to_run == 0) + { + CL_PRINT( ALTS_DBG_ERROR, alts_dbg_lvl, + ("Command line parse failed\n") ); + usage(); + return 0; + } + + if(cmd_line_arg.um == TRUE) + { + { + #ifndef CL_KERNEL + run_ual_test(&cmd_line_arg); + #else + CL_PRINT( ALTS_DBG_ERROR, alts_dbg_lvl, + ("User Mode test not COMPILED.\n #define COMPILE_USER_MODE to build for usr mode\n") ); + #endif + } + } + else + { + CL_PRINT( ALTS_DBG_ERROR, alts_dbg_lvl, + 
("Kernel mode test not supported\n") ); + //run_kal_test(&cmd_line_arg); + } + CL_EXIT( ALTS_DBG_VERBOSE, alts_dbg_lvl ); + + return 0; +} + +#ifndef CL_KERNEL +void +run_ual_test(cmd_line_arg_t *cmd_line_arg) +{ +ib_api_status_t ib_status = IB_ERROR; + + switch(cmd_line_arg->pgm_to_run) + { + case OpenClose: + ib_status = al_test_openclose(); + break; + case QueryCAAttribute: + ib_status = al_test_querycaattr(); + break; + case ModifyCAAttribute: + ib_status = al_test_modifycaattr(); + break; + case AllocDeallocPD: + ib_status = al_test_alloc_dealloc_pd(); + break; + case CreateDestroyAV: + ib_status = al_test_create_destroy_av(); + break; + case QueryAndModifyAV: + ib_status = al_test_query_modify_av(); + break; + case CreateDestroyQP: + ib_status = al_test_create_destroy_qp(); + break; + case QueryAndModifyQP: + CL_PRINT( ALTS_DBG_VERBOSE, alts_dbg_lvl, + ("altsapp: QueryAndModifyQP not implemented.\n") ); + ib_status = IB_SUCCESS; + break; + case CreateAndDestroyCQ: + ib_status = al_test_create_destroy_cq(); + break; + case QueryAndModifyCQ: + ib_status = al_test_query_modify_cq(); + break; + case AttachMultiCast: + CL_PRINT( ALTS_DBG_VERBOSE, alts_dbg_lvl, + ("altsapp: AttachMultiCast not implemented.\n") ); + ib_status = IB_SUCCESS; + break; + case RegisterMemRegion: + ib_status = al_test_register_mem(); + break; + case RegisterVarMemRegions: + ib_status = al_test_register_var_mem(); + break; + case ReregisterHca: + ib_status = al_test_reregister_hca(); + break; + case RegisterPhyMemRegion: + CL_PRINT( ALTS_DBG_VERBOSE, alts_dbg_lvl, + ("altsapp: RegisterPhyMemRegion not implemented.\n") ); + ib_status = IB_SUCCESS; + break; + case CreateMemWindow: + ib_status = al_test_create_mem_window(); + break; + case RegisterSharedMemRegion: + ib_status = al_test_register_shared_mem(); + break; + case MultiSend: + ib_status = al_test_multi_send_recv(); + break; + case RegisterPnP: + ib_status = al_test_register_pnp(); + break; + case MadTests: + ib_status = al_test_mad(); + break; + case MadQuery: + ib_status = al_test_query(); + break; + case CmTests: + ib_status = al_test_cm(); + break; + + case MaxTestCase: + break; + default: + break; + } + if(ib_status != IB_SUCCESS) + { + printf("********************************\n"); + printf("altsapp:AL test failed\n"); + printf("********************************\n"); + } + else + { + printf("********************************\n"); + printf("altsapp:AL test passed\n"); + printf("********************************\n"); + } + +} +#endif + +//void +//run_kal_test(cmd_line_arg_t *cmd_line_arg) +//{ +// +// cl_dev_handle_t h_al_test; +// cl_status_t cl_status; +// uint32_t command; +// uintn_t inbufsz = 0; +// uintn_t outbufsz = 0; +// +// CL_ENTER( ALTS_DBG_VERBOSE, alts_dbg_lvl ); +// +// cl_status = cl_open_device(ALTS_DEVICE_NAME, &h_al_test); +// +// if(cl_status != CL_SUCCESS) +// { +// printf("altsapp:cl_open_device failed\n"); +// CL_EXIT( ALTS_DBG_VERBOSE, alts_dbg_lvl ); +// return; +// } +// +// command = IOCTL_CMD(ALTS_DEV_KEY, cmd_line_arg->pgm_to_run); +// inbufsz = sizeof(cmd_line_arg_t); +// +// +// cl_status = cl_ioctl_device( +// h_al_test, +// command, +// cmd_line_arg, +// inbufsz, +// &outbufsz); +// +// +// if(cl_status != CL_SUCCESS) +// { +// printf("********************************\n"); +// printf("altsapp:AL test failed\n"); +// printf("********************************\n"); +// +// CL_EXIT( ALTS_DBG_VERBOSE, alts_dbg_lvl ); +// return; +// } +// +// if(cmd_line_arg->status == IB_SUCCESS) +// { +// 
printf("********************************\n"); +// printf("altsapp:AL test passed\n"); +// printf("********************************\n"); +// } +// else +// { +// printf("********************************\n"); +// printf("altsapp:AL test failed\n"); +// printf("********************************\n"); +// } +// +// cl_close_device(h_al_test); +// CL_EXIT( ALTS_DBG_VERBOSE, alts_dbg_lvl ); +// +//} + +/* + * Command Line Parser Routine + */ + +boolean_t parse_cmd_line( + cmd_line_arg_t *input_arg, + int argc, + char **argv + ) +{ + size_t i,n,k,j; + char temp[256]; + int Value; + + if (argc <= 1 || (NULL==argv)) + return FALSE; + + input_arg->pgm_to_run = 0; //Set to Zero + + i = argc; + while (--i != 0) + { + /* + * Check for all the test case name + */ + ++argv; + if (strcasecmp(*argv, "--tc=OpenClose") == 0) + { + input_arg->pgm_to_run = OpenClose; + continue; + } + if (strcasecmp(*argv, "--tc=QueryCAAttribute") == 0) + { + input_arg->pgm_to_run = QueryCAAttribute; + continue; + } + if (strcasecmp(*argv, "--tc=ModifyCAAttribute") == 0) + { + input_arg->pgm_to_run = ModifyCAAttribute; + continue; + } + if (strcasecmp(*argv, "--tc=AllocDeallocPD") == 0) + { + input_arg->pgm_to_run = AllocDeallocPD; + continue; + } + if (strcasecmp(*argv, "--tc=CreateDestroyAV") == 0) + { + input_arg->pgm_to_run = CreateDestroyAV; + continue; + } + if (strcasecmp(*argv, "--tc=QueryAndModifyAV") == 0) + { + input_arg->pgm_to_run = QueryAndModifyAV; + continue; + } + if (strcasecmp(*argv, "--tc=CreateDestroyQP") == 0) + { + input_arg->pgm_to_run = CreateDestroyQP; + continue; + } + if (strcasecmp(*argv, "--tc=QueryAndModifyQP") == 0) + { + input_arg->pgm_to_run = QueryAndModifyQP; + continue; + } + if (strcasecmp(*argv, "--tc=CreateAndDestroyCQ") == 0) + { + input_arg->pgm_to_run = CreateAndDestroyCQ; + continue; + } + if (strcasecmp(*argv, "--tc=QueryAndModifyCQ") == 0) + { + input_arg->pgm_to_run = QueryAndModifyCQ; + continue; + } + if (strcasecmp(*argv, "--tc=AttachMultiCast") == 0) + { + input_arg->pgm_to_run = AttachMultiCast; + continue; + } + if (strcasecmp(*argv, "--tc=RegisterMemRegion") == 0) + { + input_arg->pgm_to_run = RegisterMemRegion; + continue; + } + if (strcasecmp(*argv, "--tc=RegisterVarMemRegions") == 0) + { + input_arg->pgm_to_run = RegisterVarMemRegions; + continue; + } + if (strcasecmp(*argv, "--tc=ReregisterHca") == 0) + { + input_arg->pgm_to_run = ReregisterHca; + continue; + } + if (strcasecmp(*argv, "--tc=RegisterPhyMemRegion") == 0) + { + input_arg->pgm_to_run = RegisterPhyMemRegion; + continue; + } + if (strcasecmp(*argv, "--tc=CreateMemWindow") == 0) + { + input_arg->pgm_to_run = CreateMemWindow; + continue; + } + if (strcasecmp(*argv, "--tc=RegisterSharedMemRegion") == 0) + { + input_arg->pgm_to_run = RegisterSharedMemRegion; + continue; + } + if (strcasecmp(*argv, "--tc=MultiSend") == 0) + { + input_arg->pgm_to_run = MultiSend; + continue; + } + if (strcasecmp(*argv, "--tc=RegisterPnP") == 0) + { + input_arg->pgm_to_run = RegisterPnP; + continue; + } + if (strcasecmp(*argv, "--tc=MadTests") == 0) + { + input_arg->pgm_to_run = MadTests; + continue; + } + if (strcasecmp(*argv, "--tc=MadQuery") == 0) + { + input_arg->pgm_to_run = MadQuery; + continue; + } + if (strcasecmp(*argv, "--tc=CmTests") == 0) + { + input_arg->pgm_to_run = CmTests; + continue; + } + + + /* + * Read Other parameter + */ + if (strcasecmp(*argv, "--um") == 0) + { + input_arg->um = TRUE; + printf("altst:Running user mode test case\n"); + continue; + } + + if (strcasecmp(*argv, "--km") == 0) + { + input_arg->um = 
FALSE; + printf("altst:Running kernel mode test case\n"); + continue; + } + + n = strlen(*argv); + + if (strncasecmp(*argv, "--Iteration=", j=strlen("--Iteration=")) == 0 ) + { + k = 0; + while (j < n){ + temp[k] = (*argv)[j++]; k++;} + temp[k] = '\0'; + + Value = atoi(temp); + printf("/Iteration= %d", Value); + + if (Value < 0 || Value > 50) + printf("Invalid Iteration specified\n"); + else + printf("Valid Iteration specified\n"); + continue; + } + + } + + return TRUE; +} + +void +usage(void) +{ + printf("Usage: ./alts --tc=XXXXX [--um|--km]\n"); + printf("XXXX -> see the alts_readme.txt\n"); + printf("--um -> Usermode\n"); + printf("--km -> Kernelmode\n"); +} diff --git a/branches/Ndi/tests/alts/user/makefile b/branches/Ndi/tests/alts/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/tests/alts/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tests/cmtest/dirs b/branches/Ndi/tests/cmtest/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tests/cmtest/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tests/cmtest/user/SOURCES b/branches/Ndi/tests/cmtest/user/SOURCES new file mode 100644 index 00000000..31bc874d --- /dev/null +++ b/branches/Ndi/tests/cmtest/user/SOURCES @@ -0,0 +1,20 @@ +TARGETNAME=cmtest +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=cmtest_main.c + +INCLUDES=..\..\..\inc;..\..\..\inc\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tests/cmtest/user/cmtest_main.c b/branches/Ndi/tests/cmtest/user/cmtest_main.c new file mode 100644 index 00000000..e487f8f4 --- /dev/null +++ b/branches/Ndi/tests/cmtest/user/cmtest_main.c @@ -0,0 +1,2028 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 1996-2002 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Command line interface for cmtest. 
+ * + * Environment: + * User Mode + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Globals */ +#define CMT_DBG_VERBOSE 1 + +#define CMT_BASE_SVC_ID 0xFFEE +#define CMT_ACCESS_CTRL (IB_AC_LOCAL_WRITE + IB_AC_RDMA_READ + IB_AC_RDMA_WRITE) +#define BAD_PKEY_INDEX 0xFFFF + + +typedef enum _cmtest_state +{ + test_idle, test_connecting, test_transfering, test_disconnecting + +} cmtest_state_t; + + + +typedef struct _ib_root +{ + ib_al_handle_t h_al; + ib_pd_handle_t h_pd; + + /* Input parameters to control test. */ + int32_t num_nodes; + uint32_t num_msgs; + boolean_t per_msg_buf; + cl_mutex_t mutex; + + cmtest_state_t state; + atomic32_t num_connected; + uint32_t conn_index; /* current connection id */ + uint32_t total_sent; + uint32_t total_recv; + + uint32_t num_iter; + + uint32_t msg_size; + + ib_ca_handle_t h_ca; + ib_net16_t l_lid; + ib_net16_t r_lid; + ib_net64_t ca_guid; + ib_net64_t port_guid; + uint8_t port_num; + uint16_t num_pkeys; + ib_net16_t *p_pkey_table; + + /* cm info */ + boolean_t is_server; + ib_listen_handle_t h_listen; + ib_path_rec_t path_rec; + + /* CQ info. */ + boolean_t is_polling; + + struct _ib_node *p_nodes; + ib_qp_create_t qp_create; + ib_qp_mod_t qp_mod_reset; + ib_qp_mod_t qp_mod_init; + + /* reg mem info */ + ib_mr_handle_t h_mr; + uint32_t lkey; + uint32_t rkey; + uint8_t *p_mem; + uint8_t *p_mem_recv; + uint8_t *p_mem_send; + + uint64_t conn_start_time; + + /* + * Connection parameters are initialized once to improve connection + * establishment rate. + */ + ib_cm_req_t cm_req; + ib_cm_rep_t cm_rep; + ib_cm_rtu_t cm_rtu; + ib_cm_dreq_t cm_dreq; + ib_cm_drep_t cm_drep; + + uint32_t inst_id; + +} ib_root_t; + + + +typedef enum _cmnode_state +{ + node_idle, node_conn, node_dreq_sent, node_dreq_rcvd + +} cmnode_state_t; + + + +typedef struct _ib_node +{ + uint64_t id; + + ib_cq_handle_t h_send_cq; + ib_cq_handle_t h_recv_cq; + ib_qp_handle_t h_qp; + uint32_t max_inline; + + cmnode_state_t state; + ib_cm_handle_t h_cm_req; + ib_cm_handle_t h_cm_dreq; + + uint32_t send_cnt; + uint32_t recv_cnt; + +} ib_node_t; + + + +uint32_t cmt_dbg_lvl = 0x80000000; + +ib_root_t g_root; + + +static char *wc_type_text[] = { + "IB_WC_SEND", + "IB_WC_RDMA_WRITE", + "IB_WC_RECV", + "IB_WC_RDMA_READ", + "IB_WC_MW_BIND", + "IB_WC_FETCH_ADD", + "IB_WC_COMPARE_SWAP", + "IB_WC_RECV_RDMA_WRITE" +}; + +static char *wc_status_text[] = { + "IB_WCS_SUCCESS", + "IB_WCS_LOCAL_LEN_ERR", + "IB_WCS_LOCAL_OP_ERR", + "IB_WCS_LOCAL_EEC_OP_ERR", + "IB_WCS_LOCAL_PROTECTION_ERR", + "IB_WCS_WR_FLUSHED_ERR", + "IB_WCS_MEM_WINDOW_BIND_ERR", + "IB_WCS_REM_ACCESS_ERR", + "IB_WCS_REM_OP_ERR", + "IB_WCS_RNR_RETRY_ERR", + "IB_WCS_TIMEOUT_RETRY_ERR", + "IB_WCS_REM_INVALID_REQ_ERR", + "IB_WCS_REM_INVALID_RD_REQ_ERR", + "IB_WCS_INVALID_EECN", + "IB_WCS_INVALID_EEC_STATE", + "IB_WCS_UNMATCHED_RESPONSE", + "IB_WCS_CANCELED" +}; + + + +static void +__req_cb( + IN ib_cm_req_rec_t *p_cm_req_rec ); + +static void +__rep_cb( + IN ib_cm_rep_rec_t *p_cm_rep_rec ); + +static void +__rtu_cb( + IN ib_cm_rtu_rec_t *p_cm_rtu_rec ); + +static void +__rej_cb( + IN ib_cm_rej_rec_t *p_cm_rej_rec ); + +static void +__mra_cb( + IN ib_cm_mra_rec_t *p_cm_mra_rec ); + +static void +__apr_cb( + IN ib_cm_apr_rec_t *p_cm_apr_rec ); + +static void +__lap_cb( + IN ib_cm_lap_rec_t *p_cm_lap_rec ); + +static void +__dreq_cb( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ); + +static void +__drep_cb( + IN ib_cm_drep_rec_t *p_cm_drep_rec ); + 
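+/*
+ * The callbacks above form the per-connection CM state machine:
+ * REQ/REP/RTU establish a connection, LAP/APR handle path migration,
+ * DREQ/DREP tear the connection down, and MRA/REJ cover delays and
+ * rejections.
+ */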
+static boolean_t +__poll_cq( + IN ib_node_t *p_node, + IN ib_cq_handle_t h_cq ); + + +/********************************************************************** + **********************************************************************/ +static void +__show_usage() +{ + printf( "\n------- cmtest - Usage and options ----------------------\n" ); + printf( "Usage: cmtest [options]\n"); + printf( "Options:\n" ); + printf( "-s\n" + "--server\n" + " This option directs cmtest to act as a Server\n" ); + printf( "-l \n" + "--local \n" + " This option specifies the local endpoint.\n" ); + printf( "-r \n" + "--remote \n" + " This option specifies the remote endpoint.\n" ); + printf( "-c \n" + "--connect \n" + " This option specifies the number of connections to open.\n" + " Default of 1.\n" ); + printf( "-m \n" + "--msize \n" + " This option specifies the byte size of each message.\n" + " Default is 100 bytes.\n" ); + printf( "-n \n" + "--nmsgs \n" + " This option specifies the number of messages to send at a time.\n" ); + printf( "-p\n" + "--permsg\n" + " This option indicates if a separate buffer should be used per message.\n" + " Default is one buffer for all messages.\n" ); + printf( "-i \n" + "--iterate \n" + " This option specifies the number of times to loop through 'nmsgs'.\n" + " Default of 1.\n" ); + printf( "-v\n" + "--verbose\n" + " This option enables verbosity level to debug console.\n" ); + printf( "-h\n" + "--help\n" + " Display this usage info then exit.\n\n" ); +} + + +/* Windows support. */ +struct option +{ + const char *long_name; + unsigned long flag; + void *pfn_handler; + char short_name; +}; + +static char *optarg; + +#define strtoull strtoul + + +char +getopt_long( + int argc, + char *argv[], + const char *short_option, + const struct option *long_option, + void *unused ) +{ + static int i = 1; + int j; + char ret = 0; + + UNUSED_PARAM( unused ); + + if( i == argc ) + return -1; + + if( argv[i][0] != '-' ) + return ret; + + /* find the first character of the value. */ + for( j = 1; isalpha( argv[i][j] ); j++ ) + ; + optarg = &argv[i][j]; + + if( argv[i][1] == '-' ) + { + /* Long option. */ + for( j = 0; long_option[j].long_name; j++ ) + { + if( strncmp( &argv[i][2], long_option[j].long_name, + optarg - argv[i] - 2 ) ) + { + continue; + } + + switch( long_option[j].flag ) + { + case 1: + if( *optarg == '\0' ) + return 0; + default: + break; + } + ret = long_option[j].short_name; + break; + } + } + else + { + for( j = 0; short_option[j] != '\0'; j++ ) + { + if( !isalpha( short_option[j] ) ) + return 0; + + if( short_option[j] == argv[i][1] ) + { + ret = short_option[j]; + break; + } + + if( short_option[j+1] == ':' ) + { + if( *optarg == '\0' ) + return 0; + j++; + } + } + } + i++; + return ret; +} + + +static boolean_t +__parse_options( + int argc, + char* argv[] ) +{ + uint32_t next_option; + const char* const short_option = "esl:r:c:m:n:i:pvh"; + + /* + In the array below, the 2nd parameter specified the number + of arguments as follows: + 0: no arguments + 1: argument + 2: optional + */ + const struct option long_option[] = + { + { "event", 2, NULL, 'e'}, + { "server", 2, NULL, 's'}, + { "local", 1, NULL, 'l'}, + { "remote", 1, NULL, 'r'}, + { "connect", 1, NULL, 'c'}, + { "msize", 1, NULL, 'm'}, + { "nmsgs", 1, NULL, 'n'}, + { "iterate", 1, NULL, 'i'}, + { "permsg", 0, NULL, 'p'}, + { "verbose", 0, NULL, 'v'}, + { "help", 0, NULL, 'h'}, + { NULL, 0, NULL, 0 } /* Required at end of array */ + }; + + /* Set the default options. 
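	   One connection, 100-byte messages, a single iteration, and polled
	   (rather than event driven) completions unless -e is given.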
*/ + g_root.msg_size = 100; + g_root.num_nodes = 1; + g_root.num_msgs = 0; + g_root.num_iter = 1; + g_root.is_polling = TRUE; + + /* parse cmd line arguments as input params */ + do + { + next_option = getopt_long( argc, argv, short_option, + long_option, NULL ); + + switch( next_option ) + { + case 's': + g_root.is_server = TRUE; + printf( "\tServer mode\n" ); + break; + + case 'd': + g_root.inst_id = strtoull( optarg, NULL, 0 ); + printf( "\tinstance_id..: %d\n", g_root.inst_id ); + break; + + case 'c': + g_root.num_nodes = strtoull( optarg, NULL, 0 ); + printf( "\tconnections..: %d\n", g_root.num_nodes ); + break; + + case 'l': + g_root.l_lid = cl_ntoh16( (uint16_t)strtoull( optarg, NULL, 0 ) ); + printf( "\tlocal lid....: x%x\n", g_root.l_lid ); + break; + + case 'r': + g_root.r_lid = cl_ntoh16( (uint16_t)strtoull( optarg, NULL, 0 ) ); + printf( "\tremote lid...: x%x\n", g_root.r_lid ); + break; + + case 'm': + g_root.msg_size = strtoull( optarg, NULL, 0 ); + printf( "\tmsg size.....: %d bytes\n", g_root.msg_size ); + break; + + case 'n': + g_root.num_msgs = strtoull( optarg, NULL, 0 ); + printf( "\tnum msgs.....: %d\n", g_root.num_msgs ); + break; + + case 'i': + g_root.num_iter = strtoull( optarg, NULL, 0 ); + printf( "\titerate......: %d\n", g_root.num_iter ); + break; + + case 'p': + g_root.per_msg_buf = TRUE; + printf( "\tper message data buffer\n" ); + break; + + case 'v': + cmt_dbg_lvl = 0xFFFFFFFF; + printf( "\tverbose\n" ); + break; + + case 'e': + g_root.is_polling = FALSE; + printf( "\tevent driven completions\n" ); + break; + + case 'h': + __show_usage(); + return FALSE; + + case -1: + break; + + default: /* something wrong */ + __show_usage(); + return FALSE; + } + } while( next_option != -1 ); + + return TRUE; +} + + +/********************************************************************** + **********************************************************************/ +static void +__init_conn_info() +{ + /* Initialize connection request parameters. */ + g_root.cm_req.svc_id = CMT_BASE_SVC_ID + g_root.inst_id; + g_root.cm_req.max_cm_retries = 5; + g_root.cm_req.p_primary_path = &g_root.path_rec; + g_root.cm_req.pfn_cm_rep_cb = __rep_cb; + g_root.cm_req.qp_type = IB_QPT_RELIABLE_CONN; + g_root.cm_req.resp_res = 3; + g_root.cm_req.init_depth = 1; + g_root.cm_req.remote_resp_timeout = 20; + g_root.cm_req.flow_ctrl = TRUE; + g_root.cm_req.local_resp_timeout = 20; + g_root.cm_req.rnr_nak_timeout = 6; + g_root.cm_req.rnr_retry_cnt = 3; + g_root.cm_req.retry_cnt = 5; + g_root.cm_req.pfn_cm_mra_cb = __mra_cb; + g_root.cm_req.pfn_cm_rej_cb = __rej_cb; + + /* Initialize connection reply parameters. */ + g_root.cm_rep.qp_type = IB_QPT_RELIABLE_CONN; + g_root.cm_rep.access_ctrl = CMT_ACCESS_CTRL; + g_root.cm_rep.sq_depth = 0; + g_root.cm_rep.rq_depth = 0; + g_root.cm_rep.init_depth = 1; + g_root.cm_rep.target_ack_delay = 7; + g_root.cm_rep.failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED; + g_root.cm_rep.flow_ctrl = TRUE; + g_root.cm_rep.rnr_nak_timeout = 7; + g_root.cm_rep.rnr_retry_cnt = 6; + g_root.cm_rep.pfn_cm_rej_cb = __rej_cb; + g_root.cm_rep.pfn_cm_mra_cb = __mra_cb; + g_root.cm_rep.pfn_cm_rtu_cb = __rtu_cb; + g_root.cm_rep.pfn_cm_lap_cb = __lap_cb; + g_root.cm_rep.pfn_cm_dreq_cb = __dreq_cb; + + /* Initialize connection RTU parameters. */ + g_root.cm_rtu.pfn_cm_apr_cb = __apr_cb; + g_root.cm_rtu.pfn_cm_dreq_cb = __dreq_cb; + + /* Initialize disconnection request parameters. 
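	   Only the QP type and the DREP callback are needed here; the
	   disconnect request carries no private data.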
*/ + g_root.cm_dreq.pfn_cm_drep_cb = __drep_cb; + g_root.cm_dreq.qp_type = IB_QPT_RELIABLE_CONN; + + /* Disconnection reply parameters are all zero. */ +} + + + +static uint16_t +__get_pkey_index() +{ + uint16_t i; + + for( i = 0; i < g_root.num_pkeys; i++ ) + { + if( g_root.p_pkey_table[i] == g_root.path_rec.pkey ) + return i; + } + + return BAD_PKEY_INDEX; +} + + + +static void +__init_qp_info() +{ + /* Set common QP attributes for all create calls. */ + g_root.qp_create.qp_type = IB_QPT_RELIABLE_CONN; + if( g_root.num_msgs ) + { + g_root.qp_create.sq_depth = g_root.num_msgs; + g_root.qp_create.rq_depth = g_root.num_msgs; + } + else + { + /* Minimal queue depth of one. */ + g_root.qp_create.sq_depth = 1; + g_root.qp_create.rq_depth = 1; + } + + g_root.qp_create.sq_signaled = FALSE; + g_root.qp_create.sq_sge = 1; + g_root.qp_create.rq_sge = 1; + + /* Set the QP attributes when modifying the QP to the reset state. */ + g_root.qp_mod_reset.req_state = IB_QPS_RESET; + + /* Set the QP attributes when modifying the QP to the init state. */ + g_root.qp_mod_init.req_state = IB_QPS_INIT; + g_root.qp_mod_init.state.init.access_ctrl = CMT_ACCESS_CTRL; + g_root.qp_mod_init.state.init.primary_port = g_root.port_num; + g_root.qp_mod_init.state.init.pkey_index = __get_pkey_index(); +} + + + +static ib_api_status_t +__post_recvs( + IN ib_node_t *p_node ) +{ + ib_api_status_t status = IB_SUCCESS; + ib_recv_wr_t recv_wr; + ib_recv_wr_t *p_recv_failure; + ib_local_ds_t ds_array; + uint32_t i; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + if( !g_root.num_msgs ) + { + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return IB_SUCCESS; + } + + cl_memclr( &recv_wr, sizeof( ib_recv_wr_t ) ); + ds_array.length = g_root.msg_size; + ds_array.lkey = g_root.lkey; + recv_wr.ds_array = &ds_array; + recv_wr.num_ds = (( g_root.msg_size <= 4 )? 0: 1 ); + + for( i = 0; i < g_root.num_msgs; i++ ) + { + CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl, (".") ); + + if( g_root.per_msg_buf ) + { + ds_array.vaddr = (uintn_t)(g_root.p_mem_recv + (i * g_root.msg_size) + + (p_node->id * g_root.num_msgs * g_root.msg_size)); + } + else + { + ds_array.vaddr = (uintn_t)g_root.p_mem; + } + + recv_wr.wr_id = i; + + status = ib_post_recv( p_node->h_qp, &recv_wr, &p_recv_failure ); + if( status != IB_SUCCESS ) + { + printf( "ib_post_recv failed [%s]!\n", ib_get_err_str(status) ); + break; + } + } + + CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl, ("\n") ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return status; +} + + + +static void +__ca_async_event_cb( + ib_async_event_rec_t *p_err_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + CL_TRACE( CMT_DBG_VERBOSE, cmt_dbg_lvl, + ( "p_err_rec->code is %d\n", p_err_rec->code ) ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__cm_listen_err_cb( + IN ib_listen_err_rec_t *p_listen_err_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + if( !p_listen_err_rec ) + printf( "__listen_err_cb NULL p_listen_err_rec\n" ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__cancel_listen_cb( + IN void *context ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + if( !context ) + printf( "__cancel_listen_cb NULL context\n" ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +/* We need to halt the test and recover from the reject error. */ +static void +__rej_cb( + IN ib_cm_rej_rec_t *p_cm_rej_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + /* + * Note - because this callback exits the app, any output beyond the + * the first time may report junk. 
There have been instances where + * the callback is invoked more times than there are connection requests + * but that behavior disapeared if the call to exit below is removed. + */ + printf( "Connection was rejected, status: 0x%x\n", + p_cm_rej_rec->rej_status ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + exit( 1 ); +} + + + +static void +__req_cb( + IN ib_cm_req_rec_t *p_cm_req_rec ) +{ + ib_api_status_t status; + ib_node_t *p_node; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + CL_ASSERT( p_cm_req_rec ); + + /* Record the starting time for the server. */ + if( !g_root.conn_start_time ) + g_root.conn_start_time = cl_get_time_stamp( ); + + /* + * Do not send replies until the server is ready to establish all + * connections. + */ + cl_mutex_acquire( &g_root.mutex ); + p_node = &g_root.p_nodes[g_root.conn_index++]; + + if( g_root.state == test_connecting ) + { + /* Get a node for this connection and send the reply. */ + g_root.cm_rep.h_qp = p_node->h_qp; + status = ib_cm_rep( p_cm_req_rec->h_cm_req, &g_root.cm_rep ); + if( status != IB_SUCCESS ) + { + printf( "Call to ib_cm_rep failed\n" ); + exit( 1 ); + } + } + else + { + p_node->h_cm_req = p_cm_req_rec->h_cm_req; + } + cl_mutex_release( &g_root.mutex ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__rep_cb( + IN ib_cm_rep_rec_t *p_cm_rep_rec ) +{ + ib_api_status_t status; + ib_node_t *p_node; + uint8_t pdata[IB_RTU_PDATA_SIZE]; + ib_cm_mra_t mra; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_rep_rec ); + + p_node = (ib_node_t*)p_cm_rep_rec->qp_context; + CL_ASSERT( p_node ); + + mra.p_mra_pdata = NULL; + mra.mra_length = 0; + mra.svc_timeout = 0xff; + + ib_cm_mra( p_cm_rep_rec->h_cm_rep, &mra ); + + __post_recvs( p_node ); + + /* Mark that we're connected before sending the RTU. */ + p_node->state = node_conn; + + g_root.cm_rtu.p_rtu_pdata = pdata; + g_root.cm_rtu.rtu_length = IB_RTU_PDATA_SIZE; + + status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &g_root.cm_rtu ); + if( status != IB_SUCCESS ) + { + printf( "Call to ib_cm_rtu returned %s\n", ib_get_err_str( status ) ); + exit( 1 ); + } + + cl_atomic_inc( &g_root.num_connected ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__rtu_cb( + IN ib_cm_rtu_rec_t *p_cm_rtu_rec ) +{ + ib_node_t *p_node; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_rtu_rec ); + + p_node = (ib_node_t*)p_cm_rtu_rec->qp_context; + p_node->state = node_conn; + + __post_recvs( p_node ); + cl_atomic_inc( &g_root.num_connected ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__mra_cb( + IN ib_cm_mra_rec_t *p_cm_mra_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_mra_rec ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__apr_cb( + IN ib_cm_apr_rec_t *p_cm_apr_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_apr_rec ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__lap_cb( + IN ib_cm_lap_rec_t *p_cm_lap_rec ) +{ + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_lap_rec ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__dreq_cb( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ) +{ + ib_node_t *p_node; + ib_api_status_t status; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_dreq_rec ); + p_node = (ib_node_t*)p_cm_dreq_rec->qp_context; + CL_ASSERT( p_node ); + + /* + * Record that we've already received a DREQ to avoid trying to + * disconnect the QP a second time. 
Synchronize with the DREQ call + * using the mutex. + */ + cl_mutex_acquire( &g_root.mutex ); + + /* If we need to send or receive more data, don't disconnect yet. */ + if( g_root.state == test_disconnecting ) + { + /* Send the DREP. */ + status = ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &g_root.cm_drep ); + + /* If the DREP was successful, we're done with this connection. */ + if( status == IB_SUCCESS ) + { + p_node->state = node_idle; + cl_atomic_dec( &g_root.num_connected ); + } + } + else + { + /* Record that we need to disconnect, but don't send the DREP yet. */ + p_node->state = node_dreq_rcvd; + p_node->h_cm_dreq = p_cm_dreq_rec->h_cm_dreq; + } + cl_mutex_release( &g_root.mutex ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__drep_cb( + IN ib_cm_drep_rec_t *p_cm_drep_rec ) +{ + ib_node_t *p_node; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + CL_ASSERT( p_cm_drep_rec ); + p_node = (ib_node_t*)p_cm_drep_rec->qp_context; + CL_ASSERT( p_node ); + + /* We're done with this connection. */ + cl_mutex_acquire( &g_root.mutex ); + p_node->state = node_idle; + cl_atomic_dec( &g_root.num_connected ); + cl_mutex_release( &g_root.mutex ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static void +__cq_cb( + IN const ib_cq_handle_t h_cq, + IN void* cq_context ) +{ + ib_node_t *p_node = (ib_node_t*)cq_context; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + if( !g_root.is_polling ) + { + if( !__poll_cq( p_node, h_cq ) ) + exit( 1 ); + } + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + +static ib_api_status_t +__create_qp( + IN ib_node_t *p_node ) +{ + ib_api_status_t status; + ib_qp_attr_t attr; + + /* Set the per node QP attributes. */ + g_root.qp_create.h_sq_cq = p_node->h_send_cq; + g_root.qp_create.h_rq_cq = p_node->h_recv_cq; + + /* Allocate the QP. */ + status = ib_create_qp( g_root.h_pd, &g_root.qp_create, p_node, NULL, + &p_node->h_qp ); + if( status != IB_SUCCESS ) + { + printf( "[%d] ib_create_qp failed [%s]!\n", __LINE__, + ib_get_err_str(status) ); + return status; + } + + /* Store the max inline size. */ + status = ib_query_qp( p_node->h_qp, &attr ); + if( status != IB_SUCCESS ) + p_node->max_inline = 0; + else + p_node->max_inline = attr.sq_max_inline; + + /* + * Transition the QP to the initialize state. This prevents the CM + * from having to make this QP transition and improves the connection + * establishment rate. + */ + status = ib_modify_qp( p_node->h_qp, &g_root.qp_mod_reset ); + if( status != IB_SUCCESS ) + { + printf( "ib_modify_qp to IB_QPS_RESET returned %s\n", + ib_get_err_str(status) ); + return status; + } + + status = ib_modify_qp( p_node->h_qp, &g_root.qp_mod_init ); + if( status != IB_SUCCESS ) + { + printf( "ib_modify_qp to IB_QPS_INIT returned %s\n", + ib_get_err_str(status) ); + return status; + } + + return status; +} + + + +/* + * Allocate new QPs for all nodes. + */ +static ib_api_status_t +__create_qps() +{ + uint64_t start_time, total_time; + int32_t i; + ib_api_status_t status; + + printf( "Creating QPs...\n" ); + start_time = cl_get_time_stamp(); + + for( i = 0; i < g_root.num_nodes; i++ ) + { + /* Allocate a new QP. */ + status = __create_qp( &g_root.p_nodes[i] ); + if( status != IB_SUCCESS ) + break; + } + + total_time = cl_get_time_stamp() - start_time; + printf( "Allocation time: %"PRId64" ms\n", total_time/1000 ); + + return status; +} + + + +/* + * Destroy all QPs for all nodes. 
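+ * Destruction is synchronous (ib_sync_destroy), so the time reported
+ * covers the complete teardown of every QP, not just the destroy requests.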
+ */ +static void +__destroy_qps() +{ + uint64_t start_time, total_time; + int32_t i; + + printf( "Destroying QPs...\n" ); + start_time = cl_get_time_stamp(); + + for( i = 0; i < g_root.num_nodes; i++ ) + { + /* Destroy the QP. */ + if( g_root.p_nodes[i].h_qp ) + { + ib_destroy_qp( g_root.p_nodes[i].h_qp, ib_sync_destroy ); + g_root.p_nodes[i].h_qp = NULL; + } + } + + total_time = cl_get_time_stamp() - start_time; + printf( "Destruction time: %"PRId64" ms\n", total_time/1000 ); +} + + + +static boolean_t +__init_node( + IN OUT ib_node_t* p_node ) +{ + ib_api_status_t status; + ib_cq_create_t cq_create; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + /* Create the CQs. */ + cl_memclr( &cq_create, sizeof(ib_cq_create_t) ); + if( g_root.num_msgs ) + cq_create.size = g_root.num_msgs; + else + cq_create.size = 1; /* minimal of one entry */ + + cq_create.pfn_comp_cb = __cq_cb; + status = ib_create_cq( g_root.h_ca, &cq_create, p_node, NULL, + &p_node->h_send_cq ); + if( status != IB_SUCCESS ) + { + printf( "ib_create_cq failed for send CQ [%s]!\n", + ib_get_err_str(status) ); + return FALSE; + } + if( !g_root.is_polling ) + { + status = ib_rearm_cq( p_node->h_send_cq, FALSE ); + if( status != IB_SUCCESS ) + { + printf( "ib_rearm_cq failed for send CQ [%s]!\n", + ib_get_err_str(status) ); + return FALSE; + } + } + + status = ib_create_cq( g_root.h_ca, &cq_create, p_node, NULL, + &p_node->h_recv_cq ); + if( status != IB_SUCCESS ) + { + printf( "ib_create_cq failed for recv CQ [%s]!\n", + ib_get_err_str(status) ); + return FALSE; + } + if( !g_root.is_polling ) + { + status = ib_rearm_cq( p_node->h_recv_cq, FALSE ); + if( status != IB_SUCCESS ) + { + printf( "ib_rearm_cq failed for recv CQ [%s]!\n", + ib_get_err_str(status) ); + return FALSE; + } + } + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return TRUE; +} + +static boolean_t +__destroy_node( + IN OUT ib_node_t* p_node ) +{ + ib_api_status_t status = IB_SUCCESS; + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + if (!p_node ) + return (FALSE); + if ( p_node->h_send_cq ) + { + status = ib_destroy_cq( p_node->h_send_cq, ib_sync_destroy ); + p_node->h_send_cq = NULL; + if( status != IB_SUCCESS ) + { + printf( "ib_destroy_cq failed for send CQ [%s]!\n", + ib_get_err_str(status) ); + } + } + if (p_node->h_recv_cq) + { + status = ib_destroy_cq( p_node->h_recv_cq, ib_sync_destroy ); + p_node->h_recv_cq = NULL; + if( status != IB_SUCCESS ) + { + printf( "ib_destroy_cq failed for recv CQ [%s]!\n", + ib_get_err_str(status) ); + } + } + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return (status == IB_SUCCESS); +} + + + +static boolean_t +__create_nodes() +{ + int32_t i; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + for( i = 0; i < g_root.num_nodes; i++ ) + { + g_root.p_nodes[i].id = i; + + CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl, + ("--> create connection %d of instance %d\n", i, g_root.inst_id) ); + + if( !__init_node( &g_root.p_nodes[i] ) ) + return FALSE; + } + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return TRUE; +} + +static boolean_t +__destroy_nodes() +{ + int32_t i; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + for( i = 0; i < g_root.num_nodes; i++ ) + { + if( !__destroy_node( &g_root.p_nodes[i] ) ) + return FALSE; + } + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return TRUE; +} + +/* query function called by ib_query() */ +static void +__sa_query_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ib_path_rec_t *p_path; + ib_api_status_t status; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + CL_ASSERT( p_query_rec ); + + if( p_query_rec->status 
!= IB_SUCCESS ) + { + printf( "ib_query failed [%d]\n", p_query_rec->status ); + return; + } + + if( p_query_rec->query_type != IB_QUERY_PATH_REC_BY_LIDS ) + { + printf( "Unexpected query type returned.\n" ); + return; + } + + if( !p_query_rec->p_result_mad ) + { + printf( "No result MAD returned from ib_query.\n" ); + return; + } + + /* copy the 1st (zero'th) path record to local storage. */ + p_path = ib_get_query_path_rec( p_query_rec->p_result_mad, 0 ); + memcpy( (void*)&g_root.path_rec, (void*)p_path, + sizeof(ib_path_rec_t) ); + + CL_TRACE( CMT_DBG_VERBOSE, cmt_dbg_lvl, + ( "path{ slid:0x%x, dlid:0x%x }\n", + g_root.path_rec.slid, g_root.path_rec.dlid) ); + + /* release response MAD(s) back to AL pool */ + if( p_query_rec->p_result_mad ) + { + status = ib_put_mad( p_query_rec->p_result_mad ); + if( status != IB_SUCCESS ) + { + printf( "ib_put_mad() failed [%s]\n", + ib_get_err_str(status) ); + } + } + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); +} + + + +static boolean_t +__query_for_path() +{ + ib_api_status_t status; + ib_query_req_t query_rec; + ib_lid_pair_t lid_pair; + + /* Query the SA for a path record. */ + query_rec.query_type = IB_QUERY_PATH_REC_BY_LIDS; + + lid_pair.src_lid = g_root.l_lid; + lid_pair.dest_lid = g_root.r_lid; + + query_rec.p_query_input = (void*)&lid_pair; + query_rec.port_guid = g_root.port_guid; + query_rec.timeout_ms = 5 * 1000; // seconds + query_rec.retry_cnt = 2; + query_rec.flags = IB_FLAGS_SYNC; + query_rec.query_context = &g_root; + query_rec.pfn_query_cb = __sa_query_cb; + + status = ib_query( g_root.h_al, &query_rec, NULL ); + if( ( status != IB_SUCCESS ) || ( !g_root.path_rec.dlid ) ) + { + printf( "ib_query failed.\n" ); + return FALSE; + } + + return TRUE; +} + + + +static boolean_t +__create_messages() +{ + ib_mr_create_t mr_create; + uint32_t buf_size; + ib_api_status_t status; + + /* If we're not sending messages, just return. */ + if( !g_root.num_msgs || !g_root.msg_size ) + return TRUE; + + /* Allocate the message memory - we ignore the data, so just one buffer. */ + if( g_root.per_msg_buf ) + buf_size = (g_root.num_nodes * g_root.num_msgs * g_root.msg_size) << 1; + else + buf_size = g_root.msg_size; + g_root.p_mem = cl_zalloc( buf_size ); + if( !g_root.p_mem ) + { + printf( "Not enough memory for transfers!\n" ); + return FALSE; + } + memset (g_root.p_mem, 0xae, buf_size); + g_root.p_mem_recv = g_root.p_mem; + g_root.p_mem_send = g_root.p_mem + (buf_size >> 1); + + /* Register the memory with AL. */ + mr_create.vaddr = g_root.p_mem; + mr_create.length = buf_size; + mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_MW_BIND; + status = ib_reg_mem( g_root.h_pd, &mr_create, &g_root.lkey, + &g_root.rkey, &g_root.h_mr ); + if( status != IB_SUCCESS ) + { + printf( "ib_reg_mem failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + + return TRUE; +} + + + +/* + * PnP callback handler. Record the port GUID of an active port. + */ +static ib_api_status_t +__pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_pnp_port_rec_t* p_port_rec; + uint32_t size; + + p_port_rec = (ib_pnp_port_rec_t*)p_pnp_rec; + + /* + * Ignore PNP events that are not related to port active, or if + * we already have an active port. + */ + if( p_pnp_rec->pnp_event != IB_PNP_PORT_ACTIVE || g_root.port_guid ) + return IB_SUCCESS; + + /* Find the proper port for the given local LID. 
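+	 * If the user supplied a local LID on the command line, accept only
+	 * the port with that LID; otherwise adopt the first active port and
+	 * record its LID for the later path record query.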
*/ + if( g_root.l_lid ) + { + if( g_root.l_lid != p_port_rec->p_port_attr->lid ) + return IB_SUCCESS; + } + else + { + g_root.l_lid = p_port_rec->p_port_attr->lid; + printf( "\tlocal lid....: x%x\n", g_root.l_lid ); + } + + /* Record the active port information. */ + g_root.ca_guid = p_port_rec->p_ca_attr->ca_guid; + g_root.port_num = p_port_rec->p_port_attr->port_num; + + /* Record the PKEYs available on the active port. */ + size = sizeof( ib_net16_t ) * p_port_rec->p_port_attr->num_pkeys; + g_root.p_pkey_table = cl_zalloc( size ); + if( !g_root.p_pkey_table ) + return IB_SUCCESS; + g_root.num_pkeys = p_port_rec->p_port_attr->num_pkeys; + cl_memcpy( g_root.p_pkey_table, + p_port_rec->p_port_attr->p_pkey_table, size ); + + /* Set the port_guid last to indicate that we're ready. */ + g_root.port_guid = p_port_rec->p_port_attr->port_guid; + return IB_SUCCESS; +} + + +/* + * Register for PnP events and wait until a port becomes active. + */ +static boolean_t +__reg_pnp() +{ + ib_api_status_t status; + ib_pnp_req_t pnp_req; + ib_pnp_handle_t h_pnp; + + cl_memclr( &pnp_req, sizeof( ib_pnp_req_t ) ); + pnp_req.pnp_class = IB_PNP_PORT; + pnp_req.pnp_context = &g_root; + pnp_req.pfn_pnp_cb = __pnp_cb; + + /* Register for PnP events. */ + status = ib_reg_pnp( g_root.h_al, &pnp_req, &h_pnp ); + if( status != IB_SUCCESS ) + { + printf( "ib_reg_pnp failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + + /* Wait until a port goes active. */ + while( !g_root.port_guid ) + cl_thread_suspend( 10 ); + + /* Deregister from PnP. */ + ib_dereg_pnp( h_pnp, NULL ); + + return TRUE; +} + + + +static boolean_t +__init_root() +{ + ib_api_status_t status; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + cl_mutex_construct( &g_root.mutex ); + if( cl_mutex_init( &g_root.mutex ) != CL_SUCCESS ) + return FALSE; + + /* Open AL. */ + status = ib_open_al( &g_root.h_al ); + if( status != IB_SUCCESS ) + { + printf( "ib_open_al failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + + /* Register for PnP events, and wait until we have an active port. */ + if( !__reg_pnp() ) + return FALSE; + + /* Open the CA. */ + status = ib_open_ca( g_root.h_al, g_root.ca_guid, + __ca_async_event_cb, &g_root, &g_root.h_ca ); + if( status != IB_SUCCESS ) + { + printf( "ib_open_ca failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + + /* Create a PD. */ + status = ib_alloc_pd( g_root.h_ca, IB_PDT_NORMAL, &g_root, + &g_root.h_pd ); + if( status != IB_SUCCESS ) + { + printf( "ib_alloc_pd failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + + /* Get a path record to the remote side. */ + if( !__query_for_path() ) + { + printf( "Unable to query for path record!\n" ); + return FALSE; + } + + /* Allocate and register memory for the messages. */ + if( !__create_messages() ) + { + printf( "Unable to create messages!\n" ); + return FALSE; + } + + /* Create the connection endpoints. */ + g_root.p_nodes = (ib_node_t*)cl_zalloc( + sizeof(ib_node_t) * g_root.num_nodes ); + if( !g_root.p_nodes ) + { + printf( "Unable to allocate nodes\n" ); + return FALSE; + } + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return TRUE; +} + + + +static void +__cleanup() +{ + if( g_root.h_listen ) + ib_cm_cancel( g_root.h_listen, __cancel_listen_cb ); + + /* Close AL if it was opened. */ + if( g_root.h_al ) + ib_close_al( g_root.h_al ); + + cl_mutex_destroy( &g_root.mutex ); + + if( g_root.p_mem ) + cl_free( g_root.p_mem ); + + if( g_root.p_pkey_table ) + cl_free( g_root.p_pkey_table ); + + /* Free all allocated memory. 
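+	 * Each resource is released only if it was actually allocated, so
+	 * __cleanup is safe to call no matter how far initialization got.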
*/ + if( g_root.p_nodes ) + cl_free( g_root.p_nodes ); +} + + + +/* + * Have the server start listening for connections. + */ +static boolean_t +__listen() +{ + ib_cm_listen_t cm_listen; + ib_api_status_t status; + + cl_memclr( &cm_listen, sizeof( ib_cm_listen_t ) ); + + /* The server side listens. */ + cm_listen.svc_id = CMT_BASE_SVC_ID + g_root.inst_id; + + cm_listen.pfn_cm_req_cb = __req_cb; + + cm_listen.qp_type = IB_QPT_RELIABLE_CONN; + + status = ib_cm_listen( g_root.h_al, &cm_listen, + __cm_listen_err_cb, &g_root, &g_root.h_listen ); + if( status != IB_SUCCESS ) + { + printf( "ib_cm_listen failed [%s]!\n", ib_get_err_str(status) ); + return FALSE; + } + return TRUE; +} + + + +/* + * Initiate all client connection requests. + */ +static ib_api_status_t +__conn_reqs() +{ + ib_api_status_t status; + int32_t i; + uint8_t pdata[IB_REQ_PDATA_SIZE]; + + g_root.cm_req.p_req_pdata = pdata; + g_root.cm_req.req_length = IB_REQ_PDATA_SIZE; + + /* Request a connection for each client. */ + for( i = 0; i < g_root.num_nodes; i++ ) + { + g_root.cm_req.h_qp = g_root.p_nodes[i].h_qp; + + status = ib_cm_req( &g_root.cm_req ); + if( status != IB_SUCCESS ) + { + printf( "ib_cm_req failed [%s]!\n", ib_get_err_str(status) ); + return status; + } + } + return IB_SUCCESS; +} + + + +/* + * Send any connection replies waiting to be sent. + */ +static ib_api_status_t +__conn_reps() +{ + ib_api_status_t status; + uintn_t i; + uint8_t pdata[IB_REP_PDATA_SIZE]; + + g_root.cm_rep.p_rep_pdata = pdata; + g_root.cm_rep.rep_length = IB_REP_PDATA_SIZE; + + /* Send a reply for each connection that requires one. */ + for( i = 0; i < g_root.conn_index; i++ ) + { + g_root.cm_rep.h_qp = g_root.p_nodes[i].h_qp; + status = ib_cm_rep( g_root.p_nodes[i].h_cm_req, &g_root.cm_rep ); + if( status != IB_SUCCESS ) + { + printf( "ib_cm_rep failed [%s]!\n", ib_get_err_str(status) ); + return status; + } + } + return IB_SUCCESS; +} + + + +/* + * Establish all connections. + */ +static ib_api_status_t +__connect() +{ + uint64_t total_time; + ib_api_status_t status; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + printf( "Connecting...\n" ); + cl_mutex_acquire( &g_root.mutex ); + g_root.state = test_connecting; + + /* Initiate the connections. */ + if( g_root.is_server ) + { + /* + * Send any replies. Note that we hold the mutex while sending the + * replies since we need to use the global cm_rep structure. + */ + status = __conn_reps(); + cl_mutex_release( &g_root.mutex ); + } + else + { + cl_mutex_release( &g_root.mutex ); + g_root.conn_start_time = cl_get_time_stamp(); + status = __conn_reqs(); + } + + if( status != IB_SUCCESS ) + return status; + + /* Wait for all connections to complete. */ + while( g_root.num_connected < g_root.num_nodes ) + cl_thread_suspend( 0 ); + + /* Calculate the total connection time. */ + total_time = cl_get_time_stamp() - g_root.conn_start_time; + g_root.state = test_idle; + + /* Reset connection information for next test. */ + g_root.conn_index = 0; + g_root.conn_start_time = 0; + + printf( "Connect time: %"PRId64" ms\n", total_time/1000 ); + + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return status; +} + + + +static void +__disconnect() +{ + ib_api_status_t status; + int32_t i; + ib_node_t *p_node; + uint64_t total_time, start_time; + + printf( "Disconnecting...\n" ); + + /* Initiate the disconnection process. 
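+	 * Depending on its state, each node either sends a DREQ (still
+	 * connected) or answers a DREQ that has already arrived with a DREP.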
*/ + cl_mutex_acquire( &g_root.mutex ); + g_root.state = test_disconnecting; + start_time = cl_get_time_stamp(); + cl_mutex_release( &g_root.mutex ); + + /* We hold the mutex to prevent calling ib_cm_drep at the same time. */ + for( i = 0; i < g_root.num_nodes; i++ ) + { + p_node = &g_root.p_nodes[i]; + + /* + * Send the DREQ. Note that some of these may fail, since the + * remote side may be disconnecting at the same time. Call DREQ + * only if we haven't called DREP yet. + */ + cl_mutex_acquire( &g_root.mutex ); + switch( p_node->state ) + { + case node_conn: + g_root.cm_dreq.h_qp = p_node->h_qp; + status = ib_cm_dreq( &g_root.cm_dreq ); + if( status == IB_SUCCESS ) + p_node->state = node_dreq_sent; + break; + + case node_dreq_rcvd: + status = ib_cm_drep( p_node->h_cm_dreq, &g_root.cm_drep ); + + /* If the DREP was successful, we're done with this connection. */ + if( status == IB_SUCCESS ) + { + p_node->state = node_idle; + cl_atomic_dec( &g_root.num_connected ); + } + break; + + default: + /* Node is already disconnected. */ + break; + } + cl_mutex_release( &g_root.mutex ); + } + + /* Wait for all disconnections to complete. */ + while( g_root.num_connected ) + cl_thread_suspend( 0 ); + + if( g_root.h_listen ) + { + ib_cm_cancel( g_root.h_listen, __cancel_listen_cb ); + g_root.h_listen = NULL; + } + /* Calculate the total connection time. */ + total_time = cl_get_time_stamp() - start_time; + g_root.state = test_idle; + + printf( "Disconnect time: %"PRId64" ms\n", total_time/1000 ); +} + + + +/* + * Send the requested number of messages on each connection. + */ +static boolean_t +__send_msgs() +{ + ib_api_status_t status; + int32_t i; + uint32_t m; + ib_send_wr_t send_wr; + ib_send_wr_t *p_send_failure; + ib_local_ds_t ds_array; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + /* For each connection... */ + for( i = 0; i < g_root.num_nodes; i++ ) + { + /* Send the specified number of messages. */ + for( m = 0; m < g_root.num_msgs; m++ ) + { + /* Get the buffer for this message. */ + if( g_root.per_msg_buf ) + { + ds_array.vaddr = (uintn_t)(g_root.p_mem_send + + (i * g_root.num_msgs) + (m * g_root.msg_size)); + } + else + { + ds_array.vaddr = (uintn_t)g_root.p_mem; + } + ds_array.length = g_root.msg_size; + ds_array.lkey = g_root.lkey; + + /* Format the send WR for this message. */ + send_wr.ds_array = &ds_array; + send_wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED; + send_wr.send_opt |= ((g_root.msg_size <= 4)? IB_SEND_OPT_IMMEDIATE : 0x0 ); + send_wr.wr_type = WR_SEND; + send_wr.num_ds = ((g_root.msg_size <= 4)? 0 : 1 ); + send_wr.p_next = NULL; + send_wr.wr_id = m; + + if( g_root.msg_size < g_root.p_nodes[i].max_inline ) + send_wr.send_opt |= IB_SEND_OPT_INLINE; + + /* Torpedoes away! Send the message. */ + CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl, (".") ); + status = ib_post_send( g_root.p_nodes[i].h_qp, &send_wr, + &p_send_failure ); + if( status != IB_SUCCESS ) + { + printf( "ib_post_send failed [%s]!\n", + ib_get_err_str(status) ); + return FALSE; + } + } + } + + CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl, ("\n") ); + CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + return TRUE; +} + + + +/* + * Remove num_msgs completions from the specified CQ. + */ +static boolean_t +__poll_cq( + IN ib_node_t *p_node, + IN ib_cq_handle_t h_cq ) +{ + ib_api_status_t status = IB_SUCCESS; + ib_wc_t free_wc[2]; + ib_wc_t *p_free_wc, *p_done_wc; + + CL_ENTER( CMT_DBG_VERBOSE, cmt_dbg_lvl ); + + while( status != IB_NOT_FOUND ) + { + /* Get all completions. 
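+		 * ib_poll_cq consumes entries from a caller-built free list and
+		 * returns the completed entries on a done list:
+		 *
+		 *     free list:  free_wc[0] -> free_wc[1] -> NULL
+		 *
+		 * so each call below can drain up to two completions at a time.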
+		 */
+		p_free_wc = &free_wc[0];
+		free_wc[0].p_next = &free_wc[1];
+		free_wc[1].p_next = NULL;
+		p_done_wc = NULL;
+
+		status = ib_poll_cq( h_cq, &p_free_wc, &p_done_wc );
+
+		/* Stop polling once all completions have been retrieved. */
+		if( status == IB_NOT_FOUND )
+			break;
+
+		/* Abort if an error occurred. */
+		if( status != IB_SUCCESS )
+		{
+			printf( "Error polling: status = %#x (wc_status = %s)\n",
+				status,
+				((p_done_wc != NULL) ? wc_status_text[p_done_wc->status] : "N/A") );
+			return FALSE;
+		}
+
+		while( p_done_wc )
+		{
+			switch( p_done_wc->status )
+			{
+			case IB_WCS_SUCCESS:
+				CL_PRINT( CMT_DBG_VERBOSE, cmt_dbg_lvl,
+					("Got a completion: \n\ttype....:%s\n\twr_id...:%"PRIx64"\n"
+					"status....:%s\n", wc_type_text[p_done_wc->wc_type],
+					p_done_wc->wr_id, wc_status_text[p_done_wc->status] ) );
+
+				if( p_done_wc->wc_type == IB_WC_RECV )
+				{
+					CL_ASSERT( p_done_wc->wr_id == p_node->recv_cnt );
+					if( p_done_wc->length != g_root.msg_size )
+					{
+						printf( "Error: received %d bytes, expected %d.\n",
+							p_done_wc->length, g_root.msg_size );
+					}
+
+					p_node->recv_cnt++;
+					g_root.total_recv++;
+				}
+				else
+				{
+					CL_ASSERT( p_done_wc->wr_id == p_node->send_cnt );
+					p_node->send_cnt++;
+					g_root.total_sent++;
+				}
+				break;
+
+			default:
+				printf( "[%d] Bad completion type(%s) status(%s)\n",
+					__LINE__, wc_type_text[p_done_wc->wc_type],
+					wc_status_text[p_done_wc->status] );
+				return FALSE;
+			}
+			p_done_wc = p_done_wc->p_next;
+		}
+	}
+
+	if( !g_root.is_polling )
+	{
+		status = ib_rearm_cq( h_cq, FALSE );
+		if( status != IB_SUCCESS )
+		{
+			printf( "Failed to rearm CQ %p\n", h_cq );
+			return FALSE;
+		}
+	}
+
+	CL_EXIT( CMT_DBG_VERBOSE, cmt_dbg_lvl );
+	return TRUE;
+}
+
+
+
+/*
+ * Remove num_msgs completions from all send CQs for all connections.
+ */
+static boolean_t
+__poll_send_cqs()
+{
+	ib_node_t	*p_node;
+	int32_t		i;
+
+	for( i = 0; i < g_root.num_nodes; i++ )
+	{
+		p_node = &g_root.p_nodes[i];
+		while( p_node->send_cnt < g_root.num_msgs )
+		{
+			if( !g_root.is_polling )
+				cl_thread_suspend( 0 );
+			else if( !__poll_cq( p_node, p_node->h_send_cq ) )
+				return FALSE;
+		}
+	}
+	return TRUE;
+}
+
+
+
+/*
+ * Remove num_msgs completions from all receive CQs for all connections.
+ */
+static boolean_t
+__poll_recv_cqs()
+{
+	ib_node_t	*p_node;
+	int32_t		i;
+
+	for( i = 0; i < g_root.num_nodes; i++ )
+	{
+		p_node = &g_root.p_nodes[i];
+		while( p_node->recv_cnt < g_root.num_msgs )
+		{
+			if( !g_root.is_polling )
+				cl_thread_suspend( 0 );
+			else if( !__poll_cq( p_node, p_node->h_recv_cq ) )
+				return FALSE;
+		}
+	}
+	return TRUE;
+}
+
+
+
+/**********************************************************************
+ **********************************************************************/
+int __cdecl
+main(
+	int argc,
+	char* argv[] )
+{
+	uint64_t	start_time, total_time;
+	uint64_t	total_xfer;
+	uint32_t	i;
+
+	cl_memclr( &g_root, sizeof(ib_root_t) );
+
+	/* Set defaults and parse the command line options. */
+	if( !__parse_options( argc, argv ) )
+		return 1;
+
+	/* Initialize the root - open all common HCA resources. */
+	if( !__init_root() )
+	{
+		printf( "__init_root failed\n" );
+		__cleanup();
+		return 1;
+	}
+
+	/*
+	 * Execute the test the specified number of times.  Abort the test
+	 * if any errors occur.
+	 */
+	total_xfer = g_root.num_msgs * g_root.msg_size * g_root.num_nodes;
+	for( i = 0; i < g_root.num_iter; i++ )
+	{
+		printf( "----- Iteration: %d, %d connections -----\n",
+			i, g_root.num_nodes );
+
+		/* Initialize the connection parameters. */
+		__init_conn_info();
+		__init_qp_info();
+		__create_nodes();
+
+		/* Start listening for connections if we're the server.
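+		 * The listen uses a service ID derived from the instance ID
+		 * (CMT_BASE_SVC_ID + inst_id) and is canceled in __disconnect,
+		 * so every iteration starts with a fresh listen.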
+		 */
+		if( g_root.is_server )
+			__listen();
+
+		/* Allocate a new set of QPs for the connections. */
+		if( __create_qps() != IB_SUCCESS )
+		{
+			printf( "Unable to allocate QPs for test.\n" );
+			break;
+		}
+
+		/* Establish all connections. */
+		if( __connect() != IB_SUCCESS )
+		{
+			printf( "Failed to establish connections.\n" );
+			break;
+		}
+
+		printf( "Transferring data...\n" );
+		g_root.state = test_transfering;
+		start_time = cl_get_time_stamp();
+
+		if( g_root.num_msgs )
+		{
+			if( g_root.is_server )
+			{
+				/* The server initiates the sends to avoid race conditions. */
+				if( !__send_msgs() )
+					break;
+
+				/* Get all send completions. */
+				if( !__poll_send_cqs() )
+					break;
+
+				/* Get all receive completions. */
+				if( !__poll_recv_cqs() )
+					break;
+			}
+			else
+			{
+				/* Get all receive completions. */
+				if( !__poll_recv_cqs() )
+					break;
+
+				/* Reply to the sends. */
+				if( !__send_msgs() )
+					break;
+
+				/* Get all send completions. */
+				if( !__poll_send_cqs() )
+					break;
+			}
+		}
+
+		total_time = cl_get_time_stamp() - start_time;
+		g_root.state = test_idle;
+
+		printf( "Data transfer time: %"PRId64" ms, %d messages/conn, "
+			"%"PRId64" total bytes\n", total_time/1000,
+			g_root.num_msgs, total_xfer );
+
+		/* Disconnect all connections. */
+		__disconnect();
+		__destroy_qps();
+		__destroy_nodes();
+	}
+
+	__cleanup();
+	return 0;
+}
diff --git a/branches/Ndi/tests/cmtest/user/makefile b/branches/Ndi/tests/cmtest/user/makefile
new file mode 100644
index 00000000..bffacaa7
--- /dev/null
+++ b/branches/Ndi/tests/cmtest/user/makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source
+# file to this component.  This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
+#
+
+!INCLUDE ..\..\..\inc\openib.def
diff --git a/branches/Ndi/tests/dirs b/branches/Ndi/tests/dirs
new file mode 100644
index 00000000..fc792b6c
--- /dev/null
+++ b/branches/Ndi/tests/dirs
@@ -0,0 +1,6 @@
+DIRS=\
+	alts \
+	cmtest \
+	wsd \
+	ibat \
+	limits
diff --git a/branches/Ndi/tests/ibat/dirs b/branches/Ndi/tests/ibat/dirs
new file mode 100644
index 00000000..389156fd
--- /dev/null
+++ b/branches/Ndi/tests/ibat/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+	user
diff --git a/branches/Ndi/tests/ibat/user/PrintIp.c b/branches/Ndi/tests/ibat/user/PrintIp.c
new file mode 100644
index 00000000..8fc1c443
--- /dev/null
+++ b/branches/Ndi/tests/ibat/user/PrintIp.c
@@ -0,0 +1,253 @@
+/*
+* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+* Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+*
+* This software is available to you under the OpenIB.org BSD license
+* below:
+*
+* Redistribution and use in source and binary forms, with or
+* without modification, are permitted provided that the following
+* conditions are met:
+*
+* - Redistributions of source code must retain the above
+* copyright notice, this list of conditions and the following
+* disclaimer.
+*
+* - Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials
+* provided with the distribution.
+*
+* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+* NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS +* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN +* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +* SOFTWARE. +* +* $Id$ +*/ + + +#include +#include +#include +#include +#include "iba\ib_types.h" +#include + + +// Print all ips that are related to infiniband on this computer +int print_ips() +{ + HANDLE hKernelLib; + HRESULT hr = S_OK; + char temp [1000]; + char temp1 [1000]; + IOCTL_IBAT_PORTS_IN ipoib_ports_in; + IOCTL_IBAT_PORTS_OUT *p_ipoib_ports_out; + IBAT_PORT_RECORD *ports_records; + + IOCTL_IBAT_IP_ADDRESSES_IN addresses_in; + IOCTL_IBAT_IP_ADDRESSES_OUT *addresses_out; + IP_ADDRESS *ip_addreses; + + BOOL ret; + int i,j; + DWORD BytesReturned = 0; + + printf("Adapters that are known to the ipoib modules are:\n\n"); + + hKernelLib = + CreateFileW( + IBAT_WIN32_NAME, + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, // share mode none + NULL, // no security + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL // no template + ); + + if (hKernelLib == INVALID_HANDLE_VALUE) { + hr = HRESULT_FROM_WIN32(GetLastError()); + printf("failed to open the kernel device hr=0x%x\n", hr); + return 1; + } + + ipoib_ports_in.Version = IBAT_IOCTL_VERSION; + + p_ipoib_ports_out = (IOCTL_IBAT_PORTS_OUT *)temp; + + ret = DeviceIoControl( + hKernelLib, + IOCTL_IBAT_PORTS, + &ipoib_ports_in, + sizeof(ipoib_ports_in), + p_ipoib_ports_out, + sizeof(temp), + &BytesReturned, + NULL + ); + + if (ret == 0) { + hr = HRESULT_FROM_WIN32(GetLastError()); + printf("DeviceIoControl failed for IOCTL_IBAT_PORTS hr=0x%x\n", hr); + return 1; + } + if (p_ipoib_ports_out->Size > sizeof(temp)) { + printf("Data truncated, please call again with a buffer of %d bytes", p_ipoib_ports_out->Size); + } + + ports_records = p_ipoib_ports_out->Ports; + printf("Number of devices %d\n", p_ipoib_ports_out->NumPorts); + for (i = 0 ; i < p_ipoib_ports_out->NumPorts; i++) + { + printf("%d: ca guid = 0x%I64x port guid=0x%I64x\n", i, CL_NTOH64(ports_records[i].CaGuid), CL_NTOH64(ports_records[i].PortGuid)); + + // print the ip adresses of this port + addresses_in.Version = IBAT_IOCTL_VERSION; + addresses_in.PortGuid = ports_records[i].PortGuid; + + addresses_out = (IOCTL_IBAT_IP_ADDRESSES_OUT *)temp1; + + ret = DeviceIoControl( + hKernelLib, + IOCTL_IBAT_IP_ADDRESSES, + &addresses_in, + sizeof(addresses_in), + addresses_out, + sizeof(temp1), + &BytesReturned, + NULL + ); + + if (ret == 0) + { + hr = HRESULT_FROM_WIN32(GetLastError()); + printf("DeviceIoControl failed for IOCTL_IBAT_IP_ADDRESSES hr=0x%x\n", hr); + return 1; + } + if (addresses_out->Size > sizeof(temp1) ) + { + printf("Data truncated, please call again with a buffer of %d bytes", p_ipoib_ports_out->Size); + return 1; + } + + printf(" found %d ips:", addresses_out->AddressCount); + ip_addreses = addresses_out->Address; + for (j = 0; j < addresses_out->AddressCount; j++) + { + printf(" %d.%d.%d.%d ", + ip_addreses[j].Address[12], + ip_addreses[j].Address[13], + ip_addreses[j].Address[14], + ip_addreses[j].Address[15]); + } + printf("\n"); + } + + return 0; +}; + +void print_usage(char *argv[]) +{ + printf("This program is used to print ip adapters and their addresses or to do arp\n"); + printf("Usage is: %s \n",argv[0]); + printf("or %s (for example %s remoteip 1.2.3.4)\n", argv[0],argv[0]); +} + +int remote_ip(char *remote_ip) +{ + HANDLE hKernelLib; + HRESULT hr = S_OK; + IPAddr ip; + char *pIp = (char 
*)&ip; + int b1,b2,b3,b4; + DWORD ret; + IOCTL_IBAT_MAC_TO_GID_IN mac; + IOCTL_IBAT_MAC_TO_GID_OUT gid; + DWORD BytesReturned = 0; + + ULONG pMacAddr[2], PhyAddrLen ; + unsigned char *pMac = (unsigned char *)&pMacAddr; + PhyAddrLen = sizeof(pMacAddr); + + hKernelLib = + CreateFileW( + IBAT_WIN32_NAME, + GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, // share mode none + NULL, // no security + OPEN_EXISTING, + FILE_ATTRIBUTE_NORMAL, + NULL // no template + ); + + if (hKernelLib == INVALID_HANDLE_VALUE) + { + hr = HRESULT_FROM_WIN32(GetLastError()); + printf("failed to open the kernel device hr=0x%x\n", hr); + return 1; + } + + sscanf(remote_ip, "%d.%d.%d.%d", &b1, &b2, &b3, &b4); + printf("Calling arp for addresses %d.%d.%d.%d\n", b1, b2, b3, b4); + + pIp[0] = (char)b1; + pIp[1] = (char)b2; + pIp[2] = (char)b3; + pIp[3] = (char)b4; + + ret = SendARP(ip ,0 ,pMacAddr, &PhyAddrLen ); + if (ret != NO_ERROR) + { + printf("Error in SendARP"); + return 1; + } + + printf("Mac of the remote addresses is %x-%x-%x-%x-%x-%x\n", + pMac[0], pMac[1], pMac[2], pMac[3], pMac[4], pMac[5] ); + + // query for the gid + memcpy(mac.DestMac, pMac, 6); + + ret = DeviceIoControl( + hKernelLib, + IOCTL_IBAT_MAC_TO_GID, + &mac, + sizeof(mac), + &gid, + sizeof(gid), + &BytesReturned, + NULL ); + + if (ret == 0) + { + hr = HRESULT_FROM_WIN32(GetLastError()); + printf("DeviceIoControl failed for IOCTL_IBAT_IP_ADDRESSES hr=0x%x\n", hr); + } + + printf("lid of remote ip is = 0x%I64x : 0x%I64x\n", CL_NTOH64(gid.DestGid.unicast.prefix), CL_NTOH64(gid.DestGid.unicast.interface_id)); + + return 0; +} + + +int __cdecl main(int argc, char *argv[]) +{ + if (argc < 2) { + print_usage(argv); + return 1; + } + if (!strcmp(argv[1], "print_ips")) { + return print_ips(); + } + if (!strcmp(argv[1], "remoteip")) { + return remote_ip(argv[2]); + } + print_usage(argv); + return 1; +} diff --git a/branches/Ndi/tests/ibat/user/SOURCES b/branches/Ndi/tests/ibat/user/SOURCES new file mode 100644 index 00000000..b50428b7 --- /dev/null +++ b/branches/Ndi/tests/ibat/user/SOURCES @@ -0,0 +1,18 @@ +TARGETNAME=PrintIP +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=\ + PrintIp.c + +TARGETLIBS=\ + $(SDK_LIB_PATH)\ws2_32.lib \ + $(SDK_LIB_PATH)\Iphlpapi.lib + +MSC_WARNING_LEVEL= /W4 + +INCLUDES=..\..\..\inc;\ + ..\..\..\inc\user;\ + $(PLATFORM_SDK_PATH)\include; diff --git a/branches/Ndi/tests/ibat/user/makefile b/branches/Ndi/tests/ibat/user/makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/tests/ibat/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/tests/limits/dirs b/branches/Ndi/tests/limits/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tests/limits/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tests/limits/user/SOURCES b/branches/Ndi/tests/limits/user/SOURCES new file mode 100644 index 00000000..a80ae467 --- /dev/null +++ b/branches/Ndi/tests/limits/user/SOURCES @@ -0,0 +1,20 @@ +TARGETNAME=ib_limits +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=limits_main.c + +INCLUDES=..\..\..\inc;..\..\..\inc\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tests/limits/user/limits_main.c b/branches/Ndi/tests/limits/user/limits_main.c new file mode 100644 index 00000000..3216bc10 --- /dev/null +++ b/branches/Ndi/tests/limits/user/limits_main.c @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +/* + * Abstract: + * Test limits for: + * - memory registration + * - CQ creation + * - CQ resize + * - QP creation + * + * Environment: + * User Mode + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/* Globals */ +#define CMT_DBG_VERBOSE 1 + + +uint32_t cmt_dbg_lvl = 0x80000000; + + +/********************************************************************** + **********************************************************************/ +static void +__show_usage() +{ + printf( "\n------- ib_limits - Usage and options ----------------------\n" ); + printf( "Usage: ib_limits [options]\n"); + printf( "Options:\n" ); + printf( "-m\n" + "--memory\n" + "\tThis option directs ib_limits to test memory registration\n" ); + printf( "-c\n" + "--cq\n" + "\tThis option directs ib_limits to test CQ creation\n" ); + printf( "-r\n" + "--resize_cq\n" + "\tThis option directs ib_limits to test CQ resize\n" ); + printf( "-q\n" + "--qp\n" + "\tThis option directs ib_limits to test QP creation\n" ); + printf( "-v\n" + "--verbose\n" + " This option enables verbosity level to debug console.\n" ); + printf( "-h\n" + "--help\n" + " Display this usage info then exit.\n\n" ); +} + + +/* Windows support. */ +struct option +{ + const char *long_name; + unsigned long flag; + void *pfn_handler; + char short_name; +}; + +static char *optarg; + +#define strtoull strtoul + + +boolean_t test_mr, test_cq, test_resize, test_qp; + + +char +getopt_long( + int argc, + char *argv[], + const char *short_option, + const struct option *long_option, + void *unused ) +{ + static int i = 1; + int j; + char ret = 0; + + UNUSED_PARAM( unused ); + + if( i == argc ) + return -1; + + if( argv[i][0] != '-' ) + return ret; + + /* find the first character of the value. */ + for( j = 1; isalpha( argv[i][j] ); j++ ) + ; + optarg = &argv[i][j]; + + if( argv[i][1] == '-' ) + { + /* Long option. 
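+		 * Compare the text between the leading "--" and the value against
+		 * each entry's long_name.  Since strncmp is bounded by the option
+		 * length, an unambiguous prefix of a long option also matches.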
*/ + for( j = 0; long_option[j].long_name; j++ ) + { + if( strncmp( &argv[i][2], long_option[j].long_name, + optarg - argv[i] - 2 ) ) + { + continue; + } + + switch( long_option[j].flag ) + { + case 1: + if( *optarg == '\0' ) + return 0; + default: + break; + } + ret = long_option[j].short_name; + break; + } + } + else + { + for( j = 0; short_option[j] != '\0'; j++ ) + { + if( !isalpha( short_option[j] ) ) + return 0; + + if( short_option[j] == argv[i][1] ) + { + ret = short_option[j]; + break; + } + + if( short_option[j+1] == ':' ) + { + if( *optarg == '\0' ) + return 0; + j++; + } + } + } + i++; + return ret; +} + + +static boolean_t +__parse_options( + int argc, + char* argv[] ) +{ + uint32_t next_option; + const char* const short_option = "mcrq:vh"; + + /* + In the array below, the 2nd parameter specified the number + of arguments as follows: + 0: no arguments + 1: argument + 2: optional + */ + const struct option long_option[] = + { + { "memory", 2, NULL, 'm'}, + { "cq", 2, NULL, 'c'}, + { "resize_cq",2, NULL, 'r'}, + { "qp", 2, NULL, 'q'}, + { "verbose", 0, NULL, 'v'}, + { "help", 0, NULL, 'h'}, + { NULL, 0, NULL, 0 } /* Required at end of array */ + }; + + test_mr = FALSE; + test_cq = FALSE; + test_resize = FALSE; + test_qp = FALSE; + + /* parse cmd line arguments as input params */ + do + { + next_option = getopt_long( argc, argv, short_option, + long_option, NULL ); + + switch( next_option ) + { + case 'm': + test_mr = TRUE; + printf( "\tTest Memory Registration\n" ); + break; + + case 'c': + test_cq = TRUE; + printf( "\tTest CQ\n" ); + break; + + case 'r': + test_resize = TRUE; + printf( "\tTest CQ Resize\n" ); + break; + + case 'q': + test_qp = TRUE; + printf( "\tTest QP\n" ); + break; + + case 'v': + cmt_dbg_lvl = 0xFFFFFFFF; + printf( "\tverbose\n" ); + break; + + case 'h': + __show_usage(); + return FALSE; + + case -1: + break; + + default: /* something wrong */ + __show_usage(); + return FALSE; + } + } while( next_option != -1 ); + + return TRUE; +} + + +struct __mr_buf +{ + cl_list_item_t list_item; + ib_mr_handle_t h_mr; + char buf[8192 - sizeof(ib_mr_handle_t) - sizeof(cl_list_item_t)]; +}; + +static void +__test_mr( + ib_pd_handle_t h_pd ) +{ + ib_api_status_t status = IB_SUCCESS; + struct __mr_buf *p_mr; + int i = 0; + ib_mr_create_t mr_create; + cl_qlist_t mr_list; + net32_t lkey, rkey; + int64_t reg_time, dereg_time, tmp_time, cnt; + + printf( "MR testing [\n" ); + + cl_qlist_init( &mr_list ); + reg_time = 0; + dereg_time = 0; + cnt = 0; + + do + { + p_mr = cl_malloc( sizeof(struct __mr_buf) ); + if( !p_mr ) + { + i++; + printf( "Failed to allocate memory.\n" ); + continue; + } + + mr_create.vaddr = p_mr->buf; + mr_create.length = sizeof(p_mr->buf); + mr_create.access_ctrl = + IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE; + + tmp_time = cl_get_time_stamp(); + status = ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &p_mr->h_mr ); + if( status != IB_SUCCESS ) + { + i++; + printf( "ib_reg_mem returned %s\n", ib_get_err_str( status ) ); + cl_free( p_mr ); + continue; + } + reg_time += cl_get_time_stamp() - tmp_time; + cnt++; + + cl_qlist_insert_tail( &mr_list, &p_mr->list_item ); + + } while( status == IB_SUCCESS || i < 1000 ); + + while( cl_qlist_count( &mr_list ) ) + { + p_mr = PARENT_STRUCT( cl_qlist_remove_head( &mr_list ), + struct __mr_buf, list_item ); + + tmp_time = cl_get_time_stamp(); + status = ib_dereg_mr( p_mr->h_mr ); + if( status != IB_SUCCESS ) + printf( "ib_dereg_mr returned %s\n", ib_get_err_str( status ) ); + dereg_time += cl_get_time_stamp() - 
tmp_time; + + cl_free( p_mr ); + } + + printf( "reg time %f, dereg time %f\n", (double)reg_time/(double)cnt, + (double)dereg_time/(double)cnt ); + printf( "MR testing ]\n" ); +} + + +struct __cq +{ + cl_list_item_t list_item; + ib_cq_handle_t h_cq; +}; + +static void +__test_cq( + ib_ca_handle_t h_ca, + boolean_t resize ) +{ + ib_api_status_t status = IB_SUCCESS; + struct __cq *p_cq; + int i = 0, j; + ib_cq_create_t cq_create; + cl_qlist_t cq_list; + cl_waitobj_handle_t h_waitobj; + uint32_t size; + + printf( "CQ %stesting [\n", resize?"resize ":"" ); + + cl_qlist_init( &cq_list ); + + if( cl_waitobj_create( FALSE, &h_waitobj ) != CL_SUCCESS ) + { + printf( "Failed to allocate CQ wait object.\n" ); + return; + } + + do + { + p_cq = cl_malloc( sizeof(*p_cq) ); + if( !p_cq ) + { + i++; + printf( "Failed to allocate memory.\n" ); + continue; + } + + cq_create.h_wait_obj = h_waitobj; + cq_create.pfn_comp_cb = NULL; + if( resize ) + cq_create.size = 32; + else + cq_create.size = 4096; + + status = ib_create_cq( h_ca, &cq_create, NULL, NULL, &p_cq->h_cq ); + if( status != IB_SUCCESS ) + { + i++; + printf( "ib_create_cq returned %s\n", ib_get_err_str( status ) ); + cl_free( p_cq ); + continue; + } + + if( resize ) + { + size = 256; + j = 0; + + do + { + status = ib_modify_cq( p_cq->h_cq, &size ); + if( status == IB_SUCCESS ) + { + size += 256; + } + else + { + j++; + printf( "ib_modify_cq returned %s\n", + ib_get_err_str( status ) ); + } + + } while( status == IB_SUCCESS || j < 100 ); + } + + cl_qlist_insert_tail( &cq_list, &p_cq->list_item ); + + } while( status == IB_SUCCESS || i < 1000 ); + + while( cl_qlist_count( &cq_list ) ) + { + p_cq = PARENT_STRUCT( cl_qlist_remove_head( &cq_list ), + struct __cq, list_item ); + + status = ib_destroy_cq( p_cq->h_cq, NULL ); + if( status != IB_SUCCESS ) + printf( "ib_destroy_cq returned %s\n", ib_get_err_str( status ) ); + + cl_free( p_cq ); + } + + printf( "CQ %stesting ]\n", resize?"resize ":"" ); +} + +/********************************************************************** + **********************************************************************/ +int __cdecl +main( + int argc, + char* argv[] ) +{ + ib_api_status_t status; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + size_t size; + net64_t *ca_guids; + + /* Set defaults. 
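+	 * All of the test flags start out FALSE; __parse_options enables
+	 * only the tests that were requested on the command line.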
*/ + if( !__parse_options( argc, argv ) ) + return 1; + + status = ib_open_al( &h_al ); + if( status != IB_SUCCESS ) + { + printf( "ib_open_al returned %s\n", ib_get_err_str( status ) ); + return 1; + } + + size = 0; + status = ib_get_ca_guids( h_al, NULL, &size ); + if( status != IB_INSUFFICIENT_MEMORY ) + { + printf( "ib_get_ca_guids for array size returned %s", + ib_get_err_str( status ) ); + goto done; + } + + if( size == 0 ) + { + printf( "No CAs installed.\n" ); + goto done; + } + + ca_guids = malloc( sizeof(net64_t) * size ); + if( !ca_guids ) + { + printf( "Failed to allocate CA GUID array.\n" ); + goto done; + } + + status = ib_get_ca_guids( h_al, ca_guids, &size ); + if( status != IB_SUCCESS ) + { + printf( "ib_get_ca_guids for CA guids returned %s", + ib_get_err_str( status ) ); + free( ca_guids ); + goto done; + } + + status = ib_open_ca( h_al, ca_guids[0], NULL, NULL, &h_ca ); + free( ca_guids ); + if( status != IB_SUCCESS ) + { + printf( "ib_open_ca returned %s", ib_get_err_str( status ) ); + goto done; + } + + status = ib_alloc_pd( h_ca, IB_PDT_NORMAL, NULL, &h_pd ); + if( status != IB_SUCCESS ) + { + printf( "ib_alloc_pd returned %s", ib_get_err_str( status ) ); + goto done; + } + + if( test_mr ) + __test_mr( h_pd ); + + if( test_cq ) + __test_cq( h_ca, FALSE ); + + if( test_resize ) + __test_cq( h_ca, TRUE ); + + //if( test_qp ) + // __test_qp( h_ca, h_pd ); + +done: + ib_close_al( h_al ); + + return 0; +} diff --git a/branches/Ndi/tests/limits/user/makefile b/branches/Ndi/tests/limits/user/makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/tests/limits/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/tests/wsd/dirs b/branches/Ndi/tests/wsd/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tests/wsd/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tests/wsd/user/contest/contest.c b/branches/Ndi/tests/wsd/user/contest/contest.c new file mode 100644 index 00000000..78012031 --- /dev/null +++ b/branches/Ndi/tests/wsd/user/contest/contest.c @@ -0,0 +1,200 @@ + +#define WIN32_LEAN_AND_MEAN +#include +#include +#include + +#include "contest.h" + +static int init_winsock(void) +{ + // initialization of the winsock DLL first + WORD wVersionRequested; + WSADATA wsaData; + int ret; + + wVersionRequested = MAKEWORD( 2, 2 ); + ret = WSAStartup( wVersionRequested, &wsaData ); + if ( ret != 0 ) { + printf("WSAStartup Error!\n"); + return 1; + } + + if ( LOBYTE( wsaData.wVersion ) != 2 || + HIBYTE( wsaData.wVersion ) != 2) { + WSACleanup( ); + printf("Bad winsock version!\n"); + } + + return 0; +} + + +static struct { + SOCKET s; +} conn[MAX_CONNS]; + + +int client(const char *host) +{ + struct sockaddr_in to_addr; + int i; + char message[100]; + int recd, sent, len; + + if (init_winsock()) { + return 1; + } + + to_addr.sin_family = AF_INET; + to_addr.sin_addr.s_addr = inet_addr(host); + to_addr.sin_port = htons(CONTEST_PORT); + + printf("Creating sockets\n"); + + for (i=0; iguid)); + + for(item_port = cl_qlist_head(&hca->ports_list); + item_port != cl_qlist_end(&hca->ports_list); + item_port = cl_qlist_next(item_port)) { + + struct ibsp_port *port = PARENT_STRUCT(item_port, struct ibsp_port, item); + + printf(" found port %d, guid 
%I64x\n", port->port_num, + CL_NTOH64(port->guid)); + + for(item_ip = cl_qlist_head(&port->ip_list); + item_ip != cl_qlist_end(&port->ip_list); + item_ip = cl_qlist_next(item_ip)) { + + struct ibsp_ip_addr *ip = + PARENT_STRUCT(item_ip, struct ibsp_ip_addr, item); + + printf(" %s\n", inet_ntoa(ip->ip_addr)); + + /* Remember that for the PR test */ + port_pr = port; + ip_addr_pr = ip->ip_addr; + } + } + } + + cl_spinlock_release(&g_ibsp.hca_mutex); + + if (port_pr == NULL) { + printf("BAD: port_pr is NULL\n"); + return 1; + } + + /* Display the list of all IP addresses */ + printf("List of IP addresses:\n"); + + cl_spinlock_acquire(&g_ibsp.ip_mutex); + + for(item_ip = cl_qlist_head(&g_ibsp.ip_list); + item_ip != cl_qlist_end(&g_ibsp.ip_list); item_ip = cl_qlist_next(item_ip)) { + + struct ibsp_ip_addr *ip = + PARENT_STRUCT(item_ip, struct ibsp_ip_addr, item_global); + + printf(" %s\n", inet_ntoa(ip->ip_addr)); + } + + cl_spinlock_release(&g_ibsp.ip_mutex); + + /* Query for the GUID of all local IP addresses */ + printf("Guid of local IP addresses:\n"); + for(item_ip = cl_qlist_head(&g_ibsp.ip_list); + item_ip != cl_qlist_end(&g_ibsp.ip_list); item_ip = cl_qlist_next(item_ip)) { + + struct ibsp_ip_addr *ip = + PARENT_STRUCT(item_ip, struct ibsp_ip_addr, item_global); + + ret = query_guid_address(port_pr, ip->ip_addr.S_un.S_addr, &remote_guid); + if (ret) { + printf("query_guid_address failed\n"); + return 1; + } + + printf("got GUID %I64x for IP %s\n", CL_NTOH64(remote_guid), + inet_ntoa(ip->ip_addr)); + } + + /* Query for the GUID of all IP addresses */ + printf("Guid of IP addresses:\n"); + for(i = 0; i < ROUTING_SIZE; i++) { + struct in_addr in; + ib_net32_t ip_addr; + + ip_addr = inet_addr(routing[i].ip_source); + in.S_un.S_addr = ip_addr; + + ret = query_guid_address(port_pr, ip_addr, &remote_guid); + if (ret) { + printf("query_guid_address failed for IP %s\n", inet_ntoa(in)); + } else { + in.S_un.S_addr = ip_addr; + printf("got GUID %I64x for IP %s\n", CL_NTOH64(remote_guid), inet_ntoa(in)); + } + + /* TODO: fill routing.guid */ + } + + /* Find the remote IP */ + remote_ip_addr.S_un.S_addr = INADDR_ANY; + for(i = 0; i < ROUTING_SIZE; i++) { + if (ip_addr_pr.S_un.S_addr == inet_addr(routing[i].ip_source)) { + remote_ip_addr.S_un.S_addr = inet_addr(routing[i].ip_dest); + break; + } + } + if (remote_ip_addr.S_un.S_addr == INADDR_ANY) { + /* Did not find source address. 
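+		 * The static routing[] table maps each local source IP to the
+		 * remote IP it should be tested against, so a local IP that is
+		 * missing from the table indicates a misconfigured test setup.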
*/ + printf("BAD- source IP %s not in routing\n", inet_ntoa(ip_addr_pr)); + return 1; + } + + printf("going to test between %s", inet_ntoa(ip_addr_pr)); + printf(" and %s\n", inet_ntoa(remote_ip_addr)); + + ret = query_guid_address(port_pr, remote_ip_addr.S_un.S_addr, &remote_guid); + if (ret) { + printf("query_guid_address failed for remote IP\n"); + return 1; + } + + printf("querying PR between %I64x and %I64x\n", + CL_NTOH64(port_pr->guid), CL_NTOH64(remote_guid)); + + ret = query_pr(port_pr, remote_guid, &path_rec); + if (ret) { + printf("query_pr failed\n"); + return 1; + } +#if 0 + /* Stressing query PR */ + for(i = 0; i < 1000000; i++) { + ret = query_pr(port_pr, remote_guid, &path_rec); + if (ret) { + printf("query_pr failed (at %d)\n", i); + return 1; + } + if (i % 1000 == 0) { + printf("."); + fflush(stdout); + } + } +#endif + +#if 0 + while(1) { + /* Display the list of all IP addresses */ + printf("List of IP addresses:\n"); + + cl_spinlock_acquire(&g_ibsp.ip_mutex); + + for(item_ip = cl_qlist_head(&g_ibsp.ip_list); + item_ip != cl_qlist_end(&g_ibsp.ip_list); item_ip = cl_qlist_next(item_ip)) { + + struct ibsp_ip_addr *ip = + PARENT_STRUCT(item_ip, struct ibsp_ip_addr, item_global); + + printf(" %s\n", inet_ntoa(ip->ip_addr)); + } + + cl_spinlock_release(&g_ibsp.ip_mutex); + + Sleep(100); + } +#endif + + return 0; +} + +int +main(void) +{ + int ret; + + memset(&g_ibsp, 0, sizeof(g_ibsp)); + if (init_globals()) { + CL_ERROR(IBSP_DBG_TEST, gdbg_lvl, ("init_globals failed\n")); + return 1; + } + + ret = ibsp_initialize(); + if (ret) { + printf("ib_initialize failed (%d)\n", ret); + return 1; + } + + do_test(); + + ib_release(); + + release_globals(); + + return 0; +} diff --git a/branches/Ndi/tests/wsd/user/test2/ibwrap.c b/branches/Ndi/tests/wsd/user/test2/ibwrap.c new file mode 100644 index 00000000..50a6cce4 --- /dev/null +++ b/branches/Ndi/tests/wsd/user/test2/ibwrap.c @@ -0,0 +1,599 @@ +#include "ibwrap.h" + +/* CQ completion handler */ +static void ib_cq_comp(void *cq_context) +{ + ib_api_status_t status; + ib_wc_t wclist; + ib_wc_t *free_wclist; + ib_wc_t *done_wclist; + BOOL need_rearm; + struct qp_pack *qp = cq_context; + + need_rearm = TRUE; + + again: + while(1) { + + wclist.p_next = NULL; + + free_wclist = &wclist; + + status = ib_poll_cq(qp->cq_handle, &free_wclist, &done_wclist); + + switch (status) { + case IB_NOT_FOUND: + goto done; + + case IB_SUCCESS: + printf("got a completion\n"); + break; + + default: + printf("ib_poll_cq failed badly (%d)\n", status); + need_rearm = FALSE; + goto done; + } + + if (done_wclist->status != IB_WCS_SUCCESS) { + printf("operation failed - status %d\n", done_wclist->status); + } + + need_rearm = TRUE; + + /* We have some completions. */ + while(done_wclist) { + + cl_atomic_dec(&qp->wq_posted); + + done_wclist = done_wclist->p_next; + }; + + if (free_wclist != NULL) { + /* No more completions */ + goto done; + } + }; + + done: + if (need_rearm) { + + need_rearm = FALSE; + + status = ib_rearm_cq(qp->cq_handle, FALSE); + if (status != IB_SUCCESS) { + printf("ib_poll_cq failed badly (%d)\n", status); + } + + goto again; + } +} + +/* Enable or disable completion. 
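+ * Only the enable path does any work: it re-arms the CQ so that the next
+ * completion invokes ib_cq_comp again.  There is nothing to undo when
+ * disabling, so that case simply succeeds.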
*/ +int control_qp_completion(struct qp_pack *qp, int enable) +{ + ib_api_status_t status; + + if (enable) { + + status = ib_rearm_cq(qp->cq_handle, FALSE); + if (status) { + printf("ib_rearm_cq failed (%d)\n", status); + goto fail; + } + + } + + return 0; + + fail: + return -1; +} + +int create_qp(struct qp_pack *qp) +{ + ib_api_status_t status; + ib_cq_create_t cq_create; + ib_qp_create_t qp_create; + ib_qp_mod_t qp_mod; + ib_net64_t *guid_list = NULL; + size_t adapter_count; + size_t ca_attr_size = 0; + + qp->wq_posted = 0; + + /* Open AL */ + status = ib_open_al(&qp->al_handle); + if (status != IB_SUCCESS) { + printf("ib_open_al failed (%d)\n", status); + goto fail; + } + + /* Find a CA */ + adapter_count = 10; + guid_list = malloc(sizeof(ib_net64_t) * adapter_count); + if (guid_list == NULL) { + printf("can't get enough memory (%d, %d)\n", sizeof(ib_net64_t), adapter_count); + goto fail; + } + + status = ib_get_ca_guids(qp->al_handle, guid_list, &adapter_count); + if (status != IB_SUCCESS) { + printf("second ib_get_ca_guids failed (%d)\n", status); + goto fail; + } + + if (adapter_count < 1) { + printf("not enough CA (%d)\n", adapter_count); + goto fail; + } + + /* Open the hca */ + status = ib_open_ca(qp->al_handle, guid_list[0], NULL, /* event handler */ + NULL, /* context */ + &qp->hca_handle); + + if (status != IB_SUCCESS) { + printf("ib_open_ca failed (%d)\n", status); + goto fail; + } + + + /* Get the HCA attributes */ + ca_attr_size = 0; + query_ca_again: + status = ib_query_ca(qp->hca_handle, qp->ca_attr, &ca_attr_size); + + if (status == IB_INSUFFICIENT_MEMORY) { + + printf("ib_query_ca needs %d bytes\n", ca_attr_size); + + /* Allocate more memory */ + qp->ca_attr = malloc(ca_attr_size); + + if (qp->ca_attr) + goto query_ca_again; + else { + printf("HeapAlloc failed\n"); + goto fail; + } + } else if (status != IB_SUCCESS) { + printf("ib_query_ca failed (%d)\n", status); + goto fail; + } + + /* Find a port */ + if (qp->ca_attr->num_ports < 1) { + printf("not enough ports (%d)\n", qp->ca_attr->num_ports); + goto fail; + } + + qp->hca_port = &qp->ca_attr->p_port_attr[0]; + + /* Create a PD */ + status = ib_alloc_pd(qp->hca_handle, IB_PDT_NORMAL, qp, /* context */ + &qp->pd_handle); + if (status) { + printf("ib_alloc_pd failed (%d)\n", status); + goto fail; + } + + /* Create a CQ */ + cq_create.size = 50; + cq_create.pfn_comp_cb = ib_cq_comp; + cq_create.h_wait_obj = NULL; /* we use signaled completion instead */ + + status = ib_create_cq(qp->hca_handle, &cq_create, qp, /* context */ + NULL, /* async handler */ + &qp->cq_handle); + if (status) { + printf("ib_create_cq failed (%d)\n", status); + goto fail; + } + + status = ib_rearm_cq(qp->cq_handle, FALSE); + if (status) { + printf("ib_rearm_cq failed (%d)\n", status); + goto fail; + } + + /* Arm the CQ handler */ + if (control_qp_completion(qp, 1) != 0) { + printf("control_qp_completion failed\n"); + goto fail; + } + + /* Create a qp */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); + qp_create.qp_type = IB_QPT_RELIABLE_CONN; + qp_create.h_rdd = NULL; + qp_create.sq_depth = 255; + qp_create.rq_depth = 255; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + qp_create.h_rq_cq = qp->cq_handle; + qp_create.h_sq_cq = qp->cq_handle; + qp_create.sq_signaled = TRUE; + + status = ib_create_qp(qp->pd_handle, &qp_create, qp, /* context */ + NULL, /* async handler */ + &qp->qp_handle); + if (status) { + printf("ib_create_qp failed (%d)\n", status); + goto fail; + } + + /* Modify QP to INIT state */ + memset(&qp_mod, 0, sizeof(qp_mod)); + + 
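+	/*
+	 * The RESET->INIT transition must name the port, the pkey index and
+	 * the access rights; the opts mask below tells the verbs provider
+	 * which attribute fields are valid.
+	 */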
qp_mod.req_state = IB_QPS_INIT; + qp_mod.state.init.pkey_index = 0; + qp_mod.state.init.primary_port = qp->hca_port->port_num; + qp_mod.state.init.access_ctrl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; + qp_mod.state.init.opts = IB_MOD_QP_PRIMARY_PORT | IB_MOD_QP_ACCESS_CTRL | IB_MOD_QP_PKEY; + + status = ib_modify_qp(qp->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp failed (%d)\n", status); + goto fail; + } + + if (query_qp(qp)) { + printf("query failed\n"); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int query_qp(struct qp_pack *qp) +{ + ib_api_status_t status; + + status = ib_query_qp(qp->qp_handle, &qp->qp_attr); + if (status) { + printf("ib_query_qp failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int connect_qp(struct qp_pack *qp1, struct qp_pack *qp2) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + /* Update attributes */ + if (query_qp(qp1)) { + printf("query failed\n"); + goto fail; + } + + if (query_qp(qp2)) { + printf("query failed\n"); + goto fail; + } + + /* Modify QP to RTR state */ + memset(&qp_mod, 0, sizeof(qp_mod)); + + qp_mod.req_state = IB_QPS_RTR; + qp_mod.state.rtr.rq_psn = 0x1234; /* random */ + qp_mod.state.rtr.dest_qp = qp2->qp_attr.num; + qp_mod.state.rtr.primary_av.dlid = qp2->hca_port->lid; + qp_mod.state.rtr.primary_av.static_rate = 1; + qp_mod.state.rtr.primary_av.port_num = qp1->hca_port->port_num; + + qp_mod.state.rtr.primary_av.conn.local_ack_timeout = 0; + qp_mod.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + qp_mod.state.rtr.primary_av.conn.rnr_retry_cnt = 6; + qp_mod.state.rtr.primary_av.conn.seq_err_retry_cnt = 6; + + qp_mod.state.rtr.qkey = 0; + qp_mod.state.rtr.resp_res = 4; + qp_mod.state.rtr.rnr_nak_timeout = 6; + + qp_mod.state.rtr.opts = IB_MOD_QP_QKEY | IB_MOD_QP_PRIMARY_AV | IB_MOD_QP_RNR_NAK_TIMEOUT | IB_MOD_QP_RESP_RES; + + status = ib_modify_qp(qp1->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp 1 failed (%d)\n", status); + goto fail; + } + + /* Modify QP to RTS state */ + memset(&qp_mod, 0, sizeof(qp_mod)); + + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.sq_psn = 0x1234; + qp_mod.state.rts.retry_cnt = 7; + qp_mod.state.rts.rnr_retry_cnt = 7; + + qp_mod.state.rts.opts = IB_MOD_QP_RETRY_CNT | IB_MOD_QP_RNR_RETRY_CNT; + + status = ib_modify_qp(qp1->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp 2 failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +#if 0 +/* Move the QP to the error state. Wait for the CQ to flush. 
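+ * (Once the QP is in the error state, all outstanding work requests
+ * complete through the CQ with a flush error status, so the wq_posted
+ * counter polled below drains to zero.)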
*/ +int move_qp_to_error(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to ERROR state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_ERR; + + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(ERROR) failed ", vret); + goto fail; + } + + while(qp->wq_posted != 0) { + usleep(10000); + } + + return 0; + + fail: + return -1; +} +#endif + +#if 0 +int move_qp_to_reset(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to RESET state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_RESET; + + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(ERROR) failed ", vret); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int move_qp_to_drain(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to SQD state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_SQD; + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(SQD) failed ", vret); + goto fail; + } + + return 0; + + fail: + return -1; +} +#endif + +int destroy_qp(struct qp_pack *qp) +{ + ib_api_status_t status; + + if (qp->qp_handle) { + status = ib_destroy_qp(qp->qp_handle, NULL); + if (status) { + printf("ib_destroy_qp failed (%d)\n", status); + } + qp->qp_handle = NULL; + } + + if (qp->cq_handle) { + status = ib_destroy_cq(qp->cq_handle, NULL); + if (status) { + printf("ib_destroy_cq failed (%d)\n", status); + } + qp->cq_handle = NULL; + } + + if (qp->pd_handle) { + status = ib_dealloc_pd(qp->pd_handle, NULL); + if (status) { + printf("ib_dealloc_pd failed (%d)\n", status); + } + qp->pd_handle = NULL; + } + + if (qp->hca_handle) { + status = ib_close_ca(qp->hca_handle, NULL); + if (status != IB_SUCCESS) { + printf("ib_close_ca failed (%d)\n", status); + } + qp->hca_handle = NULL; + } + + if (qp->al_handle) { + status = ib_close_al(qp->al_handle); + if (status != IB_SUCCESS) { + printf("ib_close_al failed (%d)\n", status); + } + qp->al_handle = NULL; + } + + return 0; +} + +/* Create and register a memory region which will be used for rdma transfer. */ +int create_mr(struct qp_pack *qp, struct mr_pack *mr, ib_access_t acl) { + + ib_api_status_t status; + ib_mr_create_t mr_create; + + mr->buf = malloc(mr->size); + if (mr->buf == NULL) { + printf("malloc failed\n"); + goto fail; + } + + mr_create.access_ctrl = acl; + mr_create.length = mr->size; + mr_create.vaddr = mr->buf; + + status = ib_reg_mem(qp->pd_handle, &mr_create, + &mr->lkey, &mr->rkey, &mr->mr_handle); + + if (status) { + printf("ib_reg_mem failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +/* Unregister and free a local memory region. 
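+ * (Any work request that still references the region should have
+ * completed before ib_dereg_mr is called; deregistering memory that a
+ * posted WR still points at is an error on most verbs providers.)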
*/ +int delete_mr(struct mr_pack *mr) { + ib_api_status_t status; + + status = ib_dereg_mr(mr->mr_handle); + if (status) { + printf("ib_dereg_mr failed (%d)\n", status); + goto fail; + } + + free(mr->buf); + mr->buf = NULL; + + return 0; + + fail: + return -1; +} + +#if 0 +int post_receive_buffer(struct qp_pack *qp, struct mr_pack *mr) +{ + VAPI_rr_desc_t rwr; + VAPI_sg_lst_entry_t dataseg[1]; + ib_api_status_t status; + + /* Post receive buffer on qp1 */ + memset(&rwr, 0, sizeof(rwr)); + rwr.id = (VAPI_wr_id_t)(MT_virt_addr_t)qp; + rwr.opcode = VAPI_RECEIVE; + rwr.comp_type = VAPI_SIGNALED; + + rwr.sg_lst_len = 1; + rwr.sg_lst_p = dataseg; + + dataseg[0].addr = (unsigned long)mr->buf; + dataseg[0].len = mr->size; + dataseg[0].lkey = mr->rep_mrw.l_key; + + /* Post the work request */ + vret = VAPI_post_rr(qp->hca_handle, qp->qp_handle, &rwr); + + cl_atomic_inc(&qp->wq_posted); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_post_rr failed ", vret); + atomic_dec(&qp->wq_posted); + goto fail; + } + + return 0; + + fail: + return -1; +} +#endif + +int post_send_buffer(struct qp_pack *qp, struct mr_pack *local_mr, + struct mr_pack *remote_mr, ib_wr_type_t opcode, int offset, size_t length) +{ + ib_api_status_t status; + ib_local_ds_t dataseg; + ib_send_wr_t swr; + + /* Post send buffer on qp2. */ + memset(&swr, 0, sizeof(swr)); + swr.wr_id = (uint64_t) (uintptr_t)qp; + swr.wr_type = opcode; + + if (length == 0) { + swr.num_ds = 0; + swr.ds_array = NULL; + } else { + swr.num_ds = 1; + swr.ds_array = &dataseg; + + dataseg.length = (uint32_t)length; + dataseg.vaddr = (uint64_t) (uintptr_t) local_mr->buf + offset; + dataseg.lkey = local_mr->lkey; + } + + if (opcode == WR_RDMA_WRITE) { + swr.remote_ops.vaddr = (uint64_t) (uintptr_t) remote_mr->buf; + swr.remote_ops.rkey = remote_mr->rkey; + } + + cl_atomic_inc(&qp->wq_posted); + + status = ib_post_send(qp->qp_handle, &swr, NULL); + + if (status != IB_SUCCESS) { + cl_atomic_dec(&qp->wq_posted); + printf("ib_post_send failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} diff --git a/branches/Ndi/tests/wsd/user/test2/ibwrap.h b/branches/Ndi/tests/wsd/user/test2/ibwrap.h new file mode 100644 index 00000000..e46e06fd --- /dev/null +++ b/branches/Ndi/tests/wsd/user/test2/ibwrap.h @@ -0,0 +1,41 @@ +#include + +struct qp_pack { + ib_al_handle_t al_handle; + ib_ca_handle_t hca_handle; + ib_pd_handle_t pd_handle; + ib_qp_handle_t qp_handle; + ib_cq_handle_t cq_handle; + + atomic32_t wq_posted; + + ib_ca_attr_t *ca_attr; + ib_port_attr_t *hca_port; /* Port to use */ + ib_qp_attr_t qp_attr; +}; + +struct mr_pack { + void *buf; + size_t size; + + net32_t lkey; + net32_t rkey; + ib_mr_handle_t mr_handle; +}; + + +extern int create_mr(struct qp_pack *qp, struct mr_pack *mr, ib_access_t acl); +extern int delete_mr(struct mr_pack *mr); +extern int create_qp(struct qp_pack *qp); +extern int connect_qp(struct qp_pack *qp1, struct qp_pack *qp2); +extern int post_send_buffer(struct qp_pack *qp, struct mr_pack *local_mr, struct mr_pack *remote_mr, ib_wr_type_t opcode, int offset, size_t length); +extern int destroy_qp(struct qp_pack *qp); +extern int query_qp(struct qp_pack *qp); + +#if 0 +extern int post_receive_buffer(struct qp_pack *qp, struct mr_pack *mr); +extern int move_qp_to_error(struct qp_pack *qp); +extern int move_qp_to_reset(struct qp_pack *qp); +extern int move_qp_to_drain(struct qp_pack *qp); +extern int control_qp_completion(struct qp_pack *qp, int enable); +#endif diff --git a/branches/Ndi/tests/wsd/user/test2/test2.c 
b/branches/Ndi/tests/wsd/user/test2/test2.c
new file mode 100644
index 00000000..5a1348b0
--- /dev/null
+++ b/branches/Ndi/tests/wsd/user/test2/test2.c
@@ -0,0 +1,64 @@
+#include <stdio.h>
+
+#include "ibwrap.h"
+
+int main(void)
+{
+	struct qp_pack qp1, qp2;
+	struct mr_pack mr1, mr2, mr3;
+
+	while(1) {
+		/* Create both QP and move them into the RTR state */
+		if (create_qp(&qp1) || create_qp(&qp2)) {
+			printf("Failed to create a QP\n");
+			goto done;
+		}
+
+		/* Connect both QP */
+		if (connect_qp(&qp1, &qp2)) {
+			printf("Failed to connect QP1\n");
+			goto done;
+		}
+		if (connect_qp(&qp2, &qp1)) {
+			printf("Failed to connect QP2\n");
+			goto done;
+		}
+
+		/* Create RDMA buffers */
+		mr1.size = mr2.size = mr3.size = 500000;
+		if (create_mr(&qp1, &mr1, IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE | IB_AC_RDMA_READ) ||
+			create_mr(&qp1, &mr2, IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE | IB_AC_RDMA_READ) ||
+			create_mr(&qp2, &mr3, IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE | IB_AC_RDMA_READ)) {
+			printf("Cannot create RDMA buffers\n");
+			goto done;
+		}
+
+		if (post_send_buffer(&qp1, &mr1, &mr3, WR_RDMA_WRITE, 1004, mr1.size-1004)) {
+			printf("post_send_buffer failed1\n");
+			goto done;
+		}
+
+		if (post_send_buffer(&qp1, &mr2, &mr3, WR_RDMA_WRITE, 1004, mr1.size-1004)) {
+			printf("post_send_buffer failed2\n");
+			goto done;
+		}
+
+		/* Wait for both wr to complete. */
+		while(qp1.wq_posted != 0) {
+			Sleep(100);
+		}
+
+		if (delete_mr(&mr1) || delete_mr(&mr2) || delete_mr(&mr3)) {
+			printf("cannot destroy mr\n");
+		}
+
+		if (destroy_qp(&qp1) || destroy_qp(&qp2)) {
+			printf("cannot destroy QP\n");
+		}
+	}
+
+	printf("End of test\n");
+
+ done:
+	return 0;
+}
diff --git a/branches/Ndi/tests/wsd/user/test3/ibwrap.c b/branches/Ndi/tests/wsd/user/test3/ibwrap.c
new file mode 100644
index 00000000..dd3e90d3
--- /dev/null
+++ b/branches/Ndi/tests/wsd/user/test3/ibwrap.c
@@ -0,0 +1,610 @@
+#include "ibwrap.h"
+
+/* CQ completion handler */
+static void ib_cq_comp(void *cq_context)
+{
+	ib_api_status_t status;
+	ib_wc_t wclist;
+	ib_wc_t *free_wclist;
+	ib_wc_t *done_wclist;
+	BOOL need_rearm;
+	struct qp_pack *qp = cq_context;
+
+	need_rearm = TRUE;
+
+ again:
+	while(1) {
+
+		wclist.p_next = NULL;
+
+		free_wclist = &wclist;
+
+		status = ib_poll_cq(qp->cq_handle, &free_wclist, &done_wclist);
+
+		switch (status) {
+		case IB_NOT_FOUND:
+			goto done;
+
+		case IB_SUCCESS:
+			//printf("got a completion\n");
+			break;
+
+		default:
+			printf("ib_poll_cq failed badly (%d)\n", status);
+			need_rearm = FALSE;
+			goto done;
+		}
+
+#if 0
+		if (done_wclist->status != IB_WCS_SUCCESS) {
+			printf("operation failed - status %d\n", done_wclist->status);
+		}
+#endif
+
+		need_rearm = TRUE;
+
+		/* We have some completions. */
+		while(done_wclist) {
+
+			cl_atomic_dec(&qp->wq_posted);
+
+			if (qp->comp) qp->comp(qp, done_wclist);
+
+			done_wclist = done_wclist->p_next;
+		};
+
+		if (free_wclist != NULL) {
+			/* No more completions */
+			goto done;
+		}
+	};
+
+ done:
+	if (need_rearm) {
+
+		need_rearm = FALSE;
+
+		status = ib_rearm_cq(qp->cq_handle, FALSE);
+		if (status != IB_SUCCESS) {
+			printf("ib_rearm_cq failed badly (%d)\n", status);
+		}
+
+		goto again;
+	}
+}
+
+/* Enable or disable completion.
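+ * (Re-arming with FALSE requests a callback on the next completion of
+ * any kind, solicited or not; the handler above polls the CQ dry and
+ * then re-arms, re-polling once more so that a completion that raced
+ * in between the final poll and the re-arm is not missed.)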
+ */
+int control_qp_completion(struct qp_pack *qp, int enable)
+{
+	ib_api_status_t status;
+
+	if (enable) {
+
+		status = ib_rearm_cq(qp->cq_handle, FALSE);
+		if (status) {
+			printf("ib_rearm_cq failed (%d)\n", status);
+			goto fail;
+		}
+
+	}
+
+	return 0;
+
+ fail:
+	return -1;
+}
+
+int create_qp(struct qp_pack *qp)
+{
+	ib_api_status_t status;
+	ib_cq_create_t cq_create;
+	ib_qp_create_t qp_create;
+	ib_qp_mod_t qp_mod;
+	ib_net64_t *guid_list = NULL;
+	size_t adapter_count;
+	size_t ca_attr_size = 0;
+
+	qp->wq_posted = 0;
+
+	/* Open AL */
+	status = ib_open_al(&qp->al_handle);
+	if (status != IB_SUCCESS) {
+		printf("ib_open_al failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Find a CA */
+	adapter_count = 10;
+	guid_list = malloc(sizeof(ib_net64_t) * adapter_count);
+	if (guid_list == NULL) {
+		printf("can't get enough memory (%u, %u)\n",
+			(unsigned)sizeof(ib_net64_t), (unsigned)adapter_count);
+		goto fail;
+	}
+
+	status = ib_get_ca_guids(qp->al_handle, guid_list, &adapter_count);
+	if (status != IB_SUCCESS) {
+		printf("ib_get_ca_guids failed (%d)\n", status);
+		free(guid_list);
+		goto fail;
+	}
+
+	if (adapter_count < 1) {
+		printf("no CA found (%u)\n", (unsigned)adapter_count);
+		free(guid_list);
+		goto fail;
+	}
+
+	/* Open the hca */
+	status = ib_open_ca(qp->al_handle, guid_list[0], NULL,	/* event handler */
+		NULL,	/* context */
+		&qp->hca_handle);
+
+	free(guid_list);
+
+	if (status != IB_SUCCESS) {
+		printf("ib_open_ca failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Get the HCA attributes */
+	ca_attr_size = 0;
+ query_ca_again:
+	status = ib_query_ca(qp->hca_handle, qp->ca_attr, &ca_attr_size);
+
+	if (status == IB_INSUFFICIENT_MEMORY) {
+
+		printf("ib_query_ca needs %u bytes\n", (unsigned)ca_attr_size);
+
+		/* Allocate more memory */
+		qp->ca_attr = malloc(ca_attr_size);
+
+		if (qp->ca_attr)
+			goto query_ca_again;
+		else {
+			printf("malloc failed\n");
+			goto fail;
+		}
+	} else if (status != IB_SUCCESS) {
+		printf("ib_query_ca failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Find a port */
+	if (qp->ca_attr->num_ports < 1) {
+		printf("not enough ports (%d)\n", qp->ca_attr->num_ports);
+		goto fail;
+	}
+
+	qp->hca_port = &qp->ca_attr->p_port_attr[0];
+
+	/* Create a PD */
+	status = ib_alloc_pd(qp->hca_handle, IB_PDT_NORMAL, qp,	/* context */
+		&qp->pd_handle);
+	if (status) {
+		printf("ib_alloc_pd failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Create a CQ */
+	cq_create.size = 50;
+	cq_create.pfn_comp_cb = ib_cq_comp;
+	cq_create.h_wait_obj = NULL;	/* we use a completion callback instead */
+
+	status = ib_create_cq(qp->hca_handle, &cq_create, qp,	/* context */
+		NULL,	/* async handler */
+		&qp->cq_handle);
+	if (status) {
+		printf("ib_create_cq failed (%d)\n", status);
+		goto fail;
+	}
+
+	status = ib_rearm_cq(qp->cq_handle, FALSE);
+	if (status) {
+		printf("ib_rearm_cq failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Arm the CQ handler */
+	if (control_qp_completion(qp, 1) != 0) {
+		printf("control_qp_completion failed\n");
+		goto fail;
+	}
+
+	/* Create a qp */
+	cl_memclr(&qp_create, sizeof(ib_qp_create_t));
+	qp_create.qp_type = IB_QPT_RELIABLE_CONN;
+	qp_create.h_rdd = NULL;
+	qp_create.sq_depth = 255;
+	qp_create.rq_depth = 255;
+	qp_create.sq_sge = 1;
+	qp_create.rq_sge = 1;
+	qp_create.h_rq_cq = qp->cq_handle;
+	qp_create.h_sq_cq = qp->cq_handle;
+	qp_create.sq_signaled = TRUE;
+
+	status = ib_create_qp(qp->pd_handle, &qp_create, qp,	/* context */
+		NULL,	/* async handler */
+		&qp->qp_handle);
+	if (status) {
+		printf("ib_create_qp failed (%d)\n", status);
+		goto fail;
+	}
+
+	/* Modify QP to INIT state */
+	memset(&qp_mod, 0, sizeof(qp_mod));
+
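+	/* The access mask granted at INIT is what the remote peer's RDMA
+	 * reads and writes are checked against; without IB_AC_RDMA_READ
+	 * here, the RDMA-read step in test3 would complete in error. */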
qp_mod.req_state = IB_QPS_INIT; + qp_mod.state.init.pkey_index = 0; + qp_mod.state.init.primary_port = qp->hca_port->port_num; + qp_mod.state.init.access_ctrl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; + qp_mod.state.init.opts = IB_MOD_QP_PRIMARY_PORT | IB_MOD_QP_ACCESS_CTRL | IB_MOD_QP_PKEY; + + status = ib_modify_qp(qp->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp failed (%d)\n", status); + goto fail; + } + + if (query_qp(qp)) { + printf("query failed\n"); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int query_qp(struct qp_pack *qp) +{ + ib_api_status_t status; + + status = ib_query_qp(qp->qp_handle, &qp->qp_attr); + if (status) { + printf("ib_query_qp failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int connect_qp(struct qp_pack *qp1, struct qp_pack *qp2) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + /* Update attributes */ + if (query_qp(qp1)) { + printf("query failed\n"); + goto fail; + } + + if (query_qp(qp2)) { + printf("query failed\n"); + goto fail; + } + + /* Modify QP to RTR state */ + memset(&qp_mod, 0, sizeof(qp_mod)); + + qp_mod.req_state = IB_QPS_RTR; + qp_mod.state.rtr.rq_psn = 0x1234; /* random */ + qp_mod.state.rtr.dest_qp = qp2->qp_attr.num; + qp_mod.state.rtr.primary_av.dlid = qp2->hca_port->lid; + qp_mod.state.rtr.primary_av.static_rate = 1; + qp_mod.state.rtr.primary_av.port_num = qp1->hca_port->port_num; + + qp_mod.state.rtr.primary_av.conn.local_ack_timeout = 0; + qp_mod.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + qp_mod.state.rtr.primary_av.conn.rnr_retry_cnt = 6; + qp_mod.state.rtr.primary_av.conn.seq_err_retry_cnt = 6; + + qp_mod.state.rtr.qkey = 0; + qp_mod.state.rtr.resp_res = 4; + qp_mod.state.rtr.rnr_nak_timeout = 6; + + qp_mod.state.rtr.opts = IB_MOD_QP_QKEY | IB_MOD_QP_PRIMARY_AV | IB_MOD_QP_RNR_NAK_TIMEOUT | IB_MOD_QP_RESP_RES; + + status = ib_modify_qp(qp1->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp 1 failed (%d)\n", status); + goto fail; + } + + /* Modify QP to RTS state */ + memset(&qp_mod, 0, sizeof(qp_mod)); + + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.sq_psn = 0x1234; + qp_mod.state.rts.retry_cnt = 7; + qp_mod.state.rts.rnr_retry_cnt = 7; + + qp_mod.state.rts.opts = IB_MOD_QP_RETRY_CNT | IB_MOD_QP_RNR_RETRY_CNT; + + status = ib_modify_qp(qp1->qp_handle, &qp_mod); + if (status) { + printf("ib_modify_qp 2 failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +#if 0 +/* Move the QP to the error state. Wait for the CQ to flush. 
*/ +int move_qp_to_error(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to ERROR state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_ERR; + + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(ERROR) failed ", vret); + goto fail; + } + + while(qp->wq_posted != 0) { + usleep(10000); + } + + return 0; + + fail: + return -1; +} +#endif + +#if 0 +int move_qp_to_reset(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to RESET state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_RESET; + + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(ERROR) failed ", vret); + goto fail; + } + + return 0; + + fail: + return -1; +} + +int move_qp_to_drain(struct qp_pack *qp) +{ + ib_api_status_t status; + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* Modify QP to SQD state */ + memset(&qp_attr, 0, sizeof(qp_attr)); + + qp_attr.qp_state = VAPI_SQD; + qp_attr_mask = QP_ATTR_QP_STATE; + + vret = VAPI_modify_qp(qp->hca_handle, qp->qp_handle, &qp_attr, &qp_attr_mask, &qp_cap); + + if (vret != VAPI_OK) { + VAPI_ERR_LOG("VAPI_modify_qp(SQD) failed ", vret); + goto fail; + } + + return 0; + + fail: + return -1; +} +#endif + +int destroy_qp(struct qp_pack *qp) +{ + ib_api_status_t status; + + if (qp->qp_handle) { + status = ib_destroy_qp(qp->qp_handle, NULL); + if (status) { + printf("ib_destroy_qp failed (%d)\n", status); + } + qp->qp_handle = NULL; + } + + if (qp->cq_handle) { + status = ib_destroy_cq(qp->cq_handle, NULL); + if (status) { + printf("ib_destroy_cq failed (%d)\n", status); + } + qp->cq_handle = NULL; + } + + if (qp->pd_handle) { + status = ib_dealloc_pd(qp->pd_handle, NULL); + if (status) { + printf("ib_dealloc_pd failed (%d)\n", status); + } + qp->pd_handle = NULL; + } + + if (qp->hca_handle) { + status = ib_close_ca(qp->hca_handle, NULL); + if (status != IB_SUCCESS) { + printf("ib_close_ca failed (%d)\n", status); + } + qp->hca_handle = NULL; + } + + if (qp->al_handle) { + status = ib_close_al(qp->al_handle); + if (status != IB_SUCCESS) { + printf("ib_close_al failed (%d)\n", status); + } + qp->al_handle = NULL; + } + + return 0; +} + +/* Create and register a memory region which will be used for rdma transfer. */ +int create_mr(struct qp_pack *qp, struct mr_pack *mr, size_t len) { + + ib_api_status_t status; + ib_mr_create_t mr_create; + + mr->size = len; + + mr->buf = malloc(mr->size); + if (mr->buf == NULL) { + printf("malloc failed\n"); + goto fail; + } + + mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE | IB_AC_RDMA_READ; + mr_create.length = mr->size; + mr_create.vaddr = mr->buf; + + status = ib_reg_mem(qp->pd_handle, &mr_create, + &mr->lkey, &mr->rkey, &mr->mr_handle); + + if (status) { + printf("ib_reg_mem failed (%d)\n", status); + goto fail; + } + + return 0; + + fail: + return -1; +} + +/* Unregister and free a local memory region. 
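+ * (The mr_pack is cleared afterwards so a stale lkey/rkey cannot be
+ * posted accidentally once the region is gone.)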
+ */
+int delete_mr(struct mr_pack *mr) {
+	ib_api_status_t status;
+
+	status = ib_dereg_mr(mr->mr_handle);
+	if (status) {
+		printf("ib_dereg_mr failed (%d)\n", status);
+		goto fail;
+	}
+
+	free(mr->buf);
+	memset(mr, 0, sizeof(struct mr_pack));
+
+	return 0;
+
+ fail:
+	return -1;
+}
+
+int post_receive_buffer(struct qp_pack *qp, struct mr_pack *mr,
+	int offset, size_t length)
+{
+	ib_api_status_t status;
+	ib_local_ds_t dataseg;
+	ib_recv_wr_t rwr;
+
+	/* Post receive buffer on qp1 */
+	memset(&rwr, 0, sizeof(rwr));
+
+	rwr.wr_id = (uint64_t) (uintptr_t)qp;
+
+	rwr.num_ds = 1;
+	rwr.ds_array = &dataseg;
+
+	dataseg.length = (uint32_t)length;
+	dataseg.vaddr = (uint64_t) (uintptr_t) mr->buf + offset;
+	dataseg.lkey = mr->lkey;
+
+	cl_atomic_inc(&qp->wq_posted);
+
+	status = ib_post_recv(qp->qp_handle, &rwr, NULL);
+
+	if (status != IB_SUCCESS) {
+		cl_atomic_dec(&qp->wq_posted);
+		printf("ib_post_recv failed (%d)\n", status);
+		goto fail;
+	}
+
+	return 0;
+
+ fail:
+	return -1;
+}
+
+int post_send_buffer(struct qp_pack *qp, struct mr_pack *local_mr,
+	struct mr_pack *remote_mr, ib_wr_type_t opcode, int offset, size_t length)
+{
+	ib_api_status_t status;
+	ib_local_ds_t dataseg;
+	ib_send_wr_t swr;
+
+	/* Post send buffer on qp2. */
+	memset(&swr, 0, sizeof(swr));
+	swr.wr_id = (uint64_t) (uintptr_t)qp;
+	swr.wr_type = opcode;
+
+	if (length == 0) {
+		swr.num_ds = 0;
+		swr.ds_array = NULL;
+	} else {
+		swr.num_ds = 1;
+		swr.ds_array = &dataseg;
+
+		dataseg.length = (uint32_t)length;
+		dataseg.vaddr = (uint64_t) (uintptr_t) local_mr->buf + offset;
+		dataseg.lkey = local_mr->lkey;
+	}
+
+	if (opcode == WR_RDMA_WRITE || opcode == WR_RDMA_READ) {
+		swr.remote_ops.vaddr = (uint64_t) (uintptr_t) remote_mr->buf;
+		swr.remote_ops.rkey = remote_mr->rkey;
+
+		printf("RDMA %d %I64x %x - %I64x %x\n",
+			swr.ds_array[0].length,
+			swr.ds_array[0].vaddr,
+			swr.ds_array[0].lkey,
+			swr.remote_ops.vaddr,
+			swr.remote_ops.rkey);
+		printf("REM RDMA %p %x\n", remote_mr->buf, remote_mr->rkey);
+	}
+
+	cl_atomic_inc(&qp->wq_posted);
+
+	status = ib_post_send(qp->qp_handle, &swr, NULL);
+
+	if (status != IB_SUCCESS) {
+		cl_atomic_dec(&qp->wq_posted);
+		printf("ib_post_send failed (%d)\n", status);
+		goto fail;
+	}
+
+	return 0;
+
+ fail:
+	return -1;
+}
diff --git a/branches/Ndi/tests/wsd/user/test3/ibwrap.h b/branches/Ndi/tests/wsd/user/test3/ibwrap.h
new file mode 100644
index 00000000..697f23ec
--- /dev/null
+++ b/branches/Ndi/tests/wsd/user/test3/ibwrap.h
@@ -0,0 +1,44 @@
+#include <iba/ib_al.h>
+
+struct qp_pack {
+	ib_al_handle_t al_handle;
+	ib_ca_handle_t hca_handle;
+	ib_pd_handle_t pd_handle;
+	ib_qp_handle_t qp_handle;
+	ib_cq_handle_t cq_handle;
+
+	atomic32_t wq_posted;
+
+	ib_ca_attr_t *ca_attr;
+	ib_port_attr_t *hca_port;	/* Port to use */
+	ib_qp_attr_t qp_attr;
+
+	void (*comp)(struct qp_pack *, ib_wc_t *wc);
+};
+
+struct mr_pack {
+	void *buf;
+	size_t size;
+
+	net32_t lkey;
+	net32_t rkey;
+	ib_mr_handle_t mr_handle;
+};
+
+
+extern int create_mr(struct qp_pack *qp, struct mr_pack *mr, size_t len);
+extern int delete_mr(struct mr_pack *mr);
+extern int create_qp(struct qp_pack *qp);
+extern int connect_qp(struct qp_pack *qp1, struct qp_pack *qp2);
+extern int post_send_buffer(struct qp_pack *qp, struct mr_pack *local_mr, struct mr_pack *remote_mr, ib_wr_type_t opcode, int offset, size_t length);
+extern int destroy_qp(struct qp_pack *qp);
+extern int query_qp(struct qp_pack *qp);
+extern int post_receive_buffer(struct qp_pack *qp, struct mr_pack *mr,
+	int offset, size_t length);
+
+#if 0
+extern int move_qp_to_error(struct qp_pack *qp);
+extern int move_qp_to_reset(struct qp_pack *qp);
+extern int move_qp_to_drain(struct qp_pack *qp);
+extern int control_qp_completion(struct qp_pack *qp, int enable);
+#endif
diff --git a/branches/Ndi/tests/wsd/user/test3/test3.c b/branches/Ndi/tests/wsd/user/test3/test3.c
new file mode 100644
index 00000000..a202da82
--- /dev/null
+++ b/branches/Ndi/tests/wsd/user/test3/test3.c
@@ -0,0 +1,126 @@
+#include <stdio.h>
+
+#include "ibwrap.h"
+
+#define SR_BUFFER_SIZE 1576
+#define RDMA_BUFFER 500000
+
+struct qp_pack qp1, qp2;
+struct mr_pack mr1_send, mr1_recv, mr2_send, mr2_recv, mr1_rdma, mr2_rdma;
+
+void qp1_comp(struct qp_pack *qp, ib_wc_t *wc)
+{
+	printf("QP1 - completion status %d for op %d\n", wc->status, wc->wc_type);
+
+	if (wc->status) goto done;
+
+	if (wc->wc_type == IB_WC_RECV) {
+
+		if (create_mr(qp, &mr1_rdma, RDMA_BUFFER)) {
+			printf("Cannot create RDMA buffer\n");
+			goto done;
+		}
+
+		if (post_send_buffer(qp, &mr1_rdma, &mr2_rdma, WR_RDMA_READ, 0, RDMA_BUFFER)) {
+			printf("post_send_buffer (rdma read) failed\n");
+			goto done;
+		}
+
+		if (post_send_buffer(qp, &mr1_send, NULL, WR_SEND, 0, 40)) {
+			printf("post_send_buffer (send) failed\n");
+			goto done;
+		}
+	}
+
+done:
+	;
+}
+
+void qp2_comp(struct qp_pack *qp, ib_wc_t *wc)
+{
+	printf("QP2 - completion status %d for op %d\n", wc->status, wc->wc_type);
+
+	if (wc->status) goto done;
+
+	if (wc->wc_type == IB_WC_RECV) {
+		delete_mr(&mr2_rdma);
+	}
+
+done:
+	;
+}
+
+int main(void)
+{
+	int i;
+
+	/* Create both QP and move them into the RTR state */
+	if (create_qp(&qp1) || create_qp(&qp2)) {
+		printf("Failed to create a QP\n");
+		goto done;
+	}
+	qp1.comp = qp1_comp;
+	qp2.comp = qp2_comp;
+
+	/* Connect both QP */
+	if (connect_qp(&qp1, &qp2)) {
+		printf("Failed to connect QP1\n");
+		goto done;
+	}
+	if (connect_qp(&qp2, &qp1)) {
+		printf("Failed to connect QP2\n");
+		goto done;
+	}
+
+	/* Create RDMA buffers */
+	mr1_send.size = mr1_recv.size = mr2_send.size = mr2_recv.size = SR_BUFFER_SIZE;
+	if (create_mr(&qp1, &mr1_send, 5*SR_BUFFER_SIZE) ||
+		create_mr(&qp1, &mr1_recv, 6*SR_BUFFER_SIZE) ||
+		create_mr(&qp2, &mr2_send, 5*SR_BUFFER_SIZE) ||
+		create_mr(&qp2, &mr2_recv, 6*SR_BUFFER_SIZE)) {
+		printf("Cannot create RDMA buffers\n");
+		goto done;
+	}
+
+	/* Post receives */
+	for (i=0; i<6; i++) {
+		if (post_receive_buffer(&qp1, &mr1_recv, i*SR_BUFFER_SIZE, SR_BUFFER_SIZE)) {
+			printf("post_recv_buffer failed1\n");
+			goto done;
+		}
+	}
+
+	for (i=0; i<6; i++) {
+		if (post_receive_buffer(&qp2, &mr2_recv, i*SR_BUFFER_SIZE, SR_BUFFER_SIZE)) {
+			printf("post_recv_buffer failed2\n");
+			goto done;
+		}
+	}
+
+	if (create_mr(&qp2, &mr2_rdma, RDMA_BUFFER)) {
+		printf("Cannot create RDMA buffer\n");
+	}
+
+	if (post_send_buffer(&qp2, &mr2_send, NULL, WR_SEND, 0, 40)) {
+		printf("post_send_buffer failed\n");
+		goto done;
+	}
+
+#if 0
+	/* Wait for both wr to complete.
*/ + while(qp1.wq_posted != 0) { + Sleep(100); + } +#endif + + Sleep(4000); + + if (destroy_qp(&qp1) || destroy_qp(&qp2)) { + printf("cannot destroy QP\n"); + } + + printf("End of test\n"); + + done: + return 0; +} diff --git a/branches/Ndi/tests/wsd/user/ttcp/SOURCES b/branches/Ndi/tests/wsd/user/ttcp/SOURCES new file mode 100644 index 00000000..b85c3624 --- /dev/null +++ b/branches/Ndi/tests/wsd/user/ttcp/SOURCES @@ -0,0 +1,13 @@ +TARGETNAME=ttcp +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=\ + ttcp.c + +TARGETLIBS=\ + $(SDK_LIB_PATH)\ws2_32.lib + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tests/wsd/user/ttcp/makefile b/branches/Ndi/tests/wsd/user/ttcp/makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/tests/wsd/user/ttcp/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/tests/wsd/user/ttcp/ttcp.c b/branches/Ndi/tests/wsd/user/ttcp/ttcp.c new file mode 100644 index 00000000..aba293e3 --- /dev/null +++ b/branches/Ndi/tests/wsd/user/ttcp/ttcp.c @@ -0,0 +1,860 @@ +/* + * T T C P . C + * + * Test TCP connection. Makes a connection on port 5001 + * and transfers fabricated buffers or data copied from stdin. + * + * Ported to MS Windows enviroment, Nov. 1999 + * Lily Yang, Intel Corp. + * Always in sinkmode, use only real time for measurement. + * So, -s and -v options are not accepted. + * + * Usable on 4.2, 4.3, and 4.1a systems by defining one of + * BSD42 BSD43 (BSD41a) + * Machines using System V with BSD sockets should define SYSV. + * + * Modified for operation under 4.2BSD, 18 Dec 84 + * T.C. Slattery, USNA + * Minor improvements, Mike Muuss and Terry Slattery, 16-Oct-85. + * Modified in 1989 at Silicon Graphics, Inc. + * catch SIGPIPE to be able to print stats when receiver has died + * for tcp, don't look for sentinel during reads to allow small transfers + * increased default buffer size to 8K, nbuf to 2K to transfer 16MB + * moved default port to 5001, beyond IPPORT_USERRESERVED + * make sinkmode default because it is more popular, + * -s now means don't sink/source + * count number of read/write system calls to see effects of + * blocking from full socket buffers + * for tcp, -D option turns off buffered writes (sets TCP_NODELAY sockopt) + * buffer alignment options, -A and -O + * print stats in a format that's a bit easier to use with grep & awk + * for SYSV, mimic BSD routines to use most of the existing timing code + * Modified by Steve Miller of the University of Maryland, College Park + * -b sets the socket buffer size (SO_SNDBUF/SO_RCVBUF) + * Modified Sept. 1989 at Silicon Graphics, Inc. + * restored -s sense at request of tcs@brl + * Modified Oct. 1991 at Silicon Graphics, Inc. + * use getopt(3) for option processing, add -f and -T options. + * SGI IRIX 3.3 and 4.0 releases don't need #define SYSV. + * + * Distribution Status - + * Public Domain. Distribution Unlimited. 
+ */ + + +#include +#include +#include +#include + +#define DBG_SPEW 0 + +#if DBG_SPEW +#define DEBUG_OUT(x) { \ + char str[256];\ + sprintf(str, "%s:%d:0x%x:0x%x: %s", "ttcp", __LINE__, GetCurrentProcessId(),\ + GetCurrentThreadId(), (x));\ + OutputDebugString(str);\ +} + +#define DEBUG_OUT1(x,p) { \ + char str[256];\ + sprintf(str, "%s:%d:0x%x:0x%x: buf=0x%p %s", "ttcp", __LINE__, GetCurrentProcessId(),\ + GetCurrentThreadId(), (p),(x));\ + OutputDebugString(str);\ +} +#else +#define DEBUG_OUT(x) +#define DEBUG_OUT1(x,p) +#endif + +struct sockaddr_in sinhim; +struct sockaddr_in frominet; + +int domain, fromlen; + +int buflen = 8 * 1024; /* length of buffer */ +char *buf; /* ptr to dynamic buffer */ +int nbuf = 2 * 32; /* number of buffers to send in sinkmode */ + +int bufoffset = 0; /* align buffer to this */ +int bufalign = 16*1024; /* modulo this */ + +int udp = 0; /* 0 = tcp, !0 = udp */ +int options = 0; /* socket options */ +int one = 1; /* for 4.3 BSD style setsockopt() */ +short port = 5001; /* TCP port number */ +char *host = 0; /* ptr to name of host */ +int trans; /* 0=receive, !0=transmit mode */ +int nodelay = 0; /* set TCP_NODELAY socket option */ +int b_flag = 0; /* use mread() */ +int sockbufsize = 0; /* socket buffer size to use */ +char fmt = 'K'; /* output format: k = kilobits, K = kilobytes, + * m = megabits, M = megabytes, + * g = gigabits, G = gigabytes */ +int touchdata = 0; /* access data after reading */ + +#ifndef errno +extern int errno; +#endif + +char Usage[] = "\ +Usage: ttcp -t [-options] host \n\ + ttcp -r [-options]\n\ +Common options:\n\ + -l ## length of bufs read from or written to network (default 8192)\n\ + -u use UDP instead of TCP\n\ + -p ## port number to send to or listen at (default 5001)\n\ + -A align the start of buffers to this modulus (default 16384)\n\ + -O start buffers at this offset from the modulus (default 0)\n\ + -d set SO_DEBUG socket option\n\ + -b ## set socket buffer size (if supported)\n\ + -f X format for rate: k,K = kilo{bit,byte}; m,M = mega; g,G = giga\n\ +Options specific to -t:\n\ + -n## number of source bufs written to network (default 2048)\n\ + -D don't buffer TCP writes (sets TCP_NODELAY socket option)\n\ +Options specific to -r:\n\ + -B for -s, only output full blocks as specified by -l (for TAR)\n\ + -T \"touch\": access each byte as it's read\n\ +"; + +double nbytes = 0; /* bytes on net */ +unsigned long numCalls; /* # of I/O system calls */ +double realt; /* user, real time (seconds) */ +SOCKET fd = (SOCKET)SOCKET_ERROR; +SOCKET fd_orig = (SOCKET)SOCKET_ERROR; + +BOOL DoGracefulShutdown(SOCKET sock); +void err(); +void mes(); +void winsockInit(); +void commandLine(); +void bind_socket(); +void showTime(PSYSTEMTIME pst); +void pattern(); +int Nread(); +int Nwrite(); +int mread(); +void delay(); +void timer(); +double time_elapsed(); +char *outfmt(); + +int __cdecl main(int argc, char **argv) +{ + SYSTEMTIME time0, time1; + int cnt; + + commandLine(argc, argv); + winsockInit(); + + memset(&sinhim,0,sizeof(sinhim)); + + if(trans) { + struct hostent * pHostAddr; + // xmitr + memset(&sinhim,0,sizeof(sinhim)); + sinhim.sin_family = AF_INET; + if (atoi(host) > 0 ) { + // Numeric + sinhim.sin_addr.s_addr = inet_addr(host); + } else { + if ((pHostAddr=gethostbyname(host)) == NULL) + err("bad hostname"); + sinhim.sin_addr.s_addr = *(u_long*)pHostAddr->h_addr; + } + sinhim.sin_port = htons(port); + } + + + if (udp && buflen < 5) { + buflen = 5; // send more than the sentinel size + } + + if ( (buf = (char 
*)malloc(buflen+bufalign)) == (char *)NULL) + err("malloc"); + if (bufalign != 0) + buf +=(bufalign - ((uintptr_t)buf % bufalign) + bufoffset) % bufalign; + + fprintf(stdout, "ttcp PID=0x%x TID=0x%x\n", GetCurrentProcessId(), GetCurrentThreadId()); + + if (trans) { + fprintf(stdout, "ttcp -t: buflen=%d, nbuf=%d, align=%d/%d, port=%d", + buflen, nbuf, bufalign, bufoffset, port); + if (sockbufsize) + fprintf(stdout, ", sockbufsize=%d", sockbufsize); + fprintf(stdout, " %s -> %s\n", udp?"udp":"tcp", host); + } else { + fprintf(stdout, + "ttcp -r: buflen=%d, nbuf=%d, align=%d/%d, port=%d", + buflen, nbuf, bufalign, bufoffset, port); + if (sockbufsize) + fprintf(stdout, ", sockbufsize=%d", sockbufsize); + fprintf(stdout, " %s\n", udp?"udp":"tcp"); + } + DEBUG_OUT(("socket() start.\n")); + if ((fd = socket(AF_INET, udp?SOCK_DGRAM:SOCK_STREAM, 0)) == INVALID_SOCKET){ + err("socket"); + } + DEBUG_OUT(("socket() finish.\n")); + fprintf(stdout, "fd=0x%x\n",fd); + + DEBUG_OUT(("bind_socket() start.\n")); + if (trans) { + bind_socket(fd, 0); + } else { + bind_socket(fd, port); + } + DEBUG_OUT(("bind_socket() finish.\n")); + + /* + * CLOSE_ISSUE + * If the SO_LINGER option is enabled, then closesocket() seems to wake + * up a separate thread (previously spawned by the switch during connect() call) + * after l_linger seconds, and this thread takes care of calling GetOverlappedResult, + * DeregisterMemory, etc. In this case, there is no delay in the WSACleanup() + * call, and both the provide WSPCloseSocket() and WSPCleanup() calls are + * seen. + */ +#if 0 + { + struct linger ling; + ling.l_onoff = 1; + ling.l_linger = 3; + DEBUG_OUT(("setsockopt(SO_LINGER) start.\n")); + if (setsockopt(fd, SOL_SOCKET, SO_LINGER, + (const char*)&ling, sizeof(ling)) < 0){ + err("setsockopt: SO_LINGER"); + } + DEBUG_OUT(("setsockopt(SO_LINGER) finished.\n")); + } + +#endif + + if (sockbufsize) { + if (trans) { + if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, + (const char*)&sockbufsize,sizeof sockbufsize) < 0) + err("setsockopt: sndbuf"); + } else { + if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, + (const char*)&sockbufsize,sizeof sockbufsize) < 0) + err("setsockopt: rcvbuf"); + } + } + + if (!udp) { + if (trans) { + // We are the client if transmitting + if (options) { + if( setsockopt(fd, SOL_SOCKET, options, (const char*)&one, + sizeof(one)) < 0) + err("setsockopt"); + } + if (nodelay) { + struct protoent *p; + p = getprotobyname("tcp"); + if( p && setsockopt(fd, p->p_proto, TCP_NODELAY, + (const char*)&one, sizeof(one)) < 0) + err("setsockopt: nodelay"); + } + DEBUG_OUT(("connect() start.\n")); + if(connect(fd, (const struct sockaddr *)&sinhim, sizeof(sinhim) ) < 0){ + err("connect"); + } + DEBUG_OUT(("connect() finished.\n")); + fprintf(stdout,"ttcp -t: TCP connection established! 
\n"); + } else { + + // otherwise, we are the server and + //should listen for the connections + + DEBUG_OUT(("listen() start.\n")); + listen(fd,1); // workaround for alleged u4.2 bug + DEBUG_OUT(("listen() finish.\n")); + if(options) { + if( setsockopt(fd, SOL_SOCKET, options, (const char*)&one, + sizeof(one)) < 0) + err("setsockopt"); + } + fromlen = sizeof(frominet); + domain = AF_INET; + fd_orig=fd; + + DEBUG_OUT(("accept() start.\n")); + if((fd=accept(fd, (struct sockaddr *)&frominet, &fromlen) ) == INVALID_SOCKET){ + err("accept"); + } + DEBUG_OUT(("accept() finish.\n")); + + { + struct sockaddr_in peer; + int peerlen = sizeof(peer); + + fprintf(stdout,"closesocket(fd_orig)..\n"); + if (closesocket(fd_orig)==SOCKET_ERROR){ + err("socket close fd_orig!"); + fd_orig = (SOCKET)SOCKET_ERROR; + } + + + if (getpeername(fd, (struct sockaddr *) &peer, + &peerlen) < 0) { + err("getpeername"); + } + fprintf(stdout,"ttcp -r: accept from %s fd=0x%p\n", + inet_ntoa(peer.sin_addr), fd); + } + } + } + + if (trans) { + fprintf(stdout,"ttcp -t: start transmitting ...\n"); + pattern( buf, buflen ); + if(udp) (void)Nwrite( fd, buf, 4 ); // rcvr start + timer(&time0); + while (nbuf-- && Nwrite(fd,buf,buflen) == buflen){ + nbytes += buflen; +#if DBG_SPEW + fprintf(stdout,"nbytes=%.0f nbuf=%d, buflen=%d\n", nbytes, nbuf, buflen); +#endif + } + timer(&time1); +#if DBG_SPEW + fprintf(stdout,"nbytes=%.0f nbuf=%d\n", nbytes, nbuf); +#endif + if(udp) (void)Nwrite( fd, buf, 4 ); // rcvr end + } else { + + fprintf(stdout,"ttcp -r: start receiving ...\n"); + if (udp) { + while ((cnt=Nread(fd,buf,buflen)) > 0) { + static int going = 0; + if( cnt <= 4 ) { + if( going ) + break; // "EOF" + going = 1; + timer(&time0); + } else { + nbytes += cnt; + } + } + } else { + timer(&time0); + while ((cnt=Nread(fd,buf,buflen)) > 0) { + nbytes += cnt; +#if DBG_SPEW + fprintf(stdout,"nbytes=%.0f cnt=%d\n", nbytes, cnt); +#endif + } + timer(&time1); +#if DBG_SPEW + fprintf(stdout,"nbytes=%.0f cnt=%d\n", nbytes, cnt); +#endif + } + } + + if(0){ + fprintf(stdout, "Pausing before close...\n"); + _getch(); + } + +#if 1 + /* + * CLOSE_ISSUE + * Calling closesocket() without any linger option results in + * approximately a 15 second delay in the WSACleanup() call, + * with no provider WSPCloseSocket() or WSPCleanup() calls ever + * seen. + */ + DEBUG_OUT(("closesocket() start.\n")); + fprintf(stdout,"closesocket(fd)..\n"); + if (closesocket(fd)==SOCKET_ERROR) { + err("socket close fd!"); + } + DEBUG_OUT(("closesocket() finish.\n")); +#else + /* + * CLOSE_ISSUE + * If DoGracefulShutdown is called, then the closesocket() (in DoGracefulShutdown()) + * seems to wake up a separate thread (previously spawned by the switch during + * connect() call) and this thread takes care of calling GetOverlappedResult, + * DeregisterMemory, etc. In this case, there is no delay in the WSACleanup() + * call, and both the provide WSPCloseSocket() and WSPCleanup() calls are + * seen. + */ + DoGracefulShutdown(fd); +#endif + + + if(0){ + fprintf(stdout, "Pausing before cleanup...\n"); + _getch(); + } + + + DEBUG_OUT(("WSACleanup() start.\n")); + fprintf(stdout,"WSACleanup()..\n"); + /* + * CLOSE_ISSUE without SO_LINGER, or DoGracefulShutdown(), WSACleanup() + * can hang for 15 seconds. No WSPCloseSocket() or WSPCleanup() calls + * are seen in the provider for this case. 
+ */ + if (WSACleanup()==SOCKET_ERROR) { + err("WSACleanup"); + } + DEBUG_OUT(("WSACleanup() finish.\n")); + + if(0){ + fprintf(stdout, "Pausing after cleanup...\n"); + _getch(); + } + + realt = time_elapsed(&time0, &time1); + + if(udp&&trans) { + (void)Nwrite( fd, buf, 4 ); // rcvr end + (void)Nwrite( fd, buf, 4 ); // rcvr end + (void)Nwrite( fd, buf, 4 ); // rcvr end + (void)Nwrite( fd, buf, 4 ); // rcvr end + } + if( realt <= 0.0 ) realt = 0.001; + fprintf(stdout, + "ttcp %s: %.0f bytes in %.2f real MilliSeconds = %s/sec +++\n", + trans?"-t":"-r", + nbytes, realt, outfmt(nbytes*1000/realt)); + fprintf(stdout, + "ttcp %s: %d I/O calls, msec/call = %.2f, calls/sec = %.2f\n", + trans?"-t":"-r", + numCalls, + realt/((double)numCalls), + ((double)(1000*numCalls))/realt); + fflush(stdout); + +#if 0 + if(1){ + fprintf(stdout, "Pausing before return...\n"); + _getch(); + } +#endif + + return(0); +} + +void err(char *s) +{ + fprintf(stderr,"ERROR -- ttcp %s: ", trans?"-t":"-r"); + fprintf(stderr,"%s\n",s); + fprintf(stderr, "Cleaning up\n"); + if(fd!=SOCKET_ERROR){ + if (closesocket(fd)==SOCKET_ERROR) + fprintf(stderr, "socket close fd!"); + } + + fprintf(stderr,"\n"); + exit(-1); +} + +void mes(char *s) +{ + fprintf(stdout,"ttcp %s: %s\n", trans?"-t":"-r", s); + return; +} + +void pattern(char *cp, int cnt) +{ + register char c; + c = 0; + while( cnt-- > 0 ) { + while( !isprint((c&0x7F)) ) c++; + *cp++ = (c++&0x7F); + } +} + +char *outfmt(double b) +{ + static char obuf[50]; + switch (fmt) { + case 'G': + sprintf(obuf, "%.2f GByte", b / 1024.0 / 1024.0 / 1024.0); + break; + default: + case 'K': + sprintf(obuf, "%.2f KByte", b / 1024.0); + break; + case 'M': + sprintf(obuf, "%.2f MByte", b / 1024.0 / 1024.0); + break; + case 'g': + sprintf(obuf, "%.2f Gbit", b * 8.0 / 1024.0 / 1024.0 / 1024.0); + break; + case 'k': + sprintf(obuf, "%.2f Kbit", b * 8.0 / 1024.0); + break; + case 'm': + sprintf(obuf, "%.2f Mbit", b * 8.0 / 1024.0 / 1024.0); + break; + } + return obuf; +} + +/* + * T I M E R + */ +void timer(PSYSTEMTIME time) +{ + GetSystemTime(time); +// showTime(time); +} +/* + * N R E A D + */ +int Nread(int fd, void *buf, int count) +{ + struct sockaddr_in from; + int len = sizeof(from); + register int cnt; + if( udp ) { + cnt = recvfrom( fd, buf, count, 0, (struct sockaddr *)&from, &len ); + numCalls++; + } else { + if( b_flag ) + cnt = mread( fd, buf, count ); // fill buf + else { + DEBUG_OUT(("recv() start.\n")); + cnt = recv(fd, buf, count, 0); + DEBUG_OUT(("recv() finish.\n")); + numCalls++; + } + if (touchdata && cnt > 0) { + register int c = cnt, sum=0; + register char *b = buf; + while (c--) + sum += *b++; + } + } + return(cnt); +} + +/* + * N W R I T E + */ +int Nwrite(int fd, void *buf, int count) +{ + register int cnt; + if( udp ) { +again: + cnt = sendto( fd, buf, count, 0, (const struct sockaddr *)&sinhim, + sizeof(sinhim) ); + numCalls++; + if( cnt<0 && WSAGetLastError() == WSAENOBUFS ) { + delay(18000); + goto again; + } + } else { + DEBUG_OUT1(("send() start.\n"),buf); + cnt = send(fd, buf, count, 0); + DEBUG_OUT1(("send() finish.\n"),buf); + numCalls++; + } + return(cnt); +} + +void delay(int us) +{ + struct timeval tv; + + tv.tv_sec = 0; + tv.tv_usec = us; + (void)select( 1, NULL, NULL, NULL, &tv ); +} + +/* + * M R E A D + * + * This function performs the function of a read(II) but will + * call read(II) multiple times in order to get the requested + * number of characters. 
This can be necessary because
+ * network connections don't deliver data with the same
+ * grouping as it is written with. Written by Robert S. Miles, BRL.
+ */
+int mread(int fd, char *bufp, unsigned int n)
+{
+	register unsigned count = 0;
+	register int nread;
+
+	do {
+		nread = recv(fd, bufp, n-count, 0);
+		numCalls++;
+		if(nread < 0) {
+			perror("ttcp_mread");
+			return(-1);
+		}
+		if(nread == 0)
+			return((int)count);
+		count += (unsigned)nread;
+		bufp += nread;
+	} while(count < n);
+
+	return((int)count);
+}
+
+void winsockInit()
+{
+	// initialization of the winsock DLL first
+	WORD wVersionRequested;
+	WSADATA wsaData;
+	int ret;
+
+	wVersionRequested = MAKEWORD( 2, 0 );
+	ret = WSAStartup( wVersionRequested, &wsaData );
+	if ( ret != 0 ) {
+		// Tell the user that we couldn't find a usable
+		// WinSock DLL.
+		err(" Winsock Error!");
+	}
+
+	// Confirm that the WinSock DLL supports 2.0.
+	// Note that if the DLL supports versions greater
+	// than 2.0 in addition to 2.0, it will still return
+	// 2.0 in wVersion since that is the version we
+	// requested.
+
+	if ( LOBYTE( wsaData.wVersion ) != 2 ||
+		HIBYTE( wsaData.wVersion ) != 0 ) {
+		// Tell the user that we couldn't find a usable
+		// WinSock DLL.
+		WSACleanup( );
+		err(" Winsock Error!");
+	}
+	return;
+
+}
+
+void commandLine(int argc, char ** argv)
+{
+	int i;
+	char carg;
+
+	if (argc < 2) goto usage;
+
+	i=1;
+	while (ih_addr;
+	}
+#else
+	addr.sin_addr.s_addr = INADDR_ANY;
+#endif
+
+	// bind with the socket
+	if (bind(fd, (const struct sockaddr*)&addr,
+		sizeof(addr)) == SOCKET_ERROR) {
+		err("socket error!");
+	}
+	free(hostname);
+	return;
+
+}
+
+void showTime(PSYSTEMTIME pst)
+{
+	fprintf(stdout, "It is now %02u:%02u:%02u:%03u on %02u/%02u/%4u.\n",
+		pst->wHour, pst->wMinute, pst->wSecond, pst->wMilliseconds, \
+		pst->wMonth, pst->wDay, pst->wYear);
+}
+
+double time_elapsed(PSYSTEMTIME pst0,PSYSTEMTIME pst1)
+{
+	double diff=0;
+	diff += pst1->wMilliseconds - pst0->wMilliseconds;
+	diff += (pst1->wSecond - pst0->wSecond)*1000;
+	diff += (pst1->wMinute - pst0->wMinute)*60*1000;
+	diff += (pst1->wHour - pst0->wHour)*60*60*1000;
+	diff += (pst1->wDay - pst0->wDay)*24*60*60*1000;
+	diff += (pst1->wMonth - pst0->wMonth)*30*24*60*60*1000;
+	diff += (pst1->wYear - pst0->wYear)*365*24*60*60*1000;
+	return diff;
+}
+
+// Do a graceful shutdown of the given socket sock.
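+// The sequence below follows the usual winsock recipe: select for FD_CLOSE,
+// shutdown(SD_SEND) to send a FIN, wait for the peer to close its side,
+// drain anything still queued with recv() until it returns 0, and only then
+// call closesocket().  A minimal caller-side sketch (hypothetical; main()
+// uses the same pattern in its #else branch above):
+#if 0
+	if (!DoGracefulShutdown(fd))
+		fprintf(stderr, "graceful shutdown failed; socket was force-closed\n");
+	fd = (SOCKET)SOCKET_ERROR;
+#endif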
+BOOL DoGracefulShutdown(SOCKET sock) +{ + BOOL bRetVal = FALSE; + WSAEVENT hEvent = WSA_INVALID_EVENT; + long lNetworkEvents = 0; + int status = 0; + + hEvent = WSACreateEvent(); + if (hEvent == WSA_INVALID_EVENT) + { + fprintf(stderr, "DoGracefulShutdown: WSACreateEvent failed: %d\n", + WSAGetLastError()); + goto CLEANUP; + } + + DEBUG_OUT(("WSAEventSelect() start.\n")); + lNetworkEvents = FD_CLOSE; + if (WSAEventSelect(sock, hEvent, lNetworkEvents) != 0) + { + fprintf(stderr, "DoGracefulShutdown: WSAEventSelect failed: %d\n", + WSAGetLastError()); + goto CLEANUP; + } + DEBUG_OUT(("WSAEventSelect() finish.\n")); + + DEBUG_OUT(("shutdown() start.\n")); + if (shutdown(sock, SD_SEND) != 0) + { + fprintf(stderr, "DoGracefulShutdown: shutdown failed: %d\n", + WSAGetLastError()); + goto CLEANUP; + } + DEBUG_OUT(("shutdown() finish.\n")); + + DEBUG_OUT(("WaitForSingleObject() start.\n")); + if (WaitForSingleObject(hEvent, INFINITE) != WAIT_OBJECT_0) + { + fprintf(stderr, "DoGracefulShutdown: WaitForSingleObject failed: %d\n", + WSAGetLastError()); + goto CLEANUP; + } + DEBUG_OUT(("WaitForSingleObject() finish.\n")); + + do + { + char buf[128]; + + DEBUG_OUT(("recv() start.\n")); + status = recv(sock, buf, sizeof(buf), 0); + DEBUG_OUT(("recv() finish.\n")); + } while (!(status == 0 || status == SOCKET_ERROR)); + + DEBUG_OUT(("closesocket() start.\n")); + if (closesocket(sock) != 0) + { + fprintf(stderr, "DoGracefulShutdown: closesocket failed: %d\n", + WSAGetLastError()); + goto CLEANUP; + } + DEBUG_OUT(("closesocket() finish.\n")); + + printf("Socket %d has been closed gracefully\n", sock); + sock = INVALID_SOCKET; + bRetVal = TRUE; + +CLEANUP: + + if (hEvent != WSA_INVALID_EVENT) + { + WSACloseEvent(hEvent); + hEvent = WSA_INVALID_EVENT; + } + + if (sock != INVALID_SOCKET) + { + fprintf(stderr, "DoGracefulShutdown: Can't close socket gracefully. " + "So, closing it anyway ... \n"); + closesocket(sock); + sock = INVALID_SOCKET; + } + + return bRetVal; +} + diff --git a/branches/Ndi/tools/coinstaller/dirs b/branches/Ndi/tools/coinstaller/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tools/coinstaller/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/coinstaller/user/IBInstaller.rc b/branches/Ndi/tools/coinstaller/user/IBInstaller.rc new file mode 100644 index 00000000..572cbfb6 --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/IBInstaller.rc @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "resource.h" + +#define APSTUDIO_READONLY_SYMBOLS +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 2 resource. +// +#include "afxres.h" + +///////////////////////////////////////////////////////////////////////////// +#undef APSTUDIO_READONLY_SYMBOLS + +///////////////////////////////////////////////////////////////////////////// +// English (U.S.) resources + +#if !defined(AFX_RESOURCE_DLL) || defined(AFX_TARG_ENU) +#ifdef _WIN32 +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) +#endif //_WIN32 + +#ifdef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// TEXTINCLUDE +// + +1 TEXTINCLUDE +BEGIN + "resource.h\0" +END + +2 TEXTINCLUDE +BEGIN + "#include ""afxres.h""\r\n" + "\0" +END + +3 TEXTINCLUDE +BEGIN + "\r\n" + "\0" +END + +#endif // APSTUDIO_INVOKED + + +///////////////////////////////////////////////////////////////////////////// +// +// Version +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION 1,0,0,0 + PRODUCTVERSION 1,0,0,0 + FILEFLAGSMASK 0x17L +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x4L + FILETYPE 0x2L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "CompanyName", "SilverStorm Technologies, Inc." + VALUE "FileDescription", "InfiniBand HCA Class Co-installer" + VALUE "FileVersion", "1, 0, 0, 0" + VALUE "InternalName", "IbInstaller" + VALUE "LegalCopyright", "Copyright© 2003 SilverStorm Technologies, Inc. All rights reserved." + VALUE "OriginalFilename", "IbInstaller.dll" + VALUE "ProductName", "InfiniBand HCA Class Co-installer" + VALUE "ProductVersion", "1, 0, 0, 0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END + +#endif // English (U.S.) resources +///////////////////////////////////////////////////////////////////////////// + + + +#ifndef APSTUDIO_INVOKED +///////////////////////////////////////////////////////////////////////////// +// +// Generated from the TEXTINCLUDE 3 resource. +// + + +///////////////////////////////////////////////////////////////////////////// +#endif // not APSTUDIO_INVOKED + diff --git a/branches/Ndi/tools/coinstaller/user/IbInstaller.cpp b/branches/Ndi/tools/coinstaller/user/IbInstaller.cpp new file mode 100644 index 00000000..5ee73e3a --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/IbInstaller.cpp @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +// IBInstaller.cpp : Defines the entry point for the DLL application. +// + +#include "stdafx.h" + +wchar_t debug_buf[256]; + +// IB Fabric device HW ID +#define GUID_IB_BUS_HW_ID TEXT("{94f41ced-78eb-407c-b5df-958040af0fd8}") + +#define DEVICE_DESC TEXT("InfiniBand Fabric") + +// System Class GUID (from wdmguid.h) +//{4D36E97D-E325-11CE-BFC1-08002BE10318} +static const GUID GUID_CLASS_SYSTEM = +{ 0x4D36E97D, 0xE325, 0x11CE, {0xBF, 0xC1, 0x08, 0x00, 0x2B, 0xE1, 0x03, 0x18 } }; + + +BOOL APIENTRY DllMain( + HANDLE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved ) +{ + UNREFERENCED_PARAMETER( hModule ); + UNREFERENCED_PARAMETER( ul_reason_for_call ); + UNREFERENCED_PARAMETER( lpReserved ); + return TRUE; +} + + +// +// Checks the installed devices, looking for an instance of the bus root. +// +DWORD NeedInstall() +{ + HDEVINFO hDevList; + BOOL bSuccess; + SP_DEVINFO_DATA devInfo; + DWORD i; + TCHAR buf[128]; + + memset( &devInfo, 0, sizeof(SP_DEVINFO_DATA) ); + devInfo.cbSize = sizeof(SP_DEVINFO_DATA); + + OutputDebugString( + TEXT("[IbInstaller]Checking for existance of IB Fabric Root device.\n") ); + + // Get all devices of the system class. + hDevList = SetupDiGetClassDevs( &GUID_CLASS_SYSTEM, 0, NULL, 0 ); + if( hDevList == INVALID_HANDLE_VALUE ) + { + swprintf( debug_buf,L"[IbInstaller] Failed to get system class dev info list Error %d\n",GetLastError()); + OutputDebugString( debug_buf ); + return GetLastError(); + } + + // Enumerate until we find our device. If the device exists, we + // exit. + i = 0; + do + { + // Get the next device. + bSuccess = SetupDiEnumDeviceInfo( hDevList, i++, &devInfo ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiEnumDeviceInfo failed.\n") ); + break; + } + + // Get the device's description. + bSuccess = SetupDiGetDeviceRegistryProperty( hDevList, &devInfo, + SPDRP_HARDWAREID, NULL, (BYTE*)buf, sizeof(buf), NULL ); + if( !bSuccess ) + { + // Device has no HW ID. + OutputDebugString( + TEXT("[IbInstaller]SetupDiGetDeviceRegistryProperty failed.\n") ); + // Skip to the next. + bSuccess = TRUE; + continue; + } + + // Compare to our device description. + if( _tcscmp( buf, GUID_IB_BUS_HW_ID ) ) + continue; + + // The device is already installed. + SetupDiDestroyDeviceInfoList( hDevList ); + OutputDebugString( TEXT("[IbInstaller]IB Fabric Root device already exists.\n") ); + return ERROR_ALREADY_EXISTS; + + } while( bSuccess ); + + return ERROR_SUCCESS; +} + + +DWORD SelectDriver( + IN HDEVINFO hDevList, + IN SP_DEVINFO_DATA *pDevInfo, + OUT SP_DRVINFO_DATA *pDrvInfo ) +{ + DWORD i; + BOOL bSuccess; + + + // Get a list of drivers. + bSuccess = + SetupDiBuildDriverInfoList( hDevList, pDevInfo, SPDIT_CLASSDRIVER ); + if( !bSuccess ) + { + swprintf( debug_buf,L"[IbInstaller] SetupDiBuildDriverInfoList failed Error %d\n",GetLastError()); + OutputDebugString( debug_buf ); + return GetLastError(); + } + + // Set the size of the structure properly. 
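+	// (The SetupDi enumeration calls validate cbSize and fail with an
+	// invalid-parameter class error if it is not filled in first.)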
+ pDrvInfo->cbSize = sizeof(SP_DRVINFO_DATA); + + // Enumerate all drivers, looking for the correct description. + i = 0; + do + { + + bSuccess = SetupDiEnumDriverInfo( hDevList, pDevInfo, + SPDIT_CLASSDRIVER, i++, pDrvInfo ); + if( !bSuccess ) + { + swprintf( debug_buf,L"[IbInstaller] SetupDiEnumDriverInfo failed Error %d\n",GetLastError()); + OutputDebugString( debug_buf ); + break; + } + + // make the string + swprintf( debug_buf,TEXT("[IbInstaller] pDrvInfo->Description %s\n"),pDrvInfo->Description); + OutputDebugString( debug_buf ); + + if( _tcscmp( pDrvInfo->Description, DEVICE_DESC ) ) + continue; + + // Found it! + OutputDebugString( TEXT("[IbInstaller]Found our driver!\n") ); + return ERROR_SUCCESS; + + } while( bSuccess ); + + return ERROR_NOT_FOUND; +} + + +DWORD +CreateIbBusRoot( + IN PCTSTR driverPath ) +{ + HDEVINFO hDevList; + BOOL bSuccess; + SP_DEVINFO_DATA devInfo; + SP_DRVINFO_DATA drvInfo; + LONG status; + SP_DEVINSTALL_PARAMS installParams; + + + memset( &devInfo, 0, sizeof(SP_DEVINFO_DATA) ); + devInfo.cbSize = sizeof(SP_DEVINFO_DATA); + + OutputDebugString( TEXT("[IbInstaller]Creating IB Fabric Root device.\n") ); + + // Create a list for devices of the system class. + hDevList = SetupDiCreateDeviceInfoList( &GUID_CLASS_SYSTEM, NULL ); + if( hDevList == INVALID_HANDLE_VALUE ) + { + OutputDebugString( TEXT("[IbInstaller]Failed to create dev info list.\n") ); + return GetLastError(); + } + + // Create the device. + bSuccess = SetupDiCreateDeviceInfo( hDevList, TEXT("SYSTEM"), + &GUID_CLASS_SYSTEM, DEVICE_DESC, + NULL, DICD_GENERATE_ID, &devInfo ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiCreateDeviceInfo failed.\n") ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + // Setup the HW ID for the device. + bSuccess = SetupDiSetDeviceRegistryProperty( hDevList, &devInfo, + SPDRP_HARDWAREID, (BYTE*)GUID_IB_BUS_HW_ID, sizeof(GUID_IB_BUS_HW_ID) ); + if( !bSuccess ) + { + OutputDebugString( + TEXT("[IbInstaller]SetupDiSetDeviceRegistryProperty failed.\n") ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + // Setup the install path. + ZeroMemory( &installParams, sizeof(installParams) ); + installParams.cbSize = sizeof(installParams); + _tcsncpy( installParams.DriverPath, driverPath, MAX_PATH ); + + bSuccess = + SetupDiSetDeviceInstallParams( hDevList, &devInfo, &installParams ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiSetDeviceInstallParams failed.\n") ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + status = SelectDriver( hDevList, &devInfo, &drvInfo ); + if( status != ERROR_SUCCESS ) + { + OutputDebugString( TEXT("[IbInstaller]Could not find driver.\n") ); + SetupDiDestroyDriverInfoList( hDevList, &devInfo, SPDIT_CLASSDRIVER ); + SetupDiDestroyDeviceInfoList( hDevList ); + return status; + } + + // Select the device. + bSuccess = SetupDiSetSelectedDevice( hDevList, &devInfo ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiSetSelectedDevice failed.\n") ); + SetupDiDestroyDriverInfoList( hDevList, &devInfo, SPDIT_CLASSDRIVER ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + // Select the driver. 
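+    // The Description compared above comes from the driver's INF: the
+    // match works only because the INF describes the device with
+    // exactly the DEVICE_DESC string.  An illustrative INF fragment
+    // (section and string names assumed; see ib_bus.inf in this branch
+    // for the real one):
+    //
+    //     [Ibal.DeviceSection]
+    //     %Ibal.DeviceDesc% = IbBusRoot, {94f41ced-78eb-407c-b5df-958040af0fd8}
+    //
+    //     [Strings]
+    //     Ibal.DeviceDesc = "InfiniBand Fabric"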
+ bSuccess = SetupDiSetSelectedDriver( hDevList, &devInfo, &drvInfo ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiSetSelectedDriver failed.\n") ); + SetupDiDestroyDriverInfoList( hDevList, &devInfo, SPDIT_CLASSDRIVER ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + // Register the device (since it is non-PnP). + bSuccess = SetupDiRegisterDeviceInfo( hDevList, &devInfo, SPRDI_FIND_DUPS, + NULL, NULL, NULL ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiRegisterDeviceInfo failed.\n") ); + SetupDiDestroyDriverInfoList( hDevList, &devInfo, SPDIT_CLASSDRIVER ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + // Install the device (copies the files and starts it). + bSuccess = SetupDiInstallDevice( hDevList, &devInfo ); + if( !bSuccess ) + { + OutputDebugString( TEXT("[IbInstaller]SetupDiInstallDevice failed.\n") ); + SetupDiDestroyDriverInfoList( hDevList, &devInfo, SPDIT_CLASSDRIVER ); + SetupDiDestroyDeviceInfoList( hDevList ); + return GetLastError(); + } + + return ERROR_SUCCESS; +} + + +UINT CALLBACK +IbFileCallback( + IN PVOID Context, + IN UINT Notification, + IN UINT_PTR Param1, + IN UINT_PTR Param2 ) +{ + TCHAR *pPath; + FILEPATHS *pFileInfo; + + UNREFERENCED_PARAMETER( Param2 ); + + if( Notification != SPFILENOTIFY_QUEUESCAN_EX ) + return 0; + + pPath = (TCHAR*)Context; + pFileInfo = (FILEPATHS*)Param1; + + // Copy the source path of the file to the path. + if( pFileInfo->Source ) + _tcsncpy( pPath, pFileInfo->Source, MAX_PATH ); + + return 0; +} + +extern "C" +{ + + +HRESULT +IbCoInstaller( + IN DI_FUNCTION InstallFunction, + IN HDEVINFO DeviceInfoSet, + IN PSP_DEVINFO_DATA DeviceInfoData OPTIONAL, + IN OUT PCOINSTALLER_CONTEXT_DATA Context ) +{ + SP_DEVINSTALL_PARAMS InstallParams; + DWORD result; + BOOL b; + TCHAR path[MAX_PATH]; + size_t nEnd; + + UNREFERENCED_PARAMETER( Context ); + + // The file queue is valid on the DIF_INSTALLDEVICE, so trap that + // code and extract the install path. + if( InstallFunction != DIF_INSTALLDEVICE ) + return NO_ERROR; + + // First find out if we need to install the transport. + result = NeedInstall(); + if( result != ERROR_SUCCESS ) + { + if( result == ERROR_ALREADY_EXISTS ) + return NO_ERROR; + else + return result; + } + + // Extract the file path from the file queue. + // First get the file queue (it's in the install parameters). + memset( &InstallParams, 0, sizeof(InstallParams) ); + InstallParams.cbSize = sizeof(InstallParams); + + // Get the installation parameters. + b = SetupDiGetDeviceInstallParams( DeviceInfoSet, DeviceInfoData, + &InstallParams ); + if( !b ) + return GetLastError(); + + // If there isn't a file queue, abort the installation. + if( !InstallParams.FileQueue ) + return ERROR_DI_DONT_INSTALL; + + // Scan the file queue. The callback will copy the file name to our path. + SetupScanFileQueue( InstallParams.FileQueue, SPQ_SCAN_USE_CALLBACKEX, NULL, + IbFileCallback, &path, &result ); + if( result ) + return result; + + // Strip the file name from the path. + nEnd = _tcslen( path ); + while( path[nEnd] != '\\' ) + nEnd--; +/* + NOTE: no need to strip the platform directoty it was removed for WHQL + // Skip the slash. + nEnd--; + // Strip the platform subdir name from the path. + while( path[nEnd] != '\\' ) + nEnd--; +*/ + path[nEnd] = _T('\0'); + + swprintf( debug_buf ,L"[IbInstaller] path %s\n",path); + OutputDebugString( debug_buf ); + + + // Create the bus root. 
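+    // For context (illustrative, not guaranteed to match this branch's
+    // INF): a co-installer like this one is registered under the device
+    // class's CoInstallers32 value, e.g. via an AddReg line such as
+    //
+    //     HKR,,CoInstallers32,0x00010000,"IbInstaller.dll,IbCoInstaller"
+    //
+    // which is how Setup knows to call IbCoInstaller() for each DIF_*
+    // phase; only DIF_INSTALLDEVICE is handled here.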
+ result = CreateIbBusRoot( path ); + if( result != ERROR_SUCCESS ) + return result; + + return NO_ERROR; +} + +} // extern "C" + diff --git a/branches/Ndi/tools/coinstaller/user/IbInstaller.def b/branches/Ndi/tools/coinstaller/user/IbInstaller.def new file mode 100644 index 00000000..80318468 --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/IbInstaller.def @@ -0,0 +1,4 @@ +LIBRARY IbInstaller.dll + +EXPORTS + IbCoInstaller diff --git a/branches/Ndi/tools/coinstaller/user/SOURCES b/branches/Ndi/tools/coinstaller/user/SOURCES new file mode 100644 index 00000000..b50482c2 --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/SOURCES @@ -0,0 +1,18 @@ +TARGETNAME=IbInstaller +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE = DYNLINK + +SOURCES= IbInstaller.rc \ + IbInstaller.cpp + +PRECOMPILED_INCLUDE=stdafx.h +PRECOMPILED_CXX=1 + +USE_MSVCRT=1 + +C_DEFINES=$(C_DEFINES) -DUNICODE -D_UNICODE + +TARGETLIBS=$(SDK_LIB_PATH)\setupapi.lib \ + $(SDK_LIB_PATH)\kernel32.lib + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/tools/coinstaller/user/makefile b/branches/Ndi/tools/coinstaller/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/coinstaller/user/resource.h b/branches/Ndi/tools/coinstaller/user/resource.h new file mode 100644 index 00000000..d04b714d --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/resource.h @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +//{{NO_DEPENDENCIES}} +// Microsoft Visual C++ generated include file. 
+// Used by IbInstaller1.rc + +// Next default values for new objects +// +#ifdef APSTUDIO_INVOKED +#ifndef APSTUDIO_READONLY_SYMBOLS +#define _APS_NEXT_RESOURCE_VALUE 101 +#define _APS_NEXT_COMMAND_VALUE 40001 +#define _APS_NEXT_CONTROL_VALUE 1001 +#define _APS_NEXT_SYMED_VALUE 101 +#endif +#endif diff --git a/branches/Ndi/tools/coinstaller/user/stdafx.cpp b/branches/Ndi/tools/coinstaller/user/stdafx.cpp new file mode 100644 index 00000000..0fa15e8b --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/stdafx.cpp @@ -0,0 +1,40 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +// stdafx.cpp : source file that includes just the standard includes +// IbInstaller.pch will be the pre-compiled header +// stdafx.obj will contain the pre-compiled type information + +#include "stdafx.h" + +// TODO: reference any additional headers you need in STDAFX.H +// and not in this file diff --git a/branches/Ndi/tools/coinstaller/user/stdafx.h b/branches/Ndi/tools/coinstaller/user/stdafx.h new file mode 100644 index 00000000..64fa5537 --- /dev/null +++ b/branches/Ndi/tools/coinstaller/user/stdafx.h @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +// stdafx.h : include file for standard system include files, +// or project specific include files that are used frequently, but +// are changed infrequently +// + +#pragma once + +#ifndef WIN32_LEAN_AND_MEAN +#define WIN32_LEAN_AND_MEAN // Exclude rarely-used stuff from Windows headers +#endif + +// Windows Header Files: +#include + +// TODO: reference additional headers your program requires here +#include +#include +#include +#include diff --git a/branches/Ndi/tools/dirs b/branches/Ndi/tools/dirs new file mode 100644 index 00000000..50d31f79 --- /dev/null +++ b/branches/Ndi/tools/dirs @@ -0,0 +1,12 @@ +DIRS=\ + coinstaller \ + fwupdate \ + wsdinstall \ + vstat \ + mtcr \ + flint \ + mread \ + mwrite \ + mst \ + spark \ + perftests diff --git a/branches/Ndi/tools/flint/dirs b/branches/Ndi/tools/flint/dirs new file mode 100644 index 00000000..5a7e8b31 --- /dev/null +++ b/branches/Ndi/tools/flint/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/flint/user/SOURCES b/branches/Ndi/tools/flint/user/SOURCES new file mode 100644 index 00000000..0696bfd8 --- /dev/null +++ b/branches/Ndi/tools/flint/user/SOURCES @@ -0,0 +1,63 @@ +TARGETNAME=flint +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 +USE_NTDLL=1 + + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. +!endif + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + + +ZLIB=$(WINIBHOME)\tools\ext_libs\user\zlib-1.1.4 + + +SOURCES=flint.rc \ + flint.cpp + + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\tools\mtcr\user; \ + $(ZLIB)\include; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(CRT_LIB_PATH)\msvcrt.lib \ + $(SDK_LIB_PATH)\Ws2_32.lib\ + $(TARGETPATH)\*\mtcr.lib +!else + $(CRT_LIB_PATH)\msvcrtd.lib\ + $(SDK_LIB_PATH)\Ws2_32.lib\ + $(TARGETPATH)\*\mtcr.lib +!endif + +USER_C_FLAGS=$(USER_C_FLAGS) /Ze /EHsc + +# TODO:Should I define the __WIN__ manually +C_DEFINES=$(C_DEFINES) -D__WIN__ -DZEXPORT=__cdecl + +C_DEFINES=$(C_DEFINES) -DNO_ZLIB + + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG +!endif + +# Version: +!if !defined(MFT_BLD_VER) +MFT_BLD_VER=Devel +!endif +C_DEFINES=$(C_DEFINES) "-DVERSION_ID=$(MFT_BLD_VER)" + +386_STDCALL=0 + +MSC_WARNING_LEVEL= /W3 + diff --git a/branches/Ndi/tools/flint/user/flint.cpp b/branches/Ndi/tools/flint/user/flint.cpp new file mode 100644 index 00000000..e9326632 --- /dev/null +++ b/branches/Ndi/tools/flint/user/flint.cpp @@ -0,0 +1,6451 @@ +/* + * + * flint.cpp - FLash INTerface + * + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * Version: $Id$ + * + */ + +//MTCR needs to be first since it needs to define all kind of +//macros which affect standard headers. + +#include "mtcr.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef NO_ZLIB +#include +#endif + +#include + +#ifndef __WIN__ + +// +// Linux +// + +#include +#include +#include +#include +#include + +#else // __WIN__ + +// +// Windows (Under DDK) +// + +#include +#include + +// Sleep adaptor +#define usleep(x) Sleep((x)/1000) +#define sleep(x) Sleep((x)*1000) + +#define vsnprintf _vsnprintf +#define strtoull _strtoui64 +#define isatty _isatty + +#define COMP_CDECL __cdecl + +#define __LITTLE_ENDIAN 1234 +#define __BIG_ENDIAN 4321 +#define __BYTE_ORDER __LITTLE_ENDIAN + + +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define bswap_32(x) ntohl(x) +#else +#error windows is assumed to run a on little endian architecture +#endif + +#endif // __WIN__ + +#include +#include + +#ifndef DEV_MST_EXAMPLE1 + #define DEV_MST_EXAMPLE1 "/dev/mst/mt23108_pci_cr0" +#endif + +#ifndef DEV_MST_EXAMPLE2 + #define DEV_MST_EXAMPLE2 "/dev/mst/mt23108_pciconf0" +#endif + +#ifndef FLINT_NAME + #ifdef __GNUC__ + #define FLINT_NAME "%1$s" + #else + #define FLINT_NAME "./flint" + #endif +#endif + +namespace std {}; using namespace std; + +#ifdef VERSION_ID +//char* _versionID = VERSION_ID ; +#define __VFSTR(x) #x +#define _VFSTR(x) __VFSTR(x) +char* _versionID = _VFSTR( VERSION_ID ) ; +#else +char* _versionID = "VERSION_ID_HERE"; +#endif + +char* _svnID = "$Revision$"; + +#ifndef __be32_to_cpu + #define __be32_to_cpu(x) ntohl(x) + #ifndef bswap_32 + #define bswap_32(x) (htonl(x)) + #endif +#endif +#ifndef __cpu_to_be32 + #define __cpu_to_be32(x) htonl(x) +#endif + +#if __BYTE_ORDER == __LITTLE_ENDIAN + #ifndef __cpu_to_le32 + #define __cpu_to_le32(x) (x) + #endif + #ifndef __le32_to_cpu + #define __le32_to_cpu(x) (x) + #endif +#elif __BYTE_ORDER == __BIG_ENDIAN + #ifndef __cpu_to_le32 + #define __cpu_to_le32(x) bswap_32(x) + #endif + #ifndef __le32_to_cpu + #define __le32_to_cpu(x) bswap_32(x) + #endif +#else + #ifndef __cpu_to_le32 + #define __cpu_to_le32(x) bswap_32(__cpu_to_be32(x)) + #endif + #ifndef __le32_to_cpu + #define __le32_to_cpu(x) __be32_to_cpu(bswap_32(x)) + #endif +#endif + +#if __BYTE_ORDER == __LITTLE_ENDIAN + #ifndef __cpu_to_le16 + #define __cpu_to_le16(x) (x) + #endif + #ifndef __le16_to_cpu + #define __le16_to_cpu(x) (x) + #endif +#elif __BYTE_ORDER == __BIG_ENDIAN + #ifndef __cpu_to_le16 + #define __cpu_to_le16(x) bswap_16(x) + #endif + #ifndef __le16_to_cpu + #define __le16_to_cpu(x) bswap_16(x) + #endif +#else + #ifndef __cpu_to_le16 + #define __cpu_to_le16(x) bswap_16(__cpu_to_be16(x)) + #endif + #ifndef __le16_to_cpu + #define __le16_to_cpu(x) __be16_to_cpu(bswap_16(x)) + #endif +#endif + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// Miscellaneous global stuff // +// 
****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +typedef struct guid { + u_int32_t h; + u_int32_t l; +} guid_t; + +static inline void be_guid_to_cpu(guid_t* to, guid_t* from) { + to->h=__be32_to_cpu(from->h); + to->l=__be32_to_cpu(from->l); +} + +static inline void cpu_to_be_guid(guid_t* to, guid_t* from) { + to->h=__cpu_to_be32(from->h); + to->l=__cpu_to_be32(from->l); +} + +#define GUID_FORMAT "%8.8x%8.8x" +#define TOCPU1(s) s = __be32_to_cpu(s) +#define TOCPU(s) do { \ + u_int32_t *p = (u_int32_t *)(s); \ + for (u_int32_t ii=0; ii %08x\n", (u_int32_t)offs, (u_int32_t)(*(val))); \ + } while (0) + +#endif + +class ErrMsg +{ +public: + ErrMsg() : _err(0) { } + ~ErrMsg() { err_clear(); } + const char *err() const { return _err; } + void err_clear() { delete [] _err; _err = 0; } + +protected: + + char *vprint(const char *format, va_list args) + { + const int INIT_VAL = 1024; + int max_str, max_buf = INIT_VAL; + char *out_buf; + + while (1) + { + out_buf = new char[max_buf]; + max_str = max_buf - 1; + + if (vsnprintf(out_buf, max_str, format, args) < max_str) + return out_buf; + delete [] out_buf; + max_buf *= 2; + } + } + + + bool errmsg(const char *format, ...) +#ifdef __GNUC__ + __attribute__ ((format (printf, 2, 3))) +#endif + ; + +private: + + char *_err; +}; + + +bool ErrMsg::errmsg(const char *format, ...) { + va_list args; + + char* prev_err = _err; + + va_start(args, format); + _err = vprint(format, args); + va_end(args); + + delete[] prev_err; + + return false; +} + + +enum { + SIGNATURE=0x5a445a44 +}; +struct PS { + u_int32_t fi_addr; + u_int32_t fi_size; + u_int32_t signature; + u_int32_t fw_reserved[5]; + u_int32_t vsd[52]; + u_int32_t psid[4]; + u_int32_t branch_to; + u_int32_t crc016; +}; +enum { + H_FIRST = 1, + H_DDR = 1, + H_CNF = 2, + H_JMP = 3, + H_EMT = 4, + H_ROM = 5, + H_GUID = 6, + H_BOARD_ID = 7, + H_USER_DATA = 8, + H_FW_CONF = 9, + H_IMG_INFO = 10, + H_LAST = 10 +}; + +struct GPH { + u_int32_t type; + u_int32_t size; + u_int32_t param; + u_int32_t next; +}; + +const u_int32_t BOARD_ID_BSN_LEN=64; +const u_int32_t BOARD_ID_BID_LEN=32; +const u_int32_t BOARD_ID_PID=7; + +struct BOARD_ID { + char bsn[BOARD_ID_BSN_LEN]; + char bid[BOARD_ID_BID_LEN]; +}; + +int const VSD_LEN = 208; +int const PSID_LEN = 16; + +// +// TODO: Remove the below globals to class members. +// +bool _print_crc = false; +bool _silent = false; +bool _assume_yes = false; +bool _image_is_full; +bool _no_erase = false; +bool _no_burn = false; + +bool _unlock_bypass = false; + +bool _byte_write = false; + + +void report(const char *format, ...) +#ifdef __GNUC__ +__attribute__ ((format (printf, 1, 2))) +#endif +; +void report(const char *format, ...) +{ + va_list args; + + if (!_silent) { + va_start(args, format); + vprintf(format, args); + va_end(args); + } +} // report + +void report_erase(const char *format, ...) 
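+// report_erase(): formats the same string that was just printed via
+// report() and emits one backspace per resulting character, rubbing
+// the progress text out of the console.  Typical pairing (illustrative
+// values):
+//
+//     report("%08x/%08x", addr, total);        // show progress
+//     ...
+//     report_erase("%08x/%08x", addr, total);  // erase it again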
+{ + va_list args; + char buf[256]; + int i; + int len; + + if (_silent) + return; + + va_start(args, format); + vsnprintf(buf, sizeof buf, format, args); + va_end(args); + + len = strlen(buf); + for(i=0; i < len; ++i) + printf("\b"); +} // report_erase + +static u_int32_t log2up (u_int32_t in) { + u_int32_t i; + for (i = 0; i < 32; i++) { + if (in <= (u_int32_t)(1 << i)) + break; + } + + return i; +} + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// CRC16 CALCULATION // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +class Crc16 { +public: + Crc16(bool d = false) : _debug(d) { clear();} + u_int16_t get() { return _crc;} + void clear() { _crc = 0xffff;} + void operator<<(u_int32_t val) { add(val);} + void add(u_int32_t val); + void finish(); +private: + u_int16_t _crc; + bool _debug; +}; + +//////////////////////////////////////////////////////////////////////// +void Crc16::add(u_int32_t o) +{ + if (_debug) + printf("Crc16::add(%08x)\n", o); + for (int i=0; i<32; i++) { + if (_crc & 0x8000) + _crc = (u_int16_t) ((((_crc<<1) | (o>>31)) ^ 0x100b) & 0xffff); + else + _crc= (u_int16_t) (((_crc<<1) | (o>>31)) & 0xffff); + o = (o<<1) & 0xffffffff; + } +} // Crc16::add + + +//////////////////////////////////////////////////////////////////////// +void Crc16::finish() +{ + for (int i=0; i<16; i++) { + if (_crc & 0x8000) + _crc=((_crc<<1) ^ 0x100b) & 0xffff; + else + _crc=(_crc<<1) & 0xffff; + } + + // Revert 16 low bits + _crc = _crc ^ 0xffff; + +} // Crc16::finish + + +////////////////////////////////////////////////////////////////////// +// +// class u_int32_ba (bit access): +// A uint wrapper which allows easy access to bit/range of bits. +// +// Usage example: +// u_int32_ba a; +// Read_Word( Table.reg ,&a); +// int upper_byte = a.range(31,24); +// if (a[15]) +// cout << " Bit 15 is 1 \n"; +// else +// cout << " Bit 15 is 0 \n"; +// +// u_int32_ba b; +// b.range(15,12) = 0xa; +// b[31] = 1; // b == 0x8000a000 +// Write_Word( Table.reg ,b); +// +////////////////////////////////////////////////////////////////////// + + +class u_int32_ba { +public: + u_int32_ba(u_int32_t i = 0) : + _bits(i), + _rbits(_bits), + _sptr(0), + _eptr(31) {} + + u_int32_ba operator[](u_int32_t idx) {return range((u_int8_t)idx,(u_int8_t)idx);} + u_int32_ba& operator= (u_int32_t i) {_rbits = ((i << _sptr) & mask()) | (_rbits & ~mask()); return *this;} + u_int32_t* operator& () {return &_bits;} + operator u_int32_t () {return((mask() & _rbits) >> _sptr);} + + u_int32_ba range (u_int8_t eptr, + u_int8_t sptr) {return u_int32_ba(*this,eptr,sptr);} + +private: + u_int32_ba(u_int32_ba& other, u_int8_t eptr, u_int8_t sptr) : + _bits(other._bits), + _rbits(other._bits), + _sptr(sptr), + _eptr(eptr) {} + + u_int32_t mask () { + u_int32_t s_msk = (u_int32_t)-1; // start mask + u_int32_t e_msk = (u_int32_t)-1; // end mask + + s_msk = (s_msk << _sptr); + e_msk = (_eptr >= (sizeof(_bits)*8-1)) ? 
e_msk : ~(e_msk << (_eptr+1)); + + return(s_msk & e_msk); + }; + + u_int32_t _bits; + u_int32_t& _rbits; + + u_int8_t _sptr; + u_int8_t _eptr; +}; + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// FLASH ACCESS // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// + +// Common base class for Flash and for FImage +class FBase : public ErrMsg{ +public: + FBase() {} + virtual ~FBase() {} + + virtual bool open(const char *, bool) {return false;} + virtual void close() = 0; + virtual bool read(u_int32_t addr, u_int32_t *data) = 0; + virtual bool read(u_int32_t addr, void *data, int len, + bool verbose=false) = 0; + + virtual u_int32_t get_sector_size() = 0; + virtual u_int32_t get_size() = 0; + + enum { + MAX_FLASH = 4*1048576 + }; +}; + +// Flash image (RO) +class FImage : public FBase { +public: + FImage() : _buf(0) {} + virtual ~FImage() { close();} + + u_int32_t *getBuf() { return _buf;} + u_int32_t getBufLength() { return _len;} + virtual bool open(const char *fname, bool read_only = false); + virtual void close(); + virtual bool read(u_int32_t addr, u_int32_t *data); + virtual bool read(u_int32_t addr, void *data, int len, bool verbose=false); + + virtual u_int32_t get_sector_size(); + virtual u_int32_t get_size() { return getBufLength();} + +private: + u_int32_t *_buf; + u_int32_t _len; +}; + +// Flash access (R/W) + +// +// Flash classes heirarchy: +// +// +// Flash { CmdSet (abstract) } +// | +// | +// +----> ParallelFlash { CmdSetAmd, CmdSetIntel } +// | +// | +// |----> SerialFlash +// | +// +--------> SpiFlash { CmdSetStSpi } +// | +// +- - - - > LpcFlash (currently not uset - not implemented) +// +// +// Flash Interface: +// - open +// - close +// - read +// - detect type (and allocate CmdSet accordingly) +// +// +// Flash Class HAS A command set. +// +// CmdSet Interface: +// - write +// - erase_sector +// +// Q: Why is diferentiation needed for both flash type and CmdSet ? +// A: Read operation is done in the same way for all flash devices of +// the same type (serial or parallel). This is a basic requirement +// from the flash, since the HCA HW reads from flash at boot, and +// the way it reads can not be changed. +// However, Write and Erase operations protocol varies between flash +// vendors. +// The term 'command set' is taken from the Common Flash Interface (CFI) +// specification. +// +// +// Flash Allocation flow: +// +// 1. Main checks device type, and allocates Flash sub class accordingly. +// 2. In Flash.open() , get_cmd_set() is called. this function checks flash +// type, gets flash attributes (size, sector size) and allocates CmdSet. +// +// + + + +// +// +// +class Flash : public FBase { +public: + Flash(u_int32_t log2_bank_size) : + _mf(0), + _cmd_set(NULL), + _curr_bank(0xffffffff), + _log2_bank_size(log2_bank_size) + {} + + virtual ~Flash() { close();}; + + // FBase Interface + + virtual bool open (const char *device, + bool force_lock = false, + bool read_only = false); + + virtual void close (); + + virtual bool read (u_int32_t addr, + u_int32_t *data) = 0; + + virtual bool read (u_int32_t addr, + void* data, + int len, + bool verbose = false); + // + // Flash Interface + // + + virtual u_int32_t + get_sector_size () { return _get_sector_size(); } + + virtual u_int32_t + get_size () {return _cfi_data.device_size ? 
_cfi_data.device_size : (u_int32_t)MAX_FLASH;} + + + virtual bool wait_ready (const char* msg = NULL) = 0; + + + // Write and Erase functions are performed by the Command Set + + virtual bool erase_sector (u_int32_t addr) {if (!set_bank(addr)) return false;return _cmd_set->erase_sector(addr);} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false); + + virtual bool write (u_int32_t addr, + u_int32_t data); + + bool print_cfi_info(); + + + enum { + TRANS = 4096 + }; + +#ifndef _MSC_VER +protected: +#endif + + // FLASH constants + enum FlashConstant { + FLASH_CMD_CNT = 5000, // Number of reads till flash cmd is zeroed + ERASE_DELAY = 200000, // Delay between reads when wating for sector erase + ERASE_CNT = 80, // Maximal number of reads when wating for sector erase + READ_CNT_FAST = 5000, // Number of fast reads after write byte + READ_CNT_SLOW = 50, // Number of slow reads after write byte + READ_DELAY = 100000, // Delay between slow reads after write byte + WR_REPORT_FAST = 256, // Report frequency when write (fast interfaces) + WR_REPORT_SLOW = 4, // Report frequency when write (slow interfaces) + RD_REPORT_FAST = 4096, // Report frequency when read (fast interfaces) + RD_REPORT_SLOW = 64, // Report frequency when read (slow interfaces) + GPIO_SEM_TRIES = 10240, // Number of tries to obtain a GPIO sem. + MAX_WRITE_BUFFER_SIZE = 32// Max buffer size for buffer write devices + }; + + + enum CrAddres { + GPIO_DIR_L = 0xf008c, + GPIO_POL_L = 0xf0094, + GPIO_MOD_L = 0xf009c, + GPIO_DAT_L = 0xf0084, + GPIO_DATACLEAR_L = 0xf00d4, + GPIO_DATASET_L = 0xf00dc, + + SEMAP63 = 0xf03fc + }; + + virtual bool lock (bool retry=true); + virtual bool unlock (); + + virtual bool init_gpios () = 0; + + virtual bool get_cmd_set () = 0; + + bool set_bank (u_int32_t addr); + + virtual bool write_internal(u_int32_t addr, + u_int8_t data) = 0; + + bool write_internal(u_int32_t addr, + u_int8_t* data, + u_int32_t cnt); + + class CmdSet { + public: + CmdSet () : _curr_sector(0xffffffff) {} + virtual ~CmdSet () {} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false) = 0; + + virtual bool erase_sector (u_int32_t addr) = 0; + + virtual bool reset () = 0; + + protected: + + u_int32_t _curr_sector; + }; + + + // + // This structure holds all CFI query information as defined + // in the JEDEC standard. All information up to + // primary_extended_query is standard among all amnufactures + // with CFI enabled devices. 
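+    // How a CFI query is performed, in outline (offsets per the JEDEC
+    // CFI spec, word mode; byte-mode parts double them).  A sketch
+    // with hypothetical read8/write8 helpers; the real sequence lives
+    // in ParallelFlash::get_cfi():
+    //
+    //     write8(0x55, 0x98);                  // enter CFI query mode
+    //     // 'Q','R','Y' at 0x10..0x12 confirms CFI support
+    //     // primary command set id at 0x13..0x14 (1=Intel, 2=AMD)
+    //     u_int32_t size = 1 << read8(0x27);   // device size = 2^n bytes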
+ // + + struct cfi_query { + cfi_query() { + memset(this, 0, sizeof(*this)); + } + u_int8_t manuf_id; + u_int8_t device_id; + + char query_string[4]; // Should be 'QRY' + u_int16_t oem_command_set; // Command set + u_int16_t primary_table_address; // Addy of entended table + u_int16_t alt_command_set; // Alt table + u_int16_t alt_table_address; // Alt table addy + float vcc_min; // Vcc minimum + float vcc_max; // Vcc maximum + float vpp_min; // Vpp minimum, if supported + float vpp_max; // Vpp maximum, if supported + int timeout_single_write; // Time of single write + int timeout_buffer_write; // Time of buffer write + int timeout_block_erase; // Time of sector erase + int timeout_chip_erase; // Time of chip erase + int max_timeout_single_write; // Max time of single write + int max_timeout_buffer_write; // Max time of buffer write + int max_timeout_block_erase; // Max time of sector erase + int max_timeout_chip_erase; // Max time of chip erase + long device_size; // Device size in bytes + u_int16_t interface_description; // Interface description + int max_multi_byte_write; // Time of multi-byte write + int num_erase_blocks; // Number of sector defs. + struct { + unsigned long sector_size; // Byte size of sector + int num_sectors; // Num sectors of this size + u_int32_t sector_mask; // Sector mask + } erase_block[8]; // Max of 256, but 8 is good + + // AMD SPECIFIC + char primary_extended_query[4]; // Vendor specific info here + u_int16_t major_version; // Major code version + u_int16_t minor_version; // Minor code version + u_int8_t sensitive_unlock; // Is byte sensitive unlock? + u_int8_t erase_suspend; // Capable of erase suspend? + u_int8_t sector_protect; // Can Sector protect? + u_int8_t sector_temp_unprotect; // Can we temporarily unprotect? + u_int8_t protect_scheme; // Scheme of unprotection + u_int8_t is_simultaneous; // Is a smulataneous part? + u_int8_t is_burst; // Is a burst mode part? + u_int8_t is_page; // Is a page mode part? + }; + + bool print_cfi_info ( const cfi_query *q ); + + + virtual bool set_bank_int (u_int32_t bank) = 0; + u_int32_t bank_mask () {return((1 << _log2_bank_size) -1 );} + + mfile *_mf; + + cfi_query _cfi_data; + CmdSet* _cmd_set; + + u_int32_t _curr_bank; + u_int32_t _log2_bank_size; + + bool _locked; + + + /* Work around for MX flashes reporting weird erase sector size. */ + /* It reports two sector sizes, actually works as 1. */ + bool _mx_flash_workaround() { + return (_cfi_data.num_erase_blocks == 2 && + //_cfi_data.manuf_id == 0xff && _cfi_data.device_id == 0xff && + _cfi_data.erase_block[0].sector_size == 0x2000 && + _cfi_data.erase_block[0].sector_mask == 0xffffe000 && + _cfi_data.erase_block[0].num_sectors == 8 && + _cfi_data.erase_block[1].sector_size == 0x10000 && + _cfi_data.erase_block[1].sector_mask == 0xffff0000 && + _cfi_data.erase_block[1].num_sectors == 63); + } + u_int32_t + _get_sector_mask () + { + return _mx_flash_workaround()? + _cfi_data.erase_block[1].sector_mask : + _cfi_data.erase_block[0].sector_mask; + } + u_int32_t + _get_sector_size () + { + return _mx_flash_workaround()? 
+ _cfi_data.erase_block[1].sector_size : + _cfi_data.erase_block[0].sector_size; + } +}; + + +class ParallelFlash : public Flash { +public: + ParallelFlash(); + ~ParallelFlash() {close();} + + enum { + CS_INTEL = 0x01, + CS_AMD = 0x02, + }; + + // FBase Interface + +// virtual bool open (const char *device, +// bool read_only = false); + + virtual void close (); + + + virtual bool read (u_int32_t addr, + u_int32_t *data); + + virtual bool read (u_int32_t addr, + void* data, + int len, + bool verbose=false) {return Flash::read(addr, data, len, verbose);} + + virtual bool wait_ready (const char* msg = NULL); + + static void set_byte_mode (bool mode) {CmdSetAmd::set_byte_mode(mode);} + + +#ifndef _MSC_VER +protected: +#endif + + virtual bool init_gpios (); + + virtual bool get_cmd_set (); + + virtual bool set_bank_int (u_int32_t bank); + + virtual bool write_internal(u_int32_t addr, + u_int8_t data); + + enum FlashCmds { + IDLE = 0, + READ4 = (1<<29), + WRITE1 = (2<<29) + }; + + enum { + BANK_SHIFT = 19, + BANK_MASK = 0xfff80000 + }; + + enum { + FLASH = 0xf01a4, + ADDR_MSK = 0x7ffffUL, + CMD_MASK = 0xe0000000UL + }; + + enum { + LEN_MSK = 0x3ff, + LEN_SHIFT = 19 + }; + + enum { + CPUMODE_MSK = 0xc0000000UL, + CPUMODE_SHIFT = 30 + }; + + enum CrAddres { + CPUMODE = 0xf0150 + }; + + + // + // AMD's Am29LV033C command set + // + class CmdSetAmd : public Flash::CmdSet { + public: + CmdSetAmd (ParallelFlash& f ) : _f(f) {} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false); + + virtual bool erase_sector (u_int32_t addr); + virtual bool reset (); + + static void set_byte_mode (bool mode) {_byte_mode = mode;} + + protected: + ParallelFlash& _f; + + static bool _byte_mode; + + bool unlock_bypass (bool unlock); + }; + + + // + // Intel's 28F320J3 x8 command set, using buffer writes + // + class CmdSetIntel : public Flash::CmdSet { + public: + enum FlashCommand { + FC_ReadID = 0x90, + FC_Read = 0xFF, + FC_Erase = 0x20, + FC_Confirm = 0xD0, + FC_Clear = 0x50, + FC_Write = 0x40, + FC_LoadPB = 0xE0, + FC_PBWrite = 0x0C, + FC_Status = 0x70, + FC_Suspend = 0xB0, + FC_Resume = 0xD0, + FC_ReadESR = 0x71, + FC_QueryCFI = 0x98, + FC_SCSErase = 0x28, + FC_SCSWrite = 0xE8 + }; + + enum FlashStatus { + FS_Ready = 0x80, + FS_Suspended = 0x40, + FS_Error = 0x3E, + FS_BlockError = 0x3F + }; + + CmdSetIntel (ParallelFlash& f ) : _f(f) {} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false); + + virtual bool erase_sector (u_int32_t addr); + virtual bool reset (); + + protected: + ParallelFlash& _f; + }; + + // + // Intel's 28F320J3 x8 command set, using byte write (For debug only). + // + class CmdSetIntelWriteByte : public CmdSetIntel { + public: + CmdSetIntelWriteByte (ParallelFlash& f) : CmdSetIntel(f) {} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false); + + }; + + bool get_cfi (cfi_query *query); + + u_int16_t extract_word ( const u_int8_t* pb, int data_width) { + assert (data_width == 1 || data_width == 2); + u_int16_t ret = *pb | ((*(pb + data_width)) << 8); + return ret; + } + + u_int32_t USE_SCR; + bool _use_scr; + + // Place holders to keep GPIO data for restoring after closing flash. 
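+    // Bank-switching arithmetic used by this class (log2 bank size is
+    // 19, i.e. 512KB banks).  Worked example, address hypothetical:
+    //
+    //     addr = 0x000a1234
+    //     bank = addr >> 19                    = 1
+    //     GPIO_DATASET_L gets (bank << 4) & 0x70 = 0x10   (bits [6:4])
+    //     addr & ADDR_MSK (0x7ffff)            = 0x21234  (offset in bank)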
+ u_int32_t _dir; + u_int32_t _pol; + u_int32_t _mod; + u_int32_t _data; + +}; + + +class SerialFlash : public Flash { +public: + SerialFlash() : Flash(20) {} + + enum CrAddres { + FLASH_GW = 0xf0400, + FLASH_ADDR = 0xf0404, + FLASH_DATA = 0xf0408, + FLASH_CS = 0xf0418, + + GPIO_LOCK = 0xf00ec + }; + + enum BitOffset { + READ_OP = 0, + ADDR_INCR = 1, + + LPC_STOP = 3, + SPI_NO_DATA = 4, + SPI_NO_ADDR = 5, + SPI_SPECIAL = 6, + + MSIZE_S = 8, + MSIZE_E = 10, + + STATUS_S = 26, + STATUS_E = 29, + + BUSY = 30, + + SPI_ADDR_S = 0, + SPI_ADDR_E = 23, + + SPI_CMD_S = 24, + SPI_CMD_E = 31, + + SPI_GPIO_S = 5, + SPI_GPIO_E = 7 + }; + + +protected: + + virtual bool write_internal(u_int32_t addr, + u_int8_t data); + +}; + +bool SerialFlash::write_internal (u_int32_t addr, + u_int8_t data) {addr = 0; data = 0; return true;} + + +class SpiFlash : public SerialFlash { +public: + SpiFlash() {} + ~SpiFlash() {close();} + + + // FBase Interface + + virtual void close (); + + virtual bool read (u_int32_t addr, + u_int32_t *data); + + virtual bool read (u_int32_t addr, + void* data, + int len, + bool verbose=false) {return Flash::read(addr, data, len, verbose);} + + virtual bool wait_ready (const char* msg); + +protected: + + virtual bool init_gpios (); + + virtual bool get_cmd_set (); + + virtual bool set_bank_int (u_int32_t bank); + + + virtual bool read_id (u_int8_t *data, u_int8_t cmd); + + + // + // ST's M25P80 command set + // + class CmdSetStSpi : public Flash::CmdSet { + public: + CmdSetStSpi (SpiFlash& f ) : _f(f) , _mf(f._mf){} + + virtual bool write (u_int32_t addr, + void* data, + int cnt, + bool noerase = false, + bool noverify = false); + + virtual bool erase_sector (u_int32_t addr); + + virtual bool reset () {return true;} + + enum FlashCommand { + FC_SE = 0xD8, + FC_PP = 0x02, + FC_RDSR = 0x05, + FC_WREN = 0x06, + FC_READ = 0x03, + FC_RDID = 0xF9, + FC_RES = 0xAB + }; + + + protected: + bool write_block (u_int32_t block_addr, + void* block_data, + u_int32_t block_size); + + bool wait_wip (u_int32_t delay, + u_int32_t retrys, + u_int32_t fast_retry = 0); + + bool write_enable (); + + + SpiFlash& _f; + mfile* _mf; + + }; + + bool fill_cfi (cfi_query *query); + +}; + + +//////////////////////////////////////////////////////////////////////// +// +// FImage Class Implementation +// +//////////////////////////////////////////////////////////////////////// + +bool FImage::open(const char *fname, bool read_only) +{ + int fsize; + int r_cnt; + FILE *fh; + + read_only = true; // FImage can be opened only for read + + fh = fopen(fname, "rb"); + + if (!fh) { + return errmsg("Can't open file \"%s\" - %s\n", fname, strerror(errno)); + } + + // Get the file size: + if (fseek(fh, 0, SEEK_END) < 0) { + return errmsg("Can't get file size for \"%s\" - %s\n", fname, strerror(errno)); + } + + fsize = ftell(fh); + if (fsize < 0) { + return errmsg("Can't get file size for \"%s\" - %s\n", fname, strerror(errno)); + } + rewind(fh); + + //printf("-D- %s size is %d\n", fname, fsize); + if (fsize & 0x3) { + return errmsg("Image size should be 4-bytes aligned. 
Make sure file %s is in the right format (binary image)", + fname); + } + + _buf = new u_int32_t[fsize/4]; + if ((r_cnt = fread(_buf, 1, fsize, fh)) != fsize) { + if (r_cnt < 0) + return errmsg("Read error on file \"%s\" - %s\n",fname, strerror(errno)); + else + return errmsg("Read error on file \"%s\" - read only %d bytes (from %ld)\n", + fname, r_cnt, (unsigned long)fsize); + } + + _len = fsize; + fclose(fh); + + return true; +} // FImage::open + +//////////////////////////////////////////////////////////////////////// +void FImage::close() +{ + delete [] _buf; + _buf = 0; +} // FImage::close + +//////////////////////////////////////////////////////////////////////// +bool FImage::read(u_int32_t addr, u_int32_t *data) +{ + return read(addr, data, 4); +} // FImage::read + +//////////////////////////////////////////////////////////////////////// +bool FImage::read(u_int32_t addr, void *data, int len, bool) +{ + if (addr & 0x3) { + return errmsg("Address should be 4-bytes aligned."); + } + if (len & 0x3) { + return errmsg("Length should be 4-bytes aligned."); + } + if (!_buf) { + return errmsg("read() when not opened"); + } + + if (addr + len > _len) { + return errmsg("Reading 0x%x bytes from address 0x%x is out of image limits (0x%x bytes)", + len, addr, _len); + } + + u_int32_t *p = (u_int32_t *)data; + for (int i=0; ireset(); + } + + return true; +} // Flash::open + +//////////////////////////////////////////////////////////////////////// +void Flash::close() +{ + if (!_mf) + return; + + delete _cmd_set; + + // ??? Check if unlock should be before delete _cmd_set + if (_locked) { + unlock(); + } + + mclose(_mf); + _mf = 0; + +} // Flash::close + + +bool Flash::lock(bool retry) { + + retry = false; // compiler - REMOVE ??? + + // Obtain GPIO Semaphore + u_int32_t cnt=0; + u_int32_t word; + do { + if (++cnt > GPIO_SEM_TRIES) { + return errmsg("Can not obtain Flash semaphore (63). You can run \"flint -clear_semaphore -d \" to force semaphore unlock. See help for details."); + } + MREAD4(SEMAP63, &word); + } // while (word); + // HACK! : On win 64, the semaphore reg, when read through pci (mmap), does not appear + // as 0x00000000 when cleared, but as 0xffffff00 , or 0xffff00ff . Could not find the + // reason for that. + // The hack is to treat anything which is NOT 0xffffffff or 0x1 as a free semaphore. + // Though ugly, this is safe. 
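+    // Semaphore usage, in outline: a read of SEMAP63 that returns 0
+    // means the semaphore was free and is now owned by this reader
+    // (the device sets it as a side effect of the read; this is an
+    // assumption based on how these HCA semaphore registers behave,
+    // not something stated in this file).  Writing 0 releases it, as
+    // unlock() does below:
+    //
+    //     MREAD4(SEMAP63, &word);     // word == 0  -> lock acquired
+    //     ...                         // flash access
+    //     mwrite4(_mf, SEMAP63, 0);   // release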
+    while (word == 0xffffffff || word == 0x00000001);
+
+    return true;
+}
+
+
+bool Flash::unlock() {
+
+    // Free GPIO Semaphore
+    mwrite4(_mf, SEMAP63, 0);
+    return true;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+bool Flash::read(u_int32_t addr, void *data, int len, bool verbose)
+{
+    u_int32_t perc = 0xffffffff;
+
+    if (addr & 0x3) {
+        return errmsg("Address should be 4-bytes aligned.");
+    }
+    if (len & 0x3) {
+        return errmsg("Length should be 4-bytes aligned.");
+    }
+
+    // Report
+    if (verbose) {
+        printf("000%%");
+        fflush(stdout);
+    }
+
+    u_int32_t *p = (u_int32_t *)data;
+    for (int i=0; i<len/4; i++) {
+        if (!read(addr, p++))
+            return false;
+
+        addr += 4;
+
+        // Report progress.
+        u_int32_t new_perc = (i * 100) / (len / 4);
+        if (verbose && new_perc != perc) {
+            printf("\b\b\b\b%03d%%", new_perc);
+            fflush(stdout);
+            perc = new_perc;
+        }
+    }
+
+    if (verbose) {
+        printf("\b\b\b\b100%%\n");
+        fflush(stdout);
+    }
+
+    return true;
+} // Flash::read
+
+
+////////////////////////////////////////////////////////////////////////
+bool Flash::write(u_int32_t addr, void *data, int cnt,
+                  bool noerase, bool noverify)
+{
+    if (!_mf) {
+        return errmsg("Not opened");
+    }
+
+    if (addr + cnt > get_size()) {
+        return errmsg(
+            "Trying to write %d bytes to address 0x%x, which exceeds flash size (0x%x).",
+            cnt,
+            addr,
+            get_size());
+    }
+
+    return _cmd_set->write(addr, data, cnt, noerase, noverify);
+}
+
+
+////////////////////////////////////////////////////////////////////////
+bool Flash::write(u_int32_t addr, u_int32_t data)
+{
+    if (!_mf) {
+        return errmsg("Not opened");
+    }
+    if (addr & 0x3) {
+        return errmsg("Address should be 4-bytes aligned.");
+    }
+
+    // Here, we use non-virtual variants for efficiency
+    // TODO: Rewrite using get_sector_size() only
+    u_int32_t word;
+    u_int32_t sector_mask = _get_sector_mask();
+    u_int32_t sector_size = _get_sector_size();
+
+    u_int32_t sector = addr & sector_mask;
+    u_int32_t word_in_sector = (addr & ~sector_mask)/sizeof(u_int32_t);
+
+    if (!read(addr, &word))
+        return false;
+    if (word == data)
+        return true;    // already there
+
+    vector<u_int32_t> buff(sector_size/sizeof(u_int32_t));
+    if (!read(sector, &buff[0], sector_size))
+        return false;
+    buff[word_in_sector] = data;
+    return write(sector, &buff[0], sector_size);
+} // Flash::write
+
+////////////////////////////////////////////////////////////////////////
+
+inline
+bool Flash::set_bank (u_int32_t addr) {
+    u_int32_t bank = (addr >> _log2_bank_size);
+
+    if (bank != _curr_bank) {
+        if (!set_bank_int(bank))
+            return false;
+
+        _curr_bank = bank;
+    }
+
+    return true;
+}
+
+
+// A sample function which loops through and prints out the data
+// contained in the CFI query structure.  Should ONLY be called
+// after init_flash
+bool Flash::print_cfi_info()
+{
+
+    const cfi_query *q = &_cfi_data;
+
+    int i=0;
+    printf("\n");
+
+    char* head_fmt = "%-50s ";
+
+    printf(head_fmt, "CFI Query String Read:");
+    printf("[%s]\n", (char *) q->query_string);
+
+    printf(head_fmt, "Primary table address at offset:");
+    printf("[0x%2x] hex.\n", q->primary_table_address);
+
+    printf(head_fmt, "Manufacturer ID, Device ID:");
+    printf("[0x%02x,0x%02x] hex.\n", q->manuf_id, q->device_id );
+
+    printf(head_fmt, "Command set:");
+    printf("[0x%04x] hex.\n", q->oem_command_set);
+
+    printf(head_fmt, "Write buffer:");
+    printf("[%d] bytes\n", q->max_multi_byte_write );
+
+    printf("\n----Voltage and Signal Timing Parameters-------------------\n");
+    printf(head_fmt, "Vcc operating voltage:");
+    printf("[%2.3f] to [%2.3f] Volts\n", q->vcc_min, q->vcc_max);
+
+
+    printf(head_fmt, "Vpp operating voltage:");
+    if (q->vpp_min == 0.0)
+        printf("Device does not support Vpp voltage.\n");
+    else {
+        printf("[%2.3f] to [%2.3f] Volts\n", q->vpp_min, q->vpp_max);
+    }
+
+    printf(head_fmt, "Typical timeout for single write (micro-sec):");
+    printf("[%8i]us\n", q->timeout_single_write);
+
+
+
+    printf(head_fmt, "Typical timeout for buffer write (micro-sec):");
+    if (q->timeout_buffer_write == 0x00)
+        printf("Buffer writes not supported in this device.\n");
+    else {
+        printf("[%8i]us\n", q->timeout_buffer_write);
+    }
+
+    printf(head_fmt, "Typical timeout for block erase (milli-sec):");
+    printf("[%8i]ms\n", q->timeout_block_erase);
+
+    printf(head_fmt, "Typical timeout for chip erase (milli-sec):");
+    if (q->timeout_chip_erase == 0x00)
+        printf("Not supported in this device.\n");
+    else {
+        printf("[%8i]ms\n", q->timeout_chip_erase);
+    }
+
+    printf(head_fmt, "Maximum timeout for single write (micro-sec):");
+    printf("[%8i]us\n", q->max_timeout_single_write);
+
+    printf(head_fmt, "Maximum timeout for buffer write (micro-sec):");
+    if (q->max_timeout_buffer_write == 0x00)
+        printf("Not supported in this device.\n");
+    else {
+        printf("[%8i]us\n", q->max_timeout_buffer_write);
+    }
+
+    printf(head_fmt, "Maximum timeout for block erase (milli-sec):");
+    printf("[%8i]ms\n", q->max_timeout_block_erase);
+
+    printf(head_fmt, "Maximum timeout for chip erase (milli-sec):");
+    if (q->max_timeout_chip_erase == 0x00)
+        printf("Not supported in this device.\n");
+    else {
+        printf("[%8i]ms\n", q->max_timeout_chip_erase);
+    }
+
+
+
+
+    printf("\n----Sector Organization Parameters-------------------\n\n");
+
+    printf(head_fmt, "Device size:");
+    printf("[%8li] bytes, or [%2i] Mbit\n",
+           q->device_size,
+           (int) (q->device_size/((long)0x20000)));
+
+    printf(head_fmt, "Number of erase block regions:");
+    printf("%d\n", q->num_erase_blocks);
+
+    for (i=0; i<q->num_erase_blocks; i++) {
+        printf("  Size:[%8lx] bytes, Mask [%08x], Number:[%4i]\n",
+               q->erase_block[i].sector_size,
+               q->erase_block[i].sector_mask,
+               q->erase_block[i].num_sectors);
+    }
+
+    printf("\n----Primary Vendor-Specific Extended Parameters----\n\n");
+
+    printf(head_fmt, "CFI Extended String Read:");
+    printf("[%s]\n", (char *) q->primary_extended_query);
+
+    printf(head_fmt, "Major version:");
+    printf("[%3x]\n", q->major_version);
+
+    printf(head_fmt, "Minor version:");
+    printf("[%3x]\n", q->minor_version);
+
+    printf(head_fmt, "Sensitive Unlock:");
+    printf("[%3x]\n", q->sensitive_unlock);
+
+    printf(head_fmt, "Erase Suspend:");
+    printf("[%3x]\n",
q->erase_suspend); + + printf(head_fmt, "Sector Protect:", q->sector_protect); + printf("[%3x]\n", q->sector_protect); + + printf(head_fmt, "Temporary Sector Unprotect:", q->sector_temp_unprotect); + printf("[%3x]\n", q->sector_temp_unprotect); + + printf(head_fmt, "Protection Scheme:", q->protect_scheme); + printf("[%3x]\n", q->protect_scheme); + + printf(head_fmt, "Is simultaneous? :", q->is_simultaneous); + printf("[%3x]\n", q->is_simultaneous); + + printf(head_fmt, "Is Burst capable? :", q->is_burst); + printf("[%3x]\n", q->is_burst); + + printf(head_fmt, "Is Page capable? :", q->is_page); + printf("[%3x]\n", q->is_page); + + printf("Done.\n\n"); + + return true; +} // Flash::print_cfi_info() + + + +//////////////////////////////////////////////////////////////////////// +// +// ParallelFlash Class Implementation +// +//////////////////////////////////////////////////////////////////////// + +ParallelFlash::ParallelFlash() : Flash(19) +{ + char *use_scr_p = getenv("FLINT_USE_SCRATCHPAD"); + + if (use_scr_p) { + char *endp; + + USE_SCR = strtoul(use_scr_p, &endp, 0); + if (*endp) { + printf("Invalid FLINT_USE_SCRATCHPAD syntax (%s). Must be integer.", + use_scr_p); + _use_scr = false; + } else { + printf("Burning via SCRATCHPAD interface by addr 0x%x\n", USE_SCR); + _use_scr = true; + } + } else + _use_scr = false; +} // Flash::Flash + + + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::init_gpios() { + + // Save old values + MREAD4(GPIO_DIR_L, &_dir); + MREAD4(GPIO_POL_L, &_pol); + MREAD4(GPIO_MOD_L, &_mod); + MREAD4(GPIO_DAT_L, &_data); + + // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits + u_int32_t dir = _dir | 0x70; + u_int32_t pol = _pol & ~0x70; + u_int32_t mod = _mod & ~0x70; + MWRITE4(GPIO_DIR_L, dir); + MWRITE4(GPIO_POL_L, pol); + MWRITE4(GPIO_MOD_L, mod); + + // Set CPUMODE + u_int32_t word; + MREAD4(CPUMODE, &word); + word &= ~CPUMODE_MSK; + word |= 1 << CPUMODE_SHIFT; + MWRITE4(CPUMODE, word); + + return true; +} + +void ParallelFlash::close() { + + // Restore origin values + mwrite4(_mf, GPIO_DIR_L, _dir); + mwrite4(_mf, GPIO_POL_L, _pol); + mwrite4(_mf, GPIO_MOD_L, _mod); + mwrite4(_mf, GPIO_DAT_L, _data); + + _curr_bank = 0xffffffff; + + Flash::close(); +} + + +bool ParallelFlash::get_cmd_set() { + + // + // CFI Query + // + if (!get_cfi(&_cfi_data)) + return false; + + // + // Some sanity checks: + // + + if (_cfi_data.max_multi_byte_write > MAX_WRITE_BUFFER_SIZE) { + return errmsg("Device write buffer(%d) is larger than the supported size(%d).", + _cfi_data.max_multi_byte_write, MAX_WRITE_BUFFER_SIZE); + } + + if (!_mx_flash_workaround() && _cfi_data.num_erase_blocks > 1) { + return errmsg("Device has more than one sector size - not supported by this tool"); + } + + // + // Sellect CmdSet + // + + switch (_cfi_data.oem_command_set) { + case CS_INTEL: + if (_byte_write || _cfi_data.max_multi_byte_write == 0) + _cmd_set = new CmdSetIntelWriteByte(*this); + else + _cmd_set = new CmdSetIntel(*this); + break; + case CS_AMD: + _cmd_set = new CmdSetAmd(*this); + break; + + default: + return errmsg("Unknown CFI command set (%d)",_cfi_data.oem_command_set) ; + } + + return true; +} + + + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::set_bank_int(u_int32_t bank) +{ + if (!_mf) { + return errmsg("Not opened"); + } + + //printf("\n*** Flash::set_bank(0x%lx) : 0x%lx\n", bank, (bank >> 19) & 0x07); + MWRITE4(GPIO_DATACLEAR_L, 0x70); + MWRITE4(GPIO_DATASET_L, (bank << 4) & 
0x70); + + return true; +} // Flash::ParallelFlashGw::set_bank_int + + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::wait_ready(const char* msg) { + u_int32_t cnt = 0; + u_int32_t cmd; + do { + // Timeout checks + if (++cnt > FLASH_CMD_CNT) { + return errmsg("Flash gateway timeout: %s", msg); + } + + MREAD4(FLASH, &cmd); + + } while (cmd & CMD_MASK); + + return true; +} + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::read(u_int32_t addr, u_int32_t *data) +{ + if (!_mf) { + return errmsg("Not opened"); + } + + u_int32_t cmd; + if (addr & 0x3) { + return errmsg("Address should be 4-bytes aligned."); + } + + if (!set_bank(addr)) + return false; + + + MWRITE4(FLASH, READ4 | (addr & ADDR_MSK)); + + if (!wait_ready("Read")) + return false; + + MREAD4(FLASH+4, &cmd); + cmd = __cpu_to_be32(cmd); + memcpy(data, &cmd, sizeof(u_int32_t)); + + return true; +} // Flash::read + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::write_internal(u_int32_t addr, u_int8_t data) +{ + MWRITE4(FLASH+4, data << 24); + MWRITE4(FLASH, WRITE1 | (addr & ADDR_MSK)); + + if (!wait_ready("Write")) + return false; + + return true; +} // Flash::write_internal + + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::CmdSetAmd::unlock_bypass(bool unlock) { + + if (unlock) { + // unlock bypass + + if (!_f.write_internal(0x555, 0xaa)) + return false; + if (!_f.write_internal(0x2aa, 0x55)) + return false; + if (!_f.write_internal(0x555, 0x20)) + return false; + } else { + // unlock reset + if (!_f.write_internal(0x555, 0x90)) + return false; + if (!_f.write_internal(0x2aa, 0x00)) + return false; + } + return true; +} + + +//////////////////////////////////////////////////////////////////////// + +bool ParallelFlash::CmdSetAmd::_byte_mode = false; + +bool ParallelFlash::CmdSetAmd::write(u_int32_t addr, void *data, int cnt, + bool noerase, bool noverify) +{ + if (!_f._mf) { + return _f.errmsg("Not opened"); + } + if (addr & 0x3) { + return _f.errmsg("Address should be 4-bytes aligned."); + } + + char *p = (char *)data; + + if (_unlock_bypass) { + if (!unlock_bypass(true)) { + return _f.errmsg("Failed unlock bypass"); + } + } + + for (int i=0; i FLASH_CMD_CNT) { + return _f.errmsg("Use scratchpad: CMD doesn't become zero"); + } + if (mread4(_f._mf, _f.USE_SCR , &cmd) != 4) return false; + + } while (cmd & CMD_MASK); + i += 3; + addr += 3; + } else if ((u_int8_t)(*p) != 0xff) { + + if (_byte_mode) { + + if (!_f.write_internal(0xaaa, 0xaa)) + return false; + if (!_f.write_internal(0x555, 0x55)) + return false; + if (!_f.write_internal(0xaaa, 0xa0)) + return false; + } else { + if (!_unlock_bypass) { + if (!_f.write_internal(0x555, 0xaa)) + return false; + if (!_f.write_internal(0x2aa, 0x55)) + return false; + } + + if (!_f.write_internal(0x555, 0xa0)) + return false; + } + + if (!_f.write_internal(addr, *p++)) + return false; + + do { + // Timeout checks + if (++cnt1 > READ_CNT_FAST) + usleep(READ_DELAY); + if (cnt1 > READ_CNT_FAST + READ_CNT_SLOW) { + return _f.errmsg("Flash write error - read value didn't stabilize."); + return false; + } + + if (!_f.read(addr & ~3, &word)) + return false; + + word = __be32_to_cpu(word); + act = (u_int8_t) ((word >> ((3 - (addr & 3)) * 8)) & 0xff); + exp = *(p-1) & 0xff; + //if (act != exp) + // printf("write: %08x - exp:%02x act:%02x /%08x/\n", + // addr, exp & 0xff, act & 0xff, word); + } while (!noverify && act 
!= exp); + + } else { + p++; + } + } + + if (_unlock_bypass) { + if (!unlock_bypass(false)) { + return _f.errmsg("Failed re-lock bypass"); + } + } + + return true; +} // flash_write + + + +bool ParallelFlash::CmdSetIntelWriteByte::write(u_int32_t addr, void *data, int cnt, + bool noerase, bool noverify) +{ + if (!_f._mf) { + return _f.errmsg("Not opened"); + } + if (addr & 0x3) { + return _f.errmsg("Address should be 4-bytes aligned."); + } + + char *p = (char *)data; + + for (int i=0; i READ_CNT_FAST) + usleep(READ_DELAY); + if (cnt1 > READ_CNT_FAST + READ_CNT_SLOW) { + return _f.errmsg("Flash write error - timeout waiting for ready after write."); + } + + // TODO - Move to read single for Arbel + if (!_f.read(addr & ~3, &status)) + return false; + + //if (act != exp) + // printf("write: %08x - exp:%02x act:%02x /%08x/\n", + // addr, exp & 0xff, act & 0xff, word); + } while ((status & FS_Ready) == 0); + + if (status & FS_Error) { + return _f.errmsg("Flash write error - error staus detected."); + } + + if (!noverify) { + u_int32_t word; + if (!reset()) + return false; + // TODO - Move to read single for Arbel + if (!_f.read(addr & ~3, &word)) + return false; + + word = __be32_to_cpu(word); + act = (u_int8_t) ((word >> ((3 - (addr & 3)) * 8)) & 0xff); + exp = *(p-1) & 0xff; + + if (act != exp) { + printf("write: %08x - exp:%02x act:%02x /%08x/\n", + addr, exp & 0xff, act & 0xff, word); + + return _f.errmsg("Write verification failed"); + } + } + + } else { + p++; + } + } + + if (!reset()) + return false; + + return true; +} // flash_write + +/////////////////////////////////////////////////////////////////////////// + +// +// Use the buffer write capability. +// +bool ParallelFlash::CmdSetIntel::write(u_int32_t addr, void *data, int cnt, + bool noerase, bool noverify) +{ + if (!_f._mf) { + return _f.errmsg("Not opened"); + } + if (addr & 0x3) { + return _f.errmsg("Address should be 4-bytes aligned."); + } + + u_int8_t *p = (u_int8_t *)data; + + u_int32_t block_size = _f._cfi_data.max_multi_byte_write; + u_int32_t block_mask = ~(block_size - 1 ); + + // TODO - Check MAX_WRITE_BUFFER_SIZE against block_size in open (or here) + u_int8_t tmp_buff[MAX_WRITE_BUFFER_SIZE]; + + while (cnt) { + + u_int32_t prefix_pad_size = 0; + u_int32_t suffix_pad_size = 0; + + u_int32_t block_addr = addr & block_mask; + u_int32_t data_size = block_size; + + u_int8_t* write_data = p; + + + // + // First and last cycles (can be the same one) may not be block aligned. + // Check the status, and copy data to a padded temp bufer if not alligned. + // (there's an option to write partial buffer, but Intel reference code always + // writes full buffer, with pads if needed. I do the dame ...) + // + + prefix_pad_size = addr - block_addr; + + if ((addr & block_mask) == ((addr + cnt) & block_mask)) { + suffix_pad_size = block_size - ((addr + cnt) % block_size); + } + + if (suffix_pad_size || prefix_pad_size) { + memset(tmp_buff, 0xff, block_size); + + data_size -= prefix_pad_size; + data_size -= suffix_pad_size; + + memcpy(tmp_buff + prefix_pad_size, p , data_size); + + write_data = tmp_buff; + } + + int cnt1 = 0; + + // + // Bank setup. 
+ // + if (!_f.set_bank(addr)) + return false; + + if (!noerase) { + u_int32_t sector = (addr / _f.get_sector_size()) * _f.get_sector_size(); + if (sector != _curr_sector) { + _curr_sector = sector; + if (!erase_sector(_curr_sector)) + return false; + } + } + + if (_no_burn) + continue; + + // + // Check to see if there's something to do + // + bool all_ffs = true; + for (u_int32_t i = 0; i < block_size ; i++) { + if (write_data[i] != 0xff) { + all_ffs = false; + break; + } + } + + if (!all_ffs) { + + u_int32_t status; + cnt1 = 0; + do { + // Get Write buffer + if (!_f.write_internal(block_addr, FC_SCSWrite)) + return false; + + if (cnt1 > ((READ_CNT_FAST + READ_CNT_SLOW) * 4)) { + //printf("-D- status = %08x\n", status); + reset(); + return _f.errmsg("Flash write error - Write buffer not ready."); + } + + cnt1++; + + if (!_f.read(block_addr, &status)) + return false; + + } while (!(status & FS_Ready)); + + if (status & FS_Error) { + return _f.errmsg("Flash write error - Error getting write buffer"); + } + + // word count (allways full buffer, coded as cull buffer size -1) + if (!_f.write_internal(block_addr, block_size - 1)) + return false; + + // Write data to buffer + for (u_int32_t i = 0; i < block_size ; i++ ) { + if (!_f.write_internal(block_addr + i, write_data[i])) + return false; + } + + // write confirm + if (!_f.write_internal(block_addr, FC_Confirm)) + return false; + + cnt1 = 0; + do { + // Timeout checks + if (++cnt1 > READ_CNT_FAST) + usleep(READ_DELAY); + if (cnt1 > READ_CNT_FAST + READ_CNT_SLOW) { + reset(); + return _f.errmsg("Flash write error - Write buffer status timeout"); + } + + // TODO - Move to read single for Arbel + if (!_f.read(block_addr, &status)) + return false; + + //if (act != exp) + // printf("write: %08x - exp:%02x act:%02x /%08x/\n", + // addr, exp & 0xff, act & 0xff, word); + } while ((status & 0x80) == 0); + + // + // TODO: Status checks. + // + + if (!noverify) { + u_int8_t verify_buffer[MAX_WRITE_BUFFER_SIZE]; + if (!reset()) + return false; + + if (!_f.read(addr, verify_buffer, data_size)) + return false; + + for (u_int32_t i = 0 ; i < data_size ; i++) { + if (verify_buffer[i] != write_data[i + prefix_pad_size]) { + return _f.errmsg( + "Write verification failed. 
Addr: %08x - exp:%02x act:%02x", + addr + i, + write_data[i + prefix_pad_size] , + verify_buffer[i]); + } + } + } + } + + + // + // loop advance + // + + addr += data_size; + p += data_size; + cnt -= data_size; + } + + if (!reset()) + return false; + + return true; +} // flash_write + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::CmdSetAmd::reset() { + if (!_f.write_internal(0x555, 0xf0)) { + return _f.errmsg("Device reset failed: %s", _f.err()); + } + return true; +} + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::CmdSetAmd::erase_sector(u_int32_t addr) +{ + u_int32_t word = 0; + int cnt = 0; + + if (_no_erase) + return true; + + // Just to insure zeroes because erase completion waits for ones + if (!write(addr, &word, sizeof(word), true)) + return false; + + // erase sector sequence + if (_byte_mode ) { + + if (!_f.write_internal(0xaaa, 0xaa)) + return false; + if (!_f.write_internal(0x555, 0x55)) + return false; + if (!_f.write_internal(0xaaa, 0x80)) + return false; + if (!_f.write_internal(0xaaa, 0xaa)) + return false; + if (!_f.write_internal(0x555, 0x55)) + return false; + if (!_f.write_internal(addr, 0x30)) + return false; + } else { + + if (!_f.write_internal(0x555, 0xaa)) + return false; + if (!_f.write_internal(0x2aa, 0x55)) + return false; + if (!_f.write_internal(0x555, 0x80)) + return false; + if (!_f.write_internal(0x555, 0xaa)) + return false; + if (!_f.write_internal(0x2aa, 0x55)) + return false; + if (!_f.write_internal(addr, 0x30)) + return false; + + } + + // Wait while erase completes + do { + // Timeout checks + if (++cnt > ERASE_CNT) { + return _f.errmsg("Flash erase sector timeout"); + } + if (!_f.read(addr, &word)) + return false; + + //printf("erase_sector: addr:%08lx, %08x\n", addr, word); + usleep(ERASE_DELAY); + } while (word != 0xffffffff); + + + return true; +} // Flash::erase_sector + +//////////////////////////////////////////////////////////////////////// + +bool ParallelFlash::CmdSetIntel::reset() { + if (!_f.write_internal(0x555, FC_Read)) { + return _f.errmsg("Device reset failed"); + } + return true; +} + +//////////////////////////////////////////////////////////////////////// +bool ParallelFlash::CmdSetIntel::erase_sector(u_int32_t addr) +{ + u_int32_t status = 0; + int cnt = 0; + + if (_no_erase) + return true; + + // Just to insure zeroes because erase completion waits for ones + //if (!write(addr, &word, sizeof(word), true)) + // return false; + + // Erase command + if (!_f.write_internal(addr, FC_Erase)) + return false; + + // Erase confirm + if (!_f.write_internal(addr, FC_Confirm)) + return false; + + usleep(ERASE_DELAY); + + // Wait while erase completes + do { + // Timeout checks + if (++cnt > ERASE_CNT) { + return _f.errmsg("Flash erase sector timeout"); + } + if (!_f.read(addr, &status)) + return false; + + //printf("CmdSetIntel::erase_sector: addr:%08lx, %08x\n", addr, word); + usleep(ERASE_DELAY); + } while ((status & FS_Ready) == 0); + + if (status & FS_Error) { + return _f.errmsg("Status register detected erase error (0x%x)", status & FS_Error); + } + + // Reset + if (!reset()) + return false; + + return true; +} // ParallelFlash::CmdSetIntel::erase_sector + +//******************************************************************* +// flash_get_cfi() is the main CFI workhorse function. Due to it's +// complexity and size it need only be called once upon +// initializing the flash system. 
Once it is called, all operations
+// are performed by looking at the cfi_query structure.
+// All possible care was made to make this algorithm as efficient as
+// possible. 90% of all operations are memory reads, and all
+// calculations are done using bit-shifts when possible
+//*******************************************************************
+
+bool ParallelFlash::get_cfi(struct cfi_query *query)
+{
+
+    enum {
+        TOTAL_QUERY_SIZE    = 1024,
+        EXTENDED_QUERY_SIZE =   12
+    };
+
+    u_int8_t fwp[TOTAL_QUERY_SIZE]; // flash window
+
+    int volts=0, milli=0, temp=0, i=0;
+    int offset=0;
+
+    u_int32_t query_base = 0x10;
+
+    // Initial house-cleaning
+    memset(fwp, 0xff, TOTAL_QUERY_SIZE);
+
+    for (i=0; i < 8; i++) {
+        query->erase_block[i].sector_size = 0;
+        query->erase_block[i].num_sectors = 0;
+        query->erase_block[i].sector_mask = 0;
+    }
+
+    // reset
+    if (!write_internal(0x55, 0xff))
+        return false;
+
+    // CFI QUERY
+    if (!write_internal(0x55, 0x98))
+        return false;
+
+    char query_str_x8[4];
+    char query_str_x16asx8[8];
+
+    if (!read(0x10, query_str_x8, 0x4))
+        return false;
+
+    if (!read(0x20, query_str_x16asx8, 0x8))
+        return false;
+
+    query_str_x8[3]      = '\0';
+    query_str_x16asx8[7] = '\0';
+
+    if ( strncmp( query_str_x8 , "QRY" ,3 ) == 0) {
+        // x8 CFI flash (AMD)
+        query_base = 1;
+    } else if ( query_str_x16asx8[0] == 'Q' &&
+                query_str_x16asx8[2] == 'R' &&
+                query_str_x16asx8[4] == 'Y') {
+        // x16 CFI flash working in x8 mode
+        query_base = 2;
+    } else {
+
+        printf(" Received CFI query from addr 0x10: [%s]\n", query_str_x8 );
+        printf(" Received CFI query from addr 0x20: [%s]\n", query_str_x16asx8);
+
+        return errmsg("Failed CFI query");
+    }
+
+    if (!read(0x0, fwp, 0x4)) // Dev ID
+        return false;
+
+    if (!read(query_base * 0x10, fwp + query_base * 0x10, query_base * 0x20))
+        return false;
+
+    query->manuf_id  = fwp[query_base * 0];
+    query->device_id = fwp[query_base * 1];
+
+    query->query_string[0] = fwp[query_base * 0x10];
+    query->query_string[1] = fwp[query_base * 0x11];
+    query->query_string[2] = fwp[query_base * 0x12];
+    query->query_string[3] = '\0';
+
+    query->oem_command_set       = extract_word(fwp + query_base * 0x13, query_base);
+    query->primary_table_address = extract_word(fwp + query_base * 0x15, query_base); // Important one!
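+    // (Reviewer note on the addressing above: query_base is the byte
+    // stride of the CFI table - 1 for a true x8 part, 2 for an x16 part
+    // driven in x8 mode, where each table byte shows up at an even
+    // address. So CFI field 0x15 is read from fwp[0x15] on x8 parts and
+    // from fwp[0x2a] on x16-as-x8 parts; extract_word() is expected to
+    // combine the two consecutive table bytes, LSB first, using that
+    // same stride.)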
+ query->alt_command_set = extract_word(fwp + query_base * 0x17, query_base); + query->alt_table_address = extract_word(fwp + query_base * 0x19, query_base); + + // We will do some bit translation to give the following values + // numerical meaning in terms of C 'float' numbers + + volts = ((fwp[query_base * 0x1B] & 0xF0) >> 4); + milli = ( fwp[query_base * 0x1B] & 0x0F); + query->vcc_min = (float) (volts + ((float)milli/10)); + + volts = ((fwp[query_base * 0x1C] & 0xF0) >> 4); + milli = ( fwp[query_base * 0x1C] & 0x0F); + query->vcc_max = (float) (volts + ((float)milli/10)); + + volts = ((fwp[query_base * 0x1D] & 0xF0) >> 4); + milli = ( fwp[query_base * 0x1D] & 0x0F); + query->vpp_min = (float) (volts + ((float)milli/10)); + + volts = ((fwp[query_base * 0x1E] & 0xF0) >> 4); + milli = ( fwp[query_base * 0x1E] & 0x0F); + query->vpp_max = (float) (volts + ((float)milli/10)); + + // Let's not drag in the libm library to calculate powers + // for something as simple as 2^(power) + // Use a bit shift instead - it's faster + + temp = fwp[query_base * 0x1F]; + query->timeout_single_write = (1 << temp); + + temp = fwp[query_base * 0x20]; + if (temp != 0x00) + query->timeout_buffer_write = (1 << temp); + else + query->timeout_buffer_write = 0x00; + + temp = 0; + temp = fwp[query_base * 0x21]; + query->timeout_block_erase = (1 << temp); + + temp = fwp[query_base * 0x22]; + if (temp != 0x00) + query->timeout_chip_erase = (1 << temp); + else + query->timeout_chip_erase = 0x00; + + temp = fwp[query_base * 0x23]; + query->max_timeout_single_write = (1 << temp) * + query->timeout_single_write; + + temp = fwp[query_base * 0x24]; + if (temp != 0x00) + query->max_timeout_buffer_write = (1 << temp) * + query->timeout_buffer_write; + else + query->max_timeout_buffer_write = 0x00; + + temp = fwp[query_base * 0x25]; + query->max_timeout_block_erase = (1 << temp) * + query->timeout_block_erase; + + temp = fwp[query_base * 0x26]; + if (temp != 0x00) + query->max_timeout_chip_erase = (1 << temp) * + query->timeout_chip_erase; + else + query->max_timeout_chip_erase = 0x00; + + temp = fwp[query_base * 0x27]; + query->device_size = (long) (((long)1) << temp); + + query->interface_description = extract_word(fwp + query_base * 0x28, query_base); + + temp = fwp[query_base * 0x2A]; + if (temp != 0x00) + query->max_multi_byte_write = (1 << temp); + else + query->max_multi_byte_write = 0; + + query->num_erase_blocks = fwp[query_base * 0x2C]; + + if (!read(query_base * 0x2C, fwp + query_base * 0x2C ,query_base * 4 * (query->num_erase_blocks + 1))) + return false; + + for (i=0; i < query->num_erase_blocks; i++) { + query->erase_block[i].num_sectors = extract_word(fwp + query_base * (0x2D+(4*i)), query_base); + query->erase_block[i].num_sectors++; + + query->erase_block[i].sector_size = (long) 256 * + ( (long)256 * fwp[(query_base * (0x30+(4*i)))] + + fwp[(query_base * (0x2F+(4*i)))] ); + + query->erase_block[i].sector_mask = ~(query->erase_block[i].sector_size - 1); + } + + // Store primary table offset in variable for clarity + offset = query->primary_table_address; + + if ((offset + EXTENDED_QUERY_SIZE) * query_base > TOTAL_QUERY_SIZE) { + return errmsg("Primary extended query larger than TOTAL_QUERY_SIZE (%d)",TOTAL_QUERY_SIZE) ; + } + + + // DEBUG: + //printf("Raw Cfi query:\n"); + //printf(" 0123456789abcdef_123456789abcdef_123456789abcdef\n "); + //for (u_int32_t i = 0x10 * query_base ; i <= (0x30 * query_base); i+= query_base) { + // printf("%02x", fwp[i]); + //} + //printf("\n"); + + u_int32_t dw_aligned_offs = 
(((offset * query_base) >> 2 ) << 2); + + if (!read(dw_aligned_offs , fwp + dw_aligned_offs , EXTENDED_QUERY_SIZE * query_base)) + return false; + + query->primary_extended_query[0] = fwp[query_base * (offset)]; + query->primary_extended_query[1] = fwp[query_base * (offset + 1)]; + query->primary_extended_query[2] = fwp[query_base * (offset + 2)]; + query->primary_extended_query[3] = '\0'; + + if ( query->primary_extended_query[0] != 'P' && + query->primary_extended_query[1] != 'R' && + query->primary_extended_query[2] != 'I') { + return errmsg("Bad primary table address in CFI query"); + } + + query->major_version = fwp[query_base * (offset + 3)]; + query->minor_version = fwp[query_base * (offset + 4)]; + + query->sensitive_unlock = (u_int8_t) (fwp[query_base * (offset+5)] & 0x0F); + query->erase_suspend = (u_int8_t) (fwp[query_base * (offset+6)] & 0x0F); + query->sector_protect = (u_int8_t) (fwp[query_base * (offset+7)] & 0x0F); + query->sector_temp_unprotect = (u_int8_t) (fwp[query_base * (offset+8)] & 0x0F); + query->protect_scheme = (u_int8_t) (fwp[query_base * (offset+9)] & 0x0F); + query->is_simultaneous = (u_int8_t) (fwp[query_base * (offset+10)] & 0x0F); + query->is_burst = (u_int8_t) (fwp[query_base * (offset+11)] & 0x0F); + query->is_page = (u_int8_t) (fwp[query_base * (offset+12)] & 0x0F); + + return true; +} + + + +//////////////////////////////////////////////////////////////////////// +// +// SpiFlash Class Implementation +// +//////////////////////////////////////////////////////////////////////// + +bool SpiFlash::init_gpios() { + + // + // Set Multi SPI CS to output and 0. + // Assuming 4 flashes. If there are less than 4 flashes and there's + // a write attempt, it will fail. + // + + u_int32_t num_of_spis = 4; + u_int32_t spi_en = (1 << (num_of_spis - 1 ) ) -1; + + u_int32_ba dir; + u_int32_ba mod; + u_int32_ba pol; + + // No need to set the data - SPI GW CS does that in HW + //MREAD4(GPIO_DAT_L, &data); + + MREAD4(GPIO_DIR_L, &dir); + MREAD4(GPIO_POL_L, &pol); + MREAD4(GPIO_MOD_L, &mod); + + dir.range (SPI_GPIO_E, SPI_GPIO_S) = spi_en; + pol.range (SPI_GPIO_E, SPI_GPIO_S) = ~spi_en; + mod.range(SPI_GPIO_E, SPI_GPIO_S) = ~spi_en; + + // unlock gpio + MWRITE4(GPIO_LOCK , 0xaaaa); + + MWRITE4(GPIO_DIR_L, dir); + MWRITE4(GPIO_POL_L, pol); + MWRITE4(GPIO_MOD_L, mod); + + return true; +} + +void SpiFlash::close() { + // Chip reset does not reset the chip sellect - Make sure after reset + // boot loads FW from SPI 0. + set_bank(0); + + // unlock gpio + mwrite4(_mf, GPIO_LOCK , 0xaaaa); +} + +bool SpiFlash::get_cmd_set () { + + // + // Read device ID and allocate command set accordingly. + // + + // + // Initiate some CFI fields to mimic cfi query procedure of parallel flash: + // + + _cfi_data.max_multi_byte_write = 16; // In SPI context, this is the transaction size. Max is 16. + _cfi_data.num_erase_blocks = 1; + _cfi_data.erase_block[0].sector_size = 64 * 1024; + _cfi_data.erase_block[0].sector_mask = ~(_cfi_data.erase_block[0].sector_size - 1); + + u_int32_t spi_size = 0; + u_int32_t num_spis = 0; + + for (u_int32_t spi_sel = 0 ; spi_sel < 4 ; spi_sel++) { + if (!set_bank_int(spi_sel)) return false; + + unsigned char es; // electronic signature + u_int32_t cur_spi_size = 0; + + if (!read_id(&es, CmdSetStSpi::FC_RES)) return false; + + if (es >= 0x10 && es < 0x16) { + // Range OK: + + // NOTE: This mapping between electronic signature and device size is device specific! + // This mapping works for ST M25Pxx and Saifun SA25Fxxx families. 
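+            // (Worked example of the formula below: es = 0x15 gives
+            // 1 << (0x15 + 1) = 1 << 22 = 4 MByte, i.e. a 32 Mbit part.
+            // An es of 0xff, by contrast, means the bus floated high -
+            // no device answered on this chip select.)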
+ cur_spi_size = 1 << (es + 1); + + num_spis++; + + if (spi_sel == 0) { + spi_size = cur_spi_size; + } else if (cur_spi_size != spi_size){ + return errmsg("SPI flash #%d of size 0x%x bytes differs in size from SPI flash #%d of size 0x%x bytes. " + "All flash devices must be of the same size.", + spi_sel, + cur_spi_size, + spi_sel - 1, + spi_size); + } + + + } else if (es == 0xff) { + // No spi device on this chip_select + break; + } else { + return errmsg("Unexpected SPI electronic signature value (0x%2x) when detecting flash size. " + "Flash #%d my be defected.", + es, + spi_sel); + } + + // printf("-D- %3d %08x\n", spi_sel, cur_spi_size); + } + + _cfi_data.device_size = spi_size * num_spis; + _log2_bank_size = log2up(spi_size); + + + _cmd_set = new CmdSetStSpi(*this); + + return true; +} + + +bool SpiFlash::set_bank_int(u_int32_t bank) +{ + if (!_mf) { + return errmsg("Not opened"); + } + + // TODO: Check number of banks in open! + if (bank > 3) { + return errmsg("Tried to set bank to %d but %d is the is the largest bank number", bank, 3); + } + + //printf("\n*** Flash::set_bank(0x%lx) : 0x%lx\n", bank, (bank >> 19) & 0x07); + u_int32_ba flash_cs; + flash_cs.range(31,30) = bank; + MWRITE4(FLASH_CS, flash_cs); + + return true; +} // Flash::SpiFlash::set_bank + + +//////////////////////////////////////////////////////////////////////// +bool SpiFlash::wait_ready(const char* msg) +{ + u_int32_ba gw_cmd = 0; + u_int32_t cnt = 0; + do { + // Timeout checks + if (++cnt > FLASH_CMD_CNT) { + return errmsg("Flash gateway timeout: %s.", msg); + } + + MREAD4(FLASH_GW, &gw_cmd); + + } while (gw_cmd[BUSY]); + + return true; +} + + +//////////////////////////////////////////////////////////////////////// +bool SpiFlash::read(u_int32_t addr, u_int32_t *data) +{ + if (!_mf) { + return errmsg("Not opened"); + } + + if (addr & 0x3) { + return errmsg("Address should be 4-bytes aligned."); + } + + if (!set_bank(addr)) + return false; + + + // + // Prepare command word + // + + u_int32_ba gw_cmd; + u_int32_ba gw_addr; + + gw_cmd[BUSY] = 1; + gw_cmd[READ_OP] = 1; + //gw_cmd[ADDR_INCR] = 0; + + gw_cmd.range(MSIZE_E, MSIZE_S) = 2; + + gw_addr.range(SPI_ADDR_E, SPI_ADDR_S) = addr & bank_mask(); + + MWRITE4(FLASH_ADDR, gw_addr); + MWRITE4(FLASH_GW, gw_cmd); + + if (!wait_ready("Read")) + return false; + + MREAD4(FLASH_DATA, data); + + *data = __cpu_to_be32(*data); + + return true; +} // SpiFlash::read + + +//////////////////////////////////////////////////////////////////////// +bool SpiFlash::read_id(u_int8_t *data, u_int8_t cmd) +{ + + // + // Prepare command word + // + + u_int32_ba gw_cmd; + u_int32_ba gw_addr; + u_int32_t flash_data; + + gw_cmd[BUSY] = 1; + gw_cmd[READ_OP] = 1; + gw_cmd[SPI_SPECIAL] = 1; + gw_cmd[SPI_NO_ADDR] = 1; + + gw_cmd.range(MSIZE_E, MSIZE_S) = 2; + + gw_addr.range(SPI_CMD_E, SPI_CMD_S) = cmd; + + MWRITE4(FLASH_ADDR, gw_addr); + MWRITE4(FLASH_GW, gw_cmd); + + if (!wait_ready("Read id")) + return false; + + MREAD4(FLASH_DATA, &flash_data); + + /* ID is at offset 3 in word */ + *data = (u_int8_t)(flash_data & 0xff); + + //printf("-D- ES is %02x\n", *data); + + return true; +} // SpiFlash::read + +// +// TODO: Unify all the block handling code with the CmdSet001 write. 
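+// Reviewer sketch (illustrative only, not referenced by the code below):
+// the same WREN -> PP -> RDSR(WIP) sequence that CmdSetStSpi drives
+// through the HCA flash gateway, restated against a plain byte-wise SPI
+// primitive for reference. spi_txrx()/spi_txrx_wr() are hypothetical
+// helpers assumed for the sketch; the opcodes are the standard ST M25Pxx
+// ones that the FC_* constants in this file name.
+extern bool spi_txrx   (const u_int8_t* tx, int tx_len, u_int8_t* rx, int rx_len);
+extern bool spi_txrx_wr(const u_int8_t* hdr, int hdr_len, const u_int8_t* data, int data_len);
+
+static bool st_spi_page_program_sketch(u_int32_t addr, const u_int8_t* data, int len)
+{
+    u_int8_t cmd[4];
+
+    // 1. WREN (0x06) - set the Write Enable Latch before any PP/SE.
+    cmd[0] = 0x06;
+    if (!spi_txrx(cmd, 1, NULL, 0))
+        return false;
+
+    // 2. PP (0x02) - opcode, 24-bit address, then up to one page of data.
+    cmd[0] = 0x02;
+    cmd[1] = (u_int8_t)(addr >> 16);
+    cmd[2] = (u_int8_t)(addr >> 8);
+    cmd[3] = (u_int8_t)(addr);
+    if (!spi_txrx_wr(cmd, 4, data, len))
+        return false;
+
+    // 3. RDSR (0x05) - poll the status register until WIP (bit 0)
+    //    clears, which is what wait_wip() below does via the gateway.
+    u_int8_t sr;
+    do {
+        cmd[0] = 0x05;
+        if (!spi_txrx(cmd, 1, &sr, 1))
+            return false;
+    } while (sr & 0x01);
+
+    return true;
+}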
+// + +bool SpiFlash::CmdSetStSpi::write (u_int32_t addr, + void* data, + int cnt, + bool noerase, + bool noverify) { + + if (!_f._mf) { + return _f.errmsg("Not opened"); + } + if (addr & 0x3) { + return _f.errmsg("Address should be 4-bytes aligned."); + } + + u_int8_t *p = (u_int8_t *)data; + + u_int32_t block_size = _f._cfi_data.max_multi_byte_write; + u_int32_t block_mask = ~(block_size - 1 ); + + // TODO - Check MAX_WRITE_BUFFER_SIZE against block_size in open (or here) + u_int8_t tmp_buff[MAX_WRITE_BUFFER_SIZE]; + + while (cnt) { + + u_int32_t prefix_pad_size = 0; + u_int32_t suffix_pad_size = 0; + + u_int32_t block_addr = addr & block_mask; + u_int32_t data_size = block_size; + + u_int8_t* block_data = p; + + + // + // First and last cycles (can be the same one) may not be block aligned. + // Check the status, and copy data to a padded temp bufer if not alligned. + // (there's an option to write partial buffer, but Intel reference code always + // writes full buffer, with pads if needed. I do the dame ...) + // + + prefix_pad_size = addr - block_addr; + + if ((addr & block_mask) == ((addr + cnt) & block_mask)) { + suffix_pad_size = block_size - ((addr + cnt) % block_size); + } + + if (suffix_pad_size || prefix_pad_size) { + memset(tmp_buff, 0xff, block_size); + + data_size -= prefix_pad_size; + data_size -= suffix_pad_size; + + memcpy(tmp_buff + prefix_pad_size, p , data_size); + + block_data = tmp_buff; + } + + // + // Bank setup. + // + if (!_f.set_bank(addr)) + return false; + + if (!noerase) { + u_int32_t sector = (addr / _f.get_sector_size()) * _f.get_sector_size(); + if (sector != _curr_sector) { + _curr_sector = sector; + if (!erase_sector(_curr_sector)) + return false; + } + } + + if (_no_burn) + continue; + + // + // Check to see if there's something to do + // + bool all_ffs = true; + for (u_int32_t i = 0; i < block_size ; i++) { + if (block_data[i] != 0xff) { + all_ffs = false; + break; + } + } + + if (!all_ffs) { + + write_block(block_addr, block_data, block_size); + + if (!noverify) { + u_int8_t verify_buffer[MAX_WRITE_BUFFER_SIZE]; + if (!reset()) + return false; + + if (!_f.read(addr, verify_buffer, data_size)) + return false; + + for (u_int32_t i = 0 ; i < data_size ; i++) { + if (verify_buffer[i] != block_data[i + prefix_pad_size]) { + return _f.errmsg("Write verification failed. Addr %08x - exp:%02x act:%02x\n", + addr + i, + block_data[i + prefix_pad_size] , + verify_buffer[i]); + } + } + } + } + + + // + // loop advance + // + + addr += data_size; + p += data_size; + cnt -= data_size; + } + + if (!reset()) + return false; + + return true; +} + +bool SpiFlash::CmdSetStSpi::erase_sector(u_int32_t addr) +{ + + if (_no_erase) + return true; + + u_int32_ba gw_cmd; + u_int32_ba gw_addr; + + + if (!write_enable()) + return false; + + // + // Erase sector command: + // + + gw_cmd[BUSY] = 1; + gw_cmd[SPI_SPECIAL] = 1; + gw_cmd[SPI_NO_DATA] = 1; + + gw_addr.range(SPI_CMD_E, SPI_CMD_S) = FC_SE; + gw_addr.range(SPI_ADDR_E, SPI_ADDR_S) = addr & _f.bank_mask(); + + MWRITE4(FLASH_ADDR, gw_addr); + MWRITE4(FLASH_GW, gw_cmd); + + if (!_f.wait_ready("ES")) + return false; + + // + // Wait for erase completion + // + + if (!wait_wip(ERASE_DELAY, ERASE_CNT)) + return false; + + return true; +} // Flash::erase_sector + + + +bool SpiFlash::CmdSetStSpi::write_block(u_int32_t block_addr, + void* block_data, + u_int32_t block_size) { + + u_int32_ba gw_cmd; + u_int32_ba gw_addr; + + // sanity check ??? remove ??? 
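+    // (The check below is not redundant: write_block encodes the
+    // transfer size into the gateway command as log2up(block_size), so
+    // only the exact max_multi_byte_write size set up at init is valid
+    // here.)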
+ if (block_size != (u_int32_t)_f._cfi_data.max_multi_byte_write) { + return _f.errmsg("Block write of wrong block size. %d instead of %d", + block_size, (u_int32_t)_f._cfi_data.max_multi_byte_write); + } + + if (!write_enable()) + return false; + + // + // Write the data block + // + + + gw_cmd[BUSY] = 1; + gw_cmd[SPI_SPECIAL] = 1; + + gw_cmd.range(MSIZE_E, MSIZE_S) = log2up(block_size); + + gw_addr.range(SPI_CMD_E, SPI_CMD_S) = FC_PP; + gw_addr.range(SPI_ADDR_E, SPI_ADDR_S) = block_addr & _f.bank_mask(); + + MWRITE4(FLASH_ADDR, gw_addr); + + // Data: + for (u_int32_t offs = 0 ; offs < block_size ; offs += 4) { + // NOTE: !!! To much swapping around the data. !!! + // Flash GW in sinai eats full DWords with byte0 as high data. + // TODO: Swap on writes in Parallel flash. Save double swapping for serial flash. + u_int32_t word = *((u_int32_t*)((u_int8_t*)block_data + offs)); + word = __be32_to_cpu(word); + MWRITE4(FLASH_DATA + offs, word ); + } + + MWRITE4(FLASH_GW, gw_cmd); + + if (!_f.wait_ready("PP command")) + return false; + + // + // Wait for end of write in flash (WriteInProgress = 0): + // + + if (!wait_wip(READ_DELAY, READ_CNT_SLOW + READ_CNT_FAST, READ_CNT_FAST)) + return false; + + return true; +} + + +bool SpiFlash::CmdSetStSpi::write_enable() { + + u_int32_ba gw_cmd; + u_int32_ba gw_addr; + + // + // Write enable: + // + + gw_cmd[BUSY] = 1; + gw_cmd[SPI_NO_ADDR] = 1; + gw_cmd[SPI_NO_DATA] = 1; + gw_cmd[SPI_SPECIAL] = 1; + + gw_addr.range(SPI_CMD_E, SPI_CMD_S) = FC_WREN; + + MWRITE4(FLASH_ADDR, gw_addr); + MWRITE4(FLASH_GW, gw_cmd); + + if (!_f.wait_ready("WREN command")) + return false; + + return true; +} + +bool SpiFlash::CmdSetStSpi::wait_wip(u_int32_t delay, u_int32_t retrys, u_int32_t fast_retrys ) { + + + u_int32_ba gw_cmd; + u_int32_ba gw_data; + u_int32_ba gw_addr; + + u_int32_t cnt = 0; + + // + // Read SR: + // + + gw_cmd[BUSY] = 1; + gw_cmd[READ_OP] = 1; + gw_cmd[SPI_NO_ADDR] = 1; + gw_cmd[SPI_SPECIAL] = 1; + + gw_addr.range(SPI_CMD_E, SPI_CMD_S) = FC_RDSR; + + do { + + if (++cnt > fast_retrys) + usleep(delay); + if (cnt > retrys) { + reset(); + return _f.errmsg("Flash write error - Write In Progress bit didn't clear."); + } + + MWRITE4(FLASH_ADDR, gw_addr); + MWRITE4(FLASH_GW, gw_cmd); + + if (!_f.wait_ready("RDSR")) + return false; + + MREAD4(FLASH_DATA, &gw_data); + + } while (gw_data[24]); // WIP bit in status reg - Note byte 0 is in bits 31-24 of data word. 
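+    // (Bit arithmetic of the loop condition: the gateway returns the
+    // flash status register in byte 0 of the data word, which occupies
+    // bits 31-24, so gw_data[24] reads SR bit 0 - the Write In Progress
+    // flag.)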
+ + return true; +} + + +//////////////////////////////////////////////////////////////////////// +// +// Burn Operations functions +// +//////////////////////////////////////////////////////////////////////// + +class Operations : public ErrMsg { +public: + + Operations() : _last_image_addr(0), _num_ports(2), _allow_skip_is(false) {} + + enum { + GUIDS = 4 + }; + + enum ImageInfoTags { + II_IiFormatRevision = 0, + II_FwVersion = 1, + II_FwBuildTime = 2, + II_DeviceType = 3, + II_PSID = 4, + II_VSD = 5, + II_SuppurtedPsids = 6, + II_Last = 7, // Mark the end of used tag ids + II_End = 0xff + }; + + struct ImageInfo; + + bool write_image (Flash& f, u_int32_t addr, void *data, int cnt, bool need_report); + bool WriteSignature (Flash& f, u_int32_t image_idx, u_int32_t sig); + bool repair (Flash& f, const int from, const int to, bool need_report); + bool FailSafe_burn (Flash& f, void *data, int size, bool single_image_burn, bool need_report); + + bool Verify (FBase& f); + bool DumpConf (const char* conf_file = NULL); + + + bool DisplayImageInfo(ImageInfo* info); + + bool QueryAll (FBase& f, ImageInfo* info) {return QueryIs(f, info) && + (!info->isFailsafe || QueryPs(f, info)) && + QueryImage(f, info);} + + bool getBSN (char *s, guid_t *guid); + bool getGUID (const char *s, guid_t *guid); + + bool patchVSD (FImage& f, + const char *user_vsd, + const char *user_psid, + const char *curr_vsd, + const char *curr_psid, + const char *image_psid); + + bool patchGUIDs (FImage& f, guid_t guids[GUIDS], guid_t old_guids[GUIDS], bool interactive); + + void SetNumPorts (u_int32_t num_ports) {_num_ports = num_ports;} + void SetAllowSkipIs (bool asis) {_allow_skip_is = asis;} + + bool ask_user (const char* msg); + + u_int32_t _last_image_addr; + + + // + // ImageInfo struct: Everything you wanted to know about the FW image (and was afraid to ask). + // This struct includes both user's info (psid, dev rev , fwver ...) and tools internal + // info (images locations, guid ptr ...). + // + struct ImageInfo { + ImageInfo() : + invSectOk(false), + psOk(false), + imageOk(false) + { + psid[0] = '\0'; + vsd[0] = '\0'; + for (int i=0; i < II_Last; i++ ) + infoFound[i] = false; + } + + // *Ok : The exit status ofthe specific query. + // Note - invSectOk = true doesnt mean that invariant sector exists, it + // only means that the query was OK (and isFailsafe may be false). 
+
+        bool         invSectOk;
+        bool         psOk;
+        bool         imageOk;
+
+        bool         isFailsafe;
+
+        bool         validImage[2];
+        u_int32_t    psStart;
+        u_int32_t    imgStart;
+
+        guid_t       guids[4];
+        char         vsd[209];
+        char         psid[17];
+
+        u_int8_t     isVer;
+        u_int16_t    fwVer[3];   // = {major_ver, minor_ver , sum_minor_ver}
+        u_int16_t    fwTime[6];  // = {year, month, day, hour, minute, second}
+
+        u_int16_t    devType;
+        u_int8_t     devRev;
+
+        bool         infoFound[II_Last];
+
+    };
+
+
+private:
+
+    bool FailSafe_burn_image   (Flash&       f,
+                                void         *data,
+                                int          ps_addr,
+                                const char*  image_name,
+                                int          image_addr,
+                                int          image_size,
+                                bool         need_report);
+
+    bool CheckInvariantSector  (Flash& f, u_int32_t *data32, int sect_size);
+
+    bool FailSafe_burn_internal (Flash& f, void *data, int cnt, bool need_report);
+
+    bool checkBoot2            (FBase& f, u_int32_t beg, u_int32_t offs,
+                                u_int32_t& next, const char *pref);
+
+    bool checkGen              (FBase& f, u_int32_t beg,
+                                u_int32_t offs, u_int32_t& next, const char *pref);
+
+    bool checkPS               (FBase& f, u_int32_t offs, u_int32_t& next, const char *pref);
+
+    bool checkList             (FBase& f, u_int32_t offs, const char *pref);
+
+    bool extractGUIDptr        (u_int32_t sign, u_int32_t *buf, int buf_len,
+                                char *pref, u_int32_t *ind, int *nguids);
+
+    void patchGUIDsSection     (u_int32_t *buf, u_int32_t ind,
+                                guid_t guids[GUIDS], int nguids);
+
+    u_int32_t BSN_subfield     (const char *s, int beg, int len);
+
+    void _patchVSD             (FImage& f, int ind, char *vsd);
+
+    void PatchPs               (u_int8_t*  rawPs,
+                                const char vsd[VSD_LEN],
+                                const char psid[PSID_LEN] = NULL,
+                                u_int32_t  imageAddr      = 0);
+
+    bool QueryIs               (FBase& f, ImageInfo* info);
+    bool QueryPs               (FBase& f, ImageInfo* info);
+    bool QueryImage            (FBase& f, ImageInfo* info);
+
+    bool ParseInfoSect         (u_int8_t* buff, u_int32_t byteSize, ImageInfo *info);
+
+    u_int32_t _num_ports;
+    bool      _allow_skip_is;
+
+    std::vector<u_int8_t> _fw_conf_sect;
+};
+
+
+//
+// Asks user a yes/no question.
+// Returns true if user chose Y, false if user chose N.
+//
+
+bool Operations::ask_user(const char* msg) {
+    printf("%s", msg);
+    if (_assume_yes)
+        printf("y\n");
+    else {
+        char ansbuff[32];
+        ansbuff[0] = '\0';
+
+        if (!isatty(0)) {
+            return errmsg("Not on tty - can't interact; assuming \"no\" for question \"%s\"", msg);
+        }
+        fflush(stdout);
+        fgets(ansbuff, 30, stdin);
+
+        if ( strcmp(ansbuff, "y\n")   &&
+             strcmp(ansbuff, "Y\n")   &&
+             strcmp(ansbuff, "yes\n") &&
+             strcmp(ansbuff, "Yes\n") &&
+             strcmp(ansbuff, "YES\n"))
+            return errmsg("Aborted by user");
+    }
+    return true;
+}
+
+bool Operations::write_image(Flash& f, u_int32_t addr, void *data, int cnt, bool need_report)
+{
+    u_int8_t  *p        = (u_int8_t *)data;
+    u_int32_t curr_addr = addr;
+    u_int32_t towrite   = cnt;
+    u_int32_t perc      = 0xffffffff;
+
+    //f.curr_sector = 0xffffffff;  // Erase sector first time
+    if (need_report) {
+        printf("000%%");
+        fflush(stdout);
+    }
+
+    while (towrite) {
+        // Write
+        int trans = (towrite > (int)Flash::TRANS) ?
(int)Flash::TRANS : towrite; + if (!f.write(curr_addr, p, trans)) + return errmsg("Flash write failed: %s", f.err()); + p += trans; + curr_addr += trans; + towrite -= trans; + + // Report + if (need_report) { + u_int32_t new_perc = ((cnt - towrite) * 100) / cnt; + if (new_perc != perc) { + printf("\b\b\b\b%03d%%", new_perc); + fflush(stdout); + perc = new_perc; + } + } + } + + if (need_report) { + printf("\b\b\b\b100%%"); + fflush(stdout); + } + + return true; +} // Flash::write_image + + +//////////////////////////////////////////////////////////////////////// +bool Operations::WriteSignature(Flash& f, u_int32_t image_idx, u_int32_t sig) { + u_int32_t sect_size = f.get_sector_size(); + + if (!f.write( sect_size * (image_idx + 1) + 8, &sig, 4, true, false)) + return false; + + return true; +} + + +//////////////////////////////////////////////////////////////////////// +bool Operations::repair(Flash& f, const int from, const int to, bool need_report) +{ + + u_int32_t sect_size = f.get_sector_size(); + + report("Repairing: Copy %s image to %s -", from ? "secondary" : "primary" , + to ? "secondary" : "primary"); + + + // Read valid pointer sector + u_int32_t sect[sizeof(PS)/4]; + report("\b READ %s ", from ? "SPS" : "PPS"); + if (!f.read(from ? sect_size*2 : sect_size, sect, sizeof(sect) , need_report)) { + report("FAILED\n\n"); + return false; + } + report_erase(" READ %s 100%", from ? "SPS" : "PPS"); + + + + u_int32_t im_ptr = sect[0]; + u_int32_t sig = sect[2]; + + TOCPU1(im_ptr); + TOCPU1(sig); + + // Make sure ps ik ok: + if (sig != SIGNATURE) { + return errmsg("Can't copy image. Pointer sector %d signature is bad (%08x).", from, sig); + } + + // Valid image size in bytes + u_int32_t im_size_b; + if (!f.read(sect_size * (from+1) + 4, &im_size_b)) { + report("FAILED\n\n"); + return false; + } + TOCPU1(im_size_b); + + // Valid image size in sectors + u_int32_t im_size_s = (im_size_b + sect_size - 1) / sect_size; + + // Address to copy valid image + u_int32_t write_to = (!to) ? sect_size * 3 : sect_size * (3 + im_size_s); + + // f.read valid image + report(" READ FW "); + fflush(stdout); + char *buf = new char[im_size_b]; + if (!f.read(im_ptr, buf, im_size_b, need_report)) { + report("FAILED\n\n"); + delete [] buf; + return false; + } + report_erase(" READ FW 100%"); + + // Copy it to right place + report("\b WRITE FW "); + fflush(stdout); + if (!write_image(f, write_to, buf, im_size_b, need_report)) { + report("FAILED\n\n"); + delete [] buf; + return false; + } + delete [] buf; + report_erase(" WRITE FW 100%"); + + // Set new image address + // ++++++ + sect[0] = __be32_to_cpu(write_to); + + // Calculate new CRC + // ++++++ + Crc16 crc; + + for (u_int32_t i = 0; i < (sizeof(sect)/4 - 1) ; i++) { + crc << __be32_to_cpu(sect[i]); + } + crc.finish(); + + sect[sizeof(sect)/4 - 1] = __be32_to_cpu(crc.get()); + + // Corrupt signature + u_int32_t valid_signature = sect[2]; + sect[2] = 0xffffffff; + + // Write it to invalid sector + report("\b WRITE %s ", to ? "SPS" : "PPS"); + if (!write_image(f, to ? sect_size*2 : sect_size, sect, sizeof(sect), need_report)) { + report("FAILED\n\n"); + return false; + } + report_erase(" WRITE %s 100%", to ? 
"SPS" : "PPS"); + + // Validate signature + report("\b SIGNATURE "); + if (!WriteSignature(f, to, valid_signature)) { + report("FAILED\n\n"); + return false; + } + + report_erase(" SIGNATURE "); + report(" OK \n"); + return true; +} // Flash::repair + + + + + +//////////////////////////////////////////////////////////////////////// +bool Operations::FailSafe_burn_image(Flash& f, + void *data, + int ps_addr, + const char* image_name, + int image_addr, + int image_size, + bool need_report) { + + u_int8_t* data8 = (u_int8_t*) data; + u_int32_t sect_size = f.get_sector_size(); + + report("Burning %-9s FW image without signatures - ", image_name); + fflush(stdout); + + // Invalidate signature + u_int32_t zeros = 0; + if (!f.write(ps_addr + 8, &zeros, 4, true, false)) { + report("FAILED (Invalidating signature)\n\n"); + return false; + } + + // Burn image (from new offset) + + // Both burnt images are taken from the first image in the file - both images in file are identical. + // (future binary releases may contain a single image). + if (!write_image(f, image_addr, data8 + sect_size * 3, image_size, need_report)) { + report("FAILED\n\n"); + return false; + } + report("\b\b\b\bOK \n"); + report("Restoring %-9s signature - ", image_name); + fflush(stdout); + + // Burn PS + if (!write_image(f, ps_addr, data8 + ps_addr, sect_size, false)) { + report("FAILED\n\n"); + return false; + } + + // Validate signature + u_int32_t sig = SIGNATURE; + TOCPU1(sig); + if (!f.write(ps_addr + 8, &sig, 4, true, false)) { + report("FAILED\n\n"); + return false; + } + + report("OK \n"); + + return true; +} + + +//////////////////////////////////////////////////////////////////////// +bool Operations::FailSafe_burn_internal(Flash& f, void *data, int cnt, bool need_report) +{ + u_int32_t *data32 = (u_int32_t *)data; + + u_int32_t sect_size = f.get_sector_size(); + + // Extract Primary/Secondary image pointers and lengths + u_int32_t prim_ptr = data32[sect_size / 4]; + u_int32_t prim_len = data32[sect_size / 4 + 1]; + u_int32_t scnd_ptr = data32[(sect_size * 2) / 4]; + u_int32_t scnd_len = data32[(sect_size * 2) / 4 + 1]; + TOCPU1(prim_ptr); + TOCPU1(prim_len); + TOCPU1(scnd_ptr); + TOCPU1(scnd_len); + if ((cnt < (int)(prim_ptr + prim_len)) || (cnt < (int)(scnd_ptr + scnd_len))) { + return errmsg("Invalid image: too small."); + } + if (prim_len != scnd_len) { + return errmsg("Invalid image: two FW images should be in a same size."); + } + + // Image size from flash + u_int32_t old_im_size; + if (!f.read(sect_size + 4, &old_im_size)) { + report("FAILED\n\n"); + return false; + } + TOCPU1(old_im_size); + + u_int32_t prim_order; + u_int32_t scnd_order; + + u_int32_t ps_addr[2]; + u_int32_t image_addr[2]; + char* image_name[2]; + + + if (prim_len > old_im_size) { + scnd_order = 0; + prim_order = 1; + } else { + prim_order = 0; + scnd_order = 1; + } + + image_name[scnd_order] = "Secondary"; + image_addr[scnd_order] = scnd_ptr; + ps_addr [scnd_order] = sect_size * 2; + + image_name[prim_order] = "Primary"; + image_addr[prim_order] = prim_ptr; + ps_addr [prim_order] = sect_size; + + + for (int i = 0 ; i < 2 ; i++) { + if (!FailSafe_burn_image(f, data, ps_addr[i], image_name[i], image_addr[i], prim_len, need_report)) { + return false; + } + } + + return true; +} + +bool Operations::CheckInvariantSector(Flash& f, u_int32_t *data32, int sect_size) { + int i; + + report("\nRead and verify Invariant Sector - "); + fflush(stdout); + + // Once more check signature - the Inv.Sector signature should be OK + u_int32_t signature; + if 
(!f.read(0x24, &signature)) { + report("FAILED\n\n"); + return false; + } + TOCPU1(signature); + if (signature != SIGNATURE) { + report("FAILED\n\n"); + return errmsg("Flash has wrong signature in Invariant Sector (Expected %08x, got %08x).", SIGNATURE, signature); + } + + // Now check Invariant sector contents + vector buf1(sect_size/4); + + if (!f.read(0, &buf1[0] , sect_size)) { + report("FAILED\n\n"); + return false; + } + + int first_diff = -1; + + for (i=0; i < sect_size/4; i++) { + if (buf1[i] != data32[i] && (data32[i] != 0 || buf1[i] != 0xffffffff)) { + if (first_diff == -1) + first_diff = i; + } + } + + // Check if a diff was found: + if (first_diff != -1) { + report("DIFF DETECTED\n\n"); + printf(" Invariant sector mismatch. Address 0x%x " + " in image: 0x%08x, while in flash: 0x%08x\n\n", + first_diff*4 , data32[first_diff], buf1[first_diff]); + + printf(" The invariant sector can not be burnt in a failsafe manner.\n" + " To force burn of the invariant sector, rerun with -nofs flag.\n"); + + if (_allow_skip_is) { + printf(" You can also continue to update the FW without updating the invariant sector.\n" + " See the firmware release notes for more details.\n\n"); + + return ask_user(" Do you want to continue ? "); + + } else { + // Continue with burn + printf(" You can also update the FW without updating the invariant sector by\n" + " specifying the -skip_is flag.\n" + " See the firmware release notes for more details.\n\n"); + + return errmsg("Invariant sector mismatch"); + } + } + + report("OK\n"); + return true; + +} + +//////////////////////////////////////////////////////////////////////// +bool Operations::FailSafe_burn(Flash& f, void *data, int size, bool single_image_burn, bool need_report) +{ + u_int32_t *data32 = (u_int32_t *)data; + u_int8_t *data8 = (u_int8_t *)data; + + u_int32_t i; + + u_int32_t sect_size = f.get_sector_size(); + + if (size < (int)sect_size * 3) { + report("FAILED\n\n"); + return errmsg("Image is too small."); + } + + if (!CheckInvariantSector(f, data32, sect_size)) { + return false; + } + + // Check signatures in image + u_int32_t actual_signature = data32[sect_size/4 + 2]; + + u_int32_t signature_for_compare = actual_signature; + + TOCPU1(signature_for_compare); + if (signature_for_compare != SIGNATURE) { + return errmsg("Bad image file given: signature in PPS is 0x%08x (should be 0x%08x)", + signature_for_compare, SIGNATURE); + } + signature_for_compare = data32[(sect_size * 2)/4 + 2]; + TOCPU1(signature_for_compare); + if (signature_for_compare != SIGNATURE) { + return errmsg("Bad image file given: signature in SPS is 0x%08x (should be 0x%08x)", + signature_for_compare, SIGNATURE); + } + + // Corrupt signatures in image + data32[sect_size/4 + 2] = 0xffffffff; + data32[(sect_size * 2)/4 + 2] = 0xffffffff; + + bool cur_image_ok[2] = {false, false}; + u_int32_t cur_image_addr[2]; + u_int32_t cur_image_size[2]; + + // Check signatures on flash + report("Read and verify PPS/SPS in flash - "); + for (i = 0 ; i < 2 ; i++) { + if (!f.read(sect_size * (i+1) + 8, &signature_for_compare)) { + + } + TOCPU1(signature_for_compare); + if (signature_for_compare == SIGNATURE) { + cur_image_ok[i] = true; + + if (!f.read(sect_size * (i+1) , &cur_image_addr[i]) || + !f.read(sect_size * (i+1) + 4, &cur_image_size[i])) { + report("FAILED\n\n"); + return false; + } + + TOCPU1(cur_image_addr[i]); + TOCPU1(cur_image_size[i]); + } + } + + if (!cur_image_ok[0] && !cur_image_ok[1]) { + // + // Both images are invalid in flash + // -------------------------------- + // + 
printf("\nBoth images (primary and secondary) are invalid in flash.\n"); + printf("The burning can't be failsafe, but it is harmless for host.\n"); + if(!ask_user("\n Do you want to continue ? (y/n) [n] : ")) { + return false; + } + + // Burn all image + report("Burn FW image without signatures - "); + fflush(stdout); + if (!write_image(f, sect_size, data8 + sect_size, size - sect_size, need_report)) { + report("FAILED\n\n"); + return false; + } + report("\b\b\b\bOK \n"); + + // Restore signatures + report("Restore right signatures - "); + fflush(stdout); + if (!WriteSignature(f, 0, actual_signature)) { + report("FAILED (PPS Signature)\n\n"); + return false; + } + if (!WriteSignature(f, 1, actual_signature)) { + report("FAILED (SPS Signature)\n\n"); + return false; + } + report("OK\n"); + return true; + } else { + report("OK\n"); + } + + if (single_image_burn == false) { + + if (cur_image_ok[0] == false || cur_image_ok[1] == false) { + int image_from; + int image_to; + + assert (cur_image_ok[1] || cur_image_ok[0]); + + if (cur_image_ok[1]) { + image_from = 1; + image_to = 0; + } else { + image_from = 0; + image_to = 1; + } + + report("Reparable Error Detected.\n"); + if (!repair(f, image_from, image_to, need_report)) + return false; + } + + // + // Both images are valid in flash + // + return FailSafe_burn_internal(f, data, size, need_report); + + } else { + + // + // Single image burn: + // + + // Extract Primary/Secondary image pointers and lengths + u_int32_t frst_new_image_addr = data32[sect_size / 4]; + u_int32_t frst_new_image_size = data32[sect_size / 4 + 1]; + TOCPU1(frst_new_image_addr); + TOCPU1(frst_new_image_size); + + if (!cur_image_ok[0] && cur_image_ok[1]) { + // Second image is valid on flash. + // If the new image can fit in the first image gap, it would be + // burnt as first image. + // Otherwise (new image too big), image on flash is copied from second to + // first image, and new image would be written as second. + + if (frst_new_image_addr + frst_new_image_size > cur_image_addr[1]) { + // New image is too large - can't get in between first image start + // and current (second) image - move current image to be first. + if (!repair(f, 1, 0, need_report)) + return false; + + // Now 2 images are valid + cur_image_ok[0] = true; + } else { + if (!FailSafe_burn_image(f, data, sect_size, "first", sect_size * 3, frst_new_image_size, need_report)) + return false; + + if (!WriteSignature(f, 1, 0)) + return false; + + return true; + } + } + + if (cur_image_ok[0] && cur_image_ok[1]) { + + // Invalidate second image + if (!WriteSignature(f, 1, 0)) { + report("FAILED\n"); + return false; + } + + cur_image_ok[1] = false; + } + + if (cur_image_ok[0] && !cur_image_ok[1]) { + u_int32_t new_image_size_sect = ((frst_new_image_size - 1) / sect_size) + 1 ; + + // First image is valid on flash. + // If the new image is smaller than current image, it would + // overwrite the end of current image. In this case, move the current image + // to the second position and burn in first. + // + // TODO: STOP THIS MOVEMENT BULLSHI%@#&! !!! : Reproduce PS in flint with the correct addr. Locate second image in middle of flash. + + if ( (3 + new_image_size_sect) * sect_size < cur_image_addr[0] + cur_image_size[0]) { + // New image overwrites end of cur image + // move current image to be second. 
+ if (!repair(f, 0, 1, need_report)) + return false; + + // Now 2 images are valid + cur_image_ok[1] = true; + + // Burn new image as firse + if (!FailSafe_burn_image(f, data, sect_size, "first", + sect_size * 3, frst_new_image_size, need_report)) + return false; + + if (!WriteSignature(f, 1, 0)) + return false; + + return true; + + + } else { + if (!FailSafe_burn_image(f, data, sect_size * 2, "second", + sect_size * (3 + new_image_size_sect) , frst_new_image_size, need_report)) + return false; + + // Invalidate first image + if (!WriteSignature(f, 0, 0)) + return false; + + return true; + + } + + + } else { + report("Bad flash state: Valid images = (%d,%d).\n", cur_image_ok[0], cur_image_ok[1] ); + return false; + } + + } + + return true; +} + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// VERIFY FLASH // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +bool Operations::checkBoot2(FBase& f, u_int32_t beg, u_int32_t offs, + u_int32_t& next, const char *pref) +{ + u_int32_t size; + + char *pr = (char *)alloca(strlen(pref) + 512); + + sprintf(pr, "%s /0x%08x/ (BOOT2)", pref, offs+beg); + + // Size + READ4(f, offs+beg+4, &size, pr); + TOCPU1(size); + if (size > 1048576 || size < 4) { + report("%s /0x%08x/ - unexpected size (0x%x)\n", pr, offs+beg+4, size); + return false; + } + + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (BOOT2)", pref, offs+beg, + offs+beg+(size+4)*4-1, (size+4)*4); + + Crc16 crc; + u_int32_t *buff = (u_int32_t*)alloca((size + 4)*sizeof(u_int32_t)); + + READBUF(f, offs+beg, buff, size*4 + 16, pr); + TOCPUn(buff, size+4); + CRC1n(crc, buff, size+4); + crc.finish(); + u_int32_t crc_act = buff[size+3]; + if (crc.get() != crc_act) { + report("%s /0x%08x/ - wrong CRC (exp:0x%x, act:0x%x)\n", + pr, offs+beg, crc.get(), crc_act); + return false; + } + + if (_print_crc) + report("%s - OK (CRC:0x%04x)\n", pr, crc_act&0xffff); + else + report("%s - OK\n", pr); + next = offs + size*4 + 16; + return true; +} // checkBoot2 + +static int part_cnt; + +//////////////////////////////////////////////////////////////////////// +bool Operations::checkGen(FBase& f, u_int32_t beg, + u_int32_t offs, u_int32_t& next, const char *pref) +{ + char *pr = (char *)alloca(strlen(pref) + 100); + + u_int32_t size=0; + GPH gph; + + // GPH + sprintf(pr, "%s /0x%08x/ (GeneralHeader)", pref, offs+beg); + READBUF(f, offs+beg, &gph, sizeof(GPH), pr); + TOCPUBY(gph); + + // Body + + part_cnt++; + + // May be BOOT3? 
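+    // (Heuristic: only the first two partitions are retried as BOOT2
+    // when the type is out of range - boot sections carry no general
+    // header, so an unrecognized type early in the list is assumed to
+    // be one.)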
+ if (gph.type < H_FIRST || gph.type > H_LAST) { + if (part_cnt > 2) { + //report("%s /0x%x/ - Invalid partition type (%d)\n", + // pref, offs+beg, gph.type); + //return false; + } else + return checkBoot2(f, beg, offs, next, pref); + } + + // All partitions here + offs += beg; + switch (gph.type) { + case H_DDR: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (DDR)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_CNF: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (Configuration)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_JMP: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (Jump addresses)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_EMT: + size = gph.size; + size = (size + 3) / 4 * 4; + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (EMT Service)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_FW_CONF: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (FW Configuration)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_ROM: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (ROM)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_USER_DATA: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (User Data)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_IMG_INFO: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (Image Info)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_BOARD_ID: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (Board ID)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + case H_GUID: + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (GUID)", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4); + break; + default: + // For forward compatibility, try analyzing even if section type is uncknown + // Assuming the size is in DW, like all other sections (except emt service). + // If this assumption is wrong, CRC calc would fail - no harm done. + size = gph.size * sizeof(u_int32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (UNKNOWN SECTION TYPE (%d))", + pref, offs, offs+size+(u_int32_t)sizeof(gph)+3, + size+(u_int32_t)sizeof(gph)+4, gph.type); + + } + + // CRC + Crc16 crc; + + // Fix for win32: alloca fails on large allocations. + // TODO: Mem leak possible - fix. 
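+    // (The switch from alloca() to new[] trades automatic cleanup for
+    // working with large sections on win32 stacks; note the matching
+    // delete [] before this function returns.)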
+    //u_int32_t *buff = (u_int32_t*)alloca(size);
+    u_int32_t *buff = new u_int32_t[size/4];
+
+    READBUF(f, offs+sizeof(gph), buff, size, pr);
+    TOCPUn(buff, size/4);
+    CRCBY(crc, gph);
+    CRCn(crc, buff, size/4);
+    crc.finish();
+    u_int32_t crc_act;
+    READ4(f, offs+sizeof(gph)+size, &crc_act, pr);
+    TOCPU1(crc_act);
+    if (crc.get() != crc_act) {
+        report("%s /0x%08x/ - wrong CRC (exp:0x%x, act:0x%x)\n",
+               pr, offs, crc.get(), crc_act);
+        return false;
+    }
+
+    if (_print_crc)
+        report("%s - OK (CRC:0x%04x)\n", pr, crc_act&0xffff);
+    else
+        report("%s - OK\n", pr);
+    next = gph.next;
+
+    if (gph.type == H_FW_CONF) {
+        _fw_conf_sect.clear();
+        _fw_conf_sect.insert(_fw_conf_sect.end(),
+                             vector<u_int8_t>::iterator((u_int8_t*)buff),
+                             vector<u_int8_t>::iterator((u_int8_t*)buff + size));
+    }
+
+    // mark last read addr
+    _last_image_addr = offs + size + sizeof(gph) + 4; // the 4 is for the trailing crc
+
+    delete [] buff;
+
+    return true;
+} // checkGen
+
+////////////////////////////////////////////////////////////////////////
+bool Operations::checkPS(FBase& f, u_int32_t offs, u_int32_t& next, const char *pref)
+{
+    Crc16 crc;
+    PS    ps;
+    f.read(offs, &ps, sizeof(ps));
+    TOCPUBY(ps);
+
+    // Signature
+    if (ps.signature != SIGNATURE) {
+        report("%s Pointer Sector /0x%08x/ - invalid signature (%08x)\n",
+               pref, offs, ps.signature);
+        return false;
+    }
+
+    // CRC
+    CRC1BY(crc, ps);
+    crc.finish();
+    if (crc.get() != ps.crc016) {
+        report("%s Pointer Sector /0x%08x/ - wrong CRC (exp:0x%x, act:0x%x)\n",
+               pref, offs, ps.crc016, crc.get());
+        return false;
+    }
+
+    next = ps.fi_addr;
+    if (_print_crc)
+        report("%s Image /0x%08x-0x%08x (0x%06x)/ (Pointer Sector) - OK (CRC:0x%04x)\n", pref, offs,
+               offs+(u_int32_t)sizeof(ps)-1, (u_int32_t)sizeof(ps), ps.crc016&0xffff);
+    else
+        report("%s Image /0x%08x-0x%08x (0x%06x)/ (Pointer Sector) - OK\n", pref, offs,
+               offs+(u_int32_t)sizeof(ps)-1, (u_int32_t)sizeof(ps));
+    return true;
+} // checkPS
+
+////////////////////////////////////////////////////////////////////////
+bool Operations::checkList(FBase& f, u_int32_t offs, const char *pref)
+{
+    u_int32_t next_ptr;
+
+    CHECKB2(f, offs, 0x28, next_ptr, pref);
+    part_cnt = 1;
+    while (next_ptr && next_ptr != 0xff000000)
+        CHECKGN(f, offs, next_ptr, next_ptr, pref);
+
+    return true;
+} // checkList
+
+////////////////////////////////////////////////////////////////////////
+bool Operations::Verify(FBase& f)
+{
+    u_int32_t prim_ptr, scnd_ptr;
+    u_int32_t signature;
+
+    bool ret = true;
+
+    READ4(f, 0x24, &signature, "Signature");
+    TOCPU1(signature);
+    if (signature == SIGNATURE) {
+        // Full image
+        _image_is_full = true;
+        report("\nFailsafe image:\n\n");
+        CHECKB2(f, 0, 0x28, prim_ptr, "Invariant ");
+        report("\n");
+        if (checkPS(f, f.get_sector_size(), prim_ptr, "Primary  "))
+            ret &= checkList(f, prim_ptr, "               ");
+        report("\n");
+        if (checkPS(f, f.get_sector_size() * 2, scnd_ptr, "Secondary"))
+            CHECKLS(f, scnd_ptr, "               ");
+    } else {
+        // Short image
+        _image_is_full = false;
+        report("\nShort image:\n");
+        CHECKLS(f, 0, "    ");
+    }
+
+    return ret;
+} // Verify
+
+
+bool Operations::DumpConf (const char* conf_file) {
+#ifndef NO_ZLIB
+
+    FILE* out;
+    if (conf_file == NULL) {
+        out = stdout;
+    } else {
+        out = fopen(conf_file, "w");
+
+        if (out == NULL) {
+            return errmsg("Can't open file %s for write: %s.", conf_file, strerror(errno));
+        }
+    }
+
+    if (_fw_conf_sect.empty()) {
+        return errmsg("FW configuration section not found in the given image.");
+    }
+
+    // restore endianness.
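+    // (destLen below is only an initial guess: a 10x expansion factor
+    // is assumed, since the section apparently carries no
+    // uncompressed-size field, and zlib's uncompress() rewrites destLen
+    // with the actual output size on return - hence the resize
+    // afterwards.)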
+    TOCPUn(&(_fw_conf_sect[0]), _fw_conf_sect.size()/4);
+
+    // uncompress:
+    uLongf destLen = _fw_conf_sect.size();
+    destLen *= 10;
+    vector<u_int8_t> dest(destLen);
+
+    int rc = uncompress((Bytef *)&(dest[0]), &destLen,
+                        (const Bytef *)&(_fw_conf_sect[0]), _fw_conf_sect.size());
+
+    if (rc != Z_OK)
+    {
+        return errmsg("Failed uncompressing FW Info section. uncompress returns %d", rc);
+    }
+
+    dest.resize(destLen + 1);
+    dest[destLen] = 0; // Terminating NULL
+    fprintf(out, "%s", (char*)&(dest[0]));
+
+    if (conf_file != NULL) {
+        fclose(out);
+    }
+
+    return true;
+#else
+    return errmsg("Executable was compiled with \"dump configuration\" option disabled.");
+#endif
+
+} // DumpConf
+
+
+
+////////////////////////////////////////////////////////////////////////
+//                                                                    //
+// ****************************************************************** //
+//                        GUIDs TREATMENT                             //
+// ****************************************************************** //
+//                                                                    //
+////////////////////////////////////////////////////////////////////////
+#define GETGUID(s, g) do { if (!ops.getGUID(s,g)) return 1; } while (0)
+#define GETBSN(s, g)  do { if (!ops.getBSN(s,g))  return 1; } while (0)
+
+#define BSN_RET do {                                                    \
+    printf("Invalid BSN. Should be MTxxxxx[-]R[xx]ddmmyy-nnn[-cc]\n");  \
+    return false;                                                       \
+} while(0)
+#define BSN_RET1(s) do {                                                     \
+    printf("Valid BSN format is: MTxxxxx[-]R[xx]ddmmyy-nnn[-cc]\n%s.\n",s);  \
+    return false;                                                            \
+} while(0)
+u_int32_t Operations::BSN_subfield(const char *s, int beg, int len)
+{
+    char buf[64];
+    strncpy(buf, &s[beg], len);
+    buf[len] = '\0';
+    return strtoul(&buf[0], 0, 10);
+}
+bool Operations::getBSN(char *s, guid_t *guid)
+{
+    const u_int64_t COMPANY_ID = 0x0002c9;
+    const u_int64_t TYPE       = 1;
+    bool       cc_present = false;
+    char       *p;
+    int        date_offs = 0;
+    int        i;
+
+    // Convert to lowercase
+    for (p = s; *p; p++)
+        *p = (char)tolower(*p);
+
+    // Check validity
+    p = s;
+    if (strncmp(p, "mt", 2))     // MT
+        BSN_RET;
+    p += 2;
+    for (i=0; i<5; i++)
+        if (!isdigit(*p++))      // xxxxx
+            BSN_RET;
+    if (*p == '-') {             // - /optional/
+        p++;
+        date_offs++;
+    }
+    if (*p < 'a' || *p > 'z')    // R
+        BSN_RET;
+    p++;
+
+    // Count how many digits after R
+    char *q = p;
+    int  ndigits=0;
+    while (isdigit(*q++))
+        ndigits++;
+
+    switch (ndigits) {
+    case 6:
+        p += 6;                  // skip ddmmyy
+        break;
+    case 8:
+        p += 8;                  // skip xxddmmyy
+        date_offs += 2;
+        break;
+    default:
+        BSN_RET;
+    }
+
+    if (*p++ != '-')             // -
+        BSN_RET;
+    for (i=0; i<3; i++)          // nnn
+        if (!isdigit(*p++))
+            BSN_RET;
+    if (*p) {
+        cc_present = true;
+        if (*p++ != '-')         // -
+            BSN_RET;
+        for (i=0; i<2; i++)      // cc
+            if (!isdigit(*p++))
+                BSN_RET;
+    }
+
+    u_int32_t dd = BSN_subfield(s, 8+date_offs, 2);
+    if (dd > 31)
+        BSN_RET1("Day (dd) should not exceed 31");
+    if (!dd)
+        BSN_RET1("Day (dd) can't be zero");
+    u_int32_t mm = BSN_subfield(s, 10+date_offs, 2);
+    if (mm > 12)
+        BSN_RET1("Month (mm) should not exceed 12");
+    if (!mm)
+        BSN_RET1("Month (mm) can't be zero");
+    u_int32_t yy = BSN_subfield(s, 12+date_offs, 2);
+    if (yy > 99)
+        BSN_RET1("Year (yy) should not exceed 99");
+    if (!yy)
+        BSN_RET1("Year (yy) can't be zero");
+    u_int32_t num = BSN_subfield(s, 15+date_offs, 3);
+    if (num > 999)
+        BSN_RET1("Number (num) should not exceed 999");
+    if (!num)
+        BSN_RET1("Number (num) can't be zero");
+    int cc = 1;
+    if (cc_present) {
+        cc = BSN_subfield(s, 19+date_offs, 2);
+        if (cc > 14)
+            BSN_RET1("Chip number (cc) should not exceed 14");
+        if (!cc)
+            BSN_RET1("Chip number (cc) can't be zero");
+    }
+    u_int64_t id = ((((yy*12+mm-1)*31+ dd-1) * 1000) + num-1) * 112;
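+    // (The id encoding packs the BSN into a unique unit index: date and
+    // sequence number select the unit, each unit owns a block of
+    // 112 = 14 * 8 values - up to 14 chips (cc <= 14 is enforced above)
+    // with what appears to be 8 GUIDs per chip - and (cc-1)*8 on the
+    // next line selects the chip's sub-block. E.g. dd=01, mm=12, yy=02,
+    // num=074, cc=01 gives id = (((2*12+12-1)*31 + 0)*1000 + 73)*112
+    // = 121528176.)
+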
id += (cc-1)*8; + + u_int64_t g = (COMPANY_ID << 40) | (TYPE << 32) | id; + guid->h = (u_int32_t)(g>>32); + guid->l = (u_int32_t)g; + return true; +} + +bool Operations::getGUID(const char *s, guid_t *guid) +{ + char* endp; + u_int64_t g; + + g = strtoull(s, &endp, 16); + if (*endp || (g == 0xffffffffffffffffULL && errno == ERANGE)) { + printf("Invalid GUID syntax (%s) %s \n", + s, + errno ? strerror(errno) : "" ); + return false; + } + guid->h = (u_int32_t)(g >> 32); + guid->l = (u_int32_t)(g & 0xffffffff); + return true; +} // getGUID + +//////////////////////////////////////////////////////////////////////// +bool Operations::extractGUIDptr(u_int32_t sign, u_int32_t *buf, int buf_len, + char *pref, u_int32_t *ind, int *nguids) +{ + u_int32_t offs = 0; + + // Check signature + if (sign) { + u_int32_t signature = buf[(sign + 8)/4]; + TOCPU1(signature); + if (signature != SIGNATURE) { + printf("%s pointer section not valid\n", pref); + return false; + } + offs = buf[sign/4]; + TOCPU1(offs); + } + + // Get GUID ptr + *ind = buf[(offs+0x24)/4]; + TOCPU1(*ind); + *ind += offs; + if (*ind >= (u_int32_t)buf_len) { + printf("%s image - insane GUID pointer (%08x)\n", pref, *ind); + return false; + } + *nguids = buf[*ind/4 - 3]; + TOCPU1(*nguids); + *nguids /= 2; + + // More sanity check + if (*nguids > GUIDS) { + printf("%s image - insane number of GUIDs (%d)\n", pref, *nguids); + return false; + } + + return true; +} // extractGUIDptr + +//////////////////////////////////////////////////////////////////////// +void Operations::patchGUIDsSection(u_int32_t *buf, u_int32_t ind, + guid_t guids[GUIDS], int nguids) +{ + u_int32_t i, word; + u_int32_t new_buf[GUIDS*2]; + Crc16 crc; + + // Form new GUID section + for (i=0; i<(u_int32_t)nguids; i++) { + new_buf[i*2] = guids[i].h; + new_buf[i*2+1] = guids[i].l; + } + + // Calculate new CRC16 + for (i=ind/4 - 4; ivsd[0], vsd, VSD_LEN + PSID_LEN); + + u_int32_t *qp = (u_int32_t *)ps; + for (unsigned int i=0; icrc016 = __cpu_to_be32(crc016); +} // _patchVSD + + +// +// PatchPs() : +// This func assumes it gets a pointer (rawPs) to a valid PS. +// It patches the PS with the given data, recalculated CRC , +// and copies it back to the rawPs. +// + +void Operations::PatchPs(u_int8_t* rawPs, + const char* vsd, + const char* psid, + u_int32_t imageAddr) { + + Crc16 crc; + PS *ps = (PS*)rawPs; + + u_int32_t fix_start = 0; + u_int32_t fix_end = 0; + + if (vsd) { + u_int32_t len = strlen(vsd); + + memset(&ps->vsd[0], 0, VSD_LEN ); + memcpy(&ps->vsd[0], vsd, len); + + fix_end += VSD_LEN; + } else { + fix_start +=VSD_LEN; + } + if (psid) { + u_int32_t len = strlen(psid); + + memset(&ps->psid[0], 0, PSID_LEN ); + memcpy(&ps->psid[0], psid, len ); + fix_end += PSID_LEN; + } + + //vsd is kept in flash byte-swapped. + //recode it back before patching + u_int32_t *qp; + + qp = (u_int32_t *)&ps->vsd[0]; + for (u_int32_t i=fix_start; ifi_addr = __cpu_to_be32(imageAddr); + } + + qp = (u_int32_t *)ps; + for (unsigned int i=0; icrc016 = __cpu_to_be32(crc016); + +} + + +//////////////////////////////////////////////////////////////////////// +//Note that vsd1 is a string of bytes. +bool Operations::patchVSD(FImage& f, + const char *user_vsd, + const char *user_psid, + const char *curr_vsd, + const char *curr_psid, + const char *image_psid) +{ + const char* vsd_to_use = curr_vsd ? 
curr_vsd : ""; + const char* psid_to_use = image_psid; + + // Form new VSD + + if (user_psid) { + // New psid is explicitly given - take it from user + printf("\n You are about to replace current PSID in the image file - \"%s\" with a different PSID - \"%s\".\n" + " Note: It is highly recommended NOT to change the image PSID.\n", user_psid, image_psid); + + if (! ask_user("\n Is it OK ? (y/n) [n] : ")) + return false; + + psid_to_use = user_psid; + } + + if (user_vsd) { + vsd_to_use = user_vsd; + } + + + if (curr_psid && strncmp( psid_to_use, (char*) curr_psid, PSID_LEN)) { + printf("\n You are about to replace current PSID in flash - \"%s\" with a different PSID - \"%s\".\n", curr_psid, psid_to_use); + + if (! ask_user("\n Is it OK ? (y/n) [n] : ")) + return false; + } + + PatchPs((u_int8_t*)f.getBuf() + f.get_sector_size(), vsd_to_use, psid_to_use); + PatchPs((u_int8_t*)f.getBuf() + f.get_sector_size() * 2, vsd_to_use, psid_to_use); + + return true; +} // pathVSD + + +//////////////////////////////////////////////////////////////////////// +bool Operations::patchGUIDs(FImage& f, guid_t new_guids[GUIDS], guid_t old_guids[GUIDS], bool interactive) +{ + guid_t image_file_guids[GUIDS]; + guid_t* used_guids = old_guids ? old_guids : new_guids; + u_int32_t *buf = f.getBuf(); + int buf_len = f.getBufLength(); + u_int32_t signature = buf[0x24/4]; + u_int32_t ind1=0,ind2=0; + int nguid1, nguid2; + + TOCPU1(signature); + if (signature == SIGNATURE) { + // Full image + if (interactive) + printf("\nFull image:\n\n"); + if (!extractGUIDptr(f.get_sector_size() , buf, buf_len, "Primary" , &ind1, &nguid1) && + !extractGUIDptr(f.get_sector_size() *2, buf, buf_len, "Secondary", &ind2, &nguid2)) + return false; + + } else { + // Short image + if (interactive) + printf("\nShort image:\n\n"); + if (!extractGUIDptr(0, buf, buf_len, "Primary", &ind1, &nguid1)) + return false; + } + + // Print old GUIDs and get confirmation + if (interactive && new_guids) { + bool image_file_old_guids_fmt = nguid1 < GUIDS; + for (int i=0; i 1) + printf(" Port2: " GUID_FORMAT "\n", old_guids[2].h,old_guids[2].l); + if (!image_file_old_guids_fmt) + printf(" Sys.Image: " GUID_FORMAT "\n", old_guids[3].h,old_guids[3].l); + } + + printf("\n You are about to burn the image with the following GUIDs:\n"); + printf(" Node: " GUID_FORMAT "\n", new_guids[0].h,new_guids[0].l); + printf(" Port1: " GUID_FORMAT "\n", new_guids[1].h,new_guids[1].l); + if (_num_ports > 1) + printf(" Port2: " GUID_FORMAT "\n", new_guids[2].h,new_guids[2].l); + if (!image_file_old_guids_fmt) + printf(" Sys.Image: " GUID_FORMAT "\n", new_guids[3].h,new_guids[3].l); + + if (!ask_user("\n Is it OK ? 
(y/n) [n] : ")) + return false; + + used_guids = new_guids; + } + + // Path GUIDs section + if (ind1) + patchGUIDsSection(buf, ind1, used_guids, nguid1); + if (ind2) + patchGUIDsSection(buf, ind2, used_guids, nguid2); + + if (!interactive) { + bool old_guids_fmt = nguid1 < GUIDS; + printf("\n Burn image with the following GUIDs:\n"); + printf(" Node: " GUID_FORMAT "\n", used_guids[0].h,used_guids[0].l); + printf(" Port1: " GUID_FORMAT "\n", used_guids[1].h,used_guids[1].l); + if (_num_ports > 1) + printf(" Port2: " GUID_FORMAT "\n", used_guids[2].h,used_guids[2].l); + if (!old_guids_fmt) + printf(" Sys.Image: " GUID_FORMAT "\n", used_guids[3].h,used_guids[3].l); + } + return true; +} // patchGUIDs + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// Revision info and board ID // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// + +bool Operations::QueryIs (FBase& f, + Operations::ImageInfo* info) { + u_int32_t signature; + + READ4(f, 0x24, &signature, "Signature"); + TOCPU1(signature); + if (signature == SIGNATURE) { + // Full image + info->isFailsafe = true; + + // FW ID + u_int32_t fw_id; + + READ4(f, 0x10, &fw_id, "FW ID"); + TOCPU1(fw_id); + + info->isVer = ( fw_id >> 8) && 0xff; + info->devRev = fw_id >> 24; + + } else { + info->isFailsafe = false; + info->imgStart = 0; + } + + info->invSectOk = true; + return true; +} + +bool Operations::QueryPs (FBase& f, + Operations::ImageInfo* info) { + + if (!info->isFailsafe) { + return errmsg("Internal Error: Tried to query PS when image is not failsafe"); + } + + u_int32_t prim_ptr, scnd_ptr; + u_int32_t sectSize = f.get_sector_size(); + + bool currSielent = _silent; + _silent = true; + + if (checkPS(f, sectSize, prim_ptr, "Primary ")) { + info->imgStart = prim_ptr; + info->psStart = sectSize; + } else if (checkPS(f, sectSize * 2, scnd_ptr, "Secondary")) { + info->imgStart = scnd_ptr; + info->psStart = sectSize * 2; + } else { + return errmsg("No valid image found."); + } + + char vsd[VSD_LEN+PSID_LEN+1]; // +1 => Leave a space for \0 when psid size == 16 . + + memset(vsd, 0, sizeof(vsd)); + READBUF(f, info->psStart + 0x20, vsd, VSD_LEN+PSID_LEN , "Vendor Specific Data (Board ID)"); + TOCPUBY(vsd); + + memcpy(info->vsd, vsd, VSD_LEN); + memcpy(info->psid, vsd + VSD_LEN, PSID_LEN); + + info->vsd [sizeof(info->vsd) - 1] = '\0'; + info->psid[sizeof(info->psid) - 1] = '\0'; + + _silent = currSielent; + + info->psOk = true; + + return true; +} + + +bool Operations::QueryImage (FBase& f, + Operations::ImageInfo* info) { + + u_int32_t guid_ptr, nguids; + guid_t guids[GUIDS]; + + // FW ID + u_int32_t fw_id; + u_int32_t im_start = info->imgStart; + + + READ4(f, im_start + 0x10, &fw_id, "FW ID"); + TOCPU1(fw_id); + + info->devRev = fw_id >> 24; + // Read GUIDs + READ4(f, im_start + 0x24, &guid_ptr, "GUID PTR"); + TOCPU1(guid_ptr); + guid_ptr += im_start; + if (guid_ptr >= f.get_size()) { + return errmsg("Failed to read GUIDs - Insane GUID pointer (%08x). 
Probably image is corrupted", guid_ptr);
+    }
+    READ4(f, guid_ptr - 3*sizeof(u_int32_t), &nguids, "Number of GUIDs");
+    TOCPU1(nguids);
+    if (nguids > GUIDS*2) {
+        report("Failed to read GUIDs - Insane Number of GUIDs (%d)\n", nguids);
+        return false;
+    }
+    READBUF(f, guid_ptr, guids, nguids / 2 * sizeof(u_int64_t), "GUIDS");
+    TOCPUBY64(guids);
+    for (u_int32_t i = 0 ; i < nguids/2 ; i++) {
+        info->guids[i] = guids[i];
+    }
+
+    // Read Info:
+    u_int32_ba info_ptr_ba;
+    u_int32_t  info_ptr;
+    u_int32_t  info_size;
+    u_int8_t   info_ptr_cs = 0;
+    READ4(f, im_start + 0x1C, &info_ptr, "INFO PTR");
+    TOCPU1(info_ptr);
+
+    // Verify info_ptr checksum (should be 0)
+    info_ptr_ba = info_ptr;
+    for (u_int32_t i = 0; i < 4 ; i++) {
+        info_ptr_cs += (u_int8_t)info_ptr_ba.range(i*8+7, i*8);
+    }
+
+    if (info_ptr_cs) {
+        return errmsg("Failed to read Info Section - Bad checksum for Info section pointer (%08x). Probably image is corrupted", info_ptr);
+    }
+
+    info_ptr = info_ptr_ba.range(23,0);
+    if (info_ptr_cs == 0 && info_ptr != 0) {
+
+        info_ptr += im_start;
+        if (info_ptr >= f.get_size()) {
+            return errmsg("Failed to read Info Section - Info section pointer (%08x) too large. Probably image is corrupted", info_ptr);
+        }
+        READ4(f, info_ptr - 3*sizeof(u_int32_t), &info_size, "Info section size");
+        TOCPU1(info_size);
+
+        // Size is in dwords - convert to bytes
+        info_size *= 4;
+
+        u_int8_t* info_buff = (u_int8_t*)alloca(info_size);
+        READBUF(f, info_ptr, info_buff, info_size, "Info Section");
+
+        if (!ParseInfoSect(info_buff, info_size, info)) {
+            return false;
+        }
+    }
+
+    info->imageOk = true;
+    return true;
+}
+
+
+bool Operations::ParseInfoSect(u_int8_t* buff, u_int32_t byteSize, Operations::ImageInfo *info) {
+
+    u_int32_t *p   = (u_int32_t*)buff;
+    u_int32_t offs = 0;
+
+    bool endFound  = false;
+
+    while ((__be32_to_cpu(*p) >> 24) != II_End && offs < byteSize) {
+        u_int32_t tagSize = __be32_to_cpu(*p) & 0xffffff;
+        u_int32_t tagId   = __be32_to_cpu(*p) >> 24;
+
+        u_int32_t tmp;
+
+        switch (tagId) {
+        case II_FwVersion:
+            info->fwVer[0] = __be32_to_cpu(*(p+1)) >> 16;
+            tmp = __be32_to_cpu(*(p+2));
+            info->fwVer[1] = tmp >> 16;
+            info->fwVer[2] = tmp & 0xffff;
+
+            info->infoFound[tagId] = true;
+            break;
+
+        case II_DeviceType:
+            tmp = __be32_to_cpu(*(p+1));
+            info->devType = tmp & 0xffff;
+            //info->devRev  = (tmp >> 16) & 0xff;
+            info->infoFound[tagId] = true;
+            break;
+
+        case II_PSID:
+            // Set psid only if not previously found in PS
+            if (!info->psOk) {
+                const char* str = (const char*)p;
+                str += 4;
+
+                for (int i = 0 ; i < PSID_LEN ; i++) {
+                    info->psid[i] = str[i];
+                }
+                info->psid[PSID_LEN] = '\0';
+
+                info->infoFound[tagId] = true;
+            }
+            break;
+
+        case II_End:
+            endFound = true;
+            break;
+
+        //default:
+        //    printf("-D- Found tag ID %d of size %d - ignoring.\n", tagId, tagSize);
+        }
+
+        p    += tagSize/4 + 1;
+        offs += tagSize;
+    }
+
+    offs += 4;
+
+    if (offs != byteSize) {
+        if (endFound) {
+            return errmsg("Info section corrupted: Section data size is %x bytes, "
+                          "but end tag found after %x bytes.", byteSize, offs);
+        } else {
+            return errmsg("Info section corrupted: Section data size is %x bytes, "
+                          "but end tag not found before section end.", byteSize);
+        }
+    }
+
+    return true;
+}
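////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of this patch):
// ParseInfoSect() above walks a TLV stream in which each entry is one
// big-endian header dword - tag in the top byte, data size in bytes in
// the low 24 bits - followed by size/4 data dwords. The stream contents
// and the 0xff end-tag value below are made up for the demo.

#include <stdint.h>
#include <stdio.h>

int main() {
    // Hypothetical stream: one 8-byte entry with tag 0x01, then an end tag.
    const uint32_t stream[] = {0x01000008u, 0x00040000u, 0x00010002u, 0xff000000u};
    const unsigned n = sizeof(stream)/sizeof(stream[0]);

    for (unsigned i = 0; i < n; ) {
        uint32_t tag  = stream[i] >> 24;        // top byte: tag id
        uint32_t size = stream[i] & 0xffffff;   // low 24 bits: data size in bytes
        printf("tag=0x%02x size=%u\n", tag, size);
        if (tag == 0xff)                        // end marker terminates the walk
            break;
        i += size/4 + 1;                        // skip header dword + data dwords
    }
    return 0;
}
////////////////////////////////////////////////////////////////////////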
+
+bool Operations::DisplayImageInfo(Operations::ImageInfo* info) {
+
+    report("Image type:      %s\n", info->isFailsafe ? "Failsafe" : "Short");
+
+    if (info->infoFound[II_FwVersion]) {
+        report("FW Version:      %d.%d.%d\n", info->fwVer[0], info->fwVer[1], info->fwVer[2]);
+    }
+
+    if (info->isFailsafe) {
+        report("I.S. Version:    %d\n", info->isVer );
+    }
+
+    if (info->infoFound[II_DeviceType]) {
+        report("Device ID:       %d\n", info->devType);
+    }
+
+    report("Chip Revision:   %X\n", info->devRev);
+
+    // GUIDs:
+    report("GUID Des:        Node             Port1            ");
+
+    if (_num_ports > 1)
+        report("Port2            ");
+    report( "Sys image\n");
+
+    report("GUIDs:           ");
+    for (u_int32_t i=0; i < GUIDS; i++) {
+        if (i != 2 || _num_ports > 1 )
+            report(GUID_FORMAT " ", info->guids[i].h, info->guids[i].l);
+    }
+
+    // VSD, PSID
+    report("\nBoard ID:        %s", info->vsd);
+    if (info->psid[0])
+        report(" (%s)\n", info->psid);
+    else
+        report("\n");
+
+    report("VSD:             %s\n", info->vsd);
+    report("PSID:            %s\n", info->psid);
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////
+//                                                                    //
+// ****************************************************************** //
+//        Detect Device type and return matching Flash interface      //
+// ****************************************************************** //
+//                                                                    //
+////////////////////////////////////////////////////////////////////////
+
+Flash* get_serial_flash(mfile* mf) {
+
+    enum {
+        CR_FLASH_TYPE   = 0xf0810,
+        BO_FLASH_TYPE_S = 10,
+        BO_FLASH_TYPE_E = 11,
+    };
+
+    enum FlashType {
+        FT_LPC    = 0,
+        FT_SPI    = 1,
+        FT_XBUS   = 2,
+        FT_EEPROM = 3
+    };
+
+    const char* flash_type_str[] = {"LPC", "SPI", "XBUS", "EEPROM"};
+
+    u_int32_ba strap_option;
+    u_int32_t  flash_type;
+
+    if (mread4(mf, CR_FLASH_TYPE, &strap_option) != 4) return NULL;
+
+    flash_type = strap_option.range(BO_FLASH_TYPE_E, BO_FLASH_TYPE_S);
+
+    switch (flash_type) {
+    case FT_SPI:
+        return new SpiFlash;
+    case FT_LPC:
+    case FT_XBUS:
+    case FT_EEPROM:
+        printf("*** ERROR *** flash of type %s not supported.\n",
+               flash_type_str[flash_type]);
+    }
+
+    return NULL;
+}
+
+
+Flash* get_flash(const char* device, u_int32_t& num_ports) {
+    Flash* f = NULL;
+
+    //
+    // Check device ID. Allocate flash accordingly
+    //
+
+    u_int32_t dev_id;
+
+    mfile* mf = mopen(device);
+    if (!mf) {
+        printf("*** ERROR *** Can't open %s: %s\n", device, strerror(errno));
+        return NULL;
+    }
+
+    if (mread4(mf, 0xf0014, &dev_id) != 4) {
+        mclose(mf);
+        return NULL;
+    }
+
+    dev_id &= 0xffff;
+
+    //printf("-D- read dev id: %d\n", dev_id);
+
+    switch (dev_id) {
+    case 23108:
+    case 25208:
+        num_ports = 2;
+        f = new ParallelFlash;
+        break;
+
+    case 24204:
+    case 25204:
+        num_ports = 1;
+        f = get_serial_flash(mf);
+        break;
+
+    case 0xffff:
+        printf("*** ERROR *** Read a corrupted device id (0x%x). Probably HW/PCI access problem\n", dev_id);
+        // fall through
+    default:
+        printf("*** ERROR *** Device type %d not supported.\n", dev_id);
+    }
+
+    mclose(mf);
+
+    return f;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+//                                                                    //
+// ****************************************************************** //
+//                              MAIN                                  //
+// ****************************************************************** //
+//                                                                    //
+////////////////////////////////////////////////////////////////////////
+// sed -e 's/"/\\"/g' < flint.txt | perl -pe 's/^(.*)$/"$1\\n"/'
+void usage(const char *sname, bool full = false)
+{
+    const char *descr =
+    "\n"
+    "               FLINT - FLash INTerface\n"
+    "\n"
+    "InfiniHost flash memory operations.\n"
+    "\n"
+    "Usage:\n"
+    "------\n"
+    "\n"
+    "    " FLINT_NAME " [switches...] 
[parameters...]\n" + "\n" + "\n" + "Switches summary:\n" + "-----------------\n" + " -bsn - Mellanox Board Serial Number (BSN).\n" + " Valid BSN format is:\n" + " MTxxxxx[-]R[xx]ddmmyy-nnn[-cc]\n" + " Commands affected: burn\n" + "\n" + " -crc - Print CRC after each section when verify.\n" + "\n" + " -d[evice] - Device flash is connected to.\n" + " Commands affected: all\n" + "\n" + " -guid - Base value for up to 4 GUIDs, which\n" + " are automatically assigned the\n" + " following values:\n" + "\n" + " guid -> node GUID\n" + " guid+1 -> port1\n" + " guid+2 -> port2\n" + " guid+3 -> system image GUID.\n" + "\n" + " Note: For a single port HCA, port2 guid is assigned\n" + " with the 'guid + 2' value, although it is ignored.\n" + "\n" + " Commands affected: burn\n" + "\n" + " -guids - 4 GUIDs must be specified here.\n" + " The specified GUIDs are assigned\n" + " the following values, repectively:\n" + " node, port1, port2 and system image GUID.\n" + "\n" + " Note: For a single port HCA, port2 guid must be\n" + " specified (can be set to 0x0), although it is ignored.\n" + "\n" + " Commands affected: burn\n" + "\n" + " -clear_semaphore - Force clear of the flash semaphore on the device.\n" + " This flag should come BEFORE the -d[evice] flag in the command line.\n" + " No command is allowed when this flag is used.\n" + " NOTE: Using this flag may result in an unstable behavior and flash image\n" + " corruption if the device or another flash application is currently\n" + " using the flash. Handle with care.\n" + "\n" + " -h[elp] - Prints this message and exits\n" + " -hh - Prints extended command help\n" + "\n" + " -i[mage] - Binary image file.\n" + " Commands affected: burn, verify\n" + "\n" + " -nofs - Burn image not in failsafe manner.\n" + "\n" + " -skip_is - Allow burning the FW image without updating the invariant sector,\n" + " to insures failsafe burning even when invariant sector difference is detected.\n" + " See the specific FW release notes for more details.\n" + "\n" + " -byte_mode - Shift address when accessing flash internal registers. May\n" + " be required for burn/write commands when accessing certain\n" + " flash types.\n" + "\n" +#if 0 + " -unlock - Use unlock bypass feature of the flash for quicker burn.\n" + " Commands affected: burn\n" + "\n" +#endif + " -s[ilent] - Do not print burn progress flyer.\n" + " Commands affected: burn\n" + "\n" + " -y[es] - Non interactive mode - assume answer\n" + " \"yes\" to all questions.\n" + " Commands affected: all\n" + "\n" + " -vsd - Write this string, of up to 208 characters, to VSD when burn.\n" + "\n" + " -psid - Write the Parameter Set ID (PSID) string to PS-ID field (last 16 bytes of VSD) when burn.\n" + "\n" + " -use_image_ps - Burn vsd as appears in the given image - don't keep existing vsd on flash.\n" + " Commands affected: burn\n" + "\n" + " -dual_image - Make the burn process burn two images on flash (previously default algorithm). Current\n" + " default failsafe burn process burns a single image (in alternating locations).\n" + " Commands affected: burn\n" + "\n" + " -v - Version info.\n" + "\n" + "Commands summary (use -hh flag for full commands description):\n" + "-----------------\n" + " b[urn] - Burn flash\n" + " e[rase] - Erase sector\n" + " q[uery] - Query misc. flash/FW characteristics\n" + " rw - Read one dword from flash\n" + " v[erify] - Verify entire flash\n" + " ww - Write one dword to flash\n" + " bb - Burn Block - Burns the given image as is. 
No checks are done.\n" + " wwne - Write one dword to flash without sector erase\n" + " wbne - Write a data block to flash without sector erase\n" + " rb - Read a data block from flash\n" + " ri - Read the fw image on the flash.\n" + " dc - Dump Configuration: print fw configuration file for the given image.\n" + "\n"; + + const char* full_descr = + "\n" + "Command descriptions:\n" + "----------------------------\n" + "\n" + "* Burn flash\n" + " Burns entire flash from raw binary image.\n" + "\n" + " Command:\n" + " b[urn]\n" + " Parameters:\n" + " None\n" + " Examples:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " -i image1.bin burn\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE2 " -guid 0x2c9000100d050 -i image1.bin b\n" + "\n" + "\n" + "* Burn Block\n" + " Burns entire flash from raw binary image as is. No checks are done on the flash or\n" + " on the given image file. No fields (such as VSD or Guids) are read from flash. \n" + "\n" + " Command:\n" + " bb\n" + " Parameters:\n" + " None\n" + " Examples:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " -i image1.bin bb\n" + "\n" + "\n" + "* Erase sector.\n" + " Erases a sector that contains specified address.\n" + "\n" + " Command:\n" + " e[rase]\n" + " Parameters:\n" + " addr - address of word in sector that you want\n" + " to erase.\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " erase 0x10000\n" + "\n" + "\n" + "* Query miscellaneous FW and flash parameters\n" + "\n" + " Command:\n" + " q[uery]\n" + " Parameters:\n" + " None\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " query\n" + "\n" + "\n" + "* Query flash device parameters (Common Flash Interface)\n" + "\n" + " Command:\n" + " cfi\n" + " Parameters:\n" + " None\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " cfi\n" + "\n" + "\n" + "* Read one dword from flash.\n" + "\n" + " Command:\n" + " rw\n" + " Parameters:\n" + " addr - address of word to read\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " rw 0x20\n" + "\n" + "\n" + "* Verify entire flash.\n" + "\n" + " Command:\n" + " v[erify]\n" + " Parameters:\n" + " None\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " v\n" + "\n" + "\n" + "* Write one dword to flash.\n" + " Note that the utility will read an entire flash sector,\n" + " modify one word and write the sector back. This may take\n" + " a few seconds.\n" + "\n" + " Command:\n" + " ww\n" + " Parameters:\n" + " addr - address of word\n" + " data - value of word\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " ww 0x10008 0x5a445a44\n" + "\n" + "\n" + "* Write one dword to flash without sector erase.\n" + " Note that the result of operation is undefined and depends\n" + " on flash type. Usually \"bitwise AND\" (&) between specified\n" + " word and previous flash contents will be written to\n" + " specified address.\n" + "\n" + " Command:\n" + " wwne\n" + " Parameters:\n" + " addr - address of word\n" + " data - value of word\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " wwne 0x10008 0x5a445a44\n" + "\n" + "* Read a data block from the flash and write it to a file or to screen.\n" + "\n" + " Command:\n" + " rb\n" + " Parameters:\n" + " addr - address of block\n" + " size - size of data to read in bytes\n" + " file - filename to write the block (raw binary). 
If not given, the data\n" + " is printed to screen\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " rb 0x10000 100 file.bin\n" + "\n" + "* Read the FW image from flash and write it to a file.\n" + "\n" + " Command:\n" + " ri\n" + " Parameters:\n" + " file - filename to write the image to (raw binary).\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " ri file.bin\n" + "\n" + "* Write a block of data to the flash without erasing.\n" + "\n" + " Command:\n" + " wbne\n" + " Parameters:\n" + " addr - address of block\n" + " size - size of data to write in bytes\n" + " data - data to write - space seperated dwords\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " wbne 0x10000 12 0x30000 0x76800 0x5a445a44\n" + "\n" + "* Print (to screen or to a file) the firmware configuration text file used by the image generation process.\n" + " This command would fail if the image does not contain a FW configuration section. Existence of this\n" + " section depends on the version of the image generation tool.\n" + "\n" + " Command:\n" + " dc\n" + " Parameters:\n" + " file - (optional) filename to write the dumped configuration to. If not given, the data\n" + " is printed to screen\n" + " Example:\n" + " " FLINT_NAME " -d " DEV_MST_EXAMPLE1 " dc\n" + "\n"; + + printf(descr, sname); + + if (full) { + printf(full_descr, sname); + } +} + + +// +// Signal handlers +// + +Flash* g_flash = NULL; + +#ifdef _WIN32 +HANDLE g_hMainTread = GetCurrentThread(); +#endif + + + +int g_signals_for_termination[] = { + SIGINT, +#ifndef _WIN32 + SIGHUP, +#endif + SIGTERM +}; + + + +void TerminationHandler (int signum) +{ + static volatile sig_atomic_t fatal_error_in_progress = 0; + +#ifdef _WIN32 + if (signum == 0) { + + report ("\nWarning: Got SIGINT. Raising SIGTERM\n"); + raise(SIGTERM); + return; + } + + report ("\nWarning: This program can not be interrupted.Please wait for its termination.\n"); + signal(signum, TerminationHandler); + return; +#endif + + + if (fatal_error_in_progress) + raise (signum); + fatal_error_in_progress = 1; + + signal (signum, SIG_DFL); + + if (g_flash != NULL) { + report("\n Received signal %d. 
Cleaning up ...", signum); + fflush(stdout); + sleep(1); // let erase sector end + g_flash->wait_ready("Process termination"); + + g_flash->close(); + report(" Done.\n"); + } + raise(signum); +} + +// +// Commands database and parsing methods +// +enum CommandInput { + CI_NONE = 0x01, + CI_IMG_ONLY = 0x02, + CI_DEV_ONLY = 0x04, + CI_IMG_OR_DEV = 0x06, + CI_IMG_AND_DEV = 0x08 +}; + +enum CommandType { + CMD_UNKNOWN, + CMD_BURN, + CMD_BURN_BLOCK, + CMD_QUERY, + CMD_VERIFY, + CMD_READ_WORD, + CMD_READ_BLOCK, + CMD_WRITE_WORD, + CMD_WRITE_WORD_NE, + CMD_WRITE_BLOCK, + CMD_WRITE_BLOCK_NE, + CMD_ERASE_SECT, + CMD_DUMP_CONF, + CMD_READ_IMAGE, + CMD_CFI, + CMD_CLEAR_SEM +}; + +struct CommandInfo { + CommandType cmd; + const char* cmdName; + bool requireExactMatch; + int maxArgs; + CommandInput requiredInput; + const char* cmdDescription; + +}; + +CommandInfo const g_commands[] = { + { CMD_BURN , "burn" ,false , 0 , CI_IMG_AND_DEV , ""}, + { CMD_BURN_BLOCK , "bb" ,true , 0 , CI_IMG_AND_DEV , ""}, + { CMD_QUERY , "query" ,false , 0 , CI_IMG_OR_DEV , ""}, + { CMD_VERIFY , "verify",false , 0 , CI_IMG_OR_DEV , ""}, + { CMD_READ_WORD , "rw" ,true , 1 , CI_DEV_ONLY , ""}, + { CMD_READ_BLOCK , "rb" ,true , 3 , CI_IMG_OR_DEV , ""}, + { CMD_WRITE_WORD , "ww" ,true , 2 , CI_DEV_ONLY , ""}, + { CMD_WRITE_WORD_NE , "wwne" ,true , 2 , CI_DEV_ONLY , ""}, + { CMD_WRITE_BLOCK , "wb" ,true , 2 , CI_DEV_ONLY , ""}, + { CMD_WRITE_BLOCK_NE , "wbne" ,true ,-1 , CI_DEV_ONLY , ""}, + { CMD_ERASE_SECT , "erase" ,false , 1 , CI_DEV_ONLY , ""}, + { CMD_DUMP_CONF , "dc" ,true , 1 , CI_IMG_OR_DEV , ""}, + { CMD_READ_IMAGE , "ri" ,true , 1 , CI_DEV_ONLY , ""}, + { CMD_CLEAR_SEM , "clear_semaphore" ,true , 0 , CI_DEV_ONLY , ""}, + { CMD_CFI , "cfi" ,true , 0 , CI_DEV_ONLY , ""} +}; + +#define numbel(x) (sizeof(x)/sizeof((x)[0])) + + +const CommandInfo* GetCommandInfo(CommandType cmd) { + for (u_int32_t i = 0 ; i < numbel(g_commands); i++ ) { + if (cmd == g_commands[i].cmd) { + return &g_commands[i]; + } + } + + return NULL; +} + +CommandType ParseCommand(const char* cmd) { + u_int32_t cmdLenGiven = strlen(cmd); + + for (u_int32_t i = 0 ; i < numbel(g_commands); i++ ) { + if (g_commands[i].requireExactMatch ) { + if (!strcmp(cmd, g_commands[i].cmdName)) { + return g_commands[i].cmd; + } + } else { + // Match if given cmd maches the beginning of the checked cmd + if (!strncmp(cmd, g_commands[i].cmdName, cmdLenGiven )) { + return g_commands[i].cmd; + } + } + } + return CMD_UNKNOWN; +} + + +bool CheckCommandInputs(const char* dev, + const char* img, + CommandType cmd) { + + const CommandInfo* cmdInfo = GetCommandInfo(cmd); + + if (!cmdInfo) { + printf("*** INTERNAL ERROR *** Unknown command given to CheckCommandInputs() (%d)\n", cmd); + return false; + } + + char* inputDesStr [] = { + NULL, + "neither a device nor an image file", // CI_NONE + "an image file", // CI_IMG_ONLY, + NULL, + "a device", // CI_DEV_ONLY, + NULL, + "either an image file or a device", // CI_IMG_OR_DEV, + NULL, + "both an image file and a device" // CI_IMG_AND_DEV + }; + + CommandInput given; + + if ( dev && img) { + given = CI_IMG_AND_DEV; + } else if (!dev && img) { + given = CI_IMG_ONLY; + } else if (dev && !img) { + given = CI_DEV_ONLY; + } else { + given = CI_NONE; + } + + if ((given & cmdInfo->requiredInput) == 0) { + printf("*** ERROR *** Command \"%s\" requires %s to be specified", + cmdInfo->cmdName, + inputDesStr[cmdInfo->requiredInput]); + + if (given != CI_NONE) { + printf(", but %s %s given.\n", + inputDesStr[given], + given == CI_IMG_AND_DEV ? 
"are" : "is"); + } else { + printf(".\n"); + } + + return false; + } + + return true; +} + +bool CheckMaxCmdArguments(CommandType cmd, int numArgs) { + const CommandInfo* cmdInfo = GetCommandInfo(cmd); + if (!cmdInfo) { + printf("*** INTERNAL ERROR *** Unknown command given to CheckMaxCmdArguments (%d)\n", cmd); + return false; + } + + if (cmdInfo->maxArgs >= 0 && numArgs > cmdInfo->maxArgs) { + printf("*** ERROR *** Command \"%s\" requires %d arguments, but %d arguments were given\n", + cmdInfo->cmdName, + cmdInfo->maxArgs, + numArgs); + return false; + } + return true; +} + +//////////////////////////////////////////////////////////////////////// +#define NEXTS(s) do { \ + if (++i >= ac) \ + { \ + printf("Missed parameter after \"%s\" switch\n", s); \ + return 1; \ + }} while(0) +#define NEXTC(p, s) do { \ + if (++i >= ac) \ + { \ + printf("Missed %s parameter after \"%s\" command\n", p,s); \ + return 1; \ + }} while(0) + +#define SETERR(args) do { printf("*** ERROR *** "); printf args; printf("\n"); return 1; } while(0) + + +int main(int ac, char *av[]) +{ + + char *image_fname=0, *device=0; + bool clear_semaphore = false; + bool silent = false; + bool guids_specified = false; + bool burn_failsafe = true; + bool use_image_ps = false; + bool single_image_burn = true; + + char* cmdStr = NULL; + + char *user_vsd=0; + char *user_psid=0; + guid_t user_guids[Operations::GUIDS]; + int rc = 0; + + CommandType cmd = CMD_UNKNOWN; + + auto_ptr f; + FImage fim; + + Operations ops; + + // + // Map termination signal handlers + // + int i; + for (i = 0 ; i < (int)(sizeof(g_signals_for_termination)/sizeof(g_signals_for_termination[0])) ; i++ ) { + signal (g_signals_for_termination[i], TerminationHandler); + } + + if (ac < 2) { + usage(av[0]); + rc = 1; goto done; + } + + // Go thru command line options + for (i=1; i < ac; i++) { + // + // Switches + // -------- + // + if (*av[i] == '-') { + int switchLen = strlen(av[i]); + + if (!strcmp(av[i], "-dual_image")) + single_image_burn = false; + + else if (!strcmp(av[i], "-clear_semaphore")) { + clear_semaphore = true; + } + + else if (!strncmp(av[i], "-device", switchLen)) { + NEXTS("-device"); + device = av[i]; + + //f.reset( get_flash(device) ); + + } else if (!strcmp(av[i], "-v") || !strcmp(av[i], "-vv")) { + printf("%s: %s .", + av[0], + _versionID); + + if (!strcmp(av[i], "-vv")) { + printf(" SVN %s", _svnID + 1); + } + + printf("\n"); + rc = 0; goto done; + + } else if (!strcmp(av[i], "-unlock")) { + _unlock_bypass = true; + } else if (!strcmp(av[i], "-noerase")) + _no_erase = true; + else if (!strcmp(av[i], "-noburn")) + _no_burn = true; + else if (!strcmp(av[i], "-crc")) + _print_crc = true; + else if (!strcmp(av[i], "-bytewrite")) { + if (device) { + printf("\"-bytewrite\" should be specifies before \"-device\" switch in the command line.\n"); + rc = 1; goto done; + } + _byte_write = true; + } else if (!strcmp(av[i], "-vsd")) { + NEXTS("-vsd"); + user_vsd = av[i]; + } + // -vsd1 is an alias to -vsd, for backward compatibility. Can be removed in the future. + else if (!strcmp(av[i], "-vsd1")) { + NEXTS("-vsd1"); + user_vsd = av[i]; + } else if (!strcmp(av[i], "-psid")) { + NEXTS("-psid"); + user_psid = av[i]; + } + // -vsd2 is an alias to psid, for backward compatibility. Can be removed in the future. 
+ else if (!strcmp(av[i], "-vsd2")) { + NEXTS("-vsd2"); + user_psid = av[i]; + } else if (!strcmp(av[i], "-bsn")) { + NEXTS("-bsn"); + GETBSN(av[i], &user_guids[0]); + for (int i=1; i>32); + user_guids[i].l = (u_int32_t)g; + } + guids_specified = true; + } else if (!strncmp(av[i], "-image", switchLen)) { + NEXTS("-image"); + image_fname = av[i]; + } else if (!strcmp(av[i], "-guid")) { + NEXTS("-guid"); + GETGUID(av[i], &user_guids[0]); + for (int i=1; i>32); + user_guids[i].l = (u_int32_t)g; + } + guids_specified = true; + } else if (!strcmp(av[i], "-guids")) { + NEXTS("-guids"); + for (int j=0; j= ac) { + printf("Exactly four GUIDs must be specified.\n"); + rc = 1; goto done; + } + } + i--; + guids_specified = true; + } else if (!strncmp(av[i], "-silent", switchLen)) + silent = true; + else if (!strncmp(av[i], "-use_image_ps", 2)) + use_image_ps = true; + else if (!strncmp(av[i], "-nofs", 5)) + burn_failsafe = false; + else if (!strcmp(av[i], "-skip_is")) + ops.SetAllowSkipIs(true); + else if (!strncmp(av[i], "-yes", switchLen)) + _assume_yes = true; + else if (!strcmp(av[i], "-byte_mode")) + ParallelFlash::set_byte_mode(true); + + else if (!strncmp(av[i], "-hh", 3) || !strncmp(av[i], "--hh", 4)) { + usage(av[0], true); + rc = 1; goto done; + } else if (!strncmp(av[i], "-help", switchLen) || !strncmp(av[i], "--h", 3)) { + usage(av[0]); + rc = 1; goto done; + } else { + printf("*** ERROR *** Invalid switch \"%s\" is specified.\n", av[i]); + rc = 1; goto done; + } + } else { + // command + cmdStr = av[i]; + break; + } + } + + + // + // Commands + // -------- + // + + if (clear_semaphore) { + if (cmdStr) { + printf("*** ERROR *** No command is allowed when -clear_semaphore flag is given.\n"); + rc = 1; goto done; + } else { + cmdStr = "clear_semaphore"; + } + } + + if (!cmdStr) { + printf("*** ERROR *** No command given. See help for details.\n"); + rc = 1; goto done; + } + + // + // Check and parse command + // + cmd = ParseCommand(cmdStr); + + if (cmd == CMD_UNKNOWN) { + printf("*** ERROR *** Invalid command \"%s\".\n", av[i]); + rc = 1; goto done; + } + + if (cmd == CMD_CLEAR_SEM) { + clear_semaphore = true; + } + + if (!CheckCommandInputs(device, image_fname, cmd)) { + rc = 1; goto done; + } + + if (!CheckMaxCmdArguments(cmd, ac - i - 1 )) { + rc = 1; goto done; + } + + + FBase* fbase; + char* cmdTarget; + char* cmdAccess; + + if (device) { + // Open the device + + u_int32_t num_ports; + auto_ptr tmp( get_flash(device, num_ports)); + f = tmp; + + if (f.get() == NULL) { + printf("*** ERROR *** Can't get flash type using device %s\n", device); + rc = 1; goto done; + } + + ops.SetNumPorts(num_ports); + + g_flash = f.get(); + if (!f->open(device, clear_semaphore)) { + printf("*** ERROR *** Can't open %s: %s\n", device, f->err()); + rc = 1; goto done; + } + + cmdTarget = "Flash"; + cmdAccess = device; + fbase = f.get(); + } + + if (image_fname) { + if (!fim.open(image_fname)) { + printf("*** ERROR *** Image file open failed: %s\n", fim.err()); + rc = 1; goto done; + } + + cmdTarget = "Image file"; + cmdAccess = image_fname; + fbase = &fim; + } + + + + + + switch (cmd) { + case CMD_BURN: + case CMD_BURN_BLOCK: + { + + // + // BURN + // + + bool burn_block = (cmd == CMD_BURN_BLOCK); + + if (!burn_block) { + // Make checks and replace vsd/guids. 
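////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of this patch): the
// CI_* values consumed by CheckCommandInputs() earlier form a bitmask,
// and a command is accepted only when the given device/image combination
// intersects its required-input mask. Note that CI_IMG_AND_DEV is its
// own bit, so supplying both inputs does not satisfy CI_IMG_OR_DEV.
// Standalone demo with renamed (hypothetical) identifiers:

#include <stdio.h>

enum Input {
    IN_NONE        = 0x1,
    IN_IMG         = 0x2,
    IN_DEV         = 0x4,
    IN_IMG_OR_DEV  = IN_IMG | IN_DEV,
    IN_IMG_AND_DEV = 0x8
};

static bool inputs_ok(bool have_dev, bool have_img, int required) {
    int given = (have_dev && have_img) ? IN_IMG_AND_DEV
              : have_img               ? IN_IMG
              : have_dev               ? IN_DEV
              :                          IN_NONE;
    return (given & required) != 0;
}

int main() {
    printf("%d\n", inputs_ok(true,  false, IN_IMG_OR_DEV));  // 1: device alone suffices
    printf("%d\n", inputs_ok(true,  true,  IN_IMG_AND_DEV)); // 1: burn gets both
    printf("%d\n", inputs_ok(false, false, IN_IMG_AND_DEV)); // 0: burn given nothing
    return 0;
}
////////////////////////////////////////////////////////////////////////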
+ + Operations::ImageInfo fileInfo; + Operations::ImageInfo flashInfo; + + bool old_silent = _silent; + _silent = true; + if (!ops.Verify(fim) || !ops.QueryAll(fim, &fileInfo)) { + printf("*** ERROR *** %s: Not a valid image file (%s)\n", image_fname, ops.err()); + rc = 1; goto done; + } + + // Check that the flash sector size is well defined in the image + if (fim.get_sector_size() && (fim.get_sector_size() != f->get_sector_size())) { + printf("*** ERROR *** Flash sector size(0x%x) differs from sector size defined in the image (0x%x).\n" + " This means that the given FW file is not configured to work with the burnt HCA board type.\n", + f->get_sector_size(), + fim.get_sector_size()); + rc = 1; goto done; + } + + // Get GUID and VSD info from flash + + bool read_guids = true; + bool read_ps = true; + + // Flash query (unlike image file query) does not have to + // pass. E.G. blank flash and the user supplies the needed data (guids, vsd). + + bool flash_query_res = ops.QueryAll(*f, &flashInfo); + + if (guids_specified) + read_guids = false; + + if ((user_vsd && user_psid) || use_image_ps) + read_ps = false; + + if (read_guids && !flash_query_res) { + + if (read_guids && !flashInfo.imageOk) { + + printf("\n"); + printf("*** ERROR *** Can't extract GUIDS info from flash. " + "Please specify GUIDs (using command line flags -guid(s) ). \n"); + } + + if (burn_failsafe) { + printf(" Can't burn in a failsafe mode. Please use \"-nofs\" flag to burn in a none failsafe mode.\n"); + } + rc = 1; goto done; + } + + if (read_ps && !flashInfo.psOk) { + printf("\n"); + if (burn_failsafe) { + + printf("*** ERROR *** Can't extract VSD/PSID info from flash.\n" + " Can't burn in a failsafe mode. Please use \"-nofs\" flag to burn in a none failsafe mode.\n"); + rc = 1; goto done; + } else { + printf("*** WARNING *** Can't extract VSD/PSID info from flash.\n\n" + " To use a specific VSD, abort and re-burn specifying the\n" + " needed info (using command line flags -vsd / -use_image_ps).\n" + " You can also continue burn using blank VSD.\n"); + + if (!ops.ask_user("\n Continue burn using a blank VSD ? (y/n) ")) { + rc = 1; goto done; + } + } + } + + // Print FW versions: + printf(" Current FW version on flash: "); + if (flashInfo.infoFound[Operations::II_FwVersion]) { + printf("%d.%d.%d\n", flashInfo.fwVer[0], flashInfo.fwVer[1], flashInfo.fwVer[2]); + } else { + printf("N/A\n"); + } + + printf(" New FW version: "); + if (fileInfo.infoFound[Operations::II_FwVersion]) { + printf("%d.%d.%d\n", fileInfo.fwVer[0], fileInfo.fwVer[1], fileInfo.fwVer[2]); + } else { + printf("N/A\n"); + } + + + // Patch GUIDS + if (guids_specified) { + if (!ops.patchGUIDs(fim, + user_guids, + flashInfo.imageOk ? flashInfo.guids : NULL, + isatty(0) != 0)) { + rc = 1; goto done; + } + } else { + if (!ops.patchGUIDs(fim, NULL, flashInfo.guids, false)) { + rc = 1; goto done; + } + } + + if (_image_is_full && !use_image_ps) + if (!ops.patchVSD(fim, + user_vsd, + user_psid, + flashInfo.psOk ? flashInfo.vsd : NULL, + flashInfo.psOk ? 
flashInfo.psid : NULL, + fileInfo.psid )) { + rc = 1; goto done; + } + + _silent = old_silent; + } else { + // BURN BLOCK: + burn_failsafe = false; + } + + // Burn it + if (burn_failsafe) { + // Failsafe burn + if (!_image_is_full) { + printf("*** ERROR *** Failsafe burn failed: FW Image on flash is short.\n"); + printf("It is impossible to burn a short image in a failsafe mode.\n"); + printf("If you want to burn in non failsafe mode, use the \"-nofs\" switch.\n"); + rc = 1; goto done; + } + + // FS burn + if (!ops.FailSafe_burn(*f, + fim.getBuf(), + fim.getBufLength(), + single_image_burn, + !silent)) { + if (f->err()) { + // The error is in flash access: + printf("*** ERROR *** Flash access failed during burn: %s\n", f->err()); + } else { + // operation/ algorithm error: + printf("*** ERROR *** Failsafe burn failed: %s\n", ops.err()); + printf("If you want to burn in non failsafe mode, use the \"-nofs\" switch.\n"); + } + rc = 1; goto done; + } + } else { + // + // Not failsafe (sequential) burn + // + + // Ask is it OK + printf("\n"); + if (burn_block) { + printf("Block burn: The given image will be burnt as is. No fields (such\n"); + printf("as GUIDS,VSD) are taken from current image on flash.\n"); + } + printf("Burn process will not be failsafe. No checks are performed.\n"); + printf("ALL flash, including Invariant Sector will be overwritten.\n"); + printf("If this process fails computer may remain in unoperatable state.\n"); + + if (!ops.ask_user("\nAre you sure ? (y/n) [n] : ")) { + rc = 1; goto done; + } + + // Non FS burn + if (!ops.write_image(*f, 0, fim.getBuf(), fim.getBufLength(), + !silent)) { + report("\n"); + printf("*** ERROR *** Non failsafe burn failed: %s\n", ops.err()); + rc = 1; goto done; + } + report("\n"); + } + } + break; + + case CMD_ERASE_SECT: + { + // + // ERASE SECTOR + // Parameters: + // + u_int32_t addr; + char *endp; + + // Address of sector to erase + NEXTC("", "erase"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + + // Erase + if (!f->erase_sector(addr)) { + printf("*** ERROR *** Erase sector failed: %s\n", f->err()); + rc = 1; goto done; + } + } + break; + + case CMD_QUERY: + { + // QUERY + Operations::ImageInfo info; + + if (!ops.QueryAll(*fbase, &info)) { + printf("*** ERROR *** %s query (%s) failed: %s\n", cmdTarget , cmdAccess, ops.err()); + rc = 1; goto done; + } + + ops.DisplayImageInfo(&info); + } + break; + + case CMD_READ_BLOCK: + { + // READ BLOCK + // Parameters: [OUT_FILENAME] + // if OUT_FILENAME is given, binari read block is stored + // in the given file. Otherwise, data is printed to screen. 
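////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of this patch): when
// no output file is given, the read-block code below prints the data as
// dwords, converting each from the big-endian order used on flash with
// __be32_to_cpu(). A self-contained version of that conversion:

#include <stdint.h>
#include <stdio.h>

static uint32_t be32_to_host(const uint8_t* p) {
    return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
           ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main() {
    const uint8_t block[8] = {0xde,0xad,0xbe,0xef, 0x00,0x00,0x00,0x2a};
    for (unsigned i = 0; i < sizeof(block); i += 4)
        printf("0x%08x ", be32_to_host(block + i));
    printf("\n");                // prints: 0xdeadbeef 0x0000002a
    return 0;
}
////////////////////////////////////////////////////////////////////////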
+ u_int32_t addr, length; + u_int8_t *data; + char *endp; + + bool to_file = false; + + // Address and length + NEXTC("", "rb"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + NEXTC("", "rb"); + length = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid length \"%s\"\n", av[i]); + rc = 1; goto done; + } + data = new u_int8_t[length]; + + // Output file + FILE* fh; + + if (i + 2 == ac) + to_file = true; + + if (to_file) { + NEXTC("", "rb"); + if ((fh = fopen(av[i], "wb")) == NULL) { + fprintf(stderr, "Can't open "); + perror(av[i]); + rc = 1; goto done; + } + } + + // Read flash + if (!fbase->read(addr, data, length)) { + printf("*** ERROR *** Flash read failed: %s\n", fbase->err()); + rc = 1; goto done; + } + + if (to_file) { + // Write output + if (fwrite(data, 1, length, fh) != length) { + perror("Write error"); + rc = 1; goto done; + } + fclose(fh); + } else { + for (u_int32_t i = 0; i < length ; i+=4) { + u_int32_t word = *((u_int32_t*)(data + i)); + + word = __be32_to_cpu(word); + printf("0x%08x ", word); + } + printf("\n"); + } + delete [] data; + } + break; + + case CMD_READ_WORD: + { + // READ DWORD + // Parameters: + u_int32_t data, addr; + char *endp; + + // Address + NEXTC("", "rw"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + + // Read + if (!f->read(addr, &data)) { + printf("*** ERROR *** Flash read failed: %s\n", f->err()); + rc = 1; goto done; + } + printf("0x%08x\n", (unsigned int)__cpu_to_be32(data)); + + } + break; + + case CMD_VERIFY: + { + // VERIFY + if (!ops.Verify(*fbase)) { + printf("\n*** ERROR *** FW Image verification failed. AN HCA DEVICE CAN NOT BOOT FROM THIS IMAGE.\n"); + rc = 1; goto done; + } else { + printf("\nFW Image verification succeeded. 
Image is OK.\n\n"); + } + } + break; + + case CMD_DUMP_CONF: + { + // Dump conf + _silent = true; + + char* conf_file = NULL; + if (i + 2 <= ac) { + NEXTC("", "dc"); + conf_file = av[i]; + } + + ops.Verify(*fbase); + + if(!ops.DumpConf(conf_file)) { + printf("*** ERROR *** Failed dumping FW configuration: %s\n", ops.err()); + rc = 1; goto done; + } + } + break; + + case CMD_READ_IMAGE: + { + // Dump conf + _silent = true; + + char* img_file = NULL; + NEXTC("", "ri"); + img_file = av[i]; + + ops.Verify(*f); + + //printf("Last addr: 0x%08x\n", ops._last_image_addr); + + u_int32_t length = ops._last_image_addr; + u_int8_t* data = new u_int8_t[length]; + + FILE* fh; + + if ((fh = fopen(av[i], "wb")) == NULL) { + fprintf(stderr, "Can't open "); + perror(av[i]); + rc = 1; goto done; + } + + // Read flash + if (!f->read(0, data, length)) { + printf("*** ERROR *** Flash read failed: %s\n", f->err()); + rc = 1; goto done; + } + + // Write output + if (fwrite(data, 1, length, fh) != length) { + perror("Write error"); + rc = 1; goto done; + } + fclose(fh); + + delete [] data; + } + break; + + case CMD_WRITE_BLOCK: + { + // WRITE BLOCK + // Parameters: + u_int32_t addr; + char *endp; + + char* fname; + + // Device + if (!device) { + printf("For wb command \"-device\" switch must be specified.\n"); + rc = 1; goto done; + } + + // Input file + FImage fim; + + NEXTC("", "wb"); + + fname = av[i]; + + // Address + NEXTC("", "wb"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + + if (!fim.open(image_fname)) { + printf("*** ERROR *** Image file open failed: %s\n", fim.err()); + rc = 1; goto done; + } + + // Write flash + if (!ops.write_image(*f, addr, fim.getBuf(), fim.getBufLength(), !silent)) { + printf("*** ERROR *** Flash write failed: %s\n", ops.err()); + rc = 1; goto done; + } + } + break; + + case CMD_WRITE_WORD: + { + // WRITE DWORD + // Parameters: + u_int32_t data, addr; + char *endp; + + // Address and data + NEXTC("", "ww"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + NEXTC("", "ww"); + data = __cpu_to_be32(strtoul(av[i], &endp, 0)); + if (*endp) { + printf("Invalid data \"%s\"\n", av[i]); + rc = 1; goto done; + } + + //f->curr_sector = 0xffffffff; // First time erase sector + if (!f->write(addr, data)) { + printf("*** ERROR *** Flash write failed: %s\n", f->err()); + rc = 1; goto done; + } + } + break; + + case CMD_WRITE_BLOCK_NE: + { + // WRITE DWORD WITHOUT ERASE + // Parameters: + u_int32_t size, addr; + char *endp; + + // Address and data + NEXTC("", "wbne"); + addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + NEXTC("", "wbne"); + size = strtoul(av[i], &endp, 0); + if (*endp || size % 4) { + printf("Invalid size \"%s\"\n", av[i]); + rc = 1; goto done; + } + vector data_vec(size/4); + for (u_int32_t w = 0; w < size/4 ; w++) { + NEXTC("", "wbne"); + data_vec[w] = __cpu_to_be32(strtoul(av[i], &endp, 0)); + if (*endp) { + printf("Invalid data \"%s\"\n", av[i]); + rc = 1; goto done; + } + + //printf("-D- writing: %08x : %08x\n", addr + w*4 , data_vec[w]); + } + + if (!f->write(addr, &data_vec[0], size, true, false)) { + printf("*** ERROR *** Flash write failed: %s\n", f->err()); + rc = 1; goto done; + } + } + break; + + case CMD_WRITE_WORD_NE: + { + // WRITE DWORD WITHOUT ERASE + // Parameters: + u_int32_t data, addr; + char *endp; + + // Address and data + NEXTC("", "wwne"); + 
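////////////////////////////////////////////////////////////////////////
// Illustrative sketch (editor's addition, not part of this patch): all
// numeric command arguments in these cases are parsed with strtoul() in
// base 0 and validated through the end pointer, so trailing garbage is
// rejected rather than silently truncated. Standalone version:

#include <stdlib.h>
#include <stdio.h>

static bool parse_num(const char* s, unsigned long* out) {
    char* endp;
    *out = strtoul(s, &endp, 0);      // base 0: decimal, 0x... hex, 0... octal
    return *s != '\0' && *endp == '\0';
}

int main() {
    unsigned long v;
    printf("%d\n", parse_num("0x10008", &v)); // 1: parsed, v == 0x10008
    printf("%d\n", parse_num("0x10z8",  &v)); // 0: trailing junk rejected
    return 0;
}
////////////////////////////////////////////////////////////////////////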
addr = strtoul(av[i], &endp, 0); + if (*endp) { + printf("Invalid address \"%s\"\n", av[i]); + rc = 1; goto done; + } + NEXTC("", "wwne"); + data = __cpu_to_be32(strtoul(av[i], &endp, 0)); + if (*endp) { + printf("Invalid data \"%s\"\n", av[i]); + rc = 1; goto done; + } + + if (!f->write(addr, &data, 4, true, false)) { + printf("*** ERROR *** Flash write failed: %s\n", f->err()); + rc = 1; goto done; + } + } + break; + + case CMD_CFI: + { + if (!f->print_cfi_info()) { + printf("*** ERROR *** Cfi query failed: %s\n", f->err()); + rc = 1; goto done; + } + } + break; + + case CMD_CLEAR_SEM: + // Do nothing - opening the device already cleared the semaphore. + break; + + default: + printf("*** INTERNAL ERROR *** Invalid command %d.\n", cmd); + rc = 1; goto done; + } + +done: + + //mask signals + for (i = 0 ; i < (int)(sizeof(g_signals_for_termination)/sizeof(g_signals_for_termination[0])) ; i++ ) { + signal (g_signals_for_termination[i], SIG_IGN); + } + + return rc; +} + diff --git a/branches/Ndi/tools/flint/user/flint.rc b/branches/Ndi/tools/flint/user/flint.rc new file mode 100644 index 00000000..62ad3fde --- /dev/null +++ b/branches/Ndi/tools/flint/user/flint.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#if DBG +#define VER_FILEDESCRIPTION_STR "Mellanox HCAs FW burning tool. (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Mellanox HCAs FW burning tool." +#endif + +#define VER_INTERNALNAME_STR "flint.exe" +#define VER_ORIGINALFILENAME_STR "flint.exe" + +#include diff --git a/branches/Ndi/tools/flint/user/makefile b/branches/Ndi/tools/flint/user/makefile new file mode 100644 index 00000000..128ed372 --- /dev/null +++ b/branches/Ndi/tools/flint/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/fwupdate/dirs b/branches/Ndi/tools/fwupdate/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tools/fwupdate/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/fwupdate/user/SOURCES b/branches/Ndi/tools/fwupdate/user/SOURCES new file mode 100644 index 00000000..679eec68 --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/SOURCES @@ -0,0 +1,21 @@ +TARGETNAME=hcafwupdate +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=flint.cpp \ + flint-tools.cpp + +INCLUDES= ..\..\..\inc;..\..\..\inc\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/fwupdate/user/flint-tools.cpp b/branches/Ndi/tools/fwupdate/user/flint-tools.cpp new file mode 100644 index 00000000..4bcaf7ea --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/flint-tools.cpp @@ -0,0 +1,368 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include +#include "flint-tools.h" + +FlashCommandLine::FlashCommandLine() +{ + initialize(); +} + +FlashCommandLine::FlashCommandLine(int argc, char **argv) +{ + initialize(); + parse(argc, argv); +} + +void FlashCommandLine::initialize() +{ + m_options = no_prompts; + m_usedefaultdevice = true; // use the default device name + m_readguidsfromflash = true; // read the current guids from the flash + m_isFlashDeviceOption = false; + //m_device[MAXPATHLEN]; + m_defaultdevice = "//mt23108_pciconf"; // default the device for now + //m_nodeguidstr[MAXPATHLEN]; + //m_rawfile[MAXPATHLEN]; +} + +bool FlashCommandLine::isOption(Options opt) +{ + return ((m_options & opt)==opt); +} + +bool FlashCommandLine::isFlashDeviceOption(void) +{ + return (m_isFlashDeviceOption); +} + +static char optarg[MAXPATHLEN]; + static struct option long_options[] = + { + {"noquery" , 0, 0, 'q'}, + {"silent" , 0, 0, 's'}, + {"burn" , 0, 0, 'b'}, + {"debug" , 0, 0, 'x'}, + {"dump-flash" , 0, 0, 'p'}, + {"dump-image" , 0, 0, 'z'}, + {"device" , 1, 0, 'd'}, + {"guid" , 1, 0, 'n'}, + {"noverify" , 0, 0, 'y'}, + {"verify-flash" , 0, 0, 'v'}, + {"verify-image" , 0, 0, 'g'}, + {"image-info" , 0, 0, 'i'}, + {"flash-info" , 0, 0, 'j'}, + {"no-prompts" , 0, 0, 'k'}, + {"prompt" , 0, 0, 'l'}, + {"write-file" , 1, 0, 'w'}, + {"force" , 0, 0, 'f'}, + {"flash-format" , 0, 0, 'A'}, + {"nobestdevice" , 0, 0, 'c'}, + {"burn-invariant" , 0, 0, 'B'}, + {"help" , 0, 0, 'h'}, + {0, 0, 0, 0} + }; + static char *short_options="qsbxpzdnyvgijwfcBh"; + + +char getopt_long( + int argc, + char *argv[], + const char *short_option, + const struct option *long_option, + void *unused ) +{ + static int i = 1; + char ret = -1; + + UNUSED_PARAM( unused ); + + if ( i == argc ) + return ret; + + if( argv[i][0] != '-' ) + return ret; + + if ( isalpha(argv[i][1])) + { + ret = argv[i][1]; + char *tmp_buf = &argv[i][2]; + + for(;;) + { + if ( *tmp_buf == '\0') + { + i++; + tmp_buf = &argv[i][0]; + } + + if ( *tmp_buf != '-' ) + { + + if (isalnum( *tmp_buf ) ) + { + sscanf(argv[i],"%s", &optarg); + return ret; + } + } + return ret; + } + } + return ret; +} + +int32_t FlashCommandLine::parse(int argc, char **argv) +{ + int opt; + bool rawFileRequired=false; + int i = 0; + + cl_memset (m_device,0, MAXPATHLEN ); + cl_memset (m_rawfile,0, MAXPATHLEN ); + strncpy(m_program, argv[0], MAXPATHLEN); + strncpy(m_device, m_defaultdevice, MAXPATHLEN); // set the device name to the default + + + // process the FlashCommandLine to determine desired actions + if (argc < 2) + { + usage(argv[0]); + return -1; + } + + while (1) + { + int option_index = 0; + cl_memset (optarg, 0, sizeof(optarg)); + if (++i >= argc ) + return 1; + + //opt = getopt_long (argc, argv, short_options , long_options, &option_index); + opt = argv[i][1]; + if (argv[i][2] != '\0') + cl_memcpy(optarg, &argv[i][2], min( strlen(argv[i]), MAXPATHLEN)); + if ( ++i < argc && argv[i][0] != '-' ) + { + cl_memcpy(optarg, &argv[i][0], min( strlen(argv[i]), MAXPATHLEN)); + } + + if (opt == -1) + break; + + switch (opt) + { + case 'q': + m_options |= noquery; // query and show guids, no write + break; + case 'b': + m_options |= burn; // query and show guids, no write + m_isFlashDeviceOption = true; + rawFileRequired=true; + break; + case 'd': + m_usedefaultdevice=false; // don't use the default device + //m_options |= disable_bestdevice; + //strncpy(m_device, optarg, MAXPATHLEN); // set the device name + strncpy(m_device, m_defaultdevice, MAXPATHLEN); // use default - we don't 
export device name so far + break; + case 'n': + m_readguidsfromflash=false; + strncpy(m_nodeguidstr, optarg, MAXPATHLEN); // set the node guid string + break; + case 's': + m_options |= silent; // be very quite + break; + case 'x': + m_options |= debug; // be very noisy + break; + case 'y': + m_options ^= verify_flash; + m_isFlashDeviceOption = true; + break; + case 'v': + m_options |= verify_flash; + m_isFlashDeviceOption = true; + break; + case 'g': + m_options |= verify_image; + rawFileRequired=true; + strncpy(m_rawfile, optarg, min(strlen (optarg),MAXPATHLEN)); + break; + case 'p': + m_options |= dump_flash; + m_isFlashDeviceOption = true; + break; + case 'z': + m_options |= dump_image; + rawFileRequired=true; + strncpy(m_rawfile, optarg, min(strlen (optarg),MAXPATHLEN)); + break; + case 'i': + m_options |= show_image_info; + rawFileRequired = true; + break; + case 'j': + m_options |= show_flash_info; + m_isFlashDeviceOption = true; + break; + case 'k': + break; // obsolete be silent about for backward support + case 'l': + m_options ^= no_prompts; + break; + case 'f': + m_options |= force; + break; + case 'A': + m_options |= flash_format; + m_isFlashDeviceOption = true; + break; + case 'w': + strncpy(m_revision, optarg, revisionStringLength); // set the node guid string + m_options |= write_file; + rawFileRequired=true; + break; + case 'c': + m_options |= disable_bestdevice; + break; + case 'B': + m_options |= burn_invariant_section; + break; + case 'h': // display some useful usage information + default: + usage(argv[0]); + exit(-1); + } // end case + + if ( rawFileRequired && m_rawfile[0] == '\0' ) + { + cl_memcpy(m_rawfile, optarg, min(strlen (optarg),MAXPATHLEN)); + if ( strlen (m_rawfile ) == 0 ) + { + fprintf(stderr, "Empty firmware filename.\n"); + usage(argv[0]); + exit(-1); + } + } + } // end while + + return 1; +} + +uint8_t FlashCommandLine::getOptions() +{ + return (uint8_t)m_options; +} + +char *FlashCommandLine::getProgramName() +{ + return m_program; +} + + +char *FlashCommandLine::getDeviceName() +{ + return m_device; +} + +char *FlashCommandLine::getDefaultDeviceName() +{ + return m_defaultdevice; +} + +char *FlashCommandLine::getNodeGUID() +{ + return m_nodeguidstr; +} + +char *FlashCommandLine::getRawFileName() +{ + return m_rawfile; +} + +char *FlashCommandLine::getRevisionString() +{ + return m_revision; +} + + +bool FlashCommandLine::useDefaultDevice() +{ + return m_usedefaultdevice; +} + +bool FlashCommandLine::useFlashNodeGUID() +{ + return m_readguidsfromflash; +} + + +void FlashCommandLine::usage(const char *progname) +{ + char *rversion = "$Revision$"; + char version[128]; + uint32_t x; + + for (x=0;x<128;x++) + { + if (rversion[x+11] == '$') + { + break; + } + version[x] = rversion[x+11]; + } + + fprintf(stdout, "usage: %s [-b][-q][-x][-p][-z][-d device][-n 0x0000000000000000]\n" + " [-v][-g][-i][-w revision][-h][-c][raw-firmware-file]\n" + "version: %s\n" + " --noquery |-q - do not query device guids\n" + " --burn |-b - burn raw image file to flash device\n" + " --debug |-x - enable debug mode\n" + " --dump-flash |-p - dump flash image to stdout\n" + " --dump-image |-z - dump raw file image to stdout\n" + " --device |-d - the hca configuration device (*_pciconf# or *_cr#)\n" + " --guid |-n - the 64 bit GUID to use for the hca device\n" + " --verify-flash |-v - verify the flash image\n" + " --verify-image |-g - verify the raw file image\n" + " --image-info |-i - display raw file image information\n" + " --flash-info |-j - display flash image information\n" 
+ " --prompts |-l - at decision points prompt the end user\n" + " --write-file |-w - write image to file injecting the VSD section\n" + " --nobestdevice |-c - use exactly the device the -d option specified\n" + " --burn-invariant|-B - burn the invariant section\n" + " --help |-h - this usage clause\n", + progname, version); +} + diff --git a/branches/Ndi/tools/fwupdate/user/flint-tools.h b/branches/Ndi/tools/fwupdate/user/flint-tools.h new file mode 100644 index 00000000..32f53c2a --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/flint-tools.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _FLINT_TOOLS_H +#define _FLINT_TOOLS_H + +#define __STDC_FORMAT_MACROS + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "complib/cl_byteswap.h" +#include "complib/cl_memory.h" +#include "complib/cl_debug.h" +#include "complib/cl_types.h" +#include "iba/ib_types.h" +#include "iba/ib_al.h" + +#ifndef MAXPATHLEN +#define MAXPATHLEN (128) +#endif + +#ifndef PRIx32 +#define PRIx32 "x" +#endif + +#ifndef PRIX32 +#define PRIX32 "X" +#endif + +#ifndef PRId32 +#define PRId32 "d" +#endif + +#define __be32_to_cpu(s) cl_ntoh32(s) +#define __cpu_to_be32(s) __be32_to_cpu(s) + +#include "mtcr.h" + +/* define some debug helpers *********************/ + + extern u_int32_t g_fw_dbg_lvl; + +#define FW_DBG_ERROR CL_DBG_ERROR +#define FW_DBG_FULL CL_DBG_ALL +#define FW_DBG_TRACE +#define FW_ENTER( msg_lvl ) \ + CL_ENTER( msg_lvl, g_fw_dbg_lvl ) + +#define FW_EXIT( msg_lvl ) \ + CL_EXIT( msg_lvl, g_fw_dbg_lvl ) + +#define FW_TRACE( msg_lvl, msg ) \ + CL_TRACE( msg_lvl, g_fw_dbg_lvl, msg ) + +#define FW_TRACE_EXIT( msg_lvl, msg ) \ + CL_TRACE_EXIT( msg_lvl, g_fw_dbg_lvl, msg ) + +#define FW_PRINT( msg_lvl, msg ) \ + CL_PRINT( msg_lvl, g_fw_dbg_lvl, msg ) +/**************************************************/ +struct option +{ + const char *long_name; + unsigned long flag; + void *pfn_handler; + char short_name; +}; + +class FlashCommandLine +{ +public: + enum Constants { + revisionStringLength = 25 + }; + FlashCommandLine(); + FlashCommandLine(int argc, char **argv); + int32_t parse(int argc, char **argv); + enum Options { none = 0x0, + silent = 0x1, + debug = 0x2, + verify_image = 0x4, + verify_flash = 0x8, + dump_image = 0x10, + dump_flash = 0x20, + burn = 0x40, + noquery = 0x80, + 
no_prompts = 0x100, + write_file = 0x200, + show_image_info = 0x400, + show_flash_info = 0x800, + force = 0x1000, + flash_format = 0x2000, + disable_bestdevice = 0x4000, + burn_invariant_section = 0x8000 + }; + uint8_t getOptions(); + char *getProgramName(); + char *getDeviceName(); + char *getDefaultDeviceName(); + char *getNodeGUID(); + char *getRawFileName(); + char *getRevisionString(); + bool useDefaultDevice(); + bool useFlashNodeGUID(); + void usage(const char *progname); + bool isOption(Options opt); + bool isFlashDeviceOption(void); +protected: + void initialize(); +private: + int m_options; + bool m_isFlashDeviceOption; + bool m_usedefaultdevice; + bool m_readguidsfromflash; + char m_device[MAXPATHLEN]; + char *m_defaultdevice; + char m_nodeguidstr[MAXPATHLEN]; + char m_rawfile[MAXPATHLEN]; + char m_program[MAXPATHLEN]; + char m_revision[revisionStringLength]; +}; +#endif diff --git a/branches/Ndi/tools/fwupdate/user/flint.cpp b/branches/Ndi/tools/fwupdate/user/flint.cpp new file mode 100644 index 00000000..5e36b802 --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/flint.cpp @@ -0,0 +1,2756 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "flint-tools.h" +#include + +bool DebugMode = false; +bool DisableBestDevice = false; + +static uint32_t TAVOR_TYPE = 23108; +static uint32_t ARBEL_TYPE = 25208; +static int mellanox_mode = 0; +static uint32_t devRevision; +static uint32_t devType; +static uint32_t devId; +static char bestDev[MAXPATHLEN]; + +char *techSupportMessage = "Use the --force option to skip this check. This operation\n" + "allows any firmware to be placed on the HCA device, but could cause the\n" + "the device to cease functioning. 
If this occurs, please contact\n" + "technical support.\n"; + + +#define M_DEBUG printf +#define LOCK_FILE_NAME "C:\\fwupdate.lck" + +static int file_lock; +static int FwUpdateLock(void); +static void FwUpdateUnlock(void); + +// global defines + +u_int32_t g_fw_dbg_lvl = CL_DBG_ERROR ; + +#define BE2CPU32(s) __be32_to_cpu(s) +#define TOCPU1(s) s = __be32_to_cpu(s) + +#define TOCPU(s,bsz) do { \ + uint32_t *p = (uint32_t *)(s); \ + for (uint32_t ii=0; ii>31)) ^ 0x100b) & 0xffff; + else + _crc=((_crc<<1) | (o>>31)) & 0xffff; + o = (o<<1) & 0xffffffff; + } +} // Crc16::add + + +//////////////////////////////////////////////////////////////////////// +void Crc16::finish() +{ + for (uint32_t i=0; i<16; i++) + { + if (_crc & 0x8000) + _crc=((_crc<<1) ^ 0x100b) & 0xffff; + else + _crc=(_crc<<1) & 0xffff; + } + + // Revert 16 low bits + _crc = _crc ^ 0xffff; + +} // Crc16::finish + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// FLASH ACCESS // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +//#define FUJITSU_BYTE_MODE + +// Common base class for Flash and for FImage +class FBase +{ +public: + FBase() {m_debug=false;} + + virtual bool open(const char *) = 0; + virtual void close() = 0; + virtual bool read(uint32_t addr, uint32_t *data) = 0; + virtual bool read(uint32_t addr, void *data, uint32_t len) = 0; + virtual bool dump() = 0; + virtual uint32_t getClassType() = 0; + bool verify(); + void setDebug(bool on) { m_debug = on; }; + bool isDebug() { return m_debug; }; + char *_err; + +protected: + char _msg[1024]; + bool m_debug; +}; + +// Flash image (RO) +class FImage : public FBase +{ +public: + FImage() : _buf(0) { } + FImage(bool debug) : _buf(0) { setDebug(debug); } + virtual ~FImage() { close(); } + + enum { RawImageFileType = 1 }; + + uint32_t *getBuf() { return _buf; } + uint32_t getBufLength() { return _len; } + virtual bool open(const char *fname); + virtual void close(); + virtual bool read(uint32_t addr, uint32_t *data); + virtual bool read(uint32_t addr, void *data, uint32_t len); + virtual bool dump(); + virtual uint32_t getClassType() { return RawImageFileType; }; + +private: + uint32_t *_buf; + uint32_t _len; +}; + +// Flash access (R/W) +class Flash : public FBase +{ +public: + enum GPIO_STATE { High, Low }; + + Flash() : curr_sector(0xffffffff), m_curr_bank(0xffffffff), m_gpio_state(Flash::Low) {} + virtual ~Flash() { close(); }; + + enum { SECT_SIZE = 0x10000, SECT_MASK = 0xffff0000, FlashDeviceType = 2 }; + enum { FW_READ = 0x00, + FW_WRITE = 0x01, + FW_READ_CMD = 0x08, + FW_WRITE_CMD= 0x09, + FW_OPEN_IF = 0xe7, + FW_CLOSE_IF = 0x7e }; + uint32_t curr_sector; + hca_dev_t m_mf; + virtual bool open(const char *); + virtual void close(); + virtual bool read(uint32_t addr, uint32_t *data); + virtual bool read(uint32_t addr, void *data, uint32_t len); + virtual bool dump(); + virtual uint32_t getClassType() { return FlashDeviceType; }; + void setGPIOState(GPIO_STATE state); + bool format(); + bool write(uint32_t addr, uint32_t data); + bool write(uint32_t addr, void *data, uint32_t cnt, + bool noerase = false, bool noverify = false); + bool write_image(uint32_t addr, void *data, uint32_t cnt, bool eraseInvariant = false, bool report=false); + bool erase_sector(uint32_t addr); + + ib_api_status_t access(hca_dev_t *mf, u_int32_t offset, void *data, u_int32_t length, 
u_int32_t operation); +private: + + uint32_t m_curr_bank; + GPIO_STATE m_gpio_state; + uint32_t m_dir; + uint32_t m_pol; + uint32_t m_mod; + uint32_t m_data; + + bool hca_open (void); + void hca_close (); + bool set_bank(uint32_t bank); + bool write_internal(uint32_t addr, uint8_t data); + + enum FlashCmds {IDLE=0, + READ4=(1<<29), + WRITE1=(2<<29) + }; + + enum { TRANS = 4096 }; + enum { BANK_SHIFT = 19, + BANK_MASK = 0xfff80000 + }; + + enum { FLASH = 0xf01a4, + ADDR_MSK=0x7ffffUL, + CMD_MASK=0xe0000000UL + }; + + enum { CPUMODE = 0xf0150, + CPUMODE_MSK=0xc0000000UL, + CPUMODE_SHIFT=30 + }; + + enum { SEMAP63 = 0xf03fc, + GPIO_DIR_L = 0xf008c, + GPIO_POL_L = 0xf0094, + GPIO_MOD_L = 0xf009c, + GPIO_DAT_L = 0xf0084, + GPIO_DATACLEAR_L = 0xf00d4, + GPIO_DATASET_L = 0xf00dc + }; + + // FLASH constants + enum { FLASH_CMD_CNT = 5000, // Number of reads till flash cmd is zeroed + ERASE_DELAY = 20000, // Delay between reads when wating for sector erase + ERASE_CNT = 80, // Maximal number of reads when wating for sector erase + READ_CNT_FAST = 5000, // Number of fast reads after write byte + READ_CNT_SLOW = 50, // Number of slow reads after write byte + READ_DELAY = 10000, // Delay between slow reads after write byte + WR_REPORT_FAST = 256, // Report frequency when write (fast interfaces) + WR_REPORT_SLOW = 4, // Report frequency when write (slow interfaces) + RD_REPORT_FAST = 4096, // Report frequency when read (fast interfaces) + RD_REPORT_SLOW = 64, // Report frequency when read (slow interfaces) + GPIO_SEM_TRIES = 10240 // Number of tries to obtain a GPIO sem. + }; + +}; + +bool FImage::dump() +{ + uint32_t word; + uint32_t position = 0; + + while (1) + { + if (!this->read(position, &word)) + { + break; + } + + if ((position % 16) == 0) + { + printf("\n%08"PRIx32": ", position); + } + printf(" %08"PRIx32"", word); + + position = position + 4; + } + printf("\n"); + return true; +} + +//////////////////////////////////////////////////////////////////////// +bool FImage::open(const char *fname) +{ + struct _stat stat_buf; + int32_t r_cnt; + + uint32_t fd = _open(fname, _O_BINARY | _O_RDONLY, S_IREAD); + if (!fd ) + { + sprintf(_msg, "Can't open file \"%s\" - %s\n", fname, + strerror(errno)); + _err = &_msg[0]; + return false; + } + if (_fstat(fd, &stat_buf) < 0) + { + sprintf(_msg, "Can't stat file \"%s\" - %s\n", fname, + strerror(errno)); + _err = &_msg[0]; + return false; + } + if (stat_buf.st_size & 0x3) + { + _err = "Image size should be 4-bytes aligned."; + return false; + } + + _buf = new uint32_t[stat_buf.st_size/4]; + if ((r_cnt = _read(fd, _buf, stat_buf.st_size)) != stat_buf.st_size) + { + if (r_cnt < 0) + sprintf(_msg, "Read error on file \"%s\" - %s\n", + fname, strerror(errno)); + else + sprintf(_msg, "Read error on file \"%s\" - read only %d bytes (from %ld)\n", + fname, r_cnt, stat_buf.st_size); + _err = &_msg[0]; + return false; + } + + _len = stat_buf.st_size; + _close(fd); + return true; +} // FImage::open + +//////////////////////////////////////////////////////////////////////// +void FImage::close() +{ + delete [] _buf; + _buf = 0; +} // FImage::close + +//////////////////////////////////////////////////////////////////////// +bool FImage::read(uint32_t addr, uint32_t *data) +{ + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + if (!_buf) + { + _err = "Not opened"; + return false; + } + if (addr > _len-4) + { + sprintf(_msg, "Address (0x%x) is out of image limits\n", addr); + _err = &_msg[0]; + return false; + } + *data = 
_buf[addr/4]; + return true; +} // FImage::read + +//////////////////////////////////////////////////////////////////////// +bool FImage::read(uint32_t addr, void *data, uint32_t len) +{ + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + if (len & 0x3) + { + _err = "Length should be 4-bytes aligned."; + return false; + } + if (!_buf) + { + _err = "Not opened"; + return false; + } + + uint32_t *p = (uint32_t *)data; + for (uint32_t i=0; iread(SECT_SIZE, &ps, sizeof(ps)); + + TOCPUBY(ps,sizeof(ps)); + + // Signature + if (ps.signature != SIGNATURE) + { + max_size = MST_MAX_FLASH; + } else + { + max_size = ps.fi_size; + } + + while (position < max_size ) + { + if (!this->read(position, &word)) + { + break; + } + + if ((position % 16) == 0) + { + printf("\n%08"PRIx32": ", position); + } + printf(" %08"PRIx32"", word); + + position = position + 4; + } + printf("\n"); + return true; +} + +bool Flash::format() +{ + uint32_t position = 0; + uint32_t max_size = 0; + PS ps; + uint32_t percent = 0; + uint32_t prev=0; + this->read(SECT_SIZE, &ps, sizeof(ps)); + + TOCPUBY(ps, sizeof(ps)); + + // Signature + if (ps.signature != SIGNATURE) + { + max_size = MST_MAX_FLASH; + } else + { + max_size = ps.fi_size; + } + + printPercent(0,1); + + while (position < max_size ) + { + this->erase_sector(position); + percent = (uint32_t)(((float)position / (float)max_size) * 100.00); + if (percent != prev) + printPercent(percent,0); + prev = percent; + position = position + SECT_SIZE; // increment by 65k sectors + } + printPercent(100,0); + printStatus(0); + return true; +} + +//////////////////////////////////////////////////////////////////////// +bool Flash::open(const char *device ) +{ + // Open device + bool status = false; + if (!device) + { + M_DEBUG("Specify HCA to work with\n"); + return status; + } + m_mf.ph_al = (ib_al_handle_t *)(intn_t)&m_mf.h_al; + m_mf.ph_ca = (ib_ca_handle_t *)(intn_t)&m_mf.h_ca; + if ( !(hca_open())) + { + M_DEBUG("Failed to open HCA \n"); + return status; + } + + //M_DEBUG("Open HCA GUID %"PRIx64"\n", cl_ntoh64(*m_mf.ca_guid)); + + m_curr_bank = 0xffffffff; + + // Obtain GPIO Semaphore + uint32_t cnt=0; + uint32_t word; + + MREAD4(FLASH, &word, FW_OPEN_IF ); + + while ( !status ) + { + MREAD4(SEMAP63, &word, FW_READ_CMD); + if( !word ) + { + status = true; + break; + } + if (++cnt > GPIO_SEM_TRIES) + { + _err = "cannot obtain GPIO semaophore (63)"; + break; + } + } /* while */ + + if ( !status ) + { + MREAD4(FLASH, &word, FW_CLOSE_IF ); + return status; + } + + //M_DEBUG("Acquired GPIO sempahore.\n"); + + // Save old values + MREAD4(GPIO_DIR_L, &m_dir, FW_READ_CMD); + MREAD4(GPIO_POL_L, &m_pol, FW_READ_CMD); + MREAD4(GPIO_MOD_L, &m_mod, FW_READ_CMD); + MREAD4(GPIO_DAT_L, &m_data, FW_READ_CMD); + + //M_DEBUG("Read GPIO information.\n"); + + // Set Direction=1, Polarity=0, Mode=0 for 3 GPIO lower bits + uint32_t dir; + uint32_t pol; + uint32_t mod; + + dir = m_dir | 0x70; + pol = m_pol & ~0x70; + mod = m_mod & ~0x70; + + MWRITE4(GPIO_DIR_L, &dir, FW_WRITE_CMD); + MWRITE4(GPIO_POL_L, &pol, FW_WRITE_CMD); + MWRITE4(GPIO_MOD_L, &mod, FW_WRITE_CMD); + + // M_DEBUG("Set GPIO bits.\n"); + + // Set CPUMODE + MREAD4(CPUMODE, &word, FW_READ_CMD); + word &= ~CPUMODE_MSK; + word |= 1 << CPUMODE_SHIFT; + MWRITE4(CPUMODE, &word, FW_WRITE_CMD); + + + //M_DEBUG("Set cpu mode.\n"); + + // Reset flash + status = write_internal(0, 0xf0); + M_DEBUG("Flash Open %s\n", ((status)?"OK":"FAIL")); + return status; +} // Flash::open + 
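/*
 * Editorial sketch -- not part of the original patch.  The Crc16 class used
 * throughout this file (see Crc16::add()/finish() above) is a plain
 * shift-register CRC over the polynomial 0x100b that consumes one 32-bit
 * word at a time.  The self-contained restatement below shows the same
 * algorithm end to end so it can be read (or tested) in isolation.  The
 * 0xffff seed is an assumption -- the Crc16 constructor falls in a part of
 * the patch text that did not survive extraction -- and the sample words
 * are made up.
 */
#if 0   /* illustration only; deliberately kept out of the build */
#include <cstdint>
#include <cstdio>

static uint16_t crc16_words(const uint32_t *words, unsigned n)
{
    uint32_t crc = 0xffff;                      /* assumed seed */
    for (unsigned w = 0; w < n; w++)
    {
        uint32_t o = words[w];
        for (int bit = 0; bit < 32; bit++)      /* shift in one bit per step, as in Crc16::add() */
        {
            if (crc & 0x8000)
                crc = (((crc << 1) | (o >> 31)) ^ 0x100b) & 0xffff;
            else
                crc = ((crc << 1) | (o >> 31)) & 0xffff;
            o = (o << 1) & 0xffffffff;
        }
    }
    for (int bit = 0; bit < 16; bit++)          /* flush the 16-bit register, as in Crc16::finish() */
        crc = (crc & 0x8000) ? ((crc << 1) ^ 0x100b) & 0xffff
                             : (crc << 1) & 0xffff;
    return (uint16_t)(crc ^ 0xffff);            /* final inversion */
}

int main()
{
    const uint32_t sample[] = { 0x12345678, 0x9abcdef0 };  /* made-up data */
    printf("crc16 = 0x%04x\n", (unsigned)crc16_words(sample, 2));
    return 0;
}
#endif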
+//////////////////////////////////////////////////////////////////////// +void Flash::close() +{ + uint32_t data = 0; + + if (!m_mf.h_ca) + return; + + m_gpio_state = Flash::Low; + set_bank(0); + + // Restore origin values + MWRITE4(GPIO_DIR_L, &m_dir, FW_WRITE_CMD); + MWRITE4(GPIO_POL_L, &m_pol, FW_WRITE_CMD); + MWRITE4(GPIO_MOD_L, &m_mod, FW_WRITE_CMD); + MWRITE4(GPIO_DAT_L, &m_data, FW_WRITE_CMD); + + // Free GPIO Semaphore + MWRITE4(SEMAP63, &data, FW_WRITE_CMD); + + // free kernel BusInterface + MREAD4(FLASH, &data, FW_CLOSE_IF ); + + hca_close(); + m_mf.h_ca = 0; + m_curr_bank = 0xffffffff; +} // Flash::close + +//////////////////////////////////////////////////////////////////////// +bool Flash::set_bank(uint32_t bank) +{ + uint32_t data = 0x70; + if (!m_mf.h_ca) + { + _err = "Not opened"; + return false; + } + + //printf("\n*** Flash::set_bank(0x%"PRIx32") : 0x%"PRIx32"\n", bank, (BANK_SHIFT-4) & 0x70); + MWRITE4(GPIO_DATACLEAR_L, &data, FW_WRITE_CMD); + if (m_gpio_state == Flash::Low) + { + data &= (bank >> (BANK_SHIFT-4)); + MWRITE4(GPIO_DATASET_L, &data, FW_WRITE_CMD ); + } else + { + data |= (bank >> (BANK_SHIFT-4)); + MWRITE4(GPIO_DATASET_L, &data, FW_WRITE_CMD); + } + + return true; +} // Flash::set_bank + +//////////////////////////////////////////////////////////////////////// +bool Flash::read(uint32_t addr, uint32_t *data) +{ + if (!m_mf.h_al) + { + _err = "Not opened"; + return false; + } + + uint32_t cmd; + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + + uint32_t bank = addr & BANK_MASK; + if (bank != m_curr_bank) + { + m_curr_bank = bank; + if (!set_bank(bank)) + return false; + } + MREAD4(addr, &cmd, FW_READ ); // new read + cmd = __be32_to_cpu(cmd); + memcpy(data, &cmd, sizeof(uint32_t)); + + return true; +} // Flash::read + +//////////////////////////////////////////////////////////////////////// +bool Flash::read(uint32_t addr, void *data, uint32_t len) +{ + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + if (len & 0x3) + { + _err = "Length should be 4-bytes aligned."; + return false; + } + + uint32_t *p = (uint32_t *)data; + for (uint32_t i=0; i ERASE_CNT) + { + _err = "Flash erase sector timeout"; + return false; + } + if (!read(addr, &word)) + return false; + + //printf("erase_sector: addr:%08"PRIx32", %08"PRIx32"\n", addr, word); + Sleep(ERASE_DELAY/1000); + } while (word != 0xffffffff); + + return true; +} // Flash::erase_sector + +//////////////////////////////////////////////////////////////////////// +bool Flash::write(uint32_t addr, uint32_t data) +{ + if (!m_mf.h_al) + { + _err = "Not opened"; + return false; + } + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + + uint32_t word; + uint32_t sector = addr & SECT_MASK; + uint32_t word_in_sector = (addr & ~SECT_MASK)/sizeof(uint32_t); + + if (!read(addr, &word)) + return false; + if (word == data) + return true; // already there + + uint32_t buff[SECT_SIZE/sizeof(uint32_t)]; + if (!read(sector, buff, SECT_SIZE)) + return false; + buff[word_in_sector] = data; + return write(sector, buff, SECT_SIZE); +} // Flash::write + +//////////////////////////////////////////////////////////////////////// +bool Flash::write(uint32_t addr, void *data, uint32_t cnt, + bool noerase, bool noverify) +{ + if (!m_mf.h_al) + { + _err = "Not opened"; + return false; + } + if (addr & 0x3) + { + _err = "Address should be 4-bytes aligned."; + return false; + } + + if ( !data ) + return true; + + char *p = (char 
*)data; + + for (uint32_t i=0; i> ((addr & 3) * 8)) & 0xff; +#elif __BYTE_ORDER == __BIG_ENDIAN + act = (word >> ((3 - (addr & 3)) * 8)) & 0xff; +#endif + exp = *(p-1) & 0xff; + if ( act != exp ) + { + M_DEBUG("Flash write error - read value %#x doesn't written value %#x", act, exp); + return false; + } + } + } + return true; +} // flash_write + +void Flash::hca_close (void) +{ + + if (m_mf.h_ca) + ib_close_ca( m_mf.h_ca, NULL ); + if (m_mf.h_al) + ib_close_al( m_mf.h_al); +} + bool Flash::hca_open(void) +{ + ib_api_status_t ib_status = IB_SUCCESS; + + /* Open ibal */ + ib_status = ib_open_al( m_mf.ph_al ); + if ( ib_status != IB_SUCCESS ) + { + M_DEBUG("Failed ot open AL status %#x\n", ib_status); + return false; + } + + ib_status = ib_get_ca_guids ( *m_mf.ph_al, NULL, &m_mf.ca_guids_count ); + if (m_mf.ca_guids_count == 0) + { + M_DEBUG("FOUND NO GUIDS\n"); + return false; + } + + m_mf.ca_guid =(ib_net64_t *)cl_zalloc(m_mf.ca_guids_count*sizeof(ib_net64_t)); + if(m_mf.ca_guid == NULL) + { + M_DEBUG("Failed to allocate memory for CA GUID table\n"); + return false; + } + + ib_status = ib_get_ca_guids (*m_mf.ph_al, m_mf.ca_guid, &m_mf.ca_guids_count ); + if (ib_status != IB_SUCCESS) + { + M_DEBUG("Failed to get CA GUIDs\n"); + return false; + } + + ib_status = ib_open_ca( *m_mf.ph_al, m_mf.ca_guid[0], NULL, &m_mf, m_mf.ph_ca ); + if (ib_status != IB_SUCCESS) + { + M_DEBUG("Failed to open CA\n"); + return false; + } + + return true; +} + +#define STR_LEN 128 +void printPercent(const uint32_t percent, const uint32_t reset) +{ +#define PRECISION 2 +#define NO_OF_PARTS (100 / (PRECISION)) + /*"12345678901234567890123456789012345678901234567890"*/ + char markerBar[NO_OF_PARTS+1] = " "; + static uint32_t prevNoOfXs = 0; + uint32_t noOfXs = 0; + uint32_t x; + char str[STR_LEN]; + cl_memset(str,0,STR_LEN); + + markerBar[NO_OF_PARTS+1] = '\0'; + + if (reset == 1) + { + sprintf(str,"|%s| %3d%%", markerBar, percent); + prevNoOfXs = 0; + } + else + { + noOfXs = percent / PRECISION; + for (x = 0; x < NO_OF_PARTS; x++) + { + if (x < noOfXs) + markerBar[x] = '*'; + } + if (noOfXs>prevNoOfXs) + { + sprintf(str,"|%s| %3d%%", markerBar, percent); + //printf("|%s| %3d%%", markerBar, percent); + prevNoOfXs = noOfXs; + } + } + printf("\r%s",str); + fflush(stdout); +} + +void printStatus(const uint32_t status) +{ + printf("[%s]\n",((status == 0)?"SUCCESS":"FAILED")); +} + +//////////////////////////////////////////////////////////////////////// +bool Flash::write_image(uint32_t addr, void *data, uint32_t cnt, bool eraseInvariant, bool report) +{ + uint8_t *p = (uint8_t *)data; + uint32_t curr_addr = addr; + uint32_t towrite = cnt; + uint32_t perc = 0xffffffff; + bool noerase = false; + + curr_sector = 0xffffffff; // Erase sector first time + if (report) + { + printf("writing: "); + printPercent(0,1); + fflush(stdout); + } + + while (towrite) + { + // Write + uint32_t trans = (towrite > (uint32_t)TRANS) ? 
(uint32_t)TRANS : towrite; + + if (eraseInvariant) + { + noerase = false; + } else + { + if (curr_addr < SECT_SIZE) + { + noerase = true; + } else + { + noerase = false; + } + } + + if (!noerase) + { + if (!write(curr_addr, p, trans, noerase, true)) + { + printStatus(1); + return false; + } + } + p += trans; + curr_addr += trans; + towrite -= trans; + + // Report + if (report) + { + uint32_t new_perc = ((cnt - towrite) * 100) / cnt; + if (new_perc != perc) + { + printPercent(new_perc,0); + fflush(stdout); + perc = new_perc; + } + } + } + + if (report) + { + printPercent(100,0); + printStatus(0); + fflush(stdout); + } + + return true; +} // Flash::write_image + + +/* + Note: + for read - data return in native Endian mode, + for write - data should be written in BigEndian mode + check where data is actually converted !!!! +*/ +ib_api_status_t Flash::access(hca_dev_t *dev, u_int32_t offset, void *p_data, uint32_t length, uint32_t operation ) +{ + + ib_ci_op_t ci_op; + ib_api_status_t ib_status; + + BOOL dev_status; + DWORD bytes_ret; + + ci_op.buf_size = length; + ci_op.p_buf = p_data; + ci_op.buf_info = offset; + ci_op.num_bytes_ret = sizeof (ib_ci_op_t); + ci_op.command = operation; + + ib_status = ib_ci_call (m_mf.h_ca, NULL, 0, &ci_op); + + if ( ib_status != IB_SUCCESS ) + { + FW_TRACE (FW_DBG_ERROR,("Failed ib_ci_call return status %#x\n", ib_status )); + return ib_status; + } + return IB_SUCCESS; +} + +bool getGUIDsFromFlash(FBase &device, uint64_t guids[GUIDS]) +{ + uint32_t NODE_GUIDH, NODE_GUIDL; + uint32_t PORT1_GUIDH, PORT1_GUIDL; + uint32_t PORT2_GUIDH, PORT2_GUIDL; + uint32_t prim_ptr; + uint32_t signature; + PS ps; + bool isFailSafe = false; + + READ4(device, 0x24, &signature, "Signature"); + TOCPU1(signature); + + if (signature == SIGNATURE) + { + // Fail Safe image + + // Assume flash has been verified, and both images have the same guids, therefore, + // we only need to read the primary image's guids + device.read(SECT_SIZE, &ps, sizeof(ps)); + TOCPUBY(ps,sizeof(ps)); + READ4(device, ps.fi_addr+0x24, &prim_ptr, "Primary Section"); + M_DEBUG("Firmware Address : 0x%"PRIx32"\n", ps.fi_addr); + M_DEBUG("Node GUID Offset : 0x%"PRIx32"\n", prim_ptr); + prim_ptr = BE2CPU32(prim_ptr)+ps.fi_addr; + isFailSafe = true; + } + else + { + // Short image + prim_ptr = signature; + } + + printf("Found valid GUID pointer : %08"PRIx32"\n", prim_ptr); + if (prim_ptr < MST_MAX_FLASH || isFailSafe) + { + READ4(device, prim_ptr, &NODE_GUIDH, "Node GUID High"); + READ4(device, prim_ptr+4, &NODE_GUIDL, "Node GUID Low"); + guids[0] = BE2CPU32(NODE_GUIDH); + guids[0] = (guids[0]<<32) | BE2CPU32(NODE_GUIDL); + READ4(device, prim_ptr+8, &PORT1_GUIDH, "Port 1 GUID High"); + READ4(device, prim_ptr+12, &PORT1_GUIDL, "Port 1 GUID Low"); + guids[1] = BE2CPU32(PORT1_GUIDH); + guids[1] = (guids[1]<<32) | BE2CPU32(PORT1_GUIDL); + READ4(device, prim_ptr+16, &PORT2_GUIDH, "Port 2 GUID High"); + READ4(device, prim_ptr+20, &PORT2_GUIDL, "Port 2 GUID Low"); + guids[2] = BE2CPU32(PORT2_GUIDH); + guids[2] = (guids[2]<<32) | BE2CPU32(PORT2_GUIDL); + + guids[3] = guids[0]; + printf("Found device's existing GUIDs:\n"); + printf("Node GUID : 0x%016"PRIx64"\n", guids[0]); + printf("Port1 GUID : 0x%016"PRIx64"\n", guids[1]); + printf("Port2 GUID : 0x%016"PRIx64"\n", guids[2]); + } else + { + printf("Found an invalid GUID pointer!\n"); + return false; + } + + return true; +} + +bool getGUIDs(char *guidString, uint64_t guids[GUIDS]) +{ + uint32_t PORT1_GUIDH, PORT1_GUIDL; + uint32_t PORT2_GUIDH, PORT2_GUIDL; + uint64_t GUID; 
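    /*
     * Editorial note (not in the original patch): this routine expands the
     * single user-supplied node GUID into the per-port GUIDs that get
     * written to flash.  Worked example for the Mellanox convention handled
     * in the second branch below: node 0x0002c90000001234 yields
     * port1 0x0002c90000001235 and port2 0x0002c90000001236, with a carry
     * into the high dword whenever the low dword wraps.  GUIDs carrying the
     * InfiniCon OUI (0x00066a) instead get fixed type nibbles OR'ed into
     * the low dword, as coded in the first branch.
     */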
//strtoull(guidString, (char **)NULL, 0); + uint32_t NODE_GUIDH; + uint32_t NODE_GUIDL; + + if (!getGUID(guidString, &GUID)) + { + return false; + } + + NODE_GUIDH = (uint32_t)(GUID >> 32); + NODE_GUIDL = (uint32_t)GUID; + + if ((NODE_GUIDH & 0xffffffff) == 0x00066a00 + && ((NODE_GUIDL & 0xf8000000) == 0x98000000 + || (NODE_GUIDL & 0xf8000000) == 0xb0000000)) + { + // Always make it a InfiniCon NODE GUID + NODE_GUIDL = (NODE_GUIDL & 0x0FFFFFFF) | 0x98000000; + + // Convert to the InfiniCon Node Guid Convention. + PORT1_GUIDH = NODE_GUIDH; + PORT1_GUIDL = (NODE_GUIDL & 0x07ffffff) | 0xa0000000; + PORT2_GUIDH = NODE_GUIDH | 1; + PORT2_GUIDL = (NODE_GUIDL & 0x07ffffff) | 0xa0000000; + } else { + // Treat everything else as a Mellanox Node Guid Convention. + PORT1_GUIDH=NODE_GUIDH; + PORT1_GUIDL=NODE_GUIDL+1; + if ( PORT1_GUIDL==0 ) { + PORT1_GUIDH++; + } + PORT2_GUIDH=PORT1_GUIDH; + PORT2_GUIDL=PORT1_GUIDL+1; + if ( PORT2_GUIDL==0 ) { + PORT2_GUIDH++; + } + } + + guids[0] = NODE_GUIDH; + guids[0] = (guids[0]<<32) | NODE_GUIDL; + + guids[1] = PORT1_GUIDH; + guids[1] = (guids[1]<<32) | PORT1_GUIDL; + + guids[2] = PORT2_GUIDH; + guids[2] = (guids[2]<<32) | PORT2_GUIDL; + + guids[3] = guids[0]; + + printf("Using user specified GUIDs :\n"); + printf("Node GUID : 0x%016"PRIx64"\n", guids[0]); + printf("Port 1 GUID : 0x%016"PRIx64"\n", guids[1]); + printf("Port 2 GUID : 0x%016"PRIx64"\n", guids[2]); + return true; +} + +bool _silent = false; +#define report printf + +//////////////////////////////////////////////////////////////////////// +bool isInvariantSectionEqual(FBase& f1, FBase& f2) +{ + uint32_t size1; + uint32_t size2; + uint32_t beg = 0x0; + uint32_t offs = 0x28; + const char* pr = "isInvariantSectionEqual"; + uint32_t *buf1; + uint32_t *buf2; + bool status = true; + + // Size + READ4(f1, offs+beg+4, &size1, pr); + READ4(f2, offs+beg+4, &size2, pr); + TOCPU1(size1); + TOCPU1(size2); + +// M_DEBUG("Invariant sector size1: %"PRId32"\n", size1); +// M_DEBUG("Invariant sector size2: %"PRId32"\n", size2); + + if (size1 != size2) + { + M_DEBUG("Invariant sector sizes do not match.\n"); + return false; + } + + buf1 = (uint32_t *)cl_zalloc((size1+4)*4); + if (!buf1) + { + cl_dbg_out("Failed to allocate memory size =%d", size1+4 ); + return false; + } + buf2 = (uint32_t *)cl_zalloc( (size2+4)*4); + if (!buf2) + { + cl_free(buf1); + cl_dbg_out("Failed to allocate memory size =%d", size2+4 ); + return false; + } + + READBUF(f1, offs+beg, buf1, (size1+4)*4, pr); + READBUF(f2, offs+beg, buf2, (size2+4)*4, pr); + + if ( cl_memcmp(buf1, buf2, (size1+4)*4) ) + { + // M_DEBUG("Invariant sections are not equal.\n"); + status = false; + } + cl_free(buf1); + cl_free(buf2); + return status; +} // isInvariantSectionEqual + +//////////////////////////////////////////////////////////////////////// +bool checkBoot2(FBase& f, uint32_t beg, uint32_t offs, uint32_t& next, const char *pref) +{ + char pr[256]; + uint32_t size; + uint32_t *buff; + uint32_t *safe_buf; + //sprintf(pr, "%s /0x%08"PRIx32"/ (BOOT2)", pref, offs+beg); + + // Size + READ4(f, offs+beg+4, &size, pr); + TOCPU1(size); + + if (size > 1048576 || size < 4) + { + report("%s /0x%08"PRIx32"/ - unexpected size (0x%x)\n", pr, offs+beg+4, size); + return false; + } + + //sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (BOOT2)", pref, offs+beg, offs+beg+(size+4)*4-1, (size+4)*4); + + // CRC + Crc16 crc; + buff = (uint32_t*)cl_zalloc((size+4)*4); + if (buff) + safe_buf = buff; + READBUF(f, offs+beg, buff, (size+4)*4, pr); + TOCPU(buff, 
(size+4)*4 ); + CRC1(crc, buff, (size+4)*4 ); + crc.finish(); + uint32_t crc_act = *(buff+size+3); + if (crc.get() != crc_act) + { + report("%s /0x%08"PRIx32"/ - wrong CRC (exp:0x%x, act:0x%x)\n", + pr, offs+beg, crc.get(), crc_act); + cl_free(safe_buf); + return false; + } + + //report("%s - OK\n", pr); + next = offs + (size+4)*4; + cl_free(safe_buf); + return true; +} // checkBoot2 + +static uint32_t part_cnt; + +//////////////////////////////////////////////////////////////////////// +bool checkGen(FBase& f, uint32_t beg, + uint32_t offs, uint32_t& next, const char *pref) +{ + uint32_t size=0; + char pr[1024]; + GPH gph; + cl_memset(pr,0,1024); + // GPH + sprintf(pr, "%s /0x%08"PRIx32"/ (GeneralHeader)", pref, offs+beg); + READBUF(f, offs+beg, &gph, sizeof(GPH), pr); + TOCPUBY(gph, sizeof(GPH)); + + // Body + + part_cnt++; + + // May be BOOT3? + if (gph.type < H_FIRST || gph.type > H_LAST) + { + if (part_cnt > 2) + { + report("%s /0x%"PRIx32"/ - Invalid partition type (%"PRIx32")\n", + pref, offs+beg, gph.type); + return false; + } + else + return checkBoot2(f, beg, offs, next, pref); + } + + // All partitions here + offs += beg; + switch(gph.type) + { + case H_DDR: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (DDR)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_CNF: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (Configuration)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_JMP: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (Jump addresses)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_EMT: + size = gph.size; + size = (size + 3) / 4 * 4; + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (EMT Service)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_FW_CONF: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (FW Configuration)", + pref, offs, offs+size+(uint32_t)sizeof(gph)+3, + size+(uint32_t)sizeof(gph)+4); + break; + case H_ROM: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (ROM)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_GUID: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (GUID)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3, size+(unsigned)sizeof(gph)+4); + break; + case H_USER_DATA: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (User Data)", + pref, offs, offs+size+(uint32_t)sizeof(gph)+3, + size+(uint32_t)sizeof(gph)+4); + break; + case H_BOARD_ID: + size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ (Board ID)", + pref, offs, offs+size+(unsigned)sizeof(gph)+3,size+(u_int32_t)sizeof(gph)+4); + break; + default: + // For forward compatibility, try analyzing even if section type is uncknown + // Assuming the size is in DW, like all other sections (except emt service). + // If this assumption is wrong, CRC calc would fail - no harm done. 
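        // Editorial note (not in the original patch): concretely, an unknown
        // section is still assumed to be laid out as
        //   <GPH header> <size dwords of payload> <one dword of CRC16>,
        // which is all the CRC walk below needs; only the printed label is
        // generic.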
+ size = gph.size * sizeof(uint32_t); + sprintf(pr, "%s /0x%08x-0x%08x (0x%06x)/ (UNKNOWN SECTION TYPE (%d))", + pref, offs, offs+size+(uint32_t)sizeof(gph)+3, + size+(uint32_t)sizeof(gph)+4, gph.type); + } + + // CRC + Crc16 crc; + uint32_t *buff; + buff = (uint32_t*)cl_zalloc(size); + READBUF(f, offs+sizeof(gph), buff, size, pr); + TOCPU(buff,size); + CRCBY(crc, gph, sizeof(GPH)); + CRC(crc, buff,size); + crc.finish(); + uint32_t crc_act; + READ4(f, offs+sizeof(gph)+size, &crc_act, pr); + TOCPU1(crc_act); + if (crc.get() != crc_act) + { + report("%s /0x%08"PRIx32"/ - wrong CRC (exp:0x%x, act:0x%x)\n", + pr, offs, crc.get(), crc_act); + cl_free(buff); + return false; + } + + report("%s - OK\n", pr); + next = gph.next; + cl_free(buff); + return true; +} // checkGen + +//////////////////////////////////////////////////////////////////////// +bool checkPS(FBase& f, uint32_t offs, uint32_t& next, const char *pref) +{ + Crc16 crc; + PS ps; + f.read(offs, &ps, sizeof(ps)); + TOCPUBY(ps, sizeof(ps)); + + // Signature + if (ps.signature != SIGNATURE) + { + report("%s Pointer Sector /0x%08"PRIx32"/ - wrong signature (%08"PRIx32")\n", + pref, offs, ps.signature); + return false; + } + + // CRC + CRC1BY(crc, ps, sizeof(PS)); + crc.finish(); + if (crc.get() != ps.crc016) + { + report("%s Pointer Sector /0x%08"PRIx32"/ - wrong CRC (exp:0x%"PRIx32", act:0x%x)\n", + pref, offs, ps.crc016, crc.get()); + return false; + } + + next = ps.fi_addr; + report("%s Image /0x%08"PRIx32"-0x%08"PRIx32" (0x%06"PRIx32")/ - OK\n", pref, offs, + offs+(unsigned)sizeof(ps)-1, (unsigned)sizeof(ps)); + return true; +} // checkPS + +//////////////////////////////////////////////////////////////////////// +bool checkList(FBase& f, uint32_t offs, const char *pref) +{ + uint32_t next_ptr; + + CHECKB2(f, offs, 0x28, next_ptr, pref); + part_cnt = 1; + while (next_ptr && next_ptr != 0xff000000) + CHECKGN(f, offs, next_ptr, next_ptr, pref); + return true; +} // checkList + +//////////////////////////////////////////////////////////////////////// +bool FBase::verify() +{ + uint32_t prim_ptr, scnd_ptr; + uint32_t signature; + + READ4((*this), 0x24, &signature, "Signature"); + TOCPU1(signature); + if (signature == SIGNATURE) + { + // Full image + report("\nFailsafe image:\n\n"); + CHECKB2((*this), 0, 0x28, prim_ptr, "Invariant "); + report("\n"); + if (checkPS((*this), SECT_SIZE, prim_ptr, "Primary ")) + CHECKLS((*this), prim_ptr, " "); + report("\n"); + if (checkPS((*this), SECT_SIZE * 2, scnd_ptr, "Secondary")) + CHECKLS((*this), scnd_ptr, " "); + } + else + { + // Short image + report("\nShort image:\n"); + CHECKLS((*this), 0, " "); + } + + return true; +} // Verify + + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// GUIDs TREATMENT // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +#define GETGUID(s, g) do { if (!getGUID(s,g)) return 1; } while (0) +#define GETBSN(s, g) do { if (!getBSN(s,g)) return 1; } while (0) + +#define BSN_RET do { \ + printf("Invalid BSN. 
Should be MTxxxxxRddmmyy-nnn[-cc]\n"); \ + return false; \ +} while(0) +#define BSN_RET1(s) do { \ + printf("Valid BSN format is: MTxxxxxRddmmyy-nnn[-cc]\n%s.\n",s); \ + return false; \ +} while(0) +uint32_t BSN_subfield(const char *s, uint32_t beg, uint32_t len) +{ + char buf[64]; + strncpy(buf, &s[beg], len); + buf[len] = '\0'; + return strtoul(&buf[0], 0, 10); +} +bool getBSN(char *s, uint64_t *guid) +{ + const uint64_t COMPANY_ID = 0x0002c9; + const uint64_t TYPE = 1; + bool cc_present = false; + char *p; + uint32_t i; + + // Convert to lowercase + for (p = s; *p; p++) + *p = (char)tolower(*p); + + // Check validity + p = s; + if (strncmp(p, "mt", 2)) // MT + BSN_RET; + p += 2; + for (i=0; i<5; i++) + if (!isdigit(*p++)) // xxxxx + BSN_RET; + if (*p < 'a' || *p > 'z') // R + BSN_RET; + p++; + for (i=0; i<6; i++) // ddmmyy + if (!isdigit(*p++)) + BSN_RET; + if (*p++ != '-') // - + BSN_RET; + for (i=0; i<3; i++) // nnn + if (!isdigit(*p++)) + BSN_RET; + if (*p) + { + cc_present = true; + if (*p++ != '-') // - + BSN_RET; + for (i=0; i<2; i++) // cc + if (!isdigit(*p++)) + BSN_RET; + } + + uint32_t dd = BSN_subfield(s, 8, 2); + if (dd > 31) + BSN_RET1("Day (dd) should not exceed 31"); + if (!dd) + BSN_RET1("Day (dd) can't be zero"); + uint32_t mm = BSN_subfield(s, 10, 2); + if (mm > 12) + BSN_RET1("Months (mm) should not exceed 12"); + if (!mm) + BSN_RET1("Months (mm) can't be zero"); + uint32_t yy = BSN_subfield(s, 12, 2); + if (yy > 99) + BSN_RET1("Year (yy) should not exceed 99"); + if (!yy) + BSN_RET1("Year (yy) can't be zero"); + uint32_t num = BSN_subfield(s, 15, 3); + if (num > 999) + BSN_RET1("Number (num) should not exceed 999"); + if (!num) + BSN_RET1("Number (num) can't be zero"); + uint32_t cc = 1; + if (cc_present) + { + cc = BSN_subfield(s, 19, 2); + if (cc > 14) + BSN_RET1("Chip number (cc) should not exceed 14"); + if (!cc) + BSN_RET1("Chip number (cc) can't be zero"); + } + uint64_t id = ((((yy*12+mm-1)*31+ dd-1) * 1000) + num-1) * 112; + id += (cc-1)*8; + *guid = (COMPANY_ID << 40) | (TYPE << 32) | id; + return true; +} + +bool getGUID(const char *s, uint64_t *guid) +{ + char str[17], *endp; + int i,j; + uint32_t h,l; + memset(str, '0', 15); + str[16] = '\0'; + + for (i=(int)strlen(s)-1,j=15;i >= 0 && j >= 0 ; i-- ) + { + if (isxdigit(s[i])) + str[j--] = s[i]; + } + l = strtoul(&str[8], &endp, 16); + if (*endp) + { + printf("Invalid GUID syntax (%s)\n", &str[8]); + return false; + } + str[8] = '\0'; + h = strtoul(&str[0], &endp, 16); + if (*endp) + { + printf("Invalid GUID syntax (%s)\n", str); + return false; + } + *guid = ((uint64_t)h << 32) | l; + return true; +} // getGUID + +//////////////////////////////////////////////////////////////////////// +bool extractGUIDptr(uint32_t sign, uint32_t *buf, uint32_t buf_len, + char *pref, uint32_t *ind, uint32_t *nguids) +{ + uint32_t offs = 0; + + // Check signature + if (sign) + { + uint32_t signature = buf[(sign + 8)/4]; + TOCPU1(signature); + if (signature != SIGNATURE) + { + printf("%s pointer section not valid\n", pref); + return false; + } + offs = buf[sign/4]; + TOCPU1(offs); + } + + // Get GUID ptr + *ind = buf[(offs+0x24)/4]; + TOCPU1(*ind); + *ind += offs; + if (*ind >= (uint32_t)buf_len) + { + printf("%s image - insane GUID pointer (%08"PRIx32")\n", pref, *ind); + return false; + } + *nguids = buf[*ind/4 - 3]; + TOCPU1(*nguids); + *nguids /= 2; + + // More sanity check + if (*nguids > GUIDS) + { + printf("%s image - insane number of GUIDs (%d)\n", pref, *nguids); + return false; + } + + return true; +} // 
extractGUIDptr + +//////////////////////////////////////////////////////////////////////// +void patchGUIDsSection( + uint32_t *buf, + uint32_t ind, + uint64_t guids[GUIDS], + uint32_t nguids) +{ + uint32_t i, word; + uint32_t new_buf[GUIDS*2]; + Crc16 crc; + + // Form new GUID section + for (i=0; i<(uint32_t)nguids; i++) + { + new_buf[i*2] = (uint32_t)(guids[i] >> 32); + new_buf[i*2+1] = (uint32_t)(guids[i] & 0xffffffff); + } + + // Calculate new CRC16 + for (i=ind/4 - 4; i>=24; + + if (Fhardwareversion > 0xA0) + { + device.read(0x24, &Fsignature, sizeof(Fsignature)); + TOCPU1(Fsignature); + } + + if (!cl.isOption(FlashCommandLine::force)) + { + if (Fhardwareversion != 0xA1 && Fhardwareversion != 0xA0) + { + printf("!!!WARNING!!!\n" + "Unable to determine the device\'s %s firmware revision (unknown rev %x)!\n" + "It could be a corrupted firmware or an unsupported board revision.\n" + "%s" + "Aborting operation.\n", + cl.getDeviceName(), + Fhardwareversion, + techSupportMessage); + return false; + } + } else + { + printf("!!!WARNING!!! Skipping flash device hardware revision integrity check.\n"); + } + + M_DEBUG("Open raw file.\n"); + + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s %s\n", image._err, cl.getRawFileName()); + return false; + } + + M_DEBUG("Verify raw file.\n"); + + _silent = true; + if (!image.verify()) + { + printf("%s is not a valid image\n", cl.getRawFileName()); + return false; + } + + image.read(0x24, &Isignature); + TOCPU1(Isignature); + + image.read(0x10, &Ihardwareversion, sizeof(Ihardwareversion)); + TOCPU1(Ihardwareversion); + Ihardwareversion >>=24; + + M_DEBUG("Verify hardware and signature information.\n"); + + if (!cl.isOption(FlashCommandLine::force)) + { + if (Ihardwareversion != 0xA1 && Ihardwareversion != 0xA0) + { + printf("!!!WARNING!!!\n" + "Unable to determine the image\'s %s firmware revision (unknown rev %x)!\n" + "It could be a corrupted image or an unsupported board revision.\n" + "%s" + "Aborting operation.\n", + cl.getDeviceName(), + Ihardwareversion, + techSupportMessage); + return false; + } + else if (Fhardwareversion != Ihardwareversion) + { + printf("!!!WARNING!!! " + "An HCA Rev %X firmware can not be used with HCA Rev %X device!!!\n" + "Aborting operation.\n", + Ihardwareversion, + Fhardwareversion); + return false; + } + /* + if (Fhardwareversion == 0xA0) + { + if (Isignature == SIGNATURE) + { + printf("!!!WARNING!!! " + "Rev A0 HCA's will not work with a fail safe image!!!\n" + "%s" + "Aborting operation.\n", techSupportMessage); + return false; + } + } + */ + } else + { + printf("!!!DANGER!!!! 
Skipping image file hardware revision integrity check.\n"); + } + + printf("HCA Rev : %"PRIX32"\n", Fhardwareversion); + printf("Image Rev : %"PRIX32"\n", Ihardwareversion); + + if (!cl.useFlashNodeGUID()) + { // the user specified the guid at the command line + if (!getGUIDs(cl.getNodeGUID(),guids)) + { + printf("Aborting burn operation.\n"); + device.close(); + return false; + } + insertGUIDs = true; + } else + { + // should we query the flash device for the guids + // If we don't query the flash for the guids, and + // we don't specify the guids at the command line + // then we will use the guids supplied in the raw + // image file + M_DEBUG("Query the flash device\n"); + if (!cl.isOption(FlashCommandLine::noquery)) + { + // obtain the guids from the flash + if (!getGUIDsFromFlash(device, guids)) + { + printf("The image on the flash device appears corrupted!\n" + "Unable to determine the GUIDs from the flash device %s.\n" + "Try the -n <0xGUID> option. If this fails, then contact\n" + "SilverStorm Technologies technical support.\n", + cl.getDeviceName()); + device.close(); + return false; + } + insertGUIDs = true; + } + } + + bool interactive; + + if (cl.isOption(FlashCommandLine::no_prompts)) + { + interactive=false; + } + else + { + interactive = (_isatty(_fileno( stdin )) > 0 ? true: false); + } + + bool eraseInvariant = false; + + if (cl.isOption(FlashCommandLine::burn_invariant_section)) + { + M_DEBUG("Always burn invariant section.\n"); + eraseInvariant = true; + } else // only burn the invariant section if it differs + { + if (isInvariantSectionEqual(device, image)) + { + M_DEBUG("The invariant sections are equal.\n"); + eraseInvariant = false; + } else + { + M_DEBUG("The invariant sections differ.\n"); + eraseInvariant = true; + } + } + + M_DEBUG("Patch GUIDs.\n"); + + // Patch GUIDs + if (insertGUIDs && !patchGUIDs(image, guids, interactive)) + { + printf("Aborting burn operation at user request.\n"); + return true; + } + + M_DEBUG("Patch VSDs.\n"); + + // Patch VSDs + if (!patchVSDs(image, NULL)) + { + // if this fails it means it's a short image, and we should just continue on + } + + M_DEBUG("Write firmware.\n"); + + // Burn it + if (!device.write_image(0, image.getBuf(), image.getBufLength(), eraseInvariant, (_isatty(_fileno( stdin )) > 0? 
true: false)) ) + { + printf("Error: %s\n", device._err); + return false; + } + + device.close(); + image.close(); + + return true; +} + +bool patchVSDToRawImage(FlashCommandLine cl) +{ + printf("Attempting to insert VSD section into the %s file: \n", cl.getRawFileName()); + + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s\n", image._err); + printf("Aborting operation.\n"); + return false; + } + + if (!patchVSDs(image, cl.getRevisionString())) + { + printf("Failed.\n" + "!!!WARNING!!!\n" + "Unable to inject the VSD into a Short Image.\n" + "Short images do not support the VSD section.\n" + "Only Fail Safe images support the VSD section.\n" + "Aborting operation.\n"); + return false; + } + + FILE *fd; + void *write_buffer; + uint32_t buflen = image.getBufLength(); + + // it is necessary to close the file prior to re-opening for write privileges + // this allows us to write to the same file without using a temporary file + write_buffer = malloc(buflen); + memcpy(write_buffer, image.getBuf(), buflen); + image.close(); + + // only open the file, if any things fails, then the image verify + // will indicate issues + if ((fd = fopen(cl.getRawFileName(),"w")) != NULL) + { + fwrite(write_buffer, buflen, 1, fd); + fclose(fd); + } else + { + printf("Failed.\n" + "Unable to write to raw image file %s.\n", cl.getRawFileName()); + _silent=true; + if (!image.verify()) // verify file integrity + { + printf("!!!WARNING!!! " + "The file appears to have been corrupted!!!\n"); + return false; + } else + { + printf("File appears to be valid, but the VSD injection failed.\n"); + } + _silent=false; + + return false; + } + + // ok to free the buffer now + free(write_buffer); + + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s\n", image._err); + printf("Aborting operation.\n"); + return false; + } + _silent=true; + if (!image.verify()) // verify file integrity + { + printf("Failed Verification.\n" + "The file %s appears to have been corrupted\n", cl.getRawFileName()); + return false; + } + _silent=false; + image.close(); + printf("Insertion : [SUCCESS]\n"); + printf("Done.\n"); + return true; +} + +bool showInfo(FBase &device, FlashCommandLine cl) +{ + uint32_t NODE_GUIDH, NODE_GUIDL; + uint32_t PORT1_GUIDH, PORT1_GUIDL; + uint32_t PORT2_GUIDH, PORT2_GUIDL; + uint32_t hardwareversion; + uint32_t psptr; + uint32_t signature; + int section = 1; + PS ps; + time_t myTime; + bool isFailSafe = false; + uint64_t guids[GUIDS]; + char revision[FlashCommandLine::revisionStringLength]; + + READ4(device, 0x24, &signature, "Signature"); + TOCPU1(signature); + + if (DebugMode) + { + _silent=false; + } else + { + _silent=false; + } + + if (signature == SIGNATURE) + { + uint8_t offset =0, x=0; + // Fail Safe image + + // verify the image invariant section + if (!checkBoot2(device, 0, 0x28, psptr, "Invariant\t")) + { + printf("Invariant section is not valid!"); + _silent=false; + return false; + } + + if (!checkPS(device, SECT_SIZE, psptr, "Primary\t")) + { + M_DEBUG("Primary section is not valid\n"); + // If the primary is invalid then try secondary section + if (!checkPS(device, SECT_SIZE*2, psptr, "Secondary\t")) + { + printf("Firmware is corrupted. 
Unable to display information.\n"); + _silent=false; + return false; + } + section=2; // secondary is valid, use it + } + + device.read(0x10, &hardwareversion, sizeof(hardwareversion)); + TOCPU1(hardwareversion); + hardwareversion >>=24; + device.read(SECT_SIZE*section, &ps, sizeof(ps)); + TOCPUBY(ps,sizeof(ps)); + printf("PSID : %s\n",ps.psid.as_str); + printf("Image Type : Fail Safe\n"); + if (hardwareversion != 0xA0 && hardwareversion != 0xA1) + { + printf("Hardware Version : 0xInvalid\n"); + + } else + { + printf("Hardware Version : 0x%"PRIX32"\n", hardwareversion); + } + printf("Company : %s\n", ps.vsd[0]==0x00066A?"SilverStorm Technologies":"Mellanox, Inc"); + + myTime = ps.vsd[2]; + printf("%s Date : %s", (device.getClassType()==Flash::FlashDeviceType?"Burn ":"Creation"), asctime(localtime(&myTime))); + + for (x=3;x<9;x++) + { + memcpy(revision+offset, &ps.vsd[x], sizeof(uint32_t)); + offset = offset + 4; + } + revision[FlashCommandLine::revisionStringLength-1]='\0'; + printf("Firmware Revision : %s\n", revision); + READ4(device, ps.fi_addr+0x24, &psptr, "pointer section"); + printf("Firmware Address : 0x%"PRIx32"\n", ps.fi_addr); + printf("Node GUID Offset : 0x%"PRIx32"\n", psptr); + psptr = BE2CPU32(psptr)+ps.fi_addr; + isFailSafe = true; + } + else + { + if (!checkList(device, 0,"")) + { + printf("Firmware is not valid. Can not display information.\n"); + _silent=false; + return false; + } + // Short image + // Assume flash has been verified, and both images have the same guids, therefore, + // we only need to read the primary image's guids + device.read(0x10, &hardwareversion, sizeof(hardwareversion)); + TOCPU1(hardwareversion); + hardwareversion >>=24; + printf("Image Type : Short\n"); + if (hardwareversion != 0xA0 && hardwareversion != 0xA1) + { + printf("Hardware Version : 0xInvalid\n"); + + } else + { + printf("Hardware Version : 0x%X\n", hardwareversion); + } + printf("Company : Unknown\n"); + printf("%s Date : Unknown\n", (device.getClassType()==Flash::FlashDeviceType?"Burn ":"Creation")); + printf("Firmware Revision : Unknown\n"); + printf("Firmware Address : 0xUnknown\n"); + psptr = signature; + if (psptr < MST_MAX_FLASH ) + { + printf("Node GUID Offset : 0x%"PRIx32"\n", psptr); + } else + { + printf("Node GUID Offset : 0x%"PRIx32" !!! Error: Invalid !!!\n", psptr); + } + + } + + if (psptr < MST_MAX_FLASH || isFailSafe) + { + READ4(device, psptr, &NODE_GUIDH, "Node GUID High"); + READ4(device, psptr+4, &NODE_GUIDL, "Node GUID Low"); + guids[0] = BE2CPU32(NODE_GUIDH); + guids[0] = (guids[0]<<32) | BE2CPU32(NODE_GUIDL); + READ4(device, psptr+8, &PORT1_GUIDH, "Port 1 GUID High"); + READ4(device, psptr+12, &PORT1_GUIDL, "Port 1 GUID Low"); + guids[1] = BE2CPU32(PORT1_GUIDH); + guids[1] = (guids[1]<<32) | BE2CPU32(PORT1_GUIDL); + READ4(device, psptr+16, &PORT2_GUIDH, "Port 2 GUID High"); + READ4(device, psptr+20, &PORT2_GUIDL, "Port 2 GUID Low"); + guids[2] = BE2CPU32(PORT2_GUIDH); + guids[2] = (guids[2]<<32) | BE2CPU32(PORT2_GUIDL); + + guids[3] = guids[0]; + printf("Node GUID : 0x%016"PRIx64"\n", guids[0]); + printf("Port1 GUID : 0x%016"PRIx64"\n", guids[1]); + printf("Port2 GUID : 0x%016"PRIx64"\n", guids[2]); + } else + { + printf("Node GUID : 0xInvalid\n"); + printf("Port1 GUID : 0xInvalid\n"); + printf("Port2 GUID : 0xInvalid\n"); + printf("!!! 
WARNING: The flash %s is corrupted.\n" + "You must use the -n <0xGUID> and --force option to update this device.\n", + cl.getDeviceName()); + _silent=false; + return false; + } + _silent=false; + return true; +} + +//////////////////////////////////////////////////////////////////////// +// // +// ****************************************************************** // +// MAIN // +// ****************************************************************** // +// // +//////////////////////////////////////////////////////////////////////// +int __cdecl +main(uint32_t ac, char *av[]) +{ + FlashCommandLine cl; + + //try to get lock on box + if (FwUpdateLock() != 0 ) + return -1; + + // register unlock func in exit + _onexit((_onexit_t)FwUpdateUnlock); + + cl.parse(ac, av); + + if (cl.isOption(FlashCommandLine::debug)) + { + DebugMode = true; + g_fw_dbg_lvl = 1; + } + + if (cl.isOption(FlashCommandLine::disable_bestdevice)) + { + DisableBestDevice = true; + } + + + if (cl.isOption(FlashCommandLine::flash_format)) + { + if (!device.open(cl.getDeviceName())) + { + printf("Error: %s %s\n", device._err, cl.getDeviceName()); + return -1; + } + + device.format(); + device.close(); + } + + if (cl.isOption(FlashCommandLine::show_image_info)) + { + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s\n", image._err); + return -1; + } + + showInfo(image, cl); + image.close(); + } + + if (cl.isOption(FlashCommandLine::show_flash_info)) + { + if (!device.open(cl.getDeviceName())) + { + printf("Error: %s %s\n", device._err, cl.getDeviceName()); + return -1; + } + + showInfo(device, cl); + device.close(); + } + + if (cl.isOption(FlashCommandLine::dump_image)) + { + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s\n", image._err); + return -1; + } + image.dump(); + image.close(); + + } + + if (cl.isOption(FlashCommandLine::dump_flash)) + { + if (!device.open(cl.getDeviceName())) + { + printf("Error: %s %s\n", device._err, cl.getDeviceName()); + return -1; + } + device.dump(); + device.close(); + } + + if (cl.isOption(FlashCommandLine::verify_image)) + { + if (!image.open(cl.getRawFileName())) + { + printf("Error: %s\n", image._err); + return -1; + } + image.verify(); + image.close(); + } + + if (cl.isOption(FlashCommandLine::verify_flash)) + { + if (!device.open(cl.getDeviceName())) + { + printf("Error: %s %s\n", device._err, cl.getDeviceName()); + return -1; + } + device.verify(); + device.close(); + } + + if (cl.isOption(FlashCommandLine::write_file)) + { + if (!patchVSDToRawImage(cl)) + { + return -1; + } + } + + if (cl.isOption(FlashCommandLine::burn)) + { + if (!burnImageToFlash(cl)) + { + return -1; + } + + // For A0 HCAs we need to write to both banks of the flash. 
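    // Editorial note (not in the original patch): the second burn pass below
    // implements the comment above.  Flash::set_bank() drives the flash
    // bank-select bits through the GPIO data registers, and
    // setGPIOState(Flash::High) changes how those bits are driven, so
    // repeating burnImageToFlash() places the same image in the other bank
    // of a Rev A0 Tavor (MT23108) device.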
+ if ((devType == TAVOR_TYPE) && (devRevision == 0xA0)) + { + device.setGPIOState(Flash::High); + + if (!burnImageToFlash(cl)) + { + return -1; + } + } + } + + return 0; +} + +bool probePciBusForHcaDevice(const char *device) +{ + FILE *stream; + char output[MAXPATHLEN]; + char tmpRevision[5]; + uint32_t i; + + devId = HcaDevnameToDevId(device); + devType = HcaDevnameToDevType(device); + + sprintf(output,"lspci -m -n | grep 15b3 | grep %x",devType); + stream = _popen(output, "r"); + + if (NULL == stream) + { + printf("Error probing the PCI bus for HCA devcies.\n"); + return false; + } + M_DEBUG("Probing PCI bus for HCA devices.\n"); + i = 0; + while (fgets(output, MAXPATHLEN, stream)) + { + if (mellanox_mode) + { + if (i++ != devId) continue; + } else + { + if (++i != devId) continue; + } + + if (strlen(output)>1) + output[strlen(output)-1]='\0'; + + tmpRevision[0] = '0'; + tmpRevision[1] = 'x'; + tmpRevision[2] = output[37]; + tmpRevision[3] = output[38]; + tmpRevision[4] = '\0'; + + // probably should check errno in case of an invalid revision + devRevision = strtol(tmpRevision, (char**)NULL, 0); + + sprintf(bestDev, "/dev/mst/mt%d_pci_cr%d", devType, devId); + + M_DEBUG("Trying device %s.\n", bestDev); + + if (NULL != fopen(bestDev, "r")) + { + M_DEBUG("HCA %d (Rev. %"PRIx32") using device name %s.\n", devId, devRevision, bestDev); + } else + { + sprintf(bestDev, "/dev/mst/mt%d_pciconf%d", devType, devId); + M_DEBUG("Trying device %s.\n", bestDev); + + if (NULL != fopen(bestDev, "r")) + { + M_DEBUG("HCA %d (Rev. %"PRIx32") using device name %s.\n", devId, devRevision, bestDev); + } else + { + printf("We found HCA %d on the PCI bus, but it does not appear to be operating properly.\n", devId); + printf("Please verify the mst device driver is running without errors. If the problem persist,\n" + "then contact technical support.\n"); + } + _pclose(stream); + return false; + } + _pclose(stream); + return true; + } + + printf("Specified device %d not found.\n", devId); + printf("Found %d device%s of type mt%d\n",i,(i==1)?"":"s",devType); + _pclose(stream); + return false; +} + +uint32_t HcaDevnameToDevId(const char *devname) +{ + if (strlen(devname) >= 1) + { + if (isdigit(devname[strlen(devname)-1])) + { + return ((uint8_t)devname[strlen(devname)-1])-48; + } else + { + M_DEBUG("Device name should end with a numeric value between 1 and 9.\n"); + } + + } else + { + M_DEBUG("Device name is too short.\n"); + } + + M_DEBUG("Invalid device name using default device 1.\n"); + return 1; +} + +uint32_t HcaDevnameToDevType(const char *devname) +{ + uint32_t type = 0; + char *str; + + str = strstr(devname, "mt"); + if (str) + { + str += 2; + type = strtol(str, NULL, 10); + } + if ((type != TAVOR_TYPE) && (type != ARBEL_TYPE)) + { + M_DEBUG("Device name should contain device type, using mt23108.\n"); + type = TAVOR_TYPE; + } + return type; +} + +bool isMstLoaded(void) +{ + FILE *stream; + char output[MAXPATHLEN]; + + /* Check for infinicon-compiled mst driver. */ + stream = _popen("/sbin/lsmod | grep mst |cut -d\" \" -f1", "r"); + + if (NULL == stream) + return false; + + fgets(output, MAXPATHLEN, stream); + + if (strlen(output)>1) + output[strlen(output)-1]='\0'; + + _pclose(stream); + + if ((NULL != output) && (strncmp("mst", output, MAXPATHLEN) == 0)) + return true; + + /* That failed - check for mellanox-compiled mst driver. 
*/ + stream = _popen("/sbin/lsmod | grep mst_pci |cut -d\" \" -f1", "r"); + + if (NULL == stream) + return false; + + fgets(output, MAXPATHLEN, stream); + + if (strlen(output)>1) + output[strlen(output)-1]='\0'; + + _pclose(stream); + + if ((NULL != output) && (strncmp("mst_pci", output, MAXPATHLEN) == 0)) + { + mellanox_mode = 1; + return true; + } + else + return false; +} + +void __cdecl catch_signal( int sig ) +{ + //fprintf( stdout, "\nProgram Interrupted. Closing devices.\n" ); + //device.close(); + //image.close(); + //exit( 0 ); +} + +static int +FwUpdateLock(void) +{ + int status = 0; + if ((file_lock = _open( LOCK_FILE_NAME, _O_CREAT | _O_EXCL, _S_IREAD | _S_IWRITE )) == -1 ) + { + printf("One instance is running already\n"); + status = -1; + } + return status; +} +static void +FwUpdateUnlock(void) +{ + if ((_close(file_lock)) != -1 ) + { + if ((_unlink(LOCK_FILE_NAME)) != -1 ) + { + return; + } + } + printf("Unlock can not release lock\n"); +} \ No newline at end of file diff --git a/branches/Ndi/tools/fwupdate/user/makefile b/branches/Ndi/tools/fwupdate/user/makefile new file mode 100644 index 00000000..66f1c8e5 --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/makefile @@ -0,0 +1,8 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def + diff --git a/branches/Ndi/tools/fwupdate/user/mtcr.h b/branches/Ndi/tools/fwupdate/user/mtcr.h new file mode 100644 index 00000000..719beec8 --- /dev/null +++ b/branches/Ndi/tools/fwupdate/user/mtcr.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2004-2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _MST_H +#define _MST_H + +#include + +#ifdef WIN32 + +#define MTCR_API + +typedef unsigned __int8 u_int8_t; +typedef __int8 int8_t; +typedef unsigned __int16 u_int16_t; +typedef __int16 int16_t; +typedef unsigned __int32 u_int32_t; +typedef __int32 int32_t; +typedef unsigned __int64 u_int64_t; +typedef __int64 int64_t; +#if defined(_WIN64) + typedef __int64 MT_long_ptr_t; + typedef unsigned __int64 MT_ulong_ptr_t; +#else + typedef long MT_long_ptr_t; + typedef unsigned long MT_ulong_ptr_t; +#endif + +#else /* UNIX */ + +#define MTCR_API + +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +typedef struct mib_private_t { + int dummy; +} MIB_Private; + +typedef enum { MACCELERATE, MRESTORE, MREAD4, MWRITE4, MREAD64, MWRITE64 } mif_ioctl_cmd_t; + +typedef enum MType_t {MST_PCI, MST_PCICONF, MST_CALBR, MST_USB, MST_IB, MST_IF, MST_PPC, MST_USB_DIMAX} MType; +typedef enum DType_t {MST_GAMLA, MST_TAVOR, MST_DIMM, MST_NOADDR} DType; +#define MST_ANAFA2 MST_TAVOR +#define MST_EEPROM MST_GAMLA +enum Mdevs_t { + MDEVS_GAMLA = 0x01, // Each device that actually is a Gamla + MDEVS_I2CM = 0x02, // Each device that can work as I2C master + MDEVS_MEM = 0x04, // Each device that is a memory driver (vtop) + MDEVS_TAVOR_DDR = 0x08, // Each device that maps to Tavor DDR + MDEVS_TAVOR_UAR = 0x10, // Each device that maps to Tavor UAR + MDEVS_TAVOR_CR = 0x20, // Each device that maps to Tavor CR + MDEVS_IF = 0x40, // Standard device interface + MDEVS_REM = 0x80, // Remote devices + MDEVS_PPC = 0x100, // PPC devices + MDEVS_TAVOR = (MDEVS_TAVOR_DDR|MDEVS_TAVOR_UAR|MDEVS_TAVOR_CR), + MDEVS_ALL = 0xffffffff +}; + +// All fields in follow structure are not supposed to be used +// or modified by user programs. Except i2c_slave that may be +// modified before each access to target I2C slave address +typedef struct mfile_t { + MType tp; // type of driver + DType dtype; // target device to access to + DType itype; // interface device to access via + int is_i2cm; // use device as I2C master + unsigned char i2c_slave; +#ifdef __WIN__ + MT_ulong_ptr_t fd; +#else + int fd; +#endif + int sock; // in not -1 - remote interface + void *ptr; + MIB_Private mib; // Data for IB interface (if relevant) + unsigned int i2c_RESERVED; // Reserved for internal usage (i2c internal) +} mfile; + +typedef struct mif_param_st { + mif_ioctl_cmd_t cmd; + + DType dtype; + + char addr; + int len; + int imm; + int offset; + void * buf; +} mif_param_t; + +#ifdef WIN32 +#define FromHandle(h) ((MT_long_ptr_t)(h)) +#define ToHandle(h) ((HANDLE)(h)) +#else +#define FromHandle(h) ((int)(h)) +#define ToHandle(h) ((HANDLE)(h)) +#endif + +/* + * Get list of MST (Mellanox Software Tools) devices. + * Put all device names as null-terminated strings to buf. + * + * Return number of devices found or -1 if buf overflow + */ +MTCR_API int mdevices(char *buf, int len, int mask); + +/* + * Open Mellanox Software tools (mst) driver. + * Return valid mfile ptr or 0 on failure + */ + +/* + * Open Mellanox Software tools (mst) driver. Device type==TAVOR + * Return valid mfile ptr or 0 on failure + */ +MTCR_API mfile *mopen(int hca_num); + +/* + * Close Mellanox driver + * req. descriptor + */ +MTCR_API int mclose(mfile *mf); + +/* + * Accelerate device if possible. + * When device is I2C master - overclock it + */ +MTCR_API void maccelerate(mfile *mf); + +/* + * Restore normal settings, if device was accelerated. + */ +MTCR_API void mrestore(mfile *mf); + +/* + * Read 4 bytes, return number of succ. 
read bytes or -1 on failure + */ +MTCR_API int mread4(mfile *mf, unsigned int offset, u_int32_t *value); + +/* + * Write 4 bytes, return number of succ. written bytes or -1 on failure + */ +MTCR_API int mwrite4(mfile *mf, unsigned int offset, u_int32_t value); + +/* + * Read up to 64 bytes, return number of succ. read bytes or -1 on failure + * + * This makes sense only w/ CALIBRE/DevaSys interfaces *to EEPROM reading only* + */ +MTCR_API int mread64(mfile *mf, unsigned int offset, void *data, int length); + +/* + * Write up to 64 bytes, return number of succ. written bytes or -1 on failure + * + * This makes sense only w/ CALIBRE/DevaSys interfaces *to EEPROM burning only* + */ +MTCR_API int mwrite64(mfile *mf, unsigned int offset, void *data, int length); + +/* + * Set a new value for i2c_slave + * Return previous value + */ +MTCR_API unsigned char mset_i2c_slave(mfile *mf, unsigned char new_i2c_slave); + +/* + * get free phys. contigous pages + * order should be in range [0..9] + * the size of allocated memory will be (2^order * 4096) + * return pointer to virtual address mapped to the allocated area + * on failure returns 0 and errno is set + */ +MTCR_API void *mget_free_pages (mfile *mf, unsigned int order); + +/* + * free phys. contigous pages + * order should be in range [0..9] + * vma is freed + * on success returns 0 + * on failure returns -1 and errno is set + */ +MTCR_API int mfree_pages (mfile *mf, void *addr, unsigned int order); + +/* + * translate virtual address to physical address + * return physical address on success, or 0 on error + */ +MTCR_API unsigned long mvtop (mfile *mf, void *va); + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/branches/Ndi/tools/mread/user/SOURCES b/branches/Ndi/tools/mread/user/SOURCES new file mode 100644 index 00000000..359ceff6 --- /dev/null +++ b/branches/Ndi/tools/mread/user/SOURCES @@ -0,0 +1,40 @@ +TARGETNAME=mread +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. +!endif + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + + +SOURCES=mread.c \ + mread.rc + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\tools\mtcr\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\mtcr.lib +!else + $(TARGETPATH)\*\mtcr.lib +!endif + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG +!endif + +C_DEFINES=$(C_DEFINES) -D__WIN__ + +386_STDCALL=0 + + diff --git a/branches/Ndi/tools/mread/user/makefile b/branches/Ndi/tools/mread/user/makefile new file mode 100644 index 00000000..80d407b3 --- /dev/null +++ b/branches/Ndi/tools/mread/user/makefile @@ -0,0 +1,8 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def + diff --git a/branches/Ndi/tools/mread/user/mread.c b/branches/Ndi/tools/mread/user/mread.c new file mode 100644 index 00000000..dd3db44c --- /dev/null +++ b/branches/Ndi/tools/mread/user/mread.c @@ -0,0 +1,73 @@ +/* + * + * mread.c - CR Space read access + * + */ + +#include +#include +#include + +#include "mtcr.h" + +void usage(const char *n) +{ + printf("%s []\n", n); + exit(1); +} + +int main(int ac, char *av[]) +{ + char *endp; + int rc=0; + unsigned int addr, val; + mfile *mf; + DType dtype = MST_TAVOR; + +#if 0 + int i, rc1; + char buf[1024], *p=buf; + rc1 = mdevices(buf, 1024); + for (i=0; i= 4) + mset_i2c_slave(mf, (unsigned char)strtoul(av[3],0,0)); + + if ((rc = mread4(mf, addr, &val)) < 0) + { + mclose(mf); + perror("mread"); + return 1; + } + if (rc < 4) + { + mclose(mf); + printf("Read only %d bytes\n", rc); + return 1; + } + + mclose(mf); + printf("Read 0x%08x:0x%08x\n", addr, val); + return rc; +} diff --git a/branches/Ndi/tools/mst/dirs b/branches/Ndi/tools/mst/dirs new file mode 100644 index 00000000..5a7e8b31 --- /dev/null +++ b/branches/Ndi/tools/mst/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/mst/user/SOURCES b/branches/Ndi/tools/mst/user/SOURCES new file mode 100644 index 00000000..c6662baa --- /dev/null +++ b/branches/Ndi/tools/mst/user/SOURCES @@ -0,0 +1,41 @@ +TARGETNAME=mst +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. +!endif + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + + +SOURCES=mst.c \ + mst.rc + + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\tools\mtcr\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\mtcr.lib +!else + $(TARGETPATH)\*\mtcr.lib +!endif + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG +!endif + +C_DEFINES=$(C_DEFINES) -D__WIN__ + +386_STDCALL=0 + + diff --git a/branches/Ndi/tools/mst/user/makefile b/branches/Ndi/tools/mst/user/makefile new file mode 100644 index 00000000..80d407b3 --- /dev/null +++ b/branches/Ndi/tools/mst/user/makefile @@ -0,0 +1,8 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def + diff --git a/branches/Ndi/tools/mst/user/mst.c b/branches/Ndi/tools/mst/user/mst.c new file mode 100644 index 00000000..41a42601 --- /dev/null +++ b/branches/Ndi/tools/mst/user/mst.c @@ -0,0 +1,46 @@ +#include +#include +#include + +#include "mtcr.h" + +void usage(const char *n) +{ + printf("%s: List available mst devices.\nUsage: %s status\n", n, n); + exit(1); +} + +int list_devices() { + char buff[1024]; + int devs; + int i; + char* p = buff; + devs = mdevices(buff, sizeof(buff), 0xffffffff); + + if (devs < 0) { + printf("-E- Error getting devices\n"); + return 0; + } + + printf("Found %d devices:\n", devs); + + for (i = 0; i < devs ; i++) { + printf(" %s\n", p); + p += strlen(p) + 1; + } + return 1; +} + +int __cdecl main(int ac, char *av[]) +{ + + if (ac != 2 || strcmp(av[1],"status")) { + usage(av[0]); + } + + if (!list_devices()) + return 1; + + return 0; +} + diff --git a/branches/Ndi/tools/mst/user/mst.rc b/branches/Ndi/tools/mst/user/mst.rc new file mode 100644 index 00000000..93b78841 --- /dev/null +++ b/branches/Ndi/tools/mst/user/mst.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#if DBG +#define VER_FILEDESCRIPTION_STR "Mellanox HW access device listing. (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Mellanox HW access device listing." +#endif + +#define VER_INTERNALNAME_STR "mst.exe" +#define VER_ORIGINALFILENAME_STR "mst.exe" + +#include diff --git a/branches/Ndi/tools/mtcr/dirs b/branches/Ndi/tools/mtcr/dirs new file mode 100644 index 00000000..5a7e8b31 --- /dev/null +++ b/branches/Ndi/tools/mtcr/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/mtcr/user/SOURCES b/branches/Ndi/tools/mtcr/user/SOURCES new file mode 100644 index 00000000..ba02bca0 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/SOURCES @@ -0,0 +1,88 @@ +# +# Name: sources +# +# Purpose: +# Building MTCR for this platform (user space) +# + +TARGETTYPE=DYNLINK +TARGETNAME=mtcr +DLLENTRY=_DllMainCRTStartup +DLLDEF=mtcr.def +USE_CRTDLL=1 + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. 
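+# (three levels up to the tree root; WINIBHOME may be predefined in the
+# environment to build against a different tree)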
+!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\inc\mthca; \ + .\usb; + + +SOURCES= \ + mtcr.rc \ + mtcr_i2c.c \ + mtcr.c \ + usb.cpp + + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(CRT_LIB_PATH)\msvcrt.lib \ + $(TARGETPATH)\*\ibal.lib\ + $(SDK_LIB_PATH)\Kernel32.lib\ +!else + $(TARGETPATH)\*\complibd.lib\ + $(CRT_LIB_PATH)\msvcrt.lib\ + $(TARGETPATH)\*\ibald.lib\ + $(SDK_LIB_PATH)\Ws2_32.lib\ + $(SDK_LIB_PATH)\Kernel32.lib\ + +!endif + + +# dimax driver not provided for 64 bits arch. +MTCR_NO_USB=1 + + +!if "$(_BUILDARCH)" == "x86" +TARGETLIBS=$(TARGETLIBS)\ + .\usb\usbi2cio.lib \ + .\usb\I2cBrdg.lib + +C_DEFINES=$(C_DEFINES) -DMTCR_USB_SUPPORT +!endif + + + + +# TODO:Should I define the __WIN__ manually +C_DEFINES=$(C_DEFINES) /DMTCR_EXPORTS /DMTL_MODULE=MTCR -D__WIN__ + + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG -DDBG +!endif + + +# This is for the perl and zlib lib funcs, which requires __cdecl. +# TODO: define for all arch. check if MSC_STDCALL works. +386_STDCALL=0 +amd64_STDCALL=0 + +MSC_WARNING_LEVEL= /W3 + +#BUILD_CONSUMES=mlxsys +BUILD_PRODUCES=mtcr + diff --git a/branches/Ndi/tools/mtcr/user/com_def.h b/branches/Ndi/tools/mtcr/user/com_def.h new file mode 100644 index 00000000..52a1fe42 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/com_def.h @@ -0,0 +1,194 @@ +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) May 2002, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... 
+ * + * wincomat.h - Windows compatibility + * + * Version: $Id$ + * + * Author: Alex Rezinsky (alexr@mellanox.co.il) + */ + +#ifndef COM_DEF_H +#define COM_DEF_H + +#include +#include + + +#include + +// TODO: reference additional headers your program requires here +#include +#include + + +//----------------------------------------------------- +// DEBUG PRINTS +//----------------------------------------------------- + +#define DEBUG_LEVEL_HIGH 5 +#define DEBUG_LEVEL_MID 3 +#define DEBUG_LEVEL_LOW 1 +#define DEBUG_LEVEL_ALWAYS 0 + + +#ifdef __cplusplus +#define EXTERN_VAR extern "C" +#else +#define EXTERN_VAR extern +#endif + +EXTERN_VAR ULONG g_DebugLevel; + +// +// HACK: For this revision, allow debug prng (up to level 3) also for non debug build +// + +//#ifdef DBG +#define DEBUG_PRINT_ON 1 +//#else +// to see the prints - uncomment the next line and set level to DEBUG_LEVEL_MID +//#define DEBUG_PRINT_ON 1 +//#endif + +#ifdef DEBUG_PRINT_ON + + #include + #define M_DEBUG(args) do { printf("-D- "); printf args ;} while(0) + #define M_DEBUGS(args, stat) do { printf("-D- "); printf ("stat:%#x:%s: ", stat, ib_get_err_str(stat)); printf args ;} while(0) + + #define DBG_PRINT(lvl, params ) \ + if (g_DebugLevel >= lvl) { M_DEBUG (params); } + + #define DBG_PRINTS(lvl, params, stat ) \ + if (g_DebugLevel >= lvl) { M_DEBUGS (params, stat); } + +#else + #define DBG_PRINT(lvl, params ) + #define M_DEBUG(args) + #define M_DEBUGS(args, stat) + #define DBG_PRINTS(lvl, params, stat ) +#endif + + +#define DPRINT0(params) DBG_PRINT(DEBUG_LEVEL_ALWAYS, params) +#define DPRINT1(params) DBG_PRINT(DEBUG_LEVEL_LOW, params) +#define DPRINT3(params) DBG_PRINT(DEBUG_LEVEL_MID, params) + +#ifdef DBG +#define DPRINT5(params) DBG_PRINT(DEBUG_LEVEL_HIGH, params) +#else +#define DPRINT5(params) +#endif + +#define DPRINTS1(params, stat) DBG_PRINTS(DEBUG_LEVEL_LOW, params, stat) + + +/* + * Errors + */ +#include + + /****************** General Purpose Error Codes (0 to -999) *****************/ + +#define ERROR_LIST_GENERAL \ + INFO( MT_OK, 0, "success" ) \ + INFO( MT_ERROR, -1, "generic error" ) \ + INFO( MT_ENOINIT, -2, "module not initialized" ) \ + INFO( MT_EINVAL, -3, "invalid argument" ) \ + INFO( MT_ENORSC, -4, "No such resource (probably out of range)" ) \ + INFO( MT_EPERM, -5, "Not enough permissions to perform operation" ) \ + INFO( MT_ENOSYS, -6, "The system doesn't support requested operation" ) \ + INFO( MT_EAGAIN, -7, "Resource temporarily unavailable" ) \ + INFO( MT_EALIGN, -8, "Alignment error (offset/size not aligned)" ) \ + INFO( MT_EDEADLK, -9, "Resource deadlock avoided" ) \ + INFO( MT_ENOENT, -10, "No such file or directory" ) \ + INFO( MT_EACCES, -11, "Permission denied" ) \ + INFO( MT_EINTR, -12, "process received interrupt") \ + INFO( MT_ESTATE, -13, "Invalid state") \ + INFO( MT_ENOMOD, -ENOSYS, "module not loaded") /* When module not loaded, syscall return ENOSYS */ + + /**************** Memory Handling Error Codes (-1000 to -1199) **************/ + + +#define ERROR_LIST_MEMORY \ + INFO( MT_EKMALLOC, -1000, "Can't allocate kernel memory" ) \ + INFO( MT_ENOMEM, -1001, "Given address doesn't match process address space" ) \ + INFO( MT_EMALLOC, -1002, "malloc fail") \ + INFO( MT_EFAULT, -1003, "Bad address" ) + + /****************** General Device Error Codes (-1200 to -1399) *************/ + +#define ERROR_LIST_DEVICE \ + INFO( MT_ENODEV, -1200, "No such device" ) \ + INFO( MT_EBUSY, -1201, "Device or resource busy (or used by another)" ) \ + INFO( MT_EBUSBUSY, -1202, "Bus busy" ) + + 
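+/*
+ * The INFO() lists here follow the X-macro pattern: ERROR_LIST is expanded
+ * below with INFO(A,B,C) mapped to "A = B," to generate the call_result_t
+ * enum. A hypothetical string-lookup helper could reuse the same list:
+ *
+ *   static const char *mt_strerror(call_result_t rc)
+ *   {
+ *   #define INFO(A,B,C) if (rc == A) return C;
+ *       ERROR_LIST
+ *   #undef INFO
+ *       return "unknown error";
+ *   }
+ */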
 /*********************** I2C Error Codes (-1400 to -1499) *******************/
+
+#define ERROR_LIST_I2C \
+ INFO( MT_EI2CNACK, -1400, "I2C: received NACK from slave" ) \
+ INFO( MT_EI2PINHI, -1401, "I2C: pending interrupt does not become low" ) \
+ INFO( MT_EI2TOUT, -1402, "I2C: operation has timed out" )
+
+#define ERROR_LIST ERROR_LIST_GENERAL ERROR_LIST_MEMORY ERROR_LIST_DEVICE ERROR_LIST_I2C
+
+ /**
+ ** See the full list of POSIX errors at the end of the file
+ **/
+
+
+typedef enum {
+#define INFO(A,B,C) A = B,
+ ERROR_LIST
+#undef INFO
+ MT_DUMMY_ERROR /* this one is needed to quiet a warning from -pedantic */
+} call_result_t;
+/*
+ * Different names
+ */
+#define sys_errlist _sys_errlist
+
+/*
+ * Types
+ */
+
+/*
+ * Endianness
+ */
+#define __be32_to_cpu __cpu_to_be32
+
+#ifdef _WIN64
+#define __cpu_to_be32(x) ((((x) >> 24)&0x000000ff) | (((x) >> 8)&0x0000ff00) | (((x) << 8)&0x00ff0000) | (((x) << 24)&0xff000000))
+#elif defined(_WIN32)
+__inline __int32 __cpu_to_be32( __int32 dwX )
+{
+ _asm mov eax, dwX
+ _asm bswap eax
+ _asm mov dwX, eax
+
+ return dwX;
+}
+#else
+#error unsupported platform
+#endif
+
+#define __be16_to_cpu __cpu_to_be16
+#define __cpu_to_be16(x) ((((x) >> 8)&0xff) | (((x) << 8)&0xff00))
+
+/*
+ * usleep
+ */
+_inline void usleep( unsigned long x) { Sleep((x + 999) / 1000); }
+
+#endif diff --git a/branches/Ndi/tools/mtcr/user/makefile b/branches/Ndi/tools/mtcr/user/makefile new file mode 100644 index 00000000..80d407b3 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/makefile @@ -0,0 +1,8 @@ +#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE ..\..\..\inc\openib.def
+ diff --git a/branches/Ndi/tools/mtcr/user/mtcr.c b/branches/Ndi/tools/mtcr/user/mtcr.c new file mode 100644 index 00000000..77d58b48 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr.c @@ -0,0 +1,1031 @@ +// mtcr.c : Defines the entry point for the DLL application.
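+// A typical consumer (mread, mst, fwupdate) drives the DLL roughly like the
+// following sketch (the device name and register offset are illustrative only):
+//
+//   mfile *mf = mopen("mt23108_pci_cr0");
+//   u_int32_t val;
+//   if (mf && mread4(mf, 0xf0014, &val) == 4)
+//       printf("0x%08x\n", val);
+//   if (mf)
+//       mclose(mf);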
+// +#include +#include +#include +#include + + +#include "com_def.h" +#include "usb.h" +#include "mtcr.h" +#include "mtcr_i2c.h" +#include "mthca_vc.h" + +//----------------------------------------------------- +// NEW FEATURES +//----------------------------------------------------- +#define FIX_NAME 1 +#define SUPPORT_I2CM 1 + +#define USB_DEV_NAME "mtusb-1" +#define CLEAR(st) memset(&(st), 0, sizeof(st)) + + + +#define MTCR_DEBUG_ENV "MTCR_DEBUG_LEVEL" +#ifdef DBG +ULONG g_DebugLevel = DEBUG_LEVEL_MID; +#else +ULONG g_DebugLevel = DEBUG_LEVEL_LOW; +#endif + +//----------------------------------------------------- + +#define MAX_HCA_NUM 16 + + +typedef struct mfile_ibal_t { + mfile s; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + map_crspace cr_map; +} mfile_ibal; + + +BOOL APIENTRY DllMain( HANDLE hModule, + DWORD ul_reason_for_call, + LPVOID lpReserved + ) +{ + char* pszDbgLevel; + switch (ul_reason_for_call) + { + case DLL_PROCESS_ATTACH: + // s_hCtl = (HANDLE)-1; + // ConnectToDriver(); + g_DebugLevel = 0; + pszDbgLevel = getenv(MTCR_DEBUG_ENV); + if (pszDbgLevel) { + g_DebugLevel = atol(pszDbgLevel); + } + + break; + + case DLL_PROCESS_DETACH: + // DisconnectFromDriver(); + break; + + case DLL_THREAD_ATTACH: + case DLL_THREAD_DETACH: + break; + } + return TRUE; +} + +// device ids +#define DEVASYS_DEV_ID 12345 /* dummy */ +#define TAVOR_DEV_ID 23108 +#define TAVOR_CONF_DEV_ID 23109 +#define ARBEL_TM_DEV_ID 25208 +#define ARBEL_TM_CONF_DEV_ID 25209 +#define ARBEL_DEV_ID 25218 +#define ARBEL_CONF_DEV_ID 25219 +#define SINAI_4X_DEV_ID 24204 +#define SINAI_4X_CONF_DEV_ID 24205 +#define SINAI_8X_DEV_ID 25204 +#define SINAI_8X_CONF_DEV_ID 25205 + +#define IS_CONF_DEV(dev_id) \ + ((dev_id == TAVOR_CONF_DEV_ID) || \ + (dev_id == ARBEL_TM_CONF_DEV_ID) || \ + (dev_id == ARBEL_CONF_DEV_ID) || \ + (dev_id == SINAI_4X_CONF_DEV_ID) || \ + (dev_id == SINAI_8X_CONF_DEV_ID)) + +#define MAX_DEV_NAME 32 +typedef struct { + USHORT DevId; // Device Id, e.g. 23108 + UCHAR DevName[MAX_DEV_NAME]; // exported name, e.g. "InfiniHost" + Mdevs mask; +} DEVICE_DB_T, *PDEVICE_DB_T; + + +#define TAVOR_TYPE_DEVICE_NAME_FMT "mt%hu_pci%s%hu" +#define MDT_DEVICE_NAME_FMT "%s%hu" + +static DEVICE_DB_T db[] = { + { DEVASYS_DEV_ID, "devasys_usb", MDEVS_TAVOR }, + { TAVOR_DEV_ID, "InfiniHost", MDEVS_TAVOR }, + { TAVOR_CONF_DEV_ID, "InfiniHostBd", MDEVS_TAVOR_CR }, + { ARBEL_TM_DEV_ID, "InfiniHost", MDEVS_TAVOR }, + { ARBEL_TM_CONF_DEV_ID, "InfiniHostBd", MDEVS_TAVOR_CR }, + { ARBEL_DEV_ID, "InfiniHost_III_Ex", MDEVS_TAVOR }, + { ARBEL_CONF_DEV_ID, "InfiniHostBd", MDEVS_TAVOR_CR }, + { SINAI_4X_DEV_ID, "InfiniHost_III_Lx", MDEVS_TAVOR }, + { SINAI_4X_CONF_DEV_ID, "InfiniHostBd", MDEVS_TAVOR_CR }, + { SINAI_8X_DEV_ID, "InfiniHost_III_Lx", MDEVS_TAVOR }, + { SINAI_8X_CONF_DEV_ID, "InfiniHostBd", MDEVS_TAVOR_CR }, +}; +#define DEVICE_DB_SIZE (sizeof(db) / sizeof(DEVICE_DB_T)) + +Mdevs dmasks[] = { MDEVS_TAVOR_CR, MDEVS_TAVOR_CR, MDEVS_TAVOR_UAR, MDEVS_TAVOR_DDR }; +char *dsuffix[] = { "conf", "_cr", "_uar", "_ddr"}; +#define MASKS_SIZE (sizeof(dmasks) / sizeof(Mdevs)) + + +// Return: < 0 - Error. 
> 0 - Numbern of characters written (including last '\0') +int create_mst_names_by_dev_id(USHORT dev_id, int dev_ix, int mask, char *name, int name_len, int *cnt) +{ + int i,j; char *suffix; BOOL found = FALSE; char *nm_ptr = name; + int tot_len = 0; + + DPRINT3(( "create_mst_names_by_dev_id: dev_id %d, dev_ix %d, mask %#x, name_len %d\n", + dev_id, dev_ix, mask, name_len )); + + // specific stuff: for CONF devices create only "_cr" device + *name = 0; + *cnt = 0; + if (IS_CONF_DEV(dev_id)) { + int len; + tot_len += _snprintf(name, name_len, TAVOR_TYPE_DEVICE_NAME_FMT, dev_id, "conf", dev_ix ); + tot_len++; // trailing null + *cnt = 1; + return tot_len; + } + DPRINT3(( "create_mst_names_by_dev_id: not conf device %hu, is_conf_dev %d \n", + dev_id, IS_CONF_DEV(dev_id) )); + + // find device + for (i=0; i name_len) { + DPRINT1(( "create_mst_names_by_dev_id: not enough length (%d > %d)\n", + len, name_len )); + return -1; + } + // copy the results + DPRINT5(( "create_mst_names_by_dev_id: name %s\n", + l_name )); + memcpy( nm_ptr, l_name, len ); + nm_ptr += len; + tot_len += len; + name_len -= len; + (*cnt)++; + } + } + + return tot_len; +} + + +// Return: 0 - error, 1 - OK +int parse_mst_name(const char *mst_name, + PUSHORT dev_id, + PUSHORT dev_ix, + MType* mst_dev_type, + Mdevs* access_type) +{ + char *ptr; + char suffix[MAX_DEV_NAME]; + const char* fname; + + // Unix device name compatibility: Remove path (if exists) from device name: + + if ((fname = strrchr(mst_name, '/')) || (fname = strrchr(mst_name, '\\'))) { + DPRINT3(("Removing path from file: %s --> %s\n", mst_name, fname + 1)); + mst_name = fname + 1; + } + + suffix[0] = '\0'; + + if (strstr(mst_name, USB_DEV_NAME)) { + *dev_id = DEVASYS_DEV_ID; + *dev_ix =0; + *mst_dev_type = MST_USB; + + } else { + // get dev_id and suffix. dev_ix gets a dummy value. + sscanf( mst_name, TAVOR_TYPE_DEVICE_NAME_FMT, dev_id, suffix, dev_ix ); + // step over the suffix. 
ptr will be at the card's number + if ((ptr=strstr( suffix, "conf"))) { /* CONF device */ + ptr += 7; + *mst_dev_type = MST_PCICONF; + } else if ((ptr=strstr( suffix, "_cr"))) { + ptr += 3; + *mst_dev_type = MST_PCI; + *access_type = MDEVS_TAVOR_CR; + } else if ((ptr=strstr( suffix, "_uar"))) { + ptr += 4; + *mst_dev_type = MST_PCI; + *access_type = MDEVS_TAVOR_UAR; + } else if ((ptr=strstr( suffix, "_ddr"))) { + ptr += 4; + *mst_dev_type = MST_PCI; + *access_type = MDEVS_TAVOR_DDR; + } else { + DPRINT1(( "parse_mst_name: incorrect device name '%s' \n", mst_name )); + return 0; + } + + // get dev_ix + sscanf( ptr, "%hu", dev_ix ); + } + + DPRINT3( ("parse_mst_name: name %s, dev_id %d, dev_ix %d\n", + mst_name, *dev_id, *dev_ix)); + return 1; +} + + +// Return: 0 - error, 1 - OK +int create_mdt_name_by_dev_id(USHORT dev_id, USHORT dev_ix, char *name, int name_len) +{ + int i; + + DPRINT3(( "create_mdt_name_by_dev_id: dev_id %d, dev_ix %d\n", + dev_id, dev_ix )); + + // name generation + *name = 0; + for (i=0; i size) { + DPRINT1(("get_dev_ids(): Got buffer for %d HCAs, but %d HCAs found on machine.\n", size, ca_guids_count)); + goto ErrExit; + } + + for (i = 0; i < ca_guids_count ; i++) { + ib_ca_attr_t* ca_data; + u_int32_t bsize; + + cnt++; + + // Query the CA + ib_status = ib_query_ca_by_guid(h_al, guids[i], NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + DPRINTS1(("Failed to get size of query ca %d by guid.\n", i), ib_status); + dev_ids[i] = 0; + continue; + } + + ca_data = (ib_ca_attr_t*)malloc(bsize); + if (ca_data == NULL) { + DPRINT1(("get_dev_ids: malloc failed.\n")); + continue; + } + + ca_data->dev_id = 0; + + ib_status = ib_query_ca_by_guid(h_al, guids[i], ca_data, &bsize); + if(ib_status != IB_SUCCESS) + { + DPRINTS1(("Failed to query ca %d by guid.\n", i), ib_status); + } + + // Get the device id: + dev_ids[i] = ca_data->dev_id; + + free(ca_data); + + } + + goto OkExit; + +ErrExit: + cnt = -1; + +OkExit: + + return cnt; +} + + +int get_hca_idx(ib_al_handle_t h_al, USHORT dev_id, USHORT dev_ix) { + USHORT dev_ids[MAX_HCA_NUM]; + int cnt; + int i; + int matching_devs_found = 0; + + cnt = get_dev_ids(h_al, dev_ids, MAX_HCA_NUM); + + if (cnt < 0) { + return cnt; + } + + for (i = 0 ; i < cnt ; i++) { + if (dev_ids[i] == dev_id) { + if (matching_devs_found == dev_ix) { + DPRINT3(("get_hca_idx, type=%d, idx=%d. HCA index = %d\n", dev_id, dev_ix, i)); + return i; + } + matching_devs_found++; + } + } + + DPRINT3(("get_hca_idx, type=%d, idx=%d. No matching device found in %d HCAs\n", dev_id, dev_ix, i)); + + return -1; +} + + + +// +// dev_idx_by_type - stores current hca type idx for each type. Assumed to +// be in the same size of DEVICE_DB_SIZE +// +int get_and_inc_dev_idx(u_int32_t* dev_idx_by_type, USHORT dev_id) { + u_int32_t i; + int ret = -1; + + for (i = 0; i < DEVICE_DB_SIZE; i++) { + if (dev_id == db[i].DevId) { + ret = dev_idx_by_type[i]; + dev_idx_by_type[i]++; + break; + } + } + + return ret; +} + + +// +// +// List devices in their MST compatible names. +// Each device type is indexed sepetrately. 
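+// e.g. the first InfiniHost III Ex is reported as mt25218_pci_cr0, and its
+// configuration-space device (dev id 25219) as mt25219_pciconf0.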
+// +// + +MTCR_API int mdevices(char *buf, int len, int mask) +{ + u_int32_t tot_len = 0; + char* p = buf; + int devs = 0; + int i; + + u_int32_t dev_idx_by_type[DEVICE_DB_SIZE]; + USHORT dev_ids[MAX_HCA_NUM]; + int cnt = 0; + + ib_api_status_t ib_status; + ib_al_handle_t h_al = 0; + + + + memset( (char*)dev_idx_by_type, 0, sizeof(dev_idx_by_type)); + + ib_status = ib_open_al( &h_al ); + if ( ib_status != IB_SUCCESS ) { + DPRINTS1(("Failed to open AL\n"), ib_status ); + // return -1; + } else { + cnt = get_dev_ids(h_al, dev_ids, MAX_HCA_NUM); + + if (cnt < 0) { + cnt = 0; + } + + ib_close_al(h_al); + + } + + for(i = 0; i < cnt; i++) { + int idx; + int curr_cnt = 0; + int curr_len; + + if (dev_ids[i] == 0) { + continue; + } + + idx = get_and_inc_dev_idx(dev_idx_by_type, dev_ids[i]); + + if (idx < 0) { + DPRINT1(("mdevices: Unknown dev id detected: %d. skipped.\n", dev_ids[i])); + continue; + } + + // For now - get only TAVOR_CR (cr, conf ) devices. + curr_len = create_mst_names_by_dev_id(dev_ids[i], idx, MDEVS_TAVOR_CR , p , len - tot_len , &curr_cnt); + if (curr_cnt < 0) { + return -1; + } + + tot_len += curr_len; + p += curr_len; + devs += curr_cnt; + } + + + if (usb_is_connected() ) { + sprintf(p, USB_DEV_NAME ); + devs++; + } + + return devs; + +} + + +MTCR_API mfile *mopen(const char *name) +{ + return mopend(name, MST_TAVOR); +} + +MTCR_API mfile *mopend(const char *name, DType dtype) +{ + USHORT dev_id=0, dev_ix=0; + HANDLE h; + MType mst_dev_type; + Mdevs access_type; + int target_hca; + + /* allocate mfile struct */ + mfile_ibal *mf = (mfile_ibal *)malloc(sizeof(mfile_ibal)); + if (!mf) { + errno = ENOMEM; + return 0; + } + + memset( (char*)mf, 0, sizeof(mfile_ibal)); + + mf->s.sock = -1; + + /* parse name */ + + if (!parse_mst_name(name, &dev_id, &dev_ix, &mst_dev_type, &access_type )) { + goto ErrExit; + } + + DPRINT3(( "mopend: %s, dtype %d, devid %d\n", name, dtype, dev_id)); + + + switch(dev_id) { + case TAVOR_DEV_ID: case ARBEL_TM_DEV_ID: + case ARBEL_DEV_ID: case SINAI_4X_DEV_ID: + case SINAI_8X_DEV_ID: + mf->s.itype = MST_TAVOR; + break; + default: + mf->s.itype = MST_GAMLA; + } + + + /* Type of device */ + mf->s.dtype = dtype; + if (dtype == MST_TAVOR) + mf->s.i2c_slave = 0x48; + else + mf->s.i2c_slave = 0x5a; + +#ifdef SUPPORT_I2CM + /* Use the device as I2C master? */ + mf->s.is_i2cm = strstr(name, "i2cm") ? 
1 : 0; + + /* Type of interface (relevant when is_i2cm==1, unused otherwise */ + if (mf->s.is_i2cm) + { + switch(dev_id) { + case TAVOR_DEV_ID: case ARBEL_TM_DEV_ID: + case ARBEL_DEV_ID: case SINAI_4X_DEV_ID: + case SINAI_8X_DEV_ID: + mf->s.itype = MST_TAVOR; + break; + default: + goto ErrExit; + } + } +#endif + + if (dev_id != DEVASYS_DEV_ID ) { + /* Open ibal HCA handle */ + u_int32_t stub; + + ib_net64_t guids[MAX_HCA_NUM]; + + ib_api_status_t ib_status; + size_t ca_guids_count = MAX_HCA_NUM; + + ib_status = ib_open_al( &mf->h_al ); + + if ( ib_status != IB_SUCCESS ) { + M_DEBUGS(("Failed to open AL\n"), ib_status ); + goto ErrExit; + } + + + ib_status = ib_get_ca_guids ( mf->h_al, guids, &ca_guids_count ); + if (ib_status != IB_SUCCESS) { + M_DEBUGS(("Failed to get CA GUIDs\n"), ib_status); + goto ErrExit; + } + + if (ca_guids_count == 0) { + DPRINT1(("FOUND NO GUIDS\n")); + goto ErrExit; + } + + target_hca = get_hca_idx(mf->h_al, dev_id , dev_ix ); + if (target_hca < 0) { + goto ErrExit; + } + + ib_status = ib_open_ca( mf->h_al, guids[target_hca], NULL, mf, &mf->h_ca ); + if (ib_status != IB_SUCCESS) + { + DPRINTS1(("Failed to open CA\n"), ib_status); + goto ErrExit; + } + + if(mst_dev_type == MST_PCICONF) { + // Type of file + mf->s.tp = MST_PCICONF; + + DPRINT5(("Calling: ibal_access(mf->h_ca, 0x0, &stub, 4, FW_OPEN_IF )\n")); + if (ibal_access(mf->h_ca, 0x0, &stub, 4, FW_OPEN_IF ) != IB_SUCCESS) { + goto ErrExit; + } + } else { + + int bar_num; + + // Type of file + mf->s.tp = MST_PCI; + + // calculate bar number + if (access_type == MDEVS_TAVOR_CR) { + // TODO: See what about UAR and DDR bars - not supported for now. + + } else { + DPRINT1(("Only _cr access is supported")); + goto ErrName; + } + + // check FW_MAP_CRSPACE + if (ibal_access(mf->h_ca, 0x0, &mf->cr_map, sizeof(mf->cr_map), FW_MAP_CRSPACE ) != IB_SUCCESS) { + goto ErrExit; + } + + mf->s.ptr = (void*)(ULONG_PTR)mf->cr_map.va; + } + + } else if (dev_id == DEVASYS_DEV_ID) { + // Type of file + h = usb_open(); + if ( h == INVALID_HANDLE_VALUE ) + goto ErrExit; + mf->s.fd = FromHandle(h); + // mf->s.tp = (usb_is_dimax()) ? 
MST_USB_DIMAX : MST_USB;
+ mf->s.tp = MST_USB;
+ } else {
+ goto ErrExit;
+ }
+
+ /* OK */
+ return (mfile*)mf;
+ErrName:
+// CloseHandle(h);
+ErrExit:
+ mclose((mfile*)mf);
+ errno = ENODEV;
+ return 0;
+}
+
+MTCR_API void maccelerate(mfile *mf)
+{
+#ifdef SUPPORT_I2CM
+ if (mf->is_i2cm)
+ i2c_master_set((mfile*)mf);
+#endif
+}
+
+MTCR_API void mrestore(mfile *mf)
+{
+#ifdef SUPPORT_I2CM
+ if (mf->is_i2cm)
+ i2c_master_restore(mf);
+#endif
+}
+
+MTCR_API int mclose(mfile *mf)
+{
+ int rc=0;
+
+ if (mf->tp == MST_USB) {
+ rc = usb_close( (HANDLE)mf->fd);
+ } else {
+ mfile_ibal* mfi = (mfile_ibal*)mf;
+ u_int32_t stub;
+
+ if (mf->tp == MST_PCICONF) {
+ ibal_access(mfi->h_ca, 0x0, &stub, 4, FW_CLOSE_IF );
+ } else if (mf->tp == MST_PCI) {
+ if (mfi->cr_map.size) {
+
+ if (ibal_access(mfi->h_ca, 0x0, NULL, 0, FW_UNMAP_CRSPACE ) != IB_SUCCESS) {
+ DPRINT1(("Unmap crspace failed"));
+ }
+ }
+ }
+
+ if (mfi->h_ca)
+ ib_close_ca( mfi->h_ca, NULL );
+ if (mfi->h_al)
+ ib_close_al( mfi->h_al);
+ }
+
+ free(mf);
+ return rc;
+}
+
+MTCR_API int mread4(mfile *mf, unsigned int offset, u_int32_t *value)
+{
+ int rc = 4;
+ u_int32_t lvalue;
+ mfile_ibal* mfi = (mfile_ibal*)mf;
+ switch (mf->tp) {
+
+ case MST_PCI:
+
+ if (!mf->ptr)
+ return -1;
+
+ if (offset >= mfi->cr_map.size) {
+ DPRINT1(("MTCR:mread4: Tried to access value at offset %x, which is out of pci bar (size %x)\n",
+ offset,
+ mfi->cr_map.size));
+ errno = EINVAL;
+ return -1;
+ }
+
+
+#ifdef SUPPORT_I2CM
+ if (mf->is_i2cm)
+ return i2c_master_read_cr(mf, value, offset, 4);
+#endif
+
+ if (mf->dtype == MST_TAVOR)
+ *value = __be32_to_cpu(*((volatile unsigned int *)((char *)mf->ptr + offset)));
+ else
+ *value = *((volatile unsigned int *)((char *)mf->ptr + offset));
+ break;
+
+ case MST_PCICONF:
+ {
+
+#ifdef SUPPORT_I2CM
+ if (mf->is_i2cm)
+ return i2c_master_read_cr(mf, value, offset, 4);
+#endif
+
+ if (ibal_access(((mfile_ibal*)mf)->h_ca, offset, value, 4, FW_READ_CMD) == IB_SUCCESS) {
+ rc = 4;
+ } else {
+ rc = -1;
+ }
+
+ break;
+ }
+
+ case MST_USB:
+ {
+ switch(mf->dtype)
+ {
+ case MST_GAMLA:
+ {
+ unsigned int offs = (usb_is_dimax()) ? offset : __cpu_to_be32(offset);
+ unsigned int addr_len = 2;
+ rc = usb_read( (HANDLE)mf->fd, I2C_TRANS_32ADR, mf->i2c_slave, offs, addr_len,
+ (u_int8_t*)&lvalue, 4 );
+ break;
+ }
+ case MST_TAVOR:
+ default:
+ {
+ unsigned int offs = (usb_is_dimax()) ?
offset : __cpu_to_be32(offset); + unsigned int addr_len = 4; + rc = usb_read( (HANDLE)mf->fd, I2C_TRANS_32ADR, mf->i2c_slave, offs, addr_len, + (u_int8_t*)&lvalue, 4 ); + break; + } + } + + if (rc == MT_OK) { + *value = __be32_to_cpu(lvalue); + rc = 4; + } + break; + + } + + default: + return -1; + } + + DPRINT5(( "MTCR:mread4: off 0x%x, val 0x%x\n", offset, *value)); + return rc; +} + +MTCR_API int mwrite4(mfile *mf, unsigned int offset, u_int32_t value) +{ + int rc = 4; + unsigned int lvalue; + mfile_ibal* mfi = (mfile_ibal*)mf; + + switch(mf->tp) + { + case MST_PCI: + if (!mf->ptr) + return -1; + + if (offset >= mfi->cr_map.size) { + DPRINT1(("MTCR:mwrite4: Tried to access value at offset %x, which is out of pci bar (size %x)\n", + offset, + mfi->cr_map.size)); + errno = EINVAL; + return -1; + } + +#ifdef SUPPORT_I2CM + if (mf->is_i2cm) + return i2c_master_write_cr(mf, value, offset, 4); +#endif + + if (mf->dtype == MST_TAVOR) + *((volatile unsigned int *)((char *)mf->ptr + offset)) = __cpu_to_be32(value); + else + *((volatile unsigned int *)((char *)mf->ptr + offset)) = value; + break; + + case MST_PCICONF: + { + +#ifdef SUPPORT_I2CM + if (mf->is_i2cm) + return i2c_master_write_cr(mf, value, offset, 4); +#endif + + if (ibal_access(((mfile_ibal*)mf)->h_ca, offset, &value, 4, FW_WRITE_CMD) == IB_SUCCESS) { + rc = 4; + } else { + rc = -1; + } + + break; + } + + case MST_USB: + { + + switch(mf->dtype) + { + case MST_GAMLA: + { + unsigned int offs = (usb_is_dimax()) ? offset : __cpu_to_be32(offset); + unsigned int addr_len = 2; + lvalue = __cpu_to_be32(value); + rc = usb_write( (HANDLE)mf->fd, I2C_TRANS_32ADR, mf->i2c_slave, offs, addr_len, + (u_int8_t*)&lvalue, 4 ); + break; + } + case MST_TAVOR: + default: + { + unsigned int offs = (usb_is_dimax()) ? 
offset : __cpu_to_be32(offset); + unsigned int addr_len = 4; + lvalue = __cpu_to_be32(value); + rc = usb_write( (HANDLE)mf->fd, I2C_TRANS_32ADR, mf->i2c_slave, offs, addr_len, + (u_int8_t*)&lvalue, 4 ); + break; + } + } + + if (rc == MT_OK) { + rc = 4; + } + break; + } + default: + return -1; + } + + DPRINT5(("MTCR:mwrite4: off 0x%x, val 0x%x\n", offset, value)); + return rc; +} + +MTCR_API int mread64(mfile *mf, unsigned int offset, void *data, int length) +{ + int rc; + + if (length > MAX_TRANS_SIZE) + { + errno = EINVAL; + return -1; + } + + switch(mf->tp) + { + case MST_PCI: + case MST_PCICONF: + { + int i; + unsigned char *cdata = (unsigned char *)data; + + for (i=0; idtype) + { + case MST_NOADDR: trans_type = I2C_TRANS_NOADR; aw = 0; break; + case MST_DIMM: trans_type = I2C_TRANS_8ADR; aw = 1; break; + case MST_GAMLA: trans_type = I2C_TRANS_16ADR; aw = 2; break; + case MST_TAVOR: trans_type = I2C_TRANS_32ADR; aw = 4; break; + } + + ret = usb_read((HANDLE)mf->fd, trans_type, mf->i2c_slave, offset, aw, data, length); + if (ret == MT_OK) { + return length; + } else { + errno = ret; + return -1; + } + } +#endif + + + } + + errno = EPERM; + return -1; +} + + +MTCR_API int mwrite64(mfile *mf, unsigned int offset, void *data, int length) +{ + + int rc; + + if (length > MAX_TRANS_SIZE) + { + errno = EINVAL; + return -1; + } + + switch(mf->tp) + { + case MST_PCI: + case MST_PCICONF: + { + int i; + unsigned char *cdata = (unsigned char *)data; + + for (i=0; idtype) + { + case MST_NOADDR: trans_type = I2C_TRANS_NOADR; aw = 0; break; + case MST_DIMM: trans_type = I2C_TRANS_8ADR; aw = 1; break; + case MST_GAMLA: trans_type = I2C_TRANS_16ADR; aw = 2; break; + case MST_TAVOR: trans_type = I2C_TRANS_32ADR; aw = 4; break; + } + + ret = usb_write((HANDLE)mf->fd, trans_type, mf->i2c_slave, offset, aw, data, length); + if (ret == MT_OK) { + return length; + } else { + errno = ret; + return -1; + } + } +#endif + + } + + errno = EPERM; + return -1; +} + +unsigned char mset_i2c_slave(mfile *mf, unsigned char new_i2c_slave) +{ + unsigned char ret; + if (mf) + { + ret = mf->i2c_slave; + mf->i2c_slave = new_i2c_slave; + } + else + ret = 0xff; + return ret; +} + diff --git a/branches/Ndi/tools/mtcr/user/mtcr.def b/branches/Ndi/tools/mtcr/user/mtcr.def new file mode 100644 index 00000000..dead913e --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr.def @@ -0,0 +1 @@ +EXPORTS \ No newline at end of file diff --git a/branches/Ndi/tools/mtcr/user/mtcr.h b/branches/Ndi/tools/mtcr/user/mtcr.h new file mode 100644 index 00000000..b799fd3d --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr.h @@ -0,0 +1,216 @@ +/* + * + * mtcr.h - Mellanox Software tools (mst) driver definitions + * + */ + +#ifndef _MST_H +#define _MST_H + + +#ifdef __WIN__ + +#include + +#ifdef MTCR_EXPORTS +#define MTCR_API __declspec(dllexport) +#else +#define MTCR_API __declspec(dllimport) +#endif + +typedef unsigned __int8 u_int8_t; +typedef __int8 int8_t; +typedef unsigned __int16 u_int16_t; +typedef __int16 int16_t; +typedef unsigned __int32 u_int32_t; +typedef __int32 int32_t; +typedef unsigned __int64 u_int64_t; +typedef __int64 int64_t; + +#if defined(_WIN64) + typedef __int64 MT_long_ptr_t; + typedef unsigned __int64 MT_ulong_ptr_t; +#else + typedef _W64 long MT_long_ptr_t; + typedef _W64 unsigned long MT_ulong_ptr_t; +#endif + +#else /* UNIX */ + +#include +#define MTCR_API + +#endif + + +#ifdef __cplusplus +extern "C" { +#endif + +#ifndef USE_IB_MGT +typedef struct mib_private_t { + int dummy; +} MIB_Private; +#else +#include 
"mtcr_ib_private.h" +#endif + +typedef enum MType_t {MST_PCI, MST_PCICONF, MST_CALBR, MST_USB, + MST_IB, MST_IF, MST_PPC, MST_USB_DIMAX +#ifdef ENABLE_MST_DEV_I2C + ,MST_DEV_I2C +#endif +} MType; +typedef enum DType_t {MST_GAMLA, MST_TAVOR, MST_DIMM, MST_NOADDR} DType; +#define MST_ANAFA2 MST_TAVOR +#define MST_EEPROM MST_GAMLA +typedef enum Mdevs_t { + MDEVS_GAMLA = 0x01, /* Each device that actually is a Gamla */ + MDEVS_I2CM = 0x02, /* Each device that can work as I2C master */ + MDEVS_MEM = 0x04, /* Each device that is a memory driver (vtop) */ + MDEVS_TAVOR_DDR = 0x08, /* Each device that maps to Tavor DDR */ + MDEVS_TAVOR_UAR = 0x10, /* Each device that maps to Tavor UAR */ + MDEVS_TAVOR_CR = 0x20, /* Each device that maps to Tavor CR */ + MDEVS_IF = 0x40, /* Standard device interface */ + MDEVS_REM = 0x80, /* Remote devices */ + MDEVS_PPC = 0x100, /* PPC devices */ + MDEVS_DEV_I2C = 0x200, /* Generic linux kernel i2c device */ + MDEVS_TAVOR = (MDEVS_TAVOR_DDR|MDEVS_TAVOR_UAR|MDEVS_TAVOR_CR), + MDEVS_ALL = 0xffffffff +} Mdevs; + +/* All fields in follow structure are not supposed to be used */ +/* or modified by user programs. Except i2c_slave that may be */ +/* modified before each access to target I2C slave address */ +typedef struct mfile_t { + MType tp; /* type of driver */ + DType dtype; /* target device to access to */ + DType itype; /* interface device to access via */ + int is_i2cm; /* use device as I2C master */ + unsigned char i2c_slave; +#ifdef __WIN__ + MT_ulong_ptr_t fd; +#else + int fd; +#endif + int sock; /* in not -1 - remote interface */ + void *ptr; + MIB_Private mib; /* Data for IB interface (if relevant) */ + unsigned int i2c_RESERVED; /* Reserved for internal usage (i2c internal) */ + enum Mdevs_t flags; +} mfile; + +#ifdef __WIN__ +#define FromHandle(h) ((MT_ulong_ptr_t)(h)) +#define ToHandle(h) ((HANDLE)(h)) +#else +#define FromHandle(h) ((int)(h)) +#define ToHandle(h) ((HANDLE)(h)) +#endif + +/* + * Get list of MST (Mellanox Software Tools) devices. + * Put all device names as null-terminated strings to buf. + * + * Return number of devices found or -1 if buf overflow + */ +MTCR_API int mdevices(char *buf, int len, int mask); + +/* + * Open Mellanox Software tools (mst) driver. + * Return valid mfile ptr or 0 on failure + */ +MTCR_API mfile *mopend(const char *name, DType dtype); + +/* + * Open Mellanox Software tools (mst) driver. Device type==TAVOR + * Return valid mfile ptr or 0 on failure + */ +MTCR_API mfile *mopen(const char *name); + +/* + * Close Mellanox driver + * req. descriptor + */ +MTCR_API int mclose(mfile *mf); + +/* + * Accelerate device if possible. + * When device is I2C master - overclock it + */ +MTCR_API void maccelerate(mfile *mf); + +/* + * Restore normal settings, if device was accelerated. + */ +MTCR_API void mrestore(mfile *mf); + +/* + * Read 4 bytes, return number of succ. read bytes or -1 on failure + */ +MTCR_API int mread4(mfile *mf, unsigned int offset, u_int32_t *value); + +/* + * Write 4 bytes, return number of succ. written bytes or -1 on failure + */ +MTCR_API int mwrite4(mfile *mf, unsigned int offset, u_int32_t value); + +/* + * Read lots of bytes, return number of succ. read bytes or -1 on failure + * + * Works for any interface, but important for long bursts. + */ +MTCR_API int mread_by_chunks(mfile *mf, unsigned int offset, void *data, int length); + +/* + * Read up to 64 bytes, return number of succ. 
read bytes or -1 on failure + * + * This makes sense only w/ CALIBRE/DevaSys interfaces *to EEPROM reading only* + */ +MTCR_API int mread64(mfile *mf, unsigned int offset, void *data, int length); + +/* + * Write up to 64 bytes, return number of succ. written bytes or -1 on failure + * + * This makes sense only w/ CALIBRE/DevaSys interfaces *to EEPROM burning only* + */ +MTCR_API int mwrite64(mfile *mf, unsigned int offset, void *data, int length); + +/* + * Set a new value for i2c_slave + * Return previous value + */ +MTCR_API unsigned char mset_i2c_slave(mfile *mf, unsigned char new_i2c_slave); + +/* + * get free phys. contigous pages + * order should be in range [0..9] + * the size of allocated memory will be (2^order * 4096) + * return pointer to virtual address mapped to the allocated area + * on failure returns 0 and errno is set + */ +MTCR_API void *mget_free_pages (mfile *mf, unsigned int order); + +/* + * free phys. contigous pages + * order should be in range [0..9] + * vma is freed + * on success returns 0 + * on failure returns -1 and errno is set + */ +MTCR_API int mfree_pages (mfile *mf, void *addr, unsigned int order); + + + +/* + * translate virtual address to physical address + * return physical address on success, or 0 on error + */ +MTCR_API unsigned long mvtop (mfile *mf, void *va); + +#ifdef __cplusplus +} +#endif + + +#endif diff --git a/branches/Ndi/tools/mtcr/user/mtcr.rc b/branches/Ndi/tools/mtcr/user/mtcr.rc new file mode 100644 index 00000000..3f2d2293 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr.rc @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#ifdef MTCR_USB_SUPPORT +#define VER_FILEDESCRIPTION_STR "Mellanox HW Access library (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Mellanox HW Access library. No USB support. (Debug)" +#endif + +#else + +#ifdef MTCR_USB_SUPPORT +#define VER_FILEDESCRIPTION_STR "Mellanox HW Access library" +#else +#define VER_FILEDESCRIPTION_STR "Mellanox HW Access library. No USB support." 
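+// (MTCR_USB_SUPPORT is defined only for x86 builds in user\SOURCES, since the
+// Dimax USB bridge library is not provided for 64-bit architectures.)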
+#endif + +#endif + +#define VER_INTERNALNAME_STR "mtcr.dll" +#define VER_ORIGINALFILENAME_STR "mtcr.dll" + +#include diff --git a/branches/Ndi/tools/mtcr/user/mtcr_i2c.c b/branches/Ndi/tools/mtcr/user/mtcr_i2c.c new file mode 100644 index 00000000..fb03a942 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr_i2c.c @@ -0,0 +1,612 @@ +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) March 2002, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... + * + */ + +/* + * + * mtcr_i2c.c - Mellanox Software tools (mst) driver: I2C master routines + * + */ + + +#include +#include +#include +#include + +#ifdef __WIN__ +#include "com_def.h" +#else +#include + +#include + + +#include +#include +#include "mst_pci.h" +#include "mst_pciconf.h" +#endif + +#include "mtcr.h" +#include "mtcr_i2c.h" + + +#define REPEAT_WRITE_IF_NACK +/* #define LOWR_LEVEL_DEBUG */ +/* #define LOWW_LEVEL_DEBUG */ +/* #define MID_LEVEL_DEBUG */ +/* #define HIGH_LEVEL_DEBUG */ + +enum CONSTANTS { + EPROM_PRIMARY_COUNT = 5000, /* Amount of "fast" check */ + EPROM_SECONDARY_COUNT = 10, /* Amount of "slow" check */ + EPROM_REQUEST_PAUSE = 100, /* Microseconds for pause */ + MAX_REPETITION = 100, /* Maximal number of transaction repet. */ + ST_IN_TRANSACTION = 7, /* Status: cycle in transaction completed OK */ + ST_TIMEOUT = 6, /* Status: transaction timeout */ + ST_IDLE = 0 /* Status: IDLE */ +}; + +/* SPM registers offset */ +#define SPM_OFFS ((mf->itype == MST_TAVOR) ? 
0xf0180 : 0x3180) + +#define CLEAR(st) memset(&(st), 0, sizeof(st)) + +/* Like mread4, but only for PCI/PCICONF interfaces and doesn't check is_i2cm */ +static int mread4_(mfile *mf, unsigned int offset, unsigned int *value) +{ + int rc = 4; + + +#ifdef SUPPORT_PCICONF + struct mst_read4_st r4; +#endif + + +#ifdef LOWR_LEVEL_DEBUG + printf("mread4_: tp=%d,itype=%d - try read offs=0x%x\n", + mf->tp, mf->itype, offset); +#endif + + switch(mf->tp) + { + case MST_PCI: + if (!mf->ptr) + { + errno = EFAULT; + return -1; + } + + if (mf->itype == MST_TAVOR) + *value = __be32_to_cpu(*((unsigned int *)((char *)mf->ptr + offset))); + else + *value = *((unsigned int *)((char *)mf->ptr + offset)); + break; +#ifdef SUPPORT_PCICONF + case MST_PCICONF: + CLEAR(r4); + r4.offset = offset; + if ((rc = ioctl(mf->fd, PCICONF_READ4, &r4)) < 0) + return rc; + *value = r4.data; + rc = 4; + break; +#endif + default: + errno = EINVAL; + rc = -1; + } + +#ifdef LOWR_LEVEL_DEBUG + printf(" mread4_: rc=%d,offs=0x%x val=0x%08x\n", rc, offset, *value); +#endif + + return rc; +} + +/* Like mwrite4, but only for PCI/PCICONF interfaces and doesn't check is_i2cm */ +static int mwrite4_(mfile *mf, unsigned int offset, unsigned int value) +{ + int rc = 4; + + +#ifdef SUPPORT_PCICONF + struct mst_write4_st r4; +#endif + +#ifdef LOWW_LEVEL_DEBUG + printf("mwrite4_: tp=%d,itype=%d - try write offs=0x%x val=0x%08x\n", + mf->tp, mf->itype, offset, value); +#endif + + switch(mf->tp) + { + case MST_PCI: + if (!mf->ptr) + { + errno = EFAULT; + return -1; + } + + if (mf->itype == MST_TAVOR) + *((unsigned int *)((char *)mf->ptr + offset)) = __cpu_to_be32(value); + else + *((unsigned int *)((char *)mf->ptr + offset)) = value; + break; +#ifdef SUPPORT_PCICONF + case MST_PCICONF: + CLEAR(r4); + r4.offset = offset; + r4.data = value; + if ((rc = ioctl(mf->fd, PCICONF_WRITE4, &r4)) < 0) + return rc; + rc = 4; + break; +#endif + default: + errno = EINVAL; + rc = -1; + } + +#ifdef LOWW_LEVEL_DEBUG + printf(" mwrite4_: rc=%d\n", rc); +#endif + + return rc; +} + + +/* Return -1 when I/O error or cmd doesn't become zero */ +/* Return status field otherwise */ +static int wait_trans(mfile *mf) +{ + u_int32_t val; + u_int32_t cnt = 0; + + while(1) + { + if (mread4_(mf, SPM_OFFS, &val) != 4) + return -1; + if (!(val >> 29)) + break; + if (cnt > EPROM_PRIMARY_COUNT) + usleep(EPROM_REQUEST_PAUSE); + if (cnt > EPROM_PRIMARY_COUNT+EPROM_SECONDARY_COUNT) + { + errno = EBUSY; + return -1; + } + cnt++; + } + return (val>>16) & 0x07; +} + +/* Return status field or -1 on fatal failure */ +static int w_trans(mfile *mf, void *data_, int len) +{ + char *data = (char *)data_; + int vbt = -1; + u_int32_t val = 0; + +#ifdef MID_LEVEL_DEBUG + printf("w_trans: data=0x%08x, len:%d\n", *(unsigned int *)data, len); +#endif + + switch (len) + { + case 1: + vbt = 0; + val = (*data << 24) & 0xff000000; + break; + case 2: + vbt = 1; + val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000); + break; + case 3: + vbt = 2; + val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000) | + ((data[2] << 8) & 0xff00); + break; + case 4: + vbt = 3; + val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000) | + ((data[2] << 8) & 0xff00) | (data[3] & 0xff); + break; + default: + return -1; + } + if (mwrite4_(mf, SPM_OFFS + 4, val) != 4) + return -1; + if (mwrite4_(mf, SPM_OFFS, (2<<29) | (vbt << 22) | (mf->i2c_slave & 0x7f)) != 4) + return -1; + + return wait_trans(mf); +} + + +/* Return status field or -1 on fatal failure */ +static int r_trans(mfile *mf, 
void *data_, int len) +{ + char *data = (char *)data_; + int vbt = -1, rc; + u_int32_t val = 0; + +#ifdef MID_LEVEL_DEBUG + printf("r_trans: len:%d\n", len); +#endif + + switch (len) + { + case 1: + vbt = 0; + break; + case 2: + vbt = 1; + break; + case 3: + vbt = 2; + break; + case 4: + vbt = 3; + break; + default: + return -1; + } + + if (mwrite4_(mf, SPM_OFFS, (1<<29) | (vbt << 22) | (mf->i2c_slave & 0x7f)) != 4) + return -1; + rc = wait_trans(mf); + if (rc != 7) + return rc; + + if (mread4_(mf, SPM_OFFS + 4, &val) != 4) + return -1; + + switch (len) + { + case 1: + *data = (val >> 24) & 0xff; + break; + case 2: + *data++ = (val >> 24) & 0xff; + *data = (val >> 16) & 0xff; + break; + case 3: + *data++ = (val >> 24) & 0xff; + *data++ = (val >> 16) & 0xff; + *data = (val >> 8) & 0xff; + break; + case 4: + *data++ = (val >> 24) & 0xff; + *data++ = (val >> 16) & 0xff; + *data++ = (val >> 8) & 0xff; + *data = val & 0xff; + break; + default: + return -1; + } + return 7; +} + +/* Return status field or -1 on fatal failure */ +static int end_trans(mfile *mf) +{ + if (mwrite4_(mf, SPM_OFFS, (3<<29) | (mf->i2c_slave & 0x7f)) != 4) + return -1; + return wait_trans(mf); +} + +int i2c_master_write_cr(mfile *mf, unsigned int value, unsigned int offset, + int len) +{ + unsigned int ivalue = __cpu_to_be32(value); + unsigned short svalue = __cpu_to_be16((unsigned short)(value & 0xffff)); + unsigned char cvalue = value & 0xff; + int rc=0, repeat=1; + unsigned short offs = __cpu_to_be16(offset & 0xffff); + unsigned char off1 = offset & 0xff; + +#ifdef HIGH_LEVEL_DEBUG + printf("i2c_master_write_cr i2c:0x%x, offs:0x%x, val:0x%x, len:%d\n", + mf->i2c_slave, offset, value, len); +#endif + + offset = __cpu_to_be32(offset); + + while (repeat) + { + /* Write address (1byte for DIMM, 2bytes for Gamla and 4bytes for Tavor) */ + switch(mf->dtype) + { + case MST_TAVOR: + rc = w_trans(mf, &offset, sizeof(offset)); + break; + case MST_GAMLA: + rc = w_trans(mf, &offs, sizeof(offs)); + break; + case MST_DIMM: + rc = w_trans(mf, &off1, sizeof(off1)); + break; + case MST_NOADDR: + rc = ST_IN_TRANSACTION; /* FAKE! 
+
+        /* Write data */
+        if (rc == ST_IN_TRANSACTION)
+            switch(len)
+            {
+            case 1:
+                rc = w_trans(mf, &cvalue, sizeof(cvalue));
+                break;
+            case 2:
+                rc = w_trans(mf, &svalue, sizeof(svalue));
+                break;
+            case 4:
+                rc = w_trans(mf, &ivalue, sizeof(ivalue));
+                break;
+            default:
+                errno = EINVAL;
+                return -1;
+            }
+
+        /* End transaction (anyway, even if it was an error) */
+        if (end_trans(mf) < 0)
+            return -1;
+
+        /* Now check GW status and repeat transaction if timeout */
+        switch (rc)
+        {
+        case ST_IN_TRANSACTION:
+            repeat = 0;
+            break;
+#ifdef REPEAT_WRITE_IF_NACK
+        default:
+            if (++repeat > MAX_REPETITION)
+            {
+                errno = EIO;
+                return -1;
+            }
+#else
+        case ST_TIMEOUT:
+            if (++repeat > MAX_REPETITION)
+                return -1;
+            /* printf("i2c_master_write_cr: repeat:%d\n", repeat); */
+            break;
+        default:
+            errno = EIO;
+            return -1;
+#endif
+        }
+    }
+
+#ifdef HIGH_LEVEL_DEBUG
+    printf("  i2c_master_write_cr: rc=%d\n", len);
+#endif
+
+    return len;
+}
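
/*
 * Usage sketch for the i2c_master_* entry points in this file (the slave
 * address, offset and value are illustrative only; mset_i2c_slave() is the
 * same mtcr call that mwrite.c uses later in this patch):
 *
 *    unsigned int val;
 *    mset_i2c_slave(mf, 0x48);   -- hypothetical I2C slave address
 *    if (i2c_master_write_cr(mf, 0xcafe, 0x2800, 4) != 4)
 *        perror("i2c write");    -- errno was set to EIO/EBUSY/EINVAL above
 *    if (i2c_master_read_cr(mf, &val, 0x2800, 4) == 4)
 *        printf("readback: 0x%x\n", val);
 */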
+
+int i2c_master_read_cr(mfile *mf, void *value, unsigned int offset, int len)
+{
+    unsigned int *ivalue = (unsigned int *)value;
+    unsigned short *svalue = (unsigned short *)value;
+    unsigned char *cvalue = (unsigned char *)value;
+    int rc=0, repeat=1;
+    unsigned short offs = __cpu_to_be16(offset & 0xffff);
+    unsigned char off1 = offset & 0xff;
+
+#ifdef HIGH_LEVEL_DEBUG
+    printf("i2c_master_read_cr i2c:0x%x, offs:0x%x, len:%d\n",
+           mf->i2c_slave, offset, len);
+#endif
+
+    offset = __cpu_to_be32(offset);
+
+    while (repeat)
+    {
+        /* Write address (1byte for DIMM, 2bytes for Gamla and 4bytes for Tavor) */
+        switch(mf->dtype)
+        {
+        case MST_TAVOR:
+            rc = w_trans(mf, &offset, sizeof(offset));
+            break;
+        case MST_GAMLA:
+            rc = w_trans(mf, &offs, sizeof(offs));
+            break;
+        case MST_DIMM:
+            rc = w_trans(mf, &off1, sizeof(off1));
+            break;
+        case MST_NOADDR:
+            rc = ST_IN_TRANSACTION; /* FAKE! No address transaction */
+            break;
+        }
+
+        /* Read data */
+        if (rc == ST_IN_TRANSACTION)
+            switch(len)
+            {
+            case 1:
+                rc = r_trans(mf, cvalue, 1);
+                break;
+            case 2:
+                rc = r_trans(mf, svalue, 2);
+                break;
+            case 4:
+                rc = r_trans(mf, ivalue, 4);
+                break;
+            default:
+                errno = EINVAL;
+                return -1;
+            }
+
+        /* End transaction (anyway, even if it was an error) */
+        if (end_trans(mf) < 0)
+            return -1;
+
+        /* Now check GW status and repeat transaction if timeout */
+        switch (rc)
+        {
+        case ST_IN_TRANSACTION:
+            repeat = 0;
+            break;
+        case ST_TIMEOUT:
+            if (++repeat > MAX_REPETITION)
+                return -1;
+            /* printf("i2c_master_read_cr: repeat:%d\n", repeat); */
+            break;
+        default:
+            errno = EIO;
+            return -1;
+        }
+    }
+
+    /* Convert output value */
+    switch(len)
+    {
+    case 2:
+        *svalue = __be16_to_cpu(*svalue);
+        break;
+    case 4:
+        *ivalue = __be32_to_cpu(*ivalue);
+        break;
+    }
+
+#ifdef HIGH_LEVEL_DEBUG
+    printf("  i2c_master_read_cr: val:0x%x rc=%d\n", *ivalue, len);
+#endif
+
+    return len;
+}
+
+int i2c_master_read(mfile *mf, void *value, unsigned int offset, int len)
+{
+    unsigned int *ivalue = (unsigned int *)value;
+    unsigned short *svalue = (unsigned short *)value;
+    unsigned char *cvalue = (unsigned char *)value;
+    int rc=0, repeat=1;
+    unsigned short offs = __cpu_to_be16(offset & 0xffff);
+    unsigned char off1 = offset & 0xff;
+
+#ifdef HIGH_LEVEL_DEBUG
+    printf("i2c_master_read i2c:0x%x, offs:0x%x, len:%d\n",
+           mf->i2c_slave, offset, len);
+#endif
+
+    offset = __cpu_to_be32(offset);
+
+    while (repeat)
+    {
+        /* Write address (1byte for DIMM, 2bytes for Gamla and 4bytes for Tavor) */
+        switch(mf->dtype)
+        {
+        case MST_TAVOR:
+            rc = w_trans(mf, &offset, sizeof(offset));
+            break;
+        case MST_GAMLA:
+            rc = w_trans(mf, &offs, sizeof(offs));
+            break;
+        case MST_DIMM:
+            rc = w_trans(mf, &off1, sizeof(off1));
+            break;
+        case MST_NOADDR:
+            rc = ST_IN_TRANSACTION; /* FAKE! No address transaction */
+            break;
+        }
+
+        /* Read data */
+        if (rc == ST_IN_TRANSACTION)
+            switch(len)
+            {
+            case 1:
+                rc = r_trans(mf, cvalue, 1);
+                break;
+            case 2:
+                rc = r_trans(mf, svalue, 2);
+                break;
+            case 3:
+                rc = r_trans(mf, svalue, 3);
+                break;
+            case 4:
+                rc = r_trans(mf, ivalue, 4);
+                break;
+            default:
+                errno = EINVAL;
+                return -1;
+            }
+
+        /* End transaction (anyway, even if it was an error) */
+        if (end_trans(mf) < 0)
+            return -1;
+
+        /* Now check GW status and repeat transaction if timeout */
+        switch (rc)
+        {
+        case ST_IN_TRANSACTION:
+            repeat = 0;
+            break;
+        case ST_TIMEOUT:
+            if (++repeat > MAX_REPETITION)
+                return -1;
+            /* printf("i2c_master_read: repeat:%d\n", repeat); */
+            break;
+        default:
+            errno = EIO;
+            return -1;
+        }
+    }
+
+#ifdef HIGH_LEVEL_DEBUG
+    printf("  i2c_master_read: val:0x%x rc=%d\n", *ivalue, len);
+#endif
+
+    return len;
+}
+
+void i2c_master_set(mfile *mf)
+{
+    unsigned int val;
+    int i;
+
+    if (mf->itype == MST_GAMLA)
+    {
+        /* Screw up SPM clock. Works w/ Gamla only */
+        mread4_(mf, SPM_OFFS + 12, &val);
+        mf->i2c_RESERVED = val;
+        val &= 0xffff0000;
+        val |= 0x100;
+        mwrite4_(mf, SPM_OFFS + 12, val);
+    }
+
+    /* Tavor I2C bug workaround */
+    if (mf->dtype == MST_TAVOR)
+    {
+        for (i=0; i<9; i++)
+            end_trans(mf);
+
+        i2c_master_read_cr(mf, &val, 0xf0014, 4);
+
+        for (i=0; i<9; i++)
+            end_trans(mf);
+    }
+    else
+        i2c_master_read_cr(mf, &val, 0x2800, 4);
+}
+
+void i2c_master_restore(mfile *mf)
+{
+    if (mf->itype == MST_GAMLA)
+        /* Restore SPM clock.
*/ + mwrite4_(mf, SPM_OFFS + 12, mf->i2c_RESERVED); +} diff --git a/branches/Ndi/tools/mtcr/user/mtcr_i2c.h b/branches/Ndi/tools/mtcr/user/mtcr_i2c.h new file mode 100644 index 00000000..8142b54a --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/mtcr_i2c.h @@ -0,0 +1,38 @@ +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) March 2002, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... + * + * mtcr_i2c.h - Mellanox Software tools (mst) driver: I2C master routines + * + * Version: $Id$ + * + */ + +#ifndef _MTCR_I2C_H +#define _MTCR_I2C_H + +#ifdef __cplusplus +extern "C" { +#endif + +#define MAX_TRANS_SIZE 64 + +int i2c_master_write_cr(mfile *mf, unsigned int value, unsigned int offset, int len); +int i2c_master_read_cr(mfile *mf, void *value, unsigned int offset, int len); +void i2c_master_restore(mfile *mf); +void i2c_master_set(mfile *mf); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/branches/Ndi/tools/mtcr/user/usb.cpp b/branches/Ndi/tools/mtcr/user/usb.cpp new file mode 100644 index 00000000..9186dfc5 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/usb.cpp @@ -0,0 +1,510 @@ +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) December 2001, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... 
+ *
+ * usb.cpp - EEPROM access via usb implementation
+ *
+ * Version: $Id$
+ *
+ * Author: Alex Rezinsky (alexr@mellanox.co.il)
+ */
+
+#include "com_def.h"
+#include "usb.h"
+
+#ifdef MTCR_USB_SUPPORT
+// -------------------------------------------------------------------------------------
+// PREPROCESSOR FLAGS
+// -------------------------------------------------------------------------------------
+
+// print debug information
+//#define DEBUG_PRINT
+
+// verify read operations
+//#define VERIFY_EACH_READ
+
+// delay between I2C operations
+//#define SLEEP_AFTER_EACH_USB_IO
+
+// verify write operations
+//#define COMPARE_AFTER_EACH_WRITE
+
+// -------------------------------------------------------------------------------------
+// LITERALS
+// -------------------------------------------------------------------------------------
+ #define USB_DEVICE_NAME       "UsbI2cIo"
+ #define USB_WRITE_DELAY_MSEC  16
+ #define USB_READ_DELAY_MSEC   16
+
+// -------------------------------------------------------------------------------------
+// GLOBALS
+// -------------------------------------------------------------------------------------
+I2C_TRANS TransI2C;     // parameter structure
+
+
+// -------------------------------------------------------------------------------------
+// DIMAX
+// -------------------------------------------------------------------------------------
+static BOOL is_dimax = FALSE;
+BOOL usb_is_dimax() { return is_dimax;}
+ #include "i2cbridge.h"
+
+// -------------------------------------------------------------------------------------
+// DEBUG PRINT FACILITY
+// -------------------------------------------------------------------------------------
+
+ #ifdef DEBUG_PRINT
+
+char  g_DebugInfo[2048];
+char *g_DebugInfo_p = g_DebugInfo;
+int   g_DebugInfoCnt = 0;
+FILE *fd;
+char  g_buf[300];
+
+void OpenDebugInfo(char * str)
+{
+    fd = fopen( str, "a" );
+}
+
+void CloseDebugInfo()
+{
+    fclose(fd);
+}
+
+void WriteDebugInfo(char * str)
+{
+    int cnt = strlen(str);
+    g_DebugInfoCnt++;
+    if (g_DebugInfo_p + cnt > g_DebugInfo + sizeof(g_DebugInfo) )
+        g_DebugInfo_p = g_DebugInfo;
+    memcpy( g_DebugInfo_p, str, cnt );
+    g_DebugInfo_p += cnt;
+    fputs( str, fd );
+}
+
+void StoreDebugInfo(HANDLE h)
+{
+    int cnt;
+    char lpsDebugInfo[300];
+
+    cnt = DAPI_ReadDebugBuffer( lpsDebugInfo, h, sizeof(lpsDebugInfo));
+    lpsDebugInfo[cnt] = 0;
+    WriteDebugInfo( lpsDebugInfo );
+    cnt = DAPI_ReadDebugBuffer( lpsDebugInfo, h, sizeof(lpsDebugInfo));
+    lpsDebugInfo[cnt] = 0;
+    WriteDebugInfo( lpsDebugInfo );
+}
+ #endif
+
+
+// -------------------------------------------------------------------------------------
+// USB: LOW-LEVEL ROUTINES
+// -------------------------------------------------------------------------------------
+
+static call_result_t usb_read_trans(
+    HANDLE h,
+    const u_int32_t trans_type,
+    const u_int8_t  slv_addr,
+    const u_int32_t mem_base,
+    u_int8_t       *data_arr_r,
+    const u_int32_t data_size)
+{
+    call_result_t rc;
+    LONG lReadCnt;
+
+    // debug print
+#ifdef DEBUG_PRINT
+    WriteDebugInfo("\n------ usb_read_trans ------\n");
+#endif
+
+    if (trans_type == I2C_TRANS_NOADR || trans_type == I2C_TRANS_32ADR) { /* work in I2C_TRANS_NOADR mode */
+
+        //
+        // write offset
+        //
+
+        // calculate address size
+        static int sizes[] = {4,1,2,4,4};
+        int mem_base_sz = sizes[trans_type];
+
+        // prepare parameters
+        TransI2C.byTransType = I2C_TRANS_NOADR;
+        TransI2C.wMemoryAddr = 0;
+        TransI2C.bySlvDevAddr = slv_addr<<1;
+        TransI2C.wCount = (WORD)mem_base_sz;
+        memcpy(TransI2C.Data,
(char*)&mem_base, mem_base_sz); + + // write the offset + LONG lWriteCnt = DAPI_WriteI2c( h, &TransI2C); + rc = (lWriteCnt == TransI2C.wCount) ? MT_OK : MT_ERROR; + + // debug print +#ifdef DEBUG_PRINT + sprintf( g_buf, "write: type %d, addr 0x%02x, offset 0x%02x, req'd: %d, got: %d)\n", + TransI2C.byTransType, TransI2C.bySlvDevAddr, mem_base, TransI2C.wCount, lWriteCnt); + WriteDebugInfo(g_buf); + StoreDebugInfo( h ); +#endif + + // check the results + if (rc != MT_OK) + return rc; + + // + // read data + // + + // prepare parameters + TransI2C.byTransType = I2C_TRANS_NOADR; + TransI2C.wMemoryAddr = 0; + TransI2C.bySlvDevAddr = slv_addr<<1; + TransI2C.wCount = (WORD)data_size; + + } /* work in I2C_TRANS_NOADR mode */ + else { /* work in I2C_TRANS_xxxADR mode */ + + // prepare parameters + TransI2C.byTransType = (BYTE)trans_type; + TransI2C.wMemoryAddr = (WORD)mem_base; + TransI2C.bySlvDevAddr = slv_addr<<1; + TransI2C.wCount = (WORD)data_size; + + } /* work in I2C_TRANS_xxxADR mode */ + + // read the data + lReadCnt = DAPI_ReadI2c( h, &TransI2C); + rc = (lReadCnt == TransI2C.wCount) ? MT_OK : MT_ERROR; + + // debug print +#ifdef DEBUG_PRINT + sprintf( g_buf, "read: type %d, addr 0x%02x, offset 0x%02x, req'd: %d, got: %d)\n", + TransI2C.byTransType, TransI2C.bySlvDevAddr, TransI2C.wMemoryAddr, TransI2C.wCount, lReadCnt); + WriteDebugInfo(g_buf); + StoreDebugInfo( h ); +#endif + + // check the results + if (rc == MT_OK) + memcpy(data_arr_r, TransI2C.Data, lReadCnt); + + return rc; +} + +// ------------------------------------------------------------------------------------- +// USB: EXPORTED ROUTINES +// ------------------------------------------------------------------------------------- + + +BOOL usb_is_connected() { + // DIMAX + BYTE dev_num; + + dev_num = U2C_GetDeviceCount(); + DPRINT3(("Found %d DIMAX usb devices\n", dev_num)); + if (dev_num) { + return TRUE; + } + + // DEVASYS + dev_num = DAPI_GetDeviceCount(USB_DEVICE_NAME); + DPRINT3(("Found %d DEVASYS (%s) usb devices\n", dev_num, USB_DEVICE_NAME)); + if (dev_num) { + return TRUE; + } + + return FALSE; +} + + +//=============== +//* usb_open +//=============== +HANDLE usb_open() +{ + HANDLE usb; + + // DIMAX + BYTE dev_num = U2C_GetDeviceCount(); + if (!dev_num) { + DPRINT5(("usb_open: no DiMax devices\n" )); + } else { + DPRINT5(("usb_open: found %d DiMax devices\n", dev_num )); + usb = U2C_OpenDevice( 0 ); + if (usb == INVALID_HANDLE_VALUE) { + DPRINT1(("usb_open: U2C_OpenDevice failed\n" )); + } else { + U2C_VERSION_INFO Fw = {0,0}, Dll = {0,0}, Drv = {0,0}; + U2C_GetFirmwareVersion( usb, &Fw ); + Dll = U2C_GetDllVersion(); + U2C_GetDriverVersion( usb, &Drv ); + DPRINT3(("usb_open: DiMax: Dll %d.%d, Drv %d.%d, Fw %d.%d, \n", + Dll.MajorVersion, Dll.MinorVersion, + Drv.MajorVersion, Drv.MinorVersion, + Fw.MajorVersion, Fw.MinorVersion )); + is_dimax = TRUE; + return usb; + } + } + + // debug print +#ifdef DEBUG_PRINT + OpenDebugInfo( "UsbI2cIo.log" ); + WriteDebugInfo("\n------ usb_open ------\n"); +#endif + + // open device + usb = DAPI_OpenDeviceInstance(USB_DEVICE_NAME, 0); + + // debug print +#ifdef DEBUG_PRINT + StoreDebugInfo( usb ); +#endif + + return usb; +} + + +//=============== +//* usb_close +//=============== +call_result_t usb_close(HANDLE h) +{ + call_result_t rc; + + // DIMAX + if (is_dimax) { + rc = ( U2C_CloseDevice(h) == U2C_SUCCESS) ? MT_OK : MT_ERROR; + return rc; + } + + rc = ( DAPI_CloseDeviceInstance(h) ) ? 
MT_OK : MT_ERROR;
+
+#ifdef DEBUG_PRINT
+    WriteDebugInfo("\n------ usb_close ------\n");
+    CloseDebugInfo();
+#endif
+
+    return rc;
+}
+
+
+//===============
+//* usb_read
+//===============
+call_result_t usb_read(
+    HANDLE h,
+    const u_int32_t trans_type,
+    const u_int8_t  slv_addr,
+    const u_int32_t mem_base,
+    const u_int32_t mem_size,
+    u_int8_t       *data_arr_r,
+    const u_int32_t data_size)
+{
+
+    // DIMAX
+    if (is_dimax) {
+        U2C_TRANSACTION t;
+        U2C_RESULT u2c_rc;
+        int i;
+#define DIMAX_READ_RETRIES 3
+
+        // validate parameters
+        if (data_size > sizeof(t.Buffer))
+            return MT_ENORSC;
+
+        // prepare the transaction
+        t.nSlaveDeviceAddress = (BYTE)slv_addr;
+        t.nMemoryAddressLength = (BYTE)mem_size;
+        t.nMemoryAddress = (DWORD)mem_base;
+        t.nBufferLength = (USHORT)data_size;
+        DPRINT5(("usb_read: going to read: slv_addr %#x, addr_len %d, mem_base %#x, data_sz %d\n",
+            (ULONG)t.nSlaveDeviceAddress,
+            t.nMemoryAddressLength, t.nMemoryAddress, t.nBufferLength ));
+
+        // read
+        for (i=0; i<DIMAX_READ_RETRIES; i++) {
+            u2c_rc = U2C_Read( h, &t );
+            if (u2c_rc == U2C_SUCCESS)
+                break;
+        }
+        if (u2c_rc != U2C_SUCCESS) {
+            DPRINT1(("usb_read: U2C_Read failed (%d), slv_addr %#x, mem_base %#x, data_sz %d\n",
+                u2c_rc, (ULONG)slv_addr, mem_base, data_size ));
+            return MT_ERROR;
+        }
+
+        memcpy(data_arr_r, t.Buffer, data_size);
+        return MT_OK;
+    }
+
+
+    // validate parameters
+    if (data_size > MAX_I2C_TRANSACTION)
+        return MT_ENORSC;
+
+#ifdef VERIFY_EACH_READ
+
+    call_result_t rc;
+    u_int8_t data1[MAX_I2C_TRANSACTION], data2[MAX_I2C_TRANSACTION];
+    if ((rc = usb_read_trans(h, trans_type, slv_addr, mem_base, data1, data_size)) != MT_OK)
+        return rc;
+    if ((rc = usb_read_trans(h, trans_type, slv_addr, mem_base, data2, data_size)) != MT_OK)
+        return rc;
+    if (memcmp(data1, data2, data_size))
+        return MT_ERROR;
+    memcpy(data_arr_r, data1, data_size);
+    return MT_OK;
+
+#else /* VERIFY_EACH_READ */
+
+    return usb_read_trans(h, trans_type, slv_addr, mem_base, data_arr_r, data_size);
+
+#endif /* VERIFY_EACH_READ */
+
+}
+
+
+//===============
+//* usb_write
+//===============
+call_result_t usb_write(
+    HANDLE h,
+    const u_int32_t trans_type,
+    const u_int8_t  slv_addr,
+    const u_int32_t mem_base,
+    const u_int32_t mem_size,
+    u_int8_t       *data_arr_w,
+    const u_int32_t data_size)
+{
+    u_int8_t * data_p = (u_int8_t *)&TransI2C.Data[0];
+
+    // DIMAX
+    if (is_dimax) {
+        U2C_TRANSACTION t;
+        U2C_RESULT u2c_rc;
+
+        // validate parameters
+        if (data_size > sizeof(t.Buffer))
+            return MT_ENORSC;
+
+        // prepare the transaction
+        t.nSlaveDeviceAddress = (BYTE)slv_addr;
+        t.nMemoryAddressLength = (BYTE)mem_size;
+        t.nMemoryAddress = (DWORD)mem_base;
+        t.nBufferLength = (USHORT)data_size;
+        memcpy(t.Buffer, data_arr_w, data_size);
+
+        // write
+        u2c_rc = U2C_Write( h, &t );
+        if (u2c_rc != U2C_SUCCESS) {
+            DPRINT1(("usb_write: U2C_Write failed (%d), slv_addr %#x, mem_base %#x, data_sz %d\n",
+                u2c_rc, (ULONG)slv_addr, mem_base, data_size ));
+            return MT_ERROR;
+        }
+
+        return MT_OK;
+    }
+
+
+    // validate parameters
+    if (data_size > MAX_I2C_TRANSACTION)
+        return MT_ENORSC;
+
+    // debug print
+#ifdef DEBUG_PRINT
+    WriteDebugInfo("\n------ usb_write ------\n");
+#endif
+
+    if (trans_type == I2C_TRANS_NOADR || trans_type == I2C_TRANS_32ADR) { /* work in I2C_TRANS_NOADR mode */
+
+        // calculate address size
+        static int sizes[] = {4,1,2,4,4};
+        int mem_base_sz = sizes[trans_type];
+
+        // validate parameters
+        if (mem_base_sz + data_size > MAX_I2C_TRANSACTION)
+            return MT_ENORSC;
+
+        // prepare parameters
+        TransI2C.byTransType = I2C_TRANS_NOADR;
+        TransI2C.wMemoryAddr = 0;
+        TransI2C.bySlvDevAddr = slv_addr<<1;
+        TransI2C.wCount = mem_base_sz + data_size;
+        memcpy(TransI2C.Data, (char*)&mem_base, mem_base_sz);
+        data_p += mem_base_sz;
+
+    } /* work in I2C_TRANS_NOADR mode */
+    else { /* work in I2C_TRANS_xxxADR mode */
+
+        // prepare parameters
+        TransI2C.byTransType = (BYTE)trans_type;
+        TransI2C.wMemoryAddr = (WORD)mem_base;
+        TransI2C.bySlvDevAddr = slv_addr<<1;
+        TransI2C.wCount = (WORD)data_size;
+
+    } /* work in I2C_TRANS_xxxADR mode */
+
+    // fill data
+    memcpy(data_p, data_arr_w, data_size);
+
+    // write data
+    LONG lWriteCnt = DAPI_WriteI2c(h, &TransI2C);
+
+    // debug print
+#ifdef DEBUG_PRINT
+    sprintf( g_buf, "write: type %d, addr 0x%02x, offset 0x%02x, req'd: %d, got: %d)\n",
+        TransI2C.byTransType, TransI2C.bySlvDevAddr, TransI2C.wMemoryAddr, TransI2C.wCount, lWriteCnt);
+    WriteDebugInfo(g_buf);
+    StoreDebugInfo( h );
+#endif
+
+    // check the results
+    if (lWriteCnt != TransI2C.wCount)
+        return MT_ERROR;
+
+    // delay on need
+#ifdef SLEEP_AFTER_EACH_USB_IO
+    Sleep(USB_WRITE_DELAY_MSEC);
+#endif
+
+    // verify on need
+#ifdef COMPARE_AFTER_EACH_WRITE
+
+    u_int8_t data[MAX_I2C_TRANSACTION];
+    call_result_t rc;
+
+    // read the written data
+    if ((rc = usb_read_trans(h, trans_type, slv_addr, mem_base, data, lWriteCnt)) != MT_OK)
+        return rc;
+
+    // delay on need
+#ifdef SLEEP_AFTER_EACH_USB_IO
+    Sleep(USB_READ_DELAY_MSEC);
+#endif
+
+    // compare the read with the written data
+    if (memcmp(data, data_arr_w, lWriteCnt))
+        return MT_ERROR;
+    return MT_OK;
+
+#else /* COMPARE_AFTER_EACH_WRITE */
+
+    return MT_OK;
+
+#endif /* COMPARE_AFTER_EACH_WRITE */
+
+}
+#endif
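
A hedged usage sketch for the pair above (the 0x56 slave address, zero
offset and 2-byte address cycle are illustrative only; I2C_TRANS_16ADR is
declared in usbi2cio.h further down in this patch):

    u_int8_t buf[4];
    HANDLE h = usb_open();

    if (h != INVALID_HANDLE_VALUE) {
        if (usb_read(h, I2C_TRANS_16ADR, 0x56, 0, 2, buf, sizeof(buf)) == MT_OK)
            printf("%02x %02x %02x %02x\n", buf[0], buf[1], buf[2], buf[3]);
        usb_close(h);
    }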
diff --git a/branches/Ndi/tools/mtcr/user/usb.h b/branches/Ndi/tools/mtcr/user/usb.h
new file mode 100644
index 00000000..347fa9c5
--- /dev/null
+++ b/branches/Ndi/tools/mtcr/user/usb.h
@@ -0,0 +1,172 @@
+/* - Mellanox Confidential and Proprietary -
+ *
+ * Copyright (C) December 2001, Mellanox Technologies Ltd. ALL RIGHTS RESERVED.
+ *
+ * Except as specifically permitted herein, no portion of the information,
+ * including but not limited to object code and source code, may be reproduced,
+ * modified, distributed, republished or otherwise exploited in any form or by
+ * any means for any purpose without the prior written permission of Mellanox
+ * Technologies Ltd. Use of software subject to the terms and conditions
+ * detailed in the file "LICENSE.txt".
+ *
+ * End of legal section ......................................................
+ *
+ * usb.h - EEPROM access via usb definitions
+ *
+ * Version: $Id$
+ *
+ * Author: Alex Rezinsky (alexr@mellanox.co.il)
+ */
+
+#ifndef __USB_H
+#define __USB_H
+
+#include "com_def.h"
+#include "mtcr.h"
+
+// I2C transaction type
+#define I2C_TRANS_32ADR 4
+
+// limitation constant
+#define MAX_I2C_TRANSACTION 64
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+#ifdef MTCR_USB_SUPPORT
+
+#define USB2I2C
+
+#include <windows.h>
+
+/******************************************************************************
+ * Function: usb_open
+ *
+ * Description:
+ *   Open usb device
+ *
+ * Parameters:
+ *
+ * Returns: HANDLE               - USB device handler or
+ *          INVALID_HANDLE_VALUE - Fail to open
+ *
+ *******************************************************************************/
+HANDLE usb_open();
+
+/*******************************************************************************
+ * Function: usb_close
+ *
+ * Description:
+ *   Close usb device
+ *
+ * Parameters:
+ *   h(IN): HANDLE
+ *     Device handler to close
+ *
+ * Returns: MT_OK success
+ *          MT_ERROR
+ *
+ ******************************************************************************/
+call_result_t usb_close(HANDLE h);
+
+
+/******************************************************************************
+ * Function: usb_read
+ *
+ * Description:
+ *   reads the data from the given device on I2C bus
+ *   starting from the given address in the device.
+ * + * Parameters: + * trans_type(IN): I2C transaction type + * data_arr_r(OUT): array of data + * data_size(IN): the size of the array. + * slv_addr(IN): I2C address of target device. + * mem_base(IN): the address in the memory space of the device + * from which we should start reading the data. + * Returns: MT_OK success + * MT_ERROR + * + * Notes: + * + ******************************************************************************/ +call_result_t usb_read( + HANDLE h, + const u_int32_t trans_type, + const u_int8_t slv_addr, + const u_int32_t mem_base, + const u_int32_t mem_size, + u_int8_t *data_arr_r, + const u_int32_t data_size); + + +/****************************************************************************** + * Function: usb_write + * + * Description: + * writes the data to the given device on I2C bus + * starting from the given address in the device. + * + * Parameters: + * trans_type(IN): I2C transaction type + * data_arr_w(IN ): array of data + * data_size(IN): the size of the array. + * slv_addr(IN): I2C address of target device. + * mem_base(IN): the address in the memory space of the device + * from which we should start writing the data. + * Returns: MT_OK success + * MT_ERROR + * + * Notes: + * + ******************************************************************************/ +call_result_t usb_write( + HANDLE h, + const u_int32_t trans_type, + const u_int8_t slv_addr, + const u_int32_t mem_base, + const u_int32_t mem_size, + u_int8_t *data_arr_r, + const u_int32_t data_size); + +BOOL usb_is_dimax(); + +BOOL usb_is_connected(); + +#else + +static inline BOOL usb_is_dimax() { return FALSE; } +static inline BOOL usb_is_connected() { return FALSE; } +static inline HANDLE usb_open() { return INVALID_HANDLE_VALUE; } +static inline call_result_t usb_close(HANDLE h) { return MT_ERROR; } + +static inline call_result_t usb_read( + HANDLE h, + const u_int32_t trans_type, + const u_int8_t slv_addr, + const u_int32_t mem_base, + const u_int32_t mem_size, + u_int8_t *data_arr_r, + const u_int32_t data_size) { return MT_ERROR; } + +static inline call_result_t usb_write( + HANDLE h, + const u_int32_t trans_type, + const u_int8_t slv_addr, + const u_int32_t mem_base, + const u_int32_t mem_size, + u_int8_t *data_arr_r, + const u_int32_t data_size) { return MT_ERROR; } + +#define I2C_TRANS_16ADR 0 /*dummy */ + +#endif + +#ifdef __cplusplus +} +#endif + +#endif + diff --git a/branches/Ndi/tools/mtcr/user/usb/I2cBrdg.lib b/branches/Ndi/tools/mtcr/user/usb/I2cBrdg.lib new file mode 100644 index 0000000000000000000000000000000000000000..b0974de74184195000d7220fa6f2b89c166b1e76 GIT binary patch literal 16838 zcmcIr&2t+?7Jqg&8!!e;62L$LPKXm@Cyu4qmK=;N*-{d%jjfgJB%5VRXr&=htzvm) zg)O-3CB40yuXc zVB%YV3p1Q%J_I02e#Pn1A^@W=@r*P%!fE;;0HawvBVBmFY3_RfqAcnoS{UV&!E2)Q zN1QTW;62jp*PPCu03e#X%<24Rc#kxTx{0Pfc9!|CGt07Ofla9V5w5G|kPwD>FD zBP|VcTE_k$dgBA4N~Q2-akI2l$>qzdr9yFQvs5W0Yb)*gZKzZ>w%6DD8Q`Bj2Yi`u2pyHP49ZO*{Eww1ob;gFzuVW-VSd( zsYbNT-nTIf@b3@6PR-C#=BnoKoG@J)y30n2Ec^%MgP2T z6Jj<>URCG7qUX0t*^P2`bqjl6?f892Gpje+jgDs-aD&w8TGgLfdlu{E_3ZV0 zC7a8YD(i(Z292P{#Z$zwbgFF@*sQavZ07XGao4MMyt;)~3ZNy=Ny5k4+SPQJ(k~qo zBhyv9*I9W_JNinfvokZD7yt}9i&;gh3%iW_WzU8J@-ShO#4)$Zov$F^EYC(nTHL)|jxpr&U6|9xFr_p3ridameiI-_Cd!0fn z*JyjSjy|hcTMPDpyAZR1)6Z_Bm{p8KkYZK}Ez^rmZGr>0uhsb0RxlS@R&LSj<;KqK zrf2V$H6`3&-$xP^Bj7Al%qkXfAR-F$Hu?=Tj*4PR*s;n4EExKHt*HBmRUEH+CD2 zLc#{NAwu7?23YuoW&~p3?Cbk7gKm`&+R3|2PE37swXbtrrGyoWg?|R*+7d$4U=A{|wJmQTlWW;O0EQFG%-O07J<8G3pz|-m%81o#P7m)eT}rt$coojV zI84HMxCj^EHJE~F7=fek6b!)NBh!6|*O%NY6$g~ZFfYU@YEOib$-0;k+KAD85rCRQ 
zifTevE<)LWG{q^E2%9l$Kthf4O9XM-6BM4f3EhD@TF6?RmlC_&u1OGojWaRV9LqVw zbE{4=+r2U%y;J>Kqw>U1Aaq+QIq{%T#bQEJVF{7GhT0Y6O+u$yuPwkYA;kyAPhDv{DC zP4?<1Ap!(7&V*PbT!P;zIhL3WkSUE8oE8X_HgQ&mB4ycL9W=F8|u#_T{+i>%fhp`YC;QwWNMReTh^cm#mOCls$ymkL1iv^gd$;$ zbzx;7x11tq$vI5+6$l*i3(1s2mLYSyF{l)`~?CL$Ry zyo?-o(jsRiiuJNK&S-LzfV@3J31Qb-3`RD`y(Y;(ybU&4kxThX;kV9dwf2hdUgznl zDG(amM_=+Yy)FC7tj>9j+#7C1qM(GKDcVAdsU;%Q30iT=cltyuSx=DP0!l3znKK|+ zwJ{Hp7|j5}vmxzwHTZI)FLI{q%_b{cS-+@SnDxJ{dV6Sn;1G=ee0*hi0I;INj)CdA z_t$&3&y@KszzSacOWgS942-NyH|o_+^$gaU_;DOh|3&)IzdwZM49tAz^I^S;ANZ*e zU@54}N4FUm`)&}=N3lLc#}iVcCg9k}5&Vtx(QyQ;QHO_*zS*d?TYIf{I%5;1*T!)F zaE(7!{*pD4Bn^PoEV>%OzlYhCWCk*|*4~9!bza=mtTwIRn7(-VwjB z){&h<;+I+C!5^T@GuQitejlSh{z2ODi!J|-PJ>Xwzxzso56n}Na|y{)2AQY)hhZax z5b7y)_Nezq@)WFm@`F6(-{QtUlcziy4{#aRlZZn5Mca_%B~CLenGzo}_GZ{aw7^F> zyx$Bn46DEVAYY^zCLhua!|E_Ui0^;m#y=ly|GM_6))gc7X-WjGG5QXa=I8nVMJ^sz zh9gverO^cG+G%7{;Al94-!mdEwkjAK*pD@a+;r;j}`Jg=S7OD!2`W`!zi@ zAq)RlcDtcY=>^BPrpqF6(%yMzSyCa>9~I zhxZ9?up*Kk7PkUW93<&fIFeOF@}!I;+J==3DSABicfZG!7(fZhQv#DiwMqsdUAv8t z0mIWh45pPATm4IRqkD;A=8SYPxK-BT;F!~R>scI+X9SMWdR-i=ro7;2zt!+6pbQ|o zA8H~7WoO3(0&8(mh9F(Ld7*d#DKmkVKF1{lmXf7p7NYlpX7njmJ*!|a=OwWtxCTiM zkvu1m7^_O*r4YG$q-&=UTG05&6~y$sj45)}Rzeava+pw!22g3UdQB}j4yu$$Rupfh zJo|+nDtm=l1`*w#4JckzQE0nINk*chFvRwkdMKQB@8#~0uHF1sljm>Zn4Aze;#Jut zNc)IBhIRVMEz zW~#cKk#HDa;_Mq-xjUq5r;#zO1jiK=CKnMKPLh4uW1O(0H0v*_-Jg}PIBjFeKqAK# z)7YE~A~E<#+#D~ZiEj95GJZ4cuZ;1Vbj!HVo?EQmnAgM+{2Us;K^8d7TX`}7>DtLJ l<~4pn--A!~?3@0LQZgAmVx%=OJ&*RMgnjcM*puy{{{vn$WAXq1 literal 0 HcmV?d00001 diff --git a/branches/Ndi/tools/mtcr/user/usb/UsbI2cIo.lib b/branches/Ndi/tools/mtcr/user/usb/UsbI2cIo.lib new file mode 100644 index 0000000000000000000000000000000000000000..b9bfb20716dee07151dba89889a16d02f13734d6 GIT binary patch literal 5962 zcmcgwPfr_16n};!6a&H7z^&D$MJ?%pf@8Y`Rh0vd+EoZ3FwM~zdjU%uFS1Rgo_dKQ zD)pR04!QQ0et=#g^^#M1sFYsQPtaSG8+~tPXZ>b&*R^Gum7do#@4flW%=_n~j6VjLd;&1_3LssUH2V~QXnH|X zDh)t1nY3?lm(fn&kbLZGbnm>`0dWYi?J(84J!*P)25Q8ZF zlBiO#@9%8ymMf*ZdmFpt3aFr+f$S zcgL=L<8(@{`<>HjH@s$X83$@SJgvm=gvR!%(=0jPH)@XEYG}a^&dEsZF`)E(BJTyM~!3K+woeRwt*Mo35{K+T91}DI70eCtI=_k_)1o@ zO^DYWKU*{ABF2`zY}}BT9Foc1ZB`FmXUB8hM)TP4#}H49TxzuARl-c6gO(%+r!FiR z@_L;Ytg_@Bo*l2B9UVEXqOmo!T$8PkY)=}-*dd$);|!j;3d=Hy9VF)f;PE)XlSzPA zQvhYm8Bb>c7N#+8qyXF*fHz3bGXNI$Kg0gFsPuXs;2hs4_j(%)&HGK?<%z0!I3h8})NeuNaT^;mGigv2C=M1hE_B z0%abRj%2t8y;@RKz06=iii~g;1mm$Sw0lN%m&tRg(2MmfGr~l>n-_#~D7-=;96mDo zI8^u{5o1i2&q5|;Jkzk8)hn(;MrMj(Q%5mX4mWt3*>xT_zRuBIK<9BsV4SYS`#1-< zQ?N0jT*F{vbzr0&!%dhxca~7M?^u}j;wuVOYAn@Vm-5+vRbO9bZ~1lDL2@%O3f52O zYyTtwGY#e|fW$=N8vg6?V7|iCgIOv585JxPBw6|q@8A`$_#QY*bvjTM_7v0x{&8{r z{3$|eeW_8ecB%_m)sHD0enq+8eOX##5td*0as?%K9sm5z*W?Ll2%lLX$?2Mi~>keHB|{z;SRI zHSggm&iBH)0v8twQ5ljZI09@1xYa+PLr zr9lpE%^vHdqd{VZiZ%_DH-=p&9cl)qUkh@S%k^41sJocF#vBq;6;MW`&~b+sxGpBE zU57;M;Xzbd1*_C*;vq46tdfe20F%|)!yy*bL2T^n=ZHqtsYHya;WcR9=*M%_t==+_ z7ZH`t6BC(T4LH=`nIZT@s%QI85feWh__z@_j-gO +typedef int BOOL; +typedef u_int8_t BYTE; +typedef u_int8_t UCHAR; +typedef u_int16_t WORD; +typedef u_int32_t DWORD; +typedef u_int32_t ULONG; +typedef void* HANDLE; +#define _stdcall /* nothing */ +#define INVALID_HANDLE_VALUE ((HANDLE)(-1)) +#endif + +#pragma pack(push, 1) + +typedef enum +{ + U2C_SUCCESS, + U2C_BAD_PARAMETER, + U2C_HARDWARE_NOT_FOUND, + U2C_SLAVE_DEVICE_NOT_FOUND, + U2C_TRANSACTION_FAILED, + U2C_SLAVE_OPENNING_FOR_WRITE_FAILED, + U2C_SLAVE_OPENNING_FOR_READ_FAILED, + U2C_SENDING_MEMORY_ADDRESS_FAILED, + U2C_SENDING_DATA_FAILED, + U2C_NOT_IMPLEMENTED, + U2C_NO_ACK, + U2C_DEVICE_BUSY, + U2C_MEMORY_ERROR, + U2C_UNKNOWN_ERROR, +} U2C_RESULT; + 
+ + +typedef struct _U2C_TRANSACTION +{ + BYTE nSlaveDeviceAddress; + BYTE nMemoryAddressLength; // can be from 0 up to 4 bytes + DWORD nMemoryAddress; + unsigned short nBufferLength; // can be from 1 up to 256 + BYTE Buffer[256]; +} U2C_TRANSACTION, *PU2C_TRANSACTION; + + +// I2C bus frequency values: +const BYTE U2C_I2C_FREQ_FAST = 0; +const BYTE U2C_I2C_FREQ_STD = 1; +const BYTE U2C_I2C_FREQ_83KHZ = 2; +const BYTE U2C_I2C_FREQ_71KHZ = 3; +const BYTE U2C_I2C_FREQ_62KHZ = 4; +const BYTE U2C_I2C_FREQ_50KHZ = 6; +const BYTE U2C_I2C_FREQ_25KHZ = 16; +const BYTE U2C_I2C_FREQ_10KHZ = 46; +const BYTE U2C_I2C_FREQ_5KHZ = 96; +const BYTE U2C_I2C_FREQ_2KHZ = 242; + + +// SPI bus frequency values: +const BYTE U2C_SPI_FREQ_200KHZ = 0; +const BYTE U2C_SPI_FREQ_100KHZ = 1; +const BYTE U2C_SPI_FREQ_83KHZ = 2; +const BYTE U2C_SPI_FREQ_71KHZ = 3; +const BYTE U2C_SPI_FREQ_62KHZ = 4; +const BYTE U2C_SPI_FREQ_50KHZ = 6; +const BYTE U2C_SPI_FREQ_25KHZ = 16; +const BYTE U2C_SPI_FREQ_10KHZ = 46; +const BYTE U2C_SPI_FREQ_5KHZ = 96; +const BYTE U2C_SPI_FREQ_2KHZ = 242; + + +typedef struct _U2C_SLAVE_ADDR_LIST +{ + BYTE nDeviceNumber; + BYTE List[256]; +} U2C_SLAVE_ADDR_LIST, *PU2C_SLAVE_ADDR_LIST; + +typedef struct _U2C_VERSION_INFO +{ + WORD MajorVersion; + WORD MinorVersion; +} U2C_VERSION_INFO, *PU2C_VERSION_INFO; + +typedef enum +{ + LS_RELEASED, + LS_DROPPED_BY_I2C_BRIDGE, + LS_DROPPED_BY_SLAVE, + LS_RAISED_BY_I2C_BRIDGE, +} U2C_LINE_STATE; + + +#pragma pack(pop) +// I2CBridge Board Initialization Routines +BYTE _stdcall U2C_GetDeviceCount(); +U2C_RESULT _stdcall U2C_GetSerialNum(HANDLE hDevice, long* pSerialNum); +U2C_RESULT _stdcall U2C_IsHandleValid(HANDLE hDevice); +HANDLE _stdcall U2C_OpenDevice(BYTE nDevice); +HANDLE _stdcall U2C_OpenDeviceBySerialNum(long nSerialNum); +U2C_RESULT _stdcall U2C_CloseDevice(HANDLE hDevice); +U2C_RESULT _stdcall U2C_GetFirmwareVersion(HANDLE hDevice, PU2C_VERSION_INFO pVersion); +U2C_RESULT _stdcall U2C_GetDriverVersion(HANDLE hDevice, PU2C_VERSION_INFO pVersion); +U2C_VERSION_INFO _stdcall U2C_GetDllVersion(); + +// I2C high level and configuration routines +U2C_RESULT _stdcall U2C_SetI2cFreq(HANDLE hDevice, BYTE Frequency); +U2C_RESULT _stdcall U2C_GetI2cFreq(HANDLE hDevice, BYTE *pFrequency); +U2C_RESULT _stdcall U2C_Read(HANDLE hDevice, PU2C_TRANSACTION pTransaction); +U2C_RESULT _stdcall U2C_Write(HANDLE hDevice, PU2C_TRANSACTION pTransaction); +U2C_RESULT _stdcall U2C_ScanDevices(HANDLE hDevice, PU2C_SLAVE_ADDR_LIST pList); + +// I2C low level routines +U2C_RESULT _stdcall U2C_Start(HANDLE hDevice); +U2C_RESULT _stdcall U2C_RepeatedStart(HANDLE hDevice); +U2C_RESULT _stdcall U2C_Stop(HANDLE hDevice); +U2C_RESULT _stdcall U2C_PutByte(HANDLE hDevice, BYTE Data); +U2C_RESULT _stdcall U2C_GetByte(HANDLE hDevice, BYTE *pData); +U2C_RESULT _stdcall U2C_PutByteWithAck(HANDLE hDevice, BYTE Data); +U2C_RESULT _stdcall U2C_GetByteWithAck(HANDLE hDevice, BYTE *pData, BOOL bAck); +U2C_RESULT _stdcall U2C_PutAck(HANDLE hDevice, BOOL bAck); +U2C_RESULT _stdcall U2C_GetAck(HANDLE hDevice); + +// I2c wire level routines +U2C_RESULT _stdcall U2C_ReadScl(HANDLE hDevice, U2C_LINE_STATE *pState); +U2C_RESULT _stdcall U2C_ReadSda(HANDLE hDevice, U2C_LINE_STATE *pState); +U2C_RESULT _stdcall U2C_ReleaseScl(HANDLE hDevice); +U2C_RESULT _stdcall U2C_ReleaseSda(HANDLE hDevice); +U2C_RESULT _stdcall U2C_DropScl(HANDLE hDevice); +U2C_RESULT _stdcall U2C_DropSda(HANDLE hDevice); + +// GPIO routines +U2C_RESULT _stdcall U2C_SetIoDirection(HANDLE hDevice, ULONG Value, ULONG Mask); +U2C_RESULT 
_stdcall U2C_GetIoDirection(HANDLE hDevice, ULONG *pValue); +U2C_RESULT _stdcall U2C_IoWrite(HANDLE hDevice, ULONG Value, ULONG Mask); +U2C_RESULT _stdcall U2C_IoRead(HANDLE hDevice, ULONG *pValue); +U2C_RESULT _stdcall U2C_SetSingleIoDirection(HANDLE hDevice, ULONG IoNumber, BOOL bOutput); +U2C_RESULT _stdcall U2C_GetSingleIoDirection(HANDLE hDevice, ULONG IoNumber, BOOL *pbOutput); +U2C_RESULT _stdcall U2C_SingleIoWrite(HANDLE hDevice, ULONG IoNumber, BOOL Value); +U2C_RESULT _stdcall U2C_SingleIoRead(HANDLE hDevice, ULONG IoNumber, BOOL *pValue); + +// SPI configuration routines +U2C_RESULT _stdcall U2C_SpiSetConfig(HANDLE hDevice, BYTE CPOL, BYTE CPHA); +U2C_RESULT _stdcall U2C_SpiGetConfig(HANDLE hDevice, BYTE *pCPOL, BYTE *pCPHA); +U2C_RESULT _stdcall U2C_SpiSetFreq(HANDLE hDevice, BYTE Frequency); +U2C_RESULT _stdcall U2C_SpiGetFreq(HANDLE hDevice, BYTE *pFrequency); +U2C_RESULT _stdcall U2C_SpiReadWrite(HANDLE hDevice, BYTE *pOutBuffer, BYTE *pInBuffer, unsigned short Length); +U2C_RESULT _stdcall U2C_SpiWrite(HANDLE hDevice, BYTE *pOutBuffer, unsigned short Length); +U2C_RESULT _stdcall U2C_SpiRead(HANDLE hDevice, BYTE *pInBuffer, unsigned short Length); + +#endif //I2C_BRIDGE_H_10DF0DED_E85F_4f14_88D9_610BEA2211F7 diff --git a/branches/Ndi/tools/mtcr/user/usb/usbi2cio.h b/branches/Ndi/tools/mtcr/user/usb/usbi2cio.h new file mode 100644 index 00000000..04961c80 --- /dev/null +++ b/branches/Ndi/tools/mtcr/user/usb/usbi2cio.h @@ -0,0 +1,83 @@ +// the following ifndef is for preventing double includes of this header file +#if !defined(__USBI2CIO_H__) + #define __USBI2CIO_H__ + + +#define DAPI_MAX_DEVICES 127 + + +#ifdef _DEBUG + #define DbgWrStr(sDebug) OutputDebugString((sDebug)) +#else + #define DbgWrStr(sDebug) +#endif + + + +//----------------------------------------------------------------------------- +// Constants +//----------------------------------------------------------------------------- +typedef enum { + // supported transaction types + I2C_TRANS_NOADR, // read or write with no address cycle + I2C_TRANS_8ADR, // read or write with 8 bit address cycle + I2C_TRANS_16ADR // read or write with 16 bit address cycle +} I2C_TRANS_TYPE; + + +//----------------------------------------------------------------------------- +// Structure Definitions +//----------------------------------------------------------------------------- +typedef struct _DEVINFO { // structure for device information + BYTE byInstance; + BYTE SerialId[9]; +} DEVINFO, *LPDEVINFO; + + +#pragma pack(1) // force byte alignment + +typedef struct _I2C_TRANS { + BYTE byTransType; + BYTE bySlvDevAddr; + WORD wMemoryAddr; + WORD wCount; + BYTE Data[256]; +} I2C_TRANS, *PI2C_TRANS; + + +//----------------------------------------------------------------------------- +// Global Variables +//----------------------------------------------------------------------------- + + +//----------------------------------------------------------------------------- +// Macros +//----------------------------------------------------------------------------- + + +//----------------------------------------------------------------------------- +// API Function Prototypes (exported) +//----------------------------------------------------------------------------- + +WORD _stdcall DAPI_GetDllVersion(void); +HANDLE _stdcall DAPI_OpenDeviceInstance(LPSTR lpsDevName, BYTE byDevInstance); +BOOL _stdcall DAPI_CloseDeviceInstance(HANDLE hDevInstance); +BOOL _stdcall DAPI_DetectDevice(HANDLE hDevInstance); +BYTE _stdcall DAPI_GetDeviceCount( LPSTR 
lpsDevName ); +BYTE _stdcall DAPI_GetDeviceInfo( LPSTR lpsDevName, LPDEVINFO lpDevInfo); +HANDLE _stdcall DAPI_OpenDeviceBySerialId(LPSTR lpsDevName, LPSTR lpsDevSerialId); +BOOL _stdcall DAPI_GetSerialId(HANDLE hDevInstance, LPSTR lpsDevSerialId); +BOOL _stdcall DAPI_ConfigIoPorts(HANDLE hDevInstance, ULONG ulIoPortConfig); +BOOL _stdcall DAPI_GetIoConfig(HANDLE hDevInstance, LPLONG lpulIoPortData); +BOOL _stdcall DAPI_ReadIoPorts(HANDLE hDevInstance, LPLONG lpulIoPortData); +BOOL _stdcall DAPI_WriteIoPorts(HANDLE hDevInstance, ULONG ulIoPortData, ULONG ulIoPortMask); +LONG _stdcall DAPI_ReadI2c(HANDLE hDevInstance, PI2C_TRANS TransI2C); +LONG _stdcall DAPI_WriteI2c(HANDLE hDevInstance, PI2C_TRANS TransI2C); +void _stdcall DAPI_EnablePolling(void); +void _stdcall DAPI_DisablePolling(void); +void _stdcall DAPI_GetPolledInfo(void); +LONG _stdcall DAPI_ReadDebugBuffer(LPSTR lpsDebugString, HANDLE hDevInstance, LONG ulMaxBytes); + + +// the following #endif is for preventing double includes of this header file +#endif \ No newline at end of file diff --git a/branches/Ndi/tools/mwrite/user/SOURCES b/branches/Ndi/tools/mwrite/user/SOURCES new file mode 100644 index 00000000..d39cbc74 --- /dev/null +++ b/branches/Ndi/tools/mwrite/user/SOURCES @@ -0,0 +1,40 @@ +TARGETNAME=mwrite +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. +!endif + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + + +SOURCES=mwrite.c \ + mwrite.rc + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\tools\mtcr\user; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\mtcr.lib +!else + $(TARGETPATH)\*\mtcr.lib +!endif + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG +!endif + +C_DEFINES=$(C_DEFINES) -D__WIN__ + +386_STDCALL=0 + + diff --git a/branches/Ndi/tools/mwrite/user/makefile b/branches/Ndi/tools/mwrite/user/makefile new file mode 100644 index 00000000..80d407b3 --- /dev/null +++ b/branches/Ndi/tools/mwrite/user/makefile @@ -0,0 +1,8 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file
+# that is shared by all the driver components of the Windows NT DDK
+#
+
+!INCLUDE ..\..\..\inc\openib.def
+
diff --git a/branches/Ndi/tools/mwrite/user/mwrite.c b/branches/Ndi/tools/mwrite/user/mwrite.c
new file mode 100644
index 00000000..4d982f6c
--- /dev/null
+++ b/branches/Ndi/tools/mwrite/user/mwrite.c
@@ -0,0 +1,62 @@
+/*
+ *
+ * mwrite.c - CR Space write access
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "mtcr.h"
+
+void usage(const char *n)
+{
+    printf("%s <device> <addr> <value> [<i2c_slave>]\n", n);
+    exit(1);
+}
+
+int main(int ac, char *av[])
+{
+    char *endp;
+    int rc=0;
+    unsigned int addr, val;
+    mfile *mf;
+    DType dtype = MST_TAVOR;
+
+    if (ac < 4)
+        usage(av[0]);
+    addr = strtoul(av[2], &endp, 0);
+    if (*endp)
+        usage(av[0]);
+    val = strtoul(av[3], &endp, 0);
+    if (*endp)
+        usage(av[0]);
+
+    if (strstr(av[1], "mt21108_pci") && !strstr(av[1], "i2cm"))
+        dtype = MST_GAMLA;
+    mf = mopend(av[1], dtype);
+    if ( !mf )
+    {
+        perror("mopen");
+        return 1;
+    }
+
+    if (ac >= 5)
+        mset_i2c_slave(mf, (unsigned char)strtoul(av[4],0,0));
+
+    if ((rc = mwrite4(mf, addr, val)) < 0)
+    {
+        mclose(mf);
+        perror("mwrite");
+        return 1;
+    }
+    if (rc < 4)
+    {
+        mclose(mf);
+        printf("Wrote only %d bytes\n", rc);
+        return 1;
+    }
+    mclose(mf);
+    return 0;
+}
diff --git a/branches/Ndi/tools/perftests/dirs b/branches/Ndi/tools/perftests/dirs
new file mode 100644
index 00000000..389156fd
--- /dev/null
+++ b/branches/Ndi/tools/perftests/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+    user
diff --git a/branches/Ndi/tools/perftests/user/README b/branches/Ndi/tools/perftests/user/README
new file mode 100644
index 00000000..213a6e17
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/README
@@ -0,0 +1,101 @@
+This directory includes gen2 uverbs microbenchmarks.
+
+The tests are intended as:
+    1) simple, efficient usage examples.
+       Please see the COPYING file if you intend to copy it literally.
+
+    2) a useful benchmark
+       e.g. for HW or SW tuning and/or functional testing.
+       Please post results/observations to the openib-general mailing
+       list. See http://openib.org/mailman/listinfo/openib-general
+       and http://www.openib.org "Contact Us" link for contact details.
+
+
+Testing methodology
+-------------------
+
+- uses CPU cycle counter to get time stamps without context switch.
+  Some CPU architectures do NOT have such capability. e.g. Intel 80486
+  or older PPC.
+
+- measures round-trip time but reports half of that as one-way latency,
+  i.e. it may not be sufficiently accurate for asymmetrical configurations.
+
+- Min/Median/Max result is reported.
+  The median (vs average) is less sensitive to extreme scores.
+  Typically the "Max" value is the first value measured.
+
+- larger samples only marginally help. The default (1000) is pretty good.
+  Note that an array of cycles_t (typically unsigned long) is allocated
+  once to collect samples and again to store the difference between them.
+  Really big sample sizes (e.g. 1 million) might expose other problems
+  with the program.
+
+- "-H" option will dump the histogram for additional statistical analysis.
+  See xgraph, ygraph, r-base (http://www.r-project.org/), pspp, or other
+  statistical math programs.
+
+Architectures tested: i686, x86_64, ia64
+
+
+
+Test Descriptions
+-----------------
+
+rdma_lat.c - latency test with RDMA write transactions
+rdma_bw.c - streaming BW test with RDMA write transactions
+
+
+The following tests are mainly useful for HW/SW benchmarking.
+They are not intended as actual usage examples.
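
For orientation, the measurement loop in each *_lat test reduces to roughly
the sketch below (get_cycles, get_median and cycles_compare come from
get_clock.h; run_one_roundtrip() is a placeholder for the real post/poll
logic, and 1000 is the default sample count mentioned above):

    cycles_t t1, t2, delta[1000];
    int i;

    for (i = 0; i < 1000; i++) {
        t1 = get_cycles();
        run_one_roundtrip();      /* placeholder: post send, poll completion */
        t2 = get_cycles();
        delta[i] = (t2 - t1) / 2; /* half the round trip = one-way latency */
    }
    qsort(delta, 1000, sizeof(cycles_t), cycles_compare);
    printf("median: %g usec\n", get_median(1000, delta) / get_cpu_mhz());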
+-----------------
+
+send_lat.c - latency test with send transactions
+send_bw.c - BW test with send transactions
+write_lat.c - latency test with RDMA write transactions
+write_bw.c - BW test with RDMA write transactions
+read_lat.c - latency test with RDMA read transactions
+read_bw.c - BW test with RDMA read transactions
+
+
+Build Tests
+-----------
+
+"make" to build all tests
+
+    Debian: build-dep on linux-kernel-headers (for asm/timex.h file)
+            build-dep on libibverbs-dev
+            depends on libibverbs1
+
+
+Run Tests
+---------
+
+Prerequisites:
+    kernel 2.6
+    ib_uverbs (kernel module) matches libibverbs
+    ("match" means binary compatible, but ideally same SVN rev)
+    Debian: dpkg -i libibverbs1_0.1.0-1_ia64.deb
+
+Server:    ./<test name>
+Client:    ./<test name> <server address>
+
+  o IMPORTANT: The SAME OPTIONS must be passed to both server and client.
+  o "--help" will list the available <options>.
+  o <server address> is an IPv4 or IPv6 address.
+    You can use the IPoIB address if you have IPoIB configured.
+
+You need to be running a Subnet Manager on the switch or one of the nodes
+in your fabric. To use the opensm tool for this purpose, run
+    modprobe ib_umad
+    opensm &
+on one of the nodes
+
+First load ib_uverbs on both client and server with something like:
+    modprobe ib_uverbs
+
+Then (e.g.) "rdma_lat -C" on the server side.
+Lastly "rdma_lat -C 10.0.1.31" on the client.
+
+rdma_lat will exit on both server and client after printing results.
+
diff --git a/branches/Ndi/tools/perftests/user/TODO b/branches/Ndi/tools/perftests/user/TODO
new file mode 100644
index 00000000..9cd24526
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/TODO
@@ -0,0 +1 @@
+- support -- option ( like --port ...)
diff --git a/branches/Ndi/tools/perftests/user/clock_test.c b/branches/Ndi/tools/perftests/user/clock_test.c
new file mode 100644
index 00000000..3effb469
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/clock_test.c
@@ -0,0 +1,24 @@
+#include <stdio.h>
+#include <unistd.h>
+#include "get_clock.h"
+
+int main()
+{
+    double mhz;
+    cycles_t c1, c2;
+
+    mhz = get_cpu_mhz();
+    if (!mhz) {
+        printf("Unable to calibrate cycles. Exiting.\n");
+        return 2;
+    }
+
+    printf("Type CTRL-C to cancel.\n");
+    for(;;)
+    {
+        c1 = get_cycles();
+        sleep(1);
+        c2 = get_cycles();
+        printf("1 sec = %g usec\n", (c2 - c1) / mhz);
+    }
+}
diff --git a/branches/Ndi/tools/perftests/user/dirs b/branches/Ndi/tools/perftests/user/dirs
new file mode 100644
index 00000000..efc41e71
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/dirs
@@ -0,0 +1,9 @@
+DIRS=\
+    send_bw \
+    send_lat \
+    write_lat \
+    write_bw \
+    read_lat \
+    read_bw
+
+# write_bw_postlist rdma_lat rdma_bw send_lat write_lat write_bw read_lat read_bwr
diff --git a/branches/Ndi/tools/perftests/user/get_clock.c b/branches/Ndi/tools/perftests/user/get_clock.c
new file mode 100644
index 00000000..eceb5152
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/get_clock.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ *
+ * Author: Michael S. Tsirkin
+ */
+
+/* #define DEBUG 1 */
+/* #define DEBUG_DATA 1 */
+/* #define GET_CPU_MHZ_FROM_PROC 1 */
+
+/* For gettimeofday */
+#define _BSD_SOURCE
+#include <sys/time.h>
+
+#include <stdio.h>
+#include <unistd.h>
+#include "get_clock.h"
+
+#ifndef DEBUG
+#define DEBUG 0
+#endif
+#ifndef DEBUG_DATA
+#define DEBUG_DATA 0
+#endif
+
+#define MEASUREMENTS 200
+#define USECSTEP 10
+#define USECSTART 100
+
+/*
+   Use linear regression to calculate cycles per microsecond.
+   http://en.wikipedia.org/wiki/Linear_regression#Parameter_estimation
+*/
+static double sample_get_cpu_mhz(void)
+{
+    struct timeval tv1, tv2;
+    cycles_t start;
+    double sx = 0, sy = 0, sxx = 0, syy = 0, sxy = 0;
+    double tx, ty;
+    int i;
+
+    /* Regression: y = a + b x */
+    long x[MEASUREMENTS];
+    cycles_t y[MEASUREMENTS];
+    double a; /* system call overhead in cycles */
+    double b; /* cycles per microsecond */
+    double r_2;
+
+    for (i = 0; i < MEASUREMENTS; ++i) {
+        start = get_cycles();
+
+        if (gettimeofday(&tv1, NULL)) {
+            fprintf(stderr, "gettimeofday failed.\n");
+            return 0;
+        }
+
+        do {
+            if (gettimeofday(&tv2, NULL)) {
+                fprintf(stderr, "gettimeofday failed.\n");
+                return 0;
+            }
+        } while ((tv2.tv_sec - tv1.tv_sec) * 1000000 +
+                 (tv2.tv_usec - tv1.tv_usec) < USECSTART + i * USECSTEP);
+
+        x[i] = (tv2.tv_sec - tv1.tv_sec) * 1000000 +
+               tv2.tv_usec - tv1.tv_usec;
+        y[i] = get_cycles() - start;
+        if (DEBUG_DATA)
+            fprintf(stderr, "x=%ld y=%Ld\n", x[i], (long long)y[i]);
+    }
+
+    for (i = 0; i < MEASUREMENTS; ++i) {
+        tx = x[i];
+        ty = y[i];
+        sx += tx;
+        sy += ty;
+        sxx += tx * tx;
+        syy += ty * ty;
+        sxy += tx * ty;
+    }
+
+    b = (MEASUREMENTS * sxy - sx * sy) / (MEASUREMENTS * sxx - sx * sx);
+    a = (sy - b * sx) / MEASUREMENTS;
+
+    if (DEBUG)
+        fprintf(stderr, "a = %g\n", a);
+    if (DEBUG)
+        fprintf(stderr, "b = %g\n", b);
+    if (DEBUG)
+        fprintf(stderr, "a / b = %g\n", a / b);
+    r_2 = (MEASUREMENTS * sxy - sx * sy) * (MEASUREMENTS * sxy - sx * sy) /
+          (MEASUREMENTS * sxx - sx * sx) /
+          (MEASUREMENTS * syy - sy * sy);
+
+    if (DEBUG)
+        fprintf(stderr, "r^2 = %g\n", r_2);
+    if (r_2 < 0.9) {
+        fprintf(stderr,"Correlation coefficient r^2: %g < 0.9\n", r_2);
+        return 0;
+    }
+
+    return b;
+}
+
+static double proc_get_cpu_mhz(void)
+{
+    FILE* f;
+    char buf[256];
+    double mhz = 0.0;
+
+    f =
fopen("/proc/cpuinfo","r"); + if (!f) + return 0.0; + while(fgets(buf, sizeof(buf), f)) { + double m; + int rc; + rc = sscanf(buf, "cpu MHz : %lf", &m); + if (rc != 1) { /* PPC has a different format */ + rc = sscanf(buf, "clock : %lf", &m); + if (rc != 1) + continue; + } + if (mhz == 0.0) { + mhz = m; + continue; + } + if (mhz != m) { + fprintf(stderr, "Conflicting CPU frequency values" + " detected: %lf != %lf\n", mhz, m); + return 0.0; + } + } + fclose(f); + return mhz; +} + + +double get_cpu_mhz(void) +{ + double sample, proc, delta; + sample = sample_get_cpu_mhz(); + proc = proc_get_cpu_mhz(); + + if (!proc || !sample) + return 0; + + delta = proc > sample ? proc - sample : sample - proc; + if (delta / proc > 0.01) { + fprintf(stderr, "Warning: measured timestamp frequency " + "%g differs from nominal %g MHz\n", + sample, proc); + return sample; + } + return proc; +} diff --git a/branches/Ndi/tools/perftests/user/get_clock.h b/branches/Ndi/tools/perftests/user/get_clock.h new file mode 100644 index 00000000..d46cb2c2 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/get_clock.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + * + * Author: Michael S. Tsirkin + */ + +#ifndef GET_CLOCK_H +#define GET_CLOCK_H + +#include "complib/cl_timer.h" + +typedef uint64_t cycles_t; + +static int __cdecl cycles_compare(const void * aptr, const void * bptr) +{ + const cycles_t *a = aptr; + const cycles_t *b = bptr; + if (*a < *b) return -1; + if (*a > *b) return 1; + return 0; + +} +/* + * When there is an + * odd number of samples, the median is the middle number. + * even number of samples, the median is the mean of the + * two middle numbers. 
+ *
+ */
+static inline cycles_t get_median(int n, cycles_t delta[])
+{
+    if (n % 2)
+        return delta[n / 2];
+    else
+        return (delta[n / 2] + delta[n / 2 - 1]) / 2;
+}
+
+
+static inline cycles_t get_cycles()
+{
+    return cl_get_tick_count();
+}
+
+static double get_cpu_mhz(void)
+{
+    return (double)cl_get_tick_freq();
+}
+
+#endif
diff --git a/branches/Ndi/tools/perftests/user/getopt.c b/branches/Ndi/tools/perftests/user/getopt.c
new file mode 100644
index 00000000..485e272a
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/getopt.c
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <ctype.h>
+#include <windows.h>
+
+#include "getopt.h"
+
+/* for preventing C4996-warning on deprecated functions like strtok in VS 8.0.
*/ +#pragma warning(disable : 4996) + +/* Global variables for getopt_long */ +char *optarg; +int optind = 1; +int opterr = 1; +int optopt = '?'; + +static char* get_char_option(const char* optstring,char*const* argv,int argc, int iArg, int* opt_ind,char* opt_p); + +/* * * * * * * * * * */ + +int iArg = 1; + +int getopt(int argc, char *const*argv, const char *optstring) +{ + + char chOpt; + + if (iArg == argc) + { + return (EOF); + } + + if (argv[iArg][0] != '-') + { + /* Does not start with a - - we are done scanning */ + return (EOF); + } + + /*termination of scanning */ + if (!strcmp("--",argv[iArg])) { + return EOF; + } + optarg = get_char_option(optstring,argv,argc,iArg,&optind,&chOpt); + iArg = optind; + return chOpt; +} + +/* * * * * * * * * * */ + +int getopt_long(int argc, char *const*argv, + const char *optstring, + const struct option *longopts, int *longindex) +{ + char chOpt; + char tmp_str[256]; + char* tmp_arg = NULL; + char* tok=NULL; + int i; + char tokens[2] = {'='}; + + if (iArg == argc) + { + return (EOF); + } + + if (argv[iArg][0] != '-') + { + /* Does not start with a - - we are done scanning */ + return (EOF); + } + + /*termination of scanning */ + if (!strcmp("--",argv[iArg])) { + return EOF; + } + + + /* char option : -d 5 */ + if ((argv[iArg][0] == '-') &&(argv[iArg][1] != '-') ) { + optarg = get_char_option(optstring,argv,argc,iArg,&optind,&chOpt); + iArg = optind; + return chOpt; + } + + /* Look for this string in longopts */ + strcpy(tmp_str,&(argv[iArg][2])); + + /*get the option */ + tok = strtok(tmp_str,tokens); + + for (i = 0; longopts[i].name; i++){ + if (strcmp (tok, longopts[i].name) == 0) + { + /* We have a match */ + if (longindex != NULL) *longindex = i; + + if (longopts[i].flag != NULL) { + *(longopts[i].flag) = longopts[i].val; + } + + if (longopts[i].has_arg != no_argument) + { + /*get the argument */ + + if (strchr(argv[iArg],'=') != NULL) + { + optarg = strtok(NULL,tokens); + }else { + /*the next arg in cmd line is the param */ + tmp_arg = argv[iArg+1]; + if (*tmp_arg == '-') { + /*no param is found */ + chOpt = '?'; + if ((longopts[i].has_arg == required_argument) && opterr) + { + fprintf (stderr, "Option %s requires argument\n",tok); + } + + }else { + optarg = tmp_arg; + iArg++; + optind++; + } + } + + }/*longopts */ + + iArg++; + optind++; + if (longopts[i].flag == 0) + return (longopts[i].val); + else return 0; + + }/*end if strcmp */ + } + + return ('?'); +} + +/* * * * * * * * * * * */ + +static char* get_char_option(const char* optstring,char*const* argv,int argc, int iArg, int* opt_ind,char* opt_p) + { + char chOpt; + char* tmp_str; + char* prm = NULL; + + chOpt = argv[iArg][1]; + + + /*non valid argument*/ + if (!isalpha(chOpt)) + { + chOpt = EOF; + goto end; + } + + tmp_str = strchr(optstring, chOpt); + + /*the argument wasn't found in optstring */ + if (tmp_str == NULL){ + chOpt = EOF; + optopt = chOpt; + goto end; + } + + /* don't need argument */ + if (tmp_str[1]!= ':' ) { + goto end; + } + + if (argv[iArg][2] != '\0') + { + // param is attached to option: -po8889 + prm = &(argv[iArg][2]); + goto end; + } + + // must look at next argv for param + /*at the end of arg list */ + if ((iArg)+1 == argc) { + /* no param will be found */ + if (tmp_str[2]== ':' ) { + /* optional argument ::*/ + goto end; + }else{ + chOpt = EOF; + goto end; + } + } + + prm = &(argv[(iArg)+1][0]); + if (*prm == '-' ) + { + // next argv is a new option, so param + // not given for current option + if (tmp_str[2]== ':' ) { + /* optional argument ::*/ + goto end; 
+ } + else + { + chOpt = EOF; + goto end; + } + } + + // next argv is the param + (*opt_ind)++; + + +end: + (*opt_ind)++; + *opt_p = chOpt; + return prm; +} + diff --git a/branches/Ndi/tools/perftests/user/getopt.h b/branches/Ndi/tools/perftests/user/getopt.h new file mode 100644 index 00000000..20efd5cc --- /dev/null +++ b/branches/Ndi/tools/perftests/user/getopt.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef GETOPT_H +#define GETOPT_H + +/* the string argument that came with the option */ +extern char *optarg; + +/* Index in ARGV of the next element to be scanned. + + When `getopt' returns -1, this is the index of the first of the + non-option elements that the caller should itself scan. + + Otherwise, `optind' communicates from one call to the next + how much of ARGV has been scanned so far. */ + +extern int optind; + +/* Callers store zero here to inhibit the error message `getopt' prints + for unrecognized options. */ + +extern int opterr; + +/* Set to an option character which was unrecognized. */ +extern int optopt; + +/* Describe the long-named options requested by the application. + The LONG_OPTIONS argument to getopt_long or getopt_long_only is a vector + of `struct option' terminated by an element containing a name which is + zero. + + The field `has_arg' is: + no_argument (or 0) if the option does not take an argument, + required_argument (or 1) if the option requires an argument, + optional_argument (or 2) if the option takes an optional argument. + + If the field `flag' is not NULL, it points to a variable that is set + to the value given in the field `val' when the option is found, but + left unchanged if the option is not found. + + To have a long-named option do something other than set an `int' to + a compiled-in constant, such as set a value from `optarg', set the + option's `flag' field to zero and its `val' field to a nonzero + value (the equivalent single-letter option character, if there is + one). For long options that have a zero `flag' field, `getopt' + returns the contents of the `val' field. */ + +struct option +{ + const char *name; + int has_arg; + int *flag; + int val; +}; + +/* Names for the values of the `has_arg' field of `struct option'. 
*/ +#define no_argument 0 +#define required_argument 1 +#define optional_argument 2 + +/* Return the option character from OPTS just read. Return -1 when + there are no more options. For unrecognized options, or options + missing arguments, `optopt' is set to the option letter, and '?' is + returned. + + The OPTS string is a list of characters which are recognized option + letters, optionally followed by colons, specifying that that letter + takes an argument, to be placed in `optarg'. + + If a letter in OPTS is followed by two colons, its argument is + optional. This behavior is specific to the GNU `getopt'. + + The argument `--' causes premature termination of argument + scanning, explicitly telling `getopt' that there are no more + options. + + If OPTS begins with `--', then non-option arguments are treated as + arguments to the option '\0'. This behavior is specific to the GNU + `getopt'. */ + +extern int getopt(int argc, char *const *argv, const char *shortopts); +extern int getopt_long(int argc, char *const*argv, + const char *optstring, + const struct option *longopts, int *longindex); + + +#endif diff --git a/branches/Ndi/tools/perftests/user/perf_defs.h b/branches/Ndi/tools/perftests/user/perf_defs.h new file mode 100644 index 00000000..0719b12b --- /dev/null +++ b/branches/Ndi/tools/perftests/user/perf_defs.h @@ -0,0 +1,155 @@ +/* + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + * + * Author: Yossi Leybovich + */ + +#ifndef H_PERF_SOCK_H +#define H_PERF_SOCK_H + + +#include +#include + +#include +#include +#include +#include +#include +#include + + +#include +#include + +#define KEY_MSG_SIZE (sizeof "0000:000000:000000:00000000:0000000000000000") +#define KEY_PRINT_FMT "%04x:%06x:%06x:%08x:%x" +#define KEY_SCAN_FMT "%x:%x:%x:%x:%x" + +#define VERSION 2.0 +#define ALL 1 + +#define RC 0 +#define UC 1 +#define UD 3 + + +#define PINGPONG_SEND_WRID 1 +#define PINGPONG_RECV_WRID 2 +#define PINGPONG_RDMA_WRID 3 + + +#define SIGNAL 1 +#define MAX_INLINE 400 + + +#if 0 +#define PERF_ENTER printf("%s: ===>\n",__FUNCTION__); +#define PERF_EXIT printf("%s: <===\n",__FUNCTION__); +#define PERF_DEBUG printf +#else +#define PERF_ENTER +#define PERF_EXIT +#define PERF_DEBUG // +#endif + +struct pingpong_context { + ib_ca_handle_t context; + ib_ca_handle_t ca; + ib_ca_attr_t *ca_attr; + ib_al_handle_t al; +//PORTED struct ibv_comp_channel *channel; + void* channel; //PORTED REMOVE + ib_pd_handle_t pd; + ib_mr_handle_t mr; + uint32_t rkey; + uint32_t lkey; + ib_cq_handle_t scq; + ib_cq_handle_t rcq; + ib_qp_handle_t *qp; + ib_qp_attr_t *qp_attr; + void *buf; + unsigned size; + int tx_depth; + + ib_local_ds_t list; + ib_local_ds_t recv_list; + ib_send_wr_t wr; + ib_recv_wr_t rwr; + + ib_av_handle_t av; + + volatile char *post_buf; + volatile char *poll_buf; + + int *scnt,*ccnt; +}; + + +struct pingpong_dest { + ib_net16_t lid; + ib_net32_t qpn; + ib_net32_t psn; + uint32_t rkey; + uint64_t vaddr; +}; + + +struct report_options { + int unsorted; + int histogram; + int cycles; /* report delta's in cycles, not microsec's */ +}; + + +static int +pp_write_keys(SOCKET sockfd, const struct pingpong_dest *my_dest); + +static int +pp_read_keys(SOCKET sockfd, struct pingpong_dest *rem_dest); + + SOCKET + pp_client_connect(const char *servername, int port); + +int +pp_client_exch_dest(SOCKET sockfd, const struct pingpong_dest *my_dest, + struct pingpong_dest *rem_dest); + +SOCKET +pp_server_connect(int port); + +int +pp_server_exch_dest(SOCKET sockfd, const struct pingpong_dest *my_dest, + struct pingpong_dest* rem_dest); + +#endif diff --git a/branches/Ndi/tools/perftests/user/perf_utils.c b/branches/Ndi/tools/perftests/user/perf_utils.c new file mode 100644 index 00000000..395e4925 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/perf_utils.c @@ -0,0 +1,207 @@ + +#include "perf_defs.h" + +const char *sock_get_error_str(void) +{ + switch (WSAGetLastError()) { + case WSANOTINITIALISED: + return "WSANOTINITIALISED"; /* A successful WSAStartup call must occur before using this function */ + case WSAENETDOWN: + return "WSAENETDOWN"; /* The network subsystem has failed */ + case WSAEFAULT: + return "WSAEFAULT"; /* The buf parameter is not completely contained in a valid part of the user address space */ + case WSAENOTCONN: + return "WSAENOTCONN"; /* The socket is not connected */ + case WSAEINTR: + return "WSAEINTR"; /* The (blocking) call was canceled through WSACancelBlockingCall */ + case WSAEINPROGRESS: + return "WSAEINPROGRESS"; /* A blocking Windows Sockets 1.1 call is in progress, or the service provider is still processing a callback function */ + case WSAENETRESET: + return "WSAENETRESET"; /* The connection has been broken due to the keep-alive activity detecting a failure while the operation was in progress */ + case WSAENOTSOCK: + return "WSAENOTSOCK"; /* The descriptor is not a socket */ + case WSAEOPNOTSUPP: + return "WSAEOPNOTSUPP"; /* MSG_OOB was specified, but 
the socket is not stream-style such as type SOCK_STREAM, OOB data is not supported in the communication domain associated with this socket, or the socket is unidirectional and supports only send operations */
+	case WSAESHUTDOWN:
+		return "WSAESHUTDOWN"; /* The socket has been shut down; it is not possible to receive on a socket after shutdown has been invoked with how set to SD_RECEIVE or SD_BOTH */
+	case WSAEWOULDBLOCK:
+		return "WSAEWOULDBLOCK"; /* The socket is marked as nonblocking and the receive operation would block */
+	case WSAEMSGSIZE:
+		return "WSAEMSGSIZE"; /* The message was too large to fit into the specified buffer and was truncated */
+	case WSAEINVAL:
+		return "WSAEINVAL"; /* The socket has not been bound with bind, or an unknown flag was specified, or MSG_OOB was specified for a socket with SO_OOBINLINE enabled or (for byte stream sockets only) len was zero or negative */
+	case WSAECONNABORTED:
+		return "WSAECONNABORTED"; /* The virtual circuit was terminated due to a time-out or other failure. The application should close the socket as it is no longer usable */
+	case WSAETIMEDOUT:
+		return "WSAETIMEDOUT"; /* The connection has been dropped because of a network failure or because the peer system failed to respond */
+	case WSAECONNRESET:
+		return "WSAECONNRESET"; /* The virtual circuit was reset by the remote side executing a hard or abortive close. The application should close the socket as it is no longer usable. On a UDP datagram socket this error would indicate that a previous send operation resulted in an ICMP "Port Unreachable" message */
+	default:
+		return "Unknown error";
+	}
+}
+
+/* The wire format must stay in sync with KEY_MSG_SIZE in perf_defs.h:
+ * four small hex fields plus a 16-digit virtual address.  The explicit
+ * format strings here use the Microsoft CRT's I64 length modifier so
+ * the full 64-bit vaddr survives the exchange; the plain %x that
+ * KEY_PRINT_FMT ends with would truncate it and invoke undefined
+ * varargs behavior. */
+static int pp_write_keys(SOCKET sockfd, const struct pingpong_dest *my_dest)
+{
+	char msg[KEY_MSG_SIZE];
+	PERF_ENTER;
+	sprintf(msg, "%04x:%06x:%06x:%08x:%016I64x", my_dest->lid, my_dest->qpn,
+		my_dest->psn, my_dest->rkey, my_dest->vaddr);
+
+	if (send(sockfd, msg, sizeof msg,0) != sizeof msg) {
+		perror("pp_write_keys");
+		fprintf(stderr, "Couldn't send local address %s\n",sock_get_error_str());
+		return -1;
+	}
+	PERF_EXIT;
+	return 0;
+}
+
+static int pp_read_keys(SOCKET sockfd,
+			struct pingpong_dest *rem_dest)
+{
+	int parsed;
+	unsigned int lid, qpn, psn, rkey;
+	uint64_t vaddr;
+
+	char msg[KEY_MSG_SIZE];
+	PERF_ENTER;
+	if (recv(sockfd, msg, sizeof msg, 0) != sizeof msg) {
+		perror("pp_read_keys");
+		fprintf(stderr, "Couldn't read remote address %s\n",sock_get_error_str());
+		return -1;
+	}
+
+	/* scan into correctly sized temporaries: %x stores an unsigned int,
+	 * which would overrun the 16-bit lid field and leave the upper half
+	 * of the 64-bit vaddr uninitialized if aimed at the struct directly */
+	parsed = sscanf(msg, "%x:%x:%x:%x:%I64x", &lid, &qpn,
+			&psn, &rkey, &vaddr);
+
+	if (parsed != 5) {
+		fprintf(stderr, "Couldn't parse line <%.*s > parsed = %d %s\n",
+			(int)sizeof msg, msg,parsed,sock_get_error_str());
+		return -1;
+	}
+	rem_dest->lid = (ib_net16_t)lid;
+	rem_dest->qpn = qpn;
+	rem_dest->psn = psn;
+	rem_dest->rkey = rkey;
+	rem_dest->vaddr = vaddr;
+	PERF_EXIT;
+	return 0;
+}
+
+SOCKET pp_client_connect(const char *servername, int port)
+{
+	struct addrinfo *res, *t;
+	struct addrinfo hints = {
+		0,		//ai_flags
+		AF_UNSPEC,	// ai_family
+		SOCK_STREAM	//ai_socktype
+	};
+	char service[8];
+	int n;
+	SOCKET sockfd = INVALID_SOCKET;
+	PERF_ENTER;
+	sprintf(service, "%d", port);
+	n = getaddrinfo(servername, service, &hints, &res);
+
+	if (n) {
+		fprintf(stderr, "%s for %s:%d\n", sock_get_error_str(), servername, port);
+		return sockfd;
+	}
+
+	for (t = res; t; t = t->ai_next) {
+		sockfd = socket(t->ai_family, t->ai_socktype, t->ai_protocol);
+		if (sockfd != INVALID_SOCKET) {
+			if (!connect(sockfd, t->ai_addr, t->ai_addrlen))
+				break;
+			closesocket(sockfd);
+			sockfd = INVALID_SOCKET;
+		}
+	}
+
+	freeaddrinfo(res);
+
+	if (sockfd == INVALID_SOCKET) {
+		fprintf(stderr, "Couldn't
connect to %s:%d\n", servername, port); + return sockfd; + } + PERF_EXIT; + return sockfd; +} + +int pp_client_exch_dest(SOCKET sockfd, const struct pingpong_dest *my_dest, + struct pingpong_dest *rem_dest) +{ + PERF_ENTER; + if (pp_write_keys(sockfd, my_dest)) + return -1; + PERF_EXIT; + return pp_read_keys(sockfd,rem_dest); +} + +SOCKET pp_server_connect(int port) +{ + struct addrinfo *res, *t; + struct addrinfo hints = { + AI_PASSIVE, //ai_flags + AF_UNSPEC, // ai_family + SOCK_STREAM //ai_socktype + }; + char service[8]; + SOCKET sockfd = INVALID_SOCKET, connfd; + int n; + PERF_ENTER; + sprintf(service, "%d", port); + n = getaddrinfo(NULL, service, &hints, &res); + + if (n) { + fprintf(stderr, "%s for port %d\n", sock_get_error_str(), port); + return n; + } + + for (t = res; t; t = t->ai_next) { + sockfd = socket(t->ai_family, t->ai_socktype, t->ai_protocol); + if (sockfd != INVALID_SOCKET) { + n = 1; + + setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (const char*)&n, sizeof n); + + if (!bind(sockfd, t->ai_addr, t->ai_addrlen)) + break; + closesocket(sockfd); + sockfd = INVALID_SOCKET; + } + } + + freeaddrinfo(res); + + if (sockfd == INVALID_SOCKET) { + fprintf(stderr, "Couldn't listen to port %d\n", port); + return sockfd; + } + + listen(sockfd, 1); + connfd = accept(sockfd, NULL, 0); + if (connfd == INVALID_SOCKET) { + perror("server accept"); + fprintf(stderr, "accept() failed\n"); + closesocket(sockfd); + return connfd; + } + + closesocket(sockfd); + PERF_EXIT; + return connfd; +} + +int pp_server_exch_dest(SOCKET sockfd, const struct pingpong_dest *my_dest, + struct pingpong_dest* rem_dest) +{ + PERF_ENTER; + if (pp_read_keys(sockfd, rem_dest)) + return -1; + + PERF_EXIT; + return pp_write_keys(sockfd, my_dest); +} + + + + + + + diff --git a/branches/Ndi/tools/perftests/user/read_bw/SOURCES b/branches/Ndi/tools/perftests/user/read_bw/SOURCES new file mode 100644 index 00000000..e08f73a6 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_bw/SOURCES @@ -0,0 +1,28 @@ +TARGETNAME=ib_read_bw +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +C_DEFINES=$(C_DEFINES) /D__WIN__ + +SOURCES=read_bw.rc \ + ..\getopt.c \ + ..\perf_utils.c \ + read_bw.c + +INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user + +RCOPTIONS=/I..\..\win\include + +TARGETLIBS= \ + $(DDK_LIB_PATH)\Ws2_32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/perftests/user/read_bw/makefile b/branches/Ndi/tools/perftests/user/read_bw/makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_bw/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/perftests/user/read_bw/read_bw.c b/branches/Ndi/tools/perftests/user/read_bw/read_bw.c new file mode 100644 index 00000000..c6c96364 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_bw/read_bw.c @@ -0,0 +1,785 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. 
+ * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: read_bw.c 1955 2007-02-19 14:46:04Z sleybo $ + */ + +#include "getopt.h" +#include "perf_defs.h" +#include "get_clock.h" + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int all; /* run all msg size */ + int iters; + int tx_depth; + int max_out_read; +}; + +static int page_size; + +cycles_t *tposted; +cycles_t *tcompleted; + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} + +static struct pingpong_context *pp_init_ctx(unsigned size,int port, struct user_parameters *user_parm) +{ + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + + + ctx = malloc(sizeof *ctx); + if (!ctx){ + perror("malloc"); + return NULL; + } + + memset(ctx, 0, sizeof(struct pingpong_context)); + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + + ctx->qp = malloc(sizeof (ib_qp_handle_t)); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t)); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->buf = malloc( size * 2 ); + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + + memset(ctx->buf, 0, size * 2 ); + + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = 
ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + + { + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + ib_status = ib_alloc_pd(ctx->ca , + IB_PDT_NORMAL, + ctx, //pd_context + &ctx->pd); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate PD\n"); + return NULL; + } + + + /* We dont really want IBV_ACCESS_LOCAL_WRITE, but IB spec says: + * The Consumer is not allowed to assign Remote Write or Remote Atomic to + * a Memory Region that has not been assigned Local Write. */ + + + { + ib_mr_create_t mr_create; + + mr_create.length = size * 2; + + mr_create.vaddr = ctx->buf; + mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE|IB_AC_RDMA_READ; + + ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate MR\n"); + return NULL; + } + } + + { + ib_cq_create_t cq_create; + + cq_create.size = user_parm->tx_depth; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ ib_status = %d\n",ib_status); + return NULL; + } + } + + { + + ib_qp_create_t qp_create; + ib_qp_mod_t qp_modify; + ib_qp_attr_t qp_attr; + + memset(&qp_create, 0, sizeof(ib_qp_create_t)); + qp_create.h_sq_cq = ctx->scq; + qp_create.h_rq_cq = ctx->scq; + qp_create.sq_depth = user_parm->tx_depth; + qp_create.rq_depth = user_parm->tx_depth; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + //TODO MAX_INLINE + qp_create.qp_type= IB_QPT_RELIABLE_CONN; + qp_create.sq_signaled = FALSE; + /*attr.sq_sig_all = 0;*/ + + ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Couldn't create QP\n"); + return NULL; + } + + + + + memset(&qp_modify, 0, sizeof(ib_qp_mod_t)); + qp_modify.req_state = IB_QPS_INIT; + qp_modify.state.init.pkey_index = 0 ; + qp_modify.state.init.primary_port = (uint8_t)port; + qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE|IB_AC_RDMA_READ; + + + ib_status = ib_modify_qp(ctx->qp[0], &qp_modify); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + + memset(&qp_attr, 0, sizeof(ib_qp_attr_t)); + ib_status = ib_query_qp(ctx->qp[0],&ctx->qp_attr[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + fprintf(stderr, "max inline size %d\n",ctx->qp_attr[0].sq_max_inline); + } + return ctx; +} + + + + + +static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn, + struct pingpong_dest *dest, struct user_parameters *user_parm,int qpindex) +{ + + ib_api_status_t ib_status; + 
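+	/* Connection setup below follows the standard verbs sequence: the
+	 * QP already went RESET->INIT inside pp_init_ctx, so this routine
+	 * takes it INIT->RTR (which needs the remote LID, QPN, PSN and a
+	 * path MTU) and then RTR->RTS (which needs our send PSN plus the
+	 * retry/timeout budget).  In each ib_qp_mod_t the optional
+	 * attributes are flagged through the IB_MOD_QP_* bits in .opts. */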
ib_qp_mod_t attr;
+	memset(&attr, 0, sizeof(ib_qp_mod_t));
+
+	attr.req_state = IB_QPS_RTR;
+	switch (user_parm->mtu) {
+	case 256 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256;
+		break;
+	case 512 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_512;
+		break;
+	case 1024 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024;
+		break;
+	case 2048 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048;
+		break;
+	default:
+		/* an unsupported value would otherwise leave path_mtu zeroed
+		 * and make the RTR transition fail; fall back to 2048 */
+		fprintf(stderr, "Unsupported MTU %d, using 2048\n", user_parm->mtu);
+		user_parm->mtu = 2048;
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048;
+		break;
+	}
+	printf("Mtu : %d\n", user_parm->mtu);
+	attr.state.rtr.dest_qp = dest->qpn;
+	attr.state.rtr.rq_psn = dest->psn;
+	attr.state.rtr.resp_res = (uint8_t)user_parm->max_out_read;
+	attr.state.rtr.rnr_nak_timeout = 12;
+	attr.state.rtr.primary_av.grh_valid = 0;
+	attr.state.rtr.primary_av.dlid = dest->lid;
+	attr.state.rtr.primary_av.sl = 0;
+	attr.state.rtr.primary_av.path_bits = 0;
+	attr.state.rtr.primary_av.port_num = (uint8_t)port;
+	attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS;
+	attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT |
+			IB_MOD_QP_RESP_RES |
+			IB_MOD_QP_PRIMARY_AV;
+
+	ib_status = ib_modify_qp(ctx->qp[0], &attr);
+	if(ib_status != IB_SUCCESS){
+		fprintf(stderr, "Failed to modify QP to RTR\n");
+		return 1;
+	}
+
+	memset(&attr, 0, sizeof(ib_qp_mod_t));
+	attr.req_state = IB_QPS_RTS;
+	attr.state.rts.sq_psn = my_psn;
+	attr.state.rts.init_depth = (uint8_t)user_parm->max_out_read;
+	attr.state.rts.local_ack_timeout = 14;
+	attr.state.rts.retry_cnt = 7;
+	attr.state.rts.rnr_retry_cnt = 7;
+	attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT |
+			IB_MOD_QP_RETRY_CNT |
+			IB_MOD_QP_INIT_DEPTH |
+			IB_MOD_QP_LOCAL_ACK_TIMEOUT;
+
+	ib_status = ib_modify_qp(ctx->qp[0], &attr);
+	if(ib_status != IB_SUCCESS){
+		fprintf(stderr, "Failed to modify QP to RTS\n");
+		return 1;
+	}
+	return 0;
+}
+
+
+static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername,
+			int ib_port, int port, struct pingpong_dest **p_my_dest,
+			struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm)
+{
+	struct pingpong_dest *my_dest;
+	struct pingpong_dest *rem_dest;
+	SOCKET sockfd;
+	int rc;
+	int i;
+	int numofqps = 1;
+
+	/* Create connection between client and server.
+	 * We do it by exchanging data over a TCP socket connection. */
+
+	my_dest = malloc( sizeof (struct pingpong_dest) * numofqps);
+	if (!my_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps );
+	if (!rem_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	sockfd = servername ? pp_client_connect(servername, port) :
+		pp_server_connect(port);
+
+	if (sockfd == INVALID_SOCKET) {
+		printf("pp_connect_sock(%s,%d) failed (%d)!\n",
+			servername, port, sockfd);
+		return INVALID_SOCKET;
+	}
+
+	for (i = 0; i < numofqps; i++) {
+		my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid;
+		my_dest[i].psn = rand() & 0xffffff;
+		if (!my_dest[i].lid) {
+			fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n");
+			return INVALID_SOCKET;
+		}
+		my_dest[i].qpn = ctx->qp_attr[i].num;
+		/* TBD: this should be changed into a VA and a different key for each QP */
+		my_dest[i].rkey = ctx->rkey;
+		my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size;
+
+		printf(" local address: LID %#04x, QPN %#06x, PSN %#06x, "
+			"RKey %#08x VAddr %#016Lx\n",
+			my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn,
+			my_dest[i].rkey, my_dest[i].vaddr);
+
+		rc = servername ?
pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + printf(" remote address: LID %#04x, QPN %#06x, PSN %#06x, " + "RKey %#08x VAddr %#016Lx\n", + rem_dest[i].lid, rem_dest[i].qpn, rem_dest[i].psn, + rem_dest[i].rkey, rem_dest[i].vaddr); + + if (pp_connect_ctx(ctx, ib_port, my_dest[i].psn, &rem_dest[i], user_parm,i)) + return INVALID_SOCKET; + /* An additional handshake is required *after* moving qp to RTR. + Arbitrarily reuse exch_dest for this purpose. */ + rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + } + *p_rem_dest = rem_dest; + *p_my_dest = my_dest; + return sockfd; +} + +static void usage(const char *argv0) +{ + printf("Usage:\n"); + printf(" %s start a server and wait for connection\n", argv0); + printf(" %s connect to server at \n", argv0); + printf("\n"); + printf("Options:\n"); + printf(" -p, --port= listen on/connect to port (default 18515)\n"); + printf(" -i, --ib-port= use port of IB device (default 1)\n"); + printf(" -m, --mtu= mtu size (default 1024)\n"); + printf(" -o, --outs= num of outstanding read/atom(default 4)\n"); + printf(" -s, --size= size of message to exchange (default 65536)\n"); + printf(" -a, --all Run sizes from 2 till 2^23\n"); + printf(" -t, --tx-depth= size of tx queue (default 100)\n"); + printf(" -n, --iters= number of exchanges (at least 2, default 1000)\n"); + printf(" -b, --bidirectional measure bidirectional bandwidth (default unidirectional)\n"); + printf(" -V, --version display version number\n"); +} + +static void print_report(unsigned int iters, unsigned size, int duplex, + cycles_t *tposted, cycles_t *tcompleted) +{ + double cycles_to_units; + uint64_t tsize; /* Transferred size, in megabytes */ + unsigned int i, j; + int opt_posted = 0, opt_completed = 0; + cycles_t opt_delta; + cycles_t t; + + + opt_delta = tcompleted[opt_posted] - tposted[opt_completed]; + + /* Find the peak bandwidth */ + for (i = 0; i < iters; ++i) + for (j = i; j < iters; ++j) { + t = (tcompleted[j] - tposted[i]) / (j - i + 1); + if (t < opt_delta) { + opt_delta = t; + opt_posted = i; + opt_completed = j; + } + } + + cycles_to_units = get_cpu_mhz() ; + + tsize = duplex ? 2 : 1; + tsize = tsize * size; + printf("%7d %d %7.2f %7.2f\n", + size,iters,tsize * cycles_to_units / opt_delta / 0x100000, + (uint64_t)tsize * iters * cycles_to_units /(tcompleted[iters - 1] - tposted[0]) / 0x100000); +} + + + +int run_iter(struct pingpong_context *ctx, struct user_parameters *user_param, + struct pingpong_dest *rem_dest, int size) +{ + ib_api_status_t ib_status; + int scnt, ccnt ; + ib_send_wr_t *bad_wr; + + ctx->list.vaddr = (uintptr_t) ctx->buf; + ctx->list.length = size; + ctx->list.lkey = ctx->lkey; + ctx->wr.remote_ops.vaddr = rem_dest->vaddr; + ctx->wr.remote_ops.rkey = rem_dest->rkey; + ctx->wr.wr_id = PINGPONG_RDMA_WRID; + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds = 1; + ctx->wr.wr_type = WR_RDMA_READ; + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + ctx->wr.p_next = NULL; + + scnt = 0; + ccnt = 0; + + /* Done with setup. Start the test. 
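+	 *
+	 * The loop below keeps the pipe full: it posts RDMA reads while
+	 * fewer than tx_depth are outstanding (scnt - ccnt), stamping
+	 * tposted[] at each post and tcompleted[] at each completion.
+	 * print_report later scans every window of those stamps, so the
+	 * peak figure is roughly
+	 *
+	 *     max over i <= j of  (j - i + 1) * size / (tcompleted[j] - tposted[i])
+	 *
+	 * converted from CPU cycles to MB/sec via get_cpu_mhz().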
*/
+	while (scnt < user_param->iters || ccnt < user_param->iters) {
+
+		while (scnt < user_param->iters && (scnt - ccnt) < user_param->tx_depth ) {
+
+			tposted[scnt] = get_cycles();
+			ib_status = ib_post_send(ctx->qp[0], &ctx->wr, &bad_wr);
+			if (ib_status != IB_SUCCESS)
+			{
+				fprintf(stderr, "Couldn't post send: scnt %d ccnt %d\n",scnt,ccnt);
+				return 1;
+			}
+			++scnt;
+			PERF_DEBUG("scnt = %d \n",scnt);
+		}
+
+		if (ccnt < user_param->iters)
+		{
+			ib_wc_t wc;
+			ib_wc_t *p_wc_done,*p_wc_free;
+
+			p_wc_free = &wc;
+			p_wc_done = NULL;
+			p_wc_free->p_next = NULL;
+
+			do {
+				ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done);
+				if (ib_status == IB_SUCCESS) {
+					tcompleted[ccnt] = get_cycles();
+					if (p_wc_done->status != IB_WCS_SUCCESS) {
+						fprintf(stderr, "Completion with error at %s:\n",
+							user_param->servername ? "client" : "server");
+						fprintf(stderr, "Failed status %d: wr_id %d syndrome 0x%x\n",
+							p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific);
+						return 1;
+					}
+
+					/* here the id is the index to the qp num */
+					++ccnt;
+					PERF_DEBUG("ccnt = %d \n",ccnt);
+					p_wc_free = p_wc_done;
+					p_wc_free->p_next = NULL;
+					p_wc_done = NULL;
+				}
+			} while (ib_status == IB_SUCCESS);
+
+			if (ib_status != IB_NOT_FOUND) {
+				fprintf(stderr, "Poll CQ failed %d\n", ib_status);
+				return 12;
+			}
+		}
+	}
+
+	return(0);
+}
+
+int __cdecl main(int argc, char *argv[])
+{
+	struct pingpong_context *ctx;
+	struct pingpong_dest *my_dest;
+	struct pingpong_dest *rem_dest;
+	struct user_parameters user_param;
+	char *ib_devname = NULL;
+	int port = 18515;
+	int ib_port = 1;
+	unsigned size = 65536;
+	SOCKET sockfd = INVALID_SOCKET;
+	WSADATA wsaData;
+	int iResult;
+	int i = 0;
+	int duplex = 0;
+
+	/* init default values to user's parameters */
+	memset(&user_param, 0, sizeof(struct user_parameters));
+	user_param.mtu = 0; /* signal choose default by device */
+	user_param.iters = 1000;
+	user_param.tx_depth = 100;
+	user_param.servername = NULL;
+	user_param.connection_type = RC;
+	user_param.max_out_read = 4; /* the device capability on gen2 */
+
+	/* Parameter parsing.
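+	 *
+	 * Typical invocations (server side first, then client; the address
+	 * below is only an example):
+	 *
+	 *     ib_read_bw -a
+	 *     ib_read_bw -a 10.0.0.1
+	 *
+	 * Any name that getaddrinfo() can resolve works, and TCP port
+	 * 18515 is assumed unless -p overrides it.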
*/ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1, NULL, 'p' }, + { "ib-dev", 1, NULL, 'd' }, + { "ib-port", 1, NULL, 'i' }, + { "mtu", 1, NULL, 'm' }, + { "outs", 1, NULL, 'o' }, + { "size", 1, NULL, 's' }, + { "iters", 1, NULL, 'n' }, + { "tx-depth", 1, NULL, 't' }, + { "all", 0, NULL, 'a' }, + { "bidirectional", 0, NULL, 'b' }, + { "version", 0, NULL, 'V' }, + { 0 } + }; + c = getopt_long(argc, argv, "p:d:i:m:o:s:n:t:ba", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + + case 'd': + ib_devname = _strdup(optarg); + break; + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'o': + user_param.max_out_read = strtol(optarg, NULL, 0); + break; + case 'a': + user_param.all = ALL; + break; + case 'V': + printf("read_bw version : %.2f\n",VERSION); + return 0; + break; + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 1; + } + break; + + case 's': + size = strtol(optarg, NULL, 0); + if (size < 1 || size > UINT_MAX / 2) { + usage(argv[0]); + return 1; + } + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { usage(argv[0]); return 1; } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 1; + } + + break; + + case 'b': + duplex = 1; + break; + + default: + usage(argv[0]); + return 1; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 6; + } + printf("------------------------------------------------------------------\n"); + if (duplex == 1) { + printf(" RDMA_Read Bidirectional BW Test\n"); + } else { + printf(" RDMA_Read BW Test\n"); + } + printf("Connection type : RC\n"); + /* Done with parameter parsing. Perform setup. 
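+	 *
+	 * Ordering matters here: WSAStartup must succeed before
+	 * pp_open_port touches any socket, the work buffer is sized up
+	 * front for the largest message (2^23 bytes when -a is given),
+	 * and rand() is seeded per process so the two sides advertise
+	 * different PSNs.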
*/ + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + + if (user_param.all == ALL) { + /*since we run all sizes */ + size = 8388608; /*2^23 */ + } + + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO:get the device names + + + ctx = pp_init_ctx(size,ib_port, &user_param); + if (!ctx) + return 8; + + + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + + + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations BW peak[MB/sec] BW average[MB/sec] \n"); + /* For half duplex tests, server just waits for client to exit */ + /* use dummy my_dest struct*/ + if (!user_param.servername && !duplex) { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + return 0; + } + + tposted = malloc(user_param.iters * sizeof *tposted); + + if (!tposted) { + perror("malloc"); + return 1; + } + + tcompleted = malloc(user_param.iters * sizeof *tcompleted); + + if (!tcompleted) { + perror("malloc"); + return 1; + } + + + + if (user_param.all == ALL) { + for (i = 1; i < 24 ; ++i) { + size = 1 << i; + if(run_iter(ctx, &user_param, rem_dest, size)) + return 17; + print_report(user_param.iters, size, duplex, tposted, tcompleted); + } + } else { + if(run_iter(ctx, &user_param, rem_dest, size)) + return 18; + print_report(user_param.iters, size, duplex, tposted, tcompleted); + } + + if (user_param.servername) { + pp_client_exch_dest(sockfd, my_dest,rem_dest); + } else { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + } + + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + free(tposted); + free(tcompleted); + + printf("------------------------------------------------------------------\n"); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/read_bw/read_bw.rc b/branches/Ndi/tools/perftests/user/read_bw/read_bw.rc new file mode 100644 index 00000000..13e5f43d --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_bw/read_bw.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: write_bw.rc 1611 2006-08-20 14:48:55Z sleybo $ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "RDMA read Bandwidth Test (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "RDMA read Bandwidth Test " +#endif + +#define VER_INTERNALNAME_STR "ib_read_bw.exe" +#define VER_ORIGINALFILENAME_STR "ib_read_bw.exe" + +#include diff --git a/branches/Ndi/tools/perftests/user/read_lat/SOURCES b/branches/Ndi/tools/perftests/user/read_lat/SOURCES new file mode 100644 index 00000000..25df9e4e --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_lat/SOURCES @@ -0,0 +1,28 @@ +TARGETNAME=ib_read_lat +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +C_DEFINES=$(C_DEFINES) /D__WIN__ + +SOURCES=read_lat.rc \ + ..\getopt.c \ + ..\perf_utils.c \ + read_lat.c + +INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user + +RCOPTIONS=/I..\..\win\include + +TARGETLIBS= \ + $(DDK_LIB_PATH)\Ws2_32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/perftests/user/read_lat/makefile b/branches/Ndi/tools/perftests/user/read_lat/makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_lat/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/perftests/user/read_lat/read_lat.c b/branches/Ndi/tools/perftests/user/read_lat/read_lat.c new file mode 100644 index 00000000..b6a9bf92 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_lat/read_lat.c @@ -0,0 +1,807 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2005 Hewlett Packard, Inc (Grant Grundler) + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id: read_lat.c 1955 2007-02-19 14:46:04Z sleybo $ + */ + +#include "getopt.h" +#include "perf_defs.h" +#include "get_clock.h" + + + + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int all; /* run all msg size */ + int iters; + int tx_depth; + int max_out_read; +}; + +static int page_size; + +cycles_t *tstamp; + + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} + +static struct pingpong_context *pp_init_ctx(unsigned size,int port, struct user_parameters *user_parm) +{ + + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + + + ctx = malloc(sizeof *ctx); + if (!ctx){ + perror("malloc"); + return NULL; + } + memset(ctx, 0, sizeof(struct pingpong_context)); + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + + ctx->qp = malloc(sizeof (ib_qp_handle_t)); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t)); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->buf = malloc( size * 2); + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + + memset(ctx->buf, 0, size * 2); + + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + + + { + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + + + ib_status = ib_alloc_pd(ctx->ca , + IB_PDT_NORMAL, + ctx, //pd_context + &ctx->pd); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate PD\n"); + return NULL; + } + + + /* We dont really want IBV_ACCESS_LOCAL_WRITE, but IB spec says: + * The Consumer is not allowed to assign Remote Write or Remote Atomic to + * a Memory Region that has not been assigned Local Write. 
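+	 *
+	 * Hence IB_AC_LOCAL_WRITE is OR'd into the registration below
+	 * alongside the RDMA rights; the same access mask shows up again
+	 * at the QP level during the INIT transition.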
*/ + + + { + ib_mr_create_t mr_create; + + mr_create.length = size * 2; + + mr_create.vaddr = ctx->buf; + mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE|IB_AC_RDMA_READ; + + ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate MR\n"); + return NULL; + } + } + + { + ib_cq_create_t cq_create; + + cq_create.size = user_parm->tx_depth; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ ib_status = %d\n",ib_status); + return NULL; + } + } + + { + + ib_qp_create_t qp_create; + ib_qp_mod_t qp_modify; + ib_qp_attr_t qp_attr; + + memset(&qp_create, 0, sizeof(ib_qp_create_t)); + qp_create.h_sq_cq = ctx->scq; + qp_create.h_rq_cq = ctx->scq; + qp_create.sq_depth = user_parm->tx_depth; + qp_create.rq_depth = 1; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + //TODO MAX_INLINE + qp_create.qp_type= IB_QPT_RELIABLE_CONN; + qp_create.sq_signaled = FALSE; + /*attr.sq_sig_all = 0;*/ + + ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Couldn't create QP\n"); + return NULL; + } + + + + + memset(&qp_modify, 0, sizeof(ib_qp_mod_t)); + qp_modify.req_state = IB_QPS_INIT; + qp_modify.state.init.pkey_index = 0 ; + qp_modify.state.init.primary_port = (uint8_t)port; + qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE|IB_AC_RDMA_READ; + + + ib_status = ib_modify_qp(ctx->qp[0], &qp_modify); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + + + memset(&qp_attr, 0, sizeof(ib_qp_attr_t)); + ib_status = ib_query_qp(ctx->qp[0], &ctx->qp_attr[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + fprintf(stderr, "max inline size %d\n",ctx->qp_attr[0].sq_max_inline); + } + return ctx; +} + + + + + +static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn, + struct pingpong_dest *dest, struct user_parameters *user_parm,int qpindex) +{ + + ib_api_status_t ib_status; + ib_qp_mod_t attr; + memset(&attr, 0, sizeof(ib_qp_mod_t)); + + attr.req_state = IB_QPS_RTR; + switch (user_parm->mtu) { + case 256 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256; + break; + case 512 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_512; + break; + case 1024 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024; + break; + case 2048 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + break; + } + printf("Mtu : %d\n", user_parm->mtu); + attr.state.rtr.dest_qp = dest->qpn;; + attr.state.rtr.rq_psn = dest->psn; + attr.state.rtr.resp_res = (uint8_t)user_parm->max_out_read; + attr.state.rtr.rnr_nak_timeout = 12; + attr.state.rtr.primary_av.grh_valid = 0; + attr.state.rtr.primary_av.dlid = dest->lid; + attr.state.rtr.primary_av.sl = 0; + attr.state.rtr.primary_av.path_bits = 0; + attr.state.rtr.primary_av.port_num = (uint8_t)port; + attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT | + IB_MOD_QP_RESP_RES | + IB_MOD_QP_PRIMARY_AV; + + + ib_status = ib_modify_qp(ctx->qp[0], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to RTR\n"); + return 1; + } + + + + memset(&attr, 0, sizeof(ib_qp_mod_t)); + attr.req_state = IB_QPS_RTS; + 
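+	/* RTS carries the sender-side knobs: local_ack_timeout 14 encodes
+	 * 4.096 usec * 2^14 (about 67 msec) per the IB timeout formula,
+	 * retry_cnt 7 is the largest transport retry budget, and
+	 * rnr_retry_cnt 7 means "retry forever" on receiver-not-ready
+	 * NAKs. */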
attr.state.rts.sq_psn = my_psn;
+	attr.state.rts.init_depth = (uint8_t)user_parm->max_out_read;
+	attr.state.rts.local_ack_timeout = 14;
+	attr.state.rts.retry_cnt = 7;
+	attr.state.rts.rnr_retry_cnt = 7;
+	attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT |
+			IB_MOD_QP_RETRY_CNT |
+			IB_MOD_QP_INIT_DEPTH |
+			IB_MOD_QP_LOCAL_ACK_TIMEOUT;
+
+	ib_status = ib_modify_qp(ctx->qp[0], &attr);
+	if(ib_status != IB_SUCCESS){
+		fprintf(stderr, "Failed to modify QP to RTS\n");
+		return 1;
+	}
+
+	return 0;
+}
+
+
+static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername,
+			int ib_port, int port, struct pingpong_dest **p_my_dest,
+			struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm)
+{
+	//char addr_fmt[] = "%8s address: LID %#04x QPN %#06x PSN %#06x RKey %#08x VAddr %#016Lx\n";
+	struct pingpong_dest *my_dest;
+	struct pingpong_dest *rem_dest;
+	SOCKET sockfd;
+	int rc;
+	int i;
+	int numofqps = 1;
+
+	/* Create connection between client and server.
+	 * We do it by exchanging data over a TCP socket connection. */
+
+	my_dest = malloc( sizeof (struct pingpong_dest) * numofqps);
+	if (!my_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps );
+	if (!rem_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	sockfd = servername ? pp_client_connect(servername, port) :
+		pp_server_connect(port);
+
+	if (sockfd == INVALID_SOCKET) {
+		printf("pp_connect_sock(%s,%d) failed (%d)!\n",
+			servername, port, sockfd);
+		return INVALID_SOCKET;
+	}
+
+	for (i = 0; i < numofqps; i++) {
+		my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid;
+		my_dest[i].psn = rand() & 0xffffff;
+		if (!my_dest[i].lid) {
+			fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n");
+			return INVALID_SOCKET;
+		}
+		my_dest[i].qpn = ctx->qp_attr[i].num;
+		/* TBD: this should be changed into a VA and a different key for each QP */
+		my_dest[i].rkey = ctx->rkey;
+		my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size;
+
+		printf(" local address: LID %#04x, QPN %#06x, PSN %#06x, "
+			"RKey %#08x VAddr %#016Lx\n",
+			my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn,
+			my_dest[i].rkey, my_dest[i].vaddr);
+
+		rc = servername ?
pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + } + *p_rem_dest = rem_dest; + *p_my_dest = my_dest; + return sockfd; +} + +static void usage(const char *argv0) +{ + printf("Usage:\n"); + printf(" %s start a server and wait for connection\n", argv0); + printf(" %s connect to server at \n", argv0); + printf("\n"); + printf("Options:\n"); + printf(" -p, --port= listen on/connect to port (default 18515)\n"); + printf(" -m, --mtu= mtu size (default 256)\n"); + printf(" -i, --ib-port= use port of IB device (default 1)\n"); + printf(" -s, --size= size of message to exchange (default 1)\n"); + printf(" -t, --tx-depth= size of tx queue (default 50)\n"); + printf(" -n, --iters= number of exchanges (at least 2, default 1000)\n"); + printf(" -o, --outs= num of outstanding read/atom(default 4)\n"); + printf(" -a, --all Run sizes from 2 till 2^23\n"); + printf(" -C, --report-cycles report times in cpu cycle units (default microseconds)\n"); + printf(" -H, --report-histogram print out all results (default print summary only)\n"); + printf(" -U, --report-unsorted (implies -H) print out unsorted results (default sorted)\n"); + printf(" -V, --version display version number\n"); +} + + + +static void print_report(struct report_options * options, + unsigned int iters, cycles_t *tstamp,int size) +{ + double cycles_to_units; + cycles_t median; + unsigned int i; + const char* units; + cycles_t *delta = malloc(iters * sizeof *delta); + + if (!delta) { + perror("malloc"); + return; + } + + for (i = 0; i < iters - 1; ++i) + delta[i] = tstamp[i + 1] - tstamp[i]; + + + if (options->cycles) { + cycles_to_units = 1; + units = "cycles"; + } else { + cycles_to_units = get_cpu_mhz()/1000000; + units = "usec"; + } + + if (options->unsorted) { + printf("#, %s\n", units); + for (i = 0; i < iters - 1; ++i) + printf("%d, %g\n", i + 1, delta[i] / cycles_to_units ); + } + + qsort(delta, iters - 1, sizeof *delta, cycles_compare); + + if (options->histogram) { + printf("#, %s\n", units); + for (i = 0; i < iters - 1; ++i) + printf("%d, %g\n", i + 1, delta[i] / cycles_to_units ); + } + + median = get_median(iters - 1, delta); + printf("%7d %d %7.2f %7.2f %7.2f\n", + size,iters,delta[0] / cycles_to_units , + delta[iters - 3] / cycles_to_units ,median / cycles_to_units ); + + free(delta); +} + +int run_iter(struct pingpong_context *ctx, struct user_parameters *user_param, + struct pingpong_dest *rem_dest, int size) +{ + ib_api_status_t ib_status; + int scnt, ccnt ; + ib_send_wr_t *bad_wr; + + ctx->list.vaddr = (uintptr_t) ctx->buf ; + ctx->list.length = size; + ctx->list.lkey = ctx->lkey; + ctx->wr.remote_ops.vaddr = rem_dest->vaddr; + ctx->wr.remote_ops.rkey = rem_dest->rkey; + ctx->wr.wr_id = PINGPONG_RDMA_WRID; + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds = 1; + ctx->wr.wr_type = WR_RDMA_READ; + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + ctx->wr.p_next = NULL; + + scnt = 0; + ccnt = 0; + + + /* Done with setup. Start the test. 
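+	 *
+	 * Latency is measured on the client only: each iteration stamps
+	 * tstamp[scnt], posts a single RDMA read and spins in ib_poll_cq
+	 * until it completes, so delta[i] = tstamp[i+1] - tstamp[i] in
+	 * print_report covers one full post-to-completion round trip.
+	 * The deltas are then sorted to report min, max and the median
+	 * ("typical") figure.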
*/
+
+	if(user_param->servername) {
+		while (scnt < user_param->iters ) {
+
+			tstamp[scnt] = get_cycles();
+			ib_status = ib_post_send(ctx->qp[0], &ctx->wr, &bad_wr);
+			if (ib_status != IB_SUCCESS)
+			{
+				fprintf(stderr, "Couldn't post send: scnt %d ccnt %d\n",scnt,ccnt);
+				return 1;
+			}
+			++scnt;
+			PERF_DEBUG("scnt = %d \n",scnt);
+
+			{
+				ib_wc_t wc;
+				ib_wc_t *p_wc_done,*p_wc_free;
+
+				p_wc_free = &wc;
+				p_wc_done = NULL;
+				p_wc_free->p_next = NULL;
+
+				do {
+					ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done);
+				} while (ib_status == IB_NOT_FOUND);
+
+				if (ib_status != IB_SUCCESS) {
+					fprintf(stderr, "Poll Send CQ failed %d\n", ib_status);
+					return 12;
+				}
+
+				if (p_wc_done->status != IB_WCS_SUCCESS) {
+					fprintf(stderr, "Completion with error at %s:\n",
+						user_param->servername ? "client" : "server");
+					fprintf(stderr, "Failed status %d: wr_id %d syndrome 0x%x\n",
+						p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific);
+					return 1;
+				}
+				++ccnt;
+				PERF_DEBUG("ccnt = %d \n",ccnt);
+			}
+		}
+	}
+	return(0);
+}
+
+
+int __cdecl main(int argc, char *argv[])
+{
+	struct pingpong_context *ctx;
+	struct pingpong_dest *my_dest;
+	struct pingpong_dest *rem_dest;
+	struct user_parameters user_param;
+	char *ib_devname = NULL;
+	int port = 18515;
+	int ib_port = 1;
+	unsigned tmp_size,size = 2;
+	SOCKET sockfd = INVALID_SOCKET;
+	WSADATA wsaData;
+	int iResult;
+	int i = 0;
+	struct report_options report = {0};
+
+	/* init default values to user's parameters */
+	memset(&user_param, 0, sizeof(struct user_parameters));
+	user_param.mtu = 0; /* signal choose default by device */
+	user_param.iters = 1000;
+	user_param.tx_depth = 50;
+	user_param.servername = NULL;
+	user_param.connection_type = RC;
+	user_param.max_out_read = 4; /* the device capability on gen2 */
+
+/* Parameter parsing.
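+ *
+ * Typical invocations (server first, then client; the address is
+ * only an example):
+ *
+ *     ib_read_lat -a
+ *     ib_read_lat -a 10.0.0.1
+ *
+ * Add -C for cycle units, or -H/-U for a per-iteration histogram.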
*/ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1, NULL, 'p' }, + { "mtu", 1, NULL, 'm' }, + { "outs", 1, NULL, 'o' }, + { "ib-dev", 1, NULL, 'd' }, + { "ib-port", 1, NULL, 'i' }, + { "size", 1, NULL, 's' }, + { "iters", 1, NULL, 'n' }, + { "tx-depth", 1, NULL, 't' }, + { "all", 0, NULL, 'a' }, + { "report-cycles", 0, NULL, 'C' }, + { "report-histogram", 0, NULL, 'H' }, + { "report-unsorted", 0, NULL, 'U' }, + { "version", 0, NULL, 'V' }, + { 0 } + }; + + + c = getopt_long(argc, argv, "p:m:d:i:s:n:t:aHUV", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'o': + user_param.max_out_read = strtol(optarg, NULL, 0); + break; + case 'a': + user_param.all = ALL; + break; + case 'V': + printf("read_lat version : %.2f\n",VERSION); + return 0; + break; + case 'd': + ib_devname = _strdup(optarg); + break; + + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 2; + } + break; + + case 's': + size = strtol(optarg, NULL, 0); + if (size < 1) { + usage(argv[0]); return 3; + } + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { + usage(argv[0]); return 4; + } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 5; + } + + break; + + case 'C': + report.cycles = 1; + break; + + case 'H': + report.histogram = 1; + break; + + case 'U': + report.unsorted = 1; + break; + + default: + usage(argv[0]); + return 5; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 6; + } + + /* + * Done with parameter parsing. Perform setup. 
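+	 *
+	 * Note the tmp_size dance: requests below 128 bytes register a
+	 * 128-byte buffer first (the comment below attributes the ~70 nsec
+	 * effect to cache-line size) and the true message length is
+	 * restored just before run_iter.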
+ */ + tstamp = malloc(user_param.iters * sizeof *tstamp); + if (!tstamp) { + perror("malloc"); + return 10; + } + printf("------------------------------------------------------------------\n"); + printf(" RDMA_Read Latency Test\n"); + + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + tmp_size = size; + /* anyway make sure the connection is RC */ + if (user_param.all == ALL) { + /*since we run all sizes */ + size = 8388608; /*2^23 */ + } else if (size < 128) { + /* can cut up to 70 nsec probably related to cache line size */ + size = 128; + } + + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO:get the device names + + + ctx = pp_init_ctx(size,ib_port, &user_param); + if (!ctx) + return 8; + + + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + + + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations t_min[usec] t_max[usec] t_typical[usec]\n"); + /* For half duplex tests, server just waits for client to exit */ + /* use dummy my_dest struct*/ + if (!user_param.servername) { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + return 0; + } + + /* fix for true size in small msg size */ + if (tmp_size < 128) { + size = tmp_size ; + } + + if (user_param.all == ALL) { + for (i = 1; i < 24 ; ++i) { + size = 1 << i; + if(run_iter(ctx, &user_param, rem_dest, size)) + return 17; + print_report(&report, user_param.iters, tstamp, size); + } + } else { + if(run_iter(ctx, &user_param, rem_dest, size)) + return 18; + print_report(&report, user_param.iters, tstamp, size); + } + + pp_client_exch_dest(sockfd, my_dest,rem_dest); + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + printf("------------------------------------------------------------------\n"); + free(tstamp); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/read_lat/read_lat.rc b/branches/Ndi/tools/perftests/user/read_lat/read_lat.rc new file mode 100644 index 00000000..2dd3fb9e --- /dev/null +++ b/branches/Ndi/tools/perftests/user/read_lat/read_lat.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id: write_bw.rc 1611 2006-08-20 14:48:55Z sleybo $
+ */
+
+
+#include <oib_ver.h>
+
+#define VER_FILETYPE		VFT_APP
+#define VER_FILESUBTYPE		VFT2_UNKNOWN
+
+#ifdef _DEBUG_
+#define VER_FILEDESCRIPTION_STR	"RDMA read Latency Test (Debug)"
+#else
+#define VER_FILEDESCRIPTION_STR	"RDMA read Latency Test "
+#endif
+
+#define VER_INTERNALNAME_STR	"ib_read_lat.exe"
+#define VER_ORIGINALFILENAME_STR	"ib_read_lat.exe"
+
+#include <common.ver>
diff --git a/branches/Ndi/tools/perftests/user/send_bw/SOURCES b/branches/Ndi/tools/perftests/user/send_bw/SOURCES
new file mode 100644
index 00000000..5e626c84
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_bw/SOURCES
@@ -0,0 +1,28 @@
+TARGETNAME=ib_send_bw
+TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=PROGRAM
+UMTYPE=console
+USE_CRTDLL=1
+
+C_DEFINES=$(C_DEFINES) /D__WIN__
+
+SOURCES=send_bw.rc \
+	..\getopt.c \
+	..\perf_utils.c \
+	send_bw.c
+
+INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user
+
+RCOPTIONS=/I..\..\win\include
+
+TARGETLIBS= \
+	$(DDK_LIB_PATH)\Ws2_32.lib \
+!if $(FREEBUILD)
+	$(TARGETPATH)\*\complib.lib \
+	$(TARGETPATH)\*\ibal.lib
+!else
+	$(TARGETPATH)\*\complibd.lib \
+	$(TARGETPATH)\*\ibald.lib
+!endif
+
+MSC_WARNING_LEVEL= /W3
diff --git a/branches/Ndi/tools/perftests/user/send_bw/makefile b/branches/Ndi/tools/perftests/user/send_bw/makefile
new file mode 100644
index 00000000..a0c06273
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_bw/makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source
+# file to this component.  This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
+#
+
+!INCLUDE ..\..\..\..\inc\openib.def
diff --git a/branches/Ndi/tools/perftests/user/send_bw/send_bw.c b/branches/Ndi/tools/perftests/user/send_bw/send_bw.c
new file mode 100644
index 00000000..edc26515
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_bw/send_bw.c
@@ -0,0 +1,1167 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#include "getopt.h" +#include "get_clock.h" + +#include "perf_defs.h" + +#define SIGNAL 1 +#define MAX_INLINE 400 + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int all; /* run all msg size */ + int signal_comp; + int iters; + int tx_depth; + int duplex; + int use_event; +}; + +static int page_size; +cycles_t *tposted; +cycles_t *tcompleted; +int post_recv; + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} + + + +static struct pingpong_context *pp_init_ctx(unsigned size,int port, struct user_parameters *user_parm) +{ + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + + + + ctx = malloc(sizeof *ctx); + if (!ctx) + return NULL; + + ctx->qp = malloc(sizeof (ib_qp_handle_t)); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t)); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + /* in case of UD need space for the GRH */ + if (user_parm->connection_type==UD) { + ctx->buf = malloc(( size + 40 ) * 2); //PORTED ALINGED + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + memset(ctx->buf, 0, ( size + 40 ) * 2); + } else { + ctx->buf = malloc( size * 2); //PORTED ALINGED + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + memset(ctx->buf, 0, size * 2); + } + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + { + + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + if (user_parm->use_event) { +//PORTED ctx->channel = ibv_create_comp_channel(ctx->context); + ctx->channel = NULL;//remove when PORTED + if (!ctx->channel) { + fprintf(stderr, "Couldn't 
create completion channel\n");
+			return NULL;
+		}
+	} else
+		ctx->channel = NULL;
+
+	ib_status = ib_alloc_pd(ctx->ca ,
+							IB_PDT_NORMAL,
+							ctx, //pd_context
+							&ctx->pd);
+	if (ib_status != IB_SUCCESS) {
+		fprintf(stderr, "Couldn't allocate PD\n");
+		return NULL;
+	}
+
+
+	{
+		ib_mr_create_t	mr_create;
+		ib_cq_create_t	cq_create;
+		/* We don't really want local write access, but the IB spec says:
+		 * The Consumer is not allowed to assign Remote Write or Remote Atomic to
+		 * a Memory Region that has not been assigned Local Write. */
+		if (user_parm->connection_type==UD) {
+			mr_create.length = (size + 40 ) * 2;
+		} else {
+			mr_create.length = size * 2;
+		}
+
+		mr_create.vaddr = ctx->buf;
+		mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE;
+
+		ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr);
+		if (ib_status != IB_SUCCESS) {
+			fprintf(stderr, "Couldn't allocate MR\n");
+			return NULL;
+		}
+
+		cq_create.size = user_parm->tx_depth*2;
+		cq_create.h_wait_obj = NULL;
+		cq_create.pfn_comp_cb = pp_cq_comp_cb;
+		ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq);
+		if (ib_status != IB_SUCCESS) {
+			fprintf(stderr, "Couldn't create CQ\n");
+			return NULL;
+		}
+	}
+
+	{
+		ib_qp_create_t	qp_create;
+		memset(&qp_create, 0, sizeof(ib_qp_create_t));
+		qp_create.h_sq_cq = ctx->scq;
+		qp_create.h_rq_cq = ctx->scq;
+		qp_create.sq_depth = user_parm->tx_depth;
+		qp_create.rq_depth = user_parm->tx_depth;
+		qp_create.sq_sge = 1;
+		qp_create.rq_sge = 1;
+		//TODO MAX_INLINE
+
+		switch (user_parm->connection_type) {
+		case RC :
+			qp_create.qp_type= IB_QPT_RELIABLE_CONN;
+			break;
+		case UC :
+			qp_create.qp_type = IB_QPT_UNRELIABLE_CONN;
+			break;
+		case UD :
+			qp_create.qp_type = IB_QPT_UNRELIABLE_DGRM;
+			break;
+		default:
+			fprintf(stderr, "Unknown connection type %d \n",user_parm->connection_type);
+			return NULL;
+		}
+
+		qp_create.sq_signaled = FALSE;
+		/*attr.sq_sig_all = 0;*/
+
+		ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[0]);
+		if (ib_status != IB_SUCCESS){
+			fprintf(stderr, "Couldn't create QP\n");
+			return NULL;
+		}
+	}
+
+	{
+		ib_qp_mod_t	qp_modify;
+		memset(&qp_modify, 0, sizeof(ib_qp_mod_t));
+		qp_modify.req_state = IB_QPS_INIT;
+		qp_modify.state.init.pkey_index = 0 ;
+		qp_modify.state.init.primary_port = (uint8_t)port;
+		if (user_parm->connection_type==UD) {
+			qp_modify.state.init.qkey = 0x11111111;
+		} else {
+			qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE;
+		}
+
+		ib_status = ib_modify_qp(ctx->qp[0], &qp_modify);
+		if (ib_status != IB_SUCCESS){
+			fprintf(stderr, "Failed to modify QP to INIT\n");
+			return NULL;
+		}
+
+
+		ib_status = ib_query_qp(ctx->qp[0], &ctx->qp_attr[0]);
+		if (ib_status != IB_SUCCESS){
+			fprintf(stderr, "Failed to query QP\n");
+			return NULL;
+		}
+		fprintf(stderr, "max inline size %d\n",ctx->qp_attr[0].sq_max_inline);
+
+	}
+	return ctx;
+
+}
+
+static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn,
+			struct pingpong_dest *dest, struct user_parameters *user_parm,int index)
+{
+
+	ib_api_status_t	ib_status;
+	ib_qp_mod_t	attr;
+	memset(&attr, 0, sizeof(ib_qp_mod_t));
+
+	attr.req_state = IB_QPS_RTR;
+	switch (user_parm->mtu) {
+	case 256 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256;
+		break;
+	case 512 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_512;
+		break;
+	case 1024 :
+		attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024;
+		break;
+	case 2048 :
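+	/* The MTU switch here (and its twin in send_lat.c) has no default
+	 * case, so an unsupported -m value such as 4096 silently leaves
+	 * path_mtu zero-initialized.  A possible guard, sketched using only
+	 * the IB_MTU_LEN_* values this file already relies on; pp_mtu_to_enum
+	 * is a hypothetical helper, not part of this patch:
+	 *
+	 *	static int pp_mtu_to_enum(int mtu, uint8_t *p_path_mtu)
+	 *	{
+	 *		switch (mtu) {
+	 *		case 256:  *p_path_mtu = IB_MTU_LEN_256;  return 0;
+	 *		case 512:  *p_path_mtu = IB_MTU_LEN_512;  return 0;
+	 *		case 1024: *p_path_mtu = IB_MTU_LEN_1024; return 0;
+	 *		case 2048: *p_path_mtu = IB_MTU_LEN_2048; return 0;
+	 *		default:   return -1;	// reject unsupported MTUs up front
+	 *		}
+	 *	}
+	 */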
attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + break; + } + printf("Mtu : %d\n", user_parm->mtu); + attr.state.rtr.dest_qp = (dest->qpn); + attr.state.rtr.rq_psn = (dest->psn); + if (user_parm->connection_type==RC) { + attr.state.rtr.resp_res = 1; + attr.state.rtr.rnr_nak_timeout = 12; + } + attr.state.rtr.primary_av.grh_valid = 0; + attr.state.rtr.primary_av.dlid = dest->lid; + attr.state.rtr.primary_av.sl = 0; + attr.state.rtr.primary_av.path_bits = 0; + attr.state.rtr.primary_av.port_num = (uint8_t)port; + attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT | + IB_MOD_QP_RESP_RES | + IB_MOD_QP_PRIMARY_AV; + + + ib_status = ib_modify_qp(ctx->qp[0], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify UC QP to RTR\n"); + return 1; + } + + if (user_parm->connection_type == UD) { + ib_av_attr_t av_attr; + + av_attr.grh_valid = 0; + av_attr.dlid = dest->lid; + av_attr.sl = 0; + av_attr.path_bits = 0; + av_attr.port_num = (uint8_t)port; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + + ib_status = ib_create_av(ctx->pd,&av_attr, &ctx->av); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Failed to create AH for UD\n"); + return 1; + } + } + + + memset(&attr, 0, sizeof(ib_qp_mod_t)); + attr.req_state = IB_QPS_RTS; + attr.state.rts.sq_psn = my_psn; + + if (user_parm->connection_type == RC) { + attr.state.rts.resp_res = 1; + attr.state.rts.local_ack_timeout = 14; + attr.state.rts.retry_cnt = 7; + attr.state.rts.rnr_retry_cnt = 7; + attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT | + IB_MOD_QP_RETRY_CNT | + IB_MOD_QP_LOCAL_ACK_TIMEOUT; + + } + ib_status = ib_modify_qp(ctx->qp[index], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify UC QP to RTS\n"); + return 1; + } + + + + /* post recieve max msg size*/ + { + int i; + ib_recv_wr_t *bad_wr_recv; + //recieve + ctx->rwr.wr_id = PINGPONG_RECV_WRID; + ctx->rwr.ds_array = &ctx->recv_list; + ctx->rwr.num_ds = 1; + ctx->rwr.p_next = NULL; + ctx->recv_list.vaddr = (uintptr_t) ctx->buf; + if (user_parm->connection_type==UD) { + ctx->recv_list.length = ctx->size + 40; + } else { + ctx->recv_list.length = ctx->size; + } + ctx->recv_list.lkey = ctx->lkey; + for (i = 0; i < user_parm->tx_depth; ++i) { + ib_status = ib_post_recv(ctx->qp[index], &ctx->rwr, &bad_wr_recv); + if (ib_status != IB_SUCCESS) + { + fprintf(stderr, "Couldn't post recv: counter=%d\n", i); + return 14; + } + PERF_DEBUG("rcnt = %d \n",i); + } + } + post_recv = user_parm->tx_depth; + + return 0; +} + +static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername, + int ib_port, int port, struct pingpong_dest **p_my_dest, + struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm) +{ + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + SOCKET sockfd; + int rc; + int i; + int numofqps = 1; + + /* Create connection between client and server. + * We do it by exchanging data over a TCP socket connection. */ + + + my_dest = malloc( sizeof (struct pingpong_dest) * numofqps); + if (!my_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps ); + if (!rem_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + sockfd = servername ? 
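+	/* The TCP exchange below swaps one struct pingpong_dest per QP
+	 * (fields used in this file: lid, qpn, psn, rkey, vaddr).  The wire
+	 * format lives in pp_client_exch_dest()/pp_server_exch_dest() in
+	 * ..\perf_utils.c, which this patch hunk does not show; the flow, as
+	 * a rough sketch (fill_my_dest/exch_dest are shorthand, not
+	 * functions in this file):
+	 *
+	 *	fill_my_dest(&my_dest[i]);			// local LID/QPN, random PSN
+	 *	exch_dest(sockfd, &my_dest[i], &rem_dest[i]);	// blocking swap
+	 *	pp_connect_ctx(..., &rem_dest[i], ...);		// program peer's values
+	 *
+	 * Both sides block until the peer's copy arrives, so each QP can be
+	 * moved to RTR/RTS with the remote QPN and a matching PSN.
+	 */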
pp_client_connect(servername, port) :
+						pp_server_connect(port);
+
+	if (sockfd == INVALID_SOCKET) {
+		printf("pp_connect_sock(%s,%d) failed (%d)!\n",
+			   servername, port, sockfd);
+		return INVALID_SOCKET;
+	}
+
+
+	for (i = 0; i < numofqps; i++) {
+		my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid;
+		my_dest[i].psn = rand() & 0xffffff;
+		if (!my_dest[i].lid) {
+			fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n");
+			return INVALID_SOCKET;
+		}
+		my_dest[i].qpn = ctx->qp_attr[i].num;
+		/* TBD: this should be changed into a VA and a different key for each qp */
+		my_dest[i].rkey = ctx->rkey;
+		my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size;
+
+		printf("  local address:  LID %#04x, QPN %#06x, PSN %#06x, "
+			   "RKey %#08x VAddr %#016Lx\n",
+			   my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn,
+			   my_dest[i].rkey, my_dest[i].vaddr);
+
+		rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]):
+						pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]);
+		if (rc)
+			return INVALID_SOCKET;
+		printf("  remote address: LID %#04x, QPN %#06x, PSN %#06x, "
+			   "RKey %#08x VAddr %#016Lx\n",
+			   rem_dest[i].lid, rem_dest[i].qpn, rem_dest[i].psn,
+			   rem_dest[i].rkey, rem_dest[i].vaddr);
+
+		if (pp_connect_ctx(ctx, ib_port, my_dest[i].psn, &rem_dest[i], user_parm, i))
+			return INVALID_SOCKET;
+		/* An additional handshake is required *after* moving qp to RTR.
+		   Arbitrarily reuse exch_dest for this purpose. */
+		rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]):
+						pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]);
+		if (rc)
+			return INVALID_SOCKET;
+	}
+	*p_rem_dest = rem_dest;
+	*p_my_dest = my_dest;
+	return sockfd;
+}
+
+
+static void usage(const char *argv0)
+{
+	printf("Usage:\n");
+	printf("  %s            start a server and wait for connection\n", argv0);
+	printf("  %s <host>     connect to server at <host>\n", argv0);
+	printf("\n");
+	printf("Options:\n");
+	printf("  -p, --port=<port>           listen on/connect to port <port> (default 18515)\n");
+	printf("  -i, --ib-port=<port>        use port <port> of IB device (default 1)\n");
+	printf("  -c, --connection=<RC/UC/UD> connection type RC/UC/UD (default RC)\n");
+	printf("  -m, --mtu=<mtu>             mtu size (default 1024)\n");
+	printf("  -s, --size=<size>           size of message to exchange (default 65536)\n");
+	printf("  -a, --all                   run sizes from 2 up to 2^23\n");
+	printf("  -t, --tx-depth=<dep>        size of tx queue (default 300)\n");
+	printf("  -n, --iters=<iters>         number of exchanges (at least 2, default 1000)\n");
+	printf("  -b, --bidirectional         measure bidirectional bandwidth (default unidirectional)\n");
+	printf("  -V, --version               display version number\n");
+}
+
+static void print_report(unsigned int iters, unsigned size, int duplex,
+			 cycles_t *tposted, cycles_t *tcompleted)
+{
+	double cycles_to_units;
+	uint64_t tsize;	/* bytes transferred per message (doubled when duplex) */
+	int i, j;
+	int opt_posted = 0, opt_completed = 0;
+	cycles_t opt_delta;
+	cycles_t t;
+
+
+	opt_delta = tcompleted[opt_posted] - tposted[opt_completed];
+
+	/* Find the peak bandwidth */
+	for (i = 0; i < (int)iters; ++i)
+		for (j = i; j < (int)iters; ++j) {
+			t = (tcompleted[j] - tposted[i]) / (j - i + 1);
+			if (t < opt_delta) {
+				opt_delta = t;
+				opt_posted = i;
+				opt_completed = j;
+			}
+		}
+
+	cycles_to_units = get_cpu_mhz();
+
+	tsize = duplex ?
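+	/* What this function computes (sketch of the arithmetic):
+	 *
+	 *	tsize   = (duplex ? 2 : 1) * size;	// bytes moved per message
+	 *	BW_peak = max over i <= j of
+	 *	          (j - i + 1) * tsize / (tcompleted[j] - tposted[i])
+	 *	BW_avg  = iters * tsize / (tcompleted[iters-1] - tposted[0])
+	 *
+	 * The O(iters^2) double loop above finds the posting window with the
+	 * smallest cycles-per-message ratio; cycles are scaled by the clock
+	 * rate from get_cpu_mhz() and bytes become MB via the 0x100000
+	 * divisor.
+	 */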
2 : 1;
+	tsize = tsize * size;
+	printf("%7d %d %7.2f %7.2f \n",
+		   size,iters,tsize * cycles_to_units / opt_delta / 0x100000,
+		   (uint64_t)tsize * iters * cycles_to_units /(tcompleted[iters - 1] - tposted[0]) / 0x100000);
+}
+
+
+int run_iter_bi(struct pingpong_context *ctx, struct user_parameters *user_param,
+				struct pingpong_dest *rem_dest, int size)
+{
+
+	ib_qp_handle_t	qp;
+	int				scnt, ccnt, rcnt;
+	ib_recv_wr_t	*bad_wr_recv;
+	ib_api_status_t	ib_status;
+
+	/*********************************************
+	 * Important note:
+	 * In the UD/UC case this is NOT the way to measure bandwidth,
+	 * since we loop on the send side; we should instead pace from the
+	 * receive side or enable retransmission in software.
+	 * The sender may be faster than the receiver, so even though
+	 * receives were posted they may not suffice, and the test can end
+	 * in a deadlock with both sides stuck polling their CQ.
+	 * This test does not solve that in general; a separate UC/UD test
+	 * is needed.  In practice, a tx_depth of roughly 1/3 of the number
+	 * of iterations should be OK.
+	 * Also note that the sender is limited in the number of sends, and
+	 * we try to keep the receiver full.
+	 *********************************************/
+	/* send */
+	if (user_param->connection_type==UD) {
+		ctx->list.vaddr = (uintptr_t) ctx->buf + 40;
+		ctx->wr.dgrm.ud.h_av = ctx->av;
+		ctx->wr.dgrm.ud.remote_qp = rem_dest->qpn;
+		ctx->wr.dgrm.ud.remote_qkey = 0x11111111;
+	} else {
+		ctx->list.vaddr = (uintptr_t) ctx->buf;
+	}
+	ctx->list.lkey = ctx->lkey;
+	ctx->wr.wr_id = PINGPONG_SEND_WRID;
+	ctx->wr.ds_array = &ctx->list;
+	ctx->wr.num_ds = 1;
+	ctx->wr.wr_type = WR_SEND;
+	ctx->wr.p_next = NULL;
+
+	if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline) { /* compliance with perf_main */
+		ctx->wr.send_opt = IB_SEND_OPT_SIGNALED;
+	} else {
+		ctx->wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_INLINE;
+	}
+
+	ctx->list.length = size;
+	scnt = 0;
+	ccnt = 0;
+	rcnt = 0;
+	qp = ctx->qp[0];
+
+	while (ccnt < user_param->iters || rcnt < user_param->iters ) {
+		while (scnt < user_param->iters && (scnt - ccnt) < user_param->tx_depth / 2) {
+			ib_send_wr_t *bad_wr;
+			tposted[scnt] = get_cycles();
+			ib_status = ib_post_send(qp, &ctx->wr, &bad_wr);
+			if (ib_status != IB_SUCCESS) {
+				fprintf(stderr, "Couldn't post send: scnt=%d ib_status %d\n",
+						scnt,ib_status);
+				return 1;
+			}
+			++scnt;
+			PERF_DEBUG("scnt = %d \n",scnt);
+		}
+
+		{
+			ib_wc_t	wc;
+			ib_wc_t	*p_wc_done,*p_wc_free;
+
+
+			p_wc_free = &wc;
+			p_wc_free->p_next = NULL;
+			p_wc_done = NULL;
+#if PORTED
+			if (user_param->use_event) {
+				struct ibv_cq *ev_cq;
+				void          *ev_ctx;
+				if (ibv_get_cq_event(ctx->channel, &ev_cq, &ev_ctx)) {
+					fprintf(stderr, "Failed to get cq_event\n");
+					return 1;
+				}
+				if (ev_cq != ctx->cq) {
+					fprintf(stderr, "CQ event for unknown CQ %p\n", ev_cq);
+					return 1;
+				}
+				if (ibv_req_notify_cq(ctx->cq, 0)) {
+					fprintf(stderr, "Couldn't request CQ notification\n");
+					return 1;
+				}
+			}
+#endif
+			do {
+				ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done);
+				if (ib_status == IB_SUCCESS ) {
+					if (p_wc_done->status != IB_WCS_SUCCESS) {
+						fprintf(stderr, "Completion with error at %s:\n",
+								user_param->servername ?
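+	/* IBAL's ib_poll_cq() takes a caller-supplied free list of ib_wc_t
+	 * elements and returns a done list, which is why this loop keeps
+	 * recycling a single stack element.  The pattern, reduced to a
+	 * sketch of what the surrounding code does:
+	 *
+	 *	ib_wc_t wc, *p_free = &wc, *p_done = NULL;
+	 *	wc.p_next = NULL;
+	 *	while ((ib_status = ib_poll_cq(h_cq, &p_free, &p_done)) == IB_SUCCESS) {
+	 *		// inspect p_done->status and p_done->wr_id here
+	 *		p_free = p_done;	// hand the element back as the free list
+	 *		p_free->p_next = NULL;
+	 *		p_done = NULL;
+	 *	}
+	 *	// IB_NOT_FOUND simply means the CQ has been drained
+	 */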
"client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d syndrom 0x%x\n", + p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific); + fprintf(stderr, "scnt=%d, ccnt=%d\n", + scnt, ccnt); + return 1; + } + switch ((int) p_wc_done->wr_id) { + case PINGPONG_SEND_WRID: + tcompleted[ccnt] = get_cycles(); + ++ccnt; + break; + case PINGPONG_RECV_WRID: + if (--post_recv <= user_param->tx_depth - 2) { + while (rcnt < user_param->iters && (user_param->tx_depth - post_recv) > 0 ) { + post_recv++; + ib_status = ib_post_recv(ctx->qp[0], &ctx->rwr, &bad_wr_recv); + if (ib_status != IB_SUCCESS) + { + fprintf(stderr, "Couldn't post recv: rcnt=%d\n", + rcnt); + return 15; + } + } + } + ++rcnt; + break; + default: + fprintf(stderr, "Completion for unknown wr_id %d\n", + (int) wc.wr_id); + break; + } + p_wc_free = p_wc_done; + p_wc_free->p_next = NULL; + p_wc_done = NULL; + PERF_DEBUG("ccnt = %d \n",ccnt); + PERF_DEBUG("rcnt = %d \n",rcnt); + } + } while (ib_status == IB_SUCCESS ); + + if (ib_status != IB_NOT_FOUND) { + fprintf(stderr, "poll CQ failed %d\n", ib_status); + return 1; + } + + } + } + return(0); +} + + +int run_iter_uni(struct pingpong_context *ctx, struct user_parameters *user_param, + struct pingpong_dest *rem_dest, int size) +{ + + ib_qp_handle_t qp; + int scnt, ccnt, rcnt; + ib_recv_wr_t *bad_wr_recv; + ib_api_status_t ib_status; + + + /* send */ + if (user_param->connection_type==UD) { + ctx->list.vaddr = (uintptr_t) ctx->buf + 40; + ctx->wr.dgrm.ud.h_av = ctx->av; + ctx->wr.dgrm.ud.remote_qp = rem_dest->qpn; + ctx->wr.dgrm.ud.remote_qkey = 0x11111111; + } else { + ctx->list.vaddr = (uintptr_t) ctx->buf; + } + ctx->list.lkey = ctx->lkey; + ctx->wr.wr_id = PINGPONG_SEND_WRID; + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds = 1; + ctx->wr.wr_type = WR_SEND; + ctx->wr.p_next = NULL; + + + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline) { /*complaince to perf_main */ + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + } else { + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_INLINE; + } + ctx->list.length = size; + + scnt = 0; + ccnt = 0; + rcnt = 0; + qp = ctx->qp[0]; + if (!user_param->servername) { + while (rcnt < user_param->iters) { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + p_wc_free = &wc; + p_wc_done = NULL; + p_wc_free->p_next = NULL; + + /*Server is polling on recieve first */ +#if PORTED + if (user_param->use_event) { + struct ibv_cq *ev_cq; + void *ev_ctx; + if (ibv_get_cq_event(ctx->channel, &ev_cq, &ev_ctx)) { + fprintf(stderr, "Failed to get cq_event\n"); + return 1; + } + if (ev_cq != ctx->cq) { + fprintf(stderr, "CQ event for unknown CQ %p\n", ev_cq); + return 1; + } + if (ibv_req_notify_cq(ctx->cq, 0)) { + fprintf(stderr, "Couldn't request CQ notification\n"); + return 1; + } + } +#endif + do { + ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done); + if (ib_status == IB_SUCCESS) { + tcompleted[ccnt] = get_cycles(); + if (p_wc_done->status != IB_WCS_SUCCESS) { + fprintf(stderr, "Completion wth error at %s:\n", + user_param->servername ? 
"client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d syndrom 0x%x\n", + p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific); + fprintf(stderr, "scnt=%d, ccnt=%d\n", + scnt, ccnt); + return 1; + } + + ib_status = ib_post_recv(ctx->qp[0], &ctx->rwr, &bad_wr_recv); + if (ib_status != IB_SUCCESS) + { + fprintf(stderr, "Couldn't post recv: rcnt=%d\n", + rcnt); + return 15; + } + ++rcnt; + ++ccnt; + PERF_DEBUG("ccnt = %d \n",ccnt); + PERF_DEBUG("rcnt = %d \n",rcnt); + + p_wc_free = p_wc_done; + p_wc_free->p_next = NULL; + p_wc_done = NULL; + } + + + } while (ib_status == IB_SUCCESS); + if (ib_status != IB_NOT_FOUND) { + fprintf(stderr, "Poll Recieve CQ failed %d\n", ib_status); + return 12; + } + + } + } else { + /* client is posting and not receiving. */ + while (scnt < user_param->iters || ccnt < user_param->iters) { + while (scnt < user_param->iters && (scnt - ccnt) < user_param->tx_depth ) { + ib_send_wr_t *bad_wr; + tposted[scnt] = get_cycles(); + ib_status = ib_post_send(qp, &ctx->wr, &bad_wr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't post send: scnt=%d ib_status %d\n", + scnt,ib_status); + return 1; + } + ++scnt; + PERF_DEBUG("scnt = %d \n",scnt); + } + if (ccnt < user_param->iters) { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + + p_wc_free = &wc; + p_wc_free->p_next = NULL; + p_wc_done = NULL; + +#if PORTED + if (user_param->use_event) { + struct ibv_cq *ev_cq; + void *ev_ctx; + if (ibv_get_cq_event(ctx->channel, &ev_cq, &ev_ctx)) { + fprintf(stderr, "Failed to get cq_event\n"); + return 1; + } + if (ev_cq != ctx->cq) { + fprintf(stderr, "CQ event for unknown CQ %p\n", ev_cq); + return 1; + } + if (ibv_req_notify_cq(ctx->cq, 0)) { + fprintf(stderr, "Couldn't request CQ notification\n"); + return 1; + } + } +#endif + do { + ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done); + if (ib_status == IB_SUCCESS ) { + tcompleted[ccnt] = get_cycles(); + if (p_wc_done->status != IB_WCS_SUCCESS) { + fprintf(stderr, "Completion wth error at %s:\n", + user_param->servername ? "client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d syndrom 0x%x\n", + p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific); + fprintf(stderr, "scnt=%d, ccnt=%d\n", + scnt, ccnt); + return 1; + } + ccnt ++; + p_wc_free = p_wc_done; + p_wc_free->p_next = NULL; + p_wc_done = NULL; + } + + } while (ib_status == IB_SUCCESS ); + if (ib_status != IB_NOT_FOUND) { + fprintf(stderr, "poll CQ failed %d\n", ib_status); + return 1; + } + + PERF_DEBUG("ccnt = %d \n",ccnt); + } + } + } + return(0); +} + + +int __cdecl main(int argc, char *argv[]) +{ + struct pingpong_context *ctx; + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + struct user_parameters user_param; + char *ib_devname = NULL; + int port = 18515; + int ib_port = 1; + unsigned size = 65536; + SOCKET sockfd = INVALID_SOCKET; + int i = 0; + int size_max_pow = 24; + WSADATA wsaData; + int iResult; + + + + /* init default values to user's parameters */ + memset(&user_param, 0, sizeof(struct user_parameters)); + user_param.mtu = 0; + user_param.iters = 1000; + user_param.tx_depth = 1000; + user_param.servername = NULL; + user_param.use_event = 0; + user_param.duplex = 0; + /* Parameter parsing. 
*/ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1, NULL, 'p' }, + { "ib-dev", 1, NULL, 'd' }, + { "ib-port", 1, NULL, 'i' }, + { "mtu", 1, NULL, 'm' }, + { "connection", 1, NULL, 'c' }, + { "size", 1, NULL, 's' }, + { "iters", 1, NULL, 'n' }, + { "tx-depth", 1, NULL, 't' }, + { "all", 0, NULL, 'a' }, + { "bidirectional", 0, NULL, 'b' }, + { "version", 0, NULL, 'V' }, + { "events", 0, NULL, 'e' }, + { 0 } + }; + + c = getopt_long(argc, argv, "p:d:i:m:c:s:n:t:ebaV", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + case 'e': + ++user_param.use_event; + break; + case 'd': + ib_devname = _strdup(optarg); + break; + case 'c': + if (strcmp("UC",optarg)==0) + user_param.connection_type=UC; + if (strcmp("UD",optarg)==0) + user_param.connection_type=UD; + break; + + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'a': + user_param.all = ALL; + break; + case 'V': + printf("send_bw version : %.2f\n",VERSION); + return 0; + break; + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 1; + } + break; + + case 's': + size = (unsigned)_strtoui64(optarg, NULL, 0); + if (size < 1 || size > UINT_MAX / 2) { + usage(argv[0]); + return 1; + } + + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { usage(argv[0]); return 1; } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 1; + } + + break; + + case 'b': + user_param.duplex = 1; + break; + case 'h': + default: + usage(argv[0]); + return 1; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 1; + } + printf("------------------------------------------------------------------\n"); + if (user_param.duplex == 1) + printf(" Send Bidirectional BW Test\n"); + else + printf(" Send BW Test\n"); + + printf("Inline data is used up to 400 bytes message\n"); + if (user_param.connection_type==RC) { + printf("Connection type : RC\n"); + } else if (user_param.connection_type==UC) { + printf("Connection type : UC\n"); + } else { + printf("Connection type : UD\n"); + } + + /* Done with parameter parsing. Perform setup. 
*/ + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + if (user_param.all == ALL && user_param.connection_type!=UD) { + /*since we run all sizes */ + printf("test\n"); + size = 8388608; /*2^23 */ + } else if (user_param.connection_type==UD ) { + printf("Max msg size in UD is 2048 changing to 2048\n"); + size = 2048; + } + + + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO:get the device names + + + // init the context + ctx = pp_init_ctx(size, ib_port, &user_param); + if (!ctx) + return 1; + + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + +#if PORTED + if (user_param.use_event) { + printf("Test with events.\n"); + if (ibv_req_notify_cq(ctx->cq, 0)) { + fprintf(stderr, "Couldn't request CQ notification\n"); + return 1; + } + } +#endif + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations BW peak[MB/sec] BW average[MB/sec] \n"); + + tposted = malloc(user_param.iters * sizeof *tposted); + + if (!tposted) { + perror("malloc"); + return 1; + } + + tcompleted = malloc(user_param.iters * sizeof *tcompleted); + + if (!tcompleted) { + perror("malloc"); + return 1; + } + + + if (user_param.all == ALL) { + if (user_param.connection_type==UD) { + size_max_pow = 12; + } + for (i = 1; i < size_max_pow ; ++i) { + size = 1 << i; + if (user_param.duplex) { + if(run_iter_bi(ctx, &user_param, rem_dest, size)) + return 17; + } else { + if(run_iter_uni(ctx, &user_param, rem_dest, size)) + return 17; + } + if (user_param.servername) { + print_report(user_param.iters, size, user_param.duplex, tposted, tcompleted); + /* sync again for the sake of UC/UC */ + if(pp_client_exch_dest(sockfd, my_dest,rem_dest)) + return 19; + + } else { + if(pp_server_exch_dest(sockfd,my_dest,rem_dest)) + return 19; + + } + + } + } else { + if (user_param.duplex) { + if(run_iter_bi(ctx, &user_param,rem_dest, size)) + return 18; + } else { + if(run_iter_uni(ctx, &user_param,rem_dest, size)) + return 18; + } + if (user_param.servername) { + print_report(user_param.iters, size, user_param.duplex, tposted, tcompleted); + } + } + + /* close sockets */ + if (user_param.servername) { + pp_client_exch_dest(sockfd, my_dest,rem_dest); + } else { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + } + + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + free(tposted); + free(tcompleted); + + printf("------------------------------------------------------------------\n"); + goto end; + + +end: + WSACleanup(); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/send_bw/send_bw.rc b/branches/Ndi/tools/perftests/user/send_bw/send_bw.rc new file mode 100644 index 00000000..b66c9e13 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/send_bw/send_bw.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include <oib_ver.h>
+
+#define VER_FILETYPE		VFT_APP
+#define VER_FILESUBTYPE		VFT2_UNKNOWN
+
+#ifdef _DEBUG_
+#define VER_FILEDESCRIPTION_STR	"Send/Recv Bandwidth Test (Debug)"
+#else
+#define VER_FILEDESCRIPTION_STR	"Send/Recv Bandwidth Test "
+#endif
+
+#define VER_INTERNALNAME_STR	"ib_send_bw.exe"
+#define VER_ORIGINALFILENAME_STR	"ib_send_bw.exe"
+
+#include <common.ver>
diff --git a/branches/Ndi/tools/perftests/user/send_lat/SOURCES b/branches/Ndi/tools/perftests/user/send_lat/SOURCES
new file mode 100644
index 00000000..71c14699
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_lat/SOURCES
@@ -0,0 +1,28 @@
+TARGETNAME=ib_send_lat
+TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=PROGRAM
+UMTYPE=console
+USE_CRTDLL=1
+
+C_DEFINES=$(C_DEFINES) /D__WIN__
+
+SOURCES=send_lat.rc \
+	..\getopt.c \
+	..\perf_utils.c \
+	send_lat.c
+
+INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user
+
+RCOPTIONS=/I..\..\win\include
+
+TARGETLIBS= \
+	$(DDK_LIB_PATH)\Ws2_32.lib \
+!if $(FREEBUILD)
+	$(TARGETPATH)\*\complib.lib \
+	$(TARGETPATH)\*\ibal.lib
+!else
+	$(TARGETPATH)\*\complibd.lib \
+	$(TARGETPATH)\*\ibald.lib
+!endif
+
+MSC_WARNING_LEVEL= /W3
diff --git a/branches/Ndi/tools/perftests/user/send_lat/makefile b/branches/Ndi/tools/perftests/user/send_lat/makefile
new file mode 100644
index 00000000..a0c06273
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_lat/makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source
+# file to this component.  This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
+#
+
+!INCLUDE ..\..\..\..\inc\openib.def
diff --git a/branches/Ndi/tools/perftests/user/send_lat/send_lat.c b/branches/Ndi/tools/perftests/user/send_lat/send_lat.c
new file mode 100644
index 00000000..0368dd1f
--- /dev/null
+++ b/branches/Ndi/tools/perftests/user/send_lat/send_lat.c
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (c) 2005 Topspin Communications.  All rights reserved.
+ * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved.
+ * Copyright (c) 2005 Hewlett Packard, Inc (Grant Grundler)
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "getopt.h" +#include "get_clock.h" +#include "perf_defs.h" + + + +static int page_size; +cycles_t *tstamp; + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int signal_comp; + int all; /* run all msg size */ + int iters; + int tx_depth; +}; + + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} + + +static struct pingpong_context *pp_init_ctx(unsigned int size,int port,struct user_parameters *user_parm) { + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + + + + ctx = malloc(sizeof *ctx); + if (!ctx) + return NULL; + + ctx->qp = malloc(sizeof (ib_qp_handle_t)); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t)); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + /* in case of UD need space for the GRH */ + if (user_parm->connection_type==UD) { + ctx->buf = malloc(( size + 40 ) * 2); //PORTED ALINGED + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + memset(ctx->buf, 0, ( size + 40 ) * 2); + } else { + ctx->buf = malloc( size * 2); //PORTED ALINGED + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + memset(ctx->buf, 0, size * 2); + } + + + ctx->post_buf = (char*)ctx->buf + (size - 1); + ctx->poll_buf = (char*)ctx->buf + (2 * size - 1); + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + { + + + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, 
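+	/* Size-query idiom used throughout these tests: call once with a
+	 * NULL buffer, which is expected to fail with IB_INSUFFICIENT_MEMORY
+	 * while reporting the required length, then allocate and repeat.
+	 * Sketch of the surrounding code:
+	 *
+	 *	uint32_t bsize = 0;
+	 *	if (ib_query_ca(h_ca, NULL, &bsize) != IB_INSUFFICIENT_MEMORY)
+	 *		return NULL;				// anything else is unexpected
+	 *	p_ca_attr = (ib_ca_attr_t*)malloc(bsize);	// bsize was filled in
+	 *	if (!p_ca_attr || ib_query_ca(h_ca, p_ca_attr, &bsize) != IB_SUCCESS)
+	 *		return NULL;
+	 */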
&bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + ib_status = ib_alloc_pd(ctx->ca , + IB_PDT_NORMAL, + ctx, //pd_context + &ctx->pd); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate PD\n"); + return NULL; + } + + + { + ib_mr_create_t mr_create; + ib_cq_create_t cq_create; + /* We dont really want IBV_ACCESS_LOCAL_WRITE, but IB spec says: + * The Consumer is not allowed to assign Remote Write or Remote Atomic to + * a Memory Region that has not been assigned Local Write. */ + if (user_parm->connection_type==UD) { + mr_create.length = (size + 40 ) * 2; + } else { + mr_create.length = size * 2; + } + + mr_create.vaddr = ctx->buf; + mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE; + + ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate MR\n"); + return NULL; + } + + + cq_create.size = user_parm->tx_depth*2; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->rcq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ\n"); + return NULL; + } + + + cq_create.size = user_parm->tx_depth*2; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ\n"); + return NULL; + } + } + + { + ib_qp_create_t qp_create; + memset(&qp_create, 0, sizeof(ib_qp_create_t)); + qp_create.h_sq_cq = ctx->scq; + qp_create.h_rq_cq = ctx->rcq; + qp_create.sq_depth = user_parm->tx_depth; + qp_create.rq_depth = user_parm->tx_depth; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + //TODO MAX_INLINE + + switch (user_parm->connection_type) { + case RC : + qp_create.qp_type= IB_QPT_RELIABLE_CONN; + break; + case UC : + qp_create.qp_type = IB_QPT_UNRELIABLE_CONN; + break; + case UD : + qp_create.qp_type = IB_QPT_UNRELIABLE_DGRM; + break; + default: + fprintf(stderr, "Unknown connection type %d \n",user_parm->connection_type); + return NULL; + } + + qp_create.sq_signaled = FALSE; + /*attr.sq_sig_all = 0;*/ + + ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Couldn't create QP\n"); + return NULL; + } + } + + { + ib_qp_mod_t qp_modify; + ib_qp_attr_t qp_attr; + memset(&qp_modify, 0, sizeof(ib_qp_mod_t)); + qp_modify.req_state = IB_QPS_INIT; + qp_modify.state.init.pkey_index = 0 ; + qp_modify.state.init.primary_port = (uint8_t)port; + if (user_parm->connection_type==UD) { + qp_modify.state.init.qkey = 0x11111111; + } else { + qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; + } + + ib_status = ib_modify_qp(ctx->qp[0], &qp_modify); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + + + memset(&qp_attr, 0, sizeof(ib_qp_attr_t)); + ib_status = ib_query_qp(ctx->qp[0], &ctx->qp_attr[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, 
"Failed to modify QP to INIT\n"); + return NULL; + } + } + + + //send + ctx->wr.wr_id = PINGPONG_SEND_WRID; + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds = 1; + ctx->wr.wr_type = WR_SEND; + ctx->wr.p_next = NULL; + + //recieve + ctx->rwr.wr_id = PINGPONG_RECV_WRID; + ctx->rwr.ds_array = &ctx->recv_list; + ctx->rwr.num_ds = 1; + ctx->rwr.p_next = NULL; + return ctx; +} + +static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn, + struct pingpong_dest *dest,struct user_parameters *user_parm,int index) +{ + ib_api_status_t ib_status; + ib_qp_mod_t attr; + memset(&attr, 0, sizeof(ib_qp_mod_t)); + + attr.req_state = IB_QPS_RTR; + switch (user_parm->mtu) { + case 256 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256; + break; + case 512 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_512; + break; + case 1024 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024; + break; + case 2048 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + break; + } + printf("Mtu : %d\n", user_parm->mtu); + attr.state.rtr.dest_qp = (dest->qpn); + attr.state.rtr.rq_psn = (dest->psn); + if (user_parm->connection_type==RC) { + attr.state.rtr.resp_res = 1; + attr.state.rtr.rnr_nak_timeout = 12; + } + attr.state.rtr.primary_av.grh_valid = 0; + attr.state.rtr.primary_av.dlid = dest->lid; + attr.state.rtr.primary_av.sl = 0; + attr.state.rtr.primary_av.path_bits = 0; + attr.state.rtr.primary_av.port_num = (uint8_t)port; + attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT | + IB_MOD_QP_RESP_RES | + IB_MOD_QP_PRIMARY_AV; + + + ib_status = ib_modify_qp(ctx->qp[0], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify UC QP to RTR\n"); + return 1; + } + + + if (user_parm->connection_type == UD) { + ib_av_attr_t av_attr; + + av_attr.grh_valid = 0; + av_attr.dlid = dest->lid; + av_attr.dlid = dest->lid; + av_attr.sl = 0; + av_attr.path_bits = 0; + av_attr.port_num = (uint8_t)port; + av_attr.static_rate = IB_PATH_RECORD_RATE_10_GBS; + ib_status = ib_create_av(ctx->pd,&av_attr, &ctx->av); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Failed to create AH for UD\n"); + return 1; + } + } + memset(&attr, 0, sizeof(ib_qp_mod_t)); + attr.req_state = IB_QPS_RTS; + attr.state.rts.sq_psn = my_psn; + + if (user_parm->connection_type == RC) { + attr.state.rts.resp_res = 1; + attr.state.rts.local_ack_timeout = 14; + attr.state.rts.retry_cnt = 7; + attr.state.rts.rnr_retry_cnt = 7; + attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT | + IB_MOD_QP_RETRY_CNT | + IB_MOD_QP_LOCAL_ACK_TIMEOUT; + + } + ib_status = ib_modify_qp(ctx->qp[index], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify UC QP to RTS\n"); + return 1; + } + + + + /* post recieve max msg size*/ + { + int i; + ib_recv_wr_t *bad_wr_recv; + //recieve + ctx->rwr.wr_id = PINGPONG_RECV_WRID; + ctx->rwr.ds_array = &ctx->recv_list; + ctx->rwr.num_ds = 1; + ctx->rwr.p_next = NULL; + ctx->recv_list.vaddr = (uintptr_t) ctx->buf; + if (user_parm->connection_type==UD) { + ctx->recv_list.length = ctx->size + 40; + } else { + ctx->recv_list.length = ctx->size; + } + ctx->recv_list.lkey = ctx->lkey; + for (i = 0; i < user_parm->tx_depth / 2; ++i) { + if (ib_post_recv(ctx->qp[index], &ctx->rwr, &bad_wr_recv)) { + fprintf(stderr, "Couldn't post recv: counter=%d\n", i); + return 14; + } + } + } + return 0; +} + +static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername, + int ib_port, int port, 
struct pingpong_dest **p_my_dest,
+			struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm)
+{
+	struct pingpong_dest	*my_dest;
+	struct pingpong_dest	*rem_dest;
+	SOCKET			sockfd;
+	int			rc;
+	int			i;
+	int			numofqps = 1;
+
+	/* Create connection between client and server.
+	 * We do it by exchanging data over a TCP socket connection. */
+
+
+	my_dest = malloc( sizeof (struct pingpong_dest) * numofqps);
+	if (!my_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps );
+	if (!rem_dest){
+		perror("malloc");
+		return INVALID_SOCKET;
+	}
+
+	sockfd = servername ? pp_client_connect(servername, port) :
+						pp_server_connect(port);
+
+	if (sockfd == INVALID_SOCKET) {
+		printf("pp_connect_sock(%s,%d) failed (%d)!\n",
+			   servername, port, sockfd);
+		return INVALID_SOCKET;
+	}
+
+
+	for (i = 0; i < numofqps; i++) {
+		my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid;
+		my_dest[i].psn = rand() & 0xffffff;
+		if (!my_dest[i].lid) {
+			fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n");
+			return INVALID_SOCKET;
+		}
+		my_dest[i].qpn = ctx->qp_attr[i].num;
+		/* TBD: this should be changed into a VA and a different key for each qp */
+		my_dest[i].rkey = ctx->rkey;
+		my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size;
+
+		printf("  local address:  LID %#04x, QPN %#06x, PSN %#06x, "
+			   "RKey %#08x VAddr %#016Lx\n",
+			   my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn,
+			   my_dest[i].rkey, my_dest[i].vaddr);
+
+		rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]):
+						pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]);
+		if (rc)
+			return INVALID_SOCKET;
+		printf("  remote address: LID %#04x, QPN %#06x, PSN %#06x, "
+			   "RKey %#08x VAddr %#016Lx\n",
+			   rem_dest[i].lid, rem_dest[i].qpn, rem_dest[i].psn,
+			   rem_dest[i].rkey, rem_dest[i].vaddr);
+
+		if (pp_connect_ctx(ctx, ib_port, my_dest[i].psn, &rem_dest[i], user_parm, i))
+			return INVALID_SOCKET;
+		/* An additional handshake is required *after* moving qp to RTR.
+		   Arbitrarily reuse exch_dest for this purpose. */
+		rc = servername ?
pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]):
+						pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]);
+		if (rc)
+			return INVALID_SOCKET;
+	}
+	*p_rem_dest = rem_dest;
+	*p_my_dest = my_dest;
+	return sockfd;
+}
+
+
+
+static void usage(const char *argv0)
+{
+	printf("Usage:\n");
+	printf("  %s            start a server and wait for connection\n", argv0);
+	printf("  %s <host>     connect to server at <host>\n", argv0);
+	printf("\n");
+	printf("Options:\n");
+	printf("  -p, --port=<port>        listen on/connect to port <port> (default 18515)\n");
+	printf("  -c, --connection=<RC/UC> connection type RC/UC (default RC)\n");
+	printf("  -m, --mtu=<mtu>          mtu size (default 2048)\n");
+	printf("  -i, --ib-port=<port>     use port <port> of IB device (default 1)\n");
+	printf("  -s, --size=<size>        size of message to exchange (default 2)\n");
+	printf("  -t, --tx-depth=<dep>     size of tx queue (default 50)\n");
+	printf("  -l, --signal             signal completion on each msg\n");
+	printf("  -a, --all                run sizes from 2 up to 2^23\n");
+	printf("  -n, --iters=<iters>      number of exchanges (at least 2, default 1000)\n");
+	printf("  -C, --report-cycles      report times in cpu cycle units (default microseconds)\n");
+	printf("  -H, --report-histogram   print out all results (default print summary only)\n");
+	printf("  -U, --report-unsorted    (implies -H) print out unsorted results (default sorted)\n");
+	printf("  -V, --version            display version number\n");
+}
+
+
+
+static void print_report(struct report_options * options,
+			 unsigned int iters, cycles_t *tstamp,int size)
+{
+	double cycles_to_units;
+	cycles_t median;
+	unsigned int i;
+	const char* units;
+	cycles_t *delta = malloc(iters * sizeof *delta);
+
+	if (!delta) {
+		perror("malloc");
+		return;
+	}
+
+	for (i = 0; i < iters - 1; ++i)
+		delta[i] = tstamp[i + 1] - tstamp[i];
+
+
+	if (options->cycles) {
+		cycles_to_units = 1;
+		units = "cycles";
+	} else {
+		cycles_to_units = get_cpu_mhz()/1000000;
+		units = "usec";
+	}
+
+	if (options->unsorted) {
+		printf("#, %s\n", units);
+		for (i = 0; i < iters - 1; ++i)
+			printf("%d, %g\n", i + 1, delta[i] / cycles_to_units / 2);
+	}
+
+	qsort(delta, iters - 1, sizeof *delta, cycles_compare);
+
+	if (options->histogram) {
+		printf("#, %s\n", units);
+		for (i = 0; i < iters - 1; ++i)
+			printf("%d, %g\n", i + 1, delta[i] / cycles_to_units / 2);
+	}
+
+	median = get_median(iters - 1, delta);
+	printf("%7d %d %7.2f %7.2f %7.2f\n",
+		   size,iters,delta[0] / cycles_to_units / 2,
+		   delta[iters - 2] / cycles_to_units / 2,median / cycles_to_units / 2);
+	free(delta);
+}
+
+int run_iter(struct pingpong_context *ctx, struct user_parameters *user_param,
+	     struct pingpong_dest *rem_dest, int size)
+{
+	ib_api_status_t	ib_status;
+	ib_qp_handle_t	qp;
+	ib_recv_wr_t	rwr;
+	ib_recv_wr_t	*bad_wr_recv;
+	volatile char	*poll_buf;
+	volatile char	*post_buf;
+
+	int	scnt, rcnt, ccnt, poll;
+	int	iters;
+	int	tx_depth;
+	iters = user_param->iters;
+	tx_depth = user_param->tx_depth;
+
+	/* send */
+	if (user_param->connection_type==UD) {
+		ctx->list.vaddr = (uintptr_t) ctx->buf + 40;
+	} else {
+		ctx->list.vaddr = (uintptr_t) ctx->buf;
+	}
+	ctx->list.length = size;
+	ctx->list.lkey = ctx->lkey;
+	if (user_param->connection_type==UD) {
+		ctx->wr.dgrm.ud.h_av = ctx->av;
+		ctx->wr.dgrm.ud.remote_qp = rem_dest->qpn;
+		ctx->wr.dgrm.ud.remote_qkey = 0x11111111;
+	}
+
+	/* receive */
+	rwr = ctx->rwr;
+	ctx->recv_list.vaddr = (uintptr_t) ctx->buf;
+	if (user_param->connection_type==UD) {
+		ctx->recv_list.length = ctx->size + 40;
+	} else {
+		ctx->recv_list.length = ctx->size;
+	}
+
+	ctx->recv_list.lkey = ctx->lkey;
+
+	scnt = 0;
+	rcnt = 0;
+	ccnt = 0;
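+	/* Measurement model for the ping-pong loop below (sketch): tstamp[i]
+	 * is taken right before send i, so consecutive stamps bracket one
+	 * full round trip.  print_report() above then evaluates
+	 *
+	 *	delta[i] = tstamp[i + 1] - tstamp[i];	// one RTT, in cycles
+	 *	one_way  = median(delta) / cycles_to_units / 2;
+	 *
+	 * which is why every figure it prints is divided by 2.
+	 */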
poll = 0; + poll_buf = ctx->poll_buf; + post_buf = ctx->post_buf; + qp = ctx->qp[0]; + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline || size == 0) {/* complaince to perf_main don't signal*/ + ctx->wr.send_opt = 0; + } else { + ctx->wr.send_opt = IB_SEND_OPT_INLINE; + } + + while (scnt < iters || rcnt < iters) { + if (rcnt < iters && !(scnt < 1 && user_param->servername)) { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + p_wc_free = &wc; + p_wc_done = NULL; + p_wc_free->p_next = NULL; + PERF_DEBUG("rcnt %d\n",rcnt); + PERF_DEBUG("scnt %d\n",scnt); + /*Server is polling on recieve first */ + ++rcnt; + if (ib_post_recv(qp, &rwr, &bad_wr_recv)) { + fprintf(stderr, "Couldn't post recv: rcnt=%d\n", + rcnt); + return 15; + } + +#if PORTED + if (user_param->use_event) { + struct ibv_cq *ev_cq; + void *ev_ctx; + + if (ibv_get_cq_event(ctx->channel, &ev_cq, &ev_ctx)) { + fprintf(stderr, "Failed to get receive cq_event\n"); + return 1; + } + + if (ev_cq != ctx->rcq) { + fprintf(stderr, "CQ event for unknown RCQ %p\n", ev_cq); + return 1; + } + + if (ibv_req_notify_cq(ctx->rcq, 0)) { + fprintf(stderr, "Couldn't request RCQ notification\n"); + return 1; + } + } +#endif + + do { + ib_status = ib_poll_cq(ctx->rcq,&p_wc_free, &p_wc_done); + } while (ib_status == IB_NOT_FOUND); + + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Poll Recieve CQ failed %d\n", ib_status); + return 12; + } + + if (p_wc_done->status != IB_WCS_SUCCESS) { + fprintf(stderr, "Recieve Completion wth error at %s:\n", + user_param->servername ? "client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d\n", + wc.status, (int) wc.wr_id); + fprintf(stderr, "scnt=%d, rcnt=%d, ccnt=%d\n", + scnt, rcnt, ccnt); + return 13; + } + } + + if (scnt < iters ) { + ib_send_wr_t *bad_wr; + + PERF_DEBUG("rcnt1 %d\n",rcnt); + PERF_DEBUG("scnt1 %d\n",scnt); + if (ccnt == (tx_depth - 2) || (user_param->signal_comp == SIGNAL) + || (scnt == (iters - 1)) ) { + ccnt = 0; + poll=1; + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline || size == 0) {/* complaince to perf_main */ + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + } else { + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_INLINE; + } + + } + + /* client post first */ + tstamp[scnt] = get_cycles(); + *post_buf = (char)++scnt; + if (ib_post_send(qp,&ctx->wr, &bad_wr)) { + fprintf(stderr, "Couldn't post send: scnt=%d\n", + scnt); + return 11; + } + } + if (poll == 1) { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + PERF_DEBUG("rcnt2 %d\n",rcnt); + PERF_DEBUG("scnt2 %d\n",scnt); + p_wc_free = &wc; + p_wc_done = NULL; + p_wc_free->p_next = NULL; + + + /* poll on scq */ + do { + ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done); + } while (ib_status == IB_NOT_FOUND); + + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Poll Recieve CQ failed %d\n", ib_status); + return 12; + } + + if (wc.status != IB_WCS_SUCCESS) { + fprintf(stderr, "Recieve Completion wth error at %s:\n", + user_param->servername ? 
"client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d\n", + wc.status, (int) wc.wr_id); + fprintf(stderr, "scnt=%d, rcnt=%d, ccnt=%d\n", + scnt, rcnt, ccnt); + return 13; + } + + poll = 0; + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline || size == 0) {/* complaince to perf_main don't signal*/ + ctx->wr.send_opt = 0; + } else { + ctx->wr.send_opt = IB_SEND_OPT_INLINE; + } + + } + ++ccnt; + } + + return(0); +} + + + +int __cdecl main(int argc, char *argv[]) +{ + + struct pingpong_context *ctx; + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + struct user_parameters user_param; + char *ib_devname = NULL; + int port = 18515; + int ib_port = 1; + unsigned size = 2; + SOCKET sockfd = INVALID_SOCKET; + int i = 0; + int size_max_pow = 24; + WSADATA wsaData; + int iResult; + + + struct report_options report = {0}; + + /* init default values to user's parameters */ + memset(&user_param, 0, sizeof(struct user_parameters)); + user_param.mtu = 0; + user_param.iters = 1000; + user_param.tx_depth = 50; + user_param.servername = NULL; + /* Parameter parsing. */ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1,NULL, 'p' }, + { "connection", 1,NULL, 'c' }, + { "mtu", 1,NULL, 'm' }, + { "ib-dev", 1,NULL, 'd' }, + { "ib-port", 1,NULL, 'i' }, + { "size", 1,NULL, 's' }, + { "iters", 1,NULL, 'n' }, + { "tx-depth", 1,NULL, 't' }, + { "signal", 0,NULL, 'l' }, + { "all", 0,NULL, 'a' }, + { "report-cycles", 0,NULL, 'C' }, + { "report-histogram", 0,NULL, 'H' }, + { "report-unsorted", 0,NULL, 'U' }, + { "version", 0,NULL, 'V' }, + { 0 } + }; + + c = getopt_long(argc, argv, "p:c:m:d:i:s:n:t:laeCHUV", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + case 'c': + if (strcmp("UC",optarg)==0) + user_param.connection_type=UC; + if (strcmp("UD",optarg)==0) + user_param.connection_type=UD; + /* default is 0 for any other option RC*/ + break; + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'l': + user_param.signal_comp = SIGNAL; + break; + case 'a': + user_param.all = SIGNAL; + break; + case 'V': + printf("perftest version : %.2f\n",VERSION); + return 0; + break; + case 'd': + ib_devname = _strdup(optarg); + break; + + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 2; + } + break; + + case 's': + size = strtol(optarg, NULL, 0); + if (size < 1) { + usage(argv[0]); return 3; + } + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { + usage(argv[0]); return 4; + } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 5; + } + + break; + + case 'C': + report.cycles = 1; + break; + + case 'H': + report.histogram = 1; + break; + + case 'U': + report.unsorted = 1; + break; + + default: + usage(argv[0]); + return 5; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 6; + } + + /* + * Done with parameter parsing. Perform setup. 
+ */ + tstamp = malloc(user_param.iters * sizeof *tstamp); + if (!tstamp) { + perror("malloc"); + return 10; + } + /* Print header data */ + printf("------------------------------------------------------------------\n"); + printf(" Send Latency Test\n"); + printf("Inline data is used for messages up to 400 bytes\n"); + if (user_param.connection_type==RC) { + printf("Connection type : RC\n"); + } else if (user_param.connection_type==UC) { + printf("Connection type : UC\n"); + } else { + printf("Connection type : UD\n"); + } + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + + if (user_param.all == ALL && user_param.connection_type!=UD) { + /* since we run all sizes */ + size = 8388608; /* 2^23 */ + } else if (user_param.connection_type==UD ) { + printf("Max msg size in UD is 2048; changing size to 2048\n"); + size = 2048; + } + + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO get the device names + + ctx = pp_init_ctx( size, ib_port,&user_param); + if (!ctx) + return 8; + + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + + +#if PORTED + if (user_param.use_event) { + printf("Test with events.\n"); + if (ibv_req_notify_cq(ctx->rcq, 0)) { + fprintf(stderr, "Couldn't request RCQ notification\n"); + return 1; + } + if (ibv_req_notify_cq(ctx->scq, 0)) { + fprintf(stderr, "Couldn't request SCQ notification\n"); + return 1; + } + } +#endif + + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations t_min[usec] t_max[usec] t_typical[usec]\n"); + + if (user_param.all == ALL) { + if (user_param.connection_type==UD) { + size_max_pow = 12; + } + for (i = 1; i < size_max_pow ; ++i) { + size = 1 << i; + if(run_iter(ctx, &user_param, rem_dest, size)) + return 17; + + print_report(&report, user_param.iters, tstamp, size); + } + } else { + if(run_iter(ctx, &user_param, rem_dest, size)) + return 18; + print_report(&report, user_param.iters, tstamp, size); + } + printf("------------------------------------------------------------------\n"); + + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + + free(tstamp); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/send_lat/send_lat.rc b/branches/Ndi/tools/perftests/user/send_lat/send_lat.rc new file mode 100644 index 00000000..f661becc --- /dev/null +++ b/branches/Ndi/tools/perftests/user/send_lat/send_lat.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Send/Recv Latency Test (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Send/Recv Latency Test " +#endif + +#define VER_INTERNALNAME_STR "ib_send_lat.exe" +#define VER_ORIGINALFILENAME_STR "ib_send_lat.exe" + +#include diff --git a/branches/Ndi/tools/perftests/user/write_bw/SOURCES b/branches/Ndi/tools/perftests/user/write_bw/SOURCES new file mode 100644 index 00000000..c369f95a --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_bw/SOURCES @@ -0,0 +1,28 @@ +TARGETNAME=ib_write_bw +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +C_DEFINES=$(C_DEFINES) /D__WIN__ + +SOURCES=write_bw.rc \ + ..\getopt.c \ + ..\perf_utils.c \ + write_bw.c + +INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user + +RCOPTIONS=/I..\..\win\include + +TARGETLIBS= \ + $(DDK_LIB_PATH)\Ws2_32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/perftests/user/write_bw/makefile b/branches/Ndi/tools/perftests/user/write_bw/makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_bw/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/perftests/user/write_bw/write_bw.c b/branches/Ndi/tools/perftests/user/write_bw/write_bw.c new file mode 100644 index 00000000..dfcc1924 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_bw/write_bw.c @@ -0,0 +1,878 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "getopt.h" +#include "perf_defs.h" +#include "get_clock.h" + + + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int all; /* run all msg size */ + int iters; + int tx_depth; + int numofqps; + int maxpostsofqpiniteration; +}; + +static int page_size; + +cycles_t *tposted; +cycles_t *tcompleted; + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} +static struct pingpong_context *pp_init_ctx(unsigned size, int port, struct user_parameters *user_parm) +{ + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + int counter; + + ctx = malloc(sizeof *ctx); + if (!ctx){ + perror("malloc"); + return NULL; + } + memset(ctx, 0, sizeof(struct pingpong_context)); + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + + ctx->qp = malloc(sizeof (ib_qp_handle_t) * user_parm->numofqps ); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t) * user_parm->numofqps ); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->scnt = malloc(user_parm->numofqps * sizeof (int)); + if (!ctx->scnt) { + perror("malloc"); + return NULL; + } + ctx->ccnt = malloc(user_parm->numofqps * sizeof (int)); + if (!ctx->ccnt) { + perror("malloc"); + return NULL; + } + memset(ctx->scnt, 0, user_parm->numofqps * sizeof (int)); + memset(ctx->ccnt, 0, user_parm->numofqps * sizeof (int)); + + ctx->buf = malloc( size * 2 * user_parm->numofqps ); + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + + memset(ctx->buf, 0, size * 2 * user_parm->numofqps); + + + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + + + { + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, &bsize); + if(ib_status != 
IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + + ib_status = ib_alloc_pd(ctx->ca , + IB_PDT_NORMAL, + ctx, //pd_context + &ctx->pd); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate PD\n"); + return NULL; + } + + + { + ib_mr_create_t mr_create; + + mr_create.length = size * 2; + + mr_create.vaddr = ctx->buf; + mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE; + + ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate MR\n"); + return NULL; + } + } + + { + ib_cq_create_t cq_create; + + cq_create.size = user_parm->tx_depth * user_parm->numofqps; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ ib_status = %d\n",ib_status); + return NULL; + } + } + + + + + for (counter =0 ; counter < user_parm->numofqps ; counter++) + { + + ib_qp_create_t qp_create; + ib_qp_mod_t qp_modify; + ib_qp_attr_t qp_attr; + + memset(&qp_create, 0, sizeof(ib_qp_create_t)); + qp_create.h_sq_cq = ctx->scq; + qp_create.h_rq_cq = ctx->scq; + qp_create.sq_depth = user_parm->tx_depth; + qp_create.rq_depth = user_parm->tx_depth; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + //TODO MAX_INLINE + + switch (user_parm->connection_type) { + case RC : + qp_create.qp_type= IB_QPT_RELIABLE_CONN; + break; + case UC : + qp_create.qp_type = IB_QPT_UNRELIABLE_CONN; + break; + default: + fprintf(stderr, "Unknown connection type %d \n",user_parm->connection_type); + return NULL; + } + + qp_create.sq_signaled = FALSE; + /*attr.sq_sig_all = 0;*/ + + ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[counter]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Couldn't create QP\n"); + return NULL; + } + + + + + memset(&qp_modify, 0, sizeof(ib_qp_mod_t)); + qp_modify.req_state = IB_QPS_INIT; + qp_modify.state.init.pkey_index = 0 ; + qp_modify.state.init.primary_port = (uint8_t)port; + qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; + + + ib_status = ib_modify_qp(ctx->qp[counter], &qp_modify); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + + + memset(&qp_attr, 0, sizeof(ib_qp_attr_t)); + ib_status = ib_query_qp(ctx->qp[counter], &ctx->qp_attr[counter]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + fprintf(stderr, "max inline size %d\n",ctx->qp_attr[counter].sq_max_inline); + } + + return ctx; +} + + +static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn, + struct pingpong_dest *dest, struct user_parameters *user_parm, int qpindex) +{ + + ib_api_status_t ib_status; + ib_qp_mod_t attr; + memset(&attr, 0, sizeof(ib_qp_mod_t)); + + attr.req_state = IB_QPS_RTR; + switch (user_parm->mtu) { + case 256 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256; + break; + case 512 : + attr.state.rtr.primary_av.conn.path_mtu = 
IB_MTU_LEN_512; + break; + case 1024 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024; + break; + case 2048 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + break; + } + printf("Mtu : %d\n", user_parm->mtu); + attr.state.rtr.dest_qp = dest->qpn; + attr.state.rtr.rq_psn = dest->psn; + if (user_parm->connection_type==RC) { + attr.state.rtr.resp_res = 1; + attr.state.rtr.rnr_nak_timeout = 12; + } + attr.state.rtr.primary_av.grh_valid = 0; + attr.state.rtr.primary_av.dlid = dest->lid; + attr.state.rtr.primary_av.sl = 0; + attr.state.rtr.primary_av.path_bits = 0; + attr.state.rtr.primary_av.port_num = (uint8_t)port; + attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT | + IB_MOD_QP_RESP_RES | + IB_MOD_QP_PRIMARY_AV; + + + ib_status = ib_modify_qp(ctx->qp[qpindex], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to RTR\n"); + return 1; + } + + memset(&attr, 0, sizeof(ib_qp_mod_t)); + attr.req_state = IB_QPS_RTS; + attr.state.rts.sq_psn = my_psn; + + if (user_parm->connection_type == RC) { + attr.state.rts.init_depth = 1; + attr.state.rts.local_ack_timeout = 14; + attr.state.rts.retry_cnt = 7; + attr.state.rts.rnr_retry_cnt = 7; + attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT | + IB_MOD_QP_RETRY_CNT | + IB_MOD_QP_INIT_DEPTH | + IB_MOD_QP_LOCAL_ACK_TIMEOUT; + + } + ib_status = ib_modify_qp(ctx->qp[qpindex], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to RTS\n"); + return 1; + } + + return 0; + +} + +static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername, + int ib_port, int port, struct pingpong_dest **p_my_dest, + struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm) +{ + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + SOCKET sockfd; + int rc; + int i; + int numofqps = user_parm->numofqps; + + /* Create connection between client and server. + * We do it by exchanging data over a TCP socket connection. */ + + + my_dest = malloc( sizeof (struct pingpong_dest) * numofqps); + if (!my_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps ); + if (!rem_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + sockfd = servername ? pp_client_connect(servername, port) : + pp_server_connect(port); + + if (sockfd == INVALID_SOCKET) { + printf("pp_connect_sock(%s,%d) failed (%d)!\n", + servername, port, sockfd); + return INVALID_SOCKET; + } + + + for (i = 0; i < numofqps; i++) { + my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid; + my_dest[i].psn = rand() & 0xffffff; + if (!my_dest[i].lid) { + fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n"); + return 1; + } + my_dest[i].qpn = ctx->qp_attr[i].num; + /* TBD this should be changed into VA and a different key for each qp */ + my_dest[i].rkey = ctx->rkey; + my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size; + + printf(" local address: LID %#04x, QPN %#06x, PSN %#06x, " + "RKey %#08x VAddr %#016Lx\n", + my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn, + my_dest[i].rkey, my_dest[i].vaddr); + + rc = servername ? 
pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + printf(" remote address: LID %#04x, QPN %#06x, PSN %#06x, " + "RKey %#08x VAddr %#016Lx\n", + rem_dest[i].lid, rem_dest[i].qpn, rem_dest[i].psn, + rem_dest[i].rkey, rem_dest[i].vaddr); + + if (pp_connect_ctx(ctx, ib_port, my_dest[i].psn, &rem_dest[i], user_parm, i)) + return INVALID_SOCKET; + /* An additional handshake is required *after* moving qp to RTR. + Arbitrarily reuse exch_dest for this purpose. */ + rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + } + *p_rem_dest = rem_dest; + *p_my_dest = my_dest; + return sockfd; +} + + +static void usage(const char *argv0) +{ + printf("Usage:\n"); + printf(" %s start a server and wait for connection\n", argv0); + printf(" %s <host> connect to server at <host>\n", argv0); + printf("\n"); + printf("Options:\n"); + printf(" -p, --port=<port> listen on/connect to port <port> (default 18515)\n"); + printf(" -i, --ib-port=<port> use port <port> of IB device (default 1)\n"); + printf(" -c, --connection=<RC/UC> connection type RC/UC (default RC)\n"); + printf(" -m, --mtu=<mtu> mtu size (default 1024)\n"); + printf(" -g, --post=<num of posts> number of posts for each qp in the chain (default tx_depth)\n"); + printf(" -q, --qp=<num of qp's> num of qp's (default 1)\n"); + printf(" -s, --size=<size> size of message to exchange (default 65536)\n"); + printf(" -a, --all Run sizes from 2 up to 2^23\n"); + printf(" -t, --tx-depth=<dep> size of tx queue (default 100)\n"); + printf(" -n, --iters=<iters> number of exchanges (at least 2, default 5000)\n"); + printf(" -b, --bidirectional measure bidirectional bandwidth (default unidirectional)\n"); + printf(" -V, --version display version number\n"); +} + +static void + print_report(unsigned int iters, unsigned size, int duplex, + cycles_t *tposted, cycles_t *tcompleted, struct user_parameters *user_param) +{ + double cycles_to_units; + uint64_t tsize; /* Transferred size, in bytes */ + unsigned int i, j; + int opt_posted = 0, opt_completed = 0; + cycles_t opt_delta; + cycles_t t; + + + opt_delta = tcompleted[opt_posted] - tposted[opt_completed]; + + /* Find the peak bandwidth */ + for (i = 0; i < iters * user_param->numofqps; ++i) + for (j = i; j < iters * user_param->numofqps; ++j) { + t = (tcompleted[j] - tposted[i]) / (j - i + 1); + if (t < opt_delta) { + opt_delta = t; + opt_posted = i; + opt_completed = j; + } + } + + + cycles_to_units = get_cpu_mhz(); + + tsize = duplex ? 
2 : 1; + tsize = tsize * size; + printf("%7d %d %7.2f %7.2f\n", + size,iters,tsize * cycles_to_units / opt_delta / 0x100000, + (uint64_t)tsize * iters * user_param->numofqps * cycles_to_units /(tcompleted[(iters* user_param->numofqps) - 1] - tposted[0]) / 0x100000); + +} + + +int run_iter(struct pingpong_context *ctx, struct user_parameters *user_param, + struct pingpong_dest *rem_dest, int size) +{ + ib_api_status_t ib_status; + ib_qp_handle_t qp; + int scnt, ccnt ; + int index; + ib_send_wr_t *bad_wr; + + + + ctx->list.vaddr = (uintptr_t) ctx->buf; + ctx->list.length = size; + ctx->list.lkey = ctx->lkey; + + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds= 1; + ctx->wr.wr_type = WR_RDMA_WRITE; + + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline) { /* compliance with perf_main */ + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + } else { + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_INLINE; + } + ctx->wr.p_next = NULL; + + scnt = 0; + ccnt = 0; + /* clear the scnt/ccnt counters for each iteration */ + for (index =0 ; index < user_param->numofqps ; index++) { + ctx->scnt[index] = 0; + ctx->ccnt[index] = 0; + } + index = 0; + + + /* main loop for posting */ + while (scnt < (user_param->iters * user_param->numofqps) || + ccnt < (user_param->iters * user_param->numofqps) ) + { + /* main loop to run over all the qps and post each time n messages */ + for (index =0 ; index < user_param->numofqps ; index++) { + + ctx->wr.remote_ops.vaddr = rem_dest[index].vaddr; + ctx->wr.remote_ops.rkey = rem_dest[index].rkey; + qp = ctx->qp[index]; + ctx->wr.wr_id = index ; + + while (ctx->scnt[index] < user_param->iters && + (ctx->scnt[index] - ctx->ccnt[index]) < user_param->maxpostsofqpiniteration) + { + //if(ctx->scnt[index] - ctx->ccnt[index] < 10 ) + // fprintf(stderr, "Lower WQEs: qp index = %d qp scnt=%d total scnt %d qp ccnt=%d total ccnt %d\n", + // index,ctx->scnt[index],scnt,ctx->ccnt[index],ccnt); + tposted[scnt] = get_cycles(); + ib_status = ib_post_send(qp, &ctx->wr, &bad_wr); + if (ib_status != IB_SUCCESS) + { + fprintf(stderr, "Couldn't post send: qp index = %d qp scnt=%d total scnt %d qp ccnt=%d total ccnt %d\n", + index,ctx->scnt[index],scnt,ctx->ccnt[index],ccnt); + return 1; + } + ctx->scnt[index]= ctx->scnt[index]+1; + ++scnt; + PERF_DEBUG("scnt = %d \n",scnt); + } + + } + + /* finished posting, now polling */ + if (ccnt < (user_param->iters * user_param->numofqps) ) + { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + p_wc_free = &wc; + p_wc_done = NULL; + p_wc_free->p_next = NULL; + + do { + ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done); + if (ib_status == IB_SUCCESS) { + tcompleted[ccnt] = get_cycles(); + if (p_wc_done->status != IB_WCS_SUCCESS) { + fprintf(stderr, "Completion with error at %s:\n", + user_param->servername ? 
"client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d syndrome 0x%x\n", + p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific); + return 1; + } + + /* here the id is the index to the qp num */ + ctx->ccnt[(int)wc.wr_id] = ctx->ccnt[(int)wc.wr_id]+1; + ++ccnt; + PERF_DEBUG("ccnt = %d \n",ccnt); + p_wc_free = p_wc_done; + p_wc_free->p_next = NULL; + p_wc_done = NULL; + } + + + } while (ib_status == IB_SUCCESS); + + if (ib_status != IB_NOT_FOUND) { + fprintf(stderr, "Poll Send CQ failed %d\n", ib_status); + return 12; + } + + + + } + } + return(0); +} + + +int __cdecl main(int argc, char *argv[]) +{ + struct pingpong_context *ctx; + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + struct user_parameters user_param; + char *ib_devname = NULL; + int port = 18515; + int ib_port = 1; + unsigned size = 65536; + SOCKET sockfd = INVALID_SOCKET; + WSADATA wsaData; + int iResult; + int i = 0; + int duplex = 0; + + + /* init default values to user's parameters */ + memset(&user_param, 0, sizeof(struct user_parameters)); + user_param.mtu = 0; + user_param.iters = 5000; + user_param.tx_depth = 100; + user_param.servername = NULL; + user_param.numofqps = 1; + user_param.maxpostsofqpiniteration = 100; + + /* Parameter parsing. */ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1, NULL, 'p' }, + { "ib-dev", 1, NULL, 'd' }, + { "ib-port", 1, NULL, 'i' }, + { "mtu", 1, NULL, 'm' }, + { "qp", 1, NULL, 'q' }, + { "post", 1, NULL, 'g' }, + { "connection", 1, NULL, 'c' }, + { "size", 1, NULL, 's' }, + { "iters", 1, NULL, 'n' }, + { "tx-depth", 1, NULL, 't' }, + { "all", 0, NULL, 'a' }, + { "bidirectional", 0, NULL, 'b' }, + { "version", 0, NULL, 'V' }, + { 0 } + }; + + c = getopt_long(argc, argv, "p:d:i:m:q:g:c:s:n:t:baV", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + + case 'd': + ib_devname = _strdup(optarg); + break; + case 'c': + if (strcmp("UC",optarg)==0) + user_param.connection_type=UC; + break; + + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'q': + user_param.numofqps = strtol(optarg, NULL, 0); + break; + case 'g': + user_param.maxpostsofqpiniteration = strtol(optarg, NULL, 0); + break; + case 'a': + user_param.all = ALL; + break; + case 'V': + printf("rdma_bw version : %.2f\n",VERSION); + return 0; + break; + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 2; + } + break; + + case 's': + size = strtol(optarg, NULL, 0); + if (size < 1 || size > UINT_MAX / 2) { + usage(argv[0]); + return 1; + } + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { + usage(argv[0]); return 4; + } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 5; + } + + break; + + case 'b': + duplex = 1; + break; + + default: + usage(argv[0]); + return 5; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 6; + } + + printf("------------------------------------------------------------------\n"); + if (duplex == 1) { + printf(" RDMA_Write Bidirectional BW Test\n"); + } else { + printf(" RDMA_Write BW Test\n"); + } + + printf("Number of qp's running %d\n",user_param.numofqps); + printf("Number of iterations %d\n",user_param.iters); + 
printf("Message size %d\n",size); + if (user_param.connection_type==RC) { + printf("Connection type : RC\n"); + } else { + printf("Connection type : UC\n"); + } + if (user_param.maxpostsofqpiniteration > user_param.tx_depth ) { + printf("Cannot post more than tx_depth; adjusting number of posts to tx_depth\n"); + user_param.maxpostsofqpiniteration = user_param.tx_depth; + } else { + printf("Each Qp will post up to %d messages each time\n",user_param.maxpostsofqpiniteration); + } + /* Done with parameter parsing. Perform setup. */ + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + + if (user_param.all == ALL) { + /* since we run all sizes, allocate a big enough buffer */ + size = 8388608; /* 2^23 */ + } + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO get the device names + + + ctx = pp_init_ctx(size, ib_port, &user_param); + if (!ctx) + return 8; + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + + + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations BW peak[MB/sec] BW average[MB/sec] \n"); + /* For half duplex tests, server just waits for client to exit */ + /* use dummy my_dest struct */ + if (!user_param.servername && !duplex) { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + return 0; + } + + tposted = malloc(user_param.iters * user_param.numofqps * sizeof *tposted); + + if (!tposted) { + perror("malloc"); + return 1; + } + + tcompleted = malloc(user_param.iters * user_param.numofqps * sizeof *tcompleted); + + if (!tcompleted) { + perror("malloc"); + return 1; + } + + if (user_param.all == ALL) { + for (i = 1; i < 24 ; ++i) { + size = 1 << i; + if(run_iter(ctx, &user_param, rem_dest, size)) + return 17; + print_report(user_param.iters, size, duplex, tposted, tcompleted, &user_param); + } + } else { + if(run_iter(ctx, &user_param, rem_dest, size)) + return 18; + print_report(user_param.iters, size, duplex, tposted, tcompleted, &user_param); + + } + + /* use dummy my_dest struct */ + if (user_param.servername) { + pp_client_exch_dest(sockfd, my_dest,rem_dest); + } else { + pp_server_exch_dest(sockfd, my_dest,rem_dest); + } + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + free(tposted); + free(tcompleted); + printf("------------------------------------------------------------------\n"); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/write_bw/write_bw.rc b/branches/Ndi/tools/perftests/user/write_bw/write_bw.rc new file mode 100644 index 00000000..98e02bb3 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_bw/write_bw.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "RDMA write Bandwidth Test (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "RDMA write Bandwidth Test " +#endif + +#define VER_INTERNALNAME_STR "ib_write_bw.exe" +#define VER_ORIGINALFILENAME_STR "ib_write_bw.exe" + +#include diff --git a/branches/Ndi/tools/perftests/user/write_lat/SOURCES b/branches/Ndi/tools/perftests/user/write_lat/SOURCES new file mode 100644 index 00000000..2c83f613 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_lat/SOURCES @@ -0,0 +1,28 @@ +TARGETNAME=ib_write_lat +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +C_DEFINES=$(C_DEFINES) /D__WIN__ + +SOURCES=write_lat.rc \ + ..\getopt.c \ + ..\perf_utils.c \ + write_lat.c + +INCLUDES=..;..\..\..\..\inc;..\..\..\..\inc\user + +RCOPTIONS=/I..\..\win\include + +TARGETLIBS= \ + $(DDK_LIB_PATH)\Ws2_32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/perftests/user/write_lat/makefile b/branches/Ndi/tools/perftests/user/write_lat/makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_lat/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/perftests/user/write_lat/write_lat.c b/branches/Ndi/tools/perftests/user/write_lat/write_lat.c new file mode 100644 index 00000000..f4b9c4ed --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_lat/write_lat.c @@ -0,0 +1,831 @@ +/* + * Copyright (c) 2005 Topspin Communications. All rights reserved. + * Copyright (c) 2005 Mellanox Technologies Ltd. All rights reserved. + * Copyright (c) 2005 Hewlett Packard, Inc (Grant Grundler) + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "getopt.h" +#include "perf_defs.h" +#include "get_clock.h" + + + +struct user_parameters { + const char *servername; + int connection_type; + int mtu; + int all; /* run all msg size */ + int iters; + int tx_depth; +}; + +static int page_size; + +cycles_t *tstamp; + + + +void +pp_cq_comp_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + UNUSED_PARAM( h_cq ); + UNUSED_PARAM( cq_context); + return ; +} + + +static struct pingpong_context *pp_init_ctx(unsigned size, int port, struct user_parameters *user_parm) +{ + + + struct pingpong_context *ctx; + ib_api_status_t ib_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + + ctx = malloc(sizeof *ctx); + if (!ctx){ + perror("malloc"); + return NULL; + } + memset(ctx, 0, sizeof(struct pingpong_context)); + ctx->size = size; + ctx->tx_depth = user_parm->tx_depth; + + ctx->qp = malloc(sizeof (ib_qp_handle_t)); + if (!ctx->qp) { + perror("malloc"); + return NULL; + } + ctx->qp_attr = malloc(sizeof (ib_qp_attr_t)); + if (!ctx->qp_attr) { + perror("malloc"); + return NULL; + } + + ctx->buf = malloc( size * 2); + if (!ctx->buf) { + fprintf(stderr, "Couldn't allocate work buf.\n"); + return NULL; + } + + memset(ctx->buf, 0, size * 2); + ctx->post_buf = (char*)ctx->buf + (size - 1); + ctx->poll_buf = (char*)ctx->buf + (2 * size - 1); + + + + /* + * Open the AL instance + */ + ib_status = ib_open_al(&ctx->al); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_al failed status = %d\n", ib_status); + return NULL; + } + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(ctx->al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr,"ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + return NULL; + } + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + return NULL; + + + ca_guid_array = (ib_net64_t*)malloc(sizeof(ib_net64_t) * guid_count); + + ib_status = ib_get_ca_guids(ctx->al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_get_ca_guids2 failed with status = %d\n", ib_status); + return NULL; + } + + /* + * Open only the first HCA + */ + /* Open the CA */ + ib_status = ib_open_ca(ctx->al ,ca_guid_array[0] ,NULL, + NULL, //ca_context + &ctx->ca); + + if(ib_status != IB_SUCCESS) + { + fprintf(stderr,"ib_open_ca failed with status = %d\n", ib_status); + return NULL; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + + + { + /* Query the CA */ + uint32_t bsize = 0; + ib_status = ib_query_ca(ctx->ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + fprintf(stderr, "Failed to query device props"); + return NULL; + } + + ctx->ca_attr = (ib_ca_attr_t *)malloc(bsize); + + ib_status = ib_query_ca(ctx->ca, ctx->ca_attr, &bsize); + if(ib_status 
!= IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + return NULL; + } + if (user_parm->mtu == 0) {/*user did not ask for specific mtu */ + if (ctx->ca_attr->dev_id == 23108) { + user_parm->mtu = 1024; + } else { + user_parm->mtu = 2048; + } + } + } + + + ib_status = ib_alloc_pd(ctx->ca , + IB_PDT_NORMAL, + ctx, //pd_context + &ctx->pd); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate PD\n"); + return NULL; + } + + + { + ib_mr_create_t mr_create; + + mr_create.length = size * 2; + + mr_create.vaddr = ctx->buf; + mr_create.access_ctrl = IB_AC_RDMA_WRITE| IB_AC_LOCAL_WRITE; + + ib_status = ib_reg_mem(ctx->pd ,&mr_create ,&ctx->lkey ,&ctx->rkey ,&ctx->mr); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't allocate MR\n"); + return NULL; + } + } + + { + ib_cq_create_t cq_create; + + cq_create.size = user_parm->tx_depth; + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = pp_cq_comp_cb; + ib_status = ib_create_cq(ctx->ca,&cq_create ,ctx, NULL, &ctx->scq); + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Couldn't create CQ ib_status = %d\n",ib_status); + return NULL; + } + } + + { + + ib_qp_create_t qp_create; + ib_qp_mod_t qp_modify; + ib_qp_attr_t qp_attr; + + memset(&qp_create, 0, sizeof(ib_qp_create_t)); + qp_create.h_sq_cq = ctx->scq; + qp_create.h_rq_cq = ctx->scq; + qp_create.sq_depth = user_parm->tx_depth; + qp_create.rq_depth = 1; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + //TODO MAX_INLINE + + switch (user_parm->connection_type) { + case RC : + qp_create.qp_type= IB_QPT_RELIABLE_CONN; + break; + case UC : + qp_create.qp_type = IB_QPT_UNRELIABLE_CONN; + break; + default: + fprintf(stderr, "Unknown connection type %d \n",user_parm->connection_type); + return NULL; + } + + qp_create.sq_signaled = FALSE; + /*attr.sq_sig_all = 0;*/ + + ib_status = ib_create_qp(ctx->pd, &qp_create,NULL,NULL,&ctx->qp[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Couldn't create QP\n"); + return NULL; + } + + + + + memset(&qp_modify, 0, sizeof(ib_qp_mod_t)); + qp_modify.req_state = IB_QPS_INIT; + qp_modify.state.init.pkey_index = 0 ; + qp_modify.state.init.primary_port = (uint8_t)port; + qp_modify.state.init.access_ctrl = IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; + + + ib_status = ib_modify_qp(ctx->qp[0], &qp_modify); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + + + memset(&qp_attr, 0, sizeof(ib_qp_attr_t)); + ib_status = ib_query_qp(ctx->qp[0], &ctx->qp_attr[0]); + if (ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to INIT\n"); + return NULL; + } + fprintf(stderr, "max inline size %d\n",ctx->qp_attr[0].sq_max_inline); + } + + return ctx; +} + + + + +static int pp_connect_ctx(struct pingpong_context *ctx, int port, int my_psn, + struct pingpong_dest *dest, struct user_parameters *user_parm,int qpindex) +{ + + ib_api_status_t ib_status; + ib_qp_mod_t attr; + memset(&attr, 0, sizeof(ib_qp_mod_t)); + + attr.req_state = IB_QPS_RTR; + switch (user_parm->mtu) { + case 256 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_256; + break; + case 512 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_512; + break; + case 1024 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_1024; + break; + case 2048 : + attr.state.rtr.primary_av.conn.path_mtu = IB_MTU_LEN_2048; + break; + } + printf("Mtu : %d\n", user_parm->mtu); + attr.state.rtr.dest_qp = dest->qpn;; + attr.state.rtr.rq_psn = dest->psn; + if (user_parm->connection_type==RC) { + 
attr.state.rtr.resp_res = 1; + attr.state.rtr.rnr_nak_timeout = 12; + } + attr.state.rtr.primary_av.grh_valid = 0; + attr.state.rtr.primary_av.dlid = dest->lid; + attr.state.rtr.primary_av.sl = 0; + attr.state.rtr.primary_av.path_bits = 0; + attr.state.rtr.primary_av.port_num = (uint8_t)port; + attr.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + attr.state.rtr.opts = IB_MOD_QP_LOCAL_ACK_TIMEOUT | + IB_MOD_QP_RESP_RES | + IB_MOD_QP_PRIMARY_AV; + + + ib_status = ib_modify_qp(ctx->qp[0], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to RTR\n"); + return 1; + } + + memset(&attr, 0, sizeof(ib_qp_mod_t)); + attr.req_state = IB_QPS_RTS; + attr.state.rts.sq_psn = my_psn; + + if (user_parm->connection_type == RC) { + attr.state.rts.init_depth = 1; + attr.state.rts.local_ack_timeout = 14; + attr.state.rts.retry_cnt = 7; + attr.state.rts.rnr_retry_cnt = 7; + attr.state.rts.opts = IB_MOD_QP_RNR_RETRY_CNT | + IB_MOD_QP_RETRY_CNT | + IB_MOD_QP_INIT_DEPTH | + IB_MOD_QP_LOCAL_ACK_TIMEOUT; + + } + ib_status = ib_modify_qp(ctx->qp[0], &attr); + if(ib_status != IB_SUCCESS){ + fprintf(stderr, "Failed to modify QP to RTS\n"); + return 1; + } + + return 0; + +} + + +static SOCKET pp_open_port(struct pingpong_context *ctx, const char * servername, + int ib_port, int port, struct pingpong_dest **p_my_dest, + struct pingpong_dest **p_rem_dest,struct user_parameters *user_parm) +{ + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + SOCKET sockfd; + int rc; + int i; + int numofqps = 1; + + /* Create connection between client and server. + * We do it by exchanging data over a TCP socket connection. */ + + + my_dest = malloc( sizeof (struct pingpong_dest) * numofqps); + if (!my_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + rem_dest = malloc(sizeof (struct pingpong_dest) * numofqps ); + if (!rem_dest){ + perror("malloc"); + return INVALID_SOCKET; + } + + sockfd = servername ? pp_client_connect(servername, port) : + pp_server_connect(port); + + if (sockfd == INVALID_SOCKET) { + printf("pp_connect_sock(%s,%d) failed (%d)!\n", + servername, port, sockfd); + return INVALID_SOCKET; + } + + + for (i = 0; i < numofqps; i++) { + my_dest[i].lid = ctx->ca_attr->p_port_attr[ib_port-1].lid; + my_dest[i].psn = rand() & 0xffffff; + if (!my_dest[i].lid) { + fprintf(stderr, "Local lid 0x0 detected. Is an SM running?\n"); + return 1; + } + my_dest[i].qpn = ctx->qp_attr[i].num; + /* TBD this should be changed into VA and a different key for each qp */ + my_dest[i].rkey = ctx->rkey; + my_dest[i].vaddr = (uintptr_t)ctx->buf + ctx->size; + + printf(" local address: LID %#04x, QPN %#06x, PSN %#06x, " + "RKey %#08x VAddr %#016Lx\n", + my_dest[i].lid, my_dest[i].qpn, my_dest[i].psn, + my_dest[i].rkey, my_dest[i].vaddr); + + rc = servername ? 
pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + printf(" remote address: LID %#04x, QPN %#06x, PSN %#06x, " + "RKey %#08x VAddr %#016Lx\n", + rem_dest[i].lid, rem_dest[i].qpn, rem_dest[i].psn, + rem_dest[i].rkey, rem_dest[i].vaddr); + + if (pp_connect_ctx(ctx, ib_port, my_dest[i].psn, &rem_dest[i], user_parm, i)) + return INVALID_SOCKET; + /* An additional handshake is required *after* moving qp to RTR. + Arbitrarily reuse exch_dest for this purpose. */ + rc = servername ? pp_client_exch_dest(sockfd, &my_dest[i],&rem_dest[i]): + pp_server_exch_dest(sockfd, &my_dest[i],&rem_dest[i]); + if (rc) + return INVALID_SOCKET; + } + *p_rem_dest = rem_dest; + *p_my_dest = my_dest; + return sockfd; +} + +static void usage(const char *argv0) +{ + printf("Usage:\n"); + printf(" %s start a server and wait for connection\n", argv0); + printf(" %s <host> connect to server at <host>\n", argv0); + printf("\n"); + printf("Options:\n"); + printf(" -p, --port=<port> listen on/connect to port <port> (default 18515)\n"); + printf(" -c, --connection=<RC/UC> connection type RC/UC (default RC)\n"); + printf(" -m, --mtu=<mtu> mtu size (default 1024)\n"); + printf(" -i, --ib-port=<port> use port <port> of IB device (default 1)\n"); + printf(" -s, --size=<size> size of message to exchange (default 1)\n"); + printf(" -a, --all Run sizes from 2 up to 2^23\n"); + printf(" -t, --tx-depth=<dep> size of tx queue (default 50)\n"); + printf(" -n, --iters=<iters> number of exchanges (at least 2, default 1000)\n"); + printf(" -C, --report-cycles report times in cpu cycle units (default microseconds)\n"); + printf(" -H, --report-histogram print out all results (default print summary only)\n"); + printf(" -U, --report-unsorted (implies -H) print out unsorted results (default sorted)\n"); + printf(" -V, --version display version number\n"); +} + + + +static void print_report(struct report_options * options, + unsigned int iters, cycles_t *tstamp, int size) +{ + double cycles_to_units; + cycles_t median; + unsigned int i; + const char* units; + cycles_t *delta = malloc(iters * sizeof *delta); + + if (!delta) { + perror("malloc"); + return; + } + + for (i = 0; i < iters - 1; ++i) + delta[i] = tstamp[i + 1] - tstamp[i]; + + + if (options->cycles) { + cycles_to_units = 1; + units = "cycles"; + } else { + cycles_to_units = get_cpu_mhz()/1000000; + units = "usec"; + } + + if (options->unsorted) { + printf("#, %s\n", units); + for (i = 0; i < iters - 1; ++i) + printf("%d, %g\n", i + 1, delta[i] / cycles_to_units / 2); + } + + qsort(delta, iters - 1, sizeof *delta, cycles_compare); + + if (options->histogram) { + printf("#, %s\n", units); + for (i = 0; i < iters - 1; ++i) + printf("%d, %g\n", i + 1, delta[i] / cycles_to_units / 2); + } + + median = get_median(iters - 1, delta); + printf("%7d %d %7.2f %7.2f %7.2f\n", + size,iters,delta[0] / cycles_to_units / 2, + delta[iters - 2] / cycles_to_units / 2,median / cycles_to_units / 2); + + free(delta); +} + + + +int run_iter(struct pingpong_context *ctx, struct user_parameters *user_param, + struct pingpong_dest *rem_dest, int size) +{ + ib_api_status_t ib_status; + int scnt, ccnt, rcnt; + ib_send_wr_t *bad_wr; + volatile char *poll_buf; + volatile char *post_buf; + + + + + + ctx->list.vaddr = (uintptr_t) ctx->buf ; + ctx->list.length = size; + ctx->list.lkey = ctx->lkey; + ctx->wr.remote_ops.vaddr = rem_dest->vaddr; + ctx->wr.remote_ops.rkey = rem_dest->rkey; + ctx->wr.wr_id = PINGPONG_RDMA_WRID; + ctx->wr.ds_array = &ctx->list; + ctx->wr.num_ds = 1; + ctx->wr.wr_type = WR_RDMA_WRITE; + + if ((uint32_t)size > ctx->qp_attr[0].sq_max_inline) {/* compliance with perf_main */ + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED; + } else { + ctx->wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_INLINE; + } + scnt = 0; + rcnt = 0; + ccnt = 0; + + if(user_param->all == ALL) { + post_buf = (char*)ctx->buf + size - 1; + poll_buf = (char*)ctx->buf + 8388608 + size - 1; + } else { + poll_buf = ctx->poll_buf; + post_buf = ctx->post_buf; + } + + /* Done with setup. Start the test. */ + while (scnt < user_param->iters || ccnt < user_param->iters || rcnt < user_param->iters) { + + /* Wait until the buffer changes. */ + if (rcnt < user_param->iters && !(scnt < 1 && user_param->servername)) { + ++rcnt; + while (*poll_buf != (char)rcnt) + ; + /* Here the data is already in the physical memory. + If we wanted to actually use it, we may need + a read memory barrier here. */ + } + + if (scnt < user_param->iters) { + + tstamp[scnt] = get_cycles(); + *post_buf = (char)++scnt; + + ib_status = ib_post_send(ctx->qp[0], &ctx->wr, &bad_wr); + if (ib_status != IB_SUCCESS) + { + fprintf(stderr, "Couldn't post send: scnt=%d ccnt=%d\n",scnt,ccnt); + return 1; + } + } + + if (ccnt < user_param->iters) { + ib_wc_t wc; + ib_wc_t *p_wc_done,*p_wc_free; + + p_wc_free = &wc; + p_wc_done = NULL; + p_wc_free->p_next = NULL; + + do { + ib_status = ib_poll_cq(ctx->scq, &p_wc_free, &p_wc_done); + } while (ib_status == IB_NOT_FOUND); + + if (ib_status != IB_SUCCESS) { + fprintf(stderr, "Poll Send CQ failed %d\n", ib_status); + return 12; + } + + if (p_wc_done->status != IB_WCS_SUCCESS) { + fprintf(stderr, "Completion with error at %s:\n", + user_param->servername ? "client" : "server"); + fprintf(stderr, "Failed status %d: wr_id %d syndrome 0x%x\n", + p_wc_done->status, (int) p_wc_done->wr_id, p_wc_done->vendor_specific); + return 1; + } + + ++ccnt; + } + PERF_DEBUG("ccnt = %d \n",ccnt); + } + return(0); +} + + + + + + +int __cdecl main(int argc, char *argv[]) +{ + struct pingpong_context *ctx; + struct pingpong_dest *my_dest; + struct pingpong_dest *rem_dest; + struct user_parameters user_param; + char *ib_devname = NULL; + int port = 18515; + int ib_port = 1; + unsigned size = 2; + SOCKET sockfd = INVALID_SOCKET; + WSADATA wsaData; + int i = 0; + int iResult; + struct report_options report = {0}; + + + /* init default values to user's parameters */ + memset(&user_param, 0, sizeof(struct user_parameters)); + user_param.mtu = 0; /* signal choose default by device */ + user_param.iters = 1000; + user_param.tx_depth = 50; + user_param.servername = NULL; + /* Parameter parsing. 
*/ + while (1) { + int c; + + static struct option long_options[] = { + { "port", 1, NULL, 'p' }, + { "connection", 1, NULL, 'c' }, + { "mtu", 1, NULL, 'm' }, + { "ib-dev", 1, NULL, 'd' }, + { "ib-port", 1, NULL, 'i' }, + { "size", 1, NULL, 's' }, + { "iters", 1, NULL, 'n' }, + { "tx-depth", 1, NULL, 't' }, + { "all", 0, NULL, 'a' }, + { "report-cycles", 0, NULL, 'C' }, + { "report-histogram", 0, NULL, 'H' }, + { "report-unsorted", 0, NULL, 'U' }, + { "version", 0, NULL, 'V' }, + { 0 } + }; + + c = getopt_long(argc, argv, "p:c:m:d:i:s:n:t:aCHUV", long_options, NULL); + if (c == -1) + break; + + switch (c) { + case 'p': + port = strtol(optarg, NULL, 0); + if (port < 0 || port > 65535) { + usage(argv[0]); + return 1; + } + break; + case 'c': + if (strcmp("UC",optarg)==0) + user_param.connection_type=UC; + break; + + case 'm': + user_param.mtu = strtol(optarg, NULL, 0); + break; + case 'a': + user_param.all = ALL; + break; + case 'V': + printf("perftest version : %.2f\n",VERSION); + return 0; + break; + case 'd': + ib_devname = _strdup(optarg); + break; + + case 'i': + ib_port = strtol(optarg, NULL, 0); + if (ib_port < 0) { + usage(argv[0]); + return 2; + } + break; + + case 's': + size = strtol(optarg, NULL, 0); + if (size < 1) { + usage(argv[0]); return 3; + } + break; + + case 't': + user_param.tx_depth = strtol(optarg, NULL, 0); + if (user_param.tx_depth < 1) { + usage(argv[0]); return 4; + } + break; + + case 'n': + user_param.iters = strtol(optarg, NULL, 0); + if (user_param.iters < 2) { + usage(argv[0]); + return 5; + } + + break; + + case 'C': + report.cycles = 1; + break; + + case 'H': + report.histogram = 1; + break; + + case 'U': + report.unsorted = 1; + break; + + default: + usage(argv[0]); + return 5; + } + } + + if (optind == argc - 1) + user_param.servername = _strdup(argv[optind]); + else if (optind < argc) { + usage(argv[0]); + return 6; + } + + /* + * Done with parameter parsing. Perform setup. 
+ */ + + tstamp = malloc(user_param.iters * sizeof *tstamp); + if (!tstamp) { + perror("malloc"); + return 10; + } + printf("------------------------------------------------------------------\n"); + printf(" RDMA_Write Latency Test\n"); + if (user_param.connection_type==0) { + printf("Connection type : RC\n"); + } else { + printf("Connection type : UC\n"); + } + + + + // Initialize Winsock + iResult = WSAStartup(MAKEWORD(2,2), &wsaData); + if (iResult != NO_ERROR) { + printf("Error at WSAStartup()\n"); + return 1; + } + + + if (user_param.all == ALL) { + /*since we run all sizes lets allocate big enough buffer */ + size = 8388608; /*2^23 */ + } + srand(GetCurrentProcessId() * GetTickCount()); + + //TODO: get pagesize from sysinfo + page_size = 4096; + + //TODO get the device names + + + ctx = pp_init_ctx( size, ib_port,&user_param); + if (!ctx) + return 8; + sockfd = pp_open_port(ctx, user_param.servername, ib_port, port,&my_dest,&rem_dest,&user_param); + if (sockfd == INVALID_SOCKET) + return 9; + + printf("------------------------------------------------------------------\n"); + printf(" #bytes #iterations t_min[usec] t_max[usec] t_typical[usec]\n"); + + if (user_param.all == ALL) { + for (i = 1; i < 24 ; ++i) { + size = 1 << i; + if(run_iter(ctx, &user_param, rem_dest, size)) + return 17; + print_report(&report, user_param.iters, tstamp, size); + } + } else { + if(run_iter(ctx, &user_param, rem_dest, size)) + return 18; + print_report(&report, user_param.iters, tstamp, size); + } + send(sockfd, "done", sizeof "done",0); + closesocket(sockfd); + + + printf("------------------------------------------------------------------\n"); + free(tstamp); + return 0; +} diff --git a/branches/Ndi/tools/perftests/user/write_lat/write_lat.rc b/branches/Ndi/tools/perftests/user/write_lat/write_lat.rc new file mode 100644 index 00000000..9cd1a3f9 --- /dev/null +++ b/branches/Ndi/tools/perftests/user/write_lat/write_lat.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "RDMA write Latency Test (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "RDMA write Latency Test " +#endif + +#define VER_INTERNALNAME_STR "ib_write_lat.exe" +#define VER_ORIGINALFILENAME_STR "ib_write_lat.exe" + +#include diff --git a/branches/Ndi/tools/spark/dirs b/branches/Ndi/tools/spark/dirs new file mode 100644 index 00000000..5a7e8b31 --- /dev/null +++ b/branches/Ndi/tools/spark/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/spark/user/SOURCES b/branches/Ndi/tools/spark/user/SOURCES new file mode 100644 index 00000000..06c29496 --- /dev/null +++ b/branches/Ndi/tools/spark/user/SOURCES @@ -0,0 +1,57 @@ +TARGETNAME=spark +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 +USE_NTDLL=1 + + +!if !defined(WINIBHOME) +WINIBHOME=..\..\.. +!endif + +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + + +SOURCES=spark.rc \ + spark.cpp + + +INCLUDES= $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; \ + $(WINIBHOME)\inc\iba; \ + $(WINIBHOME)\tools\mtcr\user; \ + $(ZLIB)\include; + +TARGETLIBS= \ +!if $(FREEBUILD) + $(CRT_LIB_PATH)\msvcprt.lib \ + $(SDK_LIB_PATH)\Ws2_32.lib\ + $(TARGETPATH)\*\mtcr.lib +!else + $(CRT_LIB_PATH)\msvcprt.lib\ + $(SDK_LIB_PATH)\Ws2_32.lib\ + $(TARGETPATH)\*\mtcr.lib +!endif + +USER_C_FLAGS=$(USER_C_FLAGS) /Ze /EHsc + +# TODO:Should I define the __WIN__ manually +C_DEFINES=$(C_DEFINES) -D__WIN__ + + +!if $(FREEBUILD) + +!else +C_DEFINES=$(C_DEFINES) -DDEBUG +!endif + +# Version: +!if !defined(MFT_BLD_VER) +MFT_BLD_VER=Devel +!endif +C_DEFINES=$(C_DEFINES) "-DVERSION_ID=$(MFT_BLD_VER)" + +386_STDCALL=0 + +MSC_WARNING_LEVEL= /W3 + diff --git a/branches/Ndi/tools/spark/user/makefile b/branches/Ndi/tools/spark/user/makefile new file mode 100644 index 00000000..128ed372 --- /dev/null +++ b/branches/Ndi/tools/spark/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/spark/user/spark.cpp b/branches/Ndi/tools/spark/user/spark.cpp new file mode 100644 index 00000000..1e7f8f10 --- /dev/null +++ b/branches/Ndi/tools/spark/user/spark.cpp @@ -0,0 +1,3498 @@ +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) May 2003, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... 
+ *
+ * spark.cpp - Command line EEPROM burning tool (Gamla/Anafa)
+ *
+ * Version: $Id$
+ *
+ * Standalone compiling:
+ * g++ -g -Wall -I/usr/mst/include -L/usr/mst/lib -o spark spark.cpp -lmtcr
+ *
+ * Author: Oren Kladnitsky orenk@mellanox.co.il
+ */
+
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <assert.h>
+#include <ctype.h>
+
+
+
+
+#ifndef __WIN__
+
+//
+// Linux
+//
+
+#include <unistd.h>
+#include <netinet/in.h>
+#include <endian.h>
+#include <byteswap.h>
+
+#else // __WIN__
+
+//
+// Windows (Under DDK)
+//
+
+#include <windows.h>
+#include <winsock2.h>
+
+// Sleep adaptor
+#define usleep(x) Sleep((x)/1000)
+#define sleep(x)  Sleep((x)*1000)
+
+#define vsnprintf _vsnprintf
+#define strtoull  _strtoui64
+#define isatty    _isatty
+
+#define COMP_CDECL __cdecl
+
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN    4321
+#define __BYTE_ORDER    __LITTLE_ENDIAN
+
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define bswap_32(x) ntohl(x)
+#define bswap_16(x) ntohs(x)
+#else
+#error Windows is assumed to run on a little endian architecture
+#endif
+
+
+// DEBUG : Internal imp of strtoull for win2k msvcrt does not have _strtoui64
+
+
+#endif // __WIN__
+
+
+
+
+#include <vector>
+#include <map>
+#include <string>
+#include <utility>
+#include <iostream>
+#include <algorithm>
+
+
+#include "mtcr.h"
+
+//
+// endianess issues:
+//
+
+#ifndef __be32_to_cpu
+    #define __be32_to_cpu(x) ntohl(x)
+#endif
+#ifndef __cpu_to_be32
+    #define __cpu_to_be32(x) htonl(x)
+#endif
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+    #ifndef __cpu_to_le32
+        #define __cpu_to_le32(x) (x)
+    #endif
+    #ifndef __le32_to_cpu
+        #define __le32_to_cpu(x) (x)
+    #endif
+#elif __BYTE_ORDER == __BIG_ENDIAN
+    #ifndef __cpu_to_le32
+        #define __cpu_to_le32(x) bswap_32(x)
+    #endif
+    #ifndef __le32_to_cpu
+        #define __le32_to_cpu(x) bswap_32(x)
+    #endif
+#else
+    #ifndef __cpu_to_le32
+        #define __cpu_to_le32(x) bswap_32(__cpu_to_be32(x))
+    #endif
+    #ifndef __le32_to_cpu
+        #define __le32_to_cpu(x) __be32_to_cpu(bswap_32(x))
+    #endif
+#endif
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+    #ifndef __cpu_to_le16
+        #define __cpu_to_le16(x) (x)
+    #endif
+    #ifndef __le16_to_cpu
+        #define __le16_to_cpu(x) (x)
+    #endif
+    #ifndef __cpu_to_be16
+        #define __cpu_to_be16(x) bswap_16(x)
+    #endif
+    #ifndef __be16_to_cpu
+        #define __be16_to_cpu(x) bswap_16(x)
+    #endif
+#elif __BYTE_ORDER == __BIG_ENDIAN
+    #ifndef __cpu_to_le16
+        #define __cpu_to_le16(x) bswap_16(x)
+    #endif
+    #ifndef __le16_to_cpu
+        #define __le16_to_cpu(x) bswap_16(x)
+    #endif
+    #ifndef __cpu_to_be16
+        #define __cpu_to_be16(x) (x)
+    #endif
+    #ifndef __be16_to_cpu
+        #define __be16_to_cpu(x) (x)
+    #endif
+#else
+    #ifndef __cpu_to_le16
+        #define __cpu_to_le16(x) bswap_16(__cpu_to_be16(x))
+    #endif
+    #ifndef __le16_to_cpu
+        #define __le16_to_cpu(x) __be16_to_cpu(bswap_16(x))
+    #endif
+    #ifndef __cpu_to_be16
+        #define __cpu_to_be16(x) (x)
+    #endif
+    #ifndef __be16_to_cpu
+        #define __be16_to_cpu(x) (x)
+    #endif
+#endif
+
+// Version globals
+#ifdef VERSION_ID
+#define __VFSTR(x) #x
+#define _VFSTR(x)  __VFSTR(x)
+char* _versionID = _VFSTR( VERSION_ID );
+#else
+char* _versionID = "VERSION_ID_HERE";
+#endif
+
+char* _cvsID = "$Revision: 2676 $";
+
+using namespace std;
+
+typedef std::vector<u_int8_t> DataVec;
+
+struct EeLoc {
+    EeLoc(u_int32_t o = 0, u_int8_t d = 0) : offset(o), dev(d) {}
+    u_int32_t offset;
+    u_int8_t  dev;
+};
+
+
+class ErrMsg
+{
+public:
+    ErrMsg() : _err(0)      { }
+    ~ErrMsg()               { err_clear(); }
+    const char *err() const { return _err; }
+    void err_clear()        { delete [] _err; _err = 0; }
+
+protected:
+
+    char *vprint(const char *format, va_list args)
+    {
+        const int INIT_VAL = 1024;
+        int   max_str, max_buf = INIT_VAL;
+        char *out_buf;
+
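+        // Grow the buffer geometrically until the formatted string fits.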
+        while (1)
+        {
+            out_buf = new char[max_buf];
+            max_str = max_buf - 1;
+
+            if (vsnprintf(out_buf, max_str, format, args) < max_str)
+                return out_buf;
+            delete [] out_buf;
+            max_buf *= 2;
+        }
+    }
+
+
+    bool errmsg(const char *format, ...)
+#ifdef __GNUC__
+        __attribute__ ((format (printf, 2, 3)))
+#endif
+    ;
+
+private:
+
+    char *_err;
+};
+
+
+bool ErrMsg::errmsg(const char *format, ...) {
+    va_list args;
+
+    char* prev_err = _err;
+
+    va_start(args, format);
+    _err = vprint(format, args);
+    va_end(args);
+
+    delete[] prev_err;
+
+    return false;
+}
+
+typedef pair<EeLoc, u_int32_t> EeRange;
+
+//
+// Endianness handler:
+//
+inline
+u_int32_t ImgBytes2Dw(const u_int8_t* p) {
+    return __be32_to_cpu(*(u_int32_t*)p);
+}
+
+inline
+void PatchImgBytes(u_int8_t* p, u_int32_t val) {
+    u_int32_t correct_endianess_val = __cpu_to_be32(val);
+    memcpy(p, &correct_endianess_val, 4);
+}
+
+
+
+////////////////////////////////////////////////////////////////////////
+//                                                                    //
+// ****************************************************************** //
+//                        CRC16 CALCULATION                           //
+// ****************************************************************** //
+//                                                                    //
+////////////////////////////////////////////////////////////////////////
+class Crc16 {
+public:
+    Crc16(bool d = false) : _debug(d) { clear();}
+    u_int16_t get()   { return _crc;}
+    void      clear() { _crc = 0xffff;}
+    void      operator<<(u_int32_t val) { add(val);}
+    void      add(u_int32_t val);
+    void      finish();
+private:
+    u_int16_t _crc;
+    bool      _debug;
+};
+
+////////////////////////////////////////////////////////////////////////
+void Crc16::add(u_int32_t o)
+{
+    if (_debug)
+        printf("Crc16::add(%08x)\n", o);
+    for (int i=0; i<32; i++) {
+        if (_crc & 0x8000)
+            _crc = (u_int16_t) ((((_crc<<1) | (o>>31)) ^ 0x100b) & 0xffff);
+        else
+            _crc = (u_int16_t) (((_crc<<1) | (o>>31)) & 0xffff);
+        o = (o<<1) & 0xffffffff;
+    }
+} // Crc16::add
+
+
+////////////////////////////////////////////////////////////////////////
+void Crc16::finish()
+{
+    for (int i=0; i<16; i++) {
+        if (_crc & 0x8000)
+            _crc = ((_crc<<1) ^ 0x100b) & 0xffff;
+        else
+            _crc = (_crc<<1) & 0xffff;
+    }
+
+    // Revert 16 low bits
+    _crc = _crc ^ 0xffff;
+
+} // Crc16::finish
+
+
+//
+// Interface class for translations between addr/dev and contiguous address space.
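+// (Illustrative sketch only, with made-up devices: given two 64KB EEPROMs at
+//  I2C addresses 0x56 and 0x57, described as dev<<24 | size entries,
+//
+//      u_int32_t devs[EeAddressConvertor::EEPROMS] =
+//          { 0x56010000, 0x57010000, 0, 0, 0, 0, 0, 0 };
+//      EeAddressConvertor conv;
+//      conv.Init(devs);
+//      EeLoc loc = conv.Convert(0x10000);   // -> dev 0x57, offset 0x0000
+//
+//  i.e. flat addresses past the first device spill into the next one.)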
+//
+class EeAddressConvertor {
+public:
+    enum {
+        EEPROMS = 8
+    };
+
+    virtual bool      Init (const u_int32_t eepromData[EEPROMS]);
+    virtual bool      Init (const u_int8_t* eepromDataBytes);
+
+    virtual u_int32_t Convert (const EeLoc& ee_loc) const;
+    virtual EeLoc     Convert (u_int32_t addr) const;
+
+    virtual std::vector<EeRange>
+                      ConvertRange (u_int32_t addr,
+                                    u_int32_t len) const;
+
+    virtual u_int32_t GetTotalSize () const;
+
+    virtual void      Clear() {_devmap.clear();}
+
+    virtual ~EeAddressConvertor () {}
+
+protected:
+
+    // In this vector the offset of the EeLoc represents size of Eeprom
+    std::vector<EeLoc> _devmap;
+
+};
+
+//
+// FW Data Section
+//
+struct Section {
+    Section(u_int8_t s, u_int32_t o) :
+        location(o, s),
+        addr(0) {}
+
+    Section(const char *n, u_int8_t s, u_int32_t o) :
+        name(n),
+        location(o, s),
+        addr(0) {}
+
+    std::string name;
+    EeLoc       location;
+    u_int32_t   addr;
+    u_int8_t    type;
+
+    DataVec     data;
+};
+
+typedef std::vector<Section*> SectionVec;
+
+
+// IoBase
+class IoBase : public ErrMsg {
+public:
+    IoBase() :
+        _conv(NULL),
+        _primaryEeprom(0),
+        _secondaryEeprom(0) {}
+
+    virtual ~IoBase() {}
+
+    virtual bool open  (const char *) {return false;}
+
+    virtual bool close () = 0;
+
+    virtual bool read  (u_int32_t addr,
+                        u_int32_t len,
+                        DataVec&  data);
+
+    virtual bool read  (u_int8_t  i2c_slave,
+                        u_int32_t offset,
+                        u_int32_t len,
+                        DataVec&  data) = 0;
+
+    virtual bool SetAddressConvertor(EeAddressConvertor* conv) {_conv = conv; return true;}
+
+    virtual const EeAddressConvertor*
+                 GetAddressConvertor() {return _conv;}
+
+    virtual u_int8_t GetPrimaryEeprom()   {return _primaryEeprom;}
+    virtual u_int8_t GetSecondaryEeprom() {return _secondaryEeprom;}
+
+    virtual void SetPrimaryEeprom(u_int8_t eeprom)   {_primaryEeprom = eeprom;}
+    virtual void SetSecondaryEeprom(u_int8_t eeprom) {_secondaryEeprom = eeprom;}
+
+protected:
+    EeAddressConvertor* _conv;
+
+    u_int8_t _primaryEeprom;
+    u_int8_t _secondaryEeprom;
+};
+
+class ImageFile : public IoBase {
+public:
+
+    virtual bool open  (const char* imageFile);
+    virtual bool close () {return true;}
+
+    virtual bool read  (u_int8_t  i2c_slave,
+                        u_int32_t offset,
+                        u_int32_t len,
+                        DataVec&  data);
+
+
+    virtual const SectionVec&
+                 GetSections() {return _sections;}
+
+
+    virtual const string&
+                 FileName()    {return _fileName;}
+
+
+private:
+
+    typedef std::map<u_int8_t, DataVec> DeviceDataMap;
+
+    bool LoadBin (const char* imageFile) {imageFile = NULL; return false;}
+    bool LoadImg (const char* imageFile);
+
+    DeviceDataMap _deviceData;
+    std::string   _fileName;
+
+    SectionVec    _sections;
+
+};
+
+
+
+// ProgressMeter (Base).
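+// (Typical use, sketched with an assumed byte count: the meter is attached to
+//  an Eeprom and advanced from its read()/write() loops:
+//
+//      PercentProgressMeter prog("Reading image", 0x20000);  // total assumed
+//      eeprom.SetProgressMeter(&prog);  // read()/write() then call prog.Add()
+//      ...
+//      prog.Done();                     // prints the final "OK" status line
+//  )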
+
+class ProgressMeter {
+public:
+    virtual bool Add   (u_int32_t amount) = 0;
+    virtual bool Reset (u_int32_t max = 0) = 0;
+    virtual ~ProgressMeter () {}
+};
+
+class PercentProgressMeter : public ProgressMeter {
+public:
+    PercentProgressMeter (const std::string& msg = "", u_int32_t max = 0) :
+        _add_count(0),
+        _current_progress(0),
+        _progress_max (max),
+        _done (false),
+        _status("OK"),
+        _msg (msg) {}
+
+    virtual bool Add   (u_int32_t amount);
+    virtual bool Reset (u_int32_t max) {_add_count = 0;
+                                        _current_progress = 0;
+                                        _done = false;
+                                        if (max) _progress_max = max;
+                                        return Add(0);}
+
+    void SetMsg(const std::string& msg) {_msg = msg;}
+    void Done(const char* status = NULL) {if (status) _status = status; _current_progress = _progress_max; Add(0);}
+
+private:
+    u_int32_t _add_count;
+    u_int32_t _current_progress;
+    u_int32_t _progress_max;
+    bool      _done;
+    const char* _status;
+
+    std::string _msg;
+
+    static const char _spinner[8];
+};
+
+
+class Eeprom : public IoBase {
+public:
+    Eeprom() :
+        _prog (NULL) {}
+
+    virtual bool open  (const char* dev);
+    virtual bool close ();
+
+    virtual bool write (u_int32_t addr,
+                        DataVec&  data);
+
+    virtual bool write (u_int8_t  i2c_slave,
+                        u_int32_t offset,
+                        DataVec&  data);
+
+
+    virtual bool read  (u_int8_t  i2c_slave,
+                        u_int32_t offset,
+                        u_int32_t len,
+                        DataVec&  data);
+
+    virtual void SetProgressMeter(ProgressMeter* prog) {_prog = prog;}
+
+
+    bool InitImageAddressesFromUser (u_int8_t primarySlaveAddr,
+                                     u_int8_t secondarySlaveAddr);
+
+    bool InitImageAddressesFromIs3  (u_int8_t is3SlaveAddr);
+
+
+private:
+
+    ProgressMeter* _prog;
+
+    enum EPROM_CMD {
+        EPROM_WRITE, EPROM_READ, EPROM_END  // EEPROM commands
+    };
+    enum CONSTANTS {
+        MAX_ERROR_LEN = 512,          // Maximal length of error string
+        EPROM_UNIT = 4,               // EEPROM unit length
+        EPROM_PRIMARY_COUNT = 5000,   // Amount of "fast" check
+        EPROM_SECONDARY_COUNT = 100,  // Amount of "slow" check
+        EPROM_REQUEST_PAUSE = 100,    // Microseconds for pause
+        EPROM_ALIGN_W = 32,           // Write accesses can't cross this boundary
+        EPROM_BURST_W = 32,           // Maximal data burst on I2C bus on write
+        EPROM_BURST_R = 64,           // Maximal data burst on I2C bus on read
+        MAX_REPETITION = 100,         // Maximal number of transaction repet.
+        MAX_I2C_WR_RETRY = 256,       // Maximal number of writing attempts
+        MAX_I2C_RD_RETRY = 16         // Maximal number of reading attempts
+    };
+
+    bool read_field (u_int32_t addr, u_int8_t offs, u_int8_t len, u_int32_t* v);
+    bool write_field(u_int32_t addr, u_int8_t offs, u_int8_t len, u_int32_t v);
+    bool eprom_wait(u_int32_t exp);
+    bool eprom_cmd(EPROM_CMD cmd);
+    bool eprom_check();
+
+    void micro_sleep(const int n);
+
+    //
+    // PCI access functions (MT43132 only)
+    //
+    bool eprom_w_trans(int len, u_int8_t *data);
+    bool eprom_r_trans(int len, u_int8_t *data);
+    bool pci_read(u_int32_t addr, u_int8_t *data, u_int32_t length);
+    bool pci_write(u_int32_t addr, u_int8_t *data, u_int32_t length);
+
+    void eprom_wdelay(int data_size) { micro_sleep((4700 + data_size*10250 + 4700) / 1000 + 1); };
+
+    mfile *_mf;
+    bool   _was_tmo;
+    bool   _verbose;
+
+    const char* _dev;
+
+    bool   _dbg_high;
+    bool   _dbg_low;
+    bool   _internal_read;
+
+public:
+    void wdelay() { eprom_wdelay(EPROM_BURST_W); }
+
+};
+
+
+
+#define MISCC_SPM_ADR7   0x3180,  0, 7
+#define MISCC_SPM_STS    0x3180, 16, 3
+#define MISCC_SPM_VBT    0x3180, 22, 2
+#define MISCC_SPM_CMD    0x3180, 29, 3
+#define MISCC_SPM_DAT_3  0x3184,  0, 32
+
+#define MASK32(S)         ( ((u_int32_t) ~0L) >> (32-(S)) )
+#define BITS32(O,S)       ( MASK32(S) << (O) )
+#define EXTRACT32(W,O,S)  ( ((W)>>(O)) & MASK32(S) )
+#define INSERT32(W,F,O,S) ((W)= ( ( (W) & (~BITS32(O,S)) ) | (((F) & MASK32(S))<<(O)) ))
+
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+////////////////////////////////////////////////////////////////////////
+
+bool Eeprom::InitImageAddressesFromIs3(u_int8_t is3SlaveAddr) {
+
+    // The Primary and Secondary I2c slave addresses of the eeproms are taken from
+    // strapping options and accessible from cr-space.
+    // Primary eeprom   - Cr Address 0x60010 bits 14:8
+    // Secondary eeprom - Hard-wired 4'b1010 concatenated to Cr Address 0x60010 bits {17,16,26}
+
+    // Need to open another interface because address width for is3 access is 32 bits,
+    // and for eeproms it's 16 bits.
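+    // (Worked example with a made-up strapping value, for illustration only:
+    //  if initData == 0x04005700, then
+    //      primary   = EXTRACT32(0x04005700, 8, 7)  = 0x57
+    //      secondary = 0x50 | (EXTRACT32(0x04005700, 16, 2) << 1)
+    //                       |  EXTRACT32(0x04005700, 26, 1)
+    //                = 0x50 | (0 << 1) | 1 = 0x51 )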
+    mfile*    mf;
+    u_int32_t initData;
+    int       ret;
+
+    if (!_mf)
+        return errmsg("Internal error: InitImageAddressesFromIs3 before open()");
+
+    if (!(mf = mopend(_dev, MST_TAVOR)))
+        return errmsg("Failed to open device %s for IS3 access", _dev);
+
+    mset_i2c_slave(mf, is3SlaveAddr);
+    ret = mread4(mf, 0x60010, &initData);
+    mclose(mf);
+
+    if (ret != 4) {
+        return errmsg("IS3 device access failed");
+    }
+
+    _primaryEeprom   = EXTRACT32(initData, 8, 7);
+    _secondaryEeprom = 0x50 | (EXTRACT32(initData, 16, 2) << 1) | EXTRACT32(initData, 26, 1);
+
+    //printf("-D- InitData:%08x Eeprom addresses: P: %02x S: %02x\n", initData, _primaryEeprom, _secondaryEeprom);
+    return true;
+}
+
+bool Eeprom::InitImageAddressesFromUser(u_int8_t primarySlaveAddr, u_int8_t secondarySlaveAddr) {
+    _primaryEeprom   = primarySlaveAddr;
+    _secondaryEeprom = secondarySlaveAddr;
+    return true;
+}
+
+bool Eeprom::open (const char* dev) {
+    // Open MTCR device
+    if ( !(_mf = mopend(dev, MST_GAMLA)) )
+    {
+        return errmsg(strerror(errno));
+    }
+    if (_mf->sock != -1)
+    {
+        return errmsg("Remote operation not supported.");
+    }
+
+    _dev = dev;
+    return true;
+}
+
+bool Eeprom::close () {
+    mclose (_mf);
+    _mf = NULL;
+
+    return true;
+}
+
+
+bool Eeprom::read_field(u_int32_t addr, u_int8_t offs, u_int8_t len, u_int32_t* v)
+{
+
+    if (mread4(_mf, addr, v) != 4)
+        return errmsg("CR read failed: %s", strerror(errno));
+    *v = __le32_to_cpu(*v);
+    *v = EXTRACT32(*v, offs, len);
+
+    if (_dbg_low)
+        printf("%sEeprom::read_field(0x%x, %d, %d) -> 0x%x\n",
+               _internal_read ? "    " : "", addr, offs, len, *v);
+    return true;
+} // Eeprom::read_field
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::write_field(u_int32_t addr, u_int8_t offs, u_int8_t len, u_int32_t val)
+{
+    _internal_read = true;
+    u_int32_t v;
+    if (!read_field(addr, 0, 32, &v)) return false;
+    _internal_read = false;
+
+    INSERT32(v, val, offs, len);
+    v = __cpu_to_le32(v);
+    if (_dbg_low)
+        printf("Eeprom::write_field(0x%x, %d, %d, 0x%x) /0x%x/\n",
+               addr, offs, len, val, v);
+
+    if (mwrite4(_mf, (unsigned int)addr, (unsigned int)v) != 4)
+        return errmsg("Eeprom write field failed: %s", strerror(errno));
+
+    return true;
+} // Eeprom::write_field
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::eprom_wait(u_int32_t exp)
+{
+    u_int32_t f;
+    u_int32_t cnt = 0;
+
+    while(1) {
+        if (!read_field(MISCC_SPM_CMD, &f)) return false;
+        if (f == 0)
+            break;
+        if (cnt > EPROM_PRIMARY_COUNT)
+            micro_sleep(EPROM_REQUEST_PAUSE);
+        if (cnt > EPROM_PRIMARY_COUNT+EPROM_SECONDARY_COUNT)
+        {
+            _was_tmo = true;
+            return errmsg("EPROM command timeout (command is 0x%x instead of 0)", f);
+        }
+        cnt++;
+    }
+
+    if (!read_field(MISCC_SPM_STS, &f)) return false;
+    if (f != exp)
+    {
+        return errmsg("EPROM invalid status (status is 0x%x instead of 0x%x)", f, exp);
+    }
+
+    return true;
+} // Eeprom::eprom_wait
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::eprom_cmd(EPROM_CMD cmd)
+{
+    int exp=0;
+
+    switch (cmd)
+    {
+    case EPROM_READ:
+        if (!write_field(MISCC_SPM_CMD, 1)) return false;
+        exp = 7;
+        break;
+    case EPROM_WRITE:
+        if (!write_field(MISCC_SPM_CMD, 2)) return false;
+        exp = 7;
+        break;
+    case EPROM_END:
+        if (!write_field(MISCC_SPM_CMD, 3)) return false;
+        exp = 0;
+        break;
+    }
+    if (!eprom_wait(exp)) return false;
+
+    return true;
+} // Eeprom::eprom_cmd
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::eprom_check(void)
+{
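+    // If a previous EEPROM transaction was left open (non-zero SPM status),
+    // issue an END command so the next transaction starts from a clean state.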
+    u_int32_t f;
+
+    if (!read_field(MISCC_SPM_STS, &f)) return false;
+    if (f != 0) {
+        if (!eprom_cmd(EPROM_END)) return false;
+    }
+
+    return true;
+} // Eeprom::eprom_check
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::eprom_w_trans(int len, u_int8_t *data)
+{
+    int vbt=-1;      // Initialized w/ invalid value
+    u_int32_t val = 0;
+
+    switch (len)
+    {
+    case 1:
+        vbt = 0;
+        val = (*data << 24) & 0xff000000;
+        break;
+    case 2:
+        vbt = 1;
+        val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000);
+        break;
+    case 3:
+        vbt = 2;
+        val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000) |
+              ((data[2] << 8) & 0xff00);
+        break;
+    case 4:
+        vbt = 3;
+        val = ((*data << 24) & 0xff000000) | ((data[1] << 16) & 0xff0000) |
+              ((data[2] << 8) & 0xff00) | (data[3] & 0xff);
+        break;
+    default:
+        return errmsg("Invalid EEPROM unit length: %d", len);
+    }
+    if (_dbg_high)
+        printf("eprom_w_trans; len:%d val:%08x\n", len, val);
+
+    if (!write_field(MISCC_SPM_VBT, vbt))   return false;
+    if (!write_field(MISCC_SPM_DAT_3, val)) return false;
+    if (!eprom_cmd(EPROM_WRITE))            return false;
+
+    return true;
+} // Eeprom::eprom_w_trans
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::eprom_r_trans(int len, u_int8_t *data)
+{
+    int vbt=-1;      // Initialized w/ invalid value
+    u_int32_t val = 0;
+
+    switch (len)
+    {
+    case 1:
+        vbt = 0;
+        break;
+    case 2:
+        vbt = 1;
+        break;
+    case 3:
+        vbt = 2;
+        break;
+    case 4:
+        vbt = 3;
+        break;
+    default:
+        return errmsg("Invalid EEPROM unit length: %d", len);
+    }
+
+    if (!write_field(MISCC_SPM_VBT, vbt))   return false;
+    if (!eprom_cmd(EPROM_READ))             return false;
+    if (!read_field(MISCC_SPM_DAT_3, &val)) return false;
+
+    switch (len)
+    {
+    case 1:
+        *data   = (val >> 24) & 0xff;
+        break;
+    case 2:
+        *data++ = (val >> 24) & 0xff;
+        *data   = (val >> 16) & 0xff;
+        break;
+    case 3:
+        *data++ = (val >> 24) & 0xff;
+        *data++ = (val >> 16) & 0xff;
+        *data   = (val >> 8) & 0xff;
+        break;
+    case 4:
+        *data++ = (val >> 24) & 0xff;
+        *data++ = (val >> 16) & 0xff;
+        *data++ = (val >> 8) & 0xff;
+        *data   = val & 0xff;
+        break;
+    default:
+        return errmsg("Invalid EEPROM unit length: %d", len);
+    }
+    if (_dbg_high)
+        printf("eprom_r_trans; len:%d %08x\n", len, val);
+
+    return true;
+}
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::pci_read(u_int32_t addr, u_int8_t *data, u_int32_t length)
+{
+    u_int16_t addr16 = (u_int16_t) (addr & 0xffff);
+    int orig_len = length, repeat=1;
+    u_int8_t *orig_data = data;
+
+    _was_tmo = false;
+    while (repeat)
+    {
+
+        addr16 = __cpu_to_be16(addr16);
+        if (!eprom_check() ||                          // Check precondition
+            !eprom_w_trans(2, (u_int8_t *)&addr16)) {  // Write address
+            goto PCI_READ_ERROR;
+        }
+
+        // Read reply
+        while (length)
+        {
+            int unit_len = length > EPROM_UNIT ? (int)EPROM_UNIT : length;
+            if (!eprom_r_trans(unit_len, data)) {
+                goto PCI_READ_ERROR;
+            }
+            data   += unit_len;
+            length -= unit_len;
+        }
+
+        eprom_cmd(EPROM_END);    // End transaction
+        repeat = 0;              // If error wasn't set everything is OK
+
+        continue;
+
+        // Error handling:
+        PCI_READ_ERROR:
+        if (_was_tmo)
+        {
+            if (_dbg_high)
+                printf("pci_read - transaction repeated\n");
+            _was_tmo = false;
+            eprom_cmd(EPROM_END);
+            addr16 = __cpu_to_be16(addr16);
+            length = orig_len;
+            data   = orig_data;
+            if (++repeat > MAX_REPETITION)
+                return errmsg("Pci read failed %d times: %s", MAX_REPETITION, err());
+        }
+        else
+            return false;
+    }
+
+    return true;
+} // Eeprom::pci_read
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::pci_write(u_int32_t addr, u_int8_t *data, u_int32_t length)
+{
+    u_int16_t addr16 = (u_int16_t) (addr & 0xffff);
+    int orig_len = length, repeat=1;
+    u_int8_t *orig_data = data;
+
+    _was_tmo = false;
+    while (repeat)
+    {
+
+        addr16 = __cpu_to_be16(addr16);
+        if (!eprom_check() ||                          // Check precondition
+            !eprom_w_trans(2, (u_int8_t *)&addr16)) {  // Write address
+            goto PCI_WRITE_ERROR;
+        }
+
+        // Write data
+        while (length)
+        {
+            int unit_len = length > EPROM_UNIT ? (int)EPROM_UNIT : length;
+            eprom_w_trans(unit_len, data);
+            data   += unit_len;
+            length -= unit_len;
+        }
+
+        eprom_cmd(EPROM_END);    // End transaction
+        eprom_wdelay(orig_len);
+        repeat = 0;              // If error wasn't set everything is OK
+
+        continue;
+
+        PCI_WRITE_ERROR:
+
+        if (_was_tmo)
+        {
+            if (_dbg_high)
+                printf("pci_write - transaction repeated, cnt=%d\n", repeat);
+            _was_tmo = false;
+            eprom_cmd(EPROM_END);
+            eprom_wdelay(orig_len);
+            addr16 = __cpu_to_be16(addr16);
+            length = orig_len;
+            data   = orig_data;
+            if (++repeat > MAX_REPETITION)
+                return errmsg("Pci write failed %d times: %s", MAX_REPETITION, err());
+        }
+        else
+            return false;
+
+    }
+
+    return true;
+}
+
+
+////////////////////////////////////////////////////////////////////////
+void Eeprom::micro_sleep(const int n)
+{
+    usleep(n);
+} // Eeprom::micro_sleep
+
+
+////////////////////////////////////////////////////////////////////////
+
+
+bool Eeprom::write (u_int32_t addr,
+                    DataVec&  data) {
+
+    if (!_conv) {
+        return errmsg("Internal error: write() when _conv not initialized");
+    }
+
+    vector<EeRange> eer = _conv->ConvertRange(addr, data.size());
+
+    if (eer.size() > 1) {
+        u_int32_t totWritten = 0;
+        for (vector<EeRange>::iterator it = eer.begin(); it != eer.end(); ++it) {
+            DataVec curData;
+
+            u_int32_t chunkSize = it->second;
+            curData.insert(curData.begin(),
+                           data.begin() + totWritten,
+                           data.begin() + totWritten + chunkSize);
+
+            if (!write(it->first.dev, it->first.offset, curData))
+                return false;
+
+            totWritten += chunkSize;
+        }
+    } else if (eer.size() == 1) {
+        if (!write(eer[0].first.dev, eer[0].first.offset, data))
+            return false;
+    } else {
+        // eer.size() == 0 => no conversion:
+        return errmsg("No address/range conversion to eeprom address/size = %x/%x . Total eproms size: %x",
+                      addr,
+                      (u_int32_t)data.size(),
+                      _conv->GetTotalSize());
+
+    }
+
+    return true;
+}
+
+bool Eeprom::write (u_int8_t  i2c_slave,
+                    u_int32_t offset,
+                    DataVec&  data) {
+
+    u_int32_t len = data.size();
+    u_int8_t* pdata = &(data[0]);
+    u_int32_t addr = offset;
+
+
+    while (len)
+    {
+        int i2c_wr_retry=0;
+        int burst_len = len > EPROM_BURST_W ? (int)EPROM_BURST_W : len;
+        int till_next_boundary = EPROM_ALIGN_W - (addr % EPROM_ALIGN_W);
+        if (burst_len > till_next_boundary)
+            burst_len = till_next_boundary;
+
+        if (_mf)
+        {
+            _mf->i2c_slave = i2c_slave;
+            switch(_mf->tp)
+            {
+            case MST_CALBR:
+                if (mwrite64(_mf, addr, pdata, burst_len) != burst_len)
+                {
+                    return errmsg("Write error: I2C slave:0x%02x, ADDR:0x%x - %s\n",
+                                  i2c_slave, addr, strerror(errno));
+
+                }
+                break;
+            case MST_USB:
+            case MST_USB_DIMAX:
+            case MST_IB:
+            case MST_IF:
+                while (mwrite64(_mf, addr, pdata, burst_len) != burst_len)
+                {
+                    if (++i2c_wr_retry > MAX_I2C_WR_RETRY)
+                    {
+                        return errmsg("Write error: I2C slave:0x%02x, ADDR:0x%x - %s\n",
+                                      i2c_slave, addr, strerror(errno));
+                    }
+                }
+                break;
+            case MST_PPC:
+            case MST_PCI:
+            case MST_PCICONF:
+                if (!write_field(MISCC_SPM_ADR7, _mf->i2c_slave & 0x7f)) return false;
+                if (!pci_write(addr, pdata, burst_len)) return false;
+                break;
+            }
+        }
+        else
+            eprom_wdelay(burst_len);
+
+        if (_prog) {
+            _prog->Add(burst_len);
+        }
+
+        pdata += burst_len;
+        len   -= burst_len;
+        addr  += burst_len;
+    }
+
+    return true;
+
+} // burn
+
+
+////////////////////////////////////////////////////////////////////////
+bool Eeprom::read (u_int8_t  i2c_slave,
+                   u_int32_t offset,
+                   u_int32_t len,
+                   DataVec&  data) {
+
+    u_int32_t currSize = data.size();
+    data.resize(currSize + len);
+
+    u_int8_t* pdata = &(data[currSize]);
+    u_int32_t addr = offset;
+
+    u_int8_t vdata[EPROM_BURST_R];
+
+    while (len)
+    {
+        int i2c_rd_retry=0;
+        int burst_len = len > EPROM_BURST_R ? (int)EPROM_BURST_R : len;
+
+        if (_mf)
+        {
+            _mf->i2c_slave = i2c_slave;
+            switch(_mf->tp)
+            {
+            case MST_CALBR:
+                if (mread64(_mf, addr, vdata, burst_len) != burst_len)
+                {
+                    return errmsg("Read error: I2C slave:0x%02x, ADDR:0x%x - %s\n",
+                                  i2c_slave, addr, strerror(errno));
+                }
+                break;
+            case MST_USB:
+            case MST_USB_DIMAX:
+            case MST_IB:
+            case MST_IF:
+                while (mread64(_mf, addr, vdata, burst_len) != burst_len)
+                {
+                    if (++i2c_rd_retry > MAX_I2C_RD_RETRY)
+                    {
+                        return errmsg("Read error: I2C slave:0x%02x, ADDR:0x%x - %s\n",
+                                      i2c_slave, addr, strerror(errno));
+                    }
+                }
+                break;
+            case MST_PPC:
+            case MST_PCI:
+            case MST_PCICONF:
+                if (!write_field(MISCC_SPM_ADR7, _mf->i2c_slave & 0x7f)) return false;
+                if (!pci_read(addr, vdata, burst_len)) return false;
+                break;
+            }
+
+            memcpy(pdata, vdata, burst_len);
+
+        }
+        else
+            eprom_wdelay(burst_len);
+
+        if (_prog) {
+            _prog->Add(burst_len);
+        }
+
+        pdata += burst_len;
+        len   -= burst_len;
+        addr  += burst_len;
+    }
+
+    return true;
+}
+
+
+
+//
+// ProgressBar:
+//
+
+const char PercentProgressMeter::_spinner[8] = {'-','\\','|','/','-','\\','|','/'};
+
+bool PercentProgressMeter::Add (u_int32_t amount) {
+    _current_progress += amount;
+    _add_count++;
+    u_int32_t progress_percent = (_current_progress * 100) / _progress_max;
+    u_int32_t spin_pos = _add_count % sizeof(_spinner);
+
+    if (_current_progress >= _progress_max) {
+        if (_done) {
+            return true;
+        }
+
+        _done = true;
+        spin_pos = 0;
+    }
+
+    printf("\r");
+    printf("%c %s - ", _spinner[spin_pos], _msg.c_str());
+    if (_done) {
+        printf(" %s      \n", _status);
+    } else {
+        printf("%%%02d", progress_percent);
+        fflush(stdout);
+    }
+
+    return true;
+}
+
+
+
+// Algorithm
+
+
+////////////////////////////////////////////////////////////////////////
+//
+// Burn Operations functions
+//
+////////////////////////////////////////////////////////////////////////
+
+class Operations : public ErrMsg {
+public:
+
+    Operations() :
+        _useDefaultAnswer(false)
+    {
+        InitNames();
+    }
+
+    //
+    // IS3 FW IMAGE DATA
+    //
+
+    enum {
+        MAX_IMAGE_SIZE = (128 * 1024),
+        PSID_CR_ADDR   = 0x3ff0c
+    };
+
+
+    // Data initialization types
+    enum SectionType {
+        TYPE_IWI = 1,    // Individual words initialization
+        TYPE_NBI = 2,    // Non-homogeneous block initialization
+        TYPE_HBI = 3,    // Homogeneous block initialization
+        TYPE_RMW = 4,    // Read-modify-write block initialization
+        TYPE_JMP = 5,    // Jump address initialization
+        TYPE_COD = 97,   // Code. Same as TYPE_NBI
+        TYPE_SPC = 98,   // Special structure
+        TYPE_LST = 99,   // Last data record
+
+        // The following types are not "real" type field values (meaning
+        // that they are not recognized by FW as a valid "type" field in
+        // a section).
+        TYPE_PLL = 200,
+        TYPE_BOOT= 201,  // Boot section - different structure
+        TYPE_UNKNOWN= 0,
+    };
+
+    // Special STRUCT ID
+    enum FieldId {
+        FIELD_UNKNOWN= 0,
+        SYS_GUID_ID  = 1,
+        NODE_DESC_ID = 2,
+        NODE_GUID_ID = 5,
+        BRD_ID       = 3,
+        VS_CLS_EN_ID = 4,
+        BSN_ID       = 6,
+
+        // Not in Special Section:
+        PSID = 20,
+        CRC  = 21
+    };
+
+    // Simply burn the ImageFile content to rom. No checks or replaces.
+    bool RawBurn        (Eeprom& eeprom, ImageFile& imageFile);
+
+    bool CompareContent (Eeprom& eeprom, ImageFile& imageFile);
+
+    // Failsafe burn flow:
+    // 1. Read and check imagefile (Primary Image). Exit if bad.
+    // 2. Read and verify full image on eeproms.
+    //    Remember image state (which is valid). Exit if both are bad.
+    // 3. ? Compare AddressConvertors and make sure they're the same.
+    // 4. Read PSID and Special section data from eeprom.
+    // 5. Get PSID and Special section data + addresses from image. (Duplicate them for both images).
+    // 6. Patch the image with the data from eeprom (or from user).
+    // 7. Failsafe burn patched image according to the eeprom image state.
+
+    struct ReplaceFieldData {
+        ReplaceFieldData() :
+            type(FIELD_UNKNOWN),
+            addr(0),
+            size(0) {}
+        FieldId   type;
+        u_int32_t addr;
+        u_int32_t size;
+        DataVec   data;
+    };
+
+    typedef std::map<FieldId, ReplaceFieldData> SpecialFieldsMap;
+
+    struct FwImageData {
+        SectionVec         _sections;
+        EeAddressConvertor _conv;
+        SpecialFieldsMap   _specialFields;
+
+        void Clear() {
+            for (SectionVec::iterator it = _sections.begin(); it != _sections.end(); ++it) {
+                delete *it;
+            }
+
+            _conv.Clear();
+            _specialFields.clear();
+        }
+
+        ~FwImageData() {
+            Clear();
+        }
+    };
+
+    bool IS3FwBurn      (Eeprom& eeprom, ImageFile& imageFile, const SpecialFieldsMap& userFields, bool fsBurn);
+
+    bool CheckFullImage (IoBase& img, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev = 0, bool report = true);
+    bool CheckImage     (IoBase& img, u_int8_t i2cDev, FwImageData& fwData, bool report, bool onlyDataSections = false);
+
+    bool QueryImage     (IoBase& img, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev);
+    bool DumpImage      (Eeprom& eeprom, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev, const char* filename);
+
+    bool ReadBlock      (IoBase& img, u_int8_t i2cDev, u_int32_t offset, u_int32_t size, const char* filename);
+
+    bool GetFieldFromString(SpecialFieldsMap& fields, FieldId fid, const char* str);
+
+    void UseDefaultAnswers() {_useDefaultAnswer = true;}
+
+private:
+
+    bool AskUser        (const char* msg, bool def = true);
+
+    bool GetSpecialData (const SectionVec& sectVec, SpecialFieldsMap& specialData);
+    bool ExtractSpecial (Section* s, SpecialFieldsMap& specialData);
+
+
+    bool DumpSections   (FILE* f, Operations::FwImageData& imgData, bool convertRange = true);
+    bool DumpSpecial    (FILE* f, Operations::FwImageData& imgData);
+
+    bool GetFieldStringRepresentation(const DataVec& data, FieldId t, string& str);
+
+
+    bool FsBurnImage    (Eeprom& eeprom, Operations::FwImageData& image, u_int32_t imgOffset);
+
+    bool PatchNewImage  (FwImageData& newImageData,
+                         const SpecialFieldsMap& curFields,
+                         const SpecialFieldsMap& userFields,
+                         bool ignoreCurrent = false);
+
+    bool BurnSections   (Eeprom& eeprom,
+                         const SectionVec& sectVec,
+                         bool useAddress,
+                         u_int32_t imgOffset,
+                         const char* msg);
+
+    bool CompareSections (Eeprom& eeprom,
+                          const SectionVec& sectVec,
+                          bool useAddress,
+                          u_int32_t imgOffset,
+                          const char* msg);
+
+    bool ReplaceField   (FwImageData& image, FieldId fid, const DataVec& fieldData);
+    bool FixSpecialCrc  (FwImageData& image);
+
+    // Verify CRC of the section. CRC is assumed to be found in the last
+    // DW (BE) in the vector, and calculated over all the vector except the
+    // last DW.
+    // Returns true if CRC is OK, false otherwise.
+    bool CheckCrc       (const DataVec& section);
+
+    u_int32_t CalcCrc   (const DataVec& data, u_int32_t size = 0);
+
+    bool RecalcCrc      (Section* s);
+
+
+    // Section check methods. The section is read from the given eeprom/addr.
+    // Read data is stored in the given sect struct, allocated by the caller.
+    bool CheckPlls      (IoBase& img, u_int8_t eeprom, Section* sect);
+    bool CheckBoot      (IoBase& img, u_int8_t eeprom, Section* sect);
+    bool CheckSection   (IoBase& img, u_int32_t addr, Section* sect);
+
+
+    // Printing methods for verified image
+    bool ReportSection       (Section* sect, const char* status = NULL);
+    bool ReportSectionHeader();
+
+
+    bool PrintSpecialData       (Operations::SpecialFieldsMap& fields, const char* src);
+    bool PrettyPrintSpecialData (Operations::SpecialFieldsMap& fields);
+
+    std::map<SectionType, const char*> _sectionTypeNames;
+    std::map<FieldId, const char*>     _fieldIdNames;
+    std::map<FieldId, u_int32_t>       _fieldIdSizes;
+
+    bool _useDefaultAnswer;
+
+    void InitNames() {
+        // Initialize the (static) names map.
+        if (_sectionTypeNames.empty()) {
+            _sectionTypeNames[TYPE_PLL]     = "PLL";
+            _sectionTypeNames[TYPE_BOOT]    = "BOOT";
+            _sectionTypeNames[TYPE_IWI]     = "IWI";
+            _sectionTypeNames[TYPE_NBI]     = "NBI";
+            _sectionTypeNames[TYPE_HBI]     = "HBI";
+            _sectionTypeNames[TYPE_RMW]     = "RMW";
+            _sectionTypeNames[TYPE_JMP]     = "JUMP";
+            _sectionTypeNames[TYPE_COD]     = "CODE";
+            _sectionTypeNames[TYPE_SPC]     = "SPECIAL";
+            _sectionTypeNames[TYPE_LST]     = "LAST";
+            _sectionTypeNames[TYPE_UNKNOWN] = "UNKNOWN";
+
+            _fieldIdNames[SYS_GUID_ID]  = "System Image GUID";
+            _fieldIdNames[NODE_GUID_ID] = "Node GUID";
+            _fieldIdNames[NODE_DESC_ID] = "Node Description";
+            _fieldIdNames[BSN_ID]       = "Board Serial Number";
+            _fieldIdNames[PSID]         = "PSID";
+
+
+            _fieldIdSizes[SYS_GUID_ID]  = 8;
+            _fieldIdSizes[NODE_GUID_ID] = 8;
+            _fieldIdSizes[NODE_DESC_ID] = 64;
+            _fieldIdSizes[BSN_ID]       = 64;
+            _fieldIdSizes[BRD_ID]       = 4;
+            _fieldIdSizes[VS_CLS_EN_ID] = 4;
+            _fieldIdSizes[PSID]         = 16;
+        }
+    }
+
+
+};
+
+
+bool EeAddressConvertor::Init (const u_int8_t* eepromDataBytes) {
+    u_int32_t eepromData[EEPROMS];
+    for (u_int32_t i = 0; i < EEPROMS; i++) {
+        eepromData[i] = ImgBytes2Dw(eepromDataBytes + i * 4);
+    }
+
+    return Init(eepromData);
+}
+
+bool EeAddressConvertor::Init (const u_int32_t eepromData[EEPROMS]) {
+    _devmap.clear();
+    for (u_int32_t i = 0; i < EEPROMS; i++) {
+        u_int8_t  d = eepromData[i] >> 24;
+        u_int32_t o = eepromData[i] & 0xffffff;
+
+        _devmap.push_back(EeLoc(o,d));
+    }
+
+    return true;
+}
+
+
+u_int32_t EeAddressConvertor::GetTotalSize () const {
+    u_int32_t res = 0;
+    for (vector<EeLoc>::const_iterator it = _devmap.begin(); it != _devmap.end(); ++it) {
+        res += it->offset;
+    }
+
+    return res;
+}
+
+
+EeLoc EeAddressConvertor::Convert (u_int32_t addr) const {
+    vector<EeRange> eer = ConvertRange(addr,1);
+
+    if (eer.empty()) {
+        // TODO: Check return value for all calls of this function.
+        return EeLoc(0xffffffff, 0xff);
+    } else {
+        return eer[0].first;
+    }
+}
+
+
+vector<EeRange> EeAddressConvertor::ConvertRange (u_int32_t addr, u_int32_t len) const {
+    vector<EeRange> res;
+    //printf("-D- Converted (addr,size) => (dev,offset,size) : (%06x, %06x) = > ",addr, len); // NOTE - remove below printf too!!!
+    for (u_int32_t i = 0; len && i < _devmap.size(); i++) {
+        u_int32_t devSize = _devmap[i].offset;
+
+        if (addr < devSize) {
+            if (addr + len <= devSize) {
+                res.push_back(EeRange(EeLoc(addr, _devmap[i].dev), len));
+
+                //for (vector<EeRange>::iterator it = res.begin(); it != res.end(); ++it)
+                //    printf("(%02x,%06x,%06x) ", it->first.dev, it->first.offset, it->second);
+                //printf("\n");
+
+                return res;
+            }
+
+            u_int32_t curr_len = devSize - addr;
+            res.push_back(EeRange(EeLoc(addr, _devmap[i].dev), curr_len));
+            len  -= curr_len;
+            addr  = 0;
+
+        } else {
+            addr -= _devmap[i].offset;
+        }
+    }
+
+    // If we're here - no mapping found.
+    // IBADM_THROW("Addr2OffsetDev: No mapping for the given addr (" << hex << addr << ").");
+    res.clear();
+    return res;
+
+}
+
+
+u_int32_t EeAddressConvertor::Convert(const EeLoc& ee_loc) const {
+    if (_devmap.empty() && ee_loc.dev == 0) {
+        return ee_loc.offset;
+    }
+
+    u_int32_t addr = 0;
+
+    for (u_int32_t i = 0; i < _devmap.size(); i++) {
+        if (_devmap[i].dev && (_devmap[i].dev == ee_loc.dev)) {
+            return addr + ee_loc.offset;
+        } else {
+            addr += _devmap[i].offset;
+        }
+    }
+
+    // If we're here - no mapping found.
+    return (u_int32_t)(-1);
+}
+
+
+
+bool ImageFile::open (const char* imageFile) {
+
+    string tmp = imageFile;
+    string::size_type dotPos = tmp.rfind(".");
+    string ext;
+
+
+    if (dotPos == string::npos) {
+        ext = "";
+    } else {
+        ext = tmp.substr(dotPos, tmp.size() - dotPos);
+    }
+
+    if (ext == ".img" || ext == ".eeprom") {
+        return LoadImg(imageFile);
+//    } else if (ext == ".bin") {
+//        return LoadBin(imageFile);
+    } else {
+        return errmsg("Unsupported file format (%s). Supported formats: .img ",
+                      ext.c_str());
+    }
+}
+
+
+bool ImageFile::LoadImg(const char* imageFile) {
+    // Read and parse image file
+
+    // 1. Load and store IMG sections in the _sections vector
+    // 2. Move data to the _deviceData map.
+    // 3. Mark _primaryEeprom (the first section).
+
+
+    FILE *fp;
+    if ((fp = fopen(imageFile, "r")) == NULL)
+    {
+        return errmsg(strerror(errno));
+    }
+
+    u_int32_t lineNum = 0;
+
+    Section *curr_sect = 0;
+    while(1)
+    {
+        const int MAX_STR = 1024;
+        u_int32_t curr_addr, curr_eepr, curr_value;
+        static char name[MAX_STR], str[MAX_STR], st[MAX_STR];
+        char *endp;
+
+        lineNum++;
+
+        if (!curr_sect)
+        {
+            /*
+             * Outside section
+             */
+            if (!fgets(str, MAX_STR, fp))
+                break;
+
+            // Skip comments and empty lines
+            if (str[strspn(str, " \t")] == '#')
+                continue;
+            if (str[strspn(str, " \t")] == '\n')
+                continue;
+
+            sscanf(str, "%s %s %x %x", st, name, &curr_addr, &curr_eepr);
+            if (strcmp(st, "START"))
+                continue;
+            curr_sect = new Section(name, (u_int8_t)curr_eepr, curr_addr);
+        }
+        else
+        {
+            /*
+             * Inside section
+             */
+            if (fscanf(fp, "%s", str) != 1)
+            {
+                return errmsg("%s:%d: Wrong EEPROM image format - unexpected EOF",
+                              _fileName.c_str(),
+                              lineNum);
+            }
+            if (!strcmp(str, "END"))
+            {
+                if (fscanf(fp, "%s", name) != 1)
+                {
+                    return errmsg(
+                        "Wrong EEPROM image format - after END the name of "
+                        "the input section is expected.");
+                }
+                if (strcmp(curr_sect->name.c_str(), name))
+                {
+                    return errmsg(
+                        "Wrong EEPROM image format - end of section \"%s\" is"
+                        " expected,\nwhile end of section \"%s\" is detected.",
+                        curr_sect->name.c_str(), name);
+                }
+                _sections.push_back(curr_sect);
+                curr_sect = 0;
+            }
+            else
+            {
+                curr_value = strtoul(str, &endp, 16);
+                if (*endp != '\0')
+                    return errmsg("Wrong EEPROM image - hexa constant %s "
+                                  "has invalid characters.", str);
+
+                curr_sect->data.push_back((u_int8_t)curr_value);
+            }
+        }
+    }
+    fclose(fp);
+
+
+    //for (vector<Section*>::iterator it = _sections.begin(); it != _sections.end(); ++it) {
+    //    Section* s = *it;
+    //    printf("-D- %-30s %02x %08x\n",s->name.c_str(), s->location.dev, s->location.offset);
+    //}
+
+
+    if (_sections.empty()) {
+        return errmsg("No image sections found.");
+    }
+
+    // The first section in the given file is written to the primary eeprom.
+    _primaryEeprom = (*_sections.begin())->location.dev;
+
+    // Put the data in the deviceData map
+    for (vector<Section*>::iterator it = _sections.begin(); it != _sections.end(); ++it) {
+
+        Section* s = *it;
+        DeviceDataMap::iterator dit = _deviceData.find(s->location.dev);
+
+        if (dit == _deviceData.end()) {
+            // Add this device
+            _deviceData[s->location.dev];
+
+            dit = _deviceData.find(s->location.dev);
+        }
+
+        DataVec& devData = dit->second;
+
+        if (devData.size() < s->location.offset) {
+            // new section starts beyond the place where prev section ended - resize to start:
+            devData.resize(s->location.offset, 0);
+        }
+
+        if (devData.size() == s->location.offset) {
+            // The section continues the previous section - add to end.
+            devData.insert(devData.end(), s->data.begin(), s->data.end());
+        } else if (devData.size() > s->location.offset + s->data.size()) {
+            for (u_int32_t i = 0; i < s->data.size(); i++) {
+                devData[s->location.offset + i] = s->data[i];
+            }
+        } else {
+            return errmsg("Failed to process sections: Found section %s (%02x,%08x) of size %x bytes, but device current size is %x bytes.",
+                          s->name.c_str(),
+                          s->location.dev,
+                          s->location.offset,
+                          (u_int32_t)s->data.size(),
+                          (u_int32_t)devData.size());
+        }
+    }
+
+    //for (DeviceDataMap::iterator it = _deviceData.begin(); it != _deviceData.end(); ++it) {
+    //    printf("-D- Dev %02x Size %08x\n", it->first, it->second.size());
+    //}
+
+    return true;
+}
+
+
+bool IoBase::read (u_int32_t addr,
+                   u_int32_t len,
+                   DataVec&  data) {
+
+    if (!_conv) {
+        return errmsg("Internal error: read() when _conv not initialized");
+    }
+
+    vector<EeRange> eer = _conv->ConvertRange(addr, len);
+
+    for (vector<EeRange>::iterator it = eer.begin(); it != eer.end(); ++it) {
+        if (!read(it->first.dev, it->first.offset, it->second, data))
+            return false;
+    }
+
+    return true;
+}
+
+
+// read() : Insert the extracted data to the given data vector.
+// Note that if data is not empty, the new data is appended to it.
+bool ImageFile::read (u_int8_t  i2c_slave,
+                      u_int32_t offset,
+                      u_int32_t len,
+                      DataVec&  data) {
+
+
+    DeviceDataMap::iterator dit = _deviceData.find(i2c_slave);
+
+    if (dit == _deviceData.end()) {
+        return errmsg("Tried to read %x bytes from offset %x from a non existing device: %02x.",
+                      len,
+                      offset,
+                      i2c_slave);
+    }
+
+    DataVec& devData = dit->second;
+
+    if (offset + len > devData.size()) {
+        return errmsg("Tried to read %x bytes from offset %x - Beyond device %02x size - %x.",
+                      len,
+                      offset,
+                      i2c_slave,
+                      (u_int32_t)devData.size());
+    }
+
+
+    data.insert(data.end(), devData.begin() + offset, devData.begin() + offset + len);
+
+    return true;
+}
+
+
+
+
+//
+// Operations class implementation
+//
+
+bool Operations::CheckPlls(IoBase& img, u_int8_t eeprom, Section* sect) {
+    sect->type = TYPE_PLL;
+    img.read(eeprom, 0x0, 0x28, sect->data);
+
+    // Verify Checksum
+    const u_int8_t expectedCheckSum = 0xAB;
+    u_int8_t actualCheckSum = 0;
+    for (u_int32_t i = 0; i < 16; i++) {
+        actualCheckSum += sect->data[i];
+    }
+
+    if (actualCheckSum != expectedCheckSum) {
+        return errmsg("Bad PLL checksum: Expected: %02x, Actual: %02x", expectedCheckSum, actualCheckSum);
+    }
+
+    return true;
+}
+
+
+bool Operations::CheckBoot(IoBase& img, u_int8_t eeprom, Section* sect) {
+
+    DataVec data;
+    sect->type = TYPE_BOOT;
+
+    // Read boot2 section size
+    img.read(eeprom, 0x2c, 4, data);
+
+    u_int32_t bootSectSize = ImgBytes2Dw(&data[0]);
+
+    // Check for a reasonable size to make sure we're not working on
+    // corrupted data (e.g. 0xffffffff of a blank eeprom). The boot
+    // section size (and its crc + PLL boot record) can not cross 32KB.
+
+    if (bootSectSize * 4 >= 32*1024 - 0x38) {
+        return errmsg("Boot section check failed. Read boot size from eeprom %02x, "
+                      "offset %x is %x bytes, which is larger than 32KB.",
+                      eeprom,
+                      0x2c,
+                      bootSectSize * 4);
+    }
+
+    u_int32_t bootSize = bootSectSize * 4 + 0x10;
+
+    img.read(eeprom, 0x28, bootSize, sect->data);
+
+    if (!CheckCrc(sect->data)) {
+        return false;
+    }
+
+    return true;
+}
+
+bool Operations::CheckCrc(const DataVec& data) {
+
+    u_int32_t calc_crc = CalcCrc(data, data.size() - 4);
+    u_int32_t sect_crc = ImgBytes2Dw(&data[data.size() - 4]);
+
+    if (sect_crc != calc_crc) {
+        return errmsg("Bad CRC: Actual: %04x, Expected: %04x",
+                      sect_crc,
+                      calc_crc);
+    }
+
+    return true;
+}
+
+
+u_int32_t Operations::CalcCrc(const DataVec& data, u_int32_t size) {
+    if (size == 0) {
+        size = data.size();
+    }
+
+    assert((size % 4) == 0);
+
+    Crc16 crc;
+    for (u_int32_t i = 0; i < size; i += 4) {
+        u_int32_t w = ImgBytes2Dw(&data[i]);
+        crc.add(w);
+    }
+    crc.finish();
+
+    return crc.get();
+}
+
+bool Operations::CheckSection(IoBase& img, u_int32_t addr, Section* sect) {
+    DataVec data;
+
+    img.read(addr + 4, 4, data);
+    u_int32_t type_size = ImgBytes2Dw (&data[0]);
+
+    sect->type = type_size >> 24;
+    u_int32_t sectSize = (type_size & 0xffff) * 4 + 8;
+
+    img.read(addr, sectSize, sect->data);
+
+    if (!CheckCrc(sect->data)) {
+        return false;
+    }
+
+    return true;
+}
+
+bool Operations::ReadBlock (IoBase& img, u_int8_t i2cDev, u_int32_t offset, u_int32_t size, const char* filename) {
+    FwImageData readData;
+
+    Section* sect = new Section("ReadBlock", i2cDev, offset);
+    readData._sections.push_back(sect);
+
+    //printf("-D- bool Operations::ReadBlock ( , %x, %x, %x, %s)\n", i2cDev, offset, size,filename);
+
+    if (!img.read(i2cDev, offset, size, sect->data))
+        return errmsg("Read failed: %s", img.err());
+
+    FILE* of = fopen(filename, "w");
+    if (of == NULL) {
+        return errmsg("Failed to open file %s for writing: %s",
+                      filename,
+                      strerror(errno));
+    }
+
+    if (!DumpSections(of, readData, false)) {
+        return false;
+    }
+
+    fclose(of);
+
+    return true;
+}
+
+
+
+bool Operations::DumpImage (Eeprom& eeprom, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev, const char* filename) {
+
+    bool primaryOk   = false;
+    bool secondaryOk = false;
+
+    primaryI2cDev   = 0;    // COMPILER WARNING;
+    secondaryI2cDev = 0;    // COMPILER WARNING;
+
+    FwImageData primaryImageData;
+    FwImageData secondaryImageData;
+
+    PercentProgressMeter prog("Reading primary image  ", MAX_IMAGE_SIZE);
+    eeprom.SetProgressMeter(&prog);
+
+    primaryOk = CheckImage(eeprom, eeprom.GetPrimaryEeprom(), primaryImageData, false);
+
+    if (primaryOk) {
+        prog.Done();
+    } else {
+        prog.Done(err());
+    }
+
+
+    PercentProgressMeter progSec("Reading secondary image", MAX_IMAGE_SIZE);
+    eeprom.SetProgressMeter(&progSec);
+    secondaryOk = CheckImage(eeprom, eeprom.GetSecondaryEeprom(), secondaryImageData, false);
+    if (secondaryOk) {
+        progSec.Done();
+    } else {
+        progSec.Done(err());
+    }
+
+    // Get special fields on eeprom and image file:
+    GetSpecialData(primaryImageData._sections, primaryImageData._specialFields);
+    GetSpecialData(secondaryImageData._sections, secondaryImageData._specialFields);
+
+    FILE* of = fopen(filename, "w");
+
+    if (of == NULL) {
+        return errmsg("Failed to open file %s for writing: %s",
+                      filename,
+                      strerror(errno));
+    }
+
+    printf("- Writing image file ...\n");
+
+
+    DumpSpecial(of, primaryImageData);
+    DumpSpecial(of, secondaryImageData);
+
+    DumpSections(of, primaryImageData);
+    DumpSections(of, secondaryImageData);
+
+
+    fclose(of);
+
+    if (!(primaryOk && secondaryOk)) {
+        // TODO: Should this be only a warning?
+        return errmsg("Errors detected in the eeprom image - Image file may not be complete");
+    }
+
+
+    return true;
+}
+
+
+bool Operations::DumpSpecial(FILE* f, Operations::FwImageData& imgData) {
+    //fprintf(f, "# Operations::DumpSpecial - NOT YET IMPLEMENTED\n\n");
+    f=NULL;
+    imgData._sections.size();
+    return true;
+}
+
+bool Operations::DumpSections(FILE* f, Operations::FwImageData& imgData, bool convertRange) {
+    for (SectionVec::const_iterator it = imgData._sections.begin(); it != imgData._sections.end(); ++it) {
+        Section* s = *it;
+
+        // Sections are dumped in physical device boundaries:
+        vector<EeRange> eer;
+
+        if (convertRange)
+            eer = imgData._conv.ConvertRange(s->addr, s->data.size());
+        else {
+            eer.push_back(EeRange(s->location, s->data.size()));
+        }
+
+        u_int32_t currOffset = 0;
+
+        for (u_int32_t i = 0; i < eer.size(); ++i) {
+            const char* sectName;
+            char        sectNameBuff[64];
+            SectionType t = (SectionType)s->type;
+
+            if (eer.size() > 1) {
+                sprintf(sectNameBuff, "%s-%d", _sectionTypeNames[t], i);
+                sectName = sectNameBuff;
+            } else {
+                sectName = _sectionTypeNames[t];
+            }
+
+            fprintf(f,
+                    "START %s %08x %02x\n",
+                    sectName,
+                    eer[i].first.offset,
+                    eer[i].first.dev);
+
+            const int bytesInLine = 4;
+            u_int32_t j;
+            for (j = 0; j < eer[i].second; ++j) {
+                fprintf(f, "    %02x", s->data[currOffset + j]);
+                if (((j+1) % bytesInLine) == 0) fprintf(f, "\n");
+            }
+
+            if (j % bytesInLine) fprintf(f, "\n");
+
+            fprintf(f,
+                    "END %s\n\n",
+                    sectName);
+
+            currOffset += eer[i].second;
+        }
+    }
+    return true;
+}
+
+bool Operations::CheckFullImage(IoBase& img, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev, bool report) {
+    bool ret;
+    FwImageData primaryImage;
+    FwImageData secondaryImage;
+
+    if (report) printf("Primary Image:\n");
+    ret = CheckImage(img, primaryI2cDev, primaryImage, report);
+
+    if (ret == false && img.GetAddressConvertor() == NULL && secondaryI2cDev == 0) {
+        // Bad primary image - Address convertor not initialized, and secondary
+        // eeprom is not given - Can't check secondary image.
+        // TODO: See if to return errmsg here;
+        return false;
+    }
+
+    // CheckImage initiates the address convertor:
+    if (secondaryI2cDev == 0) {
+        // Auto detect secondary I2c slave address - It is located in the middle
+        // of the eeproms size:
+        u_int32_t secondaryImageAddr = primaryImage._conv.GetTotalSize() / 2;
+        secondaryI2cDev = primaryImage._conv.Convert(secondaryImageAddr).dev;
+    }
+
+    if (report) printf("\nSecondary Image:\n");
+    if (!CheckImage(img, secondaryI2cDev, secondaryImage, report)) {
+        return false;
+    }
+
+    return true;
+}
+
+bool Operations::QueryImage(IoBase& img, u_int8_t primaryI2cDev, u_int8_t secondaryI2cDev) {
+
+    bool ret;
+    FwImageData primaryImage;
+    FwImageData secondaryImage;
+
+    FwImageData* validImage = NULL;
+
+    ret = CheckImage(img, primaryI2cDev, primaryImage, false, false);
+    if (ret)
+        validImage = &primaryImage;
+
+    if (ret == false && img.GetAddressConvertor() == NULL && secondaryI2cDev == 0) {
+        // Bad primary image - Address convertor not initialized, and secondary
+        // eeprom is not given - Can't check secondary image.
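+        // (For the auto-detection below, an illustration with assumed numbers:
+        //  with two 64KB EEPROMs the total size is 0x20000, so the secondary
+        //  image starts at flat address 0x10000, and Convert(0x10000).dev
+        //  yields the secondary eeprom's I2C address.)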
+        // TODO: See if to return errmsg here;
+        return false;
+    }
+
+    if (validImage == NULL) {
+        // Check secondary image
+        if (secondaryI2cDev == 0) {
+            // Auto detect secondary I2c slave address - It is located in the middle
+            // of the eeproms size:
+            u_int32_t secondaryImageAddr = primaryImage._conv.GetTotalSize() / 2;
+            secondaryI2cDev = primaryImage._conv.Convert(secondaryImageAddr).dev;
+        }
+
+        ret = CheckImage(img, secondaryI2cDev, secondaryImage, false, true);
+        if (ret)
+            validImage = &secondaryImage;
+    }
+
+    if (validImage == NULL) {
+        return errmsg("No valid image.");
+    }
+
+    // Print the selected fields:
+    printf("\nQuery:\n");
+    if (!GetSpecialData(validImage->_sections, validImage->_specialFields)) {
+        return false;
+    }
+
+    PrettyPrintSpecialData(validImage->_specialFields);
+
+    return true;
+}
+
+
+bool Operations::PrettyPrintSpecialData(Operations::SpecialFieldsMap& fields) {
+
+    FieldId queriedFields[] = {NODE_GUID_ID, SYS_GUID_ID, NODE_DESC_ID, BSN_ID, PSID};
+
+    for (u_int32_t i = 0; i < sizeof(queriedFields)/sizeof(queriedFields[0]); i++) {
+        FieldId fid = queriedFields[i];
+
+        ReplaceFieldData& rep = fields[fid];
+        string val;
+
+        if (!GetFieldStringRepresentation(rep.data, rep.type, val)) {
+            return false;
+        }
+
+        if (_fieldIdNames[fid]) {
+            printf("    %-20s %s\n", _fieldIdNames[fid], val.c_str());
+        }
+    }
+
+    return true;
+}
+
+
+bool Operations::PrintSpecialData(Operations::SpecialFieldsMap& fields, const char* src) {
+    printf("    Special data fields of %s:\n", src);
+    printf("%-20s %-8s %-4s %s\n", "FIELD", "ADDR", "SIZE", "VALUE");
+    for (SpecialFieldsMap::iterator it = fields.begin();
+         it != fields.end();
+         ++it) {
+
+        ReplaceFieldData& rep = it->second;
+
+        string val;
+
+        if (!GetFieldStringRepresentation(rep.data, rep.type, val)) {
+            return false;
+        }
+
+        printf("%-20s %8x %4d %s\n",
+               _fieldIdNames[it->first],
+               rep.addr,
+               (u_int32_t)rep.data.size(),
+               val.c_str());
+
+    }
+
+    return true;
+}
+
+
+bool Operations::CheckImage(IoBase& img,
+                            u_int8_t i2cDev,
+                            Operations::FwImageData& imageData,
+                            bool report,
+                            bool onlyDataSections) {
+
+    Section* sect;
+
+    if (report) ReportSectionHeader();
+
+    // PLL Section
+    sect = new Section(i2cDev, 0);
+    sect->addr = 0;
+
+    if (!CheckPlls(img, i2cDev, sect)) {
+        if (report) ReportSection(sect, err());
+        return false;
+    }
+    imageData._sections.push_back(sect);
+    if (report) ReportSection(sect);
+
+    sect = new Section(i2cDev, 0x28);
+    sect->addr = 0x28;
+
+    if (!CheckBoot(img, i2cDev, sect)) {
+        if (report) ReportSection(sect, err());
+        return false;
+    }
+    imageData._sections.push_back(sect);
+    if (report) ReportSection(sect);
+
+    if (!imageData._conv.Init(&sect->data[8])) {
+        return errmsg("Address convertor initiation failed");
+    }
+    img.SetAddressConvertor(&imageData._conv);
+
+    //                      boot start   boot size (in bytes)
+    u_int32_t currAddress = 0x28       + sect->data.size();
+    u_int32_t imageOffset = imageData._conv.Convert(EeLoc(0, i2cDev));
+
+    // Add the imageOffset to the sections already checked:
+    for (SectionVec::iterator it = imageData._sections.begin(); it != imageData._sections.end(); ++it) {
+        Section* s = *it;
+        s->addr += imageOffset;
+    }
+
+    u_int8_t stopAtType = onlyDataSections ? TYPE_JMP : TYPE_LST;
+
+    while (sect->type != stopAtType && currAddress < MAX_IMAGE_SIZE) {
+        u_int32_t absAddress = imageOffset + currAddress;
+        EeLoc loc = imageData._conv.Convert(absAddress);
+
+        sect = new Section(loc.dev, loc.offset);
+        sect->addr = absAddress;
+
+        if (!CheckSection(img, absAddress, sect)) {
+            if (report) ReportSection(sect, err());
+
+            return false;
+        }
+
+        if (report) ReportSection(sect);
+
+        currAddress += sect->data.size();
+        imageData._sections.push_back(sect);
+    }
+
+    return true;
+}
+
+
+bool Operations::ReportSectionHeader() {
+    printf("    %-3s %-6s %-6s %-10s %s\n",
+           "DEV",
+           "OFFSET",
+           "SIZE",
+           "TYPE",
+           "STATUS");
+
+    return true;
+}
+
+bool Operations::ReportSection(Section* sect, const char* status) {
+    char unknownNameBuff[64];
+    const char* sectName;
+
+    if (status == NULL) {
+        status = "OK";
+    }
+
+    map<SectionType, const char*>::iterator it = _sectionTypeNames.find((SectionType)sect->type);
+
+    if (it != _sectionTypeNames.end()) {
+        sectName = it->second;
+    } else {
+        sprintf(unknownNameBuff, "UNKNOWN(%d)", sect->type);
+        sectName = unknownNameBuff;
+    }
+
+    printf("    %02x  %06x %06x %-10s %s\n",
+           sect->location.dev,
+           sect->location.offset,
+           (u_int32_t)sect->data.size(),
+           sectName,
+           status);
+
+    return true;
+
+}
+
+
+
+bool Operations::GetSpecialData (const SectionVec& sectVec, SpecialFieldsMap& specialData) {
+    Section* psidSect    = NULL;
+    Section* specialSect = NULL;
+
+    // Find special section and PSID section.
+    for (SectionVec::const_iterator it = sectVec.begin();
+         it != sectVec.end() && (psidSect == NULL || specialSect == NULL); ++it) {
+        Section* s = *it;
+
+        if (s->type == TYPE_SPC) {
+            specialSect = s;
+        } else if (s->type == TYPE_NBI && s->data.size() == 32) {
+            // Check if this section initiates the PSID according to its CR address.
+            u_int32_t initAddr = ImgBytes2Dw(&s->data[8]);
+            if (initAddr == PSID_CR_ADDR) {
+                psidSect = s;
+            }
+        }
+    }
+
+    // Parse the sections
+    if (!psidSect) {
+        return errmsg("Failed to find PSID section in the given image");
+    } else {
+        specialData[PSID];    // Add the entry to the map.
+        ReplaceFieldData& psidField = specialData[PSID];
+
+        psidField.type = PSID;
+        psidField.addr = psidSect->addr + 12;
+        psidField.size = 16;
+        psidField.data.insert(psidField.data.begin(),
+                              psidSect->data.begin() + 12,
+                              psidSect->data.begin() + 12 + 16);
+
+
+    }
+
+    if (!specialSect) {
+        return errmsg("Failed to find Special section in the given image");
+    }
+
+    return ExtractSpecial(specialSect, specialData);
+
+}
+
+
+bool Operations::ExtractSpecial(Section* s, SpecialFieldsMap& specialData) {
+
+    // Extract the data out of the special section:
+
+    u_int32_t fieldOffset = 8;
+
+
+    while (fieldOffset < s->data.size() - 4) {
+        u_int16_t type = (u_int16_t)ImgBytes2Dw(&s->data[fieldOffset]);
+        u_int32_t fieldSize;
+
+        std::map<FieldId, u_int32_t>::iterator it = _fieldIdSizes.find((FieldId)type);
+
+        if (it == _fieldIdSizes.end()) {
+            return errmsg("Failed to parse Special section: Unknown special field id: %x", type);
+        } else {
+            fieldSize = it->second;
+        }
+
+        if (fieldOffset + fieldSize + 8 > s->data.size()) {
+            return errmsg("Failed to parse Special section: Field \"%s\" of size %d bytes from offset 0x%x "
+                          "exceeds section size 0x%x",
+                          _fieldIdNames[(FieldId)type],
+                          fieldSize,
+                          fieldOffset,
+                          (u_int32_t)s->data.size());
+        }
+
+
+        specialData[(FieldId)type];    // Add the entry to the map.
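+        // Record layout, as parsed here: a 4-byte field id followed by
+        // fieldSize data bytes; the next record starts immediately after
+        // (stride = fieldSize + 4). The section's last DW is its CRC.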
+        ReplaceFieldData& f = specialData[(FieldId)type];
+        f.data.clear();
+
+        f.type = (FieldId)type;
+        f.addr = s->addr + fieldOffset + 4;
+        f.size = fieldSize;
+        f.data.insert(f.data.begin(),
+                      s->data.begin() + fieldOffset + 4,
+                      s->data.begin() + fieldOffset + 4 + fieldSize);
+
+        // printf("-D- Extracted special %d %-20s %06x %d\n", type, _fieldIdNames[f.type], f.addr, f.size);
+
+        fieldOffset = fieldOffset + fieldSize + 4;
+
+    }
+
+    return true;
+}
+
+bool Operations::AskUser (const char* msg, bool def) {
+    printf("%s", msg);
+    if (_useDefaultAnswer) {
+        printf("%s\n", def ? "y" : "n");
+        return def;
+    } else {
+        char ansbuff[32];
+        ansbuff[0] = '\0';
+
+        if (!isatty(0)) {
+            return errmsg("Not on tty - Can't interact. Assuming \"no\" for question \"%s\"", msg);
+        }
+        fflush(stdout);
+        fgets(ansbuff, 30, stdin);
+
+        if (strcmp(ansbuff, "y\n") &&
+            strcmp(ansbuff, "Y\n") &&
+            strcmp(ansbuff, "yes\n") &&
+            strcmp(ansbuff, "Yes\n") &&
+            strcmp(ansbuff, "YES\n"))
+            return errmsg("Aborted by user");
+    }
+    return true;
+}
+
+bool Operations::GetFieldFromString(Operations::SpecialFieldsMap& fields, Operations::FieldId fid, const char* str) {
+
+    u_int64_t guid;
+    char*     eptr;
+
+    u_int32_t len = strlen(str);
+
+    switch (fid) {
+    case SYS_GUID_ID:
+    case NODE_GUID_ID:
+        guid = strtoull(str, &eptr, 16);
+        if (*eptr != '\0' || (guid == 0xffffffffffffffffULL && errno == ERANGE)) {
+            return errmsg("Failed to convert string \"%s\" to %s: %s",
+                          str,
+                          _fieldIdNames[fid],
+                          errno ? strerror(errno) : "Bad format");
+        } else {
+            ReplaceFieldData& f = fields[fid];    // Creates the field in the fields map.
+            f.type = fid;
+            f.data.clear();
+            f.data.resize(8,0);
+
+            ::PatchImgBytes(&f.data[0], guid >> 32);
+            ::PatchImgBytes(&f.data[4], guid & 0xffffffff);
+        }
+        break;
+
+    case BSN_ID:
+    case NODE_DESC_ID:
+
+        if (len > 64) {
+            return errmsg("Field %s size must be less than 64 characters. Given string is %d characters long.",
+                          _fieldIdNames[fid],
+                          len);
+        } else {
+            ReplaceFieldData& f = fields[fid];    // Creates the field in the fields map.
+            f.type = fid;
+            f.data.clear();
+            f.data.resize(64,0);
+
+            for (u_int32_t i = 0; i < len; i++) {
+                f.data[i] = str[i];
+            }
+        }
+
+        break;
+
+    default:
+        return errmsg("Failed to convert the given string \"%s\" to field id %d", str, fid);
+    }
+
+    return true;
+}
+
+
+bool Operations::GetFieldStringRepresentation(const DataVec& data, FieldId t, string& str) {
+
+    char buff[65];
+    u_int32_t guid_high;
+    u_int32_t guid_low;
+    u_int32_t tmp;
+    u_int32_t i;
+
+    //printf("-D- To string: type:%d, size:%d\n", t, data.size());
+
+    switch (t) {
+    case SYS_GUID_ID:
+    case NODE_GUID_ID:
+        guid_high = ImgBytes2Dw(&data[0]);
+        guid_low  = ImgBytes2Dw(&data[4]);
+
+        sprintf(buff, "0x%08x%08x", guid_high, guid_low);
+        break;
+
+    case BSN_ID:
+    case NODE_DESC_ID:
+    case PSID:
+
+        for (i = 0; i < data.size(); i++) {
+            buff[i] = data[i];
+        }
+
+        buff[i] = '\0';
+
+        break;
+
+    case BRD_ID:
+    case VS_CLS_EN_ID:
+        tmp = ImgBytes2Dw(&data[0]);
+        sprintf(buff, "0x%08x", tmp);
+        break;
+
+
+    default:
+        return errmsg("No string representation for the given type ID (%d)", t);
+    }
+
+    str = buff;
+    return true;
+}
+
+
+bool Operations::PatchNewImage (Operations::FwImageData& newImageData,
+                                const Operations::SpecialFieldsMap& curFields,
+                                const Operations::SpecialFieldsMap& userFields,
+                                bool ignoreCurrent) {
+
+    // Patch fields from eeproms to image.
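+    // (Summary of the merge logic below: a user-supplied value takes
+    //  precedence (after confirmation) over the value currently on the
+    //  eeprom, which in turn takes precedence over the value carried by
+    //  the new image file.)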
+    FieldId keepFields[] = {SYS_GUID_ID, NODE_DESC_ID, NODE_GUID_ID, BSN_ID };
+
+    for (u_int32_t i = 0 ; i < sizeof(keepFields)/sizeof(keepFields[0]); i++) {
+        // Check if the field is forced by user
+        FieldId fid = keepFields[i];
+
+        const DataVec* fieldData;
+
+        SpecialFieldsMap::const_iterator userFieldIt = userFields.find(fid);
+        SpecialFieldsMap::const_iterator curFieldIt  = curFields.find(fid);
+
+        if (ignoreCurrent) {
+            // Take whatever field we can from the current image.
+            // If not in the image (when burning a blank eeprom), take from user-defined values.
+            // If not in user-defined values, leave as is in the new image.
+
+            if (userFieldIt != userFields.end()) {
+                fieldData = &userFieldIt->second.data;
+            } else if (curFieldIt != curFields.end()){
+                fieldData = &curFieldIt->second.data;
+            } else {
+                // Keep the current value of the field in the image:
+                continue;
+            }
+        } else {
+
+            //
+            // Consider current fields - If a field is not found in the current image, error.
+            // Prompt user for user defined values.
+            //
+
+            if (curFieldIt == curFields.end()) {
+                return errmsg("Cannot read field \"%s\" from the eeprom", _fieldIdNames[fid]);
+            }
+
+            const ReplaceFieldData& curField = curFieldIt->second;
+
+            if (userFieldIt != userFields.end()) {
+                string curVal;
+                string userVal;
+
+                const ReplaceFieldData& userField = userFieldIt->second;
+
+                GetFieldStringRepresentation(curField.data, fid, curVal);
+                GetFieldStringRepresentation(userField.data, fid, userVal);
+
+
+                printf("\n You are about to replace %s in eeprom:\n"
+                       " Current value: %s\n"
+                       " New value:     %s\n",
+                       _fieldIdNames[fid],
+                       curVal.c_str(),
+                       userVal.c_str()
+                       );
+
+                if (!AskUser("\n Is it OK ? (y/n) [n] : "))
+                    return errmsg("Aborted by user");
+
+                fieldData = &userField.data;
+
+            } else {
+                fieldData = &curField.data;
+            }
+        }
+
+        // Apply the field to the new image:
+        if (!ReplaceField(newImageData, fid, *fieldData)) {
+            return false;
+        }
+    }
+
+    // Fix CRC after applying current/user supplied fields.
+    if (!FixSpecialCrc(newImageData)) {
+        return false;
+    }
+
+    return true;
+}
+
+bool Operations::ReplaceField(Operations::FwImageData& image, Operations::FieldId fid, const DataVec& fieldData) {
+
+    ReplaceFieldData& f = image._specialFields[fid];
+
+    Section* targetSection = NULL;
+
+    // Looking for the section to patch with new data.
+    // Assuming that the new data occupies a single section,
+    // and that sections are ordered in ascending addr.
+    for (SectionVec::iterator it = image._sections.begin(); it != image._sections.end(); ++it) {
+        Section* s = *it;
+
+        if (s->addr <= f.addr && (s->addr + s->data.size()) > f.addr ) {
+            targetSection = s;
+            break;
+        }
+
+    }
+
+    if (targetSection == NULL) {
+        return errmsg("Bad image structure: Replaced field %s at address %x is not contained in any section",
+                      _fieldIdNames[fid],
+                      f.addr);
+    }
+
+    u_int32_t fieldSectOffset = f.addr - targetSection->addr;
+    if (targetSection->data.size() < fieldSectOffset + fieldData.size()) {
+        return errmsg("Bad image structure: Replaced field %s at address %x of size %d "
+                      "does not fit in the section starting at address %x , size %x",
+                      _fieldIdNames[fid],
+                      f.addr,
+                      (u_int32_t)fieldData.size(),
+                      targetSection->addr,
+                      (u_int32_t)targetSection->data.size());
+    }
+
+    // Patch the section data:
+    for (u_int32_t i = 0; i < fieldData.size() ; i++) {
+        targetSection->data[fieldSectOffset + i] = fieldData[i];
+    }
+
+    return true;
+}
+
+bool Operations::FixSpecialCrc(Operations::FwImageData& image) {
+    Section* specialSect = NULL;
+
+    // Find the special section.
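+    /*
+     * The loop below first locates the special section. The section keeps
+     * its CRC in its last four bytes, so after any field is patched the
+     * checksum must be recomputed over everything but that trailer.  A
+     * sketch of the recalculation performed by RecalcCrc() further down,
+     * using CalcCrc and PatchImgBytes as elsewhere in this file:
+     *
+     *     u_int32_t crc = CalcCrc(data, size - 4); // cover all but the trailer
+     *     PatchImgBytes(&data[size - 4], crc);     // store CRC in the last dword
+     */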
+    for (SectionVec::const_iterator it = image._sections.begin();
+         it != image._sections.end() ; ++it) {
+        Section* s = *it;
+
+        if (s->type == TYPE_SPC) {
+            specialSect = s;
+            break;
+        }
+    }
+
+    if (specialSect == NULL) {
+        return errmsg("Special section not found");
+    }
+
+    return RecalcCrc(specialSect);
+}
+
+
+bool Operations::RecalcCrc(Section* s) {
+    u_int32_t crc = CalcCrc(s->data, s->data.size() - 4);
+
+    ::PatchImgBytes(&s->data[s->data.size()-4], crc);
+    return true;
+}
+
+bool Operations::IS3FwBurn (Eeprom&                 eeprom,
+                            ImageFile&              imageFile,
+                            const SpecialFieldsMap& userFields,
+                            bool                    fsBurn) {
+    // Failsafe burn flow:
+    // 1. Read and check imagefile (Primary Image). Exit if bad.
+    // 2. Read and verify full image on eeproms.
+    //    Remember image state (which is valid). Exit if both are bad.
+    // 3. Compare AddressConvertors and make sure they're the same.
+    // 4. Read PSID and Special section data from eeprom.
+    // 5. Get PSID and Special section data + addresses from the image file (duplicate them for both images).
+    // 6. Patch the image with the data from eeprom (or from user).
+    // 7. Failsafe burn patched image according to the eeprom image state.
+
+    u_int8_t primaryI2cDev = imageFile.GetPrimaryEeprom();
+
+    FwImageData newImageData;
+    if (!CheckImage(imageFile, primaryI2cDev, newImageData, false)) {
+        return errmsg("Bad image in file %s: %s", imageFile.FileName().c_str(), err());
+    }
+
+
+    bool primaryOk   = false;
+    bool secondaryOk = false;
+
+    FwImageData curImageData;
+
+    PercentProgressMeter prog("Checking primary image    " , MAX_IMAGE_SIZE);
+    eeprom.SetProgressMeter(&prog);
+
+    primaryOk = CheckImage(eeprom, eeprom.GetPrimaryEeprom(), curImageData, false, !fsBurn);
+
+    if (primaryOk) {
+        prog.Done();
+    } else {
+        prog.Done(err());
+    }
+
+    if (!primaryOk) {
+        curImageData.Clear();
+        PercentProgressMeter progSec("Checking secondary image  " , MAX_IMAGE_SIZE);
+        eeprom.SetProgressMeter(&progSec);
+        secondaryOk = CheckImage(eeprom, eeprom.GetSecondaryEeprom(), curImageData, false, !fsBurn);
+        if (secondaryOk) {
+            progSec.Done();
+        } else {
+            progSec.Done(err());
+        }
+    }
+
+    if (!primaryOk && !secondaryOk) {
+        if (fsBurn) {
+            return errmsg("Both images on eeproms %02x and %02x are not valid. Cannot perform failsafe burn.",
+                          eeprom.GetPrimaryEeprom(),
+                          eeprom.GetSecondaryEeprom());
+        } else {
+            eeprom.SetAddressConvertor(&newImageData._conv);
+        }
+    }
+
+    // Get special fields on eeprom and image file :
+    if (!GetSpecialData(newImageData._sections, newImageData._specialFields)) {
+        return errmsg("Failed to get special fields from the given image file: %s", err());
+    }
+
+    if (!GetSpecialData(curImageData._sections, curImageData._specialFields) && fsBurn) {
+        return errmsg("Failed to get special fields from the eeprom image: %s", err());
+    }
+
+    //PrintSpecialData(newImageData._specialFields, "Image file");
+    //PrintSpecialData(curImageData._specialFields, "Eeprom");
+
+    // Check PSIDs
+    string curPsid;
+    string newPsid;
+    if (!GetFieldStringRepresentation(curImageData._specialFields[PSID].data, PSID, curPsid) ) {
+        return errmsg("Failed getting PSID of the image on eeprom.");
+    }
+
+    if (!GetFieldStringRepresentation(newImageData._specialFields[PSID].data, PSID, newPsid) ) {
+        return errmsg("Failed getting PSID of the given image file.");
+    }
+
+    if (curPsid != newPsid) {
+        printf("\n You are about to replace current PSID in eeprom - \"%s\" with a different PSID - \"%s\".\n",
+               curPsid.c_str(),
+               newPsid.c_str());
+
+        if (!AskUser("\n Is it OK ? (y/n) [n] : "))
+            return errmsg("Aborted by user");
+    }
+
+    if (!PatchNewImage(newImageData, curImageData._specialFields , userFields, !fsBurn)) {
+        return false;
+    }
+
+    //
+    // Burn:
+    //
+
+    u_int32_t primaryOffset   = 0;
+    u_int32_t secondaryOffset = newImageData._conv.Convert(EeLoc(0, eeprom.GetSecondaryEeprom()));
+
+    if (secondaryOffset == 0xffffffff) {
+        return errmsg("Can't map secondary eeprom i2c address (%02x).", eeprom.GetSecondaryEeprom());
+    }
+
+    //printf("\n-I- Burning image with the following values:\n");
+    //PrettyPrintSpecialData(newImageData._specialFields);
+    //printf("\n\n");
+
+    if (fsBurn) {
+        if (secondaryOk) {
+            // Burn primary image first.
+            if (!FsBurnImage(eeprom, newImageData, primaryOffset  )) return false;
+            if (!FsBurnImage(eeprom, newImageData, secondaryOffset)) return false;
+        } else {
+            // Burn secondary image first.
+            if (!FsBurnImage(eeprom, newImageData, secondaryOffset)) return false;
+            if (!FsBurnImage(eeprom, newImageData, primaryOffset  )) return false;
+        }
+    } else {
+        if (!FsBurnImage(eeprom, newImageData, primaryOffset )) return false;
+    }
+    return true;
+}
+
+
+
+
+bool Operations::FsBurnImage(Eeprom& eeprom, Operations::FwImageData& image, u_int32_t imgOffset) {
+
+    // Invalidate the PLL boot record checksum.
+
+    SectionVec& sectVec = image._sections;
+
+    DataVec  origData(4);
+    DataVec& pllSectData = sectVec[0]->data;
+    for (u_int32_t i = 0 ; i < 4 ;i++) {
+        origData[i] = pllSectData[i];
+    }
+
+    pllSectData[3] = pllSectData[3] + 0xA;
+
+    const char *burnMsg;
+    const char *verifyMsg;
+    if (imgOffset == 0) {
+        burnMsg   = "Burning primary image     ";
+        verifyMsg = "Verifying primary image   ";
+    } else {
+        burnMsg   = "Burning secondary image   ";
+        verifyMsg = "Verifying secondary image ";
+    }
+
+    if (!BurnSections (eeprom, sectVec, true, imgOffset, burnMsg)) {
+        return false;
+    }
+
+    if (!CompareSections(eeprom, sectVec, true, imgOffset, verifyMsg)) {
+        return false;
+    }
+
+    // Restore PLL boot record section with correct checksum.
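+    /*
+     * Failsafe scheme (sketch): byte 3 of the PLL boot record was perturbed
+     * (+0xA) above so its checksum is invalid and the device will not boot
+     * from a partially written image; only now, after all sections are
+     * burnt and verified, is the original dword written back, validating
+     * the image with a single small write:
+     *
+     *     invalidate(boot record);   // image unbootable while incomplete
+     *     burn(all sections);
+     *     verify(all sections);
+     *     restore(boot record);      // this write makes the image valid
+     */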
+ // TODO: Verify: + if (!eeprom.write(imgOffset , origData)) { + return errmsg("EEprom write failed: %s", eeprom.err()); + } + + // Restore the image in memory for future use + for (u_int32_t i = 0 ; i < 4 ;i++) { + pllSectData[i] = origData[i]; + } + + return true; +} + + +bool Operations::BurnSections(Eeprom& eeprom, const SectionVec& sectVec, bool useAddress, u_int32_t imgOffset, const char* msg ) { + + u_int32_t totSize = 0; + SectionVec::const_iterator it; + + for (it = sectVec.begin() ; it != sectVec.end() ; ++it) { + Section* s = *it; + totSize += s->data.size(); + } + + PercentProgressMeter prog(msg, totSize); + eeprom.SetProgressMeter(&prog); + + for (it = sectVec.begin() ; it != sectVec.end(); ++it) { + Section* s = *it; + if (useAddress) { + eeprom.write(s->addr + imgOffset, s->data); + } else { + eeprom.write(s->location.dev, s->location.offset, s->data); + } + } + + eeprom.SetProgressMeter(NULL); + return true; +} + +bool Operations::CompareSections(Eeprom& eeprom, const SectionVec& sectVec, bool useAddress, u_int32_t imgOffset, const char* msg ) { + + u_int32_t totSize = 0; + SectionVec::const_iterator it; + + for (it = sectVec.begin() ; it != sectVec.end() ; ++it) { + Section* s = *it; + totSize += s->data.size(); + } + + PercentProgressMeter prog(msg, totSize); + eeprom.SetProgressMeter(&prog); + + for (SectionVec::const_iterator it = sectVec.begin() ; + it != sectVec.end(); + ++it) { + + Section* s = *it; + DataVec readData; + + if (useAddress) { + ((IoBase*)(&eeprom))->read(s->addr + imgOffset, s->data.size(), readData); + } else { + eeprom.read(s->location.dev, s->location.offset, s->data.size(), readData); + } + + // Compare: + for (u_int32_t i = 0; i < readData.size(); i++) { + if (readData[i] != s->data[i]) { + if (useAddress) { + //for (u_int32_t j = 0; j < 100 ; j++ ) { + // printf("%06x %02x %02x\n", + // } + return errmsg("Address %06x: Expected %02x, got %02x", + s->addr + imgOffset + i, + s->data[i], + readData[i]); + } else { + return errmsg("Eeprom %02x, Offset %x: Expected %02x, got %02x", + s->location.dev, + s->location.offset + i, + s->data[i], + readData[i]); + } + } + } + } + + eeprom.SetProgressMeter(NULL); + return true; +} + + + +bool Operations::RawBurn(Eeprom& eeprom, ImageFile& imageFile) { + const SectionVec& sectVec = imageFile.GetSections(); + + return BurnSections(eeprom, sectVec, false, 0, "Burning Image"); +} + +bool Operations::CompareContent(Eeprom& eeprom, ImageFile& imageFile) { + const SectionVec& sectVec = imageFile.GetSections(); + + return CompareSections(eeprom, sectVec, false, 0, "Verifying "); +} + + + + +//////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////// +//////////////////////////////////////////////////////////////////////// + +#ifndef SPARK_NAME +#define SPARK_NAME "spark" +#define DEV_MST_EXAMPLE1 "/dev/mst/mtusb-1" +#define DEV_MST_EXAMPLE2 "/dev/mst/mtusb-1" +#endif + +void usage(const char *sname, bool full = false) +{ + const char *descr = + "\n" + " SPARK - Eeprom FW burning tool for Mellanox IS/IS3 switches.\n" + "\n" + "\n" + "Usage:\n" + "------\n" + "\n" + " " SPARK_NAME " [switches...] 
[parameters...]\n"
+        "\n"
+        "\n"
+        "Switches summary:\n"
+        "-----------------\n"
+        "\n"
+//      "    -crc               - Print CRC after each section when verify.\n"
+//      "\n"
+        "    -d[evice]          - The device the eeprom is connected to.\n"
+        "                         Commands affected: all\n"
+        "\n"
+        "    -i[mage]           - Image file in \".img\" format.\n"
+        "                         Commands affected: burn, verify\n"
+        "\n"
+        "    -guid              - Use the given guid as the node guid of the burnt image.\n"
+        "                         By default, the guid is taken from the image on the eeprom\n"
+        "\n"
+        "                         Commands affected: burn\n"
+        "\n"
+        "    -sysguid           - Use the given guid as the system image guid of the burnt image.\n"
+        "                         By default, this value is taken from the current image on the eeprom\n"
+        "\n"
+        "                         Commands affected: burn\n"
+        "\n"
+        "    -bsn               - Mellanox Board Serial Number (BSN).\n"
+        "                         Valid BSN format is:\n"
+        "                                 MTxxxxx[-]R[xx]ddmmyy-nnn[-cc]\n"
+        "                         By default, this value is taken from the current image on the eeprom\n"
+        "\n"
+        "                         Commands affected: burn\n"
+        "\n"
+        "    -ndesc             - Use the given string (max 64 characters) as the node description.\n"
+        "                         By default, this value is taken from the current image on the eeprom\n"
+        "\n"
+        "                         Commands affected: burn\n"
+        "\n"
+        "    -is3_i2c           - Provides the I2C address of the switch device. If this flag is not specified,\n"
+        "                         then the default address for Mellanox switch devices is: 0x6c.\n"
+        "\n"
+        "    -pe_i2c            - Provides the I2C address of the primary EEPROM. By default, this address is read\n"
+        "                         from the Mellanox switch device. Use this flag only if the switch device is not\n"
+        "                         accessible.\n"
+        "\n"
+        "    -se_i2c            - Provides the I2C address of the secondary EEPROM. By default, this address is read\n"
+        "                         from the Mellanox switch device. Use this flag only if the switch device is not\n"
+        "                         accessible.\n"
+        "\n"
+
+        "    -h[elp]            - Prints this message and exits\n"
+        "    -hh                - Prints extended command help\n"
+        "\n"
+        "    -nofs              - Burn image not in a failsafe manner.\n"
+        "\n"
+//      "    -s[ilent]          - Do not print burn progress flyer.\n"
+//      "                         Commands affected: burn\n"
+//      "\n"
+        "    -y[es]             - Non interactive mode - assume answer\n"
+        "                         \"yes\" to all questions.\n"
+        "                         Commands affected: all\n"
+        "\n"
+        "    -v                 - Version info.\n"
+        "\n"
+        "Commands summary (use -hh flag for full commands description):\n"
+        "-----------------\n"
+        "    b[urn]   - Burn flash\n"
+        "    q[uery]  - Query misc. eeprom/FW characteristics\n"
+        "    v[erify] - Verify entire eeprom\n"
+        "    bb       - Burn Block - Burns the given image as is. No checks are done.\n"
+        "    ri       - Read the fw image on the flash.\n"
+//      "    dc       - Dump Configuration: print fw configuration file for the given image.\n"
+        "\n";
+
+
+
+    const char* full_descr =
+        "\n"
+        "Command descriptions:\n"
+        "----------------------------\n"
+        "\n"
+        "* Burn eeprom\n"
+        "  Burns entire flash from raw binary image.\n"
+        "\n"
+        "    Command:\n"
+        "        b[urn]\n"
+        "    Parameters:\n"
+        "        None\n"
+        "    Examples:\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE1 " -i image1.img burn\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE2 " -guid 0x2c9000100d050 -i image1.img b\n"
+        "\n"
+        "\n"
+        "* Burn Block\n"
+        "  Burns the entire eeprom from raw image as is. No checks are done on the eeprom or\n"
+        "  on the given image file. No fields (such as BSN or Guids) are read from eeprom.\n"
+        "  This command can be used to burn InfiniScale devices.\n"
+        "\n"
+        "    Command:\n"
+        "        bb\n"
+        "    Parameters:\n"
+        "        None\n"
+        "    Examples:\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE1 " -i image1.img bb\n"
+        "\n"
+        "\n"
+        "* Query miscellaneous FW and eeprom parameters\n"
+        "\n"
+        "    Command:\n"
+        "        q[uery]\n"
+        "    Parameters:\n"
+        "        None\n"
+        "    Example:\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE1 " query\n"
+        "\n"
+        "* Verify entire flash.\n"
+        "\n"
+        "    Command:\n"
+        "        v[erify]\n"
+        "    Parameters:\n"
+        "        None\n"
+        "    Example:\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE1 " v\n"
+        "\n"
+        "* Read the FW image from eeprom and write it to a file.\n"
+        "\n"
+        "    Command:\n"
+        "        ri\n"
+        "    Parameters:\n"
+        "        file - filename to write the image to (in .img format).\n"
+        "    Example:\n"
+        "        " SPARK_NAME " -d " DEV_MST_EXAMPLE1 " ri file.img\n"
+        "\n"
+        "\n";
+
+    printf(descr, sname);
+
+    if (full) {
+        printf(full_descr, sname);
+    }
+}
+
+
+//
+// Commands database and parsing methods
+//
+enum CommandInput {
+    CI_NONE        = 0x01,
+    CI_IMG_ONLY    = 0x02,
+    CI_DEV_ONLY    = 0x04,
+    CI_IMG_OR_DEV  = 0x06,
+    CI_IMG_AND_DEV = 0x08
+};
+
+enum CommandType {
+    CMD_UNKNOWN,
+    CMD_BURN,
+    CMD_BURN_BLOCK,
+    CMD_QUERY,
+    CMD_VERIFY,
+    CMD_READ_BLOCK,
+    CMD_READ_IMAGE,
+};
+
+struct CommandInfo {
+    CommandType  cmd;
+    const char*  cmdName;
+    bool         requireExactMatch;
+    int          maxArgs;
+    CommandInput requiredInput;
+    const char*  cmdDescription;
+
+};
+
+CommandInfo const g_commands[] = {
+    { CMD_BURN       , "burn"  ,false , 0 , CI_IMG_AND_DEV , ""},
+    { CMD_BURN_BLOCK , "bb"    ,true  , 0 , CI_IMG_AND_DEV , ""},
+    { CMD_QUERY      , "query" ,false , 0 , CI_IMG_OR_DEV  , ""},
+    { CMD_VERIFY     , "verify",false , 0 , CI_IMG_OR_DEV  , ""},
+    { CMD_READ_BLOCK , "rb"    ,true  , 4 , CI_DEV_ONLY    , ""},
+    { CMD_READ_IMAGE , "ri"    ,true  , 1 , CI_DEV_ONLY    , ""},
+};
+
+#define numbel(x) (sizeof(x)/sizeof((x)[0]))
+
+
+const CommandInfo* GetCommandInfo(CommandType cmd) {
+    for (u_int32_t i = 0 ; i < numbel(g_commands); i++ ) {
+        if (cmd == g_commands[i].cmd) {
+            return &g_commands[i];
+        }
+    }
+
+    return NULL;
+}
+
+CommandType ParseCommand(const char* cmd) {
+    u_int32_t cmdLenGiven = strlen(cmd);
+
+    for (u_int32_t i = 0 ; i < numbel(g_commands); i++ ) {
+        if (g_commands[i].requireExactMatch ) {
+            if (!strcmp(cmd, g_commands[i].cmdName)) {
+                return g_commands[i].cmd;
+            }
+        } else {
+            // Match if the given cmd matches the beginning of the checked cmd
+            if (!strncmp(cmd, g_commands[i].cmdName, cmdLenGiven )) {
+                return g_commands[i].cmd;
+            }
+        }
+    }
+    return CMD_UNKNOWN;
+}
+
+
+bool CheckCommandInputs(const char* dev,
+                        const char* img,
+                        CommandType cmd) {
+
+    const CommandInfo* cmdInfo = GetCommandInfo(cmd);
+
+    if (!cmdInfo) {
+        printf("*** INTERNAL ERROR *** Unknown command given to CheckCommandInputs() (%d)\n", cmd);
+        return false;
+    }
+
+    const char* inputDesStr [] = {
+        NULL,
+        "neither a device nor an image file",   // CI_NONE
+        "only an image file",                   // CI_IMG_ONLY,
+        NULL,
+        "only a device",                        // CI_DEV_ONLY,
+        NULL,
+        "either an image file or a device",     // CI_IMG_OR_DEV,
+        NULL,
+        "both an image file and a device"       // CI_IMG_AND_DEV
+    };
+
+    CommandInput given;
+
+    if ( dev && img) {
+        given = CI_IMG_AND_DEV;
+    } else if (!dev && img) {
+        given = CI_IMG_ONLY;
+    } else if (dev && !img) {
+        given = CI_DEV_ONLY;
+    } else {
+        given = CI_NONE;
+    }
+
+    if ((given & cmdInfo->requiredInput) == 0) {
+        printf("-E- Command \"%s\" requires %s, but %s %s given.\n",
+               cmdInfo->cmdName,
+               inputDesStr[cmdInfo->requiredInput],
+               inputDesStr[given],
+               given == CI_IMG_AND_DEV ?
"are" : "is"); + return false; + } + + return true; +} + +bool CheckMaxCmdArguments(CommandType cmd, int numArgs) { + const CommandInfo* cmdInfo = GetCommandInfo(cmd); + if (!cmdInfo) { + printf("*** INTERNAL ERROR *** Unknown command given to CheckMaxCmdArguments (%d)\n", cmd); + return false; + } + + if (cmdInfo->maxArgs >= 0 && numArgs > cmdInfo->maxArgs) { + printf("-E- Command \"%s\" requires %d arguments, but %d arguments were given\n", + cmdInfo->cmdName, + cmdInfo->maxArgs, + numArgs); + return false; + } + return true; +} + + + + +#define SETERR(args) do { printf("-E- "); printf args ; printf("\n"); return 1; } while(0) + +//////////////////////////////////////////////////////////////////////// +#define NEXT(s) do { \ + if (i+1 >= ac) \ + { \ + SETERR(("Missing parameter after \"%s\" switch", s)); \ + exit(1); \ + } else i++;} while(0) + +#define NEXTC(c,param) do { \ + if (++i >= ac) \ + { \ + SETERR(("Command \"%s\": Missing parameter <%s> ", c , param)); \ + exit(1); \ + }} while(0) + +int main(int ac, char *av[]) +{ + bool verbose = true; + + bool fsBurn = true; + bool useDefaultAnswers = false; + + + char *image_fname=0, *dev=0; + + const char* userNodeGuid = NULL; + const char* userSysGuid = NULL; + const char* userBsn = NULL; + const char* userNodeDesc = NULL; + + const char* cmdStr = NULL; + + u_int8_t is3I2cSlave = 0x6C; // Default IS3 I2C address in Mellanox switch systems. + u_int8_t userPrimaryI2cAddr = 0; + u_int8_t userSecondaryI2cAddr = 0; + + int i; + + // Command line parameters + if (ac < 2) + usage(av[0]); + for (i = 1; i < ac; i++) + { + if (*av[i] == '-') + { + int switchLen = strlen(av[i]); + + if (!strncmp(av[i], "-device", switchLen)) { + NEXT("-device"); + dev = av[i]; + } + else if (!strncmp(av[i], "-image", switchLen)) { + NEXT("-i"); + image_fname = av[i]; + } + else if (!strcmp(av[i], "-v") || !strcmp(av[i], "-vv")) { + printf("%s: %s .", + av[0], + _versionID); + + if (!strcmp(av[i], "-vv")) { + printf(" SVN %s", _cvsID + 1); + } + + printf("\n"); + return 0; + } + else if (!strcmp(av[i], "-s")) + verbose = false; + else if (!strncmp(av[i], "-yes", switchLen)) + useDefaultAnswers = true; + else if (!strcmp(av[i], "-nofs")) + fsBurn = false; + else if (!strcmp(av[i], "-guid")) { + NEXT("-guid"); + userNodeGuid = av[i]; + } + else if (!strcmp(av[i], "-sysguid")) { + NEXT("-sysguid"); + userSysGuid = av[i]; + } + else if (!strcmp(av[i], "-bsn")) { + NEXT("-bsn"); + userBsn = av[i]; + } + else if (!strcmp(av[i], "-ndesc")) { + NEXT("-ndesc"); + userNodeDesc = av[i]; + } + else if (!strcmp(av[i], "-is3_i2c")) { + NEXT("-is3_i2c"); + char* ep; + is3I2cSlave = strtoul(av[i], &ep, 16); + if (*ep) { + SETERR(("Bad argument for %s flag (%s). Expecting a hexadecimal integer.", av[i-1], av[i])); + } + } + else if (!strcmp(av[i], "-pe_i2c") || !strcmp(av[i], "-pe")) { + NEXT("-pe_i2c"); + char* ep; + userPrimaryI2cAddr = strtoul(av[i], &ep, 16); + if (*ep) { + SETERR(("Bad argument for %s flag (%s). Expecting a hexadecimal integer.", av[i-1], av[i])); + } + } + else if (!strcmp(av[i], "-se_i2c") || !strcmp(av[i], "-se") ) { + NEXT(av[i]); + char* ep; + userSecondaryI2cAddr = strtoul(av[i], &ep, 16); + if (*ep) { + SETERR(("Bad argument for %s flag (%s). 
Expecting a hexadecimal integer.", av[i-1], av[i])); + } + } + else if (!strncmp(av[i], "-hh", 3)) { + usage(av[0], true); + return 1; + } + else if (!strncmp(av[i], "-help", switchLen) || !strncmp(av[i], "--h", 3)) { + usage(av[0]); + return 1; + + } + else { + SETERR(("Invalid switch \"%s\" is specified.\n", av[i])); + } + } else { + cmdStr = av[i]; + break; + } + } + + if (cmdStr == NULL) { + SETERR(("No command given. See help for details.")); + } + + IoBase* img; + + Eeprom* eeprom = NULL; + ImageFile* imageFile = NULL; + + Operations::SpecialFieldsMap userFields; + + Operations op; + + if (useDefaultAnswers) { + op.UseDefaultAnswers(); + } + + // + // Check command + // + CommandType cmd = CMD_UNKNOWN; + cmd = ParseCommand(av[i]); + if (cmd == CMD_UNKNOWN) { + SETERR(("Invalid command \"%s\".\n", av[i])); + } + + if (!CheckCommandInputs(dev, image_fname, cmd)) { + return 1; + } + + if (!CheckMaxCmdArguments(cmd, ac - i - 1 )) { + return 1; + } + + + // + // Open device and image file + // + if (dev) { + eeprom = new Eeprom; + img = eeprom; + if (!img->open(dev)) { + SETERR(("Failed to open device %s: %s", dev, img->err())); + } + + if (cmd != CMD_BURN_BLOCK && cmd != CMD_READ_BLOCK) { + if (userPrimaryI2cAddr) { + eeprom->InitImageAddressesFromUser(userPrimaryI2cAddr, userSecondaryI2cAddr); + } else { + if (!eeprom->InitImageAddressesFromIs3(is3I2cSlave)) { + SETERR(("Failed getting eeproms I2C addresses from IS3 device on I2C address %02x : %s", + is3I2cSlave, + eeprom->err())); + } + } + } + } + + if (image_fname) { + imageFile = new ImageFile; + img = imageFile; + if (!img->open(image_fname)) { + SETERR(("Failed to open image file %s: %s", image_fname, img->err())); + } + } + + // + // Parse and perform commands + // + switch (cmd) { + case CMD_VERIFY: + if (!op.CheckFullImage(*img, img->GetPrimaryEeprom(), img->GetSecondaryEeprom())) { + SETERR(("Image check failed: %s", op.err())); + } + break; + + case CMD_QUERY: + if (!op.QueryImage(*img, img->GetPrimaryEeprom() , img->GetSecondaryEeprom() )) { + SETERR(("Image query failed: %s", op.err())); + } + break; + + case CMD_BURN_BLOCK: + + if (!op.RawBurn(*eeprom, *imageFile)) { + SETERR(("Image check failed: %s", op.err())); + } + + if (!op.CompareContent(*eeprom, *imageFile)) { + SETERR(("Image check failed: %s", op.err())); + } + + break; + + case CMD_READ_IMAGE: + { + + NEXTC("ri" , "output image file"); + const char* outFile = av[i]; + + if (!eeprom) { + SETERR(("A device should be given for ri command\n")); + } + + if (!op.DumpImage(*eeprom, eeprom->GetPrimaryEeprom(), eeprom->GetSecondaryEeprom(), outFile)) { + SETERR((op.err())); + } + } + break; + + case CMD_READ_BLOCK: + { + // rb + u_int32_t args[3]; + char* argNames[3] = {"i2c slave", "offset", "size"}; + + for (int argIdx = 0; argIdx < 3 ; argIdx++ ) { + char* ep; + NEXTC("rb" , argNames[argIdx]); + args[argIdx] = strtoul(av[i], &ep, 16); + if (*ep) { + SETERR(("Bad argument #%d (%s) for rb command (%s). 
Expecting a hexadecimal integer.", + argIdx, + argNames[argIdx], + av[i])); + } + } + + NEXTC("rb" , "output image file"); + const char* outFile = av[i]; + + if (!op.ReadBlock (*eeprom, (u_int8_t) args[0], args[1], args[2], outFile)) { + SETERR((op.err())); + } + } + break; + + case CMD_BURN: + + // Get user 'setable' fields + if (userNodeGuid) { + if (!op.GetFieldFromString(userFields, Operations::NODE_GUID_ID, userNodeGuid)) { + SETERR((op.err())); + } + } + + if (userSysGuid) { + if (!op.GetFieldFromString(userFields, Operations::SYS_GUID_ID, userSysGuid)) { + SETERR((op.err())); + } + } + + if (userBsn) { + if (!op.GetFieldFromString(userFields, Operations::BSN_ID, userBsn)) { + SETERR((op.err())); + } + } + + if (userNodeDesc) { + if (!op.GetFieldFromString(userFields, Operations::NODE_DESC_ID , userNodeDesc)) { + SETERR((op.err())); + } + } + + + if (!op.IS3FwBurn(*eeprom, *imageFile, userFields, fsBurn)) { + SETERR(("%sBurn failed: %s\n", + fsBurn ? "Failsafe " : "" , + op.err())); + } + break; + + default: + SETERR(("Internal error: Unknown command %d given. See help for details.\n", cmd)); + } + + return 0; +} + diff --git a/branches/Ndi/tools/spark/user/spark.rc b/branches/Ndi/tools/spark/user/spark.rc new file mode 100644 index 00000000..ff451eaf --- /dev/null +++ b/branches/Ndi/tools/spark/user/spark.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#if DBG +#define VER_FILEDESCRIPTION_STR "Mellanox Switches FW burning tool. (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Mellanox Switches FW burning tool." 
+#endif + +#define VER_INTERNALNAME_STR "spark.exe" +#define VER_ORIGINALFILENAME_STR "spark.exe" + +#include diff --git a/branches/Ndi/tools/vstat/dirs b/branches/Ndi/tools/vstat/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tools/vstat/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/vstat/user/SOURCES b/branches/Ndi/tools/vstat/user/SOURCES new file mode 100644 index 00000000..20475d69 --- /dev/null +++ b/branches/Ndi/tools/vstat/user/SOURCES @@ -0,0 +1,23 @@ +TARGETNAME=vstat +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=vstat.rc \ + vstat_main.c + +INCLUDES=..\..\..\inc;..\..\..\inc\user; + +RCOPTIONS=/I..\..\win\include + +TARGETLIBS= \ +!if $(FREEBUILD) + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/tools/vstat/user/makefile b/branches/Ndi/tools/vstat/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/tools/vstat/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/tools/vstat/user/vstat.rc b/branches/Ndi/tools/vstat/user/vstat.rc new file mode 100644 index 00000000..19a9103b --- /dev/null +++ b/branches/Ndi/tools/vstat/user/vstat.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "HCA Status Report Application (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "HCA Status Report Application" +#endif + +#define VER_INTERNALNAME_STR "vstat.exe" +#define VER_ORIGINALFILENAME_STR "vstat.exe" + +#include diff --git a/branches/Ndi/tools/vstat/user/vstat_main.c b/branches/Ndi/tools/vstat/user/vstat_main.c new file mode 100644 index 00000000..2a8579c0 --- /dev/null +++ b/branches/Ndi/tools/vstat/user/vstat_main.c @@ -0,0 +1,642 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. 
All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#include "stdio.h"
+#include "string.h"
+#include "stdlib.h"
+
+
+#include
+#include
+#ifndef WIN32
+#include
+#endif
+#include
+
+
+#define VEND_ID_MELLNOX  0x02c9
+#define VEND_ID_VOLTAIRE 0x08f1
+
+
+/*******************************************************************
+*******************************************************************/
+
+
+void print64bit(ib_net64_t u64, BOOLEAN hexFormat){
+    ib_net64_t mask = (1<<16)-1;
+    ib_net16_t tmp;
+    int i;
+    for(i=0;i<4;i++){
+        tmp = (uint16_t)((u64>>(i*16))& mask);
+        if(hexFormat){
+            printf("%04x",cl_hton16(tmp));
+            if(i<3){
+                printf(":");
+            }
+        }else{
+
+            if((tmp>>8)<100){
+                printf("%02d", tmp>>8);
+            }else{
+                printf("%03d", tmp>>8);
+            }
+            printf(".");
+            if((tmp&0xff) < 100){
+                printf("%02d", tmp&0xff);
+            }else{
+                printf("%03d", tmp&0xff);
+            }
+
+        }
+    }
+}
+
+void printGUID(ib_net64_t guid){
+    printf("\tnode_guid=");
+    print64bit(guid, TRUE);
+    printf("\n");
+}
+
+void printPortGID(ib_net64_t subnetPrefix, ib_net64_t portGuid){
+    printf("\t\tGID[0]=");
+    print64bit(subnetPrefix, TRUE);
+    printf(":");
+    print64bit(portGuid, TRUE);
+    printf("\n");
+}
+
+
+void printPortLinkState(int portState){ //TODO: check that these are all the options and that they are correct
+    switch(portState){
+    case 1:
+        printf("\t\tport_state=PORT_DOWN (%d)\n",portState);
+        break;
+    case 2:
+        printf("\t\tport_state=PORT_INITIALIZE (%d)\n",portState);
+        break;
+    case 3:
+        printf("\t\tport_state=PORT_ARMED (%d)\n",portState);
+        break;
+    case 4:
+        printf("\t\tport_state=PORT_ACTIVE (%d)\n",portState);
+        break;
+    case 5:
+        printf("\t\tport_state=PORT_ACTDEFER (%d)\n",portState);
+        break;
+    default:
+        printf("\t\tport_state=UNKNOWN (%d)\n",portState);
+    }
+}
+
+void printPortRate(int speed, int width){
+    switch(speed){
+    case 1:
+        printf("\t\tlink_speed=2.5Gbps (%d)\n",speed);
+        break;
+    case 2:
+        printf("\t\tlink_speed=5Gbps (%d)\n",speed);
+        break;
+    case 4:
+        printf("\t\tlink_speed=10Gbps (%d)\n",speed);
+        break;
+    default:
+        printf("\t\tlink_speed=UNKNOWN (%d)\n",speed);
+    }
+
+    switch (width){
+    case 1:
+        printf("\t\tlink_width=1x (%d) \n\t\trate=%d\n",width,1*speed);
+        break;
+    case 2:
+        printf("\t\tlink_width=4x (%d) \n\t\trate=%d\n",width,10*speed);
+        break;
+    case 4:
+        printf("\t\tlink_width=8x (%d) \n\t\trate=%d\n",width,20*speed);
+        break;
+    case 8:
+        printf("\t\tlink_width=12x (%d) \n\t\trate=%d\n",width,30*speed);
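+        /*
+         * Rate arithmetic (sketch): with link_speed encoded in 2.5 Gb/s
+         * units, rate = lanes * speed * 2.5, hence the 10*, 20* and 30*
+         * multipliers for the 4x, 8x and 12x widths above.  Note that the
+         * 1x case prints 1*speed, i.e. the rate in 2.5 Gb/s units rather
+         * than in Gb/s.
+         */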
+ break; + default: + printf("\t\tlink_width=UNKNOWN (%d)\n",width); + } + + +} + + +void printPortMTU(int mtu){ //TODO: check that these are all the options and that they are correct + switch(mtu){ + case 1: + printf("\t\tmax_mtu=256 (%d)\n",mtu); + break; + case 2: + printf("\t\tmax_mtu=512 (%d)\n",mtu); + break; + case 3: + printf("\t\tmax_mtu=1024 (%d)\n",mtu); + break; + case 4: + printf("\t\tmax_mtu=2048 (%d)\n",mtu); + break; + case 5: + printf("\t\tmax_mtu=4096 (%d)\n",mtu); + break; + default: + printf("\t\tmax_mtu=UNKNOWN (%d)\n",mtu); + } +} + +void printPortCaps(ib_port_cap_t *ibal_port_cap_p) +{ +#define PRINT_CAP(cap, name) if (ibal_port_cap_p->cap) printf( #name "," ) + + printf("\t\tcapabilities: "); + PRINT_CAP(cm, CM); + PRINT_CAP(snmp, SNMP_TUNNEL); + PRINT_CAP(dev_mgmt, DEVICE_MGMT); + PRINT_CAP(sm_disable, SM_DISABLED); + PRINT_CAP(sm, SM); + PRINT_CAP(vend, VENDOR_CLASS); + PRINT_CAP(notice, NOTICE); + PRINT_CAP(trap, TRAP); + PRINT_CAP(apm, APM); + PRINT_CAP(slmap, SL_MAP); + PRINT_CAP(ledinfo, LED_INFO); + PRINT_CAP(client_reregister, CLIENT_REG); + PRINT_CAP(sysguid, SYSGUID); + PRINT_CAP(boot_mgmt, BOOT_MGMT); + PRINT_CAP(pkey_switch_ext_port, PKEY_SW_EXT_PORT_TRAP); + PRINT_CAP(link_rtl, LINK_LATENCY); + PRINT_CAP(reinit, REINIT); + PRINT_CAP(ipd, OPT_IPD); + PRINT_CAP(mkey_nvram, MKEY_NVRAM); + PRINT_CAP(pkey_nvram, PKEY_NVRAM); + printf("\n"); +} +void printPortInfo(ib_port_attr_t* portPtr, ib_port_info_t portInfo, BOOLEAN fullPrint){ + printf("\t\tport=%d\n", portPtr->port_num); + printPortLinkState(portPtr->link_state); + printPortRate(portInfo.link_speed>>4,portInfo.link_width_active); + printf("\t\tsm_lid=0x%04x\n", cl_ntoh16(portPtr->sm_lid)); + printf("\t\tport_lid=0x%04x\n", cl_ntoh16(portPtr->lid)); + printf("\t\tport_lmc=0x%x\n", portPtr->lmc); + printPortMTU(portPtr->mtu); + if(fullPrint){ + printf("\t\tmax_msg_sz=0x%x (Max message size)\n", portPtr->max_msg_size); + printPortCaps( &portPtr->cap ); + printf("\t\tmax_vl_num=0x%x (Maximum number of VL supported by this port)\n", portPtr->max_vls); + printf("\t\tbad_pkey_counter=0x%x (Bad PKey counter)\n", portPtr->pkey_ctr); + printf("\t\tqkey_viol_counter=0x%x (QKey violation counter)\n", portPtr->qkey_ctr); + printf("\t\tsm_sl=0x%x (IB_SL to be used in communication with subnet manager)\n", portPtr->sm_sl); + printf("\t\tpkey_tbl_len=0x%x (Current size of pkey table)\n", portPtr->num_pkeys); + printf("\t\tgid_tbl_len=0x%x (Current size of GID table)\n", portPtr->num_gids); + printf("\t\tsubnet_timeout=0x%x (Subnet Timeout for this port (see PortInfo))\n", portPtr->subnet_timeout); + printf("\t\tinitTypeReply=0x%x (optional InitTypeReply value. 
0 if not supported)\n", portPtr->init_type_reply); + printPortGID(portPtr->p_gid_table->unicast.prefix, portPtr->p_gid_table->unicast.interface_id); + } + printf("\n"); +} + + +void print_uplink_info(ib_ca_attr_t* ca_attr) +{ + uplink_info_t*p_uplink_info = mthca_get_uplink_info(ca_attr); + char *bus_type, *link_speed, cap; + int freq; + + switch (p_uplink_info->bus_type) { + case UPLINK_BUS_PCI: bus_type = "PCI"; break; + case UPLINK_BUS_PCIX: bus_type = "PCI_X"; break; + case UPLINK_BUS_PCIE: bus_type = "PCI_E"; break; + default: printf("\tuplink={BUS=UNRECOGNIZED (%d)}\n", p_uplink_info->bus_type); return; + } + + switch (p_uplink_info->bus_type) { + case UPLINK_BUS_PCI: + case UPLINK_BUS_PCIX: + if (p_uplink_info->u.pci_x.capabilities == UPLINK_BUS_PCIX_133) + freq = 133; + else + freq = 66; + printf("\tuplink={BUS=%s, CAPS=%d MHz}\n", bus_type, freq ); + return; + + case UPLINK_BUS_PCIE: + cap = p_uplink_info->u.pci_e.capabilities; + if (p_uplink_info->u.pci_e.link_speed == UPLINK_BUS_PCIE_SDR) + link_speed = "2.5 Gbps"; + else + if (p_uplink_info->u.pci_e.link_speed == UPLINK_BUS_PCIE_DDR) + link_speed = "5.0 Gbps"; + else + link_speed = "unknown"; + printf("\tuplink={BUS=%s, SPEED=%s, WIDTH=x%d, CAPS=%s*x%d}\n", + bus_type, link_speed, p_uplink_info->u.pci_e.link_width, + (cap&1) ? "2.5" : "5", cap>>2 ); + return; + } +} + +void vstat_print_ca_attr(int idx, ib_ca_attr_t* ca_attr, ib_port_info_t* vstat_port_info, BOOLEAN fullPrint){ + int i; + + printf("\n\thca_idx=%d\n", idx); + if (ca_attr->dev_id & 1) + printf("\tATTENTION! \n\t The device is in 'Flash Recovery' mode, probably due to an incorrect firmware." + "\n\t Use firmware tools to solve the problem.\n",idx); + /*printf("\tpci_location={BUS=NA,DEV/FUNC=NA}\n");*/ + print_uplink_info(ca_attr); + printf("\tvendor_id=0x%04x\n", ca_attr->vend_id); + printf("\tvendor_part_id=0x%04x\n", ca_attr->dev_id); + printf("\thw_ver=0x%x\n", ca_attr->revision); //TODO: ??? 
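+    /*
+     * fw_ver packing assumed by the printout below: a 64-bit value with the
+     * major revision in bits 32..47, the minor in bits 16..31 and the
+     * subminor in bits 0..15, i.e. (sketch):
+     *
+     *     major    = (uint16_t)(fw_ver >> 32);
+     *     minor    = (uint16_t)(fw_ver >> 16);
+     *     subminor = (uint16_t) fw_ver;
+     */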
+ if(ca_attr->vend_id == VEND_ID_MELLNOX || ca_attr->vend_id == VEND_ID_VOLTAIRE) { + printf("\tfw_ver=%d.%.2d.%.4d\n", + (uint16_t )(ca_attr->fw_ver>>32), + (uint16_t)(ca_attr->fw_ver>>16), + (uint16_t )(ca_attr->fw_ver)); + printf("\tPSID=%s\n",mthca_get_board_id(ca_attr)); + }else{ + printf("\tfw_ver=0x%I64x\n",ca_attr->fw_ver); + } + printGUID(ca_attr->ca_guid); + if(fullPrint){ + printf("\tnum_phys_ports = %d\n",ca_attr->num_ports); + printf("\tmax_num_qp = 0x%x (Maximum Number of QPs supported)\n", ca_attr->max_qps); + printf("\tmax_qp_ous_wr = 0x%x (Maximum Number of outstanding WR on any WQ)\n", ca_attr->max_wrs); + printf("\tmax_num_sg_ent = 0x%x (Max num of scatter/gather entries for WQE other than RD)\n", ca_attr->max_sges); + printf("\tmax_num_sg_ent_rd = 0x%x (Max num of scatter/gather entries for RD WQE)\n", ca_attr->max_rd_sges); + printf("\tmax_num_srq = 0x%x (Maximum Number of SRQs supported)\n", ca_attr->max_srq); + printf("\tmax_wqe_per_srq = 0x%x (Maximum Number of outstanding WR on any SRQ)\n", ca_attr->max_srq_wrs); + printf("\tmax_srq_sentries = 0x%x (Maximum Number of scatter/gather entries for SRQ WQE)\n", ca_attr->max_srq_sges); + printf("\tsrq_resize_supported = %d (SRQ resize supported)\n", ca_attr->modify_srq_depth); + printf("\tmax_num_cq = 0x%x (Max num of supported CQs)\n", ca_attr->max_cqs); + printf("\tmax_num_ent_cq = 0x%x (Max num of supported entries per CQ)\n", ca_attr->max_cqes); + printf("\tmax_num_mr = 0x%x (Maximum number of memory region supported)\n", ca_attr->init_regions); + printf("\tmax_mr_size = 0x%x (Largest contiguous block of memory region in bytes)\n", ca_attr->init_region_size); + printf("\tmax_pd_num = 0x%x (Maximum number of protection domains supported)\n", ca_attr->max_pds); + printf("\tpage_size_cap = 0x%x (Largest page size supported by this HCA)\n",ca_attr->p_page_size[ca_attr->num_page_sizes-1]); + + printf("\tlocal_ca_ack_delay = 0x%x (Log2 4.096usec Max. RX to ACK or NAK delay)\n", ca_attr->local_ack_delay); + printf("\tmax_qp_ous_rd_atom = 0x%x (Maximum number of oust. RDMA read/atomic as target)\n",ca_attr->max_qp_resp_res); + printf("\tmax_ee_ous_rd_atom = 0 (EE Maximum number of outs. RDMA read/atomic as target)\n"); + printf("\tmax_res_rd_atom = 0x%x (Max. Num. of resources used for RDMA read/atomic as target)\n",ca_attr->max_resp_res); + printf("\tmax_qp_init_rd_atom = 0x%x (Max. Num. of outs. RDMA read/atomic as initiator)\n",ca_attr->max_qp_init_depth); + printf("\tmax_ee_init_rd_atom = 0 (EE Max. Num. of outs. 
RDMA read/atomic as initiator)\n");
+        printf("\tatomic_cap = %s (Level of Atomicity supported)\n",ca_attr->atomicity == IB_ATOMIC_GLOBAL?"GLOBAL":
+               ca_attr->atomicity == IB_ATOMIC_LOCAL?"LOCAL":"NORMAL");
+        printf("\tmax_ee_num = 0x0 (Maximum number of EEC supported)\n");
+        printf("\tmax_rdd_num = 0x0 (Maximum number of IB_RDD supported)\n");
+        printf("\tmax_mw_num = 0x%x (Maximum Number of memory windows supported)\n", ca_attr->init_windows);
+        printf("\tmax_raw_ipv6_qp = 0x%x (Maximum number of Raw IPV6 QPs supported)\n", ca_attr->max_ipv6_qps);
+        printf("\tmax_raw_ethy_qp = 0x%x (Maximum number of Raw Ethertypes QPs supported)\n", ca_attr->max_ether_qps);
+        printf("\tmax_mcast_grp_num = 0x%x (Maximum Number of multicast groups)\n", ca_attr->max_mcast_grps);
+        printf("\tmax_mcast_qp_attach_num = 0x%x (Maximum number of QP per multicast group)\n", ca_attr->max_qps_per_mcast_grp);
+        printf("\tmax_ah_num = 0x%x (Maximum number of address handles)\n", ca_attr->max_addr_handles);
+        printf("\tmax_num_fmr = 0x%x (Maximum number FMRs)\n", ca_attr->max_fmr);
+        printf("\tmax_num_map_per_fmr = 0x%x (Maximum number of (re)maps per FMR before an unmap operation is required)\n", ca_attr->max_map_per_fmr);
+    }else{
+        printf("\tnum_phys_ports=%d\n", ca_attr->num_ports);
+    }
+    for (i = 0; i < ca_attr->num_ports; i++){
+        printPortInfo(ca_attr->p_port_attr+i, vstat_port_info[i], fullPrint);
+    }
+}
+/* Internal Functions */
+
+void vstat_get_counters(ib_ca_handle_t h_ca,uint8_t port_num)
+{
+    ib_mad_t           *mad_in  = NULL;
+    ib_mad_t           *mad_out = NULL;
+    ib_port_counters_t *port_counters;
+    ib_api_status_t     ib_status = IB_SUCCESS;
+    int i;
+
+    mad_out = (ib_mad_t*)cl_zalloc(256);
+    CL_ASSERT(mad_out);
+
+    mad_in = (ib_mad_t*)cl_zalloc(256);
+    CL_ASSERT(mad_in);
+
+
+    mad_in->attr_id    = IB_MAD_ATTR_PORT_CNTRS;
+    mad_in->method     = IB_MAD_METHOD_GET;
+    mad_in->base_ver   = 1;
+    mad_in->class_ver  = 1;
+    mad_in->mgmt_class = IB_MCLASS_PERF;
+
+    port_counters = (ib_port_counters_t*)(((ib_gmp_t*)mad_in)->data);
+
+    port_counters->port_select    = port_num;
+    port_counters->counter_select = 0xff;
+
+    ib_status = ib_local_mad(h_ca ,port_num ,mad_in ,mad_out);
+    if(ib_status != IB_SUCCESS)
+    {
+        printf("ib_local_mad failed with status = %d\n", ib_status);
+        return;
+    }
+
+    port_counters = (ib_port_counters_t*)(((ib_gmp_t*)mad_out)->data);
+
+    printf("\n\tport counters for port %d\n",port_num);
+    printf("\t\tlink_error_recovery_counter\t0x%x \n",port_counters->link_error_recovery_counter);
+    printf("\t\tlink_down_counter\t\t0x%x \n",port_counters->link_down_counter);
+    printf("\t\tport_rcv_errors\t\t\t0x%x \n",CL_NTOH16(port_counters->port_rcv_errors));
+    printf("\t\tport_rcv_remote_physical_errors\t0x%x \n",CL_NTOH16(port_counters->port_rcv_remote_physical_errors));
+    printf("\t\tport_rcv_switch_relay_errors\t0x%x \n",CL_NTOH16(port_counters->port_rcv_switch_relay_errors));
+    printf("\t\tport_xmit_discard\t\t0x%x \n",CL_NTOH16(port_counters->port_xmit_discard));
+    printf("\t\tport_xmit_constraint_errors\t0x%x \n",port_counters->port_xmit_constraint_errors);
+    printf("\t\tport_rcv_constraint_errors\t0x%x \n",port_counters->port_rcv_constraint_errors);
+    printf("\t\tvl15_dropped\t\t\t0x%x \n",CL_NTOH16(port_counters->vl15_dropped));
+    printf("\t\tport_rcv_data\t\t\t0x%x \n",CL_NTOH32(port_counters->port_rcv_data));
+    printf("\t\tport_xmit_data\t\t\t0x%x \n",CL_NTOH32(port_counters->port_xmit_data));
+    printf("\t\tport_rcv_pkts\t\t\t0x%x \n",CL_NTOH32(port_counters->port_rcv_pkts));
+    printf("\t\tport_xmit_pkts\t\t\t0x%x
\n\n",CL_NTOH32(port_counters->port_xmit_pkts)); + +} + + +void vstat_get_port_info(ib_ca_handle_t h_ca,uint8_t port_num, ib_port_info_t* vstat_port_info) +{ + ib_mad_t *mad_in = NULL; + ib_mad_t *mad_out = NULL; + ib_api_status_t ib_status = IB_SUCCESS; + int i; + + mad_out = (ib_mad_t*)cl_zalloc(256); + CL_ASSERT(mad_out); + + mad_in = (ib_mad_t*)cl_zalloc(256); + CL_ASSERT(mad_in); + + + mad_in->attr_id = IB_MAD_ATTR_PORT_INFO; + mad_in->method = IB_MAD_METHOD_GET; + mad_in->base_ver = 1; + mad_in->class_ver =1; + mad_in->mgmt_class = IB_MCLASS_SUBN_LID; + + + + ib_status = ib_local_mad(h_ca ,port_num ,mad_in ,mad_out); + if(ib_status != IB_SUCCESS && 0 != mad_in->status ) + { + printf("ib_local_mad failed with status = %d mad status = %d\n", ib_status,mad_in->status); + return; + } + + cl_memcpy(vstat_port_info,(ib_port_info_t*)(((ib_smp_t*)mad_out)->data),sizeof(ib_port_info_t)); + + +} + + +ib_api_status_t +vstat_ca_attr( + boolean_t modify_attr, + BOOLEAN fullPrint, + BOOLEAN getCounters + ) +{ + ib_al_handle_t h_al = NULL; + ib_api_status_t ib_status = IB_SUCCESS; + ib_api_status_t ret_status = IB_SUCCESS; + size_t guid_count; + ib_net64_t *ca_guid_array; + ib_ca_attr_t *vstat_ca_attr; + ib_port_info_t vstat_port_info[2]; + size_t i; + ib_ca_handle_t h_ca = NULL; + uint32_t bsize; + ib_port_attr_mod_t port_attr_mod; + uint8_t port_idx; + + while(1) + { + /* + * Open the AL instance + */ + ib_status = ib_open_al(&h_al); + if(ib_status != IB_SUCCESS) + { + printf("ib_open_al failed status = %d\n", ib_status); + ret_status = ib_status; + break; + } + //xxxx + //printf("ib_open_al PASSED.\n"); + //xxx + CL_ASSERT(h_al); + + /* + * Get the Local CA Guids + */ + ib_status = ib_get_ca_guids(h_al, NULL, &guid_count); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + printf("ib_get_ca_guids1 failed status = %d\n", (uint32_t)ib_status); + ret_status = ib_status; + goto Cleanup1; + } + + + + /* + * If no CA's Present then return + */ + + if(guid_count == 0) + goto Cleanup1; + + + ca_guid_array = (ib_net64_t*)cl_malloc(sizeof(ib_net64_t) * guid_count); + CL_ASSERT(ca_guid_array); + + ib_status = ib_get_ca_guids(h_al, ca_guid_array, &guid_count); + if(ib_status != IB_SUCCESS) + { + printf("ib_get_ca_guids2 failed with status = %d\n", ib_status); + ret_status = ib_status; + goto Cleanup1; + } + + + + /* + * For Each CA Guid found Open the CA, + * Query the CA Attribute and close the CA + */ + for(i=0; i < guid_count; i++) + { + + /* Open the CA */ + ib_status = ib_open_ca(h_al, + ca_guid_array[i], + NULL, + NULL, //ca_context + &h_ca); + + if(ib_status != IB_SUCCESS) + { + printf("ib_open_ca failed with status = %d\n", ib_status); + ret_status = ib_status; + goto Cleanup1; + } + + //xxx + //printf("ib_open_ca passed i=%d\n",i); + //xxx + + + /* Query the CA */ + bsize = 0; + ib_status = ib_query_ca(h_ca, NULL, &bsize); + if(ib_status != IB_INSUFFICIENT_MEMORY) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + ret_status = ib_status; + goto Cleanup2; + } + CL_ASSERT(bsize); + //xxxx + //printf("ib_query_ca PASSED bsize = 0x%x.\n",bsize); + //xxx + /* Allocate the memory needed for query_ca */ + + vstat_ca_attr = (ib_ca_attr_t *)cl_zalloc(bsize); + CL_ASSERT(vstat_ca_attr); + + ib_status = ib_query_ca(h_ca, vstat_ca_attr, &bsize); + if(ib_status != IB_SUCCESS) + { + printf("ib_query_ca failed with status = %d\n", ib_status); + ret_status = ib_status; + goto Cleanup2; + } + + for(port_idx =0; port_idx< vstat_ca_attr->num_ports;port_idx++){ + vstat_get_port_info(h_ca 
,port_idx+1,&vstat_port_info[port_idx]); + } + + vstat_print_ca_attr((int)i, vstat_ca_attr, vstat_port_info, fullPrint); + if(getCounters) + { + for(port_idx =0; port_idx< vstat_ca_attr->num_ports;port_idx++){ + vstat_get_counters(h_ca ,port_idx+1); + } + } + + /* Free the memory */ + cl_free(vstat_ca_attr); + vstat_ca_attr = NULL; + /* Close the current open CA */ + ib_status = ib_close_ca(h_ca, NULL); + if(ib_status != IB_SUCCESS) + { + printf("ib_close_ca failed status = %d", ib_status); + ret_status = ib_status; + } + h_ca = NULL; + + } + +Cleanup2: + if(h_ca != NULL) + { + ib_status = ib_close_ca(h_ca, NULL); + if(ib_status != IB_SUCCESS) + { + printf("ib_close_ca failed status = %d", ib_status); + } + } + +Cleanup1: + cl_free(ca_guid_array); + ib_status = ib_close_al(h_al); + + if(ib_status != IB_SUCCESS) + { + printf("ib_close_al failed status = %d", ib_status); + } + + break; + + } //End of while(1) + + + return ret_status; +} + +void vstat_help() +{ + printf("\n\tUsage: vstat [-v] [-c]\n"); + printf("\t\t -v - verbose mode\n"); + printf("\t\t -c - HCA error/statistic counters\n"); +} + +int32_t __cdecl +main( + int32_t argc, + char* argv[]) +{ + ib_api_status_t ib_status; + BOOLEAN fullPrint = FALSE; + BOOLEAN getCounters = FALSE; + BOOLEAN showHelp = FALSE; + if(argc>1){ + int i = 2; + while(i<=argc){ + if(!_stricmp(argv[i-1], "-v")){ + fullPrint = TRUE; + i+=1; + }else if(!_stricmp(argv[i-1], "-h") || + !_stricmp(argv[i-1], "-help")){ + showHelp = TRUE; + i+=1; + }else if(!_stricmp(argv[i-1], "-c")){ + getCounters = TRUE; + i+=1; + }else{ + i+=2; + } + } + } + if (showHelp) + vstat_help(); + else + ib_status = vstat_ca_attr(FALSE, fullPrint,getCounters); + + return 0; +} + diff --git a/branches/Ndi/tools/wsdinstall/dirs b/branches/Ndi/tools/wsdinstall/dirs new file mode 100644 index 00000000..389156fd --- /dev/null +++ b/branches/Ndi/tools/wsdinstall/dirs @@ -0,0 +1,2 @@ +DIRS=\ + user diff --git a/branches/Ndi/tools/wsdinstall/user/InstallSP.sln b/branches/Ndi/tools/wsdinstall/user/InstallSP.sln new file mode 100644 index 00000000..9c8f2cb0 --- /dev/null +++ b/branches/Ndi/tools/wsdinstall/user/InstallSP.sln @@ -0,0 +1,21 @@ +Microsoft Visual Studio Solution File, Format Version 8.00 +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "InstallSP", "InstallSP.vcproj", "{B3A2B7A0-1906-413E-A457-8AD2FC5E88BB}" + ProjectSection(ProjectDependencies) = postProject + EndProjectSection +EndProject +Global + GlobalSection(SolutionConfiguration) = preSolution + Debug = Debug + Release = Release + EndGlobalSection + GlobalSection(ProjectConfiguration) = postSolution + {B3A2B7A0-1906-413E-A457-8AD2FC5E88BB}.Debug.ActiveCfg = Debug|Win32 + {B3A2B7A0-1906-413E-A457-8AD2FC5E88BB}.Debug.Build.0 = Debug|Win32 + {B3A2B7A0-1906-413E-A457-8AD2FC5E88BB}.Release.ActiveCfg = Release|Win32 + {B3A2B7A0-1906-413E-A457-8AD2FC5E88BB}.Release.Build.0 = Release|Win32 + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + EndGlobalSection + GlobalSection(ExtensibilityAddIns) = postSolution + EndGlobalSection +EndGlobal diff --git a/branches/Ndi/tools/wsdinstall/user/SOURCES b/branches/Ndi/tools/wsdinstall/user/SOURCES new file mode 100644 index 00000000..b0bba4b0 --- /dev/null +++ b/branches/Ndi/tools/wsdinstall/user/SOURCES @@ -0,0 +1,23 @@ +TARGETNAME=installsp +TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +INCLUDES=..\..\..\inc;\ + ..\..\..\inc\user;\ + $(PLATFORM_SDK_PATH)\include; + +SOURCES= \ + installsp.rc \ + installsp.c + 
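+# Note: the -DPERFMON_ENABLED flag below compiles in the optional PerfMon
+# counter registration code in installsp.c (the #ifdef PERFMON_ENABLED block).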
+USER_C_FLAGS=$(USER_C_FLAGS) -DPERFMON_ENABLED + +TARGETLIBS=\ + $(SDK_LIB_PATH)\ws2_32.lib \ + $(SDK_LIB_PATH)\LoadPerf.lib + +MSC_WARNING_LEVEL= /W3 + +LINKER_FLAGS=$(LINKER_FLAGS) diff --git a/branches/Ndi/tools/wsdinstall/user/installsp.c b/branches/Ndi/tools/wsdinstall/user/installsp.c new file mode 100644 index 00000000..a13bf39c --- /dev/null +++ b/branches/Ndi/tools/wsdinstall/user/installsp.c @@ -0,0 +1,744 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* + * Module Name: installsp.c + * Description: This module installs/removes a winsock service provider for infiniband. + * execute: + * To install the service provider + * installsp -i + * To remove the service provider + * installsp -r + */ + +#include +#include +#include + +/* Initialize the LSP's provider path for Infiband Service Provider dll */ +static const WCHAR provider_path[] = L"%SYSTEMROOT%\\system32\\ibwsd.dll"; +static const WCHAR provider_name[] = L"OpenIB Winsock Direct for InfiniBand"; +static const char winsock_key_path[] = + "System\\CurrentControlSet\\Services\\Winsock\\Parameters\\TCP on SAN"; +static const char openib_key_name[] = "OpenIB Alliance"; + +/* Unique provider GUID generated with "uuidgen -s" */ +static GUID provider_guid = { + /* c943654d-2c84-4db7-af3e-fdf1c5322458 */ + 0xc943654d, 0x2c84, 0x4db7, + { 0xaf, 0x3e, 0xfd, 0xf1, 0xc5, 0x32, 0x24, 0x58 } +}; + +#ifdef _WIN64 +#define WSCInstallProvider WSCInstallProvider64_32 +#endif /* _WIN64 */ + +#ifdef PERFMON_ENABLED +#include +#include "wsd/ibsp_regpath.h" + + +typedef struct _pm_symbol_def +{ + DWORD name_def; + CHAR name_str[40]; + CHAR name_desc[40]; + CHAR help_desc[256]; + +} pm_symbol_def_t; + +static pm_symbol_def_t _pm_symbols[]= +{ + { IBSP_PM_OBJ, + "IBSP_PM_OBJ", + "IB Winsock Direct", + "InfiniBand Windows Sockets Direct Provider." + }, + { IBSP_PM_COUNTER(BYTES_SEND), + "IBSP_PM_BYTES_TX_SEC", + "Send bytes/sec", + "Send bytes/second, excluding RDMA Write." + }, + { IBSP_PM_COUNTER(BYTES_RECV), + "IBSP_PM_BYTES_RX_SEC", + "Recv bytes/sec", + "Receive bytes/second, excluding RDMA Read." + }, + { IBSP_PM_COUNTER(BYTES_WRITE), + "IBSP_PM_RDMA_WR_SEC", + "RDMA Write bytes/sec", + "RDMA Write bytes/second." + }, + { IBSP_PM_COUNTER(BYTES_READ), + "IBSP_PM_RDMA_RD_SEC", + "RDMA Read bytes/sec", + "RDMA Read bytes/second." 
+ },
+ { IBSP_PM_COUNTER(BYTES_TOTAL),
+   "IBSP_PM_BYTES_SEC",
+   "Total bytes/sec",
+   "Total bytes transmitted per second, including send, "
+   "receive, RDMA Write, and RDMA Read."
+ },
+ { IBSP_PM_COUNTER(COMP_SEND),
+   "IBSP_PM_SEND_COMPLETIONS_SEC",
+   "Send Completions/sec",
+   "Send and RDMA Write Completions/sec."
+ },
+ { IBSP_PM_COUNTER(COMP_RECV),
+   "IBSP_PM_RECV_COMPLETIONS_SEC",
+   "Recv Completions/sec",
+   "Recv and RDMA Read Completions/sec."
+ },
+ { IBSP_PM_COUNTER(COMP_TOTAL),
+   "IBSP_PM_COMPLETIONS_SEC",
+   "Total Completions/sec",
+   "Total Completions processed per second."
+ },
+ { IBSP_PM_COUNTER(INTR_TOTAL),
+   "IBSP_PM_COMPLETIONS_INTR",
+   "Total Interrupts/sec",
+   "Completion Queue events per second."
+ }
+};
+
+#define IBSP_PM_NUM_SYMBOLS (sizeof(_pm_symbols)/sizeof(pm_symbol_def_t))
+#define IBSP_PM_LANGUAGE "009" /* good for English */
+
+static CHAR *
+_IBSPGenerateFileName(char *header, char *file )
+{
+    DWORD size1, size;
+    CHAR *full_file_name;
+    int header_len = header == NULL ? 0 : strlen(header);
+
+    size = GetTempPath(0, NULL);
+    if (size == 0)
+    {
+        fprintf( stderr, "GetTempPath failed\n" );
+        return NULL;
+    }
+    size1 = size + strlen(file) + header_len;
+    full_file_name = HeapAlloc (GetProcessHeap (), HEAP_ZERO_MEMORY, size1);
+    if ( full_file_name == NULL )
+    {
+        fprintf( stderr, "HeapAlloc failed\n" );
+        return NULL;
+    }
+    size1 = GetTempPath(size, full_file_name + header_len);
+    if (size != size1 + 1)
+    {
+        fprintf( stderr, "GetTempPath returned an unexpected length\n" );
+        HeapFree (GetProcessHeap (), 0, full_file_name);
+        return NULL;
+    }
+    if (header_len != 0)
+    {
+        memcpy(full_file_name, header, header_len);
+    }
+    strcat(full_file_name, file);
+    return full_file_name;
+}
+
+
+static DWORD
+_IBSPPerfmonIniFilesGenerate( void )
+{
+    FILE *f_handle;
+    DWORD num;
+    DWORD ret = ERROR_SUCCESS;
+    char *ibsp_pm_sym_file = NULL;
+    char *ibsp_pm_ini_file = NULL;
+
+    /* create ".h" file first */
+    ibsp_pm_sym_file = _IBSPGenerateFileName(NULL, IBSP_PM_SYM_H_FILE);
+    if( !ibsp_pm_sym_file )
+    {
+        fprintf( stderr, "_IBSPGenerateFileName failed\n" );
+        ret = ERROR_NOT_ENOUGH_MEMORY;
+        goto Cleanup;
+    }
+
+    f_handle = fopen( ibsp_pm_sym_file, "w+" );
+
+    if( !f_handle )
+    {
+        fprintf( stderr, "Create Header file %s failed\n", ibsp_pm_sym_file );
+        ret = ERROR_FILE_INVALID;
+        goto Cleanup;
+    }
+
+    fprintf(
+        f_handle, "/* %s Generated by program */ \r\n", ibsp_pm_sym_file );
+
+    for( num = 0; num < IBSP_PM_NUM_SYMBOLS; num++ )
+    {
+        fprintf( f_handle, "#define\t%s\t%d\r\n",
+            _pm_symbols[num].name_str, _pm_symbols[num].name_def );
+    }
+
+    fflush( f_handle );
+    fclose( f_handle );
+
+    /* create 'ini' file next */
+    ibsp_pm_ini_file = _IBSPGenerateFileName(NULL, IBSP_PM_INI_FILE);
+    if( !ibsp_pm_ini_file )
+    {
+        fprintf( stderr, "_IBSPGenerateFileName failed\n" );
+        ret = ERROR_NOT_ENOUGH_MEMORY;
+        goto Cleanup;
+    }
+    f_handle = fopen( ibsp_pm_ini_file, "w+" );
+
+    if( !f_handle )
+    {
+        fprintf( stderr, "Create INI file %s failed\n", ibsp_pm_ini_file );
+        ret = ERROR_FILE_INVALID;
+        goto Cleanup;
+    }
+
+    fprintf( f_handle, "[info]\r\ndrivername=" IBSP_PM_SUBKEY_NAME
+        "\r\nsymbolfile=%s\r\n\r\n", ibsp_pm_sym_file );
+    fprintf( f_handle,"[languages]\r\n" IBSP_PM_LANGUAGE
+        "=language" IBSP_PM_LANGUAGE "\r\n\r\n" );
+
+    fprintf( f_handle,
+        "[objects]\r\n%s_" IBSP_PM_LANGUAGE "_NAME=%s\r\n\r\n[text]\r\n",
+        _pm_symbols[0].name_str, _pm_symbols[0].name_desc );
+
+    for( num = 0; num < IBSP_PM_NUM_SYMBOLS; num++ )
+    {
+        fprintf( f_handle,"%s_" IBSP_PM_LANGUAGE "_NAME=%s\r\n",
+            _pm_symbols[num].name_str, _pm_symbols[num].name_desc );
+        fprintf( f_handle,"%s_" IBSP_PM_LANGUAGE "_HELP=%s\r\n",
+            _pm_symbols[num].name_str, _pm_symbols[num].help_desc );
+    }
+
+    fflush( f_handle );
+    fclose( f_handle );
+
+Cleanup:
+    if ( ibsp_pm_sym_file )
+    {
+        HeapFree (GetProcessHeap (), 0, ibsp_pm_sym_file);
+    }
+    if ( ibsp_pm_ini_file )
+    {
+        HeapFree (GetProcessHeap (), 0, ibsp_pm_ini_file);
+    }
+    return ret;
+}
+
+
+static void
+_IBSPPerfmonIniFilesRemove( void )
+{
+    char *ibsp_pm_sym_file = NULL;
+    char *ibsp_pm_ini_file = NULL;
+
+    ibsp_pm_sym_file = _IBSPGenerateFileName(NULL, IBSP_PM_SYM_H_FILE);
+    if( !ibsp_pm_sym_file )
+    {
+        fprintf( stderr, "_IBSPGenerateFileName failed\n" );
+        goto Cleanup;
+    }
+
+    ibsp_pm_ini_file = _IBSPGenerateFileName(NULL, IBSP_PM_INI_FILE);
+    if( !ibsp_pm_ini_file )
+    {
+        fprintf( stderr, "_IBSPGenerateFileName failed\n" );
+        goto Cleanup;
+    }
+
+    if( !DeleteFile( ibsp_pm_ini_file ) )
+    {
+        fprintf( stderr, "Delete file %s failed status %d\n",
+            ibsp_pm_ini_file, GetLastError() );
+    }
+    if( !DeleteFile( ibsp_pm_sym_file ) )
+    {
+        fprintf( stderr,"Delete file %s failed status %d\n",
+            ibsp_pm_sym_file, GetLastError() );
+    }
+
+Cleanup:
+    if ( ibsp_pm_sym_file )
+    {
+        HeapFree (GetProcessHeap (), 0, ibsp_pm_sym_file);
+    }
+    if ( ibsp_pm_ini_file )
+    {
+        HeapFree (GetProcessHeap (), 0, ibsp_pm_ini_file);
+    }
+}
+
+
+/* Try to create IB WSD Performance Register Keys */
+static LONG
+_IBSPPerfmonRegisterKeys( void )
+{
+    LONG reg_status;
+    HKEY pm_hkey;
+    DWORD typesSupp = 7;
+
+    reg_status = RegCreateKeyEx( HKEY_LOCAL_MACHINE,
+        IBSP_PM_REGISTRY_PATH IBSP_PM_SUBKEY_PERF, 0, NULL,
+        REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &pm_hkey, NULL );
+
+    if( reg_status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "_IBSPPerfmonRegisterKeys Create Key %s failed with %d\n",
+            IBSP_PM_REGISTRY_PATH IBSP_PM_SUBKEY_PERF, reg_status );
+        return reg_status;
+    }
+
+    /* create/assign values to the key */
+    RegSetValueExW( pm_hkey, L"Library", 0, REG_EXPAND_SZ,
+        (LPBYTE)provider_path, sizeof(provider_path) );
+
+    RegSetValueEx( pm_hkey, TEXT("Open"), 0, REG_SZ,
+        (LPBYTE)TEXT("IBSPPmOpen"), sizeof(TEXT("IBSPPmOpen")) );
+
+    RegSetValueEx( pm_hkey, TEXT("Collect"), 0, REG_SZ,
+        (LPBYTE)TEXT("IBSPPmCollectData"), sizeof(TEXT("IBSPPmCollectData")) );
+
+    RegSetValueEx( pm_hkey, TEXT("Close"), 0, REG_SZ,
+        (LPBYTE)TEXT("IBSPPmClose"), sizeof(TEXT("IBSPPmClose")) );
+
+    RegFlushKey( pm_hkey );
+    RegCloseKey( pm_hkey );
+
+    reg_status = RegCreateKeyEx( HKEY_LOCAL_MACHINE,
+        IBSP_PM_EVENTLOG_PATH, 0, NULL,
+        REG_OPTION_NON_VOLATILE, KEY_ALL_ACCESS, NULL, &pm_hkey, NULL );
+
+    if( reg_status != ERROR_SUCCESS )
+    {
+        fprintf(stderr, "Create EventLog Key failed with %d\n", reg_status );
+        return reg_status;
+    }
+
+    /* create/assign values to the key */
+    RegSetValueExW( pm_hkey, L"EventMessageFile", 0, REG_EXPAND_SZ,
+        (LPBYTE)provider_path, sizeof(provider_path) );
+
+    RegSetValueEx( pm_hkey, TEXT("TypesSupported"), 0, REG_DWORD,
+        (LPBYTE)&typesSupp, sizeof(typesSupp) );
+
+    RegFlushKey( pm_hkey );
+    RegCloseKey( pm_hkey );
+
+    return reg_status;
+}
+
+
+/* Try to destroy IB WSD Performance Register Keys */
+static LONG
+_IBSPPerfmonDeregisterKeys( void )
+{
+    LONG reg_status;
+
+    reg_status = RegDeleteKeyEx( HKEY_LOCAL_MACHINE,
+        IBSP_PM_REGISTRY_PATH IBSP_PM_SUBKEY_PERF,
+        (KEY_WOW64_32KEY | KEY_WOW64_64KEY), 0 );
+
+    if( reg_status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "_IBSPPerfmonDeregisterKeys Remove SubKey failed with %d\n",
+            reg_status );
+    }
+
+    reg_status = RegDeleteKeyEx( HKEY_LOCAL_MACHINE,
+        IBSP_PM_REGISTRY_PATH, (KEY_WOW64_32KEY | KEY_WOW64_64KEY), 0 );
+
+    if( reg_status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "_IBSPPerfmonDeregisterKeys Remove SubKey failed with %d\n",
+            reg_status );
+    }
+
+    reg_status = RegDeleteKeyEx( HKEY_LOCAL_MACHINE,
+        IBSP_PM_EVENTLOG_PATH, (KEY_WOW64_32KEY | KEY_WOW64_64KEY), 0 );
+
+    if( reg_status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "_IBSPPerfmonDeregisterKeys Remove SubKey failed with %d\n",
+            reg_status );
+    }
+
+    return reg_status;
+}
+
+
+/*
+ * Registers the performance counter definitions with PerfMon.
+ * This mirrors what the lodctr.exe utility does externally.
+ */
+static DWORD
+_IBSPPerfmonRegisterCounters( void )
+{
+    DWORD status;
+    char *ibsp_pm_ini_file = NULL;
+
+    ibsp_pm_ini_file = _IBSPGenerateFileName("unused ", IBSP_PM_INI_FILE);
+    if( !ibsp_pm_ini_file )
+    {
+        fprintf( stderr, "_IBSPGenerateFileName failed\n" );
+        status = ERROR_NOT_ENOUGH_MEMORY;
+        goto Cleanup;
+    }
+
+    /*
+     * format commandline string, as per SDK :
+     * Pointer to a null-terminated string that consists of one or more
+     * arbitrary letters, a space, and then the name of the initialization
+     * file.
+     */
+    status = LoadPerfCounterTextStrings( ibsp_pm_ini_file, TRUE );
+    if( status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "IBSPPerfmonRegisterCounters install failed status %d\n", status );
+    }
+Cleanup:
+    if ( ibsp_pm_ini_file )
+    {
+        HeapFree (GetProcessHeap (), 0, ibsp_pm_ini_file);
+    }
+
+    return status;
+}
+
+
+/*
+ * Unregisters the performance counter definitions from PerfMon.
+ * This mirrors what the unlodctr.exe utility does externally.
+ */
+static DWORD
+_IBSPPerfmonDeregisterCounters( void )
+{
+    DWORD status;
+
+    /*
+     * format commandline string, as per SDK :
+     * Pointer to a null-terminated string that consists of one or more
+     * arbitrary letters, a space, and then the name of the initialization
+     * file.
+     */
+    status = UnloadPerfCounterTextStrings(
+        TEXT("unused ") TEXT(IBSP_PM_SUBKEY_NAME), TRUE );
+    if( status != ERROR_SUCCESS )
+    {
+        fprintf( stderr,
+            "IBSPPerfmonDeregisterCounters remove failed status %d\n",
+            status );
+    }
+    return status;
+}
+
+#endif /* PERFMON_ENABLED */
+
+
+/*
+ * Function: usage
+ * Description: Prints usage information.
+ */
+static void
+usage (char *progname)
+{
+    printf ("usage: %s [-i | -r [name] | -l]\n", progname);
+    printf (" -i        Install the OpenIB service provider\n"
+            " -r [name] Remove a service provider (the OpenIB provider by default)\n"
+            " -l        List the installed service providers\n");
+}
+
+
+/* Function: print_providers
+ * Description:
+ * This function prints out each entry in the Winsock catalog.
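+ * Illustrative output (catalog IDs and provider names are hypothetical):
+ *   0000001001 - MSAFD Tcpip [TCP/IP]
+ *   0000001042 - OpenIB Winsock Direct for InfiniBand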
+*/ +static void print_providers(void) +{ + WSAPROTOCOL_INFOW *protocol_info; + unsigned int protocol_count; + unsigned int i; + DWORD protocol_size; + INT errno; + int rc; + + /* Find the size of the buffer */ + protocol_size = 0; + rc = WSCEnumProtocols (NULL, NULL, &protocol_size, &errno); + if (rc == SOCKET_ERROR && errno != WSAENOBUFS) { + printf("WSCEnumProtocols() returned error (%d)\n", errno); + return; + } + + /* Allocate the buffer */ + protocol_info = HeapAlloc (GetProcessHeap (), HEAP_ZERO_MEMORY, protocol_size); + if (protocol_info == NULL) { + printf("HeapAlloc() failed\n"); + return; + } + + /* Enumerate the catalog for real */ + rc = WSCEnumProtocols (NULL, protocol_info, &protocol_size, &errno); + if (rc == SOCKET_ERROR) { + printf("WSCEnumProtocols returned error for real enumeration (%d)\n", + errno); + HeapFree (GetProcessHeap (), 0, protocol_info); + return; + } + + protocol_count = rc; + + for (i = 0; i < protocol_count; i++) { + printf ("%010d - %S\n", protocol_info[i].dwCatalogEntryId, + protocol_info[i].szProtocol); + } + + HeapFree (GetProcessHeap (), 0, protocol_info); + + return; +} + +/* + * Function: install_provider + * Description: installs the service provider + * + * Note: most of the information setup here comes from "MSDN Home > + * MSDN Library > Windows Development > Network Devices and + * Protocols > Design Guide > System Area Networks > Windows Sockets + * Direct > Windows Sockets Direct Component Operation > Installing + * Windows Sockets Direct Components". + * The direct link is http://msdn.microsoft.com/library/default.asp?url=/library/en-us/network/hh/network/wsdp_2xrb.asp + */ +static void install_provider(void) +{ + int rc; + INT errno; + LONG reg_error; + WSAPROTOCOL_INFOW provider; + HKEY hkey; + + /* Now setup the key. */ + reg_error = RegCreateKeyExA( HKEY_LOCAL_MACHINE, winsock_key_path, + 0, NULL, REG_OPTION_NON_VOLATILE, (KEY_WRITE | KEY_READ), NULL, + &hkey, NULL ); + if( reg_error == ERROR_SUCCESS ) + { + reg_error = RegSetValueExA( hkey, openib_key_name, 0, REG_BINARY, + (PBYTE)&provider_guid, sizeof(GUID) ); + if( reg_error == ERROR_SUCCESS ) + { + /* Force the system to write the new key now. */ + RegFlushKey(hkey); + } + else + { + fprintf(stderr, "RegSetValueEx failed with %d\n", GetLastError()); + } + + RegCloseKey(hkey); + } + else + { + fprintf(stderr, "Could not get a handle on Winsock registry (%d)\n", GetLastError()); + } + + /* Setup the values in PROTOCOL_INFO */ + provider.dwServiceFlags1 = + XP1_GUARANTEED_DELIVERY | + XP1_GUARANTEED_ORDER | + XP1_MESSAGE_ORIENTED | + XP1_GRACEFUL_CLOSE; + provider.dwServiceFlags2 = 0; /* Reserved */ + provider.dwServiceFlags3 = 0; /* Reserved */ + provider.dwServiceFlags4 = 0; /* Reserved */ + provider.dwProviderFlags = PFL_HIDDEN; + provider.ProviderId = provider_guid; /* Service Provider ID provided by vendor. 
Need to be changed later */ + provider.dwCatalogEntryId = 0; + provider.ProtocolChain.ChainLen = 1; /* Base Protocol Service Provider */ + provider.iVersion = 2; /* don't know what it is */ + provider.iAddressFamily = AF_INET; + provider.iMaxSockAddr = 16; + provider.iMinSockAddr = 16; + provider.iSocketType = SOCK_STREAM; + provider.iProtocol = IPPROTO_TCP; + provider.iProtocolMaxOffset = 0; + provider.iNetworkByteOrder = BIGENDIAN; + provider.iSecurityScheme = SECURITY_PROTOCOL_NONE; + provider.dwMessageSize = 0xFFFFFFFF; /* IB supports 32-bit lengths for data transfers on RC */ + provider.dwProviderReserved = 0; + wcscpy( provider.szProtocol, provider_name ); + + rc = WSCInstallProvider( + &provider_guid, provider_path, &provider, 1, &errno ); + if( rc == SOCKET_ERROR ) + { + if( errno == WSANO_RECOVERY ) + printf("The provider is already installed\n"); + else + printf("install_provider: WSCInstallProvider failed: %d\n", errno); + } +} + +/* + * Function: remove_provider + * Description: removes our provider. + */ +static void remove_provider( const char* const provider_name ) +{ + int rc; + int errno; + LONG reg_error; + HKEY hkey; + + /* Remove our key */ + reg_error = RegOpenKeyExA(HKEY_LOCAL_MACHINE, + winsock_key_path, + 0, + (KEY_WRITE | KEY_READ), + &hkey); + if (reg_error == ERROR_SUCCESS) { + + reg_error = RegDeleteValueA(hkey, provider_name); + if (reg_error == ERROR_SUCCESS) { + /* Force the system to remove the key now. */ + RegFlushKey(hkey); + } else { + fprintf(stderr, "RegDeleteValue failed with %d\n", GetLastError()); + } + + RegCloseKey(hkey); + } + + /* Remove from the catalog */ + rc = WSCDeinstallProvider(&provider_guid, &errno); + if (rc == SOCKET_ERROR) { + printf ("WSCDeinstallProvider failed: %d\n", errno); + } + +#ifdef _WIN64 + /* Remove from the 32-bit catalog too! */ + rc = WSCDeinstallProvider32(&provider_guid, &errno); + if (rc == SOCKET_ERROR) { + printf ("WSCDeinstallProvider32 failed: %d\n", errno); + } +#endif /* _WIN64 */ +} + +/* Function: main + * + * Description: + * Parse the command line arguments and call either the install or remove + * routine. + */ +int __cdecl main (int argc, char *argv[]) +{ + WSADATA wsd; + + /* Load Winsock */ + if (WSAStartup (MAKEWORD (2, 2), &wsd) != 0) { + printf ("InstallSP: Unable to load Winsock: %d\n", GetLastError ()); + return -1; + } + + /* Confirm that the WinSock DLL supports 2.2. Note that if the + * DLL supports versions greater than 2.2 in addition to 2.2, it + * will still return 2.2 in wVersion since that is the version we + * requested. */ + if (LOBYTE (wsd.wVersion) != 2 || HIBYTE (wsd.wVersion) != 2) { + + /* Tell the user that we could not find a usable WinSock DLL. 
+         */
+        WSACleanup ();
+        printf
+            ("InstallSP: Unable to find a usable version of Winsock DLL\n");
+        return -1;
+    }
+    if (argc < 2) {
+        usage (argv[0]);
+        return -1;
+    }
+    if ((strlen (argv[1]) != 2) ||
+        ((argv[1][0] != '-') && (argv[1][0] != '/'))) {
+        usage (argv[0]);
+        return -1;
+    }
+    switch (tolower (argv[1][1])) {
+
+    case 'i':
+        /* Install the Infiniband Service Provider */
+        install_provider ();
+#ifdef PERFMON_ENABLED
+        _IBSPPerfmonIniFilesGenerate();
+        if ( _IBSPPerfmonRegisterKeys() == ERROR_SUCCESS )
+            _IBSPPerfmonRegisterCounters();
+#endif
+        break;
+
+    case 'r':
+        /* Remove the service provider */
+        if( argc == 2 )
+            remove_provider( openib_key_name );
+        else
+            remove_provider( argv[2] );
+#ifdef PERFMON_ENABLED
+        _IBSPPerfmonIniFilesRemove();
+        if ( _IBSPPerfmonDeregisterCounters() == ERROR_SUCCESS )
+            _IBSPPerfmonDeregisterKeys();
+#endif
+        break;
+
+    case 'l':
+        /* List existing providers */
+        print_providers();
+        break;
+
+    default:
+        usage (argv[0]);
+        break;
+    }
+
+    WSACleanup ();
+
+    return 0;
+}
diff --git a/branches/Ndi/tools/wsdinstall/user/installsp.rc b/branches/Ndi/tools/wsdinstall/user/installsp.rc
new file mode 100644
index 00000000..51a73c81
--- /dev/null
+++ b/branches/Ndi/tools/wsdinstall/user/installsp.rc
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include
+
+#define VER_FILETYPE VFT_APP
+#define VER_FILESUBTYPE VFT2_UNKNOWN
+
+#ifdef _DEBUG_
+#define VER_FILEDESCRIPTION_STR "Winsock Direct for InfiniBand installer (Debug)"
+#else
+#define VER_FILEDESCRIPTION_STR "Winsock Direct for InfiniBand installer"
+#endif
+
+#define VER_INTERNALNAME_STR "installsp.exe"
+#define VER_ORIGINALFILENAME_STR "installsp.exe"
+
+#include
diff --git a/branches/Ndi/tools/wsdinstall/user/makefile b/branches/Ndi/tools/wsdinstall/user/makefile
new file mode 100644
index 00000000..bffacaa7
--- /dev/null
+++ b/branches/Ndi/tools/wsdinstall/user/makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source
+# file to this component. This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
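+#
+# For reference, the .\sources file for a user-mode tool like this one is
+# typically along these lines (an illustrative sketch, not copied from the
+# tree):
+#
+#     TARGETNAME=installsp
+#     TARGETTYPE=PROGRAM
+#     UMTYPE=console
+#     SOURCES=installsp.c installsp.rc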
+# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_adapter_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_adapter_util.h new file mode 100644 index 00000000..21cd8392 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_adapter_util.h @@ -0,0 +1,299 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_adapter_util.h + * + * PURPOSE: Utility defs & routines for the adapter data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_ADAPTER_UTIL_H_ +#define _DAPL_ADAPTER_UTIL_H_ + +#include +typedef enum async_handler_type +{ + DAPL_ASYNC_UNAFILIATED, + DAPL_ASYNC_CQ_ERROR, + DAPL_ASYNC_CQ_COMPLETION, + DAPL_ASYNC_QP_ERROR +} DAPL_ASYNC_HANDLER_TYPE; + + +DAT_RETURN dapls_ib_enum_hcas ( + OUT DAPL_HCA_NAME **hca_names, + OUT DAT_COUNT *total_hca_count, + IN const char *vendor ); + +DAT_RETURN dapls_ib_open_hca ( + IN IB_HCA_NAME name, + OUT ib_hca_handle_t *ib_hca_handle); + +DAT_RETURN dapls_ib_close_hca ( + IN ib_hca_handle_t ib_hca_handle); + +DAT_RETURN dapls_ib_qp_alloc ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_ptr, + IN DAPL_EP *ep_ctx_ptr ); + +DAT_RETURN dapls_ib_qp_free ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_ptr ); + +DAT_RETURN dapls_ib_qp_modify ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_ptr, + IN DAT_EP_ATTR *ep_attr ); +DAT_RETURN dapls_ib_connect ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_IA_ADDRESS_PTR remote_ia_address, + IN DAT_CONN_QUAL remote_conn_qual, + IN DAT_COUNT prd_size, + IN DAPL_PRIVATE *prd_ptr ); + +DAT_RETURN dapls_ib_disconnect ( + IN DAPL_EP *ep_ptr, + IN DAT_CLOSE_FLAGS close_flags ); + +DAT_RETURN dapls_ib_setup_conn_listener ( + IN DAPL_IA *ia_ptr, + IN DAT_UINT64 ServiceID, + IN DAPL_SP *sp_ptr ); + +DAT_RETURN dapls_ib_remove_conn_listener ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_ptr); + +DAT_RETURN dapls_ib_accept_connection ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_EP_HANDLE ep_handle, + IN DAPL_PRIVATE *prd_ptr ); + +DAT_RETURN dapls_ib_reject_connection ( + IN ib_cm_handle_t cm_handle, + IN int reject_reason ); + +DAT_RETURN dapls_ib_cr_handoff ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_CONN_QUAL cr_handoff ); + +DAT_RETURN dapls_ib_setup_async_callback ( + IN DAPL_IA *ia_ptr, + IN DAPL_ASYNC_HANDLER_TYPE handler_type, + IN unsigned int * callback_handle, + IN ib_async_handler_t callback, + IN void *context ); + +DAT_RETURN dapls_ib_cq_alloc 
( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr, + IN DAT_COUNT *cqlen); + +DAT_RETURN dapls_ib_cq_resize ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr, + IN DAT_COUNT *qlen ); + +DAT_RETURN dapls_ib_cq_free ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr); + +DAT_RETURN dapls_set_cq_notify ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr); + +DAT_RETURN dapls_ib_cqd_create ( + IN DAPL_HCA *hca_ptr); + +DAT_RETURN dapls_ib_cqd_destroy ( + IN DAPL_HCA *hca_ptr); + +DAT_RETURN dapls_ib_pd_alloc ( + IN DAPL_IA *ia_ptr, + IN DAPL_PZ *pz); + +DAT_RETURN dapls_ib_pd_free ( + IN DAPL_PZ *pz); + +DAT_RETURN dapls_ib_mr_register ( + IN DAPL_IA *ia_ptr, + IN DAPL_LMR *lmr, + IN DAT_PVOID virt_addr, + IN DAT_VLEN length, + IN DAT_MEM_PRIV_FLAGS privileges); + +DAT_RETURN dapls_ib_mr_deregister ( + IN DAPL_LMR *lmr); + +DAT_RETURN dapls_ib_mr_register_shared ( + IN DAPL_IA *ia_ptr, + IN DAPL_LMR *lmr, + IN DAT_MEM_PRIV_FLAGS privileges); + +DAT_RETURN dapls_ib_get_any_svid ( + IN DAPL_HCA *hca_ptr, + OUT DAT_CONN_QUAL *p_svid); + +DAT_RETURN dapls_ib_mw_alloc ( + IN DAPL_RMR *rmr); + +DAT_RETURN dapls_ib_mw_free ( + IN DAPL_RMR *rmr); + +DAT_RETURN dapls_ib_mw_bind ( + IN DAPL_RMR *rmr, + IN DAPL_LMR *lmr, + IN DAPL_EP *ep, + IN DAPL_COOKIE *cookie, + IN DAT_VADDR virtual_address, + IN DAT_VLEN length, + IN DAT_MEM_PRIV_FLAGS mem_priv, + IN ib_bool_t is_signaled); + +DAT_RETURN dapls_ib_mw_unbind ( + IN DAPL_RMR *rmr, + IN DAPL_EP *ep, + IN DAPL_COOKIE *cookie, + IN ib_bool_t is_signaled); + +DAT_RETURN dapls_ib_query_hca ( + IN DAPL_HCA *hca_ptr, + OUT DAT_IA_ATTR *ia_attr, + OUT DAT_EP_ATTR *ep_attr, + OUT DAT_SOCK_ADDR6 *ip_addr); + +DAT_RETURN dapls_ib_completion_poll ( + IN ib_hca_handle_t hca_handle, + IN ib_cq_handle_t cq_handle, + IN ib_work_completion_t *cqe_ptr); + +DAT_RETURN dapls_ib_completion_notify ( + IN ib_hca_handle_t hca_handle, + IN ib_cq_handle_t cq_handle, + IN ib_notification_type_t type); + +DAT_RETURN +dapls_ib_n_completions_notify ( + IN ib_hca_handle_t hca_handle, + IN ib_cq_handle_t cq_handle, + IN uint32_t n_cqes); + +DAT_RETURN +dapls_ib_peek_cq ( + IN ib_cq_handle_t cq_handle, + OUT uint32_t* p_n_cqes); + +DAT_RETURN +dapls_ib_wait_object_create ( + IN cl_waitobj_handle_t *p_cq_wait_obj_handle); + +DAT_RETURN +dapls_ib_wait_object_destroy ( + IN cl_waitobj_handle_t cq_wait_obj_handle); + +DAT_RETURN +dapls_ib_wait_object_wakeup ( + IN cl_waitobj_handle_t cq_wait_obj_handle); + +DAT_RETURN +dapls_ib_wait_object_wait ( + IN cl_waitobj_handle_t cq_wait_obj_handle, + IN uint32_t timeout); + +DAT_DTO_COMPLETION_STATUS +dapls_ib_get_dto_status( + IN ib_work_completion_t *cqe_ptr); + +DAT_RETURN +dapls_ib_reinit_ep ( + IN DAPL_EP *ep_ptr); + +void dapls_ib_disconnect_clean ( + IN DAPL_EP *ep_ptr, + IN DAT_BOOLEAN passive, + IN const ib_cm_events_t ib_cm_event); + +DAT_RETURN dapls_ib_get_async_event( + IN ib_error_record_t *cause_ptr, + OUT DAT_EVENT_NUMBER *async_event); + +DAT_COUNT dapls_ib_private_data_size ( + IN DAPL_PRIVATE *prd_ptr, + IN DAPL_PDATA_OP conn_op); + +DAT_EVENT_NUMBER dapls_ib_get_dat_event ( + IN const ib_cm_events_t ib_cm_event, + IN DAT_BOOLEAN active); + +ib_cm_events_t dapls_ib_get_cm_event ( + IN DAT_EVENT_NUMBER dat_event_num); + +DAT_RETURN dapls_ib_cm_remote_addr ( + IN DAT_HANDLE dat_handle, + IN DAPL_PRIVATE *prd_ptr, + OUT DAT_SOCK_ADDR6 *remote_ia_address ); + +/* + * Values for provider DAT_NAMED_ATTR + */ +#define IB_QP_STATE 1 /* QP state change request */ + + +#ifndef NO_NAME_SERVICE + +DAT_RETURN dapls_ib_ns_create_gid_map ( + IN DAPL_HCA 
*hca_ptr); + +DAT_RETURN dapls_ib_ns_remove_gid_map ( + IN DAPL_HCA *hca_ptr); + +DAT_RETURN dapls_ib_ns_map_gid ( + IN DAPL_HCA *hca_ptr, + IN DAT_IA_ADDRESS_PTR p_ia_address, + OUT GID *p_gid); + +DAT_RETURN dapls_ib_ns_map_ipaddr ( + IN DAPL_HCA *hca_ptr, + IN GID gid, + OUT DAT_IA_ADDRESS_PTR p_ia_address); + +#endif /* NO_NAME_SERVICE */ + +#ifdef IBAPI +#include "dapl_ibapi_dto.h" +#elif VAPI +#include "dapl_vapi_dto.h" +#else +#include "dapl_ibal_dto.h" +#endif + + +#endif /* _DAPL_ADAPTER_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_create.c new file mode 100644 index 00000000..f7fee96b --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_create.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_cno_create.c + * + * PURPOSE: Consumer Notification Object creation + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3.2.1 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_cno_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_cno_create + * + * DAPL Requirements Version xxx, 6.3.4.1 + * + * Create a consumer notification object instance + * + * Input: + * ia_handle + * wait_agent + * cno_handle + * + * Output: + * cno_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN dapl_cno_create( + IN DAT_IA_HANDLE ia_handle, /* ia_handle */ + IN DAT_OS_WAIT_PROXY_AGENT wait_agent, /* agent */ + OUT DAT_CNO_HANDLE *cno_handle) /* cno_handle */ + +{ + DAPL_IA *ia_ptr; + DAPL_CNO *cno_ptr; + DAT_RETURN dat_status; + + ia_ptr = (DAPL_IA *)ia_handle; + cno_ptr = NULL; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (ia_handle, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + + cno_ptr = dapl_cno_alloc(ia_ptr, wait_agent); + + if (!cno_ptr) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; + + dapl_ia_link_cno(ia_ptr, cno_ptr); + + *cno_handle = cno_ptr; + + bail: + if (dat_status != DAT_SUCCESS && cno_ptr != NULL) + { + dapl_cno_dealloc(cno_ptr); + } + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_free.c new file mode 100644 index 00000000..5efee23d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_free.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_cno_free.c + * + * PURPOSE: Consumer Notification Object destruction + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3.2.2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" +#include "dapl_cno_util.h" + +/* + * dapl_cno_free + * + * DAPL Requirements Version xxx, 6.3.2.2 + * + * Destroy a consumer notification object instance + * + * Input: + * cno_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_STATE + */ +DAT_RETURN dapl_cno_free( + IN DAT_CNO_HANDLE cno_handle) /* cno_handle */ + +{ + DAPL_CNO *cno_ptr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + cno_ptr = (DAPL_CNO *)cno_handle; + + if (DAPL_BAD_HANDLE (cno_handle, DAPL_MAGIC_CNO)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CNO); + goto bail; + } + + if (cno_ptr->cno_ref_count != 0 + || cno_ptr->cno_waiters != 0) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_CNO_IN_USE); + goto bail; + } + + dapl_ia_unlink_cno (cno_ptr->header.owner_ia, cno_ptr); + dapl_cno_dealloc (cno_ptr); + +bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_modify_agent.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_modify_agent.c new file mode 100644 index 00000000..b60ebb9d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_modify_agent.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_cno_modify_agent.c
+ *
+ * PURPOSE: Modify the wait proxy agent associated with the CNO
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 3.2.4
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+
+/*
+ * dapl_cno_modify_agent
+ *
+ * DAPL Requirements Version xxx, 6.3.2.4
+ *
+ * Modify the wait proxy agent associated with the CNO
+ *
+ * Input:
+ *     cno_handle
+ *     prx_agent
+ *
+ * Output:
+ *     none
+ *
+ * Returns:
+ *     DAT_SUCCESS
+ *     DAT_INVALID_HANDLE
+ *     DAT_INVALID_PARAMETER
+ */
+DAT_RETURN dapl_cno_modify_agent(
+    IN DAT_CNO_HANDLE cno_handle,          /* cno_handle */
+    IN DAT_OS_WAIT_PROXY_AGENT prx_agent ) /* agent */
+
+{
+    DAPL_CNO *cno_ptr;
+    DAT_RETURN dat_status;
+
+    dat_status = DAT_SUCCESS;
+    if ( DAPL_BAD_HANDLE (cno_handle, DAPL_MAGIC_CNO) )
+    {
+        dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CNO);
+        goto bail;
+    }
+
+    cno_ptr = (DAPL_CNO *) cno_handle;
+    dapl_os_lock(&cno_ptr->header.lock);
+    cno_ptr->cno_wait_agent = prx_agent;
+    dapl_os_unlock(&cno_ptr->header.lock);
+
+ bail:
+    return dat_status;
+}
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_query.c
new file mode 100644
index 00000000..76b3e4ab
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_query.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_cno_query.c
+ *
+ * PURPOSE: Return the consumer parameters of the CNO
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 3.2.5
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+
+/*
+ * dapl_cno_query
+ *
+ * DAPL Requirements Version xxx, 6.3.2.5
+ *
+ * Return the consumer parameters of the CNO
+ *
+ * Input:
+ *     cno_handle
+ *     cno_param_mask
+ *     cno_param
+ *
+ * Output:
+ *     cno_param
+ *
+ * Returns:
+ *     DAT_SUCCESS
+ *     DAT_INVALID_HANDLE
+ *     DAT_INVALID_PARAMETER
+ */
+DAT_RETURN dapl_cno_query(
+    IN DAT_CNO_HANDLE cno_handle,          /* cno_handle */
+    IN DAT_CNO_PARAM_MASK cno_param_mask,  /* cno_param_mask */
+    OUT DAT_CNO_PARAM *cno_param )         /* cno_param */
+
+
+{
+    DAPL_CNO *cno_ptr;
+    DAT_RETURN dat_status;
+
+    dat_status = DAT_SUCCESS;
+
+    if ( DAPL_BAD_HANDLE (cno_handle, DAPL_MAGIC_CNO) )
+    {
+        dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CNO);
+        goto bail;
+    }
+
+    if ( NULL == cno_param )
+    {
+        dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3);
+        goto bail;
+    }
+
+    cno_ptr = (DAPL_CNO *) cno_handle;
+    if ( cno_param_mask )
+    {
+        cno_param->ia_handle = cno_ptr->header.owner_ia;
+        cno_param->agent = cno_ptr->cno_wait_agent;
+    }
+    else
+    {
+        dat_status = DAT_ERROR(DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+    }
+ bail:
+    return dat_status;
+}
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.c
new file mode 100644
index 00000000..cc83220b
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_cno_util.c
+ *
+ * PURPOSE: Manage CNO Info structure
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_ia_util.h"
+#include "dapl_cno_util.h"
+#include "dapl_adapter_util.h"
+
+
+
+/*
+ * dapl_cno_alloc
+ *
+ * Allocate and initialize a CNO struct
+ *
+ * Input:
+ *     ia
+ *
+ * Returns:
+ *     cno_ptr, or null on failure.
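+ *
+ * Usage note: dapl_cno_create() (in dapl_cno_create.c, earlier in this
+ * patch) is the expected caller; it links the new CNO onto the owning
+ * IA with dapl_ia_link_cno() and calls dapl_cno_dealloc() itself if a
+ * later step fails.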
+ */
+DAPL_CNO *
+dapl_cno_alloc(
+    IN DAPL_IA *ia_ptr,
+    IN DAT_OS_WAIT_PROXY_AGENT wait_agent)
+{
+    DAPL_CNO *cno_ptr;
+
+    cno_ptr = (DAPL_CNO *) dapl_os_alloc(sizeof (DAPL_CNO));
+    if (!cno_ptr)
+    {
+        return NULL;
+    }
+
+    /* zero the structure */
+    dapl_os_memzero(cno_ptr, sizeof(DAPL_CNO));
+
+    /*
+     * Initialize the header.
+     */
+    cno_ptr->header.provider = ia_ptr->header.provider;
+    cno_ptr->header.magic = DAPL_MAGIC_CNO;
+    cno_ptr->header.handle_type = DAT_HANDLE_TYPE_CNO;
+    cno_ptr->header.owner_ia = ia_ptr;
+    cno_ptr->header.user_context.as_64 = 0;
+    cno_ptr->header.user_context.as_ptr = NULL;
+    dapl_llist_init_entry (&cno_ptr->header.ia_list_entry);
+    dapl_os_lock_init (&cno_ptr->header.lock);
+
+    /*
+     * Initialize the body
+     */
+    cno_ptr->cno_waiters = 0;
+    cno_ptr->cno_ref_count = 0;
+    cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED;
+    cno_ptr->cno_evd_triggered = NULL;
+    cno_ptr->cno_wait_agent = wait_agent;
+    dapl_os_wait_object_init(&cno_ptr->cno_wait_object);
+
+    return cno_ptr;
+}
+
+/*
+ * dapl_cno_dealloc
+ *
+ * Free the passed in CNO structure.
+ *
+ * Input:
+ *     cno_ptr
+ *
+ * Output:
+ *     none
+ *
+ * Returns:
+ *     none
+ *
+ */
+void
+dapl_cno_dealloc(
+    IN DAPL_CNO *cno_ptr)
+{
+    dapl_os_assert (cno_ptr->header.magic == DAPL_MAGIC_CNO);
+    dapl_os_assert (cno_ptr->cno_ref_count == 0);
+
+    /*
+     * deinitialize the header
+     */
+    cno_ptr->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */
+
+    dapl_os_wait_object_destroy (&cno_ptr->cno_wait_object);
+    dapl_os_free (cno_ptr, sizeof (DAPL_CNO));
+}
+
+
+/*
+ * dapl_cno_trigger
+ *
+ * DAPL Internal routine to trigger the specified CNO.
+ * Called by the callback of some EVD associated with the CNO.
+ *
+ * Input:
+ *     cno_ptr
+ *     evd_ptr    EVD triggering
+ *
+ * Output:
+ *     None
+ *
+ * Returns:
+ *     None
+ */
+void
+dapl_cno_trigger(
+    IN DAPL_CNO *cno_ptr,
+    IN DAPL_EVD *evd_ptr)
+{
+    dapl_os_assert (cno_ptr != NULL );
+    dapl_os_assert(cno_ptr->header.magic == DAPL_MAGIC_CNO);
+    dapl_os_assert(evd_ptr == NULL || evd_ptr->header.magic == DAPL_MAGIC_EVD);
+
+    dapl_os_lock(&cno_ptr->header.lock);
+
+    /* Maybe I should just return, but this really shouldn't happen. */
+    dapl_os_assert(cno_ptr->cno_state != DAPL_CNO_STATE_DEAD);
+
+    if (cno_ptr->cno_state == DAPL_CNO_STATE_UNTRIGGERED)
+    {
+        DAT_OS_WAIT_PROXY_AGENT agent;
+
+        /* Squirrel away wait agent, and delete link. */
+        agent = cno_ptr->cno_wait_agent;
+
+        /* Separate assignments for windows compiler. */
+        cno_ptr->cno_wait_agent.instance_data = NULL;
+        cno_ptr->cno_wait_agent.proxy_agent_func = NULL;
+
+        cno_ptr->cno_evd_triggered = evd_ptr;
+
+        /*
+         * Must set to triggered and let the waiter untrigger, to
+         * handle a waiter timing out.
+         */
+        cno_ptr->cno_state = DAPL_CNO_STATE_TRIGGERED;
+        if (cno_ptr->cno_waiters > 0)
+        {
+            dapl_os_wait_object_wakeup(&cno_ptr->cno_wait_object);
+        }
+
+        dapl_os_unlock(&cno_ptr->header.lock);
+
+        /* Trigger the OS proxy wait agent, if one exists. */
+        if (agent.proxy_agent_func != (DAT_AGENT_FUNC) NULL)
+        {
+            agent.proxy_agent_func (
+                agent.instance_data, (DAT_EVD_HANDLE) evd_ptr );
+        }
+    }
+    else
+    {
+        dapl_os_unlock(&cno_ptr->header.lock);
+    }
+
+    return;
+}
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.h
new file mode 100644
index 00000000..4d647e6e
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_util.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_cno_util.h + * + * PURPOSE: Utility defs & routines for the cno data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_CNO_UTIL_H_ +#define _DAPL_CNO_UTIL_H_ + +#include "dapl.h" + +DAPL_CNO * +dapl_cno_alloc ( + IN DAPL_IA *ia_ptr, + IN DAT_OS_WAIT_PROXY_AGENT wait_agent) ; + +void +dapl_cno_dealloc ( + IN DAPL_CNO *cno_ptr) ; + +void +dapl_cno_trigger( + IN DAPL_CNO *cno_ptr, + IN DAPL_EVD *evd_ptr); + +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_wait.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_wait.c new file mode 100644 index 00000000..0504734a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cno_wait.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_cno_wait.c + * + * PURPOSE: Wait for a consumer notification event + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3.2.3 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_cno_wait + * + * DAPL Requirements Version xxx, 6.3.2.3 + * + * Wait for a consumer notification event + * + * Input: + * cno_handle + * timeout + * evd_handle + * + * Output: + * evd_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_QUEUE_EMPTY + * DAT_INVALID_PARAMETER + */ +DAT_RETURN dapl_cno_wait( + IN DAT_CNO_HANDLE cno_handle, /* cno_handle */ + IN DAT_TIMEOUT timeout, /* agent */ + OUT DAT_EVD_HANDLE *evd_handle) /* ia_handle */ + +{ + DAPL_CNO *cno_ptr; + DAT_RETURN dat_status; + + if ( DAPL_BAD_HANDLE (cno_handle, DAPL_MAGIC_CNO) ) + { + dat_status = DAT_INVALID_HANDLE | DAT_INVALID_HANDLE_CNO; + goto bail; + } + + dat_status = DAT_SUCCESS; + + cno_ptr = (DAPL_CNO *) cno_handle; + + if ( cno_ptr->cno_state == DAPL_CNO_STATE_DEAD ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_CNO_DEAD); + goto bail; + } + + dapl_os_lock ( &cno_ptr->header.lock ); + while (cno_ptr->cno_state == DAPL_CNO_STATE_UNTRIGGERED + && DAT_GET_TYPE(dat_status) != DAT_TIMEOUT_EXPIRED) + { + cno_ptr->cno_waiters++; + dapl_os_unlock ( &cno_ptr->header.lock ); + dat_status = dapl_os_wait_object_wait ( + &cno_ptr->cno_wait_object, timeout ); + dapl_os_lock ( &cno_ptr->header.lock ); + cno_ptr->cno_waiters--; + } + + if ( cno_ptr->cno_state == DAPL_CNO_STATE_DEAD ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_CNO_DEAD); + } + else if ( dat_status == DAT_SUCCESS ) + { + /* + * After the first triggering, this will be a valid handle. + * If we're racing with wakeups of other CNO waiters, + * that's ok. + */ + dapl_os_assert ( cno_ptr->cno_state == DAPL_CNO_STATE_TRIGGERED ); + cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; + *evd_handle = cno_ptr->cno_evd_triggered; + } + else if ( dat_status == DAT_TIMEOUT_EXPIRED ) + { + cno_ptr->cno_state = DAPL_CNO_STATE_UNTRIGGERED; + *evd_handle = NULL; + dat_status = DAT_QUEUE_EMPTY; + } + else + { + /* + * The only other reason we could have made it out of + * the loop is a timeout or an interrupted system call. + */ + dapl_os_assert(DAT_GET_TYPE(dat_status) == DAT_TIMEOUT_EXPIRED || + DAT_GET_TYPE(dat_status) == DAT_INTERRUPTED_CALL); + } + dapl_os_unlock ( &cno_ptr->header.lock ); + + bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.c new file mode 100644 index 00000000..28dbddfd --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.c @@ -0,0 +1,400 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. 
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_cookie.c
+ *
+ * PURPOSE: Manage CQE cookie structures
+ *
+ * The DAPL spec requires that every cookie passed to a posting operation
+ * be returned in that operation's corresponding completion.
+ *
+ * Implementing this feature is complicated by the user's ability to
+ * suppress event generation for specific operations. When these operations
+ * complete successfully, the provider does not have an easy way to
+ * deallocate resources devoted to storing context data for these operations.
+ *
+ * To support this feature, a pool of memory is allocated up front, large
+ * enough to hold cookie data for the maximum number of operations possible
+ * on an endpoint.
+ *
+ * Two pieces of information are maintained to manage cookie allocation:
+ *
+ * head index : index of next unallocated cookie
+ * tail index : index of last unallocated cookie
+ *
+ * Each cookie stores its index in this memory pool.
+ *
+ * When an event is received, the index stored in the event's cookie will be
+ * used to update the tail. This will implicitly deallocate all of the cookies
+ * "between" the old tail and the new tail.
+ *
+ * The implementation relies on the following assumptions:
+ *
+ * - there can be only 1 thread in dat_ep_post_send(), dat_ep_post_rdma_write(),
+ *   dat_ep_post_rdma_read(), or dat_rmr_bind() at a time, therefore
+ *   dapls_cb_get() does not need to be thread safe when manipulating
+ *   request data structures.
+ *
+ * - there can be only 1 thread in dat_ep_post_recv(), therefore
+ *   dapls_cb_get() does not need to be thread safe when manipulating
+ *   receive data structures.
+ *
+ * - there can be only 1 thread generating completions for a given EP's request
+ *   operations, therefore dapls_cb_put() does not need to be thread safe when
+ *   manipulating request data structures.
+ *
+ * - there can be only 1 thread generating completions for a given EP's receive
+ *   operations, therefore dapls_cb_put() does not need to be thread safe when
+ *   manipulating receive data structures.
+ *
+ * - completions are delivered in order
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_cookie.h"
+#include "dapl_ring_buffer_util.h"
+
+
+/*********************************************************************
+ *                                                                   *
+ * Function Prototypes                                               *
+ *                                                                   *
+ *********************************************************************/
+
+DAT_RETURN
+dapls_cb_get (
+    DAPL_COOKIE_BUFFER *buffer,
+    DAPL_COOKIE **cookie_ptr );
+
+DAT_RETURN
+dapls_cb_put (
+    DAPL_COOKIE_BUFFER *buffer,
+    DAPL_COOKIE *cookie );
+
+
+/*********************************************************************
+ *                                                                   *
+ * Function Definitions                                              *
+ *                                                                   *
+ *********************************************************************/
+
+
+/*
+ * dapls_cb_create
+ *
+ * Given a DAPL_COOKIE_BUFFER, allocate and initialize memory for
+ * the data structure.
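+ *
+ * Worked example of the head/tail scheme described at the top of this
+ * file (sizes illustrative): with pool_size == 5, three dapls_cb_get()
+ * calls leave head == 3 and tail == 0, so cookies 1..3 are outstanding.
+ * A completion carrying cookie index 3 makes dapls_cb_put() set
+ * tail = 3, implicitly releasing cookies 1 and 2 as well; that is how
+ * operations with suppressed completions are reclaimed.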
+ * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * ep endpoint to associate with cookies + * size number of elements to allocate & manage + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_cb_create ( + IN DAPL_COOKIE_BUFFER *buffer, + IN DAPL_EP *ep, + IN DAT_COUNT size ) +{ + DAT_COUNT i; + + /* + * allocate one additional entry so that the tail + * can always point at an empty location + */ + size++; + + buffer->pool = dapl_os_alloc (size * sizeof (DAPL_COOKIE)); + if ( NULL != buffer->pool ) + { + buffer->pool_size = size; + buffer->head = 0; + buffer->tail = 0; + dapl_os_lock_init (&buffer->lock); + + for ( i = 0; i < size; i++ ) + { +#ifdef DAPL_DBG + buffer->pool[i].type = DAPL_COOKIE_TYPE_NULL; +#endif + buffer->pool[i].index = i; + buffer->pool[i].ep = ep; + } + + return (DAT_SUCCESS); + } + else + { + return (DAT_INSUFFICIENT_RESOURCES); + } +} + + +/* + * dapls_cb_free + * + * Free the data structure + * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapls_cb_free ( + IN DAPL_COOKIE_BUFFER *buffer ) +{ + dapl_os_lock ( &buffer->lock ); + if ( NULL != buffer->pool ) + { + dapl_os_free (buffer->pool, buffer->pool_size * sizeof (DAPL_COOKIE)); + } + dapl_os_unlock ( &buffer->lock ); +} + + +/* + * dapls_cb_get + * + * Remove an entry from the buffer + * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * + * Output: + * cookie_ptr pointer to pointer to cookie + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_cb_get ( + IN DAPL_COOKIE_BUFFER *buffer, + OUT DAPL_COOKIE **cookie_ptr ) +{ + DAT_RETURN dat_status; + DAT_COUNT new_head; + + dapl_os_assert(NULL != cookie_ptr); + dapl_os_lock ( &buffer->lock ); + + new_head = (buffer->head + 1) % buffer->pool_size; + + if ( new_head == buffer->tail ) + { + dat_status = DAT_INSUFFICIENT_RESOURCES; + goto bail; + } + else + { + buffer->head = new_head; + + *cookie_ptr = &buffer->pool[buffer->head]; + +#ifdef DAPL_DBG + if ( buffer->pool[buffer->head].type != DAPL_COOKIE_TYPE_NULL ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsCkGet: EP %p cookie %p at %d in use as 0x%x\n", + buffer->pool[buffer->head].ep, + &buffer->pool[buffer->head], buffer->head, + buffer->pool[buffer->head].type ); + } +#endif + dat_status = DAT_SUCCESS; + } +bail: + dapl_os_unlock ( &buffer->lock ); + return dat_status; +} + + +/* + * dapls_cb_put + * + * Add entry(s) to the buffer + * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * cookie pointer to cookie + * + * Output: + * entry entry removed from the ring buffer + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_EMPTY + * + */ +DAT_RETURN +dapls_cb_put ( + IN DAPL_COOKIE_BUFFER *buffer, + IN DAPL_COOKIE *cookie ) +{ + dapl_os_lock ( &buffer->lock ); + +#ifdef DAPL_DBG + cookie->type = DAPL_COOKIE_TYPE_NULL; +#endif + buffer->tail = cookie->index; + + dapl_os_unlock ( &buffer->lock ); + + return DAT_SUCCESS; +} + + +/* + * dapls_rmr_cookie_alloc + * + * Allocate an RMR Bind cookie + * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * rmr rmr to associate with the cookie + * user_cookie user's cookie data + * + * Output: + * cookie_ptr pointer to pointer to allocated cookie + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_EMPTY + * + */ +DAT_RETURN +dapls_rmr_cookie_alloc ( + IN DAPL_COOKIE_BUFFER *buffer, + IN DAPL_RMR *rmr, + IN DAT_RMR_COOKIE user_cookie, + OUT DAPL_COOKIE 
**cookie_ptr ) +{ + DAPL_COOKIE *cookie; + DAT_RETURN dat_status; + + if ( DAT_SUCCESS != dapls_cb_get (buffer, &cookie) ) + { + *cookie_ptr = NULL; + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_status = DAT_SUCCESS; + cookie->type = DAPL_COOKIE_TYPE_RMR; + cookie->val.rmr.rmr = rmr; + cookie->val.rmr.cookie = user_cookie; + + *cookie_ptr = cookie; + + bail: + return dat_status; +} + + +/* + * dapls_dto_cookie_alloc + * + * Allocate a DTO cookie + * + * Input: + * buffer pointer to DAPL_COOKIE_BUFFER + * type DTO type + * user_cookie user's cookie data + * + * Output: + * cookie_ptr pointer to pointer to allocated cookie + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_EMPTY + * + */ +DAT_RETURN +dapls_dto_cookie_alloc ( + IN DAPL_COOKIE_BUFFER *buffer, + IN DAPL_DTO_TYPE type, + IN DAT_DTO_COOKIE user_cookie, + OUT DAPL_COOKIE **cookie_ptr ) +{ + DAPL_COOKIE *cookie; + + if ( DAT_SUCCESS != dapls_cb_get (buffer, &cookie) ) + { + *cookie_ptr = NULL; + return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + } + + cookie->type = DAPL_COOKIE_TYPE_DTO; + cookie->val.dto.type = type; + cookie->val.dto.cookie = user_cookie; + cookie->val.dto.size = 0; +/* + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsCkAlloc: %p, ck(%p,%llx,%llx) u_ck(%p,%llx,%llx)\n", + cookie, + cookie->val.dto.cookie.as_ptr, + cookie->val.dto.cookie.as_64, + cookie->val.dto.cookie.as_index, + user_cookie.as_ptr, user_cookie.as_64, user_cookie.as_index ); +*/ + + *cookie_ptr = cookie; + return DAT_SUCCESS; +} + +void +dapls_cookie_dealloc ( + IN DAPL_COOKIE_BUFFER *buffer, + IN DAPL_COOKIE *cookie) +{ + dapls_cb_put (buffer, cookie); +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.h new file mode 100644 index 00000000..8502b2c4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cookie.h @@ -0,0 +1,71 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dapl_cookie.h
+ *
+ * PURPOSE: Utility defs & routines for the cookie data structure
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#ifndef _DAPL_COOKIE_H_
+#define _DAPL_COOKIE_H_
+
+#include "dapl.h"
+
+extern DAT_RETURN
+dapls_cb_create (
+    DAPL_COOKIE_BUFFER *buffer,
+    DAPL_EP *ep,
+    DAT_COUNT size );
+
+extern void
+dapls_cb_free (
+    DAPL_COOKIE_BUFFER *buffer );
+
+extern DAT_RETURN
+dapls_rmr_cookie_alloc (
+    IN DAPL_COOKIE_BUFFER *buffer,
+    IN DAPL_RMR *rmr,
+    IN DAT_RMR_COOKIE user_cookie,
+    OUT DAPL_COOKIE **cookie_ptr );
+
+extern DAT_RETURN
+dapls_dto_cookie_alloc (
+    IN DAPL_COOKIE_BUFFER *buffer,
+    IN DAPL_DTO_TYPE type,
+    IN DAT_DTO_COOKIE user_cookie,
+    OUT DAPL_COOKIE **cookie_ptr );
+
+extern void
+dapls_cookie_dealloc (
+    IN DAPL_COOKIE_BUFFER *buffer,
+    IN DAPL_COOKIE *cookie );
+
+#endif /* _DAPL_COOKIE_H_ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_accept.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_accept.c
new file mode 100644
index 00000000..9a0d07e9
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_accept.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_cr_accept.c
+ *
+ * PURPOSE: Connection management
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 4
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+#include "dapl_evd_util.h"
+#include "dapl_sp_util.h"
+#include "dapl_cr_util.h"
+
+/*
+ * dapl_cr_accept
+ *
+ * DAPL Requirements Version xxx, 6.4.2.1
+ *
+ * Establish a connection between the active remote Endpoint that issued
+ * the request and the passive local Endpoint.
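+ *
+ * Typical caller sequence (a sketch; only the NULL-ep_handle handling is
+ * enforced below): a connection request event yields cr_handle, and the
+ * consumer calls dat_cr_accept(cr_handle, ep_handle, size, pdata) with a
+ * fresh unconnected EP, or with ep_handle == NULL when the EP was bound
+ * at listen time (DAT_PSP_PROVIDER_FLAG or an RSP).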
+ * + * Input: + * cr_handle + * ep_handle + * private_data_size + * private_data + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_ATTRIBUTE + */ +DAT_RETURN +dapl_cr_accept ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT private_data_size, + IN const DAT_PVOID private_data ) +{ + DAPL_EP *ep_ptr; + DAT_RETURN dat_status; + DAPL_PRIVATE prd; + DAPL_CR *cr_ptr; + DAPL_SP *sp_ptr; + DAT_UINT32 ib_rep_pdata_size; + DAT_EP_STATE entry_ep_state; + DAT_EP_HANDLE entry_ep_handle; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_cr_accept (%p, %p, %d, %p)\n", + cr_handle, + ep_handle, + private_data_size, + private_data); + + if ( DAPL_BAD_HANDLE (cr_handle, DAPL_MAGIC_CR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CR); + goto bail; + } + + cr_ptr = (DAPL_CR *) cr_handle; + + /* + * Return an error if we have an ep_handle and the CR already has an + * EP, indicating this is an RSP connection or PSP_PROVIDER_FLAG was + * specified. + */ + if ( ep_handle != NULL && + ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP) || + cr_ptr->param.local_ep_handle != NULL ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + /* Make sure we have an EP handle in one place or another */ + if ( ep_handle == NULL && cr_ptr->param.local_ep_handle == NULL ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( (0 != private_data_size) && (NULL == private_data) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG4); + goto bail; + } + + /* + * Verify the private data size doesn't exceed the max + */ + ib_rep_pdata_size = private_data_size + + (sizeof(DAPL_PRIVATE) - DAPL_MAX_PRIVATE_DATA_SIZE); + + if (ib_rep_pdata_size > IB_MAX_REP_PDATA_SIZE) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + /* + * ep_handle is NULL if the user specified DAT_PSP_PROVIDER_FLAG + * OR this is an RSP connection; retrieve it from the cr. + */ + if ( ep_handle == NULL ) + { + ep_handle = cr_ptr->param.local_ep_handle; + if ( (((DAPL_EP *) ep_handle)->param.ep_state != + DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING) && + (((DAPL_EP *)ep_handle)->param.ep_state != + DAT_EP_STATE_PASSIVE_CONNECTION_PENDING) ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_EP_NOTREADY); + goto bail; + } + } else + { + /* ensure this EP isn't connected or in use*/ + if ( ((DAPL_EP *) ep_handle)->param.ep_state != + DAT_EP_STATE_UNCONNECTED ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_EP_NOTREADY); + goto bail; + } + } + + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Verify the attributes of the EP handle before we connect it. Test + * all of the handles to make sure they are currently valid. + * Specifically: + * pz_handle required + * recv_evd_handle optional, but must be valid + * request_evd_handle optional, but must be valid + * connect_evd_handle required + * We do all verification and state change under lock, at which + * point the EP state should protect us from most races. + */ + dapl_os_lock ( &ep_ptr->header.lock ); + if ( ep_ptr->param.pz_handle == NULL + || DAPL_BAD_HANDLE (ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) + /* test connect handle */ + || ep_ptr->param.connect_evd_handle == NULL + || DAPL_BAD_HANDLE (ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) + || ! 
(((DAPL_EVD *)ep_ptr->param.connect_evd_handle)->evd_flags & DAT_EVD_CONNECTION_FLAG) + /* test optional completion handles */ + || (ep_ptr->param.recv_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (ep_ptr->param.recv_evd_handle, DAPL_MAGIC_EVD))) + || (ep_ptr->param.request_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (ep_ptr->param.request_evd_handle, DAPL_MAGIC_EVD)))) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED ) + { + /* + * If we are lazy attaching the QP then we may need to + * hook it up here. Typically, we run this code only for + * DAT_PSP_PROVIDER_FLAG + */ + dat_status = dapls_ib_qp_alloc ( cr_ptr->header.owner_ia, ep_ptr, ep_ptr ); + + if ( dat_status != DAT_SUCCESS) + { + /* This is not a great error code, but all the spec allows */ + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_CM,"<<< CR ACCEPT:EP:%p qp_state:%#x\n", ep_ptr,ep_ptr->qp_state ); + if (ep_ptr->qp_state == IB_QPS_RESET || + ep_ptr->qp_state == IB_QPS_ERROR ) + { + if (ep_ptr->qp_handle != IB_INVALID_HANDLE ) + { + dat_status = dapls_ib_reinit_ep (ep_ptr); + if ( dat_status != DAT_SUCCESS) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + } + } + } + entry_ep_state = ep_ptr->param.ep_state; + entry_ep_handle = cr_ptr->param.local_ep_handle; + ep_ptr->param.ep_state = DAT_EP_STATE_COMPLETION_PENDING; + ep_ptr->cm_handle = cr_ptr->ib_cm_handle; + ep_ptr->cr_ptr = cr_ptr; + ep_ptr->param.remote_ia_address_ptr = cr_ptr->param.remote_ia_address_ptr; + cr_ptr->param.local_ep_handle = ep_handle; + + /* + * Deal with private data: we need to put the size in the header + * before sending. Given that this is a reply, we don't need to + * put the IP address in the header. + */ + dapl_os_memcpy(prd.private_data, private_data, private_data_size); + dapl_os_memzero (prd.private_data + private_data_size, + DAPL_MAX_PRIVATE_DATA_SIZE - private_data_size); + + dapl_os_unlock ( &ep_ptr->header.lock ); + + dat_status = dapls_ib_accept_connection ( cr_handle, + ep_handle, + &prd ); + + /* + * If the provider failed, unwind the damage so we are back at + * the initial state. + */ + if ( dat_status != DAT_SUCCESS) + { + if ( DAT_GET_TYPE(dat_status) == DAT_INVALID_STATE ) + { + /* The remote connection request has disappeared; timeout, + * system error, app termination, perhaps other reasons. 
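+ * In that case the code below unwinds completely: it discards the CR, + * restores the EP to its entry state, releases and re-creates the QP, + * and surfaces DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR to the + * user.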
+ */ + + /* Take the CR off the list, we can't use it */ + sp_ptr = cr_ptr->sp_ptr; + + cr_ptr->header.magic = DAPL_MAGIC_CR_DESTROYED; + + dapl_sp_remove_cr (sp_ptr, cr_ptr); + + dapls_cr_free (cr_ptr); + + ep_ptr->cr_ptr = NULL; + ep_ptr->param.ep_state = entry_ep_state; + ep_ptr->param.remote_ia_address_ptr = NULL; + dapls_ib_qp_free (ep_ptr->header.owner_ia, ep_ptr); + (void)dapls_ib_reinit_ep (ep_ptr); /* it will reallocate QP for EP */ + + /* notify User */ + dat_status = dapls_evd_post_connection_event ( + ep_ptr->param.request_evd_handle, + DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR, + (DAT_HANDLE) ep_ptr, + 0, + 0 ); + } + else + { + ep_ptr->param.ep_state = entry_ep_state; + cr_ptr->param.local_ep_handle = entry_ep_handle; + ep_ptr->cr_ptr = NULL; + ep_ptr->param.remote_ia_address_ptr = NULL; + } + + /* + * After restoring values above, we now check if we need + * to translate the error + */ + if ( DAT_GET_TYPE(dat_status) == DAT_LENGTH_ERROR ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + } + + } + else + { + /* Make this CR invalid. We need to hang on to it until + * the connection terminates, but it's destroyed from + * the app point of view. + */ + cr_ptr->header.magic = DAPL_MAGIC_CR_DESTROYED; + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_callback.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_callback.c new file mode 100644 index 00000000..b67043b6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_callback.c @@ -0,0 +1,615 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_cr_callback.c + * + * PURPOSE: implements passive side connection callbacks + * + * Description: Accepts asynchronous callbacks from the Communications Manager + * for EVDs that have been specified as the connection_evd.
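+ * + * Dispatch example: a new connection request maps (via + * dapls_ib_get_dat_event) to DAT_CONNECTION_REQUEST_EVENT and is + * posted to the SP's EVD; every later event for that connection is + * posted to the owning EP's connect EVD.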
+ * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_cr_util.h" +#include "dapl_ia_util.h" +#include "dapl_sp_util.h" +#include "dapl_ep_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ring_buffer_util.h" + + +/* + * Prototypes + */ +DAT_RETURN dapli_connection_request ( + IN ib_cm_handle_t ib_cm_handle, + IN DAPL_SP *sp_ptr, + IN DAPL_PRIVATE *prd_ptr, + IN DAPL_EVD *evd_ptr, + IN DAT_IA_ADDRESS_PTR p_remote_ia_addr); + +DAPL_EP * dapli_get_sp_ep ( + IN ib_cm_handle_t ib_cm_handle, + IN DAPL_SP *sp_ptr, + IN DAT_EVENT_NUMBER dat_event_num); + + +/* + * dapls_cr_callback + * + * The callback function registered with verbs for passive side of + * connection requests. The interface is specified by cm_api.h + * + * + * Input: + * ib_cm_handle, Handle to CM + * ib_cm_event Specific CM event + * instant_data Private data with DAT ADDRESS header + * context SP pointer + * + * Output: + * None + * + */ +void +dapls_cr_callback ( + IN ib_cm_handle_t ib_cm_handle, + IN const ib_cm_events_t ib_cm_event, + IN const void *private_data_ptr, /* event data */ + IN const void *context, + IN DAT_IA_ADDRESS_PTR p_remote_ia_addr) +{ + DAPL_EP *ep_ptr; + DAPL_EVD *evd_ptr; + DAPL_SP *sp_ptr; + DAPL_PRIVATE *prd_ptr; + DAT_EVENT_NUMBER dat_event_num; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "--> dapl_cr_callback! context: %p event: %x cm_handle %d\n", + context, + ib_cm_event, + ib_cm_handle.cid ); + + /* + * Passive side of the connection, context is a SP and + * we need to look up the EP. + */ + sp_ptr = (DAPL_SP *) context; + /* + * The context pointer could have been cleaned up in a racing + * CM callback, check to see if we should just exit here + */ + if (sp_ptr->header.magic == DAPL_MAGIC_INVALID) + { + return; + } + dapl_os_assert ( sp_ptr->header.magic == DAPL_MAGIC_PSP || + sp_ptr->header.magic == DAPL_MAGIC_RSP ); + + /* Obtain the event number from the provider layer */ + dat_event_num = dapls_ib_get_dat_event (ib_cm_event, DAT_FALSE); + + /* + * CONNECT_REQUEST events create an event on the PSP + * EVD, which will trigger connection processing. The + * sequence is: + * CONNECT_REQUEST Event to SP + * CONNECTED Event to EP + * DISCONNECT Event to EP + * + * Obtain the EP if required and set an event up on the correct + * EVD. + */ + if (dat_event_num == DAT_CONNECTION_REQUEST_EVENT) + { + ep_ptr = NULL; + evd_ptr = sp_ptr->evd_handle; + } + else + { + /* see if there is an EP connected with this CM handle */ + ep_ptr = dapli_get_sp_ep ( ib_cm_handle, + sp_ptr, + dat_event_num ); + + /* if we lost a race with the CM just exit. */ + if (ep_ptr == NULL) + { + return; + } + evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle; + /* if something has happened to our EVD, bail. 
*/ + if (evd_ptr == NULL) + { + return; + } + } + + prd_ptr = (DAPL_PRIVATE *)private_data_ptr; + + dat_status = DAT_INTERNAL_ERROR; /* init to ERR */ + + switch (dat_event_num) + { + case DAT_CONNECTION_REQUEST_EVENT: + { + /* + * Requests arriving on a disabled SP are immediately rejected + */ + + dapl_os_lock (&sp_ptr->header.lock); + if ( sp_ptr->listening == DAT_FALSE ) + { + dapl_os_unlock (&sp_ptr->header.lock); + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "---> dapls_cr_callback: conn event on down SP\n"); + (void)dapls_ib_reject_connection (ib_cm_handle, + DAT_CONNECTION_EVENT_UNREACHABLE ); + + return; + } + + if ( sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP ) + { + /* + * RSP connections only allow a single connection. Close + * it down NOW so we reject any further connections. + */ + sp_ptr->listening = DAT_FALSE; + } + dapl_os_unlock (&sp_ptr->header.lock); + + /* + * Only occurs on the passive side of a connection + * dapli_connection_request will post the connection + * event if appropriate. + */ + dat_status = dapli_connection_request ( ib_cm_handle, + sp_ptr, + prd_ptr, + evd_ptr, + p_remote_ia_addr ); + /* Set evd_ptr = NULL so we don't generate an event below */ + evd_ptr = NULL; + + break; + } + case DAT_CONNECTION_EVENT_ESTABLISHED: + { + /* This is just a notification the connection is now + * established, there isn't any private data to deal with. + * + * Update the EP state and cache a copy of the cm handle, + * then let the user know we are ready to go. + */ + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> dapls_cr_callback: CONN_EVENT_ESTABLISHED EP: %p ep_state: %x\n",ep_ptr, + ep_ptr->param.ep_state); + dapl_os_lock ( &ep_ptr->header.lock ); + if (ep_ptr->header.magic != DAPL_MAGIC_EP || + ep_ptr->param.ep_state != DAT_EP_STATE_COMPLETION_PENDING) + { + /* If someone pulled the plug on the EP or connection, + * just exit + */ + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_SUCCESS; + break; + } + + + ep_ptr->param.ep_state = DAT_EP_STATE_CONNECTED; + ep_ptr->cm_handle = ib_cm_handle; + dapl_os_unlock ( &ep_ptr->header.lock ); + + break; + } + case DAT_CONNECTION_EVENT_DISCONNECTED: + { + /* + * EP is now fully disconnected; initiate any post processing + * to reset the underlying QP and get the EP ready for + * another connection + */ + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> dapls_cr_callback: EVENT_DISCONNECTED EP: %p ep_state: %x\n",ep_ptr, + ep_ptr->param.ep_state); + dapl_os_lock ( &ep_ptr->header.lock ); + if (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED) + { + /* The disconnect has already occurred, we are now + * cleaned up and ready to exit + */ + dapl_os_unlock ( &ep_ptr->header.lock ); + return; + } + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event); + dapl_sp_remove_ep (ep_ptr); + dapl_os_unlock ( &ep_ptr->header.lock ); + + break; + } + case DAT_CONNECTION_EVENT_NON_PEER_REJECTED: + case DAT_CONNECTION_EVENT_PEER_REJECTED: + case DAT_CONNECTION_EVENT_UNREACHABLE: + { + /* + * After posting an accept the requesting node has + * stopped talking.
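+ * Below we park the EP in DISCONNECTED and invalidate the cached CM + * handle (cid is set to 0xFFFFFFFF) so a stale handle cannot be + * reused.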
+ */ + dapl_os_lock ( &ep_ptr->header.lock ); + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) ); + ep_ptr->cm_handle.cid = 0xFFFFFFFF; + dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event); + dapl_os_unlock ( &ep_ptr->header.lock ); + + break; + } + case DAT_CONNECTION_EVENT_BROKEN: + { + dapl_os_lock ( &ep_ptr->header.lock ); + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event); + dapl_os_unlock ( &ep_ptr->header.lock ); + + break; + } + default: + { + evd_ptr = NULL; + dapl_os_assert (0); /* shouldn't happen */ + break; + } + } + dapl_dbg_log (DAPL_DBG_TYPE_CM," dapl_cr_callback: POST EVENT %d EP: %p EVD: %p \n",dat_event_num, ep_ptr, evd_ptr); + if (evd_ptr != NULL ) + { + dat_status = dapls_evd_post_connection_event ( + evd_ptr, + dat_event_num, + (DAT_HANDLE) ep_ptr, + 0, + NULL); + } + + if (dat_status != DAT_SUCCESS) + { + /* The event post failed; take appropriate action. */ + (void)dapls_ib_reject_connection ( ib_cm_handle, + DAT_CONNECTION_EVENT_BROKEN); + + return; + } +} + + +/* + * dapli_connection_request + * + * Process a connection request on the Passive side of a connection. + * Create a CR record and link it on to the SP so we can update it + * and free it later. Create an EP if specified by the PSP flags. + * + * Input: + * ib_cm_handle, + * sp_ptr + * event_ptr + * prd_ptr + * + * Output: + * None + * + * Returns + * DAT_INSUFFICIENT_RESOURCES + * DAT_SUCCESS + * + */ +DAT_RETURN +dapli_connection_request ( + IN ib_cm_handle_t ib_cm_handle, + IN DAPL_SP *sp_ptr, + IN DAPL_PRIVATE *prd_ptr, + IN DAPL_EVD *evd_ptr, + IN DAT_IA_ADDRESS_PTR p_remote_ia_addr) +{ + DAT_RETURN dat_status; + + DAPL_CR *cr_ptr; + DAPL_EP *ep_ptr; + DAPL_IA *ia_ptr; + DAT_SP_HANDLE sp_handle; + + cr_ptr = dapls_cr_alloc (sp_ptr->header.owner_ia); + if ( cr_ptr == NULL ) + { + /* Invoking function will call dapls_ib_cm_reject() */ + return DAT_INSUFFICIENT_RESOURCES; + } + + /* + * Set up the CR + */ + cr_ptr->sp_ptr = sp_ptr; /* maintain sp_ptr in case of reject */ + cr_ptr->param.remote_port_qual = 0; + cr_ptr->ib_cm_handle = ib_cm_handle; + + /* + * Copy the remote address and private data out of the private_data + * payload and put them in a local structure + */ + if (p_remote_ia_addr != NULL) + { + dapl_os_memcpy ((void *)&cr_ptr->remote_ia_address, + (void *)p_remote_ia_addr, + sizeof (DAT_SOCK_ADDR6)); + } + cr_ptr->param.remote_ia_address_ptr = (DAT_IA_ADDRESS_PTR)&cr_ptr->remote_ia_address; + + /* + * Private data size is always the max allowable by IB + */ + cr_ptr->param.private_data_size = IB_MAX_REQ_PDATA_SIZE; + cr_ptr->param.private_data = cr_ptr->private_data; + dapl_os_memcpy (cr_ptr->private_data, +#ifdef NO_NAME_SERVICE + prd_ptr->private_data, +#else + (void *) prd_ptr, +#endif + IB_MAX_REQ_PDATA_SIZE); + +#ifdef DAPL_DBG +#if 0 + { + int i; + + dapl_dbg_log ( DAPL_DBG_TYPE_EP, "--> private_data: "); + + for ( i = 0 ; i < IB_MAX_REQ_PDATA_SIZE ; i++ ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_EP, + "0x%x ", cr_ptr->private_data[i]); + + } + dapl_dbg_log ( DAPL_DBG_TYPE_EP, "\n"); + + } +#endif +#endif + + /* EP will be NULL unless RSP service point */ + ep_ptr = (DAPL_EP *) sp_ptr->ep_handle; + + if ( sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG ) + { + /* + * Never true for RSP connections + * + * Create an EP for the user. If we can't allocate an + * EP we are out of resources and need to tell the + * requestor that we can't help them.
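+ * (On failure the caller, dapls_cr_callback, rejects the request via + * dapls_ib_reject_connection.)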
+ */ + ia_ptr = sp_ptr->header.owner_ia; + ep_ptr = dapl_ep_alloc (ia_ptr, NULL); + if ( ep_ptr == NULL ) + { + dapls_cr_free (cr_ptr); + /* Invoking function will call dapls_ib_cm_reject() */ + return DAT_INSUFFICIENT_RESOURCES; + } + /* + * Link the EP onto the IA + */ + dapl_ia_link_ep (ia_ptr, ep_ptr); + } + + cr_ptr->param.local_ep_handle = ep_ptr; + + if ( ep_ptr != NULL ) + { + /* Assign valid EP fields: RSP and PSP_PROVIDER_FLAG only */ + if ( sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG ) + { + ep_ptr->param.ep_state = + DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING; + } + else + /* RSP */ + { + dapl_os_assert (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP); + ep_ptr->param.ep_state = + DAT_EP_STATE_PASSIVE_CONNECTION_PENDING; + } + ep_ptr->cm_handle = ib_cm_handle; + ep_ptr->cr_ptr = cr_ptr; + } + + /* link the CR onto the SP so we can pick it up later */ + dapl_sp_link_cr (sp_ptr, cr_ptr); + + /* Post the event. */ + /* assign sp_ptr to union to avoid typecast errors from some compilers */ + sp_handle.psp_handle = (DAT_PSP_HANDLE)sp_ptr; + dat_status = dapls_evd_post_cr_arrival_event ( + evd_ptr, + DAT_CONNECTION_REQUEST_EVENT, + sp_handle, + (DAT_IA_ADDRESS_PTR)&sp_ptr->header.owner_ia->hca_ptr->hca_address, + sp_ptr->conn_qual, + (DAT_CR_HANDLE)cr_ptr); + if (dat_status != DAT_SUCCESS) + { + if ( ep_ptr != NULL ) + { + ep_ptr->cr_ptr = NULL; + } + dapls_cr_free (cr_ptr); + (void)dapls_ib_reject_connection (ib_cm_handle, + DAT_CONNECTION_EVENT_BROKEN); + + /* Take the CR off the list, we can't use it */ + dapl_os_lock (&sp_ptr->header.lock); + dapl_sp_remove_cr (sp_ptr, cr_ptr); + dapl_os_unlock (&sp_ptr->header.lock); + return DAT_INSUFFICIENT_RESOURCES; + } + + + return DAT_SUCCESS; +} + + +/* + * dapli_get_sp_ep + * + * Passive side of a connection is now fully established. Clean + * up resources and obtain the EP pointer associated with a CR in + * the SP + * + * Input: + * ib_cm_handle, + * sp_ptr + * connection_event + * + * Output: + * none + * + * Returns + * ep_ptr + * + */ +DAPL_EP * +dapli_get_sp_ep ( + IN ib_cm_handle_t ib_cm_handle, + IN DAPL_SP *sp_ptr, + IN DAT_EVENT_NUMBER dat_event_num) +{ + DAPL_CR *cr_ptr; + DAPL_EP *ep_ptr; + + /* + * acquire the lock, we may be racing with other threads here + */ + dapl_os_lock (&sp_ptr->header.lock); + /* Verify under lock that the SP is still valid */ + if (sp_ptr->header.magic == DAPL_MAGIC_INVALID) + { + dapl_os_unlock (&sp_ptr->header.lock); + return NULL; + } + /* + * There are potentially multiple connections in progress. Need to + * go through the list and find the one we are interested + * in. There is no guarantee of order. dapl_sp_search_cr + * leaves the CR on the SP queue. + */ + cr_ptr = dapl_sp_search_cr (sp_ptr, ib_cm_handle); + if ( cr_ptr == NULL ) + { + dapl_os_unlock (&sp_ptr->header.lock); + return NULL; + } + + ep_ptr = (DAPL_EP *)cr_ptr->param.local_ep_handle; + + + /* Quick check to ensure our EP is still valid */ + if ( (DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP )) ) + { + ep_ptr = NULL; + } + + /* The CR record is discarded in all except for the CONNECTED case, + * as it will have no further relevance. 
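+ * For ESTABLISHED the CR stays linked to the SP; it is reclaimed + * later, when the matching DISCONNECTED or BROKEN event arrives.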
+ */ + if (dat_event_num != DAT_CONNECTION_EVENT_ESTABLISHED) + { + /* Remove the CR from the queue */ + dapl_sp_remove_cr (sp_ptr, cr_ptr); + /* + * Last event, time to clean up and dispose of the resource + */ + if (ep_ptr != NULL) + { + ep_ptr->cr_ptr = NULL; + } + + /* + * If this SP has been removed from service, free it + * up after the last CR is removed + */ + if ( sp_ptr->listening != DAT_TRUE && sp_ptr->cr_list_count == 0 && + sp_ptr->state != DAPL_SP_STATE_FREE ) + { + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> dapli_get_sp_ep! disconnect dump sp: %p \n", sp_ptr); + /* Decrement the ref count on the EVD */ + if (sp_ptr->evd_handle) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)sp_ptr->evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + } + sp_ptr->state = DAPL_SP_STATE_FREE; + dapl_os_unlock (&sp_ptr->header.lock); + (void)dapls_ib_remove_conn_listener ( sp_ptr->header.owner_ia, + sp_ptr ); + dapls_ia_unlink_sp ( (DAPL_IA *)sp_ptr->header.owner_ia, + sp_ptr ); + dapls_sp_free_sp ( sp_ptr ); + goto skip_unlock; + } + + dapl_os_unlock (&sp_ptr->header.lock); + /* free memory outside of the lock */ + dapls_cr_free (cr_ptr); + } + else + { + dapl_os_unlock (&sp_ptr->header.lock); + } + +skip_unlock: + return ep_ptr; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_handoff.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_handoff.c new file mode 100644 index 00000000..9495d785 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_handoff.c @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_cr_handoff.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" + +/* + * dapl_cr_handoff + * + * DAPL Requirements Version xxx, 6.4.2.4 + * + * Hand the connection request to another Service Point specified by the + * Connection Qualifier.
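+ * + * Usage sketch (hypothetical qualifier value, illustration only): + * + * dat_status = dapl_cr_handoff (cr_handle, other_conn_qual); + * + * where other_conn_qual names the Connection Qualifier of the Service + * Point that should take over the request.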
+ * + * Input: + * cr_handle + * cr_handoff + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_cr_handoff ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_CONN_QUAL cr_handoff ) /* handoff */ +{ + DAT_RETURN dat_status; + + if ( DAPL_BAD_HANDLE (cr_handle, DAPL_MAGIC_CR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CR); + goto bail; + } + + dat_status = dapls_ib_cr_handoff ( cr_handle, cr_handoff ); + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_query.c new file mode 100644 index 00000000..4bb7de02 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_query.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_cr_query.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" + +/* + * dapl_cr_query + * + * DAPL Requirements Version xxx, 6.4.2.1 + * + * Return Connection Request args + * + * Input: + * cr_handle + * cr_param_mask + * + * Output: + * cr_param + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_HANDLE + */ +DAT_RETURN +dapl_cr_query ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_CR_PARAM_MASK cr_param_mask, + OUT DAT_CR_PARAM *cr_param) +{ + DAPL_CR *cr_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_cr_query (%p, %x, %p)\n", + cr_handle, + cr_param_mask, + cr_param); + + dat_status = DAT_SUCCESS; + if ( DAPL_BAD_HANDLE (cr_handle, DAPL_MAGIC_CR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CR); + goto bail; + } + + if (NULL == cr_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + cr_ptr = (DAPL_CR *) cr_handle; + + /* obtain the remote IP address */ + if (cr_param_mask & DAT_CR_FIELD_REMOTE_IA_ADDRESS_PTR) + { + dat_status = dapls_ib_cm_remote_addr ((DAT_HANDLE)cr_handle, + NULL, + &cr_ptr->remote_ia_address ); + } + + /* since the arguments are easily accessible, ignore the mask */ + dapl_os_memcpy (cr_param, &cr_ptr->param, sizeof (DAT_CR_PARAM)); + + bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_reject.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_reject.c new file mode 100644 index 00000000..fbe6356a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_reject.c @@ -0,0 +1,141 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_cr_reject.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_cr_util.h" +#include "dapl_sp_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_cr_reject + * + * DAPL Requirements Version xxx, 6.4.2.2 + * + * Reject a connection request from the active remote side requesting + * an Endpoint. + * + * Input: + * cr_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_cr_reject ( + IN DAT_CR_HANDLE cr_handle ) +{ + DAPL_CR *cr_ptr; + DAPL_EP *ep_ptr; + DAT_EP_STATE entry_ep_state; + DAT_EP_HANDLE entry_ep_handle; + DAPL_SP *sp_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_cr_reject (%p)\n", cr_handle); + + if ( DAPL_BAD_HANDLE (cr_handle, DAPL_MAGIC_CR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CR); + goto bail; + } + + cr_ptr = (DAPL_CR *)cr_handle; + + /* + * Clean up provider created EP if there is one: only if + * DAT_PSP_PROVIDER_FLAG was set on the PSP + */ + ep_ptr = (DAPL_EP *)cr_ptr->param.local_ep_handle; + entry_ep_handle = cr_ptr->param.local_ep_handle; + entry_ep_state = 0; + if ( ep_ptr != NULL ) + { + entry_ep_state = ep_ptr->param.ep_state; + ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED; + cr_ptr->param.local_ep_handle = NULL; + } + + dat_status = dapls_ib_reject_connection ( cr_ptr->ib_cm_handle, + IB_CM_REJ_REASON_CONSUMER_REJ ); + + if ( dat_status != DAT_SUCCESS) + { + if ( ep_ptr != NULL ) + { + /* Revert our state to the beginning */ + ep_ptr->param.ep_state = entry_ep_state; + cr_ptr->param.local_ep_handle = entry_ep_handle; + } + } + else + { + /* + * If this EP has been allocated by the provider, clean it up; + * see DAT 1.1 spec, page 100, lines 3-4 (section 6.4.3.1.1.1). + * RSP and user-provided EPs are in the control of the user. + */ + sp_ptr = cr_ptr->sp_ptr; + if ( ep_ptr != NULL && + sp_ptr->psp_flags == DAT_PSP_PROVIDER_FLAG ) + { + (void)dapl_ep_free (ep_ptr); + } + + /* Remove the CR from the queue, then free it */ + dapl_os_lock (&sp_ptr->header.lock); + dapl_sp_remove_cr ( sp_ptr, cr_ptr ); + dapl_os_unlock (&sp_ptr->header.lock); + + dapls_cr_free ( cr_ptr ); + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.c new file mode 100644 index 00000000..e0c5b6f9 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory.
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_cr_util.c + * + * PURPOSE: Manage CR (Connection Request) structure + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_cr_util.h" + +/* + * dapls_cr_alloc + * + * Create a CR. Part of the passive side of a connection + * + * Input: + * ia_ptr + * + * Output: + * none + * + * Returns: + * pointer to the new CR, or NULL on allocation failure + * + */ + +DAPL_CR * +dapls_cr_alloc ( + DAPL_IA *ia_ptr ) +{ + DAPL_CR *cr_ptr; + + /* Allocate CR */ + cr_ptr = (DAPL_CR *)dapl_os_alloc (sizeof (DAPL_CR)); + if ( cr_ptr == NULL ) + { + return (NULL); + } + + /* zero the structure */ + dapl_os_memzero (cr_ptr, sizeof (DAPL_CR)); + + /* + * initialize the header + */ + cr_ptr->header.provider = ia_ptr->header.provider; + cr_ptr->header.magic = DAPL_MAGIC_CR; + cr_ptr->header.handle_type = DAT_HANDLE_TYPE_CR; + cr_ptr->header.owner_ia = ia_ptr; + cr_ptr->header.user_context.as_64 = 0; + cr_ptr->header.user_context.as_ptr = NULL; + dapl_llist_init_entry (&cr_ptr->header.ia_list_entry); + dapl_os_lock_init (&cr_ptr->header.lock); + + return (cr_ptr); +} + + +/* + * dapls_cr_free + * + * Free the passed in CR structure. + * + * Input: + * cr_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapls_cr_free ( + IN DAPL_CR *cr_ptr ) +{ + dapl_os_assert (cr_ptr->header.magic == DAPL_MAGIC_CR || + cr_ptr->header.magic == DAPL_MAGIC_CR_DESTROYED ); + + cr_ptr->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */ + dapl_os_free (cr_ptr, sizeof (DAPL_CR)); +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.h new file mode 100644 index 00000000..392f3ef4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_cr_util.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * HEADER: dapl_cr_util.h + * + * PURPOSE: Utility defs & routines for the CR data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_CR_UTIL_H_ +#define _DAPL_CR_UTIL_H_ + +#include "dapl.h" + +DAPL_CR * +dapls_cr_alloc ( + DAPL_IA *ia_ptr ); + +void +dapls_cr_free ( + IN DAPL_CR *cr_ptr ); + +void +dapls_cr_callback ( + IN ib_cm_handle_t ib_cm_handle, + IN const ib_cm_events_t ib_cm_event, + IN const void *instant_data_p, + IN const void *context, + IN DAT_IA_ADDRESS_PTR p_remote_ia_addr); + +#endif /* _DAPL_CR_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_debug.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_debug.c new file mode 100644 index 00000000..bd4b20a4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_debug.c @@ -0,0 +1,91 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_debug.h" +#include "dapl.h" +#include <stdarg.h> +#include <stdlib.h> + +#ifdef DAPL_DBG +DAPL_DBG_TYPE g_dapl_dbg_type; /* initialized in dapl_init.c */ +DAPL_DBG_DEST g_dapl_dbg_dest; /* initialized in dapl_init.c */ + +void dapl_internal_dbg_log ( DAPL_DBG_TYPE type, const char *fmt, ...)
+{ + va_list args; + + if ( type & g_dapl_dbg_type ) + { + va_start (args, fmt); + + if ( DAPL_DBG_DEST_STDOUT & g_dapl_dbg_dest ) + { + dapl_os_vprintf (fmt, args); + } + + if ( DAPL_DBG_DEST_SYSLOG & g_dapl_dbg_dest ) + { + dapl_os_syslog(fmt, args); + } + va_end (args); + } +} + +#if defined(DAPL_COUNTERS) +long dapl_dbg_counters[DAPL_CNTR_MAX]; + +/* + * The order of this list must match exactly with the #defines + * in dapl_debug.h + */ +char *dapl_dbg_counter_names[] = { + "dapl_ep_create", + "dapl_ep_free", + "dapl_ep_connect", + "dapl_ep_disconnect", + "dapl_ep_post_send", + "dapl_ep_post_recv", + "dapl_ep_post_rdma_write", + "dapl_ep_post_rdma_read", + "dapl_evd_create", + "dapl_evd_free", + "dapl_evd_wait", + "dapl_evd_blocked", + "dapl_evd_completion_notify", + "dapl_evd_dto_callback", + "dapl_evd_connection_callback", + "dapl_evd_dequeue", + "dapl_evd_poll", + "dapl_evd_found", + "dapl_evd_not_found", + "dapls_timer_set", + "dapls_timer_cancel", + 0 +}; + +#endif /* DAPL_COUNTERS */ +#endif + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_connect.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_connect.c new file mode 100644 index 00000000..a2c6c096 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_connect.c @@ -0,0 +1,373 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_connect.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ep_util.h" +#include "dapl_adapter_util.h" +#include "dapl_evd_util.h" +#include "dapl_timer_util.h" + +/* + * dapl_ep_connect + * + * DAPL Requirements Version xxx, 6.5.7 + * + * Request a connection be established between the local Endpoint + * and a remote Endpoint. 
This operation is used by the active/client + * side of a connection + * + * Input: + * ep_handle + * remote_ia_address + * remote_conn_qual + * timeout + * private_data_size + * privaet_data + * qos + * connect_flags + * + * Output: + * None + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOUCRES + * DAT_INVALID_PARAMETER + * DAT_MODLE_NOT_SUPPORTED + */ +DAT_RETURN +dapl_ep_connect ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_IA_ADDRESS_PTR remote_ia_address, + IN DAT_CONN_QUAL remote_conn_qual, + IN DAT_TIMEOUT timeout, + IN DAT_COUNT private_data_size, + IN const DAT_PVOID private_data, + IN DAT_QOS qos, + IN DAT_CONNECT_FLAGS connect_flags ) +{ + DAPL_EP *ep_ptr; + DAPL_PRIVATE prd; + DAPL_EP alloc_ep; + DAT_RETURN dat_status; + DAT_COUNT ib_req_pdata_size; + DAT_COUNT max_req_pdata_size; + + dapl_dbg_log (DAPL_DBG_TYPE_API | DAPL_DBG_TYPE_CM, + "dapl_ep_connect (%p, {%u.%u.%u.%u}, %#I64x, %d, %d, %p, %x, %x)\n", + ep_handle, + remote_ia_address->sa_data[2], + remote_ia_address->sa_data[3], + remote_ia_address->sa_data[4], + remote_ia_address->sa_data[5], + remote_conn_qual, + timeout, + private_data_size, + private_data, + qos, + connect_flags); + DAPL_CNTR (DCNT_EP_CONNECT); + + dat_status = DAT_SUCCESS; + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Verify parameter & state. The connection handle must be good + * at this point. + */ + if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( DAPL_BAD_HANDLE (ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CONN); + goto bail; + } + + /* Can't do a connection in 0 time, reject outright */ + if ( timeout == 0 ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); + goto bail; + } + + /* + * If the endpoint needs a QP, associated the QP with it. + * This needs to be done carefully, in order to: + * * Avoid allocating under a lock. + * * Not step on data structures being altered by + * routines with which we are racing. + * So we: + * * Confirm that a new QP is needed and is not forbidden by the + * current state. + * * Allocate it into a separate EP. + * * Take the EP lock. + * * Reconfirm that the EP is in a state where it needs a QP. + * * Assign the QP and release the lock. + */ + if ( ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED ) + { + if ( ep_ptr->param.pz_handle == NULL + || DAPL_BAD_HANDLE (ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_EP_NOTREADY); + goto bail; + } + alloc_ep = *ep_ptr; + + dat_status = dapls_ib_qp_alloc ( ep_ptr->header.owner_ia, + &alloc_ep, + ep_ptr ); + if ( dat_status != DAT_SUCCESS ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + dapl_os_lock ( &ep_ptr->header.lock ); + /* + * PZ shouldn't have changed since we're only racing with + * dapl_cr_accept() + */ + if ( ep_ptr->qp_state != DAPL_QP_STATE_UNATTACHED ) + { + /* Bail, cleaning up. 
*/ + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = dapls_ib_qp_free ( ep_ptr->header.owner_ia, + &alloc_ep ); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ep_connect: ib_qp_free failed with %x\n", + dat_status); + } + dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep_ptr)); + goto bail; + } + + ep_ptr->qp_handle = alloc_ep.qp_handle; + ep_ptr->qpn = alloc_ep.qpn; + ep_ptr->qp_state = alloc_ep.qp_state; + + dapl_os_unlock ( &ep_ptr->header.lock ); + } + + /* + * We do state checks and transitions under lock. + * The only code we're racing against is dapl_cr_accept. + */ + dapl_os_lock ( &ep_ptr->header.lock ); + + /* + * Verify the attributes of the EP handle before we connect it. Test + * all of the handles to make sure they are currently valid. + * Specifically: + * pz_handle required + * recv_evd_handle optional, but must be valid + * request_evd_handle optional, but must be valid + * connect_evd_handle required + */ + if ( ep_ptr->param.pz_handle == NULL + || DAPL_BAD_HANDLE (ep_ptr->param.pz_handle, DAPL_MAGIC_PZ) + /* test connect handle */ + || ep_ptr->param.connect_evd_handle == NULL + || DAPL_BAD_HANDLE (ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) + || ! (((DAPL_EVD *)ep_ptr->param.connect_evd_handle)->evd_flags & DAT_EVD_CONNECTION_FLAG) + /* test optional completion handles */ + || (ep_ptr->param.recv_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (ep_ptr->param.recv_evd_handle, DAPL_MAGIC_EVD))) + || (ep_ptr->param.request_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (ep_ptr->param.request_evd_handle, DAPL_MAGIC_EVD)))) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_EP_NOTREADY); + goto bail; + } + + /* Check both the EP state and the QP state: if we don't have a QP + * we need to attach one now. + */ + if ( ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED ) + { + dat_status = dapls_ib_qp_alloc ( ep_ptr->header.owner_ia, + ep_ptr, ep_ptr ); + + if ( dat_status != DAT_SUCCESS) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_TEP); + goto bail; + } + } + + if ( ep_ptr->param.ep_state != DAT_EP_STATE_UNCONNECTED ) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep_ptr)); + goto bail; + } + + if ( qos != DAT_QOS_BEST_EFFORT || + connect_flags != DAT_CONNECT_DEFAULT_FLAG ) + { + /* + * At this point we only support one QOS level + */ + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_MODEL_NOT_SUPPORTED, 0); + goto bail; + } + + /* + * Verify the private data size doesn't exceed the max + */ + ib_req_pdata_size = private_data_size + + (sizeof (DAPL_PRIVATE) - DAPL_MAX_PRIVATE_DATA_SIZE); + + max_req_pdata_size = dapls_ib_private_data_size (NULL, DAPL_PDATA_CONN_REQ); + + if (ib_req_pdata_size > max_req_pdata_size) + { + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG5); + goto bail; + } + + /* transition the state before requesting a connection to avoid + * race conditions + */ + ep_ptr->param.ep_state = DAT_EP_STATE_ACTIVE_CONNECTION_PENDING; + + /* + * At this point we're committed, and done with the endpoint + * except for the connect, so we can drop the lock. 
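+ * (The connection callback may run before dapls_ib_connect returns + * and may need this same lock, so holding it across the call would be + * unsafe.)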
+ */ + dapl_os_unlock ( &ep_ptr->header.lock ); + + /* + * Copy interesting data into the private data header + */ +#ifdef NO_NAME_SERVICE + dapl_os_memcpy (&prd.hca_address, + &ep_ptr->header.owner_ia->hca_ptr->hca_address, + sizeof (DAT_SOCK_ADDR)); +#endif + + /* + * The spec calls for us to use the entire private data size; + * put the message in and pad with NULLs + */ + dapl_os_memcpy (prd.private_data, private_data, private_data_size); + dapl_os_memzero (prd.private_data + private_data_size, + DAPL_MAX_PRIVATE_DATA_SIZE - private_data_size); + + /* Copy the connection qualifiers */ + dapl_os_memcpy ( ep_ptr->param.remote_ia_address_ptr, + remote_ia_address, + sizeof ( DAT_SOCK_ADDR) ); + ep_ptr->param.remote_port_qual = remote_conn_qual; + + dat_status = dapls_ib_connect ( ep_handle, + remote_ia_address, + remote_conn_qual, + private_data_size, + &prd ); + + if ( dat_status != DAT_SUCCESS ) + { + ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED; + + /* + * Some implementations provide us with an error code that the + * remote destination is unreachable, but DAT doesn't have a + * synchronous error code to communicate this. So the provider + * layer generates an INTERNAL_ERROR with a subtype; when + * this happens, return SUCCESS and generate the event + */ + if (dat_status == DAT_ERROR (DAT_INTERNAL_ERROR, 1) ) + { + dapls_evd_post_connection_event ( + (DAPL_EVD *)ep_ptr->param.connect_evd_handle, + DAT_CONNECTION_EVENT_UNREACHABLE, + (DAT_HANDLE) ep_ptr, + 0, + 0); + dat_status = DAT_SUCCESS; + } + } + else + { + /* + * Acquire the lock and recheck the state of the EP; this + * thread could have been descheduled after issuing the connect + * request and the EP is now connected. Set up a timer if + * necessary. + */ + dapl_os_lock ( &ep_ptr->header.lock ); + if (ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING && + timeout != DAT_TIMEOUT_INFINITE ) + { + ep_ptr->cxn_timer = + (DAPL_OS_TIMER *)dapl_os_alloc (sizeof (DAPL_OS_TIMER)); + + dapls_timer_set ( ep_ptr->cxn_timer, + dapls_ep_timeout, + ep_ptr, + timeout ); + } + dapl_os_unlock ( &ep_ptr->header.lock ); + } + +bail: + dapl_dbg_log (DAPL_DBG_TYPE_RTN | DAPL_DBG_TYPE_CM, + "dapl_ep_connect () returns 0x%x\n", + dat_status); + + return dat_status; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_create.c new file mode 100644 index 00000000..f68e4a0e --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_create.c @@ -0,0 +1,344 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. 
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_create.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the kDAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" +#include "dapl_ep_util.h" +#include "dapl_adapter_util.h" + + +/* + * dapl_ep_create + * + * uDAPL Version 1.1, 6.5.3 + * + * Create an instance of an Endpoint that is provided to the + * consumer at ep_handle. + * + * Input: + * ia_handle + * pz_handle + * recv_evd_handle (recv DTOs) + * request_evd_handle (xmit DTOs) + * connect_evd_handle + * ep_attrs + * + * Output: + * ep_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_INVALID_ATTRIBUTE + * DAT_MODEL_NOT_SUPPORTED + */ +DAT_RETURN +dapl_ep_create ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_PZ_HANDLE pz_handle, + IN DAT_EVD_HANDLE recv_evd_handle, + IN DAT_EVD_HANDLE request_evd_handle, + IN DAT_EVD_HANDLE connect_evd_handle, + IN const DAT_EP_ATTR *ep_attr, + OUT DAT_EP_HANDLE *ep_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_EP *ep_ptr; + DAT_EP_ATTR ep_attr_limit; + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ep_create (%p, %p, %p, %p, %p, %p, %p)\n", + ia_handle, + pz_handle, + recv_evd_handle, + request_evd_handle, + connect_evd_handle, + ep_attr, + ep_handle); + DAPL_CNTR(DCNT_EP_CREATE); + + ia_ptr = (DAPL_IA *)ia_handle; + dat_status = DAT_SUCCESS; + + /* + * Verify parameters + */ + if ( DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA); + goto bail; + } + + /* + * Verify non-required parameters. + * N.B. Assumption: any parameter that can be + * modified by dat_ep_modify() is not strictly + * required when the EP is created + */ + if ( pz_handle != NULL && + DAPL_BAD_HANDLE (pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_PZ); + goto bail; + } + + /* If connect handle is !NULL verify handle is good */ + if (connect_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (connect_evd_handle, DAPL_MAGIC_EVD) || + ! (((DAPL_EVD *)connect_evd_handle)->evd_flags & DAT_EVD_CONNECTION_FLAG)) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_CONN); + goto bail; + } + /* If recv_evd is !NULL, verify handle is good and flags are valid */ + if (recv_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (recv_evd_handle, DAPL_MAGIC_EVD) || + ! (((DAPL_EVD *)recv_evd_handle)->evd_flags & DAT_EVD_DTO_FLAG) ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_RECV); + goto bail; + } + + /* If req_evd is !NULL, verify handle is good and flags are valid */ + if (request_evd_handle != DAT_HANDLE_NULL && + (DAPL_BAD_HANDLE (request_evd_handle, DAPL_MAGIC_EVD) || + ! 
(((DAPL_EVD *)request_evd_handle)->evd_flags & DAT_EVD_DTO_FLAG) ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_REQUEST); + goto bail; + } + + if ( ep_handle == NULL ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG7); + goto bail; + } + + if (( DAT_UVERYLONG _w64)ep_attr & 3) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG6); + goto bail; + } + + /* + * Verify that the EP attributes are legal and make sense. Note that + * if one or both of the DTO handles are NULL, then the corresponding + * max_*_dtos must be 0 as the user will not be able to post dto ops + * on the respective queue. + */ + if (ep_attr != NULL && + (ep_attr->service_type != DAT_SERVICE_TYPE_RC || + (recv_evd_handle == DAT_HANDLE_NULL && ep_attr->max_recv_dtos != 0) || + (recv_evd_handle != DAT_HANDLE_NULL && ep_attr->max_recv_dtos == 0) || + (request_evd_handle == DAT_HANDLE_NULL && ep_attr->max_request_dtos != 0) || + (request_evd_handle != DAT_HANDLE_NULL && ep_attr->max_request_dtos == 0) || + ep_attr->max_recv_iov == 0 || + ep_attr->max_request_iov == 0 || + (DAT_SUCCESS != dapl_ep_check_recv_completion_flags ( + ep_attr->recv_completion_flags)) )) + { + dat_status = DAT_INVALID_PARAMETER | DAT_INVALID_ARG6; + goto bail; + } + + /* Verify the attributes against the transport */ + if (ep_attr != NULL) + { + dapl_os_memzero (&ep_attr_limit, sizeof (DAT_EP_ATTR)); + dat_status = dapls_ib_query_hca (ia_ptr->hca_ptr, + NULL, &ep_attr_limit, NULL); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + if ( ep_attr->max_mtu_size > ep_attr_limit.max_mtu_size || + ep_attr->max_rdma_size > ep_attr_limit.max_rdma_size || + ep_attr->max_recv_dtos > ep_attr_limit.max_recv_dtos || + ep_attr->max_request_dtos > ep_attr_limit.max_request_dtos || + ep_attr->max_recv_iov > ep_attr_limit.max_recv_iov || + ep_attr->max_request_iov > ep_attr_limit.max_request_iov || + ep_attr->max_rdma_read_in > ep_attr_limit.max_rdma_read_in || + ep_attr->max_rdma_read_out > ep_attr_limit.max_rdma_read_out ) + + { + dat_status = DAT_INVALID_PARAMETER | DAT_INVALID_ARG6; + goto bail; + } + } + + /* + * Verify the completion flags for the EVD and the EP + */ + /* + * XXX FIXME + * XXX Need to assign the EVD to the right completion type + * XXX depending on the EP attributes. Fail if the types don't + * XXX match, they are mutually exclusive.
+     */
+    evd_ptr = (DAPL_EVD *)recv_evd_handle;
+    if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT)
+    {
+	/* A NULL ep_attr selects the default (threshold) completion type. */
+	if (ep_attr == NULL ||
+	    ep_attr->recv_completion_flags == DAT_COMPLETION_DEFAULT_FLAG)
+	{
+	    evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
+	}
+	else
+	{
+	    evd_ptr->completion_type = ep_attr->recv_completion_flags;
+	}
+    }
+
+    evd_ptr = (DAPL_EVD *)request_evd_handle;
+    if (evd_ptr != NULL && evd_ptr->completion_type == DAPL_EVD_STATE_INIT)
+    {
+	if (ep_attr == NULL ||
+	    ep_attr->request_completion_flags == DAT_COMPLETION_DEFAULT_FLAG)
+	{
+	    evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD;
+	}
+	else
+	{
+	    evd_ptr->completion_type = ep_attr->request_completion_flags;
+	}
+    }
+
+    /* Allocate EP */
+    ep_ptr = dapl_ep_alloc ( ia_ptr, ep_attr );
+    if ( ep_ptr == NULL )
+    {
+	dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+	goto bail;
+    }
+
+    /*
+     * Fill in the EP
+     */
+    ep_ptr->header.owner_ia = ia_ptr;
+    ep_ptr->param.ia_handle = ia_handle;
+    ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED;
+    ep_ptr->param.local_ia_address_ptr =
+	(DAT_IA_ADDRESS_PTR)&ia_ptr->hca_ptr->hca_address;
+    /* Set the remote address pointer to the end of the EP struct */
+    ep_ptr->param.remote_ia_address_ptr = (DAT_IA_ADDRESS_PTR) (ep_ptr + 1);
+
+    ep_ptr->param.pz_handle = pz_handle;
+    ep_ptr->param.recv_evd_handle = recv_evd_handle;
+    ep_ptr->param.request_evd_handle = request_evd_handle;
+    ep_ptr->param.connect_evd_handle = connect_evd_handle;
+
+    /*
+     * Make sure we handle the NULL DTO EVDs
+     */
+    if ( recv_evd_handle == DAT_HANDLE_NULL && ep_attr == NULL )
+    {
+	ep_ptr->param.ep_attr.max_recv_dtos = 0;
+    }
+
+    if ( request_evd_handle == DAT_HANDLE_NULL && ep_attr == NULL )
+    {
+	ep_ptr->param.ep_attr.max_request_dtos = 0;
+    }
+
+    /*
+     * If the user has specified a PZ handle we allocate a QP for
+     * this EP; else we defer until it is assigned via ep_modify().
+     * As much as possible we try to keep QP creation out of the
+     * connect path to avoid resource errors in strange places.
+     */
+    if (pz_handle != DAT_HANDLE_NULL )
+    {
+	/* Take a reference on the PZ handle */
+	dapl_os_atomic_inc (& ((DAPL_PZ *)pz_handle)->pz_ref_count);
+
+	/*
+	 * Get a QP from the IB provider
+	 */
+	dat_status = dapls_ib_qp_alloc ( ia_ptr, ep_ptr, ep_ptr );
+
+	if ( dat_status != DAT_SUCCESS)
+	{
+	    dapl_os_atomic_dec (& ((DAPL_PZ *)pz_handle)->pz_ref_count);
+	    dapl_ep_dealloc ( ep_ptr );
+	    goto bail;
+	}
+    }
+    else
+    {
+	ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED;
+    }
+
+    /*
+     * Update ref counts.  See the spec where the endpoint marks
+     * a data object as 'in use'
+     *   pz_handle: dat_pz_free, uDAPL Document, 6.6.1.2
+     *   evd_handles:
+     *
+     * N.B. This should really be done by a util routine.
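+     *
+     * For illustration, such a routine (hypothetical name, not part of
+     * this patch) would simply centralize the reference bumps done
+     * inline below:
+     *
+     *     static void
+     *     dapli_ep_ref_evds (DAPL_EP *ep_ptr)
+     *     {
+     *         DAT_EP_PARAM *p = &ep_ptr->param;
+     *
+     *         dapl_os_atomic_inc (&((DAPL_EVD *)p->connect_evd_handle)->evd_ref_count);
+     *         if (p->recv_evd_handle != DAT_HANDLE_NULL)
+     *         {
+     *             dapl_os_atomic_inc (&((DAPL_EVD *)p->recv_evd_handle)->evd_ref_count);
+     *         }
+     *         if (p->request_evd_handle != DAT_HANDLE_NULL)
+     *         {
+     *             dapl_os_atomic_inc (&((DAPL_EVD *)p->request_evd_handle)->evd_ref_count);
+     *         }
+     *     }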
+ */ + dapl_os_atomic_inc (& ((DAPL_EVD *)connect_evd_handle)->evd_ref_count); + /* Optional handles */ + if (recv_evd_handle != DAT_HANDLE_NULL) + { + dapl_os_atomic_inc (& ((DAPL_EVD *)recv_evd_handle)->evd_ref_count); + } + if (request_evd_handle != DAT_HANDLE_NULL) + { + dapl_os_atomic_inc (& ((DAPL_EVD *)request_evd_handle)->evd_ref_count); + } + + /* Link it onto the IA */ + dapl_ia_link_ep (ia_ptr, ep_ptr); + + *ep_handle = ep_ptr; + +bail: + return dat_status; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_disconnect.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_disconnect.c new file mode 100644 index 00000000..a26f2294 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_disconnect.c @@ -0,0 +1,152 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_disconnect.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" +#include "dapl_ep_util.h" +#include "dapl_sp_util.h" +#include "dapl_evd_util.h" +#include "dapl_adapter_util.h" +#include "dapl_cr_util.h" /* for callback routine */ + +/* + * dapl_ep_disconnect + * + * DAPL Requirements Version xxx, 6.5.9 + * + * Terminate a connection. 
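+ *
+ * For example (illustrative consumer usage, not part of this patch),
+ * a consumer that prefers a graceful close but falls back to an
+ * abrupt one might do:
+ *
+ *	dat_status = dapl_ep_disconnect (ep_handle, DAT_CLOSE_GRACEFUL_FLAG);
+ *	if (dat_status != DAT_SUCCESS)
+ *	{
+ *	    (void) dapl_ep_disconnect (ep_handle, DAT_CLOSE_ABRUPT_FLAG);
+ *	}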
+ * + * Input: + * ep_handle + * disconnect_flags + * + * Output: + * None + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_ep_disconnect ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_CLOSE_FLAGS disconnect_flags) +{ + DAPL_EP *ep_ptr; + DAPL_EVD *evd_ptr; + DAPL_CR *cr_ptr; + DAT_RETURN dat_status = DAT_SUCCESS; + + dapl_dbg_log (DAPL_DBG_TYPE_API | DAPL_DBG_TYPE_CM, + "dapl_ep_disconnect (EP :%p, close flag: %x)\n", + ep_handle, + disconnect_flags); + DAPL_CNTR(DCNT_EP_DISCONNECT); + + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Verify parameter & state + */ + if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); + goto bail; + } + + /* + * Do the verification of parameters and the state change atomically + */ + dapl_os_lock (&ep_ptr->header.lock); + + switch ( ep_ptr->param.ep_state ) + { + case DAT_EP_STATE_DISCONNECTED: + break; + case DAT_EP_STATE_COMPLETION_PENDING: + dapls_ib_disconnect(ep_ptr, DAT_CLOSE_ABRUPT_FLAG); + break; + case DAT_EP_STATE_DISCONNECT_PENDING: + if (disconnect_flags == DAT_CLOSE_GRACEFUL_FLAG ) + { + break; + } + case DAT_EP_STATE_CONNECTED: + if (disconnect_flags == DAT_CLOSE_GRACEFUL_FLAG ) + { + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING; + } + if (ep_ptr->sent_discreq != TRUE ) + { + dapls_ib_disconnect(ep_ptr, disconnect_flags); + } + break; + case DAT_EP_STATE_ACTIVE_CONNECTION_PENDING: + if (ep_ptr->sent_discreq != TRUE ) + { + dapls_ib_disconnect(ep_ptr, disconnect_flags); + } + evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle; + dapls_evd_post_connection_event(evd_ptr, + DAT_CONNECTION_EVENT_DISCONNECTED, + (DAT_HANDLE) ep_ptr, + 0, + 0); + break; + default: + dat_status = DAT_ERROR (DAT_INVALID_STATE,dapls_ep_state_subtype (ep_ptr)); + break; + } + + dapl_os_unlock ( &ep_ptr->header.lock ); + +bail: + dapl_dbg_log (DAPL_DBG_TYPE_RTN | DAPL_DBG_TYPE_CM, + "dapl_ep_disconnect () returns 0x%x\n", dat_status); + + return dat_status; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_dup_connect.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_dup_connect.c new file mode 100644 index 00000000..d5eb6cb7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_dup_connect.c @@ -0,0 +1,129 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ep_dup_connect.c
+ *
+ * PURPOSE: Endpoint management
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 5
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_ep_util.h"
+#include "dapl_adapter_util.h"
+#include "dapl_timer_util.h"
+
+/*
+ * dapl_ep_dup_connect
+ *
+ * DAPL Requirements Version xxx, 6.5.8
+ *
+ * Request that a connection be established between the local Endpoint
+ * and a remote Endpoint. The remote Endpoint is identified by the
+ * dup_ep.
+ *
+ * Input:
+ *	ep_handle
+ *	ep_dup_handle
+ *	timeout
+ *	private_data_size
+ *	private_data
+ *	qos
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *	DAT_INVALID_PARAMETER
+ *	DAT_INVALID_STATE
+ *	DAT_MODEL_NOT_SUPPORTED
+ */
+DAT_RETURN
+dapl_ep_dup_connect (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_EP_HANDLE		ep_dup_handle,
+	IN	DAT_TIMEOUT		timeout,
+	IN	DAT_COUNT		private_data_size,
+	IN	const DAT_PVOID		private_data,
+	IN	DAT_QOS			qos)
+{
+    DAPL_EP		*ep_dup_ptr;
+    DAT_RETURN		dat_status;
+    DAT_IA_ADDRESS_PTR	remote_ia_address_ptr;
+    DAT_CONN_QUAL	remote_conn_qual;
+
+    ep_dup_ptr = (DAPL_EP *) ep_dup_handle;
+
+    /*
+     * Verify the dup handle, which must be connected. All other
+     * parameters will be verified by dapl_ep_connect
+     */
+    if ( DAPL_BAD_HANDLE (ep_dup_handle, DAPL_MAGIC_EP ) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
+	goto bail;
+    }
+
+    /* Can't do a connection in 0 time, reject outright */
+    if ( timeout == 0 )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
+	goto bail;
+    }
+
+    /* Check both the EP state and the QP state: if we don't have a QP
+     * there is a problem.  Do this under a lock and pull out
+     * the connection parameters for atomicity.
+     */
+    dapl_os_lock ( &ep_dup_ptr->header.lock );
+    if ( ep_dup_ptr->param.ep_state != DAT_EP_STATE_CONNECTED )
+    {
+	dapl_os_unlock ( &ep_dup_ptr->header.lock );
+	dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep_dup_ptr));
+	goto bail;
+    }
+    remote_ia_address_ptr = ep_dup_ptr->param.remote_ia_address_ptr;
+    remote_conn_qual = ep_dup_ptr->param.remote_port_qual;
+    dapl_os_unlock ( &ep_dup_ptr->header.lock );
+
+    dat_status = dapl_ep_connect ( ep_handle,
+				   remote_ia_address_ptr,
+				   remote_conn_qual,
+				   timeout,
+				   private_data_size,
+				   private_data,
+				   qos,
+				   DAT_CONNECT_DEFAULT_FLAG );
+bail:
+    return dat_status;
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_free.c
new file mode 100644
index 00000000..b6c206f7
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_free.c
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_free.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5.4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" +#include "dapl_ep_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ring_buffer_util.h" +#include "dapl_timer_util.h" + +/* + * dapl_ep_free + * + * DAPL Requirements Version xxx, 6.5.3 + * + * Destroy an instance of the Endpoint + * + * Input: + * ep_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + */ +DAT_RETURN +dapl_ep_free ( + IN DAT_EP_HANDLE ep_handle) +{ + DAPL_EP *ep_ptr; + DAPL_IA *ia_ptr; + DAT_EP_PARAM *param; + ib_qp_state_t save_qp_state; + DAT_RETURN dat_status = DAT_SUCCESS; + + dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_ep_free (%p)\n", ep_handle); + DAPL_CNTR(DCNT_EP_FREE); + + ep_ptr = (DAPL_EP *) ep_handle; + param = &ep_ptr->param; + + /* + * Verify parameter & state + */ + if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( ep_ptr->param.ep_state == DAT_EP_STATE_RESERVED || + ep_ptr->param.ep_state == DAT_EP_STATE_PASSIVE_CONNECTION_PENDING || + ep_ptr->param.ep_state == DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING ) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "--> dapl_ep_free: invalid state: %x, ep %p\n", + ep_ptr->param.ep_state, + ep_ptr); + dat_status = DAT_ERROR (DAT_INVALID_STATE, + dapls_ep_state_subtype (ep_ptr)); + goto bail; + } + + ia_ptr = ep_ptr->header.owner_ia; + + /* If we are connected, issue a disconnect. If we are in the + * disconnect_pending state, disconnect with the ABRUPT flag + * set. + */ + + /* + * Invoke ep_disconnect to clean up outstanding connections + */ + (void) dapl_ep_disconnect (ep_ptr, DAT_CLOSE_ABRUPT_FLAG); + dapl_os_assert (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED || + ep_ptr->param.ep_state == DAT_EP_STATE_UNCONNECTED); + + /* + * Do verification of parameters and the state change atomically. + */ + dapl_os_lock ( &ep_ptr->header.lock ); + + if (ep_ptr->cxn_timer != NULL) + { + dapls_timer_cancel ( ep_ptr->cxn_timer ); + dapl_os_free ( ep_ptr->cxn_timer, sizeof ( DAPL_OS_TIMER ) ); + ep_ptr->cxn_timer = NULL; + } + + /* Remove the EP from the IA */ + dapl_ia_unlink_ep ( ia_ptr, ep_ptr ); + + /* + * Update ref counts. Note the user may have used ep_modify + * to set handles to NULL. Set handles to NULL so this routine + * is idempotent. 
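+     *
+     * Illustrative sketch of the release pattern used below
+     * (hypothetical helper, not part of this patch):
+     *
+     *     static void
+     *     dapli_release_pz_ref (DAT_PZ_HANDLE *handle)
+     *     {
+     *         if (*handle != NULL)
+     *         {
+     *             dapl_os_atomic_dec (&((DAPL_PZ *)*handle)->pz_ref_count);
+     *             *handle = NULL;
+     *         }
+     *     }
+     *
+     * so a second pass through this routine becomes a no-op.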
+ */ + if ( param->pz_handle != NULL ) + { + dapl_os_atomic_dec (& ((DAPL_PZ *)param->pz_handle)->pz_ref_count); + param->pz_handle = NULL; + } + if ( param->recv_evd_handle != NULL ) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)param->recv_evd_handle)->evd_ref_count); + param->recv_evd_handle = NULL; + } + if ( param->request_evd_handle != NULL ) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)param->request_evd_handle)->evd_ref_count); + param->request_evd_handle = NULL; + } + if ( param->connect_evd_handle != NULL ) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)param->connect_evd_handle)->evd_ref_count); + param->connect_evd_handle = NULL; + } + + /* + * Finish tearing everything down. + */ + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "dapl_ep_free: Free EP: %x, ep %p qp_state %x qp_handle %x\n", + ep_ptr->param.ep_state, + ep_ptr, + ep_ptr->qp_state, + ep_ptr->qp_handle); + /* + * Take care of the transport resource. Make a copy of the qp_state + * to prevent race conditions when we exit the lock. + */ + save_qp_state = ep_ptr->qp_state; + ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED; + dapl_os_unlock (&ep_ptr->header.lock); + + /* Free the QP. If the EP has never been used, the QP is invalid */ + if ( save_qp_state != DAPL_QP_STATE_UNATTACHED ) + { + dat_status = dapls_ib_qp_free (ia_ptr, ep_ptr); + /* This should always succeed, but report to the user if + * there is a problem. The qp_state must be restored so + * they can try it again in the face of EINTR or similar + * where the QP is OK but the call couldn't complete. + */ + if (dat_status != DAT_SUCCESS) + { + ep_ptr->qp_state = save_qp_state; + goto bail; + } + } + + /* Free the resource */ + dapl_ep_dealloc (ep_ptr); + +bail: + return dat_status; + +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_get_status.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_get_status.c new file mode 100644 index 00000000..217581a4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_get_status.c @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_ep_get_status.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ring_buffer_util.h" + +/* + * dapl_ep_get_status + * + * DAPL Requirements Version xxx, 6.5.4 + * + * Provide the consumer with a quick snapshot of the Endpoint. + * The snapshot consists of Endpoint state and DTO information. + * + * Input: + * ep_handle + * + * Output: + * ep_state + * in_dto_idle + * out_dto_idle + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_ep_get_status ( + IN DAT_EP_HANDLE ep_handle, + OUT DAT_EP_STATE *ep_state, + OUT DAT_BOOLEAN *in_dto_idle, + OUT DAT_BOOLEAN *out_dto_idle) +{ + DAPL_EP *ep_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ep_get_status (%p, %p, %p, %p)\n", + ep_handle, + ep_state, + in_dto_idle, + out_dto_idle); + + ep_ptr = (DAPL_EP *) ep_handle; + dat_status = DAT_SUCCESS; + + /* + * Verify parameter & state + */ + if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + /* + * Gather state info for user + */ + if ( ep_state != NULL ) + { + *ep_state = ep_ptr->param.ep_state; + } + + if ( in_dto_idle != NULL ) + { + *in_dto_idle = (ep_ptr->recv_count) ? DAT_FALSE : DAT_TRUE; + } + + if ( out_dto_idle != NULL ) + { + *out_dto_idle = (ep_ptr->req_count) ? DAT_FALSE : DAT_TRUE; + } + + bail: + return dat_status; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_modify.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_modify.c new file mode 100644 index 00000000..6c0a84ed --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_modify.c @@ -0,0 +1,756 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ep_modify.c
+ *
+ * PURPOSE: Endpoint management
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.0 API, Chapter 6, section 5
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_cookie.h"
+#include "dapl_ep_util.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * Internal prototypes
+ */
+
+static _INLINE_ DAT_RETURN
+dapli_ep_modify_validate_parameters (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_EP_PARAM_MASK	ep_param_mask,
+	IN	const DAT_EP_PARAM	*ep_param,
+	OUT	DAPL_IA			**ia_ptr,
+	OUT	DAPL_EP			**ep_ptr,
+	OUT	DAT_EP_ATTR		*ep_attr_ptr );
+
+
+/*
+ * dapl_ep_modify
+ *
+ * DAPL Requirements Version xxx, 6.5.6
+ *
+ * Modify the parameters of an Endpoint, including its attributes.
+ *
+ * Input:
+ *	ep_handle
+ *	ep_param_mask
+ *	ep_param
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INVALID_PARAMETER
+ *	DAT_INVALID_ATTRIBUTE
+ *	DAT_INVALID_STATE
+ */
+DAT_RETURN
+dapl_ep_modify (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_EP_PARAM_MASK	ep_param_mask,
+	IN	const DAT_EP_PARAM	*ep_param )
+{
+    DAPL_IA		*ia;
+    DAPL_EP		*ep1, *ep2;
+    DAT_EP_ATTR		ep_attr1, ep_attr2;
+    DAPL_EP		new_ep, copy_of_old_ep;
+    DAPL_EP		alloc_ep;	/* Holder for resources. */
+    DAPL_PZ		*tmp_pz;
+    DAPL_EVD		*tmp_evd;
+    DAT_RETURN		dat_status;
+
+    /* Flag indicating we've allocated a new one of these. */
+    DAT_BOOLEAN qp_allocated = DAT_FALSE;
+    DAT_BOOLEAN rqst_cb_allocated = DAT_FALSE;
+    DAT_BOOLEAN recv_cb_allocated = DAT_FALSE;
+    DAT_BOOLEAN rqst_iov_allocated = DAT_FALSE;
+    DAT_BOOLEAN recv_iov_allocated = DAT_FALSE;
+
+    /* Flag indicating we've used (assigned to QP) a new one of these. */
+    DAT_BOOLEAN qp_used = DAT_FALSE;
+    DAT_BOOLEAN rqst_cb_used = DAT_FALSE;
+    DAT_BOOLEAN recv_cb_used = DAT_FALSE;
+    DAT_BOOLEAN rqst_iov_used = DAT_FALSE;
+    DAT_BOOLEAN recv_iov_used = DAT_FALSE;
+
+    dapl_os_memzero ( (void*)&alloc_ep, sizeof(DAPL_EP) );
+    dapl_os_memzero ( (void*)&new_ep, sizeof(DAPL_EP) );
+    dapl_os_memzero ( (void*)&copy_of_old_ep, sizeof(DAPL_EP) );
+
+    dat_status = dapli_ep_modify_validate_parameters ( ep_handle,
+						       ep_param_mask,
+						       ep_param,
+						       &ia,
+						       &ep1,
+						       &ep_attr1);
+    if ( DAT_SUCCESS != dat_status)
+    {
+	goto bail;
+    }
+
+    /*
+     * Setup the alloc_ep with the appropriate parameters (primarily
+     * for allocating the QP).
+     */
+    alloc_ep = *ep1;
+    alloc_ep.param.ep_attr = ep_attr1;
+    if ( ep_param_mask & DAT_EP_FIELD_PZ_HANDLE )
+    {
+	alloc_ep.param.pz_handle = ep_param->pz_handle;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_RECV_EVD_HANDLE )
+    {
+	alloc_ep.param.recv_evd_handle = ep_param->recv_evd_handle;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_REQUEST_EVD_HANDLE )
+    {
+	alloc_ep.param.request_evd_handle = ep_param->request_evd_handle;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_CONNECT_EVD_HANDLE )
+    {
+	alloc_ep.param.connect_evd_handle = ep_param->connect_evd_handle;
+    }
+
+    /*
+     * Allocate everything that might be needed.
+     * We allocate separately, and into a different "holding"
+     * ep, since we a) want the copy of the old ep into the new ep to
+     * be atomic with the assignment back (under lock), b) want the
+     * assignment of the allocated materials to be after the copy of the
+     * old ep into the new ep, and c) don't want the allocation done
+     * under lock.
+     */
+    dat_status = dapls_cb_create (
+		&alloc_ep.req_buffer,
+		ep1,		/* For pointer in buffer bool.
*/
+		ep_attr1.max_request_dtos );
+    if ( DAT_SUCCESS != dat_status )
+    {
+	goto bail;
+    }
+    rqst_cb_allocated = DAT_TRUE;
+
+    dat_status = dapls_cb_create (
+		&alloc_ep.recv_buffer,
+		ep1,		/* For pointer in buffer bool. */
+		ep_attr1.max_recv_dtos );
+    if ( DAT_SUCCESS != dat_status )
+    {
+	goto bail;
+    }
+    recv_cb_allocated = DAT_TRUE;
+
+    alloc_ep.send_iov_num = ep_attr1.max_request_iov;
+    alloc_ep.send_iov = dapl_os_alloc (ep_attr1.max_request_iov
+				       * sizeof (ib_data_segment_t));
+    if ( alloc_ep.send_iov == NULL )
+    {
+	dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+	goto bail;
+    }
+    rqst_iov_allocated = DAT_TRUE;
+
+    alloc_ep.recv_iov_num = ep_attr1.max_recv_iov;
+    alloc_ep.recv_iov = dapl_os_alloc (ep_attr1.max_recv_iov
+				       * sizeof (ib_data_segment_t));
+    if ( alloc_ep.recv_iov == NULL )
+    {
+	dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+	goto bail;
+    }
+    recv_iov_allocated = DAT_TRUE;
+
+    dat_status = dapls_ib_qp_alloc ( ia, &alloc_ep, ep1 );
+    if ( dat_status != DAT_SUCCESS )
+    {
+	goto bail;
+    }
+    qp_allocated = DAT_TRUE;
+
+    /*
+     * Now we atomically modify the EP, under lock
+     * There's a lot of work done here, but there should be no
+     * allocation or blocking.
+     */
+    dapl_os_lock ( &ep1->header.lock );
+
+    /*
+     * Revalidate parameters; make sure that races haven't
+     * changed anything important.
+     */
+    dat_status = dapli_ep_modify_validate_parameters ( ep_handle,
+						       ep_param_mask,
+						       ep_param,
+						       &ia,
+						       &ep2,
+						       &ep_attr2 );
+    if ( DAT_SUCCESS != dat_status )
+    {
+	/* ep2 is NULL on failure; ep1 is the EP we locked above. */
+	dapl_os_unlock ( &ep1->header.lock );
+	goto bail;
+    }
+
+    /*
+     * All of the following should be impossible, if validation
+     * occurred. But they're important to the logic of this routine,
+     * so we check.
+     */
+    dapl_os_assert ( ep1 == ep2 );
+    dapl_os_assert ( ep_attr2.max_recv_dtos == ep_attr1.max_recv_dtos );
+    dapl_os_assert ( ep_attr2.max_request_dtos == ep_attr1.max_request_dtos );
+    dapl_os_assert ( ep_attr2.max_recv_iov == ep_attr1.max_recv_iov );
+    dapl_os_assert ( ep_attr2.max_request_iov == ep_attr1.max_request_iov );
+
+    copy_of_old_ep = *ep2;
+
+    /*
+     * Setup new ep.
+     */
+    new_ep = *ep2;
+    new_ep.param.ep_attr = ep_attr2;
+
+    /*
+     * We can initialize the PZ and EVD handles from the alloc_ep because
+     * the only thing that could have changed since we setup the alloc_ep
+     * is stuff changed by dapl_cr_accept, and neither PZ nor EVD is in that
+     * list.
+     */
+    new_ep.param.pz_handle = alloc_ep.param.pz_handle;
+    new_ep.param.recv_evd_handle = alloc_ep.param.recv_evd_handle;
+    new_ep.param.request_evd_handle = alloc_ep.param.request_evd_handle;
+    new_ep.param.connect_evd_handle = alloc_ep.param.connect_evd_handle;
+
+    /* Deal with each of the allocation fields.
*/
+    if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_RECV_DTOS
+	 && (ep_param->ep_attr.max_recv_dtos
+	     != ep2->param.ep_attr.max_recv_dtos) )
+    {
+	new_ep.recv_buffer = alloc_ep.recv_buffer;
+	recv_cb_used = DAT_TRUE;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_DTOS
+	 && (ep_param->ep_attr.max_request_dtos
+	     != ep2->param.ep_attr.max_request_dtos) )
+    {
+	new_ep.req_buffer = alloc_ep.req_buffer;
+	rqst_cb_used = DAT_TRUE;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_RECV_IOV
+	 && new_ep.recv_iov_num != ep2->recv_iov_num )
+    {
+	new_ep.recv_iov = alloc_ep.recv_iov;
+	recv_iov_used = DAT_TRUE;
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_IOV
+	 && new_ep.send_iov_num != ep2->send_iov_num )
+    {
+	new_ep.send_iov = alloc_ep.send_iov;
+	rqst_iov_used = DAT_TRUE;
+    }
+
+    /*
+     * We need to change the QP only if there already was a QP
+     * (leave things the way you found them!) and one of the
+     * following has changed: send/recv EVD, send/recv reqs/IOV max.
+     */
+    if ( DAPL_QP_STATE_UNATTACHED != new_ep.qp_state
+	 && (ep_param_mask
+	     & (DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_IOV
+		| DAT_EP_FIELD_EP_ATTR_MAX_RECV_IOV
+		| DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_DTOS
+		| DAT_EP_FIELD_EP_ATTR_MAX_RECV_DTOS
+		| DAT_EP_FIELD_RECV_EVD_HANDLE
+		| DAT_EP_FIELD_REQUEST_EVD_HANDLE)) )
+    {
+	/*
+	 * We shouldn't be racing with connection establishment
+	 * because the parameter validate routine should protect us,
+	 * but it's an important enough point that we assert it.
+	 */
+	dapl_os_assert ( (ep2->param.ep_state
+			  != DAT_EP_STATE_PASSIVE_CONNECTION_PENDING)
+			 && (ep2->param.ep_state
+			     != DAT_EP_STATE_ACTIVE_CONNECTION_PENDING) );
+
+	new_ep.qp_handle = alloc_ep.qp_handle;
+	new_ep.qpn = alloc_ep.qpn;
+	/* The new QP is now owned by the EP; don't free it at bail. */
+	qp_used = DAT_TRUE;
+    }
+
+    /*
+     * The actual assignment, including modifying QP parameters.
+     * Modifying QP parameters needs to come first, as if it fails
+     * we need to exit.
+     */
+    if ( DAPL_QP_STATE_UNATTACHED != new_ep.qp_state )
+    {
+	dat_status = dapls_ib_qp_modify ( ia, ep2, &ep_attr2 );
+	if ( dat_status != DAT_SUCCESS )
+	{
+	    dapl_os_unlock ( &ep2->header.lock );
+	    goto bail;
+	}
+    }
+    *ep2 = new_ep;
+
+    dapl_os_unlock ( &ep2->header.lock );
+
+    /*
+     * Modify reference counts, incrementing new ones
+     * and then decrementing old ones (so if they're the same
+     * the refcount never drops to zero).
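+     *
+     * Concretely (illustration only), for a PZ that is unchanged by
+     * this call:
+     *
+     *     dapl_os_atomic_inc (&tmp_pz->pz_ref_count);    n     -> n + 1
+     *     dapl_os_atomic_dec (&tmp_pz->pz_ref_count);    n + 1 -> n
+     *
+     * whereas decrementing first could briefly drop the count to zero
+     * and let a racing dat_pz_free tear the PZ down while it is still
+     * referenced here.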
+     */
+    tmp_pz = (DAPL_PZ *) new_ep.param.pz_handle;
+    if ( NULL != tmp_pz )
+    {
+	dapl_os_atomic_inc (&tmp_pz->pz_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) new_ep.param.recv_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_inc (&tmp_evd->evd_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) new_ep.param.request_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_inc (&tmp_evd->evd_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) new_ep.param.connect_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_inc (&tmp_evd->evd_ref_count);
+    }
+
+    /* decrement the old reference counts */
+    tmp_pz = (DAPL_PZ *) copy_of_old_ep.param.pz_handle;
+    if ( NULL != tmp_pz )
+    {
+	dapl_os_atomic_dec (&tmp_pz->pz_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) copy_of_old_ep.param.recv_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_dec (&tmp_evd->evd_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) copy_of_old_ep.param.request_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_dec (&tmp_evd->evd_ref_count);
+    }
+
+    tmp_evd = (DAPL_EVD *) copy_of_old_ep.param.connect_evd_handle;
+    if ( NULL != tmp_evd )
+    {
+	dapl_os_atomic_dec (&tmp_evd->evd_ref_count);
+    }
+
+bail:
+    if ( qp_allocated )
+    {
+	DAT_RETURN local_dat_status;
+	if ( dat_status != DAT_SUCCESS || !qp_used )
+	{
+	    local_dat_status = dapls_ib_qp_free (ia, &alloc_ep );
+	}
+	else
+	{
+	    local_dat_status = dapls_ib_qp_free (ia, &copy_of_old_ep );
+	}
+	if (local_dat_status != DAT_SUCCESS)
+	{
+	    dapl_dbg_log (DAPL_DBG_TYPE_WARN,
+			  "ep_modify: Failed to free QP; status %x\n",
+			  local_dat_status);
+	}
+    }
+
+    if ( rqst_cb_allocated )
+    {
+	if ( dat_status != DAT_SUCCESS || !rqst_cb_used )
+	{
+	    dapls_cb_free ( &alloc_ep.req_buffer );
+	}
+	else
+	{
+	    dapls_cb_free ( &copy_of_old_ep.req_buffer );
+	}
+    }
+
+    if ( recv_cb_allocated )
+    {
+	if ( dat_status != DAT_SUCCESS || !recv_cb_used )
+	{
+	    dapls_cb_free ( &alloc_ep.recv_buffer );
+	}
+	else
+	{
+	    dapls_cb_free ( &copy_of_old_ep.recv_buffer );
+	}
+    }
+
+    if ( rqst_iov_allocated )
+    {
+	if ( dat_status != DAT_SUCCESS || !rqst_iov_used )
+	{
+	    dapl_os_free ( alloc_ep.send_iov,
+			   (alloc_ep.send_iov_num
+			    * sizeof (ib_data_segment_t)));
+	}
+	else
+	{
+	    dapl_os_free ( copy_of_old_ep.send_iov,
+			   (copy_of_old_ep.send_iov_num
+			    * sizeof (ib_data_segment_t)));
+	}
+    }
+
+    if ( recv_iov_allocated )
+    {
+	if ( dat_status != DAT_SUCCESS || !recv_iov_used )
+	{
+	    dapl_os_free ( alloc_ep.recv_iov,
+			   (alloc_ep.recv_iov_num
+			    * sizeof (ib_data_segment_t)));
+	}
+	else
+	{
+	    dapl_os_free ( copy_of_old_ep.recv_iov,
+			   (copy_of_old_ep.recv_iov_num
+			    * sizeof (ib_data_segment_t)));
+	}
+    }
+    return dat_status;
+}
+
+
+/*
+ * dapli_ep_modify_validate_parameters
+ *
+ * Validate parameters
+ *
+ * The space for the ep_attr_ptr parameter should be allocated by the
+ * consumer. Upon success, this parameter will contain the current ep
+ * attribute values with the requested modifications made.
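+ *
+ * Typical use from dapl_ep_modify above (illustration only):
+ *
+ *	dat_status = dapli_ep_modify_validate_parameters (ep_handle,
+ *	                                                  ep_param_mask,
+ *	                                                  ep_param,
+ *	                                                  &ia, &ep, &ep_attr);
+ *	if (dat_status != DAT_SUCCESS)
+ *	{
+ *	    goto bail;
+ *	}
+ *
+ * after which ep_attr holds the current attributes plus the requested
+ * changes, ready to be applied under the EP lock.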
+ *
+ */
+
+static DAT_RETURN
+dapli_ep_modify_validate_parameters (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_EP_PARAM_MASK	ep_param_mask,
+	IN	const DAT_EP_PARAM	*ep_param,
+	OUT	DAPL_IA			**ia_ptr,
+	OUT	DAPL_EP			**ep_ptr,
+	OUT	DAT_EP_ATTR		*ep_attr_ptr )
+{
+    DAPL_IA		*ia;
+    DAPL_EP		*ep;
+    DAT_EP_ATTR		ep_attr;
+    DAT_EP_ATTR		ep_attr_limit;
+    DAT_EP_ATTR		ep_attr_request;
+    DAT_RETURN		dat_status;
+
+    *ia_ptr = NULL;
+    *ep_ptr = NULL;
+    dat_status = DAT_SUCCESS;
+
+    if ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP ) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
+	goto bail;
+    }
+
+    ep = (DAPL_EP *) ep_handle;
+    ia = ep->header.owner_ia;
+
+    /*
+     * Verify parameters valid in current EP state
+     */
+    if ( ep_param_mask & (DAT_EP_FIELD_IA_HANDLE |
+			  DAT_EP_FIELD_EP_STATE |
+			  DAT_EP_FIELD_LOCAL_IA_ADDRESS_PTR |
+			  DAT_EP_FIELD_LOCAL_PORT_QUAL |
+			  DAT_EP_FIELD_REMOTE_IA_ADDRESS_PTR |
+			  DAT_EP_FIELD_REMOTE_PORT_QUAL) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+	goto bail;
+    }
+
+    /*
+     * Can only change the PZ handle if we are UNCONNECTED or
+     * TENTATIVE_CONNECTION_PENDING (psp PROVIDER allocated EP)
+     */
+    if ( (ep_param_mask & DAT_EP_FIELD_PZ_HANDLE) &&
+	 ( ep->param.ep_state != DAT_EP_STATE_UNCONNECTED &&
+	   ep->param.ep_state != DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING ) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep));
+	goto bail;
+    }
+
+    if ( (ep_param_mask & (DAT_EP_FIELD_RECV_EVD_HANDLE |
+			   DAT_EP_FIELD_REQUEST_EVD_HANDLE |
+			   DAT_EP_FIELD_CONNECT_EVD_HANDLE |
+			   DAT_EP_FIELD_EP_ATTR_SERVICE_TYPE |
+			   DAT_EP_FIELD_EP_ATTR_MAX_MESSAGE_SIZE |
+			   DAT_EP_FIELD_EP_ATTR_MAX_RDMA_SIZE |
+			   DAT_EP_FIELD_EP_ATTR_QOS |
+			   DAT_EP_FIELD_EP_ATTR_REQUEST_COMPLETION_FLAGS |
+			   DAT_EP_FIELD_EP_ATTR_RECV_COMPLETION_FLAGS |
+			   DAT_EP_FIELD_EP_ATTR_MAX_RECV_DTOS |
+			   DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_DTOS |
+			   DAT_EP_FIELD_EP_ATTR_MAX_RECV_IOV |
+			   DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_IOV )) &&
+	 ( ep->param.ep_state != DAT_EP_STATE_UNCONNECTED &&
+	   ep->param.ep_state != DAT_EP_STATE_RESERVED &&
+	   ep->param.ep_state != DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING ) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep));
+	goto bail;
+    }
+
+    /*
+     * Validate handles being modified
+     */
+    if ( ep_param_mask & DAT_EP_FIELD_PZ_HANDLE )
+    {
+	if (ep_param->pz_handle != NULL &&
+	    DAPL_BAD_HANDLE (ep_param->pz_handle, DAPL_MAGIC_PZ) )
+	{
+	    dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
+	    goto bail;
+	}
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_RECV_EVD_HANDLE )
+    {
+	if (ep_param->recv_evd_handle != NULL &&
+	    (DAPL_BAD_HANDLE (ep_param->recv_evd_handle, DAPL_MAGIC_EVD) ||
+	     ! (((DAPL_EVD *)ep_param->recv_evd_handle)->evd_flags & DAT_EVD_DTO_FLAG)))
+	{
+	    dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
+	    goto bail;
+	}
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_REQUEST_EVD_HANDLE )
+    {
+	if (ep_param->request_evd_handle != NULL &&
+	    DAPL_BAD_HANDLE (ep_param->request_evd_handle, DAPL_MAGIC_EVD))
+	{
+	    dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
+	    goto bail;
+	}
+    }
+
+    if ( ep_param_mask & DAT_EP_FIELD_CONNECT_EVD_HANDLE )
+    {
+	if (ep_param->connect_evd_handle != NULL &&
+	    DAPL_BAD_HANDLE (ep_param->connect_evd_handle, DAPL_MAGIC_EVD) &&
+	    !
(((DAPL_EVD *)ep_param->connect_evd_handle)->evd_flags & DAT_EVD_CONNECTION_FLAG) ) + + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + } + + /* + * Validate the attributes against the HCA limits + */ + ep_attr = ep->param.ep_attr; + + dapl_os_memzero (&ep_attr_limit, sizeof (DAT_EP_ATTR)); + dat_status = dapls_ib_query_hca (ia->hca_ptr, NULL, &ep_attr_limit, NULL); + if ( dat_status != DAT_SUCCESS ) + { + goto bail; + } + + ep_attr_request = ep_param->ep_attr; + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_SERVICE_TYPE ) + { + if ( ep_attr_request.service_type != DAT_SERVICE_TYPE_RC ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_MESSAGE_SIZE ) + { + if ( ep_attr_request.max_mtu_size > ep_attr_limit.max_mtu_size) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + else + { + ep_attr.max_mtu_size = ep_attr_request.max_mtu_size; + } + } + + /* Do nothing if the DAT_EP_FIELD_EP_ATTR_MAX_RDMA_SIZE flag is */ + /* set. Each RDMA transport/provider may or may not have a limit */ + /* on the size of an RDMA DTO. For InfiniBand, this parameter is */ + /* validated in the implementation of the dapls_ib_qp_modify() */ + /* function. */ + /* */ + /* if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_RDMA_SIZE ) */ + /* { */ + /* */ + /* } */ + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_QOS ) + { + /* Do nothing, not defined in the spec yet */ + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_RECV_COMPLETION_FLAGS ) + { + dat_status = dapl_ep_check_recv_completion_flags ( + ep_attr_request.recv_completion_flags); + if ( dat_status != DAT_SUCCESS ) + + { + goto bail; + } + else + { + ep_attr.recv_completion_flags = + ep_attr_request.recv_completion_flags; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_REQUEST_COMPLETION_FLAGS ) + { + dat_status = dapl_ep_check_request_completion_flags ( + ep_attr_request.request_completion_flags); + if ( dat_status != DAT_SUCCESS ) + { + goto bail; + } + else + { + ep_attr.request_completion_flags = + ep_attr_request.request_completion_flags; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_RECV_DTOS ) + { + if ( ep_attr_request.max_recv_dtos > ep_attr_limit.max_recv_dtos || + ( ep_param->recv_evd_handle == DAT_HANDLE_NULL && + ( ep_attr_request.max_recv_dtos > 0 ) ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + else + { + ep_attr.max_recv_dtos = ep_attr_request.max_recv_dtos; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_DTOS ) + { + if ( ep_attr_request.max_request_dtos > ep_attr_limit.max_request_dtos || + ( ep_param->request_evd_handle == DAT_HANDLE_NULL && + ( ep_attr_request.max_request_dtos > 0 ) ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + else + { + ep_attr.max_request_dtos = ep_attr_request.max_request_dtos; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_RECV_IOV ) + { + if ( ep_attr_request.max_recv_iov > ep_attr_limit.max_recv_iov) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + else + { + ep_attr.max_recv_iov = ep_attr_request.max_recv_iov; + } + } + + if ( ep_param_mask & DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_IOV ) + { + if ( ep_attr_request.max_request_iov > ep_attr_limit.max_request_iov) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + else + { + 
ep_attr.max_request_iov = ep_attr_request.max_request_iov;
+	}
+    }
+
+    *ia_ptr = ia;
+    *ep_ptr = ep;
+    *ep_attr_ptr = ep_attr;
+
+bail:
+    return dat_status;
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c
new file mode 100644
index 00000000..8d5b8d20
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_read.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ep_post_rdma_read.c
+ *
+ * PURPOSE: Endpoint management
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 5
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_ep_util.h"
+
+/*
+ * dapl_ep_post_rdma_read
+ *
+ * DAPL Requirements Version xxx, 6.5.12
+ *
+ * Request the xfer of all data specified by the remote_iov over the
+ * connection of ep handle Endpoint into the local_iov
+ *
+ * Input:
+ *	ep_handle
+ *	num_segments
+ *	local_iov
+ *	user_cookie
+ *	remote_iov
+ *	completion_flags
+ *
+ * Output:
+ *	None.
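+ *
+ * For example (consumer sketch only; the LMR/RMR setup and the
+ * lmr_context, peer_rmr_context, local_vaddr, peer_vaddr and len
+ * variables are assumed, not part of this patch), reading len bytes
+ * from the peer into a registered local buffer:
+ *
+ *	DAT_LMR_TRIPLET local_iov;
+ *	DAT_RMR_TRIPLET remote_iov;
+ *	DAT_DTO_COOKIE  cookie;
+ *
+ *	local_iov.lmr_context     = lmr_context;
+ *	local_iov.virtual_address = local_vaddr;
+ *	local_iov.segment_length  = len;
+ *
+ *	remote_iov.rmr_context    = peer_rmr_context;
+ *	remote_iov.target_address = peer_vaddr;
+ *	remote_iov.segment_length = len;
+ *
+ *	cookie.as_64 = 0;
+ *
+ *	dat_status = dapl_ep_post_rdma_read (ep_handle, 1, &local_iov,
+ *	                                     cookie, &remote_iov,
+ *	                                     DAT_COMPLETION_DEFAULT_FLAG);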
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *	DAT_INVALID_PARAMETER
+ *	DAT_INVALID_STATE
+ *	DAT_LENGTH_ERROR
+ *	DAT_PROTECTION_VIOLATION
+ *	DAT_PRIVILEGES_VIOLATION
+ */
+DAT_RETURN
+dapl_ep_post_rdma_read (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_COUNT		num_segments,
+	IN	DAT_LMR_TRIPLET		*local_iov,
+	IN	DAT_DTO_COOKIE		user_cookie,
+	IN	const DAT_RMR_TRIPLET	*remote_iov,
+	IN	DAT_COMPLETION_FLAGS	completion_flags)
+{
+
+    DAT_RETURN dat_status;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_API,
+		  "dapl_ep_post_rdma_read (%p, %d, %p, %I64x, %p, %x)\n",
+		  ep_handle,
+		  num_segments,
+		  local_iov,
+		  user_cookie.as_64,
+		  remote_iov,
+		  completion_flags);
+    DAPL_CNTR(DCNT_POST_RDMA_READ);
+
+    dat_status = dapl_ep_post_send_req(ep_handle,
+				       num_segments,
+				       local_iov,
+				       user_cookie,
+				       remote_iov,
+				       completion_flags,
+				       DAPL_DTO_TYPE_RDMA_READ,
+				       OP_RDMA_READ);
+
+    dapl_dbg_log (DAPL_DBG_TYPE_RTN,
+		  "dapl_ep_post_rdma_read () returns 0x%x",
+		  dat_status);
+
+    return dat_status;
+}
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c
new file mode 100644
index 00000000..40e09ff7
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_rdma_write.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ep_post_rdma_write.c
+ *
+ * PURPOSE: Endpoint management
+ * Description: Interfaces in this file are completely described in
+ *              the DAPL 1.1 API, Chapter 6, section 5
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_ep_util.h"
+
+/*
+ * dapl_ep_post_rdma_write
+ *
+ * DAPL Requirements Version xxx, 6.5.13
+ *
+ * Request the xfer of all data specified by the local_iov over the
+ * connection of ep handle Endpoint into the remote_iov
+ *
+ * Input:
+ *	ep_handle
+ *	num_segments
+ *	local_iov
+ *	user_cookie
+ *	remote_iov
+ *	completion_flags
+ *
+ * Output:
+ *	None.
+ * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + * DAT_LENGTH_ERROR + * DAT_PROTECTION_VIOLATION + * DAT_PRIVILEGES_VIOLATION + */ +DAT_RETURN +dapl_ep_post_rdma_write ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov, + IN DAT_DTO_COOKIE user_cookie, + IN const DAT_RMR_TRIPLET *remote_iov, + IN DAT_COMPLETION_FLAGS completion_flags ) +{ + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ep_post_rdma_write (%p, %d, %p, %#I64x, %p, %x)\n", + ep_handle, + num_segments, + local_iov, + user_cookie.as_64, + remote_iov, + completion_flags); + DAPL_CNTR(DCNT_POST_RDMA_WRITE); + + dat_status = dapl_ep_post_send_req(ep_handle, + num_segments, + local_iov, + user_cookie, + remote_iov, + completion_flags, + DAPL_DTO_TYPE_RDMA_WRITE, + OP_RDMA_WRITE); + + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_ep_post_rdma_write () returns 0x%x", + dat_status); + + + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_recv.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_recv.c new file mode 100644 index 00000000..b0bb7948 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_recv.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_post_recv.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_cookie.h" +#include "dapl_adapter_util.h" + +/* + * dapl_ep_post_recv + * + * DAPL Requirements Version xxx, 6.5.11 + * + * Request to receive data over the connection of ep handle into + * local_iov + * + * Input: + * ep_handle + * num_segments + * local_iov + * user_cookie + * completion_flags + * + * Output: + * None. 
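+ *
+ * The user_cookie is returned unchanged in the resulting DTO
+ * completion, which is how a consumer pairs completions with posted
+ * buffers (sketch only; event and error handling simplified):
+ *
+ *	DAT_EVENT event;
+ *
+ *	dat_evd_dequeue (recv_evd_handle, &event);
+ *	buf = event.event_data.dto_completion_event_data.user_cookie.as_ptr;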
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *	DAT_INVALID_PARAMETER
+ *	DAT_INVALID_STATE
+ *	DAT_PROTECTION_VIOLATION
+ *	DAT_PRIVILEGES_VIOLATION
+ */
+DAT_RETURN
+dapl_ep_post_recv (
+	IN	DAT_EP_HANDLE		ep_handle,
+	IN	DAT_COUNT		num_segments,
+	IN	DAT_LMR_TRIPLET		*local_iov,
+	IN	DAT_DTO_COOKIE		user_cookie,
+	IN	DAT_COMPLETION_FLAGS	completion_flags )
+{
+    DAPL_EP		*ep_ptr;
+    DAPL_COOKIE		*cookie;
+    DAT_RETURN		dat_status;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_API,
+		  "dapl_ep_post_recv (%p, %d, %p, %I64x, %x)\n",
+		  ep_handle,
+		  num_segments,
+		  local_iov,
+		  user_cookie.as_64,
+		  completion_flags);
+    DAPL_CNTR (DCNT_POST_RECV);
+
+    if ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP);
+	goto bail;
+    }
+
+    ep_ptr = (DAPL_EP *) ep_handle;
+
+    /*
+     * Synchronization ok since this buffer is only used for receive
+     * requests, which aren't allowed to race with each other.
+     */
+    dat_status = dapls_dto_cookie_alloc (&ep_ptr->recv_buffer,
+					 DAPL_DTO_TYPE_RECV,
+					 user_cookie,
+					 &cookie);
+    if ( DAT_SUCCESS != dat_status)
+    {
+	goto bail;
+    }
+
+    /*
+     * Take reference before posting to avoid race conditions with
+     * completions
+     */
+    dapl_os_atomic_inc (&ep_ptr->recv_count);
+
+    /*
+     * Invoke provider specific routine to post DTO
+     */
+    dat_status = dapls_ib_post_recv (ep_ptr, cookie, num_segments, local_iov);
+
+    if ( dat_status != DAT_SUCCESS )
+    {
+	dapl_os_atomic_dec (&ep_ptr->recv_count);
+	dapls_cookie_dealloc (&ep_ptr->recv_buffer, cookie);
+    }
+
+bail:
+    dapl_dbg_log (DAPL_DBG_TYPE_RTN,
+		  "dapl_ep_post_recv () returns 0x%x\n",
+		  dat_status);
+
+    return dat_status;
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_send.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_send.c
new file mode 100644
index 00000000..31074f9f
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_post_send.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * MODULE: dapl_ep_post_send.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl_ep_util.h" + +/* + * dapl_ep_post_send + * + * DAPL Requirements Version xxx, 6.5.10 + * + * Request a transfer of all the data from the local_iov over + * the connection of the ep handle Endpoint to the remote side. + * + * Input: + * ep_handle + * num_segments + * local_iov + * user_cookie + * completion_flags + * + * Output: + * None + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + * DAT_PROTECTION_VIOLATION + * DAT_PRIVILEGES_VIOLATION + */ +DAT_RETURN +dapl_ep_post_send ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov, + IN DAT_DTO_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags ) +{ + DAT_RMR_TRIPLET remote_iov = {0,0,0,0}; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ep_post_send (%p, %d, %p, %I64x, %x)\n", + ep_handle, + num_segments, + local_iov, + user_cookie.as_64, + completion_flags); + DAPL_CNTR(DCNT_POST_SEND); + + dat_status = dapl_ep_post_send_req(ep_handle, + num_segments, + local_iov, + user_cookie, + &remote_iov, + completion_flags, + DAPL_DTO_TYPE_SEND, + OP_SEND); + + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_ep_post_send () returns 0x%x\n", + dat_status); + + + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_query.c new file mode 100644 index 00000000..77a80103 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_query.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ep_query.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" + +/* + * dapl_ep_query + * + * DAPL Requirements Version xxx, 6.5.5 + * + * Provide the consumer parameters, including attributes and status of + * the Endpoint. 
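+ *
+ * For example (sketch only; the remote variable is assumed declared
+ * by the consumer, not part of this patch), checking whether an EP is
+ * connected:
+ *
+ *	DAT_EP_PARAM param;
+ *
+ *	dat_status = dapl_ep_query (ep_handle, DAT_EP_FIELD_ALL, &param);
+ *	if (dat_status == DAT_SUCCESS &&
+ *	    param.ep_state == DAT_EP_STATE_CONNECTED)
+ *	{
+ *	    remote = param.remote_ia_address_ptr;
+ *	}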
+ * + * Input: + * ep_handle + * ep_param_mask + * + * Output: + * ep_param + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_ep_query ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_EP_PARAM_MASK ep_param_mask, + OUT DAT_EP_PARAM *ep_param ) +{ + DAPL_EP *ep_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ep_query (%p, %x, %p)\n", + ep_handle, + ep_param_mask, + ep_param); + + dat_status = DAT_SUCCESS; + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Verify parameter & state + */ + if ( DAPL_BAD_HANDLE(ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( ep_param == NULL) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + /* + * Fill in according to user request + * + * N.B. Just slam all values into the user structure, there + * is nothing to be gained by checking for each bit. + */ + if ( ep_param_mask & DAT_EP_FIELD_ALL ) + { + if ( ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED ) + { + /* obtain the remote IP address */ + dat_status = dapls_ib_cm_remote_addr ((DAT_HANDLE)ep_handle, + NULL, + &ep_ptr->remote_ia_address ); + } + ep_ptr->param.remote_ia_address_ptr = + (DAT_IA_ADDRESS_PTR) &ep_ptr->remote_ia_address; + *ep_param = ep_ptr->param; + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_reset.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_reset.c new file mode 100644 index 00000000..31bb1103 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_reset.c @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_ep_reset.c + * + * PURPOSE: Endpoint management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 5.13 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" +#include "dapl_ep_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ring_buffer_util.h" + +/* + * dapl_ep_reset + * + * DAPL Requirements Version 1.1, 6.5.13 + * + * Reset the QP attached to this Endpoint, transitioning back to the + * INIT state + * + * Input: + * ep_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + */ +DAT_RETURN +dapl_ep_reset ( + IN DAT_EP_HANDLE ep_handle) +{ + DAPL_EP *ep_ptr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Verify parameter & state + */ + if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + if ( ep_ptr->param.ep_state != DAT_EP_STATE_UNCONNECTED + && ep_ptr->param.ep_state != DAT_EP_STATE_DISCONNECTED ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,dapls_ep_state_subtype (ep_ptr)); + goto bail; + } + + if ( ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED ) + { + dat_status = dapls_ib_reinit_ep ( ep_ptr ); + ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED; + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.c new file mode 100644 index 00000000..0ef2ff50 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.c @@ -0,0 +1,559 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_ep_util.c + * + * PURPOSE: Manage EP Info structure + * + * $Id$ + **********************************************************************/ + +#include "dapl_ep_util.h" +#include "dapl_ring_buffer_util.h" +#include "dapl_cookie.h" +#include "dapl_adapter_util.h" +#include "dapl_evd_util.h" + +/* + * Local definitions + */ +/* + * Default number of I/O operations on an end point + */ +#define IB_IO_DEFAULT 16 + +/* + * Default number of scatter/gather entries available to a single + * post send/recv + */ +#define IB_IOV_DEFAULT 4 + +/* + * Default number of RDMA operations in progress at a time + */ +#define IB_RDMA_DEFAULT 4 + +extern void dapli_ep_default_attrs ( + IN DAPL_EP *ep_ptr ); + + +/* + * dapl_ep_alloc + * + * alloc and initialize an EP INFO struct + * + * Input: + * IA INFO struct ptr + * + * Output: + * ep_ptr + * + * Returns: + * none + * + */ +DAPL_EP * +dapl_ep_alloc ( + IN DAPL_IA *ia_ptr, + IN const DAT_EP_ATTR *ep_attr ) +{ + DAPL_EP *ep_ptr; + + /* Allocate EP */ + ep_ptr = (DAPL_EP *)dapl_os_alloc (sizeof (DAPL_EP) + sizeof (DAT_SOCK_ADDR)); + if ( ep_ptr == NULL ) + { + goto bail; + } + + /* zero the structure */ + dapl_os_memzero (ep_ptr, sizeof (DAPL_EP)+ sizeof (DAT_SOCK_ADDR)); + + /* + * initialize the header + */ + ep_ptr->header.provider = ia_ptr->header.provider; + ep_ptr->header.magic = DAPL_MAGIC_EP; + ep_ptr->header.handle_type = DAT_HANDLE_TYPE_EP; + ep_ptr->header.owner_ia = ia_ptr; + ep_ptr->header.user_context.as_64 = 0; + ep_ptr->header.user_context.as_ptr = NULL; + + dapl_llist_init_entry (&ep_ptr->header.ia_list_entry); + dapl_os_lock_init (&ep_ptr->header.lock); + + /* + * Initialize the body + */ + dapl_os_memzero (&ep_ptr->param, sizeof (DAT_EP_PARAM)); + ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED; + ep_ptr->param.local_ia_address_ptr = + (DAT_IA_ADDRESS_PTR)&ia_ptr->hca_ptr->hca_address; + /* Set the remote address pointer to the end of the EP struct */ + ep_ptr->param.remote_ia_address_ptr = (DAT_IA_ADDRESS_PTR)(ep_ptr + 1); + + /* + * Set up default parameters if the user passed in a NULL + */ + if ( ep_attr == NULL ) + { + dapli_ep_default_attrs (ep_ptr); + } + else + { + ep_ptr->param.ep_attr = *ep_attr; + } + + /* + * IBM OS API specific fields + */ + ep_ptr->qp_handle = IB_INVALID_HANDLE; + ep_ptr->qpn = 0; + ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED; + cl_memclr( &ep_ptr->cm_handle, sizeof(ib_cm_handle_t) ); + ep_ptr->cm_handle.cid = 0xFFFFFFFF; + + ep_ptr->req_count = 0; + ep_ptr->recv_count = 0; + + ep_ptr->recv_discreq = DAT_FALSE; + ep_ptr->sent_discreq = DAT_FALSE; + + /* allocate viol event queue size = max_recv_dtos / 2 */ + if (DAT_SUCCESS != dapls_rbuf_alloc ( + &ep_ptr->viol_event_queue, + ep_ptr->param.ep_attr.max_recv_dtos / 2) ) + { + dapl_ep_dealloc (ep_ptr); + ep_ptr = NULL; + goto bail; + } + ep_ptr->viol_order = DAT_FALSE; + + if ( DAT_SUCCESS != dapls_cb_create ( + &ep_ptr->req_buffer, + ep_ptr, + ep_ptr->param.ep_attr.max_request_dtos) ) + { + dapl_ep_dealloc (ep_ptr); + ep_ptr = NULL; + goto bail; + } + + if ( DAT_SUCCESS != dapls_cb_create ( + &ep_ptr->recv_buffer, + ep_ptr, + ep_ptr->param.ep_attr.max_recv_dtos) ) + { + dapl_ep_dealloc (ep_ptr); + ep_ptr = NULL; + goto bail; + } + + ep_ptr->recv_iov_num = ep_ptr->param.ep_attr.max_recv_iov; + ep_ptr->send_iov_num = ep_ptr->param.ep_attr.max_request_iov; + + ep_ptr->recv_iov = dapl_os_alloc ( + ep_ptr->recv_iov_num * sizeof (ib_data_segment_t)); + + if ( NULL == 
ep_ptr->recv_iov ) + { + dapl_ep_dealloc (ep_ptr); + ep_ptr = NULL; + goto bail; + } + + ep_ptr->send_iov = dapl_os_alloc ( + ep_ptr->send_iov_num * sizeof (ib_data_segment_t)); + + if ( NULL == ep_ptr->send_iov ) + { + dapl_ep_dealloc (ep_ptr); + ep_ptr = NULL; + goto bail; + } + + dapls_io_trc_alloc (ep_ptr); + +bail: + return ep_ptr; +} + + +/* + * dapl_ep_dealloc + * + * Free the passed in EP structure. + * + * Input: + * entry point pointer + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ep_dealloc ( + IN DAPL_EP *ep_ptr ) +{ + dapl_os_assert (ep_ptr->header.magic == DAPL_MAGIC_EP); + + ep_ptr->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */ + + dapls_cb_free ( &ep_ptr->req_buffer ); + dapls_cb_free ( &ep_ptr->recv_buffer ); + + if ( NULL != ep_ptr->recv_iov ) + { + dapl_os_free (ep_ptr->recv_iov, ep_ptr->recv_iov_num * sizeof (ib_data_segment_t)); + } + + if ( NULL != ep_ptr->send_iov ) + { + dapl_os_free (ep_ptr->send_iov, ep_ptr->send_iov_num * sizeof (ib_data_segment_t)); + } + + if ( NULL != ep_ptr->cxn_timer ) + { + dapl_os_free ( ep_ptr->cxn_timer, sizeof ( DAPL_OS_TIMER ) ); + } + + dapl_os_free (ep_ptr, sizeof (DAPL_EP) + sizeof (DAT_SOCK_ADDR) ); +} + + +/* + * dapl_ep_default_attrs + * + * Set default values in the parameter fields + * + * Input: + * entry point pointer + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapli_ep_default_attrs ( + IN DAPL_EP *ep_ptr ) +{ + DAT_EP_ATTR *ep_attr; + + ep_attr = &ep_ptr->param.ep_attr; + /* Set up defaults */ + dapl_os_memzero (ep_attr, sizeof (DAT_EP_ATTR)); + + /* mtu and rdma sizes fixed in IB as per IBTA 1.1, 9.4.3, 9.4.4, 9.7.7. */ + ep_attr->max_mtu_size = 0x80000000; + ep_attr->max_rdma_size = 0x80000000; + + ep_attr->qos = DAT_QOS_BEST_EFFORT; + ep_attr->service_type = DAT_SERVICE_TYPE_RC; + ep_attr->max_recv_dtos = IB_IO_DEFAULT; + ep_attr->max_request_dtos = IB_IO_DEFAULT; + ep_attr->max_recv_iov = IB_IOV_DEFAULT; + ep_attr->max_request_iov = IB_IOV_DEFAULT; + ep_attr->max_rdma_read_in = IB_RDMA_DEFAULT; + ep_attr->max_rdma_read_out= IB_RDMA_DEFAULT; + + /* + * Configure the EP as a standard completion type, which will be + * used by the EVDs. A threshold of 1 is the default state of an + * EVD. + */ + ep_attr->request_completion_flags = DAT_COMPLETION_EVD_THRESHOLD_FLAG; + ep_attr->recv_completion_flags = DAT_COMPLETION_EVD_THRESHOLD_FLAG; + /* + * Unspecified defaults: + * - ep_privileges: No RDMA capabilities + * - num_transport_specific_params: none + * - transport_specific_params: none + * - num_provider_specific_params: 0 + * - provider_specific_params: 0 + */ + + return; +} + + +DAT_RETURN +dapl_ep_check_recv_completion_flags ( + DAT_COMPLETION_FLAGS flags ) +{ + + /* + * InfiniBand will not allow signal suppression for RECV completions, + * see the 1.0.1 spec section 10.7.3.1, 10.8.6. + * N.B. SIGNALLED has a different meaning in dapl than it does + * in IB; IB SIGNALLED is the same as DAPL SUPPRESS. DAPL + * SIGNALLED simply means the user will not get awakened when + * an EVD completes, even though the dapl handler is invoked. 
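+ *
+ * The consumer-visible result of the check below is thus simply
+ * (informal summary, not part of the spec text):
+ *
+ *     flags contains DAT_COMPLETION_SUPPRESS_FLAG -> DAT_INVALID_PARAMETER
+ *     any other DAT_COMPLETION_FLAGS combination  -> DAT_SUCCESS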
+ */ + + if (flags & DAT_COMPLETION_SUPPRESS_FLAG) + { + return DAT_INVALID_PARAMETER; + } + + return DAT_SUCCESS; +} + +DAT_RETURN +dapl_ep_check_request_completion_flags ( + DAT_COMPLETION_FLAGS flags ) +{ + UNREFERENCED_PARAMETER(flags); + return DAT_SUCCESS; +} + + +DAT_RETURN +dapl_ep_post_send_req ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov, + IN DAT_DTO_COOKIE user_cookie, + IN const DAT_RMR_TRIPLET *remote_iov, + IN DAT_COMPLETION_FLAGS completion_flags, + IN DAPL_DTO_TYPE dto_type, + IN ib_send_op_type_t op_type) +{ + DAPL_EP *ep_ptr; + DAPL_COOKIE *cookie = NULL; + DAT_RETURN dat_status; + + if ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP); + goto bail; + } + + ep_ptr = (DAPL_EP *) ep_handle; + + /* + * Synchronization ok since this buffer is only used for send + * requests, which aren't allowed to race with each other. + * Only if completion is expected + */ + if (!(DAT_COMPLETION_SUPPRESS_FLAG & completion_flags)) + { + dat_status = dapls_dto_cookie_alloc ( + &ep_ptr->req_buffer, + dto_type, + user_cookie, + &cookie ); + if ( dat_status != DAT_SUCCESS ) + { + goto bail; + } + dapl_os_atomic_inc (&ep_ptr->req_count); + } + + /* + * Invoke provider specific routine to post DTO + */ + dat_status = dapls_ib_post_send ( ep_ptr, + op_type, + cookie, + num_segments, + local_iov, + remote_iov, + completion_flags ); + + if ( dat_status != DAT_SUCCESS ) + { + if ( cookie != NULL ) + { + dapls_cookie_dealloc (&ep_ptr->req_buffer, cookie); + dapl_os_atomic_dec (&ep_ptr->req_count); + } + } + + bail: + return dat_status; +} + + +/* + * dapli_ep_timeout + * + * If this routine is invoked before a connection occurs, generate an + * event + */ +void +dapls_ep_timeout ( + void *arg ) +{ + DAPL_EP *ep_ptr; + ib_cm_events_t ib_cm_event; + + dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> dapls_ep_timeout! ep %lx\n", arg); + + ep_ptr = (DAPL_EP *)arg; + + /* reset the EP state */ + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + + /* Clean up the EP and put the underlying QP into the ERROR state. + * The disconnect_clean interface requires the provided dependent + *cm event number. + */ + ib_cm_event = dapls_ib_get_cm_event (DAT_CONNECTION_EVENT_DISCONNECTED); + dapls_ib_disconnect_clean ( ep_ptr, + DAT_TRUE, + ib_cm_event ); + + (void) dapls_evd_post_connection_event ( + (DAPL_EVD *)ep_ptr->param.connect_evd_handle, + DAT_CONNECTION_EVENT_TIMED_OUT, + (DAT_HANDLE) ep_ptr, + 0, + 0); +} + + +/* + * dapls_ep_state_subtype + * + * Return the INVALID_STATE connection subtype associated with an + * INVALID_STATE on an EP. Strictly for error reporting. 
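+ *
+ * For example, calling dapl_ep_reset on an EP that is still
+ * DAT_EP_STATE_CONNECTED maps here to the subtype
+ * DAT_INVALID_STATE_EP_CONNECTED, so the consumer learns exactly
+ * which state blocked the request.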
+ */
+DAT_RETURN_SUBTYPE
+dapls_ep_state_subtype(
+ IN DAPL_EP *ep_ptr )
+{
+ DAT_RETURN_SUBTYPE dat_status;
+
+ switch ( ep_ptr->param.ep_state )
+ {
+ case DAT_EP_STATE_UNCONNECTED:
+ {
+ dat_status = DAT_INVALID_STATE_EP_UNCONNECTED;
+ break;
+ }
+ case DAT_EP_STATE_RESERVED:
+ {
+ dat_status = DAT_INVALID_STATE_EP_RESERVED;
+ break;
+ }
+ case DAT_EP_STATE_PASSIVE_CONNECTION_PENDING:
+ {
+ dat_status = DAT_INVALID_STATE_EP_PASSCONNPENDING;
+ break;
+ }
+ case DAT_EP_STATE_ACTIVE_CONNECTION_PENDING:
+ {
+ dat_status = DAT_INVALID_STATE_EP_ACTCONNPENDING;
+ break;
+ }
+ case DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING:
+ {
+ dat_status = DAT_INVALID_STATE_EP_TENTCONNPENDING;
+ break;
+ }
+ case DAT_EP_STATE_CONNECTED:
+ {
+ dat_status = DAT_INVALID_STATE_EP_CONNECTED;
+ break;
+ }
+ case DAT_EP_STATE_DISCONNECT_PENDING:
+ {
+ dat_status = DAT_INVALID_STATE_EP_DISCPENDING;
+ break;
+ }
+ case DAT_EP_STATE_DISCONNECTED:
+ {
+ dat_status = DAT_INVALID_STATE_EP_DISCONNECTED;
+ break;
+ }
+ case DAT_EP_STATE_COMPLETION_PENDING:
+ {
+ dat_status = DAT_INVALID_STATE_EP_COMPLPENDING;
+ break;
+ }
+
+ default:
+ {
+ dat_status = 0;
+ break;
+ }
+ }
+
+ return dat_status;
+}
+
+#ifdef DAPL_DBG_IO_TRC
+/* allocate trace buffer */
+void
+dapls_io_trc_alloc (
+ DAPL_EP *ep_ptr)
+{
+ DAT_RETURN dat_status;
+ int i;
+ struct io_buf_track *ibt;
+
+ ep_ptr->ibt_dumped = 0; /* bool to control how often we print */
+ dat_status = dapls_rbuf_alloc (&ep_ptr->ibt_queue, DBG_IO_TRC_QLEN);
+ if (dat_status != DAT_SUCCESS)
+ {
+ goto bail;
+ }
+ ibt = (struct io_buf_track *)dapl_os_alloc (sizeof (struct io_buf_track) * DBG_IO_TRC_QLEN);
+
+ if (ibt == NULL) /* check the allocation itself, not the stale rbuf status */
+ {
+ dapls_rbuf_destroy (&ep_ptr->ibt_queue);
+ goto bail;
+ }
+ ep_ptr->ibt_base = ibt;
+ dapl_os_memzero (ibt, sizeof (struct io_buf_track) * DBG_IO_TRC_QLEN);
+
+ /* add events to free event queue */
+ for (i = 0; i < DBG_IO_TRC_QLEN; i++)
+ {
+ dapls_rbuf_add (&ep_ptr->ibt_queue, ibt++);
+ }
+bail:
+ return;
+}
+#endif /* DAPL_DBG_IO_TRC */
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.h
new file mode 100644
index 00000000..a1675221
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ep_util.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * HEADER: dapl_ep_util.h + * + * PURPOSE: Utility defs & routines for the EP data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_EP_UTIL_H_ +#define _DAPL_EP_UTIL_H_ + +#include "dapl.h" +#include "dapl_adapter_util.h" + +/* function prototypes */ + +extern DAPL_EP * +dapl_ep_alloc ( + IN DAPL_IA *ia, + IN const DAT_EP_ATTR *ep_attr ); + +extern void +dapl_ep_dealloc ( + IN DAPL_EP *ep_ptr ); + +extern DAT_RETURN +dapl_ep_check_recv_completion_flags ( + DAT_COMPLETION_FLAGS flags ); + +extern DAT_RETURN +dapl_ep_check_request_completion_flags ( + DAT_COMPLETION_FLAGS flags ); + +extern DAT_RETURN +dapl_ep_post_send_req ( + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov, + IN DAT_DTO_COOKIE user_cookie, + IN const DAT_RMR_TRIPLET *remote_iov, + IN DAT_COMPLETION_FLAGS completion_flags, + IN DAPL_DTO_TYPE dto_type, + IN ib_send_op_type_t op_type ); + +void dapls_ep_timeout (void *arg ); + +DAT_RETURN_SUBTYPE +dapls_ep_state_subtype( + IN DAPL_EP *ep_ptr ); + +#endif /* _DAPL_EP_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c new file mode 100644 index 00000000..eeda8a24 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_clear_unwaitable.c @@ -0,0 +1,81 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_clear_unwaitable.c + * + * PURPOSE: EVENT management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3.4.8 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_evd_clear_unwaitable + * + * DAPL Requirements Version 1.1, 6.3.4.8 + * + * Transition the Event Dispatcher into a waitable state + * + * Input: + * evd_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + */ +DAT_RETURN +dapl_evd_clear_unwaitable ( + IN DAT_EVD_HANDLE evd_handle ) +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + + evd_ptr = (DAPL_EVD *)evd_handle; + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) ) + + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + dapl_os_lock ( &evd_ptr->header.lock ); + evd_ptr->evd_waitable = DAT_TRUE; + dapl_os_unlock ( &evd_ptr->header.lock ); + + dat_status = DAT_SUCCESS; + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_connection_callb.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_connection_callb.c new file mode 100644 index 00000000..1510ad09 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_connection_callb.c @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_evd_connection_callback.c + * + * PURPOSE: implements connection callbacks + * + * Description: Accepts asynchronous callbacks from the Communications Manager + * for EVDs that have been specified as the connection_evd. + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_ep_util.h" +#include "dapl_timer_util.h" +#include "dapl_ring_buffer_util.h" + + +/* + * dapl_evd_connection_callback + * + * Connection callback function for ACTIVE connection requests; callbacks + * generated by the Connection Manager in response to issuing a + * connect call. 
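+ *
+ * Informal sketch of the EP state transitions driven below (derived
+ * from the switch in this function, not a normative table):
+ *
+ *     DAT_CONNECTION_EVENT_ESTABLISHED -> DAT_EP_STATE_CONNECTED
+ *     DISCONNECTED, PEER_REJECTED, UNREACHABLE,
+ *     NON_PEER_REJECTED, BROKEN        -> DAT_EP_STATE_DISCONNECTED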
+ * + * Input: + * ib_cm_handle, + * ib_cm_event + * private_data_ptr + * context (evd) + * cr_pp + * + * Output: + * None + * + */ + +void +dapl_evd_connection_callback ( + IN ib_cm_handle_t ib_cm_handle, + IN const ib_cm_events_t ib_cm_event, + IN const void *private_data_ptr, + IN const void *context) +{ + DAPL_EP *ep_ptr; + DAPL_EVD *evd_ptr; + DAPL_PRIVATE *prd_ptr; + DAT_EVENT_NUMBER dat_event_num; + DAT_RETURN dat_status; + int private_data_size; + + + dapl_dbg_log ( + DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "--> dapl_evd_connection_callback: ctxt: %p event: %x cm_handle %d\n", + context, + ib_cm_event, + ib_cm_handle.cid); + DAPL_CNTR(DCNT_EVD_CONN_CALLBACK); + + /* + * Determine the type of handle passed back to us in the context + * and sort out key parameters. + */ + if ( context == NULL || ((DAPL_HEADER *)context)->magic != DAPL_MAGIC_EP) + { + return; + } + + /* + * Active side of the connection, context is an EP and + * PSP is irrelevant. + */ + ep_ptr = (DAPL_EP *) context; + evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle; + + prd_ptr = (DAPL_PRIVATE *)private_data_ptr; + private_data_size = 0; + /* + * All operations effect the EP, so lock it once and unlock + * when necessary + */ + dapl_os_lock (&ep_ptr->header.lock); + + /* + * If a connection timer has been set up on this EP, cancel it now + */ + if ( ep_ptr->cxn_timer != NULL ) + { + dapls_timer_cancel ( ep_ptr->cxn_timer ); + dapl_os_free ( ep_ptr->cxn_timer, sizeof ( DAPL_OS_TIMER ) ); + ep_ptr->cxn_timer = NULL; + } + + /* Obtain the event number from the provider layer */ + dat_event_num = dapls_ib_get_dat_event (ib_cm_event, DAT_TRUE); + + switch (dat_event_num) + { + case DAT_CONNECTION_EVENT_ESTABLISHED: + { + /* If we don't have an EP at this point we are very screwed + * up + */ + if ( ep_ptr->param.ep_state != DAT_EP_STATE_ACTIVE_CONNECTION_PENDING) + { + /* If someone pulled the plug on the connection, just + * exit + */ + dapl_os_unlock ( &ep_ptr->header.lock ); + dat_status = DAT_SUCCESS; + break; + } + ep_ptr->param.ep_state = DAT_EP_STATE_CONNECTED; + ep_ptr->cm_handle = ib_cm_handle; + if (prd_ptr == NULL) + { + private_data_size = 0; + } + else + { + private_data_size = + dapls_ib_private_data_size (prd_ptr, DAPL_PDATA_CONN_REP); + } + + if (private_data_size > 0) + { + /* copy in the private data */ + dapl_os_memcpy ( ep_ptr->private_data, + prd_ptr->private_data, + DAPL_MIN (private_data_size, DAPL_MAX_PRIVATE_DATA_SIZE)); + } + dapl_os_unlock (&ep_ptr->header.lock); + + break; + } + case DAT_CONNECTION_EVENT_DISCONNECTED: + { + /* + * EP is now fully disconnected; initiate any post processing + * to reset the underlying QP and get the EP ready for + * another connection + */ + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + ep_ptr->recv_discreq = DAT_FALSE; + ep_ptr->sent_discreq = DAT_FALSE; + dapls_ib_disconnect_clean (ep_ptr, DAT_TRUE, ib_cm_event); + dapl_os_unlock (&ep_ptr->header.lock); + + break; + } + case DAT_CONNECTION_EVENT_PEER_REJECTED: + { + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapls_ib_disconnect_clean (ep_ptr, DAT_TRUE, ib_cm_event); + dapl_os_unlock (&ep_ptr->header.lock); + + break; + } + case DAT_CONNECTION_EVENT_UNREACHABLE: + { + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapls_ib_disconnect_clean (ep_ptr, DAT_TRUE, ib_cm_event); + dapl_os_unlock (&ep_ptr->header.lock); + + break; + } + case DAT_CONNECTION_EVENT_NON_PEER_REJECTED: + { + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapls_ib_disconnect_clean (ep_ptr, 
DAT_TRUE, ib_cm_event);
+ dapl_os_unlock (&ep_ptr->header.lock);
+
+ break;
+ }
+ case DAT_CONNECTION_EVENT_BROKEN:
+ {
+ /* the EP lock is already held; it was taken once before the switch */
+ ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
+ dapls_ib_disconnect_clean (ep_ptr, DAT_FALSE, ib_cm_event);
+ dapl_os_unlock ( &ep_ptr->header.lock );
+
+ break;
+ }
+ case DAT_CONNECTION_REQUEST_EVENT:
+ default:
+ {
+ dapl_os_unlock (&ep_ptr->header.lock);
+ evd_ptr = NULL;
+
+ dapl_os_assert (0); /* shouldn't happen */
+ break;
+ }
+ }
+
+ /*
+ * Post the event
+ * If the EP has been freed, the evd_ptr will be NULL
+ */
+ if ( evd_ptr != NULL )
+ {
+ dat_status = dapls_evd_post_connection_event (
+ evd_ptr,
+ dat_event_num,
+ (DAT_HANDLE) ep_ptr,
+ private_data_size, /* 0 except for CONNECTED */
+ ep_ptr->private_data );
+
+ if (dat_status != DAT_SUCCESS &&
+ dat_event_num == DAT_CONNECTION_EVENT_ESTABLISHED)
+ {
+ /* We can't tell the user we are connected, something
+ * is wrong locally. Just kill the connection and
+ * reset the state to DISCONNECTED as we don't
+ * expect a callback on an ABRUPT disconnect.
+ */
+ dapls_ib_disconnect (ep_ptr, DAT_CLOSE_ABRUPT_FLAG);
+ dapl_os_lock (&ep_ptr->header.lock);
+ ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED;
+ dapl_os_unlock (&ep_ptr->header.lock);
+ }
+ }
+
+ dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,
+ "dapl_evd_connection_callback () returns\n");
+
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c
new file mode 100644
index 00000000..aba28eaf
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_cq_async_error_callb.c
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_cq_async_error_callback.c
+ *
+ * PURPOSE: implements CQ async callbacks from verbs
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_evd_util.h"
+
+/*
+ * dapl_evd_cq_async_error_callback
+ *
+ * The callback function registered with verbs for cq async errors
+ *
+ * Input:
+ * ib_hca_handle,
+ * error_ptr
+ * context (evd)
+ *
+ * Output:
+ * None
+ *
+ */
+
+void
+dapl_evd_cq_async_error_callback (
+ IN ib_hca_handle_t ib_hca_handle,
+ IN ib_error_record_t *error_ptr,
+ IN void *context)
+
+{
+ DAPL_EVD *async_evd;
+ DAPL_EVD *evd;
+ DAT_RETURN dat_status;
+
+ dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION ,
+ "dapl_evd_cq_async_error_callback (%p, %p, %p)\n",
+ ib_hca_handle,
+ error_ptr,
+ context);
+
+ if ( NULL == context )
+ {
+ dapl_os_panic ("NULL == context\n");
+ }
+
+ evd = (DAPL_EVD *) context;
+ async_evd = evd->header.owner_ia->async_error_evd;
+
+ dat_status = dapls_evd_post_async_error_event(
+ async_evd,
+ DAT_ASYNC_ERROR_EVD_OVERFLOW,
+ (DAT_IA_HANDLE) async_evd->header.owner_ia);
+
+
+ if ( dat_status != DAT_SUCCESS )
+ {
+ dapl_os_panic ("async EVD overflow\n");
+ }
+
+ dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION ,
+ "dapl_evd_cq_async_error_callback () returns\n");
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_create.c
new file mode 100644
index 00000000..c0e16236
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_create.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_create.c
+ *
+ * PURPOSE: EVENT management
+ *
+ * Description: Interfaces in this file are completely defined in
+ * the uDAPL 1.1 API, Chapter 6, section 3
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_evd_util.h"
+
+/*
+ * dapl_evd_create
+ *
+ * DAPL Requirements Version xxx, 6.3.2.1
+ *
+ * Create an instance of Event Dispatcher.
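+ *
+ * A minimal usage sketch (illustrative only; assumes "ia" came from
+ * dat_ia_open and that qlen is within the provider limit checked
+ * below):
+ *
+ *     DAT_EVD_HANDLE evd;
+ *
+ *     dat_status = dapl_evd_create (ia, qlen, DAT_HANDLE_NULL,
+ *                                   DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG,
+ *                                   &evd);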
+ * + * Input: + * ia_handle + * cno_handle + * evd_min_qlen + * evd_flags + * + * Output: + * evd_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ + +/* ** REVISIT ** + * + * Selecting the cqe handing domain must still be done. + * We *probably* want one per hca, but we could have one + * per provider or one per consumer. + */ +/* Note that if there already is a cq, it is not deleted + * even if it is not required. However, it will not be armed. + */ + +DAT_RETURN dapl_evd_create ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_COUNT evd_min_qlen, + IN DAT_CNO_HANDLE cno_handle, + IN DAT_EVD_FLAGS evd_flags, + OUT DAT_EVD_HANDLE *evd_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_EVD *evd_ptr; + DAPL_CNO *cno_ptr; + DAT_RETURN dat_status; + DAT_PROVIDER_ATTR provider_attr; + int i; + int j; + int flag_mask[6]; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_evd_create (%p, %d, %p, 0x%x, %p)\n", + ia_handle, + evd_min_qlen, + cno_handle, + evd_flags, + evd_handle); + DAPL_CNTR(DCNT_EVD_CREATE); + + ia_ptr = (DAPL_IA *)ia_handle; + cno_ptr = (DAPL_CNO *)cno_handle; + evd_ptr = NULL; + *evd_handle = NULL; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (ia_handle, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + + if ( evd_min_qlen <= 0 ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + if ( evd_min_qlen > ia_ptr->hca_ptr->ia_attr.max_evd_qlen ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_TEVD); + goto bail; + } + + if (cno_handle != DAT_HANDLE_NULL + && DAPL_BAD_HANDLE(cno_handle, DAPL_MAGIC_CNO)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_CNO); + goto bail; + } + + /* + * Check the merging attributes to ensure the combination of + * flags requested is supported. + */ + dapl_ia_query (ia_handle, NULL, + 0, NULL, + DAT_PROVIDER_FIELD_ALL, &provider_attr); + + /* Set up an array of flags to compare against; the EVD bits are + * a sparse array that need to be mapped to the merging flags + */ + flag_mask[0] = DAT_EVD_SOFTWARE_FLAG; + flag_mask[1] = DAT_EVD_CR_FLAG; + flag_mask[2] = DAT_EVD_DTO_FLAG; + flag_mask[3] = DAT_EVD_CONNECTION_FLAG; + flag_mask[4] = DAT_EVD_RMR_BIND_FLAG; + flag_mask[5] = DAT_EVD_ASYNC_FLAG; + + for (i = 0; i < 6; i++) + { + if (flag_mask[i] & evd_flags) + { + for (j = 0; j < 6; j++) + { + if (flag_mask[j] & evd_flags) + { + if (provider_attr.evd_stream_merging_supported[i][j] == DAT_FALSE) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG4); + goto bail; + } + } + } /* end for j */ + } + } /* end for i */ + + dat_status = dapls_evd_internal_create (ia_ptr, + cno_ptr, + evd_min_qlen, + evd_flags, + &evd_ptr); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + evd_ptr->evd_state = DAPL_EVD_STATE_OPEN; + + *evd_handle = (DAT_EVD_HANDLE) evd_ptr; + +bail: + if (dat_status != DAT_SUCCESS) + { + if (evd_ptr) + { + dapl_evd_free (evd_ptr); + } + } + + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_evd_create () returns 0x%x\n", + dat_status); + + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dequeue.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dequeue.c new file mode 100644 index 00000000..bc67cab6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dequeue.c @@ -0,0 +1,145 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_evd_dequeue.c + * + * PURPOSE: Event Management + * + * Description: Interfaces in this file are completely described in + * the uDAPL 1.1 API, Chapter 6, section 3 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ring_buffer_util.h" +#include "dapl_evd_util.h" + +/* + * dapl_evd_dequeue + * + * DAPL Requirements Version xxx, 6.3.2.7 + * + * Remove first element from an event dispatcher + * + * Input: + * evd_handle + * + * Output: + * event + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + * DAT_QUEUE_EMPTY + */ + +DAT_RETURN dapl_evd_dequeue ( + IN DAT_EVD_HANDLE evd_handle, + OUT DAT_EVENT *event) + +{ + DAPL_EVD *evd_ptr; + DAT_EVENT *local_event; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_evd_dequeue (%p, %p)\n", + evd_handle, + event); + DAPL_CNTR(DCNT_EVD_DEQUEUE); + + evd_ptr = (DAPL_EVD *)evd_handle; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + + if (event == NULL) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + /* + * We need to dequeue under lock, as the IB OS Access API + * restricts us from having multiple threads in CQ poll, and the + * DAPL 1.1 API allows multiple threads in dat_evd_dequeue() + */ + dapl_os_lock ( &evd_ptr->header.lock ); + + /* + * Make sure there are no other waiters and the evd is active. + * Currently this means only the OPEN state is allowed. + */ + if (evd_ptr->evd_state != DAPL_EVD_STATE_OPEN || + evd_ptr->catastrophic_overflow) + { + dapl_os_unlock ( &evd_ptr->header.lock ); + dat_status = DAT_ERROR (DAT_INVALID_STATE,0); + goto bail; + } + + /* + * Try the EVD rbuf first; poll from the CQ only if that's empty. + * This keeps events in order if dat_evd_wait() has copied events + * from CQ to EVD. 
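+ *
+ * A typical consumer drain loop (illustrative sketch only; assumes
+ * "evd" was returned by dat_evd_create):
+ *
+ *     DAT_EVENT event;
+ *
+ *     while (dapl_evd_dequeue (evd, &event) == DAT_SUCCESS)
+ *     {
+ *         ... dispatch on event.event_number ...
+ *     }
+ *
+ * The loop ends when DAT_QUEUE_EMPTY is returned.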
+ */
+ local_event = (DAT_EVENT *)dapls_rbuf_remove (&evd_ptr->pending_event_queue);
+ if (local_event != NULL)
+ {
+ *event = *local_event;
+ dat_status = dapls_rbuf_add (&evd_ptr->free_event_queue,
+ local_event);
+ DAPL_CNTR(DCNT_EVD_DEQUEUE_FOUND);
+ }
+ else if (evd_ptr->ib_cq_handle != IB_INVALID_HANDLE)
+ {
+ dat_status = dapls_evd_cq_poll_to_event(evd_ptr, event);
+ DAPL_CNTR(DCNT_EVD_DEQUEUE_POLL);
+ }
+ else
+ {
+ dat_status = DAT_ERROR (DAT_QUEUE_EMPTY,0);
+ DAPL_CNTR(DCNT_EVD_DEQUEUE_NOT_FOUND);
+ }
+
+ dapl_os_unlock ( &evd_ptr->header.lock );
+ bail:
+ dapl_dbg_log (DAPL_DBG_TYPE_RTN,
+ "dapl_evd_dequeue () returns 0x%x\n",
+ dat_status);
+
+ return dat_status;
+}
+
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_disable.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_disable.c
new file mode 100644
index 00000000..a35659c6
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_disable.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_disable.c
+ *
+ * PURPOSE: EVENT management
+ *
+ * Description: Interfaces in this file are completely defined in
+ * the uDAPL 1.1 API, Chapter 6, section 3
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+
+/*
+ * dapl_evd_disable
+ *
+ * DAPL Requirements Version xxx, 6.3.2.5
+ *
+ * Disable the Event Dispatcher; a disabled EVD does not notify its CNO
+ *
+ * Input:
+ * evd_handle
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INVALID_HANDLE
+ */
+
+DAT_RETURN dapl_evd_disable (
+ IN DAT_EVD_HANDLE evd_handle)
+{
+ DAPL_EVD *evd_ptr;
+ DAT_RETURN dat_status;
+
+ evd_ptr = (DAPL_EVD *)evd_handle;
+ dat_status = DAT_SUCCESS;
+
+ if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) )
+
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0);
+ goto bail;
+ }
+
+ evd_ptr->evd_enabled = DAT_FALSE;
+
+bail:
+ return dat_status;
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dto_callb.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dto_callb.c
new file mode 100644
index 00000000..76a38a1f
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_dto_callb.c
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_evd_dto_callback.c + * + * PURPOSE: implements DTO callbacks from verbs + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_cno_util.h" +#include "dapl_cookie.h" +#include "dapl_adapter_util.h" + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + + +/********************************************************************* + * * + * Function Definitions * + * * + *********************************************************************/ + +/* + * dapl_evd_dto_callback + * + * Input: + * hca_handle_in, + * cq_handle_in, + * user_context_cq_p + * + * Output: + * none + * + * This is invoked for both DTO and MW bind completions. Strictly + * speaking it is an event callback rather than just a DTO callback. + * + */ + +void +dapl_evd_dto_callback ( + IN ib_hca_handle_t hca_handle, + IN ib_cq_handle_t cq_handle, + IN void* user_context) +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + DAPL_EVD_STATE state; + + dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK, + "dapl_evd_dto_callback(%p, %p, %p)\n", + hca_handle, + cq_handle, + user_context); + DAPL_CNTR(DCNT_EVD_DTO_CALLBACK); + + evd_ptr = (DAPL_EVD *) user_context; + + dapl_os_assert (hca_handle == evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle); + dapl_os_assert (evd_ptr->ib_cq_handle == cq_handle); + dapl_os_assert (evd_ptr->header.magic == DAPL_MAGIC_EVD); + + /* Read once. */ + state = *(volatile DAPL_EVD_STATE *) &evd_ptr->evd_state; + + dapl_dbg_log (DAPL_DBG_TYPE_EVD, + "--> dapl_evd_dto_callback: CQ %p, state %x\n", + (void *)evd_ptr->ib_cq_handle, + state); + + /* + * This function does not dequeue from the CQ; only the consumer + * can do that. Instead, it wakes up waiters if any exist. + * It rearms the completion only if completions should always occur + * (specifically if a CNO is associated with the EVD and the + * EVD is enabled. + */ + + if (state == DAPL_EVD_STATE_WAITED) + { + /* + * If we could, it would be best to avoid this wakeup + * (and the context switch) unless the number of events/CQs + * waiting for the waiter was its threshold. 
We don't + * currently have the ability to determine that without + * dequeueing the events, and we can't do that for + * synchronization reasons (racing with the waiter waking + * up and dequeuing, sparked by other callbacks). + */ + + /* + * We don't need to worry about taking the lock for the + * wakeup because wakeups are sticky. + */ + if (evd_ptr->cq_wait_obj_handle) + { + dapls_ib_wait_object_wakeup(evd_ptr->cq_wait_obj_handle); + } + else + { + dapl_os_wait_object_wakeup(&evd_ptr->wait_object); + } + } + else if (state == DAPL_EVD_STATE_OPEN) + { + DAPL_CNO *cno = evd_ptr->cno_ptr; + if (evd_ptr->evd_enabled && (evd_ptr->cno_ptr != NULL)) + { + /* + * Re-enable callback, *then* trigger. + * This guarantees we won't miss any events. + */ + dat_status = dapls_ib_completion_notify (hca_handle, + evd_ptr->ib_cq_handle, + IB_NOTIFY_ON_NEXT_COMP); + + if ( DAT_SUCCESS != dat_status ) + { + (void) dapls_evd_post_async_error_event( + evd_ptr->header.owner_ia->async_error_evd, + DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR, + (DAT_IA_HANDLE) evd_ptr->header.owner_ia); + } + + dapl_cno_trigger(cno, evd_ptr); + } + } + dapl_dbg_log (DAPL_DBG_TYPE_RTN, "dapl_evd_dto_callback () returns\n"); +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_enable.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_enable.c new file mode 100644 index 00000000..6e2758e7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_enable.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_enable.c
+ *
+ * PURPOSE: EVENT management
+ *
+ * Description: Interfaces in this file are completely defined in
+ * the uDAPL 1.1 API, Chapter 6, section 3
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * dapl_evd_enable
+ *
+ * DAPL Requirements Version xxx, 6.3.2.5
+ *
+ * Enable the Event Dispatcher so that it again notifies its CNO
+ *
+ * Input:
+ * evd_handle
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INVALID_HANDLE
+ */
+
+DAT_RETURN dapl_evd_enable (
+ IN DAT_EVD_HANDLE evd_handle)
+{
+ DAPL_EVD *evd_ptr;
+ DAT_RETURN dat_status;
+
+ evd_ptr = (DAPL_EVD *)evd_handle;
+ dat_status = DAT_SUCCESS;
+
+ if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) )
+
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0);
+ goto bail;
+ }
+
+ evd_ptr->evd_enabled = DAT_TRUE;
+
+ /* We need to enable the callback handler if there is a CNO. */
+ if (evd_ptr->cno_ptr != NULL &&
+ evd_ptr->ib_cq_handle != IB_INVALID_HANDLE )
+ {
+ dat_status = dapls_ib_completion_notify (
+ evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle,
+ evd_ptr->ib_cq_handle,
+ IB_NOTIFY_ON_NEXT_COMP);
+
+ /* FIXME report error */
+ dapl_os_assert(dat_status == DAT_SUCCESS);
+ }
+
+bail:
+ return dat_status;
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_free.c
new file mode 100644
index 00000000..3835a083
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_free.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_free.c + * + * PURPOSE: Event management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_ia_util.h" +#include "dapl_cno_util.h" /* for __KDAPL__ */ + +/* + * dapl_evd_free + * + * DAPL Requirements Version xxx, 6.3.2.2 + * + * Destroy a specific instance of the Event Dispatcher + * + * Input: + * evd_handle + * + * Output: + * None + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_STATE + */ +DAT_RETURN dapl_evd_free ( + IN DAT_EVD_HANDLE evd_handle) + +{ + DAPL_EVD *evd_ptr; + DAPL_CNO *cno_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_evd_free (%p)\n", evd_handle); + DAPL_CNTR (DCNT_EVD_FREE); + + dat_status = DAT_SUCCESS; + evd_ptr = (DAPL_EVD *)evd_handle; + + if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, 0); + goto bail; + } + + if (evd_ptr->evd_ref_count != 0) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_EVD_IN_USE); + goto bail; + } + + /* obtain the cno_ptr before the evd is released, which must occur + * before deallocating the CNO + */ + cno_ptr = evd_ptr->cno_ptr; + + dapl_ia_unlink_evd (evd_ptr->header.owner_ia, evd_ptr); + + dat_status = dapls_evd_dealloc (evd_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_ia_link_evd (evd_ptr->header.owner_ia, evd_ptr); + } + + if (cno_ptr != NULL) + { + if (cno_ptr->cno_ref_count == 0 && cno_ptr->cno_waiters > 0) + { + /* + * Last reference on the CNO, trigger a notice. See + * uDAPL 1.1 spec 6.3.2.3 + */ + dapl_cno_trigger (cno_ptr, NULL); + } + } + +bail: + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_evd_free () returns 0x%x\n", + dat_status); + + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_modify_cno.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_modify_cno.c new file mode 100644 index 00000000..88ede7dc --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_modify_cno.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_modify_cno.c
+ *
+ * PURPOSE: Event Management
+ *
+ * Description: Interfaces in this file are completely described in
+ * the DAPL 1.1 API, Chapter 6, section 3
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * dapl_evd_modify_cno
+ *
+ * DAPL Requirements Version xxx, 6.3.2.4
+ *
+ * Modify the CNO associated with the EVD
+ *
+ * Input:
+ * evd_handle
+ * cno_handle
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INVALID_HANDLE
+ */
+
+DAT_RETURN dapl_evd_modify_cno (
+ IN DAT_EVD_HANDLE evd_handle,
+ IN DAT_CNO_HANDLE cno_handle)
+
+
+{
+ DAPL_EVD *evd_ptr;
+ DAPL_CNO *cno_ptr;
+ DAPL_CNO *old_cno_ptr;
+ DAT_RETURN dat_status;
+
+ evd_ptr = (DAPL_EVD *)evd_handle;
+ cno_ptr = (DAPL_CNO *)cno_handle;
+ dat_status = DAT_SUCCESS;
+
+ if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD))
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE, 0);
+ goto bail;
+ }
+ if (cno_handle != NULL &&
+ DAPL_BAD_HANDLE (cno_handle, DAPL_MAGIC_CNO))
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_CNO);
+ goto bail;
+ }
+ dapl_os_lock (&evd_ptr->header.lock);
+ old_cno_ptr = evd_ptr->cno_ptr;
+ evd_ptr->cno_ptr = cno_ptr;
+ dapl_os_unlock (&evd_ptr->header.lock);
+ if (cno_ptr)
+ {
+ dapl_os_atomic_inc ( & (cno_ptr->cno_ref_count) );
+ }
+ if (old_cno_ptr)
+ {
+ dapl_os_atomic_dec ( & (old_cno_ptr->cno_ref_count) );
+ }
+
+ /* We need to enable the callback handler if the EVD is enabled. */
+ if (evd_ptr->evd_enabled &&
+ cno_handle != DAT_HANDLE_NULL &&
+ evd_ptr->ib_cq_handle != IB_INVALID_HANDLE)
+ {
+ dat_status = dapls_ib_completion_notify (
+ evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle,
+ evd_ptr->ib_cq_handle,
+ IB_NOTIFY_ON_NEXT_COMP);
+
+ /* FIXME report error */
+ dapl_os_assert (dat_status == DAT_SUCCESS);
+ }
+
+bail:
+ return dat_status;
+}
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_post_se.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_post_se.c
new file mode 100644
index 00000000..05614fb4
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_post_se.c
@@ -0,0 +1,103 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_post_se.c + * + * PURPOSE: Event Management + * + * Description: Interfaces in this file are completely defined in + * the uDAPL 1.1 API, Chapter 6, section 3 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_evd_post_se + * + * DAPL Requirements Version xxx, 6.3.2.7 + * + * Post a software event to the Event Dispatcher event queue. + * + * Input: + * evd_handle + * event + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ + + +DAT_RETURN dapl_evd_post_se ( + DAT_EVD_HANDLE evd_handle, + const DAT_EVENT *event) + +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + + evd_ptr = (DAPL_EVD *)evd_handle; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + /* Only post to EVDs that are specific to software events */ + if ( !(evd_ptr->evd_flags & DAT_EVD_SOFTWARE_FLAG) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG1); + goto bail; + } + + if (!event) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + if (event->event_number != DAT_SOFTWARE_EVENT) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + dat_status = dapls_evd_post_software_event( + evd_ptr, + DAT_SOFTWARE_EVENT, + event->event_data.software_event_data.pointer); + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c new file mode 100644 index 00000000..70425530 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_qp_async_error_callb.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_qp_async_error_callback.c
+ *
+ * PURPOSE: implements QP callbacks from verbs
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_evd_util.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * dapl_evd_qp_async_error_callback
+ *
+ * The callback function registered with verbs for qp async errors
+ *
+ * Maps to ib_async_handler_t as follows:
+ *
+ * typedef void (*ib_async_handler_t)(
+ * IN ib_hca_handle_t ib_hca_handle,
+ * IN ib_error_record_t *err_code,
+ * IN void *context);
+ *
+ * Output:
+ * None
+ *
+ */
+
+void
+dapl_evd_qp_async_error_callback (
+ IN ib_hca_handle_t ib_hca_handle,
+ IN ib_error_record_t *err_code,
+ IN void *context)
+
+{
+ /*
+ * This is an affiliated error and hence should be able to
+ * supply us with exact information on the error type and QP.
+ *
+ * However IB vendor APIs for registering this callback
+ * are different.
+ *
+ * Therefore we always specify the context as the asynchronous EVD
+ * to be compatible with all APIs.
+ */
+
+ DAPL_IA *ia_ptr;
+ DAPL_EP *ep_ptr;
+ DAPL_EVD *async_evd;
+ DAT_EVENT_NUMBER async_event;
+ DAT_RETURN dat_status;
+ UNREFERENCED_PARAMETER(ib_hca_handle);
+
+ ep_ptr = (DAPL_EP *) context;
+ ia_ptr = ep_ptr->header.owner_ia;
+ async_evd = (DAPL_EVD *) ia_ptr->async_error_evd;
+
+ dapl_dbg_log (
+ DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION,
+ "--> dapl_evd_qp_async_error_callback: ep %p qp %p (%x) state %d\n",
+ ep_ptr,
+ ep_ptr->qp_handle,
+ ep_ptr->qpn,
+ ep_ptr->param.ep_state);
+
+ /*
+ * Transition to ERROR if we are connected; other states need to
+ * complete first (e.g. pending states)
+ */
+ if ( ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED)
+ {
+ ep_ptr->param.ep_state = DAT_EP_STATE_ERROR;
+ }
+
+ dapl_os_assert (async_evd != NULL);
+
+ dat_status = dapls_ib_get_async_event(err_code, &async_event);
+ if ( dat_status == DAT_SUCCESS )
+ {
+ /*
+ * If dapls_ib_get_async_event is not successful,
+ * an event has been generated by the provider that
+ * we are not interested in.
+ */
+ (void) dapls_evd_post_async_error_event( async_evd,
+ async_event,
+ async_evd->header.owner_ia);
+ }
+ dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION,
+ "dapl_evd_qp_async_error_callback () returns\n");
+}
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_query.c
new file mode 100644
index 00000000..5aa596bc
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_query.c
@@ -0,0 +1,117 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
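The QP async-error callback above only posts to the IA's asynchronous EVD; draining that EVD is the consumer's job. A sketch of the consuming side (async_evd is assumed to be the async-error EVD supplied at dat_ia_open time; the event numbers are standard DAT 1.1 values):

    #include <dat/udat.h>

    static void drain_async_errors (DAT_EVD_HANDLE async_evd)
    {
        DAT_EVENT ev;

        /* dat_evd_dequeue never blocks; it returns DAT_QUEUE_EMPTY
         * once nothing is pending. */
        while (dat_evd_dequeue (async_evd, &ev) == DAT_SUCCESS)
        {
            if (ev.event_number == DAT_ASYNC_ERROR_EP_BROKEN)
            {
                /* tear down or reconnect the affected EP here */
            }
        }
    }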
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_evd_query.c + * + * PURPOSE: Event management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_evd_query + * + * DAPL Requirements Version xxx, 6.3.2.3 + * + * Provides the consumer with arguments of the Event Dispatcher. + * + * Input: + * evd_handle + * evd_mask + * + * Output: + * evd_param + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_evd_query ( + IN DAT_EVD_HANDLE evd_handle, + IN DAT_EVD_PARAM_MASK evd_param_mask, + OUT DAT_EVD_PARAM *evd_param ) +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + if ( NULL == evd_param ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + /* Note: the spec. allows for events to be directed to a NULL EVD */ + /* with handle of type DAT_HANDLE_NULL. See 6.3.1 */ + if ( DAT_HANDLE_NULL == evd_handle ) + { + dapl_os_memzero (evd_param, sizeof (DAT_EVD_PARAM)); + } + else + { + if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + + evd_ptr = (DAPL_EVD *) evd_handle; + + /* + * We may be racing against the thread safe modify + * calls here (dat_evd_{enable,disable,{set,clear}_unwaitable}). + * They are thread safe, so our reads need to be atomic with + * regard to those calls. The below is ok (a single bit + * read counts as atomic; if it's in transition you'll get one + * of the correct values) but we'll need to be careful + * about reading the state variable atomically when we add + * in waitable/unwaitable. + */ + if (evd_param_mask & DAT_EVD_FIELD_ALL ) + { + evd_param->evd_state = + (evd_ptr->evd_enabled ? DAT_EVD_STATE_ENABLED : DAT_EVD_STATE_DISABLED); + evd_param->evd_state |= + (evd_ptr->evd_waitable ? DAT_EVD_STATE_WAITABLE : DAT_EVD_STATE_UNWAITABLE); + evd_param->ia_handle = evd_ptr->header.owner_ia; + evd_param->evd_qlen = evd_ptr->qlen; + evd_param->cno_handle = (DAT_CNO_HANDLE) evd_ptr->cno_ptr; + evd_param->evd_flags = evd_ptr->evd_flags; + } + } + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_resize.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_resize.c new file mode 100644 index 00000000..f183a3e8 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_resize.c @@ -0,0 +1,193 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
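The query path above packs two orthogonal state bits into evd_param->evd_state (enabled/disabled OR'd with waitable/unwaitable), so a caller tests them as flags. A consumer-side sketch using the standard uDAPL entry point:

    #include <dat/udat.h>

    static void show_evd_state (DAT_EVD_HANDLE evd)
    {
        DAT_EVD_PARAM param;

        if (dat_evd_query (evd, DAT_EVD_FIELD_ALL, &param) != DAT_SUCCESS)
            return;

        /* Both bits are set by dapl_evd_query above. */
        if ((param.evd_state & DAT_EVD_STATE_ENABLED) &&
            (param.evd_state & DAT_EVD_STATE_WAITABLE))
        {
            /* safe to block in dat_evd_wait() on this EVD */
        }
    }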
The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_evd_resize.c
+ *
+ * PURPOSE: EVENT management
+ *
+ * Description: Interfaces in this file are completely defined in
+ * the uDAPL 1.1 API, Chapter 6, section 3
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_evd_util.h"
+#include "dapl_adapter_util.h"
+#include "dapl_ring_buffer_util.h"
+
+/*
+ * dapl_evd_resize
+ *
+ * DAPL Requirements Version xxx, 6.3.2.5
+ *
+ * Modify the size of the event queue of an Event Dispatcher
+ *
+ * Input:
+ * evd_handle
+ * evd_qlen
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INVALID_PARAMETER
+ * DAT_INSUFFICIENT_RESOURCES
+ * DAT_INVALID_STATE
+ */
+
+DAT_RETURN dapl_evd_resize (
+ IN DAT_EVD_HANDLE evd_handle,
+ IN DAT_COUNT evd_qlen )
+{
+
+ DAPL_EVD *evd_ptr;
+ DAT_EVENT *event_ptr;
+ DAT_EVENT *events;
+ DAT_EVENT *orig_event;
+ DAPL_RING_BUFFER free_event_queue;
+ DAPL_RING_BUFFER pending_event_queue;
+ DAT_COUNT i;
+ DAT_COUNT pend_cnt;
+
+ evd_ptr = (DAPL_EVD *) evd_handle;
+
+ dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_evd_resize (%p, %d)\n",
+ evd_handle, evd_qlen);
+
+ if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD))
+ {
+ return DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG1);
+ }
+ if (evd_qlen <= 0)
+ {
+ return DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2);
+ }
+
+ dapl_os_lock(&evd_ptr->header.lock);
+
+ if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED)
+ {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INVALID_STATE,0);
+ }
+
+ pend_cnt = dapls_rbuf_count(&evd_ptr->pending_event_queue);
+ if (pend_cnt > evd_qlen) {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INVALID_STATE,0);
+ }
+
+ if (DAT_SUCCESS != dapls_ib_cq_resize(evd_ptr->header.owner_ia,
+ evd_ptr,
+ &evd_qlen)) {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ }
+
+ /* Allocate EVENTs */
+ events = (DAT_EVENT *) dapl_os_alloc (evd_qlen * sizeof (DAT_EVENT));
+ if (!events)
+ {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ }
+ event_ptr = events;
+
+ /* allocate free event queue */
+
+ if (DAT_SUCCESS != dapls_rbuf_alloc (&free_event_queue, evd_qlen))
+ {
+ dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT));
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ }
+
+ /* allocate pending event queue */
+ if (DAT_SUCCESS != dapls_rbuf_alloc (&pending_event_queue, evd_qlen))
+ {
+ dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT));
+ dapl_os_unlock(&evd_ptr->header.lock);
+ return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ }
+
+ for (i = 0; i < pend_cnt; i++) {
+ orig_event = dapls_rbuf_remove(&evd_ptr->pending_event_queue);
+ if (orig_event == NULL) {
+ dapl_dbg_log (DAPL_DBG_TYPE_ERR, " Inconsistent event
queue\n"); + dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT)); + dapl_os_unlock(&evd_ptr->header.lock); + return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + } + memcpy(event_ptr, orig_event, sizeof(DAT_EVENT)); + if (DAT_SUCCESS != dapls_rbuf_add(&pending_event_queue, event_ptr)) { + dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT)); + dapl_os_unlock(&evd_ptr->header.lock); + return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + } + event_ptr++; + } + + for (i = pend_cnt; i < evd_qlen; i++) + { + if (DAT_SUCCESS != dapls_rbuf_add(&free_event_queue, + (void *) event_ptr)) { + dapl_os_free(event_ptr, evd_qlen * sizeof (DAT_EVENT)); + dapl_os_unlock(&evd_ptr->header.lock); + return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + } + event_ptr++; + } + + dapls_rbuf_destroy (&evd_ptr->free_event_queue); + dapls_rbuf_destroy (&evd_ptr->pending_event_queue); + if (evd_ptr->events) + { + dapl_os_free (evd_ptr->events, evd_ptr->qlen * sizeof (DAT_EVENT)); + } + evd_ptr->free_event_queue = free_event_queue; + evd_ptr->pending_event_queue = pending_event_queue; + evd_ptr->events = events; + evd_ptr->qlen = evd_qlen; + + dapl_os_unlock(&evd_ptr->header.lock); + + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_evd_resize returns SUCCESS\n"); + + return DAT_SUCCESS; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c new file mode 100644 index 00000000..f2fea18f --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_set_unwaitable.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_set_unwaitable.c + * + * PURPOSE: EVENT management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 3.4.7 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" +/* + * dapl_evd_set_unwaitable + * + * DAPL Requirements Version 1.1, 6.3.4.7 + * + * Transition the Event Dispatcher into an unwaitable state + * + * Input: + * evd_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + */ +DAT_RETURN +dapl_evd_set_unwaitable ( + IN DAT_EVD_HANDLE evd_handle ) +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + + evd_ptr = (DAPL_EVD *)evd_handle; + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) ) + + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + dapl_os_lock ( &evd_ptr->header.lock ); + evd_ptr->evd_waitable = DAT_FALSE; + dapl_os_unlock ( &evd_ptr->header.lock ); + + /* + * If this evd is waiting, wake it up. There is an obvious race + * condition here where we may wakeup the waiter before it goes to + * sleep; but the wait_object allows this and will do the right + * thing. + */ + if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED) + { + if (evd_ptr->cq_wait_obj_handle) + { + dapls_ib_wait_object_wakeup (evd_ptr->cq_wait_obj_handle); + } + else + { + dapl_os_wait_object_wakeup (&evd_ptr->wait_object); + } + } +bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c new file mode 100644 index 00000000..c55a280a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_un_async_error_callb.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
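A typical use of dapl_evd_set_unwaitable above is shutdown: one thread marks the EVD unwaitable to kick another thread out of a blocking wait. Per the wakeup path above and the state checks in dapl_evd_wait later in this patch, the waiter returns with DAT_INVALID_STATE / DAT_INVALID_STATE_EVD_UNWAITABLE. A sketch:

    #include <dat/udat.h>

    static void cancel_waiter (DAT_EVD_HANDLE evd)
    {
        /* wakes any thread blocked in dat_evd_wait() on this EVD */
        (void) dat_evd_set_unwaitable (evd);

        /* later, dat_evd_clear_unwaitable (evd) re-arms waiting */
    }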
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_un_async_error_callback.c + * + * PURPOSE: implements Unaffiliated callbacks from verbs + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_evd_un_async_error_callback + * + * The callback function registered with verbs for unaffiliated async errors + * + * Input: + * ib_hca_handle, + * error_ptr + * context (async_evd) + * + * Output: + * None + * + */ + +void +dapl_evd_un_async_error_callback ( + IN ib_hca_handle_t ib_hca_handle, + IN ib_error_record_t *error_ptr, + IN void *context) + +{ + DAPL_EVD *async_evd; + DAT_EVENT_NUMBER async_event; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION, + "dapl_evd_un_async_error_callback (%p, %p, %p)\n", + ib_hca_handle, + error_ptr, + context); + + if ( NULL == context ) + { + dapl_os_panic ("NULL == context\n"); + return; + } + + async_evd = (DAPL_EVD *) context; + + dat_status = dapls_ib_get_async_event(error_ptr, &async_event); + + if ( dat_status == DAT_SUCCESS ) + { + /* + * If dapls_ib_get_async_event is not successful, + * an event has been generated by the provider that + * we are not interested in. E.g. LINK_UP. + */ + dapls_evd_post_async_error_event( async_evd, + async_event, + async_evd->header.owner_ia); + } + dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK | DAPL_DBG_TYPE_EXCEPTION, + "dapl_evd_un_async_error_callback () returns\n"); +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.c new file mode 100644 index 00000000..a5cf340f --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.c @@ -0,0 +1,1457 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_evd_util.c + * + * PURPOSE: Manage EVD Info structure + * + * $Id$ + **********************************************************************/ + +#include "dapl_evd_util.h" +#include "dapl_ia_util.h" +#include "dapl_cno_util.h" +#include "dapl_ring_buffer_util.h" +#include "dapl_adapter_util.h" +#include "dapl_cookie.h" +#include "dapl.h" + +STATIC _INLINE_ void dapli_evd_eh_print_cqe ( + IN ib_work_completion_t cqe); + +DAT_RETURN dapli_evd_event_alloc ( + IN DAPL_EVD *evd_ptr, + IN DAPL_CNO *cno_ptr, + IN DAT_COUNT qlen); + + +/* + * dapls_evd_internal_create + * + * actually create the evd. this is called after all parameter checking + * has been performed in dapl_ep_create. it is also called from dapl_ia_open + * to create the default async evd. + * + * Input: + * ia_ptr + * cno_ptr + * qlen + * evd_flags + * + * Output: + * evd_ptr_ptr + * + * Returns: + * none + * + */ + +DAT_RETURN +dapls_evd_internal_create ( + DAPL_IA *ia_ptr, + DAPL_CNO *cno_ptr, + DAT_COUNT min_qlen, + DAT_EVD_FLAGS evd_flags, + DAPL_EVD **evd_ptr_ptr) +{ + DAPL_EVD *evd_ptr; + DAT_COUNT cq_len; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + *evd_ptr_ptr = NULL; + cq_len = min_qlen; + + evd_ptr = dapls_evd_alloc (ia_ptr, + cno_ptr, + evd_flags, + min_qlen); + if (!evd_ptr) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + /* + * If we are dealing with event streams besides a CQ event stream, + * be conservative and set producer side locking. Otherwise, no. + */ + evd_ptr->evd_producer_locking_needed = + ((evd_flags & ~ (DAT_EVD_DTO_FLAG|DAT_EVD_RMR_BIND_FLAG)) != 0); + + /* Before we setup any callbacks, transition state to OPEN. */ + evd_ptr->evd_state = DAPL_EVD_STATE_OPEN; + + if (evd_flags & DAT_EVD_ASYNC_FLAG) + { + /* + * There is no cq associate with async evd. Set it to invalid + */ + evd_ptr->ib_cq_handle = IB_INVALID_HANDLE; + + } + else if ( 0 != (evd_flags & ~ (DAT_EVD_SOFTWARE_FLAG + | DAT_EVD_CONNECTION_FLAG + | DAT_EVD_CR_FLAG) ) ) + { + +#if defined(_VENDOR_IBAL_) + + /* + * The creation of CQ required a PD (PZ) associated with it and + * we do not have a PD here; therefore, the work-around is that we + * will postpone the creation of the cq till the creation of QP which + * this cq will associate with. + */ + evd_ptr->ib_cq_handle = IB_INVALID_HANDLE; + +#else + + dat_status = dapls_ib_cq_alloc (ia_ptr, + evd_ptr, + &cq_len); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + dat_status = dapls_ib_setup_async_callback (ia_ptr, + DAPL_ASYNC_CQ_COMPLETION, + (unsigned int *) evd_ptr->ib_cq_handle, + (ib_async_handler_t)dapl_evd_dto_callback, + evd_ptr); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + dat_status = dapls_set_cq_notify (ia_ptr, evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + +#endif /* _VENDOR_IBAL_ */ + + } + + /* We now have an accurate count of events, so allocate them into + * the EVD + */ + dat_status = dapli_evd_event_alloc (evd_ptr, cno_ptr, cq_len); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + /* We're assuming success in the following. 
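The evd_producer_locking_needed rule in dapls_evd_internal_create above is worth restating on its own: only EVDs fed exclusively by CQ completions (DTO and/or RMR-bind streams) have a single producer, the CQ callback, and can skip producer-side locking; any other stream mix can be fed from several threads at once. The same predicate as a standalone helper (illustrative restatement only):

    #include <dat/udat.h>

    /* Mirrors the expression assigned in dapls_evd_internal_create. */
    static int evd_needs_producer_lock (DAT_EVD_FLAGS evd_flags)
    {
        return (evd_flags & ~(DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG)) != 0;
    }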
*/ + dapl_os_assert (dat_status == DAT_SUCCESS); + dapl_ia_link_evd (ia_ptr, evd_ptr); + *evd_ptr_ptr = evd_ptr; + +bail: + if (dat_status != DAT_SUCCESS) + { + if (evd_ptr) + { + dapls_evd_dealloc (evd_ptr); + } + } + + return dat_status; +} + +/* + * dapls_evd_alloc + * + * alloc and initialize an EVD struct + * + * Input: + * ia + * + * Output: + * evd_ptr + * + * Returns: + * none + * + */ +DAPL_EVD * +dapls_evd_alloc ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_ptr, + IN DAT_EVD_FLAGS evd_flags, + IN DAT_COUNT qlen) +{ + DAPL_EVD *evd_ptr; + UNREFERENCED_PARAMETER(cno_ptr); + evd_ptr = NULL; + + /* Allocate EVD */ + evd_ptr = (DAPL_EVD *)dapl_os_alloc (sizeof (DAPL_EVD)); + if (!evd_ptr) + { + goto bail; + } + + /* zero the structure */ + dapl_os_memzero (evd_ptr, sizeof (DAPL_EVD)); + + /* + * initialize the header + */ + evd_ptr->header.provider = ia_ptr->header.provider; + evd_ptr->header.magic = DAPL_MAGIC_EVD; + evd_ptr->header.handle_type = DAT_HANDLE_TYPE_EVD; + evd_ptr->header.owner_ia = ia_ptr; + evd_ptr->header.user_context.as_64 = 0; + evd_ptr->header.user_context.as_ptr = NULL; + dapl_llist_init_entry (&evd_ptr->header.ia_list_entry); + dapl_os_lock_init (&evd_ptr->header.lock); + + /* + * Initialize the body + */ + evd_ptr->evd_state = DAPL_EVD_STATE_INITIAL; + evd_ptr->evd_flags = evd_flags; + evd_ptr->evd_enabled = DAT_TRUE; + evd_ptr->evd_waitable = DAT_TRUE; + evd_ptr->evd_producer_locking_needed = 1;/* Conservative value. */ + evd_ptr->ib_cq_handle = IB_INVALID_HANDLE; + evd_ptr->evd_ref_count = 0; + evd_ptr->catastrophic_overflow = DAT_FALSE; + evd_ptr->qlen = qlen; + evd_ptr->completion_type = DAPL_EVD_STATE_THRESHOLD; /* FIXME: should be DAPL_EVD_STATE_INIT */ + dapl_os_wait_object_init (&evd_ptr->wait_object); + evd_ptr->cq_wait_obj_handle = 0; + //dapls_ib_wait_object_create (&evd_ptr->cq_wait_obj_handle); + +bail: + return evd_ptr; +} + + +/* + * dapls_evd_event_alloc + * + * alloc events into an EVD. + * + * Input: + * evd_ptr + * qlen + * + * Output: + * NONE + * + * Returns: + * DAT_SUCCESS + * ERROR + * + */ +DAT_RETURN +dapli_evd_event_alloc ( + IN DAPL_EVD *evd_ptr, + IN DAPL_CNO *cno_ptr, + IN DAT_COUNT qlen) +{ + DAT_EVENT *event_ptr; + DAT_COUNT i; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + event_ptr = NULL; + + /* Allocate EVENTs */ + event_ptr = (DAT_EVENT *) dapl_os_alloc (evd_ptr->qlen * sizeof (DAT_EVENT)); + if (!event_ptr) + { + goto bail; + } + evd_ptr->events = event_ptr; + + /* allocate free event queue */ + dat_status = dapls_rbuf_alloc (&evd_ptr->free_event_queue, qlen); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + /* allocate pending event queue */ + dat_status = dapls_rbuf_alloc (&evd_ptr->pending_event_queue, qlen); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + /* add events to free event queue */ + for (i = 0; i < evd_ptr->qlen; i++) + { + dapls_rbuf_add (&evd_ptr->free_event_queue, (void *)event_ptr); + event_ptr++; + } + + evd_ptr->cq_notified = DAT_FALSE; + evd_ptr->cq_notified_when = 0; + evd_ptr->cno_active_count = 0; + if ( cno_ptr != NULL ) + { + /* Take a reference count on the CNO */ + dapl_os_atomic_inc ( &cno_ptr->cno_ref_count ); + } + evd_ptr->cno_ptr = cno_ptr; + evd_ptr->threshold = 0; + + bail: + return dat_status; +} + +/* + * dapls_evd_event_realloc + * + * realloc events into an EVD. 
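dapli_evd_event_alloc above sets up the two-ring scheme used throughout this file: one array of DAT_EVENTs, a free ring holding pointers to unused slots, and a pending ring holding filled ones. A toy model of the mechanism (fixed power-of-two capacity, zero-initialized rings; the real code uses the dapls_rbuf_* ring buffers):

    #include <stddef.h>

    #define QCAP 8 /* must be a power of two */

    struct ring { void *slot[QCAP]; unsigned head, tail; };

    static int ring_add (struct ring *r, void *p)
    {
        if (r->tail - r->head == QCAP)
            return -1;                          /* full */
        r->slot[r->tail++ & (QCAP - 1)] = p;
        return 0;
    }

    static void *ring_remove (struct ring *r)
    {
        if (r->head == r->tail)
            return NULL;                        /* empty */
        return r->slot[r->head++ & (QCAP - 1)];
    }

    /* Producers: ring_remove(&free_q) -> fill event -> ring_add(&pend_q).
     * Consumers: ring_remove(&pend_q) -> use event -> ring_add(&free_q). */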
+ * + * Input: + * evd_ptr + * qlen + * + * Output: + * NONE + * + * Returns: + * DAT_SUCCESS + * ERROR + * + */ +DAT_RETURN +dapls_evd_event_realloc ( + IN DAPL_EVD *evd_ptr, + IN DAT_COUNT qlen) +{ + DAT_EVENT *event_ptr; + DAT_COUNT i; + DAT_RETURN dat_status; + + /* Allocate EVENTs */ + event_ptr = (DAT_EVENT *) dapl_os_realloc (evd_ptr->events, + qlen * sizeof (DAT_EVENT)); + if (event_ptr == NULL) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + evd_ptr->events = event_ptr; + + /* allocate free event queue */ + dat_status = dapls_rbuf_realloc (&evd_ptr->free_event_queue, qlen); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + /* allocate pending event queue */ + dat_status = dapls_rbuf_realloc (&evd_ptr->pending_event_queue, qlen); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + evd_ptr->qlen = qlen; + + /* + * add events to free event queue. Need to verify that an entry is + * not on the current queues before putting it on the free queue + */ + for (i = 0; i < qlen; i++, event_ptr++) + { + if (dapls_rbuf_contains (&evd_ptr->free_event_queue, event_ptr) + || dapls_rbuf_contains (&evd_ptr->pending_event_queue, event_ptr)) + { + continue; + } + dapls_rbuf_add (&evd_ptr->free_event_queue, (void *)event_ptr); + } + +bail: + + return dat_status; +} + +/* + * dapls_evd_dealloc + * + * Free the passed in EVD structure. If an error occurs, this function + * will clean up all of the internal data structures and report the + * error. + * + * Input: + * evd_ptr + * + * Output: + * none + * + * Returns: + * status + * + */ +DAT_RETURN +dapls_evd_dealloc ( + IN DAPL_EVD *evd_ptr ) +{ + DAT_RETURN dat_status; + DAPL_IA *ia_ptr; + + dat_status = DAT_SUCCESS; + + dapl_os_assert (evd_ptr->header.magic == DAPL_MAGIC_EVD); + dapl_os_assert (evd_ptr->evd_ref_count == 0); + + /* + * Destroy the CQ first, to keep any more callbacks from coming + * up from it. + */ + if (evd_ptr->ib_cq_handle != IB_INVALID_HANDLE) + { + ia_ptr = evd_ptr->header.owner_ia; + + dat_status = dapls_ib_cq_free (ia_ptr, evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapl_evd_dealloc failed to rel. CQ %p \n", + evd_ptr->ib_cq_handle); + goto bail; + } + } + + /* + * We should now be safe to invalidate the EVD; reset the + * magic to prevent reuse. + */ + evd_ptr->header.magic = DAPL_MAGIC_INVALID; + + /* Release reference on the CNO if it exists */ + if ( evd_ptr->cno_ptr != NULL ) + { + dapl_os_atomic_dec ( &evd_ptr->cno_ptr->cno_ref_count ); + evd_ptr->cno_ptr = NULL; + } + + /* If the ring buffer allocation failed, then the dapls_rbuf_destroy */ + /* function will detect that the ring buffer's internal data (ex. 
base */ + /* pointer) are invalid and will handle the situation appropriately */ + dapls_rbuf_destroy (&evd_ptr->free_event_queue); + dapls_rbuf_destroy (&evd_ptr->pending_event_queue); + + if (evd_ptr->events) + { + dapl_os_free (evd_ptr->events, evd_ptr->qlen * sizeof (DAT_EVENT)); + } + + dapl_os_wait_object_destroy (&evd_ptr->wait_object); + if (evd_ptr->cq_wait_obj_handle) + dapls_ib_wait_object_destroy (evd_ptr->cq_wait_obj_handle); + dapl_os_free (evd_ptr, sizeof (DAPL_EVD)); + +bail: + return dat_status; +} + + +/* + * dapli_evd_eh_print_cqe + * + * Input: + * cqe + * + * Output: + * none + * + * Prints out a CQE for debug purposes + * + */ +void +dapli_evd_eh_print_cqe ( + IN ib_work_completion_t cqe) +{ +#if defined(DAPL_DBG) + static char *optable[] = + { + "INVALID_OP", + "OP_SEND", + "OP_RDMA_WRITE", + "OP_RDMA_READ", + "OP_COMP_AND_SWAP", + "OP_FETCH_AND_ADD", + "OP_RECEIVE", + "OP_BIND_MW", + "INVALID_OP" + }; + + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, + "\t >>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<\n"); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, + "\t dapl_evd_dto: CQE \n"); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, "\t\t work_req_id 0x" F64x "\n", + DAPL_GET_CQE_WRID(&cqe)); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, "\t\t op_type: %s\n", + optable[DAPL_GET_CQE_OPTYPE(&cqe)] ); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, "\t\t bytes_num %d\n", + DAPL_GET_CQE_BYTESNUM(&cqe)); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, "\t\t status %d\n", + DAPL_GET_CQE_STATUS(&cqe)); + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, + "\t >>>>>>>>>>>>>>>>>>>>>>><<<<<<<<<<<<<<<<<<<\n"); +#endif /* DAPL_DBG */ +} + + + + +/* + * Event posting code follows. + */ + +/* + * These next two functions (dapli_evd_get_event and dapli_evd_post_event) + * are a pair. They are always called together, from one of the functions + * at the end of this file (dapl_evd_post_*_event). + * + * Note that if producer side locking is enabled, the first one takes the + * EVD lock and the second releases it. + */ + +/* dapli_evd_get_event + * + * Get an event struct from the evd. The caller should fill in the event + * and call dapl_evd_post_event. + * + * If there are no events available, an overflow event is generated to the + * async EVD handler. + * + * If this EVD required producer locking, a successful return implies + * that the lock is held. + * + * Input: + * evd_ptr + * + * Output: + * event + * + */ + +static DAT_EVENT * +dapli_evd_get_event ( + DAPL_EVD *evd_ptr) +{ + DAT_EVENT *event; + + if (evd_ptr->evd_producer_locking_needed) + { + dapl_os_lock(&evd_ptr->header.lock); + } + + event = (DAT_EVENT *)dapls_rbuf_remove (&evd_ptr->free_event_queue); + + /* Release the lock if it was taken and the call failed. */ + if (!event && evd_ptr->evd_producer_locking_needed) + { + dapl_os_unlock(&evd_ptr->header.lock); + } + + return event; +} + +/* dapli_evd_post_event + * + * Post the to the evd. If possible, invoke the evd's CNO. + * Otherwise post the event on the pending queue. + * + * If producer side locking is required, the EVD lock must be held upon + * entry to this function. 
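The two halves always travel together, and the lock hand-off is the subtle part: on success dapli_evd_get_event may return with the EVD lock held, and dapli_evd_post_event (defined just below) releases it. A condensed restatement of the producer pattern used by the dapls_evd_post_*_event functions later in this file (internal DAPL code, not a consumer API, and not compilable standalone since it references this file's static helpers):

    static void post_one_event (DAPL_EVD *evd_ptr, DAT_EVENT_NUMBER num)
    {
        DAT_EVENT *event = dapli_evd_get_event (evd_ptr);

        if (event == NULL)
            return;             /* lock already dropped; EVD overflowed */

        event->evd_handle   = (DAT_EVD_HANDLE) evd_ptr;
        event->event_number = num;
        dapli_evd_post_event (evd_ptr, event);   /* releases the lock */
    }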
+ * + * Input: + * evd_ptr + * event + * + * Output: + * none + * + */ + +static void +dapli_evd_post_event ( + IN DAPL_EVD *evd_ptr, + IN const DAT_EVENT *event_ptr) +{ + DAT_RETURN dat_status; + DAPL_CNO *cno_to_trigger = NULL; + + dapl_dbg_log(DAPL_DBG_TYPE_EVD, + "--> dapli_evd_post_event: Called with event # %x %p\n", + event_ptr->event_number, event_ptr); + + dat_status = dapls_rbuf_add (&evd_ptr->pending_event_queue, + (void *)event_ptr); + dapl_os_assert (dat_status == DAT_SUCCESS); + + dapl_os_assert (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED + || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN); + + if (evd_ptr->evd_state == DAPL_EVD_STATE_OPEN) + { + /* No waiter. Arrange to trigger a CNO if it exists. */ + + if (evd_ptr->evd_enabled) + { + cno_to_trigger = evd_ptr->cno_ptr; + } + if (evd_ptr->evd_producer_locking_needed) + { + dapl_os_unlock (&evd_ptr->header.lock); + } + } + else + { + DAT_COUNT total_events = 0; + DAT_BOOLEAN wakeup = FALSE; + DAT_UINT32 num_cqes = 0; + + /* + * We're in DAPL_EVD_STATE_WAITED. Take the lock if + * we don't have it, recheck, and signal. + */ + if (!evd_ptr->evd_producer_locking_needed) + { + dapl_os_lock(&evd_ptr->header.lock); + } + + total_events = dapls_rbuf_count (&evd_ptr->pending_event_queue); + + if (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED + && (total_events >= evd_ptr->threshold)) + + { + dapl_os_unlock(&evd_ptr->header.lock); + wakeup = TRUE; + } + else if (total_events < evd_ptr->threshold) + { + dat_status = dapls_ib_peek_cq ( evd_ptr->ib_cq_handle, &num_cqes ); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "--> DiEPE: peek_cq failed to rel. CQ %p \n", + evd_ptr->ib_cq_handle); + } + else if ( (num_cqes >= (DAT_UINT32)(evd_ptr->threshold - total_events)) ) + { + if ( evd_ptr->evd_state == DAPL_EVD_STATE_WAITED ) + wakeup = TRUE; + } + else + { + if (evd_ptr->completion_type != DAPL_EVD_STATE_SOLICITED_WAIT) + { + dat_status = dapls_ib_n_completions_notify ( + evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle, + evd_ptr->ib_cq_handle, + evd_ptr->threshold - total_events ); + } + } + + dapl_os_unlock(&evd_ptr->header.lock); + } + else + { + dapl_os_unlock(&evd_ptr->header.lock); + } + + if (wakeup) + { + if (evd_ptr->cq_wait_obj_handle) + { + dapls_ib_wait_object_wakeup (evd_ptr->cq_wait_obj_handle); + } + else + { + dapl_os_wait_object_wakeup (&evd_ptr->wait_object); + } + } + } + + if (cno_to_trigger != NULL) + { + dapl_cno_trigger(cno_to_trigger, evd_ptr); + } +} + + +/* dapli_evd_post_event_nosignal + * + * Post the to the evd. Do not do any wakeup processing. + * This function should only be called if it is known that there are + * no waiters that it is appropriate to wakeup on this EVD. An example + * of such a situation is during internal dat_evd_wait() processing. + * + * If producer side locking is required, the EVD lock must be held upon + * entry to this function. 
+ *
+ * Input:
+ * evd_ptr
+ * event
+ *
+ * Output:
+ * none
+ *
+ */
+
+static void
+dapli_evd_post_event_nosignal (
+ IN DAPL_EVD *evd_ptr,
+ IN const DAT_EVENT *event_ptr)
+{
+ DAT_RETURN dat_status;
+
+ dapl_dbg_log (DAPL_DBG_TYPE_EVD,
+ "dapli_evd_post_event_nosignal: event # %x %p\n",
+ event_ptr->event_number, event_ptr);
+
+ dat_status = dapls_rbuf_add (&evd_ptr->pending_event_queue,
+ (void *)event_ptr);
+ dapl_os_assert (dat_status == DAT_SUCCESS);
+
+ dapl_os_assert (evd_ptr->evd_state == DAPL_EVD_STATE_WAITED
+ || evd_ptr->evd_state == DAPL_EVD_STATE_OPEN);
+
+ if (evd_ptr->evd_producer_locking_needed)
+ {
+ dapl_os_unlock (&evd_ptr->header.lock);
+ }
+}
+
+/* dapli_evd_format_overflow_event
+ *
+ * format an overflow event for posting
+ *
+ * Input:
+ * evd_ptr
+ * event_ptr
+ *
+ * Output:
+ * none
+ *
+ */
+static void
+dapli_evd_format_overflow_event (
+ IN DAPL_EVD *evd_ptr,
+ OUT DAT_EVENT *event_ptr)
+{
+ DAPL_IA *ia_ptr;
+
+ ia_ptr = evd_ptr->header.owner_ia;
+
+ event_ptr->evd_handle = (DAT_EVD_HANDLE)evd_ptr;
+ event_ptr->event_number = DAT_ASYNC_ERROR_EVD_OVERFLOW;
+ event_ptr->event_data.asynch_error_event_data.ia_handle = (DAT_IA_HANDLE)ia_ptr;
+}
+
+/* dapli_evd_post_overflow_event
+ *
+ * post an overflow event
+ *
+ * Input:
+ * async_evd_ptr
+ * evd_ptr
+ *
+ * Output:
+ * none
+ *
+ */
+static void
+dapli_evd_post_overflow_event (
+ IN DAPL_EVD *async_evd_ptr,
+ IN DAPL_EVD *overflow_evd_ptr)
+{
+ DAT_EVENT *overflow_event;
+
+ /* The overflow_evd_ptr might be the same as evd.
+ * In that case we've got a catastrophic overflow.
+ */
+ if (async_evd_ptr == overflow_evd_ptr)
+ {
+ async_evd_ptr->catastrophic_overflow = DAT_TRUE;
+ async_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;
+ return;
+ }
+
+ overflow_event = dapli_evd_get_event (overflow_evd_ptr);
+ if (!overflow_event)
+ {
+ /* this is not good */
+ overflow_evd_ptr->catastrophic_overflow = DAT_TRUE;
+ overflow_evd_ptr->evd_state = DAPL_EVD_STATE_DEAD;
+ return;
+ }
+ dapli_evd_format_overflow_event (overflow_evd_ptr, overflow_event);
+ dapli_evd_post_event (overflow_evd_ptr, overflow_event);
+
+ return;
+}
+
+static DAT_EVENT *
+dapli_evd_get_and_init_event (
+ IN DAPL_EVD *evd_ptr,
+ IN DAT_EVENT_NUMBER event_number)
+{
+ DAT_EVENT *event_ptr;
+
+ event_ptr = dapli_evd_get_event (evd_ptr);
+ if (NULL == event_ptr)
+ {
+ dapli_evd_post_overflow_event (
+ evd_ptr->header.owner_ia->async_error_evd,
+ evd_ptr);
+ }
+ else
+ {
+ event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr;
+ event_ptr->event_number = event_number;
+ }
+
+ return event_ptr;
+}
+
+DAT_RETURN
+dapls_evd_post_cr_arrival_event (
+ IN DAPL_EVD *evd_ptr,
+ IN DAT_EVENT_NUMBER event_number,
+ IN DAT_SP_HANDLE sp_handle,
+ DAT_IA_ADDRESS_PTR ia_address_ptr,
+ DAT_CONN_QUAL conn_qual,
+ DAT_CR_HANDLE cr_handle)
+{
+ DAT_EVENT *event_ptr;
+ event_ptr = dapli_evd_get_and_init_event (evd_ptr, event_number);
+ /*
+ * Note event lock may be held on successful return
+ * to be released by dapli_evd_post_event(), if provider side locking
+ * is needed.
+ */ + + if (!event_ptr) + { + return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + } + + event_ptr->event_data.cr_arrival_event_data.sp_handle = sp_handle; + event_ptr->event_data.cr_arrival_event_data.local_ia_address_ptr + = ia_address_ptr; + event_ptr->event_data.cr_arrival_event_data.conn_qual = conn_qual; + event_ptr->event_data.cr_arrival_event_data.cr_handle = cr_handle; + + dapli_evd_post_event (evd_ptr, event_ptr); + return DAT_SUCCESS; +} + + +DAT_RETURN +dapls_evd_post_connection_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT private_data_size, + IN DAT_PVOID private_data) +{ + DAT_EVENT *event_ptr; + event_ptr = dapli_evd_get_and_init_event (evd_ptr, event_number); + /* + * Note event lock may be held on successful return + * to be released by dapli_evd_post_event(), if provider side locking + * is needed. + */ + + if (!event_ptr) + { + return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + } + + event_ptr->event_data.connect_event_data.ep_handle = ep_handle; + event_ptr->event_data.connect_event_data.private_data_size + = private_data_size; + event_ptr->event_data.connect_event_data.private_data = private_data; + + dapli_evd_post_event (evd_ptr, event_ptr); + return DAT_SUCCESS; +} + + +DAT_RETURN +dapls_evd_post_async_error_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_IA_HANDLE ia_handle) +{ + DAT_EVENT *event_ptr; + event_ptr = dapli_evd_get_and_init_event (evd_ptr, event_number); + /* + * Note event lock may be held on successful return + * to be released by dapli_evd_post_event(), if provider side locking + * is needed. + */ + + if (!event_ptr) + { + return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + } + + event_ptr->event_data.asynch_error_event_data.ia_handle = ia_handle; + + dapli_evd_post_event (evd_ptr, event_ptr); + return DAT_SUCCESS; +} + + +DAT_RETURN +dapls_evd_post_software_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_PVOID pointer) +{ + DAT_EVENT *event_ptr; + event_ptr = dapli_evd_get_and_init_event (evd_ptr, event_number); + /* + * Note event lock may be held on successful return + * to be released by dapli_evd_post_event(), if provider side locking + * is needed. + */ + + if (!event_ptr) + { + return DAT_QUEUE_FULL; + } + + event_ptr->event_data.software_event_data.pointer = pointer; + + dapli_evd_post_event (evd_ptr, event_ptr); + return DAT_SUCCESS; +} + +/* + * dapli_evd_cqe_to_event + * + * Convert a CQE into an event structure. + * + * Input: + * evd_ptr + * cqe_ptr + * + * Output: + * event_ptr + * + * Returns: + * ep_ptr + * + */ +static DAPL_EP * +dapli_evd_cqe_to_event ( + IN DAPL_EVD *evd_ptr, + IN ib_work_completion_t *cqe_ptr, + OUT DAT_EVENT *event_ptr) +{ + DAPL_EP *ep_ptr; + DAPL_COOKIE *cookie; + DAT_DTO_COMPLETION_STATUS dto_status; + + /* + * All that can be relied on if the status is bad is the status + * and WRID. 
+ */ + dto_status = dapls_ib_get_dto_status (cqe_ptr); + + cookie = (DAPL_COOKIE *) DAPL_GET_CQE_WRID (cqe_ptr); + dapl_os_assert ( (NULL != cookie) ); + + dapl_dbg_log (DAPL_DBG_TYPE_EVD, + "dapli_evd_cqe_to_event: EP %p cqe %p cqe_type %#x dto_status %#x\n", + cookie->ep, cookie, cookie->type, dto_status); + + ep_ptr = cookie->ep; + dapl_os_assert ( (NULL != ep_ptr) ); + dapl_os_assert ( (ep_ptr->header.magic == DAPL_MAGIC_EP) || + (ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT) ); + + dapls_io_trc_update_completion (ep_ptr, cookie, dto_status); + + event_ptr->evd_handle = (DAT_EVD_HANDLE) evd_ptr; + + switch (cookie->type) + { + case DAPL_COOKIE_TYPE_DTO: + { + DAPL_COOKIE_BUFFER *buffer; + + if ( DAPL_DTO_TYPE_RECV == cookie->val.dto.type ) + { + dapl_os_atomic_dec (&ep_ptr->recv_count); + buffer = &ep_ptr->recv_buffer; + } + else + { + dapl_os_atomic_dec (&ep_ptr->req_count); + buffer = &ep_ptr->req_buffer; + } + + event_ptr->event_number = DAT_DTO_COMPLETION_EVENT; + event_ptr->event_data.dto_completion_event_data.ep_handle = + cookie->ep; + event_ptr->event_data.dto_completion_event_data.user_cookie = + cookie->val.dto.cookie; + event_ptr->event_data.dto_completion_event_data.status = dto_status; + +#ifdef DAPL_DBG + if (dto_status == DAT_DTO_SUCCESS) + { + uint32_t ibtype; + + ibtype = DAPL_GET_CQE_OPTYPE (cqe_ptr); + + dapl_os_assert ((ibtype == OP_SEND && + cookie->val.dto.type == DAPL_DTO_TYPE_SEND) + || (ibtype == OP_RECEIVE && + cookie->val.dto.type == DAPL_DTO_TYPE_RECV) + || (ibtype == OP_RDMA_WRITE && + cookie->val.dto.type == DAPL_DTO_TYPE_RDMA_WRITE) + || (ibtype == OP_RDMA_READ && + cookie->val.dto.type == DAPL_DTO_TYPE_RDMA_READ)); + } +#endif /* DAPL_DBG */ + + if ( cookie->val.dto.type == DAPL_DTO_TYPE_SEND || + cookie->val.dto.type == DAPL_DTO_TYPE_RDMA_WRITE ) + { + /* Get size from DTO; CQE value may be off. */ + event_ptr->event_data.dto_completion_event_data.transfered_length = + cookie->val.dto.size; + } + else + { + event_ptr->event_data.dto_completion_event_data.transfered_length = + DAPL_GET_CQE_BYTESNUM (cqe_ptr); + } + + dapls_cookie_dealloc (buffer, cookie); + break; + } + + case DAPL_COOKIE_TYPE_RMR: + { + dapl_os_atomic_dec (&ep_ptr->req_count); + + event_ptr->event_number = DAT_RMR_BIND_COMPLETION_EVENT; + + event_ptr->event_data.rmr_completion_event_data.rmr_handle = + cookie->val.rmr.rmr; + event_ptr->event_data.rmr_completion_event_data.user_cookie = + cookie->val.rmr.cookie; + if (dto_status == DAT_DTO_SUCCESS) + { + event_ptr->event_data.rmr_completion_event_data.status = + DAT_RMR_BIND_SUCCESS; + dapl_os_assert ((DAPL_GET_CQE_OPTYPE (cqe_ptr)) == OP_BIND_MW); + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, + " MW bind completion ERROR: %d: op %#x ep: %p\n", + dto_status, + DAPL_GET_CQE_OPTYPE (cqe_ptr), ep_ptr); + event_ptr->event_data.rmr_completion_event_data.status = + DAT_RMR_OPERATION_FAILED; + } + + dapls_cookie_dealloc (&ep_ptr->req_buffer, cookie); + break; + } + default: + { + dapl_os_assert (!"Invalid Operation type"); + break; + } + } /* end switch */ + + /* + * Most error DTO ops result in disconnecting the EP. See + * IBTA Vol 1.1, Chapter 10,Table 68, for expected effect on + * state. + */ + if ((dto_status != DAT_DTO_SUCCESS) && + (dto_status != DAT_DTO_ERR_FLUSHED)) + { + DAPL_EVD *evd_ptr; + + /* + * If we are connected, generate disconnect and generate an + * event. We may be racing with other disconnect ops, so we + * need to check. 
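On the consumer side, the events built by dapli_evd_cqe_to_event surface through dat_evd_dequeue or dat_evd_wait. A sketch of reaping DTO completions (dto_evd is assumed to be an EVD created with DAT_EVD_DTO_FLAG; field names match the event construction above):

    #include <dat/udat.h>

    static void reap_dto_completions (DAT_EVD_HANDLE dto_evd)
    {
        DAT_EVENT ev;

        while (dat_evd_dequeue (dto_evd, &ev) == DAT_SUCCESS)
        {
            DAT_DTO_COMPLETION_EVENT_DATA *dto =
                &ev.event_data.dto_completion_event_data;

            if (dto->status != DAT_DTO_SUCCESS)
            {
                /* per the error path above, the provider tears the
                 * connection down; expect DAT_CONNECTION_EVENT_BROKEN
                 * on the connect EVD next */
                continue;
            }
            /* dto->user_cookie and dto->transfered_length identify
             * and size the completed I/O */
        }
    }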
We may also be racing CM connection events, + * requiring us to check for connection pending states too. + */ + dapl_os_lock ( &ep_ptr->header.lock ); + if (ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED || + ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING || + ep_ptr->param.ep_state == DAT_EP_STATE_PASSIVE_CONNECTION_PENDING|| + ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING ) + + { + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECTED; + dapl_os_unlock ( &ep_ptr->header.lock ); + dapls_io_trc_dump (ep_ptr, cqe_ptr, dto_status); + + /* Let the other side know we have disconnected */ + (void) dapls_ib_disconnect (ep_ptr, DAT_CLOSE_ABRUPT_FLAG); + + /* ... and clean up the local side */ + evd_ptr = (DAPL_EVD *) ep_ptr->param.connect_evd_handle; + if (evd_ptr != NULL) + { + dapls_evd_post_connection_event (evd_ptr, + DAT_CONNECTION_EVENT_BROKEN, + (DAT_HANDLE) ep_ptr, + 0, + 0); + } + } + else + { + dapl_os_unlock ( &ep_ptr->header.lock ); + } + + dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR, + " DTO completion ERROR: %d: op %#x (ep disconnected)\n", + DAPL_GET_CQE_STATUS (cqe_ptr), + DAPL_GET_CQE_OPTYPE (cqe_ptr)); + } + return ep_ptr; +} + +/* + * dapls_evd_copy_cq + * + * Copy all entries on a CQ associated with the EVD onto that EVD + * Up to caller to handle races, if any. Note that no EVD waiters will + * be awoken by this copy. + * + * Input: + * evd_ptr + * + * Output: + * None + * + * Returns: + * none + * + */ +void +dapls_evd_copy_cq ( + DAPL_EVD *evd_ptr) +{ + ib_work_completion_t cur_cqe; + DAT_RETURN dat_status; + ib_cq_handle_t cq_handle; + ib_hca_handle_t hca_handle; + DAPL_EP *ep_ptr; + DAT_EVENT *event; + + cq_handle = evd_ptr->ib_cq_handle; + hca_handle = evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle; + + if (cq_handle == IB_INVALID_HANDLE) + { + /* Nothing to do if no CQ. */ + return; + } + + for ( ;; ) + { + dat_status = dapls_ib_completion_poll(hca_handle, + cq_handle, + &cur_cqe); + + if (dat_status != DAT_SUCCESS) + { + break; + } + + /* + * Can use DAT_DTO_COMPLETION_EVENT because dapli_evd_cqe_to_event + * will overwrite. + */ + + event = dapli_evd_get_and_init_event ( + evd_ptr, DAT_DTO_COMPLETION_EVENT ); + if ( event == NULL ) + { + /* We've already attempted the overflow post; return. */ + return; + } + + ep_ptr = dapli_evd_cqe_to_event ( evd_ptr, &cur_cqe, event ); + dapl_os_assert ( (NULL != ep_ptr) ); + + /* For debugging. 
*/
+ if (DAPL_GET_CQE_STATUS(&cur_cqe))
+ {
+ dapli_evd_eh_print_cqe(cur_cqe);
+ dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR,
+ "--> DsECP_ERR: EP=%p QP=%p event=%p state=0x%x \n",
+ ep_ptr, ep_ptr->qp_handle,
+ event, ep_ptr->param.ep_state);
+ }
+
+ if ( (ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING) ||
+ (ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING) )
+ {
+ dapl_dbg_log (DAPL_DBG_TYPE_WARN,
+ "--> DsECC: EP = %p QP = %p viol_event = %p VIOL_ORDER\n",
+ ep_ptr, ep_ptr->qp_handle, event);
+
+ dapl_os_lock (&ep_ptr->header.lock);
+ ep_ptr->viol_order = TRUE;
+ dapls_rbuf_add (&ep_ptr->viol_event_queue,
+ (void *) event);
+
+ dapl_os_unlock (&ep_ptr->header.lock);
+
+ if (evd_ptr->evd_producer_locking_needed)
+ {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ }
+ }
+ else
+ {
+ dapli_evd_post_event_nosignal ( evd_ptr, event );
+ }
+ }
+
+ if ( DAT_GET_TYPE (dat_status) != DAT_QUEUE_EMPTY)
+ {
+ dapl_os_printf(
+ "dapls_evd_copy_cq: dapls_ib_completion_poll returned 0x%x\n",
+ dat_status);
+ //dapl_os_assert(!"Bad return from dapls_ib_completion_poll");
+ }
+}
+
+/*
+ * dapls_evd_cq_poll_to_event
+ *
+ * Attempt to dequeue a single CQE from a CQ and turn it into
+ * an event.
+ *
+ * Input:
+ * evd_ptr
+ *
+ * Output:
+ * event
+ *
+ * Returns:
+ * Status of operation
+ *
+ */
+DAT_RETURN
+dapls_evd_cq_poll_to_event (
+ IN DAPL_EVD *evd_ptr,
+ OUT DAT_EVENT *event)
+{
+ DAT_RETURN dat_status;
+ ib_cq_handle_t cq_handle;
+ ib_hca_handle_t hca_handle;
+ ib_work_completion_t cur_cqe;
+ DAPL_EP *ep_ptr;
+ DAT_EVENT *viol_event;
+
+ cq_handle = evd_ptr->ib_cq_handle;
+ hca_handle = evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle;
+ ep_ptr = NULL;
+
+ dat_status = dapls_ib_completion_poll(hca_handle,
+ cq_handle,
+ &cur_cqe);
+ if (dat_status == DAT_SUCCESS)
+ {
+
+
+ ep_ptr = dapli_evd_cqe_to_event ( evd_ptr, &cur_cqe, event );
+ dapl_os_assert ( (NULL != ep_ptr) );
+
+ /* For debugging. */
+ if (DAPL_GET_CQE_STATUS(&cur_cqe))
+ {
+ dapli_evd_eh_print_cqe(cur_cqe);
+ dapl_dbg_log (DAPL_DBG_TYPE_DTO_COMP_ERR,
+ "--> DsECPTE_ERR: EP=%p QP=%p event=%p state=0x%x \n",
+ ep_ptr, ep_ptr->qp_handle,
+ event, ep_ptr->param.ep_state );
+ }
+
+ if ( (ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING) ||
+ (ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING) )
+ {
+ if (evd_ptr->evd_producer_locking_needed)
+ {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ }
+
+ viol_event = dapli_evd_get_and_init_event (
+ evd_ptr, DAT_DTO_COMPLETION_EVENT );
+ if ( viol_event == NULL )
+ {
+ return dat_status = DAT_QUEUE_EMPTY;
+ }
+
+ *viol_event = *event;
+
+ dapl_dbg_log (DAPL_DBG_TYPE_WARN,
+ "--> DsECPTE: EP = %p QP = %p viol_event = %p VIOL_ORDER\n",
+ ep_ptr, ep_ptr->qp_handle, viol_event);
+
+ dapl_os_lock (&ep_ptr->header.lock);
+ ep_ptr->viol_order = TRUE;
+ dapls_rbuf_add (&ep_ptr->viol_event_queue,
+ (void *) viol_event);
+ dapl_os_unlock (&ep_ptr->header.lock);
+
+ if (evd_ptr->evd_producer_locking_needed)
+ {
+ dapl_os_unlock(&evd_ptr->header.lock);
+ }
+ /* Overriding the status */
+ dat_status = DAT_QUEUE_EMPTY;
+ }
+ }
+
+ return dat_status;
+}
+
+
+DAT_RETURN
+dapls_evd_post_viol_event (
+ IN DAPL_EVD *evd_ptr,
+ IN DAT_EVENT *event_ptr)
+{
+ dapli_evd_post_event(evd_ptr, event_ptr);
+ return DAT_SUCCESS;
+}
+#ifdef DAPL_DBG_IO_TRC
+/*
+ * Update I/O completions in the I/O trace buffer.
I/O is posted to + * the buffer, then we find it here using the cookie and mark it + * completed with the completion status + */ +void +dapls_io_trc_update_completion ( + DAPL_EP *ep_ptr, + DAPL_COOKIE *cookie, + DAT_DTO_COMPLETION_STATUS dto_status) +{ + int i; + static unsigned int c_cnt = 1; + + for (i = 0; i < DBG_IO_TRC_QLEN; i++) + { + if (ep_ptr->ibt_base[i].cookie == cookie) + { + ep_ptr->ibt_base[i].status = dto_status; + ep_ptr->ibt_base[i].done = c_cnt++; + } + } +} + +/* + * Dump the I/O trace buffers + */ +void +dapls_io_trc_dump ( + DAPL_EP *ep_ptr, + ib_work_completion_t *cqe_ptr, + DAT_DTO_COMPLETION_STATUS dto_status) +{ + struct io_buf_track *ibt; + int i; + int cnt; + + dapl_os_printf ("DISCONNECTING: dto_status = %x\n", dto_status); + dapl_os_printf (" OpType = %x\n", + DAPL_GET_CQE_OPTYPE (cqe_ptr)); + dapl_os_printf (" Bytes = %x\n", + DAPL_GET_CQE_BYTESNUM (cqe_ptr)); + dapl_os_printf (" WRID (cookie) = %llx\n", + DAPL_GET_CQE_WRID (cqe_ptr)); + + if (ep_ptr->ibt_dumped == 0) + { + + dapl_os_printf ("EP %p (qpn %d) I/O trace buffer\n", + ep_ptr, ep_ptr->qpn); + + ep_ptr->ibt_dumped = 1; + ibt = (struct io_buf_track *)dapls_rbuf_remove (&ep_ptr->ibt_queue); + cnt = DBG_IO_TRC_QLEN; + while (ibt != NULL && cnt > 0) + { + dapl_os_printf ("%2d. %3s (%2d, %d) OP: %x cookie %p wqe %p rmv_target_addr %llx rmv_rmr_context %x\n", + cnt, ibt->done == 0 ? "WRK" : "DON", ibt->status, ibt->done, + ibt->op_type, ibt->cookie, ibt->wqe, + ibt->remote_iov.target_address, + ibt->remote_iov.rmr_context); + for (i = 0; i < 3; i++) + { + if (ibt->iov[i].segment_length != 0) + { + dapl_os_printf (" (%4llx, %8x, %8llx)\n", + ibt->iov[i].segment_length, + ibt->iov[i].lmr_context, + ibt->iov[i].virtual_address); + } + } + ibt = (struct io_buf_track *)dapls_rbuf_remove (&ep_ptr->ibt_queue); + cnt--; + } + } +} +#endif /* DAPL_DBG_IO_TRC */ + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.h new file mode 100644 index 00000000..7fe76c9f --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_util.h @@ -0,0 +1,150 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_evd_util.h + * + * PURPOSE: Utility defs & routines for the EVD data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_EVD_UTIL_H_ +#define _DAPL_EVD_UTIL_H_ + +#include "dapl.h" + +DAT_RETURN +dapls_evd_internal_create ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_ptr, + IN DAT_COUNT min_qlen, + IN DAT_EVD_FLAGS evd_flags, + OUT DAPL_EVD **evd_ptr_ptr) ; + +DAPL_EVD * +dapls_evd_alloc ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_ptr, + IN DAT_EVD_FLAGS evd_flags, + IN DAT_COUNT qlen) ; + +DAT_RETURN +dapls_evd_dealloc ( + IN DAPL_EVD *evd_ptr) ; + +DAT_RETURN dapls_evd_event_realloc ( + IN DAPL_EVD *evd_ptr, + IN DAT_COUNT qlen); + +/* + * Each of these functions will retrieve a free event from + * the specified EVD, fill in the elements of that event, and + * post the event back to the EVD. If there is no EVD available, + * an overflow event will be posted to the async EVD associated + * with the EVD. + * + * DAT_INSUFFICIENT_RESOURCES will be returned on overflow, + * DAT_SUCCESS otherwise. + */ + +DAT_RETURN +dapls_evd_post_cr_arrival_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_SP_HANDLE sp_handle, + DAT_IA_ADDRESS_PTR ia_address_ptr, + DAT_CONN_QUAL conn_qual, + DAT_CR_HANDLE cr_handle); + +DAT_RETURN +dapls_evd_post_connection_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_EP_HANDLE ep_handle, + IN DAT_COUNT private_data_size, + IN DAT_PVOID private_data); + +DAT_RETURN +dapls_evd_post_async_error_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_IA_HANDLE ia_handle); + +DAT_RETURN +dapls_evd_post_software_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT_NUMBER event_number, + IN DAT_PVOID pointer); + +DAT_RETURN +dapls_evd_post_viol_event ( + IN DAPL_EVD *evd_ptr, + IN DAT_EVENT *event_ptr); + +/************************************* + * dapl internal callbacks functions * + *************************************/ + +/* connection verb callback */ +extern void dapl_evd_connection_callback ( + IN ib_cm_handle_t ib_cm_handle, + IN const ib_cm_events_t ib_cm_events, + IN const void *instant_data_p, + IN const void * context ); + +/* dto verb callback */ +extern void dapl_evd_dto_callback ( + IN ib_hca_handle_t ib_hca_handle, + IN ib_cq_handle_t ib_cq_handle, + IN void* context); + +/* async verb callbacks */ +extern void dapl_evd_un_async_error_callback ( + IN ib_hca_handle_t ib_hca_handle, + IN ib_error_record_t *err_code, + IN void * context); + +extern void dapl_evd_cq_async_error_callback ( + IN ib_hca_handle_t ib_hca_handle, + IN ib_error_record_t *err_code, + IN void * context); + +extern void dapl_evd_qp_async_error_callback ( + IN ib_hca_handle_t ib_hca_handle, + IN ib_error_record_t *err_code, + IN void * context); + +extern void dapls_evd_copy_cq ( + DAPL_EVD *evd_ptr); + +extern DAT_RETURN dapls_evd_cq_poll_to_event ( + IN DAPL_EVD *evd_ptr, + OUT DAT_EVENT *event); + +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_wait.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_wait.c new file mode 100644 index 00000000..ac3e3f48 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_evd_wait.c @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under one of the following licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * + * 2) under the terms of the "The BSD License" a copy of which is + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * 3) under the terms of the "GNU General Public License (GPL) Version 2" a + * copy of which is available from the Open Source Initiative, see + * http://www.opensource.org/licenses/gpl-license.php. + * + * Licensee has the right to choose one of the above licenses. + * + * Redistributions of source code must retain the above copyright + * notice and one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_evd_wait.c + * + * PURPOSE: EVENT management + * + * Description: Interfaces in this file are completely defined in + * the uDAPL 1.1 API specification + * + * $Id:$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_evd_util.h" +#include "dapl_ring_buffer_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_evd_wait + * + * UDAPL Requirements Version xxx, + * + * Wait, up to specified timeout, for notification event on EVD. + * Then return first available event. + * + * Input: + * evd_handle + * timeout + * + * Output: + * event + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + */ + +DAT_RETURN dapl_evd_wait ( + IN DAT_EVD_HANDLE evd_handle, + IN DAT_TIMEOUT time_out, + IN DAT_COUNT threshold, + OUT DAT_EVENT *event, + OUT DAT_COUNT *nmore) + +{ + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + DAT_EVENT *local_event; + DAT_BOOLEAN notify_requested = DAT_FALSE; + DAT_BOOLEAN waitable; + DAPL_EVD_STATE evd_state; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_evd_wait (%p, %d, %d, %p, %p)\n", + evd_handle, + time_out, + threshold, + event, + nmore); + DAPL_CNTR(DCNT_EVD_WAIT); + + evd_ptr = (DAPL_EVD *)evd_handle; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD)) + { + /* + * We return directly rather than bailing because + * bailing attempts to update the evd, and we don't have + * one. + */ + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + if (!event) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG4); + goto bail; + } + if (!nmore) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG5); + goto bail; + } + if (threshold <= 0 || + (threshold > 1 && evd_ptr->completion_type != DAPL_EVD_STATE_THRESHOLD) || + threshold > evd_ptr->qlen) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + if ( evd_ptr->catastrophic_overflow ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,0); + goto bail; + } + + dapl_dbg_log (DAPL_DBG_TYPE_EVD, + "dapl_evd_wait: EVD %p, CQ %p\n", + evd_ptr, + (void *)evd_ptr->ib_cq_handle); + + /* + * Make sure there are no other waiters and the evd is active. + * Currently this means only the OPEN state is allowed. + * Do this atomically. 
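The claim step described above is a compare-and-swap on the EVD state: dapl_os_atomic_assign returns the previous value, and only the OPEN -> WAITED transition admits a waiter. The same idea in portable C11 form (illustrative only; the real code uses the DAPL_ATOMIC wrappers):

    #include <stdatomic.h>

    enum { EVD_OPEN, EVD_WAITED };

    /* Returns non-zero iff this thread became the single waiter. */
    static int evd_try_claim (atomic_int *evd_state)
    {
        int expected = EVD_OPEN;
        return atomic_compare_exchange_strong (evd_state,
                                               &expected, EVD_WAITED);
    }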
We need to take a lock to synchronize
+ * with dapl_evd_dequeue(), but the atomic transition allows
+ * non-locking synchronization with dapl_evd_query() and
+ * dapl_evd_{enable,disable,{set,clear}_unwaitable}.
+ */
+
+ dapl_os_lock ( &evd_ptr->header.lock );
+ waitable = evd_ptr->evd_waitable;
+
+ dapl_os_assert ( sizeof(DAT_COUNT) == sizeof(DAPL_EVD_STATE) );
+ evd_state = dapl_os_atomic_assign ( (DAPL_ATOMIC *)&evd_ptr->evd_state,
+ (DAT_COUNT) DAPL_EVD_STATE_OPEN,
+ (DAT_COUNT) DAPL_EVD_STATE_WAITED );
+ dapl_os_unlock ( &evd_ptr->header.lock );
+
+ if ( evd_state != DAPL_EVD_STATE_OPEN )
+ {
+ /* Bogus state, bail out */
+ dat_status = DAT_ERROR (DAT_INVALID_STATE,0);
+ goto bail;
+ }
+
+ if (!waitable)
+ {
+ /* This EVD is not waitable, reset the state and bail */
+ (void) dapl_os_atomic_assign ((DAPL_ATOMIC *)&evd_ptr->evd_state,
+ (DAT_COUNT) DAPL_EVD_STATE_WAITED,
+ evd_state);
+ dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_EVD_UNWAITABLE);
+ goto bail;
+ }
+
+ /*
+ * We now own the EVD, even though we don't have the lock anymore,
+ * because we're in the WAITED state.
+ */
+
+ evd_ptr->threshold = threshold;
+
+ for (;;)
+ {
+ /*
+ * Ideally we'd just check the number of entries on the CQ, but
+ * we don't have a way to do that. Because we have to set *nmore
+ * at some point in this routine, we'll need to do this copy
+ * sometime even if threshold == 1.
+ *
+ * For a connection EVD or the async EVD, dapls_evd_copy_cq()
+ * checks and returns right away if the ib_cq_handle associated
+ * with the EVD equals IB_INVALID_HANDLE.
+ */
+ dapls_evd_copy_cq(evd_ptr);
+
+ if (dapls_rbuf_count(&evd_ptr->pending_event_queue) >= threshold)
+ {
+ break;
+ }
+
+ /*
+ * Do not enable the completion notification if this EVD is not
+ * a DTO_EVD or RMR_BIND_EVD.
+ */
+ if ( (!notify_requested) &&
+ ((evd_ptr->evd_flags & DAT_EVD_DTO_FLAG) ||
+ (evd_ptr->evd_flags & DAT_EVD_RMR_BIND_FLAG)) )
+ {
+ dat_status = dapls_ib_completion_notify (
+ evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle,
+ evd_ptr->ib_cq_handle,
+ (evd_ptr->completion_type == DAPL_EVD_STATE_SOLICITED_WAIT) ?
+ IB_NOTIFY_ON_SOLIC_COMP : IB_NOTIFY_ON_NEXT_COMP );
+
+ DAPL_CNTR(DCNT_EVD_WAIT_CMP_NTFY);
+ /* FIXME report error */
+ dapl_os_assert(dat_status == DAT_SUCCESS);
+
+ notify_requested = DAT_TRUE;
+
+ /* Try again. */
+ continue;
+ }
+
+ /*
+ * Unused by the poster; it has no way to tell how many
+ * items are on the queue without copying them over to the
+ * EVD queue, and we're the only ones allowed to dequeue
+ * from the CQ for synchronization/locking reasons.
+ */
+ evd_ptr->threshold = threshold;
+
+ DAPL_CNTR(DCNT_EVD_WAIT_BLOCKED);
+
+#ifdef CQ_WAIT_OBJECT
+ if (evd_ptr->cq_wait_obj_handle)
+ dat_status = dapls_ib_wait_object_wait (
+ evd_ptr->cq_wait_obj_handle, time_out );
+ else
+#endif
+ dat_status = dapl_os_wait_object_wait (
+ &evd_ptr->wait_object, time_out );
+ /*
+ * FIXME: if the thread loops around and waits again
+ * the time_out value needs to be updated.
+ */
+
+ notify_requested = DAT_FALSE; /* We've used it up. */
+
+ /* See if we were awakened by evd_set_unwaitable */
+ if ( !evd_ptr->evd_waitable )
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_STATE,0);
+ }
+
+ if (dat_status != DAT_SUCCESS)
+ {
+ /*
+ * If the status is DAT_TIMEOUT, we'll break out of the
+ * loop, *not* dequeue an event (because dat_status
+ * != DAT_SUCCESS), set *nmore (as we should for timeout)
+ * and return DAT_TIMEOUT.
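+ *
+ * Editor's sketch of the caller-visible timeout contract
+ * (the five-second value is illustrative; DAT timeouts are in
+ * microseconds, and DAT_GET_TYPE()/DAT_TIMEOUT_EXPIRED come
+ * from the DAT headers):
+ *
+ *     dat_status = dapl_evd_wait (evd, 5000000, 1, &event, &nmore);
+ *     if (DAT_GET_TYPE (dat_status) == DAT_TIMEOUT_EXPIRED)
+ *     {
+ *         ... no event was dequeued; *nmore is still valid ...
+ *     }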
+ */ + break; + } + } + + evd_ptr->evd_state = DAPL_EVD_STATE_OPEN; + + if (dat_status == DAT_SUCCESS) + { + local_event = dapls_rbuf_remove(&evd_ptr->pending_event_queue); + *event = *local_event; + dapls_rbuf_add(&evd_ptr->free_event_queue, local_event); + } + + /* + * Valid if dat_status == DAT_SUCCESS || dat_status == DAT_TIMEOUT + * Undefined otherwise, so ok to set it. + */ + *nmore = dapls_rbuf_count(&evd_ptr->pending_event_queue); + + bail: + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_evd_wait () returns 0x%x\n", + dat_status); + + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_get_consumer_context.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_get_consumer_context.c new file mode 100644 index 00000000..90125b1f --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_get_consumer_context.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_get_consumer_context.c + * + * PURPOSE: Interface Adapter management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_get_consumer_context + * + * DAPL Requirements Version xxx, 6.2.2.2 + * + * Gets the consumer context from the specified dat_object + * + * Input: + * dat_handle + * + * Output: + * context + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_get_consumer_context ( + IN DAT_HANDLE dat_handle, + OUT DAT_CONTEXT *context ) +{ + DAT_RETURN dat_status; + DAPL_HEADER *header; + + dat_status = DAT_SUCCESS; + + header = (DAPL_HEADER *)dat_handle; + if ( ((header) == NULL) || + ((DAT_UVERYLONG)(header) & 3) || + (header->magic != DAPL_MAGIC_IA && + header->magic != DAPL_MAGIC_EVD && + header->magic != DAPL_MAGIC_EP && + header->magic != DAPL_MAGIC_LMR && + header->magic != DAPL_MAGIC_RMR && + header->magic != DAPL_MAGIC_PZ && + header->magic != DAPL_MAGIC_PSP && + header->magic != DAPL_MAGIC_RSP && + header->magic != DAPL_MAGIC_CR)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + if ( context == NULL || ((DAT_UVERYLONG)(header) & 3) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + *context = header->user_context; + +bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_get_handle_type.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_get_handle_type.c new file mode 100644 index 00000000..009b72fa --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_get_handle_type.c @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_get_handle_type.c + * + * PURPOSE: Interface Adapter management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_get_handle_type + * + * DAPL Requirements Version xxx, 6.2.2.6 + * + * Gets the handle type for the given dat_handle + * + * Input: + * dat_handle + * + * Output: + * handle_type + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ + +DAT_RETURN +dapl_get_handle_type ( + IN DAT_HANDLE dat_handle, + OUT DAT_HANDLE_TYPE *handle_type ) +{ + DAT_RETURN dat_status; + DAPL_HEADER *header; + + dat_status = DAT_SUCCESS; + + header = (DAPL_HEADER *)dat_handle; + if ( ((header) == NULL) || + ((DAT_UVERYLONG)(header) & 3) || + (header->magic != DAPL_MAGIC_IA && + header->magic != DAPL_MAGIC_EVD && + header->magic != DAPL_MAGIC_EP && + header->magic != DAPL_MAGIC_LMR && + header->magic != DAPL_MAGIC_RMR && + header->magic != DAPL_MAGIC_PZ && + header->magic != DAPL_MAGIC_PSP && + header->magic != DAPL_MAGIC_RSP && + header->magic != DAPL_MAGIC_CR)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + *handle_type = header->handle_type; + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.c new file mode 100644 index 00000000..7b5011ba --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.c @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_hash.c + * + * PURPOSE: Hash Table + * Description: + * + * Provides a generic hash table with chaining. 
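+ *
+ * (Editor's note: "chaining" here means each bucket heads a
+ * singly-linked list of DAPL_HASH_ELEM nodes; dapli_hash_add()
+ * appends colliding keys to the tail of the bucket's chain.)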
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_hash.h"
+
+/*********************************************************************
+ * *
+ * Structures *
+ * *
+ *********************************************************************/
+
+/*
+ * A hash table element
+ */
+typedef struct DAPL_HASH_ELEM
+{
+ struct DAPL_HASH_ELEM * next_element;
+ DAPL_HASH_KEY key;
+ void * datum;
+} DAPL_HASH_ELEM;
+
+/*
+ * The hash table
+ */
+struct dapl_hash_table
+{
+ unsigned long num_entries;
+ unsigned long tbl_size;
+ DAPL_HASH_ELEM *table;
+ DAPL_OS_LOCK lock;
+ /*
+ * statistics - we tally on insert operations, counting
+ * the number of entries in the whole hash table, as
+ * well as the length of chains we walk to insert. This
+ * ignores empty buckets, giving us data on overall table
+ * occupancy, as well as max/average chain length for
+ * the buckets used. If our hash function results in
+ * hot buckets, this will show it.
+ */
+ uint64_t hash_tbl_inserts; /* total insert ops */
+ uint64_t hash_tbl_max; /* max in entire table */
+ uint64_t hash_tbl_total; /* total in table */
+ uint64_t hash_chn_max; /* longest chain */
+ uint64_t hash_chn_total; /* total non-0 lengths */
+};
+
+
+/*********************************************************************
+ * *
+ * Defines *
+ * *
+ *********************************************************************/
+
+/* datum value in empty table slots (use 0UL or ~0UL as appropriate) */
+#define NO_DATUM_VALUE ((void *) 0UL)
+#define NO_DATUM(value) ((value) == NO_DATUM_VALUE)
+
+/* Lookup macro (which falls back to function to rehash) */
+#define DAPL_HASHLOOKUP( p_table, in_key, out_datum, bucket_head) \
+ { \
+ DAPL_HASH_KEY save_key = in_key; \
+ DAPL_HASH_ELEM *element = \
+ &((p_table)->table)[DAPL_DOHASH(in_key,(p_table)->tbl_size)]; \
+ in_key = save_key; \
+ if (NO_DATUM(element->datum)) { \
+ (bucket_head) = (void *)0; \
+ } else if (element->key == (DAPL_HASH_KEY) (in_key)) { \
+ (out_datum) = element->datum; \
+ (bucket_head) = (void *)element; \
+ } else if (element->next_element) { \
+ dapli_hash_rehash(element, \
+ (in_key), \
+ (void **)&(out_datum), \
+ (DAPL_HASH_ELEM **)&(bucket_head)); \
+ } else { \
+ (bucket_head) = (void *)0; \
+ }\
+ }
+
+
+/*********************************************************************
+ * *
+ * Internal Functions *
+ * *
+ *********************************************************************/
+
+/*
+ * Rehash the key (used by add and lookup functions)
+ *
+ * Inputs: element bucket head whose chain is searched
+ * key key to look up
+ * Outputs: datum datum for key, on a hit
+ * head bucket head on a hit, NULL on a miss
+ */
+static void
+dapli_hash_rehash (
+ DAPL_HASH_ELEM * element,
+ DAPL_HASH_KEY key,
+ void **datum,
+ DAPL_HASH_ELEM ** head)
+{
+ /*
+ * assume we looked at the contents of element already,
+ * and start with the next element.
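+ *
+ * (Editor's note: DAPL_HASHLOOKUP has already compared the static
+ * bucket head, so this helper only walks head->next_element onward;
+ * on a hit it fills *datum and leaves *head pointing at the bucket
+ * head, on a miss it zeroes *head so the caller can tell the two
+ * apart.)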
+ */ + dapl_os_assert (element->next_element); + dapl_os_assert (!NO_DATUM (element->datum)); + + *head = element; + for(;;) + { + element = element->next_element; + if (!element) + { + break; + } + if (element->key == key) + { + *datum = element->datum; + return; + } + } + *head = 0; +} + +/* + * Add a new key to the hash table + * + * Inputs: + * table, key and datum to be added + * allow_dup - DAT_TRUE if dups are allowed + * Outputs: + * report_dup - should you care to know + * Returns: + * DAT_TRUE on success + */ +static DAT_BOOLEAN +dapli_hash_add ( + DAPL_HASH_TABLEP p_table, + DAPL_HASH_KEY key, + void *datum, + DAT_BOOLEAN allow_dup, + DAT_BOOLEAN * report_dup) +{ + void *olddatum; + DAPL_HASH_KEY hashValue, save_key = key; + DAPL_HASH_ELEM *found; + DAT_BOOLEAN status = DAT_FALSE; + unsigned int chain_len = 0; + + if (report_dup) + { + (*report_dup) = DAT_FALSE; + } + + if (NO_DATUM (datum)) + { + /* + * Reserved value used for datum + */ + dapl_dbg_log ( + DAPL_DBG_TYPE_ERR, + "dapli_hash_add () called with magic NO_DATA value (%p) " + "used as datum!\n", datum); + return DAT_FALSE; + } + + DAPL_HASHLOOKUP (p_table, key, olddatum, found); + if (found) + { + /* + * key exists already + */ + if (report_dup) + { + *report_dup = DAT_TRUE; + } + + if (!allow_dup) + { + dapl_dbg_log ( + DAPL_DBG_TYPE_ERR, + "dapli_hash_add () called with duplicate key (" F64x ")\n", + key); + return DAT_FALSE; + } + } + + hashValue = DAPL_DOHASH (key, p_table->tbl_size); + key = save_key; + if (NO_DATUM (p_table->table[hashValue].datum)) + { + /* + * Empty head - just fill it in + */ + p_table->table[hashValue].key = key; + p_table->table[hashValue].datum = datum; + p_table->table[hashValue].next_element = 0; + p_table->num_entries++; + status = DAT_TRUE; + } + else + { + DAPL_HASH_ELEM *newelement = (DAPL_HASH_ELEM *) + dapl_os_alloc (sizeof (DAPL_HASH_ELEM)); + /* + * Add an element to the end of the chain + */ + if (newelement) + { + DAPL_HASH_ELEM *lastelement; + newelement->key = key; + newelement->datum = datum; + newelement->next_element = 0; + for (lastelement = &p_table->table[hashValue]; + lastelement->next_element; + lastelement = lastelement->next_element) + { + /* Walk to the end of the chain */ + chain_len++; + } + lastelement->next_element = newelement; + p_table->num_entries++; + status = DAT_TRUE; + } + else + { + /* allocation failed - should not happen */ + status = DAT_FALSE; + } + } + + /* + * Tally up our counters. chain_len is one less than current chain + * length. 
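+ *
+ * Editor's note: these running sums are meant to be read as
+ * averages over inserts, e.g.
+ *
+ *     avg_occupancy = hash_tbl_total / hash_tbl_inserts;
+ *     avg_chain     = hash_chn_total / hash_tbl_inserts;
+ *
+ * and compared against hash_tbl_max / hash_chn_max to spot hot
+ * buckets.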
+ */ + chain_len++; + p_table->hash_tbl_inserts++; + p_table->hash_tbl_total += p_table->num_entries; + p_table->hash_chn_total += chain_len; + if (p_table->num_entries > p_table->hash_tbl_max) + { + p_table->hash_tbl_max = p_table->num_entries; + } + if (chain_len > p_table->hash_chn_max) + { + p_table->hash_chn_max = chain_len; + } + + return status; +} + + +/* + * Remove element from hash bucket + * + * Inputs: + * element, key to be deleted + * Returns: + * DAT_TRUE on success + */ +static DAT_BOOLEAN +dapl_hash_delete_element (DAPL_HASH_ELEM * element, + DAPL_HASH_KEY key, + DAPL_HASH_DATA *p_datum) +{ + DAPL_HASH_ELEM *curelement; + DAPL_HASH_ELEM *lastelement; + + lastelement = NULL; + for (curelement = element; + curelement; + lastelement = curelement, curelement = curelement->next_element) + { + if (curelement->key == key) + { + if (p_datum) + { + *p_datum = curelement->datum; + } + if (lastelement) + { + /* + * curelement was malloc'd; free it + */ + lastelement->next_element = curelement->next_element; + dapl_os_free ((void *) curelement, sizeof (DAPL_HASH_ELEM)); + } + else + { + /* + * curelement is static list head + */ + DAPL_HASH_ELEM *n = curelement->next_element; + if (n) + { + /* + * If there is a next element, copy its contents into the + * head and free the original next element. + */ + curelement->key = n->key; + curelement->datum = n->datum; + curelement->next_element = n->next_element; + dapl_os_free ((void *) n, sizeof (DAPL_HASH_ELEM)); + } + else + { + curelement->datum = NO_DATUM_VALUE; + } + } + break; + } + } + + return ( curelement != NULL ? DAT_TRUE : DAT_FALSE ); +} + + +/********************************************************************* + * * + * External Functions * + * * + *********************************************************************/ + + +/* + * Create a new hash table with at least 'table_size' hash buckets. 
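+ *
+ * (Editor's note: the buckets form one contiguous array of chain
+ * heads allocated here; only colliding entries are individually
+ * allocated, which is why dapl_hash_delete_element() above copies
+ * a successor into the static head instead of freeing the head.)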
+ */
+DAT_RETURN
+dapls_hash_create (
+ IN DAT_COUNT table_size,
+ OUT DAPL_HASH_TABLE **pp_table)
+{
+ DAPL_HASH_TABLE *p_table;
+ DAT_COUNT table_length = table_size * sizeof (DAPL_HASH_ELEM);
+ DAT_RETURN dat_status;
+ DAT_COUNT i;
+
+ dapl_os_assert (pp_table);
+ dat_status = DAT_SUCCESS;
+
+ /* Allocate hash table */
+ p_table = dapl_os_alloc (sizeof (DAPL_HASH_TABLE));
+ if (NULL == p_table)
+ {
+ dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ goto bail;
+ }
+
+ /* Init hash table; allocate and init buckets */
+ dapl_os_memzero (p_table, sizeof (DAPL_HASH_TABLE));
+ p_table->tbl_size = table_size;
+ p_table->table = (DAPL_HASH_ELEM *) dapl_os_alloc (table_length);
+ if (NULL == p_table->table)
+ {
+ dapl_os_free (p_table, sizeof (DAPL_HASH_TABLE));
+ dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ goto bail;
+ }
+
+ dapl_os_lock_init (&p_table->lock);
+ for (i = 0; i < table_size; i++)
+ {
+ p_table->table[i].datum = NO_DATUM_VALUE;
+ p_table->table[i].key = 0;
+ p_table->table[i].next_element = 0;
+ }
+
+ *pp_table = p_table;
+
+ bail:
+ /* Return the actual status, not unconditional success; on the
+ * failure paths above *pp_table has not been set and must not
+ * be used.
+ */
+ return dat_status;
+}
+
+
+/*
+ * Destroy a hash table
+ */
+DAT_RETURN
+dapls_hash_free (
+ IN DAPL_HASH_TABLE *p_table)
+{
+ dapl_os_assert (p_table && p_table->table);
+
+ dapl_os_lock_destroy (&p_table->lock);
+ dapl_os_free (p_table->table, sizeof (DAPL_HASH_ELEM) * p_table->tbl_size);
+ dapl_os_free (p_table, sizeof (DAPL_HASH_TABLE));
+
+ return DAT_SUCCESS;
+}
+
+
+/*
+ * Returns the number of elements stored in the table
+ */
+
+DAT_RETURN
+dapls_hash_size (
+ IN DAPL_HASH_TABLE *p_table,
+ OUT DAT_COUNT *p_size)
+{
+ dapl_os_assert (p_table && p_size);
+
+ *p_size = p_table->num_entries;
+
+ return DAT_SUCCESS;
+}
+
+
+/*
+ * Inserts the specified data into the table with the given key.
+ * Duplicates are not expected and return an error, having done nothing.
+ */
+
+DAT_RETURN
+dapls_hash_insert (
+ IN DAPL_HASH_TABLE *p_table,
+ IN DAPL_HASH_KEY key,
+ IN DAPL_HASH_DATA data)
+{
+ DAT_RETURN dat_status;
+
+ dapl_os_assert (p_table);
+ dat_status = DAT_SUCCESS;
+
+ dapl_os_lock (&p_table->lock);
+ if (!dapli_hash_add (p_table, key, data, DAT_FALSE, NULL))
+ {
+ dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+ }
+ dapl_os_unlock (&p_table->lock);
+
+ return dat_status;
+}
+
+
+/*
+ * Searches for the given key. If found,
+ * DAT_SUCCESS is returned and the associated
+ * data is returned in the DAPL_HASH_DATA
+ * pointer if that pointer is not NULL.
+ */ +DAT_RETURN +dapls_hash_search ( + IN DAPL_HASH_TABLE *p_table, + IN DAPL_HASH_KEY key, + OUT DAPL_HASH_DATA *p_data) +{ + DAT_RETURN dat_status; + void *olddatum = NULL; + DAPL_HASH_ELEM *found; + + dapl_os_assert (p_table); + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,0); + + dapl_os_lock (&p_table->lock); + DAPL_HASHLOOKUP (p_table, key, olddatum, found); + dapl_os_unlock (&p_table->lock); + + if (found) + { + if (p_data) + { + *p_data = olddatum; + } + dat_status = DAT_SUCCESS; + } + + return dat_status; +} + + +DAT_RETURN +dapls_hash_remove ( + IN DAPL_HASH_TABLE *p_table, + IN DAPL_HASH_KEY key, + OUT DAPL_HASH_DATA *p_data) +{ + DAT_RETURN dat_status; + DAPL_HASH_KEY hashValue, save_key = key; + + dapl_os_assert (p_table); + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,0); + + if (p_table->num_entries == 0) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapls_hash_remove () called on empty hash table!\n"); + return dat_status; + } + + hashValue = DAPL_DOHASH (key, p_table->tbl_size); + key = save_key; + dapl_os_lock (&p_table->lock); + if (dapl_hash_delete_element (&p_table->table[hashValue], key, p_data)) + { + p_table->num_entries--; + dat_status = DAT_SUCCESS; + } + dapl_os_unlock (&p_table->lock); + + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.h new file mode 100644 index 00000000..0c5c15b4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_hash.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_hash.h + * + * PURPOSE: Utility defs & routines for the hash data structure + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_HASH_H_ +#define _DAPL_HASH_H_ + +#include "dapl.h" + + +/********************************************************************* + * * + * Defines * + * * + *********************************************************************/ + +/* + * Hash table size. + * + * Default is small; use the larger sample values for hash tables + * known to be heavily used. The sample values chosen are the + * largest primes below 2^8, 2^9, and 2^10. 
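+ *
+ * Editor's sketch: a typical life cycle of a table built on this
+ * API (DAPL_DEF_HASHSIZE is defined just below; the key and the
+ * my_ptr datum are illustrative only):
+ *
+ *     DAPL_HASH_TABLE *tbl;
+ *     DAPL_HASH_DATA   datum;
+ *
+ *     if (dapls_hash_create (DAPL_DEF_HASHSIZE, &tbl) == DAT_SUCCESS)
+ *     {
+ *         (void) dapls_hash_insert (tbl, (DAPL_HASH_KEY) 42, my_ptr);
+ *         if (dapls_hash_search (tbl, (DAPL_HASH_KEY) 42,
+ *                                &datum) == DAT_SUCCESS)
+ *         {
+ *             ... datum now holds my_ptr ...
+ *         }
+ *         (void) dapls_hash_remove (tbl, (DAPL_HASH_KEY) 42, NULL);
+ *         (void) dapls_hash_free (tbl);
+ *     }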
+ */ +#define DAPL_DEF_HASHSIZE 251 +#define DAPL_MED_HASHSIZE 509 +#define DAPL_LRG_HASHSIZE 1021 + +#define DAPL_HASH_TABLE_DEFAULT_CAPACITY DAPL_DEF_HASHSIZE + +/* The hash function */ +#define DAPL_DOHASH(key,hashsize) ((uint64_t)((key) % (hashsize))) + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAT_RETURN +dapls_hash_create( + IN DAT_COUNT capacity, + OUT DAPL_HASH_TABLE **pp_table); + +extern DAT_RETURN +dapls_hash_free( + IN DAPL_HASH_TABLE *p_table); + +extern DAT_RETURN +dapls_hash_size( + IN DAPL_HASH_TABLE *p_table, + OUT DAT_COUNT *p_size); + +extern DAT_RETURN +dapls_hash_insert( + IN DAPL_HASH_TABLE *p_table, + IN DAPL_HASH_KEY key, + IN DAPL_HASH_DATA data); + +extern DAT_RETURN +dapls_hash_search( + IN DAPL_HASH_TABLE *p_table, + IN DAPL_HASH_KEY key, + OUT DAPL_HASH_DATA *p_data); + +extern DAT_RETURN +dapls_hash_remove( + IN DAPL_HASH_TABLE *p_table, + IN DAPL_HASH_KEY key, + OUT DAPL_HASH_DATA *p_data); + + +#endif /* _DAPL_HASH_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.c new file mode 100644 index 00000000..87204727 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.c @@ -0,0 +1,175 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_hca_util.c + * + * PURPOSE: Manage HCA structure + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" +#include "dapl_provider.h" +#include "dapl_hca_util.h" +#include "dapl_hash.h" + + +/* + * dapl_hca_alloc + * + * alloc and initialize an HCA struct + * + * Input: + * name + * port + * + * Output: + * hca_ptr + * + * Returns: + * none + * + */ +DAPL_HCA * +dapl_hca_alloc ( + char *name, + char *port ) +{ + DAPL_HCA *hca_ptr; + + hca_ptr = dapl_os_alloc (sizeof (DAPL_HCA)); + if ( NULL != hca_ptr ) + { + dapl_os_memzero (hca_ptr, sizeof (DAPL_HCA)); + + if ( DAT_SUCCESS == dapls_hash_create ( + DAPL_HASH_TABLE_DEFAULT_CAPACITY, &hca_ptr->lmr_hash_table) ) + { + dapl_os_lock_init(&hca_ptr->lock); + dapl_llist_init_head(&hca_ptr->ia_list_head); + + hca_ptr->name = dapl_ib_convert_name(name); + hca_ptr->ib_hca_handle = IB_INVALID_HANDLE; + hca_ptr->port_num = (ib_hca_port_t)dapl_os_strtol(port, NULL, 0); + hca_ptr->null_ib_cq_handle = IB_INVALID_HANDLE; + } + else + { + dapl_os_free (hca_ptr, sizeof (DAPL_HCA)); + hca_ptr = NULL; + } + } + + return (hca_ptr); +} + +/* + * dapl_hca_free + * + * free an IA INFO struct + * + * Input: + * hca_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_hca_free ( DAPL_HCA *hca_ptr ) +{ + (void) dapls_hash_free ( hca_ptr->lmr_hash_table ); + dapl_os_free (hca_ptr, sizeof (DAPL_HCA)); +} + +/* + * dapl_hca_link_ia + * + * Add an ia to the HCA structure + * + * Input: + * hca_ptr + * ia_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_hca_link_ia ( + IN DAPL_HCA *hca_ptr, + IN DAPL_IA *ia_ptr) +{ + dapl_os_lock (&hca_ptr->lock); + dapl_llist_add_head (&hca_ptr->ia_list_head, + &ia_ptr->hca_ia_list_entry, + ia_ptr); + dapl_os_unlock (&hca_ptr->lock); +} + +/* + * dapl_hca_unlink_ia + * + * Remove an ia from the hca info structure + * + * Input: + * hca_ptr + * ia_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_hca_unlink_ia ( + IN DAPL_HCA *hca_ptr, + IN DAPL_IA *ia_ptr) +{ + dapl_os_lock (&hca_ptr->lock); + /* + * If an error occurred when we were opening the IA it + * will not be linked on the list; don't unlink an unlinked + * list! + */ + if ( ! dapl_llist_is_empty (&hca_ptr->ia_list_head) ) + { + dapl_llist_remove_entry (&hca_ptr->ia_list_head, + &ia_ptr->hca_ia_list_entry); + } + dapl_os_unlock (&hca_ptr->lock); +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.h new file mode 100644 index 00000000..f722e70d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_hca_util.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_hca_util.h + * + * PURPOSE: Utility defs & routines for the HCA data structure + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_HCA_UTIL_H_ +#define _DAPL_HCA_UTIL_H_ + +#include "dapl.h" + +DAPL_HCA * +dapl_hca_alloc ( char *name, + char *port ) ; + +void +dapl_hca_free ( DAPL_HCA *hca_ptr ) ; + +void +dapl_hca_link_ia ( + IN DAPL_HCA *hca_ptr, + IN DAPL_IA *ia_info ) ; + +void +dapl_hca_unlink_ia ( + IN DAPL_HCA *hca_ptr, + IN DAPL_IA *ia_info ) ; + + +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_close.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_close.c new file mode 100644 index 00000000..e34fb2fa --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_close.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ia_close.c + * + * PURPOSE: Interface Adapter management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_ia_util.h" + +/* + * dapl_ia_close + * + * DAPL Requirements Version xxx, 6.2.1.2 + * + * Close a provider, clean up resources, etc. 
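+ *
+ * Editor's sketch of the two legal invocations, per the flag test
+ * in the body below:
+ *
+ *     (void) dapl_ia_close (ia_handle, DAT_CLOSE_GRACEFUL_FLAG);
+ *     (void) dapl_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG);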
+ * + * Input: + * ia_handle + * + * Output: + * none + * + * Return Values: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_ia_close ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_CLOSE_FLAGS ia_flags) +{ + DAPL_IA *ia_ptr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ia_close (%p, %d)\n", + ia_handle, + ia_flags); + + ia_ptr = (DAPL_IA *)ia_handle; + + if (DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + + if ( DAT_CLOSE_ABRUPT_FLAG == ia_flags ) + { + dat_status = dapl_ia_abrupt_close (ia_ptr); + } + else if ( DAT_CLOSE_GRACEFUL_FLAG == ia_flags ) + { + dat_status = dapl_ia_graceful_close (ia_ptr); + } + else + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + } + + bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_open.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_open.c new file mode 100644 index 00000000..283cfdbc --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_open.c @@ -0,0 +1,529 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ia_open.c + * + * PURPOSE: Interface Adapter management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_provider.h" +#include "dapl_evd_util.h" +#include "dapl_hca_util.h" +#include "dapl_ia_util.h" +#include "dapl_adapter_util.h" + +#ifndef NO_NAME_SERVICE +#include "dapl_name_service.h" +#endif + +/* + * LOCAL PROTOTYPES + */ +#ifdef IBHOSTS_NAMING +void dapli_assign_hca_ip_address( + DAPL_HCA *hca_ptr, + char *device_name); +#endif +char *dapli_get_adapter_num ( + char *device_name); + +void dapli_setup_dummy_addr( + IN DAPL_HCA *hca_ptr, + IN char *hca_name); + +void dapli_hca_cleanup ( + DAPL_HCA *hca_ptr, + DAT_BOOLEAN dec_ref ); + +int32_t dapl_ib_init_complete = FALSE; + +/* + * dapl_ia_open + * + * DAPL Requirements Version xxx, 6.2.1.1 + * + * Open a provider and return a handle. The handle enables the user + * to invoke operations on this provider. + * + * The dat_ia_open call is actually part of the DAT registration module. + * That function maps the DAT_NAME parameter of dat_ia_open to a DAT_PROVIDER, + * and calls this function. 
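+ *
+ * Editor's sketch of the consumer-side call that lands here; the
+ * provider name "ibnic0" and the queue length are illustrative:
+ *
+ *     DAT_IA_HANDLE  ia;
+ *     DAT_EVD_HANDLE async_evd = DAT_HANDLE_NULL;
+ *
+ *     if (dat_ia_open ("ibnic0", 8, &async_evd, &ia) == DAT_SUCCESS)
+ *     {
+ *         ... use ia, then dat_ia_close (ia, DAT_CLOSE_GRACEFUL_FLAG) ...
+ *     }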
+ * + * Input: + * provider + * async_evd_qlen + * async_evd_handle_ptr + * + * Output: + * async_evd_handle + * ia_handle + * + * Return Values: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_INVALID_HANDLE + * DAT_PROVIDER_NOT_FOUND (returned by dat registry if necessary) + */ +DAT_RETURN +dapl_ia_open ( + IN const DAT_NAME_PTR name, + IN DAT_COUNT async_evd_qlen, + INOUT DAT_EVD_HANDLE *async_evd_handle_ptr, + OUT DAT_IA_HANDLE *ia_handle_ptr) +{ + DAT_RETURN dat_status; + DAT_PROVIDER *provider; + DAPL_HCA *hca_ptr; + DAPL_IA *ia_ptr; + DAPL_EVD *evd_ptr; + + dat_status = DAT_SUCCESS; + hca_ptr = NULL; + ia_ptr = NULL; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ia_open (%s, %d, %p, %p)\n", + name, + async_evd_qlen, + async_evd_handle_ptr, + ia_handle_ptr); + + /* Initialize IB verbs library and provider list */ + if ( !dapl_ib_init_complete ) + { + dapls_ib_init (); + dapl_ib_init_complete = TRUE; + + /* initialize the provider list */ + dat_status = dapl_provider_list_create(); + if (DAT_SUCCESS != dat_status) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapl_provider_list_create failed %d\n", dat_status); + goto bail; + } + } + + dat_status = dapl_provider_list_search (name, &provider); + if (dat_status != DAT_SUCCESS) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1); + goto bail; + } + + /* ia_handle_ptr and async_evd_handle_ptr cannot be NULL */ + if (ia_handle_ptr == NULL) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); + goto bail; + } + if (async_evd_handle_ptr == NULL) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + + /* initialize the caller's OUT param */ + *ia_handle_ptr = DAT_HANDLE_NULL; + + /* get the hca_ptr */ + hca_ptr = (DAPL_HCA *)provider->extension; + + /* + * Open the HCA if it has not been done before. + */ + dapl_os_lock (&hca_ptr->lock); + if (hca_ptr->ib_hca_handle == IB_INVALID_HANDLE ) + { + /* register with the HW */ + dat_status = dapls_ib_open_hca (hca_ptr->name, + &hca_ptr->ib_hca_handle); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, + "dapls_ib_open_hca failed %d\n", dat_status); + dapl_os_unlock (&hca_ptr->lock); + goto bail; + } + + /* create a cq domain for this HCA */ + dat_status = dapls_ib_cqd_create (hca_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, + "ERR: Cannot allocate CQD: err 0x%x\n", + dat_status); + dapli_hca_cleanup (hca_ptr, DAT_FALSE); + dapl_os_unlock (&hca_ptr->lock); + goto bail; + } + /* + * Obtain the IP address associated with this name and HCA. + */ +#ifdef IBHOSTS_NAMING + dapli_assign_hca_ip_address (hca_ptr, name); +#endif + /* + * Obtain IA attributes from the HCA to limit certain operations. 
+ * If using DAPL_ATS naming, ib_query_hca will also set the ip
+ * address -- FIXME: will revisit this when we add the DAPL_ATS flag
+ */
+ dat_status = dapls_ib_query_hca (hca_ptr,
+ &hca_ptr->ia_attr,
+ NULL,
+ &hca_ptr->hca_address);
+ if (dat_status != DAT_SUCCESS)
+ {
+ dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
+ "dapls_ib_query_hca failed 0x%x\n", dat_status);
+ dapli_hca_cleanup (hca_ptr, DAT_FALSE);
+ dapl_os_unlock (&hca_ptr->lock);
+ goto bail;
+ }
+
+ }
+
+ /* Take a reference on the hca_handle */
+ dapl_os_atomic_inc (& hca_ptr->handle_ref_count );
+ dapl_os_unlock (&hca_ptr->lock);
+
+ /* Allocate and initialize ia structure */
+ ia_ptr = dapl_ia_alloc (provider, hca_ptr);
+ if (!ia_ptr)
+ {
+ dapl_os_lock (&hca_ptr->lock);
+ dapli_hca_cleanup (hca_ptr, DAT_TRUE);
+ dapl_os_unlock (&hca_ptr->lock);
+ dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+ goto bail;
+ }
+
+ /* we need an async EVD for this IA
+ * use the one passed in (if non-NULL) or create one
+ */
+
+ evd_ptr = (DAPL_EVD *) *async_evd_handle_ptr;
+ if (evd_ptr)
+ {
+ if (evd_ptr != DAT_EVD_ASYNC_EXISTS)
+ {
+ if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD) ||
+ ! (evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG))
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_ASYNC);
+ goto bail;
+ }
+
+ /* InfiniBand allows only one asynchronous event handler */
+ /* per HCA (see InfiniBand Spec, release 1.1, vol I, */
+ /* section 11.5.2, page 559). */
+ /* */
+ /* We need only make sure that this EVD's CQ belongs to */
+ /* the same HCA as is being opened. */
+
+ if ( evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle !=
+ hca_ptr->ib_hca_handle )
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EVD_ASYNC);
+ goto bail;
+ }
+
+ ia_ptr->cleanup_async_error_evd = DAT_FALSE;
+ ia_ptr->async_error_evd = evd_ptr;
+ }
+ }
+ else
+ {
+ /*
+ * Verify we have > 0 length, and let the provider check the size
+ */
+ if (async_evd_qlen <= 0)
+ {
+ dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+ goto bail;
+ }
+ dat_status = dapls_evd_internal_create (ia_ptr,
+ NULL, /* CNO ptr */
+ async_evd_qlen,
+ DAT_EVD_ASYNC_FLAG,
+ &evd_ptr);
+ if (dat_status != DAT_SUCCESS)
+ {
+ goto bail;
+ }
+
+ /* Reference to the EVD from the IA/async event stream. */
+ dapl_os_atomic_inc (& evd_ptr->evd_ref_count);
+
+ dapl_os_lock ( &hca_ptr->lock );
+ if ( hca_ptr->async_evd != (DAPL_EVD *) 0 )
+ {
+ /*
+ * The async EVD for this HCA has already been assigned.
+ * It's an error to try and assign another one.
+ */
+
+ dapl_os_atomic_dec ( &evd_ptr->evd_ref_count );
+ dapl_evd_free ( evd_ptr );
+ /*
+ dapl_os_unlock ( &hca_ptr->lock );
+ dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4);
+ goto bail;
+ */
+ ia_ptr->cleanup_async_error_evd = DAT_FALSE;
+ ia_ptr->async_error_evd = hca_ptr->async_evd;
+ dapl_os_unlock ( &hca_ptr->lock );
+
+ dat_status = DAT_SUCCESS;
+ *async_evd_handle_ptr = hca_ptr->async_evd;
+ }
+ else
+ {
+ hca_ptr->async_evd = evd_ptr;
+ dapl_os_unlock ( &hca_ptr->lock );
+
+ ia_ptr->cleanup_async_error_evd = DAT_TRUE;
+ ia_ptr->async_error_evd = evd_ptr;
+
+ /* Register the handlers associated with the async EVD.
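+ * (Editor's note: at this point the newly created EVD has just
+ * become the HCA-wide async EVD; the handlers wired up here are
+ * the dapl_evd_*_async_error_callback routines declared in
+ * dapl_evd_util.h.)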
*/
+ dat_status = dapls_ia_setup_callbacks ( ia_ptr, evd_ptr );
+ if ( dat_status != DAT_SUCCESS )
+ {
+ goto bail;
+ }
+ *async_evd_handle_ptr = evd_ptr;
+ }
+ }
+
+ *ia_handle_ptr = ia_ptr;
+
+bail:
+ if (dat_status != DAT_SUCCESS)
+ {
+ if (ia_ptr)
+ {
+ dapl_ia_close (ia_ptr, DAT_CLOSE_ABRUPT_FLAG);
+ }
+ }
+
+ dapl_dbg_log (DAPL_DBG_TYPE_RTN,
+ "dapl_ia_open() returns 0x%x\n", dat_status);
+
+ return (dat_status);
+}
+
+/*
+ * dapli_hca_cleanup
+ *
+ * Clean up partially allocated HCA stuff. Strictly to make cleanup
+ * simple.
+ */
+void
+dapli_hca_cleanup (
+ DAPL_HCA *hca_ptr,
+ DAT_BOOLEAN dec_ref )
+{
+ dapls_ib_close_hca (hca_ptr->ib_hca_handle);
+ hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
+ if ( dec_ref == DAT_TRUE )
+ {
+ dapl_os_atomic_dec (& hca_ptr->handle_ref_count );
+ }
+}
+
+
+/*
+ * dapli_assign_hca_ip_address
+ *
+ * Obtain the IP address of the adapter. This is a simple
+ * scheme that creates a name that must appear available to
+ * DNS, e.g. it must be in the local site DNS or in the local
+ * /etc/hosts file, etc.
+ *
+ * <hostname>_ib<adapter number>
+ *
+ * This scheme obviously doesn't work with adapters from
+ * multiple vendors, but will suffice in common installations.
+ *
+ * Input:
+ * hca_ptr Pointer to HCA structure
+ * device_name Name of device as reported by the provider
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * none
+ */
+
+#define NAMELEN 128
+
+void
+dapli_assign_hca_ip_address (
+ DAPL_HCA *hca_ptr,
+ char *device_name)
+{
+ char *adapter_num;
+#ifdef IBHOSTS_NAMING
+ struct addrinfo *addr;
+#endif
+ char hostname[NAMELEN];
+ char *str;
+ int rc;
+
+ rc = gethostname (hostname, NAMELEN);
+
+ /*
+ * Strip off domain info if it exists (e.g. mynode.mydomain.com)
+ */
+ for (str = hostname; *str && *str != '.'; )
+ {
+ str++;
+ }
+ if ( *str == '.' )
+ {
+ *str = '\0';
+ }
+ strcat (hostname, "_ib");
+ adapter_num = dapli_get_adapter_num (device_name);
+ strcat (hostname, adapter_num);
+
+#ifdef IBHOSTS_NAMING
+ rc = dapls_osd_getaddrinfo (hostname, &addr);
+ if(!rc)
+ {
+ hca_ptr->hca_address = *((DAT_SOCK_ADDR6 *)addr->ai_addr);
+ /* free the addrinfo on the success path only; on failure it
+ * was never allocated.
+ */
+ dapls_osd_freeaddrinfo(addr);
+ return;
+ }
+#endif /* IBHOSTS_NAMING */
+ /* Not registered in DNS, provide a dummy value */
+ dapli_setup_dummy_addr(hca_ptr, hostname);
+#if 0
+ dapl_os_memzero (&hca_ptr->hca_address, sizeof (DAT_SOCK_ADDR6));
+
+ hca_ptr->hca_address.sin6_family = AF_INET6;
+
+ dapl_os_memcpy (&hca_ptr->hca_address.sin6_addr.s6_addr[12],
+ &((struct sockaddr_in *)addr->ai_addr)->sin_addr.s_addr,
+ 4);
+#endif
+}
+
+
+/*
+ * dapli_setup_dummy_addr
+ *
+ * Set up a dummy local address for the HCA. Things are not going
+ * to work too well if this happens.
+ * We call this routine if:
+ * - remote host adapter name is not in DNS
+ * - IPoIB implementation is not correctly set up
+ * - Similar nonsense.
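+ *
+ * (Editor's note: the fallback below writes the constant 0x01020304
+ * into sin_addr.s_addr without htonl(), so the dotted-quad it shows
+ * depends on host byte order; it is a recognizable placeholder, not
+ * a routable address.)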
+ *
+ * Input:
+ * hca_ptr
+ * rhost_name Name of remote adapter
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * none
+ */
+void
+dapli_setup_dummy_addr (
+ IN DAPL_HCA *hca_ptr,
+ IN char *rhost_name )
+{
+ struct sockaddr_in *si;
+
+ /* Not registered in DNS, provide a dummy value */
+ dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+ "WARNING: <%s> not registered in DNS, using dummy IP value\n",
+ rhost_name);
+ si = (struct sockaddr_in *)&hca_ptr->hca_address;
+ hca_ptr->hca_address.sin6_family = AF_INET;
+ si->sin_addr.s_addr = 0x01020304;
+}
+
+
+/*
+ * dapli_get_adapter_num
+ *
+ * Given a device name, return a string of the device number
+ *
+ * Input:
+ * device_name Name of device as reported by the provider
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * char * to the number string
+ */
+char *
+dapli_get_adapter_num (
+ char *device_name)
+{
+ static char *zero = "0";
+ char *p;
+
+ /*
+ * Optimistically simple algorithm: the device number appears at
+ * the end of the device name string. Devices that do not end
+ * in a number default to "0".
+ */
+
+ for (p = device_name; *p; p++)
+ {
+ if ( isdigit (*p) )
+ {
+ return p;
+ }
+ }
+
+ return zero;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_query.c
new file mode 100644
index 00000000..1527d1ba
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_query.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ia_query.c
+ *
+ * PURPOSE: Interface Adapter management
+ * Description: Interfaces in this file are completely described in
+ * the DAPL 1.1 API, Chapter 6, section 2
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+#include "dapl_vendor.h"
+
+/*
+ * dapl_ia_query
+ *
+ * DAPL Requirements Version xxx, 6.2.1.3
+ *
+ * Provide the consumer with Interface Adapter and Provider parameters.
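+ *
+ * Editor's sketch of a typical query for both attribute blocks
+ * (illustrative only):
+ *
+ *     DAT_IA_ATTR       ia_attr;
+ *     DAT_PROVIDER_ATTR prov_attr;
+ *     DAT_EVD_HANDLE    async_evd;
+ *
+ *     (void) dapl_ia_query (ia_handle, &async_evd,
+ *                           DAT_IA_ALL, &ia_attr,
+ *                           DAT_PROVIDER_FIELD_ALL, &prov_attr);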
+ * + * Input: + * ia_handle + * ia_mask + * provider_mask + * + * Output: + * async_evd_handle + * ia_parameters + * provider_parameters + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_ia_query ( + IN DAT_IA_HANDLE ia_handle, + OUT DAT_EVD_HANDLE *async_evd_handle, + IN DAT_IA_ATTR_MASK ia_attr_mask, + OUT DAT_IA_ATTR *ia_attr, + IN DAT_PROVIDER_ATTR_MASK provider_attr_mask, + OUT DAT_PROVIDER_ATTR *provider_attr ) +{ + DAPL_IA *ia_ptr; + DAT_RETURN dat_status; + struct evd_merge_type { + DAT_BOOLEAN array[6][6]; + } *evd_merge; + DAT_BOOLEAN val; + int i; + int j; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_ia_query (%p, %p, 0x%x, %p, 0x%x, %p)\n", + ia_handle, + async_evd_handle, + ia_attr_mask, + ia_attr, + provider_attr_mask, + provider_attr); + + ia_ptr = (DAPL_IA *)ia_handle; + dat_status = DAT_SUCCESS; + + if (DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA); + goto bail; + } + + if ( NULL != async_evd_handle ) + { + *async_evd_handle = ia_ptr->async_error_evd; + } + + if ( ia_attr_mask & DAT_IA_ALL ) + { + if ( NULL == ia_attr ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); + goto bail; + } + + /* + * Obtain parameters from the HCA. Protect against multiple + * IAs beating on the HCA at the same time. + */ + dat_status = dapls_ib_query_hca (ia_ptr->hca_ptr, ia_attr, NULL, NULL); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + } + + if ( ia_attr_mask & ~DAT_IA_ALL ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + + if ( provider_attr_mask & DAT_PROVIDER_FIELD_ALL ) + { + if ( NULL == provider_attr ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG6); + goto bail; + } + + dapl_os_memcpy ( provider_attr->provider_name, + ia_ptr->header.provider->device_name, + min ((int)dapl_os_strlen (ia_ptr->header.provider->device_name),(int)DAT_NAME_MAX_LENGTH ) ); + provider_attr->provider_version_major = VN_PROVIDER_MAJOR; + provider_attr->provider_version_minor = VN_PROVIDER_MINOR; + provider_attr->dapl_version_major = DAT_VERSION_MAJOR; + provider_attr->dapl_version_minor = DAT_VERSION_MINOR; + provider_attr->lmr_mem_types_supported = + DAT_MEM_TYPE_VIRTUAL | DAT_MEM_TYPE_LMR; +#if VN_MEM_SHARED_VIRTUAL_SUPPORT > 0 + provider_attr->lmr_mem_types_supported |= DAT_MEM_TYPE_SHARED_VIRTUAL; +#endif + provider_attr->iov_ownership_on_return = DAT_IOV_CONSUMER; + provider_attr->dat_qos_supported = DAT_QOS_BEST_EFFORT; + provider_attr->completion_flags_supported = DAT_COMPLETION_DEFAULT_FLAG; + provider_attr->is_thread_safe = DAT_THREADSAFE; + /* + * N.B. The second part of the following equation will evaluate + * to 0 unless IBHOSTS_NAMING is enabled. + */ + provider_attr->max_private_data_size = + dapls_ib_private_data_size (NULL, DAPL_PDATA_CONN_REQ) - + (sizeof (DAPL_PRIVATE) - DAPL_MAX_PRIVATE_DATA_SIZE); + provider_attr->supports_multipath = DAT_FALSE; + provider_attr->ep_creator = DAT_PSP_CREATES_EP_NEVER; + provider_attr->optimal_buffer_alignment = DAT_OPTIMAL_ALIGNMENT; + provider_attr->num_provider_specific_attr = 0; + provider_attr->provider_specific_attr = NULL; + /* + * Set up evd_stream_merging_supported options. Note there is + * one bit per allowable combination, using the ordinal + * position of the DAT_EVD_FLAGS as positions in the + * array. e.g. 
+ * [0][0] is DAT_EVD_SOFTWARE_FLAG | DAT_EVD_SOFTWARE_FLAG, + * [0][1] is DAT_EVD_SOFTWARE_FLAG | DAT_EVD_CR_FLAG, and + * [2][4] is DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG + * + * Most combinations are true, so initialize the array that way. + * Then finish by resetting the bad combinations. + * + * DAT_EVD_ASYNC_FLAG is not supported. InfiniBand only allows + * a single asynchronous event handle per HCA, and the first + * dat_ia_open forces the creation of the only one that can be + * used. We disallow the user from creating an ASYNC EVD here. + */ + + evd_merge = (struct evd_merge_type *)&provider_attr->evd_stream_merging_supported[0][0]; + val = DAT_TRUE; + for ( i = 0; i < 6; i++) + { + if (i > 4) + { + /* ASYNC EVD is 5, so entire row will be 0 */ + val = DAT_FALSE; + } + for ( j = 0; j < 5; j++) + { + evd_merge->array[i][j] = val; + } + /* Set the ASYNC_EVD column to FALSE */ + evd_merge->array[i][5] = DAT_FALSE; + } + +#ifndef DAPL_MERGE_CM_DTO + /* DAT_EVD_DTO_FLAG | DAT_EVD_CONNECTION_FLAG */ + evd_merge->array[2][3] = DAT_FALSE; + /* DAT_EVD_CONNECTION_FLAG | DAT_EVD_DTO_FLAG */ + evd_merge->array[3][2] = DAT_FALSE; +#endif + } + +bail: + dapl_dbg_log (DAPL_DBG_TYPE_RTN, + "dapl_ia_query () returns 0x%x\n", + dat_status); + + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.c new file mode 100644 index 00000000..6a970db6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.c @@ -0,0 +1,1244 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_ia_util.c + * + * PURPOSE: Manage IA Info structure + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_hca_util.h" +#include "dapl_ia_util.h" +#include "dapl_sp_util.h" +#include "dapl_evd_util.h" +#include "dapl_cno_util.h" +#include "dapl_cr_util.h" +#include "dapl_adapter_util.h" +#ifndef NO_NAME_SERVICE +#include "dapl_name_service.h" +#endif + +/* Internal prototype */ +void dapli_ia_release_hca ( + DAPL_HCA *hca_ptr ); + + +/* + * dapl_ia_alloc + * + * alloc and initialize an IA INFO struct + * + * Input: + * none + * + * Output: + * ia_ptr + * + * Returns: + * none + * + */ +DAPL_IA * +dapl_ia_alloc ( DAT_PROVIDER * provider, DAPL_HCA * hca_ptr ) +{ + DAPL_IA * ia_ptr; + + /* Allocate IA */ + ia_ptr = (DAPL_IA *) dapl_os_alloc (sizeof (DAPL_IA)); + if (ia_ptr == NULL) + { + return (NULL); + } + + /* zero the structure */ + dapl_os_memzero (ia_ptr, sizeof (DAPL_IA)); + + /* + * initialize the header + */ + ia_ptr->header.provider = provider; + ia_ptr->header.magic = DAPL_MAGIC_IA; + ia_ptr->header.handle_type = DAT_HANDLE_TYPE_IA; + ia_ptr->header.owner_ia = ia_ptr; + ia_ptr->header.user_context.as_64 = 0; + ia_ptr->header.user_context.as_ptr = NULL; + dapl_llist_init_entry (&ia_ptr->header.ia_list_entry); + dapl_os_lock_init (&ia_ptr->header.lock); + + /* + * initialize the body + */ + ia_ptr->hca_ptr = hca_ptr; + ia_ptr->async_error_evd = NULL; + ia_ptr->cleanup_async_error_evd = DAT_FALSE; + dapl_llist_init_entry (&ia_ptr->hca_ia_list_entry); + dapl_llist_init_head (&ia_ptr->ep_list_head); + dapl_llist_init_head (&ia_ptr->lmr_list_head); + dapl_llist_init_head (&ia_ptr->rmr_list_head); + dapl_llist_init_head (&ia_ptr->pz_list_head); + dapl_llist_init_head (&ia_ptr->evd_list_head); + dapl_llist_init_head (&ia_ptr->cno_list_head); + dapl_llist_init_head (&ia_ptr->rsp_list_head); + dapl_llist_init_head (&ia_ptr->psp_list_head); + + dapl_hca_link_ia (hca_ptr, ia_ptr); + + return (ia_ptr); +} + + +/* + * dapl_ia_abrupt_close + * + * Performs an abrupt close of the IA + * + * Input: + * ia_ptr + * + * Output: + * none + * + * Returns: + * status + * + */ + + +DAT_RETURN +dapl_ia_abrupt_close ( + IN DAPL_IA *ia_ptr ) +{ + DAT_RETURN dat_status; + DAPL_EP *ep_ptr, *next_ep_ptr; + DAPL_LMR *lmr_ptr, *next_lmr_ptr; + DAPL_RMR *rmr_ptr, *next_rmr_ptr; + DAPL_PZ *pz_ptr, *next_pz_ptr; + DAPL_EVD *evd_ptr, *next_evd_ptr; + DAPL_CNO *cno_ptr, *next_cno_ptr; + DAPL_SP *sp_ptr, *next_sp_ptr; /* for PSP and RSP queues */ + DAPL_CR *cr_ptr, *next_cr_ptr; + DAPL_HCA *hca_ptr; + + dapl_dbg_log (DAPL_DBG_TYPE_API | DAPL_DBG_TYPE_CM, + "dapl_ia_abrupt_close (%p)\n",ia_ptr); + + dat_status = DAT_SUCCESS; + + /* + * clear all the data structures associated with the IA. + * this must be done in order (rmr,rsp) before (ep lmr psp) before + * (pz evd) + * + * Note that in all the following we can leave the loop either + * when we run out of entries, or when we get back to the head + * if we end up skipping an entry. + */ + + rmr_ptr = (dapl_llist_is_empty (&ia_ptr->rmr_list_head) + ? 
NULL : dapl_llist_peek_head (&ia_ptr->rmr_list_head)); + while (rmr_ptr != NULL) + { + next_rmr_ptr = dapl_llist_next_entry (&ia_ptr->rmr_list_head, + &rmr_ptr->header.ia_list_entry); + dat_status = dapl_rmr_free (rmr_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): rmr_free(%p) returns %x\n", + rmr_ptr, + dat_status ); + } + rmr_ptr = next_rmr_ptr; + } + + sp_ptr = (dapl_llist_is_empty (&ia_ptr->rsp_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->rsp_list_head)); + while (sp_ptr != NULL) + { + next_sp_ptr = dapl_llist_next_entry (&ia_ptr->rsp_list_head, + &sp_ptr->header.ia_list_entry); + dat_status = dapl_rsp_free (sp_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): rsp_free(%p) returns %x\n", + sp_ptr, + dat_status ); + } + sp_ptr = next_sp_ptr; + } + + ep_ptr = (dapl_llist_is_empty (&ia_ptr->ep_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->ep_list_head)); + while (ep_ptr != NULL) + { + next_ep_ptr = dapl_llist_next_entry (&ia_ptr->ep_list_head, + &ep_ptr->header.ia_list_entry); + /* + * Issue a disconnect if the EP needs it + */ + if ( ep_ptr->param.ep_state == DAT_EP_STATE_CONNECTED || + ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED || + ep_ptr->param.ep_state == DAT_EP_STATE_ACTIVE_CONNECTION_PENDING || + ep_ptr->param.ep_state == DAT_EP_STATE_COMPLETION_PENDING || + ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECT_PENDING ) + { + dat_status = dapl_ep_disconnect (ep_ptr, DAT_CLOSE_ABRUPT_FLAG); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): ep_disconnect(%p) returns %x\n", + ep_ptr, + dat_status ); + } + } + /* force the EP into error state to force flush all posted DTOs. */ + { + DAT_EP_ATTR ep_attr; + DAT_NAMED_ATTR ep_state; + + dapl_os_memzero (&ep_attr, sizeof (DAT_EP_ATTR)); + ep_state.name = (char *)IB_QP_STATE; + ep_state.value = (char *)DAPL_QP_STATE_ERROR; + ep_attr.ep_provider_specific_count = 1; + ep_attr.ep_provider_specific = &ep_state; + + (void) dapls_ib_qp_modify (ia_ptr, + ep_ptr, + &ep_attr ); + } + + dat_status = dapl_ep_free (ep_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): ep_free(%p) returns %x\n", + ep_ptr, + dat_status ); + } + ep_ptr = next_ep_ptr; + } + + lmr_ptr = (dapl_llist_is_empty (&ia_ptr->lmr_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->lmr_list_head)); + while (lmr_ptr != NULL) + { + next_lmr_ptr = dapl_llist_next_entry (&ia_ptr->lmr_list_head, + &lmr_ptr->header.ia_list_entry); + dat_status = dapl_lmr_free (lmr_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): lmr_free(%p) returns %x\n", + lmr_ptr, + dat_status ); + } + lmr_ptr = next_lmr_ptr; + } + + sp_ptr = (dapl_llist_is_empty (&ia_ptr->psp_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->psp_list_head)); + while (sp_ptr != NULL) + { + /* + * Shut down the PSP so we get no further callbacks. There + * should be no competing threads after this. + */ + dat_status = dapls_ib_remove_conn_listener (ia_ptr, + sp_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): psp cannot remove listener, returns %x\n", + dat_status); + } + + next_sp_ptr = dapl_llist_next_entry (&ia_ptr->psp_list_head, + &sp_ptr->header.ia_list_entry); + + /* Remove CR's from this PSP and clean them up */ + cr_ptr = dapl_llist_is_empty (&sp_ptr->cr_list_head) ? 
NULL : + dapl_llist_peek_head (&sp_ptr->cr_list_head); + while (cr_ptr != NULL) + { + next_cr_ptr = dapl_llist_next_entry (&sp_ptr->cr_list_head, + &cr_ptr->header.ia_list_entry); + /* Remove the CR from the queue & cleanup*/ + dapl_os_lock (&sp_ptr->header.lock); + dapl_sp_remove_cr (sp_ptr, cr_ptr); + dapl_os_unlock (&sp_ptr->header.lock); + + dapls_cr_free (cr_ptr); + cr_ptr = next_cr_ptr; + } + + dat_status = dapl_psp_free (sp_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): psp_free(%p) returns %x\n", + sp_ptr, + dat_status ); + } + + sp_ptr = next_sp_ptr; + } + + pz_ptr = (dapl_llist_is_empty (&ia_ptr->pz_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->pz_list_head)); + while (pz_ptr != NULL) + { + next_pz_ptr = dapl_llist_next_entry (&ia_ptr->pz_list_head, + &pz_ptr->header.ia_list_entry); + dat_status = dapl_pz_free (pz_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): pz_free(%p) returns %x\n", + pz_ptr, + dat_status ); + } + pz_ptr = next_pz_ptr; + } + + /* + * EVDs are tricky; we want to release all except for the async + * EVD. That EVD needs to stick around until after we close the + * HCA, to accept any async events that occur. So we cycle through + * the list with dapl_llist_next_entry instead of dapl_llist_is_empty. + */ + evd_ptr = (dapl_llist_is_empty (&ia_ptr->evd_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->evd_list_head)); + while (evd_ptr != NULL) + { + next_evd_ptr = dapl_llist_next_entry (&ia_ptr->evd_list_head, + &evd_ptr->header.ia_list_entry); + if (evd_ptr == ia_ptr->async_error_evd) + { + /* Don't delete the EVD, but break any CNO connections. */ + dapl_evd_disable(evd_ptr); + dapl_evd_modify_cno(evd_ptr, DAT_HANDLE_NULL); + } + else + { + /* it isn't the async EVD; delete it. */ + dat_status = dapl_evd_free (evd_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): evd_free(%p) returns %x\n", + evd_ptr, + dat_status ); + } + } + evd_ptr = next_evd_ptr; + } + + cno_ptr = (dapl_llist_is_empty (&ia_ptr->cno_list_head) + ? NULL : dapl_llist_peek_head (&ia_ptr->cno_list_head)); + while (cno_ptr != NULL) + { + next_cno_ptr = dapl_llist_next_entry (&ia_ptr->cno_list_head, + &cno_ptr->header.ia_list_entry); + if (cno_ptr->cno_waiters > 0) + { + /* Notify the waiter the IA is going away: see uDAPL 1.1 spec, + * 6.3.2.3 + */ + dapl_cno_trigger (cno_ptr, NULL); + } + /* clean up the CNO */ + dat_status = dapl_cno_free (cno_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): cno_free(%p) returns %x\n", + cno_ptr, + dat_status ); + } + cno_ptr = next_cno_ptr; + } + + hca_ptr = ia_ptr->hca_ptr; + + /* + * Free the async EVD, shutting down callbacks from the HCA. + */ + if ( ia_ptr->async_error_evd && + (DAT_TRUE == ia_ptr->cleanup_async_error_evd) ) + { + dat_status = dapls_ia_teardown_callbacks ( ia_ptr ); + + hca_ptr->async_evd = NULL; /* It was our async EVD; nuke it. */ + + dapl_os_atomic_dec (& ia_ptr->async_error_evd->evd_ref_count); + dat_status = dapl_evd_free (ia_ptr->async_error_evd); + + if (DAT_SUCCESS != dat_status) + { + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "ia_close(ABRUPT): evd_free(%p) returns %x\n", + ia_ptr->async_error_evd, + dat_status ); + } + + ia_ptr->async_error_evd = NULL; + } + + /* + * Release our reference on the hca_handle. 
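+ * (The handle_ref_count on the HCA is shared by every IA opened on
+ * it; the matching increment is presumably taken in dapl_ia_open.)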
+ * If we are the last one, close it.
+ */
+    dapli_ia_release_hca (hca_ptr);
+
+    dapls_ia_free (ia_ptr);
+
+    return DAT_SUCCESS;    /* Abrupt close can't fail. */
+}
+
+
+/*
+ * dapl_ia_graceful_close
+ *
+ * Performs a graceful close of the IA
+ *
+ * Input:
+ *    ia_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    status
+ *
+ */
+
+DAT_RETURN
+dapl_ia_graceful_close (
+    IN DAPL_IA    *ia_ptr )
+{
+    DAT_RETURN          dat_status;
+    DAT_RETURN          cur_dat_status;
+    DAPL_EVD            *evd_ptr;
+    DAPL_LLIST_ENTRY    *entry;
+    DAPL_HCA            *hca_ptr;
+
+    dat_status = DAT_SUCCESS;
+
+    if ( !dapl_llist_is_empty (&ia_ptr->rmr_list_head) ||
+         !dapl_llist_is_empty (&ia_ptr->rsp_list_head) ||
+         !dapl_llist_is_empty (&ia_ptr->ep_list_head)  ||
+         !dapl_llist_is_empty (&ia_ptr->lmr_list_head) ||
+         !dapl_llist_is_empty (&ia_ptr->psp_list_head) ||
+         !dapl_llist_is_empty (&ia_ptr->pz_list_head) )
+    {
+        dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_IA_IN_USE);
+        goto bail;
+    }
+
+    /* if the async evd does not need to be cleaned up      */
+    /* (i.e. it was not created by dapl_ia_open),           */
+    /* then the evd list should be empty                    */
+    if ( DAT_FALSE == ia_ptr->cleanup_async_error_evd )
+    {
+        if ( !dapl_llist_is_empty (&ia_ptr->evd_list_head) )
+        {
+            dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_IA_IN_USE);
+            goto bail;
+        }
+    }
+    /* else the async evd should be the only evd in         */
+    /* the list                                             */
+    else
+    {
+        evd_ptr = (dapl_llist_is_empty (&ia_ptr->evd_list_head)
+                   ? NULL : dapl_llist_peek_head (&ia_ptr->evd_list_head));
+
+        if ( evd_ptr != NULL &&
+             ! (evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG) )
+        {
+            dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_IA_IN_USE);
+            goto bail;
+        }
+
+        entry = ia_ptr->evd_list_head;
+
+        /* if the async evd is not the only element in the list */
+        if ( entry->blink != entry->flink )
+        {
+            dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_IA_IN_USE);
+            goto bail;
+        }
+
+        /*
+         * If the async evd has a non-unary ref count (i.e. it's in
+         * use by someone besides us), the IA is still busy.
+         */
+        if ( evd_ptr->evd_ref_count != 1 )
+        {
+            dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_IA_IN_USE);
+            goto bail;
+        }
+    }
+
+    /*
+     * We've validated the call; now we can start the teardown.
+     * Because we're in the IA close routine, we're safe from races with DAPL
+     * consumers on this IA (operate/destroy races are disallowed in
+     * DAPL).
+     */
+    hca_ptr = ia_ptr->hca_ptr;
+
+    /* Tear down the async EVD if needed, first shutting down callbacks. */
+    if ( ia_ptr->async_error_evd &&
+         (DAT_TRUE == ia_ptr->cleanup_async_error_evd) )
+    {
+        cur_dat_status = dapls_ia_teardown_callbacks (ia_ptr);
+        if ( DAT_SUCCESS != cur_dat_status )
+        {
+            dat_status = cur_dat_status;
+        }
+        hca_ptr->async_evd = NULL;
+        dapl_os_atomic_dec (&ia_ptr->async_error_evd->evd_ref_count);
+        cur_dat_status = dapl_evd_free (ia_ptr->async_error_evd);
+        if ( DAT_SUCCESS != cur_dat_status )
+        {
+            dat_status = cur_dat_status;
+        }
+
+        ia_ptr->async_error_evd = NULL;
+    }
+
+    dapli_ia_release_hca (hca_ptr);
+
+    dapls_ia_free (ia_ptr);
+
+bail:
+    return dat_status;
+}
+
+/*
+ * Release a reference on the HCA handle. If the count drops to 0, close
+ * the handle. Manipulate under lock to prevent races with threads trying
+ * to open the HCA.
+ */
+void
+dapli_ia_release_hca (
+    DAPL_HCA    *hca_ptr )
+{
+    DAT_RETURN    dat_status;
+
+    dapl_os_lock (&hca_ptr->lock);
+    dapl_os_atomic_dec (&hca_ptr->handle_ref_count);
+    if ( hca_ptr->handle_ref_count == 0 )
+    {
+
+#ifndef NO_NAME_SERVICE
+        /*
+         * Remove the {ServiceID, IP-address} record.
+         */
+        dat_status = dapls_ns_remove_gid_map (hca_ptr);
+
+        if (DAT_SUCCESS != dat_status)
+        {
+            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                          "dapls_ns_remove_gid_map failed %d\n", dat_status);
+        }
+
+#endif /* NO_NAME_SERVICE */
+
+        /*
+         * Get rid of the cqd associated with the hca.
+         * Errors are logged rather than returned because this
+         * routine shouldn't fail.
+         */
+        dat_status = dapls_ib_cqd_destroy (hca_ptr);
+        if (dat_status != DAT_SUCCESS)
+        {
+            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                          "ERR: Cannot free CQD: err %x\n", dat_status);
+        }
+
+        dat_status = dapls_ib_close_hca (hca_ptr->ib_hca_handle);
+        if (dat_status != DAT_SUCCESS)
+        {
+            /* this is not good - I mean, this is bad! */
+            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                          "<1> ERROR: hca_close failed %d\n", dat_status);
+        }
+        hca_ptr->ib_hca_handle = IB_INVALID_HANDLE;
+    }
+    dapl_os_unlock (&hca_ptr->lock);
+}
+
+
+/*
+ * dapls_ia_free
+ *
+ * free an IA INFO struct
+ *
+ * Input:
+ *    ia_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    none
+ *
+ */
+void
+dapls_ia_free ( DAPL_IA *ia_ptr )
+{
+    dapl_os_assert (ia_ptr->header.magic == DAPL_MAGIC_IA);
+
+    dapl_os_assert (ia_ptr->async_error_evd == NULL);
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->lmr_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->rmr_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->ep_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->evd_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->cno_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->psp_list_head));
+    dapl_os_assert (dapl_llist_is_empty (&ia_ptr->rsp_list_head));
+
+    /*
+     * deinitialize the header
+     */
+    dapl_hca_unlink_ia (ia_ptr->hca_ptr, ia_ptr);
+    ia_ptr->header.magic = DAPL_MAGIC_INVALID;    /* reset magic to prevent reuse */
+    dapl_os_lock_destroy (&ia_ptr->header.lock);
+
+    dapl_os_free (ia_ptr, sizeof (DAPL_IA));
+}
+
+/*
+ * dapl_ia_link_ep
+ *
+ * Add an ep to the IA structure
+ *
+ * Input:
+ *    ia_ptr
+ *    ep_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    none
+ *
+ */
+void
+dapl_ia_link_ep (
+    IN DAPL_IA    *ia_ptr,
+    IN DAPL_EP    *ep_ptr)
+{
+    dapl_os_lock (&ia_ptr->header.lock);
+    dapl_llist_add_head (&ia_ptr->ep_list_head,
+                         &ep_ptr->header.ia_list_entry,
+                         ep_ptr);
+    dapl_os_unlock (&ia_ptr->header.lock);
+}
+
+/*
+ * dapl_ia_unlink_ep
+ *
+ * Remove an ep from the ia info structure
+ *
+ * Input:
+ *    ia_ptr
+ *    ep_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    none
+ *
+ */
+void
+dapl_ia_unlink_ep (
+    IN DAPL_IA    *ia_ptr,
+    IN DAPL_EP    *ep_ptr)
+{
+    dapl_os_lock (&ia_ptr->header.lock);
+    dapl_llist_remove_entry (&ia_ptr->ep_list_head,
+                             &ep_ptr->header.ia_list_entry);
+    dapl_os_unlock (&ia_ptr->header.lock);
+}
+
+/*
+ * dapl_ia_link_lmr
+ *
+ * Add an lmr to the IA structure
+ *
+ * Input:
+ *    ia_ptr
+ *    lmr_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    none
+ *
+ */
+void
+dapl_ia_link_lmr (
+    IN DAPL_IA    *ia_ptr,
+    IN DAPL_LMR   *lmr_ptr)
+{
+    dapl_os_lock (&ia_ptr->header.lock);
+    dapl_llist_add_head (&ia_ptr->lmr_list_head,
+                         &lmr_ptr->header.ia_list_entry,
+                         lmr_ptr);
+    dapl_os_unlock (&ia_ptr->header.lock);
+}
+
+/*
+ * dapl_ia_unlink_lmr
+ *
+ * Remove an lmr from the ia info structure
+ *
+ * Input:
+ *    ia_ptr
+ *    lmr_ptr
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ * none + * + */ +void +dapl_ia_unlink_lmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_LMR *lmr_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (&ia_ptr->lmr_list_head, + &lmr_ptr->header.ia_list_entry); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_link_rmr + * + * Add an rmr to the IA structure + * + * Input: + * ia_ptr + * rmr_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_rmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_RMR *rmr_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->rmr_list_head, + &rmr_ptr->header.ia_list_entry, + rmr_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_unlink_rmr + * + * Remove an rmr from the ia info structure + * + * Input: + * ia_ptr + * rmr_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_unlink_rmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_RMR *rmr_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (&ia_ptr->rmr_list_head, + &rmr_ptr->header.ia_list_entry); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_link_pz + * + * Add an pz to the IA structure + * + * Input: + * ia_ptr + * pz_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_pz ( + IN DAPL_IA *ia_ptr, + IN DAPL_PZ *pz_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->pz_list_head, + &pz_ptr->header.ia_list_entry, + pz_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_unlink_pz + * + * Remove an pz from the ia info structure + * + * Input: + * ia_ptr + * pz_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_unlink_pz ( + IN DAPL_IA *ia_ptr, + IN DAPL_PZ *pz_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (&ia_ptr->pz_list_head, + &pz_ptr->header.ia_list_entry); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_link_evd + * + * Add an evd to the IA structure + * + * Input: + * ia_ptr + * evd_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_evd ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->evd_list_head, + &evd_ptr->header.ia_list_entry, + evd_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_unlink_evd + * + * Remove an evd from the ia info structure + * + * Input: + * ia_ptr + * evd_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_unlink_evd ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (&ia_ptr->evd_list_head, + &evd_ptr->header.ia_list_entry); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_link_cno + * + * Add an cno to the IA structure + * + * Input: + * ia_ptr + * cno_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_cno ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->cno_list_head, + &cno_ptr->header.ia_list_entry, + cno_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_unlink_cno + * + * Remove an cno from the ia info structure + * + * Input: + * ia_ptr + * cno_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_unlink_cno ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (&ia_ptr->cno_list_head, + &cno_ptr->header.ia_list_entry); + 
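+    /* The CNO is only unlinked here; the object itself is
+     * destroyed separately, e.g. by dapl_cno_free. */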
dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapl_ia_link_psp + * + * Add an psp to the IA structure + * + * Input: + * ia_ptr + * sp_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_psp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->psp_list_head, + &sp_ptr->header.ia_list_entry, + sp_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * daps_ia_unlink_sp + * + * Remove an sp from the appropriate ia rsp or psp queue + * + * Input: + * ia_ptr + * sp_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapls_ia_unlink_sp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_ptr) +{ + DAPL_LLIST_HEAD *list_head; + + if ( sp_ptr->header.handle_type == DAT_HANDLE_TYPE_PSP ) + { + list_head = &ia_ptr->psp_list_head; + } + else + { + dapl_os_assert (sp_ptr->header.handle_type == DAT_HANDLE_TYPE_RSP); + list_head = &ia_ptr->rsp_list_head; + } + + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_remove_entry (list_head, + &sp_ptr->header.ia_list_entry); + dapl_os_unlock (&ia_ptr->header.lock); +} + +/* + * dapls_ia_sp_search + * + * Find an RSP or PSP on the IA list with a matching conn_qual value + * + * Input: + * ia_ptr + * sp_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +DAPL_SP * +dapls_ia_sp_search ( + IN DAPL_IA *ia_ptr, + IN DAT_CONN_QUAL conn_qual, + IN DAT_BOOLEAN is_psp ) +{ + DAPL_SP *sp_ptr; + DAPL_LLIST_HEAD *list_head; + + if ( is_psp ) + { + list_head = &ia_ptr->psp_list_head; + } + else + { + list_head = &ia_ptr->rsp_list_head; + } + + dapl_os_lock (&ia_ptr->header.lock); + + sp_ptr = (dapl_llist_is_empty (list_head) ? NULL : + dapl_llist_peek_head (list_head)); + + while (sp_ptr != NULL) + { + if ( sp_ptr->conn_qual == conn_qual ) + { + break; + } + sp_ptr = dapl_llist_next_entry (list_head, + &sp_ptr->header.ia_list_entry); + } + + dapl_os_unlock (&ia_ptr->header.lock); + + return sp_ptr; +} + + +/* + * dapl_ia_link_rsp + * + * Add an rsp to the IA structure + * + * Input: + * ia_ptr + * sp_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_ia_link_rsp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_ptr) +{ + dapl_os_lock (&ia_ptr->header.lock); + dapl_llist_add_head (&ia_ptr->rsp_list_head, + &sp_ptr->header.ia_list_entry, + sp_ptr); + dapl_os_unlock (&ia_ptr->header.lock); +} + + +DAT_RETURN +dapls_ia_setup_callbacks ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *async_evd_ptr ) +{ + dapl_ibal_ca_t *p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle; + DAT_RETURN dat_status = DAT_SUCCESS; + + /* back reference to ia_ptr needed for DAPL_ASYNC_UNAFILIATED callback */ + if ( p_ca == NULL ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapls_ia_setup_callbacks failed %d\n", dat_status); + return (DAT_INVALID_HANDLE); + } + + p_ca->ia_ptr = (DAT_PVOID*)ia_ptr; + + /* unaffiliated handler */ + dat_status = + dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_UNAFILIATED, + NULL, + (ib_async_handler_t)dapl_evd_un_async_error_callback, + async_evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_un_async_error_eh failed %d\n", dat_status); + goto bail; + } + + /* affiliated cq handler */ + dat_status = dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_CQ_ERROR, + NULL, + (ib_async_handler_t)dapl_evd_cq_async_error_callback, + async_evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_cq_async_error_eh failed %d\n", dat_status); + goto bail; 
+ } + + /* affiliated qp handler */ + dat_status = dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_QP_ERROR, + NULL, + (ib_async_handler_t)dapl_evd_qp_async_error_callback, + ia_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_qp_async_error_eh failed %d\n", dat_status); + goto bail; + } + +bail: + return dat_status; +} + + +DAT_RETURN +dapls_ia_teardown_callbacks ( + IN DAPL_IA *ia_ptr) +{ + DAT_RETURN dat_status = DAT_SUCCESS; + + /* unaffiliated handler */ + dat_status = + dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_UNAFILIATED, + NULL, + (ib_async_handler_t) 0, + NULL); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_un_async_error_eh failed %d\n", dat_status); + goto bail; + } + + /* affiliated cq handler */ + dat_status = dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_CQ_ERROR, + NULL, + (ib_async_handler_t) 0, + NULL); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_cq_async_error_eh failed %d\n", dat_status); + goto bail; + } + + /* affiliated qp handler */ + dat_status = dapls_ib_setup_async_callback ( + ia_ptr, + DAPL_ASYNC_QP_ERROR, + NULL, + (ib_async_handler_t) 0, + NULL); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "ib_set_qp_async_error_eh failed %d\n", dat_status); + goto bail; + } + +bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.h new file mode 100644 index 00000000..a674a019 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ia_util.h @@ -0,0 +1,147 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_ia_util.h + * + * PURPOSE: Utility defs & routines for the IA data structure + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_IA_UTIL_H_ +#define _DAPL_IA_UTIL_H_ + +#include "dapl.h" + +DAPL_IA * +dapl_ia_alloc ( + DAT_PROVIDER *provider, + DAPL_HCA *hca_ptr) ; + +DAT_RETURN +dapl_ia_abrupt_close ( + IN DAPL_IA *ia_ptr ) ; + +DAT_RETURN +dapl_ia_graceful_close ( + IN DAPL_IA *ia_ptr ) ; + +void +dapls_ia_free ( DAPL_IA *ia_ptr ) ; + +void +dapl_ia_link_ep ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_info ) ; + +void +dapl_ia_unlink_ep ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_info ) ; + +void +dapl_ia_link_lmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_LMR *lmr_info ) ; + +void +dapl_ia_unlink_lmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_LMR *lmr_info ) ; + +void +dapl_ia_link_rmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_RMR *rmr_info ) ; + +void +dapl_ia_unlink_rmr ( + IN DAPL_IA *ia_ptr, + IN DAPL_RMR *rmr_info ) ; + +void +dapl_ia_link_pz ( + IN DAPL_IA *ia_ptr, + IN DAPL_PZ *pz_info ) ; + +void +dapl_ia_unlink_pz ( + IN DAPL_IA *ia_ptr, + IN DAPL_PZ *pz_info ) ; + +void +dapl_ia_link_evd ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_info ) ; + +void +dapl_ia_unlink_evd ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_info ) ; + +void +dapl_ia_link_cno ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_info ) ; + +void +dapl_ia_unlink_cno ( + IN DAPL_IA *ia_ptr, + IN DAPL_CNO *cno_info ) ; + +void +dapl_ia_link_psp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_info ) ; + +void +dapls_ia_unlink_sp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_info ) ; + +void +dapl_ia_link_rsp ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_info ) ; + +DAPL_SP * +dapls_ia_sp_search ( + IN DAPL_IA *ia_ptr, + IN DAT_CONN_QUAL conn_qual, + IN DAT_BOOLEAN is_psp ) ; + +DAT_RETURN +dapls_ia_setup_callbacks ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *async_evd_ptr ); + +DAT_RETURN +dapls_ia_teardown_callbacks ( + IN DAPL_IA *ia_ptr ); + +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_init.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_init.h new file mode 100644 index 00000000..2002c911 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_init.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_init.h + * + * PURPOSE: Prototypes for library-interface init and fini functions + * + * $Id$ + * + **********************************************************************/ + + +#ifndef _DAPL_INIT_H_ +#define _DAPL_INIT_H_ + +DAPL_EXPORT void DAT_API +DAT_PROVIDER_INIT_FUNC_NAME ( + IN const DAT_PROVIDER_INFO *, + IN const char * ); /* instance data */ + +DAPL_EXPORT void DAT_API +DAT_PROVIDER_FINI_FUNC_NAME ( + IN const DAT_PROVIDER_INFO * ); + +extern void +dapl_init ( void ) ; + +extern void +dapl_fini ( void ) ; + +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_llist.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_llist.c new file mode 100644 index 00000000..95d2d8ef --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_llist.c @@ -0,0 +1,380 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_llist.c + * + * PURPOSE: Manage doubly linked lists within the DAPL Reference Implementation + * + * A link list head points to the first member of a linked list, but + * is itself not a member of the list. + * + * +---------------------------------------------+ + * | entry entry entry | + * HEAD -> | +-------+ +-------+ +-------+ | + * +--> | flink | --> | flink | --> | flink | >--+ + * | data | | data | | data | + * +--< | blink | <-- | blink | <-- | blink | <--| + * | +-------+ +-------+ +-------+ | + * | | + * +---------------------------------------------+ + * + * Note: Each of the remove functions takes an assertion failure if + * an element cannot be removed from the list. 
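+ *
+ * A minimal usage sketch (illustrative only; the dapl_llist_* calls
+ * are the routines defined below, my_payload_t is a hypothetical
+ * consumer type):
+ *
+ *     DAPL_LLIST_HEAD   head;
+ *     DAPL_LLIST_ENTRY  entry;
+ *     my_payload_t      obj;
+ *     void              *data;
+ *
+ *     dapl_llist_init_head (&head);
+ *     dapl_llist_init_entry (&entry);
+ *     dapl_llist_add_head (&head, &entry, &obj);
+ *     data = dapl_llist_peek_head (&head);    // data == &obj
+ *     data = dapl_llist_remove_head (&head);  // list is empty again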
+ * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_llist_init_head() + * + * Purpose: initialize a linked list head + */ +void +dapl_llist_init_head (DAPL_LLIST_HEAD * head) +{ + *head = NULL; +} + +/* + * dapl_llist_init_entry() + * + * Purpose: initialize a linked list entry + */ +void +dapl_llist_init_entry (DAPL_LLIST_ENTRY * entry) +{ + entry->blink = NULL; + entry->flink = NULL; + entry->data = 0; + entry->list_head = NULL; +} + +/* + * dapl_llist_is_empty() + * + * Purpose: returns TRUE if the linked list is empty + */ +DAT_BOOLEAN +dapl_llist_is_empty (DAPL_LLIST_HEAD * head) +{ + return (*head == NULL); +} + +/* + * dapl_llist_add_head() + * + * Purpose: Add an entry to the head of a linked list + */ +void +dapl_llist_add_head (DAPL_LLIST_HEAD * head, + DAPL_LLIST_ENTRY * entry, + void * data) +{ + DAPL_LLIST_ENTRY *first; + + /* deal with empty list */ + if (dapl_llist_is_empty (head)) + { + entry->flink = entry; + entry->blink = entry; + } + else + { + first = *head; + entry->flink = first; + entry->blink = first->blink; + first->blink->flink = entry; + first->blink = entry; + } + + *head = entry; + entry->data = data; + entry->list_head = head; +} + +/* + * dapl_llist_add_tail() + * + * Purpose: Add an entry to the tail of a linked list + */ +void +dapl_llist_add_tail (DAPL_LLIST_HEAD * head, + DAPL_LLIST_ENTRY * entry, + void * data) +{ + DAPL_LLIST_ENTRY *last; + + /* deal with empty list */ + if (dapl_llist_is_empty (head)) + { + *head = entry; + entry->flink = entry; + entry->blink = entry; + } + else + { + last = (*head)->blink; + entry->flink = last->flink; + entry->blink = last; + last->flink->blink = entry; + last->flink = entry; + } + entry->data = data; + entry->list_head = head; +} + + +/* + * dapl_llist_add_entry() + * + * Purpose: Add an entry before an element in the list. Does + * not verify the list or the validity of the entries passed in. 
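+ *
+ * For example (entries named for illustration only): if the list is
+ * A <-> B and dapl_llist_add_entry (head, B, N, data) is called,
+ * the result is A <-> N <-> B, with N carrying `data'.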
+ */
+void
+dapl_llist_add_entry (DAPL_LLIST_HEAD * head,
+                      DAPL_LLIST_ENTRY * entry,
+                      DAPL_LLIST_ENTRY * new_entry,
+                      void * data)
+{
+    DAPL_LLIST_ENTRY *last;
+
+    /* deal with an empty list: new_entry becomes the only element */
+    if (dapl_llist_is_empty (head))
+    {
+        *head = new_entry;
+        new_entry->flink = new_entry;
+        new_entry->blink = new_entry;
+    }
+    else
+    {
+        last = entry->blink;
+        entry->blink = new_entry;
+        last->flink = new_entry;
+
+        new_entry->flink = entry;
+        new_entry->blink = last;
+    }
+    new_entry->data = data;
+    new_entry->list_head = head;
+}
+
+/*
+ * dapl_llist_remove_head()
+ *
+ * Purpose: Remove the first entry of a linked list
+ */
+void *
+dapl_llist_remove_head (DAPL_LLIST_HEAD *head)
+{
+    DAPL_LLIST_ENTRY *first;
+
+    dapl_os_assert (!dapl_llist_is_empty (head));
+    first = *head;
+    *head = first->flink;
+
+    first->flink->blink = first->blink;
+    first->blink->flink = first->flink;
+
+    if (first->flink == first)
+    {
+        *head = NULL;
+    }
+    /* clean up the links for good measure */
+    first->flink = NULL;
+    first->blink = NULL;
+    first->list_head = NULL;
+    return (first->data);
+}
+
+/*
+ * dapl_llist_remove_tail()
+ *
+ * Purpose: Remove the last entry of a linked list
+ */
+void *
+dapl_llist_remove_tail (DAPL_LLIST_HEAD *head)
+{
+    DAPL_LLIST_ENTRY *last;
+
+    dapl_os_assert (!dapl_llist_is_empty (head));
+    last = (*head)->blink;
+
+    last->blink->flink = last->flink;
+    last->flink->blink = last->blink;
+
+    if (last->flink == last)
+    {
+        *head = NULL;
+    }
+    /* clean up the links for good measure */
+    last->flink = NULL;
+    last->blink = NULL;
+    last->list_head = NULL;
+
+    return (last->data);
+}
+
+/*
+ * dapl_llist_remove_entry()
+ *
+ * Purpose: Remove the specified entry from a linked list
+ */
+void *
+dapl_llist_remove_entry (DAPL_LLIST_HEAD *head, DAPL_LLIST_ENTRY *entry)
+{
+    DAPL_LLIST_ENTRY *first;
+
+    dapl_os_assert (!dapl_llist_is_empty (head));
+    first = *head;
+
+    /* if it's the first entry, pull it off */
+    if (first == entry)
+    {
+        (*head) = first->flink;
+        /* if it was the only entry, kill the list */
+        if (first->flink == first)
+        {
+            (*head) = NULL;
+        }
+    }
+#ifdef VERIFY_LINKED_LIST
+    else
+    {
+        DAPL_LLIST_ENTRY *try_entry;
+
+        try_entry = first->flink;
+        for (;;)
+        {
+            if (try_entry == first)
+            {
+                /* not finding the element on the list is a BAD thing */
+                dapl_os_assert (0);
+                break;
+            }
+            if (try_entry == entry)
+            {
+                break;
+            }
+            try_entry = try_entry->flink;
+        }
+    }
+#endif /* VERIFY_LINKED_LIST */
+
+    dapl_os_assert ( entry->list_head == head );
+    entry->list_head = NULL;
+
+    entry->flink->blink = entry->blink;
+    entry->blink->flink = entry->flink;
+    entry->flink = NULL;
+    entry->blink = NULL;
+
+    return (entry->data);
+}
+
+/*
+ * dapl_llist_peek_head
+ */
+
+void *
+dapl_llist_peek_head (DAPL_LLIST_HEAD *head)
+{
+    DAPL_LLIST_ENTRY *first;
+
+    dapl_os_assert (!dapl_llist_is_empty (head));
+    first = *head;
+    return (first->data);
+}
+
+
+/*
+ * dapl_llist_next_entry
+ *
+ * Obtain the next entry in the list, return NULL when we get to the
+ * head
+ */
+
+void *
+dapl_llist_next_entry (IN DAPL_LLIST_HEAD *head,
+                       IN DAPL_LLIST_ENTRY *cur_ent)
+{
+    DAPL_LLIST_ENTRY *next;
+
+    dapl_os_assert (!dapl_llist_is_empty (head));
+    if ( cur_ent == NULL )
+    {
+        next = *head;
+    }
+    else
+    {
+        next = cur_ent->flink;
+        if ( next == *head )
+        {
+            return NULL;
+        }
+    }
+    return (next->data);
+}
+
+#ifdef DAPL_DBG
+/*
+ * dapl_llist_debug_print_list()
+ *
+ * Purpose: Prints the linked list for debugging
+ */
+void
+dapl_llist_debug_print_list (DAPL_LLIST_HEAD *head)
+{
+    DAPL_LLIST_ENTRY * entry;
+    DAPL_LLIST_ENTRY *
first; + first = *head; + if (!first) + { + dapl_os_printf ("EMPTY_LIST\n"); + return; + } + dapl_os_printf ("HEAD %p\n", *head); + dapl_os_printf ("Entry %p %p %p %p\n", + first, + first->flink, + first->blink, + first->data); + entry = first->flink; + while (entry != first) + { + dapl_os_printf ("Entry %p %p %p %p\n", + entry, + entry->flink, + entry->blink, + entry->data); + entry = entry->flink; + } +} + +#endif /* DAPL_DBG */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_create.c new file mode 100644 index 00000000..07773d37 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_create.c @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_lmr_create.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl_lmr_util.h" +#include "dapl_adapter_util.h" + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +STATIC _INLINE_ DAT_RETURN +dapl_lmr_create_virtual ( + IN DAPL_IA *ia, + IN DAT_PVOID virt_addr, + IN DAT_VLEN length, + IN DAPL_PZ *pz, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ); + +STATIC _INLINE_ DAT_RETURN +dapl_lmr_create_lmr ( + IN DAPL_IA *ia, + IN DAPL_LMR *original_lmr, + IN DAPL_PZ *pz, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ); + +STATIC _INLINE_ DAT_RETURN +dapl_lmr_create_shared_virtual ( + IN DAPL_IA *ia, + IN DAT_SHARED_MEMORY shared_memory, + IN DAT_VLEN length, + IN DAPL_PZ *pz, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ); + +/********************************************************************* + * * + * Function Definitions * + * * + 
*********************************************************************/ + +STATIC _INLINE_ DAT_RETURN +dapl_lmr_create_virtual ( + IN DAPL_IA *ia, + IN DAT_PVOID virt_addr, + IN DAT_VLEN length, + IN DAPL_PZ *pz, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ) +{ + DAPL_LMR *lmr; + DAT_REGION_DESCRIPTION reg_desc; + DAT_RETURN dat_status; + + reg_desc.for_va = virt_addr; + dat_status = DAT_SUCCESS; + + lmr = dapl_lmr_alloc (ia, + DAT_MEM_TYPE_VIRTUAL, + reg_desc, + length, + (DAT_PZ_HANDLE) pz, + privileges); + + if ( NULL == lmr ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_status = dapls_ib_mr_register (ia, + lmr, + virt_addr, + length, + privileges); + + if (DAT_SUCCESS != dat_status) + { + dapl_lmr_dealloc (lmr); + goto bail; + } + + /* if the LMR context is already in the hash table */ + dat_status = dapls_hash_search (ia->hca_ptr->lmr_hash_table, + lmr->param.lmr_context, + NULL); + if (dat_status == DAT_SUCCESS) + { + (void)dapls_ib_mr_deregister (lmr); + dapl_lmr_dealloc (lmr); + + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_LMR_IN_USE); + goto bail; + } + + dat_status = dapls_hash_insert (ia->hca_ptr->lmr_hash_table, + lmr->param.lmr_context, + lmr); + if (dat_status != DAT_SUCCESS) + { + (void)dapls_ib_mr_deregister (lmr); + dapl_lmr_dealloc (lmr); + + /* The value returned by dapls_hash_insert(.) is not */ + /* returned to the consumer because the spec. requires */ + /* that dat_lmr_create(.) return only certain values. */ + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dapl_os_atomic_inc (&pz->pz_ref_count); + + if (NULL != lmr_handle) + { + *lmr_handle = (DAT_LMR_HANDLE) lmr; + } + if (NULL != lmr_context) + { + *lmr_context = lmr->param.lmr_context; + } + if (NULL != rmr_context) + { + *rmr_context = lmr->param.rmr_context; + } + if (NULL != registered_length) + { + *registered_length = lmr->param.registered_size; + } + if (NULL != registered_address) + { + *registered_address = lmr->param.registered_address; + } + + bail: + return dat_status; +} + + +STATIC _INLINE_ DAT_RETURN +dapl_lmr_create_lmr ( + IN DAPL_IA *ia, + IN DAPL_LMR *original_lmr, + IN DAPL_PZ *pz, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ) +{ + DAPL_LMR *lmr; + DAT_REGION_DESCRIPTION reg_desc; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_lmr_create_lmr (%p, %p, %p, %x, %p, %p, %p, %p)\n", + ia, + original_lmr, + pz, privileges, + lmr_handle, + lmr_context, + registered_length, + registered_address); + + dat_status = dapls_hash_search (ia->hca_ptr->lmr_hash_table, + original_lmr->param.lmr_context, + (DAPL_HASH_DATA *) &lmr); + if ( dat_status != DAT_SUCCESS ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + reg_desc.for_lmr_handle = (DAT_LMR_HANDLE) original_lmr; + + lmr = dapl_lmr_alloc (ia, + DAT_MEM_TYPE_LMR, + reg_desc, + 0, /* length is meaningless */ + (DAT_PZ_HANDLE) pz, + privileges); + + if (NULL == lmr) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_status = dapls_ib_mr_register_shared (ia, + lmr, + privileges); + + if 
(DAT_SUCCESS != dat_status)
+    {
+        dapl_lmr_dealloc (lmr);
+        goto bail;
+    }
+
+    /* if the LMR context is already in the hash table */
+    dat_status = dapls_hash_search (ia->hca_ptr->lmr_hash_table,
+                                    lmr->param.lmr_context,
+                                    NULL);
+    if (dat_status == DAT_SUCCESS)
+    {
+        dapls_ib_mr_deregister (lmr);
+        dapl_lmr_dealloc (lmr);
+
+        dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_LMR_IN_USE);
+        goto bail;
+    }
+
+    dat_status = dapls_hash_insert (ia->hca_ptr->lmr_hash_table,
+                                    lmr->param.lmr_context,
+                                    lmr);
+    if (dat_status != DAT_SUCCESS)
+    {
+        dapls_ib_mr_deregister (lmr);
+        dapl_lmr_dealloc (lmr);
+
+        /* The value returned by dapls_hash_insert(.) is not    */
+        /* returned to the consumer because the spec. requires  */
+        /* that dat_lmr_create(.) return only certain values.   */
+        dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+        goto bail;
+    }
+
+    dapl_os_atomic_inc (&pz->pz_ref_count);
+
+    if (NULL != lmr_handle)
+    {
+        *lmr_handle = (DAT_LMR_HANDLE) lmr;
+    }
+    if (NULL != lmr_context)
+    {
+        *lmr_context = lmr->param.lmr_context;
+    }
+    if (NULL != rmr_context)
+    {
+        *rmr_context = lmr->param.rmr_context;
+    }
+    if (NULL != registered_length)
+    {
+        *registered_length = original_lmr->param.registered_size;
+    }
+    if (NULL != registered_address)
+    {
+        *registered_address = original_lmr->param.registered_address;
+    }
+
+ bail:
+    return dat_status;
+}
+
+
+STATIC _INLINE_ DAT_RETURN
+dapl_lmr_create_shared_virtual (
+    IN  DAPL_IA               *ia,
+    IN  DAT_SHARED_MEMORY     shared_memory,
+    IN  DAT_VLEN              length,
+    IN  DAPL_PZ               *pz,
+    IN  DAT_MEM_PRIV_FLAGS    privileges,
+    OUT DAT_LMR_HANDLE        *lmr_handle,
+    OUT DAT_LMR_CONTEXT       *lmr_context,
+    OUT DAT_RMR_CONTEXT       *rmr_context,
+    OUT DAT_VLEN              *registered_length,
+    OUT DAT_VADDR             *registered_address )
+{
+    DAT_RETURN             dat_status;
+    DAPL_LMR               *new_lmr;
+    DAT_REGION_DESCRIPTION reg_desc;
+
+    reg_desc.for_shared_memory = shared_memory;
+    dat_status = DAT_SUCCESS;
+
+    new_lmr = dapl_lmr_alloc (ia,
+                              DAT_MEM_TYPE_SHARED_VIRTUAL,
+                              reg_desc,
+                              length,
+                              (DAT_PZ_HANDLE) pz,
+                              privileges);
+
+    if (NULL == new_lmr)
+    {
+        dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+        goto bail;
+    }
+
+    dapl_os_memcpy (
+        new_lmr->param.region_desc.for_shared_memory.shared_memory_id,
+        shared_memory.shared_memory_id, DAT_LMR_COOKIE_SIZE );
+
+    dat_status = dapls_ib_mr_register (ia,
+                                       new_lmr,
+                                       shared_memory.virtual_address,
+                                       length,
+                                       privileges);
+
+    if (DAT_SUCCESS != dat_status)
+    {
+        dapl_lmr_dealloc (new_lmr);
+        goto bail;
+    }
+
+    /* if the LMR context is already in the hash table */
+    dat_status = dapls_hash_search (ia->hca_ptr->lmr_hash_table,
+                                    new_lmr->param.lmr_context,
+                                    NULL);
+    if (DAT_SUCCESS == dat_status)
+    {
+        (void)dapls_ib_mr_deregister (new_lmr);
+        dapl_lmr_dealloc (new_lmr);
+        dat_status = DAT_ERROR (DAT_INVALID_STATE, DAT_INVALID_STATE_LMR_IN_USE);
+        goto bail;
+    }
+
+    dat_status = dapls_hash_insert (ia->hca_ptr->lmr_hash_table,
+                                    new_lmr->param.lmr_context,
+                                    new_lmr);
+    if (DAT_SUCCESS != dat_status)
+    {
+        (void)dapls_ib_mr_deregister (new_lmr);
+        dapl_lmr_dealloc (new_lmr);
+        dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY);
+        goto bail;
+
+        /* The value returned by dapls_hash_insert(.) is not    */
+        /* returned to the consumer because the spec. requires  */
+        /* that dat_lmr_create(.) return only certain values.
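+           (The registration itself has already been undone above via
+           dapls_ib_mr_deregister and dapl_lmr_dealloc, so only the
+           mapped status escapes to the caller.)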
*/ + } + + dat_status = dapls_ib_mr_register_shared ( ia, + new_lmr, + privileges ); + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DLCSV: register_shared ret %x, mr_handle %p\n", + dat_status, new_lmr->mr_handle); + + if (DAT_SUCCESS != dat_status) + { + + dapl_lmr_dealloc ( new_lmr ); + goto bail; + } + + (void) dapl_os_atomic_inc (&pz->pz_ref_count); + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DaLmrCsv: ++ LMR(%p) pz(%p)->ref_count = %d\n", + new_lmr, pz, pz->pz_ref_count ); + + if (NULL != lmr_handle) + { + *lmr_handle = (DAT_LMR_HANDLE) new_lmr; + } + if (NULL != lmr_context) + { + *lmr_context = new_lmr->param.lmr_context; + } + if (NULL != rmr_context) + { + *rmr_context = new_lmr->param.rmr_context; + } + if (NULL != registered_length) + { + *registered_length = length; + } + if (NULL != registered_address) + { + *registered_address = (DAT_VADDR) ((uintptr_t) shared_memory.virtual_address); + } + +bail: + return dat_status; + +} + + + +/* + * dapl_lmr_create + * + * DAPL Requirements Version xxx, 6.6.3.1 + * + * Register a memory region with an Interface Adaptor. + * + * Input: + * ia_handle + * mem_type + * region_description + * length + * pz_handle + * privileges + * + * Output: + * lmr_handle + * lmr_context + * registered_length + * registered_address + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + * DAT_MODEL_NOT_SUPPORTED + * + */ +DAT_RETURN +dapl_lmr_create ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_MEM_TYPE mem_type, + IN DAT_REGION_DESCRIPTION region_description, + IN DAT_VLEN length, + IN DAT_PZ_HANDLE pz_handle, + IN DAT_MEM_PRIV_FLAGS privileges, + OUT DAT_LMR_HANDLE *lmr_handle, + OUT DAT_LMR_CONTEXT *lmr_context, + OUT DAT_RMR_CONTEXT *rmr_context, + OUT DAT_VLEN *registered_length, + OUT DAT_VADDR *registered_address ) +{ + DAPL_IA *ia; + DAPL_PZ *pz; + DAT_RETURN dat_status; + + if ( DAPL_BAD_HANDLE (ia_handle, DAPL_MAGIC_IA) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA); + goto bail; + } + if ( DAPL_BAD_HANDLE (pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_PZ); + goto bail; + } + + ia = (DAPL_IA *) ia_handle; + pz = (DAPL_PZ *) pz_handle; + + switch (mem_type) + { + case DAT_MEM_TYPE_VIRTUAL: + { + dat_status = dapl_lmr_create_virtual ( + ia, region_description.for_va, length, pz, privileges, + lmr_handle, lmr_context, rmr_context, registered_length, + registered_address); + break; + } + case DAT_MEM_TYPE_LMR: + { + DAPL_LMR *lmr; + + if ( DAPL_BAD_HANDLE (region_description.for_lmr_handle, DAPL_MAGIC_LMR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_LMR); + goto bail; + } + + lmr = (DAPL_LMR *) region_description.for_lmr_handle; + + dat_status = dapl_lmr_create_lmr ( + ia, lmr, pz, privileges, lmr_handle, + lmr_context, rmr_context, registered_length, registered_address); + break; + } + case DAT_MEM_TYPE_SHARED_VIRTUAL: + { + dat_status = dapl_lmr_create_shared_virtual ( + ia, region_description.for_shared_memory, length, pz, + privileges, lmr_handle, lmr_context, rmr_context, + registered_length, registered_address); + break; + } + default: + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); + break; + } + } + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_free.c new file mode 100644 index 00000000..923ad9b1 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_free.c @@ -0,0 
+1,134 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_lmr_free.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl_lmr_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_lmr_free + * + * DAPL Requirements Version xxx, 6.6.3.2 + * + * Destroy an instance of the Local Memory Region + * + * Input: + * lmr_handle + * + * Output: + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + */ + +DAT_RETURN +dapl_lmr_free ( + IN DAT_LMR_HANDLE lmr_handle ) +{ + DAPL_LMR *lmr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_lmr_free (%p)\n", lmr_handle); + + if ( DAPL_BAD_HANDLE (lmr_handle, DAPL_MAGIC_LMR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_LMR); + goto bail; + } + + lmr = (DAPL_LMR *) lmr_handle; + + switch (lmr->param.mem_type) + { + case DAT_MEM_TYPE_VIRTUAL: + /* fall through */ + case DAT_MEM_TYPE_LMR: + /* fall through */ + case DAT_MEM_TYPE_SHARED_VIRTUAL: + { + DAPL_PZ *pz; + + if ( 0 != lmr->lmr_ref_count ) + { + return DAT_INVALID_STATE; + } + + dat_status = dapls_hash_remove (lmr->header.owner_ia->hca_ptr->lmr_hash_table, + lmr->param.lmr_context, NULL); + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + dat_status = dapls_ib_mr_deregister (lmr); + + if (dat_status == DAT_SUCCESS) + { + + pz = (DAPL_PZ *) lmr->param.pz_handle; + dapl_os_atomic_dec (&pz->pz_ref_count); + + dapl_lmr_dealloc (lmr); + } + else + { + /* + * Deregister failed; put it back in the + * hash table. 
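+             * That keeps the lmr_context -> LMR mapping intact, so a
+             * later dat_lmr_free call can retry the deregistration.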
+ */ + dapls_hash_insert (lmr->header.owner_ia->hca_ptr->lmr_hash_table, + lmr->param.lmr_context, lmr); + } + + break; + } + default: + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "--> DLF: lmr_free(%p) mem_type %d mr_handle %p\n", + lmr, lmr->param.mem_type, lmr->mr_handle); + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1); + break; + } + } + +bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_query.c new file mode 100644 index 00000000..35b89c6a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_query.c @@ -0,0 +1,93 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_lmr_query.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_lmr_query + * + * DAPL Requirements Version xxx, 6.6.3.3 + * + * Provide the LMR arguments. + * + * Input: + * lmr_handle + * lmr_param_mask + * lmr_param + * + * Output: + * lmr_param + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_lmr_query ( + IN DAT_LMR_HANDLE lmr_handle, + IN DAT_LMR_PARAM_MASK lmr_param_mask, + IN DAT_LMR_PARAM *lmr_param ) +{ + DAPL_LMR *lmr; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_lmr_query (%p, 0x%x, %p)\n", + lmr_handle, + lmr_param_mask, + lmr_param); + + if ( DAPL_BAD_HANDLE (lmr_handle, DAPL_MAGIC_LMR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_LMR); + goto bail; + } + if (NULL == lmr_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + dat_status = DAT_SUCCESS; + lmr = (DAPL_LMR *) lmr_handle; + + dapl_os_memcpy (lmr_param, &lmr->param, sizeof (DAT_LMR_PARAM)); + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.c new file mode 100644 index 00000000..99de0c9a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.c @@ -0,0 +1,109 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_lmr_util.c + * + * PURPOSE: Memory management support routines + * Description: Support routines for LMR functions + * + * $Id$ + **********************************************************************/ + +#include "dapl_lmr_util.h" +#include "dapl_ia_util.h" + +DAPL_LMR * +dapl_lmr_alloc ( + IN DAPL_IA *ia, + IN DAT_MEM_TYPE mem_type, + IN DAT_REGION_DESCRIPTION region_desc, + IN DAT_VLEN length, + IN DAT_PZ_HANDLE pz_handle, + IN DAT_MEM_PRIV_FLAGS mem_priv) +{ + DAPL_LMR *lmr; + + /* Allocate LMR */ + lmr = (DAPL_LMR *) dapl_os_alloc (sizeof (DAPL_LMR)); + if (NULL == lmr) + { + return (NULL); + } + + /* zero the structure */ + dapl_os_memzero (lmr, sizeof (DAPL_LMR)); + + /* + * initialize the header + */ + lmr->header.provider = ia->header.provider; + lmr->header.magic = DAPL_MAGIC_LMR; + lmr->header.handle_type = DAT_HANDLE_TYPE_LMR; + lmr->header.owner_ia = ia; + lmr->header.user_context.as_64 = 0; + lmr->header.user_context.as_ptr = NULL; + dapl_llist_init_entry (&lmr->header.ia_list_entry); + dapl_ia_link_lmr (ia, lmr); + dapl_os_lock_init (&lmr->header.lock); + + /* + * initialize the body + */ + lmr->param.ia_handle = (DAT_IA_HANDLE) ia; + lmr->param.mem_type = mem_type; + lmr->param.region_desc = region_desc; + lmr->param.length = length; + lmr->param.pz_handle = pz_handle; + lmr->param.mem_priv = mem_priv; + lmr->lmr_ref_count = 0; + + if (mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL) + { + lmr->param.region_desc.for_shared_memory.shared_memory_id = + dapl_os_alloc (DAT_LMR_COOKIE_SIZE); + } + + return (lmr); +} + +void +dapl_lmr_dealloc ( + IN DAPL_LMR *lmr) +{ + lmr->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */ + dapl_ia_unlink_lmr (lmr->header.owner_ia, lmr); + dapl_os_lock_destroy (&lmr->header.lock); + + if (lmr->param.mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL) + { + dapl_os_free (lmr->param.region_desc.for_shared_memory.shared_memory_id, + DAT_LMR_COOKIE_SIZE); + } + + dapl_os_free ((void *) lmr, sizeof (DAPL_LMR)); +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.h new file mode 100644 index 00000000..5e92f186 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_lmr_util.h @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_lmr_util.h + * + * PURPOSE: Utility defs & routines for the LMR data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_LMR_UTIL_H_ +#define _DAPL_LMR_UTIL_H_ + +#include "dapl_mr_util.h" + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAPL_LMR * +dapl_lmr_alloc ( + IN DAPL_IA *ia, + IN DAT_MEM_TYPE mem_type, + IN DAT_REGION_DESCRIPTION region_desc, + IN DAT_VLEN length, + IN DAT_PZ_HANDLE pz_handle, + IN DAT_MEM_PRIV_FLAGS mem_priv); + +extern void +dapl_lmr_dealloc ( + IN DAPL_LMR *lmr); + +STATIC _INLINE_ int32_t +dapl_lmr_convert_privileges ( + IN DAT_MEM_PRIV_FLAGS privileges); + + +/********************************************************************* + * * + * Inline Functions * + * * + *********************************************************************/ + +STATIC _INLINE_ int32_t +dapl_lmr_convert_privileges ( + IN DAT_MEM_PRIV_FLAGS privileges) +{ + int32_t value = 0; + + /* + * if (DAT_MEM_PRIV_LOCAL_READ_FLAG & privileges) + * do nothing + */ + + if (DAT_MEM_PRIV_LOCAL_WRITE_FLAG & privileges) + { + value |= IB_ACCESS_LOCAL_WRITE; + } + + if (DAT_MEM_PRIV_REMOTE_READ_FLAG & privileges) + { + value |= IB_ACCESS_REMOTE_READ; + } + + if (DAT_MEM_PRIV_REMOTE_WRITE_FLAG & privileges) + { + value |= IB_ACCESS_REMOTE_WRITE; + } + + return value; +} + +#endif /* _DAPL_LMR_UTIL_H_*/ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.c new file mode 100644 index 00000000..0b931a02 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.c @@ -0,0 +1,80 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. 
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_mr_util.c + * + * PURPOSE: Common Memory Management functions and data structures + * + * $Id$ + **********************************************************************/ + +#include "dapl_mr_util.h" + +/********************************************************************* + * * + * Function Definitions * + * * + *********************************************************************/ + +/* + * dapl_mr_get_address + * + * Returns the memory address associated with the given memory descriptor + * + */ + +DAT_VADDR +dapl_mr_get_address (DAT_REGION_DESCRIPTION desc, DAT_MEM_TYPE type) +{ + switch (type) + { + case DAT_MEM_TYPE_VIRTUAL: + { + return (DAT_VADDR) (uintptr_t) desc.for_va; + } + case DAT_MEM_TYPE_LMR: + { + DAPL_LMR *lmr; + + lmr = (DAPL_LMR *) desc.for_lmr_handle; + + /* Since this function is recursive, we cannot inline it */ + return dapl_mr_get_address (lmr->param.region_desc, + lmr->param.mem_type); + } + case DAT_MEM_TYPE_SHARED_VIRTUAL: + { + return (DAT_VADDR) (uintptr_t) desc.for_shared_memory.virtual_address; + } + default: + { + dapl_os_assert (0); + return 0; + } + } +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.h new file mode 100644 index 00000000..4b07164d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_mr_util.h @@ -0,0 +1,92 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * HEADER: dapl_mr_util.h + * + * PURPOSE: Utility defs & routines for memory registration functions + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_MR_UTIL_H_ +#define _DAPL_MR_UTIL_H_ + +#include "dapl.h" +#include "dapl_hash.h" + + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAT_VADDR +dapl_mr_get_address ( + IN DAT_REGION_DESCRIPTION desc, + IN DAT_MEM_TYPE type); + +STATIC _INLINE_ DAT_BOOLEAN +dapl_mr_bounds_check ( + IN DAT_VADDR addr_a, + IN DAT_VLEN length_a, + IN DAT_VADDR addr_b, + IN DAT_VLEN length_b); + + +/********************************************************************* + * * + * Inline Functions * + * * + *********************************************************************/ + +/* + * dapl_mr_bounds_check + * + * Returns true if region B is contained within region A + * and false otherwise + * + */ + +STATIC _INLINE_ DAT_BOOLEAN +dapl_mr_bounds_check (DAT_VADDR addr_a, DAT_VLEN length_a, + DAT_VADDR addr_b, DAT_VLEN length_b) +{ + if ( (addr_a <= addr_b) && + (addr_b + length_b) <= (addr_a + length_a)) + { + return DAT_TRUE; + } + else + { + return DAT_FALSE; + } +} + +#endif /* _DAPL_MR_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.c new file mode 100644 index 00000000..01c0db38 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.c @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_provider.c + * + * PURPOSE: Provider function table + * Description: DAT Interfaces to this provider + * + * $Id$ + **********************************************************************/ + +#include "dapl_provider.h" + +extern DAT_RETURN dapl_not_implemented (void); + +/********************************************************************* + * * + * Global Data * + * * + *********************************************************************/ + +DAPL_PROVIDER_LIST g_dapl_provider_list; + + +/* + * the function table for this provider + */ + +DAT_PROVIDER g_dapl_provider_template = +{ + NULL, + 0, + &dapl_ia_open, + &dapl_ia_query, + &dapl_ia_close, + + &dapl_set_consumer_context, + &dapl_get_consumer_context, + &dapl_get_handle_type, + + &dapl_cno_create, + &dapl_cno_modify_agent, + &dapl_cno_query, + &dapl_cno_free, + &dapl_cno_wait, + + &dapl_cr_query, + &dapl_cr_accept, + &dapl_cr_reject, + &dapl_cr_handoff, + + &dapl_evd_create, + &dapl_evd_query, + &dapl_evd_modify_cno, + &dapl_evd_enable, + &dapl_evd_disable, + &dapl_evd_wait, + &dapl_evd_resize, + &dapl_evd_post_se, + &dapl_evd_dequeue, + &dapl_evd_free, + + &dapl_ep_create, + &dapl_ep_query, + &dapl_ep_modify, + &dapl_ep_connect, + &dapl_ep_dup_connect, + &dapl_ep_disconnect, + &dapl_ep_post_send, + &dapl_ep_post_recv, + &dapl_ep_post_rdma_read, + &dapl_ep_post_rdma_write, + &dapl_ep_get_status, + &dapl_ep_free, + + &dapl_lmr_create, + &dapl_lmr_query, + &dapl_lmr_free, + + &dapl_rmr_create, + &dapl_rmr_query, + &dapl_rmr_bind, + &dapl_rmr_free, + + &dapl_psp_create, + &dapl_psp_query, + &dapl_psp_free, + + &dapl_rsp_create, + &dapl_rsp_query, + &dapl_rsp_free, + + &dapl_pz_create, + &dapl_pz_query, + &dapl_pz_free, + + &dapl_psp_create_any, + &dapl_ep_reset, + &dapl_evd_set_unwaitable, + &dapl_evd_clear_unwaitable +}; + + + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +static DAT_BOOLEAN +dapl_provider_list_key_cmp ( + const char *name_a, + const char *name_b ); + + +/********************************************************************* + * * + * Function Definitions * + * * + *********************************************************************/ + +DAT_RETURN +dapl_provider_list_create ( void ) +{ + DAT_RETURN status; + + status = DAT_SUCCESS; + + /* create the head node */ + g_dapl_provider_list.head = dapl_os_alloc (sizeof (DAPL_PROVIDER_LIST_NODE)); + if ( NULL == g_dapl_provider_list.head ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + dapl_os_memzero (g_dapl_provider_list.head, sizeof (DAPL_PROVIDER_LIST_NODE)); + + /* create the tail node */ + g_dapl_provider_list.tail = dapl_os_alloc (sizeof (DAPL_PROVIDER_LIST_NODE)); + if ( NULL == g_dapl_provider_list.tail ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + dapl_os_memzero (g_dapl_provider_list.tail, sizeof (DAPL_PROVIDER_LIST_NODE)); + + g_dapl_provider_list.head->next = g_dapl_provider_list.tail; + g_dapl_provider_list.tail->prev = g_dapl_provider_list.head; + g_dapl_provider_list.size = 0; + g_dapl_provider_list.initialized = DAT_TRUE; + +bail: + if ( DAT_SUCCESS != status ) + { + if ( NULL != g_dapl_provider_list.head ) + { + dapl_os_free (g_dapl_provider_list.head, sizeof (DAPL_PROVIDER_LIST_NODE)); + } + + if ( NULL != 
g_dapl_provider_list.tail ) + { + dapl_os_free (g_dapl_provider_list.tail, sizeof (DAPL_PROVIDER_LIST_NODE)); + } + g_dapl_provider_list.initialized = DAT_FALSE; + } + + return status; +} + + +DAT_RETURN +dapl_provider_list_destroy ( void ) +{ + DAPL_PROVIDER_LIST_NODE *cur_node; + + while ( NULL != g_dapl_provider_list.head ) + { + cur_node = g_dapl_provider_list.head; + g_dapl_provider_list.head = cur_node->next; + + dapl_os_free (cur_node, sizeof (DAPL_PROVIDER_LIST_NODE)); + } + + g_dapl_provider_list.initialized = DAT_FALSE; + + return DAT_SUCCESS; +} + + +DAT_COUNT +dapl_provider_list_size ( void ) +{ + return g_dapl_provider_list.size; +} + + +DAT_RETURN +dapl_provider_list_insert ( + IN const char *name, + IN DAT_PROVIDER **p_data ) +{ + DAPL_PROVIDER_LIST_NODE *cur_node, *prev_node, *next_node; + DAT_RETURN status; + unsigned int len; + + status = DAT_SUCCESS; + + cur_node = dapl_os_alloc (sizeof (DAPL_PROVIDER_LIST_NODE)); + + if ( NULL == cur_node ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + if ( g_dapl_provider_list.initialized != DAT_TRUE ) + { + status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_ARG2); + goto bail; + } + + len = dapl_os_strlen(name); + + if ( DAT_NAME_MAX_LENGTH <= len ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + /* insert node at end of list to preserve registration order*/ + prev_node = g_dapl_provider_list.tail->prev; + next_node = g_dapl_provider_list.tail; + + dapl_os_memcpy (cur_node->name, name, len); + cur_node->name[len] = '\0'; + cur_node->data = g_dapl_provider_template; + cur_node->data.device_name = cur_node->name; + cur_node->next = next_node; + cur_node->prev = prev_node; + + prev_node->next = cur_node; + next_node->prev = cur_node; + + g_dapl_provider_list.size++; + + if ( NULL != p_data ) + { + *p_data = &cur_node->data; + } + +bail: + if ( DAT_SUCCESS != status ) + { + if ( NULL != cur_node ) + { + dapl_os_free (cur_node, sizeof (DAPL_PROVIDER_LIST_NODE)); + } + } + + return status; +} + + +DAT_RETURN +dapl_provider_list_search ( + IN const char *name, + OUT DAT_PROVIDER **p_data ) +{ + DAPL_PROVIDER_LIST_NODE *cur_node; + DAT_RETURN status; + + status = DAT_ERROR (DAT_PROVIDER_NOT_FOUND,0); + + if ( g_dapl_provider_list.initialized != DAT_TRUE ) + { + status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_ARG2); + goto bail; + } + + + for (cur_node = g_dapl_provider_list.head->next; + g_dapl_provider_list.tail != cur_node; + cur_node = cur_node->next) + { + if ( dapl_provider_list_key_cmp (cur_node->name, name) ) + { + if ( NULL != p_data ) + { + *p_data = &cur_node->data; + } + + status = DAT_SUCCESS; + goto bail; + } + } + +bail: + return status; +} + + +DAT_RETURN +dapl_provider_list_remove ( + IN const char *name ) +{ + DAPL_PROVIDER_LIST_NODE *cur_node, *prev_node, *next_node; + DAT_RETURN status; + + status = DAT_ERROR (DAT_PROVIDER_NOT_FOUND,0); + + if ( g_dapl_provider_list.initialized != DAT_TRUE ) + { + status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_ARG2); + goto bail; + } + + for (cur_node = g_dapl_provider_list.head->next; + g_dapl_provider_list.tail != cur_node; + cur_node = cur_node->next) + { + if ( dapl_provider_list_key_cmp (cur_node->name, name) ) + { + prev_node = cur_node->prev; + next_node = cur_node->next; + + prev_node->next = next_node; + next_node->prev = prev_node; + + dapl_os_free (cur_node, sizeof (DAPL_PROVIDER_LIST_NODE)); + + g_dapl_provider_list.size--; + + status = DAT_SUCCESS; + goto bail; + } + } + 
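+ /* Not found: fall through to bail with the DAT_PROVIDER_NOT_FOUND + * status that was set at function entry. + */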
+bail: + return status; +} + + +DAT_BOOLEAN +dapl_provider_list_key_cmp ( + const char *name_a, + const char *name_b ) +{ + unsigned int len; + + len = dapl_os_strlen (name_a); + + if ( dapl_os_strlen (name_b) != len ) + { + return DAT_FALSE; + } + else if ( dapl_os_memcmp (name_a, name_b, len) ) + { + return DAT_FALSE; + } + else + { + return DAT_TRUE; + } +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.h new file mode 100644 index 00000000..06a2d7ff --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_provider.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_provider.h + * + * PURPOSE: Provider function table + * Description: DAT Interfaces to this provider + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_PROVIDER_H_ +#define _DAPL_PROVIDER_H_ + +#include "dapl.h" + + +/********************************************************************* + * * + * Structures * + * * + *********************************************************************/ + +typedef struct DAPL_PROVIDER_LIST_NODE +{ + char name[DAT_NAME_MAX_LENGTH]; + DAT_PROVIDER data; + struct DAPL_PROVIDER_LIST_NODE *next; + struct DAPL_PROVIDER_LIST_NODE *prev; +} DAPL_PROVIDER_LIST_NODE; + + +typedef struct DAPL_PROVIDER_LIST +{ + DAPL_PROVIDER_LIST_NODE *head; + DAPL_PROVIDER_LIST_NODE *tail; + DAT_COUNT size; + DAT_BOOLEAN initialized; +} DAPL_PROVIDER_LIST; + + +/********************************************************************* + * * + * Global Data * + * * + *********************************************************************/ + +extern DAPL_PROVIDER_LIST g_dapl_provider_list; +extern DAT_PROVIDER g_dapl_provider_template; +extern int g_dapl_loopback_connection; + + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAT_RETURN +dapl_provider_list_create( void ); + +extern DAT_RETURN +dapl_provider_list_destroy( void ); + +extern DAT_COUNT +dapl_provider_list_size( void ); + +extern DAT_RETURN +dapl_provider_list_insert( + IN const char *name, + OUT DAT_PROVIDER **p_data ); + +extern DAT_RETURN +dapl_provider_list_search( + IN const char *name, + OUT DAT_PROVIDER **p_data ); + +extern DAT_RETURN +dapl_provider_list_remove( + IN const char *name ); + + +#endif /* _DAPL_PROVIDER_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create.c new file mode 100644 index 00000000..7fd3024c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_psp_create.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_sp_util.h" +#include "dapl_ia_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_psp_create + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.1 + * + * Create a persistent Public Service Point that can receive multiple + * requests for connections and generate multiple connection request + * instances that will be delivered to the specified Event Dispatcher + * in a notification event. + * + * Input: + * ia_handle + * conn_qual + * evd_handle + * psp_flags + * + * Output: + * psp_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * DAT_CONN_QUAL_IN_USE + * DAT_MODEL_NOT_SUPPORTED + */ +DAT_RETURN +dapl_psp_create ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_CONN_QUAL conn_qual, + IN DAT_EVD_HANDLE evd_handle, + IN DAT_PSP_FLAGS psp_flags, + OUT DAT_PSP_HANDLE *psp_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_SP *sp_ptr; + DAPL_EVD *evd_ptr; + DAT_BOOLEAN sp_found; + DAT_RETURN dat_status; + + ia_ptr = (DAPL_IA *)ia_handle; + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR); + goto bail; + } + + if ( psp_handle == NULL ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG5); + goto bail; + } + + evd_ptr = (DAPL_EVD *)evd_handle; + if ( ! (evd_ptr->evd_flags & DAT_EVD_CR_FLAG) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR); + goto bail; + } + + if (psp_flags != DAT_PSP_CONSUMER_FLAG && + psp_flags != DAT_PSP_PROVIDER_FLAG) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG4); + goto bail; + } + + + /* + * See if we have a quiescent listener to use for this PSP, else + * create one and set it listening + */ + sp_ptr = dapls_ia_sp_search (ia_ptr, conn_qual, DAT_TRUE); + sp_found = DAT_TRUE; + if (sp_ptr == NULL) + { + /* Allocate PSP */ + sp_found = DAT_FALSE; + sp_ptr = dapls_sp_alloc ( ia_ptr, DAT_TRUE ); + if ( sp_ptr == NULL ) + { + dat_status = + DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + } + else if (sp_ptr->listening == DAT_TRUE) + { + dat_status = DAT_ERROR (DAT_CONN_QUAL_IN_USE, 0); + goto bail; + } + + /* + * Fill out the args for a PSP + */ + sp_ptr->ia_handle = ia_handle; + sp_ptr->conn_qual = conn_qual; + sp_ptr->evd_handle = evd_handle; + sp_ptr->psp_flags = psp_flags; + sp_ptr->ep_handle = NULL; + + /* + * Take a reference on the EVD handle + */ + dapl_os_atomic_inc (& ((DAPL_EVD *)evd_handle)->evd_ref_count); + + /* + * Set up a listener for a connection. Connections can arrive + * even before this call returns! + */ + sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING; + sp_ptr->listening = DAT_TRUE; + + /* + * If this is a new sp we need to add it to the IA queue, and set up + * a conn_listener.
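+ * If setting up the listener fails, the error path below undoes the + * link and frees the SP again.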
+ */ + if (sp_found == DAT_FALSE) + { + /* Link it onto the IA before enabling it to receive conn + * requests + */ + dapl_ia_link_psp (ia_ptr, sp_ptr); + + dat_status = dapls_ib_setup_conn_listener ( ia_ptr, + conn_qual, + sp_ptr ); + + if ( dat_status != DAT_SUCCESS ) + { + /* + * Have a problem setting up the connection, something + * wrong! Decrement the EVD refcount and release it. + */ + dapl_os_atomic_dec (& ((DAPL_EVD *)evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + dapls_ia_unlink_sp ( ia_ptr, sp_ptr ); + dapls_sp_free_sp ( sp_ptr ); + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> dapl_psp_create setup_conn_listener failed: %x\n", + dat_status); + + goto bail; + } + } + + /* + * Return handle to the user + */ + *psp_handle = (DAT_PSP_HANDLE)sp_ptr; + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create_any.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create_any.c new file mode 100644 index 00000000..64f1728d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_create_any.c @@ -0,0 +1,213 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_psp_create_any.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_sp_util.h" +#include "dapl_ia_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_psp_create_any + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.3.3 + * + * Create a persistent Public Service Point that can receive multiple + * requests for connections and generate multiple connection request + * instances that will be delivered to the specified Event Dispatcher + * in a notification event. Differs from dapl_psp_create() in that + * the conn_qual is selected by the implementation and returned to + * the user.
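+ * + * Note: this implementation picks pseudo-random candidate conn_qual + * values (seeded from the OS tick count) and retries + * dapls_ib_setup_conn_listener until an unused service ID is found, + * giving up after roughly 100000 attempts. A hypothetical call looks + * like (ia and cr_evd are illustrative handle names, not part of this + * API): + * DAT_CONN_QUAL cq; + * DAT_PSP_HANDLE psp; + * dat_status = dapl_psp_create_any (ia, &cq, cr_evd, + * DAT_PSP_CONSUMER_FLAG, &psp);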
+ * + * Input: + * ia_handle + * evd_handle + * psp_flags + * + * Output: + * conn_qual + * psp_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + * DAT_CONN_QUAL_IN_USE + * DAT_MODEL_NOT_SUPPORTED + */ +DAT_RETURN +dapl_psp_create_any ( + IN DAT_IA_HANDLE ia_handle, + OUT DAT_CONN_QUAL *conn_qual, + IN DAT_EVD_HANDLE evd_handle, + IN DAT_PSP_FLAGS psp_flags, + OUT DAT_PSP_HANDLE *psp_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_SP *sp_ptr; + DAPL_EVD *evd_ptr; + DAT_RETURN dat_status; + static DAT_CONN_QUAL hint_conn_qual = 1000; /* seed value */ + DAT_CONN_QUAL limit_conn_qual; + + ia_ptr = (DAPL_IA *)ia_handle; + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + if (DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR); + goto bail; + } + + if ( psp_handle == NULL ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG5); + goto bail; + } + if (conn_qual == NULL) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + evd_ptr = (DAPL_EVD *)evd_handle; + if ( ! (evd_ptr->evd_flags & DAT_EVD_CR_FLAG) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR); + goto bail; + } + + if (psp_flags != DAT_PSP_CONSUMER_FLAG && + psp_flags != DAT_PSP_PROVIDER_FLAG) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG4); + goto bail; + } + + /* Allocate PSP */ + sp_ptr = dapls_sp_alloc ( ia_ptr, DAT_TRUE ); + if ( sp_ptr == NULL ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + /* + * Fill out the args for a PSP + */ + sp_ptr->ia_handle = ia_handle; + sp_ptr->evd_handle = evd_handle; + sp_ptr->psp_flags = psp_flags; + sp_ptr->ep_handle = NULL; + + /* + * Take a reference on the EVD handle + */ + dapl_os_atomic_inc (& ((DAPL_EVD *)evd_handle)->evd_ref_count); + + /* Link it onto the IA */ + dapl_ia_link_psp (ia_ptr, sp_ptr); + + /* + * Set up a listener for a connection. Connections can arrive + * even before this call returns! + */ + sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING; + sp_ptr->listening = DAT_TRUE; + + limit_conn_qual = 0; + dat_status = ~DAT_SUCCESS; + + while (dat_status != DAT_SUCCESS) + { + srand ( (DAT_UINT32)dapl_os_get_ticks () ); + hint_conn_qual += rand(); + sp_ptr->conn_qual = hint_conn_qual; + + dat_status = dapls_ib_setup_conn_listener ( ia_ptr, + sp_ptr->conn_qual, + sp_ptr ); + /* + * If we have a big number of tries and we still haven't + * found a service_ID we can use, bail out with an error, + * something is wrong! + */ + if ( limit_conn_qual++ > 100000) + { + dat_status = DAT_CONN_QUAL_UNAVAILABLE; + break; + } + } + + if ( dat_status != DAT_SUCCESS ) + { + /* + * Have a problem setting up the connection, something wrong! 
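+ * Drop the EVD reference taken above, unlink and free the SP, then + * report the failure to the caller.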
+ */ + dapl_os_atomic_dec (& ((DAPL_EVD *)evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + dapls_ia_unlink_sp ( ia_ptr, sp_ptr ); + dapls_sp_free_sp ( sp_ptr ); + + dapl_os_printf ("--> dapl_psp_create cannot set up conn listener: %x\n", dat_status); + + goto bail; + } + + /* + * Return handle to the user + */ + *conn_qual = sp_ptr->conn_qual; + *psp_handle = (DAT_PSP_HANDLE)sp_ptr; + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_free.c new file mode 100644 index 00000000..1f184279 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_free.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_psp_free.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_sp_util.h" +#include "dapl_ia_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_psp_free + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.2 + * + * Destroy a specific instance of a Service Point. + * + * Input: + * psp_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_psp_free ( + IN DAT_PSP_HANDLE psp_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_SP *sp_ptr; + DAT_RETURN dat_status; + + sp_ptr = (DAPL_SP *) psp_handle; + dat_status = DAT_SUCCESS; + /* + * Verify handle + */ + dapl_dbg_log (DAPL_DBG_TYPE_CM, ">>> dapl_psp_free %p\n", psp_handle); + + if ( DAPL_BAD_HANDLE (sp_ptr, DAPL_MAGIC_PSP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_PSP); + goto bail; + } + + /* ia_ptr = (DAPL_IA *)sp_ptr->header.owner_ia; */ + ia_ptr = sp_ptr->header.owner_ia; + /* + * Remove the connection listener if it has been established + * and there are no current connections in progress. + * If we defer removing the sp it becomes something of a zombie + * container until the last connection is disconnected, after + * which it will be cleaned up. + */ + dapl_os_lock (&sp_ptr->header.lock); + + sp_ptr->listening = DAT_FALSE; + + /* Release reference on EVD. 
If an error was encountered in a previous + * free the evd_handle will be NULL + */ + if (sp_ptr->evd_handle) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)sp_ptr->evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + } + + /* + * Release the base resource if there are no outstanding + * connections; else the last disconnect on this PSP will free it + * up. The PSP is used to contain CR records for each connection, + * which contain information necessary to disconnect. + */ + if ( (sp_ptr->state == DAPL_SP_STATE_PSP_LISTENING || + sp_ptr->state == DAPL_SP_STATE_PSP_PENDING) && + sp_ptr->cr_list_count == 0 ) + { + sp_ptr->state = DAPL_SP_STATE_FREE; + dapl_os_unlock (&sp_ptr->header.lock); + + dat_status = dapls_ib_remove_conn_listener ( ia_ptr, + sp_ptr ); + if (dat_status != DAT_SUCCESS) + { + /* revert to entry state on error */ + sp_ptr->state = DAPL_SP_STATE_PSP_LISTENING; + goto bail; + } + dapls_ia_unlink_sp ( ia_ptr, sp_ptr ); + dapls_sp_free_sp ( sp_ptr ); + } + else + { + /* The PSP is now in the pending state, where it will sit until + * the last connection terminates or the app uses the same + * ServiceID again, which will reactivate it. + */ + sp_ptr->state = DAPL_SP_STATE_PSP_PENDING; + dapl_os_unlock (&sp_ptr->header.lock); + dapl_dbg_log (DAPL_DBG_TYPE_CM, ">>> dapl_psp_free: PSP PENDING\n"); + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_query.c new file mode 100644 index 00000000..6bd49c89 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_psp_query.c @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_psp_query.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_psp_query + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.3 + * + * Provide arguments of the public service points + * + * Input: + * psp_handle + * psp_args_mask + * + * Output: + * psp_args + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_psp_query ( + IN DAT_PSP_HANDLE psp_handle, + IN DAT_PSP_PARAM_MASK psp_args_mask, + OUT DAT_PSP_PARAM *psp_param ) +{ + DAPL_SP *sp_ptr; + DAT_RETURN dat_status; + UNREFERENCED_PARAMETER(psp_args_mask); + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (psp_handle, DAPL_MAGIC_PSP) || + ((DAPL_SP *)psp_handle)->listening != DAT_TRUE ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_PSP); + goto bail; + } + + if (NULL == psp_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + sp_ptr = (DAPL_SP *) psp_handle; + + /* + * Fill in the PSP params + */ + psp_param->ia_handle = sp_ptr->ia_handle; + psp_param->conn_qual = sp_ptr->conn_qual; + psp_param->evd_handle = sp_ptr->evd_handle; + psp_param->psp_flags = sp_ptr->psp_flags; + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_create.c new file mode 100644 index 00000000..b129b845 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_create.c @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_pz_create.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl_pz_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_pz_create + * + * DAPL Requirements Version xxx, 6.6.2.1 + * + * Create an instance of a protection zone + * + * Input: + * ia_handle + * + * Output: + * pz_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_pz_create ( + IN DAT_IA_HANDLE ia_handle, + OUT DAT_PZ_HANDLE *pz_handle) +{ + DAPL_IA *ia; + DAPL_PZ *pz; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_pz_create (%p, %p)\n", + ia_handle, + pz_handle); + + dat_status = DAT_SUCCESS; + if ( DAPL_BAD_HANDLE (ia_handle, DAPL_MAGIC_IA) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA); + goto bail; + } + + if (NULL == pz_handle) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + ia = (DAPL_IA *) ia_handle; + + pz = dapl_pz_alloc (ia); + if ( pz == NULL ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_status = dapls_ib_pd_alloc (ia, pz); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_pz_dealloc (pz); + pz = NULL; + } + + *pz_handle = pz; + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_free.c new file mode 100644 index 00000000..95c3950a --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_free.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_pz_free.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_pz_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_pz_free + * + * DAPL Requirements Version xxx, 6.6.2.1 + * + * Remove an instance of a protection zone + * + * Input: + * pz_handle + * + * Output: + * None.
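+ * + * Note: the PZ must no longer be in use; a nonzero pz_ref_count + * fails below with DAT_INVALID_STATE_PZ_IN_USE.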
+ * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * DAT_INVALID_STATE + */ +DAT_RETURN +dapl_pz_free ( + IN DAT_PZ_HANDLE pz_handle) +{ + DAPL_PZ *pz; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, "dapl_pz_free (%p)\n", pz_handle); + + dat_status = DAT_SUCCESS; + if ( DAPL_BAD_HANDLE (pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_PZ); + goto bail; + } + + pz = (DAPL_PZ *) pz_handle; + + if ( 0 != pz->pz_ref_count ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,DAT_INVALID_STATE_PZ_IN_USE); + goto bail; + } + + dat_status = dapls_ib_pd_free (pz); + + if ( dat_status == DAT_SUCCESS ) + { + dapl_pz_dealloc (pz); + } + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_query.c new file mode 100644 index 00000000..7ebcd534 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_query.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_pz_query.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_pz_query + * + * DAPL Requirements Version xxx, 6.6.2.1 + * + * Return the ia associated with the protection zone pz + * + * Input: + * pz_handle + * pz_param_mask + * + * Output: + * pz_param + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_pz_query ( + IN DAT_PZ_HANDLE pz_handle, + IN DAT_PZ_PARAM_MASK pz_param_mask, + OUT DAT_PZ_PARAM *pz_param) +{ + DAPL_PZ *pz; + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_API, + "dapl_pz_query (%p, %x, %p)\n", + pz_handle, + pz_param_mask, + pz_param); + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_PZ); + goto bail; + } + if (NULL == pz_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + pz = (DAPL_PZ *) pz_handle; + + /* Since the DAT_PZ_ARGS values are easily accessible, */ + /* don't bother checking the DAT_PZ_ARGS_MASK value */ + pz_param->ia_handle = (DAT_IA_HANDLE) pz->header.owner_ia; + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.c new file mode 100644 index 00000000..ac3d871e --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_pz_util.c + * + * PURPOSE: Manage PZ structure + * + * $Id$ + **********************************************************************/ + +#include "dapl_pz_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_pz_alloc + * + * alloc and initialize a PZ struct + * + * Input: + * none + * + * Output: + * pz_ptr + * + * Returns: + * none + * + */ +DAPL_PZ * +dapl_pz_alloc ( + IN DAPL_IA *ia) +{ + DAPL_PZ *pz; + + /* Allocate PZ */ + pz = (DAPL_PZ *) dapl_os_alloc (sizeof (DAPL_PZ)); + if (NULL == pz) + { + return (NULL); + } + + /* zero the structure */ + dapl_os_memzero (pz, sizeof (DAPL_PZ)); + + /* + * initialize the header + */ + pz->header.provider = ia->header.provider; + pz->header.magic = DAPL_MAGIC_PZ; + pz->header.handle_type = DAT_HANDLE_TYPE_PZ; + pz->header.owner_ia = ia; + pz->header.user_context.as_64 = 0; + pz->header.user_context.as_ptr = NULL; + dapl_llist_init_entry (&pz->header.ia_list_entry); + dapl_ia_link_pz (ia, pz); + dapl_os_lock_init (&pz->header.lock); + + /* + * initialize the body + */ + pz->pz_ref_count = 0; + + return (pz); +} + +/* + * dapl_pz_dealloc + * + * free a PZ struct + * + * Input: + * pz_ptr + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapl_pz_dealloc ( + IN DAPL_PZ *pz) +{ + pz->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */ + dapl_ia_unlink_pz (pz->header.owner_ia, pz); + dapl_os_lock_destroy (&pz->header.lock); + + dapl_os_free (pz, sizeof (DAPL_PZ)); +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.h new file mode 100644 index 00000000..6e90fd72 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_pz_util.h @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * HEADER: dapl_pz_util.h + * + * PURPOSE: Utility defs & routines for the PZ data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_PZ_UTIL_H_ +#define _DAPL_PZ_UTIL_H_ + +#include "dapl.h" + +extern DAPL_PZ * +dapl_pz_alloc ( + IN DAPL_IA *ia); + +extern void +dapl_pz_dealloc ( + IN DAPL_PZ *pz); + +#endif /* _DAPL_PZ_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.c new file mode 100644 index 00000000..11d574ff --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.c @@ -0,0 +1,348 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_ring_buffer_util.c + * + * PURPOSE: Ring buffer management + * Description: Support and management functions for ring buffers + * + * $Id$ + **********************************************************************/ + +#include "dapl_ring_buffer_util.h" + + +/* + * dapls_rbuf_alloc + * + * Given a DAPL_RING_BUFFER, initialize it and provide memory for + * the ringbuf itself. A passed-in size will be adjusted to the next + * largest power of two to simplify management. + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * size number of elements to allocate & manage + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_rbuf_alloc ( + INOUT DAPL_RING_BUFFER *rbuf, + IN DAT_COUNT size ) +{ + unsigned int rsize; /* real size */ + + /* The circular buffer must be allocated one too large. + * This eliminates any need for a distinct counter, since + * having the two pointers equal always means "empty" -- never "full" + */ + size++; + + /* Put size on a power of 2 boundary */ + rsize = 1; + while ( (DAT_COUNT)rsize < size) + { + rsize <<= 1; + } + + rbuf->base = (void *) dapl_os_alloc (rsize * sizeof (void *)); + if ( rbuf->base != NULL ) + { + rbuf->lim = rsize - 1; + rbuf->head = 0; + rbuf->tail = 0; + } + else + { + return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + } + + return DAT_SUCCESS; +} + + +/* + * dapls_rbuf_realloc + * + * Resizes an empty DAPL_RING_BUFFER. This function is not thread safe; + * adding or removing elements from a ring buffer while resizing + * will have indeterminate results.
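+ * The buffer must also be empty: if head != tail the call fails + * below with DAT_INVALID_STATE.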
+ * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * size number of elements to allocate & manage + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_STATE + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_rbuf_realloc ( + INOUT DAPL_RING_BUFFER *rbuf, + IN DAT_COUNT size ) +{ + int rsize; /* real size */ + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + /* if the ring buffer is not empty */ + if ( rbuf->head != rbuf->tail ) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE,0); + goto bail; + } + + /* Put size on a power of 2 boundary */ + rsize = 1; + while (rsize < size) + { + rsize <<= 1; + } + + rbuf->base = (void *) dapl_os_realloc (rbuf->base, rsize * sizeof (void *)); + if ( NULL == rbuf->base ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + rbuf->lim = rsize - 1; + + bail: + return dat_status; +} + + +/* + * dapls_rbuf_destroy + * + * Release the buffer and reset pointers to a DAPL_RING_BUFFER + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * + * Output: + * none + * + * Returns: + * none + * + */ +void +dapls_rbuf_destroy ( + IN DAPL_RING_BUFFER *rbuf) +{ + if ( (NULL == rbuf) || + (NULL == rbuf->base) ) + { + return; + } + + dapl_os_free (rbuf->base, (rbuf->lim + 1) * sizeof (void *)); + rbuf->base = NULL; + rbuf->lim = 0; + + return; +} + +/* + * dapls_rbuf_add + * + * Add an entry to the ring buffer + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * entry entry to add + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES (queue full) + * + */ +DAT_RETURN +dapls_rbuf_add ( + IN DAPL_RING_BUFFER *rbuf, + IN void *entry) +{ + int pos; + int val; + + while ( ((rbuf->head + 1) & rbuf->lim) != (rbuf->tail & rbuf->lim) ) + { + pos = rbuf->head; + val = dapl_os_atomic_assign (&rbuf->head, pos, pos + 1); + if ( val == pos ) + { + pos = (pos + 1) & rbuf->lim; /* verify in range */ + rbuf->base[pos] = entry; + return DAT_SUCCESS; + } + } + + return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + +} + + +/* + * dapls_rbuf_remove + * + * Remove an entry from the ring buffer + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * + * Output: + * entry entry removed from the ring buffer + * + * Returns: + * the removed entry + * NULL if the ring buffer is empty + + */ +void * +dapls_rbuf_remove ( + IN DAPL_RING_BUFFER *rbuf ) +{ + int pos; + int val; + + while ( rbuf->head != rbuf->tail ) + { + pos = rbuf->tail; + val = dapl_os_atomic_assign (&rbuf->tail, pos, pos + 1); + if ( val == pos ) + { + pos = (pos + 1) & rbuf->lim; /* verify in range */ + + return (rbuf->base[pos]); + } + } + + return NULL; + +} + +/* + * dapls_rbuf_count + * + * Return the number of entries in use in the ring buffer + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * + * + * Output: + * none + * + * Returns: + * count of entries + * + */ +DAT_COUNT +dapls_rbuf_count ( + IN DAPL_RING_BUFFER *rbuf ) +{ + DAT_COUNT count; + int head; + int tail; + + head = rbuf->head & rbuf->lim; + tail = rbuf->tail & rbuf->lim; + if ( head > tail ) + { + count = head - tail; + } + else + { + /* add 1 to lim as it is a mask, number of entries - 1 */ + count = (rbuf->lim + 1 - tail + head) & rbuf->lim; + } + + return count; +} + + +/* + * dapls_rbuf_contains + * + * Return DAT_TRUE or DAT_FALSE depending on whether an element + * exists in a ring buffer + * + * Input: + * rbuf pointer to DAPL_RING_BUFFER + * entry entry to match + * + * Output: + * none + * + * Returns: + * DAT_TRUE + * DAT_FALSE + + */ +DAT_BOOLEAN
+dapls_rbuf_contains (
+	IN  DAPL_RING_BUFFER	*rbuf,
+	IN  void		*entry)
+{
+    int		pos;
+
+    pos = rbuf->head;
+    while ( pos != rbuf->tail )
+    {
+	if (rbuf->base[pos] == entry)
+	    return DAT_TRUE;
+	pos = (pos + 1) & rbuf->lim;	/* wrap index into range */
+    }
+
+    return DAT_FALSE;
+
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.h
new file mode 100644
index 00000000..58a585b1
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_ring_buffer_util.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dapl_ring_buffer_util.h
+ *
+ * PURPOSE: Utility defs & routines for the ring buffer data structure
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#ifndef _DAPL_RING_BUFFER_H_
+#define _DAPL_RING_BUFFER_H_
+
+#include "dapl.h"
+
+/*
+ * Prototypes
+ */
+DAT_RETURN dapls_rbuf_alloc (
+	DAPL_RING_BUFFER	*rbuf,
+	DAT_COUNT		size );
+
+DAT_RETURN dapls_rbuf_realloc (
+	DAPL_RING_BUFFER	*rbuf,
+	DAT_COUNT		size );
+
+void dapls_rbuf_destroy (
+	DAPL_RING_BUFFER	*rbuf);
+
+DAT_RETURN dapls_rbuf_add (
+	DAPL_RING_BUFFER	*rbuf,
+	void			*entry);
+
+void * dapls_rbuf_remove (
+	DAPL_RING_BUFFER	*rbuf);
+
+DAT_COUNT dapls_rbuf_count (
+	DAPL_RING_BUFFER	*rbuf );
+
+DAT_BOOLEAN dapls_rbuf_contains (
+	IN  DAPL_RING_BUFFER	*rbuf,
+	IN  void		*entry);
+
+
+/*
+ * Simple functions
+ */
+#define dapls_rbuf_empty(rbuf)	((rbuf)->head == (rbuf)->tail)
+
+
+#endif /* _DAPL_RING_BUFFER_H_ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_bind.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_bind.c
new file mode 100644
index 00000000..4de96685
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_bind.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory.
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_rmr_bind.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_rmr_util.h" +#include "dapl_ep_util.h" +#include "dapl_cookie.h" +#include "dapl_adapter_util.h" + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +STATIC _INLINE_ DAT_RETURN +dapli_rmr_bind_fuse ( + IN DAPL_RMR *rmr, + IN const DAT_LMR_TRIPLET *lmr_triplet, + IN DAT_MEM_PRIV_FLAGS mem_priv, + IN DAPL_EP *ep_ptr, + IN DAT_RMR_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags, + OUT DAT_RMR_CONTEXT *rmr_context ); + +STATIC _INLINE_ DAT_RETURN +dapli_rmr_bind_unfuse ( + IN DAPL_RMR *rmr, + IN DAPL_EP *ep_ptr, + IN DAT_RMR_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags); + + +/********************************************************************* + * * + * Function Definitions * + * * + *********************************************************************/ + +STATIC _INLINE_ DAT_RETURN +dapli_rmr_bind_fuse ( + IN DAPL_RMR *rmr, + IN const DAT_LMR_TRIPLET* lmr_triplet, + IN DAT_MEM_PRIV_FLAGS mem_priv, + IN DAPL_EP *ep_ptr, + IN DAT_RMR_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags, + OUT DAT_RMR_CONTEXT *rmr_context ) +{ + DAPL_LMR *lmr; + DAPL_COOKIE *cookie; + DAT_RETURN dat_status; + ib_bool_t is_signaled; + + dat_status = dapls_hash_search (rmr->header.owner_ia->hca_ptr->lmr_hash_table, + lmr_triplet->lmr_context, + (DAPL_HASH_DATA *) &lmr); + if ( DAT_SUCCESS != dat_status) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); + goto bail; + } + + /* if the ep in unconnected return an error. 
IB requires that the */ + /* QP be connected to change a memory window binding since: */ + /* */ + /* - memory window bind operations are WQEs placed on a QP's */ + /* send queue */ + /* */ + /* - QP's only process WQEs on the send queue when the QP is in */ + /* the RTS state */ + if (DAT_EP_STATE_CONNECTED != ep_ptr->param.ep_state) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep_ptr)); + goto bail; + } + + if ( DAT_FALSE == dapl_mr_bounds_check ( + dapl_mr_get_address (lmr->param.region_desc, lmr->param.mem_type), + lmr->param.length, + lmr_triplet->virtual_address, + lmr_triplet->segment_length) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); + goto bail; + } + + /* If the LMR, RMR, and EP are not in the same PZ, there is an error */ + if ( (ep_ptr->param.pz_handle != lmr->param.pz_handle) || + (ep_ptr->param.pz_handle != rmr->param.pz_handle) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); + goto bail; + } + + if ( !dapl_rmr_validate_completion_flag (DAT_COMPLETION_SUPPRESS_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) || + !dapl_rmr_validate_completion_flag (DAT_COMPLETION_UNSIGNALLED_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) || + !dapl_rmr_validate_completion_flag (DAT_COMPLETION_BARRIER_FENCE_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG4); + goto bail; + } + + dat_status = dapls_rmr_cookie_alloc (&ep_ptr->req_buffer, + rmr, + user_cookie, + &cookie); + if ( DAT_SUCCESS != dat_status ) + { + goto bail; + } + + is_signaled = (completion_flags & DAT_COMPLETION_SUPPRESS_FLAG) ? false : true; + + /* + * Take reference before posting to avoid race conditions with + * completions + */ + dapl_os_atomic_inc (&ep_ptr->req_count); + + dat_status = dapls_ib_mw_bind (rmr, + lmr, + ep_ptr, + cookie, + lmr_triplet->virtual_address, + lmr_triplet->segment_length, + mem_priv, + is_signaled); + if ( DAT_SUCCESS != dat_status ) + { + dapl_os_atomic_dec (&ep_ptr->req_count); + dapls_cookie_dealloc (&ep_ptr->req_buffer, cookie); + goto bail; + } + + dapl_os_atomic_inc (&lmr->lmr_ref_count); + + /* if the RMR was previously bound */ + if ( NULL != rmr->lmr ) + { + dapl_os_atomic_dec (&rmr->lmr->lmr_ref_count); + } + + rmr->param.mem_priv = mem_priv; + rmr->param.lmr_triplet = *lmr_triplet; + rmr->ep = ep_ptr; + rmr->lmr = lmr; + + if ( NULL != rmr_context ) + { + *rmr_context = rmr->param.rmr_context; + } + +bail: + return dat_status; +} + + +STATIC _INLINE_ DAT_RETURN +dapli_rmr_bind_unfuse ( + IN DAPL_RMR *rmr, + IN DAPL_EP *ep_ptr, + IN DAT_RMR_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags) +{ + DAPL_COOKIE *cookie; + DAT_RETURN dat_status; + ib_bool_t is_signaled; + + dat_status = DAT_SUCCESS; + /* + * if the ep in unconnected return an error. 
IB requires that the + * QP be connected to change a memory window binding since: + * + * - memory window bind operations are WQEs placed on a QP's + * send queue + * + * - QP's only process WQEs on the send queue when the QP is in + * the RTS state + */ + if (DAT_EP_STATE_CONNECTED != ep_ptr->param.ep_state) + { + dat_status = DAT_ERROR (DAT_INVALID_STATE, dapls_ep_state_subtype (ep_ptr)); + goto bail1; + } + + /* If the RMR and EP are not in the same PZ, there is an error */ + if ( ep_ptr->param.pz_handle != rmr->param.pz_handle ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); + goto bail1; + } + + if ( !dapl_rmr_validate_completion_flag (DAT_COMPLETION_SUPPRESS_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) || + !dapl_rmr_validate_completion_flag (DAT_COMPLETION_UNSIGNALLED_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) || + !dapl_rmr_validate_completion_flag (DAT_COMPLETION_BARRIER_FENCE_FLAG, + ep_ptr->param.ep_attr.request_completion_flags, + completion_flags) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2); + goto bail1; + } + + dat_status = dapls_rmr_cookie_alloc (&ep_ptr->req_buffer, + rmr, + user_cookie, + &cookie); + if ( DAT_SUCCESS != dat_status ) + { + goto bail1; + } + + is_signaled = (completion_flags & DAT_COMPLETION_UNSIGNALLED_FLAG) ? false : true; + + /* + * Take reference before posting to avoid race conditions with + * completions + */ + dapl_os_atomic_inc (&ep_ptr->req_count); + + dat_status = dapls_ib_mw_unbind (rmr, + ep_ptr, + cookie, + is_signaled); + if ( DAT_SUCCESS != dat_status ) + { + dapl_os_atomic_dec (&ep_ptr->req_count); + dapls_cookie_dealloc (&ep_ptr->req_buffer, cookie); + goto bail1; + } + + /* if the RMR was previously bound */ + if ( NULL != rmr->lmr ) + { + dapl_os_atomic_dec (&rmr->lmr->lmr_ref_count); + } + + rmr->param.mem_priv = DAT_MEM_PRIV_NONE_FLAG; + rmr->param.lmr_triplet.lmr_context = 0; + rmr->param.lmr_triplet.virtual_address = 0; + rmr->param.lmr_triplet.segment_length = 0; + rmr->ep = ep_ptr; + rmr->lmr = NULL; + +bail1: + return dat_status; +} + + +/* + * dapl_rmr_bind + * + * DAPL Requirements Version xxx, 6.6.4.4 + * + * Bind the RMR to the specified memory region within the LMR and + * provide a new rmr_context value. 
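+ *
+ * Illustrative call sequence (editor's sketch, not part of the
+ * original sources; rmr, ep, user_cookie and the triplet fields
+ * are assumed to come from earlier dapl_rmr_create, connection
+ * and LMR registration steps):
+ *
+ *	DAT_LMR_TRIPLET	triplet;
+ *	DAT_RMR_CONTEXT	rmr_context;
+ *
+ *	triplet.lmr_context     = lmr_context;
+ *	triplet.virtual_address = vaddr;
+ *	triplet.segment_length  = length;
+ *	dat_status = dapl_rmr_bind (rmr, &triplet,
+ *				    DAT_MEM_PRIV_ALL_FLAG, ep,
+ *				    user_cookie,
+ *				    DAT_COMPLETION_DEFAULT_FLAG,
+ *				    &rmr_context);
+ *
+ * A segment_length of 0 requests an unbind instead, as implemented
+ * below.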
+ * + * Input: + * Output: + */ +DAT_RETURN +dapl_rmr_bind ( + IN DAT_RMR_HANDLE rmr_handle, + IN const DAT_LMR_TRIPLET *lmr_triplet, + IN DAT_MEM_PRIV_FLAGS mem_priv, + IN DAT_EP_HANDLE ep_handle, + IN DAT_RMR_COOKIE user_cookie, + IN DAT_COMPLETION_FLAGS completion_flags, + OUT DAT_RMR_CONTEXT *rmr_context ) +{ + DAPL_RMR *rmr; + DAPL_EP *ep_ptr; + + if ( DAPL_BAD_HANDLE (rmr_handle, DAPL_MAGIC_RMR) ) + { + return DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_RMR); + } + if ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP) ) + { + return DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_EP); + } + + rmr = (DAPL_RMR *) rmr_handle; + ep_ptr = (DAPL_EP *) ep_handle; + + /* if the rmr should be bound */ + if (0 != lmr_triplet->segment_length) + { + return dapli_rmr_bind_fuse (rmr, + lmr_triplet, + mem_priv, + ep_ptr, + user_cookie, + completion_flags, + rmr_context); + } + else /* the rmr should be unbound */ + { + return dapli_rmr_bind_unfuse (rmr, + ep_ptr, + user_cookie, + completion_flags); + } +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_create.c new file mode 100644 index 00000000..3b47d761 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_create.c @@ -0,0 +1,110 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_rmr_create.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl_rmr_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_rmr_create + * + * DAPL Requirements Version xxx, 6.6.4.1 + * + * Create a remote memory region for the specified protection zone + * + * Input: + * pz_handle + * + * Output: + * rmr_handle + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_rmr_create ( + IN DAT_PZ_HANDLE pz_handle, + OUT DAT_RMR_HANDLE *rmr_handle) +{ + DAPL_PZ *pz; + DAPL_RMR *rmr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (pz_handle, DAPL_MAGIC_PZ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_PZ); + goto bail; + } + + pz = (DAPL_PZ *) pz_handle; + + rmr = dapl_rmr_alloc (pz); + + if ( rmr == NULL ) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_status = dapls_ib_mw_alloc (rmr); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_rmr_dealloc (rmr); + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY_REGION); + goto bail; + } + + dapl_os_atomic_inc (&pz->pz_ref_count); + + *rmr_handle = rmr; + + bail: + return dat_status; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_free.c new file mode 100644 index 00000000..15878bd5 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_free.c @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dapl_rmr_free.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl_rmr_util.h" +#include "dapl_adapter_util.h" +#include "dapl_ia_util.h" + +/* + * dapl_rmr_free + * + * DAPL Requirements Version xxx, 6.6.4.2 + * + * Destroy an instance of the Remote Memory Region + * + * Input: + * rmr_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_rmr_free ( + IN DAT_RMR_HANDLE rmr_handle ) +{ + DAPL_RMR *rmr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (rmr_handle, DAPL_MAGIC_RMR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_RMR); + goto bail; + } + + rmr = (DAPL_RMR *) rmr_handle; + + /* + * If the user did not perform an unbind op, release + * counts here. + */ + if ( rmr->param.lmr_triplet.virtual_address != 0 ) + { + dapl_os_atomic_dec (&rmr->lmr->lmr_ref_count); + rmr->param.lmr_triplet.virtual_address = 0; + } + + dat_status = dapls_ib_mw_free (rmr); + + if (dat_status != DAT_SUCCESS) + { + goto bail; + } + + dapl_os_atomic_dec (&rmr->pz->pz_ref_count); + + dapl_rmr_dealloc (rmr); + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_query.c new file mode 100644 index 00000000..500b7b05 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_query.c @@ -0,0 +1,95 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_rmr_query.c + * + * PURPOSE: Memory management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 6 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_rmr_query + * + * DAPL Requirements Version xxx, 6.6.4.3 + * + * Provide the RMR arguments. 
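+ *
+ * Illustrative call (editor's sketch, not part of the original
+ * sources):
+ *
+ *	DAT_RMR_PARAM	param;
+ *
+ *	dat_status = dapl_rmr_query (rmr,
+ *				     DAT_RMR_FIELD_RMR_CONTEXT,
+ *				     &param);
+ *
+ * Note that the implementation below copies the whole parameter
+ * block regardless of the mask; the mask is only consulted to
+ * reject DAT_RMR_FIELD_LMR_TRIPLET queries on an unbound RMR.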
+ * + * Input: + * rmr_handle + * rmr_args_mask + * + * Output: + * rmr_args + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_rmr_query ( + IN DAT_RMR_HANDLE rmr_handle, + IN DAT_RMR_PARAM_MASK rmr_param_mask, + IN DAT_RMR_PARAM *rmr_param ) +{ + DAPL_RMR *rmr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (rmr_handle, DAPL_MAGIC_RMR) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_RMR); + goto bail; + } + if (NULL == rmr_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + rmr = (DAPL_RMR *) rmr_handle; + + /* If the RMR is unbound, there is no LMR triplet associated with */ + /* this RMR. If the consumer requests this field, return an error. */ + if ( (rmr_param_mask & DAT_RMR_FIELD_LMR_TRIPLET) && (NULL == rmr->lmr) ) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG2); + goto bail; + } + + dapl_os_memcpy (rmr_param, &rmr->param, sizeof (DAT_RMR_PARAM)); + + bail: + return dat_status; +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.c new file mode 100644 index 00000000..23fc30fa --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.c @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_rmr_util.c + * + * PURPOSE: Utility defs & routines for the RMR data structure + * + * + **********************************************************************/ + +#include "dapl_rmr_util.h" +#include "dapl_ia_util.h" + +DAPL_RMR * +dapl_rmr_alloc( + IN DAPL_PZ *pz) +{ + DAPL_RMR *rmr; + + /* Allocate LMR */ + rmr = (DAPL_RMR *) dapl_os_alloc(sizeof(DAPL_RMR)); + if (NULL == rmr) + { + return (NULL); + } + + /* zero the structure */ + dapl_os_memzero(rmr, sizeof(DAPL_RMR)); + + /* + * initialize the header + */ + rmr->header.provider = pz->header.provider; + rmr->header.magic = DAPL_MAGIC_RMR; + rmr->header.handle_type = DAT_HANDLE_TYPE_RMR; + rmr->header.owner_ia = pz->header.owner_ia; + rmr->header.user_context.as_64 = 0; + rmr->header.user_context.as_ptr = 0; + dapl_llist_init_entry (&rmr->header.ia_list_entry); + dapl_ia_link_rmr(rmr->header.owner_ia, rmr); + dapl_os_lock_init(&rmr->header.lock); + + /* + * initialize the body + */ + rmr->param.ia_handle = (DAT_IA_HANDLE) pz->header.owner_ia; + rmr->param.pz_handle = (DAT_PZ_HANDLE) pz; + rmr->param.lmr_triplet.lmr_context = 0; + rmr->param.lmr_triplet.pad = 0; + rmr->param.lmr_triplet.virtual_address = 0; + rmr->param.lmr_triplet.segment_length = 0; + + rmr->param.mem_priv = 0; + rmr->pz = pz; + rmr->lmr = NULL; + + return (rmr); +} + + +void +dapl_rmr_dealloc( + IN DAPL_RMR *rmr) +{ + rmr->header.magic = DAPL_MAGIC_INVALID; /* reset magic to prevent reuse */ + + dapl_ia_unlink_rmr(rmr->header.owner_ia, rmr); + dapl_os_lock_destroy(&rmr->header.lock); + + dapl_os_free((void *) rmr, sizeof(DAPL_RMR)); +} diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.h new file mode 100644 index 00000000..e9cf2eb9 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rmr_util.h @@ -0,0 +1,108 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dapl_rmr_util.h + * + * PURPOSE: Utility defs & routines for the RMR data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_RMR_UTIL_H_ +#define _DAPL_RMR_UTIL_H_ + +#include "dapl_mr_util.h" + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAPL_RMR * +dapl_rmr_alloc ( + IN DAPL_PZ *pz); + +extern void +dapl_rmr_dealloc ( + IN DAPL_RMR *rmr); + +STATIC _INLINE_ DAT_BOOLEAN +dapl_rmr_validate_completion_flag ( + IN DAT_COMPLETION_FLAGS mask, + IN DAT_COMPLETION_FLAGS allow, + IN DAT_COMPLETION_FLAGS request); + +STATIC _INLINE_ int32_t +dapl_rmr_convert_privileges ( + IN DAT_MEM_PRIV_FLAGS privileges); + + +/********************************************************************* + * * + * Inline Functions * + * * + *********************************************************************/ + +STATIC _INLINE_ DAT_BOOLEAN +dapl_rmr_validate_completion_flag ( + IN DAT_COMPLETION_FLAGS mask, + IN DAT_COMPLETION_FLAGS allow, + IN DAT_COMPLETION_FLAGS request) +{ + if ( (mask & request ) && ! (mask & allow) ) + { + return DAT_FALSE; + } + else + { + return DAT_TRUE; + } +} + +STATIC _INLINE_ int32_t +dapl_rmr_convert_privileges ( + IN DAT_MEM_PRIV_FLAGS privileges) +{ + int32_t value = 0; + + if (DAT_MEM_PRIV_REMOTE_READ_FLAG & privileges) + { + value |= IB_ACCESS_REMOTE_READ; + } + + if (DAT_MEM_PRIV_REMOTE_WRITE_FLAG & privileges) + { + value |= IB_ACCESS_REMOTE_WRITE; + } + + return value; +} + +#endif /* _DAPL_RMR_UTIL_H_*/ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_create.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_create.c new file mode 100644 index 00000000..2434369c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_create.c @@ -0,0 +1,218 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_rsp_create.c
+ *
+ * PURPOSE: Connection management
+ * Description: Interfaces in this file are completely described in
+ *		the DAPL 1.1 API, Chapter 6, section 4
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_sp_util.h"
+#include "dapl_ia_util.h"
+#include "dapl_ep_util.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * dapl_rsp_create
+ *
+ * uDAPL: User Direct Access Program Library Version 1.1, 6.4.3.4.1
+ *
+ * Create a Reserved Service Point with the specified Endpoint.
+ * The RSP generates at most one Connection Request, which is
+ * delivered to the specified Event Dispatcher in a notification
+ * event
+ *
+ * Input:
+ *	ia_handle
+ *	conn_qual
+ *	ep_handle
+ *	evd_handle
+ *
+ * Output:
+ *	rsp_handle
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *	DAT_INVALID_PARAMETER
+ *	DAT_INVALID_STATE
+ *	DAT_CONN_QUAL_IN_USE
+ */
+DAT_RETURN
+dapl_rsp_create (
+	IN  DAT_IA_HANDLE	ia_handle,
+	IN  DAT_CONN_QUAL	conn_qual,
+	IN  DAT_EP_HANDLE	ep_handle,
+	IN  DAT_EVD_HANDLE	evd_handle,
+	OUT DAT_RSP_HANDLE	*rsp_handle )
+{
+    DAPL_IA	*ia_ptr;
+    DAPL_SP	*sp_ptr;
+    DAPL_EVD	*evd_ptr;
+    DAPL_EP	*ep_ptr;
+    DAT_BOOLEAN	sp_found;
+    DAT_RETURN	dat_status;
+
+    dat_status = DAT_SUCCESS;
+    ia_ptr = (DAPL_IA *)ia_handle;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_CM,
+		  ">>> dapl_rsp_create conn_qual: %x EP: %p\n",
+		  conn_qual, ep_handle);
+
+    if ( DAPL_BAD_HANDLE (ia_ptr, DAPL_MAGIC_IA) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_IA);
+	goto bail;
+    }
+    if ( DAPL_BAD_HANDLE (ep_handle, DAPL_MAGIC_EP) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EP);
+	goto bail;
+    }
+    if ( DAPL_BAD_HANDLE (evd_handle, DAPL_MAGIC_EVD) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR);
+	goto bail;
+    }
+
+    if ( rsp_handle == NULL )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG5);
+	goto bail;
+    }
+
+    ep_ptr = (DAPL_EP *) ep_handle;
+    if ( ep_ptr->param.ep_state != DAT_EP_STATE_UNCONNECTED )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_STATE,dapls_ep_state_subtype (ep_ptr));
+	goto bail;
+    }
+
+    evd_ptr = (DAPL_EVD *)evd_handle;
+    if ( ! (evd_ptr->evd_flags & DAT_EVD_CR_FLAG) )
+    {
+	dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_EVD_CR);
+	goto bail;
+    }
+
+    sp_ptr = dapls_ia_sp_search (ia_ptr, conn_qual, DAT_FALSE);
+    sp_found = DAT_TRUE;
+    if (sp_ptr == NULL)
+    {
+	sp_found = DAT_FALSE;
+
+	/* Allocate RSP */
+	sp_ptr = dapls_sp_alloc ( ia_ptr, DAT_FALSE );
+	if ( sp_ptr == NULL )
+	{
+	    dat_status =
+		DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY);
+	    goto bail;
+	}
+    }
+
+    /*
+     * Fill out the RSP args
+     */
+    sp_ptr->ia_handle  = ia_handle;
+    sp_ptr->conn_qual  = conn_qual;
+    sp_ptr->evd_handle = evd_handle;
+    sp_ptr->psp_flags  = 0;
+    sp_ptr->ep_handle  = ep_handle;
+
+    /*
+     * Take a reference on the EVD handle
+     */
+    dapl_os_atomic_inc (& ((DAPL_EVD *)evd_handle)->evd_ref_count);
+
+    /*
+     * Update the EP state indicating the provider now owns it
+     */
+    ep_ptr->param.ep_state = DAT_EP_STATE_RESERVED;
+
+    /*
+     * Set up a listener for a connection. Connections can arrive
+     * even before this call returns!
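+     * The SP state is therefore marked LISTENING before the listener
+     * is actually armed below, so a connection request that races
+     * this call still finds the SP in a valid state.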
+ */ + sp_ptr->state = DAPL_SP_STATE_RSP_LISTENING; + sp_ptr->listening = DAT_TRUE; + + if (sp_found == DAT_FALSE) + { + /* Link it onto the IA */ + dapl_ia_link_rsp (ia_ptr, sp_ptr); + + dat_status = dapls_ib_setup_conn_listener ( ia_ptr, + conn_qual, + sp_ptr ); + + if ( dat_status != DAT_SUCCESS ) + { + /* + * Have a problem setting up the connection, something + * wrong! Decrements the EVD refcount & release it. Set + * the state to FREE, so we know the call failed. + */ + dapl_os_atomic_dec (& ((DAPL_EVD *)evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + sp_ptr->state = DAPL_SP_STATE_FREE; + dapls_ia_unlink_sp (ia_ptr, sp_ptr); + dapls_sp_free_sp (sp_ptr); + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> dapl_rsp_create setup_conn_listener failed: %x\n", + dat_status); + + goto bail; + } + } + + /* + * Return handle to the user + */ + *rsp_handle = (DAT_RSP_HANDLE)sp_ptr; + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ + + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_free.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_free.c new file mode 100644 index 00000000..2c279e90 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_free.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_rsp_free.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" +#include "dapl_sp_util.h" +#include "dapl_ia_util.h" +#include "dapl_adapter_util.h" + +/* + * dapl_rsp_free + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.3.5 + * + * Destroy a specific instance of a Reserved Service Point. 
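+ *
+ * Illustrative teardown order (editor's sketch, not part of the
+ * original sources): free the RSP before freeing the EP it holds,
+ * e.g.
+ *
+ *	dat_status = dapl_rsp_free (rsp);
+ *	dat_status = dapl_ep_free (ep);
+ *
+ * As implemented below, freeing the RSP returns a still-RESERVED
+ * EP to DAT_EP_STATE_UNCONNECTED so it can be reused or freed.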
+ * + * Input: + * rsp_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + */ +DAT_RETURN +dapl_rsp_free ( + IN DAT_RSP_HANDLE rsp_handle ) +{ + DAPL_IA *ia_ptr; + DAPL_SP *sp_ptr; + DAPL_EP *ep_ptr; + DAT_RETURN dat_status; + + dat_status = DAT_SUCCESS; + + sp_ptr = (DAPL_SP *) rsp_handle; + /* + * Verify handle + */ + dapl_dbg_log (DAPL_DBG_TYPE_CM, + ">>> dapl_rsp_free %p\n", + rsp_handle); + if ( DAPL_BAD_HANDLE (sp_ptr, DAPL_MAGIC_RSP ) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_RSP); + goto bail; + } + + /* ia_ptr = (DAPL_IA *)sp_ptr->header.owner_ia; */ + ia_ptr = sp_ptr->header.owner_ia; + + /* + * Remove the connection listener if there are no connections. If + * we defer removing the sp it becomes something of a zombie + * container until disconnection, after which it will be cleaned up. + */ + dapl_os_lock (&sp_ptr->header.lock); + + /* + * Make sure we don't leave a dangling EP. If the state is still + * RESERVED then the RSP still owns it. + */ + ep_ptr = (DAPL_EP *)sp_ptr->ep_handle; + if ( ep_ptr != NULL && ep_ptr->param.ep_state == DAT_EP_STATE_RESERVED ) + { + ep_ptr->param.ep_state = DAT_EP_STATE_UNCONNECTED; + } + sp_ptr->ep_handle = NULL; + + /* Release reference on EVD. If an error was encountered in a previous + * free the evd_handle will be NULL + */ + if (sp_ptr->evd_handle) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)sp_ptr->evd_handle)->evd_ref_count); + sp_ptr->evd_handle = NULL; + } + + /* + * Release the base resource if there are no outstanding connections; + * else the last disconnect on this RSP will free it up. The RSP + * is used to contain CR records for each connection, which + * contain information necessary to disconnect. + * sp_ptr->listening will be DAT_TRUE if there has never been a + * connection event, and DAT_FALSE if a connection attempt resulted + * in a reject. + */ + if ( sp_ptr->cr_list_count == 0 ) + { + /* This RSP has never been used. Clean it up */ + sp_ptr->listening = DAT_FALSE; + sp_ptr->state = DAPL_SP_STATE_FREE; + dapl_os_unlock (&sp_ptr->header.lock); + + dat_status = dapls_ib_remove_conn_listener ( ia_ptr, + sp_ptr ); + if (dat_status != DAT_SUCCESS) + { + sp_ptr->state = DAPL_SP_STATE_RSP_LISTENING; + goto bail; + } + dapls_ia_unlink_sp ( ia_ptr, sp_ptr ); + dapls_sp_free_sp ( sp_ptr ); + } + else + { + /* The RSP is now in the pending state, where it will sit until + * the connection terminates or the app uses the same + * ServiceID again, which will reactivate it. + */ + sp_ptr->state = DAPL_SP_STATE_RSP_PENDING; + dapl_os_unlock (&sp_ptr->header.lock); + } + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_query.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_query.c new file mode 100644 index 00000000..dc811de7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_rsp_query.c @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. 
+ * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_rsp_query.c + * + * PURPOSE: Connection management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 4 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_rsp_query + * + * uDAPL: User Direct Access Program Library Version 1.1, 6.4.1.6 + * + * Provide arguments of the reserved service points + * + * Input: + * rsp_handle + * rsp_args_mask + * + * Output: + * rsp_args + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapl_rsp_query ( + IN DAT_RSP_HANDLE rsp_handle, + IN DAT_RSP_PARAM_MASK rsp_mask, + OUT DAT_RSP_PARAM *rsp_param ) +{ + DAPL_SP *sp_ptr; + DAT_RETURN dat_status; + UNREFERENCED_PARAMETER(rsp_mask); + + dat_status = DAT_SUCCESS; + + if ( DAPL_BAD_HANDLE (rsp_handle, DAPL_MAGIC_RSP) ) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,DAT_INVALID_HANDLE_RSP); + goto bail; + } + + if (NULL == rsp_param) + { + dat_status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + sp_ptr = (DAPL_SP *) rsp_handle; + + /* + * Fill in the RSP params + */ + rsp_param->ia_handle = sp_ptr->ia_handle; + rsp_param->conn_qual = sp_ptr->conn_qual; + rsp_param->evd_handle = sp_ptr->evd_handle; + rsp_param->ep_handle = sp_ptr->ep_handle; + + bail: + return dat_status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * c-brace-offset: -4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_set_consumer_context.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_set_consumer_context.c new file mode 100644 index 00000000..7f0914ef --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_set_consumer_context.c @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. 
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl_set_consumer_context.c + * + * PURPOSE: Interface Adapter management + * Description: Interfaces in this file are completely described in + * the DAPL 1.1 API, Chapter 6, section 2 + * + * $Id$ + **********************************************************************/ + +#include "dapl.h" + +/* + * dapl_set_consumer_context + * + * DAPL Requirements Version xxx, 6.2.2.1 + * + * Set a consumer context in the provided dat_handle + * + * Input: + * dat_handle + * context + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + */ +DAT_RETURN +dapl_set_consumer_context ( + IN DAT_HANDLE dat_handle, + IN DAT_CONTEXT context ) +{ + DAT_RETURN dat_status; + DAPL_HEADER *header; + + dat_status = DAT_SUCCESS; + + header = (DAPL_HEADER *)dat_handle; + if ( ((header) == NULL) || + (((DAT_UVERYLONG)header) & 3) || + (header->magic != DAPL_MAGIC_IA && + header->magic != DAPL_MAGIC_EVD && + header->magic != DAPL_MAGIC_EP && + header->magic != DAPL_MAGIC_LMR && + header->magic != DAPL_MAGIC_RMR && + header->magic != DAPL_MAGIC_PZ && + header->magic != DAPL_MAGIC_PSP && + header->magic != DAPL_MAGIC_RSP && + header->magic != DAPL_MAGIC_CR)) + { + dat_status = DAT_ERROR (DAT_INVALID_HANDLE,0); + goto bail; + } + header->user_context = context; + +bail: + return dat_status; +} + diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.c b/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.c new file mode 100644 index 00000000..a5554bf4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.c @@ -0,0 +1,316 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_sp_util.c
+ *
+ * PURPOSE: Manage PSP Info structure
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_ep_util.h"
+#include "dapl_sp_util.h"
+#include "dapl_cr_util.h"
+
+/*
+ * Local definitions
+ */
+
+
+/*
+ * dapls_sp_alloc
+ *
+ * Allocate and initialize an SP INFO struct (PSP or RSP)
+ *
+ * Input:
+ *	ia_ptr		IA INFO struct ptr
+ *	is_psp		DAT_TRUE for a PSP, DAT_FALSE for an RSP
+ *
+ * Output:
+ *	sp_ptr
+ *
+ * Returns:
+ *	NULL
+ *	pointer to sp info struct
+ *
+ */
+DAPL_SP *
+dapls_sp_alloc (
+	IN  DAPL_IA		*ia_ptr,
+	IN  DAT_BOOLEAN		is_psp )
+{
+    DAPL_SP	*sp_ptr;
+
+    /* Allocate SP */
+    sp_ptr = (DAPL_SP *)dapl_os_alloc (sizeof (DAPL_SP));
+    if ( sp_ptr == NULL )
+    {
+	return (NULL);
+    }
+
+    /* zero the structure */
+    dapl_os_memzero (sp_ptr, sizeof (DAPL_SP));
+
+    /*
+     * initialize the header
+     */
+    sp_ptr->header.provider		= ia_ptr->header.provider;
+    if ( is_psp )
+    {
+	sp_ptr->header.magic		= DAPL_MAGIC_PSP;
+	sp_ptr->header.handle_type	= DAT_HANDLE_TYPE_PSP;
+    }
+    else
+    {
+	sp_ptr->header.magic		= DAPL_MAGIC_RSP;
+	sp_ptr->header.handle_type	= DAT_HANDLE_TYPE_RSP;
+    }
+    sp_ptr->header.owner_ia		= ia_ptr;
+    sp_ptr->header.user_context.as_64	= 0;
+    sp_ptr->header.user_context.as_ptr	= NULL;
+    dapl_llist_init_entry (&sp_ptr->header.ia_list_entry);
+    dapl_os_lock_init (&sp_ptr->header.lock);
+    dapl_os_wait_object_init ( &sp_ptr->wait_object );
+
+    /*
+     * Initialize the body (remaining fields were zeroed above)
+     */
+    dapl_llist_init_head (&sp_ptr->cr_list_head);
+
+    return ( sp_ptr );
+}
+
+
+/*
+ * dapls_sp_free_sp
+ *
+ * Free the passed-in SP structure (PSP or RSP).
+ *
+ * Input:
+ *	sp_ptr		pointer to SP structure
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	none
+ *
+ */
+void
+dapls_sp_free_sp (
+	IN  DAPL_SP		*sp_ptr )
+{
+    dapl_os_assert (sp_ptr->header.magic == DAPL_MAGIC_PSP ||
+		    sp_ptr->header.magic == DAPL_MAGIC_RSP);
+    dapl_os_assert (dapl_llist_is_empty(&sp_ptr->cr_list_head));
+
+    dapl_os_lock (&sp_ptr->header.lock);
+    sp_ptr->header.magic = DAPL_MAGIC_INVALID;	/* reset magic to prevent reuse */
+    dapl_os_unlock (&sp_ptr->header.lock);
+    dapl_os_wait_object_destroy (&sp_ptr->wait_object);
+    dapl_os_free (sp_ptr, sizeof (DAPL_SP));
+}
+
+
+/*
+ * dapl_sp_link_cr
+ *
+ * Add a CR to an SP structure
+ *
+ * Input:
+ *	sp_ptr
+ *	cr_ptr
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	none
+ *
+ */
+void
+dapl_sp_link_cr (
+	IN  DAPL_SP		*sp_ptr,
+	IN  DAPL_CR		*cr_ptr )
+{
+    dapl_os_lock (&sp_ptr->header.lock);
+    dapl_llist_add_tail (&sp_ptr->cr_list_head,
+			 &cr_ptr->header.ia_list_entry,
+			 cr_ptr);
+    sp_ptr->cr_list_count++;
+    dapl_os_unlock (&sp_ptr->header.lock);
+}
+
+
+/*
+ * dapl_sp_search_cr
+ *
+ * Search for a CR on the PSP cr_list with a matching cm_handle. When
+ * found, return a pointer to it; the CR is left on the list.
+ *
+ * The sp_ptr lock is taken and released inside this function.
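+ *
+ * Typical use (editor's sketch, not part of the original sources)
+ * is a lookup from a CM callback that has only the IB CM handle;
+ * a NULL result means the CR is stale or already removed:
+ *
+ *	cr_ptr = dapl_sp_search_cr (sp_ptr, ib_cm_handle);
+ *	if (cr_ptr == NULL)
+ *	    return;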
+ *
+ * Input:
+ *	sp_ptr
+ *	ib_cm_handle
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	cr_ptr_fnd	Pointer to matching DAPL_CR
+ *
+ */
+DAPL_CR *
+dapl_sp_search_cr (
+	IN  DAPL_SP		*sp_ptr,
+	IN  ib_cm_handle_t	ib_cm_handle )
+{
+    DAPL_CR	*cr_ptr;
+    DAPL_CR	*cr_ptr_fnd;
+
+    dapl_os_lock (&sp_ptr->header.lock);
+    if ( dapl_llist_is_empty (&sp_ptr->cr_list_head) )
+    {
+	dapl_os_unlock (&sp_ptr->header.lock);
+	return NULL;
+    }
+    cr_ptr_fnd = NULL;
+    cr_ptr = (DAPL_CR *) dapl_llist_peek_head (&sp_ptr->cr_list_head);
+
+    do
+    {
+	if ( cr_ptr->ib_cm_handle.cid == ib_cm_handle.cid )
+	{
+	    cr_ptr_fnd = cr_ptr;
+
+	    break;
+	}
+	cr_ptr = cr_ptr->header.ia_list_entry.flink->data;
+    } while ((void *)cr_ptr != (void *)sp_ptr->cr_list_head->data);
+
+    dapl_os_unlock (&sp_ptr->header.lock);
+
+    return cr_ptr_fnd;
+}
+
+
+
+/*
+ * dapl_sp_remove_cr
+ *
+ * Remove the CR from the PSP. Done prior to freeing the CR resource.
+ *
+ * The sp_ptr lock is taken and released inside this function.
+ *
+ * Input:
+ *	sp_ptr
+ *	cr_ptr
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	void
+ *
+ */
+void
+dapl_sp_remove_cr (
+	IN  DAPL_SP		*sp_ptr,
+	IN  DAPL_CR		*cr_ptr )
+{
+    dapl_os_lock (&sp_ptr->header.lock);
+
+    if ( dapl_llist_is_empty(&sp_ptr->cr_list_head) )
+    {
+	dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+		      "***dapl_sp_remove_cr: removing from empty queue! sp %p\n",
+		      sp_ptr );
+	dapl_os_unlock (&sp_ptr->header.lock);
+	return;
+    }
+
+    dapl_llist_remove_entry (&sp_ptr->cr_list_head,
+			     &cr_ptr->header.ia_list_entry);
+    sp_ptr->cr_list_count--;
+
+    dapl_os_unlock (&sp_ptr->header.lock);
+}
+
+
+
+/*
+ * dapl_sp_remove_ep
+ *
+ * Remove a CR from a PSP, given an EP.
+ *
+ *
+ * Input:
+ *	ep_ptr
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	void
+ *
+ */
+void
+dapl_sp_remove_ep (
+	IN  DAPL_EP		*ep_ptr )
+{
+    DAPL_SP	*sp_ptr;
+    DAPL_CR	*cr_ptr;
+
+    cr_ptr = ep_ptr->cr_ptr;
+
+    if (cr_ptr != NULL)
+    {
+	sp_ptr = cr_ptr->sp_ptr;
+
+	/* Remove the CR from the queue. dapl_sp_remove_cr takes and
+	 * releases the sp_ptr lock itself, so the lock must not be
+	 * held here.
+	 */
+	dapl_sp_remove_cr (sp_ptr, cr_ptr);
+
+	/* free memory outside of the lock */
+	dapls_cr_free (cr_ptr);
+
+	return;
+    }
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.h b/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.h
new file mode 100644
index 00000000..1442ace0
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/common/dapl_sp_util.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_sp_util.h + * + * PURPOSE: Utility defs & routines for the PSP & RSP data structure + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_PSP_UTIL_H_ +#define _DAPL_PSP_UTIL_H_ + +DAPL_SP *dapls_sp_alloc ( + IN DAPL_IA *ia_ptr, + IN DAT_BOOLEAN is_psp ); + +void dapls_sp_free_sp ( + IN DAPL_SP *sp_ptr ); + +void dapl_sp_link_cr ( + IN DAPL_SP *sp_ptr, + IN DAPL_CR *cr_ptr ); + +DAPL_CR *dapl_sp_search_cr ( + IN DAPL_SP *sp_ptr, + IN ib_cm_handle_t ib_cm_handle ); + +void dapl_sp_remove_cr ( + IN DAPL_SP *sp_ptr, + IN DAPL_CR *cr_ptr ); + +void dapl_sp_remove_ep ( + IN DAPL_EP *ep_ptr ); + +#endif /* _DAPL_PSP_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/dirs b/branches/Ndi/ulp/dapl/dapl/dirs new file mode 100644 index 00000000..2d7badc4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/dirs @@ -0,0 +1 @@ +DIRS=udapl diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_cm.c b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_cm.c new file mode 100644 index 00000000..eacf3043 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_cm.c @@ -0,0 +1,1970 @@ + +/* + * Copyright (c) 2002, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under the terms of the "Common Public + * License" a copy of which is in the file LICENSE.txt in the root + * directory. The license is also available from the Open Source + * Initiative, see http://www.opensource.org/licenses/cpl.php. + * + */ + +/********************************************************************** + * + * MODULE: dapl_ibal_cm.c + * + * PURPOSE: IB Connection routines for access to IBAL APIs + * + * $Id$ + * + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" +#include "dapl_evd_util.h" +#include "dapl_cr_util.h" +#include "dapl_sp_util.h" +#include "dapl_ia_util.h" +#include "dapl_ibal_util.h" +#include "dapl_name_service.h" + +#define IB_INFINITE_SERVICE_LEASE 0xFFFFFFFF +#define DAPL_ATS_SERVICE_ID ATS_SERVICE_ID //0x10000CE100415453 +#define DAPL_ATS_NAME ATS_NAME +#define HCA_IPV6_ADDRESS_LENGTH 16 + +int g_dapl_loopback_connection = 0; +extern dapl_ibal_root_t dapl_ibal_root; + +/* + * Prototypes + */ +static void +dapli_ib_sa_query_cb ( + IN ib_query_rec_t *p_query_rec ); + + + +#ifndef NO_NAME_SERVICE + +static void +dapli_ib_reg_svc_cb ( + IN ib_reg_svc_rec_t *p_reg_svc_rec ) +{ + DAPL_HCA *hca_ptr; + + hca_ptr = (DAPL_HCA * __ptr64) p_reg_svc_rec->svc_context; + + dapl_os_assert (hca_ptr); + + if (IB_SUCCESS == p_reg_svc_rec->req_status) + { + hca_ptr->name_service_handle = (void *) p_reg_svc_rec->h_reg_svc; + + dapl_dbg_log ( DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "--> DiRScb: register to SA successfully for port %d\n", + hca_ptr->port_num); + + dapl_dbg_log ( DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "--> DiRScb: reg_service_handle %p\n", + hca_ptr->name_service_handle); + + } + else + { + hca_ptr->name_service_handle = IB_INVALID_HANDLE; + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DiRScb: Register service to SA failed = %s\n", + ib_get_err_str(p_reg_svc_rec->req_status)); + } + +} + + + +DAT_RETURN +dapls_ib_ns_map_gid ( + IN DAPL_HCA *hca_ptr, + IN DAT_IA_ADDRESS_PTR 
p_ia_address, + OUT GID *p_gid) +{ + ib_user_query_t user_query; + dapl_ibal_ca_t *p_ca; + dapl_ibal_port_t *p_active_port; + ib_service_record_t service_rec; + ib_api_status_t ib_status; + ib_query_req_t query_req; + DAT_SOCK_ADDR6 ipv6_addr; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (NULL == p_ca) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMG: There is no HCA = %d\n", __LINE__); + return (DAT_INVALID_HANDLE); + } + + /* + * We are using the first active port in the list for + * communication. We have to get back here when we decide to support + * fail-over and high-availability. + */ + p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)hca_ptr->port_num ); + + if (NULL == p_active_port) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMG: Port %d is not available = %d\n", + hca_ptr->port_num, __LINE__); + return (DAT_INVALID_STATE); + } + + if (p_active_port->p_attr->lid == 0) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMG: Port %d has no LID assigned; can not operate\n", + p_active_port->p_attr->port_num); + return (DAT_INVALID_STATE); + } + else + { + if (!dapl_os_memcmp (p_ia_address, + &hca_ptr->hca_address, + HCA_IPV6_ADDRESS_LENGTH)) + { + /* + * We are operating in the LOOPBACK mode + */ + p_gid->guid = + p_active_port->p_attr->p_gid_table[0].unicast.interface_id; + p_gid->gid_prefix = + p_active_port->p_attr->p_gid_table[0].unicast.prefix; + return DAT_SUCCESS; + } + else if (p_active_port->p_attr->link_state != IB_LINK_ACTIVE) + { + /* + * Port is DOWN; can not send or recv messages + */ + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMG: Port %d is DOWN; can not send to fabric\n", + p_active_port->p_attr->port_num); + return (DAT_INVALID_STATE); + } + } + + dapl_os_memzero (&user_query, sizeof (ib_user_query_t)); + dapl_os_memzero (&service_rec, sizeof (ib_service_record_t)); + dapl_os_memzero (&query_req, sizeof (ib_query_req_t)); + dapl_os_memzero (&ipv6_addr, sizeof (DAT_SOCK_ADDR6)); + + if (p_ia_address->sa_family == AF_INET) + { + dapl_os_memcpy (&ipv6_addr.sin6_addr.s6_addr[12], + &((struct sockaddr_in *)p_ia_address)->sin_addr.s_addr, + 4); +#ifdef DAPL_DBG + { + int rval; + + rval = ((struct sockaddr_in *) p_ia_address)->sin_addr.s_addr; + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DsNMG: Remote ia_address = %d.%d.%d.%d \n", + (rval >> 0) & 0xff, + (rval >> 8) & 0xff, + (rval >> 16) & 0xff, + (rval >> 24) & 0xff); + } +#endif + + } + else + { + /* + * Assume IPv6 address + */ + dapl_os_assert (p_ia_address->sa_family == AF_INET6); + dapl_os_memcpy (ipv6_addr.sin6_addr.s6_addr, + ((DAT_SOCK_ADDR6 *)p_ia_address)->sin6_addr.s6_addr, + HCA_IPV6_ADDRESS_LENGTH); +#ifdef DAPL_DBG + { + int i; + uint8_t *tmp = ipv6_addr.sin6_addr.s6_addr; + + dapl_dbg_log ( DAPL_DBG_TYPE_CM, + "--> DsNMG: Remote ia_address - "); + + for ( i = 1; i < HCA_IPV6_ADDRESS_LENGTH; i++) + { + dapl_dbg_log ( DAPL_DBG_TYPE_CM, "%x:", + tmp[i-1] ); + } + dapl_dbg_log ( DAPL_DBG_TYPE_CM, "%x\n", + tmp[i-1] ); + } +#endif + + } + + /* + * query SA for GID + */ + //service_rec.service_id = CL_HTON64 (DAPL_ATS_SERVICE_ID); + dapl_os_memcpy ( service_rec.service_name, ATS_NAME, __min(sizeof(ATS_NAME),sizeof(ib_svc_name_t))); + dapl_os_memcpy (&service_rec.service_data8[0], + ipv6_addr.sin6_addr.s6_addr, + HCA_IPV6_ADDRESS_LENGTH); + service_rec.service_lease = IB_INFINITE_SERVICE_LEASE; + service_rec.service_pkey = IB_DEFAULT_PKEY; + + user_query.method = IB_MAD_METHOD_GETTABLE; + user_query.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + user_query.comp_mask = IB_SR_COMPMASK_SPKEY | + 
IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SNAME | + IB_SR_COMPMASK_SDATA8_0 | + IB_SR_COMPMASK_SDATA8_1 | + IB_SR_COMPMASK_SDATA8_2 | + IB_SR_COMPMASK_SDATA8_3 | + IB_SR_COMPMASK_SDATA8_4 | + IB_SR_COMPMASK_SDATA8_5 | + IB_SR_COMPMASK_SDATA8_6 | + IB_SR_COMPMASK_SDATA8_7 | + IB_SR_COMPMASK_SDATA8_8 | + IB_SR_COMPMASK_SDATA8_9 | + IB_SR_COMPMASK_SDATA8_10 | + IB_SR_COMPMASK_SDATA8_11 | + IB_SR_COMPMASK_SDATA8_12 | + IB_SR_COMPMASK_SDATA8_13 | + IB_SR_COMPMASK_SDATA8_14 | + IB_SR_COMPMASK_SDATA8_15; + + user_query.attr_size = sizeof (ib_service_record_t); + user_query.p_attr = (void *)&service_rec; + + query_req.query_type = IB_QUERY_USER_DEFINED; + query_req.p_query_input = (void *)&user_query; + query_req.flags = IB_FLAGS_SYNC; /* this is a blocking call */ + query_req.timeout_ms = 1 * 1000; /* 1 second */ + query_req.retry_cnt = 5; + /* query SA using this port */ + query_req.port_guid = p_active_port->p_attr->port_guid; + query_req.query_context = (void *) &user_query; + query_req.pfn_query_cb = dapli_ib_sa_query_cb; + + ib_status = ib_query (dapl_ibal_root.h_al, &query_req, NULL); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"ns_map_gid: status %s @line = %d\n", + ib_get_err_str(ib_status), __LINE__); + return (dapl_ib_status_convert (ib_status)); + } + else if (service_rec.service_gid.unicast.interface_id == 0) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> %s: query SA found no record\n","DsNMG"); + return DAT_INVALID_PARAMETER; + } + + /* + * return the GID + */ + p_gid->guid = service_rec.service_gid.unicast.interface_id; + p_gid->gid_prefix = service_rec.service_gid.unicast.prefix; + + return DAT_SUCCESS; +} + + + +DAT_RETURN +dapls_ib_ns_map_ipaddr ( + IN DAPL_HCA *hca_ptr, + IN GID gid, + OUT DAT_IA_ADDRESS_PTR p_ia_address) +{ + ib_user_query_t user_query; + dapl_ibal_ca_t *p_ca; + dapl_ibal_port_t *p_active_port; + ib_service_record_t service_rec; + ib_api_status_t ib_status; + ib_query_req_t query_req; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (NULL == p_ca) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMI: There is no HCA = %d\n", __LINE__); + return (DAT_INVALID_HANDLE); + } + + /* + * We are using the first active port in the list for + * communication. We have to get back here when we decide to support + * fail-over and high-availability. 
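+     *
+     * The reverse (GID-to-IP) ATS lookup below mirrors dapls_ib_ns_map_gid:
+     * the service record is matched on {service_name, service_gid, pkey,
+     * lease} and the peer IP address comes back in service_data8[]. A
+     * minimal caller-side sketch (illustrative only; types as declared in
+     * this file, with gid typically taken from a path or CM record):
+     *
+     *     GID             gid;
+     *     DAT_SOCK_ADDR6  addr;
+     *
+     *     if (DAT_SUCCESS == dapls_ib_ns_map_ipaddr (hca_ptr, gid,
+     *                            (DAT_IA_ADDRESS_PTR)&addr))
+     *     {
+     *         ... addr now holds the peer IP; sin6_family is AF_INET
+     *     }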
+ */ + p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)hca_ptr->port_num ); + + if (NULL == p_active_port) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMI: Port %d is not available = %d\n", + hca_ptr->port_num, __LINE__); + return (DAT_INVALID_STATE); + } + + if (p_active_port->p_attr->lid == 0) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMI: Port %d has no LID assigned; can not operate\n", + p_active_port->p_attr->port_num); + return (DAT_INVALID_STATE); + } + /*else + { + // + // We are operating in the LOOPBACK mode + // + if ((gid.gid_prefix == + p_active_port->p_attr->p_gid_table[0].unicast.prefix) && + (gid.guid == + p_active_port->p_attr->p_gid_table[0].unicast.interface_id)) + { + dapl_os_memcpy (((DAT_SOCK_ADDR6 *)p_ia_address)->sin6_addr.s6_addr, + hca_ptr->hca_address.sin6_addr.s6_addr, + HCA_IPV6_ADDRESS_LENGTH); + return DAT_SUCCESS; + } + + }*/ + if (p_active_port->p_attr->link_state != IB_LINK_ACTIVE) + { + /* + * Port is DOWN; can not send or recv messages + */ + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsNMI: Port %d is DOWN; can not send/recv to/from fabric\n", + p_active_port->p_attr->port_num); + return (DAT_INVALID_STATE); + } + + dapl_os_memzero (&user_query, sizeof (ib_user_query_t)); + dapl_os_memzero (&service_rec, sizeof (ib_service_record_t)); + dapl_os_memzero (&query_req, sizeof (ib_query_req_t)); + + /* + * query SA for IPAddress + */ + //service_rec.service_id = CL_HTON64 (DAPL_ATS_SERVICE_ID); + dapl_os_memcpy( service_rec.service_name, ATS_NAME, __min (sizeof(ATS_NAME), sizeof(ib_svc_name_t))); + service_rec.service_gid.unicast.interface_id = gid.guid; + service_rec.service_gid.unicast.prefix = gid.gid_prefix; + service_rec.service_pkey = IB_DEFAULT_PKEY; + service_rec.service_lease = IB_INFINITE_SERVICE_LEASE; + + user_query.method = IB_MAD_METHOD_GETTABLE; + user_query.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + + user_query.comp_mask = IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SNAME; + + user_query.attr_size = sizeof (ib_service_record_t); + user_query.p_attr = (void *)&service_rec; + + query_req.query_type = IB_QUERY_USER_DEFINED; + query_req.p_query_input = (void *)&user_query; + query_req.flags = IB_FLAGS_SYNC; /* this is a blocking call */ + query_req.timeout_ms = 1 * 1000; /* 1 second */ + query_req.retry_cnt = 5; + /* query SA using this port */ + query_req.port_guid = p_active_port->p_attr->port_guid; + query_req.query_context = (void *) &user_query; + query_req.pfn_query_cb = dapli_ib_sa_query_cb; + + ib_status = ib_query (dapl_ibal_root.h_al, &query_req, NULL); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"ns_map_ipaddr: exits status %s @line = %d\n", + ib_get_err_str(ib_status), __LINE__); + return (dapl_ib_status_convert (ib_status)); + } + + /* *********************** + * return the IP_address + *************************/ + dapl_os_memcpy ((void *)&((struct sockaddr_in *)p_ia_address)->sin_addr.s_net, + (const void *)&service_rec.service_data8[ATS_IPV4_OFFSET], 4); + //HCA_IPV6_ADDRESS_LENGTH); + ((DAT_SOCK_ADDR6 *)p_ia_address)->sin6_family = AF_INET; + + return (DAT_SUCCESS); +} + + +/* + * dapls_ib_ns_create_gid_map() + * + * Register a ServiceRecord containing uDAPL_svc_id, IP address and GID to SA + * Other nodes can look it up by quering the SA + * + * Input: + * hca_ptr HCA device pointer + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapls_ib_ns_create_gid_map ( + IN DAPL_HCA *hca_ptr) +{ + UNUSED_PARAM( 
hca_ptr ); + return (DAT_SUCCESS); +} + + +DAT_RETURN +dapls_ib_ns_remove_gid_map ( + IN DAPL_HCA *hca_ptr) +{ + UNUSED_PARAM( hca_ptr ); + return (DAT_SUCCESS); +} + +#endif /* NO_NAME_SERVICE */ + + +static void +dapli_ib_sa_query_cb ( + IN ib_query_rec_t *p_query_rec ) +{ + ib_api_status_t ib_status; + + if (IB_SUCCESS != p_query_rec->status) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: SA query callback failed status %s\n", + ib_get_err_str(p_query_rec->status)); + return; + } + + if (!p_query_rec->p_result_mad) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: SA query callback [no mad] @line %d\n",__LINE__); + return; + } + + switch (p_query_rec->query_type) + { + case IB_QUERY_PATH_REC_BY_GIDS: + { + ib_path_rec_t *p_path_rec; + + p_path_rec = ib_get_query_path_rec (p_query_rec->p_result_mad, 0); + if (p_path_rec) + { + dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, + (void *) p_path_rec, + sizeof (ib_path_rec_t)); + dapl_dbg_log ( + DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "sa_query_cb: path {slid: 0x%x, dlid: 0x%x}\n", + p_path_rec->slid, p_path_rec->dlid); + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: return NULL? @line %d\n",__LINE__); + return; + } + break; + } + + case IB_QUERY_SVC_REC_BY_ID: + { + ib_service_record_t *p_svc_rec; + + p_svc_rec = ib_get_query_svc_rec (p_query_rec->p_result_mad, 0); + if (p_svc_rec) + { + dapl_os_memcpy ((void * __ptr64) p_query_rec->query_context, + (void *) p_svc_rec, + sizeof (ib_service_record_t)); + dapl_dbg_log ( + DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "sa_query_cb: SER{0x%I64x, 0x%I64x}\n", + cl_hton64 (p_svc_rec->service_gid.unicast.prefix), + cl_hton64 (p_svc_rec->service_gid.unicast.interface_id)); + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: return NULL? @line %d\n",__LINE__); + return; + } + break; + + } + + case IB_QUERY_USER_DEFINED: + { + ib_user_query_t *p_user_query; + + p_user_query = (ib_user_query_t * __ptr64) p_query_rec->query_context; + if (p_user_query) + { + switch (p_user_query->attr_id) + { + case IB_MAD_ATTR_SERVICE_RECORD: + { + ib_service_record_t *p_svc_rec; + + p_svc_rec = ib_get_query_svc_rec (p_query_rec->p_result_mad, 0); + if (p_svc_rec) + { + dapl_os_memcpy ((void *) p_user_query->p_attr, + (void *) p_svc_rec, + sizeof (ib_service_record_t)); + dapl_dbg_log ( + DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK, + "sa_query_cb: GID{0x" F64x ", 0x" F64x "} record count %d\n", + cl_hton64( p_svc_rec->service_gid.unicast.prefix ), + cl_hton64( p_svc_rec->service_gid.unicast.interface_id ), + p_query_rec->result_cnt); + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: return NULL? @line %d\n", + __LINE__); + return; + } + break; + + } + default: + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: USER_DEFINED %d\n", + p_user_query->attr_id); + break; + } + } + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: return NULL? 
@line %d\n",__LINE__); + return; + } + break; + } + + default: + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: unsupportedTYPE %d\n", + p_query_rec->query_type); + break; + } + + } + + if ((ib_status = ib_put_mad (p_query_rec->p_result_mad)) != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"sa_query_cb: can not free MAD %s\n", + ib_get_err_str(ib_status)); + } +} + + +static void +dapli_ibal_listen_err_cb ( + IN ib_listen_err_rec_t *p_listen_err_rec ) +{ + UNUSED_PARAM( p_listen_err_rec ); + dapl_dbg_log (DAPL_DBG_TYPE_CM, "--> %s: CM callback listen error\n", + "DiLEcb"); +} + +static void +dapli_ib_cm_apr_cb ( + IN ib_cm_apr_rec_t *p_cm_apr_rec ) +{ + UNUSED_PARAM( p_cm_apr_rec ); + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCAcb: CM callback APR (Alternate Path Request)\n"); +} + +static void +dapli_ib_cm_lap_cb ( + IN ib_cm_lap_rec_t *p_cm_lap_rec ) +{ + UNUSED_PARAM( p_cm_lap_rec ); + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCLcb: CM callback LAP (Load Alternate Path)\n"); +} + +static void +dapli_ib_cm_dreq_cb ( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ) +{ + ib_cm_drep_t cm_drep; + DAPL_SP *sp_ptr; + DAPL_EP *ep_ptr; + + dapl_os_assert (p_cm_dreq_rec); + + ep_ptr = (DAPL_EP * __ptr64) p_cm_dreq_rec->qp_context; + + if ( ep_ptr == NULL || + ep_ptr->header.magic == DAPL_MAGIC_INVALID ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "--> DiCDcb: EP = %p invalid or FREED\n", ep_ptr); + return; + } + + dapl_os_lock (&ep_ptr->header.lock); + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCDcb: EP = %p QP = %p in state = %d\n", + ep_ptr, ep_ptr->qp_handle, ep_ptr->param.ep_state); + + if (ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED) + { + dapl_os_unlock (&ep_ptr->header.lock); + return; + } + + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING; + ep_ptr->recv_discreq = DAT_TRUE; + dapl_os_unlock (&ep_ptr->header.lock); + + dapl_os_memzero (&cm_drep, sizeof ( ib_cm_drep_t)); + + /* Could fail if we received reply from other side, no need to retry */ + /* Wait for any transaction in process holding reference */ + while ( ep_ptr->req_count ) + { + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCDcb: WAIT for EP=%p req_count(%d) == 0 \n", + ep_ptr, ep_ptr->req_count); + dapl_os_sleep_usec (1000); /* 1 ms */ + } + + ib_cm_drep (p_cm_dreq_rec->h_cm_dreq, &cm_drep); + + /* CM puts QP in reset state */ + ep_ptr->qp_state = IB_QPS_RESET; + + if (ep_ptr->cr_ptr) + { + sp_ptr = ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr; + + dapls_cr_callback ( p_cm_dreq_rec->h_cm_dreq, + IB_CME_DISCONNECTED, + (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata, + (void *) sp_ptr, + NULL); + } + else + { + sp_ptr = NULL; + + dapl_evd_connection_callback ( p_cm_dreq_rec->h_cm_dreq, + IB_CME_DISCONNECTED, + (void * __ptr64) p_cm_dreq_rec->p_dreq_pdata, + p_cm_dreq_rec->qp_context); + } +} + +static void +dapli_ib_cm_drep_cb ( + IN ib_cm_drep_rec_t *p_cm_drep_rec ) +{ + DAPL_SP *sp_ptr; + DAPL_EP *ep_ptr; + + dapl_os_assert (p_cm_drep_rec != NULL); + + ep_ptr = (DAPL_EP * __ptr64) p_cm_drep_rec->qp_context; + + if (ep_ptr) + { + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCDpcb: EP = %p QP = %p in state = %d\n", + ep_ptr, ep_ptr->qp_handle, ep_ptr->param.ep_state); + + if ( ep_ptr->param.ep_state == DAT_EP_STATE_DISCONNECTED ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "--> DiCDpcb: EP = %p QP = %p already disconnected\n", + ep_ptr, ep_ptr->qp_handle); + return; + } + + if (ep_ptr->cr_ptr) + { + sp_ptr = ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr; + + dapls_cr_callback ( ep_ptr->cm_handle, + IB_CME_DISCONNECTED, + (void * __ptr64) 
p_cm_drep_rec->p_drep_pdata, + (void *) sp_ptr, + NULL); + } + else + { + sp_ptr = NULL; + + dapl_evd_connection_callback ( ep_ptr->cm_handle, + IB_CME_DISCONNECTED, + (void * __ptr64) p_cm_drep_rec->p_drep_pdata, + p_cm_drep_rec->qp_context); + } + } +} + + +static void +dapli_ib_cm_rep_cb ( + IN ib_cm_rep_rec_t *p_cm_rep_rec ) +{ + ib_api_status_t ib_status; + ib_cm_rtu_t cm_rtu; + uint8_t cm_cb_op; + DAPL_PRIVATE *prd_ptr; + DAPL_EP *ep_ptr; + dapl_ibal_ca_t *p_ca; + + dapl_os_assert (p_cm_rep_rec != NULL); + + dapl_os_memzero (&cm_rtu, sizeof ( ib_cm_rtu_t )); + + dapl_os_assert ( ((DAPL_HEADER * __ptr64) p_cm_rep_rec->qp_context)->magic == + DAPL_MAGIC_EP ); + + ep_ptr = (DAPL_EP * __ptr64) p_cm_rep_rec->qp_context; + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCRpcb: EP = %p local_max_rdma_read_in %d\n", + ep_ptr, p_cm_rep_rec->resp_res); + + p_ca = (dapl_ibal_ca_t *) + ep_ptr->header.owner_ia->hca_ptr->ib_hca_handle; + + cm_rtu.pfn_cm_apr_cb = dapli_ib_cm_apr_cb; + cm_rtu.pfn_cm_dreq_cb = dapli_ib_cm_dreq_cb; + cm_rtu.p_rtu_pdata = NULL; + cm_rtu.access_ctrl = IB_AC_LOCAL_WRITE|IB_AC_RDMA_WRITE|IB_AC_MW_BIND; + if ((ep_ptr->param.ep_attr.max_rdma_read_in > 0) || + (ep_ptr->param.ep_attr.max_rdma_read_out > 0)) + { + cm_rtu.access_ctrl |= IB_AC_RDMA_READ; + } + + cm_rtu.rq_depth = 0; + cm_rtu.sq_depth = 0; + + ib_status = ib_cm_rtu (p_cm_rep_rec->h_cm_rep, &cm_rtu); + + if (ib_status == IB_SUCCESS) + { + cm_cb_op = IB_CME_CONNECTED; + } + else + { + cm_cb_op = IB_CME_LOCAL_FAILURE; + } + + prd_ptr = (DAPL_PRIVATE * __ptr64) p_cm_rep_rec->p_rep_pdata; + +#ifdef DAPL_DBG +#if 0 + { + int i; + + dapl_dbg_log ( DAPL_DBG_TYPE_EP, "--> DiCRpcb: private_data: "); + + for ( i = 0 ; i < IB_MAX_REP_PDATA_SIZE ; i++ ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_EP, + "0x%x ", prd_ptr->private_data[i]); + + } + dapl_dbg_log ( DAPL_DBG_TYPE_EP, "\n"); + + } +#endif +#endif + + dapl_evd_connection_callback ( + p_cm_rep_rec->h_cm_rep, + cm_cb_op, + (void *) prd_ptr, + (void * __ptr64) p_cm_rep_rec->qp_context); +} + + +static void +dapli_ib_cm_rej_cb ( + IN ib_cm_rej_rec_t *p_cm_rej_rec ) +{ + DAPL_EP *ep_ptr; + ib_cm_events_t cm_event; + + dapl_os_assert (p_cm_rej_rec); + + ep_ptr = (DAPL_EP * __ptr64) p_cm_rej_rec->qp_context; + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCRjcb: EP = %p QP = %p rej reason = 0x%x\n", + ep_ptr, ep_ptr->qp_handle, CL_NTOH16(p_cm_rej_rec->rej_status)); + + switch (p_cm_rej_rec->rej_status) + { + case IB_REJ_INSUF_RESOURCES: + case IB_REJ_INSUF_QP: + case IB_REJ_INVALID_COMM_ID: + case IB_REJ_INVALID_COMM_INSTANCE: + case IB_REJ_INVALID_PKT_RATE: + case IB_REJ_INVALID_ALT_GID: + case IB_REJ_INVALID_ALT_LID: + case IB_REJ_INVALID_ALT_SL: + case IB_REJ_INVALID_ALT_TRAFFIC_CLASS: + case IB_REJ_INVALID_ALT_PKT_RATE: + case IB_REJ_INVALID_ALT_HOP_LIMIT: + case IB_REJ_INVALID_ALT_FLOW_LBL: + case IB_REJ_INVALID_GID: + case IB_REJ_INVALID_LID: + case IB_REJ_INVALID_SID: + case IB_REJ_INVALID_SL: + case IB_REJ_INVALID_TRAFFIC_CLASS: + case IB_REJ_PORT_REDIRECT: + case IB_REJ_INVALID_MTU: + case IB_REJ_INSUFFICIENT_RESP_RES: + case IB_REJ_INVALID_CLASS_VER: + case IB_REJ_INVALID_FLOW_LBL: + cm_event = IB_CME_DESTINATION_REJECT; + break; + + case IB_REJ_TIMEOUT: + cm_event = IB_CME_DESTINATION_UNREACHABLE; + break; + + case IB_REJ_USER_DEFINED: + cm_event = IB_CME_DESTINATION_REJECT; + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DiCRjcb: user defined rej reason %s\n", + p_cm_rej_rec->p_ari); + break; + + default: + cm_event = IB_CME_LOCAL_FAILURE; + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> 
DiCRjcb: with unknown status %x\n",
+                      p_cm_rej_rec->rej_status);
+        break;
+    }
+
+    /* FIXME - Vu
+     * We do not take care of the user-defined rej reason with additional
+     * rejection information (p_ari)
+     */
+
+    if (ep_ptr->cr_ptr)
+    {
+        dapls_cr_callback ( ep_ptr->cm_handle,
+                            cm_event,
+                            (void * __ptr64) p_cm_rej_rec->p_rej_pdata,
+                            (void *) ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr,
+                            NULL);
+    }
+    else
+    {
+        dapl_evd_connection_callback ( ep_ptr->cm_handle,
+                                       cm_event,
+                                       (void * __ptr64) p_cm_rej_rec->p_rej_pdata,
+                                       (void * __ptr64) p_cm_rej_rec->qp_context);
+    }
+
+}
+
+static void
+dapli_ib_cm_req_cb (
+    IN    ib_cm_req_rec_t    *p_cm_req_rec )
+{
+    DAPL_SP           *sp_ptr;
+    DAT_SOCK_ADDR6    dest_ia_addr;
+
+    dapl_os_assert (p_cm_req_rec);
+
+    sp_ptr = (DAPL_SP * __ptr64) p_cm_req_rec->context;
+
+    dapl_os_assert (sp_ptr);
+
+    /*
+     * Save the cm_srvc_handle to avoid the race condition between
+     * the return of the ib_cm_listen and the notification of a conn req
+     */
+    if (sp_ptr->cm_srvc_handle != p_cm_req_rec->h_cm_listen)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,
+                      "--> DiCRqcb: cm_service_handle is changed\n");
+        sp_ptr->cm_srvc_handle = p_cm_req_rec->h_cm_listen;
+    }
+
+    dapl_os_memzero (&dest_ia_addr, sizeof (dest_ia_addr));
+
+#ifdef NO_NAME_SERVICE
+
+    {
+        DAPL_PRIVATE *prd_ptr;
+
+        prd_ptr = (DAPL_PRIVATE *)p_cm_req_rec->p_req_pdata;
+
+        dapl_os_memcpy ((void *)&dest_ia_addr,
+                        (void *)&prd_ptr->hca_address,
+                        sizeof (DAT_SOCK_ADDR6));
+    }
+
+#else
+
+    {
+        GID dest_gid;
+
+        dapl_os_memzero (&dest_gid, sizeof (dest_gid));
+
+        dest_gid.guid       = p_cm_req_rec->primary_path.dgid.unicast.interface_id;
+        dest_gid.gid_prefix = p_cm_req_rec->primary_path.dgid.unicast.prefix;
+
+        if (DAT_SUCCESS != dapls_ns_map_ipaddr (
+                               sp_ptr->header.owner_ia->hca_ptr,
+                               dest_gid,
+                               (DAT_IA_ADDRESS_PTR)&dest_ia_addr))
+        {
+            dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"cm_req_cb: SP = %p failed mapping GID-IPaddr\n",
+                           sp_ptr);
+        }
+    }
+
+#endif /* NO_NAME_SERVICE */
+
+#ifdef DAPL_DBG
+    {
+        int rval;
+
+        rval = ((struct sockaddr_in *) &(dest_ia_addr))->sin_addr.s_addr;
+
+        dapl_dbg_log (DAPL_DBG_TYPE_CM|DAPL_DBG_TYPE_CALLBACK,
+                      "cm_req_cb: query SA for RemoteAddr: %d.%d.%d.%d\n",
+                      (rval >>  0) & 0xff,
+                      (rval >>  8) & 0xff,
+                      (rval >> 16) & 0xff,
+                      (rval >> 24) & 0xff);
+    }
+#endif
+
+    /* FIXME - Vu
+     * We have NOT used/saved the primary and alternate path records,
+     * i.e. p_cm_req_rec->p_primary_path and p_cm_req_rec->p_alt_path.
+     * We should cache some fields of the path record in the Name Service DB,
+     * such as: dgid, dlid.
+     * Also we do not save the resp_res (i.e. max outstanding RDMA
+     * read/atomic), rnr_retry_cnt and flow_ctrl fields.
+     */
+    dapl_dbg_log (DAPL_DBG_TYPE_CM,
+                  "cm_req_cb: SP = %p rem_max_rdma_read_in %d\n",
+                  sp_ptr, p_cm_req_rec->resp_res);
+
+    dapls_cr_callback ( p_cm_req_rec->h_cm_req,
+                        IB_CME_CONNECTION_REQUEST_PENDING,
+                        (void * __ptr64) p_cm_req_rec->p_req_pdata,
+                        (void * __ptr64) p_cm_req_rec->context,
+                        (DAT_IA_ADDRESS_PTR)&dest_ia_addr);
+}
+
+
+static void
+dapli_ib_cm_mra_cb (
+    IN    ib_cm_mra_rec_t    *p_cm_mra_rec )
+{
+    UNUSED_PARAM( p_cm_mra_rec );
+    dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,
+                  "--> DiCMcb: CM callback MRA\n");
+}
+
+static void
+dapli_ib_cm_rtu_cb (
+    IN    ib_cm_rtu_rec_t    *p_cm_rtu_rec )
+{
+    DAPL_EP    *ep_ptr;
+
+    dapl_os_assert (p_cm_rtu_rec != NULL);
+
+    ep_ptr = (DAPL_EP * __ptr64) p_cm_rtu_rec->qp_context;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_CM | DAPL_DBG_TYPE_CALLBACK,
+                  "--> DiCRucb: EP = %p QP = %p\n", ep_ptr, ep_ptr->qp_handle);
+
+    if (ep_ptr->cr_ptr)
+    {
+        DAPL_SP    *sp_ptr;
+
+        sp_ptr = ((DAPL_CR *) ep_ptr->cr_ptr)->sp_ptr;
+
+        dapls_cr_callback ( ep_ptr->cm_handle,
+                            IB_CME_CONNECTED,
+                            (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata,
+                            (void *) sp_ptr,
+                            NULL);
+
+    }
+    else
+    {
+        dapl_evd_connection_callback (
+                            ep_ptr->cm_handle,
+                            IB_CME_CONNECTED,
+                            (void * __ptr64) p_cm_rtu_rec->p_rtu_pdata,
+                            (void *) ep_ptr);
+    }
+}
+
+static void
+dapli_ib_cm_cancel_cb( void *context )
+{
+    DAPL_SP    *sp_ptr;
+
+    sp_ptr = (DAPL_SP *) context;
+    dapl_os_assert ( sp_ptr );
+    dapl_os_assert ( sp_ptr->header.magic == DAPL_MAGIC_PSP ||
+                     sp_ptr->header.magic == DAPL_MAGIC_RSP );
+
+    dapl_os_wait_object_wakeup( &sp_ptr->wait_object );
+}
+
+DAT_RETURN
+dapls_ib_cm_remote_addr (
+    IN    DAT_HANDLE        dat_handle,
+    IN    DAPL_PRIVATE      *prd_ptr,
+    OUT   DAT_SOCK_ADDR6    *remote_ia_address )
+{
+    UNUSED_PARAM( dat_handle );
+    UNUSED_PARAM( prd_ptr );
+    UNUSED_PARAM( remote_ia_address );
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_ib_connect
+ *
+ * Initiate a connection with the passive listener on another node
+ *
+ * Input:
+ *    ep_handle,
+ *    remote_ia_address,
+ *    remote_conn_qual,
+ *    prd_size        size of private data and structure
+ *    prd_ptr         pointer to private data structure
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    DAT_SUCCESS
+ *    DAT_INSUFFICIENT_RESOURCES
+ *    DAT_INVALID_PARAMETER
+ *
+ */
+DAT_RETURN
+dapls_ib_connect (
+    IN    DAT_EP_HANDLE         ep_handle,
+    IN    DAT_IA_ADDRESS_PTR    remote_ia_address,
+    IN    DAT_CONN_QUAL         remote_conn_qual,
+    IN    DAT_COUNT             prd_size,
+    IN    DAPL_PRIVATE          *prd_ptr )
+{
+    DAPL_EP                *ep_ptr;
+    DAPL_IA                *ia_ptr;
+    ib_api_status_t        ib_status;
+    dapl_ibal_port_t       *p_active_port;
+    dapl_ibal_ca_t         *p_ca;
+    ib_cm_req_t            cm_req;
+    ib_path_rec_t          path_rec;
+    GID                    dest_GID;
+    ib_query_req_t         query_req;
+    ib_gid_pair_t          gid_pair;
+    ib_service_record_t    service_rec;
+    int                    retry_cnt;
+    DAT_RETURN             dat_status;
+
+    ep_ptr         = (DAPL_EP *) ep_handle;
+    ia_ptr         = ep_ptr->header.owner_ia;
+    ep_ptr->cr_ptr = NULL;
+    retry_cnt      = 0;
+    dat_status     = DAT_SUCCESS;
+
+    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
+
+    /*
+     * We are using the first active port in the list for
+     * communication. We have to get back here when we decide to support
+     * fail-over and high-availability.
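+     *
+     * The connect path below resolves the destination in two SA round
+     * trips before issuing the CM REQ. In outline (a sketch only; the
+     * names are the ones used in this function):
+     *
+     *     dapls_ns_map_gid (hca, remote_ia_address, &dest_GID);
+     *         ... ATS lookup: peer IP address to GID
+     *     ib_query (IB_QUERY_PATH_REC_BY_GIDS on {src_gid, dest_gid})
+     *         ... SA lookup: GID pair to path_rec (SLID/DLID, MTU, rate)
+     *     ib_cm_req (&cm_req);
+     *         ... CM REQ carrying prd_ptr as private data over that path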
+ */ + p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)ia_ptr->hca_ptr->port_num ); + + if (NULL == p_active_port) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsC: Port %d is not available = %d\n", + ia_ptr->hca_ptr->port_num, __LINE__); + return (DAT_INVALID_STATE); + } + + dapl_os_memzero (&dest_GID, sizeof (GID)); + dapl_os_memzero (&cm_req, sizeof (ib_cm_req_t)); + dapl_os_memzero (&path_rec, sizeof (ib_path_rec_t)); + dapl_os_memzero (&service_rec, sizeof (ib_service_record_t)); + dapl_os_memzero (&query_req, sizeof (ib_query_req_t)); + dapl_os_memzero (&gid_pair, sizeof (ib_gid_pair_t)); + dapl_os_memzero (&ep_ptr->remote_ia_address, sizeof (DAT_SOCK_ADDR6)); + + dapl_os_memcpy (&ep_ptr->remote_ia_address, + remote_ia_address, + sizeof (ep_ptr->remote_ia_address)); + + +#ifdef NO_NAME_SERVICE + + if (DAT_SUCCESS != + (dat_status = dapls_ns_lookup_address ( + ia_ptr, + remote_ia_address, + &dest_GID ))) + { + /* + * Remote address not in the table, this is a + * strange return code! + */ + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsC: exits status = %x\n", dat_status); + return dat_status; + } + + dest_GID.guid = CL_HTON64 (dest_GID.guid); + dest_GID.gid_prefix = CL_HTON64 (dest_GID.gid_prefix); + +#else + + /* + * We query the SA to get the dest_gid with the + * {uDAPL_svc_id, IP-address} as the key to get GID. + */ + if (DAT_SUCCESS != + (dat_status = dapls_ns_map_gid (ia_ptr->hca_ptr, + remote_ia_address, + &dest_GID))) + + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsC: fail to map remote_ia_addr (sa_family %d) to gid\n", + remote_ia_address->sa_family); + return dat_status; + } + +#endif /* NO_NAME_SERVICE */ + + gid_pair.dest_gid.unicast.interface_id = dest_GID.guid; + gid_pair.dest_gid.unicast.prefix = dest_GID.gid_prefix; + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "dapls_ib_connect: EP = %p QP = %p SERVER GID{0x" F64x ", 0x" F64x "}\n", + ep_ptr, ep_ptr->qp_handle, cl_hton64 (gid_pair.dest_gid.unicast.prefix), + cl_hton64 (gid_pair.dest_gid.unicast.interface_id)); + + gid_pair.src_gid = p_active_port->p_attr->p_gid_table[0]; +/* + if ((gid_pair.src_gid.unicast.interface_id == + gid_pair.dest_gid.unicast.interface_id ) && + (gid_pair.src_gid.unicast.prefix == + gid_pair.dest_gid.unicast.prefix )) + { + path_rec.dgid = gid_pair.dest_gid; + path_rec.sgid = gid_pair.src_gid; + path_rec.slid = path_rec.dlid = p_active_port->p_attr->lid; + path_rec.pkey = p_active_port->p_attr->p_pkey_table[0]; + path_rec.mtu = p_active_port->p_attr->mtu; + path_rec.pkt_life = 18; // 1 sec + path_rec.rate = IB_PATH_RECORD_RATE_10_GBS; + + } + else + { + */ /* + * Query SA to get the path record from pair of GIDs + */ + dapl_os_memzero (&query_req, sizeof (ib_query_req_t)); + query_req.query_type = IB_QUERY_PATH_REC_BY_GIDS; + query_req.p_query_input = (void *) &gid_pair; + query_req.flags = IB_FLAGS_SYNC; + query_req.timeout_ms = 1 * 1000; /* 1 second */ + query_req.retry_cnt = 3; + /* query SA using this port */ + query_req.port_guid = p_active_port->p_attr->port_guid; + query_req.query_context = (void *) &path_rec; + query_req.pfn_query_cb = dapli_ib_sa_query_cb; + + ib_status = ib_query (dapl_ibal_root.h_al, &query_req, NULL); + + if ((ib_status != IB_SUCCESS) || (!path_rec.dlid)) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsC: EP = %p QP = %p query pair_gids status = %s\n", + ep_ptr, ep_ptr->qp_handle, ib_get_err_str(ib_status)); + return DAT_INVALID_PARAMETER; + } + + //} + + /* + * Tavor has a HW bug that causes bandwidth with 2K MTU to be less than + * with 1K MTU. 
Cap the MTU based on device ID to compensate for this.
+     */
+    if( (p_ca->p_ca_attr->dev_id == 0x5A44) &&
+        (ib_path_rec_mtu( &path_rec ) > IB_MTU_LEN_1024) )
+    {
+        /* Local endpoint is Tavor - cap MTU to 1K for extra bandwidth. */
+        path_rec.mtu &= IB_PATH_REC_SELECTOR_MASK;
+        path_rec.mtu |= IB_MTU_LEN_1024;
+    }
+
+    /*
+     * prepare the Service ID from conn_qual
+     */
+    cm_req.svc_id         = remote_conn_qual;
+    cm_req.p_primary_path = &path_rec;
+    cm_req.p_alt_path     = NULL;
+    cm_req.h_qp           = ep_ptr->qp_handle;
+    cm_req.qp_type        = IB_QPT_RELIABLE_CONN;
+    cm_req.p_req_pdata    = (uint8_t *) prd_ptr;
+    cm_req.req_length     = (uint8_t)prd_size;
+    /* number of times the CM retries sending this request message (IB allows a max of 4 bits) */
+    cm_req.max_cm_retries = 15; /* timer outside of call, should be infinite */
+    /* QP retry count for sending any WR */
+    cm_req.retry_cnt = 5;
+    /* max number of outstanding RDMA read/atomic operations supported */
+    cm_req.resp_res = (uint8_t)ep_ptr->param.ep_attr.max_rdma_read_in;
+    /* max number of outstanding RDMA read/atomic operations we will use */
+    cm_req.init_depth = (uint8_t)ep_ptr->param.ep_attr.max_rdma_read_out;
+
+    /* time to wait before retrying a packet after receiving an RNR NAK */
+    cm_req.rnr_nak_timeout = 12; /* 163.84ms */
+
+    /*
+     * number of times the local QP should retry after receiving an RNR
+     * NAK before reporting an error
+     */
+    cm_req.rnr_retry_cnt = 6; /* 7 is infinite */
+
+    cm_req.remote_resp_timeout = 16; /* 250ms */
+    cm_req.local_resp_timeout  = 16; /* 250ms */
+
+    cm_req.flow_ctrl = TRUE;
+    cm_req.flags     = 0;
+    /*
+     * We do not use a specific data buffer to check for a specific connection
+     */
+    cm_req.p_compare_buffer = NULL;
+    cm_req.compare_offset   = 0;
+    cm_req.compare_length   = 0;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_CM,
+                  "--> DsConn: EP=%p QP=%p rio=%d,%d, pl=%d, mtu=%d slid=%#x dlid=%#x\n",
+                  ep_ptr, ep_ptr->qp_handle, cm_req.resp_res,
+                  cm_req.init_depth,
+                  ib_path_rec_pkt_life(&path_rec),
+                  ib_path_rec_mtu(&path_rec),
+                  cm_req.p_primary_path->slid,
+                  cm_req.p_primary_path->dlid);
+
+    /*
+     * We do not support peer_to_peer; therefore, we set pfn_cm_req_cb = NULL
+     */
+    cm_req.pfn_cm_req_cb = NULL;
+    cm_req.pfn_cm_rep_cb = dapli_ib_cm_rep_cb;
+    cm_req.pfn_cm_rej_cb = dapli_ib_cm_rej_cb;
+    /* callback invoked when a Message Receipt Acknowledgement (MRA) is received */
+    cm_req.pfn_cm_mra_cb = dapli_ib_cm_mra_cb;
+
+    ib_status = ib_cm_req (&cm_req);
+
+    if ( ib_status != IB_SUCCESS )
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsC: EP = %p QP = %p conn_request failed = %s\n",
+                       ep_ptr, ep_ptr->qp_handle, ib_get_err_str(ib_status));
+        return (dapl_ib_status_convert (ib_status));
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_ib_disconnect
+ *
+ * Disconnect an EP
+ *
+ * Input:
+ *    ep_handle,
+ *    disconnect_flags
+ *
+ * Output:
+ *    none
+ *
+ * Returns:
+ *    DAT_SUCCESS
+ *    DAT_INSUFFICIENT_RESOURCES
+ *    DAT_INVALID_PARAMETER
+ *
+ */
+DAT_RETURN
+dapls_ib_disconnect (
+    IN    DAPL_EP            *ep_ptr,
+    IN    DAT_CLOSE_FLAGS    disconnect_flags)
+{
+    DAPL_IA            *ia_ptr;
+    ib_api_status_t    ib_status;
+    ib_cm_dreq_t       cm_dreq;
+
+    UNUSED_PARAM( disconnect_flags );
+
+    ia_ptr    = ep_ptr->header.owner_ia;
+    ib_status = IB_SUCCESS;
+
+    dapl_os_memzero(&cm_dreq, sizeof(ib_cm_dreq_t));
+
+    cm_dreq.qp_type        = IB_QPT_RELIABLE_CONN;
+    cm_dreq.h_qp           = ep_ptr->qp_handle;
+    cm_dreq.pfn_cm_drep_cb = dapli_ib_cm_drep_cb;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_CM,
+                  "--> DsD: EP=%p QP=%p ep_state=%d, dr %d, ds %d\n",
+                  ep_ptr, ep_ptr->qp_handle,
+                  ep_ptr->param.ep_state,
+                  ep_ptr->recv_discreq, ep_ptr->sent_discreq );
+
+    /*
+     * Currently we do not send any disconnect private data to
+     * the other
endpoint because DAT 1.0 & 1.1 does not support + */ + cm_dreq.p_dreq_pdata = NULL; + + if ( (ep_ptr->recv_discreq == DAT_FALSE ) && (ep_ptr->sent_discreq == DAT_FALSE ) ) + //(disconnect_flags == DAT_CLOSE_ABRUPT_FLAG ) ) + { + ep_ptr->sent_discreq = DAT_TRUE; + ib_status = ib_cm_dreq ( &cm_dreq ); + + dapl_dbg_log (DAPL_DBG_TYPE_CM,"--> DsD: EP=%p QP= %p DREQ SENT status =%s\n", + ep_ptr, ep_ptr->qp_handle,ib_get_err_str(ib_status)); + } + return dapl_ib_status_convert (ib_status); +} + + +/* + * dapl_ib_setup_conn_listener + * + * Have the CM set up a connection listener. + * + * Input: + * ibm_hca_handle HCA handle + * qp_handle QP handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN +dapls_ib_setup_conn_listener ( + IN DAPL_IA *ia_ptr, + IN DAT_UINT64 ServiceID, + IN DAPL_SP *sp_ptr ) +{ + ib_api_status_t ib_status; + ib_cm_listen_t cm_listen; + dapl_ibal_ca_t *p_ca; + dapl_ibal_port_t *p_active_port; + + p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle; + + /* + * We are using the first active port in the list for + * communication. We have to get back here when we decide to support + * fail-over and high-availability. + */ + p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)ia_ptr->hca_ptr->port_num ); + + if (NULL == p_active_port) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"setup_conn_listener: SP = %p port %d is not available\n", + sp_ptr, ia_ptr->hca_ptr->port_num); + return (DAT_INVALID_STATE); + } + + if (p_active_port->p_attr->lid == 0) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsSCL: SP = %p SID = 0x" F64x " port %d\n", + sp_ptr, cl_hton64(ServiceID), p_active_port->p_attr->port_num); + return (DAT_INVALID_STATE); + } + + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "Setup_conn_listener: SP = %p port %d GID{0x" F64x ", 0x" F64x "} and SID 0x" F64x "\n", + sp_ptr, p_active_port->p_attr->port_num, + cl_hton64 (p_active_port->p_attr->p_gid_table[0].unicast.prefix), + cl_hton64 (p_active_port->p_attr->p_gid_table[0].unicast.interface_id), + cl_hton64 (ServiceID)); + + dapl_os_memzero (&cm_listen, sizeof (ib_cm_listen_t)); + + /* + * Listen for all request on this specific CA + */ + cm_listen.ca_guid = (p_ca->p_ca_attr->ca_guid); + cm_listen.svc_id = ServiceID; + cm_listen.qp_type = IB_QPT_RELIABLE_CONN; + + /* + * We do not use specific data buffer to check for specific connection + */ + cm_listen.p_compare_buffer = NULL;//(uint8_t*)&sp_ptr->conn_qual; + cm_listen.compare_offset = 0;//IB_MAX_REQ_PDATA_SIZE - sizeof(DAT_CONN_QUAL); + cm_listen.compare_length = 0;//sizeof(DAT_CONN_QUAL); + + /* + * We can pick a port here for communication and the others are reserved + * for fail-over / high-availability - TBD + */ + cm_listen.port_guid = p_active_port->p_attr->port_guid; + cm_listen.lid = p_active_port->p_attr->lid; + cm_listen.pkey = p_active_port->p_attr->p_pkey_table[0]; + + /* + * Register request or mra callback functions + */ + cm_listen.pfn_cm_req_cb = dapli_ib_cm_req_cb; + + ib_status = ib_cm_listen ( dapl_ibal_root.h_al, + &cm_listen, + dapli_ibal_listen_err_cb, + (void *) sp_ptr, + &sp_ptr->cm_srvc_handle); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"setup_conn_listener: SP = %p SID = 0x" F64x " listen failed = %s\n", + sp_ptr, cl_hton64 (ServiceID), ib_get_err_str(ib_status)); + } + + return dapl_ib_status_convert (ib_status); +} + +/* + * dapl_ib_remove_conn_listener + * + * Have the CM remove a connection listener. 
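+ *
+ * Note that ib_cm_cancel() is asynchronous: the body below therefore
+ * waits on sp_ptr->wait_object, which dapli_ib_cm_cancel_cb() wakes
+ * once the CM has stopped delivering callbacks for this listen. In
+ * outline:
+ *
+ *     ib_cm_cancel (sp_ptr->cm_srvc_handle, dapli_ib_cm_cancel_cb);
+ *     dapl_os_wait_object_wait (&sp_ptr->wait_object, DAT_TIMEOUT_INFINITE);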
+ * + * Input: + * ia_handle IA handle + * ServiceID IB Channel Service ID + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN dapls_ib_remove_conn_listener ( + IN DAPL_IA *ia_ptr, + IN DAPL_SP *sp_ptr) +{ + ib_api_status_t ib_status; + DAT_RETURN dat_status = DAT_SUCCESS; + + UNUSED_PARAM( ia_ptr ); + + if (sp_ptr->cm_srvc_handle) + { + ib_status = ib_cm_cancel (sp_ptr->cm_srvc_handle, + dapli_ib_cm_cancel_cb ); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsRCL: SP = %p ib_cm_cancel failed = %s\n", + sp_ptr, ib_get_err_str(ib_status)); + return (DAT_INVALID_PARAMETER); + } + dat_status = dapl_os_wait_object_wait ( + &sp_ptr->wait_object, DAT_TIMEOUT_INFINITE ); + + if ( DAT_SUCCESS != dat_status ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsRCL: SP = %p wait failed = 0x%x\n", + sp_ptr, dat_status ); + return dat_status; + } + + sp_ptr->cm_srvc_handle = NULL; + } + + return DAT_SUCCESS; +} + +/* + * dapls_ib_reject_connection + * + * Perform necessary steps to reject a connection + * + * Input: + * cr_handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN +dapls_ib_reject_connection ( + IN ib_cm_handle_t ib_cm_handle, + IN int reject_reason ) +{ + ib_api_status_t ib_status; + ib_cm_rej_t cm_rej; + static char *rej_table[] = + { + "INVALID_REJ_REASON", + "INVALID_REJ_REASON", + "INVALID_REJ_REASON", + "INVALID_REJ_REASON", + "INVALID_REJ_REASON", + "IB_CME_DESTINATION_REJECT", + "IB_CME_DESTINATION_REJECT_PRIVATE_DATA", + "IB_CME_DESTINATION_UNREACHABLE", + "IB_CME_TOO_MANY_CONNECTION_REQUESTS", + "IB_CME_LOCAL_FAILURE", + "IB_CM_LOCAL_FAILURE" + }; + +#define REJ_TABLE_SIZE IB_CM_LOCAL_FAILURE + + reject_reason = __min( reject_reason & 0xff, REJ_TABLE_SIZE); + + cm_rej.rej_status = IB_REJ_USER_DEFINED; + cm_rej.p_ari = (ib_ari_t *)&rej_table[reject_reason]; + cm_rej.ari_length = (uint8_t)strlen (rej_table[reject_reason]); + cm_rej.p_rej_pdata = NULL; + cm_rej.rej_length = 0; + + ib_status = ib_cm_rej ( ib_cm_handle, &cm_rej); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsRjC: cm_handle = %p reject failed = %s\n", + &ib_cm_handle, ib_get_err_str(ib_status)); + } + + return ( dapl_ib_status_convert ( ib_status ) ); + +} + + + + +/* + * dapls_ib_accept_connection + * + * Perform necessary steps to accept a connection + * + * Input: + * cr_handle + * ep_handle + * private_data_size + * private_data + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN +dapls_ib_accept_connection ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_EP_HANDLE ep_handle, + IN DAPL_PRIVATE *prd_ptr ) +{ + DAPL_CR *cr_ptr; + DAPL_EP *ep_ptr; + DAPL_IA *ia_ptr; + DAT_RETURN dat_status; + ib_api_status_t ib_status; + dapl_ibal_ca_t *p_ca; + dapl_ibal_port_t *p_active_port; + ib_cm_rep_t cm_rep; + + cr_ptr = (DAPL_CR *) cr_handle; + ep_ptr = (DAPL_EP *) ep_handle; + ia_ptr = ep_ptr->header.owner_ia; + + if ( ep_ptr->qp_state == DAPL_QP_STATE_UNATTACHED ) + { + /* + * If we are lazy attaching the QP then we may need to + * hook it up here. 
Typically, we run this code only for
+         * DAT_PSP_PROVIDER_FLAG
+         */
+        dat_status = dapls_ib_qp_alloc ( ia_ptr, ep_ptr, ep_ptr );
+
+        if ( dat_status != DAT_SUCCESS)
+        {
+            /* This is not a great error code, but all the spec allows */
+            dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsAC: CR = %p EP = %p alloc QP failed = 0x%x\n",
+                           cr_ptr, ep_ptr, dat_status);
+            return (dat_status);
+        }
+    }
+
+    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
+    p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)ia_ptr->hca_ptr->port_num );
+
+    if (NULL == p_active_port)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsAC: CR = %p EP = %p port %d is not available\n",
+                       cr_ptr, ep_ptr, ia_ptr->hca_ptr->port_num);
+        return (DAT_INVALID_STATE);
+    }
+
+    cr_ptr->param.local_ep_handle = ep_handle;
+    ep_ptr->cm_handle             = cr_ptr->ib_cm_handle;
+    ep_ptr->qp_state              = IB_QPS_INIT;
+    ep_ptr->cr_ptr                = cr_ptr;
+
+    dapl_os_memzero ( &cm_rep, sizeof (ib_cm_rep_t) );
+
+    cm_rep.h_qp        = ep_ptr->qp_handle;
+    cm_rep.qp_type     = IB_QPT_RELIABLE_CONN;
+    cm_rep.p_rep_pdata = (uint8_t *) prd_ptr;
+    cm_rep.rep_length  = IB_MAX_REP_PDATA_SIZE;
+
+    dapl_dbg_log ( DAPL_DBG_TYPE_CM,
+                   "--> DsAC: CR = %p EP = %p QP = %p cm_handle = %p\n",
+                   cr_ptr, ep_ptr, ep_ptr->qp_handle, ep_ptr->cm_handle );
+
+#ifdef DAPL_DBG
+#if 0
+    {
+        int i;
+
+        dapl_dbg_log ( DAPL_DBG_TYPE_EP, "--> DsAC: private_data: ");
+
+        for ( i = 0 ; i < IB_MAX_REP_PDATA_SIZE ; i++ )
+        {
+            dapl_dbg_log ( DAPL_DBG_TYPE_EP,
+                           "0x%x ", prd_ptr->private_data[i]);
+        }
+        dapl_dbg_log ( DAPL_DBG_TYPE_EP, "\n");
+    }
+#endif
+#endif
+
+    cm_rep.pfn_cm_rej_cb  = dapli_ib_cm_rej_cb;
+    cm_rep.pfn_cm_mra_cb  = dapli_ib_cm_mra_cb;
+    cm_rep.pfn_cm_rtu_cb  = dapli_ib_cm_rtu_cb;
+    cm_rep.pfn_cm_lap_cb  = dapli_ib_cm_lap_cb;
+    cm_rep.pfn_cm_dreq_cb = dapli_ib_cm_dreq_cb;
+
+    /*
+     * FIXME - Vu
+     * Pay attention to the attributes.
+     * Some of them should ideally be set by the DAT consumer.
+     */
+    /*
+     * We enable all access rights on the QP associated with this
+     * connection EP, and we enable flow_ctrl with retry until success.
+     * We will narrow the access rights and flow_ctrl to the DAT
+     * consumer's requirements as needed.
+     */
+    cm_rep.access_ctrl = IB_AC_LOCAL_WRITE|IB_AC_RDMA_WRITE|IB_AC_MW_BIND;
+    if ((ep_ptr->param.ep_attr.max_rdma_read_in > 0)
+        || (ep_ptr->param.ep_attr.max_rdma_read_out > 0))
+    {
+        cm_rep.access_ctrl |= IB_AC_RDMA_READ;
+    }
+
+    cm_rep.sq_depth          = 0;
+    cm_rep.rq_depth          = 0;
+    cm_rep.init_depth        = (uint8_t)ep_ptr->param.ep_attr.max_rdma_read_out;
+    cm_rep.flow_ctrl         = TRUE;
+    cm_rep.flags             = 0;
+    cm_rep.failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED;
+    cm_rep.target_ack_delay  = 14;
+    cm_rep.rnr_nak_timeout   = 12;
+    cm_rep.rnr_retry_cnt     = 6;
+    cm_rep.p_recv_wr         = NULL;
+    cm_rep.pp_recv_failure   = NULL;
+
+    ib_status = ib_cm_rep ( cr_ptr->ib_cm_handle, &cm_rep);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsAC: EP = %p QP = %p cm_handle = %p reply failed = %#x\n",
+                       ep_ptr, ep_ptr->qp_handle, ep_ptr->cm_handle, ib_status );
+    }
+
+    return ( dapl_ib_status_convert ( ib_status ) );
+}
+
+
+
+/*
+ * dapls_ib_disconnect_clean
+ *
+ * Clean up outstanding connection data. This routine is invoked
+ * after the final disconnect callback has occurred, and only on the
+ * ACTIVE side of a connection.
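+ *
+ * The body below forces the QP out of any transient state: it queries
+ * the current state and, unless the QP is already in RESET, INIT or
+ * ERROR, pushes it to ERROR so that outstanding work requests are
+ * flushed to the CQ. In outline (names as used below):
+ *
+ *     ib_query_qp (ep_ptr->qp_handle, &qp_attr);
+ *     if (qp_attr.state is none of RESET / INIT / ERROR)
+ *         dapls_modify_qp_state_to_error (ep_ptr->qp_handle);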
+ * + * Input: + * ep_ptr DAPL_EP + * + * Output: + * none + * + * Returns: + * void + * + */ +void +dapls_ib_disconnect_clean ( + IN DAPL_EP *ep_ptr, + IN DAT_BOOLEAN active, + IN const ib_cm_events_t ib_cm_event ) +{ + DAPL_IA *ia_ptr; + ib_qp_attr_t qp_attr; + ib_api_status_t ib_status; + + ia_ptr = ep_ptr->header.owner_ia; + + if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA ) + { + return; + } + dapl_os_assert ( ep_ptr->header.magic == DAPL_MAGIC_EP || + ep_ptr->header.magic == DAPL_MAGIC_EP_EXIT ); + + /* + * Query the QP to get the current state */ + ib_status = ib_query_qp ( ep_ptr->qp_handle, &qp_attr ); + + if ( ib_status != IB_SUCCESS ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, ">>>DSCONN_CLEAN(%s): Query QP failed = %#x\n", + (active?"Act":"Pas"),ib_status ); + return; + } + + ep_ptr->qp_state = qp_attr.state; + + dapl_dbg_log (DAPL_DBG_TYPE_CM,">>>DSCONN_CLEAN(%s): cm_event: %d ep_ptr=%p ep_state:%d qp_state: %#x\n", + (active?"A":"P"), + ib_cm_event, + ep_ptr, + ep_ptr->param.ep_state, + ep_ptr->qp_state); + if ( ep_ptr->qp_state != IB_QPS_ERROR && + ep_ptr->qp_state != IB_QPS_RESET && + ep_ptr->qp_state != IB_QPS_INIT ) + { + ep_ptr->qp_state = IB_QPS_ERROR; + dapls_modify_qp_state_to_error (ep_ptr->qp_handle); + } + return; +} + + +/* + * dapls_ib_cr_handoff + * + * Hand off the connection request to another service point + * + * Input: + * cr_handle DAT_CR_HANDLE + * handoff_serv_id DAT_CONN_QUAL + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN +dapls_ib_cr_handoff ( + IN DAT_CR_HANDLE cr_handle, + IN DAT_CONN_QUAL handoff_serv_id ) +{ + DAPL_CR *cr_ptr; + ib_api_status_t ib_status; + + cr_ptr = (DAPL_CR *) cr_handle; + + if (cr_ptr->ib_cm_handle.cid == 0xFFFFFFFF) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p invalid cm handle\n", cr_ptr); + return DAT_INVALID_PARAMETER; + } + + if (cr_ptr->sp_ptr == IB_INVALID_HANDLE) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p invalid psp handle\n", cr_ptr); + return DAT_INVALID_PARAMETER; + } + + ib_status = ib_cm_handoff (cr_ptr->ib_cm_handle, handoff_serv_id); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR,"--> DsCH: CR = %p handoff failed = %s\n", + cr_ptr, ib_get_err_str(ib_status)); + return dapl_ib_status_convert (ib_status); + } + + /* Remove the CR from the queue */ + dapl_sp_remove_cr (cr_ptr->sp_ptr, cr_ptr); + + /* + * If this SP has been removed from service, free it + * up after the last CR is removed + */ + dapl_os_lock (&cr_ptr->sp_ptr->header.lock); + if ( cr_ptr->sp_ptr->listening != DAT_TRUE && + cr_ptr->sp_ptr->cr_list_count == 0 && + cr_ptr->sp_ptr->state != DAPL_SP_STATE_FREE ) + { + dapl_dbg_log (DAPL_DBG_TYPE_CM, + "--> DsCH: CR = %p disconnect dump SP = %p \n", + cr_ptr, cr_ptr->sp_ptr); + /* Decrement the ref count on the EVD */ + if (cr_ptr->sp_ptr->evd_handle) + { + dapl_os_atomic_dec (& ((DAPL_EVD *)cr_ptr->sp_ptr->evd_handle)->evd_ref_count); + cr_ptr->sp_ptr->evd_handle = NULL; + } + cr_ptr->sp_ptr->state = DAPL_SP_STATE_FREE; + dapl_os_unlock (&cr_ptr->sp_ptr->header.lock); + (void)dapls_ib_remove_conn_listener ( cr_ptr->sp_ptr->header.owner_ia, + cr_ptr->sp_ptr ); + dapls_ia_unlink_sp ( (DAPL_IA *)cr_ptr->sp_ptr->header.owner_ia, + cr_ptr->sp_ptr ); + dapls_sp_free_sp ( cr_ptr->sp_ptr ); + } + else + { + dapl_os_unlock (&cr_ptr->sp_ptr->header.lock); + } + + /* + * Clean up and dispose of the resource + */ + dapls_cr_free (cr_ptr); + + return (DAT_SUCCESS); +} + +/* + * 
dapls_ib_private_data_size + * + * Return the size of private data given a connection op type + * + * Input: + * prd_ptr private data pointer + * conn_op connection operation type + * + * If prd_ptr is NULL, this is a query for the max size supported by + * the provider, otherwise it is the actual size of the private data + * contained in prd_ptr. + * + * Infiniband has fixed size private data, so prd_ptr is ignored. + * + * Output: + * None + * + * Returns: + * length of private data + * + */ +DAT_COUNT +dapls_ib_private_data_size ( + IN DAPL_PRIVATE *prd_ptr, + IN DAPL_PDATA_OP conn_op) +{ + int size; + + UNUSED_PARAM( prd_ptr ); + + switch (conn_op) + { + case DAPL_PDATA_CONN_REQ: + { + size = IB_MAX_REQ_PDATA_SIZE; + break; + } + case DAPL_PDATA_CONN_REP: + { + size = IB_MAX_REP_PDATA_SIZE; + break; + } + case DAPL_PDATA_CONN_REJ: + { + size = IB_MAX_REJ_PDATA_SIZE; + break; + } + case DAPL_PDATA_CONN_DREQ: + { + size = IB_MAX_DREQ_PDATA_SIZE; + break; + } + case DAPL_PDATA_CONN_DREP: + { + size = IB_MAX_DREP_PDATA_SIZE; + break; + } + default: + { + size = 0; + } + } /* end case */ + + return size; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ + diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_dto.h b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_dto.h new file mode 100644 index 00000000..d71feace --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_dto.h @@ -0,0 +1,327 @@ + +/* + * Copyright (c) 2002, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under the terms of the "Common Public + * License" a copy of which is in the file LICENSE.txt in the root + * directory. The license is also available from the Open Source + * Initiative, see http://www.opensource.org/licenses/cpl.php. 
+ * + */ + +/********************************************************************** + * + * MODULE: dapl_ibal_dto.h + * + * PURPOSE: Utility routines for data transfer operations using the + * IBAL APIs + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_IBAL_DTO_H +#define _DAPL_IBAL_DTO_H + +#include "dapl_ibal_util.h" + +STATIC _INLINE_ int +dapls_cqe_opcode_convert (ib_work_completion_t *cqe_p); + +extern DAT_RETURN +dapls_ib_cq_late_alloc ( + IN ib_pd_handle_t pd_handle, + IN DAPL_EVD *evd_ptr); + +#define DAPL_DEFAULT_DS_ENTRIES 8 + +/* + * dapls_ib_post_recv + * + * Provider specific Post RECV function + */ +STATIC _INLINE_ DAT_RETURN +dapls_ib_post_recv ( + IN DAPL_EP *ep_ptr, + IN DAPL_COOKIE *cookie, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov) +{ + ib_api_status_t ib_status; + ib_recv_wr_t recv_wr, *failed_wr_p; + ib_local_ds_t ds_array[DAPL_DEFAULT_DS_ENTRIES], *ds_array_p; + DAT_COUNT i, total_len; + + dapl_os_memzero(&recv_wr, sizeof(ib_recv_wr_t)); + recv_wr.wr_id = (DAT_UINT64) cookie; + recv_wr.num_ds = num_segments; + + if( num_segments <= DAPL_DEFAULT_DS_ENTRIES ) + { + ds_array_p = ds_array; + } + else + { + ds_array_p = dapl_os_alloc( num_segments * sizeof(ib_local_ds_t) ); + } + recv_wr.ds_array = ds_array_p; + + if (NULL == ds_array_p) + { + return (DAT_INSUFFICIENT_RESOURCES); + } + + total_len = 0; + + for (i = 0; i < num_segments; i++, ds_array_p++) + { + ds_array_p->length = (uint32_t)local_iov[i].segment_length; + ds_array_p->lkey = local_iov[i].lmr_context; + ds_array_p->vaddr = local_iov[i].virtual_address; + total_len += ds_array_p->length; + } + + if (cookie != NULL) + { + cookie->val.dto.size = total_len; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, "--> DsPR: EP = %p QP = %p cookie= %p, num_seg= %d\n", + ep_ptr, ep_ptr->qp_handle, cookie, num_segments); + } + + recv_wr.p_next = NULL; + + ib_status = ib_post_recv( ep_ptr->qp_handle, &recv_wr, &failed_wr_p ); + + if( num_segments > DAPL_DEFAULT_DS_ENTRIES ) + dapl_os_free( recv_wr.ds_array, num_segments * sizeof(ib_local_ds_t) ); + + if (IB_SUCCESS == ib_status) + { + return DAT_SUCCESS; + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_EP, "--> DsPR: post_recv status = %s\n", + ib_get_err_str(ib_status)); + + /* + * Moving QP to error state; + */ + ib_status = dapls_modify_qp_state_to_error ( ep_ptr->qp_handle); + ep_ptr->qp_state = IB_QPS_ERROR; + + return (dapl_ib_status_convert (ib_status)); + } +} + + +/* + * dapls_ib_post_send + * + * Provider specific Post SEND function + */ +STATIC _INLINE_ DAT_RETURN +dapls_ib_post_send ( + IN DAPL_EP *ep_ptr, + IN ib_send_op_type_t op_type, + IN DAPL_COOKIE *cookie, + IN DAT_COUNT num_segments, + IN DAT_LMR_TRIPLET *local_iov, + IN const DAT_RMR_TRIPLET *remote_iov, + IN DAT_COMPLETION_FLAGS completion_flags) +{ + ib_api_status_t ib_status; + ib_send_wr_t send_wr, *failed_wr_p; + ib_local_ds_t ds_array[DAPL_DEFAULT_DS_ENTRIES], *ds_array_p; + DAT_COUNT i, total_len; + + if (ep_ptr->param.ep_state != DAT_EP_STATE_CONNECTED) + { + ib_qp_attr_t qp_attr; + ib_query_qp ( ep_ptr->qp_handle, &qp_attr ); + + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsPS: !CONN EP (%p) ep_state=%d QP_state=%d\n", + ep_ptr, ep_ptr->param.ep_state, qp_attr.state ); + + return ( DAT_ERROR(DAT_INVALID_STATE, DAT_INVALID_STATE_EP_DISCONNECTED ) ); + } + dapl_os_memzero (&send_wr, sizeof(ib_send_wr_t)); + send_wr.wr_type = op_type; + send_wr.num_ds = num_segments; + + if( num_segments <= DAPL_DEFAULT_DS_ENTRIES ) + { + ds_array_p = ds_array; 
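+        /* Gathers of <= DAPL_DEFAULT_DS_ENTRIES reuse this stack array,
+         * so the common small-IOV case avoids the dapl_os_alloc() in the
+         * else branch and the matching dapl_os_free() after the post. */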
+ } + else + { + ds_array_p = dapl_os_alloc( num_segments * sizeof(ib_local_ds_t) ); + } + send_wr.ds_array = ds_array_p; + + if (NULL == ds_array_p) + { + return (DAT_INSUFFICIENT_RESOURCES); + } + + total_len = 0; + + for (i = 0; i < num_segments; i++, ds_array_p++) + { + ds_array_p->length = (uint32_t)local_iov[i].segment_length; + ds_array_p->lkey = local_iov[i].lmr_context; + ds_array_p->vaddr = local_iov[i].virtual_address; + total_len += ds_array_p->length; + } + + if (cookie != NULL) + { + cookie->val.dto.size = total_len; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsPS: EVD=%p EP=%p QP=%p type=%d, sg=%d ln=%d, ck=%p 0x" F64x "\n", + ep_ptr->param.request_evd_handle, ep_ptr, ep_ptr->qp_handle, + op_type, num_segments, total_len, + cookie, cookie->val.dto.cookie.as_64 ); + } + + send_wr.wr_id = (DAT_UINT64)cookie; + + /* RC for now */ + if (total_len > 0) + { + send_wr.remote_ops.vaddr = remote_iov->target_address; + send_wr.remote_ops.rkey = remote_iov->rmr_context; + } + + send_wr.send_opt = 0; + + send_wr.send_opt |= (DAT_COMPLETION_BARRIER_FENCE_FLAG & + completion_flags) ? IB_SEND_OPT_FENCE : 0; + send_wr.send_opt |= (DAT_COMPLETION_SUPPRESS_FLAG & + completion_flags) ? 0 : IB_SEND_OPT_SIGNALED; + send_wr.send_opt |= (DAT_COMPLETION_SOLICITED_WAIT_FLAG & + completion_flags) ? IB_SEND_OPT_SOLICITED : 0; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, "--> DsPS: EP=%p QP=%p send_opt=0x%x," + "rem_addr=%p, rem_rkey=0x%x completion_flags=0x%x\n", + ep_ptr, ep_ptr->qp_handle, + send_wr.send_opt, (void *)(uintptr_t) send_wr.remote_ops.vaddr, + send_wr.remote_ops.rkey, completion_flags); + + send_wr.p_next = NULL; + + /* hold reference for QP command serialization on destruction */ + dapl_os_atomic_inc (&ep_ptr->req_count); + ib_status = ib_post_send( ep_ptr->qp_handle, &send_wr, &failed_wr_p ); + dapl_os_atomic_dec (&ep_ptr->req_count); + + if( num_segments > DAPL_DEFAULT_DS_ENTRIES ) + dapl_os_free( send_wr.ds_array, num_segments * sizeof(ib_local_ds_t) ); + + if (IB_SUCCESS == ib_status) + { + return DAT_SUCCESS; + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_EP, "--> DsPS: EP=%p post_send status = %s\n", + ep_ptr, ib_get_err_str(ib_status)); + + /* + * Moving QP to error state; + */ + ib_status = dapls_modify_qp_state_to_error ( ep_ptr->qp_handle); + ep_ptr->qp_state = IB_QPS_ERROR; + + return (dapl_ib_status_convert (ib_status)); + } +} + +/* + * dapls_ib_optional_prv_dat + * + * Allocate space for private data to be used in CR calls + * + * Input: + * cr_ptr CR handle + * event_data data provided by the provider callback function + * cr_pp Pointer for private data + * + * Output: + * cr_pp Area + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +STATIC _INLINE_ DAT_RETURN +dapls_ib_optional_prv_dat ( + IN DAPL_CR *cr_ptr, + IN const void *event_data, + OUT DAPL_CR **cr_pp) +{ + DAT_RETURN dat_status = DAT_SUCCESS; + DAPL_PRIVATE *p_prv_data = (DAPL_PRIVATE *)event_data; + + if ( ! 
cr_ptr->param.private_data_size ) + { + cr_ptr->param.private_data_size = sizeof(cr_ptr->private_data); + cr_ptr->param.private_data = cr_ptr->private_data; + dapl_os_memcpy(cr_ptr->private_data, p_prv_data->private_data, cr_ptr->param.private_data_size ); + *cr_pp = (DAPL_CR *)cr_ptr->param.private_data; + } + return dat_status; +} + + +STATIC _INLINE_ int +dapls_cqe_opcode_convert (ib_work_completion_t *cqe_p) +{ + switch (((ib_work_completion_t *)cqe_p)->wc_type) + { + case IB_WC_SEND: + { + return (OP_SEND); + } + case IB_WC_RDMA_WRITE: + case IB_WC_RECV_RDMA_WRITE: + { + return (OP_RDMA_WRITE); + } + case IB_WC_RDMA_READ: + { + return (OP_RDMA_READ); + } + case IB_WC_COMPARE_SWAP: + { + return (OP_COMP_AND_SWAP); + } + case IB_WC_FETCH_ADD: + { + return (OP_FETCH_AND_ADD); + } + case IB_WC_MW_BIND: + { + return (OP_BIND_MW); + } + case IB_WC_RECV: + { + return (OP_RECEIVE); + } + default : + { + /* error */ + return (IB_ERROR); + } + } +} + +#define DAPL_GET_CQE_WRID(cqe_p) ((ib_work_completion_t *)cqe_p)->wr_id +#define DAPL_GET_CQE_OPTYPE(cqe_p) dapls_cqe_opcode_convert(cqe_p) +#define DAPL_GET_CQE_BYTESNUM(cqe_p) ((ib_work_completion_t *)cqe_p)->length +#define DAPL_GET_CQE_STATUS(cqe_p) ((ib_work_completion_t *)cqe_p)->status + +#endif /* _DAPL_IBAL_DTO_H */ diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h new file mode 100644 index 00000000..89ce48a8 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_kmod.h @@ -0,0 +1,91 @@ + +/* + * Copyright (c) 2002, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under the terms of the "Common Public + * License" a copy of which is in the file LICENSE.txt in the root + * directory. The license is also available from the Open Source + * Initiative, see http://www.opensource.org/licenses/cpl.php. 
+ * + */ + +/********************************************************************** + * + * MODULE: dapl_ibal_kmod.h + * + * PURPOSE: Utility defs & routines for access to Intel IBAL APIs + * + * $Id$ + * + **********************************************************************/ + +#ifndef _DAPL_IBAL_KMOD_H_ +#define _DAPL_IBAL_KMOD_H_ + +#include +#include +#include + +#define MVDAPL_DEV_KEY 'm' +#define MVDAPL_GET_ANY_SVID _IO ( MVDAPL_DEV_KEY, psp_get_any_svid ) +#define MVDAPL_MRDB_RECORD_INSERT _IO ( MVDAPL_DEV_KEY, mrdb_record_insert ) +#define MVDAPL_MRDB_RECORD_DEC _IO ( MVDAPL_DEV_KEY, mrdb_record_dec ) +#define MVDAPL_MRDB_RECORD_QUERY _IO ( MVDAPL_DEV_KEY, mrdb_record_query ) +#define MVDAPL_MRDB_RECORD_UPDATE _IO ( MVDAPL_DEV_KEY, mrdb_record_update ) + +typedef enum +{ + psp_get_any_svid, + mrdb_record_insert, + mrdb_record_dec, + mrdb_record_query, + mrdb_record_update, + mvdapl_max_ops +} mvdapl_dev_ops_t; + +typedef struct _mvdapl_user_ctx +{ + cl_spinlock_t oust_mrdb_lock; + cl_qlist_t oust_mrdb_head; +} mvdapl_user_ctx_t; + + +typedef struct _mvdapl_ca_t +{ + cl_spinlock_t mrdb_lock; + cl_qlist_t mrdb_head; + boolean_t initialized; + cl_dev_handle_t mrdb_dev_handle; + ib_net64_t ca_guid; +} mvdapl_ca_t; + + +typedef struct _mvdapl_root +{ + ib_al_handle_t h_al; + intn_t guid_count; + mvdapl_ca_t *mvdapl_ca_tbl; + +} mvdapl_root_t; + +typedef struct _mrdb_record_t +{ + cl_list_item_t next; + ib_lmr_cookie_t key_cookie; + void *mr_handle; + int ib_shmid; + uint32_t ref_count; + boolean_t initialized; + cl_spinlock_t record_lock; +} mrdb_record_t; + + +typedef struct _oust_mrdb_rec +{ + cl_list_item_t next; + mrdb_record_t *p_record; + uint32_t ref_count; +} oust_mrdb_rec_t; + + +#endif /* _DAPL_IBAL_KMOD_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c new file mode 100644 index 00000000..ea7f37c5 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.c @@ -0,0 +1,392 @@ + +/* + * Copyright (c) 2002, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under the terms of the "Common Public + * License" a copy of which is in the file LICENSE.txt in the root + * directory. The license is also available from the Open Source + * Initiative, see http://www.opensource.org/licenses/cpl.php. 
+ * + */ + +/********************************************************************** + * + * MODULE: dapl_ibal_mrdb.c + * + * PURPOSE: Utility routines for access to IBAL APIs + * + * $Id$ + * + **********************************************************************/ + +#include "dapl.h" +#include "dapl_adapter_util.h" +#include "dapl_ibal_kmod.h" +#include "dapl_ibal_mrdb.h" + +DAT_RETURN dapls_mrdb_init ( + IN DAPL_HCA *hca_ptr) +{ + cl_status_t cl_status; + char name[32]; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsMI"); + return DAT_INVALID_PARAMETER; + } + + sprintf (name, + "/dev/mvdapl%x", + (uint32_t) cl_ntoh64 (p_ca->p_ca_attr->ca_guid)); + + cl_status = cl_open_device ( (cl_dev_name_t) name, &p_ca->mlnx_device); + + if (cl_status != CL_SUCCESS) + { + /* dapl_dbg_log ( DAPL_DBG_TYPE_UTIL, + "--> DsMI: Init MRDB failed = 0x%x\n", cl_status); */ + p_ca->mlnx_device = 0; + } + + return DAT_SUCCESS; +} + + +DAT_RETURN dapls_mrdb_exit ( + IN DAPL_HCA *hca_ptr) +{ + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsME"); + return DAT_INVALID_PARAMETER; + } + + if (p_ca->mlnx_device) + { + cl_close_device (p_ca->mlnx_device); + } + + return DAT_SUCCESS; +} + + +DAT_RETURN dapls_mrdb_record_insert ( + IN DAPL_HCA *hca_ptr, + IN DAT_LMR_COOKIE shared_mem_id, + OUT int *p_ib_shmid) +{ + cl_status_t cl_status; + mrdb_rec_insert_ioctl_t ioctl_buf; + uintn_t bytes_ret; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsMRI"); + return DAT_INVALID_PARAMETER; + } + + bytes_ret = 0; + cl_memclr (&ioctl_buf, sizeof (ioctl_buf)); + cl_memcpy (ioctl_buf.shared_mem_id, shared_mem_id, IBAL_LMR_COOKIE_SIZE); + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRI: MVDAPL_MRDB_REC_INSERT mem_cookie %p\n", + shared_mem_id); +#if defined(DAPL_DBG) + { + int i; + char *c = (char *) shared_mem_id; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRI: mem_cookie: \n"); + + for ( i = 0; i < IBAL_LMR_COOKIE_SIZE ; i++) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "0x%x ", *(c+i)); + } + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "\n"); + + } +#endif /* DAPL_DBG */ + + cl_status = cl_ioctl_device ( p_ca->mlnx_device, + MVDAPL_MRDB_RECORD_INSERT, + &ioctl_buf, + sizeof (mrdb_rec_insert_ioctl_t), + &bytes_ret); + if ((cl_status != CL_SUCCESS) || + (ioctl_buf.status == IB_INSUFFICIENT_MEMORY)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsMRI: Failed to IOCTL record_insert 0x%x\n", cl_status); + return DAT_INSUFFICIENT_RESOURCES; + } + + *p_ib_shmid = (int) ioctl_buf.inout_f; + + if (ioctl_buf.status == IB_ERROR) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRI: There is a record with shmid 0x%x\n", + *p_ib_shmid); + return DAT_INVALID_STATE; + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRI: Insert new mrdb record with shmid 0x%x\n", + *p_ib_shmid); + } + + return DAT_SUCCESS; +} + +DAT_RETURN dapls_mrdb_record_dec ( + IN DAPL_HCA *hca_ptr, + IN DAT_LMR_COOKIE shared_mem_id) +{ + cl_status_t cl_status; + mrdb_rec_dec_ioctl_t ioctl_buf; + uintn_t bytes_ret; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsMRD"); + return DAT_INVALID_PARAMETER; + } + + 
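+    /* Marshal the shared-memory cookie into the IOCTL buffer and ask the
+     * kernel module to drop one reference on the matching MR record. */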
bytes_ret = 0; + cl_memclr (&ioctl_buf, sizeof (ioctl_buf)); + cl_memcpy (ioctl_buf.shared_mem_id, shared_mem_id, IBAL_LMR_COOKIE_SIZE); + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRD: MVDAPL_MRDB_REC_DEC mem_cookie 0x%p\n", + shared_mem_id); +#if defined(DAPL_DBG) + { + int i; + char *c = (char *) shared_mem_id; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRD: mem_cookie: \n"); + + for ( i = 0; i < IBAL_LMR_COOKIE_SIZE ; i++) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "0x%x ", *(c+i)); + } + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "\n"); + + } +#endif /* DAPL_DBG */ + + cl_status = cl_ioctl_device ( p_ca->mlnx_device, + MVDAPL_MRDB_RECORD_DEC, + &ioctl_buf, + sizeof (mrdb_rec_dec_ioctl_t), + &bytes_ret); + if ((cl_status != CL_SUCCESS) || + (ioctl_buf.status != IB_SUCCESS)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsMRD: IOCTL failed 'cause there is no record %s\n", + ib_get_err_str(ioctl_buf.status)); + return DAT_INVALID_STATE; + } + + return DAT_SUCCESS; +} + +DAT_RETURN dapls_mrdb_record_update ( + IN DAPL_HCA *hca_ptr, + IN DAT_LMR_COOKIE shared_mem_id, + IN ib_mr_handle_t mr_handle) +{ + cl_status_t cl_status; + mrdb_rec_update_ioctl_t ioctl_buf; + uintn_t bytes_ret; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsMRU"); + return DAT_INVALID_PARAMETER; + } + + bytes_ret = 0; + cl_memclr (&ioctl_buf, sizeof (ioctl_buf)); + cl_memcpy (ioctl_buf.shared_mem_id, shared_mem_id, IBAL_LMR_COOKIE_SIZE); + ioctl_buf.mr_handle = mr_handle; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRU: MVDAPL_MRDB_REC_UPDATE mr_handle %p\n", mr_handle); +#if defined(DAPL_DBG) + { + int i; + char *c = (char *) shared_mem_id; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRU: mem_cookie: \n"); + + for ( i = 0; i < IBAL_LMR_COOKIE_SIZE ; i++) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "0x%x ", *(c+i)); + } + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "\n"); + + } +#endif /* DAPL_DBG */ + + cl_status = cl_ioctl_device ( p_ca->mlnx_device, + MVDAPL_MRDB_RECORD_UPDATE, + &ioctl_buf, + sizeof (mrdb_rec_update_ioctl_t), + &bytes_ret); + if ((cl_status != CL_SUCCESS) || + (ioctl_buf.status != IB_SUCCESS)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsMRU: IOCTL update_record failed %s\n", + ib_get_err_str(ioctl_buf.status)); + return DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; +} + + +DAT_RETURN dapls_mrdb_record_query ( + IN DAPL_HCA *hca_ptr, + IN DAT_LMR_COOKIE shared_mem_id, + OUT int *p_ib_shmid, + OUT ib_mr_handle_t *p_mr_handle) +{ + cl_status_t cl_status; + mrdb_rec_query_ioctl_t ioctl_buf; + uintn_t bytes_ret; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsMRQ"); + return DAT_INVALID_PARAMETER; + } + + bytes_ret = 0; + cl_memclr (&ioctl_buf, sizeof (ioctl_buf)); + + cl_memcpy (ioctl_buf.shared_mem_id, shared_mem_id, IBAL_LMR_COOKIE_SIZE); + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRQ: MVDAPL_MRDB_REC_QUERY mem_cookie 0x%p\n", + shared_mem_id); + #if defined(DAPL_DBG) + { + int i; + char *c = (char *) shared_mem_id; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRQ: mem_cookie: \n"); + + for ( i = 0; i < IBAL_LMR_COOKIE_SIZE ; i++) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "0x%x ", *(c+i)); + } + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "\n"); + + } + #endif + + cl_status = cl_ioctl_device ( p_ca->mlnx_device, + MVDAPL_MRDB_RECORD_QUERY, + &ioctl_buf, + sizeof 
(mrdb_rec_query_ioctl_t), + &bytes_ret); + if ((cl_status != CL_SUCCESS) || + (ioctl_buf.status != IB_SUCCESS)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsMRQ: IOCTL query_record failed %s\n", + ib_get_err_str(ioctl_buf.status)); + return DAT_INTERNAL_ERROR; + } + + *p_mr_handle = ioctl_buf.mr_handle; + *p_ib_shmid = (int) ioctl_buf.inout_f; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsMRQ: MVDAPL_MRDB_REC_QUERY mr_handle 0x%p shmid 0x%x\n", + *p_mr_handle, *p_ib_shmid); + + return DAT_SUCCESS; +} + + +DAT_RETURN dapls_ib_get_any_svid ( + IN DAPL_HCA *hca_ptr, + OUT DAT_CONN_QUAL *p_svid) +{ + cl_status_t cl_status; + psp_get_any_svid_ioctl_t ioctl_buf; + uintn_t bytes_ret; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle; + + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: p_ca is NULL\n","DsPGAS"); + return DAT_INVALID_PARAMETER; + } + + bytes_ret = 0; + cl_memclr (&ioctl_buf, sizeof (ioctl_buf)); + + cl_status = cl_ioctl_device ( p_ca->mlnx_device, + MVDAPL_GET_ANY_SVID, + &ioctl_buf, + sizeof (psp_get_any_svid_ioctl_t), + &bytes_ret); + if ((cl_status != CL_SUCCESS) || + (ioctl_buf.status != IB_SUCCESS)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsMRQ: IOCTL query_record failed %s\n", + ib_get_err_str(ioctl_buf.status)); + return DAT_INTERNAL_ERROR; + } + + *p_svid = (DAT_CONN_QUAL) ioctl_buf.inout_f; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsPGAS: new ServiceID 0x%x\n", + *p_svid); + + return DAT_SUCCESS; +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ + diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h new file mode 100644 index 00000000..eb89ee0e --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_mrdb.h @@ -0,0 +1,52 @@ + +/* + * Copyright (c) 2002, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under the terms of the "Common Public + * License" a copy of which is in the file LICENSE.txt in the root + * directory. The license is also available from the Open Source + * Initiative, see http://www.opensource.org/licenses/cpl.php. 
+ *
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ibal_mrdb.h
+ *
+ * PURPOSE: Utility defs & routines for access to Intel IBAL APIs
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#ifndef _DAPL_IBAL_MRDB_H_
+#define _DAPL_IBAL_MRDB_H_
+
+#include <iba/ib_types.h>
+#include <iba/ib_al.h>
+
+#define MVDAPL_BASE_SHMID 0xF00
+#define MVDAPL_BASE_SVID 0xF
+#define MVDAPL_MAX_SHMID 0xFFFFFFFF
+#define MVDAPL_MAX_SVID 0xEFFFFFFF
+
+#define IBAL_LMR_COOKIE_SIZE 40
+typedef char (* ib_lmr_cookie_t)[IBAL_LMR_COOKIE_SIZE];
+
+typedef struct _mrdb_record_ioctl
+{
+    char shared_mem_id[IBAL_LMR_COOKIE_SIZE];
+    void *mr_handle;
+    ib_net64_t inout_f;
+    ib_api_status_t status;
+} mrdb_record_ioctl_t;
+
+typedef mrdb_record_ioctl_t mrdb_rec_dec_ioctl_t;
+typedef mrdb_record_ioctl_t mrdb_rec_insert_ioctl_t;
+typedef mrdb_record_ioctl_t mrdb_rec_query_ioctl_t;
+typedef mrdb_record_ioctl_t mrdb_rec_update_ioctl_t;
+typedef mrdb_record_ioctl_t psp_get_any_svid_ioctl_t;
+
+
+#endif /* _DAPL_IBAL_MRDB_H_ */
+
diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_qp.c b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_qp.c
new file mode 100644
index 00000000..c6a45744
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_qp.c
@@ -0,0 +1,681 @@
+
+/*
+ * Copyright (c) 2002, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under the terms of the "Common Public
+ * License" a copy of which is in the file LICENSE.txt in the root
+ * directory. The license is also available from the Open Source
+ * Initiative, see http://www.opensource.org/licenses/cpl.php.
+ *
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ibal_qp.c
+ *
+ * PURPOSE: IB QP routines for access to IBAL APIs
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+#include "dapl_evd_util.h"
+#include "dapl_ibal_util.h"
+
+#define DAPL_IBAL_QKEY 0
+#define DAPL_IBAL_START_PSN 0
+
+static void
+dapli_ib_qp_async_error_cb(
+    IN ib_async_event_rec_t* p_err_rec )
+{
+    DAPL_EP *ep_ptr = (DAPL_EP *)p_err_rec->context;
+    DAPL_EVD *evd_ptr;
+    DAPL_IA *ia_ptr;
+    dapl_ibal_ca_t *p_ca;
+    dapl_ibal_evd_cb_t *evd_cb;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC QP error %d for qp context %p\n",
+                  p_err_rec->code, p_err_rec->context);
+    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC qp_handle %p qpn %u\n",
+                  ((DAPL_EP *)p_err_rec->context)->qp_handle,
+                  ((DAPL_EP *)p_err_rec->context)->qpn);
+
+    /*
+     * Verify handles EP, EVD, and hca_handle
+     */
+    if ( DAPL_BAD_HANDLE (ep_ptr, DAPL_MAGIC_EP ) ||
+         DAPL_BAD_HANDLE (ep_ptr->param.connect_evd_handle, DAPL_MAGIC_EVD) )
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC: invalid EP %p \n", ep_ptr);
+        return;
+    }
+    ia_ptr = ep_ptr->header.owner_ia;
+    evd_ptr = ia_ptr->async_error_evd;
+
+    if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD) ||
+        !
(evd_ptr->evd_flags & DAT_EVD_ASYNC_FLAG)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC: invalid EVD %p \n", evd_ptr); + return; + } + p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle; + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC: can't find %s HCA\n", + (ia_ptr->header.provider)->device_name); + return; + } + + /* find QP error callback using ia_ptr for context */ + evd_cb = dapli_find_evd_cb_by_context (ia_ptr, p_ca); + if ((evd_cb == NULL) || (evd_cb->pfn_async_qp_err_cb == NULL)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiQpAEC: no ERROR cb on %p found \n", p_ca); + return; + } + + dapl_os_lock (&ep_ptr->header.lock); + ep_ptr->param.ep_state = DAT_EP_STATE_DISCONNECT_PENDING; + dapl_os_unlock (&ep_ptr->header.lock); + + /* force disconnect, QP error state, to insure DTO's get flushed */ + dapls_ib_disconnect ( ep_ptr, DAT_CLOSE_ABRUPT_FLAG ); + + /* maps to dapl_evd_qp_async_error_callback(), context is EP */ + evd_cb->pfn_async_qp_err_cb( (ib_hca_handle_t)p_ca, + (ib_error_record_t*)&p_err_rec->code, ep_ptr); +} + +/* + * dapl_ib_qp_alloc + * + * Alloc a QP + * + * Input: + * *ia_ptr pointer to DAPL IA + * *ep_ptr pointer to DAPL EP + * *ep_ctx_ptr pointer to DAPL EP context + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_qp_alloc ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_ptr, + IN DAPL_EP *ep_ctx_ptr) +{ + DAT_EP_ATTR *attr; + DAPL_EVD *recv_evd_ptr, *request_evd_ptr; + DAT_RETURN dat_status; + ib_api_status_t ib_status; + ib_qp_create_t qp_create; + ib_pd_handle_t ib_pd_handle; + ib_cq_handle_t cq_recv; + ib_cq_handle_t cq_send; + dapl_ibal_ca_t *p_ca; + dapl_ibal_port_t *p_active_port; + ib_qp_attr_t qp_attr; + + attr = &ep_ptr->param.ep_attr; + + dapl_os_assert ( ep_ptr->param.pz_handle != NULL ); + + ib_pd_handle = ((DAPL_PZ *)ep_ptr->param.pz_handle)->pd_handle; + recv_evd_ptr = (DAPL_EVD *) ep_ptr->param.recv_evd_handle; + request_evd_ptr = (DAPL_EVD *) ep_ptr->param.request_evd_handle; + + cq_recv = IB_INVALID_HANDLE; + cq_send = IB_INVALID_HANDLE; + + dapl_os_assert ( recv_evd_ptr != DAT_HANDLE_NULL ); + { + cq_recv = (ib_cq_handle_t) recv_evd_ptr->ib_cq_handle; + + if ((cq_recv == IB_INVALID_HANDLE) && + ( 0 != (recv_evd_ptr->evd_flags & ~DAT_EVD_SOFTWARE_FLAG) )) + { + dat_status = dapls_ib_cq_late_alloc ( + ib_pd_handle, + recv_evd_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: failed to create CQ\n","DsQA"); + return (dat_status); + } + + dat_status = dapls_set_cq_notify (ia_ptr, recv_evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: failed to enable notify CQ\n","DsQA"); + return (dat_status); + } + + cq_recv = (ib_cq_handle_t) recv_evd_ptr->ib_cq_handle; + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsQA: alloc_recv_CQ = %p\n", cq_recv); + + } + } + + dapl_os_assert ( request_evd_ptr != DAT_HANDLE_NULL ); + { + cq_send = (ib_cq_handle_t) request_evd_ptr->ib_cq_handle; + + if ((cq_send == IB_INVALID_HANDLE) && + ( 0 != (request_evd_ptr->evd_flags & ~DAT_EVD_SOFTWARE_FLAG) )) + { + dat_status = dapls_ib_cq_late_alloc ( + ib_pd_handle, + request_evd_ptr); + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: failed to create CQ\n","DsQA"); + return (dat_status); + } + + dat_status = dapls_set_cq_notify (ia_ptr, request_evd_ptr); + + if (dat_status != DAT_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: failed to enable notify CQ\n","DsQA"); + return 
(dat_status);
+            }
+
+            cq_send = (ib_cq_handle_t) request_evd_ptr->ib_cq_handle;
+            dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                          "--> DsQA: alloc_send_CQ = %p\n", cq_send);
+        }
+    }
+
+    /*
+     * Get the CA structure
+     */
+    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
+
+    dapl_os_memzero (&qp_create, sizeof (qp_create));
+    qp_create.qp_type = IB_QPT_RELIABLE_CONN;
+    qp_create.sq_depth = attr->max_request_dtos;
+    qp_create.rq_depth = attr->max_recv_dtos;
+    qp_create.sq_sge = attr->max_request_iov;
+    qp_create.rq_sge = attr->max_recv_iov;
+    qp_create.h_sq_cq = cq_send;
+    qp_create.h_rq_cq = cq_recv;
+    qp_create.sq_signaled = FALSE;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                  "--> DsQA: sqd,iov=%d,%d rqd,iov=%d,%d\n",
+                  attr->max_request_dtos, attr->max_request_iov,
+                  attr->max_recv_dtos, attr->max_recv_iov);
+
+    ib_status = ib_create_qp (
+                        ib_pd_handle,
+                        &qp_create,
+                        (void *) ep_ctx_ptr /* context */,
+                        dapli_ib_qp_async_error_cb,
+                        &ep_ptr->qp_handle);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                      "--> DsQA: Create QP failed = %s\n", ib_get_err_str(ib_status));
+        return (DAT_INSUFFICIENT_RESOURCES);
+    }
+
+    dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                  "--> DsQA: EP=%p, tEVD=%p, rEVD=%p QP=%p\n",
+                  ep_ptr, ep_ptr->param.request_evd_handle,
+                  ep_ptr->param.recv_evd_handle,
+                  ep_ptr->qp_handle );
+
+    ep_ptr->qp_state = IB_QPS_RESET;
+
+    p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)ia_ptr->hca_ptr->port_num );
+
+    if (NULL == p_active_port)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsQA: Port %d is not available (line %d)\n",
+                      ia_ptr->hca_ptr->port_num, __LINE__);
+        return (DAT_INVALID_STATE);
+    }
+
+    ib_status = dapls_modify_qp_state_to_init (
+                        ep_ptr->qp_handle,
+                        &ep_ptr->param.ep_attr, p_active_port);
+
+    if ( ib_status != IB_SUCCESS )
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsQA: Change QP state to INIT failed = %s\n",
+                      ib_get_err_str(ib_status));
+        return (DAT_INVALID_HANDLE);
+    }
+    ib_status = ib_query_qp ( ep_ptr->qp_handle, &qp_attr );
+
+    ep_ptr->qp_state = qp_attr.state;
+    ep_ptr->qpn = qp_attr.num;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                  "--> DsQA: EP:%p new_QP = %p state = %#x\n", ep_ptr, ep_ptr->qp_handle, ep_ptr->qp_state);
+
+    return (DAT_SUCCESS);
+}
+
+
+/*
+ * dapl_ib_qp_free
+ *
+ * Free a QP
+ *
+ * Input:
+ *      *ia_ptr            pointer to IA structure
+ *      *ep_ptr            pointer to EP structure
+ *
+ * Output:
+ *      none
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *
+ */
+DAT_RETURN
+dapls_ib_qp_free (
+    IN DAPL_IA *ia_ptr,
+    IN DAPL_EP *ep_ptr )
+{
+
+    ib_qp_handle_t qp_handle;
+    UNREFERENCED_PARAMETER(ia_ptr);
+
+    dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                  "--> DsQF: free %p, state=%d\n",
+                  ep_ptr->qp_handle,ep_ptr->qp_state );
+
+    if (( ep_ptr->qp_handle != IB_INVALID_HANDLE ) &&
+        ( ep_ptr->qp_state != DAPL_QP_STATE_UNATTACHED ))
+    {
+        qp_handle = ep_ptr->qp_handle;
+        ep_ptr->qp_handle = IB_INVALID_HANDLE;
+        ep_ptr->qp_state = DAPL_QP_STATE_UNATTACHED;
+        ib_destroy_qp ( qp_handle, NULL /* callback */);
+        dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                      "--> DsQF: freed QP %p\n", qp_handle );
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapl_ib_qp_modify
+ *
+ * Set the QP to the parameters specified in an EP_PARAM
+ *
+ * We can't be sure what state the QP is in so we first obtain the state
+ * from the driver. The EP_PARAM structure that is provided has been
+ * sanitized such that only non-zero values are valid.
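+ *
+ * Only the send/receive queue depths are adjusted here, and only when the
+ * QP is already in RTR or RTS; for a QP in any earlier state the call
+ * succeeds and the new attributes are applied later by the connection
+ * path. A hypothetical caller growing the receive queue might look like
+ * this (sketch only; the field value is made up):
+ *
+ *      DAT_EP_ATTR attr;
+ *      dapl_os_memzero (&attr, sizeof (attr));
+ *      attr.max_recv_dtos = 64;    // only non-zero fields are applied
+ *      dat_status = dapls_ib_qp_modify (ia_ptr, ep_ptr, &attr);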
+ * + * Input: + * *ia_ptr pointer to DAPL IA + * *ep_ptr pointer to DAPL EP + * *ep_attr pointer to DAT EP attribute + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + * + */ +DAT_RETURN +dapls_ib_qp_modify ( + IN DAPL_IA *ia_ptr, + IN DAPL_EP *ep_ptr, + IN DAT_EP_ATTR *ep_attr ) +{ + ib_qp_attr_t qp_attr; + ib_api_status_t ib_status; + ib_qp_handle_t qp_handle; + ib_qp_state_t qp_state; + ib_qp_mod_t qp_mod; + ib_av_attr_t *p_av_attr; + ib_qp_opts_t *p_qp_opts; + uint32_t *p_sq_depth, *p_rq_depth; + DAT_BOOLEAN need_modify; + DAT_RETURN dat_status; + + qp_handle = ep_ptr->qp_handle; + need_modify = DAT_FALSE; + dat_status = DAT_SUCCESS; + if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA ) + { + dat_status = DAT_INVALID_HANDLE; + goto bail; + } + /* + * Query the QP to get the current state */ + ib_status = ib_query_qp ( qp_handle, &qp_attr ); + + if ( ib_status != IB_SUCCESS ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsIQM: Query QP failed = %s\n", ib_get_err_str(ib_status)); + dat_status = DAT_INTERNAL_ERROR; + goto bail; + } + + qp_state = qp_attr.state; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM: modify qp state=%d\n",qp_state); + /* + * Check if we have the right qp_state or not + */ + if ( (qp_state != IB_QPS_RTR ) && + (qp_state != IB_QPS_RTS ) ) + { + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM: postpone to modify qp to EP values later\n"); + dat_status = DAT_SUCCESS; + goto bail; + } + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + if (qp_state == IB_QPS_RTR) + { + p_av_attr = &qp_mod.state.rtr.primary_av; + p_qp_opts = &qp_mod.state.rtr.opts; + p_sq_depth = &qp_mod.state.rtr.sq_depth; + p_rq_depth = &qp_mod.state.rtr.rq_depth; + } + else + { + /* + * RTS does not have primary_av field + */ + p_av_attr = &qp_mod.state.rts.alternate_av; + p_qp_opts = &qp_mod.state.rts.opts; + p_sq_depth = &qp_mod.state.rts.sq_depth; + p_rq_depth = &qp_mod.state.rts.rq_depth; + } + + if ( (ep_attr->max_recv_dtos > 0) && + ((DAT_UINT32)ep_attr->max_recv_dtos != qp_attr.rq_depth) ) + { + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM: rq_depth modified (%d,%d)\n", + qp_attr.rq_depth, ep_attr->max_recv_dtos); + + *p_rq_depth = ep_attr->max_recv_dtos; + *p_qp_opts |= IB_MOD_QP_RQ_DEPTH; + need_modify = DAT_TRUE; + } + + if ( (ep_attr->max_request_dtos > 0) && + ((DAT_UINT32)ep_attr->max_request_dtos != qp_attr.sq_depth) ) + { + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM: sq_depth modified (%d,%d)\n", + qp_attr.sq_depth, ep_attr->max_request_dtos); + + *p_sq_depth = ep_attr->max_request_dtos; + *p_qp_opts |= IB_MOD_QP_SQ_DEPTH; + need_modify = DAT_TRUE; + } + + qp_mod.req_state = qp_state; + + if ( need_modify == DAT_TRUE ) + { + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + if ( ib_status != IB_SUCCESS) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> %s: ib_status = %d\n", "DsIQM", ib_status); + dat_status = DAT_INTERNAL_ERROR; + } + } + +bail: + + return dat_status; +} + + +ib_api_status_t +dapls_modify_qp_state_to_error ( + ib_qp_handle_t qp_handle ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t ib_status; + + dapl_dbg_log (DAPL_DBG_TYPE_WARN, + "--> DsIQM_ERR: QP state change\n"); + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + qp_mod.req_state = IB_QPS_ERROR; + + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + return (ib_status); +} + + +ib_api_status_t +dapls_modify_qp_state_to_reset ( + ib_qp_handle_t qp_handle ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t ib_status; + + dapl_dbg_log 
(DAPL_DBG_TYPE_EP, + "--> DsIQM_RESET: QP state change\n"); + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + qp_mod.req_state = IB_QPS_RESET; + + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + return (ib_status); +} + + +ib_api_status_t +dapls_modify_qp_state_to_init ( + IN ib_qp_handle_t qp_handle, + IN DAT_EP_ATTR *p_attr, + IN dapl_ibal_port_t *p_port ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t ib_status; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM_INIT: QP state change\n"); + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + qp_mod.req_state = IB_QPS_INIT; + qp_mod.state.init.primary_port = p_port->p_attr->port_num; + qp_mod.state.init.qkey = DAPL_IBAL_QKEY; + qp_mod.state.init.pkey_index = 0; + qp_mod.state.init.access_ctrl = + IB_AC_LOCAL_WRITE|IB_AC_RDMA_WRITE|IB_AC_MW_BIND; + if ((p_attr->max_rdma_read_in > 0) || + (p_attr->max_rdma_read_out > 0)) + { + qp_mod.state.init.access_ctrl |= IB_AC_RDMA_READ; + } + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + return (ib_status); +} + +ib_api_status_t +dapls_modify_qp_state_to_rtr ( + ib_qp_handle_t qp_handle, + ib_net32_t dest_qp, + ib_lid_t dest_lid, + dapl_ibal_port_t *p_port) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t ib_status; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM_RTR: QP state change\n"); + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + qp_mod.req_state = IB_QPS_RTR; + qp_mod.state.rtr.rq_psn = DAPL_IBAL_START_PSN; + qp_mod.state.rtr.dest_qp = dest_qp; + qp_mod.state.rtr.resp_res = 7; + qp_mod.state.rtr.rnr_nak_timeout = 7; + qp_mod.state.rtr.primary_av.sl = 0; + qp_mod.state.rtr.primary_av.dlid = dest_lid; + qp_mod.state.rtr.primary_av.port_num = p_port->p_attr->port_num; + qp_mod.state.rtr.primary_av.grh_valid = 0; /* FALSE */ + qp_mod.state.rtr.primary_av.path_bits = 0; + qp_mod.state.rtr.primary_av.static_rate = IB_PATH_RECORD_RATE_10_GBS; + qp_mod.state.rtr.primary_av.conn.path_mtu = p_port->p_attr->mtu; + qp_mod.state.rtr.primary_av.conn.rnr_retry_cnt = 7; + qp_mod.state.rtr.primary_av.conn.local_ack_timeout = 7; + qp_mod.state.rtr.primary_av.conn.seq_err_retry_cnt = 7; + + qp_mod.state.rtr.opts = IB_MOD_QP_PRIMARY_AV | IB_MOD_QP_RESP_RES; + + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + return (ib_status); +} + +ib_api_status_t +dapls_modify_qp_state_to_rts ( + ib_qp_handle_t qp_handle ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t ib_status; + + dapl_dbg_log (DAPL_DBG_TYPE_EP, + "--> DsIQM_RTS: QP state change\n"); + + dapl_os_memzero (&qp_mod, sizeof (ib_qp_mod_t)); + + qp_mod.req_state = IB_QPS_RTS; + qp_mod.state.rts.sq_psn = DAPL_IBAL_START_PSN; + qp_mod.state.rts.retry_cnt = 7; + qp_mod.state.rts.rnr_retry_cnt = 6; + qp_mod.state.rts.rnr_nak_timeout = 7; + qp_mod.state.rts.local_ack_timeout = 7; + qp_mod.state.rts.init_depth = 4; + + ib_status = ib_modify_qp (qp_handle, &qp_mod); + + return (ib_status); +} + + +/* + * dapls_ib_reinit_ep + * + * Move the QP to INIT state again. 
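+ * The EP must already be DISCONNECTED.  If the QP was destroyed during
+ * cleanup, it is simply re-created (dapls_ib_qp_alloc leaves a fresh QP
+ * in the INIT state); otherwise the existing QP is cycled RESET -> INIT
+ * in place.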
+ *
+ * Input:
+ *      ep_ptr             DAPL_EP
+ *
+ * Output:
+ *      none
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INVALID_STATE
+ *      DAT_INTERNAL_ERROR
+ *
+ */
+DAT_RETURN
+dapls_ib_reinit_ep (
+    IN DAPL_EP *ep_ptr)
+{
+    DAPL_IA *ia_ptr;
+    ib_api_status_t ib_status;
+    dapl_ibal_ca_t *p_ca;
+    dapl_ibal_port_t *p_active_port;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                  "--> DsIQM_REINIT: EP(%p) QP(%p) state change\n",
+                  ep_ptr, ep_ptr->qp_handle );
+
+    if ( ep_ptr->param.ep_state != DAT_EP_STATE_DISCONNECTED )
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DsIRE: EP invalid state(%d)\n", ep_ptr->param.ep_state);
+        return DAT_INVALID_STATE;
+    }
+
+    ia_ptr = ep_ptr->header.owner_ia;
+
+    /* Re-create QP if cleaned up, alloc will return init state */
+    if ( ep_ptr->qp_handle == IB_INVALID_HANDLE )
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_EP,
+                      "--> DsIRE: !EP(%p)->qp_handle, re-create QP\n",ep_ptr);
+        return ( dapls_ib_qp_alloc ( ia_ptr, ep_ptr, ep_ptr ) );
+    }
+
+    ib_status = dapls_modify_qp_state_to_reset ( ep_ptr->qp_handle);
+
+    if ( ib_status != IB_SUCCESS )
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DsIRE: failed to move qp to RESET status = %s\n",
+                      ib_get_err_str(ib_status));
+        return DAT_INTERNAL_ERROR;
+    }
+
+    ep_ptr->qp_state = IB_QPS_RESET;
+
+    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
+    p_active_port = dapli_ibal_get_port ( p_ca, (uint8_t)ia_ptr->hca_ptr->port_num );
+
+    if (NULL == p_active_port)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DsIRE: Port %d is not available (line %d)\n",
+                      ia_ptr->hca_ptr->port_num, __LINE__);
+        return DAT_INTERNAL_ERROR;
+    }
+
+    /* May fail if QP still RESET and in timewait, keep in reset state */
+    ib_status = dapls_modify_qp_state_to_init ( ep_ptr->qp_handle,
+                                                &ep_ptr->param.ep_attr,
+                                                p_active_port);
+    if ( ib_status != IB_SUCCESS )
+    {
+        ep_ptr->qp_state = IB_QPS_RESET;
+
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR, "--> DsIRE: failed to move qp to INIT status = %s\n",
+                      ib_get_err_str(ib_status));
+        return DAT_INTERNAL_ERROR;
+    }
+
+    ep_ptr->qp_state = IB_QPS_INIT;
+
+    return DAT_SUCCESS;
+
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
+
diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.c b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.c
new file mode 100644
index 00000000..e39cb5de
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.c
@@ -0,0 +1,2477 @@
+
+/*
+ * Copyright (c) 2002, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under the terms of the "Common Public
+ * License" a copy of which is in the file LICENSE.txt in the root
+ * directory. The license is also available from the Open Source
+ * Initiative, see http://www.opensource.org/licenses/cpl.php.
+ *
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ibal_util.c
+ *
+ * PURPOSE: Utility routines for access to IBAL APIs
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+#include "dapl_evd_util.h"
+#include "dapl_cr_util.h"
+#include "dapl_lmr_util.h"
+#include "dapl_rmr_util.h"
+#include "dapl_cookie.h"
+#include "dapl_ring_buffer_util.h"
+
+#ifndef NO_NAME_SERVICE
+#include "dapl_name_service.h"
+#endif /* NO_NAME_SERVICE */
+
+#define DAPL_IBAL_MAX_CA 4
+#define DAT_ADAPTER_NAME "InfiniHost (Tavor)"
+#define DAT_VENDOR_NAME "Mellanox Technology Inc."
+
+/*
+ * Root data structure for DAPL_IBAL.
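+ * Every opened CA is linked on dapl_ibal_root.ca_head under ca_lock; each
+ * CA in turn keeps its own port list and EVD-callback list, each guarded
+ * by its own spinlock.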
+ */ +dapl_ibal_root_t dapl_ibal_root; +DAPL_HCA_NAME dapl_ibal_hca_name_array [DAPL_IBAL_MAX_CA] = + {"IbalHca0", "IbalHca1", "IbalHca2", "IbalHca3"}; +ib_net64_t *gp_ibal_ca_guid_tbl = NULL; + +/* + * DAT spec does not tie max_mtu_size with IB MTU + * +static ib_net32_t dapl_ibal_mtu_table[6] = {0, 256, 512, 1024, 2048, 4096}; + */ + +int g_loopback_connection = 0; + + +static cl_status_t +dapli_init_root_ca_list( + IN dapl_ibal_root_t *root ) +{ + cl_status_t status; + + cl_qlist_init (&root->ca_head); + status = cl_spinlock_init (&root->ca_lock); + + if (status == CL_SUCCESS) + { + /* + * Get the time ready to go but don't start here + */ + root->shutdown = FALSE; + root->initialized = TRUE; + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiIRCL: cl_spinlock_init returned %d\n", status); + root->initialized = FALSE; + } + + root->h_al = NULL; + + return (status); +} + + +static cl_status_t +dapli_destroy_root_ca_list( + IN dapl_ibal_root_t *root ) +{ + + root->initialized = FALSE; + + /* + * At this point the lock should not be necessary + */ + if (!cl_is_qlist_empty (&root->ca_head) ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> Destroying nonempty ca list (%s)\n", + "DiDRCL"); + } + cl_spinlock_destroy (&root->ca_lock); + + return CL_SUCCESS; +} + + +static void +dapli_shutdown_port_access( + IN dapl_ibal_ca_t *ca ) +{ + dapl_ibal_port_t *p_port; + + TAKE_LOCK( ca->port_lock ); + { + while ( ! cl_is_qlist_empty( &ca->port_head ) ) + { + p_port = (dapl_ibal_port_t *)cl_qlist_remove_head( &ca->port_head ); + RELEASE_LOCK( ca->port_lock ); + { + REMOVE_REFERENCE( &p_port->refs ); + REMOVE_REFERENCE( &p_port->ca->refs ); + + dapl_os_free (p_port, sizeof (dapl_ibal_port_t)); + } + TAKE_LOCK( ca->port_lock ); + } + } + RELEASE_LOCK( ca->port_lock ); +} + + +static void dapli_shutdown_ca_access (void) +{ + dapl_ibal_ca_t *ca; + + if ( dapl_ibal_root.initialized == FALSE ) + { + goto destroy_root; + } + + TAKE_LOCK (dapl_ibal_root.ca_lock); + { + while ( ! 
cl_is_qlist_empty (&dapl_ibal_root.ca_head) ) + { + ca = (dapl_ibal_ca_t *) cl_qlist_remove_head (&dapl_ibal_root.ca_head); + + if (ca->p_ca_attr) + { + dapl_os_free (ca->p_ca_attr, sizeof (ib_ca_attr_t)); + } + + + RELEASE_LOCK (dapl_ibal_root.ca_lock); + { + dapli_shutdown_port_access (ca); + REMOVE_REFERENCE (&ca->refs); + } + TAKE_LOCK (dapl_ibal_root.ca_lock); + } + } + RELEASE_LOCK (dapl_ibal_root.ca_lock); + +destroy_root: + /* + * Destroy the root CA list and list lock + */ + dapli_destroy_root_ca_list (&dapl_ibal_root); + + /* + * Signal we're all done and wake any waiter + */ + dapl_ibal_root.shutdown = FALSE; +} + + +dapl_ibal_evd_cb_t * +dapli_find_evd_cb_by_context( + IN void *context, + IN dapl_ibal_ca_t *ca) +{ + dapl_ibal_evd_cb_t *evd_cb = NULL; + + TAKE_LOCK( ca->evd_cb_lock ); + + evd_cb = (dapl_ibal_evd_cb_t *) cl_qlist_head( &ca->evd_cb_head ); + while ( &evd_cb->next != cl_qlist_end( &ca->evd_cb_head ) ) + { + if ( context == evd_cb->context) + { + goto found; + } + + /* + * Try again + */ + evd_cb = (dapl_ibal_evd_cb_t *) cl_qlist_next( &evd_cb->next ); + } + /* + * No joy + */ + evd_cb = NULL; + +found: + + RELEASE_LOCK( ca->evd_cb_lock ); + + return ( evd_cb ); +} + + +static cl_status_t +dapli_init_ca_evd_cb_list( + IN dapl_ibal_ca_t *ca ) +{ + cl_status_t status; + + cl_qlist_init( &ca->evd_cb_head ); + status = cl_spinlock_init( &ca->evd_cb_lock ); + if ( status != CL_SUCCESS ) + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiICECL: cl_spinlock_init returned %d\n", status); + return ( status ); +} + + +static cl_status_t +dapli_init_ca_port_list( + IN dapl_ibal_ca_t *ca ) +{ + cl_status_t status; + + cl_qlist_init( &ca->port_head ); + status = cl_spinlock_init( &ca->port_lock ); + if ( status != CL_SUCCESS ) + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiICPL: cl_spinlock_init returned %d\n", status); + return ( status ); +} + +dapl_ibal_port_t * +dapli_ibal_get_port ( + IN dapl_ibal_ca_t *p_ca, + IN uint8_t port_num) +{ + cl_list_item_t *p_active_port = NULL; + + TAKE_LOCK (p_ca->port_lock); + for ( p_active_port = cl_qlist_head( &p_ca->port_head ); + p_active_port != cl_qlist_end ( &p_ca->port_head); + p_active_port = cl_qlist_next ( p_active_port ) ) + { + if (((dapl_ibal_port_t *)p_active_port)->p_attr->port_num == port_num) + break; + } + RELEASE_LOCK (p_ca->port_lock); + + return (dapl_ibal_port_t *)p_active_port; +} + +static void +dapli_ibal_cq_async_error_callback( + IN ib_async_event_rec_t* p_err_rec ) +{ + DAPL_EVD *evd_ptr = (DAPL_EVD*)((void *)p_err_rec->context); + DAPL_EVD *async_evd_ptr; + DAPL_IA *ia_ptr; + dapl_ibal_ca_t *p_ca; + dapl_ibal_evd_cb_t *evd_cb; + + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: CQ error %d for EVD context %p\n", + p_err_rec->code, p_err_rec->context); + + if (DAPL_BAD_HANDLE (evd_ptr, DAPL_MAGIC_EVD)) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: invalid EVD %p \n", evd_ptr); + return; + } + + ia_ptr = evd_ptr->header.owner_ia; + async_evd_ptr = ia_ptr->async_error_evd; + if (async_evd_ptr == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: can't find async_error_evd on %s HCA\n", + (ia_ptr->header.provider)->device_name ); + return; + } + + p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle; + if (p_ca == NULL) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: can't find %s HCA\n", + (ia_ptr->header.provider)->device_name); + return; + } + + /* find CQ error callback using ia_ptr for context */ + evd_cb = dapli_find_evd_cb_by_context ( async_evd_ptr, p_ca ); + if ((evd_cb == NULL) || 
(evd_cb->pfn_async_cq_err_cb == NULL))
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCqAEC: no ERROR cb on %p found \n", p_ca);
+        return;
+    }
+
+    /* maps to dapl_evd_cq_async_error_callback(), context is EVD */
+    evd_cb->pfn_async_cq_err_cb( (ib_hca_handle_t)p_ca,
+                                 (ib_error_record_t*)&p_err_rec->code, evd_ptr);
+
+}
+
+void
+dapli_ibal_ca_async_error_callback(
+    IN ib_async_event_rec_t* p_err_rec )
+{
+    dapl_ibal_ca_t *p_ca = (dapl_ibal_ca_t*)((void *)p_err_rec->context);
+    dapl_ibal_evd_cb_t *evd_cb;
+    DAPL_IA *ia_ptr;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: CA error %d for context %p\n",
+                  p_err_rec->code, p_err_rec->context);
+
+    if (p_ca == NULL)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: invalid p_ca (%p) in async event rec\n",p_ca);
+        return;
+    }
+
+    ia_ptr = (DAPL_IA*)p_ca->ia_ptr;
+    if (ia_ptr == NULL)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: invalid ia_ptr in %p ca \n", p_ca );
+        return;
+    }
+
+    if (ia_ptr->async_error_evd == NULL)
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: can't find async_error_evd on %s HCA\n",
+                      (ia_ptr->header.provider)->device_name );
+        return;
+    }
+
+    /* find QP error callback using p_ca for context */
+    evd_cb = dapli_find_evd_cb_by_context (ia_ptr->async_error_evd, p_ca);
+    if ((evd_cb == NULL) || (evd_cb->pfn_async_err_cb == NULL))
+    {
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,"--> DiCaAEC: no ERROR cb on %p found \n", p_ca);
+        return;
+    }
+
+    /* maps to dapl_evd_un_async_error_callback(), context is async_evd */
+    evd_cb->pfn_async_err_cb( (ib_hca_handle_t)p_ca,
+                              (ib_error_record_t*)&p_err_rec->code,
+                              ia_ptr->async_error_evd);
+
+}
+
+
+static dapl_ibal_port_t *
+dapli_alloc_port(
+    IN dapl_ibal_ca_t *ca,
+    IN ib_port_attr_t *ib_port )
+{
+    dapl_ibal_port_t *p_port = NULL;
+    if (ca->h_ca == NULL )
+    {
+        return NULL;
+    }
+    /*
+     * Allocate the port structure memory. This will also deal with
+     * copying the ib_port_attr_t, including the GID and P_Key tables
+     */
+    p_port = dapl_os_alloc ( sizeof(dapl_ibal_port_t ) );
+
+    if ( p_port )
+    {
+        dapl_os_memzero (p_port, sizeof(dapl_ibal_port_t ) );
+
+        /*
+         * We're good to go after initializing reference.
+         */
+        INIT_REFERENCE( &p_port->refs, 1, p_port, NULL /* pfn_destructor */ );
+
+        p_port->p_attr = ib_port;
+    }
+    return ( p_port );
+}
+
+static void
+dapli_add_active_port(
+    IN dapl_ibal_ca_t *ca)
+{
+    dapl_ibal_port_t *p_port;
+    ib_port_attr_t *p_port_attr;
+    ib_ca_attr_t *p_ca_attr;
+    int i;
+
+    p_ca_attr = ca->p_ca_attr;
+
+    dapl_os_assert (p_ca_attr != NULL);
+
+    for (i = 0; i < p_ca_attr->num_ports; i++)
+    {
+        p_port_attr = &p_ca_attr->p_port_attr[i];
+
+        {
+            p_port = dapli_alloc_port( ca, p_port_attr );
+            if ( p_port )
+            {
+                TAKE_REFERENCE (&ca->refs);
+
+                /*
+                 * Record / update attributes
+                 */
+                p_port->p_attr = p_port_attr;
+
+                /*
+                 * Remember the parent CA, keeping the reference we took above
+                 */
+                p_port->ca = ca;
+
+                /*
+                 * We're good to go - Add the new port to the list on the CA
+                 */
+                LOCK_INSERT_TAIL( ca->port_lock, ca->port_head, p_port->next );
+            }
+            else
+            {
+                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Could not allocate dapl_ibal_port_t\n",
+                               "DiAAP");
+            }
+        }
+        dapl_dbg_log( DAPL_DBG_TYPE_UTIL, "--> DiAAP: Port %d logical link is %s lid = %#x\n",
+                      p_port_attr->port_num,
+                      ( p_port_attr->link_state != IB_LINK_ACTIVE ?
"DOWN": "UP" ), + CL_HTON16(p_port_attr->lid) ); + + } /* for loop */ + + return; +} + +static dapl_ibal_ca_t * +dapli_alloc_ca( + IN ib_al_handle_t h_al, + IN ib_net64_t ca_guid) +{ + dapl_ibal_ca_t *p_ca; + ib_api_status_t status; + uint32_t attr_size; + + /* + * Allocate the CA structure + */ + p_ca = dapl_os_alloc( sizeof(dapl_ibal_ca_t) ); + dapl_os_memzero (p_ca, sizeof(dapl_ibal_ca_t) ); + + if ( p_ca ) + { + /* + * Now we pass dapli_ibal_ca_async_error_callback as the + * async error callback + */ + status = ib_open_ca( h_al, + ca_guid, + dapli_ibal_ca_async_error_callback, + p_ca, + &p_ca->h_ca ); + if ( status != IB_SUCCESS ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiAC: ib_open_ca returned %d\n", status); + dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t)); + return (NULL); + } + + /* + * Get port list lock and list head initialized + */ + if (( dapli_init_ca_port_list( p_ca ) != CL_SUCCESS ) || + ( dapli_init_ca_evd_cb_list( p_ca ) != CL_SUCCESS )) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: dapli_init_ca_port_list returned failed\n", + "DiAC"); + goto close_and_free_ca; + } + + attr_size = 0; + status = ib_query_ca (p_ca->h_ca, NULL, &attr_size); + if (status != IB_INSUFFICIENT_MEMORY) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DiAC: ib_query_ca returned failed status = %d\n", + status); + goto close_and_free_ca; + } + + p_ca->p_ca_attr = dapl_os_alloc ((int)attr_size); + if (p_ca->p_ca_attr == NULL) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: dapli_alloc_ca failed to alloc memory\n", + "DiAC"); + goto close_and_free_ca; + } + + status = ib_query_ca ( + p_ca->h_ca, + p_ca->p_ca_attr, + &attr_size); + if (status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> ib_query_ca returned failed status = %d\n", status); + dapl_os_free (p_ca->p_ca_attr, (int)attr_size); + goto close_and_free_ca; + } + + p_ca->ca_attr_size = attr_size; + + INIT_REFERENCE( &p_ca->refs, 1, p_ca, NULL /* pfn_destructor */ ); + + dapli_add_active_port (p_ca); + + /* + * We're good to go + */ + return ( p_ca ); + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Error allocating CA structure\n","DiAC"); + return ( NULL ); + } + +close_and_free_ca: + /* + * Close the CA. 
+ */ + (void) ib_close_ca ( p_ca->h_ca, NULL /* callback */); + dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t)); + + /* + * If we get here, there was an initialization failure + */ + return ( NULL ); +} + + +static dapl_ibal_ca_t * +dapli_add_ca( + IN ib_al_handle_t h_al, + IN ib_net64_t ca_guid) +{ + dapl_ibal_ca_t *p_ca; + + /* + * Allocate a CA structure + */ + p_ca = dapli_alloc_ca( h_al, ca_guid ); + if ( p_ca ) + { + /* + * Add the new CA to the list + */ + LOCK_INSERT_TAIL( dapl_ibal_root.ca_lock, + dapl_ibal_root.ca_head, p_ca->next ); + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: Could not allocate dapl_ibal_ca_t \n","DiAA"); + } + + return ( p_ca ); +} + + +int32_t dapls_ib_init (void) +{ + ib_api_status_t status; + + /* + * Initialize the root structure + */ + if (dapli_init_root_ca_list (&dapl_ibal_root) == CL_SUCCESS) + { + /* + * Register with the access layer + */ + status = ib_open_al (&dapl_ibal_root.h_al); + + if (status == IB_SUCCESS) + { + intn_t guid_count; + + status = ib_get_ca_guids (dapl_ibal_root.h_al, NULL, &(size_t)guid_count); + if (status != IB_INSUFFICIENT_MEMORY) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_get_ca_guids failed = %d\n", status); + return -1; + } + + if (guid_count == 0) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: found NO HCA in the system\n", + "DsII"); + return -1; + } + + if (guid_count > DAPL_IBAL_MAX_CA) + { + guid_count = DAPL_IBAL_MAX_CA; + } + + gp_ibal_ca_guid_tbl = ( ib_net64_t*)dapl_os_alloc ((int)(guid_count * + sizeof (ib_net64_t)) ); + + if (gp_ibal_ca_guid_tbl == NULL) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc gp_ibal_ca_guid_tbl\n", + "DsII"); + + return -1; + } + + status = ib_get_ca_guids ( dapl_ibal_root.h_al, + gp_ibal_ca_guid_tbl, + &(size_t)guid_count ); + + + if ( status != IB_SUCCESS ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_get_ca_guids failed = %s\n", + ib_get_err_str(status) ); + return -1; + } + + dapl_dbg_log ( DAPL_DBG_TYPE_UTIL, + "--> DsII: Success open AL & found %d HCA avail\n", + guid_count); + return 0; + } + else + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsII: ib_open_al returned %s\n", ib_get_err_str(status)); + /* + * Undo CA list + */ + dapli_destroy_root_ca_list (&dapl_ibal_root); + } + } + return -1; +} + + +int32_t dapls_ib_release (void) +{ + dapl_ibal_root.shutdown = TRUE; + + dapli_shutdown_ca_access(); + + /* + * If shutdown not complete, wait for it + */ + if (dapl_ibal_root.shutdown) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIR: timeout waiting for completion\n"); + } + + if ( dapl_ibal_root.h_al != NULL ) + { + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIR: ib_close_al called\n"); + ib_close_al (dapl_ibal_root.h_al); + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIR: ib_close_al return\n"); + dapl_ibal_root.h_al = NULL; + } + + return 0; +} + + +/* + * dapls_ib_enum_hcas + * + * Enumerate all HCAs on the system + * + * Input: + * none + * + * Output: + * hca_names Array of hca names + * total_hca_count + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_enum_hcas ( + OUT DAPL_HCA_NAME **hca_names, + OUT DAT_COUNT *total_hca_count, + IN const char *vendor ) +{ + intn_t guid_count; + ib_api_status_t ib_status; + UNREFERENCED_PARAMETER(vendor); + + ib_status = ib_get_ca_guids (dapl_ibal_root.h_al, NULL, &(size_t)guid_count); + if (ib_status != IB_INSUFFICIENT_MEMORY) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIEH: ib_get_ca_guids failed status = %d\n", ib_status); + return 
dapl_ib_status_convert (ib_status);
+    }
+
+    if (guid_count == 0)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: ib_get_ca_guids no HCA in the system\n",
+                       "DsIEH");
+        return (DAT_PROVIDER_NOT_FOUND);
+    }
+
+    if (guid_count > DAPL_IBAL_MAX_CA)
+    {
+        guid_count = DAPL_IBAL_MAX_CA;
+    }
+
+    gp_ibal_ca_guid_tbl = (ib_net64_t *)dapl_os_alloc ((int)(guid_count * sizeof (ib_net64_t)) );
+
+    if (gp_ibal_ca_guid_tbl == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc resources @line%d\n",
+                       "DsIEH", __LINE__);
+        return (DAT_INSUFFICIENT_RESOURCES);
+    }
+
+    ib_status = ib_get_ca_guids (
+                    dapl_ibal_root.h_al, gp_ibal_ca_guid_tbl, &(size_t)guid_count);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIEH: ib_get_ca_guids failed status = %s\n",
+                       ib_get_err_str(ib_status));
+        return dapl_ib_status_convert (ib_status);
+    }
+
+    *hca_names = (DAPL_HCA_NAME*)dapl_os_alloc ((int)(guid_count * sizeof (DAPL_HCA_NAME)) );
+
+    if (*hca_names == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can not alloc resources @line%d\n",
+                       "DsIEH", __LINE__);
+        return (DAT_INSUFFICIENT_RESOURCES);
+    }
+
+    dapl_os_memcpy (*hca_names,
+                    dapl_ibal_hca_name_array,
+                    (int)(guid_count * sizeof (DAPL_HCA_NAME)) );
+
+    *total_hca_count = (DAT_COUNT)guid_count;
+
+    {
+        int i;
+
+        for (i = 0; i < guid_count; i++)
+            dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                          "--> DsIEH: %d) hca_names = %s\n",
+                          i, dapl_ibal_hca_name_array[i]);
+    }
+
+    return (DAT_SUCCESS);
+}
+
+
+
+IB_HCA_NAME
+dapl_ib_convert_name(
+    IN char *name)
+{
+    int i;
+
+    if (gp_ibal_ca_guid_tbl == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DICN: found no HCA with name %s\n", name);
+        return 0;
+    }
+
+    for (i = 0; i < DAPL_IBAL_MAX_CA; i++)
+    {
+        if (strcmp (name, dapl_ibal_hca_name_array[i]) == 0)
+        {
+            break;
+        }
+    }
+
+    if (i >= DAPL_IBAL_MAX_CA)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DICN: can't find any HCA with name %s\n", name);
+        return 0;
+    }
+
+    return (gp_ibal_ca_guid_tbl[i]);
+}
+
+
+/*
+ * dapls_ib_open_hca
+ *
+ * Open HCA
+ *
+ * Input:
+ *      hca_name           provider device name (CA GUID)
+ *
+ * Output:
+ *      p_ib_hca_handle    pointer to the opened HCA handle
+ *
+ * Return:
+ *      DAT_SUCCESS
+ *      DAT_INSUFFICIENT_RESOURCES
+ *
+ */
+DAT_RETURN dapls_ib_open_hca (
+    IN IB_HCA_NAME hca_name,
+    OUT ib_hca_handle_t *p_ib_hca_handle)
+{
+    dapl_ibal_ca_t *p_ca;
+
+    if (gp_ibal_ca_guid_tbl == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIOH: found no HCA with ca_guid" F64x "\n", hca_name);
+        return (DAT_PROVIDER_NOT_FOUND);
+    }
+
+    p_ca = dapli_add_ca (dapl_ibal_root.h_al, hca_name);
+
+    if (p_ca == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIOH: can not create ca with ca_guid" F64x "\n", hca_name);
+        return (DAT_INSUFFICIENT_RESOURCES);
+    }
+
+    *p_ib_hca_handle = (ib_hca_handle_t) p_ca;
+
+    return (DAT_SUCCESS);
+}
+
+
+/*
+ * dapls_ib_close_hca
+ *
+ * Close HCA
+ *
+ * Input:
+ *      ib_hca_handle      provider HCA handle
+ *
+ * Output:
+ *      none
+ *
+ * Return:
+ *      DAT_SUCCESS
+ *      DAT_INSUFFICIENT_RESOURCES
+ *
+ */
+DAT_RETURN dapls_ib_close_hca (
+    IN ib_hca_handle_t ib_hca_handle)
+{
+    dapl_ibal_ca_t *p_ca;
+
+    p_ca = (dapl_ibal_ca_t *) ib_hca_handle;
+
+    /*
+     * Remove it from the list
+     */
+    TAKE_LOCK (dapl_ibal_root.ca_lock);
+    {
+        cl_qlist_remove_item (&dapl_ibal_root.ca_head, &p_ca->next);
+    }
+    RELEASE_LOCK (dapl_ibal_root.ca_lock);
+
+    dapli_shutdown_port_access (p_ca);
+
+    /*
+     * Remove the constructor reference
+     */
+    REMOVE_REFERENCE (&p_ca->refs);
+
+    cl_spinlock_destroy
(&p_ca->port_lock); + cl_spinlock_destroy (&p_ca->evd_cb_lock); + + if (p_ca->p_ca_attr) + dapl_os_free (p_ca->p_ca_attr, sizeof (ib_ca_attr_t)); + + (void) ib_close_ca (p_ca->h_ca, NULL /* close_callback */); + + dapl_os_free (p_ca, sizeof (dapl_ibal_ca_t)); + + return (DAT_SUCCESS); +} + +/* + * dapli_ibal_cq_competion_callback + * + * Completion callback for a CQ + * + * Input: + * cq_context User context + * + * Output: + * none + * + * Returns: + */ +static void +dapli_ib_cq_completion_cb ( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + DAPL_EVD *evd_ptr; + dapl_ibal_ca_t *p_ca; + + evd_ptr = (DAPL_EVD *) cq_context; + + dapl_dbg_log (DAPL_DBG_TYPE_CALLBACK, + "--> DiICCC: cq_completion_cb evd %p CQ %p\n", + evd_ptr, evd_ptr->ib_cq_handle); + + dapl_os_assert (evd_ptr != DAT_HANDLE_NULL); + + p_ca = (dapl_ibal_ca_t *) evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle; + + dapl_os_assert( h_cq == evd_ptr->ib_cq_handle ); + + dapl_evd_dto_callback ( + (ib_hca_handle_t) p_ca, + h_cq, + cq_context); + return; +} + + +/* + * dapl_ib_cq_late_alloc + * + * Alloc a CQ + * + * Input: + * ia_handle IA handle + * evd_ptr pointer to EVD struct + * cqlen minimum QLen + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_cq_late_alloc ( + IN ib_pd_handle_t pd_handle, + IN DAPL_EVD *evd_ptr) +{ + ib_cq_create_t cq_create; + ib_api_status_t ib_status; + DAT_RETURN dat_status; + dapl_ibal_ca_t *ibal_ca = (dapl_ibal_ca_t *)evd_ptr->header.owner_ia->hca_ptr->ib_hca_handle; + + dat_status = DAT_SUCCESS; + cq_create.size = evd_ptr->qlen; + + + if (evd_ptr->cq_wait_obj_handle) + { + cq_create.h_wait_obj = evd_ptr->cq_wait_obj_handle; + cq_create.pfn_comp_cb = NULL; + } + else + { + cq_create.h_wait_obj = NULL; + cq_create.pfn_comp_cb = dapli_ib_cq_completion_cb; + } + + ib_status = ib_create_cq ( + (ib_ca_handle_t)ibal_ca->h_ca, + &cq_create, + evd_ptr /* context */, + dapli_ibal_cq_async_error_callback, + &evd_ptr->ib_cq_handle); + + dat_status = dapl_ib_status_convert (ib_status); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to create CQ for EVD %p\n", evd_ptr); + goto bail; + } + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsCQ_alloc: pd=%p cq=%p Csz=%d Qln=%d \n", + pd_handle, evd_ptr->ib_cq_handle, + cq_create.size, evd_ptr->qlen ); + + + if ( cq_create.size > (uint32_t)evd_ptr->qlen ) + { + DAT_COUNT pending_cnt, free_cnt; + DAT_EVENT *event_ptr; + DAT_COUNT i; + + dapl_os_lock ( &evd_ptr->header.lock ); + + pending_cnt = dapls_rbuf_count ( &evd_ptr->pending_event_queue ); + free_cnt = dapls_rbuf_count ( &evd_ptr->free_event_queue ); + + if ( pending_cnt == 0 ) + { + dat_status = dapls_rbuf_realloc ( &evd_ptr->pending_event_queue, + cq_create.size ); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD pending_event_queue" + "from %d to %d\n", + evd_ptr->qlen, cq_create.size ); + dat_status = DAT_SUCCESS; + } + + } + + + for (i = 0; i < free_cnt; i++) + { + event_ptr = (DAT_EVENT *) + dapls_rbuf_remove ( &evd_ptr->free_event_queue ); + } + + dat_status = dapls_rbuf_realloc ( &evd_ptr->free_event_queue, + cq_create.size ); + + if ( dat_status != DAT_SUCCESS ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD free_event_queue" + "from %d to %d\n", + evd_ptr->qlen, cq_create.size ); + + dapl_os_unlock ( &evd_ptr->header.lock ); + + dapls_ib_cq_free ( evd_ptr->header.owner_ia, evd_ptr); + + goto 
bail; + } + + if (evd_ptr->events) + { + evd_ptr->events = (void *) + dapl_os_realloc ( + evd_ptr->events, + cq_create.size * sizeof (DAT_EVENT)); + } + else + { + evd_ptr->events = (void *) + dapl_os_alloc ( + cq_create.size * sizeof (DAT_EVENT)); + } + + if ( evd_ptr->events == NULL ) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsICLA: failed to resize EVD events buffers" + "from %d to %d\n", + evd_ptr->qlen, cq_create.size ); + dat_status = DAT_INSUFFICIENT_RESOURCES; + + dapl_os_unlock ( &evd_ptr->header.lock ); + + dapls_ib_cq_free ( evd_ptr->header.owner_ia, evd_ptr); + + goto bail; + } + + event_ptr = evd_ptr->events; + + /* add events to free event queue */ + for (i = 0; (uint32_t)i < cq_create.size; i++) + { + dapls_rbuf_add (&evd_ptr->free_event_queue, (void *)event_ptr); + event_ptr++; + } + + dapl_dbg_log (DAPL_DBG_TYPE_EVD, + "--> DsICLA: resize EVD events buffers from %d to %d\n", + evd_ptr->qlen, cq_create.size); + + evd_ptr->qlen = cq_create.size; + + dapl_os_unlock ( &evd_ptr->header.lock ); + + } + +bail: + return dat_status; +} + +/* + * dapl_ib_cq_free + * + * Dealloc a CQ + * + * Input: + * ia_handle IA handle + * evd_ptr pointer to EVD struct + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_cq_free ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr) +{ + ib_api_status_t ib_status; + + if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA ) + { + return DAT_INVALID_HANDLE; + } + + ib_status = ib_destroy_cq (evd_ptr->ib_cq_handle, + /* destroy_callback */ NULL); + + return dapl_ib_status_convert (ib_status); +} + +/* + * dapls_cq_resize + * + * Resize CQ completion notifications + * + * Input: + * ia_handle IA handle + * evd_ptr pointer to EVD struct + * cqlen minimum QLen + * + * Output: + * cqlen may round up for optimal memory boundaries + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INSUFFICIENT_RESOURCES + * + */ + +DAT_RETURN +dapls_ib_cq_resize ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr, + IN DAT_COUNT *cqlen ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA ) + { + return DAT_INVALID_HANDLE; + } + /* + * Resize CQ only if CQ handle is valid, may be delayed waiting + * for PZ allocation with IBAL + */ +#if defined(_VENDOR_IBAL_) + if ( evd_ptr->ib_cq_handle != IB_INVALID_HANDLE ) +#endif /* _VENDOR_IBAL_ */ + { + ib_status = ib_modify_cq ( evd_ptr->ib_cq_handle, + (uint32_t *)cqlen ); + dapl_dbg_log (DAPL_DBG_TYPE_EVD, + "ib_modify_cq ( new cqlen = %d, status=%d ) \n", + *cqlen, ib_status ); + } + return dapl_ib_status_convert (ib_status); +} + + +/* + * dapl_set_cq_notify + * + * Set up CQ completion notifications + * + * Input: + * ia_handle IA handle + * evd_ptr pointer to EVD struct + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_set_cq_notify ( + IN DAPL_IA *ia_ptr, + IN DAPL_EVD *evd_ptr) +{ + ib_api_status_t ib_status; + if ( ia_ptr == NULL || ia_ptr->header.magic != DAPL_MAGIC_IA ) + { + return DAT_INVALID_HANDLE; + } + ib_status = ib_rearm_cq ( + evd_ptr->ib_cq_handle, + FALSE /* next event but not solicited event */ ); + + return dapl_ib_status_convert (ib_status); +} + + +/* + * dapls_ib_cqd_create + * + * Set up CQ notification event thread + * + * Input: + * ia_handle HCA handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * 
DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_cqd_create ( + IN DAPL_HCA *hca_ptr) +{ + /* + * We do not have CQD concept + */ + hca_ptr->ib_cqd_handle = IB_INVALID_HANDLE; + + return DAT_SUCCESS; +} + + +/* + * dapl_cqd_destroy + * + * Destroy CQ notification event thread + * + * Input: + * ia_handle IA handle + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INVALID_HANDLE + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_cqd_destroy ( + IN DAPL_HCA *hca_ptr) +{ + hca_ptr->ib_cqd_handle = IB_INVALID_HANDLE; + return (DAT_SUCCESS); +} + + +/* + * dapl_ib_pd_alloc + * + * Alloc a PD + * + * Input: + * ia_handle IA handle + * PZ_ptr pointer to PZEVD struct + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_pd_alloc ( + IN DAPL_IA *ia, + IN DAPL_PZ *pz) +{ + ib_api_status_t ib_status; + dapl_ibal_ca_t *p_ca; + + p_ca = (dapl_ibal_ca_t *) ia->hca_ptr->ib_hca_handle; + ib_status = ib_alloc_pd ( + p_ca->h_ca, + IB_PDT_NORMAL, + ia, + &pz->pd_handle); + + return dapl_ib_status_convert (ib_status); +} + + +/* + * dapl_ib_pd_free + * + * Free a PD + * + * Input: + * PZ_ptr pointer to PZ struct + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_pd_free ( + IN DAPL_PZ *pz) +{ + ib_api_status_t ib_status; + + ib_status = ib_dealloc_pd (pz->pd_handle, /* destroy_callback */ NULL); + + pz->pd_handle = IB_INVALID_HANDLE; + + return dapl_ib_status_convert (ib_status); +} + + +/* + * dapl_ib_mr_register + * + * Register a virtual memory region + * + * Input: + * ia_handle IA handle + * lmr pointer to dapl_lmr struct + * virt_addr virtual address of beginning of mem region + * length length of memory region + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_mr_register ( + IN DAPL_IA *ia, + IN DAPL_LMR *lmr, + IN DAT_PVOID virt_addr, + IN DAT_VLEN length, + IN DAT_MEM_PRIV_FLAGS privileges) +{ + ib_api_status_t ib_status; + ib_mr_handle_t mr_handle; + ib_mr_create_t mr_create; + uint32_t l_key, r_key; + if ( ia == NULL || ia->header.magic != DAPL_MAGIC_IA ) + { + return DAT_INVALID_HANDLE; + } + mr_create.vaddr = (void *) virt_addr; + mr_create.length = (size_t)length; + mr_create.access_ctrl = dapl_lmr_convert_privileges (privileges); + mr_create.access_ctrl |= IB_AC_MW_BIND; + + if (lmr->param.mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL) + { + ib_status = ib_reg_shmid ( + ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle, + (const uint8_t*)&lmr->ib_shmid, + &mr_create, + (uint64_t *)&virt_addr, + &l_key, + &r_key, + &mr_handle); + } + else + { + ib_status = ib_reg_mem ( + ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle, + &mr_create, + &l_key, + &r_key, + &mr_handle); + } + + if (ib_status != IB_SUCCESS) + { + return (dapl_ib_status_convert (ib_status)); + } + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIMR: lmr (%p) lkey = 0x%x r_key= %#x mr_handle %p vaddr 0x%LX len 0x%LX\n", + lmr, l_key, r_key, mr_handle, virt_addr, length); + + lmr->param.lmr_context = l_key; + lmr->param.rmr_context = r_key; + lmr->param.registered_size = length; + lmr->param.registered_address = (DAT_VADDR)virt_addr; + lmr->mr_handle = mr_handle; + + return (DAT_SUCCESS); +} + + +/* + * dapl_ib_mr_deregister + * + * Free a memory region + * + * Input: + * lmr pointer to dapl_lmr struct + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN 
+dapls_ib_mr_deregister ( + IN DAPL_LMR *lmr) +{ + ib_api_status_t ib_status; + + ib_status = ib_dereg_mr (lmr->mr_handle); + + if (ib_status != IB_SUCCESS) + { + return dapl_ib_status_convert (ib_status); + } + + lmr->param.lmr_context = 0; + lmr->mr_handle = IB_INVALID_HANDLE; + + return (DAT_SUCCESS); +} + + +/* + * dapl_ib_mr_register_shared + * + * Register a virtual memory region + * + * Input: + * ia_handle IA handle + * lmr pointer to dapl_lmr struct + * virt_addr virtual address of beginning of mem region + * length length of memory region + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_mr_register_shared ( + IN DAPL_IA *ia, + IN DAPL_LMR *lmr, + IN DAT_MEM_PRIV_FLAGS privileges) +{ + DAT_VADDR virt_addr; + ib_mr_handle_t mr_handle; + ib_api_status_t ib_status; + ib_mr_handle_t new_mr_handle; + ib_access_t access_ctrl; + uint32_t l_key, r_key; + ib_mr_create_t mr_create; + if ( ia == NULL || ia->header.magic != DAPL_MAGIC_IA ) + { + return DAT_INVALID_HANDLE; + } + virt_addr = dapl_mr_get_address (lmr->param.region_desc, + lmr->param.mem_type); + + access_ctrl = dapl_lmr_convert_privileges (privileges); + access_ctrl |= IB_AC_MW_BIND; + + mr_create.vaddr = (void *) virt_addr; + mr_create.access_ctrl = access_ctrl; + mr_handle = (ib_mr_handle_t) lmr->mr_handle; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIMRS: orig mr_handle %p vaddr %p\n", + mr_handle, virt_addr); + + if (lmr->param.mem_type == DAT_MEM_TYPE_SHARED_VIRTUAL) + { + ib_status = ib_reg_shmid ( + ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle, + (const uint8_t*)&lmr->ib_shmid, + &mr_create, + &virt_addr, + &l_key, + &r_key, + &new_mr_handle); + } + else + { + + ib_status = ib_reg_shared ( + mr_handle, + ((DAPL_PZ *)lmr->param.pz_handle)->pd_handle, + access_ctrl, + /* in/out */(DAT_UINT64 *)&virt_addr, + &l_key, + &r_key, + &new_mr_handle); + } + + if (ib_status != IB_SUCCESS) + { + return dapl_ib_status_convert (ib_status); + } + /* + * FIXME - Vu + * What if virt_addr as an OUTPUT having the actual virtual address + * assigned to the register region + */ + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIMRS: lmr (%p) lkey = 0x%x new mr_handle %p vaddr %p\n", + lmr, l_key, new_mr_handle, virt_addr); + + lmr->param.lmr_context = l_key; + lmr->param.rmr_context = r_key; + lmr->param.registered_address = (DAT_VADDR) (uintptr_t) virt_addr; + lmr->mr_handle = new_mr_handle; + + return (DAT_SUCCESS); +} + + +/* + * dapls_ib_mw_alloc + * + * Bind a protection domain to a memory window + * + * Input: + * rmr Initialized rmr to hold binding handles + * + * Output: + * none + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * + */ +DAT_RETURN +dapls_ib_mw_alloc ( + IN DAPL_RMR *rmr) +{ + ib_api_status_t ib_status; + uint32_t r_key; + ib_mw_handle_t mw_handle; + + ib_status = ib_create_mw ( + ((DAPL_PZ *)rmr->param.pz_handle)->pd_handle, + &r_key, + &mw_handle); + + if (ib_status != IB_SUCCESS) + { + dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMA: create MW failed = %s\n", ib_get_err_str(ib_status)); + return dapl_ib_status_convert (ib_status); + } + + rmr->mw_handle = mw_handle; + rmr->param.rmr_context = (DAT_RMR_CONTEXT) r_key; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, + "--> DsIMA: mw_handle %p r_key = 0x%x\n", + mw_handle, r_key); + + return (DAT_SUCCESS); +} + + +/* + * dapls_ib_mw_free + * + * Release bindings of a protection domain to a memory window + * + * Input: + * rmr Initialized rmr to hold binding handles + * + * Output: + * none + * + * 
+/*
+ * dapls_ib_mw_free
+ *
+ * Release a memory window
+ *
+ * Input:
+ *	rmr			Initialized rmr holding the window handle
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *
+ */
+DAT_RETURN
+dapls_ib_mw_free (
+        IN  DAPL_RMR        *rmr)
+{
+    ib_api_status_t         ib_status;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                  "--> DsIMF: mw_handle %p\n", rmr->mw_handle);
+
+    ib_status = ib_destroy_mw (rmr->mw_handle);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMF: Free MW failed = %s\n",
+                       ib_get_err_str(ib_status));
+        return dapl_ib_status_convert (ib_status);
+    }
+
+    rmr->param.rmr_context = 0;
+    rmr->mw_handle = IB_INVALID_HANDLE;
+
+    return (DAT_SUCCESS);
+}
+
+/*
+ * dapls_ib_mw_bind
+ *
+ * Bind a memory window to a local memory region
+ *
+ * Input:
+ *	rmr			Initialized rmr to hold binding handles
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *
+ */
+DAT_RETURN
+dapls_ib_mw_bind (
+        IN  DAPL_RMR                *rmr,
+        IN  DAPL_LMR                *lmr,
+        IN  DAPL_EP                 *ep,
+        IN  DAPL_COOKIE             *cookie,
+        IN  DAT_VADDR               virtual_address,
+        IN  DAT_VLEN                length,
+        IN  DAT_MEM_PRIV_FLAGS      mem_priv,
+        IN  ib_bool_t               is_signaled)
+{
+    ib_api_status_t         ib_status;
+    ib_bind_wr_t            bind_wr_prop;
+    uint32_t                new_rkey;
+
+    bind_wr_prop.local_ds.vaddr = virtual_address;
+    bind_wr_prop.local_ds.length = (uint32_t)length;
+    bind_wr_prop.local_ds.lkey = lmr->param.lmr_context;
+    bind_wr_prop.current_rkey = rmr->param.rmr_context;
+    bind_wr_prop.access_ctrl = dapl_rmr_convert_privileges (mem_priv);
+    bind_wr_prop.send_opt = (is_signaled == TRUE) ?
+                            IB_SEND_OPT_SIGNALED : 0;
+    bind_wr_prop.wr_id = (uint64_t) ((uintptr_t) cookie);
+    bind_wr_prop.h_mr = lmr->mr_handle;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                  "--> DsIMB: mr_handle %p, mw_handle %p vaddr %#I64x length %#I64x\n",
+                  lmr->mr_handle, rmr->mw_handle, virtual_address, length);
+
+    ib_status = ib_bind_mw (
+                    rmr->mw_handle,
+                    ep->qp_handle,
+                    &bind_wr_prop,
+                    &new_rkey);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMB: Bind MW failed = %s\n",
+                       ib_get_err_str(ib_status));
+        return (dapl_ib_status_convert (ib_status));
+    }
+
+    rmr->param.rmr_context = (DAT_RMR_CONTEXT) new_rkey;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                  "--> DsIMB: new_rkey = 0x%x\n",new_rkey);
+    return (DAT_SUCCESS);
+}
+
+/*
+ * dapls_ib_mw_unbind
+ *
+ * Unbind a memory window
+ *
+ * Input:
+ *	rmr			Initialized rmr to hold binding handles
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *
+ */
+DAT_RETURN
+dapls_ib_mw_unbind (
+        IN  DAPL_RMR        *rmr,
+        IN  DAPL_EP         *ep,
+        IN  DAPL_COOKIE     *cookie,
+        IN  ib_bool_t       is_signaled)
+{
+    ib_api_status_t         ib_status;
+    ib_bind_wr_t            bind_wr_prop;
+    uint32_t                new_rkey;
+
+    bind_wr_prop.local_ds.vaddr = 0;
+    bind_wr_prop.local_ds.length = 0;
+    bind_wr_prop.local_ds.lkey = 0;
+    bind_wr_prop.access_ctrl = 0;
+    bind_wr_prop.send_opt = (is_signaled == TRUE) ?
+                            IB_SEND_OPT_SIGNALED : 0;
+    bind_wr_prop.wr_id = (uint64_t) ((uintptr_t) cookie);
+
+    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                  "--> DsIMU: mw_handle = %p\n", rmr->mw_handle);
+
+    ib_status = ib_bind_mw (
+                    rmr->mw_handle,
+                    ep->qp_handle,
+                    &bind_wr_prop,
+                    &new_rkey);
+
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsIMU: Unbind MW failed = %s\n",
+                       ib_get_err_str(ib_status));
+        return (dapl_ib_status_convert (ib_status));
+    }
+
+    rmr->param.rmr_context = (DAT_RMR_CONTEXT) new_rkey;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+                  "--> DsIMU: unbind new_rkey = 0x%x\n", new_rkey);
+    return (DAT_SUCCESS);
+}
+
+
+/*
+ * dapls_ib_setup_async_callback
+ *
+ * Set up asynchronous callbacks of various kinds
+ *
+ * Input:
+ *	ia_handle		IA handle
+ *	handler_type		type of handler to set up
+ *	callback_handle		handle param for completion callbacks
+ *	callback		callback routine pointer
+ *	context			argument for callback routine
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INSUFFICIENT_RESOURCES
+ *	DAT_INVALID_PARAMETER
+ *
+ */
+DAT_RETURN
+dapls_ib_setup_async_callback (
+        IN  DAPL_IA                     *ia_ptr,
+        IN  DAPL_ASYNC_HANDLER_TYPE     handler_type,
+        IN  unsigned int                *callback_handle,
+        IN  ib_async_handler_t          callback,
+        IN  void                        *context )
+{
+    dapl_ibal_ca_t      *p_ca;
+    dapl_ibal_evd_cb_t  *evd_cb;
+    UNREFERENCED_PARAMETER(callback_handle);
+
+    p_ca = (dapl_ibal_ca_t *) ia_ptr->hca_ptr->ib_hca_handle;
+
+    if (p_ca == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> DsISAC: can't find %s HCA\n",
+                       (ia_ptr->header.provider)->device_name);
+        return (DAT_INVALID_HANDLE);
+    }
+
+    if (handler_type != DAPL_ASYNC_CQ_COMPLETION)
+    {
+        evd_cb = dapli_find_evd_cb_by_context (context, p_ca);
+
+        if (evd_cb == NULL)
+        {
+            /*
+             * No record for this EVD; allocate one.
+             */
+            evd_cb = dapl_os_alloc (sizeof (dapl_ibal_evd_cb_t));
+
+            if (evd_cb == NULL)
+            {
+                dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: can't alloc res\n","DsISAC");
+                return (DAT_INSUFFICIENT_RESOURCES);
+            }
+
+            /* zero the record only after the allocation is known good */
+            dapl_os_memzero (evd_cb, sizeof(dapl_ibal_evd_cb_t));
+
+            evd_cb->context = context;
+
+            /*
+             * Add the new EVD CB to the list
+             */
+            LOCK_INSERT_TAIL( p_ca->evd_cb_lock,
+                              p_ca->evd_cb_head,
+                              evd_cb->next );
+        }
+
+        switch (handler_type)
+        {
+        case DAPL_ASYNC_UNAFILIATED:
+            evd_cb->pfn_async_err_cb = callback;
+            break;
+        case DAPL_ASYNC_CQ_ERROR:
+            evd_cb->pfn_async_cq_err_cb = callback;
+            break;
+        case DAPL_ASYNC_QP_ERROR:
+            evd_cb->pfn_async_qp_err_cb = callback;
+            break;
+        default:
+            break;
+        }
+
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_ib_query_hca
+ *
+ * Query the HCA attributes
+ *
+ * Input:
+ *	hca_handle		hca handle
+ *	ep_attr			attribute of the ep
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_INVALID_PARAMETER
+ */
+
+DAT_RETURN dapls_ib_query_hca (
+        IN  DAPL_HCA            *hca_ptr,
+        OUT DAT_IA_ATTR         *ia_attr,
+        OUT DAT_EP_ATTR         *ep_attr,
+        OUT DAT_SOCK_ADDR6      *ip_addr)
+{
+    ib_ca_attr_t                *p_hca_attr;
+    dapl_ibal_ca_t              *p_ca;
+    ib_api_status_t             ib_status;
+    ib_hca_port_t               port_num;
+    GID                         gid;
+    DAT_SOCK_ADDR6              *p_sock_addr;
+    DAT_RETURN                  dat_status = DAT_SUCCESS;
+
+    port_num = hca_ptr->port_num;
+
+    p_ca = (dapl_ibal_ca_t *) hca_ptr->ib_hca_handle;
+
+    if (p_ca == NULL)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR, "--> %s: invalid handle %p",
+                       "DsIQH", hca_ptr);
+        return (DAT_INVALID_HANDLE);
+    }
+
+    ib_status = ib_query_ca (
+                    p_ca->h_ca,
+                    p_ca->p_ca_attr,
+                    &p_ca->ca_attr_size);
+    if (ib_status != IB_SUCCESS)
+    {
+        dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
+                       "--> DsIQH: ib_query_ca returned failed status = %s\n",
+                       ib_get_err_str(ib_status));
+        return (dapl_ib_status_convert (ib_status));
+    }
+
+    p_hca_attr = p_ca->p_ca_attr;
+
+    if (ip_addr != NULL)
+    {
+        p_sock_addr = dapl_os_alloc(sizeof(DAT_SOCK_ADDR6));
+        if ( !p_sock_addr )
+        {
+            dat_status = DAT_INSUFFICIENT_RESOURCES;
+            dapl_dbg_log ( DAPL_DBG_TYPE_ERR,
+                           " Query Hca alloc Err: status %d\n", dat_status);
+            return dat_status;
+        }
+        dapl_os_memzero(p_sock_addr, sizeof(DAT_SOCK_ADDR6));
+
+        gid.gid_prefix = p_hca_attr->p_port_attr[port_num-1].p_gid_table->unicast.prefix;
+        gid.guid = p_hca_attr->p_port_attr[port_num-1].p_gid_table->unicast.interface_id;
+
+        dat_status = dapls_ns_map_ipaddr(hca_ptr, gid, (DAT_IA_ADDRESS_PTR)p_sock_addr);
+
+        if ( dat_status != DAT_SUCCESS )
+        {
+            dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                          " SA Query for local IP failed= %d\n", dat_status );
+            /* what to do next ?
*/ + } + else + { + dapl_dbg_log (DAPL_DBG_TYPE_CM, "SA query GID for IP: "); + dapl_dbg_log ( DAPL_DBG_TYPE_CM, "%0d:%d:%d:%d\n", + (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[2]&0xff, + (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[3]&0xff, + (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[4]&0xff, + (uint8_t)((DAT_IA_ADDRESS_PTR )p_sock_addr)->sa_data[5]&0xff); + } + + hca_ptr->hca_address = *p_sock_addr; + + /* if structure address not from our hca_ptr */ + if ( ip_addr != &hca_ptr->hca_address ) + { + *ip_addr = *p_sock_addr; + } + dapl_os_free (p_sock_addr, sizeof(DAT_SOCK_ADDR6)); + } /* ip_addr != NULL */ + + if ( ia_attr != NULL ) + { + dapl_os_memzero(ia_attr->adapter_name, (int)sizeof(ia_attr->adapter_name )); + dapl_os_memcpy(ia_attr->adapter_name, + DAT_ADAPTER_NAME, + min ( (int)dapl_os_strlen(DAT_ADAPTER_NAME), (int)(DAT_NAME_MAX_LENGTH)-1 ) ); + + dapl_os_memzero (ia_attr->vendor_name, (int)sizeof(ia_attr->vendor_name)); + dapl_os_memcpy(ia_attr->vendor_name, + DAT_VENDOR_NAME, + min ((int)dapl_os_strlen(DAT_VENDOR_NAME), (int)(DAT_NAME_MAX_LENGTH)-1 )); + + /* FIXME : Vu + * this value should be revisited + * It can be set by DAT consumers + */ + ia_attr->ia_address_ptr = (DAT_PVOID)&hca_ptr->hca_address; + ia_attr->hardware_version_major = p_hca_attr->dev_id; + ia_attr->hardware_version_minor = p_hca_attr->revision; + ia_attr->max_eps = p_hca_attr->max_qps; + ia_attr->max_dto_per_ep = p_hca_attr->max_wrs; + ia_attr->max_rdma_read_per_ep = p_hca_attr->max_qp_resp_res; + ia_attr->max_evds = p_hca_attr->max_cqs; + ia_attr->max_evd_qlen = p_hca_attr->max_cqes; + ia_attr->max_iov_segments_per_dto = p_hca_attr->max_sges; + ia_attr->max_lmrs = p_hca_attr->init_regions; + ia_attr->max_lmr_block_size = p_hca_attr->init_region_size; + ia_attr->max_rmrs = p_hca_attr->init_windows; + ia_attr->max_lmr_virtual_address = p_hca_attr->max_addr_handles; + ia_attr->max_rmr_target_address = p_hca_attr->max_addr_handles; + ia_attr->max_pzs = p_hca_attr->max_pds; + /* + * DAT spec does not tie max_mtu_size with IB MTU + * + ia_attr->max_mtu_size = + dapl_ibal_mtu_table[p_hca_attr->p_port_attr->mtu]; + */ + ia_attr->max_mtu_size = + p_hca_attr->p_port_attr->max_msg_size; + ia_attr->max_rdma_size = + p_hca_attr->p_port_attr->max_msg_size; + ia_attr->num_transport_attr = 0; + ia_attr->transport_attr = NULL; + ia_attr->num_vendor_attr = 0; + ia_attr->vendor_attr = NULL; + + dapl_dbg_log(DAPL_DBG_TYPE_UTIL, + " --> DsIMU_qHCA: (ver=%x) ep %d ep_q %d evd %d evd_q %d\n", + ia_attr->hardware_version_major, + ia_attr->max_eps, ia_attr->max_dto_per_ep, + ia_attr->max_evds, ia_attr->max_evd_qlen ); + dapl_dbg_log(DAPL_DBG_TYPE_UTIL, + " --> DsIMU_qHCA: mtu %llu rdma %llu iov %d lmr %d rmr %d" + " rdma_io %d\n", + ia_attr->max_mtu_size, ia_attr->max_rdma_size, + ia_attr->max_iov_segments_per_dto, ia_attr->max_lmrs, + ia_attr->max_rmrs, ia_attr->max_rdma_read_per_ep ); + } + + if ( ep_attr != NULL ) + { + /* + * DAT spec does not tie max_mtu_size with IB MTU + * + ep_attr->max_mtu_size = + dapl_ibal_mtu_table[p_hca_attr->p_port_attr->mtu]; + */ + ep_attr->max_mtu_size = p_hca_attr->p_port_attr->max_msg_size; + ep_attr->max_rdma_size = p_hca_attr->p_port_attr->max_msg_size; + ep_attr->max_recv_dtos = p_hca_attr->max_wrs; + ep_attr->max_request_dtos = p_hca_attr->max_wrs; + ep_attr->max_recv_iov = p_hca_attr->max_sges; + ep_attr->max_request_iov = p_hca_attr->max_sges; + ep_attr->max_rdma_read_in = p_hca_attr->max_qp_resp_res; + ep_attr->max_rdma_read_out= p_hca_attr->max_qp_resp_res; + 
+        dapl_dbg_log(DAPL_DBG_TYPE_UTIL,
+                     " --> DsIMU_qHCA: msg %llu dto %d iov %d rdma i%d,o%d\n",
+                     ep_attr->max_mtu_size,
+                     ep_attr->max_recv_dtos, ep_attr->max_recv_iov,
+                     ep_attr->max_rdma_read_in, ep_attr->max_rdma_read_out);
+    }
+    return DAT_SUCCESS;
+}
+
+
+DAT_RETURN
+dapls_ib_completion_poll (
+        IN  ib_hca_handle_t         hca_handle,
+        IN  ib_cq_handle_t          cq_handle,
+        IN  ib_work_completion_t*   cqe_ptr)
+{
+    ib_api_status_t         ib_status;
+    ib_work_completion_t    *cqe_filled;
+
+    /*
+     * FIXME - Vu
+     * For now we poll for a single CQE only. Polling for several
+     * completions at once would perform better, but it requires
+     * changing the logic in dapl_evd_dto_callback to process more
+     * than one completion.
+     */
+    cqe_ptr->p_next = NULL;
+    cqe_filled = NULL;
+
+    if ( !hca_handle )
+    {
+        return DAT_INVALID_HANDLE;
+    }
+
+    ib_status = ib_poll_cq (cq_handle, &cqe_ptr, &cqe_filled);
+
+    if ( ib_status == IB_INVALID_CQ_HANDLE )
+        ib_status = IB_NOT_FOUND;
+
+    return dapl_ib_status_convert (ib_status);
+}
+
+
+DAT_RETURN
+dapls_ib_completion_notify (
+        IN  ib_hca_handle_t         hca_handle,
+        IN  ib_cq_handle_t          cq_handle,
+        IN  ib_notification_type_t  type)
+{
+    ib_api_status_t         ib_status;
+    DAT_BOOLEAN             solic_notify;
+
+    if ( !hca_handle )
+    {
+        return DAT_INVALID_HANDLE;
+    }
+    solic_notify = (type == IB_NOTIFY_ON_SOLIC_COMP) ? DAT_TRUE : DAT_FALSE;
+    ib_status = ib_rearm_cq (
+                    cq_handle,
+                    solic_notify );
+
+    return dapl_ib_status_convert (ib_status);
+}
+
+
+DAT_RETURN
+dapls_ib_n_completions_notify (
+        IN  ib_hca_handle_t     hca_handle,
+        IN  ib_cq_handle_t      cq_handle,
+        IN  uint32_t            n_cqes)
+{
+    ib_api_status_t         ib_status;
+    UNREFERENCED_PARAMETER(hca_handle);
+
+    ib_status = ib_rearm_n_cq (
+                    cq_handle,
+                    n_cqes );
+
+    return dapl_ib_status_convert (ib_status);
+}
+
+
+DAT_RETURN
+dapls_ib_peek_cq (
+        IN  ib_cq_handle_t      cq_handle,
+        OUT uint32_t*           p_n_cqes)
+{
+    ib_api_status_t         ib_status;
+
+    ib_status = ib_peek_cq (
+                    cq_handle,
+                    p_n_cqes );
+
+    return dapl_ib_status_convert (ib_status);
+}
+
+
+DAT_RETURN
+dapls_ib_wait_object_create (
+        IN  cl_waitobj_handle_t*    p_cq_wait_obj_handle)
+{
+    cl_status_t     cl_status;
+
+    cl_status = cl_waitobj_create (FALSE /* auto_reset */, p_cq_wait_obj_handle);
+
+    if (cl_status == CL_SUCCESS)
+        return DAT_SUCCESS;
+
+    return DAT_INTERNAL_ERROR;
+}
+
+
+DAT_RETURN
+dapls_ib_wait_object_destroy (
+        IN  cl_waitobj_handle_t     cq_wait_obj_handle)
+{
+    cl_status_t     cl_status;
+
+    cl_status = cl_waitobj_destroy (cq_wait_obj_handle);
+
+    if (cl_status == CL_SUCCESS)
+        return DAT_SUCCESS;
+
+    return DAT_INTERNAL_ERROR;
+}
+
+
+DAT_RETURN
+dapls_ib_wait_object_wakeup (
+        IN  cl_waitobj_handle_t     cq_wait_obj_handle)
+{
+    cl_status_t     cl_status;
+
+    cl_status = cl_waitobj_signal (cq_wait_obj_handle);
+
+    if (cl_status == CL_SUCCESS)
+        return DAT_SUCCESS;
+
+    return DAT_INTERNAL_ERROR;
+}
+
+
+DAT_RETURN
+dapls_ib_wait_object_wait (
+        IN  cl_waitobj_handle_t     cq_wait_obj_handle,
+        IN  uint32_t                timeout)
+{
+    cl_status_t     cl_status;
+
+    cl_status = cl_waitobj_wait_on (cq_wait_obj_handle, timeout, TRUE );
+
+    switch (cl_status)
+    {
+    case CL_SUCCESS:
+        return DAT_SUCCESS;
+    case CL_TIMEOUT:
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                      "--> wait_object_wait: cl_timeout: %d\n", timeout);
+        return DAT_TIMEOUT_EXPIRED;
+    case CL_NOT_DONE:
+        return DAT_SUCCESS;
+    default:
+        dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+                      "--> wait_object_wait: cl_error: %d\n", cl_status);
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
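+/*
+ * Illustrative sketch (not compiled): how the completion helpers above
+ * are typically combined by EVD code -- rearm the CQ, block on the
+ * wait object, then drain with dapls_ib_completion_poll(). The handle
+ * parameters and the 'stop' flag are assumptions for the example only;
+ * timeout units are whatever cl_waitobj_wait_on() expects.
+ */
+#if 0
+static void
+example_evd_wait_loop (ib_hca_handle_t hca, ib_cq_handle_t cq,
+                       cl_waitobj_handle_t wait_obj, volatile int *stop)
+{
+    ib_work_completion_t wc;
+
+    while (!*stop)
+    {
+        /* Request a callback on the next completion... */
+        if (dapls_ib_completion_notify (hca, cq, IB_NOTIFY_ON_NEXT_COMP)
+            != DAT_SUCCESS)
+            break;
+
+        /* ...block until the CQ callback signals the wait object... */
+        if (dapls_ib_wait_object_wait (wait_obj, 1000)
+            == DAT_TIMEOUT_EXPIRED)
+            continue;
+
+        /* ...then drain one CQE at a time (see the FIXME above). */
+        while (dapls_ib_completion_poll (hca, cq, &wc) == DAT_SUCCESS)
+        {
+            /* hand wc to dapl_evd_dto_callback-style processing */
+        }
+    }
+}
+#endif
+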
+/*
+ * dapls_ib_get_async_event
+ *
+ * Translate an asynchronous event type to the DAT event.
+ * Note that different providers have different sets of errors.
+ *
+ * Input:
+ *	cause_ptr		provider event cause
+ *
+ * Output:
+ *	async_event		DAT mapping of error
+ *
+ * Returns:
+ *	DAT_SUCCESS
+ *	DAT_NOT_IMPLEMENTED	Caller is not interested in this event
+ */
+
+DAT_RETURN dapls_ib_get_async_event(
+        IN  ib_async_event_rec_t    *cause_ptr,
+        OUT DAT_EVENT_NUMBER        *async_event)
+{
+    ib_async_event_t        event_id;
+    DAT_RETURN              dat_status;
+
+    dat_status = DAT_SUCCESS;
+    event_id = cause_ptr->code;
+
+    dapl_dbg_log (DAPL_DBG_TYPE_WARN,
+                  "--> DsAE: event_id = %d\n", event_id);
+
+    switch (event_id )
+    {
+    case IB_AE_SQ_ERROR:
+    case IB_AE_SQ_DRAINED:
+    case IB_AE_RQ_ERROR:
+        {
+            *async_event = DAT_ASYNC_ERROR_EP_BROKEN;
+            break;
+        }
+
+    /* INTERNAL errors */
+    case IB_AE_QP_FATAL:
+    case IB_AE_CQ_ERROR:
+    case IB_AE_LOCAL_FATAL:
+    case IB_AE_WQ_REQ_ERROR:
+    case IB_AE_WQ_ACCESS_ERROR:
+        {
+            *async_event = DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR;
+            break;
+        }
+
+    /* CATASTROPHIC errors */
+    case IB_AE_FLOW_CTRL_ERROR:
+    case IB_AE_BUF_OVERRUN:
+        {
+            *async_event = DAT_ASYNC_ERROR_IA_CATASTROPHIC;
+            break;
+        }
+    default:
+        {
+            /*
+             * Errors we are not interested in reporting:
+             * IB_AE_QP_APM
+             * IB_AE_PKEY_TRAP
+             * IB_AE_QKEY_TRAP
+             * IB_AE_MKEY_TRAP
+             * IB_AE_PORT_TRAP
+             * IB_AE_QP_APM_ERROR
+             * IB_AE_PORT_ACTIVE
+             * ...
+             */
+            dat_status = DAT_NOT_IMPLEMENTED;
+        }
+    }
+
+    return dat_status;
+}
+
+/*
+ * dapls_ib_get_dto_status
+ *
+ * Return the DAT status of a DTO operation
+ *
+ * Input:
+ *	cqe_ptr			pointer to completion queue entry
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	the corresponding DAT_DTO_* completion status
+ */
+
+DAT_DTO_COMPLETION_STATUS
+dapls_ib_get_dto_status(
+        IN  ib_work_completion_t    *cqe_ptr)
+{
+    ib_uint32_t     ib_status;
+
+    ib_status = DAPL_GET_CQE_STATUS (cqe_ptr);
+
+    switch (ib_status)
+    {
+    case IB_COMP_ST_SUCCESS:
+        return DAT_DTO_SUCCESS;
+    case IB_COMP_ST_LOCAL_LEN_ERR:
+        return DAT_DTO_ERR_LOCAL_LENGTH;
+    case IB_COMP_ST_LOCAL_OP_ERR:
+        return DAT_DTO_ERR_LOCAL_EP;
+    case IB_COMP_ST_LOCAL_PROTECT_ERR:
+        return DAT_DTO_ERR_LOCAL_PROTECTION;
+    case IB_COMP_ST_WR_FLUSHED_ERR:
+        return DAT_DTO_ERR_FLUSHED;
+    case IB_COMP_ST_MW_BIND_ERR:
+        return DAT_RMR_OPERATION_FAILED;
+    case IB_COMP_ST_REM_ACC_ERR:
+        return DAT_DTO_ERR_REMOTE_ACCESS;
+    case IB_COMP_ST_REM_OP_ERR:
+        return DAT_DTO_ERR_REMOTE_RESPONDER;
+    case IB_COMP_ST_RNR_COUNTER:
+        return DAT_DTO_ERR_RECEIVER_NOT_READY;
+    case IB_COMP_ST_TRANSP_COUNTER:
+        return DAT_DTO_ERR_TRANSPORT;
+    case IB_COMP_ST_REM_REQ_ERR:
+        return DAT_DTO_ERR_REMOTE_RESPONDER;
+    case IB_COMP_ST_BAD_RESPONSE_ERR:
+        return DAT_DTO_ERR_BAD_RESPONSE;
+    case IB_COMP_ST_EE_STATE_ERR:
+    case IB_COMP_ST_EE_CTX_NO_ERR:
+        return DAT_DTO_ERR_TRANSPORT;
+    default:
+        return DAT_DTO_FAILURE;
+    }
+}
+
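+/*
+ * Illustrative sketch (not compiled): minimal completion handling that
+ * ties dapls_ib_completion_poll() to dapls_ib_get_dto_status(). The
+ * handle parameters are assumptions for the example only; the wr_id
+ * recovery mirrors how the bind/post paths stash a DAPL_COOKIE.
+ */
+#if 0
+static void
+example_process_one_cqe (ib_hca_handle_t hca, ib_cq_handle_t cq)
+{
+    ib_work_completion_t        wc;
+    DAPL_COOKIE                 *cookie;
+    DAT_DTO_COMPLETION_STATUS   dto_status;
+
+    if (dapls_ib_completion_poll (hca, cq, &wc) != DAT_SUCCESS)
+        return;     /* DAT_QUEUE_EMPTY: nothing to do */
+
+    /* The wr_id carries the DAPL_COOKIE posted with the work request. */
+    cookie = (DAPL_COOKIE *) (uintptr_t) wc.wr_id;
+    dto_status = dapls_ib_get_dto_status (&wc);
+
+    if (dto_status != DAT_DTO_SUCCESS)
+    {
+        /* error/flush path: see the DAT_DTO_ERR_* mapping above */
+    }
+}
+#endif
+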
+/*
+ * Map all IBAPI DTO completion codes to the DAT equivalent.
+ *
+ * dapls_ib_get_dat_event
+ *
+ * Return a DAT connection event given a provider CM event.
+ *
+ * N.B. Some architectures combine async and CM events into a
+ *      generic async event. In that case, dapls_ib_get_dat_event()
+ *      and dapls_ib_get_async_event() should be entry points that
+ *      call into a common routine.
+ *
+ * Input:
+ *	ib_cm_event		event provided to the dapl callback routine
+ *	active			switch indicating active or passive connection
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	DAT_EVENT_NUMBER of translated provider value
+ */
+
+DAT_EVENT_NUMBER
+dapls_ib_get_dat_event (
+        IN  const ib_cm_events_t    ib_cm_event,
+        IN  DAT_BOOLEAN             active)
+{
+    DAT_EVENT_NUMBER        dat_event_num = 0;
+    UNREFERENCED_PARAMETER (active);
+
+    switch ( ib_cm_event)
+    {
+    case IB_CME_CONNECTED:
+        dat_event_num = DAT_CONNECTION_EVENT_ESTABLISHED;
+        break;
+    case IB_CME_DISCONNECTED:
+        dat_event_num = DAT_CONNECTION_EVENT_DISCONNECTED;
+        break;
+    case IB_CME_DISCONNECTED_ON_LINK_DOWN:
+        dat_event_num = DAT_CONNECTION_EVENT_DISCONNECTED;
+        break;
+    case IB_CME_CONNECTION_REQUEST_PENDING:
+        dat_event_num = DAT_CONNECTION_REQUEST_EVENT;
+        break;
+    case IB_CME_CONNECTION_REQUEST_PENDING_PRIVATE_DATA:
+        dat_event_num = DAT_CONNECTION_REQUEST_EVENT;
+        break;
+    case IB_CME_DESTINATION_REJECT:
+        dat_event_num = DAT_CONNECTION_EVENT_NON_PEER_REJECTED;
+        break;
+    case IB_CME_DESTINATION_REJECT_PRIVATE_DATA:
+        dat_event_num = DAT_CONNECTION_EVENT_PEER_REJECTED;
+        break;
+    case IB_CME_DESTINATION_UNREACHABLE:
+        dat_event_num = DAT_CONNECTION_EVENT_UNREACHABLE;
+        break;
+    case IB_CME_TOO_MANY_CONNECTION_REQUESTS:
+        dat_event_num = DAT_CONNECTION_EVENT_NON_PEER_REJECTED;
+        break;
+    case IB_CME_LOCAL_FAILURE:
+        dat_event_num = DAT_CONNECTION_EVENT_BROKEN;
+        break;
+    case IB_CME_REPLY_RECEIVED:
+    case IB_CME_REPLY_RECEIVED_PRIVATE_DATA:
+    default:
+        break;
+    }
+    dapl_dbg_log (DAPL_DBG_TYPE_CM,
+                  " dapls_ib_get_dat_event: event translation: (%s) "
+                  "ib_event 0x%x dat_event 0x%x\n",
+                  active ? "active" : "passive",
+                  ib_cm_event,
+                  dat_event_num);
+
+    return dat_event_num;
+}
+
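+/*
+ * Illustrative sketch (not compiled): how a connection callback might
+ * use dapls_ib_get_dat_event() when queueing a DAT event. The callback
+ * shape and the event-queue step are assumptions for the example only.
+ */
+#if 0
+static void
+example_cm_callback (DAPL_EP *ep, ib_cm_events_t ib_cm_event,
+                     DAT_BOOLEAN active)
+{
+    DAT_EVENT_NUMBER event_num;
+
+    event_num = dapls_ib_get_dat_event (ib_cm_event, active);
+    if (event_num == 0)
+        return;     /* event not interesting to the DAT consumer */
+
+    /* queue { event_num, ... } on ep's connect EVD here */
+}
+#endif
+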
+/*
+ * dapls_ib_get_cm_event
+ *
+ * Return a provider CM event given a DAT connection event.
+ *
+ * N.B. Some architectures combine async and CM events into a
+ *      generic async event. In that case, dapls_ib_get_cm_event()
+ *      and dapls_ib_get_async_event() should be entry points that
+ *      call into a common routine.
+ *
+ * WARNING: In this implementation, there are multiple CM
+ *      events that map to a single DAT event. Be very careful
+ *      with provider routines that depend on this reverse mapping,
+ *      they may have to accommodate more CM events than they
+ *      'naturally' would.
+ *
+ * Input:
+ *	dat_event_num		DAT event we need an equivalent CM event for
+ *
+ * Output:
+ *	none
+ *
+ * Returns:
+ *	ib_cm_event of translated DAPL value
+ */
+ib_cm_events_t
+dapls_ib_get_cm_event (
+        IN  DAT_EVENT_NUMBER    dat_event_num)
+{
+    ib_cm_events_t      ib_cm_event = 0;
+
+    switch (dat_event_num)
+    {
+    case DAT_CONNECTION_EVENT_ESTABLISHED:
+        ib_cm_event = IB_CME_CONNECTED;
+        break;
+    case DAT_CONNECTION_EVENT_DISCONNECTED:
+        ib_cm_event = IB_CME_DISCONNECTED;
+        break;
+    case DAT_CONNECTION_REQUEST_EVENT:
+        ib_cm_event = IB_CME_CONNECTION_REQUEST_PENDING;
+        break;
+    case DAT_CONNECTION_EVENT_NON_PEER_REJECTED:
+        ib_cm_event = IB_CME_DESTINATION_REJECT;
+        break;
+    case DAT_CONNECTION_EVENT_PEER_REJECTED:
+        ib_cm_event = IB_CME_DESTINATION_REJECT_PRIVATE_DATA;
+        break;
+    case DAT_CONNECTION_EVENT_UNREACHABLE:
+        ib_cm_event = IB_CME_DESTINATION_UNREACHABLE;
+        break;
+    case DAT_CONNECTION_EVENT_BROKEN:
+        ib_cm_event = IB_CME_LOCAL_FAILURE;
+        break;
+    default:
+        break;
+    }
+
+    return ib_cm_event;
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
+
diff --git a/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.h b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.h
new file mode 100644
index 00000000..4a108a87
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/ibal/dapl_ibal_util.h
@@ -0,0 +1,390 @@
+
+/*
+ * Copyright (c) 2002, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under the terms of the "Common Public
+ * License" a copy of which is in the file LICENSE.txt in the root
+ * directory. The license is also available from the Open Source
+ * Initiative, see http://www.opensource.org/licenses/cpl.php.
+ *
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_ibal_util.h
+ *
+ * PURPOSE: Utility defs & routines for access to Intel IBAL APIs
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#ifndef _DAPL_IBAL_UTIL_H_
+#define _DAPL_IBAL_UTIL_H_
+
+#include
+#include
+#include
+#include
+
+STATIC _INLINE_ DAT_RETURN
+dapl_ib_status_convert (
+    IN  int32_t     ib_status);
+
+/*
+ * Typedefs to map IBAL types to more generic 'ib' types
+ */
+typedef ib_net64_t              IB_HCA_NAME;
+typedef ib_listen_handle_t      ib_cm_srvc_handle_t;
+typedef ib_ca_handle_t          ib_hca_handle_t;
+typedef DAT_PVOID               ib_cqd_handle_t;
+typedef ib_async_event_rec_t    ib_error_record_t;
+typedef ib_wr_type_t            ib_send_op_type_t;
+typedef ib_wc_t                 ib_work_completion_t;
+typedef uint32_t                ib_hca_port_t;
+typedef uint32_t                ib_uint32_t;
+typedef uint32_t                ib_comp_handle_t;
+typedef ib_local_ds_t           ib_data_segment_t;
+
+typedef unsigned __int3264      cl_dev_handle_t;
+
+typedef void (*ib_async_handler_t)(
+    IN ib_hca_handle_t      ib_hca_handle,
+    IN ib_error_record_t    *err_code,
+    IN void                 *context);
+
+typedef ib_net64_t      ib_guid_t;
+typedef ib_net16_t      ib_lid_t;
+typedef boolean_t       ib_bool_t;
+
+typedef struct _GID
+{
+    uint64_t gid_prefix;
+    uint64_t guid;
+} GID;
+
+typedef enum
+{
+    IB_CME_CONNECTED,
+    IB_CME_DISCONNECTED,
+    IB_CME_DISCONNECTED_ON_LINK_DOWN,
+    IB_CME_CONNECTION_REQUEST_PENDING,
+    IB_CME_CONNECTION_REQUEST_PENDING_PRIVATE_DATA,
+    IB_CME_DESTINATION_REJECT,
+    IB_CME_DESTINATION_REJECT_PRIVATE_DATA,
+    IB_CME_DESTINATION_UNREACHABLE,
+    IB_CME_TOO_MANY_CONNECTION_REQUESTS,
+    IB_CME_LOCAL_FAILURE,
+    IB_CME_REPLY_RECEIVED,
+    IB_CME_REPLY_RECEIVED_PRIVATE_DATA,
+    IB_CM_LOCAL_FAILURE
+} ib_cm_events_t;
+
+typedef enum
+{
+    IB_NOTIFY_ON_NEXT_COMP,
+    IB_NOTIFY_ON_SOLIC_COMP
+}
ib_notification_type_t; + +typedef struct _ib_hca_name +{ + DAT_NAME_PTR hca_name[DAT_NAME_MAX_LENGTH]; +} ib_hca_name_t; + + +#define IB_INVALID_HANDLE NULL +#define true TRUE +#define false FALSE + +#define IB_MAX_REQ_PDATA_SIZE 92 +#define IB_MAX_REP_PDATA_SIZE 196 +#define IB_MAX_REJ_PDATA_SIZE 148 +#define IB_MAX_DREQ_PDATA_SIZE 220 +#define IB_MAX_DREP_PDATA_SIZE 224 + +typedef void +(*dapl_ibal_pfn_destructor_t)( + IN void* context ); + +typedef struct _dapl_ibal_refs +{ + atomic32_t count; // number of references + void* context; // context for destructor + dapl_ibal_pfn_destructor_t destructor; // called when reference goes to zero + +} dapl_ibal_refs_t; + + +typedef struct _dapl_ibal_root +{ + ib_al_handle_t h_al; // handle to Access Layer + cl_spinlock_t ca_lock; // CA list lock + cl_qlist_t ca_head; // list head of CAs + boolean_t shutdown; // when true, driver is shutting down + boolean_t initialized; // when true, lib is initialized + +} dapl_ibal_root_t; + + +typedef struct _dapl_ibal_ca +{ + cl_list_item_t next; // peer CA list + ib_ca_handle_t h_ca; // handle to open CA + ib_ca_attr_t *p_ca_attr; // CA attributes + uint32_t ca_attr_size;// size of ca attribute + dapl_ibal_refs_t refs; // reference counting + cl_spinlock_t port_lock; // port list lock + cl_qlist_t port_head; // port list head for this CA + cl_spinlock_t evd_cb_lock; // EVD async error cb list lock + cl_qlist_t evd_cb_head; // EVD async error cb list head for this CA + cl_dev_handle_t mlnx_device; + DAT_PVOID *ia_ptr; // hook for CA async callbacks +} dapl_ibal_ca_t; + + +typedef struct _dapl_ibal_port +{ + cl_list_item_t next; // peer CA list + dapl_ibal_ca_t *ca; // pointer to parent CA + ib_port_attr_t *p_attr; // port attributes + dapl_ibal_refs_t refs; // reference counting +} dapl_ibal_port_t; + +typedef struct _dapl_ibal_evd_cb +{ + cl_list_item_t next; // peer CA list + ib_async_handler_t pfn_async_err_cb; + ib_async_handler_t pfn_async_qp_err_cb; + ib_async_handler_t pfn_async_cq_err_cb; + void *context; +} dapl_ibal_evd_cb_t; + +/* + * Definitions to map DTO OPs + */ +#define OP_RDMA_READ WR_RDMA_READ +#define OP_RDMA_WRITE WR_RDMA_WRITE +#define OP_SEND WR_SEND +#define OP_COMP_AND_SWAP WR_COMPARE_SWAP +#define OP_FETCH_AND_ADD WR_FETCH_ADD +#define OP_RECEIVE 6 /* no-equip */ +#define OP_BIND_MW 7 /* no-equip */ + +/* + * Definitions to map QP state + */ +#define IB_QP_STATE_RESET IB_QPS_RESET +#define IB_QP_STATE_INIT IB_QPS_INIT +#define IB_QP_STATE_RTR IB_QPS_RTR +#define IB_QP_STATE_RTS IB_QPS_RTS +#define IB_QP_STATE_SQE IB_QPS_SQERR +#define IB_QP_STATE_SQD IB_QPS_SQD +#define IB_QP_STATE_ERROR IB_QPS_ERROR + +/* + * Definitions to map Memory OPs + */ +#define IB_ACCESS_LOCAL_WRITE IB_AC_LOCAL_WRITE +#define IB_ACCESS_REMOTE_READ IB_AC_RDMA_READ +#define IB_ACCESS_REMOTE_WRITE IB_AC_RDMA_WRITE + +/* + * CQE status + */ +enum _dapl_comp_status +{ + IB_COMP_ST_SUCCESS = IB_WCS_SUCCESS, + IB_COMP_ST_LOCAL_LEN_ERR = IB_WCS_LOCAL_LEN_ERR, + IB_COMP_ST_LOCAL_OP_ERR = IB_WCS_LOCAL_OP_ERR, + IB_COMP_ST_LOCAL_PROTECT_ERR = IB_WCS_LOCAL_PROTECTION_ERR, + IB_COMP_ST_WR_FLUSHED_ERR = IB_WCS_WR_FLUSHED_ERR, + IB_COMP_ST_MW_BIND_ERR = IB_WCS_MEM_WINDOW_BIND_ERR, + IB_COMP_ST_REM_ACC_ERR = IB_WCS_REM_ACCESS_ERR, + IB_COMP_ST_REM_OP_ERR = IB_WCS_REM_OP_ERR, + IB_COMP_ST_RNR_COUNTER = IB_WCS_RNR_RETRY_ERR, + IB_COMP_ST_TRANSP_COUNTER = IB_WCS_TIMEOUT_RETRY_ERR, + IB_COMP_ST_REM_REQ_ERR = IB_WCS_REM_INVALID_REQ_ERR, + IB_COMP_ST_BAD_RESPONSE_ERR = IB_WCS_UNMATCHED_RESPONSE, + IB_COMP_ST_EE_STATE_ERR, + 
IB_COMP_ST_EE_CTX_NO_ERR
+};
+
+
+/*
+ * Macro to check the state of an EP/QP
+ */
+#define DAPLIB_NEEDS_INIT(ep)   ((ep)->qp_state == IB_QPS_ERROR)
+
+
+/*
+ * Resolve IBAL return codes to their DAPL equivalent.
+ * Do not return invalid handles; the user is not able
+ * to deal with them.
+ */
+STATIC _INLINE_ DAT_RETURN
+dapl_ib_status_convert (
+    IN  int32_t     ib_status)
+{
+    switch ( ib_status )
+    {
+    case IB_SUCCESS:
+        {
+            return DAT_SUCCESS;
+        }
+    case IB_INSUFFICIENT_RESOURCES:
+    case IB_INSUFFICIENT_MEMORY:
+    case IB_RESOURCE_BUSY:
+        {
+            return DAT_INSUFFICIENT_RESOURCES;
+        }
+    case IB_INVALID_CA_HANDLE:
+    case IB_INVALID_CQ_HANDLE:
+    case IB_INVALID_QP_HANDLE:
+    case IB_INVALID_PD_HANDLE:
+    case IB_INVALID_MR_HANDLE:
+    case IB_INVALID_MW_HANDLE:
+    case IB_INVALID_AL_HANDLE:
+    case IB_INVALID_AV_HANDLE:
+        {
+            return DAT_INVALID_HANDLE;
+        }
+    case IB_INVALID_PKEY:
+        {
+            return DAT_PROTECTION_VIOLATION;
+        }
+    case IB_INVALID_LKEY:
+    case IB_INVALID_RKEY:
+    case IB_INVALID_PERMISSION:
+        {
+            return DAT_PRIVILEGES_VIOLATION;
+        }
+    case IB_INVALID_MAX_WRS:
+    case IB_INVALID_MAX_SGE:
+    case IB_INVALID_CQ_SIZE:
+    case IB_INVALID_SETTING:
+    case IB_INVALID_SERVICE_TYPE:
+    case IB_INVALID_GID:
+    case IB_INVALID_LID:
+    case IB_INVALID_GUID:
+    case IB_INVALID_PARAMETER:
+        {
+            return DAT_INVALID_PARAMETER;
+        }
+    case IB_INVALID_QP_STATE:
+    case IB_INVALID_APM_STATE:
+    case IB_INVALID_PORT_STATE:
+    case IB_INVALID_STATE:
+        {
+            return DAT_INVALID_STATE;
+        }
+    case IB_NOT_FOUND:
+        {
+            return DAT_QUEUE_EMPTY;
+        }
+    case IB_OVERFLOW:
+        {
+            return DAT_QUEUE_FULL;
+        }
+    case IB_UNSUPPORTED:
+        {
+            return DAT_NOT_IMPLEMENTED;
+        }
+    case IB_TIMEOUT:
+        {
+            return DAT_TIMEOUT_EXPIRED;
+        }
+    case IB_CANCELED:
+        {
+            return DAT_ABORT;
+        }
+    default:
+        {
+            return DAT_INTERNAL_ERROR;
+        }
+    }
+}
+
+#define TAKE_LOCK( lock ) \
+        cl_spinlock_acquire( &(lock) )
+
+#define RELEASE_LOCK( lock ) \
+        cl_spinlock_release( &(lock) )
+
+#define LOCK_INSERT_HEAD( lock, head, item ) \
+{ \
+    TAKE_LOCK( lock ); \
+    cl_qlist_insert_head( &head, (cl_list_item_t*)(&item) ); \
+    RELEASE_LOCK( lock ); \
+}
+
+#define LOCK_INSERT_TAIL( lock, tail, item ) \
+{ \
+    TAKE_LOCK( lock ); \
+    cl_qlist_insert_tail( &tail, (cl_list_item_t*)(&item) ); \
+    RELEASE_LOCK( lock ); \
+}
+
+#define INIT_REFERENCE( p_ref, n, con, destruct ) \
+{ \
+    (p_ref)->count = n; \
+    (p_ref)->context = con; \
+    (p_ref)->destructor = destruct; \
+}
+
+#define TAKE_REFERENCE( p_ref ) \
+        cl_atomic_inc( &(p_ref)->count )
+
+#define REMOVE_REFERENCE( p_ref ) \
+{ \
+    if ( cl_atomic_dec( &(p_ref)->count ) == 0 ) \
+        if ( (p_ref)->destructor ) \
+            (p_ref)->destructor( (p_ref)->context ); \
+}
+
+
+/*
+ * Prototypes
+ */
+
+extern ib_api_status_t
+dapls_modify_qp_state_to_error (
+        ib_qp_handle_t  qp_handle );
+
+extern ib_api_status_t
+dapls_modify_qp_state_to_reset (
+        ib_qp_handle_t);
+
+extern ib_api_status_t
+dapls_modify_qp_state_to_init (
+        ib_qp_handle_t, DAT_EP_ATTR *, dapl_ibal_port_t *);
+
+extern ib_api_status_t
+dapls_modify_qp_state_to_rtr (
+        ib_qp_handle_t, ib_net32_t, ib_lid_t, dapl_ibal_port_t *);
+
+extern ib_api_status_t
+dapls_modify_qp_state_to_rts (
+        ib_qp_handle_t);
+
+extern void
+dapli_ibal_ca_async_error_callback(
+        IN ib_async_event_rec_t* p_err_rec );
+
+extern dapl_ibal_port_t *
+dapli_ibal_get_port (
+        IN dapl_ibal_ca_t   *p_ca,
+        IN uint8_t          port_num);
+
+extern int32_t dapls_ib_init (void);
+extern int32_t dapls_ib_release (void);
+
+extern dapl_ibal_evd_cb_t *
+dapli_find_evd_cb_by_context(
+        IN void             *context,
+        IN dapl_ibal_ca_t   *ca);
+
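+/*
+ * Illustrative sketch (not compiled): the intended use of the
+ * reference-counting helpers above. The destructor runs exactly once,
+ * when the last REMOVE_REFERENCE() drops the count to zero. The
+ * example_* names are assumptions for the example only.
+ */
+#if 0
+static void example_destroy_ca (void *context)
+{
+    dapl_os_free (context, sizeof (dapl_ibal_ca_t));
+}
+
+static void example_refs (dapl_ibal_ca_t *p_ca)
+{
+    INIT_REFERENCE (&p_ca->refs, 1, p_ca, example_destroy_ca);
+    TAKE_REFERENCE (&p_ca->refs);       /* count: 2 */
+    REMOVE_REFERENCE (&p_ca->refs);     /* count: 1 */
+    REMOVE_REFERENCE (&p_ca->refs);     /* count: 0 -> destructor runs */
+}
+#endif
+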
+extern IB_HCA_NAME +dapl_ib_convert_name( + IN char *name); + +#endif /* _DAPL_IBAL_UTIL_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/include/dapl.h b/branches/Ndi/ulp/dapl/dapl/include/dapl.h new file mode 100644 index 00000000..2af45256 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/include/dapl.h @@ -0,0 +1,1042 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dapl.h + * + * PURPOSE: defines common data structures for the DAPL reference implemenation + * + * Description: This file describes the working data structures used within + * DAPL RI. + * + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_H_ +#define _DAPL_H_ + +#include +#include +#include "dapl_osd.h" +#include "dapl_debug.h" + +#ifdef IBAPI +#include "dapl_ibapi_util.h" +#elif VAPI +#include "dapl_vapi_util.h" +#else +#include "dapl_ibal_util.h" +#endif + +/********************************************************************* + * * + * Enumerations * + * * + *********************************************************************/ + +typedef enum dapl_magic +{ + /* magic number values for verification & debug */ + DAPL_MAGIC_IA = 0xCafeF00d, + DAPL_MAGIC_EVD = 0xFeedFace, + DAPL_MAGIC_EP = 0xDeadBabe, + DAPL_MAGIC_LMR = 0xBeefCafe, + DAPL_MAGIC_RMR = 0xABadCafe, + DAPL_MAGIC_PZ = 0xDeafBeef, + DAPL_MAGIC_PSP = 0xBeadeD0c, + DAPL_MAGIC_RSP = 0xFab4Feed, + DAPL_MAGIC_CR = 0xBe12Cee1, + DAPL_MAGIC_CR_DESTROYED = 0xB12bDead, + DAPL_MAGIC_CNO = 0xDeadF00d, + DAPL_MAGIC_EP_EXIT = 0xBabeDead, + DAPL_MAGIC_INVALID = 0xFFFFFFFF +} DAPL_MAGIC; + +typedef enum dapl_evd_state +{ + DAPL_EVD_STATE_TERMINAL, + DAPL_EVD_STATE_INITIAL, + DAPL_EVD_STATE_OPEN, + DAPL_EVD_STATE_WAITED, + DAPL_EVD_STATE_DEAD = 0xDEAD +} DAPL_EVD_STATE; + +typedef enum dapl_evd_completion +{ + DAPL_EVD_STATE_INIT, + DAPL_EVD_STATE_SOLICITED_WAIT, + DAPL_EVD_STATE_THRESHOLD, + DAPL_EVD_STATE_UNSIGNALLED +} DAPL_EVD_COMPLETION; + +typedef enum dapl_cno_state +{ + DAPL_CNO_STATE_UNTRIGGERED, + DAPL_CNO_STATE_TRIGGERED, + DAPL_CNO_STATE_DEAD = 0xDeadFeed, +} DAPL_CNO_STATE; + +typedef enum dapl_qp_state +{ + DAPL_QP_STATE_UNCONNECTED, + DAPL_QP_STATE_RESERVED, + DAPL_QP_STATE_PASSIVE_CONNECTION_PENDING, + DAPL_QP_STATE_ACTIVE_CONNECTION_PENDING, + DAPL_QP_STATE_TENTATIVE_CONNECTION_PENDING, + DAPL_QP_STATE_CONNECTED, + DAPL_QP_STATE_DISCONNECT_PENDING, + DAPL_QP_STATE_ERROR, + DAPL_QP_STATE_NOT_REUSABLE, + 
DAPL_QP_STATE_FREE +} DAPL_QP_STATE; + + +/********************************************************************* + * * + * Constants * + * * + *********************************************************************/ + +/* + * number of HCAs allowed + */ +#define DAPL_MAX_HCA_COUNT 4 + +/* + * Configures the RMR bind evd restriction + */ + +#define DAPL_RMR_BIND_EVD_RESTRICTION DAT_RMR_EVD_SAME_AS_REQUEST_EVD + +/* + * special qp_state indicating the EP does not have a QP attached yet + */ +#define DAPL_QP_STATE_UNATTACHED 0xFFF0 + +#define DAPL_MAX_PRIVATE_DATA_SIZE 256 + +/********************************************************************* + * * + * Macros * + * * + *********************************************************************/ + +/* + * Simple macro to verify a handle is bad. Conditions: + * - pointer is NULL + * - pointer is not word aligned + * - pointer's magic number is wrong + */ +#define DAPL_BAD_HANDLE(h, magicNum) ( \ + ((h) == NULL) || \ + ((DAT_UVERYLONG)(h) & 3) || \ + (((DAPL_HEADER *)(h))->magic != (magicNum))) + +#define DAPL_MIN(a, b) ((a < b) ? (a) : (b)) +#define DAPL_MAX(a, b) ((a > b) ? (a) : (b)) + +#if NDEBUG > 0 +#define DEBUG_IS_BAD_HANDLE(h, magicNum) (DAPL_BAD_HANDLE(h, magicNum)) +#else +#define DEBUG_IS_BAD_HANDLE(h, magicNum) (0) +#endif + +#define DAT_ERROR(Type, SubType) ((DAT_RETURN)(DAT_CLASS_ERROR | Type | SubType)) + +/********************************************************************* + * * + * Typedefs * + * * + *********************************************************************/ + +typedef struct dapl_llist_entry DAPL_LLIST_ENTRY; +typedef DAPL_LLIST_ENTRY * DAPL_LLIST_HEAD; +typedef struct dapl_ring_buffer DAPL_RING_BUFFER; +typedef struct dapl_cookie_buffer DAPL_COOKIE_BUFFER; + +typedef struct dapl_hash_table DAPL_HASH_TABLE; +typedef struct dapl_hash_table *DAPL_HASH_TABLEP; +typedef DAT_UINT64 DAPL_HASH_KEY; +typedef void * DAPL_HASH_DATA; + +typedef struct dapl_hca DAPL_HCA; + +typedef struct dapl_header DAPL_HEADER; + +typedef struct dapl_ia DAPL_IA; +typedef struct dapl_cno DAPL_CNO; +typedef struct dapl_evd DAPL_EVD; +typedef struct dapl_ep DAPL_EP; +typedef struct dapl_pz DAPL_PZ; +typedef struct dapl_lmr DAPL_LMR; +typedef struct dapl_rmr DAPL_RMR; +typedef struct dapl_sp DAPL_SP; +typedef struct dapl_cr DAPL_CR; + +typedef struct dapl_cookie DAPL_COOKIE; +typedef struct dapl_dto_cookie DAPL_DTO_COOKIE; +typedef struct dapl_rmr_cookie DAPL_RMR_COOKIE; + +typedef struct dapl_private DAPL_PRIVATE; + +typedef void (*DAPL_CONNECTION_STATE_HANDLER) ( + IN DAPL_EP *, + IN ib_cm_events_t, + IN const void *, + OUT DAT_EVENT *); + + +/********************************************************************* + * * + * Structures * + * * + *********************************************************************/ + +struct dapl_llist_entry +{ + struct dapl_llist_entry *flink; + struct dapl_llist_entry *blink; + void *data; + DAPL_LLIST_HEAD *list_head; /* for consistency checking */ +}; + +struct dapl_ring_buffer +{ + void **base; /* base of element array */ + DAT_COUNT lim; /* mask, number of entries - 1 */ + DAPL_ATOMIC head; /* head pointer index */ + DAPL_ATOMIC tail; /* tail pointer index */ +}; + +struct dapl_cookie_buffer +{ + DAPL_OS_LOCK lock; + DAPL_COOKIE *pool; + DAT_COUNT pool_size; + DAPL_ATOMIC head; + DAPL_ATOMIC tail; +}; + +struct dapl_hca +{ + DAPL_OS_LOCK lock; + DAPL_LLIST_HEAD ia_list_head; + DAPL_EVD *async_evd; + DAPL_EVD *async_error_evd; + DAT_SOCK_ADDR6 hca_address; /* local address of HCA*/ + /* Values specific to IB OS 
API */ + IB_HCA_NAME name; + ib_hca_handle_t ib_hca_handle; + DAPL_ATOMIC handle_ref_count; /* count of ia_opens on handle */ + ib_hca_port_t port_num; /* number of physical port */ + ib_uint32_t partition_max; + ib_guid_t node_GUID; + ib_lid_t lid; + ib_cqd_handle_t ib_cqd_handle; /* cq domain handle */ + ib_cq_handle_t null_ib_cq_handle; /* CQ handle with 0 entries */ + /* Memory Subsystem Support */ + DAPL_HASH_TABLE *lmr_hash_table; + /* Limits & useful HCA attributes */ + DAT_IA_ATTR ia_attr; + /* Name service support */ + void *name_service_handle; /* handle to name service */ +}; + +/* DAPL Objects always have the following header */ +struct dapl_header +{ + DAT_PROVIDER *provider; /* required by DAT - must be first */ + DAPL_MAGIC magic; /* magic number for verification */ + DAT_HANDLE_TYPE handle_type; /* struct type */ + DAPL_IA *owner_ia; /* ia which owns this stuct */ + DAPL_LLIST_ENTRY ia_list_entry; /* link entry on ia struct */ + DAT_CONTEXT user_context; /* user context - opaque to DAPL */ + DAPL_OS_LOCK lock; /* lock - in header for easier macros */ +}; + +/* DAPL_IA maps to DAT_IA_HANDLE */ +struct dapl_ia +{ + DAPL_HEADER header; + DAPL_HCA *hca_ptr; + DAPL_EVD *async_error_evd; + DAT_BOOLEAN cleanup_async_error_evd; + + DAPL_LLIST_ENTRY hca_ia_list_entry; /* HCAs list of IAs */ + DAPL_LLIST_HEAD ep_list_head; /* EP queue */ + DAPL_LLIST_HEAD lmr_list_head; /* LMR queue */ + DAPL_LLIST_HEAD rmr_list_head; /* RMR queue */ + DAPL_LLIST_HEAD pz_list_head; /* PZ queue */ + DAPL_LLIST_HEAD evd_list_head; /* EVD queue */ + DAPL_LLIST_HEAD cno_list_head; /* CNO queue */ + DAPL_LLIST_HEAD psp_list_head; /* PSP queue */ + DAPL_LLIST_HEAD rsp_list_head; /* RSP queue */ +}; + +/* DAPL_CNO maps to DAT_CNO_HANDLE */ +struct dapl_cno +{ + DAPL_HEADER header; + + /* A CNO cannot be freed while it is referenced elsewhere. */ + DAPL_ATOMIC cno_ref_count; + DAPL_CNO_STATE cno_state; + + DAT_COUNT cno_waiters; + DAPL_EVD *cno_evd_triggered; + DAT_OS_WAIT_PROXY_AGENT cno_wait_agent; + + DAPL_OS_WAIT_OBJECT cno_wait_object; +}; + +/* DAPL_EVD maps to DAT_EVD_HANDLE */ +struct dapl_evd +{ + DAPL_HEADER header; + + DAPL_EVD_STATE evd_state; + DAT_EVD_FLAGS evd_flags; + DAT_BOOLEAN evd_enabled; /* For attached CNO. */ + DAT_BOOLEAN evd_waitable; /* EVD state. */ + + /* Derived from evd_flags; see dapls_evd_internal_create. */ + DAT_BOOLEAN evd_producer_locking_needed; + + /* Every EVD has a CQ unless it is a SOFTWARE_EVENT only EVD */ + ib_cq_handle_t ib_cq_handle; + + /* Mellanox Specific completion handle for registration/de-registration */ + ib_comp_handle_t ib_comp_handle; + + /* An Event Dispatcher cannot be freed while + * it is referenced elsewhere. + */ + DAPL_ATOMIC evd_ref_count; + + /* Set if there has been a catastrophic overflow */ + DAT_BOOLEAN catastrophic_overflow; + + /* the actual events */ + DAT_COUNT qlen; + DAT_EVENT *events; + DAPL_RING_BUFFER free_event_queue; + DAPL_RING_BUFFER pending_event_queue; + + /* CQ Completions are not placed into 'deferred_events' + ** rather they are simply left on the Completion Queue + ** and the fact that there was a notification is flagged. 
+     */
+    DAT_BOOLEAN             cq_notified;
+    DAPL_OS_TICKS           cq_notified_when;
+    cl_waitobj_handle_t     cq_wait_obj_handle;
+
+    DAT_COUNT               cno_active_count;
+    DAPL_CNO                *cno_ptr;
+
+    DAPL_OS_WAIT_OBJECT     wait_object;
+    DAT_COUNT               threshold;
+    DAPL_EVD_COMPLETION     completion_type;
+};
+
+
+
+/* uDAPL timer entry, used to queue timeouts */
+struct dapl_timer_entry
+{
+    DAPL_LLIST_ENTRY        list_entry;     /* link entry on ia struct */
+    DAPL_OS_TIMEVAL         expires;
+    void                    (*function)(void*);
+    void                    *data;
+};
+
+#ifdef DAPL_DBG_IO_TRC
+
+#define DBG_IO_TRC_QLEN     32      /* length of trace buffer */
+#define DBG_IO_TRC_IOV      3       /* iov elements we keep track of */
+
+struct io_buf_track
+{
+    ib_send_op_type_t       op_type;
+    DAPL_COOKIE             *cookie;
+    DAT_LMR_TRIPLET         iov[DBG_IO_TRC_IOV];
+    DAT_RMR_TRIPLET         remote_iov;
+    unsigned int            done;   /* count to track completion ordering */
+    int                     status;
+    void                    *wqe;
+};
+
+#endif /* DAPL_DBG_IO_TRC */
+
+/* DAPL_EP maps to DAT_EP_HANDLE */
+struct dapl_ep
+{
+    DAPL_HEADER             header;
+    /* What the DAT Consumer asked for */
+    DAT_EP_PARAM            param;
+
+    /* The RC Queue Pair (IBM OS API) */
+    ib_qp_handle_t          qp_handle;
+    unsigned int            qpn;        /* qp number */
+    ib_qp_state_t           qp_state;
+
+    /* communications manager handle (IBM OS API) */
+    ib_cm_handle_t          cm_handle;
+    /* store the remote IA address here, reference from the param
+     * struct which only has a pointer, no storage
+     */
+    DAT_SOCK_ADDR6          remote_ia_address;
+
+    /* For passive connections we maintain a back pointer to the CR */
+    void *                  cr_ptr;
+
+    /* pointer to connection timer, if set */
+    struct dapl_timer_entry *cxn_timer;
+
+    /* private data container */
+    unsigned char           private_data[DAPL_MAX_PRIVATE_DATA_SIZE];
+
+    /* DTO data */
+    DAPL_ATOMIC             req_count;
+    DAPL_ATOMIC             recv_count;
+
+    DAPL_COOKIE_BUFFER      req_buffer;
+    DAPL_COOKIE_BUFFER      recv_buffer;
+
+    ib_data_segment_t       *recv_iov;
+    DAT_COUNT               recv_iov_num;
+
+    ib_data_segment_t       *send_iov;
+    DAT_COUNT               send_iov_num;
+    DAT_BOOLEAN             recv_discreq;
+    DAT_BOOLEAN             sent_discreq;
+    DAT_BOOLEAN             viol_order;
+    DAPL_RING_BUFFER        viol_event_queue;
+#ifdef DAPL_DBG_IO_TRC
+    int                     ibt_dumped;
+    struct io_buf_track     *ibt_base;
+    DAPL_RING_BUFFER        ibt_queue;
+#endif /* DAPL_DBG_IO_TRC */
+};
+
+/* DAPL_PZ maps to DAT_PZ_HANDLE */
+struct dapl_pz
+{
+    DAPL_HEADER             header;
+    ib_pd_handle_t          pd_handle;
+    DAPL_ATOMIC             pz_ref_count;
+};
+
+/* DAPL_LMR maps to DAT_LMR_HANDLE */
+struct dapl_lmr
+{
+    DAPL_HEADER             header;
+    DAT_LMR_PARAM           param;
+    ib_mr_handle_t          mr_handle;
+    DAPL_ATOMIC             lmr_ref_count;
+    ib_shmid_t              ib_shmid;
+};
+
+/* DAPL_RMR maps to DAT_RMR_HANDLE */
+struct dapl_rmr
+{
+    DAPL_HEADER             header;
+    DAT_RMR_PARAM           param;
+    DAPL_EP                 *ep;
+    DAPL_PZ                 *pz;
+    DAPL_LMR                *lmr;
+    ib_mw_handle_t          mw_handle;
+};
+
+/* SP types, indicating the state and queue */
+typedef enum dapl_sp_state
+{
+    DAPL_SP_STATE_FREE,
+    DAPL_SP_STATE_PSP_LISTENING,
+    DAPL_SP_STATE_PSP_PENDING,
+    DAPL_SP_STATE_RSP_LISTENING,
+    DAPL_SP_STATE_RSP_PENDING
+} DAPL_SP_STATE;
+
+/* DAPL_SP maps to DAT_PSP_HANDLE and DAT_RSP_HANDLE */
+struct dapl_sp
+{
+    DAPL_HEADER             header;
+    DAPL_SP_STATE           state;      /* type and queue of the SP */
+
+    /* PSP/RSP PARAM fields */
+    DAT_IA_HANDLE           ia_handle;
+    DAT_CONN_QUAL           conn_qual;
+    DAT_EVD_HANDLE          evd_handle;
+    DAT_PSP_FLAGS           psp_flags;
+    DAT_EP_HANDLE           ep_handle;
+
+    /* wait object needed for SP destroy */
+    DAPL_OS_WAIT_OBJECT     wait_object;
+
+    /* maintenance fields */
+    DAT_BOOLEAN             listening;      /* PSP is registered & active */
+    ib_cm_srvc_handle_t     cm_srvc_handle; /* Used by VAPI CM */
+    DAPL_LLIST_HEAD         cr_list_head;   /* CR pending queue */
+    DAT_COUNT               cr_list_count;  /* count of CRs on queue */
+};
+
+/* DAPL_CR maps to DAT_CR_HANDLE */
+struct dapl_cr
+{
+    DAPL_HEADER             header;
+
+    /* for convenience the data is kept as a DAT_CR_PARAM.
+     * however, the "local_endpoint" field is always NULL
+     * so this wastes a pointer. This is probably ok to
+     * simplify code, especially dat_cr_query.
+     */
+    DAT_CR_PARAM            param;
+    /* IB specific fields */
+    ib_cm_handle_t          ib_cm_handle;
+
+    DAT_SOCK_ADDR6          remote_ia_address;
+    /* Assuming that the maximum private data size is small.
+     * If it gets large, use of a pointer may be appropriate.
+     */
+    unsigned char           private_data[DAPL_MAX_PRIVATE_DATA_SIZE];
+    /*
+     * Need to be able to associate the CR back to the PSP for
+     * dapl_cr_reject.
+     */
+    DAPL_SP                 *sp_ptr;
+};
+
+typedef enum dapl_dto_type
+{
+    DAPL_DTO_TYPE_SEND,
+    DAPL_DTO_TYPE_RECV,
+    DAPL_DTO_TYPE_RDMA_WRITE,
+    DAPL_DTO_TYPE_RDMA_READ,
+} DAPL_DTO_TYPE;
+
+typedef enum dapl_cookie_type
+{
+    DAPL_COOKIE_TYPE_NULL,
+    DAPL_COOKIE_TYPE_DTO,
+    DAPL_COOKIE_TYPE_RMR,
+} DAPL_COOKIE_TYPE;
+
+/* DAPL_DTO_COOKIE used as context for DTO WQEs */
+struct dapl_dto_cookie
+{
+    DAPL_DTO_TYPE           type;
+    DAT_DTO_COOKIE          cookie;
+    DAT_COUNT               size;   /* used for SEND and RDMA write */
+};
+
+/* DAPL_RMR_COOKIE used as context for bind WQEs */
+struct dapl_rmr_cookie
+{
+    DAPL_RMR                *rmr;
+    DAT_RMR_COOKIE          cookie;
+};
+
+/* DAPL_COOKIE used as context for WQEs */
+struct dapl_cookie
+{
+    DAPL_COOKIE_TYPE        type;   /* Must be first, to define struct. */
+    DAPL_EP                 *ep;
+    DAT_COUNT               index;
+    union
+    {
+        DAPL_DTO_COOKIE     dto;
+        DAPL_RMR_COOKIE     rmr;
+    } val;
+};
+
+/* DAPL_PRIVATE used to pass private data in a connection */
+struct dapl_private
+{
+#ifdef NO_NAME_SERVICE
+    DAT_SOCK_ADDR6          hca_address;    /* local address of HCA*/
+#endif
+    unsigned char           private_data[DAPL_MAX_PRIVATE_DATA_SIZE];
+};
+
+/*
+ * Private Data operations. Used to obtain the size of the private
+ * data from the provider layer.
+ */
+typedef enum dapl_private_data_op
+{
+    DAPL_PDATA_CONN_REQ  = 0,   /* connect request */
+    DAPL_PDATA_CONN_REP  = 1,   /* connect reply */
+    DAPL_PDATA_CONN_REJ  = 2,   /* connect reject */
+    DAPL_PDATA_CONN_DREQ = 3,   /* disconnect request */
+    DAPL_PDATA_CONN_DREP = 4,   /* disconnect reply */
+} DAPL_PDATA_OP;
+
+
+/*
+ * Generic HCA name field
+ */
+#define DAPL_HCA_NAME_MAX_LEN 260
+typedef char DAPL_HCA_NAME[DAPL_HCA_NAME_MAX_LEN+1];
+
+#ifdef NO_NAME_SERVICE
+
+/*
+ * Simple mapping table to match IP addresses to GIDs. Loaded
+ * by dapl_init.
+ */
+typedef struct _dapl_gid_map_table {
+    uint32_t        ip_address;
+    GID             gid;
+} DAPL_GID_MAP;
+
+#endif /* NO_NAME_SERVICE */
+
+/*
+ * IBTA defined reason for reject message: See IBTA 1.1 specification,
+ * 12.6.7.2 REJECTION REASON section.
+ */ +#define IB_CM_REJ_REASON_CONSUMER_REJ 0x001C + + +#if defined(DAPL_DBG_IO_TRC) +/********************************************************************* + * * + * Debug I/O tracing support prototypes * + * * + *********************************************************************/ +/* + * I/O tracing support + */ +void dapls_io_trc_alloc ( + DAPL_EP *ep_ptr); + +void dapls_io_trc_update_completion ( + DAPL_EP *ep_ptr, + DAPL_COOKIE *cookie, + ib_uint32_t ib_status ); + +void dapls_io_trc_dump ( + DAPL_EP *ep_ptr, + ib_work_completion_t *cqe_ptr, + ib_uint32_t ib_status); + +#else /* DAPL_DBG_IO_TRC */ + +#define dapls_io_trc_alloc(a) +#define dapls_io_trc_update_completion(a, b, c) +#define dapls_io_trc_dump(a, b, c) + +#endif /* DAPL_DBG_IO_TRC */ + + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +/* + * DAT Mandated functions + */ + +extern DAT_RETURN DAT_API dapl_ia_open ( + IN const DAT_NAME_PTR, /* name */ + IN DAT_COUNT, /* asynch_evd_qlen */ + INOUT DAT_EVD_HANDLE *, /* asynch_evd_handle */ + OUT DAT_IA_HANDLE * ); /* ia_handle */ + +extern DAT_RETURN DAT_API dapl_ia_close ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CLOSE_FLAGS ); /* ia_flags */ + + +extern DAT_RETURN DAT_API dapl_ia_query ( + IN DAT_IA_HANDLE, /* ia handle */ + OUT DAT_EVD_HANDLE *, /* async_evd_handle */ + IN DAT_IA_ATTR_MASK, /* ia_params_mask */ + OUT DAT_IA_ATTR *, /* ia_params */ + IN DAT_PROVIDER_ATTR_MASK, /* provider_params_mask */ + OUT DAT_PROVIDER_ATTR * ); /* provider_params */ + + +/* helper functions */ + +extern DAT_RETURN DAT_API dapl_set_consumer_context ( + IN DAT_HANDLE, /* dat handle */ + IN DAT_CONTEXT); /* context */ + +extern DAT_RETURN DAT_API dapl_get_consumer_context ( + IN DAT_HANDLE, /* dat handle */ + OUT DAT_CONTEXT * ); /* context */ + +extern DAT_RETURN DAT_API dapl_get_handle_type ( + IN DAT_HANDLE, + OUT DAT_HANDLE_TYPE * ); + +/* CNO functions */ + +extern DAT_RETURN DAT_API dapl_cno_create ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_OS_WAIT_PROXY_AGENT, /* agent */ + OUT DAT_CNO_HANDLE *); /* cno_handle */ + +extern DAT_RETURN DAT_API dapl_cno_modify_agent ( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_OS_WAIT_PROXY_AGENT); /* agent */ + +extern DAT_RETURN DAT_API dapl_cno_query ( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_CNO_PARAM_MASK, /* cno_param_mask */ + OUT DAT_CNO_PARAM * ); /* cno_param */ + +extern DAT_RETURN DAT_API dapl_cno_free ( + IN DAT_CNO_HANDLE); /* cno_handle */ + +extern DAT_RETURN DAT_API dapl_cno_wait ( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_TIMEOUT, /* timeout */ + OUT DAT_EVD_HANDLE *); /* evd_handle */ + +/* CR Functions */ + +extern DAT_RETURN DAT_API dapl_cr_query ( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_CR_PARAM_MASK, /* cr_args_mask */ + OUT DAT_CR_PARAM * ); /* cwr_args */ + +extern DAT_RETURN DAT_API dapl_cr_accept ( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* private_data_size */ + IN const DAT_PVOID ); /* private_data */ + +extern DAT_RETURN DAT_API dapl_cr_reject ( + IN DAT_CR_HANDLE ); + +extern DAT_RETURN DAT_API dapl_cr_handoff ( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_CONN_QUAL); /* handoff */ + +/* EVD Functions */ + +extern DAT_RETURN DAT_API dapl_evd_create ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_COUNT, /* evd_min_qlen */ + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_EVD_FLAGS, /* evd_flags */ + OUT DAT_EVD_HANDLE 
* );                          /* evd_handle */
+
+extern DAT_RETURN DAT_API dapl_evd_query (
+        IN      DAT_EVD_HANDLE,         /* evd_handle */
+        IN      DAT_EVD_PARAM_MASK,     /* evd_args_mask */
+        OUT     DAT_EVD_PARAM * );      /* evd_args */
+
+extern DAT_RETURN DAT_API dapl_evd_modify_cno (
+        IN      DAT_EVD_HANDLE,         /* evd_handle */
+        IN      DAT_CNO_HANDLE);        /* cno_handle */
+
+extern DAT_RETURN DAT_API dapl_evd_enable (
+        IN      DAT_EVD_HANDLE);        /* evd_handle */
+
+extern DAT_RETURN DAT_API dapl_evd_disable (
+        IN      DAT_EVD_HANDLE);        /* evd_handle */
+
+extern DAT_RETURN DAT_API dapl_evd_wait (
+        IN      DAT_EVD_HANDLE,         /* evd_handle */
+        IN      DAT_TIMEOUT,            /* timeout */
+        IN      DAT_COUNT,              /* threshold */
+        OUT     DAT_EVENT *,            /* event */
+        OUT     DAT_COUNT *);           /* nmore */
+
+extern DAT_RETURN DAT_API dapl_evd_resize (
+        IN      DAT_EVD_HANDLE,         /* evd_handle */
+        IN      DAT_COUNT );            /* evd_qlen */
+
+extern DAT_RETURN DAT_API dapl_evd_post_se (
+                DAT_EVD_HANDLE,         /* evd_handle */
+                const DAT_EVENT * );    /* event */
+
+extern DAT_RETURN DAT_API dapl_evd_dequeue (
+        IN      DAT_EVD_HANDLE,         /* evd_handle */
+        OUT     DAT_EVENT * );          /* event */
+
+extern DAT_RETURN DAT_API dapl_evd_free (
+        IN      DAT_EVD_HANDLE );
+
+extern DAT_RETURN DAT_API
+dapl_evd_set_unwaitable (
+        IN      DAT_EVD_HANDLE  evd_handle );
+
+extern DAT_RETURN DAT_API
+dapl_evd_clear_unwaitable (
+        IN      DAT_EVD_HANDLE  evd_handle );
+
+/* EP functions */
+
+extern DAT_RETURN DAT_API dapl_ep_create (
+        IN      DAT_IA_HANDLE,          /* ia_handle */
+        IN      DAT_PZ_HANDLE,          /* pz_handle */
+        IN      DAT_EVD_HANDLE,         /* in_dto_completion_evd_handle */
+        IN      DAT_EVD_HANDLE,         /* out_dto_completion_evd_handle */
+        IN      DAT_EVD_HANDLE,         /* connect_evd_handle */
+        IN      const DAT_EP_ATTR *,    /* ep_parameters */
+        OUT     DAT_EP_HANDLE * );      /* ep_handle */
+
+extern DAT_RETURN DAT_API dapl_ep_query (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_EP_PARAM_MASK,      /* ep_args_mask */
+        OUT     DAT_EP_PARAM * );       /* ep_args */
+
+extern DAT_RETURN DAT_API dapl_ep_modify (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_EP_PARAM_MASK,      /* ep_args_mask */
+        IN      const DAT_EP_PARAM * ); /* ep_args */
+
+extern DAT_RETURN DAT_API dapl_ep_connect (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_IA_ADDRESS_PTR,     /* remote_ia_address */
+        IN      DAT_CONN_QUAL,          /* remote_conn_qual */
+        IN      DAT_TIMEOUT,            /* timeout */
+        IN      DAT_COUNT,              /* private_data_size */
+        IN      const DAT_PVOID,        /* private_data */
+        IN      DAT_QOS,                /* quality_of_service */
+        IN      DAT_CONNECT_FLAGS );    /* connect_flags */
+
+extern DAT_RETURN DAT_API dapl_ep_dup_connect (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_EP_HANDLE,          /* ep_dup_handle */
+        IN      DAT_TIMEOUT,            /* timeout*/
+        IN      DAT_COUNT,              /* private_data_size */
+        IN      const DAT_PVOID,        /* private_data */
+        IN      DAT_QOS);               /* quality_of_service */
+
+extern DAT_RETURN DAT_API dapl_ep_disconnect (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_CLOSE_FLAGS );      /* disconnect_flags */
+
+extern DAT_RETURN DAT_API dapl_ep_post_send (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_COUNT,              /* num_segments */
+        IN      DAT_LMR_TRIPLET *,      /* local_iov */
+        IN      DAT_DTO_COOKIE,         /* user_cookie */
+        IN      DAT_COMPLETION_FLAGS ); /* completion_flags */
+
+extern DAT_RETURN DAT_API dapl_ep_post_recv (
+        IN      DAT_EP_HANDLE,          /* ep_handle */
+        IN      DAT_COUNT,              /* num_segments */
+        IN      DAT_LMR_TRIPLET *,      /* local_iov */
+        IN      DAT_DTO_COOKIE,         /* user_cookie */
+        IN      DAT_COMPLETION_FLAGS ); /* completion_flags */
+
+extern DAT_RETURN DAT_API
dapl_ep_post_rdma_read ( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN const DAT_RMR_TRIPLET *, /* remote_iov */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +extern DAT_RETURN DAT_API dapl_ep_post_rdma_write ( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN const DAT_RMR_TRIPLET *, /* remote_iov */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +extern DAT_RETURN DAT_API dapl_ep_get_status ( + IN DAT_EP_HANDLE, /* ep_handle */ + OUT DAT_EP_STATE *, /* ep_state */ + OUT DAT_BOOLEAN *, /* in_dto_idle */ + OUT DAT_BOOLEAN * ); /* out_dto_idle */ + +extern DAT_RETURN DAT_API dapl_ep_free ( + IN DAT_EP_HANDLE); /* ep_handle */ + +extern DAT_RETURN DAT_API dapl_ep_reset ( + IN DAT_EP_HANDLE); /* ep_handle */ + +/* LMR functions */ + +extern DAT_RETURN DAT_API dapl_lmr_create ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_MEM_TYPE, /* mem_type */ + IN DAT_REGION_DESCRIPTION, /* region_description */ + IN DAT_VLEN, /* length */ + IN DAT_PZ_HANDLE, /* pz_handle */ + IN DAT_MEM_PRIV_FLAGS, /* privileges */ + OUT DAT_LMR_HANDLE *, /* lmr_handle */ + OUT DAT_LMR_CONTEXT *, /* lmr_context */ + OUT DAT_RMR_CONTEXT *, /* rmr_context */ + OUT DAT_VLEN *, /* registered_length */ + OUT DAT_VADDR * ); /* registered_address */ + +extern DAT_RETURN DAT_API dapl_lmr_query ( + IN DAT_LMR_HANDLE, + IN DAT_LMR_PARAM_MASK, + OUT DAT_LMR_PARAM *); + +extern DAT_RETURN DAT_API dapl_lmr_free ( + IN DAT_LMR_HANDLE); + +/* RMR Functions */ + +extern DAT_RETURN DAT_API dapl_rmr_create ( + IN DAT_PZ_HANDLE, /* pz_handle */ + OUT DAT_RMR_HANDLE *); /* rmr_handle */ + +extern DAT_RETURN DAT_API dapl_rmr_query ( + IN DAT_RMR_HANDLE, /* rmr_handle */ + IN DAT_RMR_PARAM_MASK, /* rmr_args_mask */ + OUT DAT_RMR_PARAM *); /* rmr_args */ + +extern DAT_RETURN DAT_API dapl_rmr_bind ( + IN DAT_RMR_HANDLE, /* rmr_handle */ + IN const DAT_LMR_TRIPLET *, /* lmr_triplet */ + IN DAT_MEM_PRIV_FLAGS, /* mem_priv */ + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_RMR_COOKIE, /* user_cookie */ + IN DAT_COMPLETION_FLAGS, /* completion_flags */ + INOUT DAT_RMR_CONTEXT * ); /* context */ + +extern DAT_RETURN DAT_API dapl_rmr_free ( + IN DAT_RMR_HANDLE); + +/* PSP Functions */ + +extern DAT_RETURN DAT_API dapl_psp_create ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CONN_QUAL, /* conn_qual */ + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_PSP_FLAGS, /* psp_flags */ + OUT DAT_PSP_HANDLE * ); /* psp_handle */ + +extern DAT_RETURN DAT_API dapl_psp_create_any ( + IN DAT_IA_HANDLE, /* ia_handle */ + OUT DAT_CONN_QUAL *, /* conn_qual */ + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_PSP_FLAGS, /* psp_flags */ + OUT DAT_PSP_HANDLE *); /* psp_handle */ + +extern DAT_RETURN DAT_API dapl_psp_query ( + IN DAT_PSP_HANDLE, + IN DAT_PSP_PARAM_MASK, + OUT DAT_PSP_PARAM * ); + +extern DAT_RETURN DAT_API dapl_psp_free ( + IN DAT_PSP_HANDLE ); /* psp_handle */ + +/* RSP Functions */ + +extern DAT_RETURN DAT_API dapl_rsp_create ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CONN_QUAL, /* conn_qual */ + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_EVD_HANDLE, /* evd_handle */ + OUT DAT_RSP_HANDLE * ); /* rsp_handle */ + +extern DAT_RETURN DAT_API dapl_rsp_query ( + IN DAT_RSP_HANDLE, + IN DAT_RSP_PARAM_MASK, + OUT DAT_RSP_PARAM * ); + +extern DAT_RETURN DAT_API dapl_rsp_free ( + IN DAT_RSP_HANDLE ); /* rsp_handle */ + +/* PZ Functions */ + 
+extern DAT_RETURN DAT_API dapl_pz_create (
+ IN DAT_IA_HANDLE, /* ia_handle */
+ OUT DAT_PZ_HANDLE * ); /* pz_handle */
+
+extern DAT_RETURN DAT_API dapl_pz_query (
+ IN DAT_PZ_HANDLE, /* pz_handle */
+ IN DAT_PZ_PARAM_MASK, /* pz_args_mask */
+ OUT DAT_PZ_PARAM * ); /* pz_args */
+
+extern DAT_RETURN DAT_API dapl_pz_free (
+ IN DAT_PZ_HANDLE ); /* pz_handle */
+
+/*
+ * DAPL internal utility function prototypes
+ */
+
+extern void dapl_llist_init_head (
+ DAPL_LLIST_HEAD * head);
+
+extern void dapl_llist_init_entry (
+ DAPL_LLIST_ENTRY * entry);
+
+extern DAT_BOOLEAN dapl_llist_is_empty (
+ DAPL_LLIST_HEAD * head);
+
+extern void dapl_llist_add_head (
+ DAPL_LLIST_HEAD * head,
+ DAPL_LLIST_ENTRY * entry,
+ void * data);
+
+extern void dapl_llist_add_tail (
+ DAPL_LLIST_HEAD * head,
+ DAPL_LLIST_ENTRY * entry,
+ void * data);
+
+extern void dapl_llist_add_entry (
+ DAPL_LLIST_HEAD * head,
+ DAPL_LLIST_ENTRY * entry,
+ DAPL_LLIST_ENTRY * new_entry,
+ void * data);
+
+extern void * dapl_llist_remove_head (
+ DAPL_LLIST_HEAD * head);
+
+extern void * dapl_llist_remove_tail (
+ DAPL_LLIST_HEAD * head);
+
+extern void * dapl_llist_remove_entry (
+ DAPL_LLIST_HEAD * head,
+ DAPL_LLIST_ENTRY * entry);
+
+extern void * dapl_llist_peek_head (
+ DAPL_LLIST_HEAD * head);
+
+extern void * dapl_llist_next_entry (
+ IN DAPL_LLIST_HEAD *head,
+ IN DAPL_LLIST_ENTRY *cur_ent);
+
+extern void dapl_llist_debug_print_list (
+ DAPL_LLIST_HEAD * head);
+
+
+#endif /* _DAPL_H_ */
diff --git a/branches/Ndi/ulp/dapl/dapl/include/dapl_debug.h b/branches/Ndi/ulp/dapl/dapl/include/dapl_debug.h
new file mode 100644
index 00000000..abb54b4e
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/include/dapl_debug.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_debug.h
+ *
+ * PURPOSE: defines common debugging flags & data for the DAPL reference
+ * implementation
+ *
+ * Description:
+ *
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _DAPL_DEBUG_H_
+#define _DAPL_DEBUG_H_
+
+/*
+ * Debug level switches
+ *
+ * Use these bits to enable various tracing/debug options. Each bit
+ * represents debugging in a particular subsystem or area of the code.
+ *
+ * The ERR bit should always be on unless someone disables it for a
+ * reason: The ERR flag is used sparingly and will print useful
+ * information if it fires.
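+ *
+ * Illustrative note: the provider's dapl_init() reads these switches
+ * from the environment, so, for example, DAPL_DBG_TYPE=0x0003 enables
+ * ERR|WARN from the enum below, and DAPL_DBG_DEST selects stdout
+ * and/or syslog from the destination enum below.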
+ */ +typedef enum +{ + DAPL_DBG_TYPE_ERR = 0x0001, + DAPL_DBG_TYPE_WARN = 0x0002, + DAPL_DBG_TYPE_EVD = 0x0004, + DAPL_DBG_TYPE_CM = 0x0008, + DAPL_DBG_TYPE_EP = 0x0010, + DAPL_DBG_TYPE_UTIL = 0x0020, + DAPL_DBG_TYPE_CALLBACK = 0x0040, + DAPL_DBG_TYPE_DTO_COMP_ERR = 0x0080, + DAPL_DBG_TYPE_API = 0x0100, + DAPL_DBG_TYPE_RTN = 0x0200, + DAPL_DBG_TYPE_EXCEPTION = 0x0400 +} DAPL_DBG_TYPE; + +typedef enum +{ + DAPL_DBG_DEST_STDOUT = 0x0001, + DAPL_DBG_DEST_SYSLOG = 0x0002, +} DAPL_DBG_DEST; + + +#if defined(DAPL_DBG) + +extern DAPL_DBG_TYPE g_dapl_dbg_type; +extern DAPL_DBG_DEST g_dapl_dbg_dest; + +#define dapl_dbg_log g_dapl_dbg_type==0 ? (void) 1 : dapl_internal_dbg_log +extern void dapl_internal_dbg_log ( DAPL_DBG_TYPE type, const char *fmt, ...); + +#else /* !DAPL_DBG */ +#define dapl_dbg_log +#endif /* !DAPL_DBG */ + +/* + * Counters + */ +#define DCNT_EP_CREATE 0 +#define DCNT_EP_FREE 1 +#define DCNT_EP_CONNECT 2 +#define DCNT_EP_DISCONNECT 3 +#define DCNT_POST_SEND 4 +#define DCNT_POST_RECV 5 +#define DCNT_POST_RDMA_WRITE 6 +#define DCNT_POST_RDMA_READ 7 +#define DCNT_EVD_CREATE 8 +#define DCNT_EVD_FREE 9 +#define DCNT_EVD_WAIT 10 +#define DCNT_EVD_WAIT_BLOCKED 11 +#define DCNT_EVD_WAIT_CMP_NTFY 12 +#define DCNT_EVD_DTO_CALLBACK 13 +#define DCNT_EVD_CONN_CALLBACK 14 +#define DCNT_EVD_DEQUEUE 15 +#define DCNT_EVD_DEQUEUE_POLL 16 +#define DCNT_EVD_DEQUEUE_FOUND 17 +#define DCNT_EVD_DEQUEUE_NOT_FOUND 18 +#define DCNT_TIMER_SET 19 +#define DCNT_TIMER_CANCEL 20 +#define DCNT_LAST_COUNTER 22 /* Always the last counter */ + +#if defined(DAPL_COUNTERS) +#include "dapl_counters.h" + +#define DAPL_CNTR(cntr) dapl_os_atomic_inc (&dapl_dbg_counters[cntr]); +#else + +#define DAPL_CNTR(cntr) +#define DAPL_COUNTERS_INIT() +#define DAPL_COUNTERS_NEW(__tag, __id) +#define DAPL_COUNTERS_RESET(__id, __incr) +#define DAPL_COUNTERS_INCR(__id, __incr) + +#endif /* DAPL_COUNTERS */ + + +#endif /* _DAPL_DEBUG_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/include/dapl_ipoib_names.h b/branches/Ndi/ulp/dapl/dapl/include/dapl_ipoib_names.h new file mode 100644 index 00000000..f0d117d8 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/include/dapl_ipoib_names.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: ipoib_naming.h
+ *
+ * PURPOSE: Defines flags and prototypes for IPoIB API
+ *
+ * Description:
+ * This defines a simple naming interface for discovering
+ * the IP addresses available to a provider, then a set
+ * of query mechanisms useful to map an IP address to
+ * a provider specific address; a GID in InfiniBand.
+ *
+ * NOTE: As implementations mature this may not be necessary.
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _IPOIB_NAMING_H_
+#define _IPOIB_NAMING_H_
+
+typedef enum _ipoib_port_num {
+ HCA_PORT_1= 1,
+ HCA_PORT_2,
+ HCA_PORT_ANY
+} IPOIB_PORT_NUM;
+
+typedef struct if_query_info
+{
+ uint64_t guid;
+ uint32_t port_num;
+ uint32_t state;
+}IF_QUERY_INFO;
+
+/***********************************************************************
+ * ipoib_enum_if()
+ *
+ * PURPOSE
+ * Returns count of IP interfaces.
+ *
+ * ARGUMENTS
+ * hca_index: index of HCA in the provider library. In general
+ * terms, the index represents the HCA number, e.g.
+ * 1 == First HCA, 2 == Second HCA, etc.
+ *
+ * port: an enum of
+ * HCA_PORT_1
+ * HCA_PORT_2
+ * HCA_PORT_ANY
+ * HCA_PORT_ANY enum value returns all IP instances assigned to the HCA.
+ *
+ * RETURNS
+ * count of IP interfaces supported on physical port
+ *
+ ***********************************************************************/
+int
+ipoib_enum_if(
+ IN uint32_t hca_index,
+ IN IPOIB_PORT_NUM port);
+
+
+/***********************************************************************
+ * ipoib_get_if()
+ *
+ * PURPOSE
+ * Returns array of IP Addresses of all instances. Port parameter may
+ * restrict instances of interest.
+ *
+ * ARGUMENTS
+ * hca_index: index of HCA in the provider library.
+ *
+ * port: IPOIB_PORT_NUM as described above
+ *
+ * ip_addr_list: pointer to user-allocated space in which an array of
+ * IP addresses found for this hca and port will be returned
+ *
+ * ip_addr_count: number of returned addresses
+ *
+ * RETURNS
+ * 0 for SUCCESS
+ * !0 for failure
+ *
+ ***********************************************************************/
+int
+ipoib_get_if(
+ IN uint32_t hca_index,
+ IN IPOIB_PORT_NUM port,
+ OUT struct sockaddr **ip_addr_list,
+ OUT int *ip_addr_count);
+
+/***********************************************************************
+ * ipoib_open_if()
+ *
+ * PURPOSE
+ * Returns a handle to this interface, to be used for subsequent
+ * operations
+ *
+ * ARGUMENTS
+ * ip_address: input IP address
+ *
+ * ipoib_handle: handle to be used in subsequent operations.
+ *
+ * RETURNS
+ * 0 for SUCCESS
+ * !0 for failure
+ *
+ ***********************************************************************/
+int
+ipoib_open_if(
+ IN struct sockaddr *ip_address,
+ OUT void *ipoib_handle);
+
+/***********************************************************************
+ * ipoib_query_if()
+ *
+ * PURPOSE
+ * ipoib_query_if returns information on the local ipoib_handle, such
+ * as the GID, port number, and IPoIB state.
+ *
+ * ARGUMENTS
+ * ipoib_handle: handle for instance
+ *
+ * if_qry_info: pointer to the IF_QUERY_INFO structure defined above,
+ * filled in on return.
+ *
+ * RETURNS
+ * 0 for SUCCESS
+ * !0 for failure
+ *
+ ***********************************************************************/
+int
+ipoib_query_if(
+ IN void *ipoib_handle,
+ OUT IF_QUERY_INFO *if_qry_info);
+
+/***********************************************************************
+ * ipoib_getaddrinfo()
+ *
+ * PURPOSE
+ * Obtain a GID from an IP Address. Used by the active side of
+ * a connection.
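+ *
+ * A hypothetical end-to-end sequence over this interface (a sketch
+ * only; error handling is elided and remote_ip is an address the
+ * application obtained elsewhere):
+ *
+ *   void *h; struct sockaddr *addrs[8]; int n; GID remote_gid;
+ *   n = ipoib_enum_if(1, HCA_PORT_ANY);
+ *   ipoib_get_if(1, HCA_PORT_ANY, addrs, &n);
+ *   ipoib_open_if(addrs[0], &h);
+ *   ipoib_getaddrinfo(h, remote_ip, NULL, NULL, &remote_gid);
+ *   ipoib_close(h);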
+ * + * The behavior of this routine is specified to provide control + * over the underlying implementation. + * Returns immediately if the remote information is available. If + * callback_routine_ptr is NULL then it will block until information is + * available or known to be unavailable. If callback_routine_ptr is + * specified then it will be invoked when remote information is + * available or known to be unavailable. Remote_Addr_info contains + * remote GID information. + * + * ARGUMENTS + * ipoib_handle: handle for instance + * + * remote_ip_address: IP address of remote instance + * + * callback_routine_ptr: routine to invoke for asynch callback. If + * NULL ipoib_getaddrinfo() will block. + * + * context: argument to pass to asynch callback_routine. + * + * Remote_Addr_info: Remote GID + * + * RETURNS + * 0 for SUCCESS + * !0 for failure + * + ***********************************************************************/ +int +ipoib_getaddrinfo( + IN void *ipoib_handle, + IN struct sockaddr *remote_ip_address, + IN void *callback_routine_ptr, + IN void *context, + OUT void *Remote_Addr_info ); + +/*********************************************************************** + * + * + * PURPOSE + * Obtain an IP Address from a GID. Used by the passive side of a + * connection. + * + * The behavior of this routine is specified to provide control over + * the underlying implementation. Returns immediately if the remote + * information is available. If callback_routine_ptr is NULL then it + * will block until information is available or known to be + * unavailable. If callback_routine_ptr is specified then it will be + * invoked when remote information is available or known to be + * unavailable. + * + * ARGUMENTS + * ipoib_handle: handle for instance + * + * remote_gidAddr: Remote GID. It is not defined on how the application + * will obtain this GID from the connection manager. + * + * callback_routine_ptr: + * routine to invoke for async callback. If NULL + * ipoib_getgidinfo() will block. + * + * context: argument to pass to asynch callback_routine. + * + * remote_ip_address: + * IP address of remote instance + * + * RETURNS + * 0 for SUCCESS + * !0 for failure + * + ***********************************************************************/ +int +ipoib_getgidinfo( + IN void *ipoib_handle, + IN GID *remote_gid, + IN void *callback_routine_ptr, + IN void *context, + OUT struct sockaddr *remote_ip_address); + +/*********************************************************************** + * + * PURPOSE + * Release handle. + * + * ARGUMENTS + * ipoib_handle: handle for instance + * + * RETURNS + * 0 for SUCCESS + * !0 for failure + * + ***********************************************************************/ +int +ipoib_close( + IN void *ipoib_handle); + + +#endif /* _IPOIB_NAMING_H_ */ diff --git a/branches/Ndi/ulp/dapl/dapl/include/dapl_vendor.h b/branches/Ndi/ulp/dapl/dapl/include/dapl_vendor.h new file mode 100644 index 00000000..1741b871 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/include/dapl_vendor.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. 
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_vendor.h
+ *
+ * PURPOSE:
+ * Vendor provides values for their implementation. Most of
+ * these values are returned in the DAT_IA_ATTR parameter of
+ * dat_ia_query()
+ *
+ * $Id$
+ **********************************************************************/
+
+/**********************************************************************
+ * DAT_IA_ATTR attributes
+ *
+ * These values are used in the provider support routine
+ * dapls_ib_query_hca (). Many of the values there are HW
+ * specific, so the vendor should check that they are
+ * appropriate for their implementation. Specifically,
+ * vendors are encouraged to update transport and vendor
+ * attributes: the reference implementation sets these to NULL.
+ */
+
+/*
+ * Product name of the adapter.
+ * Returned in DAT_IA_ATTR.adapter_name
+ */
+#define VN_ADAPTER_NAME "Generic InfiniBand HCA"
+
+
+/*
+ * Vendor name
+ * Returned in DAT_IA_ATTR.vendor_name
+ */
+#define VN_VENDOR_NAME "DAPL Reference Implementation"
+
+
+/**********************************************************************
+ * PROVIDER Attributes
+ *
+ * These values are used in ./common/dapl_ia_query.c, in dapl_ia_query ().
+ * The values below are the most common for vendors to change, but
+ * there are several other values that may be updated once the
+ * implementation becomes mature.
+ *
+ */
+
+/*
+ * Provider Versions
+ * Returned in DAT_PROVIDER_ATTR.provider_version_major and
+ * DAT_PROVIDER_ATTR.provider_version_minor
+ */
+
+#define VN_PROVIDER_MAJOR 1
+#define VN_PROVIDER_MINOR 0
+
+/*
+ * Provider support for memory types. The reference implementation
+ * always supports DAT_MEM_TYPE_VIRTUAL and DAT_MEM_TYPE_LMR, so
+ * the vendor must indicate if they support DAT_MEM_TYPE_SHARED_VIRTUAL.
+ * Set this value to '1' if DAT_MEM_TYPE_SHARED_VIRTUAL is supported.
+ *
+ * Returned in DAT_PROVIDER_ATTR.lmr_mem_types_supported
+ */
+
+#define VN_MEM_SHARED_VIRTUAL_SUPPORT 1
+
+
+/**********************************************************************
+ *
+ * This value will be assigned to dev_name_prefix in ./udapl/dapl_init.c.
+ *
+ * DAT is designed to support multiple DAPL instances simultaneously,
+ * with different dapl libraries originating from different providers.
+ * There is always the possibility of name conflicts, so a dat name
+ * prefix is provided to make a vendor's name unique. This is
+ * especially true of the IBM Access API, which returns adapter
+ * names that are simply ordinal numbers (e.g. 0, 1, 2). If
+ * a vendor doesn't need or want a prefix, it should be left
+ * as a NULL.
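+ * (With VN_PREFIX "jni", for instance, an adapter the underlying API
+ * names "0" is registered as "jni0"; the name shown is illustrative.)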
+ * This works by setting up a _VENDOR_ variable in the Makefile
+ * Values that might be used:
+ * #define VN_PREFIX "jni" (JNI: OS Access API)
+ * #define VN_PREFIX "ibm" (IBM: OS Access API)
+ * #define VN_PREFIX "" (Mellanox: VAPI)
+ * #define VN_PREFIX "" (Intel: IB Common API)
+ */
+#if defined(_VENDOR_JNI_)
+#define VN_PREFIX "jni"
+#elif defined(_VENDOR_MELLANOX_)
+#define VN_PREFIX ""
+#elif defined(_VENDOR_IBAL_)
+#define VN_PREFIX ""
+#elif defined(_VENDOR_IBM_)
+#define VN_PREFIX "ibm"
+#else
+#define VN_PREFIX ""
+#error "Must define _VENDOR_= in Makefile"
+#endif
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.cygwin b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.cygwin
new file mode 100644
index 00000000..7d1cbbc9
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.cygwin
@@ -0,0 +1,396 @@
+#
+# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+#
+# This Software is licensed under either one of the following two licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+# in the file LICENSE.txt in the root directory. The license is also
+# available from the Open Source Initiative, see
+# http://www.opensource.org/licenses/cpl.php.
+# OR
+#
+# 2) under the terms of the "The BSD License" a copy of which is in the file
+# LICENSE2.txt in the root directory. The license is also available from
+# the Open Source Initiative, see
+# http://www.opensource.org/licenses/bsd-license.php.
+#
+# Licensee has the right to choose either one of the above two licenses.
+#
+# Redistributions of source code must retain both the above copyright
+# notice and either one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, either one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+
+#**********************************************************************
+#
+# MODULE: Makefile
+#
+# PURPOSE: Makefile for dapl reference provider for CYGWIN environment
+#
+#*********************************************************************/
+
+
+##############################################################
+# Application variables
+#
+
+CP = cp -p -u
+AS = $(CROSS_COMPILE)as
+LD = $(CROSS_COMPILE)link.exe
+CC = $(CROSS_COMPILE)cl.exe
+CPP = $(CC)
+AR = $(CROSS_COMPILE)ar
+NM = $(CROSS_COMPILE)nm
+STRIP = $(CROSS_COMPILE)strip
+OBJCOPY = $(CROSS_COMPILE)objcopy
+OBJDUMP = $(CROSS_COMPILE)objdump
+RANLIB = $(CROSS_COMPILE)ranlib
+MKDIR = mkdir -p
+SED = /bin/sed
+SHELL = /bin/sh
+
+TOPDIR = .
+
+COMMON = $(TOPDIR)/../common
+WINDOWS = $(TOPDIR)/windows
+
+ifeq ($(VERBS),mellanox)
+PROVIDER = $(TOPDIR)/../tavor
+PROVIDER_INCDIRS = $(MTHOME)/include
+else
+PROVIDER = $(TOPDIR)/../torrent
+PROVIDER_INCDIRS := ../include/ib/IBM \
+ ../include/ib/IBM/us
+endif
+
+OBJ_DIR = $(TOPDIR)/Obj
+TARGET_DIR = $(TOPDIR)/Target
+
+SRCDIRS := \
+ $(TOPDIR) \
+ $(COMMON) \
+ $(WINDOWS) \
+ $(PROVIDER)
+
+INCDIRS := \
+ $(SRCDIRS) \
+ ../include \
+ ../../dat/include \
+ $(PROVIDER_INCDIRS)
+
+vpath %.c . ${SRCDIRS}
+vpath %.h . 
${INCDIRS} + + +################################################## +# targets +TARLIBS = dapl +TARSHLIBS = dapl + +# data for user libraries +dapl_SOURCES = $(COMMON_SRCS) $(WIN_SRCS) $(PROVIDER_SRCS) $(UDAPL_SRCS) + + +ifeq ($(VERBS),mellanox) +PROVIDER_SRCS = dapl_tavor_util.c dapl_tavor_qp.c dapl_tavor_cm.c +else +PROVIDER_SRCS = dapl_torrent_util.c dapl_torrent_qp.c dapl_torrent_cm.c +endif + +UDAPL_SRCS = dapl_init.c dapl_name_service.c + +WIN_SRCS = dapl_osd.c + +COMMON_SRCS = dapl_cookie.c \ + dapl_cr_accept.c \ + dapl_cr_query.c \ + dapl_cr_reject.c \ + dapl_cr_util.c \ + dapl_cr_callback.c \ + dapl_cr_handoff.c \ + dapl_ep_connect.c \ + dapl_ep_create.c \ + dapl_ep_disconnect.c \ + dapl_ep_dup_connect.c \ + dapl_ep_free.c \ + dapl_ep_get_status.c \ + dapl_ep_modify.c \ + dapl_ep_post_rdma_read.c \ + dapl_ep_post_rdma_write.c \ + dapl_ep_post_recv.c \ + dapl_ep_post_send.c \ + dapl_ep_query.c \ + dapl_ep_util.c \ + dapl_evd_create.c \ + dapl_evd_dequeue.c \ + dapl_evd_disable.c \ + dapl_evd_enable.c \ + dapl_evd_free.c \ + dapl_evd_modify_cno.c \ + dapl_evd_post_se.c \ + dapl_evd_query.c \ + dapl_evd_resize.c \ + dapl_evd_wait.c \ + dapl_evd_util.c \ + dapl_evd_cq_async_error_callb.c \ + dapl_evd_qp_async_error_callb.c \ + dapl_evd_un_async_error_callb.c \ + dapl_evd_connection_callb.c \ + dapl_evd_dto_callb.c \ + dapl_evd_set_unwaitable.c \ + dapl_evd_clear_unwaitable.c \ + dapl_extension_util.c \ + dapl_get_consumer_context.c \ + dapl_get_handle_type.c \ + dapl_hash.c \ + dapl_hca_util.c \ + dapl_ia_close.c \ + dapl_ia_open.c \ + dapl_ia_query.c \ + dapl_ia_util.c \ + dapl_llist.c \ + dapl_lmr_create.c \ + dapl_lmr_free.c \ + dapl_lmr_query.c \ + dapl_lmr_util.c \ + dapl_mr_util.c \ + dapl_provider.c \ + dapl_sp_util.c \ + dapl_psp_create.c \ + dapl_psp_free.c \ + dapl_psp_query.c \ + dapl_pz_create.c \ + dapl_pz_free.c \ + dapl_pz_query.c \ + dapl_pz_util.c \ + dapl_rmr_create.c \ + dapl_rmr_free.c \ + dapl_rmr_bind.c \ + dapl_rmr_query.c \ + dapl_rmr_util.c \ + dapl_rsp_create.c \ + dapl_rsp_free.c \ + dapl_rsp_query.c \ + dapl_cno_create.c \ + dapl_cno_modify_agent.c \ + dapl_cno_free.c \ + dapl_cno_wait.c \ + dapl_cno_query.c \ + dapl_cno_util.c \ + dapl_set_consumer_context.c \ + dapl_ring_buffer_util.c \ + dapl_debug.c + + + +#################################################### +# compiler options CFLAGS +# + +# common flags +UOPTIONS += /nologo /MDd /W3 /GX /Od /FD /GZ /Gm /Zi + +# common defines +UCOMDEFS += /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "WIN32" /D "_DEBUG" \ + -D_WIN32_WINNT=0x0500 -DWINVER=0x0500 +# other options: /FR /Fd + +# private defines +UPRIVDEFS += /D "__WIN__" /D "__MSC__" /D "__i386__" + +CFLAGS += $(UOPTIONS) $(UCOMDEFS) $(UPRIVDEFS) + +# +# Provider specific CFLAGS definition +# + +CFLAGS += -DDAPL_DBG + +ifeq ($(VERBS),mellanox) +CFLAGS += -DSMR_BUSTED -DNO_NAME_SERVICE -DCM_BUSTED # -DPOLLING_COMPLETIONS # -DMW_BUSTED +CFLAGS += -DMTL_MODULE=M_dapl # -DMAX_TRACE=8 -DMAX_DEBUG=8 -DMAX_ERROR=8 +else +CFLAGS += -DNO_NAME_SERVICE # -DCM_BUSTED +endif + +########################################################### +# common included libraries +# +ULDLIBS += kernel32 user32 gdi32 winspool \ + comdlg32 advapi32 shell32 ole32 oleaut32 \ + uuid odbc32 odbccp32 Ws2_32 dat + +# +# Provider specific included libraries +# +ifeq ($(VERBS),mellanox) +ULDLIBS += vapi mtl_common mosal mtib \ + mpga vapi_common mtgetopt +else +ULDLIBS += VerbsLibrary +endif + + + +######################################################### +# link options LDFLAGS +# + +MTARFLAGS= 
-cr + +TARFLAGS += cr + +# common flags +ULDOPTIONS += /nologo /incremental:no /machine:I386 /debug + +# common directories +ULDDIRS += /LIBPATH:"$(OBJ_DIR)" +ULDDIRS += /LIBPATH:"$(TOPDIR)/../../dat/udat/Target" + +# module entry +ULDENTRY = /entry:DllMain + +# specific DLL flags +ifndef NO_DEF_FILE +USE_DEF_FILE = /def:$(WINDOWS)/dapl_win.def +endif + +ifndef NO_LIB_FILE +USE_LIB_FILE = $(@:%.dll=/implib:%.lib) +endif + +ifndef NO_PDB_FILE +USE_PDB_FILE = $(@:%.dll=/PDB:%.pdb) +endif + +DLLFLAGS += $(USE_DEF_FILE) $(USE_LIB_FILE) $(USE_PDB_FILE) + +# DLL flags +UDLLFLAGS += /dll $(DLLFLAGS) + +LDFLAGS += $(ULDOPTIONS) $(ULDENTRY) $(ULDDIRS) $(ULDLIBS:%=%.lib) + +# +# Provider specific ULDFLAGS +# +LDFLAGS += /LIBPATH:"$(MTHOME)/lib" + +# user DLL +LDSHFLAGS += $(LDFLAGS) $(UDLLFLAGS) + + + +############################################################# +# Local functions +# +bsndir = $(notdir $(basename $1)) + +############################################################ +# Common rules +# +define COMPILE +$(CC) -c $(strip ${CFLAGS}) $(strip $(INCDIRS:%=-I%)) $(EXTRA_CFLAGS) $($(@:${OBJ_DIR}/%.obj=%.c_CFLAGS)) /Fo"$@" $< +endef + +define DEF_SET_VAR_SRCS +@echo "$@_VAR_SRCS += $($(basename $(call bsndir,$@))_SOURCES)" >> $@ +endef + +define DEF_SET_VAR_OBJS +@echo "$@_VAR_OBJS += $($(basename $(call bsndir,$@))_OBJECTS)" >> $@ +endef + + + +########################################################################### +# Start rules +# + +all: $(TARSHLIBS:%=${TARGET_DIR}/%.dll) $(TAROBJS:%=${OBJ_DIR}/%.obj) $(TARLIBS:%=${TARGET_DIR}/%.lib) + + +########################################################################## +# Simple objects (user) + +$(TAROBJS:%=${OBJ_DIR}/%.obj): ${OBJ_DIR}/%.obj: %.c + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + $(COMPILE) + +$(OBJ_DIR)/%.obj: %.c + $(COMPILE) + + +########################################################################## +# Static libraries +# +$(TARLIBS:%=$(TARGET_DIR)/%.lib): % : %.mk +$(TARLIBS:%=$(TARGET_DIR)/%.lib.mk): Makefile.cygwin + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + @if [ ! -d $(TARGET_DIR) ]; then mkdir -p $(TARGET_DIR); fi + @echo "# Do not edit. Automatically generated file." > $@ + @ + @${DEF_SET_VAR_OBJS} + @${DEF_SET_VAR_SRCS} + @ + @echo "SOURCES += \$$($@_VAR_SRCS)" >> $@ + @ + @echo "$(@:%.mk=%): \$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) " >> $@ + @echo "$(@:%.mk=%): \$$($@_VAR_OBJS:%.c=$(OBJ_DIR)/%.obj) " >> $@ + @echo -e "\t\$$(AR) \$$(MTARFLAGS) \$$@ \c" >> $@ + @echo -e "\$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) \c" >> $@ + @echo "\$$($@_VAR_OBJS) \$$(\$$(@:$(OBJ_DIR)/%.lib=%)_ARFLAGS) " >> $@ + @echo -e "\t\$$(RANLIB) \$$@" >> $@ + + +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(strip $(TARLIBS)),) +-include $(patsubst %,$(OBJ_DIR)/%.lib.mk,$(TARLIBS)) +endif +endif + + +########################################################################## +# Shared libraries +# +$(TARSHLIBS:%=$(TARGET_DIR)/%.dll): % : %.mk +$(TARSHLIBS:%=$(TARGET_DIR)/%.dll.mk): Makefile.cygwin + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + @if [ ! -d $(TARGET_DIR) ]; then mkdir -p $(TARGET_DIR); fi + @echo "# Do not edit. Automatically generated file." 
> $@ + @ + @${DEF_SET_VAR_OBJS} + @${DEF_SET_VAR_SRCS} + @ + @echo "SOURCES += \$$($@_VAR_SRCS)" >> $@ + @ + @echo "$(@:%.mk=%): \$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo "$(@:%.mk=%): \$$($@_VAR_OBJS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo -e "\t\$$(LD) \$$(LDSHFLAGS) /out:\"\$$@\" \c" >> $@ + @echo -e "\$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) \c" >> $@ + @echo -e "\$$($@_VAR_OBJS) \c" >> $@ + @echo -e "\$$(LDSHLIBS:%=%) \$$(LIBSHDIRS:%=/LIBPATH:%) \c" >> $@ + + +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(strip $(TARSHLIBS)),) +-include $(patsubst %,$(TARGET_DIR)/%.dll.mk,$(TARSHLIBS)) +endif +endif + + +########################################################################## +# Clean rules +# +CLEANDIRS = $(OBJ_DIR) $(TARGET_DIR) + +CLEANFILES = *.obj *.dll *.lib *.sys *.pdb *.idb *.exp *.ilk *.sbr *.mk + +clean: $(CLEANDIRS) + @echo deleting dump files at $(shell pwd) + @rm -f $(CLEANFILES) + @if [ -d $(OBJ_DIR) ] ; then rm -f $(CLEANFILES:%=$(OBJ_DIR)/%); fi + @if [ -d $(TARGET_DIR) ] ; then rm -f $(CLEANFILES:%=$(TARGET_DIR)/%); fi + diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.org b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.org new file mode 100644 index 00000000..49c880db --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.org @@ -0,0 +1,200 @@ + # + # Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + # + # This Software is licensed under either one of the following two licenses: + # + # 1) under the terms of the "Common Public License 1.0" a copy of which is + # in the file LICENSE.txt in the root directory. The license is also + # available from the Open Source Initiative, see + # http://www.opensource.org/licenses/cpl.php. + # OR + # + # 2) under the terms of the "The BSD License" a copy of which is in the file + # LICENSE2.txt in the root directory. The license is also available from + # the Open Source Initiative, see + # http://www.opensource.org/licenses/bsd-license.php. + # + # Licensee has the right to choose either one of the above two licenses. + # + # Redistributions of source code must retain both the above copyright + # notice and either one of the license notices. + # + # Redistributions in binary form must reproduce both the above copyright + # notice, either one of the license notices in the documentation + # and/or other materials provided with the distribution. + # + +#********************************************************************** +# +# MODULE: Makefile +# +# PURPOSE: Makefile for dapl reference provider +# +#*********************************************************************/ + +TOPDIR = $(shell /bin/pwd) + +SRC_PATH = $(TOPDIR) +COMMON = $(TOPDIR)/../common +LINUX = $(TOPDIR)/../udapl/linux +IBA_HOME = ../../.. +PROVIDER = $(TOPDIR)/../ibal +VERBS=ibal + +# +# Set DAPL_EXPOSE_HCA_PORTS to 1 if you want to expose HCA ports as real +# names in the dat registry. This results in 3 entries for each HCA: the +# basename, port 1 and port 2. For example, if my basename is "foo" the +# result will be "foo", "fooa", and "foob". "foo" and "fooa" are +# synonyms, they even share the same data structures. +# NOTE: If DAT is using the static registry, you will need to update +# dat.conf with entries for the ports; "fooa" and "foob" in our example. 
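+# An illustrative static-registry entry pair for that case (a sketch
+# only: the exact dat.conf column layout depends on the DAT registry in
+# use, and the library path and instance data below are hypothetical):
+#
+# fooa u1.1 nonthreadsafe default /usr/lib/libdapl.so ri.1.1 "foo 1" ""
+# foob u1.1 nonthreadsafe default /usr/lib/libdapl.so ri.1.1 "foo 2" ""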
+# +DAPL_EXPOSE_HCA_PORTS=0 + +SO_TARGET = libdapl.so.0.0 +SO_NAME = libdapl.so +L_TARGET := libdapl.a + +# +# CFLAGS definition +# +EXTRA_CFLAGS = -O $(CPPFLAGS) +ifeq ($(BLD),debug) +EXTRA_CFLAGS += -DDAPL_DBG +endif +EXTRA_CFLAGS += -DIBAL -DOLD_QP_STATE_TO_INIT # -DNO_NAME_SERVICE # +EXTRA_CFLAGS += -I. +EXTRA_CFLAGS += -I.. +EXTRA_CFLAGS += -I../../dat/include +EXTRA_CFLAGS += -I../include +EXTRA_CFLAGS += -I$(PROVIDER) +EXTRA_CFLAGS += -I../udapl/linux +EXTRA_CFLAGS += -I../common +EXTRA_CFLAGS += -Wall +EXTRA_CFLAGS += -Wstrict-prototypes +#EXTRA_CFLAGS += -Wmissing-prototypes +#EXTRA_CFLAGS += -Wmissing-declarations +EXTRA_CFLAGS += -Werror +EXTRA_CFLAGS += -g3 +ifdef GPROF +EXTRA_CFLAGS += -pg +endif +EXTRA_CFLAGS += -D_VENDOR_IBAL_ + +ifeq ($(DAPL_EXPOSE_HCA_PORTS),1) +EXTRA_CFLAGS += -DDAPL_EXPOSE_HCA_PORTS +endif + + +# +# EXTRA_LDFLAGS definition +# +EXTRA_LDFLAGS += -init dapl_init +EXTRA_LDFLAGS += -fini dapl_fini +EXTRA_LDFLAGS += -L${IBA_HOME}/al -R${IBA_HOME}/al +EXTRA_LDFLAGS += -L${IBA_HOME}/complib -R${IBA_HOME}/complib +EXTRA_LDFLAGS += -lallib +EXTRA_LDFLAGS += -lcomplib +EXTRA_LDFLAGS += -ldl -lpthread + +PROVIDER_SRCS = dapl_ibal_util.c dapl_ibal_qp.c dapl_ibal_cm.c dapl_ibal_mrdb.c +VPATH = $(SRC_PATH) $(COMMON) $(LINUX) $(PROVIDER) + +UDAPL_SRCS = dapl_init.c dapl_name_service.c dapl_timer_util.c + +COMMON_SRCS = dapl_cookie.c \ + dapl_cr_accept.c \ + dapl_cr_query.c \ + dapl_cr_reject.c \ + dapl_cr_util.c \ + dapl_cr_callback.c \ + dapl_cr_handoff.c \ + dapl_ep_connect.c \ + dapl_ep_create.c \ + dapl_ep_disconnect.c \ + dapl_ep_dup_connect.c \ + dapl_ep_free.c \ + dapl_ep_get_status.c \ + dapl_ep_modify.c \ + dapl_ep_post_rdma_read.c \ + dapl_ep_post_rdma_write.c \ + dapl_ep_post_recv.c \ + dapl_ep_post_send.c \ + dapl_ep_query.c \ + dapl_ep_reset.c \ + dapl_ep_util.c \ + dapl_evd_create.c \ + dapl_evd_dequeue.c \ + dapl_evd_disable.c \ + dapl_evd_enable.c \ + dapl_evd_free.c \ + dapl_evd_modify_cno.c \ + dapl_evd_post_se.c \ + dapl_evd_query.c \ + dapl_evd_resize.c \ + dapl_evd_wait.c \ + dapl_evd_util.c \ + dapl_evd_cq_async_error_callb.c \ + dapl_evd_qp_async_error_callb.c \ + dapl_evd_un_async_error_callb.c \ + dapl_evd_connection_callb.c \ + dapl_evd_dto_callb.c \ + dapl_evd_set_unwaitable.c \ + dapl_evd_clear_unwaitable.c \ + dapl_get_consumer_context.c \ + dapl_get_handle_type.c \ + dapl_hash.c \ + dapl_hca_util.c \ + dapl_ia_close.c \ + dapl_ia_open.c \ + dapl_ia_query.c \ + dapl_ia_util.c \ + dapl_llist.c \ + dapl_lmr_create.c \ + dapl_lmr_free.c \ + dapl_lmr_query.c \ + dapl_lmr_util.c \ + dapl_mr_util.c \ + dapl_provider.c \ + dapl_sp_util.c \ + dapl_psp_create.c \ + dapl_psp_create_any.c \ + dapl_psp_free.c \ + dapl_psp_query.c \ + dapl_pz_create.c \ + dapl_pz_free.c \ + dapl_pz_query.c \ + dapl_pz_util.c \ + dapl_rmr_create.c \ + dapl_rmr_free.c \ + dapl_rmr_bind.c \ + dapl_rmr_query.c \ + dapl_rmr_util.c \ + dapl_rsp_create.c \ + dapl_rsp_free.c \ + dapl_rsp_query.c \ + dapl_cno_create.c \ + dapl_cno_modify_agent.c \ + dapl_cno_free.c \ + dapl_cno_wait.c \ + dapl_cno_query.c \ + dapl_cno_util.c \ + dapl_set_consumer_context.c \ + dapl_ring_buffer_util.c \ + dapl_debug.c + +LINUX_SRCS = dapl_osd.c + +UDAPL_OBJS = $(UDAPL_SRCS:%.c=%.o) +COMMON_OBJS = $(COMMON_SRCS:%.c=%.o) +LINUX_OBJS = $(LINUX_SRCS:%.c=%.o) +PROVIDER_OBJS = $(PROVIDER_SRCS:%.c=%.o) + +DAPL_OBJS = $(UDAPL_OBJS) $(COMMON_OBJS) $(LINUX_OBJS) $(PROVIDER_OBJS) + +S_OBJS = $(DAPL_OBJS) +L_OBJS = $(S_OBJS) + +include $(IBA_HOME)/Makefile.config +include $(IBA_HOME)/Makefile.rules diff 
--git a/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.orig b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.orig new file mode 100644 index 00000000..f98ef9d0 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/Makefile.orig @@ -0,0 +1,305 @@ +# +# Copyright (c) 2002, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under the terms of the "IBM Common Public +# License 1.0" a copy of which is in the file LICENSE.txt in the +# root directory. The license is also available from the Open Source +# Initiative, see http://www.opensource.org/licenses/ibmpl.html. +# +# + +#********************************************************************** +# +# MODULE: Makefile +# +# PURPOSE: Makefile for dapl reference provider +# +# $Id$ +#*********************************************************************/ + +TOPDIR = $(shell /bin/pwd) + +SRC_PATH = $(TOPDIR) +OBJ_PATH = $(TOPDIR)/Obj +TARGET_PATH = $(TOPDIR)/Target + +TARGET = $(TARGET_PATH)/libdapl.so +STATIC = $(TARGET_PATH)/libdapl.a +COMMON = $(TOPDIR)/../common +LINUX = $(TOPDIR)/../udapl/linux + +CC = gcc + +# +# Set DAPL_EXPOSE_HCA_PORTS to 1 if you want to expose HCA ports as real +# names in the dat registry. This results in 3 entries for each HCA: the +# basename, port 1 and port 2. For example, if my basename is "foo" the +# result will be "foo", "fooa", and "foob". "foo" and "fooa" are +# synonyms, they even share the same data structures. +# NOTE: If DAT is using the static registry, you will need to update +# dat.conf with entries for the ports; "fooa" and "foob" in our example. +# +DAPL_EXPOSE_HCA_PORTS=0 + + +# +# CFLAGS definition +# + +CFLAGS = -O $(CPPFLAGS) -DDAPL_DBG +ifeq ($(VERBS),mellanox) +DAPL_IBLIB_DIR = /usr/mellanox/lib +PROVIDER = $(TOPDIR)/../vapi +CFLAGS += -DVAPI -DSMR_BUSTED -DNO_NAME_SERVICE -DOLD_QP_STATE_TO_INIT # -DCM_BUSTED # -DMW_BUSTED +else +ifeq ($(VERBS),ibal) +DAPL_IBLIB_DIR = $(LD_LIBRARY_PATH) +KERVER =$(shell uname -r) +PROVIDER = $(TOPDIR)/../ibal +CFLAGS += -DIBAL -DOLD_QP_STATE_TO_INIT # -DNO_NAME_SERVICE +else +PROVIDER = $(TOPDIR)/../ibapi +CFLAGS += -DIBAPI #-DNO_NAME_SERVICE # -DCM_BUSTED +endif +endif + +CFLAGS += -I. +CFLAGS += -I.. +CFLAGS += -I../../dat/include +CFLAGS += -I../include + +ifeq ($(VERBS),mellanox) +CFLAGS += -I/usr/mellanox/include +#CFLAGS += -I../include/ib/MELLANOX +else +ifeq ($(VERBS),ibal) +CFLAGS += -I../include/ib/IBAL/ +CFLAGS += -I../include/ib/IBAL/iba +CFLAGS += -I/usr/src/linux-$(KERVER)/include +CFLAGS += -DCONFIG_X86 +else +CFLAGS += -I../include/ib/IBM +CFLAGS += -I../include/ib/IBM/us +endif +endif + +CFLAGS += -I$(PROVIDER) +CFLAGS += -I../udapl/linux +CFLAGS += -I../common +CFLAGS += -Wall +CFLAGS += -Wstrict-prototypes +CFLAGS += -Wmissing-prototypes +CFLAGS += -Wmissing-declarations +CFLAGS += -Werror +CFLAGS += -g3 +ifdef GPROF +CFLAGS += -pg +endif + +LD = ld + +# +# LDFLAGS definition +# +LDFLAGS = -shared +LDFLAGS += -lpthread +LDFLAGS += -init dapl_init +LDFLAGS += -fini dapl_fini + +AR = ar +# +# ARFLAGS definition +# +ARFLAGS = r + +# +# To build with IBM verbs: make VERBS=ibm +# To build with JNI verbs: make VERBS=jni [default] +# To build with PSC verbs: make VERBS=psc +# +# Verb libraries should be in /usr/lib +# +# +# Vendor string, _VENDOR_: define for HCA vendor. If you +# add a new vendor name, you must add a new prefix to dev_name_prefix in +# dapl_init.c +# + +# Allow specification in the environment of a location for the +# verbs library, to allow running DAPL without installing +# the verbs library in /usr/lib. 
For development. +ifneq (${DAPL_IBLIB_DIR},) +LDFLAGS += -L${DAPL_IBLIB_DIR} -R${DAPL_IBLIB_DIR} +endif + +ifeq ($(VERBS),ibm) +CFLAGS += -D_VENDOR_IBM_ +CFLAGS += -D_IBM -DIBM +LDFLAGS += -lIBusd +LDFLAGS += -lIBlueHCAd +LDFLAGS += -ldl +PROVIDER_SRCS = dapl_ibapi_util.c dapl_ibapi_qp.c dapl_ibapi_cm.c +else +ifeq ($(VERBS),mellanox) +CFLAGS += -D_VENDOR_MELLANOX_ +CFLAGS += -DMTL_MODULE=M_dapl -DMAX_TRACE=8 -DMAX_DEBUG=8 -DMAX_ERROR=8 +PROVIDER_SRCS = dapl_vapi_util.c dapl_vapi_qp.c dapl_vapi_cm.c +LDFLAGS += -lvapi +LDFLAGS += -lmpga +LDFLAGS += -lmtl_common +LDFLAGS += -lcm +else +ifeq ($(VERBS),ibal) +CFLAGS += -D_VENDOR_IBAL_ +PROVIDER_SRCS = dapl_ibal_util.c dapl_ibal_qp.c dapl_ibal_cm.c +PROVIDER_SRCS += dapl_ibal_mrdb.c +LDFLAGS += -lallib +LDFLAGS += -lcomplib +LDFLAGS += -ldl +else +CFLAGS += -D_VENDOR_JNI_ # -DIPOIB_NAMING +CFLAGS += -DJNI # -DPOLLING_COMPLETIONS +LDFLAGS += -lJniTavorVerbs +LDFLAGS += -lpthread +PROVIDER_SRCS = dapl_ibapi_util.c dapl_ibapi_qp.c dapl_ibapi_cm.c +endif +endif +endif + +ifeq ($(DAPL_EXPOSE_HCA_PORTS),1) +CFLAGS += -DDAPL_EXPOSE_HCA_PORTS +endif + +UDAPL_SRCS = dapl_init.c dapl_name_service.c dapl_timer_util.c + +COMMON_SRCS = dapl_cookie.c \ + dapl_cr_accept.c \ + dapl_cr_query.c \ + dapl_cr_reject.c \ + dapl_cr_util.c \ + dapl_cr_callback.c \ + dapl_cr_handoff.c \ + dapl_ep_connect.c \ + dapl_ep_create.c \ + dapl_ep_disconnect.c \ + dapl_ep_dup_connect.c \ + dapl_ep_free.c \ + dapl_ep_get_status.c \ + dapl_ep_modify.c \ + dapl_ep_post_rdma_read.c \ + dapl_ep_post_rdma_write.c \ + dapl_ep_post_recv.c \ + dapl_ep_post_send.c \ + dapl_ep_query.c \ + dapl_ep_reset.c \ + dapl_ep_util.c \ + dapl_evd_create.c \ + dapl_evd_dequeue.c \ + dapl_evd_disable.c \ + dapl_evd_enable.c \ + dapl_evd_free.c \ + dapl_evd_modify_cno.c \ + dapl_evd_post_se.c \ + dapl_evd_query.c \ + dapl_evd_resize.c \ + dapl_evd_wait.c \ + dapl_evd_util.c \ + dapl_evd_cq_async_error_callb.c \ + dapl_evd_qp_async_error_callb.c \ + dapl_evd_un_async_error_callb.c \ + dapl_evd_connection_callb.c \ + dapl_evd_dto_callb.c \ + dapl_extension_util.c \ + dapl_get_consumer_context.c \ + dapl_get_handle_type.c \ + dapl_hash.c \ + dapl_hca_util.c \ + dapl_ia_close.c \ + dapl_ia_open.c \ + dapl_ia_query.c \ + dapl_ia_util.c \ + dapl_llist.c \ + dapl_lmr_create.c \ + dapl_lmr_free.c \ + dapl_lmr_query.c \ + dapl_lmr_util.c \ + dapl_mr_util.c \ + dapl_provider.c \ + dapl_sp_util.c \ + dapl_psp_create.c \ + dapl_psp_create_any.c \ + dapl_psp_free.c \ + dapl_psp_query.c \ + dapl_pz_create.c \ + dapl_pz_free.c \ + dapl_pz_query.c \ + dapl_pz_util.c \ + dapl_rmr_create.c \ + dapl_rmr_free.c \ + dapl_rmr_bind.c \ + dapl_rmr_query.c \ + dapl_rmr_util.c \ + dapl_rsp_create.c \ + dapl_rsp_free.c \ + dapl_rsp_query.c \ + dapl_cno_create.c \ + dapl_cno_modify_agent.c \ + dapl_cno_free.c \ + dapl_cno_wait.c \ + dapl_cno_query.c \ + dapl_cno_util.c \ + dapl_set_consumer_context.c \ + dapl_ring_buffer_util.c + +LINUX_SRCS = dapl_osd.c + +SRCS = $(UDAPL_SRCS) $(COMMON_SRCS) $(LINUX_SRCS) $(PROVIDER_SRCS) + + +UDAPL_OBJS = $(UDAPL_SRCS:%.c=$(OBJ_PATH)/%.o) +COMMON_OBJS = $(COMMON_SRCS:%.c=$(OBJ_PATH)/%.o) +LINUX_OBJS = $(LINUX_SRCS:%.c=$(OBJ_PATH)/%.o) +PROVIDER_OBJS = $(PROVIDER_SRCS:%.c=$(OBJ_PATH)/%.o) + +OBJS = $(UDAPL_OBJS) $(COMMON_OBJS) $(LINUX_OBJS) $(PROVIDER_OBJS) + + +all: mkdirs $(TARGET) $(STATIC) + +mkdirs: + @[ -d $(TARGET_PATH) ] || /bin/mkdir -p $(TARGET_PATH) + @[ -d $(OBJ_PATH) ] || /bin/mkdir -p $(OBJ_PATH) + +$(UDAPL_OBJS): $(OBJ_PATH)/%.o : %.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< 
-o $@ + +$(COMMON_OBJS): $(OBJ_PATH)/%.o : $(COMMON)/%.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(LINUX_OBJS): $(OBJ_PATH)/%.o : $(LINUX)/%.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(PROVIDER_OBJS): $(OBJ_PATH)/%.o : $(PROVIDER)/%.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(TARGET): $(OBJS) + @echo Linking $(TARGET) + $(LD) $(LDFLAGS) $^ -o $(TARGET) + +$(STATIC): $(OBJS) + @echo Archiving $(STATIC) + @$(AR) $(ARFLAGS) $(STATIC) $^ + +clean: + rm -f $(OBJS) + rm -f $(TARGET) $(STATIC) + +tidy: + rm -f *~ + rm -f ../common/*~ + rm -f ../include/*~ + rm -f linux/*~ diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/SOURCES b/branches/Ndi/ulp/dapl/dapl/udapl/SOURCES new file mode 100644 index 00000000..a8db4403 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/SOURCES @@ -0,0 +1,39 @@ +!if $(FREEBUILD) +TARGETNAME=dapl +!else +TARGETNAME=dapld +!endif +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=DYNLINK +DLLENTRY=_DllMainCRTStartup +DLLDEF=$O\udapl_exports.def +USE_CRTDLL=1 + +SOURCES=\ + dapl_init.c \ + dapl_name_service.c \ + dapl_timer_util.c \ + udapl_sources.c + +INCLUDES=..\include;..\common;windows;..\ibal;..\..\dat\include;\ + ..\..\..\..\inc;..\..\..\..\inc\user; + +USER_C_FLAGS=$(USER_C_FLAGS) -DEXPORT_DAPL_SYMBOLS -D_VENDOR_IBAL_ -DDAPL_MERGE_CM_DTO +!if !$(FREEBUILD) +USER_C_FLAGS=$(USER_C_FLAGS) -DDAPL_DBG +!endif + +TARGETLIBS= \ + $(SDK_LIB_PATH)\kernel32.lib \ + $(SDK_LIB_PATH)\ws2_32.lib \ +!if $(FREEBUILD) + $(TARGETPATH)\*\dat.lib \ + $(TARGETPATH)\*\complib.lib \ + $(TARGETPATH)\*\ibal.lib +!else + $(TARGETPATH)\*\datd.lib \ + $(TARGETPATH)\*\complibd.lib \ + $(TARGETPATH)\*\ibald.lib +!endif + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/dapl_init.c b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_init.c new file mode 100644 index 00000000..856be598 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_init.c @@ -0,0 +1,318 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_init.c
+ *
+ * PURPOSE: Interface Adapter management
+ * Description: Interfaces in this file are completely described in
+ * the DAPL 1.1 API, Chapter 6, section 2
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_hca_util.h"
+#include "dapl_init.h"
+#include "dapl_provider.h"
+#include "dapl_mr_util.h"
+#include "dapl_osd.h" /* needed for g_daplDebugLevel */
+#include "dapl_adapter_util.h"
+#include "dapl_name_service.h"
+#include "dapl_timer_util.h"
+#include "dapl_vendor.h"
+
+
+/*
+ * If DAPL_EXPOSE_HCA_PORTS is defined then extra entries are placed
+ * in the registry for each port of the HCA. For example, without
+ * this option only "jni0" will be registered; but with it, you will
+ * get "jni0a" and "jni0b".
+ */
+/*
+ * DHCA_PORT_COUNT is the number of ports on an HCA. If you are not
+ * exposing the ports, this is always 1
+ */
+#ifdef DAPL_EXPOSE_HCA_PORTS
+#define DHCA_PORT_COUNT 2
+#else
+#define DHCA_PORT_COUNT 1
+#endif /* DAPL_EXPOSE_HCA_PORTS */
+
+extern int32_t dapl_ib_init_complete;
+
+/*
+ * dapl_init
+ *
+ * initialize this provider
+ * includes initialization of all global variables
+ * as well as registering all supported IAs with the dat registry
+ *
+ * This function needs to be called once when the provider is loaded.
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * none
+ *
+ * Return Values:
+ */
+void
+dapl_init ( void )
+{
+ DAT_RETURN dat_status;
+
+#if defined(DAPL_DBG)
+ /* set up debug level */
+ g_dapl_dbg_type = dapl_os_get_env_val ( "DAPL_DBG_TYPE",
+ DAPL_DBG_TYPE_ERR | DAPL_DBG_TYPE_WARN);
+ /* set up debug destination */
+ g_dapl_dbg_dest = dapl_os_get_env_val ( "DAPL_DBG_DEST",
+ DAPL_DBG_DEST_STDOUT);
+#endif /* DAPL_DBG */
+
+ dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "DAPL: Started (dapl_init)\n");
+
+ /* See if the user is on a loopback setup */
+ g_dapl_loopback_connection = dapl_os_get_env_bool ( "DAPL_LOOPBACK" );
+ dapl_dbg_log (DAPL_DBG_TYPE_UTIL,
+ "--> %s Setting loopback\n",
+ g_dapl_loopback_connection ? "" : "NOT" );
+
+ /* initialize the timer */
+ dapls_timer_init();
+
+ dat_status = dapls_ns_init ();
+
+ if (DAT_SUCCESS != dat_status)
+ {
+ dapl_dbg_log (DAPL_DBG_TYPE_ERR,
+ "dapls_ns_init failed %d\n", dat_status);
+ goto bail;
+ }
+
+ return;
+
+bail:
+ dapl_dbg_log (DAPL_DBG_TYPE_ERR, "ERROR: dapl_init failed\n");
+ dapl_fini ();
+ return;
+}
+
+/*
+ * dapl_fini
+ *
+ * finalize this provider
+ * includes freeing of all global variables
+ * as well as deregistering all supported IAs from the dat registry
+ *
+ * This function needs to be called once when the provider is unloaded.
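+ *
+ * (In this file dapl_init/dapl_fini are driven by the registry entry
+ * points DAT_PROVIDER_INIT_FUNC_NAME and DAT_PROVIDER_FINI_FUNC_NAME
+ * defined below.)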
+ * + * Input: + * none + * + * Output: + * none + * + * Return Values: + */ +void +dapl_fini ( void ) +{ + DAT_RETURN dat_status; + + dapl_dbg_log (DAPL_DBG_TYPE_UTIL, "DAPL: Stopped (dapl_fini)\n"); + + dat_status = dapl_provider_list_destroy (); + if (DAT_SUCCESS != dat_status) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapl_provider_list_destroy failed %d\n", dat_status); + } + + dapls_ib_release (); + + return; +} + + +/* + * + * This function is called by the registry to initialize a provider + * + * The instance data string is expected to have the following form: + * + * + * + */ +void +DAT_PROVIDER_INIT_FUNC_NAME ( + IN const DAT_PROVIDER_INFO *provider_info, + IN const char *instance_data ) +{ + DAT_PROVIDER *provider; + DAPL_HCA *hca_ptr; + DAT_RETURN dat_status; + char * data; + char * name; + char * port; + unsigned int len = 0; + unsigned int i; + + data = NULL; + provider = NULL; + hca_ptr = NULL; + + dapl_init(); + /* Initialize IB verbs library and provider list */ + if ( !dapl_ib_init_complete ) + { + dapls_ib_init (); + dapl_ib_init_complete = TRUE; + + /* initialize the provider list */ + dat_status = dapl_provider_list_create(); + if (DAT_SUCCESS != dat_status) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dapl_provider_list_create failed %d\n", dat_status); + goto bail; + } + } + + dat_status = dapl_provider_list_insert(provider_info->ia_name, &provider); + if ( DAT_SUCCESS != dat_status ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dat_provider_list_insert failed: %x\n", dat_status); + goto bail; + } + + data = dapl_os_strdup(instance_data); + if ( NULL == data ) + { + goto bail; + } + + len = dapl_os_strlen(data); + + for ( i = 0; i < len; i++ ) + { + if ( ' ' == data[i] ) + { + data[i] = '\0'; + break; + } + } + + /* if the instance data did not have a valid format */ + if ( i == len ) + { + goto bail; + } + + name = data; + port = data + (i + 1); + + hca_ptr = dapl_hca_alloc (name, port); + if ( NULL == hca_ptr ) + { + goto bail; + } + + provider->extension = hca_ptr; + + /* register providers with dat_registry */ + dat_status = dat_registry_add_provider (provider, provider_info); + if ( DAT_SUCCESS != dat_status ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dat_registry_add_provider failed: %x\n", dat_status); + goto bail; + } + + bail: + if ( NULL != data ) + { + dapl_os_free(data, len + 1); + } + + if ( DAT_SUCCESS != dat_status ) + { + if ( NULL != provider ) + { + (void) dapl_provider_list_remove(provider_info->ia_name); + } + + if ( NULL != hca_ptr ) + { + dapl_hca_free (hca_ptr); + } + } +} + + +/* + * + * This function is called by the registry to de-initialize a provider + * + */ +void +DAT_PROVIDER_FINI_FUNC_NAME ( + IN const DAT_PROVIDER_INFO *provider_info ) +{ + DAT_PROVIDER *provider; + DAT_RETURN dat_status; + + dat_status = dapl_provider_list_search(provider_info->ia_name, &provider); + if ( DAT_SUCCESS != dat_status ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dat_provider_list_search failed: %x\n", dat_status); + return; + } + + dat_status = dat_registry_remove_provider (provider, provider_info); + if ( DAT_SUCCESS != dat_status ) + { + dapl_dbg_log (DAPL_DBG_TYPE_ERR, + "dat_registry_remove_provider failed: %x\n", dat_status); + } + + (void) dapl_provider_list_remove(provider_info->ia_name); + dapl_fini(); +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.c b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.c new file mode 100644 index 
00000000..1adcad58
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.c
@@ -0,0 +1,333 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_name_service.c
+ *
+ * PURPOSE: Provide simple, file-based name services in the absence
+ * of DNS hooks for a particular transport type. If an
+ * InfiniBand implementation supports IPoIB, this should
+ * not be used.
+ *
+ * Description: Interfaces in this file are completely described in
+ * dapl_name_service.h
+ *
+ * $Id$
+ **********************************************************************/
+
+/*
+ * Include files for setting up a network name
+ */
+#include "dapl.h"
+#include "dapl_name_service.h"
+
+/*
+ * Prototypes
+ */
+#ifdef NO_NAME_SERVICE
+DAT_RETURN dapli_ns_create_gid_map(void);
+
+DAT_RETURN dapls_ns_add_address(
+ IN DAPL_GID_MAP *gme);
+#endif /* NO_NAME_SERVICE */
+
+/*
+ * dapls_ns_init
+ *
+ * Initialize naming services
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INVALID_PARAMETER
+ */
+DAT_RETURN
+dapls_ns_init (void)
+{
+ DAT_RETURN dat_status;
+
+ dat_status = DAT_SUCCESS;
+#ifdef NO_NAME_SERVICE
+ dat_status = dapli_ns_create_gid_map ();
+#endif
+
+ return dat_status;
+}
+
+
+#ifdef NO_NAME_SERVICE
+
+#define MAX_GID_ENTRIES 32
+DAPL_GID_MAP g_gid_map_table[MAX_GID_ENTRIES];
+
+#ifdef _WIN32
+#define MAP_FILE "C:/WINDOWS/system32/drivers/etc/ibhosts"
+#else
+#define MAP_FILE "/etc/dapl/ibhosts"
+#endif
+
+
+/*
+ * dapli_ns_create_gid_map()
+ *
+ * Read the ibhosts file named by MAP_FILE above to obtain host names
+ * and GIDs. Create a table containing IP addresses and GIDs which can
+ * be used for lookups.
+ *
+ * This implementation is a simple method providing name services
+ * when more advanced mechanisms do not exist. The proper way
+ * to obtain these mappings is to use a name service such as is
+ * provided by IPoIB on InfiniBand.
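+ *
+ * Each non-comment line of the map file pairs a host name with its
+ * 64-bit GID prefix and GUID in hex, matching the sscanf format used
+ * below; a hypothetical entry:
+ *
+ *   host1-ib0 fe80000000000000 0002c9010a2b3c4d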
+ *
+ * Input:
+ * none
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INTERNAL_ERROR
+ */
+DAT_RETURN
+dapli_ns_create_gid_map (void)
+{
+ FILE *f;
+ GID gid;
+ char hostname[128];
+ char tmp_buf[512];
+ int rc = 0;
+ struct addrinfo *addr;
+ struct sockaddr_in *si;
+ DAPL_GID_MAP gmt;
+
+ f = fopen (MAP_FILE, "r");
+ if ( f == NULL )
+ {
+ dapl_os_printf ("ERROR: Must have file <%s> for IP/GID mappings\n",
+ MAP_FILE);
+ return DAT_INTERNAL_ERROR;
+ }
+ dapl_os_memzero(tmp_buf,sizeof(tmp_buf));
+
+ while ( rc != EOF )
+ {
+ if(feof(f))
+ break;
+
+ if (fgets(tmp_buf,sizeof(tmp_buf),f) != NULL)
+ {
+ if(tmp_buf[0] == '#' || tmp_buf[0] == '\n' || tmp_buf[0] == 0x20)
+ {
+ continue;
+ }
+ if ( (rc = sscanf ( tmp_buf, "%s " F64x " " F64x , hostname, &gid.gid_prefix, &gid.guid) ) != 3 )
+ {
+ continue;
+ }
+ }
+ //rc = fscanf ( f, "%s " F64x " " F64x , hostname, &gid.gid_prefix, &gid.guid);
+ rc = dapls_osd_getaddrinfo (hostname, &addr);
+
+ if ( rc != 0 )
+ {
+ /*
+ * hostname not registered in DNS, provide a dummy value
+ */
+ int err = WSAGetLastError();
+ dapl_os_printf ("WARNING: <%s> not registered in DNS err = %u, using dummy IP value\n",
+ hostname, err);
+ /*dapl_os_memcpy(hca_ptr->hca_address.sa_data, "0x01020304", 4);*/
+ gmt.ip_address = 0x01020304;
+ }
+ else
+ {
+ /*
+ * Load into the ip/gid mapping table
+ */
+ si = (struct sockaddr_in *)addr->ai_addr;
+ if ( AF_INET == addr->ai_addr->sa_family )
+ {
+ gmt.ip_address = si->sin_addr.s_addr;
+ }
+ else
+ {
+ dapl_dbg_log (DAPL_DBG_TYPE_WARN,
+ "WARNING: <%s> Address family not supported, using dummy IP value\n",
+ hostname);
+ gmt.ip_address = 0x01020304;
+ }
+ dapls_osd_freeaddrinfo (addr);
+ }
+ gmt.gid.gid_prefix = gid.gid_prefix;
+ gmt.gid.guid = gid.guid;
+
+ dapls_ns_add_address (&gmt);
+ //rc = fscanf ( f, "%s " F64x " " F64x , hostname, &gid.gid_prefix, &gid.guid);
+ }
+
+ fclose (f);
+
+ return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_ns_add_address
+ *
+ * Add a table entry to the gid_map_table.
+ *
+ * Input:
+ * gme pointer to the IP/GID map entry to add
+ *
+ * Output:
+ * none
+ *
+ * Returns:
+ * DAT_SUCCESS
+ * DAT_INSUFFICIENT_RESOURCES
+ */
+DAT_RETURN
+dapls_ns_add_address (
+ IN DAPL_GID_MAP *gme)
+{
+ DAPL_GID_MAP *gmt;
+ int count;
+
+ gmt = g_gid_map_table;
+
+ /* find the first free slot, stopping at the table bound */
+ for ( count = 0, gmt = g_gid_map_table;
+ count < MAX_GID_ENTRIES && gmt->ip_address; gmt++ )
+ {
+ count++;
+ }
+
+ if ( count >= MAX_GID_ENTRIES )
+ {
+ return DAT_INSUFFICIENT_RESOURCES;
+ }
+
+ *gmt = *gme;
+
+ return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_ns_lookup_address
+ *
+ * Look up the provided IA_ADDRESS in the gid_map_table. Return
+ * the gid if found.
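+ *
+ * (The lookup below is a linear scan of at most MAX_GID_ENTRIES
+ * entries, which is adequate for the small, statically sized map
+ * this fallback service maintains.)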
+ * + * Input: + * remote_ia_address remote IP address + * gid pointer to output gid + * + * Output: + * gid filled in GID + * + * Returns: + * DAT_SUCCESS + * DAT_INSUFFICIENT_RESOURCES + * DAT_INVALID_PARAMETER + */ +DAT_RETURN +dapls_ns_lookup_address ( + IN DAPL_IA *ia_ptr, + IN DAT_IA_ADDRESS_PTR remote_ia_address, + OUT GID *gid) +{ + DAPL_GID_MAP *gmt; + struct sockaddr_in *si; + + ia_ptr = ia_ptr; /* unused here */ + + si = (struct sockaddr_in *)remote_ia_address; + + for ( gmt = g_gid_map_table; gmt->ip_address; gmt++ ) + { + if (gmt->ip_address == si->sin_addr.s_addr) + { + gid->guid = gmt->gid.guid; + gid->gid_prefix = gmt->gid.gid_prefix; + + return DAT_SUCCESS; + } + } + + return DAT_INVALID_PARAMETER; +} + +#else /* NO_NAME_SERVICE */ + +DAT_RETURN +dapls_ns_create_gid_map ( + IN DAPL_HCA *hca_ptr) +{ + return (dapls_ib_ns_create_gid_map (hca_ptr)); +} + + +DAT_RETURN +dapls_ns_remove_gid_map ( + IN DAPL_HCA *hca_ptr) +{ + return (dapls_ib_ns_remove_gid_map (hca_ptr)); +} + + +DAT_RETURN dapls_ns_map_gid ( + IN DAPL_HCA *hca_ptr, + IN DAT_IA_ADDRESS_PTR remote_ia_address, + OUT GID *gid) +{ + return (dapls_ib_ns_map_gid (hca_ptr, remote_ia_address, gid)); +} + +DAT_RETURN dapls_ns_map_ipaddr ( + IN DAPL_HCA *hca_ptr, + IN GID gid, + OUT DAT_IA_ADDRESS_PTR remote_ia_address) +{ + return (dapls_ib_ns_map_ipaddr (hca_ptr, gid, remote_ia_address)); +} + +#endif /* NO_NAME_SERVICE */ + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.h b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.h new file mode 100644 index 00000000..8982f59d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_name_service.h @@ -0,0 +1,70 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dapl_name_service.h
+ *
+ * PURPOSE: Utility defs & routines supporting name services
+ *
+ * $Id$
+ *
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_adapter_util.h"
+
+/*
+ * Prototypes for name service routines
+ */
+
+DAT_RETURN dapls_ns_init (void);
+
+#ifdef NO_NAME_SERVICE
+
+DAT_RETURN dapls_ns_lookup_address (
+    IN  DAPL_IA                 *ia_ptr,
+    IN  DAT_IA_ADDRESS_PTR      remote_ia_address,
+    OUT GID                     *gid);
+
+
+#else
+
+DAT_RETURN dapls_ns_create_gid_map(DAPL_HCA *hca_ptr);
+DAT_RETURN dapls_ns_remove_gid_map(DAPL_HCA *hca_ptr);
+
+DAT_RETURN dapls_ns_map_gid (
+    IN  DAPL_HCA                *hca_ptr,
+    IN  DAT_IA_ADDRESS_PTR      remote_ia_address,
+    OUT GID                     *gid);
+
+DAT_RETURN dapls_ns_map_ipaddr (
+    IN  DAPL_HCA                *hca_ptr,
+    IN  GID                     gid,
+    OUT DAT_IA_ADDRESS_PTR      remote_ia_address);
+
+#endif /* NO_NAME_SERVICE */
+
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.c b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.c
new file mode 100644
index 00000000..7907a69d
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_timer_util.c
+ *
+ * PURPOSE: DAPL timer management
+ * Description: Routines to add and cancel timer records. A timer record
+ *      is put on the global timer queue. If the timer thread is
+ *      not running, start it. The timer thread will sleep
+ *      until a timer event or until a process wakes it up
+ *      to notice a new timer is available; we use a DAPL_WAIT_OBJ
+ *      for synchronization.
+ *
+ *      If a timer is cancelled, it is simply removed from the
+ *      queue. The timer may wake up and notice there is no timer
+ *      record to awaken at this time, so it will reset for the
+ *      next entry. When there are no timer records to manage,
+ *      the timer thread just sleeps until awakened.
+ *
+ *      This file also contains the timer handler thread,
+ *      embodied in dapls_timer_thread().
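+ *
+ *      A minimal usage sketch (illustrative only; the callback, its
+ *      argument, and the 500000 usec timeout are hypothetical values):
+ *
+ *          static void my_timeout (void *arg)
+ *          {
+ *              dapl_os_printf ("timer fired\n");
+ *          }
+ *
+ *          DAPL_OS_TIMER timer;
+ *
+ *          dapls_timer_init ();
+ *          dapls_timer_set (&timer, my_timeout, NULL, 500000);
+ *          ...
+ *          dapls_timer_cancel (&timer);    (only if it has not fired)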
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl.h"
+#include "dapl_timer_util.h"
+
+
+struct timer_head
+{
+    DAPL_LLIST_HEAD             timer_list_head;
+    DAPL_OS_LOCK                lock;
+    DAPL_OS_WAIT_OBJECT         wait_object;
+    DAPL_OS_THREAD              timeout_thread_handle;
+} g_daplTimerHead;
+
+typedef struct timer_head       DAPL_TIMER_HEAD;
+
+
+void dapls_timer_thread (void *arg );
+
+void
+dapls_timer_init ()
+{
+    /*
+     * Set up the timer thread elements. The timer thread isn't
+     * started until it is actually needed
+     */
+    g_daplTimerHead.timer_list_head = NULL;
+    dapl_os_lock_init ( &g_daplTimerHead.lock );
+    dapl_os_wait_object_init ( &g_daplTimerHead.wait_object );
+    g_daplTimerHead.timeout_thread_handle = 0;
+}
+
+
+/*
+ * dapls_timer_set
+ *
+ * Set a timer. The timer will invoke the specified function
+ * after a number of useconds expires.
+ *
+ * Input:
+ *      timer           User provided timer structure
+ *      func            Function to invoke when timer expires
+ *      data            Argument passed to func()
+ *      expires         microseconds until timer fires
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *
+ */
+DAT_RETURN
+dapls_timer_set (
+    IN  DAPL_OS_TIMER           *timer,
+    IN  void                    (*func) (void*),
+    IN  void                    *data,
+    IN  DAPL_OS_TIMEVAL         expires )
+{
+    DAPL_OS_TIMER       *list_ptr;
+    DAPL_OS_TIMEVAL     cur_time;
+    DAT_BOOLEAN         wakeup_tmo_thread;
+
+    DAPL_CNTR(DCNT_TIMER_SET);
+    /*
+     * Start the timer thread the first time we need a timer
+     */
+    if ( g_daplTimerHead.timeout_thread_handle == 0 )
+    {
+        dapl_os_thread_create ( dapls_timer_thread,
+                                &g_daplTimerHead,
+                                &g_daplTimerHead.timeout_thread_handle );
+    }
+
+    dapl_llist_init_entry ( &timer->list_entry);
+    wakeup_tmo_thread = false;
+    dapl_os_get_time ( &cur_time );
+    timer->expires  = cur_time + expires;       /* calculate future time */
+    timer->function = func;
+    timer->data     = data;
+
+    /*
+     * Put the element on the queue: sorted by wakeup time, earliest
+     * first.
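+     *
+     * (Worked example, hypothetical values: with pending expirations
+     * {10, 25, 40} usec, a new timer at 30 usec is inserted between
+     * 25 and 40 with no wakeup; one at 5 usec becomes the new head
+     * and wakes the timer thread so it can shorten its sleep.)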
+     */
+    dapl_os_lock ( &g_daplTimerHead.lock );
+    /*
+     * Deal with 3 cases due to our list structure:
+     * 1) list is empty: become the list head
+     * 2) New timer is sooner than list head: become the list head
+     * 3) otherwise, sort the timer into the list, no need to wake
+     *    the timer thread up
+     */
+    if ( dapl_llist_is_empty ( &g_daplTimerHead.timer_list_head) )
+    {
+        /* Case 1: add entry to head of list */
+        dapl_llist_add_head ( &g_daplTimerHead.timer_list_head,
+                              (DAPL_LLIST_ENTRY *)&timer->list_entry,
+                              timer );
+        wakeup_tmo_thread = true;
+    }
+    else
+    {
+        list_ptr = (DAPL_OS_TIMER *)
+            dapl_llist_peek_head (&g_daplTimerHead.timer_list_head);
+
+        if ( timer->expires < list_ptr->expires )
+        {
+            /* Case 2: add entry to head of list */
+            dapl_llist_add_head ( &g_daplTimerHead.timer_list_head,
+                                  (DAPL_LLIST_ENTRY *)&timer->list_entry,
+                                  timer );
+            wakeup_tmo_thread = true;
+        }
+        else
+        {
+            /* Case 3: figure out where entry goes in sorted list */
+            list_ptr = dapl_llist_next_entry (
+                        &g_daplTimerHead.timer_list_head,
+                        (DAPL_LLIST_ENTRY *)&list_ptr->list_entry);
+
+            while (list_ptr != NULL)
+            {
+                if ( timer->expires < list_ptr->expires )
+                {
+                    dapl_llist_add_entry ( &g_daplTimerHead.timer_list_head,
+                                           (DAPL_LLIST_ENTRY *)&list_ptr->list_entry,
+                                           (DAPL_LLIST_ENTRY *)&timer->list_entry,
+                                           timer );
+                    break;
+
+                }
+                list_ptr = dapl_llist_next_entry (
+                        &g_daplTimerHead.timer_list_head,
+                        (DAPL_LLIST_ENTRY *)&list_ptr->list_entry);
+            }
+            if (list_ptr == NULL)
+            {
+                /* entry goes to the end of the list */
+                dapl_llist_add_tail ( &g_daplTimerHead.timer_list_head,
+                                      (DAPL_LLIST_ENTRY *)&timer->list_entry,
+                                      timer );
+            }
+        }
+
+    }
+    dapl_os_unlock ( &g_daplTimerHead.lock );
+
+    if ( wakeup_tmo_thread )
+    {
+        dapl_os_wait_object_wakeup (&g_daplTimerHead.wait_object);
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_timer_cancel
+ *
+ * Cancel a timer. Simply deletes the timer with no function invocations
+ *
+ * Input:
+ *      timer           User provided timer structure
+ *
+ * Returns:
+ *      no return value
+ */
+void
+dapls_timer_cancel (
+    IN  DAPL_OS_TIMER           *timer)
+{
+    DAPL_CNTR(DCNT_TIMER_CANCEL);
+    dapl_os_lock ( &g_daplTimerHead.lock );
+    /*
+     * make sure the entry has not been removed by another thread
+     */
+    if ( ! dapl_llist_is_empty ( &g_daplTimerHead.timer_list_head ) &&
+         timer->list_entry.list_head == &g_daplTimerHead.timer_list_head )
+    {
+        dapl_llist_remove_entry ( &g_daplTimerHead.timer_list_head,
+                                  (DAPL_LLIST_ENTRY *)&timer->list_entry );
+    }
+    /*
+     * If this was the first entry on the queue we could awaken the
+     * thread and have it reset the list; but it will just wake up
+     * and find that the timer entry has been removed, then go back
+     * to sleep, so don't bother.
+     */
+    dapl_os_unlock ( &g_daplTimerHead.lock );
+}
+
+
+/*
+ * dapls_timer_thread
+ *
+ * Core worker thread dealing with all timers.
+ * Basic algorithm:
+ *      - Sleep until work shows up
+ *      - Take the first element of the sorted timer list and
+ *        invoke the callback if expired
+ *      - Sleep for the timeout period if not expired
+ *
+ * Input:
+ *      timer_head      Timer head structure to manage timer lists
+ *
+ * Returns:
+ *      no return value
+ */
+void
+dapls_timer_thread (
+    void                        *arg )
+{
+    DAPL_OS_TIMER       *list_ptr;
+    DAPL_OS_TIMEVAL     cur_time;
+    DAT_RETURN          dat_status = DAT_SUCCESS;
+    DAPL_TIMER_HEAD     *timer_head;
+
+    timer_head = arg;
+
+    for (;;)
+    {
+        if ( dapl_llist_is_empty ( &timer_head->timer_list_head ) )
+        {
+            dat_status = dapl_os_wait_object_wait (&timer_head->wait_object,
+                                                   DAT_TIMEOUT_INFINITE );
+        }
+
+        /*
+         * Lock policy:
+         * While this thread is accessing the timer list, it holds the
+         * lock. Otherwise, it doesn't.
+         */
+        dapl_os_lock ( &timer_head->lock );
+        while ( ! dapl_llist_is_empty ( &timer_head->timer_list_head) )
+        {
+            list_ptr = (DAPL_OS_TIMER *)
+                dapl_llist_peek_head ( &timer_head->timer_list_head );
+            dapl_os_get_time ( &cur_time );
+
+            if ( list_ptr->expires <= cur_time )
+            {
+                /*
+                 * Remove the entry from the list. Sort out how much
+                 * time we need to sleep for the next one
+                 */
+                list_ptr = dapl_llist_remove_head ( &timer_head->timer_list_head );
+                dapl_os_unlock ( &timer_head->lock );
+
+                /*
+                 * Invoke the user callback
+                 */
+                list_ptr->function ( list_ptr->data );
+                /* timer structure was allocated by caller, we don't
+                 * free it here.
+                 */
+
+                /* reacquire the lock */
+                dapl_os_lock ( &timer_head->lock );
+            }
+            else
+            {
+                dapl_os_unlock( &timer_head->lock );
+                dat_status =
+                    dapl_os_wait_object_wait ( &timer_head->wait_object,
+                                               (DAT_TIMEOUT)(list_ptr->expires - cur_time) );
+                dapl_os_lock( &timer_head->lock );
+            }
+        }
+        /*
+         * release the lock before going back to the top to sleep
+         */
+        dapl_os_unlock( &timer_head->lock );
+
+        if ( dat_status == DAT_INTERNAL_ERROR )
+        {
+            /*
+             * XXX What do we do here?
+             */
+        }
+    } /* for (;;) */
+}
+
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.h b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.h
new file mode 100644
index 00000000..21cea8cb
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/dapl_timer_util.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dapl_timer_util.h
+ *
+ * PURPOSE: DAPL timer management
+ * Description: support for dapl_timer_util.c
+ *
+ * $Id$
+ **********************************************************************/
+
+void dapls_timer_init ( void );
+
+DAT_RETURN dapls_timer_set (
+    IN  DAPL_OS_TIMER           *timer,
+    IN  void                    (*func) (void*),
+    IN  void                    *data,
+    IN  DAPL_OS_TIMEVAL         expires );
+
+void dapls_timer_cancel (
+    IN  DAPL_OS_TIMER           *timer);
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.c b/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.c
new file mode 100644
index 00000000..fec18fe7
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.c
@@ -0,0 +1,642 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_osd.c
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *      Provide OS dependent functions with a canonical DAPL
+ *      interface. Designed to be portable and hide OS specific quirks
+ *      of common functions.
+ *
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dapl_osd.h"
+#include "dapl.h"
+#include "dapl_hca_util.h"
+#include "dapl_ia_util.h"
+#include "dapl_rmr_util.h"
+#include "dapl_lmr_util.h"
+#include "dapl_pz_util.h"
+#include "dapl_ep_util.h"
+#include "dapl_cr_util.h"
+#include "dapl_evd_util.h"
+#include "dapl_sp_util.h"
+#include "dapl_adapter_util.h"
+#include "dapl_provider.h"
+#include "dapl_hash.h"
+#include "dapl_timer_util.h"
+#include "dapl_debug.h"
+
+#include <sys/time.h>
+#include <stdlib.h>     /* needed for getenv() */
+#include <pthread.h>    /* needed for pthread_atfork() */
+#include <signal.h>     /* needed for thread setup */
+
+static void dapls_osd_fork_cleanup (void);
+
+/*
+ * dapl_os_init
+ *
+ * Do Linux initialization:
+ * - Set up fork handler to clean up DAPL resources in the child
+ *   process after a fork().
+ *
+ * Input:
+ *      none
+ *
+ * Returns:
+ *      none
+ */
+void
+dapl_os_init ( )
+{
+    int status;
+
+    /*
+     * Set up fork control
+     */
+    status = pthread_atfork ( NULL, NULL, dapls_osd_fork_cleanup );
+    if ( status != 0 )
+    {
+        dapl_os_printf ("WARNING: pthread_atfork %d\n", status);
+    }
+}
+
+
+/*
+ * dapl_os_get_time
+ *
+ * Return 64 bit value of current time in microseconds.
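+ *
+ * A small illustrative sketch of measuring elapsed time with this
+ * call (do_work() is a hypothetical stand-in):
+ *
+ *      DAPL_OS_TIMEVAL start, end;
+ *
+ *      dapl_os_get_time (&start);
+ *      do_work ();
+ *      dapl_os_get_time (&end);
+ *      dapl_os_printf ("elapsed " F64u " usec\n", end - start);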
+ *
+ * Input:
+ *      loc             User location to place current time
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+
+DAT_RETURN
+dapl_os_get_time (
+    OUT DAPL_OS_TIMEVAL         *loc)
+{
+    struct timeval              tv;
+    struct timezone             tz;
+
+
+    gettimeofday (&tv, &tz);
+    *loc = ((DAT_UINT64) (tv.tv_sec) * 1000000L) + (DAT_UINT64) tv.tv_usec;
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapl_os_get_env_bool
+ *
+ * Return boolean value of passed in environment variable: 1 if present,
+ * 0 if not
+ *
+ * Input:
+ *      env_str         environment variable name
+ *
+ * Returns:
+ *      TRUE or FALSE
+ */
+int
+dapl_os_get_env_bool (
+    char                *env_str )
+{
+    char                *env_var;
+
+    env_var = getenv (env_str);
+    if (env_var != NULL)
+    {
+        return 1;
+    }
+
+    return 0;
+}
+
+
+/*
+ * dapl_os_get_env_val
+ *
+ * Return the value of the passed in environment variable if present,
+ * else the supplied default
+ *
+ * Input:
+ *      env_str
+ *      def_val         default value if environment variable does not exist
+ *
+ * Returns:
+ *      value of the environment variable, or def_val
+ */
+int
+dapl_os_get_env_val (
+    char                *env_str,
+    int                 def_val )
+{
+    char                *env_var;
+
+    env_var = getenv (env_str);
+    if ( env_var != NULL )
+    {
+        def_val = strtol (env_var, NULL, 0);
+    }
+
+    return def_val;
+}
+
+
+/*
+ * Wait object routines
+ */
+
+/*
+ * dapl_os_wait_object_init
+ *
+ * Initialize a wait object
+ *
+ * Input:
+ *      wait_obj
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INTERNAL_ERROR
+ */
+DAT_RETURN
+dapl_os_wait_object_init (
+    IN DAPL_OS_WAIT_OBJECT      *wait_obj)
+{
+    wait_obj->signaled = DAT_FALSE;
+    if ( 0 != pthread_cond_init ( &wait_obj->cv, NULL ) )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    /* Always returns 0. */
+    pthread_mutex_init ( &wait_obj->lock, NULL );
+
+    return DAT_SUCCESS;
+}
+
+
+/* Wait on the supplied wait object, up to the specified time_out.
+ * A timeout of DAT_TIMEOUT_INFINITE will wait indefinitely.
+ * Timeout should be specified in microseconds.
+ *
+ * Functional returns:
+ *      DAT_SUCCESS -- another thread invoked dapl_os_wait_object_wakeup
+ *      DAT_INVALID_STATE -- someone else is already waiting in this wait
+ *                           object; only one waiter is allowed at a time.
+ *      DAT_ABORT -- another thread invoked dapl_os_wait_object_destroy
+ *      DAT_TIMEOUT -- the specified time limit was reached.
+ */
+
+DAT_RETURN
+dapl_os_wait_object_wait (
+    IN  DAPL_OS_WAIT_OBJECT     *wait_obj,
+    IN  DAT_TIMEOUT             timeout_val)
+{
+    DAT_RETURN          dat_status;
+    int                 pthread_status;
+    struct timespec     future;
+
+    dat_status     = DAT_SUCCESS;
+    pthread_status = 0;
+
+    if ( timeout_val != DAT_TIMEOUT_INFINITE )
+    {
+        struct timeval  now;
+        struct timezone tz;
+        unsigned int    microsecs;
+
+        gettimeofday (&now, &tz);
+        microsecs = now.tv_usec + (timeout_val % 1000000);
+        if (microsecs >= 1000000)
+        {
+            now.tv_sec  = now.tv_sec + timeout_val / 1000000 + 1;
+            now.tv_usec = microsecs - 1000000;
+        }
+        else
+        {
+            now.tv_sec  = now.tv_sec + timeout_val / 1000000;
+            now.tv_usec = microsecs;
+        }
+
+        /* Convert timeval to timespec */
+        future.tv_sec  = now.tv_sec;
+        future.tv_nsec = now.tv_usec * 1000;
+
+        pthread_mutex_lock (&wait_obj->lock);
+        while ( wait_obj->signaled == DAT_FALSE && pthread_status == 0)
+        {
+            pthread_status = pthread_cond_timedwait (
+                        &wait_obj->cv, &wait_obj->lock, &future );
+
+            /*
+             * No need to reset &future if we go around the loop;
+             * It's an absolute time.
+             */
+        }
+        /* Reset the signaled status if we were woken up.
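+         * This makes the wait object auto-reset: each wakeup releases
+         * at most one waiter, so back-to-back signals may be coalesced
+         * but a signal is never lost (see the wait object invariant in
+         * dapl_osd.h).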
+         */
+        if (pthread_status == 0)
+        {
+            wait_obj->signaled = DAT_FALSE;
+        }
+        pthread_mutex_unlock (&wait_obj->lock);
+    }
+    else
+    {
+        pthread_mutex_lock (&wait_obj->lock);
+        while ( wait_obj->signaled == DAT_FALSE && pthread_status == 0)
+        {
+            pthread_status = pthread_cond_wait (
+                        &wait_obj->cv, &wait_obj->lock );
+        }
+        /* Reset the signaled status if we were woken up. */
+        if (pthread_status == 0)
+        {
+            wait_obj->signaled = DAT_FALSE;
+        }
+        pthread_mutex_unlock (&wait_obj->lock);
+    }
+
+    if (ETIMEDOUT == pthread_status)
+    {
+        dat_status = DAT_ERROR (DAT_TIMEOUT_EXPIRED,0);
+    }
+    else if ( 0 != pthread_status)
+    {
+        dat_status = DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    return dat_status;
+}
+
+
+/*
+ * dapl_os_wait_object_wakeup
+ *
+ * Wakeup a thread waiting on a wait object
+ *
+ * Input:
+ *      wait_obj
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INTERNAL_ERROR
+ */
+DAT_RETURN
+dapl_os_wait_object_wakeup (
+    IN DAPL_OS_WAIT_OBJECT      *wait_obj)
+{
+    pthread_mutex_lock ( &wait_obj->lock );
+    wait_obj->signaled = DAT_TRUE;
+    pthread_mutex_unlock ( &wait_obj->lock );
+    if ( 0 != pthread_cond_signal ( &wait_obj->cv ) )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapl_os_wait_object_destroy
+ *
+ * Destroy a wait object
+ *
+ * Input:
+ *      wait_obj
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INTERNAL_ERROR
+ */
+DAT_RETURN
+dapl_os_wait_object_destroy (
+    IN DAPL_OS_WAIT_OBJECT      *wait_obj)
+{
+    if ( 0 != pthread_cond_destroy ( &wait_obj->cv ) )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+    if ( 0 != pthread_mutex_destroy ( &wait_obj->lock ) )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapls_osd_fork_cleanup
+ *
+ * Tear down DAPL resources in the child process after a fork();
+ * registered with pthread_atfork() by dapl_os_init()
+ *
+ * Input:
+ *      none
+ *
+ * Returns:
+ *      none
+ */
+void dapls_osd_fork_cleanup (void)
+{
+    DAPL_PROVIDER_LIST_NODE     *cur_node;
+    DAPL_HCA                    *hca_ptr;
+    DAPL_IA                     *ia_ptr;
+    DAPL_LMR                    *lmr_ptr;
+    DAPL_RMR                    *rmr_ptr;
+    DAPL_PZ                     *pz_ptr;
+    DAPL_CR                     *cr_ptr;
+    DAPL_EP                     *ep_ptr;
+    DAPL_EVD                    *evd_ptr;
+    DAT_EP_PARAM                *param;
+    DAPL_SP                     *sp_ptr;
+
+    while ( NULL != g_dapl_provider_list.head )
+    {
+        cur_node = g_dapl_provider_list.head;
+        g_dapl_provider_list.head = cur_node->next;
+
+        hca_ptr = (DAPL_HCA *) cur_node->data.extension;
+
+        /*
+         * Walk the list of IA ptrs & clean up. This is purposely
+         * a destructive list walk, we really don't want to preserve
+         * any of it.
+         */
+        while (!dapl_llist_is_empty ( &hca_ptr->ia_list_head ) )
+        {
+            ia_ptr = (DAPL_IA *)
+                dapl_llist_peek_head ( &hca_ptr->ia_list_head );
+
+            /*
+             * The rest of the cleanup code is similar to dapl_ia_close,
+             * the big difference is that we don't release IB resources,
+             * only memory; the underlying IB subsystem doesn't deal
+             * with fork at all, so leave IB handles alone.
+             */
+            while (!dapl_llist_is_empty ( &ia_ptr->rmr_list_head ) )
+            {
+                rmr_ptr = (DAPL_RMR *)
+                    dapl_llist_peek_head (&ia_ptr->rmr_list_head);
+                if ( rmr_ptr->param.lmr_triplet.virtual_address != 0 )
+                {
+                    (void) dapl_os_atomic_dec (&rmr_ptr->lmr->lmr_ref_count);
+                    rmr_ptr->param.lmr_triplet.virtual_address = 0;
+                }
+                dapl_os_atomic_dec ( &rmr_ptr->pz->pz_ref_count );
+                dapl_dbg_log (DAPL_DBG_TYPE_ERR,"FC_421:rmr- pz=%p,ref=0x%x\n",
+                              rmr_ptr->pz,rmr_ptr->pz->pz_ref_count);
+
+                dapl_rmr_dealloc ( rmr_ptr );
+            }
+
+            while (!dapl_llist_is_empty ( &ia_ptr->rsp_list_head ))
+            {
+                sp_ptr = (DAPL_SP *)
+                    dapl_llist_peek_head ( &ia_ptr->rsp_list_head );
+                dapl_os_atomic_dec (& ((DAPL_EVD *)sp_ptr->evd_handle)->evd_ref_count);
+                dapls_ia_unlink_sp ( ia_ptr, sp_ptr );
+                dapls_sp_free_sp ( sp_ptr );
+            }
+
+            while (!dapl_llist_is_empty ( &ia_ptr->ep_list_head ) )
+            {
+                ep_ptr = (DAPL_EP *)
+                    dapl_llist_peek_head ( &ia_ptr->ep_list_head );
+                param = &ep_ptr->param;
+                if ( param->pz_handle != NULL )
+                {
+                    dapl_os_atomic_dec ( & ((DAPL_PZ *)param->pz_handle)->pz_ref_count );
+                    dapl_dbg_log (DAPL_DBG_TYPE_ERR,"FC_441:ep- pz=%p,ref=0x%x\n",
+                                  ((DAPL_PZ *)param->pz_handle),
+                                  ((DAPL_PZ *)param->pz_handle)->pz_ref_count);
+                }
+                if ( param->recv_evd_handle != NULL )
+                {
+                    dapl_os_atomic_dec (& ((DAPL_EVD *)param->recv_evd_handle)->evd_ref_count);
+                }
+                if ( param->request_evd_handle )
+                {
+                    dapl_os_atomic_dec (& ((DAPL_EVD *)param->request_evd_handle)->evd_ref_count);
+                }
+                if ( param->connect_evd_handle != NULL )
+                {
+                    dapl_os_atomic_dec (& ((DAPL_EVD *)param->connect_evd_handle)->evd_ref_count);
+                }
+
+                /* ...and free the resource */
+                dapl_ia_unlink_ep ( ia_ptr, ep_ptr );
+                dapl_ep_dealloc ( ep_ptr );
+            }
+
+            while ( !dapl_llist_is_empty (&ia_ptr->lmr_list_head) )
+            {
+                lmr_ptr = (DAPL_LMR *)
+                    dapl_llist_peek_head ( &ia_ptr->lmr_list_head );
+
+                (void) dapls_hash_remove ( lmr_ptr->header.owner_ia->hca_ptr->lmr_hash_table,
+                                           lmr_ptr->param.lmr_context,
+                                           NULL );
+
+                pz_ptr = (DAPL_PZ *) lmr_ptr->param.pz_handle;
+                dapl_os_atomic_dec ( &pz_ptr->pz_ref_count );
+                dapl_dbg_log (DAPL_DBG_TYPE_ERR,"FC_471:lmr- pz=%p,ref=0x%x\n",
+                              pz_ptr, pz_ptr->pz_ref_count);
+                dapl_lmr_dealloc ( lmr_ptr );
+            }
+
+            while ( !dapl_llist_is_empty ( &ia_ptr->psp_list_head ) )
+            {
+                sp_ptr = (DAPL_SP *)
+                    dapl_llist_peek_head ( &ia_ptr->psp_list_head );
+                while ( !dapl_llist_is_empty (&sp_ptr->cr_list_head) )
+                {
+                    cr_ptr = (DAPL_CR *)
+                        dapl_llist_peek_head ( &sp_ptr->cr_list_head );
+                    dapl_sp_remove_cr (sp_ptr, cr_ptr);
+                    dapls_cr_free (cr_ptr);
+                }
+
+                dapls_ia_unlink_sp ( ia_ptr, sp_ptr );
+                dapl_os_atomic_dec (& ((DAPL_EVD *)sp_ptr->evd_handle)->evd_ref_count);
+                dapls_sp_free_sp ( sp_ptr );
+            }
+
+            while (!dapl_llist_is_empty ( &ia_ptr->pz_list_head ) )
+            {
+                pz_ptr = (DAPL_PZ *)
+                    dapl_llist_peek_head ( &ia_ptr->pz_list_head );
+                dapl_pz_dealloc ( pz_ptr );
+            }
+
+            while (!dapl_llist_is_empty (&ia_ptr->evd_list_head))
+            {
+                evd_ptr = (DAPL_EVD *)
+                    dapl_llist_peek_head ( &ia_ptr->evd_list_head );
+                dapl_ia_unlink_evd ( evd_ptr->header.owner_ia, evd_ptr );
+                /* reset the cq_handle to avoid having it removed */
+                evd_ptr->ib_cq_handle = IB_INVALID_HANDLE;
+                dapls_evd_dealloc ( evd_ptr );
+            }
+
+            dapl_hca_unlink_ia ( ia_ptr->hca_ptr, ia_ptr );
+            /* async error evd was taken care of above, reset the pointer */
+            ia_ptr->async_error_evd = NULL;
+            dapls_ia_free ( ia_ptr );
+        } /* end while ( ia_ptr != NULL ) */
+
+
+        dapl_os_free (cur_node, sizeof (DAPL_PROVIDER_LIST_NODE));
+    } /* end while (NULL != g_dapl_provider_list.head) */
+}
+
+
+/*
+ * Structure to contain all elements of a thread in order to enable a
+ * local routine to intercept and do some necessary initialization.
+ */
+struct thread_draft
+{
+    void        (*func) (void *);       /* start routine */
+    void        *data;                  /* argument to start routine */
+};
+
+void dapli_thread_init ( struct thread_draft *thread_draft);
+
+/*
+ * dapl_os_thread_create
+ *
+ * Create a thread for dapl
+ *
+ * Input:
+ *      func            function to invoke thread
+ *      data            argument to pass to function
+ *
+ * Output
+ *      thread_id       handle for thread
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+DAT_RETURN
+dapl_os_thread_create (
+    IN  void                    (*func) (void *),
+    IN  void                    *data,
+    OUT DAPL_OS_THREAD          *thread_id )
+{
+    pthread_attr_t              thread_attr;
+    struct thread_draft         *thread_draft;
+    int                         status;
+
+    /*
+     * Get default set of thread attributes
+     */
+    status = pthread_attr_init ( &thread_attr );
+    if ( status != 0 )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    /* Create dapl threads as detached from this process */
+    status = pthread_attr_setdetachstate ( &thread_attr,
+                                           PTHREAD_CREATE_DETACHED );
+    if ( status != 0 )
+    {
+        return DAT_ERROR (DAT_INTERNAL_ERROR,0);
+    }
+
+    thread_draft = dapl_os_alloc (sizeof ( struct thread_draft));
+    if ( thread_draft == NULL )
+    {
+        return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,0);
+    }
+
+    thread_draft->func = func;
+    thread_draft->data = data;
+
+    /* Create the thread. Observe that we first invoke a local
+     * routine to set up OS parameters, before invoking the user
+     * specified routine.
+     */
+    status = pthread_create ( thread_id,
+                              &thread_attr,
+                              (void * (*) (void *))dapli_thread_init,
+                              (void *)thread_draft );
+
+    /* clean up resources */
+    (void) pthread_attr_destroy ( &thread_attr );
+
+    if ( status != 0 )
+    {
+        return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,0);
+    }
+
+    return DAT_SUCCESS;
+}
+
+/*
+ * dapli_thread_init
+ *
+ * Need to mask all signals from this thread in order to be a good
+ * citizen. Signals will arrive randomly and will be processed by
+ * whatever thread is running unless they are specifically blocked; and
+ * this should be a user thread, not a dapl thread
+ */
+
+void
+dapli_thread_init (
+    struct thread_draft         *thread_draft)
+{
+    sigset_t    sigset;
+    void        (*func) (void *);
+    void        *data;
+
+    sigfillset (&sigset);
+    pthread_sigmask ( SIG_BLOCK, &sigset, NULL);
+
+    func = thread_draft->func;
+    data = thread_draft->data;
+    dapl_os_free (thread_draft, sizeof ( struct thread_draft ));
+
+    (*func) (data);
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.h b/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.h
new file mode 100644
index 00000000..3f786b6f
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/linux/dapl_osd.h
@@ -0,0 +1,553 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_osd.h + * + * PURPOSE: Operating System Dependent layer + * Description: + * Provide OS dependent data structures & functions with + * a canonical DAPL interface. Designed to be portable + * and hide OS specific quirks of common functions. + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_OSD_H_ +#define _DAPL_OSD_H_ + +/* + * This file is defined for Linux systems only, including it on any + * other build will cause an error + */ +#ifndef __linux__ +#error UNDEFINED OS TYPE +#endif /* __linux__ */ + +#ifdef __IA64__ +#define IA64 +#endif + +#if !defined (__i386__) && !defined (IA64) +#error UNDEFINED ARCH +#endif + + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include /* for printf */ +#include +#include +#include /* for getaddrinfo */ + +#include "dapl_debug.h" + +/* + * Include files for setting up a network name + */ +#include +#include +#include +#include + +#ifdef IA64 +#include +#include +#endif + + +/* Useful debug definitions */ +#ifndef STATIC +#define STATIC static +#endif /* STATIC */ +#ifndef _INLINE_ +#define _INLINE_ __inline__ +#endif /* _INLINE_ */ + +void dapl_os_init ( void ); /* initialization function */ + +#define dapl_os_panic(...) \ + do { \ + fprintf(stderr, "PANIC in %s:%i:%s\n", __FILE__, __LINE__, __func__); \ + fprintf(stderr, __VA_ARGS__); \ + exit(1); \ + } while(0) + +#define dapl_ip_addr6(sockaddr) (((struct sockaddr_in6 *)sockaddr)->sin6_addr.s6_addr32) + +/* + * Atomic operations + */ + +typedef volatile DAT_COUNT DAPL_ATOMIC; + +/* atomic function prototypes */ +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_inc ( + INOUT DAPL_ATOMIC *v); + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_dec ( + INOUT DAPL_ATOMIC *v); + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_assign ( + INOUT DAPL_ATOMIC *v, + IN DAT_COUNT match_value, + IN DAT_COUNT new_value ); + +int dapl_os_get_env_bool ( + char *env_str ); + +int dapl_os_get_env_val ( + char *env_str, + int def_val ); + + + +/* atomic functions */ + +/* dapl_os_atomic_inc + * + * get the current value of '*v', and then increment it. + * + * This is equivalent to an IB atomic fetch and add of 1, + * except that a DAT_COUNT might be 32 bits, rather than 64 + * and it occurs in local memory. + */ + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_inc ( + INOUT DAPL_ATOMIC *v) +{ + DAT_COUNT old_value; + + /* + * Use the Pentium Exchange and Add instruction. + * The assembler doesn't deal with xadd so forge it. + */ +#ifdef IA64 +IA64_FETCHADD (old_value,v,1,4); + +#else + __asm__ __volatile__ ( + ".byte 0xf0, 0x0f, 0xc1, 0x02" // lock; xaddl %eax, (%edx) + : "=a" (old_value) + : "0" (+1), "m" (*v), "d" (v) + : "memory"); +#endif + return old_value; +} + + +/* dapl_os_atomic_dec + * + * decrement the current value of '*v'. No return value is required. + */ + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_dec ( + INOUT DAPL_ATOMIC *v) +{ + DAT_COUNT old_value; + + assert(*v != 0); + + /* + * Use the Pentium Exchange and Add instruction. 
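+     * (The byte sequence 0xf0 0x0f 0xc1 0x02 used here encodes
+     * "lock xadd %eax,(%edx)": the lock prefix makes the
+     * read-modify-write atomic with respect to other processors.)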
+     * The assembler doesn't deal with xadd so forge it
+     */
+
+#ifdef IA64
+    IA64_FETCHADD (old_value,v,-1,4);
+
+#else
+    __asm__ __volatile__ (
+        ".byte 0xf0, 0x0f, 0xc1, 0x02"  // lock; xaddl %eax, (%edx)
+        : "=a" (old_value)
+        : "0" (-1), "m" (*v), "d" (v)
+        : "memory");
+
+#endif
+    return old_value;
+}
+
+
+/* dapl_os_atomic_assign
+ *
+ * assign 'new_value' to '*v' if the current value
+ * matches the provided 'match_value'.
+ *
+ * Make no assignment if there is no match.
+ *
+ * Return the current value in any case.
+ *
+ * This matches the IBTA atomic operation compare & swap
+ * except that it is for local memory and a DAT_COUNT may
+ * be only 32 bits, rather than 64.
+ */
+
+STATIC _INLINE_ DAT_COUNT
+dapl_os_atomic_assign (
+    INOUT DAPL_ATOMIC *v,
+    IN    DAT_COUNT match_value,
+    IN    DAT_COUNT new_value )
+{
+    DAT_COUNT current_value;
+
+    /*
+     * Use the Pentium compare and exchange instruction
+     */
+
+#ifdef IA64
+
+    current_value = ia64_cmpxchg("acq",v,match_value,new_value,4);
+
+#else
+    __asm__ __volatile__ (
+        "lock; cmpxchgl %1, %2"
+        : "=a" (current_value)
+        : "q" (new_value), "m" (*v), "0" (match_value)
+        : "memory");
+#endif
+    return current_value;
+}
+
+/*
+ * Thread Functions
+ */
+typedef pthread_t               DAPL_OS_THREAD;
+
+DAT_RETURN
+dapl_os_thread_create (
+    IN  void                    (*func) (void *),
+    IN  void                    *data,
+    OUT DAPL_OS_THREAD          *thread_id );
+
+
+/*
+ * Lock Functions
+ */
+
+typedef pthread_mutex_t         DAPL_OS_LOCK;
+
+/* function prototypes */
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock_init (
+    IN DAPL_OS_LOCK *m);
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock (
+    IN DAPL_OS_LOCK *m);
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_unlock (
+    IN DAPL_OS_LOCK *m);
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock_destroy (
+    IN DAPL_OS_LOCK *m);
+
+/* lock functions */
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock_init (
+    IN DAPL_OS_LOCK *m)
+{
+    /* pthread_mutex_init always returns 0 */
+    pthread_mutex_init (m, NULL);
+
+    return DAT_SUCCESS;
+}
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock (
+    IN DAPL_OS_LOCK *m)
+{
+    if (0 == pthread_mutex_lock (m))
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR;
+    }
+}
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_unlock (
+    IN DAPL_OS_LOCK *m)
+{
+    if (0 == pthread_mutex_unlock (m))
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR;
+    }
+}
+
+STATIC _INLINE_ DAT_RETURN
+dapl_os_lock_destroy (
+    IN DAPL_OS_LOCK *m)
+{
+    if (0 == pthread_mutex_destroy (m))
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR;
+    }
+}
+
+
+/*
+ * Wait Objects
+ */
+
+/*
+ * The wait object invariant: Presuming a call to dapl_os_wait_object_wait
+ * occurs at some point, there will be at least one wakeup after each call
+ * to dapl_os_wait_object_wakeup. I.e. Signals are not ignored, though
+ * they may be coalesced.
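+ * (E.g., two wakeups delivered before any thread waits release only a
+ * single waiter, i.e. they coalesce; but a wait that follows a wakeup
+ * returns immediately instead of blocking.)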
+ */
+
+typedef struct
+{
+    DAT_BOOLEAN         signaled;
+    pthread_cond_t      cv;
+    pthread_mutex_t     lock;
+} DAPL_OS_WAIT_OBJECT;
+
+/* function prototypes */
+DAT_RETURN
+dapl_os_wait_object_init (
+    IN DAPL_OS_WAIT_OBJECT *wait_obj);
+
+DAT_RETURN
+dapl_os_wait_object_wait (
+    IN DAPL_OS_WAIT_OBJECT *wait_obj,
+    IN DAT_TIMEOUT timeout_val);
+
+DAT_RETURN
+dapl_os_wait_object_wakeup (
+    IN DAPL_OS_WAIT_OBJECT *wait_obj);
+
+DAT_RETURN
+dapl_os_wait_object_destroy (
+    IN DAPL_OS_WAIT_OBJECT *wait_obj);
+
+/*
+ * Memory Functions
+ */
+
+/* function prototypes */
+STATIC _INLINE_ void *dapl_os_alloc (int size);
+
+STATIC _INLINE_ void *dapl_os_realloc (void *ptr, int size);
+
+STATIC _INLINE_ void dapl_os_free (void *ptr, int size);
+
+STATIC _INLINE_ void * dapl_os_memzero (void *loc, int size);
+
+STATIC _INLINE_ void * dapl_os_memcpy (void *dest, const void *src, int len);
+
+STATIC _INLINE_ int dapl_os_memcmp (const void *mem1, const void *mem2, int len);
+
+/* memory functions */
+
+
+STATIC _INLINE_ void *dapl_os_alloc (int size)
+{
+    return malloc (size);
+}
+
+STATIC _INLINE_ void *dapl_os_realloc (void *ptr, int size)
+{
+    return realloc(ptr, size);
+}
+
+STATIC _INLINE_ void dapl_os_free (void *ptr, int size)
+{
+    free (ptr);
+}
+
+STATIC _INLINE_ void * dapl_os_memzero (void *loc, int size)
+{
+    return memset (loc, 0, size);
+}
+
+STATIC _INLINE_ void * dapl_os_memcpy (void *dest, const void *src, int len)
+{
+    return memcpy (dest, src, len);
+}
+
+STATIC _INLINE_ int dapl_os_memcmp (const void *mem1, const void *mem2, int len)
+{
+    return memcmp (mem1, mem2, len);
+}
+
+/*
+ * String Functions
+ */
+
+STATIC _INLINE_ unsigned int dapl_os_strlen(const char *str)
+{
+    return strlen(str);
+}
+
+STATIC _INLINE_ char * dapl_os_strdup(const char *str)
+{
+    return strdup(str);
+}
+
+
+/*
+ * Timer Functions
+ */
+
+typedef DAT_UINT64              DAPL_OS_TIMEVAL;
+
+
+typedef struct dapl_timer_entry DAPL_OS_TIMER;
+typedef unsigned long long int  DAPL_OS_TICKS;
+
+/* function prototypes */
+
+/*
+ * Sleep for the number of microseconds specified by the invoking
+ * function
+ */
+STATIC _INLINE_ void dapl_os_sleep_usec (int sleep_time)
+{
+    struct timespec sleep_spec, rem;
+
+    /*
+     * Sleep for the specified number of microseconds
+     */
+    sleep_spec.tv_sec  = sleep_time / 1000000;
+    sleep_spec.tv_nsec = sleep_time % 1000000 * 1000;
+    nanosleep (&sleep_spec, &rem);
+}
+
+STATIC _INLINE_ DAPL_OS_TICKS dapl_os_get_ticks (void);
+
+STATIC _INLINE_ int dapl_os_ticks_to_seconds (DAPL_OS_TICKS ticks);
+
+
+DAT_RETURN dapl_os_get_time (DAPL_OS_TIMEVAL *);
+/* timer functions */
+
+STATIC _INLINE_ DAPL_OS_TICKS dapl_os_get_ticks (void)
+{
+    DAPL_OS_TICKS x;
+    __asm__ volatile(".byte 0x0f,0x31" : "=A" (x));     /* rdtsc */
+    return x;
+}
+
+STATIC _INLINE_ int dapl_os_ticks_to_seconds (DAPL_OS_TICKS ticks)
+{
+    /* NOT YET IMPLEMENTED IN USER-SPACE */
+    return 0;
+}
+
+
+/*
+ * dapl_os_timer_cancel()
+ *
+ * Cancel a running timer so that its function is not invoked.
+ *
+ * Input:
+ *      timer           Running timer
+ *
+ * Returns:
+ *      no return value
+ *
+ */
+void dapl_os_timer_cancel ( DAPL_OS_TIMER *timer );
+
+
+/*
+ *
+ * Name Service Helper functions
+ *
+ */
+#define dapls_osd_getaddrinfo(name, addr_ptr) getaddrinfo(name,NULL,NULL,addr_ptr)
+#define dapls_osd_freeaddrinfo(addr) freeaddrinfo (addr)
+
+/*
+ * *printf format helpers. We use the C string constant concatenation
+ * ability to define 64 bit formats, which unfortunately are non standard
+ * in the C compiler world. E.g.
+ * %llx for gcc, %I64x for Windows
+ */
+#define F64d   "%lld"
+#define F64u   "%llu"
+#define F64x   "%llx"
+#define F64X   "%llX"
+
+
+/*
+ * Conversion Functions
+ */
+
+STATIC _INLINE_ long int
+dapl_os_strtol(const char *nptr, char **endptr, int base)
+{
+    return strtol(nptr, endptr, base);
+}
+
+
+/*
+ * Helper Functions
+ */
+
+
+#define dapl_os_assert(expression)      assert(expression)
+#define dapl_os_printf(...)             fprintf(stderr,__VA_ARGS__)
+#define dapl_os_vprintf(fmt,args)       vfprintf(stderr, fmt, args)
+#define dapl_os_syslog(fmt,args)        vsyslog (LOG_USER | LOG_DEBUG,fmt,args)
+
+
+
+#endif /* _DAPL_OSD_H_ */
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/makefile b/branches/Ndi/ulp/dapl/dapl/udapl/makefile
new file mode 100644
index 00000000..a0c06273
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/makefile
@@ -0,0 +1,7 @@
+#
+# DO NOT EDIT THIS FILE!!!  Edit .\sources. if you want to add a new source
+# file to this component.  This file merely indirects to the real make file
+# that is shared by all the driver components of the OpenIB Windows project.
+#
+
+!INCLUDE ..\..\..\..\inc\openib.def
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/makefile.wnd b/branches/Ndi/ulp/dapl/dapl/udapl/makefile.wnd
new file mode 100644
index 00000000..ba237b8a
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/makefile.wnd
@@ -0,0 +1,283 @@
+#
+# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+#
+# This Software is licensed under either one of the following two licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+#    in the file LICENSE.txt in the root directory. The license is also
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/cpl.php.
+# OR
+#
+# 2) under the terms of the "The BSD License" a copy of which is in the file
+#    LICENSE2.txt in the root directory. The license is also available from
+#    the Open Source Initiative, see
+#    http://www.opensource.org/licenses/bsd-license.php.
+#
+# Licensee has the right to choose either one of the above two licenses.
+#
+# Redistributions of source code must retain both the above copyright
+# notice and either one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, either one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+
+#*********************************************************************
+#
+# MODULE: Makefile
+#
+# PURPOSE: Makefile for DAT registration module
+#
+#*********************************************************************
+
+#*********************************************************************
+#
+# Dot Directives
+#
+#*********************************************************************/
+
+.SUFFIXES :             # clear the .SUFFIXES list
+.SUFFIXES : .c          # initialize .SUFFIXES list
+
+
+#*********************************************************************
+#
+# Macros
+#
+#*********************************************************************/
+
+UDAPL           = .
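+# (Directory layout assumed by the macros below: "." holds the udapl
+# sources, intermediate .obj files go to .\Obj and the dapl.dll target
+# to .\Target; both directories are created by the mkdirs rule at the
+# bottom of this file.)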
+UDAPL_INCLUDE = $(UDAPL)/../include +UDAPL_COMMON = $(UDAPL)/../common +UDAPL_WINDOWS = $(UDAPL)/windows + +OBJ_PATH = $(UDAPL)/Obj +TARGET_PATH = $(UDAPL)/Target + +OBJS = $(OBJ_PATH)/dapl_init.obj \ + $(OBJ_PATH)/dapl_timer_util.obj \ + $(OBJ_PATH)/dapl_name_service.obj \ + $(OBJ_PATH)/dapl_osd.obj \ + $(OBJ_PATH)/dapl_cookie.obj \ + $(OBJ_PATH)/dapl_cr_accept.obj \ + $(OBJ_PATH)/dapl_cr_query.obj \ + $(OBJ_PATH)/dapl_cr_reject.obj \ + $(OBJ_PATH)/dapl_cr_util.obj \ + $(OBJ_PATH)/dapl_cr_callback.obj \ + $(OBJ_PATH)/dapl_cr_handoff.obj \ + $(OBJ_PATH)/dapl_ep_connect.obj \ + $(OBJ_PATH)/dapl_ep_create.obj \ + $(OBJ_PATH)/dapl_ep_disconnect.obj \ + $(OBJ_PATH)/dapl_ep_dup_connect.obj \ + $(OBJ_PATH)/dapl_ep_free.obj \ + $(OBJ_PATH)/dapl_ep_reset.obj \ + $(OBJ_PATH)/dapl_ep_get_status.obj \ + $(OBJ_PATH)/dapl_ep_modify.obj \ + $(OBJ_PATH)/dapl_ep_post_rdma_read.obj \ + $(OBJ_PATH)/dapl_ep_post_rdma_write.obj \ + $(OBJ_PATH)/dapl_ep_post_recv.obj \ + $(OBJ_PATH)/dapl_ep_post_send.obj \ + $(OBJ_PATH)/dapl_ep_query.obj \ + $(OBJ_PATH)/dapl_ep_util.obj \ + $(OBJ_PATH)/dapl_evd_create.obj \ + $(OBJ_PATH)/dapl_evd_dequeue.obj \ + $(OBJ_PATH)/dapl_evd_disable.obj \ + $(OBJ_PATH)/dapl_evd_enable.obj \ + $(OBJ_PATH)/dapl_evd_free.obj \ + $(OBJ_PATH)/dapl_evd_modify_cno.obj \ + $(OBJ_PATH)/dapl_evd_post_se.obj \ + $(OBJ_PATH)/dapl_evd_query.obj \ + $(OBJ_PATH)/dapl_evd_resize.obj \ + $(OBJ_PATH)/dapl_evd_wait.obj \ + $(OBJ_PATH)/dapl_evd_util.obj \ + $(OBJ_PATH)/dapl_evd_cq_async_error_callb.obj \ + $(OBJ_PATH)/dapl_evd_qp_async_error_callb.obj \ + $(OBJ_PATH)/dapl_evd_un_async_error_callb.obj \ + $(OBJ_PATH)/dapl_evd_connection_callb.obj \ + $(OBJ_PATH)/dapl_evd_dto_callb.obj \ + $(OBJ_PATH)/dapl_evd_set_unwaitable.obj \ + $(OBJ_PATH)/dapl_evd_clear_unwaitable.obj \ + $(OBJ_PATH)/dapl_get_consumer_context.obj \ + $(OBJ_PATH)/dapl_get_handle_type.obj \ + $(OBJ_PATH)/dapl_hash.obj \ + $(OBJ_PATH)/dapl_hca_util.obj \ + $(OBJ_PATH)/dapl_ia_close.obj \ + $(OBJ_PATH)/dapl_ia_open.obj \ + $(OBJ_PATH)/dapl_ia_query.obj \ + $(OBJ_PATH)/dapl_ia_util.obj \ + $(OBJ_PATH)/dapl_llist.obj \ + $(OBJ_PATH)/dapl_lmr_create.obj \ + $(OBJ_PATH)/dapl_lmr_free.obj \ + $(OBJ_PATH)/dapl_lmr_query.obj \ + $(OBJ_PATH)/dapl_lmr_util.obj \ + $(OBJ_PATH)/dapl_mr_util.obj \ + $(OBJ_PATH)/dapl_provider.obj \ + $(OBJ_PATH)/dapl_sp_util.obj \ + $(OBJ_PATH)/dapl_psp_create.obj \ + $(OBJ_PATH)/dapl_psp_create_any.obj \ + $(OBJ_PATH)/dapl_psp_free.obj \ + $(OBJ_PATH)/dapl_psp_query.obj \ + $(OBJ_PATH)/dapl_pz_create.obj \ + $(OBJ_PATH)/dapl_pz_free.obj \ + $(OBJ_PATH)/dapl_pz_query.obj \ + $(OBJ_PATH)/dapl_pz_util.obj \ + $(OBJ_PATH)/dapl_rmr_create.obj \ + $(OBJ_PATH)/dapl_rmr_free.obj \ + $(OBJ_PATH)/dapl_rmr_bind.obj \ + $(OBJ_PATH)/dapl_rmr_query.obj \ + $(OBJ_PATH)/dapl_rmr_util.obj \ + $(OBJ_PATH)/dapl_rsp_create.obj \ + $(OBJ_PATH)/dapl_rsp_free.obj \ + $(OBJ_PATH)/dapl_rsp_query.obj \ + $(OBJ_PATH)/dapl_cno_create.obj \ + $(OBJ_PATH)/dapl_cno_modify_agent.obj \ + $(OBJ_PATH)/dapl_cno_free.obj \ + $(OBJ_PATH)/dapl_cno_wait.obj \ + $(OBJ_PATH)/dapl_cno_query.obj \ + $(OBJ_PATH)/dapl_cno_util.obj \ + $(OBJ_PATH)/dapl_set_consumer_context.obj \ + $(OBJ_PATH)/dapl_ring_buffer_util.obj \ + $(OBJ_PATH)/dapl_debug.obj + +LIBRARY = $(TARGET_PATH)/dapl.dll + +# +# Provider specific flags & files +# + +PROVIDER_LIB = $(SYSTEMROOT)\VerbsLibrary.lib + +# Define Provider for this build +#IBAPI=1 +# VAPI=1 +IBAL=1 +!if defined(VAPI) + +PROVIDER = $(UDAPL)/../vapi + +PROVIDER_OBJS = \ + $(OBJDIR)/dapl_vapi_util.obj \ + 
$(OBJDIR)/dapl_vapi_qp.obj \ + $(OBJDIR)/dapl_vapi_cm.obj + +PROVIDER_INCLUDES = \ + /I $(PROVIDER) \ + /I $(MTHOME)/include +# /I $(DAPL_BASE)/include/ib/MELLANOX + +PROVIDER_CFLAGS = -DMTL_MODULE=M_dapl -DMAX_TRACE=8 -DMAX_DEBUG=8 -DMAX_ERROR=8 -DDAPL_DBG -DVAPI + +!endif + +!if defined(IBAPI) + +PROVIDER = $(UDAPL)/../ibapi + +PROVIDER_OBJS = $(OBJ_PATH)/dapl_ibapi_cm.obj \ + $(OBJ_PATH)/dapl_ibapi_qp.obj \ + $(OBJ_PATH)/dapl_ibapi_util.obj + +PROVIDER_INCLUDES = \ + /I $(PROVIDER) \ + /I $(UDAPL_INCLUDE)/ib/IBM \ + /I $(UDAPL_INCLUDE)/ib/IBM/us + +PROVIDER_CFLAGS = -DIBAPI -DDAPL_ATS + +!endif +!if defined(IBAL) + +PROVIDER = $(UDAPL)/../ibal + +PROVIDER_OBJS = $(OBJ_PATH)/dapl_ibal_cm.obj \ + $(OBJ_PATH)/dapl_ibal_qp.obj \ + $(OBJ_PATH)/dapl_ibal_util.obj + +PROVIDER_INCLUDES = \ + /I $(PROVIDER) \ + /I $(UDAPL_INCLUDE)/../../../winuser/include/ \ + /I $(UDAPL_INCLUDE)/../../../shared/include/ \ + /I $(UDAPL_INCLUDE)/../../../shared/include/iba + +PROVIDER_CFLAGS = /DIBAL /DDAPL_ATS /D_VENDOR_IBAL_ /DDAPL_DBG + +!endif + +# +# Compiler +# + +CC = cl + +INC_FLAGS = \ + /I ../../dat/include \ + /I $(UDAPL) \ + /I $(UDAPL_INCLUDE) \ + /I $(UDAPL_COMMON) \ + /I $(UDAPL_WINDOWS) \ + $(PROVIDER_INCLUDES) + +CC_FLAGS = \ + /nologo /Zel /Gy /W3 /Gd /QIfdiv- /QIf /QI0f /GB /Gi- /Gm- \ + /GX- /GR- /GF /Z7 /Od /Oi /Oy- /DWIN32 /D_X86_ /D__i386__ \ + $(INC_FLAGS) $(PROVIDER_CFLAGS) + +# +# Linker +# + +LINK = link + +LIBS = libc.lib ws2_32.lib $(PROVIDER_LIB) ../../dat/udat/Target/dat.lib + +LINK_FLAGS = \ + /nologo /dll /DEF:$(UDAPL_WINDOWS)/dapl_win.def \ + /DEBUG /incremental:no /machine:I386 $(LIBS) + +# +# System Utilities +# + +RM = rm -f + + +#********************************************************************* +# +# Inference Rules +# +#*********************************************************************/ + +{$(UDAPL)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + +{$(UDAPL_COMMON)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + +{$(UDAPL_WINDOWS)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + +{$(PROVIDER)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + + +#********************************************************************* +# +# Description Blocks +# +#*********************************************************************/ + +all : mkdirs $(LIBRARY) + +mkdirs: + if not exist "$(OBJ_PATH)" mkdir "$(OBJ_PATH)" + if not exist "$(TARGET_PATH)" mkdir "$(TARGET_PATH)" + +$(LIBRARY) : $(OBJS) $(PROVIDER_OBJS) + $(LINK) $(LINK_FLAGS) /out:$(LIBRARY) $(OBJS) $(PROVIDER_OBJS) + +clean: + $(RM) $(OBJS) $(PROVIDER_OBJS) + $(RM) $(LIBRARY) + $(RM) $(TARGET_PATH)/*.pdb $(TARGET_PATH)/*.exp $(TARGET_PATH)/*.lib diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/udapl_exports.src b/branches/Ndi/ulp/dapl/dapl/udapl/udapl_exports.src new file mode 100644 index 00000000..214effb7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/udapl_exports.src @@ -0,0 +1,11 @@ +#if DBG +LIBRARY dapld.dll +#else +LIBRARY dapl.dll +#endif + +#ifndef _WIN64 +EXPORTS +dat_provider_init +dat_provider_fini +#endif diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/udapl_sources.c b/branches/Ndi/ulp/dapl/dapl/udapl/udapl_sources.c new file mode 100644 index 00000000..ae612dff --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/udapl_sources.c @@ -0,0 +1,88 @@ +/* + * Include all files that are not in children directories. 
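+ *
+ * (This is a "unity build": the single translation unit below pulls in
+ * every common .c file, so the Windows build compiles one file instead
+ * of tracking each source separately.)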
+ */ + +#include "..\common\dapl_cno_create.c" +#include "..\common\dapl_cno_free.c" +#include "..\common\dapl_cno_modify_agent.c" +#include "..\common\dapl_cno_query.c" +#include "..\common\dapl_cno_util.c" +#include "..\common\dapl_cno_wait.c" +#include "..\common\dapl_cookie.c" +#include "..\common\dapl_cr_accept.c" +#include "..\common\dapl_cr_callback.c" +#include "..\common\dapl_cr_handoff.c" +#include "..\common\dapl_cr_query.c" +#include "..\common\dapl_cr_reject.c" +#include "..\common\dapl_cr_util.c" +#include "..\common\dapl_debug.c" +#include "..\common\dapl_ep_connect.c" +#include "..\common\dapl_ep_create.c" +#include "..\common\dapl_ep_disconnect.c" +#include "..\common\dapl_ep_dup_connect.c" +#include "..\common\dapl_ep_free.c" +#include "..\common\dapl_ep_get_status.c" +#include "..\common\dapl_ep_modify.c" +#include "..\common\dapl_ep_post_rdma_read.c" +#include "..\common\dapl_ep_post_rdma_write.c" +#include "..\common\dapl_ep_post_recv.c" +#include "..\common\dapl_ep_post_send.c" +#include "..\common\dapl_ep_query.c" +#include "..\common\dapl_ep_reset.c" +#include "..\common\dapl_ep_util.c" +#include "..\common\dapl_evd_clear_unwaitable.c" +#include "..\common\dapl_evd_connection_callb.c" +#include "..\common\dapl_evd_cq_async_error_callb.c" +#include "..\common\dapl_evd_create.c" +#include "..\common\dapl_evd_dequeue.c" +#include "..\common\dapl_evd_disable.c" +#include "..\common\dapl_evd_dto_callb.c" +#include "..\common\dapl_evd_enable.c" +#include "..\common\dapl_evd_free.c" +#include "..\common\dapl_evd_modify_cno.c" +#include "..\common\dapl_evd_post_se.c" +#include "..\common\dapl_evd_qp_async_error_callb.c" +#include "..\common\dapl_evd_query.c" +#include "..\common\dapl_evd_resize.c" +#include "..\common\dapl_evd_set_unwaitable.c" +#include "..\common\dapl_evd_un_async_error_callb.c" +#include "..\common\dapl_evd_util.c" +#include "..\common\dapl_evd_wait.c" +#include "..\common\dapl_get_consumer_context.c" +#include "..\common\dapl_get_handle_type.c" +#include "..\common\dapl_hash.c" +#include "..\common\dapl_hca_util.c" +#include "..\common\dapl_ia_close.c" +#include "..\common\dapl_ia_open.c" +#include "..\common\dapl_ia_query.c" +#include "..\common\dapl_ia_util.c" +#include "..\common\dapl_llist.c" +#include "..\common\dapl_lmr_create.c" +#include "..\common\dapl_lmr_free.c" +#include "..\common\dapl_lmr_query.c" +#include "..\common\dapl_lmr_util.c" +#include "..\common\dapl_mr_util.c" +#include "..\common\dapl_provider.c" +#include "..\common\dapl_psp_create.c" +#include "..\common\dapl_psp_create_any.c" +#include "..\common\dapl_psp_free.c" +#include "..\common\dapl_psp_query.c" +#include "..\common\dapl_pz_create.c" +#include "..\common\dapl_pz_free.c" +#include "..\common\dapl_pz_query.c" +#include "..\common\dapl_pz_util.c" +#include "..\common\dapl_ring_buffer_util.c" +#include "..\common\dapl_rmr_bind.c" +#include "..\common\dapl_rmr_create.c" +#include "..\common\dapl_rmr_free.c" +#include "..\common\dapl_rmr_query.c" +#include "..\common\dapl_rmr_util.c" +#include "..\common\dapl_rsp_create.c" +#include "..\common\dapl_rsp_free.c" +#include "..\common\dapl_rsp_query.c" +#include "..\common\dapl_set_consumer_context.c" +#include "..\common\dapl_sp_util.c" +#include "..\ibal\dapl_ibal_cm.c" +#include "..\ibal\dapl_ibal_qp.c" +#include "..\ibal\dapl_ibal_util.c" +#include "windows\dapl_osd.c" diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.c b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.c new file mode 100644 index 
00000000..6d07686b
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.c
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dapl_osd.c
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *      Provide OS dependent functions with a canonical DAPL
+ *      interface. Designed to be portable and hide OS specific quirks
+ *      of common functions.
+ *
+ *
+ * $Id$
+ **********************************************************************/
+
+/*
+ * MUST have the Microsoft Platform SDK installed for Windows to build
+ * and work properly
+ */
+#include "dapl.h"
+#include "dapl_osd.h"
+#include <sys/timeb.h>
+#include <stdlib.h>     /* needed for getenv() */
+
+
+
+/*
+ * DllMain
+ *
+ * Primary Windows entry point
+ *
+ * Input:
+ *      hDllHandle      handle to DLL module
+ *      fdwReason       reason for calling function
+ *      lpReserved      reserved
+ *
+ * Returns:
+ *      TRUE
+ */
+
+BOOL WINAPI
+DllMain (
+    IN  HINSTANCE       hDllHandle,
+    IN  DWORD           fdwReason,
+    IN  LPVOID          lpReserved )
+{
+    UNREFERENCED_PARAMETER(lpReserved);
+    switch( fdwReason )
+    {
+    case DLL_PROCESS_ATTACH:
+        /*
+         * We don't attach/detach threads that need any sort
+         * of initialization, so disable this ability to optimize
+         * the working set size of the DLL. Also allows us to
+         * remove two case statements:
+         * DLL_THREAD_DETACH and DLL_THREAD_ATTACH
+         */
+        if ( (DisableThreadLibraryCalls( hDllHandle )) != 0)
+        {
+            //fprintf(stderr, "DAPL dll_PROCESS_attach!\n");
+            //dapl_init ();
+            break;
+        }
+        else
+        {
+            DWORD err = GetLastError();
+            dapl_os_printf("DAPL Init Failed with code %u\n", err);
+            break;
+        }
+    case DLL_PROCESS_DETACH:
+        /*
+         * Do library cleanup
+         */
+        //dapl_fini ();
+        break;
+    }
+    return TRUE;
+}
+
+
+/*
+ * dapl_osd_init
+ *
+ * Do Windows specific initialization:
+ * - nothing at this time
+ *
+ * Input:
+ *      none
+ *
+ * Returns:
+ *      none
+ */
+void
+dapl_osd_init ( )
+{
+    return;
+}
+
+
+/*
+ * dapl_os_get_time
+ *
+ * Return 64 bit value of current time in microseconds.
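+ *
+ * (Note: _ftime() used below reports milliseconds, so the value
+ * returned here advances in 1000-usec steps even though the unit is
+ * microseconds.)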
+ *
+ * Input:
+ *      loc             User location to place current time
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+
+DAT_RETURN
+dapl_os_get_time (
+    OUT DAPL_OS_TIMEVAL         *loc)
+{
+    struct _timeb               tb;
+
+    _ftime ( &tb );
+
+    *loc = ((DAT_UINT64) tb.time * 1000000L) + ((DAT_UINT64) tb.millitm * 1000);
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dapl_os_get_env_bool
+ *
+ * Return boolean value of passed in environment variable: 1 if present,
+ * 0 if not
+ *
+ * Input:
+ *      env_str         environment variable name
+ *
+ * Returns:
+ *      TRUE or FALSE
+ */
+int
+dapl_os_get_env_bool (
+    char                *env_str )
+{
+    char                *env_var;
+
+    env_var = getenv (env_str);
+    if (env_var != NULL)
+    {
+        return 1;
+    }
+
+    return 0;
+}
+
+
+/*
+ * dapl_os_get_env_val
+ *
+ * Return the value of the passed in environment variable if present,
+ * else the supplied default
+ *
+ * Input:
+ *      env_str
+ *      def_val         default value if environment variable does not exist
+ *
+ * Returns:
+ *      value of the environment variable, or def_val
+ */
+int
+dapl_os_get_env_val (
+    char                *env_str,
+    int                 def_val )
+{
+    char                *env_var;
+
+    env_var = getenv (env_str);
+    if ( env_var != NULL )
+    {
+        def_val = strtol (env_var, NULL, 0);
+    }
+
+    return def_val;
+}
+
+
+/*
+ * dapl_os_thread_create
+ *
+ * Create a thread for dapl
+ *
+ * Input:
+ *      func            function to invoke thread
+ *      data            argument to pass to function
+ *
+ * Output
+ *      thread_id       handle for thread
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+DAT_RETURN
+dapl_os_thread_create (
+    IN  void                    (*func) (void *),
+    IN  void                    *data,
+    OUT DAPL_OS_THREAD          *thread_id )
+{
+
+    *thread_id = CreateThread(
+        NULL,                           /* &thread security attrs    */
+        8 * 1024,                       /* initial thread stack size */
+        (LPTHREAD_START_ROUTINE)func,   /* &thread function          */
+        data,                           /* argument for new thread   */
+        0,                              /* creation flags            */
+        NULL);                          /* thread ID (ignore)        */
+
+    if ( *thread_id == NULL )
+    {
+        return DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, 0);
+    }
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h
new file mode 100644
index 00000000..0f39272b
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h
@@ -0,0 +1,538 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dapl_osd.h
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *      Provide OS dependent data structures & functions with
+ *      a canonical DAPL interface.
diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h new file mode 100644 index 00000000..0f39272b --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_osd.h @@ -0,0 +1,538 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dapl_osd.h + * + * PURPOSE: Operating System Dependent layer + * Description: + * Provide OS dependent data structures & functions with + * a canonical DAPL interface. Designed to be portable + * and hide OS specific quirks of common functions. + * + * $Id$ + **********************************************************************/ + +#ifndef _DAPL_OSD_H_ +#define _DAPL_OSD_H_ + +/* + * This file is for Windows systems only; including it in any + * other build will cause an error + */ +#ifndef _WIN32 +#error UNDEFINED OS TYPE +#endif /* _WIN32 */ + +#include <dat/udat.h> +#include <stdlib.h> +#pragma warning ( push, 3 ) +#include <winsock2.h> +#include <ws2tcpip.h> +#include <windows.h> +#include <stdio.h> +#include <io.h> +#include <process.h> +#pragma warning ( pop ) + +#include "dapl_debug.h" + +/* Export Header */ +#ifdef EXPORT_DAPL_SYMBOLS /* 1 when building DAPL DLL, 0 for clients */ +#define DAPL_EXPORT __declspec( dllexport ) +#else +#define DAPL_EXPORT __declspec( dllimport ) +#endif + +/* Useful debug definitions */ +#ifndef STATIC +#define STATIC static +#endif /* STATIC */ +#ifndef _INLINE_ +#define _INLINE_ __inline +#endif /* _INLINE_ */ +/* +typedef int * intptr_t; +typedef unsigned int * uintptr_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +*/ +#define dapl_ip_addr6(sockaddr) (((struct sockaddr_in6 *)sockaddr)->sin6_addr.s6_addr) + + +#define dapl_os_panic(str) \ + { \ + fprintf(stderr, "PANIC in %s:%i:\n", __FILE__, __LINE__); \ + fprintf(stderr, str); \ + exit(1); \ + } + +/* + * Atomic operations + */ + +typedef volatile DAT_COUNT DAPL_ATOMIC; + +/* atomic function prototypes */ +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_inc ( + INOUT DAPL_ATOMIC *v); + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_dec ( + INOUT DAPL_ATOMIC *v); + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_assign ( + INOUT DAPL_ATOMIC *v, + IN DAT_COUNT match_value, + IN DAT_COUNT new_value ); + +int dapl_os_get_env_bool ( + char *env_str ); + +int dapl_os_get_env_val ( + char *env_str, + int def_val ); + + +/* atomic functions */ + +/* dapl_os_atomic_inc + * + * increment the current value of '*v' and return the incremented value. + * + * This is equivalent to an IB atomic fetch and add of 1, + * except that the new value, not the old one, is returned, + * a DAT_COUNT might be 32 bits, rather than 64, + * and it occurs in local memory. + */ + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_inc ( + INOUT DAPL_ATOMIC *v) +{ + return InterlockedIncrement( v ); +} + + +/* dapl_os_atomic_dec + * + * decrement the current value of '*v'. No return value is required. + */ + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_dec ( + INOUT DAPL_ATOMIC *v) +{ + return InterlockedDecrement( v ); +} + + +/* dapl_os_atomic_assign + * + * assign 'new_value' to '*v' if the current value + * matches the provided 'match_value'. + * + * Make no assignment if there is no match. + * + * Return the current value in any case. + * + * This matches the IBTA atomic operation compare & swap + * except that it is for local memory and a DAT_COUNT may + * be only 32 bits, rather than 64. + */ + +STATIC _INLINE_ DAT_COUNT +dapl_os_atomic_assign ( + INOUT DAPL_ATOMIC *v, + IN DAT_COUNT match_value, + IN DAT_COUNT new_value ) +{ + return InterlockedCompareExchange((LPLONG)v, + new_value, + match_value); +}
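The compare-and-swap primitive above is normally wrapped in a retry loop; a minimal sketch under that assumption (example_atomic_add is a hypothetical helper, not part of this patch):

/* Illustrative only: atomically add 'delta' to a counter using the
 * dapl_os_atomic_assign() compare-and-swap defined above. */
static DAT_COUNT
example_atomic_add (
    INOUT DAPL_ATOMIC *counter,
    IN DAT_COUNT delta )
{
    DAT_COUNT old_value;

    do
    {
        old_value = *counter;
        /* the swap succeeds only if *counter still equals old_value;
         * dapl_os_atomic_assign() returns the value it found */
    } while ( dapl_os_atomic_assign (counter, old_value, old_value + delta)
              != old_value );

    return old_value + delta;
}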
/* + * Thread Functions + */ +typedef HANDLE DAPL_OS_THREAD; /* XXX Need to implement! */ + +DAT_RETURN +dapl_os_thread_create ( + IN void (*func) (void *), + IN void *data, + OUT DAPL_OS_THREAD *thread_id ); + + +/* + * Lock Functions + */ +typedef HANDLE DAPL_OS_LOCK; + +/* function prototypes */ +STATIC _INLINE_ DAT_RETURN +dapl_os_lock_init ( + IN DAPL_OS_LOCK *m); + +STATIC _INLINE_ DAT_RETURN +dapl_os_lock ( + IN DAPL_OS_LOCK *m); + +STATIC _INLINE_ DAT_RETURN +dapl_os_unlock ( + IN DAPL_OS_LOCK *m); + +STATIC _INLINE_ DAT_RETURN +dapl_os_lock_destroy ( + IN DAPL_OS_LOCK *m); + +/* lock functions */ +STATIC _INLINE_ DAT_RETURN +dapl_os_lock_init ( + IN DAPL_OS_LOCK *m) +{ + *m = CreateMutex (0, FALSE, 0); + + return *m ? DAT_SUCCESS : (DAT_CLASS_ERROR | DAT_INSUFFICIENT_RESOURCES); +} + +STATIC _INLINE_ DAT_RETURN +dapl_os_lock ( + IN DAPL_OS_LOCK *m) +{ + WaitForSingleObject (*m, INFINITE); + + return DAT_SUCCESS; +} + +STATIC _INLINE_ DAT_RETURN +dapl_os_unlock ( + IN DAPL_OS_LOCK *m) +{ + ReleaseMutex (*m); + + return DAT_SUCCESS; +} + +STATIC _INLINE_ DAT_RETURN +dapl_os_lock_destroy ( + IN DAPL_OS_LOCK *m) +{ + CloseHandle (*m); + + return DAT_SUCCESS; +}
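A minimal sketch of the intended lock discipline, assuming only the four functions above (example_locked_update is a hypothetical helper, not part of this patch):

/* Illustrative only: the full lifetime of a DAPL_OS_LOCK. */
static DAT_RETURN
example_locked_update (
    IN int *shared_counter )
{
    DAPL_OS_LOCK lock;
    DAT_RETURN status;

    status = dapl_os_lock_init (&lock);
    if ( DAT_SUCCESS != status )
    {
        return status;              /* CreateMutex failed */
    }

    dapl_os_lock (&lock);           /* blocks until the mutex is owned */
    (*shared_counter)++;            /* critical section */
    dapl_os_unlock (&lock);

    return dapl_os_lock_destroy (&lock);
}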
/* + * Wait Objects + */ + +/* + * The wait object invariant: Presuming a call to dapl_os_wait_object_wait + * occurs at some point, there will be at least one wakeup after each call + * to dapl_os_wait_object_wakeup. I.e. signals are not ignored, though + * they may be coalesced. + */ + +typedef HANDLE DAPL_OS_WAIT_OBJECT; + +/* function prototypes */ +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_init ( + IN DAPL_OS_WAIT_OBJECT *wait_obj); + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_wait ( + IN DAPL_OS_WAIT_OBJECT *wait_obj, + IN DAT_TIMEOUT timeout_val); + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_wakeup ( + IN DAPL_OS_WAIT_OBJECT *wait_obj); + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_destroy ( + IN DAPL_OS_WAIT_OBJECT *wait_obj); +/* wait_object functions */ + +/* Initialize a wait object to an empty state + */ + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_init ( + IN DAPL_OS_WAIT_OBJECT *wait_obj) +{ + *wait_obj = CreateEvent(NULL,FALSE,FALSE,NULL); + + if ( *wait_obj == NULL ) + { + return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; +} + + +/* Wait on the supplied wait object, up to the specified timeout_val, + * reacquiring it after the wait ends. + * A timeout of DAT_TIMEOUT_INFINITE will wait indefinitely. + * Timeout should be specified in microseconds. + * + * Functional returns: + * DAT_SUCCESS -- another thread invoked dapl_os_wait_object_wakeup + * DAT_INVALID_STATE -- someone else is already waiting in this wait + * object. + * only one waiter is allowed at a time. + * DAT_ABORT -- another thread invoked dapl_os_wait_object_destroy + * DAT_TIMEOUT -- the specified time limit was reached. + */ + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_wait ( + IN DAPL_OS_WAIT_OBJECT *wait_obj, + IN DAT_TIMEOUT timeout_val) +{ + DAT_RETURN status; + DWORD op_status; + + status = DAT_SUCCESS; + + if ( DAT_TIMEOUT_INFINITE == timeout_val ) + { + op_status = WaitForSingleObject(*wait_obj, INFINITE); + } + else + { + /* convert to milliseconds */ + op_status = WaitForSingleObject(*wait_obj, timeout_val/1000); + } + + if (op_status == WAIT_TIMEOUT) + { + status = DAT_CLASS_ERROR | DAT_TIMEOUT_EXPIRED; + } + else if ( op_status == WAIT_FAILED) + { + status = DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return status; +} + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_wakeup ( + IN DAPL_OS_WAIT_OBJECT *wait_obj) +{ + DWORD op_status; + + op_status = SetEvent(*wait_obj); + if ( op_status == 0 ) + { + return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; +} + +STATIC _INLINE_ DAT_RETURN +dapl_os_wait_object_destroy ( + IN DAPL_OS_WAIT_OBJECT *wait_obj) +{ + DWORD op_status; + DAT_RETURN status = DAT_SUCCESS; + + op_status = CloseHandle(*wait_obj); + + if ( op_status == 0 ) + { + status = DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return status; +} + + +/* + * Memory Functions + */ + +/* function prototypes */ +STATIC _INLINE_ void *dapl_os_alloc (int size); + +STATIC _INLINE_ void *dapl_os_realloc (void *ptr, int size); + +STATIC _INLINE_ void dapl_os_free (void *ptr, int size); + +STATIC _INLINE_ void * dapl_os_memzero (void *loc, int size); + +STATIC _INLINE_ void * dapl_os_memcpy (void *dest, const void *src, int len); + +STATIC _INLINE_ int dapl_os_memcmp (const void *mem1, const void *mem2, int len); + +/* memory functions */ + + +STATIC _INLINE_ void *dapl_os_alloc (int size) +{ + return malloc (size); +} + +STATIC _INLINE_ void *dapl_os_realloc (void *ptr, int size) +{ + return realloc(ptr, size); +} + +STATIC _INLINE_ void dapl_os_free (void *ptr, int size) +{ + size = size; + free (ptr); + ptr = NULL; +} + +STATIC _INLINE_ void * dapl_os_memzero (void *loc, int size) +{ + return memset (loc, 0, size); +} + +STATIC _INLINE_ void * dapl_os_memcpy (void *dest, const void *src, int len) +{ + return memcpy (dest, src, len); +} + +STATIC _INLINE_ int dapl_os_memcmp (const void *mem1, const void *mem2, int len) +{ + return memcmp (mem1, mem2, len); +} + + +STATIC _INLINE_ unsigned int dapl_os_strlen(const char *str) +{ + return ((unsigned int)strlen(str)); +} + +STATIC _INLINE_ char * dapl_os_strdup(const char *str) +{ + return _strdup(str); +} + + +/* + * Timer Functions + */ + +typedef DAT_UINT64 DAPL_OS_TIMEVAL; +typedef struct dapl_timer_entry DAPL_OS_TIMER; +typedef unsigned long DAPL_OS_TICKS; + +/* function prototypes */ + +/* + * Sleep for the number of microseconds specified by the invoking + * function + */ +STATIC _INLINE_ void dapl_os_sleep_usec (int sleep_time) +{ + Sleep(sleep_time/1000); +} + +STATIC _INLINE_ DAPL_OS_TICKS dapl_os_get_ticks (void); + +STATIC _INLINE_ int dapl_os_ticks_to_seconds (DAPL_OS_TICKS ticks); + +DAT_RETURN dapl_os_get_time (DAPL_OS_TIMEVAL *); +/* timer functions */ + +STATIC _INLINE_ DAPL_OS_TICKS dapl_os_get_ticks (void) +{ + return GetTickCount (); +} + +STATIC _INLINE_ int dapl_os_ticks_to_seconds (DAPL_OS_TICKS ticks) +{ + ticks = ticks; + /* NOT YET IMPLEMENTED IN USER-SPACE */ + return 0; +} + + +/* + * + * Name Service Helper functions + * + */ +#ifdef IBHOSTS_NAMING +#define dapls_osd_getaddrinfo(name, addr_ptr) getaddrinfo(name,NULL,NULL,addr_ptr) +#define dapls_osd_freeaddrinfo(addr) freeaddrinfo (addr)
+#endif /* IBHOSTS_NAMING */ + +/* + * *printf format helpers. We use the C string constant concatenation + * ability to define 64 bit formats, which unfortunately are non-standard + * in the C compiler world. E.g. %llx for gcc, %I64x for Windows + */ +#define F64d "%I64d" +#define F64u "%I64u" +#define F64x "%I64x" +#define F64X "%I64X" + +/* + * Conversion Functions + */ + +STATIC _INLINE_ long int +dapl_os_strtol(const char *nptr, char **endptr, int base) +{ + return strtol(nptr, endptr, base); +} + + +/* + * Debug Helper Functions + */ + +#define dapl_os_assert(expression) CL_ASSERT(expression) + +#define dapl_os_printf printf +#define dapl_os_vprintf(fmt,args) vprintf(fmt,args) +#define dapl_os_syslog(fmt,args) /* XXX Need log routine call */ + + +#endif /* _DAPL_OSD_H_ */ + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_win.def b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_win.def new file mode 100644 index 00000000..681a02c1 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapl_win.def @@ -0,0 +1,61 @@ +EXPORTS + +dapl_ia_open +dapl_ia_close +dapl_ia_query +dapl_set_consumer_context +dapl_get_consumer_context +dapl_get_handle_type +dapl_cno_create +dapl_cno_modify_agent +dapl_cno_query +dapl_cno_free +dapl_cno_wait +dapl_cr_query +dapl_cr_accept +dapl_cr_reject +dapl_cr_handoff +dapl_evd_create +dapl_evd_query +dapl_evd_modify_cno +dapl_evd_enable +dapl_evd_disable +dapl_evd_wait +dapl_evd_post_se +dapl_evd_dequeue +dapl_evd_free +dapl_ep_create +dapl_ep_query +dapl_ep_modify +dapl_ep_connect +dapl_ep_dup_connect +dapl_ep_disconnect +dapl_ep_post_send +dapl_ep_post_recv +dapl_ep_post_rdma_read +dapl_ep_post_rdma_write +dapl_ep_get_status +dapl_ep_free +dapl_lmr_create +dapl_lmr_query +dapl_lmr_free +dapl_rmr_create +dapl_rmr_query +dapl_rmr_bind +dapl_rmr_free +dapl_psp_create +dapl_psp_query +dapl_psp_free +dapl_rsp_create +dapl_rsp_query +dapl_rsp_free +dapl_pz_create +dapl_pz_query +dapl_pz_free +dapl_init +dapl_fini + + + + + diff --git a/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapllib.rc b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapllib.rc new file mode 100644 index 00000000..e4b5f803 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dapl/udapl/windows/dapllib.rc @@ -0,0 +1,50 @@ +#include <windows.h> + +#ifdef _WIN32 +LANGUAGE LANG_ENGLISH, SUBLANG_ENGLISH_US +#pragma code_page(1252) +#endif //_WIN32 + +///////////////////////////////////////////////////////////////////////////// +// +// Update Version info for each release +// + +VS_VERSION_INFO VERSIONINFO + FILEVERSION 0,0,0,5 + PRODUCTVERSION 0,0,0,5 + FILEFLAGSMASK 0x3fL +#ifdef _DEBUG + FILEFLAGS 0x1L +#else + FILEFLAGS 0x0L +#endif + FILEOS 0x40004L + FILETYPE 0x1L + FILESUBTYPE 0x0L +BEGIN + BLOCK "StringFileInfo" + BEGIN + BLOCK "040904b0" + BEGIN + VALUE "Comments", "DAPL Reference Implementation, available at https://sourceforge.net/projects/dapl/ \0" + VALUE "CompanyName", "DAT Collaborative\0" + VALUE "FileDescription", "dapl.dll\0" + VALUE "FileVersion", "0, 0, 0, 5\0" + VALUE "InternalName", "dapl.dll\0" + VALUE "LegalCopyright", "Common Public License 1.0 or The BSD License\0" + VALUE "LegalTrademarks", "\0" + VALUE "OriginalFilename", "dapl.dll\0" + VALUE "PrivateBuild", "\0" + VALUE "ProductName", "DAPL Reference Implementation\0" + VALUE "ProductVersion", "0, 0, 0, 5\0" + VALUE "SpecialBuild", "\0" + END + END + BLOCK "VarFileInfo" + BEGIN + VALUE "Translation", 0x409, 1200 + END +END + + diff --git 
a/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.c b/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.c new file mode 100644 index 00000000..9ab6ae26 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.c @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_dictionary.c + * + * PURPOSE: dictionary data structure + * + **********************************************************************/ + + +#include "dat_dictionary.h" + + +/********************************************************************* + * * + * Structures * + * * + *********************************************************************/ + +typedef struct DAT_DICTIONARY_NODE +{ + DAT_PROVIDER_INFO key; + DAT_DICTIONARY_DATA data; + struct DAT_DICTIONARY_NODE *prev; + struct DAT_DICTIONARY_NODE *next; +} DAT_DICTIONARY_NODE; + + +struct DAT_DICTIONARY +{ + DAT_DICTIONARY_NODE *head; + DAT_DICTIONARY_NODE *tail; + DAT_COUNT size; +}; + +/********************************************************************* + * * + * Function Declarations * + * * + *********************************************************************/ + +static DAT_RETURN +dat_dictionary_key_dup ( + const DAT_PROVIDER_INFO *old_key, + DAT_PROVIDER_INFO *new_key ); + +static DAT_BOOLEAN +dat_dictionary_key_is_equal ( + const DAT_PROVIDER_INFO *key_a, + const DAT_PROVIDER_INFO *key_b ); + + +/********************************************************************* + * * + * External Functions * + * * + *********************************************************************/ + + +/*********************************************************************** + * Function: dat_dictionary_create + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_create ( + OUT DAT_DICTIONARY **pp_dictionary) +{ + DAT_DICTIONARY *p_dictionary; + DAT_RETURN status; + + dat_os_assert ( NULL != pp_dictionary); + + status = DAT_SUCCESS; + + /* create the dictionary */ + p_dictionary = dat_os_alloc (sizeof (DAT_DICTIONARY)); + if (NULL == p_dictionary) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_memset (p_dictionary, '\0', sizeof (DAT_DICTIONARY)); + + /* create the head node */ + p_dictionary->head = dat_os_alloc (sizeof (DAT_DICTIONARY_NODE)); + if (NULL == p_dictionary->head) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto 
bail; + } + + dat_os_memset (p_dictionary->head, '\0', sizeof (DAT_DICTIONARY_NODE)); + + /* create the tail node */ + p_dictionary->tail = dat_os_alloc (sizeof (DAT_DICTIONARY_NODE)); + if (NULL == p_dictionary->tail) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_memset (p_dictionary->tail, '\0', sizeof (DAT_DICTIONARY_NODE)); + + p_dictionary->head->next = p_dictionary->tail; + p_dictionary->tail->prev = p_dictionary->head; + + *pp_dictionary = p_dictionary; + + bail: + if ( DAT_SUCCESS != status ) + { + if ( NULL != p_dictionary ) + { + dat_os_free (p_dictionary, sizeof (DAT_DICTIONARY)); + + if ( NULL != p_dictionary->head ) + { + dat_os_free (p_dictionary->head, sizeof (DAT_DICTIONARY_NODE)); + } + + if ( NULL != p_dictionary->tail ) + { + dat_os_free (p_dictionary->tail, sizeof (DAT_DICTIONARY_NODE)); + } + } + } + + return status; +} + + +/*********************************************************************** + * Function: dat_dictionary_destroy + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_destroy ( + IN DAT_DICTIONARY *p_dictionary) +{ + DAT_DICTIONARY_NODE *cur_node; + + dat_os_assert (NULL != p_dictionary); + + while (NULL != p_dictionary->head) + { + cur_node = p_dictionary->head; + p_dictionary->head = cur_node->next; + + dat_os_free (cur_node, sizeof (DAT_DICTIONARY_NODE)); + } + + dat_os_free (p_dictionary, sizeof (DAT_DICTIONARY)); + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_dictionary_size + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_size ( + IN DAT_DICTIONARY *p_dictionary, + OUT DAT_COUNT *p_size) +{ + dat_os_assert (NULL != p_dictionary); + dat_os_assert (NULL != p_size); + + *p_size = p_dictionary->size; + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_dictionary_entry_create + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_entry_create ( + OUT DAT_DICTIONARY_ENTRY *p_entry) +{ + DAT_DICTIONARY_NODE *node; + DAT_RETURN dat_status; + + dat_os_assert (NULL != p_entry); + + dat_status = DAT_SUCCESS; + + node = dat_os_alloc (sizeof (DAT_DICTIONARY_NODE)); + if (NULL == node) + { + dat_status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + *p_entry = node; + + bail: + return dat_status; +} + + +/*********************************************************************** + * Function: dat_dictionary_entry_destroy + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_entry_destroy ( + OUT DAT_DICTIONARY_ENTRY entry) +{ + dat_os_free (entry, sizeof (DAT_DICTIONARY_NODE)); + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_dictionary_insert + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_insert ( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_ENTRY entry, + IN const DAT_PROVIDER_INFO *key, + IN DAT_DICTIONARY_DATA data) +{ + DAT_RETURN dat_status; + DAT_DICTIONARY_NODE *cur_node, *prev_node, *next_node; + + dat_os_assert (NULL != p_dictionary); + dat_os_assert (NULL != entry); + + cur_node = entry; + + if ( DAT_SUCCESS == dat_dictionary_search (p_dictionary, key, NULL) ) + { + 
dat_status = DAT_ERROR (DAT_PROVIDER_ALREADY_REGISTERED,0); + goto bail; + } + + dat_status = dat_dictionary_key_dup ( key, &cur_node->key ); + if ( DAT_SUCCESS != dat_status ) + { + goto bail; + } + + /* insert node at end of list to preserve registration order*/ + prev_node = p_dictionary->tail->prev; + next_node = p_dictionary->tail; + + cur_node->data = data; + cur_node->next = next_node; + cur_node->prev = prev_node; + + prev_node->next = cur_node; + next_node->prev = cur_node; + + p_dictionary->size++; + + bail: + return dat_status; +} + + +/*********************************************************************** + * Function: dat_dictionary_search + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_search ( + IN DAT_DICTIONARY *p_dictionary, + IN const DAT_PROVIDER_INFO *key, + OUT DAT_DICTIONARY_DATA *p_data) +{ + DAT_DICTIONARY_NODE *cur_node; + DAT_RETURN status; + + dat_os_assert (NULL != p_dictionary); + + status = DAT_ERROR (DAT_PROVIDER_NOT_FOUND,0); + + for (cur_node = p_dictionary->head->next; + p_dictionary->tail != cur_node; + cur_node = cur_node->next) + { + if ( DAT_TRUE == dat_dictionary_key_is_equal (&cur_node->key, key) ) + { + if ( NULL != p_data ) + { + *p_data = cur_node->data; + } + + status = DAT_SUCCESS; + goto bail; + } + } + + bail: + return status; +} + + +/*********************************************************************** + * Function: dat_dictionary_enumerate + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_enumerate ( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_DATA array[], + IN DAT_COUNT array_size) +{ + DAT_DICTIONARY_NODE *cur_node; + DAT_COUNT i; + DAT_RETURN status; + + dat_os_assert (NULL != p_dictionary); + dat_os_assert (NULL != array); + + status = DAT_SUCCESS; + + if ( array_size < p_dictionary->size ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,0); + goto bail; + } + + for (cur_node = p_dictionary->head->next, i = 0; + p_dictionary->tail != cur_node; + cur_node = cur_node->next, i++) + { + array[i] = cur_node->data; + } + + bail: + return status; +} + + +/*********************************************************************** + * Function: dat_dictionary_remove + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_remove ( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_ENTRY *p_entry, + IN const DAT_PROVIDER_INFO *key, + OUT DAT_DICTIONARY_DATA *p_data) +{ + DAT_DICTIONARY_NODE *cur_node, *prev_node, *next_node; + DAT_RETURN status; + + dat_os_assert (NULL != p_dictionary); + dat_os_assert (NULL != p_entry); + + status = DAT_ERROR (DAT_PROVIDER_NOT_FOUND,0); + + for (cur_node = p_dictionary->head->next; + p_dictionary->tail != cur_node; + cur_node = cur_node->next) + { + if ( DAT_TRUE == dat_dictionary_key_is_equal (&cur_node->key, key) ) + { + if ( NULL != p_data ) + { + *p_data = cur_node->data; + } + + prev_node = cur_node->prev; + next_node = cur_node->next; + + prev_node->next = next_node; + next_node->prev = prev_node; + + *p_entry = cur_node; + + p_dictionary->size--; + + status = DAT_SUCCESS; + goto bail; + } + } + + bail: + return status; +} + + +/********************************************************************* + * * + * Internal Function Definitions * + * * + *********************************************************************/ + + +/*********************************************************************** + * Function: 
dat_dictionary_key_dup + ***********************************************************************/ + +DAT_RETURN +dat_dictionary_key_dup ( + const DAT_PROVIDER_INFO *old_key, + DAT_PROVIDER_INFO *new_key ) +{ + dat_os_assert (NULL != old_key); + dat_os_assert (NULL != new_key); + + dat_os_strncpy (new_key->ia_name, old_key->ia_name, DAT_NAME_MAX_LENGTH); + new_key->dapl_version_major = old_key->dapl_version_major; + new_key->dapl_version_minor = old_key->dapl_version_minor; + new_key->is_thread_safe = old_key->is_thread_safe; + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_dictionary_key_is_equal + ***********************************************************************/ + +DAT_BOOLEAN +dat_dictionary_key_is_equal ( + const DAT_PROVIDER_INFO *key_a, + const DAT_PROVIDER_INFO *key_b ) +{ + if ( ( dat_os_strlen (key_a->ia_name) == dat_os_strlen (key_b->ia_name) ) && + ( !dat_os_strncmp (key_a->ia_name, key_b->ia_name, dat_os_strlen (key_a->ia_name)) ) && + ( key_a->dapl_version_major == key_b->dapl_version_major ) && + ( key_a->dapl_version_minor == key_b->dapl_version_minor ) && + ( key_a->is_thread_safe == key_b->is_thread_safe ) ) + { + return DAT_TRUE; + } + else + { + return DAT_FALSE; + } +}
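A minimal sketch of the dictionary life cycle as the functions above define it; the key contents and the example_dictionary_use helper are made up for illustration, not part of this patch:

/* Illustrative only: one create/insert/search/remove cycle. */
#include "dat_dictionary.h"

static DAT_RETURN
example_dictionary_use ( void )
{
    DAT_DICTIONARY *dict;
    DAT_DICTIONARY_ENTRY entry;
    DAT_DICTIONARY_DATA data;
    DAT_PROVIDER_INFO key;
    DAT_RETURN status;

    dat_os_memset (&key, '\0', sizeof (key));
    dat_os_strncpy (key.ia_name, "example-ia", DAT_NAME_MAX_LENGTH);
    key.dapl_version_major = 1;
    key.dapl_version_minor = 2;
    key.is_thread_safe = DAT_FALSE;

    status = dat_dictionary_create (&dict);
    if ( DAT_SUCCESS != status )
    {
        return status;
    }

    /* the caller allocates the node; insert links it into the list */
    status = dat_dictionary_entry_create (&entry);
    if ( DAT_SUCCESS == status )
    {
        status = dat_dictionary_insert (dict, entry, &key,
                                        (DAT_DICTIONARY_DATA) &key);
        if ( DAT_SUCCESS != status )
        {
            (void) dat_dictionary_entry_destroy (entry);
        }
    }

    if ( DAT_SUCCESS == status &&
         DAT_SUCCESS == dat_dictionary_search (dict, &key, &data) )
    {
        /* remove hands the node back so the caller can free it */
        (void) dat_dictionary_remove (dict, &entry, &key, &data);
        (void) dat_dictionary_entry_destroy (entry);
    }

    return dat_dictionary_destroy (dict);
}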
diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.h b/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.h new file mode 100644 index 00000000..a96c4fbe --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_dictionary.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dat_dictionary.h + * + * PURPOSE: dictionary data structure + * + **********************************************************************/ + +#ifndef _DAT_DICTIONARY_H_ +#define _DAT_DICTIONARY_H_ + + +#include "dat_osd.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +/********************************************************************* + * * + * Typedefs * + * * + *********************************************************************/ + +typedef struct DAT_DICTIONARY DAT_DICTIONARY; +typedef void * DAT_DICTIONARY_DATA; +typedef void * DAT_DICTIONARY_ENTRY; + + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +extern DAT_RETURN +dat_dictionary_create( + OUT DAT_DICTIONARY **pp_dictionary); + +extern DAT_RETURN +dat_dictionary_destroy( + IN DAT_DICTIONARY *p_dictionary); + +extern DAT_RETURN +dat_dictionary_size( + IN DAT_DICTIONARY *p_dictionary, + OUT DAT_COUNT *p_size); + +extern DAT_RETURN +dat_dictionary_entry_create( + OUT DAT_DICTIONARY_ENTRY *p_entry); + +extern DAT_RETURN +dat_dictionary_entry_destroy( + IN DAT_DICTIONARY_ENTRY entry); + +extern DAT_RETURN +dat_dictionary_insert( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_ENTRY entry, + IN const DAT_PROVIDER_INFO *key, + IN DAT_DICTIONARY_DATA data); + +extern DAT_RETURN +dat_dictionary_search( + IN DAT_DICTIONARY *p_dictionary, + IN const DAT_PROVIDER_INFO *key, + OUT DAT_DICTIONARY_DATA *p_data); + +extern DAT_RETURN +dat_dictionary_enumerate( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_DATA array[], + IN DAT_COUNT array_size); + + +extern DAT_RETURN +dat_dictionary_remove( + IN DAT_DICTIONARY *p_dictionary, + IN DAT_DICTIONARY_ENTRY *p_entry, + IN const DAT_PROVIDER_INFO *key, + OUT DAT_DICTIONARY_DATA *p_data); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_dr.c b/branches/Ndi/ulp/dapl/dat/common/dat_dr.c new file mode 100644 index 00000000..7c117807 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_dr.c @@ -0,0 +1,381 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution.
+ */ + +/********************************************************************** + * + * MODULE: dat_dr.c + * + * PURPOSE: dynamic registry implementation + * + **********************************************************************/ + + +#include "dat_dr.h" + +#include "dat_dictionary.h" + + +/********************************************************************* + * * + * Global Variables * + * * + *********************************************************************/ + +static DAT_OS_LOCK g_dr_lock; +static DAT_DICTIONARY *g_dr_dictionary = NULL; + + +/********************************************************************* + * * + * External Functions * + * * + *********************************************************************/ + + +//*********************************************************************** +// Function: dat_dr_init +//*********************************************************************** + +DAT_RETURN +dat_dr_init ( void ) +{ + DAT_RETURN status; + + status = dat_os_lock_init (&g_dr_lock); + if ( DAT_SUCCESS != status ) + { + return status; + } + + status = dat_dictionary_create (&g_dr_dictionary); + if ( DAT_SUCCESS != status ) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC, + "DAT Registry: DR Init Failed\n"); + return status; + } + + return DAT_SUCCESS; +} + + +//*********************************************************************** +// Function: dat_dr_fini +//*********************************************************************** + +DAT_RETURN +dat_dr_fini ( void ) +{ + DAT_RETURN status; + + status = dat_os_lock_destroy (&g_dr_lock); + if ( DAT_SUCCESS != status ) + { + return status; + } + + status = dat_dictionary_destroy (g_dr_dictionary); + if ( DAT_SUCCESS != status ) + { + return status; + } + + return DAT_SUCCESS; +} + + +//*********************************************************************** +// Function: dat_dr_insert +//*********************************************************************** + +extern DAT_RETURN +dat_dr_insert ( + IN const DAT_PROVIDER_INFO *info, + IN DAT_DR_ENTRY *entry ) +{ + DAT_RETURN status; + DAT_DICTIONARY_ENTRY dict_entry = NULL; + DAT_DR_ENTRY *data; + + data = dat_os_alloc (sizeof (DAT_DR_ENTRY)); + if ( NULL == data ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + *data = *entry; + + status = dat_dictionary_entry_create (&dict_entry); + if ( DAT_SUCCESS != status ) + { + goto bail; + } + + dat_os_lock (&g_dr_lock); + + status = dat_dictionary_insert (g_dr_dictionary, + dict_entry, + info, + (DAT_DICTIONARY_DATA *) data); + + dat_os_unlock (&g_dr_lock); + +bail: + if ( DAT_SUCCESS != status ) + { + if ( NULL != data ) + { + dat_os_free (data, sizeof (DAT_DR_ENTRY)); + } + + + if ( NULL != dict_entry ) + { + (void) dat_dictionary_entry_destroy(dict_entry); + } + } + + return status; +} + + +//*********************************************************************** +// Function: dat_dr_remove +//*********************************************************************** + +extern DAT_RETURN +dat_dr_remove ( + IN const DAT_PROVIDER_INFO *info ) +{ + DAT_DR_ENTRY *data; + DAT_DICTIONARY_ENTRY dict_entry = NULL; + DAT_RETURN status; + + dat_os_lock (&g_dr_lock); + + status = dat_dictionary_search ( g_dr_dictionary, + info, + (DAT_DICTIONARY_DATA *) &data); + + if ( DAT_SUCCESS != status ) + { + /* return status from dat_dictionary_search() */ + goto bail; + } + + if ( 0 != data->ref_count ) + { + status = DAT_ERROR (DAT_PROVIDER_IN_USE, 0); + goto bail; + } + + status = 
dat_dictionary_remove ( g_dr_dictionary, + &dict_entry, + info, + (DAT_DICTIONARY_DATA *) &data); + if ( DAT_SUCCESS != status ) + { + /* return status from dat_dictionary_remove() */ + goto bail; + } + + dat_os_free (data, sizeof (DAT_DR_ENTRY)); + +bail: + dat_os_unlock (&g_dr_lock); + + if ( NULL != dict_entry ) + { + (void) dat_dictionary_entry_destroy(dict_entry); + } + + return status; +} + + +//*********************************************************************** +// Function: dat_dr_provider_open +//*********************************************************************** + +extern DAT_RETURN +dat_dr_provider_open ( + IN const DAT_PROVIDER_INFO *info, + OUT DAT_IA_OPEN_FUNC *p_ia_open_func ) +{ + DAT_RETURN status; + DAT_DR_ENTRY *data; + + dat_os_lock (&g_dr_lock); + + status = dat_dictionary_search ( g_dr_dictionary, + info, + (DAT_DICTIONARY_DATA *) &data); + + dat_os_unlock (&g_dr_lock); + + if ( DAT_SUCCESS == status ) + { + data->ref_count++; + *p_ia_open_func = data->ia_open_func; + } + + return status; +} + + +//*********************************************************************** +// Function: dat_dr_provider_close +//*********************************************************************** + +extern DAT_RETURN +dat_dr_provider_close ( + IN const DAT_PROVIDER_INFO *info ) +{ + DAT_RETURN status; + DAT_DR_ENTRY *data; + + dat_os_lock (&g_dr_lock); + + status = dat_dictionary_search ( g_dr_dictionary, + info, + (DAT_DICTIONARY_DATA *) &data); + + dat_os_unlock (&g_dr_lock); + + if ( DAT_SUCCESS == status ) + { + data->ref_count--; + } + + return status; +} + + +//*********************************************************************** +// Function: dat_dr_size +//*********************************************************************** + +DAT_RETURN +dat_dr_size ( + OUT DAT_COUNT *size ) +{ + return dat_dictionary_size (g_dr_dictionary, size); +} + + +//*********************************************************************** +// Function: dat_dr_list +//*********************************************************************** + +DAT_RETURN +dat_dr_list ( + IN DAT_COUNT max_to_return, + OUT DAT_COUNT *entries_returned, + OUT DAT_PROVIDER_INFO * (dat_provider_list[]) ) +{ + DAT_DR_ENTRY **array; + DAT_COUNT array_size; + DAT_COUNT i; + DAT_RETURN status; + + array = NULL; + status = DAT_SUCCESS; + + /* The dictionary size may increase between the call to */ + /* dat_dictionary_size() and dat_dictionary_enumerate(). */ + /* Therefore we loop until a successful enumeration is made. 
*/ + *entries_returned = 0; + for (;;) + { + status = dat_dictionary_size (g_dr_dictionary, &array_size); + if ( status != DAT_SUCCESS ) + { + goto bail; + } + + if (array_size == 0) + { + status = DAT_SUCCESS; + goto bail; + } + + array = dat_os_alloc (array_size * sizeof (DAT_DR_ENTRY *)); + if ( array == NULL ) + { + status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_lock (&g_dr_lock); + + status = dat_dictionary_enumerate (g_dr_dictionary, + (DAT_DICTIONARY_DATA *) array, + array_size); + + dat_os_unlock (&g_dr_lock); + + if ( DAT_SUCCESS == status ) + { + break; + } + else + { + dat_os_free (array, array_size * sizeof (DAT_DR_ENTRY *)); + array = NULL; + continue; + } + } + + for ( i = 0; (i < max_to_return) && (i < array_size); i++) + { + if ( NULL == dat_provider_list[i] ) + { + status = DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3); + goto bail; + } + + *dat_provider_list[i] = array[i]->info; + } + + *entries_returned = i; + +bail: + if ( NULL != array ) + { + dat_os_free (array, array_size * sizeof (DAT_DR_ENTRY *)); + } + + return status; +} + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */
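A minimal sketch of how a caller is expected to feed dat_dr_list(), given the NULL-slot check above; example_list_providers and MAX_PROVIDERS are hypothetical names, not part of this patch:

/* Illustrative only: every slot of the list must point at caller-owned
 * storage, or dat_dr_list() fails with DAT_INVALID_ARG3. */
#include "dat_dr.h"

#define MAX_PROVIDERS 8

static DAT_RETURN
example_list_providers ( void )
{
    DAT_PROVIDER_INFO infos[MAX_PROVIDERS];
    DAT_PROVIDER_INFO *list[MAX_PROVIDERS];
    DAT_COUNT i, entries_returned;
    DAT_RETURN status;

    for ( i = 0; i < MAX_PROVIDERS; i++ )
    {
        list[i] = &infos[i];    /* point each slot at real storage */
    }

    status = dat_dr_list (MAX_PROVIDERS, &entries_returned, list);
    if ( DAT_SUCCESS == status )
    {
        for ( i = 0; i < entries_returned; i++ )
        {
            dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC,
                              "provider: %s\n", infos[i].ia_name);
        }
    }

    return status;
}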
diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_dr.h b/branches/Ndi/ulp/dapl/dat/common/dat_dr.h new file mode 100644 index 00000000..12546f7d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_dr.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dat_dr.h + * + * PURPOSE: dynamic registry interface declarations + * + **********************************************************************/ + +#ifndef __DAT_DR_H__ +#define __DAT_DR_H__ + + +#include "dat_osd.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +/********************************************************************* + * * + * Structures * + * * + *********************************************************************/ + +typedef struct +{ + DAT_COUNT ref_count; + DAT_IA_OPEN_FUNC ia_open_func; + DAT_PROVIDER_INFO info; +} DAT_DR_ENTRY; + + +/********************************************************************* + * * + * Function Declarations * + * * + *********************************************************************/ + +extern DAT_RETURN +dat_dr_init( void ); + +extern DAT_RETURN +dat_dr_fini( void ); + +extern DAT_RETURN +dat_dr_insert ( + IN const DAT_PROVIDER_INFO *info, + IN DAT_DR_ENTRY *entry ); + +extern DAT_RETURN +dat_dr_remove ( + IN const DAT_PROVIDER_INFO *info ); + + +extern DAT_RETURN +dat_dr_provider_open ( + IN const DAT_PROVIDER_INFO *info, + OUT DAT_IA_OPEN_FUNC *p_ia_open_func ); + +extern DAT_RETURN +dat_dr_provider_close ( + IN const DAT_PROVIDER_INFO *info); + +extern DAT_RETURN +dat_dr_size ( + OUT DAT_COUNT *size); + +extern DAT_RETURN +dat_dr_list ( + IN DAT_COUNT max_to_return, + OUT DAT_COUNT *entries_returned, + OUT DAT_PROVIDER_INFO * (dat_provider_list[]) ); + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_init.c b/branches/Ndi/ulp/dapl/dat/common/dat_init.c new file mode 100644 index 00000000..3d7dbc37 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_init.c @@ -0,0 +1,161 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_init.c + * + * PURPOSE: DAT registry implementation for uDAPL + * Description: init and fini functions for DAT module.
+ * + **********************************************************************/ + +#include "dat_init.h" + +#include "dat_dr.h" +#include "dat_osd.h" + +#ifndef DAT_NO_STATIC_REGISTRY +#include "dat_sr.h" +#endif + + +/********************************************************************* + * * + * Global Variables * + * * + *********************************************************************/ + +/* + * Ideally, the following two rules could be enforced: + * + * - The DAT Registry's initialization function is executed before that + * of any DAT Providers and hence all calls into the registry occur + * after the registry module is initialized. + * + * - The DAT Registry's deinitialization function is executed after that + * of any DAT Providers and hence all calls into the registry occur + * before the registry module is deinitialized. + * + * However, on many platforms few guarantees are provided regarding the + * order in which module initialization and deinitialization functions + * are invoked. + * + * To understand why these rules are difficult to enforce using only + * features common to all platforms, consider the Linux platform. The order + * in which Linux shared libraries are loaded into a process's address space + * is undefined. When a DAT consumer explicitly links to DAT provider + * libraries, the order in which library initialization and deinitialization + * functions are invoked becomes important. For example if the DAPL provider + * calls dat_registry_add_provider() before the registry has been initialized, + * an error will occur. + * + * We assume that modules are loaded by a single thread. Given + * this assumption, we can use a simple state variable to determine + * the state of the DAT registry. + */ + +static DAT_MODULE_STATE g_module_state = DAT_MODULE_STATE_UNINITIALIZED; + + +//*********************************************************************** +// Function: dat_module_get_state +//*********************************************************************** + +DAT_MODULE_STATE +dat_module_get_state ( void ) +{ + return g_module_state; +} + + +//*********************************************************************** +// Function: dat_init +//*********************************************************************** + +void +dat_init ( void ) +{ + DAT_RETURN status = DAT_SUCCESS; + if ( DAT_MODULE_STATE_UNINITIALIZED == g_module_state ) + { + /* + * update the module state flag immediately in case there + * is a recursive call to dat_init().
+ */ + g_module_state = DAT_MODULE_STATE_INITIALIZING; + + dat_os_dbg_init (); + + dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC, + "DAT Registry: Started (dat_init)\n"); + +#ifndef DAT_NO_STATIC_REGISTRY + dat_sr_init (); +#endif + status = dat_dr_init (); + if (status != DAT_SUCCESS) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC, + "DAT Registry: (dat_init) failed\n"); + g_module_state = DAT_MODULE_STATE_UNINITIALIZED; + return; + } + g_module_state = DAT_MODULE_STATE_INITIALIZED; + } +} + + +//*********************************************************************** +// Function: dat_fini +//*********************************************************************** + +void +dat_fini ( void ) +{ + if ( DAT_MODULE_STATE_INITIALIZED == g_module_state ) + { + g_module_state = DAT_MODULE_STATE_DEINITIALIZING; + + dat_dr_fini (); +#ifndef DAT_NO_STATIC_REGISTRY + dat_sr_fini (); +#endif + + dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC, "DAT Registry: Stopped (dat_fini)\n"); + + g_module_state = DAT_MODULE_STATE_DEINITIALIZED; + } +} + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_init.h b/branches/Ndi/ulp/dapl/dat/common/dat_init.h new file mode 100644 index 00000000..523d8133 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_init.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dat_init.h + * + * PURPOSE: DAT registry global data + * + **********************************************************************/ + +#ifndef _DAT_INIT_H_ +#define _DAT_INIT_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +/********************************************************************* + * * + * Enumerations * + * * + *********************************************************************/ + +typedef enum +{ + DAT_MODULE_STATE_UNINITIALIZED, + DAT_MODULE_STATE_INITIALIZING, + DAT_MODULE_STATE_INITIALIZED, + DAT_MODULE_STATE_DEINITIALIZING, + DAT_MODULE_STATE_DEINITIALIZED +} DAT_MODULE_STATE; + +/********************************************************************* + * * + * Function Prototypes * + * * + *********************************************************************/ + +DAT_MODULE_STATE +dat_module_get_state ( void ) ; + +void +dat_init ( void ) ; + +void +dat_fini ( void ) ; + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_sr.c b/branches/Ndi/ulp/dapl/dat/common/dat_sr.c new file mode 100644 index 00000000..e140ccec --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_sr.c @@ -0,0 +1,412 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * MODULE: dat_sr.c + * + * PURPOSE: static registry implementation + * + **********************************************************************/ + + +#include "dat_sr.h" + +#include "dat_dictionary.h" +#include "udat_sr_parser.h" + + +/********************************************************************* + * * + * Global Variables * + * * + *********************************************************************/ + +static DAT_OS_LOCK g_sr_lock; +static DAT_DICTIONARY *g_sr_dictionary = NULL; + + +/********************************************************************* + * * + * External Functions * + * * + *********************************************************************/ + + +//*********************************************************************** +// Function: dat_sr_init +//*********************************************************************** + +DAT_RETURN +dat_sr_init ( void ) +{ + DAT_RETURN status; + + status = dat_os_lock_init (&g_sr_lock); + if ( DAT_SUCCESS != status ) + { + return status; + } + + status = dat_dictionary_create (&g_sr_dictionary); + if ( DAT_SUCCESS != status ) + { + return status; + } + + /* + * Since DAT allows providers to be loaded by either the static + * registry or explicitly through OS dependent methods, do not + * return an error if no providers are loaded via the static registry. + */ + + status = dat_sr_load (); + dat_os_dbg_print (DAT_OS_DBG_TYPE_GENERIC, + "DAT Registry: SR Load return %#x\n", status); + + return DAT_SUCCESS; +} + + +//*********************************************************************** +// Function: dat_sr_fini +//*********************************************************************** + +extern DAT_RETURN +dat_sr_fini ( void ) +{ + DAT_RETURN status; + + status = dat_os_lock_destroy (&g_sr_lock); + if ( DAT_SUCCESS != status ) + { + return status; + } + + status = dat_dictionary_destroy (g_sr_dictionary); + if ( DAT_SUCCESS != status ) + { + return status; + } + + return DAT_SUCCESS; +} + + +//*********************************************************************** +// Function: dat_sr_insert +//*********************************************************************** + +extern DAT_RETURN +dat_sr_insert ( + IN const DAT_PROVIDER_INFO *info, + IN DAT_SR_ENTRY *entry ) +{ + DAT_RETURN status; + DAT_SR_ENTRY *data; + DAT_UINT32 lib_path_size = 0; + DAT_UINT32 lib_path_len = 0; + DAT_UINT32 ia_params_size = 0; + DAT_UINT32 ia_params_len = 0; + DAT_DICTIONARY_ENTRY dict_entry = NULL; + + if ( NULL == (data = dat_os_alloc (sizeof (DAT_SR_ENTRY))) ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + lib_path_len = (DAT_UINT32)dat_os_strlen (entry->lib_path); + lib_path_size = (lib_path_len + 1) * sizeof (char); + + if ( NULL == (data->lib_path = dat_os_alloc (lib_path_size)) ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_strncpy (data->lib_path, entry->lib_path, lib_path_len); + data->lib_path[lib_path_len] = '\0'; + + ia_params_len = (DAT_UINT32)dat_os_strlen (entry->ia_params); + ia_params_size = (ia_params_len + 1) * sizeof (char); + + if ( NULL == (data->ia_params = dat_os_alloc (ia_params_size)) ) + { + status = DAT_ERROR (DAT_INSUFFICIENT_RESOURCES,DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_strncpy (data->ia_params, entry->ia_params, ia_params_len); + data->ia_params[ia_params_len] = '\0'; + + data->info = entry->info; + data->lib_handle = 
entry->lib_handle; + data->ref_count = entry->ref_count; + + dict_entry = NULL; + status = dat_dictionary_entry_create (&dict_entry); + if ( DAT_SUCCESS != status ) + { + goto bail; + } + + dat_os_lock (&g_sr_lock); + + status = dat_dictionary_insert (g_sr_dictionary, + dict_entry, + info, + (DAT_DICTIONARY_DATA *) data); + dat_os_unlock (&g_sr_lock); + +bail: + if ( DAT_SUCCESS != status ) + { + if ( NULL != data ) + { + if ( NULL != data->lib_path ) + { + dat_os_free (data->lib_path, lib_path_size); + } + + if ( NULL != data->ia_params ) + { + dat_os_free (data->ia_params, ia_params_size); + } + + dat_os_free (data, sizeof (DAT_SR_ENTRY)); + } + + if ( NULL != dict_entry ) + { + (void) dat_dictionary_entry_destroy(dict_entry); + } + } + + dat_os_dbg_print(DAT_OS_DBG_TYPE_SR, + "DAT Registry: insert to sr_dictionary for %s : 0x%x\n", + entry->info.ia_name, status); + + return status; +} + + +//*********************************************************************** +// Function: dat_sr_size +//*********************************************************************** + +extern DAT_RETURN +dat_sr_size ( + OUT DAT_COUNT *size ) +{ + return dat_dictionary_size (g_sr_dictionary, size); +} + + +//*********************************************************************** +// Function: dat_sr_list +//*********************************************************************** + +extern DAT_RETURN +dat_sr_list ( + IN DAT_COUNT max_to_return, + OUT DAT_COUNT *entries_returned, + OUT DAT_PROVIDER_INFO * (dat_provider_list[]) ) +{ + DAT_SR_ENTRY **array; + DAT_COUNT array_size; + DAT_COUNT i; + DAT_RETURN status; + + array = NULL; + status = DAT_SUCCESS; + + /* The dictionary size may increase between the call to */ + /* dat_dictionary_size() and dat_dictionary_enumerate(). */ + /* Therefore we loop until a successful enumeration is made. 
*/ + *entries_returned = 0; + for (;;) + { + status = dat_dictionary_size (g_sr_dictionary, &array_size); + if ( DAT_SUCCESS != status ) + { + goto bail; + } + + if (array_size == 0) + { + status = DAT_SUCCESS; + goto bail; + } + + array = dat_os_alloc (array_size * sizeof (DAT_SR_ENTRY *)); + if ( array == NULL ) + { + status = DAT_ERROR(DAT_INSUFFICIENT_RESOURCES, DAT_RESOURCE_MEMORY); + goto bail; + } + + dat_os_lock (&g_sr_lock); + + status = dat_dictionary_enumerate (g_sr_dictionary, + (DAT_DICTIONARY_DATA *) array, + array_size); + + dat_os_unlock (&g_sr_lock); + + if ( DAT_SUCCESS == status ) + { + break; + } + else + { + dat_os_free (array, array_size * sizeof (DAT_SR_ENTRY *)); + array = NULL; + continue; + } + } + + for ( i = 0; (i < max_to_return) && (i < array_size); i++) + { + if ( NULL == dat_provider_list[i] ) + { + status = DAT_ERROR (DAT_INVALID_PARAMETER,DAT_INVALID_ARG3); + goto bail; + } + + *dat_provider_list[i] = array[i]->info; + } + + *entries_returned = i; + +bail: + if ( NULL != array ) + { + dat_os_free (array, array_size * sizeof (DAT_SR_ENTRY *)); + } + + return status; +} + + + +//*********************************************************************** +// Function: dat_sr_provider_open +//*********************************************************************** + +extern DAT_RETURN +dat_sr_provider_open ( + IN const DAT_PROVIDER_INFO *info ) +{ + DAT_RETURN status; + DAT_SR_ENTRY *data; + + dat_os_lock (&g_sr_lock); + + status = dat_dictionary_search (g_sr_dictionary, + info, + (DAT_DICTIONARY_DATA *) &data); + + if ( DAT_SUCCESS == status ) + { + if ( 0 == data->ref_count ) + { + status = dat_os_library_load (data->lib_path, &data->lib_handle); + if ( status == DAT_SUCCESS ) + { + data->ref_count++; + } + else + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_SR, + "DAT Registry: static registry unable to " + "load library %s\n", data->lib_path); + goto bail; + } + + data->init_func = (DAT_PROVIDER_INIT_FUNC)dat_os_library_sym (data->lib_handle, DAT_PROVIDER_INIT_FUNC_STR); + data->fini_func = (DAT_PROVIDER_FINI_FUNC)dat_os_library_sym (data->lib_handle, DAT_PROVIDER_FINI_FUNC_STR); + + if ( NULL != data->init_func ) + { + (*data->init_func)(&data->info, data->ia_params); + } + } + else + { + data->ref_count++; + } + } + + bail: + dat_os_unlock (&g_sr_lock); + + return status; +} + + +//*********************************************************************** +// Function: dat_sr_provider_close +//*********************************************************************** + +extern DAT_RETURN +dat_sr_provider_close ( + IN const DAT_PROVIDER_INFO *info ) +{ + DAT_RETURN status; + DAT_SR_ENTRY *data; + + dat_os_lock (&g_sr_lock); + + status = dat_dictionary_search (g_sr_dictionary, + info, + (DAT_DICTIONARY_DATA *) &data); + + if ( DAT_SUCCESS == status ) + { + if ( 1 == data->ref_count ) + { + if ( NULL != data->fini_func ) + { + (*data->fini_func)(&data->info); + } + + status = dat_os_library_unload (data->lib_handle); + if ( status == DAT_SUCCESS ) + { + data->ref_count--; + } + } + else + { + data->ref_count--; + } + } + + dat_os_unlock (&g_sr_lock); + + return status; +} diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_sr.h b/branches/Ndi/ulp/dapl/dat/common/dat_sr.h new file mode 100644 index 00000000..5551ee20 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/common/dat_sr.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dat_sr.h
+ *
+ * PURPOSE: static registry (SR) interface declarations
+ *
+ **********************************************************************/
+
+#ifndef _DAT_SR_H_
+#define _DAT_SR_H_
+
+
+#include
+#include
+
+#include "dat_osd.h"
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*********************************************************************
+ *                                                                   *
+ * Structures                                                        *
+ *                                                                   *
+ *********************************************************************/
+
+typedef struct
+{
+    DAT_PROVIDER_INFO       info;
+    char *                  lib_path;
+    char *                  ia_params;
+    DAT_OS_LIBRARY_HANDLE   lib_handle;
+    DAT_PROVIDER_INIT_FUNC  init_func;
+    DAT_PROVIDER_FINI_FUNC  fini_func;
+    DAT_COUNT               ref_count;
+} DAT_SR_ENTRY;
+
+
+/*********************************************************************
+ *                                                                   *
+ * Function Declarations                                             *
+ *                                                                   *
+ *********************************************************************/
+
+extern DAT_RETURN
+dat_sr_init( void );
+
+extern DAT_RETURN
+dat_sr_fini( void );
+
+extern DAT_RETURN
+dat_sr_insert (
+    IN  const DAT_PROVIDER_INFO *info,
+    IN  DAT_SR_ENTRY *entry );
+
+extern DAT_RETURN
+dat_sr_size (
+    OUT DAT_COUNT *size);
+
+extern DAT_RETURN
+dat_sr_list (
+    IN  DAT_COUNT max_to_return,
+    OUT DAT_COUNT *entries_returned,
+    OUT DAT_PROVIDER_INFO *(dat_provider_list[]) );
+
+extern DAT_RETURN
+dat_sr_provider_open (
+    IN  const DAT_PROVIDER_INFO *info );
+
+extern DAT_RETURN
+dat_sr_provider_close (
+    IN  const DAT_PROVIDER_INFO *info );
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/branches/Ndi/ulp/dapl/dat/common/dat_strerror.c b/branches/Ndi/ulp/dapl/dat/common/dat_strerror.c
new file mode 100644
index 00000000..6ee77fe4
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/common/dat_strerror.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_strerror.c + * + * PURPOSE: Convert DAT_RETURN values to humman readable string + * + * $Id$ + **********************************************************************/ + +#include + + +/********************************************************************* + * * + * Internal Function Declarations * + * * + *********************************************************************/ + +DAT_RETURN +dat_strerror_major ( + IN DAT_RETURN value, + OUT const char **message ); + +DAT_RETURN +dat_strerror_minor ( + IN DAT_RETURN value, + OUT const char **message ); + + +/********************************************************************* + * * + * Internal Function Definitions * + * * + *********************************************************************/ + +DAT_RETURN +dat_strerror_major ( + IN DAT_RETURN value, + OUT const char **message ) +{ + switch ( DAT_GET_TYPE(value) ) + { + case DAT_SUCCESS: + { + *message = "DAT_SUCCESS"; + return DAT_SUCCESS; + } + case DAT_ABORT: + { + *message = "DAT_ABORT"; + return DAT_SUCCESS; + } + case DAT_CONN_QUAL_IN_USE: + { + *message = "DAT_CONN_QUAL_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INSUFFICIENT_RESOURCES: + { + *message = "DAT_INSUFFICIENT_RESOURCES"; + return DAT_SUCCESS; + } + case DAT_INTERNAL_ERROR: + { + *message = "DAT_INTERNAL_ERROR"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE: + { + *message = "DAT_INVALID_HANDLE"; + return DAT_SUCCESS; + } + case DAT_INVALID_PARAMETER: + { + *message = "DAT_INVALID_PARAMETER"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE: + { + *message = "DAT_INVALID_STATE"; + return DAT_SUCCESS; + } + case DAT_LENGTH_ERROR: + { + *message = "DAT_LENGTH_ERROR"; + return DAT_SUCCESS; + } + case DAT_MODEL_NOT_SUPPORTED: + { + *message = "DAT_MODEL_NOT_SUPPORTED"; + return DAT_SUCCESS; + } + case DAT_PROVIDER_NOT_FOUND: + { + *message = "DAT_PROVIDER_NOT_FOUND"; + return DAT_SUCCESS; + } + case DAT_PRIVILEGES_VIOLATION: + { + *message = "DAT_PRIVILEGES_VIOLATION"; + return DAT_SUCCESS; + } + case DAT_PROTECTION_VIOLATION: + { + *message = "DAT_PROTECTION_VIOLATION"; + return DAT_SUCCESS; + } + case DAT_QUEUE_EMPTY: + { + *message = "DAT_QUEUE_EMPTY"; + return DAT_SUCCESS; + } + case DAT_QUEUE_FULL: + { + *message = "DAT_QUEUE_FULL"; + return DAT_SUCCESS; + } + case DAT_TIMEOUT_EXPIRED: + { + *message = "DAT_TIMEOUT_EXPIRED"; + return DAT_SUCCESS; + } + case DAT_PROVIDER_ALREADY_REGISTERED: + { + *message = "DAT_PROVIDER_ALREADY_REGISTERED"; + return DAT_SUCCESS; + } + case DAT_PROVIDER_IN_USE: + { + *message = "DAT_PROVIDER_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_ADDRESS: + { + *message = "DAT_INVALID_ADDRESS"; + return DAT_SUCCESS; + } + case DAT_INTERRUPTED_CALL: + { + *message = "DAT_INTERRUPTED_CALL"; + return DAT_SUCCESS; + } + case DAT_NOT_IMPLEMENTED: + { + *message = "DAT_NOT_IMPLEMENTED"; + return DAT_SUCCESS; + } + default: + { + return DAT_INVALID_PARAMETER; + } + } +} + + +DAT_RETURN +dat_strerror_minor ( + IN DAT_RETURN value, + OUT const char **message ) +{ + switch ( 
DAT_GET_SUBTYPE(value) ) + { + + case DAT_NO_SUBTYPE: /* NO subtype */ + { + *message = ""; + return DAT_SUCCESS; + } + case DAT_SUB_INTERRUPTED: + { + *message = "DAT_SUB_INTERRUPTED"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_MEMORY: + { + *message = "DAT_RESOURCE_MEMORY"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_DEVICE: + { + *message = "DAT_RESOURCE_DEVICE"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_TEP: + { + *message = "DAT_RESOURCE_TEP"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_TEVD: + { + *message = "DAT_RESOURCE_TEVD"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_PROTECTION_DOMAIN: + { + *message = "DAT_RESOURCE_PROTECTION_DOMAIN"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_MEMORY_REGION: + { + *message = "DAT_RESOURCE_MEMORY_REGION"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_ERROR_HANDLER: + { + *message = "DAT_RESOURCE_ERROR_HANDLER"; + return DAT_SUCCESS; + } + case DAT_RESOURCE_CREDITS: + { + *message = "DAT_RESOURCE_CREDITS"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_IA: + { + *message = "DAT_INVALID_HANDLE_IA"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EP: + { + *message = "DAT_INVALID_HANDLE_EP"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_LMR: + { + *message = "DAT_INVALID_HANDLE_LMR"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_RMR: + { + *message = "DAT_INVALID_HANDLE_RMR"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_PZ: + { + *message = "DAT_INVALID_HANDLE_PZ"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_PSP: + { + *message = "DAT_INVALID_HANDLE_PSP"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_RSP: + { + *message = "DAT_INVALID_HANDLE_RSP"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_CR: + { + *message = "DAT_INVALID_HANDLE_CR"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_CNO: + { + *message = "DAT_INVALID_HANDLE_CNO"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EVD_CR: + { + *message = "DAT_INVALID_HANDLE_EVD_CR"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EVD_REQUEST: + { + *message = "DAT_INVALID_HANDLE_EVD_REQUEST"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EVD_RECV: + { + *message = "DAT_INVALID_HANDLE_EVD_RECV"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EVD_CONN: + { + *message = "DAT_INVALID_HANDLE_EVD_CONN"; + return DAT_SUCCESS; + } + case DAT_INVALID_HANDLE_EVD_ASYNC: + { + *message = "DAT_INVALID_HANDLE_EVD_ASYNC"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG1: + { + *message = "DAT_INVALID_ARG1"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG2: + { + *message = "DAT_INVALID_ARG2"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG3: + { + *message = "DAT_INVALID_ARG3"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG4: + { + *message = "DAT_INVALID_ARG4"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG5: + { + *message = "DAT_INVALID_ARG5"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG6: + { + *message = "DAT_INVALID_ARG6"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG7: + { + *message = "DAT_INVALID_ARG7"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG8: + { + *message = "DAT_INVALID_ARG8"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG9: + { + *message = "DAT_INVALID_ARG9"; + return DAT_SUCCESS; + } + case DAT_INVALID_ARG10: + { + *message = "DAT_INVALID_ARG10"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_UNCONNECTED: + { + *message = "DAT_INVALID_STATE_EP_UNCONNECTED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_ACTCONNPENDING: + { + *message = "DAT_INVALID_STATE_EP_ACTCONNPENDING"; 
+ return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_PASSCONNPENDING: + { + *message = "DAT_INVALID_STATE_EP_PASSCONNPENDING"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_TENTCONNPENDING: + { + *message = "DAT_INVALID_STATE_EP_TENTCONNPENDING"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_CONNECTED: + { + *message = "DAT_INVALID_STATE_EP_CONNECTED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_DISCONNECTED: + { + *message = "DAT_INVALID_STATE_EP_DISCONNECTED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_RESERVED: + { + *message = "DAT_INVALID_STATE_EP_RESERVED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_COMPLPENDING: + { + *message = "DAT_INVALID_STATE_EP_COMPLPENDING"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_DISCPENDING: + { + *message = "DAT_INVALID_STATE_EP_DISCPENDING"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_PROVIDERCONTROL: + { + *message = "DAT_INVALID_STATE_EP_PROVIDERCONTROL"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EP_NOTREADY: + { + *message = "DAT_INVALID_STATE_EP_NOTREADY"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_CNO_IN_USE: + { + *message = "DAT_INVALID_STATE_CNO_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_CNO_DEAD: + { + *message = "DAT_INVALID_STATE_CNO_DEAD"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_OPEN: + { + *message = "DAT_INVALID_STATE_EVD_OPEN"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_ENABLED: + { + *message = "DAT_INVALID_STATE_EVD_ENABLED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_DISABLED: + { + *message = "DAT_INVALID_STATE_EVD_DISABLED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_WAITABLE: + { + *message = "DAT_INVALID_STATE_EVD_WAITABLE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_UNWAITABLE: + { + *message = "DAT_INVALID_STATE_EVD_UNWAITABLE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_IN_USE: + { + *message = "DAT_INVALID_STATE_EVD_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_CONFIG_NOTIFY: + { + *message = "DAT_INVALID_STATE_EVD_CONFIG_NOTIFY"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_CONFIG_SOLICITED: + { + *message = "DAT_INVALID_STATE_EVD_CONFIG_SOLICITED"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_CONFIG_THRESHOLD: + { + *message = "DAT_INVALID_STATE_EVD_CONFIG_THRESHOLD"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_WAITER: + { + *message = "DAT_INVALID_STATE_EVD_WAITER"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_EVD_ASYNC: + { + *message = "DAT_INVALID_STATE_EVD_ASYNC"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_IA_IN_USE: + { + *message = "DAT_INVALID_STATE_IA_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_LMR_IN_USE: + { + *message = "DAT_INVALID_STATE_LMR_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_LMR_FREE: + { + *message = "DAT_INVALID_STATE_LMR_FREE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_PZ_IN_USE: + { + *message = "DAT_INVALID_STATE_PZ_IN_USE"; + return DAT_SUCCESS; + } + case DAT_INVALID_STATE_PZ_FREE: + { + *message = "DAT_INVALID_STATE_PZ_FREE"; + return DAT_SUCCESS; + } + case DAT_PRIVILEGES_READ: + { + *message = "DAT_PRIVILEGES_READ"; + return DAT_SUCCESS; + } + case DAT_PRIVILEGES_WRITE: + { + *message = "DAT_PRIVILEGES_WRITE"; + return DAT_SUCCESS; + } + case DAT_PRIVILEGES_RDMA_READ: + { + *message = "DAT_PRIVILEGES_RDMA_READ"; + return DAT_SUCCESS; + } + case DAT_PRIVILEGES_RDMA_WRITE: + { + *message = 
"DAT_PRIVILEGES_RDMA_WRITE"; + return DAT_SUCCESS; + } + case DAT_PROTECTION_READ: + { + *message = "DAT_PROTECTION_READ"; + return DAT_SUCCESS; + } + case DAT_PROTECTION_WRITE: + { + *message = "DAT_PROTECTION_WRITE"; + return DAT_SUCCESS; + } + case DAT_PROTECTION_RDMA_READ: + { + *message = "DAT_PROTECTION_RDMA_READ"; + return DAT_SUCCESS; + } + case DAT_PROTECTION_RDMA_WRITE: + { + *message = "DAT_PROTECTION_RDMA_WRITE"; + return DAT_SUCCESS; + } + case DAT_INVALID_ADDRESS_UNSUPPORTED: + { + *message = "DAT_INVALID_ADDRESS_UNSUPPORTED"; + return DAT_SUCCESS; + } + case DAT_INVALID_ADDRESS_UNREACHABLE: + { + *message = "DAT_INVALID_ADDRESS_UNREACHABLE"; + return DAT_SUCCESS; + } + case DAT_INVALID_ADDRESS_MALFORMED: + { + *message = "DAT_INVALID_ADDRESS_MALFORMED"; + return DAT_SUCCESS; + } + default: + { + return DAT_INVALID_PARAMETER; + } + } +} + + +/********************************************************************* + * * + * External Function Definitions * + * * + *********************************************************************/ + +DAT_RETURN DAT_API +dat_strerror ( + IN DAT_RETURN value, + OUT const char **major_message, + OUT const char **minor_message ) +{ + /* + * The DAT specification contains a note to implementers + * suggesting that the consumer's DAT_RETURN value be used + * as an index into a table of text strings. However, + * the DAT_RETURN values are not consecutive. Therefore this + * implementation does not follow the suggested implementation. + */ + + if ( DAT_SUCCESS != dat_strerror_major(value, major_message) ) + { + return DAT_INVALID_PARAMETER; + } + else if ( DAT_SUCCESS != dat_strerror_minor(value, minor_message) ) + { + return DAT_INVALID_PARAMETER; + } + else + { + return DAT_SUCCESS; + } +} diff --git a/branches/Ndi/ulp/dapl/dat/dirs b/branches/Ndi/ulp/dapl/dat/dirs new file mode 100644 index 00000000..97d2b67c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/dirs @@ -0,0 +1 @@ +DIRS=udat diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat.h new file mode 100644 index 00000000..10854020 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat.h @@ -0,0 +1,958 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dat.h + * + * PURPOSE: defines the common DAT API for uDAPL and kDAPL. 
+ * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _DAT_H_ +#define _DAT_H_ + +#include + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* Generic DAT types */ + +typedef char * DAT_NAME_PTR; /* Format for ia_name and attributes */ +#define DAT_NAME_MAX_LENGTH 256 + +/* + * Used for provider, vendor, transport, hardware specific attributes + * definitions. + */ + +typedef struct dat_named_attr + { + const char * name; /* Name of attribute */ + const char * value; /* Value of attribute */ + } DAT_NAMED_ATTR; + +typedef enum dat_boolean + { + DAT_FALSE = 0, + DAT_TRUE = 1 + } DAT_BOOLEAN; + +typedef union dat_context + { + DAT_PVOID as_ptr; + DAT_UINT64 as_64; + DAT_UVERYLONG as_index; + } DAT_CONTEXT; + +typedef DAT_CONTEXT DAT_DTO_COOKIE; +typedef DAT_CONTEXT DAT_RMR_COOKIE; + +typedef enum dat_completion_flags + { + /* Completes with notification */ + DAT_COMPLETION_DEFAULT_FLAG = 0x00, + + /* Completions suppressed if successful */ + DAT_COMPLETION_SUPPRESS_FLAG = 0x01, + + /* Sender controlled notification for recv completion */ + DAT_COMPLETION_SOLICITED_WAIT_FLAG = 0x02, + + /* Completions with unsignaled notifications */ + DAT_COMPLETION_UNSIGNALLED_FLAG = 0x04, + + /* Do not start processing until all previous RDMA reads complete. */ + DAT_COMPLETION_BARRIER_FENCE_FLAG = 0x08, + + /* Only valid for uDAPL as EP attribute for Recv Completion flags. + * Waiter unblocking is controlled by Threshold value of dat_evd_wait. 
+ * UNSIGNALLED for RECV not allowed when EP has this attribute */ + DAT_COMPLETION_EVD_THRESHOLD_FLAG = 0x10 + + } DAT_COMPLETION_FLAGS; + + +typedef DAT_UINT32 DAT_TIMEOUT; /* microseconds */ + +/* timeout = infinity */ +#define DAT_TIMEOUT_INFINITE ((DAT_TIMEOUT) ~0) + +/* dat handles */ +typedef DAT_PVOID DAT_HANDLE; +typedef DAT_HANDLE DAT_CR_HANDLE; +typedef DAT_HANDLE DAT_EP_HANDLE; +typedef DAT_HANDLE DAT_EVD_HANDLE; +typedef DAT_HANDLE DAT_IA_HANDLE; +typedef DAT_HANDLE DAT_LMR_HANDLE; +typedef DAT_HANDLE DAT_PSP_HANDLE; +typedef DAT_HANDLE DAT_PZ_HANDLE; +typedef DAT_HANDLE DAT_RMR_HANDLE; +typedef DAT_HANDLE DAT_RSP_HANDLE; + + +/* dat NULL handles */ +#define DAT_HANDLE_NULL ((DAT_HANDLE)NULL) + +typedef DAT_SOCK_ADDR * DAT_IA_ADDRESS_PTR; + +typedef DAT_UINT64 DAT_CONN_QUAL; +typedef DAT_UINT64 DAT_PORT_QUAL; + +/* QOS definitions */ +typedef enum dat_qos + { + DAT_QOS_BEST_EFFORT = 0x00, + DAT_QOS_HIGH_THROUGHPUT = 0x01, + DAT_QOS_LOW_LATENCY = 0x02, + /* not low latency, nor high throughput */ + DAT_QOS_ECONOMY = 0x04, + /* both low latency and high throughput */ + DAT_QOS_PREMIUM = 0x08 + } DAT_QOS; + +/* + * FLAGS + */ + +typedef enum dat_connect_flags + { + DAT_CONNECT_DEFAULT_FLAG = 0x00, + DAT_CONNECT_MULTIPATH_FLAG = 0x01 + } DAT_CONNECT_FLAGS; + +typedef enum dat_close_flags + { + DAT_CLOSE_ABRUPT_FLAG = 0x00, + DAT_CLOSE_GRACEFUL_FLAG = 0x01 + } DAT_CLOSE_FLAGS; + +#define DAT_CLOSE_DEFAULT DAT_CLOSE_ABRUPT_FLAG + +typedef enum dat_evd_flags + { + DAT_EVD_SOFTWARE_FLAG = 0x01, + DAT_EVD_CR_FLAG = 0x10, + DAT_EVD_DTO_FLAG = 0x20, + DAT_EVD_CONNECTION_FLAG = 0x40, + DAT_EVD_RMR_BIND_FLAG = 0x80, + DAT_EVD_ASYNC_FLAG = 0x100, + /* DAT events only, no software events */ + DAT_EVD_DEFAULT_FLAG = 0x1F0 + } DAT_EVD_FLAGS; + +typedef enum dat_psp_flags + { + DAT_PSP_CONSUMER_FLAG = 0x00, /* Consumer creates an endpoint */ + DAT_PSP_PROVIDER_FLAG = 0x01 /* Provider creates an endpoint */ + + } DAT_PSP_FLAGS; + +/* + * Memory Buffers + * + * Both LMR and RMR triplets specify 64-bit addresses in the local host's byte + * order, even when that exceeds the size of a DAT_PVOID for the host + * architecture. + */ + +typedef DAT_UINT32 DAT_LMR_CONTEXT; +typedef DAT_UINT32 DAT_RMR_CONTEXT; + +typedef DAT_UINT64 DAT_VLEN; +typedef DAT_UINT64 DAT_VADDR; + +/* It is legal for Consumer to specify zero for segment_length + * of the dat_lmr_triplet. When 0 is specified for the + * segment_length then the other two elements of the + * dat_lmr_triplet are irrelevant and can be invalid. 
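+ *
+ * A sketch of such a zero-length posting (illustrative Consumer code
+ * only; the ep and cookie variables are assumed to exist, and the
+ * function type of dat_ep_post_recv appears further below):
+ *
+ *     DAT_LMR_TRIPLET iov;
+ *
+ *     iov.segment_length = 0;
+ *     iov.lmr_context = 0;
+ *     iov.virtual_address = 0;
+ *
+ *     dat_ep_post_recv (ep, 1, &iov, cookie,
+ *                       DAT_COMPLETION_DEFAULT_FLAG);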
 */
+
+typedef struct dat_lmr_triplet
+    {
+    DAT_LMR_CONTEXT         lmr_context;
+    DAT_UINT32              pad;
+    DAT_VADDR               virtual_address;
+    DAT_VLEN                segment_length;
+    } DAT_LMR_TRIPLET;
+
+typedef struct dat_rmr_triplet
+    {
+    DAT_RMR_CONTEXT         rmr_context;
+    DAT_UINT32              pad;
+    DAT_VADDR               target_address;
+    DAT_VLEN                segment_length;
+    } DAT_RMR_TRIPLET;
+
+
+/* Memory privileges */
+
+typedef enum dat_mem_priv_flags
+    {
+    DAT_MEM_PRIV_NONE_FLAG          = 0x00,
+    DAT_MEM_PRIV_LOCAL_READ_FLAG    = 0x01,
+    DAT_MEM_PRIV_REMOTE_READ_FLAG   = 0x02,
+    DAT_MEM_PRIV_LOCAL_WRITE_FLAG   = 0x10,
+    DAT_MEM_PRIV_REMOTE_WRITE_FLAG  = 0x20,
+    DAT_MEM_PRIV_ALL_FLAG           = 0x33
+    } DAT_MEM_PRIV_FLAGS;
+
+/* For backwards compatibility with DAT-1.0, the following memory
+ * privileges values are also supported. */
+#define DAT_MEM_PRIV_READ_FLAG  (DAT_MEM_PRIV_LOCAL_READ_FLAG | DAT_MEM_PRIV_REMOTE_READ_FLAG)
+#define DAT_MEM_PRIV_WRITE_FLAG (DAT_MEM_PRIV_LOCAL_WRITE_FLAG | DAT_MEM_PRIV_REMOTE_WRITE_FLAG)
+
+/* LMR Arguments Mask */
+
+typedef enum dat_lmr_param_mask
+    {
+    DAT_LMR_FIELD_IA_HANDLE             = 0x001,
+    DAT_LMR_FIELD_MEM_TYPE              = 0x002,
+    DAT_LMR_FIELD_REGION_DESC           = 0x004,
+    DAT_LMR_FIELD_LENGTH                = 0x008,
+    DAT_LMR_FIELD_PZ_HANDLE             = 0x010,
+    DAT_LMR_FIELD_MEM_PRIV              = 0x020,
+    DAT_LMR_FIELD_LMR_CONTEXT           = 0x040,
+    DAT_LMR_FIELD_RMR_CONTEXT           = 0x080,
+    DAT_LMR_FIELD_REGISTERED_SIZE       = 0x100,
+    DAT_LMR_FIELD_REGISTERED_ADDRESS    = 0x200,
+
+    DAT_LMR_FIELD_ALL                   = 0x3FF
+    } DAT_LMR_PARAM_MASK;
+
+/* RMR Arguments & RMR Arguments Mask */
+
+typedef struct dat_rmr_param
+    {
+    DAT_IA_HANDLE           ia_handle;
+    DAT_PZ_HANDLE           pz_handle;
+    DAT_LMR_TRIPLET         lmr_triplet;
+    DAT_MEM_PRIV_FLAGS      mem_priv;
+    DAT_RMR_CONTEXT         rmr_context;
+    } DAT_RMR_PARAM;
+
+typedef enum dat_rmr_param_mask
+    {
+    DAT_RMR_FIELD_IA_HANDLE     = 0x01,
+    DAT_RMR_FIELD_PZ_HANDLE     = 0x02,
+    DAT_RMR_FIELD_LMR_TRIPLET   = 0x04,
+    DAT_RMR_FIELD_MEM_PRIV      = 0x08,
+    DAT_RMR_FIELD_RMR_CONTEXT   = 0x10,
+
+    DAT_RMR_FIELD_ALL           = 0x1F
+    } DAT_RMR_PARAM_MASK;
+
+/* Provider attributes */
+
+typedef enum dat_iov_ownership
+    {
+    /* No modification by the provider; the consumer can use it anytime. */
+    DAT_IOV_CONSUMER = 0x0,
+
+    /* Provider does not modify returned IOV DTO on completion. */
+    DAT_IOV_PROVIDER_NOMOD = 0x1,
+
+    /* Provider may modify IOV DTO on completion, can't trust it.
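+     * (Illustrative consequence, not normative text: with this setting
+     * a Consumer must refill the IOV before reposting it, e.g.
+     *
+     *     iov[0].virtual_address = (DAT_VADDR) (DAT_UVERYLONG) buf;
+     *     iov[0].segment_length  = buf_len;
+     *
+     * where buf and buf_len are hypothetical; under DAT_IOV_CONSUMER
+     * the array may be reposted unchanged.)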
*/ + DAT_IOV_PROVIDER_MOD = 0x2 + + } DAT_IOV_OWNERSHIP; + +typedef enum dat_ep_creator_for_psp + { + DAT_PSP_CREATES_EP_NEVER, /* provider never creates endpoint */ + DAT_PSP_CREATES_EP_IFASKED, /* provider creates endpoint if asked */ + DAT_PSP_CREATES_EP_ALWAYS /* provider always creates endpoint */ + } DAT_EP_CREATOR_FOR_PSP; + +/* Endpoint attributes */ + +typedef enum dat_service_type + { + DAT_SERVICE_TYPE_RC /* reliable connections */ + } DAT_SERVICE_TYPE; + +typedef struct dat_ep_attr { + DAT_SERVICE_TYPE service_type; + DAT_VLEN max_mtu_size; + DAT_VLEN max_rdma_size; + DAT_QOS qos; + DAT_COMPLETION_FLAGS recv_completion_flags; + DAT_COMPLETION_FLAGS request_completion_flags; + DAT_COUNT max_recv_dtos; + DAT_COUNT max_request_dtos; + DAT_COUNT max_recv_iov; + DAT_COUNT max_request_iov; + DAT_COUNT max_rdma_read_in; + DAT_COUNT max_rdma_read_out; + DAT_COUNT ep_transport_specific_count; + DAT_NAMED_ATTR * ep_transport_specific; + DAT_COUNT ep_provider_specific_count; + DAT_NAMED_ATTR * ep_provider_specific; + } DAT_EP_ATTR; + +/* Endpoint Parameters */ + +/* For backwards compatability */ +#define DAT_EP_STATE_ERROR DAT_EP_STATE_DISCONNECTED + +typedef enum dat_ep_state + { + DAT_EP_STATE_UNCONNECTED, /* quiescent state */ + DAT_EP_STATE_RESERVED, + DAT_EP_STATE_PASSIVE_CONNECTION_PENDING, + DAT_EP_STATE_ACTIVE_CONNECTION_PENDING, + DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING, + DAT_EP_STATE_CONNECTED, + DAT_EP_STATE_DISCONNECT_PENDING, + DAT_EP_STATE_DISCONNECTED, + DAT_EP_STATE_COMPLETION_PENDING + } DAT_EP_STATE; + +typedef struct dat_ep_param + { + DAT_IA_HANDLE ia_handle; + DAT_EP_STATE ep_state; + DAT_IA_ADDRESS_PTR local_ia_address_ptr; + DAT_PORT_QUAL local_port_qual; + DAT_IA_ADDRESS_PTR remote_ia_address_ptr; + DAT_PORT_QUAL remote_port_qual; + DAT_PZ_HANDLE pz_handle; + DAT_EVD_HANDLE recv_evd_handle; + DAT_EVD_HANDLE request_evd_handle; + DAT_EVD_HANDLE connect_evd_handle; + DAT_EP_ATTR ep_attr; + } DAT_EP_PARAM; + +typedef enum dat_ep_param_mask + { + DAT_EP_FIELD_IA_HANDLE = 0x00000001, + DAT_EP_FIELD_EP_STATE = 0x00000002, + DAT_EP_FIELD_LOCAL_IA_ADDRESS_PTR = 0x00000004, + DAT_EP_FIELD_LOCAL_PORT_QUAL = 0x00000008, + DAT_EP_FIELD_REMOTE_IA_ADDRESS_PTR = 0x00000010, + DAT_EP_FIELD_REMOTE_PORT_QUAL = 0x00000020, + DAT_EP_FIELD_PZ_HANDLE = 0x00000040, + DAT_EP_FIELD_RECV_EVD_HANDLE = 0x00000080, + DAT_EP_FIELD_REQUEST_EVD_HANDLE = 0x00000100, + DAT_EP_FIELD_CONNECT_EVD_HANDLE = 0x00000200, + + /* Remainder of values from EP_ATTR, 0x00001000 and up */ + + DAT_EP_FIELD_EP_ATTR_SERVICE_TYPE = 0x00001000, + DAT_EP_FIELD_EP_ATTR_MAX_MESSAGE_SIZE = 0x00002000, + DAT_EP_FIELD_EP_ATTR_MAX_RDMA_SIZE = 0x00004000, + DAT_EP_FIELD_EP_ATTR_QOS = 0x00008000, + + DAT_EP_FIELD_EP_ATTR_RECV_COMPLETION_FLAGS = 0x00010000, + DAT_EP_FIELD_EP_ATTR_REQUEST_COMPLETION_FLAGS = 0x00020000, + DAT_EP_FIELD_EP_ATTR_MAX_RECV_DTOS = 0x00040000, + DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_DTOS = 0x00080000, + + DAT_EP_FIELD_EP_ATTR_MAX_RECV_IOV = 0x00100000, + DAT_EP_FIELD_EP_ATTR_MAX_REQUEST_IOV = 0x00200000, + + DAT_EP_FIELD_EP_ATTR_MAX_RDMA_READ_IN = 0x00400000, + DAT_EP_FIELD_EP_ATTR_MAX_RDMA_READ_OUT = 0x00800000, + + DAT_EP_FIELD_EP_ATTR_NUM_TRANSPORT_ATTR = 0x01000000, + DAT_EP_FIELD_EP_ATTR_TRANSPORT_SPECIFIC_ATTR = 0x02000000, + + DAT_EP_FIELD_EP_ATTR_NUM_PROVIDER_ATTR = 0x04000000, + DAT_EP_FIELD_EP_ATTR_PROVIDER_SPECIFIC_ATTR = 0x08000000, + + DAT_EP_FIELD_EP_ATTR_ALL = 0x0FFFF000, + DAT_EP_FIELD_ALL = 0x0FFFF5FF + } DAT_EP_PARAM_MASK; + +/* PZ Parameters */ + +typedef struct dat_pz_param + { + 
DAT_IA_HANDLE ia_handle; + } DAT_PZ_PARAM; + +typedef enum dat_pz_param_mask + { + DAT_PZ_FIELD_IA_HANDLE = 0x01, + + DAT_PZ_FIELD_ALL = 0x01 + } DAT_PZ_PARAM_MASK; + +/* PSP Parameters */ + +typedef struct dat_psp_param + { + DAT_IA_HANDLE ia_handle; + DAT_CONN_QUAL conn_qual; + DAT_EVD_HANDLE evd_handle; + DAT_PSP_FLAGS psp_flags; + } DAT_PSP_PARAM; + +typedef enum dat_psp_param_mask + { + DAT_PSP_FIELD_IA_HANDLE = 0x01, + DAT_PSP_FIELD_CONN_QUAL = 0x02, + DAT_PSP_FIELD_EVD_HANDLE = 0x04, + DAT_PSP_FIELD_PSP_FLAGS = 0x08, + + DAT_PSP_FIELD_ALL = 0x0F + } DAT_PSP_PARAM_MASK; + +/* RSP Parameters */ + +typedef struct dat_rsp_param + { + DAT_IA_HANDLE ia_handle; + DAT_CONN_QUAL conn_qual; + DAT_EVD_HANDLE evd_handle; + DAT_EP_HANDLE ep_handle; + } DAT_RSP_PARAM; + +typedef enum dat_rsp_param_mask + { + DAT_RSP_FIELD_IA_HANDLE = 0x01, + DAT_RSP_FIELD_CONN_QUAL = 0x02, + DAT_RSP_FIELD_EVD_HANDLE = 0x04, + DAT_RSP_FIELD_EP_HANDLE = 0x08, + + DAT_RSP_FIELD_ALL = 0x0F + } DAT_RSP_PARAM_MASK; + +/* Connection Request Parameters. + * + * The Connection Request does not provide Remote Endpoint attributes. + * If a local Consumer needs this information, the remote Consumer should + * encode it into Private Data. + */ + +typedef struct dat_cr_param + { + /* Remote IA whose Endpoint requested the connection. */ + DAT_IA_ADDRESS_PTR remote_ia_address_ptr; + + /* Port qualifier of the remote Endpoint of the requested connection. */ + DAT_PORT_QUAL remote_port_qual; + + /* Size of the Private Data. */ + DAT_COUNT private_data_size; + + /* Pointer to the Private Data passed by remote side in the Connection + * Request. */ + DAT_PVOID private_data; + + /* The local Endpoint provided by the Service Point for the requested + * connection. It is the only Endpoint that can accept a Connection + * Request on this Service Point. The value DAT_HANDLE_NULL represents + * that there is no associated local Endpoint for the requested + * connection. 
*/ + DAT_EP_HANDLE local_ep_handle; + + } DAT_CR_PARAM; + +typedef enum dat_cr_param_mask + { + DAT_CR_FIELD_REMOTE_IA_ADDRESS_PTR = 0x01, + DAT_CR_FIELD_REMOTE_PORT_QUAL = 0x02, + DAT_CR_FIELD_PRIVATE_DATA_SIZE = 0x04, + DAT_CR_FIELD_PRIVATE_DATA = 0x08, + DAT_CR_FIELD_LOCAL_EP_HANDLE = 0x10, + + DAT_CR_FIELD_ALL = 0x1F + } DAT_CR_PARAM_MASK; + +/************************** Events ******************************************/ + +/* Completion status flags */ + + /* dto completion status */ + + /* For backwards compatability */ +#define DAT_DTO_LENGTH_ERROR DAT_DTO_ERR_LOCAL_LENGTH +#define DAT_DTO_FAILURE DAT_DTO_ERR_FLUSHED + +typedef enum dat_dto_completion_status + { + DAT_DTO_SUCCESS = 0, + DAT_DTO_ERR_FLUSHED = 1, + DAT_DTO_ERR_LOCAL_LENGTH = 2, + DAT_DTO_ERR_LOCAL_EP = 3, + DAT_DTO_ERR_LOCAL_PROTECTION = 4, + DAT_DTO_ERR_BAD_RESPONSE = 5, + DAT_DTO_ERR_REMOTE_ACCESS = 6, + DAT_DTO_ERR_REMOTE_RESPONDER = 7, + DAT_DTO_ERR_TRANSPORT = 8, + DAT_DTO_ERR_RECEIVER_NOT_READY = 9, + DAT_DTO_ERR_PARTIAL_PACKET = 10, + DAT_RMR_OPERATION_FAILED = 11 + } DAT_DTO_COMPLETION_STATUS; + + /* rmr completion status */ + + /* For backwards compatability */ +#define DAT_RMR_BIND_SUCCESS DAT_DTO_SUCCESS +#define DAT_RMR_BIND_FAILURE DAT_DTO_ERR_FLUSHED + +#define DAT_RMR_BIND_COMPLETION_STATUS DAT_DTO_COMPLETION_STATUS + +/* Completion group structs (six total) */ + + /* DTO completion event data */ +typedef struct dat_dto_completion_event_data + { + DAT_EP_HANDLE ep_handle; + DAT_DTO_COOKIE user_cookie; + DAT_DTO_COMPLETION_STATUS status; + DAT_VLEN transfered_length; + } DAT_DTO_COMPLETION_EVENT_DATA; + + /* rmr bind completion event data */ +typedef struct dat_rmr_bind_completion_event_data + { + DAT_RMR_HANDLE rmr_handle; + DAT_RMR_COOKIE user_cookie; + DAT_RMR_BIND_COMPLETION_STATUS status; + } DAT_RMR_BIND_COMPLETION_EVENT_DATA; + +typedef union dat_sp_handle +{ + DAT_RSP_HANDLE rsp_handle; + DAT_PSP_HANDLE psp_handle; +} DAT_SP_HANDLE; + + /* Connection Request Arrival event data */ +typedef struct dat_cr_arrival_event_data + { + /* Handle to the Service Point that received the Connection Request from + * the remote side. If the Service Point was Reserved, sp is DAT_HANDLE_NULL + * because the reserved Service Point is automatically destroyed upon + * generating this event. Can be PSP or RSP. */ + DAT_SP_HANDLE sp_handle; + + /* Address of the IA on which the Connection Request arrived. */ + DAT_IA_ADDRESS_PTR local_ia_address_ptr; + + /* Connection Qualifier of the IA on which the Service Point received a + * Connection Request. */ + DAT_CONN_QUAL conn_qual; + + /* The Connection Request instance created by a Provider for the arrived + * Connection Request. Consumers can find out private_data passed by a remote + * Consumer from cr_handle. It is up to a Consumer to dat_cr_accept or + * dat_cr_reject of the Connection Request. 
*/ + DAT_CR_HANDLE cr_handle; + + } DAT_CR_ARRIVAL_EVENT_DATA; + + + /* Connection event data */ +typedef struct dat_connection_event_data + { + DAT_EP_HANDLE ep_handle; + DAT_COUNT private_data_size; + DAT_PVOID private_data; + } DAT_CONNECTION_EVENT_DATA; + + /* Async Error event data */ +typedef struct dat_asynch_error_event_data + { + DAT_IA_HANDLE ia_handle; + } DAT_ASYNCH_ERROR_EVENT_DATA; + + /* SE completion event data */ +typedef struct dat_software_event_data + { + DAT_PVOID pointer; + } DAT_SOFTWARE_EVENT_DATA; + +typedef enum dat_event_number + { + DAT_DTO_COMPLETION_EVENT = 0x00001, + + DAT_RMR_BIND_COMPLETION_EVENT = 0x01001, + + DAT_CONNECTION_REQUEST_EVENT = 0x02001, + + DAT_CONNECTION_EVENT_ESTABLISHED = 0x04001, + DAT_CONNECTION_EVENT_PEER_REJECTED = 0x04002, + DAT_CONNECTION_EVENT_NON_PEER_REJECTED = 0x04003, + DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR = 0x04004, + DAT_CONNECTION_EVENT_DISCONNECTED = 0x04005, + DAT_CONNECTION_EVENT_BROKEN = 0x04006, + DAT_CONNECTION_EVENT_TIMED_OUT = 0x04007, + DAT_CONNECTION_EVENT_UNREACHABLE = 0x04008, + + DAT_ASYNC_ERROR_EVD_OVERFLOW = 0x08001, + DAT_ASYNC_ERROR_IA_CATASTROPHIC = 0x08002, + DAT_ASYNC_ERROR_EP_BROKEN = 0x08003, + DAT_ASYNC_ERROR_TIMED_OUT = 0x08004, + DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR = 0x08005, + + DAT_SOFTWARE_EVENT = 0x10001 + } DAT_EVENT_NUMBER; + +/* Union for event Data */ + +typedef union dat_event_data + { + DAT_DTO_COMPLETION_EVENT_DATA dto_completion_event_data; + DAT_RMR_BIND_COMPLETION_EVENT_DATA rmr_completion_event_data; + DAT_CR_ARRIVAL_EVENT_DATA cr_arrival_event_data; + DAT_CONNECTION_EVENT_DATA connect_event_data; + DAT_ASYNCH_ERROR_EVENT_DATA asynch_error_event_data; + DAT_SOFTWARE_EVENT_DATA software_event_data; + } DAT_EVENT_DATA; + +/* Event struct that holds all event information */ + +typedef struct dat_event + { + DAT_EVENT_NUMBER event_number; + DAT_EVD_HANDLE evd_handle; + DAT_EVENT_DATA event_data; + } DAT_EVENT; + +/* Provider/registration info */ + +typedef struct dat_provider_info { + char ia_name[DAT_NAME_MAX_LENGTH]; + DAT_UINT32 dapl_version_major; + DAT_UINT32 dapl_version_minor; + DAT_BOOLEAN is_thread_safe; + } DAT_PROVIDER_INFO ; + +/**************************************************************************** + * FUNCTION PROTOTYPES + ****************************************************************************/ +/* + * IA functions + * + * Note that there are actual 'dat_ia_open' and 'dat_ia_close' + * functions, it is not just a re-directing #define. That is + * because the functions may have to ensure that the provider + * library is loaded before it can call it, and may choose to + * unload the library after the last close. 
+ */ + +typedef DAT_RETURN (DAT_API *DAT_IA_OPEN_FUNC)( + IN const DAT_NAME_PTR, /* provider */ + IN DAT_COUNT, /* asynch_evd_min_qlen */ + INOUT DAT_EVD_HANDLE *, /* asynch_evd_handle */ + OUT DAT_IA_HANDLE *); /* ia_handle */ + +typedef DAT_RETURN (DAT_API *DAT_IA_OPENV_FUNC)( + IN const DAT_NAME_PTR, /* provider */ + IN DAT_COUNT, /* asynch_evd_min_qlen */ + INOUT DAT_EVD_HANDLE *, /* asynch_evd_handle */ + OUT DAT_IA_HANDLE *, /* ia_handle */ + IN DAT_UINT32, /* dat major version number */ + IN DAT_UINT32, /* dat minor version number */ + IN DAT_BOOLEAN); /* dat thread safety */ + +typedef DAT_RETURN (DAT_API *DAT_IA_CLOSE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CLOSE_FLAGS ); /* close_flags */ + +/* helper functions */ + +typedef DAT_RETURN (DAT_API *DAT_SET_CONSUMER_CONTEXT_FUNC)( + IN DAT_HANDLE, /* dat handle */ + IN DAT_CONTEXT); /* context */ + +typedef DAT_RETURN (DAT_API *DAT_GET_CONSUMER_CONTEXT_FUNC)( + IN DAT_HANDLE, /* dat handle */ + OUT DAT_CONTEXT * ); /* context */ + +typedef DAT_RETURN (DAT_API *DAT_GET_HANDLE_TYPE_FUNC)( + IN DAT_HANDLE, + OUT DAT_HANDLE_TYPE * ); + +/* CR Functions */ + +typedef DAT_RETURN (DAT_API *DAT_CR_QUERY_FUNC)( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_CR_PARAM_MASK, /* cr_param_mask */ + OUT DAT_CR_PARAM * ); /* cr_param */ + +typedef DAT_RETURN (DAT_API *DAT_CR_ACCEPT_FUNC)( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* private_data_size */ + IN const DAT_PVOID ); /* private_data */ + +typedef DAT_RETURN (DAT_API *DAT_CR_REJECT_FUNC)( + IN DAT_CR_HANDLE ); + +/* For DAT-1.1 this function is defined for both uDAPL and kDAPL. + * For DAT-1.0 it was only defined for uDAPL. + */ +typedef DAT_RETURN (DAT_API *DAT_CR_HANDOFF_FUNC)( + IN DAT_CR_HANDLE, /* cr_handle */ + IN DAT_CONN_QUAL); /* handoff */ + +/* EVD Functions */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_RESIZE_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_COUNT ); /* evd_min_qlen */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_POST_SE_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN const DAT_EVENT * ); /* event */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_DEQUEUE_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + OUT DAT_EVENT * ); /* event */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_FREE_FUNC)( + IN DAT_EVD_HANDLE ); + +/* ep functions */ + +typedef DAT_RETURN (DAT_API *DAT_EP_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_PZ_HANDLE, /* pz_handle */ + IN DAT_EVD_HANDLE, /* recv_completion_evd_handle */ + IN DAT_EVD_HANDLE, /* request_completion_evd_handle */ + IN DAT_EVD_HANDLE, /* connect_evd_handle */ + IN const DAT_EP_ATTR *, /* ep_attributes */ + OUT DAT_EP_HANDLE * ); /* ep_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EP_QUERY_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_EP_PARAM_MASK, /* ep_param_mask */ + OUT DAT_EP_PARAM * ); /* ep_param */ + +typedef DAT_RETURN (DAT_API *DAT_EP_MODIFY_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_EP_PARAM_MASK, /* ep_param_mask */ + IN const DAT_EP_PARAM * ); /* ep_param */ + +typedef DAT_RETURN (DAT_API *DAT_EP_CONNECT_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_IA_ADDRESS_PTR, /* remote_ia_address */ + IN DAT_CONN_QUAL, /* remote_conn_qual */ + IN DAT_TIMEOUT, /* timeout */ + IN DAT_COUNT, /* private_data_size */ + IN const DAT_PVOID, /* private_data */ + IN DAT_QOS, /* quality_of_service */ + IN DAT_CONNECT_FLAGS ); /* connect_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_DUP_CONNECT_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_EP_HANDLE, /* 
ep_dup_handle */ + IN DAT_TIMEOUT, /* timeout */ + IN DAT_COUNT, /* private_data_size */ + IN const DAT_PVOID, /* private_data */ + IN DAT_QOS); /* quality_of_service */ + +typedef DAT_RETURN (DAT_API *DAT_EP_DISCONNECT_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_CLOSE_FLAGS ); /* close_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_POST_SEND_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_POST_RECV_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_POST_RDMA_READ_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN const DAT_RMR_TRIPLET *,/* remote_iov */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_POST_RDMA_WRITE_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_COUNT, /* num_segments */ + IN DAT_LMR_TRIPLET *, /* local_iov */ + IN DAT_DTO_COOKIE, /* user_cookie */ + IN const DAT_RMR_TRIPLET *,/* remote_iov */ + IN DAT_COMPLETION_FLAGS ); /* completion_flags */ + +typedef DAT_RETURN (DAT_API *DAT_EP_GET_STATUS_FUNC)( + IN DAT_EP_HANDLE, /* ep_handle */ + OUT DAT_EP_STATE *, /* ep_state */ + OUT DAT_BOOLEAN *, /* recv_idle */ + OUT DAT_BOOLEAN * ); /* request_idle */ + +typedef DAT_RETURN (DAT_API *DAT_EP_FREE_FUNC)( + IN DAT_EP_HANDLE); /* ep_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EP_RESET_FUNC)( + IN DAT_EP_HANDLE); /* ep_handle */ + +/* LMR functions */ + +typedef DAT_RETURN (DAT_API *DAT_LMR_FREE_FUNC)( + IN DAT_LMR_HANDLE); + +/* RMR Functions */ + +typedef DAT_RETURN (DAT_API *DAT_RMR_CREATE_FUNC)( + IN DAT_PZ_HANDLE, /* pz_handle */ + OUT DAT_RMR_HANDLE *); /* rmr_handle */ + +typedef DAT_RETURN (DAT_API *DAT_RMR_QUERY_FUNC)( + IN DAT_RMR_HANDLE, /* rmr_handle */ + IN DAT_RMR_PARAM_MASK, /* rmr_param_mask */ + OUT DAT_RMR_PARAM *); /* rmr_param */ + +typedef DAT_RETURN (DAT_API *DAT_RMR_BIND_FUNC)( + IN DAT_RMR_HANDLE, /* rmr_handle */ + IN const DAT_LMR_TRIPLET *,/* lmr_triplet */ + IN DAT_MEM_PRIV_FLAGS, /* mem_priv */ + IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_RMR_COOKIE, /* user_cookie */ + IN DAT_COMPLETION_FLAGS, /* completion_flags */ + OUT DAT_RMR_CONTEXT * ); /* context */ + +typedef DAT_RETURN (DAT_API *DAT_RMR_FREE_FUNC)( + IN DAT_RMR_HANDLE); + +/* PSP Functions */ + +typedef DAT_RETURN (DAT_API *DAT_PSP_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CONN_QUAL, /* conn_qual */ + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_PSP_FLAGS, /* psp_flags */ + OUT DAT_PSP_HANDLE * ); /* psp_handle */ + +typedef DAT_RETURN (DAT_API *DAT_PSP_CREATE_ANY_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + OUT DAT_CONN_QUAL *, /* conn_qual */ + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_PSP_FLAGS, /* psp_flags */ + OUT DAT_PSP_HANDLE * ); /* psp_handle */ + +typedef DAT_RETURN (DAT_API *DAT_PSP_QUERY_FUNC)( + IN DAT_PSP_HANDLE, + IN DAT_PSP_PARAM_MASK, + OUT DAT_PSP_PARAM * ); + +typedef DAT_RETURN (DAT_API *DAT_PSP_FREE_FUNC)( + IN DAT_PSP_HANDLE ); /* psp_handle */ + +/* RSP Functions */ + +typedef DAT_RETURN (DAT_API *DAT_RSP_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CONN_QUAL, /* conn_qual */ + 
IN DAT_EP_HANDLE, /* ep_handle */ + IN DAT_EVD_HANDLE, /* evd_handle */ + OUT DAT_RSP_HANDLE * ); /* rsp_handle */ + +typedef DAT_RETURN (DAT_API *DAT_RSP_QUERY_FUNC) ( + IN DAT_RSP_HANDLE, + IN DAT_RSP_PARAM_MASK, + OUT DAT_RSP_PARAM * ); + +typedef DAT_RETURN (DAT_API *DAT_RSP_FREE_FUNC)( + IN DAT_RSP_HANDLE ); /* rsp_handle */ + +/* PZ Functions */ + +typedef DAT_RETURN (DAT_API *DAT_PZ_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + OUT DAT_PZ_HANDLE * ); /* pz_handle */ + +typedef DAT_RETURN (DAT_API *DAT_PZ_QUERY_FUNC)( + IN DAT_PZ_HANDLE, /* pz_handle */ + IN DAT_PZ_PARAM_MASK, /* pz_param_mask */ + OUT DAT_PZ_PARAM *); /* pz_param */ + +typedef DAT_RETURN (DAT_API *DAT_PZ_FREE_FUNC)( + IN DAT_PZ_HANDLE ); /* pz_handle */ + +/* + * DAT registry functions. + * + * Note the dat_ia_open and dat_ia_close functions are linked to + * registration code which "redirects" to the appropriate provider. + */ + +DAT_EXPORT DAT_RETURN DAT_API dat_ia_openv ( + IN const DAT_NAME_PTR, /* device name */ + IN DAT_COUNT, /* async_evd_qlen */ + INOUT DAT_EVD_HANDLE *, /* async_evd_handle */ + OUT DAT_IA_HANDLE *, /* ia_handle */ + IN DAT_UINT32, /* dat major version number */ + IN DAT_UINT32, /* dat minor version number */ + IN DAT_BOOLEAN); /* dat thread safety */ + +#define dat_ia_open(name,qlen,async_evd,ia) \ + dat_ia_openv((name), (qlen), (async_evd), (ia), \ + DAT_VERSION_MAJOR, DAT_VERSION_MINOR, \ + DAT_THREADSAFE) + +DAT_EXPORT DAT_RETURN DAT_API dat_ia_close ( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_CLOSE_FLAGS ); /* close_flags */ + +DAT_EXPORT DAT_RETURN DAT_API dat_registry_list_providers( + IN DAT_COUNT, /* max_to_return */ + OUT DAT_COUNT *, /* entries_returned */ + OUT DAT_PROVIDER_INFO *(dat_provider_list[]) ); /* dat_provider_list */ + +typedef DAT_RETURN ( DAT_API *DAT_REGISTRY_LIST_PROVIDERS_FUNC)( + IN DAT_COUNT, /* max_to_return */ + OUT DAT_COUNT *, /* entries_returned */ + OUT DAT_PROVIDER_INFO *(dat_provider_list[])); /* dat_provider_list */ + +/* + * DAT error functions. + */ +DAT_EXPORT DAT_RETURN DAT_API dat_strerror ( + IN DAT_RETURN, /* dat function return */ + OUT const char ** , /* major message string */ + OUT const char ** ); /* minor message string */ + +#ifdef __cplusplus +} +#endif + +#endif /* _DAT_H_ */ + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat_error.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat_error.h new file mode 100644 index 00000000..67e46f09 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat_error.h @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. 
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/***********************************************************
+ *
+ * HEADER: dat_error.h
+ *
+ * PURPOSE: DAT return codes
+ *
+ * Description: Header file for "uDAPL: User Direct Access Programming
+ *              Library, Version: 1.1"
+ *
+ * Mapping rules: Major error codes occupy the upper 16 bits of a 32 bit
+ *                field; detailed error codes occupy the lower 16 bits.
+ *
+ *
+ *********************************************************/
+
+#ifndef _DAT_ERROR_H_
+#define _DAT_ERROR_H_
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+/*
+ *
+ * All return codes are actually a 3-way tuple:
+ *
+ * type:  CLASS    DAT_TYPE_STATUS    DAT_SUBTYPE_STATUS
+ * bits:  31-30    29-16              15-0
+ *
+ * +----------------------------------------------------------------------+
+ * |3130 | 2928272625242322212019181716 | 15141312111009080706050403020100|
+ * |CLAS | DAT_TYPE_STATUS              | SUBTYPE_STATUS                  |
+ * +----------------------------------------------------------------------+
+ */
+
+/*
+ * Class Bits
+ */
+#define DAT_CLASS_ERROR     0x80000000
+#define DAT_CLASS_WARNING   0x40000000
+#define DAT_CLASS_SUCCESS   0x00000000
+
+/*
+ * DAT Error bits
+ */
+#define DAT_TYPE_MASK       0x3fff0000  /* mask for DAT_TYPE_STATUS bits */
+#define DAT_SUBTYPE_MASK    0x0000FFFF  /* mask for DAT_SUBTYPE_STATUS bits */
+
+/*
+ * Determining the success of an operation is best done with a macro;
+ * each of these returns a boolean value.
+ */
+#define DAT_IS_SUCCESS(status)      ((int)(status) >= 0)
+#define DAT_IS_WARNING(status)      ((DAT_UINT32)(status) & DAT_CLASS_WARNING)
+
+#define DAT_GET_TYPE(status)        ((DAT_UINT32)(status) & DAT_TYPE_MASK)
+#define DAT_GET_SUBTYPE(status)     ((DAT_UINT32)(status) & DAT_SUBTYPE_MASK)
+
+/*
+ * DAT return types. The ERROR bit is enabled for these definitions.
+ */
+typedef enum dat_return
+    {
+    /* The operation was successful. */
+    DAT_SUCCESS                 = 0x000000,
+
+    /* The operation was aborted because the IA was closed or the EVD
+     * was destroyed. */
+    DAT_ABORT                   = 0x00010000,
+
+    /* The specified Connection Qualifier was in use. */
+    DAT_CONN_QUAL_IN_USE        = 0x00020000,
+
+    /* The operation failed due to resource limitations. */
+    DAT_INSUFFICIENT_RESOURCES  = 0x00030000,
+
+    /* Provider internal error. This error can be returned by any
+     * operation when the Provider has detected an internal error.
+     * This error does not mask any error caused by the Consumer. */
+    DAT_INTERNAL_ERROR          = 0x00040000,
+
+    /* One of the DAT handles was invalid. */
+    DAT_INVALID_HANDLE          = 0x00050000,
+
+    /* One of the parameters was invalid. */
+    DAT_INVALID_PARAMETER       = 0x00060000,
+
+    /* One of the parameters was invalid for this operation; for
+     * example, there are Event Streams associated with the Event
+     * Dispatcher feeding it. */
+    DAT_INVALID_STATE           = 0x00070000,
+
+    /* The size of the receiving buffer is too small for the sending
+     * buffer data. The size of the local buffer is too small for
+     * the data of the remote buffer. */
+    DAT_LENGTH_ERROR            = 0x00080000,
+
+    /* The requested Model was not supported by the Provider. */
+    DAT_MODEL_NOT_SUPPORTED     = 0x00090000,
+
+    /* The specified IA name was not found in the list of registered
+     * Providers. */
+
+    DAT_PROVIDER_NOT_FOUND      = 0x000A0000,
+
+    /* Protection violation for local or remote memory
+     * access. Protection Zone mismatch between an LMR of one of the
+     * local_iov segments and the local Endpoint.
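+     *
+     * (General aside, illustrative only: a Consumer typically decodes
+     * any of these codes with the macros above, or obtains printable
+     * strings with dat_strerror, declared in dat.h:
+     *
+     *     const char *major, *minor;
+     *
+     *     if ( !DAT_IS_SUCCESS (rc) )
+     *     {
+     *         dat_strerror (rc, &major, &minor);
+     *         ... log major and minor ...
+     *     }
+     *
+     * where rc is the DAT_RETURN of any DAT call.)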
*/ + DAT_PRIVILEGES_VIOLATION = 0x000B0000, + + /* Privileges violation for local or re-mote memory access. One * + * of the LMRs used in local_iov was either invalid or did not * + * have the local read privileges. */ + DAT_PROTECTION_VIOLATION = 0x000C0000, + + /* The operation timed out without a notification. */ + DAT_QUEUE_EMPTY = 0x000D0000, + + /* The Event Dispatcher queue is full. */ + DAT_QUEUE_FULL = 0x000E0000, + + /* The operation timed out. UDAPL ONLY */ + DAT_TIMEOUT_EXPIRED = 0x000F0000, + + /* The provider name was already registered */ + DAT_PROVIDER_ALREADY_REGISTERED = 0x00100000, + + /* The provider is "in-use" and cannot be closed at this time */ + DAT_PROVIDER_IN_USE = 0x00110000, + + /* The requested remote address is not valid or not reachable */ + DAT_INVALID_ADDRESS = 0x00120000, + + /* [Unix only] dat_evd_wait or dat_cno_wait has been interrupted. */ + DAT_INTERRUPTED_CALL = 0x00130000, + + /* No Connection Qualifiers are available */ + DAT_CONN_QUAL_UNAVAILABLE = 0x00140000, + DAT_NOT_IMPLEMENTED = 0x0FFF0000 + + } DAT_RETURN; + + +/* + * DAT_RETURN_SUBTYPE + * + * The 16 bits of subtype information allow for 256 different + * values. + */ + +typedef enum dat_return_subtype + { + /* First element is no subtype */ + DAT_NO_SUBTYPE, + /* ABORT sub types */ + /* call was interrupted by a signal, or otherwise */ + DAT_SUB_INTERRUPTED, + + /* DAT_CONN_QUAL_IN_USE has no subtypes */ + + /* INSUFFICIENT_RESOURCES subtypes */ + DAT_RESOURCE_MEMORY, + DAT_RESOURCE_DEVICE, + DAT_RESOURCE_TEP, /* transport endpoint, e.g. QP */ + DAT_RESOURCE_TEVD, /* transport EVD, e.g. CQ */ + DAT_RESOURCE_PROTECTION_DOMAIN, + DAT_RESOURCE_MEMORY_REGION, /* HCA memory for LMR or RMR */ + DAT_RESOURCE_ERROR_HANDLER, + DAT_RESOURCE_CREDITS, /* e.g outstanding RDMA Read credit as target */ + + /* DAT_INTERNAL_ERROR has no subtypes */ + + /* INVALID_HANDLE subtypes */ + DAT_INVALID_HANDLE_IA, + DAT_INVALID_HANDLE_EP, + DAT_INVALID_HANDLE_LMR, + DAT_INVALID_HANDLE_RMR, + DAT_INVALID_HANDLE_PZ, + DAT_INVALID_HANDLE_PSP, + DAT_INVALID_HANDLE_RSP, + DAT_INVALID_HANDLE_CR, + DAT_INVALID_HANDLE_CNO, + DAT_INVALID_HANDLE_EVD_CR, + DAT_INVALID_HANDLE_EVD_REQUEST, + DAT_INVALID_HANDLE_EVD_RECV, + DAT_INVALID_HANDLE_EVD_CONN, + DAT_INVALID_HANDLE_EVD_ASYNC, + + /* DAT_INVALID_PARAMETER subtypes */ + DAT_INVALID_ARG1, + DAT_INVALID_ARG2, + DAT_INVALID_ARG3, + DAT_INVALID_ARG4, + DAT_INVALID_ARG5, + DAT_INVALID_ARG6, + DAT_INVALID_ARG7, + DAT_INVALID_ARG8, + DAT_INVALID_ARG9, + DAT_INVALID_ARG10, + + /* DAT_INVALID_EP_STATE subtypes */ + DAT_INVALID_STATE_EP_UNCONNECTED, + DAT_INVALID_STATE_EP_ACTCONNPENDING, + DAT_INVALID_STATE_EP_PASSCONNPENDING, + DAT_INVALID_STATE_EP_TENTCONNPENDING, + DAT_INVALID_STATE_EP_CONNECTED, + DAT_INVALID_STATE_EP_DISCONNECTED, + DAT_INVALID_STATE_EP_RESERVED, + DAT_INVALID_STATE_EP_COMPLPENDING, + DAT_INVALID_STATE_EP_DISCPENDING, + DAT_INVALID_STATE_EP_PROVIDERCONTROL, + DAT_INVALID_STATE_EP_NOTREADY, + + DAT_INVALID_STATE_CNO_IN_USE, + DAT_INVALID_STATE_CNO_DEAD, + + /* EVD states. Enabled/Disabled, Waitable/Unwaitable, * + * and Notify/Solicited/Threshold are 3 orthogonal * + * bands of EVD state.The Threshold one is uDAPL specific. 
*/ + DAT_INVALID_STATE_EVD_OPEN, + /* EVD can be either in enabled or disabled but not both * + * or neither at the same time */ + DAT_INVALID_STATE_EVD_ENABLED, + DAT_INVALID_STATE_EVD_DISABLED, + /* EVD can be either in waitable or unwaitable but not * + * both or neither at the same time */ + DAT_INVALID_STATE_EVD_WAITABLE, + DAT_INVALID_STATE_EVD_UNWAITABLE, + /* Do not release an EVD if it is in use */ + DAT_INVALID_STATE_EVD_IN_USE, + + /* EVD can be either in notify or solicited or threshold * + * but not any pair, or all, or none at the same time. * + * The threshold one is for uDAPL only */ + DAT_INVALID_STATE_EVD_CONFIG_NOTIFY, + DAT_INVALID_STATE_EVD_CONFIG_SOLICITED, + DAT_INVALID_STATE_EVD_CONFIG_THRESHOLD, + DAT_INVALID_STATE_EVD_WAITER, + DAT_INVALID_STATE_EVD_ASYNC, /* Async EVD required */ + DAT_INVALID_STATE_IA_IN_USE, + DAT_INVALID_STATE_LMR_IN_USE, + DAT_INVALID_STATE_LMR_FREE, + DAT_INVALID_STATE_PZ_IN_USE, + DAT_INVALID_STATE_PZ_FREE, + + /* DAT_LENGTH_ERROR has no subtypes */ + /* DAT_MODEL_NOT_SUPPORTED has no subtypes */ + + /* DAT_PROVIDER_NOT_FOUND has no subtypes */ + + /* DAT_PRIVILEGES_VIOLATION subtypes */ + DAT_PRIVILEGES_READ, + DAT_PRIVILEGES_WRITE, + DAT_PRIVILEGES_RDMA_READ, + DAT_PRIVILEGES_RDMA_WRITE, + + /* DAT_PROTECTION_VIOLATION subtypes */ + DAT_PROTECTION_READ, + DAT_PROTECTION_WRITE, + DAT_PROTECTION_RDMA_READ, + DAT_PROTECTION_RDMA_WRITE, + + /* DAT_QUEUE_EMPTY has no subtypes */ + /* DAT_QUEUE_FULL has no subtypes */ + /* DAT_TIMEOUT_EXPIRED has no subtypes */ + /* DAT_PROVIDER_ALREADY_REGISTERED has no subtypes */ + /* DAT_PROVIDER_IN_USE has no subtypes */ + + /* DAT_INVALID_ADDRESS subtypes */ + /* Unsupported addresses - those that are not Malformed, * + * but are incorrect for use in DAT (regardless of local * + * routing capabilities): IPv6 Multicast Addresses (ff/8) * + * IPv4 Broadcast/Multicast Addresses */ + DAT_INVALID_ADDRESS_UNSUPPORTED, + /* Unreachable addresses - A Provider may know that certain * + * addresses are unreachable immediately. One examples would * + * be an IPv6 addresses on an IPv4-only system. This may also * + * be returned if it is known that there is no route to the * + * host. A Provider is not obligated to check for this + * condition. */ + DAT_INVALID_ADDRESS_UNREACHABLE, + /* Malformed addresses -- these cannot be valid in any context.* + * Those listed in RFC1884 section 2.3 as "Reserved" or + * "Unassigned". */ + DAT_INVALID_ADDRESS_MALFORMED, + + /* DAT_INTERRUPTED_CALL */ + + /* DAT_PROVIDER_NOT_FOUND subtypes. Erratta to the 1.1 spec */ + DAT_NAME_NOT_REGISTERED, + DAT_MAJOR_NOT_FOUND, + DAT_MINOR_NOT_FOUND, + DAT_THREAD_SAFETY_NOT_FOUND, + + + } DAT_RETURN_SUBTYPE; + +#ifdef __cplusplus +} +#endif + +#endif /* _DAT_ERROR_H_ */ + + + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat_platform_specific.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat_platform_specific.h new file mode 100644 index 00000000..31ab38f9 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat_platform_specific.h @@ -0,0 +1,184 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. 
+ * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dat_platform_specific.h + * + * PURPOSE: defines Platform specific types. + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * + **********************************************************************/ + +#ifndef _DAT_PLATFORM_SPECIFIC_H_ +#define _DAT_PLATFORM_SPECIFIC_H_ + +/* OS, processor, compiler type definitions. Add OS's as needed. */ + +/* + * This captures the alignment for the bus transfer from the HCA/IB chip + * to the main memory. + */ +#ifndef DAT_OPTIMAL_ALIGNMENT +#define DAT_OPTIMAL_ALIGNMENT 256 /* Performance optimal alignment */ +#endif /* DAT_OPTIMAL_ALIGNMENT */ + +/* Assume all O/Ss use sockaddr, for address family: IPv4 == AF_INET, + * IPv6 == AF_INET6. Use of "namelen" field indicated. + * + * The Interface Adaptor Address names an Interface Adaptor local or + * remote that is used for connection management and Name + * Service. The format of the dat_ia_address_ptr follows the normal + * socket programming practice of struct sockaddr *. DAT supports both + * IPv4 and IPv6 address families. Allocation and initialization of + * DAT IA address structures must follow normal Sockets programming + * procedures. The underlying type of the DAT IA address is the native + * struct sockaddr for each target operating system. In all cases, + * storage appropriate for the address family in use by the target + * Provider must be allocated. For instance, when IPv6 addressing is + * in use, this should be allocated as struct sockaddr_net6 . The + * sockaddr sa_family and, if present, sa_len fields must be + * initialized appropriately, as well as the address information. + * When passed across the DAPL API this storage is cast to the + * DAT_IA_ADDRESS_PTR type. It is the responsibility of the callee to + * verify that the sockaddr contains valid data for the requested + * operation. It is always the responsibility of the caller to manage + * the storage. 
+ * + * Code example for Linux: + * + * #include + * #include + * #include + * #include + * + * struct sockaddr_in6 addr; + * DAT_IA_ADDRESS_PTR ia_addr; + * int status, i; + * + * // Note: linux pton requires explicit encoding of IPv4 in IPv6 + * + * addr.sin6_family = AF_INET6; + * if (inet_pton(AF_INET6, "0:0:0:0:0:FFFF:192.168.0.1", + * &addr.sin6_addr) <= 0) + * return(-1); // Bad address or no address family support + * + * // initialize other necessary fields such as port, flow, etc + * + * ia_addr = (DAT_IA_ADDRESS_PTR) &addr; + * dat_ep_connect(ep_handle, ia_addr, conn_qual, timeout, 0, NULL, + * qos, DAT_CONNECT_DEFAULT_FLAG); + * + */ + +#if defined(sun) || defined(__sun) || defined (_sun_) || defined (__solaris__) /* Solaris */ +#include + +typedef uint32_t DAT_UINT32; /* Unsigned host order, 32 bits */ +typedef uint64_t DAT_UINT64; /* unsigned host order, 64 bits */ +typedef unsigned long long DAT_UVERYLONG; /* unsigned longest native to compiler */ + +typedef void * DAT_PVOID; +typedef int DAT_COUNT; + +#include +#include +typedef struct sockaddr DAT_SOCK_ADDR; /* Socket address header native to OS */ +typedef struct sockaddr_in6 DAT_SOCK_ADDR6; /* Socket address header native to OS */ + +#define DAT_AF_INET AF_INET +#define DAT_AF_INET6 AF_INET6 + +/* Solaris */ + +#elif defined(__linux__) /* Linux */ +#include + +typedef u_int32_t DAT_UINT32; /* unsigned host order, 32 bits */ +typedef u_int64_t DAT_UINT64; /* unsigned host order, 64 bits */ +typedef unsigned long long DAT_UVERYLONG; /* unsigned longest native to compiler */ + +typedef void * DAT_PVOID; +typedef int DAT_COUNT; + +#include +typedef struct sockaddr DAT_SOCK_ADDR; /* Socket address header native to OS */ +typedef struct sockaddr_in6 DAT_SOCK_ADDR6; /* Socket address header native to OS */ + +#define DAT_AF_INET AF_INET +#define DAT_AF_INET6 AF_INET6 + +/* Linux */ + +#elif defined(_MSC_VER) || defined(_WIN32) /* NT. MSC compiler, Win32 platform */ +#include +typedef unsigned __int32 DAT_UINT32; /* Unsigned host order, 32 bits */ +typedef unsigned __int64 DAT_UINT64; /* unsigned host order, 64 bits */ +typedef unsigned __int16 DAT_UINT16; +typedef unsigned __int8 DAT_UINT8; +typedef unsigned __int3264 __w64 DAT_UVERYLONG; /* unsigned longest native to compiler */ + +typedef void * DAT_PVOID; +typedef long DAT_COUNT; + +typedef struct sockaddr DAT_SOCK_ADDR; /* Socket address header native to OS */ +typedef struct sockaddr_in6 DAT_SOCK_ADDR6; /* Socket address header native to OS */ + +#define DAT_AF_INET AF_INET +#define DAT_AF_INET6 AF_INET6 + +#ifdef EXPORT_DAT_SYMBOLS +#define DAT_EXPORT __declspec( dllexport ) +#else +#define DAT_EXPORT __declspec( dllimport ) +#endif + +#define DAT_API __stdcall + +/* Win32 */ + +#else +#error dat_platform_specific.h : OS type not defined +#endif + +#ifndef IN +#define IN +#endif +#ifndef OUT +#define OUT +#endif +#ifndef INOUT +#define INOUT +#endif + +#endif /* _DAT_PLATFORM_SPECIFIC_H_ */ + + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat_redirection.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat_redirection.h new file mode 100644 index 00000000..7324a57c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat_redirection.h @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. 
The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: dat_redirection.h + * + * PURPOSE: Defines the common redirection macros + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + * + **********************************************************************/ + +#ifndef _DAT_REDIRECTION_H_ +#define _DAT_REDIRECTION_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef struct dat_provider DAT_PROVIDER; + +#ifndef DAT_HANDLE_TO_PROVIDER + +/* A utility macro to fetch the Provider Library for any object + * + * An alternate version could be defined for single library systems. + * it would look something like: + * extern const struct dat_ia my_single_ia_provider; + * #define DAT_HANDLE_TO_PROVIDER(ignore) &my_single_ia_provider + * + * This would allow a good compiler to avoid indirection overhead when + * making function calls. 
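+ *
+ * The default definition below assumes that every DAT object handed
+ * out by a Provider begins with a pointer to that Provider's method
+ * table. An illustrative handle layout (a sketch, not mandated by the
+ * spec) would be:
+ *
+ *     struct example_provider_ep {
+ *         DAT_PROVIDER *provider;      -- must be the first field
+ *         DAT_PVOID consumer_context;  -- provider-private state follows
+ *     };
+ *
+ * so that *(DAT_PROVIDER **)(handle) recovers the method table.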
+ */ + +#define DAT_HANDLE_TO_PROVIDER(handle) (*(DAT_PROVIDER **)(handle)) +#endif + +#define dat_ia_query(ia,evd,ia_msk,ia_ptr,p_msk,p_ptr) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->ia_query_func)(\ + (ia),\ + (evd),\ + (ia_msk),\ + (ia_ptr),\ + (p_msk),\ + (p_ptr)) + +#define dat_set_consumer_context(handle,context) \ + (*DAT_HANDLE_TO_PROVIDER(handle)->set_consumer_context_func)(\ + (handle),\ + (context)) + +#define dat_get_consumer_context(handle,context) \ + (*DAT_HANDLE_TO_PROVIDER(handle)->get_consumer_context_func)(\ + (handle),\ + (context)) + +#define dat_get_handle_type(handle,handle_type) \ + (*DAT_HANDLE_TO_PROVIDER(handle)->get_handle_type_func)(\ + (handle),\ + (handle_type)) + +#define dat_cr_query(cr,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(cr)->cr_query_func)(\ + (cr),\ + (mask),\ + (param)) + +#define dat_cr_accept(cr,ep,size,pdata) \ + (*DAT_HANDLE_TO_PROVIDER(cr)->cr_accept_func)(\ + (cr),\ + (ep),\ + (size),\ + (pdata)) + +#define dat_cr_reject(cr) \ + (*DAT_HANDLE_TO_PROVIDER(cr)->cr_reject_func)(\ + (cr)) + +#define dat_evd_query(evd,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_query_func)(\ + (evd),\ + (mask),\ + (param)) + +#define dat_evd_resize(evd,qsize) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_resize_func)(\ + (evd),\ + (qsize)) + +#define dat_evd_post_se(evd,event) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_post_se_func)(\ + (evd),\ + (event)) + +#define dat_evd_dequeue(evd,event) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_dequeue_func)(\ + (evd),\ + (event)) + +#define dat_evd_free(evd)\ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_free_func)(\ + (evd)) + +#define dat_ep_create(ia,pz,in_evd,out_evd,connect_evd,attr,ep) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->ep_create_func)(\ + (ia),\ + (pz),\ + (in_evd),\ + (out_evd),\ + (connect_evd),\ + (attr),\ + (ep)) + +#define dat_ep_query(ep,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_query_func)(\ + (ep),\ + (mask),\ + (param)) + +#define dat_ep_modify(ep,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_modify_func)(\ + (ep),\ + (mask),\ + (param)) + +#define dat_ep_connect(ep,ia_addr,conn_qual,timeout,psize,pdata,qos,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_connect_func)(\ + (ep),\ + (ia_addr),\ + (conn_qual),\ + (timeout),\ + (psize),\ + (pdata),\ + (qos),\ + (flags)) + +#define dat_ep_dup_connect(ep,dup,timeout,psize,pdata,qos) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_dup_connect_func)(\ + (ep),\ + (dup),\ + (timeout),\ + (psize),\ + (pdata),\ + (qos)) + +#define dat_ep_disconnect(ep,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_disconnect_func)(\ + (ep),\ + (flags)) + +#define dat_ep_post_send(ep,size,lbuf,cookie,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_post_send_func)(\ + (ep),\ + (size),\ + (lbuf),\ + (cookie),\ + (flags)) + +#define dat_ep_post_recv(ep,size,lbuf,cookie,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_post_recv_func)(\ + (ep),\ + (size),\ + (lbuf),\ + (cookie),\ + (flags)) + +#define dat_ep_post_rdma_read(ep,size,lbuf,cookie,rbuf,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_post_rdma_read_func)(\ + (ep),\ + (size),\ + (lbuf),\ + (cookie),\ + (rbuf),\ + (flags)) + +#define dat_ep_post_rdma_write(ep,size,lbuf,cookie,rbuf,flags) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_post_rdma_write_func)(\ + (ep),\ + (size),\ + (lbuf),\ + (cookie),\ + (rbuf),\ + (flags)) + +#define dat_ep_get_status(ep,ep_state,recv_idle,request_idle) \ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_get_status_func)(\ + (ep),\ + (ep_state),\ + (recv_idle),\ + (request_idle)) + +#define dat_ep_free(ep)\ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_free_func)(\ + (ep)) + 
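+
+/* As an illustration of the redirection pattern above, a Consumer call
+ * such as dat_evd_dequeue(evd, &event) expands (with the default
+ * DAT_HANDLE_TO_PROVIDER) to an indirect call through the provider's
+ * method table:
+ *
+ *     (*(*(DAT_PROVIDER **)(evd))->evd_dequeue_func)((evd), (&event));
+ */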
+#define dat_ep_reset(ep)\ + (*DAT_HANDLE_TO_PROVIDER(ep)->ep_reset_func)(\ + (ep)) + +#define dat_lmr_query(lmr,mask,param)\ + (*DAT_HANDLE_TO_PROVIDER(lmr)->lmr_query_func)(\ + (lmr),\ + (mask),\ + (param)) + +#define dat_lmr_free(lmr)\ + (*DAT_HANDLE_TO_PROVIDER(lmr)->lmr_free_func)(\ + (lmr)) + +#define dat_rmr_create(pz,rmr) \ + (*DAT_HANDLE_TO_PROVIDER(pz)->rmr_create_func)(\ + (pz),\ + (rmr)) + +#define dat_rmr_query(rmr,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(rmr)->rmr_query_func)(\ + (rmr),\ + (mask),\ + (param)) + +#define dat_rmr_bind(rmr,lmr,mem_priv,ep,cookie,flags,context) \ + (*DAT_HANDLE_TO_PROVIDER(rmr)->rmr_bind_func)(\ + (rmr),\ + (lmr),\ + (mem_priv),\ + (ep),\ + (cookie),\ + (flags),\ + (context)) + +#define dat_rmr_free(rmr)\ + (*DAT_HANDLE_TO_PROVIDER(rmr)->rmr_free_func)(\ + (rmr)) + +#define dat_psp_create(ia,conn_qual,evd,flags,handle) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->psp_create_func)(\ + (ia),\ + (conn_qual),\ + (evd),\ + (flags),\ + (handle)) + +#define dat_psp_create_any(ia,conn_qual,evd,flags,handle) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->psp_create_any_func)(\ + (ia),\ + (conn_qual),\ + (evd),\ + (flags),\ + (handle)) + +#define dat_psp_query(psp,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(psp)->psp_query_func)(\ + (psp),\ + (mask),\ + (param)) + +#define dat_psp_free(psp)\ + (*DAT_HANDLE_TO_PROVIDER(psp)->psp_free_func)(\ + (psp)) + +#define dat_rsp_create(ia,conn_qual,ep,evd,handle) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->rsp_create_func)(\ + (ia),\ + (conn_qual),\ + (ep),\ + (evd),\ + (handle)) + +#define dat_rsp_query(rsp,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(rsp)->rsp_query_func)(\ + (rsp),\ + (mask),\ + (param)) + +#define dat_rsp_free(rsp)\ + (*DAT_HANDLE_TO_PROVIDER(rsp)->rsp_free_func)(\ + (rsp)) + +#define dat_pz_create(ia,pz) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->pz_create_func)(\ + (ia),\ + (pz)) + +#define dat_pz_query(pz,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(pz)->pz_query_func)(\ + (pz),\ + (mask),\ + (param)) + +#define dat_pz_free(pz) \ + (*DAT_HANDLE_TO_PROVIDER(pz)->pz_free_func)(\ + (pz)) + +#ifdef __cplusplus +} +#endif + +#endif /* _DAT_REDIRECTION_H_ */ + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat_registry.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat_registry.h new file mode 100644 index 00000000..c0b20384 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat_registry.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: dat_registry.h + * + * PURPOSE: DAT registration API signatures + * + * Description: Contains registration external reference signatures for + * dat registry functions. This file is *only* included by + * providers, not consumers. + * + * Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + * + **********************************************************************/ + +#ifndef _DAT_REGISTRY_H_ +#define _DAT_REGISTRY_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +/* + * dat registration API. + * + * Technically the dat_ia_open is part of the registration API. This + * is so the registration module can map the device name to a provider + * structure and then call the provider dat_ia_open function. + * dat_ia_close is also part of the registration API so that the + * registration code can be aware when an ia is no longer in use. + * + * + */ + +DAT_EXPORT DAT_RETURN DAT_API dat_registry_add_provider( + IN DAT_PROVIDER*, /* provider */ + IN const DAT_PROVIDER_INFO* ); /* provider info */ + +DAT_EXPORT DAT_RETURN DAT_API dat_registry_remove_provider( + IN DAT_PROVIDER*, /* provider */ + IN const DAT_PROVIDER_INFO* ); /* provider info */ + +/* + * Provider initialization APIs. + * + * Providers that support being automatically loaded by the Registry must + * implement these APIs and export them as public symbols. + */ + +#define DAT_PROVIDER_INIT_FUNC_NAME dat_provider_init +#define DAT_PROVIDER_FINI_FUNC_NAME dat_provider_fini + +#define DAT_PROVIDER_INIT_FUNC_STR "dat_provider_init" +#define DAT_PROVIDER_FINI_FUNC_STR "dat_provider_fini" + +typedef void (DAT_API *DAT_PROVIDER_INIT_FUNC)( + IN const DAT_PROVIDER_INFO *, + IN const char *); /* instance data */ + +typedef void (DAT_API *DAT_PROVIDER_FINI_FUNC) ( + IN const DAT_PROVIDER_INFO *); + +#ifdef __cplusplus +} +#endif + +#endif /* _DAT_REGISTRY_H_ */ + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/dat_vendor_specific.h b/branches/Ndi/ulp/dapl/dat/include/dat/dat_vendor_specific.h new file mode 100644 index 00000000..54f1398b --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/dat_vendor_specific.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution.
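To make the registration flow above concrete, a provider module's two exported entry points might look like the following minimal sketch. The provider object name is hypothetical, its method table is assumed to be initialized elsewhere, and return-status handling is omitted:

    static DAT_PROVIDER my_provider;    /* hypothetical; method table set up elsewhere */

    void DAT_API dat_provider_init(
        IN const DAT_PROVIDER_INFO *provider_info,
        IN const char *instance_data)
    {
        (void) instance_data;           /* this sketch takes no instance data */
        dat_registry_add_provider(&my_provider, provider_info);
    }

    void DAT_API dat_provider_fini(
        IN const DAT_PROVIDER_INFO *provider_info)
    {
        dat_registry_remove_provider(&my_provider, provider_info);
    }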
+ */ + +/********************************************************************** + * + * HEADER: dat_vendor_specific.h + * + * PURPOSE: + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _DAT_VENDOR_SPECIFIC_H_ +#define _DAT_VENDOR_SPECIFIC_H_ + + +/* General Interface Adapter attributes. These apply to both udat and kdat. */ + +/* To support backwards compatibility for DAPL-1.0 */ +#define max_rdma_read_per_ep max_rdma_read_per_ep_in +#define DAT_IA_FIELD_IA_MAX_DTO_PER_OP DAT_IA_FIELD_IA_MAX_DTO_PER_EP_IN + +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef struct dat_ia_attr + { + char adapter_name[DAT_NAME_MAX_LENGTH]; + char vendor_name[DAT_NAME_MAX_LENGTH]; + DAT_UINT32 hardware_version_major; + DAT_UINT32 hardware_version_minor; + DAT_UINT32 firmware_version_major; + DAT_UINT32 firmware_version_minor; + DAT_IA_ADDRESS_PTR ia_address_ptr; + DAT_COUNT max_eps; + DAT_COUNT max_dto_per_ep; + DAT_COUNT max_rdma_read_per_ep_in; + DAT_COUNT max_rdma_read_per_ep_out; + DAT_COUNT max_evds; + DAT_COUNT max_evd_qlen; + DAT_COUNT max_iov_segments_per_dto; + DAT_COUNT max_lmrs; + DAT_VLEN max_lmr_block_size; + DAT_VADDR max_lmr_virtual_address; + DAT_COUNT max_pzs; + DAT_VLEN max_mtu_size; + DAT_VLEN max_rdma_size; + DAT_COUNT max_rmrs; + DAT_VADDR max_rmr_target_address; + DAT_COUNT num_transport_attr; + DAT_NAMED_ATTR *transport_attr; + DAT_COUNT num_vendor_attr; + DAT_NAMED_ATTR *vendor_attr; + } DAT_IA_ATTR; + +typedef enum dat_ia_attr_mask + { + DAT_IA_FIELD_IA_ADAPTER_NAME = 0x000001, + DAT_IA_FIELD_IA_VENDOR_NAME = 0x000002, + DAT_IA_FIELD_IA_HARDWARE_MAJOR_VERSION = 0x000004, + DAT_IA_FIELD_IA_HARDWARE_MINOR_VERSION = 0x000008, + DAT_IA_FIELD_IA_FIRMWARE_MAJOR_VERSION = 0x000010, + DAT_IA_FIELD_IA_FIRMWARE_MINOR_VERSION = 0x000020, + DAT_IA_FIELD_IA_ADDRESS_PTR = 0x000040, + DAT_IA_FIELD_IA_MAX_EPS = 0x000080, + DAT_IA_FIELD_IA_MAX_DTO_PER_EP = 0x000100, + DAT_IA_FIELD_IA_MAX_RDMA_READ_PER_EP_IN = 0x000200, + DAT_IA_FIELD_IA_MAX_RDMA_READ_PER_EP_OUT = 0x000400, + DAT_IA_FIELD_IA_MAX_EVDS = 0x000800, + DAT_IA_FIELD_IA_MAX_EVD_QLEN = 0x001000, + DAT_IA_FIELD_IA_MAX_IOV_SEGMENTS_PER_DTO = 0x002000, + DAT_IA_FIELD_IA_MAX_LMRS = 0x004000, + DAT_IA_FIELD_IA_MAX_LMR_BLOCK_SIZE = 0x008000, + DAT_IA_FIELD_IA_MAX_LMR_VIRTUAL_ADDRESS = 0x010000, + DAT_IA_FIELD_IA_MAX_PZS = 0x020000, + DAT_IA_FIELD_IA_MAX_MTU_SIZE = 0x040000, + DAT_IA_FIELD_IA_MAX_RDMA_SIZE = 0x080000, + DAT_IA_FIELD_IA_MAX_RMRS = 0x100000, + DAT_IA_FIELD_IA_MAX_RMR_TARGET_ADDRESS = 0x200000, + DAT_IA_FIELD_IA_NUM_TRANSPORT_ATTR = 0x400000, + DAT_IA_FIELD_IA_TRANSPORT_ATTR = 0x800000, + DAT_IA_FIELD_IA_NUM_VENDOR_ATTR = 0x1000000, + DAT_IA_FIELD_IA_VENDOR_ATTR = 0x2000000, + + DAT_IA_ALL = 0x3FFFFFF + } DAT_IA_ATTR_MASK; + + +/* Vendor Specific extensions */ + +#if defined(_JNI) + +#elif defined(_INTEL) + +#elif defined(_INFINISWITCH) + +#elif defined(_MELLANOX) + +#elif defined(_INFINICON) + +#elif defined(_TOPSPIN) + +#endif + +#ifdef __cplusplus +} +#endif + +#endif /* _DAT_VENDOR_SPECIFIC_H_ */ + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/kdat.h b/branches/Ndi/ulp/dapl/dat/include/dat/kdat.h new file mode 100644 index 00000000..d30a601e --- /dev/null +++ 
b/branches/Ndi/ulp/dapl/dat/include/dat/kdat.h @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: kdat.h + * + * PURPOSE: defines the KDAT API + * + * Description: + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + * + **********************************************************************/ + +#ifndef _KDAT_H_ +#define _KDAT_H_ + +#include + +typedef enum dat_mem_type + { + /* Shared between udat and kdat */ + DAT_MEM_TYPE_VIRTUAL = 0x00, + DAT_MEM_TYPE_LMR = 0x01, + /* kdat specific */ + DAT_MEM_TYPE_PHYSICAL = 0x10, + DAT_MEM_TYPE_PLATFORM = 0x20, + DAT_MEM_TYPE_IA = 0x40, + DAT_MEM_TYPE_BYPASS = 0x80 + } DAT_MEM_TYPE; + +/* dat handle types */ +typedef enum dat_handle_type + { + DAT_HANDLE_TYPE_CR, + DAT_HANDLE_TYPE_EP, + DAT_HANDLE_TYPE_EVD, + DAT_HANDLE_TYPE_IA, + DAT_HANDLE_TYPE_LMR, + DAT_HANDLE_TYPE_PSP, + DAT_HANDLE_TYPE_PZ, + DAT_HANDLE_TYPE_RMR, + DAT_HANDLE_TYPE_RSP + } DAT_HANDLE_TYPE; + +typedef enum dat_evd_param_mask + { + DAT_EVD_FIELD_IA_HANDLE = 0x01, + DAT_EVD_FIELD_EVD_QLEN = 0x02, + DAT_EVD_FIELD_UPCALL_POLICY = 0x04, + DAT_EVD_FIELD_UPCALL = 0x08, + DAT_EVD_FIELD_EVD_FLAGS = 0x10, + + DAT_EVD_FIELD_ALL = 0x1F + + } DAT_EVD_PARAM_MASK; + + +#include + +#include + +/* Upcall support */ + +typedef enum dat_upcall_policy + { + DAT_UPCALL_DISABLE = 0, /* support no_upcalls */ + DAT_UPCALL_SINGLE_INSTANCE = 1, /* support only one upcall */ + DAT_UPCALL_MANY = 100 /* support multiple upcalls */ + } DAT_UPCALL_POLICY; + +typedef void (*DAT_UPCALL_FUNC)( + DAT_PVOID, /* instance_data */ + const DAT_EVENT *, /* event */ + DAT_BOOLEAN); /* more_events */ + +typedef struct dat_upcall_object + { + DAT_PVOID instance_data; + DAT_UPCALL_FUNC upcall_func; + } DAT_UPCALL_OBJECT; + +/* Define NULL upcall */ + +#define DAT_UPCALL_NULL(DAT_UPCALL_OBJECT) \ + {(DAT_PVOID) NULL, \ + (DAT_UPCALL_FUNC) NULL} + +typedef struct dat_evd_param + { + DAT_IA_HANDLE ia_handle; + DAT_COUNT evd_qlen; + DAT_UPCALL_POLICY upcall_policy; + DAT_UPCALL_OBJECT upcall; + DAT_EVD_FLAGS evd_flags; + } DAT_EVD_PARAM; + +#include + +/* + * Memory types + * + * Specifing memory type for LMR create. A consumer must use a single + * value when registering memory. 
The union of any of these + * flags is used in the provider parameter to indicate what memory + * type provider supports for LMR memory creation. + */ + +/* memory data types */ + +typedef enum dat_mem_optimize_flags + { + DAT_MEM_OPTIMIZE_DONT_CARE = 0x00, + DAT_MEM_OPTIMIZE_IA = 0x01, + DAT_MEM_OPTIMIZE_MIN_EXPOSURE = 0x02, + DAT_MEM_OPTIMIZE_EXACT_EXPOSURE = 0x04 + } DAT_MEM_OPTIMIZE_FLAGS; + + +typedef union dat_region_description + { + DAT_PVOID for_va; + DAT_LMR_HANDLE for_lmr_handle; + void * for_pointer; /* For kdapl only */ + void * for_array; /* For kdapl only */ + } DAT_REGION_DESCRIPTION; + +typedef struct dat_lmr_param + { + DAT_IA_HANDLE ia_handle; + DAT_MEM_TYPE mem_type; + DAT_REGION_DESCRIPTION region_desc; + DAT_VLEN length; + DAT_PZ_HANDLE pz_handle; + DAT_MEM_PRIV_FLAGS mem_priv; + DAT_LMR_CONTEXT lmr_context; + DAT_VLEN registered_size; + DAT_VADDR registered_address; + } DAT_LMR_PARAM; + + +/****************************************************************************/ + +/* + * Kernel DAT function call definitions, + */ + +typedef DAT_RETURN (*DAT_LMR_KCREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_MEM_TYPE, /* mem_type */ + IN DAT_REGION_DESCRIPTION, /* region_description */ + IN DAT_VLEN, /* length */ + IN DAT_PZ_HANDLE, /* pz_handle */ + IN DAT_MEM_PRIV_FLAGS, /* privileges */ + IN DAT_MEM_OPTIMIZE_FLAGS, /* mem_optimization */ + OUT DAT_LMR_HANDLE *, /* lmr_handle */ + OUT DAT_LMR_CONTEXT *, /* lmr_context */ + OUT DAT_VLEN *, /* registered_length */ + OUT DAT_VADDR * ); /* registered_address */ + + +typedef DAT_RETURN (*DAT_IA_MEMTYPE_HINT_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_MEM_TYPE, /* mem_type */ + IN DAT_VLEN, /* length */ + IN DAT_MEM_OPTIMIZE_FLAGS, /* mem_optimization */ + OUT DAT_VLEN *, /* preferred_length */ + OUT DAT_VADDR * ); /* preferred_alignment */ + +typedef DAT_RETURN (*DAT_EVD_KCREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_COUNT, /* evd_min_qlen */ + IN DAT_UPCALL_POLICY, /* upcall_policy */ + IN DAT_UPCALL_OBJECT, /* upcall */ + IN DAT_EVD_FLAGS, /* evd_flags */ + OUT DAT_EVD_HANDLE * ); /* evd_handle */ + +typedef DAT_RETURN (*DAT_EVD_MODIFY_UPCALL_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_UPCALL_POLICY, /* upcall_policy */ + IN DAT_UPCALL_OBJECT ); /* upcall */ + +/* The following three DAT function calls are also found in udat.h. + * They were removed from dat.h to remove dependency issues with + * the dat.h file. There may be a better way to fix the dependency. + * + */ + +typedef DAT_RETURN (*DAT_IA_QUERY_FUNC)( + IN DAT_IA_HANDLE, /* ia handle */ + OUT DAT_EVD_HANDLE *, /* async_evd_handle */ + IN DAT_IA_ATTR_MASK, /* ia_attr_mask */ + OUT DAT_IA_ATTR *, /* ia_attr */ + IN DAT_PROVIDER_ATTR_MASK, /* provider_attr_mask */ + OUT DAT_PROVIDER_ATTR * ); /* provider_attr */ + +typedef DAT_RETURN (*DAT_EVD_QUERY_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_EVD_PARAM_MASK, /* evd_param_mask */ + OUT DAT_EVD_PARAM * ); /* evd_param */ + +typedef DAT_RETURN (*DAT_LMR_QUERY_FUNC)( + IN DAT_LMR_HANDLE, + IN DAT_LMR_PARAM_MASK, + OUT DAT_LMR_PARAM *); + +#include + +#endif /* _KDAT_H_ */ + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/kdat_config.h b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_config.h new file mode 100644 index 00000000..6e45f162 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_config.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: kdat_config.h + * + * PURPOSE: defines the common DAT API for uDAPL and kDAPL. + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _KDAT_CONFIG_H_ +#define _KDAT_CONFIG_H_ + +#define DAT_VERSION_MAJOR 1 +#define DAT_VERSION_MINOR 1 + + +/* + * The official header files will default DAT_THREADSAFE to DAT_TRUE. If + * your project does not wish to use this default, you must ensure that + * DAT_THREADSAFE will be set to DAT_FALSE. This may be done by an + * explicit #define in a common project header file that is included + * before any DAT header files, or through command line directives to the + * compiler (presumably controlled by the make environment). + */ + +/* + * A site, project or platform may consider setting an alternate default + * via their make rules, but is discouraged from doing so by editing + * the official header files. + */ + +/* + * The Reference Implementation is not Thread Safe. The Reference + * Implementation has chosen to go with the first method and define it + * explicitly in the header file. + */ + +#define DAT_THREADSAFE DAT_FALSE + +#ifndef DAT_THREADSAFE +#define DAT_THREADSAFE DAT_TRUE +#endif /* DAT_THREADSAFE */ + +#endif /* _KDAT_CONFIG_H_ */ diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/kdat_redirection.h b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_redirection.h new file mode 100644 index 00000000..532e1d95 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_redirection.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: kdat_redirection.h + * + * PURPOSE: Kernel DAT macro definitions + * + * Description: + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + * + **********************************************************************/ + +#ifndef _KDAT_REDIRCTION_H_ +#define _KDAT_REDIRCTION_H_ + + + +/* ia_memtype_hint macro */ + +#define dat_ia_memtype_hint(ia,mem_type,len,mem_opt,pref_len,pref_align) \ + (DAT_HANDLE_TO_PROVIDER(ia)->ia_memtype_hint_func)(\ + (ia),\ + (mem_type),\ + (len),\ + (mem_opt),\ + (pref_len),\ + (pref_align)) + +/* evd_modify_upcall macro */ + +#define dat_evd_modify_upcall(evd,policy,upcall) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_modify_upcall_func)(\ + (evd),\ + (policy),\ + (upcall)) + +/* evd_create macro */ + +#define dat_evd_kcreate(ia,policy,upcall,qlen,flags,handle) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->evd_kcreate_func)(\ + (ia),\ + (qlen),\ + (policy),\ + (upcall),\ + (flags),\ + (handle)) + +/* lmr_create macro */ + +#define dat_lmr_kcreate(ia,mtype,reg_desc,len,pz,priv,mem_opt,\ + lmr,context,reg_len,reg_addr) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->lmr_kcreate_func)(\ + (ia),\ + (mtype),\ + (reg_desc),\ + (len),\ + (pz),\ + (priv),\ + (mem_opt),\ + (lmr),\ + (context),\ + (reg_len),\ + (reg_addr)) + + +#include "dat_redirection.h" + +struct dat_provider + { + const char * device_name; + DAT_PVOID extension; + + DAT_IA_OPEN_FUNC ia_open_func; + DAT_IA_QUERY_FUNC ia_query_func; + DAT_IA_CLOSE_FUNC ia_close_func; + DAT_IA_MEMTYPE_HINT_FUNC ia_memtype_hint_func; /* kdat only */ + + DAT_SET_CONSUMER_CONTEXT_FUNC set_consumer_context_func; + DAT_GET_CONSUMER_CONTEXT_FUNC get_consumer_context_func; + DAT_GET_HANDLE_TYPE_FUNC get_handle_type_func; + + DAT_CR_QUERY_FUNC cr_query_func; + DAT_CR_ACCEPT_FUNC cr_accept_func; + DAT_CR_REJECT_FUNC cr_reject_func; + + DAT_EVD_KCREATE_FUNC evd_kcreate_func; + DAT_EVD_QUERY_FUNC evd_query_func; + + DAT_EVD_MODIFY_UPCALL_FUNC evd_modify_upcall_func; /* kdat only */ + + DAT_EVD_RESIZE_FUNC evd_resize_func; + DAT_EVD_POST_SE_FUNC evd_post_se_func; + DAT_EVD_DEQUEUE_FUNC evd_dequeue_func; + DAT_EVD_FREE_FUNC evd_free_func; + + DAT_EP_CREATE_FUNC ep_create_func; + DAT_EP_QUERY_FUNC ep_query_func; + DAT_EP_MODIFY_FUNC ep_modify_func; + DAT_EP_CONNECT_FUNC ep_connect_func; + DAT_EP_DUP_CONNECT_FUNC ep_dup_connect_func; + DAT_EP_DISCONNECT_FUNC ep_disconnect_func; + DAT_EP_POST_SEND_FUNC ep_post_send_func; + DAT_EP_POST_RECV_FUNC ep_post_recv_func; + DAT_EP_POST_RDMA_READ_FUNC ep_post_rdma_read_func; + DAT_EP_POST_RDMA_WRITE_FUNC ep_post_rdma_write_func; + DAT_EP_GET_STATUS_FUNC ep_get_status_func; + DAT_EP_FREE_FUNC ep_free_func; + + DAT_LMR_KCREATE_FUNC lmr_kcreate_func; + DAT_LMR_QUERY_FUNC lmr_query_func; + DAT_LMR_FREE_FUNC lmr_free_func; + + DAT_RMR_CREATE_FUNC 
rmr_create_func; + DAT_RMR_QUERY_FUNC rmr_query_func; + DAT_RMR_BIND_FUNC rmr_bind_func; + DAT_RMR_FREE_FUNC rmr_free_func; + + DAT_PSP_CREATE_FUNC psp_create_func; + DAT_PSP_QUERY_FUNC psp_query_func; + DAT_PSP_FREE_FUNC psp_free_func; + + DAT_RSP_CREATE_FUNC rsp_create_func; + DAT_RSP_QUERY_FUNC rsp_query_func; + DAT_RSP_FREE_FUNC rsp_free_func; + + DAT_PZ_CREATE_FUNC pz_create_func; + DAT_PZ_QUERY_FUNC pz_query_func; + DAT_PZ_FREE_FUNC pz_free_func; + }; + +#endif /* _KDAT_REDIRCTION_H_ */ + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/kdat_vendor_specific.h b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_vendor_specific.h new file mode 100644 index 00000000..d617713c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/kdat_vendor_specific.h @@ -0,0 +1,112 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: kdat_vendor_specific.h + * + * PURPOSE: + * + * Description: + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _KDAT_VENDOR_SPECIFIC_H_ +#define _KDAT_VENDOR_SPECIFIC_H_ + +/* General Provider attributes. kdat specific. 
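+ *
+ * A Consumer typically retrieves these attributes through dat_ia_query;
+ * a minimal sketch (valid ia handle assumed, IA attributes not queried
+ * here, status checking omitted):
+ *
+ *     DAT_PROVIDER_ATTR pattr;
+ *     DAT_EVD_HANDLE async_evd;
+ *
+ *     dat_ia_query(ia, &async_evd, 0, NULL,
+ *                  DAT_PROVIDER_FIELD_ALL, &pattr);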
*/ +typedef struct dat_provider_attr + { + char provider_name[DAT_NAME_MAX_LENGTH]; + DAT_UINT32 provider_version_major; + DAT_UINT32 provider_version_minor; + DAT_UINT32 dapl_version_major; + DAT_UINT32 dapl_version_minor; + DAT_MEM_TYPE lmr_mem_types_supported; + DAT_RMR_BIND_EVD_RESTRICTION_TYPE rmr_bind_evd_restriction; + DAT_IOV_OWNERSHIP iov_ownership_on_return; + DAT_QOS dat_qos_supported; + DAT_COMPLETION_FLAGS completion_flags_supported; + DAT_BOOLEAN is_thread_safe; + DAT_COUNT max_private_data_size; + DAT_BOOLEAN supports_multipath; + DAT_EP_CREATOR_FOR_PSP ep_creator; + DAT_UPCALL_POLICY upcall_policy; + DAT_COUNT num_provider_specific_attr; + DAT_NAMED_ATTR * provider_specific_attr; + } DAT_PROVIDER_ATTR; + +typedef enum dat_provider_attr_mask + { + DAT_PROVIDER_FIELD_PROVIDER_NAME = 0x00001, + DAT_PROVIDER_FIELD_PROVIDER_VERSION_MAJOR = 0x00002, + DAT_PROVIDER_FIELD_PROVIDER_VERSION_MINOR = 0x00004, + DAT_PROVIDER_FIELD_DAPL_VERSION_MAJOR = 0x00008, + DAT_PROVIDER_FIELD_DAPL_VERSION_MINOR = 0x00010, + DAT_PROVIDER_FIELD_LMR_MEM_TYPE_SUPPORTED = 0x00020, + DAT_PROVIDER_FIELD_RMR_BIND_RESTRICTION = 0x00040, + DAT_PROVIDER_FIELD_IOV_OWNERSHIP = 0x00080, + DAT_PROVIDER_FIELD_DAT_QOS_SUPPORTED = 0x00100, + DAT_PROVIDER_FIELD_COMPLETION_FLAGS_SUPPORTED = 0x00200, + DAT_PROVIDER_FIELD_IS_THREAD_SAFE = 0x00400, + DAT_PROVIDER_FIELD_MAX_PRIVATE_DATA_SIZE = 0x00800, + DAT_PROVIDER_FIELD_SUPPORTS_MULTIPATH = 0x01000, + DAT_PROVIDER_FIELD_EP_CREATOR = 0x02000, + DAT_PROVIDER_FIELD_UPCALL_POLICY = 0x04000, + DAT_PROVIDER_FIELD_NUM_PROVIDER_SPECIFIC_ATTR = 0x10000, + DAT_PROVIDER_FIELD_PROVIDER_SPECIFIC_ATTR = 0x20000, + + DAT_PROVIDER_FIELD_ALL = 0x37FFF + } DAT_PROVIDER_ATTR_MASK; + +#include "dat_vendor_specific.h" /* Interface Adaptor attributes */ + +/* Vendor specific extensions */ + +#if defined(_JNI) + +#elif defined(_INTEL) + +#elif defined(_INFINISWITCH) + +#elif defined(_MELLANOX) + +#elif defined(_INFINICON) + +#elif defined(_TOPSPIN) + +#endif + +#endif /* _KDAT_VENDOR_SPECIFIC_H_ */ + + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/udat.h b/branches/Ndi/ulp/dapl/dat/include/dat/udat.h new file mode 100644 index 00000000..1900408c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/udat.h @@ -0,0 +1,332 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +/********************************************************************** + * + * HEADER: udat.h + * + * PURPOSE: defines the user DAT API + * + * Description: Interfaces in this file are completely described in + * the uDAPL 1.1 API + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _UDAT_H_ +#define _UDAT_H_ + +#include + +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef enum dat_mem_type + { + /* Shared between udat and kdat */ + DAT_MEM_TYPE_VIRTUAL = 0x00, + DAT_MEM_TYPE_LMR = 0x01, + /* udat specific */ + DAT_MEM_TYPE_SHARED_VIRTUAL = 0x02 + } DAT_MEM_TYPE; + +/* dat handle types */ +typedef enum dat_handle_type + { + DAT_HANDLE_TYPE_CR, + DAT_HANDLE_TYPE_EP, + DAT_HANDLE_TYPE_EVD, + DAT_HANDLE_TYPE_IA, + DAT_HANDLE_TYPE_LMR, + DAT_HANDLE_TYPE_PSP, + DAT_HANDLE_TYPE_PZ, + DAT_HANDLE_TYPE_RMR, + DAT_HANDLE_TYPE_RSP, + DAT_HANDLE_TYPE_CNO + } DAT_HANDLE_TYPE; + +/* + * EVD state consists of 3 orthogonal substates. One for + * enabled/disabled, one for waitable/unwaitable, and one + * for configuration. Within each substate the values are + * mutually exclusive. + */ +typedef enum dat_evd_state + { + DAT_EVD_STATE_ENABLED = 0x01, + DAT_EVD_STATE_DISABLED = 0x02, + DAT_EVD_STATE_WAITABLE = 0x04, + DAT_EVD_STATE_UNWAITABLE = 0x08, + DAT_EVD_STATE_CONFIG_NOTIFY = 0x10, + DAT_EVD_STATE_CONFIG_SOLICITED = 0x20, + DAT_EVD_STATE_CONFIG_THRESHOLD = 0x30 + } DAT_EVD_STATE; + +typedef enum dat_evd_param_mask + { + DAT_EVD_FIELD_IA_HANDLE = 0x01, + DAT_EVD_FIELD_EVD_QLEN = 0x02, + DAT_EVD_FIELD_EVD_STATE = 0x04, + DAT_EVD_FIELD_CNO = 0x08, + DAT_EVD_FIELD_EVD_FLAGS = 0x10, + + DAT_EVD_FIELD_ALL = 0x1F + } DAT_EVD_PARAM_MASK; + +#include + +#include + +typedef DAT_HANDLE DAT_CNO_HANDLE; + +typedef struct dat_evd_param + { + DAT_IA_HANDLE ia_handle; + DAT_COUNT evd_qlen; + DAT_EVD_STATE evd_state; + DAT_CNO_HANDLE cno_handle; + DAT_EVD_FLAGS evd_flags; + } DAT_EVD_PARAM; + +#define DAT_LMR_COOKIE_SIZE 40 /* size of DAT_LMR_COOKIE in bytes */ +typedef char (* DAT_LMR_COOKIE)[DAT_LMR_COOKIE_SIZE]; + +/* Format for OS wait proxy agent function */ + +typedef void (DAT_API *DAT_AGENT_FUNC) + ( + DAT_PVOID, /* instance data */ + DAT_EVD_HANDLE /* Event Dispatcher*/ + ); + +/* Definition */ + +typedef struct dat_os_wait_proxy_agent + { + DAT_PVOID instance_data; + DAT_AGENT_FUNC proxy_agent_func; + } DAT_OS_WAIT_PROXY_AGENT; + +/* Define NULL Proxy agent */ + +#define DAT_OS_WAIT_PROXY_AGENT_NULL \ + (DAT_OS_WAIT_PROXY_AGENT) { \ + (DAT_PVOID) NULL, \ + (DAT_AGENT_FUNC) NULL } + + +/* Flags */ + +/* The value specified by the uDAPL Consumer for dat_ia_open to indicate + * that no async EVD should be created for the opening instance of an IA. + * The same IA must have been opened before, and that instance has the + * only async EVD, which handles async errors for all open instances of + * the IA. + */ + +#define DAT_EVD_ASYNC_EXISTS (DAT_EVD_HANDLE) 0x1 + +/* + * The value returned by dat_ia_query for the case when there is no + * async EVD for the IA instance. The Consumer had specified the value of + * DAT_EVD_ASYNC_EXISTS for the async_evd_handle for dat_ia_open. + */ + +#define DAT_EVD_OUT_OF_SCOPE (DAT_EVD_HANDLE) 0x2 + +/* + * Memory types + * + * Specifying memory type for LMR create. A consumer must use a single + * value when registering memory. 
The union of any of these + * flags is used in the provider parameters to indicate what memory + * type provider supports for LMR memory creation. + */ + + + +/* For udapl only */ + +typedef struct dat_shared_memory + { + DAT_PVOID virtual_address; + DAT_LMR_COOKIE shared_memory_id; + } DAT_SHARED_MEMORY; + +typedef union dat_region_description + { + DAT_PVOID for_va; + DAT_LMR_HANDLE for_lmr_handle; + DAT_SHARED_MEMORY for_shared_memory; /* For udapl only */ + } DAT_REGION_DESCRIPTION; + +/* LMR Arguments */ + +typedef struct dat_lmr_param + { + DAT_IA_HANDLE ia_handle; + DAT_MEM_TYPE mem_type; + DAT_REGION_DESCRIPTION region_desc; + DAT_VLEN length; + DAT_PZ_HANDLE pz_handle; + DAT_MEM_PRIV_FLAGS mem_priv; + DAT_LMR_CONTEXT lmr_context; + DAT_RMR_CONTEXT rmr_context; + DAT_VLEN registered_size; + DAT_VADDR registered_address; + } DAT_LMR_PARAM; + + +typedef struct dat_cno_param + { + DAT_IA_HANDLE ia_handle; + DAT_OS_WAIT_PROXY_AGENT agent; + } DAT_CNO_PARAM; + +typedef enum dat_cno_param_mask + { + DAT_CNO_FIELD_IA_HANDLE = 0x1, + DAT_CNO_FIELD_AGENT = 0x2, + + DAT_CNO_FIELD_ALL = 0x3 + } DAT_CNO_PARAM_MASK; + +#include + +/****************************************************************************/ + +/* + * User DAT functions definitions. + */ + + +typedef DAT_RETURN (DAT_API *DAT_LMR_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_MEM_TYPE, /* mem_type */ + IN DAT_REGION_DESCRIPTION, /* region_description */ + IN DAT_VLEN, /* length */ + IN DAT_PZ_HANDLE, /* pz_handle */ + IN DAT_MEM_PRIV_FLAGS, /* privileges */ + OUT DAT_LMR_HANDLE *, /* lmr_handle */ + OUT DAT_LMR_CONTEXT *, /* lmr_context */ + OUT DAT_RMR_CONTEXT *, /* rmr_context */ + OUT DAT_VLEN *, /* registered_length */ + OUT DAT_VADDR * ); /* registered_address */ + +/* Event Functions */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_COUNT, /* evd_min_qlen */ + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_EVD_FLAGS, /* evd_flags */ + OUT DAT_EVD_HANDLE * ); /* evd_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_MODIFY_CNO_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_CNO_HANDLE); /* cno_handle */ + +typedef DAT_RETURN (DAT_API *DAT_CNO_CREATE_FUNC)( + IN DAT_IA_HANDLE, /* ia_handle */ + IN DAT_OS_WAIT_PROXY_AGENT,/* agent */ + OUT DAT_CNO_HANDLE *); /* cno_handle */ + +typedef DAT_RETURN (DAT_API *DAT_CNO_MODIFY_AGENT_FUNC)( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_OS_WAIT_PROXY_AGENT);/* agent */ + +typedef DAT_RETURN (DAT_API *DAT_CNO_QUERY_FUNC)( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_CNO_PARAM_MASK, /* cno_param_mask */ + OUT DAT_CNO_PARAM * ); /* cno_param */ + +typedef DAT_RETURN (DAT_API *DAT_CNO_FREE_FUNC)( + IN DAT_CNO_HANDLE); /* cno_handle */ + +typedef DAT_RETURN (DAT_API *DAT_CNO_WAIT_FUNC)( + IN DAT_CNO_HANDLE, /* cno_handle */ + IN DAT_TIMEOUT, /* timeout */ + OUT DAT_EVD_HANDLE *); /* evd_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_ENABLE_FUNC)( + IN DAT_EVD_HANDLE); /* evd_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_WAIT_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_TIMEOUT, /* Timeout */ + IN DAT_COUNT, /* Threshold */ + OUT DAT_EVENT *, /* event */ + OUT DAT_COUNT * ); /* N more events */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_DISABLE_FUNC)( + IN DAT_EVD_HANDLE); /* evd_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_SET_UNWAITABLE_FUNC)( + IN DAT_EVD_HANDLE); /* evd_handle */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_CLEAR_UNWAITABLE_FUNC)( + IN DAT_EVD_HANDLE); /* evd_handle */ + +/* The 
following three DAT function calls are also found in kdat.h. + * They were removed from dat.h to remove dependency issues with + * the dat.h file. There may be a better way to fix the dependency. + */ + +typedef DAT_RETURN (DAT_API *DAT_IA_QUERY_FUNC)( + IN DAT_IA_HANDLE, /* ia handle */ + OUT DAT_EVD_HANDLE *, /* async_evd_handle */ + IN DAT_IA_ATTR_MASK, /* ia_attr_mask */ + OUT DAT_IA_ATTR *, /* ia_attr */ + IN DAT_PROVIDER_ATTR_MASK, /* provider_attr_mask */ + OUT DAT_PROVIDER_ATTR * ); /* provider_attr */ + +typedef DAT_RETURN (DAT_API *DAT_EVD_QUERY_FUNC)( + IN DAT_EVD_HANDLE, /* evd_handle */ + IN DAT_EVD_PARAM_MASK, /* evd_param_mask */ + OUT DAT_EVD_PARAM * ); /* evd_param */ + +typedef DAT_RETURN (DAT_API *DAT_LMR_QUERY_FUNC)( + IN DAT_LMR_HANDLE, + IN DAT_LMR_PARAM_MASK, + OUT DAT_LMR_PARAM *); + +#include + +#ifdef __cplusplus +} +#endif + +#endif /* _UDAT_H_ */ + + + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/udat_config.h b/branches/Ndi/ulp/dapl/dat/include/dat/udat_config.h new file mode 100644 index 00000000..172a12ba --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/udat_config.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: udat_config.h + * + * PURPOSE: defines the common DAT API for uDAPL and kDAPL. + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _UDAT_CONFIG_H_ +#define _UDAT_CONFIG_H_ + +#define DAT_VERSION_MAJOR 1 +#define DAT_VERSION_MINOR 1 + +/* + * The official header files will default DAT_THREADSAFE to DAT_TRUE. If + * your project does not wish to use this default, you must ensure that + * DAT_THREADSAFE will be set to DAT_FALSE. This may be done by an + * explicit #define in a common project header file that is included + * before any DAT header files, or through command line directives to the + * compiler (presumably controlled by the make environment). + */ + +/* + * A site, project or platform may consider setting an alternate default + * via their make rules, but is discouraged from doing so by editing + * the official header files. 
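+ *
+ * Concretely, the supported overrides are a project-wide header line
+ * such as
+ *
+ *     #define DAT_THREADSAFE DAT_FALSE
+ *
+ * included before any DAT header, or an equivalent compiler directive
+ * such as -DDAT_THREADSAFE=DAT_FALSE.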
+ */ + +/* + * The Reference Implementation is not Thread Safe. The Reference + * Implementation has chosen to go with the first method and define it + * explicitly in the header file. + */ + + +#ifndef DAT_THREADSAFE +#define DAT_THREADSAFE DAT_TRUE +#endif /* DAT_THREADSAFE */ + +#endif /* _UDAT_CONFIG_H_ */ diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/udat_redirection.h b/branches/Ndi/ulp/dapl/dat/include/dat/udat_redirection.h new file mode 100644 index 00000000..848679c9 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/udat_redirection.h @@ -0,0 +1,225 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: udat_redirection.h + * + * PURPOSE: User DAT macro definitions + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. 
+ * + * + * + **********************************************************************/ + +#ifndef _UDAT_REDIRECTION_H_ +#define _UDAT_REDIRECTION_H_ + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define dat_lmr_create(ia,mtype,reg_desc,len,pz,priv,\ + lmr,lmr_context,rmr_context,reg_len,reg_addr) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->lmr_create_func)(\ + (ia),\ + (mtype),\ + (reg_desc),\ + (len),\ + (pz),\ + (priv),\ + (lmr),\ + (lmr_context),\ + (rmr_context),\ + (reg_len),\ + (reg_addr)) + +#define dat_evd_create(ia,qlen,cno,flags,handle) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->evd_create_func)(\ + (ia),\ + (qlen),\ + (cno),\ + (flags),\ + (handle)) + +#define dat_evd_enable(evd) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_enable_func)(\ + (evd)) + +#define dat_evd_wait(evd,timeout,threshold,event,nmore) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_wait_func)(\ + (evd),\ + (timeout),\ + (threshold),\ + (event),\ + (nmore)) + +#define dat_evd_disable(evd) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_disable_func)(\ + (evd)) + +#define dat_evd_set_unwaitable(evd) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_set_unwaitable_func)(\ + (evd)) + +#define dat_evd_clear_unwaitable(evd) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_clear_unwaitable_func)(\ + (evd)) + +#define dat_evd_modify_cno(evd,cno) \ + (*DAT_HANDLE_TO_PROVIDER(evd)->evd_modify_cno_func)(\ + (evd),\ + (cno)) + +#define dat_cno_create(ia,proxy,cno) \ + (*DAT_HANDLE_TO_PROVIDER(ia)->cno_create_func)(\ + (ia),\ + (proxy),\ + (cno)) + +#define dat_cno_modify_agent(cno,proxy) \ + (*DAT_HANDLE_TO_PROVIDER(cno)->cno_modify_agent_func)(\ + (cno),\ + (proxy)) + +#define dat_cno_query(cno,mask,param) \ + (*DAT_HANDLE_TO_PROVIDER(cno)->cno_query_func)(\ + (cno),\ + (mask),\ + (param)) + +#define dat_cno_free(cno) \ + (*DAT_HANDLE_TO_PROVIDER(cno)->cno_free_func)(\ + (cno)) + +#define dat_cno_wait(cno,timeout,evd) \ + (*DAT_HANDLE_TO_PROVIDER(cno)->cno_wait_func)(\ + (cno),\ + (timeout),\ + (evd)) + +#define dat_cr_handoff(cr,qual) \ + (*DAT_HANDLE_TO_PROVIDER(cr)->cr_handoff_func)(\ + (cr),\ + (qual)) + +#include + +struct dat_provider + { + const char * device_name; + DAT_PVOID extension; + + DAT_IA_OPEN_FUNC ia_open_func; + DAT_IA_QUERY_FUNC ia_query_func; + DAT_IA_CLOSE_FUNC ia_close_func; + + DAT_SET_CONSUMER_CONTEXT_FUNC set_consumer_context_func; + DAT_GET_CONSUMER_CONTEXT_FUNC get_consumer_context_func; + DAT_GET_HANDLE_TYPE_FUNC get_handle_type_func; + + DAT_CNO_CREATE_FUNC cno_create_func; /* udat only */ + DAT_CNO_MODIFY_AGENT_FUNC cno_modify_agent_func; /* udat only */ + DAT_CNO_QUERY_FUNC cno_query_func; /* udat only */ + DAT_CNO_FREE_FUNC cno_free_func; /* udat only */ + DAT_CNO_WAIT_FUNC cno_wait_func; /* udat only */ + + DAT_CR_QUERY_FUNC cr_query_func; + DAT_CR_ACCEPT_FUNC cr_accept_func; + DAT_CR_REJECT_FUNC cr_reject_func; + DAT_CR_HANDOFF_FUNC cr_handoff_func; /* udat only */ + + DAT_EVD_CREATE_FUNC evd_create_func; + DAT_EVD_QUERY_FUNC evd_query_func; + + DAT_EVD_MODIFY_CNO_FUNC evd_modify_cno_func; /* udat only */ + DAT_EVD_ENABLE_FUNC evd_enable_func; /* udat only */ + DAT_EVD_DISABLE_FUNC evd_disable_func; /* udat only */ + DAT_EVD_WAIT_FUNC evd_wait_func; /* udat only */ + + DAT_EVD_RESIZE_FUNC evd_resize_func; + DAT_EVD_POST_SE_FUNC evd_post_se_func; + DAT_EVD_DEQUEUE_FUNC evd_dequeue_func; + DAT_EVD_FREE_FUNC evd_free_func; + + DAT_EP_CREATE_FUNC ep_create_func; + DAT_EP_QUERY_FUNC ep_query_func; + DAT_EP_MODIFY_FUNC ep_modify_func; + DAT_EP_CONNECT_FUNC ep_connect_func; + DAT_EP_DUP_CONNECT_FUNC ep_dup_connect_func; + DAT_EP_DISCONNECT_FUNC 
ep_disconnect_func; + DAT_EP_POST_SEND_FUNC ep_post_send_func; + DAT_EP_POST_RECV_FUNC ep_post_recv_func; + DAT_EP_POST_RDMA_READ_FUNC ep_post_rdma_read_func; + DAT_EP_POST_RDMA_WRITE_FUNC ep_post_rdma_write_func; + DAT_EP_GET_STATUS_FUNC ep_get_status_func; + DAT_EP_FREE_FUNC ep_free_func; + + DAT_LMR_CREATE_FUNC lmr_create_func; + DAT_LMR_QUERY_FUNC lmr_query_func; + + DAT_LMR_FREE_FUNC lmr_free_func; + + DAT_RMR_CREATE_FUNC rmr_create_func; + DAT_RMR_QUERY_FUNC rmr_query_func; + DAT_RMR_BIND_FUNC rmr_bind_func; + DAT_RMR_FREE_FUNC rmr_free_func; + + DAT_PSP_CREATE_FUNC psp_create_func; + DAT_PSP_QUERY_FUNC psp_query_func; + DAT_PSP_FREE_FUNC psp_free_func; + + DAT_RSP_CREATE_FUNC rsp_create_func; + DAT_RSP_QUERY_FUNC rsp_query_func; + DAT_RSP_FREE_FUNC rsp_free_func; + + DAT_PZ_CREATE_FUNC pz_create_func; + DAT_PZ_QUERY_FUNC pz_query_func; + DAT_PZ_FREE_FUNC pz_free_func; + + /* udat-1.1 */ + DAT_PSP_CREATE_ANY_FUNC psp_create_any_func; + DAT_EP_RESET_FUNC ep_reset_func; + DAT_EVD_SET_UNWAITABLE_FUNC evd_set_unwaitable_func; + DAT_EVD_CLEAR_UNWAITABLE_FUNC evd_clear_unwaitable_func; + + }; + +#ifdef __cplusplus +} +#endif + +#endif /* _UDAT_REDIRECTION_H_ */ + + diff --git a/branches/Ndi/ulp/dapl/dat/include/dat/udat_vendor_specific.h b/branches/Ndi/ulp/dapl/dat/include/dat/udat_vendor_specific.h new file mode 100644 index 00000000..6321abbc --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/include/dat/udat_vendor_specific.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * HEADER: udat_vendor_specific.h + * + * PURPOSE: + * + * Description: Header file for "uDAPL: User Direct Access Programming + * Library, Version: 1.1" + * + * Mapping rules: + * All global symbols are prepended with "DAT_" or "dat_" + * All DAT objects have an 'api' tag which, such as 'ep' or 'lmr' + * The method table is in the provider definition structure. + * + * + **********************************************************************/ + +#ifndef _UDAT_VENDOR_SPECIFIC_H_ +#define _UDAT_VENDOR_SPECIFIC_H_ + +/* General Provider attributes. udat specific. */ +#ifdef __cplusplus +extern "C" +{ +#endif + +typedef enum dat_pz_support + { + DAT_PZ_UNIQUE, + DAT_PZ_SAME, + DAT_PZ_SHAREABLE + } DAT_PZ_SUPPORT; + +/* Provider should support merging of all event stream types. Provider + * attribute specify support for merging different event stream types. 
+ * It is a 2D binary matrix where each row and column represents an event
+ * stream type. Each binary entry is 1 if the event streams of its row
+ * and column can feed the same EVD, and 0 otherwise. The order of event
+ * streams in row and column is the same as in the definition of
+ * DAT_EVD_FLAGS: index 0 - Software Event, 1 - Connection Request,
+ * 2 - DTO Completion, 3 - Connection event, 4 - RMR Bind Completion,
+ * 5 - Asynchronous event. By definition each diagonal entry is 1.
+ * The Consumer allocates the array and passes it IN as a pointer to
+ * the array that the Provider fills. The Provider must fill the array
+ * that the Consumer passes.
+ */
+
+typedef struct dat_provider_attr
+    {
+    char                        provider_name[DAT_NAME_MAX_LENGTH];
+    DAT_UINT32                  provider_version_major;
+    DAT_UINT32                  provider_version_minor;
+    DAT_UINT32                  dapl_version_major;
+    DAT_UINT32                  dapl_version_minor;
+    DAT_MEM_TYPE                lmr_mem_types_supported;
+    DAT_IOV_OWNERSHIP           iov_ownership_on_return;
+    DAT_QOS                     dat_qos_supported;
+    DAT_COMPLETION_FLAGS        completion_flags_supported;
+    DAT_BOOLEAN                 is_thread_safe;
+    DAT_COUNT                   max_private_data_size;
+    DAT_BOOLEAN                 supports_multipath;
+    DAT_EP_CREATOR_FOR_PSP      ep_creator;
+    DAT_UINT32                  optimal_buffer_alignment;
+    DAT_BOOLEAN                 evd_stream_merging_supported[6][6];
+    DAT_PZ_SUPPORT              pz_support;
+    DAT_COUNT                   num_provider_specific_attr;
+    DAT_NAMED_ATTR *            provider_specific_attr;
+    } DAT_PROVIDER_ATTR;
+
+typedef enum dat_provider_attr_mask
+    {
+    DAT_PROVIDER_FIELD_PROVIDER_NAME                = 0x00001,
+    DAT_PROVIDER_FIELD_PROVIDER_VERSION_MAJOR       = 0x00002,
+    DAT_PROVIDER_FIELD_PROVIDER_VERSION_MINOR       = 0x00004,
+    DAT_PROVIDER_FIELD_DAPL_VERSION_MAJOR           = 0x00008,
+    DAT_PROVIDER_FIELD_DAPL_VERSION_MINOR           = 0x00010,
+    DAT_PROVIDER_FIELD_LMR_MEM_TYPE_SUPPORTED       = 0x00020,
+    DAT_PROVIDER_FIELD_IOV_OWNERSHIP                = 0x00040,
+    DAT_PROVIDER_FIELD_DAT_QOS_SUPPORTED            = 0x00080,
+    DAT_PROVIDER_FIELD_COMPLETION_FLAGS_SUPPORTED   = 0x00100,
+    DAT_PROVIDER_FIELD_IS_THREAD_SAFE               = 0x00200,
+    DAT_PROVIDER_FIELD_MAX_PRIVATE_DATA_SIZE        = 0x00400,
+    DAT_PROVIDER_FIELD_SUPPORTS_MULTIPATH           = 0x00800,
+    DAT_PROVIDER_FIELD_EP_CREATOR                   = 0x01000,
+    DAT_PROVIDER_FIELD_PZ_SUPPORT                   = 0x02000,
+    DAT_PROVIDER_FIELD_OPTIMAL_BUFFER_ALIGNMENT     = 0x04000,
+    DAT_PROVIDER_FIELD_EVD_STREAM_MERGING_SUPPORTED = 0x08000,
+    DAT_PROVIDER_FIELD_NUM_PROVIDER_SPECIFIC_ATTR   = 0x10000,
+    DAT_PROVIDER_FIELD_PROVIDER_SPECIFIC_ATTR       = 0x20000,
+
+    DAT_PROVIDER_FIELD_ALL                          = 0x37FFF
+    } DAT_PROVIDER_ATTR_MASK;
+
+#include
+
+#if defined(_JNI)
+
+#elif defined(_INTEL)
+
+#elif defined(_INFINISWITCH)
+
+#elif defined(_MELLANOX)
+
+#elif defined(_INFINICON)
+
+#elif defined(_TOPSPIN)
+
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _UDAT_VENDOR_SPECIFIC_H_ */
+
+
diff --git a/branches/Ndi/ulp/dapl/dat/kdat/Makefile b/branches/Ndi/ulp/dapl/dat/kdat/Makefile
new file mode 100644
index 00000000..875fa01d
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/kdat/Makefile
@@ -0,0 +1,113 @@
+#
+# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+#
+# This Software is licensed under either one of the following two licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+#    in the file LICENSE.txt in the root directory. The license is also
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/cpl.php.
+# OR
+#
+# 2) under the terms of the "The BSD License" a copy of which is in the file
+#    LICENSE2.txt in the root directory.
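+#
+# (A usage note for udat_vendor_specific.h above: a Consumer can test
+# whether two event stream types may share one EVD by querying the
+# Provider attributes.  A minimal sketch, assuming "ia" is an
+# already-open DAT_IA_HANDLE and using the DAT_EVD_FLAGS index order
+# documented there -- 2 is DTO Completion, 3 is Connection:
+#
+#     DAT_PROVIDER_ATTR attr;
+#
+#     if (DAT_SUCCESS == dat_ia_query (ia, NULL, 0, NULL,
+#                             DAT_PROVIDER_FIELD_EVD_STREAM_MERGING_SUPPORTED,
+#                             &attr)
+#         && attr.evd_stream_merging_supported[2][3])
+#     {
+#         the two streams may be directed to a single EVD
+#     }
+#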
The license is also available from +# the Open Source Initiative, see +# http://www.opensource.org/licenses/bsd-license.php. +# +# Licensee has the right to choose either one of the above two licenses. +# +# Redistributions of source code must retain both the above copyright +# notice and either one of the license notices. +# +# Redistributions in binary form must reproduce both the above copyright +# notice, either one of the license notices in the documentation +# and/or other materials provided with the distribution. +# + +#********************************************************************* +# +# MODULE: Makefile +# +# PURPOSE: Makefile for DAT registration module +# +# $Id$ +#*********************************************************************/ +UNAME = $(strip $(shell /bin/uname -r)) +KERNEL_INCLUDE_DIR = /lib/modules/$(UNAME)/build/include + +TOPDIR = $(shell /bin/pwd) +COMMON = $(TOPDIR)/../common + +SRC_PATH = $(TOPDIR)/ +OBJ_PATH = $(TOPDIR)/Obj/ +#TARGET_PATH = $(TOPDIR)/Target/$(UNAME)/ +TARGET_PATH = $(TOPDIR)/Target + +TARGET = $(TARGET_PATH)/dat_registry.o + +CC=gcc + +# +# CFLAGS definition +# +CFLAGS = -O $(CPPFLAGS) +CFLAGS += -D__KERNEL__ +CFLAGS += -DMODULE +CFLAGS += -I. +CFLAGS += -I.. +CFLAGS += -I../include +CFLAGS += -I./linux +CFLAGS += -I../common +CFLAGS += -I$(KERNEL_INCLUDE_DIR) +CFLAGS += -Wall +CFLAGS += -Wstrict-prototypes +# The following two lines will cause warnings to appear on the +# compile time output when including certain kernel .h files +#CFLAGS += -Wmissing-prototypes +#CFLAGS += -Wmissing-declarations +CFLAGS += -Werror + +KDAT_SRCS = dat_kdapl.c \ + dat_module.c + +COMMON_SRCS = dat_data.c \ + dat_init.c \ + dat_register.c + +SRCS = $(KDAT_SRCS) $(COMMON_SRCS) + +KDAT_OBJS = $(KDAT_SRCS:%.c=$(OBJ_PATH)%.o) +COMMON_OBJS = $(COMMON_SRCS:%.c=$(OBJ_PATH)%.o) + +OBJS = $(KDAT_OBJS) $(COMMON_OBJS) + + +all: mkdirs $(TARGET) + +mkdirs: + @[ -d $(TARGET_PATH) ] || /bin/mkdir -p $(TARGET_PATH) + @[ -d $(OBJ_PATH) ] || /bin/mkdir -p $(OBJ_PATH) + +$(KDAT_OBJS): $(OBJ_PATH)%.o : %.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(COMMON_OBJS): $(OBJ_PATH)%.o : $(COMMON)/%.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(TARGET): $(OBJS) + @echo Linking $(TARGET) + $(LD) -r $^ -o $(TARGET) + +clean: + rm -f $(OBJS) + rm -f $(TARGET) + +load: $(TARGET) + @sudo insmod $(TARGET) + @sudo tail -3 /var/log/messages | grep -v sudo + +unload: + @sudo rmmod dat_registry + @sudo tail -3 /var/log/messages | grep -v sudo diff --git a/branches/Ndi/ulp/dapl/dat/kdat/dat_kdapl.c b/branches/Ndi/ulp/dapl/dat/kdat/dat_kdapl.c new file mode 100644 index 00000000..c702e931 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/kdat/dat_kdapl.c @@ -0,0 +1,101 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dat_kdapl.c
+ *
+ * PURPOSE: kdapl functions required to be part of the DAT registry.
+ * Description: Interfaces in this file are completely described in
+ *              the kDAPL 1.0 API.
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dat_osd.h"
+#include
+#include "dat_register.h"
+
+/***********************************************************************
+ * Function: dat_ia_open
+ ***********************************************************************/
+DAT_RETURN
+dat_ia_open (
+    IN    const DAT_NAME_PTR  device_name,
+    IN    DAT_COUNT           async_event_qlen,
+    INOUT DAT_EVD_HANDLE      *async_event_handle,
+    OUT   DAT_IA_HANDLE       *ia_handle)
+{
+    DAT_RETURN    status;
+    DAT_PROVIDER  *provider;
+
+    if (!ia_handle)
+    {
+        return DAT_INVALID_HANDLE | DAT_INVALID_HANDLE_IA;
+    }
+    *ia_handle = 0;
+    dat_os_printf ("<1>DAT Registry: dat_ia_open\n");
+
+    /* make sure the provider is registered */
+    provider = dat_get_provider(device_name);
+    if (!provider)
+    {
+        status = DAT_NAME_NOT_FOUND;
+        goto error_exit;
+    }
+
+    /* call the real provider open function */
+    status = (*provider->ia_open_func) ( provider,
+                                         async_event_qlen,
+                                         async_event_handle,
+                                         ia_handle);
+
+ error_exit:
+    return status;
+}
+
+/***********************************************************************
+ * Function: dat_ia_close
+ ***********************************************************************/
+DAT_RETURN
+dat_ia_close (IN DAT_IA_HANDLE ia_handle)
+{
+    DAT_RETURN    status;
+    DAT_PROVIDER  *provider;
+
+    provider = DAT_HANDLE_TO_PROVIDER(ia_handle);
+    status = (*provider->ia_close_func) (ia_handle);
+
+    return status;
+}
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dat/kdat/dat_module.c b/branches/Ndi/ulp/dapl/dat/kdat/dat_module.c
new file mode 100644
index 00000000..f1264227
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/kdat/dat_module.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ */ + +/********************************************************************** + * + * MODULE: dat_module.c + * + * PURPOSE: DAT registry module implementation, exists in the Linux + * kernel + * Description: a linux module implementation + * + * $Id$ + **********************************************************************/ + +#include "dat_osd.h" +#include +#include "dat_data.h" + +/*********************************************************************** + * init_module + * + * Entry point for a Linux module, performs simple initialization + * + * Input: + * none + * + * Output: + * none + * + * Returns: + * SUCCESS + ***********************************************************************/ + +int init_module(void) +{ + int i; + + dat_os_printf ("<1>DAT Registry: Started\n"); + + dat_os_lock_init(&dat_registry_lock); + for (i = 0; i < DAT_MAX_NICS; i++) + { + dat_registry[i] = NULL; + } + return (0); /* success */ +} + +/*********************************************************************** + * cleanup_module + * + * Entry point for a Linux module, cleans up the module on exit + * + * Input: + * none + * + * Output: + * none + * + * Returns: + * void + ***********************************************************************/ +void cleanup_module(void) +{ + dat_os_lock_destroy(&dat_registry_lock); + dat_os_printf ("<1>DAT Registry: Stopped\n"); + return; +} + +/* Module exports */ + +EXPORT_SYMBOL (dat_ia_open); +EXPORT_SYMBOL (dat_ia_close); + + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ diff --git a/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.c b/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.c new file mode 100644 index 00000000..b463e68b --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.c @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_osd.c + * + * PURPOSE: Operating System Dependent layer + * Description: + * Provide OS dependent functions with a canonical DAT + * interface. Designed to be portable and hide OS specific quirks + * of common functions. 
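+ *
+ *      In this kernel-resident flavor of the layer, threads are the
+ *      main service implemented here: a thread may be created already
+ *      running, or parked on a wait queue (DAT_OS_THREAD_WAIT) until
+ *      dat_os_thread_resume() releases it.  Locks, timers and memory
+ *      wrappers are provided inline by dat_osd.h; time of day comes
+ *      from do_gettimeofday(), reported in microseconds.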
+ *
+ *
+ * $Id$
+ **********************************************************************/
+
+#include
+#include
+
+#include
+#include "dat_osd.h"
+
+/*
+ * Internal structures
+ */
+
+typedef struct THREAD_INFO {
+    void                 (*function)(void *);
+    void                 *arg;
+    int                  thread_id;
+    DAT_OS_THREAD_STATE  state;
+    wait_queue_head_t    wchan;
+} THREAD_INFO;
+
+
+/*
+ * dat_os_thread_start
+ *
+ * Simple internal routine that allows us to catch the thread before
+ * invoking the user supplied function. Linux threads are activated
+ * and run, but our semantics allow us to create them in a sleep
+ * state.
+ *
+ * Input:
+ *      arg       Thread state structure to pass initial data
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+static int
+dat_os_thread_start (void *arg)
+{
+    IN  THREAD_INFO *thread_state = (THREAD_INFO *) arg;
+
+    if (thread_state->state == DAT_OS_THREAD_WAIT)
+    {
+        /*
+         * Wait for a wakeup event on our thread_state
+         * structure. A thread_resume() or a signal will
+         * wake us up.
+         */
+        interruptible_sleep_on (&thread_state->wchan);
+    }
+
+    /*
+     * invoke the user specified routine
+     */
+    (thread_state->function) (thread_state->arg);
+
+    return (0);
+}
+
+
+/*
+ * dat_os_thread_create
+ *
+ * Returns a pointer to an internal thread structure that provides
+ * the implementation with a measure of control over the threads.
+ * Much of this is mandated by the desire to start threads in a
+ * wait queue, then to enable them when the implementation is ready.
+ *
+ * There is no upfront checking; the invoker is assumed to know what
+ * they are doing.
+ *
+ * Input:
+ *      function  Function the thread will invoke
+ *      arg       Argument to start routine
+ *      state     Initial thread state
+ *                DAT_OS_THREAD_RUN   Immediate execution
+ *                DAT_OS_THREAD_WAIT  new thread will wait
+ *      thread_id User allocated space for thread ID.
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INSUFFICIENT_RESOURCES
+ *      DAT_INVALID_PARAMETER
+ */
+DAT_RETURN
+dat_os_thread_create (
+    IN  void                 (*function) (void *),
+    IN  void                 *arg,
+    IN  DAT_OS_THREAD_STATE  state,
+    OUT DAT_OS_THREAD        *thread_id )
+{
+    int          pid;
+    THREAD_INFO  *thread_state;
+
+    /* Get a thread_state structure and fill it in. Allocate the full
+     * structure; sizeof(thread_state) would only be the size of the
+     * pointer.
+     */
+    thread_state = kmalloc(sizeof(THREAD_INFO), GFP_ATOMIC);
+    if (thread_state == NULL)
+    {
+        return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY;
+    }
+
+    thread_state->function = function;
+    thread_state->arg      = arg;
+    thread_state->state    = state;
+
+    /* set up a wait queue for a delayed start */
+    if ( state == DAT_OS_THREAD_WAIT )
+    {
+        init_waitqueue_head (&thread_state->wchan);
+    }
+
+    if ((pid = kernel_thread (dat_os_thread_start,
+                              thread_state,   /* argument */
+                              0)) < 0)        /* clone arguments 0 */
+    {
+        /* Error: options are:
+         *   EAGAIN: Need to try again
+         *   else, insufficient resources.
+         * Just return INSUFFICIENT_RESOURCES for any error
+         */
+        kfree(thread_state);
+        return DAT_INSUFFICIENT_RESOURCES;
+    }
+
+    thread_state->thread_id = pid;
+    *thread_id = (DAT_OS_THREAD)thread_state;
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dat_os_thread_resume
+ *
+ * Set a blocked thread running.
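+ *
+ * As an illustration of the delayed-start usage this enables, where
+ * worker() and ctx stand for a Consumer-supplied function and argument:
+ *
+ *      DAT_OS_THREAD thread;
+ *
+ *      dat_os_thread_create (worker, ctx, DAT_OS_THREAD_WAIT, &thread);
+ *      ... finish initializing state the worker depends on ...
+ *      dat_os_thread_resume (thread);      <-- worker(ctx) now runs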
+ *
+ * Input:
+ *      thread_id Pointer to thread_state entry
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INVALID_PARAMETER
+ */
+DAT_RETURN
+dat_os_thread_resume (
+    IN  DAT_OS_THREAD  thread_id )
+{
+    THREAD_INFO  *thread_state = (THREAD_INFO *) thread_id;
+
+    if (thread_state == NULL)
+    {
+        return DAT_INVALID_PARAMETER | DAT_INVALID_ARG1;
+    }
+
+    if (thread_state->state == DAT_OS_THREAD_WAIT)
+    {
+        /* Set the thread to running */
+        thread_state->state = DAT_OS_THREAD_RUN;
+        wake_up_interruptible (&thread_state->wchan);
+    }
+
+    return DAT_SUCCESS;
+}
+
+/*
+ * dat_os_thread_destroy
+ *
+ * Kill the specified thread and release its tracking structure.
+ *
+ * Input:
+ *      thread_id Pointer to thread_state entry
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ *      DAT_INVALID_PARAMETER
+ */
+DAT_RETURN
+dat_os_thread_destroy (
+    IN  DAT_OS_THREAD  thread_id )
+{
+    THREAD_INFO  *thread_state = (THREAD_INFO *) thread_id;
+
+    if (thread_state == NULL)
+    {
+        return DAT_INVALID_PARAMETER | DAT_INVALID_ARG1;
+    }
+
+    kill_proc (thread_state->thread_id, SIGTERM, 1);
+
+    kfree(thread_state);        /* release thread resources */
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * dat_os_get_time
+ *
+ * Return 64 bit value of current time in microseconds.
+ *
+ * Input:
+ *      loc       User location to place current time
+ *
+ * Returns:
+ *      DAT_SUCCESS
+ */
+DAT_RETURN
+dat_os_get_time (DAT_OS_TIMEVAL * loc)
+{
+    struct timeval tv;
+
+    do_gettimeofday (&tv);
+    *loc = ((uint64_t) (tv.tv_sec) * 1000000L) + (uint64_t) tv.tv_usec;
+
+    return DAT_SUCCESS;
+}
+
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.h b/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.h
new file mode 100644
index 00000000..1a4cf15c
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/kdat/linux/dat_osd.h
@@ -0,0 +1,258 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dat_osd.h
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *      Provide OS dependent data structures & functions with
+ *      a canonical DAT interface. Designed to be portable
+ *      and hide OS specific quirks of common functions.
+ * + * $Id$ + **********************************************************************/ + +#ifndef _DAT_OSD_H_ +#define _DAT_OSD_H_ + +/* + * This file is defined for Linux systems only, including it on any + * other build will cause an error + */ +#ifndef __linux__ +#error UNDEFINED OS TYPE +#endif /* __linux__ */ + +#ifndef __KERNEL__ +#error "Must compile for the kernel" +#endif /* __KERNEL__ */ + + +#include + + +#include +#include +#include +#include +#include + + +/* + * Thread package + */ +typedef void * DAT_OS_THREAD; + +typedef enum dat_os_thread_state +{ + DAT_OS_THREAD_WAIT, + DAT_OS_THREAD_RUN +} DAT_OS_THREAD_STATE; + + +/* + * Lock functions: Simply use spinlocks at this point. + * + * N.B. No lock can be taken in both common code and an upcall, + * which has the possibility of being called in interrupt + * context. If this is a requirement we need to change + * from buzz locks (spinlock) to semaphores. + */ +typedef spinlock_t DAT_OS_LOCK; + + +STATIC _INLINE_ void dat_os_lock_init(DAT_OS_LOCK *lock) +{ + spin_lock_init( lock ); +} + +STATIC _INLINE_ void dat_os_lock_destroy(DAT_OS_LOCK *lock) +{ + /* Nothing */; +} + +STATIC _INLINE_ void dat_os_lock(DAT_OS_LOCK *lock) +{ + spin_lock(lock); /* down(mutex); */ +} + +STATIC _INLINE_ void dat_os_unlock(DAT_OS_LOCK *lock) +{ + spin_unlock(lock); /* up(mutex); */ +} + + +/* + * Atomic operations + */ +typedef atomic_t DAT_OS_ATOMIC; + +STATIC _INLINE_ void dat_os_atomic_add(int i, DAT_OS_ATOMIC * v) +{ + atomic_add(i,v); +} + +STATIC _INLINE_ void dat_os_atomic_sub(int i, DAT_OS_ATOMIC * v) +{ + atomic_sub(i,v); +} + + +/* + * Memory Functions + */ +STATIC _INLINE_ void *dat_os_alloc(int size) +{ + return kmalloc(size, GFP_ATOMIC); + /* If too much memory is requested from the Linux SLAB + * allocator, we may need to use vmalloc(sz) instead + */ +} + +STATIC _INLINE_ void dat_os_free(void *ptr, int size) +{ + kfree(ptr); + /* If too much memory is requested from the Linux SLAB + * allocator, we may need to use vfree(ptr) instead + */ +} + + +/* + * memory block functions + */ + +STATIC _INLINE_ void * dat_os_memzero(void *loc, int size) +{ + return memset(loc,0,size); +} + +STATIC _INLINE_ void * dat_os_memcpy(void *dest, void *src, int len) +{ + return memcpy(dest,src,len); +} + +STATIC _INLINE_ int dat_os_memcmp(void *mem1, void *mem2, int len) +{ + return memcmp(mem1,mem2,len); +} + +/* + * Timers + */ +typedef uint64_t DAT_OS_TIMEVAL; +typedef struct timer_list *DAT_OS_TIMER; +typedef unsigned long DAT_OS_TICKS; + +DAT_RETURN dat_os_get_time (DAT_OS_TIMEVAL *); + +STATIC _INLINE_ DAT_OS_TICKS dat_os_get_ticks(void) +{ + return jiffies; +} + +STATIC _INLINE_ int dat_os_ticks_to_seconds(DAT_OS_TICKS ticks) +{ + return ticks / HZ; +} + + +/* + * dat_os_set_timer() + * + * Set a timer. The timer will invoke the specified function + * after a number of useconds expires. 
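+ *
+ * The delay is converted to jiffies, rounding up, so a short timeout
+ * never fires early.  For example, with HZ = 100 (10000 usec per
+ * jiffy), a request for 15000 usec becomes 2 jiffies: the timer fires
+ * after 20 ms rather than 10 ms.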
+ * + * Input: + * timer User provided timer structure + * func Function to invoke when timer expires + * data Argument passed to func() + * expires microseconds until timer fires + * + * Returns: + * no return value + * + */ +STATIC _INLINE_ void dat_os_set_timer( + DAT_OS_TIMER timer, + void (*func)(unsigned long), + unsigned long data, + DAT_OS_TIMEVAL expires ) +{ + init_timer(timer); + timer->function = func; + timer->data = data; + /* Change from useconds to jiffies */ + expires += 1000000L / HZ - 1; + expires /= 1000000L / HZ; + /* set the timer */ + timer->expires = jiffies + (unsigned long)expires; + + add_timer(timer); +} + + +/* + * dat_os_timer_cancel() + * + * Cancel a running timer. The timer will invoke the specified + * function after a number of useconds expires. + * + * Input: + * timer Running timer + * + * Returns: + * no return value + * + */ +STATIC _INLINE_ void dat_os_timer_cancel(DAT_OS_TIMER timer) +{ + /* del_timer_sync returns number of times timer was deleted; + * just ignore */ + (void) del_timer_sync(timer); +} + + +/* + * Thread functions: prototypes + */ +DAT_RETURN dat_os_thread_create ( void (*)(void *), void *, + DAT_OS_THREAD_STATE, DAT_OS_THREAD *); +DAT_RETURN dat_os_thread_resume (DAT_OS_THREAD); +DAT_RETURN dat_os_thread_destroy (DAT_OS_THREAD); + + +/* + * Debug helper routines in the Linux kernel + */ +#define dat_os_printf printk +#define dat_os_assert assert +#define dat_os_breakpoint breakpoint + + +#endif /* _DAT_OSD_H_ */ diff --git a/branches/Ndi/ulp/dapl/dat/udat/Makefile.cygwin b/branches/Ndi/ulp/dapl/dat/udat/Makefile.cygwin new file mode 100644 index 00000000..aad1d67c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/Makefile.cygwin @@ -0,0 +1,271 @@ +# +# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under either one of the following two licenses: +# +# 1) under the terms of the "Common Public License 1.0" a copy of which is +# in the file LICENSE.txt in the root directory. The license is also +# available from the Open Source Initiative, see +# http://www.opensource.org/licenses/cpl.php. +# OR +# +# 2) under the terms of the "The BSD License" a copy of which is in the file +# LICENSE2.txt in the root directory. The license is also available from +# the Open Source Initiative, see +# http://www.opensource.org/licenses/bsd-license.php. +# +# Licensee has the right to choose either one of the above two licenses. +# +# Redistributions of source code must retain both the above copyright +# notice and either one of the license notices. +# +# Redistributions in binary form must reproduce both the above copyright +# notice, either one of the license notices in the documentation +# and/or other materials provided with the distribution. +# + +#********************************************************************** +# +# MODULE: Makefile +# +# PURPOSE: Makefile for DAT registration module for CYGWIN environment +# +#*********************************************************************/ + + +############################################################## +# Application variables +# + +CP = cp -p -u +AS = $(CROSS_COMPILE)as +LD = $(CROSS_COMPILE)link.exe +CC = $(CROSS_COMPILE)cl.exe +CPP = $(CC) +AR = $(CROSS_COMPILE)ar +NM = $(CROSS_COMPILE)nm +STRIP = $(CROSS_COMPILE)strip +OBJCOPY = $(CROSS_COMPILE)objcopy +OBJDUMP = $(CROSS_COMPILE)objdump +RANLIB = $(CROSS_COMPILE)ranlib +MKDIR = mkdir -p +SED = /bin/sed +SHELL = /bin/sh + +TOPDIR = . 
+ +COMMON = $(TOPDIR)/../common +WINDOWS = $(TOPDIR)/windows + +OBJ_DIR = $(TOPDIR)/Obj +TARGET_DIR = $(TOPDIR)/Target + +SRCDIRS := \ + $(TOPDIR) \ + $(COMMON) \ + $(WINDOWS) + +INCDIRS := \ + $(SRCDIRS) \ + $(TOPDIR)/../include + +vpath %.c . ${SRCDIRS} +vpath %.h . ${INCDIRS} + + +################################################## +# targets +TARLIBS = dat +TARSHLIBS = dat + +# data for user libraries +dat_SOURCES = $(COMMON_SRCS) $(UDAT_SRCS) $(WIN_SRCS) + +UDAT_SRCS = dat_udapl.c + +WIN_SRCS = # dat_osd.c + +COMMON_SRCS = dat_data.c \ + dat_init.c \ + dat_register.c + + + +#################################################### +# compiler options CFLAGS +# + +# common flags +UOPTIONS += /nologo /MDd /W3 /GX /Od /FD /GZ /Gm /Zi + +# common defines +UCOMDEFS += /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "WIN32" /D "_DEBUG" \ + -D_WIN32_WINNT=0x0500 -DWINVER=0x0500 +# other options: /FR /Fd + +# private defines +UPRIVDEFS += /D "__WIN__" /D "__MSC__" /D "__i386__" + +CFLAGS += $(UOPTIONS) $(UCOMDEFS) $(UPRIVDEFS) + +########################################################### +# common included libraries +# +ULDLIBS += kernel32 user32 gdi32 winspool \ + comdlg32 advapi32 shell32 ole32 oleaut32 \ + uuid odbc32 odbccp32 Ws2_32 + + +######################################################### +# link options LDFLAGS +# + +MTARFLAGS= -cr + +TARFLAGS += cr + +# common flags +ULDOPTIONS += /nologo /incremental:no /machine:I386 /debug + +# common directories +ULDDIRS += /LIBPATH:"$(OBJ_DIR)" + +# module entry +ULDENTRY = /noentry + +# specific DLL flags +ifndef NO_DEF_FILE +USE_DEF_FILE = /def:$(WINDOWS)/dat_win.def +endif + +ifndef NO_LIB_FILE +USE_LIB_FILE = $(@:%.dll=/implib:%.lib) +endif + +ifndef NO_PDB_FILE +USE_PDB_FILE = $(@:%.dll=/PDB:%.pdb) +endif + +DLLFLAGS += $(USE_DEF_FILE) $(USE_LIB_FILE) $(USE_PDB_FILE) + +# DLL flags +UDLLFLAGS += /dll $(DLLFLAGS) + +LDFLAGS += $(ULDOPTIONS) $(ULDENTRY) $(ULDDIRS) $(ULDLIBS:%=%.lib) + +# user DLL +LDSHFLAGS += $(LDFLAGS) $(UDLLFLAGS) + + + +############################################################# +# Local functions +# +bsndir = $(notdir $(basename $1)) + +############################################################ +# Common rules +# +define COMPILE +$(CC) -c $(strip ${CFLAGS}) $(strip $(INCDIRS:%=-I%)) $(EXTRA_CFLAGS) $($(@:${OBJ_DIR}/%.obj=%.c_CFLAGS)) /Fo"$@" $< +endef + +define DEF_SET_VAR_SRCS +@echo "$@_VAR_SRCS += $($(basename $(call bsndir,$@))_SOURCES)" >> $@ +endef + +define DEF_SET_VAR_OBJS +@echo "$@_VAR_OBJS += $($(basename $(call bsndir,$@))_OBJECTS)" >> $@ +endef + + + +########################################################################### +# Start rules +# + +all: $(TARSHLIBS:%=${TARGET_DIR}/%.dll) $(TAROBJS:%=${OBJ_DIR}/%.obj) $(TARLIBS:%=${TARGET_DIR}/%.lib) + + +########################################################################## +# Simple objects (user) + +$(TAROBJS:%=${OBJ_DIR}/%.obj): ${OBJ_DIR}/%.obj: %.c + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + $(COMPILE) + +$(OBJ_DIR)/%.obj: %.c + $(COMPILE) + + +########################################################################## +# Static libraries +# +$(TARLIBS:%=$(TARGET_DIR)/%.lib): % : %.mk +$(TARLIBS:%=$(TARGET_DIR)/%.lib.mk): Makefile.cygwin + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + @if [ ! -d $(TARGET_DIR) ]; then mkdir -p $(TARGET_DIR); fi + @echo "# Do not edit. Automatically generated file." 
> $@ + @ + @${DEF_SET_VAR_OBJS} + @${DEF_SET_VAR_SRCS} + @ + @echo "SOURCES += \$$($@_VAR_SRCS)" >> $@ + @ + @echo "$(@:%.mk=%): \$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) " >> $@ + @echo "$(@:%.mk=%): \$$($@_VAR_OBJS:%.c=$(OBJ_DIR)/%.obj) " >> $@ + @echo -e "\t\$$(AR) \$$(MTARFLAGS) \$$@ \c" >> $@ + @echo -e "\$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) \c" >> $@ + @echo "\$$($@_VAR_OBJS) \$$(\$$(@:$(OBJ_DIR)/%.lib=%)_ARFLAGS) " >> $@ + @echo -e "\t\$$(RANLIB) \$$@" >> $@ + + +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(strip $(TARLIBS)),) +-include $(patsubst %,$(OBJ_DIR)/%.lib.mk,$(TARLIBS)) +endif +endif + + +########################################################################## +# Shared libraries +# +$(TARSHLIBS:%=$(TARGET_DIR)/%.dll): % : %.mk +$(TARSHLIBS:%=$(TARGET_DIR)/%.dll.mk): Makefile.cygwin + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + @if [ ! -d $(TARGET_DIR) ]; then mkdir -p $(TARGET_DIR); fi + @echo "# Do not edit. Automatically generated file." > $@ + @ + @${DEF_SET_VAR_OBJS} + @${DEF_SET_VAR_SRCS} + @ + @echo "SOURCES += \$$($@_VAR_SRCS)" >> $@ + @ + @echo "$(@:%.mk=%): \$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo "$(@:%.mk=%): \$$($@_VAR_OBJS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo -e "\t\$$(LD) \$$(LDSHFLAGS) /out:\"\$$@\" \c" >> $@ + @echo -e "\$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) \c" >> $@ + @echo -e "\$$($@_VAR_OBJS) \c" >> $@ + @echo -e "\$$(LDSHLIBS:%=%) \$$(LIBSHDIRS:%=/LIBPATH:%) \c" >> $@ + + +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(strip $(TARSHLIBS)),) +-include $(patsubst %,$(TARGET_DIR)/%.dll.mk,$(TARSHLIBS)) +endif +endif + + +########################################################################## +# Clean rules +# +CLEANDIRS = $(OBJ_DIR) $(TARGET_DIR) + +CLEANFILES = *.obj *.dll *.lib *.sys *.pdb *.idb *.exp *.ilk *.sbr *.mk + +clean: $(CLEANDIRS) + @echo deleting dump files at $(shell pwd) + @rm -f $(CLEANFILES) + @if [ -d $(OBJ_DIR) ] ; then rm -f $(CLEANFILES:%=$(OBJ_DIR)/%); fi + @if [ -d $(TARGET_DIR) ] ; then rm -f $(CLEANFILES:%=$(TARGET_DIR)/%); fi + diff --git a/branches/Ndi/ulp/dapl/dat/udat/Makefile.org b/branches/Ndi/ulp/dapl/dat/udat/Makefile.org new file mode 100644 index 00000000..fdb4c64d --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/Makefile.org @@ -0,0 +1,86 @@ + + # + # Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + # + # This Software is licensed under either one of the following two licenses: + # + # 1) under the terms of the "Common Public License 1.0" a copy of which is + # in the file LICENSE.txt in the root directory. The license is also + # available from the Open Source Initiative, see + # http://www.opensource.org/licenses/cpl.php. + # OR + # + # 2) under the terms of the "The BSD License" a copy of which is in the file + # LICENSE2.txt in the root directory. The license is also available from + # the Open Source Initiative, see + # http://www.opensource.org/licenses/bsd-license.php. + # + # Licensee has the right to choose either one of the above two licenses. + # + # Redistributions of source code must retain both the above copyright + # notice and either one of the license notices. + # + # Redistributions in binary form must reproduce both the above copyright + # notice, either one of the license notices in the documentation + # and/or other materials provided with the distribution. 
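+#
+# (On the rule machinery in Makefile.cygwin above: for each name in
+# TARLIBS/TARSHLIBS it emits a generated fragment -- dat.lib.mk or
+# dat.dll.mk -- that expands dat_SOURCES into object lists plus an
+# archive or link rule, then pulls the fragment back in with
+# "-include".  Roughly, the generated dat.lib.mk amounts to:
+#
+#     Target/dat.lib: Obj/dat_data.obj Obj/dat_init.obj \
+#                     Obj/dat_register.obj Obj/dat_udapl.obj
+#         $(AR) $(MTARFLAGS) $@ ...
+#         $(RANLIB) $@
+#
+# so adding a file to dat_SOURCES is enough to get it built and
+# archived.)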
+ # + +#********************************************************************* +# +# MODULE: Makefile +# +# PURPOSE: Makefile for DAT registration module +# +#*********************************************************************/ + +IBA_HOME = ../../.. + +UDAT_ROOT = $(shell /bin/pwd) +UDAT_LINUX = $(UDAT_ROOT)/linux +UDAT_COMMON = $(UDAT_ROOT)/../common + +DAT_HEADERS = $(UDAT_ROOT)/../include +DAT_HEADERS_SYSTEM_PATH = /usr/include/dat + +VPATH = $(UDAT_ROOT) $(UDAT_LINUX) $(UDAT_COMMON) + +SO_TARGET = libdat.so.0.0 +SO_NAME = libdat.so +L_TARGET := libdat.a + +DAT_OBJS = udat.o \ + dat_osd.o \ + dat_dictionary.o \ + dat_dr.o \ + dat_init.o \ + dat_sr.o \ + udat_sr_parser.o \ + dat_strerror.o + +S_OBJS = $(DAT_OBJS) +L_OBJS = $(S_OBJS) + +EXTRA_CFLAGS = -O $(CPPFLAGS) +EXTRA_CFLAGS += -g +EXTRA_CFLAGS += -I. +EXTRA_CFLAGS += -I.. +EXTRA_CFLAGS += -I../.. +EXTRA_CFLAGS += -I../common +EXTRA_CFLAGS += -I./linux +EXTRA_CFLAGS += -I$(DAT_HEADERS) +EXTRA_CFLAGS += -Wall +EXTRA_CFLAGS += -Wstrict-prototypes +EXTRA_CFLAGS += -Wmissing-prototypes +EXTRA_CFLAGS += -Wmissing-declarations +EXTRA_CFLAGS += -Werror +ifdef GPROF +EXTRA_CFLAGS += -pg +endif + +EXTRA_LDFLAGS = -init dat_init +EXTRA_LDFLAGS += -fini dat_fini +EXTRA_LDFLAGS += -ldl +EXTRA_LDFLAGS += -lpthread + +include $(IBA_HOME)/Makefile.config +include $(IBA_HOME)/Makefile.rules diff --git a/branches/Ndi/ulp/dapl/dat/udat/Makefile.orig b/branches/Ndi/ulp/dapl/dat/udat/Makefile.orig new file mode 100644 index 00000000..2e2f4f9c --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/Makefile.orig @@ -0,0 +1,114 @@ +# +# Copyright (c) 2002, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under the terms of the "IBM Common Public +# License 1.0" a copy of which is in the file LICENSE.txt in the +# root directory. The license is also available from the Open Source +# Initiative, see http://www.opensource.org/licenses/ibmpl.html. +# +# + +#********************************************************************* +# +# MODULE: Makefile +# +# PURPOSE: Makefile for DAT registration module +# +# $Id$ +#*********************************************************************/ + +UDAT_ROOT = $(shell /bin/pwd) +UDAT_LINUX = $(UDAT_ROOT)/linux +UDAT_COMMON = $(UDAT_ROOT)/../common + +VPATH = $(UDAT_ROOT) $(UDAT_LINUX) $(UDAT_COMMON) + +OBJ_PATH = $(UDAT_ROOT)/Obj +TARGET_PATH = $(UDAT_ROOT)/Target + +STATIC = $(TARGET_PATH)/libdat.a +DYNAMIC = $(TARGET_PATH)/libdat.so + +OBJS = $(OBJ_PATH)/udat.o \ + $(OBJ_PATH)/dat_osd.o \ + $(OBJ_PATH)/dat_osd_sr.o \ + $(OBJ_PATH)/dat_dictionary.o \ + $(OBJ_PATH)/dat_dr.o \ + $(OBJ_PATH)/dat_init.o \ + $(OBJ_PATH)/dat_sr.o + +# +# CC definitions +# + +CC = gcc + +CFLAGS = -O $(CPPFLAGS) +CFLAGS += -g +CFLAGS += -I. +CFLAGS += -I.. +CFLAGS += -I../.. 
+CFLAGS += -I../include +CFLAGS += -I./linux +CFLAGS += -I../common +CFLAGS += -Wall +CFLAGS += -Wstrict-prototypes +CFLAGS += -Wmissing-prototypes +CFLAGS += -Wmissing-declarations +CFLAGS += -Werror +ifdef GPROF +CFLAGS += -pg +endif + +# +# LD definitions +# + +LD = ld + +LDFLAGS = -shared +LDFLAGS += -ldl +LDFLAGS += -init dat_init +LDFLAGS += -fini dat_fini + + +# +# AR definitions +# + +AR = ar + +ARFLAGS = r + + +# +# Rules +# + +all: mkdirs $(DYNAMIC) $(STATIC) + +mkdirs: + @[ -d $(TARGET_PATH) ] || /bin/mkdir -p $(TARGET_PATH) + @[ -d $(OBJ_PATH) ] || /bin/mkdir -p $(OBJ_PATH) + +$(OBJ_PATH)/%.o : %.c + @echo Compiling $< + $(CC) $(CFLAGS) -c $< -o $@ + +$(DYNAMIC): $(OBJS) + @echo Linking $(DYNAMIC) + $(LD) $(LDFLAGS) $^ -o $(DYNAMIC) + +$(STATIC): $(OBJS) + @echo Archiving $(STATIC) + $(AR) $(ARFLAGS) $(STATIC) $^ + +clean: + rm -f $(OBJ_PATH)/*.o + rm -f $(DYNAMIC) + rm -f $(STATIC) + +tidy: + rm -f $(UDAT_ROOT)/*~ + rm -f $(UDAT_LINUX)/*~ + rm -f $(UDAT_COMMON)/*~ diff --git a/branches/Ndi/ulp/dapl/dat/udat/SOURCES b/branches/Ndi/ulp/dapl/dat/udat/SOURCES new file mode 100644 index 00000000..5a016c40 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/SOURCES @@ -0,0 +1,25 @@ +!if $(FREEBUILD) +TARGETNAME=dat +!else +TARGETNAME=datd +!endif +TARGETPATH=..\..\..\..\bin\user\obj$(BUILD_ALT_DIR) +TARGETTYPE=DYNLINK +DLLENTRY=_DllMainCRTStartup +DLLDEF=$O\udat_exports.def +USE_CRTDLL=1 + +SOURCES=\ + udat.c \ + udat_sr_parser.c \ + udat_sources.c + +INCLUDES=windows;..\common;..\include; +RCOPTIONS=/I..\..\..\..\inc; + +USER_C_FLAGS=$(USER_C_FLAGS) -DEXPORT_DAT_SYMBOLS + +TARGETLIBS= \ + $(SDK_LIB_PATH)\kernel32.lib + +MSC_WARNING_LEVEL= /W3 diff --git a/branches/Ndi/ulp/dapl/dat/udat/dat.conf b/branches/Ndi/ulp/dapl/dat/udat/dat.conf new file mode 100644 index 00000000..3eaa4782 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/dat.conf @@ -0,0 +1,7 @@ +# DAT configuration file +##################################### + +HcaPort1 u1.1 threadsafe default dapl.dll ri.1.1 "IbalHca0 1" " " +HcaPort2 u1.1 threadsafe default dapl.dll ri.1.1 "IbalHca0 2" " " +# HcaPort1d u1.1 threadsafe default dapld.dll ri.1.1 "IbalHca0 1" " " +# HcaPort2d u1.1 threadsafe default dapld.dll ri.1.1 "IbalHca0 2" " " diff --git a/branches/Ndi/ulp/dapl/dat/udat/ibhosts b/branches/Ndi/ulp/dapl/dat/udat/ibhosts new file mode 100644 index 00000000..1463f9d8 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/ibhosts @@ -0,0 +1,7 @@ +# DAPL IB hosts GID configuration +#hostname Network Port GUID +#-------- ------------------ ------------------ +#endrin_ib0 0xfe80000000000000 0x00066a00a0000405 +#rockaway_ib0 0xfe80000000000000 0x00066a00a0000117 +#gack_ib0 0xfe80000000000000 0x00066a00a00001f2 +#gack_ib1 0xfe80000000000000 0x00066a01a00001f2 diff --git a/branches/Ndi/ulp/dapl/dat/udat/linux/dat-1.1.spec b/branches/Ndi/ulp/dapl/dat/udat/linux/dat-1.1.spec new file mode 100644 index 00000000..1f0c0122 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/linux/dat-1.1.spec @@ -0,0 +1,80 @@ +# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under either one of the following two licenses: +# +# 1) under the terms of the "Common Public License 1.0" a copy of which is +# in the file LICENSE.txt in the root directory. The license is also +# available from the Open Source Initiative, see +# http://www.opensource.org/licenses/cpl.php. +# OR +# +# 2) under the terms of the "The BSD License" a copy of which is in the file +# LICENSE2.txt in the root directory. 
The license is also available from +# the Open Source Initiative, see +# http://www.opensource.org/licenses/bsd-license.php. +# +# Licensee has the right to choose either one of the above two licenses. +# +# Redistributions of source code must retain both the above copyright +# notice and either one of the license notices. +# +# Redistributions in binary form must reproduce both the above copyright +# notice, either one of the license notices in the documentation +# and/or other materials provided with the distribution. +# +# +# DAT Registry RPM SPEC file +# + +%define make_dir udat + +# +# Preamble +# + +Summary: DAT Registry +Name: dat +Version: 1.1 +Release: 0 +Vendor: Dat Collaborative +Exclusiveos: Linux +Exclusivearch: i386 +License: BSD and CPL +Group: System/Libraries +Source: %{name}-%{version}.tgz +URL: http://www.datcollaborative.org + +%description +This package contains the DAT Registry. + +# +# Preparation +# + +%prep +%setup -n dat + +# +# Build +# + +%build +cd %{make_dir} +make + +# +# Install +# + +%install +cd %{make_dir} +make install + +# +# Files +# + +%files +/usr/include/dat +/usr/lib/libdat.so +/usr/lib/libdat.a diff --git a/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.c b/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.c new file mode 100644 index 00000000..3101eea6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_osd.c + * + * PURPOSE: Operating System Dependent layer + * Description: + * Provide OS dependent functions with a canonical DAPL + * interface. Designed to be portable and hide OS specific quirks + * of common functions. 
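+ *
+ *      Debug output from this layer is steered at run time by two
+ *      environment variables read in dat_os_dbg_init() below:
+ *      DAT_DBG_TYPE is a bit mask of the DAT_OS_DBG_TYPE_* values
+ *      and DAT_DBG_DEST selects stdout (0x1) and/or syslog (0x2);
+ *      errors are printed regardless of the mask.  Both values are
+ *      parsed with strtol(..., 0), so hex works.  For example, with
+ *      dapl_app standing for any DAT Consumer:
+ *
+ *          DAT_DBG_TYPE=0xff DAT_DBG_DEST=0x1 ./dapl_app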
+ * + * $Id$ + **********************************************************************/ + +#include "dat_osd.h" + + +/********************************************************************* + * * + * Constants * + * * + *********************************************************************/ + +#define DAT_DBG_TYPE_ENV "DAT_DBG_TYPE" +#define DAT_DBG_DEST_ENV "DAT_DBG_DEST" + + +/********************************************************************* + * * + * Enumerations * + * * + *********************************************************************/ + +typedef int DAT_OS_DBG_DEST; + +typedef enum +{ + DAT_OS_DBG_DEST_STDOUT = 0x1, + DAT_OS_DBG_DEST_SYSLOG = 0x2, + DAT_OS_DBG_DEST_ALL = 0x3 +} DAT_OS_DBG_DEST_TYPE; + + +/********************************************************************* + * * + * Global Variables * + * * + *********************************************************************/ + +static DAT_OS_DBG_TYPE_VAL g_dbg_type = 0; +static DAT_OS_DBG_DEST g_dbg_dest = DAT_OS_DBG_DEST_STDOUT; + + +/*********************************************************************** + * Function: dat_os_dbg_init + ***********************************************************************/ + +void +dat_os_dbg_init( void ) +{ + char *dbg_type; + char *dbg_dest; + + if ( NULL != (dbg_type = dat_os_getenv (DAT_DBG_TYPE_ENV)) ) + { + g_dbg_type = dat_os_strtol(dbg_type, NULL, 0); + } + + if ( NULL != (dbg_dest = dat_os_getenv (DAT_DBG_DEST_ENV)) ) + { + g_dbg_dest = dat_os_strtol(dbg_dest, NULL, 0); + } +} + + +/*********************************************************************** + * Function: dat_os_dbg_print + ***********************************************************************/ + +void +dat_os_dbg_print ( + DAT_OS_DBG_TYPE_VAL type, + const char * fmt, + ...) +{ + if ( (DAT_OS_DBG_TYPE_ERROR == type) || (type & g_dbg_type) ) + { + va_list args; + + va_start(args, fmt); + + if ( DAT_OS_DBG_DEST_STDOUT & g_dbg_dest ) + { + vfprintf(stderr, fmt, args); + fflush(stderr); + } + + if ( DAT_OS_DBG_DEST_SYSLOG & g_dbg_dest ) + { + vsyslog(LOG_USER | LOG_DEBUG, fmt, args); + } + + va_end(args); + } +} + + +/*********************************************************************** + * Function: dat_os_library_load + ***********************************************************************/ + +DAT_RETURN +dat_os_library_load ( + const char *library_path, + DAT_OS_LIBRARY_HANDLE *library_handle_ptr) +{ + DAT_OS_LIBRARY_HANDLE library_handle; + + if ( NULL != (library_handle = dlopen(library_path, RTLD_NOW)) ) + { + if ( NULL != library_handle_ptr ) + { + *library_handle_ptr = library_handle; + } + + return DAT_SUCCESS; + } + else + { + dat_os_dbg_print(DAT_OS_DBG_TYPE_ERROR, + "DAT: library load failure: %s\n", + dlerror()); + return DAT_INTERNAL_ERROR; + } +} + + +/*********************************************************************** + * Function: dat_os_library_unload + ***********************************************************************/ + +DAT_RETURN +dat_os_library_unload ( + const DAT_OS_LIBRARY_HANDLE library_handle) +{ + if ( 0 != dlclose(library_handle) ) + { + return DAT_INTERNAL_ERROR; + } + else + { + return DAT_SUCCESS; + } +} diff --git a/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.h b/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.h new file mode 100644 index 00000000..d7f5e312 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/linux/dat_osd.h @@ -0,0 +1,394 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dat_osd.h
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *      Provide OS dependent data structures & functions with
+ *      a canonical DAT interface. Designed to be portable
+ *      and hide OS specific quirks of common functions.
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _DAT_OSD_H_
+#define _DAT_OSD_H_
+
+/*
+ * This file is defined for Linux systems only, including it on any
+ * other build will cause an error
+ */
+#ifndef __linux__
+#error "UNDEFINED OS TYPE"
+#endif /* __linux__ */
+
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifndef STATIC
+#define STATIC static
+#endif /* STATIC */
+
+#ifndef INLINE
+#define INLINE __inline__
+#endif /* INLINE */
+
+
+/*********************************************************************
+ *                                                                   *
+ * Debugging                                                         *
+ *                                                                   *
+ *********************************************************************/
+
+#define dat_os_assert(expr)     assert(expr)
+
+typedef int DAT_OS_DBG_TYPE_VAL;
+
+typedef enum
+{
+    DAT_OS_DBG_TYPE_ERROR        = 0x1,
+    DAT_OS_DBG_TYPE_GENERIC      = 0x2,
+    DAT_OS_DBG_TYPE_SR           = 0x4,
+    DAT_OS_DBG_TYPE_DR           = 0x8,
+    DAT_OS_DBG_TYPE_PROVIDER_API = 0x10,
+    DAT_OS_DBG_TYPE_CONSUMER_API = 0x20,
+    DAT_OS_DBG_TYPE_ALL          = 0xff
+} DAT_OS_DBG_TYPE;
+
+extern void
+dat_os_dbg_init ( void );
+
+extern void
+dat_os_dbg_print (
+    DAT_OS_DBG_TYPE_VAL  type,
+    const char *         fmt,
+    ...);
+
+
+/*********************************************************************
+ *                                                                   *
+ * Utility Functions                                                 *
+ *                                                                   *
+ *********************************************************************/
+
+#define DAT_ERROR(Type,SubType) ((DAT_RETURN)(DAT_CLASS_ERROR | Type | SubType))
+
+typedef size_t DAT_OS_SIZE;
+typedef void * DAT_OS_LIBRARY_HANDLE;
+
+extern DAT_RETURN
+dat_os_library_load (
+    const char *library_path,
+    DAT_OS_LIBRARY_HANDLE *library_handle_ptr );
+
+STATIC INLINE void *
+dat_os_library_sym (
+    DAT_OS_LIBRARY_HANDLE library_handle,
+    char *sym)
+{
+    return dlsym(library_handle, sym);
+}
+
+extern DAT_RETURN
+dat_os_library_unload (
+    const DAT_OS_LIBRARY_HANDLE library_handle );
+
+STATIC INLINE char *
+dat_os_getenv (
+    const char *name)
+{
+    return getenv(name);
+}
+
+STATIC INLINE long int
+dat_os_strtol (
+    const char *nptr,
+    char **endptr,
+    int base)
+{
+    return strtol(nptr, endptr, base);
+}
+
+STATIC INLINE DAT_OS_SIZE
+dat_os_strlen (
+    const char *s )
+{
+    return strlen(s);
+}
+
+STATIC INLINE int
+dat_os_strncmp (
+    const char *s1,
+    const char *s2,
+    DAT_OS_SIZE n)
+{
+    return strncmp(s1, s2, n);
+}
+
+STATIC INLINE void *
+dat_os_strncpy (
+    char *dest,
+    const char *src,
+    DAT_OS_SIZE len)
+{
+    return strncpy (dest, src, len);
+}
+
+STATIC INLINE DAT_BOOLEAN
+dat_os_isblank(
+    int c)
+{
+    if ( (' ' == c) || ('\t' == c) )
+    {
+        return DAT_TRUE;
+    }
+    else
+    {
+        return DAT_FALSE;
+    }
+}
+
+STATIC INLINE DAT_BOOLEAN
+dat_os_isdigit(
+    int c)
+{
+    if ( isdigit(c) )
+    {
+        return DAT_TRUE;
+    }
+    else
+    {
+        return DAT_FALSE;
+    }
+}
+
+STATIC INLINE void
+dat_os_usleep(
+    unsigned long usec)
+{
+    usleep(usec);
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * Memory Functions                                                  *
+ *                                                                   *
+ *********************************************************************/
+
+STATIC INLINE void *
+dat_os_alloc (
+    int size)
+{
+    return malloc (size);
+}
+
+STATIC INLINE void
+dat_os_free (
+    void *ptr,
+    int size)
+{
+    free (ptr);
+}
+
+STATIC INLINE void *
+dat_os_memset (void *loc, int c, DAT_OS_SIZE size)
+{
+    return memset (loc, c, size);
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * File I/O                                                          *
+ *                                                                   *
+ *********************************************************************/
+
+typedef FILE DAT_OS_FILE;
+typedef fpos_t DAT_OS_FILE_POS;
+
+
+STATIC INLINE DAT_OS_FILE *
+dat_os_fopen (
+    const char * path)
+{
+    /* always open files in read only mode */
+    return fopen(path, "r");
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_fgetpos (
+    DAT_OS_FILE *file,
+    DAT_OS_FILE_POS *pos)
+{
+    if ( 0 == fgetpos(file, pos) )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_fsetpos (
+    DAT_OS_FILE *file,
+    DAT_OS_FILE_POS *pos)
+{
+    if ( 0 == fsetpos(file, pos) )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+/* dat_os_fgetc() returns EOF on error or end of file. */
+STATIC INLINE int
+dat_os_fgetc (
+    DAT_OS_FILE *file)
+{
+    return fgetc(file);
+}
+
+/* dat_os_fputc() returns EOF on error. */
+STATIC INLINE int
+dat_os_fputc (
+    DAT_OS_FILE *file, int c)
+{
+    return fputc(c, file);
+}
+
+/* dat_os_fread returns the number of bytes read from the file.
*/ +STATIC INLINE DAT_OS_SIZE +dat_os_fread ( + DAT_OS_FILE *file, + char *buf, + DAT_OS_SIZE len) +{ + return fread(buf, sizeof(char), len, file); +} + +STATIC INLINE DAT_RETURN +dat_os_fclose ( + DAT_OS_FILE *file) +{ + if ( 0 == fclose(file) ) { return DAT_SUCCESS; } + else { return DAT_INTERNAL_ERROR; } +} + + +/********************************************************************* + * * + * Locks * + * * + *********************************************************************/ + +typedef pthread_mutex_t DAT_OS_LOCK; + + +/* lock functions */ +STATIC INLINE DAT_RETURN +dat_os_lock_init ( + IN DAT_OS_LOCK *m) +{ + /* pthread_mutex_init always returns 0 */ + pthread_mutex_init(m, NULL); + + return DAT_SUCCESS; +} + +STATIC INLINE DAT_RETURN +dat_os_lock ( + IN DAT_OS_LOCK *m) +{ + if (0 == pthread_mutex_lock(m)) + { + return DAT_SUCCESS; + } + else + { + return DAT_INTERNAL_ERROR; + } +} + +STATIC INLINE DAT_RETURN +dat_os_unlock ( + IN DAT_OS_LOCK *m) +{ + if (0 == pthread_mutex_unlock(m)) + { + return DAT_SUCCESS; + } + else + { + return DAT_INTERNAL_ERROR; + } +} + +STATIC INLINE DAT_RETURN +dat_os_lock_destroy ( + IN DAT_OS_LOCK *m) +{ + if (0 == pthread_mutex_destroy(m)) + { + return DAT_SUCCESS; + } + else + { + return DAT_INTERNAL_ERROR; + } +} + + +#endif /* _DAT_OSD_H_ */ + +/* + * Local variables: + * c-indent-level: 4 + * c-basic-offset: 4 + * tab-width: 8 + * End: + */ + diff --git a/branches/Ndi/ulp/dapl/dat/udat/makefile b/branches/Ndi/ulp/dapl/dat/udat/makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/dapl/dat/udat/makefile.wnd b/branches/Ndi/ulp/dapl/dat/udat/makefile.wnd new file mode 100644 index 00000000..0b4e4f9e --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/makefile.wnd @@ -0,0 +1,144 @@ +# +# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under either one of the following two licenses: +# +# 1) under the terms of the "Common Public License 1.0" a copy of which is +# in the file LICENSE.txt in the root directory. The license is also +# available from the Open Source Initiative, see +# http://www.opensource.org/licenses/cpl.php. +# OR +# +# 2) under the terms of the "The BSD License" a copy of which is in the file +# LICENSE2.txt in the root directory. The license is also available from +# the Open Source Initiative, see +# http://www.opensource.org/licenses/bsd-license.php. +# +# Licensee has the right to choose either one of the above two licenses. +# +# Redistributions of source code must retain both the above copyright +# notice and either one of the license notices. +# +# Redistributions in binary form must reproduce both the above copyright +# notice, either one of the license notices in the documentation +# and/or other materials provided with the distribution. 
+# + +#********************************************************************* +# +# MODULE: Makefile +# +# PURPOSE: Makefile for DAT registration module +# +# $Id$ +#*********************************************************************/ + +#********************************************************************* +# +# Dot Directives +# +#*********************************************************************/ + +.SUFFIXES : # clear the .SUFFIXES list +.SUFFIXES : .c # initialize .SUFFIXES list + + +#********************************************************************* +# +# Macros +# +#*********************************************************************/ + +UDAT_ROOT = . +UDAT_COMMON = $(UDAT_ROOT)/../common +UDAT_WINDOWS = $(UDAT_ROOT)/windows + +OBJ_PATH = $(UDAT_ROOT)/Obj +TARGET_PATH = $(UDAT_ROOT)/Target + +OBJS = \ + $(OBJ_PATH)/udat.obj \ + $(OBJ_PATH)/dat_osd.obj \ + $(OBJ_PATH)/dat_dictionary.obj \ + $(OBJ_PATH)/dat_dr.obj \ + $(OBJ_PATH)/dat_init.obj \ + $(OBJ_PATH)/dat_sr.obj \ + $(OBJ_PATH)/udat_sr_parser.obj \ + $(OBJ_PATH)/dat_strerror.obj + +LIBRARY = $(TARGET_PATH)/dat.dll + +# +# Compiler +# + +CC = cl + +INC_FLAGS = \ + /I . \ + /I ../include \ + /I $(UDAT_COMMON) \ + /I $(UDAT_WINDOWS) \ + /I ../../../shared/include \ + /I ../../../winuser/include + +CC_FLAGS = \ + /nologo /Zel /Zp1 /Gy /W3 /Gd /QIfdiv- /QIf /QI0f /GB /Gi- /Gm- /GX- \ + /GR- /GF -Z7 /Od /Oi /Oy- /DWIN32 /D_X86_ -D__i386__ $(INC_FLAGS) + + +# +# Linker +# + +LINK = link + +LIBS = libc.lib kernel32.lib + +LINK_FLAGS = \ + /nologo /dll /DEF:$(UDAT_WINDOWS)/dat_win.def \ + /DEBUG /incremental:no /machine:I386 $(LIBS) + +# +# System Utilities +# + +RM = rm -f + + +#********************************************************************* +# +# Inference Rules +# +#*********************************************************************/ + +{$(UDAT_ROOT)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + +{$(UDAT_COMMON)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + +{$(UDAT_WINDOWS)}.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + + +#********************************************************************* +# +# Description Blocks +# +#*********************************************************************/ + +all : mkdirs $(LIBRARY) + +mkdirs : + if not exist "$(OBJ_PATH)" mkdir "$(OBJ_PATH)" + if not exist "$(TARGET_PATH)" mkdir "$(TARGET_PATH)" + +$(LIBRARY) : $(OBJS) + $(LINK) $(LINK_FLAGS) /out:$(LIBRARY) $(OBJS) + +clean : + $(RM) $(OBJS) + $(RM) $(LIBRARY) + $(RM) $(TARGET_PATH)/*.pdb $(TARGET_PATH)/*.exp $(TARGET_PATH)/*.lib + diff --git a/branches/Ndi/ulp/dapl/dat/udat/udat.c b/branches/Ndi/ulp/dapl/dat/udat/udat.c new file mode 100644 index 00000000..99678152 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/udat.c @@ -0,0 +1,420 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: udat.c
+ *
+ * PURPOSE: DAT Provider and Consumer registry functions.
+ *
+ * $Id$
+ **********************************************************************/
+
+#include <dat/udat.h>
+#include <dat/dat_registry.h>    /* Provider API function prototypes */
+
+#include "dat_dr.h"
+#include "dat_init.h"
+#include "dat_osd.h"
+#ifndef DAT_NO_STATIC_REGISTRY
+#include "dat_sr.h"
+#endif
+
+
+#define UDAT_IS_BAD_POINTER(p) ( NULL == (p) )
+
+/*********************************************************************
+ *                                                                   *
+ * Internal Function Declarations                                    *
+ *                                                                   *
+ *********************************************************************/
+
+DAT_BOOLEAN
+udat_check_state ( void );
+
+
+/*********************************************************************
+ *                                                                   *
+ * External Function Definitions                                     *
+ *                                                                   *
+ *********************************************************************/
+
+
+/*
+ *
+ * Provider API
+ *
+ */
+
+
+/***********************************************************************
+ * Function: dat_registry_add_provider
+ ***********************************************************************/
+
+DAT_RETURN DAT_API
+dat_registry_add_provider (
+    IN DAT_PROVIDER *provider,
+    IN const DAT_PROVIDER_INFO *provider_info )
+{
+    DAT_DR_ENTRY entry;
+
+    dat_os_dbg_print (DAT_OS_DBG_TYPE_PROVIDER_API,
+        "DAT Registry: dat_registry_add_provider () called\n");
+
+    if ( UDAT_IS_BAD_POINTER (provider) )
+    {
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1);
+    }
+
+    if ( UDAT_IS_BAD_POINTER (provider_info) )
+    {
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+    }
+
+    if ( DAT_FALSE == udat_check_state () )
+    {
+        return DAT_ERROR (DAT_INVALID_STATE, 0);
+    }
+
+    entry.ref_count = 0;
+    entry.ia_open_func = provider->ia_open_func;
+    entry.info = *provider_info;
+
+    return dat_dr_insert (provider_info, &entry);
+}
+
+
+/***********************************************************************
+ * Function: dat_registry_remove_provider
+ ***********************************************************************/
+
+DAT_RETURN DAT_API
+dat_registry_remove_provider (
+    IN DAT_PROVIDER *provider,
+    IN const DAT_PROVIDER_INFO *provider_info )
+{
+    dat_os_dbg_print (DAT_OS_DBG_TYPE_PROVIDER_API,
+        "DAT Registry: dat_registry_remove_provider () called\n");
+
+    if ( UDAT_IS_BAD_POINTER (provider) )
+    {
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1);
+    }
+
+    if ( DAT_FALSE == udat_check_state () )
+    {
+        return DAT_ERROR (DAT_INVALID_STATE, 0);
+    }
+
+    return dat_dr_remove (provider_info);
+}
+
+
+/*
+ *
+ * Consumer API
+ *
+ */
+
+/***********************************************************************
+ * Function: dat_ia_open
+ ***********************************************************************/
+
+DAT_RETURN DAT_API
+dat_ia_openv (
+    IN const DAT_NAME_PTR name,
+    IN DAT_COUNT async_event_qlen,
+    INOUT DAT_EVD_HANDLE *async_event_handle,
+    OUT DAT_IA_HANDLE *ia_handle,
+    IN DAT_UINT32 dapl_major,
+    IN DAT_UINT32 dapl_minor,
+    IN DAT_BOOLEAN thread_safety )
+{
+    DAT_IA_OPEN_FUNC ia_open_func;
+    DAT_PROVIDER_INFO info;
+    DAT_RETURN status;
+    DAT_OS_SIZE len;
+
+    dat_os_dbg_print
(DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: dat_ia_open () called\n"); + + if ( UDAT_IS_BAD_POINTER (name) ) + { + return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1); + } + + len = dat_os_strlen(name); + + if ( DAT_NAME_MAX_LENGTH < len ) + { + return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1); + } + + if ( UDAT_IS_BAD_POINTER (ia_handle) ) + { + return DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA); + } + + if ( DAT_FALSE == udat_check_state () ) + { + return DAT_ERROR (DAT_INVALID_STATE, 0); + } + + dat_os_strncpy(info.ia_name, name, len); + info.ia_name[len] = '\0'; + + info.dapl_version_major = dapl_major; + info.dapl_version_minor = dapl_minor; + info.is_thread_safe = thread_safety; + + /* + * Since DAT allows providers to be loaded by either the static + * registry or explicitly through OS dependent methods, do not + * return an error if no providers are loaded via the static registry. + * Don't even bother calling the static registry if DAT is compiled + * with no static registry support. + */ + +#ifndef DAT_NO_STATIC_REGISTRY + (void) dat_sr_provider_open ( &info ); +#endif + + status = dat_dr_provider_open ( &info, &ia_open_func); + if ( status != DAT_SUCCESS ) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: dat_ia_open () provider information " + "for IA name %s not found in dynamic registry\n", + info.ia_name); + + return status; + } + + return (*ia_open_func) (name, + async_event_qlen, + async_event_handle, + ia_handle); +} + + +/************************************************************************ + * Function: dat_ia_close + ***********************************************************************/ + +DAT_RETURN DAT_API +dat_ia_close ( + IN DAT_IA_HANDLE ia_handle, + IN DAT_CLOSE_FLAGS ia_flags) +{ + DAT_PROVIDER *provider; + DAT_PROVIDER_ATTR provider_attr; + DAT_RETURN status; + const char *ia_name; + + dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: dat_ia_close () called\n"); + + if ( UDAT_IS_BAD_POINTER (ia_handle) ) + { + return DAT_ERROR (DAT_INVALID_HANDLE, DAT_INVALID_HANDLE_IA); + } + + if ( DAT_FALSE == udat_check_state () ) + { + return DAT_ERROR (DAT_INVALID_STATE, 0); + } + + provider = DAT_HANDLE_TO_PROVIDER(ia_handle); + + if ( !provider ) + return DAT_INVALID_HANDLE; + + ia_name = provider->device_name; + + if ( DAT_SUCCESS != (status = dat_ia_query (ia_handle, + NULL, + 0, + NULL, + DAT_PROVIDER_FIELD_ALL, + &provider_attr)) ) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: query function for %s provider failed\n", + ia_name); + } + else if ( DAT_SUCCESS != (status = + (*provider->ia_close_func)(ia_handle, ia_flags)) ) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: close function for %s provider failed\n", + ia_name); + } + else + { + DAT_PROVIDER_INFO info; + DAT_OS_SIZE len; + + len = dat_os_strlen(ia_name); + + dat_os_assert( len <= DAT_NAME_MAX_LENGTH ); + + dat_os_strncpy(info.ia_name, ia_name, len); + info.ia_name[len] = '\0'; + + info.dapl_version_major = provider_attr.dapl_version_major; + info.dapl_version_minor = provider_attr.dapl_version_minor; + info.is_thread_safe = provider_attr.is_thread_safe; + + status = dat_dr_provider_close ( &info ); + if ( DAT_SUCCESS != status ) + { + dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API, + "DAT Registry: dynamic registry unable to close " + "provider for IA name %s\n", + ia_name); + } + +#ifndef DAT_NO_STATIC_REGISTRY + status = dat_sr_provider_close ( &info ); + if ( DAT_SUCCESS != status ) + 
{
+        dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API,
+            "DAT Registry: static registry unable to close "
+            "provider for IA name %s\n",
+            ia_name);
+        }
+#endif
+    }
+
+    return status;
+}
+
+
+/***********************************************************************
+ * Function: dat_registry_list_providers
+ ***********************************************************************/
+
+DAT_RETURN DAT_API
+dat_registry_list_providers (
+    IN DAT_COUNT max_to_return,
+    OUT DAT_COUNT *entries_returned,
+    OUT DAT_PROVIDER_INFO * (dat_provider_list[]))
+{
+    DAT_RETURN dat_status;
+
+    dat_status = DAT_SUCCESS;
+    dat_os_dbg_print (DAT_OS_DBG_TYPE_CONSUMER_API,
+        "DAT Registry: dat_registry_list_providers () called\n");
+
+    if ( DAT_FALSE == udat_check_state () )
+    {
+        return DAT_ERROR (DAT_INVALID_STATE, 0);
+    }
+
+    if ( ( UDAT_IS_BAD_POINTER (entries_returned) ) )
+    {
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG2);
+    }
+
+    if (0 != max_to_return && ( UDAT_IS_BAD_POINTER (dat_provider_list) ) )
+    {
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG3);
+    }
+
+    if ( 0 == max_to_return )
+    {
+        /* The user is allowed to call with max_to_return set to zero,
+         * in which case we simply return (in *entries_returned) the
+         * number of providers currently installed. We must also
+         * (per spec) return an error.
+         */
+#ifndef DAT_NO_STATIC_REGISTRY
+        (void) dat_sr_size ( entries_returned );
+#else
+        (void) dat_dr_size ( entries_returned );
+#endif
+        return DAT_ERROR (DAT_INVALID_PARAMETER, DAT_INVALID_ARG1);
+    }
+    else
+    {
+#ifndef DAT_NO_STATIC_REGISTRY
+        dat_status = dat_sr_list (max_to_return,
+                                  entries_returned,
+                                  dat_provider_list);
+#else
+        dat_status = dat_dr_list (max_to_return,
+                                  entries_returned,
+                                  dat_provider_list);
+#endif
+    }
+    return dat_status;
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * Internal Function Definitions                                     *
+ *                                                                   *
+ *********************************************************************/
+
+
+/***********************************************************************
+ * Function: udat_check_state
+ ***********************************************************************/
+
+/*
+ * This function returns DAT_TRUE if the DAT registry is in a state
+ * capable of handling DAT API calls and DAT_FALSE otherwise.
+ */
+
+DAT_BOOLEAN
+udat_check_state ( void )
+{
+    DAT_MODULE_STATE state;
+    DAT_BOOLEAN status = DAT_FALSE;
+
+    state = dat_module_get_state ();
+
+    if ( DAT_MODULE_STATE_UNINITIALIZED == state )
+    {
+        dat_init ();
+        state = dat_module_get_state ();
+    }
+    if ( DAT_MODULE_STATE_INITIALIZED == state )
+    {
+        status = DAT_TRUE;
+    }
+    return status;
+}
+
+
+/*
+ * Local variables:
+ * c-indent-level: 4
+ * c-basic-offset: 4
+ * tab-width: 8
+ * End:
+ */
diff --git a/branches/Ndi/ulp/dapl/dat/udat/udat_exports.src b/branches/Ndi/ulp/dapl/dat/udat/udat_exports.src
new file mode 100644
index 00000000..1ee36f72
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/udat_exports.src
@@ -0,0 +1,15 @@
+#if DBG
+LIBRARY datd.dll
+#else
+LIBRARY dat.dll
+#endif
+
+#ifndef _WIN64
+EXPORTS
+dat_ia_openv
+dat_ia_close
+dat_registry_list_providers
+dat_strerror
+dat_registry_add_provider
+dat_registry_remove_provider
+#endif
diff --git a/branches/Ndi/ulp/dapl/dat/udat/udat_sources.c b/branches/Ndi/ulp/dapl/dat/udat/udat_sources.c
new file mode 100644
index 00000000..7f518625
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/udat_sources.c
@@ -0,0 +1,10 @@
+/*
+ * Include all files that are not in children directories.
+ */ + +#include "../common/dat_dictionary.c" +#include "../common/dat_dr.c" +#include "../common/dat_init.c" +#include "../common/dat_sr.c" +#include "../common/dat_strerror.c" +#include "windows/dat_osd.c" diff --git a/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.c b/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.c new file mode 100644 index 00000000..092546d3 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.c @@ -0,0 +1,1551 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +/********************************************************************** + * + * MODULE: dat_sr_parser.c + * + * PURPOSE: static registry parser + * + * $Id$ + **********************************************************************/ + + +#include "udat_sr_parser.h" +#include "dat_sr.h" + + +/********************************************************************* + * * + * Constants * + * * + *********************************************************************/ + +#define DAT_SR_CONF_ENV "DAT_OVERRIDE" +#define DAT_SR_SYSTEM_DRIVE "SystemDrive" +#define DAT_SR_DRIVE_DEFAULT "C:/" +#define DAT_SR_CONF_DEFAULT "/Dat/dat.conf" +#define DAT_SR_TOKEN_THREADSAFE "threadsafe" +#define DAT_SR_TOKEN_NONTHREADSAFE "nonthreadsafe" +#define DAT_SR_TOKEN_DEFAULT "default" +#define DAT_SR_TOKEN_NONDEFAULT "nondefault" + +#define DAT_SR_CHAR_NEWLINE '\n' +#define DAT_SR_CHAR_COMMENT '#' +#define DAT_SR_CHAR_QUOTE '"' +#define DAT_SR_CHAR_BACKSLASH '\\' + + +/********************************************************************* + * * + * Enumerations * + * * + *********************************************************************/ + +typedef enum +{ + DAT_SR_TOKEN_STRING, /* text field (both quoted or unquoted) */ + DAT_SR_TOKEN_EOR, /* end of record (newline) */ + DAT_SR_TOKEN_EOF /* end of file */ +} DAT_SR_TOKEN_TYPE; + +typedef enum +{ + DAT_SR_API_UDAT, + DAT_SR_API_KDAT +} DAT_SR_API_TYPE; + + +/********************************************************************* + * * + * Structures * + * * + *********************************************************************/ + +typedef struct +{ + DAT_SR_TOKEN_TYPE type; + char * value; /* valid if type is DAT_SR_TOKEN_STRING */ + DAT_OS_SIZE value_len; +} DAT_SR_TOKEN; + +typedef struct DAT_SR_STACK_NODE +{ + DAT_SR_TOKEN token; + struct DAT_SR_STACK_NODE *next; +} DAT_SR_STACK_NODE; + +typedef struct +{ + DAT_UINT32 major; + DAT_UINT32 minor; +} DAT_SR_VERSION; + +typedef struct +{ + char * id; + DAT_SR_VERSION version; +} DAT_SR_PROVIDER_VERSION; + 
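+/*
+ * Example: the token constants and types above describe one
+ * whitespace-delimited record per line of the static registry file
+ * (dat.conf). A representative record, with purely hypothetical
+ * values, would be:
+ *
+ *   ibnic0 u1.2 nonthreadsafe default C:/Dat/dapl.dll ri.1.2 "IbalHca0 1" ""
+ *
+ * Reading left to right, the fields populate the DAT_SR_CONF_ENTRY
+ * type defined below: ia_name, api_version ([u|k]major.minor), thread
+ * safety, the default flag, lib_path, provider_version
+ * (id.major.minor), and the quoted ia_params and platform_params
+ * strings.
+ */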
+typedef struct +{ + DAT_SR_API_TYPE type; + DAT_SR_VERSION version; +} DAT_SR_API_VERSION; + +typedef struct +{ + char * ia_name; + DAT_SR_API_VERSION api_version; + DAT_BOOLEAN is_thread_safe; + DAT_BOOLEAN is_default; + char * lib_path; + DAT_SR_PROVIDER_VERSION provider_version; + char * ia_params; + char * platform_params; +} DAT_SR_CONF_ENTRY; + + +/********************************************************************* + * * + * Internal Function Declarations * + * * + *********************************************************************/ + +static DAT_RETURN +dat_sr_load_entry ( + DAT_SR_CONF_ENTRY *entry); + +static DAT_BOOLEAN +dat_sr_is_valid_entry ( + DAT_SR_CONF_ENTRY *entry); + +static char * +dat_sr_type_to_str( + DAT_SR_TOKEN_TYPE type); + +static DAT_RETURN +dat_sr_parse_eof( + DAT_OS_FILE *file); + +static DAT_RETURN +dat_sr_parse_entry( + DAT_OS_FILE *file); + +static DAT_RETURN +dat_sr_parse_ia_name( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_api( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_thread_safety( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_default( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_lib_path( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_provider_version( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_ia_params( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_platform_params( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_parse_eoe( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry); + +static DAT_RETURN +dat_sr_convert_api( + char *str, + DAT_SR_API_VERSION *api_version); + +static DAT_RETURN +dat_sr_convert_thread_safety( + char *str, + DAT_BOOLEAN *is_thread_safe); + +static DAT_RETURN +dat_sr_convert_default( + char *str, + DAT_BOOLEAN *is_default); + +static DAT_RETURN +dat_sr_convert_provider_version( + char *str, + DAT_SR_PROVIDER_VERSION *provider_version); + +static DAT_RETURN +dat_sr_get_token ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token); + +static DAT_RETURN +dat_sr_put_token ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token); + +static DAT_RETURN +dat_sr_read_token ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token); + +static DAT_RETURN +dat_sr_read_str ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token, + DAT_OS_SIZE token_len); + +static DAT_RETURN +dat_sr_read_quoted_str ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token, + DAT_OS_SIZE token_len, + DAT_COUNT num_escape_seq); + +static void +dat_sr_read_comment( + DAT_OS_FILE *file); + + +/********************************************************************* + * * + * Global Variables * + * * + *********************************************************************/ + +static DAT_SR_STACK_NODE *g_token_stack = NULL; + + +/********************************************************************* + * * + * External Function Definitions * + * * + *********************************************************************/ + +/*********************************************************************** + * Function: dat_sr_load + ***********************************************************************/ + +DAT_RETURN +dat_sr_load (void) +{ + char *sr_path; + DAT_OS_FILE *sr_file; + char env_path[256]; + + dat_os_memset(env_path, 0, sizeof(env_path)); + + sr_path = dat_os_getenv (DAT_SR_CONF_ENV); + if ( sr_path == NULL ) + { + 
sr_path = dat_os_getenv(DAT_SR_SYSTEM_DRIVE); + if ( sr_path != NULL ) + { + strncat(env_path,sr_path, __min(sizeof(env_path)-1,strlen(sr_path))); + + if ( strlen(env_path) < sizeof(env_path) ) + { + strncat( &env_path[strlen(env_path)],DAT_SR_CONF_DEFAULT, + __min(sizeof(env_path)-strlen(env_path)-1, strlen(DAT_SR_CONF_DEFAULT ))); + } + sr_path = env_path; + } + else + { + strncat(env_path,DAT_SR_DRIVE_DEFAULT,sizeof(DAT_SR_DRIVE_DEFAULT)); + if ( strlen(env_path) < sizeof(env_path) ) + { + strncat(&env_path[strlen(env_path)],DAT_SR_CONF_DEFAULT, + __min( sizeof(env_path)-strlen(env_path)-1, strlen(DAT_SR_CONF_DEFAULT ))); + sr_path = env_path; + } + } + } + + dat_os_dbg_print(DAT_OS_DBG_TYPE_SR, + "DAT Registry: static registry file <%s> \n", sr_path); + + sr_file = dat_os_fopen(sr_path); + if ( sr_file == NULL ) + { + dat_os_dbg_print(DAT_OS_DBG_TYPE_ERROR, + "DAT Registry: Failed to open static registry file <%s> \n", sr_path); + return DAT_INTERNAL_ERROR; + } + + for (;;) + { + if ( DAT_SUCCESS == dat_sr_parse_eof(sr_file) ) + { + break; + } + else if ( DAT_SUCCESS == dat_sr_parse_entry(sr_file) ) + { + continue; + } + else + { + dat_os_assert(!"unable to parse static registry file"); + break; + } + } + + if ( 0 != dat_os_fclose(sr_file) ) + { + return DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; +} + + +/********************************************************************* + * * + * Internal Function Definitions * + * * + *********************************************************************/ + +/*********************************************************************** + * Function: dat_sr_is_valid_entry + ***********************************************************************/ + +DAT_BOOLEAN +dat_sr_is_valid_entry ( + DAT_SR_CONF_ENTRY *entry) +{ + if ( ( DAT_SR_API_UDAT == entry->api_version.type ) && + (entry->is_default) ) + { + return DAT_TRUE; + } + else + { + return DAT_FALSE; + } +} + + +/*********************************************************************** + * Function: dat_sr_load_entry + ***********************************************************************/ + +DAT_RETURN +dat_sr_load_entry ( + DAT_SR_CONF_ENTRY *conf_entry) +{ + DAT_SR_ENTRY entry; + + if ( DAT_NAME_MAX_LENGTH < (strlen(conf_entry->ia_name) + 1) ) + { + dat_os_dbg_print(DAT_OS_DBG_TYPE_SR, + "DAT Registry: ia name %s is longer than " + "DAT_NAME_MAX_LENGTH (%i)\n", + conf_entry->ia_name, DAT_NAME_MAX_LENGTH); + + return DAT_INSUFFICIENT_RESOURCES; + } + + dat_os_strncpy(entry.info.ia_name, conf_entry->ia_name, DAT_NAME_MAX_LENGTH); + entry.info.dapl_version_major = conf_entry->api_version.version.major; + entry.info.dapl_version_minor = conf_entry->api_version.version.minor; + entry.info.is_thread_safe = conf_entry->is_thread_safe; + entry.lib_path = conf_entry->lib_path; + entry.ia_params = conf_entry->ia_params; + entry.lib_handle = NULL; + entry.ref_count = 0; + + dat_os_dbg_print(DAT_OS_DBG_TYPE_SR, + "DAT Registry: loading provider for %s\n", + conf_entry->ia_name); + + return dat_sr_insert(&entry.info, &entry); +} + + +/*********************************************************************** + * Function: dat_sr_type_to_str + ***********************************************************************/ + +char * +dat_sr_type_to_str ( + DAT_SR_TOKEN_TYPE type) +{ + static char *str_array[] = { "string", "eor", "eof" }; + + if ( (type < 0) || (2 < type) ) + { + return "error: invalid token type"; + } + + return str_array[type]; +} + + 
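+/*
+ * Usage sketch: dat_sr_load() above resolves the registry path in this
+ * order: the DAT_OVERRIDE environment variable, then
+ * %SystemDrive%/Dat/dat.conf, then C:/Dat/dat.conf as a last resort.
+ * A consumer can therefore point the parser at a private file before
+ * the first DAT call; a minimal sketch using the MSVC CRT (the path
+ * shown is hypothetical):
+ *
+ *   #include <stdlib.h>
+ *   _putenv("DAT_OVERRIDE=C:\\temp\\dat.conf");
+ *   (any subsequent dat_ia_open()/dat_ia_openv() call parses this file)
+ */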
+/***********************************************************************
+ * Function: dat_sr_parse_eof
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_parse_eof(
+    DAT_OS_FILE *file)
+{
+    DAT_SR_TOKEN token;
+
+    if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+
+    if ( DAT_SR_TOKEN_EOF == token.type )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        dat_sr_put_token(file, &token);
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_parse_entry
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_parse_entry(
+    DAT_OS_FILE *file)
+{
+    DAT_SR_CONF_ENTRY entry;
+    DAT_RETURN status;
+
+    dat_os_memset(&entry, 0, sizeof(DAT_SR_CONF_ENTRY));
+
+    if ( (DAT_SUCCESS == dat_sr_parse_ia_name(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_api(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_thread_safety(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_default(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_lib_path(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_provider_version(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_ia_params(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_platform_params(file, &entry)) &&
+         (DAT_SUCCESS == dat_sr_parse_eoe(file, &entry)) )
+    {
+        dat_os_dbg_print(DAT_OS_DBG_TYPE_SR,
+            "\n"
+            "DAT Registry: entry \n"
+            " ia_name %s\n"
+            " api_version\n"
+            " type 0x%X\n"
+            " major.minor %d.%d\n"
+            " is_thread_safe %d\n"
+            " is_default %d\n"
+            " lib_path %s\n"
+            " provider_version\n"
+            " id %s\n"
+            " major.minor %d.%d\n"
+            " ia_params %s\n"
+            "\n",
+            entry.ia_name,
+            entry.api_version.type,
+            entry.api_version.version.major,
+            entry.api_version.version.minor,
+            entry.is_thread_safe,
+            entry.is_default,
+            entry.lib_path,
+            entry.provider_version.id,
+            entry.provider_version.version.major,
+            entry.provider_version.version.minor,
+            entry.ia_params);
+
+        if ( DAT_TRUE == dat_sr_is_valid_entry(&entry) )
+        {
+            /*
+             * The static registry configuration file may have multiple
+             * entries with the same IA name. The first entry will be
+             * installed in the static registry, causing subsequent
+             * attempts to register the same IA name to fail. Therefore
+             * the return code from dat_sr_load_entry() is ignored.
+             */
+            (void) dat_sr_load_entry(&entry);
+        }
+
+        status = DAT_SUCCESS;
+    }
+    else /* resync */
+    {
+        DAT_SR_TOKEN token;
+
+        /*
+         * The static registry format is specified in the DAT
+         * specification. While the registry file's contents may change
+         * between revisions of the specification, there is no way to
+         * determine the specification version to which the
+         * configuration file conforms. If an entry is found that does
+         * not match the expected format, the entry is discarded and the
+         * parsing of the file continues. There is no way to determine
+         * if the entry was an error or an entry conforming to an
+         * alternate version of the specification.
+         */
+
+        for (;;)
+        {
+            if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+            {
+                status = DAT_INTERNAL_ERROR;
+                break;
+            }
+
+            if ( DAT_SR_TOKEN_STRING != token.type )
+            {
+                status = DAT_SUCCESS;
+                break;
+            }
+            else
+            {
+                dat_os_free(token.value,
+                    (sizeof(char) * (int)dat_os_strlen(token.value)) + 1);
+                continue;
+            }
+        }
+    }
+
+    /* free resources */
+    if ( NULL != entry.ia_name )
+    {
+        dat_os_free(entry.ia_name,
+            sizeof(char) * ((int)dat_os_strlen(entry.ia_name) + 1));
+    }
+    if ( NULL != entry.lib_path )
+    {
+        dat_os_free(entry.lib_path,
+            sizeof(char) * ((int)dat_os_strlen(entry.lib_path) + 1));
+    }
+
+    if ( NULL != entry.provider_version.id )
+    {
+        dat_os_free(entry.provider_version.id,
+            sizeof(char) * ((int)dat_os_strlen(entry.provider_version.id) + 1));
+    }
+
+    if ( NULL != entry.ia_params )
+    {
+        dat_os_free(entry.ia_params,
+            sizeof(char) * ((int)dat_os_strlen(entry.ia_params) + 1));
+    }
+
+    return status;
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_parse_ia_name
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_parse_ia_name(
+    DAT_OS_FILE *file,
+    DAT_SR_CONF_ENTRY *entry)
+{
+    DAT_SR_TOKEN token;
+    DAT_RETURN status;
+
+    if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+
+    if ( DAT_SR_TOKEN_STRING != token.type )
+    {
+        status = DAT_INTERNAL_ERROR;
+    }
+    else
+    {
+        entry->ia_name = token.value;
+
+        status = DAT_SUCCESS;
+    }
+
+    if ( DAT_SUCCESS != status )
+    {
+        DAT_RETURN status_success;
+
+        status_success = dat_sr_put_token(file, &token);
+        dat_os_assert( DAT_SUCCESS == status_success);
+    }
+
+    return status;
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_parse_api
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_parse_api(
+    DAT_OS_FILE *file,
+    DAT_SR_CONF_ENTRY *entry)
+{
+    DAT_SR_TOKEN token;
+    DAT_RETURN status;
+
+    if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+
+    if ( DAT_SR_TOKEN_STRING != token.type )
+    {
+        status = DAT_INTERNAL_ERROR;
+    }
+    else if ( DAT_SUCCESS != dat_sr_convert_api(
+                  token.value, &entry->api_version) )
+    {
+        status = DAT_INTERNAL_ERROR;
+    }
+    else
+    {
+        dat_os_free(token.value,
+            (sizeof(char) * (int)dat_os_strlen(token.value)) + 1);
+
+        status = DAT_SUCCESS;
+    }
+
+    if ( DAT_SUCCESS != status )
+    {
+        DAT_RETURN status_success;
+
+        status_success = dat_sr_put_token(file, &token);
+        dat_os_assert( DAT_SUCCESS == status_success);
+    }
+
+    return status;
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_parse_thread_safety
+ ***********************************************************************/
+
+static DAT_RETURN
+dat_sr_parse_thread_safety(
+    DAT_OS_FILE *file,
+    DAT_SR_CONF_ENTRY *entry)
+{
+    DAT_SR_TOKEN token;
+    DAT_RETURN status;
+
+    if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+
+    if ( DAT_SR_TOKEN_STRING != token.type )
+    {
+        status = DAT_INTERNAL_ERROR;
+    }
+    else if ( DAT_SUCCESS != dat_sr_convert_thread_safety(
+                  token.value, &entry->is_thread_safe) )
+    {
+        status = DAT_INTERNAL_ERROR;
+    }
+    else
+    {
+        dat_os_free(token.value,
+            (sizeof(char) * (int)dat_os_strlen(token.value)) + 1);
+
+        status = DAT_SUCCESS;
+    }
+
+    if ( DAT_SUCCESS != status )
+    {
+        DAT_RETURN status_success;
+
+        status_success = dat_sr_put_token(file, &token);
+        dat_os_assert(
DAT_SUCCESS == status_success); + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_parse_default + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_default( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( DAT_SR_TOKEN_STRING != token.type ) + { + status = DAT_INTERNAL_ERROR; + } + else if ( DAT_SUCCESS != dat_sr_convert_default( + token.value, &entry->is_default) ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + dat_os_free(token.value, + (sizeof(char) * (int)dat_os_strlen(token.value)) + 1); + + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_parse_lib_path + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_lib_path( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( DAT_SR_TOKEN_STRING != token.type ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + entry->lib_path = token.value; + + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + +/*********************************************************************** + * Function: dat_sr_parse_provider_version + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_provider_version( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( DAT_SR_TOKEN_STRING != token.type ) + { + status = DAT_INTERNAL_ERROR; + } + else if ( DAT_SUCCESS != dat_sr_convert_provider_version( + token.value, &entry->provider_version) ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + dat_os_free(token.value, + (sizeof(char) * (int)dat_os_strlen(token.value)) + 1); + + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_parse_ia_params + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_ia_params( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( DAT_SR_TOKEN_STRING != token.type ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + entry->ia_params = token.value; + + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + + 
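+/*
+ * Sketch of the shared pattern: each dat_sr_parse_* helper above reads
+ * one token, validates or converts it, and pushes the token back on
+ * failure so that dat_sr_parse_entry()'s resync loop can re-read the
+ * stream. Here validate() is a hypothetical stand-in for the
+ * field-specific check or dat_sr_convert_* call:
+ *
+ *   if ( DAT_SUCCESS != dat_sr_get_token(file, &token) )
+ *       return DAT_INTERNAL_ERROR;            (hard I/O failure)
+ *   if ( (DAT_SR_TOKEN_STRING == token.type) && validate(&token) )
+ *       return DAT_SUCCESS;                   (field consumed)
+ *   (void) dat_sr_put_token(file, &token);    (push back for resync)
+ *   return DAT_INTERNAL_ERROR;
+ */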
+/*********************************************************************** + * Function: dat_sr_parse_platform_params + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_platform_params( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( DAT_SR_TOKEN_STRING != token.type ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + entry->platform_params = token.value; + + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_parse_eoe + ***********************************************************************/ + +DAT_RETURN +dat_sr_parse_eoe( + DAT_OS_FILE *file, + DAT_SR_CONF_ENTRY *entry) +{ + DAT_SR_TOKEN token; + DAT_RETURN status; + + if( entry == NULL ) + return DAT_INVALID_PARAMETER; + + if ( DAT_SUCCESS != dat_sr_get_token(file, &token) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( (DAT_SR_TOKEN_EOF != token.type) && + (DAT_SR_TOKEN_EOR != token.type) ) + { + status = DAT_INTERNAL_ERROR; + } + else + { + status = DAT_SUCCESS; + } + + if ( DAT_SUCCESS != status ) + { + DAT_RETURN status_success; + + status_success = dat_sr_put_token(file, &token); + dat_os_assert( DAT_SUCCESS == status_success); + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_convert_api + ***********************************************************************/ + +DAT_RETURN +dat_sr_convert_api ( + char *str, + DAT_SR_API_VERSION *api_version) +{ + int i; + int minor_i; + + dat_os_assert( 0 < dat_os_strlen(str) ); + + if ( 'u' == str[0] ) + { + api_version->type = DAT_SR_API_UDAT; + } + else if ( 'k' == str[0] ) + { + api_version->type = DAT_SR_API_KDAT; + } + else + { + return DAT_INTERNAL_ERROR; + } + + for ( i = 1 /* move past initial [u|k] */; '\0' != str[i]; i++ ) + { + if ( '.' == str[i] ) + { + break; + } + else if ( DAT_TRUE != dat_os_isdigit(str[i]) ) + { + return DAT_INTERNAL_ERROR; + } + } + + api_version->version.major = (DAT_UINT32) dat_os_strtol(str + 1, NULL, 10); + + /* move past '.' 
*/ + minor_i = ++i; + + for ( ; '\0' != str[i]; i++ ) + { + if ( DAT_TRUE != dat_os_isdigit(str[i]) ) + { + return DAT_INTERNAL_ERROR; + } + } + + api_version->version.minor = (DAT_UINT32) dat_os_strtol(str + minor_i, NULL, 10); + + if ( '\0' != str[i] ) + { + return DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_sr_convert_thread_safety + ***********************************************************************/ + +static DAT_RETURN +dat_sr_convert_thread_safety( + char *str, + DAT_BOOLEAN *is_thread_safe) +{ + if ( !dat_os_strncmp(str, + DAT_SR_TOKEN_THREADSAFE, + dat_os_strlen(DAT_SR_TOKEN_THREADSAFE)) ) + { + *is_thread_safe = DAT_TRUE; + return DAT_SUCCESS; + } + else if ( !dat_os_strncmp(str, + DAT_SR_TOKEN_NONTHREADSAFE, + dat_os_strlen(DAT_SR_TOKEN_NONTHREADSAFE)) ) + { + *is_thread_safe = DAT_FALSE; + return DAT_SUCCESS; + } + else + { + return DAT_INTERNAL_ERROR; + } +} + + +/*********************************************************************** + * Function: dat_sr_convert_default + ***********************************************************************/ + +static DAT_RETURN +dat_sr_convert_default ( + char *str, + DAT_BOOLEAN *is_default) +{ + if ( !dat_os_strncmp(str, + DAT_SR_TOKEN_DEFAULT, + dat_os_strlen(DAT_SR_TOKEN_DEFAULT)) ) + { + *is_default = DAT_TRUE; + return DAT_SUCCESS; + } + else if ( !dat_os_strncmp(str, + DAT_SR_TOKEN_NONDEFAULT, + dat_os_strlen(DAT_SR_TOKEN_NONDEFAULT)) ) + { + *is_default = DAT_FALSE; + return DAT_SUCCESS; + } + else + { + return DAT_INTERNAL_ERROR; + } +} + + +/*********************************************************************** + * Function: dat_sr_convert_provider_version + ***********************************************************************/ + +DAT_RETURN +dat_sr_convert_provider_version ( + char *str, + DAT_SR_PROVIDER_VERSION *provider_version) +{ + DAT_RETURN status; + int i; + int decimal_i; + + dat_os_assert( 0 < dat_os_strlen(str) ); + dat_os_assert( NULL == provider_version->id ); + + status = DAT_SUCCESS; + + for ( i = 0; '\0' != str[i]; i++ ) + { + if ( '.' == str[i] ) + { + break; + } + } + + /* if no id value was found */ + if ( 0 == i ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + + if ( NULL == (provider_version->id = dat_os_alloc(sizeof(char) * (i + 1))) ) + { + status = DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + goto exit; + } + + dat_os_strncpy(provider_version->id, str, i); + provider_version->id[i] = '\0'; + + /* move past '.' */ + decimal_i = ++i; + + for ( ; '\0' != str[i]; i++ ) + { + if ( '.' == str[i] ) + { + break; + } + else if ( DAT_TRUE != dat_os_isdigit(str[i]) ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + } + + /* if no version value was found */ + if ( decimal_i == i ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + + provider_version->version.major = (DAT_UINT32) + dat_os_strtol(str + decimal_i, NULL, 10); + + /* move past '.' 
*/
+    decimal_i = ++i;
+
+    for ( ; '\0' != str[i]; i++ )
+    {
+        if ( DAT_TRUE != dat_os_isdigit(str[i]) )
+        {
+            status = DAT_INTERNAL_ERROR;
+            goto exit;
+        }
+    }
+
+    /* if no version value was found */
+    if ( decimal_i == i )
+    {
+        status = DAT_INTERNAL_ERROR;
+        goto exit;
+    }
+
+    provider_version->version.minor = (DAT_UINT32)
+        dat_os_strtol(str + decimal_i, NULL, 10);
+
+    if ( '\0' != str[i] )
+    {
+        status = DAT_INTERNAL_ERROR;
+        goto exit;
+    }
+
+ exit:
+    if ( DAT_SUCCESS != status )
+    {
+        if ( NULL != provider_version->id )
+        {
+            dat_os_free(provider_version->id,
+                sizeof(char) * ((int)dat_os_strlen(provider_version->id) + 1));
+            provider_version->id = NULL;
+        }
+    }
+
+    return status;
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_get_token
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_get_token (
+    DAT_OS_FILE *file,
+    DAT_SR_TOKEN *token)
+{
+    if ( NULL == g_token_stack )
+    {
+        return dat_sr_read_token(file, token);
+    }
+    else
+    {
+        DAT_SR_STACK_NODE *top;
+
+        top = g_token_stack;
+
+        *token = top->token;
+        g_token_stack = top->next;
+
+        dat_os_free(top, sizeof(DAT_SR_STACK_NODE));
+
+        return DAT_SUCCESS;
+    }
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_put_token
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_put_token (
+    DAT_OS_FILE *file,
+    DAT_SR_TOKEN *token)
+{
+    DAT_SR_STACK_NODE *top;
+
+    if ( NULL == (top = dat_os_alloc(sizeof(DAT_SR_STACK_NODE))) )
+    {
+        return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY;
+    }
+
+    top->token = *token;
+    top->next = g_token_stack;
+    g_token_stack = top;
+
+    return DAT_SUCCESS;
+}
+
+
+/***********************************************************************
+ * Function: dat_sr_read_token
+ ***********************************************************************/
+
+DAT_RETURN
+dat_sr_read_token (
+    DAT_OS_FILE *file,
+    DAT_SR_TOKEN *token)
+{
+    DAT_OS_FILE_POS pos;
+    DAT_OS_SIZE token_len;
+    DAT_COUNT num_escape_seq;
+    DAT_BOOLEAN is_quoted_str;
+    DAT_BOOLEAN is_prev_char_backslash;
+    DAT_OS_FILE_POS cur_pos = 0;
+    /*
+     * The DAT standard does not specify a maximum size for quoted
+     * strings. Therefore the tokenizer must be able to read in a token
+     * of arbitrary size. Instead of allocating a fixed-length buffer,
+     * the tokenizer first scans the input a single character at a time
+     * looking for the beginning and end of the token. Once these
+     * positions are found, the entire token is read into memory. By
+     * using this algorithm, the implementation does not place an
+     * arbitrary maximum on the token size.
+ */ + + token_len = 0; + num_escape_seq = 0; + is_quoted_str = DAT_FALSE; + is_prev_char_backslash = DAT_FALSE; + + for (;;) + { + + int c; + + /* if looking for start of the token */ + if ( 0 == token_len ) + { + if ( DAT_SUCCESS != dat_os_fgetpos(file, &cur_pos) ) + { + return DAT_INTERNAL_ERROR; + } + } + + c = dat_os_fgetc(file); + + /* if looking for start of the token */ + if ( 0 == token_len ) + { + if ( EOF == c ) + { + token->type = DAT_SR_TOKEN_EOF; + token->value = NULL; + token->value_len = 0; + goto success; + } + else if ( DAT_SR_CHAR_NEWLINE == c ) + { + token->type = DAT_SR_TOKEN_EOR; + token->value = NULL; + token->value_len = 0; + goto success; + } + else if ( dat_os_isblank(c) ) + { + continue; + } + else if ( DAT_SR_CHAR_COMMENT == c ) + { + dat_sr_read_comment(file); + continue; + } + else + { + if ( DAT_SR_CHAR_QUOTE == c ) + { + is_quoted_str = DAT_TRUE; + } + + pos = cur_pos; + token_len++; + } + } + else /* looking for the end of the token */ + { + if ( EOF == c ) + { + break; + } + else if ( DAT_SR_CHAR_NEWLINE == c ) + { + /* put back the newline */ + dat_os_fputc(file, c); + break; + } + else if ( !is_quoted_str && dat_os_isblank(c) ) + { + break; + } + else + { + token_len++; + + if ( (DAT_SR_CHAR_QUOTE == c) && !is_prev_char_backslash ) + { + break; + } + else if ( (DAT_SR_CHAR_BACKSLASH == c) && !is_prev_char_backslash ) + { + is_prev_char_backslash = DAT_TRUE; + num_escape_seq++; + } + else + { + is_prev_char_backslash = DAT_FALSE; + } + } + } + } + + /* the token was a string */ + if ( DAT_SUCCESS != dat_os_fsetpos(file, &pos) ) + { + return DAT_INTERNAL_ERROR; + } + + if ( is_quoted_str ) + { + if ( DAT_SUCCESS != dat_sr_read_quoted_str(file, + token, + token_len, + num_escape_seq) ) + { + return DAT_INTERNAL_ERROR; + } + } + else + { + if ( DAT_SUCCESS != dat_sr_read_str(file, + token, + token_len) ) + { + return DAT_INTERNAL_ERROR; + } + } + + success: + dat_os_dbg_print(DAT_OS_DBG_TYPE_SR, + "\n" + "DAT Registry: token\n" + " type %s\n" + " value <%s>\n" + "\n", + dat_sr_type_to_str(token->type), + ((DAT_SR_TOKEN_STRING == token->type) ? 
token->value : "") ); + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_sr_read_str + ***********************************************************************/ + +DAT_RETURN +dat_sr_read_str ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token, + DAT_OS_SIZE token_len) +{ + token->type = DAT_SR_TOKEN_STRING; + token->value_len = sizeof(char) * (token_len + 1); /* +1 for null termination */ + if ( NULL == (token->value = dat_os_alloc((int)token->value_len)) ) + { + return DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + } + + if ( token_len != dat_os_fread(file, token->value, token_len) ) + { + dat_os_free(token->value, (int)token->value_len); + token->value = NULL; + + return DAT_INTERNAL_ERROR; + } + + token->value[token->value_len - 1] = '\0'; + + return DAT_SUCCESS; +} + + +/*********************************************************************** + * Function: dat_sr_read_quoted_str + ***********************************************************************/ + +DAT_RETURN +dat_sr_read_quoted_str ( + DAT_OS_FILE *file, + DAT_SR_TOKEN *token, + DAT_OS_SIZE token_len, + DAT_COUNT num_escape_seq) +{ + DAT_OS_SIZE str_len; + DAT_OS_SIZE i; + DAT_OS_SIZE j; + int c; + DAT_RETURN status; + DAT_BOOLEAN is_prev_char_backslash; + + str_len = token_len - 2; /* minus 2 " characters */ + is_prev_char_backslash = DAT_FALSE; + status = DAT_SUCCESS; + + token->type = DAT_SR_TOKEN_STRING; + /* +1 for null termination */ + token->value_len = sizeof(char) * (str_len - num_escape_seq + 1); + + if ( NULL == (token->value = dat_os_alloc((int)token->value_len)) ) + { + status = DAT_INSUFFICIENT_RESOURCES | DAT_RESOURCE_MEMORY; + goto exit; + } + + /* throw away " */ + if ( DAT_SR_CHAR_QUOTE != dat_os_fgetc(file) ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + + for ( i = 0, j = 0; i < str_len; i++ ) + { + c = dat_os_fgetc(file); + + if ( EOF == c ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + else if ( (DAT_SR_CHAR_BACKSLASH == c) && !is_prev_char_backslash ) + { + is_prev_char_backslash = DAT_TRUE; + } + else + { + token->value[j] = (DAT_UINT8)c; + j++; + + is_prev_char_backslash = DAT_FALSE; + } + } + + /* throw away " */ + if ( DAT_SR_CHAR_QUOTE != dat_os_fgetc(file) ) + { + status = DAT_INTERNAL_ERROR; + goto exit; + } + + token->value[token->value_len - 1] = '\0'; + +exit: + if ( DAT_SUCCESS != status ) + { + if ( NULL != token->value ) + { + dat_os_free(token->value, (int)token->value_len); + token->value = NULL; + } + } + + return status; +} + + +/*********************************************************************** + * Function: dat_sr_read_comment + ***********************************************************************/ + +void +dat_sr_read_comment ( + DAT_OS_FILE *file) +{ + int c; + + /* read up to an EOR or EOF to move past the comment */ + do + { + c = dat_os_fgetc(file); + } while ( (DAT_SR_CHAR_NEWLINE != c) && (EOF != c) ); + + /* put back the newline */ + dat_os_fputc(file, c); +} diff --git a/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.h b/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.h new file mode 100644 index 00000000..de5b3e61 --- /dev/null +++ b/branches/Ndi/ulp/dapl/dat/udat/udat_sr_parser.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. 
The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dat_sr_parser.h
+ *
+ * PURPOSE: static registry (SR) parser interface declarations
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _DAT_SR_PARSER_H_
+#define _DAT_SR_PARSER_H_
+
+
+#include "dat_osd.h"
+
+
+/*********************************************************************
+ *                                                                   *
+ * Function Declarations                                             *
+ *                                                                   *
+ *********************************************************************/
+
+/*
+ * The static registry exports the same interface regardless of
+ * platform. The particular implementation of dat_sr_load() is
+ * found with other platform dependent sources.
+ */
+
+extern DAT_RETURN
+dat_sr_load (void);
+
+
+#endif /* _DAT_SR_PARSER_H_ */
diff --git a/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.c b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.c
new file mode 100644
index 00000000..8cf0b3f3
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dat_osd.c
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *     Provide OS dependent functions with a canonical DAPL
+ *     interface. Designed to be portable and hide OS specific quirks
+ *     of common functions.
+ *
+ * $Id$
+ **********************************************************************/
+
+#include "dat_osd.h"
+#include "dat_init.h"
+
+
+/*********************************************************************
+ *                                                                   *
+ * Constants                                                         *
+ *                                                                   *
+ *********************************************************************/
+
+#define DAT_DBG_LEVEL_ENV "DAT_DBG_LEVEL"
+#define DAT_DBG_DEST_ENV "DAT_DBG_DEST"
+
+
+/*********************************************************************
+ *                                                                   *
+ * Enumerations                                                      *
+ *                                                                   *
+ *********************************************************************/
+
+typedef int DAT_OS_DBG_DEST;
+
+typedef enum
+{
+    DAT_OS_DBG_DEST_STDOUT = 0x1,
+} DAT_OS_DBG_DEST_TYPE;
+
+
+/*********************************************************************
+ *                                                                   *
+ * Global Variables                                                  *
+ *                                                                   *
+ *********************************************************************/
+
+static DAT_OS_DBG_TYPE_VAL g_dbg_type = DAT_OS_DBG_TYPE_ERROR;
+static DAT_OS_DBG_DEST g_dbg_dest = DAT_OS_DBG_DEST_STDOUT;
+
+
+/***********************************************************************
+ * Function: dat_os_dbg_init
+ ***********************************************************************/
+
+void
+dat_os_dbg_init ( void )
+{
+    char *dbg_type;
+    char *dbg_dest;
+
+    dbg_type = dat_os_getenv (DAT_DBG_LEVEL_ENV);
+    if ( dbg_type != NULL )
+    {
+        g_dbg_type = dat_os_strtol(dbg_type, NULL, 0);
+    }
+
+    dbg_dest = dat_os_getenv (DAT_DBG_DEST_ENV);
+    if ( dbg_dest != NULL )
+    {
+        g_dbg_dest = dat_os_strtol(dbg_dest, NULL, 0);
+    }
+}
+
+
+/***********************************************************************
+ * Function: dat_os_dbg_print
+ ***********************************************************************/
+
+void
+dat_os_dbg_print (
+    DAT_OS_DBG_TYPE_VAL type,
+    const char * fmt,
+    ...)
+{
+    if ( (DAT_OS_DBG_TYPE_ERROR == type) || (type & g_dbg_type) )
+    {
+        va_list args;
+
+        va_start(args, fmt);
+
+        if ( DAT_OS_DBG_DEST_STDOUT & g_dbg_dest )
+        {
+            vfprintf(stdout, fmt, args);
+        }
+
+        va_end(args);
+
+        fflush(stdout);
+    }
+}
+
+
+BOOL APIENTRY
+DllMain(
+    IN HINSTANCE h_module,
+    IN DWORD ul_reason_for_call,
+    IN LPVOID lp_reserved )
+{
+    UNREFERENCED_PARAMETER( lp_reserved );
+
+    switch( ul_reason_for_call )
+    {
+    case DLL_PROCESS_ATTACH:
+        DisableThreadLibraryCalls( h_module );
+        dat_init();
+        break;
+
+    case DLL_PROCESS_DETACH:
+        dat_fini();
+    }
+    return TRUE;
+}
diff --git a/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.h b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.h
new file mode 100644
index 00000000..e25e2f43
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd.h
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ * in the file LICENSE.txt in the root directory. The license is also
+ * available from the Open Source Initiative, see
+ * http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ * LICENSE2.txt in the root directory. The license is also available from
+ * the Open Source Initiative, see
+ * http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+/**********************************************************************
+ *
+ * HEADER: dat_osd.h
+ *
+ * PURPOSE: Operating System Dependent layer
+ * Description:
+ *     Provide OS dependent data structures & functions with
+ *     a canonical DAPL interface. Designed to be portable
+ *     and hide OS specific quirks of common functions.
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _DAT_OSD_H_
+#define _DAT_OSD_H_
+
+/*
+ * This file is defined for Windows systems only; including it in any
+ * other build will cause an error.
+ */
+#ifndef WIN32
+#error "UNDEFINED OS TYPE"
+#endif /* WIN32 */
+
+#include <dat/udat.h>
+#include <dat/dat_registry.h>
+
+#include <assert.h>
+#include <ctype.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <windows.h>
+
+#ifndef STATIC
+#define STATIC static
+#endif /* STATIC */
+
+#ifndef INLINE
+#define INLINE __inline
+#endif /* INLINE */
+
+/*********************************************************************
+ *                                                                   *
+ * Debugging                                                         *
+ *                                                                   *
+ *********************************************************************/
+
+#define dat_os_assert(expr) assert(expr)
+
+typedef int DAT_OS_DBG_TYPE_VAL;
+
+typedef enum
+{
+    DAT_OS_DBG_TYPE_ERROR        = 0x1,
+    DAT_OS_DBG_TYPE_GENERIC      = 0x2,
+    DAT_OS_DBG_TYPE_SR           = 0x4,
+    DAT_OS_DBG_TYPE_DR           = 0x8,
+    DAT_OS_DBG_TYPE_PROVIDER_API = 0x10,
+    DAT_OS_DBG_TYPE_CONSUMER_API = 0x20,
+    DAT_OS_DBG_TYPE_ALL          = 0xff
+} DAT_OS_DBG_TYPE_TYPE;
+
+extern void
+dat_os_dbg_init ( void );
+
+extern void
+dat_os_dbg_print (
+    DAT_OS_DBG_TYPE_VAL type,
+    const char * fmt,
+    ...);
+
+
+/*********************************************************************
+ *                                                                   *
+ * Utility Functions                                                 *
+ *                                                                   *
+ *********************************************************************/
+
+#define DAT_ERROR(Type,SubType) ((DAT_RETURN)(DAT_CLASS_ERROR | Type | SubType))
+
+typedef size_t DAT_OS_SIZE;
+typedef HMODULE DAT_OS_LIBRARY_HANDLE;
+
+STATIC INLINE DAT_RETURN
+dat_os_library_load (
+    const char *library_path,
+    DAT_OS_LIBRARY_HANDLE *library_handle_ptr)
+{
+    DAT_OS_LIBRARY_HANDLE library_handle;
+
+    if ( NULL != (library_handle = LoadLibrary(library_path)) )
+    {
+        if ( NULL != library_handle_ptr )
+        {
+            *library_handle_ptr = library_handle;
+        }
+
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        dat_os_dbg_print(DAT_OS_DBG_TYPE_ERROR,
+            "DAT: library load failure\n");
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+#ifdef WIN32_LEAN_AND_MEAN
+#define dat_os_library_sym GetProcAddress
+#else
+STATIC INLINE FARPROC
+dat_os_library_sym (
+    DAT_OS_LIBRARY_HANDLE library_handle,
+    char *sym)
+{
+    return GetProcAddress(library_handle, sym);
+}
+#endif /* WIN32_LEAN_AND_MEAN */
+
+STATIC INLINE DAT_RETURN
+dat_os_library_unload (
+    const DAT_OS_LIBRARY_HANDLE library_handle)
+{
+    if ( 0 == FreeLibrary(library_handle) )
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+    else
+    {
+        return DAT_SUCCESS;
+    }
+}
+
+STATIC INLINE char *
+dat_os_getenv (
+    const char *name)
+{
+    return getenv(name);
+}
+
+STATIC INLINE long int
+dat_os_strtol (
+    const char *nptr,
+    char **endptr,
+    int base)
+{
+    return strtol(nptr, endptr, base);
+}
+
+STATIC INLINE DAT_OS_SIZE
+dat_os_strlen (
+    const char *s )
+{
+    return strlen(s);
+}
+
+STATIC INLINE int
+dat_os_strncmp (
+    const char *s1,
+    const char *s2,
+    DAT_OS_SIZE n)
+{
+    return strncmp(s1, s2, n);
+}
+
+STATIC INLINE void *
+dat_os_strncpy (
+    char *dest,
+    const char *src,
+    DAT_OS_SIZE len)
+{
return strncpy (dest, src, len);
+}
+
+STATIC INLINE DAT_BOOLEAN
+dat_os_isblank(
+    int c)
+{
+    if ( (' ' == c) || ('\t' == c) ) { return DAT_TRUE; }
+    else { return DAT_FALSE; }
+}
+
+STATIC INLINE DAT_BOOLEAN
+dat_os_isdigit(
+    int c)
+{
+    if ( isdigit(c) ) { return DAT_TRUE; }
+    else { return DAT_FALSE; }
+}
+
+STATIC INLINE void
+dat_os_usleep(
+    unsigned long usec)
+{
+    Sleep(usec/1000);
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * Memory Functions                                                  *
+ *                                                                   *
+ *********************************************************************/
+
+STATIC INLINE void *
+dat_os_alloc (
+    int size)
+{
+    return malloc (size);
+}
+
+STATIC INLINE void
+dat_os_free (
+    void *ptr,
+    int size)
+{
+    free (ptr);
+}
+
+STATIC INLINE void *
+dat_os_memset (void *loc, int c, DAT_OS_SIZE size)
+{
+    return memset (loc, c, size);
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * File I/O                                                          *
+ *                                                                   *
+ *********************************************************************/
+
+typedef FILE DAT_OS_FILE;
+typedef fpos_t DAT_OS_FILE_POS;
+
+
+STATIC INLINE DAT_OS_FILE *
+dat_os_fopen (
+    const char * path)
+{
+    /* always open files in read only mode */
+    return fopen(path, "r");
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_fgetpos (
+    DAT_OS_FILE *file,
+    DAT_OS_FILE_POS *pos)
+{
+    if ( 0 == fgetpos(file, pos) )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_fsetpos (
+    DAT_OS_FILE *file,
+    DAT_OS_FILE_POS *pos)
+{
+    if ( 0 == fsetpos(file, pos) )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+/* dat_os_fgetc() returns EOF on error or end of file. */
+STATIC INLINE int
+dat_os_fgetc (
+    DAT_OS_FILE *file)
+{
+    return fgetc(file);
+}
+
+/* dat_os_fputc() returns EOF on error, or the character written. */
+STATIC INLINE int
+dat_os_fputc (
+    DAT_OS_FILE *file, int c)
+{
+    return fputc(c, file);
+}
+
+/* dat_os_fread returns the number of bytes read from the file.
*/
+STATIC INLINE DAT_OS_SIZE
+dat_os_fread (
+    DAT_OS_FILE	*file,
+    char	*buf,
+    DAT_OS_SIZE	len)
+{
+    return fread(buf, sizeof(char), len, file);
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_fclose (
+    DAT_OS_FILE *file)
+{
+    if ( 0 == fclose(file) )
+    {
+        return DAT_SUCCESS;
+    }
+    else
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+}
+
+
+/*********************************************************************
+ *                                                                   *
+ * Locks                                                             *
+ *                                                                   *
+ *********************************************************************/
+
+typedef HANDLE	DAT_OS_LOCK;
+
+/* lock functions */
+STATIC INLINE DAT_RETURN
+dat_os_lock_init (
+    IN	DAT_OS_LOCK *m)
+{
+    *m = CreateMutex (0, FALSE, 0);
+    if (*(HANDLE *)m == NULL)
+    {
+        return DAT_INTERNAL_ERROR;
+    }
+    return DAT_SUCCESS;
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_lock (
+    IN	DAT_OS_LOCK *m)
+{
+    WaitForSingleObject(*m, INFINITE);
+
+    return DAT_SUCCESS;
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_unlock (
+    IN	DAT_OS_LOCK *m)
+{
+    ReleaseMutex (*m);
+
+    return DAT_SUCCESS;
+}
+
+STATIC INLINE DAT_RETURN
+dat_os_lock_destroy (
+    IN	DAT_OS_LOCK *m)
+{
+    CloseHandle (*m);
+
+    return DAT_SUCCESS;
+}
+
+
+#endif /* _DAT_OSD_H_ */
+
+/*
+ * Local variables:
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 8
+ * End:
+ */
+
diff --git a/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd_sr.h b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd_sr.h
new file mode 100644
index 00000000..0c9de746
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_osd_sr.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2002, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under the terms of the "IBM Common Public
+ * License 1.0" a copy of which is in the file LICENSE.txt in the
+ * root directory. The license is also available from the Open Source
+ * Initiative, see http://www.opensource.org/licenses/ibmpl.html.
+ *
+ */
+
+/**********************************************************************
+ *
+ * MODULE: dat_osd_sr.h
+ *
+ * PURPOSE: static registry (SR) platform specific interface declarations
+ *
+ * $Id$
+ **********************************************************************/
+
+#ifndef _DAT_OSD_SR_H_
+#define _DAT_OSD_SR_H_
+
+
+#include "dat_osd.h"
+
+
+/*********************************************************************
+ *                                                                   *
+ * Function Declarations                                             *
+ *                                                                   *
+ *********************************************************************/
+
+/*
+ * The static registry exports the same interface regardless of
+ * platform. The particular implementation of dat_sr_load() is
+ * found with other platform dependent sources.
+ */
+
+extern DAT_RETURN
+dat_sr_load (void);
+
+
+#endif /* _DAT_OSD_SR_H_ */
diff --git a/branches/Ndi/ulp/dapl/dat/udat/windows/dat_win.def b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_win.def
new file mode 100644
index 00000000..976ef99a
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dat/udat/windows/dat_win.def
@@ -0,0 +1,9 @@
+EXPORTS
+
+dat_init
+dat_fini
+dat_registry_add_provider
+dat_registry_remove_provider
+dat_registry_list_providers
+dat_ia_openv
+dat_ia_close
diff --git a/branches/Ndi/ulp/dapl/dirs b/branches/Ndi/ulp/dapl/dirs
new file mode 100644
index 00000000..11081a4b
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/dirs
@@ -0,0 +1,4 @@
+DIRS=\
+	test \
+	dat \
+	dapl
diff --git a/branches/Ndi/ulp/dapl/doc/dapl_coding_style.txt b/branches/Ndi/ulp/dapl/doc/dapl_coding_style.txt
new file mode 100644
index 00000000..cf41ae68
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dapl_coding_style.txt
@@ -0,0 +1,264 @@
+#######################################################################
+#                                                                     #
+#  DAPL Coding style reference                                        #
+#                                                                     #
+#  Steve Sears                                                        #
+#  sjs2 at users.sourceforge.net                                      #
+#                                                                     #
+#  12/13/2002                                                         #
+#                                                                     #
+#######################################################################
+
+======================================================================
+Introduction
+======================================================================
+
+The purpose of this document is to establish the coding style adopted by
+the team implementing the DAPL reference implementation. The rules
+presented here were arrived at by consensus; they are intended to
+provide consistency of implementation and make it intuitive to work with
+the source code.
+
+======================================================================
+Source code conventions
+======================================================================
+
+1. Brackets
+
+   Brackets should follow C99 conventions and declare a block. The
+   following convention is followed:
+
+	if (x)
+	{
+	    statement;
+	    statement;
+	}
+
+   The following bracket styles are to be avoided:
+
+   K&R style:
+
+	if (x) {		/* DON'T DO THIS */
+	    statement;
+	}
+
+   GNU style:
+
+	if (x)			/* DON'T DO THIS */
+	  {
+	    statement;
+	  }
+
+   Statements are always indented from brackets.
+
+   Brackets are always used for any statement in order to avoid dangling
+   clause bugs. E.g.
+
+   RIGHT:
+	if ( x )
+	{
+	    j = 0;
+	}
+
+   WRONG:
+	if ( x )
+	    j = 0;
+
+2. Indents
+
+   Indents are always 4, tabs 8. A tab may serve as a double
+   indent. Many of the reference implementation files have an emacs
+   format statement at the bottom.
+
+3. Comments
+
+   Comments are always full C style comments, and never C++
+   style. Comments take the form:
+
+	/*
+	 * comment
+	 */
+
+4. Variable Declarations
+
+   Variables are always declared on their own line; we do not declare
+   multiple variables on the same line.
+
+   Variables are never initialized in their declaration; they are
+   initialized in the body of the code.
+
+5. Function Declarations
+
+   The return type of a function is declared on a separate line from the
+   function name.
+
+   Parameters each receive a line and should be clearly labeled as IN
+   or OUT or INOUT. Parameter declarations begin one tab stop from the
+   margin.
+
+   For example:
+
+	DAT_RETURN
+	dapl_function (
+	    IN  DAT_IA_HANDLE	ia_handle,
+	    OUT DAT_EP_HANDLE	*ep_handle )
+	{
+	    ... function body ...
+	}
+
+6. White space
+
+   Don't be afraid of white space, the goal is to make the code readable
+   and maintainable.
We use white space:
+
+   - One space following function names or conditional expressions. It
+     might be better to say one space before any open parenthesis.
+
+   - Suggestion: One space following open parens and one space before
+     closing parens. Not all of the code follows this convention; use
+     your best judgment.
+
+     Example:
+
+	foo ( x1, x2 );
+
+7. Conditional code
+
+   We generally try to avoid conditional compilation, but there are
+   certain places where it cannot be avoided. Whenever possible, move
+   the conditional code into a macro or otherwise work to put it into an
+   include file that can be used by the platform (e.g. Linux or Windows
+   osd files), or by the underlying provider (e.g. IBM Torrent or
+   Mellanox Tavor).
+
+   Conditionals should be descriptive, and the associated #endif should
+   contain the declaration. E.g.
+
+	#ifdef THIS_IS_AN_EXAMPLE
+
+	/* code */
+
+	#endif /* THIS_IS_AN_EXAMPLE */
+
+   You may change the ending comment if a #else clause is present. E.g.
+
+	#ifdef THIS_IS_AN_EXAMPLE
+	/* code */
+
+	#else
+	/* other code */
+
+	#endif /* !THIS_IS_AN_EXAMPLE */
+
+
+======================================================================
+Naming conventions
+======================================================================
+
+1. Variable Names
+
+   Variable names for DAPL data structures generally follow their type
+   and should be the same in all source files. A few examples:
+
+   Handles
+	DAT_IA_HANDLE	ia_handle
+	DAT_EP_HANDLE	ep_handle
+
+   Pointers
+
+	DAPL_IA		*ia_ptr;
+	DAPL_EP		*ep_ptr;
+
+2. Return Code Names
+
+   There are at least two different subsystems supported in the DAPL
+   reference implementation. In order to bring sanity to the error
+   space, return codes are named and used for their appropriate
+   subsystem. E.g.
+
+	ib_status:	InfiniBand status return code
+	dat_status:	DAT/DAPL return code
+
+3. Function Names
+
+   Function names describe the scope to which they apply. There are
+   essentially three names in the reference implementation:
+
+	dapl_*	Name of an exported function visible externally.
+		These functions have a 1 to 1 correspondence to
+		their DAT counterparts.
+
+	dapls_*	Name of a function that is called from more than one
+		source file, but is limited to a subsystem.
+
+	dapli_*	Local function, internal to a file. Should always be
+		of type STATIC.
+
+
+======================================================================
+Util files
+======================================================================
+
+The Reference implementation is organized such that a single, exported
+function is located in its own file. If you are trying to find the DAPL
+function to create an End Point, it will be found in the file named
+after the dapl version of the DAT function in the spec. E.g.
+
+dapl_ep_create() is found in dapl_ep_create.c
+dapl_evd_free()  is found in dapl_evd_free.c
+
+It is often the case that the implementation must interact with data
+structures or call into other subsystems. All utility functions for a
+subsystem are gathered into the appropriate "util" file.
+
+For example, dapl_ep_create must allocate a DAPL_EP structure. The
+routine to allocate and initialize memory is found in the
+dapl_ep_util.c file and is named dapl_ep_alloc(). Appropriate routines
+for the util file are:
+
+   - Alloc
+   - Free
+   - Assign defaults
+   - Linking routines
+   - Check restrictions
+   - Perform operations on a data structure.
+
+The idea of a util file is an object oriented idea for a non-OO
+language. It encourages a clean implementation.
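+
+As an illustration only, the shape of such a util header might look
+like this (a sketch; the exact signatures are assumptions made for
+this document, not copies of the real file):
+
+	/* dapl_ep_util.h -- sketch, signatures assumed */
+	DAPL_EP *
+	dapl_ep_alloc (
+	    IN  DAPL_IA		*ia_ptr,
+	    IN  DAT_EP_ATTR	*ep_attr );	/* alloc, assign defaults */
+
+	void
+	dapl_ep_dealloc (
+	    IN  DAPL_EP		*ep_ptr );	/* check restrictions, free */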
+
+For each util.c file, there is also a util.h file. The purpose of the
+util include file is to define the prototypes for the util file, and to
+supply any local flags or values necessary to the subsystem.
+
+======================================================================
+Include files, prototypes
+======================================================================
+
+Include files are organized according to subsystem and/or OS
+platform. The include directory contains files that are global to the
+entire source set. Prototypes are found in include files that pertain to
+the data they support.
+
+Commenting on the DAPL Reference Implementation tree:
+
+	dapl/common
+	dapl/include
+		Contains global dapl data structures, symbols, and
+		prototypes
+	dapl/tavor
+		Contains tavor prototypes and symbols
+	dapl/torrent
+		Contains torrent prototypes and symbols
+	dapl/udapl
+		Contains include files to support udapl specific files
+	dapl/udapl/linux
+		Contains osd files for Linux
+	dapl/udapl/windows
+		Contains osd files for Windows
+
+For completeness, the dat files described by the DAT Specification are
+in the tree under the dat/ subdirectory:
+
+	dat/include/dat/
+
+
diff --git a/branches/Ndi/ulp/dapl/doc/dapl_end_point_design.txt b/branches/Ndi/ulp/dapl/doc/dapl_end_point_design.txt
new file mode 100644
index 00000000..c03cda76
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dapl_end_point_design.txt
@@ -0,0 +1,908 @@
+#######################################################################
+#                                                                     #
+#  DAPL End Point Management Design                                   #
+#                                                                     #
+#  Steve Sears                                                        #
+#  sjs2 at users.sourceforge.net                                      #
+#                                                                     #
+#  10/04/2002                                                         #
+#                                                                     #
+#######################################################################
+
+
+======================================================================
+Referenced Documents
+======================================================================
+
+uDAPL: User Direct Access Programming Library, Version 1.0. Published
+6/21/2002. http://www.datcollaborative.org/uDAPL_062102.pdf.
+Referred to in this document as the "DAT Specification".
+
+InfiniBand Access Application Programming Interface Specification,
+Version 1.2, 4/15/2002. In DAPL SourceForge repository at
+doc/api/access_api.pdf. Referred to in this document as the "IBM
+Access API Specification".
+
+InfiniBand Architecture Specification Volume 1, Release 1.0.a.
+Referred to in this document as the "InfiniBand Spec".
+
+======================================================================
+Introduction to EndPoints
+======================================================================
+
+An EndPoint is the fundamental channel abstraction for the DAT API. An
+application communicates and exchanges data using an
+EndPoint. Most of the time EndPoints are explicitly allocated, but
+there is an exception whereby a connection event can yield an EndPoint
+as a side effect; this is not supported by all transports or
+implementations, and it is not currently supported in the InfiniBand
+reference implementation.
+
+Each DAT API function is implemented in a file named
+
+	dapl_<function>.c
+
+There is a simple mapping provided by the dat library that maps
+dat_* to dapl_*; dat_pz_create, for example, is implemented in
+dapl_pz_create.c. For example:
+
+	DAT		DAPL		Found in
+	------------	---------------	------------------
+	dat_ep_create	dapl_ep_create	dapl_ep_create.c
+	dat_ep_query	dapl_ep_query	dapl_ep_query.c
+
+There are very few exceptions to this naming convention; the Reference
+Implementation tried to be consistent.
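+
+To make the mapping concrete, a consumer call such as the following
+(a sketch; the handle variables are assumed to have been created
+earlier) is serviced by dapl_ep_create() in dapl_ep_create.c:
+
+	DAT_EP_HANDLE	ep_handle;
+	DAT_RETURN	dat_status;
+
+	dat_status = dat_ep_create ( ia_handle, pz_handle,
+				     recv_evd_handle, request_evd_handle,
+				     connect_evd_handle,
+				     NULL,	/* default EP attributes */
+				     &ep_handle );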
+
+There are also dapl_<object>_util.{h,c} files for each object.
+For example, there are dapl_pz_util.h and dapl_pz_util.c files which
+contain common helper functions specific to the 'pz' subsystem. The
+use of util files follows the convention used elsewhere in the DAPL
+reference implementation. These files contain common object creation
+and destruction code, linked list manipulation, and other helper
+functions.
+
+This implementation has a simple naming convention designed to alert
+someone reading the source code to the nature and scope of a
+function. The convention is in the function name, such that:
+
+	dapl_	Primary entry from a dat_ function, e.g.
+		dapl_ep_create(), which mirrors dat_ep_create().
+	dapls_	The 's' restricts it to the subsystem, e.g. the
+		'ep' subsystem. dapls_ functions are not exposed
+		externally, but are internal to dapl.
+	dapli_	The 'i' restricts the function to the file where it
+		is declared. These functions are always 'static' C
+		functions.
+
+1. EndPoints (EPs)
+-------------------------
+DAPL EndPoints provide a channel abstraction necessary to transmit and
+receive data. EndPoints interact with Service Points, either Public
+Service Points or Reserved Service Points, to establish a connection
+from one provider to another.
+
+The primary EP entry points in the DAT API as they relate to DAPL are
+listed in the following table:
+
+	dat_ep_create
+	dat_ep_query
+	dat_ep_modify
+	dat_ep_connect
+	dat_ep_dup_connect
+	dat_ep_disconnect
+	dat_ep_post_send
+	dat_ep_post_recv
+	dat_ep_post_rdma_read
+	dat_ep_post_rdma_write
+	dat_ep_get_status
+	dat_ep_free
+
+Additionally, the following connection functions interact with
+EndPoints:
+	dat_psp_create
+	dat_psp_query
+	dat_psp_free
+	dat_rsp_create
+	dat_rsp_query
+	dat_rsp_free
+
+The reference implementation maps the EndPoint abstraction onto an
+InfiniBand Queue Pair (QP).
+
+The DAPL_EP structure is used to maintain the state and components of
+the EP object and the underlying QP. As will be explained below,
+keeping track of the QP state is critical for successful
+operation. Accesses to the DAPL_EP fields are done atomically.
+
+
+======================================================================
+Goals
+======================================================================
+
+Initial goals
+-------------
+-- Implement the dat_ep_* calls described in the DAT Specification with
+   the following exceptions:
+	- dat_ep_dup_connect
+	- the timeout value of dat_ep_connect
+
+-- Implement connection calls described in the DAT Specification with
+   the following exceptions:
+	- dat_rsp_* calls
+	- support for DAT_PSP_PROVIDER flag on PSP creation
+
+-- The implementation should be as portable as possible, to facilitate
+   HCA Vendors' efforts to implement vendor-specific versions of DAPL.
+
+-- The implementation must be able to work during ongoing development
+   of InfiniBand agents, drivers, etc.
+
+Later goals
+-----------
+-- Examine various possible performance optimizations. This document
+   lists potential performance improvements, but the specific
+   performance improvements implemented should be guided by customer
+   requirements.
+
+-- Implement the dat_rsp_* calls described in the DAT 1.0 spec
+
+-- Implement dat_ep_dup_connect
+
+-- Resolve the timeout issue for dat_ep_connect
+
+-- Implement DAT_PSP_PROVIDER flag on PSP creation
+
+-- Remove hacks & workarounds necessitated by developing IB
+   implementations.
+
+============================================
+Requirements, constraints, and design inputs
+============================================
+
+The EndPoint is the base channel abstraction. An EndPoint must be
+established before data can be exchanged with a remote node. The
+EndPoint is mapped to the underlying InfiniBand QP channel
+abstraction. When a connection is initiated, the InfiniBand
+Connection Manager will be solicited. The implementation is
+constrained by the capabilities and behavior of the underlying
+InfiniBand facilities.
+
+An EP is not an exact match to an InfiniBand QP; the differences
+introduce constraints that are not obvious. There are three primary
+areas of conflict between the DAPL and InfiniBand models:
+
+1) EP and QP creation differences
+2) Provider provided EPs on passive side of connections
+3) Connection timeouts
+
+-- EP and QP creation
+
+The most obvious difference between an EP and a QP is the presence of
+a memory handle when the object is created. InfiniBand requires a
+Protection Domain be specified when a QP is created; in the DAPL
+world, a Protection Zone (PZ) maps to an InfiniBand Protection Domain.
+DAPL does not require a PZ to be present when an EP is created, and
+that introduces two problems:
+
+1) If a PZ is NULL when an EP is created, a QP will not be bound to
+   the EP until dat_ep_modify is used to assign it later. A PZ is
+   required before RECV requests can be posted and before a connection
+   can be established.
+
+2) If a DAPL user changes the PZ on an EP before it is connected,
+   DAPL must release the current QP and create a new one with a
+   new Protection Domain.
+
+-- Provider provided EPs on connection
+
+The second area where the DAPL and IB models conflict is a direct result
+of the requirement to specify a Protection Domain when a QP is created.
+
+DAPL allows a PSP to be created in such a way that an EP will
+automatically be provided to the user when a connection occurs. This
+is not critical to the DAPL model but in fact does provide some
+convenience to the user. InfiniBand provides a similar mechanism, but
+with an important difference: InfiniBand requires the user to supply
+the Protection Domain for the passive connection endpoint that will be
+supplied to all QPs created as a result of connection requests; DAPL
+mandates a NULL PZ and requires the user to change the PZ before using
+the EP.
+
+The reference implementation will create an 'empty' EP when the user
+specifies the DAT_PSP_PROVIDER flag; it is empty in the sense that a
+QP is not attached to the EP. Before the user can dat_cr_accept the
+connection, the EP must be modified to have a PZ bound to it, which in
+turn will cause a QP to be bound to the EP.
+
+-- Connection Timeouts
+
+The third difference in the DAPL and InfiniBand models has to do with
+timeouts on connections. InfiniBand does not provide a way to specify
+a connection timeout; it will wait indefinitely for a connection to
+occur. dat_ep_connect supports a timeout value, providing the user with
+control over how long they are willing to wait for a connection to
+occur. The initial implementation does not resolve this mismatch,
+although it could be resolved with a separate timeout thread that will
+wake up and terminate the connection request.
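+
+Problem (2) implies logic along the following lines in the
+dat_ep_modify path (a sketch only; the helper names are assumptions
+made for illustration, not the actual routines):
+
+	/* PZ changed on an unconnected EP: replace the QP so it is
+	 * created in the new PZ's Protection Domain */
+	if ( ep_ptr->param.pz_handle != new_pz_handle )
+	{
+	    if ( ep_ptr->qp_handle != NULL )
+	    {
+		dapls_ib_qp_free ( ia_ptr, ep_ptr );	/* old QP */
+	    }
+	    ep_ptr->param.pz_handle = new_pz_handle;
+	    dapls_ib_qp_alloc ( ia_ptr, ep_ptr );	/* new QP */
+	}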
+
+======================================================================
+DAPL EP Subsystem Design
+======================================================================
+
+In section 6.5.1 of the DAT Specification there is a UML state
+transition diagram for an EndPoint which goes over the transitions and
+states during the lifetime of an EP. It is nearly impossible to read.
+The reference implementation is faithful to the DAT Spec and is
+believed to be correct.
+
+This description of the EP will follow from creation to connection to
+termination. It will also discuss the source code organization as this
+is part of the design expression.
+
+-- EP and QP creation
+
+The preamble to creating an EP requires us to verify the attributes
+specified by the user. If a user were to specify max_recv_dtos as 0,
+for example, the EP would not be useful in any regard. If the user
+does not provide EP attrs, the DAPL layer will supply a set of common
+defaults resulting in a reasonable EP.
+
+A number of handles are bound to the EP, so a reference count is taken
+on each of them. All reference counts in the DAPL system are
+incremented or decremented using atomic operations; it is important to
+always use the OS dependent atomic routines and not substitute a lock,
+as it will not be observed elsewhere in the system and will have
+unpredictable results.
+
+As has been discussed above, each EP is bound to a QP before it can be
+connected. If a valid PZ is provided at creation time then a QP is bound
+to the EP immediately. If the user later uses ep_modify to change the PZ,
+the QP will be destroyed and a new one created with the appropriate
+Protection Domain.
+
+Finally, an EP is an IA resource and is linked onto the EP chain of
+the superior IA. EPs linked onto an IA are assumed to be complete,
+so this is the final step of EP creation.
+
+After an EP is created, the ep_state will be DAT_EP_STATE_UNCONNECTED
+and the qp_state will either be DAPL_QP_STATE_UNATTACHED or
+IB_QP_STATE_INIT. The qp_state indicates the QP binding and the
+current state of the QP.
+
+A qp_state of DAPL_QP_STATE_UNATTACHED indicates there is no QP bound
+to this EP. This is a result of a NULL PZ when dat_ep_create was
+invoked, which has been explained in detail above. The user must
+call dat_ep_modify and install a valid PZ before the EP can be used.
+
+When a QP is created it is in the RESET state, which is specified in
+the InfiniBand Spec, section 10.3. However, DAPL requires an
+unconnected EP to be able to queue RECV requests before a connection
+occurs. The InfiniBand spec allows RECV requests to be queued on a QP
+if the QP is in the INIT state, so after creating a QP the DAPL code
+will transition it to the INIT state.
+
+There is an obvious design tradeoff in transitioning the QP
+state. Immediately moving the state to INIT takes extra time at
+creation but allows immediate posting of RECV operations; however, it
+will involve a more complex tear down procedure if the QP must be
+replaced as a side effect of a dat_ep_modify operation. The
+alternative would be to delay transitioning the QP to INIT until a
+post operation is invoked, but that requires a run time check for
+every post operation. This design assumes users will infrequently
+cause a QP to be replaced after it is created and prefer to pay the
+state transition penalty at creation time.
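+
+The transition itself is a single modify-QP call made right after
+creation, along these lines (a generic verbs-style sketch; the actual
+entry point and attribute layout are vendor specific):
+
+	ib_qp_attr_t	qp_attr;
+
+	dapl_os_memzero ( &qp_attr, sizeof ( qp_attr ) );
+	qp_attr.qp_state = IB_QP_STATE_INIT;	/* RESET -> INIT */
+
+	/* RECVs may now be posted before the EP is connected */
+	ib_modify_qp ( ep_ptr->qp_handle, &qp_attr );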
+ +-- EP Query and Modify operations + +Because all of the ep_param data are kept up to date in the dapl_ep +structure, and because they use the complete DAT specified structure, a +query operation is trivial; a simple assignment from the internal +structure to the user parameter. uDAPL allows the implementation to +either return the fields specified by the user, or to return more than +the user requested; the reference implementation does the latter. It is +simpler and faster to copy the entire structure rather than to determine +which of all of the possible fields the user requested. + +The ep_modify operation will modify the fields in the DAT_PARAM +structure. There are some fields that cannot be updated, and there are +others that can only be updated if the EP is in the correct state. The +uDAPL spec outlines the EP states permitting ep modifications, but +generally they are DAT_EP_STATE_UNCONNECTED and +DAT_EP_STATE_PASSIVE_CONNECTION_PENDING. + +When replacing EVD handles it is a simple matter of releasing a +reference on the previous handle and taking a new reference on the new +handle. All of the implementation does resource tracking using +reference counts, which guarantees a particular handle will not be +released prematurely. Reference counts are checked in the free +routines for various objects. + +As has been mentioned previously, if the PZ handle is changed then the +QP must be released, if there is one, and a new QP must be created to +bind to this EP. + +There are some fields in the DAT_PARAM structure that are related to the +underlying hardware implementation. For these values DAPL will do a +fresh query of the QP, rather than depend on stale values. Even so, the +values returned are 'best effort' as a competing thread may change +certain values before the requesting thread has the opportunity to read +them. Applications should protect against this. + +Finally, the underlying IB provider is invoked to update the QP with +the new values, but only if some of the attributes have been changed. +As is true of most of the implementation, we only invoke the provider +code when necessary. + +====================================================================== +Connections +====================================================================== + +There are of course two sides to a connection, and in the DAPL PSP model +there is an Active and a Passive side. For clarity, the Passive side +is a server waiting for a connection, and the Active side is a client +requesting a connection from the Passive server. We will discuss each +of these in turn. + +Connections happen in the InfiniBand world by using a Connection Manager +interface. Those unfamiliar with the IB model of addressing and +management agents may want to familiarize themselves with these aspects of +the IB spec before proceeding in this document. + +First, let's walk through a primitive diagram of a connection: + + +SERVER (passive) CLIENT (active) +--------------- --------------- +1. dapl_psp_create + [ now listening ] + +2. dapl_ep_connect + <------------- +3. dapls_cr_callback + IB_CME_CONNECTION_REQUEST_PENDING_PRIVATE_DATA + [ Create and post a DAT_CONNECTION_REQUEST_EVENT event ] + +4. Event code processing + +5. Create an EP (unless PSP created) + +6. dapl_cr_accept or dapl_cr_reject + -------------> +7. dapl_evd_connection_callback + IB_CME_CONNECTED + [ Create and post a + DAT_CONNECTION_EVENT_ESTABLISHED + event ] + +8i. <------------- RTU + +9. 
dapls_cr_callback + IB_CME_CONNECTED + [ Create and post a DAT_CONNECTION_EVENT_ESTABLISHED + event ] + +10. ...processing... + +11. Either side issues a dat_ep_disconnect + +12. dapls_cr_callback + IB_CME_DISCONNECTED + + [ Create and post a + DAT_CONNECTION_EVENT_DISCONNECTED + event ] + +13. dapl_evd_connection_callback + IB_CME_DISCONNECTED + [ Create and post a + DAT_CONNECTION_EVENT_DISCONNECTED + event ] + + +In the above diagram, time is numbered in the left hand column and is +represented vertically. + +We will continue our discussion of connections using the above +diagram, following a sequential order for connection establishment. + +There are in fact two types of service points detailed in the uDAPL +specification. At this time only the PSP model, which is a client +server model, has been implemented, so our discussion will focus +there. + +The reader should observe that all passive side connection events will +be received by dapls_cr_callback(), and all active side connection +events occur through dapl_evd_connection_callback(). At one point +during the implementation these routines were combined as they are +very similar, but there are subtle differences causing them to remain +separate. + +Progressing through the series of events as outlined in the diagram +above: + +1. dapl_psp_create + + When a PSP is created, the final act will be to set it listening + for connections from remote nodes. It is important to realize that + a connection may in fact arrive from a remote node before the + routine setting up a listener has returned to dapl_psp_create; as + soon as dapls_ib_setup_conn_listener() is invoked connection + callbacks may arrive. To avoid race conditions this routine must be + called as the last practical operation when creating a PSP. + + dapls_ib_setup_conn_listener() is provider specific. The key + insight is that the DAPL connection qualifier (conn_qual) will + become the InfiniBand Service ID. The passive side of the + connection is now listening for connection requests. It should be + obvious that the conn_qual must be unique. + +2. dapl_ep_connect + + The active side initiates a connection with dapl_ep_connect, which + will transition the EP into DAT_EP_STATE_ACTIVE_CONNECTION_PENDING. + Again, connections are in the domain of the providers' Connection + Manager and the mechanics are very much provider specific. The key + points are that a DAT_IA_ADDRESS_PTR must be translated to a GID + before a connection initiation can occur. This is discussed below. + + InfiniBand supports different amounts of private data on various + connection functions. The DAPL connection code does not enforce a + fixed amount of private data, but rather makes available to the + user all it has available. When initiating a connection, and when + the remote node accepts a connection, we prefix the user data with + a private data header. The header contains + + - Local host IP address + - private data size + - private data payload + + The underlying implementation will copy the private data into a + buffer to be sent, so we need to assemble all of it before passing + it down to the CM. + + The Local host IP address is required by the DAT spec when a + connection event occurs, and there is no other way for the remote + node to get it; even with a LID or GID the IB implementation could + support multiple IP interfaces, so it must be supplied here. + + The private data size is transmitted to provide the obvious + information. 
Some CM implementations do not keep track of the size,
+   so it is required for them; for other implementations it is a
+   convenience.
+
+   At this time there is no InfiniBand API available that will allow us
+   to transmit or obtain the host IP address. The DAT Spec is very
+   careful to avoid imposing a protocol, and yet here is a wire protocol
+   that vendors must implement if there is any hope of
+   interoperability. The header just described is currently considered
+   temporary until a better method can be ascertained, presumably with a
+   fully working IPoIB implementation. However, it may be the case that
+   this simple transmission of the host IP address in the private data
+   is the best solution, in which case it should be promoted into the
+   DAT Spec as required by InfiniBand providers.
+
+   We observe in passing that the IP address can take up to 16 bytes,
+   while there are 92 bytes total private data in a REQ message
+   (connection request), leaving 76 bytes for an application. This
+   exactly allows the SDP Hello Header to fit in the Private Data
+   space remaining. An SDP implementation could be done on top of
+   DAPL.
+
+
+* Addressing and Naming
+
+   The DAT Spec calls for a DAT_IA_ADDRESS_PTR to be an IP address,
+   either IPv4 or IPv6. It is in fact a struct sockaddr in most
+   systems.
+
+   The long term solution to resolving an IP address to a GID is to use
+   an IPoIB implementation with an API capable of performing this
+   function. At the time of this writing this API is being worked out
+   with an HCA vendor, with the hope that other HCA vendors will
+   follow suit.
+
+   Until IPoIB is working properly, the DAPL implementation provides a
+   simple name service facility under the #ifdef NO_NAME_SERVICE. This
+   depends on two things: valid IP addresses registered and available
+   to standard DNS system calls such as gethostbyname(); and a
+   name/GID mapping file.
+
+   IP addresses may be set up by system administrators or by a local
+   power user simply by editing the values into the /etc/hosts file.
+   Setting IP addresses up in this manner is beyond the scope of this
+   document.
+
+   A simple mapping of names to GIDs is maintained in the ibhosts
+   file, currently located at /etc/dapl/ibhosts. The format of
+   the file is:
+
+	<hostname> 0x<gid prefix> 0x<guid>
+
+   For example:
+
+	dat-linux3-ib0p0 0xfe80000000000000 0x0001730000003d11
+	dat-linux3-ib0p1 0xfe80000000000000 0x0001730000003d11
+	dat-linux3-ib1   0xfe80000000000000 0x0001730000003d52
+	dat-linux5-ib0   0xfe80000000000000 0x0001730000003d91
+
+   And for each hostname, there must be an entry in the /etc/hosts file
+   similar to:
+
+	dat-linux3-ib0p0	198.165.10.11
+	dat-linux3-ib0p1	198.165.10.12
+	dat-linux3-ib1		198.165.10.21
+	dat-linux5-ib0		198.165.10.31
+
+
+   In this example we have adopted the convention of naming each
+   InfiniBand interface by using the form
+
+	<hostname>-ib<hca number>[p<port number>]
+
+   In the above example we can see that the machine dat-linux3 has three
+   InfiniBand interfaces: in this case, two ports on the first HCA and
+   another port on a second. Utilizing standard DNS naming, the
+   conventions used for identifying individual ports are completely up
+   to the administrator.
+
+   The GID Prefix and GUID are obtained from the HCA and map to a port
+   on the HCA; together they form the GID that is required by a CM to
+   connect with the remote node.
+
+   The simple name service builds an internal table after processing
+   the ibhosts file which contains IP addresses and GIDs.
It will use
+   the standard getaddrinfo() function to obtain IP address
+   information.
+
+   When an application invokes dat_ep_connect(), the
+   DAT_IA_ADDRESS_PTR will be compared against the table for a match
+   and the destination GID established if found. If the address is not
+   found then the user must first add the name to the ibhosts file.
+
+   With a valid GID for the destination node, the underlying CM is
+   invoked to make a connection.
+
+* Connection Management
+
+   Getting a working CM has taken some time; in fact the DAPL project
+   was nearly complete by the time a CM was available. In order to
+   make progress, a connection hack was introduced that allows
+   specific connections to take place. This is noted in the code by
+   the CM_BUSTED #def.
+
+   CM_BUSTED takes the place of a CM and will manually transition a QP
+   through the various states to connect: INIT->RTR->RTS. It will also
+   disconnect the connection, although the Torrent implementation
+   simply destroys the QP and creates a new one rather than
+   transitioning through the typical disconnect states (which didn't
+   work on early IB implementations).
+
+   CM_BUSTED makes some assumptions about the remote end of the
+   connection as no real information is exchanged. The Torrent
+   implementation assumes both HCAs have the same LID, which implies
+   there is no SM running. The Tavor implementation assumes the LIDs
+   are 0 and 1. Depending on the hardware, the LID value may in fact
+   not make any difference. This code does not set the Global Route
+   Header (GRH), which would cause the InfiniBand chip to be carefully
+   checking LID information.
+
+   The QP number is assumed to be identical on both ends of the
+   connection, or differing by 1 if this is a loopback. There is an
+   environment variable that will be read at initialization time if
+   you are configured with a loopback; this value is checked when
+   setting up a QP. The obvious downside to this scheme is that
+   applications must stay synchronized in their QP usage or the
+   initial exchange will fail as they are not truly connected.
+
+   Add to this the limitation that HCAs must be connected in
+   Point-to-Point topology or in a loopback. Without a GRH it will not
+   work in a fabric. Again, using an SM will not work when CM_BUSTED
+   is enabled.
+
+   Despite these shortcomings, CM_BUSTED has proven very useful and
+   will remain in the code for a while in order to aid development
+   groups with new hardware and software.
+
+3. dapls_cr_callback
+
+   The connection sequence is entirely event driven. An operation is
+   posted, then an asynchronous event will occur some time later. The
+   event may cause other actions to occur which may result in still
+   more events.
+
+   dapls_ib_setup_conn_listener() registered for a callback for
+   connection events, and we now receive
+   IB_CME_CONNECTION_REQUEST_PENDING_PRIVATE_DATA. As has been
+   discussed above, DAPL will always send private data on a connection
+   so this is the event we will get.
+
+   The astute reader will observe that there is not a dapl_cr_create
+   call: CR records are created as part of a connection attempt on the
+   passive side of the connection. A CR is created now and set up. A
+   point that will become important later, caps for emphasis:
+
+   A CR WILL EXIST FOR THE LIFE OF A CONNECTION; IT IS DESTROYED AT
+   DISCONNECT TIME.
+
+   In the connection request processing a CR and an EVENT are created;
+   the event will be posted along with the connection information just
+   received.
+
+   We also check the PSP to see if an EP is created at this point, and
+   allocate one if so. The EP will be provided back to the user in the
+   event, as per the DAT model.
+
+4. Event code processing
+5. Create an EP (unless PSP created)
+
+   (4) and (5) are all done in user mode. The only interesting thing is
+   that when the user calls dat_cr_accept a ready EP must be
+   provided. If the EP was supplied by the PSP in the callback, it
+   must have a PZ associated with it and whatever other attributes
+   need to be set.
+
+6. dapl_cr_accept or dapl_cr_reject
+
+   For discussion purposes, we will follow the accept
+   path. dapl_cr_reject says you are done and there will be no further
+   events to deal with.
+
+   The accept call is intuitive and simple. Again, private data will
+   be exchanged and so the private data header is used. The requesting
+   node presumably knows where the request comes from so we don't put
+   the address into the header here.
+
+7. dapl_evd_connection_callback
+
+   The connecting side of the connection will now get a callback in
+   dapl_evd_connection_callback() with connection event
+   IB_CME_CONNECTED.
+
+   The code will transition the EP to CONNECTED and post a
+   DAT_CONNECTION_EVENT_ESTABLISHED event for the user to pick up.
+   The connection is now established; however, the passive or server
+   side of the connection doesn't know it yet: the EP is still in
+   DAT_EP_STATE_PASSIVE_CONNECTION_PENDING.
+
+8i. RTU
+
+   This item is labeled "8i" as it is internal to the InfiniBand
+   implementation; it is not initiated by dapl. The final leg of a
+   connection is an RTU sent from the initiating node to the server
+   node, indicating the connection has been made successfully.
+
+9. dapls_cr_callback
+
+   The RTU above results in another callback into dapls_cr_callback,
+   this time with connection event IB_CME_CONNECTED.
+
+   There is no reason to deal with private data; from a dapl point of
+   view this was purely an internal message and simply a connection
+   state change. A DAT_CONNECTION_EVENT_ESTABLISHED event is created
+   and posted.
+
+   The architecturally interesting feature of this exchange occurs
+   because of differences in the InfiniBand and the DAT connection
+   models, which will be briefly outlined.
+
+   InfiniBand maintains the original connecting objects throughout the
+   life of the connection. That is, we originally get a callback event
+   associated with the Service (DAT PSP) that is listening for
+   connection events. A QP will be connected but the callback event
+   will still be received on the Service. Later, a callback event will
+   occur for a DISCONNECT, and again the Service will be the object of
+   the connection. In the DAPL implementation, the Service will
+   provide the PSP that is registered as listening on that connection
+   qualifier.
+
+   DAT has a PSP receive a connection event, but subsequently hands
+   all connection events off to an EP. After a dat_cr_accept is
+   issued, all connection/disconnection events occur on the EP.
+
+   To support the DAT model the CR is maintained through the life of
+   the connection. There is exactly one CR per connection, but any
+   number of CRs may exist for any given PSP. CRs are maintained on a
+   linked list pointed to by the PSP structure. A search routine will
+   match the cm_handle, unique for each connection, with the
+   appropriate CR. This allows us to find the appropriate EP which
+   will be used to create an event to be posted to the user.
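+
+   A sketch of that search (hypothetical field and routine names, in
+   the dapli_ scope since it would be local to its file):
+
+	STATIC DAPL_CR *
+	dapli_sp_find_cr (
+	    IN  DAPL_SP		*sp_ptr,
+	    IN  ib_cm_handle_t	cm_handle )
+	{
+	    DAPL_CR	*cr_ptr;
+
+	    /* one CR per live connection hangs off the PSP */
+	    for ( cr_ptr = sp_ptr->cr_list_head;
+		  cr_ptr != NULL;
+		  cr_ptr = cr_ptr->next )
+	    {
+		if ( cr_ptr->ib_cm_handle == cm_handle )
+		{
+		    return cr_ptr;	/* leads back to the right EP */
+		}
+	    }
+
+	    return NULL;
+	}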
+
+* dat_psp_destroy
+
+   It should be understood that the PSP will maintain all of the CR
+   records, and hence the PSP must persist until the final disconnect.
+   In the DAT model there is no association between a PSP and a
+   connected QP, so there is no reason not to destroy a PSP before the
+   final disconnect.
+
+   Because of the model mismatch we must preserve the PSP until the
+   final disconnect. If the user invokes dat_psp_destroy(), all of the
+   associations maintained by the PSP will be severed; but the PSP
+   structure itself remains as a container for the CR records. The PSP
+   structure maintains a simple count of CR records so we can easily
+   determine the final disconnect and release memory. Once a
+   disconnect event is received for a specific cm_handle, no further
+   events will be received and it is safe to discard the CR record.
+
+10. ...processing...
+
+   This is just a place holder to show that applications actually do
+   something after making a connection. They might not too...
+
+11. Either side issues a dat_ep_disconnect
+
+   dat_ep_disconnect() can be initiated by either side of a
+   connection. There are two kinds of disconnect flags that can be
+   passed in, but the final result is largely the same.
+
+   DAT_CLOSE_ABRUPT_FLAG will cause the connection to be immediately
+   terminated. In InfiniBand terms, the QP is immediately moved to the
+   ERROR state, and after some time it will be moved to the RESET
+   state.
+
+   DAT_CLOSE_GRACEFUL_FLAG will allow in-progress DTOs to complete.
+   The underlying implementation will first transition the QP to the
+   SQE state, before going to RESET.
+
+   Both cases are handled by the underlying CM; there is no extra work
+   for DAPL.
+
+
+12. dapls_cr_callback
+
+   A disconnect will arrive on the passive side of the connection
+   through dapls_cr_callback() with connection event
+   IB_CME_DISCONNECTED. With this event the EP lookup code will free
+   the CR associated with the connection, and may free the PSP if it
+   is no longer listening, indicating it has been freed by the
+   application.
+
+   The callback will create and post a
+   DAT_CONNECTION_EVENT_DISCONNECTED event for the user.
+
+13. dapl_evd_connection_callback
+
+   The active side of the connection will receive IB_CME_DISCONNECTED
+   as the connection event for dapl_evd_connection_callback(), and
+   will create and post a DAT_CONNECTION_EVENT_DISCONNECTED event.
+   Other than transitioning the EP to the DISCONNECTED state, there is
+   no further processing.
+
+
+-- Notes on Disconnecting
+
+An EP can only be disconnected if it is connected or unconnected; you
+cannot disconnect 'in progress' connections. An 'in progress'
+connection may in fact time out, but the DAT Spec does not allow you
+to 'kill' it. DAPL will use the CM interface to disconnect from the
+remote node; this of course results in an asynchronous callback
+notifying the application the disconnect is complete.
+
+Disconnecting an unconnected EP is currently the only way to remove
+pending RECV operations from the EP. The DAPL spec notes that all
+DTOs must be removed from an EP before it can be deallocated, yet
+there is no explicit interface to remove pending RECV DTOs. The user
+will disconnect an unconnected EP to force the pending operations off
+of the queue, resulting in DTO callbacks indicating an error. The
+underlying InfiniBand implementation will cause the correct behavior
+to result. When doing this operation the DAT_CLOSE flag is ignored;
+DAPL will instruct the IB layer to abruptly disconnect the QP.
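+
+From the consumer's point of view the flush amounts to the following
+(a sketch; ep_handle and recv_evd_handle are assumed to exist):
+
+	DAT_EVENT	event;
+
+	/* force queued RECVs off an unconnected EP; each pending DTO
+	 * then completes to its EVD with a flushed-DTO error status */
+	dat_ep_disconnect ( ep_handle, DAT_CLOSE_ABRUPT_FLAG );
+
+	while ( DAT_SUCCESS == dat_evd_dequeue ( recv_evd_handle, &event ) )
+	{
+	    /* each event describes one flushed RECV DTO */
+	}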
+
+As has been noted previously, specifying DAT_CLOSE_ABRUPT_FLAG as the
+disconnect completion flag will cause the CM implementation to
+transition the QP to the ERROR state to abort all operations, and then
+transition to the RESET state; if the flag is DAT_CLOSE_GRACEFUL_FLAG,
+the CM will first move to the SQE state and allow all pending I/Os to
+drain before moving to the RESET state. In either case, DAPL only
+needs to know that the QP is now in the RESET state, as it will need
+to be transitioned to the INIT state before it can be used again.
+
+======================================================================
+Data Transfer Operations (DTOs)
+======================================================================
+
+The DTO code is a straightforward translation of the DAT_LMR_TRIPLET
+to an InfiniBand work request. Unfortunately, IB does not specify what
+a work request looks like, so this tends to be very vendor specific
+code. Each provider will supply a routine for this operation.
+
+InfiniBand allows the DTO to attach a unique 64-bit work_req_id to
+each work request. The DAPL implementation will install a pointer to a
+DAPL_DTO_COOKIE in this field. Observe that a DAPL_DTO_COOKIE is not
+the same as the user DAT_DTO_COOKIE; indeed, the former has a pointer
+field pointing to the latter. Different values will be placed in the
+cookie, according to the type of operation it is and the type of data
+required by its completion event. This is a simple scheme to bind DAPL
+data to the DTO and associated completion callback. Each DTO has a
+unique cookie associated with it.
+
+DAPL_DTO_COOKIE structures are currently allocated using a simple
+malloc. An obvious performance gain can be had by using a ready-made
+pool of structures to minimize the time involved on this critical
+path.
+
+The underlying InfiniBand implementation will invoke
+dapl_evd_dto_callback() upon completion of DTO operations. During the
+development of the Reference Implementation there was a period when
+DTO callbacks were not working, so we implemented a mechanism where a
+thread continually polls the CQs looking for completions, and then
+invokes the callback when something completes. This code may be useful
+in the future and is still maintained in the code under the
+POLLING_COMPLETIONS #ifdef.
+
+POLLING_COMPLETIONS will simulate callbacks but will not provide a high
+performance implementation.
+
+dapl_evd_dto_callback() is the asynchronous completion for a DTO and
+will create and post an event for the user. Much of this callback is
+concerned with managing error completions.
+
+
+======================================================================
+Data Structure
+======================================================================
+
+The main data structure for an EndPoint is the dapl_ep structure,
+defined in include/dapl.h. The reference implementation uses the
+InfiniBand QP to maintain hardware state, providing a relatively
+simple mapping.
+ +/* DAPL_EP maps to DAT_EP_HANDLE */ +struct dapl_ep +{ + DAPL_HEADER header; + /* What the DAT Consumer asked for */ + DAT_EP_PARAM param; + + /* The RC Queue Pair (IBM OS API) */ + ib_qp_handle_t qp_handle; + int qpn; /* qp number */ + Ib_qp_state qp_state; + + /* communications manager handle (IBM OS API) */ + ib_cm_handle_t cm_handle; + + /* The DTO Circular buffers */ + DAPL_RING_BUFFER out; + DAPL_RING_BUFFER in; + + /* state dependent connection event handler */ + DAPL_CONNECTION_STATE_HANDLER ep_connect_handler; +}; + + +The simple explanation of the fields in the dapl_ep structure follows: + +header: The dapl object header, common to all dapl objects. + It contains a lock field, links to appropriate lists, and + handles specifying the IA domain it is a part of. + +param: The bulk of the EP attributes called out in the DAT + specification and are maintained in the DAT_EP_PARAM + structure. All internal references to these fields + use this structure. + +qp_handle: Handle to the underlying InfiniBand provider implementation + for a QP. All EPs are mapped to an InfiniBand QP. + +qpn: Number of the QP as returned by the underlying provider + implementation. Primarily useful for debugging. + +qp_state: Current state of the QP. The values of this field indicate + if a QP is bound to the EP, and the current state of a + QP. + +cm_handle: Handle to the IB provider's CMA (Connection Manager Agent). + Used for CM operations used to connect and disconnect. + +out: Ring buffer tracking all SEND work requests (WR). A WR + is put into the list when a SEND is initiated, then removed + when the send completes. + +in: Ring buffer tracking all RECV work requests (WR). A WR + is put into the list when a RECV is initiated, then removed + when the recv completes. + +ep_connect_handler: + Pointer to callback routine invoked when CM events appear. + MAY BE UNUSED IN THE IMPLEMENTATION. diff --git a/branches/Ndi/ulp/dapl/doc/dapl_environ.txt b/branches/Ndi/ulp/dapl/doc/dapl_environ.txt new file mode 100644 index 00000000..6cca3b27 --- /dev/null +++ b/branches/Ndi/ulp/dapl/doc/dapl_environ.txt @@ -0,0 +1,42 @@ + DAPL Environment Guide v. 0.01 + ------------------------------ + +The following environment variables affect the behavior of the DAPL +provider library: + + +DAPL_DBG_TYPE +------------- + + Value specifies which parts of the registry will print debugging + information, valid values are + + DAPL_DBG_TYPE_ERR = 0x0001 + DAPL_DBG_TYPE_WARN = 0x0002 + DAPL_DBG_TYPE_EVD = 0x0004 + DAPL_DBG_TYPE_CM = 0x0008 + DAPL_DBG_TYPE_EP = 0x0010 + DAPL_DBG_TYPE_UTIL = 0x0020 + DAPL_DBG_TYPE_CALLBACK = 0x0040 + DAPL_DBG_TYPE_DTO_COMP_ERR = 0x0080 + DAPL_DBG_TYPE_API = 0x0100 + DAPL_DBG_TYPE_RTN = 0x0200 + DAPL_DBG_TYPE_EXCEPTION = 0x0400 + + or any combination of these. For example you can use 0xC to get both + EVD and CM output. + + Example setenv DAPL_DBG_TYPE 0xC + + +DAPL_DBG_DEST +------------- + + Value sets the output destination, valid values are + + DAPL_DBG_DEST_STDOUT = 0x1 + DAPL_DBG_DEST_SYSLOG = 0x2 + DAPL_DBG_DEST_ALL = 0x3 + + For example, 0x3 will output to both stdout and the syslog. + diff --git a/branches/Ndi/ulp/dapl/doc/dapl_event_design.txt b/branches/Ndi/ulp/dapl/doc/dapl_event_design.txt new file mode 100644 index 00000000..0b896bb7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/doc/dapl_event_design.txt @@ -0,0 +1,875 @@ + DAPL Event Subsystem Design v. 
0.96 + ----------------------------------- + +================= +Table of Contents +================= + +* Table of Contents +* Referenced Documents +* Goals + + Initial Goals + + Later Goals +* Requirements, constraints, and design inputs + + DAT Specification Constraints + + Object and routine functionality, in outline + + Detailed object and routine specification + + Synchronization + + IBM Access API constraints + + Nature of DAPL Event Streams in IBM Access API. + + Nature of access to CQs + + Operating System (Pthread) Constraints + + Performance model + + A note on context switches +* DAPL Event Subsystem Design + + OS Proxy Wait Object + + Definition + + Suggested Usage + + Event Storage + + Synchronization + + EVD Synchronization: Locking vs. Producer/Consumer queues + + EVD Synchronization: Waiter vs. Callback + + CNO Synchronization + + Inter-Object Synchronization + + CQ -> CQEH Assignments + + CQ Callbacks + + Dynamic Resizing of EVDs + + Structure and pseudo-code + + EVD + + CNO +* Future directions + + Performance improvements: Reducing context switches + + Performance improvements: Reducing copying of event data + + Performance improvements: Reducing locking + + Performance improvements: Reducing atomic operations + + Performance improvements: Incrementing concurrency. + +==================== +Referenced Documents +==================== + +uDAPL: User Direct Access Programming Library, Version 1.0. Published +6/21/2002. http://www.datcollaborative.org/uDAPL_062102.pdf. +Referred to in this document as the "DAT Specification". + +InfiniBand Access Application Programming Interface Specification, +Version 1.2, 4/15/2002. In DAPL SourceForge repository at +doc/api/access_api.pdf. Referred to in this document as the "IBM +Access API Specification". + +===== +Goals +===== + +Initial goals +------------- +-- Implement the dat_evd_* calls described in the DAT Specification (except + for dat_evd_resize). + +-- The implementation should be as portable as possible, to facilitate + HCA Vendors efforts to implement vendor-specific versions of DAPL. + +Later goals +----------- +-- Examine various possible performance optimizations. This document + lists potential performance improvements, but the specific + performance improvements implemented should be guided by customer + requirements. + +-- Implement the dat_cno_* calls described in the DAT 1.0 spec + +-- Implement OS Proxy Wait Objects. + +-- Implement dat_evd_resize + +Non-goals +--------- +-- Thread safe implementation + +============================================ +Requirements, constraints, and design inputs +============================================ + +DAT Specification Constraints +----------------------------- + +-- Object and routine functionality, in outline + +The following section summarizes the requirements of the DAT +Specification in a form that is simpler to follow for purposes of +implementation. This section presumes the reader has read the DAT +Specification with regard to events. + +Events are delivered to DAPL through Event Streams. Each Event Stream +targets a specific Event Descriptor (EVD); multiple Event Streams may +target the same EVD. The Event Stream<->EVD association is +effectively static; it may not be changed after the time at which +events start being delivered. The DAT Consumer always retrieves +events from EVDs. EVDs are intended to be 1-to-1 associated with the +"native" event convergence object on the underlying transport. 
For +InfiniBand, this would imply a 1-to-1 association between EVDs and +CQs. + +EVDs may optionally have an associated Consumer Notification Object +(CNO). Multiple EVDs may target the same CNO, and the EVD<->CNO +association may be dynamically altered. The DAT Consumer may wait for +events on either EVDs or CNOs; if there is no waiter on an EVD and it +is enabled, its associated CNO is triggered on event arrival. An EVD +may have only a single waiter; a CNO may have multiple waiters. +Triggering of a CNO is "sticky"; if there is no waiter on a CNO when +it is triggered, the next CNO waiter will return immediately. + +CNOs may have an associated OS Proxy Wait Object, which is signaled +when the CNO is triggered. + +-- Detailed object and routine specification + +Individual events may be "signaling" or "non-signaling", depending +on the interaction of: + * Receive completion endpoint attributes + * Request completion endpoint attributes + * dat_ep_post_send completion flags + * dat_ep_post_recv completion flags +The nature of this interaction is outside the scope of this document; +see the DAT Specification 1.0 (or, failing that, clarifications in a +later version of the DAT Specification). + +A call to dat_evd_dequeue returns successfully if there are events on +the EVD to dequeue. A call to dat_evd_wait blocks if there are fewer +events present on the EVD than the value of the "threshold" parameter +passed in the call. Such a call to dat_evd_wait will be awoken by the +first signaling event arriving on the EVD that raises the EVD's event +count to >= the threshold value specified by dat_evd_wait(). + +If a signaling event arrives on an EVD that does not have a waiter, +and that EVD is enabled, the CNO associated with the EVD will be +triggered. + +A CNO has some number of associated waiters, and an optional +associated OS Proxy Wait Object. When a CNO is triggered, two things +happen independently: + * The OS Proxy Wait Object associated with the CNO, if any, is + signaled, given the handle of an EVD associated with the CNO + that has an event on it, and disassociated from the CNO. + * If: + * there is one or more waiters associated with the + CNO, one of the waiters is unblocked and given the + handle of an EVD associated with the CNO that has an + event on it. + * there are no waiters associated with the CNO, the + CNO is placed in the triggered state. + +When a thread waits on a CNO, if: + * The CNO is in the untriggered state, the waiter goes to + sleep pending the CNO being triggered. + * The CNO is in the triggered state, the waiter returns + immediately with the handle of an EVD associated with the + CNO that has an event on it, and the CNO is moved to the + untriggered state. + +Note specifically that the signaling of the OS Proxy Wait Object is +independent of the CNO moving into the triggered state or not; it +occurs based on the state transition from Not-Triggered to Triggered. +Signaling the OS Proxy Wait Object only occurs when a CNO is +triggered. In contrast, waiters on a CNO are unblocked whenever the +CNO is in the triggered *state*, and that state is sticky. + +Note also that which EVD is returned to the caller in a CNO wait is +not specified; it may be any EVD associated with the CNO on which an +event arrival might have triggered the CNO. This includes the +possibility that the EVD returned to the caller may not have any +events on it, if the dat_cno_wait() caller raced with a separate +thread doing a dat_evd_dequeue(). 
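+
+For reference, the consumer-visible calls behind these semantics look
+like this in use (a sketch; the handles and threshold are assumed to
+exist):
+
+	DAT_EVENT	event;
+	DAT_COUNT	nmore;
+	DAT_EVD_HANDLE	evd;
+
+	/* blocks until at least `threshold' events are on the EVD */
+	dat_evd_wait ( evd_handle, DAT_TIMEOUT_INFINITE, threshold,
+		       &event, &nmore );
+
+	/* blocks until the CNO triggers; returns an EVD that may
+	 * (or, as noted above, may not) hold an event */
+	dat_cno_wait ( cno_handle, DAT_TIMEOUT_INFINITE, &evd );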
+
+The DAT Specification is silent as to what behavior is to be expected
+from an EVD after an overflow error has occurred on it.  Thus this
+design will also be silent on that issue.
+
+The DAT Specification has minimal requirements on inter-Event Stream
+ordering of events.  Specifically, any connection events must precede
+(in consumption order) any DTO Events for the same endpoint.
+Similarly, any successful disconnection events must follow any DTO
+Events for an endpoint.
+
+-- Synchronization
+
+Our initial implementation is not thread safe.  This means that we do
+not need to protect against the possibility of multiple simultaneous
+user calls occurring on the same object (EVD, CNO, EP, etc.); that is
+the responsibility of the DAT Consumer.
+
+However, there are synchronization guards that we do need to protect
+against because the DAT Consumer cannot.  Specifically, since the user
+cannot control the timing of callbacks from the IBM Access API
+Implementation, we need to protect against possible collisions between
+user calls and such callbacks.  We also need to make sure that such
+callbacks do not conflict with one another in some fashion, possibly
+by assuring that they are single-threaded.
+
+In addition, for the sake of simplicity in the user interface, I have
+defined "not thread safe" as "It is the DAT Consumer's responsibility
+to make sure that all calls against an individual object do not
+conflict".  This does, however, suggest that the DAPL library needs to
+protect against calls to different objects that may result in
+collisions "under the covers" (e.g. a call on an EVD vs. a call on its
+associated CNO).
+
+So our synchronization requirements for this implementation are:
+    + Protection against collisions between user calls and IBM
+      Access API callbacks.
+    + Avoidance of or protection against collisions between
+      different IBM Access API callbacks.
+    + Protection against collisions between user calls targeted at
+      different DAT objects.
+
+IBM Access API constraints
+--------------------------
+
+-- Nature of DAPL Event Streams in IBM Access API
+
+DAPL Event Streams are delivered through the IBM Access API in two
+fashions:
+    + Delivery of a completion to a CQ.
+    + A callback made directly to a previously registered DAPL
+      function with parameters describing the event.
+(Software events are not delivered through the IBM Access API).
+
+The delivery of a completion to a CQ may spark a call to a previously
+registered callback depending on the attributes of the CQ and the
+reason for the completion.  Event Streams that fall into this class
+are:
+    + Send data transport operation
+    + Receive data transport operation
+    + RMR bind
+
+The Event Streams that are delivered directly through an IBM Access
+API callback include:
+    + Connection request arrival
+    + Connection resolution (establishment or rejection)
+    + Disconnection
+    + Asynchronous errors
+
+Callbacks associated with CQs are further structured by membership in
+a particular CQ Event Handling (CQEH) domain (specified at CQ creation
+time).  All CQ callbacks within a CQEH domain are serviced by the same
+thread, and hence will not collide.
+
+In addition, all connection-related callbacks are serviced by the same
+thread, and will not collide.  Similarly, all asynchronous error
+callbacks are serviced by the same thread, and will not collide.
+Collisions between any pair of a CQEH domain, a connection callback,
+and an asynchronous error callback are possible.
+
+-- Nature of access to CQs
+
+The only probe operation the IBM Access API allows on CQs is
+dequeuing.  The only notification operation the IBM Access API
+supports for CQs is calling a previously registered callback.
+
+Specifically, the IB Consumer may not query the number of completions
+on the CQ; the only way to find out the number of completions on a CQ
+is through dequeuing them all.  It is not possible to block waiting
+on a CQ for the next completion to arrive, with or without a
+threshold parameter.
+
+Operating System Constraints
+----------------------------
+
+The initial platform for implementation of DAPL is RedHat Linux 7.2 on
+Intel hardware.  On this platform, inter-thread synchronization is
+provided by a POSIX Pthreads implementation.  From the viewpoint of
+DAPL, the details of the Pthreads interface are platform specific.
+However, Pthreads is a very widely used threading library, common on
+almost all Unix variants (though not used on the different variations
+of Microsoft Windows(tm)).  In addition, RedHat Linux 7.2 provides
+POSIX thread semaphore operations (e.g. see sem_init(3)), which are
+not normally considered part of pthreads.
+
+Microsoft Windows(tm) provides many synchronization primitives,
+including mutual exclusion locks and semaphores.
+
+DAPL defines an internal API (not exposed to the consumer), through
+which it accesses Operating System dependent services; this is called
+the OSD API.  It is intended that this layer contain all operating
+system dependencies, and that porting DAPL to a new operating system
+should only require changes to this layer.
+
+We have chosen to define the synchronization interfaces established at
+this layer in terms of two specific objects: mutexes and semaphores
+w/ timeout on waiting.  Mutexes provide mutual exclusion in a way that
+is common to all known operating systems.  The functionality of
+semaphores also exists on most known operating systems, though the
+semaphores provided by POSIX do not provide timeout capabilities.
+This choice was made for three reasons.  First, in contrast to
+Condition Variables (the native pthreads waiting/signalling object),
+operations on semaphores do not require use of other synchronization
+variables (i.e. mutexes).  Second, it is fairly easy to emulate
+semaphores using condition variables, and it is not simple to emulate
+condition variables using semaphores.  And third, there are some
+anticipated platforms for DAPL that implement condition variables in
+relation to some types of locks but not others, and hence constrain
+appropriate implementation choices for a potential DAPL interface
+modeled after condition variables.
+
+Implementation of the DAPL OS Wait Objects will initially be based on
+condition variables (requiring the use of an internal lock) since
+POSIX semaphores do not provide a needed timeout capability.  However,
+if improved performance is required, a helper thread could be created
+that arranges to signal waiting semaphores when timeouts have
+expired.  This is a potential future (or vendor) optimization.
+
+Performance Model
+-----------------
+One constraint on the DAPL Event Subsystem implementation is that it
+should perform as well as possible.  We define "as well as possible"
+by listing the characteristics of this subsystem that will affect its
+performance most strongly.  In approximate order of importance, these
+are:
+    + The number of context switches on the critical path.
+    + The amount of copying on the critical path.
+    + The base cost of locking (assuming no contention) on the
+      critical path.  This is proportional to the number of locks
+      taken.
+    + The amount of locking contention expected.  We make a
+      simplifying assumption and take this as the number of cycles
+      for which we expect to hold locks on the critical path.
+    + The number of "atomic" bus operations executed (these take
+      more cycles than normal operations, as they require locking
+      the bus).
+
+We obviously wish to minimize all of these costs.
+
+-- A note on context switches
+
+In general, it's difficult to minimize context switches in a user
+space library directly communicating with a hardware board.  This is
+because context switches, by their nature, have to go through the
+operating system, but the information about which thread to wake up
+and whether to wake it up is generally in user space.  In addition,
+the IBM Access API delivers all Event Streams as callbacks in user
+context (as opposed to, for example, allowing a thread to block within
+the API waiting for a wakeup).  For this reason, the default sequence
+of events for a wakeup generated from the hardware is:
+    * Hardware interrupts the main processor.
+    * Interrupt thread schedules a user-level IBM Access API
+      provider service thread parked in the kernel.
+    * Provider service thread wakes up the sleeping user-level
+      event DAT implementation thread.
+This implies that any wakeup will involve three context switches.
+This could be reduced by one if there were a way for user threads to
+block in the kernel; we could then skip the user-level provider
+thread.
+
+===========================
+DAPL Event Subsystem Design
+===========================
+
+
+OS Proxy Wait Object
+--------------------
+
+The interface and nature of the OS Proxy Wait Object is specified in
+the uDAPL v. 1.0 header files as a DAT_OS_WAIT_PROXY_AGENT via the
+following defines:
+
+typedef void (*DAT_AGENT_FUNC)
+    (
+    DAT_PVOID,          /* instance data */
+    DAT_EVD_HANDLE      /* Event Dispatcher */
+    );
+
+typedef struct dat_os_wait_proxy_agent
+    {
+    DAT_PVOID instance_data;
+    DAT_AGENT_FUNC proxy_agent_func;
+    } DAT_OS_WAIT_PROXY_AGENT;
+
+In other words, an OS Proxy Wait Object is a (function, data) pair,
+and signalling the OS Proxy Wait Object is a matter of calling the
+function on the data and an EVD handle associated with the CNO.
+The nature of that function and its associated data is completely up
+to the uDAPL consumer.
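+
+As a suggested usage, the sketch below wakes an fd-based event loop
+when the CNO triggers.  (Illustrative only: the pipe-based wakeup and
+the names my_agent_func/pipe_write_fd are not part of any API; the
+proxy agent function may do whatever the consumer likes.)
+
+    #include <unistd.h>
+
+    static void
+    my_agent_func (DAT_PVOID instance_data, DAT_EVD_HANDLE evd)
+    {
+        /* instance_data is assumed to point at the write end of a
+         * pipe watched by a select()/poll() loop.  Writing the EVD
+         * handle tells the reader which EVD has an event on it. */
+        int fd = *(int *) instance_data;
+
+        write (fd, &evd, sizeof (evd));
+    }
+
+    /* The OS Proxy Wait Object is just the (data, function) pair: */
+    DAT_OS_WAIT_PROXY_AGENT agent = { &pipe_write_fd, my_agent_func };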
+
+Event Storage
+-------------
+
+The data associated with an Event (the type, the EVD, and any type
+specific data required) must be stored between event production and
+event consumption.  If storage is not provided by the underlying
+Verbs, that data must be stored in the EVD itself.  This may require
+an extra copy (one at event production and one at event consumption).
+
+Event Streams associated purely with callbacks (i.e. IB events that
+are not mediated by CQs) or user calls (i.e. software events) don't
+have any storage allocated for them by the underlying verbs and hence
+must store their data in the EVD.
+
+Event Streams that are associated with CQs have the possibility of
+leaving the information associated with the CQ between the time the
+event is produced and the time it is consumed.  However, even in this
+case, if the user calls dat_evd_wait with a threshold argument, the
+event information must be copied to storage in the EVD.  This is
+because it is not possible to determine how many completions there are
+on a CQ without dequeuing them, and that determination must be made by
+the CQ notification callback in order to decide whether to wake up a
+dat_evd_wait() waiter.  Note that this determination must be made
+dynamically based on the arguments to dat_evd_wait().
+
+Further, leaving events from Event Streams associated with the CQs "in
+the CQs" until event consumption raises issues about in what order
+events should be dequeued if there are multiple event streams entering
+an EVD.  Should the CQ events be dequeued first, or should the events
+stored in the EVD be dequeued first?  In general this is a complex
+question; the uDAPL spec does not put many restrictions on event
+order, but the major one that it does place is to restrict connection
+events associated with a QP to be dequeued before DTOs associated with
+that QP, and disconnection events after.  Unfortunately, if we adopt
+the policy of always dequeueing CQ events first, followed by EVD
+events, this means that in situations where CQ events have been copied
+to the EVD, CQ events may be received on the EVD out of order.
+
+However, leaving events from Event Streams associated with CQs "in the
+CQs" allows us to avoid enabling CQ callbacks in cases where there is
+no waiter associated with the EVDs.  This can avoid a potentially
+large number of gratuitous context switches.
+
+For the initial implementation, we will leave all event information
+associated with CQs until dequeued by the consumer.  All other event
+information will be put in storage on the EVD itself.  We will always
+dequeue from the EVD first and the CQ second, to handle ordering among
+CQ events in cases in which CQ events have been copied to the EVD.
+
+
+Synchronization
+---------------
+
+-- EVD synchronization: Locking vs. Producer/Consumer queues
+
+In the current code, two circular producer/consumer queues are used
+for non-CQ event storage (one holds free events, one holds posted
+events).  Event producers "consume" events from the free queue, and
+produce events onto the posted event queue.  Event consumers consume
+events from the posted event queue, and "produce" events onto the free
+queue.  In what follows, we discuss synchronization on the posted
+event queue, but since the usage of the queues is symmetric, all of
+what we say also applies to the free event queue (just in the reverse
+polarity).
+
+The reason for using these circular queues is to allow synchronization
+between producer and consumer without locking in some situations.
+Unfortunately, a circular queue is only an effective method of
+synchronization if we can guarantee that there are only two accessors
+to it at a given time: one producer, and one consumer.  The model will
+not work if there are multiple producers, or if there are multiple
+consumers (though obviously a subsidiary lock could be used to
+single-thread either the producers or the consumers).
+
+There are several difficulties with guaranteeing that the producers
+and consumers will each be single threaded in accessing the EVD:
+    * Constraints of the IB specification and IBM Access API
+      (differing sources for event streams without guarantees of
+      IB provider synchronization between them) make it difficult
+      to avoid multiple producers.
+    * The primitives used for the producer/consumer queue are not
+      as widely accepted as locks, and may render the design less
+      portable.
+
+We will take locks when needed when producing events.  The details of
+this plan are described below.
+
+This reasoning is described in more detail below to inform judgments
+about future performance improvements.
+
+* EVD producer synchronization
+
+The producers fall into two classes:
+    * Callbacks announcing IA associated events such as connection
+      requests, connections, disconnections, DT ops, RMR bind,
+      etc.
+    * User calls posting a software event onto the EVD.
+
+It is the user's responsibility to protect against simultaneous
+postings of software events onto the same EVD.  Similarly, the CQEH
+mechanism provided by the IBM Access API allows us to avoid collisions
+between IBM Access API callbacks associated with CQs.  However, we
+must protect against software events colliding with IBM Access API
+callbacks, and against non-CQ associated IB verb callbacks (connection
+events and asynchronous errors) colliding with CQ associated IBM
+Access API callbacks, or with other non-CQ associated IBM Access API
+callbacks (i.e. a connection callback colliding with an asynchronous
+error callback).
+
+Note that CQ related callbacks do not act as producers on the circular
+list; instead they leave the event information on the CQ until
+dequeue; see "Event Storage" above.  However, there are certain
+situations in which it is necessary for the consumer to determine the
+number of events on the EVD.  The only way that IB provides to do this
+is to dequeue the CQEs from the CQ and count them.  In these
+situations, the consumer will also act as an event producer for the
+EVD event storage, copying all event information from the CQ to the
+EVD.
+
+Based on the above, the only case in which we may do without locking
+on the producer side is when all Event Streams of all of the following
+types may be presumed to be single threaded:
+    * Software events
+    * Non-CQ associated callbacks
+    * Consumers within dat_evd_wait
+
+We use a lock on the producer side of the EVD whenever we have
+multiple threads of producers.
+
+* EVD consumer synchronization
+
+It is the consumer's responsibility to avoid multiple callers into
+dat_evd_wait and dat_evd_dequeue.  For this reason, there is no
+requirement for a lock on the consumer side.
+
+* CQ synchronization
+
+We simplify synchronization on the CQ by identifying the CQ consumer
+with the EVD consumer.  In other words, we prohibit any thread other
+than a user thread in dat_evd_wait() or dat_evd_dequeue() from
+dequeueing events from the CQ.  This means that we can rely on the
+uDAPL spec guarantee that only a single thread will be in
+dat_evd_wait() or dat_evd_dequeue() on a single CQ at a time.  It has
+the negative cost that (because there is no way to probe for the
+number of entries on a CQ without dequeueing) the thread blocked in
+dat_evd_wait() with a threshold argument greater than 1 will be woken
+up on each notification on that CQ, in order to dequeue entries from
+the CQ and determine if the threshold value has been reached.
+
+-- EVD Synchronization: Waiter vs. Callback
+
+Our decision to restrict dequeueing from the IB CQ to the user thread
+(rather than the notification callback thread) means that
+re-requesting notifications must also be done from that thread.  This
+leads to a subtle requirement for synchronization: the request for
+notification (ib_completion_notify) must be atomic with the wait on
+the condition variable by the user thread (atomic in the sense that
+locks must be held to force the signalling from any such notification
+to occur after the sleep on the condition variable).  Otherwise it is
+possible for the notification requested by the ib_completion_notify
+call to occur before the return from that call.  The signal done by
+that notify will be ignored, and no further notifications will be
+enabled, resulting in the thread sleeping forever.  The CQE
+associated with the notification might be noticed upon return from the
+notify request, but that CQE might also have been reaped by a previous
+call.
+
+-- CNO Synchronization
+
+In order to protect data items that are changed during CNO signalling
+(OS Proxy Wait Object, EVD associated with triggering, CNO state), it
+is necessary to use locking when triggering and waiting on a CNO.
+
+Note that the synchronization between triggerer and waiter on a CNO
+must take into account the possibility of the waiter returning from
+the wait because of a timeout.  I.e. it must handle the possibility
+that, even though the waiter was detected and the OS Wait Object
+signalled under an atomic lock, there would be no waiter on the OS
+Wait Object when it was signalled.  To handle this case, we make the
+triggerer's job to be setting the state to triggered and signalling
+the OS Wait Object; all other manipulation is done by the waiter.
+
+-- Inter-Object Synchronization
+
+By the requirements specified above, the DAPL implementation is
+responsible for avoiding collisions between DAT Consumer calls on
+different DAT objects, even in a non-thread safe implementation.
+Luckily, no such collisions exist in this implementation; all exported
+DAPL Event Subsystem calls involve operations only on the objects to
+which they are targeted.  No inter-object synchronization is
+required.
+
+The one exception to this is the posting of a software event on an EVD
+associated with a CNO; this may result in triggering the CNO.
+However, this case was dealt with above in the discussion of
+synchronizing between event producers and consumers; the posting of a
+software event is a DAPL API call, but it's also an event producer.
+
+To avoid lock hierarchy issues between EVDs and CNOs and minimize lock
+contention, we arrange not to hold the EVD lock when triggering the
+CNO.  That is the only context in which we would naturally attempt to
+hold both locks.
+
+-- CQ -> CQEH Assignments
+
+For the initial implementation, we will assign all CQs to the same
+CQEH.  This is for simplicity and efficient use of threading
+resources; we do not want to dedicate a thread per CQ (where the
+number of CQs may grow arbitrarily high), and we have no way of
+knowing which partitioning of CQs is best for the DAPL consumer.
+
+CQ Callbacks
+------------
+
+The responsibility of a CQ callback is to wake up any waiters
+associated with the CQ--no data needs to be dequeued/delivered, since
+that is always done by the consumer.  Therefore, CQ callbacks must be
+enabled when:
+    * Any thread is in dat_evd_wait() on the EVD associated with
+      the CQ.
+    * The EVD is enabled and has a non-null CNO.  (An alternative
+      design would be to have waiters on a CNO enable callbacks on
+      all CQs associated with all EVDs associated with the CNO,
+      but this choice does not scale well as the number of EVDs
+      associated with a CNO increases).
+
+Dynamic Resizing of EVDs
+------------------------
+
+dat_evd_resize() creates a special problem for the implementor, as it
+requires that the storage allocated in the EVD be changed in size as
+events may be arriving.  If a lock is held
+by all operations that use the EVD, implementation of dat_evd_resize()
+is trivial; it substitutes a new storage mechanism for the old one,
+copying over all current events, all under lock.
+
+However, we wish to avoid universal locking for the initial
+implementation.  This puts the implementation of dat_evd_resize() into
+a tar pit.  Because of the DAT Consumer requirements for a non-thread
+safe DAPL Implementation, there will be no danger of conflict with
+Event Consumers.  However, if an Event Producer is in process of
+adding an event to the circular list when the resize occurs, that
+event may be lost or overwrite freed memory.
+
+If we are willing to make the simplifying decision that any EVD that
+has non-CQ events on it will always do full producer side locking, we
+can solve this problem relatively easily.  Resizing of the underlying
+CQ can be done via ib_cq_resize(), which we can assume available
+because of the IB spec.  Resizing of the EVD storage may be done under
+lock, and there will be no collisions with other uses of the EVD as
+all other uses of the EVD must either take the lock or are prohibited
+by the uDAPL spec.
+
+dat_evd_resize() has not yet been implemented in the DAPL Event
+subsystem.
+
+Structure and pseudo-code
+-------------------------
+
+-- EVD
+
+All EVDs will have associated with them:
+    + A lock
+    + A DAPL OS Wait Object
+    + An enabled/disabled bit
+    + A CNO pointer (may be null)
+    + A state (no_waiter, waiter, dead)
+    + A threshold count
+    + An event list
+    + A CQ (optional, but common)
+
+Posting an event to the EVD (presumably from a callback) will involve:
+^           + Checking for valid state
+|lock A     + Putting the event on the event list
+|  ^lock B  + Signal the DAPL OS Wait Object, if appropriate
+v  v          (waiter & signaling event & over threshold)
+            + Trigger the CNO if appropriate (enabled & signaling
+              event & no waiter).  Note that the EVD lock is not
+              held for this operation to avoid holding multiple locks.
+
+("lock A" is used if producer side locking is needed.  "lock B" is
+used if producer side locking is not needed.  Regardless, the lock is
+only held to confirm that the EVD is in the WAITED state, not for
+the wakeup).
+
+Waiting on an EVD will include:
+    + Loop:
+        + Copy all elements from CQ to EVD
+        + If we have enough, break
+        + If we haven't enabled the CQ callback
+            + Enable it
+            + Continue
+        + Sleep on DAPL OS Wait Object
+    + Dequeue and return an event
+
+The CQ callback will include:
+    + If there's a waiter:
+        + Signal it
+    + Otherwise, if the EVD is in the OPEN state, there's
+      a CNO, and the EVD is enabled:
+        + Reenable completion
+        + Trigger CNO
+
+Setting the enable/disable state of the EVD or setting the associated
+CNO will simply set the bits and enable the completion if needed (if a
+CNO trigger is implied); no locking is required.
+
+-- CNO
+
+All CNOs will have associated with them:
+    + A lock
+    + A DAPL OS Wait Object
+    + A state (triggered, untriggered, dead)
+    + A waiter count
+    + An EVD handle (last event which triggered the CNO)
+    + An OS Proxy Wait Object pointer (may be null)
+
+Triggering a CNO will involve:
+ ^  + If the CNO state is untriggered:
+ |      + Set it to triggered
+ |      + Note the OS Proxy Wait Object and zero it.
+ |      + If there are any waiters associated with the CNO,
+ |        signal them.
+ v  + Signal the OS Proxy Wait Object if noted
+
+Waiting on a CNO will involve:
+ ^  + While the state is not triggered and the timeout has not occurred:
+ |      + Increment the CNO waiter count
+lock    + Wait on the DAPL OS Wait Object
+ |      + Decrement the CNO waiter count
+ v  + If the state is triggered, note that fact and the EVD, and set
+      the state to untriggered.
+    + Return the EVD and success if the state was triggered
+    + Return timeout otherwise
+
+Setting the OS Proxy Wait Object on a CNO, under lock, checks for a
+valid state and sets the OS Proxy Wait Object.
+
+
+==============
+Known Problems
+==============
+
+-- Because many event streams are actually delivered to EVDs by
+   callbacks, we cannot in general make any guarantees about the order
+   in which those event streams arrive; we are at the mercy of the
+   thread scheduler.  Thus we cannot hold to the guarantee given by
+   the uDAPL 1.0 specification that within a particular EVD,
+   connection events on a QP will always be before successful DTO
+   operations on that QP.
+
+   Because we have chosen to dequeue EVD events first and CQ events
+   second, we will also not be able to guarantee that all successful
+   DTO events will be received before a disconnect event.  Ability to
+   probe the CQ for its number of entries would solve this problem.
+
+
+=================
+Future Directions
+=================
+
+This section includes both functionality enhancements, and a series of
+performance improvements.  I mark these performance optimizations with
+the following flags:
+    * VerbMod: Requires modifications to the IB Verbs/the IBM
+      Access API to be effective.
+    * VerbInteg: Requires integration between the DAPL
+      implementation and the IB Verbs implementation and IB device
+      driver.
+
+Functionality Enhancements
+--------------------------
+
+-- dat_evd_resize() may be implemented by forcing producer side
+   locking whenever an event producer may occur asynchronously with
+   calls to dat_evd_resize() (i.e. when there are non-CQ event streams
+   associated with the EVD).  See the details under "Dynamic Resizing
+   of EVDs" above.
+
+-- [VerbMod] If we had a verbs modification allowing us to probe for
+   the current number of entries on a CQ, we could:
+    * Avoid waking up a dat_evd_wait(threshold>1) thread until
+      there were enough events for it.
+    * Avoid copying events from the CQ to the EVD to satisfy the
+      requirements of the "*nmore" out argument to dat_evd_wait(),
+      as well as the non-unary threshold argument.
+    * Implement the "all successful DTO operation events before
+      disconnect event" uDAPL guarantee (because we would no
+      longer have to copy CQ events to an EVD, and hence dequeue
+      first from the EVD and then from the CQ).
+   This optimization also is relevant for two of the performance
+   improvements cases below (reducing context switches, and reducing
+   copies).
+
+
+Performance improvements: Reducing context switches
+---------------------------------------------------
+-- [VerbMod] If we had a verbs modification allowing us to probe for
+   the current size of a CQ, we could avoid waking up a
+   dat_evd_wait(threshold>1) thread until there were enough events
+   for it.  See the Functionality Enhancement entry covering this
+   possibility.
+
+-- [VerbMod] If we had a verbs modification allowing threads to wait
+   for completions to occur on CQs (presumably in the kernel in some
+   efficient manner), we could optimize the case of
+   dat_evd_wait(...,threshold=1,...) on EVDs with only a single CQ
+   associated Event Stream.  In this case, we could avoid the extra
+   context switch into the user callback thread; instead, the user
+   thread waiting on the EVD would be woken up by the kernel directly.
+
+-- [VerbMod] If we had the above verbs modification with a threshold
+   argument on CQs, we could implement the threshold=n case.
+
+-- [VerbInteg] In general, it would be useful to provide ways for
+   threads blocked on EVDs or CNOs to sleep in the hardware driver,
+   and for the driver interrupt thread to determine if they should be
+   awoken rather than handing that determination off to another,
+   user-level thread.  This would allow us to reduce by one the number
+   of context switches required for waking up the various blocked
+   threads.
+
+-- If an EVD has only a single Event Stream coming into it that is
+   only associated with one work queue (send or receive), it may be
+   possible to create thresholding by marking only every nth WQE on
+   the associated send or receive WQ to signal a completion.  The
+   difficulty with this is that the threshold is specified when
+   waiting on an EVD, and requesting completion signaling is
+   specified when posting a WQE; those two events may not in general
+   be synchronized enough for this strategy.  It is probably
+   worthwhile letting the consumer implement this strategy directly if
+   they so choose, by specifying the correct flags on EP and DTO so
+   that the CQ events are only signaling on every nth completion.
+   They could then use dat_evd_wait() with a threshold of 1.
+
+Performance improvements: Reducing copying of event data
+--------------------------------------------------------
+-- [VerbMod] If we had the ability to query a CQ for the number of
+   completions on it, we could avoid the cost of copying event data
+   from the CQ to the EVD.  This is a duplicate of the second entry
+   under "Functionality Enhancements" above.
+
+Performance improvements: Reducing locking
+------------------------------------------
+-- dat_evd_dequeue() may be modified to not take any locks.
+
+-- If there is no waiter associated with an EVD and there is only a
+   single event producer, we may avoid taking any locks in producing
+   events onto that EVD.  This must be done carefully to handle the
+   case of racing with a waiter waiting on the EVD as we deliver the
+   event.
+
+-- If there is no waiter associated with an EVD, and we create a
+   producer/consumer queue per event stream with a central counter
+   modified with atomic operations, we may avoid locking on the EVD.
+
+-- It may be possible, through judicious use of atomic operations, to
+   avoid locking when triggering a CNO unless there is a waiter on the
+   CNO.  This has not been done to keep the initial design simple.
+
+Performance improvements: Reducing atomic operations
+----------------------------------------------------
+-- We could combine the EVD circular lists, to avoid a single atomic
+   operation on each production and each consumption of an event.  In
+   this model, event structures would not move from list to list;
+   whether or not they had valid information on them would simply
+   depend on where they were on the lists.
+
+-- We may avoid the atomic increments on the circular queues (which
+   have a noticeable performance cost on the bus) if all accesses to an
+   EVD take locks.
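+
+To ground the preceding discussion, a minimal single-producer/
+single-consumer circular queue is sketched below.  (Illustrative, not
+the DAPL data structure: with exactly one producer and one consumer,
+each side updates only its own counter, so no lock is needed.  A real
+implementation would need the memory barriers/atomic operations
+discussed above; plain volatile is not sufficient on all platforms.)
+
+    #define QUEUE_SIZE 64       /* power of two, assumed */
+
+    typedef struct spsc_queue
+    {
+        void              *slots[QUEUE_SIZE];
+        volatile unsigned  head;    /* written only by the producer */
+        volatile unsigned  tail;    /* written only by the consumer */
+    } spsc_queue;
+
+    static int
+    spsc_enqueue (spsc_queue *q, void *event)
+    {
+        if (q->head - q->tail == QUEUE_SIZE)
+            return 0;               /* full */
+        q->slots[q->head % QUEUE_SIZE] = event;
+        q->head++;                  /* publish only after slot is filled */
+        return 1;
+    }
+
+    static void *
+    spsc_dequeue (spsc_queue *q)
+    {
+        void *event;
+
+        if (q->head == q->tail)
+            return NULL;            /* empty */
+        event = q->slots[q->tail % QUEUE_SIZE];
+        q->tail++;
+        return event;
+    }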
+
+
+Performance improvements: Increasing concurrency
+------------------------------------------------
+-- When running on a multi-CPU platform, it may be appropriate to
+   assign CQs to several separate CQEHs, to increase the concurrency
+   of execution of CQ callbacks.  However, note that consumer code is
+   never run within a CQ callback, so those callbacks should take very
+   little time per callback.  This plan would only make sense in
+   situations where there were very many CQs, all of which were
+   active, and for whatever reason (high threshold, polling, etc.)
+   user threads were usually not woken up by the execution of a
+   provider CQ callback.
+
+
diff --git a/branches/Ndi/ulp/dapl/doc/dapl_memory_management_design.txt b/branches/Ndi/ulp/dapl/doc/dapl_memory_management_design.txt
new file mode 100644
index 00000000..70d41dba
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dapl_memory_management_design.txt
@@ -0,0 +1,173 @@
+#######################################################################
+#                                                                     #
+#                    DAPL Memory Management Design                    #
+#                                                                     #
+#                            James Lentini                            #
+#                  jlentini at users.sourceforge.net                  #
+#                                                                     #
+#                          Created 05/06/2002                         #
+#                          Updated 08/22/2002                         #
+#                                                                     #
+#######################################################################
+
+
+Contents
+--------
+0. Introduction
+1. Protection Zones (PZs)
+2. Local Memory Regions (LMRs)
+3. Remote Memory Regions (RMRs)
+
+
+0. Introduction
+---------------
+
+    The memory management subsystem allows consumers to register and
+unregister memory regions.  The DAT API distinguishes between local
+and remote memory areas.  The former serve as local buffers for DTO
+operations while the latter are used for RDMA operations.
+
+Each DAT function is implemented in a file named dapl_<function>.c.
+For example, dat_pz_create is implemented in dapl_pz_create.c.  There
+are also dapl_<object>_util.{h,c} files for each object.  For
+example, there are dapl_pz_util.h and dapl_pz_util.c files.  The
+use of util files follows the convention used elsewhere in the DAPL
+reference provider.  These files contain common object creation and
+destruction code.
+
+
+1. Protection Zones (PZs)
+-------------------------
+
+    DAPL protection zones provide consumers with a means to associate
+various DAPL objects with one another.  The association can then be
+validated before allowing these objects to be manipulated.  The DAT
+functions related to PZs are:
+
+dat_pz_create
+dat_pz_free
+dat_pz_query
+
+These are implemented in the DAPL reference provider by
+
+dapl_pz_create
+dapl_pz_free
+dapl_pz_query
+
+The reference implementation maps the DAPL PZ concept onto Infiniband
+protection domains (PDs).
+
+The DAT_PZ_HANDLE value returned to DAT consumers is a pointer to a
+DAPL_PZ data structure.  The DAPL_PZ structure is used to represent all
+PZ objects.  Code that manipulates this structure should atomically
+increment and decrement the ref_count member to track the number of
+objects referencing the PZ.
+
+
+2. Local Memory Regions (LMRs)
+------------------------------
+
+    DAPL local memory regions represent a memory area on the host
+system that the consumer wishes to access via local DTO operations.
+The DAT functions related to LMRs are:
+
+dat_lmr_create
+dat_lmr_free
+dat_lmr_query
+
+These are implemented in
+
+dapl_lmr_create
+dapl_lmr_free
+dapl_lmr_query
+
+In the reference implementation, DAPL LMRs are mapped onto
+Infiniband memory regions (MRs).
+
+LMR creation produces two values: a DAT_LMR_CONTEXT and a
+DAT_LMR_HANDLE.
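+
+For illustration, a registration of ordinary virtual memory might look
+as follows.  (A sketch only: it uses the dat_lmr_create signature as
+quoted in the shared memory design later in this patch; "buffer" and
+"len" are assumed consumer variables, and error handling is elided.)
+
+    DAT_REGION_DESCRIPTION  region;
+    DAT_LMR_HANDLE          lmr_handle;
+    DAT_LMR_CONTEXT         lmr_context;    /* maps to an IB L_KEY */
+    DAT_RMR_CONTEXT         rmr_context;
+    DAT_VLEN                reg_len;
+    DAT_VADDR               reg_addr;
+
+    region.for_va = buffer;
+
+    dat_lmr_create (ia_handle, DAT_MEM_TYPE_VIRTUAL, region, len,
+                    pz_handle, DAT_MEM_PRIV_ALL_FLAG,
+                    &lmr_handle, &lmr_context, &rmr_context,
+                    &reg_len, &reg_addr);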
+
+The DAT_LMR_CONTEXT value is used to uniquely identify the LMR
+when posting data transfer operations.  These values map directly
+to Infiniband L_KEYs.
+
+Since some DAT functions need to translate a DAT_LMR_CONTEXT value
+into a DAT_LMR_HANDLE (ex. dat_rmr_bind), a dictionary data structure
+is used to associate DAT_LMR_CONTEXT values with their corresponding
+DAT_LMR_HANDLE.  Each time a new LMR is created, the DAT_LMR_HANDLE
+should be inserted into the dictionary with the associated
+DAT_LMR_CONTEXT as the key.
+
+A hash table was chosen to implement this data structure.  Since the
+L_KEY values are being used by the CA hardware for indexing purposes,
+their distribution is expected to be uniform and hence ideal for
+hashing.
+
+The DAT_LMR_HANDLE value returned to DAT consumers is a pointer to
+a DAPL_LMR data structure.  The DAPL_LMR structure is used to represent
+all LMR objects.  The ref_count member should be used to track objects
+associated with a given LMR.
+
+The DAT API exposes the DAT_LMR_CONTEXT to consumers to allow
+for sharing of memory registrations between multiple address spaces.
+The mechanism by which such a feature would be implemented does not
+yet exist.  Consumers may be able to take advantage of this
+feature on future transports.
+
+
+3. Remote Memory Regions (RMRs)
+-------------------------------
+
+    DAPL remote memory regions represent a memory area on the host
+system to which the consumer wishes to allow RDMA operations.  The
+related DAT functions are
+
+dat_rmr_create
+dat_rmr_free
+dat_rmr_query
+dat_rmr_bind
+
+which are implemented in
+
+dapl_rmr_create
+dapl_rmr_free
+dapl_rmr_query
+dapl_rmr_bind
+
+The reference provider maps RMR objects onto Infiniband memory
+windows.
+
+The DAT_RMR_HANDLE value returned to DAT consumers is a pointer to
+a DAPL_RMR data structure.  The DAPL_RMR structure is used to represent
+all RMR objects.
+
+The API for binding a LMR to a RMR has the following function
+signature:
+
+DAT_RETURN
+dapl_rmr_bind (
+    IN  DAT_RMR_HANDLE          rmr_handle,
+    IN  const DAT_LMR_TRIPLET   *lmr_triplet,
+    IN  DAT_MEM_PRIV_FLAGS      mem_priv,
+    IN  DAT_EP_HANDLE           ep_handle,
+    IN  DAT_RMR_COOKIE          user_cookie,
+    IN  DAT_COMPLETION_FLAGS    completion_flags,
+    OUT DAT_RMR_CONTEXT         *rmr_context )
+
+where a DAT_LMR_TRIPLET is defined as:
+
+typedef struct dat_lmr_triplet
+    {
+    DAT_LMR_CONTEXT     lmr_context;
+    DAT_UINT32          pad;
+    DAT_VADDR           virtual_address;
+    DAT_VLEN            segment_length;
+    } DAT_LMR_TRIPLET;
+
+In the case of IB, the DAT_LMR_CONTEXT value is an L_KEY.
+As described in the IB spec, the Bind Memory Window verb
+takes both an L_KEY and Memory Region Handle among other
+parameters.  Therefore a data structure must be used to
+map a DAT_LMR_CONTEXT (L_KEY) value to a DAPL_LMR so
+that the needed memory region handle can be retrieved.
+The LMR hash table described above is used for this
+purpose.
diff --git a/branches/Ndi/ulp/dapl/doc/dapl_registry_design.txt b/branches/Ndi/ulp/dapl/doc/dapl_registry_design.txt
new file mode 100644
index 00000000..c4701a25
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dapl_registry_design.txt
@@ -0,0 +1,631 @@
+                 DAT Registry Subsystem Design v. 0.90
+                 -------------------------------------
+
+=================
+Table of Contents
+=================
+
+* Table of Contents
+* Referenced Documents
+* Introduction
+* Goals
+* Provider API
+* Consumer API
+* Registry Design
+  + Registry Database
+  + Provider API pseudo-code
+  + Consumer API pseudo-code
+  + Platform Specific API pseudo-code
+
+====================
+Referenced Documents
+====================
+
+uDAPL: User Direct Access Programming Library, Version 1.0.  Published
+6/21/2002.  http://www.datcollaborative.org/uDAPL_062102.pdf.  Referred
+to in this document as the "DAT Specification".
+
+============
+Introduction
+============
+
+The DAT architecture supports the use of multiple DAT providers within
+a single consumer application.  Consumers implicitly select a provider
+using the Interface Adapter name parameter passed to dat_ia_open().
+
+The subsystem that maps Interface Adapter names to provider
+implementations is known as the DAT registry.  When a consumer calls
+dat_ia_open(), the appropriate provider is found and notified of the
+consumer's request to access the IA.  After this point, all DAT API
+calls acting on DAT objects are automatically directed to the
+appropriate provider entry points.
+
+A persistent, administratively configurable database is used to store
+mappings from IA names to provider information.  This provider
+information includes: the file system path to the provider library
+object, version information, and thread safety information.  The
+location and format of the registry is platform dependent.  This
+database is known as the Static Registry (SR).  The process of adding
+a provider entry is termed Static Registration.
+
+Within each DAT consumer, there is a per-process database that
+maps from ia_name -> provider information.  When dat_ia_open() is
+called, the provider library is loaded, the ia_open_func is found, and
+the ia_open_func is called.
+
+=====
+Goals
+=====
+
+-- Implement the registration mechanism described in the uDAPL
+   Specification.
+
+-- The DAT registry should be thread safe.
+
+-- On a consumer's performance critical data transfer path, the DAT
+   registry should not require any significant overhead.
+
+-- The DAT registry should not limit the number of IAs or providers
+   supported.
+
+-- The user level registry should be tolerant of arbitrary library
+   initialization orders and support calls from library initialization
+   functions.
+
+============
+Provider API
+============
+
+Provider libraries must register themselves with the DAT registry.
+Along with the Interface Adapter name they wish to map, they must
+provide a routines vector containing provider-specific implementations
+of all DAT APIs.  If a provider wishes to service multiple Interface
+Adapter names with the same DAT APIs, it must register each name
+separately with the DAT registry.  The Provider API is not exposed to
+consumers.
+
+The user level registry must ensure that the Provider API may be
+called from a library's initialization function.  Therefore the
+registry must not rely on a specific library initialization order.
+
+    DAT_RETURN
+    dat_registry_add_provider(
+        IN  DAT_PROVIDER    *provider )
+
+Description: Allows the provider to add a mapping.  It will return an
+error if the Interface Adapter name already exists.
+
+    DAT_RETURN
+    dat_registry_remove_provider(
+        IN  DAT_PROVIDER    *provider )
+
+Description: Allows the Provider to remove a mapping.  It will return
+an error if the mapping does not already exist.
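+
+For example, a provider library might register itself from its
+initialization function as sketched below.  (Illustrative: only the
+DAT_PROVIDER fields referenced by the pseudo-code later in this
+document are shown, and the names my_* are placeholders.)
+
+    static DAT_PROVIDER my_provider = {
+        "my_ia0",       /* device_name: the IA name to claim */
+        my_ia_open      /* ia_open_func: dat_ia_open entry point */
+        /* ... remainder of the provider routines vector ... */
+    };
+
+    void
+    my_library_init (void)  /* run at library load, e.g. from _init() */
+    {
+        if (dat_registry_add_provider (&my_provider) != DAT_SUCCESS)
+        {
+            /* the IA name is already claimed by another provider */
+        }
+    }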
+
+============
+Consumer API
+============
+
+Consumers that wish to use a provider library call the DAT registry to
+map Interface Adapter names to provider libraries.  The consumer API
+is exposed to both consumers and providers.
+
+    DAT_RETURN
+    dat_ia_open (
+        IN    const DAT_NAME    device_name,
+        IN    DAT_COUNT         async_event_qlen,
+        INOUT DAT_EVD_HANDLE    *async_event_handle,
+        OUT   DAT_IA_HANDLE     *ia_handle )
+
+Description: Upon success, this function returns a DAT_IA_HANDLE to
+the consumer.  This handle, while opaque to the consumer, provides
+direct access to the provider supplied library.  To support this
+feature, all DAT_HANDLEs must be pointers to a pointer to a
+DAT_PROVIDER structure.
+
+    DAT_RETURN
+    dat_ia_close (
+        IN DAT_IA_HANDLE ia_handle )
+
+Description: Closes the Interface Adapter.
+
+    DAT_RETURN
+    dat_registry_list_providers(
+        IN  DAT_COUNT           max_to_return,
+        OUT DAT_COUNT           *entries_returned,
+        OUT DAT_PROVIDER_INFO   *(dat_provider_list[]) )
+
+Description: Lists the current mappings.
+
+===============
+Registry Design
+===============
+
+There are three separate portions of the DAT registry system:
+
+* Registry Database
+
+* Provider API
+
+* Consumer API
+
+We address each of these areas in order.  The final section will
+describe any necessary platform specific functions.
+
+Registry Database
+-----------------
+
+Static Registry
+...............
+
+The Static Registry is a persistent database containing provider
+information keyed by Interface Adapter name.  The Static Registry will
+be examined once when the DAT library is loaded.
+
+There is no synchronization mechanism protecting access to the Static
+Registry.  Multiple readers and writers may concurrently access the
+Static Registry and as a result there is no guarantee that the
+database will be in a consistent format at any given time.  DAT
+consumers should be aware of this and not run DAT programs when the
+registry is being modified (for example, when a new provider is being
+installed).  However, the DAT library must be robust enough to
+recognize an inconsistent Static Registry and ignore invalid entries.
+
+Information in the Static Registry will be used to initialize the
+registry database.  The registry will refuse to load libraries for DAT
+API versions different than its DAT API version.  Switching API
+versions will require switching versions of the registry library (the
+library explicitly placed on the link line of DAPL programs) as well
+as the header files included by the program.
+
+Set DAT_NO_STATIC_REGISTRY at compile time if you wish to compile
+DAT without a static registry.
+
+UNIX Registry Format
+....................
+
+The UNIX registry will be a plain text file with the following
+properties:
+    * All characters after # on a line are ignored (comments).
+    * Lines on which there are no characters other than whitespace
+      and comments are considered blank lines and are ignored.
+    * Non-blank lines must have seven whitespace separated fields.
+      These fields may contain whitespace if the field is quoted
+      with double quotes.  Within fields quoted with double quotes,
+      the following are valid escape sequences:
+
+        \\  backslash
+        \"  quote
+
+    * Each non-blank line will contain the following fields:
+
+        - The IA Name.
+        - The API version of the library:
+          [k|u]major.minor where "major" and "minor" are both
+          integers in decimal format.  Examples: "k1.0", "u1.0",
+          and "u1.1".
+        - Whether the library is thread-safe:
+          [threadsafe|nonthreadsafe]
+        - Whether this is the default section: [default|nondefault]
+        - The path name for the library image to be loaded.
+        - The version of the driver: major.minor, for example,
+          "5.13".
+
+The format of any remaining fields on the line is dependent on the API
+version of the library specified on that line.  For API versions 1.0
+and 1.1 (both kDAPL and uDAPL), there is only a single additional
+field, which is:
+
+        - An optional string with instance data, which will be passed
+          to the loaded library as its run-time arguments.
+
+This file format is described by the following grammar:
+
+    <registry file> -> <registry entry> <registry file> | <EOF>
+    <registry entry> -> <ia name> <api version> <thread safety>
+                        <default section> <lib path> <driver version>
+                        <ia params> <EOR> | <EOR>
+    <ia name> -> string
+    <api version> -> [k|u]decimal.decimal
+    <thread safety> -> [threadsafe|nonthreadsafe]
+    <default section> -> [default|nondefault]
+    <lib path> -> string
+    <driver version> -> decimal.decimal
+    <ia params> -> string
+    <EOF> -> end of file
+    <EOR> -> newline
+
+The location of this file may be specified by setting the environment
+variable DAT_CONF.  If DAT_CONF is not set, the default location will
+be /etc/dat.conf.
+
+Windows Registry Format
+.......................
+
+Standardization of the Windows registry format is not complete at this
+time.
+
+Registry Database Data Structures
+.................................
+
+The Registry Database is implemented as a dictionary data structure
+that stores (key, value) pairs.
+
+Initially the dictionary will be implemented as a linked list.  This
+will allow for an arbitrary number of mappings within the resource
+limits of a given system.  Although the search algorithm will have
+O(n) worst case time when n elements are stored in the data structure,
+we do not anticipate this to be an issue.  We believe that the number
+of IA names and providers will remain relatively small (on the order
+of 10).  If performance is found to be an issue, the dictionary can be
+re-implemented using another data structure without changing the
+Registry Database API.
+
+The dictionary uses IA name strings as keys and stores pointers to a
+DAT_REGISTRY_ENTRY structure, which contains the following
+information:
+
+    - provider library path string, library_path
+    - DAT_OS_LIBRARY_HANDLE, library_handle
+    - IA parameter string, ia_params
+    - DAT_IA_OPEN_FUNC function pointer, ia_open_func
+    - thread safety indicator, is_thread_safe
+    - reference counter, ref_count
+
+The entire registry database data structure is protected by a single
+lock.  All threads that wish to query/modify the database must possess
+this lock.  Serializing access in this manner is not expected to have
+a detrimental effect on performance as contention is expected to be
+minimal.
+
+An important property of the registry is that entries may be inserted
+into the registry, but no entries are ever removed.  The contents of
+the static registry are used to populate the initially empty registry
+database.  Since these mappings are by definition persistent, no
+mechanism is provided to remove them from the registry database.
+
+NOTE: There is currently no DAT interface to set a provider's IA
+specific parameters.  A solution for this problem has been proposed
+for uDAPL 1.1.
+
+Registry Database API
+.....................
+
+The static variable Dat_Registry_Db is used to store information about
+the Registry Database and has the following members:
+
+    - lock
+    - dictionary
+
+The Registry Database is accessed via the following internal API:
+
+Algorithm: dat_registry_init
+    Input: void
+   Output: DAT_RETURN
+{
+    initialize Dat_Registry_Db
+
+    dat_os_sr_load()
+}
+
+Algorithm: dat_registry_insert
+    Input: IN const DAT_STATIC_REGISTRY_ENTRY sr_entry
+   Output: DAT_RETURN
+{
+    dat_os_lock(&Dat_Registry_Db.lock)
+
+    create and initialize DAT_REGISTRY_ENTRY structure
+
+    dat_dictionary_add(&Dat_Registry_Db.dictionary, &entry)
+
+    dat_os_unlock(&Dat_Registry_Db.lock)
+}
+
+Algorithm: dat_registry_search
+    Input: IN const DAT_NAME_PTR ia_name
+           IN DAT_REGISTRY_ENTRY **entry
+   Output: DAT_RETURN
+{
+    dat_os_lock(&Dat_Registry_Db.lock)
+
+    entry gets dat_dictionary_search(&Dat_Registry_Db.dictionary,
+                                     &ia_name)
+
+    dat_os_unlock(&Dat_Registry_Db.lock)
+}
+
+Algorithm: dat_registry_list
+    Input: IN  DAT_COUNT max_to_return
+           OUT DAT_COUNT *entries_returned
+           OUT DAT_PROVIDER_INFO *(dat_provider_list[])
+   Output: DAT_RETURN
+{
+    dat_os_lock(&Dat_Registry_Db.lock)
+
+    size = dat_dictionary_size(Dat_Registry_Db.dictionary)
+
+    for ( i = 0, j = 0;
+          (i < max_to_return) && (j < size);
+          i++, j++ )
+    {
+        initialize dat_provider_list[i] w/ j-th element in dictionary
+    }
+
+    dat_os_unlock(&Dat_Registry_Db.lock)
+
+    *entries_returned = i;
+}
+
+Provider API pseudo-code
+------------------------
+
++ dat_registry_add_provider()
+
+Algorithm: dat_registry_add_provider
+    Input: IN DAT_PROVIDER *provider
+   Output: DAT_RETURN
+{
+    dat_init()
+
+    dat_registry_search(provider->device_name, &entry)
+
+    if IA name is not found then dat_registry_insert(new entry)
+
+    if entry.ia_open_func is not NULL return an error
+
+    entry.ia_open_func = provider->ia_open_func
+}
+
++ dat_registry_remove_provider()
+
+Algorithm: dat_registry_remove_provider
+    Input: IN DAT_PROVIDER *provider
+   Output: DAT_RETURN
+{
+    dat_init()
+
+    dat_registry_search(provider->device_name, &entry)
+
+    if IA name is not found return an error
+
+    entry.ia_open_func = NULL
+}
+
+Consumer API pseudo-code
+------------------------
+
+* dat_ia_open()
+
+This function looks up the specified IA name in the ia_dictionary,
+loads the provider library, retrieves a function pointer to the
+provider's IA open function from the provider_dictionary, and calls
+the provider's IA open function.
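+
+For reference, the consumer-side calls this pseudo-code implements
+look like the following.  (A sketch: the IA name "ibnic0" and the
+queue length are illustrative, and error handling is elided.)
+
+    DAT_IA_HANDLE   ia;
+    DAT_EVD_HANDLE  async_evd = DAT_HANDLE_NULL;
+
+    dat_ia_open ("ibnic0", 8 /* async_event_qlen */, &async_evd, &ia);
+    /* ... all DAT calls on handles derived from 'ia' are now routed
+     * to this provider ... */
+    dat_ia_close (ia);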
+
+Algorithm: dat_ia_open
+    Input: IN    const DAT_NAME_PTR name
+           IN    DAT_COUNT          async_event_qlen
+           INOUT DAT_EVD_HANDLE     *async_event_handle
+           OUT   DAT_IA_HANDLE      *ia_handle
+   Output: DAT_RETURN
+
+{
+    dat_registry_search(name, &entry)
+
+    if the name is not found return an error
+
+    dat_os_library_load(entry.library_path, &entry.library_handle)
+
+    if the library fails to load return an error
+
+    if the entry's ia_open_func is invalid
+    {
+        dat_os_library_unload(entry.library_handle)
+        return an error
+    }
+
+    (*ia_open_func) (name,
+                     async_event_qlen,
+                     async_event_handle,
+                     ia_handle);
+}
+
+* dat_ia_close()
+
+Algorithm: dat_ia_close
+    Input: IN DAT_IA_HANDLE ia_handle
+           IN DAT_CLOSE_FLAGS ia_flags
+   Output: DAT_RETURN
+{
+    provider = DAT_HANDLE_TO_PROVIDER(ia_handle)
+
+    (*provider->ia_close_func) (ia_handle, ia_flags)
+
+    dat_registry_search(provider->device_name, &entry)
+
+    dat_os_library_unload(entry.library_handle)
+}
+
+* dat_registry_list_providers()
+
+Algorithm: dat_registry_list_providers
+    Input: IN  DAT_COUNT max_to_return
+           OUT DAT_COUNT *entries_returned
+           OUT DAT_PROVIDER_INFO *(dat_provider_list[])
+   Output: DAT_RETURN
+{
+    validate parameters
+
+    dat_registry_list(max_to_return, entries_returned,
+                      dat_provider_list)
+}
+
+Platform Specific API pseudo-code
+---------------------------------
+
+Below are descriptions of platform specific functions required by the
+DAT Registry.  These descriptions are for Linux.
+
+Each entry in the static registry is represented by an OS specific
+structure, DAT_OS_STATIC_REGISTRY_ENTRY.  On Linux, this structure
+will have the following members:
+
+    - IA name string
+    - API version
+    - thread safety
+    - default section
+    - library path string
+    - driver version
+    - IA parameter string
+
+The tokenizer will return a DAT_OS_SR_TOKEN structure
+containing:
+
+    - DAT_OS_SR_TOKEN_TYPE value
+    - string with the field's value
+
+The tokenizer will ignore all white space and comments.  The tokenizer
+will also translate any escape sequences found in a string.
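+
+A sketch of such a tokenizer is below.  (Illustrative: the token type
+constant names DAT_OS_SR_TOKEN_{STRING,EOR,EOF} and the fixed-size
+value buffer are assumptions, and buffer-overflow checks are elided
+for brevity.)
+
+    #include <stdio.h>
+    #include <ctype.h>
+
+    static DAT_RETURN
+    dat_os_token_next (FILE *fp, DAT_OS_SR_TOKEN *token)
+    {
+        int c, i = 0;
+
+        /* Skip horizontal whitespace and comments. */
+        while ((c = fgetc (fp)) != EOF)
+        {
+            if (c == '#')       /* comment runs to end of line */
+                while ((c = fgetc (fp)) != EOF && c != '\n')
+                    ;
+            if (c == '\n' || !isspace (c))
+                break;
+        }
+
+        if (c == EOF)
+        {
+            token->type = DAT_OS_SR_TOKEN_EOF;
+            return DAT_SUCCESS;
+        }
+        if (c == '\n')
+        {
+            token->type = DAT_OS_SR_TOKEN_EOR;
+            return DAT_SUCCESS;
+        }
+
+        if (c == '"')           /* quoted field: translate \\ and \" */
+        {
+            while ((c = fgetc (fp)) != EOF && c != '"')
+            {
+                if (c == '\\')
+                    c = fgetc (fp);
+                token->value[i++] = (char) c;
+            }
+        }
+        else                    /* unquoted field */
+        {
+            do
+                token->value[i++] = (char) c;
+            while ((c = fgetc (fp)) != EOF && !isspace (c));
+            if (c == '\n')
+                ungetc (c, fp); /* the newline is its own token */
+        }
+
+        token->value[i] = '\0';
+        token->type = DAT_OS_SR_TOKEN_STRING;
+        return DAT_SUCCESS;
+    }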
+ +Algorithm: dat_os_sr_load + Input: n/a + Output: DAT_RETURN +{ + if DAT_CONF environment variable is set + static_registry_file = contents of DAT_CONF + else + static_registry_file = /etc/dat.conf + + sr_fd = dat_os_open(static_registry_file) + + forever + { + initialize DAT_OS_SR_ENTRY entry + + do + { + // discard blank lines + dat_os_token_next(sr_fd, &token) + } while token is newline + + if token type is EOF then break // all done + // else the token must be a string + + entry.ia_name = token.value + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + else if ( dat_os_convert_api(token.value, &entry.api) fails ) + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + else if ( dat_os_convert_thread_safety(token.value, &entry.thread_safety) fails ) + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + else if ( dat_os_convert_default(token.value, &entry.default) fails ) + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + entry.lib_path = token.value + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + else if ( dat_os_convert_driver_version(token.value, &entry.driver_version) fails ) + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not string then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + entry.ia_params = token.value + + dat_os_token_next(sr_fd, &token) + + if token type is EOF then break // all done + else if token type is not newline then + { + // an error has occurred + dat_os_token_sync(sr_fd) + continue + } + + if ( dat_os_sr_is_valid(entry) ) + { + dat_registry_insert(entry) + } + } + + dat_os_close(sr_fd) +} + +Algorithm: dat_os_library_load + Input: IN const DAT_NAME_PTR *library_path + OUT DAT_LIBRARY_HANDLE *library_handle + Output: DAT_RETURN +{ + *library_handle = dlopen(library_path); +} + +Algorithm: dat_os_library_unload + Input: IN const DAT_LIBRARY_HANDLE library_handle + Output: DAT_RETURN +{ + dlclose(library_handle) +} diff --git a/branches/Ndi/ulp/dapl/doc/dapl_shared_memory_design.txt b/branches/Ndi/ulp/dapl/doc/dapl_shared_memory_design.txt new file mode 100644 index 00000000..90fedb7b --- /dev/null +++ b/branches/Ndi/ulp/dapl/doc/dapl_shared_memory_design.txt @@ -0,0 +1,867 @@ +####################################################################### +# # +# DAPL Shared Memory Design # +# # +# James Lentini # +# jlentini at users.sourceforge.net # +# # +# Created 09/17/2002 # +# Version 0.03 # +# # 
+#######################################################################
+
+
+Contents
+--------
+0. Introduction
+1. Referenced Documents
+2. Requirements
+3. Interface
+4. Implementation Options
+
+
+Introduction
+------------
+
+This document describes the design of shared memory registration for
+the DAPL reference implementation (RI).
+
+Implementing shared memory support completely within the DAPL RI
+would not be an ideal solution.  A more robust and efficient
+implementation can be achieved by HCA vendors that integrate a DAT
+provider into their software stack.  Therefore the RI will not contain
+an implementation of this feature.
+
+
+Referenced Documents
+--------------------
+
+uDAPL: User Direct Access Programming Library, Version 1.1.
+Available at http://www.datcollaborative.org/uDAPLv11.pdf.
+Referred to in this document as the "DAT Specification".
+
+InfiniBand Access Application Programming Interface Specification,
+Version 1.2, 4/15/2002.  In DAPL SourceForge repository at
+doc/api/access_api.pdf.  Referred to in this document as the "IBM
+Access API Specification".
+
+InfiniBand Architecture Specification, Volumes 1 and 2, Release
+1.1.  Available from http://www.infinibandta.org/
+Referred to in this document as the "Infiniband Specification".
+
+
+Requirements
+------------
+
+The DAT shared memory model can be characterized as a peer-to-peer
+model since the order in which consumers register a region is not
+dictated by the programming interface.
+
+The DAT API function used to register shared memory is:
+
+DAT_RETURN
+dat_lmr_create (
+    IN  DAT_IA_HANDLE           ia_handle,
+    IN  DAT_MEM_TYPE            mem_type,
+    IN  DAT_REGION_DESCRIPTION  region_description,
+    IN  DAT_VLEN                length,
+    IN  DAT_PZ_HANDLE           pz_handle,
+    IN  DAT_MEM_PRIV_FLAGS      mem_privileges,
+    OUT DAT_LMR_HANDLE          *lmr_handle,
+    OUT DAT_LMR_CONTEXT         *lmr_context,
+    OUT DAT_RMR_CONTEXT         *rmr_context,
+    OUT DAT_VLEN                *registered_length,
+    OUT DAT_VADDR               *registered_address );
+
+where a DAT_REGION_DESCRIPTION is defined as:
+
+typedef union dat_region_description
+    {
+    DAT_PVOID           for_va;
+    DAT_LMR_HANDLE      for_lmr_handle;
+    DAT_SHARED_MEMORY   for_shared_memory;
+    } DAT_REGION_DESCRIPTION;
+
+In the case of a shared memory registration the DAT consumer will set
+the DAT_MEM_TYPE parameter to DAT_MEM_TYPE_SHARED_VIRTUAL and place a
+cookie in the DAT_REGION_DESCRIPTION union's DAT_SHARED_MEMORY
+member.  The DAT_SHARED_MEMORY type is defined as follows:
+
+typedef struct dat_shared_memory
+    {
+    DAT_PVOID       virtual_address;
+    DAT_LMR_COOKIE  shared_memory_id;
+    } DAT_SHARED_MEMORY;
+
+Unlike the DAT peer-to-peer model, the Infiniband shared memory model
+requires a master-slave relationship.  A memory region must first be
+registered using the Register Memory Region verb with subsequent
+registrations made using the Register Shared Memory Region verb.  This
+verb is implemented in the IBM OS Access API as:
+
+ib_int32_t
+ib_mr_shared_register_us(
+    ib_hca_handle_t hca_handle,
+    ib_mr_handle_t  *mr_handle,     /* IN-OUT: could be changed */
+    ib_pd_handle_t  pd_handle,      /* IN */
+    ib_uint32_t     access_control, /* IN */
+    ib_uint32_t     *l_key,         /* OUT */
+    ib_uint32_t     *r_key,         /* OUT: if remote access needed */
+    ib_uint8_t      **va );         /* IN-OUT: virt. addr. to register */
+
+The important parameter is the memory region handle, which must be the
+same as that of an already registered region.
+
+Two requirements are implied by this difference between the DAT and
+Infiniband models.  First, DAPL implementations need a way to
+determine the first registration of a shared region.
Second implementations must +map DAT_LMR_COOKIE values to memory region handles both within and +across processes. To satisfy the above requirements DAPL must maintain +this information in a system wide database. + +The difficulty of implementing such a database at the DAT provider +level is the reason the RI's shared memory code is meant to be +temporary. Such a database is much better suited as part of the HCA +vendor's software stack, specifically as part of their HCA driver. + +If DAPL was based on a master-slave model like InfiniBand, the +implementation of shared memory would be straight +forward. Specifically the complexity is a result of the consumer being +responsible for specifying the DAT_LMR_COOKIE values. If the DAPL +spec. were changed to allow the provider and not the consumer to +specify the DAT_LMR_COOKIE value, the implementation of this feature +would be greatly simplified. Since the DAPL API already requires +consumers to communicate the DAT_LMR_COOKIE values between processes, +such a change places minimal additional requirements on the +consumer. The dapl_lmr_query call could easily be adapted to allow the +consumer to query the provider for a given LMR's DAT_LMR_COOKIE +value. The only spec changes needed would be to add a DAT_LMR_COOKIE +member to the DAT_LMR_PARAM structure and a DAT_LMR_FIELD_LMR_COOKIE +constant to the DAT_LMR_PARAM_MASK enumeration. A provider could then +store the given LMR's memory region handle in this value, greatly +simplifying the implementation of shared memory in DAPL. + + +Interface +--------- + +To allow the database implementation to easily change, the RI would use +a well defined interface between the memory subsystem and the +database. Conceptually the database would contain a single table with +the following columns: + +[ LMR Cookie ][ MR Handle ][ Reference Count ][ Initialized ] + +where the LMR Cookie column is the primary key. + +The following functions would be used to access the database: + +DAT_RETURN +dapls_mrdb_init ( + void ); + + Called by dapl_init(.) to perform any necessary database + initialization. + +DAT_RETURN +dapls_mrdb_exit ( + void ); + + Called by dapl_fini(.) to perform any necessary database cleanup. + +DAT_RETURN +dapls_mrdb_record_insert ( + IN DAPL_LMR_COOKIE cookie ); + + If there is no record for the specified cookie, an empty record is + added with a reference count of 1 and the initialized field is set to + false. If a record already exists, the function returns an error. + +DAT_RETURN +dapls_mrdb_record_update ( + IN DAPL_LMR_COOKIE cookie, + IN ib_mr_handle_t mr_handle ); + + If there is a record for the specified cookie, the MR handle field is + set to the specified mr_handle value and the initialized field is set + to true. Otherwise an error is returned. + +DAT_RETURN +dapls_mrdb_record_query ( + IN DAPL_LMR_COOKIE cookie, + OUT ib_mr_handle_t *mr_handle ); + + If there is a record for the specified cookie and the initialized + field is true, the MR handle field is returned and the reference + count field is incremented. Otherwise an error is returned. + +DAT_RETURN +dapls_mrdb_record_dec ( + IN DAPL_LMR_COOKIE cookie ); + + If there is a record for the specified cookie, the reference count + field is decremented. If the reference count is zero after the + decrement, the record is removed from the database. Otherwise an + error is returned. 
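+
+For illustration, a single row of this conceptual table might look as
+follows in C. This is only a sketch: DAPL_LMR_COOKIE and
+ib_mr_handle_t come from the interfaces above, while the reference
+count and flag types are assumptions, not part of any existing header.
+
+/* One row of the conceptual mapping table; the LMR cookie
+   column is the primary key. */
+typedef struct dapls_mrdb_record
+    {
+    DAPL_LMR_COOKIE     cookie;         /* primary key */
+    ib_mr_handle_t      mr_handle;      /* valid once initialized is true */
+    unsigned int        ref_count;      /* registrations sharing the region */
+    int                 initialized;    /* set by dapls_mrdb_record_update */
+    } DAPLS_MRDB_RECORD;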
+
+The generic algorithms for creating and destroying a shared memory
+region are:
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: CreateShared
+  Inputs:
+    ia_handle
+    pz_handle
+    address
+    length
+    lmr_cookie
+    privileges
+  Outputs:
+    lmr_handle
+    lmr_context
+    registered_address
+    registered_length
+
+forever
+{
+    if dapls_mrdb_record_insert(cookie) is successful
+    {
+        if dapl_lmr_create_virtual is not successful
+        {
+            dapls_mrdb_record_dec(cookie)
+            return error
+        }
+        else if dapls_mrdb_record_update(cookie, lmr->mr_handle) is not successful
+        {
+            dapls_mrdb_record_dec(cookie)
+            return error
+        }
+        else break
+    }
+    else if dapls_mrdb_record_query(cookie, mr_handle) is successful
+    {
+        if ib_mr_shared_register_us is not successful
+        {
+            dapls_mrdb_record_dec(cookie)
+            return error
+        }
+        else break
+    }
+}
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: FreeShared
+  Inputs:
+    lmr
+  Outputs:
+
+if dapls_ib_mr_deregister(lmr) is successful
+    dapls_mrdb_record_dec(lmr->cookie)
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+Implementation Options
+----------------------
+
+As described above, the crucial functionality needed to implement
+shared memory support is a system-wide database for mapping LMR
+cookies to memory region handles. The following designs represent some
+of the options for implementing such a database. Adding a database
+increases the complexity of DAPL from both an implementor's and a
+user's perspective. These designs should be evaluated on the degree to
+which they minimize the additional complexity while still providing a
+robust solution.
+
+
+ File System Database
+ --------------------
+
+Employing a database that is already part of the system would be
+ideal. One option on Linux is to use the file system. An area of the
+file system could be set aside for the creation of files to represent
+each LMR cookie. The area of the file system could be specified
+through a hard-coded value, an environment variable, or a
+configuration file. A good candidate would be a DAPL subdirectory of
+/tmp.
+
+Exclusive file creation is available in Linux through the open(2)
+system call's O_CREAT and O_EXCL flags. The standard I/O interface
+(fopen(3), etc.) does not support this feature, making porting
+difficult. However, porting to other environments is not a goal of
+this design since the entire scheme is only a temporary solution.
+
+Determining when to delete the files is a difficult problem. A
+reference count is required to properly remove a file once all the
+memory regions it represents are deregistered. The synchronization
+mechanism necessary for maintaining the reference count is not easily
+implemented. As an alternative, a script could be provided to clean up
+the database by removing all the files. The script would need to be
+run before any DAPL consumers were started to ensure a clean
+database. The disadvantage of using a script is that no DAPL instances
+can be running when it is used. Another option would be to store the
+process ID (PID) of the process that created the file as part of the
+file's contents. Upon finding a record for a given LMR cookie value, a
+DAPL instance could determine if there was a process with the same PID
+in the system. To accomplish this, the kill(2) system call could be
+used (e.g., kill(pid, 0)). 
This method of validating the record assumes
+that all DAPL instances can signal one another and that the PID values
+do not wrap before the check is made.
+
+Another difficulty with this solution is choosing an accessible
+portion of the file system. The area must have permissions that allow
+all processes using DAPL to access and modify its contents. System
+administrators are typically reluctant to allow areas without any
+access controls. Typically such areas are on a dedicated file system
+of a minimal size to ensure that malicious or malfunctioning software
+does not monopolize the system's storage capacity. Since very little
+information will be stored in each file, it is unlikely that DAPL would
+need a large amount of storage space even if a large number of shared
+memory regions were in use. However, since a file is needed for each
+shared region, a large number of shared registrations may lead to the
+consumption of all of a file system's inodes. Again, since this
+solution is meant to be only temporary, this constraint may be
+acceptable.
+
+There is also the possibility of database corruption should a process
+crash or deadlock at an inopportune time. If a process creates file x
+and then crashes, all other processes waiting for the memory handle to
+be written to x will fail.
+
+The database interface could be implemented as follows:
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_init
+  Inputs:
+
+  Outputs:
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_exit
+  Inputs:
+
+  Outputs:
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_insert
+  Inputs:
+    cookie
+  Outputs:
+
+file_name = convert cookie to valid file name
+
+fd = exclusively create file_name
+if fd is invalid
+    return failure
+
+if close fd fails
+    return failure
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_update
+  Inputs:
+    cookie
+    mr_handle
+  Outputs:
+
+file_name = convert cookie to valid file name
+
+fd = open file_name
+if fd is invalid
+    return failure
+
+if write mr_handle to file_name fails
+    return failure
+
+if close fd fails
+    return failure
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_query
+  Inputs:
+    cookie
+
+  Outputs:
+    mr_handle
+
+file_name = convert cookie to valid file name
+
+fd = open file_name
+if fd is invalid
+    return failure
+
+if read mr_handle from file_name fails
+    return failure
+
+if close fd fails
+    return failure
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_dec
+  Inputs:
+    cookie
+  Outputs:
+
+return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Daemon Database
+ ---------------
+
+The database could be maintained by a separate daemon process.
+The DAPL instances would act as clients of the daemon server and +communicate with the daemon through the various IPC mechanisms +available on Linux: Unix sockets, TCP/IP sockets, System V message +queues, FIFOs, or RPCs. + +As with the file system based database, process crashes can potentially +cause database corruption. + +While the portability of this implementation will depend on the chosen +IPC mechanism, this approach will be at best Unix centric and possibly +Linux specific. + +The database interface could be implemented as follows: + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_init + Inputs: + + Outputs: + +initialize IPC mechanism + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_exit + Inputs: + + Outputs: + +shutdown IPC mechanism + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_record_insert + Inputs: + cookie + Outputs: + +if send insert message for cookie fails + return error + +if receive insert response message fails + return error + +if insert success + return success +else return error + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_record_update + Inputs: + cookie + mr_handle + + Outputs: + +if send update message for cookie and mr_handle fails + return error +else return success + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_record_query + Inputs: + cookie + + Outputs: + mr_handle + +if send query message for cookie fails + return error + +else if receive query response message with mr_handle fails + return error + +else return success + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + Function: dapls_mrdb_record_dec + Inputs: + cookie + Outputs: + +if send decrement message for cookie fails + return error +else return success + ++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + + + Shared Memory Database + ---------------------- + +The database could be maintained in an area of memory shared by all +DAPL instances running on a system. Linux supports the System V shared +memory functions shmget(2), shmctl(2), shmat(2), and shmdt(2). A hard +coded key_t value could be used so that each DAPL instance attached to +the same piece of shared memory. The size of the database would be +constrained by the size of the shared memory region. Synchronization +could be achieved by using atomic operations targeting memory in the +shared region. + +Such a design would suffer from the corruption problems described +above. If a process crashed there would be no easy way to clean up its +locks and roll back the database to a consistent state. 
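+
+As a sketch of how the "attach shared region" step in the interface
+summary below might be realized with the System V calls named above
+(the key and size constants here are illustrative placeholders, not
+values from any implementation):
+
+#include <sys/ipc.h>
+#include <sys/shm.h>
+
+#define DAPLS_MRDB_SHM_KEY  ((key_t)0x44415054)  /* hard-coded key */
+#define DAPLS_MRDB_SHM_SIZE (64 * 1024)          /* database size limit */
+
+static void *dapls_mrdb_shm;    /* base address of the shared database */
+
+DAT_RETURN
+dapls_mrdb_init ( void )
+{
+    /* Create the segment if it does not exist; every DAPL instance
+     * uses the same hard-coded key and so attaches the same memory. */
+    int id = shmget( DAPLS_MRDB_SHM_KEY, DAPLS_MRDB_SHM_SIZE,
+                     IPC_CREAT | 0666 );
+    if ( id < 0 )
+        return DAT_INTERNAL_ERROR;
+
+    dapls_mrdb_shm = shmat( id, NULL, 0 );
+    if ( dapls_mrdb_shm == (void *)-1 )
+        return DAT_INTERNAL_ERROR;
+
+    return DAT_SUCCESS;
+}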
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_init
+  Inputs:
+
+  Outputs:
+
+attach shared region
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_exit
+  Inputs:
+
+  Outputs:
+
+detach shared region
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_insert
+  Inputs:
+    cookie
+  Outputs:
+
+lock database
+
+if db does not contain cookie
+    add record for cookie
+
+unlock database
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_update
+  Inputs:
+    cookie
+    mr_handle
+  Outputs:
+
+lock database
+
+if db contains cookie
+    update record's mr_handle
+
+unlock database
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_query
+  Inputs:
+    cookie
+
+  Outputs:
+    mr_handle
+
+lock database
+
+if db contains cookie
+    set mr_handle to record's value
+    increment record's reference count
+
+unlock database
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_dec
+  Inputs:
+    cookie
+  Outputs:
+
+lock database
+
+if db contains cookie
+    decrement record's reference count
+
+    if reference count is 0
+        remove record
+
+unlock database
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
+
+ Kernel Module Database
+ ----------------------
+
+If the DAT library were integrated with an HCA vendor's software
+stack, the database could be managed by the HCA driver. Placing the
+database in the kernel would alleviate the synchronization problems
+posed by multiple processes. Since memory registration operations
+already involve a transition into the kernel, no extra overhead would
+be incurred by this design.
+
+The RI could include a kernel module with this functionality as a
+temporary solution. The module could identify itself as a character
+device driver and communicate with user-level processes through an
+ioctl(2). The driver could also create an entry in the proc file
+system to display the database's contents for diagnostic purposes.
+
+A major benefit of a kernel-based implementation is that the database
+can remain consistent even in the presence of application
+errors. Since DAPL instances communicate with the driver by means of
+ioctl(2) calls on a file, the driver can arrange to be informed
+when the file is closed and can perform any necessary actions. The
+driver is guaranteed to be notified of a close regardless of the
+manner in which the process exits.
+
+The database could be implemented as a dictionary using the LMR cookie
+values as keys.
+
+The following pseudo-code describes the functions needed by the kernel
+module and the database interface.
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: KernelModuleInit
+  Inputs:
+
+  Outputs:
+
+dictionary = create_dictionary()
+create_proc_entry()
+create_character_device_entry()
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: KernelModuleExit
+  Inputs:
+
+  Outputs:
+
+remove_character_device_entry()
+remove_proc_entry()
+free_dictionary(dictionary)
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: DeviceOpen
+  Inputs:
+    file
+
+  Outputs:
+
+dev_data = allocate device data
+
+file->private_data = dev_data
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: DeviceClose
+  Inputs:
+    file
+
+  Outputs:
+
+dev_data = file->private_data
+
+for each record in dev_data
+{
+    RecordDecIoctl
+}
+
+deallocate dev_data
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: RecordInsertIoctl
+  Inputs:
+    file
+    cookie
+
+  Outputs:
+
+lock dictionary
+
+if cookie is not in dictionary
+    insert cookie into dictionary
+
+unlock dictionary
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: RecordUpdateIoctl
+  Inputs:
+    file
+    cookie
+    mr_handle
+
+  Outputs:
+
+dev_data = file->private_data
+
+lock dictionary
+
+if cookie is in dictionary
+    add record reference to dev_data
+    update mr_handle
+
+unlock dictionary
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: RecordQueryIoctl
+  Inputs:
+    file
+    cookie
+
+  Outputs:
+    mr_handle
+
+dev_data = file->private_data
+
+lock dictionary
+
+if cookie is in dictionary
+    add record reference to dev_data
+    retrieve mr_handle
+
+unlock dictionary
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: RecordDecIoctl
+  Inputs:
+    file
+    cookie
+
+  Outputs:
+
+dev_data = file->private_data
+remove record reference from dev_data
+
+lock dictionary
+
+if cookie is in dictionary
+    decrement reference count
+    if reference count is 0
+        remove record
+
+unlock dictionary
+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_init
+  Inputs:
+
+  Outputs:
+
+fd = open device file
+
+if fd is invalid
+    return error
+else
+    return success
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_exit
+  Inputs:
+
+  Outputs:
+
+close fd
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_insert
+  Inputs:
+    cookie
+  Outputs:
+
+ioctl on fd
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_update
+  Inputs:
+    cookie
+    mr_handle
+  Outputs:
+
+ioctl on fd
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_query
+  Inputs:
+    cookie
+
+  Outputs:
+    mr_handle
+
+ioctl on fd
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+  Function: dapls_mrdb_record_dec
+  Inputs:
+    cookie
+  Outputs:
+
+ioctl on fd
+
++-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
diff --git a/branches/Ndi/ulp/dapl/doc/dapl_vendor_specific_changes.txt b/branches/Ndi/ulp/dapl/doc/dapl_vendor_specific_changes.txt
new file mode 100644
index 00000000..aa437807
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dapl_vendor_specific_changes.txt
@@ -0,0 +1,394 @@
+           Suggested Vendor-Specific Changes v. 0.92
+           -----------------------------------------
+
+=================
+Table of Contents
+=================
+
+* Table of Contents
+* Introduction
+* Referenced documents
+* Functionality Changes
+  + Missing Functionality
+    + dat_evd_resize
+    + Ordering guarantees on connect/disconnect.
+    + Shared memory
+    + dat_cr_handoff
+* Performance optimizations
+  + Reduction of context switches
+    [Many interrelated optimizations]
+  + Reducing copying of data
+    + Avoidance of s/g list copy on posting
+    + Avoidance of event data copy from CQ to EVD
+  + Elimination of locks
+  + Eliminating subroutine calls
+
+
+============
+Introduction
+============
+
+This document is a list of functionality enhancements and
+optimizations hardware vendors porting uDAPL may want to consider as
+part of their port. The functionality enhancements mentioned in this
+document are situations in which HCA vendors, with their access to
+driver and verb-level source code, and their reduced portability
+concerns, are in a much better position than the reference
+implementation to implement portions of the uDAPL v. 1.0
+specification. (Additional areas in which the reference
+implementation, because of a lack of time or resources, did not fully
+implement the uDAPL 1.0 specification are not addressed in this file;
+see the file doc/dapl_unimplemented_functionality.txt, forthcoming).
+Vendors should be guided in their implementation of these
+functionality enhancements by their customers' need for the features
+involved.
+
+The optimizations suggested in this document have been identified by
+the uDAPL Reference Implementation team as areas in which performance
+may be improved by "breaching" the IB Verbs API boundary. They are
+inappropriate for the reference implementation (which has portability
+as one of its primary goals) but may be appropriate for an HCA-specific
+port of uDAPL. Note that no expected performance gain is attached to
+the suggested optimizations. This is intentional. Vendors should be
+guided in their performance improvements by performance evaluations
+done in the context of a representative workload, with the expected
+benefit from a particular optimization weighed against the cost in
+code complexity and scheduling, before the improvement is implemented.
+This document is intended to seed that process; it is not intended to
+be a roadmap for that process.
+
+We divide functionality changes into two categories:
+  * Areas in which functionality is lacking in the reference
+    implementation.
+  * Areas in which the functionality is present in the reference
+    implementation, but needs improvement.
+
+We divide performance improvements into three types:
+  * Reducing context switches
+  * Reducing copying of data (*)
+  * Eliminating subroutine calls
+
+(*) Note that the data referred to in "reducing copying of data" is
+the meta data describing an operation (e.g. scatter/gather list or
+event information), not the actual data to be transferred. No data
+transfer copies are required within the uDAPL reference
+implementation.
+
+====================
+Referenced Documents
+====================
+
+uDAPL: User Direct Access Programming Library, Version 1.0. Published
+6/21/2002. http://www.datcollaborative.org/uDAPL_062102.pdf.
+Referred to in this document as the "DAT Specification".
+
+InfiniBand Access Application Programming Interface Specification,
+Version 1.2, 4/15/2002. In DAPL SourceForge repository at
+doc/api/access_api.pdf. Referred to in this document as the "IBM
+Access API Specification".
+
+uDAPL Reference Implementation Event System Design. In DAPL
+SourceForge repository at doc/dapl_event_design.txt.
+
+uDAPL Reference Implementation Shared Memory Design. In DAPL
+SourceForge repository at doc/dapl_shared_memory_design.txt.
+
+uDAPL list of unimplemented functionality. In DAPL SourceForge
+repository at doc/dapl_unimplemented_functionality.txt (forthcoming).
+
+===========================================
+Suggested Vendor Functionality Enhancements
+===========================================
+
+Missing Functionality
+---------------------
+-- dat_evd_resize
+
+The uDAPL event system does not currently implement dat_evd_resize.
+The primary reason for this is that it is not currently possible to
+identify EVDs with the CQs that back them. Hence uDAPL must keep a
+separate list of events, and any changes to the size of that event
+list would require careful synchronization with all users of that EVD
+(see the uDAPL Event System design for more details). If the various
+vendor-specific optimizations in this document that eliminate the
+requirement for the EVD to keep its own event list were implemented,
+dat_evd_resize might be easily implemented by a call or calls to
+ib_cq_resize.
+
+-- Ordering guarantees on connect/disconnect.
+
+The DAPL 1.1 specification specifies that if an EVD combines event
+streams for connection events and DTO events for the same endpoint,
+there is an ordering guarantee: the connection event on the EP occurs
+before any DTO events, and the disconnection event occurs after all
+successful DTO events. Since DTO events are provided by the IBM OS
+Access API through ib_completion_poll (in response to consumer
+request) and connection events are provided through callbacks (which
+may race with consumer requests), there is no trivial way to implement
+this functionality. The functionality may be implemented through
+under-the-table synchronization between the EVD and EP; specifically:
+  * The first time a DTO event is seen on an endpoint, if the
+    connection event has not yet arrived, one is created and
+    delivered ahead of that DTO event.
+  * When a connection event is seen on an endpoint, if a
+    connection event has already been created for that endpoint,
+    it is silently discarded.
+  * When a disconnection event is seen on an endpoint, it is
+    "held" until either: a) all expected DTO events for that
+    endpoint have completed, or b) a DTO marked as "flushed by
+    disconnect" is received. At that point it is delivered.
+
+Because of the complexity and performance overhead of implementing
+this feature, the DAPL 1.1 reference implementation has chosen to take
+the second approach allowed by the 1.1 specification: disallowing
+integration of connection and data transfer events on the same EVD.
+This finesses the problem, is in accordance with the specification, and
+is more closely aligned with the ITWG IT-API currently in development,
+which only allows a single event stream type for each simple EVD.
+However, other vendors may choose to implement the functionality
+described above in order to support more integration of event streams.
+
+-- Shared memory implementation
+
+The difficulties involved in the DAPL shared memory implementation are
+fully described in doc/dapl_shared_memory_design.txt. To briefly
+recap:
+
+The uDAPL spec describes a peer-to-peer shared memory model; all uDAPL
+instances that want to share registration resources for a section of
+memory do so by providing the same cookie. No uDAPL instance is
+unique; all register their memory in the same way, and no
+communication between the instances is required.
+
+In contrast, the IB shared memory interface requires the first process
+to register the memory to do so using the standard memory registration
+verbs. All other processes after that must use the shared memory
+registration verb, and provide to that verb the memory region handle
+returned from the initial call. This means that the first process to
+register the memory must communicate the memory region handle it
+receives to all the other processes that wish to share this memory.
+This is a master-slave model of shared memory registration; the
+initial process (the master) is unique in its role, and it must tell
+the slaves how to register the memory after it.
+
+To translate between these two models, the uDAPL implementation
+requires some mapping between the shared cookie and the memory region
+handle. This mapping must be exclusive and must have inserts occur
+atomically with lookups (so that only one process can set the memory
+region handle; the others retrieve it); a sketch of these semantics
+appears at the end of this section. It must also track the
+deregistration of the shared memory, and the exiting of the processes
+registering the shared memory; when all processes have deregistered
+(possibly by exiting), it must remove the mapping from cookie to
+memory region handle.
+
+This mapping must obviously be shared between all uDAPL
+implementations on a given host. Implementing such a shared mapping
+is problematic in a pure user-space implementation (as the reference
+implementation is) but is expected to be relatively easy in
+vendor-supplied uDAPL implementations, which will presumably include a
+kernel/device driver component. For this reason, we have chosen to
+leave this functionality unimplemented in the reference implementation.
+
+-- Implementation of dat_cr_handoff
+
+Given that the change of service point involves a change in associated
+connection qualifier, which has been advertised at the underlying
+Verbs/driver level, it is not clear how to implement this function
+cleanly within the reference implementation. We thus choose to defer
+it for implementation by the hardware vendors.
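+
+Referring back to the cookie-to-handle mapping described under
+"Shared memory implementation" above, the following fragment sketches
+the insert-or-lookup semantics the mapping must provide, reusing the
+dapls_mrdb_* interface from the shared memory design document. It is
+an illustration of the requirement, not code from either
+implementation:
+
+/* The first instance to insert the cookie becomes the master and
+ * registers the region normally; every other instance retrieves the
+ * MR handle published by the master and registers shared. */
+for (;;)
+{
+    if ( dapls_mrdb_record_insert( cookie ) == DAT_SUCCESS )
+    {
+        /* master: register normally, then publish the MR handle
+         * via dapls_mrdb_record_update */
+        break;
+    }
+    if ( dapls_mrdb_record_query( cookie, &mr_handle ) == DAT_SUCCESS )
+    {
+        /* slave: register shared against mr_handle */
+        break;
+    }
+    /* the record exists but its MR handle is not yet published;
+     * try again */
+}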
+
+=========================
+Performance Optimizations
+=========================
+
+
+Reduction of context switches
+-----------------------------
+Currently, three context switches are required on the standard
+uDAPL notification path. These are:
+  * Invocation of the hardware interrupt handler in the kernel.
+    Through this method the hardware notifies the CPU of
+    completion queue entries for operations that have requested
+    notification.
+  * Unblocking of the per-process IB provider service thread
+    blocked within the driver. This thread returns to
+    user-space within its process, where it causes
+  * Unblocking of the user thread blocked within the uDAPL entry
+    point (dat_evd_wait() or dat_cno_wait()).
+
+There are several reasons for the high number of context switches,
+specifically:
+  * The fact that the IB interface delivers notifications
+    through callbacks rather than through unblocking waiting
+    threads; this does not match uDAPL's blocking interface.
+  * The fact that the IB interface for blocking on a CQ doesn't
+    have a threshold. If it did, we could often convert a
+    dat_evd_wait() into a wait on that CQ.
+  * The lack of a parallel concept to the CNO within IB.
+
+These are all areas in which closer integration between the IB
+verbs/driver and uDAPL could allow the user thread to wait within the
+driver. This would allow the hardware interrupt thread to directly
+unblock the user thread, saving a context switch.
+
+The specific optimizations considered here are:
+  * Allow blocking on an IB CQ. This would allow removal of the
+    excess context switch for dat_evd_wait() in cases where
+    there is a 1-to-1 correspondence between an EVD and a CQ and
+    no threshold was passed to dat_evd_wait().
+  * Allow blocking on an IB CQ to take a threshold argument.
+    This would allow removal of the excess context switch for
+    dat_evd_wait() in cases where there is a 1-to-1
+    correspondence between an EVD and a CQ regardless of the
+    threshold value.
+  * Give the HCA device driver knowledge of and access to the
+    implementation of the uDAPL EVD, and implement dat_evd_wait()
+    as an ioctl blocking within the device driver. This would
+    allow removal of the excess context switch in all cases for
+    a dat_evd_wait().
+  * Give the HCA device driver knowledge of and access to the
+    implementation of the uDAPL CNO, and implement dat_cno_wait()
+    as an ioctl blocking within the device driver. This would
+    allow removal of the excess context switch in all cases for
+    a dat_cno_wait(), and could improve performance for blocking
+    on OS Proxy Wait Objects related to the uDAPL CNO.
+
+See the DAPL Event Subsystem Design (doc/dapl_event_design.txt) for
+more details on this class of optimization.
+
+========================
+Reducing Copying of Data
+========================
+
+There are two primary places in which a closer integration between the
+IB verbs/driver and the uDAPL implementation could reduce copying
+costs:
+
+-- Avoidance of s/g list copy on posting
+
+Currently there are two copies involved in posting a data transfer
+request in uDAPL:
+  * From the user context to uDAPL. This copy is required
+    because the scatter/gather list formats for uDAPL and IB
+    differ; a copy is required to change formats.
+  * From uDAPL to the WQE. This copy is required because IB
+    specifies that all user parameters are owned by the user
+    upon return from the IB call, and therefore IB must keep its
+    own copy for use during the data transfer operation.
+
+If the uDAPL data transfer dispatch operations were implemented
+directly on the IB hardware, these copies could be combined.
+
+-- Avoidance of Event data copy from CQ to EVD
+
+Currently there are two copies of data involved in receiving an event
+in a standard data transfer operation:
+  * From the CQ on which the IB completion occurs to an event
+    structure held within the uDAPL EVD. This is because the IB
+    verbs provide no way to discover how many elements have been
+    posted to a CQ. This copy is not
+    required for dat_evd_dequeue. However, dat_evd_wait
+    requires this copy in order to correctly implement the
+    threshold argument; the callback must know when to wake up
+    the waiting thread. In addition, copying all CQ entries
+    (not just the one to be returned) is necessary before
+    returning from dat_evd_wait in order to set the *nmore OUT
+    parameter.
+  * From the EVD into the event structure provided in the
+    dat_evd_wait() call. This copy is required because of the
+    DAT specification, which requires a user-provided event
+    structure to the dat_evd_wait() call in which the event
+    information will be returned. If dat_evd_wait() were
+    instead, for example, to hand back a pointer to the already
+    allocated event structure, that would eventually require the
+    event subsystem to allocate more event structures. This is
+    avoided in the critical path.
+
+A tighter integration between the IB verbs/driver and the uDAPL
+implementation would allow the avoidance of the first copy.
+Specifically, providing a way to get information as to the number of
+completions on a CQ would allow avoidance of that copy.
+
+See the uDAPL Event Subsystem Design for more details on this class of
+optimization.
+
+====================
+Elimination of Locks
+====================
+
+Currently there is only a single lock used on the critical path in the
+reference implementation, in dat_evd_wait() and dat_evd_dequeue().
+This lock is in place because the ib_completion_poll() routine is not
+defined as thread safe, and both dat_evd_wait() and dat_evd_dequeue()
+are. If there were some way for a vendor to make ib_completion_poll()
+thread safe without a lock (e.g. if the appropriate hardware/software
+interactions were naturally safe against races), and certain other
+modifications were made to the code, the lock might be removed.
+
+The modifications required are:
+  * Making racing consumers of DAPL ring buffers thread safe.
+    This is possible, but somewhat tricky; the key is to make
+    the interaction with the producer occur through a count of
+    elements on the ring buffer (atomically incremented and
+    decremented), but to dequeue elements with a separate atomic
+    pointer increment. The atomic modification of the element
+    count synchronizes with the producer and acquires the right
+    to do an atomic pointer increment to get the actual data.
+    The atomic pointer increment synchronizes with the other
+    consumers and actually gets the buffer data.
+  * The optimization described above for avoiding copies from
+    the CQ to the DAPL EVD Event storage queue. Without this
+    optimization, a potential race between dat_evd_dequeue() and
+    dat_evd_wait() exists where dat_evd_dequeue() may return an
+    element further along in the event stream than the one
+    returned from dat_evd_wait():
+
+      dequeuing thread                   waiting thread
+      ----------------                   --------------
+      dat_evd_dequeue() called
+
+      EVD state checked; ok for
+      dat_evd_dequeue()
+                                         dat_evd_wait() called
+
+                                         State changed to reserve EVD
+                                         for dat_evd_wait()
+
+                                         Partial copy of CQ to EVD
+                                         Event store
+
+      Dequeue of CQE from CQ
+
+                                         Completion of copy of CQ to
+                                         EVD Event store
+
+                                         Return of first CQE copied to
+                                         EVD Event store.
+
+      Return of this CQE from the middle
+      of the copied stream.
+
+    If no copy occurs, dat_evd_wait() and dat_evd_dequeue() may
+    race, but if all operations on which they may race (access
+    to the EVD Event Queue and access to the CQ) are thread
+    safe, this race will cause no problems.
+
+============================
+Eliminating Subroutine Calls
+============================
+
+This area is the simplest, as there are many DAPL calls on the
+critical path that are very thin veneers on top of their IB
+equivalents. All of these calls are candidates for being merged with
+those IB equivalents. In cases where there are other optimizations
+that may be achieved with the call described above (e.g. within the
+event subsystem, the data transfer operation posting code), that call
+is not mentioned here:
+  * dat_pz_create
+  * dat_pz_free
+  * dat_pz_query
+  * dat_lmr_create
+  * dat_lmr_free
+  * dat_lmr_query
+  * dat_rmr_create
+  * dat_rmr_free
+  * dat_rmr_query
+  * dat_rmr_bind
+
+
diff --git a/branches/Ndi/ulp/dapl/doc/dat.conf b/branches/Ndi/ulp/dapl/doc/dat.conf
new file mode 100644
index 00000000..eba45e7a
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dat.conf
@@ -0,0 +1,11 @@
+#
+# Generic DAT 1.1 configuration file
+#
+
+# Each entry should have the following fields:
+#   ia_name api_version thread_safety default \
+#   lib_path driver_version ia_params platform_params
+
+# This is the dapl provider configuration for HCA0 port 1
+IbalHca0 u1.1 nonthreadsafe default /usr/lib/libdapl.so.0.0 mv_dapl.1.1 "IbalHca0 1" ""
+
diff --git a/branches/Ndi/ulp/dapl/doc/dat_environ.txt b/branches/Ndi/ulp/dapl/doc/dat_environ.txt
new file mode 100644
index 00000000..638ba9dc
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/dat_environ.txt
@@ -0,0 +1,45 @@
+              DAT Environment Guide v. 0.01
+              -----------------------------
+
+The following environment variables affect the behavior of the DAT
+library:
+
+
+DAT_OVERRIDE
+------------
+  Value used as the static registry configuration file, overriding the
+  default location, /etc/dat.conf
+
+  Example: setenv DAT_OVERRIDE /path/to/my/private.conf
+
+
+DAT_DBG_TYPE
+------------
+
+  Value specifies which parts of the registry will print debugging
+  information; valid values are
+
+    DAT_OS_DBG_TYPE_ERROR        = 0x1
+    DAT_OS_DBG_TYPE_GENERIC      = 0x2
+    DAT_OS_DBG_TYPE_SR           = 0x4
+    DAT_OS_DBG_TYPE_DR           = 0x8
+    DAT_OS_DBG_TYPE_PROVIDER_API = 0x10
+    DAT_OS_DBG_TYPE_CONSUMER_API = 0x20
+    DAT_OS_DBG_TYPE_ALL          = 0xff
+
+  or any combination of these. For example, you can use 0xC to get both
+  static and dynamic registry output.
+
+  Example: setenv DAT_DBG_TYPE 0xC
+
+DAT_DBG_DEST
+------------
+
+  Value sets the output destination; valid values are
+
+    DAT_OS_DBG_DEST_STDOUT = 0x1
+    DAT_OS_DBG_DEST_SYSLOG = 0x2
+    DAT_OS_DBG_DEST_ALL    = 0x3
+
+  For example, 0x3 will output to both stdout and the syslog.
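+
+As a sketch of how a DAT library might read such a variable (the
+function name and the fallback default here are illustrative, not
+taken from the registry source):
+
+#include <stdlib.h>
+
+/* Parse DAT_DBG_TYPE; base 0 lets strtoul accept hex values
+ * such as 0xC as well as decimal. */
+static unsigned long
+dat_os_dbg_type_from_env ( void )
+{
+    const char *val = getenv( "DAT_DBG_TYPE" );
+
+    if ( NULL == val )
+        return 0x1;    /* assume errors only when unset */
+
+    return strtoul( val, NULL, 0 );
+}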
+
diff --git a/branches/Ndi/ulp/dapl/doc/ibhosts b/branches/Ndi/ulp/dapl/doc/ibhosts
new file mode 100644
index 00000000..792fb503
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/ibhosts
@@ -0,0 +1,3 @@
+dat-linux3-ib0 0xfe80000000000000 0x0001730000003d11
+dat-linux5-ib0 0xfe80000000000000 0x0001730000003d91
+dat-linux6-ib0 0xfe80000000000000 0x0001730000009791
diff --git a/branches/Ndi/ulp/dapl/doc/mv_dapl_readme.txt b/branches/Ndi/ulp/dapl/doc/mv_dapl_readme.txt
new file mode 100644
index 00000000..b7e7dc3f
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/mv_dapl_readme.txt
@@ -0,0 +1,226 @@
+
+Source Forge Notes
+
+1) Log onto the Source Forge site
+   a. http://infiniband.sourceforge.net
+
+2) Download the "BitKeeper" software
+   a. Click on Bitkeeper Repository link (under Source Forge Services)
+   b. Click on download link
+   c. Click on Download Bitkeeper
+   d. Determine proper platform and complete download
+   e. Note: You must fill in the form and provide an email address.
+      Login info for the bitmover site will be emailed to you
+   f. http://www.bitmover.com/download
+      * login: bitkeeper
+      * password: get bitkeeper
+
+3) After you have downloaded the BitKeeper software, change the mode of the
+   file to make it executable (i.e., chmod a+x *.bin)
+   a. chmod a+x 3.0.1-x86-glibc22-linux.bin
+   b. ./3.0.1-x86-glibc22-linux.bin
+
+4) The above process creates an executable called "bk". It also creates
+   a symbolic link from bk to /usr/bin/bk
+
+5) Create a directory where you would like to place the SF project
+   a. Ex. mkdir /usr/sf-iba
+
+6) Use the bk program to pick up a clone of the latest Source Forge release
+   for iba
+   a. cd /usr/sf-iba
+   b. bk clone http://project.bkbits.net/repository
+      where: project = infiniband, repository = iba; producing:
+   c. bk clone http://infiniband.bkbits.net/iba
+
+7) Running "bk" will create a subfolder in your current working directory
+   a. => /usr/sf-iba/iba
+
+8) Add the Mellanox source to the newly downloaded source tree
+   a. cd /usr/sf-iba/iba
+   b. Download and copy the mellanox_xxx.tgz file to this directory and
+      unzip/untar the file. Note: the archive will place files into base
+      and create linux folders. It is very important that you place the
+      mellanox files into the correct directory structure
+   c. tar xzvf mellanox_xxx.tgz. This will create the following:
+      * /usr/sf-iba/iba/base/linux/hca/mellanox
+      * /usr/sf-iba/iba/linux/run
+
+9) Create a build tree for the project using the "mktree" command.
+   This will pull out all sources from the base directory to a directory
+   of your choosing
+   a. cd /usr/sf-iba/iba/base/linux/utils
+   b. ./mktree -source /usr/sf-iba/iba -target /usr/sf-iba/iba
+      * mktree creates 2 new directories, linux and linuxuser, in
+        /usr/sf-iba/iba
+
+10) Rebuild the kernel and boot with the new kernel (example using the
+    linux-2.4.18-10 kernel tree)
+    a. cd /usr/src/linux-2.4.18-10
+    b. make xconfig
+       * make any changes to fit your hardware configuration:
+         o SMP/UP option (YES for Processor type and features/Symmetric
+           multi-processing support buttons)
+         o network driver (YES for Network device support/Ethernet
+           (10 or 100Mbit)/Ethernet (10 or 100Mbit)... buttons
+         o scsi driver (YES for SCSI support/ SCSI low-level drivers/...
+           buttons)
+         o local file system (YES for File systems/Ext3 journaling
+           file system support - Second extended fs support buttons)
+       * hit "save and exit" button
+    c. Patch the kernel to support IPoIB with larger MAX_ADDR_LEN
+       * edit /usr/src/linux-2.4.18-10/include/linux/netdevice.h to
+         change MAX_ADDR_LEN = 8 to = 32
+       * edit /usr/src/linux-2.4.18-10/net/core/dev.c to change:
+         The SIOCGIFHWADDR case from:
+           memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr, MAX_ADDR_LEN);
+         to
+           memcpy(ifr->ifr_hwaddr.sa_data, dev->dev_addr,
+                  min((unsigned char) sizeof(ifr->ifr_hwaddr.sa_data),
+                      dev->addr_len));
+
+         The SIOCSIFHWBROADCAST case from:
+           memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data, MAX_ADDR_LEN);
+         to
+           memcpy(dev->broadcast, ifr->ifr_hwaddr.sa_data,
+                  min((unsigned char) sizeof(ifr->ifr_hwaddr.sa_data),
+                      dev->addr_len));
+    d. make dep
+    e. make bzImage
+    f. make install
+    g. verify /etc/lilo.conf (or /etc/grub.conf) to reboot the system
+       with the new kernel
+
+11) Install the Mellanox MST and MSDK
+    a. install the mst driver
+    b. install the MSDK
+    c. verify the installation (at least run vapi start)
+
+12) The next step is to build all the drivers for the stack (including
+    the Mellanox module)
+    a. cd /usr/sf-iba/iba/linux
+    b. edit Makeconf.inc
+       * edit LINUX_VERSION = 2.4.18-10
+       * edit REDHAT=1 if you run the original kernel from the RH
+         installation. In our case, we are running a custom kernel, so
+         we leave it as the default
+       * edit LINUX_SRC. In our case, we edited LINUX_VERSION; therefore,
+         we leave this as the default
+    c. edit Makefile.config
+       * Change BUILD_SMP line to: BUILD_SMP =0 (if kernel is UP)
+       * Change BUILD_SMP line to: BUILD_SMP =1 (if kernel is SMP)
+    d. Edit Makefile; change "MLNX=n" to "MLNX=y", and "MODEL,SHIM=y" to
+       "MODEL,SHIM=n"
+
+13) Build/Compile kernel driver components by running make from the top
+    level directory
+    a. cd /usr/sf-iba/iba/linux
+    b. make debug (or free)
+    c. All the object files are in
+       * ~bin/$(ARCH)/$(LINUX_VERSION)/$(MP_MODE)/$BLD
+       * in our case ARCH=x86, LINUX_VERSION=2.4.18-10, MP_MODE=up, BLD=debug
+
+14) Build/Compile user mode driver components.
+    a. cd /usr/sf-iba/iba/linuxuser/iba
+    b. make debug (or free)
+    c. All the executable files (opensm, alts...) are in
+       /usr/sf-iba/iba/linuxuser/bin/$(ARCH)/$(LINUX_VERSION)/bin/$(BLD)
+    d. All the libraries are in
+       /usr/sf-iba/iba/linuxuser/bin/$(ARCH)/$(LINUX_VERSION)/lib/$(BLD)
+    e. In our case ARCH=x86, LINUX_VERSION=2.4.18-10, BLD=debug
+    f. create the symbolic links for liballib, libcomplib and libmlnx_uvp
+       * cd /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/lib/debug
+       * ln -s liballib.so.0.0 liballib.so
+       * ln -s libcomplib.so.0.0 libcomplib.so
+
+15) Run the stack with the "ibal" script, which is the equivalent of the
+    "vapi" script
+    a. cd /usr/sf-iba/iba/linux/run
+    b. edit ibal
+       * edit lines 35,36.
+       * Assuming that we are running a UP kernel and debug mode, then
+         o line 35 should be MODPATH_I=$IBAL_PATH/bin/x86/$KERVER/up/debug
+         o line 36 should be
+           MODPATH_TV=$IBAL_PATH/drivers/iba/hca/mellanox/thh/obj_linux_x86_up
+       * Assuming that we are running an SMP kernel and free mode, then
+         o line 35 should be MODPATH_I=$IBAL_PATH/bin/x86/$KERVER/smp/free
+         o line 36 should be
+           MODPATH_TV=$IBAL_PATH/drivers/iba/hca/mellanox/thh/obj_linux_x86_smp
+    c. cd /usr/sf-iba/iba/linux and issue the command "run/ibal start"
+
+16) Set up the linking library path. There are two ways.
+    a. Edit /root/.bashrc and add the following line
+       * export LD_LIBRARY_PATH=
+         /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/lib/debug
+    b. Or
+       * edit /etc/ld.so.conf and add the following line
+         /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/lib/debug
+       * run ldconfig
+
+17) Run the OPENSM
+    a. The opensm executable is located in the
+       /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/bin/debug or
+       /usr/sf-iba/iba/linuxuser/iba/opensm directories
+    b. cd /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/bin/debug
+    c. ./opensm
+    d. Whew... finally
+    e. leave opensm running (sweeping mode)
+
+18) Run IPoIB
+    a. cd /usr/sf-iba/iba/linux/bin/x86/2.4.18-10/up/debug
+    b. insmod ipoib.o
+    c. ifconfig -a (you will see ib01 and ib02 pop up)
+    d. ifconfig ib01 11.0.0.1
+    e. ping 11.0.0.1
+
+19) Run uDAPL
+    a. edit /etc/hosts to add an IP address for every DAPL node in the
+       cluster, and distribute the file to every node in the cluster. The
+       IP address can be the same as the IPoIB IP address, as long as the
+       node name ends with "xx-ibX". Even if you are not running IPoIB,
+       you have to add an Interface Adapter address (IP address) for the
+       DAPL IA. For now, use one IP address for each DAPL IA (for each HCA)
+
+       Example: local node is mtilab10
+
+       10.2.2.10  mtilab10      ==> this is ethernet 0
+       10.2.2.11  mtilab11
+       192.0.0.10 mtilab10-ib0  ==> this is IPoIB or Interface Adapter
+                                    address for local DAPL IA 1
+       192.0.0.14 mtilab10-ib0a ==> this is IPoIB or Interface Adapter
+                                    address for local DAPL IA 1 port 1
+       192.0.0.12 mtilab10-ib1  ==> this is IPoIB or Interface Adapter
+                                    address for local DAPL IA 2
+       192.0.0.11 mtilab11-ib0  ==> this is IPoIB or Interface Adapter
+                                    address for remote DAPL IA
+
+    b. Copy mv_dapl/doc/dat.conf to /etc/dat.conf. Please read
+       mv_dapl/doc/dapl_registry_design.txt for more information on how
+       to edit /etc/dat.conf correctly
+
+       NOTES: If you compile dapltest with EXPLICIT_LINK=1 then dapltest
+              is explicitly linked to the libdapl.so library; therefore,
+              you have to uncomment the entry in dat.conf or not have
+              dat.conf in /etc.
+
+    c. The dapltest executable is located at
+       /usr/sf-iba/iba/linuxuser/bin/x86/2.4.18-10/up(smp)/debug(free) or
+       /usr/sf-iba/iba/linuxuser/iba/mv_dapl/test/udapl/dapltest
+
+    d. Run the dapltest testsuite for 2 nodes (mtilab10-ib0 and mtilab11-ib0)
+       * At both nodes
+         o start the driver (ibal start)
+         o cd /usr/sf-iba/iba/linuxuser/iba/mv_dapl/test/udapl/dapltest
+
+       * At mtilab10-ib0
+         o ./srv.sh ==> start server
+
+       * At mtilab11-ib0
+         o ./bw.sh mtilab10-ib0        ==> run bw test
+         o ./lat_poll.sh mtilab10-ib0  ==> run latency polling mode test
+         o ./lat_block.sh mtilab10-ib0 ==> run latency blocking mode test
+         o ./cl.sh mtilab10-ib0        ==> run transaction tests
+         o ./regress.sh mtilab10-ib0   ==> run transaction regression test
+         o ./quit.sh mtilab10-ib0      ==> instruct the server to quit
+
+       * both can run the limit test ./lim.sh (no need to run the server)
+
+    e. Please refer to dapltest.txt for more test options
diff --git a/branches/Ndi/ulp/dapl/doc/mv_dapl_relnotes.txt b/branches/Ndi/ulp/dapl/doc/mv_dapl_relnotes.txt
new file mode 100644
index 00000000..5702eea8
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/doc/mv_dapl_relnotes.txt
@@ -0,0 +1,167 @@
+               Release Notes for uDAPL/IBAL
+
+RELEASE NOTES
+
+   We would like to officially announce the availability of a
+   public source implementation of uDAPL over the IBAL API.
+
+   This uDAPL source code is derived from the Source Forge DAPL foundry, see
+
+       http://sourceforge.net/projects/dapl
+
+BETA RELEASE July 7, 2004
+
+1. What is new from the Alpha 2.0 Release
+
+   Performance Improvements, additional functionality, bug fixes.
+   Tested with SourceForge BK Changeset 1.275.
+
+   * DAT 1.1 compliance
+   * dat_evd_resize implemented
+   * async callbacks for CQ, QP, and IA implemented
+   * dat_psp_create_any improvements, no longer require kernel IOCTL.
+
+   * Performance enhancements: MTU, removed malloc in speed path,
+     disconnect processing
+   * cleaned up many disconnect/close serialization issues
+
+   * dynamic DTO-EVD sizing based on request/receive DTO depths
+   * enable re-use of an EP, with dap_ep_reset, after EP connection
+     termination.
+   * enhancements to dat_evd_wait to prevent missing completions
+   * dapls_ib_post_send: add refs/checking for the disconnecting state,
+     avoiding a lost completion
+   * dapl and dapltest build warnings resolved
+
+2. Expected Bugs and status
+
+
+ALPHA 2.0 RELEASE Sept 25, 2003
+
+1. What is new from the Alpha 1.1 Release
+
+   This release is equivalent to the Beta 1.09 uDAPL Source Forge
+   Release. It basically inherits the feature sets of the Source Forge
+   Beta 1.09 Release plus the extra feature sets coming from the previous
+   Alpha 1.1 Release.
+
+   Please refer to the Beta 1.09 uDAPL Source Forge Release README for
+   further information.
+
+
+ALPHA 1.1 RELEASE Jun 20, 2003
+
+1. Feature sets
+
+   This release is equivalent to the Alpha 10-16 uDAPL Source Forge
+   Releases. It basically inherits the feature sets of those Releases,
+   with the following features removed or added:
+
+   * Shared memory design and reference implementation
+
+   * Map IA per HCA and/or individual port work.
+
+   * Retry connection after a reject
+
+   * Global LMR context hash table per HCA data structure
+
+   * Implementing dat_psp_create_any functionality
+
+   * Partial compliance to DAT 1.1
+
+2. Expected Bugs and status
+
+   * dapltest transaction test server fails to RDMA Read/Write to the
+     client due to a timing race issue; however, the client can do RDMA
+     Read/Write to the server.
+     --> fixed
+
+   * While the dapltest server and client set up connections, abnormally
+     killing dapltest (ctrl+c) on the server and/or client, then
+     unloading the driver, will crash the system.
+     --> half fixed. Sometimes we successfully unload the driver,
+         sometimes we cannot because resources are still dangling in the
+         kernel (lsmod shows that module ibal is still in use by other
+         modules)
+     --> ChangeSet 1.112 targeted this bug (# 745700); however, it
+         introduced another bug in which dapltest fails to retry a
+         connection after a reject.
+     --> With ChangeSet 1.110, the test runs fine except that unloading
+         the driver will crash the system.
+
+   * dapltest transaction test with multiple threads and multiple
+     Endpoints will have a segmentation fault at the end of the test when
+     multiple threads exit
+     --> still open
+
+   * dapltest transaction test with multiple threads (30) and multiple
+     Endpoints (100 for each thread) will fail to set up connections
+     --> need to investigate and collect traces. Preliminary assessment
+         shows MAD drops or the SA/SM failing to respond to queries.
+
+
+
+ALPHA 1.0 RELEASE May 29, 2003
+
+1. Feature sets
+
+   This release is derived from the Alpha 9.0 uDAPL Source Forge
+   Release. It basically inherits the feature sets of the Alpha 9.0 SF
+   Release; moreover, it has the following enhancements
+
+   * Name Service and Address Resolution using the SA/SM query mechanism.
+     Moving away from troublesome IPoIB and file-based mechanisms
+
+   * Introducing dat_cr_handoff functionality.
+
+   * Creating infrastructure to map an IA per HCA and/or individual port
+     of the HCA. This feature can be selected for different types of
+     applications to obtain high bandwidth or high availability.
+
+   * Creating infrastructure to operate in loopback mode when the fabric
+     is down (without SA/SM)
+
+2. Expected bugs
+
+   * dapltest transaction test server fails to RDMA Read/Write to the
+     client due to a timing race issue; however, the client can do RDMA
+     Read/Write to the server.
+
+   * dapltest transaction test with multiple threads and multiple
+     Endpoints will have a segmentation fault at the end of the test when
+     multiple threads exit
+
+   * While the dapltest server and client set up connections, abnormally
+     killing dapltest (ctrl+c) on the server and/or client, then
+     unloading the driver, will crash the system.
+
+3. Future Enhancements
+
+   * Performance enhancement. Test and measure performance when the OS
+     bypass functionality of IBAL is available
+
+   * Fixing the bugs listed above.
+
+   * Selectively migrate to the Source Forge Alpha 10.0 - 15.0 Releases
+
+   * Shared memory region: mapping the peer-to-peer DAPL shared memory
+     region model to the master-slave InfiniBand shared memory region
+     model.
+
+   * Implement dat_psp_create_any() functionality
+
+   * Conforming to the DAT 1.1 Specification
+
+4. Performance Statistics
+
+   * The performance of this release is limited due to the unavailability
+     of the OS kernel bypass functionality of IBAL - Mellanox uVP/kVP
+
+   * Testing with the following configuration:
+     o PIII 1 GHz, PCI 64/66, 512 MB, ServerWorks Grand Champion LE chipset
+     o Tavor A0 back to back
+     o Mellanox MSDK 0.0.6, FW 1.18
+     o 2.4.18 & 2.4.18-10 UP/SMP kernels
+
+     Bandwidth:             210 MB/s for 64-KB message
+     Latency polling mode:  26.2 usecs for 4-byte message
+     Latency blocking mode: 61.2 usecs for 4-byte message
+
+   * With a Xeon 2.4 GHz, PCI-X 133 MHz, ServerWorks chipset hardware
+     configuration and OS bypass enabled in SW, we expect to achieve
+     700-750 MB/s, 15 usecs for polling latency, and 35 usecs for
+     blocking latency
+
diff --git a/branches/Ndi/ulp/dapl/test/dirs b/branches/Ndi/ulp/dapl/test/dirs
new file mode 100644
index 00000000..2d7badc4
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/dirs
@@ -0,0 +1 @@
+DIRS=udapl
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_defaults b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_defaults
new file mode 100644
index 00000000..706d8296
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_defaults
@@ -0,0 +1,12 @@
+#
+# Defaults for dapltest (chosen for the author's convenience)
+#
+setenv DBG " "          # to debug, or not to debug, that is the question
+setenv THR 1            # number of threads to use
+setenv EPS 1            # number of EPs per thread
+setenv ITER 10          # number of iterations over the set of OPs
+setenv HOST dat-linux3  # hostname running the server side of dapltest
+setenv DEV JniIbdd0     # name of IA device
+setenv CMD SR           # OP command: SR, RW, RR
+setenv SZ 4096          # OP buffer segment size
+setenv SEGS 1           # OP number of segments
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_onetest b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_onetest
new file mode 100644
index 00000000..a56089e0
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_onetest
@@ -0,0 +1,35 @@
+#
+# time format: %U user %S sys %E elapsed %P CPU
+#              %w block (voluntary, eg: dat_evd_wait)
+#              %c cswitch (involuntary, eg: time slice expired)
+#
+# dapltest: whatever is requested, plus 2 ops for inter-loop sync
+#
+/usr/bin/time -f "%U user %S sys %E real %P cpu %w block" \
+dapltest -T T $DBG -t $THR -w $EPS -i $ITER -s $HOST -D $DEV \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS client $CMD $SZ $SEGS \
+        client $CMD $SZ $SEGS 
client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client $CMD $SZ $SEGS client $CMD $SZ $SEGS \ + client SR 8 1 -f server SR 8 1 -f diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_perf.csh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_perf.csh new file mode 100644 index 00000000..88d67aa9 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/.DT_perf.csh @@ -0,0 +1,42 @@ +#!/bin/csh -f +# +# Run through a number of DAPL test cases +# +set wholename = $0 +set me = $wholename:t +unset wholename +set echo +# +# Set Defaults +# +source .DT_defaults +# +# Iterate over the three transfer methods +# +foreach CMD ( SR RW RR) + # + # Iterate over the sizes of interest + # + foreach SZ ( 8 256 4096 65536 262144 ) + # + # XXX Following code is here because server side is run-once; + # XXX when fixed, move out of loops, above. (BURT 79002) + # Run the server asynchronously + # + if ($?DAPL_LOOPBACK) then + dapltest -T S -D $DEV & + else + rsh -n $HOST dapltest -T S -D $DEV & + endif + # + # Run the client side here, 'inline' + # + echo ===== $CMD $SZ ===== + source .DT_onetest + # + # wait for the async server + # + wait + echo "" + end +end diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/DaplTest_how_2.txt b/branches/Ndi/ulp/dapl/test/udapl/dapltest/DaplTest_how_2.txt new file mode 100644 index 00000000..8085e39c --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/DaplTest_how_2.txt @@ -0,0 +1,292 @@ +NAME + + dapltest - test for the Direct Access Programming Library (DAPL) + +DESCRIPTION + + Dapltest is a set of tests developed to exercise, characterize, + and verify the DAPL interfaces during development and porting. + At least two instantiations of the test must be run. One acts + as the server, fielding requests and spawning server-side test + threads as needed. Other client invocations connect to the + server and issue test requests. + + The server side of the test, once invoked, listens continuously + for client connection requests, until quit or killed. Upon + receipt of a connection request, the connection is established, + the server and client sides swap version numbers to verify that + they are able to communicate, and the client sends the test + request to the server. If the version numbers match, and the + test request is well-formed, the server spawns the threads + needed to run the test before awaiting further connections. + +USAGE + + dapltest [ -f script_file_name ] + [ -T S|Q|T|P|L ] [ -D device_name ] [ -d ] [ -R HT|LL|EC|PM|BE ] + + With no arguments, dapltest runs as a server using default values, + and loops accepting requests from clients. The -f option allows + all arguments to be placed in a file, to ease test automation. 
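+
+    For instance, a script file passed with -f simply holds the same
+    whitespace-separated tokens that would otherwise appear on the
+    command line.  A hypothetical transaction script (the host and
+    device names here are placeholders, not defaults) might look like:
+
+        -T T -s server1 -D ibnic0 -i 100
+        client SR 4096 2 server SR 4096 2
+
+    which could then be run as:  dapltest -f trans.script
+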
+ The following arguments are common to all tests:
+
+    [ -T S|Q|T|P|L ]    Test function to be performed:
+                            S - server loop
+                            Q - quit, client requests that server
+                                wait for any outstanding tests to
+                                complete, then clean up and exit
+                            T - transaction test, transfers data between
+                                client and server
+                            P - performance test, times DTO operations
+                            L - limit test, exhausts various resources,
+                                runs in client w/o server interaction
+                        Default: S
+
+    [ -D device_name ]  Specifies the name of the device (interface adapter).
+                        Default: host-specific, look for DT_MdepDeviceName
+                                 in dapl_mdep.h
+
+    [ -d ]              Enables extra debug verbosity, primarily tracing
+                        of the various DAPL operations as they progress.
+                        Repeating this parameter increases debug spew.
+                        Errors encountered result in the test spewing some
+                        explanatory text and stopping; this flag provides
+                        more detail about what led up to the error.
+                        Default: zero
+
+    [ -R BE ]           Indicates the quality of service (QoS) desired.
+                        Choices are:
+                            HT - high throughput
+                            LL - low latency
+                            EC - economy (neither HT nor LL)
+                            PM - premium
+                            BE - best effort
+                        Default: BE
+
+USAGE - Quit test client
+
+    dapltest [Common_Args] [ -s server_name ]
+
+    Quit testing (-T Q) connects to the server to ask it to clean up and
+    exit (after it waits for any outstanding test runs to complete).
+    In addition to being more polite than simply killing the server,
+    this test exercises the DAPL object teardown code paths.
+    There is only one argument other than those supported by all tests:
+
+    -s server_name      Specifies the name of the server interface.
+                        No default.
+
+
+USAGE - Transaction test client
+
+    dapltest [Common_Args] [ -s server_name ]
+             [ -t threads ] [ -w endpoints ] [ -i iterations ] [ -Q ]
+             [ -V ] [ -P ] OPclient OPserver [ op3, ... ]
+
+    Transaction testing (-T T) transfers a variable amount of data between
+    client and server.  The data transfer can be described as a sequence of
+    individual operations; that entire sequence is transferred 'iterations'
+    times by each thread over all of its endpoint(s).
+
+    The following parameters determine the behavior of the transaction test:
+
+    -s server_name      Specifies the hostname of the dapltest server.
+                        No default.
+
+    [ -t threads ]      Specify the number of threads to be used.
+                        Default: 1
+
+    [ -w endpoints ]    Specify the number of connected endpoints per thread.
+                        Default: 1
+
+    [ -i iterations ]   Specify the number of times the entire sequence
+                        of data transfers will be made over each endpoint.
+                        Default: 1000
+
+    [ -Q ]              Funnel completion events into a CNO.
+                        Default: use EVDs
+
+    [ -V ]              Validate the data being transferred.
+                        Default: ignore the data
+
+    [ -P ]              Turn on DTO completion polling.
+                        Default: off
+
+    OP1 OP2 [ OP3, ... ]
+                        A single transaction (OPx) consists of:
+
+                        server|client   Indicates who initiates the
+                                        data transfer.
+
+                        SR|RR|RW        Indicates the type of transfer:
+                                        SR  send/recv
+                                        RR  RDMA read
+                                        RW  RDMA write
+                                        Defaults: none
+
+                        [ seg_size [ num_segs ] ]
+                                        Indicates the amount and format
+                                        of the data to be transferred.
+                                        Default: 4096 1
+                                                 (i.e., 1 4KB buffer)
+
+                        [ -f ]          For SR transfers only, indicates
+                                        that a client's send transfer
+                                        completion should be reaped when
+                                        the next recv completion is reaped.
+                                        Sends and receives must be paired
+                                        (one client, one server, and in that
+                                        order) for this option to be used.
+
+    Restrictions:
+
+    Due to the flow control algorithm used by the transaction test, there
+    must be at least one SR OP for both the client and the server (see the
+    example below).
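+
+    As an illustration (host and device names are placeholders), the
+    following OP list is legal because it includes one SR OP in each
+    direction alongside the RDMA operations:
+
+        dapltest -T T -s server1 -D ibnic0 -i 100 \
+                 client RW 4096 1 server RR 2048 4 \
+                 client SR 256 1 server SR 256 1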
+
+    Requesting data validation (-V) causes the test to automatically append
+    three OPs to those specified.  These additional operations provide
+    synchronization points during each iteration, at which all user-specified
+    transaction buffers are checked.  These three appended operations satisfy
+    the "one SR in each direction" requirement.
+
+    The transaction OP list is printed out if -d is supplied.
+
+USAGE - Performance test client
+
+    dapltest [Common_Args] -s server_name [ -m p|b ]
+             [ -i iterations ] [ -p pipeline ] OP
+
+    Performance testing (-T P) times the transfer of an operation.
+    The operation is posted 'iterations' times.
+
+    The following parameters determine the behavior of the performance test:
+
+    -s server_name      Specifies the hostname of the dapltest server.
+                        No default.
+
+    -m b|p              Used to choose either blocking (b) or polling (p).
+                        Default: blocking (b)
+
+    [ -i iterations ]   Specify the number of times the entire sequence
+                        of data transfers will be made over each endpoint.
+                        Default: 1000
+
+    [ -p pipeline ]     Specify the pipeline length; valid arguments are in
+                        the range [0,MAX_SEND_DTOS].  If a value greater than
+                        MAX_SEND_DTOS is requested, the value will be
+                        adjusted down to MAX_SEND_DTOS.
+                        Default: MAX_SEND_DTOS
+
+    OP
+                        An operation consists of:
+
+                        RR|RW           Indicates the type of transfer:
+                                        RR  RDMA read
+                                        RW  RDMA write
+                                        Default: none
+
+                        [ seg_size [ num_segs ] ]
+                                        Indicates the amount and format
+                                        of the data to be transferred.
+                                        Default: 4096 1
+                                                 (i.e., 1 4KB buffer)
+
+USAGE - Limit test client
+
+    Limit testing (-T L) neither requires nor connects to any server
+    instance.  The client runs one or more tests which attempt to
+    exhaust various resources to determine DAPL limits and exercise
+    DAPL error paths.  If no arguments are given, all tests are run.
+
+    Limit testing creates the sequence of DAT objects needed to
+    move data back and forth, attempting to find the limits supported
+    for the DAPL object requested.  For example, if the LMR creation
+    limit is being examined, the test will create a set of
+    {IA, PZ, CNO, EVD, EP} before trying to run dat_lmr_create() to
+    failure using that set of DAPL objects.  The 'width' parameter
+    can be used to control how many of these parallel DAPL object
+    sets are created before beating upon the requested constructor.
+    Use of -m limits the number of dat_*_create() calls that will
+    be attempted, which can be helpful if the DAPL in use supports
+    essentially unlimited numbers of some objects.
+
+    The limit test arguments are:
+
+    [ -m maximum ]      Specify the maximum number of dat_*_create()
+                        attempts.
+                        Default: run to object creation failure
+
+    [ -w width ]        Specify the number of DAPL object sets to
+                        create while initializing.
+                        Default: 1
+
+    [ limit_ia ]        Attempt to exhaust dat_ia_open()
+
+    [ limit_pz ]        Attempt to exhaust dat_pz_create()
+
+    [ limit_cno ]       Attempt to exhaust dat_cno_create()
+
+    [ limit_evd ]       Attempt to exhaust dat_evd_create()
+
+    [ limit_ep ]        Attempt to exhaust dat_ep_create()
+
+    [ limit_rsp ]       Attempt to exhaust dat_rsp_create()
+
+    [ limit_psp ]       Attempt to exhaust dat_psp_create()
+
+    [ limit_lmr ]       Attempt to exhaust dat_lmr_create(4KB)
+
+    [ limit_rpost ]     Attempt to exhaust dat_ep_post_recv(4KB)
+
+    [ limit_size_lmr ]  Probe maximum size dat_lmr_create()
+
+    Default: run all tests
+
+
+EXAMPLES
+
+    dapltest -T S -d -D ibnic0
+
+        Starts a server process with debug verbosity.
+
+    dapltest -T T -d -s winIB -D ibnic0 -i 100 \
+             client SR 4096 2 server SR 4096 2
+
+        Runs a transaction test, with both sides
+        sending one buffer with two 4KB segments,
+        one hundred times; the dapltest server is on host winIB.
+
+    dapltest -T P -d -s winIB -D JniIbdd0 -i 100 SR 4096 2
+
+        Runs a performance test, with the client
+        sending one buffer with two 4KB segments,
+        one hundred times.
+
+    dapltest -T Q -s winIB -D ibnic0
+
+        Asks the dapltest server at host 'winIB' to clean up and exit.
+
+    dapltest -T L -D ibnic0 -d -w 16 -m 1000
+
+        Runs all of the limit tests, setting up
+        16 complete sets of DAPL objects, and
+        creating at most a thousand instances
+        when trying to exhaust resources.
+
+    dapltest -T T -V -d -t 2 -w 4 -i 55555 -s winIB -D ibnic0 \
+             client RW 4096 1 server RW 2048 4 \
+             client SR 1024 4 server SR 4096 2 \
+             client SR 1024 3 -f server SR 2048 1 -f
+
+        Runs a more complicated transaction test,
+        with two threads using four EPs each,
+        sending a more complicated buffer pattern
+        for a larger number of iterations,
+        validating the data received.
+
+
+BUGS (and To Do List)
+
+    Use of CNOs (-Q) is not yet supported.
+
+    Further limit tests could be added.
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.cygwin b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.cygwin
new file mode 100644
index 00000000..a0766ef1
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.cygwin
@@ -0,0 +1,250 @@
+#
+# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+#
+# This Software is licensed under either one of the following two licenses:
+#
+# 1) under the terms of the "Common Public License 1.0" a copy of which is
+#    in the file LICENSE.txt in the root directory. The license is also
+#    available from the Open Source Initiative, see
+#    http://www.opensource.org/licenses/cpl.php.
+# OR
+#
+# 2) under the terms of the "The BSD License" a copy of which is in the file
+#    LICENSE2.txt in the root directory. The license is also available from
+#    the Open Source Initiative, see
+#    http://www.opensource.org/licenses/bsd-license.php.
+#
+# Licensee has the right to choose either one of the above two licenses.
+#
+# Redistributions of source code must retain both the above copyright
+# notice and either one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, either one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+
+#**********************************************************************
+#
+# MODULE: Makefile
+#
+# PURPOSE: Makefile for dapl reference provider for CYGWIN environment
+#
+#*********************************************************************/
+
+
+##############################################################
+# Application variables
+#
+
+LD      = $(CROSS_COMPILE)link.exe
+CC      = $(CROSS_COMPILE)cl.exe
+CPP     = $(CC)
+AR      = $(CROSS_COMPILE)ar
+NM      = $(CROSS_COMPILE)nm
+STRIP   = $(CROSS_COMPILE)strip
+OBJCOPY = $(CROSS_COMPILE)objcopy
+OBJDUMP = $(CROSS_COMPILE)objdump
+RANLIB  = $(CROSS_COMPILE)ranlib
+MKDIR   = mkdir -p
+SED     = /bin/sed
+SHELL   = /bin/sh
+
+TOPDIR = .
+
+OBJ_DIR = $(TOPDIR)
+TARGET_DIR = $(TOPDIR)
+
+INCDIRS := \
+	../../../dat/include
+
+vpath %.h .
${INCDIRS} + + +################################################## +# targets +TAREXES = dapltest + +# data for user libraries +dapltest_SOURCES = $(SRCS) + +SRCS := \ + dapl_bpool.c \ + dapl_client.c \ + dapl_client_info.c \ + dapl_cnxn.c \ + dapl_endian.c \ + dapl_fft_cmd.c \ + dapl_fft_connmgt.c \ + dapl_fft_dataxfer.c \ + dapl_fft_dataxfer_client.c \ + dapl_fft_endpoint.c \ + dapl_fft_hwconn.c \ + dapl_fft_mem.c \ + dapl_fft_pz.c \ + dapl_fft_queryinfo.c \ + dapl_fft_test.c \ + dapl_fft_util.c \ + dapl_getopt.c \ + dapl_limit.c \ + dapl_limit_cmd.c \ + dapl_main.c \ + dapl_mdep.c \ + dapl_memlist.c \ + dapl_netaddr.c \ + dapl_params.c \ + dapl_performance_client.c \ + dapl_performance_cmd.c \ + dapl_performance_server.c \ + dapl_performance_stats.c \ + dapl_performance_util.c \ + dapl_quit_cmd.c \ + dapl_server.c \ + dapl_server_cmd.c \ + dapl_server_info.c \ + dapl_test_data.c \ + dapl_test_util.c \ + dapl_thread.c \ + dapl_transaction_cmd.c \ + dapl_transaction_stats.c \ + dapl_transaction_test.c \ + dapl_transaction_util.c \ + dapl_util.c + + +#################################################### +# compiler options CFLAGS +# + +# common flags +UOPTIONS += /nologo /MDd /W3 /GX /Od /FD /GZ /Gm /Zi + +# common defines +UCOMDEFS += /D "_WINDOWS" /D "_MBCS" /D "_USRDLL" /D "WIN32" /D "_DEBUG" \ + -D_WIN32_WINNT=0x0500 -DWINVER=0x0500 +# other options: /FR /Fd + +# private defines +UPRIVDEFS += /D "__WIN__" /D "__MSC__" /D "__i386__" + +CFLAGS += $(UOPTIONS) $(UCOMDEFS) $(UPRIVDEFS) + +# +# Provider specific CFLAGS definition +# + +########################################################### +# common included libraries +# Provider specific included libraries +# +ULDLIBS = dapl dat Ws2_32 advapi32 + + + + +######################################################### +# link options LDFLAGS +# + +MTARFLAGS= -cr + +TARFLAGS += cr + +# common flags +ULDOPTIONS += /nologo /incremental:no /machine:I386 /debug + +# common directories +ULDDIRS += /LIBPATH:"$(OBJ_DIR)" +ULDDIRS += /LIBPATH:"$(TOPDIR)/../../../dat/udat/Target" +ULDDIRS += /LIBPATH:"$(TOPDIR)/../../../dapl/udapl/Target" + +LDFLAGS += $(ULDOPTIONS) $(ULDDIRS) $(ULDLIBS:%=%.lib) + +# +# Provider specific ULDFLAGS +# +LDFLAGS += /LIBPATH:"$(MTHOME)/lib" + + +############################################################# +# Local functions +# +bsndir = $(notdir $(basename $1)) + +############################################################ +# Common rules +# +define COMPILE +$(CC) -c $(strip ${CFLAGS}) $(strip $(INCDIRS:%=-I%)) $(EXTRA_CFLAGS) $($(@:${OBJ_DIR}/%.obj=%.c_CFLAGS)) /Fo"$@" $< +endef + +define DEF_SET_VAR_SRCS +@echo "$@_VAR_SRCS += $($(basename $(call bsndir,$@))_SOURCES)" >> $@ +endef + +define DEF_SET_VAR_OBJS +@echo "$@_VAR_OBJS += $($(basename $(call bsndir,$@))_OBJECTS)" >> $@ +endef + + + +########################################################################### +# Start rules +# + +all: $(TAREXES:%=${TARGET_DIR}/%) $(TAROBJS:%=${OBJ_DIR}/%.obj) + + +########################################################################## +# Simple objects (user) + +$(TAROBJS:%=${OBJ_DIR}/%.obj): ${OBJ_DIR}/%.obj: %.c + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + $(COMPILE) + + +$(OBJ_DIR)/%.obj: %.c + $(COMPILE) + + +########################################################################## +# Simple executables +# +$(TAREXES:%=$(TARGET_DIR)/%): % : %.mk +$(TAREXES:%=$(TARGET_DIR)/%.mk): Makefile.cygwin + @if [ ! -d $(OBJ_DIR) ]; then mkdir -p $(OBJ_DIR); fi + @if [ ! 
-d $(TARGET_DIR) ]; then mkdir -p $(TARGET_DIR); fi + @echo "# Do not edit. Automatically generated file." > $@ + @ + @${DEF_SET_VAR_OBJS} + @${DEF_SET_VAR_SRCS} + @ + @echo "SOURCES += $($(call bsndir,$@)_SOURCES)" >> $@ + @ + @echo "$(@:%.mk=%): \$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo "$(@:%.mk=%): \$$($@_VAR_OBJS:%.c=$(OBJ_DIR)/%.obj)" >> $@ + @echo -e "\t\$$(LD) \$$(LDFLAGS) \$$(\$$(@:$(OBJ_DIR)/%=%)_LDFLAGS) /out:\"\$$(@:%=%.exe)\" \c" >> $@ + @echo -e "\$$($@_VAR_SRCS:%.c=$(OBJ_DIR)/%.obj) \$$($@_VAR_OBJS) \c" >> $@ + @echo -e "\$$(LDLIBS:%=%.lib) \$$(LIBDIRS:%=/LIBPATH:%) \$$(\$$(@:$(OBJ_DIR)/%=%)_LDLIBS:%=%.lib) \c" >> $@ + @echo "\$$(\$$(@:$(OBJ_DIR)/%=%)_LIBDIRS:%=/LIBPATH:%)" >> $@ + + +ifneq ($(MAKECMDGOALS), clean) +ifneq ($(strip $(TAREXES)),) +-include $(patsubst %,$(OBJ_DIR)/%.mk,$(TAREXES)) +endif +endif + + +########################################################################## +# Clean rules +# +CLEANDIRS = $(OBJ_DIR) $(TARGET_DIR) + +CLEANFILES = *.obj *.dll *.lib *.sys *.pdb *.idb *.exp *.ilk *.sbr *.mk *.exe + +clean: $(CLEANDIRS) + @echo deleting dump files at $(shell pwd) + @rm -f $(CLEANFILES) + @if [ -d $(OBJ_DIR) ] ; then rm -f $(CLEANFILES:%=$(OBJ_DIR)/%); fi + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.org b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.org new file mode 100644 index 00000000..479afca2 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.org @@ -0,0 +1,54 @@ +# +# Copyright (c) 2002, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under the terms of the "IBM Common Public +# License 1.0" a copy of which is in the file LICENSE.txt in the +# root directory. The license is also available from the Open Source +# Initiative, see http://www.opensource.org/licenses/ibmpl.html. +# +# + +# ----------------------------------------------------- +# +# Description : Makefile for dapltest +# +# ----------------------------------------------------- + +IBA_HOME = ../../../.. +DAPL_ROOT = ../../.. + +M_OBJS = $(ALLOBJS) +M_TARGET := dapltest +I_TARGET := $(M_TARGET) + +EXTRA_CFLAGS = -g3 +EXTRA_CFLAGS += -Wall +EXTRA_CFLAGS += -Wmissing-prototypes +EXTRA_CFLAGS += -Wstrict-prototypes +EXTRA_CFLAGS += -Wmissing-declarations +EXTRA_CFLAGS += -pipe +EXTRA_CFLAGS += -D__LINUX__ +EXTRA_CFLAGS += -DDAT_THREAD_SAFE=DAT_FALSE +EXTRA_CFLAGS += -I$(DAPL_ROOT)/dat/include/ +EXTRA_CFLAGS += -Werror + + +EXTRA_LDFLAGS = -L$(DAPL_ROOT)/dat/udat +EXTRA_LDFLAGS += -ldat +EXTRA_LDFLAGS += -lpthread +EXTRA_LDFLAGS += -ldl + +ifeq ($(EXPLICIT_LINK),1) +EXTRA_LDFLAGS += -L$(DAPL_ROOT)/dapl/udapl +EXTRA_LDFLAGS += -ldapl +endif + +include $(IBA_HOME)/Makefile.config +include $(IBA_HOME)/Makefile.rules + +ifneq ($(ARCH),ia64) +EXTRA_CFLAGS += -Werror +EXTRA_CFLAGS += -D__PENTIUM__ +else +EXTRA_CFLAGS += -DIA64 +endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.orig b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.orig new file mode 100644 index 00000000..996384de --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/Makefile.orig @@ -0,0 +1,134 @@ +# +# Copyright (c) 2002, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under the terms of the "IBM Common Public +# License 1.0" a copy of which is in the file LICENSE.txt in the +# root directory. The license is also available from the Open Source +# Initiative, see http://www.opensource.org/licenses/ibmpl.html. 
+#
+#
+
+# -----------------------------------------------------
+#
+# Description : Makefile for dapltest
+#
+# -----------------------------------------------------
+
+
+.SILENT:
+
+
+#
+# Variables
+#
+
+# DAPL_ROOT = path to DAPL root directory
+DAPL_ROOT = $(shell pwd)/../../..
+# OBJECT_DIR = directory for object files
+OBJECT_DIR = .
+# SOURCE_DIRS = directories containing source files
+SOURCE_DIRS = .
+# EXEC = executable name
+EXEC = dapltest
+
+# -------------- Automatic Variables ------------------
+SOURCE_FILES = $(shell ls $(foreach DIR, $(SOURCE_DIRS), $(DIR)/*.c))
+OBJECT_FILES = ${patsubst %.c,$(OBJECT_DIR)/%.o,$(notdir ${SOURCE_FILES})}
+EXEC_FILE = $(OBJECT_DIR)/$(EXEC)
+
+VPATH = $(SOURCE_DIRS)
+# -----------------------------------------------------
+
+
+#
+# Tools
+#
+
+ECHO = echo
+INSTALL = /usr/bin/install
+MAKEDEPEND = makedepend
+RM = rm
+
+
+#
+# Compiler
+#
+
+CC = gcc
+
+# DEFINES
+DEFINES = __LINUX__
+DEFINES += __PENTIUM__
+
+# INCLUDE_DIRS = directories containing include files
+INCLUDE_DIRS = $(DAPL_ROOT)/dat/include/
+
+CC_FLAGS = -g3
+CC_FLAGS += -Wall
+CC_FLAGS += -Wmissing-prototypes
+CC_FLAGS += -Wstrict-prototypes
+CC_FLAGS += -Wmissing-declarations
+CC_FLAGS += -Werror
+CC_FLAGS += -pipe
+
+# -------------- Automatic Variables ------------------
+INCLUDE_PATH = $(foreach DIR, $(SOURCE_DIRS), -I$(DIR))
+INCLUDE_PATH += $(foreach DIR, $(INCLUDE_DIRS), -I$(DIR))
+
+CC_FLAGS += $(INCLUDE_PATH)
+CC_FLAGS += $(foreach DEFINE, $(DEFINES), -D$(DEFINE))
+# -----------------------------------------------------
+
+
+#
+# Linker
+#
+
+LD = gcc
+
+# LIB_OBJS = library object files
+LIB_OBJS = dat
+LIB_OBJS += pthread
+
+# LIB_DIRS = directories for library object files
+LIB_DIRS = $(DAPL_ROOT)/dat/udat/Target
+
+# if the provider library should be explicitly linked
+EXPLICIT_LINK=1
+ifeq ($(EXPLICIT_LINK),1)
+# in addition to providers listed in the DAT static registry
+# the specified provider will be available to the consumer
+LIB_OBJS += dapl
+LIB_DIRS += $(DAPL_ROOT)/dapl/udapl/Target
+endif
+
+# -------------- Automatic Variables ------------------
+LD_FLAGS = $(foreach DIR, $(LIB_DIRS), -L$(DIR))
+LD_FLAGS += $(foreach DIR, $(LIB_DIRS), -Wl,-R$(DIR))
+LD_FLAGS += $(foreach OBJ, $(LIB_OBJS), -l$(OBJ))
+# -----------------------------------------------------
+
+
+#
+# Rules
+#
+
+all : $(EXEC_FILE)
+
+$(EXEC_FILE) : $(OBJECT_DIR) $(OBJECT_FILES)
+	$(ECHO) "--- Linking $@ ---"
+	$(LD) $(LD_FLAGS) $(OBJECT_FILES) -o $@
+
+$(OBJECT_DIR) :
+	$(ECHO) "--- Creating Directory $@ ---"
+	$(INSTALL) -d $@
+
+$(OBJECT_DIR)/%.o: %.c
+	$(ECHO) "--- Compiling $< ---"
+	$(CC) $(CC_FLAGS) -o $@ -c $<
+
+tidy:
+	$(RM) -f $(foreach DIR, $(SOURCE_DIRS), $(DIR)/*~)
+
+clean: tidy
+	$(RM) -f $(OBJECT_DIR)/*.o $(OBJECT_DIR)/$(EXEC)
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/SOURCES b/branches/Ndi/ulp/dapl/test/udapl/dapltest/SOURCES
new file mode 100644
index 00000000..ad9b3a17
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/SOURCES
@@ -0,0 +1,63 @@
+TARGETNAME=dapltest
+TARGETPATH=..\..\..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=PROGRAM
+UMTYPE=console
+USE_CRTDLL=1
+
+SOURCES= \
+	dapl_bpool.c \
+	dapl_client.c \
+	dapl_client_info.c \
+	dapl_cnxn.c \
+	dapl_endian.c \
+	dapl_fft_cmd.c \
+	dapl_fft_connmgt.c \
+	dapl_fft_dataxfer.c \
+	dapl_fft_dataxfer_client.c \
+	dapl_fft_endpoint.c \
+	dapl_fft_hwconn.c \
+	dapl_fft_mem.c \
+	dapl_fft_pz.c \
+	dapl_fft_queryinfo.c \
+	dapl_fft_test.c \
+	dapl_fft_util.c \
+	dapl_getopt.c \
+	dapl_limit.c \
	dapl_limit_cmd.c \
+	dapl_main.c \
+	dapl_mdep.c \
+	dapl_memlist.c \
+	dapl_netaddr.c \
+	dapl_params.c \
+	dapl_performance_client.c \
+	dapl_performance_cmd.c \
+	dapl_performance_server.c \
+	dapl_performance_stats.c \
+	dapl_performance_util.c \
+	dapl_quit_cmd.c \
+	dapl_server.c \
+	dapl_server_cmd.c \
+	dapl_server_info.c \
+	dapl_test_data.c \
+	dapl_test_util.c \
+	dapl_thread.c \
+	dapl_transaction_cmd.c \
+	dapl_transaction_stats.c \
+	dapl_transaction_test.c \
+	dapl_transaction_util.c \
+	dapl_util.c
+
+INCLUDES=..\..\..\dapl\include;..\..\..\dat\include;
+RCOPTIONS=/I..\..\..\..\..\inc;
+
+# Set defines particular to the driver. A good idea to build listings
+USER_C_FLAGS=$(USER_C_FLAGS) -DDYNAMIC_DAT_LOADING
+!if $(FREEBUILD)
+USER_C_FLAGS=$(USER_C_FLAGS) -DDAT_DLL_NAME=\"dat.dll\"
+!else
+USER_C_FLAGS=$(USER_C_FLAGS) -DDAT_DLL_NAME=\"datd.dll\"
+!endif
+
+TARGETLIBS=$(SDK_LIB_PATH)\ws2_32.lib
+
+MSC_WARNING_LEVEL= /W3
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/bw.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/bw.sh
new file mode 100644
index 00000000..9a5c03e5
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/bw.sh
@@ -0,0 +1,22 @@
+#!/bin/sh
+#
+# Sample client invocation
+#
+#
+me=`basename $0`
+case $# in
+0) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;;
+1) host=$1
+   device=IbalHca0
+   size=65536 ;;
+2) host=$1
+   device=IbalHca0
+   size=$2 ;;
+3) host=$1
+   device=$3
+   size=$2 ;;
+*) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;;
+esac
+
+./dapltest -T P -d -i 1024 -s ${host} -D ${device} \
+	-p 16 -m p RW ${size} 1
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/cl.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/cl.sh
new file mode 100644
index 00000000..94c8cfe2
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/cl.sh
@@ -0,0 +1,30 @@
+#!/bin/sh
+#
+# Sample client invocation
+#
+#
+me=`basename $0`
+case $# in
+0) host=mtilab11-ib0
+   device=IbalHca0 ;;
+1) host=$1
+   device=IbalHca0 ;;
+2) host=$1
+   device=$2 ;;
+*) echo Usage: $me '[hostname [device] ]' 1>&2 ; exit 1;;
+esac
+#
+#
+# ./dapltest -T T -V -d -t 2 -w 2 -i 1000111 -s ${host} -D ${device} \
+#	client RW 4096 1 server RW 2048 4 \
+#	client RR 1024 2 server RR 2048 2 \
+#	client SR 1024 3 -f server SR 256 3 -f
+
+ ./dapltest -T T -P -d -t 1 -w 1 -i 1024 -s ${host} -D ${device} \
+	client RW 4096 1 server RW 2048 4 \
+	server RR 1024 2 client RR 2048 2 \
+	client SR 1024 3 -f server SR 256 3 -f
+
+#dapltest -T T -d -s ${host} -D ${device} -i 10000 -t 1 -w 1 \
+#	client SR 256 \
+#	server SR 256
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.c
new file mode 100644
index 00000000..0acd5965
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.c
@@ -0,0 +1,348 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_test_data.h" +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" + +/*****************************************************************************/ +/* + * Allocate buffer pool (data buffers) + * + * Caller wants to allocate buffers of bytes, + * with each buffer aligned as requested. The caller is free to + * use the buffers separately, or as one contiguous segment, so + * we allocate IOV entries enough to support either usage. + */ +Bpool * +DT_BpoolAlloc ( + Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_PZ_HANDLE pz_handle, + DAT_EP_HANDLE ep_handle, + DAT_EVD_HANDLE rmr_evd_handle, + DAT_COUNT seg_size, + DAT_COUNT num_segs, + DAT_COUNT alignment, + DAT_BOOLEAN enable_rdma_write, + DAT_BOOLEAN enable_rdma_read) +{ + unsigned char *module = "DT_BpoolAlloc"; + unsigned char *alloc_ptr = 0; + Bpool *bpool_ptr = 0; + DAT_COUNT alloc_size; + DAT_REGION_DESCRIPTION region; + DAT_RETURN ret; + + /* We'll hand out aligned buffers, compensate here */ + seg_size = DT_RoundSize (seg_size, alignment); + alloc_size = seg_size * num_segs + alignment; + + alloc_ptr = (unsigned char *) DT_MemListAlloc ( pt_ptr, "bpool", BUFF, + alloc_size); + if (!alloc_ptr) + { + DT_Mdep_printf ("No Memory to create bpool buffer!\n"); + goto err; + } + + bpool_ptr = (Bpool *) DT_MemListAlloc (pt_ptr, "bpool", BPOOL, sizeof (Bpool) + + num_segs * sizeof (DAT_LMR_TRIPLET)); + if (!bpool_ptr) + { + DT_Mdep_printf ("No Memory to create Bpool!\n"); + goto err; + } + + bpool_ptr->alloc_ptr = alloc_ptr; + bpool_ptr->alloc_size = alloc_size; + bpool_ptr->pz_handle = pz_handle; + bpool_ptr->num_segs = num_segs; + bpool_ptr->ep_handle = ep_handle; + bpool_ptr->buffer_size = seg_size * num_segs; + bpool_ptr->buffer_start = DT_AlignPtr (alloc_ptr, alignment); + bpool_ptr->tripl_start = (DAT_LMR_TRIPLET *) (bpool_ptr + 1); + bpool_ptr->seg_size = seg_size; + bpool_ptr->enable_rdma_write = enable_rdma_write; + bpool_ptr->enable_rdma_read = enable_rdma_read; + bpool_ptr->rmr_evd_handle = rmr_evd_handle; + + DT_Mdep_spew (3, ("lmr_create [%p, %x]\n", bpool_ptr->buffer_start, + bpool_ptr->buffer_size)); + + memset (®ion, 0, sizeof (region)); + region.for_va = bpool_ptr->buffer_start; + ret = dat_lmr_create (ia_handle, + DAT_MEM_TYPE_VIRTUAL, + region, + bpool_ptr->buffer_size, + pz_handle, + DAT_MEM_PRIV_ALL_FLAG, + &bpool_ptr->lmr_handle, + &bpool_ptr->lmr_context, + &bpool_ptr->rmr_context, + &bpool_ptr->reg_size, + &bpool_ptr->reg_addr); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_lmr_create failed %s\n", + module, DT_RetToString (ret)); + goto err; + } + /* verify that the outputs are reasonable */ + if (((uintptr_t)bpool_ptr->reg_addr > (uintptr_t)bpool_ptr->buffer_start) + || (bpool_ptr->reg_size < bpool_ptr->buffer_size + + ((uintptr_t)bpool_ptr->buffer_start - (uintptr_t)bpool_ptr->reg_addr))) + { + DT_Mdep_printf ( "%s: dat_lmr_create bogus" + "in: 0x%p, %x out 0x" F64x ", " F64x "\n", + module, + bpool_ptr->buffer_start, bpool_ptr->buffer_size, + (DAT_UVERYLONG)bpool_ptr->reg_addr, + (DAT_UVERYLONG)bpool_ptr->reg_size); + 
goto err; + } + + DT_Mdep_spew (3, ("lmr_create OK [0x" F64x ", " F64x ", lctx=%x]\n", + (DAT_UVERYLONG)bpool_ptr->reg_addr, + (DAT_UVERYLONG)bpool_ptr->reg_size, bpool_ptr->lmr_context)); +#ifdef ALLOW_MW /* no BIND RMR */ /* Enable RDMA if requested */ + if (enable_rdma_write || enable_rdma_read) + { + DAT_LMR_TRIPLET iov; + DAT_RMR_COOKIE cookie; + DAT_MEM_PRIV_FLAGS mflags; + DAT_RMR_BIND_COMPLETION_EVENT_DATA rmr_stat; + + /* create the RMR */ + ret = dat_rmr_create (pz_handle, &bpool_ptr->rmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rmr_create failed %s\n", + module, DT_RetToString (ret)); + goto err; + } + + /* bind the RMR */ + iov.virtual_address = bpool_ptr->reg_addr; + iov.segment_length = bpool_ptr->reg_size; + iov.lmr_context = bpool_ptr->lmr_context; + cookie.as_64 = (DAT_UINT64)0UL; + cookie.as_ptr = (DAT_PVOID) (uintptr_t) bpool_ptr->reg_addr; + mflags = (enable_rdma_write && enable_rdma_read ? DAT_MEM_PRIV_ALL_FLAG + : (enable_rdma_write ? DAT_MEM_PRIV_WRITE_FLAG + : (enable_rdma_read ? DAT_MEM_PRIV_READ_FLAG : 0))); + + DT_Mdep_spew (3, ("rmr_bind [" F64x ", " F64x "]\n", + (DAT_UVERYLONG)bpool_ptr->reg_addr, + (DAT_UVERYLONG)bpool_ptr->reg_size)); + + ret = dat_rmr_bind ( bpool_ptr->rmr_handle, + &iov, + mflags, + bpool_ptr->ep_handle, + cookie, + DAT_COMPLETION_DEFAULT_FLAG, + &bpool_ptr->rmr_context); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rmr_bind failed %s\n", + module, DT_RetToString (ret)); + goto err; + } + + DT_Mdep_spew (3, ("rmr_bind-wait\n")); + + /* await the bind result */ + if (!DT_rmr_event_wait (bpool_ptr->rmr_evd_handle, &rmr_stat) || + !DT_rmr_check (&rmr_stat, + bpool_ptr->rmr_handle, + (DAT_PVOID) (uintptr_t) bpool_ptr->reg_addr, + "Bpool")) + { + goto err; + } + + DT_Mdep_spew (3, ("rmr_bound [OK Rctx=%x]\n", bpool_ptr->rmr_context)); + } +#endif /** ALLOW_MW */ + + /* + * Finally! Return the newly created Bpool. + */ + return ( bpool_ptr ); + + + /* ********************************* + * Whoops - clean up and return NULL + */ +err: + if (bpool_ptr) + { + if (bpool_ptr->rmr_handle) + { + ret = dat_rmr_free (bpool_ptr->rmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rmr_free failed %s\n", + module, DT_RetToString (ret)); + } + } + if (bpool_ptr->lmr_handle) + { + ret = dat_lmr_free (bpool_ptr->lmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_lmr_free failed %s\n", + module, DT_RetToString (ret)); + } + } + DT_MemListFree (pt_ptr, bpool_ptr); + } + if (alloc_ptr) + { + DT_MemListFree (pt_ptr, alloc_ptr); + } + + return ( 0 ); +} + +/*****************************************************************************/ +bool +DT_Bpool_Destroy (Per_Test_Data_t * pt_ptr, + Bpool * bpool_ptr) +{ + unsigned char *module = "DT_Bpool_Destroy"; + bool rval = true; + + if (bpool_ptr) + { + if (bpool_ptr->alloc_ptr) + { + if (bpool_ptr->rmr_handle) + { + DAT_LMR_TRIPLET iov; + DAT_RMR_COOKIE cookie; + DAT_RETURN ret; + + iov.virtual_address = bpool_ptr->reg_addr; + iov.segment_length = 0; /* un-bind */ + iov.lmr_context = bpool_ptr->lmr_context; + cookie.as_64 = (DAT_UINT64)0UL; + cookie.as_ptr = (DAT_PVOID) (uintptr_t)bpool_ptr->reg_addr; + + /* + * Do not attempt to unbind here. The remote node + * is going through the same logic and may disconnect + * before an unbind completes. Any bind/unbind + * operation requires a CONNECTED QP to complete, + * a disconnect will cause problems. 
Unbind is + * a simple optimization to allow rebinding of + * an RMR, doing an rmr_free will pull the plug + * and cleanup properly. + */ + ret = dat_rmr_free (bpool_ptr->rmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rmr_free failed %s\n", + module, DT_RetToString (ret)); + rval = false; + } + } + + if (bpool_ptr->lmr_handle) + { + DAT_RETURN ret = dat_lmr_free (bpool_ptr->lmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_lmr_free failed %s\n", + module, DT_RetToString (ret)); + rval = false; + } + } + DT_MemListFree (pt_ptr, bpool_ptr->alloc_ptr); + } + DT_MemListFree (pt_ptr, bpool_ptr); + } + + return (rval); +} + +/*****************************************************************************/ +unsigned char * +DT_Bpool_GetBuffer (Bpool * bpool_ptr, int index) +{ + return ( bpool_ptr->buffer_start + index * bpool_ptr->seg_size ); +} + +/*****************************************************************************/ +DAT_COUNT +DT_Bpool_GetBuffSize (Bpool * bpool_ptr, int index) +{ + return ( bpool_ptr->seg_size ); +} + +/*****************************************************************************/ +DAT_LMR_TRIPLET * +DT_Bpool_GetIOV (Bpool * bpool_ptr, int index) +{ + return ( bpool_ptr->tripl_start + index ); +} + +/*****************************************************************************/ +DAT_LMR_CONTEXT +DT_Bpool_GetLMR (Bpool * bpool_ptr, int index) +{ + return ( bpool_ptr->lmr_context ); +} + +/*****************************************************************************/ +DAT_RMR_CONTEXT +DT_Bpool_GetRMR (Bpool * bpool_ptr, int index) +{ + return ( bpool_ptr->rmr_context ); +} + +/*****************************************************************************/ +void +DT_Bpool_print (Bpool * bpool_ptr) +{ + DT_Mdep_printf ("BPOOL %p\n", bpool_ptr); + DT_Mdep_printf ("BPOOL alloc_ptr %p\n", (unsigned char *) bpool_ptr->alloc_ptr); + DT_Mdep_printf ("BPOOL alloc_size %x\n", (int) bpool_ptr->alloc_size); + DT_Mdep_printf ("BPOOL pz_handle %p\n", (uintptr_t*) bpool_ptr->pz_handle); + DT_Mdep_printf ("BPOOL num_segs %x\n", (int) bpool_ptr->num_segs); + DT_Mdep_printf ("BPOOL seg_size %x\n", (int) bpool_ptr->seg_size); + DT_Mdep_printf ("BPOOL tripl_start %p\n", bpool_ptr->tripl_start); + DT_Mdep_printf ("BPOOL buffer_start %p\n", bpool_ptr->buffer_start); + DT_Mdep_printf ("BPOOL buffer_size %x\n", (int) bpool_ptr->buffer_size); + DT_Mdep_printf ("BPOOL rdma_write %x\n", + (int) bpool_ptr->enable_rdma_write); + DT_Mdep_printf ("BPOOL rdmaread %x\n", + (int) bpool_ptr->enable_rdma_read); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.h new file mode 100644 index 00000000..161416e6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_bpool.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. 
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_BPOOL_H__ +#define __DAPL_BPOOL_H__ + +#include "dapl_mdep.h" +#include + +#pragma pack(1) +typedef struct Bpool_tag Bpool; +/* + * struct Bpool + */ + +struct Bpool_tag +{ + unsigned char *alloc_ptr; + DAT_UINT32 alloc_size; + DAT_PZ_HANDLE pz_handle; + DAT_COUNT seg_size; + DAT_COUNT num_segs; /* num segments */ + unsigned char *buffer_start; /* Start of buffer area */ + DAT_COUNT buffer_size; /* Size of data buffer (rounded) */ + DAT_VADDR reg_addr; /* start of registered area */ + DAT_VLEN reg_size; /* size of registered area */ + DAT_EP_HANDLE ep_handle; /* EP area is registered to */ + DAT_LMR_HANDLE lmr_handle; /* local access */ + DAT_LMR_CONTEXT lmr_context; + DAT_LMR_TRIPLET*tripl_start; /* local IOV */ + DAT_BOOLEAN enable_rdma_write; /* remote access */ + DAT_BOOLEAN enable_rdma_read; + DAT_RMR_HANDLE rmr_handle; + DAT_RMR_CONTEXT rmr_context; + DAT_EVD_HANDLE rmr_evd_handle; +}; +#pragma pack() +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client.c new file mode 100644 index 00000000..d170a830 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client.c @@ -0,0 +1,603 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_server_info.h" +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" +#include "dapl_transaction_test.h" +#include "dapl_version.h" +#include "dapl_cnxn.h" +#include +#include + +#define DFLT_QLEN 40 /* default event queue length */ +#define MAX_CONN_RETRY 8 + +/* + * Client control routine Connect to the server, send the command across. 
+ * Then start the client-side of the test - creating threads as needed + */ +void +DT_cs_Client (Params_t * params_ptr, + char *dapl_name, + char *server_name, + DAT_UINT32 total_threads) +{ + Per_Test_Data_t *pt_ptr = NULL; + DAT_IA_HANDLE ia_handle = DAT_HANDLE_NULL; + DAT_PZ_HANDLE pz_handle = DAT_HANDLE_NULL; + DAT_EVD_HANDLE recv_evd_hdl = DAT_HANDLE_NULL; + DAT_EVD_HANDLE reqt_evd_hdl = DAT_HANDLE_NULL; + DAT_EVD_HANDLE conn_evd_hdl = DAT_HANDLE_NULL; + DAT_EVD_HANDLE async_evd_hdl = DAT_HANDLE_NULL; + DAT_EP_HANDLE ep_handle = DAT_HANDLE_NULL; + Server_Info_t *sinfo = NULL; + Transaction_Cmd_t *Transaction_Cmd = NULL; + Performance_Cmd_t *Performance_Cmd = NULL; + Quit_Cmd_t *Quit_Cmd = NULL; + Bpool *bpool = NULL; + DAT_IA_ADDRESS_PTR remote_netaddr = NULL; + unsigned char *module = "DT_cs_Client"; + unsigned int did_connect = 0; + unsigned int retry_cnt = 0; + DAT_DTO_COOKIE dto_cookie; + + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_EVENT_NUMBER event_num; + unsigned char * buffp; + DAT_RETURN ret; + + dto_cookie.as_64 = LZERO; + + DT_Mdep_printf ("%s: Starting Test ... \n", module); + + /* Set up the Per_Test_Data */ + pt_ptr = DT_Alloc_Per_Test_Data (); + if (!pt_ptr) + { + DT_Mdep_printf ("%s: no memory for Per_Test_Data\n", module); + return; + } + DT_MemListInit (pt_ptr); /* init MemlistLock and memListHead */ + DT_Thread_Init (pt_ptr); /* init ThreadLock and threadcount */ + pt_ptr->local_is_server = false; + pt_ptr->Client_Info.dapltest_version = DAPLTEST_VERSION; + pt_ptr->Client_Info.is_little_endian = DT_local_is_little_endian; + pt_ptr->Client_Info.test_type = params_ptr->test_type; + pt_ptr->Client_Info.total_threads = total_threads; + memcpy ( (void *) (uintptr_t) &pt_ptr->Params, + (const void *) params_ptr, + sizeof (Params_t)); + + /* Allocate and fill in the Server's address */ + remote_netaddr = DT_NetAddrAlloc (pt_ptr); + if ( !remote_netaddr + || !DT_NetAddrLookupHostAddress (remote_netaddr, server_name)) + { + DT_Mdep_printf ("%s: Cannot find server address\n", module); + goto client_exit; + } + +#ifdef DYNAMIC_DAT_LOADING + /* Open the IA */ + ret = dat_open (dapl_name, + DFLT_QLEN, + &async_evd_hdl, + &ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + ret = dat_ia_open (dapl_name, + DFLT_QLEN, + &async_evd_hdl, + &ia_handle); +#endif //DYNAMIC_DAT_LOADING + + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf( "%s: Could not open %s (%s)\n", + module, dapl_name, DT_RetToString (ret)); + ia_handle = DAT_HANDLE_NULL; + goto client_exit; + } + DT_Mdep_debug (("%s: IA %s opened\n", module, dapl_name)); + + /* Create a PZ */ + ret = dat_pz_create (ia_handle, &pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_create error: %s\n", + module, DT_RetToString (ret)); + pz_handle = DAT_HANDLE_NULL; + goto client_exit; + } + + /* Create 3 events - recv, request, connect */ + ret = dat_evd_create (ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_DTO_FLAG, + &recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (recv) failed %s\n", + module, DT_RetToString (ret)); + recv_evd_hdl = DAT_HANDLE_NULL; + goto client_exit; + } + ret = dat_evd_create (ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (send) failed %s\n", + module, DT_RetToString (ret)); + reqt_evd_hdl = DAT_HANDLE_NULL; + goto client_exit; + } + ret = dat_evd_create (ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, 
+ DAT_EVD_CONNECTION_FLAG, + &conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (conn) failed %s\n", + module, DT_RetToString (ret)); + conn_evd_hdl = DAT_HANDLE_NULL; + goto client_exit; + } + + /* Create an EP */ + ret = dat_ep_create (ia_handle, /* IA */ + pz_handle, /* PZ */ + recv_evd_hdl, /* recv */ + reqt_evd_hdl, /* request */ + conn_evd_hdl, /* connect */ + (DAT_EP_ATTR *) NULL, + &ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_create error: %s\n", + module, + DT_RetToString (ret)); + ep_handle = DAT_HANDLE_NULL; + goto client_exit; + } + DT_Mdep_debug (("%s: EP created\n", module)); + + /* + * Gather whatever info we want about defaults, + * and check that we can handle the requested parameters. + */ + if (!DT_query (pt_ptr, ia_handle, ep_handle) || + !DT_check_params (pt_ptr, module)) + { + goto client_exit; + } + + bpool = DT_BpoolAlloc (pt_ptr, + ia_handle, + pz_handle, + ep_handle, + DAT_HANDLE_NULL, /* no RMR */ + DT_RoundSize (sizeof (Transaction_Cmd_t), 8192), + 3, /* num_buffers */ + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if (bpool == 0) + { + DT_Mdep_printf ("%s: no memory for command buffer pool.\n", module); + goto client_exit; + } + + DT_Mdep_spew (3, ("RecvSrvInfo 0 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + bpool, 0))); + DT_Mdep_spew (3, ("SndCliInfo 1 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + bpool, 1))); + DT_Mdep_spew (3, ("SndCommand 2 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + bpool, 2))); + + /* Post recv buffer for Server_Info (1st buffer in pool) */ + DT_Mdep_debug (("%s: Posting 1 recv buffer\n", module)); +retry_repost: + if (!DT_post_recv_buffer (ep_handle, + bpool, + 0, + DT_Bpool_GetBuffSize (bpool, 0))) + { + DT_Mdep_printf ("%s: cannot post Server_Info recv buffer.\n", module); + goto client_exit; + } + + DT_Mdep_debug (("%s: Connect Endpoint\n", module)); +retry: + ret = dat_ep_connect (ep_handle, + remote_netaddr, + SERVER_PORT_NUMBER, + DAT_TIMEOUT_INFINITE, + 0, (DAT_PVOID) 0, /* no private data */ + params_ptr->ReliabilityLevel, + DAT_CONNECT_DEFAULT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: Cannot connect Endpoint %s\n", + module, DT_RetToString (ret)); + goto client_exit; + } + + DT_Mdep_debug (("%s: Await connection ...\n", module)); + if (!DT_conn_event_wait (ep_handle, conn_evd_hdl, &event_num)) + { + if ( event_num == DAT_CONNECTION_EVENT_PEER_REJECTED ) + { + DAT_EVENT event; + DAT_COUNT drained = 0; + + DT_Mdep_Sleep (1000); + DT_Mdep_printf ("%s: retrying connection...\n", module); + retry_cnt++; + /* + * See if any buffers were flushed as a result of + * the REJECT; clean them up and repost if so + */ + dat_ep_reset (ep_handle); + do + { + + ret = dat_evd_dequeue ( recv_evd_hdl, + &event); + drained++; + } while (ret != DAT_QUEUE_EMPTY); + + if (drained > 1 && retry_cnt < MAX_CONN_RETRY) + { + DT_Mdep_printf("Reposting!!! %d\n", drained); + goto retry_repost; + } + if (retry_cnt < MAX_CONN_RETRY) + { + goto retry; + } + } + DT_Mdep_printf ("%s: bad connection event\n", module); + goto client_exit; + } + + did_connect++; + if (DT_dapltest_debug) + { + DT_Mdep_debug (("%s: Connected!\n", module)); + get_ep_connection_state (ep_handle); + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... 
+ *****/DT_Mdep_Sleep (1000); +#endif + + + /* Send Client_Info (using 2nd buffer in the pool) */ + DT_Mdep_debug (("%s: Sending Client_Info\n", module)); + buffp = DT_Bpool_GetBuffer (bpool, 1); + memcpy ( (void *)buffp, + (const void *) &pt_ptr->Client_Info, + sizeof (Client_Info_t)); + DT_Client_Info_Endian ((Client_Info_t *) buffp); + if (!DT_post_send_buffer ( ep_handle, + bpool, + 1, + DT_Bpool_GetBuffSize (bpool, 1))) + { + DT_Mdep_printf ("%s: cannot send Client_Info\n", module); + goto client_exit; + } + /* reap the send and verify it */ + dto_cookie.as_ptr = (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( bpool, 1); + DT_Mdep_debug (("%s: Sent Client_Info - awaiting completion\n", module)); + if (!DT_dto_event_wait (reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ep_handle, + DT_Bpool_GetBuffSize (bpool, 1), + dto_cookie, + "Client_Info_Send")) + { + goto client_exit; + } + + /* Set up the Command (using 3rd buffer in pool) */ + DT_Mdep_debug (("%s: Sending Command\n", module)); + buffp = DT_Bpool_GetBuffer (bpool, 2); + switch (pt_ptr->Client_Info.test_type) + { + case QUIT_TEST: + { + Quit_Cmd = &pt_ptr->Params.u.Quit_Cmd; + memcpy ( (void *)buffp, + (const void *)Quit_Cmd, + sizeof (Quit_Cmd_t)); + DT_Quit_Cmd_Endian ((Quit_Cmd_t *) buffp, true); + break; + } + case TRANSACTION_TEST: + { + Transaction_Cmd = &pt_ptr->Params.u.Transaction_Cmd; + memcpy ( (void *)buffp, + (const void *)Transaction_Cmd, + sizeof (Transaction_Cmd_t)); + DT_Transaction_Cmd_Endian ((Transaction_Cmd_t *)buffp, true); + break; + } + case PERFORMANCE_TEST: + { + Performance_Cmd = &pt_ptr->Params.u.Performance_Cmd; + memcpy ( (void *)buffp, + (const void *)Performance_Cmd, + sizeof (Performance_Cmd_t)); + DT_Performance_Cmd_Endian ((Performance_Cmd_t *)buffp); + break; + } + default: + { + DT_Mdep_printf ("Unknown Test Type\n"); + goto client_exit; + } + } + + /* Send the Command buffer */ + if (!DT_post_send_buffer ( ep_handle, + bpool, + 2, + DT_Bpool_GetBuffSize (bpool, 2))) + { + DT_Mdep_printf ("%s: cannot send Command\n", module); + goto client_exit; + } + /* reap the send and verify it */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( bpool, 2); + DT_Mdep_debug (("%s: Sent Command - awaiting completion\n", module)); + if (!DT_dto_event_wait (reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ep_handle, + DT_Bpool_GetBuffSize (bpool, 2), + dto_cookie, + "Client_Cmd_Send")) + { + goto client_exit; + } + + /************************************************************************/ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( bpool, 0); + DT_Mdep_debug (("%s: Waiting for Server_Info\n", module)); + if (!DT_dto_event_wait (recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ep_handle, + DT_Bpool_GetBuffSize (bpool, 0), + dto_cookie, + "Server_Info_Recv")) + { + goto client_exit; + } + + DT_Mdep_debug (("%s: Server_Info Received\n", module)); + sinfo = (Server_Info_t*) DT_Bpool_GetBuffer (bpool, 0); + DT_Server_Info_Endian (sinfo); + memcpy ( (void *) (uintptr_t)&pt_ptr->Server_Info, + (const void *)sinfo, + sizeof (Server_Info_t)); + + /* Perform obligatory version check */ + if (pt_ptr->Server_Info.dapltest_version != DAPLTEST_VERSION) + { + DT_Mdep_printf ("%s: DAPLTEST VERSION MISMATCH: Server %d, Client %d\n", + module, + pt_ptr->Server_Info.dapltest_version, + DAPLTEST_VERSION); + goto client_exit; + } + DT_Mdep_debug (("%s: Version OK!\n", module)); + + /* Dump out what we know, 
if requested */ + if (DT_dapltest_debug) + { + DT_Server_Info_Print (&pt_ptr->Server_Info); + DT_Client_Info_Print (&pt_ptr->Client_Info); + } + + /* Onward to running the actual test requested */ + switch (pt_ptr->Client_Info.test_type) + { + case TRANSACTION_TEST: + { + if (Transaction_Cmd->debug) + { + DT_Transaction_Cmd_Print (Transaction_Cmd); + } + DT_Transaction_Test_Client (pt_ptr, + ia_handle, + remote_netaddr); + break; + } + case PERFORMANCE_TEST: + { + if (Performance_Cmd->debug) + { + DT_Performance_Cmd_Print (Performance_Cmd); + } + DT_Performance_Test_Client (pt_ptr, + ia_handle, + remote_netaddr); + break; + } + case QUIT_TEST: + { + DT_Quit_Cmd_Print (Quit_Cmd); + break; + } + } + + /********************************************************************* + * Done - clean up and go home + */ +client_exit: + DT_Mdep_debug (("%s: Cleaning Up ...\n", module)); + + /* Disconnect the EP */ + if (ep_handle) + { + /* + * graceful attempt might fail because we got here due to + * some error above, so we may as well try harder. + */ + ret = dat_ep_disconnect (ep_handle, DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_disconnect (abrupt) error: %s\n", + module, + DT_RetToString (ret)); + } + else if (did_connect && + !DT_disco_event_wait (conn_evd_hdl, NULL)) + { + DT_Mdep_printf ("%s: bad disconnect event\n", module); + } + } + + /* Free the bpool (if any) */ + DT_Bpool_Destroy (pt_ptr, bpool); + + /* Free the EP */ + if (ep_handle) + { + DAT_EVENT event; + /* + * Drain off outstanding DTOs that may have been + * generated by racing disconnects + */ + do + { + ret = dat_evd_dequeue ( recv_evd_hdl, + &event); + } while ( DAT_GET_TYPE(ret) != DAT_QUEUE_EMPTY ); + + ret = dat_ep_free (ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free error: %s\n", + module, DT_RetToString (ret)); + /* keep going */ + } + } + + /* Free the 3 EVDs */ + if (conn_evd_hdl) + { + ret = dat_evd_free (conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (conn) error: %s\n", + module, DT_RetToString (ret)); + /* keep going */ + } + } + if (reqt_evd_hdl) + { + ret = dat_evd_free (reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (reqt) error: %s\n", + module, DT_RetToString (ret)); + /* keep going */ + } + } + if (recv_evd_hdl) + { + ret = dat_evd_free (recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (recv) error: %s\n", + module, DT_RetToString (ret)); + /* keep going */ + } + } + + /* Free the PZ */ + if (pz_handle) + { + ret = dat_pz_free (pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_free error: %s\n", + module, DT_RetToString (ret)); + /* keep going */ + } + } + + /* Close the IA */ + if (ia_handle) + { + /* DT_ia_close cleans up async evd handle, too */ + ret = DT_ia_close (ia_handle, DAT_CLOSE_GRACEFUL_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (graceful) error: %s\n", + module, DT_RetToString (ret)); + ret = DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (abrupt) error: %s\n", + module, DT_RetToString (ret)); + } + /* keep going */ + } + else + { + DT_Mdep_debug (("%s: IA %s closed\n", module, dapl_name)); + } + } + + /* Free the Server's address */ + if (remote_netaddr) + { + DT_NetAddrFree (pt_ptr, remote_netaddr); + } + + /* Free the Per_Test_Data */ + DT_Mdep_LockDestroy (&pt_ptr->Thread_counter_lock); + DT_PrintMemList (pt_ptr); 
/* check if we return all space allocated */ + DT_Mdep_LockDestroy (&pt_ptr->MemListLock); + DT_Free_Per_Test_Data (pt_ptr); + + DT_Mdep_printf ("%s: ========== End of Work -- Client Exiting\n", module); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.c new file mode 100644 index 00000000..1e346b3a --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.c @@ -0,0 +1,51 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_client_info.h" +#include "dapl_proto.h" +#include "dapl_test_data.h" + +void +DT_Client_Info_Endian (Client_Info_t * client_info) +{ + client_info->dapltest_version = DT_Endian32 (client_info->dapltest_version); + client_info->is_little_endian = DT_Endian32 (client_info->is_little_endian); + client_info->test_type = DT_Endian32 (client_info->test_type); + client_info->total_threads = DT_Endian32 (client_info->total_threads); +} + + +void +DT_Client_Info_Print (Client_Info_t * client_info) +{ + DT_Mdep_printf ("-------------------------------------\n"); + DT_Mdep_printf ("Client_Info.dapltest_version : %d\n", + client_info->dapltest_version); + DT_Mdep_printf ("Client_Info.is_little_endian : %d\n", + client_info->is_little_endian); + DT_Mdep_printf ("Client_Info.test_type : %d\n", + client_info->test_type); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.h new file mode 100644 index 00000000..3a3b20c6 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_client_info.h @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#ifndef __DAPL_CLIENT_INFO_H__
+#define __DAPL_CLIENT_INFO_H__
+
+#include "dapl_mdep.h"
+#include <dat/udat.h>
+
+#pragma pack(1)
+
+typedef struct
+{
+    DAT_UINT32 dapltest_version;
+    DAT_UINT32 is_little_endian;
+    DAT_UINT32 test_type;
+    DAT_UINT32 total_threads;
+} Client_Info_t;
+#pragma pack()
+
+#endif
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.c
new file mode 100644
index 00000000..d653150c
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.c
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_mdep.h"
+#include "dapl_memlist.h"
+#include "dapl_proto.h"
+#include "dapl_cnxn.h"
+
+
+/****************************************************************************/
+int
+get_ep_connection_state (DAT_EP_HANDLE ep_handle)
+{
+    DAT_EP_STATE ep_state;
+    DAT_BOOLEAN in_dto_idle;
+    DAT_BOOLEAN out_dto_idle;
+    DAT_RETURN ret;
+    char *recv_status = "Idle";
+    char *req_status = "Idle";
+    ret = dat_ep_get_status (ep_handle, &ep_state, &in_dto_idle,
+                             &out_dto_idle);
+    if (ret != 0)
+    {
+        DT_Mdep_printf ("DAT_ERROR: Can't get Connection State %s\n",
+                        DT_RetToString (ret));
+    }
+    else
+    {
+        if (in_dto_idle == 0)
+        {
+            recv_status = "Active";
+        }
+        if (out_dto_idle == 0)
+        {
+            req_status = "Active";
+        }
+
+        DT_Mdep_printf ("DAT_STATE: %s\n", DT_State2Str (ep_state));
+        DT_Mdep_printf ("DAT_STATE: Inbound DTO Status: %s \n", recv_status);
+        DT_Mdep_printf ("DAT_STATE: Outbound DTO Status: %s\n", req_status);
+    }
+
+    return 0;
+}
+
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.h
new file mode 100644
index 00000000..97548bae
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_cnxn.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory.
The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#ifndef __DAPL_CNXN_H__
+#define __DAPL_CNXN_H__
+
+#include "dapl_bpool.h"
+#include "dapl_mdep.h"
+
+#define MAXHOSTNAMELEN 256
+
+#endif
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_common.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_common.h
new file mode 100644
index 00000000..7216067f
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_common.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#ifndef __DAPL_COMMON_H__
+#define __DAPL_COMMON_H__
+
+#include <dat/udat.h>
+
+typedef enum
+{
+    RDMA_READ,
+    RDMA_WRITE,
+    SEND_RECV
+} DT_Transfer_Type;
+
+
+typedef struct
+{
+    DAT_RMR_CONTEXT rmr_context;
+    DAT_CONTEXT mem_address;
+} RemoteMemoryInfo;
+
+
+#endif /* __DAPL_COMMON_H__ */
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_endian.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_endian.c
new file mode 100644
index 00000000..d1ab3fac
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_endian.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_proto.h" + +void +DT_Endian_Init (void) +{ + int endian; + endian = 1; + DT_local_is_little_endian = * ((unsigned char *) (&endian)) == 1; +} + +/* + * Big/Little Endian conversion functions + */ + +#define c1a32 ((DAT_UINT32)0x00FF00FF) +#define c1b32 ((DAT_UINT32)0xFF00FF00) +#define c2a32 ((DAT_UINT32)0x0000FFFF) +#define c2b32 ((DAT_UINT32)0xFFFF0000) +#define c164 ((DAT_UINT64)0x00FF00FF) +#define c1a64 (c164 | (c164 << 32)) +#define c1b64 (c1a64 << 8) +#define c264 ((DAT_UINT64)0x0000FFFF) +#define c2a64 (c264 | (c264 << 32)) +#define c2b64 (c2a64 << 16) +#define c3a64 ((DAT_UINT64)0xFFFFFFFF) +#define c3b64 (c3a64 << 32) + +DAT_UINT32 +DT_Endian32 (DAT_UINT32 val) +{ + if (DT_local_is_little_endian) + { + return val; + } + val = ((val & c1a32) << 8) | ((val & c1b32) >> 8); + val = ((val & c2a32) << 16) | ((val & c2b32) >> 16); + return (val); +} + +DAT_UINT64 +DT_Endian64 (DAT_UINT64 val) +{ + if (DT_local_is_little_endian) + { + return val; + } + val = ((val & c1a64) << 8) | ((val & c1b64) >> 8); + val = ((val & c2a64) << 16) | ((val & c2b64) >> 16); + val = ((val & c3a64) << 32) | ((val & c3b64) >> 32); + return (val); +} + +DAT_UINT32 +DT_EndianMemHandle (DAT_UINT32 val) +{ + val = ((val & c1a32) << 8) | ((val & c1b32) >> 8); + val = ((val & c2a32) << 16) | ((val & c2b32) >> 16); + return (val); +} + +DAT_UINT64 +DT_EndianMemAddress (DAT_UINT64 val) +{ + DAT_UINT64 val64; + val64 = val; + val64 = ((val64 & c1a64) << 8) | ((val64 & c1b64) >> 8); + val64 = ((val64 & c2a64) << 16) | ((val64 & c2b64) >> 16); + val64 = ((val64 & c3a64) << 32) | ((val64 & c3b64) >> 32); + return val64; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.c new file mode 100644 index 00000000..96fae75a --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.c @@ -0,0 +1,356 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+#include "dapl_test_data.h"
+#include "dapl_mdep.h"
+#include "dapl_proto.h"
+#include "dapl_fft_cmd.h"
+
+//---------------------------------------------------------------------------
+void DT_FFT_Cmd_Init (FFT_Cmd_t *cmd)
+{
+    int i;
+    memset ((void *)cmd, 0, sizeof (FFT_Cmd_t));
+    cmd->fft_type = NONE;
+    cmd->device_name[0] = '\0';
+    cmd->server_name[0] = '\0';
+    for (i = 0; i < MAXCASES; i++)
+    {
+        cmd->cases_flag[i] = false;
+    }
+    cmd->size = 0;
+    cmd->num_iter = 1000;
+    cmd->num_threads = 10;
+    cmd->num_vis = 500;
+    cmd->ReliabilityLevel = DAT_QOS_BEST_EFFORT;
+}
+
+//------------------------------------------------------------------------------
+bool DT_FFT_Cmd_Parse (FFT_Cmd_t *cmd,
+                       int my_argc,
+                       char ** my_argv,
+                       mygetopt_t *opts)
+{
+    char c;
+    int i, caseNum;
+    unsigned int len;
+
+    for (;;)
+    {
+        c = DT_mygetopt_r (my_argc, my_argv, "D:f:s:i:t:v:R:", opts);
+        if (c == EOF)
+        {
+            break;
+        }
+        switch (c)
+        {
+            case 'D': //device name
+            {
+                strcpy (cmd->device_name, opts->optarg);
+                break;
+            }
+            case 's': //server name
+            {
+                strcpy (cmd->server_name, opts->optarg);
+                break;
+            }
+            case 'i': // num iterations
+            {
+                len = (unsigned int)strspn (opts->optarg, "0123456789");
+                if (len == 0 || len != strlen (opts->optarg))
+                {
+                    DT_Mdep_printf ("Syntax Error -i option\n");
+                    DT_FFT_Cmd_Usage ();
+                    return (false);
+                }
+                cmd->num_iter = atoi (opts->optarg);
+                break;
+            }
+            case 't': // num threads
+            {
+                len = (unsigned int)strspn (opts->optarg, "0123456789");
+                if (len == 0 || len != strlen (opts->optarg))
+                {
+                    DT_Mdep_printf ("Syntax Error -t option\n");
+                    DT_FFT_Cmd_Usage ();
+                    return (false);
+                }
+                cmd->num_threads = atoi (opts->optarg);
+                break;
+            }
+            case 'v': // num vis
+            {
+                len = (unsigned int)strspn (opts->optarg, "0123456789");
+                if (len == 0 || len != strlen (opts->optarg))
+                {
+                    DT_Mdep_printf ("Syntax Error -v option\n");
+                    DT_FFT_Cmd_Usage ();
+                    return (false);
+                }
+                cmd->num_vis = atoi (opts->optarg);
+                break;
+            }
+            case 'f': //function feature
+            {
+                if (strcmp (opts->optarg, "hwconn")==0)
+                {
+                    cmd->fft_type = HWCONN;
+                    cmd->size = 4; //4 cases for hwconn
+                    break;
+                }
+                else if (strcmp (opts->optarg, "cqmgt")==0)
+                {
+                    cmd->fft_type = CQMGT;
+                    cmd->size = 10; //10 cases for cqmgt
+                    break;
+                }
+                else if (strcmp (opts->optarg, "endpoint")==0)
+                {
+                    cmd->fft_type = ENDPOINT;
+                    cmd->size = 3; //3 cases for endpoint
+                    break;
+                }
+                else if (strcmp (opts->optarg, "pz")==0)
+                {
+                    cmd->fft_type = PTAGMGT;
+                    cmd->size = 3; //3 cases for Ptagmgt
+                    break;
+                }
+                else if (strcmp (opts->optarg, "mem")==0)
+                {
+                    cmd->fft_type = MEMMGT;
+                    cmd->size = 5; //5 cases for Memmgt
+                    break;
+                }
+                else if (strcmp (opts->optarg, "connmgt")==0)
+                {
+                    cmd->fft_type = CONNMGT;
+                    cmd->size = 2; //2 cases for connmgt
+                    break;
+                }
+                else if (strcmp (opts->optarg, "connmgt_client")==0)
+                {
+                    cmd->fft_type = CONNMGT_CLIENT;
+                    cmd->size = 16; //16 cases for connmgt_client
+                    break;
+                }
+                else if (strcmp (opts->optarg, "dataxfer")==0)
+                {
+                    cmd->fft_type = DATAXFER;
+                    cmd->size = 4; //4 cases for dataxfer
+                    break;
+                }
+                else if (strcmp (opts->optarg, "dataxfer_client")==0)
+                {
+                    cmd->fft_type = DATAXFER_CLIENT;
+                    cmd->size = 1; //1 case for dataxfer_client
+                    break;
+                }
+                else if (strcmp (opts->optarg, "queryinfo")==0)
+                {
+                    cmd->fft_type = QUERYINFO;
+                    cmd->size = 18; //18 cases for queryinfo
+                    break;
+                }
+                else if (strcmp (opts->optarg, "ns")==0)
+                {
+                    cmd->fft_type = NS;
+                    cmd->size = 10; //10 cases for ns
+                    break;
+                }
+                else if (strcmp (opts->optarg, "errhand")==0)
+                {
+                    cmd->fft_type = ERRHAND;
+                    cmd->size = 2; //2 cases for errhand
+                    break;
+                }
+                else if (strcmp (opts->optarg, "unsupp")==0)
+                {
+                    cmd->fft_type = UNSUPP;
+                    cmd->size = 2; //2 cases for unsupp
+                    break;
+                }
+                else if (strcmp (opts->optarg, "stress")==0)
+                {
+                    cmd->fft_type = STRESS;
+                    cmd->size = 6; //6 cases for stress
+                    break;
+                }
+                else if (strcmp (opts->optarg, "stress_client")==0)
+                {
+                    cmd->fft_type = STRESS_CLIENT;
+                    cmd->size = 6; //6 cases for stress_client
+                    break;
+                }
+                else
+                {
+                    DT_Mdep_printf ("don't know this function feature: %s\n",
+                                    opts->optarg);
+                    DT_FFT_Cmd_Usage ();
+                    return (false);
+                }
+            }
+            case 'R': // Service Reliability Level
+            {
+                cmd->ReliabilityLevel = DT_ParseQoS (opts->optarg);
+                if (0 == cmd->ReliabilityLevel)
+                {
+                    DT_Mdep_printf ("Invalid FFT Test Parameter: %c\n", c);
+                    DT_FFT_Cmd_Usage ();
+                    return (false);
+                }
+                break;
+            }
+
+            case '?':
+            default:
+            {
+                DT_Mdep_printf ("Invalid FFT Test Parameter: %c\n", c);
+                DT_FFT_Cmd_Usage ();
+                return (false);
+            }
+        }
+    }
+    if (cmd->device_name[0] == '\0')
+    {
+        if (!DT_Mdep_GetDefaultDeviceName (cmd->device_name))
+        {
+            DT_Mdep_printf ("can't get default device name\n");
+            DT_FFT_Cmd_Usage ();
+            return (false);
+        }
+    }
+
+    if (cmd->fft_type == NONE)
+    {
+        DT_Mdep_printf ("must define the function feature with -f to test\n");
+        DT_FFT_Cmd_Usage ();
+        return (false);
+    }
+    if (cmd->server_name[0] == '\0' &&
+        (cmd->fft_type == CONNMGT_CLIENT || cmd->fft_type == DATAXFER_CLIENT ||
+         cmd->fft_type == UNSUPP || cmd->fft_type == STRESS_CLIENT))
+    {
+        DT_Mdep_printf ("must define the server name with -s option\n");
+        DT_FFT_Cmd_Usage ();
+        return (false);
+    }
+
+    if (cmd->server_name[0] == '\0' && cmd->fft_type == NS )
+    {
+        DT_Mdep_printf ("\
+    Must specify host name or host IP address with -s option to be tested\n");
+        DT_FFT_Cmd_Usage ();
+        return (false);
+    }
+
+    //now parse the test cases
+    if (opts->optind == my_argc) //default: test all cases
+    {
+        for (i = 0; i < cmd->size; i++)
+        {
+            cmd->cases_flag[i] = true;
+        }
+        return true;
+    }
+
+    //test specified cases
+    i = opts->optind;
+    while (i < my_argc)
+    {
+        caseNum = atoi (my_argv[i]);
+        if (caseNum < 0 || caseNum >= cmd->size)
+        {
+            DT_Mdep_printf ("test case number must be within range : 0 -- %d\n",
+                            cmd->size-1);
+            DT_FFT_Cmd_Usage ();
+            return (false);
+        }
+        cmd->cases_flag[caseNum] = true;
+        i++;
+    }
+    return (true);
+}
+
+//--------------------------------------------------------------
+void DT_FFT_Cmd_Usage (void)
+{
+    char usage[] =
+    {
+        "dapltest -T F [-D <device name>] -f <function feature> [-i <iter num>] \n"
+        "[-t <thread num>] [-v <vi num>] [-s <server name>] [case0] [case1] [...]\n"
+        "USAGE:     [-D <device name>]\n"
+        "USAGE:         (Linux: JniIbdd0)\n"
+        "USAGE:     -f <function feature>\n"
+        "USAGE:         hwconn\n"
+        "USAGE:         endpoint\n"
+        "USAGE:         pz\n"
+        "USAGE:         mem\n"
+        "USAGE:         dataxfer\n"
+        "USAGE:         dataxfer_client\n"
+        "USAGE:         connmgt\n"
+        "USAGE:         connmgt_client (not yet implemented)\n"
+        "USAGE:         cqmgt (not yet implemented)\n"
+        "USAGE:         queryinfo\n"
+        "USAGE:         ns (not yet implemented)\n"
+        "USAGE:         errhand (not yet implemented)\n"
+        "USAGE:         unsupp (not yet implemented)\n"
+        "USAGE:         stress (not yet implemented)\n"
+        "USAGE:         stress_client (not yet implemented)\n"
+        "USAGE:     -i <iter num>: iteration count for stress test\n"
+        "USAGE:     -t <thread num>: number of threads for stress test\n"
+        "USAGE:     -v <vi num>: number of vis for stress test\n"
+        "USAGE:     -s <server name>\n"
+        "USAGE:         server host name or ip address\n"
+        "USAGE:     [-R <service reliability>]\n"
+        "USAGE:         (BE == QOS_BEST_EFFORT - Default )\n"
+        "USAGE:         (HT == QOS_HIGH_THROUGHPUT)\n"
+        "USAGE:         (LL == QOS_LOW_LATENCY)\n"
+        "USAGE:         (EC == QOS_ECONOMY)\n"
+        "USAGE:         (PM == QOS_PREMIUM)\n"
+        "NOTE: iter_num is just for stress_client test, default 100000\n"
+        "NOTE: Server_name must be specified for connmgt_client,
dataxfer_client, \n" + " NS and unsupp function feature.\n" + "NOTE: if test cases are not specified, test all cases in that function\n" + " feature. else just test the specified cases\n" + }; + + DT_Mdep_printf ("USAGE: -------FFT TEST------------\n"); + DT_Mdep_printf ("%s\n", usage); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.h new file mode 100644 index 00000000..2a12f845 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_cmd.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_FFT_CMD_H__ +#define __DAPL_FFT_CMD_H__ + +#include "dapl_mdep.h" + +#define MAXCASES 100 + +typedef enum +{ + NONE, + HWCONN, + CQMGT, + ENDPOINT, + PTAGMGT, + MEMMGT, + CONNMGT, + CONNMGT_CLIENT, + DATAXFER, + DATAXFER_CLIENT, + QUERYINFO, + NS, + ERRHAND, + UNSUPP, + STRESS, + STRESS_CLIENT, +} FFT_Type_e; + + +#pragma pack(1) +typedef struct +{ + FFT_Type_e fft_type; + char device_name[256]; //-D + char server_name[256]; + bool cases_flag[MAXCASES]; + int size; + int num_iter; //-i + int num_threads; //-t + int num_vis; //-v + DAT_QOS ReliabilityLevel; //-R +} FFT_Cmd_t; + +typedef struct +{ + int (*fun) ( FFT_Cmd_t*); +} FFT_Testfunc_t; + +#pragma pack() + +/* + typedef struct + { + Mdep_LockType Thread_counter_lock; + int Thread_counter; + + Mdep_LockType Thread_success_lock; + int Thread_success_counter; + } Stress_glob_t; + + typedef struct + { + Stress_glob_t *glob; + Thread *thread_ptr; + + FFT_Cmd_t *cmd; + int flag; + unsigned int discriminator; + bool is_server; + } Stress_arg_t; + + + typedef struct + { + VIP_VI_HANDLE vi_handle; + Bpool *bp; + VIP_DESCRIPTOR *recv_desc; + } Stress_vi_context_t; + */ + +//#pragma pack (2) +//typedef struct +//{ +// char server_name[256]; /* -s */ +// char device_name[256]; /* -D */ +// DAT_UINT32 debug; /* -d */ +// DAT_QOS ReliabilityLevel; /* -R */ +//} Quit_Cmd_t; +//#pragma pack (8) + + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_connmgt.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_connmgt.c new file mode 100644 index 00000000..0c5bb378 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_connmgt.c @@ -0,0 +1,120 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_proto.h" +#include "dapl_fft_util.h" + +int DT_connmgt_case0 (FFT_Cmd_t *cmd) +{ + FFT_Connection_t conn; + int res = 1; + DAT_RETURN rc = 0; + + DT_Mdep_printf ("\ + Description: Ensure time in dat_evd_wait works correctly\n"); + + DT_fft_init_server (cmd, &conn); + DT_assert (NULL != conn.ia_handle); + + rc = dat_evd_wait (conn.cr_evd, 10*1000000, 1, &conn.event, + &conn.count); + DT_assert_dat (DAT_GET_TYPE(rc) == DAT_TIMEOUT_EXPIRED); + +cleanup: + rc = DT_fft_destroy_conn_struct (&conn); + DT_assert_clean (rc == DAT_SUCCESS); + + return res; +} + +int DT_connmgt_case1 (FFT_Cmd_t *cmd) +{ + FFT_Connection_t conn; + int res = 1; + DAT_RETURN rc; + + DT_Mdep_printf ("\ + Description: Attempt to use timeout of 0 in dat_evd_wait\n"); + + DT_fft_init_server (cmd, &conn); + DT_assert (NULL != conn.ia_handle); + + rc = dat_evd_wait (conn.cr_evd, 0, 1, &conn.event, &conn.count); + DT_assert_dat (DAT_GET_TYPE(rc) == DAT_TIMEOUT_EXPIRED); + +cleanup: + rc = DT_fft_destroy_conn_struct (&conn); + DT_assert_clean (rc == DAT_SUCCESS); + return res; + +} + + +void DT_connmgt_test (FFT_Cmd_t *cmd) +{ + int i; + int res; + FFT_Testfunc_t cases_func[] = + { + { DT_connmgt_case0 }, + { DT_connmgt_case1 }, + }; + + for (i = 0; i < cmd->size; i++) + { + if (cmd->cases_flag[i]) + { + if (cmd->cases_flag[i]) + { + + DT_Mdep_printf ("\ + *********************************************************************\n"); + DT_Mdep_printf ("\ + Function feature: Connect Management (Server side) case: %d\n", i); + res = cases_func[i].fun (cmd); + if (res==1) + { + DT_Mdep_printf ("Result: PASS\n"); + } + else if (res ==0) + { + DT_Mdep_printf ("Result: FAIL\n"); + } + else if (res ==-1) + { + DT_Mdep_printf ("Result: use other test tool\n"); + } + else if (res ==-2) + { + DT_Mdep_printf ("Result: not support or next stage to develop\n"); + } + + DT_Mdep_printf ("\ + *********************************************************************\n"); + } + } + } +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer.c new file mode 100644 index 00000000..14786994 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer.c @@ -0,0 +1,153 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. 
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_proto.h"
+#include "dapl_fft_util.h"
+
+/*--------------------------------------------------------*/
+int DT_dataxfer_generic ( FFT_Cmd_t *cmd, int test_case)
+{
+    FFT_Connection_t conn;
+    DAT_RETURN rc = 0;
+    int res = 1;
+    DT_fft_init_server (cmd, &conn);
+    DT_assert (NULL != conn.ia_handle);
+
+    DT_fft_listen (&conn);
+
+    switch (test_case)
+    {
+        case 0:
+        {
+            DT_Mdep_printf ("Posting null send buffer\n");
+            rc = DT_post_send_buffer (0, conn.bpool, 0,
+                                      DT_Bpool_GetBuffSize (conn.bpool, 0));
+            DT_assert_dat (DAT_GET_TYPE(rc) == DAT_INVALID_HANDLE);
+            break;
+        }
+        case 1:
+        {
+            DT_Mdep_printf ("Call evd wait with null evd\n");
+            rc = dat_evd_wait (0, DAT_TIMEOUT_INFINITE, 1, &conn.event,
+                               &conn.count);
+            DT_assert_dat (DAT_GET_TYPE(rc) == DAT_INVALID_HANDLE);
+            break;
+        }
+        case 2:
+        {
+            DT_Mdep_printf ("Call evd wait with empty send queue\n");
+            rc = dat_evd_wait (conn.send_evd, 10*1000000, 1, &conn.event,
+                               &conn.count);
+            DT_assert_dat (DAT_GET_TYPE(rc) == DAT_TIMEOUT_EXPIRED);
+            break;
+        }
+        case 3:
+        {
+            DT_Mdep_printf ("Posting null recv buffer\n");
+            rc = DT_post_recv_buffer (0, conn.bpool, 0,
+                                      DT_Bpool_GetBuffSize (conn.bpool, 0));
+            DT_assert_dat (DAT_GET_TYPE(rc) == DAT_INVALID_HANDLE);
+            break;
+        }
+    }
+cleanup:
+    DT_assert_clean (DT_fft_destroy_conn_struct (&conn));
+    return res;
+}
+
+int DT_dataxfer_case0 ( FFT_Cmd_t *cmd)
+{
+    DT_Mdep_printf ("\
+    Description: Call dat_ep_post_send with null ep_handle.\n");
+    return DT_dataxfer_generic (cmd, 0);
+}
+
+int DT_dataxfer_case1 ( FFT_Cmd_t *cmd)
+{
+    DT_Mdep_printf ("\
+    Description: Call dat_evd_wait with null evd.\n");
+    return DT_dataxfer_generic (cmd, 1);
+}
+
+int DT_dataxfer_case2 ( FFT_Cmd_t *cmd)
+{
+    DT_Mdep_printf ("\
+    Description: Call dat_evd_wait on an empty send queue.\n");
+    return DT_dataxfer_generic (cmd, 2);
+}
+
+int DT_dataxfer_case3 ( FFT_Cmd_t *cmd)
+{
+    DT_Mdep_printf ("\
+    Description: Call dat_ep_post_recv with null ep_handle.\n");
+    return DT_dataxfer_generic (cmd, 3);
+}
+
+/*-------------------------------------------------------------*/
+void DT_dataxfer_test (FFT_Cmd_t *cmd)
+{
+    int i;
+    int res;
+    FFT_Testfunc_t cases_func[] =
+    {
+        { DT_dataxfer_case0 },
+        { DT_dataxfer_case1 },
+        { DT_dataxfer_case2 },
+        { DT_dataxfer_case3 },
+    };
+
+    for (i = 0; i < cmd->size; i++)
+    {
+        if (cmd->cases_flag[i])
+        {
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+            DT_Mdep_printf ("\
+    Function feature: Dataxfer case: %d\n", i);
+            res = cases_func[i].fun (cmd);
+            if (res == 1)
+            {
+                DT_Mdep_printf ("Result: PASS\n");
+            }
+            else if (res == 0)
+            {
+                DT_Mdep_printf ("Result: FAIL\n");
+            }
+            else if (res == -1)
+            {
+                DT_Mdep_printf ("Result: use other test tool\n");
+            }
+            else if (res == -2)
+            {
+                DT_Mdep_printf ("Result: not support or next stage to develop\n");
+            }
+
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+        }
+    }
+    return;
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer_client.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer_client.c
new file mode 100644
index 00000000..b37cd6ef
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_dataxfer_client.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_proto.h"
+#include "dapl_fft_util.h"
+
+#define CONN_STATE 1
+#define TIMEOUT_TEST 2
+#define DATAXFER_TEST 3
+
+int DT_dataxfer_client_generic (FFT_Cmd_t *cmd, int flag)
+{
+    int res = 1;
+    FFT_Connection_t conn;
+    DAT_RETURN rc = 0;
+
+    DT_fft_init_client (cmd, &conn);
+    DT_assert_dat (conn.ia_handle != NULL);
+
+    DT_assert (DT_fft_connect (&conn));
+
+    if (flag == CONN_STATE)
+    {
+        res = 1;
+        goto cleanup;
+    }
+    else if (flag == TIMEOUT_TEST)
+    {
+        /* nothing to do here yet */
+    }
+    else if (flag == DATAXFER_TEST)
+    {
+        conn.bpool = DT_BpoolAlloc (0, conn.ia_handle, conn.pz_handle, NULL,
+                                    NULL, 4096, 2, DAT_OPTIMAL_ALIGNMENT, false, false);
+        DT_assert (conn.bpool != 0);
+        rc = DT_post_send_buffer (conn.ep_handle, conn.bpool, 0,
+                                  DT_Bpool_GetBuffSize (conn.bpool, 0));
+        DT_assert_dat (rc == DAT_SUCCESS);
+        rc = dat_evd_wait (conn.send_evd, 10*1000000, 1, &conn.event,
+                           &conn.count);
+        DT_assert_dat (rc == DAT_SUCCESS);
+        res = 1;
+        goto cleanup;
+    }
+    // cleanup
+cleanup:
+
+    if (conn.ep_handle)
+    {
+        // disconnect
+        DT_Mdep_printf ("Disconnect\n");
+        rc = dat_ep_disconnect (conn.ep_handle, DAT_CLOSE_ABRUPT_FLAG);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    rc = DT_fft_destroy_conn_struct (&conn);
+    DT_assert_clean (rc == DAT_SUCCESS);
+
+    return res;
+}
+
+int DT_dataxfer_client_case0 (FFT_Cmd_t *cmd)
+{
+    DT_Mdep_printf ("\
+    Description: This is a helper case on the client side for dataxfer case0.\n");
+    return DT_dataxfer_client_generic (cmd, CONN_STATE);
+}
+
+void DT_dataxfer_client_test (FFT_Cmd_t *cmd)
+{
+    int i;
+    int res;
+    FFT_Testfunc_t cases_func[] =
+    {
+        { DT_dataxfer_client_case0 },
+    };
+
+    for (i = 0; i < cmd->size; i++)
+    {
+        if (cmd->cases_flag[i])
+        {
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+            DT_Mdep_printf ("\
+    Function feature: Dataxfer client case: %d\n", i);
+            res = cases_func[i].fun (cmd);
+            if (res == 1)
+            {
+                DT_Mdep_printf ("Result: PASS\n");
+            }
+            else if (res == 0)
+            {
+                DT_Mdep_printf ("Result: FAIL\n");
+            }
+            else if (res == -1)
+            {
+                DT_Mdep_printf ("Result: use other test tool\n");
+            }
+            else if (res == -2)
+            {
+                DT_Mdep_printf ("Result: not support or next stage to develop\n");
+            }
+
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+        }
+    }
+    return;
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_endpoint.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_endpoint.c
new file mode 100644
index 00000000..4fc9427a
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_endpoint.c
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_proto.h" +#include "dapl_fft_util.h" + +#define CQENTRYCOUNT 100 +#define BUFFSIZE 1024 +#define DEFAULT_QUEUE_LEN 10 + +int DT_endpoint_generic (FFT_Cmd_t *cmd, + bool destroy_pz_early) +{ + char *dev_name; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_EP_HANDLE ep_handle; + DAT_EVD_HANDLE evd_handle; + DAT_EVD_HANDLE conn_evd_handle; + DAT_EVD_HANDLE send_evd_handle; + DAT_EVD_HANDLE recv_evd_handle; + DAT_RETURN rc, wanted; + int res; + + res = 1; + ia_handle = NULL; + pz_handle = NULL; + ep_handle = NULL; + evd_handle = NULL; + conn_evd_handle = NULL; + send_evd_handle = NULL; + recv_evd_handle = NULL; + dev_name = cmd->device_name; + evd_handle = DAT_HANDLE_NULL; + +#ifdef DYNAMIC_DAT_LOADING + rc = dat_open ((const DAT_NAME_PTR)dev_name, + DEFAULT_QUEUE_LEN, &evd_handle, &ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + rc = dat_ia_open ((const DAT_NAME_PTR)dev_name, + DEFAULT_QUEUE_LEN, &evd_handle, &ia_handle); +#endif //DYNAMIC_DAT_LOADING + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_pz_create (ia_handle, &pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + if (destroy_pz_early) + { + if (pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + } + + rc = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, NULL, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &send_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, NULL, DAT_EVD_DTO_FLAG, + &recv_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, NULL, + DAT_EVD_CONNECTION_FLAG, &conn_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_ep_create (ia_handle, pz_handle, recv_evd_handle, send_evd_handle, + conn_evd_handle, NULL, &ep_handle); + if (destroy_pz_early) + { + wanted = DAT_INVALID_HANDLE; + } + else + { + wanted = DAT_SUCCESS; + } + DT_assert_dat (DAT_GET_TYPE(rc) == wanted); + +cleanup: + if (ep_handle) + { + rc = dat_ep_free (ep_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (send_evd_handle) + { + rc = dat_evd_free (send_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (recv_evd_handle) + { + rc = dat_evd_free (recv_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (conn_evd_handle) + { + rc = dat_evd_free (conn_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (!destroy_pz_early && pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (ia_handle) + { + rc = DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_clean (rc == DAT_SUCCESS); + } + return res; +} + +int DT_endpoint_case0 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Test if we can normally create endpoint and destory it.\n"); + DT_Mdep_printf ("\ + The endpoint is not associated with a CQ\n"); + return DT_endpoint_generic (cmd, + false); /* destroy pz early */ +} + +int DT_endpoint_case1 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: try to create endpoint with pz already destroyed\n"); + return DT_endpoint_generic (cmd, + true); /* destroy pz early */ +} + +int DT_endpoint_case2 (FFT_Cmd_t *cmd) +{ + char *dev_name; + DAT_IA_HANDLE ia_handle; + DAT_EP_HANDLE ep_handle; + DAT_EVD_HANDLE send_evd, conn_evd, recv_evd, cr_evd; 
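+    /* Every handle declared here is zeroed before use so that the
+       cleanup path can free only the objects actually created. */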
+    DAT_PZ_HANDLE pz_handle;
+    DAT_EVENT event;
+    Bpool *bpool;
+    int res;
+    DAT_RETURN rc;
+
+    DT_Mdep_printf ("\
+    Description: try to destroy ep with descriptor still in working queue\n");
+    res = 1;
+    bpool = 0;
+    pz_handle = 0;
+    ia_handle = 0;
+    ep_handle = 0;
+    send_evd = 0;
+    conn_evd = 0;
+    recv_evd = 0;
+    cr_evd = 0;
+    dev_name = cmd->device_name;
+
+    rc = DT_ia_open (dev_name, &ia_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+    rc = dat_pz_create (ia_handle, &pz_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+    rc = DT_ep_create (ia_handle, pz_handle, &cr_evd, &conn_evd, &send_evd,
+                       &recv_evd, &ep_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+    bpool = DT_BpoolAlloc (0, ia_handle, pz_handle, NULL, NULL, 4096, 1,
+                           DAT_OPTIMAL_ALIGNMENT, false, false);
+    DT_assert (bpool != 0);
+    DT_assert (DT_post_recv_buffer (ep_handle, bpool, 0, 4096) == true);
+    if (ep_handle)
+    {
+        rc = dat_ep_free (ep_handle);
+        DT_assert_dat (rc == DAT_SUCCESS);
+    }
+
+    /*
+     * Remove all DTOs.  The disconnect above may have
+     * flushed all posted operations, so this is just a
+     * clean up.
+     */
+    do
+    {
+        rc = dat_evd_dequeue ( recv_evd,
+                               &event);
+    } while ( DAT_GET_TYPE(rc) != DAT_QUEUE_EMPTY );
+cleanup:
+    if (bpool)
+    {
+        rc = DT_Bpool_Destroy (0, bpool);
+        DT_assert_clean (rc != false);
+    }
+    if (pz_handle)
+    {
+        rc = dat_pz_free (pz_handle);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (ia_handle)
+    {
+        rc = DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    return res;
+
+}
+
+/*-------------------------------------------------------------*/
+void DT_endpoint_test (FFT_Cmd_t *cmd)
+{
+    int i;
+    int res;
+    FFT_Testfunc_t cases_func[] =
+    {
+        { DT_endpoint_case0 },
+        { DT_endpoint_case1 },
+        { DT_endpoint_case2 },
+    };
+
+    for (i = 0; i < cmd->size; i++)
+    {
+        if (cmd->cases_flag[i])
+        {
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+            DT_Mdep_printf ("\
+    Function feature: EndPoint management case: %d\n", i);
+            res = cases_func[i].fun (cmd);
+            if (res == 1)
+            {
+                DT_Mdep_printf ("Result: PASS\n");
+            }
+            else if (res == 0)
+            {
+                DT_Mdep_printf ("Result: FAIL\n");
+            }
+            else if (res == -1)
+            {
+                DT_Mdep_printf ("Result: use other test tool\n");
+            }
+            else if (res == -2)
+            {
+                DT_Mdep_printf ("Result: not support or next stage to develop\n");
+            }
+
+            DT_Mdep_printf ("\
+    *********************************************************************\n");
+        }
+    }
+    return;
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_hwconn.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_hwconn.c
new file mode 100644
index 00000000..6cd9e332
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_hwconn.c
@@ -0,0 +1,225 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_proto.h" + + +/*--------------------------------------------------------*/ +int DT_hwconn_case0 ( FFT_Cmd_t *cmd) +{ + char* dev_name; + DAT_IA_HANDLE nic_handle; + DAT_EVD_HANDLE evd_handle; + DAT_RETURN rc; + int res = 1; + + DT_Mdep_printf ("\ + Description: Test if we can normally Open NIC and then close it\n"); + + dev_name= cmd->device_name; + nic_handle=0; + evd_handle = DAT_HANDLE_NULL; +#ifdef DYNAMIC_DAT_LOADING + rc=dat_open ((const DAT_NAME_PTR)dev_name, 10, &evd_handle, &nic_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + rc=dat_ia_open ((const DAT_NAME_PTR)dev_name, 10, &evd_handle, &nic_handle); +#endif // DYNAMIC_DAT_LOADING + DT_assert_dat (rc == DAT_SUCCESS); + rc=DT_ia_close (nic_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_dat (rc == DAT_SUCCESS); +cleanup: + + return res; +} + +/*--------------------------------------------------------*/ +int DT_hwconn_case1 ( FFT_Cmd_t *cmd) +{ + DAT_IA_HANDLE nic_handle; + DAT_RETURN rc; + DAT_EVD_HANDLE evd_handle; + char dev_name[100]; + int i; + + DT_Mdep_printf ("Description: try to open NIC with incorrect device name\n"); + DT_Mdep_printf (" (just num, one letter, multiple letter, num_letter\n"); + DT_Mdep_printf ("letter_num). You alse can do this test manually\n"); + DT_Mdep_printf ("dapltest -T F -D -f hwconn \n"); + + for (i=0; i< 5; i++) + { + if (i==0) + { + sprintf (dev_name, "%s", "40"); /* just number */ + } + else if (i==1) + { + sprintf (dev_name, "%s", "x"); /* just letter */ + } + else if (i==2) + { + sprintf (dev_name, "%s", "xsdf"); /* multiple letter */ + } + else if (i==3) + { + sprintf (dev_name, "%s", "x34"); /* letter_number */ + } + else if (i==4) + { + sprintf (dev_name, "%s", "34df"); /* number_letter */ + } + + evd_handle = DAT_HANDLE_NULL; +#ifdef DYNAMIC_DAT_LOADING + rc=dat_open ((const DAT_NAME_PTR)dev_name, 10, &evd_handle, &nic_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + rc=dat_ia_open ((const DAT_NAME_PTR)dev_name, 10, &evd_handle, &nic_handle); +#endif //DYNAMIC_DAT_LOADING + if (DAT_GET_TYPE(rc) != DAT_PROVIDER_NOT_FOUND) + { + //const char *major_msg, *minor_msg; + + DT_Mdep_printf (" \ + fff not get expected result when open NIC with device name: %s\n", dev_name); + //dat_strerror (rc, &major_msg, &minor_msg); + DT_Mdep_printf ("ERROR: %x \n",rc); + + + if (rc==DAT_SUCCESS) + { + rc = DT_ia_close (nic_handle, DAT_CLOSE_ABRUPT_FLAG); + + DT_assert_clean (rc == DAT_SUCCESS); + } + return 0; + } + } + return 1; +} + +/*--------------------------------------------------------*/ +int DT_hwconn_case2 ( FFT_Cmd_t *cmd) +{ + DAT_IA_HANDLE nic_handle; + DAT_RETURN rc; + int res=1; + + DT_Mdep_printf ("\ + Description: Try to close nic with Nic handle is null (NIC not open)\n"); + nic_handle=0; + rc=DT_ia_close (nic_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_dat (DAT_GET_TYPE(rc) ==DAT_INVALID_HANDLE); + +cleanup: + return res; +} + +/*--------------------------------------------------------*/ +int DT_hwconn_case3 ( FFT_Cmd_t *cmd) +{ + FFT_Connection_t conn; + DAT_RETURN rc; + int res; + + DT_Mdep_printf ("Description: Test if we can close NIC when the created \n"); + DT_Mdep_printf ("endpoint 
has not been destroyed.\n"); + DT_Mdep_printf ("The problem for this case is that once the hca is closed, \n"); + DT_Mdep_printf ("there is no way to destroy the endpoint's resources\n"); + DT_Mdep_printf ("thus the test leaks a small amount of memory\n"); + + res=1; + + DT_fft_init_client (cmd, &conn); + + /* try to close nic when vi have not destroyed */ + if (conn.ia_handle) + { + rc= DT_ia_close (conn.ia_handle, DAT_CLOSE_ABRUPT_FLAG); + if (rc !=DAT_SUCCESS) + { + DT_Mdep_printf ("Warning: DT_ia_close fails %s, reboot for cleanup\n", + DT_RetToString (rc)); + return 0; + } + } + /* if nic is closed, it is impossible to destory vi and ptag */ + //DT_fft_destroy_conn_struct(&conn); + return res; + +} + + +/*-------------------------------------------------------------*/ +void DT_hwconn_test (FFT_Cmd_t *cmd) +{ + int i; + int res; + FFT_Testfunc_t cases_func[] = + { + { DT_hwconn_case0 }, + { DT_hwconn_case1 }, + { DT_hwconn_case2 }, + { DT_hwconn_case3 }, + }; + + for (i=0; isize; i++) + { + if (cmd->cases_flag[i]) + { + DT_Mdep_printf ("\ + *********************************************************************\n"); + DT_Mdep_printf ("\ + Function feature: Hardware connection case: %d\n", i); + res = cases_func[i].fun (cmd); + if (res==1) + { + DT_Mdep_printf ("Result: PASS\n"); + } + else if (res ==0) + { + DT_Mdep_printf ("Result: FAIL\n"); + } + else if (res ==-1) + { + DT_Mdep_printf ("Result: use other test tool\n"); + } + else if (res ==-2) + { + DT_Mdep_printf ("Result: next stage to develop\n"); + } + + DT_Mdep_printf ("\ + *********************************************************************\n"); + } + } + return; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_mem.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_mem.c new file mode 100644 index 00000000..fe263066 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_mem.c @@ -0,0 +1,237 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_proto.h" +#include "dapl_fft_util.h" +#include + +#define CQENTRYCOUNT 100 +#define BUFFSIZE 1024 + +/*--------------------------------------------------------*/ +int DT_mem_generic (FFT_Cmd_t *cmd, int flag) +{ + DAT_RETURN rc, expect; + FFT_Connection_t conn; + DAT_REGION_DESCRIPTION region; + DAT_VLEN reg_size; + DAT_LMR_HANDLE lmr_handle; + DAT_LMR_CONTEXT lmr_context; + DAT_VADDR reg_addr; + unsigned char *alloc_ptr; + int res; + DAT_COUNT buffer_size; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + + rc = 0; + expect = 0; + res = 1; + lmr_handle = 0; + lmr_context = 0; + reg_addr = 0; + alloc_ptr = 0; + ia_handle = 0; + pz_handle = 0; + + DT_fft_init_client (cmd, &conn); + DT_assert (NULL != conn.ia_handle); + + if (flag == 2) + { + buffer_size = 0; + alloc_ptr = 0; + } + else + { + buffer_size = BUFFSIZE * sizeof (unsigned char); + alloc_ptr = (unsigned char *)DT_Mdep_Malloc (buffer_size); + DT_assert (alloc_ptr); + } + + + memset (®ion, 0, sizeof (region)); + region.for_va = alloc_ptr; + + ia_handle = conn.ia_handle; + + if (flag != 3) + { + pz_handle = conn.pz_handle; + } + + if (flag != 4) + { + DT_Mdep_printf ("Registering memory\n"); + rc = dat_lmr_create (ia_handle, + DAT_MEM_TYPE_VIRTUAL, + region, + buffer_size, + conn.pz_handle, + DAT_MEM_PRIV_ALL_FLAG, + &lmr_handle, + &lmr_context, + NULL, /* FIXME */ + ®_size, + ®_addr); + if (flag == 2) + { + expect = DAT_LENGTH_ERROR; + } + else + { + expect = DAT_SUCCESS; + } + DT_assert_dat (DAT_GET_TYPE(rc) == expect); + } + if (flag == 1) + { + if (lmr_handle) + { + rc = dat_lmr_free (lmr_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + lmr_handle = 0; + + rc = dat_lmr_create (conn.ia_handle, + DAT_MEM_TYPE_VIRTUAL, + region, + buffer_size, + conn.pz_handle, + DAT_MEM_PRIV_ALL_FLAG, + &lmr_handle, + &lmr_context, + NULL, /* FIXME */ + ®_size, + ®_addr); + DT_assert_dat (rc == DAT_SUCCESS); + } + +cleanup: + if (lmr_handle) + { + rc = dat_lmr_free (lmr_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + if (alloc_ptr) + { + DT_Mdep_Free (alloc_ptr); + } + rc = DT_fft_destroy_conn_struct (&conn); + DT_assert_clean (rc == DAT_SUCCESS); + + return res; + +} +int DT_mem_case0 ( FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Test if we can register typical size of memory\n"); + DT_Mdep_printf ("\ + then deregister it.\n"); + return DT_mem_generic (cmd, 0); +} + +/*--------------------------------------------------------*/ +int DT_mem_case1 ( FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Test if we can register typical size of memory\n"); + DT_Mdep_printf ("\ + deregister, then register it again.\n"); + return DT_mem_generic (cmd, 1); +} + +/*--------------------------------------------------------*/ +int DT_mem_case2 ( FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Try to register memory with memory size 0\n"); + return DT_mem_generic (cmd, 2); +} + +/*--------------------------------------------------------*/ +int DT_mem_case3 ( FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Try to register memory with null pz\n"); + return DT_mem_generic (cmd, 3); +} + +/*--------------------------------------------------------*/ +int DT_mem_case4 ( FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("\ + Description: Try to deregister memory with null lmr_handle\n"); + return DT_mem_generic (cmd, 4); +} + +/*-------------------------------------------------------------*/ +void DT_mem_test (FFT_Cmd_t *cmd) +{ + int i; + int res; + FFT_Testfunc_t cases_func[] = + { + { DT_mem_case0 }, + { 
DT_mem_case1 }, + { DT_mem_case2 }, + { DT_mem_case3 }, + { DT_mem_case4 }, + }; + + for (i=0; isize; i++) + { + if (cmd->cases_flag[i]) + { + DT_Mdep_printf ("\ + *********************************************************************\n"); + DT_Mdep_printf ("\ + Function feature: Memory register/deregister case: %d\n", i); + res = cases_func[i].fun (cmd); + if (res==1) + { + DT_Mdep_printf ("Result: PASS\n"); + } + else if (res ==0) + { + DT_Mdep_printf ("Result: FAIL\n"); + } + else if (res ==-1) + { + DT_Mdep_printf ("Result: use other test tool\n"); + } + else if (res ==-2) + { + DT_Mdep_printf ("Result: not support or next stage to develop\n"); + } + + DT_Mdep_printf ("\ + *********************************************************************\n"); + } + } + return; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_pz.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_pz.c new file mode 100644 index 00000000..c7d48c6b --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_pz.c @@ -0,0 +1,254 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_proto.h" +#include "dapl_fft_cmd.h" +#include "dapl_fft_util.h" + +#define CQENTRYCOUNT 100 +#define BUFFSIZE 1024 + +/*--------------------------------------------------------*/ +int DT_pz_case0 ( FFT_Cmd_t *cmd) +{ + char* dev_name; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_EVD_HANDLE evd_handle; + DAT_RETURN rc; + int res; + + DT_Mdep_printf ("\ + Description: Test if we can normally create pz and destroy it.\n"); + + res=1; + ia_handle=0; + pz_handle =0; + evd_handle = DAT_HANDLE_NULL; + dev_name= cmd->device_name; + + rc = DT_ia_open (dev_name, &ia_handle); + DT_assert_dat (rc == DAT_SUCCESS); + rc = dat_pz_create (ia_handle, &pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + +cleanup: + if (pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + if (ia_handle) + { + rc = DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_dat (rc == DAT_SUCCESS); + } + return res; +} + +/*--------------------------------------------------------*/ +int DT_pz_case1 ( FFT_Cmd_t *cmd) +{ + char* dev_name; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_EP_HANDLE ep_handle; + DAT_EVD_HANDLE conn_evd, send_evd, recv_evd, cr_evd; + DAT_RETURN rc; + int res; + + DT_Mdep_printf ("\ + Description: try to destroy pz with vi still associated with it\n"); + + res=1; + ia_handle=0; + pz_handle =0; + ep_handle=0; + conn_evd = 0; + send_evd = 0; + recv_evd = 0; + cr_evd = 0; + dev_name= cmd->device_name; + + rc = DT_ia_open (dev_name, &ia_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_pz_create (ia_handle, &pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = DT_ep_create (ia_handle, pz_handle, &cr_evd, &conn_evd, &send_evd, + &recv_evd, &ep_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + if (pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_dat (DAT_GET_TYPE(rc) == DAT_INVALID_STATE); + } + +cleanup: + /* corrrect order */ + if (ep_handle) + { + rc=dat_ep_free (ep_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + if (conn_evd) + { + rc = dat_evd_free (conn_evd); + DT_assert_clean (rc == DAT_SUCCESS); + } + if (send_evd) + { + rc = dat_evd_free (send_evd); + DT_assert_clean (rc == DAT_SUCCESS); + } + if (recv_evd) + { + rc = dat_evd_free (recv_evd); + DT_assert_clean (rc == DAT_SUCCESS); + } + if (pz_handle) + { + rc=dat_pz_free (pz_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (ia_handle) + { + rc=DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_clean (rc == DAT_SUCCESS); + } + + return res; +} + +/*--------------------------------------------------------*/ +int DT_pz_case2 ( FFT_Cmd_t *cmd) +{ + char* dev_name; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + Bpool *bpool; + DAT_RETURN rc; + int res; + + DT_Mdep_printf ("\ + Description: try to destroy pz with registered memory still\n"); + DT_Mdep_printf ("\ + associated with it\n"); + + res=1; + ia_handle=0; + pz_handle =0; + bpool=0; + dev_name= cmd->device_name; + + rc = DT_ia_open (dev_name, &ia_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_pz_create (ia_handle, &pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + /* allocate and register bpool */ + bpool = DT_BpoolAlloc (0, ia_handle, pz_handle, NULL, + NULL, BUFFSIZE, 1, DAT_OPTIMAL_ALIGNMENT, + false, false); + DT_assert (bpool != 0); + + if (pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_dat (DAT_GET_TYPE(rc) == DAT_INVALID_STATE); + } + +cleanup: + + /* deregister and free bpool */ + if (DT_Bpool_Destroy (0, bpool)==false) 
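+    /* the registered bpool must be destroyed before pz_handle can be
+       freed; dat_pz_free with memory still registered returns
+       DAT_INVALID_STATE, as asserted above */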
+ { + DT_Mdep_printf ("Warning: Destroy bpool fails, reboot for cleanup\n"); + return 0; + } + if (pz_handle) + { + rc=dat_pz_free (pz_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (ia_handle) + { + rc=DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_clean (rc == DAT_SUCCESS); + } + + return res; +} + +/*-------------------------------------------------------------*/ +void DT_pz_test (FFT_Cmd_t *cmd) +{ + int i; + int res; + FFT_Testfunc_t cases_func[] = + { + { DT_pz_case0 }, + { DT_pz_case1 }, + { DT_pz_case2 }, + }; + + for (i=0; isize; i++) + { + if (cmd->cases_flag[i]) + { + DT_Mdep_printf ("\ + *********************************************************************\n"); + DT_Mdep_printf ("\ + Function feature: Protection Zone management case: %d\n", i); + res = cases_func[i].fun (cmd); + if (res==1) + { + DT_Mdep_printf ("Result: PASS\n"); + } + else if (res ==0) + { + DT_Mdep_printf ("Result: FAIL\n"); + } + else if (res ==-1) + { + DT_Mdep_printf ("Result: use other test tool\n"); + } + else if (res ==-2) + { + DT_Mdep_printf ("Result: not support or next stage to develop\n"); + } + + DT_Mdep_printf ("\ + *********************************************************************\n"); + } + } + return; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_queryinfo.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_queryinfo.c new file mode 100644 index 00000000..5c0676e3 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_queryinfo.c @@ -0,0 +1,621 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_proto.h" +#include "dapl_fft_util.h" + +#define CQENTRYCOUNT 100 +#define BUFFSIZE 1024 +#define DEFAULT_QUEUE_LEN 10 + +#if defined(WIN32) +static DAT_OS_WAIT_PROXY_AGENT NULLPROXY = { + (DAT_PVOID) NULL,(DAT_AGENT_FUNC) NULL}; +#endif + +int DT_queryinfo_basic (FFT_Cmd_t *cmd, + FFT_query_enum object_to_query, + DAT_RETURN result_wanted) +{ + char *dev_name; + DAT_IA_HANDLE ia_handle; + DAT_IA_ATTR ia_attributes; + DAT_PROVIDER_ATTR provider_attributes; + DAT_EVD_HANDLE evd_handle; + DAT_EVD_HANDLE conn_evd_handle; + DAT_EVD_HANDLE cr_evd_handle; + DAT_EVD_HANDLE send_evd_handle; + DAT_EVD_HANDLE recv_evd_handle; + DAT_EP_HANDLE ep_handle; + DAT_EP_PARAM ep_param; + DAT_CNO_HANDLE cno_handle; + DAT_CNO_PARAM cno_param; + DAT_EVD_PARAM evd_param; + DAT_PSP_HANDLE psp_handle; + DAT_PSP_PARAM psp_param; + DAT_RSP_HANDLE rsp_handle; + DAT_RSP_PARAM rsp_param; + DAT_PZ_HANDLE pz_handle; + DAT_PZ_PARAM pz_param; + DAT_LMR_HANDLE lmr_handle; + DAT_LMR_PARAM lmr_param; + DAT_LMR_CONTEXT lmr_context; + DAT_RMR_HANDLE rmr_handle; + DAT_RMR_PARAM rmr_param; + DAT_REGION_DESCRIPTION region; + DAT_VLEN reg_size; + DAT_VADDR reg_addr; + DAT_COUNT buffer_size; + unsigned char *alloc_ptr; + + DAT_RETURN rc; + int res = 1; + buffer_size = BUFFSIZE * sizeof (unsigned char); + reg_addr = 0; + alloc_ptr = 0; + + ia_handle = NULL; + pz_handle = NULL; + ep_handle = NULL; + lmr_handle = NULL; + rmr_handle = NULL; + pz_handle = NULL; + psp_handle = NULL; + rsp_handle = NULL; + cno_handle = NULL; + + evd_handle = DAT_HANDLE_NULL; + conn_evd_handle = DAT_HANDLE_NULL; + cr_evd_handle = DAT_HANDLE_NULL; + recv_evd_handle = DAT_HANDLE_NULL; + send_evd_handle = DAT_HANDLE_NULL; + dev_name = cmd->device_name; + + /* All functions require an ia_handle to be created */ +#ifdef DYNAMIC_DAT_LOADING + rc = dat_open ((const DAT_NAME_PTR)dev_name, + DEFAULT_QUEUE_LEN, + &evd_handle, + &ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + rc = dat_ia_open ((const DAT_NAME_PTR)dev_name, + DEFAULT_QUEUE_LEN, + &evd_handle, + &ia_handle); +#endif // DYNAMIC_DAT_LOADING + DT_assert_dat (rc == DAT_SUCCESS); + + /* These functions require a pz_handle to be created */ + if ( (object_to_query == QUERY_EVD) || + (object_to_query == QUERY_RMR) || + (object_to_query == QUERY_LMR) || + (object_to_query == QUERY_EP) || + (object_to_query == QUERY_RSP) || + (object_to_query == QUERY_PZ) ) + { + rc = dat_pz_create (ia_handle, + &pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + + /* These functions require a ep_handle to be created */ + if ( (object_to_query == QUERY_EP) || + (object_to_query == QUERY_RSP) ) + { + rc = dat_evd_create (ia_handle, + DEFAULT_QUEUE_LEN, + cno_handle, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &send_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_evd_create (ia_handle, + DEFAULT_QUEUE_LEN, + cno_handle, + DAT_EVD_DTO_FLAG, + &recv_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_evd_create (ia_handle, + DEFAULT_QUEUE_LEN, + cno_handle, + DAT_EVD_CONNECTION_FLAG, + &conn_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + rc = dat_ep_create (ia_handle, + pz_handle, + recv_evd_handle, + send_evd_handle, + conn_evd_handle, + NULL, + &ep_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + + /* These functions require a CR EVD to be created. 
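+  * Connection requests arriving at a PSP or RSP are delivered as events
+  * on an EVD created with DAT_EVD_CR_FLAG, which is why these two query
+  * paths build a CR EVD before creating their listen point.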
*/ + if ( (object_to_query == QUERY_PSP) || + (object_to_query == QUERY_RSP) ) + { + rc = dat_evd_create (ia_handle, + DEFAULT_QUEUE_LEN, + cno_handle, + DAT_EVD_CR_FLAG, + &cr_evd_handle); + DT_assert_dat (rc == DAT_SUCCESS); + } + + /* Test dat_ia_query function */ + if (object_to_query == QUERY_IA) + { + if (result_wanted == DAT_SUCCESS) + { + rc = dat_ia_query (ia_handle, + &evd_handle, + DAT_IA_ALL, + &ia_attributes, + DAT_PROVIDER_FIELD_ALL, + &provider_attributes); + } + else if (result_wanted == DAT_INVALID_PARAMETER) + { + /* + * The only way to get an invalid parameter is to + * NULL out ia_attr and for the DAT_IA_ATTR_MASK to + * have values + */ + rc = dat_ia_query (ia_handle, + &evd_handle, + DAT_IA_ALL, + NULL, + DAT_PROVIDER_FIELD_ALL, + &provider_attributes); + } + else if (result_wanted == DAT_INVALID_HANDLE) + { + rc = dat_ia_query (evd_handle, + &evd_handle, + DAT_IA_ALL, + &ia_attributes, + DAT_PROVIDER_FIELD_ALL, + &provider_attributes); + } + } + + /* Test dat_cno_query function */ + else if (object_to_query == QUERY_CNO) + { +#if defined(WIN32) + rc = dat_cno_create (ia_handle, + NULLPROXY, + &cno_handle); +#else + rc = dat_cno_create (ia_handle, + DAT_OS_WAIT_PROXY_AGENT_NULL, + &cno_handle); +#endif + + DT_assert_dat (rc == DAT_SUCCESS); + + if (result_wanted == DAT_SUCCESS) + { + rc = dat_cno_query (cno_handle, + DAT_CNO_FIELD_ALL, + &cno_param); + } + else if (result_wanted == DAT_INVALID_PARAMETER) + { + rc = dat_cno_query (cno_handle, + DAT_CNO_FIELD_ALL, + NULL); + } + else if (result_wanted == DAT_INVALID_HANDLE) + { + rc = dat_cno_query (ia_handle, + DAT_CNO_FIELD_ALL, + &cno_param); + } + } + /* Test dat_evd_query function */ + else if (object_to_query == QUERY_EVD) + { + if (result_wanted == DAT_SUCCESS) + { + rc = dat_evd_query (evd_handle, + DAT_EVD_FIELD_ALL, + &evd_param); + } + else if (result_wanted == DAT_INVALID_PARAMETER) + { + rc = dat_evd_query (evd_handle, + DAT_EVD_FIELD_ALL, + NULL); + } + else if (result_wanted == DAT_INVALID_HANDLE) + { + rc = dat_evd_query (ia_handle, + DAT_EVD_FIELD_ALL, + &evd_param); + } + } + + /* Test dat_psp_query function */ + else if (object_to_query == QUERY_PSP) + { + rc = dat_psp_create (ia_handle, + SERVER_PORT_NUMBER, + cr_evd_handle, + DAT_PSP_PROVIDER_FLAG, + &psp_handle); + DT_assert_dat (rc == DAT_SUCCESS); + if (result_wanted == DAT_SUCCESS) + { + rc = dat_psp_query (psp_handle, + DAT_PSP_FIELD_ALL, + &psp_param); + } + else if (result_wanted == DAT_INVALID_PARAMETER) + { + rc = dat_psp_query (psp_handle, + DAT_PSP_FIELD_ALL, + NULL); + } + else if (result_wanted == DAT_INVALID_HANDLE) + { + rc = dat_psp_query (evd_handle, + DAT_PSP_FIELD_ALL, + &psp_param); + } + } + + /* Test dat_rsp_query function */ + else if (object_to_query == QUERY_RSP) + { + rc = dat_rsp_create (ia_handle, + SERVER_PORT_NUMBER, + ep_handle, + cr_evd_handle, + &rsp_handle); + DT_assert_dat (rc == DAT_SUCCESS); + rc = dat_rsp_query (rsp_handle, + DAT_RSP_FIELD_ALL, + &rsp_param); + } + + /* Test dat_cr_query function */ + else if (object_to_query == QUERY_CR) + { + /* This query is tested in the conmgt test */ + res = -1; + } + + /* Test dat_ep_query function */ + else if (object_to_query == QUERY_EP) + { + rc = dat_ep_query (ep_handle, + DAT_EP_FIELD_ALL, + &ep_param); + } + + /* Test dat_pz_query function */ + else if (object_to_query == QUERY_PZ) + { + rc = dat_pz_query (pz_handle, + DAT_PZ_FIELD_ALL, + &pz_param); + } + + /* Test dat_lmr_query function */ + else if (object_to_query == QUERY_LMR) + { + alloc_ptr = (unsigned char 
*)DT_Mdep_Malloc (buffer_size); + DT_assert (alloc_ptr); + memset (®ion, 0, sizeof (region)); + region.for_va = alloc_ptr; + rc = dat_lmr_create (ia_handle, + DAT_MEM_TYPE_VIRTUAL, + region, + buffer_size, + pz_handle, + DAT_MEM_PRIV_ALL_FLAG, + &lmr_handle, + &lmr_context, + NULL, /* FIXME */ + ®_size, + ®_addr); + DT_assert_dat (rc == DAT_SUCCESS); + rc = dat_lmr_query (lmr_handle, + DAT_LMR_FIELD_ALL, + &lmr_param); + } + + /* Test dat_rmr_query function */ + else if (object_to_query == QUERY_RMR) + { + rc = dat_rmr_create (pz_handle, + &rmr_handle); + DT_assert_dat (rc == DAT_SUCCESS); + /* We don't bind the RMR to anything, so don't ask for the + * LMR_TRIPLET flag + */ + rc = dat_rmr_query (rmr_handle, + DAT_RMR_FIELD_ALL - DAT_RMR_FIELD_LMR_TRIPLET, + &rmr_param); + } + + DT_assert_dat (DAT_GET_TYPE(rc) == result_wanted); + +cleanup: + if (rsp_handle) + { + rc = dat_rsp_free (rsp_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (ep_handle) + { + rc = dat_ep_free (ep_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (send_evd_handle) + { + rc = dat_evd_free (send_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (recv_evd_handle) + { + rc = dat_evd_free (recv_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (conn_evd_handle) + { + rc = dat_evd_free (conn_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (lmr_handle) + { + rc = dat_lmr_free (lmr_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (rmr_handle) + { + rc = dat_rmr_free (rmr_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (cno_handle) + { + rc = dat_cno_free (cno_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (psp_handle) + { + rc = dat_psp_free (psp_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (cr_evd_handle) + { + rc = dat_evd_free (cr_evd_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (pz_handle) + { + rc = dat_pz_free (pz_handle); + DT_assert_clean (rc == DAT_SUCCESS); + } + + if (ia_handle) + { + rc = DT_ia_close (ia_handle, DAT_CLOSE_ABRUPT_FLAG); + DT_assert_clean (rc == DAT_SUCCESS); + } + + return res; +} + +int DT_queryinfo_case0 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify IA Querying information is successful using\nDAT_IA_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_IA, DAT_SUCCESS); +} + +int DT_queryinfo_case1 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify CNO Querying information is successful using\nDAT_CNO_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_CNO, DAT_SUCCESS); +} + +int DT_queryinfo_case2 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify EVD Querying information is successful using\nDAT_EVD_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_EVD, DAT_SUCCESS); +} + +int DT_queryinfo_case3 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify PSP Querying information is successful using\nDAT_PSP_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_PSP, DAT_SUCCESS); +} + +int DT_queryinfo_case4 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify RSP Querying information is successful using\nDAT_RSP_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_RSP, DAT_SUCCESS); +} + +int DT_queryinfo_case5 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify CR Querying information is successful using\nDAT_CR_QUERY.\n"); + return DT_queryinfo_basic (cmd, QUERY_CR, DAT_SUCCESS); +} + +int DT_queryinfo_case6 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify EP Querying information is successful using\nDAT_EP_QUERY.\n"); + 
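+ /*
+  * N.B. These DT_queryinfo_caseN wrappers all funnel into
+  * DT_queryinfo_basic() above.  Each DT_assert_dat(expr) used there
+  * expands (see the macro in dapl_fft_util.h) roughly to:
+  *
+  *     if (expr) ; else {
+  *         DT_assert_fail (#expr, __FILE__, __BASE_FILE__, __LINE__);
+  *         DT_Mdep_printf ("Error = %d, %s\n", rc, DT_RetToString (rc));
+  *         res = 0;
+  *         goto cleanup;    -- frees only the handles created so far
+  *     }
+  */
+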
return DT_queryinfo_basic (cmd, QUERY_EP, DAT_SUCCESS); +} + +int DT_queryinfo_case7 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify PZ Querying information is successful using\n"); + DT_Mdep_printf ("DAT_PZ_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_PZ, DAT_SUCCESS); +} + +int DT_queryinfo_case8 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify LMR Querying information is successful using\n"); + DT_Mdep_printf ("DAT_LMR_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_LMR, DAT_SUCCESS); +} + +int DT_queryinfo_case9 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify RMR Querying information is successful using\n"); + DT_Mdep_printf ("DAT_RMR_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_RMR, DAT_SUCCESS); +} + +int DT_queryinfo_case10 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify IA Querying fails with DAT_INVALID_PARAMETER when\n"); + DT_Mdep_printf ("passing a bad parameter to DAT_IA_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_IA, DAT_INVALID_PARAMETER); +} + +int DT_queryinfo_case11 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify IA Querying fails with DAT_INVALID_HANDLE when\n"); + DT_Mdep_printf ("passing an invalid handle to DAT_IA_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_IA, DAT_INVALID_HANDLE); +} + +int DT_queryinfo_case12 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify CNO Querying fails with DAT_INVALID_PARAMETER when\n"); + DT_Mdep_printf ("passing a bad parameter to DAT_CNO_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_CNO, DAT_INVALID_PARAMETER); +} + +int DT_queryinfo_case13 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify CNO Querying fails with DAT_INVALID_HANDLE when\n"); + DT_Mdep_printf ("passing an invalid handle to DAT_CNO_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_CNO, DAT_INVALID_HANDLE); +} + +int DT_queryinfo_case14 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify EVD Querying fails with DAT_INVALID_PARAMETER when\n"); + DT_Mdep_printf ("passing a bad parameter to DAT_EVD_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_EVD, DAT_INVALID_PARAMETER); +} + +int DT_queryinfo_case15 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify EVD Querying fails with DAT_INVALID_HANDLE when\n"); + DT_Mdep_printf ("passing an invalid handle to DAT_EVD_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_EVD, DAT_INVALID_HANDLE); +} + +int DT_queryinfo_case16 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify PSP Querying fails with DAT_INVALID_PARAMETER when\n"); + DT_Mdep_printf ("passing a bad parameter to DAT_PSP_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_PSP, DAT_INVALID_PARAMETER); +} + +int DT_queryinfo_case17 (FFT_Cmd_t *cmd) +{ + DT_Mdep_printf ("Description: Verify PSP Querying fails with DAT_INVALID_HANDLE when\n"); + DT_Mdep_printf ("passing an invalid handle to DAT_PSP_QUERY\n"); + return DT_queryinfo_basic (cmd, QUERY_PSP, DAT_INVALID_HANDLE); +} + +/*-------------------------------------------------------------*/ +void DT_queryinfo_test (FFT_Cmd_t *cmd) +{ + int i; + int res; + FFT_Testfunc_t cases_func[] = + { + { DT_queryinfo_case0 }, + { DT_queryinfo_case1 }, + { DT_queryinfo_case2 }, + { DT_queryinfo_case3 }, + { DT_queryinfo_case4 }, + { DT_queryinfo_case5 }, + { DT_queryinfo_case6 }, + { DT_queryinfo_case7 }, + { DT_queryinfo_case8 }, + { DT_queryinfo_case9 }, + { DT_queryinfo_case10 }, + { DT_queryinfo_case11 }, + { DT_queryinfo_case12 }, + { DT_queryinfo_case13 }, + { DT_queryinfo_case14 }, + { 
DT_queryinfo_case15 }, + { DT_queryinfo_case16 }, + { DT_queryinfo_case17 }, + }; + + for (i=0; i < cmd->size; i++) + { + if (cmd->cases_flag[i]) + { + DT_Mdep_printf ("*********************************************************************\n"); + DT_Mdep_printf ("Function feature: Queryinfo case: %d\n", i + ); + res = cases_func[i].fun (cmd); + if (res==1) + { + DT_Mdep_printf ("Result: PASS\n"); + } + else if (res ==0) + { + DT_Mdep_printf ("Result: FAIL\n"); + } + else if (res == -1) + { + DT_Mdep_printf ("Result: UNSUPP\n"); + } + + DT_Mdep_printf ("*********************************************************************\n"); + } + } + return; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_test.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_test.c new file mode 100644 index 00000000..6852b716 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_test.c @@ -0,0 +1,94 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_proto.h" +#include "dapl_fft_cmd.h" +#include + +void +DT_cs_FFT (FFT_Cmd_t * cmd) +{ + switch (cmd->fft_type) + { + case HWCONN: + { + DT_hwconn_test (cmd); + break; + } + case ENDPOINT: + { + DT_endpoint_test (cmd); + break; + } + case PTAGMGT: + { + DT_pz_test (cmd); + break; + } + case MEMMGT: + { + DT_mem_test (cmd); + break; + } + case CONNMGT: + { + DT_connmgt_test (cmd); + break; + } + case DATAXFER: + { + DT_dataxfer_test (cmd); + break; + } + case DATAXFER_CLIENT: + { + DT_dataxfer_client_test (cmd); + break; + } + case QUERYINFO: + { + DT_queryinfo_test (cmd); + break; + } + case CONNMGT_CLIENT: + case NS: + case ERRHAND: + case UNSUPP: + case STRESS: + case STRESS_CLIENT: + case CQMGT: + { + DT_Mdep_printf ("Not Yet Implemented\n"); + break; + } + default: + { + DT_Mdep_printf ("don't know this test\n"); + break; + } + } +} + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.c new file mode 100644 index 00000000..cbe1d698 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.c @@ -0,0 +1,357 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. 
The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_proto.h"
+#include "dapl_fft_util.h"
+
+#define DEFAULT_QUEUE_LEN 10
+
+
+/* function that is called when an assertion fails, printing out the line
+ * that failed via DT_Mdep_printf
+ */
+void DT_assert_fail (char *exp, char *file, char *baseFile, int line)
+{
+    if (!strcmp (file, baseFile))
+    {
+        DT_Mdep_printf ("%s failed in file %s, line %d\n", exp, file,
+                        line);
+    }
+    else
+    {
+        DT_Mdep_printf ("%s failed in file %s (included from %s), line %d\n",
+                        exp, file, baseFile, line);
+    }
+}
+
+/* helper function to open an IA */
+int DT_ia_open (DAT_NAME_PTR dev_name, DAT_IA_HANDLE *ia_handle)
+{
+    DAT_EVD_HANDLE evd_handle;
+    evd_handle = DAT_HANDLE_NULL;
+#ifdef DYNAMIC_DAT_LOADING
+    return dat_open (dev_name, DEFAULT_QUEUE_LEN, &evd_handle, ia_handle,
+                     DAT_VERSION_MAJOR,
+                     DAT_VERSION_MINOR,
+                     DAT_THREADSAFE);
+#else
+    return dat_ia_open (dev_name, DEFAULT_QUEUE_LEN, &evd_handle, ia_handle);
+#endif //DYNAMIC_DAT_LOADING
+}
+
+DAT_RETURN
+DT_ia_close (DAT_IA_HANDLE ia_handle, DAT_CLOSE_FLAGS flags)
+{
+#ifdef DYNAMIC_DAT_LOADING
+    return dat_close (ia_handle, flags);
+#else
+    return dat_ia_close (ia_handle, flags);
+#endif
+}
+
+/* helper function to create an endpoint and its associated EVDs */
+int DT_ep_create (DAT_IA_HANDLE ia_handle, DAT_PZ_HANDLE pz_handle,
+                  DAT_EVD_HANDLE *cr_evd,
+                  DAT_EVD_HANDLE *conn_evd, DAT_EVD_HANDLE *send_evd,
+                  DAT_EVD_HANDLE *recv_evd, DAT_EP_HANDLE *ep_handle)
+{
+    DAT_RETURN status;
+    *conn_evd = 0;
+    *send_evd = 0;
+    *recv_evd = 0;
+    *cr_evd = 0;
+
+    status = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, DAT_HANDLE_NULL,
+                             DAT_EVD_CR_FLAG, cr_evd);
+    if (status != DAT_SUCCESS)
+    {
+        DT_Mdep_printf ("dat_evd_create failed %s\n", DT_RetToString (status));
+        return status;
+    }
+
+    status = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, DAT_HANDLE_NULL,
+                             DAT_EVD_CONNECTION_FLAG, conn_evd);
+    if (status != DAT_SUCCESS)
+    {
+        DT_Mdep_printf ("dat_evd_create failed %s\n", DT_RetToString (status));
+        return status;
+    }
+
+    status = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, DAT_HANDLE_NULL,
+                             DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG,
+                             send_evd);
+    if (status != DAT_SUCCESS)
+    {
+        DT_Mdep_printf ("dat_evd_create failed %s\n", DT_RetToString (status));
+        return status;
+    }
+
+    status = dat_evd_create (ia_handle, DEFAULT_QUEUE_LEN, DAT_HANDLE_NULL,
+                             DAT_EVD_DTO_FLAG, recv_evd);
+    if (status != DAT_SUCCESS)
+    {
+        DT_Mdep_printf ("dat_evd_create failed %s\n", DT_RetToString (status));
+        return status;
+    }
+
+    status = dat_ep_create (ia_handle, pz_handle, *recv_evd,
+                            *send_evd, *conn_evd, NULL, ep_handle);
+    if (status != DAT_SUCCESS)
+    {
+        DT_Mdep_printf ("dat_ep_create failed %s\n", DT_RetToString (status));
+    }
+    return status;
+}
+
+/* function that initializes the connection struct */
+void DT_fft_init_conn_struct (FFT_Connection_t *conn)
+{
+    conn->ia_handle = 0;
+    conn->pz_handle = 0;
+    conn->psp_handle = 0;
+    conn->ep_handle = 0;
+    conn->cr_evd = 0;
+    conn->send_evd = 0;
+    conn->conn_evd = 0;
+    conn->recv_evd = 0;
+    conn->cr_handle = 0;
+    conn->remote_netaddr = 0;
+    conn->bpool = 0;
+    conn->pt_ptr = 0;
+    conn->connected = false;
+}
+
+/* helper function that simplifies many dat calls for the initialization of a
+ * dat "client"
+ */
+void DT_fft_init_client (FFT_Cmd_t *cmd, FFT_Connection_t *conn)
+{
+    int res;
+    DAT_RETURN rc = 0;
+
+    /* initialize the struct's members */
+    DT_fft_init_conn_struct (conn);
+
+    /* open the IA */
+    rc = DT_ia_open (cmd->device_name, &conn->ia_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+
+    /* create a PZ */
+    rc = dat_pz_create (conn->ia_handle, &conn->pz_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+
+    /* create an EP and its EVDs */
+    rc = DT_ep_create (conn->ia_handle, conn->pz_handle, &conn->cr_evd,
+                       &conn->conn_evd, &conn->send_evd, &conn->recv_evd,
+                       &conn->ep_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+
+    /* if a server name is given, allocate memory for a net address and set it
+     * up appropriately
+     */
+    if (cmd->server_name && strlen (cmd->server_name))
+    {
+        conn->pt_ptr = DT_Alloc_Per_Test_Data ();
+        DT_assert (conn->pt_ptr);
+        DT_MemListInit (conn->pt_ptr);
+        conn->remote_netaddr = DT_NetAddrAlloc (conn->pt_ptr);
+        DT_assert (conn->remote_netaddr);
+        DT_assert (DT_NetAddrLookupHostAddress (conn->remote_netaddr,
+                                                cmd->server_name));
+    }
+cleanup:
+    return;
+}
+
+/* helper function to break down a client or server created with one of the
+ * init helper functions
+ */
+int DT_fft_destroy_conn_struct (FFT_Connection_t *conn)
+{
+    DAT_RETURN rc = DAT_SUCCESS;
+    if (conn->ep_handle)
+    {
+        if (conn->connected)
+        {
+            rc = dat_ep_disconnect (conn->ep_handle, DAT_CLOSE_DEFAULT);
+            DT_assert_clean (rc == DAT_SUCCESS);
+
+            if (!DT_disco_event_wait (conn->cr_evd, NULL))
+            {
+                DT_Mdep_printf ("DT_fft_destroy_conn_struct: bad disconnect event\n");
+            }
+        }
+        rc = dat_ep_free (conn->ep_handle);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->bpool)
+    {
+        DT_Bpool_Destroy (0, conn->bpool);
+    }
+    if (conn->psp_handle)
+    {
+        rc = dat_psp_free (conn->psp_handle);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->cr_evd)
+    {
+        rc = dat_evd_free (conn->cr_evd);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->conn_evd)
+    {
+        rc = dat_evd_free (conn->conn_evd);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->send_evd)
+    {
+        rc = dat_evd_free (conn->send_evd);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->recv_evd)
+    {
+        rc = dat_evd_free (conn->recv_evd);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->remote_netaddr)
+    {
+        DT_NetAddrFree (conn->pt_ptr, conn->remote_netaddr);
+    }
+    if (conn->pt_ptr)
+    {
+        DT_Free_Per_Test_Data (conn->pt_ptr);
+    }
+    if (conn->pz_handle)
+    {
+        rc = dat_pz_free (conn->pz_handle);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    if (conn->ia_handle)
+    {
+        rc = DT_ia_close (conn->ia_handle, DAT_CLOSE_ABRUPT_FLAG);
+        DT_assert_clean (rc == DAT_SUCCESS);
+    }
+    return rc;
+}
+
+/* helper function to init a dat "server" */
+void DT_fft_init_server (FFT_Cmd_t *cmd, FFT_Connection_t *conn)
+{
+    int res;
+    DAT_RETURN rc = 0;
+
+    /* init the connection struct's members */
+    DT_fft_init_conn_struct (conn);
+
+    /* open the IA */
+    rc = DT_ia_open (cmd->device_name, &conn->ia_handle);
+    DT_assert_dat (rc == DAT_SUCCESS);
+
+    /* create a PZ */
+    rc =
dat_pz_create (conn->ia_handle, &conn->pz_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + /* create an EP and its EVDs */ + rc =DT_ep_create (conn->ia_handle, conn->pz_handle, &conn->cr_evd, + &conn->conn_evd, &conn->send_evd, &conn->recv_evd, + &conn->ep_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + /* create a PSP */ + rc = dat_psp_create (conn->ia_handle, SERVER_PORT_NUMBER, conn->cr_evd, + DAT_PSP_CONSUMER_FLAG, &conn->psp_handle); + DT_assert_dat (rc == DAT_SUCCESS); + + /* allocate memory for buffers */ + conn->bpool = DT_BpoolAlloc (0, conn->ia_handle, conn->pz_handle, NULL, NULL, + 8192, 2, DAT_OPTIMAL_ALIGNMENT, false, false); + DT_assert (conn->bpool); +cleanup: + return; +} + +/* helper function that allows a server to listen for a connection */ +void DT_fft_listen (FFT_Connection_t *conn) +{ + int res; + DAT_RETURN rc=0; + + /* wait on a CR event via the CR EVD */ + DT_assert_dat (DT_cr_event_wait (conn->cr_evd, &conn->cr_stat) && + DT_cr_check (&conn->cr_stat, conn->psp_handle, SERVER_PORT_NUMBER, + &conn->cr_handle, "DT_fft_listen")); + + /* accept the connection */ + rc =dat_cr_accept (conn->cr_handle, conn->ep_handle, 0, (DAT_PVOID)0); + DT_assert_dat (rc == DAT_SUCCESS); + + /* wait on a conn event via the conn EVD */ + DT_assert (DT_conn_event_wait (conn->ep_handle, conn->conn_evd, + &conn->event_num) == true); + conn->connected = true; +cleanup: + return; +} + +/* helper function that allows a client to connect to a server */ +int DT_fft_connect (FFT_Connection_t *conn) +{ + int wait_count; + int res; + DAT_RETURN rc=0; + + /* try 10 times to connect */ + for (wait_count = 0; wait_count < 10; wait_count++) + { + DT_Mdep_printf ("Connection to server, attempt #%d\n", wait_count+1); + + /* attempt to connect, timeout = 10 secs */ + rc = dat_ep_connect (conn->ep_handle, conn->remote_netaddr, + SERVER_PORT_NUMBER, 10*1000, 0, (DAT_PVOID)0, + DAT_QOS_BEST_EFFORT, DAT_CONNECT_DEFAULT_FLAG); + DT_assert_dat (rc == DAT_SUCCESS); + + /* wait on conn event */ + DT_assert (DT_conn_event_wait (conn->ep_handle, conn->conn_evd, + &conn->event_num) == true); + + /* make sure we weren't rejected by the peer */ + if (conn->event_num == DAT_CONNECTION_EVENT_PEER_REJECTED) + { + DT_Mdep_Sleep (1000); + DT_Mdep_printf ("Connection rejected by peer; retrying\n"); + } + } +cleanup: + if (conn->event_num == DAT_CONNECTION_EVENT_ESTABLISHED) + { + conn->connected = true; + } + /* returns true if connected, false otherwise */ + return (conn->connected); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.h new file mode 100644 index 00000000..bd046444 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_fft_util.h @@ -0,0 +1,85 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef DAPL_FFT_UTIL_H +#define DAPL_FFT_UTIL_H + +#define DT_assert_dat(x) if(x) ; \ + else { \ + DT_assert_fail(#x, __FILE__, __BASE_FILE__, __LINE__); \ + DT_Mdep_printf("Error = %d, %s\n", rc, DT_RetToString(rc)); \ + res = 0; \ + goto cleanup; \ + } + +#define DT_assert(x) if(x) ; \ + else { \ + DT_assert_fail(#x, __FILE__, __BASE_FILE__, __LINE__); \ + res = 0; \ + goto cleanup; \ + } + +#define DT_assert_clean(x) if(x) ; \ + else { \ + DT_assert_fail(#x, __FILE__, __BASE_FILE__, __LINE__); \ + DT_Mdep_printf("Error = %d, %s\n", rc, DT_RetToString(rc)); \ + return 0; \ + } + +typedef struct +{ + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_PSP_HANDLE psp_handle; + DAT_EP_HANDLE ep_handle; + DAT_EVD_HANDLE cr_evd, conn_evd, send_evd, recv_evd; + DAT_EVENT event; + DAT_COUNT count; + DAT_CR_HANDLE cr_handle; + Bpool *bpool; + DAT_CR_ARRIVAL_EVENT_DATA cr_stat; + DAT_EVENT_NUMBER event_num; + DAT_IA_ADDRESS_PTR remote_netaddr; + Per_Test_Data_t *pt_ptr; + bool connected; +} FFT_Connection_t; + +typedef enum + { + QUERY_CNO, + QUERY_CR, + QUERY_EP, + QUERY_EVD, + QUERY_IA, + QUERY_LMR, + QUERY_RMR, + QUERY_PSP, + QUERY_RSP, + QUERY_PZ, +} FFT_query_enum; + +#endif + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_funcs.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_funcs.h new file mode 100644 index 00000000..3833b91e --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_funcs.h @@ -0,0 +1,58 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#ifndef _DAPL_I +#define _DAPL_I + +#define IN +#define OUT + +/* Interface Prefixes */ +#define DATIB_INTERFACE_PREFIX "ib" + +/* Types of DAPL functions, taken from uDAPL 1.0 */ + + +#define DAT_CALLTYPE + +/********************************************************************** + * Types of user-supplied callbacks + **********************************************************************/ + +/********************************************************************** + * Types of basic functions + **********************************************************************/ + +/********************************************************************** + * Types of Peer-to-Peer Connection Model APIs + *********************************************************************/ + +/********************************************************************** + * Types of Name service APIs + *********************************************************************/ + + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.c new file mode 100644 index 00000000..2712f5e5 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.c @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_getopt.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" + +#define GETOPT_MAGIC 0x04030201 + +#define BADCH '?' +#define BADARG ':' +#define EMSG "" + +/** + * Initialize the getopt fields in preparation for parsing + * a command line. + */ +void +DT_mygetopt_init (mygetopt_t * opts) +{ + opts->magic = GETOPT_MAGIC; + opts->place = EMSG; + opts->opterr = 1; + opts->optind = 1; + opts->optopt = 0; + opts->optarg = 0; +} + +/** + * Parse command line arguments. + * + * Return either the option discovered, or + * (int) -1 when there are no more options + * (int) '?' when an illegal option is found + * (int) ':' when a required option argument is missing. + */ +char +DT_mygetopt_r (int argc, char *const * argv, + const char *ostr, mygetopt_t * opts) +{ + char *p; + char *oli; /* option letter list index */ + if (GETOPT_MAGIC != opts->magic) + { + DT_Mdep_printf ("%s: getopt warning: " + "option argument is not properly initialized.\n", + argc > 0 ? 
argv[0] : "unknown command"); + DT_mygetopt_init (opts); + } + if (!* (opts->place)) /* update scanning pointer */ + { + if ((opts->optind) >= argc || + * ((opts->place) = argv[ (opts->optind)]) != '-') + { + (opts->place) = EMSG; + return (EOF); + } + if ((opts->place)[0] != '-') + { + /* Invalid 1st argument */ + return (BADCH); + } + if ((opts->place)[1] && *++ (opts->place) == '-') + { + /* found "--" which is an invalid option */ + ++ (opts->optind); + (opts->place) = EMSG; + return (BADCH); + } + } /* option letter okay? */ + opts->optopt = (int) * (opts->place)++; + oli = strchr (ostr, (opts->optopt)); + if (opts->optopt == (int) ':' || ! oli) + { + /* + * if the user didn't specify '-' as an option, assume it means EOF. + */ + if ((opts->optopt) == (int) '-') + { + /* return (EOF); */ + return (BADCH); + } + if (!* (opts->place)) + { + ++ (opts->optind); + } + if ((opts->opterr) && *ostr != ':') + { + p = strchr (*argv, '/'); + if (!p) + { + p = *argv; + } + else + { + ++p; + } + + if (opts->optopt != '?') /* Anything but '?' needs error */ + { + DT_Mdep_printf ("%s: Illegal option -- %c\n", + p, (opts->optopt)); + } + } + return (BADCH); + } + if (*++oli != ':') /* don't need argument */ + { + (opts->optarg) = NULL; + if (!* (opts->place)) + { + ++ (opts->optind); + } + } + else /* need an argument */ + { + if (* (opts->place)) /* no white space */ + { + (opts->optarg) = (opts->place); + } + else + { + if (argc <= ++ (opts->optind)) /* no arg */ + { + (opts->place) = EMSG; + if (*ostr == ':') + { + return (BADARG); + } + p = strchr (*argv, '/'); + if (!p) + { + p = *argv; + } + else + { + ++p; + } + if ((opts->opterr)) + { + DT_Mdep_printf ( + "%s: option requires an argument -- %c\n", + p, (opts->optopt)); + } + return (BADCH); + } + else /* white space */ + { + (opts->optarg) = argv[ (opts->optind)]; + } + } + (opts->place) = EMSG; + ++ (opts->optind); + } + return (opts->optopt); /* dump back option letter */ +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.h new file mode 100644 index 00000000..2cc379d7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_getopt.h @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+#ifndef __DAPL_GETOPT_H__
+#define __DAPL_GETOPT_H__
+
+typedef struct
+{
+    int magic;
+    char *place;
+
+    int opterr;
+    int optind;
+    char optopt;
+    char *optarg;
+} mygetopt_t;
+/* function prototypes */
+void
+DT_mygetopt_init (mygetopt_t * opts);
+char
+DT_mygetopt_r (int argc,
+               char *const * argv,
+               const char *ostr,
+               mygetopt_t * opts);
+#endif
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit.c
new file mode 100644
index 00000000..b273a4b0
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit.c
@@ -0,0 +1,1529 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ *    OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_proto.h"
+#include "dapl_limit_cmd.h"
+
+/*
+ * Increase the size of an array of handles
+ */
+static bool
+more_handles (DAT_HANDLE **old_ptrptr,  /* pointer to current pointer */
+              unsigned int *old_count,  /* number pointed to */
+              unsigned int size)        /* size of one datum */
+{
+    unsigned int count = *old_count;
+    DAT_HANDLE *old_handles = *old_ptrptr;
+    DAT_HANDLE *handle_tmp = DT_Mdep_Malloc (count * 2 * size);
+
+    if (!handle_tmp)
+    {
+        DT_Mdep_printf ("Out of memory for more DAT_HANDLEs\n");
+        return (false);
+    }
+
+    memcpy (handle_tmp, old_handles, count * size);
+    DT_Mdep_Free (old_handles);
+    *old_ptrptr = handle_tmp;
+    *old_count = count * 2;
+    return (true);
+}
+
+
+/*
+ * Limit test workhorse.
+ *
+ * This test creates the sequence of DAT objects needed to move
+ * data back and forth, attempting to find the limits supported
+ * for the DAT object indicated by 'depth'.  For example, if
+ * depth == LIM_LMR, the test will create a set of {IA,PZ,CNO,EVD,EP}
+ * before trying to exhaust LMR creation using the {IA,PZ,CNO,EVD,EP} set.
+ *
+ * The 'cmd->width' parameter can be used to control how many of these
+ * parallel DAT object sets we create before stopping to beat upon
+ * the constructor for the object indicated by 'depth', providing for
+ * increased (or at least different) stress on the DAPL.
+ */
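+/*
+ * A usage sketch (illustrative only) of more_handles() as the exhaustion
+ * loops below employ it: grow the handle array geometrically once the
+ * loop index reaches the current capacity, and give up on probing if
+ * even that allocation fails:
+ *
+ *     if (w == count
+ *         && !more_handles ((DAT_HANDLE **) &hdlptr,
+ *                           &count,
+ *                           sizeof (*hdlptr)))
+ *     {
+ *         break;
+ *     }
+ */
+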
+ */ +static bool +limit_test ( Limit_Cmd_t *cmd, + Limit_Index depth) +{ + typedef struct obj_set + { + DAT_IA_HANDLE ia_handle; + DAT_EVD_HANDLE ia_async_handle; + DAT_PZ_HANDLE pz_handle; + DAT_CNO_HANDLE cno_handle; + DAT_EVD_HANDLE evd_handle; + DAT_EP_HANDLE ep_handle; + DAT_LMR_HANDLE lmr_handle; + char * lmr_buffer; + DAT_LMR_CONTEXT lmr_context; + DAT_RMR_HANDLE rmr_handle; + DAT_RMR_CONTEXT rmr_context; + } Obj_Set; + + Obj_Set *hdl_sets = (Obj_Set *) NULL; + bool retval = false; + char *module = "LimitTest"; +#if defined (WIN32) + /* + * The Windows compiler will not deal with complex definitions + * in macros, so create a variable here. + */ +#if defined (DAT_OS_WAIT_PROXY_AGENT_NULL) + #undef DAT_OS_WAIT_PROXY_AGENT_NULL +#endif + DAT_OS_WAIT_PROXY_AGENT DAT_OS_WAIT_PROXY_AGENT_NULL = {NULL, NULL}; +#endif + + DAT_RETURN ret; + # define DFLT_QLEN 10 /* a small event queue size */ + # define START_COUNT 1024 /* initial # handles */ + # define DFLT_BUFFSZ 4096 /* default size for buffer */ + # define CONN_QUAL0 0xAffab1e + + /* Allocate 'width' Obj_Sets */ + if (depth && ! (hdl_sets = DT_Mdep_Malloc (sizeof (Obj_Set) * cmd->width))) + { + DT_Mdep_printf ("%s: No memory for handle array!\n", module); + goto clean_up_now; + } + + /* ----------- + * IA handling + */ + if (depth > LIM_IA) + { + /* + * The abuse is not for us this time, just prep Obj_Set. + */ + unsigned int w; + + DT_Mdep_debug (("%s: dat_ia_open X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + /* Specify that we want to get back an async EVD. */ + hdl_sets[w].ia_async_handle = DAT_HANDLE_NULL; +#ifdef DYNAMIC_DAT_LOADING + ret = dat_open (cmd->device_name, + DFLT_QLEN, + &hdl_sets[w].ia_async_handle, + &hdl_sets[w].ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + ret = dat_ia_open (cmd->device_name, + DFLT_QLEN, + &hdl_sets[w].ia_async_handle, + &hdl_sets[w].ia_handle); +#endif // DYNAMIC_DAT_LOADING + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ia_open (%s) #%d fails: %s\n", + module, cmd->device_name, + w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].ia_async_handle = DAT_HANDLE_NULL; + hdl_sets[w].ia_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + } + } + else if (depth == LIM_IA) + { + /* + * See how many IAs we can create + */ + typedef struct _ia + { + DAT_IA_HANDLE ia_handle; + DAT_EVD_HANDLE ia_async_handle; + } + OneOpen; + unsigned int count = START_COUNT; + OneOpen *hdlptr = (OneOpen *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* IA Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_ia_open\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: IAs opened: %d\n", module, w); + retval = true; + break; + } + /* Specify that we want to get back an async EVD. 
*/ + hdlptr[w].ia_async_handle = DAT_HANDLE_NULL; +#ifdef DYNAMIC_DAT_LOADING + ret = dat_open (cmd->device_name, + DFLT_QLEN, + &hdlptr[w].ia_async_handle, + &hdlptr[w].ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + ret = dat_ia_open (cmd->device_name, + DFLT_QLEN, + &hdlptr[w].ia_async_handle, + &hdlptr[w].ia_handle); +#endif // DYNAMIC_DAT_LOADING + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ia_open (%s) #%d fails: %s\n", + module, cmd->device_name, + w+1, DT_RetToString (ret)); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: IAs opened: %d\n", module, w); + retval = true; + + /* IA Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = DT_ia_close (hdlptr[tmp].ia_handle, + DAT_CLOSE_GRACEFUL_FLAG); + + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (graceful) fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + ret = DT_ia_close (hdlptr[tmp].ia_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (abrupt) fails: %s\n", + module, DT_RetToString (ret)); + } + } + } + DT_Mdep_Free (hdlptr); + } + } /* End IA handling */ + + /* ----------- + * PZ handling + */ + if (depth > LIM_PZ) + { + /* + * The abuse is not for us this time, just prep Obj_Set. + */ + unsigned int w; + + DT_Mdep_debug (("%s: dat_pz_create X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + ret = dat_pz_create (hdl_sets[w].ia_handle, + &hdl_sets[w].pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].pz_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + } + } + else if (depth == LIM_PZ) + { + /* + * See how many PZs we can create + */ + unsigned int count = START_COUNT; + DAT_PZ_HANDLE *hdlptr = (DAT_PZ_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* PZ Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_pz_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: PZs created: %d\n", module, w); + retval = true; + break; + } + ret = dat_pz_create (hdl_sets[w % cmd->width].ia_handle, + &hdlptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: PZs created: %d\n", module, w); + retval = true; + + /* PZ Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_pz_free (hdlptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End PZ handling */ + + /* ----------- + * CNO handling + */ + + if (depth > LIM_CNO) + { + /* + * The abuse is not for us this time, just prep Obj_Set. 
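+ * dat_cno_create() wants a DAT_OS_WAIT_PROXY_AGENT; these tests pass the
+ * null agent since no OS wait proxy is involved.  A sketch of the WIN32
+ * fallback used elsewhere in this patch (the compiler there rejects the
+ * macro form):
+ *
+ *     DAT_OS_WAIT_PROXY_AGENT null_agent = { (DAT_PVOID) NULL,
+ *                                            (DAT_AGENT_FUNC) NULL };
+ *     ret = dat_cno_create (ia_handle, null_agent, &cno_handle);
+ *
+ * where null_agent stands in for DAT_OS_WAIT_PROXY_AGENT_NULL.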
+ */ + unsigned int w; + + DT_Mdep_debug (("%s: dat_cno_create X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + ret = dat_cno_create (hdl_sets[w].ia_handle, + DAT_OS_WAIT_PROXY_AGENT_NULL, + &hdl_sets[w].cno_handle); + if (DAT_GET_TYPE(ret) == DAT_NOT_IMPLEMENTED) + { + DT_Mdep_printf ("%s: dat_cno_create unimplemented\n", module); + hdl_sets[w].cno_handle = DAT_HANDLE_NULL; + /* ignore this error */ + break; + } + else if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_cno_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].cno_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + } + } + else if (depth == LIM_CNO) + { + /* + * See how many CNOs we can create + */ + unsigned int count = START_COUNT; + DAT_CNO_HANDLE *hdlptr = (DAT_CNO_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* CNO Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_cno_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: CNOs created: %d\n", module, w); + retval = true; + break; + } + ret = dat_cno_create (hdl_sets[w % cmd->width].ia_handle, + DAT_OS_WAIT_PROXY_AGENT_NULL, + &hdlptr[w]); + if (DAT_GET_TYPE(ret) == DAT_NOT_IMPLEMENTED) + { + DT_Mdep_printf ("%s: dat_cno_create unimplemented\n", + module); + retval = true; + break; + } + else if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_cno_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: CNOs created: %d\n", module, w); + retval = true; + + /* CNO Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_cno_free (hdlptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_cno_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End CNO handling */ + + /* ----------- + * EVD handling + */ + if (depth > LIM_EVD) + { + /* + * The abuse is not for us this time, just prep Obj_Set. 
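+ * Each set's one EVD is created with the DTO, RMR_BIND, CONNECTION and
+ * CR event-stream flags OR-ed together, e.g.:
+ *
+ *     flags = DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG
+ *           | DAT_EVD_CONNECTION_FLAG | DAT_EVD_CR_FLAG;
+ *
+ * The ASYNC stream is deliberately not included, since the async EVD
+ * was already handed back by dat_ia_open() during IA setup above.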
+ */ + unsigned int w; + DAT_EVD_FLAGS flags = ( DAT_EVD_DTO_FLAG + /* | DAT_EVD_SOFTWARE_FLAG */ + | DAT_EVD_CONNECTION_FLAG + | DAT_EVD_CR_FLAG + | DAT_EVD_RMR_BIND_FLAG ); /* not ASYNC */ + + DT_Mdep_debug (("%s: dat_evd_create X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + ret = dat_evd_create (hdl_sets[w].ia_handle, + DFLT_QLEN, + hdl_sets[w].cno_handle, + flags, + &hdl_sets[w].evd_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].evd_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + } + } + else if (depth == LIM_EVD) + { + /* + * See how many EVDs we can create + */ + unsigned int count = START_COUNT; + DAT_EVD_HANDLE *hdlptr = (DAT_EVD_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + DAT_EVD_FLAGS flags = ( DAT_EVD_DTO_FLAG + | DAT_EVD_RMR_BIND_FLAG + | DAT_EVD_CONNECTION_FLAG + | DAT_EVD_CR_FLAG); + + /* EVD Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_evd_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: EVDs created: %d\n", module, w); + retval = true; + break; + } + ret = dat_evd_create (hdl_sets[w % cmd->width].ia_handle, + DFLT_QLEN, + hdl_sets[w % cmd->width].cno_handle, + flags, + &hdlptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: EVDs created: %d\n", module, w); + retval = true; + + /* EVD Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_evd_free (hdlptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End EVD handling */ + + /* ----------- + * EP handling + */ + if (depth > LIM_EP) + { + /* + * The abuse is not for us this time, just prep Obj_Set. 
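+ * dat_ep_create() takes separate recv, request and connect EVDs; the
+ * Obj_Set deliberately reuses its single evd_handle for all three
+ * streams, roughly:
+ *
+ *     ret = dat_ep_create (ia_handle, pz_handle,
+ *                          evd_handle,            -- recv
+ *                          evd_handle,            -- request
+ *                          evd_handle,            -- connect
+ *                          (DAT_EP_ATTR *) NULL,
+ *                          &ep_handle);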
+ */ + unsigned int w; + + DT_Mdep_debug (("%s: dat_ep_create X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + ret = dat_ep_create (hdl_sets[w].ia_handle, + hdl_sets[w].pz_handle, + hdl_sets[w].evd_handle, /* recv */ + hdl_sets[w].evd_handle, /* request */ + hdl_sets[w].evd_handle, /* connect */ + (DAT_EP_ATTR *) NULL, + &hdl_sets[w].ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].ep_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + } + } + else if (depth == LIM_EP) + { + /* + * See how many EPs we can create + */ + unsigned int count = START_COUNT; + DAT_EP_HANDLE *hdlptr = (DAT_EP_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* EP Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_ep_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: EPs created: %d\n", module, w); + retval = true; + break; + } + ret = dat_ep_create (hdl_sets[w % cmd->width].ia_handle, + hdl_sets[w % cmd->width].pz_handle, + hdl_sets[w % cmd->width].evd_handle, + hdl_sets[w % cmd->width].evd_handle, + hdl_sets[w % cmd->width].evd_handle, + (DAT_EP_ATTR *) NULL, + &hdlptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: EPs created: %d\n", module, w); + retval = true; + + /* EP Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_ep_free (hdlptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End EP handling */ + + /* ----------- + * RSP handling + * + * if (depth > LIM_RSP) { + * Since RSPs are not part of the Obj_Set, + * there's nothing to do. + * } else ... 
+ */ + if (depth == LIM_RSP) + { + /* + * See how many RSPs we can create + */ + unsigned int count = START_COUNT; + DAT_RSP_HANDLE *hdlptr = (DAT_RSP_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + DAT_EP_HANDLE *epptr = (DAT_EP_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*epptr)); + + /* RSP Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_rsp_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count) + { + unsigned int count1 = count; + unsigned int count2 = count; + + if (!more_handles ((DAT_HANDLE **) &hdlptr, + &count1, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: RSPs created: %d\n", module, w); + retval = true; + break; + } + if (!more_handles ((DAT_HANDLE **) &epptr, + &count2, + sizeof (*epptr))) + { + DT_Mdep_printf ("%s: RSPs created: %d\n", module, w); + retval = true; + break; + } + + if (count1 != count2) + { + DT_Mdep_printf ("%s: Mismatch in allocation of handle arrays at point %d\n", + module, w); + retval = true; + break; + } + + count = count1; + } + + /* + * Each RSP needs a unique EP, so create one first + */ + ret = dat_ep_create (hdl_sets[w % cmd->width].ia_handle, + hdl_sets[w % cmd->width].pz_handle, + hdl_sets[w % cmd->width].evd_handle, + hdl_sets[w % cmd->width].evd_handle, + hdl_sets[w % cmd->width].evd_handle, + (DAT_EP_ATTR *) NULL, + &epptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_create #%d fails: %s testing RSPs\n", + module, w+1, DT_RetToString (ret)); + retval = true; + break; + } + + ret = dat_rsp_create (hdl_sets[w % cmd->width].ia_handle, + CONN_QUAL0 + w, + epptr[w], + hdl_sets[w % cmd->width].evd_handle, + &hdlptr[w]); + if (DAT_GET_TYPE(ret) == DAT_NOT_IMPLEMENTED) + { + DT_Mdep_printf ("%s: dat_rsp_create unimplemented\n", + module); + /* ignore this error */ + retval = true; + break; + } + else if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rsp_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* Cleanup the EP; no-one else will. */ + ret = dat_ep_free (epptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free (internal cleanup @ #%d) fails: %s\n", + module, w+1, DT_RetToString (ret)); + } + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: RSPs created: %d\n", module, w); + retval = true; + + /* RSP Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_rsp_free (hdlptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_rsp_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + /* Free EPs */ + ret = dat_ep_free (epptr[tmp]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free fails: %s for RSPs\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End RSP handling */ + + /* ----------- + * PSP handling + * + * if (depth > LIM_PSP) { + * Since PSPs are not part of the Obj_Set, + * there's nothing to do. + * } else ... 
+ */ + if (depth == LIM_PSP) + { + /* + * See how many PSPs we can create + */ + unsigned int count = START_COUNT; + DAT_PSP_HANDLE *hdlptr = (DAT_PSP_HANDLE *) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* PSP Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_psp_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: PSPs created: %d\n", module, w); + retval = true; + break; + } + ret = dat_psp_create (hdl_sets[w % cmd->width].ia_handle, + CONN_QUAL0 + w, + hdl_sets[w % cmd->width].evd_handle, + DAT_PSP_CONSUMER_FLAG, + &hdlptr[w]); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_psp_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + retval = true; + hdlptr[w] = DAT_HANDLE_NULL; + break; + } + } + + DT_Mdep_printf ("%s: PSPs created: %d\n", module, w); + retval = true; + + /* PSP Cleanup loop */ + for (tmp = 0; tmp < w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + ret = dat_psp_free (hdlptr[tmp]); + if (DAT_GET_TYPE(ret) == DAT_NOT_IMPLEMENTED) + { + DT_Mdep_printf ("%s: dat_psp_free unimplemented\n" + "\tNB: Expect EVD+IA cleanup errors!\n", + module); + break; + } + else if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_psp_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + DT_Mdep_Free (hdlptr); + } + } /* End PSP handling */ + + /* ----------- + * LMR handling + */ + if (depth > LIM_LMR) + { + /* + * The abuse is not for us this time, just prep Obj_Set. + */ + unsigned int w; + + DT_Mdep_debug (("%s: dat_lmr_create X %d\n", module, cmd->width)); + for (w = 0; w < cmd->width; w++) + { + DAT_REGION_DESCRIPTION region; + DAT_VLEN reg_size; + DAT_VADDR reg_addr; + + hdl_sets[w].lmr_buffer = DT_Mdep_Malloc (DFLT_BUFFSZ); + if (!hdl_sets[w].lmr_buffer) + { + DT_Mdep_printf ("%s: no memory for LMR buffers\n", module); + goto clean_up_now; + } + memset (®ion, 0, sizeof (region)); + region.for_va = hdl_sets[w].lmr_buffer; + + ret = dat_lmr_create (hdl_sets[w].ia_handle, + DAT_MEM_TYPE_VIRTUAL, + region, + DFLT_BUFFSZ, + hdl_sets[w].pz_handle, + DAT_MEM_PRIV_ALL_FLAG, + &hdl_sets[w].lmr_handle, + &hdl_sets[w].lmr_context, + NULL, /* FIXME */ + ®_size, ®_addr); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_lmr_create #%d fails: %s\n", + module, w+1, DT_RetToString (ret)); + /* handle contents undefined on failure */ + hdl_sets[w].lmr_handle = DAT_HANDLE_NULL; + goto clean_up_now; + } + if ((uintptr_t)reg_addr > (uintptr_t)hdl_sets[w].lmr_buffer + || (reg_size < DFLT_BUFFSZ + ((uintptr_t)reg_addr + - (uintptr_t)hdl_sets[w].lmr_buffer))) + { + DT_Mdep_printf ("%s: dat_lmr_create bogus outputs " + "in: 0x%p, %x out 0x%llx, %llx\n", + module, + hdl_sets[w].lmr_buffer, DFLT_BUFFSZ, + (DAT_UVERYLONG)reg_addr, (DAT_UVERYLONG)reg_size); + goto clean_up_now; + } + } + } + else if (depth == LIM_LMR) + { + /* + * See how many LMRs we can create + */ + unsigned int count = START_COUNT; + Bpool **hdlptr = (Bpool **) + DT_Mdep_Malloc (count * sizeof (*hdlptr)); + + /* LMR Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int tmp; + + DT_Mdep_debug (("%s: Exhausting dat_lmr_create\n", module)); + for (w = 0; w < cmd->maximum; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + 
sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: no memory for LMR handles\n", + module); + DT_Mdep_printf ("%s: LMRs created: %d\n", module, w); + retval = true; + break; + } + /* + * Let BpoolAlloc do the hard work; this means that + * we're testing unique memory registrations rather + * than repeatedly binding the same buffer set. + */ + hdlptr[w] = DT_BpoolAlloc ((Per_Test_Data_t *)0, + hdl_sets[w % cmd->width].ia_handle, + hdl_sets[w % cmd->width].pz_handle, + hdl_sets[w % cmd->width].ep_handle, + hdl_sets[w % cmd->width].evd_handle, + DFLT_BUFFSZ, + 1, + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if (!hdlptr[w]) + { + DT_Mdep_printf ("%s: LMRs created: %d\n", module, w); + retval = true; + break; + } + } + + DT_Mdep_printf ("%s: LMRs created: %d\n", module, w); + retval = true; + + /* LMR Cleanup loop */ + for (tmp = 0; tmp <= w; tmp++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (hdlptr[tmp]) + { + /* ignore rval - DT_Bpool_Destroy will complain */ + (void) DT_Bpool_Destroy ((Per_Test_Data_t *)0, hdlptr[tmp]); + } + } + DT_Mdep_Free (hdlptr); + } + } /* End LMR handling */ + + /* ----------- + * Posted receive buffer handling + */ + if (depth == LIM_RPOST) + { + /* + * See how many receive buffers we can post (to each EP). + * We are posting the same buffer 'cnt' times, deliberately, + * but that should be OK. + */ + unsigned int count = START_COUNT; + DAT_LMR_TRIPLET *hdlptr = (DAT_LMR_TRIPLET *) + DT_Mdep_Malloc (count * cmd->width * sizeof (*hdlptr)); + + /* Recv-Post Exhaustion test loop */ + if (hdlptr) + { + unsigned int w = 0; + unsigned int i = 0; + unsigned int done = 0; + + DT_Mdep_debug (("%s: Exhausting posting of recv buffers\n", module)); + for (w = 0; w < cmd->maximum && !done; w++) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + if (w == count + && !more_handles ((DAT_HANDLE **) &hdlptr, + &count, + cmd->width * sizeof (*hdlptr))) + { + DT_Mdep_printf ("%s: no memory for IOVs \n", + module); + DT_Mdep_printf ("%s: recv buffers posted per EP: %d\n" + "\t\t (total posted: %d)\n", + module, + w, + w * cmd->width); + done = retval = true; + break; + } + for (i = 0; i < cmd->width; i++) + { + DAT_LMR_TRIPLET *iovp = &hdlptr[w * cmd->width + i]; + DAT_DTO_COOKIE cookie; + + iovp->virtual_address = (DAT_VADDR) (uintptr_t) + hdl_sets[i].lmr_buffer; + iovp->segment_length = DFLT_BUFFSZ; + iovp->lmr_context = hdl_sets[i].lmr_context; + cookie.as_64 = (DAT_UINT64)0UL; + cookie.as_ptr = (DAT_PVOID) hdl_sets[i].lmr_buffer; + + DT_Mdep_printf ("%s: dat_ep_post_recv #%d\n", module, + w * cmd->width + i + 1); + ret = dat_ep_post_recv (hdl_sets[i].ep_handle, + 1, + iovp, + cookie, + DAT_COMPLETION_DEFAULT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_post_recv fails: %s\n", + module, DT_RetToString (ret)); + DT_Mdep_printf ("%s: recv buffers posted per EP: %d\n" + "\t\t (total posted: %d)\n", + module, + w, + w * cmd->width + i); + done = retval = true; + break; + } + } /* end for each EP wide */ + } /* end forever (!done) loop */ + + retval = true; + DT_Mdep_printf ("%s: recv buffers posted per EP: %d\n" + "\t\t (total posted: %d)\n", + module, + w, + w * cmd->width); + + /* Rpost Cleanup loop */ + for (i = 0; i < cmd->width; i++) + { + DAT_EVENT event; + + /* + * Disconnecting an unconnected EP should complete + * outstanding recv DTOs in error, and otherwise + * be a no-op. 
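+ *
+ * The drain below leans on that behavior.  A stricter cleanup could
+ * also check that each drained completion really was flushed, for
+ * instance (a sketch; assumes the DAT 1.1 event layout):
+ *
+ *   while (dat_evd_dequeue (evd, &event) == DAT_SUCCESS)
+ *   {
+ *       if (event.event_number == DAT_DTO_COMPLETION_EVENT
+ *           && event.event_data.dto_completion_event_data.status
+ *              != DAT_DTO_ERR_FLUSHED)
+ *       {
+ *           - unexpected live completion; worth reporting
+ *       }
+ *   }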
+ */ + ret = dat_ep_disconnect (hdl_sets[i].ep_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_disconnect (abrupt) fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + else + { + /* + * Remove all DTOs. The disconnect above should have + * flushed all posted operations, so this is just a + * clean up. + */ + do + { + ret = dat_evd_dequeue ( hdl_sets[i].evd_handle, + &event); + } while ( DAT_GET_TYPE(ret) != DAT_QUEUE_EMPTY ); + } + + } + DT_Mdep_Free (hdlptr); + } + } /* end depth == LIM_RPOST */ + + /* ----------- + * Test maximum size of LMR allowed + */ + if (depth == LIM_SIZE_LMR) + { + DAT_COUNT last_size = 0; + DAT_COUNT test_size = DFLT_BUFFSZ; + + for (;;) + { +#ifdef _ONTAP_ + sk_preempt_msec(10); +#endif + Bpool *test_bpool = DT_BpoolAlloc ((Per_Test_Data_t *)0, + hdl_sets[0].ia_handle, + hdl_sets[0].pz_handle, + hdl_sets[0].ep_handle, + hdl_sets[0].evd_handle, + test_size, + 1, + DAT_OPTIMAL_ALIGNMENT, + false, + false); + + if (!test_bpool) + { + DT_Mdep_printf ("%s: Largest LMR was 0x%x bytes\n" + "\t (failed attempting 0x%x bytes)\n", + module, last_size, test_size); + retval = true; + break; + } + else if (!DT_Bpool_Destroy ((Per_Test_Data_t *)0, test_bpool)) + { + DT_Mdep_printf ("%s: Largest LMR was 0x%x bytes\n", + module, test_size); + retval = true; + break; + } + + last_size = test_size; + test_size <<= 1; + if (test_size < last_size) + { + /* could conceivably wrap on 32-bit architectures */ + DT_Mdep_printf ("%s: LMR of 0x%x bytes OK - %s\n", + module, last_size, "stopping now."); + retval = true; + break; + } + } /* end forever loop */ + } /* end depth == LIM_SIZE_LMR */ + + DT_Mdep_debug (("%s: Limit Testing Completed - %s\n", + module, + retval ? "Successfully" : "with errors")); + + + /* ---------------------------------------------------------- + * Clean up and go home + */ +clean_up_now: + + DT_Mdep_debug (("%s: Cleaning up ...\n", module)); + + if (depth > LIM_LMR) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].lmr_handle) + { + ret = dat_lmr_free (hdl_sets[w].lmr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_lmr_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + if ((void *) hdl_sets[w].lmr_buffer) + { + DT_Mdep_Free ((void *) hdl_sets[w].lmr_buffer); + } + } + } /* end LIM_LMR cleanup */ + + /* + * if (depth == LIM_PSP) { + * Since PSPs are not part of the Obj_Set, + * there's no cleanup to do. + * } + * + * if (depth == LIM_RSP) { + * Since RSPs are not part of the Obj_Set, + * there'no cleanup nothing to do. 
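+ *
+ * (Nothing is left over because each exhaustion loop above frees
+ * whatever it managed to create before it returns.)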
+ * } + */ + + if (depth > LIM_EP) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].ep_handle) + { + ret = dat_ep_free (hdl_sets[w].ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + } + } /* end LIM_EP cleanup */ + + if (depth > LIM_EVD) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].evd_handle) + { + ret = dat_evd_free (hdl_sets[w].evd_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + } + } /* end LIM_EVD cleanup */ + + if (depth > LIM_CNO) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].cno_handle) + { + ret = dat_cno_free (hdl_sets[w].cno_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_cno_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + } + } /* end LIM_CNO cleanup */ + + if (depth > LIM_PZ) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].pz_handle) + { + ret = dat_pz_free (hdl_sets[w].pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_free fails: %s\n", + module, DT_RetToString (ret)); + retval = false; + } + } + } + } /* end LIM_PZ cleanup */ + + if (depth > LIM_IA) + { + unsigned int w; + + for (w = 0; w < cmd->width; w++) + { + if (hdl_sets[w].ia_handle) + { + /* DT_ia_close cleans up async evd handle, too */ + ret = DT_ia_close (hdl_sets[w].ia_handle, + DAT_CLOSE_GRACEFUL_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (graceful) error: %s\n", + module, DT_RetToString (ret)); + /* + * Since we take some pains to clean up properly, + * this really is an error. But if we get here, + * we may as well try the largest hammer we have. 
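+ *
+ * (An abrupt dat_ia_close is the documented big hammer: it is
+ * specified to destroy any objects still hanging off the IA, which
+ * is exactly what a failed graceful close implies are left behind.)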
+ */ + retval = false; + ret = DT_ia_close (hdl_sets[w].ia_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (abrupt) error: %s\n", + module, DT_RetToString (ret)); + } + } + } + } + } /* end LIM_IA cleanup */ + + if (depth && hdl_sets) + { + DT_Mdep_Free (hdl_sets); + } + + DT_Mdep_debug (("%s: testing and cleanup complete.\n", module)); + + return ( retval ); +} + + +/********************************************************************* + * Framework to run through all of the limit tests + */ +void +DT_cs_Limit (Limit_Cmd_t * cmd) +{ + char *star = + "**********************************************************************" ; + + if (cmd->Test_List[ LIM_IA ]) + { + char list[] = + { + "Limitation Test limit_ia\n" + "Description: Test max num of opens for the same physical IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_IA)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_PZ ]) + { + char list[] = + { + "Limitation Test limit_pz\n" + "Description: Test max num of PZs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_PZ)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_CNO ]) + { + char list[] = + { + "Limitation Test limit_cno\n" + "Description: Test max num of CNOs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_CNO)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_EVD ]) + { + char list[] = + { + "Limitation Test limit_evd\n" + "Description: Test max num of EVDs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_EVD)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_EP ]) + { + char list[] = + { + "Limitation Test limit_ep\n" + "Description: Test max num of EPs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_EP)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_RSP ]) + { + char list[] = + { + "Limitation Test limit_rsp\n" + "Description: Test max num of RSPs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_RSP)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_PSP ]) + { + char list[] = + { + "Limitation Test limit_psp\n" + "Description: Test max num of PSPs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_PSP)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_LMR ]) + { + char list[] = + { + "Limitation Test limit_lmr\n" + "Description: Test max num of LMRs that are supported by an IA" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_LMR)) + { + goto error; + } + DT_Mdep_printf ("%s\n", star); + } + + if (cmd->Test_List[ LIM_RPOST ]) + { + char list[] = + { + "Limitation Test limit_rpost\n" + "Description: Test max num of receive buffers posted to an EP" + } + ; + + DT_Mdep_printf ("%s\n", star); + DT_Mdep_printf ("%s\n", list); + if (!limit_test (cmd, LIM_RPOST)) + { 
+            goto error;
+        }
+        DT_Mdep_printf ("%s\n", star);
+    }
+
+    if (cmd->Test_List[ LIM_SIZE_LMR ])
+    {
+        char list[] =
+        {
+            "Limitation Test limit_size_lmr\n"
+            "Description: Test max size of LMRs that are supported by an IA"
+        }
+        ;
+
+        DT_Mdep_printf ("%s\n", star);
+        DT_Mdep_printf ("%s\n", list);
+        if (!limit_test (cmd, LIM_SIZE_LMR))
+        {
+            goto error;
+        }
+        DT_Mdep_printf ("%s\n", star);
+    }
+
+    /* More tests TBS ... */
+
+    return;
+
+error:
+    DT_Mdep_printf ("An error occurred; cannot continue with the limit tests\n");
+    DT_Mdep_printf ("%s\n", star);
+    return;
+}
+
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.c
new file mode 100644
index 00000000..3fe9347c
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.c
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" +#include "dapl_limit_cmd.h" + +/* --------------------------------------------------- */ +void +DT_Limit_Cmd_Init (Limit_Cmd_t * cmd) +{ + memset ((void *) cmd, 0, sizeof (Limit_Cmd_t)); + cmd->ReliabilityLevel = DAT_QOS_BEST_EFFORT; + cmd->width = 1; + cmd->maximum = ~0U; +} + +/* --------------------------------------------------- */ +bool +DT_Limit_Cmd_Parse ( Limit_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts) +{ + char c; + int i; + + for (;;) + { + c = DT_mygetopt_r (my_argc, my_argv, "dm:w:D:R:", opts); + if (c == EOF) + { + break; + } + switch (c) + { + case 'D': /* device name */ + { + strcpy (cmd->device_name, opts->optarg); + break; + } + case 'R': /* Service Reliability Level */ + { + cmd->ReliabilityLevel = DT_ParseQoS (opts->optarg); + break; + } + case 'd': /* print debug messages */ + { + DT_dapltest_debug++; + cmd->debug = true; + break; + } + case 'm': /* maximum for exhaustion testing */ + { + unsigned int len = (unsigned int)strspn (opts->optarg, "0123456789"); + + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -m option\n"); + DT_Limit_Cmd_Usage (); + return (false); + } + cmd->maximum = atol (opts->optarg); + break; + } + case 'w': /* width (number of {ia,evd,ep,...} sets) */ + { + unsigned int len = (unsigned int)strspn (opts->optarg, "0123456789"); + + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -w option\n"); + DT_Limit_Cmd_Usage (); + return (false); + } + cmd->width = atol (opts->optarg); + break; + } + case '?': + default: + { + DT_Mdep_printf ("Invalid Limit Test Parameter: %c\n", c); + DT_Limit_Cmd_Usage (); + return (false); + } + } + } + if (cmd->device_name[0] == '\0') + { + if (!DT_Mdep_GetDefaultDeviceName (cmd->device_name)) + { + DT_Mdep_printf ("can't get default device name\n"); + DT_Limit_Cmd_Usage (); + return (false); + } + } + + /* + * by default: test all limit tests + * otherwise: parse the remaining limit test arguments + */ + if (opts->optind == my_argc) + { + for (i = 0; i < LIM_NUM_TESTS; i++) + { + cmd->Test_List[i] = 1; + } + } + else + { + for (i = opts->optind; i < my_argc; i++) + { + + if (strcmp (my_argv[i], "limit_ia") == 0) + { + cmd->Test_List[LIM_IA] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_pz") == 0) + { + cmd->Test_List[LIM_PZ] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_cno") == 0) + { + cmd->Test_List[LIM_CNO] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_evd") == 0) + { + cmd->Test_List[LIM_EVD] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_ep") == 0) + { + cmd->Test_List[LIM_EP] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_rsp") == 0) + { + cmd->Test_List[LIM_RSP] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_psp") == 0) + { + cmd->Test_List[LIM_PSP] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_lmr") == 0) + { + cmd->Test_List[LIM_LMR] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_rpost") == 0) + { + cmd->Test_List[LIM_RPOST] = 1; + continue; + } + if (strcmp (my_argv[i], "limit_size_lmr") == 0) + { + cmd->Test_List[LIM_SIZE_LMR] = 1; + continue; + } + + DT_Mdep_printf ("Cannot find this limit test: %s\n", my_argv[i]); + DT_Limit_Cmd_Usage (); + return (false); + + } /* end foreach remaining argv */ + } + + return (true); +} + +/* --------------------------------------------------- */ +void +DT_Limit_Cmd_Usage (void) +{ + DT_Mdep_printf ("USAGE: ---- LIMIT TEST ----\n"); + 
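+ /*
+  * For example (illustrative command lines only; "ibnic0" is just
+  * the Windows default device name used elsewhere in this test):
+  *
+  *   dapltest -T L -D ibnic0 -w 4 -m 1000 limit_lmr limit_rpost
+  *   dapltest -T L                    (no names: run every test)
+  */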
+ DT_Mdep_printf ("USAGE: dapltest -T L\n");
+ DT_Mdep_printf ("USAGE:     [-D <device name>]\n");
+ DT_Mdep_printf ("USAGE:     [-d] : debug (zero)\n");
+ DT_Mdep_printf ("USAGE:     [-w <number of handle sets>]\n");
+ DT_Mdep_printf ("USAGE:     [-m <maximum objects to create>]\n");
+ DT_Mdep_printf ("USAGE:     [-R <service reliability>]\n");
+ DT_Mdep_printf ("USAGE:         (BE == QOS_BEST_EFFORT - Default)\n");
+ DT_Mdep_printf ("USAGE:         (HT == QOS_HIGH_THROUGHPUT)\n");
+ DT_Mdep_printf ("USAGE:         (LL == QOS_LOW_LATENCY)\n");
+ DT_Mdep_printf ("USAGE:         (EC == QOS_ECONOMY)\n");
+ DT_Mdep_printf ("USAGE:         (PM == QOS_PREMIUM)\n");
+ DT_Mdep_printf ("USAGE:     [limit_ia [limit_pz] [limit_cno] ... ]\n");
+ DT_Mdep_printf ("NOTE: If no test is specified, run all the limit tests\n");
+ DT_Mdep_printf ("NOTE: Otherwise, run only the specified tests\n");
+ DT_Mdep_printf ("NOTE: Tests are separated by spaces; each test can be:\n");
+
+ DT_Mdep_printf ("NOTE: [limit_ia] test max num of open IAs\n");
+ DT_Mdep_printf ("NOTE: [limit_pz] test max num of PZs\n");
+ DT_Mdep_printf ("NOTE: [limit_cno] test max num of CNOs\n");
+ DT_Mdep_printf ("NOTE: [limit_evd] test max num of EVDs\n");
+ DT_Mdep_printf ("NOTE: [limit_rsp] test max num of RSPs\n");
+ DT_Mdep_printf ("NOTE: [limit_psp] test max num of PSPs\n");
+ DT_Mdep_printf ("NOTE: [limit_ep] test max num of EPs\n");
+ DT_Mdep_printf ("NOTE: [limit_lmr] test max num of LMRs\n");
+ DT_Mdep_printf ("NOTE: [limit_rpost] test max num of recvs posted\n");
+ DT_Mdep_printf ("NOTE: [limit_size_lmr] test max size of LMR\n");
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.h
new file mode 100644
index 00000000..20730b67
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_limit_cmd.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +#ifndef __DAPL_LIMIT_CMD_H__ +#define __DAPL_LIMIT_CMD_H__ + +#include "dapl_mdep.h" + +typedef enum +{ + LIM_IA, + LIM_PZ, + LIM_CNO, + LIM_EVD, + LIM_EP, + LIM_RSP, + LIM_PSP, + LIM_LMR, + LIM_RPOST, + LIM_SIZE_LMR, + /* add further tests here */ + + LIM_NUM_TESTS /* for array size & limit checks */ +} Limit_Index; + +//------------------------------------- +#pragma pack(1) +typedef struct +{ + char device_name[256]; /* -D */ + DAT_QOS ReliabilityLevel; /* -R */ + DAT_UINT32 width; /* -w */ + DAT_UINT32 debug; /* -d */ + DAT_UINT32 maximum; /* -m */ + DAT_UINT32 Test_List[ LIM_NUM_TESTS ]; +} Limit_Cmd_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_main.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_main.c new file mode 100644 index 00000000..2a1b2083 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_main.c @@ -0,0 +1,196 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" + +#include "dapl_transaction_cmd.h" +#include "dapl_performance_cmd.h" +#include "dapl_quit_cmd.h" +#include "dapl_limit_cmd.h" + +DT_Mdep_LockType g_PerfTestLock; + + +/* Main Entry Point */ +int __cdecl +main (int argc, char *argv[]) +{ + return ( dapltest (argc, argv) ); +} + + +/* + * dapltest main program + */ +int +dapltest (int argc, char *argv[]) +{ + Params_t *params_ptr; + Transaction_Cmd_t *Transaction_Cmd; + Performance_Cmd_t *Performance_Cmd; + Quit_Cmd_t *Quit_Cmd; + Limit_Cmd_t *Limit_Cmd; + FFT_Cmd_t *FFT_Cmd; + + /* check memory leaking */ + /* + * DT_Mdep_LockInit(&Alloc_Count_Lock); alloc_count = 0; + */ + +#if defined(WIN32) + /* Cannot be done from DT_Mdep_Init as dapl_init makes some socket + * calls....So need to do this before calling dapl_init */ + WORD wversion = MAKEWORD (2, 2); + WSADATA wsaData; + int wsa_ret = WSAStartup (wversion, &wsaData); + if ( wsa_ret != 0 ) + { DT_Mdep_printf("WinSock Init return err = %u\n", wsa_ret); + return (wsa_ret); + } +#endif + + DT_Mdep_LockInit(&g_PerfTestLock); + +#ifdef GPROF + { + extern void dapl_init (void);dapl_init (); + } +#endif + DT_dapltest_debug = 0; + + params_ptr = (Params_t *) DT_Mdep_Malloc (sizeof (Params_t)); + if (!params_ptr) + { + DT_Mdep_printf ("Cannot allocate memory for Params structure\n"); + return ( 1 ); + } + + DT_Endian_Init (); /* init endian of local machine */ + if ( !DT_Mdep_Init ()) /* init OS, libraries, etc. 
*/ + { + DT_Mdep_printf ("Failed To Load Dat Library\n"); + return 1; + } + /* + * parse command line arguments + */ + if (!DT_Params_Parse (argc, argv, params_ptr)) + { + DT_Mdep_printf ("Command line syntax error\n"); + return 1; + } + switch (params_ptr->test_type) + { + case SERVER_TEST: + { + DT_cs_Server (params_ptr); + break; + } + case TRANSACTION_TEST: + { + Transaction_Cmd = ¶ms_ptr->u.Transaction_Cmd; + DT_cs_Client ( params_ptr, + Transaction_Cmd->dapl_name, + Transaction_Cmd->server_name, + Transaction_Cmd->num_threads * + Transaction_Cmd->eps_per_thread); + break; + } + case PERFORMANCE_TEST: + { + Performance_Cmd = ¶ms_ptr->u.Performance_Cmd; + DT_cs_Client ( params_ptr, + Performance_Cmd->dapl_name, + Performance_Cmd->server_name, + 1); + break; + } + case QUIT_TEST: + { + Quit_Cmd = ¶ms_ptr->u.Quit_Cmd; + DT_cs_Client ( params_ptr, + Quit_Cmd->device_name, + Quit_Cmd->server_name, + 0); + break; + } + case LIMIT_TEST: + { + Limit_Cmd = ¶ms_ptr->u.Limit_Cmd; + DT_cs_Limit (Limit_Cmd); + break; + } + case FFT_TEST: + { + FFT_Cmd = ¶ms_ptr->u.FFT_Cmd; + DT_cs_FFT (FFT_Cmd); + break; + } + } + + /* cleanup */ + + DT_Mdep_End (); + DT_Mdep_Free (params_ptr); +#ifdef GPROF + { + extern void dapl_fini (void);dapl_fini (); + } +#endif + + /* + * check memory leaking DT_Mdep_printf("App allocated Memory left: %d\n", + * alloc_count); DT_Mdep_LockDestroy(&Alloc_Count_Lock); + */ + + DT_Mdep_LockDestroy(&g_PerfTestLock); + + return ( 0 ); +} + + +void +Dapltest_Main_Usage (void) +{ + DT_Mdep_printf ("USAGE:\n"); + DT_Mdep_printf ("USAGE: dapltest -T [test-specific args]\n"); + DT_Mdep_printf ("USAGE: where \n"); + DT_Mdep_printf ("USAGE: S = Run as a server\n"); + DT_Mdep_printf ("USAGE: T = Transaction Test\n"); + DT_Mdep_printf ("USAGE: Q = Quit Test\n"); + DT_Mdep_printf ("USAGE: L = Limit Test\n"); + DT_Mdep_printf ("USAGE: F = FFT Test\n"); + DT_Mdep_printf ("USAGE:\n"); + DT_Mdep_printf ("NOTE:\tRun as server taking defaults (dapltest -T S)\n"); + DT_Mdep_printf ("NOTE: dapltest\n"); + DT_Mdep_printf ("NOTE:\n"); + DT_Mdep_printf ("NOTE:\tdapltest arguments may be supplied in a script file\n"); + DT_Mdep_printf ("NOTE:\tdapltest -f \n"); + DT_Mdep_printf ("USAGE:\n"); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.c new file mode 100644 index 00000000..ff887dd2 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.c @@ -0,0 +1,943 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. 
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_mdep.h" +#include "dapl_proto.h" + + + +#if defined(__linux__) +#include /* needed for pthread_atfork() */ +#include +#include /* needed for getenv() */ +#include /* needed for thread setup */ + +#include +#include +#include +#include +#include +#include +#include +#include /* for printf */ +#include +#include +#include /* for getaddrinfo */ + +/* + * Include files for setting up a network name + */ +#include +#include +#include +#include + +#elif defined(__solaris__) +#include +#include +#include +#include /* needed for getenv() */ +#include /* needed for pthread_atfork() */ +#include /* needed for thread setup */ + +#elif defined(WIN32) +#include +#else +#error "Undefined Platform" +#endif + +#if defined(__linux__) +static FILE *Stat_Fp = NULL; +# define DT_STAT_FILE "/proc/stat" +#endif + +#include "dapl_test_data.h" /* for alloc_count */ + + +/* + * Machine dependant initialization + */ + +bool +DT_Mdep_Init (void) +{ +#if defined(__linux__) + Stat_Fp = fopen (DT_STAT_FILE, "r"); + if ( NULL == Stat_Fp ) + { + perror ("fopen of " DT_STAT_FILE " failed"); + exit (1); + } +#elif defined(__solaris__) + /* nothing */ +#elif defined(WIN32) + #ifdef DYNAMIC_DAT_LOADING + bool status = false; + HMODULE library_handle; + if ( (library_handle = LoadLibrary(DAT_DLL_LIB)) == NULL ) + { + DT_Mdep_printf("DAT: library load failure\n"); + return status; + } + dat_open = (DAT_IA_OPENV_FUNC)GetProcAddress(library_handle, (LPCSTR)DAT_LIB_OPEN_ENTRY); + dat_close =(DAT_IA_CLOSE_FUNC)GetProcAddress(library_handle, (LPCSTR)DAT_LIB_CLOSE_ENTRY); + if (dat_open != NULL && dat_close != NULL ) + { + status = true; + } + if ( status != true ) + { + DT_Mdep_printf("DAT: Failured to get ProcAddress of dat_open/dat_close\n"); + } + return status; + #else + /* + * Application shouled be explicitly linked to dat.lib + */ + return true; + #endif //DYNAMIC_DAT_LOADING +#else + #error "Undefined Platform" +#endif +} + +/* + * Machine dependant deinitialization + */ + +void +DT_Mdep_End (void) +{ +#if defined(__linux__) + if ( 0 != fclose (Stat_Fp) ) + { + perror ("fclose of " DT_STAT_FILE " failed"); + exit (1); + } +#elif defined(__solaris__) + /* nothing */ +#elif defined(WIN32) + WSACleanup (); +#else + #error "Undefined Platform" +#endif +} + +/* + * Generate name of IB device + */ + +bool +DT_Mdep_GetDefaultDeviceName (char *dapl_name) +{ + strcpy (dapl_name, DT_MdepDeviceName); + return true; +} + +/* + * Sleep specified number of milliseconds + */ + +void +DT_Mdep_Sleep (int msec) +{ +#if defined(__linux__) + struct timespec t; + t.tv_sec = msec / 1000; /* Whole seconds */ + t.tv_nsec = (msec % 1000) * 1000 * 1000; + nanosleep (&t, 0); +#elif defined(__solaris__) + struct timespec t; + t.tv_sec = msec / 1000; /* Whole seconds */ + t.tv_nsec = (msec % 1000) * 1000 * 1000; + nanosleep (&t, 0); +#elif defined(WIN32) + Sleep (msec); +#else + #error "Undefined Platform" +#endif +} + +/* + * Get system statistics including uptime and idle time + */ + +bool +DT_Mdep_GetCpuStat ( + DT_CpuStat *cpu_stat ) +{ +#if defined(__linux__) + + #define DT_CPU_STAT_STR "cpu" + #define DT_CPU_STAT_BUFFER_SIZE 1024 + #define DT_CPU_STAT_DELIMITER " " + + static char buffer[DT_CPU_STAT_BUFFER_SIZE]; + + if ( 0 != fflush (Stat_Fp) ) + { + perror ("fflush of " DT_STAT_FILE " failed"); + exit (1); + } + + for 
(;;) + { + if ( NULL == fgets (buffer, DT_CPU_STAT_BUFFER_SIZE, Stat_Fp) ) + { + printf (DT_CPU_STAT_STR " not found\n"); + exit (1); + } + + if ( !strncmp (buffer, DT_CPU_STAT_STR, strlen (DT_CPU_STAT_STR) ) ) + { + break; + } + } + + (void) strtok (buffer, DT_CPU_STAT_DELIMITER); + cpu_stat->user = strtoul (strtok (NULL, DT_CPU_STAT_DELIMITER), NULL, 0); + cpu_stat->user += strtoul (strtok (NULL, DT_CPU_STAT_DELIMITER), NULL, 0); + cpu_stat->system = strtoul (strtok (NULL, DT_CPU_STAT_DELIMITER), NULL, 0); + cpu_stat->idle = strtoul (strtok (NULL, DT_CPU_STAT_DELIMITER), NULL, 0); + + rewind (Stat_Fp); + + return true; + +#elif defined(__solaris__) + /* FIXME not implemented */ + return true; +#elif defined(WIN32) + /* FIXME not implemented */ + return true; +#else + #error "Undefined Platform" +#endif +} + +/* + * Get current time in milliseconds (relative to some fixed point) + */ +unsigned long +DT_Mdep_GetTime (void) +{ +#if defined(__linux__) + struct tms ts; + clock_t t = times (&ts); + return (unsigned long) ((DAT_UINT64) t * 1000 / CLK_TCK); +#elif defined(__solaris__) + struct tms ts; + clock_t t = times (&ts); + return (unsigned long) ((DAT_UINT64) t * 1000 / CLK_TCK); +#elif defined(WIN32) + return GetTickCount (); +#else + #error "Undefined Platform" +#endif +} + +double +DT_Mdep_GetCpuMhz ( + void ) +{ +#if defined(__linux__) + #define DT_CPU_MHZ_BUFFER_SIZE 128 + #define DT_CPU_MHZ_MHZ "cpu MHz" + #define DT_CPU_MHZ_DELIMITER ":" + + FILE *fp; + char buffer[DT_CPU_MHZ_BUFFER_SIZE]; + char *mhz_str; + + fp = fopen ("/proc/cpuinfo", "r"); + if ( NULL == fp ) + { + perror ("fopen of /proc/cpuinfo failed"); + exit (1); + } + + for (;;) + { + if ( NULL == fgets (buffer, DT_CPU_MHZ_BUFFER_SIZE, fp) ) + { + printf ("cpu MHZ not found\n"); + exit (1); + } + + if ( !strncmp (buffer, DT_CPU_MHZ_MHZ, strlen (DT_CPU_MHZ_MHZ) ) ) + { + (void) strtok (buffer, DT_CPU_MHZ_DELIMITER); + mhz_str = strtok (NULL, DT_CPU_MHZ_DELIMITER); + + break; + } + } + + if ( 0 != fclose (fp) ) + { + perror ("fclose of /proc/cpuinfo failed"); + exit (1); + } + + return strtod (mhz_str, NULL); +#elif defined(WIN32) + LONG retVal; + HKEY hKey; + DWORD cpuSpeed = 0; + DWORD dataSize = sizeof (DWORD); + + /* For windows need to query the registry to get the CPU + * Information...-SVSV */ + retVal = RegOpenKeyEx (HKEY_LOCAL_MACHINE, + TEXT ("Hardware\\Description\\System\\CentralProcessor\\0"), + 0, + KEY_QUERY_VALUE, + &hKey); + + if (retVal == ERROR_SUCCESS) + { + retVal = RegQueryValueEx (hKey, + TEXT ("~MHz"), NULL, NULL, + (LPBYTE)&cpuSpeed, &dataSize); + + } + + RegCloseKey (hKey); + + return cpuSpeed; +#else + #error "Undefined Platform" +#endif +} + + +unsigned long +DT_Mdep_GetContextSwitchNum (void ) +{ +#if defined(__linux__) + + #define DT_CTXT_STR "ctxt" + #define DT_CTXT_BUFFER_SIZE 1024 + #define DT_CTXT_DELIMITER " " + + static char buffer[DT_CTXT_BUFFER_SIZE]; + char *ctxt_str; + + if ( 0 != fflush (Stat_Fp) ) + { + perror ("fflush of " DT_STAT_FILE " failed"); + exit (1); + } + + for (;;) + { + if ( NULL == fgets (buffer, DT_CTXT_BUFFER_SIZE, Stat_Fp) ) + { + printf (DT_CTXT_STR " not found\n"); + exit (1); + } + + if ( !strncmp (buffer, DT_CTXT_STR, strlen (DT_CTXT_STR) ) ) + { + (void) strtok (buffer, DT_CTXT_DELIMITER); + ctxt_str = strtok (NULL, DT_CTXT_DELIMITER); + + break; + } + } + + rewind (Stat_Fp); + + return strtoul (ctxt_str, NULL, 0); +#elif defined(WIN32) + return 0; +#else + #error "Undefined Platform" +#endif +} + +/* + * Memory allocate and free routines for control blocks 
(objects) - regular + * memory, always zeroed. + */ +void * +DT_Mdep_Malloc (size_t l_) +{ + void *rval; + + /* + * check memory leaking DT_Mdep_Lock(&Alloc_Count_Lock); alloc_count++; + * DT_Mdep_Unlock(&Alloc_Count_Lock); + */ + +#if defined(__linux__) + rval = malloc (l_); +#elif defined(__solaris__) + rval = malloc (l_); +#elif defined(WIN32) + rval = malloc (l_); +#else + #error "Undefined Platform" +#endif + + if (rval) + { + memset (rval, 0, l_); + } + return ( rval ); +} + +void +DT_Mdep_Free (void *a_) +{ + /* + * check memory leaking DT_Mdep_Lock(&Alloc_Count_Lock); alloc_count--; + * DT_Mdep_Unlock(&Alloc_Count_Lock); + */ + +#if defined(__linux__) + free (a_); +#elif defined(__solaris__) + free (a_); +#elif defined(WIN32) + free (a_); +#else + #error "Undefined Platform" +#endif +} + +/* + * Lock support + * + * Lock object constructor + */ +bool +DT_Mdep_LockInit (DT_Mdep_LockType * lock_ptr) +{ +#if defined(__linux__) + return pthread_mutex_init (lock_ptr, 0) ? false : true; +#elif defined(__solaris__) + return pthread_mutex_init (lock_ptr, 0) ? false : true; +#elif defined(WIN32) + *lock_ptr = CreateMutex (0, FALSE, 0); + return *lock_ptr ? true : false; +#else + #error "Undefined Platform" +#endif +} + +/* + * Lock object destructor + */ +void +DT_Mdep_LockDestroy (DT_Mdep_LockType * lock_ptr) +{ +#if defined(__linux__) + pthread_mutex_destroy (lock_ptr); +#elif defined(__solaris__) + pthread_mutex_destroy (lock_ptr); +#elif defined(WIN32) + CloseHandle (*lock_ptr); +#else + #error "Undefined Platform" +#endif +} + +/* + * Lock + */ +void +DT_Mdep_Lock (DT_Mdep_LockType * lock_ptr) +{ +#if defined(__linux__) + pthread_mutex_lock (lock_ptr); +#elif defined(__solaris__) + pthread_mutex_lock (lock_ptr); +#elif defined(WIN32) + WaitForSingleObject (*lock_ptr, INFINITE); +#else + #error "Undefined Platform" +#endif +} + +/* + * unlock + */ +void +DT_Mdep_Unlock (DT_Mdep_LockType * lock_ptr) +{ +#if defined(__linux__) + pthread_mutex_unlock (lock_ptr); +#elif defined(__solaris__) + pthread_mutex_unlock (lock_ptr); +#elif defined(WIN32) + ReleaseMutex (*lock_ptr); +#else + #error "Undefined Platform" +#endif +} + +/* + * Init Thread Attributes + */ +void +DT_Mdep_Thread_Init_Attributes (Thread * thread_ptr) +{ +#if defined(__linux__) + pthread_attr_init (&thread_ptr->attr); + pthread_attr_setstacksize (&thread_ptr->attr, thread_ptr->stacksize); + /* Create thread in detached state to free resources on termination; + * this precludes doing a pthread_join, but we don't do it + */ + pthread_attr_setdetachstate (&thread_ptr->attr, PTHREAD_CREATE_DETACHED); +#elif defined(__solaris__) + pthread_attr_init (&thread_ptr->attr); + pthread_attr_setstacksize (&thread_ptr->attr, thread_ptr->stacksize); + /* Create thread in detached state to free resources on termination; + * this precludes doing a pthread_join, but we don't do it + */ + pthread_attr_setdetachstate (&thread_ptr->attr, PTHREAD_CREATE_DETACHED); + +#elif defined(WIN32) + /* nothing */ +#else + #error "Undefined Platform" +#endif +} + +/* + * Destroy Thread Attributes + */ +void +DT_Mdep_Thread_Destroy_Attributes (Thread * thread_ptr) +{ +#if defined(__linux__) + pthread_attr_destroy (&thread_ptr->attr); +#elif defined(__solaris__) + pthread_attr_destroy (&thread_ptr->attr); +#elif defined(WIN32) + /* nothing */ +#else + #error "Undefined Platform" +#endif +} + +/* + * Start the thread + */ +bool +DT_Mdep_Thread_Start (Thread * thread_ptr) +{ +#if defined(__linux__) + return pthread_create (&thread_ptr->thread_handle, + 
&thread_ptr->attr, + DT_Mdep_Thread_Start_Routine, + thread_ptr) == 0; +#elif defined(__solaris__) + return pthread_create (&thread_ptr->thread_handle, + &thread_ptr->attr, + DT_Mdep_Thread_Start_Routine, + thread_ptr) == 0; +#elif defined(WIN32) + thread_ptr->thread_handle = + CreateThread (NULL, + thread_ptr->stacksize, + (LPTHREAD_START_ROUTINE)thread_ptr->function, + thread_ptr->param, + 0, + thread_ptr->threadId); // NULL); + if (thread_ptr->thread_handle == NULL) + { + return false; + } + return true; +#else + #error "Undefined Platform" +#endif +} + +/* + * Thread execution entry point function + */ +DT_Mdep_Thread_Start_Routine_Return_Type +DT_Mdep_Thread_Start_Routine (void *thread_handle) +{ + Thread *thread_ptr; + thread_ptr = (Thread *) thread_handle; + + thread_ptr->function (thread_ptr->param); +#if defined(__linux__) + return 0; +#elif defined(__solaris__) + return 0; +#elif defined(WIN32) + /* nothing */ +#else + #error "Undefined Platform" +#endif +} + +/* + * Thread detach routine. Allows the pthreads + * interface to clean up resources properly at + * thread's end. + */ +void DT_Mdep_Thread_Detach ( int thread_id ) /* AMM */ +{ + +#if defined(__linux__) + + pthread_detach(thread_id); +#elif defined(__solaris__) + pthread_detach( thread_id); +#elif defined(WIN32) + +#else + #error "Undefined Platform" +#endif +} + +/* + * Allows a thread to get its own ID so it + * can pass it to routines wanting to act + * upon themselves. + */ + +int DT_Mdep_Thread_SELF (void) /* AMM */ +{ + +#if defined(__linux__) + + return (pthread_self()); +#elif defined(__solaris__) + + return (pthread_self()); +#elif defined(WIN32) + return 0; +#else + #error "Undefined Platform" +#endif +} + + +/* + * Allow a thread to exit and cleanup resources. + */ + +void DT_Mdep_Thread_EXIT ( void * thread_handle ) /* AMM */ +{ + +#if defined(__linux__) + + pthread_exit( thread_handle ); +#elif defined(__solaris__) + + pthread_exit( thread_handle ); +#elif defined(WIN32) + /* nothing */ +#else + #error "Undefined Platform" +#endif +} + +/* + * DT_Mdep_wait_object_init + * + * Initialize a wait object + * + * Input: + * wait_obj + * + * Returns: + * 0 if successful + * -1 if unsuccessful + */ +int +DT_Mdep_wait_object_init ( + IN DT_WAIT_OBJECT *wait_obj) +{ + +#if defined(__linux__) || defined(__solaris__) + + wait_obj->signaled = DAT_FALSE; + if ( 0 != pthread_cond_init ( &wait_obj->cv, NULL ) ) + { + return (-1); + } + + /* Always returns 0. */ + pthread_mutex_init ( &wait_obj->lock, NULL ); + return 0; +#elif defined(WIN32) + *wait_obj = CreateEvent(NULL,FALSE,FALSE,NULL); + + if ( *wait_obj == NULL ) + { + return -1; + } + + return 0; + + +#else + #error "Undefined Platform" + +#endif + +} + + +/* Wait on the supplied wait object, up to the specified time_out. + * A timeout of DAT_TIMEOUT_INFINITE will wait indefinitely. + * Timeout should be specified in micro seconds. + * + * Functional returns: + * 0 -- another thread invoked dapl_os_wait object_wakeup + * -1 -- someone else is already waiting in this wait + * object. + * only one waiter is allowed at a time. + * -1 -- another thread invoked dapl_os_wait_object_destroy + * -1 -- the specified time limit was reached. 
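+ *
+ * Typical pairing, in outline (illustrative; the timeout is in
+ * microseconds, per the interface above):
+ *
+ *   DT_WAIT_OBJECT obj;
+ *   (void) DT_Mdep_wait_object_init (&obj);
+ *   waiter:   if (0 == DT_Mdep_wait_object_wait (&obj, 1000000))
+ *                 - signaled within one second
+ *   signaler: (void) DT_Mdep_wait_object_wakeup (&obj);
+ *   teardown: (void) DT_Mdep_wait_object_destroy (&obj);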
+ */ + +int +DT_Mdep_wait_object_wait ( + IN DT_WAIT_OBJECT *wait_obj, + IN int timeout_val) +{ +#if defined(__linux__) || defined(__solaris__) + + int dat_status; + int pthread_status; + struct timespec future; + + dat_status = 0; + pthread_status = 0; + + if ( timeout_val != DAT_TIMEOUT_INFINITE ) + { + struct timeval now; + struct timezone tz; + unsigned int microsecs; + + gettimeofday (&now, &tz); + microsecs = now.tv_usec + (timeout_val % 1000000); + if (microsecs > 1000000) + { + now.tv_sec = now.tv_sec + timeout_val / 1000000 + 1; + now.tv_usec = microsecs - 1000000; + } + else + { + now.tv_sec = now.tv_sec + timeout_val / 1000000; + now.tv_usec = microsecs; + } + + /* Convert timeval to timespec */ + future.tv_sec = now.tv_sec; + future.tv_nsec = now.tv_usec * 1000; + + pthread_mutex_lock (&wait_obj->lock); + while ( wait_obj->signaled == DAT_FALSE && pthread_status == 0) + { + pthread_status = pthread_cond_timedwait ( + &wait_obj->cv , &wait_obj->lock , &future ); + + /* + * No need to reset &future if we go around the loop; + * It's an absolute time. + */ + } + /* Reset the signaled status if we were woken up. */ + if (pthread_status == 0) + { + wait_obj->signaled = false; + } + pthread_mutex_unlock (&wait_obj->lock); + } + else + { + pthread_mutex_lock (&wait_obj->lock); + while ( wait_obj->signaled == DAT_FALSE && pthread_status == 0) + { + pthread_status = pthread_cond_wait ( + &wait_obj->cv , &wait_obj->lock ); + } + /* Reset the signaled status if we were woken up. */ + if (pthread_status == 0) + { + wait_obj->signaled = false; + } + pthread_mutex_unlock (&wait_obj->lock); + } + + if (ETIMEDOUT == pthread_status) + { + return (-1); + } + else if ( 0 != pthread_status) + { + return (-1); + } + + return 0; + +#elif defined(WIN32) + + DAT_RETURN status; + DWORD op_status; + + status = DAT_SUCCESS; + + if ( DAT_TIMEOUT_INFINITE == timeout_val ) + { + op_status = WaitForSingleObject(*wait_obj, INFINITE); + } + else + { + /* convert to milliseconds */ + op_status = WaitForSingleObject(*wait_obj, timeout_val/1000); + } + + if (op_status == WAIT_TIMEOUT) + { + status = DAT_CLASS_ERROR | DAT_TIMEOUT_EXPIRED; + } + else if ( op_status == WAIT_FAILED) + { + status = DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return status; + +#else + #error "Undefined Platform" + +#endif +} + + +/* + * DT_Mdep_wait_object_wakeup + * + * Wakeup a thread waiting on a wait object + * + * Input: + * wait_obj + * + * Returns: + * 0 if successful + * -1 if not successful + */ +int +DT_Mdep_wait_object_wakeup ( + DT_WAIT_OBJECT *wait_obj ) +{ +#if defined(__linux__) || defined(__solaris__) + + pthread_mutex_lock ( &wait_obj->lock ); + wait_obj->signaled = true; + pthread_mutex_unlock ( &wait_obj->lock ); + if ( 0 != pthread_cond_signal ( &wait_obj->cv ) ) + { + return (-1); + } + + return 0; + +#elif defined(WIN32) + DWORD op_status; + + op_status = SetEvent(*wait_obj); + if ( op_status == 0 ) + { + return DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return DAT_SUCCESS; + + +#else + #error "Undefined Platform" + +#endif + +} + + +/* + * DT_Mdep_wait_object_destroy + * + * Destroy a wait object + * + * Input: + * wait_obj + * + * Returns: + * 0 if successful + * -1 if not successful + */ +int +DT_Mdep_wait_object_destroy ( + IN DT_WAIT_OBJECT *wait_obj) +{ +#if defined(__linux__) || defined(__solaris__) + + if ( 0 != pthread_cond_destroy ( &wait_obj->cv ) ) + { + return (-1); + } + if ( 0 != pthread_mutex_destroy ( &wait_obj->lock ) ) + { + return (-1); + } + + return 0; + + +#elif defined(WIN32) + + DWORD 
op_status; + DAT_RETURN status = DAT_SUCCESS; + + op_status = CloseHandle(*wait_obj); + + if ( op_status == 0 ) + { + status = DAT_CLASS_ERROR | DAT_INTERNAL_ERROR; + } + + return status; + +#else + #error "Undefined Platform" + +#endif + + + +} + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.h new file mode 100644 index 00000000..539ff026 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_mdep.h @@ -0,0 +1,333 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_MDEP_H__ +#define __DAPL_MDEP_H__ + +/* include files */ + +#include + +#if defined (__linux__) +# include +# include +# include +# include +# include +# include +# include +# include + +#ifdef IA64 +# include +#endif + +#elif defined(__solaris__) +# include +# include +# include +# include +# include +# include +#elif defined (WIN32) +# include +# include +# include +# include +# include +# include +#else +# error "Undefined Platform" +#endif + +/* Default Device Name */ +#if defined(__linux__) +#define DT_MdepDeviceName "jni0a" + +#elif defined(__solaris__) +#define DT_MdepDeviceName "jni0a" + +#elif defined(WIN32) +#define DT_MdepDeviceName "ibnic0" +#else +#error "Undefined Platform" +#endif + +/* Boolean */ +#if defined(__linux__) +typedef int bool; +#elif defined(__solaris__) +typedef int bool; +#elif defined (WIN32) +typedef int bool; +#else +#error "Undefined Platform" +#endif + +#define true (1) +#define false (0) + +#ifndef __BASE_FILE__ +#define __BASE_FILE__ __FILE__ +#endif /* WIN32 */ + +#ifndef _INLINE_ +#if defined(__linux__) +#define _INLINE_ __inline__ +#elif defined(WIN32) +#define _INLINE_ __inline +#else +#error "Undefined Platform" +#endif +#endif + +/* Mdep function defines */ + +#define DT_Mdep_spew(N, _X_) \ +{ \ + if (DT_dapltest_debug >= (N)) \ + { \ + DT_Mdep_printf _X_; \ + } \ +} + +#define DT_Mdep_debug(_X_) DT_Mdep_spew(1, _X_) + +#if defined(__linux__) +#define DT_Mdep_printf printf +#define DT_Mdep_vprintf vprintf +#define DT_Mdep_flush() fflush(NULL) + +#elif defined(__solaris__) +#define DT_Mdep_printf printf +#define DT_Mdep_flush() fflush(NULL) + +#elif defined(WIN32) +#define DT_Mdep_printf printf +#define DT_Mdep_flush() fflush(NULL) +#else +#error "Undefined Platform" +#endif + + +/* + * Locks + */ + +#if defined(__linux__) +typedef pthread_mutex_t DT_Mdep_LockType; + +#elif defined(__solaris__) +typedef pthread_mutex_t DT_Mdep_LockType; + +#elif defined(WIN32) +typedef 
HANDLE DT_Mdep_LockType; + +#else +#error "Undefined Platform" +#endif + +/* Wait object used for inter thread communication */ + +#if defined(__linux__) +typedef struct +{ + bool signaled; + pthread_cond_t cv; + pthread_mutex_t lock; +} DT_WAIT_OBJECT; + +#elif defined(__solaris__) + +typedef struct +{ + bool signaled; + pthread_cond_t cv; + pthread_mutex_t lock; +} DT_WAIT_OBJECT; + +#elif defined(WIN32) + +typedef HANDLE DT_WAIT_OBJECT; + +#else + +#endif + +/* + * Thread types + */ +#if defined(__linux__) +typedef pthread_t DT_Mdep_ThreadHandleType; +typedef void (*DT_Mdep_ThreadFunction) (void *param); +typedef void * DT_Mdep_Thread_Start_Routine_Return_Type; +#define DT_MDEP_DEFAULT_STACK_SIZE 65536 + +#elif defined(__solaris__) +typedef pthread_t DT_Mdep_ThreadHandleType; +typedef void (*DT_Mdep_ThreadFunction) (void *param); +typedef void * DT_Mdep_Thread_Start_Routine_Return_Type; +#define DT_MDEP_DEFAULT_STACK_SIZE 65536 + +#elif defined(WIN32) +typedef HANDLE DT_Mdep_ThreadHandleType; +typedef void (*DT_Mdep_ThreadFunction) (void *param); +typedef void DT_Mdep_Thread_Start_Routine_Return_Type; +#define DT_MDEP_DEFAULT_STACK_SIZE 65536 + +#else +#error "Undefined Platform" +#endif + +typedef struct +{ + void (*function) (void *); + void *param; + DT_Mdep_ThreadHandleType thread_handle; + unsigned int stacksize; +#if defined(__solaris__)||defined(__linux__) + pthread_attr_t attr; /* Thread attributes */ +#endif +#if defined (WIN32) + LPDWORD threadId; +#endif +} Thread; + +/* + * System information + * + */ + +typedef struct +{ + unsigned long int system; + unsigned long int user; + unsigned long int idle; +} DT_CpuStat; + +/* + * Timing + */ + +#if defined(__linux__) +typedef unsigned long long int DT_Mdep_TimeStamp; +#elif defined(WIN32) +typedef unsigned __int64 DT_Mdep_TimeStamp; +#else +# error "Undefined Platform" +#endif + +static _INLINE_ DT_Mdep_TimeStamp +DT_Mdep_GetTimeStamp ( void ) +{ +#if defined(__linux__) +#if defined(__GNUC__) && defined(__PENTIUM__) + DT_Mdep_TimeStamp x; + __asm__ volatile (".byte 0x0f, 0x31" : "=A" (x)); + return x; +#else + +#ifdef IA64 + unsigned long x; + + x = get_cycles (); + return x; +#else + #error "Non-Pentium Linux - unimplemented" +#endif +#endif + +#elif defined(WIN32) +#if !defined (WIN64) && !defined (IA64) && !defined (AMD64) + _asm rdtsc +#else +#ifdef AMD64 + return __rdtsc(); +#else + LARGE_INTEGER val; + QueryPerformanceCounter( &val ); + return val.QuadPart; +#endif //endif WIN64 + +#endif //endif WIN64, and IA64 + +#else + #error "Undefined Platform" +#endif +} + +/* + * Define types for Window compatibility + */ +#if defined(WIN32) + +#if !defined(IA64) && !defined(WIN64) +#include +#endif + +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; + + +#define bzero(x, y) memset(x, 0, y) + +#endif + +/* + * Define long format types to be used in *printf format strings. We + * use the C string constant concatenation ability to define 64 bit + * formats, which unfortunatly are non standard in the C compiler + * world. E.g. 
%llx for gcc, %I64x for Windows + */ +#if defined(WIN32) +#define F64d "%I64d" +#define F64u "%I64u" +#define F64x "%I64x" +#define F64X "%I64X" + +#elif defined(__linux__) + +#define F64d "%lld" +#define F64u "%llu" +#define F64x "%llx" +#define F64X "%llX" +#endif + + +/* + * Define notion of a LONG LONG 0 + */ +#if defined(__linux__) +#define LZERO 0ULL +#elif defined(WIN32) +#define LZERO 0UL +#else +#define LZERO 0 +#endif + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.c new file mode 100644 index 00000000..8d7d525d --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_memlist.h" +#include "dapl_proto.h" + + +void +DT_MemListInit (Per_Test_Data_t * pt_ptr) +{ + DT_Mdep_LockInit (&pt_ptr->MemListLock); + pt_ptr->MemListHead = 0; +} + +void * +DT_MemListAlloc (Per_Test_Data_t * pt_ptr, char *file, mem_type_e t, int size) +{ + void *buffptr; + MemListEntry_t *entry_ptr; + buffptr = 0; + entry_ptr = 0; + + buffptr = DT_Mdep_Malloc (size); + if (buffptr == 0) + { + return 0; + } + if (pt_ptr == 0) /* not use mem_list */ + { + return buffptr; + } + entry_ptr = (MemListEntry_t *) DT_Mdep_Malloc (sizeof (MemListEntry_t)); + if (entry_ptr == 0) + { + DT_Mdep_Free (buffptr); + return 0; + } + strcpy (entry_ptr->filename, file); + entry_ptr->MemType = t; + entry_ptr->mem_ptr = buffptr; + + DT_Mdep_Lock (&pt_ptr->MemListLock); + entry_ptr->next = pt_ptr->MemListHead; + pt_ptr->MemListHead = entry_ptr; + DT_Mdep_Unlock (&pt_ptr->MemListLock); + + return buffptr; +} + +void +DT_MemListFree (Per_Test_Data_t * pt_ptr, void *ptr) +{ + MemListEntry_t *pre, *cur; + if (pt_ptr == 0) /* not use mem_list */ + { + DT_Mdep_Free (ptr); + return; + } + DT_Mdep_Lock (&pt_ptr->MemListLock); + pre = 0; + cur = pt_ptr->MemListHead; + while (cur) + { + if (cur->mem_ptr == ptr) + { + if (!pre) /* first entry */ + { + pt_ptr->MemListHead = cur->next; + cur->next = 0; + } + else + { + pre->next = cur->next; + cur->next = 0; + } + DT_Mdep_Free (ptr); + DT_Mdep_Free (cur); + goto unlock_and_return; + } + pre = cur; + cur = cur->next; + } +unlock_and_return: + DT_Mdep_Unlock (&pt_ptr->MemListLock); +} + +void +DT_PrintMemList (Per_Test_Data_t * pt_ptr) +{ + char *type[10] = + { + "BPOOL", "BUFF", "PERTESTDATA", "IBNIC", "NETADDRESS", + "TRANSACTIONTEST", "THREAD", "EPCONTEXT" + }; + MemListEntry_t *cur; + + DT_Mdep_Lock (&pt_ptr->MemListLock); + cur = pt_ptr->MemListHead; + if (cur != 0) + { + DT_Mdep_printf ("the allocated memory that have not been returned are:\n"); + } + while (cur) + { + DT_Mdep_printf ("file: dapl_%s, \tMemType:%s\n", + cur->filename, type[cur->MemType]); + cur = cur->next; + } + DT_Mdep_Unlock (&pt_ptr->MemListLock); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.h new file mode 100644 index 00000000..83d9806a --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_memlist.h @@ -0,0 +1,54 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#ifndef __DAPL_MEMLIST_H__ +#define __DAPL_MEMLIST_H__ + +#include "dapl_mdep.h" +#include "dapl_memlist.h" + +typedef enum +{ + BPOOL, + BUFF, + PERTESTDATA, + IBNIC, + NETADDRESS, + TRANSACTIONTEST, + THREAD, + EPCONTEXT +} mem_type_e; + +struct Mem_list_entry +{ + char filename[50]; + mem_type_e MemType; + void *mem_ptr; + struct Mem_list_entry *next; +}; + +typedef struct Mem_list_entry MemListEntry_t; +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_netaddr.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_netaddr.c new file mode 100644 index 00000000..e88d19a2 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_netaddr.c @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" +#include "dapl_cnxn.h" +#include + + +DAT_IA_ADDRESS_PTR +DT_NetAddrAlloc (Per_Test_Data_t * pt_ptr) +{ + DAT_IA_ADDRESS_PTR netaddr; + + netaddr = (DAT_IA_ADDRESS_PTR) DT_MemListAlloc (pt_ptr, "netaddr", + NETADDRESS, sizeof (DAT_SOCK_ADDR)); + if (!netaddr) + { + DT_Mdep_printf ("dapltest: No Memory to create netaddr!\n"); + } + return netaddr; +} + + +void +DT_NetAddrFree (Per_Test_Data_t * pt_ptr, DAT_IA_ADDRESS_PTR netaddr) +{ + DT_MemListFree (pt_ptr, netaddr); +} + + +bool +DT_NetAddrLookupHostAddress (DAT_IA_ADDRESS_PTR to_netaddr, + DAT_NAME_PTR hostname) +{ + struct addrinfo *target; + int rval; + + rval = getaddrinfo (hostname, NULL, NULL, &target); + if (rval != 0) + { + char *whatzit = "unknown error return"; + + switch (rval) + { + case EAI_FAMILY: + { + whatzit = "unsupported address family"; + break; + } + case EAI_SOCKTYPE: + { + whatzit = "unsupported socket type"; + break; + } + case EAI_BADFLAGS: + { + whatzit = "invalid flags"; + break; + } + case EAI_NONAME: + { + whatzit = "unknown node name"; + break; + } + case EAI_SERVICE: + { + whatzit = "service unavailable"; + break; + } +#if !defined(WIN32) + case EAI_ADDRFAMILY: + { + whatzit = "node has no address in this family"; + break; + } + case EAI_NODATA: + { + whatzit = "node has no addresses defined"; + break; + } +#endif + case EAI_MEMORY: + { + whatzit = "out of memory"; + break; + } + case EAI_FAIL: + { + whatzit = "permanent name server failure"; + break; + } + case EAI_AGAIN: + { + whatzit = "temporary name server failure"; + break; + } +#if !defined(WIN32) + case EAI_SYSTEM: + { + whatzit = "system error"; + break; + } +#endif + } + + DT_Mdep_printf ("getnameinfo (%s) failed (%s)\n", + 
hostname, whatzit); + return DAT_FALSE; + } + + /* Pull out IP address and print it as a sanity check */ + rval = ((struct sockaddr_in *)target->ai_addr)->sin_addr.s_addr; + DT_Mdep_printf ("Server Name: %s \n", hostname); + DT_Mdep_printf ("Server Net Address: %d.%d.%d.%d\n", + (rval >> 0) & 0xff, + (rval >> 8) & 0xff, + (rval >> 16) & 0xff, + (rval >> 24) & 0xff); + + *to_netaddr = * ((DAT_IA_ADDRESS_PTR) target->ai_addr); + + return ( DAT_TRUE ); +} + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.c new file mode 100644 index 00000000..96516bb4 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.c @@ -0,0 +1,296 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
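DT_NetAddrLookupHostAddress above resolves the peer by name with getaddrinfo() and copies the first result into the caller's DAT_SOCK_ADDR. A standalone sketch of the same resolve-and-print step for a POSIX host; the IPv4-only hint and all names here are choices of this sketch, not dapltest's:

#include <stdio.h>
#include <string.h>
#include <netdb.h>
#include <netinet/in.h>
#include <arpa/inet.h>

int main(int argc, char **argv)
{
    const char *host = (argc > 1) ? argv[1] : "localhost";
    struct addrinfo hints, *res = NULL;
    struct sockaddr_in sa;
    char buf[INET_ADDRSTRLEN];

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = AF_INET;   /* dapltest copies one DAT_SOCK_ADDR; keep it IPv4 */

    if (getaddrinfo(host, NULL, &hints, &res) != 0) {
        fprintf(stderr, "getaddrinfo(%s) failed\n", host);
        return 1;
    }
    memcpy(&sa, res->ai_addr, sizeof(sa));
    /* inet_ntop is byte-order safe; the manual shifts in the code above
     * print the octets in the right order only on little-endian hosts. */
    printf("%s -> %s\n", host, inet_ntop(AF_INET, &sa.sin_addr, buf, sizeof(buf)));
    freeaddrinfo(res);           /* the dapltest version never frees its result */
    return 0;
}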
+ */ + +#include "dapl_getopt.h" +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" + +#include "dapl_server_cmd.h" +#include "dapl_transaction_cmd.h" +#include "dapl_performance_cmd.h" +#include "dapl_quit_cmd.h" +#include "dapl_limit_cmd.h" + +#define MAX_ARGC 500 +#define MAX_ARG_LEN 100 + + +/* Parse command line arguments */ +bool +DT_Params_Parse (int argc, char *argv[], Params_t * params_ptr) +{ + Server_Cmd_t *Server_Cmd; + Transaction_Cmd_t *Transaction_Cmd; + Performance_Cmd_t *Performance_Cmd; + Quit_Cmd_t *Quit_Cmd; + Limit_Cmd_t *Limit_Cmd; + FFT_Cmd_t *FFT_Cmd; + + char *filename; + FILE *fd; + mygetopt_t opts; + char c; + char *cp; + char *sp; + char line[256]; + char *my_argv[MAX_ARGC]; + int my_argc; + int i; + DT_mygetopt_init (&opts); + opts.opterr = 0; /* turn off automatical error handler */ + + fd = 0; + my_argc = 0; + for (i = 0; i < MAX_ARGC; i++) + { + my_argv[i] = NULL; + } + + /* dapltest with no arguments means run as a server with default values */ + if (argc == 1) + { + params_ptr->test_type = SERVER_TEST; + params_ptr->ReliabilityLevel = DAT_QOS_BEST_EFFORT; + Server_Cmd = ¶ms_ptr->u.Server_Cmd; + DT_Server_Cmd_Init (Server_Cmd); + if (!DT_Mdep_GetDefaultDeviceName (Server_Cmd->dapl_name)) + { + DT_Mdep_printf ("can't get default device name\n"); + return false; + } + return true; + } + /* check for a script file */ + if (strncmp (argv[1], "-f", 2) == 0) + { + if (argc == 2) /* dapltest -fdata */ + { + filename = argv[1] + 2; + } + else + { + if (argc == 3 && strcmp (argv[1], "-f") == 0) /* dapltest -f data */ + { + filename = argv[2]; + } + else + { + DT_Mdep_printf ("-f allows no additional options\n"); + goto main_usage; + } + } + + if (!filename || strlen (filename) == 0) + { + DT_Mdep_printf ("Missing with -f option\n"); + goto main_usage; + } + /* read the script file and create a fake argc, argv */ + fd = fopen (filename, "r"); + if (fd == 0) + { + DT_Mdep_printf ("Cannot open script file: %s\n", filename); + goto main_usage; + } + my_argc = 1; + my_argv[0] = DT_Mdep_Malloc (MAX_ARG_LEN); + if (!my_argv[0]) + { + DT_Mdep_printf ("No Memory\n"); + goto error_return; + } + strcpy (my_argv[0], argv[0]); + while (fgets (&line[0], 256, fd)) + { + sp = &line[0]; + for (;;) + { + cp = strtok (sp, " \t\n"); + sp = 0; /* so can continue to parse this string */ + if (!cp) /* no more token found */ + { + break; + } + if (*cp == '#') /* Comment; go to next line. 
*/ + { + break; + } + my_argv[my_argc] = DT_Mdep_Malloc (MAX_ARG_LEN); + if (!my_argv[my_argc]) + { + DT_Mdep_printf ("No Memory\n"); + goto error_return; + } + strcpy (my_argv[my_argc], cp); + my_argc++; + } + } + } + else + { + my_argc = argc; + for (i = 0; i < argc; i++) + { + my_argv[i] = argv[i]; + } + } + +#if 0 + for (i = 0; i < my_argc; i++) + { + DT_Mdep_printf ("ARG %s\n", my_argv[i]); + } + exit (1); +#endif + + /* get test type - which must be the first arg */ + c = DT_mygetopt_r (my_argc, my_argv, "T:", &opts); + if (c != 'T') + { + DT_Mdep_printf ("Must Specify Test (-T) option first\n"); + goto main_usage; + } + if ((opts.optarg == 0) || strlen (opts.optarg) == 0 || *opts.optarg == '-') + { + DT_Mdep_printf ("Must specify test type\n"); + goto main_usage; + } + switch (*opts.optarg) + { + case 'S': /* Server Test */ + { + params_ptr->test_type = SERVER_TEST; + Server_Cmd = &params_ptr->u.Server_Cmd; + DT_Server_Cmd_Init (Server_Cmd); + if (!DT_Server_Cmd_Parse ( Server_Cmd, + my_argc, my_argv, &opts)) + { + goto error_return; + } + params_ptr->ReliabilityLevel = Server_Cmd->ReliabilityLevel; + break; + } + case 'T': /* Transaction Test */ + { + params_ptr->test_type = TRANSACTION_TEST; + Transaction_Cmd = &params_ptr->u.Transaction_Cmd; + DT_Transaction_Cmd_Init (Transaction_Cmd); + if (!DT_Transaction_Cmd_Parse ( Transaction_Cmd, + my_argc, my_argv, &opts)) + { + goto error_return; + } + params_ptr->ReliabilityLevel = Transaction_Cmd->ReliabilityLevel; + break; + } + case 'P': /* Performance Test */ + { + params_ptr->test_type = PERFORMANCE_TEST; + Performance_Cmd = &params_ptr->u.Performance_Cmd; + + if (!DT_Performance_Cmd_Init (Performance_Cmd)) + { + goto error_return; + } + + if (!DT_Performance_Cmd_Parse ( Performance_Cmd, + my_argc, my_argv, &opts)) + { + goto error_return; + } + + params_ptr->ReliabilityLevel = Performance_Cmd->qos; + break; + } + case 'Q': /* Quit server Test */ + { + params_ptr->test_type = QUIT_TEST; + Quit_Cmd = &params_ptr->u.Quit_Cmd; + DT_Quit_Cmd_Init (Quit_Cmd); + if (!DT_Quit_Cmd_Parse ( Quit_Cmd, + my_argc, my_argv, &opts)) + { + goto error_return; + } + params_ptr->ReliabilityLevel = Quit_Cmd->ReliabilityLevel; + break; + } + case 'L': /* Limit Test */ + { + params_ptr->test_type = LIMIT_TEST; + Limit_Cmd = &params_ptr->u.Limit_Cmd; + DT_Limit_Cmd_Init (Limit_Cmd); + if (!DT_Limit_Cmd_Parse (Limit_Cmd, + my_argc, my_argv, &opts)) + { + goto error_return; + } + params_ptr->ReliabilityLevel = Limit_Cmd->ReliabilityLevel; + break; + } + case 'F': + { + params_ptr->test_type = FFT_TEST; + FFT_Cmd = &params_ptr->u.FFT_Cmd; + DT_FFT_Cmd_Init (FFT_Cmd); + if (!DT_FFT_Cmd_Parse (FFT_Cmd, my_argc, my_argv, &opts)) + { + goto error_return; + } + params_ptr->ReliabilityLevel = FFT_Cmd->ReliabilityLevel; + break; + } + default: + { + DT_Mdep_printf ("Invalid Test Type\n"); + goto main_usage; + } + } + + if (fd) + { + for (i = 0; i < my_argc; i++) + { + DT_Mdep_Free (my_argv[i]); + } + fclose (fd); + } + return true; + +main_usage: + Dapltest_Main_Usage (); +error_return: + if (fd) + { + for (i = 0; i < my_argc; i++) + { + DT_Mdep_Free (my_argv[i]); + } + fclose (fd); + } + return false; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.h new file mode 100644 index 00000000..d64fe8d7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_params.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
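The parser above accepts the same whitespace-separated tokens from the command line or from a -f script file, where line breaks are interchangeable with spaces and # starts a comment that runs to end of line. So, for example, these two invocations would be equivalent (the server and device names here are made up for illustration):

    dapltest -T P -s server1 -D ibnic0 -i 10000 RW 4096 2

    dapltest -f perf.script

where perf.script contains:

    # performance test: RDMA Write, 2 segments of 4096 bytes
    -T P -s server1 -D ibnic0
    -i 10000
    RW 4096 2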
+ * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_PARAMS_H__ +#define __DAPL_PARAMS_H__ + + +#include "dapl_server_cmd.h" +#include "dapl_transaction_cmd.h" +#include "dapl_performance_cmd.h" +#include "dapl_limit_cmd.h" +#include "dapl_quit_cmd.h" +#include "dapl_fft_cmd.h" + +typedef enum +{ + SERVER_TEST, + TRANSACTION_TEST, + PERFORMANCE_TEST, + LIMIT_TEST, + QUIT_TEST, + FFT_TEST +} test_type_e; + +typedef struct +{ + test_type_e test_type; + + union + { + Server_Cmd_t Server_Cmd; + Transaction_Cmd_t Transaction_Cmd; + Performance_Cmd_t Performance_Cmd; + Limit_Cmd_t Limit_Cmd; + Quit_Cmd_t Quit_Cmd; + FFT_Cmd_t FFT_Cmd; + } u; + + /* Needed here due to structure of command processing */ + DAT_QOS ReliabilityLevel; +} Params_t; + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_client.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_client.c new file mode 100644 index 00000000..7eb8c667 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_client.c @@ -0,0 +1,545 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_performance_cmd.h" +#include "dapl_performance_stats.h" +#include "dapl_performance_test.h" +#include "dapl_proto.h" +#include "dapl_test_data.h" + +#define MAX_CONN_RETRY 8 + +/****************************************************************************/ +void +DT_Performance_Test_Client ( + Per_Test_Data_t *pt_ptr, + DAT_IA_HANDLE *ia_handle, + DAT_IA_ADDRESS_PTR remote_ia_addr) +{ + Performance_Test_t *test_ptr = NULL; + int connected = 1; + + DT_Mdep_debug (("Client: Starting performance test\n")); + + if ( !DT_Performance_Test_Create (pt_ptr, + ia_handle, + remote_ia_addr, + false, + pt_ptr->Server_Info.is_little_endian, + &test_ptr) ) + { + DT_Mdep_debug (("Client: Resource Creation Failed\n")); + connected = 0; + } + else if ( !DT_Performance_Test_Client_Connect (test_ptr) ) + { + DT_Mdep_debug (("Client: Connection Failed\n")); + connected = 0; + } + + if ( connected ) + { + if ( !DT_Performance_Test_Client_Exchange (test_ptr) ) + { + DT_Mdep_debug (("Client: Test Failed\n")); + } + } + + /* If we never connected, then the test will hang here + * because in the destroy of the test it will waits for a + * disconnect event which will never arrive, simply + * because there was never a connection. + */ + + DT_Performance_Test_Destroy (pt_ptr, test_ptr, false); + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... + *****/DT_Mdep_Sleep (5000); +#endif + + DT_Mdep_debug (("Client: Finished performance test\n")); +} + + +/****************************************************************************/ +bool +DT_Performance_Test_Client_Connect ( + Performance_Test_t *test_ptr) +{ + DAT_RETURN ret; + DAT_EVENT_NUMBER event_num; + unsigned int retry_cnt = 0; + + /* + * Client - connect + */ + DT_Mdep_debug (("Client[" F64x "]: Connect on port 0x" F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port, + (DAT_UVERYLONG)test_ptr->ep_context.port)); + +retry: + ret = dat_ep_connect (test_ptr->ep_context.ep_handle, + test_ptr->remote_ia_addr, + test_ptr->ep_context.port, + DAT_TIMEOUT_INFINITE, + 0, + (DAT_PVOID) 0, /* no private data */ + test_ptr->cmd->qos, + DAT_CONNECT_DEFAULT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ep_connect error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + return false; + } + + /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */ + if (!DT_conn_event_wait (test_ptr->ep_context.ep_handle, + test_ptr->conn_evd_hdl, + &event_num)) + { + if ( event_num == DAT_CONNECTION_EVENT_PEER_REJECTED ) + { + DAT_EVENT event; + DAT_COUNT drained = 0; + + DT_Mdep_Sleep (1000); + DT_Mdep_printf ("Test[" F64x "]: retrying connection...\n", + (DAT_UVERYLONG)test_ptr->base_port); + retry_cnt++; + + dat_ep_reset (test_ptr->ep_context.ep_handle); + do + { + ret = dat_evd_dequeue (test_ptr->recv_evd_hdl, &event); + drained++; + } while (ret != DAT_QUEUE_EMPTY); + + if (drained > 1 && retry_cnt < MAX_CONN_RETRY) + { + DT_Mdep_printf("Reposting!!! 
%d\n", drained); + + /* + * Post recv and sync buffers + */ + if ( !DT_post_recv_buffer (test_ptr->ep_context.ep_handle, + test_ptr->ep_context.bp, + DT_PERF_SYNC_RECV_BUFFER_ID, + DT_PERF_SYNC_BUFF_SIZE) ) + { + DT_Mdep_printf ("Test[" F64x "]: repost buffer error: \n", + (DAT_UVERYLONG)test_ptr->base_port); + return false; + } + } + + if (retry_cnt < MAX_CONN_RETRY) + { + goto retry; + } + } + /* error message printed by DT_cr_event_wait */ + return false; + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... + *****/DT_Mdep_Sleep (5000); +#endif + + DT_Mdep_debug (("Client[" F64x "]: Got Connection\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + return true; +} + + +/****************************************************************************/ +static bool +DT_Performance_Test_Client_Phase1 ( + Performance_Test_t *test_ptr, + Performance_Stats_t *stats ) +{ + DT_Mdep_TimeStamp pre_ts; + DT_Mdep_TimeStamp post_ts; + DT_CpuStat pre_cpu_stat; + DT_CpuStat post_cpu_stat; + unsigned int post_cnt; + unsigned int reap_cnt; + + /* + * measure bandwidth, OPS, and CPU utilization + */ + + if ( !DT_Mdep_GetCpuStat (&pre_cpu_stat) ) + { + return false; + } + + pre_ts = DT_Mdep_GetTimeStamp (); + + /* + * Fill the pipe + */ + + for ( post_cnt = 0; post_cnt < (unsigned int)test_ptr->ep_context.pipeline_len; post_cnt++ ) + { + if ( !DT_performance_post_rdma_op (&test_ptr->ep_context, + test_ptr->reqt_evd_hdl, + stats) ) + { + DT_Mdep_debug (("Test[" F64x "]: Post %i failed\n", + (DAT_UVERYLONG)test_ptr->base_port, + post_cnt)); + return false; + } + } + + /* + * Reap completions and repost + */ + + for ( reap_cnt = 0; reap_cnt < test_ptr->cmd->num_iterations; ) + { + unsigned int cur_reap_cnt; + unsigned int cur_post_cnt; + unsigned int cur_post_i; + + cur_reap_cnt = DT_performance_reap (test_ptr->reqt_evd_hdl, + test_ptr->cmd->mode, + stats); + + if ( 0 == cur_reap_cnt ) + { + DT_Mdep_debug (("Test[" F64x "]: Poll %i failed\n", + (DAT_UVERYLONG)test_ptr->base_port, + reap_cnt)); + return false; + } + + /* repost */ + cur_post_cnt = DT_min (test_ptr->cmd->num_iterations - post_cnt, + cur_reap_cnt); + + for ( cur_post_i = 0; cur_post_i < cur_post_cnt; cur_post_i++) + { + if ( !DT_performance_post_rdma_op (&test_ptr->ep_context, + test_ptr->reqt_evd_hdl, + stats) ) + { + DT_Mdep_debug (("Test[" F64x "]: Post %i failed\n", + (DAT_UVERYLONG)test_ptr->base_port, + post_cnt)); + return false; + } + } + + reap_cnt += cur_reap_cnt; + post_cnt += cur_post_cnt; + } + + /* end time and update stats */ + post_ts = DT_Mdep_GetTimeStamp (); + stats->time_ts = post_ts - pre_ts; + stats->num_ops = test_ptr->cmd->num_iterations; + + if ( !DT_Mdep_GetCpuStat (&post_cpu_stat) ) + { + return false; + } + + /* calculate CPU utilization */ + { + unsigned long int system; + unsigned long int user; + unsigned long int idle; + unsigned long int total; + + system = post_cpu_stat.system - pre_cpu_stat.system; + user = post_cpu_stat.user - pre_cpu_stat.user; + idle = post_cpu_stat.idle - pre_cpu_stat.idle; + + total = system + user + idle; + + if ( 0 == total ) + { + stats->cpu_utilization = 0.0; + } + else + { + stats->cpu_utilization = 1.0 - ((double) idle / (double) total ); + stats->cpu_utilization *= 100; + } + } + + return true; +} + + +/****************************************************************************/ +static bool +DT_Performance_Test_Client_Phase2 ( + Performance_Test_t *test_ptr, + Performance_Stats_t *stats ) +{ + DAT_LMR_TRIPLET *iov; + DAT_RMR_TRIPLET rmr_triplet; + 
DAT_DTO_COOKIE cookie; + DAT_EVENT event; + DAT_RETURN ret; + Performance_Ep_Context_t *ep_context; + Performance_Test_Op_t *op; + DT_Mdep_TimeStamp pre_ts; + DT_Mdep_TimeStamp post_ts; + unsigned long int bytes; + unsigned int i; + + /* + * measure latency + */ + + ep_context = &test_ptr->ep_context; + op = &ep_context->op; + iov = DT_Bpool_GetIOV (op->bp, 0); + + bytes = op->seg_size * op->num_segs; + + /* Prep the inputs */ + for (i = 0; i < op->num_segs; i++) + { + iov[i].pad = 0U; + iov[i].virtual_address = (DAT_VADDR) (uintptr_t) + DT_Bpool_GetBuffer (op->bp, i); + iov[i].segment_length = op->seg_size; + iov[i].lmr_context = DT_Bpool_GetLMR (op->bp, i); + } + + rmr_triplet.pad = 0U; + rmr_triplet.target_address = (DAT_VADDR) (uintptr_t) op->Rdma_Address; + rmr_triplet.segment_length = op->seg_size * op->num_segs; + rmr_triplet.rmr_context = op->Rdma_Context; + + cookie.as_ptr = NULL; + + for ( i = 0; i < test_ptr->cmd->num_iterations; i++ ) + { + if ( RDMA_WRITE == op->transfer_type ) + { + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_ep_post_rdma_write (ep_context->ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + } + else + { + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_ep_post_rdma_read (ep_context->ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + } + + if ( DAT_SUCCESS != ret ) + { + return false; + } + + for (;;) + { + ret = dat_evd_dequeue ( test_ptr->reqt_evd_hdl, + &event); + + post_ts = DT_Mdep_GetTimeStamp (); + + if (DAT_GET_TYPE(ret) == DAT_QUEUE_EMPTY) + { + continue; + } + else if ( DAT_SUCCESS != ret ) + { + DT_Mdep_printf ("Test Error: dat_evd_dequeue failed: %s\n", + DT_RetToString (ret)); + return false; + } + else if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + DT_performance_stats_record_latency (stats, post_ts - pre_ts); + break; + } + else /* error */ + { + DT_Mdep_printf ( + "Warning: dapl_performance_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + + return false; + } + } + } + + return true; +} + + +/****************************************************************************/ +bool +DT_Performance_Test_Client_Exchange ( + Performance_Test_t *test_ptr) +{ + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_DTO_COOKIE dto_cookie; + Performance_Stats_t stats; + RemoteMemoryInfo *rmi; + + test_ptr->ep_context.op.bp = + DT_BpoolAlloc (test_ptr->pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context.ep_handle, + test_ptr->reqt_evd_hdl, + test_ptr->ep_context.op.seg_size, + test_ptr->ep_context.op.num_segs, + DAT_OPTIMAL_ALIGNMENT, + false, + false); + + if ( !test_ptr->ep_context.op.bp ) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for buffers (RDMA/RD)\n", + (DAT_UVERYLONG)test_ptr->base_port); + return false; + } + + /* + * Recv the other side's info + */ + DT_Mdep_debug (("Test[" F64x "]: Waiting for Sync Msg\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, + DT_PERF_SYNC_RECV_BUFFER_ID); + if ( !DT_dto_event_wait (test_ptr->recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + test_ptr->ep_context.ep_handle, + DT_PERF_SYNC_BUFF_SIZE, + dto_cookie, + "Receive Sync_Msg") ) + { + return false; + } + + /* + * Extract what we need + */ + DT_Mdep_debug (("Test[" F64x "]: Sync Msg Received\n", + (DAT_UVERYLONG)test_ptr->base_port)); + rmi = (RemoteMemoryInfo *) DT_Bpool_GetBuffer (test_ptr->ep_context.bp,
+ DT_PERF_SYNC_RECV_BUFFER_ID); + + /* + * If the client and server are of different endiannesses, + * we must correct the endianness of the handle and address + * we pass to the other side. The other side cannot (and + * better not) interpret these values. + */ + if (DT_local_is_little_endian != test_ptr->is_remote_little_endian) + { + rmi->rmr_context = DT_EndianMemHandle (rmi->rmr_context); + rmi->mem_address.as_64 =DT_EndianMemAddress (rmi->mem_address.as_64); + } + + test_ptr->ep_context.op.Rdma_Context = rmi->rmr_context; + test_ptr->ep_context.op.Rdma_Address = rmi->mem_address.as_ptr; + + DT_Mdep_spew (3, ("Got RemoteMemInfo [ va=%p, ctx=%x ]\n", + test_ptr->ep_context.op.Rdma_Address, + test_ptr->ep_context.op.Rdma_Context)); + + /* + * Get to work ... + */ + DT_Mdep_debug (("Test[" F64x "]: Begin...\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + DT_performance_stats_init (&stats); + + if ( !DT_Performance_Test_Client_Phase1(test_ptr, &stats) ) + { + return false; + } + + if ( !DT_Performance_Test_Client_Phase2(test_ptr, &stats) ) + { + return false; + } + + DT_Mdep_debug (("Test[" F64x "]: Sending Sync Msg\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + if (!DT_post_send_buffer (test_ptr->ep_context.ep_handle, + test_ptr->ep_context.bp, + DT_PERF_SYNC_SEND_BUFFER_ID, + DT_PERF_SYNC_BUFF_SIZE)) + { + /* error message printed by DT_post_send_buffer */ + return false; + } + + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, + DT_PERF_SYNC_SEND_BUFFER_ID); + if (!DT_dto_event_wait (test_ptr->reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + test_ptr->ep_context.ep_handle, + DT_PERF_SYNC_BUFF_SIZE, + dto_cookie, + "Client_Sync_Send")) + { + return false; + } + + DT_performance_stats_print (&stats, test_ptr->cmd, test_ptr); + + return true; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.c new file mode 100644 index 00000000..025c14bc --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.c @@ -0,0 +1,355 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_bpool.h" +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" + +/* + * Map Performance_Mode_Type values to readable strings + */ +const char * +DT_PerformanceModeToString (Performance_Mode_Type mode) +{ + if ( BLOCKING_MODE == mode ) + { + return "blocking"; + } + else if ( POLLING_MODE == mode ) + { + return "polling"; + } + else + { + return "error: unkown mode"; + } +} + + +static void +DT_Performance_Cmd_Usage (void) +{ + DT_Mdep_printf ("USAGE: ---- PERFORMANCE TEST ----\n"); + DT_Mdep_printf ("USAGE: dapltest -T P\n"); + DT_Mdep_printf ("USAGE: -s \n"); + DT_Mdep_printf ("USAGE: [-m b|p]\n"); + DT_Mdep_printf ("USAGE: [-D ]\n"); + DT_Mdep_printf ("USAGE: [-d] : debug (zero)\n"); + DT_Mdep_printf ("USAGE: [-i ] : (1, 000)\n"); + DT_Mdep_printf ("USAGE: [-p ]\n"); + DT_Mdep_printf ("USAGE: [-R ]\n"); + DT_Mdep_printf ("USAGE: (BE == QOS_BEST_EFFORT - Default)\n"); + DT_Mdep_printf ("USAGE: (HT == QOS_HIGH_THROUGHPUT)\n"); + DT_Mdep_printf ("USAGE: (LL == QOS_LOW_LATENCY)\n"); + DT_Mdep_printf ("USAGE: (EC == QOS_ECONOMY)\n"); + DT_Mdep_printf ("USAGE: (PM == QOS_PREMIUM)\n"); + DT_Mdep_printf ("USAGE: \n"); + DT_Mdep_printf ("USAGE:\n"); + DT_Mdep_printf ("USAGE: Each OP consists of:\n"); + DT_Mdep_printf ("USAGE: : \"RR\" (RDMA READ)\n"); + DT_Mdep_printf ("USAGE: : \"RW\" (RDMA WRITE)\n"); + DT_Mdep_printf ("USAGE: [seg_size [num_segs] ] : (4096, 1)\n"); +} + +static bool +DT_Performance_Cmd_Parse_Op ( + Performance_Cmd_t * cmd, + int index, + int my_argc, + char **my_argv) +{ + int i; + + /* + * Op Format: [seg_size] [num_segs] + */ + + if ( index == my_argc ) + { + DT_Mdep_printf ("Operation Missing Transfer Type\n"); + return (false); + } + + for ( i = 0; index < my_argc; i++, index++ ) + { + switch ( i ) + { + case 0: + { + if ( 0 == strncmp (my_argv[index], "RR", strlen ("RR")) ) + { + cmd->op.transfer_type = RDMA_READ; + } + else if ( 0 == strncmp (my_argv[index], "RW", strlen ("RW")) ) + { + cmd->op.transfer_type = RDMA_WRITE; + } + else + { + DT_Mdep_printf ("OP type must be \n"); + return (false); + } + break; + } + case 1: + { + cmd->op.seg_size = atoi (my_argv[index]); + break; + } + case 2: + { + cmd->op.num_segs = atoi (my_argv[index]); + break; + } + default: + { + DT_Mdep_printf ("Too many OP args\n"); + return (false); + } + } + } + + return (true); +} + + +static bool +DT_Performance_Cmd_Validate ( + Performance_Cmd_t *cmd) +{ + if ( '\0' == cmd->server_name[0] ) + { + DT_Mdep_printf ("Must specify server_name in command line or scriptfile\n"); + return (false); + } + + if ( '\0' == cmd->dapl_name[0] ) + { + DT_Mdep_printf ("Must specify device_name in command line or scriptfile\n"); + return (false); + } + + if ( 0 == cmd->pipeline_len ) + { + DT_Mdep_printf ("Pipeline size must not be 0\n"); + return (false); + } + + if ( cmd->debug ) + { + DT_Performance_Cmd_Print (cmd); + } + + return true; +} + + +void +DT_Performance_Cmd_Print ( + Performance_Cmd_t *cmd) +{ + DT_Mdep_printf ("-------------------------------------\n"); + DT_Mdep_printf ("PerfCmd.server_name : %s\n", + cmd->server_name); + DT_Mdep_printf ("PerfCmd.dapl_name : %s\n", + cmd->dapl_name); + DT_Mdep_printf ("PerfCmd.mode : %s\n", + (cmd->mode == BLOCKING_MODE) ? "BLOCKING" : "POLLING"); + DT_Mdep_printf ("PerfCmd.num_iterations : %d\n", + cmd->num_iterations); + DT_Mdep_printf ("PerfCmd.pipeline_len : %d\n", + cmd->pipeline_len); + DT_Mdep_printf ("PerfCmd.op.transfer_type : %s\n", + cmd->op.transfer_type == RDMA_READ ? 
"RDMA_READ" : + cmd->op.transfer_type == RDMA_WRITE ? "RDMA_WRITE" : + "SEND_RECV"); + DT_Mdep_printf ("PerfCmd.op.num_segs : %d\n", + cmd->op.num_segs); + DT_Mdep_printf ("PerfCmd.op.seg_size : %d\n", + cmd->op.seg_size); +} + + +bool +DT_Performance_Cmd_Parse ( + Performance_Cmd_t *cmd, + int my_argc, + char **my_argv, + mygetopt_t *opts) +{ + char c; + unsigned int len; + + for (;;) + { + c = DT_mygetopt_r (my_argc, my_argv, "D:dm:i:p:R:s:", opts); + + if ( EOF == c ) + { + break; + } + + switch ( c ) + { + case 'D': /* device name */ + { + strncpy (cmd->dapl_name, opts->optarg, NAME_SZ); + break; + } + case 'd': /* print debug messages */ + { + DT_dapltest_debug++; + cmd->debug = true; + break; + } + case 'm': /* mode */ + { + if ( !strncmp (opts->optarg, "b", strlen ("b")) ) + { + cmd->mode = BLOCKING_MODE; + } + else if ( !strncmp (opts->optarg, "p", strlen ("p")) ) + { + cmd->mode = POLLING_MODE; + } + else + { + DT_Mdep_printf ("Syntax Error -m option\n"); + DT_Performance_Cmd_Usage (); + return (false); + } + + break; + } + case 'i': /* num iterations */ + { + len = (unsigned int)strspn (opts->optarg, "0123456789"); + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -i option\n"); + DT_Performance_Cmd_Usage (); + return (false); + } + cmd->num_iterations = atol (opts->optarg); + break; + } + case 'p': /* pipline size */ + { + len = (unsigned int)strspn (opts->optarg, "0123456789"); + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -p option\n"); + DT_Performance_Cmd_Usage (); + return (false); + } + cmd->pipeline_len = atol (opts->optarg); + break; + } + case 'R': /* Service Reliability Level */ + { + cmd->qos = DT_ParseQoS (opts->optarg); + break; + } + case 's': /* server name */ + { + if ((opts->optarg == 0) || + strlen (opts->optarg) == 0 || + *opts->optarg == '-') + { + DT_Mdep_printf ("must specify server name\n"); + DT_Performance_Cmd_Usage (); + return (false); + } + + strncpy (cmd->server_name, opts->optarg, NAME_SZ); + break; + } + default: + { + DT_Mdep_printf ("Invalid Performance Test Parameter: %c\n", c); + DT_Performance_Cmd_Usage (); + return (false); + } + } + } + + /* + * now parse the op + */ + if ( !DT_Performance_Cmd_Parse_Op (cmd, opts->optind, my_argc, my_argv) ) + { + DT_Performance_Cmd_Usage (); + return (false); + } + + if ( !DT_Performance_Cmd_Validate (cmd) ) + { + DT_Performance_Cmd_Usage (); + return (false); + } + + return (true); +} + + +bool +DT_Performance_Cmd_Init (Performance_Cmd_t * cmd) +{ + memset (cmd, 0, sizeof (Performance_Cmd_t)); + cmd->dapltest_version = DAPLTEST_VERSION; + cmd->client_is_little_endian = DT_local_is_little_endian; + cmd->qos = DAT_QOS_BEST_EFFORT; + cmd->debug = false; + cmd->num_iterations = 1000; + cmd->pipeline_len = ~0; + + cmd->op.transfer_type = RDMA_WRITE; + cmd->op.seg_size = 4096; + cmd->op.num_segs = 1; + + if ( !DT_Mdep_GetDefaultDeviceName (cmd->dapl_name) ) + { + DT_Mdep_printf ("can't get default device name\n"); + return (false); + } + + return (true); +} + + +void +DT_Performance_Cmd_Endian (Performance_Cmd_t * cmd) +{ + cmd->dapltest_version = DT_Endian32 (cmd->dapltest_version); + cmd->qos = DT_Endian32 (cmd->qos); + cmd->num_iterations = DT_Endian32 (cmd->num_iterations); + cmd->debug = DT_Endian32 (cmd->debug); + + cmd->op.transfer_type = DT_Endian32 (cmd->op.transfer_type); + cmd->op.seg_size = DT_Endian32 (cmd->op.seg_size); + cmd->op.num_segs = DT_Endian32 (cmd->op.num_segs); +} diff --git 
a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.h new file mode 100644 index 00000000..fb89ec1d --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_cmd.h @@ -0,0 +1,65 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_PERFORMANCE_CMD_H__ +#define __DAPL_PERFORMANCE_CMD_H__ + +#include + +#define NAME_SZ 256 + +typedef enum +{ + BLOCKING_MODE, + POLLING_MODE +} Performance_Mode_Type; + +#pragma pack (1) +typedef struct +{ + DAT_UINT32 transfer_type; + DAT_UINT32 seg_size; + DAT_UINT32 num_segs; +}Performance_Cmd_Op_t; + +typedef struct +{ + DAT_UINT32 dapltest_version; + DAT_UINT32 client_is_little_endian; + char server_name[NAME_SZ]; /* -s */ + char dapl_name[NAME_SZ]; /* -D */ + DAT_QOS qos; + DAT_UINT32 debug; /* -d */ + Performance_Mode_Type mode; /* -m */ + DAT_UINT32 num_iterations; /* -i */ + DAT_UINT32 pipeline_len; /* -p */ + Performance_Cmd_Op_t op; + DAT_UINT32 use_rsp; /* -r */ + +} Performance_Cmd_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_server.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_server.c new file mode 100644 index 00000000..53c85393 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_server.c @@ -0,0 +1,411 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
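The packed, fixed-width layout above is what lets DT_Performance_Cmd_Endian byte-swap the command field by field when client and server endiannesses differ. A minimal sketch of the same packed-struct wire format, using the more common convert-to-network-order convention instead of an endianness flag; all names here are hypothetical:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#pragma pack(1)
typedef struct {                   /* toy analogue of Performance_Cmd_Op_t */
    uint32_t transfer_type;
    uint32_t seg_size;
    uint32_t num_segs;
} wire_op_t;
#pragma pack()

/* Swap every field to network order once, right before sending. */
static void wire_op_hton(wire_op_t *op)
{
    op->transfer_type = htonl(op->transfer_type);
    op->seg_size      = htonl(op->seg_size);
    op->num_segs      = htonl(op->num_segs);
}

int main(void)
{
    wire_op_t op = { 1, 4096, 1 };
    wire_op_hton(&op);
    /* pack(1) keeps the struct exactly 12 bytes on every platform. */
    printf("wire size: %u bytes\n", (unsigned int)sizeof(wire_op_t));
    return 0;
}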
+ */ + +#include + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_performance_cmd.h" +#include "dapl_performance_test.h" +#include "dapl_proto.h" +#include "dapl_test_data.h" + + +/****************************************************************************/ +void +DT_Performance_Test_Server ( + void *var) +{ + Per_Test_Data_t *pt_ptr = var; + Performance_Test_t *test_ptr = NULL; + + int success = 1; + + DT_Mdep_debug (("Server: Starting performance test\n")); + + if ( !DT_Performance_Test_Create (pt_ptr, + pt_ptr->ps_ptr->ia_handle, + (DAT_IA_ADDRESS_PTR) 0, + true, + pt_ptr->Client_Info.is_little_endian, + &test_ptr) ) + { + DT_Mdep_printf ("Server: Resource Creation Failed\n"); + success = 0; + } + if ( 1 == success ) + { + if (! DT_Performance_Test_Server_Connect (test_ptr) ) + { + success = 0; + DT_Mdep_printf ("Server: Connection Failed\n"); + } + } + + if ( 1 == success ) + { + if ( ! DT_Performance_Test_Server_Exchange (test_ptr) ) + { + success = 0; + DT_Mdep_printf ("Server: Test Failed\n"); + } + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... + *****/DT_Mdep_Sleep (5000); +#endif + + + + DT_Performance_Test_Destroy (pt_ptr, test_ptr, true); + + DT_Mdep_printf ("Server: Finished performance test. Detaching.\n"); + + DT_Mdep_Thread_Detach (DT_Mdep_Thread_SELF ()); /* AMM */ + DT_Thread_Destroy (pt_ptr->thread, pt_ptr); /* destroy Master thread */ + + DT_Mdep_Lock (&pt_ptr->ps_ptr->num_clients_lock); + pt_ptr->ps_ptr->num_clients--; + DT_Mdep_Unlock (&pt_ptr->ps_ptr->num_clients_lock); + + DT_PrintMemList (pt_ptr); /* check if we return all space allocated */ + DT_Mdep_LockDestroy (&pt_ptr->Thread_counter_lock); + DT_Mdep_LockDestroy (&pt_ptr->MemListLock); + DT_Free_Per_Test_Data (pt_ptr); + + DT_Mdep_Unlock (&g_PerfTestLock); + DT_Mdep_printf ("Server: Finished performance test. Exiting.\n"); + + DT_Mdep_Thread_EXIT (NULL); +} + + +/****************************************************************************/ +bool +DT_Performance_Test_Server_Connect ( + Performance_Test_t *test_ptr) +{ + DAT_RETURN ret; + bool status; + DAT_RSP_HANDLE rsp_handle; + DAT_PSP_HANDLE psp_handle; + + DAT_CR_ARRIVAL_EVENT_DATA cr_stat; + DAT_CR_HANDLE cr_handle; + DAT_EVENT_NUMBER event_num; + + rsp_handle = DAT_HANDLE_NULL; + psp_handle = DAT_HANDLE_NULL; +#if 0 /* FIXME */ + if (test_ptr->cmd->use_rsp) + { + /* + * Server - create a single-use RSP and + * await a connection for this EP + */ + ret = dat_rsp_create (test_ptr->ia_handle, + test_ptr->ep_context.port, + test_ptr->ep_context.ep_handle, + test_ptr->creq_evd_hdl, + &rsp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_rsp_create error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + status = false; + goto psp_free; + } + + DT_Mdep_debug (("Server[" F64x "]: Listen on RSP port 0x" F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port, + (DAT_UVERYLONG)test_ptr->ep_context.port)); + + /* wait for the connection request */ + if (!DT_cr_event_wait (test_ptr->conn_evd_hdl, &cr_stat) || + !DT_cr_check ( &cr_stat, + DAT_HANDLE_NULL, + test_ptr->ep_context.port, + &cr_handle, + "Server") ) + { + status = false; + goto psp_free; + } + + /* what, me query? 
just try to accept the connection */ + ret = dat_cr_accept (cr_handle, + test_ptr->ep_context.ep_handle, + 0, (DAT_PVOID)0 /* no private data */ ); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_cr_accept error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* cr_handle consumed on failure */ + status = false; + goto psp_free; + } + + /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */ + if (!DT_conn_event_wait ( test_ptr->ep_context.ep_handle, + test_ptr->conn_evd_hdl, + &event_num)) + { + /* error message printed by DT_conn_event_wait */ + status = false; + goto psp_free; + } + + } + else +#endif /* FIXME */ + { + /* + * Server - use a short-lived PSP instead of an RSP + */ + status = true; + + ret = dat_psp_create (test_ptr->ia_handle, + test_ptr->ep_context.port, + test_ptr->creq_evd_hdl, + DAT_PSP_CONSUMER_FLAG, + &psp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_psp_create error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + status = false; + psp_handle = DAT_HANDLE_NULL; + return (status); + } + + } + + /* + * Here's where we tell the main server process that + * this thread is ready to wait for a connection request + * from the remote end. + */ + DT_Mdep_wait_object_wakeup (&test_ptr->pt_ptr->synch_wait_object); + + DT_Mdep_debug (("Server[" F64x "]: Listen on PSP port 0x" F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port, + (DAT_UVERYLONG)test_ptr->ep_context.port)); + + /* wait for a connection request */ + if (!DT_cr_event_wait (test_ptr->creq_evd_hdl, &cr_stat) || + !DT_cr_check ( &cr_stat, + psp_handle, + test_ptr->ep_context.port, + &cr_handle, + "Server") ) + { + status = false; + goto psp_free; + } + + /* what, me query? just try to accept the connection */ + ret = dat_cr_accept (cr_handle, + test_ptr->ep_context.ep_handle, + 0, + (DAT_PVOID)0 /* no private data */ ); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_cr_accept error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* cr_handle consumed on failure */ + status = false; + goto psp_free; + } + + /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */ + if (!DT_conn_event_wait (test_ptr->ep_context.ep_handle, + test_ptr->conn_evd_hdl, + &event_num ) ) + { + /* error message printed by DT_cr_event_wait */ + status = false; + goto psp_free; + } + + DT_Mdep_debug (("Server[" F64x "]: Accept on port 0x" F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port, + (DAT_UVERYLONG)test_ptr->ep_context.port)); +psp_free: + if ( DAT_HANDLE_NULL != psp_handle ) + { + /* throw away single-use PSP */ + ret = dat_psp_free (psp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_psp_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + status = false; + } + } + if ( DAT_HANDLE_NULL != rsp_handle ) + { + /* throw away single-use RSP */ + ret = dat_rsp_free (rsp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_rsp_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + status = false; + } + } /* end short-lived PSP */ + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ...
+ *****/DT_Mdep_Sleep (5000); +#endif + + return status; +} + + + +/****************************************************************************/ +bool +DT_Performance_Test_Server_Exchange ( + Performance_Test_t *test_ptr) +{ + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + RemoteMemoryInfo *rmi; + DAT_DTO_COOKIE dto_cookie; + + test_ptr->ep_context.op.bp = + DT_BpoolAlloc (test_ptr->pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context.ep_handle, + test_ptr->reqt_evd_hdl, + test_ptr->ep_context.op.seg_size, + test_ptr->ep_context.op.num_segs, + DAT_OPTIMAL_ALIGNMENT, + true, + true); + + if ( !test_ptr->ep_context.op.bp ) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for buffers (RDMA/RD)\n", + (DAT_UVERYLONG)test_ptr->base_port); + return false; + } + + test_ptr->ep_context.op.Rdma_Context = + DT_Bpool_GetRMR (test_ptr->ep_context.op.bp, 0); + test_ptr->ep_context.op.Rdma_Address = + (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer (test_ptr->ep_context.op.bp, 0); + + /* + * Prep send buffer with memory information + */ + rmi = (RemoteMemoryInfo *) DT_Bpool_GetBuffer (test_ptr->ep_context.bp, + DT_PERF_SYNC_SEND_BUFFER_ID); + + rmi->rmr_context = test_ptr->ep_context.op.Rdma_Context; + rmi->mem_address.as_64 = (DAT_UINT64) LZERO; + rmi->mem_address.as_ptr = test_ptr->ep_context.op.Rdma_Address; + + if ( rmi->mem_address.as_ptr ) + { + DT_Mdep_spew (3, ("RemoteMemInfo va=" F64x ", ctx=%x\n", + (DAT_UVERYLONG)rmi->mem_address.as_64, + rmi->rmr_context)); + } + + /* + * If the client and server are of different endiannesses, + * we must correct the endianness of the handle and address + * we pass to the other side. The other side cannot (and + * better not) interpret these values. + */ + if (DT_local_is_little_endian != test_ptr->is_remote_little_endian) + { + rmi->rmr_context = DT_EndianMemHandle (rmi->rmr_context); + rmi->mem_address.as_64 = DT_EndianMemAddress (rmi->mem_address.as_64); + } + + /* + * Send our memory info + */ + DT_Mdep_debug (("Test[" F64x "]: Sending Sync Msg\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + /* post the send buffer */ + if (!DT_post_send_buffer (test_ptr->ep_context.ep_handle, + test_ptr->ep_context.bp, + DT_PERF_SYNC_SEND_BUFFER_ID, + DT_PERF_SYNC_BUFF_SIZE)) + { + /* error message printed by DT_post_send_buffer */ + return false; + } + + /* reap the send and verify it */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, + DT_PERF_SYNC_SEND_BUFFER_ID); + if ( !DT_dto_event_wait (test_ptr->reqt_evd_hdl, &dto_stat) || + !DT_dto_check (&dto_stat, + test_ptr->ep_context.ep_handle, + DT_PERF_SYNC_BUFF_SIZE, + dto_cookie, + "Send Sync_Msg") ) + { + return false; + } + + /* + * Recv the other side's info + */ + DT_Mdep_debug (("Test[" F64x "]: Waiting for Sync Msg\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, + DT_PERF_SYNC_RECV_BUFFER_ID); + if ( !DT_dto_event_wait (test_ptr->recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + test_ptr->ep_context.ep_handle, + DT_PERF_SYNC_BUFF_SIZE, + dto_cookie, + "Receive Sync_Msg") ) + { + return false; + } + + DT_Mdep_debug (("Test[" F64x "]: Received Sync Msg\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + return true; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.c new file mode 100644 index 00000000..1f42887b --- /dev/null +++
b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.c @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_performance_stats.h" +#include "dapl_proto.h" +#include "dapl_test_data.h" + + +void +DT_performance_stats_init ( + Performance_Stats_t *stats) +{ + stats->num_ops = 0; + stats->bytes = 0; + stats->post_ctxt_switch_num = 0; + stats->reap_ctxt_switch_num = 0; + + stats->cpu_utilization = 0.0; + stats->time_ts = 0; + + stats->posts_sans_ctxt.num = 0; + stats->posts_sans_ctxt.total_ts = 0; + stats->posts_sans_ctxt.max_ts = 0; + stats->posts_sans_ctxt.min_ts = ~0; + + stats->posts_with_ctxt.num = 0; + stats->posts_with_ctxt.total_ts = 0; + stats->posts_with_ctxt.max_ts = 0; + stats->posts_with_ctxt.min_ts = ~0; + + stats->reaps_sans_ctxt.num = 0; + stats->reaps_sans_ctxt.total_ts = 0; + stats->reaps_sans_ctxt.max_ts = 0; + stats->reaps_sans_ctxt.min_ts = ~0; + + stats->reaps_with_ctxt.num = 0; + stats->reaps_with_ctxt.total_ts = 0; + stats->reaps_with_ctxt.max_ts = 0; + stats->reaps_with_ctxt.min_ts = ~0; + + stats->latency.num = 0; + stats->latency.total_ts = 0; + stats->latency.max_ts = 0; + stats->latency.min_ts = ~0; +} + + +void +DT_performance_stats_record_post ( + Performance_Stats_t *stats, + unsigned long ctxt_switch_num, + DT_Mdep_TimeStamp ts) +{ + if ( ctxt_switch_num ) + { + stats->posts_with_ctxt.num++; + stats->posts_with_ctxt.total_ts += ts; + stats->posts_with_ctxt.max_ts = + DT_max (stats->posts_with_ctxt.max_ts, ts); + stats->posts_with_ctxt.min_ts = + DT_min (stats->posts_with_ctxt.min_ts, ts); + + stats->post_ctxt_switch_num += ctxt_switch_num; + } + else + { + stats->posts_sans_ctxt.num++; + stats->posts_sans_ctxt.total_ts += ts; + stats->posts_sans_ctxt.max_ts = + DT_max (stats->posts_sans_ctxt.max_ts, ts); + stats->posts_sans_ctxt.min_ts = + DT_min (stats->posts_sans_ctxt.min_ts, ts); + } +} + + +void +DT_performance_stats_record_reap ( + Performance_Stats_t *stats, + unsigned long ctxt_switch_num, + DT_Mdep_TimeStamp ts) +{ + if ( ctxt_switch_num ) + { + stats->reaps_with_ctxt.num++; + stats->reaps_with_ctxt.total_ts += ts; + stats->reaps_with_ctxt.max_ts = + DT_max (stats->reaps_with_ctxt.max_ts, ts); + stats->reaps_with_ctxt.min_ts = + DT_min (stats->reaps_with_ctxt.min_ts, ts); + + stats->reap_ctxt_switch_num += ctxt_switch_num; + } + else + { + stats->reaps_sans_ctxt.num++; + stats->reaps_sans_ctxt.total_ts += ts; + stats->reaps_sans_ctxt.max_ts = + DT_max (stats->reaps_sans_ctxt.max_ts, ts); + 
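+ /*
+  * Each Performance_Stats_Data_t bucket keeps only a count, a running
+  * total, and the extrema, so a mean can be derived later without
+  * storing individual samples.  For example, timestamp deltas {5, 3, 7}
+  * leave num = 3, total_ts = 15, max_ts = 7, min_ts = 3, and the print
+  * path reports total_ts / (num * cpu_mhz) as the arithmetic mean.
+  */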
stats->reaps_sans_ctxt.min_ts = + DT_min (stats->reaps_sans_ctxt.min_ts, ts); + } +} + + +void +DT_performance_stats_record_latency ( + Performance_Stats_t *stats, + DT_Mdep_TimeStamp ts) +{ + stats->latency.num++; + stats->latency.total_ts += ts; + stats->latency.max_ts = + DT_max (stats->latency.max_ts, ts); + stats->latency.min_ts = + DT_min (stats->latency.min_ts, ts); +} + +void +DT_performance_stats_data_combine ( + Performance_Stats_Data_t *dest, + Performance_Stats_Data_t *src_a, + Performance_Stats_Data_t *src_b) +{ + dest->num = src_a->num + src_b->num; + dest->total_ts = src_a->total_ts + src_b->total_ts; + dest->max_ts = DT_max (src_a->max_ts, src_b->max_ts); + dest->min_ts = DT_min (src_a->min_ts, src_b->min_ts); +} + + +void +DT_performance_stats_combine ( + Performance_Stats_t *dest, + Performance_Stats_t *src_a, + Performance_Stats_t *src_b) +{ + dest->num_ops = + src_a->num_ops + src_b->num_ops; + + dest->bytes = + src_a->bytes + src_b->bytes; + + dest->post_ctxt_switch_num = + src_a->post_ctxt_switch_num + src_b->post_ctxt_switch_num; + + dest->reap_ctxt_switch_num = + src_a->reap_ctxt_switch_num + src_b->reap_ctxt_switch_num; + + dest->cpu_utilization = DT_max (src_a->cpu_utilization, + src_b->cpu_utilization); + dest->time_ts = DT_max (src_a->time_ts, src_b->time_ts); + + DT_performance_stats_data_combine (&dest->posts_sans_ctxt, + &src_a->posts_sans_ctxt, + &src_b->posts_sans_ctxt); + + DT_performance_stats_data_combine (&dest->posts_with_ctxt, + &src_a->posts_with_ctxt, + &src_b->posts_with_ctxt); + + DT_performance_stats_data_combine (&dest->reaps_sans_ctxt, + &src_a->reaps_sans_ctxt, + &src_b->reaps_sans_ctxt); + + DT_performance_stats_data_combine (&dest->reaps_with_ctxt, + &src_a->reaps_with_ctxt, + &src_b->reaps_with_ctxt); + + DT_performance_stats_data_combine (&dest->latency, + &src_a->latency, + &src_b->latency); +} + + +double +DT_performance_stats_data_print ( + Performance_Stats_Data_t *data, + double cpu_mhz) +{ + double average; + + average = (double)data->total_ts / (data->num * cpu_mhz); + + DT_Mdep_printf ("%-32s : %11.04f us\n" + "%-32s : %11.04f us\n" + "%-32s : %11.04f us\n", + " arithmetic mean", + average, + " maximum", + (double)data->max_ts / cpu_mhz, + " minimum", + (double)data->min_ts / cpu_mhz); + + return average; +} + + +void +DT_performance_stats_print ( + Performance_Stats_t *stats, + Performance_Cmd_t *cmd, + Performance_Test_t *test) +{ + double cpu_mhz; + double time_s; + double mbytes; + double ops_per_sec; + double bandwidth; + double latency; + double time_per_post; + double time_per_reap; + + cpu_mhz = DT_Mdep_GetCpuMhz (); + latency = 0; + + time_s = ((double)stats->time_ts / (1000000.0 * cpu_mhz)); + + mbytes = (double) (stats->bytes >> 20 ); + + if ( 0.0 == time_s ) + { + DT_Mdep_printf ("Error determining time\n"); + return; + } + else if ( 0 == stats->num_ops ) + { + DT_Mdep_printf ("Error determining number of operations\n"); + return; + } + else if ( 0.0 == cpu_mhz ) + { + DT_Mdep_printf ("Error determining CPU speed\n"); + return; + } + + ops_per_sec = (double)stats->num_ops / time_s; + bandwidth = mbytes / time_s; + + DT_Mdep_printf ("\n" + "------------------------- Statistics -------------------------\n" + "\n" + "%-32s : %8s\n" + "%-32s : %8s\n" + "%-32s : %8u\n" + "%-32s : %8u bytes\n" + "%-32s : %8u\n" + "%-32s : %8u\n" + "\n", + "Mode", + DT_PerformanceModeToString (cmd->mode), + "Operation Type", + DT_TransferTypeToString (cmd->op.transfer_type), + "Number of Operations", + cmd->num_iterations, + "Segment Size", +
cmd->op.seg_size, + "Number of Segments", + cmd->op.num_segs, + "Pipeline Size", + test->ep_context.pipeline_len); + + DT_Mdep_printf ("%-32s : %11.04f sec\n" + "%-32s : %11.04f MB\n" + "%-32s : %11.04f%%\n" + "%-32s : %11.04f ops/sec\n" + "%-32s : %11.04f MB/sec\n", + "Total Time", + time_s, + "Total Data Exchanged", + mbytes, + "CPU Utilization", + stats->cpu_utilization, + "Operation Throughput", + ops_per_sec, + "Bandwidth", + bandwidth); + + DT_Mdep_printf ("\n" + "Latency\n" + "\n"); + + if ( stats->latency.num ) + { + latency = DT_performance_stats_data_print (&stats->latency, cpu_mhz); + } + + DT_Mdep_printf ("\n" + "Time Per Post\n" + "\n" + "%-32s : %8u\n", + " posts without context switches", + stats->posts_sans_ctxt.num); + + if ( stats->posts_sans_ctxt.num ) + { + DT_performance_stats_data_print (&stats->posts_sans_ctxt, cpu_mhz); + } + + DT_Mdep_printf ("\n" + "%-32s : %8u\n", + " posts with context switches", + stats->posts_with_ctxt.num); + + if ( stats->posts_with_ctxt.num ) + { + DT_Mdep_printf ("%-32s : %8u\n", + " number of context switches", + stats->post_ctxt_switch_num); + DT_performance_stats_data_print (&stats->posts_with_ctxt, cpu_mhz); + } + + DT_Mdep_printf ("\n" + "Time Per Reap\n" + "\n" + "%-32s : %8u\n", + " reaps without context switches", + stats->reaps_sans_ctxt.num); + + if ( stats->reaps_sans_ctxt.num ) + { + DT_performance_stats_data_print (&stats->reaps_sans_ctxt, cpu_mhz); + } + + + DT_Mdep_printf ("\n" + "%-32s : %8u\n", + " reaps with context switches", + stats->reaps_with_ctxt.num); + + if ( stats->reaps_with_ctxt.num ) + { + DT_Mdep_printf ("%-32s : %8u\n", + " number of context switches", + stats->reap_ctxt_switch_num); + + DT_performance_stats_data_print (&stats->reaps_with_ctxt, cpu_mhz); + } + + time_per_post = + (int64_t) (stats->posts_sans_ctxt.total_ts + stats->posts_with_ctxt.total_ts) / + (cpu_mhz * (stats->posts_sans_ctxt.num + stats->posts_with_ctxt.num)); + + time_per_reap = + (int64_t) (stats->reaps_sans_ctxt.total_ts + stats->reaps_with_ctxt.total_ts) / + (cpu_mhz * (stats->reaps_sans_ctxt.num + stats->reaps_with_ctxt.num)); + + DT_Mdep_printf ("\n" + "NOTE:\n" + " 1 MB = 1024 KB = 1048576 B \n" + "-------------------------------------------------------------\n" + "raw: %s, %u, %u, %u, %u, %f, %f, %f, %f, %f, %f \n" + "-------------------------------------------------------------\n" + "\n", + DT_TransferTypeToString (cmd->op.transfer_type), + cmd->num_iterations, + cmd->op.seg_size, + cmd->op.num_segs, + test->ep_context.pipeline_len, + stats->cpu_utilization, + ops_per_sec, + bandwidth, + latency, + time_per_post, + time_per_reap); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.h new file mode 100644 index 00000000..143291f1 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_stats.h @@ -0,0 +1,61 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_STATS_H__ +#define __DAPL_STATS_H__ + +#include "dapl_mdep.h" + +#define DT_min(a,b) ((a < b) ? (a) : (b)) +#define DT_max(a,b) ((a > b) ? (a) : (b)) +#define DT_whole(num) ((unsigned int)(num)) +#define DT_hundredths(num) ((unsigned int)(((num) - (unsigned int)(num)) * 100)) + +typedef struct +{ + unsigned int num; + DT_Mdep_TimeStamp total_ts; + DT_Mdep_TimeStamp max_ts; + DT_Mdep_TimeStamp min_ts; +} Performance_Stats_Data_t; + + +typedef struct +{ + unsigned int num_ops; + int64_t bytes; + unsigned int post_ctxt_switch_num; + unsigned int reap_ctxt_switch_num; + double cpu_utilization; + DT_Mdep_TimeStamp time_ts; + Performance_Stats_Data_t posts_sans_ctxt; + Performance_Stats_Data_t posts_with_ctxt; + Performance_Stats_Data_t reaps_sans_ctxt; + Performance_Stats_Data_t reaps_with_ctxt; + Performance_Stats_Data_t latency; +} Performance_Stats_t; + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_test.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_test.h new file mode 100644 index 00000000..ff220f4b --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_test.h @@ -0,0 +1,86 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#ifndef __DAPL_PERFORMANCE_TEST_H__ +#define __DAPL_PERFORMANCE_TEST_H__ + +#include "dapl_common.h" +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_performance_cmd.h" + + +#define DT_PERF_SYNC_SEND_BUFFER_ID 0 +#define DT_PERF_SYNC_RECV_BUFFER_ID 1 +#define DT_PERF_SYNC_BUFF_SIZE sizeof(RemoteMemoryInfo) +#define DT_PERF_DFLT_EVD_LENGTH 8 + +#pragma pack(1) +typedef struct +{ + DT_Transfer_Type transfer_type; + DAT_UINT32 num_segs; + DAT_UINT32 seg_size; + Bpool *bp; + + /* RDMA info */ + DAT_RMR_CONTEXT Rdma_Context; + DAT_PVOID Rdma_Address; +} Performance_Test_Op_t; + +typedef struct +{ + DAT_EP_HANDLE ep_handle; + DAT_EP_ATTR ep_attr; + DAT_CONN_QUAL port; + DAT_COUNT pipeline_len; + Bpool *bp; + Performance_Test_Op_t op; +} Performance_Ep_Context_t; + +typedef struct +{ + Per_Test_Data_t *pt_ptr; + Performance_Cmd_t *cmd; + DAT_IA_ADDRESS_PTR remote_ia_addr; + DAT_BOOLEAN is_remote_little_endian; + DAT_CONN_QUAL base_port; + DAT_IA_ATTR ia_attr; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_CNO_HANDLE cno_handle; + DAT_COUNT reqt_evd_length; + DAT_EVD_HANDLE reqt_evd_hdl; /* request+rmr */ + DAT_COUNT recv_evd_length; + DAT_EVD_HANDLE recv_evd_hdl; /* receive */ + DAT_COUNT conn_evd_length; + DAT_EVD_HANDLE conn_evd_hdl; /* connect */ + DAT_COUNT creq_evd_length; + DAT_EVD_HANDLE creq_evd_hdl; /* "" request */ + Performance_Ep_Context_t ep_context; +} Performance_Test_t; +#pragma pack() +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_util.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_util.c new file mode 100644 index 00000000..48db4add --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_performance_util.c @@ -0,0 +1,652 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_performance_test.h" +#include "dapl_proto.h" + +#define DT_Mdep_GetContextSwitchNum() 0 /* FIXME */ + +/****************************************************************************/ +bool +DT_Performance_Test_Create ( + Per_Test_Data_t *pt_ptr, + DAT_IA_HANDLE *ia_handle, + DAT_IA_ADDRESS_PTR remote_ia_addr, + DAT_BOOLEAN is_server, + DAT_BOOLEAN is_remote_little_endian, + Performance_Test_t **perf_test) +{ + Performance_Test_t *test_ptr; + DAT_COUNT pipeline_len; + DAT_RETURN ret; + + test_ptr = DT_MemListAlloc (pt_ptr, + "transaction_test_t", + TRANSACTIONTEST, + sizeof (Performance_Test_t)); + if ( NULL == test_ptr ) + { + return false; + } + + *perf_test = test_ptr; + + test_ptr->pt_ptr = pt_ptr; + test_ptr->remote_ia_addr = remote_ia_addr; + test_ptr->is_remote_little_endian = is_remote_little_endian; + test_ptr->base_port = (DAT_CONN_QUAL) pt_ptr->Server_Info.first_port_number; + test_ptr->ia_handle = ia_handle; + test_ptr->cmd = &pt_ptr->Params.u.Performance_Cmd; + + ret = dat_ia_query (test_ptr->ia_handle, + NULL, + DAT_IA_ALL, + &test_ptr->ia_attr, + 0, + NULL); + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ia_query error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + return false; + } + + pipeline_len = DT_min ( + DT_min (test_ptr->cmd->num_iterations, + test_ptr->cmd->pipeline_len), + (DAT_UINT32)DT_min (test_ptr->ia_attr.max_dto_per_ep, + test_ptr->ia_attr.max_evd_qlen)); + + if ( RDMA_READ == test_ptr->cmd->op.transfer_type ) + { + pipeline_len = DT_min (pipeline_len, + test_ptr->ia_attr.max_rdma_read_per_ep); + } + + test_ptr->reqt_evd_length = pipeline_len; + test_ptr->recv_evd_length = DT_PERF_DFLT_EVD_LENGTH; + test_ptr->conn_evd_length = DT_PERF_DFLT_EVD_LENGTH; + test_ptr->creq_evd_length = DT_PERF_DFLT_EVD_LENGTH; + + /* create a protection zone */ + ret = dat_pz_create (test_ptr->ia_handle, &test_ptr->pz_handle); + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_pz_create error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->pz_handle = DAT_HANDLE_NULL; + return false; + } + + /* create 4 EVDs - recv, request+RMR, conn-request, connect */ + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->recv_evd_length, + test_ptr->cno_handle, + DAT_EVD_DTO_FLAG, + &test_ptr->recv_evd_hdl); /* recv */ + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (recv) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->recv_evd_hdl = DAT_HANDLE_NULL; + return false; + } + + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->reqt_evd_length, + test_ptr->cno_handle, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &test_ptr->reqt_evd_hdl); /* request + rmr bind */ + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (request) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->reqt_evd_hdl = DAT_HANDLE_NULL; + return false; + } + + + if ( is_server ) + { + /* Client-side doesn't need CR events */ + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->creq_evd_length, + DAT_HANDLE_NULL, + DAT_EVD_CR_FLAG, + &test_ptr->creq_evd_hdl); /* cr */ + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (cr) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->creq_evd_hdl = DAT_HANDLE_NULL; + return false; + } + } + + ret = dat_evd_create 
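/*
 * Illustrative aside on the pipeline sizing above (the numbers are made
 * up, not from any real adapter): the depth is the smallest of what the
 * test asks for and what the IA can absorb, e.g.
 *
 *   num_iterations = 1000, cmd pipeline_len = 64,
 *   max_dto_per_ep = 128,  max_evd_qlen = 256
 *     => pipeline_len = min(min(1000, 64), min(128, 256)) = 64
 *
 *   and for an RDMA Read test with max_rdma_read_per_ep = 4
 *     => pipeline_len = min(64, 4) = 4
 *
 * Since reqt_evd_length is set to pipeline_len, the request EVD created
 * below can always hold one completion per outstanding post.
 */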
(test_ptr->ia_handle, + test_ptr->conn_evd_length, + DAT_HANDLE_NULL, + DAT_EVD_CONNECTION_FLAG, + &test_ptr->conn_evd_hdl); /* conn */ + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (conn) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->conn_evd_hdl = DAT_HANDLE_NULL; + return false; + } + + /* + * Set up the EP context: + * create the EP + * allocate buffers for remote memory info and sync message + * post the receive buffers + * connect + * set up buffers and remote memory info + * send across our info + * recv the other side's info and extract what we need + */ + test_ptr->ep_context.ep_attr = test_ptr->pt_ptr->ep_attr; + test_ptr->ep_context.ep_attr.max_request_dtos = pipeline_len; + + /* Create EP */ + ret = dat_ep_create (test_ptr->ia_handle, /* IA */ + test_ptr->pz_handle, /* PZ */ + test_ptr->recv_evd_hdl, /* recv */ + test_ptr->reqt_evd_hdl, /* request */ + test_ptr->conn_evd_hdl, /* connect */ + &test_ptr->ep_context.ep_attr, /* EP attrs */ + &test_ptr->ep_context.ep_handle); + if ( DAT_SUCCESS != ret) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ep_create error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->ep_context.ep_handle = DAT_HANDLE_NULL; + return false; + } + + /* + * Allocate a buffer pool so we can exchange the + * remote memory info and initialize. + */ + test_ptr->ep_context.bp = DT_BpoolAlloc (test_ptr->pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context.ep_handle, + DAT_HANDLE_NULL, /* rmr */ + DT_PERF_SYNC_BUFF_SIZE, + 2, /* 2 RMIs */ + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if ( !test_ptr->ep_context.bp ) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for remote memory buffers\n", + (DAT_UVERYLONG)test_ptr->base_port); + return false; + } + + DT_Mdep_spew (3, ("0: SYNC_SEND %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, DT_PERF_SYNC_SEND_BUFFER_ID))); + DT_Mdep_spew (3, ("1: SYNC_RECV %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context.bp, DT_PERF_SYNC_RECV_BUFFER_ID))); + + /* + * Post recv and sync buffers + */ + if ( !DT_post_recv_buffer (test_ptr->ep_context.ep_handle, + test_ptr->ep_context.bp, + DT_PERF_SYNC_RECV_BUFFER_ID, + DT_PERF_SYNC_BUFF_SIZE) ) + { + /* error message printed by DT_post_recv_buffer */ + return false; + } + + /* + * Fill in the test_ptr with relevant command info + */ + test_ptr->ep_context.op.transfer_type + = test_ptr->cmd->op.transfer_type; + test_ptr->ep_context.op.num_segs + = test_ptr->cmd->op.num_segs; + test_ptr->ep_context.op.seg_size + = test_ptr->cmd->op.seg_size; + + /* + * Exchange remote memory info: If we're going to participate + * in an RDMA, we need to allocate memory buffers and advertise + * them to the other side. 
+ */ + test_ptr->ep_context.op.Rdma_Context = (DAT_RMR_CONTEXT) 0; + test_ptr->ep_context.op.Rdma_Address = (DAT_PVOID) 0; + test_ptr->ep_context.port = test_ptr->base_port; + test_ptr->ep_context.pipeline_len = pipeline_len; + + return true; +} + + +/****************************************************************************/ +void +DT_Performance_Test_Destroy ( + Per_Test_Data_t *pt_ptr, + Performance_Test_t *test_ptr, + DAT_BOOLEAN is_server) +{ + DAT_RETURN ret; + DAT_EP_HANDLE ep_handle; + + ep_handle = DAT_HANDLE_NULL; + + /* Free the per-op buffers */ + if (test_ptr->ep_context.op.bp) + { + if (!DT_Bpool_Destroy (test_ptr->pt_ptr, + test_ptr->ep_context.op.bp)) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: Bpool destroy fails\n", + (DAT_UVERYLONG)test_ptr->base_port); + /* carry on trying, regardless */ + } + } + + /* Free the remote memory info exchange buffers */ + if (test_ptr->ep_context.bp) + { + if (!DT_Bpool_Destroy (test_ptr->pt_ptr, + test_ptr->ep_context.bp)) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: Bpool destroy fails\n", + (DAT_UVERYLONG)test_ptr->base_port); + /* carry on trying, regardless */ + } + } + + /* + * Disconnect -- we may have left recv buffers posted, if we + * bailed out mid-setup, or ran to completion + * normally, so we use abrupt closure. + */ + if (test_ptr->ep_context.ep_handle) + { + ret = dat_ep_disconnect (test_ptr->ep_context.ep_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: dat_ep_disconnect error %s\n", + (DAT_UVERYLONG)test_ptr->base_port, + DT_RetToString (ret)); + /* carry on trying, regardless */ + } + else if (!DT_disco_event_wait ( test_ptr->conn_evd_hdl, + &ep_handle)) + { + DT_Mdep_printf ("Test[" F64x "]: bad disconnect event\n", + (DAT_UVERYLONG)test_ptr->base_port); + } + } + + if ( DAT_HANDLE_NULL != ep_handle) + { + /* Destroy the EP */ + ret = dat_ep_free (ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ep_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* carry on trying, regardless */ + } + } + + /* clean up the EVDs */ + if (test_ptr->conn_evd_hdl) + { + ret = dat_evd_free (test_ptr->conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (conn) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + if (is_server) + { + if (test_ptr->creq_evd_hdl) + { + ret = dat_evd_free (test_ptr->creq_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (creq) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + } + if (test_ptr->reqt_evd_hdl) + { + ret = dat_evd_free (test_ptr->reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (reqt) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + if (test_ptr->recv_evd_hdl) + { + ret = dat_evd_free (test_ptr->recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (recv) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + +#if 0 /* FIXME */ + /* clean up the CNO */ + if (test_ptr->cmd->use_cno && test_ptr->cno_handle) + { + ret = dat_cno_free (test_ptr->cno_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_cno_free error: %s\n", + 
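/*
 * Note on the teardown order in this routine (a summary of the code, not
 * new behavior): the buffer pools are destroyed first, then the
 * connection is torn down abruptly so any still-posted recvs are
 * flushed, then the EP is freed, and only then the EVDs and the PZ --
 * each DAT object must outlive the objects that were created against it.
 */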
(DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } +#endif /* FIXME */ + + /* clean up the PZ */ + if (test_ptr->pz_handle) + { + ret = dat_pz_free (test_ptr->pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_pz_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + + DT_Mdep_debug (("Test[" F64x "]: cleanup is done\n", + (DAT_UVERYLONG)test_ptr->base_port)); + DT_MemListFree (test_ptr->pt_ptr, test_ptr); +} + + +/****************************************************************************/ +bool +DT_performance_post_rdma_op ( + Performance_Ep_Context_t *ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + Performance_Stats_t *stats) +{ + unsigned int j; + unsigned long int bytes; + unsigned long pre_ctxt_num; + unsigned long post_ctxt_num; + DT_Mdep_TimeStamp pre_ts; + DT_Mdep_TimeStamp post_ts; + DAT_DTO_COOKIE cookie; + DAT_RETURN ret; + Performance_Test_Op_t *op = &ep_context->op; + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (op->bp, 0); + DAT_RMR_TRIPLET rmr_triplet; + + bytes = op->seg_size * op->num_segs; + + /* Prep the inputs */ + for (j = 0; j < op->num_segs; j++) + { + iov[j].pad = 0U; + iov[j].virtual_address = (DAT_VADDR) (uintptr_t) + DT_Bpool_GetBuffer (op->bp, j); + iov[j].segment_length = op->seg_size; + iov[j].lmr_context = DT_Bpool_GetLMR (op->bp, j); + } + + rmr_triplet.pad = 0U; + rmr_triplet.target_address = (DAT_VADDR) (uintptr_t) op->Rdma_Address; + rmr_triplet.segment_length = op->seg_size * op->num_segs; + rmr_triplet.rmr_context = op->Rdma_Context; + + cookie.as_ptr = NULL; + + if ( RDMA_WRITE == op->transfer_type ) + { + pre_ctxt_num = DT_Mdep_GetContextSwitchNum (); + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_ep_post_rdma_write (ep_context->ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + + post_ts = DT_Mdep_GetTimeStamp (); + post_ctxt_num = DT_Mdep_GetContextSwitchNum (); + + stats->bytes += bytes; + } + else + { + pre_ctxt_num = DT_Mdep_GetContextSwitchNum (); + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_ep_post_rdma_read (ep_context->ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + + post_ts = DT_Mdep_GetTimeStamp (); + post_ctxt_num = DT_Mdep_GetContextSwitchNum (); + + stats->bytes += bytes; + } + + if ( DAT_SUCCESS != ret ) + { + return false; + } + + DT_performance_stats_record_post (stats, + post_ctxt_num - pre_ctxt_num, + post_ts - pre_ts); + + return true; +} + +/****************************************************************************/ +unsigned int +DT_performance_reap ( + DAT_EVD_HANDLE evd_handle, + Performance_Mode_Type mode, + Performance_Stats_t *stats) +{ + if ( BLOCKING_MODE == mode ) + { + return DT_performance_wait (evd_handle, stats); + } + else + { + return DT_performance_poll (evd_handle, stats); + } +} + +/****************************************************************************/ +unsigned int +DT_performance_wait ( + DAT_EVD_HANDLE evd_handle, + Performance_Stats_t *stats) +{ + DAT_COUNT i; + DAT_COUNT queue_size; + DAT_RETURN ret; + DAT_EVENT event; + unsigned long pre_ctxt_num; + unsigned long post_ctxt_num; + DT_Mdep_TimeStamp pre_ts; + DT_Mdep_TimeStamp post_ts; + + queue_size = 0; + + pre_ctxt_num = DT_Mdep_GetContextSwitchNum (); + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_evd_wait ( evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &queue_size); + + post_ts = 
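/*
 * The bracketing pattern here is the same one used around the RDMA posts
 * above: sample the timestamp (effectively a cycle counter here -- the
 * totals are later divided by cpu_mhz) and the context-switch count
 * immediately on either side of the single call being measured, then
 * hand the deltas to the stats recorders. A minimal sketch of the idea:
 *
 *   pre  = DT_Mdep_GetTimeStamp ();
 *   ... the one call being measured ...
 *   post = DT_Mdep_GetTimeStamp ();
 *   cycles = post - pre;           // later divided by cpu_mhz
 *
 * Note that this file #defines DT_Mdep_GetContextSwitchNum() to 0 (see
 * the FIXME at the top), so the with/without-context-switch deltas are
 * always zero here and all samples presumably land in the "sans" buckets.
 */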
DT_Mdep_GetTimeStamp (); + post_ctxt_num = DT_Mdep_GetContextSwitchNum (); + + if ( DAT_SUCCESS != ret ) + { + DT_Mdep_printf ("Test Error: dapl_event_dequeue failed: %s\n", + DT_RetToString (ret)); + return 0; + } + else if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + DT_performance_stats_record_reap (stats, + post_ctxt_num - pre_ctxt_num, + post_ts - pre_ts); + } + else + { + /* This should not happen. There has been an error if it does. */ + + DT_Mdep_printf ("Warning: dapl_performance_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + + return 0; + } + + for ( i = 0; i < queue_size; i++ ) + { + pre_ctxt_num = DT_Mdep_GetContextSwitchNum (); + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_evd_dequeue ( evd_handle, + &event); + + post_ts = DT_Mdep_GetTimeStamp (); + post_ctxt_num = DT_Mdep_GetContextSwitchNum (); + + if (DAT_GET_TYPE(ret) == DAT_QUEUE_EMPTY) + { + continue; + } + else if ( DAT_SUCCESS != ret ) + { + DT_Mdep_printf ("Test Error: dapl_event_dequeue failed: %s\n", + DT_RetToString (ret)); + return 0; + } + else if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + DT_performance_stats_record_reap (stats, + post_ctxt_num - pre_ctxt_num, + post_ts - pre_ts); + } + else + { + /* This should not happen. There has been an error if it does. */ + + DT_Mdep_printf ("Warning: dapl_performance_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + + return 0; + } + } + + return ++queue_size; +} + +/****************************************************************************/ +unsigned int +DT_performance_poll ( + DAT_EVD_HANDLE evd_handle, + Performance_Stats_t *stats) +{ + DAT_RETURN ret; + DAT_EVENT event; + unsigned long pre_ctxt_num; + unsigned long post_ctxt_num; + DT_Mdep_TimeStamp pre_ts; + DT_Mdep_TimeStamp post_ts; + + for (;;) + { + pre_ctxt_num = DT_Mdep_GetContextSwitchNum (); + pre_ts = DT_Mdep_GetTimeStamp (); + + ret = dat_evd_dequeue ( evd_handle, + &event); + + post_ts = DT_Mdep_GetTimeStamp (); + post_ctxt_num = DT_Mdep_GetContextSwitchNum (); + + if (DAT_GET_TYPE(ret) == DAT_QUEUE_EMPTY) + { + continue; + } + else if ( DAT_SUCCESS != ret ) + { + DT_Mdep_printf ("Test Error: dapl_event_dequeue failed: %s\n", + DT_RetToString (ret)); + return 0; + } + else if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + DT_performance_stats_record_reap (stats, + post_ctxt_num - pre_ctxt_num, + post_ts - pre_ts); + return 1; + } + else + { + /* This should not happen. There has been an error if it does. */ + + DT_Mdep_printf ("Warning: dapl_performance_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + + return 0; + } + } + + /*never reached */ + return 0; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_proto.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_proto.h new file mode 100644 index 00000000..559fab91 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_proto.h @@ -0,0 +1,611 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_PROTO_H__ +#define __DAPL_PROTO_H__ + +#include +#include +#include +#include +#include +#include "dapl_common.h" +#include "dapl_client_info.h" +#include "dapl_cnxn.h" +#include "dapl_bpool.h" +#include "dapl_client_info.h" +#include "dapl_transaction_stats.h" +#include "dapl_getopt.h" +#include "dapl_limit_cmd.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_params.h" +#include "dapl_performance_stats.h" +#include "dapl_performance_test.h" +#include "dapl_quit_cmd.h" +#include "dapl_server_info.h" +#include "dapl_test_data.h" +#include "dapl_transaction_cmd.h" +#include "dapl_transaction_test.h" +#include "dapl_version.h" +#include "dapl_fft_cmd.h" +#include "dapl_fft_util.h" + +/* + * Prototypes + */ + +/* dapl_bpool.c */ +Bpool * DT_BpoolAlloc (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_PZ_HANDLE pz_handle, + DAT_EP_HANDLE ep_handle, + DAT_EVD_HANDLE rmr_evd_handle, + DAT_COUNT seg_size, + DAT_COUNT num_segs, + DAT_COUNT alignment, + DAT_BOOLEAN enable_rdma_write, + DAT_BOOLEAN enable_rdma_read); + +bool DT_Bpool_Destroy (Per_Test_Data_t * pt_ptr, + Bpool * bpool_ptr); + +unsigned char *DT_Bpool_GetBuffer (Bpool * bpool_ptr, int index); +DAT_COUNT DT_Bpool_GetBuffSize (Bpool * bpool_ptr, int index); +DAT_LMR_TRIPLET *DT_Bpool_GetIOV (Bpool * bpool_ptr, int index); +DAT_LMR_CONTEXT DT_Bpool_GetLMR (Bpool * bpool_ptr, int index); +DAT_RMR_CONTEXT DT_Bpool_GetRMR (Bpool * bpool_ptr, int index); + +void DT_Bpool_print (Bpool * bpool_ptr); + +/* dapl_cnxn.c */ +int get_ep_connection_state (DAT_EP_HANDLE ep_handle); + +/* dapl_client.c */ +void DT_cs_Client (Params_t * params_ptr, + char *dapl_name, + char *server_name, + DAT_UINT32 total_threads); + +/* dapl_client_info.c */ +void DT_Client_Info_Endian (Client_Info_t * client_info); + +void DT_Client_Info_Print (Client_Info_t * client_info); + +/* dapl_transaction_stats.c */ +void DT_init_transaction_stats (Transaction_Stats_t * transaction_stats, + unsigned int nums); +void DT_transaction_stats_set_ready (Transaction_Stats_t * transaction_stats); + +bool DT_transaction_stats_wait_for_all (Transaction_Stats_t * transaction_stats); + +void DT_update_transaction_stats (Transaction_Stats_t * transaction_stats, + unsigned int num_ops, + unsigned int time_ms, + unsigned int bytes_send, + unsigned int bytes_recv, + unsigned int bytes_rdma_read, + unsigned int bytes_rdma_write); + +void DT_print_transaction_stats (Transaction_Stats_t * transaction_stats, + unsigned int num_threads, + unsigned int num_EPs); + +/* dapl_endian.c */ +void DT_Endian_Init (void); +DAT_UINT32 DT_Endian32 (DAT_UINT32 val); +DAT_UINT64 DT_Endian64 (DAT_UINT64 val); +DAT_UINT32 DT_EndianMemHandle (DAT_UINT32 val); +DAT_UINT64 DT_EndianMemAddress (DAT_UINT64 val); + +/* dapl_main.c */ +int __cdecl main (int argc, char *argv[]); + +int dapltest (int argc, char *argv[]); + +void Dapltest_Main_Usage (void); + +/* dapl_mdep.c */ +bool DT_Mdep_Init (void); +void DT_Mdep_End (void); +bool 
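/*
 * The Bpool accessors above are the glue used throughout dapltest: a
 * pool is carved into fixed-size buffers addressed by index, and each
 * index can be turned into the pieces a DTO needs. A hypothetical
 * fragment (all handles and sizes are placeholders):
 *
 *   Bpool *bp = DT_BpoolAlloc (pt_ptr, ia, pz, ep,
 *                              DAT_HANDLE_NULL,        // no RMR EVD
 *                              8192,                   // seg_size
 *                              2,                      // num_segs
 *                              DAT_OPTIMAL_ALIGNMENT,
 *                              false, false);          // no RDMA access
 *   unsigned char   *buf = DT_Bpool_GetBuffer (bp, 0);
 *   DAT_LMR_CONTEXT  lmr = DT_Bpool_GetLMR (bp, 0);
 *   ...
 *   DT_Bpool_Destroy (pt_ptr, bp);
 */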
DT_Mdep_GetDefaultDeviceName (char *dapl_name); +void DT_Mdep_Sleep (int msec); +bool DT_Mdep_GetCpuStat (DT_CpuStat *sys_stat); +unsigned long DT_Mdep_GetTime (void); +double DT_Mdep_GetCpuMhz (void); +unsigned long DT_Mdep_GetContextSwitchNum (void); +void *DT_Mdep_Malloc (size_t l_); +void DT_Mdep_Free (void *a_); +bool DT_Mdep_LockInit (DT_Mdep_LockType * lock_ptr); +void DT_Mdep_LockDestroy (DT_Mdep_LockType * lock_ptr); +void DT_Mdep_Lock (DT_Mdep_LockType * lock_ptr); +void DT_Mdep_Unlock (DT_Mdep_LockType * lock_ptr); +void DT_Mdep_Thread_Init_Attributes (Thread * thread_ptr); +void DT_Mdep_Thread_Destroy_Attributes (Thread * thread_ptr); +bool DT_Mdep_Thread_Start (Thread * thread_ptr); + +void DT_Mdep_Thread_Detach (int thread_id); +int DT_Mdep_Thread_SELF ( void ); +void DT_Mdep_Thread_EXIT ( void * thread_handle ); +int DT_Mdep_wait_object_init ( IN DT_WAIT_OBJECT *wait_obj); +int DT_Mdep_wait_object_wait ( + IN DT_WAIT_OBJECT *wait_obj, + IN int timeout_val); +int DT_Mdep_wait_object_wakeup ( IN DT_WAIT_OBJECT *wait_obj); +int DT_Mdep_wait_object_destroy ( IN DT_WAIT_OBJECT *wait_obj); + + +DT_Mdep_Thread_Start_Routine_Return_Type + DT_Mdep_Thread_Start_Routine (void *thread_handle); + +/* dapl_memlist.c */ +void DT_MemListInit (Per_Test_Data_t * pt_ptr); +void *DT_MemListAlloc (Per_Test_Data_t * pt_ptr, + char *file, + mem_type_e t, + int size); +void DT_MemListFree (Per_Test_Data_t * pt_ptr, + void *ptr); +void DT_PrintMemList (Per_Test_Data_t * pt_ptr); + +/* dapl_netaddr.c */ +bool DT_NetAddrLookupHostAddress (DAT_IA_ADDRESS_PTR to_netaddr, + char *hostname); + +DAT_IA_ADDRESS_PTR DT_NetAddrAlloc (Per_Test_Data_t * pt_ptr); + +void DT_NetAddrFree (Per_Test_Data_t * pt_ptr, + DAT_IA_ADDRESS_PTR netaddr); + +/* dapl_params.c */ +bool DT_Params_Parse (int argc, + char *argv[], + Params_t * params_ptr); + +/* dapl_performance_cmd.c */ +const char * DT_PerformanceModeToString (Performance_Mode_Type mode); + +bool DT_Performance_Cmd_Init (Performance_Cmd_t * cmd); + +bool DT_Performance_Cmd_Parse (Performance_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +void DT_Performance_Cmd_Print (Performance_Cmd_t * cmd); + +void DT_Performance_Cmd_Endian (Performance_Cmd_t * cmd); + +/* dapl_performance_client.c */ +void DT_Performance_Test_Client (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE * ia_handle, + DAT_IA_ADDRESS_PTR remote); + +bool DT_Performance_Test_Client_Connect ( + Performance_Test_t * test_ptr); + +bool DT_Performance_Test_Client_Exchange ( + Performance_Test_t *test_ptr); + +/* dapl_performance_server.c */ +void DT_Performance_Test_Server (void * pt_ptr); + +bool DT_Performance_Test_Server_Connect ( + Performance_Test_t * test_ptr); + +bool DT_Performance_Test_Server_Exchange ( + Performance_Test_t *test_ptr); + +/* dapl_performance_util.c */ +bool DT_Performance_Test_Create (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE * ia_handle, + DAT_IA_ADDRESS_PTR remote_ia_addr, + DAT_BOOLEAN is_server, + DAT_BOOLEAN is_remote_little_endian, + Performance_Test_t **perf_test); + +void DT_Performance_Test_Destroy (Per_Test_Data_t * pt_ptr, + Performance_Test_t *test_ptr, + DAT_BOOLEAN is_server); + +bool DT_performance_post_rdma_op (Performance_Ep_Context_t *ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + Performance_Stats_t *stats); + +unsigned int DT_performance_reap (DAT_EVD_HANDLE evd_handle, + Performance_Mode_Type mode, + Performance_Stats_t *stats); + +unsigned int DT_performance_wait (DAT_EVD_HANDLE evd_handle, + Performance_Stats_t *stats); + +unsigned int 
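/*
 * The DT_Mdep_wait_object_* quartet above is the portable event used to
 * synchronize the server with its test threads (see its use with
 * pt_ptr->synch_wait_object in dapl_server.c). The intended lifecycle,
 * sketched:
 *
 *   DT_WAIT_OBJECT w;
 *   DT_Mdep_wait_object_init (&w);
 *   // consumer thread:
 *   DT_Mdep_wait_object_wait (&w, DAT_TIMEOUT_INFINITE);
 *   // producer thread:
 *   DT_Mdep_wait_object_wakeup (&w);
 *   ...
 *   DT_Mdep_wait_object_destroy (&w);
 */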
DT_performance_poll (DAT_EVD_HANDLE evd_handle, + Performance_Stats_t *stats); + +/* dapl_performance_stats.c */ +void DT_performance_stats_init (Performance_Stats_t * stats); + +void DT_performance_stats_record_post (Performance_Stats_t *stats, + unsigned long ctxt_switch_num, + DT_Mdep_TimeStamp ts); + +void DT_performance_stats_record_reap (Performance_Stats_t *stats, + unsigned long ctxt_switch_num, + DT_Mdep_TimeStamp ts); + +void DT_performance_stats_record_latency (Performance_Stats_t *stats, + DT_Mdep_TimeStamp ts); + +void DT_performance_stats_data_combine (Performance_Stats_Data_t * dest, + Performance_Stats_Data_t * src_a, + Performance_Stats_Data_t * src_b); + +void DT_performance_stats_combine (Performance_Stats_t * dest, + Performance_Stats_t * src_a, + Performance_Stats_t * src_b); + +double DT_performance_stats_data_print (Performance_Stats_Data_t * data, + double cpu_mhz); + +void DT_performance_stats_print (Performance_Stats_t * stats, + Performance_Cmd_t * cmd, + Performance_Test_t * test); + + +/* dapl_server.c */ +void DT_cs_Server (Params_t * params_ptr); + +/* dapl_server_cmd.c */ +void DT_Server_Cmd_Init (Server_Cmd_t * Server_Cmd); + +bool DT_Server_Cmd_Parse (Server_Cmd_t * Server_Cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +void DT_Server_Cmd_Print (Server_Cmd_t * Server_Cmd); + +void DT_Server_Cmd_Usage (void); + +/* dapl_server_info.c */ +void DT_Server_Info_Endian (Server_Info_t * server_info); + +void DT_Server_Info_Print (Server_Info_t * server_info); + +/* dapl_test_data.c */ +Per_Test_Data_t *DT_Alloc_Per_Test_Data (void); + +void DT_Free_Per_Test_Data (Per_Test_Data_t * pt_ptr); + +/* dapl_test_util.c */ +DAT_BOOLEAN DT_query (Per_Test_Data_t *pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_EP_HANDLE ep_handle); + +DAT_BOOLEAN DT_post_recv_buffer (DAT_EP_HANDLE ep_handle, + Bpool * bp, + int index, + int size); + +DAT_BOOLEAN DT_post_send_buffer (DAT_EP_HANDLE ep_handle, + Bpool * bp, + int index, + int size); + +bool DT_conn_event_wait (DAT_EP_HANDLE ep_handle, + DAT_EVD_HANDLE evd_handle, + DAT_EVENT_NUMBER *event_number); + +bool DT_disco_event_wait ( DAT_EVD_HANDLE evd_handle, + DAT_EP_HANDLE *ep_handle ); + +bool DT_cr_event_wait (DAT_EVD_HANDLE evd_handle, + DAT_CR_ARRIVAL_EVENT_DATA *cr_stat_p); + +bool DT_dto_event_reap (DAT_EVD_HANDLE evd_handle, + bool poll, + DAT_DTO_COMPLETION_EVENT_DATA *dtop); + +bool DT_dto_event_wait (DAT_EVD_HANDLE evd_handle, + DAT_DTO_COMPLETION_EVENT_DATA *dtop); + +bool DT_dto_event_poll (DAT_EVD_HANDLE evd_handle, + DAT_DTO_COMPLETION_EVENT_DATA *dtop); + +bool DT_rmr_event_wait (DAT_EVD_HANDLE evd_handle, + DAT_RMR_BIND_COMPLETION_EVENT_DATA *rmr_ptr); + +bool DT_dto_check ( DAT_DTO_COMPLETION_EVENT_DATA *dto_p, + DAT_EP_HANDLE ep_expected, + DAT_VLEN len_expected, + DAT_DTO_COOKIE cookie_expected, + char *message); + +bool DT_rmr_check ( DAT_RMR_BIND_COMPLETION_EVENT_DATA *rmr_p, + DAT_RMR_HANDLE rmr_expected, + DAT_PVOID cookie_expected, + char *message); + +bool DT_cr_check (DAT_CR_ARRIVAL_EVENT_DATA *cr_stat_p, + DAT_PSP_HANDLE psp_handle_expected, + DAT_CONN_QUAL port_expected, + DAT_CR_HANDLE *cr_handlep, + char *message); + +/* dapl_thread.c */ +void DT_Thread_Init (Per_Test_Data_t * pt_ptr); + +void DT_Thread_End (Per_Test_Data_t * pt_ptr); + +Thread *DT_Thread_Create (Per_Test_Data_t * pt_ptr, + void (*fn) (void *), + void *param, + unsigned int stacksize); + +void DT_Thread_Destroy (Thread * thread_ptr, + Per_Test_Data_t * pt_ptr); + +bool DT_Thread_Start (Thread * thread_ptr); + +/* 
dapl_quit_cmd.c */ +void DT_Quit_Cmd_Init (Quit_Cmd_t * cmd); + +bool DT_Quit_Cmd_Parse (Quit_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +bool DT_Quit_Cmd_Validate (Quit_Cmd_t * cmd); + +void DT_Quit_Cmd_Endian (Quit_Cmd_t * cmd, + bool to_wire); + +void DT_Quit_Cmd_Print (Quit_Cmd_t * cmd); + +void DT_Quit_Cmd_Usage (void); + +/* dapl_transaction_cmd.c */ +void DT_Transaction_Cmd_Init (Transaction_Cmd_t * cmd); + +bool DT_Transaction_Cmd_Parse (Transaction_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +void DT_Transaction_Cmd_Print (Transaction_Cmd_t * cmd); + +void DT_Transaction_Cmd_Endian (Transaction_Cmd_t * cmd, + bool to_wire); +/* dapl_transaction_test.c */ +void DT_Transaction_Test_Client (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_IA_ADDRESS_PTR remote); + +void DT_Transaction_Test_Server (void *params); + +bool DT_Transaction_Create_Test (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE * ia_handle, + DAT_BOOLEAN is_server, + unsigned int port_num, + DAT_BOOLEAN remote_is_little_endian, + DAT_IA_ADDRESS_PTR remote_ia_addr); + +void DT_Transaction_Main (void *param); +bool DT_Transaction_Run (Transaction_Test_t * test_ptr); +void DT_Transaction_Validation_Fill (Transaction_Test_t * test_ptr, + unsigned int iteration); +bool DT_Transaction_Validation_Check (Transaction_Test_t * test_ptr, + int iteration); +void DT_Print_Transaction_Test (Transaction_Test_t * test_ptr); +void DT_Print_Transaction_Stats (Transaction_Test_t * test_ptr); + +/* dapl_transaction_util.c */ +bool DT_handle_post_recv_buf (Ep_Context_t * ep_context, + unsigned int num_eps, + int op_indx); + +bool DT_handle_send_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + int op_indx, + bool poll); + +bool DT_handle_recv_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE recv_evd_hdl, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + int op_indx, + bool poll, + bool repost_recv); + +bool DT_handle_rdma_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + DT_Transfer_Type opcode, + int op_indx, + bool poll); + +bool DT_check_params (Per_Test_Data_t *pt_ptr, + unsigned char *module); + +void DT_Test_Error (void); + +/* dapl_util.c */ +const char *DT_RetToString (DAT_RETURN ret_value); + +const char *DT_TransferTypeToString (DT_Transfer_Type type); + +const char *DT_AsyncErr2Str (DAT_EVENT_NUMBER error_code); + +const char *DT_EventToSTr (DAT_EVENT_NUMBER event_code); + +const char *DT_State2Str (DAT_EP_STATE state_code); + +DAT_QOS DT_ParseQoS (char *arg); + +unsigned char *DT_AlignPtr (void * val, unsigned int align); + +DAT_COUNT DT_RoundSize (DAT_COUNT val, DAT_COUNT align); + +/* dapl_limit_cmd.c */ +void DT_Limit_Cmd_Init ( Limit_Cmd_t * cmd); + +bool DT_Limit_Cmd_Parse ( Limit_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +void DT_Limit_Cmd_Usage (void); + +/* dapl_limit.c */ +void DT_cs_Limit (Limit_Cmd_t * cmd); + +/* dapl_fft_cmd.c */ +void DT_FFT_Cmd_Init ( FFT_Cmd_t * cmd); + +bool DT_FFT_Cmd_Parse ( FFT_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts); + +void DT_FFT_Cmd_Usage (void); + +/* dapl_fft_test.c */ +void DT_cs_FFT ( FFT_Cmd_t * cmd); + +/* dapl_fft_hwconn.c */ +void DT_hwconn_test (FFT_Cmd_t *cmd); +int DT_hwconn_case0 (FFT_Cmd_t *cmd); +int DT_hwconn_case1 (FFT_Cmd_t *cmd); +int DT_hwconn_case2 (FFT_Cmd_t *cmd); +int DT_hwconn_case3 (FFT_Cmd_t *cmd); +int DT_hwconn_case4 (FFT_Cmd_t *cmd); +int DT_hwconn_case5 (FFT_Cmd_t 
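/*
 * Each command type above follows the same lifecycle, visible in its
 * prototypes: _Init zeroes the struct and sets defaults, _Parse walks
 * argv with DT_mygetopt_r, _Validate enforces required arguments,
 * _Endian converts the struct for the wire (a no-op for some commands),
 * and _Print echoes it for debugging. A sketch of the usual flow for
 * the quit command:
 *
 *   Quit_Cmd_t cmd;
 *   mygetopt_t opts;
 *   DT_Quit_Cmd_Init (&cmd);
 *   if (!DT_Quit_Cmd_Parse (&cmd, argc, argv, &opts)) // calls _Validate,
 *       return;                                       // prints usage
 *   DT_Quit_Cmd_Endian (&cmd, true);                  // before sending
 */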
*cmd); +int DT_hwconn_case6 (FFT_Cmd_t *cmd); +int DT_hwconn_case7 (FFT_Cmd_t *cmd); + +/* dapl_fft_endpoint.c */ +void DT_endpoint_test (FFT_Cmd_t *cmd); +int DT_endpoint_generic (FFT_Cmd_t *cmd, + bool destroy_pz_early); +int DT_endpoint_case0 (FFT_Cmd_t *cmd); +int DT_endpoint_case1 (FFT_Cmd_t *cmd); +int DT_endpoint_case2 (FFT_Cmd_t *cmd); +int DT_endpoint_case3 (FFT_Cmd_t *cmd); +int DT_endpoint_case4 (FFT_Cmd_t *cmd); + +/* dapl_fft_pz.c */ +void DT_pz_test (FFT_Cmd_t *cmd); +int DT_pz_case0 (FFT_Cmd_t *cmd); +int DT_pz_case1 (FFT_Cmd_t *cmd); +int DT_pz_case2 (FFT_Cmd_t *cmd); +int DT_pz_case3 (FFT_Cmd_t *cmd); +int DT_pz_case4 (FFT_Cmd_t *cmd); +int DT_pz_case5 (FFT_Cmd_t *cmd); +int DT_pz_case6 (FFT_Cmd_t *cmd); + +/* dapl_fft_util.c */ +void DT_assert_fail (char *exp, char *file, char *baseFile, int line); +int DT_ia_open (DAT_NAME_PTR dev_name, DAT_IA_HANDLE *ia_handle); +DAT_RETURN DT_ia_close (DAT_IA_HANDLE, DAT_CLOSE_FLAGS); +int DT_ep_create (DAT_IA_HANDLE ia_handle, DAT_PZ_HANDLE pz_handle, + DAT_EVD_HANDLE *cr_evd, + DAT_EVD_HANDLE *conn_evd, DAT_EVD_HANDLE *send_evd, + DAT_EVD_HANDLE *recv_evd, DAT_EP_HANDLE *ep_handle); +void DT_fft_init_conn_struct (FFT_Connection_t *conn); +void DT_fft_init_client (FFT_Cmd_t *cmd, FFT_Connection_t *conn); +int DT_fft_destroy_conn_struct (FFT_Connection_t *conn); +void DT_fft_init_server (FFT_Cmd_t *cmd, FFT_Connection_t *conn); +void DT_fft_listen (FFT_Connection_t *conn); +int DT_fft_connect (FFT_Connection_t *conn); + +/* dapl_fft_dataxfer.c */ +int DT_dataxfer_case0 (FFT_Cmd_t *cmd); +int DT_dataxfer_case1 (FFT_Cmd_t *cmd); +int DT_dataxfer_case2 (FFT_Cmd_t *cmd); +int DT_dataxfer_case3 (FFT_Cmd_t *cmd); +int DT_dataxfer_generic (FFT_Cmd_t *cmd, int test_case); +void DT_dataxfer_test (FFT_Cmd_t *cmd); + +/* dapl_fft_dataxfer_client.c */ +int DT_dataxfer_client_generic (FFT_Cmd_t *cmd, int flag); +int DT_dataxfer_client_case0 (FFT_Cmd_t *cmd); +void DT_dataxfer_client_test (FFT_Cmd_t *cmd); + +/* dapl_fft_connmgt.c */ +int DT_connmgt_case0 (FFT_Cmd_t *cmd); +int DT_connmgt_case1 (FFT_Cmd_t *cmd); +void DT_connmgt_test (FFT_Cmd_t *cmd); + +/* dapl_fft_mem.c */ +int DT_mem_generic (FFT_Cmd_t *cmd, int flag); +int DT_mem_case0 (FFT_Cmd_t *cmd); +int DT_mem_case1 (FFT_Cmd_t *cmd); +int DT_mem_case2 (FFT_Cmd_t *cmd); +int DT_mem_case3 (FFT_Cmd_t *cmd); +int DT_mem_case4 (FFT_Cmd_t *cmd); +int DT_mem_case5 (FFT_Cmd_t *cmd); +int DT_mem_case6 (FFT_Cmd_t *cmd); +int DT_mem_case7 (FFT_Cmd_t *cmd); +int DT_mem_case8 (FFT_Cmd_t *cmd); +int DT_mem_case9 (FFT_Cmd_t *cmd); +int DT_mem_case10 (FFT_Cmd_t *cmd); +int DT_mem_case11 (FFT_Cmd_t *cmd); +void DT_mem_test (FFT_Cmd_t *cmd); + +/* dapl_fft_queryinfo.c */ +int DT_queryinfo_basic (FFT_Cmd_t *cmd, + FFT_query_enum object_to_query, + DAT_RETURN result_wanted); +int DT_queryinfo_case0 (FFT_Cmd_t *cmd); +int DT_queryinfo_case1 (FFT_Cmd_t *cmd); +int DT_queryinfo_case2 (FFT_Cmd_t *cmd); +int DT_queryinfo_case3 (FFT_Cmd_t *cmd); +int DT_queryinfo_case4 (FFT_Cmd_t *cmd); +int DT_queryinfo_case5 (FFT_Cmd_t *cmd); +int DT_queryinfo_case6 (FFT_Cmd_t *cmd); +int DT_queryinfo_case7 (FFT_Cmd_t *cmd); +int DT_queryinfo_case8 (FFT_Cmd_t *cmd); +int DT_queryinfo_case9 (FFT_Cmd_t *cmd); +int DT_queryinfo_case10 (FFT_Cmd_t *cmd); +int DT_queryinfo_case11 (FFT_Cmd_t *cmd); +int DT_queryinfo_case12 (FFT_Cmd_t *cmd); +int DT_queryinfo_case13 (FFT_Cmd_t *cmd); +int DT_queryinfo_case14 (FFT_Cmd_t *cmd); +int DT_queryinfo_case15 (FFT_Cmd_t *cmd); +int DT_queryinfo_case16 (FFT_Cmd_t *cmd); +int DT_queryinfo_case17 
(FFT_Cmd_t *cmd); +void DT_queryinfo_test (FFT_Cmd_t *cmd); + +#ifdef DYNAMIC_DAT_LOADING +#define DAT_DLL_LIB DAT_DLL_NAME +#define DAT_LIB_OPEN_ENTRY "dat_ia_openv" +#define DAT_LIB_CLOSE_ENTRY "dat_ia_close" +DAT_IA_OPENV_FUNC dat_open; +DAT_IA_CLOSE_FUNC dat_close; +#endif //DYNAMIC_DAT_LOADING + +#endif /* __DAPL_PROTO_H__ */ diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.c new file mode 100644 index 00000000..5580eba2 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.c @@ -0,0 +1,148 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_getopt.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" +#include "dapl_quit_cmd.h" + +/*--------------------------------------------------------- */ +void +DT_Quit_Cmd_Init (Quit_Cmd_t * cmd) +{ + memset ((void *)cmd, 0, sizeof (Quit_Cmd_t)); + cmd->ReliabilityLevel = DAT_QOS_BEST_EFFORT; +} + +/*--------------------------------------------------------- */ +bool +DT_Quit_Cmd_Parse (Quit_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts) +{ + char c; + + for (;;) + { + c = DT_mygetopt_r (my_argc, my_argv, "ds:D:R:", opts); + if (c == EOF) + { + break; + } + switch (c) + { + case 'D': + { + strcpy (cmd->device_name, opts->optarg); + break; + } + case 's': + { + strcpy (cmd->server_name, opts->optarg); + break; + } + case 'd': /* print debug messages */ + { + DT_dapltest_debug++; + cmd->debug = true; + break; + } + case 'R': /* Service Reliability Level */ + { + cmd->ReliabilityLevel= DT_ParseQoS (opts->optarg); + break; + } + case '?': + default: + { + DT_Mdep_printf ("Invalid Quit option: %c\n", opts->optopt); + DT_Quit_Cmd_Usage (); + return (false); + } + } + } + if (cmd->device_name[0] == '\0') + { + if (!DT_Mdep_GetDefaultDeviceName (cmd->device_name)) + { + DT_Mdep_printf ("can't get default device name\n"); + DT_Quit_Cmd_Usage (); + return (false); + } + } + if (!DT_Quit_Cmd_Validate (cmd)) + { + DT_Quit_Cmd_Usage (); + return (false); + } + return (true); +} + +/*------------------------------------------------------------------------------ */ +bool +DT_Quit_Cmd_Validate (Quit_Cmd_t * cmd) +{ + if (cmd->server_name[0] == '\0') + { + DT_Mdep_printf ("Must specify server_name in command line or scriptfile\n"); + return (false); + } + return (true); +} + +/*--------------------------------------------------------- */ +void +DT_Quit_Cmd_Endian (Quit_Cmd_t * cmd, + bool to_wire) +{ + /* do 
nothing */ +} + +/*--------------------------------------------------------- */ +void +DT_Quit_Cmd_Print (Quit_Cmd_t * cmd) +{ + DT_Mdep_printf ("Quit_Cmd.server_name: %s\n", cmd->server_name); + DT_Mdep_printf ("Quit_Cmd.device_name: %s\n", cmd->device_name); +} + +/*--------------------------------------------------------- */ +void +DT_Quit_Cmd_Usage (void) +{ + DT_Mdep_printf ("USAGE: ---- QUIT TEST ----\n"); + DT_Mdep_printf ("USAGE: dapltest -T Q\n"); + DT_Mdep_printf ("USAGE: -s \n"); + DT_Mdep_printf ("USAGE: [-D ]\n"); + DT_Mdep_printf ("USAGE: [-d] : debug (zero)\n"); + DT_Mdep_printf ("USAGE: [-R ]\n"); + DT_Mdep_printf ("USAGE: (BE == QOS_BEST_EFFORT - Default)\n"); + DT_Mdep_printf ("USAGE: (HT == QOS_HIGH_THROUGHPUT)\n"); + DT_Mdep_printf ("USAGE: (LL == QOS_LOW_LATENCY)\n"); + DT_Mdep_printf ("USAGE: (EC == QOS_ECONOMY)\n"); + DT_Mdep_printf ("USAGE: (PM == QOS_PREMIUM)\n"); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.h new file mode 100644 index 00000000..c8276a40 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_quit_cmd.h @@ -0,0 +1,41 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_QUIT_CMD_H__ +#define __DAPL_QUIT_CMD_H__ + + +#pragma pack(1) +typedef struct +{ + char server_name[256]; /* -s */ + char device_name[256]; /* -D */ + DAT_UINT32 debug; /* -d */ + DAT_QOS ReliabilityLevel; /* -R */ +} Quit_Cmd_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server.c new file mode 100644 index 00000000..13ef24bf --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server.c @@ -0,0 +1,842 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. 
+ * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include + +#include "dapl_bpool.h" +#include "dapl_client_info.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" +#include "dapl_server_info.h" +#include "dapl_test_data.h" +#include "dapl_transaction_cmd.h" +#include "dapl_transaction_test.h" +#include "dapl_performance_cmd.h" +#include "dapl_performance_test.h" +#include "dapl_version.h" + +#define DFLT_QLEN 40 /* default event queue length */ + +int send_control_data(unsigned char *buffp, + Per_Server_Data_t *ps_ptr, + Per_Test_Data_t *pt_ptr); + +void +DT_cs_Server (Params_t * params_ptr) +{ + Server_Cmd_t *Server_Cmd = ¶ms_ptr->u.Server_Cmd; + Client_Info_t *Client_Info = NULL; + Transaction_Cmd_t *Transaction_Cmd= NULL; + Performance_Cmd_t *Performance_Cmd= NULL; + Quit_Cmd_t *Quit_Cmd = NULL; + Per_Server_Data_t *ps_ptr = NULL; + Per_Test_Data_t *pt_ptr = NULL; + Started_server_t *temp_list = NULL; + Started_server_t *pre_list = NULL; + unsigned char *buffp = NULL; + unsigned char *module = "DT_cs_Server"; + + DAT_DTO_COOKIE dto_cookie; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_RETURN ret; + + /* Check if device from command line already in use */ + temp_list = DT_started_server_list; + while (temp_list) + { + if (strcmp (temp_list->devicename, Server_Cmd->dapl_name) == 0) + { + DT_Mdep_printf ("NOTICE: server already started for this NIC: %s\n", + Server_Cmd->dapl_name); + return; + } + temp_list = temp_list->next; + } + + /* Alloc memory for server list */ + temp_list = (Started_server_t *) DT_Mdep_Malloc (sizeof (Started_server_t)); + if (temp_list == 0) + { + DT_Mdep_printf ("no memory for server_list\n"); + return; + } + strcpy (temp_list->devicename, Server_Cmd->dapl_name); + temp_list->next = DT_started_server_list; + DT_started_server_list = temp_list; + + if (Server_Cmd->debug) + { + /* Echo our inputs if debugging */ + DT_Server_Cmd_Print (Server_Cmd); + } + + /* Allocate memory for Per_Server_Data */ + ps_ptr = (Per_Server_Data_t *) DT_Mdep_Malloc (sizeof (Per_Server_Data_t)); + if (ps_ptr == 0) + { + DT_Mdep_printf ("no memory for ps_data\n"); + goto server_exit; + } + DT_Mdep_LockInit (&ps_ptr->num_clients_lock); + ps_ptr->NextPortNumber = SERVER_PORT_NUMBER + 1; + ps_ptr->num_clients = 0; + + /* Open the IA */ +#ifdef DYNAMIC_DAT_LOADING + ret = dat_open (Server_Cmd->dapl_name, + DFLT_QLEN, + &ps_ptr->async_evd_hdl, + &ps_ptr->ia_handle, + DAT_VERSION_MAJOR, + DAT_VERSION_MINOR, + DAT_THREADSAFE); +#else + ret = dat_ia_open (Server_Cmd->dapl_name, + DFLT_QLEN, + &ps_ptr->async_evd_hdl, + &ps_ptr->ia_handle); +#endif //DYNAMIC_DAT_LOADING + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: Could not open %s (%s)\n", + module, + Server_Cmd->dapl_name, + DT_RetToString (ret)); + ps_ptr->ia_handle = DAT_HANDLE_NULL; + goto server_exit; + } + DT_Mdep_debug (("%s: IA %s opened\n", module, Server_Cmd->dapl_name)); + + /* Create a PZ */ + ret = dat_pz_create (ps_ptr->ia_handle, &ps_ptr->pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_create error: %s\n", + module, + DT_RetToString (ret)); + ps_ptr->pz_handle = DAT_HANDLE_NULL; + goto server_exit; + } + DT_Mdep_debug (("%s: PZ created\n", module)); + + /* Create 4 events - recv, 
request, connection-request, connect */ + ret = dat_evd_create (ps_ptr->ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_DTO_FLAG, + &ps_ptr->recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (recv) failed %s\n", + module, DT_RetToString (ret)); + ps_ptr->recv_evd_hdl = DAT_HANDLE_NULL; + goto server_exit; + } + ret = dat_evd_create (ps_ptr->ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &ps_ptr->reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (send) failed %s\n", + module, DT_RetToString (ret)); + ps_ptr->reqt_evd_hdl = DAT_HANDLE_NULL; + goto server_exit; + } + ret = dat_evd_create (ps_ptr->ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_CR_FLAG, + &ps_ptr->creq_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (cr) failed %s\n", + module, DT_RetToString (ret)); + ps_ptr->creq_evd_hdl = DAT_HANDLE_NULL; + goto server_exit; + } + ret = dat_evd_create (ps_ptr->ia_handle, + DFLT_QLEN, + DAT_HANDLE_NULL, + DAT_EVD_CONNECTION_FLAG, + &ps_ptr->conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_create (conn) failed %s\n", + module, DT_RetToString (ret)); + ps_ptr->conn_evd_hdl = DAT_HANDLE_NULL; + goto server_exit; + } + + /* Create the EP */ + ret = dat_ep_create (ps_ptr->ia_handle, /* IA */ + ps_ptr->pz_handle, /* PZ */ + ps_ptr->recv_evd_hdl, /* recv */ + ps_ptr->reqt_evd_hdl, /* request */ + ps_ptr->conn_evd_hdl, /* connect */ + (DAT_EP_ATTR *) NULL, + &ps_ptr->ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_create error: %s\n", + module, + DT_RetToString (ret)); + ps_ptr->ep_handle = DAT_HANDLE_NULL; + goto server_exit; + } + DT_Mdep_debug (("%s: EP created\n", module)); + + /* Create PSP */ + ret = dat_psp_create (ps_ptr->ia_handle, + SERVER_PORT_NUMBER, + ps_ptr->creq_evd_hdl, + DAT_PSP_CONSUMER_FLAG, + &ps_ptr->psp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_psp_create error: %s\n", + module, + DT_RetToString (ret)); + ps_ptr->psp_handle = DAT_HANDLE_NULL; + goto server_exit; + } + DT_Mdep_debug (("%s: PSP created\n", module)); + + /* + * Create two buffers, large enough to hold ClientInfo and the largest + * command we'll use. + */ + ps_ptr->bpool = DT_BpoolAlloc (NULL, + ps_ptr->ia_handle, + ps_ptr->pz_handle, + ps_ptr->ep_handle, + DAT_HANDLE_NULL, /* no RMR */ + DT_RoundSize (sizeof (Transaction_Cmd_t), 8192), + 3, /* num_buffers */ + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if (ps_ptr->bpool == 0) + { + DT_Mdep_printf ("%s: no memory for command buffer pool.\n", module); + goto server_exit; + } + + DT_Mdep_spew (3, ("Recv 0 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + ps_ptr->bpool, 0))); + DT_Mdep_spew (3, ("Recv 1 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + ps_ptr->bpool, 1))); + DT_Mdep_spew (3, ("SrvInfo 2 %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + ps_ptr->bpool, 2))); + + /* Initialize the performance test lock in case an incoming test + * is a performance test, so we can allow only one at a time. + * Otherwise, multiple performance tests cause a race condition + * between the server creating a new thread trying to allocate a + * PSP with the same ID as another thread that is either running + * a test on that same ID or hasn't yet destroyed it. Only one + * PSP with a particular ID can exist at a time. It's a + * de-facto shared resource that must be protected. 
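 *
 * Concretely: the server takes g_PerfTestLock after creating (and
 * before starting) each performance-test thread -- see the
 * PERFORMANCE_TEST case below -- and the lock is presumably dropped by
 * the test thread once its PSP has been destroyed, so at most one
 * performance PSP with a given ID is live at any instant.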
+ */ + /************************************************************************ + * Loop accepting connections and acting on them + */ + for (;/* EVER */;) + { + DAT_CR_HANDLE cr_handle; + DAT_CR_ARRIVAL_EVENT_DATA cr_stat; + DAT_EVENT_NUMBER event_num; + + /* Set up the Per_Test_Data */ + pt_ptr = DT_Alloc_Per_Test_Data (); + if (!pt_ptr) + { + DT_Mdep_printf ("%s: no memory for Per_Test_Data\n", module); + goto server_exit; + } + DT_MemListInit (pt_ptr); + DT_Thread_Init (pt_ptr); + pt_ptr->local_is_server = true; + pt_ptr->ps_ptr = ps_ptr; + /* Server_Info, Client_Info, Params set up below */ + + /* Gather whatever info we want about defaults */ + if (!DT_query (pt_ptr, ps_ptr->ia_handle, ps_ptr->ep_handle)) + { + goto server_exit; + } + + /* Post recv buffers for ClientInfo and Transaction_Cmd_t */ + DT_Mdep_debug (("%s: Posting 2 recvs\n", module)); + if (!DT_post_recv_buffer (ps_ptr->ep_handle, + ps_ptr->bpool, + 0, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 0))) + { + DT_Mdep_printf ("%s: cannot post ClientInfo recv buffer\n", module); + goto server_exit; + } + if (!DT_post_recv_buffer (ps_ptr->ep_handle, + ps_ptr->bpool, + 1, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 1))) + { + DT_Mdep_printf ("%s: cannot post Transaction_Cmd_t recv buffer\n", + module); + goto server_exit; + } + + /* message to help automated test scripts know when to start the client */ + DT_Mdep_printf ("Dapltest: Service Point Ready - %s\n", + Server_Cmd->dapl_name); + + DT_Mdep_flush (); + + DT_Mdep_debug (("%s: Waiting for Connection Request\n", module)); + if (!DT_cr_event_wait (ps_ptr->creq_evd_hdl, &cr_stat) || + !DT_cr_check (&cr_stat, + ps_ptr->psp_handle, + SERVER_PORT_NUMBER, + &cr_handle, + module)) + { + + DT_Mdep_printf ("CR Check failed, file %s line %d\n", __FILE__, + __LINE__); + goto server_exit; + } + + DT_Mdep_debug (("%s: Accepting Connection Request\n", module)); + ret = dat_cr_accept (cr_handle, ps_ptr->ep_handle, 0, (DAT_PVOID)0); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_cr_accept error: %s\n", + module, + DT_RetToString (ret)); + goto server_exit; + } + + DT_Mdep_debug (("%s: Awaiting connection ...\n", module)); + if (!DT_conn_event_wait (ps_ptr->ep_handle, ps_ptr->conn_evd_hdl, + &event_num)) + { + DT_Mdep_printf ("%s: error awaiting conn-established event\n", + module); + goto server_exit; + } + + if (DT_dapltest_debug) + { + DT_Mdep_debug (("%s: Connected!\n", module)); + get_ep_connection_state (ps_ptr->ep_handle); + } + + /* Wait for Client_Info */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( ps_ptr->bpool, 0); + DT_Mdep_debug (("%s: Waiting for Client_Info\n", module)); + if (!DT_dto_event_wait (ps_ptr->recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ps_ptr->ep_handle, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 0), + dto_cookie, + "Client_Info_Recv")) + { + goto server_exit; + } + DT_Mdep_debug (("%s: Got Client_Info\n", module)); + + /* Extract the Client_Info */ + Client_Info = (Client_Info_t*) DT_Bpool_GetBuffer (ps_ptr->bpool, 0); + DT_Client_Info_Endian (Client_Info); + memcpy ( (void *) (uintptr_t)&pt_ptr->Client_Info, + (const void *)Client_Info, + sizeof (Client_Info_t)); + + /* Wait for client's command info */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( ps_ptr->bpool, 1); + DT_Mdep_debug (("%s: Waiting for Client_Cmd_Info\n", module)); + if (!DT_dto_event_wait (ps_ptr->recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ps_ptr->ep_handle, + 
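/*
 * Cookie convention used in these waits: each recv is posted with the
 * buffer's own address as its DTO cookie, so the completion is matched
 * by rebuilding the same cookie here and letting DT_dto_check() compare
 * EP, length and cookie in one place, e.g.
 *
 *   dto_cookie.as_64  = LZERO;
 *   dto_cookie.as_ptr =
 *       (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer (ps_ptr->bpool, 0);
 *
 * A mismatch means an unexpected completion was reaped, which the check
 * reports and the server treats as fatal (goto server_exit).
 */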
DT_Bpool_GetBuffSize (ps_ptr->bpool, 1), + dto_cookie, + "Client_Cmd_Recv")) + { + goto server_exit; + } + + /* Extract the client's command info */ + switch (Client_Info->test_type) + { + case TRANSACTION_TEST: + { + Transaction_Cmd = (Transaction_Cmd_t *) + DT_Bpool_GetBuffer (ps_ptr->bpool, 1); + DT_Transaction_Cmd_Endian (Transaction_Cmd, false); + memcpy ( (void *) (uintptr_t)&pt_ptr->Params.u.Transaction_Cmd, + (const void *)Transaction_Cmd, + sizeof (Transaction_Cmd_t)); + break; + } + + case PERFORMANCE_TEST: + { + Performance_Cmd = (Performance_Cmd_t *) + DT_Bpool_GetBuffer (ps_ptr->bpool, 1); + DT_Performance_Cmd_Endian (Performance_Cmd); + memcpy ( (void *) (uintptr_t)&pt_ptr->Params.u.Performance_Cmd, + (const void *)Performance_Cmd, + sizeof (Performance_Cmd_t)); + break; + } + + case QUIT_TEST: + { + Quit_Cmd = (Quit_Cmd_t*)DT_Bpool_GetBuffer (ps_ptr->bpool, 1); + DT_Quit_Cmd_Endian (Quit_Cmd, false); + memcpy ( (void *) (uintptr_t)&pt_ptr->Params.u.Quit_Cmd, + (const void *)Quit_Cmd, + sizeof (Quit_Cmd_t)); + break; + } + + default: + { + DT_Mdep_printf ("Unknown TestType received\n"); + goto server_exit; + break; + } + } + + /* Setup Server Info */ + DT_Mdep_debug (("%s: Send Server_Info\n", module)); + pt_ptr->Server_Info.dapltest_version = DAPLTEST_VERSION; + pt_ptr->Server_Info.is_little_endian = DT_local_is_little_endian; + pt_ptr->Server_Info.first_port_number = ps_ptr->NextPortNumber; + ps_ptr->NextPortNumber += pt_ptr->Client_Info.total_threads; + + /* This had to be done here because the pt_ptr is being fed to + * the thread as its context, and if it isn't properly + * initialized before the thread spawns then the thread may + * incorrectly set up its PSP and the server will be listening + * on the WRONG PORT! + */ + + switch (Client_Info->test_type) + { + case TRANSACTION_TEST: + { + /* create a thread to handle this pt_ptr; */ + DT_Mdep_debug (("%s: Creating Transaction Test Thread\n", module)); + pt_ptr->thread = DT_Thread_Create (pt_ptr, + DT_Transaction_Test_Server, + pt_ptr, + DT_MDEP_DEFAULT_STACK_SIZE); + if (pt_ptr->thread == 0) + { + DT_Mdep_printf ("no memory to create thread\n"); + goto server_exit; + } + break; + } + + case PERFORMANCE_TEST: + { + /* create a thread to handle this pt_ptr; */ + DT_Mdep_debug (("%s: Creating Performance Test Thread\n", module)); + pt_ptr->thread = DT_Thread_Create (pt_ptr, + DT_Performance_Test_Server, + pt_ptr, + DT_MDEP_DEFAULT_STACK_SIZE); + if (pt_ptr->thread == 0) + { + DT_Mdep_printf ("no memory to create thread\n"); + goto server_exit; + } + /* take the performance test lock to serialize */ + DT_Mdep_Lock ( &g_PerfTestLock ); + + break; + } + + case QUIT_TEST: + { + DT_Mdep_debug (("Client Requests Server to Quit\n")); + (void) send_control_data(buffp, ps_ptr, pt_ptr); + goto server_exit; + break; + } + + case LIMIT_TEST: + { + DT_Mdep_debug (("Limit Test is Client-side Only!\n")); + (void) send_control_data(buffp, ps_ptr, pt_ptr); + goto server_exit; + break; + } + + default: + { + DT_Mdep_printf ("Unknown TestType received\n"); + (void) send_control_data(buffp, ps_ptr, pt_ptr); + goto server_exit; + break; + } + } + + /* Start the new test thread */ + DT_Mdep_debug (("%s: Starting Test Thread\n", module)); + if (DT_Thread_Start (pt_ptr->thread) == false) + { + DT_Mdep_debug (("failed to start test thread\n")); + goto server_exit; + } + + buffp = DT_Bpool_GetBuffer (ps_ptr->bpool, 2); /* 3rd buffer */ + memcpy ( (void *)buffp, + (const void *)&pt_ptr->Server_Info, + sizeof (Server_Info_t)); + DT_Server_Info_Endian 
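/*
 * Port bookkeeping above, spelled out with example numbers:
 * NextPortNumber starts at SERVER_PORT_NUMBER + 1. A client announcing
 * total_threads = 4 is told first_port_number = N, and the server
 * advances NextPortNumber to N + 4, so that client's test threads can
 * listen on N .. N+3 while the next client starts at N + 4 -- no two
 * test threads ever share a connection qualifier.
 */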
((Server_Info_t *) buffp); + + /* Perform obligatory version check */ + if (pt_ptr->Client_Info.dapltest_version != DAPLTEST_VERSION) + { + DT_Mdep_printf ("%s: %s: Server %d, Client %d\n", + module, + "DAPLTEST VERSION MISMATCH", + DAPLTEST_VERSION, + pt_ptr->Client_Info.dapltest_version); + goto server_exit; + } + DT_Mdep_debug (("%s: Version OK!\n", module)); + + DT_Mdep_wait_object_wait ( + &pt_ptr->synch_wait_object, + DAT_TIMEOUT_INFINITE); + + /* Send the Server_Info */ + DT_Mdep_debug (("%s: Send Server_Info\n", module)); + + if (!DT_post_send_buffer ( ps_ptr->ep_handle, + ps_ptr->bpool, + 2, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 2))) + { + DT_Mdep_printf ("%s: cannot send Server_Info\n", module); + goto server_exit; + } + /* reap the send and verify it */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( ps_ptr->bpool, 2); + if (!DT_dto_event_wait (ps_ptr->reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ps_ptr->ep_handle, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 2), + dto_cookie, + "Server_Info_Send")) + { + goto server_exit; + } + + + /* Count this new client and get ready for the next */ + DT_Mdep_Lock (&ps_ptr->num_clients_lock); + ps_ptr->num_clients++; + DT_Mdep_Unlock (&ps_ptr->num_clients_lock); + + /* we passed the pt_ptr to the thread and must now 'forget' it */ + pt_ptr = 0; + +#ifdef CM_BUSTED + DT_Mdep_debug (("%s: Server exiting because provider does not support\n" + " multiple connections to the same service point\n", + module)); + /* Until connections are healthier we run just one test */ + break; +#else + ret = dat_ep_disconnect (ps_ptr->ep_handle, DAT_CLOSE_GRACEFUL_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_disconnect fails: %s\n", + module, DT_RetToString (ret)); + goto server_exit; + } + if (!DT_disco_event_wait ( ps_ptr->conn_evd_hdl, NULL)) + { + DT_Mdep_printf ("%s: bad disconnect event\n", module); + goto server_exit; + } + + /* reset the EP to get back into the game */ + dat_ep_reset (ps_ptr->ep_handle); + DT_Mdep_debug (("%s: Waiting for another client...\n", module)); +#endif + + + + + } /* end loop accepting connections */ + + /************************************************************************ + * Finished (or had an error) so clean up and go home + */ +server_exit: + + /* Wait until all of our clients are gone */ + DT_Mdep_debug (("%s: Waiting for clients to all go away...\n", module)); + while (ps_ptr && ps_ptr->num_clients > 0) + { + DT_Mdep_Sleep (100); + } + + /* Clean up the Per_Test_Data (if any) */ + DT_Mdep_debug (("%s: Cleaning up ...\n", module)); + if (pt_ptr) + { + DT_Mdep_LockDestroy (&pt_ptr->Thread_counter_lock); + DT_Mdep_LockDestroy (&pt_ptr->MemListLock); + DT_Free_Per_Test_Data (pt_ptr); + } + + /* Clean up the Per_Server_Data */ + if (ps_ptr) + { + + /* + * disconnect the most recent EP + * + * we also get here on error, hence abrupt closure to + * flush any lingering buffers posted. 
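+	 *
+	 * (Contrast with the in-loop path above, which uses
+	 * DAT_CLOSE_GRACEFUL_FLAG and then dat_ep_reset() to re-arm the
+	 * EP for the next client; since we can also arrive here on an
+	 * error, the abrupt close below forces any still-posted buffers
+	 * to complete before the handles are freed.)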
+ */ + if (ps_ptr->ep_handle) + { + ret = dat_ep_disconnect (ps_ptr->ep_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_disconnect fails: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + else if (!DT_disco_event_wait ( ps_ptr->conn_evd_hdl, + NULL)) + { + DT_Mdep_printf ("%s: bad disconnect event\n", module); + } + } + + /* Destroy the Bpool */ + if (ps_ptr->bpool) + { + if (!DT_Bpool_Destroy (NULL, ps_ptr->bpool)) + { + DT_Mdep_printf ("%s: error destroying buffer pool\n", module); + /* keep trying */ + } + } + + /* Free the PSP */ + if (ps_ptr->psp_handle) + { + ret = dat_psp_free (ps_ptr->psp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_psp_free error: %s\n", + module, + DT_RetToString (ret)); + /* keep trying */ + } + } + + /* Free the EP */ + if (ps_ptr->ep_handle) + { + ret = dat_ep_free (ps_ptr->ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_free error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + + /* Free the 4 EVDs */ + if (ps_ptr->conn_evd_hdl) + { + ret = dat_evd_free (ps_ptr->conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (conn) error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + if (ps_ptr->creq_evd_hdl) + { + ret = dat_evd_free (ps_ptr->creq_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (creq) error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + if (ps_ptr->reqt_evd_hdl) + { + ret = dat_evd_free (ps_ptr->reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (reqt) error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + if (ps_ptr->recv_evd_hdl) + { + ret = dat_evd_free (ps_ptr->recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_evd_free (recv) error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + + /* Free the PZ */ + if (ps_ptr->pz_handle) + { + ret = dat_pz_free (ps_ptr->pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_pz_free error: %s\n", + module, DT_RetToString (ret)); + /* keep trying */ + } + } + + /* Close the IA */ + if (ps_ptr->ia_handle) + { + /* DT_ia_close cleans up async evd handle, too */ + ret = DT_ia_close (ps_ptr->ia_handle, DAT_CLOSE_GRACEFUL_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (graceful) error: %s\n", + module, DT_RetToString (ret)); + ret = DT_ia_close (ps_ptr->ia_handle, DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: DT_ia_close (abrupt) error: %s\n", + module, DT_RetToString (ret)); + } + /* keep trying */ + } + else + { + DT_Mdep_debug (("%s: IA %s closed\n", + module, + Server_Cmd->dapl_name)); + } + } + + /* Destroy the ps_ptr */ + DT_Mdep_LockDestroy (&ps_ptr->num_clients_lock); + DT_Mdep_Free (ps_ptr); + } /* end if ps_ptr */ + + /* Clean up the server list */ + pre_list = 0; + temp_list = DT_started_server_list; + while (temp_list) + { + if (strcmp (temp_list->devicename, Server_Cmd->dapl_name) == 0) + { + if (pre_list == 0) /* first one */ + { + DT_started_server_list = temp_list->next; + } + else + { + pre_list->next = temp_list->next; + } + DT_Mdep_Free (temp_list); + break; + } + pre_list = temp_list; + temp_list = temp_list->next; + } + + DT_Mdep_printf ("%s (%s): Exiting.\n", module, Server_Cmd->dapl_name); +} + + +int +send_control_data( + unsigned char *buffp, + Per_Server_Data_t *ps_ptr, + Per_Test_Data_t *pt_ptr) +{ + 
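+    /*
+     * Helper used by the QUIT/LIMIT/unknown-command paths above: push
+     * Server_Info back to the client so it is not left waiting before
+     * the connection is torn down.  Note that the buffp argument is
+     * immediately overwritten below, so the caller's value is unused.
+     * Returns 0 on success, 1 on failure.
+     */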
unsigned char *module = "send_control_data"; + DAT_DTO_COOKIE dto_cookie; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + + buffp = DT_Bpool_GetBuffer (ps_ptr->bpool, 2); /* 3rd buffer */ + memcpy ( (void *)buffp, + (const void *)&pt_ptr->Server_Info, + sizeof (Server_Info_t)); + DT_Server_Info_Endian ((Server_Info_t *) buffp); + + if (!DT_post_send_buffer ( ps_ptr->ep_handle, + ps_ptr->bpool, + 2, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 2))) + { + DT_Mdep_printf ("%s: cannot send Server_Info\n", module); + return 1; + } + /* reap the send and verify it */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer ( ps_ptr->bpool, 2); + if (!DT_dto_event_wait (ps_ptr->reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + ps_ptr->ep_handle, + DT_Bpool_GetBuffSize (ps_ptr->bpool, 2), + dto_cookie, + "Server_Info_Send")) + { + return 1; + } + + return 0; +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.c new file mode 100644 index 00000000..059cbe6e --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+#include "dapl_getopt.h"
+#include "dapl_mdep.h"
+#include "dapl_proto.h"
+#include "dapl_server_cmd.h"
+
+
+void
+DT_Server_Cmd_Init (Server_Cmd_t * Server_Cmd)
+{
+    DT_dapltest_debug = 0;
+    Server_Cmd->debug = false;
+    Server_Cmd->dapl_name[0] = '\0';
+    Server_Cmd->ReliabilityLevel = DAT_QOS_BEST_EFFORT;
+}
+
+
+bool
+DT_Server_Cmd_Parse (Server_Cmd_t * Server_Cmd,
+                     int my_argc,
+                     char **my_argv,
+                     mygetopt_t * opts)
+{
+    char c;
+    for (;;)
+    {
+        c = DT_mygetopt_r (my_argc, my_argv, "dD:R:", opts);
+        if (c == EOF)
+        {
+            break;
+        }
+        switch (c)
+        {
+        case 'D':
+            {
+                strcpy (Server_Cmd->dapl_name, opts->optarg);
+                break;
+            }
+        case 'd':
+            {
+                DT_dapltest_debug++;
+                Server_Cmd->debug = true;
+                break;
+            }
+        case 'R':           /* Service Reliability Level */
+            {
+                Server_Cmd->ReliabilityLevel = DT_ParseQoS (opts->optarg);
+                if (0 == Server_Cmd->ReliabilityLevel)
+                {
+                    return (false);
+                }
+                break;
+            }
+        case '?':
+        default:
+            {
+                DT_Mdep_printf ("Invalid Server option: %c\n", opts->optopt);
+                DT_Server_Cmd_Usage ();
+                return (false);
+            }
+        }
+    }
+    if (Server_Cmd->dapl_name[0] == '\0')
+    {
+        if (!DT_Mdep_GetDefaultDeviceName (Server_Cmd->dapl_name))
+        {
+            DT_Mdep_printf ("can't get default device name\n");
+            DT_Server_Cmd_Usage ();
+            return (false);
+        }
+    }
+    return (true);
+}
+
+
+void
+DT_Server_Cmd_Print (Server_Cmd_t * Server_Cmd)
+{
+    DT_Mdep_printf ("Server_Cmd.debug: %d\n", Server_Cmd->debug);
+    DT_Mdep_printf ("Server_Cmd.dapl_name: %s\n", Server_Cmd->dapl_name);
+}
+
+void
+DT_Server_Cmd_Usage (void)
+{
+    DT_Mdep_printf ("USAGE: ---- SERVER MODE ----\n");
+    DT_Mdep_printf ("USAGE: dapltest -T S\n");
+    DT_Mdep_printf ("USAGE: [-D <device_name>]\n");
+    DT_Mdep_printf ("USAGE: [-d] : debug (zero)\n");
+    DT_Mdep_printf ("USAGE: [-R <service_reliability>]\n");
+    DT_Mdep_printf ("USAGE: (BE == QOS_BEST_EFFORT - Default)\n");
+    DT_Mdep_printf ("USAGE: (HT == QOS_HIGH_THROUGHPUT)\n");
+    DT_Mdep_printf ("USAGE: (LL == QOS_LOW_LATENCY)\n");
+    DT_Mdep_printf ("USAGE: (EC == QOS_ECONOMY)\n");
+    DT_Mdep_printf ("USAGE: (PM == QOS_PREMIUM)\n");
+    DT_Mdep_printf ("USAGE: Run as server using default parameters\n");
+    DT_Mdep_printf ("USAGE: dapltest\n");
+    DT_Mdep_printf ("USAGE:\n");
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.h
new file mode 100644
index 00000000..0bee79f6
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_cmd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */ + +#ifndef __DAPL_SERVER_CMD_H__ +#define __DAPL_SERVER_CMD_H__ + +#include "dapl_mdep.h" + +#pragma pack(1) + +typedef struct +{ + bool debug; /* -d */ + char dapl_name[256]; /* -D device name */ + DAT_QOS ReliabilityLevel; /* -R */ +} Server_Cmd_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.c new file mode 100644 index 00000000..8ce3413b --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.c @@ -0,0 +1,50 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_server_info.h" +#include "dapl_proto.h" +#include "dapl_test_data.h" + + +Started_server_t *DT_started_server_list = 0; + +void +DT_Server_Info_Endian (Server_Info_t * server_info) +{ + server_info->dapltest_version = DT_Endian32 (server_info->dapltest_version); + server_info->is_little_endian = DT_Endian32 (server_info->is_little_endian); + server_info->first_port_number= DT_Endian32 (server_info->first_port_number); +} + +void +DT_Server_Info_Print (Server_Info_t * server_info) +{ + DT_Mdep_printf ("-------------------------------------\n"); + DT_Mdep_printf ("Server_Info.dapltest_version : %d\n", + server_info->dapltest_version); + DT_Mdep_printf ("Server_Info.is_little_endian : %d\n", + server_info->is_little_endian); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.h new file mode 100644 index 00000000..aadf2044 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_server_info.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. 
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_SERVER_INFO_H__ +#define __DAPL_SERVER_INFO_H__ + +#include "dapl_mdep.h" +#include + +#pragma pack(1) + +struct started_server +{ + char devicename[80]; + struct started_server *next; +}; + +typedef struct started_server Started_server_t; + +extern Started_server_t *DT_started_server_list; + +#define SERVER_PORT_NUMBER (0xBabb1e) + +typedef struct +{ + DAT_UINT32 dapltest_version; + DAT_UINT32 is_little_endian; + DAT_UINT32 first_port_number; +} Server_Info_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.c new file mode 100644 index 00000000..20d826d5 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.c @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_mdep.h" +#include "dapl_proto.h" +#include "dapl_server_info.h" +#include "dapl_client_info.h" +#include "dapl_transaction_test.h" + +DAT_COUNT DT_dapltest_debug = 0; +bool DT_local_is_little_endian; +/* + * check memory leaking int alloc_count; DT_Mdep_LockType + * Alloc_Count_Lock; + */ + +Per_Test_Data_t * +DT_Alloc_Per_Test_Data (void) +{ + Per_Test_Data_t *pt_ptr; + pt_ptr = 0; + + pt_ptr = DT_Mdep_Malloc (sizeof (Per_Test_Data_t)); + if (!pt_ptr) + { + DT_Mdep_printf ("No Memory to create per_test_data!\n"); + } + + return (pt_ptr); +} + +void +DT_Free_Per_Test_Data (Per_Test_Data_t * pt_ptr) +{ + DT_Mdep_Free (pt_ptr); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.h new file mode 100644 index 00000000..6b682137 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_data.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_TEST_DATA_H__ +#define __DAPL_TEST_DATA_H__ + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_client_info.h" +#include "dapl_transaction_stats.h" +#include "dapl_memlist.h" +#include "dapl_params.h" +#include "dapl_server_info.h" +#include + +extern DAT_COUNT DT_dapltest_debug; +extern bool DT_local_is_little_endian; + +/* This lock allows the client side to run + * in a shell script loop without breaking + * connections. Remove it and due to timing + * problems on the server side occasionally + * the server will reject connections. + */ +extern DT_Mdep_LockType g_PerfTestLock; + +/* + * check memory leaking extern int alloc_count ; extern + * DT_Mdep_LockType Alloc_Count_Lock; + */ +#pragma pack(1) +typedef struct +{ + int NextPortNumber; + int num_clients; + DT_Mdep_LockType num_clients_lock; + DAT_IA_HANDLE ia_handle; + DAT_PZ_HANDLE pz_handle; + DAT_EVD_HANDLE recv_evd_hdl; + DAT_EVD_HANDLE reqt_evd_hdl; + DAT_EVD_HANDLE conn_evd_hdl; + DAT_EVD_HANDLE creq_evd_hdl; + DAT_EVD_HANDLE async_evd_hdl; + DAT_EVD_HANDLE rmr_evd_hdl; + DAT_EP_HANDLE ep_handle; + DAT_PSP_HANDLE psp_handle; + Bpool *bpool; +} Per_Server_Data_t; + +typedef struct +{ + DT_Mdep_LockType MemListLock; + MemListEntry_t *MemListHead; + + DT_Mdep_LockType Thread_counter_lock; + int Thread_counter; + Thread *thread; + + bool local_is_server; + Server_Info_t Server_Info; + Client_Info_t Client_Info; + Params_t Params; + DAT_IA_ATTR ia_attr; + DAT_PROVIDER_ATTR provider_attr; + DAT_EP_ATTR ep_attr; + Per_Server_Data_t *ps_ptr; + Transaction_Stats_t Client_Stats; + + /* synchronize the server with the server's spawned test thread. + * That test thread uses a PSP that only one test at a time can + * use. If we don't synchronize access between the teardown and + * creation of that PSP then the client will fail to connect + * randomly, a symptom that the server is not coordinated with + * its test threads. Remove this at your own peril, or if you + * really want your test client to experience rejection on a + * random but regular basis. + */ + DT_WAIT_OBJECT synch_wait_object; + int Countdown_Counter; + +} Per_Test_Data_t; +#pragma pack() +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_util.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_util.c new file mode 100644 index 00000000..60ba5873 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_test_util.c @@ -0,0 +1,743 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" +#include +#include "dapl_test_data.h" + + +/* ----------------------------------------------------------- + * Gather info about default attributes + */ +DAT_BOOLEAN +DT_query ( Per_Test_Data_t *pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_EP_HANDLE ep_handle) +{ + unsigned char *module = "DT_query"; + DAT_EVD_HANDLE async_evd_hdl; /* not used */ + DAT_EP_PARAM ep_params; + DAT_RETURN ret; + + /* Query the IA */ + ret = dat_ia_query (ia_handle, + &async_evd_hdl, + DAT_IA_ALL, + &pt_ptr->ia_attr, + DAT_PROVIDER_FIELD_ALL, + &pt_ptr->provider_attr); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ia_query error: %s\n", + module, + DT_RetToString (ret)); + return ( false ); + } + + /* Query the EP */ + ret = dat_ep_query ( ep_handle, + DAT_EP_FIELD_ALL, + &ep_params); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("%s: dat_ep_query error: %s\n", + module, + DT_RetToString (ret)); + return ( false ); + } + pt_ptr->ep_attr = ep_params.ep_attr; + + /* + * If debugging, print out some interesting attributes + */ + if (DT_dapltest_debug) + { + DAT_SOCK_ADDR6 *ip6_addr; + struct sockaddr_in *ip_addr; + + DT_Mdep_printf ("***** DAPL Characteristics *****\n"); + DT_Mdep_printf ("Provider: %s Version %d.%d DAPL %d.%d\n", + pt_ptr->provider_attr.provider_name, + pt_ptr->provider_attr.provider_version_major, + pt_ptr->provider_attr.provider_version_minor, + pt_ptr->provider_attr.dapl_version_major, + pt_ptr->provider_attr.dapl_version_minor ); + DT_Mdep_printf ("Adapter: %s by %s Version %d.%d\n", + pt_ptr->ia_attr.adapter_name, + pt_ptr->ia_attr.vendor_name, + pt_ptr->ia_attr.hardware_version_major, + pt_ptr->ia_attr.hardware_version_minor ); + DT_Mdep_printf ("Supporting:\n"); + DT_Mdep_printf ("\t%d EPs with %d DTOs and %d RDMA/RDs each\n", + pt_ptr->ia_attr.max_eps, + pt_ptr->ia_attr.max_dto_per_ep, + pt_ptr->ia_attr.max_rdma_read_per_ep ); + DT_Mdep_printf ("\t%d EVDs of up to %d entries " + " (default S/R size is %d/%d)\n", + pt_ptr->ia_attr.max_evds, + pt_ptr->ia_attr.max_evd_qlen, + pt_ptr->ep_attr.max_request_dtos, + pt_ptr->ep_attr.max_recv_dtos ); + DT_Mdep_printf ("\tIOVs of up to %d elements\n", + pt_ptr->ia_attr.max_iov_segments_per_dto ); + DT_Mdep_printf ("\t%d LMRs (and %d RMRs) of up to 0x" F64x " bytes\n", + pt_ptr->ia_attr.max_lmrs, + pt_ptr->ia_attr.max_rmrs, + (DAT_UVERYLONG)pt_ptr->ia_attr.max_lmr_block_size ); + DT_Mdep_printf ("\tMaximum MTU 0x" F64x " bytes, RDMA 0x" F64x " bytes\n", + (DAT_UVERYLONG)pt_ptr->ia_attr.max_mtu_size, + (DAT_UVERYLONG)pt_ptr->ia_attr.max_rdma_size ); + DT_Mdep_printf ("\tMaximum Private data size %d bytes\n", + pt_ptr->provider_attr.max_private_data_size ); + + ip6_addr = (DAT_SOCK_ADDR6 *)pt_ptr->ia_attr.ia_address_ptr; + if (ip6_addr->sin6_family == AF_INET6 ) + { + DT_Mdep_printf ("\tLocal IP address %x:%x:%x:%x:%x:%x:%x:%x:\n", + ip6_addr->sin6_addr.s6_addr[0], + ip6_addr->sin6_addr.s6_addr[1], + ip6_addr->sin6_addr.s6_addr[2], + 
ip6_addr->sin6_addr.s6_addr[3], + ip6_addr->sin6_addr.s6_addr[4], + ip6_addr->sin6_addr.s6_addr[5], + ip6_addr->sin6_addr.s6_addr[6], + ip6_addr->sin6_addr.s6_addr[7]); + DT_Mdep_printf ("%x:%x:%x:%x:%x:%x:%x:%x\n", + ip6_addr->sin6_addr.s6_addr[8], + ip6_addr->sin6_addr.s6_addr[9], + ip6_addr->sin6_addr.s6_addr[10], + ip6_addr->sin6_addr.s6_addr[11], + ip6_addr->sin6_addr.s6_addr[12], + ip6_addr->sin6_addr.s6_addr[13], + ip6_addr->sin6_addr.s6_addr[14], + ip6_addr->sin6_addr.s6_addr[15]); + } + else if (ip6_addr->sin6_family == AF_INET ) + + { + int rval; + + ip_addr = (struct sockaddr_in *)pt_ptr->ia_attr.ia_address_ptr; + rval = (int) ip_addr->sin_addr.s_addr; + + DT_Mdep_printf ("\tLocal IP address %d.%d.%d.%d\n", + (rval >> 0) & 0xff, + (rval >> 8) & 0xff, + (rval >> 16) & 0xff, + (rval >> 24) & 0xff); + } + + DT_Mdep_printf ("***** ***** ***** ***** ***** *****\n"); + } + + return ( true ); +} + + +/* ----------------------------------------------------------- + * Post a recv buffer + */ +DAT_BOOLEAN +DT_post_recv_buffer (DAT_EP_HANDLE ep_handle, + Bpool * bp, + int index, + int size) +{ + unsigned char *buff = DT_Bpool_GetBuffer (bp, index); + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (bp, index); + DAT_LMR_CONTEXT lmr_c = DT_Bpool_GetLMR (bp, index); + DAT_DTO_COOKIE cookie; + DAT_RETURN ret; + + /* + * Prep the inputs + */ + iov->virtual_address = (DAT_VADDR) (uintptr_t) buff; + iov->segment_length = size; + iov->lmr_context = lmr_c; + cookie.as_64 = (DAT_UINT64)0UL; + cookie.as_ptr = (DAT_PVOID) buff; + + DT_Mdep_spew (3, ("Post-Recv #%d [%p, %x]\n", index, buff, size)); + + /* Post the recv buffer */ + ret = dat_ep_post_recv (ep_handle, + 1, + iov, + cookie, + DAT_COMPLETION_DEFAULT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dat_ep_post_recv failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + return false; + } + return true; +} + + +/* ----------------------------------------------------------- + * Post a send buffer + */ +DAT_BOOLEAN +DT_post_send_buffer (DAT_EP_HANDLE ep_handle, + Bpool * bp, + int index, + int size) +{ + unsigned char *buff = DT_Bpool_GetBuffer (bp, index); + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (bp, index); + DAT_LMR_CONTEXT lmr_c = DT_Bpool_GetLMR (bp, index); + DAT_DTO_COOKIE cookie; + DAT_RETURN ret; + + /* + * Prep the inputs + */ + iov->virtual_address = (DAT_VADDR) (uintptr_t) buff; + iov->segment_length = size; + iov->lmr_context = lmr_c; + cookie.as_64 = (DAT_UINT64)0UL; + cookie.as_ptr = (DAT_PVOID) buff; + + DT_Mdep_spew (3, ("Post-Send #%d [%p, %x]\n", index, buff, size)); + + /* Post the recv buffer */ + ret = dat_ep_post_send (ep_handle, + 1, + iov, + cookie, + DAT_COMPLETION_DEFAULT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dat_ep_post_send failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + return false; + } + return true; +} + + +/* ----------------------------------------------------------- + * Wait for a CR event, returning false on error. 
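+ *
+ * (The err_cnt counter below is vestigial: the retry logic that once
+ * tolerated a few transient dat_evd_wait() failures is commented out,
+ * so any wait error now breaks the loop and returns false.)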
+ */ +bool +DT_cr_event_wait ( DAT_EVD_HANDLE evd_handle, + DAT_CR_ARRIVAL_EVENT_DATA *cr_stat_p) +{ + int err_cnt; + + err_cnt = 0; + + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + DAT_COUNT count; + + ret = dat_evd_wait ( evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &count); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (CR) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + /* + * If we get an error due to the client breaking the + * connection early or some transients, just ignore it + * and keep going. If we get a bunch of errors, bail + * out. + */ +/* if ( err_cnt++ < 10 ) */ +/* { */ +/* continue; */ +/* } */ + + break; + } + + if (event.event_number == DAT_CONNECTION_REQUEST_EVENT) + { + /* + * Pass back what we know, if requested. + */ + if (cr_stat_p) + { + *cr_stat_p = event.event_data.cr_arrival_event_data; + } + return (true); + } + + DT_Mdep_printf ("Warning: cr_event_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Wait for a connection event, returning false on error. + */ +bool +DT_conn_event_wait (DAT_EP_HANDLE ep_handle, + DAT_EVD_HANDLE evd_handle, + DAT_EVENT_NUMBER *event_number) + +{ + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + DAT_COUNT count; + + ret = dat_evd_wait (evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &count); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (CONN) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + break; + } + *event_number = event.event_number; + if (event.event_number == DAT_CONNECTION_EVENT_PEER_REJECTED + || event.event_number == DAT_CONNECTION_EVENT_NON_PEER_REJECTED + || event.event_number == DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR + || event.event_number == DAT_CONNECTION_EVENT_DISCONNECTED + || event.event_number == DAT_CONNECTION_EVENT_BROKEN + || event.event_number == DAT_CONNECTION_EVENT_UNREACHABLE + || event.event_number == DAT_CONNECTION_EVENT_TIMED_OUT) + { + DT_Mdep_printf ("Warning: conn_event_wait %s\n", + DT_EventToSTr (event.event_number)); + break; + } + if (event.event_number == DAT_CONNECTION_EVENT_ESTABLISHED) + { + /* + * Could return DAT_CONNECTION_EVENT_DATA and verify: + * event.event_data.connect_event_data.ep_handle + * event.event_data.connect_event_data.private_data_size + * event.event_data.connect_event_data.private_data + */ + return (true); + } + + DT_Mdep_printf ("Warning: conn_event_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Wait for a disconnection event, returning false on error. 
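+ *
+ * (Only DAT_CONNECTION_EVENT_DISCONNECTED counts as success; the
+ * rejected/broken/unreachable/timed-out events are logged and return
+ * false, and any unrelated event is swallowed with a warning while
+ * the wait continues.)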
+ */ +bool +DT_disco_event_wait ( DAT_EVD_HANDLE evd_handle, + DAT_EP_HANDLE *ep_handle ) +{ + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + DAT_COUNT count; + + ret = dat_evd_wait (evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &count); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (DISCONN) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + break; + } + if (event.event_number == DAT_CONNECTION_EVENT_PEER_REJECTED + || event.event_number == DAT_CONNECTION_EVENT_NON_PEER_REJECTED + || event.event_number == DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR + || event.event_number == DAT_CONNECTION_EVENT_BROKEN + || event.event_number == DAT_CONNECTION_EVENT_UNREACHABLE + || event.event_number == DAT_CONNECTION_EVENT_TIMED_OUT) + { + DT_Mdep_printf ("Warning: disconn_event_wait %s\n", + DT_EventToSTr (event.event_number)); + break; + } + + + if (event.event_number == DAT_CONNECTION_EVENT_DISCONNECTED) + { + if ( ep_handle != NULL ) + { + *ep_handle = event.event_data.connect_event_data.ep_handle; + } + return (true); + } + + DT_Mdep_printf ("Warning: conn_event_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Reap a DTO event using a wait or polling, returning false on error. + */ +bool +DT_dto_event_reap (DAT_EVD_HANDLE evd_handle, + bool poll, + DAT_DTO_COMPLETION_EVENT_DATA *dto_statusp) +{ + if (poll) + { + return DT_dto_event_poll (evd_handle, dto_statusp); + } + else + { + return DT_dto_event_wait (evd_handle, dto_statusp); + } +} + + +/* ----------------------------------------------------------- + * Poll for a DTO event, returning false on error. + */ +bool +DT_dto_event_poll (DAT_EVD_HANDLE evd_handle, + DAT_DTO_COMPLETION_EVENT_DATA *dto_statusp) +{ + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + + ret = dat_evd_dequeue ( evd_handle, + &event); + + if (DAT_GET_TYPE(ret) == DAT_QUEUE_EMPTY) + { + continue; + } + + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (DTO) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + break; + } + + if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + /* + * Pass back all the useful bits if requested: + * ep_handle, user_cookie.as_ptr + * status, transfered_length + */ + if (dto_statusp) + { + *dto_statusp = event.event_data.dto_completion_event_data; + } + + return (true); + } + + DT_Mdep_printf ("Warning: dto_event_poll swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Wait for a DTO event, returning false on error. 
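+ *
+ * This is the blocking counterpart of DT_dto_event_poll() above, which
+ * spins on dat_evd_dequeue() until the EVD is non-empty.  The usual
+ * pattern (as in send_control_data() earlier in this patch) is to post
+ * a buffer, then reap and verify its completion:
+ *
+ *     dto_cookie.as_64 = LZERO;
+ *     dto_cookie.as_ptr =
+ *         (DAT_PVOID) (uintptr_t) DT_Bpool_GetBuffer (bp, index);
+ *     if (!DT_dto_event_wait (evd_hdl, &dto_stat) ||
+ *         !DT_dto_check (&dto_stat, ep_handle,
+ *                        DT_Bpool_GetBuffSize (bp, index),
+ *                        dto_cookie, "tag"))
+ *     {
+ *         ... handle the failed transfer ...
+ *     }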
+ */ +bool +DT_dto_event_wait (DAT_EVD_HANDLE evd_handle, + DAT_DTO_COMPLETION_EVENT_DATA *dto_statusp) +{ + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + DAT_COUNT count; + + ret = dat_evd_wait ( evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &count); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (DTO) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + break; + } + + if (event.event_number == DAT_DTO_COMPLETION_EVENT) + { + /* + * Pass back all the useful bits if requested: + * ep_handle, user_cookie.as_ptr + * status, transfered_length + */ + if (dto_statusp) + { + *dto_statusp = event.event_data.dto_completion_event_data; + } + return (true); + } + + DT_Mdep_printf ("Warning: dto_event_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Wait for a RMR event, returning false on error. + */ +bool +DT_rmr_event_wait (DAT_EVD_HANDLE evd_handle, + DAT_RMR_BIND_COMPLETION_EVENT_DATA *rmr_statusp) +{ + for (;;) + { + DAT_RETURN ret; + DAT_EVENT event; + DAT_COUNT count; + + ret = dat_evd_wait ( evd_handle, + DAT_TIMEOUT_INFINITE, + 1, + &event, + &count); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dapl_event_wait (RMR) failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + break; + } + + if (event.event_number == DAT_RMR_BIND_COMPLETION_EVENT) + { + /* + * Pass back all the useful bits if requested: + * rmr_handle, user_cookie, status + */ + if (rmr_statusp) + { + *rmr_statusp = event.event_data.rmr_completion_event_data; + } + return (true); + } + + DT_Mdep_printf ("Warning: rmr_event_wait swallowing %s event\n", + DT_EventToSTr (event.event_number)); + } + + return (false); +} + + +/* ----------------------------------------------------------- + * Check a DTO and print some debug info if anything is amiss. 
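+ *
+ * (Four fields are verified: the EP handle when ep_expected is
+ * non-NULL, transfered_length, the DTO cookie, and the completion
+ * status; each mismatch found is printed before returning false.)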
+ */ +bool +DT_dto_check ( DAT_DTO_COMPLETION_EVENT_DATA *dto_p, + DAT_EP_HANDLE ep_expected, + DAT_VLEN len_expected, + DAT_DTO_COOKIE cookie_expected, + char *message ) +{ + static char *completion_status_msg[]={ + "DAT_DTO_SUCCESS", + "DAT_DTO_ERR_FLUSHED", + "DAT_DTO_ERR_LOCAL_LENGTH", + "DAT_DTO_ERR_LOCAL_EP", + "DAT_DTO_ERR_LOCAL_PROTECTION", + "DAT_DTO_ERR_BAD_RESPONSE", + "DAT_DTO_ERR_REMOTE_ACCESS", + "DAT_DTO_ERR_REMOTE_RESPONDER", + "DAT_DTO_ERR_TRANSPORT", + "DAT_DTO_ERR_RECEIVER_NOT_READY", + "DAT_DTO_ERR_PARTIAL_PACKET", + "DAT_RMR_OPERATION_FAILED" + }; + if ( ( (ep_expected != NULL) && (dto_p->ep_handle != ep_expected) ) + || dto_p->transfered_length != len_expected + || dto_p->user_cookie.as_64 != cookie_expected.as_64 + || dto_p->status != DAT_DTO_SUCCESS ) + { + DT_Mdep_printf ("Test Error: %s-reaping DTO problem, status = %s\n", + message, (completion_status_msg[dto_p->status])); + DT_Test_Error (); + if ( (ep_expected != NULL) && (dto_p->ep_handle != ep_expected) ) + { + DT_Mdep_printf ("\tEndPoint mismatch (got %p wanted %p)\n", + dto_p->ep_handle, + ep_expected); + } + if (dto_p->transfered_length != len_expected) + { + DT_Mdep_printf ( + "\tLength mismatch (xfer 0x" F64x " wanted 0x" F64x ")\n", + (DAT_UVERYLONG)dto_p->transfered_length, + (DAT_UVERYLONG)len_expected); + } + if (dto_p->user_cookie.as_64 != cookie_expected.as_64) + { + DT_Mdep_printf ("\tCookie mismatch (got " F64x " wanted " F64x ")\n", + (DAT_UVERYLONG)dto_p->user_cookie.as_64, + (DAT_UVERYLONG)cookie_expected.as_64); + } + return ( false ); + } + + return ( true ); +} + + +/* ----------------------------------------------------------- + * Check an RMR Bind and print some debug info if anything is amiss. + */ +bool +DT_rmr_check ( DAT_RMR_BIND_COMPLETION_EVENT_DATA *rmr_p, + DAT_RMR_HANDLE rmr_expected, + DAT_PVOID cookie_expected, + char *message) +{ + if ( rmr_p->rmr_handle != rmr_expected + || rmr_p->user_cookie.as_ptr != cookie_expected + || rmr_p->status != DAT_RMR_BIND_SUCCESS ) + { + + DT_Mdep_printf ("Test Error: %s RMR bind problem, status = %s\n", + message, + (rmr_p->status == DAT_RMR_BIND_SUCCESS + ? "OK" : "FAILURE")); + DT_Test_Error (); + if (rmr_p->rmr_handle != rmr_expected) + { + DT_Mdep_printf ("\tRMR handle mismatch (got 0x%p wanted 0x%p)\n", + rmr_p->rmr_handle, + rmr_expected); + } + if (rmr_p->user_cookie.as_ptr != cookie_expected) + { + DT_Mdep_printf ("\tCookie mismatch (got %p wanted %p)\n", + rmr_p->user_cookie.as_ptr, + cookie_expected); + } + return ( false ); + } + + return ( true ); +} + + +/* ----------------------------------------------------------- + * Check a CR and print some debug info if anything is amiss. 
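+ *
+ * (On success the CR handle is passed back through cr_handlep; on a
+ * conn_qual or PSP mismatch the pending CR is rejected via
+ * dat_cr_reject() so the requesting client is not left hanging.)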
+ */ +bool +DT_cr_check ( DAT_CR_ARRIVAL_EVENT_DATA *cr_stat_p, + DAT_PSP_HANDLE psp_handle_expected, + DAT_CONN_QUAL port_expected, + DAT_CR_HANDLE *cr_handlep, + char *message) +{ + DAT_RETURN ret; + + if (cr_handlep) + { + *cr_handlep = (DAT_CR_HANDLE) 0; + } + + if (cr_stat_p->conn_qual != port_expected || + (psp_handle_expected && + cr_stat_p->sp_handle.psp_handle != psp_handle_expected)) + { + + DT_Mdep_printf ("Test Error: %s CR data problem\n", message); + DT_Test_Error (); + if (cr_stat_p->conn_qual != port_expected) + { + DT_Mdep_printf ("\tCR conn_qual mismatch " + " (got 0x" F64x " wanted 0x" F64x ")\n", + (DAT_UVERYLONG)cr_stat_p->conn_qual, + (DAT_UVERYLONG)port_expected); + } + if (psp_handle_expected && + cr_stat_p->sp_handle.psp_handle != psp_handle_expected) + { + DT_Mdep_printf ("\tPSP mismatch (got 0x%p wanted 0x%p)\n", + cr_stat_p->sp_handle.psp_handle, + psp_handle_expected); + } + if (!cr_stat_p->cr_handle) + { + DT_Mdep_printf ("\tGot NULL cr_handle\n"); + } + else + { + ret = dat_cr_reject (cr_stat_p->cr_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("\tdat_cr_reject error: %s\n", + DT_RetToString (ret)); + } + } + return ( false ); + } + + if (cr_handlep) + { + *cr_handlep = cr_stat_p->cr_handle; + } + return ( true ); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_thread.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_thread.c new file mode 100644 index 00000000..3f7a4058 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_thread.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" +/* + * Class Thread + * + * Threads subsystem initialization + */ +void +DT_Thread_Init (Per_Test_Data_t * pt_ptr) +{ + DT_Mdep_LockInit (&pt_ptr->Thread_counter_lock); + pt_ptr->Thread_counter = 0; + + /* + * Initialize the synchronization event in the pt_ptr so it's ready + * to be signalled when the time comes. The countdown counter + * lets me coordinate with all the test threads so that the server + * thread doesn't get notified that the test endpoints are ready + * until they actually are. Only transaction tests use this * + * functionality; if the performance test gets changed to use + * multiple threads on the server side then that code semantic + * will need to be added for final test endpoint setup + * notification or there will continue to be a race condition + * between the main server thread and the server test threads. 
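+ *
+ * (The matching wait is in the main server loop earlier in this
+ * patch: it blocks in DT_Mdep_wait_object_wait on synch_wait_object
+ * after starting the test thread and before sending Server_Info
+ * back to the client.)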
+ */ + DT_Mdep_wait_object_init(&pt_ptr->synch_wait_object); + pt_ptr->Countdown_Counter = 0; + +} + +/* + * Threads subsystem destroying + */ +void +DT_Thread_End (Per_Test_Data_t * pt_ptr) +{ + DT_Mdep_LockDestroy (&pt_ptr->Thread_counter_lock); + + /* + * destroy the wait object created by init. + */ + DT_Mdep_wait_object_destroy ( + &pt_ptr->synch_wait_object ); + +} + +/* + * Thread constructor + * + * NOTE: This routine does NOT create a thread as the name implies. The thread + * is created in DT_Thread_Start (which is counter intuitive) + */ +Thread * +DT_Thread_Create (Per_Test_Data_t * pt_ptr, + void (*fn) (void *), + void *param, + unsigned int stacksize) +{ + Thread *thread_ptr; + thread_ptr = (Thread *) DT_MemListAlloc (pt_ptr, "thread.c", THREAD, sizeof (Thread)); + if (thread_ptr == NULL) + { + return NULL; + } + thread_ptr->param = param; + thread_ptr->function = fn; + thread_ptr->thread_handle = 0; + thread_ptr->stacksize = stacksize; + + DT_Mdep_Lock (&pt_ptr->Thread_counter_lock); + pt_ptr->Thread_counter++; + DT_Mdep_Unlock (&pt_ptr->Thread_counter_lock); + + DT_Mdep_Thread_Init_Attributes (thread_ptr); + + return thread_ptr; +} + +/* + * Thread destructor + */ +void +DT_Thread_Destroy (Thread * thread_ptr, Per_Test_Data_t * pt_ptr) +{ + if (thread_ptr) + { + DT_Mdep_Lock (&pt_ptr->Thread_counter_lock); + pt_ptr->Thread_counter--; + DT_Mdep_Unlock (&pt_ptr->Thread_counter_lock); + + DT_Mdep_Thread_Destroy_Attributes (thread_ptr); + DT_MemListFree (pt_ptr, thread_ptr); + } +} + +/* + * Start thread execution NOTE: This routine DOES create a thread in addition + * to starting it whereas DT_Thread_Create just sets up some data structures. + * (this is counter-intuitive) + */ +bool +DT_Thread_Start (Thread * thread_ptr) +{ + return DT_Mdep_Thread_Start (thread_ptr); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.c new file mode 100644 index 00000000..d2518c82 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.c @@ -0,0 +1,543 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+#include "dapl_bpool.h"
+#include "dapl_test_data.h"
+#include "dapl_mdep.h"
+#include "dapl_proto.h"
+#include "dapl_transaction_test.h"
+
+
+static bool
+DT_Transaction_Cmd_Parse_Op (Transaction_Cmd_t * cmd, char *arg)
+{
+    char *c_ptr;
+    int op;
+    if (cmd->num_ops >= MAX_OPS)
+    {
+        DT_Mdep_printf ("Client: Too Many Ops - Max %d\n", MAX_OPS);
+        goto error_return;
+    }
+    op = cmd->num_ops;
+    cmd->num_ops++;
+
+    /* set some defaults */
+    cmd->op[op].seg_size = 4096;
+    cmd->op[op].num_segs = 1;
+    cmd->op[op].reap_send_on_recv = false;
+
+    /*
+     * packet format: <server/client> <RR/RW/SR> [seg_size] [num_segs]
+     */
+    c_ptr = strtok (arg, " \t");
+    if (!c_ptr)
+    {
+        DT_Mdep_printf ("OP first arg must be <server/client>\n");
+        goto error_return;
+    }
+    /* first token is <server/client> : */
+    if (strcmp (c_ptr, "server") == 0)
+    {
+        cmd->op[op].server_initiated = true;
+    }
+    else
+    {
+        if (strcmp (c_ptr, "client") == 0)
+        {
+            cmd->op[op].server_initiated = false;
+        }
+        else
+        {
+            DT_Mdep_printf ("OP first arg must be <server/client>\n");
+            goto error_return;
+        }
+    }
+
+    c_ptr = strtok (0, " \t");
+    if (!c_ptr)
+    {
+        DT_Mdep_printf ("OP Second arg must be <RR/RW/SR>\n");
+        goto error_return;
+    }
+    /* second token is <RR/RW/SR> : */
+    if (strcmp (c_ptr, "RR") == 0)
+    {
+        cmd->op[op].transfer_type = RDMA_READ;
+    }
+    else
+    {
+        if (strcmp (c_ptr, "RW") == 0)
+        {
+            cmd->op[op].transfer_type = RDMA_WRITE;
+        }
+        else
+        {
+            if (strcmp (c_ptr, "SR") == 0)
+            {
+                cmd->op[op].transfer_type = SEND_RECV;
+            }
+            else
+            {
+                DT_Mdep_printf ("OP Second arg must be <RR/RW/SR>\n");
+                goto error_return;
+            }
+        }
+    }
+    /*
+     * there may or may not be additional parameters... [seg_size] [num_segs]
+     * [-f]
+     */
+    c_ptr = strtok (0, " \t");
+    if (c_ptr && strspn (c_ptr, "0123456789") != 0)
+    {
+        cmd->op[op].seg_size = atoi (c_ptr);
+        c_ptr = strtok (0, " \t");
+    }
+    if (c_ptr && strspn (c_ptr, "0123456789") != 0)
+    {
+        cmd->op[op].num_segs = atoi (c_ptr);
+        c_ptr = strtok (0, " \t");
+    }
+    if (c_ptr && strcmp (c_ptr, "-f") == 0)
+    {
+        cmd->op[op].reap_send_on_recv = true;
+        if (cmd->op[op].transfer_type != SEND_RECV)
+        {
+            DT_Mdep_printf ("OP: -f only valid on SEND_RECV\n");
+            goto error_return;
+        }
+        c_ptr = strtok (0, " \t");
+    }
+    if (c_ptr)
+    {
+        DT_Mdep_printf ("OP too many args\n");
+        goto error_return;
+    }
+    return true;
+
+error_return:
+    return false;
+}
+
+
+void
+DT_Transaction_Cmd_Print (Transaction_Cmd_t * cmd)
+{
+    unsigned int i;
+    DT_Mdep_printf ("-------------------------------------\n");
+    DT_Mdep_printf ("TransCmd.server_name : %s\n",
+                    cmd->server_name);
+    DT_Mdep_printf ("TransCmd.num_iterations : %d\n",
+                    cmd->num_iterations);
+    DT_Mdep_printf ("TransCmd.num_threads : %d\n",
+                    cmd->num_threads);
+    DT_Mdep_printf ("TransCmd.eps_per_thread : %d\n",
+                    cmd->eps_per_thread);
+    DT_Mdep_printf ("TransCmd.validate : %d\n",
+                    cmd->validate);
+    DT_Mdep_printf ("TransCmd.dapl_name : %s\n",
+                    cmd->dapl_name);
+    DT_Mdep_printf ("TransCmd.num_ops : %d\n",
+                    cmd->num_ops);
+
+    for (i = 0; i < cmd->num_ops; i++)
+    {
+        DT_Mdep_printf ("TransCmd.op[%d].transfer_type : %s %s\n",
+                        i,
+                        cmd->op[i].transfer_type == 0 ? "RDMA_READ" :
+                        cmd->op[i].transfer_type == 1 ? "RDMA_WRITE" :
+                        "SEND_RECV",
+                        cmd->op[i].server_initiated ?
+                        " (server)" : " (client)" );
+        DT_Mdep_printf ("TransCmd.op[%d].seg_size : %d\n",
+                        i,
+                        cmd->op[i].seg_size);
+        DT_Mdep_printf ("TransCmd.op[%d].num_segs : %d\n",
+                        i,
+                        cmd->op[i].num_segs);
+        DT_Mdep_printf ("TransCmd.op[%d].reap_send_on_recv : %d\n",
+                        i,
+                        cmd->op[i].reap_send_on_recv);
+    }
+}
+
+
+static bool
+DT_Transaction_Cmd_Validate (Transaction_Cmd_t * cmd)
+{
+    unsigned int i;
+    bool has_server_send;
+    bool has_client_send;
+    unsigned int reap_count;
+    has_server_send = false;
+    has_client_send = false;
+    reap_count = 0;
+
+    if (cmd->server_name[0] == '\0')
+    {
+        DT_Mdep_printf ("Must specify server_name in command line or scriptfile\n");
+        return (false);
+    }
+    for (i = 0; i < cmd->num_ops; i++)
+    {
+        switch (cmd->op[i].transfer_type)
+        {
+        case SEND_RECV:
+            {
+                if (cmd->op[i].server_initiated)
+                {
+                    has_server_send = true;
+                }
+                else
+                {
+                    has_client_send = true;
+                }
+                if (cmd->op[i].reap_send_on_recv)
+                {
+                    if (!cmd->op[i].server_initiated)
+                    {
+                        /* client */
+                        reap_count++;
+                    }
+                    else
+                    {
+                        /* server */
+                        if (reap_count > 0)
+                        {
+                            reap_count--;
+                        }
+                        else
+                        {
+                            DT_Mdep_printf ("OP: Unbalanced -f options\n");
+                            return false;
+                        }
+                    }
+                }
+                break;
+            }
+        case RDMA_READ:
+            {
+                break;
+            }
+        case RDMA_WRITE:
+            {
+                break;
+            }
+        }
+    }
+
+    if (!has_server_send || !has_client_send)
+    {
+        DT_Mdep_printf ("Error: Transaction test requires\n");
+        DT_Mdep_printf ("Error: At least one server SR and one client SR Operation\n");
+        return false;
+    }
+    if (reap_count != 0)
+    {
+        DT_Mdep_printf ("OP: Unbalanced -f options\n");
+        return false;
+    }
+    if (cmd->debug)
+    {
+        DT_Transaction_Cmd_Print (cmd);
+    }
+    return true;
+}
+
+
+static void
+DT_Transaction_Cmd_Usage (void)
+{
+    DT_Mdep_printf ("USAGE: ---- TRANSACTION TEST ----\n");
+    DT_Mdep_printf ("USAGE: dapltest -T T\n");
+    DT_Mdep_printf ("USAGE: -s <server_name>\n");
+    DT_Mdep_printf ("USAGE: [-D <device_name>]\n");
+    DT_Mdep_printf ("USAGE: [-d] : debug (zero)\n");
+    DT_Mdep_printf ("USAGE: [-i <num_iterations>] : (1,000)\n");
+    DT_Mdep_printf ("USAGE: [-t <num_threads>] : (1)\n");
+    DT_Mdep_printf ("USAGE: [-w <EPs_per_thread>] : (1)\n");
+    DT_Mdep_printf ("USAGE: [-V] : Validate data: (false)\n");
+    DT_Mdep_printf ("USAGE: [-P] : DTO Completion Polling: (false)\n");
+    DT_Mdep_printf ("USAGE: [-Q] : Use CNOs: (false)\n");
+    DT_Mdep_printf ("USAGE: [-r] : Use RSPs: (false)\n");
+    DT_Mdep_printf ("USAGE: [-R <service_reliability>]\n");
+    DT_Mdep_printf ("USAGE: (BE == QOS_BEST_EFFORT - Default)\n");
+    DT_Mdep_printf ("USAGE: (HT == QOS_HIGH_THROUGHPUT)\n");
+    DT_Mdep_printf ("USAGE: (LL == QOS_LOW_LATENCY)\n");
+    DT_Mdep_printf ("USAGE: (EC == QOS_ECONOMY)\n");
+    DT_Mdep_printf ("USAGE: (PM == QOS_PREMIUM)\n");
+    DT_Mdep_printf ("USAGE: <server/client> : \"server\"/\"client\"\n");
+    DT_Mdep_printf ("USAGE: <transfer_type> : \"SR\" (SEND/RECV)\n");
+    DT_Mdep_printf ("USAGE: <transfer_type> : \"RR\" (RDMA READ)\n");
+    DT_Mdep_printf ("USAGE: <transfer_type> : \"RW\" (RDMA WRITE)\n");
+    DT_Mdep_printf ("USAGE: [seg_size [num_segs] ] : (4096, 1)\n");
+    DT_Mdep_printf ("USAGE: [-f] : Reap sends on recv\n");
+    DT_Mdep_printf ("USAGE:\n");
+    DT_Mdep_printf ("NOTE: -f is only allowed on \"SR\" OPs\n");
+    DT_Mdep_printf ("NOTE: -f must appear in pairs (one client, one server)\n");
+    DT_Mdep_printf (
+        "NOTE: At least one server SR and one client SR OP are required\n");
+    DT_Mdep_printf (
+        "NOTE: and use of -V results in the use of three extra OPs\n");
+}
+
+
+void
+DT_Transaction_Cmd_Init (Transaction_Cmd_t * cmd)
+{
+    memset ((void *)cmd, 0, sizeof (Transaction_Cmd_t));
+    cmd->dapltest_version = DAPLTEST_VERSION;
+    cmd->client_is_little_endian = DT_local_is_little_endian;
+
cmd->num_iterations = 1000; + cmd->num_threads = 1; + cmd->eps_per_thread = 1; + cmd->debug = false; + cmd->validate = false; + cmd->ReliabilityLevel = DAT_QOS_BEST_EFFORT; +} + + +bool +DT_Transaction_Cmd_Parse (Transaction_Cmd_t * cmd, + int my_argc, + char **my_argv, + mygetopt_t * opts) +{ + char c; + unsigned int len; + int i; + char op[100]; + for (;;) + { + c = DT_mygetopt_r (my_argc, my_argv, "rQVPdw:s:D:i:t:v:R:", opts); + if (c == EOF) + { + break; + } + switch (c) + { + case 's': /* server name */ + { + if ((opts->optarg == 0) || strlen (opts->optarg) == 0 + || *opts->optarg == '-') + { + DT_Mdep_printf ("must specify server name\n"); + DT_Transaction_Cmd_Usage (); + return (false); + } + strcpy (cmd->server_name, opts->optarg); + break; + } + case 'D': /* device name */ + { + strcpy (cmd->dapl_name, opts->optarg); + break; + } + + case 'i': /* num iterations */ + { + len = (unsigned int)strspn (opts->optarg, "0123456789"); + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -i option\n"); + DT_Transaction_Cmd_Usage (); + return (false); + } + cmd->num_iterations = atol (opts->optarg); + + break; + } + case 't': /* num threads */ + { + len = (unsigned int)strspn (opts->optarg, "0123456789"); + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -t option\n"); + DT_Transaction_Cmd_Usage (); + return (false); + } + cmd->num_threads = atol (opts->optarg); + break; + } + case 'w': /* num EPs per thread */ + { + len = (unsigned int)strspn (opts->optarg, "0123456789"); + if (len == 0 || len != strlen (opts->optarg)) + { + DT_Mdep_printf ("Syntax Error -w option\n"); + DT_Transaction_Cmd_Usage (); + return (false); + } + cmd->eps_per_thread = atol (opts->optarg); + break; + } + case 'd': /* print debug messages */ + { + DT_dapltest_debug++; + cmd->debug = true; + break; + } + case 'Q': /* funnel EVDs => CNO */ + { + cmd->use_cno = true; + break; + } + case 'r': /* use RSP instead of PSP */ + { + cmd->use_rsp = true; + break; + } + case 'V': /* validate data being sent/received */ + { + cmd->validate = true; + break; + } + case 'P': /* use completion polling */ + { + cmd->poll = true; + break; + } + case 'R': /* Service Reliability Level */ + { + cmd->ReliabilityLevel = DT_ParseQoS (opts->optarg); + break; + } + case '?': + default: + { + DT_Mdep_printf ("Invalid Transaction Test Parameter: %c\n", c); + DT_Transaction_Cmd_Usage (); + return (false); + } + } + } + if (cmd->dapl_name[0] == '\0') + { + if (!DT_Mdep_GetDefaultDeviceName (cmd->dapl_name)) + { + DT_Mdep_printf ("can't get default device name\n"); + DT_Transaction_Cmd_Usage (); + return (false); + } + } + /* + * now parse the transaction ops this is ugly, but it's easier to gather + * each transaction into a single string + */ + for (i = opts->optind; i < my_argc; i++) + { + strcpy (&op[0], my_argv[i]); + while (i < my_argc - 1) + { + i++; + if ((strncmp (my_argv[i], "client", 6) == 0) || + strncmp (my_argv[i], "server", 6) == 0) + { + i--; + break; + } + strcat (op, " "); + strcat (op, my_argv[i]); + } + if (!DT_Transaction_Cmd_Parse_Op (cmd, op)) + { + DT_Transaction_Cmd_Usage (); + return (false); + } + } + + /* + * If we're going to validate the data, we append 3 OPs that + * serve as barriers so that both the client and server can + * validate their entire set of recv transactions without + * interference. 
+ * + * The first op appended serves to notify the client that the + * server is at the rendezvous and will transfer nothing else, + * so the client can validate all recv buffers. The second op + * notifies the server that the client is quiescent, so the + * server can safely validate its recv buffers. The final op + * tells the client that the server is done, and both can + * proceed with the next iteration. + */ + if (cmd->validate) + { + DT_Mdep_printf ("NOTE: Adding OP \"server SR\" - for validation\n"); + memcpy (op, "server SR", strlen ("server SR") + 1); + DT_Transaction_Cmd_Parse_Op (cmd, op); + + DT_Mdep_printf ("NOTE: Adding OP \"client SR\" - for validation\n"); + memcpy (op, "client SR", strlen ("client SR") + 1); + DT_Transaction_Cmd_Parse_Op (cmd, op); + + DT_Mdep_printf ("NOTE: Adding OP \"server SR\" - for validation\n"); + memcpy (op, "server SR", strlen ("server SR") + 1); + DT_Transaction_Cmd_Parse_Op (cmd, op); + } + if (!DT_Transaction_Cmd_Validate (cmd)) + { + DT_Transaction_Cmd_Usage (); + return (false); + } + return (true); +} + + +void +DT_Transaction_Cmd_Endian (Transaction_Cmd_t * cmd, bool to_wire) +{ + unsigned int i; + + cmd->dapltest_version = DT_Endian32 (cmd->dapltest_version); + cmd->num_iterations = DT_Endian32 (cmd->num_iterations); + cmd->num_threads = DT_Endian32 (cmd->num_threads); + cmd->eps_per_thread = DT_Endian32 (cmd->eps_per_thread); + cmd->use_cno = DT_Endian32 (cmd->use_cno); + cmd->use_rsp = DT_Endian32 (cmd->use_rsp); + cmd->debug = DT_Endian32 (cmd->debug); + cmd->validate = DT_Endian32 (cmd->validate); + cmd->ReliabilityLevel = DT_Endian32 (cmd->ReliabilityLevel); + + if (!to_wire) + { + cmd->num_ops = DT_Endian32 (cmd->num_ops); + } + for (i = 0; i < cmd->num_ops; i++) + { + cmd->op[i].server_initiated = DT_Endian32 (cmd->op[i].server_initiated); + cmd->op[i].transfer_type = DT_Endian32 (cmd->op[i].transfer_type); + cmd->op[i].num_segs = DT_Endian32 (cmd->op[i].num_segs); + cmd->op[i].seg_size = DT_Endian32 (cmd->op[i].seg_size); + cmd->op[i].reap_send_on_recv = + DT_Endian32 (cmd->op[i].reap_send_on_recv); + } + if (to_wire) + { + cmd->num_ops = DT_Endian32 (cmd->num_ops); + } +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.h new file mode 100644 index 00000000..a1190e74 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_cmd.h @@ -0,0 +1,66 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */ + +#ifndef __DAPL_TRANSACTION_CMD_H__ +#define __DAPL_TRANSACTION_CMD_H__ + +#include "dapl_mdep.h" +#include + +#define MAX_OPS 100 +#define NAME_SZ 256 + +#pragma pack(1) +typedef struct +{ + DAT_UINT32 server_initiated; + DAT_UINT32 transfer_type; + DAT_UINT32 num_segs; + DAT_UINT32 seg_size; + DAT_UINT32 reap_send_on_recv; +}Transaction_Cmd_Op_t; + +typedef struct +{ + DAT_UINT32 dapltest_version; + DAT_UINT32 client_is_little_endian; + char server_name[NAME_SZ]; /* -s */ + DAT_UINT32 num_iterations; /* -i */ + DAT_UINT32 num_threads; /* -t */ + DAT_UINT32 eps_per_thread; /* -w */ + DAT_UINT32 use_cno; /* -Q */ + DAT_UINT32 use_rsp; /* -r */ + DAT_UINT32 debug; /* -d */ + DAT_UINT32 validate; /* -V */ + DAT_UINT32 poll; /* -P */ + char dapl_name[NAME_SZ]; /* -D */ + DAT_QOS ReliabilityLevel; + DAT_UINT32 num_ops; + Transaction_Cmd_Op_t op[MAX_OPS]; +} Transaction_Cmd_t; +#pragma pack() + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.c new file mode 100644 index 00000000..65972351 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.c @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. 
+ */
+
+#include "dapl_transaction_stats.h"
+#include "dapl_proto.h"
+#include "dapl_test_data.h"
+
+void
+DT_init_transaction_stats (Transaction_Stats_t * transaction_stats, unsigned int num)
+{
+ DT_Mdep_LockInit (&transaction_stats->lock);
+
+ transaction_stats->wait_count = num;
+ transaction_stats->num_ops = 0;
+ transaction_stats->time_ms = 0;
+ transaction_stats->bytes_send = 0;
+ transaction_stats->bytes_recv = 0;
+ transaction_stats->bytes_rdma_read = 0;
+ transaction_stats->bytes_rdma_write = 0;
+}
+
+void
+DT_transaction_stats_set_ready (Transaction_Stats_t * transaction_stats)
+{
+ DT_Mdep_Lock (&transaction_stats->lock);
+ transaction_stats->wait_count--;
+
+ DT_Mdep_debug (("Received Sync Message from server (%d left)\n",
+ transaction_stats->wait_count));
+ DT_Mdep_Unlock (&transaction_stats->lock);
+}
+
+bool
+DT_transaction_stats_wait_for_all (Transaction_Stats_t * transaction_stats)
+{
+ unsigned int loop_count;
+ loop_count = 100 * 10; /* 1000 polls of a 10ms sleep = 10 seconds */
+ while (transaction_stats->wait_count != 0 && loop_count != 0)
+ {
+ DT_Mdep_Sleep (10);
+ loop_count--;
+ }
+ if (loop_count == 0)
+ {
+ DT_Mdep_printf ("FAIL: %d Server test connections did not report ready.\n",
+ transaction_stats->wait_count);
+ return false;
+ }
+ return true;
+}
+
+
+/*
+ *
+ */
+void
+DT_update_transaction_stats (Transaction_Stats_t * transaction_stats,
+ unsigned int num_ops,
+ unsigned int time_ms,
+ unsigned int bytes_send,
+ unsigned int bytes_recv,
+ unsigned int bytes_rdma_read,
+ unsigned int bytes_rdma_write)
+{
+ DT_Mdep_Lock (&transaction_stats->lock);
+
+ /* look for the longest time... */
+ if (time_ms > transaction_stats->time_ms)
+ {
+ transaction_stats->time_ms = time_ms;
+ }
+
+ transaction_stats->num_ops += num_ops;
+ transaction_stats->bytes_send += bytes_send;
+ transaction_stats->bytes_recv += bytes_recv;
+ transaction_stats->bytes_rdma_read += bytes_rdma_read;
+ transaction_stats->bytes_rdma_write += bytes_rdma_write;
+ DT_Mdep_Unlock (&transaction_stats->lock);
+}
+
+/*
+ *
+ */
+void
+DT_print_transaction_stats (Transaction_Stats_t * transaction_stats,
+ unsigned int num_threads,
+ unsigned int num_EPs)
+{
+ double time_s;
+ double mbytes_send;
+ double mbytes_recv;
+ double mbytes_rdma_read;
+ double mbytes_rdma_write;
+ int total_ops;
+ DT_Mdep_Lock (&transaction_stats->lock);
+ time_s = (double) (transaction_stats->time_ms) / 1000;
+ if (time_s == 0.0)
+ {
+ DT_Mdep_printf ("----- Test completed successfully, but cannot calculate stats as not\n"
+ "----- enough time has elapsed.\n"
+ "----- Try running the test with more iterations.\n");
+ goto unlock_and_return;
+ }
+ mbytes_send = (double) transaction_stats->bytes_send / 1000 / 1000;
+ mbytes_recv = (double) transaction_stats->bytes_recv / 1000 / 1000;
+ mbytes_rdma_read = (double) transaction_stats->bytes_rdma_read / 1000 / 1000;
+ mbytes_rdma_write = (double) transaction_stats->bytes_rdma_write / 1000 / 1000;
+ total_ops = transaction_stats->num_ops;
+
+ if ( 0 == total_ops )
+ {
+ DT_Mdep_printf ("----- Test completed successfully, but no operations!\n");
+ goto unlock_and_return;
+ }
+
+ DT_Mdep_printf ("----- Stats ---- : %u threads, %u EPs\n",
+ num_threads, num_EPs);
+ DT_Mdep_printf ("Total IBWQE : %7d.%02d WQE/Sec\n",
+ whole (total_ops / time_s),
+ hundredths (total_ops / time_s));
+ DT_Mdep_printf ("Total Time : %7d.%02d sec\n",
+ whole (time_s),
+ hundredths (time_s));
+ DT_Mdep_printf ("Total Send : %7d.%02d MB - %7d.%02d MB/Sec\n",
+ whole (mbytes_send),
+ hundredths (mbytes_send),
+ whole 
(mbytes_send / time_s), + hundredths (mbytes_send / time_s)); + DT_Mdep_printf ("Total Recv : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_recv), + hundredths (mbytes_recv), + whole (mbytes_recv / time_s), + hundredths (mbytes_recv / time_s)); + DT_Mdep_printf ("Total RDMA Read : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_rdma_read), + hundredths (mbytes_rdma_read), + whole (mbytes_rdma_read / time_s), + hundredths (mbytes_rdma_read / time_s)); + DT_Mdep_printf ("Total RDMA Write : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_rdma_write), + hundredths (mbytes_rdma_write), + whole (mbytes_rdma_write / time_s), + hundredths (mbytes_rdma_write / time_s)); + +unlock_and_return: + DT_Mdep_Unlock (&transaction_stats->lock); +} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.h new file mode 100644 index 00000000..ed1cb6eb --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_stats.h @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#ifndef __DAPL_TRANSACTION_STATS_H__ +#define __DAPL_TRANSACTION_STATS_H__ + +#include "dapl_mdep.h" + +#define whole(num) ((unsigned int)(num)) +#define hundredths(num) ((unsigned int)(((num) - (unsigned int)(num)) * 100)) +#pragma pack(1) +typedef struct +{ + DT_Mdep_LockType lock; + unsigned int wait_count; + unsigned int num_ops; + unsigned int time_ms; + unsigned int bytes_send; + unsigned int bytes_recv; + unsigned int bytes_rdma_read; + unsigned int bytes_rdma_write; +} Transaction_Stats_t; +#pragma pack() +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.c new file mode 100644 index 00000000..c358de74 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.c @@ -0,0 +1,1922 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. 
The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. + * + * Licensee has the right to choose either one of the above two licenses. + * + * Redistributions of source code must retain both the above copyright + * notice and either one of the license notices. + * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_bpool.h" +#include "dapl_transaction_stats.h" +#include "dapl_test_data.h" +#include "dapl_mdep.h" +#include "dapl_memlist.h" +#include "dapl_proto.h" +#include "dapl_transaction_cmd.h" +#include "dapl_transaction_test.h" +#include + +#define RMI_SEND_BUFFER_ID 0 +#define RMI_RECV_BUFFER_ID 1 +#define SYNC_SEND_BUFFER_ID 2 +#define SYNC_RECV_BUFFER_ID 3 + +/* + * The sync buffers are sent to say "Go!" to the other side. + * This is a handy place to test whether a zero-sized send into + * a zero-sized buffer actually works. If the client side hangs + * in 'Wait for Sync Message' when this is zero, it's a DAPL bug. + */ +#define SYNC_BUFF_SIZE 64 + +#define DFLT_QLEN 8 /* default event queue length */ +#define DFLT_TMO 10 /* default timeout (seconds) */ +#define MAX_CONN_RETRY 8 + +/****************************************************************************/ +void +DT_Transaction_Test_Client (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE ia_handle, + DAT_IA_ADDRESS_PTR remote_ia_addr) +{ + Transaction_Cmd_t *cmd = &pt_ptr->Params.u.Transaction_Cmd; + unsigned int i; + + DT_init_transaction_stats (&pt_ptr->Client_Stats, + cmd->num_threads * cmd->eps_per_thread); + + /* Now go set up the client test threads */ + for (i = 0; i < cmd->num_threads; i++) + { + unsigned int port_num = pt_ptr->Server_Info.first_port_number + + i * cmd->eps_per_thread; + + DT_Mdep_debug (("Client: Starting Client side of test\n")); + if (!DT_Transaction_Create_Test (pt_ptr, + ia_handle, + false, + port_num, + pt_ptr->Server_Info.is_little_endian, + remote_ia_addr)) + { + DT_Mdep_printf ("Client: Cannot Create Test!\n"); + break; + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... + *****/DT_Mdep_Sleep (5000); +#endif + + } + + /* Wait until end of all threads */ + while (pt_ptr->Thread_counter > 0) + { + DT_Mdep_Sleep (100); + } + + DT_print_transaction_stats (&pt_ptr->Client_Stats, + cmd->num_threads, + cmd->eps_per_thread); +} + + +/****************************************************************************/ +void +DT_Transaction_Test_Server (void *params) +{ + Per_Test_Data_t *pt_ptr = (Per_Test_Data_t *) params; + Transaction_Cmd_t *cmd = &pt_ptr->Params.u.Transaction_Cmd; + unsigned int i; + + pt_ptr->Countdown_Counter = cmd->num_threads; + + for (i = 0; i < cmd->num_threads; i++) + { + unsigned int port_num = pt_ptr->Server_Info.first_port_number + + i * cmd->eps_per_thread; + + if (!DT_Transaction_Create_Test (pt_ptr, + pt_ptr->ps_ptr->ia_handle, + true, + port_num, + pt_ptr->Client_Info.is_little_endian, + (DAT_IA_ADDRESS_PTR) 0)) + { + DT_Mdep_printf ("Server: Cannot Create Test!\n"); + break; + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... 
+ *****/DT_Mdep_Sleep (5000); +#endif + + } + + /* Wait until end of all sub-threads */ + while (pt_ptr->Thread_counter > 1) + { + DT_Mdep_Sleep (100); + } + DT_Thread_Destroy (pt_ptr->thread, pt_ptr); /* destroy Master thread */ + + DT_Mdep_Lock (&pt_ptr->ps_ptr->num_clients_lock); + pt_ptr->ps_ptr->num_clients--; + DT_Mdep_Unlock (&pt_ptr->ps_ptr->num_clients_lock); + + /* NB: Server has no pt_ptr->remote_netaddr */ + DT_PrintMemList (pt_ptr); /* check if we return all space allocated */ + DT_Mdep_LockDestroy (&pt_ptr->Thread_counter_lock); + DT_Mdep_LockDestroy (&pt_ptr->MemListLock); + DT_Free_Per_Test_Data (pt_ptr); + DT_Mdep_printf ("Server: Transaction Test Finished for this client\n"); + /* + * check memory leaking DT_Mdep_printf("Server: App allocated Memory Left: + * %d\n", alloc_count); + */ +} + + +/****************************************************************************/ +/* + * DT_Transaction_Create_Test() + * + * Initialize what we can in the test structure. Then fork a thread to do the + * work. + */ + +bool +DT_Transaction_Create_Test (Per_Test_Data_t * pt_ptr, + DAT_IA_HANDLE *ia_handle, + DAT_BOOLEAN is_server, + unsigned int port_num, + DAT_BOOLEAN remote_is_little_endian, + DAT_IA_ADDRESS_PTR remote_ia_addr) +{ + Transaction_Test_t *test_ptr; + + test_ptr = (Transaction_Test_t *) DT_MemListAlloc (pt_ptr, + "transaction_test_t", + TRANSACTIONTEST, + sizeof (Transaction_Test_t)); + if (!test_ptr) + { + DT_Mdep_printf ("No Memory to create transaction test structure!\n"); + return false; + } + + /* Unused fields zeroed by allocator */ + test_ptr->remote_is_little_endian = remote_is_little_endian; + test_ptr->is_server = is_server; + test_ptr->pt_ptr = pt_ptr; + test_ptr->ia_handle = ia_handle; + test_ptr->base_port = (DAT_CONN_QUAL) port_num; + test_ptr->cmd = &pt_ptr->Params.u.Transaction_Cmd; + test_ptr->time_out = DFLT_TMO * 1000; /* DFLT_TMO seconds */ + + /* FIXME more analysis needs to go into determining the minimum */ + /* possible value for DFLT_QLEN. This evd_length value will be */ + /* used for all EVDs. There are a number of dependencies imposed */ + /* by this design (ex. min(cr_evd_len) != min(recv_evd_len) ). */ + /* In the future it may be best to use individual values. */ + test_ptr->evd_length = DT_max ( + DFLT_QLEN, + test_ptr->cmd->eps_per_thread * test_ptr->cmd->num_ops); + + test_ptr->remote_ia_addr = remote_ia_addr; + + test_ptr->thread = DT_Thread_Create (pt_ptr, + DT_Transaction_Main, + test_ptr, + DT_MDEP_DEFAULT_STACK_SIZE); + if (test_ptr->thread == 0) + { + DT_Mdep_printf ("No memory!\n"); + DT_MemListFree (test_ptr->pt_ptr, test_ptr); + return false; + } + DT_Thread_Start (test_ptr->thread); + + return true; +} + + +/****************************************************************************/ +/* + * Main Transaction Test Execution Routine + * + * Both client and server threads start here, with IA already open. + * Each test thread establishes a connection with its counterpart. + * They swap remote memory information (if necessary), then set up + * buffers and local data structures. When ready, the two sides + * synchronize, then testing begins. + */ +void +DT_Transaction_Main (void *param) +{ + Transaction_Test_t *test_ptr = (Transaction_Test_t *) param; + DAT_RETURN ret; + DAT_UINT32 i, j; + bool success = false; + Per_Test_Data_t *pt_ptr; + Thread *thread; + DAT_DTO_COOKIE dto_cookie; + char *private_data_str = "DAPL and RDMA rule! 
Test 4321."; + DAT_EVENT_NUMBER event_num; + + pt_ptr = test_ptr->pt_ptr; + thread = test_ptr->thread; + + /* create a protection zone */ + ret = dat_pz_create (test_ptr->ia_handle, &test_ptr->pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_pz_create error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->pz_handle = DAT_HANDLE_NULL; + goto test_failure; + } + + /* Create a CNO if necessary */ + if (test_ptr->cmd->use_cno) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: CNO use not yet supported (%s)\n", + (DAT_UVERYLONG)test_ptr->base_port, "ignored"); + /* ignored - just fall through */ + } + + /* create 4 EVDs - recv, request+RMR, conn-request, connect */ + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->evd_length, + test_ptr->cno_handle, + DAT_EVD_DTO_FLAG, + &test_ptr->recv_evd_hdl); /* recv */ + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (recv) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->recv_evd_hdl = DAT_HANDLE_NULL; + goto test_failure; + } + + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->evd_length, + test_ptr->cno_handle, + DAT_EVD_DTO_FLAG | DAT_EVD_RMR_BIND_FLAG, + &test_ptr->reqt_evd_hdl); /* request + rmr bind */ + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (request) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->reqt_evd_hdl = DAT_HANDLE_NULL; + goto test_failure; + } + + if (pt_ptr->local_is_server) + { + /* Client-side doesn't need CR events */ + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->evd_length, + DAT_HANDLE_NULL, + DAT_EVD_CR_FLAG, + &test_ptr->creq_evd_hdl); /* cr */ + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (cr) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->creq_evd_hdl = DAT_HANDLE_NULL; + goto test_failure; + } + } + + ret = dat_evd_create (test_ptr->ia_handle, + test_ptr->evd_length, + DAT_HANDLE_NULL, + DAT_EVD_CONNECTION_FLAG, + &test_ptr->conn_evd_hdl); /* conn */ + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_create (conn) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + test_ptr->conn_evd_hdl = DAT_HANDLE_NULL; + goto test_failure; + } + + /* Allocate per-EP data */ + test_ptr->ep_context = (Ep_Context_t *) + DT_MemListAlloc (pt_ptr, + "transaction_test", + EPCONTEXT, + test_ptr->cmd->eps_per_thread + * sizeof (Ep_Context_t)); + if (!test_ptr->ep_context) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for EP context\n", + (DAT_UVERYLONG)test_ptr->base_port); + goto test_failure; + } + + /* + * Set up the per-EP contexts: + * create the EP + * allocate buffers for remote memory info exchange + * post the receive buffers + * connect + * set up buffers and remote memory info + * send across our info + * recv the other side's info and extract what we need + */ + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_EP_ATTR ep_attr; + DAT_UINT32 buff_size = MAX_OPS * sizeof (RemoteMemoryInfo); + + /* + * Adjust default EP attributes to fit the requested test. + * This is simplistic; in that we don't count ops of each + * type and direction, checking EP limits. We just try to + * be sure the EP's WQs are large enough. The "+2" is for + * the RemoteMemInfo and Sync receive buffers. 
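+ * For instance, a command with num_ops == 4 yields WQ depths of at
+ * least 6 here; the adjustment below is applied per EP.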
+ */ + ep_attr = pt_ptr->ep_attr; + if (ep_attr.max_recv_dtos < (DAT_COUNT)test_ptr->cmd->num_ops + 2) + { + ep_attr.max_recv_dtos = (DAT_COUNT)test_ptr->cmd->num_ops + 2; + } + if (ep_attr.max_request_dtos < (DAT_COUNT)test_ptr->cmd->num_ops + 2) + { + ep_attr.max_request_dtos = test_ptr->cmd->num_ops + 2; + } + + /* Create EP */ + ret = dat_ep_create (test_ptr->ia_handle, /* IA */ + test_ptr->pz_handle, /* PZ */ + test_ptr->recv_evd_hdl, /* recv */ + test_ptr->reqt_evd_hdl, /* request */ + test_ptr->conn_evd_hdl, /* connect */ + &ep_attr, /* EP attrs */ + &test_ptr->ep_context[i].ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ep_create #%d error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret)); + test_ptr->ep_context[i].ep_handle = DAT_HANDLE_NULL; + goto test_failure; + } + + /* + * Allocate a buffer pool so we can exchange the + * remote memory info and initialize. + */ + test_ptr->ep_context[i].bp = DT_BpoolAlloc (pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context[i].ep_handle, + DAT_HANDLE_NULL, /* rmr */ + buff_size, + 4, + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if (!test_ptr->ep_context[i].bp) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for remote memory buffers\n", + (DAT_UVERYLONG)test_ptr->base_port); + goto test_failure; + } + + DT_Mdep_spew (3, ("0: RMI_SEND %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, 0))); + DT_Mdep_spew (3, ("1: RMI_RECV %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, 1))); + DT_Mdep_spew (3, ("2: SYNC_SEND %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, 2))); + DT_Mdep_spew (3, ("3: SYNC_RECV %p\n", (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, 3))); + + /* + * Post recv and sync buffers + */ + if (!DT_post_recv_buffer ( test_ptr->ep_context[i].ep_handle, + test_ptr->ep_context[i].bp, + RMI_RECV_BUFFER_ID, + buff_size)) + { + /* error message printed by DT_post_recv_buffer */ + goto test_failure; + } + if (!DT_post_recv_buffer ( test_ptr->ep_context[i].ep_handle, + test_ptr->ep_context[i].bp, + SYNC_RECV_BUFFER_ID, + SYNC_BUFF_SIZE)) + { + /* error message printed by DT_post_recv_buffer */ + goto test_failure; + } + + /* + * Establish the connection + */ + test_ptr->ep_context[i].ia_port = test_ptr->base_port + i; + + if (pt_ptr->local_is_server) + { + if (test_ptr->cmd->use_rsp) + { + /* + * Server - create a single-use RSP and + * await a connection for this EP + */ + + ret = dat_rsp_create (test_ptr->ia_handle, + test_ptr->ep_context[i].ia_port, + test_ptr->ep_context[i].ep_handle, + test_ptr->creq_evd_hdl, + &test_ptr->ep_context[i].rsp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_rsp_create #%d error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret)); + goto test_failure; + } + } + else + { + ret = dat_psp_create (test_ptr->ia_handle, + test_ptr->ep_context[i].ia_port, + test_ptr->creq_evd_hdl, + DAT_PSP_CONSUMER_FLAG, + &test_ptr->ep_context[i].psp_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_psp_create #%d error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret)); + goto test_failure; + } + + DT_Mdep_debug (("Server[" F64x "]: Listen #%d on PSP port 0x" F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port, i, + (DAT_UVERYLONG)test_ptr->ep_context[i].ia_port)); + } + } + } + + /* Here's where we tell the server process that this thread is + * ready to wait for connection 
requests from the remote end. + * Modify the synch wait semantics at your own risk - if these + * signals and waits aren't here, there will be chronic + * connection rejection timing problems. + */ + if (pt_ptr->local_is_server) + { + DT_Mdep_Lock (&pt_ptr->Thread_counter_lock); + pt_ptr->Countdown_Counter--; + /* Deliberate pre-decrement. Post decrement won't + * work here, so don't do it. + */ + if (pt_ptr->Countdown_Counter <= 0 ) + { + DT_Mdep_wait_object_wakeup(&pt_ptr->synch_wait_object); + } + + DT_Mdep_Unlock (&pt_ptr->Thread_counter_lock); + } + + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_UINT32 buff_size = MAX_OPS * sizeof (RemoteMemoryInfo); + RemoteMemoryInfo *RemoteMemInfo; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_CR_ARRIVAL_EVENT_DATA cr_stat; + DAT_CR_HANDLE cr_handle; + + /* + * Establish the connection + */ + + if (pt_ptr->local_is_server) + { + DAT_CR_PARAM cr_param; + + if (test_ptr->cmd->use_rsp) + { + + /* wait for the connection request */ + if (!DT_cr_event_wait (test_ptr->creq_evd_hdl, &cr_stat) || + !DT_cr_check ( &cr_stat, + test_ptr->ep_context[i].rsp_handle, + test_ptr->ep_context[i].ia_port, + &cr_handle, + "Server") ) + { + goto test_failure; + } + + ret = dat_cr_query (cr_handle, + DAT_CR_FIELD_ALL, + &cr_param); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_cr_query #%d error:(%x) %s\n", + (DAT_UVERYLONG)test_ptr->base_port, i, ret, DT_RetToString (ret)); + } + else + { + if ( cr_param.private_data_size == 0 || + strncmp((char *)cr_param.private_data, + private_data_str, + cr_param.private_data_size) != 0 ) + { + DT_Mdep_printf ("--Private Data mismatch!\n"); + } + else + { + DT_Mdep_debug ( ("--Private Data: %d: <%s>\n", + cr_param.private_data_size, + (char *)cr_param.private_data) ); + } + } + + /* what, me query? 
just try to accept the connection */
+ ret = dat_cr_accept (cr_handle,
+ 0, /* NULL for RSP */
+ 0, (DAT_PVOID)0 /* no private data */ );
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_cr_accept #%d error: %s\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret));
+ /* cr_handle consumed on failure */
+ goto test_failure;
+ }
+
+ /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */
+ if (!DT_conn_event_wait ( test_ptr->ep_context[i].ep_handle,
+ test_ptr->conn_evd_hdl,
+ &event_num))
+ {
+ /* error message printed by DT_conn_event_wait */
+ goto test_failure;
+ }
+ /* throw away single-use RSP */
+ ret = dat_rsp_free (test_ptr->ep_context[i].rsp_handle);
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_rsp_free #%d error: %s\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret));
+ goto test_failure;
+ }
+
+ }
+ else
+ {
+ /*
+ * Server - use a short-lived PSP instead of an RSP
+ */
+ /* wait for a connection request */
+ if (!DT_cr_event_wait (test_ptr->creq_evd_hdl,
+ &cr_stat) )
+ {
+ DT_Mdep_printf ("Test[" F64x "]: DT_cr_event_wait #%d failed\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i);
+ goto test_failure;
+ }
+
+ if ( !DT_cr_check ( &cr_stat,
+ test_ptr->ep_context[i].psp_handle,
+ test_ptr->ep_context[i].ia_port,
+ &cr_handle,
+ "Server") )
+ {
+ goto test_failure;
+ }
+
+ ret = dat_cr_query (cr_handle,
+ DAT_CR_FIELD_ALL,
+ &cr_param);
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_cr_query #%d error: %s\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret));
+ }
+ else
+ {
+ if ( cr_param.private_data_size == 0 ||
+ strncmp((char *)cr_param.private_data,
+ private_data_str,
+ cr_param.private_data_size) != 0 )
+ {
+ DT_Mdep_printf ("--Private Data mismatch!\n");
+ }
+ else
+ {
+ DT_Mdep_debug ( ("--Private Data: %d: <%s>\n",
+ cr_param.private_data_size,
+ (char *)cr_param.private_data) );
+ }
+ }
+
+
+ /* what, me query? just try to accept the connection */
+ ret = dat_cr_accept (cr_handle,
+ test_ptr->ep_context[i].ep_handle,
+ 0, (DAT_PVOID)0 /* no private data */ );
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_cr_accept #%d error: %s\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret));
+ /* cr_handle consumed on failure */
+ (void) dat_psp_free (test_ptr->ep_context[i].psp_handle);
+ goto test_failure;
+ }
+
+ /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */
+ if (!DT_conn_event_wait ( test_ptr->ep_context[i].ep_handle,
+ test_ptr->conn_evd_hdl,
+ &event_num))
+ {
+ /* error message printed by DT_conn_event_wait */
+ (void) dat_psp_free (test_ptr->ep_context[i].psp_handle);
+ goto test_failure;
+ }
+
+ /* throw away single-use PSP */
+ ret = dat_psp_free (test_ptr->ep_context[i].psp_handle);
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_psp_free #%d error: %s\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret));
+ goto test_failure;
+ }
+ } /* end short-lived PSP */
+
+ DT_Mdep_debug (("Server[" F64x "]: Accept #%d on port 0x" F64x "\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i,
+ (DAT_UVERYLONG)test_ptr->ep_context[i].ia_port));
+ }
+ else
+ {
+ /*
+ * Client - connect
+ */
+ unsigned int retry_cnt = 0;
+ DAT_UINT32 buff_size = MAX_OPS * sizeof (RemoteMemoryInfo);
+
+ DT_Mdep_debug (("Client[" F64x "]: Connect #%d on port 0x" F64x "\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i,
+ (DAT_UVERYLONG)test_ptr->ep_context[i].ia_port));
+
+#ifdef CM_BUSTED
+ /***** XXX Chill out a bit to give the kludged CM a chance ...
+ *****/DT_Mdep_Sleep (5000);
+#endif
+
+retry:
+ ret = dat_ep_connect (test_ptr->ep_context[i].ep_handle,
+ test_ptr->remote_ia_addr,
+ test_ptr->ep_context[i].ia_port,
+ DAT_TIMEOUT_INFINITE,
+ (DAT_COUNT)strlen(private_data_str),private_data_str,
+ /* 0, (DAT_PVOID) 0, */ /* no private data */
+ pt_ptr->Params.ReliabilityLevel,
+ DAT_CONNECT_DEFAULT_FLAG);
+ if (ret != DAT_SUCCESS)
+ {
+ DT_Mdep_printf ("Test[" F64x "]: dat_ep_connect #%d error: %s (0x%x)\n",
+ (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret), ret);
+ goto test_failure;
+ }
+
+ /* wait for DAT_CONNECTION_EVENT_ESTABLISHED */
+ if (!DT_conn_event_wait ( test_ptr->ep_context[i].ep_handle,
+ test_ptr->conn_evd_hdl,
+ &event_num))
+ {
+ /* error message printed by DT_conn_event_wait */
+ if ( event_num == DAT_CONNECTION_EVENT_PEER_REJECTED )
+ {
+ DT_Mdep_Sleep (1000);
+ /*
+ * See if any buffers were flushed as a result of
+ * the REJECT; clean them up and repost if so
+ */
+ {
+ DAT_EVENT event;
+ DAT_COUNT drained = 0;
+
+ dat_ep_reset (test_ptr->ep_context[i].ep_handle);
+ do
+ {
+ ret = dat_evd_dequeue ( test_ptr->recv_evd_hdl,
+ &event);
+ drained++;
+ } while (ret != DAT_QUEUE_EMPTY);
+
+ if (drained > 1)
+ {
+ /*
+ * Post recv and sync buffers
+ */
+ if (!DT_post_recv_buffer ( test_ptr->ep_context[i].ep_handle,
+ test_ptr->ep_context[i].bp,
+ RMI_RECV_BUFFER_ID,
+ buff_size))
+ {
+ /* error message printed by DT_post_recv_buffer */
+ goto test_failure;
+ }
+ if (!DT_post_recv_buffer ( test_ptr->ep_context[i].ep_handle,
+ test_ptr->ep_context[i].bp,
+ SYNC_RECV_BUFFER_ID,
+ SYNC_BUFF_SIZE))
+ {
+ /* error message printed by DT_post_recv_buffer */
+ goto test_failure;
+ }
+ }
+ }
+ DT_Mdep_printf ("Client[" F64x "]: retrying connection...\n",
+ (DAT_UVERYLONG)test_ptr->base_port);
+ retry_cnt++;
+ if (retry_cnt < MAX_CONN_RETRY)
+ {
+ goto retry;
+ }
+ }
+ /* error message printed by DT_conn_event_wait */
+ goto test_failure;
+ }
+
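+ /*
+ * At this point the ESTABLISHED event has arrived: either the
+ * first dat_ep_connect() succeeded, or the peer-reject path
+ * above reset the EP, re-posted the flushed RMI/Sync receive
+ * buffers, and retried (at most MAX_CONN_RETRY attempts).
+ */
+ 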
DT_Mdep_debug (("Client[" F64x "]: Got Connection #%d\n", + (DAT_UVERYLONG)test_ptr->base_port, i)); + } + +#ifdef CM_BUSTED + /***** XXX Chill out a bit to give the kludged CM a chance ... + *****/DT_Mdep_Sleep (5000); +#endif + + /* + * Fill in the test_ptr with relevant command info + */ + for (j = 0; j < test_ptr->cmd->num_ops; j++) + { + test_ptr->ep_context[i].op[j].server_initiated + = test_ptr->cmd->op[j].server_initiated; + test_ptr->ep_context[i].op[j].transfer_type + = test_ptr->cmd->op[j].transfer_type; + test_ptr->ep_context[i].op[j].num_segs + = test_ptr->cmd->op[j].num_segs; + test_ptr->ep_context[i].op[j].seg_size + = test_ptr->cmd->op[j].seg_size; + test_ptr->ep_context[i].op[j].reap_send_on_recv + = test_ptr->cmd->op[j].reap_send_on_recv; + } + + /* + * Exchange remote memory info: If we're going to participate + * in an RDMA, we need to allocate memory buffers and advertise + * them to the other side. + */ + for (j = 0; j < test_ptr->cmd->num_ops; j++) + { + DAT_BOOLEAN us; + + us = (pt_ptr->local_is_server && + test_ptr->ep_context[i].op[j].server_initiated) || + (!pt_ptr->local_is_server && + !test_ptr->ep_context[i].op[j].server_initiated); + + test_ptr->ep_context[i].op[j].Rdma_Context = (DAT_RMR_CONTEXT) 0; + test_ptr->ep_context[i].op[j].Rdma_Address = (DAT_PVOID) 0; + + switch (test_ptr->ep_context[i].op[j].transfer_type) + { + case RDMA_READ: + { + test_ptr->ep_context[i].op[j].bp = + DT_BpoolAlloc (pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context[i].ep_handle, + test_ptr->reqt_evd_hdl, + test_ptr->ep_context[i].op[j].seg_size, + test_ptr->ep_context[i].op[j].num_segs, + DAT_OPTIMAL_ALIGNMENT, + false, + !us ? true : false); + if (!test_ptr->ep_context[i].op[j].bp) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for buffers (RDMA/RD)\n", + (DAT_UVERYLONG)test_ptr->base_port); + goto test_failure; + } + if (!us) + { + test_ptr->ep_context[i].op[j].Rdma_Context = + DT_Bpool_GetRMR (test_ptr->ep_context[i].op[j].bp, 0); + test_ptr->ep_context[i].op[j].Rdma_Address = + (DAT_PVOID) (uintptr_t) + DT_Bpool_GetBuffer (test_ptr->ep_context[i].op[j].bp, + 0); + DT_Mdep_spew (3, ("not-us: RDMA/RD [ va=%p, ctxt=%x ]\n", + test_ptr->ep_context[i].op[j].Rdma_Address, + test_ptr->ep_context[i].op[j].Rdma_Context)); + } + break; + } + + case RDMA_WRITE: + { + test_ptr->ep_context[i].op[j].bp = + DT_BpoolAlloc (pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context[i].ep_handle, + test_ptr->reqt_evd_hdl, + test_ptr->ep_context[i].op[j].seg_size, + test_ptr->ep_context[i].op[j].num_segs, + DAT_OPTIMAL_ALIGNMENT, + !us ? 
true : false, + false); + if (!test_ptr->ep_context[i].op[j].bp) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for buffers (RDMA/WR)\n", + (DAT_UVERYLONG)test_ptr->base_port); + goto test_failure; + } + if (!us) + { + test_ptr->ep_context[i].op[j].Rdma_Context = + DT_Bpool_GetRMR (test_ptr->ep_context[i].op[j].bp, 0); + test_ptr->ep_context[i].op[j].Rdma_Address = + (DAT_PVOID) (uintptr_t) + DT_Bpool_GetBuffer (test_ptr->ep_context[i].op[j].bp, + 0); + DT_Mdep_spew (3, ("not-us: RDMA/WR [ va=%p, ctxt=%x ]\n", + test_ptr->ep_context[i].op[j].Rdma_Address, + test_ptr->ep_context[i].op[j].Rdma_Context)); + } + break; + } + + case SEND_RECV: + { + test_ptr->ep_context[i].op[j].bp = + DT_BpoolAlloc (pt_ptr, + test_ptr->ia_handle, + test_ptr->pz_handle, + test_ptr->ep_context[i].ep_handle, + DAT_HANDLE_NULL, /* rmr */ + test_ptr->ep_context[i].op[j].seg_size, + test_ptr->ep_context[i].op[j].num_segs, + DAT_OPTIMAL_ALIGNMENT, + false, + false); + if (!test_ptr->ep_context[i].op[j].bp) + { + DT_Mdep_printf ("Test[" F64x "]: no memory for buffers (S/R)\n", + (DAT_UVERYLONG)test_ptr->base_port); + goto test_failure; + } + + DT_Mdep_spew (3, ("%d: S/R [ va=%p ]\n", j, (DAT_PVOID) + DT_Bpool_GetBuffer ( test_ptr->ep_context[i].op[j].bp, 0))); + break; + } + } + } /* end foreach op */ + + /* + * Prep send buffer with memory information + */ + RemoteMemInfo = (RemoteMemoryInfo *) + DT_Bpool_GetBuffer (test_ptr->ep_context[i].bp, + RMI_SEND_BUFFER_ID); + + for (j = 0; j < test_ptr->cmd->num_ops; j++) + { + RemoteMemInfo[j].rmr_context = + test_ptr->ep_context[i].op[j].Rdma_Context; + RemoteMemInfo[j].mem_address.as_64 = (DAT_UINT64) 0UL; + RemoteMemInfo[j].mem_address.as_ptr = + test_ptr->ep_context[i].op[j].Rdma_Address; + if (RemoteMemInfo[j].mem_address.as_ptr) + { + DT_Mdep_spew (3, ("RemoteMemInfo[%d] va=" F64x ", ctx=%x\n", + j, + (DAT_UVERYLONG)RemoteMemInfo[j].mem_address.as_64, + RemoteMemInfo[j].rmr_context)); + } + /* + * If the client and server are of different endiannesses, + * we must correct the endianness of the handle and address + * we pass to the other side. The other side cannot (and + * better not) interpret these values. + */ + if (DT_local_is_little_endian != test_ptr->remote_is_little_endian) + { + RemoteMemInfo[j].rmr_context = + DT_EndianMemHandle (RemoteMemInfo[j].rmr_context); + RemoteMemInfo[j].mem_address.as_64 = + DT_EndianMemAddress (RemoteMemInfo[j].mem_address.as_64); + } + } /* end foreach op */ + + /* + * Send our memory info (synchronously) + */ + DT_Mdep_debug (("Test[" F64x "]: Sending %s Memory Info\n", + (DAT_UVERYLONG)test_ptr->base_port, + test_ptr->is_server ? "Server" : "Client")); + + /* post the send buffer */ + if (!DT_post_send_buffer (test_ptr->ep_context[i].ep_handle, + test_ptr->ep_context[i].bp, + RMI_SEND_BUFFER_ID, + buff_size)) + { + /* error message printed by DT_post_send_buffer */ + goto test_failure; + } + /* reap the send and verify it */ + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, + RMI_SEND_BUFFER_ID); + if (!DT_dto_event_wait (test_ptr->reqt_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + test_ptr->ep_context[i].ep_handle, + buff_size, + dto_cookie, + test_ptr->is_server ? "Client_Mem_Info_Send" + : "Server_Mem_Info_Send")) + { + goto test_failure; + } + + /* + * Recv the other side's info + */ + DT_Mdep_debug (("Test[" F64x "]: Waiting for %s Memory Info\n", + (DAT_UVERYLONG)test_ptr->base_port, + test_ptr->is_server ? 
"Client" : "Server")); + dto_cookie.as_64 = LZERO; + dto_cookie.as_ptr = + (DAT_PVOID) DT_Bpool_GetBuffer ( + test_ptr->ep_context[i].bp, + RMI_RECV_BUFFER_ID); + if (!DT_dto_event_wait (test_ptr->recv_evd_hdl, &dto_stat) || + !DT_dto_check ( &dto_stat, + test_ptr->ep_context[i].ep_handle, + buff_size, + dto_cookie, + test_ptr->is_server ? "Client_Mem_Info_Recv" + : "Server_Mem_Info_Recv")) + { + goto test_failure; + } + + /* + * Extract what we need + */ + DT_Mdep_debug (("Test[" F64x "]: Memory Info received \n", + (DAT_UVERYLONG)test_ptr->base_port)); + RemoteMemInfo = (RemoteMemoryInfo *) + DT_Bpool_GetBuffer (test_ptr->ep_context[i].bp, + RMI_RECV_BUFFER_ID); + for (j = 0; j < test_ptr->cmd->num_ops; j++) + { + DAT_BOOLEAN us; + + us = (pt_ptr->local_is_server && + test_ptr->ep_context[i].op[j].server_initiated) || + (!pt_ptr->local_is_server && + !test_ptr->ep_context[i].op[j].server_initiated); + if (us && + (test_ptr->ep_context[i].op[j].transfer_type == RDMA_READ || + test_ptr->ep_context[i].op[j].transfer_type == RDMA_WRITE)) + { + test_ptr->ep_context[i].op[j].Rdma_Context = + RemoteMemInfo[j].rmr_context; + test_ptr->ep_context[i].op[j].Rdma_Address = + RemoteMemInfo[j].mem_address.as_ptr; + DT_Mdep_spew (3, ("Got RemoteMemInfo [ va=%p, ctx=%x ]\n", + test_ptr->ep_context[i].op[j].Rdma_Address, + test_ptr->ep_context[i].op[j].Rdma_Context)); + } + } + } /* end foreach EP context */ + + /* + * Dump out the state of the world if we're debugging + */ + if (test_ptr->cmd->debug) + { + DT_Print_Transaction_Test (test_ptr); + } + + /* + * Finally! Run the test. + */ + success = DT_Transaction_Run (test_ptr); + + /* + * Now clean up and go home + */ +test_failure: + if (test_ptr->ep_context) + { + + /* Foreach EP */ + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_EP_HANDLE ep_handle; + + ep_handle = DAT_HANDLE_NULL; + + /* Free the per-op buffers */ + for (j = 0; j < test_ptr->cmd->num_ops; j++) + { + if (test_ptr->ep_context[i].op[j].bp) + { + if (!DT_Bpool_Destroy (pt_ptr, + test_ptr->ep_context[i].op[j].bp)) + { + DT_Mdep_printf ("test[" F64x "]: Warning: Bpool destroy fails\n", + (DAT_UVERYLONG)test_ptr->base_port); + /* carry on trying, regardless */ + } + } + } + + /* Free the remote memory info exchange buffers */ + if (test_ptr->ep_context[i].bp) + { + if (!DT_Bpool_Destroy (pt_ptr, + test_ptr->ep_context[i].bp)) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: Bpool destroy fails\n", + (DAT_UVERYLONG)test_ptr->base_port); + /* carry on trying, regardless */ + } + } + + /* + * Disconnect -- we may have left recv buffers posted, if we + * bailed out mid-setup, or ran to completion + * normally, so we use abrupt closure. + */ + if (test_ptr->ep_context[i].ep_handle) + { + ret = dat_ep_disconnect (test_ptr->ep_context[i].ep_handle, + DAT_CLOSE_ABRUPT_FLAG); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: Warning: dat_ep_disconnect (%s) " + "#%d error %s\n", + (DAT_UVERYLONG)test_ptr->base_port, + success ? "graceful" : "abrupt", + i, DT_RetToString (ret)); + /* carry on trying, regardless */ + } + } + + /* + * Wait on each of the outstanding EP handles. Some of them + * may be disconnected by the remote side, we are racing + * here. 
+ */ + + if ( success ) /* Ensure DT_Transaction_Run did not return error otherwise may get stuck waiting for disconnect event*/ + { + if (!DT_disco_event_wait ( test_ptr->conn_evd_hdl, + &ep_handle)) + { + DT_Mdep_printf ("Test[" F64x "]: bad disconnect event\n", + (DAT_UVERYLONG)test_ptr->base_port); + } + else + { + /* + * We have successfully obtained a completed EP. We are + * racing with the remote node on disconnects, so we + * don't know which EP this is. Run the list and + * remove it so we don't disconnect a disconnected EP + */ + for (j = 0; j < test_ptr->cmd->eps_per_thread; j++) + { + if ( test_ptr->ep_context[j].ep_handle == ep_handle ) + { + test_ptr->ep_context[j].ep_handle = NULL; + } + } + } + } else /* !success - QP may be in error state */ + ep_handle = test_ptr->ep_context[i].ep_handle; + + /* + * Free the handle returned by the disconnect event. + * With multiple EPs, it may not be the EP we just + * disconnected as we are racing with the remote side + * disconnects. + */ + if ( DAT_HANDLE_NULL != ep_handle) + { + DAT_EVENT event; + /* + * Drain off outstanding DTOs that may have been + * generated by racing disconnects + */ + do + { + ret = dat_evd_dequeue ( test_ptr->recv_evd_hdl, + &event); + } while ( DAT_GET_TYPE(ret) != DAT_QUEUE_EMPTY ); + /* Destroy the EP */ + ret = dat_ep_free (ep_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_ep_free #%d error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, i, DT_RetToString (ret)); + /* carry on trying, regardless */ + } + } + } /* end foreach per-EP context */ + + DT_MemListFree (pt_ptr, test_ptr->ep_context); + } + + /* clean up the EVDs */ + if (test_ptr->conn_evd_hdl) + { + ret = dat_evd_free (test_ptr->conn_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (conn) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + if (pt_ptr->local_is_server) + { + if (test_ptr->creq_evd_hdl) + { + ret = dat_evd_free (test_ptr->creq_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (creq) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + } + if (test_ptr->reqt_evd_hdl) + { + ret = dat_evd_free (test_ptr->reqt_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (reqt) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + if (test_ptr->recv_evd_hdl) + { + ret = dat_evd_free (test_ptr->recv_evd_hdl); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_evd_free (recv) error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + + /* clean up the CNO */ + if (test_ptr->cmd->use_cno && test_ptr->cno_handle) + { + ret = dat_cno_free (test_ptr->cno_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_cno_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + + /* clean up the PZ */ + if (test_ptr->pz_handle) + { + ret = dat_pz_free (test_ptr->pz_handle); + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test[" F64x "]: dat_pz_free error: %s\n", + (DAT_UVERYLONG)test_ptr->base_port, DT_RetToString (ret)); + /* fall through, keep trying */ + } + } + + DT_Mdep_debug (("Test[" F64x "]: cleanup is done\n", (DAT_UVERYLONG)test_ptr->base_port)); + 
DT_MemListFree ( pt_ptr, test_ptr );
+ DT_Thread_Destroy ( thread, pt_ptr );
+ DT_Mdep_Thread_Detach ( DT_Mdep_Thread_SELF() ); /* AMM */
+ DT_Mdep_Thread_EXIT(NULL); /* AMM */
+}
+
+
+/* -----------------------------------------------------------------------
+ * The actual performance test
+ */
+bool
+DT_Transaction_Run (Transaction_Test_t * test_ptr)
+{
+ unsigned int op;
+ unsigned int iteration;
+ int bytes;
+ bool ours;
+ bool success = false;
+ bool repost_recv;
+ unsigned int i;
+
+ /* pre-post all receive buffers */
+ for (op = 0; op < test_ptr->cmd->num_ops; op++)
+ {
+ /* if it is a SEND/RECV, we must post receive buffers */
+ if (test_ptr->ep_context[0].op[op].transfer_type == SEND_RECV)
+ {
+ ours = (test_ptr->is_server ==
+ test_ptr->ep_context[0].op[op].server_initiated);
+ if (!ours)
+ {
+ if (!DT_handle_post_recv_buf (test_ptr->ep_context,
+ test_ptr->cmd->eps_per_thread,
+ op))
+ {
+ goto bail;
+ }
+ }
+ }
+ }
+
+ /* initialize data if we are validating it */
+ if (test_ptr->cmd->validate)
+ {
+ DT_Transaction_Validation_Fill (test_ptr, 0);
+ }
+
+ /*
+ * Now that we've posted our receive buffers...
+ * synchronize with the other side.
+ */
+ DT_Mdep_debug (("Test[" F64x "]: Synchronize with the other side\n",
+ (DAT_UVERYLONG)test_ptr->base_port));
+
+
+ /*
+ * Each server thread sends a sync message to the corresponding
+ * client thread. All clients wait until all server threads
+ * have sent their sync messages. Then all clients send
+ * their sync messages.
+ *
+ * Since all of the events are directed to the same EVD,
+ * we do not use DT_dto_check(.) to verify the attributes
+ * of the sync message event. DT_dto_check(.) requires the
+ * consumer to pass the expected EP, but we do not know
+ * what to expect. DAPL does not guarantee the order of
+ * completions across EPs. Therefore we only know that
+ * test_ptr->cmd->eps_per_thread number of completion events
+ * will be generated but not the order in which they will
+ * complete. 
+ */ + + if (test_ptr->is_server) + { + /* + * Server + */ + DT_Mdep_debug (("Test[" F64x "]: Send Sync to Client\n", + (DAT_UVERYLONG)test_ptr->base_port)); + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + if (!DT_post_send_buffer (test_ptr->ep_context[i].ep_handle, + test_ptr->ep_context[i].bp, + SYNC_SEND_BUFFER_ID, + SYNC_BUFF_SIZE)) + { + DT_Mdep_debug (("Test[" F64x "]: Server sync send error\n", + (DAT_UVERYLONG)test_ptr->base_port)); + goto bail; + } + } + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + + if ( !DT_dto_event_wait (test_ptr->reqt_evd_hdl, &dto_stat) ) + { + DT_Mdep_debug (("Test[" F64x "]: Server sync send error\n", + (DAT_UVERYLONG)test_ptr->base_port)); + + goto bail; + } + } + + DT_Mdep_debug (("Test[" F64x "]: Wait for Sync Message\n", + (DAT_UVERYLONG)test_ptr->base_port)); + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + + if ( !DT_dto_event_wait (test_ptr->recv_evd_hdl, &dto_stat) ) + { + DT_Mdep_debug (("Test[" F64x "]: Server sync recv error\n", + (DAT_UVERYLONG)test_ptr->base_port)); + goto bail; + } + } + } + else + { + /* + * Client + */ + DT_Mdep_debug (("Test[" F64x "]: Wait for Sync Message\n", + (DAT_UVERYLONG)test_ptr->base_port)); + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + + if ( !DT_dto_event_wait (test_ptr->recv_evd_hdl, &dto_stat) ) + { + DT_Mdep_debug (("Test[" F64x "]: Client sync recv error\n", + (DAT_UVERYLONG)test_ptr->base_port)); + goto bail; + } + DT_transaction_stats_set_ready (&test_ptr->pt_ptr->Client_Stats); + } + + /* check if it is time for client to send sync */ + if (!DT_transaction_stats_wait_for_all (&test_ptr->pt_ptr->Client_Stats)) + { + goto bail; + } + + DT_Mdep_debug (("Test[" F64x "]: Send Sync Msg\n", (DAT_UVERYLONG)test_ptr->base_port)); + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + if (!DT_post_send_buffer (test_ptr->ep_context[i].ep_handle, + test_ptr->ep_context[i].bp, + SYNC_SEND_BUFFER_ID, + SYNC_BUFF_SIZE)) + { + DT_Mdep_debug (("Test[" F64x "]: Client sync send error\n", + (DAT_UVERYLONG)test_ptr->base_port)); + goto bail; + } + } + for (i = 0; i < test_ptr->cmd->eps_per_thread; i++) + { + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + + if ( !DT_dto_event_wait (test_ptr->reqt_evd_hdl, &dto_stat) ) + { + goto bail; + } + } + } + + /* + * Get to work ... 
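+ *
+ * Each iteration walks the op list in order: RDMA_READ and
+ * RDMA_WRITE ops are issued only by the initiating side, while
+ * SEND_RECV ops involve both sides (the sender posts, the
+ * receiver reaps and re-posts unless it is the final iteration).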
+ */ + DT_Mdep_debug (("Test[" F64x "]: Begin...\n", (DAT_UVERYLONG)test_ptr->base_port)); + test_ptr->stats.start_time = DT_Mdep_GetTime (); + + for (iteration = 0; + iteration < test_ptr->cmd->num_iterations; + iteration++) + { + + DT_Mdep_debug (("iteration: %d\n", iteration)); + + /* repost unless this is the last iteration */ + repost_recv = (iteration + 1 != test_ptr->cmd->num_iterations); + + for (op = 0; op < test_ptr->cmd->num_ops; op++) + { + ours = (test_ptr->is_server == + test_ptr->ep_context[0].op[op].server_initiated); + bytes = (test_ptr->ep_context[0].op[op].seg_size * + test_ptr->ep_context[0].op[op].num_segs * + test_ptr->cmd->eps_per_thread); + + switch (test_ptr->ep_context[0].op[op].transfer_type) + { + case RDMA_READ: + { + test_ptr->stats.stat_bytes_rdma_read += bytes; + if (ours) + { + DT_Mdep_debug (("Test[" F64x "]: RdmaRead [%d]\n", + (DAT_UVERYLONG)test_ptr->base_port, + op)); + if (!DT_handle_rdma_op (test_ptr->ep_context, + test_ptr->reqt_evd_hdl, + test_ptr->cmd->eps_per_thread, + RDMA_READ, + op, + test_ptr->cmd->poll)) + { + DT_Mdep_printf ("Test[" F64x "]: RdmaRead error[%d]\n", + (DAT_UVERYLONG)test_ptr->base_port, op); + goto bail; + } + } + break; + } + + case RDMA_WRITE: + { + test_ptr->stats.stat_bytes_rdma_write += bytes; + if (ours) + { + DT_Mdep_debug (("Test[" F64x "]: RdmaWrite [%d]\n", + (DAT_UVERYLONG)test_ptr->base_port, + op)); + if (!DT_handle_rdma_op (test_ptr->ep_context, + test_ptr->reqt_evd_hdl, + test_ptr->cmd->eps_per_thread, + RDMA_WRITE, + op, + test_ptr->cmd->poll)) + { + DT_Mdep_printf ("Test[" F64x "]: RdmaWrite error[%d]\n", + (DAT_UVERYLONG)test_ptr->base_port, op); + goto bail; + } + } + break; + } + + case SEND_RECV: + { + if (ours) + { + test_ptr->stats.stat_bytes_send += bytes; + DT_Mdep_debug (("Test[" F64x "]: postsend [%d] \n", + (DAT_UVERYLONG)test_ptr->base_port, op)); + /* send data */ + if (!DT_handle_send_op (test_ptr->ep_context, + test_ptr->reqt_evd_hdl, + test_ptr->cmd->eps_per_thread, + op, + test_ptr->cmd->poll)) + { + goto bail; + } + } + else + { + test_ptr->stats.stat_bytes_recv += bytes; + DT_Mdep_debug (("Test[" F64x "]: RecvWait and Re-Post [%d] \n", + (DAT_UVERYLONG)test_ptr->base_port, op)); + + if (!DT_handle_recv_op (test_ptr->ep_context, + test_ptr->recv_evd_hdl, + test_ptr->reqt_evd_hdl, + test_ptr->cmd->eps_per_thread, + op, + test_ptr->cmd->poll, + repost_recv)) + { + goto bail; + } + } + + /* now before going on, is it time to validate? 
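(The client checks on the third-to-last op and the server on the second-to-last, matching the three barrier ops the parser appends when -V is given.) 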
*/ + if (test_ptr->cmd->validate) + { + if (!test_ptr->pt_ptr->local_is_server) /* CLIENT */ + { + /* the client validates on the third to last op */ + if (op == test_ptr->cmd->num_ops - 3) + { + if (!DT_Transaction_Validation_Check (test_ptr, + iteration)) + { + goto bail; + } + DT_Transaction_Validation_Fill (test_ptr, + iteration + 1); + } + } + else /* SERVER */ + { + /* the server validates on the second to last op */ + if (op == test_ptr->cmd->num_ops - 2) + { + if (!DT_Transaction_Validation_Check (test_ptr, + iteration)) + { + goto bail; + } + DT_Transaction_Validation_Fill (test_ptr, + iteration + 1); + } + } + } /* end validate */ + break; + } + } /* end switch for transfer type */ + } /* end loop for each op */ + } /* end loop for iteration */ + + /* end time and print stats */ + test_ptr->stats.end_time = DT_Mdep_GetTime (); + if (!test_ptr->pt_ptr->local_is_server) + { + DT_update_transaction_stats (&test_ptr->pt_ptr->Client_Stats, + test_ptr->cmd->eps_per_thread * test_ptr->cmd->num_ops * + test_ptr->cmd->num_iterations, + test_ptr->stats.end_time - test_ptr->stats.start_time, + test_ptr->stats.stat_bytes_send, + test_ptr->stats.stat_bytes_recv, + test_ptr->stats.stat_bytes_rdma_read, + test_ptr->stats.stat_bytes_rdma_write); + } + DT_Mdep_debug (("Test[" F64x "]: End Successfully\n", (DAT_UVERYLONG)test_ptr->base_port)); + success = true; + +bail: + return ( success ); +} + + +/*------------------------------------------------------------------------------ */ +void +DT_Transaction_Validation_Fill ( Transaction_Test_t * test_ptr, + unsigned int iteration) +{ + bool ours; + unsigned int op; + unsigned int i; + unsigned int j; + unsigned int ind; + unsigned char *buff; + + if (iteration >= test_ptr->cmd->num_iterations) + { + return; + } + DT_Mdep_debug (("Test[" F64x "]: FILL Buffers Iteration %d\n", + (DAT_UVERYLONG)test_ptr->base_port, iteration)); + + /* + * fill all but the last three ops, which + * were added to create barriers for data validation + */ + for (ind = 0; ind < test_ptr->cmd->eps_per_thread; ind++) + { + for (op = 0; op < test_ptr->cmd->num_ops - 3; op++) + { + ours = (test_ptr->is_server == + test_ptr->ep_context[ind].op[op].server_initiated); + + switch (test_ptr->ep_context[ind].op[op].transfer_type) + + { + case RDMA_READ: + { + if (!ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; + i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, i); + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + /* Avoid using all zero bits the 1st time */ + buff[j] = (iteration + 1) & 0xFF; + } + } + } + break; + } + + case RDMA_WRITE: + { + if (ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; + i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, i); + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + /* Avoid using all zero bits the 1st time */ + buff[j] = (iteration + 1) & 0xFF; + } + } + } + break; + } + + case SEND_RECV: + { + if (ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; + i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, + i); + /***** + DT_Mdep_printf( + "\tFill: wq=%d op=%d seg=%d ptr=[%p, %d]\n", + ind, op, i, buff, j); + *****/ + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + /* Avoid using all zero bits the 1st time */ + buff[j] = (iteration + 1) & 0xFF; + } + } + } + break; + } + } /* end switch transfer_type */ + } /* end for 
each op */ + } /* end for each ep per thread */ +} + + +/*------------------------------------------------------------------------------ */ +bool +DT_Transaction_Validation_Check (Transaction_Test_t * test_ptr, + int iteration) +{ + bool ours; + bool success = true; + unsigned int op; + unsigned int i; + unsigned int j; + unsigned int ind; + unsigned char *buff; + unsigned char expect; + unsigned char got; + + DT_Mdep_debug (("Test[" F64x "]: VALIDATE Buffers Iteration %d\n", + (DAT_UVERYLONG)test_ptr->base_port, + iteration)); + + /* + * fill all but the last three ops, which + * were added to create barriers for data validation + */ + for (ind = 0; ind < test_ptr->cmd->eps_per_thread; ind++) + { + for (op = 0; op < test_ptr->cmd->num_ops - 3; op++) + { + ours = (test_ptr->is_server == + test_ptr->ep_context[ind].op[op].server_initiated); + + switch (test_ptr->ep_context[ind].op[op].transfer_type) + { + case RDMA_READ: + { + if (ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, i); + + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + + expect = (iteration + 1) & 0xFF; + got = buff[j]; + if (expect != got) + { + DT_Mdep_printf ( + "Test[" F64x "]: Validation Error :: %d\n", + (DAT_UVERYLONG)test_ptr->base_port, + op); + DT_Mdep_printf ( + "Test[" F64x "]: Expected %x Got %x\n", + (DAT_UVERYLONG)test_ptr->base_port, + expect, + got); + DT_Mdep_spew (3, + ("\twq=%d op=%d seg=%d byte=%d ptr=%p\n", + ind, op, i, j, buff)); + success = false; + break; + } + } + } + } + break; + } + + case RDMA_WRITE: + { + if (!ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; + i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, i); + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + + expect = (iteration + 1) & 0xFF; + got = buff[j]; + if (expect != got) + { + DT_Mdep_printf ("Test[" F64x "]: Validation Error :: %d\n", + (DAT_UVERYLONG)test_ptr->base_port, + op); + DT_Mdep_printf ("Test[" F64x "]: Expected %x Got %x\n", + (DAT_UVERYLONG)test_ptr->base_port, + expect, + got); + DT_Mdep_spew (3, + ("\twq=%d op=%d seg=%d byte=%d ptr=%p\n", + ind, op, i, j, buff)); + success = false; + break; + } + } + } + } + break; + } + + case SEND_RECV: + { + if (!ours) + { + for (i = 0; + i < test_ptr->ep_context[ind].op[op].num_segs; + i++) + { + + buff = DT_Bpool_GetBuffer ( + test_ptr->ep_context[ind].op[op].bp, i); + DT_Mdep_spew (3, ( + "\tCheck:wq=%d op=%d seg=%d ptr=[%p, %d]\n", + ind, op, i, buff, + test_ptr->ep_context[ind].op[op].seg_size)); + + for (j = 0; + j < test_ptr->ep_context[ind].op[op].seg_size; + j++) + { + + expect = (iteration + 1) & 0xFF; + got = buff[j]; + if (expect != got) + { + DT_Mdep_printf ( + "Test[" F64x "]: Validation Error :: %d\n", + (DAT_UVERYLONG)test_ptr->base_port, + op); + DT_Mdep_printf ("Test[" F64x "]: Expected %x Got %x\n", + (DAT_UVERYLONG)test_ptr->base_port, + expect, + got); + DT_Mdep_spew (3, + ("\twq=%d op=%d seg=%d byte=%d ptr=%p\n", + ind, op, i, j, buff)); + success = false; + break; + } + } + } + } + break; + } + } /* end switch transfer_type */ + } /* end for each op */ + } /* end for each ep per thread */ + + return ( success ); +} + + +/*------------------------------------------------------------------------------ */ +void +DT_Print_Transaction_Test (Transaction_Test_t * test_ptr) +{ + DT_Mdep_printf ("-------------------------------------\n"); + DT_Mdep_printf 
("TransTest.is_server : %d\n", + test_ptr->is_server); + DT_Mdep_printf ("TransTest.remote_little_endian : %d\n", + test_ptr->remote_is_little_endian); + DT_Mdep_printf ("TransTest.base_port : " F64x "\n", + (DAT_UVERYLONG)test_ptr->base_port); + DT_Mdep_printf ("TransTest.pz_handle : %p\n", + test_ptr->pz_handle); + /* statistics */ + DT_Mdep_printf ("TransTest.bytes_send : %d\n", + test_ptr->stats.stat_bytes_send); + DT_Mdep_printf ("TransTest.bytes_recv : %d\n", + test_ptr->stats.stat_bytes_recv); + DT_Mdep_printf ("TransTest.bytes_rdma_read : %d\n", + test_ptr->stats.stat_bytes_rdma_read); + DT_Mdep_printf ("TransTest.bytes_rdma_write : %d\n", + test_ptr->stats.stat_bytes_rdma_write); +} + + +/*------------------------------------------------------------------------------ */ +void +DT_Print_Transaction_Stats (Transaction_Test_t * test_ptr) +{ + double time; + double mbytes_send; + double mbytes_recv; + double mbytes_rdma_read; + double mbytes_rdma_write; + int total_ops; + time = (double) (test_ptr->stats.end_time - test_ptr->stats.start_time) / 1000; + mbytes_send = (double) test_ptr->stats.stat_bytes_send / 1024 / 1024; + mbytes_recv = (double) test_ptr->stats.stat_bytes_recv / 1024 / 1024; + mbytes_rdma_read = (double) test_ptr->stats.stat_bytes_rdma_read / 1024 / 1024; + mbytes_rdma_write = (double) test_ptr->stats.stat_bytes_rdma_write / 1024 / 1024; + total_ops = test_ptr->cmd->num_ops * test_ptr->cmd->num_iterations; + + DT_Mdep_printf ("Test[: " F64x "] ---- Stats ----\n", (DAT_UVERYLONG)test_ptr->base_port); + DT_Mdep_printf ("Iterations : %u\n", test_ptr->cmd->num_iterations); + DT_Mdep_printf ("Ops : %7d.%02d Ops/Sec\n", + whole (total_ops / time), + hundredths (total_ops / time)); + DT_Mdep_printf ("Time : %7d.%02d sec\n", + whole (time), + hundredths (time)); + DT_Mdep_printf ("Sent : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_send), + hundredths (mbytes_send), + whole (mbytes_send / time), + hundredths (mbytes_send / time)); + DT_Mdep_printf ("Recv : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_recv), + hundredths (mbytes_recv), + whole (mbytes_recv / time), + hundredths (mbytes_recv / time)); + DT_Mdep_printf ("RDMA Read : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_rdma_read), + hundredths (mbytes_rdma_read), + whole (mbytes_rdma_read / time), + hundredths (mbytes_rdma_read / time)); + DT_Mdep_printf ("RDMA Write : %7d.%02d MB - %7d.%02d MB/Sec\n", + whole (mbytes_rdma_write), + hundredths (mbytes_rdma_write), + whole (mbytes_rdma_write / time), + hundredths (mbytes_rdma_write / time)); +} + diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.h new file mode 100644 index 00000000..96baddd7 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_test.h @@ -0,0 +1,99 @@ +/* + * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. + * + * This Software is licensed under either one of the following two licenses: + * + * 1) under the terms of the "Common Public License 1.0" a copy of which is + * in the file LICENSE.txt in the root directory. The license is also + * available from the Open Source Initiative, see + * http://www.opensource.org/licenses/cpl.php. + * OR + * + * 2) under the terms of the "The BSD License" a copy of which is in the file + * LICENSE2.txt in the root directory. The license is also available from + * the Open Source Initiative, see + * http://www.opensource.org/licenses/bsd-license.php. 
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#ifndef __DAPL_TRANSACTION_TEST_H__
+#define __DAPL_TRANSACTION_TEST_H__
+
+#include "dapl_common.h"
+#include "dapl_test_data.h"
+#include "dapl_transaction_cmd.h"
+#include "dapl_mdep.h"
+
+#pragma pack(1)
+typedef struct
+{
+    DAT_BOOLEAN         server_initiated;
+    DT_Transfer_Type    transfer_type;
+    DAT_UINT32          num_segs;
+    DAT_UINT32          seg_size;
+    DAT_BOOLEAN         reap_send_on_recv;
+    Bpool               *bp;
+
+    /* RDMA info */
+    DAT_RMR_CONTEXT     Rdma_Context;
+    DAT_PVOID           Rdma_Address;
+} Transaction_Test_Op_t;
+
+typedef struct
+{
+    DAT_EP_HANDLE           ep_handle;
+    DAT_EP_ATTR             ep_attr;
+    DAT_CONN_QUAL           ia_port;
+    Bpool                   *bp;
+    Transaction_Test_Op_t   op[ MAX_OPS ];
+    DAT_RSP_HANDLE          rsp_handle;
+    DAT_PSP_HANDLE          psp_handle;
+
+} Ep_Context_t;
+
+typedef struct
+{
+    unsigned int    stat_bytes_send;
+    unsigned int    stat_bytes_recv;
+    unsigned int    stat_bytes_rdma_read;
+    unsigned int    stat_bytes_rdma_write;
+    unsigned int    start_time;
+    unsigned int    end_time;
+} Transaction_Test_Stats_t;
+
+typedef struct
+{
+    /* This group set up by DT_Transaction_Create_Test() */
+    DAT_BOOLEAN         is_server;
+    DAT_BOOLEAN         remote_is_little_endian;
+    Per_Test_Data_t     *pt_ptr;
+    DAT_IA_HANDLE       ia_handle;
+    Transaction_Cmd_t   *cmd;
+    DAT_IA_ADDRESS_PTR  remote_ia_addr;
+    DAT_CONN_QUAL       base_port;
+    DAT_TIMEOUT         time_out;
+    DAT_COUNT           evd_length;
+    Thread              *thread;
+
+    /* This group set up by each thread in DT_Transaction_Main() */
+    DAT_PZ_HANDLE       pz_handle;
+    DAT_CNO_HANDLE      cno_handle;
+    DAT_EVD_HANDLE      recv_evd_hdl;   /* receive     */
+    DAT_EVD_HANDLE      reqt_evd_hdl;   /* request+rmr */
+    DAT_EVD_HANDLE      conn_evd_hdl;   /* connect     */
+    DAT_EVD_HANDLE      creq_evd_hdl;   /* "" request  */
+    Ep_Context_t        *ep_context;
+
+    /* Statistics set by DT_Transaction_Run() */
+    Transaction_Test_Stats_t    stats;
+} Transaction_Test_t;
+#pragma pack()
+#endif
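One convention worth noting before the next file: every post/reap helper in dapl_transaction_util.c below stamps each posted DTO with a 64-bit cookie that packs the endpoint index into the upper 32 bits and the low 32 bits of the first buffer's address into the lower half; the completion path then recovers the index with a 32-bit right shift. A self-contained sketch of that convention (illustrative C, not part of the patch):

    #include <stdint.h>
    #include <stdio.h>

    /* Pack an EP index and a buffer address the way dapltest builds
     * DAT_DTO_COOKIE.as_64 (high word: EP number, low word: buffer bits). */
    static uint64_t pack_cookie (uint32_t epnum, void *buf)
    {
        return (((uint64_t) epnum) << 32) | ((uintptr_t) buf & 0xffffffffUL);
    }

    int main (void)
    {
        int      dummy;
        uint64_t c = pack_cookie (3, &dummy);

        /* Unpack exactly as the reaping side does. */
        printf ("epnum=%lu low_bits=0x%08lx\n",
                (unsigned long) (c >> 32),
                (unsigned long) (c & 0xffffffffUL));
        return 0;
    }

diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_util.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_util.c
new file mode 100644
index 00000000..2d6e73a7
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_transaction_util.c
@@ -0,0 +1,730 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.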
+ * + * Redistributions in binary form must reproduce both the above copyright + * notice, either one of the license notices in the documentation + * and/or other materials provided with the distribution. + */ + +#include "dapl_bpool.h" +#include "dapl_mdep.h" +#include "dapl_proto.h" + +/* ----------------------------------------------------------- + * Post a recv buffer on each of this thread's EPs. + */ +bool +DT_handle_post_recv_buf (Ep_Context_t * ep_context, + unsigned int num_eps, + int op_indx) +{ + unsigned int i, j; + + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op = &ep_context[i].op[op_indx]; + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (op->bp, 0); + DAT_DTO_COOKIE cookie; + DAT_RETURN ret; + + /* Prep the inputs */ + for (j = 0; j < op->num_segs; j++) + { + iov[j].pad = 0U; + iov[j].virtual_address = (DAT_VADDR) (uintptr_t) + DT_Bpool_GetBuffer (op->bp, j); + iov[j].segment_length = op->seg_size; + iov[j].lmr_context = DT_Bpool_GetLMR (op->bp, j); + } + cookie.as_64 = + ((((DAT_UINT64) i) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + /* Post the recv */ + ret = dat_ep_post_recv ( ep_context[i].ep_handle, + op->num_segs, + iov, + cookie, + DAT_COMPLETION_DEFAULT_FLAG); + + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dat_ep_post_recv failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + return false; + } + } + + return true; +} + + +/* ----------------------------------------------------------- + * Post a send buffer on each of this thread's EPs. + */ +bool +DT_handle_send_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + int op_indx, + bool poll) +{ + unsigned int i, j; + unsigned char *completion_reaped; + + completion_reaped = DT_Mdep_Malloc (num_eps * sizeof (unsigned char)); + + if (!completion_reaped) + { + return false; + } + + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op = &ep_context[i].op[op_indx]; + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (op->bp, 0); + DAT_DTO_COOKIE cookie; + DAT_RETURN ret; + + /* Prep the inputs */ + for (j = 0; j < op->num_segs; j++) + { + iov[j].pad = 0U; + iov[j].virtual_address = (DAT_VADDR) (uintptr_t) + DT_Bpool_GetBuffer (op->bp, j); + iov[j].segment_length = op->seg_size; + iov[j].lmr_context = DT_Bpool_GetLMR (op->bp, j); + } + cookie.as_64 = + ((((DAT_UINT64) i) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + /* Post the send */ + ret = dat_ep_post_send ( ep_context[i].ep_handle, + op->num_segs, + iov, + cookie, + DAT_COMPLETION_DEFAULT_FLAG); + + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dat_ep_post_send failed: %s\n", + DT_RetToString (ret)); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return false; + } + } + + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op = &ep_context[i].op[op_indx]; + + if (op->reap_send_on_recv && !op->server_initiated) + { + /* we will reap the send on the recv (Client SR) */ + DT_Mdep_Free (completion_reaped); + return true; + } + } + + bzero ((void *) completion_reaped, sizeof (unsigned char) * num_eps); + + /* reap the send completion */ + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_DTO_COOKIE dto_cookie; + unsigned int epnum; + + if (!DT_dto_event_reap (reqt_evd_hdl, poll, &dto_stat)) + { + DT_Mdep_Free (completion_reaped); + return false; + } + + epnum = (uint32_t)(dto_stat.user_cookie.as_64 >> 32); + if (epnum > num_eps) + { + DT_Mdep_printf 
("Test Error: Send: Invalid endpoint completion reaped.\n" + "\tEndpoint: 0x%p, Cookie: 0x" F64x ", Length: " F64u "\n", + dto_stat.ep_handle, dto_stat.user_cookie.as_64, + dto_stat.transfered_length); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return false; + } + + op = &ep_context[epnum].op[op_indx]; + + dto_cookie.as_64 = + ((((DAT_UINT64) epnum) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + if (!DT_dto_check (&dto_stat, + ep_context[epnum].ep_handle, + op->num_segs * op->seg_size, + dto_cookie, + "Send")) + { + DT_Mdep_Free (completion_reaped); + return false; + } + + if (completion_reaped[epnum]) + { + DT_Mdep_printf ("Test Error: Send: Secondary completion seen for endpoint 0x%p (%d)\n", + ep_context[epnum].ep_handle, epnum); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return ( false ); + } + completion_reaped[epnum] = 1; + } + + for (i = 0; i < num_eps; i++) + { + if (completion_reaped[i] == 0) + { + DT_Mdep_printf ("Test Error: Send: No completion seen for endpoint 0x%p (#%d)\n", + ep_context[i].ep_handle, i); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return ( false ); + } + } + + DT_Mdep_Free (completion_reaped); + return true; +} + + +/* ----------------------------------------------------------- + * Reap a recv op on each of this thread's EPs, + * then if requested reap the corresponding send ops, + * and re-post all of the recv buffers. + */ +bool +DT_handle_recv_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE recv_evd_hdl, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + int op_indx, + bool poll, + bool repost_recv) +{ + unsigned int i; + unsigned char *recv_completion_reaped; + unsigned char *send_completion_reaped; + + recv_completion_reaped = DT_Mdep_Malloc (num_eps); + if (recv_completion_reaped == NULL) + { + return false; + } + + send_completion_reaped = DT_Mdep_Malloc (num_eps); + if (send_completion_reaped == NULL) + { + DT_Mdep_Free (recv_completion_reaped); + return false; + } + + /* Foreach EP, reap */ + bzero ((void *) recv_completion_reaped, sizeof (unsigned char) * num_eps); + bzero ((void *) send_completion_reaped, sizeof (unsigned char) * num_eps); + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_DTO_COOKIE dto_cookie; + unsigned int epnum; + + /* First reap the recv DTO event */ + if (!DT_dto_event_reap (recv_evd_hdl, poll, &dto_stat)) + { + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + epnum = (uint32_t)(dto_stat.user_cookie.as_64 >> 32); + if (epnum > num_eps) + { + DT_Mdep_printf ("Test Error: Receive: Invalid endpoint completion reaped.\n" + "\tEndpoint: 0x%p, Cookie: 0x" F64x ", Length: " F64u "\n", + dto_stat.ep_handle, dto_stat.user_cookie.as_64, + dto_stat.transfered_length); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + op = &ep_context[epnum].op[op_indx]; + dto_cookie.as_64 = + ((((DAT_UINT64) epnum) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + if (!DT_dto_check (&dto_stat, + ep_context[epnum].ep_handle, + op->num_segs * op->seg_size, + dto_cookie, + "Recv")) + { + DT_Mdep_printf ("Test Error: recv DTO problem\n"); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + if (recv_completion_reaped[epnum]) + { + DT_Mdep_printf ("Test Error: Receive: Secondary 
completion seen for endpoint 0x%p (%d)\n", + ep_context[epnum].ep_handle, epnum); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return ( false ); + } + recv_completion_reaped[epnum] = 1; + + /* + * Check the current op to see whether we are supposed + * to reap the previous send op now. + */ + if (op->reap_send_on_recv && op->server_initiated) + { + if (op_indx <= 0) + /* shouldn't happen, but let's be certain */ + { + DT_Mdep_printf ("Internal Error: reap_send_on_recv" + " but current op == #%d\n", op_indx); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + if (!DT_dto_event_reap (reqt_evd_hdl, poll, &dto_stat)) + { + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + epnum = (uint32_t)(dto_stat.user_cookie.as_64 >> 32); + if (epnum > num_eps) + { + DT_Mdep_printf ("Test Error: Send (ror): Invalid endpoint completion reaped.\n" + "\tEndpoint: 0x%p, Cookie: 0x" F64x ", Length: "F64u "\n", + dto_stat.ep_handle, dto_stat.user_cookie.as_64, + dto_stat.transfered_length); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + /* + * We're reaping the last transaction, a + * send completion that we skipped when it was sent. + */ + op = &ep_context[epnum].op[op_indx - 1]; + + dto_cookie.as_64 = + ((((DAT_UINT64) epnum) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) + & 0xffffffffUL)); + + /* + * If we have multiple EPs we can't guarantee the order of + * completions, so disable ep_handle check + */ + if (!DT_dto_check (&dto_stat, + num_eps == 1?ep_context[i].ep_handle: NULL, + op->num_segs * op->seg_size, + dto_cookie, + "Send-reaped-on-recv")) + { + DT_Mdep_printf ("Test Error: send DTO problem\n"); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + + if (send_completion_reaped[epnum]) + { + DT_Mdep_printf ("Test Error: Send (ror): Secondary completion seen for endpoint 0x%p (%d)\n", + ep_context[epnum].ep_handle, epnum); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return ( false ); + } + send_completion_reaped[epnum] = 1; + } + } + + for (i = 0; i < num_eps; i++) + { + if (recv_completion_reaped[i] == 0) + { + DT_Mdep_printf ("Test Error: Receive: No completion seen for endpoint 0x%p (#%d)\n", + ep_context[i].ep_handle, i); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return ( false ); + } + } + + if (ep_context[0].op[op_indx].reap_send_on_recv + && ep_context[0].op[op_indx].server_initiated) + { + for (i = 0; i < num_eps; i++) + { + if (send_completion_reaped[i] == 0) + { + DT_Mdep_printf ("Test Error: Send (ror): No completion seen for endpoint 0x%p (#%d)\n", + ep_context[i].ep_handle, i); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return ( false ); + } + } + } + + if (repost_recv) + { + /* repost the receive buffer */ + if (!DT_handle_post_recv_buf (ep_context, num_eps, op_indx)) + { + DT_Mdep_printf ("Test Error: recv re-post problem\n"); + DT_Test_Error (); + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return false; + } + } + + DT_Mdep_Free (recv_completion_reaped); + DT_Mdep_Free (send_completion_reaped); + return true; +} + + +/* 
----------------------------------------------------------- + * Initiate an RDMA op (synchronous) on each of this thread's EPs. + */ +bool +DT_handle_rdma_op (Ep_Context_t * ep_context, + DAT_EVD_HANDLE reqt_evd_hdl, + unsigned int num_eps, + DT_Transfer_Type opcode, + int op_indx, + bool poll) +{ + unsigned int i, j; + DAT_RETURN ret; + unsigned char *completion_reaped; + + completion_reaped = DT_Mdep_Malloc (num_eps * sizeof (unsigned char)); + + if (!completion_reaped) + { + return false; + } + + /* Initiate the operation */ + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op = &ep_context[i].op[op_indx]; + DAT_LMR_TRIPLET *iov = DT_Bpool_GetIOV (op->bp, 0); + DAT_DTO_COOKIE cookie; + DAT_RMR_TRIPLET rmr_triplet; + + /* Prep the inputs */ + for (j = 0; j < op->num_segs; j++) + { + iov[j].pad = 0U; + iov[j].virtual_address = (DAT_VADDR) (uintptr_t) + DT_Bpool_GetBuffer (op->bp, j); + iov[j].segment_length = op->seg_size; + iov[j].lmr_context = DT_Bpool_GetLMR (op->bp, j); + } + cookie.as_64 = + ((((DAT_UINT64) i) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + rmr_triplet.pad = 0U; + rmr_triplet.target_address = (DAT_VADDR) (uintptr_t) op->Rdma_Address; + rmr_triplet.segment_length = op->seg_size * op->num_segs; + rmr_triplet.rmr_context = op->Rdma_Context; + + DT_Mdep_spew (3, ("Call dat_ep_post_rdma_%s [" F64x ", sz=" F64x ", ctxt=%x]\n", + (opcode == RDMA_WRITE ? "write" : "read"), + rmr_triplet.target_address, + rmr_triplet.segment_length, + rmr_triplet.rmr_context )); + + /* Post the operation */ + if (opcode == RDMA_WRITE) + { + + ret = dat_ep_post_rdma_write (ep_context[i].ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + + } + else /* opcode == RDMA_READ */ + { + + ret = dat_ep_post_rdma_read ( ep_context[i].ep_handle, + op->num_segs, + iov, + cookie, + &rmr_triplet, + DAT_COMPLETION_DEFAULT_FLAG); + + } + if (ret != DAT_SUCCESS) + { + DT_Mdep_printf ("Test Error: dat_ep_post_rdma_%s failed: %s\n", + (opcode == RDMA_WRITE ? "write" : "read"), + DT_RetToString (ret)); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return ( false ); + } + else + { + DT_Mdep_spew (3, ("Done dat_ep_post_rdma_%s %s\n", + (opcode == RDMA_WRITE ? "write" : "read"), + " () Waiting ...")); + } + } + + bzero ((void *) completion_reaped, sizeof (unsigned char) * num_eps); + /* Wait for it to happen */ + for (i = 0; i < num_eps; i++) + { + Transaction_Test_Op_t *op; + DAT_DTO_COMPLETION_EVENT_DATA dto_stat; + DAT_DTO_COOKIE dto_cookie; + unsigned int epnum; + + if (!DT_dto_event_reap (reqt_evd_hdl, poll, &dto_stat)) + { + DT_Mdep_Free (completion_reaped); + return ( false ); + } + + epnum = (uint32_t)(dto_stat.user_cookie.as_64 >> 32); + if (epnum > num_eps) + { + DT_Mdep_printf ("Test Error: %s: Invalid endpoint completion reaped.\n" + "\tEndpoint: 0x%p, Cookie: 0x" F64x ", Length: " F64u "\n", + opcode == RDMA_WRITE ? "RDMA/WR" : "RDMA/RD", + dto_stat.ep_handle, dto_stat.user_cookie.as_64, + dto_stat.transfered_length); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return false; + } + op = &ep_context[epnum].op[op_indx]; + + dto_cookie.as_64 = + ((((DAT_UINT64) epnum) << 32) + | (((uintptr_t) DT_Bpool_GetBuffer (op->bp, 0)) & 0xffffffffUL)); + + if (!DT_dto_check (&dto_stat, + ep_context[epnum].ep_handle, + op->num_segs * op->seg_size, + dto_cookie, + (opcode == RDMA_WRITE ? 
"RDMA/WR" : "RDMA/RD"))) + { + DT_Mdep_Free (completion_reaped); + return ( false ); + } + + if (completion_reaped[epnum]) + { + DT_Mdep_printf ("Test Error: %s: Secondary completion seen for endpoint 0x%p (%d)\n", + opcode == RDMA_WRITE ? "RDMA/WR" : "RDMA/RD", + ep_context[epnum].ep_handle, epnum); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return ( false ); + } + completion_reaped[epnum] = 1; + + DT_Mdep_spew (3, ("dat_ep_post_rdma_%s OK\n", + (opcode == RDMA_WRITE ? "RDMA/WR" : "RDMA/RD"))); + } + + for (i = 0; i < num_eps; i++) + { + if (completion_reaped[i] == 0) + { + DT_Mdep_printf ("Test Error: %s: No completion seen for endpoint 0x%p (#%d)\n", + opcode == RDMA_WRITE ? "RDMA/WR" : "RDMA/RD", + ep_context[i].ep_handle, i); + DT_Test_Error (); + DT_Mdep_Free (completion_reaped); + return ( false ); + } + } + + DT_Mdep_Free (completion_reaped); + + return ( true ); +} + + +/* ----------------------------------------------------------- + * Verify whether we (the client side) can support + * the requested 'T' test. + */ +bool +DT_check_params (Per_Test_Data_t *pt_ptr, + unsigned char *module) +{ + Transaction_Cmd_t * cmd = &pt_ptr->Params.u.Transaction_Cmd; + unsigned long num_recvs = 0U; + unsigned long num_sends = 0U; + unsigned long num_rdma_rd = 0U; + unsigned long num_rdma_wr = 0U; + unsigned long max_size = 0U; + unsigned long max_segs = 0U; + bool rval = true; + unsigned int i; + + /* Count up what's requested (including -V appended sync points) */ + for (i = 0; i < cmd->num_ops; i++) + { + unsigned int xfer_size; + + xfer_size = cmd->op[i].num_segs * cmd->op[i].seg_size; + if (xfer_size > max_size) + { + max_size = xfer_size; + } + if (cmd->op[i].num_segs > max_segs) + { + max_segs = cmd->op[i].num_segs; + } + + switch (cmd->op[i].transfer_type) + { + case SEND_RECV: + { + if (cmd->op[i].server_initiated) + { + num_recvs++; + } + else + { + num_sends++; + } + break; + } + + case RDMA_READ: + { + num_rdma_rd++; + break; + } + + case RDMA_WRITE: + { + num_rdma_wr++; + break; + } + } + } + + /* + * Now check the IA and EP attributes, and check for some of the + * more obvious resource problems. This is hardly exhaustive, + * and some things will inevitably fall through to run-time. + * + * We don't compare + * num_rdma_rd > pt_ptr->ia_attr.max_rdma_read_per_ep + * num_rdma_wr > pt_ptr->ia_attr.max_dto_per_ep + * because each thread has its own EPs, and transfers are issued + * synchronously (across a thread's EPs, and ignoring -f, which allows + * a per-EP pipeline depth of at most 2 and applies only to SR ops), + * so dapltest actually attempts almost no pipelining on a single EP. + * But we do check that pre-posted recv buffers will all fit. + */ + if ((DAT_COUNT)num_recvs > pt_ptr->ia_attr.max_dto_per_ep || + (DAT_COUNT)num_sends > pt_ptr->ia_attr.max_dto_per_ep) + { + DT_Mdep_printf ( + "%s: S/R: cannot supply %ld SR ops (maximum: %d)\n", + module, + num_recvs > num_sends ? num_recvs : num_sends, + pt_ptr->ia_attr.max_dto_per_ep); + rval = false; + } + if (max_size > pt_ptr->ia_attr.max_lmr_block_size) + { + DT_Mdep_printf ( + "%s: buffer too large: 0x%lx (maximum: " F64x " bytes)\n", + module, + max_size, + pt_ptr->ia_attr.max_lmr_block_size); + rval = false; + } + if ((DAT_COUNT)max_segs > pt_ptr->ep_attr.max_recv_iov || + (DAT_COUNT)max_segs > pt_ptr->ep_attr.max_request_iov) + { + /* + * In an ideal world, we'd just ask for more segments + * when creating the EPs for the test, rather than + * checking against default EP attributes. 
+	 */
+	DT_Mdep_printf (
+	    "%s: cannot use %ld segments (maxima: S %d, R %d)\n",
+	    module,
+	    max_segs,
+	    pt_ptr->ep_attr.max_request_iov,
+	    pt_ptr->ep_attr.max_recv_iov );
+	rval = false;
+    }
+
+    return ( rval );
+}
+
+/* Empty function in which to set breakpoints. */
+void
+DT_Test_Error (void)
+{
+    ;
+}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_util.c b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_util.c
new file mode 100644
index 00000000..8ddf787d
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_util.c
@@ -0,0 +1,297 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#include "dapl_mdep.h"
+#include "dapl_proto.h"
+#include <string.h>
+
+
+/*
+ * Map DAT_RETURN values to readable strings,
+ * but don't assume the values are zero-based or contiguous.
+ */
+const char *
+DT_RetToString (DAT_RETURN ret_value)
+{
+    unsigned int i;
+    static struct
+    {
+	const char  *name;
+	DAT_RETURN  value;
+    } dat_errors[] =
+    {
+      # define DATxx(x) { # x, x }
+	DATxx (DAT_SUCCESS),
+	DATxx (DAT_ABORT),
+	DATxx (DAT_CONN_QUAL_IN_USE),
+	DATxx (DAT_INSUFFICIENT_RESOURCES),
+	DATxx (DAT_INTERNAL_ERROR),
+	DATxx (DAT_INVALID_HANDLE),
+	DATxx (DAT_INVALID_PARAMETER),
+	DATxx (DAT_INVALID_STATE),
+	DATxx (DAT_LENGTH_ERROR),
+	DATxx (DAT_MODEL_NOT_SUPPORTED),
+	DATxx (DAT_PROVIDER_NOT_FOUND),
+	DATxx (DAT_PRIVILEGES_VIOLATION),
+	DATxx (DAT_PROTECTION_VIOLATION),
+	DATxx (DAT_QUEUE_EMPTY),
+	DATxx (DAT_QUEUE_FULL),
+	DATxx (DAT_TIMEOUT_EXPIRED),
+	DATxx (DAT_PROVIDER_ALREADY_REGISTERED),
+	DATxx (DAT_PROVIDER_IN_USE),
+	DATxx (DAT_NOT_IMPLEMENTED)
+      # undef DATxx
+    };
+    # define NUM_ERRORS (sizeof(dat_errors)/sizeof(dat_errors[0]))
+
+    for (i = 0; i < NUM_ERRORS; i++)
+    {
+	if (dat_errors[i].value == DAT_GET_TYPE(ret_value))
+	{
+	    return ( dat_errors[i].name );
+	}
+    }
+    return ( "Invalid_DAT_RETURN" );
+}
+
+/*
+ * Map DT_Transfer_Type values to readable strings.
+ */
+const char *
+DT_TransferTypeToString (DT_Transfer_Type type)
+{
+    static char *DT_Type[] =
+    {
+	"RR",
+	"RW",
+	"SR"
+    };
+
+    if ( (0 <= type) && (type <= 2) )
+    {
+	return DT_Type[type];
+    }
+    else
+    {
+	return "Error: Unknown Transfer Type";
+    }
+}
+
+
+/*
+ * Map DAT_ASYNC_ERROR_CODE values to readable strings
+ */
+const char *
+DT_AsyncErr2Str (DAT_EVENT_NUMBER error_code)
+{
+    unsigned int i;
+    static struct
+    {
+	const char  *name;
+	DAT_RETURN  value;
+    } dat_errors[] =
+    {
+      # define DATxx(x) { # x, x }
+	DATxx (DAT_DTO_COMPLETION_EVENT),
+	DATxx (DAT_RMR_BIND_COMPLETION_EVENT),
+	DATxx (DAT_CONNECTION_REQUEST_EVENT),
+	DATxx (DAT_CONNECTION_EVENT_ESTABLISHED),
+	DATxx (DAT_CONNECTION_EVENT_PEER_REJECTED),
+	DATxx (DAT_CONNECTION_EVENT_NON_PEER_REJECTED),
+	DATxx (DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR),
+	DATxx (DAT_CONNECTION_EVENT_DISCONNECTED),
+	DATxx (DAT_CONNECTION_EVENT_BROKEN),
+	DATxx (DAT_CONNECTION_EVENT_TIMED_OUT),
+	DATxx (DAT_ASYNC_ERROR_EVD_OVERFLOW),
+	DATxx (DAT_ASYNC_ERROR_IA_CATASTROPHIC),
+	DATxx (DAT_ASYNC_ERROR_EP_BROKEN),
+	DATxx (DAT_ASYNC_ERROR_TIMED_OUT),
+	DATxx (DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR),
+	DATxx (DAT_SOFTWARE_EVENT)
+      # undef DATxx
+    };
+    # define NUM_ERRORS (sizeof(dat_errors)/sizeof(dat_errors[0]))
+
+    for (i = 0; i < NUM_ERRORS; i++)
+    {
+	if (dat_errors[i].value == error_code)
+	{
+	    return ( dat_errors[i].name );
+	}
+    }
+
+    return ( "Invalid_DAT_EVENT_NUMBER" );
+}
+
+/*
+ * Map DAT_EVENT_CODE values to readable strings
+ */
+const char *
+DT_EventToSTr (DAT_EVENT_NUMBER event_code)
+{
+    unsigned int i;
+    static struct
+    {
+	const char  *name;
+	DAT_RETURN  value;
+    }
+    dat_events[] =
+    {
+      # define DATxx(x) { # x, x }
+	DATxx (DAT_DTO_COMPLETION_EVENT),
+	DATxx (DAT_RMR_BIND_COMPLETION_EVENT),
+	DATxx (DAT_CONNECTION_REQUEST_EVENT),
+	DATxx (DAT_CONNECTION_EVENT_ESTABLISHED),
+	DATxx (DAT_CONNECTION_EVENT_PEER_REJECTED),
+	DATxx (DAT_CONNECTION_EVENT_NON_PEER_REJECTED),
+	DATxx (DAT_CONNECTION_EVENT_ACCEPT_COMPLETION_ERROR),
+	DATxx (DAT_CONNECTION_EVENT_DISCONNECTED),
+	DATxx (DAT_CONNECTION_EVENT_BROKEN),
+	DATxx (DAT_CONNECTION_EVENT_TIMED_OUT),
+	DATxx (DAT_CONNECTION_EVENT_UNREACHABLE),
+	DATxx (DAT_ASYNC_ERROR_EVD_OVERFLOW),
+	DATxx (DAT_ASYNC_ERROR_IA_CATASTROPHIC),
+	DATxx (DAT_ASYNC_ERROR_EP_BROKEN),
+	DATxx (DAT_ASYNC_ERROR_TIMED_OUT),
+	DATxx (DAT_ASYNC_ERROR_PROVIDER_INTERNAL_ERROR),
+	DATxx (DAT_SOFTWARE_EVENT)
+      # undef DATxx
+    };
+    # define NUM_EVENTS (sizeof(dat_events)/sizeof(dat_events[0]))
+
+    for (i = 0; i < NUM_EVENTS; i++)
+    {
+	if (dat_events[i].value == event_code)
+	{
+	    return ( dat_events[i].name );
+	}
+    }
+
+    return ( "Invalid_DAT_EVENT_NUMBER" );
+}
+
+
+/*
+ * Map DAT_EP_STATE_CODE values to readable strings
+ */
+const char *
+DT_State2Str (DAT_EP_STATE state_code)
+{
+    unsigned int i;
+    static struct
+    {
+	const char  *name;
+	DAT_RETURN  value;
+    }
+    dat_state[] =
+    {
+      # define DATxx(x) { # x, x }
+	DATxx (DAT_EP_STATE_UNCONNECTED),
+	DATxx (DAT_EP_STATE_RESERVED),
+	DATxx (DAT_EP_STATE_PASSIVE_CONNECTION_PENDING),
+	DATxx (DAT_EP_STATE_ACTIVE_CONNECTION_PENDING),
+	DATxx (DAT_EP_STATE_TENTATIVE_CONNECTION_PENDING),
+	DATxx (DAT_EP_STATE_CONNECTED),
+	DATxx (DAT_EP_STATE_DISCONNECT_PENDING),
+	DATxx (DAT_EP_STATE_ERROR)
+      # undef DATxx
+    };
+    # define NUM_STATES (sizeof(dat_state)/sizeof(dat_state[0]))
+
+    for (i = 0; i < NUM_STATES; i++)
+    {
+	if (dat_state[i].value == state_code)
+	{
+	    return ( dat_state[i].name );
+	}
+    }
+
+    return ( "Invalid_DAT_STATE_NUMBER" );
+}
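The tables above all rely on the same trick: the DATxx(x) macro uses the preprocessor's stringizing operator (# x) to expand each code to both its name string and its value, so the table cannot drift out of sync if the enum values are renumbered. A standalone sketch of the pattern (illustrative names, not part of the patch):

    #include <stdio.h>

    enum color { RED = 10, GREEN = 20, BLUE = 30 };

    /* Same technique as the DATxx tables: '# x' stringizes the argument,
     * so each entry carries its own name alongside its value. */
    static const struct { const char *name; enum color value; } color_names[] =
    {
    #define COLORxx(x) { # x, x }
        COLORxx (RED),
        COLORxx (GREEN),
        COLORxx (BLUE)
    #undef COLORxx
    };

    int main (void)
    {
        unsigned int i;

        for (i = 0; i < sizeof (color_names) / sizeof (color_names[0]); i++)
            printf ("%s = %d\n", color_names[i].name, color_names[i].value);
        return 0;
    }

+
+
+/*
+ * Parse a QOS argument into a DAT_QOS.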
+ *
+ * Returns no errors: defaults to best effort.
+ */
+DAT_QOS
+DT_ParseQoS (char *arg)
+{
+    if (0 == strcmp (arg, "HT"))
+    {
+	return ( DAT_QOS_HIGH_THROUGHPUT );
+    }
+
+    if (0 == strcmp (arg, "LL"))
+    {
+	return ( DAT_QOS_LOW_LATENCY );
+    }
+
+    if (0 == strcmp (arg, "EC"))
+    {
+	return ( DAT_QOS_ECONOMY );
+    }
+
+    if (0 == strcmp (arg, "PM"))
+    {
+	return ( DAT_QOS_PREMIUM );
+    }
+    /*
+     * Default to "BE" so no point in checking further
+     */
+    return ( DAT_QOS_BEST_EFFORT );
+}
+
+
+/*
+ * A couple of round-up routines (for pointers and counters)
+ * which both assume a power-of-two 'align' factor,
+ * and do the correct thing if align == 0.
+ */
+unsigned char *
+DT_AlignPtr (void * val, unsigned int align)
+{
+    if (align)
+    {
+	return ( (unsigned char *)
+	    (((DAT_UVERYLONG)val + ((DAT_UVERYLONG)align) - 1) & ~ (((DAT_UVERYLONG)align) - 1)));
+    }
+    return (val);
+}
+
+DAT_COUNT
+DT_RoundSize (DAT_COUNT val, DAT_COUNT align)
+{
+    if (align)
+    {
+	return ( ((val + align - 1) & ~ (align - 1)) );
+    }
+    return ( val );
+}
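Both helpers round up with the usual power-of-two identity, (val + align - 1) & ~(align - 1); with align = 64, for instance, 0x1001 rounds up to 0x1040 without a division. A standalone sketch (illustrative, not part of the patch):

    #include <stdio.h>
    #include <stdint.h>

    /* Power-of-two round-up, as in DT_AlignPtr/DT_RoundSize; align == 0
     * means "no alignment requested" and returns the value unchanged. */
    static uintptr_t round_up (uintptr_t val, uintptr_t align)
    {
        return align ? ((val + align - 1) & ~(align - 1)) : val;
    }

    int main (void)
    {
        printf ("0x%lx\n", (unsigned long) round_up (0x1001, 64));  /* 0x1040 */
        printf ("0x%lx\n", (unsigned long) round_up (0x1040, 64));  /* 0x1040 */
        return 0;
    }

diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_version.h b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_version.h
new file mode 100644
index 00000000..a625c462
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/dapl_version.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved.
+ *
+ * This Software is licensed under either one of the following two licenses:
+ *
+ * 1) under the terms of the "Common Public License 1.0" a copy of which is
+ *    in the file LICENSE.txt in the root directory. The license is also
+ *    available from the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/cpl.php.
+ * OR
+ *
+ * 2) under the terms of the "The BSD License" a copy of which is in the file
+ *    LICENSE2.txt in the root directory. The license is also available from
+ *    the Open Source Initiative, see
+ *    http://www.opensource.org/licenses/bsd-license.php.
+ *
+ * Licensee has the right to choose either one of the above two licenses.
+ *
+ * Redistributions of source code must retain both the above copyright
+ * notice and either one of the license notices.
+ *
+ * Redistributions in binary form must reproduce both the above copyright
+ * notice, either one of the license notices in the documentation
+ * and/or other materials provided with the distribution.
+ */
+
+#ifndef __DAPL_VERSION_H
+#define __DAPL_VERSION_H
+/*
+ * Dapltest version number
+ *
+ * This should be bumped every time the "cross-the-wire" behavior changes.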
+ */ + +#define DAPLTEST_VERSION 0x00000005 + +#endif diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_block.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_block.sh new file mode 100644 index 00000000..572d9ba0 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_block.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# +# Sample client invocation +# +# +me=`basename $0` +case $# in +0) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;; +1) host=$1 + device=IbalHca0 + size=4 ;; +2) host=$1 + device=IbalHca0 + size=$2 ;; +3) host=$1 + device=$3 + size=$2 ;; +*) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;; +esac + +./dapltest -T P -d -i 1024 -s ${host} -D ${device} \ + -p 1 -m b RW ${size} 1 diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_poll.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_poll.sh new file mode 100644 index 00000000..f79edb11 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lat_poll.sh @@ -0,0 +1,22 @@ +#!/bin/sh +# +# Sample client invocation +# +# +me=`basename $0` +case $# in +0) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;; +1) host=$1 + device=IbalHca0 + size=4 ;; +2) host=$1 + device=IbalHca0 + size=$2 ;; +3) host=$1 + device=$3 + size=$2 ;; +*) echo Usage: $me '[hostname [size [device]]]' 1>&2 ; exit 1;; +esac + +./dapltest -T P -d -i 1024 -s ${host} -D ${device} \ + -p 1 -m p RW ${size} 1 diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/lim.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lim.sh new file mode 100644 index 00000000..350316ea --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/lim.sh @@ -0,0 +1,17 @@ +#!/bin/sh + +me=`basename $0` + +case $# in +0) device=IbalHca0 ;; +1) device=$1 ;; +*) echo Usage: $me '[device]' 1>&2 ; exit 1;; +esac + +# +# -d debug verbosity +# -w width sets up 'width' sets of IA,PZ,EVD,EP,LMR,RMR,... +# -m maximum provides a bound on exhaustion tests +# +./dapltest -T L -D ${device} -d -w 8 -m 100 limit_ia limit_pz limit_evd \ + limit_ep limit_psp limit_lmr limit_rpost diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile b/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile new file mode 100644 index 00000000..d4938551 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile.wnd b/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile.wnd new file mode 100644 index 00000000..6464da17 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/makefile.wnd @@ -0,0 +1,179 @@ +# +# Copyright (c) 2002-2003, Network Appliance, Inc. All rights reserved. +# +# This Software is licensed under either one of the following two licenses: +# +# 1) under the terms of the "Common Public License 1.0" a copy of which is +# in the file LICENSE.txt in the root directory. The license is also +# available from the Open Source Initiative, see +# http://www.opensource.org/licenses/cpl.php. +# OR +# +# 2) under the terms of the "The BSD License" a copy of which is in the file +# LICENSE2.txt in the root directory. The license is also available from +# the Open Source Initiative, see +# http://www.opensource.org/licenses/bsd-license.php. 
+#
+# Licensee has the right to choose either one of the above two licenses.
+#
+# Redistributions of source code must retain both the above copyright
+# notice and either one of the license notices.
+#
+# Redistributions in binary form must reproduce both the above copyright
+# notice, either one of the license notices in the documentation
+# and/or other materials provided with the distribution.
+#
+
+#*********************************************************************
+#
+# NMAKE Options (passed by macro)
+#
+#	Option			Invoke NMAKE
+#	-----------------------	-----------------------
+#	explicit linking	nmake EXPLICIT_LINK=1
+#
+#*********************************************************************/
+
+
+#*********************************************************************
+#
+# Dot Directives
+#
+#*********************************************************************/
+
+.SUFFIXES :	# clear the .SUFFIXES list
+.SUFFIXES : .c	# initialize .SUFFIXES list
+
+
+#*********************************************************************
+#
+# Macros
+#
+#*********************************************************************/
+
+DAT_PATH	= ../../../dat
+
+OBJ_PATH	= Obj
+TARGET_PATH	= Target
+
+OBJS =	$(OBJ_PATH)/dapl_bpool.obj \
+	$(OBJ_PATH)/dapl_client.obj \
+	$(OBJ_PATH)/dapl_client_info.obj \
+	$(OBJ_PATH)/dapl_cnxn.obj \
+	$(OBJ_PATH)/dapl_endian.obj \
+	$(OBJ_PATH)/dapl_fft_cmd.obj \
+	$(OBJ_PATH)/dapl_fft_connmgt.obj \
+	$(OBJ_PATH)/dapl_fft_dataxfer.obj \
+	$(OBJ_PATH)/dapl_fft_dataxfer_client.obj \
+	$(OBJ_PATH)/dapl_fft_endpoint.obj \
+	$(OBJ_PATH)/dapl_fft_hwconn.obj \
+	$(OBJ_PATH)/dapl_fft_mem.obj \
+	$(OBJ_PATH)/dapl_fft_pz.obj \
+	$(OBJ_PATH)/dapl_fft_queryinfo.obj \
+	$(OBJ_PATH)/dapl_fft_test.obj \
+	$(OBJ_PATH)/dapl_fft_util.obj \
+	$(OBJ_PATH)/dapl_getopt.obj \
+	$(OBJ_PATH)/dapl_limit.obj \
+	$(OBJ_PATH)/dapl_limit_cmd.obj \
+	$(OBJ_PATH)/dapl_main.obj \
+	$(OBJ_PATH)/dapl_mdep.obj \
+	$(OBJ_PATH)/dapl_memlist.obj \
+	$(OBJ_PATH)/dapl_netaddr.obj \
+	$(OBJ_PATH)/dapl_params.obj \
+	$(OBJ_PATH)/dapl_performance_client.obj \
+	$(OBJ_PATH)/dapl_performance_cmd.obj \
+	$(OBJ_PATH)/dapl_performance_server.obj \
+	$(OBJ_PATH)/dapl_performance_stats.obj \
+	$(OBJ_PATH)/dapl_performance_util.obj \
+	$(OBJ_PATH)/dapl_quit_cmd.obj \
+	$(OBJ_PATH)/dapl_server.obj \
+	$(OBJ_PATH)/dapl_server_cmd.obj \
+	$(OBJ_PATH)/dapl_server_info.obj \
+	$(OBJ_PATH)/dapl_test_data.obj \
+	$(OBJ_PATH)/dapl_test_util.obj \
+	$(OBJ_PATH)/dapl_thread.obj \
+	$(OBJ_PATH)/dapl_transaction_cmd.obj \
+	$(OBJ_PATH)/dapl_transaction_stats.obj \
+	$(OBJ_PATH)/dapl_transaction_test.obj \
+	$(OBJ_PATH)/dapl_transaction_util.obj \
+	$(OBJ_PATH)/dapl_util.obj
+
+EXEC = dapltest.exe
+
+#
+# Compiler
+#
+
+CC = cl
+
+INC_FLAGS = \
+	/I $(DAT_PATH)/include \
+	/I $(DAT_PATH)/common \
+	/I $(DAT_PATH)/udat \
+	/I $(DAT_PATH)/udat/windows
+
+CC_FLAGS= \
+	/nologo /Zel /Zp1 /Gy /W3 /Gd /QIfdiv- /QIf /QI0f /GB /Gi- /Gm- /GX- \
+	/GR- /GF -Z7 /Od /Oi /Oy- $(INC_FLAGS) \
+	/DWIN32 /D_X86_ /D__i386__ /D__PENTIUM__ /DDAT_THREADSAFE=DAT_FALSE
+
+#
+# Linker
+#
+
+LINK = link
+
+LIBS = libc.lib ws2_32.lib advapi32.lib User32.lib $(DAT_PATH)/udat/Debug/udat.lib
+
+LINK_FLAGS = \
+	/nologo /subsystem:console /DEBUG /incremental:yes /machine:I386 $(LIBS)
+
+#LIBS = ws2_32.lib advapi32.lib $(DAT_PATH)/udat/Target/UDAT.lib
+
+# if the provider library should be explicitly linked
+!IFDEF EXPLICIT_LINK
+# in addition to providers listed in the DAT static registry
+# the specified provider will be available to the consumer
+DAPL_PATH =
../../../dapl +# +# the /INCLUDE option is used to force a symbol reference to the DAPL +# provider library. If there are no references, Windows will not load +# the DAPL library when dapltest is executed. +# +LIBS = $(LIBS) $(DAPL_PATH)/udapl/Debug/dapl.lib /INCLUDE:_dapl_ia_open +!ENDIF + +# +# System Utilities +# + +RM = rm -f + + +#********************************************************************* +# Inference Rules +# +#*********************************************************************/ + +.c{$(OBJ_PATH)}.obj: + $(CC) $(CC_FLAGS) /Fo$@ /c $< + + +#********************************************************************* +# +# Description Blocks +# +#*********************************************************************/ + +all : mkdirs $(EXEC) + +mkdirs: + if not exist "$(OBJ_PATH)" mkdir "$(OBJ_PATH)" + if not exist "$(TARGET_PATH)" mkdir "$(TARGET_PATH)" + +$(EXEC) : $(OBJS) + $(LINK) $(LINK_FLAGS) /out:$(EXEC) $(OBJS) + +clean: + $(RM) $(OBJS) + $(RM) $(EXEC) diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/quit.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/quit.sh new file mode 100644 index 00000000..75c040f0 --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/quit.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +me=`basename $0` +case $# in +0) echo Usage: $me '[hostname [device] ]' 1>&2 ; exit 1;; +1) host=$1 + device=IbalHca0 ;; +2) host=$1 + device=$2 ;; +*) echo Usage: $me '[hostname [device] ]' 1>&2 ; exit 1;; +esac + + +# +# -d debug verbosity +# -w width sets up 'width' sets of IA,PZ,EVD,EP,LMR,RMR,... +# -m maximum provides a bound on exhaustion tests +# +./dapltest -T Q -D ${device} -s ${host} diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/regress.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/regress.sh new file mode 100644 index 00000000..9757e1ab --- /dev/null +++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/regress.sh @@ -0,0 +1,66 @@ +#!/bin/sh +# +# Sample regression client invocation +# + +me=`basename $0` +case $# in +0) echo Usage: $me '[hostname [device] ]' 1>&2 ; exit 1;; +1) host=$1 + device=IbalHca0 ;; +2) host=$1 + device=$2 ;; +*) echo Usage: $me '[hostname [device] ]' 1>&2 ; exit 1;; +esac + + +#==================================================================== +#client1 +#==================================================================== +./dapltest -T T -s ${host} -D ${device} -d -i 10000 -t 1 -w 1 \ + client SR 256 \ + server SR 256 + +#==================================================================== +#client2 +#==================================================================== +./dapltest -T T -s ${host} -D ${device} -d -i 10000 -t 1 -w 1 \ + client SR 256 \ + client RW 4096 \ + server SR 256 + +#==================================================================== +#client3 +#==================================================================== +./dapltest -T T -s ${host} -D ${device} -d -i 10000 -t 1 -w 1 \ + client SR 256 \ + client RR 4096 \ + server SR 256 + +#==================================================================== +#client4 +#==================================================================== +./dapltest -T T -s ${host} -D ${device} -d -i 10000 -t 1 -w 1 \ + client SR 256 \ + client RW 4096 \ + server SR 256 \ + client SR 256 \ + client RR 4096 \ + server SR 256 \ + client SR 4096 \ + server SR 256 + +#==================================================================== +#client5 +#==================================================================== +./dapltest -T T -s ${host} -D ${device} -d -i 10000 -t 4 -w 8 \ 
+	client SR 256 \
+	client RW 4096 \
+	server SR 256 \
+	client SR 256 \
+	client RR 4096 \
+	server SR 256 \
+	client SR 4096 \
+	server SR 256
+
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dapltest/srv.sh b/branches/Ndi/ulp/dapl/test/udapl/dapltest/srv.sh
new file mode 100644
index 00000000..45d4b1f1
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dapltest/srv.sh
@@ -0,0 +1,15 @@
+#!/bin/sh
+#
+# Sample server invocation
+#
+me=`basename $0`
+case $# in
+0) device=IbalHca0;;
+1) device=$1 ;;
+*) echo Usage: $me '[device]' 1>&2 ; exit 1;;
+esac
+#
+#
+# ./dapltest -T S -d -D ${device}
+
+ ./dapltest -T S -d -D ${device}
diff --git a/branches/Ndi/ulp/dapl/test/udapl/dirs b/branches/Ndi/ulp/dapl/test/udapl/dirs
new file mode 100644
index 00000000..fc0447f1
--- /dev/null
+++ b/branches/Ndi/ulp/dapl/test/udapl/dirs
@@ -0,0 +1 @@
+DIRS=dapltest
diff --git a/branches/Ndi/ulp/dirs b/branches/Ndi/ulp/dirs
new file mode 100644
index 00000000..e5ba0c88
--- /dev/null
+++ b/branches/Ndi/ulp/dirs
@@ -0,0 +1,7 @@
+DIRS=\
+	opensm \
+	dapl \
+	ipoib \
+	srp \
+	wsd \
+	inic
diff --git a/branches/Ndi/ulp/inic/dirs b/branches/Ndi/ulp/inic/dirs
new file mode 100644
index 00000000..ed41dcf4
--- /dev/null
+++ b/branches/Ndi/ulp/inic/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+	kernel
diff --git a/branches/Ndi/ulp/inic/kernel/SOURCES b/branches/Ndi/ulp/inic/kernel/SOURCES
new file mode 100644
index 00000000..d56ea8cf
--- /dev/null
+++ b/branches/Ndi/ulp/inic/kernel/SOURCES
@@ -0,0 +1,75 @@
+########################################################################
+#
+# Copyright(c) Infinicon Systems All rights reserved.
+#
+########################################################################
+
+
+# The TARGETNAME. This is the name of the item being built (without the
+# extension).
+TARGETNAME=vnic
+
+########################################################################
+# The path where all binaries are built.
+#
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+
+########################################################################
+# The type of item that is being built. This is mandatory.
+#	Value		Meaning
+#	DYNLINK		- A DLL.
+#	DRIVER		- A kernel device driver.
+#	EXPORT_DRIVER	- A kernel device driver with exports.
+#	PROGRAM		- A windows executable.
+#	PROGLIB		- A windows library.
+#	MINIPORT	- A miniport driver.
+#	GDI_DRIVER	- A video driver.
+#	LIBRARY		- A library
+TARGETTYPE=MINIPORT
+
+########################################################################
+# The type of driver being built. This affects the include paths.
+# Comment out for non-WDM drivers.
+#DRIVERTYPE=WDM
+
+#
+########################################################################
+# All the source files in this project.
+# +SOURCES= inic.rc \ + vnic_driver.c \ + vnic_adapter.c \ + vnic_ib.c \ + vnic_control.c \ + vnic_data.c \ + vnic_netpath.c \ + vnic_viport.c \ + + + + +INCLUDES=..;..\..\..\inc;..\..\..\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DNDIS_MINIPORT_DRIVER -DNDIS_WDM=1 \ + -DDEPRECATE_DDK_FUNCTIONS -DNDIS51_MINIPORT -DNEED_CL_OBJ -DBINARY_COMPATIBLE=0 + +TARGETLIBS= \ + $(DDK_LIB_PATH)\ntoskrnl.lib \ + $(DDK_LIB_PATH)\hal.lib \ + $(DDK_LIB_PATH)\ndis.lib \ + $(TARGETPATH)\*\complib.lib + +#!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K" +# +# The driver is built in the Win2K build environment +# - use the library version of safe strings +# +#TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib +#!endif + +######################################################################## +# Set the warning levels to maximum. +MSC_WARNING_LEVEL= /W4 +# + + diff --git a/branches/Ndi/ulp/inic/kernel/inic.rc b/branches/Ndi/ulp/inic/kernel/inic.rc new file mode 100644 index 00000000..93bede17 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/inic.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_DRV_NETWORK + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Virtual Nic NDIS Miniport (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Virtual Nic NDIS Miniport" +#endif + +#define VER_INTERNALNAME_STR "vnic.sys" +#define VER_ORIGINALFILENAME_STR "vnic.sys" + +#include diff --git a/branches/Ndi/ulp/inic/kernel/makefile b/branches/Ndi/ulp/inic/kernel/makefile new file mode 100644 index 00000000..128ed372 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/inic/kernel/netvnic.inf b/branches/Ndi/ulp/inic/kernel/netvnic.inf new file mode 100644 index 00000000..42482a26 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/netvnic.inf @@ -0,0 +1,146 @@ +; SilverStorm Technologies Ethernet over Infiniband NIC. +; Copyright 2006 SilverStorm Technologies all Rights Reserved. 
+ +[Version] +Signature="$Windows NT$" +Class=Net +ClassGUID={4d36e972-e325-11ce-bfc1-08002be10318} +Provider=%Inf_Provider% +DriverVer=03/08/2006,1.0.0000.614 + +[ControlFlags] +ExcludeFromSelect = IBA\V00066aP00000030 + +; ================= Device Install section ===================== + +[DestinationDirs] +DefaultDestDir=12 + +[SourceDisksNames.x86] +1=%DiskId%,,,\x86 + +[SourceDisksNames.amd64] +1=%DiskId%,,,\amd64 + +[SourceDisksNames.ia64] +1=%DiskId%,,,\ia64 + +[SourceDisksFiles] +vnic.sys=1 + +[Manufacturer] +%Inf_Provider% = VNIC.DeviceSection,ntx86,ntamd64,ntia64 + +[VNIC.DeviceSection] +; empty since we don't support W9x/Me + +[VNIC.DeviceSection.ntx86] +%VNIC.DeviceDesc% = VNIC.DDInstall,IBA\V00066aP00000030S00066as00000030v0001, \ + IBA\V00066aP00000030S00066as00000030, \ + IBA\V00066aP00000030v0001, \ + IBA\V00066aP00000030 + +[VNIC.DeviceSection.ntamd64] +%VNIC.DeviceDesc% = VNIC.DDInstall,IBA\V00066aP00000030S00066as00000030v0001, \ + IBA\V00066aP00000030S00066as00000030, \ + IBA\V00066aP00000030v0001, \ + IBA\V00066aP00000030 + +[VNIC.DeviceSection.ntia64] +%VNIC.DeviceDesc% = VNIC.DDInstall,IBA\V00066aP00000030S00066as00000030v0001, \ + IBA\V00066aP00000030S00066as00000030, \ + IBA\V00066aP00000030v0001, \ + IBA\V00066aP00000030 + +[VNIC.DDInstall.ntx86] +Characteristics = %CHARACTERISTICS% +AddReg = VNIC.AddReg +CopyFiles = VNIC.CopyFiles + +[VNIC.DDInstall.ntamd64] +Characteristics = %CHARACTERISTICS% +AddReg = VNIC.AddReg +CopyFiles = VNIC.CopyFiles + +[VNIC.DDInstall.ntia64] +Characteristics = %CHARACTERISTICS% +AddReg = VNIC.AddReg +CopyFiles = VNIC.CopyFiles + +[VNIC.DDInstall.ntx86.Services] +AddService = vnic,%SPSVCINST_ASSOCSERVICE%,VNIC.ServiceInstall,VNIC.EventLogInstall + +[VNIC.DDInstall.ntamd64.Services] +AddService = vnic,%SPSVCINST_ASSOCSERVICE%,VNIC.ServiceInstall,VNIC.EventLogInstall + +[VNIC.DDInstall.ntia64.Services] +AddService = vnic,%SPSVCINST_ASSOCSERVICE%,VNIC.ServiceInstall,VNIC.EventLogInstall + +[VNIC.CopyFiles] +vnic.sys + +[VNIC.AddReg] +HKR, Ndi, Service, 0, "vnic" +HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" +HKR, Ndi\Interfaces, LowerRange, 0, "ethernet" + + +; ============= Service Install section ============== + +[VNIC.ServiceInstall] +DisplayName = %VNIC.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\vnic.sys +LoadOrderGroup = NDIS +AddReg = VNIC.ParamsReg + +[VNIC.ParamsReg] +HKR,"Params\PnpInterface",%InternalBus%,%REG_DWORD%,1 +HKR,"Params\PnpInterface",%PNPBus%,%REG_DWORD%,1 + +HKR, Params, MinMtu,, "1500" +HKR, Params, MaxMtu,, "9500" +HKR, Params, MinHostPoolSz,, "256" +HKR, Params, HostRecvPoolEntries,, "512" +HKR, Params, MinEiocPoolSz,, "256" +HKR, Params, MaxEiocPoolSz,, "512" +HKR, Params, MinHostKickTimeout,, "50" +HKR, Params, MaxHostKickTimeout,, "100" +HKR, Params, MinHostKickEntries,, "1" +HKR, Params, MaxHostKickEntries,, "64" +HKR, Params, MinHostKickBytes,, "0" +HKR, Params, MaxHostKickBytes,, "5000" +HKR, Params, MinHostUpdateSz,, "8" +HKR, Params, MaxHostUpdateSz,, "32" +HKR, Params, MinEiocUpdateSz,, "8" +HKR, Params, MaxEiocUpdateSz,, "32" +HKR, Params, UseRxCsum, %REG_DWORD%,1 +HKR, Params, UseTxCsum, %REG_DWORD%,1 + +[VNIC.EventLogInstall] +AddReg = VNIC.EventLogAddReg + +[VNIC.EventLogAddReg] +HKR,,EventMessageFile,%REG_EXPAND_SZ%,"%%SystemRoot%%\System32\netevent.dll" +HKR,,TypesSupported,%REG_DWORD%,7 + +[Strings] +NetClassGuid = "{4d36e972-e325-11ce-bfc1-08002be10318}" +Inf_Provider = "SilverStorm 
Technologies" +VNIC.DeviceDesc = "Ethernet over InfiniBand Virtual NIC" +VNIC.ServiceDesc = "Virtual NIC" +DiskId = "SilverStorm Ethernet over InfiniBand Virtual NIC installation disk" +InternalBus = 0 +PNPBus = 15 +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_DWORD_NO_CLOBBER = 0x00010003 +REG_EXPAND_SZ = 0x00020000 +CHARACTERISTICS = 0x81 ; NCF_VIRTUAL | NCF_HAS_UI + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_adapter.c b/branches/Ndi/ulp/inic/kernel/vnic_adapter.c new file mode 100644 index 00000000..afe25797 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_adapter.c @@ -0,0 +1,1120 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include +#include +#include +#include "vnic_adapter.h" + +//#include "vnic_driver.h" + +extern struct _vnic_globals g_vnic; + +NDIS_STATUS +vnic_get_adapter_params( + IN NDIS_HANDLE h_handle, + OUT vnic_params_t* const p_params ); + +NDIS_STATUS +vnic_get_adapter_interface( + IN NDIS_HANDLE h_handle, + IN vnic_adapter_t *p_adapter); + +static ib_api_status_t +__path_record_add( + IN vnic_adapter_t *p_adapter, + IN ib_path_rec_t *p_path_rec ); + +static ib_api_status_t +__path_record_remove( + IN vnic_adapter_t *p_adapter, + IN ib_path_rec_t *p_path_rec ); + +static BOOLEAN +__path_records_match( + IN ib_path_rec_t *p_path1, + IN ib_path_rec_t *p_path2 ); + +static void +__path_records_cleanup( + IN vnic_adapter_t *p_adapter ); + +static ib_api_status_t +_vnic_get_ca_info( + IN vnic_adapter_t *p_adapter ); + +static inline uint8_t +_get_ioc_num_from_iocguid( + IN ib_net64_t *p_iocguid ); + +static BOOLEAN +__sid_valid( + IN vnic_adapter_t *p_adapter, + IN ib_net64_t sid ); + +static void +_vnic_viport_free( + IN vnic_adapter_t* const p_adapter ); + +ib_api_status_t +vnic_create_adapter( + IN NDIS_HANDLE h_handle, + IN NDIS_HANDLE wrapper_config_context, + OUT vnic_adapter_t** const pp_adapter) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + vnic_adapter_t *p_adapter; + KIRQL irql; + + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + status = NdisAllocateMemoryWithTag( &p_adapter, sizeof(vnic_adapter_t), 'pada'); + + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT(VNIC_DBG_ERROR,("Failed to allocate adapter\n")); + return IB_INSUFFICIENT_MEMORY; + } + + NdisZeroMemory( p_adapter, sizeof(vnic_adapter_t) ); + + NdisAllocateSpinLock( &p_adapter->lock ); + + status = vnic_get_adapter_params( wrapper_config_context, &p_adapter->params ); + + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + (" vnic_get_adapter_params failed with status %d\n", status)); + NdisFreeMemory( p_adapter, sizeof(vnic_adapter_t), 0 ); + return IB_INVALID_PARAMETER; + } + + status = vnic_get_adapter_interface( h_handle, p_adapter ); + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("failed status %x\n", status ) ); + NdisFreeMemory( p_adapter, sizeof(vnic_adapter_t), 0 ); + return IB_INVALID_PARAMETER; + } + + /*Open AL */ + ib_status = p_adapter->ifc.open_al( &p_adapter->h_al ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ib_open_al returned %s\n", p_adapter->ifc.get_err_str( ib_status )) ); + NdisFreeMemory( p_adapter, sizeof(vnic_adapter_t), 0 ); + return ib_status; + } + + /* ca is opened here */ + ib_status = _vnic_get_ca_info( p_adapter ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE ( VNIC_DBG_ERROR, + ("_get_ca_info return status %s\n", p_adapter->ifc.get_err_str( ib_status )) ); + NdisFreeMemory( p_adapter, sizeof(vnic_adapter_t), 0 ); + return ib_status; + } + + KeAcquireSpinLock( &g_vnic.lock, &irql ); + InsertTailList( &g_vnic.adapter_list, &p_adapter->list_entry ); + InterlockedIncrement( &g_vnic.adapters ); + KeReleaseSpinLock( &g_vnic.lock, irql ); + + p_adapter->h_handle = h_handle; + *pp_adapter = p_adapter; + + VNIC_EXIT( VNIC_DBG_ADAPTER ); + return IB_SUCCESS; +} + + +void +vnic_destroy_adapter( + IN vnic_adapter_t *p_adapter) +{ + ib_api_status_t ib_status = IB_SUCCESS; + KIRQL irql; + + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + ASSERT( KeGetCurrentIrql() < DISPATCH_LEVEL ); + + KeAcquireSpinLock( &g_vnic.lock, &irql ); + RemoveEntryList( &p_adapter->list_entry ); + InterlockedDecrement( 
&g_vnic.adapters ); + KeReleaseSpinLock( &g_vnic.lock, irql ); + + if( p_adapter->h_pnp ) + { + ib_status = + p_adapter->ifc.dereg_pnp( p_adapter->h_pnp, ib_sync_destroy ); + } + + vnic_viport_cleanup( p_adapter ); + + if ( p_adapter->ca.region.h_mr ) + { + ib_status = p_adapter->ifc.dereg_mr(p_adapter->ca.region.h_mr ); + ASSERT( ib_status == IB_SUCCESS ); + } + + if( p_adapter->ca.hPd ) + { + ib_status = p_adapter->ifc.dealloc_pd( p_adapter->ca.hPd, NULL ); + ASSERT( ib_status == IB_SUCCESS ); + } + + if ( p_adapter->h_ca ) + { + ib_status = p_adapter->ifc.close_ca( p_adapter->h_ca, NULL ); + ASSERT( ib_status == IB_SUCCESS ); + } + + if ( p_adapter->h_al ) + { + ib_status = p_adapter->ifc.close_al( p_adapter->h_al ); + ASSERT( ib_status == IB_SUCCESS ); + } + + if ( p_adapter->p_svc_entries ) + cl_free ( p_adapter->p_svc_entries ); + + NdisFreeSpinLock( &p_adapter->lock ); + NdisFreeMemory( p_adapter, sizeof(vnic_adapter_t), 0 ); + + VNIC_EXIT( VNIC_DBG_ADAPTER ); +} + + +static ib_api_status_t +_vnic_get_ca_info( + IN vnic_adapter_t *p_adapter ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_al_ifc_t *p_ifc = &p_adapter->ifc; + uint32_t attr_size; + ib_ca_attr_t *p_ca_attrs; + uint32_t num; + uint64_t start_addr = 0; + + ib_status = p_ifc->open_ca( p_adapter->h_al, + p_adapter->ifc_data.ca_guid, + ib_asyncEvent, + p_adapter, + &p_adapter->h_ca ); + + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Failed to open hca\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + ib_status = p_ifc->query_ca( p_adapter->h_ca, NULL , &attr_size ); + if( ib_status != IB_INSUFFICIENT_MEMORY ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ib_query_ca failed status %s\n", + p_ifc->get_err_str( ib_status )) ); + return IB_INSUFFICIENT_RESOURCES; + } + + ASSERT( attr_size ); + + p_ca_attrs = cl_zalloc( attr_size ); + if ( p_ca_attrs == NULL ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Allocate %d bytes failed for Channel adapter\n", attr_size )); + return IB_INSUFFICIENT_MEMORY; + } + + ib_status = p_ifc->query_ca( p_adapter->h_ca, p_ca_attrs , &attr_size ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Query failed for channel adapter\n") ); + cl_free ( p_ca_attrs ); + return ib_status; + } + + p_adapter->ca.numPorts = p_ca_attrs->num_ports; + if( p_adapter->ca.numPorts > VNIC_CA_MAX_PORTS ) + p_adapter->ca.numPorts = VNIC_CA_MAX_PORTS; + + for( num = 0; num < p_adapter->ca.numPorts; num++ ) + p_adapter->ca.portGuids[num] = p_ca_attrs->p_port_attr[num].port_guid; + + p_adapter->ca.caGuid = p_adapter->ifc_data.ca_guid; + + cl_free ( p_ca_attrs ); + + ib_status = p_adapter->ifc.alloc_pd( p_adapter->h_ca, + IB_PDT_NORMAL, p_adapter, &p_adapter->ca.hPd ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT ( VNIC_DBG_ERROR, + ("alloc PD failed status %s(%d)\n", + p_adapter->ifc.get_err_str(ib_status), ib_status )); + return ib_status; + } + + if ( ( ib_status = ibregion_physInit( p_adapter, + &p_adapter->ca.region, + p_adapter->ca.hPd, + &start_addr, + MAX_PHYS_MEMORY ) ) != IB_SUCCESS ) + { + VNIC_TRACE_EXIT ( VNIC_DBG_ERROR, + ("phys region init failed status %s(%d)\n", + p_adapter->ifc.get_err_str(ib_status), ib_status )); + + p_adapter->ifc.dealloc_pd( p_adapter->ca.hPd, NULL ); + } + + return ib_status; +} + +static BOOLEAN +_vnic_params_sanity_check(vnic_params_t *p_params) +{ + DEFAULT_PARAM( p_params->MaxAddressEntries, MAX_ADDRESS_ENTRIES ); + DEFAULT_PARAM( p_params->MinAddressEntries, MIN_ADDRESS_ENTRIES ); + + DEFAULT_PARAM( p_params->ViportStatsInterval, 
VIPORT_STATS_INTERVAL ); + DEFAULT_PARAM( p_params->ViportHbInterval, VIPORT_HEARTBEAT_INTERVAL ); + DEFAULT_PARAM( p_params->ViportHbTimeout, VIPORT_HEARTBEAT_TIMEOUT ); + + DEFAULT_PARAM( p_params->ControlRspTimeout, CONTROL_RSP_TIMEOUT ); + DEFAULT_PARAM( p_params->ControlReqRetryCount, CONTROL_REQ_RETRY_COUNT ); + + DEFAULT_PARAM( p_params->RetryCount, RETRY_COUNT ); + DEFAULT_PARAM( p_params->MinRnrTimer, MIN_RNR_TIMER ); + + DEFAULT_PARAM( p_params->MaxViportsPerNetpath, MAX_VIPORTS_PER_NETPATH ); + DEFAULT_PARAM( p_params->DefaultViportsPerNetpath, DEFAULT_VIPORTS_PER_NETPATH ); + + DEFAULT_PARAM( p_params->DefaultPkey, DEFAULT_PKEY ); + DEFAULT_PARAM( p_params->NotifyBundleSz, NOTIFY_BUNDLE_SZ ); + DEFAULT_PARAM( p_params->DefaultNoPathTimeout, DEFAULT_NO_PATH_TIMEOUT ); + DEFAULT_PARAM( p_params->DefaultPrimaryConnectTimeout, DEFAULT_PRI_CON_TIMEOUT ); + DEFAULT_PARAM( p_params->DefaultPrimaryReconnectTimeout, DEFAULT_PRI_RECON_TIMEOUT ); + DEFAULT_PARAM( p_params->DefaultPrimarySwitchTimeout, DEFAULT_PRI_SWITCH_TIMEOUT ); + DEFAULT_PARAM( p_params->DefaultPreferPrimary, DEFAULT_PREFER_PRIMARY ); + + U32_RANGE( p_params->MaxAddressEntries ); + U32_RANGE( p_params->MinAddressEntries ); + RANGE_CHECK( p_params->MinMtu, MIN_MTU, MAX_MTU ); + RANGE_CHECK( p_params->MaxMtu, MIN_MTU, MAX_MTU ); + U32_RANGE( p_params->HostRecvPoolEntries ); + U32_RANGE( p_params->MinHostPoolSz ); + U32_RANGE( p_params->MinEiocPoolSz ); + U32_RANGE( p_params->MaxEiocPoolSz ); + U32_ZERO_RANGE( p_params->MinHostKickTimeout ); + U32_ZERO_RANGE( p_params->MaxHostKickTimeout ); + U32_ZERO_RANGE( p_params->MinHostKickEntries ); + U32_ZERO_RANGE( p_params->MaxHostKickEntries ); + U32_ZERO_RANGE( p_params->MinHostKickBytes ); + U32_ZERO_RANGE( p_params->MaxHostKickBytes ); + U32_RANGE( p_params->MinHostUpdateSz ); + U32_RANGE( p_params->MaxHostUpdateSz ); + U32_RANGE( p_params->MinEiocUpdateSz ); + U32_RANGE( p_params->MaxEiocUpdateSz ); + U8_RANGE( p_params->NotifyBundleSz ); + U32_ZERO_RANGE( p_params->ViportStatsInterval ); + U32_ZERO_RANGE( p_params->ViportHbInterval ); + U32_ZERO_RANGE( p_params->ViportHbTimeout ); + U32_RANGE( p_params->ControlRspTimeout ); + U8_RANGE( p_params->ControlReqRetryCount ); + ZERO_RANGE_CHECK( p_params->RetryCount, 0, 7 ); + ZERO_RANGE_CHECK( p_params->MinRnrTimer, 0, 31 ); + U32_RANGE( p_params->DefaultViportsPerNetpath ); + U8_RANGE( p_params->MaxViportsPerNetpath ); + U16_ZERO_RANGE( p_params->DefaultPkey ); + U32_RANGE( p_params->DefaultNoPathTimeout ); + U32_RANGE( p_params->DefaultPrimaryConnectTimeout ); + U32_RANGE( p_params->DefaultPrimaryReconnectTimeout ); + U32_RANGE( p_params->DefaultPrimarySwitchTimeout ); + BOOLEAN_RANGE( p_params->DefaultPreferPrimary ); + BOOLEAN_RANGE( p_params->UseRxCsum ); + BOOLEAN_RANGE( p_params->UseTxCsum ); + + LESS_THAN_OR_EQUAL( p_params->MinAddressEntries, p_params->MaxAddressEntries ); + LESS_THAN_OR_EQUAL( p_params->MinMtu, p_params->MaxMtu ); + LESS_THAN_OR_EQUAL( p_params->MinHostPoolSz, p_params->HostRecvPoolEntries ); + POWER_OF_2( p_params->HostRecvPoolEntries ); + POWER_OF_2( p_params->MinHostPoolSz ); + POWER_OF_2( p_params->NotifyBundleSz ); + LESS_THAN( p_params->NotifyBundleSz, p_params->MinEiocPoolSz ); + LESS_THAN_OR_EQUAL( p_params->MinEiocPoolSz, p_params->MaxEiocPoolSz ); + POWER_OF_2( p_params->MinEiocPoolSz ); + POWER_OF_2( p_params->MaxEiocPoolSz ); + LESS_THAN_OR_EQUAL( p_params->MinHostKickTimeout, p_params->MaxHostKickTimeout ); + LESS_THAN_OR_EQUAL( p_params->MinHostKickEntries, p_params->MaxHostKickEntries ); 
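+ /* For reference, a rough sketch of what these validators do, assuming the + * macro definitions that appear later in this patch (vnic_config.h): + * LESS_THAN_OR_EQUAL( lo, hi ) expands to roughly + * if( lo > hi ) { VNIC_TRACE( VNIC_DBG_WARN, ... ); lo = hi; } + * Every check repairs its value (clamp, halve, or adjust to a power of 2) + * and logs a warning instead of failing, so this routine always returns + * TRUE with every parameter forced into its legal range. + */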
+ LESS_THAN_OR_EQUAL( p_params->MinHostKickBytes, p_params->MaxHostKickBytes ); + LESS_THAN_OR_EQUAL( p_params->MinHostUpdateSz, p_params->MaxHostUpdateSz ); + POWER_OF_2( p_params->MinHostUpdateSz ); + POWER_OF_2( p_params->MaxHostUpdateSz ); + LESS_THAN( p_params->MinHostUpdateSz, p_params->MinHostPoolSz ); + LESS_THAN( p_params->MaxHostUpdateSz, p_params->HostRecvPoolEntries ); + LESS_THAN_OR_EQUAL( p_params->MinEiocUpdateSz, p_params->MaxEiocUpdateSz ); + POWER_OF_2( p_params->MinEiocUpdateSz ); + POWER_OF_2( p_params->MaxEiocUpdateSz ); + LESS_THAN( p_params->MinEiocUpdateSz, p_params->MinEiocPoolSz ); + LESS_THAN( p_params->MaxEiocUpdateSz, p_params->MaxEiocPoolSz ); + LESS_THAN_OR_EQUAL( p_params->DefaultViportsPerNetpath, p_params->MaxViportsPerNetpath ); + + return TRUE; + +} +NDIS_STATUS +vnic_get_adapter_params( + IN NDIS_HANDLE wrapper_config_context, + OUT vnic_params_t* const p_params ) +{ + NDIS_STATUS status; + NDIS_HANDLE h_config; + NDIS_CONFIGURATION_PARAMETER *p_reg_prm; + NDIS_STRING keyword; + + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + CL_ASSERT(p_params ); + + /* prepare params for default initialization */ + cl_memset( p_params, 0xff, sizeof (vnic_params_t) ); + + NdisOpenConfiguration( &status, &h_config, wrapper_config_context ); + if( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("NdisOpenConfiguration returned 0x%.8x\n", status) ); + return status; + } + + status = NDIS_STATUS_FAILURE; + p_reg_prm = NULL; + + RtlInitUnicodeString( &keyword, L"DebugFlags" ); + + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + if( status == NDIS_STATUS_SUCCESS ) + g_vnic_dbg_lvl |= p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinMtu" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinMtu = ( status != NDIS_STATUS_SUCCESS ) ? MIN_MTU: + p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxMtu" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxMtu =( status != NDIS_STATUS_SUCCESS )? MAX_MTU: + p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"UseRxCsum" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->UseRxCsum = ( status != NDIS_STATUS_SUCCESS ) ? + TRUE : ( p_reg_prm->ParameterData.IntegerData )? TRUE : FALSE; + + RtlInitUnicodeString( &keyword, L"UseTxCsum" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + /* turn it on by default, if not present */ + p_params->UseTxCsum = ( status != NDIS_STATUS_SUCCESS ) ? + TRUE : ( p_reg_prm->ParameterData.IntegerData )? TRUE : FALSE; + + RtlInitUnicodeString( &keyword, L"MinEiocUpdateSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinEiocUpdateSz = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_EIOC_UPDATE_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxEiocUpdateSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxEiocUpdateSz = ( status != NDIS_STATUS_SUCCESS ) ? 
+ MAX_EIOC_UPDATE_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinHostUpdateSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinHostUpdateSz = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_HOST_UPDATE_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxHostUpdateSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxHostUpdateSz = ( status != NDIS_STATUS_SUCCESS ) ? + MAX_HOST_UPDATE_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinHostKickBytes" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinHostKickBytes = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_HOST_KICK_BYTES : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxHostKickBytes" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxHostKickBytes = ( status != NDIS_STATUS_SUCCESS ) ? + MAX_HOST_KICK_BYTES : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinHostKickEntries" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinHostKickEntries = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_HOST_KICK_ENTRIES : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxHostKickEntries" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxHostKickEntries = ( status != NDIS_STATUS_SUCCESS ) ? + MAX_HOST_KICK_ENTRIES : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinHostKickTimeout" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinHostKickTimeout = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_HOST_KICK_TIMEOUT : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxHostKickTimeout" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxHostKickTimeout = ( status != NDIS_STATUS_SUCCESS ) ? + MAX_HOST_KICK_TIMEOUT : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinEiocPoolSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinEiocPoolSz = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_EIOC_POOL_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MaxEiocPoolSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MaxEiocPoolSz = ( status != NDIS_STATUS_SUCCESS ) ? + MAX_EIOC_POOL_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"MinHostPoolSz" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->MinHostPoolSz = ( status != NDIS_STATUS_SUCCESS ) ? + MIN_HOST_POOL_SZ : p_reg_prm->ParameterData.IntegerData; + + RtlInitUnicodeString( &keyword, L"HostRecvPoolEntries" ); + NdisReadConfiguration( + &status, &p_reg_prm, h_config, &keyword, NdisParameterInteger ); + + p_params->HostRecvPoolEntries = ( status != NDIS_STATUS_SUCCESS ) ? + HOST_RECV_POOL_ENTRIES : p_reg_prm->ParameterData.IntegerData; + + NdisCloseConfiguration( h_config ); + + status = ( _vnic_params_sanity_check(p_params)? 
+ NDIS_STATUS_SUCCESS: NDIS_STATUS_FAILURE ); + + VNIC_EXIT( VNIC_DBG_ADAPTER ); + return status; +} + +ib_api_status_t +vnic_viport_allocate( + IN vnic_adapter_t* const p_adapter, + IN OUT viport_t** const pp_viport ) +{ + viport_t *p_viport; + NDIS_STATUS status; + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + NdisAcquireSpinLock( &p_adapter->lock ); + status = NdisAllocateMemoryWithTag( &p_viport, sizeof(viport_t), 'trop' ); + + if( status != NDIS_STATUS_SUCCESS ) + { + NdisReleaseSpinLock( &p_adapter->lock ); + VNIC_TRACE_EXIT(VNIC_DBG_ERROR, + ( "Failed allocating Viport structure\n" )); + return IB_ERROR; + } + + NdisZeroMemory( p_viport, sizeof(viport_t) ); + + KeInitializeSpinLock( &p_viport->lock ); + InitializeListHead( &p_viport->listPtrs ); + InitializeListHead( &p_viport->send_pending_list ); + NdisAllocateSpinLock(&p_viport->pending_list_lock ); + + KeInitializeEvent( &p_viport->conn_event, SynchronizationEvent, FALSE ); + + p_viport->p_adapter = p_adapter; + + viport_get_adapter_name( p_viport ); + + viport_config_defaults ( p_viport ); + VNIC_TRACE( VNIC_DBG_PNP, ("Configure Viport default values\n") ); + + control_construct( &p_viport->control, p_viport ); + data_construct( &p_viport->data, p_viport ); + + p_viport->ioc_num = _get_ioc_num_from_iocguid( &p_adapter->ifc_data.guid ); + p_adapter->ioc_num = p_viport->ioc_num; + + *pp_viport = p_viport; + + NdisReleaseSpinLock( &p_adapter->lock ); + VNIC_EXIT( VNIC_DBG_ADAPTER ); + return IB_SUCCESS; +} +static void +_vnic_viport_free( + IN vnic_adapter_t* const p_adapter ) +{ + viport_t *p_viport = p_adapter->p_viport; + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + if ( p_viport ) + { + p_adapter->p_viport = NULL; + NdisFreeMemory( p_viport, sizeof(viport_t), 0 ); + } + p_adapter->state = INIC_UNINITIALIZED; + + VNIC_EXIT( VNIC_DBG_ADAPTER ); +} + + +NDIS_STATUS +vnic_set_mcast( + IN vnic_adapter_t* const p_adapter, + IN mac_addr_t* const p_mac_array, + IN const uint8_t mc_count ) +{ + NDIS_STATUS status; + + VNIC_ENTER( VNIC_DBG_MCAST ); + + VNIC_TRACE( VNIC_DBG_INIT, + ("MCAST COUNT to set = %d\n", mc_count)); + /* Copy the MC address list into the adapter. 
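+ * The cached list is what viport_setMulticast() pushes to the active + * viport below; pending_set counts an update that is still outstanding when + * the call returns NDIS_STATUS_PENDING, presumably so the set-OID can be + * completed later (cf. vnic_resume_set_oids() in vnic_adapter.h).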
*/ + if( mc_count ) + { + RtlCopyMemory( + p_adapter->mcast_array, p_mac_array, mc_count * MAC_ADDR_LEN ); + } + p_adapter->mc_count = mc_count; + + if( !p_adapter->p_currentPath->pViport ) + return NDIS_STATUS_SUCCESS; + + ++p_adapter->pending_set; + status = viport_setMulticast( p_adapter->p_currentPath->pViport ); + if( status != NDIS_STATUS_PENDING ) + { + --p_adapter->pending_set; + } + + VNIC_EXIT( VNIC_DBG_MCAST ); + return status; +} + + +static BOOLEAN +__path_records_match( + IN ib_path_rec_t *p_path1, + IN ib_path_rec_t *p_path2 ) +{ + if ( p_path1->dgid.unicast.prefix != p_path2->dgid.unicast.prefix ) + return FALSE; + if ( p_path1->dgid.unicast.interface_id != p_path2->dgid.unicast.interface_id ) + return FALSE; + if ( p_path1->dlid != p_path2->dlid ) + return FALSE; + if ( p_path1->pkey != p_path2->pkey ) + return FALSE; + if ( p_path1->rate != p_path2->rate ) + return FALSE; + + return TRUE; +} + +static BOOLEAN +__sid_valid( + IN vnic_adapter_t *p_adapter, + IN ib_net64_t sid ) +{ + vnic_sid_t svc_id; + svc_id.as_uint64 = sid; + if( ( svc_id.s.base_id & 0x10 ) != 0x10 ) + return FALSE; + /* all three OUI bytes must match 00-06-6a; reject if any byte differs */ + if( svc_id.s.oui[0] != 0x00 || + svc_id.s.oui[1] != 0x06 || + svc_id.s.oui[2] != 0x6a ) + return FALSE; + if ( svc_id.s.type != CONTROL_SID && + svc_id.s.type != DATA_SID ) + return FALSE; + if ( svc_id.s.ioc_num != _get_ioc_num_from_iocguid( &p_adapter->ifc_data.guid ) ) + return FALSE; + return TRUE; +} +static inline uint8_t +_get_ioc_num_from_iocguid( + IN ib_net64_t *p_iocguid ) +{ + return ( (vnic_ioc_guid_t *)p_iocguid)->s.ioc_num; +} + +static ib_api_status_t +__path_record_add( + IN vnic_adapter_t *p_adapter, + IN ib_path_rec_t *p_path_rec ) +{ + + NdisAcquireSpinLock( &p_adapter->lock ); + + if ( !__path_records_match( &p_adapter->path_record.path_rec, p_path_rec ) ) + { + p_adapter->path_record.path_rec = *p_path_rec; + p_adapter->path_record.num_entries++; + } + + NdisReleaseSpinLock( &p_adapter->lock ); + return IB_SUCCESS; +} + +static ib_api_status_t +__path_record_remove( + IN vnic_adapter_t *p_adapter, + IN ib_path_rec_t *p_path_rec ) +{ + + NdisAcquireSpinLock( &p_adapter->lock ); + + if ( __path_records_match( &p_adapter->path_record.path_rec, p_path_rec ) ) + { + --p_adapter->path_record.num_entries; + cl_memclr( &p_adapter->path_record.path_rec, sizeof( ib_path_rec_t )); + } + + NdisReleaseSpinLock( &p_adapter->lock ); + return IB_SUCCESS; +} + +static void +__path_records_cleanup( + vnic_adapter_t *p_adapter ) +{ + + NdisAcquireSpinLock( &p_adapter->lock ); + + cl_memclr( &p_adapter->path_record.path_rec, sizeof( ib_path_rec_t )); + p_adapter->path_record.num_entries = 0; + + NdisReleaseSpinLock( &p_adapter->lock ); + return; +} +ib_api_status_t +__vnic_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_pnp_ioc_rec_t *p_ioc_rec; + ib_pnp_ioc_path_rec_t *p_ioc_path; + + vnic_adapter_t * __ptr64 p_adapter = (vnic_adapter_t * __ptr64)p_pnp_rec->pnp_context; + + VNIC_ENTER( VNIC_DBG_PNP ); + + CL_ASSERT( p_adapter ); + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_IOC_ADD: + p_ioc_rec = (ib_pnp_ioc_rec_t*)p_pnp_rec; + + VNIC_TRACE( VNIC_DBG_PNP, ("IB_PNP_IOC_ADD for %s.\n", p_ioc_rec->info.profile.id_string) ); + + if( p_adapter->ifc_data.ca_guid != p_ioc_rec->ca_guid ) + { + VNIC_TRACE_EXIT( VNIC_DBG_WARN, ("Invalid CA GUID.\n") ); + ib_status = IB_INVALID_GUID; + break; + } + if( p_adapter->ifc_data.guid != p_ioc_rec->info.profile.ioc_guid ) + { + VNIC_TRACE_EXIT( VNIC_DBG_WARN, ("Invalid IOC GUID.\n") ); + ib_status =
IB_INVALID_GUID; + break; + } + /* get ioc profile data */ + NdisAcquireSpinLock( &p_adapter->lock ); + + InterlockedExchange( (volatile LONG*)&p_adapter->pnp_state, IB_PNP_IOC_ADD ); + + p_adapter->ioc_info = p_ioc_rec->info; + p_adapter->num_svc_entries = p_ioc_rec->info.profile.num_svc_entries; + + CL_ASSERT(p_adapter->num_svc_entries >= 2 ); + + if( !__sid_valid( p_adapter, p_ioc_rec->svc_entry_array[0].id ) ) + { + NdisReleaseSpinLock( &p_adapter->lock ); + + VNIC_TRACE_EXIT( VNIC_DBG_WARN, + ("Invalid Service ID %#I64x\n",p_ioc_rec->svc_entry_array[0].id ) ); + + ib_status = IB_INVALID_GUID; // should it be set INVALID_SERVICE_TYPE ? + break; + } + + p_adapter->p_svc_entries = + cl_zalloc( sizeof(ib_svc_entry_t) * + p_adapter->ioc_info.profile.num_svc_entries ); + + if( p_adapter->p_svc_entries == NULL ) + { + NdisReleaseSpinLock( &p_adapter->lock ); + + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("Insufficient Memory.\n") ); + ib_status = IB_INSUFFICIENT_MEMORY; + break; + } + + cl_memcpy( p_adapter->p_svc_entries, + p_ioc_rec->svc_entry_array, + sizeof(ib_svc_entry_t) * p_adapter->ioc_info.profile.num_svc_entries); + + VNIC_TRACE( VNIC_DBG_PNP, + ("Found %d Service Entries.\n", p_adapter->ioc_info.profile.num_svc_entries)); + NdisReleaseSpinLock( &p_adapter->lock ); + break; + + case IB_PNP_IOC_REMOVE: + CL_ASSERT( p_pnp_rec->guid == p_adapter->ifc_data.guid ); + + p_ioc_rec = (ib_pnp_ioc_rec_t*)p_pnp_rec; + + InterlockedExchange( (volatile LONG*)&p_adapter->pnp_state, IB_PNP_IOC_REMOVE ); + + VNIC_TRACE( VNIC_DBG_PNP, ("IB_PNP_IOC_REMOVE for %s.\n", + p_adapter->ioc_info.profile.id_string) ); + + viport_linkDown( p_adapter->p_viport ); + break; + + case IB_PNP_IOC_PATH_ADD: + /* path for our IOC ? */ + if ( p_pnp_rec->guid != p_adapter->ifc_data.guid ) + { + VNIC_TRACE( VNIC_DBG_PNP, + ("Getting path for wrong IOC\n") ); + ib_status = IB_INVALID_GUID; + break; + } + p_ioc_path = (ib_pnp_ioc_path_rec_t*)p_pnp_rec; + + if( p_adapter->state != INIC_UNINITIALIZED ) + break; + + ib_status = __path_record_add( p_adapter, &p_ioc_path->path ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Failed to add path record\n") ); + break; + } + + netpath_init( &p_adapter->primaryPath, p_adapter ); + p_adapter->p_currentPath = &p_adapter->primaryPath; + + ib_status = vnic_viport_allocate( p_adapter, &p_adapter->p_viport ); + + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE ( VNIC_DBG_ERROR, + ("Failed status %s\n", p_adapter->ifc.get_err_str( ib_status )) ); + _vnic_viport_free( p_adapter ); + return ib_status; + } + + netpath_addPath( p_adapter->p_currentPath, p_adapter->p_viport ); + viport_setPath( p_adapter->p_viport, + &p_adapter->path_record.path_rec, + &p_adapter->ioc_info.profile.ioc_guid ); + + ib_status = viport_control_connect( p_adapter->p_viport ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Control connect return %s\n", p_adapter->ifc.get_err_str( ib_status )) ); + vnic_viport_cleanup( p_adapter ); + break; + } + + ib_status = viport_data_connect( p_adapter->p_viport ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Data path connect failed status %s\n", + p_adapter->ifc.get_err_str( ib_status )) ); + vnic_viport_cleanup( p_adapter ); + break; + } + + p_adapter->state = INIC_REGISTERED; + break; + + case IB_PNP_IOC_PATH_REMOVE: + p_ioc_path = (ib_pnp_ioc_path_rec_t*)p_pnp_rec; + + VNIC_TRACE( VNIC_DBG_PNP, + ("IB_PNP_IOC_PATH_REMOVE (slid:%d dlid:%d) for %s.\n", + ntoh16( p_ioc_path->path.slid ), + ntoh16( 
p_ioc_path->path.dlid ), + p_adapter->ioc_info.profile.id_string)); + + ib_status = __path_record_remove( p_adapter, &p_ioc_path->path ); + + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Failed to remove path record\n") ); + } + break; + + default: + VNIC_TRACE( VNIC_DBG_PNP, + (" Received unhandled PnP event %#x\n", p_pnp_rec->pnp_event ) ); + break; + } + + VNIC_EXIT( VNIC_DBG_PNP ); + return ib_status; +} + + +NDIS_STATUS +vnic_get_adapter_interface( + IN NDIS_HANDLE h_handle, + IN vnic_adapter_t *p_adapter) +{ + NTSTATUS status; + ib_al_ifc_data_t data; + IO_STACK_LOCATION io_stack; + + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + NdisMGetDeviceProperty( h_handle, &p_adapter->p_pdo, NULL, NULL, NULL, NULL ); + + data.size = sizeof(ioc_ifc_data_t); + data.type = &GUID_IOC_INTERFACE_DATA; + data.version = IOC_INTERFACE_DATA_VERSION; + data.p_data = &p_adapter->ifc_data; + + io_stack.MinorFunction = IRP_MN_QUERY_INTERFACE; + io_stack.Parameters.QueryInterface.Version = AL_INTERFACE_VERSION; + io_stack.Parameters.QueryInterface.Size = sizeof(ib_al_ifc_t); + io_stack.Parameters.QueryInterface.Interface = (INTERFACE*)&p_adapter->ifc; + io_stack.Parameters.QueryInterface.InterfaceSpecificData = &data; + io_stack.Parameters.QueryInterface.InterfaceType = &GUID_IB_AL_INTERFACE; + + status = cl_fwd_query_ifc( p_adapter->p_pdo, &io_stack ); + + if( !NT_SUCCESS( status ) ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Query interface for VNIC interface returned %08x.\n", status) ); + return status; + } + /* + * Dereference the interface now so that the bus driver doesn't fail a + * query remove IRP. We will always get unloaded before the bus driver + * since we're a child device. + */ + p_adapter->ifc.wdm.InterfaceDereference( + p_adapter->ifc.wdm.Context ); + + VNIC_EXIT( VNIC_DBG_ADAPTER ); + + return NDIS_STATUS_SUCCESS; +} + + +void +vnic_viport_cleanup( + IN vnic_adapter_t *p_adapter ) +{ + VNIC_ENTER( VNIC_DBG_ADAPTER ); + + if ( p_adapter->p_viport ) + { + if( ( InterlockedExchange( (volatile LONG *)&p_adapter->state, + INIC_DEREGISTERING )) == INIC_DEREGISTERING ) + { + VNIC_TRACE(VNIC_DBG_INIT, + ("vnic viport cleanup - already destroying\n" )); + return; + } + viport_timerStop( p_adapter->p_viport ); + InterlockedExchange( (volatile LONG *)&p_adapter->p_viport->disconnect, TRUE ); + viport_cleanup(p_adapter->p_viport ); + } + p_adapter->p_viport = NULL; + + VNIC_EXIT( VNIC_DBG_ADAPTER ); +} + +void +__vnic_pnp_dereg_cb( + IN void* context ) +{ + vnic_adapter_t* p_adapter; + ib_api_status_t ib_status; + ib_pnp_event_t state; + ib_pnp_req_t pnp_req; + viport_t *p_viport; + + VNIC_ENTER( VNIC_DBG_INIT ); + + p_adapter = (vnic_adapter_t*)context; + + CL_ASSERT( !p_adapter->h_pnp ); + + if( p_adapter->pnp_state != IB_PNP_IOC_REMOVE ) + p_adapter->pnp_state = IB_PNP_IOC_ADD; + + state = p_adapter->pnp_state; + + /* Destroy the current port instance if it still exists. 
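+ * This callback runs once dereg_pnp() has completed. Unless the IOC itself + * was removed, the code below immediately re-registers for IB_PNP_IOC events + * with IB_PNP_FLAG_REG_SYNC, which re-delivers the IOC and path events and + * so rebuilds the viport as part of the reset.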
*/ + p_viport = InterlockedExchangePointer( (void *)&p_adapter->p_viport, NULL ); + + if( p_viport ) + { + viport_cleanup( p_viport ); + } + + if( state != IB_PNP_IOC_REMOVE ) + { + /* Register for IOC events */ + pnp_req.pfn_pnp_cb = __vnic_pnp_cb; + pnp_req.pnp_class = IB_PNP_IOC | IB_PNP_FLAG_REG_SYNC; + pnp_req.pnp_context = p_adapter; + + ib_status = p_adapter->ifc.reg_pnp( p_adapter->h_al, &pnp_req, &p_adapter->h_pnp ); + if( ib_status != IB_SUCCESS ) + { + p_adapter->reset = FALSE; + VNIC_TRACE( VNIC_DBG_ERROR, + ("pnp_reg returned %s\n", + p_adapter->ifc.get_err_str( ib_status )) ); + NdisMResetComplete( + p_adapter->h_handle, NDIS_STATUS_HARD_ERRORS, TRUE ); + } + } + else + { + p_adapter->reset = FALSE; + NdisMResetComplete( + p_adapter->h_handle, NDIS_STATUS_SUCCESS, TRUE ); + ib_status = IB_SUCCESS; + } + + VNIC_EXIT( VNIC_DBG_INIT ); +} + + +ib_api_status_t +vnic_reset_adapter( + IN vnic_adapter_t* const p_adapter ) +{ + + ib_api_status_t status; + ib_pnp_handle_t h_pnp; + ib_pnp_req_t pnp_req; + + VNIC_ENTER( VNIC_DBG_INIT ); + + if( p_adapter->reset ) + return IB_INVALID_STATE; + + p_adapter->hung = 0; + p_adapter->reset = TRUE; + + if( p_adapter->h_pnp ) + { + h_pnp = p_adapter->h_pnp; + p_adapter->h_pnp = NULL; + status = p_adapter->ifc.dereg_pnp( h_pnp, __vnic_pnp_dereg_cb ); + if( status == IB_SUCCESS ) + status = IB_NOT_DONE; + } + else + { + /* Register for IOC events */ + pnp_req.pfn_pnp_cb = __vnic_pnp_cb; + pnp_req.pnp_class = IB_PNP_IOC | IB_PNP_FLAG_REG_SYNC; + pnp_req.pnp_context = p_adapter; + + status = p_adapter->ifc.reg_pnp( p_adapter->h_al, &pnp_req, &p_adapter->h_pnp ); + if( status == IB_SUCCESS ) + { + p_adapter->hung = FALSE; + } + } + VNIC_EXIT( VNIC_DBG_INIT ); + return status; +} diff --git a/branches/Ndi/ulp/inic/kernel/vnic_adapter.h b/branches/Ndi/ulp/inic/kernel/vnic_adapter.h new file mode 100644 index 00000000..5cd9f284 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_adapter.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#if !defined _VNIC_ADAPTER_H_ +#define _VNIC_ADAPTER_H_ + +#include +#include +#include +#include +#include "vnic_ib.h" +#include "vnic_controlpkt.h" +#include "vnic_config.h" +#include "vnic_control.h" +#include "vnic_data.h" +#include "vnic_viport.h" +#include + +typedef struct _pending_oid +{ + NDIS_OID oid; + PVOID p_buf; + ULONG buf_len; + PULONG p_bytes_used; + PULONG p_bytes_needed; + +} pending_oid_t; + +typedef struct _ipv4_address_item +{ + union _net_address_item_address + { + ULONG as_ulong; + UCHAR as_bytes[4]; + } address; + +} ipv4_address_item_t; + +typedef struct _vnic_params { + uint32_t MaxAddressEntries; + uint32_t MinAddressEntries; + uint32_t MinMtu; + uint32_t MaxMtu; + uint32_t HostRecvPoolEntries; + uint32_t MinHostPoolSz; + uint32_t MinEiocPoolSz; + uint32_t MaxEiocPoolSz; + uint32_t MinHostKickTimeout; + uint32_t MaxHostKickTimeout; + uint32_t MinHostKickEntries; + uint32_t MaxHostKickEntries; + uint32_t MinHostKickBytes; + uint32_t MaxHostKickBytes; + uint32_t MinHostUpdateSz; + uint32_t MaxHostUpdateSz; + uint32_t MinEiocUpdateSz; + uint32_t MaxEiocUpdateSz; + uint32_t NotifyBundleSz; + uint32_t ViportStatsInterval; + uint32_t ViportHbInterval; + uint32_t ViportHbTimeout; + uint32_t ControlRspTimeout; + uint32_t ControlReqRetryCount; + uint32_t RetryCount; + uint32_t MinRnrTimer; + uint32_t MaxViportsPerNetpath; + uint32_t DefaultViportsPerNetpath; + uint32_t DefaultPkey; + uint32_t DefaultNoPathTimeout; + uint32_t DefaultPrimaryConnectTimeout; + uint32_t DefaultPrimaryReconnectTimeout; + uint32_t DefaultPrimarySwitchTimeout; + uint32_t DefaultPreferPrimary; + uint32_t UseRxCsum; + uint32_t UseTxCsum; + mac_addr_t conf_mac; +} vnic_params_t; + + +typedef struct _vnic_adapter { + LIST_ENTRY list_entry; + NDIS_HANDLE h_handle; + DEVICE_OBJECT *p_pdo; + NDIS_SPIN_LOCK lock; + ib_al_ifc_t ifc; + ioc_ifc_data_t ifc_data; + ib_ioc_info_t ioc_info; + vnic_path_record_t path_record; + ib_al_handle_t h_al; + ib_ca_handle_t h_ca; + ib_pnp_handle_t h_pnp; + ib_pnp_event_t pnp_state; + IbCa_t ca; + struct _viport *p_viport; + InicState_t state; + struct Netpath primaryPath; + struct Netpath secondaryPath; + struct Netpath *p_currentPath; + vnic_params_t params; + int open; + int macSet; + int mc_count; + mac_addr_t mcast_array[MAX_MCAST]; + LONG xmitStarted; + LONG carrier; + uint32_t ioc_num; + uint32_t link_speed; + uint32_t packet_filter; + int hung; + BOOLEAN reset; + int pending_set; + BOOLEAN pending_query; + pending_oid_t query_oid; + pending_oid_t set_oid; + uint8_t num_svc_entries; + ib_svc_entry_t *p_svc_entries; + char name[65]; + InicNPEvent_t np_event[INICNP_NUM_EVENTS]; +#ifdef VNIC_STATISTIC + struct { + uint64_t startTime; + uint64_t connTime; + uint64_t disconnRef; /* Intermediate time */ + uint64_t disconnTime; + uint32_t disconnNum; + uint64_t xmitTime; + uint32_t xmitNum; + uint32_t xmitFail; + uint64_t recvTime; + uint32_t recvNum; + uint64_t xmitRef; /* Intermediate time */ + uint64_t xmitOffTime; + uint32_t xmitOffNum; + uint64_t carrierRef; /* Intermediate time */ + uint64_t carrierOffTime; + uint32_t carrierOffNum; + } statistics; +#endif /* VNIC_STATISTIC */ + +} vnic_adapter_t; + +ib_api_status_t +vnic_create_adapter( + IN NDIS_HANDLE h_handle, + IN NDIS_HANDLE wrapper_config_context, + OUT vnic_adapter_t** const pp_adapter); + +void +vnic_destroy_adapter( + IN vnic_adapter_t* p_adapter); + +ib_api_status_t +vnic_construct_adapter( + IN vnic_adapter_t *p_adapter); + +NDIS_STATUS +vnic_set_mcast( + IN vnic_adapter_t* const 
p_adapter, + IN mac_addr_t* const p_mac_array, + IN const uint8_t mc_count ); + +ib_api_status_t +vnic_viport_allocate( + IN vnic_adapter_t* const p_adapter, + IN OUT viport_t** const pp_viport ); + +void +vnic_viport_cleanup( + IN vnic_adapter_t *p_adapter ); + +void +vnic_resume_set_oids( + IN vnic_adapter_t* const p_adapter ); + +void +vnic_resume_oids( + IN vnic_adapter_t* const p_adapter ); + +ib_api_status_t +__vnic_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + +ib_api_status_t +ibregion_physInit( + IN struct _vnic_adapter* p_adapter, + OUT IbRegion_t *pRegion, + IN ib_pd_handle_t hPd, + IN uint64_t *p_vaddr, + IN uint64_t len ); + +void +__vnic_pnp_dereg_cb( + IN void* context ); + +ib_api_status_t +vnic_reset_adapter( + IN vnic_adapter_t* const p_adapter ); + +#endif /* !defined _VNIC_ADAPTER_H_ */ diff --git a/branches/Ndi/ulp/inic/kernel/vnic_config.h b/branches/Ndi/ulp/inic/kernel/vnic_config.h new file mode 100644 index 00000000..3dccc7ed --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_config.h @@ -0,0 +1,240 @@ +#ifndef _VNIC_CONFIG_H_ +#define _VNIC_CONFIG_H_ + +#include "vnic_util.h" +/* These are hard, compile time limits. + * Lower runtime overrides may be in effect + */ +#define INIC_CLASS_SUBCLASS 0x2000066A +#define INIC_PROTOCOL 0 +#define INIC_PROT_VERSION 1 + +#define INIC_MAJORVERSION 1 +#define INIC_MINORVERSION 1 + +#define MAX_ADDRESS_ENTRIES 64 /* max entries to negotiate with remote */ +#define MIN_ADDRESS_ENTRIES 16 /* min entries remote can return to us that we agree with */ +#define MAX_ADDR_ARRAY 32 /* address array we can handle, for now */ +#define MIN_MTU 1500 /* Minimum Negotiated payload size */ +#define MAX_MTU 9500 /* max Jumbo frame payload size */ +#define ETH_VLAN_HLEN 18 /* ethernet header with VLAN tag */ + +#define HOST_RECV_POOL_ENTRIES 512 /* TBD: Arbitrary */ +#define MIN_HOST_POOL_SZ 256 /* TBD: Arbitrary */ +#define MIN_EIOC_POOL_SZ 256 /* TBD: Arbitrary */ +#define MAX_EIOC_POOL_SZ 512 /* TBD: Arbitrary */ + +#define MIN_HOST_KICK_TIMEOUT 100 /* TBD: Arbitrary */ +#define MAX_HOST_KICK_TIMEOUT 200 /* In uSec */ + +#define MIN_HOST_KICK_ENTRIES 1 /* TBD: Arbitrary */ +#define MAX_HOST_KICK_ENTRIES 128 /* TBD: Arbitrary */ + +#define MIN_HOST_KICK_BYTES 0 +#define MAX_HOST_KICK_BYTES 5000 + +#define MIN_HOST_UPDATE_SZ 8 /* TBD: Arbitrary */ +#define MAX_HOST_UPDATE_SZ 32 /* TBD: Arbitrary */ +#define MIN_EIOC_UPDATE_SZ 8 /* TBD: Arbitrary */ +#define MAX_EIOC_UPDATE_SZ 32 /* TBD: Arbitrary */ + +#define NOTIFY_BUNDLE_SZ 32 + +#define MAX_PARAM_VALUE 0x40000000 + +#define DEFAULT_VIPORTS_PER_NETPATH 1 +#define MAX_VIPORTS_PER_NETPATH 1 + +#define INIC_USE_RX_CSUM TRUE +#define INIC_USE_TX_CSUM TRUE +#define DEFAULT_NO_PATH_TIMEOUT 10000 /* TBD: Arbitrary */ +#define DEFAULT_PRI_CON_TIMEOUT 10000 /* TBD: Arbitrary */ +#define DEFAULT_PRI_RECON_TIMEOUT 10000 /* TBD: Arbitrary */ +#define DEFAULT_PRI_SWITCH_TIMEOUT 10000 /* TBD: Arbitrary */ +#define DEFAULT_PREFER_PRIMARY TRUE + +/* timeouts: !!
all values are defined in milliseconds; + some are converted to microseconds later */ +#define VIPORT_STATS_INTERVAL 5000 /* 5 sec */ +#define VIPORT_HEARTBEAT_INTERVAL 2000 /* 2 seconds */ +#define VIPORT_HEARTBEAT_TIMEOUT 64000 /* 64 sec */ +#define CONTROL_RSP_TIMEOUT 2000 /* 2 sec */ + +#define _100NS_IN_1MS (10000) +inline uint64_t +get_time_stamp_ms( void ) +{ + return( (KeQueryInterruptTime() / _100NS_IN_1MS ) ); +} + +/* InfiniBand Connection Parameters */ +#define CONTROL_REQ_RETRY_COUNT 4 +#define RETRY_COUNT 3 +#define MIN_RNR_TIMER 22 /* 20 ms */ +#define DEFAULT_PKEY 0 /* Pkey table index */ + +/* phys memory size to register with HCA */ +#define MEM_REG_SIZE 0xFFFFFFFFFFFFFFFF + +/* link speed in 100 bits/sec units */ +#define LINK_SPEED_1MBIT_x100BPS 10000 +#define LINK_SPEED_1GBIT_x100BPS 10000000 +#define LINK_SPEED_10GBIT_x100BPS 100000000 + /* if the VEx does not report its link speed, assume 1 Gb/s for now */ +#define DEFAULT_LINK_SPEED_x100BPS LINK_SPEED_1GBIT_x100BPS + +#define DEFAULT_PARAM(x,y) if(x == MAXU32) { \ + x = y; } +#define POWER_OF_2(x) if (!IsPowerOf2(x)) { \ + VNIC_TRACE( VNIC_DBG_WARN, (" %s (%d) must be a power of 2\n",#x,x) ); \ + x = SetMinPowerOf2(x); \ + } +#define LESS_THAN(lo, hi) if (lo >= hi) { \ + VNIC_TRACE( VNIC_DBG_ERROR, (" %s (%d) must be less than %s (%d)\n",#lo,lo,#hi,hi) ); \ + lo = hi >> 1; \ + } +#define LESS_THAN_OR_EQUAL(lo, hi) if (lo > hi) { \ + VNIC_TRACE( VNIC_DBG_WARN, (" %s (%d) cannot be greater than %s (%d)\n",#lo,lo,#hi,hi) ); \ + lo = hi; \ + } +#define RANGE_CHECK(x, min, max) if ((x < min) || (x > max)) { \ + VNIC_TRACE( VNIC_DBG_WARN, (" %s (%d) must be between %d and %d\n",#x,x,min,max) ); \ + if (x < min) \ + x = min; \ + else \ + x = max; \ + } +#define ZERO_RANGE_CHECK(x, min, max) if (x > max) { \ + VNIC_TRACE( VNIC_DBG_WARN, (" %s (%d) must be between %d and %d\n",#x,x,min,max) ); \ + x = max; \ + } + +#define BOOLEAN_RANGE(x) ZERO_RANGE_CHECK(x, 0, 1) +#define U32_ZERO_RANGE(x) ZERO_RANGE_CHECK(x, 0, 0x7FFFFFFF) +#define U32_RANGE(x) RANGE_CHECK(x, 1, 0x7FFFFFFF) +#define U16_ZERO_RANGE(x) ZERO_RANGE_CHECK(x, 0, 0xFFFF) +#define U16_RANGE(x) RANGE_CHECK(x, 1, 0xFFFF) +#define U8_ZERO_RANGE(x) ZERO_RANGE_CHECK(x, 0, 0xFF) +#define U8_RANGE(x) RANGE_CHECK(x, 1, 0xFF) + + +typedef struct { + uint64_t ioc_guid; + uint64_t portGuid; + uint64_t port; + uint64_t hca; + uint64_t instance; + char ioc_string[65]; + char ioc_guid_set; + char ioc_string_set; +} PathParam_t; + +typedef struct _vnic_globals { + NDIS_HANDLE ndis_handle; // ndis wrapper handle + volatile LONG dev_reg; // dev register counter + volatile LONG adapters; // created adapters counter + KSPIN_LOCK lock; + LIST_ENTRY adapter_list; + uint8_t host_name[IB_NODE_DESCRIPTION_SIZE + 1]; +} vnic_globals_t; +/* +struct members: + lock: + take this lock before accessing adapter_list + adapter_list: + head of list of virtual adapters initialized by driver +*/ + +typedef struct IbConfig { + ib_path_rec_t pathInfo; + uint64_t sid; + Inic_ConnectionData_t connData; + uint32_t retryCount; + uint32_t rnrRetryCount; + uint8_t minRnrTimer; + uint32_t numSends; + uint32_t numRecvs; + uint32_t recvScatter; /* 1 */ + uint32_t sendGather; /* 1 or 2 */ + uint32_t overrides; +} IbConfig_t; + +typedef struct ControlConfig { + IbConfig_t ibConfig; + uint32_t numRecvs; + uint8_t inicInstance; + uint16_t maxAddressEntries; + uint16_t minAddressEntries; + uint32_t rspTimeout; + uint8_t reqRetryCount; + uint32_t overrides; +} ControlConfig_t; + +typedef struct DataConfig {
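+ /* hostMin/hostMax and eiocMin/eiocMax below carry the bounds that + * control_configDataPathRsp() (vnic_control.c) enforces via + * checkRecvPoolConfig() when the EIOC answers CMD_CONFIG_DATA_PATH. */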
IbConfig_t ibConfig; + uint64_t pathId; + uint32_t numRecvs; + uint32_t hostRecvPoolEntries; + Inic_RecvPoolConfig_t hostMin; + Inic_RecvPoolConfig_t hostMax; + Inic_RecvPoolConfig_t eiocMin; + Inic_RecvPoolConfig_t eiocMax; + uint32_t notifyBundle; + uint32_t overrides; +} DataConfig_t; + +typedef struct ViportConfig { + struct _viport *pViport; + ControlConfig_t controlConfig; + DataConfig_t dataConfig; + uint32_t hca; + uint32_t port; + uint32_t statsInterval; + uint32_t hbInterval; /* heartbeat interval */ + uint32_t hbTimeout; /* heartbeat timeout */ + uint64_t portGuid; + uint64_t guid; + size_t pathIdx; + char ioc_string[65]; + +#define HB_INTERVAL_OVERRIDE 0x1 +#define GUID_OVERRIDE 0x2 +#define STRING_OVERRIDE 0x4 +#define HCA_OVERRIDE 0x8 +#define PORT_OVERRIDE 0x10 +#define PORTGUID_OVERRIDE 0x20 + uint32_t overrides; +} ViportConfig_t; + +/* + * primaryConnectTimeout - If the secondary connects first, how long do we + * give the primary? + * primaryReconnectTimeout - Same as above, but used when recovering when + * both paths fail + * primarySwitchTimeout - How long do we wait before switching to the + * primary when it comes back? + */ +#define IFNAMSIZ 65 +typedef struct InicConfig { + //struct Inic *pInic; + char name[IFNAMSIZ]; + uint32_t noPathTimeout; + uint32_t primaryConnectTimeout; + uint32_t primaryReconnectTimeout; + uint32_t primarySwitchTimeout; + int preferPrimary; + BOOLEAN useRxCsum; + BOOLEAN useTxCsum; +#define USE_RX_CSUM_OVERRIDE 0x1 +#define USE_TX_CSUM_OVERRIDE 0x2 + uint32_t overrides; +} InicConfig_t; + +typedef enum { + INIC_UNINITIALIZED, + INIC_DEREGISTERING, + INIC_REGISTERED, +} InicState_t; + +#endif /* _VNIC_CONFIG_H_ */ + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_control.c b/branches/Ndi/ulp/inic/kernel/vnic_control.c new file mode 100644 index 00000000..799c7a75 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_control.c @@ -0,0 +1,2007 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE.
+ * + * $Id$ + */ + +#include "vnic_adapter.h" + +static void +control_recv( Control_t *pControl, RecvIo_t *pRecvIo ); + +static void +control_recvComplete( + IN Io_t *pIo ); + +static ib_api_status_t +control_send( Control_t *pControl ); + +static void +control_sendComplete( Io_t *pIo ); +static void +control_timeout( void * context ); + +static void +control_timer( Control_t *pControl, int timeout ); + +static void +control_timerStop( Control_t *pControl ); + +static void +control_initHdr( Control_t *pControl, uint8_t cmd ); + +static RecvIo_t * +control_getRsp( Control_t *pControl ); + +static void +copyRecvPoolConfig( Inic_RecvPoolConfig_t *pSrc, + Inic_RecvPoolConfig_t *pDst ); + +static BOOLEAN +checkRecvPoolConfigValue( + IN void *pSrc, + IN void *pDst, + IN void *pMax, + IN void *pMin, + IN char *name ); + +static BOOLEAN checkRecvPoolConfig( + Inic_RecvPoolConfig_t *pSrc, + Inic_RecvPoolConfig_t *pDst, + Inic_RecvPoolConfig_t *pMax, + Inic_RecvPoolConfig_t *pMin); + +static void +__control_logControlPacket( + Inic_ControlPacket_t *pPkt ); + +void +control_construct( + IN Control_t *pControl, + IN viport_t *pViport ) +{ + VNIC_ENTER( VNIC_DBG_CTRL ); + + cl_memclr(pControl, sizeof(Control_t)); + + pControl->p_viport = pViport; + + pControl->reqOutstanding = FALSE; + pControl->seqNum = 0; + + pControl->pResponse = NULL; + pControl->pInfo = NULL; + + pControl->p_viport->addrs_query_done = TRUE; + + InitializeListHead(&pControl->failureList); + KeInitializeSpinLock(&pControl->ioLock); + + cl_timer_construct( &pControl->timer ); + + ibqp_construct( &pControl->qp, pViport ); + + VNIC_EXIT( VNIC_DBG_CTRL ); +} + + +ib_api_status_t +control_init( + IN Control_t *pControl, + IN viport_t *pViport, + IN ControlConfig_t *pConfig, + IN uint64_t guid ) +{ + ib_api_status_t ib_status; + ib_pd_handle_t hPd; + Inic_ControlPacket_t *pkt; + Io_t *pIo; + int sz; + unsigned int i; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pControl->p_conf = pConfig; + + hPd = pViport->p_adapter->ca.hPd; + + cl_timer_init( &pControl->timer, control_timeout, pControl ); + + ib_status = ibqp_init( + &pControl->qp, guid, &pConfig->ibConfig ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("ibqp_init returned %s\n", + pViport->p_adapter->ifc.get_err_str( ib_status )) ); + goto failure; + } + + sz = (sizeof(RecvIo_t) * pConfig->numRecvs ) + + (sizeof(Inic_ControlPacket_t) * (pConfig->numRecvs + 1)); + + pControl->pLocalStorage = cl_zalloc( sz ); + + if ( pControl->pLocalStorage == NULL ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Failed allocating space for local storage\n" )); + + ibqp_cleanup(&pControl->qp); + ib_status = IB_INSUFFICIENT_MEMORY; + goto failure; + } + + pControl->pRecvIos = (RecvIo_t *)pControl->pLocalStorage; + + pkt = (Inic_ControlPacket_t *)(pControl->pLocalStorage + + sizeof(SendIo_t) * pConfig->numRecvs); + + sz = sizeof(Inic_ControlPacket_t) * (pConfig->numRecvs + 1); + + ib_status = ibregion_init( pViport, &pControl->region, hPd, + pkt, sz, IB_AC_LOCAL_WRITE ); + if ( ib_status != IB_SUCCESS ) + { + /* NOTE: I'm allowing recvs into the send buffer as well + * as the receive buffers. I'm doing this to combine them + * into a single region, and conserve a region. 
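+ * Concretely, the single ibregion_init() registration above covers the send + * packet and all of the receive packets, which is why every dsList entry set + * up below shares pControl->region.lkey.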
+ */ + VNIC_TRACE_EXIT( VNIC_DBG_ERROR , + (" Failed setting up control space region\n" )); + + ibqp_cleanup( &pControl->qp ); + cl_free( pControl->pLocalStorage ); + goto failure; + } + pIo = &pControl->sendIo.io; + pIo->pViport = pViport; + pIo->pRoutine = control_sendComplete; + + pIo->wrq.p_next = NULL; + pIo->wrq.wr_type = WR_SEND; + pIo->wrq.send_opt = IB_SEND_OPT_SIGNALED; + pIo->wrq.wr_id = (uint64_t)(pIo); + pIo->wrq.num_ds = 1; + pIo->wrq.ds_array = &pControl->sendIo.dsList; + pIo->wrq.ds_array[0].length = sizeof(Inic_ControlPacket_t); + pIo->wrq.ds_array[0].lkey = pControl->region.lkey; + pIo->wrq.ds_array[0].vaddr = (uint64_t)(pkt++); + + for (i = 0; i < pConfig->numRecvs; i++ ) + { + pIo = &pControl->pRecvIos[i].io; + pIo->pViport = pViport; + pIo->pRoutine = control_recvComplete; + + pIo->r_wrq.wr_id = (uint64_t)(pIo); + pIo->r_wrq.p_next = NULL; + pIo->r_wrq.num_ds = 1; + pIo->r_wrq.ds_array = &pControl->pRecvIos[i].dsList; + pIo->r_wrq.ds_array[0].length = sizeof(Inic_ControlPacket_t); + pIo->r_wrq.ds_array[0].vaddr = (uint64_t)(pkt++); + pIo->r_wrq.ds_array[0].lkey = pControl->region.lkey; + + if ( ibqp_postRecv( &pControl->qp, pIo ) != IB_SUCCESS ) + { + control_cleanup( pControl ); + ib_status = IB_ERROR; + goto failure; + } + } + +failure: + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +void +control_cleanup( + IN Control_t *pControl ) +{ + VNIC_ENTER( VNIC_DBG_CTRL ); + + control_timerStop( pControl ); + ibqp_detach( &pControl->qp ); + ibregion_cleanup( pControl->p_viport, &pControl->region ); + + if ( pControl->pLocalStorage ) + { + cl_free( pControl->pLocalStorage ); + pControl->pLocalStorage = NULL; + } + + cl_timer_destroy( &pControl->timer ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return; +} + +void +control_processAsync( + IN Control_t *pControl ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + LIST_ENTRY *p_list_entry; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = InterlockedExchangePointer( &pControl->pInfo, NULL ); + + if ( pRecvIo != NULL ) + { + VNIC_TRACE( VNIC_DBG_CTRL, + ("IOC %d: processing info packet\n", + pControl->p_viport->ioc_num ) ); + + pPkt = control_packet( pRecvIo ); + + if ( pPkt->hdr.pktCmd == CMD_REPORT_STATUS ) + { + switch( ntoh32(pPkt->cmd.reportStatus.statusNumber) ) + { + case INIC_STATUS_LINK_UP: + VNIC_TRACE( VNIC_DBG_CTRL, + ("IOC %d: Link Up\n", pControl->p_viport->ioc_num ) ); + /* renew link speed info */ + pControl->p_viport->p_adapter->link_speed = + ntoh32( pPkt->cmd.reportStatus.statusInfo ); + viport_linkUp( pControl->p_viport ); + break; + + case INIC_STATUS_LINK_DOWN: + VNIC_TRACE( VNIC_DBG_CTRL, + ("IOC %d: Link Down\n", pControl->p_viport->ioc_num ) ); + + viport_linkDown( pControl->p_viport ); + break; + + default: + VNIC_TRACE( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: Asynchronous status received from EIOC\n", + pControl->p_viport->ioc_num ) ); + __control_logControlPacket( pPkt ); + break; + } + } + + if ( pPkt->hdr.pktCmd != CMD_REPORT_STATUS || + pPkt->cmd.reportStatus.isFatal ) + { + viport_failure( pControl->p_viport ); + } + + control_recv( pControl, pRecvIo ); + } + + while ( !IsListEmpty( &pControl->failureList ) ) + { + + VNIC_TRACE( VNIC_DBG_CTRL, + ("IOC %d: processing error packet\n", + pControl->p_viport->ioc_num ) ); + + p_list_entry = ExInterlockedRemoveHeadList( &pControl->failureList, &pControl->ioLock ); + pRecvIo = (RecvIo_t *)p_list_entry; + pPkt = control_packet( pRecvIo ); + + VNIC_TRACE( VNIC_DBG_CTRL, + ("IOC %d: Asynchronous error received from EIOC\n", + 
pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + + if ( ( pPkt->hdr.pktType != TYPE_ERR ) || + ( pPkt->hdr.pktCmd != CMD_REPORT_STATUS ) || + ( pPkt->cmd.reportStatus.isFatal ) ) + { + viport_failure( pControl->p_viport ); + } + + control_recv( pControl, pRecvIo ); + } + + VNIC_EXIT( VNIC_DBG_CTRL ); +} + + +ib_api_status_t +control_initInicReq( + IN Control_t *pControl ) +{ + ControlConfig_t *p_conf = pControl->p_conf; + Inic_ControlPacket_t *pPkt; + Inic_CmdInitInicReq_t *pInitInicReq; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr( pControl, CMD_INIT_INIC ); + + pPkt = control_packet( &pControl->sendIo ); + pInitInicReq = &pPkt->cmd.initInicReq; + pInitInicReq->inicMajorVersion = hton16( INIC_MAJORVERSION ); + pInitInicReq->inicMinorVersion = hton16( INIC_MINORVERSION ); + pInitInicReq->inicInstance = p_conf->inicInstance; + pInitInicReq->numDataPaths = 1; + pInitInicReq->numAddressEntries = hton16( p_conf->maxAddressEntries ); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_initInicRsp( + IN Control_t *pControl, + IN uint32_t *pFeatures, + IN uint8_t *pMacAddress, + IN uint16_t *pNumAddrs, + IN uint16_t *pVlan ) +{ + RecvIo_t *pRecvIo; + ControlConfig_t *p_conf = pControl->p_conf; + Inic_ControlPacket_t *pPkt; + Inic_CmdInitInicRsp_t *pInitInicRsp; + uint8_t numDataPaths, + numLanSwitches; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if (!pRecvIo) + return FALSE; + + pPkt = control_packet( pRecvIo ); + + if ( pPkt->hdr.pktCmd != CMD_INIT_INIC ) + { + + VNIC_TRACE( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + goto failure; + } + pInitInicRsp = &pPkt->cmd.initInicRsp; + pControl->majVer = ntoh16( pInitInicRsp->inicMajorVersion ); + pControl->minVer = ntoh16( pInitInicRsp->inicMinorVersion ); + numDataPaths = pInitInicRsp->numDataPaths; + numLanSwitches = pInitInicRsp->numLanSwitches; + *pFeatures = ntoh32( pInitInicRsp->featuresSupported ); + *pNumAddrs = ntoh16( pInitInicRsp->numAddressEntries ); + + if ( ( pControl->majVer > INIC_MAJORVERSION ) || + (( pControl->majVer == INIC_MAJORVERSION ) && + ( pControl->minVer > INIC_MINORVERSION )) ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: Unsupported version\n", + pControl->p_viport->ioc_num ) ); + goto failure; + } + + if ( numDataPaths != 1 ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: EIOC returned too many datapaths\n", + pControl->p_viport->ioc_num ) ); + goto failure; + } + + if ( *pNumAddrs > p_conf->maxAddressEntries ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: EIOC returned more Address entries than requested\n", + pControl->p_viport->ioc_num) ); + goto failure; + } + if ( *pNumAddrs < p_conf->minAddressEntries ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: Not enough address entries\n", + pControl->p_viport->ioc_num ) ); + goto failure; + } + if ( numLanSwitches < 1 ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: EIOC returned no lan switches\n", + pControl->p_viport->ioc_num ) ); + goto failure; + } + if ( 
numLanSwitches > 1 ) + { + VNIC_TRACE_EXIT( VNIC_DBG_CTRL | VNIC_DBG_ERROR, + ("IOC %d: EIOC returned multiple lan switches\n", + pControl->p_viport->ioc_num ) ); + goto failure; + } + + pControl->lanSwitch.lanSwitchNum = + pInitInicRsp->lanSwitch[0].lanSwitchNum ; + pControl->lanSwitch.numEnetPorts = + pInitInicRsp->lanSwitch[0].numEnetPorts ; + pControl->lanSwitch.defaultVlan = + ntoh16( pInitInicRsp->lanSwitch[0].defaultVlan ); + *pVlan = pControl->lanSwitch.defaultVlan; + cl_memcpy( pControl->lanSwitch.hwMacAddress, + pInitInicRsp->lanSwitch[0].hwMacAddress, MAC_ADDR_LEN ); + cl_memcpy( pMacAddress, + pInitInicRsp->lanSwitch[0].hwMacAddress, MAC_ADDR_LEN); + + control_recv( pControl, pRecvIo ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; +failure: + viport_failure( pControl->p_viport ); + return FALSE; +} + + +ib_api_status_t +control_configDataPathReq( + IN Control_t *pControl, + IN uint64_t pathId, + IN Inic_RecvPoolConfig_t *pHost, + IN Inic_RecvPoolConfig_t *pEioc ) +{ + Inic_ControlPacket_t *pPkt; + Inic_CmdConfigDataPath_t *pConfigDataPath; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr( pControl, CMD_CONFIG_DATA_PATH ); + + pPkt = control_packet( &pControl->sendIo ); + pConfigDataPath = &pPkt->cmd.configDataPathReq; + NdisZeroMemory( pConfigDataPath, sizeof( Inic_CmdConfigDataPath_t ) ); + pConfigDataPath->dataPath = 0; + pConfigDataPath->pathIdentifier = pathId; + copyRecvPoolConfig( pHost, &pConfigDataPath->hostRecvPoolConfig ); + copyRecvPoolConfig( pEioc, &pConfigDataPath->eiocRecvPoolConfig ); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_configDataPathRsp( + IN Control_t *pControl, + IN Inic_RecvPoolConfig_t *pHost, + IN Inic_RecvPoolConfig_t *pEioc, + IN Inic_RecvPoolConfig_t *pMaxHost, + IN Inic_RecvPoolConfig_t *pMaxEioc, + IN Inic_RecvPoolConfig_t *pMinHost, + IN Inic_RecvPoolConfig_t *pMinEioc ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdConfigDataPath_t *pConfigDataPath; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if ( !pRecvIo ) + return FALSE; + + pPkt = control_packet( pRecvIo ); + + if ( pPkt->hdr.pktCmd != CMD_CONFIG_DATA_PATH ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + goto failure; + } + + pConfigDataPath = &pPkt->cmd.configDataPathRsp; + + if ( pConfigDataPath->dataPath != 0 ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received CMD_CONFIG_DATA_PATH response for wrong data path: %u\n", + pControl->p_viport->ioc_num , pConfigDataPath->dataPath) ); + goto failure; + } + + if ( !checkRecvPoolConfig(&pConfigDataPath->hostRecvPoolConfig, + pHost, pMaxHost, pMinHost ) + || !checkRecvPoolConfig(&pConfigDataPath->eiocRecvPoolConfig, + pEioc, pMaxEioc, pMinEioc)) + { + goto failure; + } + + control_recv( pControl, pRecvIo ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; +failure: + viport_failure( pControl->p_viport ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return FALSE; +} + + +ib_api_status_t +control_exchangePoolsReq( + IN Control_t *pControl, + IN uint64_t addr, + IN uint32_t rkey ) +{ + Inic_CmdExchangePools_t *pExchangePools; + Inic_ControlPacket_t *pPkt; + 
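+ /* CMD_EXCHANGE_POOLS advertises the host-side recv pool to the EIOC: the + * request below carries the region's RKey and base address (in network byte + * order via hton64()), and control_exchangePoolsRsp() reads back the EIOC's + * pool, presumably so each side can place data directly into the other's + * buffer pool. */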
ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr(pControl, CMD_EXCHANGE_POOLS ); + + pPkt = control_packet( &pControl->sendIo ); + pExchangePools = &pPkt->cmd.exchangePoolsReq; + NdisZeroMemory( pExchangePools, sizeof( Inic_CmdExchangePools_t ) ); + pExchangePools->dataPath = 0; + pExchangePools->poolRKey = rkey; + pExchangePools->poolAddr = hton64( addr ); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_exchangePoolsRsp( + IN Control_t *pControl, + IN OUT uint64_t *pAddr, + IN OUT uint32_t *pRkey ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdExchangePools_t *pExchangePools; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if ( !pRecvIo ) + return FALSE; + + pPkt = control_packet( pRecvIo ); + + if ( pPkt->hdr.pktCmd != CMD_EXCHANGE_POOLS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq(pControl ) ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + goto failure; + } + + pExchangePools = &pPkt->cmd.exchangePoolsRsp; + *pRkey = pExchangePools->poolRKey; + *pAddr = ntoh64( pExchangePools->poolAddr ); + + if ( hton32( pExchangePools->dataPath ) != 0 ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("IOC %d: Received CMD_EXCHANGE_POOLS response for wrong data path: %u\n", + pControl->p_viport->ioc_num , pExchangePools->dataPath ) ); + goto failure; + } + + control_recv( pControl, pRecvIo ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; + +failure: + viport_failure( pControl->p_viport ); + return FALSE; +} + + +ib_api_status_t +control_configLinkReq( + IN Control_t *pControl, + IN uint8_t flags, + IN uint16_t mtu ) +{ + Inic_CmdConfigLink_t *pConfigLinkReq; + Inic_ControlPacket_t *pPkt; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr( pControl, CMD_CONFIG_LINK ); + + pPkt = control_packet( &pControl->sendIo ); + pConfigLinkReq = &pPkt->cmd.configLinkReq; + NdisZeroMemory( pConfigLinkReq, sizeof( Inic_CmdConfigLink_t ) ); + pConfigLinkReq->lanSwitchNum = pControl->lanSwitch.lanSwitchNum; + pConfigLinkReq->cmdFlags = INIC_FLAG_SET_MTU; + + if ( flags & INIC_FLAG_ENABLE_NIC ) + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_ENABLE_NIC; + } + else + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_DISABLE_NIC; + } + + if (flags & INIC_FLAG_ENABLE_MCAST_ALL ) + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_ENABLE_MCAST_ALL; + } + else + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_DISABLE_MCAST_ALL; + } + if (flags & INIC_FLAG_ENABLE_PROMISC ) + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_ENABLE_PROMISC; + /* The EIOU doesn't really do PROMISC mode. + * If PROMISC is set, it only receives unicast packets. + * I also have to set MCAST_ALL if I want real + * PROMISC mode.
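+ * Note that cmdFlags always starts from INIC_FLAG_SET_MTU and carries one + * flag from each enable/disable pair (with PROMISC forcing the MCAST_ALL + * pair to enable, as adjusted below), so every CMD_CONFIG_LINK describes the + * complete link state rather than a delta.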
+ */ + pConfigLinkReq->cmdFlags &= ~INIC_FLAG_DISABLE_MCAST_ALL; + pConfigLinkReq->cmdFlags |= INIC_FLAG_ENABLE_MCAST_ALL; + } + else + { + pConfigLinkReq->cmdFlags |= INIC_FLAG_DISABLE_PROMISC; + } + pConfigLinkReq->mtuSize = hton16( mtu ); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_configLinkRsp( + IN Control_t *pControl, + IN OUT uint8_t *pFlags, + IN OUT uint16_t *pMtu ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdConfigLink_t *pConfigLinkRsp; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if ( !pRecvIo ) + return FALSE; + + pPkt = control_packet( pRecvIo ); + if ( pPkt->hdr.pktCmd != CMD_CONFIG_LINK ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + goto failure; + } + + pConfigLinkRsp = &pPkt->cmd.configLinkRsp; + + *pFlags = pConfigLinkRsp->cmdFlags; + *pMtu = ntoh16( pConfigLinkRsp->mtuSize ); + + control_recv( pControl, pRecvIo ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; + +failure: + viport_failure( pControl->p_viport ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return FALSE; +} + +/* control_configAddrsReq: + * Sends up to 16 pending address table updates in one request. + * Returns the ib_api_status_t of the send; completion is reported + * through *pAddrQueryDone, which is set to 1 when no address table + * entries remain to be updated, or 0 if another request is needed. + */ +ib_api_status_t +control_configAddrsReq( + IN Control_t *pControl, + IN Inic_AddressOp_t *pAddrs, + IN uint16_t num, + OUT int32_t *pAddrQueryDone ) +{ + Inic_CmdConfigAddresses_t *pConfigAddrsReq; + Inic_ControlPacket_t *pPkt; + uint16_t i; + uint8_t j; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr( pControl, CMD_CONFIG_ADDRESSES ); + + pPkt = control_packet( &pControl->sendIo ); + pConfigAddrsReq = &pPkt->cmd.configAddressesReq; + NdisZeroMemory( pConfigAddrsReq, sizeof( Inic_CmdConfigAddresses_t ) ); + pConfigAddrsReq->lanSwitchNum = pControl->lanSwitch.lanSwitchNum; + + for ( i=0, j = 0; ( i < num ) && ( j < 16 ); i++ ) + { + if ( !pAddrs[i].operation ) + continue; + + pConfigAddrsReq->listAddressOps[j].index = hton16(i); + pConfigAddrsReq->listAddressOps[j].operation = INIC_OP_SET_ENTRY; + pConfigAddrsReq->listAddressOps[j].valid = pAddrs[i].valid; + + cl_memcpy( pConfigAddrsReq->listAddressOps[j].address, + pAddrs[i].address, MAC_ADDR_LEN ); + pConfigAddrsReq->listAddressOps[j].vlan = hton16( pAddrs[i].vlan ); + pAddrs[i].operation = 0; + j++; + } + pConfigAddrsReq->numAddressOps = j; + + for ( ; i < num; i++ ) + { + if ( pAddrs[i].operation ) + break; + } + *pAddrQueryDone = (i == num); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_configAddrsRsp( + IN Control_t *pControl ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdConfigAddresses_t *pConfigAddrsRsp; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if ( !pRecvIo ) + return FALSE; + + pPkt = control_packet( pRecvIo ); + if ( pPkt->hdr.pktCmd != CMD_CONFIG_ADDRESSES ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE_EXIT(VNIC_DBG_ERROR, +
("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + + goto failure; + } + + pConfigAddrsRsp = &pPkt->cmd.configAddressesRsp; + + control_recv( pControl, pRecvIo ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; + +failure: + viport_failure( pControl->p_viport ); + return FALSE; +} + + +ib_api_status_t +control_reportStatisticsReq( + IN Control_t *pControl ) +{ + Inic_ControlPacket_t *pPkt; + Inic_CmdReportStatisticsReq_t *pReportStatisticsReq; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + control_initHdr( pControl, CMD_REPORT_STATISTICS ); + + pPkt = control_packet( &pControl->sendIo ); + pReportStatisticsReq = &pPkt->cmd.reportStatisticsReq; + pReportStatisticsReq->lanSwitchNum = pControl->lanSwitch.lanSwitchNum; + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +BOOLEAN +control_reportStatisticsRsp( + IN Control_t *pControl, + IN Inic_CmdReportStatisticsRsp_t *pStats ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdReportStatisticsRsp_t *pRepStatRsp; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if (!pRecvIo) + return FALSE; + + pPkt = control_packet( pRecvIo ); + if ( pPkt->hdr.pktCmd != CMD_REPORT_STATISTICS ) + { + VNIC_TRACE(VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE_EXIT(VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + + goto failure; + } + + pRepStatRsp = &pPkt->cmd.reportStatisticsRsp; + pStats->ifInBroadcastPkts = ntoh64(pRepStatRsp->ifInBroadcastPkts); + pStats->ifInMulticastPkts = ntoh64(pRepStatRsp->ifInMulticastPkts); + pStats->ifInOctets = ntoh64(pRepStatRsp->ifInOctets); + pStats->ifInUcastPkts = ntoh64(pRepStatRsp->ifInUcastPkts); + pStats->ifInNUcastPkts = ntoh64(pRepStatRsp->ifInNUcastPkts); + pStats->ifInUnderrun = ntoh64(pRepStatRsp->ifInUnderrun); + pStats->ifInErrors = ntoh64(pRepStatRsp->ifInErrors); + pStats->ifOutErrors = ntoh64(pRepStatRsp->ifOutErrors); + pStats->ifOutOctets = ntoh64(pRepStatRsp->ifOutOctets); + pStats->ifOutUcastPkts = ntoh64(pRepStatRsp->ifOutUcastPkts); + pStats->ifOutMulticastPkts = ntoh64(pRepStatRsp->ifOutMulticastPkts); + pStats->ifOutBroadcastPkts = ntoh64(pRepStatRsp->ifOutBroadcastPkts); + pStats->ifOutNUcastPkts = ntoh64(pRepStatRsp->ifOutNUcastPkts); + pStats->ifOutOk = ntoh64(pRepStatRsp->ifOutOk); + pStats->ifInOk = ntoh64(pRepStatRsp->ifInOk); + pStats->ifOutUcastBytes = ntoh64(pRepStatRsp->ifOutUcastBytes); + pStats->ifOutMulticastBytes = ntoh64(pRepStatRsp->ifOutMulticastBytes); + pStats->ifOutBroadcastBytes = ntoh64(pRepStatRsp->ifOutBroadcastBytes); + pStats->ifInUcastBytes = ntoh64(pRepStatRsp->ifInUcastBytes); + pStats->ifInMulticastBytes = ntoh64(pRepStatRsp->ifInMulticastBytes); + pStats->ifInBroadcastBytes = ntoh64(pRepStatRsp->ifInBroadcastBytes); + pStats->ethernetStatus = ntoh64(pRepStatRsp->ethernetStatus); + + control_recv(pControl, pRecvIo); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; +failure: + viport_failure( pControl->p_viport ); + return FALSE; +} + + +ib_api_status_t +control_resetReq( + IN Control_t *pControl ) +{ + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + control_initHdr( pControl, CMD_RESET ); + ib_status = control_send( pControl ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + +BOOLEAN 
+control_resetRsp( + IN Control_t *pControl ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + if ( !pRecvIo ) return FALSE; + + pPkt = control_packet( pRecvIo ); + + if ( pPkt->hdr.pktCmd != CMD_RESET ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq( pControl ) ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + + goto failure; + } + + control_recv( pControl, pRecvIo ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return TRUE; +failure: + viport_failure( pControl->p_viport ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return FALSE; +} + + +ib_api_status_t +control_heartbeatReq( + IN Control_t *pControl, + IN uint32_t hbInterval ) +{ + Inic_ControlPacket_t *pPkt; + Inic_CmdHeartbeat_t *pHeartbeatReq; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_CTRL ); + if( pControl->p_viport->errored ) + { + return IB_ERROR; + } + control_initHdr(pControl, CMD_HEARTBEAT); + + pPkt = control_packet(&pControl->sendIo); + pHeartbeatReq = &pPkt->cmd.heartbeatReq; + + /* pass timeout for the target in microseconds */ + pHeartbeatReq->hbInterval = hton32( hbInterval*1000 ); + + ib_status = control_send( pControl ); + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + +BOOLEAN +control_heartbeatRsp( + IN Control_t *pControl ) +{ + RecvIo_t *pRecvIo; + Inic_ControlPacket_t *pPkt; + Inic_CmdHeartbeat_t *pHeartbeatRsp; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pRecvIo = control_getRsp( pControl ); + + if (!pRecvIo) + return FALSE; + + pPkt = control_packet(pRecvIo); + + if ( pPkt->hdr.pktCmd != CMD_HEARTBEAT ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Sent control request:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( control_lastReq(pControl) ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Received control response:\n", + pControl->p_viport->ioc_num ) ); + + __control_logControlPacket( pPkt ); + goto failure; + } + + pHeartbeatRsp = &pPkt->cmd.heartbeatRsp; + + control_recv( pControl, pRecvIo ); + VNIC_EXIT ( VNIC_DBG_CTRL ); + return TRUE; + +failure: + viport_failure( pControl->p_viport ); + VNIC_EXIT ( VNIC_DBG_CTRL ); + return FALSE; +} + +static void +control_recv( + IN Control_t *pControl, + IN RecvIo_t *pRecvIo ) +{ + VNIC_ENTER( VNIC_DBG_CTRL ); + + if ( ibqp_postRecv( &pControl->qp, &pRecvIo->io ) != IB_SUCCESS ) + viport_failure( pControl->p_viport ); + + VNIC_EXIT ( VNIC_DBG_CTRL ); +} + + +static void +control_recvComplete( + IN Io_t *pIo ) +{ + RecvIo_t *pRecvIo = (RecvIo_t *)pIo; + RecvIo_t *pLastRecvIo; + BOOLEAN status = FALSE; + Control_t *pControl = &pIo->pViport->control; + viport_t *p_viport = pIo->pViport; + Inic_ControlPacket_t *pPkt = control_packet(pRecvIo); + Inic_ControlHeader_t *pCHdr = &pPkt->hdr; + + if( p_viport->errored ) + { + return; + } + switch ( pCHdr->pktType ) + { + case TYPE_INFO: + pLastRecvIo = InterlockedExchangePointer( &pControl->pInfo, pRecvIo ); + + control_processAsync( pControl ); + + if ( pLastRecvIo ) + { + control_recv( pControl, pLastRecvIo ); + } + return; + + case TYPE_RSP: + break; + + default: + //TODO: Should we ever reach this? Who processes the list entries? 
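+ /* Note: nothing in this file consumes failureList entries, so the + * TODO above stands. A hypothetical drain loop (sketch only, assuming + * RecvIo_t begins with its Io_t member, as the casts in this file imply): + * + * PLIST_ENTRY p_entry; + * while( (p_entry = ExInterlockedRemoveHeadList( &pControl->failureList, + * &pControl->ioLock )) != NULL ) + * { + * control_recv( pControl, + * (RecvIo_t*)CONTAINING_RECORD( p_entry, Io_t, listPtrs ) ); + * } + */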
+ ASSERT( pCHdr->pktType == TYPE_INFO || pCHdr->pktType == TYPE_RSP ); + /* list head comes first, then the entry being queued */ + ExInterlockedInsertTailList( &pControl->failureList, + &pRecvIo->io.listPtrs, + &pControl->ioLock ); + return; + } + + if( (pCHdr->pktSeqNum != pControl->seqNum) || + !InterlockedExchange( (volatile LONG*)&pControl->rspExpected, FALSE ) ) + { + return; + } + + InterlockedExchangePointer( &pControl->pResponse, pRecvIo ); + + switch ( pCHdr->pktCmd ) + { + case CMD_INIT_INIC: + status = control_initInicRsp( pControl, + &p_viport->featuresSupported, + p_viport->hwMacAddress, + &p_viport->numMacAddresses, + &p_viport->defaultVlan ); + if( status ) + { + InterlockedExchange( + (volatile LONG*)&p_viport->linkState, + (LONG)LINK_INITINICRSP ); + } + InterlockedOr( &p_viport->updates, SYNC_QUERY ); + break; + + case CMD_CONFIG_DATA_PATH: + status = control_configDataPathRsp( pControl, + data_hostPool( &p_viport->data ), + data_eiocPool( &p_viport->data ), + data_hostPoolMax( &p_viport->data ), + data_eiocPoolMax( &p_viport->data ), + data_hostPoolMin( &p_viport->data ), + data_eiocPoolMin( &p_viport->data )); + if( status ) + { + InterlockedExchange( + (volatile LONG*)&p_viport->linkState, + (LONG)LINK_CONFIGDATAPATHRSP ); + } + InterlockedOr( &p_viport->updates, SYNC_QUERY ); + break; + + case CMD_EXCHANGE_POOLS: + status = control_exchangePoolsRsp( &p_viport->control, + data_remotePoolAddr( &p_viport->data ), + data_remotePoolRkey( &p_viport->data ) ); + if( status ) + { + InterlockedExchange( + (volatile LONG*)&p_viport->linkState, + (LONG)LINK_XCHGPOOLRSP ); + } + InterlockedOr( &p_viport->updates, SYNC_QUERY ); + break; + + /* process other responses */ + case CMD_CONFIG_LINK: + status = control_configLinkRsp( &p_viport->control, + &p_viport->flags, + &p_viport->mtu ); + if( status ) + { + InterlockedExchange( + (volatile LONG*)&p_viport->linkState, + (LONG)LINK_CONFIGLINKRSP ); + + if( p_viport->flags & INIC_FLAG_ENABLE_NIC ) + { + InterlockedExchange( &p_viport->p_netpath->carrier, TRUE ); + /* don't indicate media state yet if in sync query */ + if( !( p_viport->updates & SYNC_QUERY ) ) + { + viport_linkUp( p_viport ); + } + } + else + { + InterlockedExchange( &p_viport->p_netpath->carrier, FALSE ); + viport_linkDown( p_viport ); + } + InterlockedAnd( &p_viport->updates, ~NEED_LINK_CONFIG ); + } + break; + + case CMD_HEARTBEAT: + status = control_heartbeatRsp( pControl ); + if( status && + !p_viport->errored && + !p_viport->disconnect ) + { + InterlockedExchange( + (volatile LONG*)&p_viport->link_hb_state, + (LONG)LINK_HEARTBEATRSP ); + } + // Don't signal any waiting thread or start processing other updates. + return; + + case CMD_CONFIG_ADDRESSES: + status = control_configAddrsRsp( pControl ); + if( status == TRUE ) + { // need more entries to send? + if( p_viport->addrs_query_done == 0 ) + { + if( !p_viport->errored ) + { + if( control_configAddrsReq( pControl, + p_viport->macAddresses, + p_viport->numMacAddresses, + &p_viport->addrs_query_done ) != IB_SUCCESS ) + { + viport_failure( p_viport ); + } + } + // Don't signal any waiting thread or start processing other updates.
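+ // (The follow-up CMD_CONFIG_ADDRESSES request posted above completes
+ // back into this case; the cycle repeats until addrs_query_done
+ // becomes nonzero and NEED_ADDRESS_CONFIG can be cleared below.)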
+ return; + } + + InterlockedAnd( &p_viport->updates, ~NEED_ADDRESS_CONFIG ); + InterlockedExchange( (volatile LONG*)&p_viport->linkState, + (LONG)LINK_CONFIGADDRSRSP ); + } + break; + + case CMD_REPORT_STATISTICS: + status = control_reportStatisticsRsp( pControl, &p_viport->stats ); + if ( status ) + { + if( p_viport->stats.ethernetStatus > 0 && + !p_viport->errored ) + { + viport_linkUp( p_viport ); + } + else + { + viport_linkDown( p_viport ); + } + } + InterlockedAnd( &p_viport->updates, ~NEED_STATS ); + break; + + case CMD_RESET: + status = control_resetRsp( pControl ); + if( status ) + { + status = FALSE; + InterlockedExchange( + (volatile LONG*)&p_viport->linkState, + (LONG)LINK_RESETRSP ); + } + break; + + default: + break; + } + + if( _viport_process_query( p_viport, FALSE ) == STATUS_SUCCESS ) + { + /* Complete any pending set OID. */ + vnic_resume_set_oids( p_viport->p_adapter ); + } + + if( InterlockedAnd( &p_viport->updates, ~SYNC_QUERY ) & SYNC_QUERY ) + cl_event_signal( &p_viport->conn_event ); +} + + +static ib_api_status_t +control_send( + IN Control_t *pControl ) +{ + ib_api_status_t ib_status; + Inic_ControlPacket_t *pPkt = control_packet(&pControl->sendIo); + + VNIC_ENTER ( VNIC_DBG_CTRL ); + + //ASSERT( !pControl->reqOutstanding ); + if ( InterlockedCompareExchange( (volatile LONG*)&pControl->reqOutstanding, + TRUE, FALSE ) == TRUE ) + { + /* IF WE HIT THIS WE ARE HOSED!!! + * by the time we detect this error, the send buffer has been + * overwritten, and if we retry we will send garbage data. + */ + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("IB Send never completed\n" ) ); + viport_failure( pControl->p_viport ); + return IB_ERROR; + } + +#ifdef _DEBUG_ + __control_logControlPacket( pPkt ); +#endif + + InterlockedExchange( (volatile LONG*)&pControl->rspExpected, + (LONG)pPkt->hdr.pktCmd ); + + control_timer( pControl, pControl->p_conf->rspTimeout ); + +#ifdef VNIC_STATISTIC + pControl->statistics.requestTime = cl_get_time_stamp(); +#endif /* VNIC_STATISTIC */ + + ib_status = ibqp_postSend( &pControl->qp, &pControl->sendIo.io ); + if( ib_status != IB_SUCCESS ) + { + InterlockedExchange((volatile LONG*)&pControl->reqOutstanding, FALSE ); + + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Failed to post send\n", pControl->p_viport->ioc_num ) ); + viport_failure( pControl->p_viport ); + } + + VNIC_EXIT( VNIC_DBG_CTRL ); + return ib_status; +} + + +static void +control_sendComplete( + IN Io_t *pIo ) +{ + Control_t *pControl = &pIo->pViport->control; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + InterlockedExchange((volatile LONG*)&pControl->reqOutstanding, FALSE ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return; +} + +static void +control_timeout( + IN void *p_context ) +{ + Control_t *pControl; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + pControl = (Control_t *)p_context; + + InterlockedExchange( (LONG *)&pControl->timerstate, (LONG)TIMER_EXPIRED ); + + InterlockedExchange( (volatile LONG*)&pControl->rspExpected, FALSE ); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return; +} + +static void +control_timer( + IN Control_t *pControl, + IN int timeout ) +{ + VNIC_ENTER( VNIC_DBG_CTRL ); + + InterlockedExchange( (LONG *)&pControl->timerstate, (LONG)TIMER_ACTIVE ); + + cl_timer_start(&pControl->timer, timeout); + + VNIC_EXIT( VNIC_DBG_CTRL ); + return; +} + +static void +control_timerStop( + IN Control_t *pControl ) +{ + VNIC_ENTER( VNIC_DBG_CTRL ); + + + if ( ( InterlockedExchange( (LONG *)&pControl->timerstate, + (LONG)TIMER_IDLE )) == TIMER_ACTIVE ) + { + cl_timer_stop( &pControl->timer ); + } + + VNIC_EXIT( 
VNIC_DBG_CTRL ); + return; +} + +static void +control_initHdr( + IN Control_t * pControl, + IN uint8_t cmd ) +{ + ControlConfig_t *p_conf; + Inic_ControlPacket_t *pPkt; + Inic_ControlHeader_t *pHdr; + + VNIC_ENTER( VNIC_DBG_CTRL ); + + p_conf = pControl->p_conf; + + pPkt = control_packet( &pControl->sendIo ); + pHdr = &pPkt->hdr; + + pHdr->pktType = TYPE_REQ; + pHdr->pktCmd = cmd; + pHdr->pktSeqNum = ++pControl->seqNum; + pControl->reqRetryCounter = 0; + pHdr->pktRetryCount = 0; + + VNIC_EXIT( VNIC_DBG_CTRL ); +} + +static RecvIo_t* +control_getRsp( + IN Control_t *pControl ) +{ + RecvIo_t *pRecvIo; + + VNIC_ENTER ( VNIC_DBG_CTRL ); + + pRecvIo = InterlockedExchangePointer( &pControl->pResponse, NULL ); + + if ( pRecvIo != NULL ) + { + control_timerStop(pControl); + return pRecvIo; + } + + /* atomically consume an expired timer; writing the returned old value + * back to timerstate would race with the timeout handler */ + if ( InterlockedCompareExchange( (LONG *)&pControl->timerstate, + (LONG)TIMER_IDLE, (LONG)TIMER_EXPIRED ) == TIMER_EXPIRED ) + { + Inic_ControlPacket_t *pPkt = control_packet( &pControl->sendIo ); + Inic_ControlHeader_t *pHdr = &pPkt->hdr; + + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("IOC %d: No response received from EIOC\n", + pControl->p_viport->ioc_num ) ); +#ifdef VNIC_STATISTIC + pControl->statistics.timeoutNum++; +#endif /* VNIC_STATISTIC */ + + pControl->reqRetryCounter++; + + if ( pControl->reqRetryCounter >= pControl->p_conf->reqRetryCount ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("IOC %d: Control packet retry exceeded\n", + pControl->p_viport->ioc_num ) ); + viport_failure(pControl->p_viport ); + } + else + { + pHdr->pktRetryCount = pControl->reqRetryCounter; + control_send( pControl ); + } + } + return NULL; +} + +static void +copyRecvPoolConfig( + IN Inic_RecvPoolConfig_t *pSrc, + IN OUT Inic_RecvPoolConfig_t *pDst ) +{ + + pDst->sizeRecvPoolEntry = hton32(pSrc->sizeRecvPoolEntry); + pDst->numRecvPoolEntries = hton32(pSrc->numRecvPoolEntries); + pDst->timeoutBeforeKick = hton32(pSrc->timeoutBeforeKick); + pDst->numRecvPoolEntriesBeforeKick = hton32(pSrc->numRecvPoolEntriesBeforeKick); + pDst->numRecvPoolBytesBeforeKick = hton32(pSrc->numRecvPoolBytesBeforeKick); + pDst->freeRecvPoolEntriesPerUpdate = hton32(pSrc->freeRecvPoolEntriesPerUpdate); + return; +} + +static BOOLEAN +checkRecvPoolConfigValue( + IN void *pSrc, + IN void *pDst, + IN void *pMax, + IN void *pMin, + IN char *name ) +{ + uint32_t value; + uint32_t *p_src = (uint32_t *)pSrc; + uint32_t *p_dst = (uint32_t *)pDst; + uint32_t *p_min = (uint32_t *)pMin; + uint32_t *p_max = (uint32_t *)pMax; + + UNREFERENCED_PARAMETER( name ); + + value = ntoh32( *p_src ); + + if (value > *p_max ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("Value %s too large\n", name) ); + return FALSE; + } + else if (value < *p_min ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("Value %s too small\n", name) ); + return FALSE; + } + + *p_dst = value; + return TRUE; +} + +static BOOLEAN +checkRecvPoolConfig( + IN Inic_RecvPoolConfig_t *pSrc, + IN Inic_RecvPoolConfig_t *pDst, + IN Inic_RecvPoolConfig_t *pMax, + IN Inic_RecvPoolConfig_t *pMin ) +{ + if (!checkRecvPoolConfigValue(&pSrc->sizeRecvPoolEntry, &pDst->sizeRecvPoolEntry, + &pMax->sizeRecvPoolEntry, &pMin->sizeRecvPoolEntry, "sizeRecvPoolEntry") + || !checkRecvPoolConfigValue(&pSrc->numRecvPoolEntries, &pDst->numRecvPoolEntries, + &pMax->numRecvPoolEntries, &pMin->numRecvPoolEntries, "numRecvPoolEntries") + || !checkRecvPoolConfigValue(&pSrc->timeoutBeforeKick, &pDst->timeoutBeforeKick, + &pMax->timeoutBeforeKick, &pMin->timeoutBeforeKick,
"timeoutBeforeKick") + || !checkRecvPoolConfigValue(&pSrc->numRecvPoolEntriesBeforeKick, + &pDst->numRecvPoolEntriesBeforeKick, &pMax->numRecvPoolEntriesBeforeKick, + &pMin->numRecvPoolEntriesBeforeKick, "numRecvPoolEntriesBeforeKick") + || !checkRecvPoolConfigValue(&pSrc->numRecvPoolBytesBeforeKick, + &pDst->numRecvPoolBytesBeforeKick, &pMax->numRecvPoolBytesBeforeKick, + &pMin->numRecvPoolBytesBeforeKick, "numRecvPoolBytesBeforeKick") + || !checkRecvPoolConfigValue(&pSrc->freeRecvPoolEntriesPerUpdate, + &pDst->freeRecvPoolEntriesPerUpdate, &pMax->freeRecvPoolEntriesPerUpdate, + &pMin->freeRecvPoolEntriesPerUpdate, "freeRecvPoolEntriesPerUpdate")) + return FALSE; + + if ( !IsPowerOf2( pDst->numRecvPoolEntries ) ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("numRecvPoolEntries (%d) must be power of 2\n", + pDst->numRecvPoolEntries) ); + return FALSE; + } + if ( !IsPowerOf2( pDst->freeRecvPoolEntriesPerUpdate ) ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("freeRecvPoolEntriesPerUpdate (%d) must be power of 2\n", + pDst->freeRecvPoolEntriesPerUpdate) ); + return FALSE; + } + if ( pDst->freeRecvPoolEntriesPerUpdate >= pDst->numRecvPoolEntries ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("freeRecvPoolEntriesPerUpdate (%d) must be less than numRecvPoolEntries (%d)\n", + pDst->freeRecvPoolEntriesPerUpdate, pDst->numRecvPoolEntries) ); + return FALSE; + } + if ( pDst->numRecvPoolEntriesBeforeKick >= pDst->numRecvPoolEntries ) + { + VNIC_TRACE( VNIC_DBG_CTRL| VNIC_DBG_ERROR, + ("numRecvPoolEntriesBeforeKick (%d) must be less than numRecvPoolEntries (%d)\n", + pDst->numRecvPoolEntriesBeforeKick, pDst->numRecvPoolEntries) ); + return FALSE; + } + + return TRUE; +} + +static void +__control_logControlPacket( + IN Inic_ControlPacket_t *pPkt ) +{ + char *type; + int i; + + switch( pPkt->hdr.pktType ) + { + case TYPE_INFO: + type = "TYPE_INFO"; + break; + case TYPE_REQ: + type = "TYPE_REQ"; + break; + case TYPE_RSP: + type = "TYPE_RSP"; + break; + case TYPE_ERR: + type = "TYPE_ERR"; + break; + default: + type = "UNKNOWN"; + } + switch( pPkt->hdr.pktCmd ) + { + case CMD_INIT_INIC: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_INIT_INIC\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" inicMajorVersion = %u, inicMinorVersion = %u\n", + ntoh16(pPkt->cmd.initInicReq.inicMajorVersion), + ntoh16(pPkt->cmd.initInicReq.inicMinorVersion)) ); + if (pPkt->hdr.pktType == TYPE_REQ) + { + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" inicInstance = %u, numDataPaths = %u\n", + pPkt->cmd.initInicReq.inicInstance, + pPkt->cmd.initInicReq.numDataPaths) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numAddressEntries = %u\n", + ntoh16(pPkt->cmd.initInicReq.numAddressEntries)) ); + } + else + { + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numLanSwitches = %u, numDataPaths = %u\n", + pPkt->cmd.initInicRsp.numLanSwitches, + pPkt->cmd.initInicRsp.numDataPaths) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numAddressEntries = %u, featuresSupported = %08x\n", + ntoh16(pPkt->cmd.initInicRsp.numAddressEntries), + ntoh32(pPkt->cmd.initInicRsp.featuresSupported)) ); + if (pPkt->cmd.initInicRsp.numLanSwitches != 0) + { + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("lanSwitch[0] lanSwitchNum = %u, numEnetPorts = %08x\n", + pPkt->cmd.initInicRsp.lanSwitch[0].lanSwitchNum, + pPkt->cmd.initInicRsp.lanSwitch[0].numEnetPorts) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" defaultVlan = %u, 
hwMacAddress = %02x:%02x:%02x:%02x:%02x:%02x\n", + ntoh16(pPkt->cmd.initInicRsp.lanSwitch[0].defaultVlan), + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[0], + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[1], + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[2], + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[3], + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[4], + pPkt->cmd.initInicRsp.lanSwitch[0].hwMacAddress[5]) ); + } + } + break; + case CMD_CONFIG_DATA_PATH: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ( "ControlPacket: pktType = %s, pktCmd = CMD_CONFIG_DATA_PATH\n", type) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pathIdentifier = %"PRIx64", dataPath = %u\n", + pPkt->cmd.configDataPathReq.pathIdentifier, + pPkt->cmd.configDataPathReq.dataPath) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("Host Config sizeRecvPoolEntry = %u, numRecvPoolEntries = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.sizeRecvPoolEntry), + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.numRecvPoolEntries)) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" timeoutBeforeKick = %u, numRecvPoolEntriesBeforeKick = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.timeoutBeforeKick), + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.numRecvPoolEntriesBeforeKick)) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numRecvPoolBytesBeforeKick = %u, freeRecvPoolEntriesPerUpdate = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.numRecvPoolBytesBeforeKick), + ntoh32(pPkt->cmd.configDataPathReq.hostRecvPoolConfig.freeRecvPoolEntriesPerUpdate)) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("Eioc Config sizeRecvPoolEntry = %u, numRecvPoolEntries = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.sizeRecvPoolEntry), + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.numRecvPoolEntries)) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" timeoutBeforeKick = %u, numRecvPoolEntriesBeforeKick = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.timeoutBeforeKick), + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.numRecvPoolEntriesBeforeKick)) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numRecvPoolBytesBeforeKick = %u, freeRecvPoolEntriesPerUpdate = %u\n", + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.numRecvPoolBytesBeforeKick), + ntoh32(pPkt->cmd.configDataPathReq.eiocRecvPoolConfig.freeRecvPoolEntriesPerUpdate)) ); + break; + case CMD_EXCHANGE_POOLS: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_EXCHANGE_POOLS\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" datapath = %u\n", + pPkt->cmd.exchangePoolsReq.dataPath) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" poolRKey = %08x poolAddr = %"PRIx64"\n", + ntoh32(pPkt->cmd.exchangePoolsReq.poolRKey), + ntoh64(pPkt->cmd.exchangePoolsReq.poolAddr)) ); + break; + case CMD_CONFIG_ADDRESSES: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ( "ControlPacket: pktType = %s, pktCmd = CMD_CONFIG_ADDRESSES\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" numAddressOps = %x, lanSwitchNum = %d\n", + pPkt->cmd.configAddressesReq.numAddressOps, + pPkt->cmd.configAddressesReq.lanSwitchNum) ); + for (i = 0; ( i < 
pPkt->cmd.configAddressesReq.numAddressOps) && (i < 16); i++) + { + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].index = %u\n", + i, ntoh16(pPkt->cmd.configAddressesReq.listAddressOps[i].index)) ); + + switch(pPkt->cmd.configAddressesReq.listAddressOps[i].operation) + { + case INIC_OP_GET_ENTRY: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].operation = INIC_OP_GET_ENTRY\n", i) ); + break; + case INIC_OP_SET_ENTRY: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].operation = INIC_OP_SET_ENTRY\n", i) ); + break; + default: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].operation = UNKNOWN(%d)\n", + i, pPkt->cmd.configAddressesReq.listAddressOps[i].operation) ); + break; + } + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].valid = %u\n", + i, pPkt->cmd.configAddressesReq.listAddressOps[i].valid) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].address = %02x:%02x:%02x:%02x:%02x:%02x\n", i, + pPkt->cmd.configAddressesReq.listAddressOps[i].address[0], + pPkt->cmd.configAddressesReq.listAddressOps[i].address[1], + pPkt->cmd.configAddressesReq.listAddressOps[i].address[2], + pPkt->cmd.configAddressesReq.listAddressOps[i].address[3], + pPkt->cmd.configAddressesReq.listAddressOps[i].address[4], + pPkt->cmd.configAddressesReq.listAddressOps[i].address[5]) ); + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" listAddressOps[%u].vlan = %u\n", + i, ntoh16(pPkt->cmd.configAddressesReq.listAddressOps[i].vlan)) ); + } + break; + case CMD_CONFIG_LINK: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_CONFIG_LINK\n", type) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" cmdFlags = %x\n", + pPkt->cmd.configLinkReq.cmdFlags) ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_ENABLE_NIC ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_ENABLE_NIC\n") ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_DISABLE_NIC ) + + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_DISABLE_NIC\n") ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_ENABLE_MCAST_ALL ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_ENABLE_MCAST_ALL\n") ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_DISABLE_MCAST_ALL ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_DISABLE_MCAST_ALL\n") ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_ENABLE_PROMISC ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_ENABLE_PROMISC\n") ); + + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_DISABLE_PROMISC ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_DISABLE_PROMISC\n") ); + if ( pPkt->cmd.configLinkReq.cmdFlags & INIC_FLAG_SET_MTU ) + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" INIC_FLAG_SET_MTU\n") ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" lanSwitchNum = %x, mtuSize = %d\n", + pPkt->cmd.configLinkReq.lanSwitchNum, + ntoh16(pPkt->cmd.configLinkReq.mtuSize)) ); + if ( pPkt->hdr.pktType == TYPE_RSP ) + { + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" defaultVlan = %u, hwMacAddress = %02x:%02x:%02x:%02x:%02x:%02x\n", + ntoh16(pPkt->cmd.configLinkReq.defaultVlan), + pPkt->cmd.configLinkReq.hwMacAddress[0], + pPkt->cmd.configLinkReq.hwMacAddress[1], + pPkt->cmd.configLinkReq.hwMacAddress[2], + pPkt->cmd.configLinkReq.hwMacAddress[3], + pPkt->cmd.configLinkReq.hwMacAddress[4], + pPkt->cmd.configLinkReq.hwMacAddress[5]) ); + } + break; + case CMD_REPORT_STATISTICS: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = 
CMD_REPORT_STATISTICS\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" lanSwitchNum = %u\n", + pPkt->cmd.reportStatisticsReq.lanSwitchNum) ); + + if (pPkt->hdr.pktType == TYPE_REQ) + break; + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInBroadcastPkts = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInBroadcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInMulticastPkts = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInMulticastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInOctets = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInOctets)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInUcastPkts = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInUcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInNUcastPkts = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInNUcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInUnderrun = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInUnderrun)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInErrors = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInErrors)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutErrors = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutErrors)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutOctets = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutOctets)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutUcastPkts = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutUcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutMulticastPkts = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutMulticastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutBroadcastPkts = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutBroadcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutNUcastPkts = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutNUcastPkts)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutOk = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutOk)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInOk = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInOk)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutUcastBytes = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutUcastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutMulticastBytes = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutMulticastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifOutBroadcastBytes = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifOutBroadcastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInUcastBytes = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInUcastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInMulticastBytes = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInMulticastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ifInBroadcastBytes = %"PRIu64, + ntoh64(pPkt->cmd.reportStatisticsRsp.ifInBroadcastBytes)) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" ethernetStatus = %"PRIu64"\n", + ntoh64(pPkt->cmd.reportStatisticsRsp.ethernetStatus)) ); + break; + case CMD_CLEAR_STATISTICS: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_CLEAR_STATISTICS\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + break; + case CMD_REPORT_STATUS: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_REPORT_STATUS\n", + type) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + 
(" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" lanSwitchNum = %u, isFatal = %u\n", + pPkt->cmd.reportStatus.lanSwitchNum, + pPkt->cmd.reportStatus.isFatal) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" statusNumber = %u, statusInfo = %u\n", + ntoh32(pPkt->cmd.reportStatus.statusNumber), + ntoh32(pPkt->cmd.reportStatus.statusInfo)) ); + pPkt->cmd.reportStatus.fileName[31] = '\0'; + pPkt->cmd.reportStatus.routine[31] = '\0'; + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" filename = %s, routine = %s\n", + pPkt->cmd.reportStatus.fileName, + pPkt->cmd.reportStatus.routine) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" lineNum = %u, errorParameter = %u\n", + ntoh32(pPkt->cmd.reportStatus.lineNum), + ntoh32(pPkt->cmd.reportStatus.errorParameter)) ); + pPkt->cmd.reportStatus.descText[127] = '\0'; + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" descText = %s\n", + pPkt->cmd.reportStatus.descText) ); + break; + case CMD_RESET: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_RESET\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + break; + case CMD_HEARTBEAT: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = CMD_HEARTBEAT\n", type ) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" hbInterval = %d\n", + ntoh32(pPkt->cmd.heartbeatReq.hbInterval)) ); + break; + default: + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + ("ControlPacket: pktType = %s, pktCmd = UNKNOWN (%u)\n", + type,pPkt->hdr.pktCmd) ); + VNIC_PRINT( VNIC_DBG_CTRL_PKT, + (" pktSeqNum = %u, pktRetryCount = %u\n", + pPkt->hdr.pktSeqNum, + pPkt->hdr.pktRetryCount) ); + break; + } + return; +} diff --git a/branches/Ndi/ulp/inic/kernel/vnic_control.h b/branches/Ndi/ulp/inic/kernel/vnic_control.h new file mode 100644 index 00000000..f6c2ce36 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_control.h @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _VNIC_CONTROL_H_ +#define _VNIC_CONTROL_H_ + +#include "vnic_controlpkt.h" +#include "vnic_util.h" + +typedef enum { + TIMER_IDLE, + TIMER_ACTIVE, + TIMER_EXPIRED +}timerstate_t; + +typedef struct Control { + struct _viport *p_viport; + struct ControlConfig *p_conf; + IbRegion_t region; + IbQp_t qp; + uint8_t *pLocalStorage; + uint16_t majVer; + uint16_t minVer; + Inic_LanSwitchAttributes_t lanSwitch; + SendIo_t sendIo; + RecvIo_t *pRecvIos; + + timerstate_t timerstate; + cl_timer_t timer; + uint8_t reqRetryCounter; + uint8_t seqNum; + uint32_t reqOutstanding; + uint32_t rspExpected; + RecvIo_t *pResponse; + RecvIo_t *pInfo; + LIST_ENTRY failureList; + KSPIN_LOCK ioLock; + +#ifdef VNIC_STATISTIC + struct { + uint64_t requestTime; /* Intermediate value */ + uint64_t responseTime; + uint32_t responseNum; + uint64_t responseMax; + uint64_t responseMin; + uint32_t timeoutNum; + } statistics; +#endif /* VNIC_STATISTIC */ +} Control_t; + +void +control_construct( + IN Control_t *pControl, + IN struct _viport *pViport ); + +ib_api_status_t control_init(Control_t *pControl, struct _viport *pViport, + struct ControlConfig *p_conf, uint64_t guid); +void control_cleanup(Control_t *pControl); +void control_processAsync(Control_t *pControl); +ib_api_status_t control_initInicReq(Control_t *pControl); +BOOLEAN control_initInicRsp(Control_t *pControl, uint32_t *pFeatures, + uint8_t *pMacAddress, uint16_t *pNumAddrs, uint16_t *pVlan); +ib_api_status_t control_configDataPathReq(Control_t *pControl, uint64_t pathId, + struct Inic_RecvPoolConfig *pHost, + struct Inic_RecvPoolConfig *pEioc); +BOOLEAN control_configDataPathRsp(Control_t *pControl, + struct Inic_RecvPoolConfig *pHost, + struct Inic_RecvPoolConfig *pEioc, + struct Inic_RecvPoolConfig *pMaxHost, + struct Inic_RecvPoolConfig *pMaxEioc, + struct Inic_RecvPoolConfig *pMinHost, + struct Inic_RecvPoolConfig *pMinEioc); +ib_api_status_t control_exchangePoolsReq(Control_t *pControl, uint64_t addr, uint32_t rkey); +BOOLEAN control_exchangePoolsRsp(Control_t *pControl, uint64_t *pAddr, + uint32_t *pRkey); +ib_api_status_t control_configLinkReq(Control_t *pControl, uint8_t flags, uint16_t mtu); +BOOLEAN control_configLinkRsp(Control_t *pControl, uint8_t *pFlags, uint16_t *pMtu); +ib_api_status_t control_configAddrsReq(Control_t *pControl, Inic_AddressOp_t *pAddrs, + uint16_t num, int32_t *pAddrQueryDone); +BOOLEAN control_configAddrsRsp(Control_t *pControl); +ib_api_status_t control_reportStatisticsReq(Control_t *pControl); +BOOLEAN control_reportStatisticsRsp(Control_t *pControl, + struct Inic_CmdReportStatisticsRsp *pStats); +ib_api_status_t control_resetReq( Control_t *pControl ); +BOOLEAN control_resetRsp(Control_t *pControl); +ib_api_status_t control_heartbeatReq(Control_t *pControl, uint32_t hbInterval); +BOOLEAN control_heartbeatRsp(Control_t *pControl); + +#define control_packet(pIo) (Inic_ControlPacket_t *)(LONG_PTR)((pIo)->dsList.vaddr ) +#define control_lastReq(pControl) control_packet(&(pControl)->sendIo) +#define control_features(pControl) (pControl)->featuresSupported +#define control_getMacAddress(pControl,addr) \ + memcpy(addr,(pControl)->lanSwitch.hwMacAddress,MAC_ADDR_LEN) + +#endif /* _VNIC_CONTROL_H_ */ diff --git a/branches/Ndi/ulp/inic/kernel/vnic_controlpkt.h b/branches/Ndi/ulp/inic/kernel/vnic_controlpkt.h new file mode 100644 index 00000000..2c8b4306 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_controlpkt.h @@ -0,0 +1,286 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. 
All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _VNIC_CONTROLPKT_H_ +#define _VNIC_CONTROLPKT_H_ + +#include +#define MAX_HOST_NAME_SIZE 64 + +typedef struct Inic_ConnectionData { + uint64_t pathId; + uint8_t inicInstance; + uint8_t pathNum; + uint8_t nodename[MAX_HOST_NAME_SIZE+1]; + uint8_t reserved; + uint32_t featuresSupported; +} Inic_ConnectionData_t; + +typedef struct Inic_ControlHeader { + uint8_t pktType; + uint8_t pktCmd; + uint8_t pktSeqNum; + uint8_t pktRetryCount; + uint32_t reserved; /* for 64-bit alignment */ +} Inic_ControlHeader_t; + +/* pktType values */ +#define TYPE_INFO 0 +#define TYPE_REQ 1 +#define TYPE_RSP 2 +#define TYPE_ERR 3 + +/* pktCmd values */ +#define CMD_INIT_INIC 1 +#define CMD_CONFIG_DATA_PATH 2 +#define CMD_EXCHANGE_POOLS 3 +#define CMD_CONFIG_ADDRESSES 4 +#define CMD_CONFIG_LINK 5 +#define CMD_REPORT_STATISTICS 6 +#define CMD_CLEAR_STATISTICS 7 +#define CMD_REPORT_STATUS 8 +#define CMD_RESET 9 +#define CMD_HEARTBEAT 10 + +#define MAC_ADDR_LEN HW_ADDR_LEN + +/* pktCmd CMD_INIT_INIC, pktType TYPE_REQ data format */ +typedef struct Inic_CmdInitInicReq { + uint16_t inicMajorVersion; + uint16_t inicMinorVersion; + uint8_t inicInstance; + uint8_t numDataPaths; + uint16_t numAddressEntries; +} Inic_CmdInitInicReq_t; + +/* pktCmd CMD_INIT_INIC, pktType TYPE_RSP subdata format */ +typedef struct Inic_LanSwitchAttributes { + uint8_t lanSwitchNum; + uint8_t numEnetPorts; + uint16_t defaultVlan; + uint8_t hwMacAddress[MAC_ADDR_LEN]; +} Inic_LanSwitchAttributes_t; + +/* pktCmd CMD_INIT_INIC, pktType TYPE_RSP data format */ +typedef struct Inic_CmdInitInicRsp { + uint16_t inicMajorVersion; + uint16_t inicMinorVersion; + uint8_t numLanSwitches; + uint8_t numDataPaths; + uint16_t numAddressEntries; + uint32_t featuresSupported; + Inic_LanSwitchAttributes_t lanSwitch[1]; +} Inic_CmdInitInicRsp_t; + +/* featuresSupported values */ +#define INIC_FEAT_IPV4_HEADERS 0x00000001 +#define INIC_FEAT_IPV6_HEADERS 0x00000002 +#define INIC_FEAT_IPV4_CSUM_RX 0x00000004 +#define INIC_FEAT_IPV4_CSUM_TX 0x00000008 +#define INIC_FEAT_TCP_CSUM_RX 0x00000010 +#define INIC_FEAT_TCP_CSUM_TX 0x00000020 +#define INIC_FEAT_UDP_CSUM_RX 0x00000040 +#define INIC_FEAT_UDP_CSUM_TX 0x00000080 +#define INIC_FEAT_TCP_SEGMENT 0x00000100 +#define INIC_FEAT_IPV4_IPSEC_OFFLOAD 0x00000200 +#define INIC_FEAT_IPV6_IPSEC_OFFLOAD 0x00000400 +#define INIC_FEAT_FCS_PROPAGATE 0x00000800 +#define INIC_FEAT_PF_KICK
0x00001000 +#define INIC_FEAT_PF_FORCE_ROUTE 0x00002000 +#define INIC_FEAT_CHASH_OFFLOAD 0x00004000 +#define INIC_FEAT_RDMA_IMMED 0x00008000 +#define INIC_FEAT_IGNORE_VLAN 0x00010000 + +/* pktCmd CMD_CONFIG_DATA_PATH subdata format */ +typedef struct Inic_RecvPoolConfig { + uint32_t sizeRecvPoolEntry; + uint32_t numRecvPoolEntries; + uint32_t timeoutBeforeKick; + uint32_t numRecvPoolEntriesBeforeKick; + uint32_t numRecvPoolBytesBeforeKick; + uint32_t freeRecvPoolEntriesPerUpdate; +} Inic_RecvPoolConfig_t; + +/* pktCmd CMD_CONFIG_DATA_PATH data format */ +typedef struct Inic_CmdConfigDataPath { + uint64_t pathIdentifier; + uint8_t dataPath; + uint8_t reserved[3]; + Inic_RecvPoolConfig_t hostRecvPoolConfig; + Inic_RecvPoolConfig_t eiocRecvPoolConfig; +} Inic_CmdConfigDataPath_t; + +/* pktCmd CMD_EXCHANGE_POOLS data format */ +typedef struct Inic_CmdExchangePools { + uint8_t dataPath; + uint8_t reserved[3]; + uint32_t poolRKey; + uint64_t poolAddr; +} Inic_CmdExchangePools_t; + +/* pktCmd CMD_CONFIG_ADDRESSES subdata format */ +typedef struct Inic_AddressOp { + uint16_t index; + uint8_t operation; + uint8_t valid; + uint8_t address[6]; + uint16_t vlan; +} Inic_AddressOp_t; + +/* operation values */ +#define INIC_OP_SET_ENTRY 0x01 +#define INIC_OP_GET_ENTRY 0x02 + +/* pktCmd CMD_CONFIG_ADDRESSES data format */ +typedef struct Inic_CmdConfigAddresses { + uint8_t numAddressOps; + uint8_t lanSwitchNum; + Inic_AddressOp_t listAddressOps[1]; +} Inic_CmdConfigAddresses_t; + +/* CMD_CONFIG_LINK data format */ +typedef struct Inic_CmdConfigLink { + uint8_t cmdFlags; + uint8_t lanSwitchNum; + uint16_t mtuSize; + uint16_t defaultVlan; + uint8_t hwMacAddress[6]; +} Inic_CmdConfigLink_t; + +/* cmdFlags values */ +#define INIC_FLAG_ENABLE_NIC 0x01 +#define INIC_FLAG_DISABLE_NIC 0x02 +#define INIC_FLAG_ENABLE_MCAST_ALL 0x04 +#define INIC_FLAG_DISABLE_MCAST_ALL 0x08 +#define INIC_FLAG_ENABLE_PROMISC 0x10 +#define INIC_FLAG_DISABLE_PROMISC 0x20 +#define INIC_FLAG_SET_MTU 0x40 + +/* pktCmd CMD_REPORT_STATISTICS, pktType TYPE_REQ data format */ +typedef struct Inic_CmdReportStatisticsReq { + uint8_t lanSwitchNum; +} Inic_CmdReportStatisticsReq_t; + +/* pktCmd CMD_REPORT_STATISTICS, pktType TYPE_RSP data format */ +typedef struct Inic_CmdReportStatisticsRsp { + uint8_t lanSwitchNum; + uint8_t reserved[7]; /* for 64-bit alignment */ + uint64_t ifInBroadcastPkts; + uint64_t ifInMulticastPkts; + uint64_t ifInOctets; + uint64_t ifInUcastPkts; + uint64_t ifInNUcastPkts; /* ifInBroadcastPkts + ifInMulticastPkts */ + uint64_t ifInUnderrun; /* (OID_GEN_RCV_NO_BUFFER) */ + uint64_t ifInErrors; /* (OID_GEN_RCV_ERROR) */ + uint64_t ifOutErrors; /* (OID_GEN_XMIT_ERROR) */ + uint64_t ifOutOctets; + uint64_t ifOutUcastPkts; + uint64_t ifOutMulticastPkts; + uint64_t ifOutBroadcastPkts; + uint64_t ifOutNUcastPkts; /* ifOutBroadcastPkts + ifOutMulticastPkts */ + uint64_t ifOutOk; /* ifOutNUcastPkts + ifOutUcastPkts (OID_GEN_XMIT_OK)*/ + uint64_t ifInOk; /* ifInNUcastPkts + ifInUcastPkts (OID_GEN_RCV_OK) */ + uint64_t ifOutUcastBytes; /* (OID_GEN_DIRECTED_BYTES_XMT) */ + uint64_t ifOutMulticastBytes; /* (OID_GEN_MULTICAST_BYTES_XMT) */ + uint64_t ifOutBroadcastBytes; /* (OID_GEN_BROADCAST_BYTES_XMT) */ + uint64_t ifInUcastBytes; /* (OID_GEN_DIRECTED_BYTES_RCV) */ + uint64_t ifInMulticastBytes; /* (OID_GEN_MULTICAST_BYTES_RCV) */ + uint64_t ifInBroadcastBytes; /* (OID_GEN_BROADCAST_BYTES_RCV) */ + uint64_t ethernetStatus; /* OID_GEN_MEDIA_CONNECT_STATUS) */ +} Inic_CmdReportStatisticsRsp_t; + +/* pktCmd CMD_CLEAR_STATISTICS data 
format */ +typedef struct Inic_CmdClearStatistics { + uint8_t lanSwitchNum; +} Inic_CmdClearStatistics_t; + +/* pktCmd CMD_REPORT_STATUS data format */ +typedef struct Inic_CmdReportStatus { + uint8_t lanSwitchNum; + uint8_t isFatal; + uint8_t reserved[2]; /* for 32-bit alignment */ + uint32_t statusNumber; + uint32_t statusInfo; + uint8_t fileName[32]; + uint8_t routine[32]; + uint32_t lineNum; + uint32_t errorParameter; + uint8_t descText[128]; +} Inic_CmdReportStatus_t; + +/* pktCmd CMD_HEARTBEAT data format */ +typedef struct Inic_CmdHeartbeat { + uint32_t hbInterval; +} Inic_CmdHeartbeat_t; + +#define INIC_STATUS_LINK_UP 1 +#define INIC_STATUS_LINK_DOWN 2 +#define INIC_STATUS_ENET_AGGREGATION_CHANGE 3 +#define INIC_STATUS_EIOC_SHUTDOWN 4 +#define INIC_STATUS_CONTROL_ERROR 5 +#define INIC_STATUS_EIOC_ERROR 6 + +#define INIC_MAX_CONTROLPKTSZ 256 +#define INIC_MAX_CONTROLDATASZ \ + (INIC_MAX_CONTROLPKTSZ - sizeof(Inic_ControlHeader_t)) + +typedef struct Inic_ControlPacket { + Inic_ControlHeader_t hdr; + union { + Inic_CmdInitInicReq_t initInicReq; + Inic_CmdInitInicRsp_t initInicRsp; + Inic_CmdConfigDataPath_t configDataPathReq; + Inic_CmdConfigDataPath_t configDataPathRsp; + Inic_CmdExchangePools_t exchangePoolsReq; + Inic_CmdExchangePools_t exchangePoolsRsp; + Inic_CmdConfigAddresses_t configAddressesReq; + Inic_CmdConfigAddresses_t configAddressesRsp; + Inic_CmdConfigLink_t configLinkReq; + Inic_CmdConfigLink_t configLinkRsp; + Inic_CmdReportStatisticsReq_t reportStatisticsReq; + Inic_CmdReportStatisticsRsp_t reportStatisticsRsp; + Inic_CmdClearStatistics_t clearStatisticsReq; + Inic_CmdClearStatistics_t clearStatisticsRsp; + Inic_CmdReportStatus_t reportStatus; + Inic_CmdHeartbeat_t heartbeatReq; + Inic_CmdHeartbeat_t heartbeatRsp; + char cmdData[INIC_MAX_CONTROLDATASZ]; + } cmd; +} Inic_ControlPacket_t; +/* +typedef struct _mac_addr +{ + uint8_t addr[MAC_ADDR_LEN]; +} PACK_SUFFIX mac_addr_t; +*/ +#include + +#endif /* _VNIC_CONTROLPKT_H_ */ diff --git a/branches/Ndi/ulp/inic/kernel/vnic_data.c b/branches/Ndi/ulp/inic/kernel/vnic_data.c new file mode 100644 index 00000000..fc69fba0 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_data.c @@ -0,0 +1,1417 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ +#include +#include "vnic_driver.h" + +static void data_postRecvs(Data_t *pData); +static void _data_receivedKick(Io_t *pIo); +static void _data_xmitComplete(Io_t *pIo); +static void data_sendKickMessage(Data_t *pData); +static void _data_kickTimeoutHandler( void *context ); +static BOOLEAN data_allocXmitBuffer(Data_t *pData, + BufferPoolEntry_t **ppBpe, RdmaIo_t **ppRdmaIo, BOOLEAN *pLast); +static void data_checkXmitBuffers(Data_t *pData); + +static +ib_api_status_t +data_rdmaPacket( + Data_t *pData, + BufferPoolEntry_t *pBpe, + RdmaIo_t *pRdmaIo ); +static +NDIS_PACKET * +_data_recv_to_ndis_pkt( + Data_t *pData, + RdmaDest_t *pRdmaDest ); + +static void +_data_allocBuffers( + Data_t *pData, + BOOLEAN initialAllocation ); + +static void +_data_addFreeBuffer( + Data_t *pData, + int index, + RdmaDest_t *pRdmaDest ); + +static uint32_t +_data_incomingRecv( + Data_t *pData ); + +static void +_data_sendFreeRecvBuffers( + Data_t *pData ); + +static uint8_t +_tx_chksum_flags( + IN NDIS_PACKET* const p_packet ); + + +static void +_get_first_buffer( + IN NDIS_PACKET *p_packet, + IN OUT NDIS_BUFFER **pp_buf_desc, + OUT void **pp_buf, + OUT ULONG *p_packet_sz ); + +static void +_data_return_recv( + IN NDIS_PACKET *p_packet ); + +static void +_data_kickTimer_start( + IN Data_t *pData, + IN uint32_t microseconds ); + +static void +_data_kickTimer_stop( + IN Data_t *pData ); + +#define LOCAL_IO(x) PTR64((x)) + +#define INBOUND_COPY + +#ifdef VNIC_STATISTIC +int64_t recvRef; +#endif /* VNIC_STATISTIC */ + +void +data_construct( + IN Data_t *pData, + IN viport_t *pViport ) +{ + VNIC_ENTER( VNIC_DBG_DATA ); + + RtlZeroMemory( pData, sizeof(*pData) ); + + pData->p_viport = pViport; + pData->p_phy_region = &pViport->p_adapter->ca.region; + InitializeListHead( &pData->recvIos ); + KeInitializeSpinLock ( &pData->recvIosLock ); + KeInitializeSpinLock ( &pData->xmitBufLock ); + cl_timer_construct( &pData->kickTimer ); + + ibqp_construct( &pData->qp, pViport ); + + VNIC_EXIT( VNIC_DBG_DATA ); +} + + +ib_api_status_t +data_init( + IN Data_t *pData, + IN DataConfig_t *p_conf, + IN uint64_t guid ) +{ + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_DATA ); + + ASSERT( pData->p_viport != NULL ); + pData->p_conf = p_conf; + + cl_timer_init( &pData->kickTimer, _data_kickTimeoutHandler, pData ); + + ib_status = ibqp_init(&pData->qp, guid, &p_conf->ibConfig ); + if( ib_status != IB_SUCCESS ) + VNIC_TRACE( VNIC_DBG_ERROR, ("data ibqp_init failed\n") ); + + VNIC_EXIT( VNIC_DBG_DATA ); + return ib_status; +} + + +ib_api_status_t +data_connect( + IN Data_t *pData ) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + XmitPool_t *pXmitPool = &pData->xmitPool; + RecvPool_t *pRecvPool = &pData->recvPool; + RecvIo_t *pRecvIo; + SendIo_t *pSendIo; + RdmaIo_t *pRdmaIo; + RdmaDest_t *pRdmaDest; + uint8_t *pRegionData; + int sz, regionSz; + unsigned int i, j; + + VNIC_ENTER( VNIC_DBG_DATA ); + + pRecvPool->poolSz = pData->p_conf->hostRecvPoolEntries; + pRecvPool->eiocPoolSz = pData->hostPoolParms.numRecvPoolEntries; + + if ( pRecvPool->poolSz > pRecvPool->eiocPoolSz ) + { + pRecvPool->poolSz = pData->hostPoolParms.numRecvPoolEntries; + } + pRecvPool->szFreeBundle = + pData->hostPoolParms.freeRecvPoolEntriesPerUpdate; + pRecvPool->numFreeBufs = 0; + pRecvPool->numPostedBufs = 0; + pRecvPool->nextFullBuf = 0; + pRecvPool->nextFreeBuf = 0; + pRecvPool->kickOnFree = FALSE; + pRecvPool->bufferSz = pData->hostPoolParms.sizeRecvPoolEntry; + + pXmitPool->bufferSz = pData->eiocPoolParms.sizeRecvPoolEntry; + 
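/* Xmit pool geometry mirrors the EIOC receive pool parameters + * negotiated via CMD_CONFIG_DATA_PATH: each local send must land in a + * remote receive pool entry of eiocPoolParms.sizeRecvPoolEntry bytes. */ +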
pXmitPool->poolSz = pData->eiocPoolParms.numRecvPoolEntries; + pXmitPool->notifyCount = 0; + pXmitPool->notifyBundle = pData->p_conf->notifyBundle; + pXmitPool->nextXmitPool = 0; + +#if TRUE // LIMIT_OUTSTANDING_SENDS + pXmitPool->numXmitBufs = pXmitPool->notifyBundle * 2; +#else /* !LIMIT_OUTSTANDING_SENDS */ + pXmitPool->numXmitBufs = pXmitPool->poolSz; +#endif /* LIMIT_OUTSTANDING_SENDS */ + + pXmitPool->nextXmitBuf = 0; + pXmitPool->lastCompBuf = pXmitPool->numXmitBufs - 1; + pXmitPool->kickCount = 0; + pXmitPool->kickByteCount = 0; + pXmitPool->sendKicks = + (BOOLEAN)(( pData->eiocPoolParms.numRecvPoolEntriesBeforeKick != 0 ) + || ( pData->eiocPoolParms.numRecvPoolBytesBeforeKick != 0 )); + pXmitPool->kickBundle = + pData->eiocPoolParms.numRecvPoolEntriesBeforeKick; + pXmitPool->kickByteBundle = + pData->eiocPoolParms.numRecvPoolBytesBeforeKick; + pXmitPool->needBuffers = TRUE; + + sz = sizeof(RdmaDest_t) * pRecvPool->poolSz; + sz += sizeof(RecvIo_t) * pData->p_conf->numRecvs; + sz += sizeof(RdmaIo_t) * pXmitPool->numXmitBufs; + + regionSz = 4 * pData->p_conf->numRecvs; + regionSz += sizeof(BufferPoolEntry_t) * pRecvPool->eiocPoolSz; + regionSz += sizeof(BufferPoolEntry_t) * pXmitPool->poolSz; + sz += regionSz; + + status = NdisAllocateMemoryWithTag( &pData->pLocalStorage, + (UINT)sz, + 'grtS' ); + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Failed allocating %d bytes local storage\n", sz ) ); + ib_status = IB_INSUFFICIENT_MEMORY; + goto err1; + } + + NdisZeroMemory( pData->pLocalStorage, sz ); + pData->localStorageSz = sz; + + pRecvPool->pRecvBufs = (RdmaDest_t *)pData->pLocalStorage; + sz = sizeof(RdmaDest_t) * pRecvPool->poolSz; + pRecvIo = (RecvIo_t *)(pData->pLocalStorage + sz); + sz += sizeof(RecvIo_t) * pData->p_conf->numRecvs; + + pXmitPool->pXmitBufs = (RdmaIo_t *)(pData->pLocalStorage + sz); + sz += sizeof(RdmaIo_t) * pXmitPool->numXmitBufs; + + pRegionData = pData->pLocalStorage + sz; + sz += 4 * pData->p_conf->numRecvs; + + pRecvPool->bufPool = (BufferPoolEntry_t *)(pData->pLocalStorage + sz); + sz += sizeof(BufferPoolEntry_t) * pRecvPool->eiocPoolSz; + pXmitPool->bufPool = (BufferPoolEntry_t *)(pData->pLocalStorage + sz); + + ib_status = ibregion_init( pData->p_viport, &pData->region, + pData->p_viport->p_adapter->ca.hPd, pRegionData, regionSz, + ( IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE ) ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("ib_region_init failed\n") ); + goto err2; + } + + pRdmaIo = &pData->freeBufsIo; + pRdmaIo->io.pViport = pData->p_viport; + pRdmaIo->io.pRoutine = NULL; + pRdmaIo->io.wrq.p_next = NULL; + pRdmaIo->io.wrq.wr_type = WR_RDMA_WRITE; + pRdmaIo->io.wrq.wr_id = PTR64( pRdmaIo ); + pRdmaIo->io.wrq.num_ds = 1; + pRdmaIo->io.wrq.ds_array = pRdmaIo->dsList; + pRdmaIo->dsList[0].lkey = pData->region.lkey; + pRdmaIo->io.wrq.send_opt = IB_SEND_OPT_SIGNALED; + + pSendIo = &pData->kickIo; + pSendIo->io.pViport = pData->p_viport; + pSendIo->io.pRoutine = NULL; + pSendIo->io.wrq.p_next = NULL; + pSendIo->io.wrq.wr_type = WR_SEND; + pSendIo->io.wrq.wr_id = PTR64( pSendIo ); + pSendIo->io.wrq.num_ds = 1; + pSendIo->io.wrq.ds_array = &pSendIo->dsList; + + pSendIo->io.wrq.send_opt = IB_SEND_OPT_SIGNALED; + + pSendIo->dsList.length = 0; + pSendIo->dsList.vaddr = PTR64( pRegionData ); + pSendIo->dsList.lkey = pData->region.lkey; + + for ( i = 0; i < pData->p_conf->numRecvs; i++ ) + { + pRecvIo[i].io.pViport = pData->p_viport; + pRecvIo[i].io.pRoutine = _data_receivedKick; + pRecvIo[i].io.r_wrq.wr_id = 
PTR64( &pRecvIo[i].io );
+		pRecvIo[i].io.r_wrq.p_next = NULL;
+		pRecvIo[i].io.r_wrq.num_ds = 1;
+		pRecvIo[i].io.r_wrq.ds_array = &pRecvIo[i].dsList;
+		pRecvIo[i].dsList.length = 4;
+		pRecvIo[i].dsList.vaddr = PTR64( pRegionData );
+		pRecvIo[i].dsList.lkey = pData->region.lkey;
+
+		ExInterlockedInsertTailList( &pData->recvIos,
+			&pRecvIo[i].io.listPtrs, &pData->recvIosLock );
+		/* No need to advance the pointer, since the receive data is
+		 * never read.  Note that we could reduce the amount of memory
+		 * allocated and the size of the region accordingly:
+		 * pRegionData += 4;
+		 */
+	}
+
+	sz = pRecvPool->poolSz * pRecvPool->bufferSz;
+	status = NdisAllocateMemoryWithTag( &pData->p_recv_bufs,
+										sz, 'fubr' );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Allocate recv buffers failed\n"));
+		ib_status = IB_INSUFFICIENT_MEMORY;
+		goto err3;
+	}
+	NdisZeroMemory( pData->p_recv_bufs, sz );
+
+	pData->recv_bufs_sz = sz;
+
+	ib_status = ibregion_init( pData->p_viport, &pData->rbuf_region,
+		pData->p_viport->p_adapter->ca.hPd, pData->p_recv_bufs, sz,
+		(IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE) );
+	if( ib_status != IB_SUCCESS )
+	{
+		goto err4;
+	}
+
+	NdisAllocatePacketPool( &status,
+							&pData->h_recv_pkt_pool,
+							pRecvPool->poolSz,
+							PROTOCOL_RESERVED_SIZE_IN_PACKET );
+
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Allocate packet pool failed status %#x\n", status ));
+		ib_status = IB_INSUFFICIENT_MEMORY;
+		goto err5;
+	}
+
+	NdisAllocateBufferPool(
+		&status, &pData->h_recv_buf_pool, pRecvPool->poolSz );
+
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Allocate buffer pool failed status %#x\n", status ));
+		ib_status = IB_INSUFFICIENT_MEMORY;
+		goto err6;
+	}
+	pData->recvPool.recv_pkt_array =
+		cl_zalloc( sizeof(NDIS_PACKET*) * pRecvPool->poolSz );
+	if( !pData->recvPool.recv_pkt_array )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Allocate packet array failed\n" ) );
+		ib_status = IB_INSUFFICIENT_MEMORY;
+		goto err7;
+	}
+
+	InitializeListHead( &pRecvPool->availRecvBufs );
+
+	for ( i = 0; i < pRecvPool->poolSz; i++ )
+	{
+		pRdmaDest = &pRecvPool->pRecvBufs[i];
+		pRdmaDest->data = pData->p_recv_bufs + (i * pRecvPool->bufferSz );
+		pRdmaDest->region = pData->rbuf_region;
+		InsertTailList( &pRecvPool->availRecvBufs, &pRdmaDest->listPtrs );
+	}
+
+	for ( i = 0; i < pXmitPool->numXmitBufs; i++ )
+	{
+		pRdmaIo = &pXmitPool->pXmitBufs[i];
+		pRdmaIo->index = (uint16_t)i;
+		pRdmaIo->io.pViport = pData->p_viport;
+		pRdmaIo->io.pRoutine = _data_xmitComplete;
+		pRdmaIo->io.wrq.p_next = NULL;
+		pRdmaIo->io.wrq.wr_type = WR_RDMA_WRITE;
+		pRdmaIo->io.wrq.wr_id = PTR64(pRdmaIo);
+		/* the actual segment count is set at transmit time */
+		pRdmaIo->io.wrq.num_ds = MAX_NUM_SGE;
+		pRdmaIo->io.wrq.ds_array = pRdmaIo->dsList;
+		pRdmaIo->p_trailer = (ViportTrailer_t *)&pRdmaIo->data[0];
+		for( j = 0; j < MAX_NUM_SGE; j++ )
+		{
+			pRdmaIo->dsList[j].lkey = pData->p_phy_region->lkey;
+		}
+	}
+
+	pXmitPool->rdmaRKey = pData->region.rkey;
+	pXmitPool->rdmaAddr = PTR64( pXmitPool->bufPool );
+
+	data_postRecvs( pData );
+
+	ib_status = ibqp_connect( &pData->qp );
+	if( ib_status != IB_SUCCESS )
+	{
+		VNIC_TRACE( VNIC_DBG_ERROR, ("ibqp_connect returned %s\n",
+			pData->p_viport->p_adapter->ifc.get_err_str( ib_status )) );
+err7:
+		NdisFreeBufferPool( pData->h_recv_buf_pool );
+		pData->h_recv_buf_pool = NULL;
+err6:
+		NdisFreePacketPool( pData->h_recv_pkt_pool );
+		pData->h_recv_pkt_pool = NULL;
+err5:
+		ibregion_cleanup( pData->p_viport, &pData->rbuf_region );
+err4:
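+		/*
+		 * The labels above and below unwind data_connect() in reverse
+		 * order of allocation, so a failure at any step releases only
+		 * the resources acquired before it: falling through err4..err1
+		 * frees the receive buffers, cleans up the data region, frees
+		 * the local storage, and finally marks the receive pool empty.
+		 */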
NdisFreeMemory( pData->p_recv_bufs, pData->recv_bufs_sz, 0 ); + pData->p_recv_bufs = NULL; +err3: + ibregion_cleanup(pData->p_viport, &pData->region ); +err2: + NdisFreeMemory( pData->pLocalStorage, pData->localStorageSz, 0 ); + pData->pLocalStorage = NULL; +err1: + pRecvPool->poolSz = 0; + } + + VNIC_EXIT( VNIC_DBG_DATA ); + return ib_status; +} + + +void +data_connected( + IN Data_t *pData ) +{ + VNIC_ENTER( VNIC_DBG_DATA ); + + pData->freeBufsIo.io.wrq.remote_ops.rkey = + pData->recvPool.eiocRdmaRkey; + + _data_allocBuffers(pData, TRUE); + _data_sendFreeRecvBuffers(pData); + pData->connected = TRUE; + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +void +data_disconnect( + IN Data_t *pData ) +{ + RecvPool_t *pRecvPool = &pData->recvPool; + viport_t *p_viport = pData->p_viport; + NDIS_PACKET *p_packet; + LIST_ENTRY *p_list_item; + unsigned int i; + + VNIC_ENTER( VNIC_DBG_DATA ); + + _data_kickTimer_stop ( pData ); + + pData->connected = FALSE; + + ibqp_detach( &pData->qp ); + + ibregion_cleanup( p_viport, &pData->rbuf_region ); + ibregion_cleanup( p_viport, &pData->region ); + + for ( i = 0; i < pRecvPool->poolSz; i++ ) + { + p_packet = pRecvPool->pRecvBufs[i].p_packet; + if ( p_packet != NULL ) + { + pRecvPool->pRecvBufs[i].p_packet = NULL; + _data_return_recv( p_packet ); + } + } + /* clear pending queue if any */ + while( ( p_list_item = NdisInterlockedRemoveHeadList( + &p_viport->send_pending_list, + &p_viport->pending_list_lock )) != NULL ) + { + p_packet = VNIC_PACKET_FROM_LIST_ITEM( p_list_item ); + if ( p_packet ) + { + NDIS_SET_PACKET_STATUS( p_packet, NDIS_STATUS_FAILURE ); + NdisMSendComplete( p_viport->p_adapter->h_handle, + p_packet, NDIS_STATUS_FAILURE ); + p_viport->stats.ifOutErrors++; + } + } + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +BOOLEAN +data_xmitPacket( + IN Data_t *pData, + IN NDIS_PACKET* const p_packet ) +{ + XmitPool_t *p_xmitPool = &pData->xmitPool; + RdmaIo_t *pRdmaIo; + BufferPoolEntry_t *pBpe; + BOOLEAN last; + uint8_t *p_buf; + uint32_t buf_len; + NDIS_BUFFER *p_buf_desc; + eth_hdr_t* p_eth_hdr; + int pad; + SCATTER_GATHER_LIST *p_sgl; + uint32_t i; + PHYSICAL_ADDRESS phy_addr; + + VNIC_ENTER( VNIC_DBG_DATA ); + + if( !data_allocXmitBuffer( pData, &pBpe, &pRdmaIo, &last ) ) + { + return FALSE; + } + p_sgl = NDIS_PER_PACKET_INFO_FROM_PACKET( p_packet, + ScatterGatherListPacketInfo ); + if ( p_sgl == NULL ) + { + return FALSE; + } + + NdisGetFirstBufferFromPacketSafe( p_packet, + &p_buf_desc, + &p_buf, + &buf_len, + &pRdmaIo->packet_sz, + NormalPagePriority ); + if( pRdmaIo->packet_sz > p_xmitPool->bufferSz ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Outbound packet too large, size = %d\n", pRdmaIo->packet_sz ) ); + return FALSE; + } + + if ( p_sgl->NumberOfElements > (ULONG)MAX_NUM_SGE - 1 ) + { + VNIC_TRACE( VNIC_DBG_DATA, + (" Xmit packet exceeded SGE limit - %d\n", + p_sgl->NumberOfElements ) ); + return FALSE; + } + pRdmaIo->p_packet = p_packet; + + for( i=0; i < p_sgl->NumberOfElements; i++ ) + { + pRdmaIo->dsList[i].vaddr = p_sgl->Elements[i].Address.QuadPart; + pRdmaIo->dsList[i].length = p_sgl->Elements[i].Length; + } + + pRdmaIo->len = (uint32_t)ROUNDUPP2( + max(60, pRdmaIo->packet_sz), VIPORT_TRAILER_ALIGNMENT ); + pad = pRdmaIo->len - pRdmaIo->packet_sz; + + p_eth_hdr = (eth_hdr_t *)p_buf; + + pRdmaIo->p_trailer = (ViportTrailer_t *)&pRdmaIo->data[pad]; + cl_memclr( pRdmaIo->data, pad + sizeof( ViportTrailer_t ) ); + cl_memcpy( pRdmaIo->p_trailer->destMacAddr, p_eth_hdr->dst.addr, MAC_ADDR_LEN ); + + pRdmaIo->p_trailer->dataLength = + 
hton16( (uint16_t)max( 60, pRdmaIo->packet_sz ) );
+
+	NdisGetNextBuffer( p_buf_desc, &p_buf_desc );
+
+	/* handle a VLAN tag if one is present */
+	if( pRdmaIo->packet_sz > 16 )
+	{
+		if( p_eth_hdr->type == hton16(0x8100) )
+		{
+			if( p_sgl->Elements[0].Length > sizeof(eth_hdr_t) )
+			{
+				pRdmaIo->p_trailer->vLan = *(uint16_t *)((uint8_t *)p_eth_hdr + 14 );
+				pRdmaIo->p_trailer->pktFlags |= PF_VLAN_INSERT;
+			}
+			else
+			{
+				if( p_buf_desc )
+				{
+					NdisQueryBufferSafe( p_buf_desc, &p_buf, &buf_len, NormalPagePriority );
+
+					pad = sizeof(eth_hdr_t) - p_sgl->Elements[0].Length;
+					pRdmaIo->p_trailer->vLan = *(uint16_t *)(p_buf + pad + 2);
+					pRdmaIo->p_trailer->pktFlags |= PF_VLAN_INSERT;
+				}
+			}
+		}
+		/* The hash operation seems quite expensive; we should figure out
+		 * another way to do this.  Meanwhile, let the embedded side do it
+		 * for us.
+		 ********************************************
+		else if( p_eth_hdr->type == ETH_PROT_TYPE_IP &&
+			!( p_eth_hdr->dst.addr[0] & 0x01 ) )
+		{
+			if( p_buf_desc )
+			{
+				NdisQueryBufferSafe( p_buf_desc, &p_buf, &buf_len, NormalPagePriority );
+
+				if( ((ip_pkt_t*)p_buf)->hdr.prot == IP_PROT_UDP ||
+					((ip_pkt_t*)p_buf)->hdr.prot == IP_PROT_TCP )
+				{
+					// use socket src port + dest port to generate a hash value
+					// for the link aggregation distribution function.
+					//
+					pRdmaIo->p_trailer->connectionHashAndValid = CHV_VALID |
+						((uint8_t)ntoh16(((ip_pkt_t*)p_buf)->prot.tcp.src_port ) +
+						(uint8_t)ntoh16(((ip_pkt_t*)p_buf)->prot.tcp.dst_port )) & CHV_HASH_MASH;
+					pRdmaIo->p_trailer->pktFlags |= PF_CHASH_VALID;
+				}
+				else
+					pRdmaIo->p_trailer->pktFlags &= ~PF_CHASH_VALID;
+			}
+		}
+		**********************/
+	}
+
+	pRdmaIo->p_trailer->txChksumFlags = _tx_chksum_flags( p_packet );
+	pRdmaIo->p_trailer->connectionHashAndValid |= CHV_VALID;
+
+	if( last )
+		pRdmaIo->p_trailer->pktFlags |= PF_KICK;
+
+	/* fill the last data segment with the trailer and pad */
+	phy_addr = MmGetPhysicalAddress( pRdmaIo->data );
+
+	pRdmaIo->dsList[p_sgl->NumberOfElements].vaddr = phy_addr.QuadPart;
+	pRdmaIo->dsList[p_sgl->NumberOfElements].length = pRdmaIo->len -
+													pRdmaIo->packet_sz +
+													sizeof( ViportTrailer_t );
+
+	pRdmaIo->io.wrq.num_ds = p_sgl->NumberOfElements + 1;
+
+	if( data_rdmaPacket( pData, pBpe, pRdmaIo ) != IB_SUCCESS )
+	{
+		VNIC_TRACE( VNIC_DBG_ERROR,
+			("RDMA WRITE Failed\n"));
+		return FALSE;
+	}
+
+	if( p_xmitPool->sendKicks )
+	{
+		/* the EIOC needs kicks to inform it of sent packets */
+
+		p_xmitPool->kickCount++;
+		p_xmitPool->kickByteCount += pRdmaIo->packet_sz;
+		if( ( p_xmitPool->kickCount >= p_xmitPool->kickBundle )
+			|| ( p_xmitPool->kickByteCount >= p_xmitPool->kickByteBundle ) )
+		{
+			data_sendKickMessage( pData );
+		}
+		else if( p_xmitPool->kickCount == 1 )
+		{
+			_data_kickTimer_start( pData, pData->eiocPoolParms.timeoutBeforeKick );
+		}
+	}
+	return TRUE;
+}
+
+static uint8_t
+_tx_chksum_flags(
+	IN		NDIS_PACKET* const	p_packet )
+{
+	NDIS_TCP_IP_CHECKSUM_PACKET_INFO	*p_packet_info;
+	ULONG		packet_info;
+	uint8_t		txChksumFlags = 0;
+
+	if( NDIS_PROTOCOL_ID_TCP_IP == NDIS_GET_PACKET_PROTOCOL_TYPE(p_packet) )
+	{
+		packet_info = PtrToUlong( NDIS_PER_PACKET_INFO_FROM_PACKET( p_packet, TcpIpChecksumPacketInfo));
+		p_packet_info = ( NDIS_TCP_IP_CHECKSUM_PACKET_INFO *)&packet_info;
+
+		if( p_packet_info &&
+			p_packet_info->Transmit.NdisPacketChecksumV4 )
+		{
+			txChksumFlags = TX_CHKSUM_FLAGS_CHECKSUM_V4
+			| ( p_packet_info->Transmit.NdisPacketIpChecksum ? TX_CHKSUM_FLAGS_IP_CHECKSUM: 0 )
+			| ( p_packet_info->Transmit.NdisPacketTcpChecksum ?
TX_CHKSUM_FLAGS_TCP_CHECKSUM: 0 ) + | ( p_packet_info->Transmit.NdisPacketUdpChecksum ? TX_CHKSUM_FLAGS_UDP_CHECKSUM: 0 ); + } + } + + VNIC_TRACE( VNIC_DBG_DATA , + ("txChksumFlags = %d: V4 %c, V6 %c, IP %c, TCP %c, UDP %c\n", + txChksumFlags, + ((txChksumFlags & TX_CHKSUM_FLAGS_CHECKSUM_V4 )? '+': '-'), + ((txChksumFlags & TX_CHKSUM_FLAGS_CHECKSUM_V6 )? '+': '-'), + ((txChksumFlags & TX_CHKSUM_FLAGS_IP_CHECKSUM )? '+': '-'), + ((txChksumFlags & TX_CHKSUM_FLAGS_TCP_CHECKSUM )? '+': '-'), + ((txChksumFlags & TX_CHKSUM_FLAGS_UDP_CHECKSUM )? '+': '-') )); + + return txChksumFlags; +} + +static void +_get_first_buffer( + IN NDIS_PACKET *p_packet, + IN OUT NDIS_BUFFER **pp_buf_desc, + OUT void **pp_buf, + OUT ULONG *p_packet_sz ) +{ + UINT buf_len; + VNIC_ENTER( VNIC_DBG_DATA ); + + NdisGetFirstBufferFromPacketSafe( p_packet, + pp_buf_desc, + pp_buf, + &buf_len, + p_packet_sz, + NormalPagePriority ); + VNIC_EXIT( VNIC_DBG_DATA ); +} + +static void +data_postRecvs( + IN Data_t *pData ) +{ + RecvIo_t *pRecvIo; + LIST_ENTRY *p_list_entry; + ib_api_status_t ib_status; + + VNIC_ENTER ( VNIC_DBG_DATA ); + + while( ( p_list_entry = ExInterlockedRemoveHeadList( &pData->recvIos, + &pData->recvIosLock )) + != NULL ) + { + pRecvIo = (RecvIo_t *)p_list_entry; + + ib_status = ibqp_postRecv( &pData->qp, &pRecvIo->io ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("ibqp_postRecv returned %s\n", + pData->p_viport->p_adapter->ifc.get_err_str( ib_status )) ); + viport_failure( pData->p_viport ); + return; + } + } + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +static void +_data_receivedKick( + IN Io_t *pIo ) +{ + Data_t *pData = &pIo->pViport->data; + uint32_t num_pkts = 0; + + VNIC_ENTER( VNIC_DBG_DATA ); + +#ifdef VNIC_STATISTIC + recvRef = cl_get_tick_count(); +#endif /* VNIC_STATISTIC */ + + ExInterlockedInsertTailList( &pData->recvIos, &pIo->listPtrs, &pData->recvIosLock ); + + data_postRecvs( pData ); + +#ifdef VNIC_STATISTIC + pData->statistics.kickRecvs++; +#endif /* VNIC_STATISTIC */ + + data_checkXmitBuffers( pData ); + + num_pkts = _data_incomingRecv( pData ); + + if( num_pkts ) + { + NdisMIndicateReceivePacket( pData->p_viport->p_adapter->h_handle, + pData->recvPool.recv_pkt_array, + num_pkts ); + pData->p_viport->stats.ifInOk += num_pkts; + } + + if( pData->p_viport->data.connected == TRUE && + !pData->p_viport->errored ) + { + _data_allocBuffers( pData, FALSE ); + _data_sendFreeRecvBuffers( pData ); + } + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +static void +_data_xmitComplete( + IN Io_t *pIo ) +{ + RdmaIo_t *pRdmaIo = (RdmaIo_t *)pIo; + Data_t *pData = &pIo->pViport->data; + XmitPool_t *p_xmitPool = &pData->xmitPool; + NDIS_PACKET *p_packet; + NDIS_STATUS ndis_status; + + VNIC_ENTER( VNIC_DBG_DATA ); + + while ( p_xmitPool->lastCompBuf != pRdmaIo->index ) + { + INC(p_xmitPool->lastCompBuf, 1, p_xmitPool->numXmitBufs); + p_packet = p_xmitPool->pXmitBufs[p_xmitPool->lastCompBuf].p_packet; + + p_xmitPool->pXmitBufs[p_xmitPool->lastCompBuf].p_packet = NULL; + + if( p_packet != NULL ) + { + if( pIo->wc_status != IB_WCS_SUCCESS ) + { + ndis_status = NDIS_STATUS_FAILURE; + pIo->pViport->stats.ifOutErrors++; + pIo->wc_status = IB_WCS_SUCCESS; + } + else + { + ndis_status = NDIS_STATUS_SUCCESS; + pIo->pViport->stats.ifOutOk++; + } + NDIS_SET_PACKET_STATUS( p_packet, ndis_status ); + NdisMSendComplete( pIo->pViport->p_adapter->h_handle, + p_packet, ndis_status ); + } + } + + if( !pData->p_viport->errored ) + { + data_checkXmitBuffers( pData ); + } + VNIC_EXIT( VNIC_DBG_DATA 
); + return; +} + +static void +data_sendKickMessage( + IN Data_t *pData ) +{ + XmitPool_t *pPool = &pData->xmitPool; + + VNIC_ENTER( VNIC_DBG_DATA ); + + /* stop timer for BundleTimeout */ + _data_kickTimer_stop( pData ); + + pPool->kickCount = 0; + pPool->kickByteCount = 0; + + /* TBD: Keep track of when kick is outstanding, and + * don't reuse until complete + */ + if ( ibqp_postSend( &pData->qp, &pData->kickIo.io ) != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Unable to send kick to EIOC\n") ); + viport_failure( pData->p_viport ); + } + + VNIC_EXIT( VNIC_DBG_DATA ); +} + +static void +_data_kickTimeoutHandler( void * context ) +{ + Data_t* pData = (Data_t *)context; + + VNIC_ENTER( VNIC_DBG_DATA ); + + InterlockedExchange( &pData->kickTimerOn, FALSE ); + data_sendKickMessage( pData ); + + VNIC_EXIT( VNIC_DBG_DATA ); + + return; +} + +static BOOLEAN +data_allocXmitBuffer( + IN Data_t *pData, + OUT BufferPoolEntry_t **ppBpe, + OUT RdmaIo_t **ppRdmaIo, + OUT BOOLEAN *pLast ) +{ + XmitPool_t *p_xmitPool = &pData->xmitPool; + KIRQL flags; + + VNIC_ENTER( VNIC_DBG_DATA ); + + KeAcquireSpinLock( &pData->xmitBufLock, &flags ); + + *pLast = FALSE; + *ppRdmaIo = &p_xmitPool->pXmitBufs[p_xmitPool->nextXmitBuf]; + *ppBpe = &p_xmitPool->bufPool[p_xmitPool->nextXmitPool]; + + if ( (*ppBpe)->valid && p_xmitPool->nextXmitBuf != p_xmitPool->lastCompBuf ) + { + INC(p_xmitPool->nextXmitBuf, 1, p_xmitPool->numXmitBufs); + INC(p_xmitPool->nextXmitPool, 1, p_xmitPool->poolSz); + + if ( !p_xmitPool->bufPool[p_xmitPool->nextXmitPool].valid ) + { + VNIC_TRACE( VNIC_DBG_DATA, + ("Just used the last EIOU receive buffer\n") ); + + *pLast = TRUE; + p_xmitPool->needBuffers = TRUE; + viport_stopXmit( pData->p_viport ); +#ifdef VNIC_STATISTIC + pData->statistics.kickReqs++; +#endif /* VNIC_STATISTIC */ + } + else if ( p_xmitPool->nextXmitBuf == p_xmitPool->lastCompBuf ) + { + VNIC_TRACE( VNIC_DBG_DATA, + ("Just used our last xmit buffer\n") ); + + p_xmitPool->needBuffers = TRUE; + viport_stopXmit( pData->p_viport ); + } + + (*ppBpe)->valid = 0; + + KeReleaseSpinLock( &pData->xmitBufLock, flags ); + return TRUE; + } + else + { +#ifdef VNIC_STATISTIC + pData->statistics.noXmitBufs++; +#endif /* VNIC_STATISTIC */ + + VNIC_TRACE( VNIC_DBG_ERROR, + ("Out of xmit buffers\n") ); + + viport_stopXmit( pData->p_viport ); + + KeReleaseSpinLock( &pData->xmitBufLock, flags ); + return FALSE; + } +} + +static void +data_checkXmitBuffers( + IN Data_t *pData ) +{ + XmitPool_t *p_xmitPool = &pData->xmitPool; + KIRQL flags; + + VNIC_ENTER( VNIC_DBG_DATA ); + + KeAcquireSpinLock( &pData->xmitBufLock, &flags ); + + if ( pData->xmitPool.needBuffers + && p_xmitPool->bufPool[p_xmitPool->nextXmitPool].valid + && p_xmitPool->nextXmitBuf != p_xmitPool->lastCompBuf ) + { + pData->xmitPool.needBuffers = FALSE; + viport_restartXmit( pData->p_viport ); + + VNIC_TRACE( VNIC_DBG_DATA, + ("There are free xmit buffers\n") ); + } + + KeReleaseSpinLock( &pData->xmitBufLock, flags ); + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +static +ib_api_status_t +data_rdmaPacket( + IN Data_t *pData, + IN BufferPoolEntry_t *pBpe, + IN RdmaIo_t *pRdmaIo ) +{ + ib_send_wr_t *pWrq; + uint64_t remote_addr; + + VNIC_ENTER( VNIC_DBG_DATA ); + + pWrq = &pRdmaIo->io.wrq; + + remote_addr = ntoh64( pBpe->remoteAddr ); + remote_addr += pData->xmitPool.bufferSz; + remote_addr -= ( pRdmaIo->len + sizeof(ViportTrailer_t) ); + + pWrq->remote_ops.vaddr = remote_addr; + pWrq->remote_ops.rkey = pBpe->rKey; + + pData->xmitPool.notifyCount++; + + if( 
pData->xmitPool.notifyCount >= pData->xmitPool.notifyBundle ) + { + pData->xmitPool.notifyCount = 0; + pWrq->send_opt = IB_SEND_OPT_SIGNALED; + } + else + { + pWrq->send_opt &= ~IB_SEND_OPT_SIGNALED; + } + pWrq->send_opt = IB_SEND_OPT_SIGNALED; + + if( pData->p_viport->featuresSupported & INIC_FEAT_RDMA_IMMED ) + { + pWrq->send_opt |= IB_SEND_OPT_IMMEDIATE; + pWrq->immediate_data = 0; + } + + if( ibqp_postSend( &pData->qp, &pRdmaIo->io ) != IB_SUCCESS ) + { + VNIC_TRACE(VNIC_DBG_ERROR, + ("Failed sending data to EIOC\n") ); + return IB_ERROR; + } +#ifdef VNIC_STATISTIC + pData->statistics.xmitNum++; +#endif /* VNIC_STATISTIC */ + + VNIC_EXIT( VNIC_DBG_DATA ); + return IB_SUCCESS; +} + +static NDIS_PACKET * +_data_recv_to_ndis_pkt( + IN Data_t *pData, + IN RdmaDest_t *pRdmaDest ) +{ + struct ViportTrailer *pTrailer; + NDIS_PACKET *p_packet; + NDIS_STATUS ndis_status; + int start; + unsigned int len; + uint8_t rxChksumFlags; + NDIS_TCP_IP_CHECKSUM_PACKET_INFO packet_info; + + VNIC_ENTER( VNIC_DBG_DATA ); + + pTrailer = pRdmaDest->pTrailer; + start = (int)data_offset(pData, pTrailer); + len = data_len(pData, pTrailer); + + NdisAllocatePacket( &ndis_status, + &p_packet, + pData->h_recv_pkt_pool ); + + if ( ndis_status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ( "NdisAllocatePacket failed %#x\n", ndis_status ) ); + return NULL; + } + NdisAllocateBuffer( &ndis_status, + &pRdmaDest->p_buf, + pData->h_recv_buf_pool, + pRdmaDest->data + start, + len ); + + if ( ndis_status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ( "NdisAllocateBuffer failed %#x\n", ndis_status ) ); + NdisFreePacket( p_packet ); + return NULL; + } + + NdisChainBufferAtFront( p_packet, pRdmaDest->p_buf ); + pRdmaDest->p_packet = p_packet; + + if ( pTrailer->pktFlags & PF_VLAN_INSERT && + !( pData->p_viport->featuresSupported & INIC_FEAT_IGNORE_VLAN ) ) + { + /* TODO: + * add OID_GEN_VLAN_ID + * handle VLAN tag insertion + * set packet header size = eth_hdr + 4 + */ + } + + NDIS_SET_PACKET_HEADER_SIZE( p_packet, sizeof(eth_hdr_t) ); + + rxChksumFlags = pTrailer->rxChksumFlags; + + VNIC_TRACE( VNIC_DBG_DATA, + ("rxChksumFlags = %d, LOOP = %c, IP = %c, TCP = %c, UDP = %c\n", + rxChksumFlags, + (rxChksumFlags & RX_CHKSUM_FLAGS_LOOPBACK)? 'Y': 'N', + (rxChksumFlags & RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED)? 'Y': + (rxChksumFlags & RX_CHKSUM_FLAGS_IP_CHECKSUM_FAILED)? 'N': '-', + (rxChksumFlags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED)? 'Y': + (rxChksumFlags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_FAILED)? 'N': '-', + (rxChksumFlags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED)? 'Y': + (rxChksumFlags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_FAILED)? 
'N': '-') );
+
+	packet_info.Value = 0;
+
+	if( rxChksumFlags & RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED )
+		packet_info.Receive.NdisPacketIpChecksumSucceeded = TRUE;
+	else if( rxChksumFlags & RX_CHKSUM_FLAGS_IP_CHECKSUM_FAILED )
+		packet_info.Receive.NdisPacketIpChecksumFailed = TRUE;
+
+	if( rxChksumFlags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED )
+		packet_info.Receive.NdisPacketTcpChecksumSucceeded = TRUE;
+	else if( rxChksumFlags & RX_CHKSUM_FLAGS_TCP_CHECKSUM_FAILED )
+		packet_info.Receive.NdisPacketTcpChecksumFailed = TRUE;
+
+	if( rxChksumFlags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED )
+		packet_info.Receive.NdisPacketUdpChecksumSucceeded = TRUE;
+	else if( rxChksumFlags & RX_CHKSUM_FLAGS_UDP_CHECKSUM_FAILED )
+		packet_info.Receive.NdisPacketUdpChecksumFailed = TRUE;
+
+	NDIS_PER_PACKET_INFO_FROM_PACKET( p_packet, TcpIpChecksumPacketInfo ) =
+		(void *)(uintn_t)packet_info.Value;
+
+	VNIC_RECV_FROM_PACKET( p_packet ) = pRdmaDest;
+	NDIS_SET_PACKET_STATUS( p_packet, NDIS_STATUS_SUCCESS );
+
+	VNIC_EXIT( VNIC_DBG_DATA );
+	return p_packet;
+}
+
+/* NOTE: This routine is not reentrant */
+static void
+_data_allocBuffers(
+	IN		Data_t		*pData,
+	IN		BOOLEAN		initialAllocation )
+{
+	RecvPool_t	*p_recvPool = &pData->recvPool;
+	RdmaDest_t	*pRdmaDest;
+	LIST_ENTRY	*p_list_entry;
+	int			index;
+
+	VNIC_ENTER( VNIC_DBG_DATA );
+
+	index = ADD(p_recvPool->nextFreeBuf, p_recvPool->numFreeBufs, p_recvPool->eiocPoolSz);
+
+	while ( !IsListEmpty( &p_recvPool->availRecvBufs ) )
+	{
+		p_list_entry = RemoveHeadList( &p_recvPool->availRecvBufs );
+		pRdmaDest = (RdmaDest_t*)p_list_entry;
+
+		if( initialAllocation )
+		{
+			pRdmaDest->buf_sz = p_recvPool->bufferSz;
+			pRdmaDest->pTrailer =
+				(struct ViportTrailer*)(pRdmaDest->data + pRdmaDest->buf_sz
+				- sizeof(struct ViportTrailer));
+			pRdmaDest->pTrailer->connectionHashAndValid = 0;
+		}
+
+		pRdmaDest->p_packet = NULL;
+		_data_addFreeBuffer(pData, index, pRdmaDest);
+		index = NEXT(index, p_recvPool->eiocPoolSz);
+	}
+
+	VNIC_EXIT( VNIC_DBG_DATA );
+	return;
+}
+
+static void
+_data_addFreeBuffer(
+	IN		Data_t		*pData,
+	IN		int			index,
+	IN		RdmaDest_t	*pRdmaDest )
+{
+	RecvPool_t			*p_recvPool = &pData->recvPool;
+	BufferPoolEntry_t	*pBpe;
+
+	pRdmaDest->pTrailer->connectionHashAndValid = 0;
+	pBpe = &p_recvPool->bufPool[index];
+
+	pBpe->rKey = pRdmaDest->region.rkey;
+	pBpe->remoteAddr = hton64( PTR64( pRdmaDest->data ) );
+	pBpe->valid = (uint32_t)(pRdmaDest - &p_recvPool->pRecvBufs[0]) + 1;
+	++p_recvPool->numFreeBufs;
+
+	return;
+}
+
+static uint32_t
+_data_incomingRecv(
+	IN		Data_t		*pData )
+{
+	RecvPool_t			*p_recvPool = &pData->recvPool;
+	RdmaDest_t			*pRdmaDest;
+	ViportTrailer_t		*pTrailer;
+	BufferPoolEntry_t	*pBpe;
+	NDIS_PACKET			*p_packet = NULL;
+	uint32_t			idx = 0;
+	BOOLEAN				status = FALSE;
+
+	VNIC_ENTER( VNIC_DBG_DATA );
+
+	while( !status )
+	{
+		if ( p_recvPool->nextFullBuf == p_recvPool->nextFreeBuf )
+			return idx;
+
+		pBpe = &p_recvPool->bufPool[p_recvPool->nextFullBuf];
+		pRdmaDest = &p_recvPool->pRecvBufs[pBpe->valid - 1];
+		pTrailer = pRdmaDest->pTrailer;
+
+		if ( ( pTrailer != NULL ) &&
+			( pTrailer->connectionHashAndValid & CHV_VALID ) )
+		{
+			/* received a packet */
+			if ( pTrailer->pktFlags & PF_KICK )
+			{
+				p_recvPool->kickOnFree = TRUE;
+			}
+			/* do not indicate the packet if no filter is set */
+			if( pData->p_viport->p_adapter->packet_filter &&
+				p_recvPool->numPostedBufs > 0 )
+			{
+				p_packet = _data_recv_to_ndis_pkt( pData, pRdmaDest );
+				if ( p_packet != NULL )
+				{
+					p_recvPool->recv_pkt_array[idx++] = p_packet;
+				}
+			}
+			else
+			{ /*
put back to free buffers pool */ + InsertTailList( &p_recvPool->availRecvBufs, + &pRdmaDest->listPtrs ); + } + pBpe->valid = 0; + INC( p_recvPool->nextFullBuf, 1, p_recvPool->eiocPoolSz ); + p_recvPool->numPostedBufs--; +#ifdef VNIC_STATISTIC + pData->statistics.recvNum++; +#endif /* VNIC_STATISTIC */ + } + else + break; + } + return idx; +} + + +void +vnic_return_packet( + IN NDIS_HANDLE adapter_context, + IN NDIS_PACKET * const p_packet ) +{ + + + vnic_adapter_t *p_adapter = (vnic_adapter_t *)adapter_context; + viport_t *p_viport = p_adapter->p_currentPath->pViport; + RdmaDest_t *p_rdma_dest = VNIC_RECV_FROM_PACKET( p_packet ); + + ASSERT( p_rdma_dest->p_packet == p_packet ); + _data_return_recv( p_packet ); + p_rdma_dest->p_packet = NULL; + + InsertTailList( &p_viport->data.recvPool.availRecvBufs, + &p_rdma_dest->listPtrs ); + +} +static void +_data_return_recv( + IN NDIS_PACKET *p_packet ) +{ + NDIS_BUFFER *p_buf; + /* Unchain the NDIS buffer. */ + NdisUnchainBufferAtFront( p_packet, &p_buf ); + CL_ASSERT( p_buf ); + /* Return the NDIS packet and NDIS buffer to their pools. */ + NdisFreeBuffer( p_buf ); + NdisFreePacket( p_packet ); +} + +static void +_data_sendFreeRecvBuffers( + IN Data_t *pData ) +{ + RecvPool_t *p_recvPool = &pData->recvPool; + ib_send_wr_t *pWrq = &pData->freeBufsIo.io.wrq; + BOOLEAN bufsSent = FALSE; + uint64_t rdmaAddr; + uint32_t offset; + uint32_t sz; + unsigned int numToSend, + nextIncrement; + + VNIC_ENTER( VNIC_DBG_DATA ); + + for ( numToSend = MIN_EIOC_UPDATE_SZ; + numToSend <= p_recvPool->numFreeBufs; + numToSend += MIN_EIOC_UPDATE_SZ ) + { + + /* Handle multiple bundles as one when possible. */ + nextIncrement = numToSend + MIN_EIOC_UPDATE_SZ; + if (( nextIncrement <= p_recvPool->numFreeBufs ) + && ( p_recvPool->nextFreeBuf + nextIncrement <= p_recvPool->eiocPoolSz ) ) + { + continue; + } + + offset = p_recvPool->nextFreeBuf * sizeof(BufferPoolEntry_t); + sz = numToSend * sizeof(BufferPoolEntry_t); + rdmaAddr = p_recvPool->eiocRdmaAddr + offset; + + pWrq->ds_array->length = sz; + pWrq->ds_array->vaddr = PTR64((uint8_t *)p_recvPool->bufPool + offset); + pWrq->remote_ops.vaddr = rdmaAddr; + + if ( ibqp_postSend( &pData->qp, &pData->freeBufsIo.io ) != IB_SUCCESS ) + { + VNIC_TRACE(VNIC_DBG_ERROR, + ("Unable to rdma free buffers to EIOC\n") ); + + viport_failure( pData->p_viport ); + break; + } + + INC( p_recvPool->nextFreeBuf, numToSend, p_recvPool->eiocPoolSz ); + p_recvPool->numFreeBufs -= numToSend; + p_recvPool->numPostedBufs += numToSend; + bufsSent = TRUE; + } + + if( bufsSent ) + { + if( p_recvPool->kickOnFree ) + { + data_sendKickMessage( pData ); + } + } + if( p_recvPool->numPostedBufs == 0 ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC %d: Unable to allocate receive buffers\n", + pData->p_viport->ioc_num ) ); + + //viport_failure( pData->p_viport ); + } + VNIC_EXIT( VNIC_DBG_DATA ); +} + + +void +data_cleanup( + IN Data_t *pData ) +{ + VNIC_ENTER( VNIC_DBG_DATA ); + + VNIC_TRACE(VNIC_DBG_INIT, + ("IOC[%d]data cleanup\n", pData->p_viport->ioc_num )); + + if( pData->p_recv_bufs ) + { + NdisFreeMemory( pData->p_recv_bufs, pData->recv_bufs_sz, 0 ); + pData->p_recv_bufs = NULL; + } + + if( pData->recvPool.recv_pkt_array ) + { + cl_free( pData->recvPool.recv_pkt_array ); + pData->recvPool.recv_pkt_array = NULL; + pData->recvPool.poolSz = 0; + } + + if ( pData->pLocalStorage ) + { + NdisFreeMemory( pData->pLocalStorage, pData->localStorageSz, 0 ); + pData->pLocalStorage = NULL; + } + + if( pData->h_recv_buf_pool ) + { + NdisFreeBufferPool( 
pData->h_recv_buf_pool ); + pData->h_recv_buf_pool = NULL; + } + + if ( pData->h_recv_pkt_pool ) + { + if( NdisPacketPoolUsage(pData->h_recv_pkt_pool) != 0) + { + VNIC_TRACE( VNIC_DBG_WARN, + ("Recv packet pool is not empty!!!\n") ); + } + NdisFreePacketPool( pData->h_recv_pkt_pool ); + pData->h_recv_pkt_pool = NULL; + } + // clear Qp struct for reuse + cl_memclr( &pData->qp, sizeof( IbQp_t) ); + + cl_timer_destroy( &pData->kickTimer ); + + VNIC_EXIT( VNIC_DBG_DATA ); +} + + +static void +_data_kickTimer_start( + IN Data_t *pData, + IN uint32_t microseconds ) +{ + VNIC_ENTER( VNIC_DBG_DATA ); + + InterlockedExchange( (LONG *)&pData->kickTimerOn, TRUE ); + + usec_timer_start(&pData->kickTimer, microseconds ); + + VNIC_EXIT( VNIC_DBG_DATA ); + return; +} + +static void +_data_kickTimer_stop( + IN Data_t *pData ) +{ + VNIC_ENTER( VNIC_DBG_DATA ); + + if( InterlockedExchange( &pData->kickTimerOn, FALSE ) == TRUE ) + { + cl_timer_stop( &pData->kickTimer ); + } + + VNIC_EXIT( VNIC_DBG_DATA ); +} diff --git a/branches/Ndi/ulp/inic/kernel/vnic_data.h b/branches/Ndi/ulp/inic/kernel/vnic_data.h new file mode 100644 index 00000000..4f156e3a --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_data.h @@ -0,0 +1,204 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#ifndef _VNIC_DATA_H_ +#define _VNIC_DATA_H_ + +#include "vnic_trailer.h" + +typedef struct RdmaDest { + LIST_ENTRY listPtrs; + IbRegion_t region; + NDIS_PACKET *p_packet; + NDIS_BUFFER *p_buf; + uint32_t buf_sz; + uint8_t *data; + struct ViportTrailer *pTrailer; +} RdmaDest_t; + +typedef struct BufferPoolEntry { + uint64_t remoteAddr; + net32_t rKey; + uint32_t valid; +} BufferPoolEntry_t; + +typedef struct RecvPool { + uint32_t bufferSz; + uint32_t poolSz; + uint32_t eiocPoolSz; + uint32_t eiocRdmaRkey; + uint64_t eiocRdmaAddr; + uint32_t nextFullBuf; + uint32_t nextFreeBuf; + uint32_t numFreeBufs; + uint32_t numPostedBufs; + uint32_t szFreeBundle; + BOOLEAN kickOnFree; + BufferPoolEntry_t *bufPool; + RdmaDest_t *pRecvBufs; + LIST_ENTRY availRecvBufs; + NDIS_PACKET **recv_pkt_array; +} RecvPool_t; + +typedef struct XmitPool { + uint32_t bufferSz; + uint32_t poolSz; + uint32_t notifyCount; + uint32_t notifyBundle; + uint32_t nextXmitBuf; + uint32_t lastCompBuf; + uint32_t numXmitBufs; + uint32_t nextXmitPool; + uint32_t kickCount; + uint32_t kickByteCount; + uint32_t kickBundle; + uint32_t kickByteBundle; + BOOLEAN needBuffers; + BOOLEAN sendKicks; + uint32_t rdmaRKey; + uint64_t rdmaAddr; + BufferPoolEntry_t *bufPool; + RdmaIo_t *pXmitBufs; +} XmitPool_t; + +typedef struct Data { + struct _viport *p_viport; + DataConfig_t *p_conf; + IbRegion_t *p_phy_region; + IbRegion_t region; + IbRegion_t rbuf_region; + IbQp_t qp; + uint8_t *pLocalStorage; + uint32_t localStorageSz; + uint8_t *p_recv_bufs; + uint32_t recv_bufs_sz; + Inic_RecvPoolConfig_t hostPoolParms; + Inic_RecvPoolConfig_t eiocPoolParms; + RecvPool_t recvPool; + XmitPool_t xmitPool; + RdmaIo_t freeBufsIo; + SendIo_t kickIo; + LIST_ENTRY recvIos; + KSPIN_LOCK recvIosLock; + KSPIN_LOCK xmitBufLock; + volatile LONG kickTimerOn; + BOOLEAN connected; + cl_timer_t kickTimer; + NDIS_HANDLE h_recv_pkt_pool; + NDIS_HANDLE h_recv_buf_pool; +#ifdef VNIC_STATISTIC + struct { + uint32_t xmitNum; + uint32_t recvNum; + uint32_t freeBufSends; + uint32_t freeBufNum; + uint32_t freeBufMin; + uint32_t kickRecvs; + uint32_t kickReqs; + uint32_t noXmitBufs; + uint64_t noXmitBufTime; + } statistics; +#endif /* VNIC_STATISTIC */ +} Data_t; + +void +vnic_return_packet( + IN NDIS_HANDLE adapter_context, + IN NDIS_PACKET* const p_packet ); + +void +data_construct( + IN Data_t *pData, + IN struct _viport *pViport ); + +ib_api_status_t +data_init( + Data_t *pData, + DataConfig_t *p_conf, + uint64_t guid ); + +ib_api_status_t +data_connect( + Data_t *pData ); + +void +data_connected( + Data_t *pData ); + +void +data_disconnect( + Data_t *pData ); + +BOOLEAN +data_xmitPacket( + Data_t *pData, + NDIS_PACKET* const p_pkt ); + +void +data_cleanup( + Data_t *pData ); + +#define data_pathId(pData) (pData)->p_conf->pathId +#define data_eiocPool(pData) &(pData)->eiocPoolParms +#define data_hostPool(pData) &(pData)->hostPoolParms +#define data_eiocPoolMin(pData) &(pData)->p_conf->eiocMin +#define data_hostPoolMin(pData) &(pData)->p_conf->hostMin +#define data_eiocPoolMax(pData) &(pData)->p_conf->eiocMax +#define data_hostPoolMax(pData) &(pData)->p_conf->hostMax +#define data_localPoolAddr(pData) (pData)->xmitPool.rdmaAddr +#define data_localPoolRkey(pData) (pData)->xmitPool.rdmaRKey +#define data_remotePoolAddr(pData) &(pData)->recvPool.eiocRdmaAddr +#define data_remotePoolRkey(pData) &(pData)->recvPool.eiocRdmaRkey +#define data_maxMtu(pData) MAX_PAYLOAD(min((pData)->recvPool.bufferSz, (pData)->xmitPool.bufferSz)) - ETH_VLAN_HLEN +#define 
data_len(pData, pTrailer) ntoh16(pTrailer->dataLength) +#define data_offset(pData, pTrailer) \ + pData->recvPool.bufferSz - sizeof(struct ViportTrailer) \ + - (uint32_t)ROUNDUPP2(data_len(pData, pTrailer), VIPORT_TRAILER_ALIGNMENT) \ + + pTrailer->dataAlignmentOffset + + +/* The following macros manipulate ring buffer indexes. + * The ring buffer size must be a power of 2. + */ +#define ADD(index, increment, size) (((index) + (increment))&((size) - 1)) +#define NEXT(index, size) ADD(index, 1, size) +#define INC(index, increment, size) (index) = ADD(index, increment, size) + +#define VNIC_RECV_FROM_PACKET( P ) \ + (((RdmaDest_t **)P->MiniportReservedEx)[1]) + +#define VNIC_LIST_ITEM_FROM_PACKET( P ) \ + ((LIST_ENTRY *)P->MiniportReservedEx) + +#define VNIC_PACKET_FROM_LIST_ITEM( I ) \ + (PARENT_STRUCT( I, NDIS_PACKET, MiniportReservedEx )) + +#endif /* _VNIC_DATA_H_ */ \ No newline at end of file diff --git a/branches/Ndi/ulp/inic/kernel/vnic_debug.h b/branches/Ndi/ulp/inic/kernel/vnic_debug.h new file mode 100644 index 00000000..9a69dd47 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_debug.h @@ -0,0 +1,105 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+#ifndef _VNIC_DEBUG_H_
+#define _VNIC_DEBUG_H_
+
+
+#include
+
+/*
+ * Debug macros
+ */
+extern uint32_t		g_vnic_dbg_lvl;
+
+
+#define VNIC_DBG_INIT		( ( VNIC_DBG_INFO ) | 0x00000001 )
+#define VNIC_DBG_PNP		( ( VNIC_DBG_INFO ) | 0x00000002 )
+#define VNIC_DBG_SEND		( ( VNIC_DBG_INFO ) | 0x00000004 )
+#define VNIC_DBG_RECV		( ( VNIC_DBG_INFO ) | 0x00000008 )
+#define VNIC_DBG_STATUS		( ( VNIC_DBG_INFO ) | 0x00000010 )
+#define VNIC_DBG_IB			( ( VNIC_DBG_INFO ) | 0x00000020 )
+#define VNIC_DBG_BUF		( ( VNIC_DBG_INFO ) | 0x00000040 )
+#define VNIC_DBG_MCAST		( ( VNIC_DBG_INFO ) | 0x00000080 )
+#define VNIC_DBG_ALLOC		( ( VNIC_DBG_INFO ) | 0x00000100 )
+#define VNIC_DBG_OID		( ( VNIC_DBG_INFO ) | 0x00000200 )
+#define VNIC_DBG_DATA		( ( VNIC_DBG_INFO ) | 0x00000400 )
+#define VNIC_DBG_CTRL		( ( VNIC_DBG_INFO ) | 0x00000800 )
+#define VNIC_DBG_CTRL_PKT	( ( VNIC_DBG_INFO ) | 0x00001000 )
+#define VNIC_DBG_CONF		( ( VNIC_DBG_INFO ) | 0x00002000 )
+#define VNIC_DBG_VIPORT		( ( VNIC_DBG_INFO ) | 0x00004000 )
+#define VNIC_DBG_ADAPTER	( ( VNIC_DBG_INFO ) | 0x00008000 )
+#define VNIC_DBG_NETPATH	( ( VNIC_DBG_INFO ) | 0x00010000 )
+
+#define VNIC_DBG_FUNC	(0x10000000)	/* For function entry/exit */
+#define VNIC_DBG_INFO	(0x20000000)	/* For verbose information */
+#define VNIC_DBG_WARN	(0x40000000)	/* For warnings. */
+#define VNIC_DBG_ERROR	CL_DBG_ERROR
+#define VNIC_DBG_ALL	CL_DBG_ALL
+
+#define VNIC_DEBUG_FLAGS ( VNIC_DBG_ERROR | VNIC_DBG_WARN | VNIC_DBG_INFO | VNIC_DBG_PNP | VNIC_DBG_INIT )
+
+/* Enter and exit macros automatically add the VNIC_DBG_FUNC bit */
+#define VNIC_ENTER( lvl )	\
+	CL_ENTER( (lvl | VNIC_DBG_FUNC), g_vnic_dbg_lvl )
+
+#define VNIC_EXIT( lvl )	\
+	CL_EXIT( (lvl | VNIC_DBG_FUNC), g_vnic_dbg_lvl )
+
+#define VNIC_TRACE( lvl, msg )	\
+	CL_TRACE( (lvl), g_vnic_dbg_lvl, msg )
+
+#define VNIC_TRACE_EXIT( lvl, msg )	\
+	CL_TRACE_EXIT( (lvl), g_vnic_dbg_lvl, msg )
+
+#define VNIC_PRINT( lvl, msg )	\
+	CL_PRINT( (lvl), g_vnic_dbg_lvl, msg )
+
+#define VNIC_TRACE_BYTES( lvl, ptr, len )									\
+	{																		\
+		size_t _loop_;														\
+		for( _loop_ = 0; _loop_ < (len); ++_loop_ )							\
+		{																	\
+			CL_PRINT( (lvl), g_vnic_dbg_lvl,								\
+				("0x%.2X ", ((uint8_t*)(ptr))[_loop_]) );					\
+			if( (_loop_ + 1) % 16 == 0 )									\
+			{																\
+				CL_PRINT( (lvl), g_vnic_dbg_lvl, ("\n") );					\
+			}																\
+			else if( (_loop_ + 1) % 4 == 0 )								\
+			{																\
+				CL_PRINT( (lvl), g_vnic_dbg_lvl, (" ") );					\
+			}																\
+		}																	\
+		CL_PRINT( (lvl), g_vnic_dbg_lvl, ("\n") );							\
+	}
+
+
+#endif /* _VNIC_DEBUG_H_ */
diff --git a/branches/Ndi/ulp/inic/kernel/vnic_driver.c b/branches/Ndi/ulp/inic/kernel/vnic_driver.c
new file mode 100644
index 00000000..cba109e2
--- /dev/null
+++ b/branches/Ndi/ulp/inic/kernel/vnic_driver.c
@@ -0,0 +1,1922 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include +#include "vnic_driver.h" + + +vnic_globals_t g_vnic; + +#define DEFAULT_HOST_NAME "VNIC Host" + +uint32_t g_vnic_dbg_lvl = VNIC_DEBUG_FLAGS; + +static void +_vnic_complete_query( + IN vnic_adapter_t* const p_adapter, + IN pending_oid_t* const p_oid_info, + IN const NDIS_STATUS status, + IN const void* const p_buf, + IN const ULONG buf_len ); + +static NDIS_STATUS +__vnic_get_tcp_task_offload( + IN vnic_adapter_t* p_adapter, + IN pending_oid_t* const p_oid_info ); + +static NDIS_STATUS +__vnic_set_tcp_task_offload( + IN vnic_adapter_t* p_adapter, + IN void* const p_info_buf, + IN ULONG* const p_info_len ); + +static NDIS_STATUS +_vnic_process_packet_filter( + IN vnic_adapter_t* const p_adapter, + IN ULONG pkt_filter ); + +static void +__vnic_read_machine_name( void ); + +static NDIS_STATUS +__vnic_set_machine_name( + IN VOID *p_uni_array, + IN USHORT buf_size ); + +/* +p_drv_obj + Pointer to Driver Object for this device driver +p_registry_path + Pointer to unicode string containing path to this driver's registry area +return + STATUS_SUCCESS, NDIS_STATUS_BAD_CHARACTERISTICS, NDIS_STATUS_BAD_VERSION, + NDIS_STATUS_RESOURCES, or NDIS_STATUS_FAILURE +IRQL = PASSIVE_LEVEL +*/ +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_drv_obj, + IN PUNICODE_STRING p_registry_path ) +{ + NDIS_STATUS status; + NDIS_MINIPORT_CHARACTERISTICS characteristics; + + VNIC_ENTER( VNIC_DBG_INIT ); + +#ifdef _DEBUG_ + PAGED_CODE(); +#endif + + status = CL_INIT; + + if( !NT_SUCCESS( status ) ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("cl_init failed.\n") ); + return status; + } + + status = NDIS_STATUS_SUCCESS; + + KeInitializeSpinLock( &g_vnic.lock ); + InitializeListHead( &g_vnic.adapter_list ); + + g_vnic.adapters = 0; + g_vnic.ndis_handle = NULL; + + NdisMInitializeWrapper( &g_vnic.ndis_handle, p_drv_obj, p_registry_path, NULL ); + + if ( g_vnic.ndis_handle == NULL ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("NdisMInitializeWrapper failed\n")); + CL_DEINIT; + return NDIS_STATUS_FAILURE; + } + + cl_memclr( &characteristics, sizeof(characteristics) ); + + characteristics.MajorNdisVersion = MAJOR_NDIS_VERSION; + characteristics.MinorNdisVersion = MINOR_NDIS_VERSION; + //characteristics.CheckForHangHandler = vnic_check_for_hang; + characteristics.HaltHandler = vnic_halt; + characteristics.InitializeHandler = vnic_initialize; + characteristics.QueryInformationHandler = vnic_oid_query_info; + characteristics.ResetHandler = vnic_reset; + characteristics.SetInformationHandler = vnic_oid_set_info; + characteristics.ReturnPacketHandler = vnic_return_packet; + characteristics.SendPacketsHandler = vnic_send_packets; + +#ifdef NDIS51_MINIPORT + characteristics.PnPEventNotifyHandler = vnic_pnp_notify; + characteristics.AdapterShutdownHandler = vnic_shutdown; +#endif + + status = NdisMRegisterMiniport( + g_vnic.ndis_handle, &characteristics, sizeof(characteristics) ); + + if( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("NdisMRegisterMiniport failed with status of %d\n", status) ); + 
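/* registration failed: tear the wrapper down before deinitializing
+		 * complib, undoing NdisMInitializeWrapper and CL_INIT in
+		 * reverse order of initialization */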
NdisTerminateWrapper( g_vnic.ndis_handle, NULL ); + CL_DEINIT; + } + else + { + NdisMRegisterUnloadHandler( g_vnic.ndis_handle, vnic_unload ); + + __vnic_read_machine_name(); + } + + VNIC_EXIT( VNIC_DBG_INIT ); + return status; +} + + +VOID +vnic_unload( + IN PDRIVER_OBJECT p_drv_obj ) +{ + VNIC_ENTER( VNIC_DBG_INIT ); + + UNREFERENCED_PARAMETER( p_drv_obj ); + CL_DEINIT; + + VNIC_EXIT( VNIC_DBG_INIT ); +} + + +//! Initialization function called for each IOC discovered +/* The MiniportInitialize function is a required function that sets up a +NIC (or virtual NIC) for network I/O operations, claims all hardware +resources necessary to the NIC in the registry, and allocates resources +the driver needs to carry out network I/O operations. +IRQL = PASSIVE_LEVEL + +@param p_open_status Pointer to a status field set if this function returns NDIS_STATUS_OPEN_ERROR +@param p_selected_medium_index Pointer to unsigned integer noting index into medium_array for this NIC +@param medium_array Array of mediums for this NIC +@param medium_array_size Number of elements in medium_array +@param h_handle Handle assigned by NDIS for this NIC +@param wrapper_config_context Handle used for Ndis initialization functions +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_UNSUPPORTED_MEDIA, NDIS_STATUS_RESOURCES, +NDIS_STATUS_NOT_SUPPORTED +*/ +NDIS_STATUS +vnic_initialize( + OUT PNDIS_STATUS p_open_status, + OUT PUINT p_selected_medium_index, + IN PNDIS_MEDIUM medium_array, + IN UINT medium_array_size, + IN NDIS_HANDLE h_handle, + IN NDIS_HANDLE wrapper_config_context ) +{ + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + ib_api_status_t ib_status; + ib_pnp_req_t pnp_req; + UINT medium_index; + vnic_adapter_t *p_adapter; + ULONG buffer_size; + + VNIC_ENTER( VNIC_DBG_INIT ); + +#ifdef _DEBUG_ + PAGED_CODE(); +#endif + + UNUSED_PARAM( p_open_status ); + + /* Search for our medium */ + for( medium_index = 0; medium_index < medium_array_size; ++medium_index ) + { + /* Check to see if we found our medium */ + if( medium_array[medium_index] == NdisMedium802_3 ) + break; + } + + if( medium_index == medium_array_size ) /* Never found it */ + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("No supported media.\n") ); + return NDIS_STATUS_UNSUPPORTED_MEDIA; + } + + *p_selected_medium_index = medium_index; + + /* Create the adapter */ + ib_status = vnic_create_adapter( h_handle, wrapper_config_context, &p_adapter ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("returned status %x\n", ib_status ) ); + return NDIS_STATUS_FAILURE; + } + + /* set NDIS features we support */ + NdisMSetAttributesEx( h_handle, + (NDIS_HANDLE)p_adapter, + 2, /*check for hung t-out */ + NDIS_ATTRIBUTE_BUS_MASTER | + NDIS_ATTRIBUTE_DESERIALIZE | + NDIS_ATTRIBUTE_SURPRISE_REMOVE_OK | + NDIS_ATTRIBUTE_USES_SAFE_BUFFER_APIS, + NdisInterfacePNPBus ); + + buffer_size = ROUNDUPP2(p_adapter->params.MinMtu + sizeof(eth_hdr_t), 8 ); + + status = NdisMInitializeScatterGatherDma( h_handle, TRUE, buffer_size ); + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Init ScatterGatherDma failed status %#x\n", status ) ); + goto failure; + } + + /* Register for IOC events */ + pnp_req.pfn_pnp_cb = __vnic_pnp_cb; + pnp_req.pnp_class = IB_PNP_IOC | IB_PNP_FLAG_REG_SYNC; + pnp_req.pnp_context = (const void *)p_adapter; + + ib_status = p_adapter->ifc.reg_pnp( p_adapter->h_al, &pnp_req, &p_adapter->h_pnp ); + + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ib_reg_pnp returned %s\n", p_adapter->ifc.get_err_str( ib_status 
)) ); + status = NDIS_STATUS_FAILURE; + goto failure; + } + + if( p_adapter->state == INIC_UNINITIALIZED ) + { + status = NDIS_STATUS_FAILURE; +failure: + vnic_destroy_adapter( p_adapter ); + return status; + } + + VNIC_EXIT( VNIC_DBG_INIT ); + return status; + +} + + +//! Deallocates resources when the NIC is removed and halts the NIC.. +/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +*/ +void +vnic_halt( + IN NDIS_HANDLE adapter_context ) +{ + vnic_adapter_t *p_adapter; + + VNIC_ENTER( VNIC_DBG_INIT ); + CL_ASSERT( adapter_context ); + + p_adapter = (vnic_adapter_t*)adapter_context; + + vnic_destroy_adapter( p_adapter ); + + VNIC_EXIT( VNIC_DBG_INIT ); +} + + +//! Reports the state of the NIC, or monitors the responsiveness of an underlying device driver. +/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@return TRUE if the driver determines that its NIC is not operating +*/ +#define VNIC_MAX_HUNG_CHK 3 +BOOLEAN +vnic_check_for_hang( + IN NDIS_HANDLE adapter_context ) +{ + vnic_adapter_t *p_adapter; + + CL_ASSERT( adapter_context ); + p_adapter = (vnic_adapter_t*)adapter_context; + + if( p_adapter->p_viport ) + { + if( p_adapter->p_viport->errored != 0 ) + p_adapter->hung++; + } + if( p_adapter->hung > VNIC_MAX_HUNG_CHK ) + { + VNIC_TRACE( VNIC_DBG_WARN, ("Adapter Hung\n")); + p_adapter->hung = 0; + return TRUE; + } + return FALSE; +} + + +//! Returns information about the capabilities and status of the driver and/or its NIC. +/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@param oid Object ID representing the query operation to be carried out +@param info_buf Buffer containing any input for this query and location for output +@param info_buf_len Number of bytes available in info_buf +@param p_bytes_written Pointer to number of bytes written into info_buf +@param p_bytes_needed Pointer to number of bytes needed to satisfy this oid +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_INVALID_OID, +NDIS_STATUS_INVALID_LENGTH, NDIS_STATUS_NOT_ACCEPTED, NDIS_STATUS_NOT_SUPPORTED, +NDIS_STATUS_RESOURCES +*/ +NDIS_STATUS +vnic_oid_query_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_written, + OUT PULONG p_bytes_needed ) +{ + vnic_adapter_t *p_adapter; + NDIS_STATUS status; + USHORT version; + uint32_t info32; + uint64_t info64; + PVOID src_buf; + ULONG buf_len; + pending_oid_t oid_info; + + VNIC_ENTER( VNIC_DBG_OID ); + + oid_info.oid = oid; + oid_info.p_buf = info_buf; + oid_info.buf_len = info_buf_len; + oid_info.p_bytes_used = p_bytes_written; + oid_info.p_bytes_needed = p_bytes_needed; + + CL_ASSERT( adapter_context ); + p_adapter = (vnic_adapter_t *)adapter_context; + + CL_ASSERT( p_bytes_written ); + CL_ASSERT( p_bytes_needed ); + + status = NDIS_STATUS_SUCCESS; + src_buf = &info32; + buf_len = sizeof(info32); + + if( !p_adapter->p_viport || + !p_adapter->p_currentPath->carrier ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + goto complete; + } + + switch( oid ) + { + /* Required General */ + case OID_GEN_SUPPORTED_LIST: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_SUPPORTED_LIST\n") ); + src_buf = (PVOID)SUPPORTED_OIDS; + buf_len = sizeof(SUPPORTED_OIDS); + break; + + case OID_GEN_HARDWARE_STATUS: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_HARDWARE_STATUS\n") ); + + if( p_adapter->p_currentPath->carrier ) + { + VNIC_TRACE( VNIC_DBG_OID, + 
("returning NdisHardwareStatusReady\n") ); + info32 = NdisHardwareStatusReady; + } + else + { + VNIC_TRACE( VNIC_DBG_OID, + ("returning NdisHardwareStatusInitializing\n") ); + info32 = NdisHardwareStatusNotReady; + } + break; + + case OID_GEN_MEDIA_SUPPORTED: + case OID_GEN_MEDIA_IN_USE: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_MEDIA_SUPPORTED " + "or OID_GEN_MEDIA_IN_USE\n") ); + info32 = NdisMedium802_3; + break; + + case OID_GEN_MAXIMUM_FRAME_SIZE: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_MAXIMUM_FRAME_SIZE\n") ); + if( !p_adapter->p_currentPath->carrier ) + { + info32 = p_adapter->params.MinMtu; + } + else + { + info32 = p_adapter->p_currentPath->pViport->mtu; + /*TODO: add VLAN tag size if support request */ + } + break; + + case OID_GEN_LINK_SPEED: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_LINK_SPEED\n") ); + + if( p_adapter->p_currentPath->carrier ) + { + /* if we get link speed value - it is in Mbps units - have to convert to 100bps*/ + info32 = ( p_adapter->link_speed )? + ( p_adapter->link_speed * LINK_SPEED_1MBIT_x100BPS ): + DEFAULT_LINK_SPEED_x100BPS; + } + else + { + status = NDIS_STATUS_NOT_ACCEPTED; + } + break; + + case OID_GEN_TRANSMIT_BUFFER_SPACE: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_TRANSMIT_BUFFER_SPACE\n") ); + if ( !p_adapter->p_currentPath->carrier ) + { + status= NDIS_STATUS_NOT_ACCEPTED; + } + else + { + info32 = p_adapter->p_viport->data.xmitPool.bufferSz * + p_adapter->p_viport->data.xmitPool.poolSz; + } + break; + + case OID_GEN_RECEIVE_BUFFER_SPACE: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_RECEIVE_BUFFER_SPACE " + "or OID_GEN_RECEIVE_BUFFER_SPACE\n") ); + if ( !p_adapter->p_currentPath->carrier ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + } + else + { + info32 = p_adapter->p_viport->data.recvPool.bufferSz * + p_adapter->p_viport->data.recvPool.poolSz; + } + break; + case OID_GEN_MAXIMUM_LOOKAHEAD: + case OID_GEN_CURRENT_LOOKAHEAD: + case OID_GEN_TRANSMIT_BLOCK_SIZE: + case OID_GEN_RECEIVE_BLOCK_SIZE: + case OID_GEN_MAXIMUM_TOTAL_SIZE: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_MAXIMUM_LOOKAHEAD " + "or OID_GEN_CURRENT_LOOKAHEAD or " + "OID_GEN_TRANSMIT_BLOCK_SIZE or " + "OID_GEN_RECEIVE_BLOCK_SIZE or " + "OID_GEN_MAXIMUM_TOTAL_SIZE\n") ); + if( !p_adapter->p_currentPath->carrier ) + { + info32 = p_adapter->params.MinMtu; + } + else + { + info32 = p_adapter->p_currentPath->pViport->mtu; + } + /*TODO: add VLAN tag size if support requested */ + info32 += sizeof(eth_hdr_t); + break; + + case OID_GEN_VENDOR_ID: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_VENDOR_ID\n") ); + + src_buf = (void*)VENDOR_ID; + buf_len = sizeof(VENDOR_ID); + break; + + case OID_GEN_VENDOR_DESCRIPTION: + VNIC_TRACE( VNIC_DBG_OID, + ("received query for OID_GEN_VENDOR_DESCRIPTION\n") ); + src_buf = VENDOR_DESCRIPTION; + buf_len = sizeof(VENDOR_DESCRIPTION); + break; + + case OID_GEN_VENDOR_DRIVER_VERSION: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_VENDOR_DRIVER_VERSION\n" ) ); + src_buf = &version; + buf_len = sizeof(version); + //TODO: Figure out what the right version is. 
+ version = INIC_MAJORVERSION << 8 | INIC_MINORVERSION; + break; + + case OID_GEN_PHYSICAL_MEDIUM: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_PHYSICAL_MEDIUM\n" ) ); + info32 = NdisPhysicalMediumUnspecified; + break; + + case OID_GEN_CURRENT_PACKET_FILTER: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_CURRENT_PACKET_FILTER\n" ) ); + info32 = p_adapter->packet_filter; + break; + + case OID_GEN_DRIVER_VERSION: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_DRIVER_VERSION\n" ) ); + src_buf = &version; + buf_len = sizeof(version); + version = MAJOR_NDIS_VERSION << 8 | MINOR_NDIS_VERSION; + break; + + case OID_GEN_MAC_OPTIONS: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MAC_OPTIONS\n" ) ); + info32 = NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA | + NDIS_MAC_OPTION_TRANSFERS_NOT_PEND | + NDIS_MAC_OPTION_NO_LOOPBACK | + NDIS_MAC_OPTION_FULL_DUPLEX ; +//TODO: Figure out if we will support priority and VLANs. +// NDIS_MAC_OPTION_8021P_PRIORITY; +//#ifdef NDIS51_MINIPORT +// info |= NDIS_MAC_OPTION_8021Q_VLAN; +//#endif + break; + + case OID_GEN_MEDIA_CONNECT_STATUS: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MEDIA_CONNECT_STATUS\n" ) ); + + info32 = ( p_adapter->carrier )? + NdisMediaStateConnected : + NdisMediaStateDisconnected; + break; + + case OID_GEN_MAXIMUM_SEND_PACKETS: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MAXIMUM_SEND_PACKETS\n" ) ); + info32 = MAXLONG; // NDIS ignored it anyway + break; + + /* Required General Statistics */ + case OID_GEN_XMIT_OK: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_XMIT_OK\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutOk; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_RCV_OK: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_RCV_OK\n" ) ); + if ( !p_adapter->p_currentPath->carrier) + { + info32 = 0; + break; + } + if ( info_buf_len == sizeof(info32) ) + { + info32 = (uint32_t)p_adapter->p_viport->stats.ifInOk; + } + else + { + info64 = p_adapter->p_viport->stats.ifInOk; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_XMIT_ERROR: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_XMIT_ERROR\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutErrors; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_RCV_ERROR: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_RCV_ERROR\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInErrors; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_RCV_NO_BUFFER: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_RCV_NO_BUFFER\n" ) ); + info32 = 0; + break; + + case OID_GEN_DIRECTED_BYTES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_DIRECTED_BYTES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutUcastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_DIRECTED_FRAMES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_DIRECTED_FRAMES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutNUcastPkts; + src_buf = &info64; + 
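/* as with the other 64-bit counters, stage the value in info64 and
+			 * repoint src_buf/buf_len; the common completion path at the
+			 * end of this routine copies from whatever src_buf references */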
buf_len = sizeof(info64); + } + break; + + case OID_GEN_MULTICAST_BYTES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MULTICAST_BYTES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutMulticastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_MULTICAST_FRAMES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MULTICAST_FRAMES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutMulticastPkts; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_BROADCAST_BYTES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_BROADCAST_BYTES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutBroadcastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_BROADCAST_FRAMES_XMIT: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_BROADCAST_FRAMES_XMIT\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifOutBroadcastPkts; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + case OID_GEN_DIRECTED_BYTES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_DIRECTED_BYTES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInUcastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_DIRECTED_FRAMES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_DIRECTED_FRAMES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInNUcastPkts; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_MULTICAST_BYTES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MULTICAST_BYTES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInMulticastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_MULTICAST_FRAMES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_MULTICAST_FRAMES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInMulticastPkts; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_BROADCAST_BYTES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_BROADCAST_BYTES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInBroadcastBytes; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + case OID_GEN_BROADCAST_FRAMES_RCV: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_GEN_BROADCAST_FRAMES_RCV\n" ) ); + if ( !p_adapter->p_currentPath->carrier ) + { + info32 = 0; + } + else + { + info64 = p_adapter->p_viport->stats.ifInBroadcastPkts; + src_buf = &info64; + buf_len = sizeof(info64); + } + break; + + /* Required Ethernet operational characteristics */ + case OID_802_3_PERMANENT_ADDRESS: + case OID_802_3_CURRENT_ADDRESS: +#if defined( _DEBUG_ ) + if( oid == OID_802_3_PERMANENT_ADDRESS ) + { + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_PERMANENT_ADDRESS\n" ) 
); + } + else + { + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_CURRENT_ADDRESS\n" ) ); + } +#endif /* defined( _DEBUG_ )*/ + if( !p_adapter->p_viport || + p_adapter->p_viport->errored || + p_adapter->p_viport->disconnect ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + break; + } + if ( !p_adapter->macSet ) + { + p_adapter->pending_query = TRUE; + p_adapter->query_oid = oid_info; + + VNIC_TRACE( VNIC_DBG_OID, + ("returning NDIS_STATUS_PENDING\n") ); + status = NDIS_STATUS_PENDING; + } + else + { + src_buf = &p_adapter->p_viport->hwMacAddress; + buf_len = HW_ADDR_LEN; + } + break; + + case OID_802_3_MULTICAST_LIST: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_MULTICAST_LIST\n" ) ); + + if (!p_adapter->p_currentPath->carrier || + !(p_adapter->p_viport->flags & INIC_FLAG_ENABLE_NIC) ) + { + p_adapter->pending_query = TRUE; + p_adapter->query_oid = oid_info; + + VNIC_TRACE( VNIC_DBG_OID, + ("returning NDIS_STATUS_PENDING\n") ); + status = NDIS_STATUS_PENDING; + } + else if ( p_adapter->mc_count > 0 ) + { + buf_len = p_adapter->mc_count * sizeof( mac_addr_t ); + src_buf = &p_adapter->mcast_array; + } + else + { + info32 = 0; + } + break; + + case OID_802_3_MAXIMUM_LIST_SIZE: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_MAXIMUM_LIST_SIZE\n" ) ); + if ( !p_adapter->macSet ) + { + info32 = MAX_MCAST; + } + else + { + info32 = p_adapter->p_viport->numMacAddresses - MCAST_ADDR_START; + } + break; + case OID_802_3_MAC_OPTIONS: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_MAC_OPTIONS\n" ) ); + info32 = 0; + break; + + /* Required Ethernet stats */ + case OID_802_3_RCV_ERROR_ALIGNMENT: + case OID_802_3_XMIT_ONE_COLLISION: + case OID_802_3_XMIT_MORE_COLLISIONS: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_802_3_RCV_ERROR_ALIGNMENT or " + "OID_802_3_XMIT_ONE_COLLISION or " + "OID_802_3_XMIT_MORE_COLLISIONS\n" ) ); + info32 = 0; + break; + + case OID_TCP_TASK_OFFLOAD: + VNIC_TRACE( VNIC_DBG_OID, + (" received query for OID_TCP_TASK_OFFLOAD\n" ) ); + + src_buf = NULL; + status = __vnic_get_tcp_task_offload( p_adapter, &oid_info ); + break; + + /* Optional General */ + case OID_GEN_SUPPORTED_GUIDS: +#ifdef NDIS51_MINIPORT + case OID_GEN_VLAN_ID: +#endif + + /* Optional General Stats */ + case OID_GEN_RCV_CRC_ERROR: + case OID_GEN_TRANSMIT_QUEUE_LENGTH: + + /* Optional Ethernet Stats */ + case OID_802_3_XMIT_DEFERRED: + case OID_802_3_XMIT_MAX_COLLISIONS: + case OID_802_3_RCV_OVERRUN: + case OID_802_3_XMIT_UNDERRUN: + case OID_802_3_XMIT_HEARTBEAT_FAILURE: + case OID_802_3_XMIT_TIMES_CRS_LOST: + case OID_802_3_XMIT_LATE_COLLISIONS: + case OID_PNP_CAPABILITIES: + status = NDIS_STATUS_NOT_SUPPORTED; + VNIC_TRACE( VNIC_DBG_OID, + (" received an unsupported oid of 0x%.8X!\n" , oid) ); + break; + + case OID_GEN_PROTOCOL_OPTIONS: + case OID_GEN_TRANSPORT_HEADER_OFFSET: +#ifdef NDIS51_MINIPORT + case OID_GEN_MACHINE_NAME: + case OID_GEN_RNDIS_CONFIG_PARAMETER: +#endif + default: + status = NDIS_STATUS_INVALID_OID; + VNIC_TRACE( VNIC_DBG_OID, + (" received an invalid oid of 0x%.8X!\n" , oid) ); + break; + } + + /* + * Complete the request as if it was handled asynchronously to maximize + * code reuse for when we really handle the requests asynchronously. 
+ * Note that this requires the QueryInformation entry point to always + * return NDIS_STATUS_PENDING + */ +complete: + if( status != NDIS_STATUS_PENDING ) + { + _vnic_complete_query( + p_adapter, &oid_info, status, src_buf, buf_len ); + } + + VNIC_EXIT( VNIC_DBG_OID ); + return NDIS_STATUS_PENDING; +} + + +static void +_vnic_complete_query( + IN vnic_adapter_t* const p_adapter, + IN pending_oid_t* const p_oid_info, + IN const NDIS_STATUS status, + IN const void* const p_buf, + IN const ULONG buf_len ) +{ + NDIS_STATUS oid_status = status; + + VNIC_ENTER( VNIC_DBG_OID ); + + CL_ASSERT( status != NDIS_STATUS_PENDING ); + + if( status == NDIS_STATUS_SUCCESS ) + { + if( p_oid_info->buf_len < buf_len ) + { + VNIC_TRACE( VNIC_DBG_OID, + ("Insufficient buffer space. " + "Returning NDIS_STATUS_INVALID_LENGTH.\n") ); + oid_status = NDIS_STATUS_INVALID_LENGTH; + *p_oid_info->p_bytes_needed = buf_len; + *p_oid_info->p_bytes_used = 0; + } + else if( p_oid_info->p_buf ) + { + /* Only copy if we have a distinct source buffer. */ + if( p_buf ) + { + NdisMoveMemory( p_oid_info->p_buf, p_buf, buf_len ); + *p_oid_info->p_bytes_used = buf_len; + } + } + else + { + VNIC_TRACE( VNIC_DBG_OID, + ("Returning NDIS_NOT_ACCEPTED") ); + oid_status = NDIS_STATUS_NOT_ACCEPTED; + } + } + else + { + *p_oid_info->p_bytes_used = 0; + } + + p_adapter->pending_query = FALSE; + + NdisMQueryInformationComplete( p_adapter->h_handle, oid_status ); + + VNIC_EXIT( VNIC_DBG_OID ); +} + + +static NDIS_STATUS +__vnic_get_tcp_task_offload( + IN vnic_adapter_t* p_adapter, + IN pending_oid_t* const p_oid_info ) +{ + NDIS_TASK_OFFLOAD_HEADER *p_offload_hdr; + NDIS_TASK_OFFLOAD *p_offload_task; + NDIS_TASK_TCP_IP_CHECKSUM *p_offload_chksum; + //uint8_t port_num; + ULONG buf_len; + + buf_len = sizeof(NDIS_TASK_OFFLOAD_HEADER) + + sizeof(NDIS_TASK_OFFLOAD) + + sizeof(NDIS_TASK_TCP_IP_CHECKSUM) - 1; + + *(p_oid_info->p_bytes_needed) = buf_len; + + if( p_oid_info->buf_len < buf_len ) + return NDIS_STATUS_INVALID_LENGTH; + + p_offload_hdr = (NDIS_TASK_OFFLOAD_HEADER*)p_oid_info->p_buf; + if( p_offload_hdr->Version != NDIS_TASK_OFFLOAD_VERSION ) + return NDIS_STATUS_INVALID_DATA; + + if( p_offload_hdr->EncapsulationFormat.Encapsulation != + IEEE_802_3_Encapsulation ) + { + return NDIS_STATUS_INVALID_DATA; + } + + p_offload_hdr->OffsetFirstTask = sizeof(NDIS_TASK_OFFLOAD_HEADER); + p_offload_task = (NDIS_TASK_OFFLOAD*)(p_offload_hdr + 1); + p_offload_task->Version = NDIS_TASK_OFFLOAD_VERSION; + p_offload_task->Size = sizeof(NDIS_TASK_OFFLOAD); + p_offload_task->Task = TcpIpChecksumNdisTask; + p_offload_task->OffsetNextTask = 0; + p_offload_task->TaskBufferLength = sizeof(NDIS_TASK_TCP_IP_CHECKSUM); + p_offload_chksum = + (NDIS_TASK_TCP_IP_CHECKSUM*)p_offload_task->TaskBuffer; + + p_offload_chksum->V4Transmit.IpOptionsSupported = + p_adapter->params.UseTxCsum; + p_offload_chksum->V4Transmit.TcpOptionsSupported = + p_adapter->params.UseTxCsum; + p_offload_chksum->V4Transmit.TcpChecksum = + p_adapter->params.UseTxCsum; + p_offload_chksum->V4Transmit.UdpChecksum = + p_adapter->params.UseTxCsum; + p_offload_chksum->V4Transmit.IpChecksum = + p_adapter->params.UseTxCsum; + + p_offload_chksum->V4Receive.IpOptionsSupported = TRUE; + p_offload_chksum->V4Receive.TcpOptionsSupported = TRUE; + p_offload_chksum->V4Receive.TcpChecksum = + p_adapter->params.UseRxCsum; + p_offload_chksum->V4Receive.UdpChecksum = + p_adapter->params.UseRxCsum; + p_offload_chksum->V4Receive.IpChecksum = + p_adapter->params.UseRxCsum; + + 
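+	/*
+	 * The response being built here is an NDIS_TASK_OFFLOAD_HEADER followed
+	 * by a single task TLV. A consumer walks the buffer via the offset
+	 * fields; a minimal sketch of that walk (illustrative only -- it assumes
+	 * the one TcpIpChecksumNdisTask entry this function emits, and p_buf is
+	 * a hypothetical pointer to the completed response buffer):
+	 *
+	 *	NDIS_TASK_OFFLOAD_HEADER *p_hdr = (NDIS_TASK_OFFLOAD_HEADER*)p_buf;
+	 *	NDIS_TASK_OFFLOAD *p_task = (NDIS_TASK_OFFLOAD*)
+	 *		((UCHAR*)p_hdr + p_hdr->OffsetFirstTask);
+	 *	for( ;; )
+	 *	{
+	 *		if( p_task->Task == TcpIpChecksumNdisTask )
+	 *		{
+	 *			NDIS_TASK_TCP_IP_CHECKSUM *p_csum =
+	 *				(NDIS_TASK_TCP_IP_CHECKSUM*)p_task->TaskBuffer;
+	 *			... inspect p_csum->V4Transmit / V4Receive flags ...
+	 *		}
+	 *		if( !p_task->OffsetNextTask )
+	 *			break;
+	 *		p_task = (NDIS_TASK_OFFLOAD*)
+	 *			((UCHAR*)p_task + p_task->OffsetNextTask);
+	 *	}
+	 */
+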
p_offload_chksum->V6Transmit.IpOptionsSupported = FALSE;
+	p_offload_chksum->V6Transmit.TcpOptionsSupported = FALSE;
+	p_offload_chksum->V6Transmit.TcpChecksum = FALSE;
+	p_offload_chksum->V6Transmit.UdpChecksum = FALSE;
+
+	p_offload_chksum->V6Receive.IpOptionsSupported = FALSE;
+	p_offload_chksum->V6Receive.TcpOptionsSupported = FALSE;
+	p_offload_chksum->V6Receive.TcpChecksum = FALSE;
+	p_offload_chksum->V6Receive.UdpChecksum = FALSE;
+
+	*(p_oid_info->p_bytes_used) = buf_len;
+
+	return NDIS_STATUS_SUCCESS;
+}
+
+static NDIS_STATUS
+__vnic_set_tcp_task_offload(
+	IN	vnic_adapter_t*		p_adapter,
+	IN	void* const			p_info_buf,
+	IN	ULONG* const		p_info_len )
+{
+	NDIS_TASK_OFFLOAD_HEADER	*p_offload_hdr;
+	NDIS_TASK_OFFLOAD			*p_offload_task;
+	NDIS_TASK_TCP_IP_CHECKSUM	*p_offload_chksum;
+
+	VNIC_ENTER( VNIC_DBG_OID );
+
+	p_offload_hdr = (NDIS_TASK_OFFLOAD_HEADER*)p_info_buf;
+
+	if( *p_info_len < sizeof(NDIS_TASK_OFFLOAD_HEADER) )
+		return NDIS_STATUS_INVALID_LENGTH;
+
+	if( p_offload_hdr->Version != NDIS_TASK_OFFLOAD_VERSION )
+		return NDIS_STATUS_INVALID_DATA;
+
+	if( p_offload_hdr->Size != sizeof(NDIS_TASK_OFFLOAD_HEADER) )
+		return NDIS_STATUS_INVALID_LENGTH;
+
+	if( !p_offload_hdr->OffsetFirstTask )
+		return NDIS_STATUS_SUCCESS;
+
+	if( p_offload_hdr->EncapsulationFormat.Encapsulation !=
+		IEEE_802_3_Encapsulation )
+	{
+		return NDIS_STATUS_INVALID_DATA;
+	}
+
+	p_offload_task = (NDIS_TASK_OFFLOAD*)
+		(((UCHAR*)p_offload_hdr) + p_offload_hdr->OffsetFirstTask);
+
+	if( *p_info_len < sizeof(NDIS_TASK_OFFLOAD_HEADER) +
+		offsetof( NDIS_TASK_OFFLOAD, TaskBuffer ) +
+		sizeof(NDIS_TASK_TCP_IP_CHECKSUM) )
+	{
+		return NDIS_STATUS_INVALID_LENGTH;
+	}
+
+	if( p_offload_task->Version != NDIS_TASK_OFFLOAD_VERSION )
+		return NDIS_STATUS_INVALID_DATA;
+
+	p_offload_chksum =
+		(NDIS_TASK_TCP_IP_CHECKSUM*)p_offload_task->TaskBuffer;
+
+	if( !p_adapter->params.UseTxCsum &&
+		(p_offload_chksum->V4Transmit.IpOptionsSupported ||
+		p_offload_chksum->V4Transmit.TcpOptionsSupported ||
+		p_offload_chksum->V4Transmit.TcpChecksum ||
+		p_offload_chksum->V4Transmit.UdpChecksum ||
+		p_offload_chksum->V4Transmit.IpChecksum) )
+	{
+		return NDIS_STATUS_NOT_SUPPORTED;
+	}
+
+	if( !p_adapter->params.UseRxCsum &&
+		(p_offload_chksum->V4Receive.IpOptionsSupported ||
+		p_offload_chksum->V4Receive.TcpOptionsSupported ||
+		p_offload_chksum->V4Receive.TcpChecksum ||
+		p_offload_chksum->V4Receive.UdpChecksum ||
+		p_offload_chksum->V4Receive.IpChecksum) )
+	{
+		return NDIS_STATUS_NOT_SUPPORTED;
+	}
+
+	if( p_offload_chksum->V6Receive.IpOptionsSupported ||
+		p_offload_chksum->V6Receive.TcpOptionsSupported ||
+		p_offload_chksum->V6Receive.TcpChecksum ||
+		p_offload_chksum->V6Receive.UdpChecksum ||
+		p_offload_chksum->V6Transmit.IpOptionsSupported ||
+		p_offload_chksum->V6Transmit.TcpOptionsSupported ||
+		p_offload_chksum->V6Transmit.TcpChecksum ||
+		p_offload_chksum->V6Transmit.UdpChecksum )
+	{
+		return NDIS_STATUS_NOT_SUPPORTED;
+	}
+	VNIC_EXIT( VNIC_DBG_OID );
+
+	return NDIS_STATUS_SUCCESS;
+}
+
+
+//! Issues a hardware reset to the NIC and/or resets the driver's software state.
+/* Tear down the connection and start over again. This is only called when there
+is a problem; for example, if a send, query info, or set info timed out.
+MiniportCheckForHang will be called first.
+IRQL = DISPATCH_LEVEL
+
+@param p_addr_reset Pointer to a BOOLEAN that is set to TRUE if the NDIS
+library should call MiniportSetInformation to restore addressing information to the current values.
+@param adapter_context The adapter context allocated at start +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_NOT_RESETTABLE, +NDIS_STATUS_RESET_IN_PROGRESS, NDIS_STATUS_SOFT_ERRORS, NDIS_STATUS_HARD_ERRORS +*/ +NDIS_STATUS +vnic_reset( + OUT PBOOLEAN p_addr_reset, + IN NDIS_HANDLE adapter_context) +{ + vnic_adapter_t* p_adapter; + ib_api_status_t status; + + VNIC_ENTER( VNIC_DBG_INIT ); + + CL_ASSERT( p_addr_reset ); + CL_ASSERT( adapter_context ); + + p_adapter = (vnic_adapter_t*)adapter_context; + + status = vnic_reset_adapter( p_adapter ); + VNIC_EXIT( VNIC_DBG_INIT ); + switch( status ) + { + case IB_SUCCESS: + *p_addr_reset = TRUE; + return NDIS_STATUS_SUCCESS; + + case IB_NOT_DONE: + return NDIS_STATUS_PENDING; + + case IB_INVALID_STATE: + return NDIS_STATUS_RESET_IN_PROGRESS; + + default: + return NDIS_STATUS_HARD_ERRORS; + } +} + +//! Request changes in the state information that the miniport driver maintains +/* For example, this is used to set multicast addresses and the packet filter. +IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@param oid Object ID representing the set operation to be carried out +@param info_buf Buffer containing input for this set and location for any output +@param info_buf_len Number of bytes available in info_buf +@param p_bytes_read Pointer to number of bytes read from info_buf +@param p_bytes_needed Pointer to number of bytes needed to satisfy this oid +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_INVALID_OID, +NDIS_STATUS_INVALID_LENGTH, NDIS_STATUS_INVALID_DATA, NDIS_STATUS_NOT_ACCEPTED, +NDIS_STATUS_NOT_SUPPORTED, NDIS_STATUS_RESOURCES +*/ +NDIS_STATUS +vnic_oid_set_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_read, + OUT PULONG p_bytes_needed ) +{ + vnic_adapter_t* p_adapter; + NDIS_STATUS status; + ULONG buf_len; + pending_oid_t oid_info; + + VNIC_ENTER( VNIC_DBG_OID ); + + CL_ASSERT( adapter_context ); + p_adapter = (vnic_adapter_t*)adapter_context; + + CL_ASSERT( p_bytes_read ); + CL_ASSERT( p_bytes_needed ); + CL_ASSERT( p_adapter->pending_set == 0 ); + + status = NDIS_STATUS_SUCCESS; + *p_bytes_needed = 0; + buf_len = sizeof(ULONG); + + oid_info.oid = oid; + oid_info.p_buf = info_buf; + oid_info.buf_len = info_buf_len; + oid_info.p_bytes_used = p_bytes_read; + oid_info.p_bytes_needed = p_bytes_needed; + + /* do not set anything until IB path initialized and NIC is enabled */ + if( !p_adapter->p_currentPath->carrier ) + { + *p_bytes_read = 0; + return NDIS_STATUS_NOT_ACCEPTED; + } + + switch( oid ) + { + /* Required General */ + case OID_GEN_CURRENT_PACKET_FILTER: + VNIC_TRACE( VNIC_DBG_OID, + (" IOC %d received set for OID_GEN_CURRENT_PACKET_FILTER, %#x\n", + p_adapter->p_currentPath->pViport->ioc_num, + *(uint32_t*)info_buf )); + if ( !p_adapter->p_currentPath->carrier ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + break; + } + if( info_buf_len < sizeof( p_adapter->packet_filter ) ) + { + status = NDIS_STATUS_INVALID_LENGTH; + } + else if( !info_buf ) + { + status = NDIS_STATUS_INVALID_DATA; + } + else + { + p_adapter->set_oid = oid_info; + status = _vnic_process_packet_filter( p_adapter, *((uint32_t*)info_buf) ); + } + break; + + case OID_GEN_CURRENT_LOOKAHEAD: + VNIC_TRACE( VNIC_DBG_OID, + (" received set for OID_GEN_CURRENT_LOOKAHEAD\n" )); + if( info_buf_len < buf_len ) + status = NDIS_STATUS_INVALID_LENGTH; + break; + + case OID_GEN_PROTOCOL_OPTIONS: + VNIC_TRACE( VNIC_DBG_OID, + (" 
received set for OID_GEN_PROTOCOL_OPTIONS\n" )); + if( info_buf_len < buf_len ) + status = NDIS_STATUS_INVALID_LENGTH; + break; + +#ifdef NDIS51_MINIPORT + case OID_GEN_MACHINE_NAME: + VNIC_TRACE( VNIC_DBG_OID, + (" received set for OID_GEN_MACHINE_NAME\n" ) ); + if( info_buf_len < buf_len ) + status = NDIS_STATUS_INVALID_LENGTH; + // else + // status = __vnic_set_machine_name( info_buf, + // (USHORT)info_buf_len ); + break; +#endif + + /* Required Ethernet operational characteristics */ + case OID_802_3_MULTICAST_LIST: + if( !p_adapter->p_currentPath->carrier ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + break; + } + VNIC_TRACE( VNIC_DBG_OID, + (" IOC %d received set for OID_802_3_MULTICAST_LIST\n", + p_adapter->p_currentPath->pViport->ioc_num ) ); + if( info_buf_len > MAX_MCAST * sizeof(mac_addr_t) ) + { + VNIC_TRACE( VNIC_DBG_OID, + (" OID_802_3_MULTICAST_LIST - Multicast list full.\n" ) ); + status = NDIS_STATUS_MULTICAST_FULL; + *p_bytes_needed = MAX_MCAST * sizeof(mac_addr_t); + } + else if( info_buf_len % sizeof(mac_addr_t) ) + { + VNIC_TRACE( VNIC_DBG_OID, + (" OID_802_3_MULTICAST_LIST - Invalid input buffer length.\n" ) ); + status = NDIS_STATUS_INVALID_DATA; + } + else if( info_buf == NULL && info_buf_len != 0 ) + { + VNIC_TRACE( VNIC_DBG_OID, + (" OID_802_3_MULTICAST_LIST - Invalid input buffer.\n" ) ); + status = NDIS_STATUS_INVALID_DATA; + } + else + { + p_adapter->set_oid = oid_info; + status = vnic_set_mcast( p_adapter, (mac_addr_t*)info_buf, + (uint8_t)(info_buf_len / sizeof(mac_addr_t)) ); + } + break; + + case OID_TCP_TASK_OFFLOAD: + VNIC_TRACE( VNIC_DBG_OID, + (" received set for OID_TCP_TASK_OFFLOAD\n" ) ); + buf_len = info_buf_len; + status = __vnic_set_tcp_task_offload( p_adapter, info_buf, &buf_len ); + break; + + /* Optional General */ + case OID_GEN_TRANSPORT_HEADER_OFFSET: + VNIC_TRACE( VNIC_DBG_OID, + ("Set for OID_GEN_TRANSPORT_HEADER_OFFSET\n") ); + break; +#ifdef NDIS51_MINIPORT + case OID_GEN_RNDIS_CONFIG_PARAMETER: + case OID_GEN_VLAN_ID: +#endif + status = NDIS_STATUS_NOT_SUPPORTED; + VNIC_TRACE( VNIC_DBG_OID, + (" received an unsupported oid of 0x%.8X!\n" , oid)); + break; + + case OID_GEN_SUPPORTED_LIST: + case OID_GEN_HARDWARE_STATUS: + case OID_GEN_MEDIA_SUPPORTED: + case OID_GEN_MEDIA_IN_USE: + case OID_GEN_MAXIMUM_FRAME_SIZE: + case OID_GEN_LINK_SPEED: + case OID_GEN_TRANSMIT_BUFFER_SPACE: + case OID_GEN_RECEIVE_BUFFER_SPACE: + case OID_GEN_MAXIMUM_LOOKAHEAD: + case OID_GEN_TRANSMIT_BLOCK_SIZE: + case OID_GEN_RECEIVE_BLOCK_SIZE: + case OID_GEN_MAXIMUM_TOTAL_SIZE: + case OID_GEN_VENDOR_ID: + case OID_GEN_VENDOR_DESCRIPTION: + case OID_GEN_VENDOR_DRIVER_VERSION: + case OID_GEN_DRIVER_VERSION: + case OID_GEN_MAC_OPTIONS: + case OID_GEN_MEDIA_CONNECT_STATUS: + case OID_GEN_MAXIMUM_SEND_PACKETS: + case OID_GEN_SUPPORTED_GUIDS: + case OID_GEN_PHYSICAL_MEDIUM: + default: + status = NDIS_STATUS_INVALID_OID; + VNIC_TRACE( VNIC_DBG_OID, + (" received an invalid oid of 0x%.8X!\n" , oid)); + break; + } + + if( status == NDIS_STATUS_SUCCESS ) + { + *p_bytes_read = buf_len; + } + else + { + if( status == NDIS_STATUS_INVALID_LENGTH ) + { + if ( !*p_bytes_needed ) + { + *p_bytes_needed = buf_len; + } + } + + *p_bytes_read = 0; + } + + VNIC_EXIT( VNIC_DBG_OID ); + return status; +} + + +//! Transfers some number of packets, specified as an array of packet pointers, over the network. +/* For a deserialized driver, these packets are completed asynchronously +using NdisMSendComplete. 
+IRQL <= DISPATCH_LEVEL
+
+@param adapter_context Pointer to vnic_adapter_t structure with per NIC state
+@param packet_array Array of packets to send
+@param num_packets Number of packets in the array
+*/
+void
+vnic_send_packets(
+	IN	NDIS_HANDLE		adapter_context,
+	IN	PPNDIS_PACKET	packet_array,
+	IN	UINT			num_packets )
+{
+	vnic_adapter_t* const	p_adapter = (vnic_adapter_t* const)adapter_context;
+	UINT			packet_num;
+
+	VNIC_ENTER( VNIC_DBG_SEND );
+
+	CL_ASSERT( adapter_context );
+
+	for( packet_num = 0; packet_num < num_packets; ++packet_num )
+	{
+		netpath_xmitPacket( p_adapter->p_currentPath,
+			packet_array[packet_num] );
+	}
+	VNIC_EXIT( VNIC_DBG_SEND );
+}
+
+void
+vnic_pnp_notify(
+	IN	NDIS_HANDLE				adapter_context,
+	IN	NDIS_DEVICE_PNP_EVENT	pnp_event,
+	IN	PVOID					info_buf,
+	IN	ULONG					info_buf_len )
+{
+	vnic_adapter_t	*p_adapter;
+
+	VNIC_ENTER( VNIC_DBG_PNP );
+
+	UNUSED_PARAM( info_buf );
+	UNUSED_PARAM( info_buf_len );
+
+	p_adapter = (vnic_adapter_t*)adapter_context;
+
+	VNIC_TRACE( VNIC_DBG_PNP, ("Event %d IOC[%d]\n", pnp_event, p_adapter->ioc_num ) );
+	if( pnp_event != NdisDevicePnPEventPowerProfileChanged )
+	{
+		InterlockedExchange( (volatile LONG *)&p_adapter->pnp_state, IB_PNP_IOC_REMOVE );
+		vnic_resume_oids( p_adapter );
+	}
+
+	VNIC_EXIT( VNIC_DBG_PNP );
+}
+
+
+void
+vnic_shutdown(
+	IN	PVOID	adapter_context )
+{
+	vnic_adapter_t	*p_adapter;
+	VNIC_ENTER( VNIC_DBG_INIT );
+	p_adapter = (vnic_adapter_t *)adapter_context;
+
+	if( p_adapter )
+	{
+		VNIC_TRACE( VNIC_DBG_INIT,
+			("IOC[%d] Shutdown - early retirement\n", p_adapter->ioc_num ));
+
+		if( p_adapter->p_currentPath &&
+			p_adapter->p_currentPath->pViport )
+		{
+			viport_stopXmit( p_adapter->p_currentPath->pViport );
+			viport_linkDown( p_adapter->p_currentPath->pViport );
+			InterlockedExchange( &p_adapter->p_currentPath->carrier, (LONG)FALSE );
+		}
+		vnic_destroy_adapter( p_adapter );
+	}
+
+	VNIC_EXIT( VNIC_DBG_INIT );
+}
+
+
+void
+vnic_resume_set_oids(
+	IN	vnic_adapter_t* const	p_adapter )
+{
+	NDIS_STATUS		status = NDIS_STATUS_SUCCESS;
+	int				pending_set;
+	pending_oid_t	set_oid = {0};
+
+	VNIC_ENTER( VNIC_DBG_OID );
+
+	NdisAcquireSpinLock( &p_adapter->lock );
+	/*
+	 * Set the status depending on our state. Fail OID requests that
+	 * are pending while we reset the adapter.
+	 */
+	switch( p_adapter->pnp_state )
+	{
+	case IB_PNP_IOC_ADD:
+		break;
+
+	case IB_PNP_IOC_REMOVE:
+	default:
+		status = NDIS_STATUS_NOT_ACCEPTED;
+		break;
+	}
+
+	pending_set = p_adapter->pending_set;
+	if( pending_set )
+	{
+		set_oid = p_adapter->set_oid;
+		--p_adapter->pending_set;
+	}
+	NdisReleaseSpinLock( &p_adapter->lock );
+
+	ASSERT( KeGetCurrentIrql() == DISPATCH_LEVEL );
+
+	if( pending_set )
+	{
+		switch( set_oid.oid )
+		{
+		case OID_GEN_CURRENT_PACKET_FILTER:
+			VNIC_TRACE( VNIC_DBG_OID,
+				(" IOC %d resume PACKET_FILTER set \n",
+				p_adapter->p_currentPath->pViport->ioc_num ) );
+			/* Validation already performed in the SetInformation path.
*/ + p_adapter->packet_filter = *(PULONG)set_oid.p_buf; + NdisMSetInformationComplete( p_adapter->h_handle, status ); + break; + + case OID_GEN_MACHINE_NAME: + status = __vnic_set_machine_name ( p_adapter->set_oid.p_buf, (USHORT)p_adapter->set_oid.buf_len ); + NdisMSetInformationComplete( p_adapter->h_handle, status ); + break; + + case OID_802_3_MULTICAST_LIST: + VNIC_TRACE( VNIC_DBG_OID, + (" IOC %d resume MULTICAST_LIST\n", + p_adapter->p_currentPath->pViport->ioc_num ) ); + + NdisMSetInformationComplete( p_adapter->h_handle, status ); + break; + + default: + CL_ASSERT( set_oid.oid && 0 ); + break; + } + } + + VNIC_EXIT( VNIC_DBG_OID ); +} + + +void +vnic_resume_oids( + IN vnic_adapter_t* const p_adapter ) +{ + ULONG info; + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + BOOLEAN pending_query; + pending_oid_t query_oid = {0}; + KIRQL irql; + + uint8_t mac[HW_ADDR_LEN]; + + VNIC_ENTER( VNIC_DBG_OID ); + + NdisAcquireSpinLock( &p_adapter->lock ); + /* + * Set the status depending on our state. Fail OID requests that + * are pending while we reset the adapter. + */ + if( !p_adapter->p_viport || + p_adapter->p_viport->disconnect == TRUE || + p_adapter->p_viport->errored == TRUE ) + { + status = NDIS_STATUS_NOT_ACCEPTED; + } + + switch( p_adapter->pnp_state ) + { + case IB_PNP_IOC_ADD: + break; + + case IB_PNP_IOC_REMOVE: + default: + status = NDIS_STATUS_NOT_ACCEPTED; + break; + } + + pending_query = p_adapter->pending_query; + + if( pending_query ) + { + query_oid = p_adapter->query_oid; + p_adapter->pending_query = FALSE; + } + NdisReleaseSpinLock( &p_adapter->lock ); + + KeRaiseIrql( DISPATCH_LEVEL, &irql ); + + /* + * If we had a pending OID request for OID_GEN_LINK_SPEED, + * complete it now. Note that we hold the object lock since + * NdisMQueryInformationComplete is called at DISPATCH_LEVEL. + */ + if( pending_query ) + { + switch( query_oid.oid ) + { + case OID_802_3_CURRENT_ADDRESS: + case OID_802_3_PERMANENT_ADDRESS: + if ( status == NDIS_STATUS_SUCCESS ) + { + cl_memcpy( mac, p_adapter->p_viport->hwMacAddress, HW_ADDR_LEN ); + } + _vnic_complete_query( p_adapter, + &query_oid, + status, + mac, + HW_ADDR_LEN ); + break; + case OID_GEN_LINK_SPEED: + info = DEFAULT_LINK_SPEED_x100BPS; + _vnic_complete_query( p_adapter, + &query_oid, + status, + &info, + sizeof( info ) ); + break; + + case OID_GEN_MEDIA_CONNECT_STATUS: + info = ( p_adapter->carrier )? 
NdisMediaStateConnected :
+				NdisMediaStateDisconnected;
+			_vnic_complete_query( p_adapter,
+				&query_oid,
+				status,
+				&info,
+				sizeof( info ) );
+			break;
+		case OID_802_3_MULTICAST_LIST:
+			ASSERT( p_adapter->p_viport );
+			_vnic_complete_query( p_adapter,
+				&query_oid,
+				status,
+				&p_adapter->p_viport->macAddresses[MCAST_ADDR_START],
+				(p_adapter->p_viport->numMacAddresses -
+				MCAST_ADDR_START) * sizeof( mac_addr_t ) );
+			break;
+		case OID_GEN_TRANSMIT_BUFFER_SPACE:
+			info = p_adapter->p_viport->data.xmitPool.bufferSz *
+				p_adapter->p_viport->data.xmitPool.numXmitBufs;
+			_vnic_complete_query( p_adapter,
+				&query_oid,
+				status,
+				&info,
+				sizeof( info ) );
+			break;
+		case OID_GEN_RECEIVE_BUFFER_SPACE:
+			info = p_adapter->p_viport->data.recvPool.bufferSz *
+				p_adapter->p_viport->data.recvPool.poolSz;
+			_vnic_complete_query( p_adapter,
+				&query_oid,
+				status,
+				&info,
+				sizeof( info ) );
+			break;
+		default:
+			CL_ASSERT( query_oid.oid == OID_GEN_LINK_SPEED ||
+				query_oid.oid == OID_GEN_MEDIA_CONNECT_STATUS ||
+				query_oid.oid == OID_802_3_MULTICAST_LIST ||
+				query_oid.oid == OID_802_3_CURRENT_ADDRESS ||
+				query_oid.oid == OID_802_3_PERMANENT_ADDRESS ||
+				query_oid.oid == OID_GEN_RECEIVE_BUFFER_SPACE ||
+				query_oid.oid == OID_GEN_TRANSMIT_BUFFER_SPACE );
+			break;
+		}
+	}
+
+	vnic_resume_set_oids( p_adapter );
+
+	KeLowerIrql( irql );
+
+	VNIC_EXIT( VNIC_DBG_OID );
+}
+
+static NDIS_STATUS
+__vnic_set_machine_name(
+	IN	VOID	*p_uni_array,
+	IN	USHORT	buf_size )
+{
+	NDIS_STATUS	status = NDIS_STATUS_SUCCESS;
+	uint8_t		*p_src_buf = (uint8_t *)p_uni_array;
+	int			i;
+
+	VNIC_ENTER( VNIC_DBG_OID );
+
+	/* Copy the low byte of each UNICODE character. buf_size is in bytes,
+	 * so the character count is buf_size >> 1; clamp the copy to the
+	 * destination buffer to avoid overrunning host_name. */
+	cl_memclr( g_vnic.host_name, sizeof(g_vnic.host_name) );
+	for( i = 0; i < ( buf_size >> 1 ) &&
+		i < (int)sizeof(g_vnic.host_name) - 1; i++ )
+	{
+		g_vnic.host_name[i] = *(p_src_buf + i*2);
+	}
+
+	return status;
+}
+
+static void
+__vnic_read_machine_name( void )
+{
+	/* this code is borrowed from the bus_driver */
+
+	NTSTATUS	nt_status;
+	/* Remember the terminating entry in the table below. */
+	RTL_QUERY_REGISTRY_TABLE	table[2];
+	UNICODE_STRING	hostNamePath;
+	UNICODE_STRING	hostNameW;
+	ANSI_STRING		hostName;
+
+	VNIC_ENTER( VNIC_DBG_INIT );
+
+	/* Get the host name. */
+	RtlInitUnicodeString( &hostNamePath, L"ComputerName\\ComputerName" );
+	RtlInitUnicodeString( &hostNameW, NULL );
+
+	/*
+	 * Clear the table. This clears all the query callback pointers,
+	 * and sets up the terminating table entry.
+	 */
+	cl_memclr( table, sizeof(table) );
+
+	/* Setup the table entries. */
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT | RTL_QUERY_REGISTRY_REQUIRED;
+	table[0].Name = L"ComputerName";
+	table[0].EntryContext = &hostNameW;
+	table[0].DefaultType = REG_SZ;
+	table[0].DefaultData = &hostNameW;
+	table[0].DefaultLength = 0;
+
+	/* Have at it! */
+	nt_status = RtlQueryRegistryValues( RTL_REGISTRY_CONTROL,
+		hostNamePath.Buffer, table, NULL, NULL );
+	if( NT_SUCCESS( nt_status ) )
+	{
+		/* Convert the UNICODE host name to UTF-8 (ASCII). */
+		hostName.Length = 0;
+		hostName.MaximumLength = sizeof(g_vnic.host_name) - 1;
+		hostName.Buffer = (PCHAR)g_vnic.host_name;
+		nt_status = RtlUnicodeStringToAnsiString( &hostName, &hostNameW, FALSE );
+		RtlFreeUnicodeString( &hostNameW );
+	}
+	else
+	{
+		VNIC_TRACE( VNIC_DBG_ERROR, ("Failed to get host name from registry\n") );
+		/* Use the default name...
*/ + cl_memcpy( g_vnic.host_name, + DEFAULT_HOST_NAME, + min (sizeof( g_vnic.host_name), sizeof(DEFAULT_HOST_NAME) ) ); + } + + VNIC_EXIT( VNIC_DBG_INIT ); +} + +/* function: usec_timer_start + uses cl_timer* functionality (init/destroy/stop/start ) + except it takes expiration time parameter in microseconds. +*/ +cl_status_t +usec_timer_start( + IN cl_timer_t* const p_timer, + IN const uint32_t time_usec ) +{ + LARGE_INTEGER due_time; + + CL_ASSERT( p_timer ); + CL_ASSERT( p_timer->pfn_callback ); + CL_ASSERT( KeGetCurrentIrql() <= DISPATCH_LEVEL ); + + /* Due time is in 100 ns increments. Negative for relative time. */ + due_time.QuadPart = -(int64_t)(((uint64_t)time_usec) * 10); + + /* Store the timeout time in the timer object. in microseconds */ + p_timer->timeout_time = cl_get_time_stamp() + (((uint64_t)time_usec)); + + KeSetTimer( &p_timer->timer, due_time, &p_timer->dpc ); + return( CL_SUCCESS ); +} + + +static NDIS_STATUS +_vnic_process_packet_filter( + IN vnic_adapter_t* const p_adapter, + IN ULONG pkt_filter ) +{ + NDIS_STATUS status; + + VNIC_ENTER(VNIC_DBG_FUNC ); + + ASSERT( p_adapter ); + ASSERT( p_adapter->p_viport ); + ASSERT( !(p_adapter->p_viport->updates & SYNC_QUERY) ); + + InterlockedExchange((volatile LONG*)&p_adapter->packet_filter, pkt_filter ); + + ASSERT( (p_adapter->p_viport->updates & ~MCAST_OVERFLOW) == 0 ); + + if( !( p_adapter->p_viport->flags & INIC_FLAG_ENABLE_NIC ) ) + { + p_adapter->p_viport->newFlags &= ~INIC_FLAG_DISABLE_NIC; + p_adapter->p_viport->newFlags |= INIC_FLAG_ENABLE_NIC; + InterlockedOr( &p_adapter->p_viport->updates, NEED_LINK_CONFIG ); + } + + if( pkt_filter & NDIS_PACKET_TYPE_ALL_MULTICAST ) + { + if( !( p_adapter->p_viport->flags & INIC_FLAG_ENABLE_MCAST_ALL ) ) + { + p_adapter->p_viport->newFlags |= INIC_FLAG_ENABLE_MCAST_ALL; + // TODO: Shouldn't MCAST_OVERFLOW be a flag bit, not an update bit? + InterlockedOr( &p_adapter->p_viport->updates, + NEED_LINK_CONFIG | MCAST_OVERFLOW ); + } + } + + if ( pkt_filter & NDIS_PACKET_TYPE_PROMISCUOUS ) + { + if( !( p_adapter->p_viport->flags & INIC_FLAG_ENABLE_PROMISC ) ) + { + p_adapter->p_viport->newFlags |= INIC_FLAG_ENABLE_PROMISC; + InterlockedOr( &p_adapter->p_viport->updates, NEED_LINK_CONFIG ); + } + } + + /* ENABLE NIC, BROADCAST and MULTICAST flags set on start */ + + ++p_adapter->pending_set; + status = _viport_process_query( p_adapter->p_viport, FALSE ); + VNIC_TRACE( VNIC_DBG_OID, + ("LINK CONFIG status %x\n", status )); + if( status != NDIS_STATUS_PENDING ) + { + --p_adapter->pending_set; + } + VNIC_EXIT( VNIC_DBG_FUNC ); + return status; +} + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_driver.h b/branches/Ndi/ulp/inic/kernel/vnic_driver.h new file mode 100644 index 00000000..132f7967 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_driver.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _VNIC_DRIVER_H_ +#define _VNIC_DRIVER_H_ + +#include "vnic_adapter.h" +#include "vnic_debug.h" + + +#if defined(NDIS50_MINIPORT) +#define MAJOR_NDIS_VERSION 5 +#define MINOR_NDIS_VERSION 0 +#elif defined (NDIS51_MINIPORT) +#define MAJOR_NDIS_VERSION 5 +#define MINOR_NDIS_VERSION 1 +#else +#error NDIS Version not defined, try defining NDIS50_MINIPORT or NDIS51_MINIPORT +#endif + +#include + +static const NDIS_OID SUPPORTED_OIDS[] = +{ + OID_GEN_SUPPORTED_LIST, + OID_GEN_HARDWARE_STATUS, + OID_GEN_MEDIA_SUPPORTED, + OID_GEN_MEDIA_IN_USE, + OID_GEN_MAXIMUM_LOOKAHEAD, + OID_GEN_MAXIMUM_FRAME_SIZE, + OID_GEN_LINK_SPEED, + OID_GEN_TRANSMIT_BUFFER_SPACE, + OID_GEN_RECEIVE_BUFFER_SPACE, + OID_GEN_TRANSMIT_BLOCK_SIZE, + OID_GEN_RECEIVE_BLOCK_SIZE, + OID_GEN_VENDOR_ID, + OID_GEN_VENDOR_DESCRIPTION, + OID_GEN_CURRENT_PACKET_FILTER, + OID_GEN_CURRENT_LOOKAHEAD, + OID_GEN_DRIVER_VERSION, + OID_GEN_MAXIMUM_TOTAL_SIZE, + OID_GEN_PROTOCOL_OPTIONS, // ? + OID_GEN_MAC_OPTIONS, + OID_GEN_MEDIA_CONNECT_STATUS, + OID_GEN_MAXIMUM_SEND_PACKETS, + OID_GEN_VENDOR_DRIVER_VERSION, + OID_GEN_PHYSICAL_MEDIUM, + OID_GEN_XMIT_OK, + OID_GEN_RCV_OK, + OID_GEN_XMIT_ERROR, + OID_GEN_RCV_ERROR, + OID_GEN_RCV_NO_BUFFER, + OID_GEN_DIRECTED_BYTES_XMIT, + OID_GEN_DIRECTED_FRAMES_XMIT, + OID_GEN_MULTICAST_BYTES_XMIT, + OID_GEN_MULTICAST_FRAMES_XMIT, + OID_GEN_BROADCAST_BYTES_XMIT, + OID_GEN_BROADCAST_FRAMES_XMIT, + OID_GEN_DIRECTED_BYTES_RCV, + OID_GEN_DIRECTED_FRAMES_RCV, + OID_GEN_MULTICAST_BYTES_RCV, + OID_GEN_MULTICAST_FRAMES_RCV, + OID_GEN_BROADCAST_BYTES_RCV, + OID_GEN_BROADCAST_FRAMES_RCV, + OID_802_3_PERMANENT_ADDRESS, + OID_802_3_CURRENT_ADDRESS, + OID_802_3_MULTICAST_LIST, + OID_802_3_MAXIMUM_LIST_SIZE, + OID_802_3_MAC_OPTIONS, + OID_802_3_RCV_ERROR_ALIGNMENT, + OID_802_3_XMIT_ONE_COLLISION, + OID_802_3_XMIT_MORE_COLLISIONS, + OID_TCP_TASK_OFFLOAD +}; + +static const unsigned char VENDOR_ID[] = {0x00, 0x06, 0x6A, 0x00}; +#define VENDOR_DESCRIPTION "Virtual Ethernet over InfiniBand" +#define DEFAULT_VNIC_NAME "VNIC" + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_drv_obj, + IN PUNICODE_STRING p_reg_path ); + +VOID +vnic_unload( + IN PDRIVER_OBJECT p_drv_obj ); + +NDIS_STATUS +vnic_initialize( + OUT PNDIS_STATUS p_open_err_status, + OUT PUINT p_selected_medium_index, + IN PNDIS_MEDIUM medium_array, + IN UINT medium_array_size, + IN NDIS_HANDLE h_handle, + IN NDIS_HANDLE wrapper_configuration_context ); + +BOOLEAN +vnic_check_for_hang( + IN NDIS_HANDLE adapter_context ); + +void +vnic_halt( + IN NDIS_HANDLE adapter_context ); + +NDIS_STATUS +vnic_oid_query_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_written, + OUT PULONG p_bytes_needed ); + +NDIS_STATUS +vnic_reset( + OUT PBOOLEAN p_addressing_reset, + IN NDIS_HANDLE adapter_context ); + +NDIS_STATUS +vnic_oid_set_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_length, + OUT PULONG p_bytes_read, + OUT PULONG p_bytes_needed ); 
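+
+/*
+ * A minimal sketch (not part of this patch) of how DriverEntry is expected
+ * to wire the entry points declared in this header into the NDIS 5.x
+ * miniport characteristics. The wrapper handle h_ndis_wrapper is assumed
+ * to come from NdisMInitializeWrapper(), and all error handling is omitted:
+ *
+ *	NDIS_MINIPORT_CHARACTERISTICS	chars;
+ *
+ *	NdisZeroMemory( &chars, sizeof(chars) );
+ *	chars.MajorNdisVersion			= MAJOR_NDIS_VERSION;
+ *	chars.MinorNdisVersion			= MINOR_NDIS_VERSION;
+ *	chars.InitializeHandler			= vnic_initialize;
+ *	chars.CheckForHangHandler		= vnic_check_for_hang;
+ *	chars.HaltHandler				= vnic_halt;
+ *	chars.QueryInformationHandler	= vnic_oid_query_info;
+ *	chars.SetInformationHandler		= vnic_oid_set_info;
+ *	chars.ResetHandler				= vnic_reset;
+ *	chars.SendPacketsHandler		= vnic_send_packets;
+ *	chars.PnPEventNotifyHandler		= vnic_pnp_notify;
+ *	chars.AdapterShutdownHandler	= vnic_shutdown;
+ *
+ *	NdisMRegisterMiniport( h_ndis_wrapper, &chars, sizeof(chars) );
+ */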
+ +void +vnic_send_packets( + IN NDIS_HANDLE adapter_context, + IN PPNDIS_PACKET packet_array, + IN UINT num_packets ); + +void +vnic_pnp_notify( + IN NDIS_HANDLE adapter_context, + IN NDIS_DEVICE_PNP_EVENT pnp_event, + IN PVOID info_buf, + IN ULONG info_buf_len ); + +void +vnic_shutdown( + IN PVOID adapter_context ); + +NDIS_STATUS +vnic_get_agapter_interface( + IN NDIS_HANDLE h_handle, + IN vnic_adapter_t *p_adapter); + + +/* same as cl_timer_start() except it takes timeout param in usec */ +/* need to run kicktimer */ +cl_status_t +usec_timer_start( + IN cl_timer_t* const p_timer, + IN const uint32_t time_usec ); + +#endif /* _VNIC_DRIVER_H_ */ \ No newline at end of file diff --git a/branches/Ndi/ulp/inic/kernel/vnic_ib.c b/branches/Ndi/ulp/inic/kernel/vnic_ib.c new file mode 100644 index 00000000..020e8218 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_ib.c @@ -0,0 +1,890 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ +#include "vnic_adapter.h" +#include "vnic_util.h" + +#ifndef max_t +#define max_t(type,x,y) \ + ({ type __x = (x); type __y = (y); __x > __y ? 
__x: __y; }) +#endif + + +static int inicIbInited = 0; + +extern struct _vnic_globals g_vnic; + +void +ib_asyncEvent( + ib_async_event_rec_t *pEventRecord ); + +static void +_ib_qpCompletion( + IN IbQp_t *pQp); +static void +_ibqp_connect_cb( + IN ib_cm_rep_rec_t *p_cm_rep ); + +static void +_ibqp_detach_cb( + IN ib_cm_drep_rec_t *p_drep_rec ); + +static void +_ibqp_dreq_cb( + IN ib_cm_dreq_rec_t *p_dreq_rec ); + +static void +_ibqp_mra_cb( + IN ib_cm_mra_rec_t *p_mra_rec ); + +static void +_ibqp_rej_cb( + IN ib_cm_rej_rec_t *p_rej_rec ); +static +void ib_workCompletion( + IN ib_cq_handle_t h_cq, + IN void *cqContext ); + + +uint8_t +ibca_findPortNum( + struct _viport *p_viport, + uint64_t guid ) +{ + uint8_t port; + + for (port = 0; port < p_viport->p_adapter->ca.numPorts; port++ ) + { + if (p_viport->p_adapter->ca.portGuids[port] == guid ) + { + return port+1; + } + } + return 0; +} + +ib_api_status_t +ibregion_physInit( + IN struct _vnic_adapter* p_adapter, + IN IbRegion_t *pRegion, + IN ib_pd_handle_t hPd, + IN uint64_t *p_vaddr, + IN uint64_t len ) +{ + ib_phys_create_t phys_mem; + ib_phys_range_t phys_range; + ib_api_status_t ib_status = IB_SUCCESS; + uint64_t vaddr = 0; + VNIC_ENTER ( VNIC_DBG_IB ); + + UNUSED_PARAM( p_vaddr ); + + phys_range.base_addr = *(p_vaddr); + phys_range.size = len; + phys_mem.access_ctrl = ( IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE ); + phys_mem.buf_offset = 0; + phys_mem.hca_page_size = PAGE_SIZE; + phys_mem.length = len; + phys_mem.num_ranges = 1; + phys_mem.range_array = &phys_range; + + ib_status = p_adapter->ifc.reg_phys( hPd, + &phys_mem, + &vaddr, + &pRegion->lkey, + &pRegion->rkey, + &pRegion->h_mr ); + + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ib_reg_phys failed status %s(%d)\n", + p_adapter->ifc.get_err_str(ib_status), ib_status ) ); + pRegion->h_mr = NULL; + } + pRegion->virtAddress = vaddr; + pRegion->len = len; + + VNIC_EXIT( VNIC_DBG_IB ); + return ib_status; +} + +ib_api_status_t +ibregion_init ( + IN viport_t *p_viport, + IN OUT IbRegion_t *pRegion, + IN ib_pd_handle_t hPd, + IN void* __ptr64 vaddr, + IN uint64_t len, + IN ib_access_t access_ctrl ) +{ + ib_api_status_t ib_status; + ib_mr_create_t create_mem; + + VNIC_ENTER ( VNIC_DBG_IB ); + + create_mem.length = len; + create_mem.vaddr = vaddr; + create_mem.access_ctrl = access_ctrl; + + ib_status = p_viport->p_adapter->ifc.reg_mem( hPd, + &create_mem, + &pRegion->lkey, + &pRegion->rkey, + &pRegion->h_mr ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ib_reg_mem failed status %s(%d)\n", + p_viport->p_adapter->ifc.get_err_str( ib_status ), ib_status )); + pRegion->h_mr = NULL; + } + else + { + pRegion->len = len; + pRegion->virtAddress = (uint64_t)( vaddr ); + } + VNIC_EXIT ( VNIC_DBG_IB ); + return ib_status; +} + +void +ibregion_cleanup( + IN viport_t *p_viport, + IN IbRegion_t *pRegion ) +{ + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_IB ); + + if( pRegion->h_mr != NULL ) + { + ib_status = p_viport->p_adapter->ifc.dereg_mr( pRegion->h_mr ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Dereg MR failed status (%d)\n", ib_status )); + return; + } + InterlockedExchangePointer( (void *)&pRegion->h_mr, NULL ); + } + + VNIC_EXIT( VNIC_DBG_IB ); +} + + +void +ibqp_construct( + IN OUT IbQp_t *pQp, + IN viport_t *pViport ) +{ + VNIC_ENTER ( VNIC_DBG_IB ); + + ASSERT( pQp->qpState == IB_UNINITTED ); + + pQp->qp = NULL; + pQp->cq = NULL; + pQp->pViport = pViport; + pQp->pCa = &pViport->p_adapter->ca; + + 
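+	/*
+	 * qpState tracks the queue pair lifecycle from this point on. In
+	 * sketch, the transitions driven by this file are (error paths
+	 * collapse back to IB_DETACHED):
+	 *
+	 *	IB_UNINITTED --ibqp_init()-----> IB_INITTED
+	 *	IB_INITTED   --ibqp_connect()--> IB_ATTACHING
+	 *	IB_ATTACHING --CM REP + RTU----> IB_ATTACHED
+	 *	IB_ATTACHED  --ibqp_detach()---> IB_DETACHING --CM DREP--> IB_DETACHED
+	 *	any state    --ibqp_cleanup()--> IB_UNINITTED
+	 */
+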
NdisAllocateSpinLock( &pQp->qpLock ); + + InitializeListHead( &pQp->listPtrs); +} + + +ib_api_status_t +ibqp_init( + IN OUT IbQp_t *pQp, + IN uint64_t guid, + IN IbConfig_t *p_conf ) +{ + ib_qp_create_t attribCreate; + ib_qp_mod_t attribMod; + ib_qp_attr_t qpAttribs; + + ib_cq_create_t cq_create; + ib_cq_handle_t h_cq; + + ib_api_status_t ib_status = IB_SUCCESS; + + VNIC_ENTER ( VNIC_DBG_IB ); + + if (pQp->qpState != IB_UNINITTED && pQp->qpState != IB_DETACHED ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("ibqp_init: out of state (%d)\n", pQp->qpState) ); + return IB_ERROR; + } + + InterlockedExchange( &pQp->qpState, IB_UNINITTED ); + + pQp->p_conf = p_conf; + + cq_create.size = p_conf->numSends + p_conf->numRecvs; + cq_create.pfn_comp_cb = ib_workCompletion; + cq_create.h_wait_obj = NULL; + + ib_status = pQp->pViport->p_adapter->ifc.create_cq( + pQp->pViport->p_adapter->h_ca, &cq_create, pQp, NULL, &h_cq ); + + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Failed allocating completion queue\n") ); + goto err1; + } + + pQp->cq = h_cq; + + cl_memclr( &attribCreate, sizeof(attribCreate) ); + + attribCreate.qp_type = IB_QPT_RELIABLE_CONN; + attribCreate.sq_depth = p_conf->numSends; + attribCreate.rq_depth = p_conf->numRecvs; + attribCreate.sq_sge = p_conf->sendGather; + attribCreate.rq_sge = p_conf->recvScatter; + attribCreate.h_sq_cq = pQp->cq; + attribCreate.h_rq_cq = pQp->cq; + attribCreate.sq_signaled = FALSE; + + ib_status = pQp->pViport->p_adapter->ifc.create_qp( + pQp->pViport->p_adapter->ca.hPd, &attribCreate, pQp, NULL, &pQp->qp ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_IB, ("Create QP failed %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + goto err2; + } + ib_status = pQp->pViport->p_adapter->ifc.query_qp(pQp->qp, &qpAttribs); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, ("Query QP failed %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + goto err3; + } + pQp->qpNumber = qpAttribs.num; + pQp->portGuid = guid; + cl_memclr( &attribMod, sizeof(attribMod) ); + + attribMod.req_state = IB_QPS_INIT; + attribMod.state.init.primary_port = ibca_findPortNum( pQp->pViport, guid ); + attribMod.state.init.pkey_index = 0; + attribMod.state.init.access_ctrl = IB_AC_LOCAL_WRITE; + attribMod.state.init.access_ctrl |= (p_conf->sendGather > 1) ? 
IB_AC_RDMA_WRITE : 0; + + ib_status = pQp->pViport->p_adapter->ifc.modify_qp( pQp->qp, &attribMod ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_IB, ("Init QP failed %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + +err3: + pQp->pViport->p_adapter->ifc.destroy_qp( pQp->qp, NULL ); +err2: + pQp->pViport->p_adapter->ifc.destroy_cq( pQp->cq, NULL ); + } + else + { + InterlockedExchange( &pQp->qpState, IB_INITTED ); + } + +err1: + VNIC_EXIT ( VNIC_DBG_IB ); + return ib_status; +} + + +ib_api_status_t +ibqp_connect( + IN IbQp_t *pQp) +{ + IbCa_t *pCa; + viport_t *p_viport; + IbConfig_t *p_conf; + ib_api_status_t ib_status = IB_SUCCESS; + ib_cm_req_t conn_req; + + VNIC_ENTER( VNIC_DBG_IB ); + + if ( pQp->qpState != IB_INITTED ) + { + VNIC_TRACE_EXIT( VNIC_DBG_IB, + ("ibqp_connect: out of state (%d)\n",pQp->qpState) ); + return IB_INVALID_STATE; + } + + p_viport = pQp->pViport; + pCa = pQp->pCa; + p_conf = pQp->p_conf; + +#ifdef VNIC_STATISTIC + pQp->statistics.connectionTime = cl_get_time_stamp(); +#endif + + cl_memclr( &conn_req, sizeof(conn_req)); + + conn_req.h_al = p_viport->p_adapter->h_al; + conn_req.qp_type = IB_QPT_RELIABLE_CONN; + conn_req.h_qp = pQp->qp; + conn_req.p_req_pdata = (uint8_t*)&pQp->p_conf->connData; + conn_req.req_length = sizeof( Inic_ConnectionData_t ); + conn_req.svc_id = pQp->p_conf->sid; + conn_req.pkey = pQp->p_conf->pathInfo.pkey; + + conn_req.p_primary_path = &pQp->p_conf->pathInfo; + + conn_req.retry_cnt = (uint8_t)pQp->p_conf->retryCount; + conn_req.rnr_nak_timeout = pQp->p_conf->minRnrTimer; + conn_req.rnr_retry_cnt = (uint8_t)pQp->p_conf->rnrRetryCount; + conn_req.max_cm_retries = 5; + conn_req.remote_resp_timeout = ib_path_rec_pkt_life( &pQp->p_conf->pathInfo ) + 1; + conn_req.local_resp_timeout = ib_path_rec_pkt_life( &pQp->p_conf->pathInfo ) + 1; + + conn_req.compare_length = 0; + conn_req.resp_res = 0; + + conn_req.flow_ctrl = TRUE; + + conn_req.pfn_cm_req_cb = NULL; + conn_req.pfn_cm_rep_cb = _ibqp_connect_cb; + conn_req.pfn_cm_mra_cb = _ibqp_mra_cb; + conn_req.pfn_cm_rej_cb = _ibqp_rej_cb; + + InterlockedExchange( &pQp->qpState, IB_ATTACHING ); + + ib_status = p_viport->p_adapter->ifc.cm_req ( &conn_req ); + if ( ib_status != IB_SUCCESS && ib_status != IB_PENDING ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Connect request return status %s\n", + p_viport->p_adapter->ifc.get_err_str( ib_status )) ); + InterlockedExchange( &pQp->qpState, IB_DETACHED ); + return ib_status; + } + + if( cl_event_wait_on( + &p_viport->conn_event, EVENT_NO_TIMEOUT, FALSE ) != CL_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, ("conn event timeout!\n") ); + return IB_TIMEOUT; + } + + if( pQp->qpState != IB_ATTACHED ) + { + VNIC_TRACE( VNIC_DBG_ERROR, ("QP connect failed\n") ); + ib_status = IB_ERROR; + } + + VNIC_EXIT( VNIC_DBG_IB ); + return ib_status; +} + + +static void +_ibqp_detach_cb( + IN ib_cm_drep_rec_t *p_drep_rec ) +{ + IbQp_t *pQp = (IbQp_t * __ptr64 )p_drep_rec->qp_context; + VNIC_ENTER( VNIC_DBG_IB ); + CL_ASSERT( p_drep_rec ); + + InterlockedExchange( &pQp->qpState, IB_DETACHED ); + viport_failure( pQp->pViport ); + + VNIC_EXIT( VNIC_DBG_IB ); +} + +static void +_ibqp_rej_cb( + IN ib_cm_rej_rec_t *p_rej_rec ) +{ + IbQp_t *pQp = (IbQp_t * __ptr64 )p_rej_rec->qp_context; + CL_ASSERT(p_rej_rec ); + + InterlockedExchange( &pQp->qpState, IB_DETACHED ); + switch ( p_rej_rec->rej_status ) + { + case IB_REJ_USER_DEFINED: + + VNIC_TRACE ( VNIC_DBG_IB | VNIC_DBG_ERROR, + ("Conn req user reject status %d\nARI: %s\n", + cl_ntoh16( 
p_rej_rec->rej_status ), + p_rej_rec->p_ari )); + break; + default: + VNIC_TRACE( VNIC_DBG_IB | VNIC_DBG_ERROR, + ("Conn req reject status %d\n", + cl_ntoh16( p_rej_rec->rej_status )) ); + } + viport_failure( pQp->pViport ); + cl_event_signal( &pQp->pViport->conn_event ); +} + +static void +_ibqp_mra_cb( + IN ib_cm_mra_rec_t *p_mra_rec ) +{ + VNIC_ENTER( VNIC_DBG_IB ); + CL_ASSERT( p_mra_rec ); + UNREFERENCED_PARAMETER( p_mra_rec ); + VNIC_EXIT( VNIC_DBG_IB ); +} + +static void +_ibqp_dreq_cb( + IN ib_cm_dreq_rec_t *p_dreq_rec ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + ib_cm_drep_t cm_drep; + IbQp_t *pQp = (IbQp_t * __ptr64 )p_dreq_rec->qp_context; + + VNIC_ENTER( VNIC_DBG_IB ); + CL_ASSERT( p_dreq_rec ); + + cm_drep.drep_length = 0; + cm_drep.p_drep_pdata = NULL; + + ib_status = pQp->pViport->p_adapter->ifc.cm_drep( + p_dreq_rec->h_cm_dreq, &cm_drep ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("dreq_cb failed status %s(%d)\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status ), ib_status)); + return; + } + + InterlockedExchange (&pQp->qpState, IB_DETACHED ); + + VNIC_EXIT( VNIC_DBG_IB ); +} + +void +ibqp_detach( + IN IbQp_t *pQp ) +{ + ib_cm_dreq_t cm_dreq; + ib_api_status_t ib_status = IB_SUCCESS; + + VNIC_ENTER( VNIC_DBG_IB ); + + if( InterlockedCompareExchange( &pQp->qpState, + IB_DETACHING, IB_ATTACHED ) == IB_ATTACHED ) + { + NdisAcquireSpinLock( &pQp->qpLock ); + + cm_dreq.h_qp = pQp->qp; + cm_dreq.qp_type = IB_QPT_RELIABLE_CONN; + cm_dreq.p_dreq_pdata = NULL; + cm_dreq.dreq_length = 0; + cm_dreq.pfn_cm_drep_cb = _ibqp_detach_cb; + ib_status = pQp->pViport->p_adapter->ifc.cm_dreq( &cm_dreq ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + (" cm_dreq failed status %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status ))); + } + + NdisReleaseSpinLock( &pQp->qpLock ); + + ib_status = pQp->pViport->p_adapter->ifc.destroy_qp( pQp->qp, NULL ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + (" QP destroy failed status %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status ))); + } + } + + VNIC_EXIT( VNIC_DBG_IB ); + return; +} + +void +ibqp_cleanup( + IN IbQp_t *pQp ) +{ + ib_api_status_t ib_status = IB_SUCCESS; + LONG qp_state; + + VNIC_ENTER( VNIC_DBG_IB ); + + qp_state = InterlockedExchange( &pQp->qpState, IB_UNINITTED ); + if ( qp_state != IB_UNINITTED && + qp_state != IB_DETACHED && + qp_state != IB_DETACHING ) + { + ib_status = pQp->pViport->p_adapter->ifc.destroy_qp( pQp->qp, NULL ); + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("destroy_qp failed status %s\n", + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + } + } + + VNIC_EXIT( VNIC_DBG_IB ); + return; +} + +ib_api_status_t ibqp_postSend(IbQp_t *pQp, Io_t *pIo) +{ + ib_api_status_t ib_status; + +#ifdef VNIC_STATISTIC + int64_t postTime; +#endif /* VNIC_STATISTIC */ + + VNIC_ENTER( VNIC_DBG_IB ); + + NdisAcquireSpinLock( &pQp->qpLock ); + + if( pQp->qpState != IB_ATTACHED ) + { + ib_status = IB_INVALID_STATE; + goto failure; + } + +#ifdef VNIC_STATISTIC + pIo->time = postTime = cl_get_time_stamp(); +#endif /* VNIC_STATISTIC */ + if (pIo->wrq.wr_type == WR_RDMA_WRITE ) + pIo->type = RDMA; + else + pIo->type = SEND; + + ib_status = + pQp->pViport->p_adapter->ifc.post_send( pQp->qp, &pIo->wrq, NULL ); + +#ifdef VNIC_STATISTIC + postTime = cl_get_time_stamp() - postTime; +#endif /* VNIC_STATISTIC */ + + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, ("ib_post_send returned 
%s\n",
+			pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) );
+	}
+	else
+	{
+#ifdef VNIC_STATISTIC
+		if (pIo->wrq.wr_type == WR_RDMA_WRITE)
+		{
+			pQp->statistics.rdmaPostTime += postTime;
+			pQp->statistics.rdmaPostIos++;
+		}
+		else
+		{
+			pQp->statistics.sendPostTime += postTime;
+			pQp->statistics.sendPostIos++;
+		}
+#endif /* VNIC_STATISTIC */
+	}
+
+failure:
+	NdisReleaseSpinLock( &pQp->qpLock );
+	VNIC_EXIT( VNIC_DBG_IB );
+	return ib_status;
+}
+
+
+ib_api_status_t
+ibqp_postRecv(IbQp_t *pQp, Io_t *pIo)
+{
+	ib_api_status_t	ib_status = IB_SUCCESS;
+#ifdef VNIC_STATISTIC
+	int64_t		postTime;
+#endif /* VNIC_STATISTIC */
+
+	NdisAcquireSpinLock( &pQp->qpLock );
+	/* can post receives before connecting the queue pair */
+	if( pQp->qpState != IB_INITTED && pQp->qpState != IB_ATTACHED )
+	{
+		NdisReleaseSpinLock( &pQp->qpLock );
+		return IB_INVALID_STATE;
+	}
+	pIo->type = RECV;
+
+#ifdef VNIC_STATISTIC
+	postTime = cl_get_time_stamp();
+	if (pIo->time != 0)
+	{
+		pQp->statistics.recvCompTime += postTime - pIo->time;
+		pQp->statistics.recvCompIos++;
+	}
+#endif /* VNIC_STATISTIC */
+
+	ib_status =
+		pQp->pViport->p_adapter->ifc.post_recv(pQp->qp, &pIo->r_wrq, NULL );
+
+#ifdef VNIC_STATISTIC
+	postTime = (cl_get_time_stamp()) - postTime;
+#endif /* VNIC_STATISTIC */
+
+	if( ib_status != IB_SUCCESS )
+	{
+		VNIC_TRACE( VNIC_DBG_ERROR,
+			("Post Recv failed status %s(%d)\n",
+			pQp->pViport->p_adapter->ifc.get_err_str(ib_status), ib_status ));
+
+		NdisReleaseSpinLock( &pQp->qpLock );
+		return ib_status;
+	}
+	else
+	{
+#ifdef VNIC_STATISTIC
+		pQp->statistics.recvPostTime += postTime;
+		pQp->statistics.recvPostIos++;
+#endif /* VNIC_STATISTIC */
+	}
+
+	NdisReleaseSpinLock( &pQp->qpLock );
+	return ib_status;
+}
+
+
+static void
+_ibqp_connect_cb(
+	IN	ib_cm_rep_rec_t		*p_cm_rep )
+{
+	IbQp_t			*pQp;
+	ib_api_status_t	ib_status = IB_SUCCESS;
+
+	union
+	{
+		ib_cm_rtu_t	cm_rtu;
+		ib_cm_mra_t	cm_mra;
+		ib_cm_rej_t	cm_rej;
+	} u_reply;
+
+	viport_t		*p_viport;
+
+	VNIC_ENTER( VNIC_DBG_IB );
+
+	pQp = (IbQp_t * __ptr64 )p_cm_rep->qp_context;
+	p_viport = pQp->pViport;
+
+	ASSERT( pQp->qpState == IB_ATTACHING );
+
+	ib_status = p_viport->p_adapter->ifc.rearm_cq( pQp->cq, FALSE );
+	if ( ib_status != IB_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Rearm CQ failed %s\n",
+			p_viport->p_adapter->ifc.get_err_str( ib_status )) );
+
+		cl_memclr( &u_reply.cm_rej, sizeof( u_reply.cm_rej ) );
+		u_reply.cm_rej.rej_status = IB_REJ_INSUF_RESOURCES;
+
+		p_viport->p_adapter->ifc.cm_rej( p_cm_rep->h_cm_rep, &u_reply.cm_rej );
+		goto err;
+	}
+
+	cl_memclr( &u_reply.cm_rtu, sizeof( u_reply.cm_rtu ) );
+	u_reply.cm_rtu.access_ctrl = IB_AC_LOCAL_WRITE;
+
+	if( pQp->p_conf->sendGather > 1 )
+		u_reply.cm_rtu.access_ctrl |= ( IB_AC_RDMA_WRITE );
+
+	u_reply.cm_rtu.pfn_cm_dreq_cb = _ibqp_dreq_cb;
+
+	ib_status = p_viport->p_adapter->ifc.cm_rtu( p_cm_rep->h_cm_rep, &u_reply.cm_rtu );
+	if ( ib_status != IB_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Send RTU failed\n") );
+err:
+		InterlockedExchange( &pQp->qpState, IB_DETACHED );
+		viport_failure( p_viport );
+	}
+	else
+	{
+		InterlockedExchange( &pQp->qpState, IB_ATTACHED );
+	}
+
+	cl_event_signal( &p_viport->conn_event );
+	VNIC_EXIT( VNIC_DBG_IB );
+}
+
+
+#define WC_LIST_SIZE_TO_POLL	32
+static void
+_ib_qpCompletion(
+	IN	IbQp_t	*pQp )
+{
+	Io_t			*pIo;
+	ib_wc_t			wc[WC_LIST_SIZE_TO_POLL];
+	ib_wc_t			*p_free_wc;
+	ib_wc_t			*p_done_wc;
+	ib_api_status_t	ib_status = IB_SUCCESS;
+	int				i;
+
+#ifdef VNIC_STATISTIC
+	int64_t			compTime;
+	uint32_t		compNum = 0;
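+
+	/*
+	 * Note on the completion scheme below: the WC_LIST_SIZE_TO_POLL work
+	 * completion entries are chained through p_next and handed to poll_cq()
+	 * in one call, so each callback drains up to 32 completions without
+	 * re-entering the verbs layer. The same pattern in isolation (a sketch;
+	 * ifc, h_cq and process() stand in for the interface table, CQ handle
+	 * and per-IO handling used in this file):
+	 *
+	 *	ib_wc_t		wc[WC_LIST_SIZE_TO_POLL];
+	 *	ib_wc_t		*p_free_wc = wc, *p_done_wc = NULL;
+	 *	int			i;
+	 *
+	 *	for( i = 0; i < WC_LIST_SIZE_TO_POLL - 1; i++ )
+	 *		wc[i].p_next = &wc[i + 1];
+	 *	wc[WC_LIST_SIZE_TO_POLL - 1].p_next = NULL;
+	 *
+	 *	if( ifc.poll_cq( h_cq, &p_free_wc, &p_done_wc ) == IB_SUCCESS )
+	 *		for( ; p_done_wc; p_done_wc = p_done_wc->p_next )
+	 *			process( p_done_wc );	... one IO per completion ...
+	 */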
+#endif /* VNIC_STATISTIC */ + + VNIC_ENTER ( VNIC_DBG_IB ); + + if ( pQp->qpState != IB_ATTACHING && pQp->qpState != IB_ATTACHED ) + return; + +#ifdef VNIC_STATISTIC + compTime = cl_get_time_stamp(); + pQp->statistics.numCallbacks++; +#endif /* VNIC_STATISTIC */ + + for( i = 0; i < (WC_LIST_SIZE_TO_POLL - 1); i++ ) + { + wc[i].p_next = &wc[i + 1]; + } + wc[(WC_LIST_SIZE_TO_POLL - 1)].p_next = NULL; + + p_free_wc = wc; + p_done_wc = NULL; + + ib_status = pQp->pViport->p_adapter->ifc.poll_cq( pQp->cq, &p_free_wc, &p_done_wc ); + + if ( ib_status != IB_SUCCESS && ib_status != IB_NOT_FOUND ) + { + VNIC_TRACE ( VNIC_DBG_ERROR, + ("ib_poll_cq failed status %d(%s)\n", ib_status, + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + return; + } + + while ( p_done_wc ) + { + pIo = (Io_t *)(uintn_t)p_done_wc->wr_id; + + /* keep completion status for proper ndis packet return status */ + pIo->wc_status = p_done_wc->status; + + if( p_done_wc->status != IB_WCS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_IB, + ("Failed Completion: WcType = %d, Status = %d, Length = %d\n", + p_done_wc->wc_type, + p_done_wc->status, + ( p_done_wc->wc_type == IB_WC_RECV )? p_done_wc->length : 0 ) ); + + if( pIo && pIo->type == RDMA ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Failed RDMA Op, WC type = %d, WC status = %d IO type %d\n", + p_done_wc->wc_type, p_done_wc->status, pIo->type )); + + /* we must complete send packets */ + (*pIo->pRoutine)( pIo ); + } + } + else if(pIo) + { + if( pIo->pRoutine ) + { + (*pIo->pRoutine)( pIo ); + } + } + + p_done_wc = p_done_wc->p_next; + } + + ib_status = pQp->pViport->p_adapter->ifc.rearm_cq( pQp->cq, FALSE ); + + if ( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Rearm CQ failed status %d(%s)\n", ib_status, + pQp->pViport->p_adapter->ifc.get_err_str( ib_status )) ); + viport_failure( pQp->pViport ); + } + return; +} + + +static +void ib_workCompletion( + IN ib_cq_handle_t h_cq, + IN void *cqContext ) +{ + IbQp_t *pQp; + + VNIC_ENTER ( VNIC_DBG_IB ); + UNREFERENCED_PARAMETER( h_cq ); + pQp = (IbQp_t *)cqContext; + _ib_qpCompletion(pQp); + + VNIC_EXIT ( VNIC_DBG_IB ); + return; +} + +void +ib_asyncEvent( + IN ib_async_event_rec_t *pEventRecord ) +{ + vnic_adapter_t *p_adapter; + + VNIC_ENTER ( VNIC_DBG_IB ); + + if ( pEventRecord ) + { + p_adapter = (vnic_adapter_t * __ptr64 )pEventRecord->context; + + switch ( pEventRecord->code ) + { + case IB_AE_QP_COMM: + case IB_AE_QP_FATAL: + VNIC_TRACE( VNIC_DBG_ERROR, + ("Async Event %d\n", pEventRecord->code )); + break; + case IB_AE_PORT_DOWN: + default: + VNIC_TRACE( VNIC_DBG_ERROR, + ("Async Event %d received\n", pEventRecord->code) ); + if( p_adapter && p_adapter->p_viport ) + viport_stopXmit( p_adapter->p_viport ); + break; + } + } + else + { + VNIC_TRACE( VNIC_DBG_ERROR, ("Unknown NULL event\n") ); + } + VNIC_EXIT ( VNIC_DBG_IB ); +} + + + + + + + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_ib.h b/branches/Ndi/ulp/inic/kernel/vnic_ib.h new file mode 100644 index 00000000..2755fddd --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_ib.h @@ -0,0 +1,209 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ +#ifndef _VNIC_IB_H_ +#define _VNIC_IB_H_ + + +#include +#include +#include "vnic_trailer.h" +struct Io; + +typedef void (CompRoutine_t)(struct Io *pIo); + +#define MAX_HCAS 4 +#define MAX_NUM_SGE 8 + +#define MAX_PHYS_MEMORY 0xFFFFFFFFFFFFFFFF +#define CONTROL_SID 0 +#define DATA_SID 1 + +#include +typedef union _vnic_sid { + uint64_t as_uint64; + struct { + uint8_t base_id; /* base id for vnic is 0x10 */ + uint8_t oui[3]; /* OUI */ + uint16_t reserved; /* should be zero */ + uint8_t type; /* control or data */ + uint8_t ioc_num; /* ioc number */ + }s; +} vnic_sid_t; + +typedef union _vnic_ioc_guid { + uint64_t as_uint64; + struct { + uint8_t oui[3]; + uint8_t ioc_num; + uint32_t counter; /* SST device type: 8 bits, counter:24 bits */ + }s; +} vnic_ioc_guid_t; +#include + +typedef enum { + IB_UNINITTED = 0, + IB_INITTED, + IB_ATTACHING, + IB_ATTACHED, + IB_DETACHING, + IB_DETACHED, + IB_DISCONNECTED +} IbState_t; +#pragma warning ( disable : 4324 ) +typedef struct _vnic_path_record { + cl_list_item_t list_entry; + uint32_t num_entries; + ib_path_rec_t path_rec; +} vnic_path_record_t; +#pragma warning( default : 4324 ) + +typedef struct IbQp { + LIST_ENTRY listPtrs; + struct _viport *pViport; + struct IbConfig *p_conf; + NDIS_SPIN_LOCK qpLock; + volatile LONG qpState; + uint32_t qpNumber; + struct IbCa *pCa; + uint64_t portGuid; + ib_qp_handle_t qp; + ib_cq_handle_t cq; +#ifdef VNIC_STATISTIC + struct { + int64_t connectionTime; + int64_t rdmaPostTime; + uint32_t rdmaPostIos; + int64_t rdmaCompTime; + uint32_t rdmaCompIos; + int64_t sendPostTime; + uint32_t sendPostIos; + int64_t sendCompTime; + uint32_t sendCompIos; + int64_t recvPostTime; + uint32_t recvPostIos; + int64_t recvCompTime; + uint32_t recvCompIos; + uint32_t numIos; + uint32_t numCallbacks; + uint32_t maxIos; + } statistics; +#endif /* VNIC_STATISTIC */ +} IbQp_t; + +typedef struct IbRegion { + uint64_t virtAddress; + net64_t len; + ib_mr_handle_t h_mr; + net32_t lkey; + net32_t rkey; +} IbRegion_t; + + +#define VNIC_CA_MAX_PORTS 2 +typedef struct IbCa { + cl_list_item_t list_entry; + net64_t caGuid; + ib_pd_handle_t hPd; + IbRegion_t region; + uint32_t numPorts; + uint64_t portGuids[VNIC_CA_MAX_PORTS]; +} IbCa_t; + +typedef enum _OpType { RECV, RDMA, SEND }OpType_t; + +typedef struct Io { + LIST_ENTRY listPtrs; + struct _viport *pViport; + CompRoutine_t *pRoutine; + ib_send_wr_t wrq; + ib_recv_wr_t r_wrq; + ib_wc_status_t wc_status; +#ifdef VNIC_STATISTIC + int64_t time; +#endif /* VNIC_STATISTIC */ + OpType_t type; +} Io_t; + +typedef struct RdmaIo { + Io_t io; + ib_local_ds_t dsList[MAX_NUM_SGE]; + uint16_t index; + uint32_t len; + NDIS_PACKET *p_packet; + NDIS_BUFFER *p_buf; + ULONG packet_sz; + struct ViportTrailer *p_trailer; + uint8_t data[2* VIPORT_TRAILER_ALIGNMENT]; +} RdmaIo_t; + +typedef 
struct SendIo { + Io_t io; + ib_local_ds_t dsList; +} SendIo_t; + +typedef struct RecvIo { + Io_t io; + ib_local_ds_t dsList; +} RecvIo_t; + +void +ibqp_construct( + IN OUT IbQp_t *pQp, + IN struct _viport *pViport ); +ib_api_status_t ibqp_init(IbQp_t *pQp, uint64_t guid, struct IbConfig *p_conf); +ib_api_status_t ibqp_connect(IbQp_t *pQp); +void ibqp_detach(IbQp_t *pQp); +void ibqp_cleanup(IbQp_t *pQp); +ib_api_status_t ibqp_postSend(IbQp_t *pQp, Io_t *pIo); +ib_api_status_t ibqp_postRecv(IbQp_t *pQp, Io_t *pIo); + +uint8_t ibca_findPortNum( struct _viport *p_viport, uint64_t guid ); + +ib_api_status_t +ibregion_init( + IN struct _viport *p_viport, + OUT IbRegion_t *pRegion, + IN ib_pd_handle_t hPd, + IN void* __ptr64 vaddr, + IN uint64_t len, + IN ib_access_t access_ctrl ); + +void +ibregion_cleanup( + struct _viport *p_viport, + IbRegion_t *pRegion ); + +void ib_asyncEvent( ib_async_event_rec_t *pEventRecord ); + +#define ibpd_fromCa(pCa) (&(pCa)->pd) + + +#endif /* _VNIC_IB_H_ */ diff --git a/branches/Ndi/ulp/inic/kernel/vnic_netpath.c b/branches/Ndi/ulp/inic/kernel/vnic_netpath.c new file mode 100644 index 00000000..9d2af3b1 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_netpath.c @@ -0,0 +1,217 @@ + +#include "vnic_adapter.h" + +extern vnic_globals_t g_vnic; + + +void +netpath_init( + IN Netpath_t *pNetpath, + IN vnic_adapter_t *p_adapter ) +{ + int i; + + VNIC_ENTER( VNIC_DBG_FUNC ); + + pNetpath->p_adapter = p_adapter; + pNetpath->carrier = 0; + pNetpath->pViport = NULL; + pNetpath->timerState = NETPATH_TS_IDLE; + + for ( i = 0; i < INICNP_NUM_EVENTS; i++ ) + { + p_adapter->np_event[i].p_adapter = p_adapter; + p_adapter->np_event[i].event_num = (uint8_t)i; + InitializeListHead( &p_adapter->np_event[i].listPtrs ); + } + + VNIC_EXIT( VNIC_DBG_FUNC ); + return; +} + +BOOLEAN +netpath_addPath ( + Netpath_t *pNetpath, + viport_t *pViport ) +{ + + if( pNetpath->pViport ) + { + return FALSE; + } + else + { + pNetpath->pViport = pViport; + viport_setParent( pViport, pNetpath ); + return TRUE; + } +} + +BOOLEAN +netpath_removePath( + Netpath_t *pNetpath, + viport_t *pViport) +{ + if( pNetpath->pViport != pViport ) + { + return FALSE; + } + else + { + pNetpath->pViport = NULL; + viport_unsetParent( pViport, pNetpath ); + return TRUE; + } +} + + + +int netpath_maxMtu(Netpath_t *pNetpath) +{ + int ret = MAX_PARAM_VALUE; + + if (pNetpath->pViport) { + ret = viport_maxMtu(pNetpath->pViport); + } + return ret; +} + +BOOLEAN +netpath_xmitPacket( + IN Netpath_t *pNetpath, + IN NDIS_PACKET * const p_packet ) +{ + BOOLEAN ret = FALSE; + + if ( pNetpath->pViport ) + { + ret = viport_xmitPacket(pNetpath->pViport, p_packet ); + } + return ret; +} + + +void +netpath_stopXmit( + IN Netpath_t *pNetpath ) + +{ + + VNIC_ENTER( VNIC_DBG_NETPATH ); + + if (pNetpath == pNetpath->p_adapter->p_currentPath ) + { + InterlockedCompareExchange( &pNetpath->p_adapter->xmitStarted, 0, 1 ); + } +#ifdef INIC_STATISTICS + if ( pNetpath->p_adapter->statistics.xmitRef == 0) + { + pNetpath->p_adapter->statistics.xmitRef = get_time_stamp_ms(); + } +#endif /* INIC_STATISTICS */ + return; +} + +void netpath_restartXmit( + IN Netpath_t *pNetpath ) +{ + VNIC_ENTER( VNIC_DBG_NETPATH ); + + if (pNetpath == pNetpath->p_adapter->p_currentPath ) + { + if( !pNetpath->pViport->errored ) + { + InterlockedCompareExchange( &pNetpath->p_adapter->xmitStarted, 1, 0 ); + } + } +#ifdef INIC_STATISTICS + if (pNetpath->p_adapter->statistics.xmitRef != 0) + { + pNetpath->p_adapter->statistics.xmitOffTime += + get_time_stamp_ms() - 
pNetpath->p_adapter->statistics.xmitRef;
+		pNetpath->p_adapter->statistics.xmitOffNum++;
+		pNetpath->p_adapter->statistics.xmitRef = 0;
+	}
+#endif /* INIC_STATISTICS */
+	return;
+}
+
+// Called by the viport on the receive path to indicate a packet up to NDIS.
+void netpath_recvPacket(
+	IN	Netpath_t		*pNetpath,
+	IN	NDIS_PACKET*	p_packet )
+{
+#ifdef INIC_STATISTICS
+	extern cycles_t recvRef;
+#endif /* INIC_STATISTICS */
+
+	VNIC_ENTER( VNIC_DBG_NETPATH );
+
+#ifdef INIC_STATISTICS
+	pNetpath->p_adapter->statistics.recvTime += get_time_stamp_ms() - recvRef;
+	pNetpath->p_adapter->statistics.recvNum++;
+#endif /* INIC_STATISTICS */
+
+	NdisMIndicateReceivePacket( pNetpath->p_adapter->h_handle, &p_packet, 1 );
+
+	return;
+}
+
+void netpath_tx_timeout(
+	IN	Netpath_t	*pNetpath )
+{
+	if ( pNetpath->pViport )
+	{
+		viport_failure( pNetpath->pViport );
+	}
+}
+
+const char *
+netpath_to_string(
+	IN	vnic_adapter_t	*p_adapter,
+	IN	Netpath_t		*pNetpath )
+{
+	if ( !pNetpath )
+	{
+		return "NULL";
+	}
+	else if ( pNetpath == &p_adapter->primaryPath )
+	{
+		return "PRIMARY";
+	}
+	else if ( pNetpath == &p_adapter->secondaryPath )
+	{
+		return "SECONDARY";
+	}
+	else
+	{
+		return "UNKNOWN";
+	}
+}
+
+
+static BOOLEAN
+__netpath_npevent_register(vnic_adapter_t *p_adapter, Netpath_t *pNetpath)
+{
+	VNIC_ENTER( VNIC_DBG_NETPATH );
+
+	UNUSED_PARAM( pNetpath );
+	p_adapter->state = INIC_REGISTERED;
+	return TRUE;
+}
+
+static const char * const inicNPEventStr[] = {
+	"PRIMARY CONNECTED",
+	"PRIMARY DISCONNECTED",
+	"PRIMARY CARRIER",
+	"PRIMARY NO CARRIER",
+	"PRIMARY TIMER EXPIRED",
+	"SETLINK",
+
+	"SECONDARY CONNECTED",
+	"SECONDARY DISCONNECTED",
+	"SECONDARY CARRIER",
+	"SECONDARY NO CARRIER",
+	"SECONDARY TIMER EXPIRED",
+	"FREE INIC",
+};
+
diff --git a/branches/Ndi/ulp/inic/kernel/vnic_trailer.h b/branches/Ndi/ulp/inic/kernel/vnic_trailer.h
new file mode 100644
index 00000000..05b1df19
--- /dev/null
+++ b/branches/Ndi/ulp/inic/kernel/vnic_trailer.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + +#ifndef _VNIC_TRAILER_H_ +#define _VNIC_TRAILER_H_ + +/* pktFlags values */ +#define PF_CHASH_VALID 0x01 +#define PF_IPSEC_VALID 0x02 +#define PF_TCP_SEGMENT 0x04 +#define PF_KICK 0x08 +#define PF_VLAN_INSERT 0x10 +#define PF_PVID_OVERRIDDEN 0x20 +#define PF_FCS_INCLUDED 0x40 +#define PF_FORCE_ROUTE 0x80 + +/* txChksumFlags values */ +#define TX_CHKSUM_FLAGS_CHECKSUM_V4 0x01 +#define TX_CHKSUM_FLAGS_CHECKSUM_V6 0x02 +#define TX_CHKSUM_FLAGS_TCP_CHECKSUM 0x04 +#define TX_CHKSUM_FLAGS_UDP_CHECKSUM 0x08 +#define TX_CHKSUM_FLAGS_IP_CHECKSUM 0x10 + +/* rxChksumFlags values */ +#define RX_CHKSUM_FLAGS_TCP_CHECKSUM_FAILED 0x01 +#define RX_CHKSUM_FLAGS_UDP_CHECKSUM_FAILED 0x02 +#define RX_CHKSUM_FLAGS_IP_CHECKSUM_FAILED 0x04 +#define RX_CHKSUM_FLAGS_TCP_CHECKSUM_SUCCEEDED 0x08 +#define RX_CHKSUM_FLAGS_UDP_CHECKSUM_SUCCEEDED 0x10 +#define RX_CHKSUM_FLAGS_IP_CHECKSUM_SUCCEEDED 0x20 +#define RX_CHKSUM_FLAGS_LOOPBACK 0x40 +#define RX_CHKSUM_FLAGS_RESERVED 0x80 + +/* connectionHashAndValid values */ +#define CHV_VALID 0x80 +#define CHV_HASH_MASH 0x7f + +/* round down value to align, align must be a power of 2 */ +#ifndef ROUNDDOWNP2 +#define ROUNDDOWNP2(val, align) \ + (((uintn_t)(val)) & (~((uintn_t)(align)-1))) +#endif + +/* round up value to align, align must be a power of 2 */ +#ifndef ROUNDUPP2 +#define ROUNDUPP2(val, align) \ + (((uintn_t)(val) + (uintn_t)(align) - 1) & (~((uintn_t)(align)-1))) +#endif +/* roundup address to align */ +#define ADDR_ROUNDUPP2(_addr, _align) \ + ((VOID *)(((ULONG_PTR)(_addr) + (_align - 1)) & ( ~(_align - 1) ))) + +#define VIPORT_TRAILER_ALIGNMENT 32 +#define BUFFER_SIZE(len) (sizeof(ViportTrailer_t) + ROUNDUPP2((len), VIPORT_TRAILER_ALIGNMENT)) +#define MAX_PAYLOAD(len) ROUNDDOWNP2((len) - sizeof(ViportTrailer_t), VIPORT_TRAILER_ALIGNMENT) + +#include + +typedef struct ViportTrailer { + int8_t dataAlignmentOffset; + uint8_t rndisHeaderLength; /* reserved for use by Edp */ + uint16_t dataLength; + uint8_t pktFlags; + + uint8_t txChksumFlags; + + uint8_t rxChksumFlags; + + uint8_t ipSecFlags; + uint32_t tcpSeqNo; + uint32_t ipSecOffloadHandle; + uint32_t ipSecNextOffloadHandle; + uint8_t destMacAddr[6]; + uint16_t vLan; + uint16_t timeStamp; + uint8_t origin; + uint8_t connectionHashAndValid; +} ViportTrailer_t; + +#include + +#endif /* _VNIC_TRAILER_H_ */ \ No newline at end of file diff --git a/branches/Ndi/ulp/inic/kernel/vnic_util.h b/branches/Ndi/ulp/inic/kernel/vnic_util.h new file mode 100644 index 00000000..7586ee2e --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_util.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _VNIC_UTIL_H_ +#define _VNIC_UTIL_H_ + +#include "vnic_debug.h" + +#define MAXU32 0xffffffff +#define MAXU64 ((uint64_t)(~0)) + +#define MODULE_NAME "VNIC" + +#define PTR64(what) ((uint64_t)(void * __ptr64)(what)) + +#define CONV2JIFFIES(time) (((time) * HZ) / 100) /* source time is 100ths of a sec */ +#define CONV2USEC(time) ((time) * 10000) /* source time is 100ths of a sec */ + +#ifndef min +#define min(a,b) ((a)<(b)?(a):(b)) +#endif + +#define hton16(x) _byteswap_ushort(x) +#define hton32(x) _byteswap_ulong(x) +#define hton64(x) _byteswap_uint64(x) + +#define ntoh16(x) hton16(x) +#define ntoh32(x) hton32(x) +#define ntoh64(x) hton64(x) + +#define IsPowerOf2(value) (((value) & ((value - 1))) == 0) +/* round down to closest power of 2 value */ + +#define SetMinPowerOf2(_val) ((_val) & ( 1 << RtlFindMostSignificantBit( (uint64_t)(_val) ))) + +#define PRINT(level,x) VNIC_PRINT(level, x ) + +#define PRINT_CONDITIONAL(level,x,condition) if (condition){PRINT( level, x)} + +#ifdef _DEBUG_ +#define IB_FSTATUS(functionName) \ + if( (ib_status != IB_SUCCESS) && \ + (ib_status != IB_PENDING) ) \ + VNIC_PRINT( g_vnic_dbg_lvl,("[%s]: %d\n", functionName,ib_status)) + +#else +#define IB_FSTATUS(functionName) +#endif + +#define IB_TEST_FSTATUS( functionName ) IB_FSTATUS( functionName ); \ + if ((ib_status != IB_SUCCESS) && (ib_status != IB_PENDING)) goto failure + +#endif /* _VNIC_UTIL_H_ */ + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_viport.c b/branches/Ndi/ulp/inic/kernel/vnic_viport.c new file mode 100644 index 00000000..e8238f45 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_viport.c @@ -0,0 +1,1098 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + +#include "iba/ib_al.h" +#include "vnic_util.h" +#include "vnic_driver.h" +#include "vnic_viport.h" +#include "vnic_control.h" +#include "vnic_data.h" +#include "vnic_config.h" +#include "vnic_controlpkt.h" + +extern vnic_globals_t g_vnic; + +static void viport_timeout( void *context ); +static ib_api_status_t viport_initMacAddresses( viport_t *p_viport ); +static void viport_statemachine( void *context ); + +uint32_t +viport_get_adapter_name( + IN viport_t *p_viport ) +{ + /* TODO: consider unique addressable and friendly name + should I use ioc profile string ? seem too long */ + cl_memcpy(p_viport->p_adapter->name, DEFAULT_VNIC_NAME, sizeof(DEFAULT_VNIC_NAME) ); + return ( sizeof(DEFAULT_VNIC_NAME) ); +} + +BOOLEAN +viport_config_defaults( + IN viport_t *p_viport ) +{ + + vnic_adapter_t *p_adapter = p_viport->p_adapter; + ViportConfig_t *pConfig = &p_viport->port_config; + ControlConfig_t *pControlConfig = &p_viport->port_config.controlConfig; + DataConfig_t *pDataConfig = &p_viport->port_config.dataConfig; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + p_viport->state = VIPORT_DISCONNECTED; + p_viport->linkState = LINK_RETRYWAIT; + p_viport->newMtu = 1500; + p_viport->newFlags = 0; + + cl_timer_init( &p_viport->timer, viport_timeout, p_viport ); + + pConfig->statsInterval = p_adapter->params.ViportStatsInterval; + pConfig->hbInterval = p_adapter->params.ViportHbInterval; + pConfig->hbTimeout = p_adapter->params.ViportHbTimeout; + + cl_memcpy ( pConfig->ioc_string, + p_adapter->ioc_info.profile.id_string, + min( sizeof( p_adapter->ioc_info.profile.id_string ), + sizeof( pConfig->ioc_string )) ); + + pControlConfig->ibConfig.sid = 0; /* will set it later, from svc entries */ + pControlConfig->ibConfig.connData.pathId = 0; + pControlConfig->ibConfig.connData.inicInstance = 0; + pControlConfig->ibConfig.connData.pathNum = 0; + + pControlConfig->ibConfig.retryCount = p_adapter->params.RetryCount; + pControlConfig->ibConfig.rnrRetryCount = p_adapter->params.RetryCount; + pControlConfig->ibConfig.minRnrTimer = (uint8_t)p_adapter->params.MinRnrTimer; + pControlConfig->ibConfig.numRecvs = 5; /* Not configurable */ + pControlConfig->ibConfig.numSends = 1; /* Not configurable */ + pControlConfig->ibConfig.recvScatter = 1; /* Not configurable */ + pControlConfig->ibConfig.sendGather = 1; /* Not configurable */ + + /* indicate new features support capabilities */ + pControlConfig->ibConfig.connData.featuresSupported = + hton32((uint32_t)(INIC_FEAT_IGNORE_VLAN | INIC_FEAT_RDMA_IMMED )); + + cl_memcpy ( pControlConfig->ibConfig.connData.nodename, + g_vnic.host_name, + min( sizeof( g_vnic.host_name ), + sizeof( pControlConfig->ibConfig.connData.nodename )) ); + + pControlConfig->numRecvs = pControlConfig->ibConfig.numRecvs; + + pControlConfig->inicInstance = pControlConfig->ibConfig.connData.inicInstance; + pControlConfig->maxAddressEntries = (uint16_t)p_adapter->params.MaxAddressEntries; + pControlConfig->minAddressEntries = (uint16_t)p_adapter->params.MinAddressEntries; + pControlConfig->reqRetryCount = (uint8_t)p_adapter->params.ControlReqRetryCount; + pControlConfig->rspTimeout = p_adapter->params.ControlRspTimeout; + + pDataConfig->ibConfig.sid = 0; /* will set it later, from svc entries */ + pDataConfig->ibConfig.connData.pathId = get_time_stamp_ms(); + pDataConfig->ibConfig.connData.inicInstance = pControlConfig->inicInstance; + pDataConfig->ibConfig.connData.pathNum = 0; + + pDataConfig->ibConfig.retryCount = p_adapter->params.RetryCount; + pDataConfig->ibConfig.rnrRetryCount = 
p_adapter->params.RetryCount; + pDataConfig->ibConfig.minRnrTimer = (uint8_t)p_adapter->params.MinRnrTimer; + + /* + * NOTE: The numRecvs size assumes that the EIOC could + * RDMA enough packets to fill all of the host recv + * pool entries, plus send a kick message after each + * packet, plus RDMA new buffers for the size of + * the EIOC recv buffer pool, plus send kick messages + * after each MinHostUpdateSz of new buffers all + * before the Host can even pull off the first completed + * receive off the completion queue, and repost the + * receive. NOT LIKELY! + */ + pDataConfig->ibConfig.numRecvs = p_adapter->params.HostRecvPoolEntries + + ( p_adapter->params.MaxEiocPoolSz / p_adapter->params.MinHostUpdateSz ); + +#if TRUE //defined(LIMIT_OUTSTANDING_SENDS) + + pDataConfig->ibConfig.numSends = (2 * p_adapter->params.NotifyBundleSz ) + + ( p_adapter->params.HostRecvPoolEntries / p_adapter->params.MinEiocUpdateSz ) + 1; + +#else /* !defined(LIMIT_OUTSTANDING_SENDS) */ + /* + * NOTE: The numSends size assumes that the HOST could + * post RDMA sends for every single buffer in the EIOCs + * receive pool, and allocate a full complement of + * receive buffers on the host, and RDMA free buffers + * every MinEiocUpdateSz entries all before the HCA + * can complete a single RDMA transfer. VERY UNLIKELY, + * BUT NOT COMPLETELY IMPOSSIBLE IF THERE IS AN IB + * PROBLEM! + */ + pDataConfig->ibConfig.numSends = p_adapter->params.MaxEiocPoolSz + + ( p_adapter->params.HostRecvPoolEntries / p_adapter->params.MinEiocUpdateSz ) + 1; + +#endif /* !defined(LIMIT_OUTSTANDING_SENDS) */ + + pDataConfig->ibConfig.recvScatter = 1; /* Not configurable */ + pDataConfig->ibConfig.sendGather = MAX_NUM_SGE; /* Not configurable */ + + pDataConfig->numRecvs = pDataConfig->ibConfig.numRecvs; + pDataConfig->pathId = pDataConfig->ibConfig.connData.pathId; + + pDataConfig->hostMin.sizeRecvPoolEntry = + (uint32_t)BUFFER_SIZE(ETH_VLAN_HLEN + p_adapter->params.MinMtu); + pDataConfig->hostMax.sizeRecvPoolEntry = + (uint32_t)BUFFER_SIZE(ETH_VLAN_HLEN + p_adapter->params.MaxMtu); + pDataConfig->eiocMin.sizeRecvPoolEntry = + (uint32_t)BUFFER_SIZE(ETH_VLAN_HLEN + p_adapter->params.MinMtu); + pDataConfig->eiocMax.sizeRecvPoolEntry = MAX_PARAM_VALUE; + + pDataConfig->hostRecvPoolEntries = p_adapter->params.HostRecvPoolEntries; + pDataConfig->notifyBundle = p_adapter->params.NotifyBundleSz; + + pDataConfig->hostMin.numRecvPoolEntries = p_adapter->params.MinHostPoolSz; + pDataConfig->hostMax.numRecvPoolEntries = MAX_PARAM_VALUE; + pDataConfig->eiocMin.numRecvPoolEntries = p_adapter->params.MinEiocPoolSz; + pDataConfig->eiocMax.numRecvPoolEntries = p_adapter->params.MaxEiocPoolSz; + + pDataConfig->hostMin.timeoutBeforeKick = p_adapter->params.MinHostKickTimeout; + pDataConfig->hostMax.timeoutBeforeKick = p_adapter->params.MaxHostKickTimeout; + pDataConfig->eiocMin.timeoutBeforeKick = 0; + pDataConfig->eiocMax.timeoutBeforeKick = MAX_PARAM_VALUE; + + pDataConfig->hostMin.numRecvPoolEntriesBeforeKick = p_adapter->params.MinHostKickEntries; + pDataConfig->hostMax.numRecvPoolEntriesBeforeKick = p_adapter->params.MaxHostKickEntries; + pDataConfig->eiocMin.numRecvPoolEntriesBeforeKick = 0; + pDataConfig->eiocMax.numRecvPoolEntriesBeforeKick = MAX_PARAM_VALUE; + + pDataConfig->hostMin.numRecvPoolBytesBeforeKick = p_adapter->params.MinHostKickBytes; + pDataConfig->hostMax.numRecvPoolBytesBeforeKick = p_adapter->params.MaxHostKickBytes; + pDataConfig->eiocMin.numRecvPoolBytesBeforeKick = 0; + pDataConfig->eiocMax.numRecvPoolBytesBeforeKick = 
MAX_PARAM_VALUE; + + pDataConfig->hostMin.freeRecvPoolEntriesPerUpdate = p_adapter->params.MinHostUpdateSz; + pDataConfig->hostMax.freeRecvPoolEntriesPerUpdate = p_adapter->params.MaxHostUpdateSz; + pDataConfig->eiocMin.freeRecvPoolEntriesPerUpdate = p_adapter->params.MinEiocUpdateSz; + pDataConfig->eiocMax.freeRecvPoolEntriesPerUpdate = p_adapter->params.MaxEiocUpdateSz; + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return TRUE; +} + +static BOOLEAN config_isValid(ViportConfig_t *pConfig) +{ + UNREFERENCED_PARAMETER( pConfig ); + return TRUE; +} + + +void +viport_cleanup( + viport_t *p_viport ) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if( p_viport ) + { + VNIC_TRACE(VNIC_DBG_INIT, + ("IOC[%d]viport cleanup\n", p_viport->ioc_num )); + InterlockedExchange( &p_viport->p_adapter->carrier, FALSE ); + InterlockedExchange( &p_viport->p_netpath->carrier, FALSE ); + viport_timerStop( p_viport ); + data_disconnect( &p_viport->data ); + + control_cleanup( &p_viport->control ); + + data_cleanup( &p_viport->data ); + + if( p_viport->macAddresses != NULL ) + { + NdisFreeMemory( p_viport->macAddresses, + p_viport->numMacAddresses * sizeof(Inic_AddressOp_t), 0 ); + } + + cl_timer_destroy( &p_viport->timer ); + + InterlockedExchange( (volatile LONG *)&p_viport->p_adapter->state, + INIC_UNINITIALIZED ); + + NdisFreeMemory ( p_viport, sizeof(viport_t), 0 ); + } + VNIC_EXIT( VNIC_DBG_VIPORT ); + return; +} + + +void +viport_setPath( + IN viport_t *p_viport, + IN ib_path_rec_t *p_path, + IN uint64_t *p_guid ) +{ + + UNUSED_PARAM( p_guid ); + + VNIC_ENTER( VNIC_DBG_FUNC ); + + VNIC_TRACE( VNIC_DBG_PNP, + ("Using SLID=%d DLID=%d Target:%s\n", + cl_ntoh16( p_path->slid ), + cl_ntoh16( p_path->dlid ), + p_viport->p_adapter->ioc_info.profile.id_string) ); + + p_viport->portGuid = p_path->sgid.unicast.interface_id; + p_viport->iocGuid = *p_guid; + p_viport->port_config.dataConfig.ibConfig.pathInfo = *p_path; + p_viport->port_config.controlConfig.ibConfig.pathInfo = *p_path; + p_viport->port_config.controlConfig.ibConfig.sid = + p_viport->p_adapter->p_svc_entries[0].id; + p_viport->port_config.dataConfig.ibConfig.sid = + p_viport->p_adapter->p_svc_entries[1].id; + + VNIC_EXIT( VNIC_DBG_FUNC ); +} + +BOOLEAN +viport_setParent( + IN viport_t *p_viport, + IN Netpath_t *pNetpath ) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + + + if(p_viport->p_netpath != NULL) + { + return FALSE; + } + + p_viport->p_netpath = pNetpath; + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return TRUE; +} + +BOOLEAN +viport_unsetParent( + IN viport_t *p_viport, + IN Netpath_t *pNetpath ) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + if( pNetpath->pViport == p_viport ) + viport_free( p_viport ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return TRUE; +} + +void +viport_free( + IN viport_t *p_viport ) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + + viport_timerStop( p_viport ); + viport_disconnect(p_viport); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return; +} + +void +viport_disconnect( + IN viport_t *p_viport ) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + + InterlockedExchange( (volatile LONG *)&p_viport->disconnect, TRUE ); + viport_failure( p_viport ); + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return; +} + + +NDIS_STATUS +viport_setLink( + IN viport_t *p_viport, + IN uint8_t flags, + IN uint16_t mtu ) +{ + KIRQL irql; + NDIS_STATUS status = NDIS_STATUS_SUCCESS; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if(mtu > data_maxMtu(&p_viport->data)) + { + viport_failure(p_viport); + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("Configuration error. 
Mtu of %d unsupported by %s\n", mtu, p_viport->p_adapter->name ) ); + return NDIS_STATUS_FAILURE; + } + + KeAcquireSpinLock( &p_viport->lock, &irql ); + + flags &= ( INIC_FLAG_ENABLE_NIC | + INIC_FLAG_ENABLE_MCAST_ALL | + INIC_FLAG_ENABLE_PROMISC ); + + if( (p_viport->newFlags != flags) || + (p_viport->newMtu != mtu)) + { + p_viport->newFlags = flags; + p_viport->newMtu = mtu; + InterlockedOr( &p_viport->updates, NEED_LINK_CONFIG ); + } + KeReleaseSpinLock( &p_viport->lock, irql ); + + status = _viport_process_query( p_viport, TRUE ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; +} + +BOOLEAN +viport_setUnicast( + IN viport_t *p_viport, + IN uint8_t *p_address ) +{ + KIRQL flags; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if( !p_viport ) + return FALSE; + + KeAcquireSpinLock( &p_viport->lock, &flags ); + if( p_viport->macAddresses == NULL ) + { + KeReleaseSpinLock( &p_viport->lock, flags ); + return FALSE; + } + if( cl_memcmp(p_viport->macAddresses[UNICAST_ADDR].address, + p_address, MAC_ADDR_LEN ) ) + { + cl_memcpy( p_viport->macAddresses[UNICAST_ADDR].address, + p_address, MAC_ADDR_LEN ); + p_viport->macAddresses[UNICAST_ADDR].operation + = INIC_OP_SET_ENTRY; + InterlockedOr( &p_viport->updates, NEED_ADDRESS_CONFIG ); + } + KeReleaseSpinLock( &p_viport->lock, flags ); + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return TRUE; +} + + +/* Returns flags for state machine operations. */ +NDIS_STATUS +viport_setMulticast( + IN viport_t* const p_viport ) +{ + vnic_adapter_t *p_adapter = p_viport->p_adapter; + uint32_t updates = 0; + int i; + KIRQL flags; + NDIS_STATUS status; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + KeAcquireSpinLock( &p_viport->lock, &flags ); + + if( p_viport->macAddresses == NULL ) + { + KeReleaseSpinLock( &p_viport->lock, flags ); + return NDIS_STATUS_NOT_ACCEPTED; + } + + ASSERT( (p_viport->updates & ~MCAST_OVERFLOW) == 0 ); + + if( p_adapter->mc_count > p_viport->numMacAddresses - MCAST_ADDR_START ) + { + updates |= NEED_LINK_CONFIG | MCAST_OVERFLOW; + } + else if( p_adapter->mc_count == 0 ) + { + /* invalidate all entries for the remote */ + for (i = MCAST_ADDR_START; + i < min( MAX_ADDR_ARRAY, p_viport->numMacAddresses ); i++ ) + { + p_viport->macAddresses[i].valid = 0; + p_viport->macAddresses[i].operation = INIC_OP_SET_ENTRY; + } + } + else + { + if( InterlockedAnd( + &p_viport->updates, ~MCAST_OVERFLOW ) & MCAST_OVERFLOW ) + { + updates |= NEED_LINK_CONFIG; + } + /* Brute force algorithm */ + for (i = MCAST_ADDR_START; + i < min( MAX_ADDR_ARRAY, p_adapter->mc_count + MCAST_ADDR_START ); + i++ ) + { + if( p_viport->macAddresses[i].valid && + NdisEqualMemory( p_viport->macAddresses[i].address, + p_adapter->mcast_array[i - MCAST_ADDR_START].addr, + MAC_ADDR_LEN ) ) + { + continue; + } + + NdisMoveMemory( &p_viport->macAddresses[i].address, + p_adapter->mcast_array[i - MCAST_ADDR_START].addr, + MAC_ADDR_LEN ); + + p_viport->macAddresses[i].valid = 1; + p_viport->macAddresses[i].operation = INIC_OP_SET_ENTRY; + + updates |= NEED_ADDRESS_CONFIG; + } + for (; i < min( MAX_ADDR_ARRAY, p_viport->numMacAddresses ); i++ ) + { + if( !p_viport->macAddresses[i].valid ) + continue; + + updates |= NEED_ADDRESS_CONFIG; + + p_viport->macAddresses[i].valid = 0; + p_viport->macAddresses[i].operation = INIC_OP_SET_ENTRY; + } + } + + /* + * Now that the mac array is setup, we can set the update bits + * to send the request. 
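	 * For example: when the list fits, each entry rewritten above latches
	 * NEED_ADDRESS_CONFIG; on overflow this pass instead requests
	 * NEED_LINK_CONFIG | MCAST_OVERFLOW. Either way the bits are published
	 * by the single atomic OR below, so a concurrent
	 * _viport_process_query() pass observes either none or all of them.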
+ */ + InterlockedOr( &p_viport->updates, updates ); + KeReleaseSpinLock( &p_viport->lock, flags ); + + status = _viport_process_query( p_viport, FALSE ); + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; +} + +NDIS_STATUS +viport_getStats( + IN viport_t *p_viport ) +{ + uint64_t stats_update_ms; + NDIS_STATUS status = STATUS_SUCCESS; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + stats_update_ms = get_time_stamp_ms(); + + if( stats_update_ms > p_viport->lastStatsTime + p_viport->port_config.statsInterval ) + { + p_viport->lastStatsTime = (uint32_t)stats_update_ms; + + InterlockedOr( &p_viport->updates, NEED_STATS ); + + status = _viport_process_query( p_viport, FALSE ); + if ( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Query NEED_STATS Failed\n") ); + } + } + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; +} + + +BOOLEAN +viport_xmitPacket( + IN viport_t* const p_viport, + IN NDIS_PACKET* const p_packet ) +{ + BOOLEAN status = TRUE; + KIRQL flags; + LIST_ENTRY *p_list_item; + NDIS_PACKET *p_packet_from_list; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if( !p_viport->p_adapter->xmitStarted ) + { + NdisInterlockedInsertTailList( + &p_viport->send_pending_list, + VNIC_LIST_ITEM_FROM_PACKET( p_packet ), + &p_viport->pending_list_lock ); + } + else + { + KeAcquireSpinLock( &p_viport->lock, &flags ); + + while( ( p_list_item = NdisInterlockedRemoveHeadList( + &p_viport->send_pending_list, + &p_viport->pending_list_lock ) ) != NULL ) + { + p_packet_from_list = VNIC_PACKET_FROM_LIST_ITEM( p_list_item ); + + status = data_xmitPacket( &p_viport->data, p_packet_from_list ); + if( !status ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC[%d] Xmit Pending Packet failed\n", p_viport->ioc_num )); + /* put it back on pending list - will complete it on cleanup */ + NdisInterlockedInsertTailList( + &p_viport->send_pending_list, + VNIC_LIST_ITEM_FROM_PACKET( p_packet_from_list ), + &p_viport->pending_list_lock ); + viport_stopXmit( p_viport ); + break; + } + } + + if( !status ) + { /*do not try to send, just exit */ + NdisInterlockedInsertTailList( + &p_viport->send_pending_list, + VNIC_LIST_ITEM_FROM_PACKET( p_packet ), + &p_viport->pending_list_lock ); + viport_stopXmit( p_viport ); + + KeReleaseSpinLock( &p_viport->lock, flags ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; + } + + /* just send a packet */ + status = data_xmitPacket( &p_viport->data, p_packet ); + + if( !status ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("IOC[%d] XmitPacket failed\n", p_viport->ioc_num )); + /* put it on pending list - will complete it on cleanup */ + NdisInterlockedInsertTailList( + &p_viport->send_pending_list, + VNIC_LIST_ITEM_FROM_PACKET( p_packet ), + &p_viport->pending_list_lock ); + viport_stopXmit( p_viport ); + } + + KeReleaseSpinLock( &p_viport->lock, flags ); + } + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; +} + +void viport_linkUp(viport_t *p_viport) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if( p_viport->p_netpath->carrier == TRUE && + InterlockedExchange( &p_viport->p_adapter->carrier, TRUE ) == FALSE ) + { + NdisMIndicateStatus( p_viport->p_adapter->h_handle, + NDIS_STATUS_MEDIA_CONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_viport->p_adapter->h_handle ); + } + VNIC_EXIT( VNIC_DBG_VIPORT ); + return; +} + +void viport_linkDown(viport_t *p_viport) +{ + VNIC_ENTER( VNIC_DBG_VIPORT ); + if ( InterlockedExchange( &p_viport->p_adapter->carrier, FALSE ) == TRUE ) + { + NdisMIndicateStatus( p_viport->p_adapter->h_handle, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( 
p_viport->p_adapter->h_handle );
+	}
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+	return;
+}
+
+void viport_stopXmit(viport_t *p_viport)
+{
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+	netpath_stopXmit( p_viport->p_netpath );
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+	return;
+}
+
+void viport_restartXmit(viport_t *p_viport)
+{
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+	netpath_restartXmit( p_viport->p_netpath );
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+	return;
+}
+
+void
+viport_recvPacket(
+	IN	viport_t	*p_viport,
+	IN	NDIS_PACKET	*p_packet )
+{
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+	netpath_recvPacket(p_viport->p_netpath, p_packet );
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+	return;
+}
+
+
+void
+viport_failure(
+	IN	viport_t	*p_viport )
+{
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+
+	CL_ASSERT( p_viport );
+
+	InterlockedExchange( &p_viport->p_netpath->carrier, FALSE );
+
+	if( InterlockedExchange( (volatile LONG*)&p_viport->errored, TRUE ) == FALSE )
+	{
+		viport_stopXmit( p_viport );
+		viport_linkDown( p_viport );
+	}
+
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+}
+
+
+void
+viport_timeout(
+	IN	void	*context )
+{
+	viport_t	*p_viport = (viport_t *)context;
+	CL_ASSERT( p_viport );
+
+	InterlockedExchange( &p_viport->timerActive, FALSE );
+	// Did we get a response to the previous heartbeat request?
+	if( p_viport->link_hb_state != LINK_HEARTBEATRSP )
+	{
+		VNIC_TRACE( VNIC_DBG_ERROR,
+			("IOC[%d] NO HEARTBEAT RESPONSE\n", p_viport->ioc_num ));
+		viport_failure( p_viport );
+		return;
+	}
+	if( p_viport->data.connected &&
+		!p_viport->errored )
+	{
+		viport_timer( p_viport, p_viport->port_config.hbInterval );
+	}
+}
+
+
+void
+viport_timer(
+	IN	viport_t	*p_viport,
+	IN	int			timeout )
+{
+	ib_api_status_t	ib_status;
+
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+
+	if( !p_viport->control.reqOutstanding )
+	{
+		InterlockedExchange( &p_viport->timerActive, TRUE );
+		p_viport->link_hb_state = LINK_HEARTBEATREQ;
+
+		cl_timer_start( &p_viport->timer, (uint32_t)timeout );
+
+		ib_status = control_heartbeatReq( &p_viport->control,
+			p_viport->port_config.hbTimeout );
+
+		if( ib_status != IB_SUCCESS )
+		{
+			viport_timerStop( p_viport );
+			VNIC_TRACE( VNIC_DBG_ERROR,
+				("IOC[%d] HEARTBEAT send failed\n", p_viport->ioc_num ));
+			viport_failure( p_viport );
+			return;
+		}
+	}
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+}
+
+
+void
+viport_timerStop(
+	IN	viport_t	*p_viport )
+{
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+
+	if( p_viport )
+	{
+		if( InterlockedExchange( &p_viport->timerActive, FALSE ) == TRUE )
+		{
+			cl_timer_stop( &p_viport->timer );
+		}
+	}
+
+	VNIC_EXIT( VNIC_DBG_VIPORT );
+	return;
+}
+
+static ib_api_status_t
+viport_initMacAddresses(
+	IN	viport_t	*p_viport )
+{
+	int			i, size;
+	KIRQL		flags;
+	NDIS_STATUS	status;
+	VNIC_ENTER( VNIC_DBG_VIPORT );
+
+	size = p_viport->numMacAddresses * sizeof(Inic_AddressOp_t);
+	status = NdisAllocateMemoryWithTag( &p_viport->macAddresses, size, 'acam' );
+
+	if ( status != NDIS_STATUS_SUCCESS )
+	{
+		VNIC_TRACE_EXIT( VNIC_DBG_ERROR,
+			("Failed to allocate MAC address table of size %d\n", size) );
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	NdisZeroMemory( p_viport->macAddresses, size );
+
+	KeAcquireSpinLock( &p_viport->lock, &flags );
+	for( i = 0; i < p_viport->numMacAddresses; i++ )
+	{
+		p_viport->macAddresses[i].index = (uint16_t)i;
+		p_viport->macAddresses[i].vlan = p_viport->defaultVlan;
+	}
+
+	NdisFillMemory( p_viport->macAddresses[BROADCAST_ADDR].address,
+		MAC_ADDR_LEN, 0xFF );
+	p_viport->macAddresses[BROADCAST_ADDR].valid = TRUE;
+
+	NdisMoveMemory( p_viport->macAddresses[UNICAST_ADDR].address,
+		p_viport->hwMacAddress, MAC_ADDR_LEN );
+
p_viport->macAddresses[UNICAST_ADDR].valid = TRUE; + p_viport->p_adapter->macSet = TRUE; + + KeReleaseSpinLock( &p_viport->lock, flags ); + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return IB_SUCCESS; +} + + +ib_api_status_t +viport_control_connect( + IN viport_t* const p_viport ) +{ + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + ib_status = control_init( &p_viport->control, p_viport, + &p_viport->port_config.controlConfig, p_viport->portGuid ); + if( ib_status != IB_SUCCESS ) + { + VNIC_EXIT( VNIC_DBG_VIPORT ); + return ib_status; + } + + ib_status = ibqp_connect( &p_viport->control.qp ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("control QP connect failed\n")); + control_cleanup( &p_viport->control ); + return ib_status; + } + + InterlockedExchange( (volatile LONG*)&p_viport->linkState, + (LONG)LINK_INITINICREQ ); + + ib_status = control_initInicReq( &p_viport->control ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("CMD_INIT_INIC REQ failed\n") ); + control_cleanup( &p_viport->control ); + return ib_status; + } + cl_event_wait_on( &p_viport->conn_event, + (p_viport->control.p_conf->rspTimeout << 11), TRUE ); + + if( p_viport->linkState != LINK_INITINICRSP ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("CMD_INIT_INIC RSP failed\n")); + ib_status = IB_INSUFFICIENT_RESOURCES; + control_cleanup( &p_viport->control ); + return ib_status; + } + + vnic_resume_oids( p_viport->p_adapter ); + + ib_status = viport_initMacAddresses( p_viport ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("Init MAC Addresses failed\n")); + control_cleanup( &p_viport->control ); + } + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return ib_status; +} + +ib_api_status_t +viport_data_connect( + IN viport_t* const p_viport ) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + ib_status = data_init( &p_viport->data, + &p_viport->port_config.dataConfig, + p_viport->portGuid ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, ("Data init returned %s\n", + p_viport->p_adapter->ifc.get_err_str( ib_status )) ); + return ib_status; + } + InterlockedExchange( (volatile LONG*)&p_viport->linkState, + (LONG)LINK_CONFIGDATAPATHREQ ); + + ib_status = control_configDataPathReq( &p_viport->control, + data_pathId(&p_viport->data ), data_hostPoolMax( &p_viport->data ), + data_eiocPoolMax( &p_viport->data ) ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("command CONFIGDATAPATH REQ failed\n")); + return ib_status; + } + cl_event_wait_on( &p_viport->conn_event, + (p_viport->control.p_conf->rspTimeout << 11), TRUE ); + + if( p_viport->linkState != LINK_CONFIGDATAPATHRSP ) + { + VNIC_TRACE( VNIC_DBG_ERROR, + ("failed to get CONFIGDATAPATH RSP\n")); + return IB_INSUFFICIENT_RESOURCES; + } + + ib_status = data_connect( &p_viport->data ); + if( ib_status != IB_SUCCESS ) + { + VNIC_EXIT( VNIC_DBG_VIPORT ); + return ib_status; + } + cl_event_wait_on( &p_viport->conn_event, + (p_viport->control.p_conf->rspTimeout << 11), TRUE ); + if( p_viport->data.qp.qpState != IB_ATTACHED ) + { + VNIC_EXIT( VNIC_DBG_VIPORT ); + return IB_ERROR; + } + InterlockedExchange( (volatile LONG*)&p_viport->linkState, + (LONG)LINK_XCHGPOOLREQ ); + ib_status = control_exchangePoolsReq( &p_viport->control, + data_localPoolAddr(&p_viport->data), + data_localPoolRkey(&p_viport->data) ); + if( ib_status != IB_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("command XCHGPOOL REQ failed\n")); + return 
ib_status; + } + cl_event_wait_on( &p_viport->conn_event, + (p_viport->control.p_conf->rspTimeout << 11), TRUE ); + + if( p_viport->linkState != LINK_XCHGPOOLRSP ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("failed to get LINK_XCHGPOOL RSP\n")); + return IB_ERROR; + } + + InterlockedExchange( (volatile LONG*)&p_viport->linkState, + (LONG)LINK_INITIALIZED ); + p_viport->state = VIPORT_CONNECTED; + + data_connected(&p_viport->data); + + InterlockedExchange( (volatile LONG *)&p_viport->mtu, 1500 ); + InterlockedExchange( (volatile LONG *)&p_viport->flags, 0 ); + status = viport_setLink( p_viport, INIC_FLAG_ENABLE_NIC, 1500 ); + if( status != NDIS_STATUS_SUCCESS ) + { + VNIC_TRACE_EXIT( VNIC_DBG_ERROR, + ("failed to set Link flags\n")); + return IB_ERROR; + } + + /* now we have to start periodic heartbeat timer */ + if( p_viport->port_config.hbInterval ) + { + viport_timer( p_viport, p_viport->port_config.hbInterval ); + } + + VNIC_EXIT( VNIC_DBG_VIPORT ); + return IB_SUCCESS; +} + + +NDIS_STATUS +_viport_process_query( + IN viport_t* const p_viport, + IN BOOLEAN sync ) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + KIRQL irql; + LinkState_t expected_state = 0; + + VNIC_ENTER( VNIC_DBG_VIPORT ); + + if ( p_viport->state != VIPORT_CONNECTED || + p_viport->errored != 0 ) + { + VNIC_TRACE_EXIT( VNIC_DBG_INFO, ("Invalid state or error.\n") ); + return NDIS_STATUS_NOT_ACCEPTED; + } + + // Check for updates. Note that unless sync is set to TRUE, this + // is the only way for this function to return success. + if( !InterlockedCompareExchange( &p_viport->updates, 0, 0 ) ) + { + VNIC_TRACE_EXIT( VNIC_DBG_VIPORT, ("No updates.\n") ); + /* now can restart heartbeats */ + if( !p_viport->timerActive && + p_viport->port_config.hbInterval ) + { + viport_timer( p_viport, p_viport->port_config.hbInterval ); + } + return NDIS_STATUS_SUCCESS; + } + + if( sync ) + { + status = NDIS_STATUS_SUCCESS; + InterlockedOr( &p_viport->updates, SYNC_QUERY ); + } + else + { + status = NDIS_STATUS_PENDING; + } + /* stop heartbeat timer to serve another query */ + viport_timerStop( p_viport ); + + // Handle update bits one at a time. 
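+	// Only one control request is in flight at a time (see the
+	// reqOutstanding check in viport_timer), so each pass issues a single
+	// request from the else-if chain below; remaining bits stay latched in
+	// 'updates' for a later pass. For example, if NEED_ADDRESS_CONFIG and
+	// NEED_LINK_CONFIG are both set, the address configuration is sent
+	// first and the link configuration waits for a subsequent pass.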
+ if( p_viport->updates & NEED_ADDRESS_CONFIG ) + { + VNIC_TRACE( VNIC_DBG_OID, + ("QUERY NEED_ADDRESS_CONFIG\n")); + KeAcquireSpinLock(&p_viport->lock, &irql ); + p_viport->linkState = LINK_CONFIGADDRSREQ; + ib_status = control_configAddrsReq( + &p_viport->control, p_viport->macAddresses, + p_viport->numMacAddresses, &p_viport->addrs_query_done ); + KeReleaseSpinLock( &p_viport->lock, irql ); + if ( ib_status != IB_SUCCESS ) + { + InterlockedAnd( &p_viport->updates, ~NEED_ADDRESS_CONFIG ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return NDIS_STATUS_FAILURE; + } + expected_state = LINK_CONFIGADDRSRSP; + } + else if( p_viport->updates & NEED_LINK_CONFIG ) + { + VNIC_TRACE( VNIC_DBG_OID, + ("QUERY NEED_LINK_CONFIG\n")); + + KeAcquireSpinLock(&p_viport->lock, &irql ); + p_viport->linkState = LINK_CONFIGLINKREQ; + + if( (InterlockedAnd( + &p_viport->updates, ~MCAST_OVERFLOW ) & MCAST_OVERFLOW) ) + { + p_viport->newFlags |= INIC_FLAG_ENABLE_MCAST_ALL; + } + else + { + p_viport->newFlags &= ~INIC_FLAG_ENABLE_MCAST_ALL; + } + + if ( p_viport->mtu != p_viport->newMtu ) + p_viport->mtu = p_viport->newMtu; + + ib_status = control_configLinkReq( &p_viport->control, + p_viport->newFlags, p_viport->mtu ); + KeReleaseSpinLock( &p_viport->lock, irql ); + if( ib_status != IB_SUCCESS ) + { + InterlockedAnd( &p_viport->updates, ~NEED_LINK_CONFIG ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return NDIS_STATUS_FAILURE; + } + expected_state = LINK_CONFIGLINKRSP; + } + else if( p_viport->updates & NEED_STATS ) + { + // TODO: This is dead code. + VNIC_TRACE( VNIC_DBG_OID, + ("QUERY NEED_STATS\n")); + + KeAcquireSpinLock( &p_viport->lock, &irql ); + p_viport->linkState = LINK_REPORTSTATREQ; + + ib_status = control_reportStatisticsReq( &p_viport->control ); + KeReleaseSpinLock( &p_viport->lock, irql ); + if( ib_status != IB_SUCCESS ) + { + InterlockedAnd( &p_viport->updates, ~NEED_STATS ); + VNIC_EXIT( VNIC_DBG_VIPORT ); + return NDIS_STATUS_FAILURE; + } + expected_state = LINK_REPORTSTATRSP; + } + + if( sync ) + { + cl_event_wait_on( &p_viport->conn_event, EVENT_NO_TIMEOUT, TRUE ); + + if( p_viport->linkState != expected_state ) + { + status = NDIS_STATUS_FAILURE; + VNIC_TRACE( VNIC_DBG_ERROR, + ("Link state error: expected %d but got %d\n", + expected_state, p_viport->linkState)); + } + } + VNIC_EXIT( VNIC_DBG_VIPORT ); + return status; +} + diff --git a/branches/Ndi/ulp/inic/kernel/vnic_viport.h b/branches/Ndi/ulp/inic/kernel/vnic_viport.h new file mode 100644 index 00000000..f4da6432 --- /dev/null +++ b/branches/Ndi/ulp/inic/kernel/vnic_viport.h @@ -0,0 +1,287 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ +#ifndef _VNIC_VIPORT_H_ +#define _VNIC_VIPORT_H_ + +typedef struct _mc_list { + uint8_t mc_addr[MAC_ADDR_LEN]; +} mc_list_t; + +typedef enum { + VIPORT_DISCONNECTED, + VIPORT_CONNECTED +} viport_state_t; + +typedef enum { + LINK_UNINITIALIZED, + LINK_INITIALIZE, + LINK_INITIALIZECONTROL, + LINK_INITIALIZEDATA, + LINK_CONTROLCONNECT, + LINK_CONTROLCONNECTWAIT, + LINK_INITINICREQ, + LINK_INITINICRSP, + LINK_BEGINDATAPATH, + LINK_CONFIGDATAPATHREQ, + LINK_CONFIGDATAPATHRSP, + LINK_DATACONNECT, + LINK_DATACONNECTWAIT, + LINK_XCHGPOOLREQ, + LINK_XCHGPOOLRSP, + LINK_INITIALIZED, + LINK_IDLE, + LINK_IDLING, + LINK_CONFIGLINKREQ, + LINK_CONFIGLINKRSP, + LINK_CONFIGADDRSREQ, + LINK_CONFIGADDRSRSP, + LINK_REPORTSTATREQ, + LINK_REPORTSTATRSP, + LINK_HEARTBEATREQ, + LINK_HEARTBEATRSP, + LINK_RESET, + LINK_RESETRSP, + LINK_RESETCONTROL, + LINK_RESETCONTROLRSP, + LINK_DATADISCONNECT, + LINK_CONTROLDISCONNECT, + LINK_CLEANUPDATA, + LINK_CLEANUPCONTROL, + LINK_DISCONNECTED, + LINK_RETRYWAIT +} LinkState_t; + +/* index entries */ +#define BROADCAST_ADDR 0 +#define UNICAST_ADDR 1 +#define MCAST_ADDR_START 2 +#define MAX_MCAST (MAX_ADDR_ARRAY - MCAST_ADDR_START) + +#define currentMacAddress macAddresses[UNICAST_ADDR].address + +#define NEED_STATS 0x00000001 +#define NEED_ADDRESS_CONFIG 0x00000002 +#define NEED_LINK_CONFIG 0x00000004 +#define MCAST_OVERFLOW 0x00000008 +#define SYNC_QUERY 0x80000000 + +typedef enum { + NETPATH_TS_IDLE, + NETPATH_TS_ACTIVE, + NETPATH_TS_EXPIRED +} netpathTS_t; + + +typedef struct { + LIST_ENTRY listPtrs; + struct _vnic_adapter *p_adapter; + uint8_t event_num; +} InicNPEvent_t; + +typedef enum { + INICNP_CONNECTED = 0, + INICNP_DISCONNECTED, + INICNP_LINKUP, + INICNP_LINKDOWN, + INICNP_TIMEREXPIRED, + INICNP_UNIVERSAL1, + /* SECONDARYOFFSET MUST ALWAYS COME AT THE END */ + INICNP_SECONDARYOFFSET +} InicNPEventPos_t; + +#define INICNP_NUM_EVENTS (2 * INICNP_SECONDARYOFFSET) + +#define INIC_PRINP_CONNECTED INICNP_CONNECTED +#define INIC_PRINP_DISCONNECTED INICNP_DISCONNECTED +#define INIC_PRINP_LINKUP INICNP_LINKUP +#define INIC_PRINP_LINKDOWN INICNP_LINKDOWN +#define INIC_PRINP_TIMEREXPIRED INICNP_TIMEREXPIRED +#define INIC_NP_SETLINK INICNP_UNIVERSAL1 + +#define INIC_SECNP_CONNECTED (INICNP_CONNECTED + INICNP_SECONDARYOFFSET) +#define INIC_SECNP_DISCONNECTED (INICNP_DISCONNECTED + INICNP_SECONDARYOFFSET) +#define INIC_SECNP_LINKUP (INICNP_LINKUP + INICNP_SECONDARYOFFSET) +#define INIC_SECNP_LINKDOWN (INICNP_LINKDOWN + INICNP_SECONDARYOFFSET) +#define INIC_SECNP_TIMEREXPIRED (INICNP_TIMEREXPIRED + INICNP_SECONDARYOFFSET) +#define INIC_NP_FREEINIC (INICNP_UNIVERSAL1 + INICNP_SECONDARYOFFSET) + + +typedef struct Netpath { + volatile LONG carrier; + struct _vnic_adapter *p_adapter; + struct _viport *pViport; + size_t pathIdx; + uint64_t connectTime; + netpathTS_t timerState; +} Netpath_t; + +typedef enum { + WAIT, + DELAY, + NOW +} conn_wait_state_t; + +typedef struct _viport { + LIST_ENTRY listPtrs; + KSPIN_LOCK lock; + cl_obj_t obj; + LIST_ENTRY send_pending_list; + NDIS_SPIN_LOCK pending_list_lock; + struct _vnic_adapter *p_adapter; + struct Netpath *p_netpath; + struct ViportConfig port_config; + struct Control control; + struct Data data; + uint64_t iocGuid; + uint64_t portGuid; + uint32_t 
ioc_num; + // connected/disconnected state of control and data QPs. + viport_state_t state; + + // State machine state? + LinkState_t linkState; + LinkState_t link_hb_state; + + Inic_CmdReportStatisticsRsp_t stats; + uint64_t lastStatsTime; + uint32_t featuresSupported; + uint8_t hwMacAddress[MAC_ADDR_LEN]; + uint16_t defaultVlan; + uint16_t numMacAddresses; + Inic_AddressOp_t *macAddresses; + int32_t addrs_query_done; + + // Indicates actions (to the VEx) that need to be taken. + volatile LONG updates; + // ??? + uint8_t flags; + // TODO: Can we eliminate newFlags? + uint8_t newFlags; + + uint16_t mtu; + uint16_t newMtu; + uint32_t errored; + uint32_t disconnect; + volatile LONG timerActive; + cl_timer_t timer; + cl_event_t conn_event; + +} viport_t; + + +BOOLEAN +viport_xmitPacket( + viport_t* const p_viport, + NDIS_PACKET* const p_pkt ); + +BOOLEAN +viport_config_defaults( + IN viport_t *p_viport ); + +uint32_t +viport_get_adapter_name( + IN viport_t *p_viport ); + +void viport_cleanup(viport_t *p_viport ); +void viport_setPath(viport_t *pViport, ib_path_rec_t *path, uint64_t *guid); +BOOLEAN viport_setParent(viport_t *pViport, struct Netpath *pNetpath); +BOOLEAN viport_unsetParent(viport_t *pViport, struct Netpath *pNetpath); +void viport_free(viport_t *pViport); +void viport_disconnect(viport_t *pViport); +NDIS_STATUS viport_setLink(viport_t *pViport, uint8_t flags, uint16_t mtu); +NDIS_STATUS viport_getStats(viport_t *pViport ); + +void viport_timer( viport_t *p_viport, int timeout ); +void viport_timerStop( viport_t *p_viport ); + +void viport_linkUp(viport_t *pViport); +void viport_linkDown(viport_t *pViport); +void viport_stopXmit(viport_t *pViport); +void viport_restartXmit(viport_t *pViport); +void viport_recvPacket(viport_t *pViport, NDIS_PACKET *pkt ); +void viport_failure(viport_t *pViport); +BOOLEAN viport_setUnicast(viport_t *pViport, uint8_t *pAddress); +NDIS_STATUS viport_setMulticast( viport_t* const pViport ); +#define viport_portGuid(pViport) ((pViport)->portGuid) +#define viport_maxMtu(pViport) data_maxMtu(&(pViport)->data) + +#define viport_getHwAddr(pViport,pAddress) \ + cl_memcpy(pAddress, (pViport)->hwMacAddress, MAC_ADDR_LEN) + +#define viport_features(pViport) ((pViport)->featuresSupported) +#define viport_canTxCsum(pViport) (((pViport)->featuresSupported & \ + (INIC_FEAT_IPV4_CSUM_TX|INIC_FEAT_TCP_CSUM_TX|INIC_FEAT_UDP_CSUM_TX)) \ + == (INIC_FEAT_IPV4_CSUM_TX|INIC_FEAT_TCP_CSUM_TX|INIC_FEAT_UDP_CSUM_TX)) + + +void netpath_init( struct Netpath *pNetpath, struct _vnic_adapter *p_adapter ); +BOOLEAN netpath_addPath(struct Netpath *pNetpath, viport_t *pViport); +BOOLEAN netpath_removePath(struct Netpath *pNetpath, viport_t *pViport); +BOOLEAN netpath_getStats(struct Netpath *pNetpath ); +BOOLEAN netpath_setMulticast(struct Netpath *pNetpath, mc_list_t *mc_list, + int mc_count); +int netpath_maxMtu(struct Netpath *pNetpath); + +BOOLEAN +netpath_xmitPacket( + struct Netpath* pNetpath, + NDIS_PACKET* const p_pkt ); +void +netpath_recvPacket( + struct Netpath* pNetpath, + NDIS_PACKET* p_pkt ); + +void netpath_stopXmit(struct Netpath *pNetpath ); +void netpath_restartXmit(struct Netpath *pNetpath ); + +void netpath_kick(struct Netpath *pNetpath); +void netpath_timer(struct Netpath *pNetpath, int timeout); +void netpath_tx_timeout(struct Netpath *pNetpath); + +const char * netpath_to_string(struct _vnic_adapter *p_adapter, struct Netpath *pNetpath); +#define netpath_getHwAddr(pNetpath, pAddress) viport_getHwAddr((pNetpath)->pViport, pAddress) +#define 
netpath_isConnected(pNetpath) (pNetpath->state == NETPATH_CONNECTED)
+#define netpath_canTxCsum(pNetpath) viport_canTxCsum(pNetpath->pViport)
+
+ib_api_status_t
+viport_control_connect(
+	IN	viport_t* const	p_viport );
+ib_api_status_t
+viport_data_connect(
+	IN	viport_t* const	p_viport );
+
+NDIS_STATUS
+_viport_process_query(
+	IN	viport_t* const	p_viport,
+	IN	BOOLEAN			sync );
+
+
+#endif /* _VNIC_VIPORT_H_ */
diff --git a/branches/Ndi/ulp/ipoib/dirs b/branches/Ndi/ulp/ipoib/dirs
new file mode 100644
index 00000000..ed41dcf4
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+	kernel
diff --git a/branches/Ndi/ulp/ipoib/ip_stats.h b/branches/Ndi/ulp/ipoib/ip_stats.h
new file mode 100644
index 00000000..0182475c
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/ip_stats.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _IP_STATS_H_
+#define _IP_STATS_H_
+
+
+#include
+
+
+/****s* IB Network Drivers/ip_data_stats_t
+* NAME
+*	ip_data_stats_t
+*
+* DESCRIPTION
+*	Defines data transfer statistic information for an IP device.
+*
+* SYNOPSIS
+*/
+typedef struct _ip_data_stats
+{
+	uint64_t	bytes;
+	uint64_t	frames;
+
+} ip_data_stats_t;
+/*
+* FIELDS
+*	bytes
+*		Total number of bytes transferred.
+*
+*	frames
+*		Total number of frames transferred.
+*
+* SEE ALSO
+*	IPoIB, INIC, ip_comp_stats_t, ip_stats_t
+*********/
+
+
+/****s* IB Network Drivers/ip_comp_stats_t
+* NAME
+*	ip_comp_stats_t
+*
+* DESCRIPTION
+*	Defines transfer completion statistic information for an IP device.
+*
+* SYNOPSIS
+*/
+typedef struct _ip_comp_stats
+{
+	uint64_t	success;
+	uint64_t	error;
+	uint64_t	dropped;
+
+} ip_comp_stats_t;
+/*
+* FIELDS
+*	success
+*		Total number of requests transferred successfully.
+*
+*	error
+*		Total number of requests that failed to be transferred.
+*
+*	dropped
+*		Total number of requests that were dropped.
+*
+* SEE ALSO
+*	IPoIB, INIC, ip_data_stats_t, ip_stats_t
+*********/
+
+
+/****s* IB Network Drivers/ip_stats_t
+* NAME
+*	ip_stats_t
+*
+* DESCRIPTION
+*	Defines statistic information for an IP device.
+*
+* SYNOPSIS
+*/
+typedef struct _ip_stats
+{
+	ip_comp_stats_t	comp;
+	ip_data_stats_t	ucast;
+	ip_data_stats_t	bcast;
+	ip_data_stats_t	mcast;
+
+} ip_stats_t;
+/*
+* FIELDS
+*	comp
+*		Request completion statistics.
+* +* ucast +* Data statistics for unicast packets +* +* bcast +* Data statistics for broadcast packets +* +* mcast +* Data statistics for multicast packets +* +* SEE ALSO +* IPoIB, INIC, ip_data_stats_t, ip_comp_stats_t +*********/ + + +typedef enum _ip_stat_sel +{ + IP_STAT_SUCCESS, + IP_STAT_ERROR, + IP_STAT_DROPPED, + IP_STAT_UCAST_BYTES, + IP_STAT_UCAST_FRAMES, + IP_STAT_BCAST_BYTES, + IP_STAT_BCAST_FRAMES, + IP_STAT_MCAST_BYTES, + IP_STAT_MCAST_FRAMES + +} ip_stat_sel_t; + +#endif /* _IP_STATS_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/SOURCES b/branches/Ndi/ulp/ipoib/kernel/SOURCES new file mode 100644 index 00000000..d98b10fd --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/SOURCES @@ -0,0 +1,48 @@ +TARGETNAME=ipoib +TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR) +TARGETTYPE=DRIVER + + +!if $(FREEBUILD) +ENABLE_EVENT_TRACING=1 +!else +#ENABLE_EVENT_TRACING=1 +!endif + + +SOURCES= ipoib_log.mc \ + ipoib.rc \ + ipoib_driver.c \ + ipoib_adapter.c \ + ipoib_endpoint.c \ + ipoib_port.c \ + ipoib_ibat.c + +INCLUDES=..;..\..\..\inc;..\..\..\inc\kernel; + +C_DEFINES=$(C_DEFINES) -DNDIS_MINIPORT_DRIVER -DNDIS_WDM=1 \ + -DDEPRECATE_DDK_FUNCTIONS -DNDIS51_MINIPORT -DNEED_CL_OBJ -DBINARY_COMPATIBLE=0 + +TARGETLIBS= \ + $(TARGETPATH)\*\complib.lib \ + $(DDK_LIB_PATH)\ndis.lib + +!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K" +# +# The driver is built in the Win2K build environment +# - use the library version of safe strings +# +TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\ntstrsafe.lib +!endif + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ + -scan:ipoib_debug.h \ + -func:IPOIB_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:IPOIB_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) +!ENDIF + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib.rc b/branches/Ndi/ulp/ipoib/kernel/ipoib.rc new file mode 100644 index 00000000..525372fd --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib.rc @@ -0,0 +1,48 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "IP over InfiniBand NDIS Miniport (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "IP over InfiniBand NDIS Miniport" +#endif + +#define VER_INTERNALNAME_STR "ipoib.sys" +#define VER_ORIGINALFILENAME_STR "ipoib.sys" + +#include +#include "ipoib_log.rc" diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.c b/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.c new file mode 100644 index 00000000..27e0cb18 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.c @@ -0,0 +1,1337 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "ipoib_adapter.h" +#include "ipoib_port.h" +#include "ipoib_driver.h" +#include "ipoib_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ipoib_adapter.tmh" +#endif + + +#define ITEM_POOL_START 16 +#define ITEM_POOL_GROW 16 + + +/* IB Link speeds in 100bps */ +#define ONE_X_IN_100BPS 25000000 +#define FOUR_X_IN_100BPS 100000000 +#define TWELVE_X_IN_100BPS 300000000 + + +/* Declarations */ +static void +adapter_construct( + IN ipoib_adapter_t* const p_adapter ); + + +static ib_api_status_t +adapter_init( + IN ipoib_adapter_t* const p_adapter ); + + +static void +__adapter_destroying( + IN cl_obj_t* const p_obj ); + + +static void +__adapter_cleanup( + IN cl_obj_t* const p_obj ); + + +static void +__adapter_free( + IN cl_obj_t* const p_obj ); + + +static ib_api_status_t +__ipoib_pnp_reg( + IN ipoib_adapter_t* const p_adapter, + IN ib_pnp_class_t flags ); + + +static void +__ipoib_pnp_dereg( + IN void* context ); + + +static void +__ipoib_adapter_reset( + IN void* context); + + +static ib_api_status_t +__ipoib_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + + +void +ipoib_join_mcast( + IN ipoib_adapter_t* const p_adapter ); + + +/* Leaves all mcast groups when port goes down. 
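+ * A minimal sketch of the shape such a helper could take, assuming it
+ * walks the same mcast_array that ipoib_refresh_mcast maintains
+ * (illustrative only, not the actual implementation):
+ *
+ *	for( i = 0; i < p_adapter->mcast_array_size; i++ )
+ *		ipoib_port_remove_endpt( p_port, p_adapter->mcast_array[i] );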
*/ +static void +ipoib_clear_mcast( + IN ipoib_port_t* const p_port ); + +NDIS_STATUS +ipoib_get_adapter_guids( + IN NDIS_HANDLE* const h_adapter, + IN OUT ipoib_adapter_t *p_adapter ); + +NDIS_STATUS +ipoib_get_adapter_params( + IN NDIS_HANDLE* const wrapper_config_context, + IN OUT ipoib_adapter_t *p_adapter ); + + +/* Implementation */ +ib_api_status_t +ipoib_create_adapter( + IN NDIS_HANDLE wrapper_config_context, + IN void* const h_adapter, + OUT ipoib_adapter_t** const pp_adapter ) +{ + ipoib_adapter_t *p_adapter; + ib_api_status_t status; + cl_status_t cl_status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_adapter = cl_zalloc( sizeof(ipoib_adapter_t) ); + if( !p_adapter ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate ipoib_adapter_t (%d bytes)", + sizeof(ipoib_adapter_t)) ); + return IB_INSUFFICIENT_MEMORY; + } + + adapter_construct( p_adapter ); + + p_adapter->h_adapter = h_adapter; + + p_adapter->p_ifc = cl_zalloc( sizeof(ib_al_ifc_t) ); + if( !p_adapter->p_ifc ) + { + __adapter_free( &p_adapter->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_create_adapter failed to alloc ipoib_ifc_t %d bytes\n", + sizeof(ib_al_ifc_t)) ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Get the CA and port GUID from the bus driver. */ + status = ipoib_get_adapter_guids( h_adapter, p_adapter ); + if( status != NDIS_STATUS_SUCCESS ) + { + __adapter_free( &p_adapter->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_get_adapter_guids returned 0x%.8X.\n", status) ); + return status; + } + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Port %016I64x (CA %016I64x port %d) initializing\n", + p_adapter->guids.port_guid, p_adapter->guids.ca_guid, + p_adapter->guids.port_num) ); + + cl_status = cl_obj_init( &p_adapter->obj, CL_DESTROY_SYNC, + __adapter_destroying, NULL, __adapter_free ); + if( cl_status != CL_SUCCESS ) + { + __adapter_free( &p_adapter->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_obj_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + status = adapter_init( p_adapter ); + if( status != IB_SUCCESS ) + { + cl_obj_destroy( &p_adapter->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("adapter_init returned %s.\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Read configuration parameters. */ + status = ipoib_get_adapter_params( wrapper_config_context, + p_adapter ); + if( status != NDIS_STATUS_SUCCESS ) + { + cl_obj_destroy( &p_adapter->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_get_adapter_params returned 0x%.8x.\n", status) ); + return status; + } + + *pp_adapter = p_adapter; + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +ib_api_status_t +ipoib_start_adapter( + IN ipoib_adapter_t* const p_adapter ) +{ + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + status = __ipoib_pnp_reg( p_adapter, + IB_PNP_FLAG_REG_SYNC | IB_PNP_FLAG_REG_COMPLETE ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +void +ipoib_destroy_adapter( + IN ipoib_adapter_t* const p_adapter ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( p_adapter ); + + /* + * Flag the adapter as being removed. We use the IB_PNP_PORT_REMOVE state + * for this purpose. Note that we protect this state change with both the + * mutex and the lock. The mutex provides synchronization as a whole + * between destruction and AL callbacks (PnP, Query, Destruction). 
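+	 * A PnP callback that samples the state under the mutex and sees
+	 * IB_PNP_PORT_REMOVE bails out (see __ipoib_pnp_cb) before touching
+	 * resources that destruction is about to tear down.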
+ * The lock provides protection + */ + KeWaitForMutexObject( + &p_adapter->mutex, Executive, KernelMode, FALSE, NULL ); + cl_obj_lock( &p_adapter->obj ); + p_adapter->state = IB_PNP_PORT_REMOVE; + + /* + * Clear the pointer to the port object since the object destruction + * will cascade to child objects. This prevents potential duplicate + * destruction (or worse, stale pointer usage). + */ + p_adapter->p_port = NULL; + + cl_obj_unlock( &p_adapter->obj ); + + KeReleaseMutex( &p_adapter->mutex, FALSE ); + + cl_obj_destroy( &p_adapter->obj ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +adapter_construct( + IN ipoib_adapter_t* const p_adapter ) +{ + cl_obj_construct( &p_adapter->obj, IPOIB_OBJ_INSTANCE ); + cl_spinlock_construct( &p_adapter->send_stat_lock ); + cl_spinlock_construct( &p_adapter->recv_stat_lock ); + cl_qpool_construct( &p_adapter->item_pool ); + KeInitializeMutex( &p_adapter->mutex, 0 ); + + cl_thread_construct(&p_adapter->destroy_thread); + + cl_vector_construct( &p_adapter->ip_vector ); + + cl_perf_construct( &p_adapter->perf ); + + p_adapter->state = IB_PNP_PORT_ADD; + p_adapter->rate = FOUR_X_IN_100BPS; +} + + +static ib_api_status_t +adapter_init( + IN ipoib_adapter_t* const p_adapter ) +{ + cl_status_t cl_status; + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + cl_status = cl_perf_init( &p_adapter->perf, MaxPerf ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_perf_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + cl_status = cl_spinlock_init( &p_adapter->send_stat_lock ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + cl_status = cl_spinlock_init( &p_adapter->recv_stat_lock ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + cl_status = cl_qpool_init( &p_adapter->item_pool, ITEM_POOL_START, 0, + ITEM_POOL_GROW, sizeof(cl_pool_obj_t), NULL, NULL, NULL ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_qpool_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + + /* We manually manage the size and capacity of the vector. */ + cl_status = cl_vector_init( &p_adapter->ip_vector, 0, + 0, sizeof(net_address_item_t), NULL, NULL, p_adapter ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_vector_init for ip_vector returned %s\n", + CL_STATUS_MSG(cl_status)) ); + return IB_ERROR; + } + + + /* Validate the port GUID and generate the MAC address. */ + status = + ipoib_mac_from_guid( p_adapter->guids.port_guid, &p_adapter->mac ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_mac_from_guid returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + + + /* Open AL. 
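+	 * The handle comes through the interface table exported by the bus
+	 * driver (p_ifc) and is released by close_al in __adapter_free.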
*/ + status = p_adapter->p_ifc->open_al( &p_adapter->h_al ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_open_al returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +static ib_api_status_t +__ipoib_pnp_reg( + IN ipoib_adapter_t* const p_adapter, + IN ib_pnp_class_t flags ) +{ + ib_api_status_t status; + ib_pnp_req_t pnp_req; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( !p_adapter->h_pnp ); + CL_ASSERT( !p_adapter->registering ); + + p_adapter->registering = TRUE; + + /* Register for PNP events. */ + cl_memclr( &pnp_req, sizeof(pnp_req) ); + pnp_req.pnp_class = IB_PNP_PORT | flags; + /* + * Context is the cl_obj of the adapter to allow passing cl_obj_deref + * to ib_dereg_pnp. + */ + pnp_req.pnp_context = &p_adapter->obj; + pnp_req.pfn_pnp_cb = __ipoib_pnp_cb; + status = p_adapter->p_ifc->reg_pnp( p_adapter->h_al, &pnp_req, &p_adapter->h_pnp ); + if( status != IB_SUCCESS ) + { + p_adapter->registering = FALSE; + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_reg_pnp returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + /* + * Reference the adapter on behalf of the PNP registration. + * This allows the destruction to block until the PNP deregistration + * completes. + */ + cl_obj_ref( &p_adapter->obj ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +static void +__adapter_destroying( + IN cl_obj_t* const p_obj ) +{ + ipoib_adapter_t *p_adapter; + KLOCK_QUEUE_HANDLE hdl; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_adapter = PARENT_STRUCT( p_obj, ipoib_adapter_t, obj ); + + /* + * The adapter's object will be dereferenced when the deregistration + * completes. No need to lock here since all PnP related API calls + * are driven by NDIS (via the Init/Reset/Destroy paths). 
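+	 * Passing cl_obj_deref as the deregistration callback releases the
+	 * reference taken on behalf of the registration in __ipoib_pnp_reg.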
+ */ + if( p_adapter->h_pnp ) + { + p_adapter->p_ifc->dereg_pnp( p_adapter->h_pnp, cl_obj_deref ); + p_adapter->h_pnp = NULL; + } + + if( p_adapter->packet_filter ) + { + KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl ); + cl_obj_lock( &p_adapter->obj ); + + ASSERT( cl_qlist_count( &g_ipoib.adapter_list ) ); + cl_qlist_remove_item( &g_ipoib.adapter_list, &p_adapter->entry ); + + p_adapter->packet_filter = 0; + + cl_obj_unlock( &p_adapter->obj ); + KeReleaseInStackQueuedSpinLock( &hdl ); + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +__adapter_free( + IN cl_obj_t* const p_obj ) +{ + ipoib_adapter_t *p_adapter; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_adapter = PARENT_STRUCT( p_obj, ipoib_adapter_t, obj ); + + if( p_adapter->p_ifc ) + { + if( p_adapter->h_al ) + p_adapter->p_ifc->close_al( p_adapter->h_al ); + + cl_free( p_adapter->p_ifc ); + } + + cl_vector_destroy( &p_adapter->ip_vector ); + cl_qpool_destroy( &p_adapter->item_pool ); + cl_spinlock_destroy( &p_adapter->recv_stat_lock ); + cl_spinlock_destroy( &p_adapter->send_stat_lock ); + cl_obj_deinit( p_obj ); + + cl_perf_destroy( &p_adapter->perf, TRUE ); + + cl_free( p_adapter ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__ipoib_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_api_status_t status; + ipoib_adapter_t *p_adapter; + ipoib_port_t *p_port; + ib_pnp_event_t old_state; + ib_pnp_port_rec_t *p_port_rec; + + IPOIB_ENTER( IPOIB_DBG_PNP ); + + CL_ASSERT( p_pnp_rec ); + + p_adapter = + PARENT_STRUCT( p_pnp_rec->pnp_context, ipoib_adapter_t, obj ); + + CL_ASSERT( p_adapter ); + + /* Synchronize with destruction */ + KeWaitForMutexObject( + &p_adapter->mutex, Executive, KernelMode, FALSE, NULL ); + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + cl_obj_unlock( &p_adapter->obj ); + if( old_state == IB_PNP_PORT_REMOVE ) + { + KeReleaseMutex( &p_adapter->mutex, FALSE ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_PNP, + ("Aborting - Adapter destroying.\n") ); + return IB_NOT_DONE; + } + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_PNP, + ("p_pnp_rec->pnp_event = 0x%x (%s)\n", + p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) ); + + p_port_rec = (ib_pnp_port_rec_t*)p_pnp_rec; + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_PORT_ADD: + CL_ASSERT( !p_pnp_rec->context ); + /* Only process our port GUID. */ + if( p_pnp_rec->guid != p_adapter->guids.port_guid ) + { + status = IB_NOT_DONE; + break; + } + + /* Don't process if we're destroying. */ + if( p_adapter->obj.state == CL_DESTROYING ) + { + status = IB_NOT_DONE; + break; + } + + CL_ASSERT( !p_adapter->p_port ); + /* Allocate all IB resources. */ + cl_obj_lock( &p_adapter->obj ); + p_adapter->state = IB_PNP_PORT_ADD; + cl_obj_unlock( &p_adapter->obj ); + status = ipoib_create_port( p_adapter, p_port_rec, &p_port ); + cl_obj_lock( &p_adapter->obj ); + if( status != IB_SUCCESS ) + { + p_adapter->state = old_state; + cl_obj_unlock( &p_adapter->obj ); + p_adapter->hung = TRUE; + break; + } + + p_pnp_rec->context = p_port; + + p_adapter->p_port = p_port; + cl_obj_unlock( &p_adapter->obj ); + break; + + case IB_PNP_PORT_REMOVE: + /* Release all IB resources. 
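+		 * The state and the port pointer are both updated under the
+		 * object lock before ipoib_port_destroy runs, so concurrent
+		 * callers never observe a stale p_port.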
*/ + CL_ASSERT( p_pnp_rec->context ); + + cl_obj_lock( &p_adapter->obj ); + p_adapter->state = IB_PNP_PORT_REMOVE; + p_port = p_adapter->p_port; + p_adapter->p_port = NULL; + cl_obj_unlock( &p_adapter->obj ); + ipoib_port_destroy( p_port ); + p_pnp_rec->context = NULL; + status = IB_SUCCESS; + break; + + case IB_PNP_PORT_ACTIVE: + /* Join multicast groups and put QP in RTS. */ + CL_ASSERT( p_pnp_rec->context ); + + cl_obj_lock( &p_adapter->obj ); + p_adapter->state = IB_PNP_PORT_INIT; + cl_obj_unlock( &p_adapter->obj ); + ipoib_port_up( p_adapter->p_port, p_port_rec ); + + status = IB_SUCCESS; + break; + + case IB_PNP_PORT_ARMED: + status = IB_SUCCESS; + break; + + case IB_PNP_PORT_INIT: + /* + * Init could happen if the SM brings the port down + * without changing the physical link. + */ + case IB_PNP_PORT_DOWN: + CL_ASSERT( p_pnp_rec->context ); + + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + p_adapter->state = IB_PNP_PORT_DOWN; + cl_obj_unlock( &p_adapter->obj ); + status = IB_SUCCESS; + + if( !p_adapter->registering && old_state != IB_PNP_PORT_DOWN ) + { + NdisMIndicateStatus( p_adapter->h_adapter, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link DOWN!\n") ); + + ipoib_port_down( p_adapter->p_port ); + } + break; + + case IB_PNP_REG_COMPLETE: + if( p_adapter->registering ) + { + p_adapter->registering = FALSE; + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + cl_obj_unlock( &p_adapter->obj ); + + if( old_state == IB_PNP_PORT_DOWN ) + { + /* If we were initializing, we might have pended some OIDs. */ + ipoib_resume_oids( p_adapter ); + NdisMIndicateStatus( p_adapter->h_adapter, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); + } + } + + if( p_adapter->reset && p_adapter->state != IB_PNP_PORT_INIT ) + { + p_adapter->reset = FALSE; + NdisMResetComplete( + p_adapter->h_adapter, NDIS_STATUS_SUCCESS, TRUE ); + } + status = IB_SUCCESS; + break; + + default: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("IPOIB: Received unhandled PnP event 0x%x (%s)\n", + p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) ); + /* Fall through. */ + case IB_PNP_PKEY_CHANGE: + case IB_PNP_SM_CHANGE: + case IB_PNP_GID_CHANGE: + case IB_PNP_LID_CHANGE: + status = IB_SUCCESS; + + /* We ignore this event if the link is not active. */ + if( p_port_rec->p_port_attr->link_state != IB_LINK_ACTIVE ) + break; + + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + switch( old_state ) + { + case IB_PNP_PORT_DOWN: + p_adapter->state = IB_PNP_PORT_INIT; + break; + + default: + p_adapter->state = IB_PNP_PORT_DOWN; + } + cl_obj_unlock( &p_adapter->obj ); + + if( p_adapter->registering ) + break; + + switch( old_state ) + { + case IB_PNP_PORT_ACTIVE: + case IB_PNP_PORT_INIT: + NdisMIndicateStatus( p_adapter->h_adapter, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link DOWN!\n") ); + + ipoib_port_down( p_adapter->p_port ); + /* Fall through. 
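+		 * Falling into the PORT_DOWN handling below treats the change
+		 * as a link bounce: the state moves to IB_PNP_PORT_INIT and
+		 * ipoib_port_up re-establishes the port with the new attributes.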
*/ + + case IB_PNP_PORT_DOWN: + cl_obj_lock( &p_adapter->obj ); + p_adapter->state = IB_PNP_PORT_INIT; + cl_obj_unlock( &p_adapter->obj ); + ipoib_port_up( p_adapter->p_port, (ib_pnp_port_rec_t*)p_pnp_rec ); + } + break; + } + + KeReleaseMutex( &p_adapter->mutex, FALSE ); + + IPOIB_EXIT( IPOIB_DBG_PNP ); + return status; +} + + +/* Joins/leaves mcast groups based on currently programmed mcast MACs. */ +void +ipoib_refresh_mcast( + IN ipoib_adapter_t* const p_adapter, + IN mac_addr_t* const p_mac_array, + IN const uint8_t num_macs ) +{ + uint8_t i, j; + ipoib_port_t *p_port = NULL; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + cl_obj_lock( &p_adapter->obj ); + if( p_adapter->state == IB_PNP_PORT_ACTIVE ) + { + p_port = p_adapter->p_port; + ipoib_port_ref( p_port, ref_refresh_mcast ); + } + cl_obj_unlock( &p_adapter->obj ); + + if( p_port ) + { + /* Purge old entries. */ + for( i = 0; i < p_adapter->mcast_array_size; i++ ) + { + for( j = 0; j < num_macs; j++ ) + { + if( !cl_memcmp( &p_adapter->mcast_array[i], &p_mac_array[j], + sizeof(mac_addr_t) ) ) + { + break; + } + } + if( j != num_macs ) + continue; + + ipoib_port_remove_endpt( p_port, p_adapter->mcast_array[i] ); + } + + /* Add new entries */ + for( i = 0; i < num_macs; i++ ) + { + for( j = 0; j < p_adapter->mcast_array_size; j++ ) + { + if( !cl_memcmp( &p_adapter->mcast_array[j], &p_mac_array[i], + sizeof(mac_addr_t) ) ) + { + break; + } + } + + if( j != p_adapter->mcast_array_size ) + continue; + + ipoib_port_join_mcast( p_port, p_mac_array[i] ,IB_MC_REC_STATE_FULL_MEMBER); + } + } + + /* Copy the MAC array. */ + NdisMoveMemory( p_adapter->mcast_array, p_mac_array, + num_macs * sizeof(mac_addr_t) ); + p_adapter->mcast_array_size = num_macs; + + if( p_port ) + ipoib_port_deref( p_port, ref_refresh_mcast ); + + IPOIB_EXIT( IPOIB_DBG_MCAST ); +} + + +ib_api_status_t +ipoib_reset_adapter( + IN ipoib_adapter_t* const p_adapter ) +{ + ib_api_status_t status; + ib_pnp_handle_t h_pnp; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + if( p_adapter->reset ) + return IB_INVALID_STATE; + + p_adapter->hung = FALSE; + p_adapter->reset = TRUE; + + if( p_adapter->h_pnp ) + { + h_pnp = p_adapter->h_pnp; + p_adapter->h_pnp = NULL; + status = p_adapter->p_ifc->dereg_pnp( h_pnp, __ipoib_pnp_dereg ); + if( status == IB_SUCCESS ) + status = IB_NOT_DONE; + } + else + { + status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_COMPLETE ); + if( status == IB_SUCCESS ) + p_adapter->hung = FALSE; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +static void +__ipoib_pnp_dereg( + IN void* context ) +{ + ipoib_adapter_t* p_adapter; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_adapter = PARENT_STRUCT( context, ipoib_adapter_t, obj ); + + cl_thread_init(&p_adapter->destroy_thread, __ipoib_adapter_reset, (void*)p_adapter, "destroy_thread"); + + IPOIB_ENTER( IPOIB_DBG_INIT ); + +} + +static void +__ipoib_adapter_reset( + IN void* context) +{ + + ipoib_adapter_t *p_adapter; + ipoib_port_t *p_port; + ib_api_status_t status; + ib_pnp_event_t state; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_adapter = (ipoib_adapter_t*)context; + + /* Synchronize with destruction */ + KeWaitForMutexObject( + &p_adapter->mutex, Executive, KernelMode, FALSE, NULL ); + + cl_obj_lock( &p_adapter->obj ); + + CL_ASSERT( !p_adapter->h_pnp ); + + if( p_adapter->state != IB_PNP_PORT_REMOVE ) + p_adapter->state = IB_PNP_PORT_ADD; + + state = p_adapter->state; + + /* Destroy the current port instance if it still exists. 
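+	 * The pointer is detached under the lock but the destroy runs
+	 * outside it, since ipoib_port_destroy may block while the port's
+	 * child objects are torn down.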
*/ + p_port = p_adapter->p_port; + p_adapter->p_port = NULL; + cl_obj_unlock( &p_adapter->obj ); + + if( p_port ) + ipoib_port_destroy( p_port ); + + if( state != IB_PNP_PORT_REMOVE ) + { + status = __ipoib_pnp_reg( p_adapter, IB_PNP_FLAG_REG_COMPLETE ); + if( status != IB_SUCCESS ) + { + p_adapter->reset = FALSE; + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__ipoib_pnp_reg returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + NdisMResetComplete( + p_adapter->h_adapter, NDIS_STATUS_HARD_ERRORS, TRUE ); + } + } + else + { + p_adapter->reset = FALSE; + NdisMResetComplete( + p_adapter->h_adapter, NDIS_STATUS_SUCCESS, TRUE ); + status = IB_SUCCESS; + } + + /* Dereference the adapter since the previous registration is now gone. */ + cl_obj_deref( &p_adapter->obj ); + + KeReleaseMutex( &p_adapter->mutex, FALSE ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +void +ipoib_set_rate( + IN ipoib_adapter_t* const p_adapter, + IN const uint8_t link_width, + IN const uint8_t link_speed ) +{ + uint32_t rate; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* Set the link speed based on the IB link speed (1x vs 4x, etc). */ + switch( link_speed ) + { + case IB_LINK_SPEED_ACTIVE_2_5: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link speed is 2.5Gs\n") ); + rate = IB_LINK_SPEED_ACTIVE_2_5; + break; + + case IB_LINK_SPEED_ACTIVE_5: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link speed is 5G\n") ); + rate = IB_LINK_SPEED_ACTIVE_5; + break; + + case IB_LINK_SPEED_ACTIVE_10: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link speed is 10G\n") ); + rate = IB_LINK_SPEED_ACTIVE_10; + break; + + default: + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid link speed %d.\n", link_speed) ); + rate = 0; + } + + switch( link_width ) + { + case IB_LINK_WIDTH_ACTIVE_1X: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link width is 1X\n") ); + rate *= ONE_X_IN_100BPS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link width is 4X\n") ); + rate *= FOUR_X_IN_100BPS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Link width is 12X\n") ); + rate *= TWELVE_X_IN_100BPS; + break; + + default: + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid link rate (%d).\n", link_width) ); + rate = 0; + } + + p_adapter->rate = rate; + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +void +ipoib_set_active( + IN ipoib_adapter_t* const p_adapter ) +{ + ib_pnp_event_t old_state; + uint8_t i; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + + /* Change the state to indicate that we are now connected and live. */ + if( old_state == IB_PNP_PORT_INIT ) + p_adapter->state = IB_PNP_PORT_ACTIVE; + + cl_obj_unlock( &p_adapter->obj ); + + /* + * If we had a pending OID request for OID_GEN_LINK_SPEED, + * complete it now. + */ + switch( old_state ) + { + case IB_PNP_PORT_ADD: + ipoib_reg_addrs( p_adapter ); + /* Fall through. */ + + case IB_PNP_PORT_REMOVE: + ipoib_resume_oids( p_adapter ); + break; + + default: + /* Join all programmed multicast groups. */ + for( i = 0; i < p_adapter->mcast_array_size; i++ ) + { + ipoib_port_join_mcast( + p_adapter->p_port, p_adapter->mcast_array[i] ,IB_MC_REC_STATE_FULL_MEMBER); + } + + /* Register all existing addresses. */ + ipoib_reg_addrs( p_adapter ); + + ipoib_resume_oids( p_adapter ); + + /* + * Now that we're in the broadcast group, notify that + * we have a link. 
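+	 * The event code below is offset by the link multiple: rate was set
+	 * by ipoib_set_rate to (speed encoding) * (width constant), so a 4X
+	 * SDR link gives rate = 1 * FOUR_X_IN_100BPS = 100,000,000 (10 Gb/s)
+	 * and rate/ONE_X_IN_100BPS = 4, assuming IB_LINK_SPEED_ACTIVE_2_5,
+	 * _5 and _10 carry the IBTA encodings 1, 2 and 4.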
+ */ + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, ("Link UP!\n") ); + NdisWriteErrorLogEntry( p_adapter->h_adapter, + EVENT_IPOIB_PORT_UP + (p_adapter->rate/ONE_X_IN_100BPS), + 1, p_adapter->rate ); + + if( !p_adapter->reset ) + { + NdisMIndicateStatus( p_adapter->h_adapter, NDIS_STATUS_MEDIA_CONNECT, + NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); + } + } + + if( p_adapter->reset ) + { + p_adapter->reset = FALSE; + NdisMResetComplete( + p_adapter->h_adapter, NDIS_STATUS_SUCCESS, TRUE ); + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +/* + * If something goes wrong after the port goes active, e.g. + * - PortInfo query failure + * - MC Join timeout + * - etc + * Mark the port state as down, resume any pended OIDS, etc. + */ +void +ipoib_set_inactive( + IN ipoib_adapter_t* const p_adapter ) +{ + ib_pnp_event_t old_state; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + cl_obj_lock( &p_adapter->obj ); + old_state = p_adapter->state; + if( old_state != IB_PNP_PORT_REMOVE ) + p_adapter->state = IB_PNP_PORT_DOWN; + cl_obj_unlock( &p_adapter->obj ); + + /* + * If we had a pending OID request for OID_GEN_LINK_SPEED, + * complete it now. + */ + if( old_state == IB_PNP_PORT_INIT ) + { + NdisMIndicateStatus( p_adapter->h_adapter, + NDIS_STATUS_MEDIA_DISCONNECT, NULL, 0 ); + NdisMIndicateStatusComplete( p_adapter->h_adapter ); + + ipoib_resume_oids( p_adapter ); + } + + if( p_adapter->reset ) + { + p_adapter->reset = FALSE; + NdisMResetComplete( + p_adapter->h_adapter, NDIS_STATUS_SUCCESS, TRUE ); + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +NDIS_STATUS +ipoib_get_recv_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN pending_oid_t* const p_oid_info ) +{ + uint64_t stat; + + IPOIB_ENTER( IPOIB_DBG_STAT ); + + CL_ASSERT( p_adapter ); + + cl_spinlock_acquire( &p_adapter->recv_stat_lock ); + switch( stat_sel ) + { + case IP_STAT_SUCCESS: + stat = p_adapter->recv_stats.comp.success; + break; + + case IP_STAT_ERROR: + stat = p_adapter->recv_stats.comp.error; + break; + + case IP_STAT_DROPPED: + stat = p_adapter->recv_stats.comp.dropped; + break; + + case IP_STAT_UCAST_BYTES: + stat = p_adapter->recv_stats.ucast.bytes; + break; + + case IP_STAT_UCAST_FRAMES: + stat = p_adapter->recv_stats.ucast.frames; + break; + + case IP_STAT_BCAST_BYTES: + stat = p_adapter->recv_stats.bcast.bytes; + break; + + case IP_STAT_BCAST_FRAMES: + stat = p_adapter->recv_stats.bcast.frames; + break; + + case IP_STAT_MCAST_BYTES: + stat = p_adapter->recv_stats.mcast.bytes; + break; + + case IP_STAT_MCAST_FRAMES: + stat = p_adapter->recv_stats.mcast.frames; + break; + + default: + stat = 0; + } + cl_spinlock_release( &p_adapter->recv_stat_lock ); + + *p_oid_info->p_bytes_needed = sizeof(uint64_t); + + if( p_oid_info->buf_len >= sizeof(uint64_t) ) + { + *((uint64_t*)p_oid_info->p_buf) = stat; + *p_oid_info->p_bytes_used = sizeof(uint64_t); + } + else if( p_oid_info->buf_len >= sizeof(uint32_t) ) + { + *((uint32_t*)p_oid_info->p_buf) = (uint32_t)stat; + *p_oid_info->p_bytes_used = sizeof(uint32_t); + } + else + { + *p_oid_info->p_bytes_used = 0; + IPOIB_EXIT( IPOIB_DBG_STAT ); + return NDIS_STATUS_INVALID_LENGTH; + } + + IPOIB_EXIT( IPOIB_DBG_STAT ); + return NDIS_STATUS_SUCCESS; +} + + +void +ipoib_inc_recv_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN const size_t bytes OPTIONAL ) +{ + IPOIB_ENTER( IPOIB_DBG_STAT ); + + cl_spinlock_acquire( &p_adapter->recv_stat_lock ); + switch( stat_sel ) + { + case IP_STAT_ERROR: + 
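+		/*
+		 * The bytes argument is ignored for the error and dropped
+		 * selectors.  The data-bearing selectors below treat *_BYTES
+		 * and *_FRAMES identically, bumping both the frame and byte
+		 * counters in one call, e.g. (illustrative):
+		 *	ipoib_inc_recv_stat( p_adapter, IP_STAT_UCAST_BYTES, len );
+		 */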
p_adapter->recv_stats.comp.error++; + break; + + case IP_STAT_DROPPED: + p_adapter->recv_stats.comp.dropped++; + break; + + case IP_STAT_UCAST_BYTES: + case IP_STAT_UCAST_FRAMES: + p_adapter->recv_stats.comp.success++; + p_adapter->recv_stats.ucast.frames++; + p_adapter->recv_stats.ucast.bytes += bytes; + break; + + case IP_STAT_BCAST_BYTES: + case IP_STAT_BCAST_FRAMES: + p_adapter->recv_stats.comp.success++; + p_adapter->recv_stats.bcast.frames++; + p_adapter->recv_stats.bcast.bytes += bytes; + break; + + case IP_STAT_MCAST_BYTES: + case IP_STAT_MCAST_FRAMES: + p_adapter->recv_stats.comp.success++; + p_adapter->recv_stats.mcast.frames++; + p_adapter->recv_stats.mcast.bytes += bytes; + break; + + default: + break; + } + cl_spinlock_release( &p_adapter->recv_stat_lock ); + + IPOIB_EXIT( IPOIB_DBG_STAT ); +} + +NDIS_STATUS +ipoib_get_send_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN pending_oid_t* const p_oid_info ) +{ + uint64_t stat; + + IPOIB_ENTER( IPOIB_DBG_STAT ); + + CL_ASSERT( p_adapter ); + + cl_spinlock_acquire( &p_adapter->send_stat_lock ); + switch( stat_sel ) + { + case IP_STAT_SUCCESS: + stat = p_adapter->send_stats.comp.success; + break; + + case IP_STAT_ERROR: + stat = p_adapter->send_stats.comp.error; + break; + + case IP_STAT_DROPPED: + stat = p_adapter->send_stats.comp.dropped; + break; + + case IP_STAT_UCAST_BYTES: + stat = p_adapter->send_stats.ucast.bytes; + break; + + case IP_STAT_UCAST_FRAMES: + stat = p_adapter->send_stats.ucast.frames; + break; + + case IP_STAT_BCAST_BYTES: + stat = p_adapter->send_stats.bcast.bytes; + break; + + case IP_STAT_BCAST_FRAMES: + stat = p_adapter->send_stats.bcast.frames; + break; + + case IP_STAT_MCAST_BYTES: + stat = p_adapter->send_stats.mcast.bytes; + break; + + case IP_STAT_MCAST_FRAMES: + stat = p_adapter->send_stats.mcast.frames; + break; + + default: + stat = 0; + } + cl_spinlock_release( &p_adapter->send_stat_lock ); + + *p_oid_info->p_bytes_needed = sizeof(uint64_t); + + if( p_oid_info->buf_len >= sizeof(uint64_t) ) + { + *((uint64_t*)p_oid_info->p_buf) = stat; + *p_oid_info->p_bytes_used = sizeof(uint64_t); + } + else if( p_oid_info->buf_len >= sizeof(uint32_t) ) + { + *((uint32_t*)p_oid_info->p_buf) = (uint32_t)stat; + *p_oid_info->p_bytes_used = sizeof(uint32_t); + } + else + { + *p_oid_info->p_bytes_used = 0; + IPOIB_EXIT( IPOIB_DBG_STAT ); + return NDIS_STATUS_INVALID_LENGTH; + } + + IPOIB_EXIT( IPOIB_DBG_STAT ); + return NDIS_STATUS_SUCCESS; +} + + +void +ipoib_inc_send_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN const size_t bytes OPTIONAL ) +{ + IPOIB_ENTER( IPOIB_DBG_STAT ); + + cl_spinlock_acquire( &p_adapter->send_stat_lock ); + switch( stat_sel ) + { + case IP_STAT_ERROR: + p_adapter->send_stats.comp.error++; + break; + + case IP_STAT_DROPPED: + p_adapter->send_stats.comp.dropped++; + break; + + case IP_STAT_UCAST_BYTES: + case IP_STAT_UCAST_FRAMES: + p_adapter->send_stats.comp.success++; + p_adapter->send_stats.ucast.frames++; + p_adapter->send_stats.ucast.bytes += bytes; + break; + + case IP_STAT_BCAST_BYTES: + case IP_STAT_BCAST_FRAMES: + p_adapter->send_stats.comp.success++; + p_adapter->send_stats.bcast.frames++; + p_adapter->send_stats.bcast.bytes += bytes; + break; + + case IP_STAT_MCAST_BYTES: + case IP_STAT_MCAST_FRAMES: + p_adapter->send_stats.comp.success++; + p_adapter->send_stats.mcast.frames++; + p_adapter->send_stats.mcast.bytes += bytes; + break; + + default: + break; + } + cl_spinlock_release( &p_adapter->send_stat_lock 
); + + IPOIB_EXIT( IPOIB_DBG_STAT ); +} diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.h new file mode 100644 index 00000000..58de4eff --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_adapter.h @@ -0,0 +1,424 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _IPOIB_ADAPTER_H_ +#define _IPOIB_ADAPTER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ip_stats.h" + + +/* + * Definitions + */ +#define MAX_MCAST 32 + +#define IPV4_ADDR_SIZE 4 + +#define PORT_NUM_INDEX_IN_GUID 3 /* 0 based index into big endian GUID to get port number */ + +/* + * Macros + */ + + +typedef struct _ipoib_params +{ + int32_t rq_depth; + int32_t rq_low_watermark; + int32_t sq_depth; + boolean_t send_chksum_offload; + boolean_t recv_chksum_offload; + uint32_t sa_timeout; + uint32_t sa_retry_cnt; + uint32_t recv_pool_ratio; + uint32_t payload_mtu; + uint32_t xfer_block_size; + mac_addr_t conf_mac; + +} ipoib_params_t; +/* +* FIELDS +* rq_depth +* Number of receive WQEs to allocate. +* +* rq_low_watermark +* Receives are indicated with NDIS_STATUS_RESOURCES when the number of +* receives posted to the RQ falls bellow this value. +* +* sq_depth +* Number of send WQEs to allocate. +* +* send_chksum_offload +* Flag to indicate whether to offload send checksums. This will make it +* so that IPoIB packets should never be forwarded out of the IB subnet +* without recalculating the checksum. +* +* recv_chksum_offload +* Flag to indicate whether to offload recv checksums. +* +* wsdp_enabled +* Flag to indicate whether WSDP is enabled for an adapter adapter. +* +* static_lid +* LID to assign to the port if that port is down (not init) and has none. +* This feature allows a LID to be assigned, alowing locally targetted +* traffic to occur even on ports that are not plugged in. +* +* sa_timeout +* Time, in milliseconds, to wait for a response before retransmitting an +* SA query request. +* +* sa_retry_cnt +* Number of times to retry an SA query request. +* +* recv_pool_ratio +* Initial ratio of receive pool size to receive queue depth. +* +* grow_thresh +* Threshold at which to grow the receive pool. Valid values start are +* powers of 2, excluding 1. 
When zero, grows only when the pool is +* exhausted. Other values indicate fractional values +* (i.e. 2 indicates 1/2, 4 indicates 1/4, etc.) +*********/ + + +typedef struct _pending_oid +{ + NDIS_OID oid; + PVOID p_buf; + ULONG buf_len; + PULONG p_bytes_used; + PULONG p_bytes_needed; + +} pending_oid_t; + + +typedef struct _ipoib_adapter +{ + cl_obj_t obj; + NDIS_HANDLE h_adapter; + ipoib_ifc_data_t guids; + + cl_list_item_t entry; + + ib_al_handle_t h_al; + ib_pnp_handle_t h_pnp; + + ib_pnp_event_t state; + boolean_t hung; + boolean_t reset; + boolean_t registering; + + boolean_t pending_query; + pending_oid_t query_oid; + boolean_t pending_set; + pending_oid_t set_oid; + + struct _ipoib_port *p_port; + + uint32_t rate; + + ipoib_params_t params; + cl_spinlock_t recv_stat_lock; + ip_stats_t recv_stats; + cl_spinlock_t send_stat_lock; + ip_stats_t send_stats; + + boolean_t is_primary; + struct _ipoib_adapter *p_primary; + + uint32_t packet_filter; + + mac_addr_t mac; + mac_addr_t mcast_array[MAX_MCAST]; + uint8_t mcast_array_size; + + cl_qpool_t item_pool; + + KMUTEX mutex; + + cl_thread_t destroy_thread; + cl_vector_t ip_vector; + + cl_perf_t perf; + ib_al_ifc_t *p_ifc; + +} ipoib_adapter_t; +/* +* FIELDS +* obj +* Complib object for reference counting and destruction synchronization. +* +* h_adapter +* NDIS adapter handle. +* +* guids +* CA and port GUIDs returned by the bus driver. +* +* entry +* List item for storing all adapters in a list for address translation. +* We add adapters when their packet filter is set to a non-zero value, +* and remove them when their packet filter is cleared. This is needed +* since user-mode removal events are generated after the packet filter +* is cleared, but before the adapter is destroyed. +* +* h_al +* AL handle for all IB resources. +* +* h_pnp +* PNP registration handle for port events. +* +* state +* State of the adapter. IB_PNP_PORT_ADD indicates that the adapter +* is ready to transfer data. +* +* hung +* Boolean flag used to return whether we are hung or not. +* +* p_port +* Pointer to an ipoib_port_t representing all resources for moving data +* on the IB fabric. +* +* rate +* Rate, in 100bps increments, of the link. +* +* params +* Configuration parameters. +* +* pending_query +* Indicates that an query OID request is being processed asynchronously. +* +* query_oid +* Information about the pended query OID request. +* Valid only if pending_query is TRUE. +* +* pending_set +* Indicates that an set OID request is being processed asynchronously. +* +* set_oid +* Information about the pended set OID request. +* Valid only if pending_set is TRUE. +* +* recv_lock +* Spinlock protecting receive processing. +* +* recv_stats +* Receive statistics. +* +* send_lock +* Spinlock protecting send processing. +* +* send_stats +* Send statistics. +* +* is_primary +* Boolean flag to indicate if an adapter is the primary adapter +* of a bundle. +* +* p_primary +* Pointer to the primary adapter for a bundle. +* +* packet_filter +* Packet filter set by NDIS. +* +* mac_addr +* Ethernet MAC address reported to NDIS. +* +* mcast_array +* List of multicast MAC addresses programmed by NDIS. +* +* mcast_array_size +* Number of entries in the multicat MAC address array; +* +* item_pool +* Pool of cl_pool_obj_t structures to use for queueing pending +* packets for transmission. +* +* mutex +* Mutex to synchronized PnP callbacks with destruction. +* +* ip_vector +* Vector of assigned IP addresses. +* +* p_ifc +* Pointer to transport interface. 
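+*
+* NOTES
+*	All AL entry points are reached through p_ifc rather than direct
+*	imports, e.g. p_adapter->p_ifc->close_al( p_adapter->h_al ), so the
+*	miniport binds to whichever AL instance the bus driver exports.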
+* +*********/ + + +typedef struct _ats_reg +{ + ipoib_adapter_t *p_adapter; + ib_reg_svc_handle_t h_reg_svc; + +} ats_reg_t; +/* +* FIELDS +* p_adapter +* Pointer to the adapter to which this address is assigned. +* +* h_reg_svc +* Service registration handle. +*********/ + + +typedef struct _net_address_item +{ + ats_reg_t *p_reg; + union _net_address_item_address + { + ULONG as_ulong; + UCHAR as_bytes[IPV4_ADDR_SIZE]; + } address; + +} net_address_item_t; +/* +* FIELDS +* p_reg +* Pointer to the ATS registration assigned to this address. +* +* address +* Union representing the IP address as an unsigned long or as +* an array of bytes. +* +* as_ulong +* The IP address represented as an unsigned long. Windows stores +* IPs this way. +* +* as_bytes +* The IP address represented as an array of bytes. +*********/ + + +ib_api_status_t +ipoib_create_adapter( + IN NDIS_HANDLE wrapper_config_context, + IN void* const h_adapter, + OUT ipoib_adapter_t** const pp_adapter ); + + +ib_api_status_t +ipoib_start_adapter( + IN ipoib_adapter_t* const p_adapter ); + + +void +ipoib_destroy_adapter( + IN ipoib_adapter_t* const p_adapter ); + + +/* Joins/leaves mcast groups based on currently programmed mcast MACs. */ +void +ipoib_refresh_mcast( + IN ipoib_adapter_t* const p_adapter, + IN mac_addr_t* const p_mac_array, + IN const uint8_t num_macs ); +/* +* PARAMETERS +* p_adapter +* Instance whose multicast MAC address list to modify. +* +* p_mac_array +* Array of multicast MAC addresses assigned to the adapter. +* +* num_macs +* Number of MAC addresses in the array. +*********/ + + +NDIS_STATUS +ipoib_get_recv_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN pending_oid_t* const p_oid_info ); + + +void +ipoib_inc_recv_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN const size_t bytes OPTIONAL ); + + +NDIS_STATUS +ipoib_get_send_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN pending_oid_t* const p_oid_info ); + + +void +ipoib_inc_send_stat( + IN ipoib_adapter_t* const p_adapter, + IN const ip_stat_sel_t stat_sel, + IN const size_t bytes OPTIONAL ); + + +void +ipoib_set_rate( + IN ipoib_adapter_t* const p_adapter, + IN const uint8_t link_width, + IN const uint8_t link_speed ); + + +void +ipoib_set_active( + IN ipoib_adapter_t* const p_adapter ); + +void +ipoib_set_inactive( + IN ipoib_adapter_t* const p_adapter ); + +ib_api_status_t +ipoib_reset_adapter( + IN ipoib_adapter_t* const p_adapter ); + +void +ipoib_reg_addrs( + IN ipoib_adapter_t* const p_adapter ); + +void +ipoib_dereg_addrs( + IN ipoib_adapter_t* const p_adapter ); + +#endif /* _IPOIB_ADAPTER_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_debug.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_debug.h new file mode 100644 index 00000000..6a5e3c7a --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_debug.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _IPOIB_DEBUG_H_ +#define _IPOIB_DEBUG_H_ + + +#define __MODULE__ "[IPoIB]" + +#include + + +/* Object types for passing into complib. */ +#define IPOIB_OBJ_INSTANCE 1 +#define IPOIB_OBJ_PORT 2 +#define IPOIB_OBJ_ENDPOINT 3 + + +extern uint32_t g_ipoib_dbg_level; +extern uint32_t g_ipoib_dbg_flags; + + +#if defined(EVENT_TRACING) +// +// Software Tracing Definitions +// +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID( \ + IPOIBCtlGuid,(3F9BC73D, EB03, 453a, B27B, 20F9A664211A), \ + WPP_DEFINE_BIT(IPOIB_DBG_ERROR) \ + WPP_DEFINE_BIT(IPOIB_DBG_INIT) \ + WPP_DEFINE_BIT(IPOIB_DBG_PNP) \ + WPP_DEFINE_BIT(IPOIB_DBG_SEND) \ + WPP_DEFINE_BIT(IPOIB_DBG_RECV) \ + WPP_DEFINE_BIT(IPOIB_DBG_ENDPT) \ + WPP_DEFINE_BIT(IPOIB_DBG_IB) \ + WPP_DEFINE_BIT(IPOIB_DBG_BUF) \ + WPP_DEFINE_BIT(IPOIB_DBG_MCAST) \ + WPP_DEFINE_BIT(IPOIB_DBG_ALLOC) \ + WPP_DEFINE_BIT(IPOIB_DBG_OID) \ + WPP_DEFINE_BIT(IPOIB_DBG_IOCTL) \ + WPP_DEFINE_BIT(IPOIB_DBG_STAT) \ + WPP_DEFINE_BIT(IPOIB_DBG_OBJ)) + + + +#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) \ + (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl) +#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags) +#define WPP_FLAG_ENABLED(flags) \ + (WPP_LEVEL_ENABLED(flags) && \ + WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE) +#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags) + +// begin_wpp config +// IPOIB_ENTER(FLAG); +// IPOIB_EXIT(FLAG); +// USEPREFIX(IPOIB_PRINT, "%!STDPREFIX! [IPoIB] :%!FUNC!() :"); +// USEPREFIX(IPOIB_PRINT_EXIT, "%!STDPREFIX! 
[IPoIB] :%!FUNC!() :"); +// USESUFFIX(IPOIB_PRINT_EXIT, "[IpoIB] :%!FUNC!():]"); +// USESUFFIX(IPOIB_ENTER, " [IPoIB] :%!FUNC!():["); +// USESUFFIX(IPOIB_EXIT, " [IPoIB] :%!FUNC!():]"); +// end_wpp + +#else + +#include + + +/* + * Debug macros + */ +#define IPOIB_DBG_ERR (1 << 0) +#define IPOIB_DBG_INIT (1 << 1) +#define IPOIB_DBG_PNP (1 << 2) +#define IPOIB_DBG_SEND (1 << 3) +#define IPOIB_DBG_RECV (1 << 4) +#define IPOIB_DBG_ENDPT (1 << 5) +#define IPOIB_DBG_IB (1 << 6) +#define IPOIB_DBG_BUF (1 << 7) +#define IPOIB_DBG_MCAST (1 << 8) +#define IPOIB_DBG_ALLOC (1 << 9) +#define IPOIB_DBG_OID (1 << 10) +#define IPOIB_DBG_IOCTL (1 << 11) +#define IPOIB_DBG_STAT (1 << 12) +#define IPOIB_DBG_OBJ (1 << 13) + +#define IPOIB_DBG_ERROR (CL_DBG_ERROR | IPOIB_DBG_ERR) +#define IPOIB_DBG_ALL CL_DBG_ALL + + +#if DBG + +// assignment of _level_ is need to to overcome warning C4127 +#define IPOIB_PRINT(_level_,_flag_,_msg_) \ + { \ + if( g_ipoib_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_ipoib_dbg_flags, _msg_ ); \ + } + +#define IPOIB_PRINT_EXIT(_level_,_flag_,_msg_) \ + { \ + if( g_ipoib_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_ipoib_dbg_flags, _msg_ );\ + IPOIB_EXIT(_flag_);\ + } + +#define IPOIB_ENTER(_flag_) \ + { \ + if( g_ipoib_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_ENTER( _flag_, g_ipoib_dbg_flags ); \ + } + +#define IPOIB_EXIT(_flag_)\ + { \ + if( g_ipoib_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_EXIT( _flag_, g_ipoib_dbg_flags ); \ + } + +#define IPOIB_TRACE_BYTES( lvl, ptr, len ) \ + { \ + if( g_ipoib_dbg_level >= (_level_) && \ + (g_ipoib_dbg_flags & (_flag_)) ) \ + { \ + size_t _loop_; \ + for( _loop_ = 0; _loop_ < (len); ++_loop_ ) \ + { \ + DbgPrint( "0x%.2X ", ((uint8_t*)(ptr))[_loop_] ); \ + if( (_loop_ + 1)% 16 == 0 ) \ + DbgPrint("\n"); \ + else if( (_loop_ % 4 + 1) == 0 ) \ + DbgPrint(" "); \ + } \ + DbgPrint("\n"); \ + } \ + } + +#else + +#define IPOIB_PRINT(lvl, flags, msg) + +#define IPOIB_PRINT_EXIT(_level_,_flag_,_msg_) + +#define IPOIB_ENTER(_flag_) + +#define IPOIB_EXIT(_flag_) + +#define IPOIB_TRACE_BYTES( lvl, ptr, len ) + +#endif + +#endif //EVENT_TRACING + + +enum ipoib_perf_counters +{ + SendBundle, + SendPackets, + PortSend, + GetEthHdr, + SendMgrQueue, + GetEndpt, + EndptQueue, + QueuePacket, + BuildSendDesc, + SendMgrFilter, + FilterIp, + QueryIp, + SendTcp, + FilterUdp, + QueryUdp, + SendUdp, + FilterDhcp, + FilterArp, + SendGen, + SendCopy, + PostSend, + ProcessFailedSends, + SendCompBundle, + SendCb, + PollSend, + SendComp, + FreeSendBuf, + RearmSend, + PortResume, + RecvCompBundle, + RecvCb, + PollRecv, + FilterRecv, + GetRecvEndpts, + GetEndptByGid, + GetEndptByLid, + EndptInsert, + RecvTcp, + RecvUdp, + RecvDhcp, + RecvArp, + RecvGen, + BuildPktArray, + PreparePkt, + GetNdisPkt, + RecvNdisIndicate, + PutRecvList, + RepostRecv, + GetRecv, + PostRecv, + RearmRecv, + ReturnPacket, + ReturnPutRecv, + ReturnRepostRecv, + ReturnPreparePkt, + ReturnNdisIndicate, + + /* Must be last! */ + MaxPerf + +}; + + +enum ref_cnt_buckets +{ + ref_init = 0, + ref_refresh_mcast, /* only used in refresh_mcast */ + ref_send_packets, /* only in send_packets */ + ref_get_recv, + ref_repost, /* only in __recv_mgr_repost */ + ref_recv_cb, /* only in __recv_cb */ + ref_send_cb, /* only in __send_cb */ + ref_port_up, + ref_get_bcast, + ref_bcast, /* join and create, used as base only */ + ref_join_mcast, + ref_leave_mcast, + ref_endpt_track, /* used when endpt is in port's child list. */ + + ref_array_size, /* Used to size the array of ref buckets. 
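+	 * The derived entries below OR a multiple-of-100 call-site tag onto
+	 * a base bucket (see ref_mask) so a trace of ref/deref activity can
+	 * tell which path took or released a given reference.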
*/ + ref_mask = 100, /* Used to differentiate derefs. */ + + ref_failed_recv_wc = 100 | ref_get_recv, + ref_recv_inv_len = 200 | ref_get_recv, + ref_recv_loopback = 300 | ref_get_recv, + ref_recv_filter = 400 | ref_get_recv, + + ref_bcast_get_cb = 100 | ref_get_bcast, + + ref_join_bcast = 100 | ref_bcast, + ref_create_bcast = 200 | ref_bcast, + ref_bcast_inv_state = 300 | ref_bcast, + ref_bcast_req_failed = 400 | ref_bcast, + ref_bcast_error = 500 | ref_bcast, + ref_bcast_join_failed = 600 | ref_bcast, + ref_bcast_create_failed = 700 | ref_bcast, + + ref_mcast_inv_state = 100 | ref_join_mcast, + ref_mcast_req_failed = 200 | ref_join_mcast, + ref_mcast_no_endpt = 300 | ref_join_mcast, + ref_mcast_av_failed = 400 | ref_join_mcast, + ref_mcast_join_failed = 500 | ref_join_mcast, + + ref_port_info_cb = 100 | ref_port_up + +}; + + +#endif /* _IPOIB_DEBUG_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.c b/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.c new file mode 100644 index 00000000..6c7daef0 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.c @@ -0,0 +1,2528 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "ipoib_driver.h" +#include "ipoib_debug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ipoib_driver.tmh" +#endif + +#include "ipoib_port.h" +#include "ipoib_ibat.h" +#include +#include +#include +#include + + +#if defined(NDIS50_MINIPORT) +#define MAJOR_NDIS_VERSION 5 +#define MINOR_NDIS_VERSION 0 +#elif defined (NDIS51_MINIPORT) +#define MAJOR_NDIS_VERSION 5 +#define MINOR_NDIS_VERSION 1 +#else +#error NDIS Version not defined, try defining NDIS50_MINIPORT or NDIS51_MINIPORT +#endif + +static const NDIS_OID SUPPORTED_OIDS[] = +{ + OID_GEN_SUPPORTED_LIST, + OID_GEN_HARDWARE_STATUS, + OID_GEN_MEDIA_SUPPORTED, + OID_GEN_MEDIA_IN_USE, + OID_GEN_MAXIMUM_LOOKAHEAD, + OID_GEN_MAXIMUM_FRAME_SIZE, + OID_GEN_LINK_SPEED, + OID_GEN_TRANSMIT_BUFFER_SPACE, + OID_GEN_RECEIVE_BUFFER_SPACE, + OID_GEN_TRANSMIT_BLOCK_SIZE, + OID_GEN_RECEIVE_BLOCK_SIZE, + OID_GEN_VENDOR_ID, + OID_GEN_VENDOR_DESCRIPTION, + OID_GEN_CURRENT_PACKET_FILTER, + OID_GEN_CURRENT_LOOKAHEAD, + OID_GEN_DRIVER_VERSION, + OID_GEN_MAXIMUM_TOTAL_SIZE, + OID_GEN_PROTOCOL_OPTIONS, + OID_GEN_MAC_OPTIONS, + OID_GEN_MEDIA_CONNECT_STATUS, + OID_GEN_MAXIMUM_SEND_PACKETS, + OID_GEN_NETWORK_LAYER_ADDRESSES, + OID_GEN_VENDOR_DRIVER_VERSION, + OID_GEN_PHYSICAL_MEDIUM, + OID_GEN_XMIT_OK, + OID_GEN_RCV_OK, + OID_GEN_XMIT_ERROR, + OID_GEN_RCV_ERROR, + OID_GEN_RCV_NO_BUFFER, + OID_GEN_DIRECTED_BYTES_XMIT, + OID_GEN_DIRECTED_FRAMES_XMIT, + OID_GEN_MULTICAST_BYTES_XMIT, + OID_GEN_MULTICAST_FRAMES_XMIT, + OID_GEN_BROADCAST_BYTES_XMIT, + OID_GEN_BROADCAST_FRAMES_XMIT, + OID_GEN_DIRECTED_BYTES_RCV, + OID_GEN_DIRECTED_FRAMES_RCV, + OID_GEN_MULTICAST_BYTES_RCV, + OID_GEN_MULTICAST_FRAMES_RCV, + OID_GEN_BROADCAST_BYTES_RCV, + OID_GEN_BROADCAST_FRAMES_RCV, + OID_802_3_PERMANENT_ADDRESS, + OID_802_3_CURRENT_ADDRESS, + OID_802_3_MULTICAST_LIST, + OID_802_3_MAXIMUM_LIST_SIZE, + OID_802_3_MAC_OPTIONS, + OID_802_3_RCV_ERROR_ALIGNMENT, + OID_802_3_XMIT_ONE_COLLISION, + OID_802_3_XMIT_MORE_COLLISIONS, + OID_TCP_TASK_OFFLOAD +}; + +static const unsigned char VENDOR_ID[] = {0x00, 0x06, 0x6A, 0x00}; + +#define VENDOR_DESCRIPTION "Internet Protocol over InfiniBand" + +#define IB_INFINITE_SERVICE_LEASE 0xFFFFFFFF + + +/* Global driver debug level */ +uint32_t g_ipoib_dbg_level = TRACE_LEVEL_ERROR; +uint32_t g_ipoib_dbg_flags = 0x00000fff; +ipoib_globals_t g_ipoib = {0}; + + +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_drv_obj, + IN PUNICODE_STRING p_reg_path ); + +VOID +ipoib_unload( + IN PDRIVER_OBJECT p_drv_obj ); + +NDIS_STATUS +ipoib_initialize( + OUT PNDIS_STATUS p_open_err_status, + OUT PUINT p_selected_medium_index, + IN PNDIS_MEDIUM medium_array, + IN UINT medium_array_size, + IN NDIS_HANDLE h_adapter, + IN NDIS_HANDLE wrapper_configuration_context ); + +BOOLEAN +ipoib_check_for_hang( + IN NDIS_HANDLE adapter_context ); + +void +ipoib_halt( + IN NDIS_HANDLE adapter_context ); + +NDIS_STATUS +ipoib_query_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_written, + OUT PULONG p_bytes_needed ); + +NDIS_STATUS +ipoib_reset( + OUT PBOOLEAN p_addressing_reset, + IN NDIS_HANDLE adapter_context ); + +NDIS_STATUS +ipoib_set_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_length, + OUT PULONG p_bytes_read, + OUT PULONG p_bytes_needed ); + +void +ipoib_send_packets( + IN NDIS_HANDLE adapter_context, + IN PPNDIS_PACKET packet_array, + IN UINT num_packets ); + +void +ipoib_pnp_notify( 
+ IN NDIS_HANDLE adapter_context, + IN NDIS_DEVICE_PNP_EVENT pnp_event, + IN PVOID info_buf, + IN ULONG info_buf_len ); + +void +ipoib_shutdown( + IN PVOID adapter_context ); + +static void +ipoib_complete_query( + IN ipoib_adapter_t* const p_adapter, + IN pending_oid_t* const p_oid_info, + IN const NDIS_STATUS status, + IN const void* const p_buf, + IN const ULONG buf_len ); + +static NDIS_STATUS +__ipoib_set_net_addr( + IN ipoib_adapter_t * p_adapter, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_read, + OUT PULONG p_bytes_needed ); + +static NDIS_STATUS +__ipoib_get_tcp_task_offload( + IN ipoib_adapter_t* p_adapter, + IN pending_oid_t* const p_oid_info ); + +static void +__ipoib_ats_reg_cb( + IN ib_reg_svc_rec_t *p_reg_svc_rec ); + +static void +__ipoib_ats_dereg_cb( + IN void *context ); + +static NTSTATUS +__ipoib_read_registry( + IN UNICODE_STRING* const p_registry_path ); + + +//! Standard Windows Device Driver Entry Point +/*! DriverEntry is the first routine called after a driver is loaded, and +is responsible for initializing the driver. On W2k this occurs when the PnP +Manager matched a PnP ID to one in an INF file that references this driver. +Any not success return value will cause the driver to fail to load. +IRQL = PASSIVE_LEVEL + +@param p_drv_obj Pointer to Driver Object for this device driver +@param p_registry_path Pointer to unicode string containing path to this driver's registry area +@return STATUS_SUCCESS, NDIS_STATUS_BAD_CHARACTERISTICS, NDIS_STATUS_BAD_VERSION, +NDIS_STATUS_RESOURCES, or NDIS_STATUS_FAILURE +*/ +NTSTATUS +DriverEntry( + IN PDRIVER_OBJECT p_drv_obj, + IN PUNICODE_STRING p_registry_path ) +{ + NDIS_STATUS status; + NDIS_HANDLE ndis_handle; + NDIS_MINIPORT_CHARACTERISTICS characteristics; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + +#ifdef _DEBUG_ + PAGED_CODE(); +#endif +#if defined(EVENT_TRACING) + WPP_INIT_TRACING(p_drv_obj, p_registry_path); +#endif + status = CL_INIT; + if( !NT_SUCCESS( status ) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_init failed.\n") ); + return status; + } + + status = NDIS_STATUS_SUCCESS; + ndis_handle = NULL; + + __ipoib_read_registry(p_registry_path); + + KeInitializeSpinLock( &g_ipoib.lock ); + cl_qlist_init( &g_ipoib.adapter_list ); + + NdisMInitializeWrapper( + &g_ipoib.h_ndis_wrapper, p_drv_obj, p_registry_path, NULL ); + + memset(&characteristics, 0, sizeof(characteristics)); + characteristics.MajorNdisVersion = MAJOR_NDIS_VERSION; + characteristics.MinorNdisVersion = MINOR_NDIS_VERSION; + characteristics.CheckForHangHandler = ipoib_check_for_hang; + characteristics.HaltHandler = ipoib_halt; + characteristics.InitializeHandler = ipoib_initialize; + characteristics.QueryInformationHandler = ipoib_query_info; + characteristics.ResetHandler = ipoib_reset; + characteristics.SetInformationHandler = ipoib_set_info; + + characteristics.ReturnPacketHandler = ipoib_return_packet; + characteristics.SendPacketsHandler = ipoib_send_packets; + +#ifdef NDIS51_MINIPORT + characteristics.PnPEventNotifyHandler = ipoib_pnp_notify; + characteristics.AdapterShutdownHandler = ipoib_shutdown; +#endif + + status = NdisMRegisterMiniport( + g_ipoib.h_ndis_wrapper, &characteristics, sizeof(characteristics) ); + if( status != NDIS_STATUS_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisMRegisterMiniport failed with status of %d\n", status) ); + NdisTerminateWrapper( g_ipoib.h_ndis_wrapper, NULL ); + CL_DEINIT; + return status; + } + + NdisMRegisterUnloadHandler( 
+
+
+static NTSTATUS
+__ipoib_read_registry(
+	IN				UNICODE_STRING* const		p_registry_path )
+{
+	NTSTATUS						status;
+	/* Remember the terminating entry in the table below. */
+	RTL_QUERY_REGISTRY_TABLE		table[4];
+	UNICODE_STRING					param_path;
+
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+
+	RtlInitUnicodeString( &param_path, NULL );
+	param_path.MaximumLength = p_registry_path->Length +
+		sizeof(L"\\Parameters");
+	param_path.Buffer = cl_zalloc( param_path.MaximumLength );
+	if( !param_path.Buffer )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Failed to allocate parameters path buffer.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	RtlAppendUnicodeStringToString( &param_path, p_registry_path );
+	RtlAppendUnicodeToString( &param_path, L"\\Parameters" );
+
+	/*
+	 * Clear the table.  This clears all the query callback pointers,
+	 * and sets up the terminating table entry.
+	 */
+	cl_memclr( table, sizeof(table) );
+
+	/* Setup the table entries. */
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[0].Name = L"DebugLevel";
+	table[0].EntryContext = &g_ipoib_dbg_level;
+	table[0].DefaultType = REG_DWORD;
+	table[0].DefaultData = &g_ipoib_dbg_level;
+	table[0].DefaultLength = sizeof(ULONG);
+
+	table[1].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[1].Name = L"DebugFlags";
+	table[1].EntryContext = &g_ipoib_dbg_flags;
+	table[1].DefaultType = REG_DWORD;
+	table[1].DefaultData = &g_ipoib_dbg_flags;
+	table[1].DefaultLength = sizeof(ULONG);
+
+	table[2].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[2].Name = L"bypass_check_bcast_rate";
+	table[2].EntryContext = &g_ipoib.bypass_check_bcast_rate;
+	table[2].DefaultType = REG_DWORD;
+	table[2].DefaultData = &g_ipoib.bypass_check_bcast_rate;
+	table[2].DefaultLength = sizeof(ULONG);
+
+	/* Have at it! */
+	status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE,
+		param_path.Buffer, table, NULL, NULL );
+
+	IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+		("debug level %d debug flags 0x%.8x\n",
+		g_ipoib_dbg_level,
+		g_ipoib_dbg_flags) );
+
+#if DBG
+	if( g_ipoib_dbg_flags & IPOIB_DBG_ERR )
+		g_ipoib_dbg_flags |= CL_DBG_ERROR;
+#endif
+
+	cl_free( param_path.Buffer );
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+	return status;
+}
+
+
+VOID
+ipoib_unload(
+	IN				PDRIVER_OBJECT				p_drv_obj )
+{
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+#if defined(EVENT_TRACING)
+	WPP_CLEANUP( p_drv_obj );
+#endif
+	UNREFERENCED_PARAMETER( p_drv_obj );
+	CL_DEINIT;
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+}
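+
+
+/*
+ * Illustrative only (not referenced by the driver): reading one more DWORD
+ * under the same Parameters key, using the pattern of __ipoib_read_registry
+ * above.  The value name "ExtraParam" is hypothetical; the point is that the
+ * table must always keep one zeroed terminating entry beyond the last real
+ * entry, which is why the table above is dimensioned [4] for three values.
+ */
+static NTSTATUS
+__ipoib_read_extra_param(
+	IN				WCHAR* const				params_path,
+	IN	OUT			ULONG* const				p_value )
+{
+	/* One real entry plus the zeroed terminating entry. */
+	RTL_QUERY_REGISTRY_TABLE	table[2];
+
+	cl_memclr( table, sizeof(table) );
+
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[0].Name = L"ExtraParam";
+	table[0].EntryContext = p_value;
+	table[0].DefaultType = REG_DWORD;
+	table[0].DefaultData = p_value;
+	table[0].DefaultLength = sizeof(ULONG);
+
+	return RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE,
+		params_path, table, NULL, NULL );
+}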
+
+
+NDIS_STATUS
+ipoib_get_adapter_params(
+	IN				NDIS_HANDLE* const			wrapper_config_context,
+	IN	OUT			ipoib_adapter_t				*p_adapter )
+{
+	NDIS_STATUS						status;
+	NDIS_HANDLE						h_config;
+	NDIS_CONFIGURATION_PARAMETER	*p_param;
+	NDIS_STRING						keyword;
+	PUCHAR							mac;
+	UINT							len;
+
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+
+	NdisOpenConfiguration( &status, &h_config, wrapper_config_context );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("NdisOpenConfiguration returned 0x%.8x\n", status) );
+		return status;
+	}
+
+	/* Required: Receive queue depth. */
+	RtlInitUnicodeString( &keyword, L"RqDepth" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Receive Queue Depth parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.rq_depth = p_param->ParameterData.IntegerData;
+
+	/*
+	 * Optional: Receive queue low watermark.  The registry value is a
+	 * divisor of the receive queue depth; missing or zero selects the
+	 * default of one quarter of the depth.
+	 */
+	RtlInitUnicodeString( &keyword, L"RqLowWatermark" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS || !p_param->ParameterData.IntegerData )
+	{
+		p_adapter->params.rq_low_watermark = p_adapter->params.rq_depth >> 2;
+	}
+	else
+	{
+		p_adapter->params.rq_low_watermark =
+			p_adapter->params.rq_depth / p_param->ParameterData.IntegerData;
+	}
+
+	/* Required: Send queue depth. */
+	RtlInitUnicodeString( &keyword, L"SqDepth" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Send Queue Depth parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.sq_depth = p_param->ParameterData.IntegerData;
+	/* Send queue depth needs to be a power of two. */
+	if( p_adapter->params.sq_depth <= 128 )
+		p_adapter->params.sq_depth = 128;
+	else if( p_adapter->params.sq_depth <= 256 )
+		p_adapter->params.sq_depth = 256;
+	else if( p_adapter->params.sq_depth <= 512 )
+		p_adapter->params.sq_depth = 512;
+	else
+		p_adapter->params.sq_depth = 1024;
+
+	/* Required: Send Checksum Offload. */
+	RtlInitUnicodeString( &keyword, L"SendChksum" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Send Checksum Offload parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.send_chksum_offload = (p_param->ParameterData.IntegerData != 0);
+
+	/* Required: Receive Checksum Offload. */
+	RtlInitUnicodeString( &keyword, L"RecvChksum" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Recv Checksum Offload parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.recv_chksum_offload = (p_param->ParameterData.IntegerData != 0);
+
+	/* Required: SA query timeout, in milliseconds. */
+	RtlInitUnicodeString( &keyword, L"SaTimeout" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("SA query timeout parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.sa_timeout = p_param->ParameterData.IntegerData;
+
+	/* Required: SA query retry count. */
+	RtlInitUnicodeString( &keyword, L"SaRetries" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("SA query retry count parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.sa_retry_cnt = p_param->ParameterData.IntegerData;
+
+	/* Required: Receive pool to queue depth ratio. */
+	RtlInitUnicodeString( &keyword, L"RecvRatio" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Receive pool to queue depth ratio parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.recv_pool_ratio = p_param->ParameterData.IntegerData;
+
+	/* Required: MTU size. */
+	RtlInitUnicodeString( &keyword, L"PayloadMtu" );
+	NdisReadConfiguration(
+		&status, &p_param, h_config, &keyword, NdisParameterInteger );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("PayloadMtu parameter missing.\n") );
+		return status;
+	}
+	p_adapter->params.payload_mtu = p_param->ParameterData.IntegerData;
+	p_adapter->params.xfer_block_size = (sizeof(eth_hdr_t) + p_adapter->params.payload_mtu);
+	NdisReadNetworkAddress( &status, &mac, &len, h_config );
+
+	ETH_COPY_NETWORK_ADDRESS( p_adapter->params.conf_mac.addr, p_adapter->mac.addr );
+	/* If there is a NetworkAddress override in registry, use it */
+	if( (status == NDIS_STATUS_SUCCESS) && (len == HW_ADDR_LEN) )
+	{
+		if( ETH_IS_MULTICAST(mac) || ETH_IS_BROADCAST(mac) ||
+			!ETH_IS_LOCALLY_ADMINISTERED(mac) )
+		{
+			IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_INIT,
+				("Overriding NetworkAddress is invalid - "
+				"%02x-%02x-%02x-%02x-%02x-%02x\n",
+				mac[0], mac[1], mac[2],
+				mac[3], mac[4], mac[5]) );
+		}
+		else
+		{
+			ETH_COPY_NETWORK_ADDRESS( p_adapter->params.conf_mac.addr, mac );
+		}
+	}
+
+	NdisCloseConfiguration( h_config );
+
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+	return NDIS_STATUS_SUCCESS;
+}
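+
+
+/*
+ * Illustrative helper (not referenced by the driver): the SqDepth clamping
+ * above is equivalent to rounding up to the next power of two within the
+ * supported [128, 1024] range.
+ */
+static ULONG
+__ipoib_clamp_sq_depth(
+	IN				ULONG						depth )
+{
+	ULONG	pow2 = 128;
+
+	/* Double until we meet or exceed the requested depth (max 1024). */
+	while( pow2 < depth && pow2 < 1024 )
+		pow2 <<= 1;		/* 128 -> 256 -> 512 -> 1024 */
+
+	return pow2;
+}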
+
+
+NDIS_STATUS
+ipoib_get_adapter_guids(
+	IN				NDIS_HANDLE* const			h_adapter,
+	IN	OUT			ipoib_adapter_t				*p_adapter )
+{
+	NTSTATUS			status;
+	ib_al_ifc_data_t	data;
+	IO_STACK_LOCATION	io_stack, *p_fwd_io_stack;
+	DEVICE_OBJECT		*p_pdo;
+	IRP					*p_irp;
+	KEVENT				event;
+	IO_STATUS_BLOCK		io_status;
+
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+
+	NdisMGetDeviceProperty( h_adapter, &p_pdo, NULL, NULL, NULL, NULL );
+
+	/* Query for our interface */
+	data.size = sizeof(ipoib_ifc_data_t);
+	data.version = IPOIB_INTERFACE_DATA_VERSION;
+	data.type = &GUID_IPOIB_INTERFACE_DATA;
+	data.p_data = &p_adapter->guids;
+
+	io_stack.MinorFunction = IRP_MN_QUERY_INTERFACE;
+	io_stack.Parameters.QueryInterface.Version = AL_INTERFACE_VERSION;
+	io_stack.Parameters.QueryInterface.Size = sizeof(ib_al_ifc_t);
+	io_stack.Parameters.QueryInterface.Interface =
+		(INTERFACE*)p_adapter->p_ifc;
+	io_stack.Parameters.QueryInterface.InterfaceSpecificData = &data;
+	io_stack.Parameters.QueryInterface.InterfaceType =
+		&GUID_IB_AL_INTERFACE;
+
+	KeInitializeEvent( &event, NotificationEvent, FALSE );
+
+	/* Build the IRP for the HCA. */
+	p_irp = IoBuildSynchronousFsdRequest( IRP_MJ_PNP, p_pdo,
+		NULL, 0, NULL, &event, &io_status );
+	if( !p_irp )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Failed to allocate query interface IRP.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	/* Copy the request query parameters. */
+	p_fwd_io_stack = IoGetNextIrpStackLocation( p_irp );
+	p_fwd_io_stack->MinorFunction = IRP_MN_QUERY_INTERFACE;
+	p_fwd_io_stack->Parameters.QueryInterface =
+		io_stack.Parameters.QueryInterface;
+	p_irp->IoStatus.Status = STATUS_NOT_SUPPORTED;
+
+	/* Send the IRP. */
+	status = IoCallDriver( p_pdo, p_irp );
+	if( status == STATUS_PENDING )
+	{
+		KeWaitForSingleObject( &event, Executive, KernelMode,
+			FALSE, NULL );
+		status = io_status.Status;
+	}
+
+	if( !NT_SUCCESS( status ) )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("Query interface for IPOIB interface returned %08x.\n", status) );
+		return status;
+	}
+
+	/*
+	 * Dereference the interface now so that the bus driver doesn't fail a
+	 * query remove IRP.  We will always get unloaded before the bus driver
+	 * since we're a child device.
+	 */
+	p_adapter->p_ifc->wdm.InterfaceDereference(
+		p_adapter->p_ifc->wdm.Context );
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+	return NDIS_STATUS_SUCCESS;
+}
+
+
+//! Initialization function called for each IOC discovered
+/* The MiniportInitialize function is a required function that sets up a
+NIC (or virtual NIC) for network I/O operations, claims all hardware
+resources necessary to the NIC in the registry, and allocates resources
+the driver needs to carry out network I/O operations.
+IRQL = PASSIVE_LEVEL
+
+@param p_open_status Pointer to a status field set if this function returns NDIS_STATUS_OPEN_ERROR
+@param p_selected_medium_index Pointer to unsigned integer noting index into medium_array for this NIC
+@param medium_array Array of mediums for this NIC
+@param medium_array_size Number of elements in medium_array
+@param h_adapter Handle assigned by NDIS for this NIC
+@param wrapper_config_context Handle used for Ndis initialization functions
+@return NDIS_STATUS_SUCCESS, NDIS_STATUS_UNSUPPORTED_MEDIA, NDIS_STATUS_RESOURCES,
+NDIS_STATUS_NOT_SUPPORTED
+*/
+NDIS_STATUS
+ipoib_initialize(
+		OUT			PNDIS_STATUS				p_open_status,
+		OUT			PUINT						p_selected_medium_index,
+	IN				PNDIS_MEDIUM				medium_array,
+	IN				UINT						medium_array_size,
+	IN				NDIS_HANDLE					h_adapter,
+	IN				NDIS_HANDLE					wrapper_config_context )
+{
+	NDIS_STATUS			status;
+	ib_api_status_t		ib_status;
+	UINT				medium_index;
+	ipoib_adapter_t		*p_adapter;
+
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+
+#ifdef _DEBUG_
+	PAGED_CODE();
+#endif
+
+	UNUSED_PARAM( p_open_status );
+	UNUSED_PARAM( wrapper_config_context );
+
+	/* Search for our medium */
+	for( medium_index = 0; medium_index < medium_array_size; ++medium_index )
+	{
+		/* Check to see if we found our medium */
+		if( medium_array[medium_index] == NdisMedium802_3 )
+			break;
+	}
+
+	if( medium_index == medium_array_size )	/* Never found it */
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("No supported media.\n") );
+		return NDIS_STATUS_UNSUPPORTED_MEDIA;
+	}
+
+	*p_selected_medium_index = medium_index;
+
+	/* Create the adapter. */
+	ib_status = ipoib_create_adapter( wrapper_config_context, h_adapter, &p_adapter );
+	if( ib_status != IB_SUCCESS )
+	{
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("ipoib_create_adapter returned status %d.\n", ib_status) );
+		return NDIS_STATUS_FAILURE;
+	}
+
+	/* Allow ten seconds for all SA queries to finish up. */
+	NdisMSetAttributesEx( h_adapter, p_adapter, 5,
+		NDIS_ATTRIBUTE_BUS_MASTER | NDIS_ATTRIBUTE_DESERIALIZE |
+		NDIS_ATTRIBUTE_USES_SAFE_BUFFER_APIS,
+		NdisInterfacePNPBus );
+
+#if IPOIB_USE_DMA
+	status =
+		NdisMInitializeScatterGatherDma( h_adapter, TRUE, p_adapter->params.xfer_block_size );
+	if( status != NDIS_STATUS_SUCCESS )
+	{
+		ipoib_destroy_adapter( p_adapter );
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("NdisMInitializeScatterGatherDma returned 0x%.8x.\n", status) );
+		return status;
+	}
+#endif
+
+	/* Start the adapter. */
+	ib_status = ipoib_start_adapter( p_adapter );
+	if( ib_status != IB_SUCCESS )
+	{
+		NdisWriteErrorLogEntry( h_adapter,
+			NDIS_ERROR_CODE_HARDWARE_FAILURE, 0 );
+		ipoib_destroy_adapter( p_adapter );
+		IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+			("ipoib_start_adapter returned status %d.\n", ib_status) );
+		return NDIS_STATUS_FAILURE;
+	}
+
+	ipoib_ref_ibat();
+
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+	return status;
+}
+
+
+//! Deallocates resources when the NIC is removed and halts the NIC.
+/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +*/ +void +ipoib_halt( + IN NDIS_HANDLE adapter_context ) +{ + ipoib_adapter_t *p_adapter; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + ipoib_deref_ibat(); + + CL_ASSERT( adapter_context ); + p_adapter = (ipoib_adapter_t*)adapter_context; + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Port %016I64x (CA %016I64x port %d) halting\n", + p_adapter->guids.port_guid, p_adapter->guids.ca_guid, + p_adapter->guids.port_num) ); + + ipoib_destroy_adapter( p_adapter ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +//! Reports the state of the NIC, or monitors the responsiveness of an underlying device driver. +/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@return TRUE if the driver determines that its NIC is not operating +*/ +BOOLEAN +ipoib_check_for_hang( + IN NDIS_HANDLE adapter_context ) +{ + ipoib_adapter_t *p_adapter; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + CL_ASSERT( adapter_context ); + p_adapter = (ipoib_adapter_t*)adapter_context; + + if( p_adapter->reset ) + { + IPOIB_EXIT( IPOIB_DBG_INIT ); + return FALSE; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return (p_adapter->hung? TRUE:FALSE); +} + + +//! Returns information about the capabilities and status of the driver and/or its NIC. +/* IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@param oid Object ID representing the query operation to be carried out +@param info_buf Buffer containing any input for this query and location for output +@param info_buf_len Number of bytes available in info_buf +@param p_bytes_written Pointer to number of bytes written into info_buf +@param p_bytes_needed Pointer to number of bytes needed to satisfy this oid +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_INVALID_OID, +NDIS_STATUS_INVALID_LENGTH, NDIS_STATUS_NOT_ACCEPTED, NDIS_STATUS_NOT_SUPPORTED, +NDIS_STATUS_RESOURCES +*/ +NDIS_STATUS +ipoib_query_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_written, + OUT PULONG p_bytes_needed ) +{ + ipoib_adapter_t *p_adapter; + NDIS_STATUS status; + USHORT version; + ULONG info; + PVOID src_buf; + ULONG buf_len; + pending_oid_t oid_info; + uint8_t port_num; + + IPOIB_ENTER( IPOIB_DBG_OID ); + + oid_info.oid = oid; + oid_info.p_buf = info_buf; + oid_info.buf_len = info_buf_len; + oid_info.p_bytes_used = p_bytes_written; + oid_info.p_bytes_needed = p_bytes_needed; + + CL_ASSERT( adapter_context ); + p_adapter = (ipoib_adapter_t*)adapter_context; + + CL_ASSERT( p_bytes_written ); + CL_ASSERT( p_bytes_needed ); + CL_ASSERT( !p_adapter->pending_query ); + + status = NDIS_STATUS_SUCCESS; + src_buf = &info; + buf_len = sizeof(info); + + port_num = p_adapter->guids.port_num; + + switch( oid ) + { + /* Required General */ + case OID_GEN_SUPPORTED_LIST: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_SUPPORTED_LIST\n", port_num) ); + src_buf = (PVOID)SUPPORTED_OIDS; + buf_len = sizeof(SUPPORTED_OIDS); + break; + + case OID_GEN_HARDWARE_STATUS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_HARDWARE_STATUS\n", port_num) ); + cl_obj_lock( &p_adapter->obj ); + switch( p_adapter->state ) + { + case IB_PNP_PORT_ADD: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d returning NdisHardwareStatusInitializing\n", port_num) ); + info = NdisHardwareStatusInitializing; + 
break;
+
+		case IB_PNP_PORT_ACTIVE:
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d returning NdisHardwareStatusReady\n", port_num) );
+			info = NdisHardwareStatusReady;
+			break;
+
+		default:
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d returning NdisHardwareStatusNotReady\n", port_num) );
+			info = NdisHardwareStatusNotReady;
+		}
+		cl_obj_unlock( &p_adapter->obj );
+		break;
+
+	case OID_GEN_MEDIA_SUPPORTED:
+	case OID_GEN_MEDIA_IN_USE:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_MEDIA_SUPPORTED "
+			"or OID_GEN_MEDIA_IN_USE\n", port_num) );
+		info = NdisMedium802_3;
+		break;
+
+	case OID_GEN_MAXIMUM_FRAME_SIZE:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_MAXIMUM_FRAME_SIZE\n", port_num) );
+		info = p_adapter->params.payload_mtu;
+		break;
+
+	case OID_GEN_LINK_SPEED:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_LINK_SPEED\n", port_num) );
+		cl_obj_lock( &p_adapter->obj );
+		switch( p_adapter->state )
+		{
+		case IB_PNP_PORT_ADD:
+			/* Mark the adapter as pending an OID */
+			p_adapter->pending_query = TRUE;
+
+			/* Save the request parameters. */
+			p_adapter->query_oid = oid_info;
+
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d returning NDIS_STATUS_PENDING\n", port_num) );
+			status = NDIS_STATUS_PENDING;
+			break;
+
+		case IB_PNP_PORT_REMOVE:
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d returning NDIS_STATUS_NOT_ACCEPTED\n", port_num) );
+			status = NDIS_STATUS_NOT_ACCEPTED;
+			break;
+
+		default:
+			CL_ASSERT( p_adapter->p_port );
+			info = p_adapter->rate;
+			break;
+		}
+		cl_obj_unlock( &p_adapter->obj );
+		break;
+
+	case OID_GEN_TRANSMIT_BUFFER_SPACE:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_TRANSMIT_BUFFER_SPACE\n", port_num) );
+		info = p_adapter->params.sq_depth * p_adapter->params.xfer_block_size;
+		break;
+
+	case OID_GEN_RECEIVE_BUFFER_SPACE:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_RECEIVE_BUFFER_SPACE\n", port_num) );
+		info = p_adapter->params.rq_depth * p_adapter->params.xfer_block_size;
+		break;
+
+	case OID_GEN_MAXIMUM_LOOKAHEAD:
+	case OID_GEN_CURRENT_LOOKAHEAD:
+	case OID_GEN_TRANSMIT_BLOCK_SIZE:
+	case OID_GEN_RECEIVE_BLOCK_SIZE:
+	case OID_GEN_MAXIMUM_TOTAL_SIZE:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_MAXIMUM_LOOKAHEAD "
+			"or OID_GEN_CURRENT_LOOKAHEAD or "
+			"OID_GEN_TRANSMIT_BLOCK_SIZE or "
+			"OID_GEN_RECEIVE_BLOCK_SIZE or "
+			"OID_GEN_MAXIMUM_TOTAL_SIZE\n", port_num) );
+		info = p_adapter->params.xfer_block_size;
+		break;
+
+	case OID_GEN_VENDOR_ID:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_VENDOR_ID\n", port_num) );
+		src_buf = (void*)VENDOR_ID;
+		buf_len = sizeof(VENDOR_ID);
+		break;
+
+	case OID_GEN_VENDOR_DESCRIPTION:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_VENDOR_DESCRIPTION\n", port_num) );
+		src_buf = VENDOR_DESCRIPTION;
+		buf_len = sizeof(VENDOR_DESCRIPTION);
+		break;
+
+	case OID_GEN_VENDOR_DRIVER_VERSION:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received query for OID_GEN_VENDOR_DRIVER_VERSION\n", port_num) );
+		src_buf = &version;
+		buf_len = sizeof(version);
+		//TODO: Figure out what the right version is.
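+		/* High byte is the major and low byte the minor revision,
+		 * so 1 << 8 | 1 reports vendor driver version 1.1. */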
+ version = 1 << 8 | 1; + break; + + case OID_GEN_PHYSICAL_MEDIUM: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_PHYSICAL_MEDIUM\n", port_num) ); + info = NdisPhysicalMediumUnspecified; + break; + + case OID_GEN_CURRENT_PACKET_FILTER: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_CURRENT_PACKET_FILTER\n", port_num) ); + info = p_adapter->packet_filter; + break; + + case OID_GEN_DRIVER_VERSION: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_DRIVER_VERSION\n", port_num) ); + src_buf = &version; + buf_len = sizeof(version); + version = MAJOR_NDIS_VERSION << 8 | MINOR_NDIS_VERSION; + break; + + case OID_GEN_MAC_OPTIONS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MAC_OPTIONS\n", port_num) ); + info = NDIS_MAC_OPTION_COPY_LOOKAHEAD_DATA | + NDIS_MAC_OPTION_TRANSFERS_NOT_PEND | + NDIS_MAC_OPTION_NO_LOOPBACK | + NDIS_MAC_OPTION_FULL_DUPLEX; +//TODO: Figure out if we will support priority and VLANs. +// NDIS_MAC_OPTION_8021P_PRIORITY; +//#ifdef NDIS51_MINIPORT +// info |= NDIS_MAC_OPTION_8021Q_VLAN; +//#endif + break; + + case OID_GEN_MEDIA_CONNECT_STATUS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MEDIA_CONNECT_STATUS\n", port_num) ); + cl_obj_lock( &p_adapter->obj ); + switch( p_adapter->state ) + { + case IB_PNP_PORT_ADD: + case IB_PNP_PORT_INIT: + /* + * Delay reporting media state until we know whether the port is + * either up or down. + */ + p_adapter->pending_query = TRUE; + p_adapter->query_oid = oid_info; + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d returning NDIS_STATUS_PENDING\n", port_num) ); + status = NDIS_STATUS_PENDING; + break; + + case IB_PNP_PORT_ACTIVE: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d returning NdisMediaStateConnected\n", port_num) ); + info = NdisMediaStateConnected; + break; + + case IB_PNP_PORT_REMOVE: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d returning NDIS_STATUS_NOT_ACCEPTED\n", port_num) ); + status = NDIS_STATUS_NOT_ACCEPTED; + break; + + default: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d returning NdisMediaStateDisconnected\n", port_num) ); + info = NdisMediaStateDisconnected; + } + cl_obj_unlock( &p_adapter->obj ); + break; + + case OID_GEN_MAXIMUM_SEND_PACKETS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MAXIMUM_SEND_PACKETS\n", port_num) ); + info = MAXULONG; + break; + + /* Required General Statistics */ + case OID_GEN_XMIT_OK: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_XMIT_OK\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_SUCCESS, &oid_info ); + break; + + case OID_GEN_RCV_OK: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_RCV_OK\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_SUCCESS, &oid_info ); + break; + + case OID_GEN_XMIT_ERROR: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_XMIT_ERROR\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_ERROR, &oid_info ); + break; + + case OID_GEN_RCV_ERROR: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_RCV_ERROR\n", port_num) ); + src_buf = 
NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_ERROR, &oid_info ); + break; + + case OID_GEN_RCV_NO_BUFFER: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_RCV_NO_BUFFER\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_DROPPED, &oid_info ); + break; + + case OID_GEN_DIRECTED_BYTES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_DIRECTED_BYTES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_UCAST_BYTES, &oid_info ); + break; + + case OID_GEN_DIRECTED_FRAMES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_DIRECTED_FRAMES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_UCAST_FRAMES, &oid_info ); + break; + + case OID_GEN_MULTICAST_BYTES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MULTICAST_BYTES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_MCAST_BYTES, &oid_info ); + break; + + case OID_GEN_MULTICAST_FRAMES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MULTICAST_FRAMES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_MCAST_FRAMES, &oid_info ); + break; + + case OID_GEN_BROADCAST_BYTES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_BROADCAST_BYTES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_BCAST_BYTES, &oid_info ); + break; + + case OID_GEN_BROADCAST_FRAMES_XMIT: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_BROADCAST_FRAMES_XMIT\n", port_num) ); + src_buf = NULL; + status = ipoib_get_send_stat( p_adapter, IP_STAT_BCAST_FRAMES, &oid_info ); + break; + + case OID_GEN_DIRECTED_BYTES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_DIRECTED_BYTES_RCV\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_UCAST_BYTES, &oid_info ); + break; + + case OID_GEN_DIRECTED_FRAMES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_DIRECTED_FRAMES_RCV\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_UCAST_FRAMES, &oid_info ); + break; + + case OID_GEN_MULTICAST_BYTES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MULTICAST_BYTES_RCV\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_MCAST_BYTES, &oid_info ); + break; + + case OID_GEN_MULTICAST_FRAMES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_MULTICAST_FRAMES_RCV\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_MCAST_FRAMES, &oid_info ); + break; + + case OID_GEN_BROADCAST_BYTES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_BROADCAST_BYTES_RCV\n", port_num) ); + src_buf = NULL; + status = ipoib_get_recv_stat( p_adapter, IP_STAT_BCAST_BYTES, &oid_info ); + break; + + case OID_GEN_BROADCAST_FRAMES_RCV: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_GEN_BROADCAST_FRAMES_RCV\n", port_num) ); + src_buf = NULL; + status = 
ipoib_get_recv_stat( p_adapter, IP_STAT_BCAST_FRAMES, &oid_info ); + break; + + /* Required Ethernet operational characteristics */ + case OID_802_3_PERMANENT_ADDRESS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_PERMANENT_ADDRESS\n", port_num) ); + src_buf = &p_adapter->mac; + buf_len = sizeof(p_adapter->mac); + break; + + case OID_802_3_CURRENT_ADDRESS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_CURRENT_ADDRESS\n", port_num) ); + src_buf = &p_adapter->params.conf_mac; + buf_len = sizeof(p_adapter->params.conf_mac); + break; + + case OID_802_3_MULTICAST_LIST: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_MULTICAST_LIST\n", port_num) ); + src_buf = p_adapter->mcast_array; + buf_len = p_adapter->mcast_array_size * sizeof(mac_addr_t); + break; + + case OID_802_3_MAXIMUM_LIST_SIZE: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_MAXIMUM_LIST_SIZE\n", port_num) ); + info = MAX_MCAST; + break; + + case OID_802_3_MAC_OPTIONS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_MAC_OPTIONS\n", port_num) ); + info = 0; + break; + + /* Required Ethernet stats */ + case OID_802_3_RCV_ERROR_ALIGNMENT: + case OID_802_3_XMIT_ONE_COLLISION: + case OID_802_3_XMIT_MORE_COLLISIONS: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received query for OID_802_3_RCV_ERROR_ALIGNMENT or " + "OID_802_3_XMIT_ONE_COLLISION or " + "OID_802_3_XMIT_MORE_COLLISIONS\n", port_num) ); + info = 0; + break; + + case OID_TCP_TASK_OFFLOAD: + src_buf = NULL; + status = __ipoib_get_tcp_task_offload( p_adapter, &oid_info ); + break; + + /* Optional General */ + case OID_GEN_SUPPORTED_GUIDS: +#ifdef NDIS51_MINIPORT + case OID_GEN_VLAN_ID: +#endif + + /* Optional General Stats */ + case OID_GEN_RCV_CRC_ERROR: + case OID_GEN_TRANSMIT_QUEUE_LENGTH: + + /* Optional Ethernet Stats */ + case OID_802_3_XMIT_DEFERRED: + case OID_802_3_XMIT_MAX_COLLISIONS: + case OID_802_3_RCV_OVERRUN: + case OID_802_3_XMIT_UNDERRUN: + case OID_802_3_XMIT_HEARTBEAT_FAILURE: + case OID_802_3_XMIT_TIMES_CRS_LOST: + case OID_802_3_XMIT_LATE_COLLISIONS: + case OID_PNP_CAPABILITIES: + status = NDIS_STATUS_NOT_SUPPORTED; + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received an unsupported oid of 0x%.8X!\n", port_num, oid) ); + break; + + case OID_GEN_PROTOCOL_OPTIONS: + case OID_GEN_NETWORK_LAYER_ADDRESSES: + case OID_GEN_TRANSPORT_HEADER_OFFSET: +#ifdef NDIS51_MINIPORT + case OID_GEN_MACHINE_NAME: + case OID_GEN_RNDIS_CONFIG_PARAMETER: +#endif + default: + status = NDIS_STATUS_INVALID_OID; + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received an invalid oid of 0x%.8X!\n", port_num, oid) ); + break; + } + + /* + * Complete the request as if it was handled asynchronously to maximize + * code reuse for when we really handle the requests asynchronously. 
+ * Note that this requires the QueryInformation entry point to always
+ * return NDIS_STATUS_PENDING.
+ */
+	if( status != NDIS_STATUS_PENDING )
+	{
+		ipoib_complete_query(
+			p_adapter, &oid_info, status, src_buf, buf_len );
+	}
+
+	IPOIB_EXIT( IPOIB_DBG_OID );
+	return NDIS_STATUS_PENDING;
+}
+
+
+static void
+ipoib_complete_query(
+	IN				ipoib_adapter_t* const		p_adapter,
+	IN				pending_oid_t* const		p_oid_info,
+	IN		const	NDIS_STATUS					status,
+	IN		const	void* const					p_buf,
+	IN		const	ULONG						buf_len )
+{
+	NDIS_STATUS		oid_status = status;
+
+	IPOIB_ENTER( IPOIB_DBG_OID );
+
+	CL_ASSERT( status != NDIS_STATUS_PENDING );
+
+	if( status == NDIS_STATUS_SUCCESS )
+	{
+		if( p_oid_info->buf_len < buf_len )
+		{
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Insufficient buffer space.  "
+				"Returning NDIS_STATUS_INVALID_LENGTH.\n") );
+			oid_status = NDIS_STATUS_INVALID_LENGTH;
+			*p_oid_info->p_bytes_needed = buf_len;
+			*p_oid_info->p_bytes_used = 0;
+		}
+		else if( p_oid_info->p_buf )
+		{
+			/* Only copy if we have a distinct source buffer. */
+			if( p_buf )
+			{
+				NdisMoveMemory( p_oid_info->p_buf, p_buf, buf_len );
+				*p_oid_info->p_bytes_used = buf_len;
+			}
+		}
+		else
+		{
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Returning NDIS_STATUS_NOT_ACCEPTED.\n") );
+			oid_status = NDIS_STATUS_NOT_ACCEPTED;
+		}
+	}
+	else
+	{
+		*p_oid_info->p_bytes_used = 0;
+	}
+
+	p_adapter->pending_query = FALSE;
+
+	NdisMQueryInformationComplete( p_adapter->h_adapter, oid_status );
+
+	IPOIB_EXIT( IPOIB_DBG_OID );
+}
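+
+
+/*
+ * Buffer layout assumed by the two task-offload routines below; this is
+ * where the buf_len arithmetic comes from:
+ *
+ *	NDIS_TASK_OFFLOAD_HEADER		(OffsetFirstTask points just past it)
+ *	NDIS_TASK_OFFLOAD fixed fields	(offsetof( NDIS_TASK_OFFLOAD, TaskBuffer ))
+ *	NDIS_TASK_TCP_IP_CHECKSUM		(the task's payload, stored in TaskBuffer)
+ */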
+
+
+static NDIS_STATUS
+__ipoib_get_tcp_task_offload(
+	IN				ipoib_adapter_t*			p_adapter,
+	IN				pending_oid_t* const		p_oid_info )
+{
+	NDIS_TASK_OFFLOAD_HEADER	*p_offload_hdr;
+	NDIS_TASK_OFFLOAD			*p_offload_task;
+	NDIS_TASK_TCP_IP_CHECKSUM	*p_offload_chksum;
+	ULONG						buf_len;
+
+	IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+		("Port %d received query for OID_TCP_TASK_OFFLOAD\n",
+		p_adapter->guids.port_num) );
+
+	buf_len = sizeof(NDIS_TASK_OFFLOAD_HEADER) +
+		offsetof( NDIS_TASK_OFFLOAD, TaskBuffer ) +
+		sizeof(NDIS_TASK_TCP_IP_CHECKSUM);
+
+	*(p_oid_info->p_bytes_needed) = buf_len;
+
+	if( p_oid_info->buf_len < buf_len )
+		return NDIS_STATUS_INVALID_LENGTH;
+
+	p_offload_hdr = (NDIS_TASK_OFFLOAD_HEADER*)p_oid_info->p_buf;
+	if( p_offload_hdr->Version != NDIS_TASK_OFFLOAD_VERSION )
+		return NDIS_STATUS_INVALID_DATA;
+
+	if( p_offload_hdr->EncapsulationFormat.Encapsulation !=
+		IEEE_802_3_Encapsulation )
+	{
+		return NDIS_STATUS_INVALID_DATA;
+	}
+
+	p_offload_hdr->OffsetFirstTask = sizeof(NDIS_TASK_OFFLOAD_HEADER);
+	p_offload_task = (NDIS_TASK_OFFLOAD*)(p_offload_hdr + 1);
+	p_offload_task->Version = NDIS_TASK_OFFLOAD_VERSION;
+	p_offload_task->Size = sizeof(NDIS_TASK_OFFLOAD);
+	p_offload_task->Task = TcpIpChecksumNdisTask;
+	p_offload_task->OffsetNextTask = 0;
+	p_offload_task->TaskBufferLength = sizeof(NDIS_TASK_TCP_IP_CHECKSUM);
+	p_offload_chksum =
+		(NDIS_TASK_TCP_IP_CHECKSUM*)p_offload_task->TaskBuffer;
+
+	p_offload_chksum->V4Transmit.IpOptionsSupported =
+		p_adapter->params.send_chksum_offload;
+	p_offload_chksum->V4Transmit.TcpOptionsSupported =
+		p_adapter->params.send_chksum_offload;
+	p_offload_chksum->V4Transmit.TcpChecksum =
+		p_adapter->params.send_chksum_offload;
+	p_offload_chksum->V4Transmit.UdpChecksum =
+		p_adapter->params.send_chksum_offload;
+	p_offload_chksum->V4Transmit.IpChecksum =
+		p_adapter->params.send_chksum_offload;
+
+	p_offload_chksum->V4Receive.IpOptionsSupported =
+		p_adapter->params.recv_chksum_offload;
+	p_offload_chksum->V4Receive.TcpOptionsSupported =
+		p_adapter->params.recv_chksum_offload;
+	p_offload_chksum->V4Receive.TcpChecksum =
+		p_adapter->params.recv_chksum_offload;
+	p_offload_chksum->V4Receive.UdpChecksum =
+		p_adapter->params.recv_chksum_offload;
+	p_offload_chksum->V4Receive.IpChecksum =
+		p_adapter->params.recv_chksum_offload;
+
+	p_offload_chksum->V6Transmit.IpOptionsSupported = FALSE;
+	p_offload_chksum->V6Transmit.TcpOptionsSupported = FALSE;
+	p_offload_chksum->V6Transmit.TcpChecksum = FALSE;
+	p_offload_chksum->V6Transmit.UdpChecksum = FALSE;
+
+	p_offload_chksum->V6Receive.IpOptionsSupported = FALSE;
+	p_offload_chksum->V6Receive.TcpOptionsSupported = FALSE;
+	p_offload_chksum->V6Receive.TcpChecksum = FALSE;
+	p_offload_chksum->V6Receive.UdpChecksum = FALSE;
+
+	*(p_oid_info->p_bytes_used) = buf_len;
+
+	return NDIS_STATUS_SUCCESS;
+}
+
+
+static NDIS_STATUS
+__ipoib_set_tcp_task_offload(
+	IN				ipoib_adapter_t*			p_adapter,
+	IN				void* const					p_info_buf,
+	IN				ULONG* const				p_info_len )
+{
+	NDIS_TASK_OFFLOAD_HEADER	*p_offload_hdr;
+	NDIS_TASK_OFFLOAD			*p_offload_task;
+	NDIS_TASK_TCP_IP_CHECKSUM	*p_offload_chksum;
+
+	IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+		("Port %d received set for OID_TCP_TASK_OFFLOAD\n",
+		p_adapter->guids.port_num) );
+
+	p_offload_hdr = (NDIS_TASK_OFFLOAD_HEADER*)p_info_buf;
+
+	if( *p_info_len < sizeof(NDIS_TASK_OFFLOAD_HEADER) )
+		return NDIS_STATUS_INVALID_LENGTH;
+
+	if( p_offload_hdr->Version != NDIS_TASK_OFFLOAD_VERSION )
+		return NDIS_STATUS_INVALID_DATA;
+
+	if( p_offload_hdr->Size != sizeof(NDIS_TASK_OFFLOAD_HEADER) )
+		return NDIS_STATUS_INVALID_LENGTH;
+
+	if( !p_offload_hdr->OffsetFirstTask )
+		return NDIS_STATUS_SUCCESS;
+
+	if( p_offload_hdr->EncapsulationFormat.Encapsulation !=
+		IEEE_802_3_Encapsulation )
+	{
+		return NDIS_STATUS_INVALID_DATA;
+	}
+
+	p_offload_task = (NDIS_TASK_OFFLOAD*)
+		(((UCHAR*)p_offload_hdr) + p_offload_hdr->OffsetFirstTask);
+
+	if( *p_info_len < sizeof(NDIS_TASK_OFFLOAD_HEADER) +
+		offsetof( NDIS_TASK_OFFLOAD, TaskBuffer ) +
+		sizeof(NDIS_TASK_TCP_IP_CHECKSUM) )
+	{
+		return NDIS_STATUS_INVALID_LENGTH;
+	}
+
+	if( p_offload_task->Version != NDIS_TASK_OFFLOAD_VERSION )
+		return NDIS_STATUS_INVALID_DATA;
+	p_offload_chksum =
+		(NDIS_TASK_TCP_IP_CHECKSUM*)p_offload_task->TaskBuffer;
+
+	if( !p_adapter->params.send_chksum_offload &&
+		(p_offload_chksum->V4Transmit.IpOptionsSupported ||
+		p_offload_chksum->V4Transmit.TcpOptionsSupported ||
+		p_offload_chksum->V4Transmit.TcpChecksum ||
+		p_offload_chksum->V4Transmit.UdpChecksum ||
+		p_offload_chksum->V4Transmit.IpChecksum) )
+	{
+		return NDIS_STATUS_NOT_SUPPORTED;
+	}
+
+	if( !p_adapter->params.recv_chksum_offload &&
+		(p_offload_chksum->V4Receive.IpOptionsSupported ||
+		p_offload_chksum->V4Receive.TcpOptionsSupported ||
+		p_offload_chksum->V4Receive.TcpChecksum ||
+		p_offload_chksum->V4Receive.UdpChecksum ||
+		p_offload_chksum->V4Receive.IpChecksum) )
+	{
+		return NDIS_STATUS_NOT_SUPPORTED;
+	}
+
+	return NDIS_STATUS_SUCCESS;
+}
+
+
+//! Issues a hardware reset to the NIC and/or resets the driver's software state.
+/* Tear down the connection and start over again.  This is only called when
+there is a problem.  For example, if a send, query info, or set info timed
+out.  MiniportCheckForHang will be called first.
+IRQL = DISPATCH_LEVEL
+
+@param p_addr_reset Pointer to BOOLEAN that is set to TRUE if the NDIS
+library should call MiniportSetInformation to restore addressing information to the current values.
+@param adapter_context The adapter context allocated at start +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_NOT_RESETTABLE, +NDIS_STATUS_RESET_IN_PROGRESS, NDIS_STATUS_SOFT_ERRORS, NDIS_STATUS_HARD_ERRORS +*/ +NDIS_STATUS +ipoib_reset( + OUT PBOOLEAN p_addr_reset, + IN NDIS_HANDLE adapter_context) +{ + ipoib_adapter_t* p_adapter; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( p_addr_reset ); + CL_ASSERT( adapter_context ); + p_adapter = (ipoib_adapter_t*)adapter_context; + + switch( ipoib_reset_adapter( p_adapter ) ) + { + case IB_NOT_DONE: + IPOIB_EXIT( IPOIB_DBG_INIT ); + return NDIS_STATUS_PENDING; + + case IB_SUCCESS: + IPOIB_EXIT( IPOIB_DBG_INIT ); + *p_addr_reset = TRUE; + return NDIS_STATUS_SUCCESS; + + case IB_INVALID_STATE: + IPOIB_EXIT( IPOIB_DBG_INIT ); + return NDIS_STATUS_RESET_IN_PROGRESS; + + default: + IPOIB_EXIT( IPOIB_DBG_INIT ); + return NDIS_STATUS_HARD_ERRORS; + } +} + + +//! Request changes in the state information that the miniport driver maintains +/* For example, this is used to set multicast addresses and the packet filter. +IRQL = DISPATCH_LEVEL + +@param adapter_context The adapter context allocated at start +@param oid Object ID representing the set operation to be carried out +@param info_buf Buffer containing input for this set and location for any output +@param info_buf_len Number of bytes available in info_buf +@param p_bytes_read Pointer to number of bytes read from info_buf +@param p_bytes_needed Pointer to number of bytes needed to satisfy this oid +@return NDIS_STATUS_SUCCESS, NDIS_STATUS_PENDING, NDIS_STATUS_INVALID_OID, +NDIS_STATUS_INVALID_LENGTH, NDIS_STATUS_INVALID_DATA, NDIS_STATUS_NOT_ACCEPTED, +NDIS_STATUS_NOT_SUPPORTED, NDIS_STATUS_RESOURCES +*/ +NDIS_STATUS +ipoib_set_info( + IN NDIS_HANDLE adapter_context, + IN NDIS_OID oid, + IN PVOID info_buf, + IN ULONG info_buf_len, + OUT PULONG p_bytes_read, + OUT PULONG p_bytes_needed ) +{ + ipoib_adapter_t* p_adapter; + NDIS_STATUS status; + + ULONG buf_len; + uint8_t port_num; + + KLOCK_QUEUE_HANDLE hdl; + + IPOIB_ENTER( IPOIB_DBG_OID ); + + CL_ASSERT( adapter_context ); + p_adapter = (ipoib_adapter_t*)adapter_context; + + CL_ASSERT( p_bytes_read ); + CL_ASSERT( p_bytes_needed ); + CL_ASSERT( !p_adapter->pending_set ); + + status = NDIS_STATUS_SUCCESS; + *p_bytes_needed = 0; + buf_len = sizeof(ULONG); + + port_num = p_adapter->guids.port_num; + + switch( oid ) + { + /* Required General */ + case OID_GEN_CURRENT_PACKET_FILTER: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received set for OID_GEN_CURRENT_PACKET_FILTER\n", port_num)); + if( info_buf_len < sizeof(p_adapter->packet_filter) ) + { + status = NDIS_STATUS_INVALID_LENGTH; + } + else if( !info_buf ) + { + status = NDIS_STATUS_INVALID_DATA; + } + else + { + KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl ); + cl_obj_lock( &p_adapter->obj ); + switch( p_adapter->state ) + { + case IB_PNP_PORT_ADD: + p_adapter->set_oid.oid = oid; + p_adapter->set_oid.p_buf = info_buf; + p_adapter->set_oid.buf_len = info_buf_len; + p_adapter->set_oid.p_bytes_used = p_bytes_read; + p_adapter->set_oid.p_bytes_needed = p_bytes_needed; + p_adapter->pending_set = TRUE; + status = NDIS_STATUS_PENDING; + break; + + case IB_PNP_PORT_REMOVE: + status = NDIS_STATUS_NOT_ACCEPTED; + break; + + default: + if( !p_adapter->packet_filter && (*(uint32_t*)info_buf) ) + { + cl_qlist_insert_tail( + &g_ipoib.adapter_list, &p_adapter->entry ); + + /* + * Filter was zero, now non-zero. Register IP addresses + * with SA. 
+					 */
+					ipoib_reg_addrs( p_adapter );
+				}
+				else if( p_adapter->packet_filter && !(*(uint32_t*)info_buf) )
+				{
+					/*
+					 * Filter was non-zero, now zero.  Deregister IP addresses.
+					 */
+					ipoib_dereg_addrs( p_adapter );
+
+					ASSERT( cl_qlist_count( &g_ipoib.adapter_list ) );
+					cl_qlist_remove_item(
+						&g_ipoib.adapter_list, &p_adapter->entry );
+				}
+
+				p_adapter->packet_filter = *(uint32_t*)info_buf;
+			}
+			cl_obj_unlock( &p_adapter->obj );
+			KeReleaseInStackQueuedSpinLock( &hdl );
+		}
+		break;
+
+	case OID_GEN_CURRENT_LOOKAHEAD:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received set for OID_GEN_CURRENT_LOOKAHEAD\n", port_num) );
+		if( info_buf_len < buf_len )
+			status = NDIS_STATUS_INVALID_LENGTH;
+		break;
+
+	case OID_GEN_PROTOCOL_OPTIONS:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received set for OID_GEN_PROTOCOL_OPTIONS\n", port_num) );
+		if( info_buf_len < buf_len )
+			status = NDIS_STATUS_INVALID_LENGTH;
+		break;
+
+	case OID_GEN_NETWORK_LAYER_ADDRESSES:
+		status = __ipoib_set_net_addr( p_adapter, info_buf, info_buf_len,
+			p_bytes_read, p_bytes_needed );
+		break;
+
+#ifdef NDIS51_MINIPORT
+	case OID_GEN_MACHINE_NAME:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received set for OID_GEN_MACHINE_NAME\n", port_num) );
+		break;
+#endif
+
+	/* Required Ethernet operational characteristics */
+	case OID_802_3_MULTICAST_LIST:
+		IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+			("Port %d received set for OID_802_3_MULTICAST_LIST\n", port_num) );
+		if( info_buf_len > MAX_MCAST * sizeof(mac_addr_t) )
+		{
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d OID_802_3_MULTICAST_LIST - Multicast list full.\n", port_num) );
+			status = NDIS_STATUS_MULTICAST_FULL;
+			*p_bytes_needed = MAX_MCAST * sizeof(mac_addr_t);
+		}
+		else if( info_buf_len % sizeof(mac_addr_t) )
+		{
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d OID_802_3_MULTICAST_LIST - Invalid input buffer.\n", port_num) );
+			status = NDIS_STATUS_INVALID_DATA;
+		}
+		else if( !info_buf && info_buf_len )
+		{
+			IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID,
+				("Port %d OID_802_3_MULTICAST_LIST - Invalid input buffer.\n", port_num) );
+			status = NDIS_STATUS_INVALID_DATA;
+		}
+		else
+		{
+			ipoib_refresh_mcast( p_adapter, (mac_addr_t*)info_buf,
+				(uint8_t)(info_buf_len / sizeof(mac_addr_t)) );
+
+			buf_len = info_buf_len;
+			/*
+			 * Note that we don't return pending.  It will likely take longer
+			 * for our SA transactions to complete than NDIS will give us
+			 * before resetting the adapter.  If an SA failure is encountered,
+			 * the adapter will be marked as hung and we will get reset.
+ */ + status = NDIS_STATUS_SUCCESS; + } + break; + + case OID_TCP_TASK_OFFLOAD: + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received set for OID_TCP_TASK_OFFLOAD\n", port_num) ); + + buf_len = info_buf_len; + status = + __ipoib_set_tcp_task_offload( p_adapter, info_buf, &buf_len ); + break; + + /* Optional General */ + case OID_GEN_TRANSPORT_HEADER_OFFSET: +#ifdef NDIS51_MINIPORT + case OID_GEN_RNDIS_CONFIG_PARAMETER: + case OID_GEN_VLAN_ID: +#endif + status = NDIS_STATUS_NOT_SUPPORTED; + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received an unsupported oid of 0x%.8X!\n", port_num, oid)); + break; + + case OID_GEN_SUPPORTED_LIST: + case OID_GEN_HARDWARE_STATUS: + case OID_GEN_MEDIA_SUPPORTED: + case OID_GEN_MEDIA_IN_USE: + case OID_GEN_MAXIMUM_FRAME_SIZE: + case OID_GEN_LINK_SPEED: + case OID_GEN_TRANSMIT_BUFFER_SPACE: + case OID_GEN_RECEIVE_BUFFER_SPACE: + case OID_GEN_MAXIMUM_LOOKAHEAD: + case OID_GEN_TRANSMIT_BLOCK_SIZE: + case OID_GEN_RECEIVE_BLOCK_SIZE: + case OID_GEN_MAXIMUM_TOTAL_SIZE: + case OID_GEN_VENDOR_ID: + case OID_GEN_VENDOR_DESCRIPTION: + case OID_GEN_VENDOR_DRIVER_VERSION: + case OID_GEN_DRIVER_VERSION: + case OID_GEN_MAC_OPTIONS: + case OID_GEN_MEDIA_CONNECT_STATUS: + case OID_GEN_MAXIMUM_SEND_PACKETS: + case OID_GEN_SUPPORTED_GUIDS: + case OID_GEN_PHYSICAL_MEDIUM: + default: + status = NDIS_STATUS_INVALID_OID; + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d received an invalid oid of 0x%.8X!\n", port_num, oid)); + break; + } + + if( status == NDIS_STATUS_SUCCESS ) + { + *p_bytes_read = buf_len; + } + else + { + if( status == NDIS_STATUS_INVALID_LENGTH ) + { + if ( !*p_bytes_needed ) + { + *p_bytes_needed = buf_len; + } + } + + *p_bytes_read = 0; + } + + IPOIB_EXIT( IPOIB_DBG_OID ); + return status; +} + + +//! Transfers some number of packets, specified as an array of packet pointers, over the network. +/* For a deserialized driver, these packets are completed asynchronously +using NdisMSendComplete. 
+IRQL <= DISPATCH_LEVEL
+
+@param adapter_context Pointer to ipoib_adapter_t structure with per NIC state
+@param packet_array Array of packets to send
+@param num_packets Number of packets in the array
+*/
+void
+ipoib_send_packets(
+	IN				NDIS_HANDLE					adapter_context,
+	IN				PPNDIS_PACKET				packet_array,
+	IN				UINT						num_packets )
+{
+	ipoib_adapter_t		*p_adapter;
+	ipoib_port_t		*p_port;
+	UINT				packet_num;
+	PERF_DECLARE( SendPackets );
+	PERF_DECLARE( PortSend );
+
+	IPOIB_ENTER( IPOIB_DBG_SEND );
+
+	cl_perf_start( SendPackets );
+
+	CL_ASSERT( adapter_context );
+	p_adapter = (ipoib_adapter_t*)adapter_context;
+
+	cl_obj_lock( &p_adapter->obj );
+	if( p_adapter->state != IB_PNP_PORT_ACTIVE || !p_adapter->p_port )
+	{
+		cl_obj_unlock( &p_adapter->obj );
+		for( packet_num = 0; packet_num < num_packets; ++packet_num )
+		{
+			ipoib_inc_send_stat( p_adapter, IP_STAT_DROPPED, 0 );
+			NdisMSendComplete( p_adapter->h_adapter,
+				packet_array[packet_num], NDIS_STATUS_ADAPTER_NOT_READY );
+		}
+		IPOIB_EXIT( IPOIB_DBG_SEND );
+		return;
+	}
+
+	p_port = p_adapter->p_port;
+	ipoib_port_ref( p_port, ref_send_packets );
+	cl_obj_unlock( &p_adapter->obj );
+
+	cl_perf_start( PortSend );
+	ipoib_port_send( p_port, packet_array, num_packets );
+	cl_perf_stop( &p_port->p_adapter->perf, PortSend );
+	ipoib_port_deref( p_port, ref_send_packets );
+
+	cl_perf_stop( &p_adapter->perf, SendPackets );
+
+	cl_perf_log( &p_adapter->perf, SendBundle, num_packets );
+
+	IPOIB_EXIT( IPOIB_DBG_SEND );
+}
+
+
+void
+ipoib_pnp_notify(
+	IN				NDIS_HANDLE					adapter_context,
+	IN				NDIS_DEVICE_PNP_EVENT		pnp_event,
+	IN				PVOID						info_buf,
+	IN				ULONG						info_buf_len )
+{
+	ipoib_adapter_t	*p_adapter;
+
+	IPOIB_ENTER( IPOIB_DBG_PNP );
+
+	UNUSED_PARAM( info_buf );
+	UNUSED_PARAM( info_buf_len );
+
+	p_adapter = (ipoib_adapter_t*)adapter_context;
+
+	IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_PNP, ("Event %d\n", pnp_event) );
+	if( pnp_event != NdisDevicePnPEventPowerProfileChanged )
+	{
+		cl_obj_lock( &p_adapter->obj );
+		p_adapter->state = IB_PNP_PORT_REMOVE;
+		cl_obj_unlock( &p_adapter->obj );
+
+		ipoib_resume_oids( p_adapter );
+	}
+
+	IPOIB_EXIT( IPOIB_DBG_PNP );
+}
+
+
+void
+ipoib_shutdown(
+	IN				PVOID						adapter_context )
+{
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+	UNUSED_PARAM( adapter_context );
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+}
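+
+
+/*
+ * Sketch of the pended-OID flow that ipoib_resume_oids below completes:
+ *
+ *	1. ipoib_query_info/ipoib_set_info see a transient port state
+ *	   (IB_PNP_PORT_ADD or IB_PNP_PORT_INIT), stash the request in
+ *	   p_adapter->query_oid/set_oid, and return NDIS_STATUS_PENDING.
+ *	2. A port state change moves the adapter out of the transient state.
+ *	3. ipoib_resume_oids replays the stashed request and calls
+ *	   NdisMQueryInformationComplete or NdisMSetInformationComplete.
+ */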
+
+
+void
+ipoib_resume_oids(
+	IN				ipoib_adapter_t* const		p_adapter )
+{
+	ULONG				info;
+	NDIS_STATUS			status;
+	boolean_t			pending_query, pending_set;
+	pending_oid_t		query_oid = {0};
+	pending_oid_t		set_oid = {0};
+	KLOCK_QUEUE_HANDLE	hdl;
+
+	IPOIB_ENTER( IPOIB_DBG_INIT );
+
+	cl_obj_lock( &p_adapter->obj );
+	/*
+	 * Set the status depending on our state.  Fail OID requests that
+	 * are pending while we reset the adapter.
+	 */
+	switch( p_adapter->state )
+	{
+	case IB_PNP_PORT_ADD:
+		status = NDIS_STATUS_FAILURE;
+		break;
+
+	case IB_PNP_PORT_REMOVE:
+		status = NDIS_STATUS_NOT_ACCEPTED;
+		break;
+
+	default:
+		status = NDIS_STATUS_SUCCESS;
+	}
+
+	pending_query = p_adapter->pending_query;
+	if( pending_query )
+	{
+		query_oid = p_adapter->query_oid;
+		p_adapter->pending_query = FALSE;
+	}
+	pending_set = p_adapter->pending_set;
+	if( pending_set )
+	{
+		set_oid = p_adapter->set_oid;
+		p_adapter->pending_set = FALSE;
+	}
+	cl_obj_unlock( &p_adapter->obj );
+
+	/*
+	 * If we had a pending OID request for OID_GEN_LINK_SPEED or
+	 * OID_GEN_MEDIA_CONNECT_STATUS, complete it now that the object lock
+	 * has been released.  NdisMQueryInformationComplete is called at
+	 * DISPATCH_LEVEL.
+	 */
+	if( pending_query )
+	{
+		switch( query_oid.oid )
+		{
+		case OID_GEN_LINK_SPEED:
+			ipoib_complete_query( p_adapter, &query_oid,
+				status, &p_adapter->rate, sizeof(p_adapter->rate) );
+			break;
+
+		case OID_GEN_MEDIA_CONNECT_STATUS:
+			info = NdisMediaStateConnected;
+			ipoib_complete_query( p_adapter, &query_oid,
+				status, &info, sizeof(info) );
+			break;
+
+		default:
+			CL_ASSERT( query_oid.oid == OID_GEN_LINK_SPEED ||
+				query_oid.oid == OID_GEN_MEDIA_CONNECT_STATUS );
+			break;
+		}
+	}
+
+	if( pending_set )
+	{
+		switch( set_oid.oid )
+		{
+		case OID_GEN_CURRENT_PACKET_FILTER:
+			/* Validation already performed in the SetInformation path. */
+
+			KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl );
+			cl_obj_lock( &p_adapter->obj );
+			if( !p_adapter->packet_filter && (*(PULONG)set_oid.p_buf) )
+			{
+				cl_qlist_insert_tail(
+					&g_ipoib.adapter_list, &p_adapter->entry );
+				/*
+				 * Filter was zero, now non-zero.  Register IP addresses
+				 * with SA.
+				 */
+				ipoib_reg_addrs( p_adapter );
+			}
+			else if( p_adapter->packet_filter && !(*(PULONG)set_oid.p_buf) )
+			{
+				/* Filter was non-zero, now zero.  Deregister IP addresses. */
+				ipoib_dereg_addrs( p_adapter );
+
+				ASSERT( cl_qlist_count( &g_ipoib.adapter_list ) );
+				cl_qlist_remove_item(
+					&g_ipoib.adapter_list, &p_adapter->entry );
+			}
+			p_adapter->packet_filter = *(PULONG)set_oid.p_buf;
+
+			cl_obj_unlock( &p_adapter->obj );
+			KeReleaseInStackQueuedSpinLock( &hdl );
+
+			NdisMSetInformationComplete( p_adapter->h_adapter, status );
+			break;
+
+		case OID_GEN_NETWORK_LAYER_ADDRESSES:
+			status = __ipoib_set_net_addr( p_adapter,
+				p_adapter->set_oid.p_buf,
+				p_adapter->set_oid.buf_len,
+				p_adapter->set_oid.p_bytes_used,
+				p_adapter->set_oid.p_bytes_needed );
+			if( status != NDIS_STATUS_PENDING )
+			{
+				NdisMSetInformationComplete( p_adapter->h_adapter, status );
+			}
+			break;
+
+		default:
+			CL_ASSERT( set_oid.oid && 0 );
+			break;
+		}
+	}
+
+	IPOIB_EXIT( IPOIB_DBG_INIT );
+}
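+
+
+/*
+ * Wire layout assumed by __ipoib_set_net_addr below.  NETWORK_ADDRESS_LIST
+ * declares Address[1], but what is actually stored is a packed sequence of
+ * variable-length entries, which is why FIELD_OFFSET is used for all the
+ * size math:
+ *
+ *	NETWORK_ADDRESS_LIST:	AddressCount | AddressType | Address[0] ...
+ *	NETWORK_ADDRESS:		AddressLength | AddressType | Address[AddressLength]
+ *
+ * Each NETWORK_ADDRESS therefore occupies
+ *	FIELD_OFFSET(NETWORK_ADDRESS, Address) + AddressLength
+ * bytes, and the next entry starts immediately after it.
+ */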
+ */ + if( info_buf_len < FIELD_OFFSET(NETWORK_ADDRESS_LIST, Address) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - " + "bad length of %d, not enough " + "for NETWORK_ADDRESS_LIST (%d)\n", port_num, info_buf_len, + FIELD_OFFSET(NETWORK_ADDRESS_LIST, Address)) ); + *p_bytes_needed = FIELD_OFFSET(NETWORK_ADDRESS_LIST, Address); + IPOIB_EXIT( IPOIB_DBG_OID ); + return NDIS_STATUS_INVALID_LENGTH; + } + + p_net_addrs = (PNETWORK_ADDRESS_LIST)info_buf; + if( p_net_addrs->AddressCount == 0) + { + if( p_net_addrs->AddressType == NDIS_PROTOCOL_ID_TCP_IP ) + { + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - " + "clear TCP/IP addresses\n", port_num) ); + } + else + { + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - " + "Non TCP/IP address type of 0x%.4X on clear\n", + port_num, p_net_addrs->AddressType) ); + IPOIB_EXIT( IPOIB_DBG_OID ); + return NDIS_STATUS_SUCCESS; + } + } + + addr_size = FIELD_OFFSET(NETWORK_ADDRESS, Address) + + NETWORK_ADDRESS_LENGTH_IP; + total_size = FIELD_OFFSET(NETWORK_ADDRESS_LIST, Address) + + addr_size * p_net_addrs->AddressCount; + + if( info_buf_len < total_size ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - " + "bad length of %d, %d required for %d addresses\n", + port_num, info_buf_len, total_size, p_net_addrs->AddressCount) ); + *p_bytes_needed = total_size; + IPOIB_EXIT( IPOIB_DBG_OID ); + return NDIS_STATUS_INVALID_LENGTH; + } + + /* Lock lists for duration since SA callbacks can occur on other CPUs */ + cl_obj_lock( &p_adapter->obj ); + + /* Set the capacity of the vector to accomodate all assinged addresses. */ + cl_status = cl_vector_set_capacity( + &p_adapter->ip_vector, p_net_addrs->AddressCount ); + if( cl_status != CL_SUCCESS ) + { + cl_obj_unlock( &p_adapter->obj ); + IPOIB_PRINT(TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Port %d - OID_GEN_NETWORK_LAYER_ADDRESSES - " + "Failed to set IP vector capacity: %s\n", port_num, + CL_STATUS_MSG(cl_status)) ); + IPOIB_EXIT( IPOIB_DBG_OID ); + return NDIS_STATUS_RESOURCES; + } + + *p_bytes_read = total_size; + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - List contains %d addresses\n", + port_num, p_net_addrs->AddressCount)); + + /* First look for addresses we had that should be removed */ + for( idx = 0; idx != cl_vector_get_size( &p_adapter->ip_vector ); idx++ ) + { + p_addr_item = (net_address_item_t*) + cl_vector_get_ptr( &p_adapter->ip_vector, idx ); + p_net_addr_oid = (PNETWORK_ADDRESS)p_net_addrs->Address; + + for( i = 0; i < p_net_addrs->AddressCount; ++i, p_net_addr_oid = + (PNETWORK_ADDRESS)((uint8_t *)p_net_addr_oid + + FIELD_OFFSET(NETWORK_ADDRESS, Address) + + p_net_addr_oid->AddressLength) ) + { + + if( p_net_addr_oid->AddressType != NDIS_PROTOCOL_ID_TCP_IP ) + { + IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Address %d is wrong type of 0x%.4X, " + "should be 0x%.4X\n", port_num, i, p_net_addr_oid->AddressType, + NDIS_PROTOCOL_ID_TCP_IP)); + continue; + } + + if( p_net_addr_oid->AddressLength != NETWORK_ADDRESS_LENGTH_IP) + { + IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Address %d is wrong size of %d, " + "should be %d\n", port_num, i, p_net_addr_oid->AddressLength, + NETWORK_ADDRESS_LENGTH_IP)); + continue; + } + + p_ip_addr = 
(PNETWORK_ADDRESS_IP)p_net_addr_oid->Address; + if( !cl_memcmp( &p_ip_addr->in_addr, + &p_addr_item->address.as_ulong, sizeof(ULONG) ) ) + { + break; + } + } + + if( i == p_net_addrs->AddressCount ) + { + /* Didn't find a match, delete from SA */ + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Deleting Address %d.%d.%d.%d\n", + port_num, + p_addr_item->address.as_bytes[0], + p_addr_item->address.as_bytes[1], + p_addr_item->address.as_bytes[2], + p_addr_item->address.as_bytes[3])); + + if( p_addr_item->p_reg ) + { + if( p_addr_item->p_reg->h_reg_svc ) + { + p_adapter->p_ifc->dereg_svc( + p_addr_item->p_reg->h_reg_svc, __ipoib_ats_dereg_cb ); + } + else + { + cl_free( p_addr_item->p_reg ); + } + p_addr_item->p_reg = NULL; + } + p_addr_item->address.as_ulong = 0; + } + } + + /* Now look for new addresses */ + p_net_addr_oid = (NETWORK_ADDRESS *)p_net_addrs->Address; + idx = 0; + for( i = 0; i < p_net_addrs->AddressCount; i++, p_net_addr_oid = + (PNETWORK_ADDRESS)((uint8_t *)p_net_addr_oid + + FIELD_OFFSET(NETWORK_ADDRESS, Address) + p_net_addr_oid->AddressLength) ) + { + + if( p_net_addr_oid->AddressType != NDIS_PROTOCOL_ID_TCP_IP ) + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Address %d is wrong type of 0x%.4X, " + "should be 0x%.4X\n", port_num, i, p_net_addr_oid->AddressType, + NDIS_PROTOCOL_ID_TCP_IP)); + continue; + } + + if( p_net_addr_oid->AddressLength != NETWORK_ADDRESS_LENGTH_IP) + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Address %d is wrong size of %d, " + "should be %d\n", port_num, i, p_net_addr_oid->AddressLength, + NETWORK_ADDRESS_LENGTH_IP)); + continue; + } + + p_ip_addr = (PNETWORK_ADDRESS_IP)p_net_addr_oid->Address; + + /* Size the vector as needed. */ + if( cl_vector_get_size( &p_adapter->ip_vector ) <= idx ) + cl_vector_set_size( &p_adapter->ip_vector, idx + 1 ); + + p_addr_item = cl_vector_get_ptr( &p_adapter->ip_vector, idx ); + if( !cl_memcmp( &p_ip_addr->in_addr, &p_addr_item->address.as_ulong, + sizeof(ULONG) ) ) + { + idx++; + /* Already have this address - no change needed */ + continue; + } + + /* + * Copy the address information, but don't register yet - the port + * could be down. + */ + if( p_addr_item->p_reg ) + { + /* If in use by some other address, deregister. */ + if( p_addr_item->p_reg->h_reg_svc ) + { + p_adapter->p_ifc->dereg_svc( + p_addr_item->p_reg->h_reg_svc, __ipoib_ats_dereg_cb ); + } + else + { + cl_free( p_addr_item->p_reg ); + } + p_addr_item->p_reg = NULL; + } + memcpy ((void *)&p_addr_item->address.as_ulong, (const void *)&p_ip_addr->in_addr, sizeof(ULONG) ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Adding Address %d.%d.%d.%d\n", + port_num, + p_addr_item->address.as_bytes[0], + p_addr_item->address.as_bytes[1], + p_addr_item->address.as_bytes[2], + p_addr_item->address.as_bytes[3]) ); + idx++; + } + + /* Now clear any extra entries that shouldn't be there. 
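+	 * Entries past idx belong to addresses that are no longer assigned;
+	 * release any ATS registration they still hold before shrinking the
+	 * vector.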
*/ + while( idx < cl_vector_get_size( &p_adapter->ip_vector ) ) + { + p_addr_item = (net_address_item_t*) + cl_vector_get_ptr( &p_adapter->ip_vector, + cl_vector_get_size( &p_adapter->ip_vector ) - 1 ); + + if( p_addr_item->p_reg ) + { + if( p_addr_item->p_reg->h_reg_svc ) + { + p_adapter->p_ifc->dereg_svc( + p_addr_item->p_reg->h_reg_svc, __ipoib_ats_dereg_cb ); + } + else + { + cl_free( p_addr_item->p_reg ); + } + p_addr_item->p_reg = NULL; + p_addr_item->address.as_ulong = 0; + } + + /* No need to check return value - shrinking always succeeds. */ + cl_vector_set_size( &p_adapter->ip_vector, + cl_vector_get_size( &p_adapter->ip_vector ) - 1 ); + } + + if( p_adapter->state == IB_PNP_PORT_ACTIVE && p_adapter->packet_filter ) + ipoib_reg_addrs( p_adapter ); + + cl_obj_unlock( &p_adapter->obj ); + + IPOIB_EXIT( IPOIB_DBG_OID ); + return NDIS_STATUS_SUCCESS; +} + + +/* Object lock is held when this function is called. */ +void +ipoib_reg_addrs( + IN ipoib_adapter_t* const p_adapter ) +{ + net_address_item_t *p_addr_item; + + size_t idx; + + uint8_t port_num; + + ib_api_status_t ib_status; + ib_reg_svc_req_t ib_service; + ib_gid_t port_gid; + + IPOIB_ENTER( IPOIB_DBG_OID ); + + port_num = p_adapter->guids.port_num; + + /* Setup our service call with things common to all calls */ + cl_memset( &ib_service, 0, sizeof(ib_service) ); + + /* BUGBUG Only register local subnet GID prefix for now */ + ib_gid_set_default( &port_gid, p_adapter->guids.port_guid ); + ib_service.svc_rec.service_gid = port_gid; + + ib_service.svc_rec.service_pkey = IB_DEFAULT_PKEY; + ib_service.svc_rec.service_lease = IB_INFINITE_SERVICE_LEASE; + + /* Must cast here because the service name is an array of unsigned chars but + * strcpy want a pointer to a signed char */ + strcpy( (char *)ib_service.svc_rec.service_name, ATS_NAME ); + + /* IP Address in question will be put in below */ + ib_service.port_guid = p_adapter->guids.port_guid; + ib_service.timeout_ms = p_adapter->params.sa_timeout; + ib_service.retry_cnt = p_adapter->params.sa_retry_cnt; + + /* Can't set IB_FLAGS_SYNC here because I can't wait at dispatch */ + ib_service.flags = 0; + + /* Service context will be put in below */ + + ib_service.svc_data_mask = IB_SR_COMPMASK_SID | + IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SNAME | + IB_SR_COMPMASK_SDATA8_12 | + IB_SR_COMPMASK_SDATA8_13 | + IB_SR_COMPMASK_SDATA8_14 | + IB_SR_COMPMASK_SDATA8_15; + ib_service.pfn_reg_svc_cb = __ipoib_ats_reg_cb; + + for( idx = 0; idx < cl_vector_get_size( &p_adapter->ip_vector); idx++ ) + { + p_addr_item = (net_address_item_t*) + cl_vector_get_ptr( &p_adapter->ip_vector, idx ); + + if( p_addr_item->p_reg ) + continue; + + p_addr_item->p_reg = cl_zalloc( sizeof(ats_reg_t) ); + if( !p_addr_item->p_reg ) + break; + + p_addr_item->p_reg->p_adapter = p_adapter; + + ib_service.svc_context = p_addr_item->p_reg; + + ib_service.svc_rec.service_id = + ATS_SERVICE_ID & CL_HTON64(0xFFFFFFFFFFFFFF00); + /* ATS service IDs start at 0x10000CE100415453 */ + ib_service.svc_rec.service_id |= ((uint64_t)(idx + 0x53)) << 56; + + cl_memcpy( &ib_service.svc_rec.service_data8[ATS_IPV4_OFFSET], + p_addr_item->address.as_bytes, IPV4_ADDR_SIZE ); + + /* Take a reference for each service request. 
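+ * Each registration callback releases it again (see __ipoib_ats_reg_cb).
+ */
+
+/* Editor's sketch, not part of the original change: a worked example of the
+ * service-ID arithmetic used just above.  ATS_BASE_ID restates, in host
+ * byte order, the value 0x10000CE100415453 named in the comment; the driver
+ * does the same low-byte stamp on the network-byte-order constant, which is
+ * why it shifts by 56 on a little-endian host.  Inert under #if 0. */
+#if 0
+#include <stdint.h>
+#include <stdio.h>
+
+#define ATS_BASE_ID	0x10000CE100415453ULL
+
+int main( void )
+{
+	uint64_t idx;
+
+	for( idx = 0; idx < 3; idx++ )
+	{
+		/* Clear the low byte of the base ID, then stamp in 0x53 + idx. */
+		uint64_t id = (ATS_BASE_ID & ~0xFFULL) | (0x53 + idx);
+		printf( "idx %u -> 0x%016llX\n",
+			(unsigned)idx, (unsigned long long)id );
+	}
+	return 0;	/* prints ...415453, ...415454, ...415455 */
+}
+#endif
+/*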
*/ + cl_obj_ref(&p_adapter->obj); + ib_status = p_adapter->p_ifc->reg_svc( + p_adapter->h_al, &ib_service, &p_addr_item->p_reg->h_reg_svc ); + if( ib_status != IB_SUCCESS ) + { + if( ib_status == IB_INVALID_GUID ) + { + /* If this occurs, we log the error but do not fail the OID yet */ + IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - " + "Failed to register IP Address " + "of %d.%d.%d.%d with error IB_INVALID_GUID\n", + port_num, + p_addr_item->address.as_bytes[0], + p_addr_item->address.as_bytes[1], + p_addr_item->address.as_bytes[2], + p_addr_item->address.as_bytes[3]) ); + } + else + { + /* Fatal error. */ + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Failed to register IP Address " + "of %d.%d.%d.%d with error %s\n", + port_num, + p_addr_item->address.as_bytes[0], + p_addr_item->address.as_bytes[1], + p_addr_item->address.as_bytes[2], + p_addr_item->address.as_bytes[3], + p_adapter->p_ifc->get_err_str( ib_status )) ); + p_adapter->hung = TRUE; + } + cl_obj_deref(&p_adapter->obj); + cl_free( p_addr_item->p_reg ); + p_addr_item->p_reg = NULL; + } + } + + IPOIB_EXIT( IPOIB_DBG_OID ); +} + + +/* Object lock is held when this function is called. */ +void +ipoib_dereg_addrs( + IN ipoib_adapter_t* const p_adapter ) +{ + net_address_item_t *p_addr_item; + + size_t idx; + + IPOIB_ENTER( IPOIB_DBG_OID ); + + for( idx = 0; idx < cl_vector_get_size( &p_adapter->ip_vector); idx++ ) + { + p_addr_item = (net_address_item_t*) + cl_vector_get_ptr( &p_adapter->ip_vector, idx ); + + if( !p_addr_item->p_reg ) + continue; + + if( p_addr_item->p_reg->h_reg_svc ) + { + p_adapter->p_ifc->dereg_svc( + p_addr_item->p_reg->h_reg_svc, __ipoib_ats_dereg_cb ); + } + else + { + cl_free( p_addr_item->p_reg ); + } + p_addr_item->p_reg = NULL; + } + + IPOIB_EXIT( IPOIB_DBG_OID ); +} + + +static void +__ipoib_ats_reg_cb( + IN ib_reg_svc_rec_t *p_reg_svc_rec ) +{ + ats_reg_t *p_reg; + uint8_t port_num; + + IPOIB_ENTER( IPOIB_DBG_OID ); + + CL_ASSERT( p_reg_svc_rec ); + CL_ASSERT( p_reg_svc_rec->svc_context ); + + p_reg = (ats_reg_t* __ptr64)p_reg_svc_rec->svc_context; + port_num = p_reg->p_adapter->guids.port_num; + + cl_obj_lock( &p_reg->p_adapter->obj ); + + if( p_reg_svc_rec->req_status == IB_SUCCESS && + !p_reg_svc_rec->resp_status ) + { + IPOIB_PRINT( TRACE_LEVEL_INFORMATION,IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Registered IP Address " + "of %d.%d.%d.%d\n", + port_num, + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+1], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+2], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+3]) ); + } + else if( p_reg_svc_rec->req_status != IB_CANCELED ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_OID, + ("Port %d OID_GEN_NETWORK_LAYER_ADDRESSES - Failed to register IP Address " + "of %d.%d.%d.%d with error %s\n", + port_num, + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+1], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+2], + p_reg_svc_rec->svc_rec.service_data8[ATS_IPV4_OFFSET+3], + p_reg->p_adapter->p_ifc->get_err_str( p_reg_svc_rec->resp_status )) ); + p_reg->p_adapter->hung = TRUE; + p_reg->h_reg_svc = NULL; + } + + cl_obj_unlock( &p_reg->p_adapter->obj ); + cl_obj_deref(&p_reg->p_adapter->obj); + + IPOIB_EXIT( IPOIB_DBG_OID ); +} + + +static void +__ipoib_ats_dereg_cb( + IN void *context ) +{ + cl_free( 
context );
+}
diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.h
new file mode 100644
index 00000000..01291c65
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_driver.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _IPOIB_DRIVER_H_
+#define _IPOIB_DRIVER_H_
+
+
+#include "ipoib_log.h"
+#include "ipoib_adapter.h"
+#include
+#include
+#include "ipoib_debug.h"
+
+
+/*
+ * Definitions
+ */
+#define MAX_BUNDLE_ID_LENGTH 32
+
+
+#define IB_MTU 2048
+/*
+ * Header length as defined by IPoIB spec:
+ * http://www.ietf.org/internet-drafts/draft-ietf-ipoib-ip-over-infiniband-04.txt
+ */
+
+#define MAX_PAYLOAD_MTU (IB_MTU - sizeof(ipoib_hdr_t))
+
+/*
+ * Only the protocol type is sent as part of the UD payload
+ * since the rest of the Ethernet header is encapsulated in the
+ * various IB headers. We report our buffer space as if we
+ * transmit the Ethernet headers.
+ */
+#define MAX_XFER_BLOCK_SIZE (sizeof(eth_hdr_t) + MAX_PAYLOAD_MTU)
+
+
+typedef struct _ipoib_globals
+{
+	KSPIN_LOCK lock;
+	cl_qlist_t adapter_list;
+	cl_qlist_t bundle_list;
+
+	atomic32_t laa_idx;
+
+	NDIS_HANDLE h_ndis_wrapper;
+	NDIS_HANDLE h_ibat_dev;
+	volatile LONG ibat_ref;
+	uint32_t bypass_check_bcast_rate;
+
+} ipoib_globals_t;
+/*
+* FIELDS
+* lock
+* Spinlock to protect list access.
+*
+* adapter_list
+* List of all adapter instances. Used for address translation support.
+*
+* bundle_list
+* List of all adapter bundles.
+*
+* laa_idx
+* Global counter for generating LAA MACs
+*
+* h_ibat_dev
+* Device handle returned by NdisMRegisterDevice.
+*********/
+
+extern ipoib_globals_t g_ipoib;
+
+
+typedef struct _ipoib_bundle
+{
+	cl_list_item_t list_item;
+	char bundle_id[MAX_BUNDLE_ID_LENGTH];
+	cl_qlist_t adapter_list;
+
+} ipoib_bundle_t;
+/*
+* FIELDS
+* list_item
+* List item for storing the bundle in a quick list.
+*
+* bundle_id
+* Bundle identifier.
+*
+* adapter_list
+* List of adapters in the bundle. The adapter at the head is the
+* primary adapter of the bundle.
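+*
+* NOTES
+* (Editor's note, not part of the original change.)  As a worked example
+* for the MTU macros defined earlier in this header, and assuming the
+* 4-byte ipoib_hdr_t and 14-byte eth_hdr_t this driver uses:
+* MAX_PAYLOAD_MTU = 2048 - 4 = 2044 bytes, and
+* MAX_XFER_BLOCK_SIZE = 14 + 2044 = 2058 bytes, which is the per-packet
+* buffer size reported up to NDIS.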
+*********/ + + + +void +ipoib_resume_oids( + IN ipoib_adapter_t* const p_adapter ); + +#endif /* _IPOIB_DRIVER_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.c b/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.c new file mode 100644 index 00000000..d727b02e --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.c @@ -0,0 +1,456 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "ipoib_endpoint.h" +#include "ipoib_port.h" +#include "ipoib_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ipoib_endpoint.tmh" +#endif +#include + + +static void +__endpt_destroying( + IN cl_obj_t* p_obj ); + +static void +__endpt_cleanup( + IN cl_obj_t* p_obj ); + +static void +__endpt_free( + IN cl_obj_t* p_obj ); + +static ib_api_status_t +__create_mcast_av( + IN ib_pd_handle_t h_pd, + IN uint8_t port_num, + IN ib_member_rec_t* const p_member_rec, + OUT ib_av_handle_t* const ph_av ); + +static inline ipoib_port_t* +__endpt_parent( + IN ipoib_endpt_t* const p_endpt ); + +static void +__path_query_cb( + IN ib_query_rec_t *p_query_rec ); + +static void +__endpt_resolve( + IN ipoib_endpt_t* const p_endpt ); + + +ipoib_endpt_t* +ipoib_endpt_create( + IN const ib_gid_t* const p_dgid, + IN const net16_t dlid, + IN const net32_t qpn ) +{ + ipoib_endpt_t *p_endpt; + cl_status_t status; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_endpt = cl_zalloc( sizeof(ipoib_endpt_t) ); + if( !p_endpt ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate endpoint (%d bytes)\n", + sizeof(ipoib_endpt_t)) ); + return NULL; + } + + cl_obj_construct( &p_endpt->obj, IPOIB_OBJ_ENDPOINT ); + + status = cl_obj_init( &p_endpt->obj, CL_DESTROY_ASYNC, + __endpt_destroying, __endpt_cleanup, __endpt_free ); + + p_endpt->dgid = *p_dgid; + p_endpt->dlid = dlid; + p_endpt->qpn = qpn; + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return p_endpt; +} + + +static ib_api_status_t +__create_mcast_av( + IN ib_pd_handle_t h_pd, + IN uint8_t port_num, + IN ib_member_rec_t* const p_member_rec, + OUT ib_av_handle_t* const ph_av ) +{ + ib_av_attr_t av_attr; + uint32_t flow_lbl; + uint8_t hop_lmt; + ib_api_status_t status; + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + + p_endpt = PARENT_STRUCT(ph_av, ipoib_endpt_t, h_av ); + + cl_memclr( &av_attr, 
sizeof(ib_av_attr_t) ); + av_attr.port_num = port_num; + ib_member_get_sl_flow_hop( p_member_rec->sl_flow_hop, + &av_attr.sl, &flow_lbl, &hop_lmt ); + av_attr.dlid = p_member_rec->mlid; + av_attr.grh_valid = TRUE; + av_attr.grh.hop_limit = hop_lmt; + av_attr.grh.dest_gid = p_member_rec->mgid; + av_attr.grh.src_gid = p_member_rec->port_gid; + av_attr.grh.ver_class_flow = + ib_grh_set_ver_class_flow( 6, p_member_rec->tclass, flow_lbl ); + av_attr.static_rate = p_member_rec->rate & IB_PATH_REC_BASE_MASK; + av_attr.path_bits = 0; + /* port is not attached to endpoint at this point, so use endpt ifc reference */ + status = p_endpt->p_ifc->create_av( h_pd, &av_attr, ph_av ); + + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_av returned %s\n", + p_endpt->p_ifc->get_err_str( status )) ); + } + + IPOIB_EXIT( IPOIB_DBG_MCAST ); + return status; +} + + +ib_api_status_t +ipoib_endpt_set_mcast( + IN ipoib_endpt_t* const p_endpt, + IN ib_pd_handle_t h_pd, + IN uint8_t port_num, + IN ib_mcast_rec_t* const p_mcast_rec ) +{ + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("Create av for MAC: %02X-%02X-%02X-%02X-%02X-%02X\n", + p_endpt->mac.addr[0], p_endpt->mac.addr[1], + p_endpt->mac.addr[2], p_endpt->mac.addr[3], + p_endpt->mac.addr[4], p_endpt->mac.addr[5]) ); + + status = __create_mcast_av( h_pd, port_num, p_mcast_rec->p_member_rec, + &p_endpt->h_av ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__create_mcast_av returned %s\n", + p_endpt->p_ifc->get_err_str( status )) ); + return status; + } + p_endpt->h_mcast = p_mcast_rec->h_mcast; + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return IB_SUCCESS; +} + + +static void +__endpt_destroying( + IN cl_obj_t* p_obj ) +{ + ipoib_endpt_t *p_endpt; + ipoib_port_t *p_port; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_endpt = PARENT_STRUCT( p_obj, ipoib_endpt_t, obj ); + p_port = __endpt_parent( p_endpt ); + + cl_obj_lock( p_obj ); + if( p_endpt->h_query ) + { + p_port->p_adapter->p_ifc->cancel_query( + p_port->p_adapter->h_al, p_endpt->h_query ); + p_endpt->h_query = NULL; + } + + /* Leave the multicast group if it exists. */ + if( p_endpt->h_mcast ) + { + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("Leaving MCast group\n") ); + ipoib_port_ref(p_port, ref_leave_mcast); + p_port->p_adapter->p_ifc->leave_mcast( p_endpt->h_mcast, ipoib_leave_mcast_cb ); + } + + cl_obj_unlock( p_obj ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); +} + + +static void +__endpt_cleanup( + IN cl_obj_t* p_obj ) +{ + ipoib_endpt_t *p_endpt; + ipoib_port_t *p_port; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_endpt = PARENT_STRUCT( p_obj, ipoib_endpt_t, obj ); + p_port = __endpt_parent( p_endpt ); + + /* Destroy the AV if it exists. */ + if( p_endpt->h_av ) + p_port->p_adapter->p_ifc->destroy_av( p_endpt->h_av ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); +} + + +static void +__endpt_free( + IN cl_obj_t* p_obj ) +{ + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_endpt = PARENT_STRUCT( p_obj, ipoib_endpt_t, obj ); + + cl_obj_deinit( p_obj ); + cl_free( p_endpt ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); +} + + +static inline ipoib_port_t* +__endpt_parent( + IN ipoib_endpt_t* const p_endpt ) +{ + return PARENT_STRUCT( p_endpt->rel.p_parent_obj, ipoib_port_t, obj ); +} + + +/* + * This function is called with the port object's send lock held and + * a reference held on the endpoint. If we fail, we release the reference. 
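+ * A sketch of a conforming caller follows.
+ */
+
+/* Editor's sketch, not part of the original change: a hypothetical caller
+ * honoring the contract described above.  example_send is an invented name;
+ * only ipoib_endpt_queue and ipoib_port_resume are real.  Inert under #if 0. */
+#if 0
+static void
+example_send(
+	IN	ipoib_port_t* const		p_port,
+	IN	ipoib_endpt_t* const	p_endpt )
+{
+	/* Caller already holds the send lock and an endpoint reference. */
+	NDIS_STATUS	status = ipoib_endpt_queue( p_endpt );
+
+	if( status == NDIS_STATUS_SUCCESS )
+	{
+		/* An AV already exists - the send can be posted now. */
+	}
+	else
+	{
+		/* NDIS_STATUS_PENDING: a path query is outstanding (or the
+		 * QPN was invalid) and the reference is released by the
+		 * query path; leave the packet queued.  __path_query_cb
+		 * creates the AV and calls ipoib_port_resume() to retry. */
+	}
+	UNREFERENCED_PARAMETER( p_port );
+}
+#endif
+/*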
+ */ +NDIS_STATUS +ipoib_endpt_queue( + IN ipoib_endpt_t* const p_endpt ) +{ + ib_api_status_t status; + ipoib_port_t *p_port; + ib_query_req_t query; + ib_user_query_t info; + ib_path_rec_t path; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + if( p_endpt->h_av ) + { + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return NDIS_STATUS_SUCCESS; + } + + if( p_endpt->h_query || + p_endpt->qpn == CL_HTON32(0x00FFFFFF) ) + { + ipoib_endpt_deref( p_endpt ); + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return NDIS_STATUS_PENDING; + } + + /* This is the first packet for this endpoint. Query the SA. */ + p_port = __endpt_parent( p_endpt ); + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + info.method = IB_MAD_METHOD_GETTABLE; + info.attr_id = IB_MAD_ATTR_PATH_RECORD; + info.attr_size = sizeof(ib_path_rec_t); + info.comp_mask = IB_PR_COMPMASK_DGID | IB_PR_COMPMASK_SGID | + IB_PR_COMPMASK_REVERSIBLE | IB_PR_COMPMASK_NUM_PATH; + info.p_attr = &path; + + cl_memclr( &path, sizeof(ib_path_rec_t) ); + path.dgid = p_endpt->dgid; + ib_gid_set_default( &path.sgid, p_port->p_adapter->guids.port_guid ); + path.num_path = 0x1; + + cl_memclr( &query, sizeof(ib_query_req_t) ); + query.query_type = IB_QUERY_USER_DEFINED; + query.p_query_input = &info; + query.port_guid = p_port->p_adapter->guids.port_guid; + query.timeout_ms = p_port->p_adapter->params.sa_timeout; + query.retry_cnt = p_port->p_adapter->params.sa_retry_cnt; + + query.query_context = p_endpt; + query.pfn_query_cb = __path_query_cb; + + status = p_port->p_adapter->p_ifc->query( + p_port->p_adapter->h_al, &query, &p_endpt->h_query ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ENDPT, + ("ib_query for path returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + ipoib_endpt_deref( p_endpt ); + /* Flag the adapter as hung. */ + p_port->p_adapter->hung = TRUE; + } + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return NDIS_STATUS_PENDING; +} + + +static void +__path_query_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ib_api_status_t status; + ipoib_endpt_t *p_endpt; + ipoib_port_t *p_port; + ib_av_attr_t av_attr; + ib_path_rec_t *p_path; + net32_t flow_lbl; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_endpt = (ipoib_endpt_t*__ptr64)p_query_rec->query_context; + p_port = __endpt_parent( p_endpt ); + + cl_obj_lock( &p_endpt->obj ); + p_endpt->h_query = NULL; + if( p_endpt->obj.state == CL_DESTROYING ) + { + cl_obj_unlock( &p_endpt->obj ); + ipoib_endpt_deref( p_endpt ); + if( p_query_rec->p_result_mad ) + p_port->p_adapter->p_ifc->put_mad( p_query_rec->p_result_mad ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Endpoint destroying, aborting.\n") ); + return; + } + cl_obj_unlock( &p_endpt->obj ); + + if( p_query_rec->status != IB_SUCCESS || !p_query_rec->result_cnt ) + { + p_port->p_adapter->hung = TRUE; + ipoib_endpt_deref( p_endpt ); + if( p_query_rec->p_result_mad ) + p_port->p_adapter->p_ifc->put_mad( p_query_rec->p_result_mad ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Path query failed with %s\n", + p_port->p_adapter->p_ifc->get_err_str( p_query_rec->status )) ); + return; + } + + p_path = ib_get_query_path_rec( p_query_rec->p_result_mad, 0 ); + + cl_memclr( &av_attr, sizeof(ib_av_attr_t) ); + + av_attr.port_num = p_port->port_num; + + av_attr.sl = ib_path_rec_sl( p_path ); + av_attr.dlid = p_path->dlid; + + /* + * We always send the GRH so that we preferably lookup endpoints + * by GID rather than by LID. This allows certain WHQL tests + * such as the 2c_MediaCheck test to succeed since they don't use + * IP. 
This allows endpoints to be created on the fly for requests + * for which there is no match, something that doesn't work when + * using LIDs only. + */ + flow_lbl = ib_path_rec_flow_lbl( p_path ); + av_attr.grh_valid = TRUE; + av_attr.grh.ver_class_flow = ib_grh_set_ver_class_flow( + 6, p_path->tclass, flow_lbl ); + av_attr.grh.resv1 = 0; + av_attr.grh.resv2 = 0; + av_attr.grh.hop_limit = ib_path_rec_hop_limit( p_path ); + av_attr.grh.src_gid = p_path->sgid; + av_attr.grh.dest_gid = p_path->dgid; + + cl_obj_lock( &p_port->obj ); + if( !p_endpt->dlid ) + { + cl_map_item_t *p_qitem; + + /* This is a subnet local endpoint that does not have its LID set. */ + p_endpt->dlid = p_path->dlid; + /* + * Insert the item in the LID map so that locally routed unicast + * traffic will resolve it properly. + */ + p_qitem = cl_qmap_insert( &p_port->endpt_mgr.lid_endpts, + p_endpt->dlid, &p_endpt->lid_item ); + CL_ASSERT( p_qitem == &p_endpt->lid_item ); + } + cl_obj_unlock( &p_port->obj ); + av_attr.static_rate = ib_path_rec_rate( p_path ); + av_attr.path_bits = 0; + + /* Done with the path record. Release the MAD. */ + p_port->p_adapter->p_ifc->put_mad( p_query_rec->p_result_mad ); + + /* Create the AV. */ + status = p_port->p_adapter->p_ifc->create_av( + p_port->ib_mgr.h_pd, &av_attr, &p_endpt->h_av ); + if( status != IB_SUCCESS ) + { + p_port->p_adapter->hung = TRUE; + ipoib_endpt_deref( p_endpt ); + cl_obj_unlock( &p_endpt->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_av failed with %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return; + } + + /* Try to send all pending sends. */ + ipoib_port_resume( p_port ); + + /* Release the reference taken for the SA query. */ + ipoib_endpt_deref( p_endpt ); + IPOIB_EXIT( IPOIB_DBG_ENDPT ); +} diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.h new file mode 100644 index 00000000..c90d1545 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_endpoint.h @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef _IPOIB_ENDPOINT_H_ +#define _IPOIB_ENDPOINT_H_ + + +#include +#include +#include +#include +#include +#include "iba/ipoib_ifc.h" +#include +#include "ipoib_debug.h" + + +typedef struct _ipoib_endpt +{ + cl_obj_t obj; + cl_obj_rel_t rel; + cl_map_item_t mac_item; + cl_fmap_item_t gid_item; + cl_map_item_t lid_item; + ib_query_handle_t h_query; + ib_mcast_handle_t h_mcast; + mac_addr_t mac; + ib_gid_t dgid; + net16_t dlid; + net32_t qpn; + ib_av_handle_t h_av; + boolean_t expired; + ib_al_ifc_t *p_ifc; + +} ipoib_endpt_t; +/* +* FIELDS +* mac_item +* Map item for storing the endpoint in a map. The key is the +* destination MAC address. +* +* lid_item +* Map item for storing the endpoint in a map. The key is the +* destination LID. +* +* gid_item +* Map item for storing the endpoint in a map. The key is the +* destination GID. +* +* h_query +* Query handle for cancelling SA queries. +* +* h_mcast +* For multicast endpoints, the multicast handle. +* +* mac +* MAC address. +* +* dgid +* Destination GID. +* +* dlid +* Destination LID. The destination LID is only set for endpoints +* that are on the same subnet. It is used as key in the LID map. +* +* qpn +* Destination queue pair number. +* +* h_av +* Address vector for sending data. +* +* expired +* Flag to indicate that the endpoint should be flushed. +* +* p_ifc +* Reference to transport functions, can be used +* while endpoint is not attached to port yet. +* +* NOTES +* If the h_mcast member is set, the endpoint is never expired. +*********/ + + +ipoib_endpt_t* +ipoib_endpt_create( + IN const ib_gid_t* const p_dgid, + IN const net16_t dlid, + IN const net32_t qpn ); + + +ib_api_status_t +ipoib_endpt_set_mcast( + IN ipoib_endpt_t* const p_endpt, + IN ib_pd_handle_t h_pd, + IN uint8_t port_num, + IN ib_mcast_rec_t* const p_mcast_rec ); + + +static inline void +ipoib_endpt_ref( + IN ipoib_endpt_t* const p_endpt ) +{ + CL_ASSERT( p_endpt ); + + cl_obj_ref( &p_endpt->obj ); + /* + * Anytime we reference the endpoint, we're either receiving data + * or trying to send data to that endpoint. Clear the expired flag + * to prevent the AV from being flushed. + */ + p_endpt->expired = FALSE; +} + + +static inline void +ipoib_endpt_deref( + IN ipoib_endpt_t* const p_endpt ) +{ + cl_obj_deref( &p_endpt->obj ); +} + + +NDIS_STATUS +ipoib_endpt_queue( + IN ipoib_endpt_t* const p_endpt ); + + +#endif /* _IPOIB_ENDPOINT_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.c b/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.c new file mode 100644 index 00000000..81297093 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.c @@ -0,0 +1,495 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "ipoib_driver.h" +#include "ipoib_adapter.h" +#include "ipoib_port.h" +#include "ipoib_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ipoib_ibat.tmh" +#endif +#include + + +static NTSTATUS +__ipoib_create( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ); + +static NTSTATUS +__ipoib_cleanup( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ); + +static NTSTATUS +__ipoib_close( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ); + +static NTSTATUS +__ipoib_dispatch( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ); + + +static NTSTATUS +__ibat_get_ports( + IN IRP *pIrp, + IN IO_STACK_LOCATION *pIoStack ) +{ + IOCTL_IBAT_PORTS_IN *pIn; + IOCTL_IBAT_PORTS_OUT *pOut; + KLOCK_QUEUE_HANDLE hdl; + cl_list_item_t *pItem; + ipoib_adapter_t *pAdapter; + LONG nPorts; + + IPOIB_ENTER(IPOIB_DBG_IOCTL); + + if( pIoStack->Parameters.DeviceIoControl.InputBufferLength != + sizeof(IOCTL_IBAT_PORTS_IN) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid input buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + if( pIoStack->Parameters.DeviceIoControl.OutputBufferLength < + sizeof(IOCTL_IBAT_PORTS_OUT) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid output buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + pIn = pIrp->AssociatedIrp.SystemBuffer; + pOut = pIrp->AssociatedIrp.SystemBuffer; + + if( pIn->Version != IBAT_IOCTL_VERSION ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid version.\n") ); + return STATUS_INVALID_PARAMETER; + } + + KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl ); + nPorts = (LONG)cl_qlist_count( &g_ipoib.adapter_list ); + switch( nPorts ) + { + case 0: + cl_memclr( pOut->Ports, sizeof(pOut->Ports) ); + /* Fall through */ + case 1: + pOut->Size = sizeof(IOCTL_IBAT_PORTS_OUT); + break; + + default: + pOut->Size = sizeof(IOCTL_IBAT_PORTS_OUT) + + (sizeof(IBAT_PORT_RECORD) * (nPorts - 1)); + break; + } + + pIrp->IoStatus.Information = pOut->Size; + + if( pOut->Size > pIoStack->Parameters.DeviceIoControl.OutputBufferLength ) + { + nPorts = 1 + + (pIoStack->Parameters.DeviceIoControl.OutputBufferLength - + sizeof(IOCTL_IBAT_PORTS_OUT)) / sizeof(IBAT_PORT_RECORD); + + pIrp->IoStatus.Information = sizeof(IOCTL_IBAT_PORTS_OUT) + + ((nPorts - 1) * sizeof(IBAT_PORT_RECORD)); + } + + pOut->NumPorts = 0; + pItem = cl_qlist_head( &g_ipoib.adapter_list ); + while( pOut->NumPorts != nPorts ) + { + pAdapter = CONTAINING_RECORD( pItem, ipoib_adapter_t, entry ); + pOut->Ports[pOut->NumPorts].CaGuid = pAdapter->guids.ca_guid; + pOut->Ports[pOut->NumPorts].PortGuid = pAdapter->guids.port_guid; + pOut->Ports[pOut->NumPorts].PortNum = pAdapter->guids.port_num; + pOut->NumPorts++; + + pItem = cl_qlist_next( pItem ); + } + + KeReleaseInStackQueuedSpinLock( &hdl ); + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__ibat_get_ips( + IN IRP *pIrp, + IN IO_STACK_LOCATION *pIoStack ) +{ + 
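+	/* Editor's note, not part of the original change: this handler uses the
+	 * same variable-size output protocol as __ibat_get_ports above - report
+	 * the full result size in the output struct, but copy only what fits
+	 * and set IoStatus.Information to the truncated length.  A reduced
+	 * sketch (example_copy_records is an invented name), inert under #if 0:
+	 */
+#if 0
+static NTSTATUS
+example_copy_records(
+	void *out_buf, ULONG out_len,		/* caller's buffer           */
+	const void *recs, ULONG total_size,	/* full result set           */
+	ULONG_PTR *p_info )			/* -> IoStatus.Information   */
+{
+	ULONG copy_len = (out_len < total_size) ? out_len : total_size;
+
+	/* The Size field of the output struct would carry total_size so the
+	 * caller can retry with a larger buffer; never copy past the end. */
+	RtlCopyMemory( out_buf, recs, copy_len );
+	*p_info = copy_len;
+	return STATUS_SUCCESS;
+}
+#endif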
IOCTL_IBAT_IP_ADDRESSES_IN *pIn; + IOCTL_IBAT_IP_ADDRESSES_OUT *pOut; + KLOCK_QUEUE_HANDLE hdl; + cl_list_item_t *pItem; + ipoib_adapter_t *pAdapter; + LONG nIps, maxIps; + size_t idx; + net_address_item_t *pAddr; + UINT64 PortGuid; + + IPOIB_ENTER(IPOIB_DBG_IOCTL); + + if( pIoStack->Parameters.DeviceIoControl.InputBufferLength != + sizeof(IOCTL_IBAT_IP_ADDRESSES_IN) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid input buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + if( pIoStack->Parameters.DeviceIoControl.OutputBufferLength < + sizeof(IOCTL_IBAT_IP_ADDRESSES_OUT) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid output buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + pIn = pIrp->AssociatedIrp.SystemBuffer; + pOut = pIrp->AssociatedIrp.SystemBuffer; + + if( pIn->Version != IBAT_IOCTL_VERSION ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid version.\n") ); + return STATUS_INVALID_PARAMETER; + } + + PortGuid = pIn->PortGuid; + + nIps = 0; + pOut->AddressCount = 0; + maxIps = 1 + + ((pIoStack->Parameters.DeviceIoControl.OutputBufferLength - + sizeof(IOCTL_IBAT_IP_ADDRESSES_OUT)) / sizeof(IP_ADDRESS)); + + KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl ); + for( pItem = cl_qlist_head( &g_ipoib.adapter_list ); + pItem != cl_qlist_end( &g_ipoib.adapter_list ); + pItem = cl_qlist_next( pItem ) ) + { + pAdapter = CONTAINING_RECORD( pItem, ipoib_adapter_t, entry ); + if( PortGuid && pAdapter->guids.port_guid != PortGuid ) + continue; + + cl_obj_lock( &pAdapter->obj ); + nIps += (LONG)cl_vector_get_size( &pAdapter->ip_vector ); + + for( idx = 0; + idx < cl_vector_get_size( &pAdapter->ip_vector ); + idx++ ) + { + if( pOut->AddressCount == maxIps ) + break; + + pAddr = (net_address_item_t*) + cl_vector_get_ptr( &pAdapter->ip_vector, idx ); + + pOut->Address[pOut->AddressCount].IpVersion = 4; + cl_memclr( &pOut->Address[pOut->AddressCount].Address, + sizeof(IP_ADDRESS) ); + cl_memcpy( &pOut->Address[pOut->AddressCount].Address[12], + pAddr->address.as_bytes, IPV4_ADDR_SIZE ); + + pOut->AddressCount++; + } + cl_obj_unlock( &pAdapter->obj ); + } + + pOut->Size = sizeof(IOCTL_IBAT_IP_ADDRESSES_OUT); + if( --nIps ) + pOut->Size += sizeof(IP_ADDRESS) * nIps; + + pIrp->IoStatus.Information = sizeof(IOCTL_IBAT_IP_ADDRESSES_OUT); + if( --maxIps < nIps ) + pIrp->IoStatus.Information += (sizeof(IP_ADDRESS) * maxIps); + else + pIrp->IoStatus.Information += (sizeof(IP_ADDRESS) * nIps); + + KeReleaseInStackQueuedSpinLock( &hdl ); + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__ibat_mac_to_gid( + IN IRP *pIrp, + IN IO_STACK_LOCATION *pIoStack ) +{ + NTSTATUS status = STATUS_INVALID_PARAMETER; + IOCTL_IBAT_MAC_TO_GID_IN *pIn; + IOCTL_IBAT_MAC_TO_GID_OUT *pOut; + KLOCK_QUEUE_HANDLE hdl; + cl_list_item_t *pItem; + ipoib_adapter_t *pAdapter; + + IPOIB_ENTER(IPOIB_DBG_IOCTL); + + if( pIoStack->Parameters.DeviceIoControl.InputBufferLength != + sizeof(IOCTL_IBAT_MAC_TO_GID_IN) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid input buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + if( pIoStack->Parameters.DeviceIoControl.OutputBufferLength != + sizeof(IOCTL_IBAT_MAC_TO_GID_OUT) ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid output buffer size.\n") ); + return STATUS_INVALID_PARAMETER; + } + + pIn = pIrp->AssociatedIrp.SystemBuffer; + pOut = pIrp->AssociatedIrp.SystemBuffer; + + if( pIn->Version != IBAT_IOCTL_VERSION 
) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid version.\n") ); + return STATUS_INVALID_PARAMETER; + } + + KeAcquireInStackQueuedSpinLock( &g_ipoib.lock, &hdl ); + + for( pItem = cl_qlist_head( &g_ipoib.adapter_list ); + pItem != cl_qlist_end( &g_ipoib.adapter_list ); + pItem = cl_qlist_next( pItem ) ) + { + pAdapter = CONTAINING_RECORD( pItem, ipoib_adapter_t, entry ); + if( pIn->PortGuid != pAdapter->guids.port_guid ) + continue; + + /* Found the port - lookup the MAC. */ + cl_obj_lock( &pAdapter->obj ); + if( pAdapter->p_port ) + { + status = ipoib_mac_to_gid( + pAdapter->p_port, *(mac_addr_t*)pIn->DestMac, &pOut->DestGid ); + if( NT_SUCCESS( status ) ) + { + pIrp->IoStatus.Information = + sizeof(IOCTL_IBAT_MAC_TO_GID_OUT); + } + } + cl_obj_unlock( &pAdapter->obj ); + break; + } + + KeReleaseInStackQueuedSpinLock( &hdl ); + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return status; +} + + +void +ipoib_ref_ibat() +{ + NDIS_STATUS status; + NDIS_STRING DeviceName; + NDIS_STRING DeviceLinkUnicodeString; + PDRIVER_DISPATCH DispatchTable[IRP_MJ_MAXIMUM_FUNCTION+1]; + DEVICE_OBJECT *p_dev_obj; + + IPOIB_ENTER( IPOIB_DBG_IOCTL ); + + if( InterlockedIncrement( &g_ipoib.ibat_ref ) == 1 ) + { + NdisInitUnicodeString( &DeviceName, IBAT_DEV_NAME ); + NdisInitUnicodeString( &DeviceLinkUnicodeString, IBAT_DOS_DEV_NAME ); + + NdisZeroMemory( DispatchTable, sizeof(DispatchTable) ); + + DispatchTable[IRP_MJ_CREATE] = __ipoib_create; + DispatchTable[IRP_MJ_CLEANUP] = __ipoib_cleanup; + DispatchTable[IRP_MJ_CLOSE] = __ipoib_close; + DispatchTable[IRP_MJ_DEVICE_CONTROL] = __ipoib_dispatch; + DispatchTable[IRP_MJ_INTERNAL_DEVICE_CONTROL] = __ipoib_dispatch; + + status = NdisMRegisterDevice( g_ipoib.h_ndis_wrapper, + &DeviceName, &DeviceLinkUnicodeString, &DispatchTable[0], + &p_dev_obj, &g_ipoib.h_ibat_dev ); + if( status != NDIS_STATUS_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisMRegisterDevice failed with status of %d\n", status) ); + } + } + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); +} + + +void +ipoib_deref_ibat() +{ + IPOIB_ENTER( IPOIB_DBG_IOCTL ); + + if( InterlockedDecrement( &g_ipoib.ibat_ref ) ) + { + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return; + } + + if( g_ipoib.h_ibat_dev ) + { + NdisMDeregisterDevice( g_ipoib.h_ibat_dev ); + g_ipoib.h_ibat_dev = NULL; + } + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); +} + + +static NTSTATUS +__ipoib_create( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ) +{ + IPOIB_ENTER( IPOIB_DBG_IOCTL ); + + UNREFERENCED_PARAMETER( pDevObj ); + + ipoib_ref_ibat(); + + pIrp->IoStatus.Status = STATUS_SUCCESS; + pIrp->IoStatus.Information = 0; + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__ipoib_cleanup( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ) +{ + IPOIB_ENTER( IPOIB_DBG_IOCTL ); + + UNREFERENCED_PARAMETER( pDevObj ); + + ipoib_deref_ibat(); + + pIrp->IoStatus.Status = STATUS_SUCCESS; + pIrp->IoStatus.Information = 0; + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__ipoib_close( + IN DEVICE_OBJECT* const pDevObj, + IN IRP* const pIrp ) +{ + IPOIB_ENTER( IPOIB_DBG_IOCTL ); + + UNREFERENCED_PARAMETER( pDevObj ); + + pIrp->IoStatus.Status = STATUS_SUCCESS; + pIrp->IoStatus.Information = 0; + IoCompleteRequest( pIrp, IO_NO_INCREMENT ); + + IPOIB_EXIT( IPOIB_DBG_IOCTL ); + return STATUS_SUCCESS; +} + + +static NTSTATUS +__ipoib_dispatch( + IN DEVICE_OBJECT* const 
pDevObj,
+	IN IRP* const pIrp )
+{
+	IO_STACK_LOCATION *pIoStack;
+	NTSTATUS status = STATUS_SUCCESS;
+
+	IPOIB_ENTER( IPOIB_DBG_IOCTL );
+
+	UNREFERENCED_PARAMETER( pDevObj );
+
+	pIoStack = IoGetCurrentIrpStackLocation( pIrp );
+
+	pIrp->IoStatus.Information = 0;
+
+	switch( pIoStack->Parameters.DeviceIoControl.IoControlCode )
+	{
+	case IOCTL_IBAT_PORTS:
+		IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_IOCTL,
+			("IOCTL_IBAT_PORTS received\n") );
+		status = __ibat_get_ports( pIrp, pIoStack );
+		break;
+
+	case IOCTL_IBAT_IP_ADDRESSES:
+		IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_IOCTL,
+			("IOCTL_IBAT_IP_ADDRESSES received\n" ));
+		status = __ibat_get_ips( pIrp, pIoStack );
+		break;
+
+	case IOCTL_IBAT_MAC_TO_GID:
+		IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_IOCTL,
+			("IOCTL_IBAT_MAC_TO_GID received\n" ));
+		status = __ibat_mac_to_gid( pIrp, pIoStack );
+		break;
+
+	default:
+		IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_IOCTL,
+			("unknown IOCTL code = 0x%x\n",
+			pIoStack->Parameters.DeviceIoControl.IoControlCode) );
+		status = STATUS_INVALID_PARAMETER;
+	}
+
+	pIrp->IoStatus.Status = status;
+	IoCompleteRequest( pIrp, IO_NO_INCREMENT );
+
+	IPOIB_EXIT( IPOIB_DBG_IOCTL );
+	return status;
+}
+
diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.h
new file mode 100644
index 00000000..83d01951
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_ibat.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _IPOIB_IBAT_H_
+#define _IPOIB_IBAT_H_
+
+
+void
+ipoib_ref_ibat();
+
+void
+ipoib_deref_ibat();
+
+
+#endif	/* _IPOIB_IBAT_H_ */
diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_log.mc b/branches/Ndi/ulp/ipoib/kernel/ipoib_log.mc
new file mode 100644
index 00000000..9676ff29
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_log.mc
@@ -0,0 +1,285 @@
+;/*++
+;=============================================================================
+;Copyright (c) 2001 Mellanox Technologies
+;
+;Module Name:
+;
+;	ipoiblog.mc
+;
+;Abstract:
+;
+;	IPoIB Driver event log messages
+;
+;Authors:
+;
+;	Yossi Leybovich
+;
+;Environment:
+;
+;	Kernel Mode.
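+;
+; Editor's note (not part of the original change): the messages defined
+; below are emitted from the driver with the standard NDIS call, e.g.
+;
+;	NdisWriteErrorLogEntry( h_adapter, EVENT_IPOIB_PORT_DOWN, 0 );
+;
+; where h_adapter is a placeholder name for the miniport's NDIS handle and
+; the trailing 0 says no extra dwords accompany the entry.  The %2 in each
+; message body is filled in by the system with the adapter name.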
+;
+;=============================================================================
+;--*/
+;
+MessageIdTypedef = NDIS_ERROR_CODE
+
+SeverityNames = (
+	Success = 0x0:STATUS_SEVERITY_SUCCESS
+	Informational = 0x1:STATUS_SEVERITY_INFORMATIONAL
+	Warning = 0x2:STATUS_SEVERITY_WARNING
+	Error = 0x3:STATUS_SEVERITY_ERROR
+	)
+
+FacilityNames = (
+	System = 0x0
+	RpcRuntime = 0x2:FACILITY_RPC_RUNTIME
+	RpcStubs = 0x3:FACILITY_RPC_STUBS
+	Io = 0x4:FACILITY_IO_ERROR_CODE
+	IPoIB = 0x7:FACILITY_IPOIB_ERROR_CODE
+	)
+
+
+MessageId=0x0001
+Facility=IPoIB
+Severity=Warning
+SymbolicName=EVENT_IPOIB_PORT_DOWN
+Language=English
+%2: Network controller link is down.
+.
+
+MessageId=0x0002
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP
+Language=English
+%2: Network controller link is up.
+.
+
+MessageId=0x0003
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP1
+Language=English
+%2: Network controller link is up at 2.5Gbps.
+.
+
+MessageId=0x0004
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP2
+Language=English
+%2: Network controller link is up at 5Gbps.
+.
+
+MessageId=0x0006
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP3
+Language=English
+%2: Network controller link is up at 10Gbps.
+.
+
+MessageId=0x000a
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP4
+Language=English
+%2: Network controller link is up at 20Gbps.
+.
+
+MessageId=0x000e
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP5
+Language=English
+%2: Network controller link is up at 30Gbps.
+.
+
+MessageId=0x0012
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP6
+Language=English
+%2: Network controller link is up at 40Gbps.
+.
+
+MessageId=0x001a
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP7
+Language=English
+%2: Network controller link is up at 60Gbps.
+.
+
+MessageId=0x0032
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_PORT_UP8
+Language=English
+%2: Network controller link is up at 120Gbps.
+.
+
+MessageId=0x0040
+Facility=IPoIB
+Severity=Informational
+SymbolicName=EVENT_IPOIB_INIT_SUCCESS
+Language=English
+%2: Driver initialized successfully.
+.
+
+MessageId=0x0041
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_OPEN_CA
+Language=English
+%2: Failed to open Channel Adapter.
+.
+
+MessageId=0x0042
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_ALLOC_PD
+Language=English
+%2: Failed to allocate Protection Domain.
+.
+
+MessageId=0x0043
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_CREATE_RECV_CQ
+Language=English
+%2: Failed to create receive Completion Queue.
+.
+
+MessageId=0x0044
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_CREATE_SEND_CQ
+Language=English
+%2: Failed to create send Completion Queue.
+.
+
+MessageId=0x0045
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_CREATE_QP
+Language=English
+%2: Failed to create Queue Pair.
+.
+
+MessageId=0x0046
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_QUERY_QP
+Language=English
+%2: Failed to get Queue Pair number.
+.
+
+MessageId=0x0047
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_REG_PHYS
+Language=English
+%2: Failed to create DMA Memory Region.
+.
+
+MessageId=0x0048
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_RECV_POOL
+Language=English
+%2: Failed to create receive descriptor pool.
+.
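+
+; Editor's note (not part of the original change): the PORT_UP message IDs
+; above follow a pattern - each equals EVENT_IPOIB_PORT_UP (0x0002) plus
+; the link rate divided by 2.5 Gbps, e.g. 10 Gbps -> 0x0002 + 4 = 0x0006
+; and 120 Gbps -> 0x0002 + 48 = 0x0032 - so the driver can select the
+; right message directly from the computed port speed rather than through
+; a lookup table.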
+
+MessageId=0x0049
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_RECV_PKT_POOL
+Language=English
+%2: Failed to create NDIS_PACKET pool for receive indications.
+.
+
+MessageId=0x004A
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_RECV_BUF_POOL
+Language=English
+%2: Failed to create NDIS_BUFFER pool for receive indications.
+.
+
+MessageId=0x004B
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_SEND_PKT_POOL
+Language=English
+%2: Failed to create NDIS_PACKET pool for send processing.
+.
+
+MessageId=0x004C
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_SEND_BUF_POOL
+Language=English
+%2: Failed to create NDIS_BUFFER pool for send processing.
+.
+
+MessageId=0x004D
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_RECV_PKT_ARRAY
+Language=English
+%2: Failed to allocate receive indication array.
+.
+
+MessageId=0x004E
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_PORT_INFO_TIMEOUT
+Language=English
+%2: Subnet Administrator query for port information timed out.
+Make sure the SA is functioning properly. Increasing the number
+of retries and retry timeout adapter parameters may solve the
+issue.
+.
+
+MessageId=0x004F
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_PORT_INFO_REJECT
+Language=English
+%2: Subnet Administrator failed the query for port information.
+Make sure the SA is functioning properly and compatible.
+.
+
+MessageId=0x0050
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_QUERY_PORT_INFO
+Language=English
+%2: Subnet Administrator query for port information failed.
+.
+
+MessageId=0x0055
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_BCAST_GET
+Language=English
+%2: Subnet Administrator failed query for broadcast group information.
+.
+
+MessageId=0x0056
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_BCAST_JOIN
+Language=English
+%2: Subnet Administrator failed request to join broadcast group.
+.
+
+MessageId=0x0057
+Facility=IPoIB
+Severity=Error
+SymbolicName=EVENT_IPOIB_BCAST_RATE
+Language=English
+%2: The local port rate is too slow for the existing broadcast MC group.
+.
diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_port.c b/branches/Ndi/ulp/ipoib/kernel/ipoib_port.c
new file mode 100644
index 00000000..8a5898d2
--- /dev/null
+++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_port.c
@@ -0,0 +1,5659 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#include "ipoib_port.h" +#include "ipoib_adapter.h" +#include "ipoib_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ipoib_port.tmh" +#endif + + +/* Amount of physical memory to register. */ +#define MEM_REG_SIZE 0xFFFFFFFFFFFFFFFF + +/* Number of work completions to chain for send and receive polling. */ +#define MAX_SEND_WC 8 +#define MAX_RECV_WC 16 + + +ib_gid_t bcast_mgid_template = { + 0xff, /* multicast field */ + 0x12, /* scope (to be filled in) */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0xff, 0xff, 0xff, 0xff, /* 32 bit IPv4 broadcast address */ +}; + + +#ifdef _DEBUG_ +/* Handy pointer for debug use. */ +ipoib_port_t *gp_ipoib_port; +#endif + + +/****************************************************************************** +* +* Declarations +* +******************************************************************************/ +static void +__port_construct( + IN ipoib_port_t* const p_port ); + +static ib_api_status_t +__port_init( + IN ipoib_port_t* const p_port, + IN ipoib_adapter_t* const p_adapter, + IN ib_pnp_port_rec_t* const p_pnp_rec ); + +static void +__port_destroying( + IN cl_obj_t* const p_obj ); + +static void +__port_cleanup( + IN cl_obj_t* const p_obj ); + +static void +__port_free( + IN cl_obj_t* const p_obj ); + + +/****************************************************************************** +* +* IB resource manager operations +* +******************************************************************************/ +static void +__ib_mgr_construct( + IN ipoib_port_t* const p_port ); + +static ib_api_status_t +__ib_mgr_init( + IN ipoib_port_t* const p_port ); + +static void +__ib_mgr_destroy( + IN ipoib_port_t* const p_port ); + +static void +__qp_event( + IN ib_async_event_rec_t *p_event_rec ); + +static void +__cq_event( + IN ib_async_event_rec_t *p_event_rec ); + +static ib_api_status_t +__ib_mgr_activate( + IN ipoib_port_t* const p_port ); + +/****************************************************************************** +* +* Buffer manager operations. 
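+* A sketch of the pool-constructor shape used here follows.
+*/
+
+/* Editor's sketch, not part of the original change: the complib pool ctor
+ * convention that __recv_ctor below follows - the descriptor embeds a
+ * cl_pool_item_t and the constructor hands that item back to the pool.
+ * example_desc_t and example_ctor are invented names; the real descriptor
+ * layout lives in ipoib_port.h.  Inert under #if 0. */
+#if 0
+typedef struct _example_desc
+{
+	cl_pool_item_t	pool_item;	/* links the descriptor into the pool */
+	uint32_t	payload;	/* stands in for per-descriptor state */
+
+} example_desc_t;
+
+static cl_status_t
+example_ctor(
+	IN		void* const				p_object,
+	IN		void*					context,
+		OUT	cl_pool_item_t** const	pp_pool_item )
+{
+	example_desc_t	*p_desc = (example_desc_t*)p_object;
+
+	UNREFERENCED_PARAMETER( context );
+
+	p_desc->payload = 0;			/* one-time initialization   */
+	*pp_pool_item = &p_desc->pool_item;	/* return the item to the pool */
+	return CL_SUCCESS;
+}
+#endif
+/*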
+* +******************************************************************************/ +static void +__buf_mgr_construct( + IN ipoib_port_t* const p_port ); + +static ib_api_status_t +__buf_mgr_init( + IN ipoib_port_t* const p_port ); + +static void +__buf_mgr_destroy( + IN ipoib_port_t* const p_port ); + +static cl_status_t +__recv_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ); + +#if !IPOIB_INLINE_RECV +static void +__recv_dtor( + IN const cl_pool_item_t* const p_pool_item, + IN void *context ); +#endif /* IPOIB_INLINE_RECV */ + +static inline ipoib_send_desc_t* +__buf_mgr_get_send( + IN ipoib_port_t* const p_port ); + +static inline void +__buf_mgr_put_send( + IN ipoib_port_t* const p_port, + IN ipoib_send_desc_t* const p_desc ); + +static inline ipoib_recv_desc_t* +__buf_mgr_get_recv( + IN ipoib_port_t* const p_port ); + +static inline void +__buf_mgr_put_recv( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc, + IN NDIS_PACKET* const p_packet OPTIONAL ); + +static inline void +__buf_mgr_put_recv_list( + IN ipoib_port_t* const p_port, + IN cl_qlist_t* const p_list ); + +static inline NDIS_PACKET* +__buf_mgr_get_ndis_pkt( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc ); + + +/****************************************************************************** +* +* Receive manager operations. +* +******************************************************************************/ +static void +__recv_mgr_construct( + IN ipoib_port_t* const p_port ); + +static ib_api_status_t +__recv_mgr_init( + IN ipoib_port_t* const p_port ); + +static void +__recv_mgr_destroy( + IN ipoib_port_t* const p_port ); + +/* Posts receive buffers to the receive queue. */ +static ib_api_status_t +__recv_mgr_repost( + IN ipoib_port_t* const p_port ); + +static void +__recv_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ); + +static void +__recv_get_endpts( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc, + IN ib_wc_t* const p_wc, + OUT ipoib_endpt_t** const pp_src, + OUT ipoib_endpt_t** const pp_dst ); + +static int32_t +__recv_mgr_filter( + IN ipoib_port_t* const p_port, + IN ib_wc_t* const p_done_wc_list, + OUT cl_qlist_t* const p_done_list, + OUT cl_qlist_t* const p_bad_list ); + +static ib_api_status_t +__recv_gen( + IN const ipoib_pkt_t* const p_ipoib, + OUT eth_pkt_t* const p_eth, + IN ipoib_endpt_t* const p_src, + IN ipoib_endpt_t* const p_dst ); + +static ib_api_status_t +__recv_dhcp( + IN ipoib_port_t* const p_port, + IN const ipoib_pkt_t* const p_ipoib, + OUT eth_pkt_t* const p_eth, + IN ipoib_endpt_t* const p_src, + IN ipoib_endpt_t* const p_dst ); + +static ib_api_status_t +__recv_arp( + IN ipoib_port_t* const p_port, + IN ib_wc_t* const p_wc, + IN const ipoib_pkt_t* const p_ipoib, + OUT eth_pkt_t* const p_eth, + IN ipoib_endpt_t** const p_src, + IN ipoib_endpt_t* const p_dst ); + +static ib_api_status_t +__recv_mgr_prepare_pkt( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc, + OUT NDIS_PACKET** const pp_packet ); + +static uint32_t +__recv_mgr_build_pkt_array( + IN ipoib_port_t* const p_port, + IN int32_t shortage, + OUT cl_qlist_t* const p_done_list, + OUT int32_t* const p_discarded ); + +/****************************************************************************** +* +* Send manager operations. 
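+* The filter chain below peels eth -> ip -> udp -> dhcp; a reduced sketch
+* of that classification follows.
+*/
+
+/* Editor's sketch, not part of the original change: the layered test the
+ * __send_mgr_filter_* routines below implement, collapsed into one check.
+ * is_dhcp is an invented name, and the field names (type, prot, dst_port)
+ * and the ETH_PROT_TYPE_IP / IP_PROT_UDP constants are assumptions about
+ * this driver's header structs.  Inert under #if 0. */
+#if 0
+static int
+is_dhcp(
+	IN	const eth_hdr_t* const	p_eth,
+	IN	const ip_hdr_t* const	p_ip,
+	IN	const udp_hdr_t* const	p_udp )
+{
+	if( p_eth->type != ETH_PROT_TYPE_IP )	/* __send_mgr_filter_ip   */
+		return 0;
+	if( p_ip->prot != IP_PROT_UDP )		/* __send_mgr_filter_udp  */
+		return 0;
+	/* DHCP rides UDP ports 67/68.		   __send_mgr_filter_dhcp */
+	return( p_udp->dst_port == cl_hton16( 67 ) ||
+		p_udp->dst_port == cl_hton16( 68 ) );
+}
+#endif
+/*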
+*
+******************************************************************************/
+static void
+__send_mgr_construct(
+	IN ipoib_port_t* const p_port );
+
+static void
+__send_mgr_destroy(
+	IN ipoib_port_t* const p_port );
+
+static NDIS_STATUS
+__send_gen(
+	IN ipoib_port_t* const p_port,
+	IN ipoib_send_desc_t* const p_desc );
+
+static NDIS_STATUS
+__send_mgr_filter_ip(
+	IN ipoib_port_t* const p_port,
+	IN const eth_hdr_t* const p_eth_hdr,
+	IN NDIS_BUFFER* p_buf,
+	IN size_t buf_len,
+	IN OUT ipoib_send_desc_t* const p_desc );
+
+static NDIS_STATUS
+__send_mgr_filter_udp(
+	IN ipoib_port_t* const p_port,
+	IN const ip_hdr_t* const p_ip_hdr,
+	IN NDIS_BUFFER* p_buf,
+	IN size_t buf_len,
+	IN OUT ipoib_send_desc_t* const p_desc );
+
+static NDIS_STATUS
+__send_mgr_filter_dhcp(
+	IN ipoib_port_t* const p_port,
+	IN const udp_hdr_t* const p_udp_hdr,
+	IN NDIS_BUFFER* p_buf,
+	IN size_t buf_len,
+	IN OUT ipoib_send_desc_t* const p_desc );
+
+static NDIS_STATUS
+__send_mgr_filter_arp(
+	IN ipoib_port_t* const p_port,
+	IN const eth_hdr_t* const p_eth_hdr,
+	IN NDIS_BUFFER* p_buf,
+	IN size_t buf_len,
+	IN OUT ipoib_send_desc_t* const p_desc );
+
+static void
+__process_failed_send(
+	IN ipoib_port_t* const p_port,
+	IN ipoib_send_desc_t* const p_desc,
+	IN const NDIS_STATUS status );
+
+static void
+__send_cb(
+	IN const ib_cq_handle_t h_cq,
+	IN void *cq_context );
+
+
+/******************************************************************************
+*
+* Endpoint manager operations
+*
+******************************************************************************/
+static void
+__endpt_mgr_construct(
+	IN ipoib_port_t* const p_port );
+
+static ib_api_status_t
+__endpt_mgr_init(
+	IN ipoib_port_t* const p_port );
+
+static void
+__endpt_mgr_destroy(
+	IN ipoib_port_t* const p_port );
+
+/****f* IPoIB/__endpt_mgr_remove_all
+* NAME
+*	__endpt_mgr_remove_all
+*
+* DESCRIPTION
+*	Removes all endpoints from the port, dereferencing them to initiate
+*	destruction.
+*
+* SYNOPSIS
+*/
+static void
+__endpt_mgr_remove_all(
+	IN ipoib_port_t* const p_port );
+/*
+********/
+
+static void
+__endpt_mgr_remove(
+	IN ipoib_port_t* const p_port,
+	IN ipoib_endpt_t* const p_endpt );
+
+static void
+__endpt_mgr_reset_all(
+	IN ipoib_port_t* const p_port );
+
+static inline NDIS_STATUS
+__endpt_mgr_ref(
+	IN ipoib_port_t* const p_port,
+	IN const mac_addr_t mac,
+	OUT ipoib_endpt_t** const pp_endpt );
+
+static inline NDIS_STATUS
+__endpt_mgr_get_gid_qpn(
+	IN ipoib_port_t* const p_port,
+	IN const mac_addr_t mac,
+	OUT ib_gid_t* const p_gid,
+	OUT UNALIGNED net32_t* const p_qpn );
+
+static inline ipoib_endpt_t*
+__endpt_mgr_get_by_gid(
+	IN ipoib_port_t* const p_port,
+	IN const ib_gid_t* const p_gid );
+
+static inline ipoib_endpt_t*
+__endpt_mgr_get_by_lid(
+	IN ipoib_port_t* const p_port,
+	IN const net16_t lid );
+
+static inline ib_api_status_t
+__endpt_mgr_insert_locked(
+	IN ipoib_port_t* const p_port,
+	IN const mac_addr_t mac,
+	IN ipoib_endpt_t* const p_endpt );
+
+static inline ib_api_status_t
+__endpt_mgr_insert(
+	IN ipoib_port_t* const p_port,
+	IN const mac_addr_t mac,
+	IN ipoib_endpt_t* const p_endpt );
+
+static ib_api_status_t
+__endpt_mgr_add_local(
+	IN ipoib_port_t* const p_port,
+	IN ib_port_info_t* const p_port_info );
+
+static ib_api_status_t
+__endpt_mgr_add_bcast(
+	IN ipoib_port_t* const p_port,
+	IN ib_mcast_rec_t *p_mcast_rec );
+
+/******************************************************************************
+*
+* MCast operations.
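+* The join path below specializes bcast_mgid_template (defined near the top
+* of this file); a sketch of that byte-stamping follows.
+*/
+
+/* Editor's sketch, not part of the original change: how the broadcast MGID
+ * template is specialized per port - the scope nibble and the 16-bit P_Key
+ * are stamped into the bytes the template's comments mark "to be filled
+ * in".  example_fill_bcast_mgid is an invented name.  Inert under #if 0. */
+#if 0
+static void
+example_fill_bcast_mgid(
+		OUT	ib_gid_t* const	p_mgid,
+	IN		const uint8_t	scope,
+	IN		const net16_t	pkey )
+{
+	*p_mgid = bcast_mgid_template;
+	/* Byte 1: keep the 0x1 flags in the high nibble, set the scope low. */
+	p_mgid->raw[1] = (uint8_t)(0x10 | (scope & 0x0F));
+	/* Bytes 4-5: the partition key, already in network byte order. */
+	cl_memcpy( &p_mgid->raw[4], &pkey, sizeof(net16_t) );
+}
+#endif
+/*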
+* +******************************************************************************/ +static ib_api_status_t +__port_get_bcast( + IN ipoib_port_t* const p_port ); + +static ib_api_status_t +__port_join_bcast( + IN ipoib_port_t* const p_port, + IN ib_member_rec_t* const p_member_rec ); + +static ib_api_status_t +__port_create_bcast( + IN ipoib_port_t* const p_port ); + +static void +__port_info_cb( + IN ib_query_rec_t *p_query_rec ); + + +static void +__bcast_get_cb( + IN ib_query_rec_t *p_query_rec ); + + +static void +__bcast_cb( + IN ib_mcast_rec_t *p_mcast_rec ); + + +static void +__mcast_cb( + IN ib_mcast_rec_t *p_mcast_rec ); + +void +__leave_error_mcast_cb( + IN void *context ); + + +static intn_t +__gid_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ) +{ + return cl_memcmp( p_key1, p_key2, sizeof(ib_gid_t) ); +} + + +inline void ipoib_port_ref( ipoib_port_t * p_port, int type ) +{ + cl_obj_ref( &p_port->obj ); +#if DBG + cl_atomic_inc( &p_port->ref[type % ref_mask] ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ, + ("ref type %d ref_cnt %d\n", type, p_port->obj.ref_cnt) ); +#else + UNREFERENCED_PARAMETER(type); +#endif +} + + +inline void ipoib_port_deref(ipoib_port_t * p_port, int type) +{ + cl_obj_deref( &p_port->obj ); + +#if DBG + cl_atomic_dec( &p_port->ref[type % ref_mask] ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ, + ("deref type %d ref_cnt %d\n", type, p_port->obj.ref_cnt) ); +#else + UNREFERENCED_PARAMETER(type); +#endif +} + + +/****************************************************************************** +* +* Implementation +* +******************************************************************************/ +ib_api_status_t +ipoib_create_port( + IN ipoib_adapter_t* const p_adapter, + IN ib_pnp_port_rec_t* const p_pnp_rec, + OUT ipoib_port_t** const pp_port ) +{ + ib_api_status_t status; + ipoib_port_t *p_port; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( !p_adapter->p_port ); + + p_port = cl_zalloc( sizeof(ipoib_port_t) + + (sizeof(ipoib_hdr_t) * (p_adapter->params.sq_depth - 1)) ); + if( !p_port ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate ipoib_port_t (%d bytes)\n", + sizeof(ipoib_port_t)) ); + return IB_INSUFFICIENT_MEMORY; + } + +#ifdef _DEBUG_ + gp_ipoib_port = p_port; +#endif + + __port_construct( p_port ); + + status = __port_init( p_port, p_adapter, p_pnp_rec ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_port_init returned %s.\n", + p_adapter->p_ifc->get_err_str( status )) ); + __port_cleanup( &p_port->obj ); + __port_free( &p_port->obj ); + return status; + } + + *pp_port = p_port; + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +void +ipoib_port_destroy( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( p_port ); + CL_ASSERT( p_port->p_adapter ); + CL_ASSERT( !p_port->p_adapter->p_port ); + + cl_obj_destroy( &p_port->obj ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +__port_construct( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_port->state = IB_QPS_RESET; + + cl_obj_construct( &p_port->obj, IPOIB_OBJ_PORT ); + cl_spinlock_construct( &p_port->send_lock ); + cl_spinlock_construct( &p_port->recv_lock ); + __ib_mgr_construct( p_port ); + __buf_mgr_construct( p_port ); + + __recv_mgr_construct( p_port ); + __send_mgr_construct( p_port ); + + __endpt_mgr_construct( p_port ); + + KeInitializeEvent( &p_port->sa_event, NotificationEvent, TRUE 
); + KeInitializeEvent( &p_port->leave_mcast_event, NotificationEvent, TRUE ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__port_init( + IN ipoib_port_t* const p_port, + IN ipoib_adapter_t* const p_adapter, + IN ib_pnp_port_rec_t* const p_pnp_rec ) +{ + cl_status_t cl_status; + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_port->port_num = p_pnp_rec->p_port_attr->port_num; + p_port->p_adapter = p_adapter; + + cl_status = cl_spinlock_init( &p_port->send_lock ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + cl_status = cl_spinlock_init( &p_port->recv_lock ); + if( cl_status != CL_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_spinlock_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + /* Initialize the IB resource manager. */ + status = __ib_mgr_init( p_port ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__ib_mgr_init returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Initialize the buffer manager. */ + status = __buf_mgr_init( p_port ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__buf_mgr_init returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Initialize the receive manager. */ + status = __recv_mgr_init( p_port ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__recv_mgr_init returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Initialize the endpoint manager. */ + status = __endpt_mgr_init( p_port ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_init returned %s\n", + p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* We only ever destroy from the PnP callback thread. 
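+ * CL_DESTROY_SYNC makes cl_obj_destroy block until the destroying,
+ * cleanup, and free callbacks registered below have all run, so
+ * __port_free is guaranteed to have completed before ipoib_port_destroy
+ * returns.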
 */
+    cl_status = cl_obj_init( &p_port->obj, CL_DESTROY_SYNC,
+        __port_destroying, __port_cleanup, __port_free );
+
+#if DBG
+    cl_atomic_inc( &p_port->ref[ref_init] );
+    IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ,
+        ("ref type %d ref_cnt %d\n", ref_init, p_port->obj.ref_cnt) );
+#endif
+
+    if( cl_status != CL_SUCCESS )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("cl_obj_init returned %s\n", cl_status_text[cl_status]) );
+        return IB_ERROR;
+    }
+
+    cl_status = cl_obj_insert_rel( &p_port->rel, &p_adapter->obj, &p_port->obj );
+    if( cl_status != CL_SUCCESS )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("cl_obj_insert_rel returned %s\n", cl_status_text[cl_status]) );
+        cl_obj_destroy( &p_port->obj );
+        return IB_ERROR;
+    }
+
+#if DBG
+    cl_atomic_inc( &p_port->ref[ref_init] );
+    IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ,
+        ("ref type %d ref_cnt %d\n", ref_init, p_port->obj.ref_cnt) );
+#endif
+
+    IPOIB_EXIT( IPOIB_DBG_INIT );
+    return IB_SUCCESS;
+}
+
+
+static void
+__port_destroying(
+    IN cl_obj_t* const p_obj )
+{
+    ipoib_port_t *p_port;
+
+    IPOIB_ENTER( IPOIB_DBG_INIT );
+
+    CL_ASSERT( p_obj );
+
+    p_port = PARENT_STRUCT( p_obj, ipoib_port_t, obj );
+
+    ipoib_port_down( p_port );
+
+    __endpt_mgr_remove_all( p_port );
+
+    ipoib_port_resume( p_port );
+
+    IPOIB_EXIT( IPOIB_DBG_INIT );
+}
+
+
+static void
+__port_cleanup(
+    IN cl_obj_t* const p_obj )
+{
+    ipoib_port_t *p_port;
+
+    IPOIB_ENTER( IPOIB_DBG_INIT );
+
+    CL_ASSERT( p_obj );
+
+    p_port = PARENT_STRUCT( p_obj, ipoib_port_t, obj );
+
+    /* Wait for all sends and receives to get flushed. */
+    while( p_port->send_mgr.depth || p_port->recv_mgr.depth )
+        cl_thread_suspend( 0 );
+
+    /* Release the IB resources by closing the CA. */
+    __ib_mgr_destroy( p_port );
+
+    IPOIB_EXIT( IPOIB_DBG_INIT );
+}
+
+
+static void
+__port_free(
+    IN cl_obj_t* const p_obj )
+{
+    ipoib_port_t *p_port;
+
+    IPOIB_ENTER( IPOIB_DBG_INIT );
+
+    CL_ASSERT( p_obj );
+
+    p_port = PARENT_STRUCT( p_obj, ipoib_port_t, obj );
+
+    __endpt_mgr_destroy( p_port );
+    __recv_mgr_destroy( p_port );
+    __send_mgr_destroy( p_port );
+    __buf_mgr_destroy( p_port );
+
+    cl_spinlock_destroy( &p_port->send_lock );
+    cl_spinlock_destroy( &p_port->recv_lock );
+
+    cl_obj_deinit( p_obj );
+
+    cl_free( p_port );
+
+    IPOIB_EXIT( IPOIB_DBG_INIT );
+}
+
+
+
+/******************************************************************************
+*
+* IB resource manager implementation.
+*
+******************************************************************************/
+static void
+__ib_mgr_construct(
+    IN ipoib_port_t* const p_port )
+{
+    IPOIB_ENTER( IPOIB_DBG_INIT );
+
+    cl_memclr( &p_port->ib_mgr, sizeof(ipoib_ib_mgr_t) );
+
+    IPOIB_EXIT( IPOIB_DBG_INIT );
+}
+
+
+static ib_api_status_t
+__ib_mgr_init(
+    IN ipoib_port_t* const p_port )
+{
+    ib_api_status_t status;
+    ib_cq_create_t cq_create;
+    ib_qp_create_t qp_create;
+    ib_phys_create_t phys_create;
+    ib_phys_range_t phys_range;
+    uint64_t vaddr;
+    net32_t rkey;
+    ib_qp_attr_t qp_attr;
+
+    IPOIB_ENTER( IPOIB_DBG_INIT );
+
+    /* Open the CA.
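+ * The bring-up order below is CA, PD, receive CQ, send CQ, QP, then the
+ * physical MR; each failure path simply returns, which is safe because
+ * __port_cleanup ends up in __ib_mgr_destroy, and closing the CA releases
+ * whatever child resources were already created under it.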
*/ + status = p_port->p_adapter->p_ifc->open_ca( + p_port->p_adapter->h_al, p_port->p_adapter->guids.ca_guid, + NULL, p_port, &p_port->ib_mgr.h_ca ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_OPEN_CA, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_open_ca returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Allocate the PD. */ + status = p_port->p_adapter->p_ifc->alloc_pd( + p_port->ib_mgr.h_ca, IB_PDT_UD, p_port, &p_port->ib_mgr.h_pd ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_ALLOC_PD, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_alloc_pd returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Allocate receive CQ. */ + cq_create.size = p_port->p_adapter->params.rq_depth; + cq_create.pfn_comp_cb = __recv_cb; + cq_create.h_wait_obj = NULL; + + status = p_port->p_adapter->p_ifc->create_cq( + p_port->ib_mgr.h_ca, &cq_create, p_port, + __cq_event, &p_port->ib_mgr.h_recv_cq ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_CREATE_RECV_CQ, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_cq returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Allocate send CQ. */ + cq_create.size = p_port->p_adapter->params.sq_depth; + cq_create.pfn_comp_cb = __send_cb; + + status = p_port->p_adapter->p_ifc->create_cq( + p_port->ib_mgr.h_ca, &cq_create, p_port, + __cq_event, &p_port->ib_mgr.h_send_cq ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_CREATE_SEND_CQ, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_cq returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Allocate the QP. */ + cl_memclr( &qp_create, sizeof(qp_create) ); + qp_create.qp_type = IB_QPT_UNRELIABLE_DGRM; + qp_create.rq_depth = p_port->p_adapter->params.rq_depth; + qp_create.rq_sge = 2; /* To support buffers spanning pages. */ + qp_create.h_rq_cq = p_port->ib_mgr.h_recv_cq; + qp_create.sq_depth = p_port->p_adapter->params.sq_depth; + //TODO: Figure out the right number of SGE entries for sends. + qp_create.sq_sge = MAX_SEND_SGE; + qp_create.h_sq_cq = p_port->ib_mgr.h_send_cq; + qp_create.sq_signaled = TRUE; + status = p_port->p_adapter->p_ifc->create_qp( + p_port->ib_mgr.h_pd, &qp_create, p_port, + __qp_event, &p_port->ib_mgr.h_qp ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_CREATE_QP, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_qp returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + /* Query the QP so we can get our QPN. 
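+ * The QPN is cached because IPoIB advertises it, together with the port
+ * GID, as the 20-byte hardware address carried in ARP payloads (see the
+ * flags_qpn handling in __recv_arp below).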
*/ + status = p_port->p_adapter->p_ifc->query_qp( + p_port->ib_mgr.h_qp, &qp_attr ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_QUERY_QP, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_query_qp returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + p_port->ib_mgr.qpn = qp_attr.num; + + /* Register all of physical memory */ + phys_create.length = MEM_REG_SIZE; + phys_create.num_ranges = 1; + phys_create.range_array = &phys_range; + phys_create.buf_offset = 0; + phys_create.hca_page_size = PAGE_SIZE; + phys_create.access_ctrl = IB_AC_LOCAL_WRITE; + phys_range.base_addr = 0; + phys_range.size = MEM_REG_SIZE; + vaddr = 0; + status = p_port->p_adapter->p_ifc->reg_phys( + p_port->ib_mgr.h_pd, &phys_create, &vaddr, + &p_port->ib_mgr.lkey, &rkey, &p_port->ib_mgr.h_mr ); + if( status != IB_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_REG_PHYS, 1, status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_reg_phys returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +static void +__ib_mgr_destroy( + IN ipoib_port_t* const p_port ) +{ + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + if( p_port->ib_mgr.h_ca ) + { + status = + p_port->p_adapter->p_ifc->close_ca( p_port->ib_mgr.h_ca, NULL ); + CL_ASSERT( status == IB_SUCCESS ); + p_port->ib_mgr.h_ca = NULL; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + + +/****************************************************************************** +* +* Buffer manager implementation. +* +******************************************************************************/ +static void +__buf_mgr_construct( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + cl_qpool_construct( &p_port->buf_mgr.recv_pool ); + + p_port->buf_mgr.h_packet_pool = NULL; + p_port->buf_mgr.h_buffer_pool = NULL; + + ExInitializeNPagedLookasideList( &p_port->buf_mgr.send_buf_list, + NULL, NULL, 0, MAX_XFER_BLOCK_SIZE, 'bipi', 0 ); + + p_port->buf_mgr.h_send_pkt_pool = NULL; + p_port->buf_mgr.h_send_buf_pool = NULL; + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__buf_mgr_init( + IN ipoib_port_t* const p_port ) +{ + cl_status_t cl_status; + NDIS_STATUS ndis_status; + ipoib_params_t *p_params; + + IPOIB_ENTER(IPOIB_DBG_INIT ); + + CL_ASSERT( p_port ); + CL_ASSERT( p_port->p_adapter ); + + p_params = &p_port->p_adapter->params; + + /* Allocate the receive descriptor pool */ + cl_status = cl_qpool_init( &p_port->buf_mgr.recv_pool, + p_params->rq_depth * p_params->recv_pool_ratio, +#if IPOIB_INLINE_RECV + 0, 0, sizeof(ipoib_recv_desc_t), __recv_ctor, NULL, p_port ); +#else /* IPOIB_INLINE_RECV */ + 0, 0, sizeof(ipoib_recv_desc_t), __recv_ctor, __recv_dtor, p_port ); +#endif /* IPOIB_INLINE_RECV */ + if( cl_status != CL_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_RECV_POOL, 1, cl_status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_qpool_init for recvs returned %s\n", + cl_status_text[cl_status]) ); + return IB_INSUFFICIENT_MEMORY; + } + + /* Allocate the NDIS buffer and packet pools for receive indication. 
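+ * At most one NDIS packet and one NDIS buffer can be outstanding per
+ * posted receive, so both pools are sized to rq_depth; the descriptor
+ * pool above is scaled by recv_pool_ratio to also cover buffers still
+ * held by the protocol stack.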
*/ + NdisAllocatePacketPool( &ndis_status, &p_port->buf_mgr.h_packet_pool, + p_params->rq_depth, PROTOCOL_RESERVED_SIZE_IN_PACKET ); + if( ndis_status != NDIS_STATUS_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_RECV_PKT_POOL, 1, ndis_status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisAllocatePacketPool returned %08X\n", ndis_status) ); + return IB_INSUFFICIENT_RESOURCES; + } + + NdisAllocateBufferPool( &ndis_status, &p_port->buf_mgr.h_buffer_pool, + p_params->rq_depth ); + if( ndis_status != NDIS_STATUS_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_RECV_BUF_POOL, 1, ndis_status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisAllocateBufferPool returned %08X\n", ndis_status) ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Allocate the NDIS buffer and packet pools for send formatting. */ + NdisAllocatePacketPool( &ndis_status, &p_port->buf_mgr.h_send_pkt_pool, + 1, PROTOCOL_RESERVED_SIZE_IN_PACKET ); + if( ndis_status != NDIS_STATUS_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_SEND_PKT_POOL, 1, ndis_status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisAllocatePacketPool returned %08X\n", ndis_status) ); + return IB_INSUFFICIENT_RESOURCES; + } + + NdisAllocateBufferPool( &ndis_status, + &p_port->buf_mgr.h_send_buf_pool, 1 ); + if( ndis_status != NDIS_STATUS_SUCCESS ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_SEND_BUF_POOL, 1, ndis_status ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("NdisAllocateBufferPool returned %08X\n", ndis_status) ); + return IB_INSUFFICIENT_RESOURCES; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +static void +__buf_mgr_destroy( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER(IPOIB_DBG_INIT ); + + CL_ASSERT( p_port ); + + /* Destroy the send packet and buffer pools. */ + if( p_port->buf_mgr.h_send_buf_pool ) + NdisFreeBufferPool( p_port->buf_mgr.h_send_buf_pool ); + if( p_port->buf_mgr.h_send_pkt_pool ) + NdisFreePacketPool( p_port->buf_mgr.h_send_pkt_pool ); + + /* Destroy the receive packet and buffer pools. */ + if( p_port->buf_mgr.h_buffer_pool ) + NdisFreeBufferPool( p_port->buf_mgr.h_buffer_pool ); + if( p_port->buf_mgr.h_packet_pool ) + NdisFreePacketPool( p_port->buf_mgr.h_packet_pool ); + + /* Free the receive and send descriptors. */ + cl_qpool_destroy( &p_port->buf_mgr.recv_pool ); + + /* Free the lookaside list of scratch buffers. */ + ExDeleteNPagedLookasideList( &p_port->buf_mgr.send_buf_list ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static cl_status_t +__recv_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + ipoib_recv_desc_t *p_desc; + ipoib_port_t *p_port; + uint32_t ds0_len; + + IPOIB_ENTER( IPOIB_DBG_ALLOC ); + + CL_ASSERT( p_object ); + CL_ASSERT( context ); + + p_desc = (ipoib_recv_desc_t*)p_object; + p_port = (ipoib_port_t*)context; + + /* Setup the work request. */ + p_desc->wr.ds_array = p_desc->local_ds; + p_desc->wr.wr_id = (uintn_t)p_desc; + +#if IPOIB_INLINE_RECV + /* Sanity check on the receive buffer layout */ + CL_ASSERT( (void*)&p_desc->buf.eth.pkt.type == + (void*)&p_desc->buf.ib.pkt.type ); + CL_ASSERT( sizeof(recv_buf_t) == sizeof(ipoib_pkt_t) + sizeof(ib_grh_t) ); + + /* Setup the local data segment. 
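+ * The receive buffer is virtually contiguous but may straddle a page
+ * boundary, in which case it must be described by two data segments.
+ * A rough sketch, assuming a 4KB page and a buffer that begins 0x100
+ * bytes before the end of its page:
+ *
+ *   ds0_len = PAGE_SIZE - (vaddr & (PAGE_SIZE - 1));  // 0x100 bytes
+ *   ds1_len = sizeof(recv_buf_t) - ds0_len;           // the remainder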
 */
+    p_desc->local_ds[0].vaddr = cl_get_physaddr( &p_desc->buf );
+    p_desc->local_ds[0].lkey = p_port->ib_mgr.lkey;
+    ds0_len =
+        PAGE_SIZE - ((uint32_t)p_desc->local_ds[0].vaddr & (PAGE_SIZE - 1));
+    if( ds0_len >= sizeof(recv_buf_t) )
+    {
+        /* The whole buffer is within a page. */
+        p_desc->local_ds[0].length = sizeof(recv_buf_t);
+        p_desc->wr.num_ds = 1;
+    }
+    else
+    {
+        /* The buffer crosses page boundaries. */
+        p_desc->local_ds[0].length = ds0_len;
+        p_desc->local_ds[1].vaddr = cl_get_physaddr(
+            ((uint8_t*)&p_desc->buf) + ds0_len );
+        p_desc->local_ds[1].lkey = p_port->ib_mgr.lkey;
+        p_desc->local_ds[1].length = sizeof(recv_buf_t) - ds0_len;
+        p_desc->wr.num_ds = 2;
+    }
+#else /* IPOIB_INLINE_RECV */
+    /* Allocate the receive buffer. */
+    p_desc->p_buf = (recv_buf_t*)cl_zalloc( sizeof(recv_buf_t) );
+    if( !p_desc->p_buf )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("Failed to allocate receive buffer.\n") );
+        return CL_INSUFFICIENT_MEMORY;
+    }
+
+    /* Sanity check on the receive buffer layout */
+    CL_ASSERT( (void*)&p_desc->p_buf->eth.pkt.type ==
+        (void*)&p_desc->p_buf->ib.pkt.type );
+
+    /* Setup the local data segment. */
+    p_desc->local_ds[0].vaddr = cl_get_physaddr( p_desc->p_buf );
+    p_desc->local_ds[0].length = sizeof(ipoib_pkt_t) + sizeof(ib_grh_t);
+    p_desc->local_ds[0].lkey = p_port->ib_mgr.lkey;
+#endif /* IPOIB_INLINE_RECV */
+
+    *pp_pool_item = &p_desc->item;
+
+    IPOIB_EXIT( IPOIB_DBG_ALLOC );
+    return CL_SUCCESS;
+}
+
+
+#if !IPOIB_INLINE_RECV
+static void
+__recv_dtor(
+    IN const cl_pool_item_t* const p_pool_item,
+    IN void *context )
+{
+    ipoib_recv_desc_t *p_desc;
+
+    IPOIB_ENTER( IPOIB_DBG_ALLOC );
+
+    UNUSED_PARAM( context );
+
+    p_desc = PARENT_STRUCT( p_pool_item, ipoib_recv_desc_t, item );
+
+    if( p_desc->p_buf )
+        cl_free( p_desc->p_buf );
+
+    IPOIB_EXIT( IPOIB_DBG_ALLOC );
+}
+#endif
+
+
+static inline ipoib_recv_desc_t*
+__buf_mgr_get_recv(
+    IN ipoib_port_t* const p_port )
+{
+    ipoib_recv_desc_t *p_desc;
+    IPOIB_ENTER( IPOIB_DBG_RECV );
+    p_desc = (ipoib_recv_desc_t*)cl_qpool_get( &p_port->buf_mgr.recv_pool );
+    /* Reference the port object for the receive. */
+    if( p_desc )
+    {
+        ipoib_port_ref( p_port, ref_get_recv );
+        CL_ASSERT( p_desc->wr.wr_id == (uintn_t)p_desc );
+#if IPOIB_INLINE_RECV
+        CL_ASSERT( p_desc->local_ds[0].vaddr ==
+            cl_get_physaddr( &p_desc->buf ) );
+#else /* IPOIB_INLINE_RECV */
+        CL_ASSERT( p_desc->local_ds[0].vaddr ==
+            cl_get_physaddr( p_desc->p_buf ) );
+        CL_ASSERT( p_desc->local_ds[0].length ==
+            (sizeof(ipoib_pkt_t) + sizeof(ib_grh_t)) );
+#endif /* IPOIB_INLINE_RECV */
+        CL_ASSERT( p_desc->local_ds[0].lkey == p_port->ib_mgr.lkey );
+    }
+    IPOIB_EXIT( IPOIB_DBG_RECV );
+    return p_desc;
+}
+
+
+static inline void
+__buf_mgr_put_recv(
+    IN ipoib_port_t* const p_port,
+    IN ipoib_recv_desc_t* const p_desc,
+    IN NDIS_PACKET* const p_packet OPTIONAL )
+{
+    NDIS_BUFFER *p_buf;
+
+    IPOIB_ENTER(IPOIB_DBG_RECV );
+
+    if( p_packet )
+    {
+        /* Unchain the NDIS buffer. */
+        NdisUnchainBufferAtFront( p_packet, &p_buf );
+        CL_ASSERT( p_buf );
+        /* Return the NDIS packet and NDIS buffer to their pools. */
+        NdisDprFreePacketNonInterlocked( p_packet );
+        NdisFreeBuffer( p_buf );
+    }
+
+    /* Return the descriptor to its pool. */
+    cl_qpool_put( &p_port->buf_mgr.recv_pool, &p_desc->item );
+
+    /*
+     * Dereference the port object since the receive is no longer outstanding.
+ */ + ipoib_port_deref( p_port, ref_get_recv ); + IPOIB_EXIT( IPOIB_DBG_RECV ); +} + + +static inline void +__buf_mgr_put_recv_list( + IN ipoib_port_t* const p_port, + IN cl_qlist_t* const p_list ) +{ + //IPOIB_ENTER( IPOIB_DBG_RECV ); + cl_qpool_put_list( &p_port->buf_mgr.recv_pool, p_list ); + //IPOIB_EXIT( IPOIB_DBG_RECV ); +} + + +static inline NDIS_PACKET* +__buf_mgr_get_ndis_pkt( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc ) +{ + NDIS_STATUS status; + NDIS_PACKET *p_packet; + NDIS_BUFFER *p_buffer; + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + NdisDprAllocatePacketNonInterlocked( &status, &p_packet, + p_port->buf_mgr.h_packet_pool ); + if( status != NDIS_STATUS_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate NDIS_PACKET: %08x\n", status) ); + return NULL; + } + + IPOIB_PORT_FROM_PACKET( p_packet ) = p_port; + IPOIB_RECV_FROM_PACKET( p_packet ) = p_desc; + + NdisAllocateBuffer( &status, &p_buffer, +#if IPOIB_INLINE_RECV + p_port->buf_mgr.h_buffer_pool, &p_desc->buf.eth.pkt, p_desc->len ); +#else /* IPOIB_INLINE_RECV */ + p_port->buf_mgr.h_buffer_pool, &p_desc->p_buf->eth.pkt, p_desc->len ); +#endif /* IPOIB_INLINE_RECV */ + if( status != NDIS_STATUS_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate NDIS_BUFFER: %08x\n", status) ); + NdisDprFreePacketNonInterlocked( p_packet ); + return NULL; + } + + NdisChainBufferAtFront( p_packet, p_buffer ); + NDIS_SET_PACKET_HEADER_SIZE( p_packet, sizeof(eth_hdr_t) ); + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return p_packet; +} + +/****************************************************************************** +* +* Receive manager implementation. +* +******************************************************************************/ +static void +__recv_mgr_construct( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + cl_qlist_init( &p_port->recv_mgr.done_list ); + + p_port->recv_mgr.recv_pkt_array = NULL; + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__recv_mgr_init( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* Allocate the NDIS_PACKET pointer array for indicating receives. */ + p_port->recv_mgr.recv_pkt_array = cl_malloc( + sizeof(NDIS_PACKET*) * p_port->p_adapter->params.rq_depth ); + if( !p_port->recv_mgr.recv_pkt_array ) + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_RECV_PKT_ARRAY, 0 ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("cl_malloc for PNDIS_PACKET array failed.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +static void +__recv_mgr_destroy( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + + CL_ASSERT( cl_is_qlist_empty( &p_port->recv_mgr.done_list ) ); + CL_ASSERT( !p_port->recv_mgr.depth ); + + if( p_port->recv_mgr.recv_pkt_array ) + cl_free( p_port->recv_mgr.recv_pkt_array ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +/* + * Posts receive buffers to the receive queue and returns the number + * of receives needed to bring the RQ to its low water mark. Note + * that the value is signed, and can go negative. All tests must + * be for > 0. 
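+ *
+ * For example, with rq_low_watermark = 64 and 54 receives currently
+ * posted, the routine returns 10 (a shortage of ten buffers); with 74
+ * posted it returns -10, which is why callers compare the result
+ * against > 0 rather than testing for non-zero.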
+ */
+static int32_t
+__recv_mgr_repost(
+    IN ipoib_port_t* const p_port )
+{
+    ipoib_recv_desc_t *p_head = NULL, *p_tail = NULL, *p_next;
+    ib_api_status_t status;
+    ib_recv_wr_t *p_failed;
+    PERF_DECLARE( GetRecv );
+    PERF_DECLARE( PostRecv );
+
+    IPOIB_ENTER( IPOIB_DBG_RECV );
+
+    CL_ASSERT( p_port );
+    cl_obj_lock( &p_port->obj );
+    if( p_port->state != IB_QPS_RTS )
+    {
+        cl_obj_unlock( &p_port->obj );
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV,
+            ("Port in invalid state. Not reposting.\n") );
+        return 0;
+    }
+    ipoib_port_ref( p_port, ref_repost );
+    cl_obj_unlock( &p_port->obj );
+
+    while( p_port->recv_mgr.depth < p_port->p_adapter->params.rq_depth )
+    {
+        /* Pull receives out of the pool and chain them up. */
+        cl_perf_start( GetRecv );
+        p_next = __buf_mgr_get_recv( p_port );
+        cl_perf_stop( &p_port->p_adapter->perf, GetRecv );
+        if( !p_next )
+        {
+            IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV,
+                ("Out of receive descriptors! recv queue depth 0x%x\n",
+                p_port->recv_mgr.depth) );
+            break;
+        }
+
+        if( !p_tail )
+        {
+            p_tail = p_next;
+            p_next->wr.p_next = NULL;
+        }
+        else
+        {
+            p_next->wr.p_next = &p_head->wr;
+        }
+
+        p_head = p_next;
+
+        p_port->recv_mgr.depth++;
+    }
+
+    if( p_head )
+    {
+        cl_perf_start( PostRecv );
+        status = p_port->p_adapter->p_ifc->post_recv(
+            p_port->ib_mgr.h_qp, &p_head->wr, &p_failed );
+        cl_perf_stop( &p_port->p_adapter->perf, PostRecv );
+
+        if( status != IB_SUCCESS )
+        {
+            IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("ib_post_recv returned %s\n",
+                p_port->p_adapter->p_ifc->get_err_str( status )) );
+            /* return the descriptors to the pool */
+            while( p_failed )
+            {
+                p_head = PARENT_STRUCT( p_failed, ipoib_recv_desc_t, wr );
+                p_failed = p_failed->p_next;
+
+                __buf_mgr_put_recv( p_port, p_head, NULL );
+                p_port->recv_mgr.depth--;
+            }
+        }
+    }
+
+    ipoib_port_deref( p_port, ref_repost );
+    IPOIB_EXIT( IPOIB_DBG_RECV );
+    return p_port->p_adapter->params.rq_low_watermark - p_port->recv_mgr.depth;
+}
+
+
+void
+ipoib_return_packet(
+    IN NDIS_HANDLE adapter_context,
+    IN NDIS_PACKET *p_packet )
+{
+    cl_list_item_t *p_item;
+    ipoib_port_t *p_port;
+    ipoib_recv_desc_t *p_desc;
+    ib_api_status_t status = IB_NOT_DONE;
+    int32_t shortage;
+    PERF_DECLARE( ReturnPacket );
+    PERF_DECLARE( ReturnPutRecv );
+    PERF_DECLARE( ReturnRepostRecv );
+    PERF_DECLARE( ReturnPreparePkt );
+    PERF_DECLARE( ReturnNdisIndicate );
+
+    IPOIB_ENTER( IPOIB_DBG_RECV );
+
+    UNUSED_PARAM( adapter_context );
+    CL_ASSERT( p_packet );
+
+    cl_perf_start( ReturnPacket );
+
+    /* Get the port and descriptor from the packet. */
+    p_port = IPOIB_PORT_FROM_PACKET( p_packet );
+    p_desc = IPOIB_RECV_FROM_PACKET( p_packet );
+
+    cl_spinlock_acquire( &p_port->recv_lock );
+
+    cl_perf_start( ReturnPutRecv );
+    __buf_mgr_put_recv( p_port, p_desc, p_packet );
+    cl_perf_stop( &p_port->p_adapter->perf, ReturnPutRecv );
+
+    /* Repost buffers.
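+     * If the repost below reports a shortage, packets indicated from the
+     * done list are marked NDIS_STATUS_RESOURCES, which asks NDIS to copy
+     * the data out and return each packet immediately instead of holding
+     * on to our receive buffers.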
*/ + cl_perf_start( ReturnRepostRecv ); + shortage = __recv_mgr_repost( p_port ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnRepostRecv ); + + for( p_item = cl_qlist_remove_head( &p_port->recv_mgr.done_list ); + p_item != cl_qlist_end( &p_port->recv_mgr.done_list ); + p_item = cl_qlist_remove_head( &p_port->recv_mgr.done_list ) ) + { + p_desc = (ipoib_recv_desc_t*)p_item; + + cl_perf_start( ReturnPreparePkt ); + status = __recv_mgr_prepare_pkt( p_port, p_desc, &p_packet ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnPreparePkt ); + if( status == IB_SUCCESS ) + { + if( shortage > 0 ) + NDIS_SET_PACKET_STATUS( p_packet, NDIS_STATUS_RESOURCES ); + else + NDIS_SET_PACKET_STATUS( p_packet, NDIS_STATUS_SUCCESS ); + + cl_spinlock_release( &p_port->recv_lock ); + cl_perf_start( ReturnNdisIndicate ); + NdisMIndicateReceivePacket( p_port->p_adapter->h_adapter, + &p_packet, 1 ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnNdisIndicate ); + cl_spinlock_acquire( &p_port->recv_lock ); + + if( shortage > 0 ) + { + cl_perf_start( ReturnPutRecv ); + __buf_mgr_put_recv( p_port, p_desc, p_packet ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnPutRecv ); + + /* Repost buffers. */ + cl_perf_start( ReturnRepostRecv ); + shortage = __recv_mgr_repost( p_port ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnRepostRecv ); + } + } + else if( status != IB_NOT_DONE ) + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("__recv_mgr_prepare_pkt returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + /* Return the item to the head of the list. */ + cl_qlist_insert_head( &p_port->recv_mgr.done_list, p_item ); + break; + } + } + cl_spinlock_release( &p_port->recv_lock ); + cl_perf_stop( &p_port->p_adapter->perf, ReturnPacket ); + + IPOIB_EXIT( IPOIB_DBG_RECV ); +} + + +static void +__recv_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + ipoib_port_t *p_port; + ib_api_status_t status; + ib_wc_t wc[MAX_RECV_WC], *p_free, *p_wc; + int32_t pkt_cnt, recv_cnt = 0, shortage, discarded; + cl_qlist_t done_list, bad_list; + size_t i; + PERF_DECLARE( RecvCompBundle ); + PERF_DECLARE( RecvCb ); + PERF_DECLARE( PollRecv ); + PERF_DECLARE( RepostRecv ); + PERF_DECLARE( FilterRecv ); + PERF_DECLARE( BuildPktArray ); + PERF_DECLARE( RecvNdisIndicate ); + PERF_DECLARE( RearmRecv ); + PERF_DECLARE( PutRecvList ); + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + cl_perf_clr( RecvCompBundle ); + + cl_perf_start( RecvCb ); + + UNUSED_PARAM( h_cq ); + + p_port = (ipoib_port_t*)cq_context; + + cl_qlist_init( &done_list ); + cl_qlist_init( &bad_list ); + + ipoib_port_ref( p_port, ref_recv_cb ); + for( i = 0; i < MAX_RECV_WC; i++ ) + wc[i].p_next = &wc[i + 1]; + wc[MAX_RECV_WC - 1].p_next = NULL; + + /* + * We'll be accessing the endpoint map so take a reference + * on it to prevent modifications. + */ + cl_obj_lock( &p_port->obj ); + cl_atomic_inc( &p_port->endpt_rdr ); + cl_obj_unlock( &p_port->obj ); + + do + { + /* If we get here, then the list of WCs is intact. */ + p_free = wc; + + cl_perf_start( PollRecv ); + status = p_port->p_adapter->p_ifc->poll_cq( + p_port->ib_mgr.h_recv_cq, &p_free, &p_wc ); + cl_perf_stop( &p_port->p_adapter->perf, PollRecv ); + CL_ASSERT( status == IB_SUCCESS || status == IB_NOT_FOUND ); + + /* Look at the payload now and filter ARP and DHCP packets. 
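+ * poll_cq consumes WC entries from the free list chained above and
+ * returns the completed ones through p_wc; the loop runs until p_free
+ * comes back non-NULL, i.e. fewer than MAX_RECV_WC completions were
+ * returned and the CQ has been drained.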
 */
+        cl_perf_start( FilterRecv );
+        recv_cnt += __recv_mgr_filter( p_port, p_wc, &done_list, &bad_list );
+        cl_perf_stop( &p_port->p_adapter->perf, FilterRecv );
+
+    } while( !p_free );
+
+    /* We're done looking at the endpoint map, release the reference. */
+    cl_atomic_dec( &p_port->endpt_rdr );
+
+    cl_perf_log( &p_port->p_adapter->perf, RecvCompBundle, recv_cnt );
+
+    cl_spinlock_acquire( &p_port->recv_lock );
+
+    /* Update our posted depth. */
+    p_port->recv_mgr.depth -= recv_cnt;
+
+    /* Return any discarded receives to the pool */
+    cl_perf_start( PutRecvList );
+    __buf_mgr_put_recv_list( p_port, &bad_list );
+    cl_perf_stop( &p_port->p_adapter->perf, PutRecvList );
+
+    do
+    {
+        /* Repost ASAP so we don't starve the RQ. */
+        cl_perf_start( RepostRecv );
+        shortage = __recv_mgr_repost( p_port );
+        cl_perf_stop( &p_port->p_adapter->perf, RepostRecv );
+
+        cl_perf_start( BuildPktArray );
+        /* Notify NDIS of any and all possible receive buffers. */
+        pkt_cnt = __recv_mgr_build_pkt_array(
+            p_port, shortage, &done_list, &discarded );
+        cl_perf_stop( &p_port->p_adapter->perf, BuildPktArray );
+
+        /* We may have thrown away packets, and have a shortage. */
+        if( discarded && shortage > 0 )
+        {
+            cl_perf_start( RepostRecv );
+            __recv_mgr_repost( p_port );
+            cl_perf_stop( &p_port->p_adapter->perf, RepostRecv );
+        }
+
+        /* Only indicate receives if we actually had any. */
+        if( !pkt_cnt )
+            break;
+
+        cl_spinlock_release( &p_port->recv_lock );
+
+        cl_perf_start( RecvNdisIndicate );
+        NdisMIndicateReceivePacket( p_port->p_adapter->h_adapter,
+            p_port->recv_mgr.recv_pkt_array, pkt_cnt );
+        cl_perf_stop( &p_port->p_adapter->perf, RecvNdisIndicate );
+
+        /*
+         * Cap the number of receives to put back to what we just indicated
+         * with NDIS_STATUS_RESOURCES.
+         */
+        if( shortage > 0 )
+        {
+            if( pkt_cnt < shortage )
+                shortage = pkt_cnt;
+
+            /* Return all but the last packet to the pool. */
+            cl_spinlock_acquire( &p_port->recv_lock );
+            while( shortage-- > 1 )
+            {
+                __buf_mgr_put_recv( p_port,
+                    IPOIB_RECV_FROM_PACKET( p_port->recv_mgr.recv_pkt_array[shortage] ),
+                    p_port->recv_mgr.recv_pkt_array[shortage] );
+            }
+            cl_spinlock_release( &p_port->recv_lock );
+
+            /*
+             * Return the last packet as if NDIS returned it, so that we repost
+             * and report any other pending receives.
+             */
+            ipoib_return_packet( NULL, p_port->recv_mgr.recv_pkt_array[0] );
+        }
+        cl_spinlock_acquire( &p_port->recv_lock );
+
+    } while( pkt_cnt );
+    cl_spinlock_release( &p_port->recv_lock );
+
+    /*
+     * Rearm after filtering to prevent contention on the endpoint maps
+     * and eliminate the possibility of having a call to
+     * __endpt_mgr_insert find a duplicate.
+     */
+    cl_perf_start( RearmRecv );
+    status = p_port->p_adapter->p_ifc->rearm_cq(
+        p_port->ib_mgr.h_recv_cq, FALSE );
+    cl_perf_stop( &p_port->p_adapter->perf, RearmRecv );
+    CL_ASSERT( status == IB_SUCCESS );
+
+    ipoib_port_deref( p_port, ref_recv_cb );
+
+    cl_perf_stop( &p_port->p_adapter->perf, RecvCb );
+
+    IPOIB_EXIT( IPOIB_DBG_RECV );
+}
+
+
+static void
+__recv_get_endpts(
+    IN ipoib_port_t* const p_port,
+    IN ipoib_recv_desc_t* const p_desc,
+    IN ib_wc_t* const p_wc,
+    OUT ipoib_endpt_t** const pp_src,
+    OUT ipoib_endpt_t** const pp_dst )
+{
+    ib_api_status_t status;
+    mac_addr_t mac;
+    PERF_DECLARE( GetEndptByGid );
+    PERF_DECLARE( GetEndptByLid );
+    PERF_DECLARE( EndptInsert );
+
+    IPOIB_ENTER( IPOIB_DBG_RECV );
+
+    /* Setup our shortcut pointers based on whether GRH is valid.
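+     * Globally routed packets carry a GRH whose source GID both keys the
+     * endpoint lookup and, if the sender is unknown, lets us fabricate an
+     * endpoint on the fly (the MAC is derived from the GUID portion of the
+     * GID).  LID-routed traffic carries no GRH, so for it only the LID
+     * lookup against already-known endpoints is possible.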
*/ + if( p_wc->recv.ud.recv_opt & IB_RECV_OPT_GRH_VALID ) + { + /* Lookup the source endpoints based on GID. */ + cl_perf_start( GetEndptByGid ); + *pp_src = +#if IPOIB_INLINE_RECV + __endpt_mgr_get_by_gid( p_port, &p_desc->buf.ib.grh.src_gid ); +#else /* IPOIB_INLINE_RECV */ + __endpt_mgr_get_by_gid( p_port, &p_desc->p_buf->ib.grh.src_gid ); +#endif /* IPOIB_INLINE_RECV */ + cl_perf_stop( &p_port->p_adapter->perf, GetEndptByGid ); + + /* + * Lookup the destination endpoint based on GID. + * This is used along with the packet filter to determine + * whether to report this to NDIS. + */ + cl_perf_start( GetEndptByGid ); + *pp_dst = +#if IPOIB_INLINE_RECV + __endpt_mgr_get_by_gid( p_port, &p_desc->buf.ib.grh.dest_gid ); +#else /* IPOIB_INLINE_RECV */ + __endpt_mgr_get_by_gid( p_port, &p_desc->p_buf->ib.grh.dest_gid ); +#endif /* IPOIB_INLINE_RECV */ + cl_perf_stop( &p_port->p_adapter->perf, GetEndptByGid ); + + /* + * Create the source endpoint if it does not exist. Note that we + * can only do this for globally routed traffic since we need the + * information from the GRH to generate the MAC. + */ + if( !*pp_src ) + { + status = ipoib_mac_from_guid( +#if IPOIB_INLINE_RECV + p_desc->buf.ib.grh.src_gid.unicast.interface_id, &mac ); +#else /* IPOIB_INLINE_RECV */ + p_desc->p_buf->ib.grh.src_gid.unicast.interface_id, &mac ); +#endif /* IPOIB_INLINE_RECV */ + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_mac_from_guid returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return; + } + + /* Create the endpoint. */ +#if IPOIB_INLINE_RECV + *pp_src = ipoib_endpt_create( &p_desc->buf.ib.grh.src_gid, +#else /* IPOIB_INLINE_RECV */ + *pp_src = ipoib_endpt_create( &p_desc->p_buf->ib.grh.src_gid, +#endif /* IPOIB_INLINE_RECV */ + 0, p_wc->recv.ud.remote_qp ); + if( !*pp_src ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_endpt_create failed\n") ); + return; + } + cl_perf_start( EndptInsert ); + cl_obj_lock( &p_port->obj ); + status = __endpt_mgr_insert( p_port, mac, *pp_src ); + if( status != IB_SUCCESS ) + { + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_insert returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return; + } + cl_obj_unlock( &p_port->obj ); + cl_perf_stop( &p_port->p_adapter->perf, EndptInsert ); + } + } + else + { + /* + * Lookup the remote endpoint based on LID. Note that only + * unicast traffic can be LID routed. + */ + cl_perf_start( GetEndptByLid ); + *pp_src = __endpt_mgr_get_by_lid( p_port, p_wc->recv.ud.remote_lid ); + cl_perf_stop( &p_port->p_adapter->perf, GetEndptByLid ); + *pp_dst = p_port->p_local_endpt; + CL_ASSERT( *pp_dst ); + } + + if( *pp_src && !ipoib_is_voltaire_router_gid( &(*pp_src)->dgid ) && + (*pp_src)->qpn != p_wc->recv.ud.remote_qp ) + { + /* Update the QPN for the endpoint. 
*/ + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("Updating QPN for MAC: %02X-%02X-%02X-%02X-%02X-%02X\n", + (*pp_src )->mac.addr[0], (*pp_src )->mac.addr[1], + (*pp_src )->mac.addr[2], (*pp_src )->mac.addr[3], + (*pp_src )->mac.addr[4], (*pp_src )->mac.addr[5]) ); + (*pp_src)->qpn = p_wc->recv.ud.remote_qp; + } + + if( *pp_src && *pp_dst ) + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("Recv:\n" + "\tsrc MAC: %02X-%02X-%02X-%02X-%02X-%02X\n" + "\tdst MAC: %02X-%02X-%02X-%02X-%02X-%02X\n", + (*pp_src )->mac.addr[0], (*pp_src )->mac.addr[1], + (*pp_src )->mac.addr[2], (*pp_src )->mac.addr[3], + (*pp_src )->mac.addr[4], (*pp_src )->mac.addr[5], + (*pp_dst )->mac.addr[0], (*pp_dst )->mac.addr[1], + (*pp_dst )->mac.addr[2], (*pp_dst )->mac.addr[3], + (*pp_dst )->mac.addr[4], (*pp_dst )->mac.addr[5]) ); + } + + IPOIB_EXIT( IPOIB_DBG_RECV ); +} + + +static int32_t +__recv_mgr_filter( + IN ipoib_port_t* const p_port, + IN ib_wc_t* const p_done_wc_list, + OUT cl_qlist_t* const p_done_list, + OUT cl_qlist_t* const p_bad_list ) +{ + ipoib_recv_desc_t *p_desc; + ib_wc_t *p_wc; + ipoib_pkt_t *p_ipoib; + eth_pkt_t *p_eth; + ipoib_endpt_t *p_src, *p_dst; + ib_api_status_t status; + uint32_t len; + int32_t recv_cnt = 0; + PERF_DECLARE( GetRecvEndpts ); + PERF_DECLARE( RecvGen ); + PERF_DECLARE( RecvTcp ); + PERF_DECLARE( RecvUdp ); + PERF_DECLARE( RecvDhcp ); + PERF_DECLARE( RecvArp ); + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + for( p_wc = p_done_wc_list; p_wc; p_wc = p_wc->p_next ) + { + CL_ASSERT( p_wc->wc_type == IB_WC_RECV ); + p_desc = (ipoib_recv_desc_t*)(uintn_t)p_wc->wr_id; + recv_cnt++; + + if( p_wc->status != IB_WCS_SUCCESS ) + { + if( p_wc->status != IB_WCS_WR_FLUSHED_ERR ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed completion %s (vendor specific %#x)\n", + p_port->p_adapter->p_ifc->get_wc_status_str( p_wc->status ), + (int)p_wc->vendor_specific) ); + ipoib_inc_recv_stat( p_port->p_adapter, IP_STAT_ERROR, 0 ); + } + else + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("Flushed completion %s\n", + p_port->p_adapter->p_ifc->get_wc_status_str( p_wc->status )) ); + ipoib_inc_recv_stat( p_port->p_adapter, IP_STAT_DROPPED, 0 ); + } + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + /* Dereference the port object on behalf of the failed receive. */ + ipoib_port_deref( p_port, ref_failed_recv_wc ); + continue; + } + + len = p_wc->length - sizeof(ib_grh_t); + + if( len < sizeof(ipoib_hdr_t) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received ETH packet < min size\n") ); + ipoib_inc_recv_stat( p_port->p_adapter, IP_STAT_ERROR, 0 ); + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + ipoib_port_deref( p_port, ref_recv_inv_len ); + continue; + } + + if((len - sizeof(ipoib_hdr_t)) > p_port->p_adapter->params.payload_mtu) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received ETH packet > payload MTU (%d)\n", + p_port->p_adapter->params.payload_mtu) ); + ipoib_inc_recv_stat( p_port->p_adapter, IP_STAT_ERROR, 0 ); + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + ipoib_port_deref( p_port, ref_recv_inv_len ); + continue; + + } + /* Successful completion. Get the receive information. 
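+ * Note that len was computed above as p_wc->length minus the 40-byte
+ * GRH that is reserved at the head of every UD receive buffer, so it
+ * counts only the IPoIB header and payload.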
*/ + cl_perf_start( GetRecvEndpts ); + __recv_get_endpts( p_port, p_desc, p_wc, &p_src, &p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, GetRecvEndpts ); + +#if IPOIB_INLINE_RECV + p_ipoib = &p_desc->buf.ib.pkt; + p_eth = &p_desc->buf.eth.pkt; +#else /* IPOIB_INLINE_RECV */ + p_ipoib = &p_desc->p_buf->ib.pkt; + p_eth = &p_desc->p_buf->eth.pkt; +#endif /*IPOIB_INLINE_RECV */ + + if( p_src ) + { + /* Don't report loopback traffic - we requested SW loopback. */ + if( !cl_memcmp( &p_port->p_adapter->params.conf_mac, + &p_src->mac, sizeof(p_port->p_adapter->params.conf_mac) ) ) + { + /* + * "This is not the packet you're looking for" - don't update + * receive statistics, the packet never happened. + */ + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + /* Dereference the port object on behalf of the failed recv. */ + ipoib_port_deref( p_port, ref_recv_loopback ); + continue; + } + } + + switch( p_ipoib->hdr.type ) + { + case ETH_PROT_TYPE_IP: + if( len < (sizeof(ipoib_hdr_t) + sizeof(ip_hdr_t)) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received IP packet < min size\n") ); + status = IB_INVALID_SETTING; + break; + } + + if( p_ipoib->type.ip.hdr.offset || + p_ipoib->type.ip.hdr.prot != IP_PROT_UDP ) + { + /* Unfiltered. Setup the ethernet header and report. */ + cl_perf_start( RecvTcp ); + status = __recv_gen( p_ipoib, p_eth, p_src, p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, RecvTcp ); + break; + } + + /* First packet of a UDP transfer. */ + if( len < + (sizeof(ipoib_hdr_t) + sizeof(ip_hdr_t) + sizeof(udp_hdr_t)) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received UDP packet < min size\n") ); + status = IB_INVALID_SETTING; + break; + } + + /* Check if DHCP conversion is required. */ + if( (p_ipoib->type.ip.prot.udp.hdr.dst_port == DHCP_PORT_SERVER && + p_ipoib->type.ip.prot.udp.hdr.src_port == DHCP_PORT_CLIENT) || + (p_ipoib->type.ip.prot.udp.hdr.dst_port == DHCP_PORT_CLIENT && + p_ipoib->type.ip.prot.udp.hdr.src_port == DHCP_PORT_SERVER) ) + { + if( len < (sizeof(ipoib_hdr_t) + sizeof(ip_hdr_t) + + sizeof(udp_hdr_t) + DHCP_MIN_SIZE) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received DHCP < min size\n") ); + status = IB_INVALID_SETTING; + break; + } + /* UDP packet with BOOTP ports in src/dst port numbers. */ + cl_perf_start( RecvDhcp ); + status = __recv_dhcp( p_port, p_ipoib, p_eth, p_src, p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, RecvDhcp ); + } + else + { + /* Unfiltered. Setup the ethernet header and report. */ + cl_perf_start( RecvUdp ); + status = __recv_gen( p_ipoib, p_eth, p_src, p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, RecvUdp ); + } + break; + + case ETH_PROT_TYPE_ARP: + if( len < (sizeof(ipoib_hdr_t) + sizeof(ipoib_arp_pkt_t)) ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received ARP < min size\n") ); + status = IB_INVALID_SETTING; + break; + } + cl_perf_start( RecvArp ); + status = __recv_arp( p_port, p_wc, p_ipoib, p_eth, &p_src, p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, RecvArp ); + len = sizeof(ipoib_hdr_t) + sizeof(arp_pkt_t); + break; + + default: + /* Unfiltered. Setup the ethernet header and report. */ + cl_perf_start( RecvGen ); + status = __recv_gen( p_ipoib, p_eth, p_src, p_dst ); + cl_perf_stop( &p_port->p_adapter->perf, RecvGen ); + } + + if( status != IB_SUCCESS ) + { + /* Update stats. 
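+ * On failure the descriptor goes to the bad list; otherwise it is
+ * classified below as broadcast, multicast or unicast.  The broadcast
+ * test keys on the low four bytes of the multicast GID, where the
+ * IPoIB broadcast group embeds the IPv4 limited-broadcast address
+ * 255.255.255.255 (four 0xFF bytes).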
*/ + ipoib_inc_recv_stat( p_port->p_adapter, IP_STAT_ERROR, 0 ); + cl_qlist_insert_tail( p_bad_list, &p_desc->item.list_item ); + /* Dereference the port object on behalf of the failed receive. */ + ipoib_port_deref( p_port, ref_recv_filter ); + } + else + { + p_desc->len = + len + sizeof(eth_hdr_t) - sizeof(ipoib_hdr_t); + if( p_dst->h_mcast) + { + if( p_dst->dgid.multicast.raw_group_id[10] == 0xFF && + p_dst->dgid.multicast.raw_group_id[11] == 0xFF && + p_dst->dgid.multicast.raw_group_id[12] == 0xFF && + p_dst->dgid.multicast.raw_group_id[13] == 0xFF ) + { + p_desc->type = PKT_TYPE_BCAST; + } + else + { + p_desc->type = PKT_TYPE_MCAST; + } + } + else + { + p_desc->type = PKT_TYPE_UCAST; + } + cl_qlist_insert_tail( p_done_list, &p_desc->item.list_item ); + } + } + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return recv_cnt; +} + + +static ib_api_status_t +__recv_gen( + IN const ipoib_pkt_t* const p_ipoib, + OUT eth_pkt_t* const p_eth, + IN ipoib_endpt_t* const p_src, + IN ipoib_endpt_t* const p_dst ) +{ + IPOIB_ENTER( IPOIB_DBG_RECV ); + + if( !p_src || !p_dst ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Received packet with no matching endpoints.\n") ); + return IB_NOT_DONE; + } + + /* + * Fill in the ethernet header. Note that doing so will overwrite + * the IPoIB header, so start by moving the information from the IPoIB + * header. + */ + p_eth->hdr.type = p_ipoib->hdr.type; + p_eth->hdr.src = p_src->mac; + p_eth->hdr.dst = p_dst->mac; + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__recv_dhcp( + IN ipoib_port_t* const p_port, + IN const ipoib_pkt_t* const p_ipoib, + OUT eth_pkt_t* const p_eth, + IN ipoib_endpt_t* const p_src, + IN ipoib_endpt_t* const p_dst ) +{ + ib_api_status_t status; + dhcp_pkt_t *p_dhcp; + uint8_t *p_option; + uint8_t *p_cid = NULL; + ib_gid_t gid; + uint8_t msg = 0; + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + UNUSED_PARAM( p_port ); + + /* Create the ethernet header. */ + status = __recv_gen( p_ipoib, p_eth, p_src, p_dst ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__recv_gen returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Fixup the payload. */ + p_dhcp = &p_eth->type.ip.prot.udp.dhcp; + if( p_dhcp->op != DHCP_REQUEST && p_dhcp->op != DHCP_REPLY ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Invalid DHCP op code.\n") ); + return IB_INVALID_SETTING; + } + + /* + * Find the client identifier option, making sure to skip + * the "magic cookie". + */ + p_option = &p_dhcp->options[0]; + if ( *(uint32_t *)p_option != DHCP_COOKIE ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("DHCP cookie corrupted.\n") ); + return IB_INVALID_PARAMETER; + } + + p_option = &p_dhcp->options[4]; + while( *p_option != DHCP_OPT_END ) + { + switch( *p_option ) + { + case DHCP_OPT_PAD: + p_option++; + break; + + case DHCP_OPT_MSG: + msg = p_option[2]; + p_option += 3; + break; + + case DHCP_OPT_CLIENT_ID: + p_cid = p_option; + /* Fall through. */ + + default: + /* + * All other options have a length byte following the option code. + * Offset by the length to get to the next option. 
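+ *
+ * DHCP options are TLV-encoded after the 4-byte magic cookie, roughly:
+ *
+ *   [code:1][len:1][value:len] ... [DHCP_OPT_END]
+ *
+ * so the walk advances by p_option[1] + 2 bytes, PAD (a lone code byte)
+ * and END being the only length-less options.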
+ */
+            p_option += (p_option[1] + 2);
+        }
+    }
+
+    switch( msg )
+    {
+    /* message from client */
+    case DHCPDISCOVER:
+    case DHCPREQUEST:
+    case DHCPDECLINE:
+    case DHCPRELEASE:
+    case DHCPINFORM:
+        if( !p_cid )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("Failed to find required Client-identifier option.\n") );
+            return IB_INVALID_SETTING;
+        }
+        if( p_dhcp->htype != DHCP_HW_TYPE_IB )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("Invalid hardware address type.\n") );
+            return IB_INVALID_SETTING;
+        }
+        break;
+    /* message from DHCP server */
+    case DHCPOFFER:
+    case DHCPACK:
+    case DHCPNAK:
+        break;
+
+    default:
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("Invalid message type.\n") );
+        return IB_INVALID_PARAMETER;
+    }
+    p_eth->type.ip.prot.udp.hdr.chksum = 0;
+    p_dhcp->htype = DHCP_HW_TYPE_ETH;
+    p_dhcp->hlen = HW_ADDR_LEN;
+
+    if( p_cid ) /* from client */
+    {
+        /* Validate that the length and type of the option is as required. */
+        if( p_cid[1] != 21 )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("Client-identifier length not 21 as required.\n") );
+            return IB_INVALID_SETTING;
+        }
+        if( p_cid[2] != DHCP_HW_TYPE_IB )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("Client-identifier type is wrong.\n") );
+            return IB_INVALID_SETTING;
+        }
+        /*
+         * Copy the GID value from the option so that we can make aligned
+         * accesses to the contents.
+         * Recover CID to standard type.
+         */
+        cl_memcpy( &gid, &p_cid[7], sizeof(ib_gid_t) );
+        p_cid[1] = HW_ADDR_LEN + 1; // CID length
+        p_cid[2] = DHCP_HW_TYPE_ETH; // CID type
+        status = ipoib_mac_from_guid( gid.unicast.interface_id, (mac_addr_t*)&p_cid[3] );
+        p_cid[HW_ADDR_LEN + 3] = DHCP_OPT_END; // terminate tag
+    }
+    IPOIB_EXIT( IPOIB_DBG_RECV );
+    return status;
+}
+
+
+static ib_api_status_t
+__recv_arp(
+    IN ipoib_port_t* const p_port,
+    IN ib_wc_t* const p_wc,
+    IN const ipoib_pkt_t* const p_ipoib,
+    OUT eth_pkt_t* const p_eth,
+    IN ipoib_endpt_t** const pp_src,
+    IN ipoib_endpt_t* const p_dst )
+{
+    ib_api_status_t status;
+    arp_pkt_t *p_arp;
+    const ipoib_arp_pkt_t *p_ib_arp;
+    ib_gid_t gid;
+    mac_addr_t mac;
+    ipoib_hw_addr_t null_hw = {0};
+
+    IPOIB_ENTER( IPOIB_DBG_RECV );
+
+    if( !p_dst )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("Unknown destination endpoint\n") );
+        return IB_INVALID_SETTING;
+    }
+
+    p_ib_arp = &p_ipoib->type.arp;
+    p_arp = &p_eth->type.arp;
+
+    if( p_ib_arp->hw_type != ARP_HW_TYPE_IB )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("ARP hardware type is not IB\n") );
+        return IB_INVALID_SETTING;
+    }
+
+    if( p_ib_arp->hw_size != sizeof(ipoib_hw_addr_t) )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("ARP hardware address size is not sizeof(ipoib_hw_addr_t)\n") );
+        return IB_INVALID_SETTING;
+    }
+
+    if( p_ib_arp->prot_type != ETH_PROT_TYPE_IP )
+    {
+        IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+            ("ARP protocol type not IP\n") );
+        return IB_INVALID_SETTING;
+    }
+
+    /*
+     * If we don't have a source, lookup the endpoint specified in the payload.
+     */
+    if( !*pp_src )
+        *pp_src = __endpt_mgr_get_by_gid( p_port, &p_ib_arp->src_hw.gid );
+
+    /*
+     * If the endpoint exists for the GID, make sure
+     * the dlid and qpn match the arp.
+     */
+    if( *pp_src )
+    {
+        if( cl_memcmp( &(*pp_src)->dgid, &p_ib_arp->src_hw.gid,
+            sizeof(ib_gid_t) ) )
+        {
+            /*
+             * GIDs for the endpoint are different.  The ARP must
+             * have been proxied.  Dereference it.
+ */
+            *pp_src = NULL;
+        }
+        else if( (*pp_src)->dlid &&
+            (*pp_src)->dlid != p_wc->recv.ud.remote_lid )
+        {
+            /* Out of date!  Destroy the endpoint and replace it. */
+            __endpt_mgr_remove( p_port, *pp_src );
+            *pp_src = NULL;
+        }
+        else if( ipoib_is_voltaire_router_gid( &(*pp_src)->dgid ) )
+        {
+            if( (*pp_src)->qpn !=
+                (p_ib_arp->src_hw.flags_qpn & CL_HTON32(0x00FFFFFF)) &&
+                p_wc->recv.ud.remote_qp !=
+                (p_ib_arp->src_hw.flags_qpn & CL_HTON32(0x00FFFFFF)) )
+            {
+                /* Out of date!  Destroy the endpoint and replace it. */
+                __endpt_mgr_remove( p_port, *pp_src );
+                *pp_src = NULL;
+            }
+        }
+        else if( (*pp_src)->qpn != p_wc->recv.ud.remote_qp )
+        {
+            /* Out of date!  Destroy the endpoint and replace it. */
+            __endpt_mgr_remove( p_port, *pp_src );
+            *pp_src = NULL;
+        }
+    }
+
+    /* Do we need to create an endpoint for this GID? */
+    if( !*pp_src )
+    {
+        /* Copy the src GID to allow aligned access */
+        cl_memcpy( &gid, &p_ib_arp->src_hw.gid, sizeof(ib_gid_t) );
+        status = ipoib_mac_from_guid( gid.unicast.interface_id, &mac );
+        if( status != IB_SUCCESS )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("ipoib_mac_from_guid returned %s\n",
+                p_port->p_adapter->p_ifc->get_err_str( status )) );
+            return status;
+        }
+        /*
+         * Create the endpoint.  Note that the LID is left blank and will be
+         * resolved by a path query as needed.  This is done because the
+         * remote LID/GID from the work completion may not be the original
+         * initiator.
+         */
+        *pp_src = ipoib_endpt_create( &p_ib_arp->src_hw.gid,
+            0, (p_ib_arp->src_hw.flags_qpn & CL_HTON32(0x00FFFFFF)) );
+
+        if( !*pp_src )
+        {
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("ipoib_endpt_create failed\n") );
+            return IB_INSUFFICIENT_MEMORY;
+        }
+
+        cl_obj_lock( &p_port->obj );
+        status = __endpt_mgr_insert( p_port, mac, *pp_src );
+        if( status != IB_SUCCESS )
+        {
+            cl_obj_unlock( &p_port->obj );
+            IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                ("__endpt_mgr_insert returned %s\n",
+                p_port->p_adapter->p_ifc->get_err_str( status )) );
+            return status;
+        }
+
+        cl_obj_unlock( &p_port->obj );
+    }
+
+    CL_ASSERT( !cl_memcmp(
+        &(*pp_src)->dgid, &p_ib_arp->src_hw.gid, sizeof(ib_gid_t) ) );
+    CL_ASSERT( ipoib_is_voltaire_router_gid( &(*pp_src)->dgid ) ||
+        (*pp_src)->qpn ==
+        (p_ib_arp->src_hw.flags_qpn & CL_HTON32(0x00FFFFFF)) );
+    /* Now swizzle the data. */
+    p_arp->hw_type = ARP_HW_TYPE_ETH;
+    p_arp->hw_size = sizeof(mac_addr_t);
+    p_arp->src_hw = (*pp_src)->mac;
+    p_arp->src_ip = p_ib_arp->src_ip;
+
+    if( cl_memcmp( &p_ib_arp->dst_hw, &null_hw, sizeof(ipoib_hw_addr_t) ) )
+    {
+        if( cl_memcmp( &p_dst->dgid, &p_ib_arp->dst_hw.gid, sizeof(ib_gid_t) ) )
+        {
+            /*
+             * We received a bcast ARP packet, meaning the remote port is
+             * letting everyone know that its IP/MAC changed or that it
+             * was just activated.
+             */
+
+            /* Guy: TODO: Check why this check fails in case of Voltaire IPR */
+
+            if ( !ipoib_is_voltaire_router_gid( &(*pp_src)->dgid ) &&
+                !ib_gid_is_multicast( (const ib_gid_t*)&p_dst->dgid ) )
+            {
+                IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+                    ("ARP: is not ARP MCAST\n") );
+                return IB_INVALID_SETTING;
+            }
+
+            p_arp->dst_hw = p_port->p_local_endpt->mac;
+            p_dst->mac = p_port->p_local_endpt->mac;
+            /*
+             * We don't care what the receiver's IP address is,
+             * as long as the OS's ARP table is global ???
+ */ + p_arp->dst_ip = (net32_t)0; + } + else /* we've got reply to our ARP request */ + { + p_arp->dst_hw = p_dst->mac; + p_arp->dst_ip = p_ib_arp->dst_ip; + CL_ASSERT( p_dst->qpn == + (p_ib_arp->dst_hw.flags_qpn & CL_HTON32(0x00FFFFFF)) ); + } + } + else /* we got ARP reqeust */ + { + cl_memclr( &p_arp->dst_hw, sizeof(mac_addr_t) ); + p_arp->dst_ip = p_ib_arp->dst_ip; + } + + /* + * Create the ethernet header. Note that this is done last so that + * we have a chance to create a new endpoint. + */ + status = __recv_gen( p_ipoib, p_eth, *pp_src, p_dst ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__recv_gen returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__recv_mgr_prepare_pkt( + IN ipoib_port_t* const p_port, + IN ipoib_recv_desc_t* const p_desc, + OUT NDIS_PACKET** const pp_packet ) +{ + NDIS_STATUS status; + uint32_t pkt_filter; + ip_stat_sel_t type; + NDIS_TCP_IP_CHECKSUM_PACKET_INFO chksum; + PERF_DECLARE( GetNdisPkt ); + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + pkt_filter = p_port->p_adapter->packet_filter; + /* Check the packet filter. */ + switch( p_desc->type ) + { + default: + case PKT_TYPE_UCAST: + if( pkt_filter & NDIS_PACKET_TYPE_PROMISCUOUS || + pkt_filter & NDIS_PACKET_TYPE_ALL_FUNCTIONAL || + pkt_filter & NDIS_PACKET_TYPE_SOURCE_ROUTING || + pkt_filter & NDIS_PACKET_TYPE_DIRECTED ) + { + /* OK to report. */ + type = IP_STAT_UCAST_BYTES; + status = NDIS_STATUS_SUCCESS; + } + else + { + type = IP_STAT_DROPPED; + status = NDIS_STATUS_FAILURE; + } + break; + case PKT_TYPE_BCAST: + if( pkt_filter & NDIS_PACKET_TYPE_PROMISCUOUS || + pkt_filter & NDIS_PACKET_TYPE_BROADCAST ) + { + /* OK to report. */ + type = IP_STAT_BCAST_BYTES; + status = NDIS_STATUS_SUCCESS; + } + else + { + type = IP_STAT_DROPPED; + status = NDIS_STATUS_FAILURE; + } + break; + case PKT_TYPE_MCAST: + if( pkt_filter & NDIS_PACKET_TYPE_PROMISCUOUS || + pkt_filter & NDIS_PACKET_TYPE_ALL_MULTICAST || + pkt_filter & NDIS_PACKET_TYPE_MULTICAST ) + { + /* OK to report. */ + type = IP_STAT_MCAST_BYTES; + status = NDIS_STATUS_SUCCESS; + } + else + { + type = IP_STAT_DROPPED; + status = NDIS_STATUS_FAILURE; + } + break; + } + + if( status != NDIS_STATUS_SUCCESS ) + { + ipoib_inc_recv_stat( p_port->p_adapter, type, 0 ); + /* Return the receive descriptor to the pool. */ + __buf_mgr_put_recv( p_port, p_desc, NULL ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("Packet filter doesn't match receive. Dropping.\n") ); + /* + * Return IB_NOT_DONE since the packet has been completed, + * but has not consumed an array entry. + */ + return IB_NOT_DONE; + } + + cl_perf_start( GetNdisPkt ); + *pp_packet = __buf_mgr_get_ndis_pkt( p_port, p_desc ); + cl_perf_stop( &p_port->p_adapter->perf, GetNdisPkt ); + if( !*pp_packet ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__buf_mgr_get_ndis_pkt failed\n") ); + return IB_INSUFFICIENT_RESOURCES; + } + + /* Flag the checksums as having been calculated. 
*/ + chksum.Value = 0; + chksum.Receive.NdisPacketTcpChecksumSucceeded = TRUE; + chksum.Receive.NdisPacketUdpChecksumSucceeded = TRUE; + chksum.Receive.NdisPacketIpChecksumSucceeded = TRUE; + NDIS_PER_PACKET_INFO_FROM_PACKET( *pp_packet, TcpIpChecksumPacketInfo ) = + (void*)(uintn_t)chksum.Value; + + ipoib_inc_recv_stat( p_port->p_adapter, type, p_desc->len ); + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return IB_SUCCESS; +} + + +static uint32_t +__recv_mgr_build_pkt_array( + IN ipoib_port_t* const p_port, + IN int32_t shortage, + OUT cl_qlist_t* const p_done_list, + OUT int32_t* const p_discarded ) +{ + cl_list_item_t *p_item; + ipoib_recv_desc_t *p_desc; + uint32_t i = 0; + ib_api_status_t status; + PERF_DECLARE( PreparePkt ); + + IPOIB_ENTER( IPOIB_DBG_RECV ); + + *p_discarded = 0; + + /* Move any existing receives to the head to preserve ordering. */ + cl_qlist_insert_list_head( p_done_list, &p_port->recv_mgr.done_list ); + p_item = cl_qlist_remove_head( p_done_list ); + while( p_item != cl_qlist_end( p_done_list ) ) + { + p_desc = (ipoib_recv_desc_t*)p_item; + + cl_perf_start( PreparePkt ); + status = __recv_mgr_prepare_pkt( p_port, p_desc, + &p_port->recv_mgr.recv_pkt_array[i] ); + cl_perf_stop( &p_port->p_adapter->perf, PreparePkt ); + if( status == IB_SUCCESS ) + { + CL_ASSERT( p_port->recv_mgr.recv_pkt_array[i] ); + if( shortage-- > 0 ) + { + NDIS_SET_PACKET_STATUS( + p_port->recv_mgr.recv_pkt_array[i], NDIS_STATUS_RESOURCES ); + } + else + { + NDIS_SET_PACKET_STATUS( + p_port->recv_mgr.recv_pkt_array[i], NDIS_STATUS_SUCCESS ); + } + i++; + } + else if( status == IB_NOT_DONE ) + { + (*p_discarded)++; + } + else + { + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_RECV, + ("__recv_mgr_prepare_pkt returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + /* Put all completed receives on the port's done list. */ + cl_qlist_insert_tail( &p_port->recv_mgr.done_list, p_item ); + cl_qlist_insert_list_tail( &p_port->recv_mgr.done_list, p_done_list ); + break; + } + + p_item = cl_qlist_remove_head( p_done_list ); + } + + IPOIB_EXIT( IPOIB_DBG_RECV ); + return i; +} + + + + +/****************************************************************************** +* +* Send manager implementation. +* +******************************************************************************/ +static void +__send_mgr_construct( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_SEND ); + p_port->send_mgr.depth = 0; + cl_qlist_init( &p_port->send_mgr.pending_list ); + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +static void +__pending_list_destroy( + IN ipoib_port_t* const p_port ) +{ + cl_list_item_t *p_item; + NDIS_PACKET *p_packet; + + cl_spinlock_acquire( &p_port->send_lock ); + /* Complete any pending packets. 
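+ * Anything still queued when the port goes away is handed back to
+ * NDIS with NDIS_STATUS_RESET_IN_PROGRESS so the protocol drivers
+ * see a completion for every packet they submitted.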
*/ + for( p_item = cl_qlist_remove_head( &p_port->send_mgr.pending_list ); + p_item != cl_qlist_end( &p_port->send_mgr.pending_list ); + p_item = cl_qlist_remove_head( &p_port->send_mgr.pending_list ) ) + { + p_packet = IPOIB_PACKET_FROM_LIST_ITEM( p_item ); + NdisMSendComplete( p_port->p_adapter->h_adapter, p_packet, + NDIS_STATUS_RESET_IN_PROGRESS ); + } + cl_spinlock_release( &p_port->send_lock ); +} + +static void +__send_mgr_destroy( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_SEND ); + __pending_list_destroy(p_port); + + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +static NDIS_STATUS +__send_mgr_filter( + IN ipoib_port_t* const p_port, + IN const eth_hdr_t* const p_eth_hdr, + IN NDIS_BUFFER* const p_buf, + IN size_t buf_len, + IN OUT ipoib_send_desc_t* const p_desc ) +{ + NDIS_STATUS status; + + PERF_DECLARE( FilterIp ); + PERF_DECLARE( FilterArp ); + PERF_DECLARE( SendGen ); + + IPOIB_ENTER( IPOIB_DBG_SEND ); + + /* + * We already checked the ethernet header length, so we know it's safe + * to decrement the buf_len without underflowing. + */ + buf_len -= sizeof(eth_hdr_t); + + switch( p_eth_hdr->type ) + { + case ETH_PROT_TYPE_IP: + cl_perf_start( FilterIp ); + status = __send_mgr_filter_ip( + p_port, p_eth_hdr, p_buf, buf_len, p_desc ); + cl_perf_stop( &p_port->p_adapter->perf, FilterIp ); + break; + + case ETH_PROT_TYPE_ARP: + cl_perf_start( FilterArp ); + status = __send_mgr_filter_arp( + p_port, p_eth_hdr, p_buf, buf_len, p_desc ); + cl_perf_stop( &p_port->p_adapter->perf, FilterArp ); + break; + + default: + /* + * The IPoIB spec doesn't define how to send non IP or ARP packets. + * Just send the payload and hope for the best. + */ + cl_perf_start( SendGen ); + status = __send_gen( p_port, p_desc ); + cl_perf_stop( &p_port->p_adapter->perf, SendGen ); + break; + } + + IPOIB_EXIT( IPOIB_DBG_SEND ); + return status; +} + + +static NDIS_STATUS +__send_copy( + IN ipoib_port_t* const p_port, + IN ipoib_send_desc_t* const p_desc ) +{ + NDIS_PACKET *p_packet; + NDIS_BUFFER *p_buf; + NDIS_STATUS status; + UINT tot_len, bytes_copied = 0; + + IPOIB_ENTER( IPOIB_DBG_SEND ); + + p_desc->p_buf = + ExAllocateFromNPagedLookasideList( &p_port->buf_mgr.send_buf_list ); + if( !p_desc->p_buf ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to allocate buffer for packet copy.\n") ); + return NDIS_STATUS_RESOURCES; + } + + NdisAllocatePacket( &status, &p_packet, p_port->buf_mgr.h_send_pkt_pool ); + if( status != NDIS_STATUS_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND, + ("Failed to allocate NDIS_PACKET for copy.\n") ); + return status; + } + + NdisAllocateBuffer( &status, &p_buf, p_port->buf_mgr.h_send_buf_pool, + p_desc->p_buf, p_port->p_adapter->params.xfer_block_size ); + if( status != NDIS_STATUS_SUCCESS ) + { + NdisFreePacket( p_packet ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND, + ("Failed to allocate NDIS_BUFFER for copy.\n") ); + return status; + } + + NdisChainBufferAtFront( p_packet, p_buf ); + + NdisQueryPacketLength( p_desc->p_pkt, &tot_len ); + + /* Setup the work request. */ + p_desc->local_ds[1].vaddr = cl_get_physaddr( + ((uint8_t*)p_desc->p_buf) + sizeof(eth_hdr_t) ); + p_desc->local_ds[1].length = tot_len - sizeof(eth_hdr_t); + p_desc->local_ds[1].lkey = p_port->ib_mgr.lkey; + p_desc->wr.num_ds = 2; + + /* Copy the packet. 
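+ * NdisCopyFromPacketToPacketSafe copies packet-to-packet, so we wrap
+ * our contiguous lookaside buffer in a temporary NDIS_PACKET just for
+ * the duration of the copy.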
*/
+ NdisCopyFromPacketToPacketSafe( p_packet, bytes_copied, tot_len,
+ p_desc->p_pkt, bytes_copied, &bytes_copied,
+ NormalPagePriority );
+
+ /* Free our temp packet now that the data is copied. */
+ NdisUnchainBufferAtFront( p_packet, &p_buf );
+ NdisFreeBuffer( p_buf );
+ NdisFreePacket( p_packet );
+
+ if( bytes_copied != tot_len )
+ {
+ /* Something went wrong. Drop the packet. */
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to copy full packet: %d of %d bytes copied.\n",
+ bytes_copied, tot_len) );
+ return NDIS_STATUS_RESOURCES;
+ }
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return NDIS_STATUS_SUCCESS;
+}
+
+
+#if !IPOIB_USE_DMA
+/* Send using the MDL's page information rather than the SGL. */
+static ib_api_status_t
+__send_gen(
+ IN ipoib_port_t* const p_port,
+ IN ipoib_send_desc_t* const p_desc )
+{
+ uint32_t i, j = 1;
+ ULONG offset;
+ MDL *p_mdl;
+ UINT num_pages, tot_len;
+ ULONG buf_len;
+ PPFN_NUMBER page_array;
+ boolean_t hdr_done = FALSE;
+ ib_api_status_t status;
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ NdisQueryPacket( p_desc->p_pkt, &num_pages, NULL, &p_mdl,
+ &tot_len );
+
+ if( !p_mdl )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("No buffers associated with packet.\n") );
+ return IB_ERROR;
+ }
+
+ /* Remember that one of the DS entries is reserved for the IPoIB header. */
+ if( num_pages >= MAX_SEND_SGE )
+ {
+ IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND,
+ ("Too many buffers to fit in WR ds_array. Copying data.\n") );
+ status = __send_copy( p_port, p_desc );
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+ }
+
+ CL_ASSERT( tot_len > sizeof(eth_hdr_t) );
+ CL_ASSERT( tot_len <= p_port->p_adapter->params.xfer_block_size );
+ /*
+ * Assume that the ethernet header is always fully contained
+ * in the first page of the first MDL. This makes for much
+ * simpler code.
+ */
+ offset = MmGetMdlByteOffset( p_mdl ) + sizeof(eth_hdr_t);
+ CL_ASSERT( offset <= PAGE_SIZE );
+
+ while( tot_len )
+ {
+ buf_len = MmGetMdlByteCount( p_mdl );
+ page_array = MmGetMdlPfnArray( p_mdl );
+ CL_ASSERT( page_array );
+ i = 0;
+ if( !hdr_done )
+ {
+ CL_ASSERT( buf_len >= sizeof(eth_hdr_t) );
+ /* Skip the ethernet header. */
+ buf_len -= sizeof(eth_hdr_t);
+ CL_ASSERT( buf_len <= p_port->p_adapter->params.payload_mtu );
+ if( buf_len )
+ {
+ /* The ethernet header is a subset of this MDL. */
+ CL_ASSERT( i == 0 );
+ if( offset < PAGE_SIZE )
+ {
+ p_desc->local_ds[j].lkey = p_port->ib_mgr.lkey;
+ p_desc->local_ds[j].vaddr = (page_array[i] << PAGE_SHIFT);
+ /* Add the byte offset since we're on the 1st page. */
+ p_desc->local_ds[j].vaddr += offset;
+ if( offset + buf_len > PAGE_SIZE )
+ {
+ p_desc->local_ds[j].length = PAGE_SIZE - offset;
+ buf_len -= p_desc->local_ds[j].length;
+ }
+ else
+ {
+ p_desc->local_ds[j].length = buf_len;
+ buf_len = 0;
+ }
+ /* This data segment is done. Move to the next. */
+ j++;
+ }
+ /* This page is done. Move to the next. */
+ i++;
+ }
+ /* Done handling the ethernet header. */
+ hdr_done = TRUE;
+ }
+
+ /* Finish this MDL */
+ while( buf_len )
+ {
+ p_desc->local_ds[j].lkey = p_port->ib_mgr.lkey;
+ p_desc->local_ds[j].vaddr = (page_array[i] << PAGE_SHIFT);
+ /* Add the first page's offset if we're on the first page. */
+ if( i == 0 )
+ p_desc->local_ds[j].vaddr += MmGetMdlByteOffset( p_mdl );
+
+ if( i == 0 && (MmGetMdlByteOffset( p_mdl ) + buf_len) > PAGE_SIZE )
+ {
+ /* Buffer spans pages.
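+ * The PFNs of an MDL need not be physically contiguous, so a
+ * buffer that crosses a page boundary must be split into one
+ * data segment per page.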
*/
+ p_desc->local_ds[j].length =
+ PAGE_SIZE - MmGetMdlByteOffset( p_mdl );
+ buf_len -= p_desc->local_ds[j].length;
+ /* This page is done. Move to the next. */
+ i++;
+ }
+ else
+ {
+ /* Last page of the buffer. */
+ p_desc->local_ds[j].length = buf_len;
+ buf_len = 0;
+ }
+ /* This data segment is done. Move to the next. */
+ j++;
+ }
+
+ tot_len -= MmGetMdlByteCount( p_mdl );
+ if( !tot_len )
+ break;
+
+ NdisGetNextBuffer( p_mdl, &p_mdl );
+ if( !p_mdl )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get next buffer.\n") );
+ return IB_ERROR;
+ }
+ }
+
+ /* Set the number of data segments. */
+ p_desc->wr.num_ds = j;
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return IB_SUCCESS;
+}
+
+#else
+
+static NDIS_STATUS
+__send_gen(
+ IN ipoib_port_t* const p_port,
+ IN ipoib_send_desc_t* const p_desc )
+{
+ ib_api_status_t status;
+ SCATTER_GATHER_LIST *p_sgl;
+ uint32_t i, j = 1;
+ uint32_t offset = sizeof(eth_hdr_t);
+ PERF_DECLARE( SendCopy );
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ p_sgl = NDIS_PER_PACKET_INFO_FROM_PACKET( p_desc->p_pkt,
+ ScatterGatherListPacketInfo );
+ if( !p_sgl )
+ {
+ ASSERT( p_sgl );
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get SGL from packet.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+
+ /* Remember that one of the DS entries is reserved for the IPoIB header. */
+ if( ( p_sgl->NumberOfElements >= MAX_SEND_SGE &&
+ p_sgl->Elements[0].Length > sizeof(eth_hdr_t)) ||
+ ( p_sgl->NumberOfElements > MAX_SEND_SGE &&
+ p_sgl->Elements[0].Length <= sizeof(eth_hdr_t)) )
+ {
+ IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND,
+ ("Too many buffers to fit in WR ds_array. Copying data.\n") );
+ cl_perf_start( SendCopy );
+ status = __send_copy( p_port, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, SendCopy );
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+ }
+
+ /*
+ * Skip the ethernet header. It is either the first element, or part
+ * of it. Consume elements while they fall entirely within the
+ * remaining header bytes.
+ */
+ i = 0;
+ while( offset )
+ {
+ if( p_sgl->Elements[i].Length <= offset )
+ {
+ offset -= p_sgl->Elements[i++].Length;
+ }
+ else
+ {
+ p_desc->local_ds[j].vaddr =
+ p_sgl->Elements[i].Address.QuadPart + offset;
+ p_desc->local_ds[j].length =
+ p_sgl->Elements[i].Length - offset;
+ p_desc->local_ds[j].lkey = p_port->ib_mgr.lkey;
+ i++;
+ j++;
+ break;
+ }
+ }
+ /* Now fill in the rest of the local data segments. */
+ while( i < p_sgl->NumberOfElements )
+ {
+ p_desc->local_ds[j].vaddr = p_sgl->Elements[i].Address.QuadPart;
+ p_desc->local_ds[j].length = p_sgl->Elements[i].Length;
+ p_desc->local_ds[j].lkey = p_port->ib_mgr.lkey;
+ i++;
+ j++;
+ }
+
+ /* Set the number of data segments.
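+ * j counts the IPoIB header segment at index 0 plus one segment per
+ * physical fragment of the payload.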
*/
+ p_desc->wr.num_ds = j;
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return NDIS_STATUS_SUCCESS;
+}
+#endif
+
+
+static NDIS_STATUS
+__send_mgr_filter_ip(
+ IN ipoib_port_t* const p_port,
+ IN const eth_hdr_t* const p_eth_hdr,
+ IN NDIS_BUFFER* p_buf,
+ IN size_t buf_len,
+ IN OUT ipoib_send_desc_t* const p_desc )
+{
+ NDIS_STATUS status;
+ ip_hdr_t *p_ip_hdr;
+
+ PERF_DECLARE( QueryIp );
+ PERF_DECLARE( SendTcp );
+ PERF_DECLARE( FilterUdp );
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ if( !buf_len )
+ {
+ cl_perf_start( QueryIp );
+ NdisGetNextBuffer( p_buf, &p_buf );
+ if( !p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get IP header buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ NdisQueryBufferSafe( p_buf, &p_ip_hdr, &buf_len, NormalPagePriority );
+ if( !p_ip_hdr )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to query IP header buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ cl_perf_stop( &p_port->p_adapter->perf, QueryIp );
+ }
+ else
+ {
+ p_ip_hdr = (ip_hdr_t*)(p_eth_hdr + 1);
+ }
+ if( buf_len < sizeof(ip_hdr_t) )
+ {
+ /* The IP header must be contained in a single buffer. */
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Buffer too small for IP packet.\n") );
+ return NDIS_STATUS_BUFFER_TOO_SHORT;
+ }
+
+ if( p_ip_hdr->offset ||
+ p_ip_hdr->prot != IP_PROT_UDP )
+ {
+ /* Not an unfragmented UDP packet - send it as-is. */
+ cl_perf_start( SendTcp );
+ status = __send_gen( p_port, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, SendTcp );
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+ }
+
+ buf_len -= sizeof(ip_hdr_t);
+
+ cl_perf_start( FilterUdp );
+ status = __send_mgr_filter_udp(
+ p_port, p_ip_hdr, p_buf, buf_len, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, FilterUdp );
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+}
+
+
+static NDIS_STATUS
+__send_mgr_filter_udp(
+ IN ipoib_port_t* const p_port,
+ IN const ip_hdr_t* const p_ip_hdr,
+ IN NDIS_BUFFER* p_buf,
+ IN size_t buf_len,
+ IN OUT ipoib_send_desc_t* const p_desc )
+{
+ ib_api_status_t status;
+ udp_hdr_t *p_udp_hdr;
+ PERF_DECLARE( QueryUdp );
+ PERF_DECLARE( SendUdp );
+ PERF_DECLARE( FilterDhcp );
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ if( !buf_len )
+ {
+ cl_perf_start( QueryUdp );
+ NdisGetNextBuffer( p_buf, &p_buf );
+ if( !p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get UDP header buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ NdisQueryBufferSafe( p_buf, &p_udp_hdr, &buf_len, NormalPagePriority );
+ if( !p_udp_hdr )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to query UDP header buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ cl_perf_stop( &p_port->p_adapter->perf, QueryUdp );
+ }
+ else
+ {
+ p_udp_hdr = (udp_hdr_t*)(p_ip_hdr + 1);
+ }
+ /* Get the UDP header and check the destination port numbers. */
+ if( buf_len < sizeof(udp_hdr_t) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Buffer not large enough for UDP packet.\n") );
+ return NDIS_STATUS_BUFFER_TOO_SHORT;
+ }
+
+ if( (p_udp_hdr->src_port != DHCP_PORT_CLIENT ||
+ p_udp_hdr->dst_port != DHCP_PORT_SERVER) &&
+ (p_udp_hdr->src_port != DHCP_PORT_SERVER ||
+ p_udp_hdr->dst_port != DHCP_PORT_CLIENT) )
+ {
+ /* Not a DHCP packet. */
+ cl_perf_start( SendUdp );
+ status = __send_gen( p_port, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, SendUdp );
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+ }
+
+ buf_len -= sizeof(udp_hdr_t);
+
+ /* Allocate our scratch buffer.
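+ * The DHCP payload has to be rewritten (the client identifier is
+ * converted to IPoIB format), so it is staged in a non-paged buffer
+ * that the HCA can DMA from.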
*/
+ p_desc->p_buf = (send_buf_t*)
+ ExAllocateFromNPagedLookasideList( &p_port->buf_mgr.send_buf_list );
+ if( !p_desc->p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to allocate DHCP packet buffer.\n") );
+ return NDIS_STATUS_RESOURCES;
+ }
+ /* Copy the IP and UDP headers. */
+ cl_memcpy( &p_desc->p_buf->ip.hdr, p_ip_hdr, sizeof(ip_hdr_t) );
+ cl_memcpy(
+ &p_desc->p_buf->ip.prot.udp.hdr, p_udp_hdr, sizeof(udp_hdr_t) );
+
+ cl_perf_start( FilterDhcp );
+ status = __send_mgr_filter_dhcp(
+ p_port, p_udp_hdr, p_buf, buf_len, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, FilterDhcp );
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+}
+
+
+static NDIS_STATUS
+__send_mgr_filter_dhcp(
+ IN ipoib_port_t* const p_port,
+ IN const udp_hdr_t* const p_udp_hdr,
+ IN NDIS_BUFFER* p_buf,
+ IN size_t buf_len,
+ IN OUT ipoib_send_desc_t* const p_desc )
+{
+ dhcp_pkt_t *p_dhcp;
+ dhcp_pkt_t *p_ib_dhcp;
+ uint8_t *p_option, *p_cid = NULL;
+ uint8_t msg = 0;
+ size_t len;
+ ib_gid_t gid;
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ if( !buf_len )
+ {
+ NdisGetNextBuffer( p_buf, &p_buf );
+ if( !p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get DHCP buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ NdisQueryBufferSafe( p_buf, &p_dhcp, &buf_len, NormalPagePriority );
+ if( !p_dhcp )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to query DHCP buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ }
+ else
+ {
+ p_dhcp = (dhcp_pkt_t*)(p_udp_hdr + 1);
+ }
+
+ if( buf_len < DHCP_MIN_SIZE )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Buffer not large enough for DHCP packet.\n") );
+ return NDIS_STATUS_BUFFER_TOO_SHORT;
+ }
+
+ p_ib_dhcp = &p_desc->p_buf->ip.prot.udp.dhcp;
+ cl_memcpy( p_ib_dhcp, p_dhcp, buf_len );
+
+ /* Now scan through the options looking for the client identifier. */
+ p_option = &p_ib_dhcp->options[4];
+ while( *p_option != DHCP_OPT_END )
+ {
+ switch( *p_option )
+ {
+ case DHCP_OPT_PAD:
+ p_option++;
+ break;
+
+ case DHCP_OPT_MSG:
+ msg = p_option[2];
+ p_option += 3;
+ break;
+
+ case DHCP_OPT_CLIENT_ID:
+ p_cid = p_option;
+ /* Fall through. */
+
+ default:
+ /*
+ * All other options have a length byte following the option code.
+ * Offset by the length to get to the next option.
+ */
+ p_option += (p_option[1] + 2);
+ }
+ }
+
+ switch( msg )
+ {
+ /* Client messages */
+ case DHCPDISCOVER:
+ case DHCPREQUEST:
+ p_ib_dhcp->flags |= DHCP_FLAGS_BROADCAST;
+ /* Fall through */
+ case DHCPDECLINE:
+ case DHCPRELEASE:
+ case DHCPINFORM:
+ /* Fix up the client identifier option */
+ if( p_cid )
+ {
+ /*
+ * Do we need to replace it? Yes, if the length matches an
+ * Ethernet MAC and the MAC is ours.
+ */
+ if( p_cid[1] == HW_ADDR_LEN+1 && !cl_memcmp( &p_cid[3],
+ &p_port->p_adapter->params.conf_mac.addr, HW_ADDR_LEN ) )
+ {
+ /* Make sure there's room to extend it. 23 is the size of
+ * the CID option for IPoIB.
+ */
+ if( buf_len + 23 - p_cid[1] > sizeof(dhcp_pkt_t) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Can't convert CID to IPoIB format.\n") );
+ return NDIS_STATUS_RESOURCES;
+ }
+ /* Move the existing options down, and add a new CID option */
+ len = p_option - ( p_cid + p_cid[1] + 2 );
+ p_option = p_cid + p_cid[1] + 2;
+ RtlMoveMemory( p_cid, p_option, len );
+
+ p_cid += len;
+ p_cid[0] = DHCP_OPT_CLIENT_ID;
+ p_cid[1] = 21;
+ p_cid[2] = DHCP_HW_TYPE_IB;
+ }
+ else
+ {
+ p_cid[2] = DHCP_HW_TYPE_IB;
+ }
+ }
+ else
+ {
+ /*
+ * Make sure there's room to extend it. 23 is the size of the
+ * CID option for IPoIB.
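+ * As a sketch of the layout built below:
+ *   p_cid[0]      DHCP_OPT_CLIENT_ID (option 61)
+ *   p_cid[1]      length = 21
+ *   p_cid[2]      DHCP_HW_TYPE_IB
+ *   p_cid[3..6]   our QPN (network order)
+ *   p_cid[7..22]  our port GID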
+ */
+ if( buf_len + 23 > sizeof(dhcp_pkt_t) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Can't convert CID to IPoIB format.\n") );
+ return NDIS_STATUS_RESOURCES;
+ }
+
+ p_cid = p_option;
+ p_option = p_cid + 23;
+ p_option[0] = DHCP_OPT_END;
+ p_cid[0] = DHCP_OPT_CLIENT_ID;
+ p_cid[1] = 21;
+ }
+
+ CL_ASSERT( p_cid[1] == 21 );
+ p_cid[23] = DHCP_OPT_END;
+ ib_gid_set_default( &gid, p_port->p_adapter->guids.port_guid );
+ cl_memcpy( &p_cid[7], &gid, sizeof(ib_gid_t) );
+ cl_memcpy( &p_cid[3], &p_port->ib_mgr.qpn, sizeof(p_port->ib_mgr.qpn) );
+ /* Clear the hardware address. */
+ p_ib_dhcp->htype = DHCP_HW_TYPE_IB;
+ p_ib_dhcp->hlen = 0;
+ cl_memclr( p_ib_dhcp->chaddr, sizeof(p_ib_dhcp->chaddr) );
+ break;
+
+ /* Server messages. */
+ case DHCPOFFER:
+ case DHCPACK:
+ case DHCPNAK:
+ /* Don't touch server messages. */
+ break;
+
+ default:
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Invalid message type.\n") );
+ return NDIS_STATUS_INVALID_DATA;
+ }
+ /* The UDP checksum is optional; zero it since the payload changed. */
+ p_desc->p_buf->ip.prot.udp.hdr.chksum = 0;
+ p_desc->local_ds[1].vaddr = cl_get_physaddr( p_desc->p_buf );
+ p_desc->local_ds[1].length = sizeof(ip_hdr_t) + sizeof(udp_hdr_t) + sizeof(dhcp_pkt_t);
+ p_desc->local_ds[1].lkey = p_port->ib_mgr.lkey;
+ p_desc->wr.num_ds = 2;
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return NDIS_STATUS_SUCCESS;
+}
+
+
+static NDIS_STATUS
+__send_mgr_filter_arp(
+ IN ipoib_port_t* const p_port,
+ IN const eth_hdr_t* const p_eth_hdr,
+ IN NDIS_BUFFER* p_buf,
+ IN size_t buf_len,
+ IN OUT ipoib_send_desc_t* const p_desc )
+{
+ arp_pkt_t *p_arp;
+ ipoib_arp_pkt_t *p_ib_arp;
+ NDIS_STATUS status;
+ mac_addr_t null_hw = {0};
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ if( !buf_len )
+ {
+ NdisGetNextBuffer( p_buf, &p_buf );
+ if( !p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to get ARP buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ NdisQueryBufferSafe( p_buf, &p_arp, &buf_len, NormalPagePriority );
+ if( !p_arp )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to query ARP buffer.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+ }
+ else
+ {
+ p_arp = (arp_pkt_t*)(p_eth_hdr + 1);
+ }
+
+ /* The ARP packet must fit in a single buffer. */
+ if( buf_len < sizeof(arp_pkt_t) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Buffer too short for ARP.\n") );
+ return NDIS_STATUS_BUFFER_TOO_SHORT;
+ }
+
+ if( p_arp->prot_type != ETH_PROT_TYPE_IP )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Unsupported protocol type.\n") );
+ return NDIS_STATUS_INVALID_DATA;
+ }
+
+ /* Allocate our scratch buffer. */
+ p_desc->p_buf = (send_buf_t*)
+ ExAllocateFromNPagedLookasideList( &p_port->buf_mgr.send_buf_list );
+ if( !p_desc->p_buf )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed to allocate ARP packet buffer.\n") );
+ return NDIS_STATUS_RESOURCES;
+ }
+ p_ib_arp = (ipoib_arp_pkt_t*)p_desc->p_buf;
+
+ /* Convert the ARP payload. */
+ p_ib_arp->hw_type = ARP_HW_TYPE_IB;
+ p_ib_arp->prot_type = p_arp->prot_type;
+ p_ib_arp->hw_size = sizeof(ipoib_hw_addr_t);
+ p_ib_arp->prot_size = p_arp->prot_size;
+ p_ib_arp->op = p_arp->op;
+ p_ib_arp->src_hw.flags_qpn = p_port->ib_mgr.qpn;
+ ib_gid_set_default( &p_ib_arp->src_hw.gid,
+ p_port->p_adapter->guids.port_guid );
+ p_ib_arp->src_ip = p_arp->src_ip;
+ if( cl_memcmp( &p_arp->dst_hw, &null_hw, sizeof(mac_addr_t) ) )
+ {
+ /* Get the endpoint referenced by the dst_hw address.
*/
+ status = __endpt_mgr_get_gid_qpn( p_port, p_arp->dst_hw,
+ &p_ib_arp->dst_hw.gid, &p_ib_arp->dst_hw.flags_qpn );
+ if( status != NDIS_STATUS_SUCCESS )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("Failed lookup of destination HW address\n") );
+ return status;
+ }
+ }
+ else
+ {
+ cl_memclr( &p_ib_arp->dst_hw, sizeof(ipoib_hw_addr_t) );
+ }
+ p_ib_arp->dst_ip = p_arp->dst_ip;
+
+ p_desc->local_ds[1].vaddr = cl_get_physaddr( p_ib_arp );
+ p_desc->local_ds[1].length = sizeof(ipoib_arp_pkt_t);
+ p_desc->local_ds[1].lkey = p_port->ib_mgr.lkey;
+ p_desc->wr.num_ds = 2;
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return NDIS_STATUS_SUCCESS;
+}
+
+
+static inline NDIS_STATUS
+__send_mgr_get_eth_hdr(
+ IN NDIS_PACKET* const p_packet,
+ OUT NDIS_BUFFER** const pp_buf,
+ OUT eth_hdr_t** const pp_eth_hdr,
+ OUT UINT* p_buf_len )
+{
+ UINT tot_len;
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ NdisGetFirstBufferFromPacketSafe(
+ p_packet, pp_buf, pp_eth_hdr, p_buf_len, &tot_len, NormalPagePriority );
+
+ if( !*pp_eth_hdr )
+ {
+ /* Failed to get first buffer. */
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("NdisGetFirstBufferFromPacketSafe failed.\n") );
+ return NDIS_STATUS_FAILURE;
+ }
+
+ if( *p_buf_len < sizeof(eth_hdr_t) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("First buffer in packet smaller than eth_hdr_t: %d.\n",
+ *p_buf_len) );
+ return NDIS_STATUS_BUFFER_TOO_SHORT;
+ }
+
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND,
+ ("Ethernet header:\n"
+ "\tsrc MAC: %02X-%02X-%02X-%02X-%02X-%02X\n"
+ "\tdst MAC: %02X-%02X-%02X-%02X-%02X-%02X\n"
+ "\tprotocol type: %04X\n",
+ (*pp_eth_hdr)->src.addr[0], (*pp_eth_hdr)->src.addr[1],
+ (*pp_eth_hdr)->src.addr[2], (*pp_eth_hdr)->src.addr[3],
+ (*pp_eth_hdr)->src.addr[4], (*pp_eth_hdr)->src.addr[5],
+ (*pp_eth_hdr)->dst.addr[0], (*pp_eth_hdr)->dst.addr[1],
+ (*pp_eth_hdr)->dst.addr[2], (*pp_eth_hdr)->dst.addr[3],
+ (*pp_eth_hdr)->dst.addr[4], (*pp_eth_hdr)->dst.addr[5],
+ cl_ntoh16( (*pp_eth_hdr)->type )) );
+
+ return NDIS_STATUS_SUCCESS;
+}
+
+
+static inline NDIS_STATUS
+__send_mgr_queue(
+ IN ipoib_port_t* const p_port,
+ IN eth_hdr_t* const p_eth_hdr,
+ OUT ipoib_endpt_t** const pp_endpt )
+{
+ NDIS_STATUS status;
+
+ PERF_DECLARE( GetEndpt );
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ /* Pend the request if the pending list is not empty, to preserve ordering. */
+ if( cl_qlist_count( &p_port->send_mgr.pending_list ) )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND,
+ ("Pending list not empty.\n") );
+ return NDIS_STATUS_PENDING;
+ }
+
+ /* Pend the request if the send queue is out of WQEs.
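+ * Pended packets are replayed in order by ipoib_port_resume once the
+ * send completion path frees up WQEs.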
*/
+ if( p_port->send_mgr.depth == p_port->p_adapter->params.sq_depth )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND,
+ ("No available WQEs.\n") );
+ return NDIS_STATUS_PENDING;
+ }
+
+ cl_perf_start( GetEndpt );
+ status = __endpt_mgr_ref( p_port, p_eth_hdr->dst, pp_endpt );
+ cl_perf_stop( &p_port->p_adapter->perf, GetEndpt );
+
+ if( status == NDIS_STATUS_NO_ROUTE_TO_DESTINATION &&
+ ETH_IS_MULTICAST( p_eth_hdr->dst.addr ) )
+ {
+ if( ipoib_port_join_mcast( p_port, p_eth_hdr->dst,
+ IB_MC_REC_STATE_SEND_ONLY_MEMBER) == IB_SUCCESS )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND,
+ ("Multicast Mac - trying to join.\n") );
+ return NDIS_STATUS_PENDING;
+ }
+ }
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return status;
+}
+
+
+static NDIS_STATUS
+__build_send_desc(
+ IN ipoib_port_t* const p_port,
+ IN eth_hdr_t* const p_eth_hdr,
+ IN NDIS_BUFFER* const p_buf,
+ IN const size_t buf_len,
+ IN OUT ipoib_send_desc_t* const p_desc )
+{
+ NDIS_STATUS status;
+ int32_t hdr_idx;
+
+ PERF_DECLARE( SendMgrFilter );
+
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ /* Format the send descriptor. */
+ cl_perf_start( SendMgrFilter );
+ status = __send_mgr_filter(
+ p_port, p_eth_hdr, p_buf, buf_len, p_desc );
+ cl_perf_stop( &p_port->p_adapter->perf, SendMgrFilter );
+ if( status != NDIS_STATUS_SUCCESS )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("__send_mgr_filter returned 0x%08X.\n", status) );
+ return status;
+ }
+
+ /* Set up the IPoIB header. sq_depth is a power of two, so the mask wraps the index. */
+ hdr_idx = cl_atomic_inc( &p_port->hdr_idx );
+ hdr_idx &= (p_port->p_adapter->params.sq_depth - 1);
+ ASSERT( hdr_idx < p_port->p_adapter->params.sq_depth );
+ p_port->hdr[hdr_idx].type = p_eth_hdr->type;
+ p_port->hdr[hdr_idx].resv = 0;
+
+ /* Setup the first local data segment (used for the IPoIB header). */
+ p_desc->local_ds[0].vaddr = cl_get_physaddr( &p_port->hdr[hdr_idx] );
+ p_desc->local_ds[0].length = sizeof(ipoib_hdr_t);
+ p_desc->local_ds[0].lkey = p_port->ib_mgr.lkey;
+
+ /* Setup the work request. */
+ p_desc->wr.p_next = NULL;
+ p_desc->wr.wr_id = (uintn_t)p_desc->p_pkt;
+ p_desc->wr.wr_type = WR_SEND;
+ p_desc->wr.send_opt = IB_SEND_OPT_SIGNALED;
+ p_desc->wr.ds_array = p_desc->local_ds;
+
+ p_desc->wr.dgrm.ud.remote_qp = p_desc->p_endpt->qpn;
+ p_desc->wr.dgrm.ud.remote_qkey = p_port->ib_mgr.bcast_rec.qkey;
+ p_desc->wr.dgrm.ud.h_av = p_desc->p_endpt->h_av;
+ p_desc->wr.dgrm.ud.pkey_index = 0;
+ p_desc->wr.dgrm.ud.rsvd = NULL;
+
+ /* Store context in our reserved area of the packet. */
+ IPOIB_PORT_FROM_PACKET( p_desc->p_pkt ) = p_port;
+ IPOIB_ENDPT_FROM_PACKET( p_desc->p_pkt ) = p_desc->p_endpt;
+ IPOIB_SEND_FROM_PACKET( p_desc->p_pkt ) = p_desc->p_buf;
+
+ IPOIB_EXIT( IPOIB_DBG_SEND );
+ return NDIS_STATUS_SUCCESS;
+}
+
+
+static inline void
+__process_failed_send(
+ IN ipoib_port_t* const p_port,
+ IN ipoib_send_desc_t* const p_desc,
+ IN const NDIS_STATUS status )
+{
+ IPOIB_ENTER( IPOIB_DBG_SEND );
+
+ /* Complete the packet. */
+ NdisMSendComplete( p_port->p_adapter->h_adapter,
+ p_desc->p_pkt, status );
+ ipoib_inc_send_stat( p_port->p_adapter, IP_STAT_ERROR, 0 );
+ /* Deref the endpoint.
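+ * The reference was taken by __endpt_mgr_ref during queuing; failure
+ * paths must release it and free any scratch buffer that was
+ * allocated for the descriptor.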
*/ + if( p_desc->p_endpt ) + ipoib_endpt_deref( p_desc->p_endpt ); + + if( p_desc->p_buf ) + { + ExFreeToNPagedLookasideList( + &p_port->buf_mgr.send_buf_list, p_desc->p_buf ); + } + + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +void +ipoib_port_send( + IN ipoib_port_t* const p_port, + IN NDIS_PACKET **p_packet_array, + IN uint32_t num_packets ) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + ipoib_send_desc_t desc; + uint32_t i; + eth_hdr_t *p_eth_hdr; + NDIS_BUFFER *p_buf; + UINT buf_len; + + PERF_DECLARE( GetEthHdr ); + PERF_DECLARE( BuildSendDesc ); + PERF_DECLARE( QueuePacket ); + PERF_DECLARE( SendMgrQueue ); + PERF_DECLARE( PostSend ); + PERF_DECLARE( ProcessFailedSends ); + + IPOIB_ENTER( IPOIB_DBG_SEND ); + + + cl_obj_lock( &p_port->obj ); + if( p_port->state != IB_QPS_RTS ) + { + cl_obj_unlock( &p_port->obj ); + for( i = 0; i < num_packets; ++i ) + { + ipoib_inc_send_stat( p_port->p_adapter, IP_STAT_DROPPED, 0 ); + /* Complete the packet. */ + NdisMSendComplete( p_port->p_adapter->h_adapter, + p_packet_array[i], NDIS_STATUS_ADAPTER_NOT_READY ); + + } + + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND, + ("Invalid state - Aborting.\n") ); + return; + } + cl_obj_unlock( &p_port->obj ); + + + cl_spinlock_acquire( &p_port->send_lock ); + for( i = 0; i < num_packets; i++ ) + { + desc.p_pkt = p_packet_array[i]; + desc.p_endpt = NULL; + desc.p_buf = NULL; + + /* Get the ethernet header so we can find the endpoint. */ + cl_perf_start( GetEthHdr ); + status = __send_mgr_get_eth_hdr( + p_packet_array[i], &p_buf, &p_eth_hdr, &buf_len ); + cl_perf_stop( &p_port->p_adapter->perf, GetEthHdr ); + if( status != NDIS_STATUS_SUCCESS ) + { + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, status ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + cl_perf_start( SendMgrQueue ); + status = __send_mgr_queue( p_port, p_eth_hdr, &desc.p_endpt ); + cl_perf_stop( &p_port->p_adapter->perf, SendMgrQueue ); + if( status == NDIS_STATUS_PENDING ) + { + /* Queue all remaining packets. */ + cl_perf_start( QueuePacket ); + while( i < num_packets ) + { + cl_qlist_insert_tail( &p_port->send_mgr.pending_list, + IPOIB_LIST_ITEM_FROM_PACKET( p_packet_array[i++] ) ); + } + cl_perf_stop( &p_port->p_adapter->perf, QueuePacket ); + break; + } + if( status != NDIS_STATUS_SUCCESS ) + { + ASSERT( status == NDIS_STATUS_NO_ROUTE_TO_DESTINATION ); + /* + * Complete the send as if we sent it - WHQL tests don't like the + * sends to fail. + */ + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, NDIS_STATUS_SUCCESS ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + cl_perf_start( BuildSendDesc ); + status = __build_send_desc( p_port, p_eth_hdr, p_buf, buf_len, &desc ); + cl_perf_stop( &p_port->p_adapter->perf, BuildSendDesc ); + if( status != NDIS_STATUS_SUCCESS ) + { + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, status ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + /* Post the WR. 
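+ * The NDIS_PACKET pointer rides in wr_id so that __send_cb can
+ * recover the packet, endpoint and scratch buffer when the work
+ * request completes.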
*/ + cl_perf_start( PostSend ); + ib_status = p_port->p_adapter->p_ifc->post_send( p_port->ib_mgr.h_qp, &desc.wr, NULL ); + cl_perf_stop( &p_port->p_adapter->perf, PostSend ); + if( ib_status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_post_send returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( ib_status )) ); + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, NDIS_STATUS_FAILURE ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + /* Flag the adapter as hung since posting is busted. */ + p_port->p_adapter->hung = TRUE; + continue; + } + + cl_atomic_inc( &p_port->send_mgr.depth ); + } + cl_spinlock_release( &p_port->send_lock ); + + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +void +ipoib_port_resume( + IN ipoib_port_t* const p_port ) +{ + NDIS_STATUS status; + ib_api_status_t ib_status; + cl_list_item_t *p_item; + ipoib_send_desc_t desc; + eth_hdr_t *p_eth_hdr; + NDIS_BUFFER *p_buf; + UINT buf_len; + + PERF_DECLARE( GetEndpt ); + PERF_DECLARE( BuildSendDesc ); + PERF_DECLARE( ProcessFailedSends ); + PERF_DECLARE( PostSend ); + + IPOIB_ENTER( IPOIB_DBG_SEND ); + + + cl_obj_lock( &p_port->obj ); + if( p_port->state != IB_QPS_RTS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND, + ("Invalid state - Aborting.\n") ); + cl_obj_unlock( &p_port->obj ); + return; + } + cl_obj_unlock( &p_port->obj ); + + cl_spinlock_acquire( &p_port->send_lock ); + + for( p_item = cl_qlist_head( &p_port->send_mgr.pending_list ); + p_item != cl_qlist_end( &p_port->send_mgr.pending_list ); + p_item = cl_qlist_head( &p_port->send_mgr.pending_list ) ) + { + /* Check the send queue and pend the request if not empty. */ + if( p_port->send_mgr.depth == p_port->p_adapter->params.sq_depth ) + { + IPOIB_PRINT( TRACE_LEVEL_WARNING, IPOIB_DBG_SEND, + ("No available WQEs.\n") ); + break; + } + + desc.p_pkt = IPOIB_PACKET_FROM_LIST_ITEM( + cl_qlist_remove_head( &p_port->send_mgr.pending_list ) ); + desc.p_endpt = NULL; + desc.p_buf = NULL; + + /* Get the ethernet header so we can find the endpoint. */ + status = __send_mgr_get_eth_hdr( + desc.p_pkt, &p_buf, &p_eth_hdr, &buf_len ); + if( status != NDIS_STATUS_SUCCESS ) + { + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, status ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + cl_perf_start( GetEndpt ); + status = __endpt_mgr_ref( p_port, p_eth_hdr->dst, &desc.p_endpt ); + cl_perf_stop( &p_port->p_adapter->perf, GetEndpt ); + if( status == NDIS_STATUS_PENDING ) + { + cl_qlist_insert_head( &p_port->send_mgr.pending_list, + IPOIB_LIST_ITEM_FROM_PACKET( desc.p_pkt ) ); + break; + } + else if( status != NDIS_STATUS_SUCCESS ) + { + ASSERT( status == NDIS_STATUS_NO_ROUTE_TO_DESTINATION ); + + if( ETH_IS_MULTICAST( p_eth_hdr->dst.addr ) ) + { + if( ipoib_port_join_mcast( p_port, p_eth_hdr->dst, + IB_MC_REC_STATE_SEND_ONLY_MEMBER) == IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND, + ("Multicast Mac - trying to join.\n") ); + cl_qlist_insert_head( &p_port->send_mgr.pending_list, + IPOIB_LIST_ITEM_FROM_PACKET( desc.p_pkt ) ); + break; + } + } + + /* + * Complete the send as if we sent it - WHQL tests don't like the + * sends to fail. 
+ */ + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, NDIS_STATUS_SUCCESS ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + cl_perf_start( BuildSendDesc ); + status = __build_send_desc( p_port, p_eth_hdr, p_buf, buf_len, &desc ); + cl_perf_stop( &p_port->p_adapter->perf, BuildSendDesc ); + if( status != NDIS_STATUS_SUCCESS ) + { + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, status ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + continue; + } + + /* Post the WR. */ + cl_perf_start( PostSend ); + ib_status = p_port->p_adapter->p_ifc->post_send( p_port->ib_mgr.h_qp, &desc.wr, NULL ); + cl_perf_stop( &p_port->p_adapter->perf, PostSend ); + if( ib_status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_post_send returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( ib_status )) ); + cl_perf_start( ProcessFailedSends ); + __process_failed_send( p_port, &desc, NDIS_STATUS_FAILURE ); + cl_perf_stop( &p_port->p_adapter->perf, ProcessFailedSends ); + /* Flag the adapter as hung since posting is busted. */ + p_port->p_adapter->hung = TRUE; + continue; + } + + cl_atomic_inc( &p_port->send_mgr.depth ); + } + cl_spinlock_release( &p_port->send_lock ); + + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +static void +__send_cb( + IN const ib_cq_handle_t h_cq, + IN void *cq_context ) +{ + ipoib_port_t *p_port; + ib_api_status_t status; + ib_wc_t wc[MAX_SEND_WC], *p_wc, *p_free; + cl_qlist_t done_list; + NDIS_PACKET *p_packet; + uint32_t length; + ipoib_endpt_t *p_endpt; + send_buf_t *p_send_buf; + ip_stat_sel_t type; + size_t i; + PERF_DECLARE( SendCompBundle ); + PERF_DECLARE( SendCb ); + PERF_DECLARE( PollSend ); + PERF_DECLARE( SendComp ); + PERF_DECLARE( FreeSendBuf ); + PERF_DECLARE( RearmSend ); + PERF_DECLARE( PortResume ); + + IPOIB_ENTER( IPOIB_DBG_SEND ); + + cl_perf_clr( SendCompBundle ); + + cl_perf_start( SendCb ); + + UNUSED_PARAM( h_cq ); + + cl_qlist_init( &done_list ); + + p_port = (ipoib_port_t*)cq_context; + + ipoib_port_ref( p_port, ref_send_cb ); + + for( i = 0; i < MAX_SEND_WC; i++ ) + wc[i].p_next = &wc[i + 1]; + wc[MAX_SEND_WC - 1].p_next = NULL; + + do + { + p_free = wc; + cl_perf_start( PollSend ); + status = p_port->p_adapter->p_ifc->poll_cq( p_port->ib_mgr.h_send_cq, &p_free, &p_wc ); + cl_perf_stop( &p_port->p_adapter->perf, PollSend ); + CL_ASSERT( status == IB_SUCCESS || status == IB_NOT_FOUND ); + + while( p_wc ) + { + cl_perf_start( SendComp ); + CL_ASSERT( p_wc->wc_type == IB_WC_SEND ); + p_packet = (NDIS_PACKET*)(uintn_t)p_wc->wr_id; + CL_ASSERT( p_packet ); + CL_ASSERT( IPOIB_PORT_FROM_PACKET( p_packet ) == p_port ); + + p_endpt = IPOIB_ENDPT_FROM_PACKET( p_packet ); + p_send_buf = IPOIB_SEND_FROM_PACKET( p_packet ); + switch( p_wc->status ) + { + case IB_WCS_SUCCESS: + if( p_endpt->h_mcast ) + { + if( p_endpt->dgid.multicast.raw_group_id[11] == 0xFF && + p_endpt->dgid.multicast.raw_group_id[10] == 0xFF && + p_endpt->dgid.multicast.raw_group_id[12] == 0xFF && + p_endpt->dgid.multicast.raw_group_id[13] == 0xFF ) + { + type = IP_STAT_BCAST_BYTES; + } + else + { + type = IP_STAT_MCAST_BYTES; + } + } + else + { + type = IP_STAT_UCAST_BYTES; + } + NdisQueryPacketLength( p_packet, &length ); + ipoib_inc_send_stat( p_port->p_adapter, type, length ); + NdisMSendComplete( p_port->p_adapter->h_adapter, + p_packet, NDIS_STATUS_SUCCESS ); + break; + + case IB_WCS_WR_FLUSHED_ERR: + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_SEND, + 
("Flushed send completion.\n") ); + ipoib_inc_send_stat( p_port->p_adapter, IP_STAT_DROPPED, 0 ); + NdisMSendComplete( p_port->p_adapter->h_adapter, + p_packet, NDIS_STATUS_RESET_IN_PROGRESS ); + break; + + default: + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Send failed with %s (vendor specific %#x)\n", + p_port->p_adapter->p_ifc->get_wc_status_str( p_wc->status ), + (int)p_wc->vendor_specific) ); + ipoib_inc_send_stat( p_port->p_adapter, IP_STAT_ERROR, 0 ); + NdisMSendComplete( p_port->p_adapter->h_adapter, + p_packet, NDIS_STATUS_FAILURE ); + break; + } + cl_perf_stop( &p_port->p_adapter->perf, SendComp ); + /* Dereference the enpoint used for the transfer. */ + ipoib_endpt_deref( p_endpt ); + + if( p_send_buf ) + { + cl_perf_start( FreeSendBuf ); + ExFreeToNPagedLookasideList( &p_port->buf_mgr.send_buf_list, + p_send_buf ); + cl_perf_stop( &p_port->p_adapter->perf, FreeSendBuf ); + } + + cl_atomic_dec( &p_port->send_mgr.depth ); + + p_wc = p_wc->p_next; + cl_perf_inc( SendCompBundle ); + } + /* If we didn't use up every WC, break out. */ + } while( !p_free ); + + /* Rearm the CQ. */ + cl_perf_start( RearmSend ); + status = p_port->p_adapter->p_ifc->rearm_cq( p_port->ib_mgr.h_send_cq, FALSE ); + cl_perf_stop( &p_port->p_adapter->perf, RearmSend ); + CL_ASSERT( status == IB_SUCCESS ); + + /* Resume any sends awaiting resources. */ + cl_perf_start( PortResume ); + ipoib_port_resume( p_port ); + cl_perf_stop( &p_port->p_adapter->perf, PortResume ); + + ipoib_port_deref( p_port, ref_send_cb ); + + cl_perf_stop( &p_port->p_adapter->perf, SendCb ); + cl_perf_update_ctr( &p_port->p_adapter->perf, SendCompBundle ); + + IPOIB_EXIT( IPOIB_DBG_SEND ); +} + + +/****************************************************************************** +* +* Endpoint manager implementation +* +******************************************************************************/ +static void +__endpt_mgr_construct( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + cl_qmap_init( &p_port->endpt_mgr.mac_endpts ); + cl_qmap_init( &p_port->endpt_mgr.lid_endpts ); + cl_fmap_init( &p_port->endpt_mgr.gid_endpts, __gid_cmp ); + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__endpt_mgr_init( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + UNUSED_PARAM( p_port ); + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +static void +__endpt_mgr_destroy( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_INIT ); + CL_ASSERT( cl_is_qmap_empty( &p_port->endpt_mgr.mac_endpts ) ); + CL_ASSERT( cl_is_qmap_empty( &p_port->endpt_mgr.lid_endpts ) ); + CL_ASSERT( cl_is_fmap_empty( &p_port->endpt_mgr.gid_endpts ) ); + UNUSED_PARAM( p_port ); + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +__endpt_mgr_remove_all( + IN ipoib_port_t* const p_port ) +{ + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + cl_obj_lock( &p_port->obj ); + /* Wait for all readers to complete. */ + while( p_port->endpt_rdr ) + ; + /* + * We don't need to initiate destruction - this is called only + * from the __port_destroying function, and destruction cascades + * to all child objects. Just clear all the maps. 
+ */
+ cl_qmap_remove_all( &p_port->endpt_mgr.mac_endpts );
+ cl_qmap_remove_all( &p_port->endpt_mgr.lid_endpts );
+ cl_fmap_remove_all( &p_port->endpt_mgr.gid_endpts );
+ cl_obj_unlock( &p_port->obj );
+
+ IPOIB_EXIT( IPOIB_DBG_ENDPT );
+}
+
+
+static void
+__endpt_mgr_reset_all(
+ IN ipoib_port_t* const p_port )
+{
+ cl_map_item_t *p_item;
+ ipoib_endpt_t *p_endpt;
+ cl_qlist_t mc_list;
+ uint32_t local_exist = 0;
+
+
+ IPOIB_ENTER( IPOIB_DBG_ENDPT );
+
+ cl_qlist_init( &mc_list );
+
+ cl_obj_lock( &p_port->obj );
+ /* Wait for all readers to complete. */
+ while( p_port->endpt_rdr )
+ ;
+
+ if( p_port->p_local_endpt )
+ {
+ cl_qmap_remove_item( &p_port->endpt_mgr.mac_endpts,
+ &p_port->p_local_endpt->mac_item );
+ cl_fmap_remove_item( &p_port->endpt_mgr.gid_endpts,
+ &p_port->p_local_endpt->gid_item );
+ cl_qmap_remove_item( &p_port->endpt_mgr.lid_endpts,
+ &p_port->p_local_endpt->lid_item );
+
+ cl_qlist_insert_head(
+ &mc_list, &p_port->p_local_endpt->mac_item.pool_item.list_item );
+ local_exist = 1;
+
+ p_port->p_local_endpt = NULL;
+ }
+
+ p_item = cl_qmap_head( &p_port->endpt_mgr.mac_endpts );
+ while( p_item != cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) )
+ {
+ p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item );
+ p_item = cl_qmap_next( p_item );
+ if( p_endpt->h_mcast )
+ {
+ /*
+ * We destroy MC endpoints since they will get recreated
+ * when the port comes back up and we rejoin the MC groups.
+ */
+ cl_qmap_remove_item( &p_port->endpt_mgr.mac_endpts,
+ &p_endpt->mac_item );
+ cl_fmap_remove_item( &p_port->endpt_mgr.gid_endpts,
+ &p_endpt->gid_item );
+
+ cl_qlist_insert_tail(
+ &mc_list, &p_endpt->mac_item.pool_item.list_item );
+ }
+ else if( p_endpt->h_av )
+ {
+ /* Destroy the AV for all other endpoints. */
+ p_port->p_adapter->p_ifc->destroy_av( p_endpt->h_av );
+ p_endpt->h_av = NULL;
+ }
+
+ if( p_endpt->dlid )
+ {
+ cl_qmap_remove_item( &p_port->endpt_mgr.lid_endpts,
+ &p_endpt->lid_item );
+ p_endpt->dlid = 0;
+ }
+
+ }
+
+ cl_obj_unlock( &p_port->obj );
+
+
+ if( cl_qlist_count( &mc_list ) - local_exist )
+ {
+ p_port->mcast_cnt = (uint32_t)cl_qlist_count( &mc_list ) - local_exist;
+ }
+ else
+ {
+ p_port->mcast_cnt = 0;
+ KeSetEvent( &p_port->leave_mcast_event, EVENT_INCREMENT, FALSE );
+ }
+
+ /* mcast_cnt already excludes the local endpoint; report it as-is. */
+ IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT,
+ ("p_port->mcast_cnt = %d\n", p_port->mcast_cnt) );
+
+ /* Destroy all multicast endpoints now that we have released the lock. */
+ while( cl_qlist_count( &mc_list ) )
+ {
+ cl_obj_destroy( &PARENT_STRUCT( cl_qlist_remove_head( &mc_list ),
+ ipoib_endpt_t, mac_item.pool_item.list_item )->obj );
+ }
+
+ IPOIB_EXIT( IPOIB_DBG_ENDPT );
+}
+
+
+/*
+ * Called when updating an endpoint entry in response to an ARP.
+ * Because receive processing is serialized, and holds a reference
+ * on the endpoint reader, we wait for all *other* readers to exit before
+ * removing the item.
+ */
+static void
+__endpt_mgr_remove(
+ IN ipoib_port_t* const p_port,
+ IN ipoib_endpt_t* const p_endpt )
+{
+ IPOIB_ENTER( IPOIB_DBG_ENDPT );
+
+ cl_obj_lock( &p_port->obj );
+ /* Wait for all readers to complete. */
+ while( p_port->endpt_rdr > 1 )
+ ;
+
+ /* Remove the endpoint from the maps so further requests don't find it. */
+ cl_qmap_remove_item( &p_port->endpt_mgr.mac_endpts, &p_endpt->mac_item );
+ /*
+ * The endpoints are *ALWAYS* in both the MAC and GID maps. They are only
+ * in the LID map if the GID has the same subnet prefix as us.
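+ * (LID-routed lookups presumably only make sense for endpoints on the
+ * local subnet; remote-subnet GIDs are reached through a router whose
+ * LID is not the endpoint's own.)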
+ */ + cl_fmap_remove_item( &p_port->endpt_mgr.gid_endpts, &p_endpt->gid_item ); + + if( p_endpt->dlid ) + { + cl_qmap_remove_item( &p_port->endpt_mgr.lid_endpts, + &p_endpt->lid_item ); + } + + cl_obj_unlock( &p_port->obj ); + + cl_obj_destroy( &p_endpt->obj ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); +} + + +NTSTATUS +ipoib_mac_to_gid( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + OUT ib_gid_t* p_gid ) +{ + ipoib_endpt_t* p_endpt; + cl_map_item_t *p_item; + uint64_t key = 0; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + cl_memcpy( &key, &mac, sizeof(mac_addr_t) ); + + cl_obj_lock( &p_port->obj ); + + p_item = cl_qmap_get( &p_port->endpt_mgr.mac_endpts, key ); + if( p_item == cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) ) + { + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed endpoint lookup.\n") ); + return STATUS_INVALID_PARAMETER; + } + + p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item ); + *p_gid = p_endpt->dgid; + + cl_obj_unlock( &p_port->obj ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return STATUS_SUCCESS; +} + + +static inline NDIS_STATUS +__endpt_mgr_ref( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + OUT ipoib_endpt_t** const pp_endpt ) +{ + NDIS_STATUS status; + cl_map_item_t *p_item; + uint64_t key; + + PERF_DECLARE( EndptQueue ); + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + if( !cl_memcmp( &mac, &p_port->p_adapter->params.conf_mac, sizeof(mac) ) ) + { + /* Discard loopback traffic. */ + IPOIB_PRINT(TRACE_LEVEL_WARNING, IPOIB_DBG_ENDPT, + ("Discarding loopback traffic\n") ); + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return NDIS_STATUS_NO_ROUTE_TO_DESTINATION; + } + + key = 0; + cl_memcpy( &key, &mac, sizeof(mac_addr_t) ); + + cl_obj_lock( &p_port->obj ); + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("Look for :\t MAC: %02X-%02X-%02X-%02X-%02X-%02X\n", + mac.addr[0], mac.addr[1], mac.addr[2], + mac.addr[3], mac.addr[4], mac.addr[5]) ); + + p_item = cl_qmap_get( &p_port->endpt_mgr.mac_endpts, key ); + if( p_item == cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) ) + { + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("Failed endpoint lookup.\n") ); + return NDIS_STATUS_NO_ROUTE_TO_DESTINATION; + } + + *pp_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item ); + ipoib_endpt_ref( *pp_endpt ); + + cl_obj_unlock( &p_port->obj ); + + cl_perf_start( EndptQueue ); + status = ipoib_endpt_queue( *pp_endpt ); + cl_perf_stop( &p_port->p_adapter->perf, EndptQueue ); + if( status != NDIS_STATUS_SUCCESS ) + *pp_endpt = NULL; + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return status; +} + + +static inline NDIS_STATUS +__endpt_mgr_get_gid_qpn( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + OUT ib_gid_t* const p_gid, + OUT UNALIGNED net32_t* const p_qpn ) +{ + UNALIGNED + cl_map_item_t *p_item; + ipoib_endpt_t *p_endpt; + uint64_t key; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + cl_obj_lock( &p_port->obj ); + + key = 0; + cl_memcpy( &key, &mac, sizeof(mac_addr_t) ); + p_item = cl_qmap_get( &p_port->endpt_mgr.mac_endpts, key ); + if( p_item == cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) ) + { + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("Failed endpoint lookup.\n") ); + return NDIS_STATUS_FAILURE; + } + + p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item ); + + *p_gid = p_endpt->dgid; + *p_qpn = p_endpt->qpn; + + cl_obj_unlock( &p_port->obj ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return NDIS_STATUS_SUCCESS; 
+} + + +static inline ipoib_endpt_t* +__endpt_mgr_get_by_gid( + IN ipoib_port_t* const p_port, + IN const ib_gid_t* const p_gid ) +{ + cl_fmap_item_t *p_item; + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_item = cl_fmap_get( &p_port->endpt_mgr.gid_endpts, p_gid ); + if( p_item == cl_fmap_end( &p_port->endpt_mgr.gid_endpts ) ) + p_endpt = NULL; + else + p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, gid_item ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return p_endpt; +} + + +static ipoib_endpt_t* +__endpt_mgr_get_by_lid( + IN ipoib_port_t* const p_port, + IN const net16_t lid ) +{ + cl_map_item_t *p_item; + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + p_item = cl_qmap_get( &p_port->endpt_mgr.lid_endpts, lid ); + if( p_item == cl_qmap_end( &p_port->endpt_mgr.lid_endpts ) ) + p_endpt = NULL; + else + p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, lid_item ); + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return p_endpt; +} + + +inline ib_api_status_t +__endpt_mgr_insert_locked( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + IN ipoib_endpt_t* const p_endpt ) +{ + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_ENDPT, + ("insert :\t MAC: %02X-%02X-%02X-%02X-%02X-%02X\n", + mac.addr[0], mac.addr[1], mac.addr[2], + mac.addr[3], mac.addr[4], mac.addr[5]) ); + + cl_obj_lock( &p_port->obj ); + while( p_port->endpt_rdr ) + { + cl_obj_unlock( &p_port->obj ); + cl_obj_lock( &p_port->obj ); + } + /* __endpt_mgr_insert expects *one* reference to be held when being called. */ + cl_atomic_inc( &p_port->endpt_rdr ); + status= __endpt_mgr_insert( p_port, mac, p_endpt ); + cl_atomic_dec( &p_port->endpt_rdr ); + cl_obj_unlock( &p_port->obj ); + + return status; +} + + +inline ib_api_status_t +__endpt_mgr_insert( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + IN ipoib_endpt_t* const p_endpt ) +{ + uint64_t key; + cl_status_t cl_status; + cl_map_item_t *p_qitem; + cl_fmap_item_t *p_fitem; + + IPOIB_ENTER( IPOIB_DBG_ENDPT ); + + /* Wait for all accesses to the map to complete. */ + while( p_port->endpt_rdr > 1 ) + ; + + /* Link the endpoint to the port. */ + cl_status = cl_obj_insert_rel_parent_locked( + &p_endpt->rel, &p_port->obj, &p_endpt->obj ); + + if( cl_status != CL_SUCCESS ) + { + cl_obj_destroy( &p_endpt->obj ); + return IB_INVALID_STATE; + } + +#if DBG + cl_atomic_inc( &p_port->ref[ref_endpt_track] ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ, + ("ref type %d ref_cnt %d\n", ref_endpt_track, p_port->obj.ref_cnt) ); +#endif + + p_endpt->mac = mac; + key = 0; + cl_memcpy( &key, &mac, sizeof(mac_addr_t) ); + p_qitem = cl_qmap_insert( + &p_port->endpt_mgr.mac_endpts, key, &p_endpt->mac_item ); + CL_ASSERT( p_qitem == &p_endpt->mac_item ); + p_fitem = cl_fmap_insert( + &p_port->endpt_mgr.gid_endpts, &p_endpt->dgid, &p_endpt->gid_item ); + CL_ASSERT( p_fitem == &p_endpt->gid_item ); + if( p_endpt->dlid ) + { + p_qitem = cl_qmap_insert( + &p_port->endpt_mgr.lid_endpts, p_endpt->dlid, &p_endpt->lid_item ); + CL_ASSERT( p_qitem == &p_endpt->lid_item ); + } + + IPOIB_EXIT( IPOIB_DBG_ENDPT ); + return IB_SUCCESS; +} + + +static ib_api_status_t +__endpt_mgr_add_bcast( + IN ipoib_port_t* const p_port, + IN ib_mcast_rec_t *p_mcast_rec ) +{ + ib_api_status_t status; + ipoib_endpt_t *p_endpt; + mac_addr_t bcast_mac; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* + * Cache the broadcast group properties for creating future mcast groups. 
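+ * Per the IPoIB spec, the other multicast groups in the subnet are
+ * presumably created with the broadcast group's qkey, SL, MTU and rate,
+ * so the cached record seeds the later join requests.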
+ */
+ p_port->ib_mgr.bcast_rec = *p_mcast_rec->p_member_rec;
+
+ /* Allocate the broadcast endpoint. */
+ p_endpt = ipoib_endpt_create( &p_mcast_rec->p_member_rec->mgid,
+ p_mcast_rec->p_member_rec->mlid, CL_HTON32(0x00FFFFFF) );
+ if( !p_endpt )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("ipoib_endpt_create failed.\n") );
+ return IB_INSUFFICIENT_RESOURCES;
+ }
+ /* Set a reference to the transport interface for use until the endpoint is attached to the port. */
+ p_endpt->p_ifc = p_port->p_adapter->p_ifc;
+ status = ipoib_endpt_set_mcast( p_endpt, p_port->ib_mgr.h_pd,
+ p_port->port_num, p_mcast_rec );
+ if( status != IB_SUCCESS )
+ {
+ IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR,
+ ("ipoib_endpt_set_mcast returned %s\n",
+ p_port->p_adapter->p_ifc->get_err_str( status )) );
+ return status;
+ }
+
+ /* Add the broadcast endpoint to the endpoint map. */
+ cl_memset( &bcast_mac, 0xFF, sizeof(bcast_mac) );
+ status = __endpt_mgr_insert_locked( p_port, bcast_mac, p_endpt );
+
+ IPOIB_EXIT( IPOIB_DBG_INIT );
+ return status;
+}
+
+
+void
+ipoib_port_remove_endpt(
+ IN ipoib_port_t* const p_port,
+ IN const mac_addr_t mac )
+{
+ cl_map_item_t *p_item;
+ ipoib_endpt_t *p_endpt;
+ uint64_t key;
+
+ IPOIB_ENTER( IPOIB_DBG_ENDPT );
+
+ key = 0;
+ cl_memcpy( &key, &mac, sizeof(mac_addr_t) );
+
+ /* Remove the endpoint from the maps so further requests don't find it. */
+ cl_obj_lock( &p_port->obj );
+ /* Wait for all readers to finish */
+ while( p_port->endpt_rdr )
+ ;
+ p_item = cl_qmap_remove( &p_port->endpt_mgr.mac_endpts, key );
+ /*
+ * Dereference the endpoint. If the ref count goes to zero, it
+ * will get freed.
+ */
+ if( p_item != cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) )
+ {
+ p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item );
+ /*
+ * The endpoints are *ALWAYS* in both the MAC and GID maps. They are only
+ * in the LID map if the GID has the same subnet prefix as us.
+ */
+ cl_fmap_remove_item(
+ &p_port->endpt_mgr.gid_endpts, &p_endpt->gid_item );
+
+ if( p_endpt->dlid )
+ {
+ cl_qmap_remove_item(
+ &p_port->endpt_mgr.lid_endpts, &p_endpt->lid_item );
+ }
+
+ cl_obj_unlock( &p_port->obj );
+ cl_obj_destroy( &p_endpt->obj );
+#if DBG
+ cl_atomic_dec( &p_port->ref[ref_endpt_track] );
+ IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_OBJ,
+ ("ref type %d ref_cnt %d\n", ref_endpt_track, p_port->obj.ref_cnt) );
+#endif
+
+ }
+ else
+ {
+ cl_obj_unlock( &p_port->obj );
+ }
+
+ IPOIB_EXIT( IPOIB_DBG_ENDPT );
+}
+
+
+void
+ipoib_port_flush_endpts(
+ IN ipoib_port_t* const p_port )
+{
+ cl_map_item_t *p_item;
+ ipoib_endpt_t *p_endpt;
+
+ IPOIB_ENTER( IPOIB_DBG_ENDPT );
+
+ cl_obj_lock( &p_port->obj );
+ p_item = cl_qmap_head( &p_port->endpt_mgr.mac_endpts );
+ while( p_item != cl_qmap_end( &p_port->endpt_mgr.mac_endpts ) )
+ {
+ p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, mac_item );
+ p_item = cl_qmap_next( p_item );
+
+ /*
+ * If the endpoint has been marked as expired before, and we have
+ * an AV handle, free the AV.
+ */
+ if( p_endpt->expired && p_endpt->h_av )
+ {
+ CL_ASSERT( p_endpt->obj.ref_cnt == 1 );
+ p_port->p_adapter->p_ifc->destroy_av( p_endpt->h_av );
+ p_endpt->h_av = NULL;
+ p_endpt->expired = FALSE;
+ }
+
+ /*
+ * If the endpoint is not in use, mark it as expired.
+ * Note that the ref count is only zero when the endpoint gets
+ * destroyed, so an endpoint that is not in use has a ref count of 1.
+ * Also note that we never expire any multicast endpoints.
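+ * Multicast endpoints are instead torn down in __endpt_mgr_reset_all
+ * when the port resets, since the groups are rejoined when it comes
+ * back up.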
+ */
+ CL_ASSERT( p_endpt->obj.ref_cnt != 0 );
+ if( p_endpt->obj.ref_cnt == 1 && p_endpt->h_av && !p_endpt->h_mcast )
+ p_endpt->expired = TRUE;
+ }
+ cl_obj_unlock( &p_port->obj );
+
+ IPOIB_EXIT( IPOIB_DBG_ENDPT );
+}
+
+
+/*
+ * The sequence for port up is as follows:
+ * 1. The port goes active. This allows the adapter to send SA queries
+ * and join the broadcast group (and other groups).
+ *
+ * 2. The adapter sends an SA query for the broadcast group.
+ *
+ * 3. Upon completion of the query, the adapter joins the broadcast group.
+ */
+
+
+/*
+ * Kick off the port bring-up: query the SA for our port info record.
+ * The broadcast group query is issued from that query's callback.
+ */
+void
+ipoib_port_up(
+ IN ipoib_port_t* const p_port,
+ IN const ib_pnp_port_rec_t* const p_pnp_rec )
+{
+ ib_api_status_t status;
+ ib_query_req_t query;
+ ib_user_query_t info;
+ ib_portinfo_record_t port_rec;
+
+ IPOIB_ENTER( IPOIB_DBG_INIT );
+
+ /* Wait for all work requests to get flushed. */
+ while( p_port->recv_mgr.depth || p_port->send_mgr.depth )
+ cl_thread_suspend( 0 );
+
+ cl_obj_lock( &p_port->obj );
+ p_port->state = IB_QPS_INIT;
+ KeResetEvent( &p_port->sa_event );
+ cl_obj_unlock( &p_port->obj );
+
+ info.method = IB_MAD_METHOD_GET;
+ info.attr_id = IB_MAD_ATTR_PORTINFO_RECORD;
+ info.attr_size = sizeof(ib_portinfo_record_t);
+ info.comp_mask = IB_PIR_COMPMASK_BASELID;
+ info.p_attr = &port_rec;
+
+ /* Query requires only the base LID. */
+ cl_memclr( &port_rec, sizeof(ib_portinfo_record_t) );
+ port_rec.port_info.base_lid = p_pnp_rec->p_port_attr->lid;
+
+ cl_memclr( &query, sizeof(ib_query_req_t) );
+ query.query_type = IB_QUERY_USER_DEFINED;
+ query.p_query_input = &info;
+ query.port_guid = p_port->p_adapter->guids.port_guid;
+ query.timeout_ms = p_port->p_adapter->params.sa_timeout;
+ query.retry_cnt = p_port->p_adapter->params.sa_retry_cnt;
+ query.query_context = p_port;
+ query.pfn_query_cb = __port_info_cb;
+
+ /* Reference the object for the port info query.
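+ * The reference is released by __port_info_cb once the query
+ * completes or fails.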
*/ + ipoib_port_ref( p_port, ref_port_up ); + + status = p_port->p_adapter->p_ifc->query( + p_port->p_adapter->h_al, &query, &p_port->ib_mgr.h_query ); + if( status != IB_SUCCESS ) + { + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + ipoib_set_inactive( p_port->p_adapter ); + ipoib_port_deref( p_port, ref_port_up ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_query returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__endpt_mgr_add_local( + IN ipoib_port_t* const p_port, + IN ib_port_info_t* const p_port_info ) +{ + ib_api_status_t status; + ib_gid_t gid; + ipoib_endpt_t *p_endpt; + ib_av_attr_t av_attr; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + ib_gid_set_default( &gid, p_port->p_adapter->guids.port_guid ); + p_endpt = ipoib_endpt_create( + &gid, p_port_info->base_lid, p_port->ib_mgr.qpn ); + if( !p_endpt ) + { + p_port->p_adapter->hung = TRUE; + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Failed to create local endpt\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + cl_memclr( &av_attr, sizeof(ib_av_attr_t) ); + av_attr.port_num = p_port->port_num; + av_attr.sl = 0; + av_attr.dlid = p_port_info->base_lid; + av_attr.static_rate = ib_port_info_compute_rate( p_port_info ); + av_attr.path_bits = 0; + status = p_port->p_adapter->p_ifc->create_av( + p_port->ib_mgr.h_pd, &av_attr, &p_endpt->h_av ); + if( status != IB_SUCCESS ) + { + cl_obj_destroy( &p_endpt->obj ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_create_av for local endpoint returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* __endpt_mgr_insert expects *one* reference to be held. */ + cl_atomic_inc( &p_port->endpt_rdr ); + status = __endpt_mgr_insert( p_port, p_port->p_adapter->params.conf_mac, p_endpt ); + cl_atomic_dec( &p_port->endpt_rdr ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_insert for local endpoint returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + p_port->p_local_endpt = p_endpt; + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +static void +__port_info_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ib_api_status_t status; + ipoib_port_t *p_port; + ib_portinfo_record_t *p_port_rec; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_port = (ipoib_port_t* __ptr64)p_query_rec->query_context; + + cl_obj_lock( &p_port->obj ); + p_port->ib_mgr.h_query = NULL; + + if( p_port->state != IB_QPS_INIT ) + { + status = IB_CANCELED; + goto done; + } + + status = p_query_rec->status; + + switch( status ) + { + case IB_SUCCESS: + /* Note that the we report the rate from the port info. 
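+	 * The computed rate is cached in ib_mgr.rate and later checked
+	 * against the broadcast group's rate in __port_join_bcast.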
*/ + p_port_rec = (ib_portinfo_record_t*) + ib_get_query_result( p_query_rec->p_result_mad, 0 ); + + status = __endpt_mgr_add_local( p_port, &p_port_rec->port_info ); + if( status == IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Received port info: link width = %d.\n", + p_port_rec->port_info.link_width_active) ); + + p_port->ib_mgr.rate = + ib_port_info_compute_rate( &p_port_rec->port_info ); + + ipoib_set_rate( p_port->p_adapter, + p_port_rec->port_info.link_width_active, + ib_port_info_get_link_speed_active( &p_port_rec->port_info ) ); + + status = __port_get_bcast( p_port ); + } + else + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_add_local returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + } + break; + + case IB_CANCELED: + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Instance destroying - Aborting.\n") ); + break; + + case IB_TIMEOUT: + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_PORT_INFO_TIMEOUT, 0 ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Port info query timed out.\n") ); + break; + + case IB_REMOTE_ERROR: + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_PORT_INFO_REJECT, 0 ); + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Port info query rejected by SA.\n") ); + break; + + default: + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_QUERY_PORT_INFO, 1, p_query_rec->status ); + /* Hopefully we'll get an SM change event that will restart things. */ + IPOIB_PRINT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Port info query failed.\n") ); + } + +done: + cl_obj_unlock( &p_port->obj ); + + if( status != IB_SUCCESS ) + { + if( status != IB_CANCELED ) + { + ipoib_set_inactive( p_port->p_adapter ); + __endpt_mgr_reset_all( p_port ); + } + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + } + + /* Return the response MAD to AL. */ + if( p_query_rec->p_result_mad ) + p_port->p_adapter->p_ifc->put_mad( p_query_rec->p_result_mad ); + + /* Release the reference taken when issuing the port info query. */ + ipoib_port_deref( p_port, ref_port_info_cb ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__port_get_bcast( + IN ipoib_port_t* const p_port ) +{ + ib_api_status_t status; + ib_query_req_t query; + ib_user_query_t info; + ib_member_rec_t member_rec; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + info.method = IB_MAD_METHOD_GETTABLE; + info.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + info.attr_size = sizeof(ib_member_rec_t); + info.comp_mask = IB_MCR_COMPMASK_MGID; + info.p_attr = &member_rec; + + /* Query requires only the MGID. */ + cl_memclr( &member_rec, sizeof(ib_member_rec_t) ); + member_rec.mgid = bcast_mgid_template; + + cl_memclr( &query, sizeof(ib_query_req_t) ); + query.query_type = IB_QUERY_USER_DEFINED; + query.p_query_input = &info; + query.port_guid = p_port->p_adapter->guids.port_guid; + query.timeout_ms = p_port->p_adapter->params.sa_timeout; + query.retry_cnt = p_port->p_adapter->params.sa_retry_cnt; + query.query_context = p_port; + query.pfn_query_cb = __bcast_get_cb; + + /* reference the object for the multicast query. 
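+	 * The reference is released in __bcast_get_cb when the query
+	 * completes, or immediately below if the query call fails.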
*/ + ipoib_port_ref( p_port, ref_get_bcast ); + + status = p_port->p_adapter->p_ifc->query( + p_port->p_adapter->h_al, &query, &p_port->ib_mgr.h_query ); + if( status != IB_SUCCESS ) + { + ipoib_port_deref( p_port, ref_get_bcast ); + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_query returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +/* Callback for the MCMemberRecord Get query for the IPv4 broadcast group. */ +static void +__bcast_get_cb( + IN ib_query_rec_t *p_query_rec ) +{ + ipoib_port_t *p_port; + ib_member_rec_t *p_mc_req; + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_port = (ipoib_port_t* __ptr64)p_query_rec->query_context; + + cl_obj_lock( &p_port->obj ); + p_port->ib_mgr.h_query = NULL; + if( p_port->state != IB_QPS_INIT ) + { + status = IB_CANCELED; + goto done; + } + + status = p_query_rec->status; + + switch( status ) + { + case IB_SUCCESS: + if( p_query_rec->result_cnt ) + { + p_mc_req = (ib_member_rec_t*) + ib_get_query_result( p_query_rec->p_result_mad, 0 ); + + /* Join the broadcast group. */ + status = __port_join_bcast( p_port, p_mc_req ); + break; + } + /* Fall through. */ + + case IB_REMOTE_ERROR: + /* SA failed the query. Broadcast group doesn't exist, create it. */ + status = __port_create_bcast( p_port ); + break; + + case IB_CANCELED: + IPOIB_PRINT(TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Instance destroying - Aborting.\n") ); + break; + + default: + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_BCAST_GET, 1, p_query_rec->status ); + } + +done: + cl_obj_unlock( &p_port->obj ); + + if( status != IB_SUCCESS ) + { + if( status != IB_CANCELED ) + { + ipoib_set_inactive( p_port->p_adapter ); + __endpt_mgr_reset_all( p_port ); + } + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + } + + /* Return the response MAD to AL. */ + if( p_query_rec->p_result_mad ) + p_port->p_adapter->p_ifc->put_mad( p_query_rec->p_result_mad ); + + /* Release the reference taken when issuing the member record query. */ + ipoib_port_deref( p_port, ref_bcast_get_cb ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static ib_api_status_t +__port_join_bcast( + IN ipoib_port_t* const p_port, + IN ib_member_rec_t* const p_member_rec ) +{ + ib_api_status_t status; + ib_mcast_req_t mcast_req; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* Check that the rate is realizable for our port. */ + if( p_port->ib_mgr.rate < (p_member_rec->rate & 0x3F) && + (g_ipoib.bypass_check_bcast_rate == 0)) + { + /* + * The MC group rate is higher than our port's rate. Log an error + * and stop. A port transition will drive the retry. + */ + IPOIB_PRINT(TRACE_LEVEL_WARNING, IPOIB_DBG_INIT, + ("Unrealizable join due to rate mismatch.\n") ); + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_BCAST_RATE, 2, + (uint32_t)(p_member_rec->rate & 0x3F), + (uint32_t)p_port->ib_mgr.rate ); + return IB_ERROR; + } + + /* Join the broadcast group. */ + cl_memclr( &mcast_req, sizeof(mcast_req) ); + /* Copy the results of the Get to use as parameters. */ + mcast_req.member_rec = *p_member_rec; + /* We specify our port GID for the join operation. 
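+	 * The GID built below is the default unicast GID: the well-known
+	 * link-local prefix (IB_DEFAULT_SUBNET_PREFIX, i.e. fe80::/64)
+	 * followed by the port GUID as the interface ID, the same layout
+	 * that ib_gid_set_default produces elsewhere in this file.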
*/ + mcast_req.member_rec.port_gid.unicast.prefix = IB_DEFAULT_SUBNET_PREFIX; + mcast_req.member_rec.port_gid.unicast.interface_id = + p_port->p_adapter->guids.port_guid; + + mcast_req.mcast_context = p_port; + mcast_req.pfn_mcast_cb = __bcast_cb; + mcast_req.timeout_ms = p_port->p_adapter->params.sa_timeout; + mcast_req.retry_cnt = p_port->p_adapter->params.sa_retry_cnt; + mcast_req.port_guid = p_port->p_adapter->guids.port_guid; + mcast_req.pkey_index = 0; + + if( ib_member_get_state( mcast_req.member_rec.scope_state ) != + IB_MC_REC_STATE_FULL_MEMBER ) + { + IPOIB_PRINT(TRACE_LEVEL_WARNING, IPOIB_DBG_INIT, + ("Incorrect MC member rec join state in query response.\n") ); + ib_member_set_state( &mcast_req.member_rec.scope_state, + IB_MC_REC_STATE_FULL_MEMBER ); + } + + /* reference the object for the multicast join request. */ + ipoib_port_ref( p_port, ref_join_bcast ); + + status = p_port->p_adapter->p_ifc->join_mcast( + p_port->ib_mgr.h_qp, &mcast_req ); + if( status != IB_SUCCESS ) + { + ipoib_port_deref( p_port, ref_bcast_join_failed ); + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_join_mcast returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + } + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +static ib_api_status_t +__port_create_bcast( + IN ipoib_port_t* const p_port ) +{ + ib_api_status_t status; + ib_mcast_req_t mcast_req; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* Join the broadcast group. */ + cl_memclr( &mcast_req, sizeof(mcast_req) ); + mcast_req.create = TRUE; + /* + * Create requires pkey, qkey, SL, flow label, traffic class, joing state + * and port GID. + * + * We specify the MGID since we don't want the SA to generate it for us. + */ + mcast_req.member_rec.mgid = bcast_mgid_template; + ib_gid_set_default( &mcast_req.member_rec.port_gid, + p_port->p_adapter->guids.port_guid ); + /* + * IPOIB spec requires that the QKEY have the MSb set so that the QKEY + * from the QP is used rather than the QKEY in the send WR. + */ + mcast_req.member_rec.qkey = + (uint32_t)(uintn_t)p_port | IB_QP_PRIVILEGED_Q_KEY; + mcast_req.member_rec.mtu = + (IB_PATH_SELECTOR_EXACTLY << 6) | IB_MTU_LEN_2048; + + mcast_req.member_rec.pkey = IB_DEFAULT_PKEY; + + mcast_req.member_rec.sl_flow_hop = ib_member_set_sl_flow_hop( 0, 0, 0 ); + mcast_req.member_rec.scope_state = + ib_member_set_scope_state( 2, IB_MC_REC_STATE_FULL_MEMBER ); + + mcast_req.mcast_context = p_port; + mcast_req.pfn_mcast_cb = __bcast_cb; + mcast_req.timeout_ms = p_port->p_adapter->params.sa_timeout; + mcast_req.retry_cnt = p_port->p_adapter->params.sa_retry_cnt; + mcast_req.port_guid = p_port->p_adapter->guids.port_guid; + mcast_req.pkey_index = 0; + + /* reference the object for the multicast join request. */ + ipoib_port_ref( p_port, ref_join_bcast ); + + status = p_port->p_adapter->p_ifc->join_mcast( p_port->ib_mgr.h_qp, &mcast_req ); + if( status != IB_SUCCESS ) + { + ipoib_port_deref( p_port, ref_bcast_create_failed ); + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_join_mcast returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + } + IPOIB_EXIT( IPOIB_DBG_INIT ); + return status; +} + + +void +ipoib_port_down( + IN ipoib_port_t* const p_port ) +{ + ib_api_status_t status; + ib_qp_mod_t qp_mod; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + /* + * Mark our state. This causes all callbacks to abort. + * Note that we hold the receive lock so that we synchronize + * with reposting. 
We must take the receive lock before the + * object lock since that is the order taken when reposting. + */ + cl_spinlock_acquire( &p_port->recv_lock ); + cl_obj_lock( &p_port->obj ); + p_port->state = IB_QPS_ERROR; + + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_PORT_DOWN, 0 ); + + if( p_port->ib_mgr.h_query ) + { + p_port->p_adapter->p_ifc->cancel_query( + p_port->p_adapter->h_al, p_port->ib_mgr.h_query ); + p_port->ib_mgr.h_query = NULL; + } + cl_obj_unlock( &p_port->obj ); + cl_spinlock_release( &p_port->recv_lock ); + + KeWaitForSingleObject( + &p_port->sa_event, Executive, KernelMode, FALSE, NULL ); + + /* + * Put the QP in the error state. This removes the need to + * synchronize with send/receive callbacks. + */ + CL_ASSERT( p_port->ib_mgr.h_qp ); + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_ERROR; + status = p_port->p_adapter->p_ifc->modify_qp( p_port->ib_mgr.h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_modify_qp to error state returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + p_port->p_adapter->hung = TRUE; + return; + } + + KeResetEvent(&p_port->leave_mcast_event); + + /* Reset all endpoints so we don't flush our ARP cache. */ + __endpt_mgr_reset_all( p_port ); + + KeWaitForSingleObject( + &p_port->leave_mcast_event, Executive, KernelMode, FALSE, NULL ); + + __pending_list_destroy(p_port); + + cl_obj_lock( &p_port->p_adapter->obj ); + ipoib_dereg_addrs( p_port->p_adapter ); + cl_obj_unlock( &p_port->p_adapter->obj ); + + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +__bcast_cb( + IN ib_mcast_rec_t *p_mcast_rec ) +{ + ipoib_port_t *p_port; + ib_api_status_t status; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + + p_port = (ipoib_port_t* __ptr64)p_mcast_rec->mcast_context; + + cl_obj_lock( &p_port->obj ); + if( p_port->state != IB_QPS_INIT ) + { + cl_obj_unlock( &p_port->obj ); + if( p_mcast_rec->status == IB_SUCCESS ) + + { + ipoib_port_ref(p_port, ref_leave_mcast); + p_port->p_adapter->p_ifc->leave_mcast( p_mcast_rec->h_mcast, __leave_error_mcast_cb ); + } + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + ipoib_port_deref( p_port, ref_bcast_inv_state ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Invalid state - Aborting.\n") ); + return; + } + + status = p_mcast_rec->status; + + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Multicast join for broadcast group returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( p_mcast_rec->status )) ); + if( status == IB_REMOTE_ERROR ) + { + /* + * Either: + * - the join failed because the group no longer exists + * - the create failed because the group already exists + * + * Kick off a new Get query to the SA to restart the join process + * from the top. Note that as an optimization, it would be + * possible to distinguish between the join and the create. + * If the join fails, try the create. If the create fails, start + * over with the Get. + */ + /* TODO: Assert is a place holder. Can we ever get here if the + state isn't IB_PNP_PORT_ADD or PORT_DOWN or PORT_INIT? 
*/ + CL_ASSERT( p_port->p_adapter->state == IB_PNP_PORT_ADD || + p_port->p_adapter->state == IB_PNP_PORT_DOWN || + p_port->p_adapter->state == IB_PNP_PORT_INIT ); + status = __port_get_bcast( p_port ); + } + else + { + NdisWriteErrorLogEntry( p_port->p_adapter->h_adapter, + EVENT_IPOIB_BCAST_JOIN, 1, p_mcast_rec->status ); + } + + cl_obj_unlock( &p_port->obj ); + if( status != IB_SUCCESS ) + { + ipoib_set_inactive( p_port->p_adapter ); + __endpt_mgr_reset_all( p_port ); + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + } + ipoib_port_deref( p_port, ref_bcast_req_failed ); + IPOIB_EXIT( IPOIB_DBG_INIT ); + return; + } + cl_obj_unlock( &p_port->obj ); + + status = __endpt_mgr_add_bcast( p_port, p_mcast_rec ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_add_bcast returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + ipoib_port_ref(p_port, ref_leave_mcast); + status = p_port->p_adapter->p_ifc->leave_mcast( p_mcast_rec->h_mcast, __leave_error_mcast_cb ); + CL_ASSERT( status == IB_SUCCESS ); + goto err; + } + + /* Get the QP ready for action. */ + status = __ib_mgr_activate( p_port ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__ib_mgr_activate returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + +err: + /* Flag the adapter as hung. */ + p_port->p_adapter->hung = TRUE; + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + ipoib_port_deref( p_port, ref_bcast_error ); + IPOIB_EXIT( IPOIB_DBG_INIT ); + return; + } + + cl_obj_lock( &p_port->obj ); + /* Only change the state if we're still in INIT. */ + if( p_port->state == IB_QPS_INIT ) + p_port->state = IB_QPS_RTS; + cl_obj_unlock( &p_port->obj ); + + /* Prepost receives. */ + cl_spinlock_acquire( &p_port->recv_lock ); + __recv_mgr_repost( p_port ); + cl_spinlock_release( &p_port->recv_lock ); + + /* Notify the adapter that we now have an active connection. */ + ipoib_set_active( p_port->p_adapter ); + + KeSetEvent( &p_port->sa_event, EVENT_INCREMENT, FALSE ); + ipoib_port_deref( p_port, ref_join_bcast ); + IPOIB_EXIT( IPOIB_DBG_INIT ); +} + + +static void +__qp_event( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); + CL_ASSERT( p_event_rec->context ); + ((ipoib_port_t* __ptr64)p_event_rec->context)->p_adapter->hung = TRUE; +} + + +static void +__cq_event( + IN ib_async_event_rec_t *p_event_rec ) +{ + UNUSED_PARAM( p_event_rec ); + CL_ASSERT( p_event_rec->context ); + ((ipoib_port_t* __ptr64)p_event_rec->context)->p_adapter->hung = TRUE; +} + + +static ib_api_status_t +__ib_mgr_activate( + IN ipoib_port_t* const p_port ) +{ + ib_api_status_t status; + ib_dgrm_info_t dgrm_info; + ib_qp_mod_t qp_mod; + + IPOIB_ENTER( IPOIB_DBG_INIT ); + /* + * Move the QP to RESET. This allows us to reclaim any + * unflushed receives. + */ + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_RESET; + status = p_port->p_adapter->p_ifc->modify_qp( p_port->ib_mgr.h_qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_modify_qp returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Move the QP to RTS. 
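+	 *
+	 * init_dgrm_svc is expected to take the UD QP through the
+	 * INIT -> RTR -> RTS transitions internally, using the broadcast
+	 * group's Q_Key and the P_Key index supplied in dgrm_info below.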
*/ + dgrm_info.port_guid = p_port->p_adapter->guids.port_guid; + dgrm_info.qkey = p_port->ib_mgr.bcast_rec.qkey; + dgrm_info.pkey_index = 0; + status = p_port->p_adapter->p_ifc->init_dgrm_svc( p_port->ib_mgr.h_qp, &dgrm_info ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_init_dgrm_svc returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* Rearm the CQs. */ + status = p_port->p_adapter->p_ifc->rearm_cq( p_port->ib_mgr.h_recv_cq, FALSE ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_rearm_cq for recv returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + status = p_port->p_adapter->p_ifc->rearm_cq( p_port->ib_mgr.h_send_cq, FALSE ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_rearm_cq for send returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + IPOIB_EXIT( IPOIB_DBG_INIT ); + return IB_SUCCESS; +} + + +/* Transition to a passive level thread. */ +ib_api_status_t +ipoib_port_join_mcast( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + IN const uint8_t state) +{ + ib_api_status_t status; + ib_mcast_req_t mcast_req; + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + + switch( __endpt_mgr_ref( p_port, mac, &p_endpt ) ) + { + case NDIS_STATUS_NO_ROUTE_TO_DESTINATION: + break; + + case NDIS_STATUS_SUCCESS: + ipoib_endpt_deref( p_endpt ); + /* Fall through */ + + case NDIS_STATUS_PENDING: + return IB_SUCCESS; + } + + /* + * Issue the mcast request, using the parameters of the broadcast group. + * This allows us to do a create request that should always succeed since + * the required parameters are known. + */ + cl_memclr( &mcast_req, sizeof(mcast_req) ); + mcast_req.create = TRUE; + + /* Copy the settings from the broadcast group. */ + mcast_req.member_rec = p_port->ib_mgr.bcast_rec; + /* Clear fields that aren't specified in the join */ + mcast_req.member_rec.mlid = 0; + ib_member_set_state( &mcast_req.member_rec.scope_state,state); + + if( mac.addr[0] == 1 && mac.addr[1] == 0 && mac.addr[2] == 0x5E ) + { + /* + * Update the address portion of the MGID with the 28 lower bits of the + * IP address. Since we're given a MAC address, we end up using only + * the 24 lower bits of that network-byte-ordered value (assuming MSb + * is zero). + */ + mcast_req.member_rec.mgid.raw[12] = 0; + mcast_req.member_rec.mgid.raw[13] = mac.addr[3]; + mcast_req.member_rec.mgid.raw[14] = mac.addr[4]; + mcast_req.member_rec.mgid.raw[15] = mac.addr[5]; + } + else + { + /* Handle non IP mutlicast MAC addresses. */ + /* Update the signature to use the lower 2 bytes of the OpenIB OUI. */ + mcast_req.member_rec.mgid.raw[2] = 0x14; + mcast_req.member_rec.mgid.raw[3] = 0x05; + /* Now copy the MAC address into the last 6 bytes of the GID. */ + cl_memcpy( &mcast_req.member_rec.mgid.raw[10], mac.addr, 6 ); + } + + mcast_req.mcast_context = p_port; + mcast_req.pfn_mcast_cb = __mcast_cb; + mcast_req.timeout_ms = p_port->p_adapter->params.sa_timeout; + mcast_req.retry_cnt = p_port->p_adapter->params.sa_retry_cnt; + mcast_req.port_guid = p_port->p_adapter->guids.port_guid; + mcast_req.pkey_index = 0; + + /* + * Create the endpoint and insert it in the port. 
Since we don't wait for + * the mcast SA operations to complete before returning from the multicast + * list set OID asynchronously, it is possible for the mcast entry to be + * cleared before the SA interaction completes. In this case, when the + * mcast callback is invoked, it would not find the corresponding endpoint + * and would be undone. + */ + p_endpt = ipoib_endpt_create( + &mcast_req.member_rec.mgid, 0, CL_HTON32(0x00FFFFFF) ); + if( !p_endpt ) + { + IPOIB_PRINT_EXIT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ipoib_endpt_create failed.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + status = __endpt_mgr_insert_locked( p_port, mac, p_endpt ); + if( status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("__endpt_mgr_insert_locked returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + return status; + } + + /* reference the object for the multicast join request. */ + ipoib_port_ref( p_port, ref_join_mcast ); + + status = p_port->p_adapter->p_ifc->join_mcast( p_port->ib_mgr.h_qp, &mcast_req ); + if( status != IB_SUCCESS ) + { + ipoib_port_deref( p_port, ref_mcast_join_failed ); + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("ib_join_mcast returned %s\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + } + + IPOIB_EXIT( IPOIB_DBG_MCAST ); + return status; +} + + +static void +__mcast_cb( + IN ib_mcast_rec_t *p_mcast_rec ) +{ + ib_api_status_t status; + ipoib_port_t *p_port; + cl_fmap_item_t *p_item; + cl_map_item_t *p_qitem; + ipoib_endpt_t *p_endpt; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + + p_port = (ipoib_port_t* __ptr64)p_mcast_rec->mcast_context; + + cl_obj_lock( &p_port->obj ); + if( p_port->state != IB_QPS_RTS ) + { + cl_obj_unlock( &p_port->obj ); + if( p_mcast_rec->status == IB_SUCCESS ) + + { + ipoib_port_ref(p_port, ref_leave_mcast); + p_port->p_adapter->p_ifc->leave_mcast( p_mcast_rec->h_mcast, __leave_error_mcast_cb ); + } + ipoib_port_deref( p_port, ref_mcast_inv_state ); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_INIT, + ("Invalid state - Aborting.\n") ); + return; + } + cl_obj_unlock( &p_port->obj ); + + if( p_mcast_rec->status != IB_SUCCESS ) + { + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_ERROR, + ("Multicast join request failed with status %s.\n", + p_port->p_adapter->p_ifc->get_err_str( p_mcast_rec->status )) ); + /* Flag the adapter as hung. */ + p_port->p_adapter->hung =TRUE; + ipoib_port_deref( p_port, ref_mcast_req_failed ); + IPOIB_EXIT( IPOIB_DBG_MCAST ); + return; + } + + cl_obj_lock( &p_port->obj ); + p_item = cl_fmap_get( + &p_port->endpt_mgr.gid_endpts, &p_mcast_rec->p_member_rec->mgid ); + if( p_item == cl_fmap_end( &p_port->endpt_mgr.gid_endpts ) ) + { + /* + * The endpoint must have been flushed while the join request + * was outstanding. Just leave the group and return. This + * is not an error. + */ + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT(TRACE_LEVEL_WARNING, IPOIB_DBG_ERROR, + ("Failed to find endpoint for update.\n") ); + + ipoib_port_ref(p_port, ref_leave_mcast); + p_port->p_adapter->p_ifc->leave_mcast( p_mcast_rec->h_mcast, __leave_error_mcast_cb ); + ipoib_port_deref( p_port, ref_mcast_no_endpt ); + IPOIB_EXIT( IPOIB_DBG_MCAST ); + return; + } + + p_endpt = PARENT_STRUCT( p_item, ipoib_endpt_t, gid_item ); + p_endpt->p_ifc = p_port->p_adapter->p_ifc; + + /* Setup the endpoint for use. 
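+	 * ipoib_endpt_set_mcast is expected to create the AV for the group
+	 * and record the multicast handle, mirroring the broadcast endpoint
+	 * setup in __endpt_mgr_add_bcast.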
*/ + status = ipoib_endpt_set_mcast( + p_endpt, p_port->ib_mgr.h_pd, p_port->port_num, p_mcast_rec ); + if( status != IB_SUCCESS ) + { + cl_obj_unlock( &p_port->obj ); + IPOIB_PRINT( TRACE_LEVEL_ERROR, IPOIB_DBG_MCAST, + ("ipoib_endpt_set_mcast returned %s.\n", + p_port->p_adapter->p_ifc->get_err_str( status )) ); + /* Flag the adapter as hung. */ + p_port->p_adapter->hung = TRUE; + ipoib_port_deref( p_port, ref_mcast_av_failed ); + IPOIB_EXIT( IPOIB_DBG_MCAST ); + return; + } + + /* + * The endpoint is already in the GID and MAC maps. + * Add it to the LID map if it has local scope. + */ + if( p_endpt->dlid ) + { + p_qitem = cl_qmap_insert( + &p_port->endpt_mgr.lid_endpts, p_endpt->dlid, &p_endpt->lid_item ); + CL_ASSERT( p_qitem == &p_endpt->lid_item ); + } + cl_obj_unlock( &p_port->obj ); + + /* Try to send all pending sends. */ + ipoib_port_resume( p_port ); + + ipoib_port_deref( p_port, ref_join_mcast ); + + IPOIB_EXIT( IPOIB_DBG_MCAST ); +} + + +void +ipoib_leave_mcast_cb( + IN void *context ) +{ + ipoib_port_t *p_port; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + + p_port = (ipoib_port_t* __ptr64)context; + + IPOIB_PRINT( TRACE_LEVEL_VERBOSE, IPOIB_DBG_MCAST,("p_port->mcast_cnt = %d\n", p_port->mcast_cnt)); + + ipoib_port_deref( p_port, ref_leave_mcast); + cl_atomic_dec( &p_port->mcast_cnt); + + if(0 == p_port->mcast_cnt) + { + KeSetEvent( &p_port->leave_mcast_event, EVENT_INCREMENT, FALSE ); + } + + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST, + ("Leave mcast callback deref ipoib_port \n") ); + + IPOIB_EXIT( IPOIB_DBG_MCAST ); +} + + + +void +__leave_error_mcast_cb( + IN void *context ) +{ + ipoib_port_t *p_port; + + IPOIB_ENTER( IPOIB_DBG_MCAST ); + + p_port = (ipoib_port_t* __ptr64)context; + + ipoib_port_deref( p_port, ref_leave_mcast); + IPOIB_PRINT_EXIT( TRACE_LEVEL_INFORMATION, IPOIB_DBG_MCAST, + ("Leave mcast callback deref ipoib_port \n") ); + + IPOIB_EXIT( IPOIB_DBG_MCAST ); +} + + diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_port.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_port.h new file mode 100644 index 00000000..443ec61a --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_port.h @@ -0,0 +1,620 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + + +#ifndef _IPOIB_PORT_H_ +#define _IPOIB_PORT_H_ + + +#include +#include +#include +#include +#include +#include "ipoib_endpoint.h" +#include "ipoib_xfr_mgr.h" + + +/* + * Define to place receive buffer inline in receive descriptor. + */ +#define IPOIB_INLINE_RECV 1 + + +/* Max send data segment list size. */ +#define MAX_SEND_SGE 8 + + +/* + * Define to control how transfers are done. When defined as 1, causes + * packets to be sent using NDIS DMA facilities (getting the SGL from the + * packet). When defined as 0, uses the NDIS_BUFFER structures as MDLs + * to get the physical page mappings of the buffers. + */ +#define IPOIB_USE_DMA 1 + + +#define IPOIB_PORT_FROM_PACKET( P ) \ + (((ipoib_port_t**)P->MiniportReservedEx)[0]) +#define IPOIB_ENDPT_FROM_PACKET( P ) \ + (((ipoib_endpt_t**)P->MiniportReservedEx)[1]) +#define IPOIB_RECV_FROM_PACKET( P ) \ + (((ipoib_recv_desc_t**)P->MiniportReservedEx)[1]) +#define IPOIB_SEND_FROM_PACKET( P ) \ + (((send_buf_t**)P->MiniportReservedEx)[2]) +#define IPOIB_PACKET_FROM_LIST_ITEM( I ) \ + (PARENT_STRUCT( I, NDIS_PACKET, MiniportReservedEx )) +#define IPOIB_LIST_ITEM_FROM_PACKET( P ) \ + ((cl_list_item_t*)P->MiniportReservedEx) + + +typedef struct _ipoib_ib_mgr +{ + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ib_cq_handle_t h_recv_cq; + ib_cq_handle_t h_send_cq; + ib_qp_handle_t h_qp; + ib_query_handle_t h_query; + net32_t qpn; + + ib_mr_handle_t h_mr; + net32_t lkey; + + uint8_t rate; + ib_member_rec_t bcast_rec; + +} ipoib_ib_mgr_t; +/* +* FIELDS +* h_ca +* CA handle for all IB resources. +* +* h_pd +* PD handle for all IB resources. +* +* h_recv_cq +* Recv CQ handle. +* +* h_send_cq +* Send CQ handle. +* +* h_qp +* QP handle for data transfers. +* +* h_query +* Query handle for cancelling SA queries. +* +* h_mr +* Registration handle for all of physical memory. Used for +* send/receive buffers to simplify growing the receive pool. +* +* lkey +* LKey for the memory region. +* +* bcast_rec +* Cached information about the broadcast group, used to specify +* parameters used to join other multicast groups. +*********/ + + +#include +/****s* IPoIB Driver/ipoib_hdr_t +* NAME +* ipoib_hdr_t +* +* DESCRIPTION +* IPoIB packet header. +* +* SYNOPSIS +*/ +typedef struct _ipoib_hdr +{ + net16_t type; + net16_t resv; + +} PACK_SUFFIX ipoib_hdr_t; +/* +* FIELDS +* type +* Protocol type. +* +* resv +* Reserved portion of IPoIB header. +*********/ + +typedef struct _ipoib_arp_pkt +{ + net16_t hw_type; + net16_t prot_type; + uint8_t hw_size; + uint8_t prot_size; + net16_t op; + ipoib_hw_addr_t src_hw; + net32_t src_ip; + ipoib_hw_addr_t dst_hw; + net32_t dst_ip; + +} PACK_SUFFIX ipoib_arp_pkt_t; + + +/****s* IPoIB Driver/ipoib_pkt_t +* NAME +* ipoib_pkt_t +* +* DESCRIPTION +* Represents an IPoIB packet with no GRH. +* +* SYNOPSIS +*/ +typedef struct _ipoib_pkt +{ + ipoib_hdr_t hdr; + union _payload + { + uint8_t data[MAX_PAYLOAD_MTU]; + ipoib_arp_pkt_t arp; + ip_pkt_t ip; + + } PACK_SUFFIX type; + +} PACK_SUFFIX ipoib_pkt_t; +/* +* FIELDS +* hdr +* IPoIB header. +* +* type +* Union for different types of payloads. +* +* type.data +* raw packet. +* +* type.ib_arp +* IPoIB ARP packet. +* +* type.arp +* Ethernet ARP packet. +* +* type.ip +* IP packet. +*********/ + + +/****s* IPoIB Driver/recv_buf_t +* NAME +* recv_buf_t +* +* DESCRIPTION +* Represents a receive buffer, including the ethernet header +* used to indicate the receive to the OS. 
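+*
+*	The eth.pad member below lines the ethernet payload up with the IPoIB
+*	payload: assuming a 40-byte ib_grh_t, the 4-byte ipoib_hdr_t defined
+*	above and a 14-byte eth_hdr_t, the pad is 40 + 4 - 14 = 30 bytes, so
+*	both arms of the union place their packet data at the same offset.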
+* +* SYNOPSIS +*/ +typedef union _recv_buf +{ + struct _recv_buf_type_eth + { + uint8_t pad[sizeof(ib_grh_t) + + sizeof(ipoib_hdr_t) - + sizeof(eth_hdr_t)]; + eth_pkt_t pkt; /* data starts at sizeof(grh)+sizeof(eth_hdr) */ + + } PACK_SUFFIX eth; + + struct _recv_buf_type_ib + { + ib_grh_t grh; /* Must be same offset as lcl_rt.ib.pkt */ + ipoib_pkt_t pkt; /* data starts at 10+grh+4 */ + + } PACK_SUFFIX ib; + +} PACK_SUFFIX recv_buf_t; +/* +* FIELDS +* eth.pkt +* Ethernet packet, used to indicate the receive to the OS. +* +* ib.grh +* GRH for a globally routed received packet. +* +* ib.pkt +* IPOIB packet representing a globally routed received packet. +* +* NOTES +* When posting the work request, the address of ib.grh is used. +* +* TODO: Do we need a pad to offset the header so that the data ends up +* aligned on a pointer boundary? +*********/ + +/****s* IPoIB Driver/send_buf_t +* NAME +* send_buf_t +* +* DESCRIPTION +* Represents a send buffer, used to convert packets to IPoIB format. +* +* SYNOPSIS +*/ +typedef union _send_buf +{ + uint8_t data[MAX_PAYLOAD_MTU]; + ipoib_arp_pkt_t arp; + ip_pkt_t ip; + +} PACK_SUFFIX send_buf_t; +/* +* FIELDS +* data +* IP/ARP packet. +* +* NOTES +* TODO: Do we need a pad to offset the header so that the data ends up +* aligned on a pointer boundary? +*********/ +#include + + +typedef struct _ipoib_buf_mgr +{ + cl_qpool_t recv_pool; + + NDIS_HANDLE h_packet_pool; + NDIS_HANDLE h_buffer_pool; + + NPAGED_LOOKASIDE_LIST send_buf_list; + NDIS_HANDLE h_send_pkt_pool; + NDIS_HANDLE h_send_buf_pool; + +} ipoib_buf_mgr_t; +/* +* FIELDS +* recv_pool +* Pool of ipoib_recv_desc_t structures. +* +* h_packet_pool +* NDIS packet pool, used to indicate receives to NDIS. +* +* h_buffer_pool +* NDIS buffer pool, used to indicate receives to NDIS. +* +* send_buf_list +* Lookaside list for dynamically allocating send buffers for send +* that require copies (ARP, DHCP, and any with more physical pages +* than can fit in the local data segments). +*********/ + + +typedef enum _ipoib_pkt_type +{ + PKT_TYPE_UCAST, + PKT_TYPE_BCAST, + PKT_TYPE_MCAST + +} ipoib_pkt_type_t; + + +typedef struct _ipoib_recv_desc +{ + cl_pool_item_t item; /* Must be first. */ + uint32_t len; + ipoib_pkt_type_t type; + ib_recv_wr_t wr; + ib_local_ds_t local_ds[2]; +#if IPOIB_INLINE_RECV + recv_buf_t buf; +#else + recv_buf_t *p_buf; +#endif + +} ipoib_recv_desc_t; +/* +* FIELDS +* item +* Pool item for storing descriptors in a pool. +* +* len +* Length to indicate to NDIS. This is different than the length of the +* received data as some data is IPoIB specific and filtered out. +* +* type +* Type of packet, used in filtering received packets against the packet +* filter. Also used to update stats. +* +* wr +* Receive work request. +* +* local_ds +* Local data segments. The second segment is only used if a buffer +* spans physical pages. +* +* buf +* Buffer for the receive. +* +* NOTES +* The pool item is always first to allow casting form a cl_pool_item_t or +* cl_list_item_t to the descriptor. +*********/ + + +typedef struct _ipoib_send_desc +{ + NDIS_PACKET *p_pkt; + ipoib_endpt_t *p_endpt; + send_buf_t *p_buf; + ib_send_wr_t wr; + ipoib_hdr_t pkt_hdr; + ib_local_ds_t local_ds[MAX_SEND_SGE]; /* Must be last. */ + +} ipoib_send_desc_t; +/* +* FIELDS +* p_pkt +* Pointer to the NDIS_PACKET associated with the send operation. +* +* p_endpt +* Endpoint for this send. +* +* p_buf +* Buffer for the send, if allocated. +* +* wr +* Send work request. 
+*
+*	pkt_hdr
+*		IPoIB packet header, pointed to by the first local data segment.
+*
+*	local_ds
+*		Local data segment array.  Placed last to allow allocating beyond the
+*		end of the descriptor for additional data segments.
+*
+* NOTES
+*	The pool item is always first to allow casting from a cl_pool_item_t or
+*	cl_list_item_t to the descriptor.
+*********/
+
+
+typedef struct _ipoib_recv_mgr
+{
+	int32_t			depth;
+
+	NDIS_PACKET		**recv_pkt_array;
+
+	cl_qlist_t		done_list;
+
+}	ipoib_recv_mgr_t;
+/*
+* FIELDS
+*	depth
+*		Current number of WRs posted.
+*
+*	recv_pkt_array
+*		Array of pointers to NDIS_PACKET used to indicate receives.
+*
+*	done_list
+*		List of receive descriptors that need to be indicated to NDIS.
+*********/
+
+
+typedef struct _ipoib_send_mgr
+{
+	atomic32_t		depth;
+	cl_qlist_t		pending_list;
+
+}	ipoib_send_mgr_t;
+/*
+* FIELDS
+*	depth
+*		Current number of WRs posted, used to queue pending requests.
+*
+*	pending_list
+*		List of NDIS_PACKET structures that are awaiting available WRs to send.
+*********/
+
+
+typedef struct _ipoib_endpt_mgr
+{
+	cl_qmap_t		mac_endpts;
+	cl_fmap_t		gid_endpts;
+	cl_qmap_t		lid_endpts;
+
+}	ipoib_endpt_mgr_t;
+/*
+* FIELDS
+*	mac_endpts
+*		Map of endpoints, keyed by MAC address.
+*
+*	gid_endpts
+*		Map of endpoints, keyed by GID.
+*
+*	lid_endpts
+*		Map of endpoints, keyed by LID.  Only endpoints on the same subnet
+*		are inserted in the LID map.
+*********/
+
+
+typedef struct _ipoib_port
+{
+	cl_obj_t				obj;
+	cl_obj_rel_t			rel;
+
+	ib_qp_state_t			state;
+
+	cl_spinlock_t			recv_lock;
+	cl_spinlock_t			send_lock;
+
+	struct _ipoib_adapter	*p_adapter;
+	uint8_t					port_num;
+
+	KEVENT					sa_event;
+
+	atomic32_t				mcast_cnt;
+	KEVENT					leave_mcast_event;
+
+	ipoib_ib_mgr_t			ib_mgr;
+
+	ipoib_buf_mgr_t			buf_mgr;
+
+	ipoib_recv_mgr_t		recv_mgr;
+	ipoib_send_mgr_t		send_mgr;
+
+	ipoib_endpt_mgr_t		endpt_mgr;
+
+	ipoib_endpt_t			*p_local_endpt;
+
+#if DBG
+	atomic32_t				ref[ref_array_size];
+#endif
+
+	atomic32_t				endpt_rdr;
+
+	atomic32_t				hdr_idx;
+	ipoib_hdr_t				hdr[1];	/* Must be last! */
+
+}	ipoib_port_t;
+/*
+* FIELDS
+*	obj
+*		Complib object for reference counting, relationships,
+*		and destruction synchronization.
+*
+*	rel
+*		Relationship to associate the port with the adapter.
+*
+*	state
+*		State of the port object.  Tracks QP state fairly closely.
+*
+*	recv_lock
+*		Spinlock to protect receive operations.
+*
+*	send_lock
+*		Spinlock to protect send operations.
+*
+*	p_adapter
+*		Parent adapter.  Used to get AL handle.
+*
+*	port_num
+*		Port number of this adapter.
+*
+*	ib_mgr
+*		IB resource manager.
+*
+*	recv_mgr
+*		Receive manager.
+*
+*	send_mgr
+*		Send manager.
+*
+*	endpt_mgr
+*		Endpoint manager.
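+*
+*	sa_event
+*		Event signalled when an outstanding SA query completes, fails or
+*		is cancelled.
+*
+*	mcast_cnt
+*		Number of multicast leave requests still outstanding.
+*
+*	leave_mcast_event
+*		Event signalled when mcast_cnt drops to zero.
+*
+*	buf_mgr
+*		Buffer manager for send and receive descriptors.
+*
+*	p_local_endpt
+*		Endpoint representing the local port.
+*
+*	ref
+*		Debug-only per-reason reference counts used to track port
+*		references.
+*
+*	endpt_rdr
+*		Count of threads currently reading the endpoint maps; waited on
+*		before endpoints are removed.
+*
+*	hdr_idx, hdr
+*		IPoIB header storage referenced by send data segments; the array
+*		must be last in the structure.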
+*********/ + + +ib_api_status_t +ipoib_create_port( + IN struct _ipoib_adapter* const p_adapter, + IN ib_pnp_port_rec_t* const p_pnp_rec, + OUT ipoib_port_t** const pp_port ); + +void +ipoib_port_destroy( + IN ipoib_port_t* const p_port ); + +void +ipoib_port_up( + IN ipoib_port_t* const p_port, + IN const ib_pnp_port_rec_t* const p_pnp_rec ); + +void +ipoib_port_down( + IN ipoib_port_t* const p_port ); + +ib_api_status_t +ipoib_port_join_mcast( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + IN const uint8_t state ); + + +void +ipoib_leave_mcast_cb( + IN void *context ); + + +void +ipoib_port_remove_endpt( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac ); + +void +ipoib_port_flush_endpts( + IN ipoib_port_t* const p_port ); + +void +ipoib_port_send( + IN ipoib_port_t* const p_port, + IN NDIS_PACKET **p_packet_array, + IN uint32_t num_packets ); + +void +ipoib_return_packet( + IN NDIS_HANDLE adapter_context, + IN NDIS_PACKET *p_packet ); + +void +ipoib_port_resume( + IN ipoib_port_t* const p_port ); + +NTSTATUS +ipoib_mac_to_gid( + IN ipoib_port_t* const p_port, + IN const mac_addr_t mac, + OUT ib_gid_t* p_gid ); + +inline void ipoib_port_ref( + IN ipoib_port_t * p_port, + IN int type); + +inline void ipoib_port_deref( + IN ipoib_port_t * p_port, + IN int type); + + +#endif /* _IPOIB_PORT_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/ipoib_xfr_mgr.h b/branches/Ndi/ulp/ipoib/kernel/ipoib_xfr_mgr.h new file mode 100644 index 00000000..8e5ca00c --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/ipoib_xfr_mgr.h @@ -0,0 +1,537 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _IPOIB_XFR_MGR_H_ +#define _IPOIB_XFR_MGR_H_ + + +#include +#include +#include +#include +#include +#include +#include + + +#include "ipoib_driver.h" +#include "ip_stats.h" +#include + + +#include +/****s* IPoIB Driver/ipoib_hw_addr_t +* NAME +* ipoib_hw_addr_t +* +* DESCRIPTION +* The ipoib_hw_addr_t structure defines an IPoIB compatible hardware +* address. Values in this structure are stored in network order. +* +* SYNOPSIS +*/ +typedef struct _ipoib_hw_addr +{ + uint32_t flags_qpn; + ib_gid_t gid; + +} PACK_SUFFIX ipoib_hw_addr_t; +/* +* FIELDS +* flags_qpn +* Flags and queue pair number. Use ipoib_addr_get_flags, +* ipoib_addr_set_flags, ipoib_addr_set_qpn, and ipoib_addr_get_qpn +* to manipulate the contents. +* +* gid +* IB GID value. 
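+*
+* NOTES
+*	This matches the 20-byte IPoIB link-layer address format: one byte of
+*	flags, a 24-bit QPN and the 16-byte port GID, all in network byte
+*	order.  For example, QPN 0x000ABC with no flags set is stored as the
+*	bytes 00 00 0A BC followed by the 16 bytes of the GID.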
+* +* SEE ALSO +* IPoIB, ipoib_addr_get_flags, ipoib_addr_set_flags, ipoib_addr_set_qpn, +* ipoib_addr_get_qpn +*********/ +#include + + + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/* + * Address accessors + */ + +static inline uint8_t +ipoib_addr_get_flags( + IN const ipoib_hw_addr_t* const p_addr ) +{ + return (uint8_t)(cl_ntoh32( p_addr->flags_qpn ) >> 24); +} + +static inline void +ipoib_addr_set_flags( + IN ipoib_hw_addr_t* const p_addr, + IN const uint8_t flags ) +{ + p_addr->flags_qpn &= cl_ntoh32( 0xFFFFFF00 ); + p_addr->flags_qpn |= cl_ntoh32( flags ); +} + +static inline net32_t +ipoib_addr_get_qpn( + IN const ipoib_hw_addr_t* const p_addr ) +{ + return cl_ntoh32( cl_ntoh32( p_addr->flags_qpn ) >> 8 ); +} + +static inline void +ipoib_addr_set_qpn( + IN ipoib_hw_addr_t* const p_addr, + IN const net32_t qpn ) +{ + p_addr->flags_qpn = cl_ntoh32( (cl_ntoh32( + p_addr->flags_qpn ) & 0x000000FF ) | (cl_ntoh32( qpn ) << 8) ); +} + + +/****f* IPOIB/ipoib_mac_from_sst_guid +* NAME +* ipoib_mac_from_sst_guid +* +* DESCRIPTION +* Generates an ethernet MAC address given a SilverStorm port GUID. +* +* SYNOPSIS +*/ +static inline ib_api_status_t +ipoib_mac_from_sst_guid( + IN const net64_t port_guid, + OUT mac_addr_t* const p_mac_addr ) +{ + const uint8_t *p_guid = (const uint8_t*)&port_guid; + uint32_t low24; + + /* Port guid is in network byte order. OUI is in lower 3 bytes. */ + ASSERT( p_guid[0] == 0x00 && p_guid[1] == 0x06 && p_guid[2] == 0x6a ); + + /* + * We end up using only the lower 23-bits of the GUID. Trap that + * the 24th (bit 23) through 27th (bit 26) bit aren't set. + */ + if( port_guid & CL_HTON64( 0x0000000007800000 ) ) + return IB_INVALID_GUID; + + low24 = 0x00FFF000 - + ((((uint32_t)cl_ntoh64( port_guid ) & 0x00FFFFFF) - 0x101) * 2); + low24 -= p_guid[3]; /* minus port number */ + + p_mac_addr->addr[0] = p_guid[0]; + p_mac_addr->addr[1] = p_guid[1]; + p_mac_addr->addr[2] = p_guid[2]; + p_mac_addr->addr[3] = (uint8_t)(low24 >> 16); + p_mac_addr->addr[4] = (uint8_t)(low24 >> 8); + p_mac_addr->addr[5] = (uint8_t)low24; + + return IB_SUCCESS; +} +/* +* PARAMETERS +* port_guid +* The port GUID, in network byte order, for which to generate a +* MAC address. +* +* p_mac_addr +* Pointer to a mac address in which to store the results. +* +* RETURN VALUES +* IB_SUCCESS +* The MAC address was successfully converted. +* +* IB_INVALID_GUID +* The port GUID provided was not a known GUID format. +* +* NOTES +* The algorithm to convert portGuid to MAC address is as per DN0074, and +* assumes a 2 port HCA. +* +* SEE ALSO +* IPOIB +*********/ + + +/****f* IPOIB/ipoib_mac_from_mlx_guid +* NAME +* ipoib_mac_from_sst_guid +* +* DESCRIPTION +* Generates an ethernet MAC address given a Mellanox port GUID. +* +* SYNOPSIS +*/ +static inline ib_api_status_t +ipoib_mac_from_mlx_guid( + IN const net64_t port_guid, + OUT mac_addr_t* const p_mac_addr ) +{ + const uint8_t *p_guid = (const uint8_t*)&port_guid; + uint32_t low24; + + /* Port guid is in network byte order. OUI is in lower 3 bytes. 
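+	 * A Mellanox port GUID is expected to look like
+	 * 00:02:c9:02:00:xx:yy:zz, i.e. the OUI, a fixed 02 00 filler
+	 * (verified below), and a 24-bit serial number that becomes the low
+	 * three bytes of the MAC address.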
*/ + ASSERT( p_guid[0] == 0x00 && p_guid[1] == 0x02 && p_guid[2] == 0xc9 ); + + if( (port_guid & CL_HTON64( 0x000000ffff000000 )) != + CL_HTON64(0x0000000200000000)) + { + return IB_INVALID_GUID; + } + + low24 = ((uint32_t)cl_ntoh64( port_guid ) & 0x00FFFFFF); + + p_mac_addr->addr[0] = p_guid[0]; + p_mac_addr->addr[1] = p_guid[1]; + p_mac_addr->addr[2] = p_guid[2]; + p_mac_addr->addr[3] = (uint8_t)(low24 >> 16); + p_mac_addr->addr[4] = (uint8_t)(low24 >> 8); + p_mac_addr->addr[5] = (uint8_t)low24; + + return IB_SUCCESS; +} +/* +* PARAMETERS +* port_guid +* The port GUID, in network byte order, for which to generate a +* MAC address. +* +* p_mac_addr +* Pointer to a mac address in which to store the results. +* +* RETURN VALUES +* IB_SUCCESS +* The MAC address was successfully converted. +* +* IB_INVALID_GUID +* The port GUID provided was not a known GUID format. +* +* SEE ALSO +* IPOIB +*********/ + + +/****f* IPOIB/ipoib_mac_from_voltaire_guid +* NAME +* ipoib_mac_from_voltaire_guid +* +* DESCRIPTION +* Generates an ethernet MAC address given a Voltaire port GUID. +* +* SYNOPSIS +*/ +static inline ib_api_status_t +ipoib_mac_from_voltaire_guid( + IN const net64_t port_guid, + OUT mac_addr_t* const p_mac_addr ) +{ + const uint8_t *p_guid = (const uint8_t*)&port_guid; + + /* Port guid is in network byte order. OUI is in lower 3 bytes. */ + ASSERT( p_guid[0] == 0x00 && p_guid[1] == 0x08 && p_guid[2] == 0xf1 ); + + p_mac_addr->addr[0] = p_guid[0]; + p_mac_addr->addr[1] = p_guid[1]; + p_mac_addr->addr[2] = p_guid[2]; + p_mac_addr->addr[3] = p_guid[4] ^ p_guid[6]; + p_mac_addr->addr[4] = p_guid[5] ^ p_guid[7]; + p_mac_addr->addr[5] = p_guid[5] + p_guid[6] + p_guid[7]; + + return IB_SUCCESS; +} +/* +* PARAMETERS +* port_guid +* The port GUID, in network byte order, for which to generate a +* MAC address. +* +* p_mac_addr +* Pointer to a mac address in which to store the results. +* +* RETURN VALUES +* IB_SUCCESS +* The MAC address was successfully converted. +* +* SEE ALSO +* IPOIB +*********/ + + +/****f* IPOIB/ipoib_mac_from_guid +* NAME +* ipoib_mac_from_guid +* +* DESCRIPTION +* Generates an ethernet MAC address given a port GUID. +* +* SYNOPSIS +*/ +static inline ib_api_status_t +ipoib_mac_from_guid( + IN const net64_t port_guid, + OUT mac_addr_t* const p_mac_addr ) +{ + ib_api_status_t status; + const uint8_t *p_guid = (const uint8_t*)&port_guid; + uint32_t laa; + + /* Port guid is in network byte order. OUI is in lower 3 bytes. */ + if( p_guid[0] == 0x00 && p_guid[1] == 0x06 && p_guid[2] == 0x6a ) + { + status = ipoib_mac_from_sst_guid( port_guid, p_mac_addr ); + if( status == IB_SUCCESS ) + return IB_SUCCESS; + } + else if( p_guid[0] == 0x00 && p_guid[1] == 0x02 && p_guid[2] == 0xc9 ) + { + status = ipoib_mac_from_mlx_guid( port_guid, p_mac_addr ); + if( status == IB_SUCCESS ) + return IB_SUCCESS; + } + else if( p_guid[0] == 0x00 && p_guid[1] == 0x08 && p_guid[2] == 0xf1 ) + { + status = ipoib_mac_from_voltaire_guid( port_guid, p_mac_addr ); + if( status == IB_SUCCESS ) + return IB_SUCCESS; + } + + /* Value of zero is reserved. */ + laa = cl_atomic_inc( &g_ipoib.laa_idx ); + + if( !laa ) + return IB_INVALID_GUID; + + p_mac_addr->addr[0] = 2; /* LAA bit */ + p_mac_addr->addr[1] = 0; + p_mac_addr->addr[2] = (uint8_t)(laa >> 24); + p_mac_addr->addr[3] = (uint8_t)(laa >> 16); + p_mac_addr->addr[4] = (uint8_t)(laa >> 8); + p_mac_addr->addr[5] = (uint8_t)laa; + + return IB_SUCCESS; +} +/* +* PARAMETERS +* port_guid +* The port GUID, in network byte order, for which to generate a +* MAC address. 
+*
+*	p_mac_addr
+*		Pointer to a mac address in which to store the results.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The MAC address was successfully converted.
+*
+*	IB_INVALID_GUID
+*		The port GUID provided was not a known GUID format.
+*
+* NOTES
+*	Creates a locally administered address using a global incrementing counter.
+*
+* SEE ALSO
+*	IPOIB
+*********/
+
+
+/****f* IPOIB/ipoib_sst_guid_from_mac
+* NAME
+*	ipoib_sst_guid_from_mac
+*
+* DESCRIPTION
+*	Generates a port GUID given an ethernet MAC address.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+ipoib_sst_guid_from_mac(
+	IN		const	mac_addr_t					mac,
+		OUT			net64_t* const				p_port_guid )
+{
+	uint8_t		*p_guid = (uint8_t*)p_port_guid;
+	uint32_t	low24;
+
+	/* MAC address is in network byte order.  OUI is in lower 3 bytes. */
+	if( mac.addr[0] != 0x00 ||
+		mac.addr[1] != 0x06 ||
+		mac.addr[2] != 0x6a )
+	{
+		return IB_INVALID_GUID;
+	}
+
+	low24 = mac.addr[3] << 16 | mac.addr[4] << 8 | mac.addr[5];
+
+	low24 = 0x00FFF000 - low24;
+	/* Divide by two. */
+	low24 >>= 1;
+	/* Add the serial number base offset. */
+	low24 += 0x101;
+
+	/* OUI */
+	p_guid[0] = mac.addr[0];
+	p_guid[1] = mac.addr[1];
+	p_guid[2] = mac.addr[2];
+	/* Port number */
+	p_guid[3] = mac.addr[5] & 0x01;
+	/* Type */
+	p_guid[4] = 0x98;
+	/* Serial Number */
+	p_guid[5] = (uint8_t)(low24 >> 16);
+	p_guid[6] = (uint8_t)(low24 >> 8);
+	p_guid[7] = (uint8_t)low24;
+
+	return IB_SUCCESS;
+}
+/*
+* PARAMETERS
+*	mac
+*		The MAC address, in network byte order, for which to generate a
+*		port GUID.
+*
+*	p_port_guid
+*		Pointer to a port GUID in which to store the results.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The port GUID was successfully generated.
+*
+*	IB_INVALID_GUID
+*		The MAC address provided was not generated from a known GUID format.
+*
+* NOTES
+*	The algorithm to convert portGuid to MAC address is as per DN0074, and
+*	assumes a 2 port HCA.
+*
+* SEE ALSO
+*	IPOIB
+*********/
+
+
+/****f* IPOIB/ipoib_mlx_guid_from_mac
+* NAME
+*	ipoib_mlx_guid_from_mac
+*
+* DESCRIPTION
+*	Generates a port GUID given an ethernet MAC address.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+ipoib_mlx_guid_from_mac(
+	IN		const	mac_addr_t					mac,
+		OUT			net64_t* const				p_port_guid )
+{
+	uint8_t		*p_guid = (uint8_t*)p_port_guid;
+	uint32_t	low24;
+
+	/* MAC address is in network byte order.  OUI is in lower 3 bytes. */
+	if( mac.addr[0] != 0x00 ||
+		mac.addr[1] != 0x02 ||
+		mac.addr[2] != 0xc9 )
+	{
+		return IB_INVALID_GUID;
+	}
+
+	low24 = mac.addr[3] << 16 | mac.addr[4] << 8 | mac.addr[5];
+
+	/* OUI */
+	p_guid[0] = mac.addr[0];
+	p_guid[1] = mac.addr[1];
+	p_guid[2] = mac.addr[2];
+	p_guid[3] = 0x02;
+	p_guid[4] = 0x00;
+	/* Serial Number */
+	p_guid[5] = (uint8_t)(low24 >> 16);
+	p_guid[6] = (uint8_t)(low24 >> 8);
+	p_guid[7] = (uint8_t)low24;
+
+	return IB_SUCCESS;
+}
+/*
+* PARAMETERS
+*	mac
+*		The MAC address, in network byte order, for which to generate a
+*		port GUID.
+*
+*	p_port_guid
+*		Pointer to a port GUID in which to store the results.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*		The port GUID was successfully generated.
+*
+*	IB_INVALID_GUID
+*		The MAC address provided was not generated from a known GUID format.
+* +* NOTES +* The algorithm to convert portGuid to MAC address is as +* +* SEE ALSO +* IPOIB +*********/ + + +/****f* IPOIB/ipoib_is_voltaire_router_gid +* NAME +* ipoib_is_voltaire_router_gid +* +* DESCRIPTION +* Checks whether the GID belongs to Voltaire IP router +* +* SYNOPSIS +*/ +boolean_t +static inline +ipoib_is_voltaire_router_gid( + IN const ib_gid_t *p_gid ) +{ + static const uint8_t VOLTAIRE_GUID_PREFIX[] = {0, 0x08, 0xf1, 0, 0x1}; + + return !cl_memcmp( &p_gid->unicast.interface_id, VOLTAIRE_GUID_PREFIX, + sizeof(VOLTAIRE_GUID_PREFIX) ); +} + + +#ifdef __cplusplus +} +#endif + +#endif /* _IPOIB_XFR_MGR_H_ */ diff --git a/branches/Ndi/ulp/ipoib/kernel/makefile b/branches/Ndi/ulp/ipoib/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/ipoib/kernel/netipoib.inf b/branches/Ndi/ulp/ipoib/kernel/netipoib.inf new file mode 100644 index 00000000..4b2b8002 --- /dev/null +++ b/branches/Ndi/ulp/ipoib/kernel/netipoib.inf @@ -0,0 +1,196 @@ +; OpenIB Internet Protocol over InfiniBand Adapter +; Copyright 2005 SilverStorm Technologies all Rights Reserved. +; Copyright 2006 Mellanox Technologies all Rights Reserved. + +[Version] +Signature = "$Windows NT$" +Class = Net +ClassGUID = {4d36e972-e325-11ce-bfc1-08002be10318} +Provider = %OPENIB% +DriverVer=03/08/2006,1.0.0000.614 + +[Manufacturer] +%OPENIB% = OPENIB,ntx86,ntamd64,ntia64 + +[ControlFlags] +ExcludeFromSelect = IBA\IPoIB + +[OPENIB] +; empty since we don't support W9x/Me + +[OPENIB.ntx86] +%IpoibDesc% = Ipoib.DDInstall, IBA\IPoIB ; Internet Protocol over InfiniBand Adapter + +[OPENIB.ntamd64] +%IpoibDesc% = Ipoib.DDInstall, IBA\IPoIB ; Internet Protocol over InfiniBand Adapter + +[OPENIB.ntia64] +%IpoibDesc% = Ipoib.DDInstall, IBA\IPoIB ; Internet Protocol over InfiniBand Adapter + +[Ipoib.DDInstall.ntx86] +Characteristics = 0x81 ; NCF_HAS_UI | NCF_VIRTUAL +AddReg = IpoibAddReg +CopyFiles = IpoibCopyFiles +CopyFiles = WsdCopyFiles + +[Ipoib.DDInstall.ntamd64] +Characteristics = 0x81 ; NCF_HAS_UI | NCF_VIRTUAL +AddReg = IpoibAddReg +CopyFiles = IpoibCopyFiles +CopyFiles = WsdCopyFiles +CopyFiles = WOW64CopyFiles + +[Ipoib.DDInstall.ntia64] +Characteristics = 0x81 ; NCF_HAS_UI | NCF_VIRTUAL +AddReg = IpoibAddReg +CopyFiles = IpoibCopyFiles +CopyFiles = WsdCopyFiles +CopyFiles = WOW64CopyFiles + +[Ipoib.DDInstall.ntx86.Services] +AddService = ipoib, 2, IpoibService, IpoibEventLog + +[Ipoib.DDInstall.ntamd64.Services] +AddService = ipoib, 2, IpoibService, IpoibEventLog + +[Ipoib.DDInstall.ntia64.Services] +AddService = ipoib, 2, IpoibService, IpoibEventLog + +[IpoibAddReg] +HKR, Ndi, Service, 0, "ipoib" +HKR, Ndi\Interfaces, UpperRange, 0, "ndis5" +HKR, Ndi\Interfaces, LowerRange, 0, "ethernet" + +HKR, Ndi\Params\RqDepth, ParamDesc, 0, "Receive Queue Depth" +HKR, Ndi\Params\RqDepth, Type, 0, "dword" +HKR, Ndi\Params\RqDepth, Default, 0, "512" +HKR, Ndi\Params\RqDepth, Optional, 0, "0" +HKR, Ndi\Params\RqDepth, Min, 0, "128" +HKR, Ndi\Params\RqDepth, Max, 0, "1024" +HKR, Ndi\Params\RqDepth, Step, 0, "128" + +HKR, Ndi\Params\RqLowWatermark, ParamDesc, 0, "Receive Queue Low Watermark" +HKR, Ndi\Params\RqLowWatermark, Type, 0, "dword" +HKR, 
Ndi\Params\RqLowWatermark, Default, 0, "4" +HKR, Ndi\Params\RqLowWatermark, Optional, 0, "1" +HKR, Ndi\Params\RqLowWatermark, Min, 0, "2" +HKR, Ndi\Params\RqLowWatermark, Max, 0, "8" +HKR, Ndi\Params\RqLowWatermark, Step, 0, "1" + +HKR, Ndi\Params\SqDepth, ParamDesc, 0, "Send Queue Depth" +HKR, Ndi\Params\SqDepth, Type, 0, "dword" +HKR, Ndi\Params\SqDepth, Default, 0, "512" +HKR, Ndi\Params\SqDepth, Optional, 0, "0" +HKR, Ndi\Params\SqDepth, Min, 0, "128" +HKR, Ndi\Params\SqDepth, Max, 0, "1024" +HKR, Ndi\Params\SqDepth, Step, 0, "128" + +HKR, Ndi\Params\SendChksum, ParamDesc, 0, "Send Checksum Offload" +HKR, Ndi\Params\SendChksum, Type, 0, "enum" +HKR, Ndi\Params\SendChksum, Default, 0, "0" +HKR, Ndi\Params\SendChksum, Optional, 0, "0" +HKR, Ndi\Params\SendChksum\enum, "0", 0, "Disabled" +HKR, Ndi\Params\SendChksum\enum, "1", 0, "Enabled" + +HKR, Ndi\Params\RecvChksum, ParamDesc, 0, "Recv Checksum Offload" +HKR, Ndi\Params\RecvChksum, Type, 0, "enum" +HKR, Ndi\Params\RecvChksum, Default, 0, "0" +HKR, Ndi\Params\RecvChksum, Optional, 0, "0" +HKR, Ndi\Params\RecvChksum\enum, "0", 0, "Disabled" +HKR, Ndi\Params\RecvChksum\enum, "1", 0, "Enabled" + +HKR, Ndi\Params\SaTimeout, ParamDesc, 0, "SA Query Timeout (ms)" +HKR, Ndi\Params\SaTimeout, Type, 0, "dword" +HKR, Ndi\Params\SaTimeout, Default, 0, "1000" +HKR, Ndi\Params\SaTimeout, Optional, 0, "0" +HKR, Ndi\Params\SaTimeout, Min, 0, "500" +HKR, Ndi\Params\SaTimeout, Step, 0, "250" + +HKR, Ndi\Params\SaRetries, ParamDesc, 0, "SA Query Retry Count" +HKR, Ndi\Params\SaRetries, Type, 0, "dword" +HKR, Ndi\Params\SaRetries, Default, 0, "10" +HKR, Ndi\Params\SaRetries, Optional, 0, "0" +HKR, Ndi\Params\SaRetries, Min, 0, "1" + +HKR, Ndi\Params\RecvRatio, ParamDesc, 0, "Receive Pool Ratio" +HKR, Ndi\Params\RecvRatio, Type, 0, "dword" +HKR, Ndi\Params\RecvRatio, Default, 0, "1" +HKR, Ndi\Params\RecvRatio, Optional, 0, "0" +HKR, Ndi\Params\RecvRatio, Min, 0, "1" +HKR, Ndi\Params\RecvRatio, Max, 0, "10" + +HKR, Ndi\Params\PayloadMtu, ParamDesc, 0, "Payload Mtu size" +HKR, Ndi\Params\PayloadMtu, Type, 0, "dword" +HKR, Ndi\Params\PayloadMtu, Default, 0, "2044" +HKR, Ndi\Params\PayloadMtu, Min, 0, "60" +HKR, Ndi\Params\PayloadMtu, Max, 0, "2044" + +[IpoibService] +DisplayName = %IpoibServiceDispName% +ServiceType = 1 ;%SERVICE_KERNEL_DRIVER% +StartType = 3 ;%SERVICE_DEMAND_START% +ErrorControl = 1 ;%SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ipoib.sys +LoadOrderGroup = NDIS +AddReg = Ipoib.ParamsReg + +[Ipoib.ParamsReg] +HKR,"Parameters","DebugLevel",%REG_DWORD_NO_CLOBBER%,0x00000002 +HKR,"Parameters","DebugFlags",%REG_DWORD_NO_CLOBBER%,0x00000fff +HKR,"Parameters","bypass_check_bcast_rate",%REG_DWORD_NO_CLOBBER%,0x00000000 + +[IpoibEventLog] +AddReg = IpoibAddEventLogReg + +[IpoibAddEventLogReg] +HKR, , EventMessageFile, 0x00020000, "%%SystemRoot%%\System32\netevent.dll;%%SystemRoot%%\System32\drivers\ipoib.sys" +HKR, , TypesSupported, 0x00010001, 7 + + +[IpoibCopyFiles] +ipoib.sys,,,2 + +[WsdCopyFiles] +ibwsd.dll,,,0x00000002 + +[WOW64CopyFiles] +ibwsd.dll,ibwsd32.dll,,0x00000002 + +[SourceDisksNames.x86] +1 = %IcsDisk1%,,,"" + +[SourceDisksNames.amd64] +1 = %IcsDisk1%,,,"" + +[SourceDisksNames.ia64] +1 = %IcsDisk1%,,,"" + +[SourceDisksFiles.x86] +ipoib.sys = 1 +ibwsd.dll = 1 + +[SourceDisksFiles.amd64] +ipoib.sys = 1 +ibwsd.dll = 1 +ibwsd32.dll = 1 + +[SourceDisksFiles.ia64] +ipoib.sys = 1 +ibwsd.dll = 1 +ibwsd32.dll = 1 + +[DestinationDirs] +IpoibCopyFiles = %DIRID_DRIVERS% +WsdCopyFiles = %DIRID_SYSTEM% +WOW64CopyFiles = %DIRID_SYSTEM_X86% 
diff --git a/branches/Ndi/ulp/opensm/dirs b/branches/Ndi/ulp/opensm/dirs
new file mode 100644
index 00000000..db5a8974
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+	user
diff --git a/branches/Ndi/ulp/opensm/user/README.opensm-build b/branches/Ndi/ulp/opensm/user/README.opensm-build
new file mode 100644
index 00000000..cccd07a1
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/README.opensm-build
@@ -0,0 +1,24 @@
+##### Begin svn/gen1/trunk/src/userspace/osm/README.opensm-build
+
+# How to Build the OpenSM Subnet Manager
+# ----------------------------------------
+
+# This file is arranged as a shell script so you can paste or execute
+# it.  This is for building from the SVN source at openib.org.
+
+# 1. Complete the steps outlined in README.kernel-build.
+# 2. Complete the steps outlined in README.user-build.
+
+# Set TOP to wherever you've checked out the openib repository.
+TOP=/usr/src/openib
+
+export TSHOME=$TOP/src/linux-kernel/infiniband/include/
+export MTHOME=$TOP/src/userspace/hw/mellanox-hca/mthome/
+
+# Add the util dir to the path for makedepend.
+export PATH=$TOP/src/userspace/osm/util:$PATH
+
+cd $TOP/src/userspace/osm
+make VENDOR=ts
+
+##### end
\ No newline at end of file
diff --git a/branches/Ndi/ulp/opensm/user/TODO b/branches/Ndi/ulp/opensm/user/TODO
new file mode 100644
index 00000000..684ef454
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/TODO
@@ -0,0 +1,16 @@
+Support a new HOQ value for the ports feeding HCA ports.
+
+Support static LID assignment, with a flag for specifying a file that maps GUID to LID.
+
+Support PKey comp in Path Record.
+Support SL comp in Path Record.
+
+Support VL traversal in Path Record - compute the SL accordingly.
+  Make it a runtime option, so we do not pay the price if no SLs are used.
+
+Support a fast Path Record mode - if the fabric is totally uniform in rate and
+  MTU, there is no need to traverse the path at all...
+
+Improve the MinHop routing algorithm such that it only calculates the min hops
+for switches, then
+
diff --git a/branches/Ndi/ulp/opensm/user/config.h b/branches/Ndi/ulp/opensm/user/config.h
new file mode 100644
index 00000000..5d7c42de
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/config.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/*
+ * Abstract:
+ *	Windows-specific definitions
+ *
+ * Environment:
+ *	Windows
+ *
+ * $Revision: $
+ */
+
+#ifndef _CONFIG_h_
+#define _CONFIG_h_
+
+#include <io.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#define chmod(a,b)	_chmod(a,b)
+#define S_IRUSR		_S_IREAD
+#define S_IWUSR		_S_IWRITE
+
+#define snprintf	_snprintf
+#define fileno		_fileno
+
+#define stat		_stat
+#define fstat(a,b)	fstat_alias((a),(b))
+
+/* Map fstat() onto the Windows _fstat() via a small inline wrapper. */
+inline int
+fstat_alias(int filedes, struct _stat *buf)
+{
+	return _fstat(filedes, buf);
+}
+
+#endif	/* _CONFIG_h_ */
+
+
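Given the aliases above, OpenSM sources written against the POSIX names compile unchanged on MSVC: the preprocessor rewrites stat, fstat, fileno, and friends into their underscore-prefixed CRT equivalents. A small hypothetical example of code that relies on the shims (file_size() is made up for illustration, and the relative include path is an assumption):

    #include <stdio.h>
    #include "config.h"

    static long file_size(const char *path)
    {
        struct stat st;   /* expands to struct _stat via the stat macro */
        FILE *f = fopen(path, "rb");
        long size = -1;

        if (f != NULL)
        {
            /* fstat() expands to fstat_alias(), which calls _fstat();
             * fileno() expands to _fileno(). */
            if (fstat(fileno(f), &st) == 0)
                size = (long)st.st_size;
            fclose(f);
        }
        return size;
    }

    int main(void)
    {
        printf("%ld\n", file_size("config.h"));
        return 0;
    }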
diff --git a/branches/Ndi/ulp/opensm/user/dirs b/branches/Ndi/ulp/opensm/user/dirs
new file mode 100644
index 00000000..d7822e5e
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/dirs
@@ -0,0 +1,7 @@
+DIRS=\
+	libvendor \
+	libopensm \
+	opensm \
+	osmtest \
+	ibtrapgen
+
diff --git a/branches/Ndi/ulp/opensm/user/doc/OpenSM_PKey_Mgr.txt b/branches/Ndi/ulp/opensm/user/doc/OpenSM_PKey_Mgr.txt
new file mode 100644
index 00000000..df4031cd
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/doc/OpenSM_PKey_Mgr.txt
@@ -0,0 +1,79 @@
+OpenSM Partition Management
+---------------------------
+
+Roadmap:
+Phase 1 - provide partition management at the EndPort (HCA, Router and Switch
+  Port 0) level, with no routing effects.
+Phase 2 - the routing engine should take partitions into account.
+
+Phase 1 functionality:
+
+Supported Policy:
+
+1. EndPort partition groups are to be defined by listing the
+   PortGUIDs as full and limited members.
+
+2. Each partition group might be assigned an explicit P_Key (only the 15
+   LSB bits are valid), or the SM should assign it randomly.
+
+3. A flag should control the generation of the IPoIB broadcast group for
+   that partition. Extra optional MGIDs can be provided to be set up (on
+   top of the IPoIB broadcast group).
+
+4. A global flag "Disconnect Unconfigured EndPorts": if TRUE, it prevents
+   EndPorts that are not explicitly defined as part of any partition
+   (thus "unconfigured") from communicating with any other EndPort. Otherwise,
+   it will let these EndPorts send packets to all other EndPorts.
+
+Functionality:
+
+1. The policy should be updated:
+   - during SM bringup
+   - after kill -HUP
+   - through SNMP (once it is supported)
+
+2. Partition tables will be updated on a full sweep (new port/trap, etc.).
+   As a first step, the policy's feasibility should be
+   verified. Feasibility could be limited by the EndPorts' support for the
+   number of partitions, etc. An unrealizable policy should be reported,
+   and the extra rules ignored after providing error messages.
+
+3. Each EndPort will be assigned P_Keys as follows:
+
+   a. Default partition group limited membership, as defined by rule #4 below
+      (only the SM port will get 0xffff).
+
+   b. P_Keys for all partition groups it is part of, as defined in
+      the policy.
+
+   c. P_Key updates will preserve the indexes of the existing P_Keys on the
+      port. If the port has limited resources that would require reuse of
+      an index, a message will be provided and some of the settings will be
+      omitted. P_Key indexes will not change under any circumstances.
+
+4. Each Switch Leaf Port (a switch port that is connected to an
+   EndPort) should be configured according to the same rules that
+   apply to the EndPort connected to that switch port.
+   This actually enables isolation of unauthorized ports (with future
+   usage of M_Key and ProtectBits).
+
+5. Policy entries matching a non-EndPort will be flagged as
+   erroneous in the log file and ignored.
+
+6. At the end of the P_Key setting phase, a check for successful
+   setting should be made.
+   Errors should be clearly logged and cause a new sweep.
+
+7. Each partition that is marked to support IPoIB should define a
+   broadcast MGRP. If the partition does not support IPoIB, it should
+   define a dummy MGRP with parameters blocking IPoIB drivers from
+   registering to it.
+
+Phase 2 functionality:
+
+The partition policy should be considered during routing, such that
+links are associated with a particular partition or a set of
+partitions. The policy should be enhanced to provide hints for how to do
+that (correlating to QoS too). The exact algorithm is TBD.
+
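For reference, the P_Key encoding the policy above leans on: the low 15 bits name the partition (the "15 LSB bits" of rule 2), and the top bit distinguishes full from limited membership, which is how the SM port ends up with 0xffff while other end ports get the limited default. A small sketch of that encoding under those InfiniBand-spec assumptions; make_pkey() is illustrative, not part of OpenSM:

    #include <stdint.h>
    #include <stdio.h>

    #define PKEY_BASE_MASK   0x7FFF  /* the 15 LSB bits a policy may assign */
    #define PKEY_FULL_MEMBER 0x8000  /* membership bit: set = full member */

    static uint16_t make_pkey(uint16_t base, int full_member)
    {
        base &= PKEY_BASE_MASK;
        return full_member ? (uint16_t)(base | PKEY_FULL_MEMBER) : base;
    }

    int main(void)
    {
        /* Default partition per rule 3a: the SM port is a full member
         * (0xffff); every other end port gets limited membership (0x7fff). */
        printf("SM port:    0x%04x\n", make_pkey(0x7FFF, 1));
        printf("other port: 0x%04x\n", make_pkey(0x7FFF, 0));
        return 0;
    }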
diff --git a/branches/Ndi/ulp/opensm/user/doc/OpenSM_RN_0_3_1.pdf b/branches/Ndi/ulp/opensm/user/doc/OpenSM_RN_0_3_1.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..4eaacf87aabeba3261a395dfc2be991e5a4355f5
GIT binary patch
literal 83938
[binary PDF data omitted]
diff --git a/branches/Ndi/ulp/opensm/user/doc/OpenSM_UM_0_3.pdf b/branches/Ndi/ulp/opensm/user/doc/OpenSM_UM_0_3.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..99511713d56b7e92b2381f2868b675daa12d6645
GIT binary patch
literal 223001
[binary PDF data omitted]
z^}U;e0dyJM`Hswv#qYL*hkx4l`XD>KbEJG$GDEXurp`4wTZC!is7X>J1hg4s2_jq1 zuG!afz#n)NOtuheY`SM4OuVO)6n6WQrSdIC62lE+r6V zbQVP&7Q7I2lx7=&K#>sy_cMs1^t)AWVVRvt=cpn(QBJ*sMxZjjj@xcGH@R2)=TA%UB2bi8V%Uk{n{agTH+X=uszP6 zC|O!|A)@IpyW$n+DY_$MERb|6M=uxSu8*Nh9rr73jt(SdAU`jQ8K;NB!72b#OTiOC z>&dHkp1dHHgw$ zlO7*WAboACLfvm1YRvZWINT)5el{~#7bYa{4nGL5!f_#C?dePW86N#DpE*6(SCqv4 zF}-r5m1I)e)}W{z*t>d(EDf~BUd%otXPIE3Z#QsP1?V_@CNVI^xF2vMNQ5f@)5tHr z4rXoFPfigCUQir#f_f-rDJl#O2HMG4s<}4IcLkeA3-nqhn+9GMHDpOA>)C8MRxS<+ zIWcw_z>JDIJ_;Sznv34DSb|u}pRC$^vKn`k zszXP%W+9=HSq8m+1(Jhy_uvUSr67-6SrUfACP_aB5&`4A6lQtvbOw33LYd*0Dl z#;}cmaPrhwsTNxcnw-tVbJj4x!U?<2F)3}HWYN>xbVA(qzI|t7{JqOezHdLHWgiNi zhJx&@)V;c~3FOro(Y^FEqzWb1uz4fMU2>eC^I_bZT$20Z8Oi$QY>VGiuH|9~#F6Vn z1+ay6M?qRmliRToZ61eJ6Cn5zSg~(;hg71QdOan(>~<`iDJ|FRVz-&s3I%DDx>9I# zVV#5)+3gmLVWHu>H}Dk?d|=RE*O#Di0|iWWj~j(7#6k9OWQtD7Vr=g7^4ri-uo!iY z_B{N2Nb$WsOM;IrTU3(KnzJCR7EGg_49KgYT!co)Zja(D6kG_As=u@hC7RwrfX;L{ zl26>yHXAI#H%WH<(`68+FAUY+e6u}fQUTz!fK;3;@7?U;v#;2_6v7ArE5nIZNxXAf z0==c-*G66%>O{V+6)!l2Tg|-VqJy!{{X?XMsX}Y>1ycU@OkbEmJL>W+`&vI zeL=z&56I9MAu36okmiB|7Ie!@_$pQ0d0(nzO1|6(Z8q)5E5|qW?UTH&!^6v$wK;U; zy`*51;ON92O9hV*MqhNpqx6qK0(B_o+-M{D%wzY2p5#=qW<+ zVDLn-frIJ$$j0$n2$Q?oQ8_E>Cu{FWMb0o;xlSe3xHEqH-@U!J~wM0r_sD ze8oS9p&%{*Ao&-p!+A+0Y?Fyd8)A&>^C#I7KtC~Jlmy3S9UU=jpv00~dO*6nex)QEI~;|4~%izfJRU;?5djIaC>cmz?a9AUbXR98?;d94H~qeiOMkF~xW5 zXw?T`s&^8t=&0-Fl;g*C650-TP*!=ukb~G@f;*3VkHfhA^8S@vI3cvc*Zw92Y|DT5 z<`83*g;kJ3r3?mVgy2YIZ{GzD8&fvu)>fIC)$qJyv8V#U0}TGMIJw4~b(C}8>8!gu z)pK0fZ#;i7xw%eUjVG+1(-ydxf2OSseo$tw*+t41_%xZM!wTa^o93;gqtY9u;EU{a zh+GB8{ry+&n$q(Oo6!%wfTbBSBBg1^CLukeK5B%Adi!_fYBxdV-Rkul!>T@odb-iY zz?E70NMm82HVf68VF$b+)(ZvT*>A0~p$GlDF0@6G+SoVnE7h5 zf-tb*p3o(YqfANcA%taTpV)iiZ1)>8T*@1Z>+;K8&U;lask77sR4seaC>0r09^S$8 zXQiJRhT1ZXP7)Su#OU(Z=VoHYyqA)bW`Y&6uE%B`!1zT3FY|*>*PgR?bkx>z;gcHX z$e{@c%FgMS4GjE|K0CD*+BAdMw45-g!n-R{v$D}bS(f>0m8d&3a}@KAqD>33fCh_` zmL%qI6k(v*3L>wClJ-O{7$Uqn#};;a^p0)g9M-uA!f>>htso*&TsR55ZhH#*p0fT9;f&zLbdZ zNrIW4q(vvoP_Hk?09iqM&EY779_ii4`DE~}u80zN2qQ%1Ap4Hu=9|28UzfeBXm-Mi z^faJ9Td9dSXdKw&b7<`&3mO*vs~){&mA_XFOoQ7=in!wGz7-xC&Ui&Y*Jqd%zMuYR z^o>(A2a2=}r&74=RFU`tPdpc*jPTN0va?};!2Sz1il&p24=Pg94_YW$%BPO-`4}|k zX(pZF9bI2qoeMf1`)tM8dk_w|uD3=Qg+KD$vYGDCWA80nTYmAkjsDDbCld_sy!^He zSCo~rSU4==|#Xo z+k0Uq9gvf5D399IfND}(xfDS@v5A6V*#^VZ6lX6Rym7R8N8u3jTkAQUT|Q6L;A+pjP_Xb@4{8!QVDQ#K?G$r8O~zh*#$7+UiS68Y?x5e$L>8{ZT#= z1R}WX$x>r@b{>_KiRp~tA&ruZq}OAQK(UK+6|~6kFdVkmV$e}X*V-%CFyKzHt!)DZ_^obHGUof;oC;YWwyEduQ-=a8~ z!gIY+gZnuwCGd~zw2>Mun1F1yY5j>TS)GQsBt%l=NRG_hdGckV>Y7_OX4xa>g&7hP z9SfYYv;vROQTEu+T|o;p$j>s2%K2oA$Z80Tr|Z(zFDwPzIC2TP0Ci429FA$lt~NJ1!WPD_-@@!+JkKB zHN2I>gDDDKz$9&eo=NBDD|_>0Fv32vmAx?Ug%K*Nup2uLfXdR2t(ZKRsO{M;jd!?4 zky4*fU@&RdOBj#pXNRAA7DmFwnZ(X7#rFk=)d?ooQ+*II8QhMM_!z0<`$}T&O>TS@ zAP)k7c_cY(M17#$J?4h|kF2-YkyoH0hP&XO7$(A*VsyKwUN|y;kP!vXaOWKq`R~um z*}0(#eOyd;VRy&)RBbb|zMTphO`URsmQD+gMWSS0S2#Eu*luC-Is7iq%5jbq$Ux!F zMMa$1+8d~zzgJ0mRG=3^Dx`i>P>ft^^Vp~Y=UE$3q3Wyi-Idh1z{6clN&T>4`sl(^ z3Fl`!#Zi#U{7+tlytIJMb$Ldv^404H!8<~(I_^z2KA`GZ@JayzkJ>bEZ-ev%U~vSV zFX2fTCLQk1vNc0ZdF#58Me>PGRZz@^s~# zbZZ@x-H>87d|eLVZJ?y`fe3R0K8LoKY*5B~GoD*pjmr=ns^C{0w=_jKybu(X;QA># zodu^#hIP2ssTS#}tnwAz8kjr1f9*B1X$w*si|BQ>niVn(gq~s26&S2S`3@OWwY)S& zeO~)6$V(z}j82ES4&y7oKhgd?mnZ2dze3CEqL_1;WS2@spSHJ0n63!dz#EAD`tdXbS3tGyd;73#Zb%|u=*O!r^9U&1 zdP6hLCv$4e*LpCNYkj)gfP#^6>0wG0Qf6jYgX{cw>F1;{Q)7gnBX4>eQ&$8B=btu9 z@A_Th>KWnrwBeilaKN{)p%yCEg78RzpJ`s;t6!o0yYetmL3)%dL@bIFCDhYJDVt%U z9O2u=B28VVe(NHhmg06iu8-1TBjZf#?Ixr%?b0C z5a+Zcg?AXzY&4vjWzV~9!L0G^fY?3h)PUXn>{bZk$OQ4Ye)OIr7&*rr{%UwF^}%+6jYAQj4Cj%Cr)1 
zPiD63-_nGsr55VxG-Z&ojPXf#hQUO4gb0GhrU8wS0Eozj6_v)d?&j*Pg6HWg3pYhq zy@FLpUc1BnzT(IDwEG~Vb~1>_ZU&lf`&O|*tP>uBGY-R*ZPCY^d|>(-bm5#=cUbN{wI9}uNLMZn|@ z+&w|4dbX+;u4!w*jXmG%D8BhuB}aory&QVrA_BBw9)khk!a5_$%ahh`pH4GKB#@Gb zC<4(V6#@c4`dH2tnX0z;l3D!(3@i~~_OkZt+%Ihr9D5T=C?~UPayWQAPwoU;y$LpV zi!Yrk2jKO}j3Rwk?X1C`C&VHH-vP`#!BWuOTDu?-;0Xe!L?NM3(_Qk%82aEMy^gUh zNN2v3h%$3t8I%u@O*vu}^WoCPUaz@MY0KF>{%Yi=k?mqqvbVIeiB` zhI8N2iRv(ZNHn(+4T?c7=o3X^>qsTjY=8T_Pd^kunIa-vNneK+oT(qk8*jzy&;RoP zAczh;9PkmTAYLE7kChNx3KBV_xSzgh(YIM-gdPz22ci0nys0NS;NAv=wVF0Tek83g z#~e1wfmms=k04@?*u((;q0350wLx+NzH=ZXCafa`9<00{0~Pm_ItGe$u0s`X?=Z^p zeT`8MlyJBlCv&zNTN`Bg@Ld}8r$%);W4iDetZ2+2xBK@emz2<)sSFhOQW>7d=aYMY z9flL*f1?BW@JIe1em&U_EhQfG7g;^~UnUMV=6@MM<&7L{oa_yZ96n5SzipZSlCk6c z%i}6wZ)3}EYf^20zVX!K!IH2TXd`rp3JLgf`}Ocpq=533E~lCjx(>y5_gtd&12 zIRU!R)~+6$vk^=jnd9Ju9Tr_5kWLx6 zVRk!6iGecKciny=gMlQ|`Z>PcSf)a+wSL{km1tDcC}~3-N3eNzjHpe?0GdnjID--g zpwU7$e%DijOV-dIVc)1bV1b%!Ycp)*3&=lzPvu$_9& zt(w%j>?3n8=8JH=z5szCSn-1nQ+fc6Q(?DLo|br4%z?99w0ynX<|=Je- z8c{iysSnLt{OIijrR5LR09;hR=~4Ez?R{YEExH+-&h}6bgVFpYw!{EI-<6>GAIFR z?4Z4UxPnl4!+3Wm6N?Wy?&iGB`xq z@*n|2w}9T$-CNng(J$N39MQ=y6MK>7AeIx0w~gDENDlFn%kA)BU3<4| zY4a^PCd~!E*hnpMD>{{I zg>PMTlPF{lM8?1k(C#YZ7>~%VddQ}6^ad>Y?A(AOnoBN>^Aj)FR^@5fy`&IY)w7Ay zb}Ro#I088&slg(2r^jUGQJ8nNSh;ULahVc}3=w-ui{QXjM#n5!D(~}li%KXl0VHY1 z48ysm%24x@&^oZf4=$U}gE{4i@Q!5Mr?sp|jrKWPAM;Hw0XDUczxe>#cS5a?>W0!N zJAcN3eAEt$KZ!K+LTe9heu(Czd!l=Tv6Ft{m?Lomhv%b-AyUb7UOV4@k6mR28IIC= z7qqYD^^Eiwf3RZGZoa_%_VkH60DHE0K)310EnZu2vpvpb_7gr)2*2)iWQIgNwaV-) z0B5SsRq0cJB$YPIc9S_|TxKQewp}Yxdr@Oc=5@_yiFcF-$YHFWtP|bWtk6|I9!j!q z9RP1}dVsQpOBJw-&me5nOHx>Cwg={%4#}t<40CqUUn8`r1-r(F=h}a|OhxuA2&zG1 zzO@GWsF1C{sk8Pul(C+l4*YZuVfw;C(Ub4QbGn-Gp74&Jq~)DGikk;O_AOpZ;?z&2 zikdJ^e00_22iL#@+~CPio$D8Gh#Z1>#m{F+#DxgvEdyWx^$|{*>KEoL{a0NJJf8r% zzG@r%-%H^C&=>!!D*U+wGX6^k{ogh4Uk>U27e(;5;Q6=N`FE24?P;d_U28w;=C1<# zx0Raice$neU1aJ0R+`iOZiDW3BY&3)y5G(G?Y95hK>v~2f3^0vg8pykI{n|x(Eo0R z{&zDU*5<$d^>;IWOSKt(uMNZRW*C02&EMYWzueCs^6$Tu)eOIz`CAgr@Vl9h+WD)| z|1SA|NBRGy{qTP>|7o0bAN!V>-7Wv$3TDW zuKyFtNJIA#CLI&gKZ%ry?XRu-3;svwU!CFKQt6qQ+5QQYk?A+}gZ^jguRi^6sf_eY z%>RV?adiCZNz5PgKU4qLi1qL8`+E!1voib>>c?UK7xjbwXX^hNl>SdD8x!+C+OM<> zzo{SeKU4q5FcbR29QfCv`d6%M%nYpmAXX;UzkAaM{m<0@mCXMqm64H^?w?RUT-$$* z`akG@rvAIM^ea|I7FO1O6zhl7@Gt5I{m;~Ymze&hve5pMSQ-ERh4VrGGxgu4roX5k zIrtw@KPt&zv3}71O#P3!iH(K%A8p}}68RVPgZ@YAUv>1~W(hVHx_^@UnLZ?!e^Ec^ zf2RJ&+{DI8`%kEhY=8Hr5Bi^}|1meQvHqhv$H?-#H~pgjnfjk|6DvLaKj_1Z%)fin zFZ!RU|0y>yGBYy#lZ?p7_`5g#qW_utpK=o;0~_lL0}>COS6yzmrRRY|XDb{0sV@qW+qk|GU`4#7g(~o|WY{Yx)KK&r$y< zHZjq&G5nqHm67>3YhwIB|8vwoicP$rbwP4pjo^*7=$1N%R$=>z@GpY;!86FnpA z-}z6+!1kLp{eu4IsDBun=$ZcBv$FhVO~0W3IqDzACZ-Sa_3!L3^KaI~@PYp4sDBun zK2ZNo#li4V%>QD;3?Jx!iu&=ne_vgFc+OwXU_QPLe1;F6;#b%Y_&p^Ppf3?=Je~|`2U%G|6MGj{SW$`jfw5A(C-bZnx4xd@Lv0>WBIey3JvAY z`LyDa8iO|YhXDy-7$7PHMB#zj?@@1CKgOy+fCTOG^n64-FOSB)GNy3j!Rd6w;p=f> zfw*9Q+@buk5ixo9%Jzx1fjAN^Kow0HAR*k9O-?$zr~Ib~(B53P-=)#UVl)ycoj3L> z=DGA}GdCeK^@K|ln4Q&3_(`|j_{4T|z=EOke7{&W^EbB;)As&i+7@LoH;~P4lvQsR zJr4NTo^Dq|PH6laf!-$cz+RVjlT^4b6Sc%qIB7ao10Fcdqu?QNr$R?v<_CiJS%~^H`;(7%bwlnp)J@NtI^Mdj7G`0wNni=z%I^-r z%seF5$rVnSHmf9o8;<`x3;UuiEXYA-g~EJ(rS^OZ&6%MJxEM1g9Z z?VMt0S8pw_EPK-w1J2gfp~|(`UacpQ!q#$}eM&mdBjm_ejBP41W!*&KOjMIVylt}m$8L0c2{i%$nv%& zm#6wE%hVws=Y)u21HHsiRLGVz_{Pi9%Of2k8mA*GqJIo1{h+$gdnuEfp*NSYF+U^C z#e1I)gMrZmDmf4cL?>7nZ9v!l8hS{QyOH`udJmg8G!FlcGQ>>sZLlDx_8UM-nn57D zG?oW_I$nav`4~Inl_+sUwCp5i2nK)5^3Jzk$63T|^de(XR&r9n z>EO+8vfm82RRf#^moS8{PM3(Y1Nj^~h+|@w0*Npuj(#VKCafLxB46SVxmTJIKFJDw*j^fk|B)cEZL?V`a3fPdzhm{@u{9YRcWLLVnHgkv<8Ot>QMTjAV$m=;I(FWNU#F 
z<795(HljU;(zI$w%kiy--gR4zDKXhGN=Oz_eMNthVUFrtE&W$5DojwV1_k~>y(<#$ zUAT|w(mY_+)o$x*DCzPR7*|f3Li?{j)M}^sHo(~oA}vg00Lfu`v79yxV52;!vg+t} z-1YJG3g=TmyeGcdxjNh9Kd=aupNw@&yP%e}?X5@@)gNCY%B6ey)3CkV(I8`GP^*Y= zN?1y~Cm!r!-jry`saG%`w6LmRe07~X#Y79c{gNqPO<$OpEVn*RVr>}M{}d(O0e&kR zb1|I5vg3**oW4)|qFr@$NP4|P=^rctd#`(Nfl7zuo`n+LRVwbYVl>8}@^RrKXgmr1 zHMr%^IxTykx1N{oo2OzXM64a5yOO=m#BU%o{S*TksxjoGd%MBIS=*zwL3R}5N_!&r zmck_48Obril8i-U`f?+&uYhaYOb;x@xICHi%xdw$U;8-Y3M$@^O`j26n`&*Af(eD{+JiFA)Bch2QwjSb{SHjV%M$A91 z{J?SXnLvciw#9*+-$La!jjRtVYfWeOCh7;T4UTekxro9bLzMAOH;=bIeaT6iW zL2E-*h_iA3?4aFjL6rDX(fC?m1c6GL{wNW~JSDs*m~`~@ox&~V9&s$nQpZz!te${d zp}*0l@I|Ev{1S_sYjp1zCG%y0W4QhaVgw_yCQ2_}6>f>>bzM^v4dqGb%sHBLXWI;H z4~A67t6ppFtIXJJycYw{n|rWEPmn806GvO+)4lIN{f^GWgOp)3f|EA&0Qow)EPJbN zP#MRI2iJoS0OxY`#D8u0F#StnujKx5aQBDW;m<@5OuzMCU}*odA9Ek74?txBh~Pz$#wac7+Z)`1}clYXpoj zuhYwV9+oT?7j-Qh7ieKzp#?$?c--4toE?J2kWNq1gZye&@Fz;6M-89cy3%(V|IK2W zc+iw7H!?hn#w>yxMHCpRSNTeB8;H9E204#8OePXhGSk}%A7*R5>X6`!Hwz63GiG$F z)V{5T4O|X6riX^YAQ+D+ac?%hINQU-1 z75YpB0u2#4LNG?fBArf07ctKmWWS1o5!x@4{l;g`F<74a?!^`L1^dR_2fYX~6vEEq z4nFfnK3(68r*A)J9eE`S6z^KVppPc!)WQF_SsPal`k)0ElJ$Dn_2OPrger>8(!BJC z31eh_WXSWG7?8S5%G9mIxey~dw6Y+peD@+H`aDJIHneXndbE=jJ@Ij}_Isq0k2{{L5!QGUH!geko-BrHS}U9{t-Jf7_@2O#vv)M8i(Q@-fIv zM4*iBkN-9;9_p<`d7;{#*bbt^9S$wL9OAl(EX--`akN0eto)sy7zDW zieHz)$o#=%{^wE{XvA$Co%BCMOit#uHZ-;-G_rQaHj2_Ta{7jrAEE0nFYuRU%13kI zqoemBY-0HfHPycDuktF||Kkq+YGnW1!T*Z@`FqI~tq9YH{OUK8qZMKPXrcaQ%Ky_r z^jClQ$KwAZh%z!TvHX=Nny9*BvnqnhO8`WN)pT27lN-2R|Hm;-vGtAAbTj zCJD9!p99g#bItn(t<0KR+>W;XXTH9(vZ}K(4(GOIYn={eiSjk?ga`LLpojdJ*%GFC zZ?oC67I$ZEqB87I%#vIy)@tun3ujgn+>-2oB%%=hQbD5^KZ((@s z%~fd6wfS%CEgH{X4Luh|UuPfpifXvQlVTujH$6V7oVJ>+{&eNF_FlZ?tZKfBKE0() z=X(Xw^(pCI5aA}nDbc)LIrl4L(U`MHu;7(eN_-G6icV*Pd?8CPt`FlHPE z%`;zuZ|-sST^Tf{cG6?Tpkm}5dSIg2#ui%{pF0cV-X4138`}gpz{^zzX28-I3tL;m z+khC)0yq4*qh3~|dwv)!W56qwI8|GXgt%KQUp49x4aNdJmG6n3Xu(IJT8%pEqt+F9FO$v zdD2DNHN@%@E?VIcgsO;EqcMLeeN494tl#OqHqjt|bk;|z&hQLkfS5pBG3P0y4T5<% z+3L#u$!?=fXOC1y%%jr->k?GQ?1c#2u)#SJXkM1Mv~}?ab<&ov1Pv%J{S#d!J+=u_ za5VK4Kn-3`UR(JozAFT}lgN1x9<+G-)=cC~KWaplD=)-ft?8_7*p52f6D0do@f|7=HKJUmtW6OzcZkcfnF5KK=TIM} z64ieb;j^~@AH(nGFh|UdHHTv1z))_QX2*A;FYKA zB@PIU$pjb`AS=CO%hNt9PWjJ`s8Q!O2qE+y1aBn1NqO$7xt?w)B>UnMEfv_lW-@{n zDDS9_g$@8ZR*VBz&%*NB*TPkG^Be{AX=F^+eroO(n#9(h~zkpkc~7)j+|N>^TjchkIEWOvlA)0*sX?C{*!Itnqq-VCbuz zp1;@FLM>4hpqxh?Q%uT9D0^qdHID@y9V$vcun_N{6tsT$Xn3u8If}?2FzwOdK^xMg z0Kp(9FzFZGWo*8Rnx&(T%8orYlFtH5k+-urvJll72^(6hFi-iuT17n! zuA!uMizVTX!b0GI*qpE&&%4;L5& z#wtMM0$ z6bnjGSW~*~B+0O&6hKElAu7o7gFHd= z^%^%#=@b*vZTr=t#DzsET9?Lh4*-k})gDlVy(-59-<5;kv)MN_b+k(q4WZtepg$i> zsX{n;B1NKjm`QFND$Di! 
zk%<_AgBTk33~%ePn1BeHu+G$WsEiT9=8GLVjs-4WYRUYt^R2SFrOv>!I!JBvbkbJq zZr86>R5Nz6?A}W6TaA9HnKMOt z?-Aq2^)Ym8;-rtONw$w1 z)Ny(epta1)!x5wqH^tm~lw~2R#6iqUY-oRuNiQeT9*)ai?Uv=pl=vqHJi~qW7kO|s zO)JciSE3})64kHi=+I}GyLEbuDKYx=2zxv}4oYMn_$iWx-VZR{3~H*anIJ3Bh3aMs zl*sz$W{-)!JASbstb>MoGKaxc;6CnV~Q+bny z0nvPI))jxU)FWRAd-m%yZ=!5!z-pxqK!|wFBJJzbs+gpWI~%~AVFgTy<`ck zkrQ}Zkqz}-ha22Yn0|7_v=u|~Qs{16BBH7AfiynUq-c;!L=8XsM8j0`Duz1&Jd_py zTvkzPQf>hjAlchRD_idY&{r_#W?GD&eE!;zc;o8KDZzPIi@>$lwE_IoLbFvHuq_>l zF;)HMfN243j<#rEUuO$q@SyO9w{TSnwce!X_}$&$T+;B~8*~#{$0Wz7!hq8o^s(9Z zVCsVNfY+#1x?_rUV3)Y9XS4AlUWW)v{Nho2Ba+?evTlDEIrvA$^u))itt@C4a?Nu( z<5qBcRjhP9RVDAJ=;OEe6zdI(lrDzRW8gn&oCv$HLlDk!#oR-!dqaE0fkse2V|B9mroBavjKWJonQJJV*@uUaablN0Yf?BCF$S^M zPZ*_`RkAlSfgCDHckqni)7wnrW2v~;*^dad!e`QSFicC82fLa3xC@ign^<%igIkM* zxABq-^>p`dlSCOs&kBe2lu4a21-N=C)CF>V$p%iO3dh``30wBuG}N(>LzhBe_{wkL zM*~VNx^=uz0Bzb5lUHHup<0~2vzO4^y}GgEo8$RO94`}Cs(e8e`cf?%?BFQG-Qt_- zm8OfNpuYY(`hm6uFWn;%gAg5C%G>bfQBY`o>_)-3i}f_v$j|N-xLU<0ss}qD_oD?S z3UXEm)0tV~omnJG;kq&!l#P6vX1`8_AE+3m%V)gB`yWSsBhc57g3aKae|oe08b!jJ zjK01|xjqBQNU#lGazy;4%B8b53QRsU+Y)CPY3y2CkeIG}8506p)d zi+qk)O@~Khgx-zHu_ZJML@M$ank797HGm^HIm%DGvQE)MJ+V|#C}!5$P&Dbb3FKT^ zf(^|;qIaM-*Z%_eQP-|Sh(Vi4Okmbu10q~CiE!-rM*n)n>Wu$;Sf@M^?>Vez?&%NE zOwf5~jVp(s0`{(*aX+l{y7Er(R=)*=KnpR@J~|i4+({TP;kHf>VJoV`xW(nro#Nss`7ullKh=);GdNw zJJVk&B&`&NW%}sgJDw@`4zL+6-R_eF;$lbqo`TtO;3edjaR~skCl(#LrNU;119E zyy!bjd)cp!2&ruTK#m#22oYe)n-^YaeyP2+qoQRrRmlv$W4b9Rs;>cqCm~o9I3>Un zS`~U&EIc-gr!I9idP!4YdqoL61#N_Vgbe)Ub>$ksJ!Bw&BxDDwrB@H>h36PRDg6Gs z;*JUWAVa_s5e0JwL@cc-;#KVqGKVngvaWcO5@-p%X3P5M_93uC0=PKrxL_v&rr_ir zwv)>I#Lzs$Pe&tN(=Ke;ZFAKTri6OHA7uuW%W#ZxlTWn$X_`oMnHG zR>MzCgrB9&=NdB`?P$&@@KJvinGv~}R*G6%rb)?$KiKh7kT+ZuBo4;=f39|AxGrjq!uK`f#HU@xzBw z`_K8SUmxV3&ii}RzaxJBK>g6@eTW}EQ2&ash53)f9*loSuKEiq8v{PW2MzX@Bw-)4 z+3#c*{~u6c|I?DV>JW7SMp4l5@x zrgeeEhK!GuD+tdM!F`W>**ZldlW?&p69JiAJJ?@{8ob?Fn-DQ?T&KAz8BvDzxsgX# zHsX+o)WAbM=gD%I%Q#?CDK64%f1dbY$IKB#?X7$}a;v!%k4Lv0a|E=7NaV7kn>2&m z6F*ICGDOtzy?R47MvrvEZq|i|Z=Bcoo+e~4q)*OoPIs4aE?7SdKBe?`EmRB+CLPp1 zJscCLbagLeQ$#n&qZJz7R`9$dl#dHg7NZ75hietOC#D?L3{A61NUAqQP?dfhF$%@> z;QQVFD1}-cOKY&jtEFledq^5$lp$PlZ@^fkXUgKTrSsY}ISm|^QV-x+@y0^RMlc3c zxaAFrAeS9c9QA7%RpPt$y6pj1M7$P8rZ5MoynLLB#T#Bg;Ag<*Zt9+TYt44xvnr

jtcLU)E zdpO{%=O>gvV$$J}#R9f(oQPLRK5)>XI`_8+_(v*A3D@{y!;`)lya4H1-X6}uR{zkM z#F3-@Zb)azyc&z)gpJ&-ho(6;Ijo=LszzLxO-izb-Ty>WWakY#?|C7CpyqsxI#iDS zu(eWx(5yc|>LiNlw2{tWr^ir&Xr&#Z!|LZ*ambg9^#;gC*Aw!YiFlI5(*!4Ifo1!v z%g!p|Z8&?(iy>CCv-Ehx)a^?TN6C8-$Bzv`7tLgs8j0%xVBpv%>IybmCQ!yZsCo37 zaz4KNl^{j~8EPH|o@DuWmBTUd!<~XJKr-|^#hjDE2hEUNpY){Cir+wNXykdHlUzhs z=<`q7R7q@px@pcibmn>)nz!vbT zjS}oCb?>32kyzlv{@2cdWhpX0^uSMZ`D-Nz#>y8_6hu@e2NMnCo>lpCGIKAr@*Q@7 zfxZ|497E#L$C=!VG9W~$`-7d%QH1%5Qb*|4KktMX$TV3!@u1|NXiybBQzUGjYQSN^ z6Vi^T%y9Zq&t~uytTz_7n2j&OSE8G+o^xbU`2p*)!{8JFvAqlT3qVRijWH5g^vR%_ zq-YQVwCIQ==E?vABFr7}1IskkzuBTPDg|L7ILgatluUZXAf=?~*+`d@K}y(DWn%VzJjbm!FOyv_Sc?Ji?8f zbt{VN_i!e%Ms^nxrf)LM`#KSGyk-VI9eX}`W50In4^=tI4 z@NrvIsuZ^iRaas$<+1!#b)WrIuVmcd(&cUiw%|#eFOn9iZ_~z;GJiajNt(52FYifV z5oXVSzZTGR%SN%fAdHsUm9#}83H?FY5N$PLx-IYXy#`+fnQMIGz%>tn4s5LbD;PeC zzFAu9u+|!p+%rIraON>=hvCo>qgSqR3H8CtfT;A(xVeq7HTMl#&8`NO9=V}Wq?nef z^T1tEm)?TZeG=0SvGjs8O}F^(oO*GYEu?32Yb8iY?s4N%l2t{-o!K#-RQWl)=K_Hf z9U4A?3wpROXaYM=H$v2h=$5*ljy!+&ima}XiwgK(D^L%&aDE2 zKj&J%2G^d_FKhTC0u^K^K{ zYIs`KK5-$*7dlULq9$@-@8w7+WCn$Oyc7R+Ygt9!R-a)LV-CtEwuwZqt!Ouyaxo6t zBoQfB@fph_T#p_iz&nPjaUqCUC~p%9>RheYyrB`?EA|E?jnN)_c<-z#97;n1Uam@}uBVDiiEFI48qp217Y{RB-i#HsZW?yC8CJRgP z8fZYIpQ5Y*_n5h3m|QS2`F<8vgOS+)l%Z_`VG{^^LjbqIBHH@=vjpi-k10UFu90K{ zI4f$^mJF#AByF(sM)JW|E#g2Fi#GE%Ft826D8-M4u}>R!7ZEQ>!uK9n#zBcDF%FC> z2*|Pb@up`9HUvZiHr|LiCOKc0@aa%kEF?>xBkEca(a1sIT3U+vZX3)`RaTeNqx+S) zI4g3ywqXZa)1gOE_e>w-nV~bJH@?Vi8xBhY8l?Lsm2Q&$%7DGuING;@9Do?$8S4*9;p+O)5?6r*oVZX|pYy!5`8>#g?Fo)i)L6R4>j@p{@qu@Ql0K}~BYGM>jy2zfUxK=C z6r(Sb2j6X;$5@H(HI$I_{LEJ{?GSMKx%3W83|Trvc$knmVXb8h(uz>TAEYeCd(G&m z*tr)^fH?s1a2Z?ey~El?^ut4%3kYf z7d{=Ok3MF?o;@E`ubZ`nUk8u@1g_Y?mHe2}h++@`^HD-FVO(TWlf2>sP!v;;HpD5z zaX3=wBbhYqQ0H-AaXbAtI^YFCNGX08RCrO5xI5~JjW9^CMh?7JhCE%F>j}F#XoT~m zh^R0!zaK=iuvlt59=vr8TEP7HDg{d`d^)c=DFS7L=fmF&jd#X)Ccbtmm{hNqZ7kOG3eud!E%hAG8SSD_R))=T zr;HlSc-&{jA?31B3L?p}efs0$R}$QDt6Bh|`;cBwz=?-B{I zS2&MMfzg?_6CO_aWndwW=o-)>>NLi}G{2GO8}Xn&tPnDn`N5EeuIBDSpJT?>HDIrq z$j|0V%G)Z3DaKd>{lJ{Xab82XN#pK zx2^W`LlzB8F88vexD6yu5S=x^h=4cO;0&rom|*Y7RU8KbF-OTWmTd@^p^dz@KhWSZ+HfGi!ST+sp;);$_EFl7{K zzkd~Wm*;YL+A~JNf}7rS-cym)6kYswo{*BncD#BRxC!gp%%D+G8H_5~DRd41{5bjT z=T^6T$CRxh3nr&o>x!dIxjd?+rL{8DfM1JRAr<9&hNR7@fgp0d&ThK8k3C^0(LJHQ z$S1ft2yJyV0QTF+z*pM$d*IZaknj4=u98Xj5pE@0bhZ*Iv0-ugE~BjHO(CMUg~@;v zX{?=FfQTa0Ws|GI4frOJv2UU+y(h-)I<<4nRSS@)QFjHqc79=K%UOUWAQoz}sEtNi zC`4KPOGntj(U{>Gm7kCzzIa#BLnSJf2Gg?nuw{Kozm`3~doCPvcQRZKpg8_Gs;S`t*1tV5z^c^e_UwaOK!WIHZNwLh^NK%Xf`hU#d>Kknxvk8# z-7L=WolYx+rCv{;*yLr)tUrR2DzwAgH>>Y6=v@(@$99&cur$wo9B}LN!HI_YeqFsS zvgFkZ4-MD;#*XBX~A@5T5VbiJ+al%8W;vw^;Yd*MthoW%*?&1EGWF z-G+N^V}XNGH*}xZ1gKkjmWNeq*^ek@Z*P>Yd${l~!AGUmB@aNzF4mXp*bUa|o5k<0seU8TyFjI#775450kxJA}s z7VNJQR7zeNK_v`#ACQv-?KJFmH||A67t$Xl1I}TV4fcj9q9Bp?`J9#Bv_t#K87;u| zP7%=+nvWw|tdz_%B(dHFrhIb#%IM~F@L{if=xb&{8DVkw)oT`&4;HCk5b=Pi)MXSl zK7~;h+ePjVaYL`l;iHjDq_Wb_LmsW`K86HM7xuLm!qO3jha&j;{cLKUl^C zggl(X0kmfr-3zvo&1*`D~bX2fs8 z;r|Yc`YY-``ONS`tw{ysNxN8mnJN+IGEDsBDgA@qv3Vu*2=$2OOHZG|e;+XRsLVrUl1v;>VD)I4tI zF)kO^ba&B}cn3sMT*sD3sO!V;_P4KFw3I4$HOR)JAzi)m=bOi*C_x-5q=keZT;2Dbo0?ifQri@{hl0xQC4iTbcvUkM^1SBbV8 z0{eHjR_3IM>acem^80%A@Cj~aDPQJf;W@`^C8z0VBD7k6Zny}Tnx`63%$t`mP9yFW zDP4D=D?l1cfpM*zqE7f4N=7l%#Fx+IXXtKfAQ#24P^41#stQ4x{kRBndabtDP2rfe zLvjPPPu;h7C{L`KC|9qi*E_G=i~OPe=8I6;_9=jQWQ~^EdGb6YjYo{pk{mzNNIdtX%&X>%5WBex;;THA&#Pom=|W+qVcd z;pu>RPKkVdWh=@orulV@p$yNckr%pi0AS$`>f5r-(*dQ36?WQm&FGHwW_#FY*P-+j z*_!5y*`0K{5Y1jcWl7#3N7xd&I~}vSZ9ilFTPOp& z4O$}NG%D)M7l83yXIGlaYo&YQ2$7PgFju!t;o7a}m@Vc|n&^3W5(ggmb6Xpw@Tys8 zXnx=oC}?+xQhHx+07vfOmi&Iz#^Rr4Z%ZzjCv0H4&LfOHl 
zrxt&{;>$u~-@vuFS6d#Bz%hjGvE^E9m%8Hf?QJMK zsU$;`66=m@@`}L=*ExJcHD5kUBE^q_s(?aykoN}$)Pq&XKwd!rh#)Om=WGf8POg1< zi07sAyvG8XnR1O4HX$PdG~&suOsgp}`rT~lAr*eE;H3Ig>>v>ox1@>C97_8JDNNv@ zCjxmgn0#60Hdr4ps8DRiU7>)6JdbSm`W^%b9Q@ldp82eT-bDnB>(BZz5W*o5u6*8$ zCy`7R>kj=TZoc4&px*So{*zVpYcMXs-a*_w*CX<`r5muWo2Dp#knizPjcMT$B!okt zp#uK*kk0)oZX^I3hxAIU7ZQ1N=8IU zNEV5fTexn{FZMY6p%|g@=@{m0S&TIRg*fUHx-WZiZ6t1g2L&RCR+?406 zz+G9{N+wuLKJx$s;^m?eQLkhTjcdv`Ll+zJ%lE z5~0ef&?)A;ehp-ha4-FNyqbk!&uqqox5%tHpR(NG{bYp9y>6^kJ{;@yLPH;d@_Vn^^lF^9zqNan zdAu5BgBpxfErpMss=H*%7o1Vm<*KIhhUq>OQ;vAcR=3J z)T~?2GQ?!8ASNL;5yC~61+h zkfh)Hmt2s$-%%i>PeeTFpDe&qqm!Y?XlDBxlix9rQl19~K^jxhIX9TG4M;@aAz;P5 zVZiXelhbvsXgIr{5=YMguMEeY&e}CJD{lwK!NEx!kv~Kc6Yc`c#J_MTNG>eNq%T!q zsWqAiIN^g{hzaw;Kt*NbqQVWR_#qOUJ+(yOP1docDf?Lse8#V(tIqMozg*N#DKD3L zau6xesPbs$B-EnG90LiQs^-I%?=zEw$xV;TCxdw!AV|fQ(M!qs(icukN2(1I1AkaY zSNA?&ImTC>lVVRoMqhvK?@j>XKgu8#yX`Yy{G+fH`P-6Q%19WXrMQ`gJ>c**ZB0TN zS`V=B;mQ@Oe+sU*CFvP*+`iBC6tV}fu-c`2!b}NNI59L2d9xSy&(1)8+<~*2O)>#T zOQTcRHWP7KfV?BwDrX~m7F(R`+al4`?m=b3W-}W>YGgLIS0_$yXXlwrOb-jKkT6f_ zu;OWyrBc{)C1IREn!(;v`B6)O);Ov$XQeS+cAriqDYI&qS0Ttjrxj6x5^2U8QZ4RT zBrNK%8k|J>O_fDO0sIqCC5XA zjk~si`hMgwvUT6{RrR|>^b6Po*XsC=4FN7dfizvYWo4McPGoc-OY8(aDm2#1bzxmL zvZ5T4eY4?l1mZ+4<5EXoK1pTBM;)*&d?g#cF|A(S$}pZbII67tMB*HaDb^3U<5PBi zfJCm#*%BS=(9h>B&nRC~DHU_W0QK&(c0W|}l#-re?-fyw_>y&-H)_b>ObvR({Rui; zKq?TAjilJ}c%QG?p)BbWEh?^o-2BbyHC?QtO~A7{gy%)An2x;JTkUK>uwley{^Lwh z<=05c6@?zA)Z^k#a_FoG%~<@4x~ZRT1E^w+7f3pj0kaRv%vR#E?_+}rS}9m zNTpVLk(}wG>Yy+^1-<=iTB|WJeM!?V33+k4d2xx#g{`2ZRhXNKT!NF*aZ_uABL z5Pf_X>(v+ZV$_i6`}QZtzDf{E_B)8o@+gfU$OHv%B3&(iHoeJKg^}lnxTZ_G$Rnm= z{Cd~*_{F$@`U-a{4nOzo>sy)UdKKrkC9|YQNvm+w0PzenCTG&~z%z9b37VFdQ-2s7 z%B?BCFD+Tb8?c*hA<@1tFF+I6AcBEB(g>Kj#b@vNz{_~L1*@I_8|M;CMxJgQJoSzP zHUFPH53U?xjm#Lw2oSD3ZDjZQ1)`HqB_KnzAl;KqFi+REfCSDv?e-a2+lE(sVp+X8 z$nQ$0TUf9=$7QloR7ob5Y%bF>L>&@HiFOA_e+UGUb}F9F6Xti@dh|nn(qELMc(fO zQh!}c^Km-O|E4%&`As16e;Q%?hvMwF5B|4`ynhk?{J&7-{Z8BSA4MJ$+sDw>y=$n0ke!gcrRp-kkx0e z#uz*6qP)eY{Ia`g(<*NaMarHPnD5PVF%jn~1cNJ_N!w?qvJsOnV!B=FbfE^04Dx0+ z`8rbG%+8j@?a|#wK2-qx$~VcF|N1*el65i`>xRx zEcxV5;wD>UFnN*cpsm#7FSHpTDY5DYDCbQ!nC<+wDF|i#s)1Lfg~01=Duzui{J1noxht$G6k6o4(u`t}w3w4;di-*xLhD z?on#fK;O0hLQRkjYtS_z;()oC97f(?g&wx4jUFHy7pCDOxTvmRZXWR6lYst(V5mJ% z`O9ku%KGT{Pd7|m1whF^_}pnSY{++z=p;gQw`mC_?N=YdRHcjhIOP!&*VAT6A+_)+ zPmcJrC3n~rQ0oZJ$UX~3$PvLT0_bny4xlY@u&|!esYokEwN){%sUTj=w^z^Om4P+k z8zVS|BaDhm@WklLNJMn%SRphgU`Uz{C;)zz+~OlhS#KcML6bFG?z%=|SoE9Cy{Xj9 zhR*hIwSpco*AN!FTwHk^jdbw2fSOo^nyXzK65WnKA}eZ-TDmMli6*4wP^Zh6N9P`y zOH-OWMsZsJMvB9D<8S(LaLZfod#;{`g?H7?kd5-R2cuiL4D-&T+#xoNv#Z4(Ar#|0 zF&Z@qU02SU;q8c$s0#HNxZYvw{P#De_VAv0S8K#E0Ly5UV7pSLveCb4^XtEyR z^E%V*fK&S#0kFhGWDGObL5UNu0`HEvsO!mfNEGSqr`oF*jNs8DHS7Gx!bA1U*dye}7wtE=u**9IVDh=} zj?)U)AdN{ArDrl-PJtEgjoc8S7KOq!2kX}tPRZcB^W70Z)up_MZoOHER&)zrSGV5p zQr!KK-(Wx2=I?7es7Br=U-B5V>+T7;2B1?jIeX?qj1b3n*{t&!f9u*brrl{P4m_d#Mc#3(Mhu_a_%W^Wc>@lpdRQ@RwnZ*gsxp`(UBx;9_Q{W zzJtv{XF)o-qmUh>EGRz`Qk0O^AOL$s?g?^5qSHg9Gy?asX%MW2NH?GE$LVraABf9d zEH)!KskI!5^jJb9ip z_SY^B8&MtJ(XA&K8lb{9xbNHb^TLeCru?_@S@%k`_ihz-s-!BW?yDVjHqlp~1wL;X zcVPKuQoTF@XUDPkqxvDsP#bf$z>pnB3fWGM z`;lRvf32TJT;>?V7T~01R||HTi`BA*q`xhCaukQh*wIib%8MpZB3zx$jCbA-_mr=t z!E5U3zQ?=$4MXo!IQbKjT#;O#l=$WOYofw3v{n{b zy#7upy#Rc#32DS0zEh7N`I&iW8h~1^JO38BvZroMGDxExu>CXtW6V1ccZm=#h>fpA zt^4D_;>hj|$4_6w-FIH0Et75(2|9sc#4$CkdpD4#$#M|PPYcVN(Qd*@VG)z@DO)s? 
zg5!@M!)d>F8|>J!qZ{?CIa91T7p*n!#^KM$@#Sl9+=FDcp`XM(RbpGLE}W$;Igy$a zAAhoo#0^y?=qp+$yKGtr;1)nw-v`50P84UDDk8qw$KUP5Z=LRcIm%rkVS3_SS1pAD z0c7NSJxZ@aBVp_rtF+*5HYhKizceu$~)QpB6 z9SZgHBaxc~J+^tj9T-zU87~T>{ZX_S%D9j^H73q7Y%J0z71B@U3|V5O&8fnGoPx`5 zIM6dp8Nl|#%mD-^Sb05ZGUuZ8xM@3#j#>ao(Wg+fUa4$y>We*3(q#-7*$vLMx>SyC zF)ypv`b5f#j3|w|x4vBgu2oE&z}Dyc9}7}dU>ah1gjU4bCtuVOrV>0BmkP=i*Lw~I zlRfqF9ZSL)#7W>J&CpWqX(r@zw-mP<7<7F?LnqaYSJgIy?pYmJ=~&C&@Z+`234`tC zvm9-;t1n9BYo_pmth|CI^?6XFGhrL%NwM|7dx(qiAq#GALZL*1?TH;5v*Tanz56Gx zye9J}t}8rDG|~eMIX$<k1Vd5C$3~m;hh} zqa)gL=4MN9PnFZEv)@QkLlJ8bb&td>SV<4#QD#W$eP{Ms{_>=ro8f3#J4WfF`=m zhB$JLuCpdKBlyO7?M`$x9%>Di$+$7iZZS_IqS1FrcI{KoWfbk>X!6@25m9Oa`f&%( z+f~A=M-SL1ndm9!-K)E?NG(|E9CL8y6BX4=mo>%4$|CrK9`i|npBrM$!Q*bkpy0o<8Hptj*iAnycpgGGYTbOsJ`668c(I;q9Z4>t)rIr*vBeJWigkk{2h62QT% zF4>IdJzzn&?zxO6#2Aa;&q8}lM0F%09DPKFi8DjLUa@6C&vAc}tW&m%I@CuyBXpj? z{FqpE;+0))3jh8x8uapM2Xu>lW{|&1&Y1|=PcHYx^0bV|hB9gc>xCO+2PCnqfZh7z7{HIk8R>0ewVKrYb{sf{pk!V$%8{?#FSm`5R*qZ3hifg7smuJUw)c!Z< zsIw89n63tf9rODOg+orC4wpsKGpj9bmU65{9d1^@$IdopZD*#Ny8nAxL>xmgrD>2C z#-)aHY8Gf=ULTQ6PclwU6HS0AzAgQL3f?HFA_sC&Qjiu`xYnEXMCA1F<|Z>1G99U* zOQ(QjUeiN;VBZ0dSj7@wh!5n4e%sEi)Ap@{&Nc!8iBQN84vz|@2-Jw*mihd5n0S_P z>`dqA^`e`(>7E(yC{DyU5gv!>o-d$HC9? zDM|MtT6lduzMzR3>h9&!Y9Y-ERJD#LbW19XPajPzngDdWbtlJNXOBgxEp_8}IQWD; zK@fbt+vyNQ;SY$}wWcBHYo zZ4(PDmxXf4(twq)wae<^1+zZKGhT)05bn+ZVxnwEUeFu%#B!uY?CC%i2>dIKM_fVC zqqQc?N3{e^^DFLZXg&kJYul@-!{ze73AE6f?80eYaFgF_E(7~p+HiCsAAs1BwQvFZ zf5gdrViOFk?RKGdA8sm_dE+k23LD0uk5UYy{?>6>Uo*Qz62Q9WDc5?mvy>-`Ri2d7 zZPu%0)P)7TB%3$6vgygq$=}<55XgPQe6VDsQUr7;+{j_5#w{$>5h7rvk`mA0Ke5n3 zL_b!-m8{^z9EPZ*GL(kP%hCfXMv_0YbxBbLQwh3>4I*esDCt4WSwuFaC6A@shhQ zTqL%~o2pzKm&E?E%&6B;b1*iVzG@}ik^$_RpQR027~ylQA8dfY)|~hMKzsamg;W3K zfXTnYcmEtP`3Ktfp9M_*72o@BX^;O7UHhM)J^mB*_HWuFBlF+7`2T|TXssq~x5CW zZk;r~?+*+-CZm+m@f*gcf9PWI05!2Ed#u|ck90Evtc<6niK^t&`1Cp6bCprDs&WCP zQE#9zc#HE^J^d*Hf5LMC7e*aw1dz5haffYgM#WqNY?*7R7^%_)(uOIz* z2QV`L>iuPFkj=3ssKj+hJqDe4S|=}Ld)Of@1yA}A$P&&6$rQ_77$pv2v70|pzFm1X+N+f)H+e>r3DC|s7#hpAu zV|ob{a_ZXJS|gxO38$jy0bf;@nJyUq_F>%*x-i+{X)>ZCo zVI=~)uJOIcxl5q0Vyjwpo>ee_5>xEI!1RmaHPJD46<$$3(mCvdi3{yyi*V2t;F8HDlE5AYJS2ql4 zB8D!|EqTv?6k|rPV1jo^z^!c;R%1tk2YHB(1J@0$oW-!q1u5U@n*GIr{dxkj$Og&x`iU4P3;j- zy3Ns;B{d?Z^96Gtx+cICmEC=$X;083W@oJ1VN?4R#X318F4)pJFI-O+< z_qY3p1MB)rQOPuq`V^Ib~std)RBErIkA3`$ndga(Mi&6m zfZUPMB^PTon8f+b!Ht=Yu)}Z>`e4PzegF(i300jnm{~vqpKRamu zdJF$M2kn19^?y=g@_+v*{~s!vjrH%A+JE7oRjE(L9FCxFUs792f)TSt+L@kWj{3tk zflE+RZ$h)FCYUQE5hyL@c!%_Iobp8!EU|S-%n4NDwcb2#f%Ds{$?uYmWK7$tintgU ze%=?z$fR#l$WEQKdA_rtmEgXyu^Dk!<$S*xPWx&PE8>v5?>tnDO`q62E9(nsiAn17 z-5?r%Kp(`VwV{j7!I z8>KQibXe_3yS`;=gXdt2{bJjf2#572?eo@I= zbFeg!oQFk5{g z0UofDW@)3GKGB*7nixFgsYjjl!a>fh zKxq6HZJX3z;EG(>gI z^GO4nzT!$SS}cRpOoNB=1fX}eRdFZ}3#>1q2874NGz${_MF1ypI#zUbiSo7#>m@uR z&~XMew7Vx9=2y*_MV>1N@C3on+(=O-qS{x8r4DJGTtp_pSTe%)BO zBBWO&$r35exxK>oC~bUE_|cJ40%^WfustUZ9Y12q7|FT;*_0N_)F3UyKS%7M1MrD z)^y8FTfu*WYKsXT7%}5&yD_To^i>d<{}IC9E+r_o7=TMe^`JRPIAA8}ZI|KJ$6B}7 ziRX|@x7SfxN~--zCk{tLNfopeM zW`rLKP#qY1`l;z2ETtN_mm8yJ-Ed**C3P+M0!UiU6#J$T`3r7vJXunJjWndiv~x!g zrywa*J`lr)kNG%3JG)IF4?*)PWL{PQCUAyil(x!sBPY>Tj^u~>fxwN-*q=$4xNZ+< z$>Nws+?wK{IOsAz$%=2Mh((tJ=9wiX0)Y5Tt^BR=%UVY&vn5V|2GiGxUI>09^)#E{ znJ1aDqlx*fATd%I=!8TZN@O}*jk`4646UqR9x1!YYu<7%T85%4-j}Y^&AEFZkCH&n z1rkU7_hDF1<(Rl!290hn;rD#|2~x;0g@$&D^+eai3~}L!dlsXvVW}Q}1UtacG0hF= zKW^y;na3op1h0QG7DSsR&b2Njn_!3_c)DQ#s8-ouE zsGpjNy^xy@TW1~>$<%vtqkjLW?yzgk#{BLm?_4ZF^zB}~r{Nb-ty9<;c%pjsORz8f zb}gTeL27#XtPujnh_cOC3Xl`I;hl|Q+MC{7eW9R+4k^jLrY?d8m>nQw!mb6GBRP)> z$RT?(V(Yz?T+lBYQw0Q8)tb%hHu<>`$F>YNe 
zWltM{QWoM>w@kwON^zXJ;*tXRlT_4vM3d~e_{WpAJ(6Zka;d%~u&yKL7~PZARD7(~ z+!XX^_iVHJfD8$zhRDOqK`S3$Bq2?bG(ib$rt6`;;zbau`5bI*qa5tO*zPVc_0TcI z`=VPL00u{rdQPEC!(U8ljzemilkr+D=6O6c)LQg|GXC`^)wRKTFf=p) zr~jzua3-=cu}Uj%{E@4kZK5RKP)q2g_T7Bc^Slw-1XaU&L}h}>bm~uEknPhAjRrf{Atg9|uu1?D z9xS(SM`CUY7N_80p*L_wBTr`-fD;mGlr{Xs;8Pds0va(Lbdqrh(}3sQ5K>IF>zV;M z5VuE@8Qy9nGHvwTQY+C(r8S-lfvey6ag0u+Gc6FkR8ndM`Wj5bvn zseNFwbO@kMFGN&MvDkoyi7x%S5m#k`snf)B)G=0!@tN}$iPSkiX)1)}>7@*mNDYx9 z{J9Y8E+SLaQaX=&_HNvw)&;&Xs1?|9Y4XBA!# zB?wwKF@wCpu!r3bo z2T##;70?-JR3j$1ShAewq9<0rxJ#Qs*tNoY7R?~vZ=a3?x&GQkH&_i)mLL<(9>aK{U zu=#V+EECw}*7_r-LE5<9)Cy+fO$W678KvOBJf8J7Ds4t>j>39As9z<3QuxElgoRBy>ew++6;Cmdll|4{*k( zTel0Bf2^DhupO&-FI7qjk;YoCU3nrwS8)ZU%92WsfAUGgsB?wKO{C)-!nK&|%Z{FX zkYJ=MPEnbFhuMmqWK#4JPlp1Xs`ftnDFai02l*>KApW4(IX}i3)d)=XKW>M??^KJ> zXF3&%QPTX-ZY&bxl|s2tSMIYt#@4>z4_)N)?GO^*JZtLp(fq(%d?lwMPq3YU?r^Rb zrz&o7QiHbCerc*W^}eUc@Z4$z5<%R%%Jj)KH@60+$WX2@!_8j~j3lenB@nK?2k6S` z%Phs@anGLJDn-i-OC-kmI|$@5K`t!NaO%Zu?8->0g!MZYn;?CPbc5m7@10Q%V0Y@p z4vpP-IX)Cat4Vu$t;F(s1FgrQc@Ip+yK?)hbAr z(@&zj4AQ^wRu7#I=e&hN1HTv?LA1-?Fb6M?nYSTL1MV}xD|1irYSWV0}WMk;N3O1sWGEs%#|uL zN#Nu(aHz~;sp+)FV#oFCD10+i zB!ql=#HIrLVhgR|ng@*`^y@4SE|1t(bHi%y5x6I5_+u{w6TqBBM)^wA*Y@Sp*#%lP ztrK{vcNFB41|oXxi3u$sxsjlDeP^lhlqgAbdco9zqxuoL?gwH-R0jdAu?n}VS9?r2 z=O@|{;19j=5t?~4ZZJ?NEZla?#Ifvp9*VJ=ecv5eo zICX4M1zcV@R~U9n6}zlbx`6p8hOKr301Xre5s}1UMs_px zE*Sfp)0W))G3*;$IFM@n;U};!s30+ZwwD8!iO_qC8>&n{(a#W`gXjCaK(>{bmW1+^ z7G#>oI=re0>b!Z- zW6x-F47LPsrMwfi_ddOHC|Fr+D23RKG=1w>U$nf`A(%{I0+R;Rd*X7@x>)*L52^sK z>u=hV4*^#^l~~vf#NX5f1?@rfVt;U)KPC${KZv=uw{PC{4^2fGxoMgjL1GY(=sS1$ zZM82B{~Ur9^EKIDsnC2te0u{Wyn^T37V9~(DBAn-j*nE=)Sae{-G8%uk_jt*BMi5t zl<7VRMp@W8o)ag0X+AStnn_#V+6bzor6V?nHNj7!OWS2}VnpB`?fzO(4E|tclqIMz zSSxmq9^422Ce0~jqUF`W^!-vaTI3x~q|-9zC=bykQV7nu@OQy=o@ee5WXopMQ=GYj zG?XGH++F-Y_BzcvU9Zks7OYcs;f{L0Rqvc4wCAFB6}y1dIVpozY21$DPpfhHWBKeB zA_+%*7piDnyoO?1nFIoLojtRejkkR1#JjI0mr!t9^dNU-qX6&O^|x1_>HBoR+7X~w z=7sOfcu*__8&|XcXC1i>b|wUn6!ui!tUG}%SRCe_q7arCEO0tPh%!ZJKw08Nl>$<$ zZO{mp4pNd*pos8l0v$4~A;JZ7X2H*sjKM6MV%0 zST|nYiKaPX++)qcW^-qrm1-}>!}Wt&AO$ZlyUQY^+l8L#kOM2ALe-SKgh$O%uJu|? 
zdzFeeYe~D9r4ICLzNr$AET)D0ue>A#&4>15n~t5b3a2Cm{2^#HvVdxDPVF@BNVtj& zk)x_jVTx2P;2ki_69R5+-Isn&s(o?m4c9qm?XCeYA7O!T>Q#Qt)vc<&+;E;Wg;Qr1 z2Ou;=xjvYXb8j%N*+=7v;w{b5*aDMk+?6m@?q?U!HDDHA{piyy$SHCvgY_!mDi|nl z!_S06M9$iHs3do1Hb*eGZb+x>j`(mf_}go>nX4hP(|A+}S+S9x-@#6Nr!UaPuETmY z@qve1qv6G)6O@=lV#jxTSW`(^+S_NjBNl0j$DH-KV?4jLZ})<7=i@b`hSI@Pjm5;N z@`S;hDM-7Wiw~g2a2W7?d3p(Yk&%@w3~a{4c!gWD zUsOoP=qdOk0`*p$%>SV(QmICko{^LMebI&bC)NB-$}UcR?u2p_&)#o|8OLOyY_1IG zbdMf2FsiTzjf9~_(NM;)4KZIl3c?7vRj>J+tR@virPvvV6CFo4nvp|F`O#-XpT|g( zbH`pFc4&(&8EFt!8K7diTPg4yQ3yx+D<9h>l;+yGsAu)r#I)AhBKNO&sfLudR{1cmQCggElh>{N~)&G4-x2*Nm;nlY4Jo@ zDgqfuG;csqqCxjOzig64>4D(q{PqD9A?DWEz0tSD-kLj zU#oDT#aGFi@`tK@{G>@6pdq+?;4Ix_8yT{N(Gq(oH269JaC|{ZOHMEIZhc(jLYGJ# zyx!>N!yT;QA(pa0r7q-xfGoP89Ll0CAH)0zI5=W$DUW_64m*Z>zf=q5QP)r7up=<2O^LcbSUJ zSgq$<(TndzP&||%uYW}IKx9Hu2srBR@_%GljVI4AnF!c*4YH`4OFPMG=dcJUH@FM8 zLc&kxd|nxv3d{hqip~N0D1e09t@&iK)C6k6_YYi0;CwBoC&7tE^<+ckQMoaf(lOt&YWmc)l3#fZ%;+FjC!UB9WrLIGbnD7v50`9*;>T%QHtFi?GeimshkGV;Jc>{cE(zD3rpG>Wo0;R3aGB+V{sz2xN(JJsS0^sBeYw_?(Vm2 zkbN=Du$n$0w~5wa4^rTDIV>8gd>uEo1CZ+rwr6xh9muL*h5$D~^CdTzBe}{xGUpv7 z76iSjgL&r8XJa0RmI3I2vnQ84l|pOKNR(O_g`GJ9goVrJ#Y|Kg{3K`v@9$FdNh^r% z?}Rz0ur4^RW18l7s4B3?14xX>lk_9jCVJS7K$p?C;y+{A?p)<`S1Bzw)zaf?>mC~U z@#Oe|uzl*mkZeY&6{#&im+~>?c;0n$(Rn6#qJmdBb}XbpkW=D#x~}TLMZ0vRdfC%n zE!XhL7Xj?R!-+iV;j5fAfp}ayputJ9%a1C`$%|y9y%XgLf%a})v|H$ym5m^HU}3%%H5t|KdX&j&VK4YUG9Cy z33vD;!{l4>(c|AybG}s+%c9>)oQv?5Q~8OSbG z(;#+IgCX{K@B6BjXcF2W8c|j_OKkY76M=%{Bi$w|KT90KHn^r}P1y^c3{H4#K96(l zX)|g`<%E=v(j9yIpF`vl8RU}ZCCii>P)%DDPJiFG+}RR9?u~d=Wq|qkJqi`VNia2c z{f(U=nJA`-Y*g&Ygsoa{u?9xfv|smB7VDX}wME?Y2WgldSQUO+Qny&s4td#dlJ0i4 z{Ph4UFd=44FQll+t2+#Z(-~6<6P))xyERwrO-2TUi#RW9QpP269=N|wz$MN1eVnhl z1;$Mjj1eGKtS*M1QrPinGTB8jih~yn5=u>>m~%P^O%r4F!JPX{^}9m;VPr(GpgWjQsUd)mYKfV>F0fxZTCLLs!P9D;)qt*ON)+_N<>!*yolLR`~uz_p_v zFf2fB?HsYES!I7tkopWnP|qm`h!(^garJwK<(*jWB0(gmxPU+83YOS+qHWixcFo6g zxShK^q}X(aH9b_bbMRm3PhycSQ|XC_>?d6pk4v$<@4Q`zE=Zd5W>D4XZ!}wnhcwpY zj9!!U8;kO9#iqv{My@{E4DIA!V%{M_*k}U)q&*sCG)9K6ylnx`J?)H;LtjMmEWw7T zxG$?~m!=U)-_8cBck@P412UwxMT`rwY>IRH>-jB%O}6#Pkmy@OdzHT=f7?pv8Xao} zC9y*=VDo7`mV-TtEk|K_SD8qY-nQ3iE9#Amutv+L548+UOyws4xb;g91RIwM4OsFn zv@9_!&2J=sk1V?c!^+26k5RUnW@!#)n~xPKL;s#SLciUX+=`5ErlCITmZc;YXPhR~ z1$9_S*qq58kwCTp=3d>NZqMeWhcgxJnpbWfkdsS`4DYct%9xRLY~y z(hYZj3S6Q9oa|Ky9;v4$Kt}bJ2JS;8q>sTcXU_n;L@HMRoCa|TSSKvQ)1c@8-^pP^ z{8-|61>jw44^a6r5LPG!bAZ04JVlbPdUGG;C*DMX{{d}qfi)+SxibT5G6ZT0+C3Fs ztwgS8x4T-7@W44b3(g0=OE-YLsYG3$2%pI#Fs;t@*wOI=SMx&yM4;MRox4Cv%KtVz5`>wMWNPfoBmUzG_(DO7v&``~q~2en z(~js2>tSXjB%g~{oBN$STYm>0UBaS&iH;w^n_oFoUL{FUVLS4h98;O?F!&;Oj9!n+6l$ zsi@wss}S|~g4~Y->mn;&c5p*AfnZ?xV+MsUA=zd$eYTSY)?Y28{fy7DzOGSR;QIq}tAyj4nhLjhloz;mbc6>t zsOi+hYkl?BTH%68_#9ha?u%QUM)EWF)GfoBgZb>W%$?>^Qe=|lnr{TZTC=?l{Ndw+ z_rT^}&i7o^V&aT!tPzHy+!9PWlm8BQ_vK=tAHoe+%Xd|%e68tP8zgH6pa3|tM$O7; z zKsd3UR}H8$UPPsd11Y-k!DAI<_Z#v}5@UU6#(A+rC_*Qf@46#Cs`yUK zJ=JQQYPf{gmM!Z}rDv-y_$=qB=!|c&;eZSSy!0=QU-*b*;IQ6&&zPyo5so|u?2E>!#xiz0Tsct;b;JXU4eISy&pL)c*;Fgmu znk=~7p;o%>w{bBpNfMLEL8rg!$4GI~ypav_h)sTd(&zep?<4-P{VfEJ5JrL?p6+A- z!N7vVG1eF>>P$_iE435qSqxKuBjPo~2F~SHL>te~UC4UfQWX~Dr%{XXfxo-la7C6N z^70*_DHK$!2zao6=RSXP6Bcu}nS;?;h<*r*BtaA<0Orh_v77d`H*N;labQbQ!9-16 zN%eR%4#(e;a|Kdl(=u*4d=ONM-}P`2BTi-%zpJ2D-D00}s(pcTpi0mOCclsgZ)$WR z6v05VieU@UmLAh z<%7d=7LF+bp{YkhGBi^!bOld+%F*-gKE1s(n_asV1OFPSD0oO-x3!m&ODN>CaphRz z@R!n*aSaS)sYSXMFkAm1;*7Y8I0z771L!w#dq2ucp9sB64`Vx4JEkr*Rke(j?qr?G zWH#2-sR#^|PlIgsY<`NI6=~ql5F7M5+h>2^VRZ47ybxqr7#0>*#?Qp-fKkK@0)5&6 zH;H?Tee)WZf!1ceF587_HAKcwc(EjN@_-N{&s5cXcL{Tiu#$u4LX@crM%9ZBn)bT( 
zvGFhx&@XW+ruKCHyB+2S8J>nDq@n#fFRppyB5$rFK&4=u`?diDAIUPnWqME>4T6?o zybu`xsa>hJIW)--P%Hk$mNGogXzy8A9F6-K8R`xRW%6eOL)q=WM3))nV||P#gqv=T zdgrtjjj6wxu39YU#t6$;%f_KCKvsGDsbYKWDm?X;>mhKokO@ZQ%+F20$_Gij#$(^$gJy0B28sH$CJ7Eik`#f04ZQO73TKo5XSLoQ$9DA5}0Gicw(2kaw_Z1 z_+#|dbo3TFg|KNrB={2}j%AVZijuv!2V_`qE`sVg*$`Z9-r z=fY^0i_5Xzu>fX-D@cB&)Hw%o?m*-Kl*@@32PXXyz6G8UkAX<~A#+%cWWO0|h^Pv` z3XLtUKv9PMFn`BS9`paTSn%DW1ASvp=fXj5Z7G_Ef)wHpe~Y;#bo)_}aL0LoTLg2e zy`XGkDQxG`wZKD9@5w4IU~AHWCVd>5D#34%M#7`1kK7IuH>Y z<(XS{541))5E2z%f>4QBPq=i$S4f*_-K74~gImV=6Bc+l`2D zXr17go1$;&LBlhInGhG=j=7HHpC55EuxtyB#`xe7+Ix2QWEM&;+;2rfQz|#4dk?v; zTj{;Xw$E$*yKs3V3lsK{q1W|RBYRPh6RdRH;ZO@~O!+j@irkZUDy8!_CA(6^komS; z>h1J7(l-3IlQGTGfSM|kVc7s3YU>inM?WOP=QZom)=8EaA12k0s~z@Si!>zz!iDRAgs~$TNgh7|=f3&St8C>)9qGr)bVD>1b!gXlE2c{z^YlE`5$sNM;0> zK(_~=h=+($Lsl9FiH_S&_=tdE{~amtsg^>euw}xmULZG-_!iC<+uh5ut_O}HF7{-F zsC&{^5dMmwSJEX~-(c0A1Lt(bnW62&IT7NW*SVNuQufj|1~xK4-)FVs$%tjBSBE8T z4~xUV$Ow24p)FZ2kKygH5Az1%Oo@IF59tSk42Oi{EvJo@L~<@mnBg5~Zf)qwWGs4P zE=1H6|5Byp;u{p+t=l&0ezf0z#KM?O_D6e)bt}lGogCW04(X79SQC!Xg#dN$w5Nr3 z%tW&Ls~dh{%-8ae#C#S@>tSpXkXHA$Y<*zq!yheLx zZoNi;Ol;bd73UsGVtKX-fKX~x1H1GJNEs96FuOxqVn&F`!&)suF~}&OF>yw-9YhKy zG?EY8_oMl8n%u0paNZ~bU_hvKTe99YuI+7UiZaJgGi5jQx7;4N4$OtL_o>Jzs%RZj zo5jEy&^dipzCN0%2x-+8Kq4D`}Zf`@HyzU01{|Mx&_eRU{6TYJK<2{)ElI{{mucT%MI6rzLxx7kMzeskwWAQCyD-{mAQPjpTFbr@7DI!_p- z3glD#HG@wf{E)F-d%&7U)US8>;D{QC@$=#LB~S5tzB4@|Yz|AKkIPd$qI zdx`h&Eeg=m4i6O5S7J)WvL55&<8cnv2xat1ZV9fiuxBXsSwvX!f|nd`p6@em1$`G7 z)(?^g*zei(o9U05ga${6%qyX;g96qQ^YPBWK*M^UYps^G#@W=H05}WBql*f<5H@RS4mpEvj4~^%q4VGlmmd z49A+a44vIQYWe`bno#gQBd z@FLhHW-KUvy!&AMyo{ImjB-V-uk@H{TH-+1< zjx^0oC}H_b`~ZKr-y`~Ae#fB|*Fij<6JY^Mi{z808loTedi!Te5Vci|+x6P*>=g?h znF)yb`-qiL@8axyK7r04JYf0}4SDy!8nEBhkj?l_47o32BjpovXCz(h(Ja9WW6<>N`cdJs2qL?1eKf#289Lmh?%0A2m&2Y~C?+58Vg@qcO-`@ayy|CN9I zCv=kiA28*AB8vYlE&CTF<-aA0|2xd{e}X9fPps#^iQ+7O*~$NkC|>n1`+FmZJ&)>B zh+(ocV0N#j(uxyw)z#+b+9ddO1QJ4kfFS^>8`=-}37>o3UjPtQ1vpAI)tZJSUL9WV z_b#*WnakUmeZLyltC8sCqhk5LC4du0R-l0!IcsiopTjA=O5RK8H;q;T1>E8%s(rs=dq^`9c6)@(uS1Gp_!Gb*J^kA0+isA!291pT7fXbk zd|04hfBZoqjez_FVx93>W+?)maFXW9D>8l5tnzCVeC%Fpjn|kBdwmFG?8a|P)U`L6QdK9SfDI@Pm$@GP z{ll7UBwZ@ww-w<{orC|Fc4Qo`eaU;Q6GUB2Oz)hdACl=2j{JanyCXv6g)4@q(n!Y2 z@V}@#ryyOzHB0ZZZQHhO+qP}nwr$(qW!tvxs$EsHPoJ5N?wOA0o{pG{iTLyC{~|LZ zFEVniZ$0b9!$~eBJjdn0OU#FVz;v@AB6d6sm1XFNeOjBF%t2fQ1H(1&nj@$+- zyIg-83P6{-zo{TMLScUAx`ReMMs4X?g5VUY_2x9hqurWvo?0U)E&R z;F*|2gqFu3V#TRWN3|rji*CbE4+lcNObc>cD7J)PCNAyBzJ^754bppWFTK|lQnehSnaB?G z7f5+bcH_z(+p|1$io!+61c_TFXl%l)@7WO$Z$u8}Ou0+Z@h19MfU<^lh~WfC)I{xT zqsRVq6R;bt?2PLp(;Z79?+Q~K)QqH$?OLOuGaJK&VV}NntK|%9`^x*uQc&mbr5=tb zU6aqvt%G?ZGgv>C9haxLb=v0Z^s3O%l8zH%sSeX3#)vv#+4OzZNRw5TZi*i+0v#Q}#>}=29upYj$$h}` z)gP5sPbK*V0?`x>m5&Y}#$j2v6K+w*AiqzX&10ofLZc!G)xdc{F4GIXEf>tfcP={u zQWsxhf{w>bMu2ZBbX3ByN_vye>Lxo4Bu%u)q&x;; ze!>(Jji(?FIm+?vB|%z2T+8Fl*{^5=!4fd1^P>SbS{ZKpg-lhU8i+h`ZHYV-^l%Mw z7*$aY*E1DysqqM4*c-Zs=P_%~QcV9r!k)BroN=K$879kRVi|Q*oSG@U9p4|Ol-g+X z043TQ$|@u#SiEhKtpf&I0nXRLM}Mu~UxWNA2Uh1{iUAeKE@L9s>0p^iM5hwsE)*PZ ztqU=dh4W7b-lV8CbFGxIFQ6*U@x9tj@)deq5CGlLh7@v40r2KH={~}FOBQB{DkCDT znU$}iXuyltO=(~6JPQf_VC9bGGe>5YE9R`5mSQP@SHkxB<+iVmWP?bKC)w#ul$Sa> zhH1sJEgLc6AV(o7M6C`?CB5dtG3=NvOr(`gqe<%X_U@tZOP8&Ufmr;tNivV z$zVJ5WCYAdcfr%qIS=bHjP9>m9S|hDC1z^^Piy!~!lFW_#`5eK#T5YDF6V)z=IX2j zC?QXhU!6u|+OxsE!im)6GjZTB1d+Gghg-U`)g`MTsUz2EJ+Y#qIVM8S!Z1Y;II%s= zF7w0cSnPt8lR@i*7LJvnj`?X&562l6rM6)g+Q~s3f{6+SDHfenf}6^6Duakt9LtOz za}61~fCkKulQzjpz`VDR#6A*ggX&D#$3V1#ha#?bkwxu6F6JP#ELWpWI-V!{gfn2p z#@vFHiT13!`-WHT*B#Ro-FHsRj-7l#!UqHTnmgL{)0hJvPitHfsd^Qx)# 
z5lT3WceW%M<8(%K%Aidi&#CZ~I9e*Ks_BeqJ?C(OT^|gy$ZUv>CqXf z3ig#Nf`0SD02+VJ&og2m>qLDBjb}&GP?v)qa67VNblc;h_@Wfhz74!1Z3Q<&fFPg4 zzN5?YD&&;)jXo5BL~C)yYqhyvQZ;z7ep{fXL71_t5mp+0xO7UsA^}X;~9r7_)u1aOF;^GxY$O4 z>We}p2QVR@tc@eCfxfzQHK5H^1j$bge!(>KfWOF4@CbRD>K+gTuYHk?#dP8$q1X^@C?`#p+FHc3|P2Y z8<0(w&c|8Rpx&A>?io!G?P=9m6Uw~ybxlVq5f&8K@lNMf=EAXiWjOx$wQkd!SZ9}h za4Tno7VLolX#>~i}hgH#^`vmp2vhc%x7AU94sxM@Y9EQ zDAOkrUeBlE-5iGq|1v&?5yf7e_dCdxaNx}{CW1WB2=cJ-IQjiW-c4chq3d=xwG2M) zna&7wrabqP_zmMJ+mUB}1{YkFP;*)bXT1^jJ45R%R52FPl0oi^;LwXSuH0U?U^B2P z-eR-z-EqqlqD$g zJ6WL3pVV!LKjT_svVC5qnQquY;*j7*;QAWgDHU~sVE_@a&5lRJRk3Q*V3ZfF2Ql_b?;1bkaqbjTuIybQujm7M-sIn zl$n`i&$ZWS*O*OOzfT^(Ft80;;l@sOt31Gz^8aYdaNV<8P+Vp6Y({cYCmOl!`(O?` zT_4XZg&B!;(5Im-9TSPYXQBy#qojop^)d!$gZGpZD&@Mkr+01JIAqK$3FN5HURE>M@92 z#;X?1&NoEe5STYn?6sBwhF(Qu6|^30C?fvEZsT=(6F=`0ztBRfFHFS`VkE}JRkmG;_VYyR5SC(4aKo}Q^%NOVJ^D!Z^!m@Q5-hk5RbV^p}{Tq}* zhqviiLeWO;hVK=8h)IeGL?4n%qQPMtCYksX$PMW0Xs_tklA!fK%D5^ogYMP#$Z}eg zHBEwL3q+jaL1%UDP9Eb1ZlAyE`kg-+v2^v+Vvy^0*E!o`@sXk0tn7pLJE(>F^zhGn z3iS*U=kn1{ePx|}Fa5|nHBJ)!F=yeTsOfFpikhD{PC<1ad=xOVf&f}PxcLE?iZ{tS zamXz>twEMiB0#G+nRrH$Uh{Y-aB8`Z2&|WGI*Ymzvie6V2i;{H=Vg&z+5(+350x<4 z0;0OflyZ6&L&!-awa@3x+S-r|?JbsI!2DSmA4nnE zGJZfKxsx*-`?YFgE&85zgGJZdaYbhrGvEL?1-Q{kQd^$+8Ub=wP<@D(VGTMv;rf;M z>IrybFd&=o!HmzR(M!pC8(TkUFMG8v8}%m<`X;I6)|JOaIky888*vjz(tw^I^13dT zQD++)85Ls7hI6+%3OwxIBz4?F@bV*ExnQ41K2nabXqFerfkb{z+rWeW_w&tavIagjW%sL)P=-rV|Tp0&Oe<89cCN&aS0FmkC?}d zSsnDrDTj}LpthxQkpMMpH9e-lu3CXkb=s<3l&uRYZ(Q;qE#Q+txb2L45ws|POFKCg zMhXTF#dKItn|qH1A97KqK<;_7-~8Qf& z%s7Drhw9O$dCEZs8n$Kq@(f2nT$^gsJyUAWmW$(DI3<-~_^CFU$;rm9M0zc2g8Pm3+h(0^)Z{ztHb}+rW!2Das$8K1UIIlBWUKS&#aa02pJI>iEM33`9u@alHsB9F(=^EB+0ki54Gx&0m zE$V>?Ay|v;FF_8?uRu|tWnA`{_YYKB%yh*+#uvsm!%dM_;ldX7u;R{a7# zDGOK1R{#k{^X~AZ?+O)rt%-MobTD297D+_-#}8AYYFjWjZp>5o+y%9aJQ@%OL@*4^ zT}bgk6Jph{J)+}qjK70^#B@1F7QoOs&jmhm(?o3uMr6F#+G8{F2MeQyWnb_7i&o<; z!=1%NIER``JQZGLPi~QkYvR}GnuKuJJau4yi92Hu5FQB5@DIC`?Xd z9c5j46b`P4%$L-&KA>L>M63$`ftT}dnwtLWUJm;|4Vi!A<*@&2kH~-K<@|Huf5*%D zm#hB2Yz?6kWx;3opJo#;FFp*Nh`Y0xlCy!c2|hPBKAo5n13m*i(?1iM|MI_(owc2# zlD&b^KNko&vHoMhKhvN8Sn!X-#Mby9_xvvp=znVvVP<3bFZnv&YFc)Otf;=Pb$diD z<;;X4+o)?Gr50uBgiTu3+;Rd0?5s9cqN&}9IZ%;bDA#a1PDKaCwBYbjNBbS#rxSQ- zQEa!>jFufb$Pzl`$lqs!99hLFluk*;sx)v>Y-&mcD@Y2(mYT0oY<*kkEm^1*iKP~` zsa7AA{_BGWs(#sp?MpU&QHDyyC8`Q1iP~^tyNl!dVdS-lMa%wDZ|q~Yyo@9ZbssU9 zM-Rvu%ncvEq|%T-kTu?JOw-1$zuUlv$xgO>N3tqVb5AApu*{C6gPosdZO51yoBp~hPgSt# zcgX#tpVZjAZdGc%k;%aY_R)Ky9e?G+=R62IPECF)`5akESh>Pfw6Kvzxo|%8=EG7w zV6rkGW{Lg`KRHxPxmNlk?2yY|KWhAeD_9$h4mB0yk{oos~ zl~xpS_(QUi#kNFKBrTn%$CcNrZhzKAlu>n0`4fs=_Se`W!wCDvu&p!HZz{|dM$^vq zw?sI(+sv}0v=dA@41f`aDASC!U@K(I7U3?a^$^e_Q5Pa}T6+3fR&#*GKzVvXOFa>= zcf+r2-TTk|4fT1j=dMFJx!v&!V`0B(O^>ZrF)s8dj%Sh2Cm-0PDw=*R3rN}ljpGU2 zk8Z_Mq%CXmhWbJsFmB(>5V2J>v6J3`g!USCsoO;lw5- z2I7C<8*@PO(0R+Ir&KFP6FQj!5UPDOYYMdtGz#|&`;%doTEGb&g)!Yur6bB;)3-W4 zM=-t%q$btOuG>T`&-;}Nhm}DoFdSZ024HA>g+mSnkg)0K;ApymCp(R&3QT3i0J2s<$Xx#-Zm6eldyp;tf5G4Pa*g zS2yJ!^JEzh-D@Lx40E8ldj~zq3w7&yY(|sM6=ElrDaq>p;7|CiO+qU&uv(!#>p+o? 
zLlop<(DE&P#Uve>>`8)J5Q?VLb^I-EzGRSPF-8w|Ej#SR-;FVM+4+9Eo;Q$ckF+c# zqOt{Zsj`awDi=+RyukVSfiEvHVEcz1mS|JYY8=s?E(M_nkJJ9qe)fK7BKoi+Oe+z< zy(1xdz{<;43xz<5)tiE!4AX~Y54;oJZnWT%IQgw6tW#kxU&uUYHBvkyaW9a!$x76r z+^q0k;&;ROY#0}wtdOG}2RnfsafDLRW)v+(qy9BkB!>9-XPGHtE*v#V9RO7xlQU@ay1Ic-UNHU#;mKWIu>}u* zLY0B8wdVtBIxJBRarpYhP8+tdq^5qltE?>E~ zU&N8~rupVwvfDlSZzx|LsskN>qHlA{H0>6?W$PV?#fa?Mhfa~uwCd%9uzAq&&f71+ zztSP&QqzAxcu{fUy`!lGvnyYSqUeBw08Az_R08 zX$sKsdX@USMXKp*h}aD#bPO7E-@y`sY_(mJL!W$1g*9d*N6HNDVCoJS@R5PRpynE+gR{J~i zMnUU^KAM{i9j@d1SGmSl7U#nIn2EKzmnl{{QeUpPD>{Ozz4~{IjjPj5)GUPvWbR&p zwbIxeQTdYD&ZX>iCKHq{yEOii*f&Q@;|O~21E8KV`hz_BMF%}QB{V35QvvmoGOG5O zx;iR0T8Kt`CJW_R2eJ}xk}mp5F=if)o;mAWt~iH?GV#sZS%lNYNlmAqw}twUgEOLQ zp9p#fp&xOH9ATOa{9f@;L-A1*braK`!JPkl#N-#{Z~LAoN=AwzVXkZw47GuNa<-5edOp>j6d>_w_%e+j>%(-c1CQu4@DBl4 z$$}EKP?;KwQ^>tec29dM2}RaZ9ktNw$7)SoTzq=P(lv=me6m1a(bIUcWs~-Bd=ya6 z{Vfo$H9ghEbcf7`^S+wn_omLK4)vbckW|{uNcs73AFz3;aL6~hpM(K7-jpg!-l2w3 zBjxrdz`iWXd~+cJ5GPRr2Ay&3BbXojKS9-u%~J!Clm|q8a~XXArq)Ljg7h#5py}D$ zn|;YZJo%=it5aoS+(^zEfN~w0Hd?UNG-sW(?y$RRlE?%$p%2VF zvAfEhCt=$(23!{lw(FIkZrD;xI$3C7Dx||P6Toxc7r>o;cDVGd6zt=+z->OMOW4^y znV{?6h~!!O7zM7q0$W^``KrJ5w&CTpKBr7q=L>0u)WAT_8$43b%B)$J@hr;#gV>9% zmA>;!v-Hr;mD7c~ABWC_NKvA$oiFLFQ)QI!B8aH;mx?3PoeLHr(VRXmG!?24)IkKj zk(qYY{Sy66zPZ}P7d^mj{<|%6S18j48LzcP13dG1SvH`DfZ3vpW=-6EDk-6LoB_0%wV7AqtTJ3QH>DbcSZgT9l?zg+-FMTm{^ zGOjPK-ucaoa3*A6LilX!HlE^)4_tCUw_QKCIAOGpAB^OjzmCUGgyqxYUQ~;)!KGI? z-Tm{09TU}bQP*Tt4+IE4W%bk|yGrP&ZKiv9B%2Q&itBVf4KcU+zQ6I%;66$!o9Jw0 z`jXAxb zcDR9ZFof$Q^8Rk}%31I^^D|7vA6hb8%zh$bo%JsKrkX8d_EPbIa0%O}4BcSN?^5gU zSj?L)-M&~vsw^hNKZY&-t!n$Moat-!`qDVLiLJ+G@NfY6G>R1R{@-* zl_T81Taj9d-v_aop;#;$>EJ0At95>koa?DEW+A+j!?VamMw_I4ijJrquyfP-o_BS$6g6?9{ zBSwPra-5BLlM~^=yIKYk_T%N?61>O}3k+1I=GtCOEM6eFR#oOX*EQSiCrfGp-#H6e z?KDrb*W>LN04qUGy#Q7LVVy7UJ_iYT0aP-ZFf&{)l5EjIQ}0Iqvj%J-uPn2O^5$f2?4^k~zJ-&{BM>seUVf;vr4gewlGjy9{N*X?ppHdj{e(wC&OT=C!#UxGGu z?Y&gcM)OS8vHY&qDSLf%z`WpN?)}kL>lwRMIy@tCHG; z?u~Iea1S8Df~fUfBFvO@=j4724=E4jNq{Cs|`F?)d z9CBmiQ1)u;#-43j+qke!RKEtB`aYD7wM%WZRshat5Iq4*S#89Zk4sV}2lbEcfDaI`{ku!r! zx@x{Ro;JWEF<%t}N|@!viH$m%+^P>LfqqVt*83An%#{v0NZ5f* zngC5Sb(vVgD)78ys7)}SZ9gZV$-ymh4gI`B%t5mcJw$&qnc}{cfNdEzi*@o9HA>2P}81ATq>GNO(w8Eq8ZYG>Y^Cg67P4icT{wPv!$RN z%N&RF)nGIC>-}IZ8Ttxo61P!5TC|&J=!R*f9;>vMaF1GdlyFnoQOZqdO)x<>VNEny z@qX?8YN&5`_&+^F_J0u2|LV*A$5s0OCnU}O542kGugNB4_dlD5+5Z)r{?9zbf1d5X z$lfn$bga z+qOXxK{#z+2{oXxwGQV+i7-;RbMeQdo+3W+#5G)5pHa^Y{Ts&e{>+BOsYR_X;#Jcd zI&eKooQH(+eT>zNF(t86p}sbi)p^mW@+h>Nsa;>4@7BhwJu&5&+w6Zt_Wn(3gspQ? 
zB#$Fe4bN^?_Neko9AmM}CQ3*8y-cJR$1lrHPZFp7sB&1p228B}UN6biQg8V*De~3% z!~E$x;Mv^^>#5!6%e8nw_VyujHl*$VFB-*muwAEFZ6>;8n3_fWO86W>H}pj<<&ubM zus+48M#v)P#QcT}cgakP#cAVE2>+9yB`9roKKf+k9PX!rd+^j_njyPi}knoL(BH`VY1uPB%SpwA!u zz0?vj8Q`~6kePpC)UaDM2s@TJ=XkTku(x0^osE%ufatzWmso>*i9~cYIkUc|?KX!J ztIcrsBJvZ@>6OHb&fjUCpm7@Vw9~`u6@jwLC`=PIJSqw0JfF92N*qk?QtD$h%2+Sn zQ$|Nh?yQ%1>24J1%yLL%c>FLb#JZe}_vdgFkGWmU0Q1BYWX#G$w%U})z$&8tbBbC8g*}V933f3nK(&SzRj#OMK zhF{l}0wwSyWQXBBqjr?&R;xdHb*jTILR^tYW*8Tm6B7>NcLcF6z?Fw$!Qi*jS>9$T>rAn&80wd_CPX{IL-}#K~t$$}_9_gAg zV%@GgPMy0we2T%F6Bti^+8|Nu{7Df?#Dn2z09q51g4QssKd$)=rop(ua0{o>(o>-o z9yhvng)4|=?8SJdB|v8v+wyWyGEWwAr}E;C=JjD=vAOrCMk87mZxv!VOtS(wg1JZ* zpPDYM%Q{WQ3sj1%&Dzcb?ZVz>@W4ndB6dkr7utCk$KR7qjVZ~%BZC@g>dByn?7?g* zy}IKVX#LV9nSBIBJizcxkm={y8r?IPM;e7E<3OOXwS=tQNWq#eNFj8tB>C6tqutY) z!kfyq2QdtjHBYCk{wCru5t8MIjoBYVkA#9AX2@7DqJva$fRMI#0Ab=CE{Ljn zu^K}h0>J_)0!S3rrup<+_NFNeLuZX?v&{Ud1BICu&u849uihHPc?QnczBfx?rn9QakzHa^O69{yE^`^Q1FQM6dUGTa(2%@l`Ir zNy$9zm$Q!8V6X|RP>uJ=@Fx5)kl^Zo?x7$7iqANMD_}0zUf4lO_Q$)#4+9cM^MznBNH%6>#8x*2b1XMZKwQ$li#^A4RGkg|nfW}!}hYI&8$ z>=833=b%x?>7wuxuqh77o7p)WYizJ5$l;eD-yPxy08$NGP?`HXirvqtl#S5xkhFj= z&Koiz&az8lZt)O@tyL38s|HxrkI(AEdzR2ZEMmU#Nb=MGU%i5RqCZU@%Eg^vnVr1KcGzG!=Hk-un94a@xPW zbNhbRqaH)C0Fty}cf}2QvdYroCclMsy*rj7r zgPR!SEurHRFng?yjr0RRS}0rQONht-Y`p9h=$*23P z=Sni7LlB_cM=`x&=YWN=-o+7tj7g6nFY>C=neVQJz%G2YCkYl7Zi^_xFH$jY-XT!&-3-k#}_bP{+gv~2*;K5OrQ z2+R;kIJZryE(M5K*MacF-EEX>n!nChH~HRPF~kF0H9tiH2KnOz2dkPdB?dR+ES^p- z%kwD*o|mgp;$5hK*rm|exPaNaiRBgBVE7e2+lcX9;?%D*ocBl?>NTgo9ztD#k4I0A zdKZ-{Lu8)XJeLb51rCLB{JMtC6I(__40ZJc6!Xrs04~M`z}6=07l1YU1y-L+MasTY zcKWLFyA(0x$oFOx8_32{lg}s6Kg1U+@)*(FfbXgVNr-J6pPi z`c+BXeSzv(sn_^Gvm0E`$K2G*Iz+ZsRb}g3MG_@=4(rJ;ARPO*_O+|L@lTK2I$$pI zxAlZHP3vNV_;jNmhQrH2`M43Xpfe9aK|C=MK_tj5Fq4K?a3qjdrJl3fB(0%<_x_J~Cc!Xq&Rh}cDT9mCjp#ta;T z&V7~O&z#n(bFlgs$5&RK-*c+?UjCn>F6@i1F5<1MxcOo!#bTjm=&=TmYxf>>*`0$t z0#^$O+kTLBso1~<_?@rFv{Hqlyd=lJa`|6@fhZ#a=UIj2ZoSO`stg9nXiQ3TUNM4< z)VBRV&4Xi0>-**a?mr^}t##-l>Mg~Z+;w!%!_lIv8;g=EkjbYTD7sh+XUkiaE)O*O z8j_@jO>ewOlq)f3UejjN+`Gve;&Vy?XU9PVb$B1jI9RW;Q_jGqx7nVYhVSs|4a!Rq zU?0#-fQ*TX!ThbexAdYxgAZ2x^KZuYwwd~O5=NwDBfwTBj~Js&u#zC zLKHhnl@zifc6 zQ`ejKOQ;83f`}Io_Oanw+Kz@MX@CGT9UILxE#!^729d6ofUhypT+C2Lt5cJQ_g?b9kjx@o7aU0{X8pmY_!t3hFx z%<{s-N1J|Tm}nJj*j;1lDJ4C|4c%tQ#nB-Fy@L5OW+<)nGi&{w-uM;k-`F;T2b&Sf zZf;cMsU4pT;WJqGS4rWVx=WdKF>htmq&N+!=(1&E#@xw7Nr96kkAyby?KE){r4t6= zz$0TkNIeqYnw=%7gm$n7O7J_$pHme2wC)7W} z7Y>LUgtN(hvUsxT%n*ml8cD&VBpL=MkS=d=JA%8v!0n^I)mdP20m9HgU;8CXP5YWq zJBM1xr3nw0p7as6t$VTzVz#zicbBQjq1kH*`PS$st^dgX^^ncRX99ZRNXndFDz7td zK-)CrXG|^zYO`IJQJ^NOc~lcr!A5Q+G=nCqgVM(6FLEb|iRIQye?S(Ao%Q1)o@wN1 zff54<$5xd`jgKdlMh|QdL@f%W(~l}}mZL|_!!?e?;DNmOPnOP0DL`7ur#viS%|aOL z@!oaK1q(m}A2k^AK&94eyMbM}{1ou^X+iHs&f;2b%evb{?7r z;UyXB=~3AL$TKcVH$XW6=vnUM@BAO1vJ%ua0LCArUmo_CqK!tKoRGSMFSC8R|f0^J6=`U*2 zV9Aq45jgxQm0WHb+HSR_YC*Z5?9{p2bX6HB2ix@LP?A@$Q{mWMX_GdKPd5Rbg5TY6 zUeXfGS1dP&jFdC#FW9a01Xq-{!Agb%?Ar7c(3%U^rW0h*x>u*+^0Y|ThCz=v&+UYQ zMwfpSlu}|VI%-r|cFUw%%_>#=GmeN8TA@VqzK2t_#2gu&;c2Cx6{CG)6X`UI8A;D< zxeg2~?6P-`t=!<>y0L(4k6d0XgAzRTCWnl5`if{BoK`QI;sUB1VIyz;cgyfZ7OjwWN3-h z5B;%7AIKI%1Aho)a>%U#{MZO5(4^^DP@i~(_yyL5^$W~SWNG@PmHwyo)z?0}?DX}9 z%3)XNKJI-aHXpXvK;EM74BoOW|B#`;cB+|avu@H1%thBg-(Q{xz3nfY^c?$<7l5_L zE=oQG*bXYdSL*yd@_tA>hCXng1Sdbfn8avtlxT`9DXVCIpxc#rg&&?(IMO5=6wSJu z4@*4n3v{!=AezlEb5aWji2%{SboW4Fgtzb;R{|E**p^d4bXm5$t!u;a+C6@u0*JeG z9of-&WTg|$ACK6QCLj_Y$;aioQ@XO}2NGT5pYdSKvq%KXR2V@?3-VYFJmdDsqcL0N zpDFJyM=^a{s{vIFJcW2a1hCzjIXK7qIPprRA&D?}y)zdj&hwCZ9_5bGfGkCH1m z-33w?XToU`|KcV}U{T94S&0Cx&+joDW0tEBvkM$Jqh5khe&&zDV)Y 
z(#zM9U)P!}5upc)#N}Rd4DjZ^!6{7=&e$2)4m{B$hV5=_Oq@h-CW?vi=2R#(TXc?l zbCwpl%lEP}yjT@DUBFB~BH5>&Pv!dODr21{?#~U9yWyYQXz#L>x7-g?ZUTn!wc3$k zT%WxArBwk$fG6pAB6h#QrW5IbbCAfx8tbBH93uUAy>3iw!usp1Qeu5z)xC}D_KHg} zz{cRz&=9;tVDi5`5;M+uTu>`M<*i$z62S+T%P{S(d2;n0f2OETw=&T_E~3fG-)Y)2 zBRVqDp{n9#pz;T=c@rj1|D=rh57JtA%NxNWxDBU!sGoX{yJ>*KKb=60)9BhMelAnF z1iI`NxHWY2z~P=UpQG19*vp+XPs6fIKj+Jq-j-e@Kk}Kt;RHZWLKDbiuTJn5`guPvgO-a}O&O&P&p7@QMC={4-bNN7XMRdNt1rNq zF1Fi~6hZf0{K}*$`Q!k11KWHN%EF=_ibZc;d0T0D7{m=*gdWG+t%C4&(yrv&bW7?u z5f++=&0VQ`-+n0KqPD^HusOT{-GrxGH;^n7!S%Vz{y+`{yU4i@m&^fskL%%Y~T4v#-u|XTD=B z3lA)afUG4}#DCLh3!U7}9eu)?*>>grIwLG4`MsGYL)3$JB1>~DOPWxLZ+8)KG(NiLJKh;$sWl*BF1ST0|O z^Glt^BL0mLT=K?vtby{Ifcy|A*YJf3Rj=fLQ&r~-V#93P)mzlUYJay{ErW zUv#D5zW5fu{mVE4t1RG3U5}i^hQqSE4{RZaqc_ zUVJQu^geO+_6=MZSdOI2FKsLSpg7Gngj`61eR-|JT24~Rpn3*r`jl5B7}VJmlyQux zI0K_DSJycu6&yOcM-;;Qc8W0P3@}Sdz~#!Vio{AhZafZqgV~6>{q4P@D{Ea})(uQ@ zqc>S9X7W6d(?`?VqkxDEcY;yx^a>59pF_$uBw(C*6-2*46M7dVNvY(&&QWftF=j7u zQzRBI21z$|TGhUI^T11ca%7oRPkVrxC%%Dot9l!8JYib7&WaITey?i5Dk|Y4K&hasv*&mXXI1dPrkqXddF?FD$ha-SZsGP){L3KGXT&cjv;s& zau>mcx%2ppkCX<-h2$1M(Kk)GdlFog7ZK)^tB$#g|Fl;rsb9+~X}`cO3);a?QEZkI zyuk@)o+(U+5+i!vS^mVEy3f21z8aezP@+F3vd8~YmHjJbj8Rq1izz&#@{G+Xd%2mR z9B~>4Db7-YChvlL7F!O@O_7e`A%7xy(>ns9kMLzwxSg!m2-;$asVOCvr<4s<=|yn9 ztsNR?PVep;6V774;vd3(rT;uVSfHRCEY7oLSSmP#+h`^*C>>6@&bRp>ND6|{ptAak&ni!=U zbqH@5Mv0oAjmQ^!czk`GOnv8M5}){u>?I4hD(D{(6R#_EtCAoMS(uRy#1KxqG)N`?Omq5aRC|G(Lv z#5%)V+1U-yE8kzZX8fd-Uf;p=`+YXd&UdF; z_Xm-phILZ~6S)F%KaRIi+#~|_K->nlYSnI3S%oMk)UvvB`Lb`PEfwyKLI7>J@9{@H*zq;qgVUO8yH}FHZ&iOq_=Tr_##hlS27fvP!gu5y!ZK(IWoj zqFz!OJzYEXnlb{4ds;da6fd5g*>)Y#CHNA)4X&&u8^|MhkP+^8w%s8k#on3e^W(|y zyod-ucIrLAHM_Ygg1;M-C+AbU-l6j5up-&O3JGQtU$t~>&Y8dtm4rFKoFuverjRXv z;uLxfhcH3ft@$zm>Kc!O3hG=@3{GF-o{e{9!r~X2>_4>fjRPaho*p8t!1wTi67yc>%Nh;TG>nm-haOGT6CX1IEuJjtC5Kl22tQ zvl@Fx#~ALfdtTk(Cv5Y*N@=>3kBtrbUDfZM-w(AHG7?KDD@~D25u$8@NUE4Z%;nTL z@I(BmmYnISHMi}4fngKbgNr=TO+>c@R3l#pe_35rlIh98yMSqlh`8NUx&J6JCSY?7 zLr(lSVQGo`o~i=&l!c(sa`l^J46()3ll*SXDtP|LYEFrzXoJZ?2#c$ePOIju4wP|J zwXkR`5iemON1~Rl8j$JCD0^ZhL-vDx@WpvR(0u_g6@|$ra5P5)E^5B_c#F;69v?U+Q4b6YQH0~t+2(S<*2<)q`ET$o%Bbu5r0Q)Fy_#d z9cH+?X4g?lxS#uUf*(-J;vWugQ0Hci*Q~~Y*vauq$xWZ z{E3wF>UFP=Y->Z`!j3et)0ynQj)f&y?rc)!aDsV<)a!@Q%-?tc!c#;Hz+rOmlOxVkAUY#mF3!xP zb5Uxtx%#E)9^e_2Dij&Arm@dz)e;Lmb#v9Yqu1$uuFh!bWqhi96=!;W6x&0Utg>F= zwb3SA73-~Ub(MmgoI7h)N2oFQM?=mZCt~LLx7P``lpT-##u*tm|3%VuI}(3XNN;-@ zE4fA>89KW&p$lx|SiDFflsSMv%sBO+X$kJGa*0xPc$9a6IxiI}gnNaYg*i90xhz0Q z+`hal;$$$8-27zP{Gfp%tSTX@P>^(^HwI&E^Ey$;P<88;LBo}K_nu+?ev&~b(Mb;) zrul08#9WF=pOY0^J!8FhTufLgp00wH!SbUkl`@UfS)RT1FtZPnEM)Np$H*ddXTo}q z^`r>`!qg%7JXxWD!vnOSAOOK_-&^|k@qWQ^=1hC3k|%(E^khy?AZ5En_)$@w0w?MY zkXq{gukrA2vqg<_NeDNR^!_b|bD()0#b1?Z?qHci#YiEA`Z>6VZW_22tI zr~iYxcMKBsOSe4Jwr$(CZQHhe(zfl)leTT0v~AnYJag(+*MF*~YvNYlp6Q7G{zmK_ z@0Tam+P}4)jVacv!|f^l8WP|g*4T=3{3`>-CI zx*CyXQG^9V`o0&DMRHyE)8r3Pdq|i76Nn$5zghKM_V!D4cV|H@I16jMLDbeiU}91- zMMs(`b`T64IAS%6hetpan$>!D^2@Pch7WLYYc#WB@(ia`MApCWU=?dxO}KO(Q#EEP z!~*FK39%8lpGLZ^8IKSxs)>O z`aXj6v`>!2y(~QaVFv8vOU$urNXu#XxarRSY>CSVBw;exGmO6zkmkAf=6VT4_Jh(t?#$(A8pib8Du>WM$(hK-vm?S{HbGNYwbmKl%;w({+ zki`~2sFtRIavs2tD3r!cjQ#mB$Ck6u7NK?$CgnOeSN~Z$Ht?G_cywDG%-$JGlJT#61hA z5`Z3uMk zZL( z7a{jeFS*A@vjKR-Tu_m5?xkmQeuGL`6WPV9SJ-HWAzGy$hHkVTmz&Mn`s-VK3Th$D zrYqw3d)WM9nvx52LS$ib0OWt*%|6&Y)84+EL(X)R8=?SDU<$wyLYo!fPpF#OSxrtN zt#1sjL!82T8~!x5ZZRM_P<_P|6L0mGkv^8JpUr?Y=OW!{^_geWwe2nIWjcqNapj3A zCu&zk?exr_XY2wZVm7v z0IdA}IS9!RsA!ov^&0I{EYGYlLjlWuwr9uj9z5UCxjZTtN1K^{jC+&zc#oBV6X)&& zem3op!>gWTgxf^dl123;_3%Bgl_#nW40q%_`M99BY<8o=W{d?4-`Kl!bc<#k7 
+In case of LMC > 0, more checks are added: Within each group of LIDs
+assigned to the same target port,
+  a. use only ports which have the same MinHop
+  b. first prefer the ones that go to a different systemImageGuid (than
+     the previous LID of the same LMC group)
+  c. if none - prefer those which go through another NodeGuid
+  d. fall back to the number of paths method (if all go to the same node).
+
+
+Effect of Topology Changes
+
+OpenSM will preserve existing routing in any case where there is no change in
+the fabric switches, unless the -r (--reassign_lids) option is specified.
+
+-r
+--reassign_lids
+          This option causes OpenSM to reassign LIDs to all
+          end nodes. Specifying -r on a running subnet
+          may disrupt subnet traffic.
+          Without -r, OpenSM attempts to preserve existing
+          LID assignments, resolving multiple uses of the same LID.
+
+If a link is added or removed, OpenSM does not recalculate
+the routes that do not have to change. A route has to change
+if the port is no longer UP or no longer the MinHop. When routing changes
+are performed, the same algorithm for balancing the routes is invoked.
+
+In the case of using the file based routing, any topology changes are
+currently ignored. The 'file' routing engine just loads the LFTs from the
+file specified, with no reaction to the real topology.
+Obviously, this will not be able
+to recheck LIDs (by GUID) for disconnected nodes, and LFTs for non-existent
+switches will be skipped. Multicast is not affected by the 'file' routing
+engine (it uses the min hop tables).
+
+
+Min Hop Algorithm
+-----------------
+
+The Min Hop algorithm is invoked when neither UPDN nor the file method is
+specified.
+
+The Min Hop algorithm is divided into two stages: computation of
+min-hop tables on every switch and LFT output port assignment. Link
+subscription is also equalized, with the ability to override based on
+port GUID. The latter is supplied by:
+
+-i <equalize-ignore-guids-file>
+--ignore-guids <equalize-ignore-guids-file>
+          This option provides the means to define a set of ports
+          (by guids) that will be ignored by the link load
+          equalization algorithm.
+
+LMC awareness routes on a (remote) system or switch basis.
+
+
+UPDN Routing Algorithm
+----------------------
+
+Purpose of UPDN Algorithm
+
+The UPDN algorithm is designed to prevent deadlocks from occurring in loops
+of the subnet. A loop-deadlock is a situation in which it is no longer
+possible to send data between any two hosts connected through the loop. As
+such, the UPDN routing algorithm should be used if the subnet is not a pure
+Fat Tree, and one of its loops may experience a deadlock (due, for example,
+to high pressure).
+
+The UPDN algorithm is based on the following main stages:
+
+1. Auto-detect root nodes - based on the CA hop length from any switch in
+the subnet, a statistical histogram is built for each switch (hop num vs
+number of occurrences). If the histogram reflects a specific column (higher
+than others) for a certain node, then it is marked as a root node. Since
+the algorithm is statistical, it may not find any root nodes. The list of
+the root nodes found by this auto-detect stage is used by the ranking
+process stage.
+
+    Note 1: The user can override the node list manually.
+    Note 2: If this stage cannot find any root nodes, and the user did not
+            specify a guid list file, OpenSM defaults back to the Min Hop
+            routing algorithm.
+
+2. Ranking process - All root switch nodes (found in stage 1) are assigned
+a rank of 0. Using the BFS algorithm, the rest of the switch nodes in the
+subnet are ranked incrementally. This ranking aids in the process of
+enforcing rules that ensure loop-free paths.
+
+3. Min Hop Table setting - after ranking is done, a BFS algorithm is run
+from each (CA or switch) node in the subnet. During the BFS process, the
+FDB table of each switch node traversed by BFS is updated, in reference to
+the starting node, based on the ranking rules and guid values.
+
+At the end of the process, the updated FDB tables ensure loop-free paths
+through the subnet.
+
+
+UPDN Algorithm Usage
+
+Activation through OpenSM
+
+Use the '-R updn' option (instead of the old '-u') to activate the UPDN
+algorithm.
+Use '-a <root_guid_file>' for adding a UPDN guid file that contains the
+root nodes for ranking.
+If the '-a' option is not used, OpenSM uses its auto-detect root nodes
+algorithm.
+
+Notes on the guid list file:
+1. A valid guid file specifies one guid in each line. Lines with an invalid
+format will be discarded.
+2. The user should specify the root switch guids. However, it is also
+possible to specify CA guids; OpenSM will use the guid of the switch (if
+it exists) that connects the CA to the subnet as a root node.
+
+
+To learn more about deadlock-free routing, see the article
+"Deadlock Free Message Routing in Multiprocessor Interconnection Networks"
+by William J Dally and Charles L Seitz (1985).
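+
+As an illustration, a minimal root guid file (one guid per line; the guids
+and path here are made up for the example) and the matching invocation
+could look like:
+
+    $ cat /tmp/root_guids.txt
+    0x0008f10400411a08
+    0x0008f10400411a10
+
+    $ opensm -R updn -a /tmp/root_guids.txt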
+
+
+Fat-tree Routing Algorithm
+--------------------------
+
+Purpose:
+
+The fat-tree algorithm optimizes routing for the "shift" communication
+pattern. It should be chosen if the subnet is a symmetrical fat-tree of
+various types. It supports not only K-ary-N-Trees: it also handles
+non-constant K, cases where not all leafs (CAs) are present, and any CBB
+ratio. As in UPDN, fat-tree also prevents credit-loop-deadlocks.
+The fat-tree algorithm supports topologies that comply with the following
+rules:
+ - Tree rank should be between two and eight (inclusively)
+ - Switches of the same rank should have the same number
+   of UP-going port groups*, unless they are root switches,
+   in which case they shouldn't have UP-going ports at all.
+ - Switches of the same rank should have the same number
+   of DOWN-going port groups, unless they are leaf switches.
+ - Switches of the same rank should have the same number
+   of ports in each UP-going port group.
+ - Switches of the same rank should have the same number
+   of ports in each DOWN-going port group.
+*ports that are connected to the same remote switch are referenced as a
+'port group'.
+
+Note that although the fat-tree algorithm supports trees with a non-integer
+CBB ratio, the routing will not be as balanced as in the case of an integer
+CBB ratio. In addition, although the algorithm allows leaf switches to have
+any number of CAs, the closer the tree is to being fully populated, the more
+effective the "shift" communication pattern will be.
+
+The algorithm also dumps a CA ordering file (osm-ftree-ca-order.dump) in the
+same directory where the OpenSM log resides. This ordering file provides the
+CA order that may be used to create an efficient communication pattern that
+will match the routing tables.
+
+
+Usage:
+
+Activation through OpenSM
+
+Use the '-R ftree' option to activate the fat-tree algorithm.
+
+Note: LMC > 0 is not supported by fat-tree routing. If it is
+specified, the default routing algorithm is invoked instead.
+
diff --git a/branches/Ndi/ulp/opensm/user/doc/modular-routing.txt b/branches/Ndi/ulp/opensm/user/doc/modular-routing.txt
new file mode 100644
index 00000000..0a593467
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/doc/modular-routing.txt
@@ -0,0 +1,78 @@
+Modular Routing Engine
+
+A modular routing engine structure has been added to allow
+for ease of "plugging in" new routing modules.
+
+Currently, only unicast callbacks are supported. Multicast
+can be added later.
+
+One of the existing routing modules is up-down "updn", which may
+be activated with the '-R updn' option (instead of the old '-u').
+
+General usage is:
+$ opensm -R 'module-name'
+
+There is also a trivial routing module which is able
+to load LFT tables from a dump file.
+
+Main features:
+
+- this will load switch LFTs and/or LID matrices (min hops tables)
+- this will load switch LFTs according to the path entries introduced in
+  the dump file
+- no additional checks will be performed (such as "is port connected", etc.)
+- if fabric LIDs were changed, this will try to reconstruct the LFTs
+  correctly, provided the endport GUIDs are represented in the dump file
+  (to disable this, the GUIDs may be removed from the dump file or zeroed)
+
+The dump file format is compatible with the output of the 'ibroute' utility,
+and a dump for the whole fabric may be generated with a script like this:
+
+    for sw_lid in `ibswitches | awk '{print $NF}'` ; do
+        ibroute $sw_lid
+    done > /path/to/dump_file
+
+or, using DR paths:
+
+    for sw_dr in `ibnetdiscover -v \
+            | sed -ne '/^DR path .* switch /s/^DR path \[\(.*\)\].*$/\1/p' \
+            | sed -e 's/\]\[/,/g' \
+            | sort -u` ; do
+        ibroute -D ${sw_dr}
+    done > /path/to/dump_file
+
+This script is dump_lfts.sh
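+
+For reference, the per-switch sections of such a dump look roughly like the
+following (a hypothetical fragment: guids, LIDs and node names are made up,
+and the exact header text depends on the ibroute version):
+
+    Unicast lids [0x0-0x3] of switch Lid 2 guid 0x0008f10400411a08 (sw-1):
+      Lid  Out   Destination
+           Port     Info
+    0x0001 001 : (Channel Adapter portguid 0x0002c90200214e41: 'node-1 HCA-1')
+    0x0002 000 : (Switch portguid 0x0008f10400411a08: 'sw-1')
+    2 valid lids dumped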
+
+In order to activate the new module, use:
+
+    opensm -R file -U /path/to/dump_file
+
+If the dump_file is not found or is in error, the default routing
+algorithm is utilized.
+
+The ability to dump switch lid matrices (aka min hops tables) to a file,
+and to load these later, is also supported.
+
+The usage is similar to loading unicast forwarding tables from a dump
+file (introduced by the 'file' routing engine), but the new lid matrix file
+name should be specified by the -M or --lid_matrix_file option. For example:
+
+    opensm -R file -M ./opensm-lid-matrix.dump
+
+The dump file is named 'opensm-lid-matrix.dump' and will be generated in
+the standard opensm dump directory (/var/log by default) when the
+OSM_LOG_ROUTING logging flag is set.
+
+When the routing engine 'file' is activated but a dump file is not specified
+or cannot be opened, the default lid matrix algorithm will be used.
+
+There is also a switch forwarding tables dumper which generates
+a file compatible with dump_lfts.sh output. This file can be used
+as input for forwarding tables loading by the 'file' routing engine.
+One or both of the options -U and -M can be specified together with
+'-R file'.
+
+NOTE: ibroute has been updated (for switch management ports) to support
+this. Also, lmc was added to switch management ports. ibroute needs to be
+r7855 or later from the trunk.
+
diff --git a/branches/Ndi/ulp/opensm/user/doc/opensm_release_notes_openib-2.0.5.txt b/branches/Ndi/ulp/opensm/user/doc/opensm_release_notes_openib-2.0.5.txt
new file mode 100644
index 00000000..a6555733
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/doc/opensm_release_notes_openib-2.0.5.txt
@@ -0,0 +1,487 @@
+                      OpenSM Release Notes 2.0.5
+                      ============================
+
+Version: OpenFabrics Enterprise Distribution (OFED) 1.1
+Repo: https://openib.org/svn/gen2/branches/1.1/src/userspace/management/osm
+Version: 9535 (openib-2.0.5)
+Date: October 2006
+
+1 Overview
+----------
+This document describes the contents of the OpenSM OFED 1.1 release.
+OpenSM is an InfiniBand compliant Subnet Manager and Subnet
+Administration, and runs on top of OpenIB. The OpenSM version for this
+release is openib-2.0.5.
+
+This document includes the following sections:
+1 This Overview section (describing new features and software
+  dependencies)
+2 Known Issues And Limitations
+3 Unsupported IB compliance statements
+4 Major Bug Fixes
+5 Main Verification Flows
+6 Qualified software stacks and devices
+
+1.1 Major New Features
+
+* Partition manager:
+  The partition manager provides a means to setup multiple partitions
+  by providing a partition policy file. For details please read
+  doc/partition-config.txt or the opensm man page.
+
+* Basic QoS Manager:
+  Provides a uniform configuration of the entire fabric with values defined
+  in the OpenSM options file. The options support different settings for
+  CAs, Switches, and Routers. Note that this is disabled by default;
+  using -Q enables QoS fabric setup.
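+  For illustration only, entries in the options file may then look like
+  the following (parameter names as described in doc/qos-config.txt in
+  this patch; the values are arbitrary examples, not recommendations):
+
+      qos_max_vls=8
+      qos_high_limit=0
+      qos_sl2vl=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,7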
+
+* Loading pre-routes from a file:
+  A new routing module enables loading pre-routes from a file.
+  To use this option you should use the command line options:
+  "-R file -U <path to dump file>" or
+  "--routing_engine file --ucast_file <path to dump file>"
+  For more information refer to the file doc/modular-routing.txt
+  or the opensm man page.
+
+* SA MultiPathRecord support:
+  The SA can now handle requests for multiple PathRecords in one query.
+  This includes the SA GetMulti/GetMultiResp methods and dual sided RMPP.
+
+* PPC64 is now QAed and supported
+
+* Support LMC > 0 for Switch Enhanced Port 0:
+  Allows enhanced switch port 0 (ESP0) to have a non-zero LMC, using the
+  configured subnet wide LMC for this. Modifications were necessary to the
+  LID assignment and routing to support this. Also, an option was added to
+  the configuration to either use the LMC configured for the subnet on
+  enhanced switch port 0 or to set it to 0 even if a non-zero LMC is
+  configured for the subnet. The default is currently the latter option.
+  The new configuration option is: lmc_esp0
+
+1.2 Minor New Features:
+
+* IPoIB broadcast group configuration:
+  It is now possible to control the IPoIB broadcast group parameters
+  (MTU, rate, SL) through the partitions configuration file.
+
+* Limiting OpenSM log file size:
+  By providing the command line option "-L <size in MB>" or
+  "--log_limit <size in MB>", the user can limit the generated log
+  file size. When specified, the log file will be truncated upon reaching
+  this limit.
+
+* Favor 1K MTU for Tavor (MT23108) HCA:
+  In cases where a PathRecord or MultiPathRecord is queried, the
+  requestor does not specify the MTU or specifies it in a way
+  that allows the MTU to be 1K, and one of the path ends is a Tavor,
+  limit the MTU to 1K max.
+
+* Man pages:
+  Added opensm.8 and osmtest.8
+
+* Leaf VL stall count control:
+  A new parameter (leaf_vl_stall_count) for controlling the number of
+  sequential packets dropped on a switch port driving a HCA/TCA/Router
+  that cause the port to enter the VLStalled state was added to the
+  options file.
+
+* SM Polling/Handover defaults changed:
+  The default SMInfo polling retries was decreased from 18 to 4,
+  which reduces the default handover time from 3 min to 40 seconds.
+
+1.3 Library API Changes
+
+* cl_mem* APIs deprecated in complib:
+  These functions are now considered deprecated and should be
+  replaced by direct calls to malloc, free, memset, etc.
+
+* osm_log_init_v2 API added in libopensm:
+  Supports providing the new option for log file truncation.
+
+1.4 Software Dependencies
+
+OpenSM depends on the installation of either OFED 1.1, OFED 1.0,
+OpenIB gen2 (e.g. IBG2 distribution), OpenIB gen1 (e.g. IBGD
+distribution), or Mellanox VAPI stacks. The qualified driver versions
+are provided in Table 2, "Qualified IB Stacks".
+
+1.5 Supported Devices Firmware
+
+The main task of OpenSM is to initialize InfiniBand devices. The
+qualified devices and their corresponding firmware versions
+are listed in Table 3.
+
+2 Known Issues And Limitations
+------------------------------
+
+* No Service / Key associations:
+  There is no way to manage Service access by Keys.
+
+* No SM to SM SMDB synchronization:
+  Puts the burden of re-registering services, multicast groups, and
+  inform-info on the client application (or IB access layer core).
+ +* No "port down" event handling: + Changing the switch port through which OpenSM connects to the IB + fabric may cause incorrect operation. Please restart OpenSM whenever + such a connectivity change is made. + +* Changing connections during SM operation: + Under some conditions the SM can get confused by a change in + cabling (moving a cable from one switch port to the other) and + momentarily see this as having the same GUID appear connected + to two different IB ports. Under some conditions, when the SM fails to + get the corresponding change event it might mistakenly report this case + as a "duplicated GUID" case and abort. It is advisable to double-check + the syslog after each such change in connectivity and restart + OpenSM if it has exited. + +3 Unsupported IB Compliance Statements +-------------------------------------- +The following section lists all the IB compliance statements which +OpenSM does not support. Please refer to the IB specification for detailed +information regarding each compliance statement. + +* C14-22 (Authentication): + M_Key M_KeyProtectBits and M_KeyLeasePeriod shall be set in one + SubnSet method. As a work-around, an OpenSM option is provided for + defining the protect bits. + +* C14-67 (Authentication): + On SubnGet(SMInfo) and SubnSet(SMInfo) - if M_Key is not zero then + the SM shall generate a SubnGetResp if the M_Key matches, or + silently drop the packet if M_Key does not match. + +* C15-0.1.23.4 (Authentication): + InformInfoRecords shall always be provided with the QPN set to 0, + except for the case of a trusted request, in which case the actual + subscriber QPN shall be returned. + +* o13-17.1.2 (Event-FWD): + If no permission to forward, the subscription should be removed and + no further forwarding should occur. + +* C14-24.1.1.5 and C14-62.1.1.22 (Initialization): + GUIDInfo - SM should enable assigning Port GUIDInfo. + +* C14-44 (Initialization): + If the SM discovers that it is missing an M_Key to update CA/RT/SW, + it should notify the higher level. + +* C14-62.1.1.12 (Initialization): + PortInfo:M_Key - Set the M_Key to a node based random value. + +* C14-62.1.1.13 (Initialization): + PortInfo:P_KeyProtectBits - set according to an optional policy. + +* C14-62.1.1.24 (Initialization): + SwitchInfo:DefaultPort - should be configured for random FDB. + +* C14-62.1.1.32 (Initialization): + RandomForwardingTable should be configured. + +* o15-0.1.12 (Multicast): + If the JoinState is SendOnlyNonMember = 1 (only), then the endport + should join as sender only. + +* o15-0.1.8 (Multicast): + If a request for creating an MCG with fields that cannot be met, + return ERR_REQ_INVALID (currently ignores SL and FlowLabelTClass). + +* C15-0.1.8.6 (SA-Query): + Respond to SubnAdmGetTraceTable - this is an optional attribute. + +* C15-0.1.13 Services: + Reject ServiceRecord create, modify or delete if the given + ServiceP_Key does not match the one included in the ServiceGID port + and the port that sent the request. + +* C15-0.1.14 (Services): + Provide means to associate service name and ServiceKeys. + +4 Major Bug Fixes +----------------- + +The following is a list of bugs that were fixed. Note that other less critical +or visible bugs were also fixed. + +* "Broken" fabric (duplicated port GUIDs) handling improved + Replace assert with a real check to handle invalid physical port + in osm_node_info_rcv.c which could occur on a broken fabric + +* SA client synchronous request failed but status returned was IB_SUCCESS + even if there was no response. 
+  There was a missing setting of the status in the synchronous case.
+
+* Memory leak fixes:
+  1. In libvendor/osm_vendor_ibumad.c:osm_vendor_get_all_port_attr
+  2. In libvendor/osm_vendor_ibumad_sa.c:__osmv_sa_mad_rcv_cb
+  3. On receiving an SMInfo SA request from a node that does not share a
+     partition, the response mad was allocated but never free'd
+     as it was never sent.
+
+* Set(InformInfo) OpenSM deadlock:
+  When receiving a request with an unknown LID.
+
+* PathRecord to inconsistent multicast destination:
+  Fix the return error when the multicast destination is not consistently
+  indicated.
+
+* Remove double calculation of reversible path:
+  In osm_sa_path_record.c:__osm_pr_rcv_get_lid_pair_path, a PathRecord
+  query used to double check if the path is reversible.
+
+* Some PathRecord log messages use "net order":
+  Fix GUID net to host conversion in some osm_log messages.
+
+* DR/LID routed SMPs direction bit handling:
+  osm_resp.c:osm_resp_make_resp_smp, set the direction bit only if the
+  class is direct routed. This bug caused two issues:
+  1. Get/Set responses always had the direction bit set.
+  2. Trap represses never had the direction bit set.
+  The direction bit needs setting in direct routed responses and it
+  doesn't exist in LID routed responses.
+  osm_sm_mad_ctrl.c: did not detect the "direction bit" correctly.
+
+* OpenSM crash due to transaction lookup (interop with Cisco stack):
+  When a wire TID that maps to an internal TID of zero (after applying
+  the mask) was received, the lookup of the transaction was successful.
+  The stale transaction pointed to "free'd" memory.
+
+* Better handling for Path/MultiPath requests for raw traffic
+
+* Wrong ProducerType provided in Notice Reports:
+  When formatting an SM generated report, the ProducerType was using
+  CL_NTOH32, which cannot be used to format a 24-bit network order number.
+
+* OpenSM break on PPC64:
+  complib: Fixed memory corruption in cl_pool.c:cl_qcpool_init. This
+  affected big endian 64-bit architectures only.
+
+* Illegal Set(InformInfo) was wrongly successful in updating the SMDB:
+  osm_sa_informinfo.c: In osm_infr_rcv_process_set_method, if sending
+  an error, don't call osm_infr_rcv_process_set_method.
+
+* RMPP queries of InformInfoRecord fail:
+  ib_types.h: Pad ib_inform_info_record_t to be modulo 8 in size so
+  that the attribute offset is calculated properly.
+
+* Returning "invalid request" rather than "unsupported method/attribute":
+  In these cases, a noncompliant response was being provided.
+
+* Noncompliant response for SubnAdmGet(PortInfoRecord) with no match:
+  osm_pir_rcv_process now returns the "SA no records" error for SubnAdmGet
+  with 0 records found.
+
+* Noncompliant non-base LID returned by some queries:
+  The following attributes used to return the request LID rather than
+  its base LID in responses: PKeyTableRecord, GUIDInfoRecord,
+  SLtoVLMappingTableRecord, VLArbitrationTableRecord, LinkRecord.
+
+* Noncompliant SubnAdmGet and SubnAdmGetTable:
+  Mixing of error codes in case of no records or multiple records
+  fixed for the attributes:
+  LinearForwardingTableRecord, GUIDInfoRecord,
+  VLArbitrationTableRecord, LinkRecord, PathRecord.
+
+* Segfault in InformInfo flows:
+  Under stress, concurrent Set/Delete/Get flows caused a segfault.
+  Fixed by adding a missing lock.
+
+* SA queries containing a LID out of range did not return ERR_REQ_INVALID
+
+5 Main Verification Flows
+-------------------------
+
+OpenSM verification is run using the following activities:
+* osmtest - a stand-alone program
+* ibmgtsim (IB management simulator) based - a set of flows that
+  simulate clusters, inject errors and verify OpenSM capability to
+  respond and bring up the network correctly.
+* small cluster regression testing - where the SM is used on back to
+  back or single switch configurations. The regression includes
+  multiple OpenSM dedicated tests.
+* cluster testing - where we run OpenSM to set up a large cluster, perform
+  hand-off, reboots and reconnects, verify routing correctness and SA
+  responsiveness at the ULP level (IPoIB and SDP).
+
+5.1 osmtest
+
+osmtest is an automated verification tool used for OpenSM
+testing. Its verification flows are described in the list below.
+
+* Inventory File: Obtain and verify all port info, node info, link and path
+  record parameters.
+
+* Service Record:
+  - Register new service
+  - Register another service (with a lease period)
+  - Register another service (with service p_key set to zero)
+  - Get all services by name
+  - Delete the first service
+  - Delete the third service
+  - Added bad flows of get/delete of a non-valid service
+  - Add / Get same service with different data
+  - Add / Get / Delete by different component mask values (services
+    by Name & Key / Name & Data / Name & Id / Id only)
+
+* Multicast Member Record:
+  - Query of existing Groups (IPoIB)
+  - BAD Join with insufficient comp mask (o15.0.1.3)
+  - Create given MGID=0 (o15.0.1.4)
+  - Create given MGID=0xFF12A01C,FE800000,00000000,12345678 (o15.0.1.4)
+  - Create BAD MGID=0xFA. (o15.0.1.6)
+  - Create BAD MGID=0xFF12A01B w/ link-local not set (o15.0.1.6)
+  - New MGID with invalid join state (o15.0.1.9)
+  - Retry of existing MGID - See JoinState update (o15.0.1.11)
+  - BAD RATE when connecting to existing MGID (o15.0.1.13)
+  - Partial JoinState delete request - removing FullMember (o15.0.1.14)
+  - Full Delete of a group (o15.0.1.14)
+  - Verify Delete by trying to Join deleted group (o15.0.1.14)
+  - BAD Delete of IPoIB membership (no prev join) (o15.0.1.15)
+
+* GUIDInfo Record:
+  - All GUIDInfoRecords in the subnet are obtained
+
+* MultiPathRecord:
+  - Perform some compliant and noncompliant MultiPathRecord requests
+  - Validation is via status in responses and IB analyzer
+
+* PKeyTableRecord:
+  - Perform some compliant and noncompliant PKeyTableRecord queries
+  - Validation is via status in responses and IB analyzer
+
+* LinearForwardingTableRecord:
+  - Perform some compliant and noncompliant LinearForwardingTableRecord
+    queries
+  - Validation is via status in responses and IB analyzer
+
+* Event Forwarding: Register for trap forwarding using reports
+  - Send a trap and wait for report
+  - Unregister non-existing
+
+* Trap 64/65 Flow: Register to Trap 64-65, create traps (by
+  disconnecting/connecting ports) and wait for report, then unregister.
+
+* Stress Test: send PortInfoRecord queries, both single and RMPP, and
+  check the rate of responses as well as their validity.
+
+
+5.2 IB Management Simulator OpenSM Test Flows:
+
+The simulator provides the ability to simulate the SM handling of virtual
+topologies that are not limited to actual lab equipment availability.
+OpenSM was simulated to bring up clusters of up to 10,000 nodes. Daily
+regressions use smaller clusters (16 and 128 nodes).
+
+The following test flows are run on the IB management simulator:
+
+* Stability:
+  Up to 12 links from the fabric are randomly selected to drop packets
+  at drop rates up to 90%. The SM is required to succeed in bringing the
+  fabric up. The resulting routing is verified to be correct as well.
+
+* LID Manager:
+  Using LMC = 2, the fabric is initialized with LIDs. Faults such as a
+  zero LID, a duplicated LID, and non-aligned (to LMC) LIDs are
+  randomly assigned to various nodes, and other errors are randomly
+  output to the guid2lid cache file. The SM sweep is run 5 times, and
+  after each iteration a complete verification is made to ensure that all
+  LIDs that could possibly be maintained are kept, as well as that all
+  nodes were assigned a legal LID range.
+
+* Multicast Routing:
+  Nodes randomly join the 0xc000 group, and eventually the
+  resulting routing is verified for completeness and adherence to
+  Up/Down routing rules.
+
+* osmtest:
+  The complete osmtest flow as described in the previous section is run on
+  the simulated fabrics.
+
+* Stress Test:
+  This flow merges fabric, LID and stability issues with continuous
+  PathRecord, ServiceRecord and Multicast Join/Leave activity to
+  stress the SM/SA during continuous sweeps. InformInfo Set/Delete/Get
+  were added to the test such that both existing and non-existing nodes
+  perform them in random order.
+
+5.3 OpenSM Regression
+
+Using a back-to-back or single switch connection, the following set of
+tests is run nightly on the stacks described in Table 2. The included
+tests are:
+
+* Stress Testing: Flood the SA with queries from multiple channel
+  adapters to check the robustness of the entire stack up to the SA.
+
+* Dynamic Changes: Dynamic topology changes, through randomly
+  dropping SMP packets, are used to test OpenSM adaptation to an unstable
+  network and to verify DB correctness.
+
+* Trap Injection: This flow injects traps to the SM and verifies that it
+  handles them gracefully.
+
+* SA Query Test: This test exhaustively checks the SA responses to all
+  possible single component masks. To do that, the test examines the
+  entire set of records the SA can provide, classifies them by their
+  field values, and then selects every field (using a component mask and a
+  value) and verifies that the response matches the expected set of
+  records. A random selection using multiple component mask bits is also
+  performed.
+
+5.4 Cluster testing:
+
+Cluster testing is usually run before a distribution release. It
+involves real hardware setups of 16 to 32 nodes (or more if a beta site
+is available). Each test is validated by running all-to-all ping through
+the IB interface.
+
+5.4 Cluster Testing
+
+Cluster testing is usually run before a distribution release. It
+involves real hardware setups of 16 to 32 nodes (or more if a beta site
+is available). Each test is validated by running all-to-all ping through
+the IB interface. The test procedure includes:
+
+* Cluster bringup
+
+* Hand-off between 2 or 3 SMs while performing:
+  - Node reboots
+  - Switch power cycles (disconnecting the SMs)
+
+* Unresponsive port detection and recovery
+
+* osmtest from multiple nodes
+
+* Trap injection and recovery
+
+
+6 Qualification
+----------------
+
+Table 2 - Qualified IB Stacks
+=============================
+
+Stack                                     | Version
+------------------------------------------|--------------------------
+OFED                                      | 1.1
+OFED                                      | 1.0
+OpenIB Gen2 (IBG2 distribution)           | 1.0
+OpenIB Gen1 (IBGD distribution)           | 1.8.0
+VAPI (Mellanox InfiniBand HCA Driver)     | 3.2 and later
+
+Table 3 - Qualified Devices and Corresponding Firmware
+======================================================
+
+Mellanox
+Device  | FW versions
+--------|-----------------------------------------------------------
+MT43132 | InfiniScale - fw-43132 5.2.0 (and later)
+MT47396 | InfiniScale III - fw-47396 0.5.0 (and later)
+MT23108 | InfiniHost - fw-23108 3.3.2 (and later)
+MT25204 | InfiniHost III Lx - fw-25204 1.0.1i (and later)
+MT25208 | InfiniHost III Ex (InfiniHost Mode) - fw-25208 4.6.2 (and later)
+MT25208 | InfiniHost III Ex (MemFree Mode) - fw-25218 5.0.1 (and later)
+
+QLogic/PathScale
+Device  | Note
+--------|-----------------------------------------------------------
+iPath   | QHT6040 (PathScale InfiniPath HT-460)
+iPath   | QHT6140 (PathScale InfiniPath HT-465)
+iPath   | QLE6140 (PathScale InfiniPath PE-880)
+
+Note: OpenSM does not run on the IBM Galaxy (eHCA), as it does not expose
+QP0 and QP1. However, OpenSM does support it as a device on the subnet.
+
diff --git a/branches/Ndi/ulp/opensm/user/doc/qos-config.txt b/branches/Ndi/ulp/opensm/user/doc/qos-config.txt
new file mode 100644
index 00000000..c90e6f7b
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/doc/qos-config.txt
@@ -0,0 +1,45 @@
+Trivial low level QoS configuration proposition
+===============================================
+
+Basically there is a set of QoS-related low-level configuration parameters.
+All these parameter names are prefixed by the "qos_" string. Here is a full
+list of these parameters:
+
+ qos_max_vls    - The maximum number of VLs that will be on the subnet
+ qos_high_limit - The limit of the High Priority component of the VL
+                  Arbitration table (IBA 7.6.9)
+ qos_vlarb_low  - Low priority VL Arbitration table (IBA 7.6.9) template
+ qos_vlarb_high - High priority VL Arbitration table (IBA 7.6.9) template
+                  Both VL arbitration templates are pairs of VL and weight
+ qos_sl2vl      - SL2VL Mapping table (IBA 7.6.6) template. It is a list
+                  of VLs corresponding to SLs 0-15 (note that VL15 used
+                  here means "drop this SL")
+
+Typical default values (hard-coded in OpenSM initialization) are:
+
+ qos_max_vls=15
+ qos_high_limit=0
+ qos_vlarb_low=0:0,1:4,2:4,3:4,4:4,5:4,6:4,7:4,8:4,9:4,10:4,11:4,12:4,13:4,14:4
+ qos_vlarb_high=0:4,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0
+ qos_sl2vl=0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,7
+
+The syntax is compatible with the rest of the OpenSM configuration options,
+and values may be stored in the OpenSM config file (cached options file).
+
+In addition to the above, we may define separate QoS configuration
+parameter sets for various target types. As targets, we currently support
+CAs, routers, switch external ports, and the switch's enhanced port 0. The
+names of such specialized parameters are prefixed by the "qos_<type>_"
+string. Here is a full list of the currently supported sets:
+
+ qos_ca_  - QoS configuration parameters set for CAs.
+ qos_rtr_ - parameters set for routers.
+ qos_sw0_ - parameters set for switches' port 0. + qos_swe_ - parameters set for switches' external ports. + +Examples: + + qos_sw0_max_vls=2 + qos_ca_sl2vl=0,1,2,3,5,5,5,12,12,0, + qos_swe_high_limit=0 + diff --git a/branches/Ndi/ulp/opensm/user/ibtrapgen/Makefile b/branches/Ndi/ulp/opensm/user/ibtrapgen/Makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/ibtrapgen/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/ulp/opensm/user/ibtrapgen/SOURCES b/branches/Ndi/ulp/opensm/user/ibtrapgen/SOURCES new file mode 100644 index 00000000..54e5501c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/ibtrapgen/SOURCES @@ -0,0 +1,67 @@ +!if $(FREEBUILD) +TARGETNAME=ibtrapgen +!else +TARGETNAME=ibtrapgend +!endif + +!if !defined(WINIBHOME) +WINIBHOME=..\..\..\.. +!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +!if defined(OSM_TARGET) +TARGETPATH=$(OSM_TARGET)\bin\user\obj$(BUILD_ALT_DIR) +!else +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) +!endif + +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 +OVR_DIR=..\addon + + +SOURCES=\ + main.c \ + ibtrapgen.c + + + +OSM_HOME=.. + +TARGETLIBS=\ +!if $(FREEBUILD) + $(LIBPATH)\*\ibal.lib \ + $(LIBPATH)\*\complib.lib \ + $(TARGETPATH)\*\osmv_ibal.lib \ + $(TARGETPATH)\*\opensm_ibal.lib \ + $(CRT_LIB_PATH)\msvcrt.lib + +!else + $(LIBPATH)\*\ibald.lib \ + $(LIBPATH)\*\complibd.lib \ + $(TARGETPATH)\*\osmv_ibald.lib \ + $(TARGETPATH)\*\opensm_ibald.lib \ + $(CRT_LIB_PATH)\msvcrt.lib +!endif + +#DO NOT TOUCH the order of search path , until ib_types.h merging process will be done +INCLUDES= \ + $(OSM_HOME)\include; \ + $(OSM_HOME); \ + $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; + +# Could be any special flag needed for this project +USER_C_FLAGS=$(USER_C_FLAGS) /MD +#Add preproccessor definitions +C_DEFINES=$(C_DEFINES) -DWIN32 -D__WIN__ -D__i386__ -Dinline=__inline -DMT_LITTLE_ENDIAN -DOSM_VENDOR_INTF_AL +!if !$(FREEBUILD) +#C_DEFINES=$(C_DEFINES) -D_DEBUG -DDEBUG -DDBG +C_DEFINES=$(C_DEFINES) +!endif + +LINKER_FLAGS= $(LINKER_FLAGS) +MSC_WARNING_LEVEL= /W3 + diff --git a/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.c b/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.c new file mode 100644 index 00000000..bd378821 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.c @@ -0,0 +1,442 @@ +/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + This software program is available to you under a choice of one of two + licenses. You may choose to be licensed under either the GNU General Public + License (GPL) Version 2, June 1991, available at + http://www.fsf.org/copyleft/gpl.html, or the Intel BSD + Patent License, + the text of which follows: + + "Recipient" has requested a license and Intel Corporation ("Intel") + is willing to grant a license for the software entitled + InfiniBand(tm) System Software (the "Software") being provided by + Intel Corporation. + + The following definitions apply to this License: + + "Licensed Patents" means patent claims licensable by Intel Corporation which + are necessarily infringed by the use or sale of the Software alone or when + combined with the operating system referred to below. 
+ + "Recipient" means the party to whom Intel delivers this Software. + "Licensee" means Recipient and those third parties that receive a license to + any operating system available under the GNU Public License version 2.0 or + later. + + Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + + The license is provided to Recipient and Recipient's Licensees under the + following terms. + + Redistribution and use in source and binary forms of the Software, with or + without modification, are permitted provided that the following + conditions are met: + Redistributions of source code of the Software may retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form of the Software may reproduce the above + copyright notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + Neither the name of Intel Corporation nor the names of its contributors shall + be used to endorse or promote products derived from this Software without + specific prior written permission. + + Intel hereby grants Recipient and Licensees a non-exclusive, worldwide, + royalty-free patent license under Licensed Patents to make, use, sell, offer + to sell, import and otherwise transfer the Software, if any, in source code + and object code form. This license shall include changes to the Software that + are error corrections or other minor changes to the Software that do not add + functionality or features when the Software is incorporated in any version of + a operating system that has been distributed under the GNU General Public + License 2.0 or later. This patent license shall apply to the combination of + the Software and any operating system licensed under the GNU Public License + version 2.0 or later if, at the time Intel provides the Software to + Recipient, such addition of the Software to the then publicly + available versions of such operating system available under the GNU + Public License version 2.0 or later (whether in gold, beta or alpha + form) causes such combination to be covered by the Licensed + Patents. The patent license shall not apply to any other + combinations which include the Software. No hardware per se is + licensed hereunder. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS CONTRIBUTORS + BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, + OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + --------------------------------------------------------------------------*/ + +/* + * Abstract: + * Implementation of ibtrapgen_t. 
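+ *    ibtrapgen binds to a local port as a subnet-management (QP0) client
+ *    and injects Notice MADs for traps 128-131 toward the SM at a
+ *    configurable rate (see ibtrapgen_bind and ibtrapgen_run below).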
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include "ibtrapgen.h"
+
+#define GUID_ARRAY_SIZE 64
+
+/**********************************************************************
+ **********************************************************************/
+/*
+  This function initializes the main object, the log and the Osm Vendor
+*/
+ib_api_status_t
+ibtrapgen_init( IN ibtrapgen_t * const p_ibtrapgen,
+                IN ibtrapgen_opt_t * const p_opt,
+                IN const osm_log_level_t log_flags
+                )
+{
+  ib_api_status_t status;
+
+  /* just making sure - cleanup the static global obj */
+  cl_memclr( p_ibtrapgen, sizeof( *p_ibtrapgen ) );
+
+  /* construct and init the log */
+  p_ibtrapgen->p_log = (osm_log_t *)cl_malloc(sizeof(osm_log_t));
+  osm_log_construct( p_ibtrapgen->p_log );
+  status = osm_log_init( p_ibtrapgen->p_log, p_opt->force_log_flush,
+                         0x0001, p_opt->log_file, FALSE );
+  if( status != IB_SUCCESS )
+    return ( status );
+
+  osm_log_set_level( p_ibtrapgen->p_log, log_flags );
+
+  /* finally we can declare we are here ... */
+  osm_log( p_ibtrapgen->p_log, OSM_LOG_FUNCS,
+           "ibtrapgen_init: [\n" );
+
+  /* assign all the opts */
+  p_ibtrapgen->p_opt = p_opt;
+
+  /* initialize the osm vendor service object */
+  p_ibtrapgen->p_vendor = osm_vendor_new( p_ibtrapgen->p_log,
+                                          p_opt->transaction_timeout );
+
+  if( p_ibtrapgen->p_vendor == NULL )
+  {
+    status = IB_INSUFFICIENT_RESOURCES;
+    osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR,
+             "ibtrapgen_init: ERR 0001: "
+             "Unable to allocate vendor object" );
+    goto Exit;
+  }
+
+  /* all mads (actually wrappers) are taken from and returned to a pool */
+  osm_mad_pool_construct( &p_ibtrapgen->mad_pool );
+  status = osm_mad_pool_init(
+    &p_ibtrapgen->mad_pool, p_ibtrapgen->p_log );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+ Exit:
+  osm_log( p_ibtrapgen->p_log, OSM_LOG_FUNCS,
+           "ibtrapgen_init: ]\n" );
+  return ( status );
+}
+
+/****f* opensm: SM/__ibtrapgen_rcv_callback
+ * NAME
+ *   __ibtrapgen_rcv_callback
+ *
+ * DESCRIPTION
+ *   This is the callback from the transport layer for received MADs.
+ *
+ * SYNOPSIS
+ */
+void
+__ibtrapgen_rcv_callback(
+  IN osm_madw_t *p_madw,
+  IN void *bind_context,
+  IN osm_madw_t *p_req_madw )
+{
+  ibtrapgen_t* p_ibtrapgen = (ibtrapgen_t*)bind_context;
+
+  OSM_LOG_ENTER( p_ibtrapgen->p_log, __ibtrapgen_rcv_callback );
+
+  CL_ASSERT( p_madw );
+
+  osm_log( p_ibtrapgen->p_log, OSM_LOG_VERBOSE,
+           "__ibtrapgen_rcv_callback: Got callback. TID:0x%016" PRIx64 "\n",
+           cl_ntoh64(p_madw->p_mad->trans_id) );
+
+  OSM_LOG_EXIT( p_ibtrapgen->p_log );
+}
+
+/****f* opensm: SM/__ibtrapgen_send_err_cb
+ * NAME
+ *   __ibtrapgen_send_err_cb
+ *
+ * DESCRIPTION
+ *   This is the callback from the transport layer for sent MADs that
+ *   completed in error.
+ *
+ * SYNOPSIS
+ */
+void
+__ibtrapgen_send_err_cb(
+  IN void *bind_context,
+  IN osm_madw_t *p_madw )
+{
+  ibtrapgen_t* p_ibtrapgen = (ibtrapgen_t*)bind_context;
+
+  OSM_LOG_ENTER( p_ibtrapgen->p_log, __ibtrapgen_send_err_cb );
+
+  CL_ASSERT( p_madw );
+
+  osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR,
+           "__ibtrapgen_send_err_cb: ERR 0011: "
+           "MAD completed in error (%s).\n",
+           ib_get_err_str( p_madw->status ) );
+
+  osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR,
+           "__ibtrapgen_send_err_cb: ERR 0012: "
+           "We shouldn't be here!! 
TID:0x%016" PRIx64 ".\n", + cl_ntoh64(p_madw->p_mad->trans_id) ); + OSM_LOG_EXIT( p_ibtrapgen->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +ibtrapgen_bind( IN ibtrapgen_t * p_ibtrapgen ) +{ + ib_api_status_t status; + uint32_t num_ports = GUID_ARRAY_SIZE; + ib_port_attr_t attr_array[GUID_ARRAY_SIZE]; + osm_bind_info_t bind_info; + uint8_t i; + + OSM_LOG_ENTER( p_ibtrapgen->p_log, ibtrapgen_bind ); + + /* + * Call the transport layer for a list of local port + * GUID values. + */ + status = osm_vendor_get_all_port_attr( p_ibtrapgen->p_vendor, + attr_array, &num_ports ); + if ( status != IB_SUCCESS ) + { + osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR, + "ibtrapgen_bind: ERR 0002: " + "Failure getting local port attributes (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* make sure the requested port exists */ + if ( p_ibtrapgen->p_opt->port_num > num_ports ) + { + osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR, + "ibtrapgen_bind: ERR 0003: " + "Given port number out of range %u > %u\n", + p_ibtrapgen->p_opt->port_num , num_ports ); + status = IB_NOT_FOUND; + goto Exit; + } + + for ( i = 0 ; i < num_ports ; i++ ) + { + osm_log(p_ibtrapgen->p_log, OSM_LOG_INFO, + "ibtrapgen_bind: Found port number:%u " + " with GUID:0x%016" PRIx64 "\n", + i, cl_ntoh64(attr_array[i].port_guid) ); + } + /* check if the port is active */ +/* if (attr_array[p_ibtrapgen->p_opt->port_num - 1].link_state < 4) */ +/* { */ +/* osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR, */ +/* "ibtrapgen_bind: ERR 0004: " */ +/* "Given port number link state is not active: %s.\n", */ +/* ib_get_port_state_str( */ +/* attr_array[p_ibtrapgen->p_opt->port_num - 1].link_state ) */ +/* ); */ +/* status = IB_NOT_FOUND; */ +/* goto Exit; */ +/* } */ + + p_ibtrapgen->port_guid = attr_array[p_ibtrapgen->p_opt->port_num - 1].port_guid; + /* save sm_lid as we need it when sending the Trap (dest lid)*/ + p_ibtrapgen->p_opt->sm_lid = attr_array[p_ibtrapgen->p_opt->port_num - 1].sm_lid; + + osm_log(p_ibtrapgen->p_log, OSM_LOG_DEBUG, + "ibtrapgen_bind: Port Num:%u " + "GUID:0x%016"PRIx64"\n", + p_ibtrapgen->p_opt->port_num, + p_ibtrapgen->port_guid ); + + /* ok finaly bind the sa interface to this port */ + /* TODO - BIND LIKE THE osm_sm_mad_ctrl does */ + bind_info.class_version = 1; + bind_info.is_report_processor = TRUE; + bind_info.is_responder = TRUE; + bind_info.is_trap_processor = TRUE; + bind_info.mad_class = IB_MCLASS_SUBN_LID; + bind_info.port_guid = p_ibtrapgen->port_guid; + bind_info.recv_q_size = OSM_SM_DEFAULT_QP0_RCV_SIZE; + bind_info.send_q_size = OSM_SM_DEFAULT_QP0_SEND_SIZE; + + osm_log(p_ibtrapgen->p_log, OSM_LOG_DEBUG, + "ibtrapgen_bind: Trying to bind to GUID:0x%016"PRIx64"\n", + bind_info.port_guid ); + + p_ibtrapgen->h_bind = osm_vendor_bind( p_ibtrapgen->p_vendor, + &bind_info, + &p_ibtrapgen->mad_pool, + __ibtrapgen_rcv_callback, + __ibtrapgen_send_err_cb, + p_ibtrapgen ); + + if( p_ibtrapgen->h_bind == OSM_BIND_INVALID_HANDLE ) + { + osm_log( p_ibtrapgen->p_log, OSM_LOG_ERROR, + "ibtrapgen_bind: ERR 0005: " + "Unable to bind to SA\n" ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_ibtrapgen->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +void +ibtrapgen_destroy( IN ibtrapgen_t * p_ibtrapgen ) +{ + if( p_ibtrapgen->p_vendor ) + { + 
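+      /* release the vendor transport object allocated in ibtrapgen_init */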
osm_vendor_delete( &p_ibtrapgen->p_vendor ); + } + + osm_log_destroy( p_ibtrapgen->p_log ); + cl_free( p_ibtrapgen->p_log ); +} + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +ibtrapgen_run( IN ibtrapgen_t * const p_ibtrapgen ) +{ + osm_madw_t* p_report_madw; + ib_mad_notice_attr_t* p_report_ntc; + ib_mad_t* p_mad; + ib_smp_t* p_smp_mad; + osm_mad_addr_t mad_addr; + static atomic32_t trap_fwd_trans_id = 0x02DAB000; + ib_api_status_t status; + osm_log_t *p_log = p_ibtrapgen->p_log; + uint16_t i; + + OSM_LOG_ENTER( p_log, ibtrapgen_run ); + + osm_log( p_log, OSM_LOG_INFO, + "ibtrapgen_run: " + "Sending trap:%u from LID:0x%X %u times\n", + p_ibtrapgen->p_opt->trap_num, + p_ibtrapgen->p_opt->lid, + p_ibtrapgen->p_opt->number ); + + printf("-V- SM lid is : 0x%04X\n",cl_ntoh16(p_ibtrapgen->p_opt->sm_lid)); + mad_addr.dest_lid = (p_ibtrapgen->p_opt->sm_lid); + /* ??? - what is path_bits? What should be the value here?? */ + mad_addr.path_bits = 0; + /* ??? - what is static_rate? What should be the value here?? */ + mad_addr.static_rate = 0; + + mad_addr.addr_type.smi.source_lid = cl_hton16(p_ibtrapgen->p_opt->lid); + mad_addr.addr_type.smi.port_num = p_ibtrapgen->p_opt->src_port; + + for (i = 1 ; i <= p_ibtrapgen->p_opt->number ; i++ ) + { + p_report_madw = osm_mad_pool_get( &p_ibtrapgen->mad_pool, + p_ibtrapgen->h_bind, + MAD_BLOCK_SIZE, + &mad_addr ); + + if( !p_report_madw ) + { + osm_log(p_log, OSM_LOG_ERROR, + "ibtrapgen_run: ERR 00020: " + "osm_mad_pool_get failed.\n" ); + status = IB_ERROR; + goto Exit; + } + + p_report_madw->resp_expected = FALSE; + + /* advance trap trans id (cant simply ++ on some systems inside ntoh) */ + p_mad = osm_madw_get_mad_ptr( p_report_madw ); + ib_mad_init_new(p_mad, + IB_MCLASS_SUBN_LID, + 1, + IB_MAD_METHOD_TRAP, + cl_hton64( (uint64_t)cl_atomic_inc( &trap_fwd_trans_id ) ), + IB_MAD_ATTR_NOTICE, + 0); + + p_smp_mad = osm_madw_get_smp_ptr( p_report_madw ); + + /* The payload is analyzed as mad notice attribute */ + p_report_ntc = (ib_mad_notice_attr_t*)(ib_smp_get_payload_ptr(p_smp_mad)); + + cl_memclr( p_report_ntc, sizeof(*p_report_ntc) ); + p_report_ntc->generic_type = 0x83; /* is generic subn mgt type */ + ib_notice_set_prod_type(p_report_ntc, 2); /* A switch generator */ + p_report_ntc->g_or_v.generic.trap_num = cl_hton16(p_ibtrapgen->p_opt->trap_num); + p_report_ntc->issuer_lid = cl_hton16(p_ibtrapgen->p_opt->lid); + if (p_ibtrapgen->p_opt->trap_num == 128) + { + p_report_ntc->data_details.ntc_128.sw_lid = cl_hton16(p_ibtrapgen->p_opt->lid); + } + else + { + p_report_ntc->data_details.ntc_129_131.lid = + cl_hton16(p_ibtrapgen->p_opt->lid); + p_report_ntc->data_details.ntc_129_131.port_num = + p_ibtrapgen->p_opt->src_port; + } + + status = osm_vendor_send(p_report_madw->h_bind, p_report_madw, FALSE ); + if (status != IB_SUCCESS) + { + osm_log(p_log, OSM_LOG_ERROR, + "ibtrapgen_run: ERR 0021: " + "osm_vendor_send. status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + osm_log(p_log, OSM_LOG_INFO, + "ibtrapgen_run: " + "Sent trap number:%u out of:%u\n", + i, + p_ibtrapgen->p_opt->number ); + /* sleep according to rate time. The usleep is in usec - need to revert + the milisecs to usecs. 
*/ + usleep(p_ibtrapgen->p_opt->rate*1000); + } + + Exit: + // sleep(3); + OSM_LOG_EXIT( p_log ); + return(status); +} diff --git a/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.h b/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.h new file mode 100644 index 00000000..6fe54862 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/ibtrapgen/ibtrapgen.h @@ -0,0 +1,313 @@ +/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + This software program is available to you under a choice of one of two + licenses. You may choose to be licensed under either the GNU General Public + License (GPL) Version 2, June 1991, available at + http://www.fsf.org/copyleft/gpl.html, or the Intel BSD + Patent License, + the text of which follows: + + "Recipient" has requested a license and Intel Corporation ("Intel") + is willing to grant a license for the software entitled + InfiniBand(tm) System Software (the "Software") being provided by + Intel Corporation. + + The following definitions apply to this License: + + "Licensed Patents" means patent claims licensable by Intel Corporation which + are necessarily infringed by the use or sale of the Software alone or when + combined with the operating system referred to below. + + "Recipient" means the party to whom Intel delivers this Software. + "Licensee" means Recipient and those third parties that receive a license to + any operating system available under the GNU Public License version 2.0 or + later. + + Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + + The license is provided to Recipient and Recipient's Licensees under the + following terms. + + Redistribution and use in source and binary forms of the Software, with or + without modification, are permitted provided that the following + conditions are met: + Redistributions of source code of the Software may retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form of the Software may reproduce the above + copyright notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + Neither the name of Intel Corporation nor the names of its contributors shall + be used to endorse or promote products derived from this Software without + specific prior written permission. + + Intel hereby grants Recipient and Licensees a non-exclusive, worldwide, + royalty-free patent license under Licensed Patents to make, use, sell, offer + to sell, import and otherwise transfer the Software, if any, in source code + and object code form. This license shall include changes to the Software that + are error corrections or other minor changes to the Software that do not add + functionality or features when the Software is incorporated in any version of + a operating system that has been distributed under the GNU General Public + License 2.0 or later. This patent license shall apply to the combination of + the Software and any operating system licensed under the GNU Public License + version 2.0 or later if, at the time Intel provides the Software to + Recipient, such addition of the Software to the then publicly + available versions of such operating system available under the GNU + Public License version 2.0 or later (whether in gold, beta or alpha + form) causes such combination to be covered by the Licensed + Patents. The patent license shall not apply to any other + combinations which include the Software. 
No hardware per se is
+ licensed hereunder.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS CONTRIBUTORS
+ BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
+ OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ OF SUBSTITUTE GOODS OR SERVICES;
+ LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+ --------------------------------------------------------------------------*/
+
+
+/*
+ * Abstract:
+ *    Declaration of ibtrapgen_t.
+ *    This object represents the ibtrapgen object.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.1 $
+ */
+
+#ifndef _IBTRAPGEN_H_
+#define _IBTRAPGEN_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/****h* Trap_Generator_App/Ibtrapgen
+ * NAME
+ *    Ibtrapgen
+ *
+ * DESCRIPTION
+ *    The Ibtrapgen object generates traps (128-131) and sends them to
+ *    the SM, as if they originated from a given LID and source port.
+ *
+ * AUTHOR
+ *    Yael Kalka, Mellanox.
+ *
+ *********/
+
+/****s* Trap_Generator_App/ibtrapgen_opt_t
+ * NAME
+ *    ibtrapgen_opt_t
+ *
+ * DESCRIPTION
+ *    Ibtrapgen options structure. This structure contains the various
+ *    specific configuration parameters for ibtrapgen.
+ *
+ * SYNOPSIS
+ */
+typedef struct _ibtrapgen_opt
+{
+  uint8_t   trap_num;
+  uint16_t  number;
+  uint16_t  rate;
+  uint16_t  lid;
+  uint16_t  sm_lid;
+  uint8_t   src_port;
+  uint8_t   port_num;
+  uint32_t  transaction_timeout;
+  boolean_t force_log_flush;
+  char      *log_file;
+} ibtrapgen_opt_t;
+/*
+ * FIELDS
+ *
+ * trap_num
+ *    Trap number to generate.
+ *
+ * number
+ *    Number of times the trap should be generated.
+ *
+ * rate
+ *    Rate of trap generation (in milliseconds).
+ *
+ * lid
+ *    LID from which the trap should be generated.
+ *
+ * sm_lid
+ *    LID of the SM, used as the destination of the generated traps.
+ *
+ * src_port
+ *    Source port from which the trap should be generated.
+ *
+ * port_num
+ *    Port number used for communicating with the SA.
+ *
+ * transaction_timeout
+ *    Transaction timeout passed to the vendor layer.
+ *
+ * force_log_flush
+ *    Forces flushing of the log after each message.
+ *
+ * log_file
+ *    Name of the log file to use (stdout if not given).
+ *
+ * SEE ALSO
+ *********/
+
+/****s* Trap_Generator_App/ibtrapgen_t
+ * NAME
+ *    ibtrapgen_t
+ *
+ * DESCRIPTION
+ *    Ibtrapgen structure.
+ *
+ *    This object should be treated as opaque and should
+ *    be manipulated only through the provided functions.
+ *
+ * SYNOPSIS
+ */
+typedef struct _ibtrapgen
+{
+  osm_log_t           *p_log;
+  struct _osm_vendor  *p_vendor;
+  osm_bind_handle_t   h_bind;
+  osm_mad_pool_t      mad_pool;
+
+  ibtrapgen_opt_t     *p_opt;
+  ib_net64_t          port_guid;
+} ibtrapgen_t;
+/*
+ * FIELDS
+ * p_log
+ *    Log facility used by all Ibtrapgen components.
+ *
+ * p_vendor
+ *    Pointer to the vendor transport layer.
+ *
+ * h_bind
+ *    The bind handle obtained from osm_vendor_bind.
+ *
+ * mad_pool
+ *    The mad pool provided for the vendor layer to allocate mad wrappers in.
+ *
+ * p_opt
+ *    ibtrapgen options structure.
+ *
+ * port_guid
+ *    GUID of the port over which ibtrapgen is running.
+ *
+ * SEE ALSO
+ *********/
+
+/****f* Trap_Generator_App/ibtrapgen_destroy
+ * NAME
+ *    ibtrapgen_destroy
+ *
+ * DESCRIPTION
+ *    The ibtrapgen_destroy function destroys an ibtrapgen object, releasing
+ *    all resources.
+ * + * SYNOPSIS + */ +void ibtrapgen_destroy( IN ibtrapgen_t * p_ibtrapgen ); + +/* + * PARAMETERS + * p_ibtrapgen + * [in] Pointer to a Trap_Generator_App object to destroy. + * + * RETURN VALUE + * This function does not return a value. + * + * NOTES + * Performs any necessary cleanup of the specified Trap_Generator_App object. + * Further operations should not be attempted on the destroyed object. + * This function should only be called after a call to ibtrapgen_init. + * + * SEE ALSO + * ibtrapgen_init + *********/ + +/****f* Trap_Generator_App/ibtrapgen_init + * NAME + * ibtrapgen_init + * + * DESCRIPTION + * The ibtrapgen_init function initializes a Trap_Generator_App object for use. + * + * SYNOPSIS + */ +ib_api_status_t ibtrapgen_init( IN ibtrapgen_t * const p_ibtrapgen, + IN ibtrapgen_opt_t * const p_opt, + IN const osm_log_level_t log_flags + ); + +/* + * PARAMETERS + * p_ibtrapgen + * [in] Pointer to an ibtrapgen_t object to initialize. + * + * p_opt + * [in] Pointer to the options structure. + * + * log_flags + * [in] Log level flags to set. + * + * RETURN VALUES + * IB_SUCCESS if the Trap_Generator_App object was initialized successfully. + * + * NOTES + * Allows calling other Trap_Generator_App methods. + * + * SEE ALSO + * ibtrapgen object, ibtrapgen_construct, ibtrapgen_destroy + *********/ + + +/****f* Trap_Generator_App/ibtrapgen_bind + * NAME + * ibtrapgen_bind + * + * DESCRIPTION + * Binds ibtrapgen to a local port. + * + * SYNOPSIS + */ +ib_api_status_t ibtrapgen_bind( IN ibtrapgen_t * p_ibtrapgen ); +/* + * PARAMETERS + * p_ibtrapgen + * [in] Pointer to an ibtrapgen_t object. + * + * RETURN VALUES + * IB_SUCCESS if OK + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* Trap_Generator_App/ibtrapgen_run + * NAME + * ibtrapgen_run + * + * DESCRIPTION + * Runs the ibtrapgen flow: Creation of traps. + * + * SYNOPSIS + */ +ib_api_status_t ibtrapgen_run( IN ibtrapgen_t * const p_ibtrapgen ); + +/* + * PARAMETERS + * p_ibtrapgen + * [in] Pointer to an ibtrapgen_t object. + * + * RETURN VALUES + * IB_SUCCESS on success + * + * NOTES + * + * SEE ALSO + *********/ + +#endif /* */ diff --git a/branches/Ndi/ulp/opensm/user/ibtrapgen/main.c b/branches/Ndi/ulp/opensm/user/ibtrapgen/main.c new file mode 100644 index 00000000..f7f1de7d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/ibtrapgen/main.c @@ -0,0 +1,465 @@ +/*++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ + This software program is available to you under a choice of one of two + licenses. You may choose to be licensed under either the GNU General Public + License (GPL) Version 2, June 1991, available at + http://www.fsf.org/copyleft/gpl.html, or the Intel BSD + Patent License, + the text of which follows: + + "Recipient" has requested a license and Intel Corporation ("Intel") + is willing to grant a license for the software entitled + InfiniBand(tm) System Software (the "Software") being provided by + Intel Corporation. + + The following definitions apply to this License: + + "Licensed Patents" means patent claims licensable by Intel Corporation which + are necessarily infringed by the use or sale of the Software alone or when + combined with the operating system referred to below. + + "Recipient" means the party to whom Intel delivers this Software. + "Licensee" means Recipient and those third parties that receive a license to + any operating system available under the GNU Public License version 2.0 or + later. + + Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ + The license is provided to Recipient and Recipient's Licensees under the + following terms. + + Redistribution and use in source and binary forms of the Software, with or + without modification, are permitted provided that the following + conditions are met: + Redistributions of source code of the Software may retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form of the Software may reproduce the above + copyright notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + + Neither the name of Intel Corporation nor the names of its contributors shall + be used to endorse or promote products derived from this Software without + specific prior written permission. + + Intel hereby grants Recipient and Licensees a non-exclusive, worldwide, + royalty-free patent license under Licensed Patents to make, use, sell, offer + to sell, import and otherwise transfer the Software, if any, in source code + and object code form. This license shall include changes to the Software that + are error corrections or other minor changes to the Software that do not add + functionality or features when the Software is incorporated in any version of + a operating system that has been distributed under the GNU General Public + License 2.0 or later. This patent license shall apply to the combination of + the Software and any operating system licensed under the GNU Public License + version 2.0 or later if, at the time Intel provides the Software to + Recipient, such addition of the Software to the then publicly + available versions of such operating system available under the GNU + Public License version 2.0 or later (whether in gold, beta or alpha + form) causes such combination to be covered by the Licensed + Patents. The patent license shall not apply to any other + combinations which include the Software. No hardware per se is + licensed hereunder. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL OR ITS CONTRIBUTORS + BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, + OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT + OF SUBSTITUTE GOODS OR SERVICES; + LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + POSSIBILITY OF SUCH DAMAGE. + --------------------------------------------------------------------------*/ + + +/* + * Abstract: + * Command line interface for ibtrapgen. + * Parse and fill in the options and call the actual code. + * Implemented in ibtrapgen: + * Initialize the ibmgrp object (and log) + * Bind ibmgrp to the requested IB port. 
+ * Run the actual command + * + * Environment: + * Linux User Mode + * + * $Revision: 1.1 $ + */ + +#include +#include +#ifndef __WIN__ +#include +#endif +#include +#include +#include "ibtrapgen.h" + +#define DEFAULT_RETRY_COUNT 3 +#define DEFAULT_TRANS_TIMEOUT_MILLISEC 1000 + +/********************************************************************** + **********************************************************************/ +boolean_t +ibtrapgen_is_debug() +{ +#if defined( _DEBUG_ ) + return TRUE; +#else + return FALSE; +#endif /* defined( _DEBUG_ ) */ +} + +/********************************************************************** + **********************************************************************/ +void show_usage(void); + +void +show_usage( ) +{ + printf( "\n------- ibtrapgen - Usage and options ----------------------\n" ); + printf( "Usage: one of the following optional flows:\n" ); + printf(" ibtrapgen -t|--trap_num -n|--number \n" + " -r|--rate -l|--lid \n" + " -s|--src_port -p|--port_num \n" ); + printf( "\nOptions:\n" ); + printf( "-t \n" + "--trap_num \n" + " This option specifies the number of the trap to generate.\n" + " Valid values are 128-131.\n" ); + printf( "-n \n" + "--number \n" + " This option specifies the number of times to generate this trap.\n" + " If not specified - default to 1.\n" ); + printf( "-r \n" + "--rate \n" + " This option specifies the rate of the trap generation.\n" + " What is the time period between one generation and another?\n" + " The value is given in miliseconds. \n" + " If the number of trap creations is 1 - this value is ignored.\n" ); + printf( "-l \n" + "--lid \n" + " This option specifies the lid address from where the trap should\n" + " be generated.\n" ); + printf( "-s \n" + "--src_port \n" + " This option specifies the port number from which the trap should\n" + " be generated. 
If trap number is 128 - this value is ignored (since\n" + " trap 128 is not sent with a specific port number)\n" ); + printf( "-p \n" + "--port_num \n" + " This is the port number used for communicating with\n" + " the SA.\n" ); + printf( "-h\n" + "--help\n" " Display this usage info then exit.\n\n" ); + printf( "-o\n" + "--out_log_file\n" + " This option defines the log to be the given file.\n" + " By default the log goes to stdout.\n\n"); + printf( "-v\n" + " This option increases the log verbosity level.\n" + " The -v option may be specified multiple times\n" + " to further increase the verbosity level.\n" + " See the -vf option for more information about.\n" + " log verbosity.\n\n" ); + printf( "-V\n" + " This option sets the maximum verbosity level and\n" + " forces log flushing.\n" + " The -V is equivalent to '-vf 0xFF -d 2'.\n" + " See the -vf option for more information about.\n" + " log verbosity.\n\n" ); + printf( "-x \n" + " This option sets the log verbosity level.\n" + " A flags field must follow the -vf option.\n" + " A bit set/clear in the flags enables/disables a\n" + " specific log level as follows:\n" + " BIT LOG LEVEL ENABLED\n" + " ---- -----------------\n" + " 0x01 - ERROR (error messages)\n" + " 0x02 - INFO (basic messages, low volume)\n" + " 0x04 - VERBOSE (interesting stuff, moderate volume)\n" + " 0x08 - DEBUG (diagnostic, high volume)\n" + " 0x10 - FUNCS (function entry/exit, very high volume)\n" + " 0x20 - FRAMES (dumps all SMP and GMP frames)\n" + " 0x40 - currently unused.\n" + " 0x80 - currently unused.\n" + " Without -x, ibtrapgen defaults to ERROR + INFO (0x3).\n" + " Specifying -x 0 disables all messages.\n" + " Specifying -x 0xFF enables all messages (see -V).\n\n" ); +} + +/********************************************************************** + **********************************************************************/ +/* + Converts a GID string of the format 0xPPPPPPPPPPPPPPPP:GGGGGGGGGGGGGGGG + to a gid type +*/ +int +str2gid( + IN char *str, + OUT ib_gid_t *p_gid + ); + +int +str2gid( + IN char *str, + OUT ib_gid_t *p_gid + ) +{ + ib_gid_t temp; + char buf[38]; + char *p_prefix, *p_guid; + + CL_ASSERT(p_gid); + + strcpy(buf, str); + p_prefix = buf; + /*p_guid = index(buf, ':');*/ + p_guid = strchr( buf, ':' ); + + if (! 
p_guid) + { + printf("Wrong format for gid %s\n", buf); + return 1; + } + + *p_guid = '\0'; + p_guid++; + + errno = 0; + temp.unicast.prefix = cl_hton64(strtoull(p_prefix, NULL, 0)); + if (errno) { + printf("Wrong format for gid prefix:%s (got %u)\n", + p_prefix, errno); + return 1; + } + + temp.unicast.interface_id = cl_hton64(strtoull(p_guid, NULL, 16)); + if (errno) { + printf("Wrong format for gid guid:%s\n", p_guid); + return 1; + } + + *p_gid = temp; + return 0; +} +void OsmReportState(IN const char *p_str) +{ +} +/********************************************************************** + **********************************************************************/ +int OSM_CDECL +main( int argc, + char *argv[] ) +{ + static ibtrapgen_t ibtrapgen; + ibtrapgen_opt_t opt = { 0 }; + ib_api_status_t status; + uint32_t log_flags = OSM_LOG_ERROR | OSM_LOG_INFO; + uint32_t next_option; + const char *const short_option = "t:n:r:s:l:p:o:vVh"; + + /* + * In the array below, the 2nd parameter specified the number + * of arguments as follows: + * 0: no arguments + * 1: argument + * 2: optional + */ + const struct option long_option[] = { + {"trap_num", 1, NULL, 't'}, + {"number", 1, NULL, 'n'}, + {"rate", 1, NULL, 'r'}, + {"lid", 1, NULL, 'l'}, + {"src_port", 1, NULL, 's'}, + {"port_num", 1, NULL, 'p'}, + {"help", 0, NULL, 'h'}, + {"verbose", 0, NULL, 'v'}, + {"out_log_file", 1, NULL, 'o'}, + {"vf", 1, NULL, 'x'}, + {"V", 0, NULL, 'V'}, + + {NULL, 0, NULL, 0} /* Required at end of array */ + }; + + + opt.trap_num = 0; + opt.number = 1; /* This is the default value */ + opt.rate = 0; + opt.lid = 0; + opt.src_port = 0; + opt.port_num = 0; + opt.log_file = NULL; + opt.force_log_flush = FALSE; + opt.transaction_timeout = DEFAULT_TRANS_TIMEOUT_MILLISEC; + + do + { + next_option = getopt_long_only( argc, argv, short_option, + long_option, NULL ); + + switch ( next_option ) + { + case 't': + /* + * Define the trap number + */ + opt.trap_num = (uint8_t)atoi( optarg ); + if ((opt.trap_num < 128) || (opt.trap_num > 131)) + { + printf( "-E- Given trap number is illegal! \n" + " Supportes generation of traps 128-131.\n" ); + exit(1); + } + printf( "-I- Trap Number = %u\n", opt.trap_num ); + break; + + case 'n': + /* + * Define the number of occurences + */ + opt.number = (uint16_t)atoi( optarg ); + + printf( "-I- Number Trap Occurences = %u\n", opt.number ); + break; + + case 'r': + /* + * Define the rate of the trap + */ + opt.rate = (uint16_t)atoi( optarg ); + + printf( "-I- Trap Rate = %u miliseconds\n", opt.rate ); + break; + + + case 'l': + /* + * Define the source lid of the trap + */ + opt.lid = (uint16_t)strtoul( optarg , NULL , 16); + + printf( "-I- Trap Lid = 0x%04X\n", opt.lid ); + break; + + case 's': + /* + * Define the source port number of the trap + */ + opt.src_port = (uint8_t)atoi( optarg ); + + printf( "-I- Trap Port Number = %u\n", opt.src_port ); + break; + + case 'p': + /* + * Specifies port guid with which to bind. + */ + opt.port_num = (uint8_t)atoi( optarg ); + printf( "-I- Port Num:%u\n", opt.port_num ); + break; + + case 'o': + opt.log_file = optarg; + printf("-I- Log File:%s\n", opt.log_file ); + break; + + case 'v': + /* + * Increases log verbosity. + */ + log_flags = ( log_flags << 1 ) | 1; + printf( "-I- Verbose option -v (log flags = 0x%X)\n", log_flags ); + break; + + case 'V': + /* + * Specifies maximum log verbosity. 
+ */ + log_flags = 0xFFFFFFFF; + opt.force_log_flush = TRUE; + printf( "-I- Enabling maximum log verbosity\n" ); + break; + + case 'h': + show_usage( ); + return 0; + + case 'x': + log_flags = strtol( optarg, NULL, 0 ); + printf( "-I- Verbose option -vf (log flags = 0x%X)\n", + log_flags ); + break; + + case -1: + /* printf( "Done with args\n" ); */ + break; + + default: /* something wrong */ + abort( ); + } + + } + while( next_option != -1 ); + + /* Check for mandatory options */ + if (opt.trap_num == 0) + { + printf( "-E- Missing trap number.\n" ); + exit(1); + } + if (opt.lid == 0) + { + printf( "-E- Missing lid.\n" ); + exit(1); + } + if (opt.src_port == 0 && opt.trap_num >= 129 && opt.trap_num <= 131) + { + /* for trap 129-131 should be given source port number */ + printf( "-E- source port number.\n" ); + exit(1); + } + if (opt.port_num == 0) + { + printf( "-E- Missing port number.\n" ); + exit(1); + } + if (opt.rate == 0 && opt.number > 1) + { + /* for number of traps greater than 1 need to give the rate for the + trap generation. */ + printf( "-E- Missing rate.\n" ); + exit(1); + } + + + /* init the main object and sub objects (log and osm vendor) */ + status = ibtrapgen_init( &ibtrapgen, &opt, ( osm_log_level_t ) log_flags ); + if( status != IB_SUCCESS ) + { + printf("-E- fail to init ibtrapgen.\n"); + goto Exit; + } + + /* bind to a specific port */ + status = ibtrapgen_bind( &ibtrapgen ); + if (status != IB_SUCCESS) exit(status); + + /* actual work */ + status = ibtrapgen_run( &ibtrapgen ); + if (status != IB_SUCCESS) + { + printf("IBTRAPGEN: FAIL\n"); + } + else + { + printf("IBTRAPGEN: PASS\n"); + } + + //ibtrapgen_destroy( &ibtrapgen ); + + Exit: + exit ( status ); +} diff --git a/branches/Ndi/ulp/opensm/user/include/complib/cl_byteswap.h b/branches/Ndi/ulp/opensm/user/include/complib/cl_byteswap.h new file mode 100644 index 00000000..2ba5a771 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/complib/cl_byteswap.h @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2004,2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * provides byteswapping utilities. Basic fuctions are obtained from platform + * specific implementations from ibyteswap_osd.h. 
+ * + * Environment: + * All + */ + + +#ifndef _CL_BYTESWAP_H_ +#define _CL_BYTESWAP_H_ + + +#include +#include + + +/****h* Component Library/Byte Swapping +* NAME +* Byte Swapping +* +* DESCRIPTION +* The byte swapping functions and macros allow swapping bytes from network +* byte order to host byte order. +* +* All data transmitted between systems should be in network byte order. +* In order to utilize such data, it must be converted to host byte order +* before use. +* +* SEE ALSO +* Functions: +* cl_ntoh16, cl_hton16, cl_ntoh32, cl_hton32, cl_ntoh64, cl_hton64, +* cl_ntoh +* +* Macros: +* CL_NTOH16, CL_HTON16, CL_NTOH32, CL_HTON32, CL_NTOH64, CL_HTON64 +*********/ + + +/* + * The ibyteswap_osd.h provides the following macros. + * __LITTLE_ENDIAN + * __BIG_ENDIAN + * __BYTE_ORDER + * + * If the platform provides byte swapping functions, ibyteswap_osd.h also + * provides the following macros. + * ntoh16, hton16 + * ntoh32, hton32 + * ntoh64, hton64 + */ + +#ifdef __cplusplus +extern "C" +{ +#endif + + +/****d* Component Library: Byte Swapping/CL_NTOH16 +* NAME +* CL_NTOH16 +* +* DESCRIPTION +* The CL_NTOH16 macro converts a 16-bit value from network byte order to +* host byte order. The CL_NTOH16 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_NTOH16 is less efficient +* than the cl_ntoh16 function. +* +* SYNOPSIS +* CL_NTOH16( val ); +* +* PARAMETERS +* val +* [in] 16-bit value to swap from network byte order to host byte order. +* +* RESULT +* Value of val converted to host byte order. +* +* NOTES +* This macro is analogous to CL_HTON16. +* +* SEE ALSO +* Byte Swapping, CL_HTON16, CL_NTOH32, CL_NTOH64, +* cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****d* Component Library: Byte Swapping/CL_HTON16 +* NAME +* CL_HTON16 +* +* DESCRIPTION +* The CL_HTON16 macro converts a 16-bit value from host byte order to +* network byte order. The CL_HTON16 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_HTON16 is less efficient +* than the cl_hton16 function. +* +* SYNOPSIS +* CL_HTON16( val ); +* +* PARAMETERS +* val +* [in] 16-bit value to swap from host byte order to network byte order. +* +* RESULT +* Value of val converted to network byte order. +* +* NOTES +* This macro is analogous to CL_NTOH16. +* +* SEE ALSO +* Byte Swapping, CL_NTOH16, CL_HTON32, CL_HTON64, +* cl_hton16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#if CPU_LE + #define CL_NTOH16( x ) (uint16_t)( \ + (((uint16_t)(x) & 0x00FF) << 8) | \ + (((uint16_t)(x) & 0xFF00) >> 8) ) +#else + #define CL_NTOH16( x ) (x) +#endif +#define CL_HTON16 CL_NTOH16 + + +/****f* Component Library: Byte Swapping/cl_ntoh16 +* NAME +* cl_ntoh16 +* +* DESCRIPTION +* The cl_ntoh16 function converts a 16-bit value from network byte order to +* host byte order. +* +* SYNOPSIS +* uint16_t +* cl_ntoh16( +* IN const uint16_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from network byte order to host byte order. +* +* RETURN VALUE +* Value of val converted to host byte order. +* +* NOTES +* This function is analogous to cl_hton16. +* +* SEE ALSO +* Byte Swapping, cl_hton16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****f* Component Library: Byte Swapping/cl_hton16 +* NAME +* cl_hton16 +* +* DESCRIPTION +* The cl_hton16 function converts a 16-bit value from host byte order to +* network byte order. +* +* SYNOPSIS +* uint16_t +* cl_hton16( +* IN const uint16_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from host byte order to network byte order . 
+* +* RETURN VALUE +* Value of val converted to network byte order. +* +* NOTES +* This function is analogous to cl_ntoh16. +* +* SEE ALSO +* Byte Swapping, cl_ntoh16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#undef cl_ntoh16 +#undef cl_hton16 +#ifndef cl_ntoh16 + #define cl_ntoh16 CL_NTOH16 + #define cl_hton16 CL_HTON16 +#endif + + +/****d* Component Library: Byte Swapping/CL_NTOH32 +* NAME +* CL_NTOH32 +* +* DESCRIPTION +* The CL_NTOH32 macro converts a 32-bit value from network byte order to +* host byte order. The CL_NTOH32 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_NTOH32 is less efficient +* than the cl_ntoh32 function. +* +* SYNOPSIS +* CL_NTOH32( val ); +* +* PARAMETERS +* val +* [in] 32-bit value to swap from network byte order to host byte order. +* +* RESULT +* Value of val converted to host byte order. +* +* NOTES +* This macro is analogous to CL_HTON32. +* +* SEE ALSO +* Byte Swapping, CL_HTON32, CL_NTOH16, CL_NTOH64, +* cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****d* Component Library: Byte Swapping/CL_HTON32 +* NAME +* CL_HTON32 +* +* DESCRIPTION +* The CL_HTON32 macro converts a 32-bit value from host byte order to +* network byte order. The CL_HTON32 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_HTON32 is less efficient +* than the cl_hton32 function. +* +* SYNOPSIS +* CL_HTON32( val ); +* +* PARAMETERS +* val +* [in] 32-bit value to swap from host byte order to network byte order. +* +* RESULT +* Value of val converted to network byte order. +* +* NOTES +* This macro is analogous to CL_NTOH32. +* +* SEE ALSO +* Byte Swapping, CL_NTOH32, CL_HTON16, CL_HTON64, +* cl_hton16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#if CPU_LE + #define CL_NTOH32( x ) (uint32_t)( \ + (((uint32_t)(x) & 0x000000FF) << 24) | \ + (((uint32_t)(x) & 0x0000FF00) << 8) | \ + (((uint32_t)(x) & 0x00FF0000) >> 8) | \ + (((uint32_t)(x) & 0xFF000000) >> 24) ) +#else + #define CL_NTOH32( x ) (x) +#endif +#define CL_HTON32 CL_NTOH32 + + +/****f* Component Library: Byte Swapping/cl_ntoh32 +* NAME +* cl_ntoh32 +* +* DESCRIPTION +* The cl_ntoh32 function converts a 32-bit value from network byte order to +* host byte order. +* +* SYNOPSIS +* uint32_t +* cl_ntoh32( +* IN const uint32_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from network byte order to host byte order. +* +* RETURN VALUE +* Value of val converted in host byte order. +* +* NOTES +* This function is analogous to cl_hton32. +* +* SEE ALSO +* Byte Swapping, cl_hton32, cl_ntoh16, cl_ntoh64, cl_ntoh +*********/ +/****f* Component Library: Byte Swapping/cl_hton32 +* NAME +* cl_hton32 +* +* DESCRIPTION +* The cl_hton32 function converts a 32-bit value from host byte order to +* network byte order. +* +* SYNOPSIS +* uint32_t +* cl_hton32( +* IN const uint32_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from host byte order to network byte order . +* +* RETURN VALUE +* Value of val converted to network byte order. +* +* NOTES +* This function is analogous to cl_ntoh32. +* +* SEE ALSO +* Byte Swapping, cl_ntoh32, cl_hton16, cl_hton64, cl_ntoh +*********/ +#undef cl_ntoh32 +#undef cl_hton32 +#ifndef cl_ntoh32 + #define cl_ntoh32 CL_NTOH32 + #define cl_hton32 CL_HTON32 +#endif + + +/****d* Component Library: Byte Swapping/CL_NTOH64 +* NAME +* CL_NTOH64 +* +* DESCRIPTION +* The CL_NTOH64 macro converts a 64-bit value from network byte order to +* host byte order. 
The CL_NTOH64 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_NTOH64 is less efficient +* than the cl_ntoh64 function. +* +* SYNOPSIS +* CL_NTOH64( val ); +* +* PARAMETERS +* val +* [in] 64-bit value to swap from network byte order to host byte order. +* +* RESULT +* Value of val converted to host byte order. +* +* NOTES +* This macro is analogous to CL_HTON64. +* +* SEE ALSO +* Byte Swapping, CL_HTON64, CL_NTOH16, CL_NTOH32, +* cl_ntoh16, cl_ntoh32, cl_ntoh64, cl_ntoh +*********/ +/****d* Component Library: Byte Swapping/CL_HTON64 +* NAME +* CL_HTON64 +* +* DESCRIPTION +* The CL_HTON64 macro converts a 64-bit value from host byte order to +* network byte order. The CL_HTON64 macro will cause constant values to be +* swapped by the pre-processor. For variables, CL_HTON64 is less efficient +* than the cl_hton64 function. +* +* SYNOPSIS +* CL_HTON64( val ); +* +* PARAMETERS +* val +* [in] 64-bit value to swap from host byte order to network byte order. +* +* RESULT +* Value of val converted to network byte order. +* +* NOTES +* This macro is analogous to CL_NTOH64. +* +* SEE ALSO +* Byte Swapping, CL_NTOH64, CL_HTON16, CL_HTON32, +* cl_hton16, cl_hton32, cl_hton64, cl_ntoh +*********/ +#if CPU_LE + #define CL_NTOH64( x ) (uint64_t)( \ + (((uint64_t)(x) & CL_CONST64(0x00000000000000FF)) << 56) | \ + (((uint64_t)(x) & CL_CONST64(0x000000000000FF00)) << 40) | \ + (((uint64_t)(x) & CL_CONST64(0x0000000000FF0000)) << 24) | \ + (((uint64_t)(x) & CL_CONST64(0x00000000FF000000)) << 8 ) | \ + (((uint64_t)(x) & CL_CONST64(0x000000FF00000000)) >> 8 ) | \ + (((uint64_t)(x) & CL_CONST64(0x0000FF0000000000)) >> 24) | \ + (((uint64_t)(x) & CL_CONST64(0x00FF000000000000)) >> 40) | \ + (((uint64_t)(x) & CL_CONST64(0xFF00000000000000)) >> 56) ) +#else + #define CL_NTOH64( x ) (x) +#endif +#define CL_HTON64 CL_NTOH64 + + +/****f* Component Library: Byte Swapping/cl_ntoh64 +* NAME +* cl_ntoh64 +* +* DESCRIPTION +* The cl_ntoh64 function converts a 64-bit value from network byte order to +* host byte order. +* +* SYNOPSIS +* uint64_t +* cl_ntoh64( +* IN const uint64_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from network byte order to host byte order. +* +* RETURN VALUE +* Value of val converted in host byte order. +* +* NOTES +* This function is analogous to cl_hton64. +* +* SEE ALSO +* Byte Swapping, cl_hton64, cl_ntoh16, cl_ntoh32, cl_ntoh +*********/ +/****f* Component Library: Byte Swapping/cl_hton64 +* NAME +* cl_hton64 +* +* DESCRIPTION +* The cl_hton64 function converts a 64-bit value from host byte order to +* network byte order. +* +* SYNOPSIS +* uint64_t +* cl_hton64( +* IN const uint64_t val ); +* +* PARAMETERS +* val +* [in] Value to swap from host byte order to network byte order . +* +* RETURN VALUE +* Value of val converted to network byte order. +* +* NOTES +* This function is analogous to cl_ntoh64. +* +* SEE ALSO +* Byte Swapping, cl_ntoh64, cl_hton16, cl_hton32, cl_ntoh +*********/ +#undef cl_ntoh64 +#undef cl_hton64 +#ifndef cl_ntoh64 + #define cl_ntoh64 CL_NTOH64 + #define cl_hton64 CL_HTON64 +#endif + + +/****f* Component Library: Byte Swapping/cl_ntoh +* NAME +* cl_ntoh +* +* DESCRIPTION +* The cl_ntoh function converts a value from network byte order to +* host byte order. +* +* SYNOPSIS +*/ +CL_INLINE void CL_API +cl_ntoh( + OUT char* const p_dest, + IN const char* const p_src, + IN const uint8_t size ) +{ +#if CPU_LE + uint8_t i; + char temp; + + if( p_src == p_dest ) + { + /* Swap in place if source and destination are the same. 
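+         Walk from the two ends of the buffer toward the middle,
+         exchanging one pair of bytes per iteration.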
*/ + for( i = 0; i < size / 2; i++ ) + { + temp = p_dest[i]; + p_dest[i] = p_src[size - 1 - i]; + p_dest[size - 1 - i] = temp; + } + } + else + { + for( i = 0; i < size; i++ ) + p_dest[i] = p_src[size - 1 - i]; + } +#else + /* + * If the source and destination are not the same, copy the source to + * the destination. + */ + if( p_src != p_dest ) + cl_memcpy( p_dest, p_src, size ); +#endif +} +/* +* PARAMETERS +* p_dest +* [in] Pointer to a byte array to contain the converted value of p_src. +* +* p_src +* [in] Pointer to a byte array to be converted from network byte +* ordering. +* +* size +* [in] Number of bytes to swap.p_dest +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* cl_ntoh can perform in place swapping if both p_src and p_dest point to +* the same buffer. +* +* SEE ALSO +* Byte Swapping, cl_ntoh16, cl_ntoh32, cl_ntoh64 +*********/ + + +#ifdef __cplusplus +} /* extern "C" */ +#endif + +#endif /* _CL_BYTESWAP_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/complib/cl_dispatcher.h b/branches/Ndi/ulp/opensm/user/include/complib/cl_dispatcher.h new file mode 100644 index 00000000..123bd093 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/complib/cl_dispatcher.h @@ -0,0 +1,660 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of dispatcher abstraction. + * + * Environment: + * All + * + * $Revision: 1.4 $ + */ + +#ifndef _CL_DISPATCHER_H_ +#define _CL_DISPATCHER_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* Component Library/Dispatcher +* NAME +* Dispatcher +* +* DESCRIPTION +* The Dispatcher provides a facility for message routing to +* asynchronous worker threads. +* +* The Dispatcher functions operate on a cl_dispatcher_t structure +* which should be treated as opaque and should be manipulated +* only through the provided functions. 
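+*
+*	In a typical flow a client registers a callback for a message ID
+*	with cl_disp_register, other clients post messages to that ID with
+*	cl_disp_post, and the Dispatcher delivers each message by invoking
+*	the callback on one of its worker threads.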
+* +* SEE ALSO +* Structures: +* cl_dispatcher_t +* +* Initialization/Destruction: +* cl_disp_construct, cl_disp_init, cl_disp_shutdown, cl_disp_destroy +* +* Manipulation: +* cl_disp_post, cl_disp_reset, cl_disp_wait_on +*********/ + +/****s* Component Library: Dispatcher/cl_disp_msgid_t +* NAME +* cl_disp_msgid_t +* +* DESCRIPTION +* Defines the type of dispatcher messages. +* +* SYNOPSIS +*/ +typedef uint32_t cl_disp_msgid_t; +/**********/ + +/****s* Component Library: Dispatcher/CL_DISP_MSGID_NONE +* NAME +* CL_DISP_MSGID_NONE +* +* DESCRIPTION +* Defines a message value that means "no message". +* This value is used during registration by Dispatcher clients +* that do not wish to receive messages. +* +* No Dispatcher message is allowed to have this value. +* +* SYNOPSIS +*/ +#define CL_DISP_MSGID_NONE 0xFFFFFFFF +/**********/ + +/****s* Component Library: Dispatcher/CL_DISP_INVALID_HANDLE +* NAME +* CL_DISP_INVALID_HANDLE +* +* DESCRIPTION +* Defines the value of an invalid Dispatcher registration handle. +* +* SYNOPSIS +*/ +#define CL_DISP_INVALID_HANDLE ((cl_disp_reg_handle_t)0) +/*********/ + +/****f* Component Library: Dispatcher/cl_pfn_msgrcv_cb_t +* NAME +* cl_pfn_msgrcv_cb_t +* +* DESCRIPTION +* This typedef defines the prototype for client functions invoked +* by the Dispatcher. The Dispatcher calls the corresponding +* client function when delivering a message to the client. +* +* The client function must be reentrant if the user creates a +* Dispatcher with more than one worker thread. +* +* SYNOPSIS +*/ +typedef void +(*cl_pfn_msgrcv_cb_t)( + IN void* context, + IN void* p_data ); +/* +* PARAMETERS +* context +* [in] Client specific context specified in a call to +* cl_disp_register +* +* p_data +* [in] Pointer to the client specific data payload +* of this message. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This typedef provides a function prototype reference for +* the function provided by Dispatcher clients as a parameter +* to the cl_disp_register function. +* +* SEE ALSO +* Dispatcher, cl_disp_register +*********/ + +/****f* Component Library: Dispatcher/cl_pfn_msgdone_cb_t +* NAME +* cl_pfn_msgdone_cb_t +* +* DESCRIPTION +* This typedef defines the prototype for client functions invoked +* by the Dispatcher. The Dispatcher calls the corresponding +* client function after completing delivery of a message. +* +* The client function must be reentrant if the user creates a +* Dispatcher with more than one worker thread. +* +* SYNOPSIS +*/ +typedef void +(*cl_pfn_msgdone_cb_t)( + IN void* context, + IN void* p_data ); +/* +* PARAMETERS +* context +* [in] Client specific context specified in a call to +* cl_disp_post +* +* p_data +* [in] Pointer to the client specific data payload +* of this message. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This typedef provides a function prototype reference for +* the function provided by Dispatcher clients as a parameter +* to the cl_disp_post function. +* +* SEE ALSO +* Dispatcher, cl_disp_post +*********/ + +/****s* Component Library: Dispatcher/cl_dispatcher_t +* NAME +* cl_dispatcher_t +* +* DESCRIPTION +* Dispatcher structure. +* +* The Dispatcher is thread safe. +* +* The cl_dispatcher_t structure should be treated as opaque and should +* be manipulated only through the provided functions. 
+*
+* SYNOPSIS
+*/
+typedef struct _cl_dispatcher
+{
+ cl_spinlock_t lock;
+ cl_ptr_vector_t reg_vec;
+ cl_qlist_t reg_list;
+ cl_thread_pool_t worker_threads;
+ cl_qlist_t msg_fifo;
+ cl_qpool_t msg_pool;
+ uint64_t last_msg_queue_time_us;
+} cl_dispatcher_t;
+/*
+* FIELDS
+* reg_vec
+* Vector of registration info objects. Indexed by message msg_id.
+*
+* lock
+* Spinlock to guard internal structures.
+*
+* reg_list
+* List of the registration info objects for all registered clients.
+*
+* msg_fifo
+* FIFO of messages being processed by the Dispatcher. New
+* messages are posted to the tail of the FIFO. Worker threads
+* pull messages from the front.
+*
+* worker_threads
+* Thread pool of worker threads to dispose of posted messages.
+*
+* msg_pool
+* Pool of message objects to be processed through the FIFO.
+*
+* last_msg_queue_time_us
+* The time, in usec, that the last message spent in the queue.
+*
+* SEE ALSO
+* Dispatcher
+*********/
+
+/****s* Component Library: Dispatcher/cl_disp_reg_info_t
+* NAME
+* cl_disp_reg_info_t
+*
+* DESCRIPTION
+* Defines the dispatcher registration object structure.
+*
+* The cl_disp_reg_info_t structure is for internal use by the
+* Dispatcher only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_disp_reg_info
+{
+ cl_list_item_t list_item;
+ cl_pfn_msgrcv_cb_t pfn_rcv_callback;
+ const void *context;
+ atomic32_t ref_cnt;
+ cl_disp_msgid_t msg_id;
+ cl_dispatcher_t *p_disp;
+
+} cl_disp_reg_info_t;
+/*
+* FIELDS
+* list_item
+* Linkage in the parent Dispatcher's list of registration objects.
+*
+* pfn_rcv_callback
+* Client's message receive callback.
+*
+* context
+* Client's context for message receive callback.
+*
+* ref_cnt
+* Number of threads currently using this registration object.
+*
+* msg_id
+* Dispatcher message msg_id value for this registration object.
+*
+* p_disp
+* Pointer to parent Dispatcher.
+*
+* SEE ALSO
+*********/
+
+/****s* Component Library: Dispatcher/cl_disp_msg_t
+* NAME
+* cl_disp_msg_t
+*
+* DESCRIPTION
+* Defines the dispatcher message structure.
+*
+* The cl_disp_msg_t structure is for internal use by the
+* Dispatcher only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_disp_msg
+{
+ cl_pool_item_t item;
+ const void *p_data;
+ cl_disp_reg_info_t *p_src_reg;
+ cl_disp_reg_info_t *p_dest_reg;
+ cl_pfn_msgdone_cb_t pfn_xmt_callback;
+ uint64_t in_time;
+ const void *context;
+} cl_disp_msg_t;
+/*
+* FIELDS
+* item
+* List & Pool linkage. Must be first element in the structure!!
+*
+* p_data
+* Pointer to the data payload for this message. The payload
+* is opaque to the Dispatcher.
+*
+* p_src_reg
+* Pointer to the registration info of the sender.
+*
+* p_dest_reg
+* Pointer to the registration info of the recipient.
+*
+* pfn_xmt_callback
+* Client's message done callback.
+*
+* in_time
+* The absolute time the message was inserted into the queue
+*
+* context
+* Client's message done callback context.
+*
+* SEE ALSO
+*********/
+
+/****s* Component Library: Dispatcher/cl_disp_reg_handle_t
+* NAME
+* cl_disp_reg_handle_t
+*
+* DESCRIPTION
+* Defines the Dispatcher registration handle. This handle
+* should be treated as opaque by the client.
+*
+* SYNOPSIS
+*/
+typedef const struct _cl_disp_reg_info *cl_disp_reg_handle_t;
+/**********/
+
+/****f* Component Library: Dispatcher/cl_disp_construct
+* NAME
+* cl_disp_construct
+*
+* DESCRIPTION
+* This function constructs a Dispatcher object.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_construct(
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_disp
+* [in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling cl_disp_init and cl_disp_destroy.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_init, cl_disp_destroy
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_init
+* NAME
+* cl_disp_init
+*
+* DESCRIPTION
+* This function initializes a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_disp_init(
+ IN cl_dispatcher_t* const p_disp,
+ IN const uint32_t thread_count,
+ IN const char* const name );
+/*
+* PARAMETERS
+* p_disp
+* [in] Pointer to a Dispatcher.
+*
+* thread_count
+* [in] The number of worker threads to create in this Dispatcher.
+* A value of 0 causes the Dispatcher to create one worker thread
+* per CPU in the system. When the Dispatcher is created with
+* only one thread, the Dispatcher guarantees to deliver posted
+* messages in order. When the Dispatcher is created with more
+* than one thread, messages may be delivered out of order.
+*
+* name
+* [in] Name to associate with the threads. The name may be up to 16
+* characters, including a terminating null character. All threads
+* created in the Dispatcher have the same name.
+*
+* RETURN VALUE
+* CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_destroy, cl_disp_register, cl_disp_unregister,
+* cl_disp_post
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_shutdown
+* NAME
+* cl_disp_shutdown
+*
+* DESCRIPTION
+* This function shuts down a Dispatcher object: it unregisters all
+* clients, clears the message FIFO, and waits for the worker threads
+* to exit.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_shutdown(
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_disp
+* [in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function does not return until all worker threads
+* have exited the client callback functions and have been
+* successfully shut down.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_construct, cl_disp_init
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_destroy
+* NAME
+* cl_disp_destroy
+*
+* DESCRIPTION
+* This function destroys a Dispatcher object.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_destroy(
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_disp
+* [in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_construct, cl_disp_init
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_register
+* NAME
+* cl_disp_register
+*
+* DESCRIPTION
+* This function registers a client with a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_disp_reg_handle_t
+cl_disp_register(
+ IN cl_dispatcher_t* const p_disp,
+ IN const cl_disp_msgid_t msg_id,
+ IN cl_pfn_msgrcv_cb_t pfn_callback OPTIONAL,
+ IN const void* const context );
+/*
+* PARAMETERS
+* p_disp
+* [in] Pointer to a Dispatcher.
+*
+* msg_id
+* [in] Numeric message ID for which the client is registering.
+* If the client does not wish to receive any messages
+* (a send-only client), then the caller should set this value
+* to CL_DISP_MSGID_NONE.
For efficiency, numeric message msg_id
+* values should start with 0 and should be contiguous, or nearly so.
+*
+* pfn_callback
+* [in] Message receive callback. The Dispatcher calls this
+* function after receiving a posted message with the
+* appropriate message msg_id value. Send-only clients may specify
+* NULL for this value.
+*
+* context
+* [in] Client context value passed to the cl_pfn_msgrcv_cb_t
+* function.
+*
+* RETURN VALUE
+* On success a Dispatcher registration handle.
+* CL_DISP_INVALID_HANDLE otherwise.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_unregister, cl_disp_post
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_unregister
+* NAME
+* cl_disp_unregister
+*
+* DESCRIPTION
+* This function unregisters a client from a Dispatcher.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_unregister(
+ IN const cl_disp_reg_handle_t handle );
+/*
+* PARAMETERS
+* handle
+* [in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function will not return until worker threads have exited
+* the callback functions for this client. Do not invoke this
+* function from a callback.
+*
+* SEE ALSO
+* Dispatcher, cl_disp_register
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_post
+* NAME
+* cl_disp_post
+*
+* DESCRIPTION
+* This function posts a message to a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_disp_post(
+ IN const cl_disp_reg_handle_t handle,
+ IN const cl_disp_msgid_t msg_id,
+ IN const void* const p_data,
+ IN cl_pfn_msgdone_cb_t pfn_callback OPTIONAL,
+ IN const void* const context );
+/*
+* PARAMETERS
+* handle
+* [in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+* msg_id
+* [in] Numeric message msg_id value associated with this message.
+*
+* p_data
+* [in] Data payload for this message.
+*
+* pfn_callback
+* [in] Pointer to a cl_pfn_msgdone_cb_t function.
+* The Dispatcher calls this function after the message has been
+* processed by the recipient.
+* The caller may pass NULL for this value, which indicates no
+* message done callback is necessary.
+*
+* context
+* [in] Client context value passed to the cl_pfn_msgdone_cb_t
+* function.
+*
+* RETURN VALUE
+* CL_SUCCESS if the message was successfully queued in the Dispatcher.
+*
+* NOTES
+* The caller must not modify the memory pointed to by p_data until
+* the Dispatcher calls the pfn_callback function.
+*
+* SEE ALSO
+* Dispatcher
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_get_queue_status
+* NAME
+* cl_disp_get_queue_status
+*
+* DESCRIPTION
+* This function returns the status of a Dispatcher's message queue.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_get_queue_status(
+ IN const cl_disp_reg_handle_t handle,
+ OUT uint32_t *p_num_queued_msgs,
+ OUT uint64_t *p_last_msg_queue_time_ms);
+/*
+* PARAMETERS
+* handle
+* [in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+* p_num_queued_msgs
+* [out] Pointer to a variable to hold the number of messages
+* currently in the queue.
+*
+* p_last_msg_queue_time_ms
+* [out] Pointer to a variable to hold the time, in msec, that the
+* last dequeued message spent in the queue.
+*
+* RETURN VALUE
+* This function does not return a value. The queue status is returned
+* through the output parameters.
+*
+* NOTES
+* External locking is not required.
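+*
+* EXAMPLE
+*	A short polling sketch (variable names are illustrative; h_reg is a
+*	handle obtained from cl_disp_register):
+*
+*		uint32_t num_msgs;
+*		uint64_t last_time_ms;
+*
+*		cl_disp_get_queue_status( h_reg, &num_msgs, &last_time_ms );
+*		// num_msgs and last_time_ms now describe the queue backlog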
+*
+* SEE ALSO
+* Dispatcher
+*********/
+
+END_C_DECLS
+
+#endif /* !defined(_CL_DISPATCHER_H_) */
+ diff --git a/branches/Ndi/ulp/opensm/user/include/complib/cl_event_wheel.h b/branches/Ndi/ulp/opensm/user/include/complib/cl_event_wheel.h new file mode 100644 index 00000000..129eadd6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/complib/cl_event_wheel.h @@ -0,0 +1,493 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of event wheel abstraction.
+ *
+ * Environment:
+ * All
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _CL_EVENT_WHEEL_H_
+#define _CL_EVENT_WHEEL_H_
+
+#include <complib/cl_atomic.h>
+#include <complib/cl_qlist.h>
+#include <complib/cl_qmap.h>
+#include <complib/cl_timer.h>
+#include <complib/cl_spinlock.h>
+#include <opensm/osm_log.h>
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* Component Library/Event_Wheel
+* NAME
+* Event_Wheel
+*
+* DESCRIPTION
+* The Event_Wheel provides a facility for registering delayed events
+* and getting called once they time out.
+*
+* The Event_Wheel functions operate on a cl_event_wheel_t structure
+* which should be treated as opaque and should be manipulated
+* only through the provided functions.
+*
+* SEE ALSO
+* Structures:
+* cl_event_wheel_t
+*
+* Initialization/Destruction:
+* cl_event_wheel_construct, cl_event_wheel_init, cl_event_wheel_destroy
+*
+* Manipulation:
+* cl_event_wheel_reg, cl_event_wheel_unreg
+*
+*********/
+
+/****f* Component Library: Event_Wheel/cl_pfn_event_aged_cb_t
+* NAME
+* cl_pfn_event_aged_cb_t
+*
+* DESCRIPTION
+* This typedef defines the prototype for client functions invoked
+* by the Event_Wheel. The Event_Wheel calls the corresponding
+* client function when the specific item has aged.
+*
+* SYNOPSIS
+*/
+typedef uint64_t
+(*cl_pfn_event_aged_cb_t)(
+ IN uint64_t key,
+ IN uint32_t num_regs,
+ IN void* context);
+/*
+* PARAMETERS
+* key
+* [in] The key used for registering the item in the call to
+* cl_event_wheel_reg
+*
+* num_regs
+* [in] The number of times this event was registered (pushed in time).
+*
+* context
+* [in] Client specific context specified in a call to
+* cl_event_wheel_reg
+*
+* RETURN VALUE
+* This function returns the absolute time, in [usec], at which the event
+* should fire. If the returned time is lower than the current time, the
+* event should be unregistered immediately.
+*
+* NOTES
+* This typedef provides a function prototype reference for
+* the function provided by Event_Wheel clients as a parameter
+* to the cl_event_wheel_reg function.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_reg
+*********/
+
+/****s* Component Library: Event_Wheel/cl_event_wheel_t
+* NAME
+* cl_event_wheel_t
+*
+* DESCRIPTION
+* Event_Wheel structure.
+*
+* The Event_Wheel is thread safe.
+*
+* The cl_event_wheel_t structure should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_event_wheel
+{
+ cl_spinlock_t lock;
+ cl_spinlock_t *p_external_lock;
+
+ cl_qmap_t events_map;
+ boolean_t closing;
+ cl_qlist_t events_wheel;
+ cl_timer_t timer;
+ osm_log_t *p_log;
+} cl_event_wheel_t;
+/*
+* FIELDS
+* lock
+* Spinlock to guard internal structures.
+*
+* p_external_lock
+* Reference to external spinlock to guard internal structures
+* if the event wheel is part of a larger object protected by its own lock
+*
+* events_map
+* A Map holding all registered event items by their key.
+*
+* closing
+* A flag indicating the event wheel is closing. This means that
+* callbacks that are called when closing == TRUE should just be ignored.
+*
+* events_wheel
+* A list of the events sorted by expiration time.
+*
+* timer
+* The timer scheduling event time propagation.
+*
+* p_log
+* Pointer to opensm log object.
+*
+* SEE ALSO
+* Event_Wheel
+*********/
+
+/****s* Component Library: Event_Wheel/cl_event_wheel_reg_info_t
+* NAME
+* cl_event_wheel_reg_info_t
+*
+* DESCRIPTION
+* Defines the event_wheel registration object structure.
+*
+* The cl_event_wheel_reg_info_t structure is for internal use by the
+* Event_Wheel only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_event_wheel_reg_info
+{
+ cl_map_item_t map_item;
+ cl_list_item_t list_item;
+ uint64_t key;
+ cl_pfn_event_aged_cb_t pfn_aged_callback;
+ uint64_t aging_time;
+ uint32_t num_regs;
+ void *context;
+ cl_event_wheel_t *p_event_wheel;
+} cl_event_wheel_reg_info_t;
+/*
+* FIELDS
+* map_item
+* The map item of this event
+*
+* list_item
+* The sorted by aging time list item
+*
+* key
+* The key by which one can find the event
+*
+* pfn_aged_callback
+* The client's Event-Aged callback
+*
+* aging_time
+* The absolute time [usec] at which the event should age.
+*
+* num_regs
+* The number of times the same event (key) was registered
+*
+* context
+* Client's context for event-aged callback.
+*
+* p_event_wheel
+* Pointer to this event wheel object
+*
+* SEE ALSO
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_construct
+* NAME
+* cl_event_wheel_construct
+*
+* DESCRIPTION
+* This function constructs an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_construct(
+ IN cl_event_wheel_t* const p_event_wheel );
+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling cl_event_wheel_init and cl_event_wheel_destroy.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_init, cl_event_wheel_destroy
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_init
+* NAME
+* cl_event_wheel_init
+*
+* DESCRIPTION
+* This function initializes an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_init(
+ IN cl_event_wheel_t* const p_event_wheel,
+ IN osm_log_t *p_log);
+
+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* p_log
+* [in] Pointer to opensm log object to be used for logging
+*
+* RETURN VALUE
+* CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_destroy, cl_event_wheel_reg, cl_event_wheel_unreg
+*
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_init_ex
+* NAME
+* cl_event_wheel_init_ex
+*
+* DESCRIPTION
+* This function initializes an Event_Wheel object that is guarded by an
+* external lock.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_init_ex(
+ IN cl_event_wheel_t* const p_event_wheel,
+ IN osm_log_t *p_log,
+ IN cl_spinlock_t *p_external_lock);

+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* p_log
+* [in] Pointer to opensm log object to be used for logging
+*
+* p_external_lock
+* [in] Reference to external spinlock to guard internal structures
+* if the event wheel is part of a larger object protected by its own lock
+*
+* RETURN VALUE
+* CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_destroy, cl_event_wheel_reg, cl_event_wheel_unreg
+*
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_destroy
+* NAME
+* cl_event_wheel_destroy
+*
+* DESCRIPTION
+* This function destroys an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_destroy(
+ IN cl_event_wheel_t* const p_event_wheel );
+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function does not return until all client callback functions
+* have completed.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_construct, cl_event_wheel_init
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_dump
+* NAME
+* cl_event_wheel_dump
+*
+* DESCRIPTION
+* This function dumps the details of an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_dump(
+ IN cl_event_wheel_t* const p_event_wheel );
+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Note that this function should be called inside a lock of the event wheel!
+* It doesn't acquire the lock by itself.
+*
+* SEE ALSO
+* Event_Wheel, cl_event_wheel_construct, cl_event_wheel_init
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_reg
+* NAME
+* cl_event_wheel_reg
+*
+* DESCRIPTION
+* This function registers a client with an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_reg(
+ IN cl_event_wheel_t* const p_event_wheel,
+ IN const uint64_t key,
+ IN const uint64_t aging_time_usec,
+ IN cl_pfn_event_aged_cb_t pfn_callback,
+ IN void* const context );
+/*
+* PARAMETERS
+* p_event_wheel
+* [in] Pointer to an Event_Wheel.
+*
+* key
+* [in] The specific key by which events are registered.
+*
+* aging_time_usec
+* [in] The absolute time this event should age in usec
+*
+* pfn_callback
+* [in] Event Aging callback. The Event_Wheel calls this
+* function once the time the event registered for has arrived.
+*
+* context
+* [in] Client context value passed to the cl_pfn_event_aged_cb_t
+* function.
+*
+* RETURN VALUE
+* CL_SUCCESS if the event was successfully registered; CL_ERROR otherwise.
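+*
+* EXAMPLE
+*	A minimal registration sketch (the key, delay and callback names are
+*	illustrative; cl_get_time_stamp returns the current time in usec):
+*
+*		static uint64_t ex_aged_cb( uint64_t key, uint32_t num_regs,
+*			void* context )
+*		{
+*			// Return a time lower than the current time so the
+*			// event is unregistered rather than re-armed.
+*			return( 0 );
+*		}
+*
+*		cl_event_wheel_reg( &event_wheel, 0x1234,
+*			cl_get_time_stamp() + 1000000, ex_aged_cb, NULL );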
+* +* SEE ALSO +* Event_Wheel, cl_event_wheel_unreg +*********/ + +/****f* Component Library: Event_Wheel/cl_event_wheel_unreg +* NAME +* cl_event_wheel_unreg +* +* DESCRIPTION +* This function unregisters a client event from a Event_Wheel. +* +* SYNOPSIS +*/ +void +cl_event_wheel_unreg( + IN cl_event_wheel_t* const p_event_wheel, + IN uint64_t key ); +/* +* PARAMETERS +* p_event_wheel +* [in] Pointer to a Event_Wheel. +* +* key +* [in] The key used for registering the event +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* After the event has aged it is automatically removed from +* the event wheel. So it should only be invoked when the need arises +* to remove existing events before they age. +* +* SEE ALSO +* Event_Wheel, cl_event_wheel_reg +*********/ + +/****f* Component Library: Event_Wheel/cl_event_wheel_num_regs +* NAME +* cl_event_wheel_num_regs +* +* DESCRIPTION +* This function returns the number of times an event was registered. +* +* SYNOPSIS +*/ +uint32_t +cl_event_wheel_num_regs( + IN cl_event_wheel_t* const p_event_wheel, + IN uint64_t key ); +/* +* PARAMETERS +* p_event_wheel +* [in] Pointer to a Event_Wheel. +* +* key +* [in] The key used for registering the event +* +* RETURN VALUE +* The number of times the event was registered. +* 0 if never registered or eventually aged. +* +* SEE ALSO +* Event_Wheel, cl_event_wheel_reg, cl_event_wheel_unreg +*********/ + +END_C_DECLS + +#endif /* !defined(_CL_EVENT_WHEEL_H_) */ + diff --git a/branches/Ndi/ulp/opensm/user/include/complib/cl_signal_osd.h b/branches/Ndi/ulp/opensm/user/include/complib/cl_signal_osd.h new file mode 100644 index 00000000..1b731560 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/complib/cl_signal_osd.h @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+/*
+ * Abstract:
+ * Declaration of Signal Handler Registration
+ *
+ * Environment:
+ * All
+ *
+ * $Revision: 1.3 $
+ */
+
+
+#ifndef _CL_SIG_HDL_H_
+#define _CL_SIG_HDL_H_
+
+#include <signal.h>
+
+/****h* Component Library/Signal Handler
+* NAME
+* Signal Handler Registration
+*
+* DESCRIPTION
+* The Signal Handler Registration allows registering a callback for an
+* incoming signal.
+*
+* SEE ALSO
+*********/
+/* TODO : Required when calling signal function in windows*/
+typedef void (__cdecl *cl_sig_hdl)(
+ IN int sig);
+/* Prototypes */
+
+/****f* Component Library: Signal Handler/cl_reg_sig_hdl
+* NAME
+* cl_reg_sig_hdl
+*
+* DESCRIPTION
+* Register the handler for the given signal
+*
+* SYNOPSIS
+*/
+
+static inline void
+cl_reg_sig_hdl(int sig, cl_sig_hdl pfn_sig_hdl);
+
+
+
+/****f* Component Library: Signal Handler/cl_mask_sigint
+* NAME
+* cl_sig_mask_sigint
+*
+* DESCRIPTION
+* Mask the interrupt (SIGINT) signal
+*
+* SYNOPSIS
+*/
+
+static inline void
+cl_sig_mask_sigint(void);
+
+
+
+
+/****f* Component Library: Signal Handler/cl_reg_sig_hdl
+* NAME
+* cl_reg_sig_hdl
+*
+* DESCRIPTION
+* Register the handler for the given signal
+*
+* SYNOPSIS
+*/
+static inline void
+cl_reg_sig_hdl(int sig, cl_sig_hdl pfn_sig_hdl) {
+ signal(sig, pfn_sig_hdl);
+ }
+/*
+*********/
+
+/****f* Component Library: Signal Handler/cl_mask_sigint
+* NAME
+* cl_sig_mask_sigint
+*
+* DESCRIPTION
+* Mask the interrupt (SIGINT) signal
+*
+* SYNOPSIS
+*/
+static inline void
+cl_sig_mask_sigint(void)
+{
+ /* Signal masking is a no-op in this OSD implementation. */
+}
+/*
+*********/
+
+#endif /* _CL_SIG_HDL_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/iba/ib_types.h b/branches/Ndi/ulp/opensm/user/include/iba/ib_types.h new file mode 100644 index 00000000..9dd1ed6d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/iba/ib_types.h @@ -0,0 +1,10403 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + + +#if !defined(__IB_TYPES_H__) +#define __IB_TYPES_H__ + +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +#if defined( WIN32 ) || defined( _WIN64 ) + #if defined( EXPORT_AL_SYMBOLS ) + #define OSM_EXPORT __declspec(dllexport) + #else + #define OSM_EXPORT __declspec(dllimport) + #endif + #define OSM_API __stdcall + #define OSM_CDECL __cdecl +#else + #define OSM_EXPORT extern + #define OSM_API + #define OSM_CDECL + #define __ptr64 +#endif + +/****h* IBA Base/Constants +* NAME +* Constants +* +* DESCRIPTION +* The following constants are used throughout the IBA code base. +* +* Definitions are from the InfiniBand Architecture Specification v1.2 +* +*********/ + +/****d* IBA Base: Constants/MAD_BLOCK_SIZE +* NAME +* MAD_BLOCK_SIZE +* +* DESCRIPTION +* Size of a non-RMPP MAD datagram. +* +* SOURCE +*/ +#define MAD_BLOCK_SIZE 256 +/**********/ + +/****d* IBA Base: Constants/MAD_RMPP_HDR_SIZE +* NAME +* MAD_RMPP_HDR_SIZE +* +* DESCRIPTION +* Size of an RMPP header, including the common MAD header. +* +* SOURCE +*/ +#define MAD_RMPP_HDR_SIZE 36 +/**********/ + +/****d* IBA Base: Constants/MAD_RMPP_DATA_SIZE +* NAME +* MAD_RMPP_DATA_SIZE +* +* DESCRIPTION +* Size of an RMPP transaction data section. +* +* SOURCE +*/ +#define MAD_RMPP_DATA_SIZE (MAD_BLOCK_SIZE - MAD_RMPP_HDR_SIZE) +/**********/ + +/****d* IBA Base: Constants/MAD_BLOCK_GRH_SIZE +* NAME +* MAD_BLOCK_GRH_SIZE +* +* DESCRIPTION +* Size of a MAD datagram, including the GRH. +* +* SOURCE +*/ +#define MAD_BLOCK_GRH_SIZE 296 +/**********/ + +/****d* IBA Base: Constants/IB_LID_PERMISSIVE +* NAME +* IB_LID_PERMISSIVE +* +* DESCRIPTION +* Permissive LID +* +* SOURCE +*/ +#define IB_LID_PERMISSIVE 0xFFFF +/**********/ + +/****d* IBA Base: Constants/IB_DEFAULT_PKEY +* NAME +* IB_DEFAULT_PKEY +* +* DESCRIPTION +* P_Key value for the default partition. +* +* SOURCE +*/ +#define IB_DEFAULT_PKEY 0xFFFF +/**********/ + +/****d* IBA Base: Constants/IB_QP1_WELL_KNOWN_Q_KEY +* NAME +* IB_QP1_WELL_KNOWN_Q_KEY +* +* DESCRIPTION +* Well-known Q_Key for QP1 privileged mode access (15.4.2). +* +* SOURCE +*/ +#define IB_QP1_WELL_KNOWN_Q_KEY CL_NTOH32(0x80010000) +/*********/ + +#define IB_QP0 0 +#define IB_QP1 CL_NTOH32(1) + +#define IB_QP_PRIVILEGED_Q_KEY CL_NTOH32(0x80000000) + +/****d* IBA Base: Constants/IB_LID_UCAST_START +* NAME +* IB_LID_UCAST_START +* +* DESCRIPTION +* Lowest valid unicast LID value. +* +* SOURCE +*/ +#define IB_LID_UCAST_START_HO 0x0001 +#define IB_LID_UCAST_START (CL_HTON16(IB_LID_UCAST_START_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_UCAST_END +* NAME +* IB_LID_UCAST_END +* +* DESCRIPTION +* Highest valid unicast LID value. +* +* SOURCE +*/ +#define IB_LID_UCAST_END_HO 0xBFFF +#define IB_LID_UCAST_END (CL_HTON16(IB_LID_UCAST_END_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_MCAST_START +* NAME +* IB_LID_MCAST_START +* +* DESCRIPTION +* Lowest valid multicast LID value. +* +* SOURCE +*/ +#define IB_LID_MCAST_START_HO 0xC000 +#define IB_LID_MCAST_START (CL_HTON16(IB_LID_MCAST_START_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_LID_MCAST_END +* NAME +* IB_LID_MCAST_END +* +* DESCRIPTION +* Highest valid multicast LID value. 
+* +* SOURCE +*/ +#define IB_LID_MCAST_END_HO 0xFFFE +#define IB_LID_MCAST_END (CL_HTON16(IB_LID_MCAST_END_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_DEFAULT_SUBNET_PREFIX +* NAME +* IB_DEFAULT_SUBNET_PREFIX +* +* DESCRIPTION +* Default subnet GID prefix. +* +* SOURCE +*/ +#define IB_DEFAULT_SUBNET_PREFIX (CL_HTON64(0xFE80000000000000ULL)) +/**********/ + +/****d* IBA Base: Constants/IB_NODE_NUM_PORTS_MAX +* NAME +* IB_NODE_NUM_PORTS_MAX +* +* DESCRIPTION +* Maximum number of ports in a single node (14.2.5.7). +* SOURCE +*/ +#define IB_NODE_NUM_PORTS_MAX 0xFE +/**********/ + +/****d* IBA Base: Constants/IB_INVALID_PORT_NUM +* NAME +* IB_INVALID_PORT_NUM +* +* DESCRIPTION +* Value used to indicate an invalid port number (14.2.5.10). +* +* SOURCE +*/ +#define IB_INVALID_PORT_NUM 0xFF +/*********/ + +/****d* IBA Base: Constants/IB_SUBNET_PATH_HOPS_MAX +* NAME +* IB_SUBNET_PATH_HOPS_MAX +* +* DESCRIPTION +* Maximum number of directed route switch hops in a subnet (14.2.1.2). +* +* SOURCE +*/ +#define IB_SUBNET_PATH_HOPS_MAX 64 +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_MAX_BLOCKS +* NAME +* IB_PKEY_MAX_BLOCKS +* +* DESCRIPTION +* Maximum number of PKEY blocks (14.2.5.7). +* +* SOURCE +*/ +#define IB_PKEY_MAX_BLOCKS 2048 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_MAX_BLOCK_ID +* NAME +* IB_MCAST_MAX_BLOCK_ID +* +* DESCRIPTION +* Maximum number of Multicast port mask blocks +* +* SOURCE +*/ +#define IB_MCAST_MAX_BLOCK_ID 511 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_BLOCK_ID_MASK_HO +* NAME +* IB_MCAST_BLOCK_ID_MASK_HO +* +* DESCRIPTION +* Mask (host order) to recover the Multicast block ID. +* +* SOURCE +*/ +#define IB_MCAST_BLOCK_ID_MASK_HO 0x000001FF +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_BLOCK_SIZE +* NAME +* IB_MCAST_BLOCK_SIZE +* +* DESCRIPTION +* Number of port mask entries in a multicast forwarding table block. +* +* SOURCE +*/ +#define IB_MCAST_BLOCK_SIZE 32 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_MASK_SIZE +* NAME +* IB_MCAST_MASK_SIZE +* +* DESCRIPTION +* Number of port mask bits in each entry in the multicast forwarding table. +* +* SOURCE +*/ +#define IB_MCAST_MASK_SIZE 16 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_MASK_HO +* NAME +* IB_MCAST_POSITION_MASK_HO +* +* DESCRIPTION +* Mask (host order) to recover the multicast block position. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_MASK_HO 0xF0000000 +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_MAX +* NAME +* IB_MCAST_POSITION_MAX +* +* DESCRIPTION +* Maximum value for the multicast block position. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_MAX 0xF +/*********/ + +/****d* IBA Base: Constants/IB_MCAST_POSITION_SHIFT +* NAME +* IB_MCAST_POSITION_SHIFT +* +* DESCRIPTION +* Shift value to normalize the multicast block position value. +* +* SOURCE +*/ +#define IB_MCAST_POSITION_SHIFT 28 +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_ENTRIES_MAX +* NAME +* IB_PKEY_ENTRIES_MAX +* +* DESCRIPTION +* Maximum number of PKEY entries per port (14.2.5.7). +* +* SOURCE +*/ +#define IB_PKEY_ENTRIES_MAX (IB_PKEY_MAX_BLOCKS * IB_NUM_PKEY_ELEMENTS_IN_BLOCK) +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_BASE_MASK +* NAME +* IB_PKEY_BASE_MASK +* +* DESCRIPTION +* Masks for the base P_Key value given a P_Key Entry. 
+* +* SOURCE +*/ +#define IB_PKEY_BASE_MASK (CL_HTON16(0x7FFF)) +/*********/ + +/****d* IBA Base: Constants/IB_PKEY_TYPE_MASK +* NAME +* IB_PKEY_TYPE_MASK +* +* DESCRIPTION +* Masks for the P_Key membership type given a P_Key Entry. +* +* SOURCE +*/ +#define IB_PKEY_TYPE_MASK (CL_NTOH16(0x8000)) +/*********/ + +/****d* IBA Base: Constants/IB_DEFAULT_PARTIAL_PKEY +* NAME +* IB_DEFAULT_PARTIAL_PKEY +* +* DESCRIPTION +* 0x7FFF in network order +* +* SOURCE +*/ +#define IB_DEFAULT_PARTIAL_PKEY (CL_HTON16(0x7FFF)) +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_LID +* NAME +* IB_MCLASS_SUBN_LID +* +* DESCRIPTION +* Subnet Management Class, Subnet Manager LID routed (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_LID 0x01 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_DIR +* NAME +* IB_MCLASS_SUBN_DIR +* +* DESCRIPTION +* Subnet Management Class, Subnet Manager directed route (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_DIR 0x81 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SUBN_ADM +* NAME +* IB_MCLASS_SUBN_ADM +* +* DESCRIPTION +* Subnet Management Class, Subnet Administration (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SUBN_ADM 0x03 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_PERF +* NAME +* IB_MCLASS_PERF +* +* DESCRIPTION +* Subnet Management Class, Performance Manager (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_PERF 0x04 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_BM +* NAME +* IB_MCLASS_BM +* +* DESCRIPTION +* Subnet Management Class, Baseboard Manager (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_BM 0x05 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_DEV_MGMT +* NAME +* IB_MCLASS_DEV_MGMT +* +* DESCRIPTION +* Subnet Management Class, Device Management (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_DEV_MGMT 0x06 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_COMM_MGMT +* NAME +* IB_MCLASS_COMM_MGMT +* +* DESCRIPTION +* Subnet Management Class, Communication Management (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_COMM_MGMT 0x07 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_SNMP +* NAME +* IB_MCLASS_SNMP +* +* DESCRIPTION +* Subnet Management Class, SNMP Tunneling (13.4.4) +* +* SOURCE +*/ +#define IB_MCLASS_SNMP 0x08 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MIN +* NAME +* IB_MCLASS_VENDOR_LOW_RANGE_MIN +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific Low Range Start +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_LOW_RANGE_MIN 0x09 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_LOW_RANGE_MAX +* NAME +* IB_MCLASS_VENDOR_LOW_RANGE_MAX +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific Low Range End +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_LOW_RANGE_MAX 0x0f +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_DEV_ADM +* NAME +* IB_MCLASS_DEV_ADM +* +* DESCRIPTION +* Subnet Management Class, Device Administration +* +* SOURCE +*/ +#define IB_MCLASS_DEV_ADM 0x10 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_BIS +* NAME +* IB_MCLASS_BIS +* +* DESCRIPTION +* Subnet Management Class, BIS +* +* SOURCE +*/ +#define IB_MCLASS_BIS 0x12 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MIN +* NAME +* IB_MCLASS_VENDOR_HIGH_RANGE_MIN +* +* DESCRIPTION +* Subnet Management Class, Vendor Specific High Range Start +* +* SOURCE +*/ +#define IB_MCLASS_VENDOR_HIGH_RANGE_MIN 0x30 +/**********/ + +/****d* IBA Base: Constants/IB_MCLASS_VENDOR_HIGH_RANGE_MAX +* NAME +* IB_MCLASS_VENDOR_HIGH_RANGE_MAX +* +* DESCRIPTION +* Subnet 
Management Class, Vendor Specific High Range End
+*
+* SOURCE
+*/
+#define IB_MCLASS_VENDOR_HIGH_RANGE_MAX 0x4f
+/**********/
+
+/****f* IBA Base: Types/ib_class_is_vendor_specific_low
+* NAME
+* ib_class_is_vendor_specific_low
+*
+* DESCRIPTION
+* Indicates if the Class Code is a vendor specific class from
+* the low range
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_class_is_vendor_specific_low(
+ IN const uint8_t class_code )
+{
+ return( (class_code >= IB_MCLASS_VENDOR_LOW_RANGE_MIN) &&
+ (class_code <= IB_MCLASS_VENDOR_LOW_RANGE_MAX) );
+}
+/*
+* PARAMETERS
+* class_code
+* [in] The Management Datagram Class Code
+*
+* RETURN VALUE
+* TRUE if the class is in the Low range of Vendor Specific MADs
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* IB_MCLASS_VENDOR_LOW_RANGE_MIN, IB_MCLASS_VENDOR_LOW_RANGE_MAX
+*********/
+
+/****f* IBA Base: Types/ib_class_is_vendor_specific_high
+* NAME
+* ib_class_is_vendor_specific_high
+*
+* DESCRIPTION
+* Indicates if the Class Code is a vendor specific class from
+* the high range
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_class_is_vendor_specific_high(
+ IN const uint8_t class_code )
+{
+ return( (class_code >= IB_MCLASS_VENDOR_HIGH_RANGE_MIN) &&
+ (class_code <= IB_MCLASS_VENDOR_HIGH_RANGE_MAX) );
+}
+/*
+* PARAMETERS
+* class_code
+* [in] The Management Datagram Class Code
+*
+* RETURN VALUE
+* TRUE if the class is in the High range of Vendor Specific MADs
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* IB_MCLASS_VENDOR_HIGH_RANGE_MIN, IB_MCLASS_VENDOR_HIGH_RANGE_MAX
+*********/
+
+
+/****f* IBA Base: Types/ib_class_is_vendor_specific
+* NAME
+* ib_class_is_vendor_specific
+*
+* DESCRIPTION
+* Indicates if the Class Code is a vendor specific class
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_class_is_vendor_specific(
+ IN const uint8_t class_code )
+{
+ return( ib_class_is_vendor_specific_low(class_code) ||
+ ib_class_is_vendor_specific_high(class_code) );
+}
+/*
+* PARAMETERS
+* class_code
+* [in] The Management Datagram Class Code
+*
+* RETURN VALUE
+* TRUE if the class is a Vendor Specific MAD
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_class_is_vendor_specific_low, ib_class_is_vendor_specific_high
+*********/
+
+/****f* IBA Base: Types/ib_class_is_rmpp
+* NAME
+* ib_class_is_rmpp
+*
+* DESCRIPTION
+* Indicates if the Class Code supports RMPP
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_class_is_rmpp(
+ IN const uint8_t class_code )
+{
+ return( (class_code == IB_MCLASS_SUBN_ADM) ||
+ (class_code == IB_MCLASS_DEV_MGMT) ||
+ (class_code == IB_MCLASS_DEV_ADM) ||
+ (class_code == IB_MCLASS_BIS) ||
+ ib_class_is_vendor_specific_high( class_code ) );
+}
+/*
+* PARAMETERS
+* class_code
+* [in] The Management Datagram Class Code
+*
+* RETURN VALUE
+* TRUE if the class supports RMPP
+* FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/*
+ * MAD methods
+ */
+
+/****d* IBA Base: Constants/IB_MAX_METHODS
+* NAME
+* IB_MAX_METHODS
+*
+* DESCRIPTION
+* Total number of methods available to a class, not including the R-bit.
+*
+* SOURCE
+*/
+#define IB_MAX_METHODS 128
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_METHOD_RESP_MASK
+* NAME
+* IB_MAD_METHOD_RESP_MASK
+*
+* DESCRIPTION
+* Response mask to extract 'R' bit from the method field.
(13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_RESP_MASK 0x80 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GET +* NAME +* IB_MAD_METHOD_GET +* +* DESCRIPTION +* Get() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GET 0x01 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_SET +* NAME +* IB_MAD_METHOD_SET +* +* DESCRIPTION +* Set() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_SET 0x02 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GET_RESP +* NAME +* IB_MAD_METHOD_GET_RESP +* +* DESCRIPTION +* GetResp() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GET_RESP 0x81 +/**********/ + +#define IB_MAD_METHOD_DELETE 0x15 + +/****d* IBA Base: Constants/IB_MAD_METHOD_GETTABLE +* NAME +* IB_MAD_METHOD_GETTABLE +* +* DESCRIPTION +* SubnAdmGetTable() Method (15.2.2) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GETTABLE 0x12 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_GETTABLE_RESP +* NAME +* IB_MAD_METHOD_GETTABLE_RESP +* +* DESCRIPTION +* SubnAdmGetTableResp() Method (15.2.2) +* +* SOURCE +*/ +#define IB_MAD_METHOD_GETTABLE_RESP 0x92 + +/**********/ + +#define IB_MAD_METHOD_GETTRACETABLE 0x13 +#define IB_MAD_METHOD_GETMULTI 0x14 +#define IB_MAD_METHOD_GETMULTI_RESP 0x94 + + +/****d* IBA Base: Constants/IB_MAD_METHOD_SEND +* NAME +* IB_MAD_METHOD_SEND +* +* DESCRIPTION +* Send() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_SEND 0x03 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP +* NAME +* IB_MAD_METHOD_TRAP +* +* DESCRIPTION +* Trap() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_TRAP 0x05 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT +* NAME +* IB_MAD_METHOD_REPORT +* +* DESCRIPTION +* Report() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_REPORT 0x06 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_REPORT_RESP +* NAME +* IB_MAD_METHOD_REPORT_RESP +* +* DESCRIPTION +* ReportResp() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_REPORT_RESP 0x86 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_METHOD_TRAP_REPRESS +* NAME +* IB_MAD_METHOD_TRAP_REPRESS +* +* DESCRIPTION +* TrapRepress() Method (13.4.5) +* +* SOURCE +*/ +#define IB_MAD_METHOD_TRAP_REPRESS 0x07 +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_BUSY +* NAME +* IB_MAD_STATUS_BUSY +* +* DESCRIPTION +* Temporarily busy, MAD discarded (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_BUSY (CL_HTON16(0x0001)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_REDIRECT +* NAME +* IB_MAD_STATUS_REDIRECT +* +* DESCRIPTION +* QP Redirection required (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_REDIRECT (CL_HTON16(0x0002)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_CLASS_VER +* NAME +* IB_MAD_STATUS_UNSUP_CLASS_VER +* +* DESCRIPTION +* Unsupported class version (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_CLASS_VER (CL_HTON16(0x0004)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD +* NAME +* IB_MAD_STATUS_UNSUP_METHOD +* +* DESCRIPTION +* Unsupported method (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_METHOD (CL_HTON16(0x0008)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_UNSUP_METHOD_ATTR +* NAME +* IB_MAD_STATUS_UNSUP_METHOD_ATTR +* +* DESCRIPTION +* Unsupported method/attribute combination (13.4.7) +* +* SOURCE +*/ +#define IB_MAD_STATUS_UNSUP_METHOD_ATTR (CL_HTON16(0x000C)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_STATUS_INVALID_FIELD +* NAME +* IB_MAD_STATUS_INVALID_FIELD 
+*
+* DESCRIPTION
+* Attribute contains one or more invalid fields (13.4.7)
+*
+* SOURCE
+*/
+#define IB_MAD_STATUS_INVALID_FIELD (CL_HTON16(0x001C))
+/**********/
+
+#define IB_MAD_STATUS_CLASS_MASK (CL_HTON16(0xFF00))
+
+#define IB_SA_MAD_STATUS_SUCCESS (CL_HTON16(0x0000))
+#define IB_SA_MAD_STATUS_NO_RESOURCES (CL_HTON16(0x0100))
+#define IB_SA_MAD_STATUS_REQ_INVALID (CL_HTON16(0x0200))
+#define IB_SA_MAD_STATUS_NO_RECORDS (CL_HTON16(0x0300))
+#define IB_SA_MAD_STATUS_TOO_MANY_RECORDS (CL_HTON16(0x0400))
+#define IB_SA_MAD_STATUS_INVALID_GID (CL_HTON16(0x0500))
+#define IB_SA_MAD_STATUS_INSUF_COMPS (CL_HTON16(0x0600))
+
+#define IB_DM_MAD_STATUS_NO_IOC_RESP (CL_HTON16(0x0100))
+#define IB_DM_MAD_STATUS_NO_SVC_ENTRIES (CL_HTON16(0x0200))
+#define IB_DM_MAD_STATUS_IOC_FAILURE (CL_HTON16(0x8000))
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_CLASS_PORT_INFO
+* NAME
+* IB_MAD_ATTR_CLASS_PORT_INFO
+*
+* DESCRIPTION
+* ClassPortInfo attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_CLASS_PORT_INFO (CL_NTOH16(0x0001))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NOTICE
+* NAME
+* IB_MAD_ATTR_NOTICE
+*
+* DESCRIPTION
+* Notice attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NOTICE (CL_NTOH16(0x0002))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO
+* NAME
+* IB_MAD_ATTR_INFORM_INFO
+*
+* DESCRIPTION
+* InformInfo attribute (13.4.8)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_INFORM_INFO (CL_NTOH16(0x0003))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_DESC
+* NAME
+* IB_MAD_ATTR_NODE_DESC
+*
+* DESCRIPTION
+* NodeDescription attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NODE_DESC (CL_NTOH16(0x0010))
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_CTRL
+* NAME
+* IB_MAD_ATTR_PORT_SMPL_CTRL
+*
+* DESCRIPTION
+* PortSamplesControl attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_SMPL_CTRL (CL_NTOH16(0x0010))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_INFO
+* NAME
+* IB_MAD_ATTR_NODE_INFO
+*
+* DESCRIPTION
+* NodeInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_NODE_INFO (CL_NTOH16(0x0011))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_SMPL_RSLT
+* NAME
+* IB_MAD_ATTR_PORT_SMPL_RSLT
+*
+* DESCRIPTION
+* PortSamplesResult attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_SMPL_RSLT (CL_NTOH16(0x0011))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO
+* NAME
+* IB_MAD_ATTR_SWITCH_INFO
+*
+* DESCRIPTION
+* SwitchInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_SWITCH_INFO (CL_NTOH16(0x0012))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_CNTRS
+* NAME
+* IB_MAD_ATTR_PORT_CNTRS
+*
+* DESCRIPTION
+* PortCounters attribute (16.1.2)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_CNTRS (CL_NTOH16(0x0012))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_GUID_INFO
+* NAME
+* IB_MAD_ATTR_GUID_INFO
+*
+* DESCRIPTION
+* GUIDInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_GUID_INFO (CL_NTOH16(0x0014))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_PORT_INFO
+* NAME
+* IB_MAD_ATTR_PORT_INFO
+*
+* DESCRIPTION
+* PortInfo attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PORT_INFO (CL_NTOH16(0x0015))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_P_KEY_TABLE
+* NAME
+* IB_MAD_ATTR_P_KEY_TABLE
+*
+* DESCRIPTION
+* PartitionTable attribute (14.2.5)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_P_KEY_TABLE (CL_NTOH16(0x0016))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_TABLE
+* NAME
+*
IB_MAD_ATTR_SLVL_TABLE +* +* DESCRIPTION +* SL VL Mapping Table attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SLVL_TABLE (CL_NTOH16(0x0017)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VL_ARBITRATION +* NAME +* IB_MAD_ATTR_VL_ARBITRATION +* +* DESCRIPTION +* VL Arbitration Table attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VL_ARBITRATION (CL_NTOH16(0x0018)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LIN_FWD_TBL +* NAME +* IB_MAD_ATTR_LIN_FWD_TBL +* +* DESCRIPTION +* Switch linear forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_LIN_FWD_TBL (CL_NTOH16(0x0019)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_RND_FWD_TBL +* NAME +* IB_MAD_ATTR_RND_FWD_TBL +* +* DESCRIPTION +* Switch random forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_RND_FWD_TBL (CL_NTOH16(0x001A)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MCAST_FWD_TBL +* NAME +* IB_MAD_ATTR_MCAST_FWD_TBL +* +* DESCRIPTION +* Switch multicast forwarding table +* +* SOURCE +*/ +#define IB_MAD_ATTR_MCAST_FWD_TBL (CL_NTOH16(0x001B)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_NODE_RECORD +* NAME +* IB_MAD_ATTR_NODE_RECORD +* +* DESCRIPTION +* NodeRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_NODE_RECORD (CL_NTOH16(0x0011)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PORTINFO_RECORD +* NAME +* IB_MAD_ATTR_PORTINFO_RECORD +* +* DESCRIPTION +* PortInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PORTINFO_RECORD (CL_NTOH16(0x0012)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SWITCH_INFO_RECORD +* NAME +* IB_MAD_ATTR_SWITCH_INFO_RECORD +* +* DESCRIPTION +* SwitchInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SWITCH_INFO_RECORD (CL_NTOH16(0x0014)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LINK_RECORD +* NAME +* IB_MAD_ATTR_LINK_RECORD +* +* DESCRIPTION +* LinkRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LINK_RECORD (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SM_INFO +* NAME +* IB_MAD_ATTR_SM_INFO +* +* DESCRIPTION +* SMInfo attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SM_INFO (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SMINFO_RECORD +* NAME +* IB_MAD_ATTR_SMINFO_RECORD +* +* DESCRIPTION +* SMInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SMINFO_RECORD (CL_NTOH16(0x0018)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_GUIDINFO_RECORD +* NAME +* IB_MAD_ATTR_GUIDINFO_RECORD +* +* DESCRIPTION +* GuidInfoRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_GUIDINFO_RECORD (CL_NTOH16(0x0030)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VENDOR_DIAG +* NAME +* IB_MAD_ATTR_VENDOR_DIAG +* +* DESCRIPTION +* VendorDiag attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VENDOR_DIAG (CL_NTOH16(0x0030)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LED_INFO +* NAME +* IB_MAD_ATTR_LED_INFO +* +* DESCRIPTION +* LedInfo attribute (14.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LED_INFO (CL_NTOH16(0x0031)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_RECORD +* NAME +* IB_MAD_ATTR_SERVICE_RECORD +* +* DESCRIPTION +* ServiceRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SERVICE_RECORD (CL_NTOH16(0x0031)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_LFT_RECORD +* NAME +* IB_MAD_ATTR_LFT_RECORD +* +* DESCRIPTION +* LinearForwardingTableRecord attribute 
(15.2.5.6) +* +* SOURCE +*/ +#define IB_MAD_ATTR_LFT_RECORD (CL_NTOH16(0x0015)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MFT_RECORD +* NAME +* IB_MAD_ATTR_MFT_RECORD +* +* DESCRIPTION +* MulticastForwardingTableRecord attribute (15.2.5.8) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MFT_RECORD (CL_NTOH16(0x0017)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PKEYTBL_RECORD +* NAME +* IB_MAD_ATTR_PKEYTBL_RECORD +* +* DESCRIPTION +* PKEY Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PKEY_TBL_RECORD (CL_NTOH16(0x0033)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_PATH_RECORD +* NAME +* IB_MAD_ATTR_PATH_RECORD +* +* DESCRIPTION +* PathRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_PATH_RECORD (CL_NTOH16(0x0035)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_VLARB_RECORD +* NAME +* IB_MAD_ATTR_VLARB_RECORD +* +* DESCRIPTION +* VL Arbitration Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_VLARB_RECORD (CL_NTOH16(0x0036)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SLVL_RECORD +* NAME +* IB_MAD_ATTR_SLVL_RECORD +* +* DESCRIPTION +* SLtoVL Mapping Table Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SLVL_RECORD (CL_NTOH16(0x0013)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MCMEMBER_RECORD +* NAME +* IB_MAD_ATTR_MCMEMBER_RECORD +* +* DESCRIPTION +* MCMemberRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MCMEMBER_RECORD (CL_NTOH16(0x0038)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_TRACE_RECORD +* NAME +* IB_MAD_ATTR_TRACE_RECORD +* +* DESCRIPTION +* TraceRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_TRACE_RECORD (CL_NTOH16(0x0039)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_MULTIPATH_RECORD +* NAME +* IB_MAD_ATTR_MULTIPATH_RECORD +* +* DESCRIPTION +* MultiPathRecord attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_MULTIPATH_RECORD (CL_NTOH16(0x003A)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SVC_ASSOCIATION_RECORD +* NAME +* IB_MAD_ATTR_SVC_ASSOCIATION_RECORD +* +* DESCRIPTION +* Service Association Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SVC_ASSOCIATION_RECORD (CL_NTOH16(0x003B)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_INFORM_INFO_RECORD +* NAME +* IB_MAD_ATTR_INFORM_INFO_RECORD +* +* DESCRIPTION +* InformInfo Record attribute (15.2.5) +* +* SOURCE +*/ +#define IB_MAD_ATTR_INFORM_INFO_RECORD (CL_NTOH16(0x00F3)) + +/****d* IBA Base: Constants/IB_MAD_ATTR_IO_UNIT_INFO +* NAME +* IB_MAD_ATTR_IO_UNIT_INFO +* +* DESCRIPTION +* IOUnitInfo attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_IO_UNIT_INFO (CL_NTOH16(0x0010)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_IO_CONTROLLER_PROFILE +* NAME +* IB_MAD_ATTR_IO_CONTROLLER_PROFILE +* +* DESCRIPTION +* IOControllerProfile attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_IO_CONTROLLER_PROFILE (CL_NTOH16(0x0011)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_SERVICE_ENTRIES +* NAME +* IB_MAD_ATTR_SERVICE_ENTRIES +* +* DESCRIPTION +* ServiceEntries attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_SERVICE_ENTRIES (CL_NTOH16(0x0012)) +/**********/ + +/****d* IBA Base: Constants/IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT +* NAME +* IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT +* +* DESCRIPTION +* DiagnosticTimeout attribute (16.3.3) +* +* SOURCE +*/ +#define IB_MAD_ATTR_DIAGNOSTIC_TIMEOUT (CL_NTOH16(0x0020)) +/**********/ + +/****d* IBA Base: 
Constants/IB_MAD_ATTR_PREPARE_TO_TEST
+* NAME
+* IB_MAD_ATTR_PREPARE_TO_TEST
+*
+* DESCRIPTION
+* PrepareToTest attribute (16.3.3)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_PREPARE_TO_TEST (CL_NTOH16(0x0021))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_ONCE
+* NAME
+* IB_MAD_ATTR_TEST_DEVICE_ONCE
+*
+* DESCRIPTION
+* TestDeviceOnce attribute (16.3.3)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_TEST_DEVICE_ONCE (CL_NTOH16(0x0022))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_TEST_DEVICE_LOOP
+* NAME
+* IB_MAD_ATTR_TEST_DEVICE_LOOP
+*
+* DESCRIPTION
+* TestDeviceLoop attribute (16.3.3)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_TEST_DEVICE_LOOP (CL_NTOH16(0x0023))
+/**********/
+
+/****d* IBA Base: Constants/IB_MAD_ATTR_DIAG_CODE
+* NAME
+* IB_MAD_ATTR_DIAG_CODE
+*
+* DESCRIPTION
+* DiagCode attribute (16.3.3)
+*
+* SOURCE
+*/
+#define IB_MAD_ATTR_DIAG_CODE (CL_NTOH16(0x0024))
+/**********/
+
+/****d* IBA Base: Constants/IB_NODE_TYPE_CA
+* NAME
+* IB_NODE_TYPE_CA
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NODE_TYPE_CA 0x01
+/**********/
+
+/****d* IBA Base: Constants/IB_NODE_TYPE_SWITCH
+* NAME
+* IB_NODE_TYPE_SWITCH
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NODE_TYPE_SWITCH 0x02
+/**********/
+
+/****d* IBA Base: Constants/IB_NODE_TYPE_ROUTER
+* NAME
+* IB_NODE_TYPE_ROUTER
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NODE_TYPE_ROUTER 0x03
+/**********/
+
+/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_CA
+* NAME
+* IB_NOTICE_NODE_TYPE_CA
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NOTICE_NODE_TYPE_CA (CL_NTOH32(0x000001))
+/**********/
+
+/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SWITCH
+* NAME
+* IB_NOTICE_NODE_TYPE_SWITCH
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NOTICE_NODE_TYPE_SWITCH (CL_NTOH32(0x000002))
+/**********/
+
+/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_ROUTER
+* NAME
+* IB_NOTICE_NODE_TYPE_ROUTER
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2)
+*
+* SOURCE
+*/
+#define IB_NOTICE_NODE_TYPE_ROUTER (CL_NTOH32(0x000003))
+/**********/
+
+/****d* IBA Base: Constants/IB_NOTICE_NODE_TYPE_SUBN_MGMT
+* NAME
+* IB_NOTICE_NODE_TYPE_SUBN_MGMT
+*
+* DESCRIPTION
+* Encoded generic node type used in MAD attributes (13.4.8.2).
+* Note that this value is not defined for the NodeType field
+* of the NodeInfo attribute (14.2.5.3).
+*
+* SOURCE
+*/
+#define IB_NOTICE_NODE_TYPE_SUBN_MGMT (CL_NTOH32(0x000004))
+/**********/
+
+/****d* IBA Base: Constants/IB_MTU_LEN_TYPE
+* NAME
+* IB_MTU_LEN_TYPE
+*
+* DESCRIPTION
+* Encoded path MTU.
+* 1: 256 +* 2: 512 +* 3: 1024 +* 4: 2048 +* 5: 4096 +* others: reserved +* +* SOURCE +*/ +#define IB_MTU_LEN_256 1 +#define IB_MTU_LEN_512 2 +#define IB_MTU_LEN_1024 3 +#define IB_MTU_LEN_2048 4 +#define IB_MTU_LEN_4096 5 + +#define IB_MIN_MTU IB_MTU_LEN_256 +#define IB_MAX_MTU IB_MTU_LEN_4096 + +/**********/ + +/****d* IBA Base: Constants/IB_PATH_SELECTOR_TYPE +* NAME +* IB_PATH_SELECTOR_TYPE +* +* DESCRIPTION +* Path selector. +* 0: greater than specified +* 1: less than specified +* 2: exactly the specified +* 3: largest available +* +* SOURCE +*/ +#define IB_PATH_SELECTOR_GREATER_THAN 0 +#define IB_PATH_SELECTOR_LESS_THAN 1 +#define IB_PATH_SELECTOR_EXACTLY 2 +#define IB_PATH_SELECTOR_LARGEST 3 +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_STATE_NOTACTIVE +* NAME +* IB_SMINFO_STATE_NOTACTIVE +* +* DESCRIPTION +* Encoded state value used in the SMInfo attribute. +* +* SOURCE +*/ +#define IB_SMINFO_STATE_NOTACTIVE 0 +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_STATE_DISCOVERING +* NAME +* IB_SMINFO_STATE_DISCOVERING +* +* DESCRIPTION +* Encoded state value used in the SMInfo attribute. +* +* SOURCE +*/ +#define IB_SMINFO_STATE_DISCOVERING 1 +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_STATE_STANDBY +* NAME +* IB_SMINFO_STATE_STANDBY +* +* DESCRIPTION +* Encoded state value used in the SMInfo attribute. +* +* SOURCE +*/ +#define IB_SMINFO_STATE_STANDBY 2 +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_STATE_MASTER +* NAME +* IB_SMINFO_STATE_MASTER +* +* DESCRIPTION +* Encoded state value used in the SMInfo attribute. +* +* SOURCE +*/ +#define IB_SMINFO_STATE_MASTER 3 +/**********/ + +/****d* IBA Base: Constants/IB_PATH_REC_SELECTOR_MASK +* NAME +* IB_PATH_REC_SELECTOR_MASK +* +* DESCRIPTION +* Mask for the selector field for path record MTU, rate, +* and packet lifetime. +* +* SOURCE +*/ +#define IB_PATH_REC_SELECTOR_MASK 0xC0 + +/****d* IBA Base: Constants/IB_MULTIPATH_REC_SELECTOR_MASK +* NAME +* IB_MULTIPATH_REC_SELECTOR_MASK +* +* DESCRIPTION +* Mask for the selector field for multipath record MTU, rate, +* and packet lifetime. +* +* SOURCE +*/ +#define IB_MULTIPATH_REC_SELECTOR_MASK 0xC0 +/**********/ + +/****d* IBA Base: Constants/IB_PATH_REC_BASE_MASK +* NAME +* IB_PATH_REC_BASE_MASK +* +* DESCRIPTION +* Mask for the base value field for path record MTU, rate, +* and packet lifetime. +* +* SOURCE +*/ +#define IB_PATH_REC_BASE_MASK 0x3F +/**********/ + +/****d* IBA Base: Constants/IB_MULTIPATH_REC_BASE_MASK +* NAME +* IB_MULTIPATH_REC_BASE_MASK +* +* DESCRIPTION +* Mask for the base value field for multipath record MTU, rate, +* and packet lifetime. +* +* SOURCE +*/ +#define IB_MULTIPATH_REC_BASE_MASK 0x3F +/**********/ + +/****h* IBA Base/Type Definitions +* NAME +* Type Definitions +* +* DESCRIPTION +* Definitions are from the InfiniBand Architecture Specification v1.2 +* +*********/ + +/****d* IBA Base: Types/ib_net16_t +* NAME +* ib_net16_t +* +* DESCRIPTION +* Defines the network ordered type for 16-bit values. +* +* SOURCE +*/ +typedef uint16_t ib_net16_t; +/**********/ + +/****d* IBA Base: Types/ib_net32_t +* NAME +* ib_net32_t +* +* DESCRIPTION +* Defines the network ordered type for 32-bit values. +* +* SOURCE +*/ +typedef uint32_t ib_net32_t; +/**********/ + +/****d* IBA Base: Types/ib_net64_t +* NAME +* ib_net64_t +* +* DESCRIPTION +* Defines the network ordered type for 64-bit values. 
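+* As an illustrative note (not part of the original header), values of
+* the ib_netXX_t types are big-endian on the wire and must be converted
+* with the cl_ntohXX/cl_htonXX helpers before host arithmetic, e.g.
+* uint64_t host_guid = cl_ntoh64( net_guid );
+* where net_guid is an assumed ib_net64_t variable.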
+*
+* SOURCE
+*/
+typedef uint64_t ib_net64_t;
+/**********/
+
+/****d* IBA Base: Types/ib_gid_prefix_t
+* NAME
+* ib_gid_prefix_t
+*
+* DESCRIPTION
+* Network ordered 64-bit subnet prefix portion of a GID.
+*
+* SOURCE
+*/
+typedef ib_net64_t ib_gid_prefix_t;
+/**********/
+
+/****d* IBA Base: Constants/ib_link_states_t
+* NAME
+* ib_link_states_t
+*
+* DESCRIPTION
+* Defines the link states of a port.
+*
+* SOURCE
+*/
+#define IB_LINK_NO_CHANGE 0
+#define IB_LINK_DOWN 1
+#define IB_LINK_INIT 2
+#define IB_LINK_ARMED 3
+#define IB_LINK_ACTIVE 4
+#define IB_LINK_ACT_DEFER 5
+/**********/
+
+static const char* const __ib_node_type_str[] =
+{
+ "UNKNOWN",
+ "Channel Adapter",
+ "Switch",
+ "Router",
+ "Subnet Management"
+};
+
+/****f* IBA Base: Types/ib_get_node_type_str
+* NAME
+* ib_get_node_type_str
+*
+* DESCRIPTION
+* Returns a string for the specified node type.
+*
+* SYNOPSIS
+*/
+static inline const char* OSM_API
+ib_get_node_type_str(
+ IN uint32_t node_type )
+{
+ /* Clamp out-of-range values to index 0 ("UNKNOWN"). */
+ if( node_type >= sizeof(__ib_node_type_str)/sizeof(__ib_node_type_str[0]) )
+ node_type = 0;
+ return( __ib_node_type_str[node_type] );
+}
+/*
+* PARAMETERS
+* node_type
+* [in] Encoded node type as returned in the NodeInfo attribute.
+
+* RETURN VALUES
+* Pointer to the node type string.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_node_info_t
+*********/
+
+static const char* const __ib_port_state_str[] =
+{
+ "No State Change (NOP)",
+ "DOWN",
+ "INIT",
+ "ARMED",
+ "ACTIVE",
+ "ACTDEFER",
+ "UNKNOWN"
+};
+
+/****f* IBA Base: Types/ib_get_port_state_str
+* NAME
+* ib_get_port_state_str
+*
+* DESCRIPTION
+* Returns a string for the specified port state.
+*
+* SYNOPSIS
+*/
+static inline const char* OSM_API
+ib_get_port_state_str(
+ IN uint8_t port_state )
+{
+ if( port_state > IB_LINK_ACTIVE )
+ port_state = IB_LINK_ACTIVE + 1;
+ return( __ib_port_state_str[port_state] );
+}
+/*
+* PARAMETERS
+* port_state
+* [in] Encoded port state as returned in the PortInfo attribute.
+
+* RETURN VALUES
+* Pointer to the port state string.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_port_info_t
+*********/
+
+/****f* IBA Base: Types/ib_get_port_state_from_str
+* NAME
+* ib_get_port_state_from_str
+*
+* DESCRIPTION
+* Returns the port state code for the specified port state string.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_get_port_state_from_str(
+ IN char* p_port_state_str )
+{
+ if( !strncmp(p_port_state_str, "No State Change (NOP)", 12) )
+ return(0);
+ else if( !strncmp(p_port_state_str, "DOWN", 4) )
+ return(1);
+ else if( !strncmp(p_port_state_str, "INIT", 4) )
+ return(2);
+ else if( !strncmp(p_port_state_str, "ARMED", 5) )
+ return(3);
+ else if( !strncmp(p_port_state_str, "ACTIVE", 6) )
+ return(4);
+ else if( !strncmp(p_port_state_str, "ACTDEFER", 8) )
+ return(5);
+ return(6);
+}
+/*
+* PARAMETERS
+* p_port_state_str
+* [in] A string matching one returned by ib_get_port_state_str
+*
+* RETURN VALUES
+* The appropriate code.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_port_info_t
+*********/
+
+/****d* IBA Base: Constants/Join States
+* NAME
+* Join States
+*
+* DESCRIPTION
+* Defines the join state flags for multicast group management.
+*
+* SOURCE
+*/
+#define IB_JOIN_STATE_FULL 1
+#define IB_JOIN_STATE_NON 2
+#define IB_JOIN_STATE_SEND_ONLY 4
+/**********/
+
+/****f* IBA Base: Types/ib_pkey_get_base
+* NAME
+* ib_pkey_get_base
+*
+* DESCRIPTION
+* Returns the base P_Key value with the membership bit stripped.
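+* For example (illustrative, assuming IB_PKEY_BASE_MASK is the
+* network-order 0x7FFF mask defined earlier in this header): the
+* full-member P_Key 0xFFFF and the limited-member P_Key 0x7FFF share
+* the same base, so
+* ib_pkey_get_base( CL_HTON16( 0xFFFF ) ) == CL_HTON16( 0x7FFF )
+* holds on either byte order.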
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t OSM_API
+ib_pkey_get_base(
+ IN const ib_net16_t pkey )
+{
+ return( (ib_net16_t)(pkey & IB_PKEY_BASE_MASK) );
+}
+/*
+* PARAMETERS
+* pkey
+* [in] P_Key value
+*
+* RETURN VALUE
+* Returns the base P_Key value with the membership bit stripped.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_pkey_is_full_member
+* NAME
+* ib_pkey_is_full_member
+*
+* DESCRIPTION
+* Indicates if the port is a full member of the partition.
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_pkey_is_full_member(
+ IN const ib_net16_t pkey )
+{
+ return( (pkey & IB_PKEY_TYPE_MASK) == IB_PKEY_TYPE_MASK );
+}
+/*
+* PARAMETERS
+* pkey
+* [in] P_Key value
+*
+* RETURN VALUE
+* TRUE if the port is a full member of the partition.
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_pkey_get_base, ib_net16_t
+*********/
+
+/****f* IBA Base: Types/ib_pkey_is_invalid
+* NAME
+* ib_pkey_is_invalid
+*
+* DESCRIPTION
+* Returns TRUE if the given P_Key is an invalid P_Key.
+* C10-116: the CI shall regard a P_Key as invalid if its low-order
+* 15 bits are all zero...
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_pkey_is_invalid(
+ IN const ib_net16_t pkey )
+{
+ if (ib_pkey_get_base(pkey) == 0x0000)
+ return TRUE;
+
+ return FALSE;
+}
+/*
+* PARAMETERS
+* pkey
+* [in] P_Key value
+*
+* RETURN VALUE
+* TRUE if the given P_Key is invalid, FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****d* IBA Base: Types/ib_gid_t
+* NAME
+* ib_gid_t
+*
+* DESCRIPTION
+* The InfiniBand Global Identifier (GID) of a port or multicast group.
+*
+* SYNOPSIS
+*/
+#include
+typedef union _ib_gid
+{
+ uint8_t raw[16];
+ struct _ib_gid_unicast
+ {
+ ib_gid_prefix_t prefix;
+ ib_net64_t interface_id;
+
+ } PACK_SUFFIX unicast;
+
+ struct _ib_gid_multicast
+ {
+ uint8_t header[2];
+ uint8_t raw_group_id[14];
+
+ } PACK_SUFFIX multicast;
+
+} PACK_SUFFIX ib_gid_t;
+#include
+/*
+* FIELDS
+* raw
+* GID represented as an unformatted byte array.
+*
+* unicast
+* Typical unicast representation with subnet prefix and
+* port GUID.
+*
+* multicast
+* Representation for multicast use.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_gid_is_multicast
+* NAME
+* ib_gid_is_multicast
+*
+* DESCRIPTION
+* Returns a boolean indicating whether a GID is a multicast GID.
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_gid_is_multicast(
+ IN const ib_gid_t* p_gid )
+{
+ return( p_gid->raw[0] == 0xFF );
+}
+
+/****f* IBA Base: Types/ib_mgid_get_scope
+* NAME
+* ib_mgid_get_scope
+*
+* DESCRIPTION
+* Returns scope of (assumed) multicast GID.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_mgid_get_scope(
+ IN const ib_gid_t* p_gid )
+{
+ return( p_gid->raw[1] & 0x0F );
+}
+
+/****f* IBA Base: Types/ib_mgid_set_scope
+* NAME
+* ib_mgid_set_scope
+*
+* DESCRIPTION
+* Sets scope of (assumed) multicast GID.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_mgid_set_scope(
+ IN ib_gid_t* const p_gid,
+ IN const uint8_t scope )
+{
+ p_gid->raw[1] &= 0xF0;
+ p_gid->raw[1] |= scope & 0x0F;
+}
+
+/****f* IBA Base: Types/ib_gid_set_default
+* NAME
+* ib_gid_set_default
+*
+* DESCRIPTION
+* Sets a GID to the default value.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_gid_set_default(
+ IN ib_gid_t* const p_gid,
+ IN const ib_net64_t interface_id )
+{
+ p_gid->unicast.prefix = IB_DEFAULT_SUBNET_PREFIX;
+ p_gid->unicast.interface_id = interface_id;
+}
+/*
+* PARAMETERS
+* p_gid
+* [in] Pointer to the GID object.
+*
+* interface_id
+* [in] Manufacturer assigned EUI64 value of a port.
+*
+* RETURN VALUES
+* None.
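+*
+* EXAMPLE
+* A minimal sketch (illustrative only; port_guid stands for a
+* caller-supplied ib_net64_t port GUID):
+*
+* ib_gid_t gid;
+* ib_gid_set_default( &gid, port_guid );
+* CL_ASSERT( !ib_gid_is_multicast( &gid ) );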
+* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_gid_get_subnet_prefix +* NAME +* ib_gid_get_subnet_prefix +* +* DESCRIPTION +* Gets the subnet prefix from a GID. +* +* SYNOPSIS +*/ +static inline ib_net64_t OSM_API +ib_gid_get_subnet_prefix( + IN const ib_gid_t* const p_gid ) +{ + return( p_gid->unicast.prefix ); +} +/* +* PARAMETERS +* p_gid +* [in] Pointer to the GID object. +* +* RETURN VALUES +* 64-bit subnet prefix value. +* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_gid_is_link_local +* NAME +* ib_gid_is_link_local +* +* DESCRIPTION +* Returns TRUE if the unicast GID scoping indicates link local, +* FALSE otherwise. +* +* SYNOPSIS +*/ +static inline boolean_t OSM_API +ib_gid_is_link_local( + IN const ib_gid_t* const p_gid ) +{ + return( ( ib_gid_get_subnet_prefix( p_gid ) & + CL_HTON64( 0xFFC0000000000000ULL ) ) == IB_DEFAULT_SUBNET_PREFIX ); +} +/* +* PARAMETERS +* p_gid +* [in] Pointer to the GID object. +* +* RETURN VALUES +* Returns TRUE if the unicast GID scoping indicates link local, +* FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_gid_is_site_local +* NAME +* ib_gid_is_site_local +* +* DESCRIPTION +* Returns TRUE if the unicast GID scoping indicates site local, +* FALSE otherwise. +* +* SYNOPSIS +*/ +static inline boolean_t OSM_API +ib_gid_is_site_local( + IN const ib_gid_t* const p_gid ) +{ + return( ( ib_gid_get_subnet_prefix( p_gid ) & + CL_HTON64( 0xFFFFFFFFFFFF0000ULL ) ) == CL_HTON64( 0xFEC0000000000000ULL ) ); +} +/* +* PARAMETERS +* p_gid +* [in] Pointer to the GID object. +* +* RETURN VALUES +* Returns TRUE if the unicast GID scoping indicates site local, +* FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_gid_get_guid +* NAME +* ib_gid_get_guid +* +* DESCRIPTION +* Gets the guid from a GID. +* +* SYNOPSIS +*/ +static inline ib_net64_t OSM_API +ib_gid_get_guid( + IN const ib_gid_t* const p_gid ) +{ + return( p_gid->unicast.interface_id ); +} +/* +* PARAMETERS +* p_gid +* [in] Pointer to the GID object. +* +* RETURN VALUES +* 64-bit GUID value. +* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****s* IBA Base: Types/ib_path_rec_t +* NAME +* ib_path_rec_t +* +* DESCRIPTION +* Path records encapsulate the properties of a given +* route between two end-points on a subnet. +* +* SYNOPSIS +*/ +#include +typedef struct _ib_path_rec +{ + uint8_t resv0[8]; + ib_gid_t dgid; + ib_gid_t sgid; + ib_net16_t dlid; + ib_net16_t slid; + ib_net32_t hop_flow_raw; + uint8_t tclass; + uint8_t num_path; + ib_net16_t pkey; + ib_net16_t sl; + uint8_t mtu; + uint8_t rate; + uint8_t pkt_life; + uint8_t preference; + uint8_t resv2[6]; + +} PACK_SUFFIX ib_path_rec_t; +#include +/* +* FIELDS +* resv0 +* Reserved bytes. +* +* dgid +* GID of destination port. +* +* sgid +* GID of source port. +* +* dlid +* LID of destination port. +* +* slid +* LID of source port. +* +* hop_flow_raw +* Global routing parameters: hop count, flow label and raw bit. +* +* tclass +* Another global routing parameter. +* +* num_path +* Reversible path - 1 bit to say if path is reversible. +* num_path [6:0] In queries, maximum number of paths to return. +* In responses, undefined. +* +* pkey +* Partition key (P_Key) to use on this path. +* +* resv1 +* Reserved byte. +* +* sl +* Service level to use on this path. +* +* mtu +* MTU and MTU selector fields to use on this path +* +* rate +* Rate and rate selector fields to use on this path. 
+* +* pkt_life +* Packet lifetime +* +* preference +* Indicates the relative merit of this path versus other path +* records returned from the SA. Lower numbers are better. +* +* resv2 +* Reserved bytes. +* SEE ALSO +*********/ + +/* Path Record Component Masks */ +#define IB_PR_COMPMASK_DGID (CL_HTON64(((uint64_t)1)<<2)) +#define IB_PR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_PR_COMPMASK_DLID (CL_HTON64(((uint64_t)1)<<4)) +#define IB_PR_COMPMASK_SLID (CL_HTON64(((uint64_t)1)<<5)) +#define IB_PR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_PR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<8)) +#define IB_PR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<9)) +#define IB_PR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<10)) +#define IB_PR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<11)) +#define IB_PR_COMPMASK_NUMBPATH (CL_HTON64(((uint64_t)1)<<12)) +#define IB_PR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<13)) +#define IB_PR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<14)) +#define IB_PR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<15)) +#define IB_PR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<16)) +#define IB_PR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<17)) +#define IB_PR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<18)) +#define IB_PR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<19)) +#define IB_PR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<20)) +#define IB_PR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<21)) + +/* Link Record Component Masks */ +#define IB_LR_COMPMASK_FROM_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_LR_COMPMASK_FROM_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_LR_COMPMASK_TO_PORT (CL_HTON64(((uint64_t)1)<<2)) +#define IB_LR_COMPMASK_TO_LID (CL_HTON64(((uint64_t)1)<<3)) + +/* VL Arbitration Record Masks */ +#define IB_VLA_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_VLA_COMPMASK_OUT_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_VLA_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<2)) + +/* SLtoVL Mapping Record Masks */ +#define IB_SLVL_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SLVL_COMPMASK_IN_PORT (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SLVL_COMPMASK_OUT_PORT (CL_HTON64(((uint64_t)1)<<2)) + +/* P_Key Table Record Masks */ +#define IB_PKEY_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_PKEY_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1)) +#define IB_PKEY_COMPMASK_PORT (CL_HTON64(((uint64_t)1)<<2)) + +/* Switch Info Record Masks */ +#define IB_SWIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SWIR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1)) + +/* LFT Record Masks */ +#define IB_LFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_LFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<1)) + +/* MFT Record Masks */ +#define IB_MFTR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MFTR_COMPMASK_POSITION (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MFTR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MFTR_COMPMASK_BLOCK (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MFTR_COMPMASK_RESERVED2 (CL_HTON64(((uint64_t)1)<<4)) + +/* NodeInfo Record Masks */ +#define IB_NR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_NR_COMPMASK_RESERVED1 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_NR_COMPMASK_BASEVERSION (CL_HTON64(((uint64_t)1)<<2)) +#define IB_NR_COMPMASK_CLASSVERSION (CL_HTON64(((uint64_t)1)<<3)) +#define IB_NR_COMPMASK_NODETYPE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_NR_COMPMASK_NUMPORTS (CL_HTON64(((uint64_t)1)<<5)) +#define 
IB_NR_COMPMASK_SYSIMAGEGUID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_NR_COMPMASK_NODEGUID (CL_HTON64(((uint64_t)1)<<7)) +#define IB_NR_COMPMASK_PORTGUID (CL_HTON64(((uint64_t)1)<<8)) +#define IB_NR_COMPMASK_PARTCAP (CL_HTON64(((uint64_t)1)<<9)) +#define IB_NR_COMPMASK_DEVID (CL_HTON64(((uint64_t)1)<<10)) +#define IB_NR_COMPMASK_REV (CL_HTON64(((uint64_t)1)<<11)) +#define IB_NR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<12)) +#define IB_NR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<13)) +#define IB_NR_COMPMASK_NODEDESC (CL_HTON64(((uint64_t)1)<<14)) + +/* Service Record Component Masks Sec 15.2.5.14 Ver 1.1*/ +#define IB_SR_COMPMASK_SID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SR_COMPMASK_SGID (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SR_COMPMASK_SPKEY (CL_HTON64(((uint64_t)1)<<2)) +#define IB_SR_COMPMASK_RES1 (CL_HTON64(((uint64_t)1)<<3)) +#define IB_SR_COMPMASK_SLEASE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_SR_COMPMASK_SKEY (CL_HTON64(((uint64_t)1)<<5)) +#define IB_SR_COMPMASK_SNAME (CL_HTON64(((uint64_t)1)<<6)) +#define IB_SR_COMPMASK_SDATA8_0 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_SR_COMPMASK_SDATA8_1 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_SR_COMPMASK_SDATA8_2 (CL_HTON64(((uint64_t)1)<<9)) +#define IB_SR_COMPMASK_SDATA8_3 (CL_HTON64(((uint64_t)1)<<10)) +#define IB_SR_COMPMASK_SDATA8_4 (CL_HTON64(((uint64_t)1)<<11)) +#define IB_SR_COMPMASK_SDATA8_5 (CL_HTON64(((uint64_t)1)<<12)) +#define IB_SR_COMPMASK_SDATA8_6 (CL_HTON64(((uint64_t)1)<<13)) +#define IB_SR_COMPMASK_SDATA8_7 (CL_HTON64(((uint64_t)1)<<14)) +#define IB_SR_COMPMASK_SDATA8_8 (CL_HTON64(((uint64_t)1)<<15)) +#define IB_SR_COMPMASK_SDATA8_9 (CL_HTON64(((uint64_t)1)<<16)) +#define IB_SR_COMPMASK_SDATA8_10 (CL_HTON64(((uint64_t)1)<<17)) +#define IB_SR_COMPMASK_SDATA8_11 (CL_HTON64(((uint64_t)1)<<18)) +#define IB_SR_COMPMASK_SDATA8_12 (CL_HTON64(((uint64_t)1)<<19)) +#define IB_SR_COMPMASK_SDATA8_13 (CL_HTON64(((uint64_t)1)<<20)) +#define IB_SR_COMPMASK_SDATA8_14 (CL_HTON64(((uint64_t)1)<<21)) +#define IB_SR_COMPMASK_SDATA8_15 (CL_HTON64(((uint64_t)1)<<22)) +#define IB_SR_COMPMASK_SDATA16_0 (CL_HTON64(((uint64_t)1)<<23)) +#define IB_SR_COMPMASK_SDATA16_1 (CL_HTON64(((uint64_t)1)<<24)) +#define IB_SR_COMPMASK_SDATA16_2 (CL_HTON64(((uint64_t)1)<<25)) +#define IB_SR_COMPMASK_SDATA16_3 (CL_HTON64(((uint64_t)1)<<26)) +#define IB_SR_COMPMASK_SDATA16_4 (CL_HTON64(((uint64_t)1)<<27)) +#define IB_SR_COMPMASK_SDATA16_5 (CL_HTON64(((uint64_t)1)<<28)) +#define IB_SR_COMPMASK_SDATA16_6 (CL_HTON64(((uint64_t)1)<<29)) +#define IB_SR_COMPMASK_SDATA16_7 (CL_HTON64(((uint64_t)1)<<30)) +#define IB_SR_COMPMASK_SDATA32_0 (CL_HTON64(((uint64_t)1)<<31)) +#define IB_SR_COMPMASK_SDATA32_1 (CL_HTON64(((uint64_t)1)<<32)) +#define IB_SR_COMPMASK_SDATA32_2 (CL_HTON64(((uint64_t)1)<<33)) +#define IB_SR_COMPMASK_SDATA32_3 (CL_HTON64(((uint64_t)1)<<34)) +#define IB_SR_COMPMASK_SDATA64_0 (CL_HTON64(((uint64_t)1)<<35)) +#define IB_SR_COMPMASK_SDATA64_1 (CL_HTON64(((uint64_t)1)<<36)) + +/* Port Info Record Component Masks */ +#define IB_PIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_PIR_COMPMASK_PORTNUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_PIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_PIR_COMPMASK_MKEY (CL_HTON64(((uint64_t)1)<<3)) +#define IB_PIR_COMPMASK_GIDPRE (CL_HTON64(((uint64_t)1)<<4)) +#define IB_PIR_COMPMASK_BASELID (CL_HTON64(((uint64_t)1)<<5)) +#define IB_PIR_COMPMASK_SMLID (CL_HTON64(((uint64_t)1)<<6)) +#define IB_PIR_COMPMASK_CAPMASK (CL_HTON64(((uint64_t)1)<<7)) +#define IB_PIR_COMPMASK_DIAGCODE 
(CL_HTON64(((uint64_t)1)<<8)) +#define IB_PIR_COMPMASK_MKEYLEASEPRD (CL_HTON64(((uint64_t)1)<<9)) +#define IB_PIR_COMPMASK_LOCALPORTNUM (CL_HTON64(((uint64_t)1)<<10)) +#define IB_PIR_COMPMASK_LINKWIDTHENABLED (CL_HTON64(((uint64_t)1)<<11)) +#define IB_PIR_COMPMASK_LNKWIDTHSUPPORT (CL_HTON64(((uint64_t)1)<<12)) +#define IB_PIR_COMPMASK_LNKWIDTHACTIVE (CL_HTON64(((uint64_t)1)<<13)) +#define IB_PIR_COMPMASK_LNKSPEEDSUPPORT (CL_HTON64(((uint64_t)1)<<14)) +#define IB_PIR_COMPMASK_PORTSTATE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_PIR_COMPMASK_PORTPHYSTATE (CL_HTON64(((uint64_t)1)<<16)) +#define IB_PIR_COMPMASK_LINKDWNDFLTSTATE (CL_HTON64(((uint64_t)1)<<17)) +#define IB_PIR_COMPMASK_MKEYPROTBITS (CL_HTON64(((uint64_t)1)<<18)) +#define IB_PIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<19)) +#define IB_PIR_COMPMASK_LMC (CL_HTON64(((uint64_t)1)<<20)) +#define IB_PIR_COMPMASK_LINKSPEEDACTIVE (CL_HTON64(((uint64_t)1)<<21)) +#define IB_PIR_COMPMASK_LINKSPEEDENABLE (CL_HTON64(((uint64_t)1)<<22)) +#define IB_PIR_COMPMASK_NEIGHBORMTU (CL_HTON64(((uint64_t)1)<<23)) +#define IB_PIR_COMPMASK_MASTERSMSL (CL_HTON64(((uint64_t)1)<<24)) +#define IB_PIR_COMPMASK_VLCAP (CL_HTON64(((uint64_t)1)<<25)) +#define IB_PIR_COMPMASK_INITTYPE (CL_HTON64(((uint64_t)1)<<26)) +#define IB_PIR_COMPMASK_VLHIGHLIMIT (CL_HTON64(((uint64_t)1)<<27)) +#define IB_PIR_COMPMASK_VLARBHIGHCAP (CL_HTON64(((uint64_t)1)<<28)) +#define IB_PIR_COMPMASK_VLARBLOWCAP (CL_HTON64(((uint64_t)1)<<29)) +#define IB_PIR_COMPMASK_INITTYPEREPLY (CL_HTON64(((uint64_t)1)<<30)) +#define IB_PIR_COMPMASK_MTUCAP (CL_HTON64(((uint64_t)1)<<31)) +#define IB_PIR_COMPMASK_VLSTALLCNT (CL_HTON64(((uint64_t)1)<<32)) +#define IB_PIR_COMPMASK_HOQLIFE (CL_HTON64(((uint64_t)1)<<33)) +#define IB_PIR_COMPMASK_OPVLS (CL_HTON64(((uint64_t)1)<<34)) +#define IB_PIR_COMPMASK_PARENFIN (CL_HTON64(((uint64_t)1)<<35)) +#define IB_PIR_COMPMASK_PARENFOUT (CL_HTON64(((uint64_t)1)<<36)) +#define IB_PIR_COMPMASK_FILTERRAWIN (CL_HTON64(((uint64_t)1)<<37)) +#define IB_PIR_COMPMASK_FILTERRAWOUT (CL_HTON64(((uint64_t)1)<<38)) +#define IB_PIR_COMPMASK_MKEYVIO (CL_HTON64(((uint64_t)1)<<39)) +#define IB_PIR_COMPMASK_PKEYVIO (CL_HTON64(((uint64_t)1)<<40)) +#define IB_PIR_COMPMASK_QKEYVIO (CL_HTON64(((uint64_t)1)<<41)) +#define IB_PIR_COMPMASK_GUIDCAP (CL_HTON64(((uint64_t)1)<<42)) +#define IB_PIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<43)) +#define IB_PIR_COMPMASK_SUBNTO (CL_HTON64(((uint64_t)1)<<44)) +#define IB_PIR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<45)) +#define IB_PIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<46)) +#define IB_PIR_COMPMASK_LOCALPHYERR (CL_HTON64(((uint64_t)1)<<47)) +#define IB_PIR_COMPMASK_OVERRUNERR (CL_HTON64(((uint64_t)1)<<48)) + +/* Multicast Member Record Component Masks */ +#define IB_MCR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MCR_COMPMASK_MGID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MCR_COMPMASK_PORT_GID (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MCR_COMPMASK_QKEY (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MCR_COMPMASK_MLID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MCR_COMPMASK_MTU_SEL (CL_HTON64(((uint64_t)1)<<4)) +#define IB_MCR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<5)) +#define IB_MCR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<6)) +#define IB_MCR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<7)) +#define IB_MCR_COMPMASK_RATE_SEL (CL_HTON64(((uint64_t)1)<<8)) +#define IB_MCR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<9)) +#define IB_MCR_COMPMASK_LIFE_SEL (CL_HTON64(((uint64_t)1)<<10)) +#define IB_MCR_COMPMASK_LIFE (CL_HTON64(((uint64_t)1)<<11)) 
+#define IB_MCR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<12)) +#define IB_MCR_COMPMASK_FLOW (CL_HTON64(((uint64_t)1)<<13)) +#define IB_MCR_COMPMASK_HOP (CL_HTON64(((uint64_t)1)<<14)) +#define IB_MCR_COMPMASK_SCOPE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_MCR_COMPMASK_JOIN_STATE (CL_HTON64(((uint64_t)1)<<16)) +#define IB_MCR_COMPMASK_PROXY (CL_HTON64(((uint64_t)1)<<17)) + +/* GUID Info Record Component Masks */ +#define IB_GIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_GIR_COMPMASK_BLOCKNUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_GIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_GIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<3)) +#define IB_GIR_COMPMASK_GID0 (CL_HTON64(((uint64_t)1)<<4)) +#define IB_GIR_COMPMASK_GID1 (CL_HTON64(((uint64_t)1)<<5)) +#define IB_GIR_COMPMASK_GID2 (CL_HTON64(((uint64_t)1)<<6)) +#define IB_GIR_COMPMASK_GID3 (CL_HTON64(((uint64_t)1)<<7)) +#define IB_GIR_COMPMASK_GID4 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_GIR_COMPMASK_GID5 (CL_HTON64(((uint64_t)1)<<9)) +#define IB_GIR_COMPMASK_GID6 (CL_HTON64(((uint64_t)1)<<10)) +#define IB_GIR_COMPMASK_GID7 (CL_HTON64(((uint64_t)1)<<11)) + +/* MultiPath Record Component Masks */ +#define IB_MPR_COMPMASK_RAWTRAFFIC (CL_HTON64(((uint64_t)1)<<0)) +#define IB_MPR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_MPR_COMPMASK_FLOWLABEL (CL_HTON64(((uint64_t)1)<<2)) +#define IB_MPR_COMPMASK_HOPLIMIT (CL_HTON64(((uint64_t)1)<<3)) +#define IB_MPR_COMPMASK_TCLASS (CL_HTON64(((uint64_t)1)<<4)) +#define IB_MPR_COMPMASK_REVERSIBLE (CL_HTON64(((uint64_t)1)<<5)) +#define IB_MPR_COMPMASK_NUMBPATH (CL_HTON64(((uint64_t)1)<<6)) +#define IB_MPR_COMPMASK_PKEY (CL_HTON64(((uint64_t)1)<<7)) +#define IB_MPR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<8)) +#define IB_MPR_COMPMASK_SL (CL_HTON64(((uint64_t)1)<<9)) +#define IB_MPR_COMPMASK_MTUSELEC (CL_HTON64(((uint64_t)1)<<10)) +#define IB_MPR_COMPMASK_MTU (CL_HTON64(((uint64_t)1)<<11)) +#define IB_MPR_COMPMASK_RATESELEC (CL_HTON64(((uint64_t)1)<<12)) +#define IB_MPR_COMPMASK_RATE (CL_HTON64(((uint64_t)1)<<13)) +#define IB_MPR_COMPMASK_PKTLIFETIMESELEC (CL_HTON64(((uint64_t)1)<<14)) +#define IB_MPR_COMPMASK_PKTLIFETIME (CL_HTON64(((uint64_t)1)<<15)) +#define IB_MPR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<16)) +#define IB_MPR_COMPMASK_INDEPSELEC (CL_HTON64(((uint64_t)1)<<17)) +#define IB_MPR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<18)) +#define IB_MPR_COMPMASK_SGIDCOUNT (CL_HTON64(((uint64_t)1)<<19)) +#define IB_MPR_COMPMASK_DGIDCOUNT (CL_HTON64(((uint64_t)1)<<20)) +#define IB_MPR_COMPMASK_RESV4 (CL_HTON64(((uint64_t)1)<<21)) + +/* SMInfo Record Component Masks */ +#define IB_SMIR_COMPMASK_LID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_SMIR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<1)) +#define IB_SMIR_COMPMASK_GUID (CL_HTON64(((uint64_t)1)<<2)) +#define IB_SMIR_COMPMASK_SMKEY (CL_HTON64(((uint64_t)1)<<3)) +#define IB_SMIR_COMPMASK_ACTCOUNT (CL_HTON64(((uint64_t)1)<<4)) +#define IB_SMIR_COMPMASK_PRIORITY (CL_HTON64(((uint64_t)1)<<5)) +#define IB_SMIR_COMPMASK_SMSTATE (CL_HTON64(((uint64_t)1)<<6)) + +/* InformInfo Record Component Masks */ +#define IB_IIR_COMPMASK_SUBSCRIBERGID (CL_HTON64(((uint64_t)1)<<0)) +#define IB_IIR_COMPMASK_ENUM (CL_HTON64(((uint64_t)1)<<1)) +#define IB_IIR_COMPMASK_RESV0 (CL_HTON64(((uint64_t)1)<<2)) +#define IB_IIR_COMPMASK_GID (CL_HTON64(((uint64_t)1)<<3)) +#define IB_IIR_COMPMASK_LIDRANGEBEGIN (CL_HTON64(((uint64_t)1)<<4)) +#define IB_IIR_COMPMASK_LIDRANGEEND (CL_HTON64(((uint64_t)1)<<5)) +#define IB_IIR_COMPMASK_RESV1 (CL_HTON64(((uint64_t)1)<<6)) 
+#define IB_IIR_COMPMASK_ISGENERIC (CL_HTON64(((uint64_t)1)<<7)) +#define IB_IIR_COMPMASK_SUBSCRIBE (CL_HTON64(((uint64_t)1)<<8)) +#define IB_IIR_COMPMASK_TYPE (CL_HTON64(((uint64_t)1)<<9)) +#define IB_IIR_COMPMASK_TRAPNUMB (CL_HTON64(((uint64_t)1)<<10)) +#define IB_IIR_COMPMASK_DEVICEID (CL_HTON64(((uint64_t)1)<<10)) +#define IB_IIR_COMPMASK_QPN (CL_HTON64(((uint64_t)1)<<11)) +#define IB_IIR_COMPMASK_RESV2 (CL_HTON64(((uint64_t)1)<<12)) +#define IB_IIR_COMPMASK_RESPTIME (CL_HTON64(((uint64_t)1)<<13)) +#define IB_IIR_COMPMASK_RESV3 (CL_HTON64(((uint64_t)1)<<14)) +#define IB_IIR_COMPMASK_PRODTYPE (CL_HTON64(((uint64_t)1)<<15)) +#define IB_IIR_COMPMASK_VENDID (CL_HTON64(((uint64_t)1)<<15)) + +/****f* IBA Base: Types/ib_path_rec_init_local +* NAME +* ib_path_rec_init_local +* +* DESCRIPTION +* Initializes a subnet local path record. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_path_rec_init_local( + IN ib_path_rec_t* const p_rec, + IN ib_gid_t* const p_dgid, + IN ib_gid_t* const p_sgid, + IN ib_net16_t dlid, + IN ib_net16_t slid, + IN uint8_t num_path, + IN ib_net16_t pkey, + IN uint8_t sl, + IN uint8_t mtu_selector, + IN uint8_t mtu, + IN uint8_t rate_selector, + IN uint8_t rate, + IN uint8_t pkt_life_selector, + IN uint8_t pkt_life, + IN uint8_t preference ) +{ + p_rec->dgid = *p_dgid; + p_rec->sgid = *p_sgid; + p_rec->dlid = dlid; + p_rec->slid = slid; + p_rec->num_path = num_path; + p_rec->pkey = pkey; + /* Lower 4 bits of path rec's SL are reserved. */ + p_rec->sl = cl_ntoh16( sl ); + p_rec->mtu = (uint8_t)((mtu & IB_PATH_REC_BASE_MASK) | + (uint8_t)(mtu_selector << 6)); + p_rec->rate = (uint8_t)((rate & IB_PATH_REC_BASE_MASK) | + (uint8_t)(rate_selector << 6)); + p_rec->pkt_life = (uint8_t)((pkt_life & IB_PATH_REC_BASE_MASK) | + (uint8_t)(pkt_life_selector << 6)); + p_rec->preference = preference; + + /* Clear global routing fields for local path records */ + p_rec->hop_flow_raw = 0; + p_rec->tclass = 0; + + *((uint64_t*)p_rec->resv0) = 0; + *((uint32_t*)p_rec->resv2) = 0; + *((uint16_t*)p_rec->resv2 + 2) = 0; +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* dgid +* [in] GID of destination port. +* +* sgid +* [in] GID of source port. +* +* dlid +* [in] LID of destination port. +* +* slid +* [in] LID of source port. +* +* num_path +* [in] Reversible path - 1 bit to say if path is reversible. +* num_path [6:0] In queries, maximum number of paths to return. +* In responses, undefined. +* +* pkey +* [in] Partition key (P_Key) to use on this path. +* +* sl +* [in] Service level to use on this path. Lower 4-bits are valid. +* +* mtu_selector +* [in] Encoded MTU selector value to use on this path +* +* mtu +* [in] Encoded MTU to use on this path +* +* rate_selector +* [in] Encoded rate selector value to use on this path. +* +* rate +* [in] Encoded rate to use on this path. +* +* pkt_life_selector +* [in] Encoded Packet selector value lifetime for this path. +* +* pkt_life +* [in] Encoded Packet lifetime for this path. +* +* preference +* [in] Indicates the relative merit of this path versus other path +* records returned from the SA. Lower numbers are better. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* ib_gid_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_num_path +* NAME +* ib_path_rec_num_path +* +* DESCRIPTION +* Get max number of paths to return. 
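+* For example (illustrative): in a query, a num_path value of 0x85
+* has the Reversible bit (0x80) set and requests at most 5 paths;
+* this accessor masks off the Reversible bit and returns 5.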
+* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_num_path( + IN const ib_path_rec_t* const p_rec ) +{ + return( p_rec->num_path &0x7F ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Maximum number of paths to return for each unique SGID_DGID combination. +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_sl +* NAME +* ib_path_rec_sl +* +* DESCRIPTION +* Get path service level. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_sl( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((cl_ntoh16( p_rec->sl )) & 0xF) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* SL. +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_mtu +* NAME +* ib_path_rec_mtu +* +* DESCRIPTION +* Get encoded path MTU. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_mtu( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->mtu & IB_PATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path MTU. +* 1: 256 +* 2: 512 +* 3: 1024 +* 4: 2048 +* 5: 4096 +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_mtu_sel +* NAME +* ib_path_rec_mtu_sel +* +* DESCRIPTION +* Get encoded path MTU selector. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_mtu_sel( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->mtu & IB_PATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path MTU selector value (for queries). +* 0: greater than MTU specified +* 1: less than MTU specified +* 2: exactly the MTU specified +* 3: largest MTU available +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_rate +* NAME +* ib_path_rec_rate +* +* DESCRIPTION +* Get encoded path rate. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_rate( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)(p_rec->rate & IB_PATH_REC_BASE_MASK) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path rate. +* 2: 2.5 Gb/sec. +* 3: 10 Gb/sec. +* 4: 30 Gb/sec. +* 5: 5 Gb/sec. +* 6: 20 Gb/sec. +* 7: 40 Gb/sec. +* 8: 60 Gb/sec. +* 9: 80 Gb/sec. +* 10: 120 Gb/sec. +* others: reserved +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_rate_sel +* NAME +* ib_path_rec_rate_sel +* +* DESCRIPTION +* Get encoded path rate selector. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_path_rec_rate_sel( + IN const ib_path_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->rate & IB_PATH_REC_SELECTOR_MASK) >> 6) ); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the path record object. +* +* RETURN VALUES +* Encoded path rate selector value (for queries). +* 0: greater than rate specified +* 1: less than rate specified +* 2: exactly the rate specified +* 3: largest rate available +* +* NOTES +* +* SEE ALSO +* ib_path_rec_t +*********/ + +/****f* IBA Base: Types/ib_path_rec_pkt_life +* NAME +* ib_path_rec_pkt_life +* +* DESCRIPTION +* Get encoded path pkt_life. 
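+* As a worked example (illustrative): an encoded pkt_life of 16
+* means 4.096 usec * 2^16, i.e. roughly a 268 millisecond packet
+* lifetime.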
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_path_rec_pkt_life(
+ IN const ib_path_rec_t* const p_rec )
+{
+ return( (uint8_t)(p_rec->pkt_life & IB_PATH_REC_BASE_MASK) );
+}
+/*
+* PARAMETERS
+* p_rec
+* [in] Pointer to the path record object.
+*
+* RETURN VALUES
+* Encoded path pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_pkt_life_sel
+* NAME
+* ib_path_rec_pkt_life_sel
+*
+* DESCRIPTION
+* Get encoded path pkt_lifetime selector.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_path_rec_pkt_life_sel(
+ IN const ib_path_rec_t* const p_rec )
+{
+ return( (uint8_t)((p_rec->pkt_life & IB_PATH_REC_SELECTOR_MASK) >> 6) );
+}
+/*
+* PARAMETERS
+* p_rec
+* [in] Pointer to the path record object.
+*
+* RETURN VALUES
+* Encoded path pkt_lifetime selector value (for queries).
+* 0: greater than lifetime specified
+* 1: less than lifetime specified
+* 2: exactly the lifetime specified
+* 3: smallest packet lifetime available
+*
+* NOTES
+*
+* SEE ALSO
+* ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_flow_lbl
+* NAME
+* ib_path_rec_flow_lbl
+*
+* DESCRIPTION
+* Get flow label.
+*
+* SYNOPSIS
+*/
+static inline uint32_t OSM_API
+ib_path_rec_flow_lbl(
+ IN const ib_path_rec_t* const p_rec )
+{
+ return( ((cl_ntoh32(p_rec->hop_flow_raw) >> 8) & 0x000FFFFF) );
+}
+/*
+* PARAMETERS
+* p_rec
+* [in] Pointer to the path record object.
+*
+* RETURN VALUES
+* Flow label of the path record.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_path_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_path_rec_hop_limit
+* NAME
+* ib_path_rec_hop_limit
+*
+* DESCRIPTION
+* Get hop limit.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_path_rec_hop_limit(
+ IN const ib_path_rec_t* const p_rec )
+{
+ /* The hop limit is the low byte of the host-ordered field. */
+ return( (uint8_t)(cl_ntoh32( p_rec->hop_flow_raw ) & 0x000000FF) );
+}
+/*
+* PARAMETERS
+* p_rec
+* [in] Pointer to the path record object.
+*
+* RETURN VALUES
+* Hop limit of the path record.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_path_rec_t
+*********/
+
+/****s* IBA Base: Constants/IB_CLASS_CAP_TRAP
+* NAME
+* IB_CLASS_CAP_TRAP
+*
+* DESCRIPTION
+* ClassPortInfo CapabilityMask bits. This bit will be set
+* if the class supports Trap() MADs (13.4.8.1).
+*
+* SEE ALSO
+* ib_class_port_info_t, IB_CLASS_CAP_GETSET
+*
+* SOURCE
+*/
+#define IB_CLASS_CAP_TRAP 0x0001
+/*********/
+
+/****s* IBA Base: Constants/IB_CLASS_CAP_GETSET
+* NAME
+* IB_CLASS_CAP_GETSET
+*
+* DESCRIPTION
+* ClassPortInfo CapabilityMask bits. This bit will be set
+* if the class supports Get(Notice) and Set(Notice) MADs (13.4.8.1).
+*
+* SEE ALSO
+* ib_class_port_info_t, IB_CLASS_CAP_TRAP
+*
+* SOURCE
+*/
+#define IB_CLASS_CAP_GETSET 0x0002
+/*********/
+
+/****s* IBA Base: Constants/IB_CLASS_RESP_TIME_MASK
+* NAME
+* IB_CLASS_RESP_TIME_MASK
+*
+* DESCRIPTION
+* Mask bits to extract the response time value from the
+* resp_time_val field of ib_class_port_info_t.
+*
+* SEE ALSO
+* ib_class_port_info_t
+*
+* SOURCE
+*/
+#define IB_CLASS_RESP_TIME_MASK 0x1F
+/*********/
+/****s* IBA Base: Types/ib_class_port_info_t
+* NAME
+* ib_class_port_info_t
+*
+* DESCRIPTION
+* IBA defined ClassPortInfo attribute (13.4.8.1)
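+* A minimal capability test (illustrative, not part of the original
+* header); cap_mask is network ordered, so convert before testing
+* the host-order IB_CLASS_CAP_* bits:
+*
+* boolean_t has_trap =
+* ( cl_ntoh16( p_cpi->cap_mask ) & IB_CLASS_CAP_TRAP ) != 0;
+*
+* where p_cpi is an assumed pointer to an ib_class_port_info_t.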
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_class_port_info
+{
+ uint8_t base_ver;
+ uint8_t class_ver;
+ ib_net16_t cap_mask;
+ uint8_t reserved[3];
+ uint8_t resp_time_val;
+ ib_gid_t redir_gid;
+ ib_net32_t redir_tc_sl_fl;
+ ib_net16_t redir_lid;
+ ib_net16_t redir_pkey;
+ ib_net32_t redir_qp;
+ ib_net32_t redir_qkey;
+ ib_gid_t trap_gid;
+ ib_net32_t trap_tc_sl_fl;
+ ib_net16_t trap_lid;
+ ib_net16_t trap_pkey;
+ ib_net32_t trap_hop_qp;
+ ib_net32_t trap_qkey;
+
+} PACK_SUFFIX ib_class_port_info_t;
+#include
+/*
+* FIELDS
+* base_ver
+* Maximum supported MAD Base Version.
+*
+* class_ver
+* Maximum supported management class version.
+*
+* cap_mask
+* Supported capabilities of this management class.
+*
+* resp_time_val
+* Maximum expected response time.
+*
+* redir_gid
+* GID to use for redirection, or zero
+*
+* redir_tc_sl_fl
+* Traffic class, service level and flow label the requester
+* should use if the service is redirected.
+*
+* redir_lid
+* LID used for redirection, or zero
+*
+* redir_pkey
+* P_Key used for redirection
+*
+* redir_qp
+* QP number used for redirection
+*
+* redir_qkey
+* Q_Key associated with the redirected QP. This shall be the
+* well known Q_Key value.
+*
+* trap_gid
+* GID value used for trap messages from this service.
+*
+* trap_tc_sl_fl
+* Traffic class, service level and flow label used for
+* trap messages originated by this service.
+*
+* trap_lid
+* LID used for trap messages, or zero
+*
+* trap_pkey
+* P_Key used for trap messages
+*
+* trap_hop_qp
+* Hop limit (upper 8 bits) and QP number used for trap messages
+*
+* trap_qkey
+* Q_Key associated with the trap messages QP.
+*
+* SEE ALSO
+* IB_CLASS_CAP_GETSET, IB_CLASS_CAP_TRAP
+*
+*********/
+
+/****s* IBA Base: Types/ib_sm_info_t
+* NAME
+* ib_sm_info_t
+*
+* DESCRIPTION
+* SMInfo structure (14.2.5.13).
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_sm_info
+{
+ ib_net64_t guid;
+ ib_net64_t sm_key;
+ ib_net32_t act_count;
+ uint8_t pri_state;
+
+} PACK_SUFFIX ib_sm_info_t;
+#include
+/*
+* FIELDS
+* guid
+* Port GUID for this SM.
+*
+* sm_key
+* SM_Key of this SM.
+*
+* act_count
+* Activity counter used as a heartbeat.
+*
+* pri_state
+* Priority and State information
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_sminfo_get_priority
+* NAME
+* ib_sminfo_get_priority
+*
+* DESCRIPTION
+* Returns the priority value.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_sminfo_get_priority(
+ IN const ib_sm_info_t* const p_smi )
+{
+ return( (uint8_t)((p_smi->pri_state & 0xF0)>>4) );
+}
+/*
+* PARAMETERS
+* p_smi
+* [in] Pointer to the SMInfo Attribute.
+*
+* RETURN VALUES
+* Returns the priority value.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_sminfo_get_state
+* NAME
+* ib_sminfo_get_state
+*
+* DESCRIPTION
+* Returns the state value.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_sminfo_get_state(
+ IN const ib_sm_info_t* const p_smi )
+{
+ return( (uint8_t)(p_smi->pri_state & 0x0F) );
+}
+/*
+* PARAMETERS
+* p_smi
+* [in] Pointer to the SMInfo Attribute.
+*
+* RETURN VALUES
+* Returns the state value.
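+*
+* EXAMPLE
+* Illustrative only: a pri_state byte of 0x23 decodes to priority 2
+* (high nibble) and state 3, i.e. IB_SMINFO_STATE_MASTER, so this
+* function would return 3.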
+* +* NOTES +* +* SEE ALSO +*********/ + +/****s* IBA Base: Types/ib_mad_t +* NAME +* ib_mad_t +* +* DESCRIPTION +* IBA defined MAD header (13.4.3) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_mad +{ + uint8_t base_ver; + uint8_t mgmt_class; + uint8_t class_ver; + uint8_t method; + ib_net16_t status; + ib_net16_t class_spec; + ib_net64_t trans_id; + ib_net16_t attr_id; + ib_net16_t resv; + ib_net32_t attr_mod; +} PACK_SUFFIX ib_mad_t; +#include +/* +* FIELDS +* base_ver +* MAD base format. +* +* mgmt_class +* Class of operation. +* +* class_ver +* Version of MAD class-specific format. +* +* method +* Method to perform, including 'R' bit. +* +* status +* Status of operation. +* +* class_spec +* Reserved for subnet management. +* +* trans_id +* Transaction ID. +* +* attr_id +* Attribute ID. +* +* resv +* Reserved field. +* +* attr_mod +* Attribute modifier. +* +* SEE ALSO +*********/ + + +/****s* IBA Base: Types/ib_rmpp_mad_t +* NAME +* ib_rmpp_mad_t +* +* DESCRIPTION +* IBA defined MAD RMPP header (13.6.2.1) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_rmpp_mad +{ + ib_mad_t common_hdr; + + uint8_t rmpp_version; + uint8_t rmpp_type; + uint8_t rmpp_flags; + uint8_t rmpp_status; + + ib_net32_t seg_num; + ib_net32_t paylen_newwin; + +} PACK_SUFFIX ib_rmpp_mad_t; +#include +/* +* SEE ALSO +* ib_mad_t +*********/ + + +/****f* IBA Base: Types/ib_mad_init_new +* NAME +* ib_mad_init_new +* +* DESCRIPTION +* Initializes a MAD common header. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_mad_init_new( + IN ib_mad_t* const p_mad, + IN const uint8_t mgmt_class, + IN const uint8_t class_ver, + IN const uint8_t method, + IN const ib_net64_t trans_id, + IN const ib_net16_t attr_id, + IN const ib_net32_t attr_mod ) +{ + CL_ASSERT( p_mad ); + p_mad->base_ver = 1; + p_mad->mgmt_class = mgmt_class; + p_mad->class_ver = class_ver; + p_mad->method = method; + p_mad->status = 0; + p_mad->class_spec = 0; + p_mad->trans_id = trans_id; + p_mad->attr_id = attr_id; + p_mad->resv = 0; + p_mad->attr_mod = attr_mod; +} +/* +* PARAMETERS +* p_mad +* [in] Pointer to the MAD common header. +* +* mgmt_class +* [in] Class of operation. +* +* class_ver +* [in] Version of MAD class-specific format. +* +* method +* [in] Method to perform, including 'R' bit. +* +* trans_Id +* [in] Transaction ID. +* +* attr_id +* [in] Attribute ID. +* +* attr_mod +* [in] Attribute modifier. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* ib_mad_t +*********/ + +/****f* IBA Base: Types/ib_mad_init_response +* NAME +* ib_mad_init_response +* +* DESCRIPTION +* Initializes a MAD common header as a response. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_mad_init_response( + IN const ib_mad_t* const p_req_mad, + IN ib_mad_t* const p_mad, + IN const ib_net16_t status ) +{ + CL_ASSERT( p_req_mad ); + CL_ASSERT( p_mad ); + *p_mad = *p_req_mad; + p_mad->status = status; + if( p_mad->method == IB_MAD_METHOD_SET ) + p_mad->method = IB_MAD_METHOD_GET; + p_mad->method |= IB_MAD_METHOD_RESP_MASK; +} +/* +* PARAMETERS +* p_req_mad +* [in] Pointer to the MAD common header in the original request MAD. +* +* p_mad +* [in] Pointer to the MAD common header to initialize. +* +* status +* [in] MAD Status value to return; +* +* RETURN VALUES +* None. +* +* NOTES +* p_req_mad and p_mad may point to the same MAD. +* +* SEE ALSO +* ib_mad_t +*********/ + +/****f* IBA Base: Types/ib_mad_is_response +* NAME +* ib_mad_is_response +* +* DESCRIPTION +* Returns TRUE if the MAD is a response ('R' bit set), +* FALSE otherwise. 
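+* For example (illustrative): a GetResp MAD carries method 0x81,
+* i.e. IB_MAD_METHOD_GET | IB_MAD_METHOD_RESP_MASK, so this function
+* returns TRUE for it.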
+* +* SYNOPSIS +*/ +static inline boolean_t OSM_API +ib_mad_is_response( + IN const ib_mad_t* const p_mad ) +{ + CL_ASSERT( p_mad ); + return( (p_mad->method & IB_MAD_METHOD_RESP_MASK) == + IB_MAD_METHOD_RESP_MASK ); +} +/* +* PARAMETERS +* p_mad +* [in] Pointer to the MAD. +* +* RETURN VALUES +* Returns TRUE if the MAD is a response ('R' bit set), +* FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* ib_mad_t +*********/ + +#define IB_RMPP_TYPE_DATA 1 +#define IB_RMPP_TYPE_ACK 2 +#define IB_RMPP_TYPE_STOP 3 +#define IB_RMPP_TYPE_ABORT 4 + +#define IB_RMPP_NO_RESP_TIME 0x1F +#define IB_RMPP_FLAG_ACTIVE 0x01 +#define IB_RMPP_FLAG_FIRST 0x02 +#define IB_RMPP_FLAG_LAST 0x04 + +#define IB_RMPP_STATUS_SUCCESS 0 +#define IB_RMPP_STATUS_RESX 1 /* resources exhausted */ +#define IB_RMPP_STATUS_T2L 118 /* time too long */ +#define IB_RMPP_STATUS_BAD_LEN 119 /* incon. last and payload len */ +#define IB_RMPP_STATUS_BAD_SEG 120 /* incon. first and segment no */ +#define IB_RMPP_STATUS_BADT 121 /* bad rmpp type */ +#define IB_RMPP_STATUS_W2S 122 /* newwindowlast too small */ +#define IB_RMPP_STATUS_S2B 123 /* segment no too big */ +#define IB_RMPP_STATUS_BAD_STATUS 124 /* illegal status */ +#define IB_RMPP_STATUS_UNV 125 /* unsupported version */ +#define IB_RMPP_STATUS_TMR 126 /* too many retries */ +#define IB_RMPP_STATUS_UNSPEC 127 /* unspecified */ + +/****f* IBA Base: Types/ib_rmpp_is_flag_set +* NAME +* ib_rmpp_is_flag_set +* +* DESCRIPTION +* Returns TRUE if the MAD has the given RMPP flag set. +* +* SYNOPSIS +*/ +static inline boolean_t OSM_API +ib_rmpp_is_flag_set( + IN const ib_rmpp_mad_t* const p_rmpp_mad, + IN const uint8_t flag ) +{ + CL_ASSERT( p_rmpp_mad ); + return( (p_rmpp_mad->rmpp_flags & flag) == flag ); +} +/* +* PARAMETERS +* ib_rmpp_mad_t +* [in] Pointer to a MAD with an RMPP header. +* +* flag +* [in] The RMPP flag being examined. +* +* RETURN VALUES +* Returns TRUE if the MAD has the given RMPP flag set. +* +* NOTES +* +* SEE ALSO +* ib_mad_t, ib_rmpp_mad_t +*********/ + +static inline void OSM_API +ib_rmpp_set_resp_time( + IN ib_rmpp_mad_t* const p_rmpp_mad, + IN const uint8_t resp_time ) +{ + CL_ASSERT( p_rmpp_mad ); + p_rmpp_mad->rmpp_flags |= (resp_time << 3); +} + + +static inline uint8_t OSM_API +ib_rmpp_get_resp_time( + IN const ib_rmpp_mad_t* const p_rmpp_mad ) +{ + CL_ASSERT( p_rmpp_mad ); + return( (uint8_t)(p_rmpp_mad->rmpp_flags >> 3) ); +} + +/****d* IBA Base: Constants/IB_SMP_DIRECTION +* NAME +* IB_SMP_DIRECTION +* +* DESCRIPTION +* The Direction bit for directed route SMPs. +* +* SOURCE +*/ +#define IB_SMP_DIRECTION_HO 0x8000 +#define IB_SMP_DIRECTION (CL_HTON16(IB_SMP_DIRECTION_HO)) +/**********/ + +/****d* IBA Base: Constants/IB_SMP_STATUS_MASK +* NAME +* IB_SMP_STATUS_MASK +* +* DESCRIPTION +* Mask value for extracting status from a directed route SMP. +* +* SOURCE +*/ +#define IB_SMP_STATUS_MASK_HO 0x7FFF +#define IB_SMP_STATUS_MASK (CL_HTON16(IB_SMP_STATUS_MASK_HO)) +/**********/ + +/****s* IBA Base: Types/ib_smp_t +* NAME +* ib_smp_t +* +* DESCRIPTION +* IBA defined SMP. 
(14.2.1.2)
+*
+* SYNOPSIS
+*/
+#define IB_SMP_DATA_SIZE 64
+#include
+typedef struct _ib_smp
+{
+ uint8_t base_ver;
+ uint8_t mgmt_class;
+ uint8_t class_ver;
+ uint8_t method;
+ ib_net16_t status;
+ uint8_t hop_ptr;
+ uint8_t hop_count;
+ ib_net64_t trans_id;
+ ib_net16_t attr_id;
+ ib_net16_t resv;
+ ib_net32_t attr_mod;
+ ib_net64_t m_key;
+ ib_net16_t dr_slid;
+ ib_net16_t dr_dlid;
+ uint32_t resv1[7];
+ uint8_t data[IB_SMP_DATA_SIZE];
+ uint8_t initial_path[IB_SUBNET_PATH_HOPS_MAX];
+ uint8_t return_path[IB_SUBNET_PATH_HOPS_MAX];
+
+} PACK_SUFFIX ib_smp_t;
+#include
+/*
+* FIELDS
+* base_ver
+* MAD base format.
+*
+* mgmt_class
+* Class of operation.
+*
+* class_ver
+* Version of MAD class-specific format.
+*
+* method
+* Method to perform, including 'R' bit.
+*
+* status
+* Status of operation.
+*
+* hop_ptr
+* Hop pointer for directed route MADs.
+*
+* hop_count
+* Hop count for directed route MADs.
+*
+* trans_id
+* Transaction ID.
+*
+* attr_id
+* Attribute ID.
+*
+* resv
+* Reserved field.
+*
+* attr_mod
+* Attribute modifier.
+*
+* m_key
+* Management key value.
+*
+* dr_slid
+* Directed route source LID.
+*
+* dr_dlid
+* Directed route destination LID.
+*
+* resv1
+* Reserved for 64 byte alignment.
+*
+* data
+* MAD data payload.
+*
+* initial_path
+* Outbound port list.
+*
+* return_path
+* Inbound port list.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_smp_get_status
+* NAME
+* ib_smp_get_status
+*
+* DESCRIPTION
+* Returns the SMP status value in network order.
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t OSM_API
+ib_smp_get_status(
+ IN const ib_smp_t* const p_smp )
+{
+ return( (ib_net16_t)(p_smp->status & IB_SMP_STATUS_MASK) );
+}
+/*
+* PARAMETERS
+* p_smp
+* [in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+* Returns the SMP status value in network order.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_is_response
+* NAME
+* ib_smp_is_response
+*
+* DESCRIPTION
+* Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_smp_is_response(
+ IN const ib_smp_t* const p_smp )
+{
+ return( ib_mad_is_response( (const ib_mad_t*)p_smp ) );
+}
+/*
+* PARAMETERS
+* p_smp
+* [in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+* Returns TRUE if the SMP is a response MAD, FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_is_d
+* NAME
+* ib_smp_is_d
+*
+* DESCRIPTION
+* Returns TRUE if the SMP 'D' (direction) bit is set.
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_smp_is_d(
+ IN const ib_smp_t* const p_smp )
+{
+ return( (p_smp->status & IB_SMP_DIRECTION) == IB_SMP_DIRECTION );
+}
+/*
+* PARAMETERS
+* p_smp
+* [in] Pointer to the SMP packet.
+*
+* RETURN VALUES
+* Returns TRUE if the SMP 'D' (direction) bit is set.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_smp_t
+*********/
+
+/****f* IBA Base: Types/ib_smp_init_new
+* NAME
+* ib_smp_init_new
+*
+* DESCRIPTION
+* Initializes a directed route SMP header.
+*
+* TODO
+* This is too big for inlining, but leave it here for now
+* since there is not yet another convenient spot.
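+*
+* EXAMPLE
+* A minimal sketch (illustrative only; tid stands for a caller-supplied
+* ib_net64_t transaction ID, and IB_MAD_METHOD_GET, IB_MAD_ATTR_NODE_INFO
+* and IB_LID_PERMISSIVE are assumed to be defined earlier in this header):
+*
+* uint8_t path[IB_SUBNET_PATH_HOPS_MAX] = { 0, 1, 3 };
+* ib_smp_t smp;
+*
+* ib_smp_init_new( &smp, IB_MAD_METHOD_GET, tid,
+* IB_MAD_ATTR_NODE_INFO, 0, 2, 0, path,
+* IB_LID_PERMISSIVE, IB_LID_PERMISSIVE );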
+* +* SYNOPSIS +*/ +static inline void OSM_API +ib_smp_init_new( + IN ib_smp_t* const p_smp, + IN const uint8_t method, + IN const ib_net64_t trans_id, + IN const ib_net16_t attr_id, + IN const ib_net32_t attr_mod, + IN const uint8_t hop_count, + IN const ib_net64_t m_key, + IN const uint8_t* path_out, + IN const ib_net16_t dr_slid, + IN const ib_net16_t dr_dlid ) +{ + CL_ASSERT( p_smp ); + CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX ); + p_smp->base_ver = 1; + p_smp->mgmt_class = IB_MCLASS_SUBN_DIR; + p_smp->class_ver = 1; + p_smp->method = method; + p_smp->status = 0; + p_smp->hop_ptr = 0; + p_smp->hop_count = hop_count; + p_smp->trans_id = trans_id; + p_smp->attr_id = attr_id; + p_smp->resv = 0; + p_smp->attr_mod = attr_mod; + p_smp->m_key = m_key; + p_smp->dr_slid = dr_slid; + p_smp->dr_dlid = dr_dlid; + + memset( p_smp->resv1, 0, + sizeof(p_smp->resv1) + + sizeof(p_smp->data) + + sizeof(p_smp->initial_path) + + sizeof(p_smp->return_path) ); + + /* copy the path */ + memcpy( &p_smp->initial_path, path_out, + sizeof( p_smp->initial_path ) ); +} +/* +* PARAMETERS +* p_smp +* [in] Pointer to the SMP packet. +* +* method +* [in] Method to perform, including 'R' bit. +* +* trans_Id +* [in] Transaction ID. +* +* attr_id +* [in] Attribute ID. +* +* attr_mod +* [in] Attribute modifier. +* +* hop_count +* [in] Number of hops in the path. +* +* m_key +* [in] Management key for this SMP. +* +* path_out +* [in] Port array for outbound path. +* +* +* RETURN VALUES +* None. +* +* NOTES +* Payload area is initialized to zero. +* +* +* SEE ALSO +* ib_mad_t +*********/ + +/****f* IBA Base: Types/ib_smp_get_payload_ptr +* NAME +* ib_smp_get_payload_ptr +* +* DESCRIPTION +* Gets a pointer to the SMP payload area. +* +* SYNOPSIS +*/ +static inline void* OSM_API +ib_smp_get_payload_ptr( + IN const ib_smp_t* const p_smp ) +{ + return( (void*)p_smp->data ); +} +/* +* PARAMETERS +* p_smp +* [in] Pointer to the SMP packet. +* +* RETURN VALUES +* Pointer to SMP payload area. +* +* NOTES +* +* SEE ALSO +* ib_mad_t +*********/ + +/****s* IBA Base: Types/ib_node_info_t +* NAME +* ib_node_info_t +* +* DESCRIPTION +* IBA defined NodeInfo. (14.2.5.3) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_node_info +{ + uint8_t base_version; + uint8_t class_version; + uint8_t node_type; + uint8_t num_ports; + ib_net64_t sys_guid; + ib_net64_t node_guid; + ib_net64_t port_guid; + ib_net16_t partition_cap; + ib_net16_t device_id; + ib_net32_t revision; + ib_net32_t port_num_vendor_id; + +} PACK_SUFFIX ib_node_info_t; +#include +/************/ + +/****s* IBA Base: Types/ib_sa_mad_t +* NAME +* ib_sa_mad_t +* +* DESCRIPTION +* IBA defined SA MAD format. 
(15.2.1)
+*
+* SYNOPSIS
+*/
+#define IB_SA_DATA_SIZE 200
+
+#include
+typedef struct _ib_sa_mad
+{
+ uint8_t base_ver;
+ uint8_t mgmt_class;
+ uint8_t class_ver;
+ uint8_t method;
+ ib_net16_t status;
+ ib_net16_t resv;
+ ib_net64_t trans_id;
+ ib_net16_t attr_id;
+ ib_net16_t resv1;
+ ib_net32_t attr_mod;
+
+ uint8_t rmpp_version;
+ uint8_t rmpp_type;
+ uint8_t rmpp_flags;
+ uint8_t rmpp_status;
+
+ ib_net32_t seg_num;
+ ib_net32_t paylen_newwin;
+
+ ib_net64_t sm_key;
+
+ ib_net16_t attr_offset;
+ ib_net16_t resv3;
+
+ ib_net64_t comp_mask;
+
+ uint8_t data[IB_SA_DATA_SIZE];
+} PACK_SUFFIX ib_sa_mad_t;
+#include
+/**********/
+#define IB_SA_MAD_HDR_SIZE (sizeof(ib_sa_mad_t) - IB_SA_DATA_SIZE)
+
+static inline uint32_t OSM_API
+ib_get_attr_size(
+ IN const ib_net16_t attr_offset )
+{
+ return( ((uint32_t)cl_ntoh16( attr_offset )) << 3 );
+}
+
+static inline ib_net16_t OSM_API
+ib_get_attr_offset(
+ IN const uint32_t attr_size )
+{
+ return( cl_hton16( (uint16_t)(attr_size >> 3) ) );
+}
+
+/****f* IBA Base: Types/ib_sa_mad_get_payload_ptr
+* NAME
+* ib_sa_mad_get_payload_ptr
+*
+* DESCRIPTION
+* Gets a pointer to the SA MAD's payload area.
+*
+* SYNOPSIS
+*/
+static inline void* OSM_API
+ib_sa_mad_get_payload_ptr(
+ IN const ib_sa_mad_t* const p_sa_mad )
+{
+ return( (void*)p_sa_mad->data );
+}
+/*
+* PARAMETERS
+* p_sa_mad
+* [in] Pointer to the SA MAD packet.
+*
+* RETURN VALUES
+* Pointer to SA MAD payload area.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_mad_t
+*********/
+
+#define IB_NODE_INFO_PORT_NUM_MASK (CL_NTOH32(0xFF000000))
+#define IB_NODE_INFO_VEND_ID_MASK (CL_NTOH32(0x00FFFFFF))
+#if CPU_LE
+ #define IB_NODE_INFO_PORT_NUM_SHIFT 0
+#else
+ #define IB_NODE_INFO_PORT_NUM_SHIFT 24
+#endif
+
+/****f* IBA Base: Types/ib_node_info_get_local_port_num
+* NAME
+* ib_node_info_get_local_port_num
+*
+* DESCRIPTION
+* Gets the local port number from the NodeInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_node_info_get_local_port_num(
+ IN const ib_node_info_t* const p_ni )
+{
+ return( (uint8_t)(( p_ni->port_num_vendor_id &
+ IB_NODE_INFO_PORT_NUM_MASK )
+ >> IB_NODE_INFO_PORT_NUM_SHIFT ));
+}
+/*
+* PARAMETERS
+* p_ni
+* [in] Pointer to a NodeInfo attribute.
+*
+* RETURN VALUES
+* Local port number of the port that returned the attribute.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_node_info_t
+*********/
+
+/****f* IBA Base: Types/ib_node_info_get_vendor_id
+* NAME
+* ib_node_info_get_vendor_id
+*
+* DESCRIPTION
+* Gets the VendorID from the NodeInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline ib_net32_t OSM_API
+ib_node_info_get_vendor_id(
+ IN const ib_node_info_t* const p_ni )
+{
+ return( (ib_net32_t)( p_ni->port_num_vendor_id &
+ IB_NODE_INFO_VEND_ID_MASK ) );
+}
+/*
+* PARAMETERS
+* p_ni
+* [in] Pointer to a NodeInfo attribute.
+*
+* RETURN VALUES
+* VendorID from the attribute.
+*
+* NOTES
+*
+* SEE ALSO
+* ib_node_info_t
+*********/
+
+#define IB_NODE_DESCRIPTION_SIZE 64
+
+#include
+typedef struct _ib_node_desc
+{
+ // Node String is an array of UTF-8 characters that
+ // describes the node in text format.
+ // Note that this string is NOT NULL TERMINATED!
+ uint8_t description[IB_NODE_DESCRIPTION_SIZE];
+
+} PACK_SUFFIX ib_node_desc_t;
+#include
+
+#include
+typedef struct _ib_node_record_t
+{
+ ib_net16_t lid;
+ ib_net16_t resv;
+ ib_node_info_t node_info;
+ ib_node_desc_t node_desc;
+ uint8_t pad[4];
+
+} PACK_SUFFIX ib_node_record_t;
+#include
+
+/****s* IBA Base: Types/ib_port_info_t
+* NAME
+* ib_port_info_t
+*
+* DESCRIPTION
+* IBA defined PortInfo.
(14.2.5.6) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_port_info +{ + ib_net64_t m_key; + ib_net64_t subnet_prefix; + ib_net16_t base_lid; + ib_net16_t master_sm_base_lid; + ib_net32_t capability_mask; + ib_net16_t diag_code; + ib_net16_t m_key_lease_period; + uint8_t local_port_num; + uint8_t link_width_enabled; + uint8_t link_width_supported; + uint8_t link_width_active; + uint8_t state_info1; /* LinkSpeedSupported and PortState */ + uint8_t state_info2; /* PortPhysState and LinkDownDefaultState */ + uint8_t mkey_lmc; + uint8_t link_speed; /* LinkSpeedEnabled and LinkSpeedActive */ + uint8_t mtu_smsl; + uint8_t vl_cap; /* VLCap and InitType */ + uint8_t vl_high_limit; + uint8_t vl_arb_high_cap; + uint8_t vl_arb_low_cap; + uint8_t mtu_cap; + uint8_t vl_stall_life; + uint8_t vl_enforce; + ib_net16_t m_key_violations; + ib_net16_t p_key_violations; + ib_net16_t q_key_violations; + uint8_t guid_cap; + uint8_t subnet_timeout; /* cli_rereg(1b), resrv( +2b), timeout(5b) */ + uint8_t resp_time_value; + uint8_t error_threshold; + +} PACK_SUFFIX ib_port_info_t; +#include +/************/ + +#define IB_PORT_STATE_MASK 0x0F +#define IB_PORT_LMC_MASK 0x07 +#define IB_PORT_LMC_MAX 0x07 +#define IB_PORT_MPB_MASK 0xC0 +#define IB_PORT_MPB_SHIFT 6 +#define IB_PORT_LINK_SPEED_SHIFT 4 +#define IB_PORT_LINK_SPEED_SUPPORTED_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ACTIVE_MASK 0xF0 +#define IB_PORT_LINK_SPEED_ENABLED_MASK 0x0F +#define IB_PORT_PHYS_STATE_MASK 0xF0 +#define IB_PORT_PHYS_STATE_SHIFT 4 +#define IB_PORT_LNKDWNDFTSTATE_MASK 0x0F + +#define IB_PORT_CAP_RESV0 (CL_NTOH32(0x00000001)) +#define IB_PORT_CAP_IS_SM (CL_NTOH32(0x00000002)) +#define IB_PORT_CAP_HAS_NOTICE (CL_NTOH32(0x00000004)) +#define IB_PORT_CAP_HAS_TRAP (CL_NTOH32(0x00000008)) +#define IB_PORT_CAP_HAS_IPD (CL_NTOH32(0x00000010)) +#define IB_PORT_CAP_HAS_AUTO_MIG (CL_NTOH32(0x00000020)) +#define IB_PORT_CAP_HAS_SL_MAP (CL_NTOH32(0x00000040)) +#define IB_PORT_CAP_HAS_NV_MKEY (CL_NTOH32(0x00000080)) +#define IB_PORT_CAP_HAS_NV_PKEY (CL_NTOH32(0x00000100)) +#define IB_PORT_CAP_HAS_LED_INFO (CL_NTOH32(0x00000200)) +#define IB_PORT_CAP_SM_DISAB (CL_NTOH32(0x00000400)) +#define IB_PORT_CAP_HAS_SYS_IMG_GUID (CL_NTOH32(0x00000800)) +#define IB_PORT_CAP_HAS_PKEY_SW_EXT_PORT_TRAP (CL_NTOH32(0x00001000)) +#define IB_PORT_CAP_RESV13 (CL_NTOH32(0x00002000)) +#define IB_PORT_CAP_RESV14 (CL_NTOH32(0x00004000)) +#define IB_PORT_CAP_RESV15 (CL_NTOH32(0x00008000)) +#define IB_PORT_CAP_HAS_COM_MGT (CL_NTOH32(0x00010000)) +#define IB_PORT_CAP_HAS_SNMP (CL_NTOH32(0x00020000)) +#define IB_PORT_CAP_REINIT (CL_NTOH32(0x00040000)) +#define IB_PORT_CAP_HAS_DEV_MGT (CL_NTOH32(0x00080000)) +#define IB_PORT_CAP_HAS_VEND_CLS (CL_NTOH32(0x00100000)) +#define IB_PORT_CAP_HAS_DR_NTC (CL_NTOH32(0x00200000)) +#define IB_PORT_CAP_HAS_CAP_NTC (CL_NTOH32(0x00400000)) +#define IB_PORT_CAP_HAS_BM (CL_NTOH32(0x00800000)) +#define IB_PORT_CAP_HAS_LINK_RT_LATENCY (CL_NTOH32(0x01000000)) +#define IB_PORT_CAP_HAS_CLIENT_REREG (CL_NTOH32(0x02000000)) +#define IB_PORT_CAP_RESV26 (CL_NTOH32(0x04000000)) +#define IB_PORT_CAP_RESV27 (CL_NTOH32(0x08000000)) +#define IB_PORT_CAP_RESV28 (CL_NTOH32(0x10000000)) +#define IB_PORT_CAP_RESV29 (CL_NTOH32(0x20000000)) +#define IB_PORT_CAP_RESV30 (CL_NTOH32(0x40000000)) +#define IB_PORT_CAP_RESV31 (CL_NTOH32(0x80000000)) + +/****f* IBA Base: Types/ib_port_info_get_port_state +* NAME +* ib_port_info_get_port_state +* +* DESCRIPTION +* Returns the port state. 
+* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_port_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->state_info1 & IB_PORT_STATE_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Port state. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_port_state +* NAME +* ib_port_info_set_port_state +* +* DESCRIPTION +* Sets the port state. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_port_state( + IN ib_port_info_t* const p_pi, + IN const uint8_t port_state ) +{ + p_pi->state_info1 = (uint8_t)((p_pi->state_info1 & 0xF0) | port_state ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* port_state +* [in] Port state value to set. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_vl_cap +* NAME +* ib_port_info_get_vl_cap +* +* DESCRIPTION +* Gets the VL Capability of a port. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_vl_cap( + IN const ib_port_info_t* const p_pi) +{ + return((p_pi->vl_cap >> 4) & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* VL_CAP field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_init_type +* NAME +* ib_port_info_get_init_type +* +* DESCRIPTION +* Gets the init type of a port. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_init_type( + IN const ib_port_info_t* const p_pi) +{ + return (uint8_t) (p_pi->vl_cap & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* InitType field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_op_vls +* NAME +* ib_port_info_get_op_vls +* +* DESCRIPTION +* Gets the operational VLs on a port. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_op_vls( + IN const ib_port_info_t* const p_pi) +{ + return((p_pi->vl_enforce >> 4) & 0x0F); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* OP_VLS field +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_op_vls +* NAME +* ib_port_info_set_op_vls +* +* DESCRIPTION +* Sets the operational VLs on a port. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_op_vls( + IN ib_port_info_t* const p_pi, + IN const uint8_t op_vls ) +{ + p_pi->vl_enforce = (uint8_t)((p_pi->vl_enforce & 0x0F) | (op_vls << 4) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* op_vls +* [in] Encoded operation VLs value. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_state_no_change +* NAME +* ib_port_info_set_state_no_change +* +* DESCRIPTION +* Sets the port state fields to the value for "no change". +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_state_no_change( + IN ib_port_info_t* const p_pi ) +{ + ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE ); + p_pi->state_info2 = 0; +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_speed_sup +* NAME +* ib_port_info_get_link_speed_sup +* +* DESCRIPTION +* Returns the encoded value for the link speed supported. 
+* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_link_speed_sup( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->state_info1 & + IB_PORT_LINK_SPEED_SUPPORTED_MASK) >> + IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the link speed supported. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_link_speed_sup +* NAME +* ib_port_info_set_link_speed_sup +* +* DESCRIPTION +* Given an integer of the supported link speed supported. +* Set the appropriate bits in state_info1 +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_link_speed_sup( + IN uint8_t const speed, + IN ib_port_info_t* p_pi ) +{ + p_pi->state_info1 = + ( ~IB_PORT_LINK_SPEED_SUPPORTED_MASK & p_pi->state_info1 ) | + ( IB_PORT_LINK_SPEED_SUPPORTED_MASK & + (speed << IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* speed +* [in] Supported Speeds Code. +* +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_port_phys_state +* NAME +* ib_port_info_get_port_phys_state +* +* DESCRIPTION +* Returns the encoded value for the port physical state. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_port_phys_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->state_info2 & + IB_PORT_PHYS_STATE_MASK) >> + IB_PORT_PHYS_STATE_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the port physical state. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_port_phys_state +* NAME +* ib_port_info_set_port_phys_state +* +* DESCRIPTION +* Given an integer of the port physical state, +* Set the appropriate bits in state_info2 +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_port_phys_state( + IN uint8_t const phys_state, + IN ib_port_info_t* p_pi ) +{ + p_pi->state_info2 = + ( ~IB_PORT_PHYS_STATE_MASK & p_pi->state_info2 ) | + ( IB_PORT_PHYS_STATE_MASK & + (phys_state << IB_PORT_PHYS_STATE_SHIFT) ); +} +/* +* PARAMETERS +* phys_state +* [in] port physical state. +* +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_down_def_state +* NAME +* ib_port_info_get_link_down_def_state +* +* DESCRIPTION +* Returns the link down default state. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_link_down_def_state( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)(p_pi->state_info2 & IB_PORT_LNKDWNDFTSTATE_MASK) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* link down default state of the port. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_set_link_down_def_state +* NAME +* ib_port_info_set_link_down_def_state +* +* DESCRIPTION +* Sets the link down default state of the port. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_port_info_set_link_down_def_state( + IN ib_port_info_t* const p_pi, + IN const uint8_t link_dwn_state ) +{ + p_pi->state_info2 = (uint8_t)((p_pi->state_info2 & 0xF0) | link_dwn_state ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. 
+* +* link_dwn_state +* [in] Link down default state of the port. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_port_info_get_link_speed_active +* NAME +* ib_port_info_get_link_speed_active +* +* DESCRIPTION +* Returns the Link Speed Active value assigned to this port. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_get_link_speed_active( + IN const ib_port_info_t* const p_pi ) +{ + return( (uint8_t)((p_pi->link_speed & + IB_PORT_LINK_SPEED_ACTIVE_MASK) >> + IB_PORT_LINK_SPEED_SHIFT) ); +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the link speed active value assigned to this port. +* +* NOTES +* +* SEE ALSO +*********/ + +#define IB_LINK_WIDTH_ACTIVE_1X 1 +#define IB_LINK_WIDTH_ACTIVE_4X 2 +#define IB_LINK_WIDTH_ACTIVE_12X 8 +#define IB_LINK_SPEED_ACTIVE_2_5 1 +#define IB_LINK_SPEED_ACTIVE_5 2 +#define IB_LINK_SPEED_ACTIVE_10 4 + +/* following v1 ver1.2 p901 */ +#define IB_PATH_RECORD_RATE_2_5_GBS 2 +#define IB_PATH_RECORD_RATE_10_GBS 3 +#define IB_PATH_RECORD_RATE_30_GBS 4 +#define IB_PATH_RECORD_RATE_5_GBS 5 +#define IB_PATH_RECORD_RATE_20_GBS 6 +#define IB_PATH_RECORD_RATE_40_GBS 7 +#define IB_PATH_RECORD_RATE_60_GBS 8 +#define IB_PATH_RECORD_RATE_80_GBS 9 +#define IB_PATH_RECORD_RATE_120_GBS 10 + +#define IB_MIN_RATE IB_PATH_RECORD_RATE_2_5_GBS +#define IB_MAX_RATE IB_PATH_RECORD_RATE_120_GBS + +/****f* IBA Base: Types/ib_port_info_compute_rate +* NAME +* ib_port_info_compute_rate +* +* DESCRIPTION +* Returns the encoded value for the path rate. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_port_info_compute_rate( + IN const ib_port_info_t* const p_pi ) +{ + uint8_t rate = 0; + + switch (ib_port_info_get_link_speed_active(p_pi)) + { + case IB_LINK_SPEED_ACTIVE_2_5: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate = IB_PATH_RECORD_RATE_30_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + } + break; + case IB_LINK_SPEED_ACTIVE_5: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_5_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_20_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate = IB_PATH_RECORD_RATE_60_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_5_GBS; + break; + } + break; + case IB_LINK_SPEED_ACTIVE_10: + switch (p_pi->link_width_active) + { + case IB_LINK_WIDTH_ACTIVE_1X: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_4X: + rate = IB_PATH_RECORD_RATE_40_GBS; + break; + + case IB_LINK_WIDTH_ACTIVE_12X: + rate =IB_PATH_RECORD_RATE_120_GBS; + break; + + default: + rate = IB_PATH_RECORD_RATE_10_GBS; + break; + } + break; + default: + rate = IB_PATH_RECORD_RATE_2_5_GBS; + break; + } + + return rate; +} +/* +* PARAMETERS +* p_pi +* [in] Pointer to a PortInfo attribute. +* +* RETURN VALUES +* Returns the encoded value for the link speed supported. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_path_get_ipd +* NAME +* ib_path_get_ipd +* +* DESCRIPTION +* Returns the encoded value for the inter packet delay. 
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_path_get_ipd(
+	IN uint8_t local_link_width_supported,
+	IN uint8_t path_rec_rate )
+{
+	uint8_t ipd = 0;
+
+	switch(local_link_width_supported)
+	{
+	/* link_width_supported = 1: 1x */
+	case 1:
+		break;
+
+	/* link_width_supported = 3: 1x or 4x */
+	case 3:
+		switch(path_rec_rate & 0x3F)
+		{
+		case IB_PATH_RECORD_RATE_2_5_GBS:
+			ipd = 3;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	/* link_width_supported = 11: 1x or 4x or 12x */
+	case 11:
+		switch(path_rec_rate & 0x3F)
+		{
+		case IB_PATH_RECORD_RATE_2_5_GBS:
+			ipd = 11;
+			break;
+		case IB_PATH_RECORD_RATE_10_GBS:
+			ipd = 2;
+			break;
+		default:
+			break;
+		}
+		break;
+
+	default:
+		break;
+	}
+
+	return ipd;
+}
+/*
+* PARAMETERS
+*	local_link_width_supported
+*		[in] link width supported for this port
+*
+*	path_rec_rate
+*		[in] rate field of the path record
+*
+* RETURN VALUES
+*	Returns the ipd
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_mtu_cap
+* NAME
+*	ib_port_info_get_mtu_cap
+*
+* DESCRIPTION
+*	Returns the encoded value for the maximum MTU supported by this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_mtu_cap(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)(p_pi->mtu_cap & 0x0F) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the encoded value for the maximum MTU supported by this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_neighbor_mtu
+* NAME
+*	ib_port_info_get_neighbor_mtu
+*
+* DESCRIPTION
+*	Returns the encoded value for the neighbor MTU at this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_neighbor_mtu(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)((p_pi->mtu_smsl & 0xF0) >> 4) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the encoded value for the neighbor MTU at this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_neighbor_mtu
+* NAME
+*	ib_port_info_set_neighbor_mtu
+*
+* DESCRIPTION
+*	Sets the Neighbor MTU value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_neighbor_mtu(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t mtu )
+{
+	CL_ASSERT( mtu <= 5 );
+	CL_ASSERT( mtu != 0 );
+	p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0x0F) | (mtu << 4));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	mtu
+*		[in] Encoded MTU value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_master_smsl
+* NAME
+*	ib_port_info_get_master_smsl
+*
+* DESCRIPTION
+*	Returns the encoded value for the Master SMSL at this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_master_smsl(
+	IN const ib_port_info_t* const p_pi )
+{
+	return (uint8_t) (p_pi->mtu_smsl & 0x0F);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the encoded value for the Master SMSL at this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_master_smsl
+* NAME
+*	ib_port_info_set_master_smsl
+*
+* DESCRIPTION
+*	Sets the Master SMSL value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_master_smsl(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t smsl )
+{
+	p_pi->mtu_smsl = (uint8_t)((p_pi->mtu_smsl & 0xF0) | smsl );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	smsl
+*		[in] Encoded Master SMSL value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_timeout
+* NAME
+*	ib_port_info_set_timeout
+*
+* DESCRIPTION
+*	Sets the encoded subnet timeout value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_timeout(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t timeout )
+{
+	CL_ASSERT( timeout <= 0x1F );
+	p_pi->subnet_timeout =
+		(uint8_t)(
+		(p_pi->subnet_timeout & 0x80) | (timeout & 0x1F));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	timeout
+*		[in] Encoded timeout value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_client_rereg
+* NAME
+*	ib_port_info_set_client_rereg
+*
+* DESCRIPTION
+*	Sets the encoded client reregistration bit value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_client_rereg(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t client_rereg )
+{
+	CL_ASSERT( client_rereg <= 0x1 );
+	p_pi->subnet_timeout =
+		(uint8_t)(
+		(p_pi->subnet_timeout & 0x1F) | ((client_rereg << 7) & 0x80));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	client_rereg
+*		[in] Client reregistration value to set (either 1 or 0).
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_timeout
+* NAME
+*	ib_port_info_get_timeout
+*
+* DESCRIPTION
+*	Gets the encoded subnet timeout value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_timeout(
+	IN ib_port_info_t const* p_pi )
+{
+	return(p_pi->subnet_timeout & 0x1F );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	The encoded timeout value
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_client_rereg
+* NAME
+*	ib_port_info_get_client_rereg
+*
+* DESCRIPTION
+*	Gets the encoded client reregistration bit value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_client_rereg(
+	IN ib_port_info_t const* p_pi )
+{
+	return ( (p_pi->subnet_timeout & 0x80 ) >> 7);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Client reregistration value (either 1 or 0).
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_hoq_lifetime
+* NAME
+*	ib_port_info_set_hoq_lifetime
+*
+* DESCRIPTION
+*	Sets the Head of Queue Lifetime for which a packet can live in the head
+*	of VL queue
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_hoq_lifetime(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t hoq_life )
+{
+	p_pi->vl_stall_life = (uint8_t)((hoq_life & 0x1f) |
+		(p_pi->vl_stall_life & 0xe0));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	hoq_life
+*		[in] Encoded lifetime value to set
+*
+* RETURN VALUES
+*	None.
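+*
+* EXAMPLE
+*	A minimal sketch (values are illustrative only) of how the 5-bit
+*	HOQ lifetime shares the vl_stall_life byte with the 3-bit stall
+*	count set by ib_port_info_set_vl_stall_count below:
+*
+*		ib_port_info_set_hoq_lifetime( p_pi, 0x12 );
+*		ib_port_info_set_vl_stall_count( p_pi, 7 );
+*
+*	after which vl_stall_life holds 0xF2 = (7 << 5) | 0x12.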
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_hoq_lifetime
+* NAME
+*	ib_port_info_get_hoq_lifetime
+*
+* DESCRIPTION
+*	Gets the Head of Queue Lifetime for which a packet can live in the head
+*	of VL queue
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_hoq_lifetime(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)(p_pi->vl_stall_life & 0x1f) );
+}
+
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Encoded lifetime value
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_vl_stall_count
+* NAME
+*	ib_port_info_set_vl_stall_count
+*
+* DESCRIPTION
+*	Sets the VL Stall Count, which defines the number of contiguous
+*	HLL (hoq) drops that will put the VL into stalled mode.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_vl_stall_count(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t vl_stall_count )
+{
+	p_pi->vl_stall_life = (uint8_t)((p_pi->vl_stall_life & 0x1f) |
+		((vl_stall_count << 5) & 0xe0));
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	vl_stall_count
+*		[in] value to set
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_vl_stall_count
+* NAME
+*	ib_port_info_get_vl_stall_count
+*
+* DESCRIPTION
+*	Gets the VL Stall Count, which defines the number of contiguous
+*	HLL (hoq) drops that will put the VL into stalled mode
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_vl_stall_count(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)((p_pi->vl_stall_life & 0xe0) >> 5) );
+}
+
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	vl stall count
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_lmc
+* NAME
+*	ib_port_info_get_lmc
+*
+* DESCRIPTION
+*	Returns the LMC value assigned to this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_lmc(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)(p_pi->mkey_lmc & IB_PORT_LMC_MASK) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the LMC value assigned to this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_lmc
+* NAME
+*	ib_port_info_set_lmc
+*
+* DESCRIPTION
+*	Sets the LMC value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_lmc(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t lmc )
+{
+	CL_ASSERT( lmc <= IB_PORT_LMC_MAX );
+	p_pi->mkey_lmc = (uint8_t)((p_pi->mkey_lmc & 0xF8) | lmc);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	lmc
+*		[in] LMC value to set; must not exceed IB_PORT_LMC_MAX (7).
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_link_speed_enabled
+* NAME
+*	ib_port_info_get_link_speed_enabled
+*
+* DESCRIPTION
+*	Returns the link speed enabled value assigned to this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_link_speed_enabled(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)(p_pi->link_speed & IB_PORT_LINK_SPEED_ENABLED_MASK) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	The link speed enabled value assigned to this port.
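+*
+* EXAMPLE
+*	A short sketch of a read-modify-write through the accessors; the
+*	0x02 bit value is illustrative and assumes the port's
+*	link_speed_supported field reports that speed:
+*
+*		uint8_t enabled = ib_port_info_get_link_speed_enabled( p_pi );
+*		ib_port_info_set_link_speed_enabled( p_pi, enabled | 0x02 );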
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_link_speed_enabled
+* NAME
+*	ib_port_info_set_link_speed_enabled
+*
+* DESCRIPTION
+*	Sets the link speed enabled value in the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_link_speed_enabled(
+	IN ib_port_info_t* const p_pi,
+	IN const uint8_t link_speed_enabled )
+{
+	p_pi->link_speed = (uint8_t)((p_pi->link_speed & 0xF0) | link_speed_enabled );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+*	link_speed_enabled
+*		[in] link speed enabled value to set.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_mpb
+* NAME
+*	ib_port_info_get_mpb
+*
+* DESCRIPTION
+*	Returns the M_Key protect bits assigned to this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_mpb(
+	IN const ib_port_info_t* const p_pi )
+{
+	return( (uint8_t)((p_pi->mkey_lmc & IB_PORT_MPB_MASK) >>
+		IB_PORT_MPB_SHIFT) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the M_Key protect bits assigned to this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_mpb
+* NAME
+*	ib_port_info_set_mpb
+*
+* DESCRIPTION
+*	Set the M_Key protect bits of this port.
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_mpb(
+	IN ib_port_info_t* p_pi,
+	IN uint8_t mpb )
+{
+	p_pi->mkey_lmc =
+		(~IB_PORT_MPB_MASK & p_pi->mkey_lmc) |
+		( IB_PORT_MPB_MASK & (mpb << IB_PORT_MPB_SHIFT) );
+}
+/*
+* PARAMETERS
+*	mpb
+*		[in] M_Key protect bits
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_local_phy_err_thd
+* NAME
+*	ib_port_info_get_local_phy_err_thd
+*
+* DESCRIPTION
+*	Returns the Phy Link Threshold
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_local_phy_err_thd(
+	IN const ib_port_info_t* const p_pi )
+{
+	return (uint8_t)( (p_pi->error_threshold & 0xF0) >> 4);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the Phy Link error threshold assigned to this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_get_overrun_err_thd
+* NAME
+*	ib_port_info_get_overrun_err_thd
+*
+* DESCRIPTION
+*	Returns the Credits Overrun Errors Threshold
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_port_info_get_overrun_err_thd(
+	IN const ib_port_info_t* const p_pi )
+{
+	return (uint8_t)(p_pi->error_threshold & 0x0F);
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+*
+* RETURN VALUES
+*	Returns the Credits Overrun errors threshold assigned to this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_port_info_set_phy_and_overrun_err_thd
+* NAME
+*	ib_port_info_set_phy_and_overrun_err_thd
+*
+* DESCRIPTION
+*	Sets the Phy Link and Credits Overrun Errors Threshold
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_port_info_set_phy_and_overrun_err_thd(
+	IN ib_port_info_t* const p_pi,
+	IN uint8_t phy_threshold,
+	IN uint8_t overrun_threshold )
+{
+	p_pi->error_threshold =
+		(uint8_t)( ((phy_threshold & 0x0F) << 4) | (overrun_threshold & 0x0F) );
+}
+/*
+* PARAMETERS
+*	p_pi
+*		[in] Pointer to a PortInfo attribute.
+* +* phy_threshold +* [in] Physical Link Errors Threshold above which Trap 129 is generated +* +* overrun_threshold +* [in] Credits overrun Errors Threshold above which Trap 129 is generated +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +typedef uint8_t ib_svc_name_t[64]; + +#include +typedef struct _ib_service_record +{ + ib_net64_t service_id; + ib_gid_t service_gid; + ib_net16_t service_pkey; + ib_net16_t resv; + ib_net32_t service_lease; + uint8_t service_key[16]; + ib_svc_name_t service_name; + uint8_t service_data8[16]; + ib_net16_t service_data16[8]; + ib_net32_t service_data32[4]; + ib_net64_t service_data64[2]; + +} PACK_SUFFIX ib_service_record_t; +#include + +#include +typedef struct _ib_portinfo_record +{ + ib_net16_t lid; + uint8_t port_num; + uint8_t resv; + ib_port_info_t port_info; + uint8_t pad[6]; + +} PACK_SUFFIX ib_portinfo_record_t; +#include + +#include +typedef struct _ib_link_record +{ + ib_net16_t from_lid; + uint8_t from_port_num; + uint8_t to_port_num; + ib_net16_t to_lid; + uint8_t pad[2]; + +} PACK_SUFFIX ib_link_record_t; +#include + +#include +typedef struct _ib_sminfo_record +{ + ib_net16_t lid; + uint16_t resv0; + ib_sm_info_t sm_info; + uint8_t pad[7]; + +} PACK_SUFFIX ib_sminfo_record_t; +#include + +/****s* IBA Base: Types/ib_lft_record_t +* NAME +* ib_lft_record_t +* +* DESCRIPTION +* IBA defined LinearForwardingTableRecord (15.2.5.6) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_lft_record +{ + ib_net16_t lid; + ib_net16_t block_num; + uint32_t resv0; + uint8_t lft[64]; +} PACK_SUFFIX ib_lft_record_t; +#include +/************/ + +/****s* IBA Base: Types/ib_mft_record_t +* NAME +* ib_mft_record_t +* +* DESCRIPTION +* IBA defined MulticastForwardingTableRecord (15.2.5.8) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_mft_record +{ + ib_net16_t lid; + ib_net16_t position_block_num; + uint32_t resv0; + ib_net16_t mft[IB_MCAST_BLOCK_SIZE]; +} PACK_SUFFIX ib_mft_record_t; +#include +/************/ + +/****s* IBA Base: Types/ib_switch_info_t +* NAME +* ib_switch_info_t +* +* DESCRIPTION +* IBA defined SwitchInfo. (14.2.5.4) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_switch_info +{ + ib_net16_t lin_cap; + ib_net16_t rand_cap; + ib_net16_t mcast_cap; + ib_net16_t lin_top; + uint8_t def_port; + uint8_t def_mcast_pri_port; + uint8_t def_mcast_not_port; + uint8_t life_state; + ib_net16_t lids_per_port; + ib_net16_t enforce_cap; + uint8_t flags; + +} PACK_SUFFIX ib_switch_info_t; +#include +/************/ + +#include +typedef struct _ib_switch_info_record +{ + ib_net16_t lid; + uint16_t resv0; + ib_switch_info_t switch_info; + uint8_t pad[3]; + +} PACK_SUFFIX ib_switch_info_record_t; +#include + +#define IB_SWITCH_PSC 0x04 + +/****f* IBA Base: Types/ib_switch_info_get_state_change +* NAME +* ib_switch_info_get_state_change +* +* DESCRIPTION +* Returns the value of the state change flag. +* +* SYNOPSIS +*/ +static inline boolean_t OSM_API +ib_switch_info_get_state_change( + IN const ib_switch_info_t* const p_si ) +{ + return( (p_si->life_state & IB_SWITCH_PSC) == IB_SWITCH_PSC ); +} +/* +* PARAMETERS +* p_si +* [in] Pointer to a SwitchInfo attribute. +* +* RETURN VALUES +* Returns the value of the state change flag. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* IBA Base: Types/ib_switch_info_clear_state_change +* NAME +* ib_switch_info_clear_state_change +* +* DESCRIPTION +* Clears the switch's state change bit. 
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_switch_info_clear_state_change(
+	IN ib_switch_info_t* const p_si )
+{
+	p_si->life_state = (uint8_t)(p_si->life_state & 0xFB);
+}
+/*
+* PARAMETERS
+*	p_si
+*		[in] Pointer to a SwitchInfo attribute.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_switch_info_is_enhanced_port0
+* NAME
+*	ib_switch_info_is_enhanced_port0
+*
+* DESCRIPTION
+*	Returns TRUE if the enhancedPort0 bit is on (meaning the switch
+*	port zero supports enhanced functions).
+*	Returns FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_switch_info_is_enhanced_port0(
+	IN const ib_switch_info_t* const p_si )
+{
+	return( (p_si->flags & 0x08) == 0x08 );
+}
+/*
+* PARAMETERS
+*	p_si
+*		[in] Pointer to a SwitchInfo attribute.
+*
+* RETURN VALUES
+*	Returns TRUE if the switch supports enhanced port 0. FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****s* IBA Base: Types/ib_guid_info_t
+* NAME
+*	ib_guid_info_t
+*
+* DESCRIPTION
+*	IBA defined GuidInfo. (14.2.5.5)
+*
+* SYNOPSIS
+*/
+#define GUID_TABLE_MAX_ENTRIES 8
+
+#include <complib/cl_packon.h>
+typedef struct _ib_guid_info
+{
+	ib_net64_t	guid[GUID_TABLE_MAX_ENTRIES];
+
+} PACK_SUFFIX ib_guid_info_t;
+#include <complib/cl_packoff.h>
+/************/
+
+#include <complib/cl_packon.h>
+typedef struct _ib_guidinfo_record
+{
+	ib_net16_t	lid;
+	uint8_t		block_num;
+	uint8_t		resv;
+	uint32_t	reserved;
+	ib_guid_info_t	guid_info;
+} PACK_SUFFIX ib_guidinfo_record_t;
+#include <complib/cl_packoff.h>
+
+#define IB_MULTIPATH_MAX_GIDS 11	/* Support max that can fit into first MAD (for now) */
+
+#include <complib/cl_packon.h>
+typedef struct _ib_multipath_rec_t
+{
+	ib_net32_t	hop_flow_raw;
+	uint8_t		tclass;
+	uint8_t		num_path;
+	ib_net16_t	pkey;
+	uint8_t		resv0;
+	uint8_t		sl;
+	uint8_t		mtu;
+	uint8_t		rate;
+	uint8_t		pkt_life;
+	uint8_t		resv1;
+	uint8_t		independence;	/* formerly resv2 */
+	uint8_t		sgid_count;
+	uint8_t		dgid_count;
+	uint8_t		resv3[7];
+	ib_gid_t	gids[IB_MULTIPATH_MAX_GIDS];
+} PACK_SUFFIX ib_multipath_rec_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	hop_flow_raw
+*		Global routing parameters: hop count, flow label and raw bit.
+*
+*	tclass
+*		Another global routing parameter.
+*
+*	num_path
+*		Reversible path - 1 bit to say if path is reversible.
+*		num_path [6:0] In queries, maximum number of paths to return.
+*		In responses, undefined.
+*
+*	pkey
+*		Partition key (P_Key) to use on this path.
+*
+*	sl
+*		Service level to use on this path.
+*
+*	mtu
+*		MTU and MTU selector fields to use on this path
+*
+*	rate
+*		Rate and rate selector fields to use on this path.
+*
+*	pkt_life
+*		Packet lifetime
+*
+*	preference
+*		Indicates the relative merit of this path versus other path
+*		records returned from the SA. Lower numbers are better.
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_num_path
+* NAME
+*	ib_multipath_rec_num_path
+*
+* DESCRIPTION
+*	Get max number of paths to return.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_num_path(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( p_rec->num_path & 0x7F );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Maximum number of paths to return for each unique SGID_DGID combination.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_sl
+* NAME
+*	ib_multipath_rec_sl
+*
+* DESCRIPTION
+*	Get multipath service level.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_sl(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	/* sl is a single byte; only its low 4 bits hold the SL */
+	return( (uint8_t)(p_rec->sl & 0xF) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	SL.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_mtu
+* NAME
+*	ib_multipath_rec_mtu
+*
+* DESCRIPTION
+*	Get encoded path MTU.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_mtu(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( (uint8_t)(p_rec->mtu & IB_MULTIPATH_REC_BASE_MASK) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Encoded path MTU.
+*		1: 256
+*		2: 512
+*		3: 1024
+*		4: 2048
+*		5: 4096
+*		others: reserved
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_mtu_sel
+* NAME
+*	ib_multipath_rec_mtu_sel
+*
+* DESCRIPTION
+*	Get encoded multipath MTU selector.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_mtu_sel(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( (uint8_t)((p_rec->mtu & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Encoded path MTU selector value (for queries).
+*		0: greater than MTU specified
+*		1: less than MTU specified
+*		2: exactly the MTU specified
+*		3: largest MTU available
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_rate
+* NAME
+*	ib_multipath_rec_rate
+*
+* DESCRIPTION
+*	Get encoded multipath rate.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_rate(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( (uint8_t)(p_rec->rate & IB_MULTIPATH_REC_BASE_MASK) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Encoded multipath rate.
+*		2: 2.5 Gb/sec.
+*		3: 10 Gb/sec.
+*		4: 30 Gb/sec.
+*		others: reserved
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_rate_sel
+* NAME
+*	ib_multipath_rec_rate_sel
+*
+* DESCRIPTION
+*	Get encoded multipath rate selector.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_rate_sel(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( (uint8_t)((p_rec->rate & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Encoded path rate selector value (for queries).
+*		0: greater than rate specified
+*		1: less than rate specified
+*		2: exactly the rate specified
+*		3: largest rate available
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_multipath_rec_t
+*********/
+
+/****f* IBA Base: Types/ib_multipath_rec_pkt_life
+* NAME
+*	ib_multipath_rec_pkt_life
+*
+* DESCRIPTION
+*	Get encoded multipath pkt_life.
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_multipath_rec_pkt_life(
+	IN const ib_multipath_rec_t* const p_rec )
+{
+	return( (uint8_t)(p_rec->pkt_life & IB_MULTIPATH_REC_BASE_MASK) );
+}
+/*
+* PARAMETERS
+*	p_rec
+*		[in] Pointer to the multipath record object.
+*
+* RETURN VALUES
+*	Encoded multipath pkt_life = 4.096 µsec * 2 ** PacketLifeTime.
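+*
+* EXAMPLE
+*	An illustrative decode (p_rec is assumed to point at a valid
+*	record); the encoded value is an exponent, so the lifetime in
+*	microseconds is approximately 4.096 * (1 << value):
+*
+*		uint8_t  exp  = ib_multipath_rec_pkt_life( p_rec );
+*		uint64_t usec = ((uint64_t)4096 << exp) / 1000;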
+* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +/****f* IBA Base: Types/ib_multipath_rec_pkt_life_sel +* NAME +* ib_multipath_rec_pkt_life_sel +* +* DESCRIPTION +* Get encoded multipath pkt_lifetime selector. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_multipath_rec_pkt_life_sel( + IN const ib_multipath_rec_t* const p_rec ) +{ + return( (uint8_t)((p_rec->pkt_life & IB_MULTIPATH_REC_SELECTOR_MASK) >> 6 )); +} +/* +* PARAMETERS +* p_rec +* [in] Pointer to the multipath record object. +* +* RETURN VALUES +* Encoded path pkt_lifetime selector value (for queries). +* 0: greater than rate specified +* 1: less than rate specified +* 2: exactly the rate specified +* 3: smallest packet lifetime available +* +* NOTES +* +* SEE ALSO +* ib_multipath_rec_t +*********/ + +#define IB_NUM_PKEY_ELEMENTS_IN_BLOCK 32 +/****s* IBA Base: Types/ib_pkey_table_t +* NAME +* ib_pkey_table_t +* +* DESCRIPTION +* IBA defined PKey table. (14.2.5.7) +* +* SYNOPSIS +*/ + +#include +typedef struct _ib_pkey_table +{ + ib_net16_t pkey_entry[IB_NUM_PKEY_ELEMENTS_IN_BLOCK]; + +} PACK_SUFFIX ib_pkey_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_pkey_table_record_t +* NAME +* ib_pkey_table_record_t +* +* DESCRIPTION +* IBA defined P_Key Table Record for SA Query. (15.2.5.11) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_pkey_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint16_t block_num; + uint8_t port_num; // for switch: port number, for CA: reserved + uint8_t reserved1; + uint16_t reserved2; + ib_pkey_table_t pkey_tbl; + +} PACK_SUFFIX ib_pkey_table_record_t; +#include +/************/ + +#define IB_DROP_VL 15 +#define IB_MAX_NUM_VLS 16 +/****s* IBA Base: Types/ib_slvl_table_t +* NAME +* ib_slvl_table_t +* +* DESCRIPTION +* IBA defined SL2VL Mapping Table Attribute. (14.2.5.8) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_slvl_table +{ + uint8_t raw_vl_by_sl[IB_MAX_NUM_VLS/2]; +} PACK_SUFFIX ib_slvl_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_slvl_table_record_t +* NAME +* ib_slvl_table_record_t +* +* DESCRIPTION +* IBA defined SL to VL Mapping Table Record for SA Query. (15.2.5.4) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_slvl_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint8_t in_port_num; // reserved for CAs + uint8_t out_port_num; // reserved for CAs + uint32_t resv; + ib_slvl_table_t slvl_tbl; + +} PACK_SUFFIX ib_slvl_table_record_t; +#include +/************/ + +/****f* IBA Base: Types/ib_slvl_table_set +* NAME +* ib_slvl_table_set +* +* DESCRIPTION +* Set slvl table entry. +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_slvl_table_set( + IN ib_slvl_table_t* p_slvl_tbl, + IN uint8_t sl_index, + IN uint8_t vl ) +{ + uint8_t idx = sl_index/2; + CL_ASSERT(vl <= 15); + CL_ASSERT(sl_index <= 15); + + if (sl_index%2) + { + /* this is an odd sl. Need to update the ls bits */ + p_slvl_tbl->raw_vl_by_sl[idx] = ( p_slvl_tbl->raw_vl_by_sl[idx] & 0xF0 ) | vl ; + } + else + { + /* this is an even sl. Need to update the ms bits */ + p_slvl_tbl->raw_vl_by_sl[idx] = ( vl << 4 ) | ( p_slvl_tbl->raw_vl_by_sl[idx] & 0x0F ); + } +} +/* +* PARAMETERS +* p_slvl_tbl +* [in] pointer to ib_slvl_table_t object. +* +* sl_index +* [in] the sl index in the table to be updated. +* +* vl +* [in] the vl value to update for that sl. 
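+*
+* EXAMPLE
+*	A packing sketch (p_tbl names any ib_slvl_table_t*; the values are
+*	illustrative). Even SLs land in the high nibble and odd SLs in the
+*	low nibble of the same byte:
+*
+*		ib_slvl_table_set( p_tbl, 0, 1 );
+*		ib_slvl_table_set( p_tbl, 1, 2 );
+*
+*	after which raw_vl_by_sl[0] holds 0x12.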
+* +* RETURN VALUES +* None +* +* NOTES +* +* SEE ALSO +* ib_slvl_table_t +*********/ + +/****f* IBA Base: Types/ib_slvl_table_get +* NAME +* ib_slvl_table_get +* +* DESCRIPTION +* Get slvl table entry. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_slvl_table_get( + IN const ib_slvl_table_t* p_slvl_tbl, + IN uint8_t sl_index ) +{ + uint8_t idx = sl_index/2; + CL_ASSERT(sl_index <= 15); + + if (sl_index%2) + { + /* this is an odd sl. Need to return the ls bits. */ + return ( p_slvl_tbl->raw_vl_by_sl[idx] & 0x0F ); + } + else + { + /* this is an even sl. Need to return the ms bits. */ + return ( (p_slvl_tbl->raw_vl_by_sl[idx] & 0xF0) >> 4 ); + } +} +/* +* PARAMETERS +* p_slvl_tbl +* [in] pointer to ib_slvl_table_t object. +* +* sl_index +* [in] the sl index in the table whose value should be returned. +* +* RETURN VALUES +* vl for the requested sl_index. +* +* NOTES +* +* SEE ALSO +* ib_slvl_table_t +*********/ + +/****s* IBA Base: Types/ib_vl_arb_element_t +* NAME +* ib_vl_arb_element_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table Element. (14.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_element +{ + uint8_t vl; + uint8_t weight; +} PACK_SUFFIX ib_vl_arb_element_t; +#include +/************/ + +#define IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK 32 + +/****s* IBA Base: Types/ib_vl_arb_table_t +* NAME +* ib_vl_arb_table_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table. (14.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_table +{ + ib_vl_arb_element_t vl_entry[IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]; +} PACK_SUFFIX ib_vl_arb_table_t; +#include +/************/ + +/****s* IBA Base: Types/ib_vl_arb_table_record_t +* NAME +* ib_vl_arb_table_record_t +* +* DESCRIPTION +* IBA defined VL Arbitration Table Record for SA Query. (15.2.5.9) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_vl_arb_table_record +{ + ib_net16_t lid; // for CA: lid of port, for switch lid of port 0 + uint8_t port_num; + uint8_t block_num; + uint32_t reserved; + ib_vl_arb_table_t vl_arb_tbl; +} PACK_SUFFIX ib_vl_arb_table_record_t; +#include +/************/ + +/* + * Global route header information received with unreliable datagram messages + */ +#include +typedef struct _ib_grh +{ + ib_net32_t ver_class_flow; + ib_net16_t resv1; + uint8_t resv2; + uint8_t hop_limit; + ib_gid_t src_gid; + ib_gid_t dest_gid; +} PACK_SUFFIX ib_grh_t; +#include + +/****f* IBA Base: Types/ib_grh_get_ver_class_flow +* NAME +* ib_grh_get_ver_class_flow +* +* DESCRIPTION +* Get encoded version, traffic class and flow label in grh +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_grh_get_ver_class_flow( + IN const ib_net32_t ver_class_flow, + OUT uint8_t* const p_ver, + OUT uint8_t* const p_tclass, + OUT uint32_t* const p_flow_lbl ) +{ + ib_net32_t tmp_ver_class_flow; + + if (p_ver) + *p_ver = (uint8_t)(ver_class_flow & 0x0f); + + tmp_ver_class_flow = ver_class_flow >> 4; + + if (p_tclass) + *p_tclass = (uint8_t)(tmp_ver_class_flow & 0xff); + + tmp_ver_class_flow = tmp_ver_class_flow >> 8; + + if (p_flow_lbl) + *p_flow_lbl = tmp_ver_class_flow & 0xfffff; +} +/* +* PARAMETERS +* ver_class_flow +* [in] the version, traffic class and flow label info. +* +* RETURN VALUES +* p_ver +* [out] pointer to the version info. +* +* p_tclass +* [out] pointer to the traffic class info. 
+*
+*	p_flow_lbl
+*		[out] pointer to the flow label info
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_grh_t
+*********/
+
+/****f* IBA Base: Types/ib_grh_set_ver_class_flow
+* NAME
+*	ib_grh_set_ver_class_flow
+*
+* DESCRIPTION
+*	Set encoded version, traffic class and flow label in grh
+*
+* SYNOPSIS
+*/
+static inline ib_net32_t OSM_API
+ib_grh_set_ver_class_flow(
+	IN const uint8_t ver,
+	IN const uint8_t tclass,
+	IN const uint32_t flow_lbl )
+{
+	ib_net32_t ver_class_flow;
+
+	ver_class_flow = flow_lbl;
+	ver_class_flow = ver_class_flow << 8;
+	ver_class_flow = ver_class_flow | tclass;
+	ver_class_flow = ver_class_flow << 4;
+	ver_class_flow = ver_class_flow | ver;
+	return (ver_class_flow);
+}
+/*
+* PARAMETERS
+*	ver
+*		[in] the version info.
+*
+*	tclass
+*		[in] the traffic class info.
+*
+*	flow_lbl
+*		[in] the flow label info
+*
+* RETURN VALUES
+*	ver_class_flow
+*		[out] the version, traffic class and flow label info.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_grh_t
+*********/
+
+/****s* IBA Base: Types/ib_member_rec_t
+* NAME
+*	ib_member_rec_t
+*
+* DESCRIPTION
+*	Multicast member record, used to create, join, and leave multicast
+*	groups.
+*
+* SYNOPSIS
+*/
+#include <complib/cl_packon.h>
+typedef struct _ib_member_rec
+{
+	ib_gid_t	mgid;
+	ib_gid_t	port_gid;
+	ib_net32_t	qkey;
+	ib_net16_t	mlid;
+	uint8_t		mtu;
+	uint8_t		tclass;
+	ib_net16_t	pkey;
+	uint8_t		rate;
+	uint8_t		pkt_life;
+	ib_net32_t	sl_flow_hop;
+	uint8_t		scope_state;
+	uint8_t		proxy_join:1;
+	uint8_t		reserved[2];
+	uint8_t		pad[4];
+
+} PACK_SUFFIX ib_member_rec_t;
+#include <complib/cl_packoff.h>
+/*
+* FIELDS
+*	mgid
+*		Multicast GID address for this multicast group.
+*
+*	port_gid
+*		Valid GID of the endpoint joining this multicast group.
+*
+*	qkey
+*		Q_Key to be used by this multicast group.
+*
+*	mlid
+*		Multicast LID for this multicast group.
+*
+*	mtu
+*		MTU and MTU selector fields to use on this path
+*
+*	tclass
+*		Another global routing parameter.
+*
+*	pkey
+*		Partition key (P_Key) to use for this member.
+*
+*	rate
+*		Rate and rate selector fields to use on this path.
+*
+*	pkt_life
+*		Packet lifetime
+*
+*	sl_flow_hop
+*		Global routing parameters: service level, hop count, and flow label.
+*
+*	scope_state
+*		MGID scope and JoinState of multicast request.
+*
+*	proxy_join
+*		Enables others in the Partition to proxy add/remove from the group
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/ib_member_get_sl_flow_hop
+* NAME
+*	ib_member_get_sl_flow_hop
+*
+* DESCRIPTION
+*	Get encoded sl, flow label, and hop limit
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_member_get_sl_flow_hop(
+	IN const ib_net32_t sl_flow_hop,
+	OUT uint8_t* const p_sl,
+	OUT uint32_t* const p_flow_lbl,
+	OUT uint8_t* const p_hop )
+{
+	uint32_t tmp;
+
+	tmp = cl_ntoh32(sl_flow_hop);
+	if (p_hop)
+		*p_hop = (uint8_t)tmp;
+	tmp >>= 8;
+
+	if (p_flow_lbl)
+		*p_flow_lbl = (uint32_t)(tmp & 0xfffff);
+	tmp >>= 20;
+
+	if (p_sl)
+		*p_sl = (uint8_t)tmp;
+}
+/*
+* PARAMETERS
+*	sl_flow_hop
+*		[in] the sl, flow label, and hop limit of MC Group
+*
+* RETURN VALUES
+*	p_sl
+*		[out] pointer to the service level
+*
+*	p_flow_lbl
+*		[out] pointer to the flow label info
+*
+*	p_hop
+*		[out] pointer to the hop count limit.
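+*
+* EXAMPLE
+*	A round-trip sketch through ib_member_set_sl_flow_hop (defined
+*	below); the field values are hypothetical:
+*
+*		uint8_t sl; uint32_t flow; uint8_t hop;
+*		ib_net32_t sfh = ib_member_set_sl_flow_hop( 1, 0x12345, 64 );
+*		ib_member_get_sl_flow_hop( sfh, &sl, &flow, &hop );
+*
+*	leaves sl == 1, flow == 0x12345 and hop == 64.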
+* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_sl_flow_hop +* NAME +* ib_member_set_sl_flow_hop +* +* DESCRIPTION +* Set encoded sl, flow label, and hop limit +* +* SYNOPSIS +*/ +static inline ib_net32_t OSM_API +ib_member_set_sl_flow_hop( + IN const uint8_t sl, + IN const uint32_t flow_label, + IN const uint8_t hop_limit ) +{ + uint32_t tmp; + + tmp = (sl << 28) | ((flow_label & 0xfffff) << 8) | hop_limit; + return cl_hton32(tmp); +} +/* +* PARAMETERS +* sl +* [in] the service level. +* +* flow_lbl +* [in] the flow label info +* +* hop_limit +* [in] the hop limit. +* +* RETURN VALUES +* sl_flow_hop +* [out] the encoded sl, flow label, and hop limit +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_get_scope_state +* NAME +* ib_member_get_scope_state +* +* DESCRIPTION +* Get encoded MGID scope and JoinState +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_member_get_scope_state( + IN const uint8_t scope_state, + OUT uint8_t* const p_scope, + OUT uint8_t* const p_state ) +{ + uint8_t tmp_scope_state; + + if (p_state) + *p_state = (uint8_t)(scope_state & 0x0f); + + tmp_scope_state = scope_state >> 4; + + if (p_scope) + *p_scope = (uint8_t)(tmp_scope_state & 0x0f); + +} +/* +* PARAMETERS +* scope_state +* [in] the scope and state +* +* RETURN VALUES +* p_scope +* [out] pointer to the MGID scope +* +* p_state +* [out] pointer to the join state +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_scope_state +* NAME +* ib_member_set_scope_state +* +* DESCRIPTION +* Set encoded version, MGID scope and JoinState +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_member_set_scope_state( + IN const uint8_t scope, + IN const uint8_t state ) +{ + uint8_t scope_state; + + scope_state = scope; + scope_state = scope_state << 4; + scope_state = scope_state | state; + return (scope_state); +} +/* +* PARAMETERS +* scope +* [in] the MGID scope +* +* state +* [in] the JoinState +* +* RETURN VALUES +* scope_state +* [out] the encoded one +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/****f* IBA Base: Types/ib_member_set_join_state +* NAME +* ib_member_set_join_state +* +* DESCRIPTION +* Set JoinState +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_member_set_join_state( + IN OUT ib_member_rec_t *p_mc_rec, + IN const uint8_t state ) +{ + /* keep the scope as it is */ + p_mc_rec->scope_state = (p_mc_rec->scope_state & 0xF0) | (0x0f & state); +} +/* +* PARAMETERS +* p_mc_rec +* [in] pointer to the member record +* +* state +* [in] the JoinState +* +* RETURN VALUES +* NONE +* +* NOTES +* +* SEE ALSO +* ib_member_rec_t +*********/ + +/* + * Join State Codes: + */ +#define IB_MC_REC_STATE_FULL_MEMBER 0x01 +#define IB_MC_REC_STATE_NON_MEMBER 0x02 +#define IB_MC_REC_STATE_SEND_ONLY_MEMBER 0x04 + +/* + * Generic MAD notice types + */ +#define IB_NOTICE_TYPE_FATAL 0x00 +#define IB_NOTICE_TYPE_URGENT 0x01 +#define IB_NOTICE_TYPE_SECURITY 0x02 +#define IB_NOTICE_TYPE_SUBN_MGMT 0x03 +#define IB_NOTICE_TYPE_INFO 0x04 +#define IB_NOTICE_TYPE_EMPTY 0x7F + +#include +typedef struct _ib_mad_notice_attr // Total Size calc Accumulated +{ + uint8_t generic_type; // 1 1 + + union _notice_g_or_v + { + struct _notice_generic // 5 6 + { + uint8_t prod_type_msb; + ib_net16_t prod_type_lsb; + ib_net16_t trap_num; + } PACK_SUFFIX generic; + + struct _notice_vend + { + uint8_t vend_id_msb; + ib_net16_t vend_id_lsb; + ib_net16_t dev_id; + } PACK_SUFFIX vend; + } g_or_v; + + 
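+	// Which arm of the g_or_v union above is valid is chosen by bit 7
+	// of generic_type (see ib_notice_is_generic below): 1 = generic,
+	// 0 = vendor specific.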
ib_net16_t	issuer_lid;	// 2	8
+	ib_net16_t	toggle_count;	// 2	10
+
+	union _data_details	// 54	64
+	{
+		struct _raw_data
+		{
+			uint8_t	details[54];
+		} PACK_SUFFIX raw_data;
+
+		struct _ntc_64_67
+		{
+			uint8_t		res[6];
+			ib_gid_t	gid;	// the Node or Multicast Group that came in/out
+		} PACK_SUFFIX ntc_64_67;
+
+		struct _ntc_128 {
+			ib_net16_t	sw_lid;	// the lid of the switch whose link state changed
+		} PACK_SUFFIX ntc_128;
+
+		struct _ntc_129_131 {
+			ib_net16_t	pad;
+			ib_net16_t	lid;	// lid and port number of the violation
+			uint8_t		port_num;
+		} PACK_SUFFIX ntc_129_131;
+
+		struct _ntc_144 {
+			ib_net16_t	pad1;
+			ib_net16_t	lid;	// lid where capability mask changed
+			ib_net16_t	pad2;
+			ib_net32_t	new_cap_mask;	// new capability mask
+		} PACK_SUFFIX ntc_144;
+
+		struct _ntc_145 {
+			ib_net16_t	pad1;
+			ib_net16_t	lid;	// lid where sys guid changed
+			ib_net16_t	pad2;
+			ib_net64_t	new_sys_guid;	// new system image guid
+		} PACK_SUFFIX ntc_145;
+
+		struct _ntc_256 {	// total: 54
+			ib_net16_t	pad1;	// 2
+			ib_net16_t	lid;	// 2
+			ib_net16_t	pad2;	// 2
+			uint8_t		method;	// 1
+			uint8_t		pad3;	// 1
+			ib_net16_t	attr_id;	// 2
+			ib_net32_t	attr_mod;	// 4
+			ib_net64_t	mkey;	// 8
+			uint8_t		dr_slid;	// 1
+			uint8_t		dr_trunc_hop;	// 1
+			uint8_t		dr_rtn_path[30];	// 30
+		} PACK_SUFFIX ntc_256;
+
+		struct _ntc_257_258	// violation of p/q_key // 51
+		{
+			ib_net16_t	pad1;	// 2
+			ib_net16_t	lid1;	// 2
+			ib_net16_t	lid2;	// 2
+			ib_net32_t	key;	// 4
+			uint8_t		sl;	// 1
+			ib_net32_t	qp1;	// 4
+			ib_net32_t	qp2;	// 4
+			ib_gid_t	gid1;	// 16
+			ib_gid_t	gid2;	// 16
+		} PACK_SUFFIX ntc_257_258;
+
+		struct _ntc_259	// p/q_key violation with sw info 53
+		{
+			ib_net16_t	data_valid;	// 2
+			ib_net16_t	lid1;	// 2
+			ib_net16_t	lid2;	// 2
+			ib_net32_t	key;	// 4
+			uint8_t		sl;	// 1
+			ib_net32_t	qp1;	// 4
+			uint8_t		qp2_msb;	// 1
+			ib_net16_t	qp2_lsb;	// 2
+			ib_gid_t	gid1;	// 16
+			ib_gid_t	gid2;	// 16
+			ib_net16_t	sw_lid;	// 2
+			uint8_t		port_no;	// 1
+		} PACK_SUFFIX ntc_259;
+
+	} data_details;
+
+	ib_gid_t	issuer_gid;	// 16	80
+
+} PACK_SUFFIX ib_mad_notice_attr_t;
+#include <complib/cl_packoff.h>
+
+/****f* IBA Base: Types/ib_notice_is_generic
+* NAME
+*	ib_notice_is_generic
+*
+* DESCRIPTION
+*	Check if the notice is generic
+*
+* SYNOPSIS
+*/
+static inline boolean_t OSM_API
+ib_notice_is_generic(
+	IN const ib_mad_notice_attr_t *p_ntc )
+{
+	return (p_ntc->generic_type & 0x80);
+}
+/*
+* PARAMETERS
+*	p_ntc
+*		[in] Pointer to the notice MAD attribute
+*
+* RETURN VALUES
+*	TRUE if mad is generic
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t
+*********/
+
+/****f* IBA Base: Types/ib_notice_get_type
+* NAME
+*	ib_notice_get_type
+*
+* DESCRIPTION
+*	Get the notice type
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ib_notice_get_type(
+	IN const ib_mad_notice_attr_t *p_ntc )
+{
+	return p_ntc->generic_type & 0x7f;
+}
+/*
+* PARAMETERS
+*	p_ntc
+*		[in] Pointer to the notice MAD attribute
+*
+* RETURN VALUES
+*	The notice type.
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t
+*********/
+
+/****f* IBA Base: Types/ib_notice_get_prod_type
+* NAME
+*	ib_notice_get_prod_type
+*
+* DESCRIPTION
+*	Get the notice Producer Type of Generic Notice
+*
+* SYNOPSIS
+*/
+static inline ib_net32_t OSM_API
+ib_notice_get_prod_type(
+	IN const ib_mad_notice_attr_t *p_ntc )
+{
+	uint32_t pt;
+
+	pt = cl_ntoh16(p_ntc->g_or_v.generic.prod_type_lsb) |
+		(p_ntc->g_or_v.generic.prod_type_msb << 16);
+	return cl_hton32(pt);
+}
+/*
+* PARAMETERS
+*	p_ntc
+*		[in] Pointer to the notice MAD attribute
+*
+* RETURN VALUES
+*	The producer type
+*
+* SEE ALSO
+*	ib_mad_notice_attr_t
+*********/
+
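+/*
+* The 24-bit producer type is split across prod_type_msb (high 8 bits)
+* and prod_type_lsb (low 16 bits); a network-order round trip through
+* the accessors looks like this (the value 2, a switch, is only an
+* example):
+*
+*	ib_notice_set_prod_type( p_ntc, cl_hton32( 2 ) );
+*	after which ib_notice_get_prod_type( p_ntc ) yields cl_hton32( 2 ).
+*/
+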
+/****f* IBA Base: Types/ib_notice_set_prod_type +* NAME +* ib_notice_set_prod_type +* +* DESCRIPTION +* Set the notice Producer Type of Generic Notice +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_notice_set_prod_type( + IN ib_mad_notice_attr_t *p_ntc, + IN ib_net32_t prod_type_val ) +{ + uint32_t ptv = cl_ntoh32(prod_type_val); + p_ntc->g_or_v.generic.prod_type_lsb = cl_hton16((uint16_t)(ptv & 0x0000ffff)); + p_ntc->g_or_v.generic.prod_type_msb = (uint8_t)( (ptv & 0x00ff0000) >> 16); +} +/* +* PARAMETERS +* p_ntc +* [in] Pointer to the notice MAD attribute +* +* prod_type +* [in] The producer Type code +* +* RETURN VALUES +* None +* +* SEE ALSO +* ib_mad_notice_attr_t +*********/ + +/****f* IBA Base: Types/ib_notice_set_prod_type_ho +* NAME +* ib_notice_set_prod_type_ho +* +* DESCRIPTION +* Set the notice Producer Type of Generic Notice given Host Order +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_notice_set_prod_type_ho( + IN ib_mad_notice_attr_t *p_ntc, + IN uint32_t prod_type_val_ho ) +{ + p_ntc->g_or_v.generic.prod_type_lsb = + cl_hton16( (uint16_t)(prod_type_val_ho & 0x0000ffff) ); + p_ntc->g_or_v.generic.prod_type_msb = + (uint8_t)( (prod_type_val_ho & 0x00ff0000) >> 16); +} +/* +* PARAMETERS +* p_ntc +* [in] Pointer to the notice MAD attribute +* +* prod_type +* [in] The producer Type code in host order +* +* RETURN VALUES +* None +* +* SEE ALSO +* ib_mad_notice_attr_t +*********/ + +/****f* IBA Base: Types/ib_notice_get_vend_id +* NAME +* ib_notice_get_vend_id +* +* DESCRIPTION +* Get the Vendor Id of Vendor type Notice +* +* SYNOPSIS +*/ +static inline ib_net32_t OSM_API +ib_notice_get_vend_id( + IN const ib_mad_notice_attr_t *p_ntc ) +{ + uint32_t vi; + + vi = cl_ntoh16(p_ntc->g_or_v.vend.vend_id_lsb) | + (p_ntc->g_or_v.vend.vend_id_msb << 16); + return cl_hton32(vi); +} +/* +* PARAMETERS +* p_ntc +* [in] Pointer to the notice MAD attribute +* +* RETURN VALUES +* The Vendor Id of Vendor type Notice +* +* SEE ALSO +* ib_mad_notice_attr_t +*********/ + +/****f* IBA Base: Types/ib_notice_set_vend_id +* NAME +* ib_notice_set_vend_id +* +* DESCRIPTION +* Set the notice Producer Type of Generic Notice +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_notice_set_vend_id( + IN ib_mad_notice_attr_t *p_ntc, + IN ib_net32_t vend_id ) +{ + uint32_t vi = cl_ntoh32(vend_id); + p_ntc->g_or_v.vend.vend_id_lsb = cl_hton16((uint16_t)(vi & 0x0000ffff)); + p_ntc->g_or_v.vend.vend_id_msb = (uint8_t)((vi & 0x00ff0000) >> 16); +} +/* +* PARAMETERS +* p_ntc +* [in] Pointer to the notice MAD attribute +* +* vend_id +* [in] The producer Type code +* +* RETURN VALUES +* None +* +* SEE ALSO +* ib_mad_notice_attr_t +*********/ + +/****f* IBA Base: Types/ib_notice_set_vend_id_ho +* NAME +* ib_notice_set_vend_id_ho +* +* DESCRIPTION +* Set the notice Producer Type of Generic Notice given a host order value +* +* SYNOPSIS +*/ +static inline void OSM_API +ib_notice_set_vend_id_ho( + IN ib_mad_notice_attr_t *p_ntc, + IN uint32_t vend_id_ho ) +{ + p_ntc->g_or_v.vend.vend_id_lsb = + cl_hton16((uint16_t)(vend_id_ho & 0x0000ffff)); + p_ntc->g_or_v.vend.vend_id_msb = + (uint8_t)((vend_id_ho & 0x00ff0000) >> 16); +} +/* +* PARAMETERS +* p_ntc +* [in] Pointer to the notice MAD attribute +* +* vend_id_ho +* [in] The producer Type code in host order +* +* RETURN VALUES +* None +* +* SEE ALSO +* ib_mad_notice_attr_t +*********/ + +#include +typedef struct _ib_inform_info +{ + ib_gid_t gid; + ib_net16_t lid_range_begin; + ib_net16_t lid_range_end; + ib_net16_t reserved1; + uint8_t is_generic; + uint8_t 
+	ib_net16_t			trap_type;
+	union _inform_g_or_v
+	{
+		struct _inform_generic
+		{
+			ib_net16_t	trap_num;
+			ib_net32_t	qpn_resp_time_val;
+			uint8_t		reserved2;
+			uint8_t		node_type_msb;
+			ib_net16_t	node_type_lsb;
+		} PACK_SUFFIX generic;
+
+		struct _inform_vend
+		{
+			ib_net16_t	dev_id;
+			ib_net32_t	qpn_resp_time_val;
+			uint8_t		reserved2;
+			uint8_t		vendor_id_msb;
+			ib_net16_t	vendor_id_lsb;
+		} PACK_SUFFIX vend;
+
+	} PACK_SUFFIX g_or_v;
+
+} PACK_SUFFIX ib_inform_info_t;
+#include
+
+/****f* IBA Base: Types/ib_inform_info_get_qpn_resp_time
+* NAME
+*	ib_inform_info_get_qpn_resp_time
+*
+* DESCRIPTION
+*	Get the QPN and response time value of the inform info
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_inform_info_get_qpn_resp_time(
+	IN const ib_net32_t qpn_resp_time_val,
+	OUT ib_net32_t* const p_qpn,
+	OUT uint8_t* const p_resp_time_val )
+{
+	uint32_t tmp = cl_ntoh32(qpn_resp_time_val);
+
+	if (p_qpn)
+		*p_qpn = cl_hton32((tmp & 0xffffff00) >> 8);
+
+	if (p_resp_time_val)
+		*p_resp_time_val = (uint8_t)(tmp & 0x0000001f);
+}
+/*
+* PARAMETERS
+*	qpn_resp_time_val
+*		[in] the qpn and resp time val from the mad
+*
+*	p_qpn
+*		[out] pointer to the qpn
+*
+*	p_resp_time_val
+*		[out] pointer to the resp time val
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_inform_info_t
+*********/
+
+/****f* IBA Base: Types/ib_inform_info_set_qpn
+* NAME
+*	ib_inform_info_set_qpn
+*
+* DESCRIPTION
+*	Set the QPN of the inform info
+*
+* SYNOPSIS
+*/
+static inline void OSM_API
+ib_inform_info_set_qpn(
+	IN ib_inform_info_t *p_ii,
+	IN ib_net32_t const qpn)
+{
+	uint32_t tmp = cl_ntoh32(p_ii->g_or_v.generic.qpn_resp_time_val);
+
+	p_ii->g_or_v.generic.qpn_resp_time_val =
+		cl_hton32(
+			(tmp & 0x000000ff) |
+			((cl_ntoh32(qpn) << 8) & 0xffffff00)
+			);
+}
+/*
+* PARAMETERS
+*	p_ii
+*		[in] pointer to an inform info
+*
+*	qpn
+*		[in] the QPN in network order
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_inform_info_t
+*********/
+
+/****f* IBA Base: Types/ib_inform_info_get_node_type
+* NAME
+*	ib_inform_info_get_node_type
+*
+* DESCRIPTION
+*	Get Node Type of the Inform Info
+*
+* SYNOPSIS
+*/
+static inline ib_net32_t OSM_API
+ib_inform_info_get_node_type(
+	IN const ib_inform_info_t *p_inf)
+{
+	uint32_t nt;
+
+	nt = cl_ntoh16(p_inf->g_or_v.generic.node_type_lsb) |
+		(p_inf->g_or_v.generic.node_type_msb << 16);
+	return cl_hton32(nt);
+}
+/*
+* PARAMETERS
+*	p_inf
+*		[in] pointer to an inform info
+*
+* RETURN VALUES
+*	The node type
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_inform_info_t
+*********/
+
+/****f* IBA Base: Types/ib_inform_info_get_vend_id
+* NAME
+*	ib_inform_info_get_vend_id
+*
+* DESCRIPTION
+*	Get the Vendor Id of the Inform Info
+*
+* SYNOPSIS
+*/
+static inline ib_net32_t OSM_API
+ib_inform_info_get_vend_id(
+	IN const ib_inform_info_t *p_inf)
+{
+	uint32_t vi;
+
+	vi = cl_ntoh16(p_inf->g_or_v.vend.vendor_id_lsb) |
+		(p_inf->g_or_v.vend.vendor_id_msb << 16);
+	return cl_hton32(vi);
+}
+/*
+* PARAMETERS
+*	p_inf
+*		[in] pointer to an inform info
+*
+* RETURN VALUES
+*	The vendor id
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_inform_info_t
+*********/
+
+/****s* IBA Base: Types/ib_inform_info_record_t
+* NAME
+*	ib_inform_info_record_t
+*
+* DESCRIPTION
+*	IBA defined InformInfo Record.
(15.2.5.12) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_inform_info_record +{ + ib_gid_t subscriber_gid; + ib_net16_t subscriber_enum; + uint8_t reserved[6]; + ib_inform_info_t inform_info; + uint8_t pad[4]; +} PACK_SUFFIX ib_inform_info_record_t; +#include + +/****d* IBA Base: Types/DM_SVC_NAME +* NAME +* DM_SVC_NAME +* +* DESCRIPTION +* IBA defined Device Management service name (16.3) +* +* SYNOPSIS +*/ +#define DM_SVC_NAME "DeviceManager.IBTA" +/* +* SEE ALSO +*********/ + +/****s* IBA Base: Types/ib_dm_mad_t +* NAME +* ib_dm_mad_t +* +* DESCRIPTION +* IBA defined Device Management MAD (16.3.1) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_dm_mad +{ + ib_mad_t header; + uint8_t resv[40]; + +#define IB_DM_DATA_SIZE 192 + uint8_t data[IB_DM_DATA_SIZE]; + +} PACK_SUFFIX ib_dm_mad_t; +#include +/* +* FIELDS +* header +* Common MAD header. +* +* resv +* Reserved. +* +* data +* Device Management payload. The structure and content of this field +* depend upon the method, attr_id, and attr_mod fields in the header. +* +* SEE ALSO +* ib_mad_t +*********/ + +/****s* IBA Base: Types/ib_iou_info_t +* NAME +* ib_iou_info_t +* +* DESCRIPTION +* IBA defined IO Unit information structure (16.3.3.3) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_iou_info +{ + ib_net16_t change_id; + uint8_t max_controllers; + uint8_t diag_rom; + +#define IB_DM_CTRL_LIST_SIZE 128 + + uint8_t controller_list[IB_DM_CTRL_LIST_SIZE]; +#define IOC_NOT_INSTALLED 0x0 +#define IOC_INSTALLED 0x1 +// Reserved values 0x02-0xE +#define SLOT_DOES_NOT_EXIST 0xF + +} PACK_SUFFIX ib_iou_info_t; +#include +/* +* FIELDS +* change_id +* Value incremented, with rollover, by any change to the controller_list. +* +* max_controllers +* Number of slots in controller_list. +* +* diag_rom +* A byte containing two fields: DiagDeviceID and OptionROM. +* These fields may be read using the ib_iou_info_diag_dev_id +* and ib_iou_info_option_rom functions. +* +* controller_list +* A series of 4-bit nibbles, with each nibble representing a slot +* in the IO Unit. Individual nibbles may be read using the +* ioc_at_slot function. +* +* SEE ALSO +* ib_dm_mad_t, ib_iou_info_diag_dev_id, ib_iou_info_option_rom, ioc_at_slot +*********/ + +/****f* IBA Base: Types/ib_iou_info_diag_dev_id +* NAME +* ib_iou_info_diag_dev_id +* +* DESCRIPTION +* Returns the DiagDeviceID. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_iou_info_diag_dev_id( + IN const ib_iou_info_t* const p_iou_info ) +{ + return( (uint8_t)(p_iou_info->diag_rom >> 6 & 1) ); +} +/* +* PARAMETERS +* p_iou_info +* [in] Pointer to the IO Unit information structure. +* +* RETURN VALUES +* DiagDeviceID field of the IO Unit information. +* +* NOTES +* +* SEE ALSO +* ib_iou_info_t +*********/ + +/****f* IBA Base: Types/ib_iou_info_option_rom +* NAME +* ib_iou_info_option_rom +* +* DESCRIPTION +* Returns the OptionROM. +* +* SYNOPSIS +*/ +static inline uint8_t OSM_API +ib_iou_info_option_rom( + IN const ib_iou_info_t* const p_iou_info ) +{ + return( (uint8_t)(p_iou_info->diag_rom >> 7) ); +} +/* +* PARAMETERS +* p_iou_info +* [in] Pointer to the IO Unit information structure. +* +* RETURN VALUES +* OptionROM field of the IO Unit information. +* +* NOTES +* +* SEE ALSO +* ib_iou_info_t +*********/ + +/****f* IBA Base: Types/ioc_at_slot +* NAME +* ioc_at_slot +* +* DESCRIPTION +* Returns the IOC value at the specified slot. 
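+*
+*	Layout note (added; derived from the implementation below): slot s
+*	resolves to controller_list[s/2], with odd-numbered slots stored in
+*	the high nibble and even-numbered slots in the low nibble, e.g.
+*	ioc_at_slot( p_iou_info, 1 ) reads the high nibble of byte 0.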
+*
+* SYNOPSIS
+*/
+static inline uint8_t OSM_API
+ioc_at_slot(
+	IN const ib_iou_info_t* const p_iou_info,
+	IN uint8_t slot )
+{
+	if( slot >= IB_DM_CTRL_LIST_SIZE )
+		return SLOT_DOES_NOT_EXIST;
+	else
+		return (uint8_t)
+			( (slot%2) ?
+			((p_iou_info->controller_list[slot/2] & 0xf0) >> 4) :
+			(p_iou_info->controller_list[slot/2] & 0x0f) );
+}
+/*
+* PARAMETERS
+*	p_iou_info
+*		[in] Pointer to the IO Unit information structure.
+*
+*	slot
+*		[in] Number of the slot for which to return the IOC state.
+*
+* RETURN VALUES
+*	State of the IOC in the specified slot, or SLOT_DOES_NOT_EXIST if the
+*	slot number exceeds IB_DM_CTRL_LIST_SIZE.
+*
+* NOTES
+*
+* SEE ALSO
+*	ib_iou_info_t
+*********/
+
+/****s* IBA Base: Types/ib_ioc_profile_t
+* NAME
+*	ib_ioc_profile_t
+*
+* DESCRIPTION
+*	IBA defined IO Controller profile structure (16.3.3.4)
+*
+* SYNOPSIS
+*/
+#include
+typedef struct _ib_ioc_profile
+{
+	ib_net64_t			ioc_guid;
+
+	ib_net32_t			vend_id;
+
+	ib_net32_t			dev_id;
+	ib_net16_t			dev_ver;
+	ib_net16_t			resv2;
+
+	ib_net32_t			subsys_vend_id;
+	ib_net32_t			subsys_id;
+
+	ib_net16_t			io_class;
+	ib_net16_t			io_subclass;
+	ib_net16_t			protocol;
+	ib_net16_t			protocol_ver;
+
+	ib_net32_t			resv3;
+	ib_net16_t			send_msg_depth;
+	uint8_t				resv4;
+	uint8_t				rdma_read_depth;
+	ib_net32_t			send_msg_size;
+	ib_net32_t			rdma_size;
+
+	uint8_t				ctrl_ops_cap;
+#define	CTRL_OPS_CAP_ST		0x01
+#define	CTRL_OPS_CAP_SF		0x02
+#define	CTRL_OPS_CAP_RT		0x04
+#define	CTRL_OPS_CAP_RF		0x08
+#define	CTRL_OPS_CAP_WT		0x10
+#define	CTRL_OPS_CAP_WF		0x20
+#define	CTRL_OPS_CAP_AT		0x40
+#define	CTRL_OPS_CAP_AF		0x80
+
+	uint8_t				resv5;
+
+	uint8_t				num_svc_entries;
+#define	MAX_NUM_SVC_ENTRIES	0xff
+
+	uint8_t				resv6[9];
+
+#define	CTRL_ID_STRING_LEN	64
+	char				id_string[CTRL_ID_STRING_LEN];
+
+} PACK_SUFFIX ib_ioc_profile_t;
+#include
+/*
+* FIELDS
+*	ioc_guid
+*		An EUI-64 GUID used to uniquely identify the IO controller.
+*
+*	vend_id
+*		IO controller vendor ID, IEEE format.
+*
+*	dev_id
+*		A number assigned by the vendor to identify the type of controller.
+*
+*	dev_ver
+*		A number assigned by the vendor to identify the device version.
+*
+*	subsys_vend_id
+*		ID of the vendor of the enclosure, if any, in which the IO controller
+*		resides in IEEE format; otherwise zero.
+*
+*	subsys_id
+*		A number identifying the subsystem where the controller resides.
+*
+*	io_class
+*		0x0000 - 0xfffe = reserved for IO classes encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.
+*
+*	io_subclass
+*		0x0000 - 0xfffe = reserved for IO subclasses encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+*		if the io_class component is 0xffff.
+*
+*	protocol
+*		0x0000 - 0xfffe = reserved for protocols encompassed by InfiniBand
+*		Architecture.  0xffff = Vendor specific.  This shall be set to 0xffff
+*		if the io_class component is 0xffff.
+*
+*	protocol_ver
+*		Protocol specific.
+*
+*	send_msg_depth
+*		Maximum depth of the send message queue.
+*
+*	rdma_read_depth
+*		Maximum depth of the per-channel RDMA read queue.
+*
+*	send_msg_size
+*		Maximum size of send messages.
+*
+*	ctrl_ops_cap
+*		Supported operation types of this IO controller.  A bit set to one
+*		for affirmation of supported capability.
+*
+*	num_svc_entries
+*		Number of entries in the service entries table.
+*
+*	id_string
+*		UTF-8 encoded string for identifying the controller to an operator.
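+*
+*		Usage sketch (added): id_string is a fixed 64-byte field, so bound
+*		any printout rather than assume NUL termination, e.g.
+*		printf( "IOC: %.*s\n", CTRL_ID_STRING_LEN, p_profile->id_string ),
+*		where p_profile is a hypothetical ib_ioc_profile_t pointer.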
+* +* SEE ALSO +* ib_dm_mad_t +*********/ + +static inline uint32_t OSM_API +ib_ioc_profile_get_vend_id( + IN const ib_ioc_profile_t* const p_ioc_profile ) +{ + return( cl_ntoh32(p_ioc_profile->vend_id) >> 8 ); +} + + +static inline void OSM_API +ib_ioc_profile_set_vend_id( + IN ib_ioc_profile_t* const p_ioc_profile, + IN const uint32_t vend_id ) +{ + p_ioc_profile->vend_id = (cl_hton32(vend_id) << 8); +} + +/****s* IBA Base: Types/ib_svc_entry_t +* NAME +* ib_svc_entry_t +* +* DESCRIPTION +* IBA defined IO Controller service entry structure (16.3.3.5) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_svc_entry +{ +#define MAX_SVC_ENTRY_NAME_LEN 40 + char name[MAX_SVC_ENTRY_NAME_LEN]; + + ib_net64_t id; + +} PACK_SUFFIX ib_svc_entry_t; +#include +/* +* FIELDS +* name +* UTF-8 encoded, null-terminated name of the service. +* +* id +* An identifier of the associated Service. +* +* SEE ALSO +* ib_svc_entries_t +*********/ + +/****s* IBA Base: Types/ib_svc_entries_t +* NAME +* ib_svc_entries_t +* +* DESCRIPTION +* IBA defined IO Controller service entry array (16.3.3.5) +* +* SYNOPSIS +*/ +#include +typedef struct _ib_svc_entries +{ +#define SVC_ENTRY_COUNT 4 + ib_svc_entry_t service_entry[SVC_ENTRY_COUNT]; + +} PACK_SUFFIX ib_svc_entries_t; +#include +/* +* FIELDS +* service_entry +* An array of IO controller service entries. +* +* SEE ALSO +* ib_dm_mad_t, ib_svc_entry_t +*********/ + +static inline void OSM_API +ib_dm_get_slot_lo_hi( + IN const ib_net32_t slot_lo_hi, + OUT uint8_t *const p_slot, + OUT uint8_t *const p_lo, + OUT uint8_t *const p_hi ) +{ + ib_net32_t tmp_slot_lo_hi = CL_NTOH32( slot_lo_hi ); + + if( p_slot ) + *p_slot = (uint8_t)( ( tmp_slot_lo_hi >> 16 ) & 0x0f ); + + if( p_hi ) + *p_hi = (uint8_t)( ( tmp_slot_lo_hi >> 8 ) & 0xff ); + + if( p_lo ) + *p_lo = (uint8_t)( ( tmp_slot_lo_hi >> 0 ) & 0xff ); +} + +/* + * IBA defined information describing an I/O controller + */ +#include +typedef struct _ib_ioc_info +{ + ib_net64_t module_guid; + ib_net64_t iou_guid; + ib_ioc_profile_t ioc_profile; + ib_net64_t access_key; + uint16_t initiators_conf; + uint8_t resv[38]; + +} PACK_SUFFIX ib_ioc_info_t; +#include + +/* + * Defines known Communication management class versions + */ +#define IB_MCLASS_CM_VER_2 2 +#define IB_MCLASS_CM_VER_1 1 + +/* + * Defines the size of user available data in communication management MADs + */ +#define IB_REQ_PDATA_SIZE_VER2 92 +#define IB_MRA_PDATA_SIZE_VER2 222 +#define IB_REJ_PDATA_SIZE_VER2 148 +#define IB_REP_PDATA_SIZE_VER2 196 +#define IB_RTU_PDATA_SIZE_VER2 224 +#define IB_LAP_PDATA_SIZE_VER2 168 +#define IB_APR_PDATA_SIZE_VER2 148 +#define IB_DREQ_PDATA_SIZE_VER2 220 +#define IB_DREP_PDATA_SIZE_VER2 224 +#define IB_SIDR_REQ_PDATA_SIZE_VER2 216 +#define IB_SIDR_REP_PDATA_SIZE_VER2 136 + +#define IB_REQ_PDATA_SIZE_VER1 92 +#define IB_MRA_PDATA_SIZE_VER1 222 +#define IB_REJ_PDATA_SIZE_VER1 148 +#define IB_REP_PDATA_SIZE_VER1 204 +#define IB_RTU_PDATA_SIZE_VER1 224 +#define IB_LAP_PDATA_SIZE_VER1 168 +#define IB_APR_PDATA_SIZE_VER1 151 +#define IB_DREQ_PDATA_SIZE_VER1 220 +#define IB_DREP_PDATA_SIZE_VER1 224 +#define IB_SIDR_REQ_PDATA_SIZE_VER1 216 +#define IB_SIDR_REP_PDATA_SIZE_VER1 140 + +#define IB_ARI_SIZE 72 // redefine +#define IB_APR_INFO_SIZE 72 + +/****d* Access Layer/ib_rej_status_t +* NAME +* ib_rej_status_t +* +* DESCRIPTION +* Rejection reasons. 
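+*
+*	Note (added): the IB_REJ_* constants below are defined with CL_HTON16
+*	and are therefore already in network order, so a REJ reason taken from
+*	the wire can be compared against them directly, e.g.:
+*
+*		if( rej_status == IB_REJ_STALE_CONN )	// rej_status: an ib_rej_status_t from the REJ MAD
+*			recover_stale_connection();		// hypothetical handler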
+* +* SYNOPSIS +*/ +typedef ib_net16_t ib_rej_status_t; +/* +* SEE ALSO +* ib_cm_rej, ib_cm_rej_rec_t +* +* SOURCE +*/ +#define IB_REJ_INSUF_QP CL_HTON16(1) +#define IB_REJ_INSUF_EEC CL_HTON16(2) +#define IB_REJ_INSUF_RESOURCES CL_HTON16(3) +#define IB_REJ_TIMEOUT CL_HTON16(4) +#define IB_REJ_UNSUPPORTED CL_HTON16(5) +#define IB_REJ_INVALID_COMM_ID CL_HTON16(6) +#define IB_REJ_INVALID_COMM_INSTANCE CL_HTON16(7) +#define IB_REJ_INVALID_SID CL_HTON16(8) +#define IB_REJ_INVALID_XPORT CL_HTON16(9) +#define IB_REJ_STALE_CONN CL_HTON16(10) +#define IB_REJ_RDC_NOT_EXIST CL_HTON16(11) +#define IB_REJ_INVALID_GID CL_HTON16(12) +#define IB_REJ_INVALID_LID CL_HTON16(13) +#define IB_REJ_INVALID_SL CL_HTON16(14) +#define IB_REJ_INVALID_TRAFFIC_CLASS CL_HTON16(15) +#define IB_REJ_INVALID_HOP_LIMIT CL_HTON16(16) +#define IB_REJ_INVALID_PKT_RATE CL_HTON16(17) +#define IB_REJ_INVALID_ALT_GID CL_HTON16(18) +#define IB_REJ_INVALID_ALT_LID CL_HTON16(19) +#define IB_REJ_INVALID_ALT_SL CL_HTON16(20) +#define IB_REJ_INVALID_ALT_TRAFFIC_CLASS CL_HTON16(21) +#define IB_REJ_INVALID_ALT_HOP_LIMIT CL_HTON16(22) +#define IB_REJ_INVALID_ALT_PKT_RATE CL_HTON16(23) +#define IB_REJ_PORT_REDIRECT CL_HTON16(24) +#define IB_REJ_INVALID_MTU CL_HTON16(26) +#define IB_REJ_INSUFFICIENT_RESP_RES CL_HTON16(27) +#define IB_REJ_USER_DEFINED CL_HTON16(28) +#define IB_REJ_INVALID_RNR_RETRY CL_HTON16(29) +#define IB_REJ_DUPLICATE_LOCAL_COMM_ID CL_HTON16(30) +#define IB_REJ_INVALID_CLASS_VER CL_HTON16(31) +#define IB_REJ_INVALID_FLOW_LBL CL_HTON16(32) +#define IB_REJ_INVALID_ALT_FLOW_LBL CL_HTON16(33) + +#define IB_REJ_SERVICE_HANDOFF CL_HTON16(65535) +/******/ + +/****d* Access Layer/ib_apr_status_t +* NAME +* ib_apr_status_t +* +* DESCRIPTION +* Automatic path migration status information. +* +* SYNOPSIS +*/ +typedef uint8_t ib_apr_status_t; +/* +* SEE ALSO +* ib_cm_apr, ib_cm_apr_rec_t +* +* SOURCE + */ +#define IB_AP_SUCCESS 0 +#define IB_AP_INVALID_COMM_ID 1 +#define IB_AP_UNSUPPORTED 2 +#define IB_AP_REJECT 3 +#define IB_AP_REDIRECT 4 +#define IB_AP_IS_CURRENT 5 +#define IB_AP_INVALID_QPN_EECN 6 +#define IB_AP_INVALID_LID 7 +#define IB_AP_INVALID_GID 8 +#define IB_AP_INVALID_FLOW_LBL 9 +#define IB_AP_INVALID_TCLASS 10 +#define IB_AP_INVALID_HOP_LIMIT 11 +#define IB_AP_INVALID_PKT_RATE 12 +#define IB_AP_INVALID_SL 13 +/******/ + +/****d* Access Layer/ib_cm_cap_mask_t +* NAME +* ib_cm_cap_mask_t +* +* DESCRIPTION +* Capability mask values in ClassPortInfo. 
+* +* SYNOPSIS +*/ +#define IB_CM_RELIABLE_CONN_CAPABLE CL_HTON16(9) +#define IB_CM_RELIABLE_DGRM_CAPABLE CL_HTON16(10) +#define IB_CM_RDGRM_CAPABLE CL_HTON16(11) +#define IB_CM_UNRELIABLE_CONN_CAPABLE CL_HTON16(12) +#define IB_CM_SIDR_CAPABLE CL_HTON16(13) +/* +* SEE ALSO +* ib_cm_rep, ib_class_port_info_t +* +* SOURCE +* +*******/ + +/* + * Service ID resolution status + */ +typedef uint16_t ib_sidr_status_t; +#define IB_SIDR_SUCCESS 0 +#define IB_SIDR_UNSUPPORTED 1 +#define IB_SIDR_REJECT 2 +#define IB_SIDR_NO_QP 3 +#define IB_SIDR_REDIRECT 4 +#define IB_SIDR_UNSUPPORTED_VER 5 + +/* + * The following definitions are shared between the Access Layer and VPD + */ + + +typedef struct _ib_ca* __ptr64 ib_ca_handle_t; +typedef struct _ib_pd* __ptr64 ib_pd_handle_t; +typedef struct _ib_rdd* __ptr64 ib_rdd_handle_t; +typedef struct _ib_mr* __ptr64 ib_mr_handle_t; +typedef struct _ib_mw* __ptr64 ib_mw_handle_t; +typedef struct _ib_qp* __ptr64 ib_qp_handle_t; +typedef struct _ib_eec* __ptr64 ib_eec_handle_t; +typedef struct _ib_cq* __ptr64 ib_cq_handle_t; +typedef struct _ib_av* __ptr64 ib_av_handle_t; +typedef struct _ib_mcast* __ptr64 ib_mcast_handle_t; + +/* Currently for windows branch, use the extended version of ib special verbs struct + in order to be compliant with Infinicon ib_types; later we'll change it to support + OpenSM ib_types.h */ + +#ifndef WIN32 +/****d* Access Layer/ib_api_status_t +* NAME +* ib_api_status_t +* +* DESCRIPTION +* Function return codes indicating the success or failure of an API call. +* Note that success is indicated by the return value IB_SUCCESS, which +* is always zero. +* +* NOTES +* IB_VERBS_PROCESSING_DONE is used by UVP library to terminate a verbs call +* in the pre-ioctl step itself. +* +* SYNOPSIS +*/ +typedef enum _ib_api_status_t +{ + IB_SUCCESS, + IB_INSUFFICIENT_RESOURCES, + IB_INSUFFICIENT_MEMORY, + IB_INVALID_PARAMETER, + IB_INVALID_SETTING, + IB_NOT_FOUND, + IB_TIMEOUT, + IB_CANCELED, + IB_INTERRUPTED, + IB_INVALID_PERMISSION, + IB_UNSUPPORTED, + IB_OVERFLOW, + IB_MAX_MCAST_QPS_REACHED, + IB_INVALID_QP_STATE, + IB_INVALID_EEC_STATE, + IB_INVALID_APM_STATE, + IB_INVALID_PORT_STATE, + IB_INVALID_STATE, + IB_RESOURCE_BUSY, + IB_INVALID_PKEY, + IB_INVALID_LKEY, + IB_INVALID_RKEY, + IB_INVALID_MAX_WRS, + IB_INVALID_MAX_SGE, + IB_INVALID_CQ_SIZE, + IB_INVALID_SERVICE_TYPE, + IB_INVALID_GID, + IB_INVALID_LID, + IB_INVALID_GUID, + IB_INVALID_CA_HANDLE, + IB_INVALID_AV_HANDLE, + IB_INVALID_CQ_HANDLE, + IB_INVALID_EEC_HANDLE, + IB_INVALID_QP_HANDLE, + IB_INVALID_PD_HANDLE, + IB_INVALID_MR_HANDLE, + IB_INVALID_MW_HANDLE, + IB_INVALID_RDD_HANDLE, + IB_INVALID_MCAST_HANDLE, + IB_INVALID_CALLBACK, + IB_INVALID_AL_HANDLE, /* InfiniBand Access Layer */ + IB_INVALID_HANDLE, /* InfiniBand Access Layer */ + IB_ERROR, /* InfiniBand Access Layer */ + IB_REMOTE_ERROR, /* Infiniband Access Layer */ + IB_VERBS_PROCESSING_DONE, /* See Notes above */ + IB_INVALID_WR_TYPE, + IB_QP_IN_TIMEWAIT, + IB_EE_IN_TIMEWAIT, + IB_INVALID_PORT, + IB_NOT_DONE, + IB_UNKNOWN_ERROR /* ALWAYS LAST ENUM VALUE! */ + +} ib_api_status_t; +/*****/ + +OSM_EXPORT const char* ib_error_str[]; + +/****f* IBA Base: Types/ib_get_err_str +* NAME +* ib_get_err_str +* +* DESCRIPTION +* Returns a string for the specified status value. 
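+*
+*	For example (an added illustration):
+*		fprintf( stderr, "query failed: %s\n", ib_get_err_str( status ) );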
+* +* SYNOPSIS +*/ +static inline const char* OSM_API +ib_get_err_str( + IN ib_api_status_t status ) +{ + if( status > IB_UNKNOWN_ERROR ) + status = IB_UNKNOWN_ERROR; + return( ib_error_str[status] ); +} +/* +* PARAMETERS +* status +* [in] status value +* +* RETURN VALUES +* Pointer to the status description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****d* Verbs/ib_async_event_t +* NAME +* ib_async_event_t -- Async event types +* +* DESCRIPTION +* This type indicates the reason the async callback was called. +* The context in the ib_event_rec_t indicates the resource context +* that associated with the callback. For example, for IB_AE_CQ_ERROR +* the context provided during the ib_create_cq is returned in the event. +* +* SYNOPSIS +*/ +typedef enum _ib_async_event_t +{ + IB_AE_SQ_ERROR = 1, + IB_AE_SQ_DRAINED, + IB_AE_RQ_ERROR, + IB_AE_CQ_ERROR, + IB_AE_QP_FATAL, + IB_AE_QP_COMM, + IB_AE_QP_APM, + IB_AE_EEC_FATAL, + IB_AE_EEC_COMM, + IB_AE_EEC_APM, + IB_AE_LOCAL_FATAL, + IB_AE_PKEY_TRAP, + IB_AE_QKEY_TRAP, + IB_AE_MKEY_TRAP, + IB_AE_PORT_TRAP, + IB_AE_SYSIMG_GUID_TRAP, + IB_AE_BUF_OVERRUN, + IB_AE_LINK_INTEGRITY, + IB_AE_FLOW_CTRL_ERROR, + IB_AE_BKEY_TRAP, + IB_AE_QP_APM_ERROR, + IB_AE_EEC_APM_ERROR, + IB_AE_WQ_REQ_ERROR, + IB_AE_WQ_ACCESS_ERROR, + IB_AE_PORT_ACTIVE, + IB_AE_PORT_DOWN, + IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */ + +} ib_async_event_t; +/* +* VALUES +* IB_AE_SQ_ERROR +* An error occurred when accessing the send queue of the QP or EEC. +* This event is optional. +* +* IB_AE_SQ_DRAINED +* The send queue of the specified QP has completed the outstanding +* messages in progress when the state change was requested and, if +* applicable, has received all acknowledgements for those messages. +* +* IB_AE_RQ_ERROR +* An error occurred when accessing the receive queue of the QP or EEC. +* This event is optional. +* +* IB_AE_CQ_ERROR +* An error occurred when writing an entry to the CQ. +* +* IB_AE_QP_FATAL +* A catastrophic error occurred while accessing or processing the +* work queue that prevents reporting of completions. +* +* IB_AE_QP_COMM +* The first packet has arrived for the receive work queue where the +* QP is still in the RTR state. +* +* IB_AE_QP_APM +* If alternate path migration is supported, this event indicates that +* the QP connection has migrated to the alternate path. +* +* IB_AE_EEC_FATAL +* If reliable datagram service is supported, this event indicates that +* a catastrophic error occurred while accessing or processing the EEC +* that prevents reporting of completions. +* +* IB_AE_EEC_COMM +* If reliable datagram service is supported, this event indicates that +* the first packet has arrived for the receive work queue where the +* EEC is still in the RTR state. +* +* IB_AE_EEC_APM +* If reliable datagram service and alternate path migration is supported, +* this event indicates that the EEC connection has migrated to the +* alternate path. +* +* IB_AE_LOCAL_FATAL +* A catastrophic HCA error occurred which cannot be attributed to any +* resource; behavior is indeterminate. +* +* IB_AE_PKEY_TRAP +* A PKEY violation was detected. This event is optional. +* +* IB_AE_QKEY_TRAP +* A QKEY violation was detected. This event is optional. +* +* IB_AE_MKEY_TRAP +* An MKEY violation was detected. This event is optional. +* +* IB_AE_PORT_TRAP +* A port capability change was detected. This event is optional. +* +* IB_AE_SYSIMG_GUID_TRAP +* If the system image GUID is supported, this event indicates that the +* system image GUID of this HCA has been changed. 
This event is +* optional. +* +* IB_AE_BUF_OVERRUN +* The number of consecutive flow control update periods with at least +* one overrun error in each period has exceeded the threshold specified +* in the port info attributes. This event is optional. +* +* IB_AE_LINK_INTEGRITY +* The detection of excessively frequent local physical errors has +* exceeded the threshold specified in the port info attributes. This +* event is optional. +* +* IB_AE_FLOW_CTRL_ERROR +* An HCA watchdog timer monitoring the arrival of flow control updates +* has expired without receiving an update. This event is optional. +* +* IB_AE_BKEY_TRAP +* An BKEY violation was detected. This event is optional. +* +* IB_AE_QP_APM_ERROR +* If alternate path migration is supported, this event indicates that +* an incoming path migration request to this QP was not accepted. +* +* IB_AE_EEC_APM_ERROR +* If reliable datagram service and alternate path migration is supported, +* this event indicates that an incoming path migration request to this +* EEC was not accepted. +* +* IB_AE_WQ_REQ_ERROR +* An OpCode violation was detected at the responder. +* +* IB_AE_WQ_ACCESS_ERROR +* An access violation was detected at the responder. +* +* IB_AE_PORT_ACTIVE +* If the port active event is supported, this event is generated +* when the link becomes active: IB_LINK_ACTIVE. +* +* IB_AE_PORT_DOWN +* The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED, +* IB_LINK_DOWN. +* +* IB_AE_UNKNOWN +* An unknown error occurred which cannot be attributed to any +* resource; behavior is indeterminate. +* +*****/ + +OSM_EXPORT const char* ib_async_event_str[]; + +/****f* IBA Base: Types/ib_get_async_event_str +* NAME +* ib_get_async_event_str +* +* DESCRIPTION +* Returns a string for the specified asynchronous event. +* +* SYNOPSIS +*/ +static inline const char* OSM_API +ib_get_async_event_str( + IN ib_async_event_t event ) +{ + if( event > IB_AE_UNKNOWN ) + event = IB_AE_UNKNOWN; + return( ib_async_event_str[event] ); +} +/* +* PARAMETERS +* event +* [in] event value +* +* RETURN VALUES +* Pointer to the asynchronous event description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****s* Verbs/ib_event_rec_t +* NAME +* ib_event_rec_t -- Async event notification record +* +* DESCRIPTION +* When an async event callback is made, this structure is passed to indicate +* the type of event, the source of event that caused it, and the context +* associated with this event. +* +* context -- Context of the resource that caused the event. +* -- ca_context if this is a port/adapter event. +* -- qp_context if the source is a QP event +* -- cq_context if the source is a CQ event. +* -- ee_context if the source is an EE event. +* +* SYNOPSIS +*/ +typedef struct _ib_event_rec +{ + void *context; + ib_async_event_t type; + + /* HCA vendor specific event information. */ + uint64_t vendor_specific; + + /* The following structures are valid only for trap types. */ + union _trap + { + struct + { + uint16_t lid; + ib_net64_t port_guid; + uint8_t port_num; + + /* + * The following structure is valid only for + * P_KEY, Q_KEY, and M_KEY violation traps. 
+ */ + struct + { + uint8_t sl; + uint16_t src_lid; + uint16_t dest_lid; + union _key + { + uint16_t pkey; + uint32_t qkey; + uint64_t mkey; + } key; + uint32_t src_qp; + uint32_t dest_qp; + ib_gid_t src_gid; + ib_gid_t dest_gid; + + } violation; + + } info; + + ib_net64_t sysimg_guid; + + } trap; + +} ib_event_rec_t; +/*******/ + +/****d* Access Layer/ib_atomic_t +* NAME +* ib_atomic_t +* +* DESCRIPTION +* Indicates atomicity levels supported by an adapter. +* +* SYNOPSIS +*/ +typedef enum _ib_atomic_t +{ + IB_ATOMIC_NONE, + IB_ATOMIC_LOCAL, + IB_ATOMIC_GLOBAL + +} ib_atomic_t; +/* +* VALUES +* IB_ATOMIC_NONE +* Atomic operations not supported. +* +* IB_ATOMIC_LOCAL +* Atomic operations guaranteed between QPs of a single CA. +* +* IB_ATOMIC_GLOBAL +* Atomic operations are guaranteed between CA and any other entity +* in the system. +*****/ + +/****s* Access Layer/ib_port_cap_t +* NAME +* ib_port_cap_t +* +* DESCRIPTION +* Indicates which management agents are currently available on the specified +* port. +* +* SYNOPSIS +*/ +typedef struct _ib_port_cap +{ + boolean_t cm; + boolean_t snmp; + boolean_t dev_mgmt; + boolean_t vend; + boolean_t sm; + boolean_t sm_disable; + boolean_t qkey_ctr; + boolean_t pkey_ctr; + boolean_t notice; + boolean_t trap; + boolean_t apm; + boolean_t slmap; + boolean_t pkey_nvram; + boolean_t mkey_nvram; + boolean_t sysguid; + boolean_t dr_notice; + boolean_t boot_mgmt; + boolean_t capm_notice; + boolean_t reinit; + boolean_t ledinfo; + boolean_t port_active; + +} ib_port_cap_t; +/*****/ + +/****d* Access Layer/ib_init_type_t +* NAME +* ib_init_type_t +* +* DESCRIPTION +* If supported by the HCA, the type of initialization requested by +* this port before SM moves it to the active or armed state. If the +* SM implements reinitialization, it shall set these bits to indicate +* the type of initialization performed prior to activating the port. +* Otherwise, these bits shall be set to 0. +* +* SYNOPSIS +*/ +typedef uint8_t ib_init_type_t; +#define IB_INIT_TYPE_NO_LOAD 0x01 +#define IB_INIT_TYPE_PRESERVE_CONTENT 0x02 +#define IB_INIT_TYPE_PRESERVE_PRESENCE 0x04 +#define IB_INIT_TYPE_DO_NOT_RESUSCITATE 0x08 +/*****/ + +/****s* Access Layer/ib_port_attr_mod_t +* NAME +* ib_port_attr_mod_t +* +* DESCRIPTION +* Port attributes that may be modified. +* +* SYNOPSIS +*/ +typedef struct _ib_port_attr_mod +{ + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + ib_init_type_t init_type; + ib_net64_t system_image_guid; + +} ib_port_attr_mod_t; +/* +* SEE ALSO +* ib_port_cap_t +*****/ + +/****s* Access Layer/ib_port_attr_t +* NAME +* ib_port_attr_t +* +* DESCRIPTION +* Information about a port on a given channel adapter. +* +* SYNOPSIS +*/ +typedef struct _ib_port_attr +{ + ib_net64_t port_guid; + uint8_t port_num; + uint8_t mtu; + uint64_t max_msg_size; + ib_net16_t lid; + uint8_t lmc; + + /* + * LinkWidthSupported as defined in PortInfo. Required to calculate + * inter-packet delay (a.k.a. static rate). + */ + uint8_t link_width_supported; + + uint16_t max_vls; + + ib_net16_t sm_lid; + uint8_t sm_sl; + uint8_t link_state; + + ib_init_type_t init_type_reply; /* Optional */ + + /* + * subnet_timeout: + * The maximum expected subnet propagation delay to reach any port on + * the subnet. This value also determines the rate at which traps can + * be generated from this node. 
+ * + * timeout = 4.096 microseconds * 2^subnet_timeout + */ + uint8_t subnet_timeout; + + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + uint16_t num_gids; + uint16_t num_pkeys; + /* + * Pointers at the end of the structure to allow doing a simple + * memory comparison of contents up to the first pointer. + */ + ib_gid_t *p_gid_table; + ib_net16_t *p_pkey_table; + +} ib_port_attr_t; +/* +* SEE ALSO +* uint8_t, ib_port_cap_t, ib_link_states_t +*****/ + +/****s* Access Layer/ib_ca_attr_t +* NAME +* ib_ca_attr_t +* +* DESCRIPTION +* Information about a channel adapter. +* +* SYNOPSIS +*/ +typedef struct _ib_ca_attr +{ + ib_net64_t ca_guid; + + uint32_t vend_id; + uint16_t dev_id; + uint16_t revision; + uint64_t fw_ver; + + /* + * Total size of the ca attributes in bytes + */ + uint32_t size; + uint32_t max_qps; + uint32_t max_wrs; + + uint32_t max_sges; + uint32_t max_rd_sges; + + uint32_t max_cqs; + uint32_t max_cqes; + + uint32_t max_pds; + + uint32_t init_regions; + uint64_t init_region_size; + + uint32_t init_windows; + uint32_t max_addr_handles; + + uint32_t max_partitions; + + ib_atomic_t atomicity; + + uint8_t max_qp_resp_res; + uint8_t max_eec_resp_res; + uint8_t max_resp_res; + + uint8_t max_qp_init_depth; + uint8_t max_eec_init_depth; + + uint32_t max_eecs; + uint32_t max_rdds; + + uint32_t max_ipv6_qps; + uint32_t max_ether_qps; + + uint32_t max_mcast_grps; + uint32_t max_mcast_qps; + uint32_t max_qps_per_mcast_grp; + uint32_t max_fmr; + uint32_t max_map_per_fmr; + + /* + * local_ack_delay: + * Specifies the maximum time interval between the local CA receiving + * a message and the transmission of the associated ACK or NAK. + * + * timeout = 4.096 microseconds * 2^local_ack_delay + */ + uint8_t local_ack_delay; + + boolean_t bad_pkey_ctr_support; + boolean_t bad_qkey_ctr_support; + boolean_t raw_mcast_support; + boolean_t apm_support; + boolean_t av_port_check; + boolean_t change_primary_port; + boolean_t modify_wr_depth; + boolean_t current_qp_state_support; + boolean_t shutdown_port_capability; + boolean_t init_type_support; + boolean_t port_active_event_support; + boolean_t system_image_guid_support; + boolean_t hw_agents; + + ib_net64_t system_image_guid; + + uint32_t num_page_sizes; + uint8_t num_ports; + + uint32_t *p_page_size; + ib_port_attr_t *p_port_attr; + +} ib_ca_attr_t; +/* +* FIELDS +* ca_guid +* GUID for this adapter. +* +* vend_id +* IEEE vendor ID for this adapter +* +* dev_id +* Device ID of this adapter. (typically from PCI device ID) +* +* revision +* Revision ID of this adapter +* +* fw_ver +* Device Firmware version. +* +* size +* Total size in bytes for the HCA attributes. This size includes total +* size required for all the variable members of the structure. If a +* vendor requires to pass vendor specific fields beyond this structure, +* the HCA vendor can choose to report a larger size. If a vendor is +* reporting extended vendor specific features, they should also provide +* appropriate access functions to aid with the required interpretation. +* +* max_qps +* Maximum number of QP's supported by this HCA. +* +* max_wrs +* Maximum number of work requests supported by this HCA. +* +* max_sges +* Maximum number of scatter gather elements supported per work request. +* +* max_rd_sges +* Maximum number of scatter gather elements supported for READ work +* requests for a Reliable Datagram QP. This value must be zero if RD +* service is not supported. +* +* max_cqs +* Maximum number of Completion Queues supported. 
+*
+*	max_cqes
+*		Maximum number of CQ elements supported per CQ.
+*
+*	max_pds
+*		Maximum number of protection domains supported.
+*
+*	init_regions
+*		Initial number of memory regions supported.  These are only informative
+*		values.  HCA vendors can extend and grow these limits on demand.
+*
+*	init_region_size
+*		Initial limit on the size of the registered memory region.
+*
+*	init_windows
+*		Initial number of window entries supported.
+*
+*	max_addr_handles
+*		Maximum number of address handles supported.
+*
+*	max_partitions
+*		Maximum number of partitions supported.
+*
+*	atomicity
+*		Indicates level of atomic operations supported by this HCA.
+*
+*	max_qp_resp_res
+*	max_eec_resp_res
+*		Maximum limit on number of responder resources for incoming RDMA
+*		operations, on QPs and EEC's respectively.
+*
+*	max_resp_res
+*		Maximum number of responder resources per HCA, with this HCA used as
+*		the target.
+*
+*	max_qp_init_depth
+*	max_eec_init_depth
+*		Maximum initiator depth per QP or EEC for initiating RDMA reads and
+*		atomic operations.
+*
+*	max_eecs
+*		Maximum number of EEC's supported by the HCA.
+*
+*	max_rdds
+*		Maximum number of Reliable datagram domains supported.
+*
+*	max_ipv6_qps
+*	max_ether_qps
+*		Maximum number of IPV6 and raw ether QP's supported by this HCA.
+*
+*	max_mcast_grps
+*		Maximum number of multicast groups supported.
+*
+*	max_mcast_qps
+*		Maximum number of QP's that can support multicast operations.
+*
+*	max_qps_per_mcast_grp
+*		Maximum number of multicast QP's per multicast group.
+*
+*	local_ack_delay
+*		Specifies the maximum time interval between the local CA receiving
+*		a message and the transmission of the associated ACK or NAK.
+*		timeout = 4.096 microseconds * 2^local_ack_delay
+*
+*	bad_pkey_ctr_support
+*	bad_qkey_ctr_support
+*		Indicates support for the bad pkey and qkey counters.
+*
+*	raw_mcast_support
+*		Indicates support for raw packet multicast.
+*
+*	apm_support
+*		Indicates support for Automatic Path Migration.
+*
+*	av_port_check
+*		Indicates ability to check port number in address handles.
+*
+*	change_primary_port
+*		Indicates ability to change primary port for a QP or EEC during a
+*		SQD->RTS transition.
+*
+*	modify_wr_depth
+*		Indicates ability to modify QP depth during a modify QP operation.
+*		Check the verb specification for permitted states.
+*
+*	current_qp_state_support
+*		Indicates ability of the HCA to support the current QP state modifier
+*		during a modify QP operation.
+*
+*	shutdown_port_capability
+*		Shutdown port capability support indicator.
+*
+*	init_type_support
+*		Indicates init_type_reply and ability to set init_type is supported.
+*
+*	port_active_event_support
+*		Port active event support indicator.
+*
+*	system_image_guid_support
+*		System image GUID support indicator.
+*
+*	hw_agents
+*		Indicates SMA is implemented in HW.
+*
+*	system_image_guid
+*		Optional system image GUID.  This field is valid only if the
+*		system_image_guid_support flag is set.
+*
+*	num_page_sizes
+*		Number of different page sizes supported by the HCA.
+*		The variable size array can be obtained from p_page_size.
+*
+*	num_ports
+*		Number of physical ports supported on this HCA.
+*
+*	p_page_size
+*		Array holding the different page sizes supported.
+*
+*	p_port_attr
+*		Array holding port attributes.
+*
+* NOTES
+*	This structure contains the attributes of a channel adapter.  Users must
+*	call ib_copy_ca_attr to copy the contents of this structure to a new
+*	memory region.
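+*
+*	A minimal sketch (added; p_attr is a previously obtained attribute
+*	block and cl_zalloc is the complib allocator):
+*
+*		ib_ca_attr_t *p_copy = (ib_ca_attr_t*)cl_zalloc( p_attr->size );
+*		if( p_copy )
+*			ib_copy_ca_attr( p_copy, p_attr );
+*
+*	A single allocation of size bytes suffices because size accounts for
+*	the variable-length GID, pkey, page size and port attribute arrays.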
+* +* SEE ALSO +* ib_port_attr_t, ib_atomic_t, ib_copy_ca_attr +*****/ + +/****f* Access layer/ib_copy_ca_attr +* NAME +* ib_copy_ca_attr +* +* DESCRIPTION +* Copies CA attributes. +* +* SYNOPSIS +*/ +ib_ca_attr_t* +ib_copy_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ); +/* +* PARAMETERS +* p_dest +* Pointer to the buffer that is the destination of the copy. +* +* p_src +* Pointer to the CA attributes to copy. +* +* RETURN VALUE +* Pointer to the copied CA attributes. +* +* NOTES +* The buffer pointed to by the p_dest parameter must be at least the size +* specified in the size field of the buffer pointed to by p_src. +* +* SEE ALSO +* ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr +*****/ + +/****s* Access Layer/ib_av_attr_t +* NAME +* ib_av_attr_t +* +* DESCRIPTION +* IBA address vector. +* +* SYNOPSIS +*/ +typedef struct _ib_av_attr +{ + uint8_t port_num; + + uint8_t sl; + ib_net16_t dlid; + + boolean_t grh_valid; + ib_grh_t grh; + uint8_t static_rate; + uint8_t path_bits; + + struct _av_conn + { + uint8_t path_mtu; + uint8_t local_ack_timeout; + uint8_t seq_err_retry_cnt; + uint8_t rnr_retry_cnt; + + } conn; + +} ib_av_attr_t; +/* +* SEE ALSO +* ib_gid_t +*****/ + +/****d* Access Layer/ib_qp_type_t +* NAME +* ib_qp_type_t +* +* DESCRIPTION +* Indicates the type of queue pair being created. +* +* SYNOPSIS +*/ +typedef enum _ib_qp_type +{ + IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */ + IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */ + IB_QPT_RELIABLE_DGRM = 2, /* Matches CM REQ transport type */ + IB_QPT_UNRELIABLE_DGRM, + IB_QPT_QP0, + IB_QPT_QP1, + IB_QPT_RAW_IPV6, + IB_QPT_RAW_ETHER, + IB_QPT_MAD, /* InfiniBand Access Layer */ + IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */ + IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */ + +} ib_qp_type_t; +/* +* VALUES +* IB_QPT_RELIABLE_CONN +* Reliable, connected queue pair. +* +* IB_QPT_UNRELIABLE_CONN +* Unreliable, connected queue pair. +* +* IB_QPT_RELIABLE_DGRM +* Reliable, datagram queue pair. +* +* IB_QPT_UNRELIABLE_DGRM +* Unreliable, datagram queue pair. +* +* IB_QPT_QP0 +* Queue pair 0. +* +* IB_QPT_QP1 +* Queue pair 1. +* +* IB_QPT_RAW_DGRM +* Raw datagram queue pair. +* +* IB_QPT_RAW_IPV6 +* Raw IP version 6 queue pair. +* +* IB_QPT_RAW_ETHER +* Raw Ethernet queue pair. +* +* IB_QPT_MAD +* Unreliable, datagram queue pair that will send and receive management +* datagrams with assistance from the access layer. +* +* IB_QPT_QP0_ALIAS +* Alias to queue pair 0. Aliased QPs can only be created on an aliased +* protection domain. +* +* IB_QPT_QP1_ALIAS +* Alias to queue pair 1. Aliased QPs can only be created on an aliased +* protection domain. +*****/ + +/****d* Access Layer/ib_access_t +* NAME +* ib_access_t +* +* DESCRIPTION +* Indicates the type of access is permitted on resources such as QPs, +* memory regions and memory windows. +* +* SYNOPSIS +*/ +typedef uint32_t ib_access_t; +#define IB_AC_RDMA_READ 0x00000001 +#define IB_AC_RDMA_WRITE 0x00000002 +#define IB_AC_ATOMIC 0x00000004 +#define IB_AC_LOCAL_WRITE 0x00000008 +#define IB_AC_MW_BIND 0x00000010 +/* +* NOTES +* Users may combine access rights using a bit-wise or operation to specify +* additional access. For example: IB_AC_RDMA_READ | IB_AC_RDMA_WRITE grants +* RDMA read and write access. +*****/ + +/****d* Access Layer/ib_qp_state_t +* NAME +* ib_qp_state_t +* +* DESCRIPTION +* Indicates or sets the state of a queue pair. 
The current state of a queue +* pair is returned through the ib_qp_query call and set via the +* ib_qp_modify call. +* +* SYNOPSIS +*/ +typedef uint32_t ib_qp_state_t; +#define IB_QPS_RESET 0x00000001 +#define IB_QPS_INIT 0x00000002 +#define IB_QPS_RTR 0x00000004 +#define IB_QPS_RTS 0x00000008 +#define IB_QPS_SQD 0x00000010 +#define IB_QPS_SQD_DRAINING 0x00000030 +#define IB_QPS_SQD_DRAINED 0x00000050 +#define IB_QPS_SQERR 0x00000080 +#define IB_QPS_ERROR 0x00000100 +#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */ +/*****/ + +/****d* Access Layer/ib_apm_state_t +* NAME +* ib_apm_state_t +* +* DESCRIPTION +* The current automatic path migration state of a queue pair +* +* SYNOPSIS +*/ +typedef enum _ib_apm_state +{ + IB_APM_MIGRATED = 1, + IB_APM_REARM, + IB_APM_ARMED + +} ib_apm_state_t; +/*****/ + +/****s* Access Layer/ib_qp_create_t +* NAME +* ib_qp_create_t +* +* DESCRIPTION +* Attributes used to initialize a queue pair at creation time. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_create +{ + ib_qp_type_t qp_type; + + ib_rdd_handle_t h_rdd; + + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; + + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + + boolean_t sq_signaled; + +} ib_qp_create_t; +/* +* FIELDS +* type +* Specifies the type of queue pair to create. +* +* h_rdd +* A handle to a reliable datagram domain to associate with the queue +* pair. This field is ignored if the queue pair is not a reliable +* datagram type queue pair. +* +* sq_depth +* Indicates the requested maximum number of work requests that may be +* outstanding on the queue pair's send queue. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* rq_depth +* Indicates the requested maximum number of work requests that may be +* outstanding on the queue pair's receive queue. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* sq_sge +* Indicates the maximum number scatter-gather elements that may be +* given in a send work request. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* rq_sge +* Indicates the maximum number scatter-gather elements that may be +* given in a receive work request. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* h_sq_cq +* A handle to the completion queue that will be used to report send work +* request completions. This handle must be NULL if the type is +* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS. +* +* h_rq_cq +* A handle to the completion queue that will be used to report receive +* work request completions. This handle must be NULL if the type is +* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS. +* +* sq_signaled +* A flag that is used to indicate whether the queue pair will signal +* an event upon completion of a send work request. If set to +* TRUE, send work requests will always generate a completion +* event. If set to FALSE, a completion event will only be +* generated if the send_opt field of the send work request has the +* IB_SEND_OPT_SIGNALED flag set. +* +* SEE ALSO +* ib_qp_type_t, ib_qp_attr_t +*****/ + +/****s* Access Layer/ib_qp_attr_t +* NAME +* ib_qp_attr_t +* +* DESCRIPTION +* Queue pair attributes returned through ib_query_qp. 
+* +* SYNOPSIS +*/ +typedef struct _ib_qp_attr +{ + ib_pd_handle_t h_pd; + ib_qp_type_t qp_type; + ib_access_t access_ctrl; + uint16_t pkey_index; + + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; + uint8_t init_depth; + uint8_t resp_res; + + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + ib_rdd_handle_t h_rdd; + + boolean_t sq_signaled; + + ib_qp_state_t state; + ib_net32_t num; + ib_net32_t dest_num; + ib_net32_t qkey; + + ib_net32_t sq_psn; + ib_net32_t rq_psn; + + uint8_t primary_port; + uint8_t alternate_port; + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; + +} ib_qp_attr_t; +/* +* FIELDS +* h_pd +* This is a handle to a protection domain associated with the queue +* pair, or NULL if the queue pair is type IB_QPT_RELIABLE_DGRM. +* +* NOTES +* Other fields are defined by the Infiniband specification. +* +* SEE ALSO +* ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t +*****/ + +/****d* Access Layer/ib_qp_opts_t +* NAME +* ib_qp_opts_t +* +* DESCRIPTION +* Optional fields supplied in the modify QP operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_qp_opts_t; +#define IB_MOD_QP_ALTERNATE_AV 0x00000001 +#define IB_MOD_QP_PKEY 0x00000002 +#define IB_MOD_QP_APM_STATE 0x00000004 +#define IB_MOD_QP_PRIMARY_AV 0x00000008 +#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010 +#define IB_MOD_QP_RESP_RES 0x00000020 +#define IB_MOD_QP_INIT_DEPTH 0x00000040 +#define IB_MOD_QP_PRIMARY_PORT 0x00000080 +#define IB_MOD_QP_ACCESS_CTRL 0x00000100 +#define IB_MOD_QP_QKEY 0x00000200 +#define IB_MOD_QP_SQ_DEPTH 0x00000400 +#define IB_MOD_QP_RQ_DEPTH 0x00000800 +#define IB_MOD_QP_CURRENT_STATE 0x00001000 +#define IB_MOD_QP_RETRY_CNT 0x00002000 +#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000 +#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000 +/* +* SEE ALSO +* ib_qp_mod_t +*****/ + +/****s* Access Layer/ib_qp_mod_t +* NAME +* ib_qp_mod_t +* +* DESCRIPTION +* Information needed to change the state of a queue pair through the +* ib_modify_qp call. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_mod +{ + ib_qp_state_t req_state; + + union _qp_state + { + struct _qp_reset + { + /* + * Time, in milliseconds, that the QP needs to spend in + * the time wait state before being reused. + */ + uint32_t timewait; + + } reset; + + struct _qp_init + { + ib_qp_opts_t opts; + uint8_t primary_port; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + + } init; + + struct _qp_rtr + { + ib_net32_t rq_psn; + ib_net32_t dest_qp; + ib_av_attr_t primary_av; + uint8_t resp_res; + + ib_qp_opts_t opts; + ib_av_attr_t alternate_av; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + uint32_t sq_depth; + uint32_t rq_depth; + uint8_t rnr_nak_timeout; + + } rtr; + + struct _qp_rts + { + ib_net32_t sq_psn; + uint8_t retry_cnt; + uint8_t rnr_retry_cnt; + uint8_t rnr_nak_timeout; + uint8_t local_ack_timeout; + uint8_t init_depth; + + ib_qp_opts_t opts; + ib_qp_state_t current_state; + ib_net32_t qkey; + ib_access_t access_ctrl; + uint8_t resp_res; + + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + + uint32_t sq_depth; + uint32_t rq_depth; + + ib_apm_state_t apm_state; + uint8_t primary_port; + uint16_t pkey_index; + + } rts; + + struct _qp_sqd + { + boolean_t sqd_event; + + } sqd; + + } state; + +} ib_qp_mod_t; +/* +* SEE ALSO +* ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t +*****/ + +/****s* Access Layer/ib_eec_attr_t +* NAME +* ib_eec_attr_t +* +* DESCRIPTION +* Information about an end-to-end context. 
+* +* SYNOPSIS +*/ +typedef struct _ib_eec_attr +{ + ib_qp_state_t state; + ib_rdd_handle_t h_rdd; + ib_net32_t local_eecn; + + ib_net32_t sq_psn; + ib_net32_t rq_psn; + uint8_t primary_port; + uint16_t pkey_index; + uint32_t resp_res; + ib_net32_t remote_eecn; + uint32_t init_depth; + uint32_t dest_num; // ??? What is this? + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; + +} ib_eec_attr_t; +/* +* SEE ALSO +* ib_qp_state_t, ib_av_attr_t, ib_apm_state_t +*****/ + +/****d* Access Layer/ib_eec_opts_t +* NAME +* ib_eec_opts_t +* +* DESCRIPTION +* Optional fields supplied in the modify EEC operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_eec_opts_t; +#define IB_MOD_EEC_ALTERNATE_AV 0x00000001 +#define IB_MOD_EEC_PKEY 0x00000002 +#define IB_MOD_EEC_APM_STATE 0x00000004 +#define IB_MOD_EEC_PRIMARY_AV 0x00000008 +#define IB_MOD_EEC_RNR 0x00000010 +#define IB_MOD_EEC_RESP_RES 0x00000020 +#define IB_MOD_EEC_OUTSTANDING 0x00000040 +#define IB_MOD_EEC_PRIMARY_PORT 0x00000080 +/* +* NOTES +* +* +*****/ + +/****s* Access Layer/ib_eec_mod_t +* NAME +* ib_eec_mod_t +* +* DESCRIPTION +* Information needed to change the state of an end-to-end context through +* the ib_modify_eec function. +* +* SYNOPSIS +*/ +typedef struct _ib_eec_mod +{ + ib_qp_state_t req_state; + + union _eec_state + { + struct _eec_init + { + uint8_t primary_port; + uint16_t pkey_index; + + } init; + + struct _eec_rtr + { + ib_net32_t rq_psn; + ib_net32_t remote_eecn; + ib_av_attr_t primary_av; + uint8_t resp_res; + + ib_eec_opts_t opts; + ib_av_attr_t alternate_av; + uint16_t pkey_index; + + } rtr; + + struct _eec_rts + { + ib_net32_t sq_psn; + uint8_t retry_cnt; + uint8_t rnr_retry_cnt; + uint8_t local_ack_timeout; + uint8_t init_depth; + + ib_eec_opts_t opts; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; + + ib_av_attr_t primary_av; + uint16_t pkey_index; + uint8_t primary_port; + + } rts; + + struct _eec_sqd + { + boolean_t sqd_event; + + } sqd; + + } state; + +} ib_eec_mod_t; +/* +* SEE ALSO +* ib_qp_state_t, ib_av_attr_t, ib_apm_state_t +*****/ + +/****d* Access Layer/ib_wr_type_t +* NAME +* ib_wr_type_t +* +* DESCRIPTION +* Identifies the type of work request posted to a queue pair. +* +* SYNOPSIS +*/ +typedef enum _ib_wr_type_t +{ + WR_SEND = 1, + WR_RDMA_WRITE, + WR_RDMA_READ, + WR_COMPARE_SWAP, + WR_FETCH_ADD + +} ib_wr_type_t; +/*****/ + +/****s* Access Layer/ib_local_ds_t +* NAME +* ib_local_ds_t +* +* DESCRIPTION +* Local data segment information referenced by send and receive work +* requests. This is used to specify local data buffers used as part of a +* work request. +* +* SYNOPSIS +*/ +typedef struct _ib_local_ds +{ + void *vaddr; + uint32_t length; + uint32_t lkey; + +} ib_local_ds_t; +/*****/ + +/****d* Access Layer/ib_send_opt_t +* NAME +* ib_send_opt_t +* +* DESCRIPTION +* Optional flags used when posting send work requests. These flags +* indicate specific processing for the send operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_send_opt_t; +#define IB_SEND_OPT_IMMEDIATE 0x00000001 +#define IB_SEND_OPT_FENCE 0x00000002 +#define IB_SEND_OPT_SIGNALED 0x00000004 +#define IB_SEND_OPT_SOLICITED 0x00000008 +#define IB_SEND_OPT_INLINE 0x00000010 +#define IB_SEND_OPT_LOCAL 0x00000020 +#define IB_SEND_OPT_VEND_MASK 0xFFFF0000 +/* +* VALUES +* The following flags determine the behavior of a work request when +* posted to the send side. +* +* IB_SEND_OPT_IMMEDIATE +* Send immediate data with the given request. +* +* IB_SEND_OPT_FENCE +* The operation is fenced. 
Complete all pending send operations +* before processing this request. +* +* IB_SEND_OPT_SIGNALED +* If the queue pair is configured for signaled completion, then +* generate a completion queue entry when this request completes. +* +* IB_SEND_OPT_SOLICITED +* Set the solicited bit on the last packet of this request. +* +* IB_SEND_OPT_INLINE +* Indicates that the requested send data should be copied into a VPD +* owned data buffer. This flag permits the user to issue send operations +* without first needing to register the buffer(s) associated with the +* send operation. Verb providers that support this operation may place +* vendor specific restrictions on the size of send operation that may +* be performed as inline. +* +* +* IB_SEND_OPT_LOCAL +* Indicates that a sent MAD request should be given to the local VPD for +* processing. MADs sent using this option are not placed on the wire. +* This send option is only valid for MAD send operations. +* +* +* IB_SEND_OPT_VEND_MASK +* This mask indicates bits reserved in the send options that may be used +* by the verbs provider to indicate vendor specific options. Bits set +* in this area of the send options are ignored by the Access Layer, but +* may have specific meaning to the underlying VPD. +* +*****/ + +/****s* Access Layer/ib_send_wr_t +* NAME +* ib_send_wr_t +* +* DESCRIPTION +* Information used to submit a work request to the send queue of a queue +* pair. +* +* SYNOPSIS +*/ +typedef struct _ib_send_wr +{ + struct _ib_send_wr *p_next; + uint64_t wr_id; + ib_wr_type_t wr_type; + ib_send_opt_t send_opt; + uint32_t num_ds; + ib_local_ds_t *ds_array; + ib_net32_t immediate_data; + + union _send_dgrm + { + struct _send_ud + { + ib_net32_t remote_qp; + ib_net32_t remote_qkey; + ib_av_handle_t h_av; + + } ud; + + struct _send_rd + { + ib_net32_t remote_qp; + ib_net32_t remote_qkey; + ib_net32_t eecn; + + } rd; + + struct _send_raw_ether + { + ib_net16_t dest_lid; + uint8_t path_bits; + uint8_t sl; + uint8_t max_static_rate; + ib_net16_t ether_type; + + } raw_ether; + + struct _send_raw_ipv6 + { + ib_net16_t dest_lid; + uint8_t path_bits; + uint8_t sl; + uint8_t max_static_rate; + + } raw_ipv6; + + } dgrm; + + struct _send_remote_ops + { + uint64_t vaddr; + uint32_t rkey; + + ib_net64_t atomic1; + ib_net64_t atomic2; + + } remote_ops; + +} ib_send_wr_t; +/* +* FIELDS +* p_next +* A pointer used to chain work requests together. This permits multiple +* work requests to be posted to a queue pair through a single function +* call. This value is set to NULL to mark the end of the chain. +* +* wr_id +* A 64-bit work request identifier that is returned to the consumer +* as part of the work completion. +* +* wr_type +* The type of work request being submitted to the send queue. +* +* send_opt +* Optional send control parameters. +* +* num_ds +* Number of local data segments specified by this work request. +* +* ds_array +* A reference to an array of local data segments used by the send +* operation. +* +* immediate_data +* 32-bit field sent as part of a message send or RDMA write operation. +* This field is only valid if the send_opt flag IB_SEND_OPT_IMMEDIATE +* has been set. +* +* dgrm.ud.remote_qp +* Identifies the destination queue pair of an unreliable datagram send +* operation. +* +* dgrm.ud.remote_qkey +* The qkey for the destination queue pair. +* +* dgrm.ud.h_av +* An address vector that specifies the path information used to route +* the outbound datagram to the destination queue pair. 
+*
+*	dgrm.rd.remote_qp
+*		Identifies the destination queue pair of a reliable datagram send
+*		operation.
+*
+*	dgrm.rd.remote_qkey
+*		The qkey for the destination queue pair.
+*
+*	dgrm.rd.eecn
+*		The local end-to-end context number to use with the reliable datagram
+*		send operation.
+*
+*	dgrm.raw_ether.dest_lid
+*		The destination LID that will receive this raw ether send.
+*
+*	dgrm.raw_ether.path_bits
+*		path bits...
+*
+*	dgrm.raw_ether.sl
+*		service level...
+*
+*	dgrm.raw_ether.max_static_rate
+*		static rate...
+*
+*	dgrm.raw_ether.ether_type
+*		ether type...
+*
+*	dgrm.raw_ipv6.dest_lid
+*		The destination LID that will receive this raw IPv6 send.
+*
+*	dgrm.raw_ipv6.path_bits
+*		path bits...
+*
+*	dgrm.raw_ipv6.sl
+*		service level...
+*
+*	dgrm.raw_ipv6.max_static_rate
+*		static rate...
+*
+*	remote_ops.vaddr
+*		The registered virtual memory address of the remote memory to access
+*		with an RDMA or atomic operation.
+*
+*	remote_ops.rkey
+*		The rkey associated with the specified remote vaddr.  This data must
+*		be presented exactly as obtained from the remote node.  No byte
+*		swapping may be performed.
+*
+*	atomic1
+*		The first operand for an atomic operation.
+*
+*	atomic2
+*		The second operand for an atomic operation.
+*
+* NOTES
+*	The format of data sent over the fabric is user-defined and is considered
+*	opaque to the access layer.  The sole exception to this is MADs posted
+*	to a MAD QP service.  MADs are expected to match the format defined by
+*	the Infiniband specification and must be in network-byte order when posted
+*	to the MAD QP service.
+*
+* SEE ALSO
+*	ib_wr_type_t, ib_local_ds_t, ib_send_opt_t
+*****/
+
+/****s* Access Layer/ib_recv_wr_t
+* NAME
+*	ib_recv_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a work request to the receive queue of a queue
+*	pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_recv_wr
+{
+	struct _ib_recv_wr	*p_next;
+	uint64_t			wr_id;
+	uint32_t			num_ds;
+	ib_local_ds_t		*ds_array;
+} ib_recv_wr_t;
+/*
+* FIELDS
+*	p_next
+*		A pointer used to chain work requests together.  This permits multiple
+*		work requests to be posted to a queue pair through a single function
+*		call.  This value is set to NULL to mark the end of the chain.
+*
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	num_ds
+*		Number of local data segments specified by this work request.
+*
+*	ds_array
+*		A reference to an array of local data segments used by the receive
+*		operation.
+*
+* SEE ALSO
+*	ib_local_ds_t
+*****/
+
+/****s* Access Layer/ib_bind_wr_t
+* NAME
+*	ib_bind_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a memory window bind work request to the send
+*	queue of a queue pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_bind_wr
+{
+	uint64_t			wr_id;
+	ib_send_opt_t		send_opt;
+
+	ib_mr_handle_t		h_mr;
+	ib_access_t			access_ctrl;
+	uint32_t			current_rkey;
+
+	ib_local_ds_t		local_ds;
+
+} ib_bind_wr_t;
+/*
+* FIELDS
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	send_opt
+*		Optional send control parameters.
+*
+*	h_mr
+*		Handle to the memory region to which this window is being bound.
+*
+*	access_ctrl
+*		Access rights for this memory window.
+*
+*	current_rkey
+*		The current rkey assigned to this window for remote access.
+*
+*	local_ds
+*		A reference to a local data segment used by the bind operation.
+* +* SEE ALSO +* ib_send_opt_t, ib_access_t, ib_local_ds_t +*****/ + +/****d* Access Layer/ib_wc_status_t +* NAME +* ib_wc_status_t +* +* DESCRIPTION +* Indicates the status of a completed work request. These VALUES are +* returned to the user when retrieving completions. Note that success is +* identified as IB_WCS_SUCCESS, which is always zero. +* +* SYNOPSIS +*/ +typedef enum _ib_wc_status_t +{ + IB_WCS_SUCCESS, + IB_WCS_LOCAL_LEN_ERR, + IB_WCS_LOCAL_OP_ERR, + IB_WCS_LOCAL_EEC_OP_ERR, + IB_WCS_LOCAL_PROTECTION_ERR, + IB_WCS_WR_FLUSHED_ERR, + IB_WCS_MEM_WINDOW_BIND_ERR, + IB_WCS_REM_ACCESS_ERR, + IB_WCS_REM_OP_ERR, + IB_WCS_RNR_RETRY_ERR, + IB_WCS_TIMEOUT_RETRY_ERR, + IB_WCS_REM_INVALID_REQ_ERR, + IB_WCS_REM_INVALID_RD_REQ_ERR, + IB_WCS_INVALID_EECN, + IB_WCS_INVALID_EEC_STATE, + IB_WCS_UNMATCHED_RESPONSE, /* InfiniBand Access Layer */ + IB_WCS_CANCELED, /* InfiniBand Access Layer */ + IB_WCS_UNKNOWN /* Must be last. */ + +} ib_wc_status_t; +/* +* VALUES +* IB_WCS_SUCCESS +* Work request completed successfully. +* +* IB_WCS_MAD +* The completed work request was associated with a managmenet datagram +* that requires post processing. The MAD will be returned to the user +* through a callback once all post processing has completed. +* +* IB_WCS_LOCAL_LEN_ERR +* Generated for a work request posted to the send queue when the +* total of the data segment lengths exceeds the message length of the +* channel. Generated for a work request posted to the receive queue when +* the total of the data segment lengths is too small for a +* valid incoming message. +* +* IB_WCS_LOCAL_OP_ERR +* An internal QP consistency error was generated while processing this +* work request. This may indicate that the QP was in an incorrect state +* for the requested operation. +* +* IB_WCS_LOCAL_EEC_OP_ERR +* An internal EEC consistency error was generated while processing +* this work request. This may indicate that the EEC was in an incorrect +* state for the requested operation. +* +* IB_WCS_LOCAL_PROTECTION_ERR +* The data segments of the locally posted work request did not refer to +* a valid memory region. The memory may not have been properly +* registered for the requested operation. +* +* IB_WCS_WR_FLUSHED_ERR +* The work request was flushed from the QP before being completed. +* +* IB_WCS_MEM_WINDOW_BIND_ERR +* A memory window bind operation failed due to insufficient access +* rights. +* +* IB_WCS_REM_ACCESS_ERR, +* A protection error was detected at the remote node for a RDMA or atomic +* operation. +* +* IB_WCS_REM_OP_ERR, +* The operation could not be successfully completed at the remote node. +* This may indicate that the remote QP was in an invalid state or +* contained an invalid work request. +* +* IB_WCS_RNR_RETRY_ERR, +* The RNR retry count was exceeded while trying to send this message. +* +* IB_WCS_TIMEOUT_RETRY_ERR +* The local transport timeout counter expired while trying to send this +* message. +* +* IB_WCS_REM_INVALID_REQ_ERR, +* The remote node detected an invalid message on the channel. This error +* is usually a result of one of the following: +* - The operation was not supported on receive queue. +* - There was insufficient buffers to receive a new RDMA request. +* - There was insufficient buffers to receive a new atomic operation. +* - An RDMA request was larger than 2^31 bytes. +* +* IB_WCS_REM_INVALID_RD_REQ_ERR, +* Responder detected an invalid RD message. This may be the result of an +* invalid qkey or an RDD mismatch. 
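+*
+*		For illustration only, these status codes are typically examined
+*		when polling the CQ.  A sketch, assuming the AL verb ib_poll_cq
+*		and an existing CQ handle h_cq; ib_get_wc_status_str is defined
+*		later in this file:
+*
+*			ib_wc_t		wc;
+*			ib_wc_t		*p_free, *p_done;
+*
+*			wc.p_next = NULL;
+*			p_free = &wc;
+*			while( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
+*			{
+*				if( p_done->status != IB_WCS_SUCCESS )
+*					printf( "wr_id %d failed: %s\n", (int)p_done->wr_id,
+*						ib_get_wc_status_str( p_done->status ) );
+*				p_free = p_done;
+*			}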
+* +* IB_WCS_INVALID_EECN +* An invalid EE context number was detected. +* +* IB_WCS_INVALID_EEC_STATE +* The EEC was in an invalid state for the specified request. +* +* IB_WCS_UNMATCHED_RESPONSE +* A response MAD was received for which there was no matching send. The +* send operation may have been canceled by the user or may have timed +* out. +* +* IB_WCS_CANCELED +* The completed work request was canceled by the user. +*****/ + +OSM_EXPORT const char* ib_wc_status_str[]; + +/****f* IBA Base: Types/ib_get_wc_status_str +* NAME +* ib_get_wc_status_str +* +* DESCRIPTION +* Returns a string for the specified work completion status. +* +* SYNOPSIS +*/ +static inline const char* OSM_API +ib_get_wc_status_str( + IN ib_wc_status_t wc_status ) +{ + if( wc_status > IB_WCS_UNKNOWN ) + wc_status = IB_WCS_UNKNOWN; + return( ib_wc_status_str[wc_status] ); +} +/* +* PARAMETERS +* wc_status +* [in] work completion status value +* +* RETURN VALUES +* Pointer to the work completion status description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****d* Access Layer/ib_wc_type_t +* NAME +* ib_wc_type_t +* +* DESCRIPTION +* Indicates the type of work completion. +* +* SYNOPSIS +*/ +typedef enum _ib_wc_type_t +{ + IB_WC_SEND, + IB_WC_RDMA_WRITE, + IB_WC_RECV, + IB_WC_RDMA_READ, + IB_WC_MW_BIND, + IB_WC_FETCH_ADD, + IB_WC_COMPARE_SWAP, + IB_WC_RECV_RDMA_WRITE + +} ib_wc_type_t; +/*****/ + +/****d* Access Layer/ib_recv_opt_t +* NAME +* ib_recv_opt_t +* +* DESCRIPTION +* Indicates optional fields valid in a receive work completion. +* +* SYNOPSIS +*/ +typedef uint32_t ib_recv_opt_t; +#define IB_RECV_OPT_IMMEDIATE 0x00000001 +#define IB_RECV_OPT_FORWARD 0x00000002 +#define IB_RECV_OPT_GRH_VALID 0x00000004 +#define IB_RECV_OPT_VEND_MASK 0xFFFF0000 +/* +* VALUES +* IB_RECV_OPT_IMMEDIATE +* Indicates that immediate data is valid for this work completion. +* +* IB_RECV_OPT_FORWARD +* Indicates that the received trap should be forwarded to the SM. +* +* IB_RECV_OPT_GRH_VALID +* Indicates presence of the global route header. When set, the first +* 40 bytes received are the GRH. +* +* IB_RECV_OPT_VEND_MASK +* This mask indicates bits reserved in the receive options that may be +* used by the verbs provider to indicate vendor specific options. Bits +* set in this area of the receive options are ignored by the Access Layer, +* but may have specific meaning to the underlying VPD. +*****/ + +/****s* Access Layer/ib_wc_t +* NAME +* ib_wc_t +* +* DESCRIPTION +* Work completion information. +* +* SYNOPSIS +*/ +typedef struct _ib_wc +{ + struct _ib_wc *p_next; + uint64_t wr_id; + ib_wc_type_t wc_type; + + uint32_t length; + ib_wc_status_t status; + uint64_t vendor_specific; + + union _wc_recv + { + struct _wc_conn + { + ib_recv_opt_t recv_opt; + ib_net32_t immediate_data; + + } conn; + + struct _wc_ud + { + ib_recv_opt_t recv_opt; + ib_net32_t immediate_data; + ib_net32_t remote_qp; + uint16_t pkey_index; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + + } ud; + + struct _wc_rd + { + ib_net32_t remote_eecn; + ib_net32_t remote_qp; + ib_net16_t remote_lid; + uint8_t remote_sl; + uint32_t free_cnt; + + } rd; + + struct _wc_raw_ipv6 + { + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + + } raw_ipv6; + + struct _wc_raw_ether + { + ib_net16_t remote_lid; + uint8_t remote_sl; + uint8_t path_bits; + ib_net16_t ether_type; + + } raw_ether; + + } recv; + +} ib_wc_t; +/* +* FIELDS +* p_next +* A pointer used to chain work completions. 
This permits multiple +* work completions to be retrieved from a completion queue through a +* single function call. This value is set to NULL to mark the end of +* the chain. +* +* wr_id +* The 64-bit work request identifier that was specified when posting the +* work request. +* +* wc_type +* Indicates the type of work completion. +* +* +* length +* The total length of the data sent or received with the work request. +* +* status +* The result of the work request. +* +* vendor_specific +* HCA vendor specific information returned as part of the completion. +* +* recv.conn.recv_opt +* Indicates optional fields valid as part of a work request that +* completed on a connected (reliable or unreliable) queue pair. +* +* recv.conn.immediate_data +* 32-bit field received as part of an inbound message on a connected +* queue pair. This field is only valid if the recv_opt flag +* IB_RECV_OPT_IMMEDIATE has been set. +* +* recv.ud.recv_opt +* Indicates optional fields valid as part of a work request that +* completed on an unreliable datagram queue pair. +* +* recv.ud.immediate_data +* 32-bit field received as part of an inbound message on a unreliable +* datagram queue pair. This field is only valid if the recv_opt flag +* IB_RECV_OPT_IMMEDIATE has been set. +* +* recv.ud.remote_qp +* Identifies the source queue pair of a received datagram. +* +* recv.ud.pkey_index +* The pkey index for the source queue pair. This is valid only for +* GSI type QP's. +* +* recv.ud.remote_lid +* The source LID of the received datagram. +* +* recv.ud.remote_sl +* The service level used by the source of the received datagram. +* +* recv.ud.path_bits +* path bits... +* +* recv.rd.remote_eecn +* The remote end-to-end context number that sent the received message. +* +* recv.rd.remote_qp +* Identifies the source queue pair of a received message. +* +* recv.rd.remote_lid +* The source LID of the received message. +* +* recv.rd.remote_sl +* The service level used by the source of the received message. +* +* recv.rd.free_cnt +* The number of available entries in the completion queue. Reliable +* datagrams may complete out of order, so this field may be used to +* determine the number of additional completions that may occur. +* +* recv.raw_ipv6.remote_lid +* The source LID of the received message. +* +* recv.raw_ipv6.remote_sl +* The service level used by the source of the received message. +* +* recv.raw_ipv6.path_bits +* path bits... +* +* recv.raw_ether.remote_lid +* The source LID of the received message. +* +* recv.raw_ether.remote_sl +* The service level used by the source of the received message. +* +* recv.raw_ether.path_bits +* path bits... +* +* recv.raw_ether.ether_type +* ether type... +* NOTES +* When the work request completes with error, the only values that the +* consumer can depend on are the wr_id field, and the status of the +* operation. +* +* If the consumer is using the same CQ for completions from more than +* one type of QP (i.e Reliable Connected, Datagram etc), then the consumer +* must have additional information to decide what fields of the union are +* valid. +* SEE ALSO +* ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t +*****/ + +/****s* Access Layer/ib_mr_create_t +* NAME +* ib_mr_create_t +* +* DESCRIPTION +* Information required to create a registered memory region. +* +* SYNOPSIS +*/ +typedef struct _ib_mr_create +{ + void *vaddr; + uint64_t length; + ib_access_t access_ctrl; +} ib_mr_create_t; +/* +* FIELDS +* vaddr +* Starting virtual address of the region being registered. 
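+*
+*		For illustration only, a registration through this structure
+*		might look as follows (a sketch; the protection domain handle
+*		h_pd, the buffer buf, and the AL verb ib_reg_mem are assumed to
+*		be available):
+*
+*			ib_mr_create_t	mr_create;
+*			uint32_t		lkey, rkey;
+*			ib_mr_handle_t	h_mr;
+*
+*			mr_create.vaddr = buf;
+*			mr_create.length = 4096;
+*			mr_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ;
+*			ib_reg_mem( h_pd, &mr_create, &lkey, &rkey, &h_mr );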
+* +* length +* Length of the buffer to register. +* +* access_ctrl +* Access rights of the registered region. +* +* SEE ALSO +* ib_access_t +*****/ + +/****s* Access Layer/ib_phys_create_t +* NAME +* ib_phys_create_t +* +* DESCRIPTION +* Information required to create a physical memory region. +* +* SYNOPSIS +*/ +typedef struct _ib_phys_create +{ + uint64_t length; + uint32_t num_bufs; + uint64_t *buf_array; + uint32_t buf_offset; + uint32_t page_size; + ib_access_t access_ctrl; +} ib_phys_create_t; +/* +* length +* The length of the memory region in bytes. +* +* num_bufs +* Number of buffers listed in the specified buffer array. +* +* buf_array +* An array of physical buffers to be registered as a single memory +* region. +* +* buf_offset +* The offset into the first physical page of the specified memory +* region to start the virtual address. +* +* page_size +* The physical page size of the memory being registered. +* +* access_ctrl +* Access rights of the registered region. +* +* SEE ALSO +* ib_access_t +*****/ + +/****s* Access Layer/ib_mr_attr_t +* NAME +* ib_mr_attr_t +* +* DESCRIPTION +* Attributes of a registered memory region. +* +* SYNOPSIS +*/ +typedef struct _ib_mr_attr +{ + ib_pd_handle_t h_pd; + void *local_lb; + void *local_ub; + void *remote_lb; + void *remote_ub; + ib_access_t access_ctrl; + uint32_t lkey; + uint32_t rkey; +} ib_mr_attr_t; +/* +* DESCRIPTION +* h_pd +* Handle to the protection domain for this memory region. +* +* local_lb +* The virtual address of the lower bound of protection for local +* memory access. +* +* local_ub +* The virtual address of the upper bound of protection for local +* memory access. +* +* remote_lb +* The virtual address of the lower bound of protection for remote +* memory access. +* +* remote_ub +* The virtual address of the upper bound of protection for remote +* memory access. +* +* access_ctrl +* Access rights for the specified memory region. +* +* lkey +* The lkey associated with this memory region. +* +* rkey +* The rkey associated with this memory region. +* +* NOTES +* The remote_lb, remote_ub, and rkey are only valid if remote memory access +* is enabled for this memory region. +* +* SEE ALSO +* ib_access_t +*****/ + +/****d* Access Layer/ib_ca_mod_t +* NAME +* ib_ca_mod_t -- Modify port attributes and error counters +* +* DESCRIPTION +* Specifies modifications to the port attributes of a channel adapter. 
+* +* SYNOPSIS +*/ +typedef uint32_t ib_ca_mod_t; +#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001 +#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002 +#define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED 0x00000004 +#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008 +#define IB_CA_MOD_IS_SM 0x00000010 +#define IB_CA_MOD_IS_SM_DISABLED 0x00000020 +#define IB_CA_MOD_QKEY_CTR 0x00000040 +#define IB_CA_MOD_PKEY_CTR 0x00000080 +#define IB_CA_MOD_IS_NOTICE_SUPPORTED 0x00000100 +#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200 +#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400 +#define IB_CA_MOD_IS_SLMAP_SUPPORTED 0x00000800 +#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED 0x00001000 +#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED 0x00002000 +#define IB_CA_MOD_IS_SYSGUID_SUPPORTED 0x00004000 +#define IB_CA_MOD_IS_DR_NOTICE_SUPPORTED 0x00008000 +#define IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED 0x00010000 +#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED 0x00020000 +#define IB_CA_MOD_IS_REINIT_SUPORTED 0x00040000 +#define IB_CA_MOD_IS_LEDINFO_SUPPORTED 0x00080000 +#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000 +#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000 +#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000 +/* +* VALUES +* IB_CA_MOD_IS_CM_SUPPORTED +* Indicates if there is a communication manager accessible through +* the port. +* +* IB_CA_MOD_IS_SNMP_SUPPORTED +* Indicates if there is an SNMP agent accessible through the port. +* +* IB_CA_MOD_IS_DEV_MGMT_SUPPORTED +* Indicates if there is a device management agent accessible +* through the port. +* +* IB_CA_MOD_IS_VEND_SUPPORTED +* Indicates if there is a vendor supported agent accessible +* through the port. +* +* IB_CA_MOD_IS_SM +* Indicates if there is a subnet manager accessible through +* the port. +* +* IB_CA_MOD_IS_SM_DISABLED +* Indicates if the port has been disabled for configuration by the +* subnet manager. +* +* IB_CA_MOD_QKEY_CTR +* Used to reset the qkey violation counter associated with the +* port. +* +* IB_CA_MOD_PKEY_CTR +* Used to reset the pkey violation counter associated with the +* port. +* +* IB_CA_MOD_IS_NOTICE_SUPPORTED +* Indicates that this CA supports ability to generate Notices for +* Port State changes. (only applicable to switches) +* +* IB_CA_MOD_IS_TRAP_SUPPORTED +* Indicates that this management port supports ability to generate +* trap messages. (only applicable to switches) +* +* IB_CA_MOD_IS_APM_SUPPORTED +* Indicates that this port is capable of performing Automatic +* Path Migration. +* +* IB_CA_MOD_IS_SLMAP_SUPPORTED +* Indicates this port supports SLMAP capability. +* +* IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED +* Indicates that PKEY is supported in NVRAM +* +* IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED +* Indicates that MKEY is supported in NVRAM +* +* IB_CA_MOD_IS_SYSGUID_SUPPORTED +* Indicates System Image GUID support. +* +* IB_CA_MOD_IS_DR_NOTICE_SUPPORTED +* Indicate support for generating Direct Routed Notices +* +* IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED +* Indicates support for Boot Management +* +* IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED +* Indicates capability to generate notices for changes to CAPMASK +* +* IB_CA_MOD_IS_REINIT_SUPORTED +* Indicates type of node init supported. Refer to Chapter 14 for +* Initialization actions. +* +* IB_CA_MOD_IS_LEDINFO_SUPPORTED +* Indicates support for LED info. +* +* IB_CA_MOD_SHUTDOWN_PORT +* Used to modify the port active indicator. +* +* IB_CA_MOD_INIT_TYPE_VALUE +* Used to modify the init_type value for the port. +* +* IB_CA_MOD_SYSTEM_IMAGE_GUID +* Used to modify the system image GUID for the port. 
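+*
+*	These flags are combined using a bit-wise or to indicate which
+*	attributes are being modified.  As an illustrative sketch only
+*	(assuming the AL verb ib_modify_ca, an open CA handle h_ca, and the
+*	ib_port_attr_mod_t structure from the extended Access Layer types):
+*
+*		ib_port_attr_mod_t	port_attr_mod;
+*
+*		port_attr_mod.cap.sm = TRUE;
+*		ib_modify_ca( h_ca, port_num,
+*			IB_CA_MOD_IS_SM | IB_CA_MOD_QKEY_CTR, &port_attr_mod );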
+*****/ + +/****d* Access Layer/ib_mr_mod_t +* NAME +* ib_mr_mod_t +* +* DESCRIPTION +* Mask used to specify which attributes of a registered memory region are +* being modified. +* +* SYNOPSIS +*/ +typedef uint32_t ib_mr_mod_t; +#define IB_MR_MOD_ADDR 0x00000001 +#define IB_MR_MOD_PD 0x00000002 +#define IB_MR_MOD_ACCESS 0x00000004 +/* +* PARAMETERS +* IB_MEM_MOD_ADDR +* The address of the memory region is being modified. +* +* IB_MEM_MOD_PD +* The protection domain associated with the memory region is being +* modified. +* +* IB_MEM_MOD_ACCESS +* The access rights the memory region are being modified. +*****/ + +/****d* IBA Base: Constants/IB_SMINFO_STATE_INIT +* NAME +* IB_SMINFO_STATE_INIT +* +* DESCRIPTION +* Encoded state value used in the SMInfo attribute. +* +* SOURCE +*/ +#define IB_SMINFO_STATE_INIT 4 +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_HANDOVER +* NAME +* IB_SMINFO_ATTR_MOD_HANDOVER +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_HANDOVER (CL_NTOH32(0x000001)) +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_ACKNOWLEDGE +* NAME +* IB_SMINFO_ATTR_MOD_ACKNOWLEDGE +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_ACKNOWLEDGE (CL_NTOH32(0x000002)) +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISABLE +* NAME +* IB_SMINFO_ATTR_MOD_DISABLE +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_DISABLE (CL_NTOH32(0x000003)) +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_STANDBY +* NAME +* IB_SMINFO_ATTR_MOD_STANDBY +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_STANDBY (CL_NTOH32(0x000004)) +/**********/ + +/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISCOVER +* NAME +* IB_SMINFO_ATTR_MOD_DISCOVER +* +* DESCRIPTION +* Encoded attribute modifier value used on SubnSet(SMInfo) SMPs. +* +* SOURCE +*/ +#define IB_SMINFO_ATTR_MOD_DISCOVER (CL_NTOH32(0x000005)) +/**********/ + +/****s* Access Layer/ib_ci_op_t +* NAME +* ib_ci_op_t +* +* DESCRIPTION +* A structure used for vendor specific CA interface communication. +* +* SYNOPSIS +*/ +typedef struct _ib_ci_op +{ + IN uint32_t command; + IN OUT void* p_buf OPTIONAL; + IN uint32_t buf_size; + IN OUT uint32_t num_bytes_ret; + IN OUT int32_t status; + +} ib_ci_op_t; +/* +* FIELDS +* command +* A command code that is understood by the verbs provider. +* +* p_buf +* A reference to a buffer containing vendor specific data. The verbs +* provider must not access pointers in the p_buf between user-mode and +* kernel-mode. Any pointers embedded in the p_buf are invalidated by +* the user-mode/kernel-mode transition. +* +* buf_size +* The size of the buffer in bytes. +* +* num_bytes_ret +* The size in bytes of the vendor specific data returned in the buffer. +* This field is set by the verbs provider. The verbs provider should +* verify that the buffer size is sufficient to hold the data being +* returned. +* +* status +* The completion status from the verbs provider. This field should be +* initialize to indicate an error to allow detection and cleanup in +* case a communication error occurs between user-mode and kernel-mode. +* +* NOTES +* This structure is provided to allow the exchange of vendor specific +* data between the originator and the verbs provider. 
Users of this +* structure are expected to know the format of data in the p_buf based +* on the structure command field or the usage context. +*****/ + +END_C_DECLS + +#endif /* ndef WIN */ +#if defined( __WIN__ ) + #include +#endif + +#endif /* __IB_TYPES_H__ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/iba/ib_types_extended.h b/branches/Ndi/ulp/opensm/user/include/iba/ib_types_extended.h new file mode 100644 index 00000000..22a20dbe --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/iba/ib_types_extended.h @@ -0,0 +1,2585 @@ +/* + * Copyright (c) 2004,2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if !defined(__IB_TYPES_EXTENDED_H__) +#define __IB_TYPES_EXTENDED_H__ + + +#if defined( WIN32 ) + #if defined( EXPORT_AL_SYMBOLS ) + #define AL_EXPORT __declspec(dllexport) + #else + #define AL_EXPORT __declspec(dllimport) + #endif + + #ifdef CL_KERNEL + #define AL_API + #define AL_INLINE static inline + #else + #define AL_API __stdcall + #define AL_INLINE static inline + #endif /* CL_KERNEL */ +#else + #define AL_EXPORT extern + #define AL_INLINE static inline + #define AL_API + #define __ptr64 +#endif +/* + * Defines the size of user available data in communication management MADs + */ +#define IB_REQ_PDATA_SIZE 92 +#define IB_MRA_PDATA_SIZE 222 +#define IB_REJ_PDATA_SIZE 148 +#define IB_REP_PDATA_SIZE 196 +#define IB_RTU_PDATA_SIZE 224 +#define IB_LAP_PDATA_SIZE 168 +#define IB_APR_PDATA_SIZE 148 +#define IB_DREQ_PDATA_SIZE 220 +#define IB_DREP_PDATA_SIZE 224 +#define IB_SIDR_REQ_PDATA_SIZE 216 +#define IB_SIDR_REP_PDATA_SIZE 136 + +/* following v1 ver1.2 p901 */ +#define IB_PATH_RECORD_RATE_5_GBS 5 +#define IB_PATH_RECORD_RATE_20_GBS 6 +#define IB_PATH_RECORD_RATE_40_GBS 7 +#define IB_PATH_RECORD_RATE_60_GBS 8 +#define IB_PATH_RECORD_RATE_80_GBS 9 +#define IB_PATH_RECORD_RATE_120_GBS 10 + +typedef struct _ib_srq* __ptr64 ib_srq_handle_t; + +/* + * The following definitions are shared between the Access Layer and VPD + */ + + + +/****d* Access Layer/ib_api_status_t +* NAME +* ib_api_status_t +* +* DESCRIPTION +* Function return codes indicating the success or failure of an API call. +* Note that success is indicated by the return value IB_SUCCESS, which +* is always zero. 
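+*
+*	For illustration only, return codes are typically checked as below
+*	(a sketch; ib_open_ca is an AL verb declared elsewhere, and h_al,
+*	ca_guid, ca_event_cb, and h_ca are assumed to exist; ib_get_err_str
+*	is declared later in this file):
+*
+*		ib_api_status_t	status;
+*
+*		status = ib_open_ca( h_al, ca_guid, ca_event_cb, NULL, &h_ca );
+*		if( status != IB_SUCCESS )
+*			printf( "ib_open_ca failed: %s\n", ib_get_err_str( status ) );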
+* +* NOTES +* IB_VERBS_PROCESSING_DONE is used by UVP library to terminate a verbs call +* in the pre-ioctl step itself. +* +* SYNOPSIS +*/ +typedef enum _ib_api_status_t +{ + IB_SUCCESS, + IB_INSUFFICIENT_RESOURCES, + IB_INSUFFICIENT_MEMORY, + IB_INVALID_PARAMETER, + IB_INVALID_SETTING, + IB_NOT_FOUND, + IB_TIMEOUT, + IB_CANCELED, + IB_INTERRUPTED, + IB_INVALID_PERMISSION, + IB_UNSUPPORTED, + IB_OVERFLOW, + IB_MAX_MCAST_QPS_REACHED, + IB_INVALID_QP_STATE, + IB_INVALID_APM_STATE, + IB_INVALID_PORT_STATE, + IB_INVALID_STATE, + IB_RESOURCE_BUSY, + IB_INVALID_PKEY, + IB_INVALID_LKEY, + IB_INVALID_RKEY, + IB_INVALID_MAX_WRS, + IB_INVALID_MAX_SGE, + IB_INVALID_CQ_SIZE, + IB_INVALID_SRQ_SIZE, + IB_INVALID_SERVICE_TYPE, + IB_INVALID_GID, + IB_INVALID_LID, + IB_INVALID_GUID, + IB_INVALID_CA_HANDLE, + IB_INVALID_AV_HANDLE, + IB_INVALID_CQ_HANDLE, + IB_INVALID_QP_HANDLE, + IB_INVALID_SRQ_HANDLE, + IB_INVALID_PD_HANDLE, + IB_INVALID_MR_HANDLE, + IB_INVALID_FMR_HANDLE, + IB_INVALID_MW_HANDLE, + IB_INVALID_MCAST_HANDLE, + IB_INVALID_CALLBACK, + IB_INVALID_AL_HANDLE, /* InfiniBand Access Layer */ + IB_INVALID_HANDLE, /* InfiniBand Access Layer */ + IB_ERROR, /* InfiniBand Access Layer */ + IB_REMOTE_ERROR, /* Infiniband Access Layer */ + IB_VERBS_PROCESSING_DONE, /* See Notes above */ + IB_INVALID_WR_TYPE, + IB_QP_IN_TIMEWAIT, + IB_EE_IN_TIMEWAIT, + IB_INVALID_PORT, + IB_NOT_DONE, + IB_INVALID_INDEX, + IB_NO_MATCH, + IB_PENDING, + IB_UNKNOWN_ERROR /* ALWAYS LAST ENUM VALUE! */ + +} ib_api_status_t; +/*****/ + + + +/****f* IBA Base: Types/ib_get_err_str +* NAME +* ib_get_err_str +* +* DESCRIPTION +* Returns a string for the specified status value. +* +* SYNOPSIS +*/ +AL_EXPORT const char* AL_API +ib_get_err_str( + IN ib_api_status_t status ); +/* +* PARAMETERS +* status +* [in] status value +* +* RETURN VALUES +* Pointer to the status description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****d* Verbs/ib_async_event_t +* NAME +* ib_async_event_t -- Async event types +* +* DESCRIPTION +* This type indicates the reason the async callback was called. +* The context in the ib_event_rec_t indicates the resource context +* that associated with the callback. For example, for IB_AE_CQ_ERROR +* the context provided during the ib_create_cq is returned in the event. +* +* SYNOPSIS +*/ +typedef enum _ib_async_event_t +{ + IB_AE_SQ_ERROR = 1, + IB_AE_SQ_DRAINED, + IB_AE_RQ_ERROR, + IB_AE_CQ_ERROR, + IB_AE_QP_FATAL, + IB_AE_QP_COMM, + IB_AE_QP_APM, + IB_AE_LOCAL_FATAL, + IB_AE_PKEY_TRAP, + IB_AE_QKEY_TRAP, + IB_AE_MKEY_TRAP, + IB_AE_PORT_TRAP, + IB_AE_SYSIMG_GUID_TRAP, + IB_AE_BUF_OVERRUN, + IB_AE_LINK_INTEGRITY, + IB_AE_FLOW_CTRL_ERROR, + IB_AE_BKEY_TRAP, + IB_AE_QP_APM_ERROR, + IB_AE_WQ_REQ_ERROR, + IB_AE_WQ_ACCESS_ERROR, + IB_AE_PORT_ACTIVE, + IB_AE_PORT_DOWN, + IB_AE_CLIENT_REREGISTER, + IB_AE_SRQ_LIMIT_REACHED, + IB_AE_SRQ_QP_LAST_WQE_REACHED, + IB_AE_UNKNOWN /* ALWAYS LAST ENUM VALUE */ + +} ib_async_event_t; +/* +* VALUES +* IB_AE_SQ_ERROR +* An error occurred when accessing the send queue of the QP. +* This event is optional. +* +* IB_AE_SQ_DRAINED +* The send queue of the specified QP has completed the outstanding +* messages in progress when the state change was requested and, if +* applicable, has received all acknowledgements for those messages. +* +* IB_AE_RQ_ERROR +* An error occurred when accessing the receive queue of the QP. +* This event is optional. +* +* IB_AE_CQ_ERROR +* An error occurred when writing an entry to the CQ. 
+*
+* IB_AE_QP_FATAL
+*	A catastrophic error occurred while accessing or processing the
+*	work queue that prevents reporting of completions.
+*
+* IB_AE_QP_COMM
+*	The first packet has arrived for the receive work queue where the
+*	QP is still in the RTR state.
+*
+* IB_AE_QP_APM
+*	If alternate path migration is supported, this event indicates that
+*	the QP connection has migrated to the alternate path.
+*
+* IB_AE_LOCAL_FATAL
+*	A catastrophic HCA error occurred which cannot be attributed to any
+*	resource; behavior is indeterminate.
+*
+* IB_AE_PKEY_TRAP
+*	A PKEY violation was detected.  This event is optional.
+*
+* IB_AE_QKEY_TRAP
+*	A QKEY violation was detected.  This event is optional.
+*
+* IB_AE_MKEY_TRAP
+*	An MKEY violation was detected.  This event is optional.
+*
+* IB_AE_PORT_TRAP
+*	A port capability change was detected.  This event is optional.
+*
+* IB_AE_SYSIMG_GUID_TRAP
+*	If the system image GUID is supported, this event indicates that the
+*	system image GUID of this HCA has been changed.  This event is
+*	optional.
+*
+* IB_AE_BUF_OVERRUN
+*	The number of consecutive flow control update periods with at least
+*	one overrun error in each period has exceeded the threshold specified
+*	in the port info attributes.  This event is optional.
+*
+* IB_AE_LINK_INTEGRITY
+*	The detection of excessively frequent local physical errors has
+*	exceeded the threshold specified in the port info attributes.  This
+*	event is optional.
+*
+* IB_AE_FLOW_CTRL_ERROR
+*	An HCA watchdog timer monitoring the arrival of flow control updates
+*	has expired without receiving an update.  This event is optional.
+*
+* IB_AE_BKEY_TRAP
+*	A BKEY violation was detected.  This event is optional.
+*
+* IB_AE_QP_APM_ERROR
+*	If alternate path migration is supported, this event indicates that
+*	an incoming path migration request to this QP was not accepted.
+*
+* IB_AE_WQ_REQ_ERROR
+*	An OpCode violation was detected at the responder.
+*
+* IB_AE_WQ_ACCESS_ERROR
+*	An access violation was detected at the responder.
+*
+* IB_AE_PORT_ACTIVE
+*	If the port active event is supported, this event is generated
+*	when the link becomes active: IB_LINK_ACTIVE.
+*
+* IB_AE_PORT_DOWN
+*	The link is declared unavailable: IB_LINK_INIT, IB_LINK_ARMED,
+*	IB_LINK_DOWN.
+*
+* IB_AE_CLIENT_REREGISTER
+*	The SM indicates that the client should reregister its SA records.
+*
+* IB_AE_SRQ_LIMIT_REACHED
+*	The number of receive work requests remaining on the SRQ has dropped
+*	below the SRQ limit watermark set for the queue.
+*
+* IB_AE_SRQ_QP_LAST_WQE_REACHED
+*	An event issued for a QP associated with a shared receive queue when
+*	a CQE is generated for the last WQE, or when the QP enters the Error
+*	state and there are no more WQEs on the RQ.
+*
+* IB_AE_UNKNOWN
+*	An unknown error occurred which cannot be attributed to any
+*	resource; behavior is indeterminate.
+*
+*****/
+
+
+
+/****f* IBA Base: Types/ib_get_async_event_str
+* NAME
+*	ib_get_async_event_str
+*
+* DESCRIPTION
+*	Returns a string for the specified asynchronous event.
+*
+* SYNOPSIS
+*/
+AL_EXPORT const char* AL_API
+ib_get_async_event_str(
+	IN				ib_async_event_t			event );
+/*
+* PARAMETERS
+*	event
+*		[in] event value
+*
+* RETURN VALUES
+*	Pointer to the asynchronous event description string.
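+*
+*	For illustration only, an event callback might log events as below
+*	(a sketch; ib_event_rec_t is defined later in this file):
+*
+*		static void AL_API
+*		ca_event_cb(
+*			IN	ib_event_rec_t	*p_event_rec )
+*		{
+*			printf( "async event %s, context %p\n",
+*				ib_get_async_event_str( p_event_rec->type ),
+*				p_event_rec->context );
+*		}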
+* +* NOTES +* +* SEE ALSO +*********/ + + +/****s* Verbs/ib_event_rec_t +* NAME +* ib_event_rec_t -- Async event notification record +* +* DESCRIPTION +* When an async event callback is made, this structure is passed to indicate +* the type of event, the source of event that caused it, and the context +* associated with this event. +* +* context -- Context of the resource that caused the event. +* -- ca_context if this is a port/adapter event. +* -- qp_context if the source is a QP event +* -- cq_context if the source is a CQ event. +* -- ee_context if the source is an EE event. +* +* SYNOPSIS +*/ +typedef struct _ib_event_rec +{ + void* __ptr64 context; + ib_async_event_t type; + + /* HCA vendor specific event information. */ + uint64_t vendor_specific; + + /* The following structures are valid only for trap types. */ + union _trap + { + struct + { + uint16_t lid; + ib_net64_t port_guid; + uint8_t port_num; + + /* + * The following structure is valid only for + * P_KEY, Q_KEY, and M_KEY violation traps. + */ + struct + { + uint8_t sl; + uint16_t src_lid; + uint16_t dest_lid; + union _key + { + uint16_t pkey; + uint32_t qkey; + uint64_t mkey; + } key; + uint32_t src_qp; + uint32_t dest_qp; + ib_gid_t src_gid; + ib_gid_t dest_gid; + + } violation; + + } info; + + ib_net64_t sysimg_guid; + + } trap; + +} ib_event_rec_t; +/*******/ + + +/****d* Access Layer/ib_atomic_t +* NAME +* ib_atomic_t +* +* DESCRIPTION +* Indicates atomicity levels supported by an adapter. +* +* SYNOPSIS +*/ +typedef enum _ib_atomic_t +{ + IB_ATOMIC_NONE, + IB_ATOMIC_LOCAL, + IB_ATOMIC_GLOBAL + +} ib_atomic_t; +/* +* VALUES +* IB_ATOMIC_NONE +* Atomic operations not supported. +* +* IB_ATOMIC_LOCAL +* Atomic operations guaranteed between QPs of a single CA. +* +* IB_ATOMIC_GLOBAL +* Atomic operations are guaranteed between CA and any other entity +* in the system. +*****/ + + +/****s* Access Layer/ib_port_cap_t +* NAME +* ib_port_cap_t +* +* DESCRIPTION +* Indicates which management agents are currently available on the specified +* port. +* +* SYNOPSIS +*/ +typedef struct _ib_port_cap +{ + boolean_t cm; + boolean_t snmp; + boolean_t dev_mgmt; + boolean_t vend; + boolean_t sm; + boolean_t sm_disable; + boolean_t qkey_ctr; + boolean_t pkey_ctr; + boolean_t notice; + boolean_t trap; + boolean_t apm; + boolean_t slmap; + boolean_t pkey_nvram; + boolean_t mkey_nvram; + boolean_t sysguid; + boolean_t dr_notice; + boolean_t boot_mgmt; + boolean_t capm_notice; + boolean_t reinit; + boolean_t ledinfo; + boolean_t port_active; + boolean_t ipd; + boolean_t pkey_switch_ext_port; + boolean_t bm; + boolean_t link_rtl; + boolean_t client_reregister; + +} ib_port_cap_t; +/*****/ + + +/****d* Access Layer/ib_init_type_t +* NAME +* ib_init_type_t +* +* DESCRIPTION +* If supported by the HCA, the type of initialization requested by +* this port before SM moves it to the active or armed state. If the +* SM implements reinitialization, it shall set these bits to indicate +* the type of initialization performed prior to activating the port. +* Otherwise, these bits shall be set to 0. +* +* SYNOPSIS +*/ +typedef uint8_t ib_init_type_t; +#define IB_INIT_TYPE_NO_LOAD 0x01 +#define IB_INIT_TYPE_PRESERVE_CONTENT 0x02 +#define IB_INIT_TYPE_PRESERVE_PRESENCE 0x04 +#define IB_INIT_TYPE_DO_NOT_RESUSCITATE 0x08 +/*****/ + + +/****s* Access Layer/ib_port_attr_mod_t +* NAME +* ib_port_attr_mod_t +* +* DESCRIPTION +* Port attributes that may be modified. 
+* +* SYNOPSIS +*/ +typedef struct _ib_port_attr_mod +{ + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + ib_init_type_t init_type; + ib_net64_t system_image_guid; + +} ib_port_attr_mod_t; +/* +* SEE ALSO +* ib_port_cap_t +*****/ + + +/****s* Access Layer/ib_port_attr_t +* NAME +* ib_port_attr_t +* +* DESCRIPTION +* Information about a port on a given channel adapter. +* +* SYNOPSIS +*/ +typedef struct _ib_port_attr +{ + ib_net64_t port_guid; + uint8_t port_num; + uint8_t mtu; + uint64_t max_msg_size; + ib_net16_t lid; + uint8_t lmc; + + /* + * LinkWidthSupported as defined in PortInfo. Required to calculate + * inter-packet delay (a.k.a. static rate). + */ + uint8_t link_width_supported; + + uint16_t max_vls; + + ib_net16_t sm_lid; + uint8_t sm_sl; + uint8_t link_state; + + ib_init_type_t init_type_reply; /* Optional */ + + /* + * subnet_timeout: + * The maximum expected subnet propagation delay to reach any port on + * the subnet. This value also determines the rate at which traps can + * be generated from this node. + * + * timeout = 4.096 microseconds * 2^subnet_timeout + */ + uint8_t subnet_timeout; + + ib_port_cap_t cap; + uint16_t pkey_ctr; + uint16_t qkey_ctr; + + uint16_t num_gids; + uint16_t num_pkeys; + /* + * Pointers at the end of the structure to allow doing a simple + * memory comparison of contents up to the first pointer. + */ + ib_gid_t* __ptr64 p_gid_table; + ib_net16_t* __ptr64 p_pkey_table; + +} ib_port_attr_t; +/* +* SEE ALSO +* uint8_t, ib_port_cap_t, ib_link_states_t +*****/ + + +/****s* Access Layer/ib_ca_attr_t +* NAME +* ib_ca_attr_t +* +* DESCRIPTION +* Information about a channel adapter. +* +* SYNOPSIS +*/ +typedef struct _ib_ca_attr +{ + ib_net64_t ca_guid; + + uint32_t vend_id; + uint16_t dev_id; + uint16_t revision; + uint64_t fw_ver; + + /* + * Total size of the ca attributes in bytes + */ + uint32_t size; + uint32_t max_qps; + uint32_t max_wrs; + + uint32_t max_sges; + uint32_t max_rd_sges; + + uint32_t max_cqs; + uint32_t max_cqes; + + uint32_t max_pds; + + uint32_t init_regions; + uint64_t init_region_size; + + uint32_t init_windows; + uint32_t max_addr_handles; + + uint32_t max_partitions; + + ib_atomic_t atomicity; + + uint8_t max_qp_resp_res; + uint8_t max_resp_res; + + uint8_t max_qp_init_depth; + + uint32_t max_ipv6_qps; + uint32_t max_ether_qps; + + uint32_t max_mcast_grps; + uint32_t max_mcast_qps; + uint32_t max_qps_per_mcast_grp; + uint32_t max_fmr; + uint32_t max_map_per_fmr; + uint32_t max_srq; + uint32_t max_srq_wrs; + uint32_t max_srq_sges; + + /* + * local_ack_delay: + * Specifies the maximum time interval between the local CA receiving + * a message and the transmission of the associated ACK or NAK. + * + * timeout = 4.096 microseconds * 2^local_ack_delay + */ + uint8_t local_ack_delay; + + boolean_t bad_pkey_ctr_support; + boolean_t bad_qkey_ctr_support; + boolean_t raw_mcast_support; + boolean_t apm_support; + boolean_t av_port_check; + boolean_t change_primary_port; + boolean_t modify_wr_depth; + boolean_t modify_srq_depth; + boolean_t current_qp_state_support; + boolean_t shutdown_port_capability; + boolean_t init_type_support; + boolean_t port_active_event_support; + boolean_t system_image_guid_support; + boolean_t hw_agents; + + ib_net64_t system_image_guid; + + uint32_t num_page_sizes; + uint8_t num_ports; + + uint32_t* __ptr64 p_page_size; + ib_port_attr_t* __ptr64 p_port_attr; + +} ib_ca_attr_t; +/* +* FIELDS +* ca_guid +* GUID for this adapter. 
+* +* vend_id +* IEEE vendor ID for this adapter +* +* dev_id +* Device ID of this adapter. (typically from PCI device ID) +* +* revision +* Revision ID of this adapter +* +* Fw_ver +* Device Firmware version. +* +* size +* Total size in bytes for the HCA attributes. This size includes total +* size required for all the variable members of the structure. If a +* vendor requires to pass vendor specific fields beyond this structure, +* the HCA vendor can choose to report a larger size. If a vendor is +* reporting extended vendor specific features, they should also provide +* appropriate access functions to aid with the required interpretation. +* +* max_qps +* Maximum number of QP's supported by this HCA. +* +* max_wrs +* Maximum number of work requests supported by this HCA. +* +* max_sges +* Maximum number of scatter gather elements supported per work request. +* +* max_rd_sges +* Maximum number of scatter gather elements supported for READ work +* requests for a Reliable Datagram QP. This value must be zero if RD +* service is not supported. +* +* max_cqs +* Maximum number of Completion Queues supported. +* +* max_cqes +* Maximum number of CQ elements supported per CQ. +* +* max_pds +* Maximum number of protection domains supported. +* +* init_regions +* Initial number of memory regions supported. These are only informative +* values. HCA vendors can extended and grow these limits on demand. +* +* init_region_size +* Initial limit on the size of the registered memory region. +* +* init_windows +* Initial number of window entries supported. +* +* max_addr_handles +* Maximum number of address handles supported. +* +* max_partitions +* Maximum number of partitions supported. +* +* atomicity +* Indicates level of atomic operations supported by this HCA. +* +* max_qp_resp_res +* Maximum limit on number of responder resources for incomming RDMA +* operations on QPs. +* +* max_fmr +* Maximum number of Fast Memory Regions supported. +* +* max_map_per_fmr +* Maximum number of mappings, supported by a Fast Memory Region. +* +* max_srq +* Maximum number of Shared Receive Queues supported. +* +* max_srq_wrs +* Maximum number of work requests supported by this SRQ. +* +* max_srq_sges +* Maximum number of scatter gather elements supported per work request on SRQ. +* +* max_resp_res +* Maximum number of responder resources per HCA, with this HCA used as +* the target. +* +* max_qp_init_depth +* Maximimum initiator depth per QP for initiating RDMA reads and +* atomic operations. +* +* max_ipv6_qps +* max_ether_qps +* Maximum number of IPV6 and raw ether QP's supported by this HCA. +* +* max_mcast_grps +* Maximum number of multicast groups supported. +* +* max_mcast_qps +* Maximum number of QP's that can support multicast operations. +* +* max_qps_per_mcast_grp +* Maximum number of multicast QP's per multicast group. +* +* local_ack_delay +* Specifies the maximum time interval between the local CA receiving +* a message and the transmission of the associated ACK or NAK. +* timeout = 4.096 microseconds * 2^local_ack_delay +* +* bad_pkey_ctr_support +* bad_qkey_ctr_support +* Indicates support for the bad pkey and qkey counters. +* +* raw_mcast_support +* Indicates support for raw packet multicast. +* +* apm_support +* Indicates support for Automatic Path Migration. +* +* av_port_check +* Indicates ability to check port number in address handles. +* +* change_primary_port +* Indicates ability to change primary port for a QP during a +* SQD->RTS transition. 
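+*
+*		For illustration only, the size field described above supports
+*		the usual two-call query idiom (a sketch; ib_query_ca is an AL
+*		verb and cl_malloc comes from complib, both declared elsewhere):
+*
+*			uint32_t		size = 0;
+*			ib_ca_attr_t	*p_ca_attr;
+*
+*			// First call is expected to fail and report the needed size.
+*			ib_query_ca( h_ca, NULL, &size );
+*			p_ca_attr = (ib_ca_attr_t*)cl_malloc( size );
+*			ib_query_ca( h_ca, p_ca_attr, &size );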
+* +* modify_wr_depth +* Indicates ability to modify QP depth during a modify QP operation. +* Check the verb specification for permitted states. +* +* modify_srq_depth +* Indicates ability to modify SRQ depth during a modify SRQ operation. +* Check the verb specification for permitted states. +* +* current_qp_state_support +* Indicates ability of the HCA to support the current QP state modifier +* during a modify QP operation. +* +* shutdown_port_capability +* Shutdown port capability support indicator. +* +* init_type_support +* Indicates init_type_reply and ability to set init_type is supported. +* +* port_active_event_support +* Port active event support indicator. +* +* system_image_guid_support +* System image GUID support indicator. +* +* hw_agents +* Indicates SMA is implemented in HW. +* +* system_image_guid +* Optional system image GUID. This field is valid only if the +* system_image_guid_support flag is set. +* +* num_page_sizes +* Indicates support for different page sizes supported by the HCA. +* The variable size array can be obtained from p_page_size. +* +* num_ports +* Number of physical ports supported on this HCA. +* +* p_page_size +* Array holding different page size supported. +* +* p_port_attr +* Array holding port attributes. +* +* NOTES +* This structure contains the attributes of a channel adapter. Users must +* call ib_copy_ca_attr to copy the contents of this structure to a new +* memory region. +* +* SEE ALSO +* ib_port_attr_t, ib_atomic_t, ib_copy_ca_attr +*****/ + +/****f* Access layer/ib_copy_ca_attr +* NAME +* ib_copy_ca_attr +* +* DESCRIPTION +* Copies CA attributes. +* +* SYNOPSIS +*/ +AL_EXPORT ib_ca_attr_t* AL_API +ib_copy_ca_attr( + IN ib_ca_attr_t* const p_dest, + IN const ib_ca_attr_t* const p_src ); +/* +* PARAMETERS +* p_dest +* Pointer to the buffer that is the destination of the copy. +* +* p_src +* Pointer to the CA attributes to copy. +* +* RETURN VALUE +* Pointer to the copied CA attributes. +* +* NOTES +* The buffer pointed to by the p_dest parameter must be at least the size +* specified in the size field of the buffer pointed to by p_src. +* +* SEE ALSO +* ib_ca_attr_t, ib_dup_ca_attr, ib_free_ca_attr +*****/ + + +/****d* Access Layer/ib_pd_type_t +* NAME +* ib_pd_type_t +* +* DESCRIPTION +* Indicates the type of protection domain being allocated. +* +* SYNOPSIS +*/ +typedef enum _ib_pd_type +{ + IB_PDT_NORMAL, + IB_PDT_ALIAS, + IB_PDT_SQP + +} ib_pd_type_t; +/* +* VALUES +* IB_PDT_NORMAL +* Protection domain for all non-aliased QPs. +* +* IB_PDT_ALIAS +* Protection domain for IB_QPT_QP0_ALIAS and IB_QPT_QP1_ALIAS QPs. +* +* IB_PDT_SQP +* Protection domain for special queue pair usage. +*****/ + + +/****s* Access Layer/ib_av_attr_t +* NAME +* ib_av_attr_t +* +* DESCRIPTION +* IBA address vector. +* +* SYNOPSIS +*/ +typedef struct _ib_av_attr +{ + uint8_t port_num; + + uint8_t sl; + ib_net16_t dlid; + + boolean_t grh_valid; + ib_grh_t grh; + uint8_t static_rate; + uint8_t path_bits; + + struct _av_conn + { + uint8_t path_mtu; + uint8_t local_ack_timeout; + uint8_t seq_err_retry_cnt; + uint8_t rnr_retry_cnt; + + } conn; + +} ib_av_attr_t; +/* +* SEE ALSO +* ib_gid_t +*****/ + + +/****d* Access Layer/ib_qp_type_t +* NAME +* ib_qp_type_t +* +* DESCRIPTION +* Indicates the type of queue pair being created. +* +* SYNOPSIS +*/ +typedef enum _ib_qp_type +{ + IB_QPT_RELIABLE_CONN = 0, /* Matches CM REQ transport type */ + IB_QPT_UNRELIABLE_CONN = 1, /* Matches CM REQ transport type */ + IB_QPT_UNRELIABLE_DGRM = 3, /* Purposefully skip RDD type. 
*/ + IB_QPT_QP0, + IB_QPT_QP1, + IB_QPT_RAW_IPV6, + IB_QPT_RAW_ETHER, + IB_QPT_MAD, /* InfiniBand Access Layer */ + IB_QPT_QP0_ALIAS, /* InfiniBand Access Layer */ + IB_QPT_QP1_ALIAS /* InfiniBand Access Layer */ + +} ib_qp_type_t; +/* +* VALUES +* IB_QPT_RELIABLE_CONN +* Reliable, connected queue pair. +* +* IB_QPT_UNRELIABLE_CONN +* Unreliable, connected queue pair. +* +* IB_QPT_UNRELIABLE_DGRM +* Unreliable, datagram queue pair. +* +* IB_QPT_QP0 +* Queue pair 0. +* +* IB_QPT_QP1 +* Queue pair 1. +* +* IB_QPT_RAW_DGRM +* Raw datagram queue pair. +* +* IB_QPT_RAW_IPV6 +* Raw IP version 6 queue pair. +* +* IB_QPT_RAW_ETHER +* Raw Ethernet queue pair. +* +* IB_QPT_MAD +* Unreliable, datagram queue pair that will send and receive management +* datagrams with assistance from the access layer. +* +* IB_QPT_QP0_ALIAS +* Alias to queue pair 0. Aliased QPs can only be created on an aliased +* protection domain. +* +* IB_QPT_QP1_ALIAS +* Alias to queue pair 1. Aliased QPs can only be created on an aliased +* protection domain. +*****/ + + +/****d* Access Layer/ib_access_t +* NAME +* ib_access_t +* +* DESCRIPTION +* Indicates the type of access is permitted on resources such as QPs, +* memory regions and memory windows. +* +* SYNOPSIS +*/ +typedef uint32_t ib_access_t; +#define IB_AC_RDMA_READ 0x00000001 +#define IB_AC_RDMA_WRITE 0x00000002 +#define IB_AC_ATOMIC 0x00000004 +#define IB_AC_LOCAL_WRITE 0x00000008 +#define IB_AC_MW_BIND 0x00000010 +/* +* NOTES +* Users may combine access rights using a bit-wise or operation to specify +* additional access. For example: IB_AC_RDMA_READ | IB_AC_RDMA_WRITE grants +* RDMA read and write access. +*****/ + + +/****d* Access Layer/ib_qp_state_t +* NAME +* ib_qp_state_t +* +* DESCRIPTION +* Indicates or sets the state of a queue pair. The current state of a queue +* pair is returned through the ib_qp_query call and set via the +* ib_qp_modify call. +* +* SYNOPSIS +*/ +typedef uint32_t ib_qp_state_t; +#define IB_QPS_RESET 0x00000001 +#define IB_QPS_INIT 0x00000002 +#define IB_QPS_RTR 0x00000004 +#define IB_QPS_RTS 0x00000008 +#define IB_QPS_SQD 0x00000010 +#define IB_QPS_SQD_DRAINING 0x00000030 +#define IB_QPS_SQD_DRAINED 0x00000050 +#define IB_QPS_SQERR 0x00000080 +#define IB_QPS_ERROR 0x00000100 +#define IB_QPS_TIME_WAIT 0xDEAD0000 /* InfiniBand Access Layer */ +/*****/ + + +/****d* Access Layer/ib_apm_state_t +* NAME +* ib_apm_state_t +* +* DESCRIPTION +* The current automatic path migration state of a queue pair +* +* SYNOPSIS +*/ +typedef enum _ib_apm_state +{ + IB_APM_MIGRATED = 1, + IB_APM_REARM, + IB_APM_ARMED + +} ib_apm_state_t; +/*****/ + +/****d* Access Layer/ib_srq_attr_mask_t +* NAME +* ib_srq_attr_mask_t +* +* DESCRIPTION +* Indicates valid fields in ib_srq_attr_t structure +* +* SYNOPSIS +*/ +typedef enum _ib_srq_attr_mask { + IB_SRQ_MAX_WR = 1 << 0, + IB_SRQ_LIMIT = 1 << 1, +} ib_srq_attr_mask_t; +/*****/ + + +/****s* Access Layer/ib_srq_attr_t +* NAME +* ib_srq_attr_t +* +* DESCRIPTION +* Attributes used to initialize a shared queue pair at creation time. +* +* SYNOPSIS +*/ +typedef struct _ib_srq_attr { + uint32_t max_wr; + uint32_t max_sge; + uint32_t srq_limit; +} ib_srq_attr_t; +/* +* FIELDS +* max_wr +* Specifies the max number of work request on SRQ. +* +* max_sge +* Specifies the max number of scatter/gather elements in one work request. +* +* srq_limit +* Specifies the low water mark for SRQ. 
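+*
+*	As an illustrative sketch only (assuming the AL verb ib_create_srq
+*	and a protection domain handle h_pd are available):
+*
+*		ib_srq_attr_t	srq_attr;
+*		ib_srq_handle_t	h_srq;
+*
+*		srq_attr.max_wr = 1024;
+*		srq_attr.max_sge = 4;
+*		srq_attr.srq_limit = 16;
+*		ib_create_srq( h_pd, &srq_attr, NULL, NULL, &h_srq );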
+* +* SEE ALSO +* ib_qp_type_t, ib_srq_attr_mask_t +*****/ + + + +/****s* Access Layer/ib_qp_create_t +* NAME +* ib_qp_create_t +* +* DESCRIPTION +* Attributes used to initialize a queue pair at creation time. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_create +{ + ib_qp_type_t qp_type; + + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; + + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; + + boolean_t sq_signaled; + +} ib_qp_create_t; +/* +* FIELDS +* type +* Specifies the type of queue pair to create. +* +* sq_depth +* Indicates the requested maximum number of work requests that may be +* outstanding on the queue pair's send queue. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* rq_depth +* Indicates the requested maximum number of work requests that may be +* outstanding on the queue pair's receive queue. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* sq_sge +* Indicates the maximum number scatter-gather elements that may be +* given in a send work request. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* rq_sge +* Indicates the maximum number scatter-gather elements that may be +* given in a receive work request. This value must be less +* than or equal to the maximum reported by the channel adapter associated +* with the queue pair. +* +* h_sq_cq +* A handle to the completion queue that will be used to report send work +* request completions. This handle must be NULL if the type is +* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS. +* +* h_rq_cq +* A handle to the completion queue that will be used to report receive +* work request completions. This handle must be NULL if the type is +* IB_QPT_MAD, IB_QPT_QP0_ALIAS, or IB_QPT_QP1_ALIAS. +* +* h_srq +* A handle to an SRQ to get receive completions via. Must be coded NULL +* when QP is not associated with SRQ +* +* sq_signaled +* A flag that is used to indicate whether the queue pair will signal +* an event upon completion of a send work request. If set to +* TRUE, send work requests will always generate a completion +* event. If set to FALSE, a completion event will only be +* generated if the send_opt field of the send work request has the +* IB_SEND_OPT_SIGNALED flag set. +* +* SEE ALSO +* ib_qp_type_t, ib_qp_attr_t +*****/ + + +/****s* Access Layer/ib_qp_attr_t +* NAME +* ib_qp_attr_t +* +* DESCRIPTION +* Queue pair attributes returned through ib_query_qp. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_attr +{ + ib_pd_handle_t h_pd; + ib_qp_type_t qp_type; + ib_access_t access_ctrl; + uint16_t pkey_index; + + uint32_t sq_max_inline; + uint32_t sq_depth; + uint32_t rq_depth; + uint32_t sq_sge; + uint32_t rq_sge; + uint8_t init_depth; + uint8_t resp_res; + + ib_cq_handle_t h_sq_cq; + ib_cq_handle_t h_rq_cq; + ib_srq_handle_t h_srq; + + boolean_t sq_signaled; + + ib_qp_state_t state; + ib_net32_t num; + ib_net32_t dest_num; + ib_net32_t qkey; + + ib_net32_t sq_psn; + ib_net32_t rq_psn; + + uint8_t primary_port; + uint8_t alternate_port; + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + ib_apm_state_t apm_state; + +} ib_qp_attr_t; +/* +* FIELDS +* h_pd +* This is a handle to a protection domain associated with the QP. 
+* +* sq_max_inline +* Maximum payload that can be inlined directly in a WQE, eliminating +* protection checks and additional DMA operations. +* +* NOTES +* Other fields are defined by the Infiniband specification. +* +* SEE ALSO +* ib_qp_type_t, ib_access_t, ib_qp_state_t, ib_av_attr_t, ib_apm_state_t +*****/ + + +/****d* Access Layer/ib_qp_opts_t +* NAME +* ib_qp_opts_t +* +* DESCRIPTION +* Optional fields supplied in the modify QP operation. +* +* SYNOPSIS +*/ +typedef uint32_t ib_qp_opts_t; +#define IB_MOD_QP_ALTERNATE_AV 0x00000001 +#define IB_MOD_QP_PKEY 0x00000002 +#define IB_MOD_QP_APM_STATE 0x00000004 +#define IB_MOD_QP_PRIMARY_AV 0x00000008 +#define IB_MOD_QP_RNR_NAK_TIMEOUT 0x00000010 +#define IB_MOD_QP_RESP_RES 0x00000020 +#define IB_MOD_QP_INIT_DEPTH 0x00000040 +#define IB_MOD_QP_PRIMARY_PORT 0x00000080 +#define IB_MOD_QP_ACCESS_CTRL 0x00000100 +#define IB_MOD_QP_QKEY 0x00000200 +#define IB_MOD_QP_SQ_DEPTH 0x00000400 +#define IB_MOD_QP_RQ_DEPTH 0x00000800 +#define IB_MOD_QP_CURRENT_STATE 0x00001000 +#define IB_MOD_QP_RETRY_CNT 0x00002000 +#define IB_MOD_QP_LOCAL_ACK_TIMEOUT 0x00004000 +#define IB_MOD_QP_RNR_RETRY_CNT 0x00008000 + +/* +* SEE ALSO +* ib_qp_mod_t +*****/ + + +/****s* Access Layer/ib_qp_mod_t +* NAME +* ib_qp_mod_t +* +* DESCRIPTION +* Information needed to change the state of a queue pair through the +* ib_modify_qp call. +* +* SYNOPSIS +*/ +typedef struct _ib_qp_mod +{ + ib_qp_state_t req_state; + + union _qp_state + { + struct _qp_init + { + ib_qp_opts_t opts; + uint8_t primary_port; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + + } init; + + struct _qp_rtr + { + ib_net32_t rq_psn; + ib_net32_t dest_qp; + ib_av_attr_t primary_av; + uint8_t resp_res; + uint8_t rnr_nak_timeout; + + ib_qp_opts_t opts; + ib_av_attr_t alternate_av; + ib_net32_t qkey; + uint16_t pkey_index; + ib_access_t access_ctrl; + uint32_t sq_depth; + uint32_t rq_depth; + + } rtr; + + struct _qp_rts + { + ib_net32_t sq_psn; + uint8_t retry_cnt; + uint8_t rnr_retry_cnt; + uint8_t local_ack_timeout; + uint8_t init_depth; + + ib_qp_opts_t opts; + uint8_t rnr_nak_timeout; + ib_qp_state_t current_state; + ib_net32_t qkey; + ib_access_t access_ctrl; + uint8_t resp_res; + + ib_av_attr_t primary_av; + ib_av_attr_t alternate_av; + + uint32_t sq_depth; + uint32_t rq_depth; + + ib_apm_state_t apm_state; + uint8_t primary_port; + uint16_t pkey_index; + + } rts; + + struct _qp_sqd + { + boolean_t sqd_event; + + } sqd; + + } state; + +} ib_qp_mod_t; +/* +* SEE ALSO +* ib_qp_state_t, ib_access_t, ib_av_attr_t, ib_apm_state_t +*****/ + + +/****d* Access Layer/ib_wr_type_t +* NAME +* ib_wr_type_t +* +* DESCRIPTION +* Identifies the type of work request posted to a queue pair. +* +* SYNOPSIS +*/ +typedef enum _ib_wr_type_t +{ + WR_SEND = 1, + WR_RDMA_WRITE, + WR_RDMA_READ, + WR_COMPARE_SWAP, + WR_FETCH_ADD + +} ib_wr_type_t; +/*****/ + + +/****s* Access Layer/ib_local_ds_t +* NAME +* ib_local_ds_t +* +* DESCRIPTION +* Local data segment information referenced by send and receive work +* requests. This is used to specify local data buffers used as part of a +* work request. +* +* SYNOPSIS +*/ +typedef struct _ib_local_ds +{ + uint64_t vaddr; + uint32_t length; + uint32_t lkey; + +} ib_local_ds_t; +/*****/ + + +/****d* Access Layer/ib_send_opt_t +* NAME +* ib_send_opt_t +* +* DESCRIPTION +* Optional flags used when posting send work requests. These flags +* indicate specific processing for the send operation. 
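+*
+*	For example, a send that should generate a CQ entry, set the solicited
+*	bit, and have the VPD copy the payload inline would combine:
+*
+*		wr.send_opt = IB_SEND_OPT_SIGNALED | IB_SEND_OPT_SOLICITED |
+*			IB_SEND_OPT_INLINE;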
+* +* SYNOPSIS +*/ +typedef uint32_t ib_send_opt_t; +#define IB_SEND_OPT_IMMEDIATE 0x00000001 +#define IB_SEND_OPT_FENCE 0x00000002 +#define IB_SEND_OPT_SIGNALED 0x00000004 +#define IB_SEND_OPT_SOLICITED 0x00000008 +#define IB_SEND_OPT_INLINE 0x00000010 +#define IB_SEND_OPT_LOCAL 0x00000020 +#define IB_SEND_OPT_VEND_MASK 0xFFFF0000 +/* +* VALUES +* The following flags determine the behavior of a work request when +* posted to the send side. +* +* IB_SEND_OPT_IMMEDIATE +* Send immediate data with the given request. +* +* IB_SEND_OPT_FENCE +* The operation is fenced. Complete all pending send operations before +* processing this request. +* +* IB_SEND_OPT_SIGNALED +* If the queue pair is configured for signaled completion, then +* generate a completion queue entry when this request completes. +* +* IB_SEND_OPT_SOLICITED +* Set the solicited bit on the last packet of this request. +* +* IB_SEND_OPT_INLINE +* Indicates that the requested send data should be copied into a VPD +* owned data buffer. This flag permits the user to issue send operations +* without first needing to register the buffer(s) associated with the +* send operation. Verb providers that support this operation may place +* vendor specific restrictions on the size of send operation that may +* be performed as inline. +* +* IB_SEND_OPT_LOCAL +* Indicates that a sent MAD request should be given to the local VPD for +* processing. MADs sent using this option are not placed on the wire. +* This send option is only valid for MAD send operations. +* +* IB_SEND_OPT_VEND_MASK +* This mask indicates bits reserved in the send options that may be used +* by the verbs provider to indicate vendor specific options. Bits set +* in this area of the send options are ignored by the Access Layer, but +* may have specific meaning to the underlying VPD. +* +*****/ + + +/****s* Access Layer/ib_send_wr_t +* NAME +* ib_send_wr_t +* +* DESCRIPTION +* Information used to submit a work request to the send queue of a queue +* pair. +* +* SYNOPSIS +*/ +typedef struct _ib_send_wr +{ + struct _ib_send_wr* __ptr64 p_next; + uint64_t wr_id; + ib_wr_type_t wr_type; + ib_send_opt_t send_opt; + uint32_t num_ds; + ib_local_ds_t* __ptr64 ds_array; + ib_net32_t immediate_data; + + union _send_dgrm + { + struct _send_ud + { + ib_net32_t remote_qp; + ib_net32_t remote_qkey; + ib_av_handle_t h_av; + uint16_t pkey_index; + void* __ptr64 rsvd; + + } ud; + + struct _send_raw_ether + { + ib_net16_t dest_lid; + uint8_t path_bits; + uint8_t sl; + uint8_t max_static_rate; + ib_net16_t ether_type; + + } raw_ether; + + struct _send_raw_ipv6 + { + ib_net16_t dest_lid; + uint8_t path_bits; + uint8_t sl; + uint8_t max_static_rate; + + } raw_ipv6; + + } dgrm; + + struct _send_remote_ops + { + uint64_t vaddr; + net32_t rkey; + + ib_net64_t atomic1; + ib_net64_t atomic2; + + } remote_ops; + +} ib_send_wr_t; +/* +* FIELDS +* p_next +* A pointer used to chain work requests together. This permits multiple +* work requests to be posted to a queue pair through a single function +* call. This value is set to NULL to mark the end of the chain. +* +* wr_id +* A 64-bit work request identifier that is returned to the consumer +* as part of the work completion. +* +* wr_type +* The type of work request being submitted to the send queue. +* +* send_opt +* Optional send control parameters. +* +* num_ds +* Number of local data segments specified by this work request. +* +* ds_array +* A reference to an array of local data segments used by the send +* operation. 
+*
+*	immediate_data
+*		32-bit field sent as part of a message send or RDMA write operation.
+*		This field is only valid if the send_opt flag IB_SEND_OPT_IMMEDIATE
+*		has been set.
+*
+*	dgrm.ud.remote_qp
+*		Identifies the destination queue pair of an unreliable datagram send
+*		operation.
+*
+*	dgrm.ud.remote_qkey
+*		The qkey for the destination queue pair.
+*
+*	dgrm.ud.h_av
+*		An address vector that specifies the path information used to route
+*		the outbound datagram to the destination queue pair.
+*
+*	dgrm.ud.pkey_index
+*		The pkey index for this send work request.  This is valid only
+*		for IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.  The work request
+*		is posted using this pkey index to build the GMP's BTH instead
+*		of using the QP's pkey.
+*
+*	dgrm.ud.rsvd
+*		Reserved for use by the Access Layer.
+*
+*	dgrm.raw_ether.dest_lid
+*		The destination LID that will receive this raw ether send.
+*
+*	dgrm.raw_ether.path_bits
+*		The path bits used to select the source path for the send.
+*
+*	dgrm.raw_ether.sl
+*		The service level for the send.
+*
+*	dgrm.raw_ether.max_static_rate
+*		The maximum static rate for the send.
+*
+*	dgrm.raw_ether.ether_type
+*		The EtherType of the raw send.
+*
+*	dgrm.raw_ipv6.dest_lid
+*		The destination LID that will receive this raw IPv6 send.
+*
+*	dgrm.raw_ipv6.path_bits
+*		The path bits used to select the source path for the send.
+*
+*	dgrm.raw_ipv6.sl
+*		The service level for the send.
+*
+*	dgrm.raw_ipv6.max_static_rate
+*		The maximum static rate for the send.
+*
+*	remote_ops.vaddr
+*		The registered virtual memory address of the remote memory to access
+*		with an RDMA or atomic operation.
+*
+*	remote_ops.rkey
+*		The rkey associated with the specified remote vaddr.  This data must
+*		be presented exactly as obtained from the remote node.  No swapping
+*		of data must be performed.
+*
+*	atomic1
+*		The first operand for an atomic operation.
+*
+*	atomic2
+*		The second operand for an atomic operation.
+*
+* NOTES
+*	The format of data sent over the fabric is user-defined and is considered
+*	opaque to the access layer.  The sole exception to this is MADs posted
+*	to a MAD QP service.  MADs are expected to match the format defined by
+*	the InfiniBand specification and must be in network-byte order when
+*	posted to the MAD QP service.
+*
+* SEE ALSO
+*	ib_wr_type_t, ib_local_ds_t, ib_send_opt_t
+*****/
+
+
+/****s* Access Layer/ib_recv_wr_t
+* NAME
+*	ib_recv_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a work request to the receive queue of a
+*	queue pair.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_recv_wr
+{
+	struct _ib_recv_wr* __ptr64	p_next;
+	uint64_t					wr_id;
+	uint32_t					num_ds;
+	ib_local_ds_t* __ptr64		ds_array;
+
+}	ib_recv_wr_t;
+/*
+* FIELDS
+*	p_next
+*		A pointer used to chain work requests together.  This permits multiple
+*		work requests to be posted to a queue pair through a single function
+*		call.  This value is set to NULL to mark the end of the chain.
+*
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	num_ds
+*		Number of local data segments specified by this work request.
+*
+*	ds_array
+*		A reference to an array of local data segments used by the receive
+*		operation.
+*
+* SEE ALSO
+*	ib_local_ds_t
+*****/
+
+
+/****s* Access Layer/ib_bind_wr_t
+* NAME
+*	ib_bind_wr_t
+*
+* DESCRIPTION
+*	Information used to submit a memory window bind work request to the send
+*	queue of a queue pair.
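+*
+*	As an illustrative sketch only (hypothetical handles; the posting call
+*	is assumed to be the AL memory window bind routine, e.g. ib_bind_mw),
+*	a consumer typically zeroes the structure, sets h_mr, access_ctrl, and
+*	local_ds, and submits the bind to the send queue of a connected queue
+*	pair.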
+*
+* SYNOPSIS
+*/
+typedef struct _ib_bind_wr
+{
+	uint64_t					wr_id;
+	ib_send_opt_t				send_opt;
+
+	ib_mr_handle_t				h_mr;
+	ib_access_t					access_ctrl;
+	net32_t						current_rkey;
+
+	ib_local_ds_t				local_ds;
+
+}	ib_bind_wr_t;
+/*
+* FIELDS
+*	wr_id
+*		A 64-bit work request identifier that is returned to the consumer
+*		as part of the work completion.
+*
+*	send_opt
+*		Optional send control parameters.
+*
+*	h_mr
+*		Handle to the memory region to which this window is being bound.
+*
+*	access_ctrl
+*		Access rights for this memory window.
+*
+*	current_rkey
+*		The current rkey assigned to this window for remote access.
+*
+*	local_ds
+*		A reference to a local data segment used by the bind operation.
+*
+* SEE ALSO
+*	ib_send_opt_t, ib_access_t, ib_local_ds_t
+*****/
+
+
+/****d* Access Layer/ib_wc_status_t
+* NAME
+*	ib_wc_status_t
+*
+* DESCRIPTION
+*	Indicates the status of a completed work request.  These VALUES are
+*	returned to the user when retrieving completions.  Note that success is
+*	identified as IB_WCS_SUCCESS, which is always zero.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_wc_status_t
+{
+	IB_WCS_SUCCESS,
+	IB_WCS_LOCAL_LEN_ERR,
+	IB_WCS_LOCAL_OP_ERR,
+	IB_WCS_LOCAL_PROTECTION_ERR,
+	IB_WCS_WR_FLUSHED_ERR,
+	IB_WCS_MEM_WINDOW_BIND_ERR,
+	IB_WCS_REM_ACCESS_ERR,
+	IB_WCS_REM_OP_ERR,
+	IB_WCS_RNR_RETRY_ERR,
+	IB_WCS_TIMEOUT_RETRY_ERR,
+	IB_WCS_REM_INVALID_REQ_ERR,
+	IB_WCS_UNMATCHED_RESPONSE,	/* InfiniBand Access Layer */
+	IB_WCS_CANCELED,			/* InfiniBand Access Layer */
+	IB_WCS_UNKNOWN				/* Must be last. */
+
+}	ib_wc_status_t;
+/*
+* VALUES
+*	IB_WCS_SUCCESS
+*		Work request completed successfully.
+*
+*	IB_WCS_MAD
+*		The completed work request was associated with a management datagram
+*		that requires post processing.  The MAD will be returned to the user
+*		through a callback once all post processing has completed.
+*
+*	IB_WCS_LOCAL_LEN_ERR
+*		Generated for a work request posted to the send queue when the
+*		total of the data segment lengths exceeds the message length of the
+*		channel.  Generated for a work request posted to the receive queue
+*		when the total of the data segment lengths is too small for a
+*		valid incoming message.
+*
+*	IB_WCS_LOCAL_OP_ERR
+*		An internal QP consistency error was generated while processing this
+*		work request.  This may indicate that the QP was in an incorrect state
+*		for the requested operation.
+*
+*	IB_WCS_LOCAL_PROTECTION_ERR
+*		The data segments of the locally posted work request did not refer to
+*		a valid memory region.  The memory may not have been properly
+*		registered for the requested operation.
+*
+*	IB_WCS_WR_FLUSHED_ERR
+*		The work request was flushed from the QP before being completed.
+*
+*	IB_WCS_MEM_WINDOW_BIND_ERR
+*		A memory window bind operation failed due to insufficient access
+*		rights.
+*
+*	IB_WCS_REM_ACCESS_ERR
+*		A protection error was detected at the remote node for an RDMA or
+*		atomic operation.
+*
+*	IB_WCS_REM_OP_ERR
+*		The operation could not be successfully completed at the remote node.
+*		This may indicate that the remote QP was in an invalid state or
+*		contained an invalid work request.
+*
+*	IB_WCS_RNR_RETRY_ERR
+*		The RNR retry count was exceeded while trying to send this message.
+*
+*	IB_WCS_TIMEOUT_RETRY_ERR
+*		The local transport timeout counter expired while trying to send this
+*		message.
+*
+*	IB_WCS_REM_INVALID_REQ_ERR
+*		The remote node detected an invalid message on the channel.  This
+*		error is usually a result of one of the following:
+*		- The operation was not supported on the receive queue.
+*		- There were insufficient buffers to receive a new RDMA request.
+*		- There were insufficient buffers to receive a new atomic operation.
+*		- An RDMA request was larger than 2^31 bytes.
+*
+*	IB_WCS_UNMATCHED_RESPONSE
+*		A response MAD was received for which there was no matching send.  The
+*		send operation may have been canceled by the user or may have timed
+*		out.
+*
+*	IB_WCS_CANCELED
+*		The completed work request was canceled by the user.
+*****/
+
+
+
+/****f* IBA Base: Types/ib_get_wc_status_str
+* NAME
+*	ib_get_wc_status_str
+*
+* DESCRIPTION
+*	Returns a string for the specified work completion status.
+*
+* SYNOPSIS
+*/
+AL_EXPORT const char* AL_API
+ib_get_wc_status_str(
+	IN				ib_wc_status_t				wc_status );
+/*
+* PARAMETERS
+*	wc_status
+*		[in] work completion status value
+*
+* RETURN VALUES
+*	Pointer to the work completion status description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+
+/****d* Access Layer/ib_wc_type_t
+* NAME
+*	ib_wc_type_t
+*
+* DESCRIPTION
+*	Indicates the type of work completion.
+*
+* SYNOPSIS
+*/
+typedef enum _ib_wc_type_t
+{
+	IB_WC_SEND,
+	IB_WC_RDMA_WRITE,
+	IB_WC_RECV,
+	IB_WC_RDMA_READ,
+	IB_WC_MW_BIND,
+	IB_WC_FETCH_ADD,
+	IB_WC_COMPARE_SWAP,
+	IB_WC_RECV_RDMA_WRITE,
+	IB_WC_UNKNOWN
+
+}	ib_wc_type_t;
+/*****/
+
+
+/****f* IBA Base: Types/ib_get_wc_type_str
+* NAME
+*	ib_get_wc_type_str
+*
+* DESCRIPTION
+*	Returns a string for the specified work completion type.
+*
+* SYNOPSIS
+*/
+AL_EXPORT const char* AL_API
+ib_get_wc_type_str(
+	IN				ib_wc_type_t				wc_type );
+/*
+* PARAMETERS
+*	wc_type
+*		[in] work completion type value
+*
+* RETURN VALUES
+*	Pointer to the work completion type description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+
+/****d* Access Layer/ib_recv_opt_t
+* NAME
+*	ib_recv_opt_t
+*
+* DESCRIPTION
+*	Indicates optional fields valid in a receive work completion.
+*
+* SYNOPSIS
+*/
+typedef uint32_t					ib_recv_opt_t;
+#define IB_RECV_OPT_IMMEDIATE		0x00000001
+#define IB_RECV_OPT_FORWARD			0x00000002
+#define IB_RECV_OPT_GRH_VALID		0x00000004
+#define IB_RECV_OPT_VEND_MASK		0xFFFF0000
+/*
+* VALUES
+*	IB_RECV_OPT_IMMEDIATE
+*		Indicates that immediate data is valid for this work completion.
+*
+*	IB_RECV_OPT_FORWARD
+*		Indicates that the received trap should be forwarded to the SM.
+*
+*	IB_RECV_OPT_GRH_VALID
+*		Indicates presence of the global route header.  When set, the first
+*		40 bytes received are the GRH.
+*
+*	IB_RECV_OPT_VEND_MASK
+*		This mask indicates bits reserved in the receive options that may be
+*		used by the verbs provider to indicate vendor specific options.  Bits
+*		set in this area of the receive options are ignored by the Access
+*		Layer, but may have specific meaning to the underlying VPD.
+*****/
+
+
+/****s* Access Layer/ib_wc_t
+* NAME
+*	ib_wc_t
+*
+* DESCRIPTION
+*	Work completion information.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_wc
+{
+	struct _ib_wc* __ptr64		p_next;
+	uint64_t					wr_id;
+	ib_wc_type_t				wc_type;
+
+	uint32_t					length;
+	ib_wc_status_t				status;
+	uint64_t					vendor_specific;
+
+	union _wc_recv
+	{
+		struct _wc_conn
+		{
+			ib_recv_opt_t		recv_opt;
+			ib_net32_t			immediate_data;
+
+		}	conn;
+
+		struct _wc_ud
+		{
+			ib_recv_opt_t		recv_opt;
+			ib_net32_t			immediate_data;
+			ib_net32_t			remote_qp;
+			uint16_t			pkey_index;
+			ib_net16_t			remote_lid;
+			uint8_t				remote_sl;
+			uint8_t				path_bits;
+
+		}	ud;
+
+		struct _wc_raw_ipv6
+		{
+			ib_net16_t			remote_lid;
+			uint8_t				remote_sl;
+			uint8_t				path_bits;
+
+		}	raw_ipv6;
+
+		struct _wc_raw_ether
+		{
+			ib_net16_t			remote_lid;
+			uint8_t				remote_sl;
+			uint8_t				path_bits;
+			ib_net16_t			ether_type;
+
+		}	raw_ether;
+
+	}	recv;
+
+}	ib_wc_t;
+/*
+* FIELDS
+*	p_next
+*		A pointer used to chain work completions.  This permits multiple
+*		work completions to be retrieved from a completion queue through a
+*		single function call.  This value is set to NULL to mark the end of
+*		the chain.
+*
+*	wr_id
+*		The 64-bit work request identifier that was specified when posting the
+*		work request.
+*
+*	wc_type
+*		Indicates the type of work completion.
+*
+*	length
+*		The total length of the data sent or received with the work request.
+*
+*	status
+*		The result of the work request.
+*
+*	vendor_specific
+*		HCA vendor specific information returned as part of the completion.
+*
+*	recv.conn.recv_opt
+*		Indicates optional fields valid as part of a work request that
+*		completed on a connected (reliable or unreliable) queue pair.
+*
+*	recv.conn.immediate_data
+*		32-bit field received as part of an inbound message on a connected
+*		queue pair.  This field is only valid if the recv_opt flag
+*		IB_RECV_OPT_IMMEDIATE has been set.
+*
+*	recv.ud.recv_opt
+*		Indicates optional fields valid as part of a work request that
+*		completed on an unreliable datagram queue pair.
+*
+*	recv.ud.immediate_data
+*		32-bit field received as part of an inbound message on an unreliable
+*		datagram queue pair.  This field is only valid if the recv_opt flag
+*		IB_RECV_OPT_IMMEDIATE has been set.
+*
+*	recv.ud.remote_qp
+*		Identifies the source queue pair of a received datagram.
+*
+*	recv.ud.pkey_index
+*		The pkey index of the source queue pair.  This is valid only for
+*		IB_QPT_QP1 and IB_QPT_QP1_ALIAS QP types.
+*
+*	recv.ud.remote_lid
+*		The source LID of the received datagram.
+*
+*	recv.ud.remote_sl
+*		The service level used by the source of the received datagram.
+*
+*	recv.ud.path_bits
+*		The path bits of the source of the received datagram.
+*
+*	recv.raw_ipv6.remote_lid
+*		The source LID of the received message.
+*
+*	recv.raw_ipv6.remote_sl
+*		The service level used by the source of the received message.
+*
+*	recv.raw_ipv6.path_bits
+*		The path bits of the source of the received message.
+*
+*	recv.raw_ether.remote_lid
+*		The source LID of the received message.
+*
+*	recv.raw_ether.remote_sl
+*		The service level used by the source of the received message.
+*
+*	recv.raw_ether.path_bits
+*		The path bits of the source of the received message.
+*
+*	recv.raw_ether.ether_type
+*		The EtherType of the received message.
+*
+* NOTES
+*	When the work request completes with error, the only values that the
+*	consumer can depend on are the wr_id field, and the status of the
+*	operation.
+*
+*	If the consumer is using the same CQ for completions from more than
+*	one type of QP (i.e., Reliable Connected, Datagram, etc.), then the
+*	consumer must have additional information to decide what fields of the
+*	union are valid.
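+*
+*	As an illustration only (hypothetical completion-queue handle and helper
+*	routines; assuming the AL free/done work-completion list convention of
+*	ib_poll_cq), a consumer might drain a CQ as follows:
+*
+*		ib_wc_t		wc, *p_free, *p_done;
+*
+*		cl_memclr( &wc, sizeof(wc) );
+*		p_free = &wc;
+*		while( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
+*		{
+*			for( ; p_done; p_done = p_done->p_next )
+*			{
+*				if( p_done->status != IB_WCS_SUCCESS )
+*					handle_error( p_done->wr_id, p_done->status );
+*				else
+*					handle_completion( p_done );
+*			}
+*			/* Recycle the completion structure for the next poll. */
+*			wc.p_next = NULL;
+*			p_free = &wc;
+*		}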
+*
+* SEE ALSO
+*	ib_wc_type_t, ib_qp_type_t, ib_wc_status_t, ib_recv_opt_t
+*****/
+
+
+/****s* Access Layer/ib_mr_create_t
+* NAME
+*	ib_mr_create_t
+*
+* DESCRIPTION
+*	Information required to create a registered memory region.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mr_create
+{
+	void* __ptr64				vaddr;
+	uint64_t					length;
+	ib_access_t					access_ctrl;
+
+}	ib_mr_create_t;
+/*
+* FIELDS
+*	vaddr
+*		Starting virtual address of the region being registered.
+*
+*	length
+*		Length of the buffer to register.
+*
+*	access_ctrl
+*		Access rights of the registered region.
+*
+* SEE ALSO
+*	ib_access_t
+*****/
+
+
+/****s* Access Layer/ib_phys_range_t
+* NAME
+*	ib_phys_range_t
+*
+* DESCRIPTION
+*	Information describing a physical memory range.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_phys_range
+{
+	uint64_t					base_addr;
+	uint64_t					size;
+
+}	ib_phys_range_t;
+/*
+* FIELDS
+*	base_addr
+*		Physical address of the base of the memory range.
+*
+*	size
+*		Size, in bytes, of the memory range.
+*
+* NOTES
+*	The memory range must start and end on an HCA-supported page boundary.
+*
+* SEE ALSO
+*	ib_phys_create_t
+*********/
+
+
+/****s* Access Layer/ib_phys_create_t
+* NAME
+*	ib_phys_create_t
+*
+* DESCRIPTION
+*	Information required to create a physical memory region.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_phys_create
+{
+	uint64_t					length;
+	uint32_t					num_ranges;
+	ib_phys_range_t* __ptr64	range_array;
+	uint32_t					buf_offset;
+	uint32_t					hca_page_size;
+	ib_access_t					access_ctrl;
+
+}	ib_phys_create_t;
+/*
+* FIELDS
+*	length
+*		The length of the memory region in bytes.
+*
+*	num_ranges
+*		Number of ib_phys_range structures listed in the specified range
+*		array.
+*
+*	range_array
+*		An array of ib_phys_range structures to be registered as a single
+*		memory region.
+*
+*	buf_offset
+*		The offset into the first physical memory range of the specified
+*		memory region on which to start the virtual address.
+*
+*	hca_page_size
+*		The HCA page size to use to register the memory.
+*
+*	access_ctrl
+*		Access rights of the registered region.
+*
+* SEE ALSO
+*	ib_access_t
+*****/
+
+
+/****s* Access Layer/ib_mr_attr_t
+* NAME
+*	ib_mr_attr_t
+*
+* DESCRIPTION
+*	Attributes of a registered memory region.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_mr_attr
+{
+	ib_pd_handle_t				h_pd;
+	uint64_t					local_lb;
+	uint64_t					local_ub;
+	uint64_t					remote_lb;
+	uint64_t					remote_ub;
+	ib_access_t					access_ctrl;
+	net32_t						lkey;
+	net32_t						rkey;
+
+}	ib_mr_attr_t;
+/*
+* FIELDS
+*	h_pd
+*		Handle to the protection domain for this memory region.
+*
+*	local_lb
+*		The virtual address of the lower bound of protection for local
+*		memory access.  This is always a 64-bit quantity to support
+*		registering more than 4GB of memory on 32-bit systems with PAE.
+*
+*	local_ub
+*		The virtual address of the upper bound of protection for local
+*		memory access.  This is always a 64-bit quantity to support
+*		registering more than 4GB of memory on 32-bit systems with PAE.
+*
+*	remote_lb
+*		The virtual address of the lower bound of protection for remote
+*		memory access.  This is always a 64-bit quantity to support
+*		registering more than 4GB of memory on 32-bit systems with PAE.
+*
+*	remote_ub
+*		The virtual address of the upper bound of protection for remote
+*		memory access.  This is always a 64-bit quantity to support
+*		registering more than 4GB of memory on 32-bit systems with PAE.
+*
+*	access_ctrl
+*		Access rights for the specified memory region.
+*
+*	lkey
+*		The lkey associated with this memory region.
+* +* rkey +* The rkey associated with this memory region. +* +* NOTES +* The remote_lb, remote_ub, and rkey are only valid if remote memory access +* is enabled for this memory region. +* +* SEE ALSO +* ib_access_t +*****/ + + +/****d* Access Layer/ib_ca_mod_t +* NAME +* ib_ca_mod_t -- Modify port attributes and error counters +* +* DESCRIPTION +* Specifies modifications to the port attributes of a channel adapter. +* +* SYNOPSIS +*/ +typedef uint32_t ib_ca_mod_t; +#define IB_CA_MOD_IS_CM_SUPPORTED 0x00000001 +#define IB_CA_MOD_IS_SNMP_SUPPORTED 0x00000002 +#define IB_CA_MOD_IS_DEV_MGMT_SUPPORTED 0x00000004 +#define IB_CA_MOD_IS_VEND_SUPPORTED 0x00000008 +#define IB_CA_MOD_IS_SM 0x00000010 +#define IB_CA_MOD_IS_SM_DISABLED 0x00000020 +#define IB_CA_MOD_QKEY_CTR 0x00000040 +#define IB_CA_MOD_PKEY_CTR 0x00000080 +#define IB_CA_MOD_IS_NOTICE_SUPPORTED 0x00000100 +#define IB_CA_MOD_IS_TRAP_SUPPORTED 0x00000200 +#define IB_CA_MOD_IS_APM_SUPPORTED 0x00000400 +#define IB_CA_MOD_IS_SLMAP_SUPPORTED 0x00000800 +#define IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED 0x00001000 +#define IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED 0x00002000 +#define IB_CA_MOD_IS_SYSGUID_SUPPORTED 0x00004000 +#define IB_CA_MOD_IS_DR_NOTICE_SUPPORTED 0x00008000 +#define IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED 0x00010000 +#define IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED 0x00020000 +#define IB_CA_MOD_IS_REINIT_SUPORTED 0x00040000 +#define IB_CA_MOD_IS_LEDINFO_SUPPORTED 0x00080000 +#define IB_CA_MOD_SHUTDOWN_PORT 0x00100000 +#define IB_CA_MOD_INIT_TYPE_VALUE 0x00200000 +#define IB_CA_MOD_SYSTEM_IMAGE_GUID 0x00400000 +/* +* VALUES +* IB_CA_MOD_IS_CM_SUPPORTED +* Indicates if there is a communication manager accessible through +* the port. +* +* IB_CA_MOD_IS_SNMP_SUPPORTED +* Indicates if there is an SNMP agent accessible through the port. +* +* IB_CA_MOD_IS_DEV_MGMT_SUPPORTED +* Indicates if there is a device management agent accessible through +* the port. +* +* IB_CA_MOD_IS_VEND_SUPPORTED +* Indicates if there is a vendor supported agent accessible through +* the port. +* +* IB_CA_MOD_IS_SM +* Indicates if there is a subnet manager accessible through +* the port. +* +* IB_CA_MOD_IS_SM_DISABLED +* Indicates if the port has been disabled for configuration by the subnet +* manager. +* +* IB_CA_MOD_QKEY_CTR +* Used to reset the qkey violation counter associated with the port. +* +* IB_CA_MOD_PKEY_CTR +* Used to reset the pkey violation counter associated with the port. +* +* IB_CA_MOD_IS_NOTICE_SUPPORTED +* Indicates that this CA supports ability to generate Notices for +* Port State changes. (only applicable to switches) +* +* IB_CA_MOD_IS_TRAP_SUPPORTED +* Indicates that this management port supports ability to generate +* trap messages. (only applicable to switches) +* +* IB_CA_MOD_IS_APM_SUPPORTED +* Indicates that this port is capable of performing Automatic Migration. +* +* IB_CA_MOD_IS_SLMAP_SUPPORTED +* Indicates this port supports SLMAP capability. +* +* IB_CA_MOD_IS_PKEY_NVRAM_SUPPORTED +* Indicates that PKEY is supported in NVRAM +* +* IB_CA_MOD_IS_MKEY_NVRAM_SUPPORTED +* Indicates that MKEY is supported in NVRAM +* +* IB_CA_MOD_IS_SYSGUID_SUPPORTED +* Indicates System Image GUID support. +* +* IB_CA_MOD_IS_DR_NOTICE_SUPPORTED +* Indicate support for generating Direct Routed Notices +* +* IB_CA_MOD_IS_BOOT_MGMT_SUPPORTED +* Indicates support for Boot Management +* +* IB_CA_MOD_IS_CAPM_NOTICE_SUPPORTED +* Indicates capability to generate notices for changes to CAPMASK +* +* IB_CA_MOD_IS_REINIT_SUPORTED +* Indicates type of node init supported. 
+*		Refer to Chapter 14 of the InfiniBand specification for
+*		initialization actions.
+*
+*	IB_CA_MOD_IS_LEDINFO_SUPPORTED
+*		Indicates support for LED info.
+*
+*	IB_CA_MOD_SHUTDOWN_PORT
+*		Used to modify the port active indicator.
+*
+*	IB_CA_MOD_INIT_TYPE_VALUE
+*		Used to modify the init_type value for the port.
+*
+*	IB_CA_MOD_SYSTEM_IMAGE_GUID
+*		Used to modify the system image GUID for the port.
+*****/
+
+
+/****d* Access Layer/ib_mr_mod_t
+* NAME
+*	ib_mr_mod_t
+*
+* DESCRIPTION
+*	Mask used to specify which attributes of a registered memory region are
+*	being modified.
+*
+* SYNOPSIS
+*/
+typedef uint32_t					ib_mr_mod_t;
+#define IB_MR_MOD_ADDR				0x00000001
+#define IB_MR_MOD_PD				0x00000002
+#define IB_MR_MOD_ACCESS			0x00000004
+/*
+* VALUES
+*	IB_MR_MOD_ADDR
+*		The address of the memory region is being modified.
+*
+*	IB_MR_MOD_PD
+*		The protection domain associated with the memory region is being
+*		modified.
+*
+*	IB_MR_MOD_ACCESS
+*		The access rights of the memory region are being modified.
+*****/
+
+/****d* IBA Base: Constants/IB_SMINFO_STATE_INIT
+* NAME
+*	IB_SMINFO_STATE_INIT
+*
+* DESCRIPTION
+*	Encoded state value used in the SMInfo attribute.
+*
+* SOURCE
+*/
+#define IB_SMINFO_STATE_INIT				4
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_HANDOVER
+* NAME
+*	IB_SMINFO_ATTR_MOD_HANDOVER
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_HANDOVER		(CL_NTOH32(0x000001))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+* NAME
+*	IB_SMINFO_ATTR_MOD_ACKNOWLEDGE
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_ACKNOWLEDGE	(CL_NTOH32(0x000002))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISABLE
+* NAME
+*	IB_SMINFO_ATTR_MOD_DISABLE
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_DISABLE		(CL_NTOH32(0x000003))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_STANDBY
+* NAME
+*	IB_SMINFO_ATTR_MOD_STANDBY
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_STANDBY		(CL_NTOH32(0x000004))
+/**********/
+
+/****d* IBA Base: Constants/IB_SMINFO_ATTR_MOD_DISCOVER
+* NAME
+*	IB_SMINFO_ATTR_MOD_DISCOVER
+*
+* DESCRIPTION
+*	Encoded attribute modifier value used on SubnSet(SMInfo) SMPs.
+*
+* SOURCE
+*/
+#define IB_SMINFO_ATTR_MOD_DISCOVER		(CL_NTOH32(0x000005))
+/**********/
+
+/****s* Access Layer/ib_ci_op_t
+* NAME
+*	ib_ci_op_t
+*
+* DESCRIPTION
+*	A structure used for vendor specific CA interface communication.
+*
+* SYNOPSIS
+*/
+typedef struct _ib_ci_op
+{
+	IN				uint32_t					command;
+	IN				uint32_t					buf_size;
+	IN				uint32_t					buf_info;
+	IN	OUT			int32_t						status;
+		OUT			uint32_t					num_bytes_ret;
+	IN	OUT			void* __ptr64				p_buf OPTIONAL;
+
+}	ib_ci_op_t;
+/*
+* FIELDS
+*	command
+*		A command code that is understood by the verbs provider.
+*
+*	status
+*		The completion status from the verbs provider.  This field should be
+*		initialized to indicate an error to allow detection and cleanup in
+*		case a communication error occurs between user-mode and kernel-mode.
+*
+*	buf_size
+*		The size of the buffer in bytes.
+*
+*	buf_info
+*		Additional buffer information.
+*
+*	p_buf
+*		A reference to a buffer containing vendor specific data.  The verbs
+*		provider must not access pointers in the p_buf between user-mode and
+*		kernel-mode.
Any pointers embedded in the p_buf are invalidated by +* the user-mode/kernel-mode transition. +* +* num_bytes_ret +* The size in bytes of the vendor specific data returned in the buffer. +* This field is set by the verbs provider. The verbs provider should +* verify that the buffer size is sufficient to hold the data being +* returned. +* +* NOTES +* This structure is provided to allow the exchange of vendor specific +* data between the originator and the verbs provider. Users of this +* structure are expected to know the format of data in the p_buf based +* on the structure command field or the usage context. +*****/ + + +#endif // __IB_TYPES_EXTENDED_H__ diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/cl_dispatcher.h b/branches/Ndi/ulp/opensm/user/include/opensm/cl_dispatcher.h new file mode 100644 index 00000000..7b5a3ec1 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/cl_dispatcher.h @@ -0,0 +1,673 @@ +/* + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of dispatcher abstraction. + * + * Environment: + * All + * + * $Revision: 1.4 $ + */ + + +#ifndef _CL_DISPATCHER_H_ +#define _CL_DISPATCHER_H_ + + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* Component Library/Dispatcher +* NAME +* Dispatcher +* +* DESCRIPTION +* The Dispatcher provides a facility for message routing to +* asynchronous worker threads. +* +* The Dispatcher functions operate on a cl_dispatcher_t structure +* which should be treated as opaque and should be manipulated +* only through the provided functions. +* +* SEE ALSO +* Structures: +* cl_dispatcher_t +* +* Initialization/Destruction: +* cl_disp_construct, cl_disp_init, cl_disp_shutdown, cl_disp_destroy +* +* Manipulation: +* cl_disp_post, cl_disp_reset, cl_disp_wait_on +*********/ + + +/****s* Component Library: Dispatcher/cl_disp_msgid_t +* NAME +* cl_disp_msgid_t +* +* DESCRIPTION +* Defines the type of dispatcher messages. 
+* +* SYNOPSIS +*/ +typedef uint32_t cl_disp_msgid_t; +/**********/ + + +/****s* Component Library: Dispatcher/CL_DISP_MSGID_NONE +* NAME +* CL_DISP_MSGID_NONE +* +* DESCRIPTION +* Defines a message value that means "no message". +* This value is used during registration by Dispatcher clients +* that do not wish to receive messages. +* +* No Dispatcher message is allowed to have this value. +* +* SYNOPSIS +*/ +#define CL_DISP_MSGID_NONE 0xFFFFFFFF +/**********/ + +/****s* Component Library: Dispatcher/CL_DISP_INVALID_HANDLE +* NAME +* CL_DISP_INVALID_HANDLE +* +* DESCRIPTION +* Defines the value of an invalid Dispatcher registration handle. +* +* SYNOPSIS +*/ +#define CL_DISP_INVALID_HANDLE ((cl_disp_reg_handle_t)0) +/*********/ + +/****f* Component Library: Dispatcher/cl_pfn_msgrcv_cb_t +* NAME +* cl_pfn_msgrcv_cb_t +* +* DESCRIPTION +* This typedef defines the prototype for client functions invoked +* by the Dispatcher. The Dispatcher calls the corresponding +* client function when delivering a message to the client. +* +* The client function must be reentrant if the user creates a +* Dispatcher with more than one worker thread. +* +* SYNOPSIS +*/ +typedef void +(*cl_pfn_msgrcv_cb_t)( + IN void* context, + IN void* p_data ); +/* +* PARAMETERS +* context +* [in] Client specific context specified in a call to +* cl_disp_register +* +* p_data +* [in] Pointer to the client specific data payload +* of this message. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This typedef provides a function prototype reference for +* the function provided by Dispatcher clients as a parameter +* to the cl_disp_register function. +* +* SEE ALSO +* Dispatcher, cl_disp_register +*********/ + + +/****f* Component Library: Dispatcher/cl_pfn_msgdone_cb_t +* NAME +* cl_pfn_msgdone_cb_t +* +* DESCRIPTION +* This typedef defines the prototype for client functions invoked +* by the Dispatcher. The Dispatcher calls the corresponding +* client function after completing delivery of a message. +* +* The client function must be reentrant if the user creates a +* Dispatcher with more than one worker thread. +* +* SYNOPSIS +*/ +typedef void +(*cl_pfn_msgdone_cb_t)( + IN void* context, + IN void* p_data ); +/* +* PARAMETERS +* context +* [in] Client specific context specified in a call to +* cl_disp_post +* +* p_data +* [in] Pointer to the client specific data payload +* of this message. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This typedef provides a function prototype reference for +* the function provided by Dispatcher clients as a parameter +* to the cl_disp_post function. +* +* SEE ALSO +* Dispatcher, cl_disp_post +*********/ + + +/****s* Component Library: Dispatcher/cl_dispatcher_t +* NAME +* cl_dispatcher_t +* +* DESCRIPTION +* Dispatcher structure. +* +* The Dispatcher is thread safe. +* +* The cl_dispatcher_t structure should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _cl_dispatcher +{ + cl_spinlock_t lock; + cl_ptr_vector_t reg_vec; + cl_qlist_t reg_list; + cl_thread_pool_t worker_threads; + cl_qlist_t msg_fifo; + cl_qpool_t msg_pool; + uint64_t last_msg_queue_time_us; +} cl_dispatcher_t; +/* +* FIELDS +* reg_vec +* Vector of registration info objects. Indexed by message msg_id. +* +* lock +* Spinlock to guard internal structures. +* +* msg_fifo +* FIFO of messages being processed by the Dispatcher. New +* messages are posted to the tail of the FIFO. 
+*		Worker threads pull messages from the front.
+*
+*	worker_threads
+*		Thread pool of worker threads to dispose of posted messages.
+*
+*	msg_pool
+*		Pool of message objects to be processed through the FIFO.
+*
+*	reg_list
+*		List of registration info objects for all registered clients.
+*
+*	last_msg_queue_time_us
+*		The time, in microseconds, that the last message spent in the queue.
+*
+* SEE ALSO
+*	Dispatcher
+*********/
+
+
+/****s* Component Library: Dispatcher/cl_disp_reg_info_t
+* NAME
+*	cl_disp_reg_info_t
+*
+* DESCRIPTION
+*	Defines the dispatcher registration object structure.
+*
+*	The cl_disp_reg_info_t structure is for internal use by the
+*	Dispatcher only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_disp_reg_info
+{
+	cl_list_item_t			list_item;
+	cl_pfn_msgrcv_cb_t		pfn_rcv_callback;
+	const void				*context;
+	atomic32_t				ref_cnt;
+	cl_disp_msgid_t			msg_id;
+	cl_dispatcher_t			*p_disp;
+
+}	cl_disp_reg_info_t;
+/*
+* FIELDS
+*	list_item
+*		List item for insertion into the Dispatcher's registration list.
+*
+*	pfn_rcv_callback
+*		Client's message receive callback.
+*
+*	context
+*		Client's context for message receive callback.
+*
+*	ref_cnt
+*		Number of outstanding references to this registration object.
+*
+*	msg_id
+*		Dispatcher message msg_id value for this registration object.
+*
+*	p_disp
+*		Pointer to parent Dispatcher.
+*
+* SEE ALSO
+*********/
+
+
+/****s* Component Library: Dispatcher/cl_disp_msg_t
+* NAME
+*	cl_disp_msg_t
+*
+* DESCRIPTION
+*	Defines the dispatcher message structure.
+*
+*	The cl_disp_msg_t structure is for internal use by the
+*	Dispatcher only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_disp_msg
+{
+	cl_pool_item_t			item;
+	const void				*p_data;
+	cl_disp_reg_info_t		*p_src_reg;
+	cl_disp_reg_info_t		*p_dest_reg;
+	cl_pfn_msgdone_cb_t		pfn_xmt_callback;
+	uint64_t				in_time;
+	const void				*context;
+}	cl_disp_msg_t;
+/*
+* FIELDS
+*	item
+*		List & Pool linkage.  Must be first element in the structure!!
+*
+*	p_data
+*		Pointer to the data payload for this message.  The payload
+*		is opaque to the Dispatcher.
+*
+*	p_src_reg
+*		Pointer to the registration info of the sender.
+*
+*	p_dest_reg
+*		Pointer to the registration info of the recipient.
+*
+*	pfn_xmt_callback
+*		Client's message done callback.
+*
+*	in_time
+*		The absolute time the message was inserted into the queue.
+*
+*	context
+*		Client's message done callback context.
+*
+* SEE ALSO
+*********/
+
+
+/****s* Component Library: Dispatcher/cl_disp_reg_handle_t
+* NAME
+*	cl_disp_reg_handle_t
+*
+* DESCRIPTION
+*	Defines the Dispatcher registration handle.  This handle
+*	should be treated as opaque by the client.
+*
+* SYNOPSIS
+*/
+typedef const struct _cl_disp_reg_info	*cl_disp_reg_handle_t;
+/**********/
+
+
+/****f* Component Library: Dispatcher/cl_disp_construct
+* NAME
+*	cl_disp_construct
+*
+* DESCRIPTION
+*	This function constructs a Dispatcher object.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_construct(
+	IN	cl_dispatcher_t* const	p_disp );
+/*
+* PARAMETERS
+*	p_disp
+*		[in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_disp_init and cl_disp_destroy.
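+*
+*	A minimal lifecycle sketch (error handling omitted):
+*
+*		cl_dispatcher_t		disp;
+*
+*		cl_disp_construct( &disp );
+*		if( cl_disp_init( &disp, 1, "example" ) == CL_SUCCESS )
+*		{
+*			/* Register clients and post messages here. */
+*			cl_disp_shutdown( &disp );
+*		}
+*		cl_disp_destroy( &disp );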
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_init, cl_disp_destroy
+*********/
+
+
+/****f* Component Library: Dispatcher/cl_disp_init
+* NAME
+*	cl_disp_init
+*
+* DESCRIPTION
+*	This function initializes a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_disp_init(
+	IN	cl_dispatcher_t* const	p_disp,
+	IN	const uint32_t			thread_count,
+	IN	const char* const		name );
+/*
+* PARAMETERS
+*	p_disp
+*		[in] Pointer to a Dispatcher.
+*
+*	thread_count
+*		[in] The number of worker threads to create in this Dispatcher.
+*		A value of 0 causes the Dispatcher to create one worker thread
+*		per CPU in the system.  When the Dispatcher is created with
+*		only one thread, the Dispatcher guarantees to deliver posted
+*		messages in order.  When the Dispatcher is created with more
+*		than one thread, messages may be delivered out of order.
+*
+*	name
+*		[in] Name to associate with the threads.  The name may be up to 16
+*		characters, including a terminating null character.  All threads
+*		created in the Dispatcher have the same name.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_destroy, cl_disp_register, cl_disp_unregister,
+*	cl_disp_post
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_shutdown
+* NAME
+*	cl_disp_shutdown
+*
+* DESCRIPTION
+*	This function shuts down a Dispatcher object: it unregisters all
+*	messages, clears the FIFO, and waits for the worker threads to exit.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_shutdown(
+	IN	cl_dispatcher_t* const	p_disp );
+/*
+* PARAMETERS
+*	p_disp
+*		[in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function does not return until all worker threads
+*	have exited client callback functions and have been successfully
+*	shut down.
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_construct, cl_disp_init
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_destroy
+* NAME
+*	cl_disp_destroy
+*
+* DESCRIPTION
+*	This function destroys a Dispatcher object.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_destroy(
+	IN	cl_dispatcher_t* const	p_disp );
+/*
+* PARAMETERS
+*	p_disp
+*		[in] Pointer to a Dispatcher.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_construct, cl_disp_init
+*********/
+
+
+/****f* Component Library: Dispatcher/cl_disp_register
+* NAME
+*	cl_disp_register
+*
+* DESCRIPTION
+*	This function registers a client with a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_disp_reg_handle_t
+cl_disp_register(
+	IN	cl_dispatcher_t* const	p_disp,
+	IN	const cl_disp_msgid_t	msg_id,
+	IN	cl_pfn_msgrcv_cb_t		pfn_callback OPTIONAL,
+	IN	const void* const		context );
+/*
+* PARAMETERS
+*	p_disp
+*		[in] Pointer to a Dispatcher.
+*
+*	msg_id
+*		[in] Numeric message ID for which the client is registering.
+*		If the client does not wish to receive any messages
+*		(a send-only client), then the caller should set this value
+*		to CL_DISP_MSGID_NONE.  For efficiency, numeric msg_id
+*		values should start with 0 and should be contiguous, or nearly so.
+*
+*	pfn_callback
+*		[in] Message receive callback.  The Dispatcher calls this
+*		function after receiving a posted message with the
+*		appropriate msg_id value.  Send-only clients may specify
+*		NULL for this value.
+*
+*	context
+*		[in] Client context value passed to the cl_pfn_msgrcv_cb_t
+*		function.
+*
+* RETURN VALUE
+*	On success, a Dispatcher registration handle.
+*	CL_DISP_INVALID_HANDLE otherwise.
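+*
+* NOTES
+*	A short registration sketch (MY_MSG_ID, my_rcv_cb, my_ctx, and payload
+*	are hypothetical):
+*
+*		cl_disp_reg_handle_t	h_reg;
+*
+*		h_reg = cl_disp_register( &disp, MY_MSG_ID, my_rcv_cb, &my_ctx );
+*		if( h_reg != CL_DISP_INVALID_HANDLE )
+*		{
+*			cl_disp_post( h_reg, MY_MSG_ID, &payload, NULL, NULL );
+*			cl_disp_unregister( h_reg );
+*		}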
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_unregister, cl_disp_post
+*********/
+
+
+/****f* Component Library: Dispatcher/cl_disp_unregister
+* NAME
+*	cl_disp_unregister
+*
+* DESCRIPTION
+*	This function unregisters a client from a Dispatcher.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_unregister(
+	IN	const cl_disp_reg_handle_t	handle );
+/*
+* PARAMETERS
+*	handle
+*		[in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function will not return until worker threads have exited
+*	the callback functions for this client.  Do not invoke this
+*	function from a callback.
+*
+* SEE ALSO
+*	Dispatcher, cl_disp_register
+*********/
+
+
+/****f* Component Library: Dispatcher/cl_disp_post
+* NAME
+*	cl_disp_post
+*
+* DESCRIPTION
+*	This function posts a message to a Dispatcher object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_disp_post(
+	IN	const cl_disp_reg_handle_t	handle,
+	IN	const cl_disp_msgid_t		msg_id,
+	IN	const void* const			p_data,
+	IN	cl_pfn_msgdone_cb_t			pfn_callback OPTIONAL,
+	IN	const void* const			context );
+/*
+* PARAMETERS
+*	handle
+*		[in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+*	msg_id
+*		[in] Numeric msg_id value associated with this message.
+*
+*	p_data
+*		[in] Data payload for this message.
+*
+*	pfn_callback
+*		[in] Pointer to a cl_pfn_msgdone_cb_t function.
+*		The Dispatcher calls this function after the message has been
+*		processed by the recipient.
+*		The caller may pass NULL for this value, which indicates no
+*		message done callback is necessary.
+*
+*	context
+*		[in] Client context value passed to the cl_pfn_msgdone_cb_t
+*		function.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the message was successfully queued in the Dispatcher.
+*
+* NOTES
+*	The caller must not modify the memory pointed to by p_data until
+*	the Dispatcher calls the pfn_callback function.
+*
+* SEE ALSO
+*	Dispatcher
+*********/
+
+/****f* Component Library: Dispatcher/cl_disp_get_queue_status
+* NAME
+*	cl_disp_get_queue_status
+*
+* DESCRIPTION
+*	This function returns the current status of a Dispatcher's message
+*	queue.
+*
+* SYNOPSIS
+*/
+void
+cl_disp_get_queue_status(
+	IN	const cl_disp_reg_handle_t	handle,
+	OUT	uint32_t					*p_num_queued_msgs,
+	OUT	uint64_t					*p_last_msg_queue_time_ms );
+/*
+* PARAMETERS
+*	handle
+*		[in] cl_disp_reg_handle_t value returned by cl_disp_register.
+*
+*	p_num_queued_msgs
+*		[out] Pointer to a variable to hold the number of messages
+*		currently in the queue.
+*
+*	p_last_msg_queue_time_ms
+*		[out] Pointer to a variable to hold the time, in milliseconds, that
+*		the last dequeued message spent in the queue.
+*
+* RETURN VALUE
+*	This function does not return a value.  The results are returned
+*	through the output parameters.
+*
+* NOTES
+*	External locking is not required.
+*
+* SEE ALSO
+*	Dispatcher
+*********/
+
+END_C_DECLS
+
+#endif	/* !defined(_CL_DISPATCHER_H_) */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/cl_event_wheel.h b/branches/Ndi/ulp/opensm/user/include/opensm/cl_event_wheel.h
new file mode 100644
index 00000000..559f2aaa
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/cl_event_wheel.h
@@ -0,0 +1,497 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of event wheel abstraction.
+ *
+ * Environment:
+ *	All
+ *
+ * $Revision: 1.4 $
+ */
+
+
+#ifndef _CL_EVENT_WHEEL_H_
+#define _CL_EVENT_WHEEL_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* Component Library/Event_Wheel
+* NAME
+*	Event_Wheel
+*
+* DESCRIPTION
+*	The Event_Wheel provides a facility for registering delayed events
+*	and getting called once they time out.
+*
+*	The Event_Wheel functions operate on a cl_event_wheel_t structure
+*	which should be treated as opaque and should be manipulated
+*	only through the provided functions.
+*
+* SEE ALSO
+*	Structures:
+*		cl_event_wheel_t
+*
+*	Initialization/Destruction:
+*		cl_event_wheel_construct, cl_event_wheel_init, cl_event_wheel_destroy
+*
+*	Manipulation:
+*		cl_event_wheel_reg, cl_event_wheel_unreg
+*
+*********/
+
+/****f* Component Library: Event_Wheel/cl_pfn_event_aged_cb_t
+* NAME
+*	cl_pfn_event_aged_cb_t
+*
+* DESCRIPTION
+*	This typedef defines the prototype for client functions invoked
+*	by the Event_Wheel.  The Event_Wheel calls the corresponding
+*	client function when the specific item has aged.
+*
+* SYNOPSIS
+*/
+typedef uint64_t
+(*cl_pfn_event_aged_cb_t)(
+	IN	uint64_t	key,
+	IN	uint32_t	num_regs,
+	IN	void*		context );
+/*
+* PARAMETERS
+*	key
+*		[in] The key used for registering the item in the call to
+*		cl_event_wheel_reg.
+*
+*	num_regs
+*		[in] The number of times this event was registered (pushed in time).
+*
+*	context
+*		[in] Client specific context specified in a call to
+*		cl_event_wheel_reg.
+*
+* RETURN VALUE
+*	This function returns the absolute time, in usec, at which the event
+*	should next fire.  If the returned time is lower than the current time,
+*	the event is unregistered immediately.
+*
+* NOTES
+*	This typedef provides a function prototype reference for
+*	the function provided by Event_Wheel clients as a parameter
+*	to the cl_event_wheel_reg function.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_reg
+*********/
+
+/****s* Component Library: Event_Wheel/cl_event_wheel_t
+* NAME
+*	cl_event_wheel_t
+*
+* DESCRIPTION
+*	Event_Wheel structure.
+*
+*	The Event_Wheel is thread safe.
+*
+*	The cl_event_wheel_t structure should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_event_wheel
+{
+	cl_spinlock_t		lock;
+	cl_spinlock_t		*p_external_lock;
+
+	cl_qmap_t			events_map;
+	boolean_t			closing;
+	cl_qlist_t			events_wheel;
+	cl_timer_t			timer;
+	osm_log_t			*p_log;
+}	cl_event_wheel_t;
+/*
+* FIELDS
+*	lock
+*		Spinlock to guard internal structures.
+*
+*	p_external_lock
+*		Reference to an external spinlock that guards the internal structures
+*		if the event wheel is part of a larger object protected by its own
+*		lock.
+*
+*	events_map
+*		A map holding all registered event items by their key.
+*
+*	closing
+*		A flag indicating the event wheel is closing.  This means that
+*		callbacks that are called when closing == TRUE should just be
+*		ignored.
+*
+*	events_wheel
+*		A list of the events sorted by expiration time.
+*
+*	timer
+*		The timer scheduling event time propagation.
+*
+*	p_log
+*		Pointer to the opensm log object.
+*
+* SEE ALSO
+*	Event_Wheel
+*********/
+
+/****s* Component Library: Event_Wheel/cl_event_wheel_reg_info_t
+* NAME
+*	cl_event_wheel_reg_info_t
+*
+* DESCRIPTION
+*	Defines the event_wheel registration object structure.
+*
+*	The cl_event_wheel_reg_info_t structure is for internal use by the
+*	Event_Wheel only.
+*
+* SYNOPSIS
+*/
+typedef struct _cl_event_wheel_reg_info
+{
+	cl_map_item_t			map_item;
+	cl_list_item_t			list_item;
+	uint64_t				key;
+	cl_pfn_event_aged_cb_t	pfn_aged_callback;
+	uint64_t				aging_time;
+	uint32_t				num_regs;
+	void					*context;
+	cl_event_wheel_t		*p_event_wheel;
+}	cl_event_wheel_reg_info_t;
+/*
+* FIELDS
+*	map_item
+*		The map item of this event.
+*
+*	list_item
+*		The list item in the list of events, sorted by aging time.
+*
+*	key
+*		The key by which one can find the event.
+*
+*	pfn_aged_callback
+*		The client's event-aged callback.
+*
+*	aging_time
+*		The absolute time [usec] at which the event should age.
+*
+*	num_regs
+*		The number of times the same event (key) was registered.
+*
+*	context
+*		Client's context for the event-aged callback.
+*
+*	p_event_wheel
+*		Pointer to this event wheel object.
+*
+* SEE ALSO
+*********/
+
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_construct
+* NAME
+*	cl_event_wheel_construct
+*
+* DESCRIPTION
+*	This function constructs an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_construct(
+	IN	cl_event_wheel_t* const	p_event_wheel );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling cl_event_wheel_init and cl_event_wheel_destroy.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_init, cl_event_wheel_destroy
+*********/
+
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_init
+* NAME
+*	cl_event_wheel_init
+*
+* DESCRIPTION
+*	This function initializes an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_init(
+	IN	cl_event_wheel_t* const	p_event_wheel,
+	IN	osm_log_t				*p_log );
+
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+*	p_log
+*		[in] Pointer to the opensm log object to be used for logging.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_destroy, cl_event_wheel_reg,
+*	cl_event_wheel_unreg
+*
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_init_ex
+* NAME
+*	cl_event_wheel_init_ex
+*
+* DESCRIPTION
+*	This function initializes an Event_Wheel object that is guarded by an
+*	external spinlock.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_init_ex(
+	IN	cl_event_wheel_t* const	p_event_wheel,
+	IN	osm_log_t				*p_log,
+	IN	cl_spinlock_t			*p_external_lock );
+
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+*	p_log
+*		[in] Pointer to the opensm log object to be used for logging.
+*
+*	p_external_lock
+*		[in] Reference to an external spinlock that guards the internal
+*		structures if the event wheel is part of a larger object protected
+*		by its own lock.
+*
+* RETURN VALUE
+*	CL_SUCCESS if the operation is successful.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_destroy, cl_event_wheel_reg,
+*	cl_event_wheel_unreg
+*
+*********/
+
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_destroy
+* NAME
+*	cl_event_wheel_destroy
+*
+* DESCRIPTION
+*	This function destroys an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_destroy(
+	IN	cl_event_wheel_t* const	p_event_wheel );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function does not return until all client callback functions
+*	have completed.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_construct, cl_event_wheel_init
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_dump
+* NAME
+*	cl_event_wheel_dump
+*
+* DESCRIPTION
+*	This function dumps the details of an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_dump(
+	IN	cl_event_wheel_t* const	p_event_wheel );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Note that this function should be called while holding the event wheel
+*	lock!  It does not acquire the lock by itself.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_construct, cl_event_wheel_init
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_reg
+* NAME
+*	cl_event_wheel_reg
+*
+* DESCRIPTION
+*	This function registers a client with an Event_Wheel object.
+*
+* SYNOPSIS
+*/
+cl_status_t
+cl_event_wheel_reg(
+	IN	cl_event_wheel_t* const		p_event_wheel,
+	IN	const uint64_t				key,
+	IN	const uint64_t				aging_time_usec,
+	IN	cl_pfn_event_aged_cb_t		pfn_callback,
+	IN	void* const					context );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+*	key
+*		[in] The specific key by which events are registered.
+*
+*	aging_time_usec
+*		[in] The absolute time, in usec, at which this event should age.
+*
+*	pfn_callback
+*		[in] Event aging callback.  The Event_Wheel calls this
+*		function once the time the event was registered for has arrived.
+*
+*	context
+*		[in] Client context value passed to the cl_pfn_event_aged_cb_t
+*		function.
+*
+* RETURN VALUE
+*	CL_SUCCESS on success, CL_ERROR otherwise.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_unreg
+*********/
+
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_unreg
+* NAME
+*	cl_event_wheel_unreg
+*
+* DESCRIPTION
+*	This function unregisters a client event from an Event_Wheel.
+*
+* SYNOPSIS
+*/
+void
+cl_event_wheel_unreg(
+	IN	cl_event_wheel_t* const	p_event_wheel,
+	IN	uint64_t				key );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+*	key
+*		[in] The key used for registering the event.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	After an event has aged it is automatically removed from
+*	the event wheel.  This function should only be invoked when the need
+*	arises to remove existing events before they age.
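+*
+*	For example (sketch; my_aged_cb and MY_KEY are hypothetical, and
+*	cl_get_time_stamp() is assumed to return the current time in usec):
+*
+*		static uint64_t
+*		my_aged_cb( uint64_t key, uint32_t num_regs, void *context )
+*		{
+*			/* Return a time in the past to unregister the event. */
+*			return 0;
+*		}
+*
+*		/* Age the event one second from now. */
+*		cl_event_wheel_reg( &event_wheel, MY_KEY,
+*			cl_get_time_stamp() + 1000000, my_aged_cb, NULL );
+*
+*		/* ... */
+*
+*		/* Cancel the event before it ages. */
+*		cl_event_wheel_unreg( &event_wheel, MY_KEY );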
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_reg
+*********/
+
+/****f* Component Library: Event_Wheel/cl_event_wheel_num_regs
+* NAME
+*	cl_event_wheel_num_regs
+*
+* DESCRIPTION
+*	This function returns the number of times an event was registered.
+*
+* SYNOPSIS
+*/
+uint32_t
+cl_event_wheel_num_regs(
+	IN	cl_event_wheel_t* const	p_event_wheel,
+	IN	uint64_t				key );
+/*
+* PARAMETERS
+*	p_event_wheel
+*		[in] Pointer to an Event_Wheel.
+*
+*	key
+*		[in] The key used for registering the event.
+*
+* RETURN VALUE
+*	The number of times the event was registered.
+*	0 if the event was never registered or has already aged.
+*
+* SEE ALSO
+*	Event_Wheel, cl_event_wheel_reg, cl_event_wheel_unreg
+*********/
+
+END_C_DECLS
+
+#endif	/* !defined(_CL_EVENT_WHEEL_H_) */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_attrib_req.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_attrib_req.h
new file mode 100644
index 00000000..7344b4a2
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_attrib_req.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_ATTRIB_REQ_H_
+#define _OSM_ATTRIB_REQ_H_
+
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/*
+ * Abstract:
+ *	Declaration of the attribute request object.  This object
+ *	encapsulates information needed by the generic request controller
+ *	to request an attribute from a node.
+ *	These objects are part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+/****h* OpenSM/Attribute Request
+* NAME
+*	Attribute Request
+*
+* DESCRIPTION
+*	The Attribute Request structure encapsulates information needed by
+*	the generic request controller to request an attribute from a node.
+*
+*	This structure allows direct access to member variables.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Attribute Request/osm_attrib_req_t
+* NAME
+*	osm_attrib_req_t
+*
+* DESCRIPTION
+*	Attribute request structure.
+*
+*	This structure allows direct access to member variables.
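+*
+*	For example (sketch only; the attribute ID constant and directed-route
+*	path setup are illustrative assumptions):
+*
+*		osm_attrib_req_t	req;
+*
+*		cl_memclr( &req, sizeof(req) );
+*		req.attrib_id = IB_MAD_ATTR_NODE_INFO;	/* assumed constant */
+*		req.attrib_mod = 0;
+*		/* req.path, req.context, and req.err_msg are filled per target. */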
+* +* SYNOPSIS +*/ +typedef struct _osm_attrib_req +{ + uint16_t attrib_id; + uint32_t attrib_mod; + osm_madw_context_t context; + osm_dr_path_t path; + cl_disp_msgid_t err_msg; + +} osm_attrib_req_t; +/* +* FIELDS +* attrib_id +* Attribute ID for this request. +* +* attrib_mod +* Attribute modifier for this request. +* +* context +* Context to insert in outbound mad wrapper context. +* +* path +* The directed route path to the node. +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_ATTRIB_REQ_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_base.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_base.h new file mode 100644 index 00000000..cc054b3c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_base.h @@ -0,0 +1,811 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Basic OpenSM definitions and structures. + * This object represents an OpenSM "base class". + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.15 $ + */ + +#ifndef _OSM_BASE_H_ +#define _OSM_BASE_H_ + +#ifdef __WIN__ +#include +#define OSM_CDECL __cdecl +#else +#define OSM_CDECL +#endif + +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Constants +* NAME +* Constants +* +* DESCRIPTION +* The following constants are used throughout the OpenSM. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****h* OpenSM/Base +* NAME +* Base +* +* DESCRIPTION +* The Base object encapsulates basic information needed by the +* OpenSM to manage objects. Each OpenSM object includes the +* Base object as the first member. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Base/OSM_DEFAULT_M_KEY +* NAME +* OSM_DEFAULT_M_KEY +* +* DESCRIPTION +* Managment key value used by the OpenSM. 
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_M_KEY 0
+/********/
+
+/****s* OpenSM: Base/OSM_DEFAULT_SM_KEY
+* NAME
+*    OSM_DEFAULT_SM_KEY
+*
+* DESCRIPTION
+*    Subnet Manager key value used by the OpenSM.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SM_KEY 1
+/********/
+
+/****s* OpenSM: Base/OSM_DEFAULT_LMC
+* NAME
+*    OSM_DEFAULT_LMC
+*
+* DESCRIPTION
+*    Default LMC value used by the OpenSM.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_LMC 0
+/********/
+
+/****s* OpenSM: Base/OSM_DEFAULT_MAX_OP_VLS
+* NAME
+*    OSM_DEFAULT_MAX_OP_VLS
+*
+* DESCRIPTION
+*    Default maximal operational VLs to be initialized in the
+*    PortInfo of link ports by the OpenSM.
+*    The default value provides backward compatibility.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_MAX_OP_VLS 5
+/********/
+
+/****s* OpenSM: Base/OSM_DEFAULT_SL
+* NAME
+*    OSM_DEFAULT_SL
+*
+* DESCRIPTION
+*    Default SL value used by the OpenSM.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SL 0
+/********/
+
+/****s* OpenSM: Base/OSM_DEFAULT_SM_PRIORITY
+* NAME
+*    OSM_DEFAULT_SM_PRIORITY
+*
+* DESCRIPTION
+*    Default SM priority value used by the OpenSM,
+*    as defined in the SMInfo attribute. 0 is the lowest priority.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SM_PRIORITY 1
+/********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_TMP_DIR
+* NAME
+*    OSM_DEFAULT_TMP_DIR
+*
+* DESCRIPTION
+*    Specifies the default temporary directory for the log file,
+*    osm-subnet.lst, and other log files.
+*
+* SYNOPSIS
+*/
+#ifdef __WIN__
+#define OSM_DEFAULT_TMP_DIR GetOsmTempPath()
+#else
+#define OSM_DEFAULT_TMP_DIR "/var/log/"
+#endif
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_CACHE_DIR
+* NAME
+*    OSM_DEFAULT_CACHE_DIR
+*
+* DESCRIPTION
+*    Specifies the default cache directory for the db files.
+*    Note that the directory must end with "/" ("\\" for Windows).
+*
+* SYNOPSIS
+*/
+#ifdef __WIN__
+#define OSM_DEFAULT_CACHE_DIR GetOsmCachePath()
+#else
+#define OSM_DEFAULT_CACHE_DIR "/var/cache/osm/"
+#endif
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_LOG_FILE
+* NAME
+*    OSM_DEFAULT_LOG_FILE
+*
+* DESCRIPTION
+*    Specifies the default log file name.
+*
+* SYNOPSIS
+*/
+#ifdef __WIN__
+#define OSM_DEFAULT_LOG_FILE strcat(GetOsmTempPath(), "osm.log")
+#else
+#define OSM_DEFAULT_LOG_FILE "/var/log/osm.log"
+#endif
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_PARTITION_CONFIG_FILE
+* NAME
+*    OSM_DEFAULT_PARTITION_CONFIG_FILE
+*
+* DESCRIPTION
+*    Specifies the default partition config file name.
+*
+* SYNOPSIS
+*/
+#ifdef __WIN__
+#define OSM_DEFAULT_PARTITION_CONFIG_FILE strcat(GetOsmCachePath(), "osm-partitions.conf")
+#else
+#define OSM_DEFAULT_PARTITION_CONFIG_FILE "/etc/osm-partitions.conf"
+#endif
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_SWEEP_INTERVAL_SECS
+* NAME
+*    OSM_DEFAULT_SWEEP_INTERVAL_SECS
+*
+* DESCRIPTION
+*    Specifies the default number of seconds between subnet sweeps.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SWEEP_INTERVAL_SECS 10
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_TRANS_TIMEOUT_MILLISEC
+* NAME
+*    OSM_DEFAULT_TRANS_TIMEOUT_MILLISEC
+*
+* DESCRIPTION
+*    Specifies the default transaction timeout in milliseconds.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_TRANS_TIMEOUT_MILLISEC 200
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_SUBNET_TIMEOUT
+* NAME
+*    OSM_DEFAULT_SUBNET_TIMEOUT
+*
+* DESCRIPTION
+*    Specifies the default subnet timeout,
+*    where timeout time = 4us * 2^timeout.
+*    We use ~1sec here.
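+*
+*    As a worked example of this encoding (illustrative note, not in
+*    the original header): the default exponent below is 0x12
+*    (18 decimal), so the timeout is 4us * 2^18 = 4us * 262144,
+*    i.e. roughly 1.05 sec.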
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SUBNET_TIMEOUT 0x12
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_SWITCH_PACKET_LIFE
+* NAME
+*    OSM_DEFAULT_SWITCH_PACKET_LIFE
+*
+* DESCRIPTION
+*    Specifies the default max life time for a packet on the switch,
+*    where timeout time = 4us * 2^timeout.
+*    We use here the value of ~1sec.
+*    A value > 19 decimal disables this mechanism.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SWITCH_PACKET_LIFE 0x12
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_HEAD_OF_QUEUE_LIFE
+* NAME
+*    OSM_DEFAULT_HEAD_OF_QUEUE_LIFE
+*
+* DESCRIPTION
+*    Sets the time a packet can live at the head of the VL queue.
+*    We use here the value of ~1sec.
+*    A value > 19 decimal disables this mechanism.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_HEAD_OF_QUEUE_LIFE 0x12
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_LEAF_HEAD_OF_QUEUE_LIFE
+* NAME
+*    OSM_DEFAULT_LEAF_HEAD_OF_QUEUE_LIFE
+*
+* DESCRIPTION
+*    Sets the time a packet can live at the head of the VL queue
+*    of a port that drives a CA port.
+*    We use here the value of ~130usec.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_LEAF_HEAD_OF_QUEUE_LIFE 0xC
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_VL_STALL_COUNT
+* NAME
+*    OSM_DEFAULT_VL_STALL_COUNT
+*
+* DESCRIPTION
+*    Sets the number of consecutive head of queue life time drops that
+*    puts the VL into stalled state. In stalled state, the port is
+*    supposed to drop everything for 8*(head of queue lifetime).
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_VL_STALL_COUNT 0x7
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_LEAF_VL_STALL_COUNT
+* NAME
+*    OSM_DEFAULT_LEAF_VL_STALL_COUNT
+*
+* DESCRIPTION
+*    Sets the number of consecutive head of queue life time drops that
+*    puts the VL into stalled state. In stalled state, the port is
+*    supposed to drop everything for 8*(head of queue lifetime). This
+*    value is for switch ports driving a CA port.
+*    We use the value of 1 here, so any drop due to HOQ means stalling
+*    the VL.
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_LEAF_VL_STALL_COUNT 0x1
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_TRAP_SUPRESSION_TIMEOUT
+* NAME
+*    OSM_DEFAULT_TRAP_SUPRESSION_TIMEOUT
+*
+* DESCRIPTION
+*    Specifies the default timeout for ignoring the same trap:
+*    timeout time = 5000000us (~5sec).
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_TRAP_SUPRESSION_TIMEOUT 5000000
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_UNHEALTHY_TIMEOUT
+* NAME
+*    OSM_DEFAULT_UNHEALTHY_TIMEOUT
+*
+* DESCRIPTION
+*    Specifies the default timeout for marking a port as unhealthy:
+*    timeout time = 60000000us (~60sec).
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_UNHEALTHY_TIMEOUT 60000000
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_ERROR_THRESHOLD
+* NAME
+*    OSM_DEFAULT_ERROR_THRESHOLD
+*
+* DESCRIPTION
+*    Specifies the default link error threshold to be set by
+*    SubnMgt(Set.PortInfo).
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_ERROR_THRESHOLD 0x08
+/***********/
+
+/****d* OpenSM: Base/OSM_DEFAULT_SMP_MAX_ON_WIRE
+* NAME
+*    OSM_DEFAULT_SMP_MAX_ON_WIRE
+*
+* DESCRIPTION
+*    Specifies the default number of VL15 SMP MADs allowed on
+*    the wire at any one time.
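+*
+*    In effect this throttles the SM (illustrative note, not in the
+*    original header): with the default of 4 below, a fifth SMP is
+*    held back until one of the four outstanding MADs completes or
+*    times out.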
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_SMP_MAX_ON_WIRE 4
+/***********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_QP0_RCV_SIZE
+* NAME
+*    OSM_SM_DEFAULT_QP0_RCV_SIZE
+*
+* DESCRIPTION
+*    Specifies the default size (in MADs) of the QP0 receive queue.
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_QP0_RCV_SIZE 256
+/***********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_QP0_SEND_SIZE
+* NAME
+*    OSM_SM_DEFAULT_QP0_SEND_SIZE
+*
+* DESCRIPTION
+*    Specifies the default size (in MADs) of the QP0 send queue.
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_QP0_SEND_SIZE 256
+/***********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_QP1_RCV_SIZE
+* NAME
+*    OSM_SM_DEFAULT_QP1_RCV_SIZE
+*
+* DESCRIPTION
+*    Specifies the default size (in MADs) of the QP1 receive queue.
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_QP1_RCV_SIZE 256
+/***********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_QP1_SEND_SIZE
+* NAME
+*    OSM_SM_DEFAULT_QP1_SEND_SIZE
+*
+* DESCRIPTION
+*    Specifies the default size (in MADs) of the QP1 send queue.
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_QP1_SEND_SIZE 256
+/**********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_POLLING_TIMEOUT_MILLISECS
+* NAME
+*    OSM_SM_DEFAULT_POLLING_TIMEOUT_MILLISECS
+*
+* DESCRIPTION
+*    Specifies the polling timeout (in milliseconds) - the time
+*    between one poll and the next.
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_POLLING_TIMEOUT_MILLISECS 10000
+/**********/
+
+/****d* OpenSM: Base/OSM_SM_DEFAULT_POLLING_RETRY_NUMBER
+* NAME
+*    OSM_SM_DEFAULT_POLLING_RETRY_NUMBER
+*
+* DESCRIPTION
+*    Specifies the number of polling retries before the SM goes back
+*    to the DISCOVERY stage. With the defaults, the total handoff
+*    time is 40 sec (4 retries x 10 sec polling timeout).
+*
+* SYNOPSIS
+*/
+#define OSM_SM_DEFAULT_POLLING_RETRY_NUMBER 4
+/**********/
+
+/****d* OpenSM: Base/OSM_NO_PATH
+* NAME
+*    OSM_NO_PATH
+*
+* DESCRIPTION
+*    Value indicating there is no path to the given LID.
+*
+* SYNOPSIS
+*/
+#define OSM_NO_PATH 0xFF
+/**********/
+
+/****d* OpenSM: Base/osm_thread_state_t
+* NAME
+*    osm_thread_state_t
+*
+* DESCRIPTION
+*    Enumerates the possible states of worker threads, such
+*    as the subnet sweeper.
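+*
+*    A typical lifecycle (illustrative sketch only; the field and
+*    helper names below are hypothetical, not from this header) moves
+*    NONE -> INIT -> RUN, and finally EXIT at shutdown:
+*
+*        while( p_sweeper->thread_state == OSM_THREAD_STATE_RUN )
+*            do_sweep( p_sweeper );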
+*
+* SYNOPSIS
+*/
+typedef enum _osm_thread_state
+{
+    OSM_THREAD_STATE_NONE = 0,
+    OSM_THREAD_STATE_INIT,
+    OSM_THREAD_STATE_RUN,
+    OSM_THREAD_STATE_EXIT
+
+} osm_thread_state_t;
+/***********/
+
+/*
+ * OSM_CAP are from Table 117 and C15-0.1.7 Table 186
+ */
+
+/****d* OpenSM: Base/OSM_CAP_IS_SUBN_TRAP_SUP
+* NAME
+*    OSM_CAP_IS_SUBN_TRAP_SUP
+*
+* DESCRIPTION
+*    Management class generates Trap() MADs
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_SUBN_TRAP_SUP (1 << 0)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_SUBN_GET_SET_NOTICE_SUP
+* NAME
+*    OSM_CAP_IS_SUBN_GET_SET_NOTICE_SUP
+*
+* DESCRIPTION
+*    Management class supports Get/Set(Notice)
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_SUBN_GET_SET_NOTICE_SUP (1 << 1)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_SUBN_OPT_RECS_SUP
+* NAME
+*    OSM_CAP_IS_SUBN_OPT_RECS_SUP
+*
+* DESCRIPTION
+*    Support all optional attributes except:
+*    MCMemberRecord, TraceRecord, MultiPathRecord
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_SUBN_OPT_RECS_SUP (1 << 8)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_UD_MCAST_SUP
+* NAME
+*    OSM_CAP_IS_UD_MCAST_SUP
+*
+* DESCRIPTION
+*    Multicast is supported
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_UD_MCAST_SUP (1 << 9)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_MULTIPATH_SUP
+* NAME
+*    OSM_CAP_IS_MULTIPATH_SUP
+*
+* DESCRIPTION
+*    MultiPathRecord and TraceRecord are supported
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_MULTIPATH_SUP (1 << 10)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_REINIT_SUP
+* NAME
+*    OSM_CAP_IS_REINIT_SUP
+*
+* DESCRIPTION
+*    SM/SA supports re-initialization
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_REINIT_SUP (1 << 11)
+/***********/
+
+/****d* OpenSM: Base/OSM_CAP_IS_PORT_INFO_CAPMASK_MATCH_SUPPORTED
+* NAME
+*    OSM_CAP_IS_PORT_INFO_CAPMASK_MATCH_SUPPORTED
+*
+* DESCRIPTION
+*    SM/SA supports enhanced SA PortInfoRecord searches per the 1.2
+*    Errata: if ClassPortInfo:CapabilityMask.IsPortInfoCapMaskMatchSupported
+*    is 1, then the AttributeModifier of the SubnAdmGet() and
+*    SubnAdmGetTable() methods affects the matching behavior on the
+*    PortInfo:CapabilityMask component. If the high-order bit (bit 31)
+*    of the AttributeModifier is set to 1, matching on the
+*    CapabilityMask component will not be an exact bitwise match as
+*    described in the IBA specification. Instead, matching will only
+*    be performed on those bits which are set to 1 in the
+*    PortInfo:CapabilityMask embedded in the query.
+*
+* SYNOPSIS
+*/
+#define OSM_CAP_IS_PORT_INFO_CAPMASK_MATCH_SUPPORTED (1 << 13)
+/***********/
+
+/****d* OpenSM: Base/osm_sm_state_t
+* NAME
+*    osm_sm_state_t
+*
+* DESCRIPTION
+*    Enumerates the possible states of the SM object.
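+*
+*    For orientation (illustrative note, not normative): a master SM
+*    typically progresses INIT -> SWEEP_LIGHT -> SWEEP_HEAVY_* ->
+*    SET_*_UCAST_LIDS -> SET_UCAST_TABLES -> SET_ARMED -> SET_ACTIVE
+*    -> SUBNET_UP, while an SM that loses negotiation ends up in
+*    STANDBY.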
+*
+* SYNOPSIS
+*/
+typedef enum _osm_sm_state
+{
+    OSM_SM_STATE_NO_STATE = 0,
+    OSM_SM_STATE_INIT,
+    OSM_SM_STATE_IDLE,
+    OSM_SM_STATE_SWEEP_LIGHT,
+    OSM_SM_STATE_SWEEP_LIGHT_WAIT,
+    OSM_SM_STATE_SWEEP_HEAVY_SELF,
+    OSM_SM_STATE_SWEEP_HEAVY_SUBNET,
+    OSM_SM_STATE_SET_SM_UCAST_LID,
+    OSM_SM_STATE_SET_SM_UCAST_LID_WAIT,
+    OSM_SM_STATE_SET_SM_UCAST_LID_DONE,
+    OSM_SM_STATE_SET_SUBNET_UCAST_LIDS,
+    OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_WAIT,
+    OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_DONE,
+    OSM_SM_STATE_SET_UCAST_TABLES,
+    OSM_SM_STATE_SET_UCAST_TABLES_WAIT,
+    OSM_SM_STATE_SET_UCAST_TABLES_DONE,
+    OSM_SM_STATE_SET_MCAST_TABLES,
+    OSM_SM_STATE_SET_MCAST_TABLES_WAIT,
+    OSM_SM_STATE_SET_MCAST_TABLES_DONE,
+    OSM_SM_STATE_SET_LINK_PORTS,
+    OSM_SM_STATE_SET_LINK_PORTS_WAIT,
+    OSM_SM_STATE_SET_LINK_PORTS_DONE,
+    OSM_SM_STATE_SET_ARMED,
+    OSM_SM_STATE_SET_ARMED_WAIT,
+    OSM_SM_STATE_SET_ARMED_DONE,
+    OSM_SM_STATE_SET_ACTIVE,
+    OSM_SM_STATE_SET_ACTIVE_WAIT,
+    OSM_SM_STATE_LOST_NEGOTIATION,
+    OSM_SM_STATE_STANDBY,
+    OSM_SM_STATE_SUBNET_UP,
+    OSM_SM_STATE_PROCESS_REQUEST,
+    OSM_SM_STATE_PROCESS_REQUEST_WAIT,
+    OSM_SM_STATE_PROCESS_REQUEST_DONE,
+    OSM_SM_STATE_MASTER_OR_HIGHER_SM_DETECTED,
+    OSM_SM_STATE_SET_PKEY,
+    OSM_SM_STATE_SET_PKEY_WAIT,
+    OSM_SM_STATE_SET_PKEY_DONE,
+    OSM_SM_STATE_MAX
+} osm_sm_state_t;
+/***********/
+
+/****d* OpenSM: Base/osm_signal_t
+* NAME
+*    osm_signal_t
+*
+* DESCRIPTION
+*    Enumerates the possible signal codes used by the OSM managers.
+*    This cannot be an enum type, since conversion to and from
+*    integral types is necessary when passing signals through
+*    the dispatcher.
+*
+* SYNOPSIS
+*/
+#define OSM_SIGNAL_NONE 0
+#define OSM_SIGNAL_SWEEP 1
+#define OSM_SIGNAL_CHANGE_DETECTED 2
+#define OSM_SIGNAL_NO_PENDING_TRANSACTIONS 3
+#define OSM_SIGNAL_DONE 4
+#define OSM_SIGNAL_DONE_PENDING 5
+#define OSM_SIGNAL_LOST_SM_NEGOTIATION 6
+#define OSM_SIGNAL_LIGHT_SWEEP_FAIL 7
+#define OSM_SIGNAL_IDLE_TIME_PROCESS 8
+#define OSM_SIGNAL_IDLE_TIME_PROCESS_REQUEST 9
+#define OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED 10
+#define OSM_SIGNAL_EXIT_STBY 11
+#define OSM_SIGNAL_MAX 12
+
+typedef uintn_t osm_signal_t;
+/***********/
+
+/****d* OpenSM: Base/osm_state_mgr_mode_t
+* NAME
+*    osm_state_mgr_mode_t
+*
+* DESCRIPTION
+*    Enumerates the possible state progressing codes used by the OSM
+*    state manager.
+*
+* SYNOPSIS
+*/
+typedef enum _osm_state_mgr_mode
+{
+    OSM_STATE_STEP_CONTINUOUS = 0,
+    OSM_STATE_STEP_TAKE_ONE,
+    OSM_STATE_STEP_BREAK
+} osm_state_mgr_mode_t;
+/*
+* OSM_STATE_STEP_CONTINUOUS
+*    Normal automatic progress mode.
+*
+* OSM_STATE_STEP_TAKE_ONE
+*    Do one step.
+*
+* OSM_STATE_STEP_BREAK
+*    Stop before taking the next step (the while loop in the state
+*    manager automatically changes to this state).
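+*
+*    A stepping loop over these modes might look like (sketch only;
+*    take_one_step() is a hypothetical helper, not from this header):
+*
+*        while( mode != OSM_STATE_STEP_BREAK )
+*        {
+*            take_one_step( p_mgr );
+*            if( mode == OSM_STATE_STEP_TAKE_ONE )
+*                mode = OSM_STATE_STEP_BREAK;
+*        }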
+*
+**********/
+
+/****d* OpenSM: Base/osm_sm_signal_t
+* NAME
+*    osm_sm_signal_t
+*
+* DESCRIPTION
+*    Enumerates the possible signals used by the OSM_SM_MGR.
+*
+* SYNOPSIS
+*/
+typedef enum _osm_sm_signal
+{
+    OSM_SM_SIGNAL_INIT = 0,
+    OSM_SM_SIGNAL_DISCOVERY_COMPLETED,
+    OSM_SM_SIGNAL_POLLING_TIMEOUT,
+    OSM_SM_SIGNAL_DISCOVER,
+    OSM_SM_SIGNAL_DISABLE,
+    OSM_SM_SIGNAL_HANDOVER,
+    OSM_SM_SIGNAL_HANDOVER_SENT,
+    OSM_SM_SIGNAL_ACKNOWLEDGE,
+    OSM_SM_SIGNAL_STANDBY,
+    OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED,
+    OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED_DONE,
+    OSM_SM_SIGNAL_WAIT_FOR_HANDOVER,
+    OSM_SM_SIGNAL_MAX
+
+} osm_sm_signal_t;
+/***********/
+
+/****d* OpenSM/osm_mcast_req_type_t
+* NAME
+*    osm_mcast_req_type_t
+*
+* DESCRIPTION
+*    Enumerates the possible signals used by the OSM_MCAST_REQUEST.
+*
+* SYNOPSIS
+*/
+typedef enum _osm_mcast_req_type
+{
+    OSM_MCAST_REQ_TYPE_CREATE,
+    OSM_MCAST_REQ_TYPE_JOIN,
+    OSM_MCAST_REQ_TYPE_LEAVE,
+    OSM_MCAST_REQ_TYPE_SUBNET_CHANGE
+
+} osm_mcast_req_type_t;
+/***********/
+
+/****s* OpenSM: Base/MAX_UPDN_GUID_FILE_LINE_LENGTH
+* NAME
+*    MAX_UPDN_GUID_FILE_LINE_LENGTH
+*
+* DESCRIPTION
+*    The maximum line length when reading the updn guid file.
+*
+* SYNOPSIS
+*/
+#define MAX_UPDN_GUID_FILE_LINE_LENGTH 120
+/**********/
+
+/****s* OpenSM: Base/VendorOUIs
+* NAME
+*    VendorOUIs
+*
+* DESCRIPTION
+*    Known device vendor ID and GUID OUIs
+*
+* SYNOPSIS
+*/
+#define OSM_VENDOR_ID_INTEL         0x00D0B7
+#define OSM_VENDOR_ID_MELLANOX      0x0002C9
+#define OSM_VENDOR_ID_REDSWITCH     0x000617
+#define OSM_VENDOR_ID_SILVERSTORM   0x00066A
+#define OSM_VENDOR_ID_TOPSPIN       0x0005AD
+#define OSM_VENDOR_ID_FUJITSU       0x00E000
+#define OSM_VENDOR_ID_FUJITSU2      0x000B5D
+#define OSM_VENDOR_ID_VOLTAIRE      0x0008F1
+#define OSM_VENDOR_ID_YOTTAYOTTA    0x000453
+#define OSM_VENDOR_ID_PATHSCALE     0x001175
+#define OSM_VENDOR_ID_IBM           0x000255
+#define OSM_VENDOR_ID_DIVERGENET    0x00084E
+#define OSM_VENDOR_ID_FLEXTRONICS   0x000B8C
+#define OSM_VENDOR_ID_AGILENT       0x0030D3
+#define OSM_VENDOR_ID_OBSIDIAN      0x001777
+#define OSM_VENDOR_ID_BAYMICRO      0x000BC1
+#define OSM_VENDOR_ID_LSILOGIC      0x00A0B8
+#define OSM_VENDOR_ID_DDN           0x0001FF
+#define OSM_VENDOR_ID_PANTA         0x001393
+#define OSM_VENDOR_ID_HP            0x001708
+#define OSM_VENDOR_ID_RIOWORKS      0x005045
+
+/**********/
+
+END_C_DECLS
+
+#endif /* _OSM_BASE_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_console.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_console.h
new file mode 100644
index 00000000..da0fac3a
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_console.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_CONSOLE_H_
+#define _OSM_CONSOLE_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+void osm_console(osm_opensm_t *p_osm);
+void osm_console_prompt(void);
+
+END_C_DECLS
+
+#endif /* _OSM_CONSOLE_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_db.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_db.h
new file mode 100644
index 00000000..cb68b474
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_db.h
@@ -0,0 +1,455 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_DB_H_
+#define _OSM_DB_H_
+
+/*
+ * Abstract:
+ *    Declaration of the DB interface.
+ *
+ * $Revision: 1.4 $
+ */
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Database
+* NAME
+*    Database
+*
+* DESCRIPTION
+*    The OpenSM database interface provides the means to restore
+*    persistent data, and to query, modify, delete and eventually
+*    commit it back to the persistent media.
+*
+*    The interface is defined such that it is not data dependent:
+*    all keys and data items are text.
+*
+*    The DB implementation should be thread safe, thus callers do not
+*    need to provide serialization.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
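+*
+*    A minimal end-to-end usage sketch (illustrative only; assumes an
+*    initialized osm_log_t named "log" and elides error handling):
+*
+*        osm_db_t db;
+*        osm_db_domain_t *p_dom;
+*
+*        osm_db_construct( &db );
+*        osm_db_init( &db, &log );
+*        p_dom = osm_db_domain_init( &db, "guid2lid" );
+*        osm_db_restore( p_dom );
+*        osm_db_update( p_dom, "0x0002c90000000001", "0x0001" );
+*        osm_db_store( p_dom );
+*        osm_db_destroy( &db );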
+*
+* AUTHOR
+*    Eitan Zahavi, Mellanox Technologies LTD
+*
+*********/
+
+/****s* OpenSM: Database/osm_db_domain_t
+* NAME
+*    osm_db_domain_t
+*
+* DESCRIPTION
+*    A domain of the database. Can be viewed as a database table.
+*
+*    The osm_db_domain_t object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_db_domain {
+    struct _osm_db *p_db;
+    void *p_domain_imp;
+} osm_db_domain_t;
+/*
+* FIELDS
+*    p_db
+*        Pointer to the parent database object.
+*
+*    p_domain_imp
+*        Pointer to the db implementation object.
+*
+* SEE ALSO
+*    osm_db_t
+*********/
+
+/****s* OpenSM: Database/osm_db_t
+* NAME
+*    osm_db_t
+*
+* DESCRIPTION
+*    The main database object.
+*
+*    The osm_db_t object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_db
+{
+    void *p_db_imp;
+    osm_log_t *p_log;
+    cl_list_t domains;
+} osm_db_t;
+/*
+* FIELDS
+*    p_db_imp
+*        Pointer to the database implementation object.
+*
+*    p_log
+*        Pointer to the OSM logging facility.
+*
+*    domains
+*        List of initialized domains.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Database/osm_db_construct
+* NAME
+*    osm_db_construct
+*
+* DESCRIPTION
+*    Construct a database.
+*
+* SYNOPSIS
+*/
+void
+osm_db_construct(
+    IN osm_db_t* const p_db );
+/*
+* PARAMETERS
+*    p_db
+*        [in] Pointer to the database object to construct
+*
+* RETURN VALUES
+*    NONE
+*
+* SEE ALSO
+*    Database, osm_db_init, osm_db_destroy
+*********/
+
+/****f* OpenSM: Database/osm_db_destroy
+* NAME
+*    osm_db_destroy
+*
+* DESCRIPTION
+*    Destroys the osm_db_t structure.
+*
+* SYNOPSIS
+*/
+void
+osm_db_destroy(
+    IN osm_db_t* const p_db );
+/*
+* PARAMETERS
+*    p_db
+*        [in] Pointer to osm_db_t structure to destroy
+*
+* SEE ALSO
+*    Database, osm_db_construct, osm_db_init
+*********/
+
+/****f* OpenSM: Database/osm_db_init
+* NAME
+*    osm_db_init
+*
+* DESCRIPTION
+*    Initializes the osm_db_t structure.
+*
+* SYNOPSIS
+*/
+int
+osm_db_init(
+    IN osm_db_t* const p_db,
+    IN osm_log_t *p_log );
+/*
+* PARAMETERS
+*
+*    p_db
+*        [in] Pointer to the database object to initialize
+*
+*    p_log
+*        [in] Pointer to the OSM logging facility
+*
+* RETURN VALUES
+*    0 on success, 1 otherwise
+*
+* SEE ALSO
+*    Database, osm_db_construct, osm_db_destroy
+*********/
+
+/****f* OpenSM: Database/osm_db_domain_init
+* NAME
+*    osm_db_domain_init
+*
+* DESCRIPTION
+*    Initializes the osm_db_domain_t structure.
+*
+* SYNOPSIS
+*/
+osm_db_domain_t*
+osm_db_domain_init(
+    IN osm_db_t* const p_db,
+    IN char *domain_name);
+/*
+* PARAMETERS
+*
+*    p_db
+*        [in] Pointer to the database object to initialize
+*
+*    domain_name
+*        [in] a char array with the domain name.
+*
+* RETURN VALUES
+*    Pointer to the new domain object, or NULL on failure.
+*
+* SEE ALSO
+*    Database, osm_db_construct, osm_db_destroy
+*********/
+
+/****f* OpenSM: Database/osm_db_restore
+* NAME
+*    osm_db_restore
+*
+* DESCRIPTION
+*    Reads the entire domain from persistent storage, overriding all
+*    existing cached data (if any).
+*
+* SYNOPSIS
+*/
+int
+osm_db_restore(
+    IN osm_db_domain_t *p_domain);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object to restore from
+*        the persistent db
+*
+* RETURN VALUES
+*    0 if successful, 1 otherwise
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_clear, osm_db_store,
+*    osm_db_keys, osm_db_lookup, osm_db_update, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_clear
+* NAME
+*    osm_db_clear
+*
+* DESCRIPTION
+*    Clears all the domain's values from the cache.
+*
+* SYNOPSIS
+*/
+int
+osm_db_clear(
+    IN osm_db_domain_t *p_domain);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object to clear
+*
+* RETURN VALUES
+*    0 if successful, 1 otherwise
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_store,
+*    osm_db_keys, osm_db_lookup, osm_db_update, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_store
+* NAME
+*    osm_db_store
+*
+* DESCRIPTION
+*    Stores the domain cache back to the database (commit).
+*
+* SYNOPSIS
+*/
+int osm_db_store(
+    IN osm_db_domain_t *p_domain);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object to store back to
+*        the persistent db
+*
+* RETURN VALUES
+*    0 if successful, 1 otherwise
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_clear,
+*    osm_db_keys, osm_db_lookup, osm_db_update, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_keys
+* NAME
+*    osm_db_keys
+*
+* DESCRIPTION
+*    Retrieve all keys of the domain.
+*
+* SYNOPSIS
+*/
+int
+osm_db_keys(
+    IN osm_db_domain_t *p_domain,
+    OUT cl_list_t* p_key_list);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object
+*
+*    p_key_list
+*        [out] List of key values. It must be constructed and
+*        initialized by the caller beforehand.
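+*
+*        For example (illustrative sketch; the list helpers are from
+*        complib's cl_list.h):
+*
+*            cl_list_t keys;
+*            cl_list_construct( &keys );
+*            cl_list_init( &keys, 10 );
+*            osm_db_keys( p_dom, &keys );
+*            /* ... iterate over the keys ... */
+*            cl_list_destroy( &keys );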
+*
+* RETURN VALUES
+*    0 if successful, 1 otherwise
+*
+* NOTE: the caller must free and destruct the list; the keys returned
+*    are internal to the hash and must NOT be freed.
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_clear,
+*    osm_db_store, osm_db_lookup, osm_db_update, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_lookup
+* NAME
+*    osm_db_lookup
+*
+* DESCRIPTION
+*    Lookup an entry in the domain by the given key.
+*
+* SYNOPSIS
+*/
+/* lookup value by key */
+char *osm_db_lookup(
+    IN osm_db_domain_t *p_domain,
+    IN char *const p_key);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object
+*
+*    p_key
+*        [in] The key to look for
+*
+* RETURN VALUES
+*    The value as char *, or NULL if not found.
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_clear,
+*    osm_db_store, osm_db_keys, osm_db_update, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_update
+* NAME
+*    osm_db_update
+*
+* DESCRIPTION
+*    Set the value of the given key.
+*
+* SYNOPSIS
+*/
+int
+osm_db_update(
+    IN osm_db_domain_t *p_domain,
+    IN char *const p_key,
+    IN char *const p_val);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object
+*
+*    p_key
+*        [in] The key to update
+*
+*    p_val
+*        [in] The value to update
+*
+* RETURN VALUES
+*    0 on success
+*
+* NOTE: the value is duplicated internally, so the caller may free it.
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_clear,
+*    osm_db_store, osm_db_keys, osm_db_lookup, osm_db_delete
+*********/
+
+/****f* OpenSM: Database/osm_db_delete
+* NAME
+*    osm_db_delete
+*
+* DESCRIPTION
+*    Delete an entry by the given key.
+*
+* SYNOPSIS
+*/
+int
+osm_db_delete(
+    IN osm_db_domain_t *p_domain,
+    IN char *const p_key);
+/*
+* PARAMETERS
+*
+*    p_domain
+*        [in] Pointer to the database domain object
+*
+*    p_key
+*        [in] The key to look for
+*
+* RETURN VALUES
+*    0 on success
+*
+* SEE ALSO
+*    Database, osm_db_domain_init, osm_db_restore, osm_db_clear,
+*    osm_db_store, osm_db_keys, osm_db_lookup, osm_db_update
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_DB_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_db_pack.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_db_pack.h
new file mode 100644
index 00000000..52cdaacc
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_db_pack.h
@@ -0,0 +1,255 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/****h* OpenSM/DB-Pack
+* NAME
+*    Database Types
+*
+* DESCRIPTION
+*    This module provides packing and unpacking of the database
+*    storage into specific types.
+*
+*    The following domains/conversions are supported:
+*    guid2lid - key is a guid and data is a lid.
+*
+* AUTHOR
+*    Eitan Zahavi, Mellanox Technologies LTD
+*
+*********/
+
+#ifndef _OSM_DB_PACK_H_
+#define _OSM_DB_PACK_H_
+
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****f* OpenSM: DB-Pack/osm_db_guid2lid_init
+* NAME
+*    osm_db_guid2lid_init
+*
+* DESCRIPTION
+*    Initialize a domain for the guid2lid table.
+*
+* SYNOPSIS
+*/
+static inline osm_db_domain_t*
+osm_db_guid2lid_init(
+    IN osm_db_t* const p_db )
+{
+    return( osm_db_domain_init( p_db, "guid2lid" ) );
+}
+/*
+* PARAMETERS
+*    p_db
+*        [in] Pointer to the database object
+*
+* RETURN VALUES
+*    The pointer to the newly allocated domain object, or NULL.
+*
+* NOTE: DB domains are destroyed by osm_db_destroy.
+*
+* SEE ALSO
+*    Database, osm_db_init, osm_db_destroy
+*********/
+
+/****s* OpenSM: DB-Pack/osm_db_guid_elem_t
+* NAME
+*    osm_db_guid_elem_t
+*
+* DESCRIPTION
+*    A guid list element, as returned by osm_db_guid2lid_guids.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_db_guid_elem {
+    cl_list_item_t item;
+    uint64_t guid;
+} osm_db_guid_elem_t;
+/*
+* FIELDS
+*    item
+*        required for list manipulations
+*
+*    guid
+*        The GUID value.
+*
+************/
+
+/****f* OpenSM: DB-Pack/osm_db_guid2lid_guids
+* NAME
+*    osm_db_guid2lid_guids
+*
+* DESCRIPTION
+*    Returns a list of guid elements.
+*
+* SYNOPSIS
+*/
+int
+osm_db_guid2lid_guids(
+    IN osm_db_domain_t* const p_g2l,
+    OUT cl_qlist_t* p_guid_list );
+/*
+* PARAMETERS
+*    p_g2l
+*        [in] Pointer to the guid2lid domain
+*
+*    p_guid_list
+*        [out] A quick list of guid elements of type osm_db_guid_elem_t
+*
+* RETURN VALUES
+*    0 if successful
+*
+* NOTE: the output qlist should be initialized by the caller, and each
+*    item freed by the caller before the list is destroyed.
+*
+* SEE ALSO
+*    osm_db_guid2lid_init, osm_db_guid2lid_guids, osm_db_guid2lid_get,
+*    osm_db_guid2lid_set, osm_db_guid2lid_delete
+*********/
+
+/****f* OpenSM: DB-Pack/osm_db_guid2lid_get
+* NAME
+*    osm_db_guid2lid_get
+*
+* DESCRIPTION
+*    Get a lid range by the given guid.
+*
+* SYNOPSIS
+*/
+int
+osm_db_guid2lid_get(
+    IN osm_db_domain_t* const p_g2l,
+    IN uint64_t guid,
+    OUT uint16_t *p_min_lid,
+    OUT uint16_t *p_max_lid);
+/*
+* PARAMETERS
+*    p_g2l
+*        [in] Pointer to the guid2lid domain
+*
+*    guid
+*        [in] The guid to look for
+*
+*    p_min_lid
+*        [out] Pointer to the resulting min lid in host order.
+*
+*    p_max_lid
+*        [out] Pointer to the resulting max lid in host order.
+*
+* RETURN VALUES
+*    0 if successful. The lid will be set to 0 if not found.
+*
+* SEE ALSO
+*    osm_db_guid2lid_init, osm_db_guid2lid_guids,
+*    osm_db_guid2lid_set, osm_db_guid2lid_delete
+*********/
+
+/****f* OpenSM: DB-Pack/osm_db_guid2lid_set
+* NAME
+*    osm_db_guid2lid_set
+*
+* DESCRIPTION
+*    Set a lid range for the given guid.
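+*
+*    A short caller-side sketch (illustrative only; assumes p_g2l was
+*    obtained from osm_db_guid2lid_init and that LIDs are in host
+*    order):
+*
+*        if( !osm_db_guid2lid_set( p_g2l, guid, base_lid,
+*                                  base_lid + (1 << lmc) - 1 ) )
+*            osm_db_store( p_g2l );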
+* +* SYNOPSIS +*/ +int +osm_db_guid2lid_set( + IN osm_db_domain_t* const p_g2l, + IN uint64_t guid, + IN uint16_t min_lid, + IN uint16_t max_lid); +/* +* PARAMETERS +* p_g2l +* [in] Pointer to the guid2lid domain +* +* guid +* [in] The guid to look for +* +* min_lid +* [in] The min lid value to set +* +* max_lid +* [in] The max lid value to set +* +* RETURN VALUES +* 0 if successful +* +* SEE ALSO +* osm_db_guid2lid_init, osm_db_guid2lid_guids +* osm_db_guid2lid_get, osm_db_guid2lid_delete +*********/ + +/****f* OpenSM: DB-Pack/osm_db_guid2lid_delete +* NAME +* osm_db_guid2lid_delete +* +* DESCRIPTION +* Delete the entry by the given guid +* +* SYNOPSIS +*/ +int +osm_db_guid2lid_delete( + IN osm_db_domain_t* const p_g2l, + IN uint64_t guid ); +/* +* PARAMETERS +* p_g2l +* [in] Pointer to the guid2lid domain +* +* guid +* [in] The guid to look for +* +* RETURN VALUES +* 0 if successful otherwise 1 +* +* SEE ALSO +* osm_db_guid2lid_init, osm_db_guid2lid_guids +* osm_db_guid2lid_get, osm_db_guid2lid_set +*********/ + +END_C_DECLS + +#endif /* _OSM_DB_PACK_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_drop_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_drop_mgr.h new file mode 100644 index 00000000..81c51fa4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_drop_mgr.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_drop_mgr_t. + * This object represents the Drop Manager object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_DROP_MGR_H_ +#define _OSM_DROP_MGR_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Drop Manager +* NAME +* Drop Manager +* +* DESCRIPTION +* The Drop Manager object encapsulates the information +* needed to receive the SwitchInfo attribute from a node. +* +* The Drop Manager object is thread safe. 
+* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Drop Manager/osm_drop_mgr_t +* NAME +* osm_drop_mgr_t +* +* DESCRIPTION +* Drop Manager structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_drop_mgr +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + osm_req_t *p_req; + cl_plock_t *p_lock; + +} osm_drop_mgr_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_req +* Pointer to the Request object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Drop Manager object +*********/ + +/****f* OpenSM: Drop Manager/osm_drop_mgr_construct +* NAME +* osm_drop_mgr_construct +* +* DESCRIPTION +* This function constructs a Drop Manager object. +* +* SYNOPSIS +*/ +void osm_drop_mgr_construct( + IN osm_drop_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to a Drop Manager object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_drop_mgr_init, osm_drop_mgr_destroy +* +* Calling osm_drop_mgr_construct is a prerequisite to calling any other +* method except osm_drop_mgr_init. +* +* SEE ALSO +* Drop Manager object, osm_drop_mgr_init, +* osm_drop_mgr_destroy +*********/ + +/****f* OpenSM: Drop Manager/osm_drop_mgr_destroy +* NAME +* osm_drop_mgr_destroy +* +* DESCRIPTION +* The osm_drop_mgr_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_drop_mgr_destroy( + IN osm_drop_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Drop Manager object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_drop_mgr_construct or osm_drop_mgr_init. +* +* SEE ALSO +* Drop Manager object, osm_drop_mgr_construct, +* osm_drop_mgr_init +*********/ + +/****f* OpenSM: Drop Manager/osm_drop_mgr_init +* NAME +* osm_drop_mgr_init +* +* DESCRIPTION +* The osm_drop_mgr_init function initializes a +* Drop Manager object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_drop_mgr_init( + IN osm_drop_mgr_t* const p_mgr, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_req_t* const p_req, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_drop_mgr_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the Drop Manager object was initialized +* successfully. +* +* NOTES +* Allows calling other Drop Manager methods. +* +* SEE ALSO +* Drop Manager object, osm_drop_mgr_construct, +* osm_drop_mgr_destroy +*********/ + +/****f* OpenSM: Drop Manager/osm_drop_mgr_process +* NAME +* osm_drop_mgr_process +* +* DESCRIPTION +* Process the SwitchInfo attribute. +* +* SYNOPSIS +*/ +void osm_drop_mgr_process( + IN const osm_drop_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_drop_mgr_t object. 
+* +* RETURN VALUES +* None +* +* NOTES +* This function processes a SwitchInfo attribute. +* +* SEE ALSO +* Drop Manager, Switch Info Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_DROP_MGR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_errors.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_errors.h new file mode 100644 index 00000000..c1ffb723 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_errors.h @@ -0,0 +1,181 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of error code ranges for the various OpenSM modules. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_ERRORS_H_ +#define _OSM_ERRORS_H_ + +/* + Generic Request Controller + 0100 - 01FF + + Node Info Receive Controller + 0200 - 02FF + + Generic Requester + 0300 - 03FF + + Node Info Receiver + 0400 - 04FF + + Node Description Receiver + 0500 - 05FF + + Node Description Receive Controller + 0600 - 06FF + + Port Info Receiver + 0700 - 07FF + + Port Info Receive Controller + 0800 - 08FF + + Mad Pool + 0900 - 09FF + + SM + 1000 - 10FF + + SM MAD Controller + 1100 - 11FF + + VL15 Interface + 1200 - 12FF + + Switch Info Receive Controller + 1300 - 13FF + + Switch Info Receiver + 1400 - 14FF + + State Manager + 1500 - 15FF + + State Manager Controller + 1600 - 16FF + + LID Manager + 1700 - 17FF + + Link Manager + 1800 - 18FF + + Drop Manager + 1900 - 19FF + + Linear Forwarding Receive Controller + 2000 - 20FF + + Linear Forwarding Receiver + 2100 - 21FF + + Vendor Specific + 2200 - 22FF + + SMInfo Receive Controller + 2300 - 23FF + + SMInfo Info Receiver + 2400 - 24FF + + Generic Responder + 2500 - 25FF + + Linear Forwarding Receive Controller + 2600 - 26FF + + Linear Forwarding Receiver + 2700 - 27FF + + SA MAD controller + 2800 - 28FF + + Node Record Controller + 2900 - 29FF + + PortInfo Record Controller + 3000 - 30FF + + Link Record Controller + 3100 - 31FF + + Path Record Controller + 3200 - 32FF + + SMInfo Record Controller + 3300 - 33FF + + Multicast Record Controller + 3400 - 34FF + + Unicast Manager + 3500 - 35FF + + Multicast Manager + 3600 - 36FF + + SA Response + 3700 - 37FF + + Link Record Receiver + 3800 - 38FF + + Multicast Forwarding Receive Controller + 3900 - 39FF + + Multicast Forwarding Receiver + 4000 - 40FF + + SMInfo Record Receiver + 4100 - 41FF + + PortInfo Record Receiver + 4200 - 42FF + + Service Record Receiver + 4300 - 43FF + +*/ + +#endif /* _OSM_ERRORS_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_ft_config_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ft_config_ctrl.h new file mode 100644 index 00000000..d42f36e2 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ft_config_ctrl.h @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_ft_conf_ctrl_t. + * This object represents a controller that performs a + * Set(Linear/Random ForwardingTable) for the specified switch. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_FT_CONFIG_CTRL_H_ +#define _OSM_FT_CONFIG_CTRL_H_ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Forwarding Table Configuration Controller +* NAME +* Forwarding Table Configuration Controller +* +* DESCRIPTION +* The Forwarding Table Configuration Controller object encapsulates the +* information needed to Set(Linear/Random ForwardingTable) at the +* specified switch. +* +* The Forwarding Table Configuration Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ +/****s* OpenSM: Forwarding Table Configuration Controller/osm_ft_conf_ctrl_t +* NAME +* osm_ft_conf_ctrl_t +* +* DESCRIPTION +* Forwarding Table Configuration Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_ft_conf_ctrl +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_dispatcher_t *p_disp; + +} osm_ft_conf_ctrl_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* p_disp +* Pointer to the Dispatcher. +* +* SEE ALSO +* Forwarding Table Configuration Controller object +*********/ +/****f* OpenSM: Forwarding Table Configuration Controller/osm_ft_conf_ctrl_construct +* NAME +* osm_ft_conf_ctrl_construct +* +* DESCRIPTION +* This function constructs a Forwarding Table Configuration Controller object. +* +* SYNOPSIS +*/ +void osm_ft_conf_ctrl_construct( + IN osm_ft_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Forwarding Table Configuration Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_ft_conf_ctrl_init, osm_ft_conf_ctrl_destroy +* +* Calling osm_ft_conf_ctrl_construct is a prerequisite to calling any other +* method except osm_ft_conf_ctrl_init. +* +* SEE ALSO +* Forwarding Table Configuration Controller object, osm_ft_conf_ctrl_init, +* osm_ft_conf_ctrl_destroy +*********/ + +/****f* OpenSM: Forwarding Table Configuration Controller/osm_ft_conf_ctrl_destroy +* NAME +* osm_ft_conf_ctrl_destroy +* +* DESCRIPTION +* The osm_ft_conf_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_ft_conf_ctrl_destroy( + IN osm_ft_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Forwarding Table Configuration Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_ft_conf_ctrl_construct or osm_ft_conf_ctrl_init. 
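+*
+*    Typical lifecycle (illustrative sketch; the subnet, log, lock and
+*    dispatcher objects are assumed to exist already):
+*
+*        osm_ft_conf_ctrl_t ctrl;
+*
+*        osm_ft_conf_ctrl_construct( &ctrl );
+*        if( osm_ft_conf_ctrl_init( &ctrl, p_subn, p_log,
+*                                   p_lock, p_disp ) == IB_SUCCESS )
+*        {
+*            /* ... controller is usable ... */
+*            osm_ft_conf_ctrl_destroy( &ctrl );
+*        }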
+*
+* SEE ALSO
+*    Forwarding Table Configuration Controller object,
+*    osm_ft_conf_ctrl_construct, osm_ft_conf_ctrl_init
+*********/
+
+/****f* OpenSM: Forwarding Table Configuration Controller/osm_ft_conf_ctrl_init
+* NAME
+*    osm_ft_conf_ctrl_init
+*
+* DESCRIPTION
+*    The osm_ft_conf_ctrl_init function initializes a
+*    Forwarding Table Configuration Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_ft_conf_ctrl_init(
+    IN osm_ft_conf_ctrl_t* const p_ctrl,
+    IN osm_subn_t* const p_subn,
+    IN osm_log_t* const p_log,
+    IN cl_plock_t* const p_lock,
+    IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to an osm_ft_conf_ctrl_t object to initialize.
+*
+*    p_subn
+*        [in] Pointer to the Subnet object for this subnet.
+*
+*    p_log
+*        [in] Pointer to the log object.
+*
+*    p_lock
+*        [in] Pointer to the OpenSM serializing lock.
+*
+*    p_disp
+*        [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*    IB_SUCCESS if the Forwarding Table Configuration Controller object
+*    was initialized successfully.
+*
+* NOTES
+*    Allows calling other Forwarding Table Configuration Controller
+*    methods.
+*
+* SEE ALSO
+*    Forwarding Table Configuration Controller object,
+*    osm_ft_conf_ctrl_construct, osm_ft_conf_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_FT_CONFIG_CTRL_H_ */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_fwd_tbl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_fwd_tbl.h
new file mode 100644
index 00000000..ac31131d
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_fwd_tbl.h
@@ -0,0 +1,388 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_fwd_tbl_t.
+ *    This object represents a unicast forwarding table.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_FWD_TBL_H_
+#define _OSM_FWD_TBL_H_
+
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Forwarding Table
+* NAME
+*    Forwarding Table
+*
+* DESCRIPTION
+*    The Forwarding Table objects encapsulate the information
+*    needed by the OpenSM to manage forwarding tables. The OpenSM
+*    allocates one Forwarding Table object per switch in the
+*    IBA subnet.
+*
+*    The Forwarding Table objects are not thread safe, thus
+*    callers must provide serialization.
+*
+* AUTHOR
+*    Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Forwarding Table/osm_fwd_tbl_t
+* NAME
+*    osm_fwd_tbl_t
+*
+* DESCRIPTION
+*    Forwarding Table structure. This object hides the type
+*    of forwarding table (linear or random) actually used by
+*    the switch.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_fwd_tbl_t
+{
+    osm_rand_fwd_tbl_t *p_rnd_tbl;
+    osm_lin_fwd_tbl_t *p_lin_tbl;
+
+} osm_fwd_tbl_t;
+/*
+* FIELDS
+*    p_rnd_tbl
+*        Pointer to the switch's Random Forwarding Table object.
+*        If the switch does not use a Random Forwarding Table,
+*        then this pointer is NULL.
+*
+*    p_lin_tbl
+*        Pointer to the switch's Linear Forwarding Table object.
+*        If the switch does not use a Linear Forwarding Table,
+*        then this pointer is NULL.
+*
+* SEE ALSO
+*    Forwarding Table object, Random Forwarding Table object.
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_init
+* NAME
+*    osm_fwd_tbl_init
+*
+* DESCRIPTION
+*    Initializes a Forwarding Table object.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_fwd_tbl_init(
+    IN osm_fwd_tbl_t* const p_tbl,
+    IN const ib_switch_info_t* const p_si );
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+*    p_si
+*        [in] Pointer to the SwitchInfo attribute of the associated
+*        switch.
+*
+* RETURN VALUE
+*    IB_SUCCESS if the operation is successful.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_destroy
+* NAME
+*    osm_fwd_tbl_destroy
+*
+* DESCRIPTION
+*    Destroys a Forwarding Table object.
+*
+* SYNOPSIS
+*/
+void
+osm_fwd_tbl_destroy(
+    IN osm_fwd_tbl_t* const p_tbl );
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+* RETURN VALUE
+*    None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_get
+* NAME
+*    osm_fwd_tbl_get
+*
+* DESCRIPTION
+*    Returns the port that routes the specified LID.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_fwd_tbl_get(
+    IN const osm_fwd_tbl_t* const p_tbl,
+    IN uint16_t const lid_ho )
+{
+    if( p_tbl->p_lin_tbl )
+        return( osm_lin_fwd_tbl_get( p_tbl->p_lin_tbl, lid_ho ) );
+    else
+        return( osm_rand_fwd_tbl_get( p_tbl->p_rnd_tbl, lid_ho ) );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+*    lid_ho
+*        [in] LID (host order) for which to find the route.
+*
+* RETURN VALUE
+*    Returns the port that routes the specified LID.
+*    IB_INVALID_PORT_NUM if the table does not have a route for this LID.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_set
+* NAME
+*    osm_fwd_tbl_set
+*
+* DESCRIPTION
+*    Sets the port to route the specified LID.
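+*
+*    A set/get round trip might look like (illustrative sketch;
+*    lid_ho and port_num are caller-supplied values):
+*
+*        osm_fwd_tbl_set( p_tbl, lid_ho, port_num );
+*        CL_ASSERT( osm_fwd_tbl_get( p_tbl, lid_ho ) == port_num );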
+*
+* SYNOPSIS
+*/
+static inline void
+osm_fwd_tbl_set(
+    IN osm_fwd_tbl_t* const p_tbl,
+    IN const uint16_t lid_ho,
+    IN const uint8_t port )
+{
+    CL_ASSERT( p_tbl );
+    if( p_tbl->p_lin_tbl )
+        osm_lin_fwd_tbl_set( p_tbl->p_lin_tbl, lid_ho, port );
+    else
+        osm_rand_fwd_tbl_set( p_tbl->p_rnd_tbl, lid_ho, port );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+*    lid_ho
+*        [in] LID value (host order) for which to set the route.
+*
+*    port
+*        [in] Port to route the specified LID value.
+*
+* RETURN VALUE
+*    None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_set_block
+* NAME
+*    osm_fwd_tbl_set_block
+*
+* DESCRIPTION
+*    Copies the specified block into the Forwarding Table.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+osm_fwd_tbl_set_block(
+    IN osm_fwd_tbl_t* const p_tbl,
+    IN const uint8_t* const p_block,
+    IN const uint32_t block_num )
+{
+    CL_ASSERT( p_tbl );
+    if( p_tbl->p_lin_tbl )
+        return( osm_lin_fwd_tbl_set_block( p_tbl->p_lin_tbl,
+                                           p_block, block_num ) );
+    else
+        return( osm_rand_fwd_tbl_set_block( p_tbl->p_rnd_tbl,
+                                            p_block, block_num ) );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+*    p_block
+*        [in] Pointer to the forwarding table block to copy.
+*
+*    block_num
+*        [in] Number of the block to set.
+*
+* RETURN VALUE
+*    IB_SUCCESS if the operation is successful.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_get_size
+* NAME
+*    osm_fwd_tbl_get_size
+*
+* DESCRIPTION
+*    Returns the number of entries available in the forwarding table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_fwd_tbl_get_size(
+    IN const osm_fwd_tbl_t* const p_tbl )
+{
+    CL_ASSERT( p_tbl );
+    if( p_tbl->p_lin_tbl )
+        return( osm_lin_fwd_tbl_get_size( p_tbl->p_lin_tbl ) );
+    else
+        return( osm_rand_fwd_tbl_get_size( p_tbl->p_rnd_tbl ) );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+* RETURN VALUE
+*    Returns the number of entries available in the forwarding table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_get_lids_per_block
+* NAME
+*    osm_fwd_tbl_get_lids_per_block
+*
+* DESCRIPTION
+*    Returns the number of LIDs per LID block.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_fwd_tbl_get_lids_per_block(
+    IN const osm_fwd_tbl_t* const p_tbl )
+{
+    CL_ASSERT( p_tbl );
+    if( p_tbl->p_lin_tbl )
+        return( osm_lin_fwd_tbl_get_lids_per_block( p_tbl->p_lin_tbl ) );
+    else
+        return( osm_rand_fwd_tbl_get_lids_per_block( p_tbl->p_rnd_tbl ) );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+* RETURN VALUE
+*    Returns the number of LIDs per LID block.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_fwd_tbl_get_max_block_id_in_use
+* NAME
+*    osm_fwd_tbl_get_max_block_id_in_use
+*
+* DESCRIPTION
+*    Returns the maximum block ID in use, given the top LID value.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_fwd_tbl_get_max_block_id_in_use(
+    IN const osm_fwd_tbl_t* const p_tbl,
+    IN const uint16_t lid_top_ho )
+{
+    CL_ASSERT( p_tbl );
+    if( p_tbl->p_lin_tbl )
+        return( osm_lin_fwd_tbl_get_max_block_id_in_use(
+                    p_tbl->p_lin_tbl, lid_top_ho ) );
+    else
+        return( osm_rand_fwd_tbl_get_max_block_id_in_use(
+                    p_tbl->p_rnd_tbl, lid_top_ho ) );
+}
+/*
+* PARAMETERS
+*    p_tbl
+*        [in] Pointer to the Forwarding Table object.
+*
+*    lid_top_ho
+*        [in] Top LID value (host order).
+*
+* RETURN VALUE
+*    Returns the maximum block ID in use.
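+*
+*    For example (illustrative note; IBA linear forwarding tables use
+*    64-LID blocks): with a lid_top of 200 in host order,
+*    200 / 64 = 3, so block ID 3 would be returned.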
+* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_FWD_TBL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_helper.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_helper.h new file mode 100644 index 00000000..a713c5c6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_helper.h @@ -0,0 +1,620 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _OSM_HELPER_H_ +#define _OSM_HELPER_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/* + * Abstract: + * Declaration of helpful functions. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +/****f* OpenSM: Helper/ib_get_sa_method_str + * NAME + * ib_get_sa_method_str + * + * DESCRIPTION + * Returns a string for the specified SA Method value. + * + * SYNOPSIS + */ +const char* +ib_get_sa_method_str( + IN uint8_t method ); +/* + * PARAMETERS + * method + * [in] Network order METHOD ID value. + * + * RETURN VALUES + * Pointer to the method string. + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* OpenSM: Helper/ib_get_sm_method_str +* NAME +* ib_get_sm_method_str +* +* DESCRIPTION +* Returns a string for the specified SM Method value. +* +* SYNOPSIS +*/ +const char* +ib_get_sm_method_str( + IN uint8_t method ); +/* +* PARAMETERS +* method +* [in] Network order METHOD ID value. +* +* RETURN VALUES +* Pointer to the method string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Helper/ib_get_sm_attr_str +* NAME +* ib_get_sm_attr_str +* +* DESCRIPTION +* Returns a string for the specified SM attribute value. +* +* SYNOPSIS +*/ +const char* +ib_get_sm_attr_str( + IN ib_net16_t attr ); +/* +* PARAMETERS +* attr +* [in] Network order attribute ID value. +* +* RETURN VALUES +* Pointer to the attribute string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Helper/ib_get_sa_attr_str +* NAME +* ib_get_sa_attr_str +* +* DESCRIPTION +* Returns a string for the specified SA attribute value. 
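+*
+* For example (editorial sketch; p_log and p_mad are assumed to be a
+* valid log object and a received SA MAD):
+*
+*	osm_log( p_log, OSM_LOG_DEBUG, "Received SA attribute %s\n",
+*		ib_get_sa_attr_str( p_mad->attr_id ) );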
+*
+* SYNOPSIS
+*/
+const char*
+ib_get_sa_attr_str(
+ IN ib_net16_t attr );
+/*
+* PARAMETERS
+* attr
+* [in] Network order attribute ID value.
+*
+* RETURN VALUES
+* Pointer to the attribute string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_dump_port_info
+* NAME
+* osm_dump_port_info
+*
+* DESCRIPTION
+* Dumps the PortInfo attribute to the log.
+*
+* SYNOPSIS
+*/
+void osm_dump_port_info(
+ IN osm_log_t* const p_log,
+ IN const ib_net64_t node_guid,
+ IN const ib_net64_t port_guid,
+ IN const uint8_t port_num,
+ IN const ib_port_info_t* const p_pi,
+ IN const osm_log_level_t log_level );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the osm_log_t object
+*
+* node_guid
+* [in] Node GUID that owns this port.
+*
+* port_guid
+* [in] Port GUID for this port.
+*
+* port_num
+* [in] Port number for this port.
+*
+* p_pi
+* [in] Pointer to the PortInfo attribute
+*
+* log_level
+* [in] Log verbosity level with which to dump the data.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+void
+osm_dump_path_record(
+ IN osm_log_t* const p_log,
+ IN const ib_path_rec_t* const p_pr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_multipath_record(
+ IN osm_log_t* const p_log,
+ IN const ib_multipath_rec_t* const p_mpr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_node_record(
+ IN osm_log_t* const p_log,
+ IN const ib_node_record_t* const p_nr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_mc_record(
+ IN osm_log_t* const p_log,
+ IN const ib_member_rec_t* const p_mcmr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_link_record(
+ IN osm_log_t* const p_log,
+ IN const ib_link_record_t* const p_lr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_service_record(
+ IN osm_log_t* const p_log,
+ IN const ib_service_record_t* const p_sr,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_portinfo_record(
+ IN osm_log_t* const p_log,
+ IN const ib_portinfo_record_t* const p_pir,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_guidinfo_record(
+ IN osm_log_t* const p_log,
+ IN const ib_guidinfo_record_t* const p_gir,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_inform_info(
+ IN osm_log_t* const p_log,
+ IN const ib_inform_info_t* const p_ii,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_inform_info_record(
+ IN osm_log_t* const p_log,
+ IN const ib_inform_info_record_t* const p_iir,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_switch_info_record(
+ IN osm_log_t* const p_log,
+ IN const ib_switch_info_record_t* const p_sir,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_sm_info_record(
+ IN osm_log_t* const p_log,
+ IN const ib_sminfo_record_t* const p_smir,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_pkey_block(
+ IN osm_log_t* const p_log,
+ IN uint64_t port_guid,
+ IN uint16_t block_num,
+ IN uint8_t port_num,
+ IN const ib_pkey_table_t* const p_pkey_tbl,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_slvl_map_table(
+ IN osm_log_t* const p_log,
+ IN uint64_t port_guid,
+ IN uint8_t in_port_num,
+ IN uint8_t out_port_num,
+ IN const ib_slvl_table_t* const p_slvl_tbl,
+ IN const osm_log_level_t log_level );
+
+void
+osm_dump_vl_arb_table(
+ IN osm_log_t* const p_log,
+ IN uint64_t port_guid,
+ IN uint8_t block_num,
+ IN uint8_t port_num,
+ IN const ib_vl_arb_table_t* const p_vla_tbl,
+ IN const osm_log_level_t log_level );
+
+/****f* OpenSM: Helper/osm_dump_node_info
+* NAME
+* osm_dump_node_info
+*
+* DESCRIPTION
+* Dumps the NodeInfo attribute to the log.
+*
+* SYNOPSIS
+*/
+void osm_dump_node_info(
+ IN osm_log_t* const p_log,
+ IN const ib_node_info_t* const p_ni,
+ IN const osm_log_level_t log_level );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the osm_log_t object
+*
+* p_ni
+* [in] Pointer to the NodeInfo attribute
+*
+* log_level
+* [in] Log verbosity level with which to dump the data.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_dump_sm_info
+* NAME
+* osm_dump_sm_info
+*
+* DESCRIPTION
+* Dumps the SMInfo attribute to the log.
+*
+* SYNOPSIS
+*/
+void
+osm_dump_sm_info(
+ IN osm_log_t* const p_log,
+ IN const ib_sm_info_t* const p_smi,
+ IN const osm_log_level_t log_level );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the osm_log_t object
+*
+* p_smi
+* [in] Pointer to the SMInfo attribute
+*
+* log_level
+* [in] Log verbosity level with which to dump the data.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_dump_switch_info
+* NAME
+* osm_dump_switch_info
+*
+* DESCRIPTION
+* Dumps the SwitchInfo attribute to the log.
+*
+* SYNOPSIS
+*/
+void
+osm_dump_switch_info(
+ IN osm_log_t* const p_log,
+ IN const ib_switch_info_t* const p_si,
+ IN const osm_log_level_t log_level );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the osm_log_t object
+*
+* p_si
+* [in] Pointer to the SwitchInfo attribute
+*
+* log_level
+* [in] Log verbosity level with which to dump the data.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_dump_notice
+* NAME
+* osm_dump_notice
+*
+* DESCRIPTION
+* Dumps the Notice attribute to the log.
+*
+* SYNOPSIS
+*/
+void
+osm_dump_notice(
+ IN osm_log_t* const p_log,
+ IN const ib_mad_notice_attr_t *p_ntci,
+ IN const osm_log_level_t log_level );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the osm_log_t object
+*
+* p_ntci
+* [in] Pointer to the Notice attribute
+*
+* log_level
+* [in] Log verbosity level with which to dump the data.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/osm_get_disp_msg_str
+* NAME
+* osm_get_disp_msg_str
+*
+* DESCRIPTION
+* Returns a string for the specified Dispatcher message.
+*
+* SYNOPSIS
+*/
+const char*
+osm_get_disp_msg_str(
+ IN cl_disp_msgid_t msg );
+/*
+* PARAMETERS
+* msg
+* [in] Dispatcher message ID value.
+*
+* RETURN VALUES
+* Pointer to the message description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+void osm_dump_dr_path(
+ IN osm_log_t* const p_log,
+ IN const osm_dr_path_t* const p_path,
+ IN const osm_log_level_t level );
+
+void osm_dump_smp_dr_path(
+ IN osm_log_t* const p_log,
+ IN const ib_smp_t* const p_smp,
+ IN const osm_log_level_t level );
+
+void osm_dump_dr_smp(
+ IN osm_log_t* const p_log,
+ IN const ib_smp_t* const p_smp,
+ IN const osm_log_level_t level );
+
+void osm_dump_sa_mad(
+ IN osm_log_t* const p_log,
+ IN const ib_sa_mad_t* const p_smp,
+ IN const osm_log_level_t level );
+
+/****f* IBA Base: Types/osm_get_sm_state_str
+* NAME
+* osm_get_sm_state_str
+*
+* DESCRIPTION
+* Returns a string for the specified SM state.
+*
+* SYNOPSIS
+*/
+const char*
+osm_get_sm_state_str(
+ IN osm_sm_state_t state );
+/*
+* PARAMETERS
+* state
+* [in] SM State value
+*
+* RETURN VALUES
+* Pointer to the state description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/osm_get_sm_signal_str
+* NAME
+* osm_get_sm_signal_str
+*
+* DESCRIPTION
+* Returns a string for the specified SM signal.
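+*
+* Editorial sketch (not part of the original header); these string
+* helpers are typically used when logging state machine activity:
+*
+*	osm_log( p_log, OSM_LOG_VERBOSE, "SM in state %s got signal %s\n",
+*		osm_get_sm_state_str( state ),
+*		osm_get_sm_signal_str( signal ) );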
+*
+* SYNOPSIS
+*/
+const char*
+osm_get_sm_signal_str(
+ IN osm_signal_t signal );
+/*
+* PARAMETERS
+* signal
+* [in] Signal value
+*
+* RETURN VALUES
+* Pointer to the signal description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+const char*
+osm_get_port_state_str_fixed_width(
+ IN uint8_t port_state );
+
+const char*
+osm_get_node_type_str_fixed_width(
+ IN uint32_t node_type );
+
+const char*
+osm_get_manufacturer_str(
+ IN uint64_t const guid_ho );
+
+const char*
+osm_get_mtu_str(
+ IN uint8_t const mtu );
+
+const char*
+osm_get_lwa_str(
+ IN uint8_t const lwa );
+
+const char*
+osm_get_lsa_str(
+ IN uint8_t const lsa );
+
+
+/****f* IBA Base: Types/osm_get_sm_mgr_signal_str
+* NAME
+* osm_get_sm_mgr_signal_str
+*
+* DESCRIPTION
+* Returns a string for the specified SM manager signal.
+*
+* SYNOPSIS
+*/
+const char*
+osm_get_sm_mgr_signal_str(
+ IN osm_sm_signal_t signal );
+/*
+* PARAMETERS
+* signal
+* [in] SM manager signal
+*
+* RETURN VALUES
+* Pointer to the signal description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* IBA Base: Types/osm_get_sm_mgr_state_str
+* NAME
+* osm_get_sm_mgr_state_str
+*
+* DESCRIPTION
+* Returns a string for the specified SM manager state.
+*
+* SYNOPSIS
+*/
+const char*
+osm_get_sm_mgr_state_str(
+ IN uint16_t state );
+/*
+* PARAMETERS
+* state
+* [in] SM manager state
+*
+* RETURN VALUES
+* Pointer to the state description string.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_HELPER_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_inform.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_inform.h
new file mode 100644
index 00000000..70849ea7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_inform.h
@@ -0,0 +1,305 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_inform_rec_t.
+ * This object represents an IBA Inform Record.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * Author:
+ * Eitan Zahavi, Mellanox
+ *
+ * $Revision: 1.5 $
+ */
+
+#ifndef _OSM_INFR_H_
+#define _OSM_INFR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Inform Record
+* NAME
+* Inform Record
+*
+* DESCRIPTION
+* The Inform Record encapsulates the information needed by the
+* SA to manage InformInfo registrations and to send Reports (Notice)
+* when the SM receives Traps for registered LIDs.
+*
+* The Inform Record objects are not thread safe, thus callers must
+* provide serialization.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: Inform Record/osm_infr_t
+* NAME
+* osm_infr_t
+*
+* DESCRIPTION
+* Inform Record structure.
+*
+* The osm_infr_t object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_infr_t
+{
+ cl_list_item_t list_item;
+ osm_bind_handle_t h_bind;
+ osm_infr_rcv_t* p_infr_rcv;
+ osm_mad_addr_t report_addr;
+ ib_inform_info_record_t inform_record;
+} osm_infr_t;
+/*
+* FIELDS
+* list_item
+* List Item for qlist linkage. Must be first element!!
+*
+* h_bind
+* A handle to the lower-level MAD service.
+*
+* p_infr_rcv
+* The receiver of inform_info's
+*
+* report_addr
+* Report address
+*
+* inform_record
+* The Inform Info Record
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Inform Record/osm_infr_new
+* NAME
+* osm_infr_new
+*
+* DESCRIPTION
+* Allocates and initializes an Inform Record for use.
+*
+* SYNOPSIS
+*/
+osm_infr_t*
+osm_infr_new(
+ IN const osm_infr_t *p_infr_rec );
+/*
+* PARAMETERS
+* p_infr_rec
+* [in] Pointer to IB Inform Record
+*
+* RETURN VALUES
+* Pointer to the new osm_infr_t structure.
+*
+* NOTES
+* Allows calling other Inform Record methods.
+*
+* SEE ALSO
+* Inform Record, osm_infr_construct, osm_infr_destroy
+*********/
+
+/****f* OpenSM: Inform Record/osm_infr_init
+* NAME
+* osm_infr_init
+*
+* DESCRIPTION
+* Initializes the osm_infr_t structure.
+*
+* SYNOPSIS
+*/
+void
+osm_infr_init(
+ IN osm_infr_t* const p_infr,
+ IN const osm_infr_t *p_infr_rec );
+/*
+* PARAMETERS
+* p_infr
+* [in] Pointer to osm_infr_t structure
+* p_infr_rec
+* [in] Pointer to the ib_inform_info_record_t
+*
+* SEE ALSO
+* Inform Record, osm_infr_construct, osm_infr_destroy
+*********/
+
+/****f* OpenSM: Inform Record/osm_infr_construct
+* NAME
+* osm_infr_construct
+*
+* DESCRIPTION
+* Constructs the osm_infr_t structure.
+*
+* SYNOPSIS
+*/
+void
+osm_infr_construct(
+ IN osm_infr_t* const p_infr );
+/*
+* PARAMETERS
+* p_infr
+* [in] Pointer to osm_infr_t structure
+*
+* SEE ALSO
+* Inform Record, osm_infr_construct, osm_infr_destroy
+*********/
+
+/****f* OpenSM: Inform Record/osm_infr_destroy
+* NAME
+* osm_infr_destroy
+*
+* DESCRIPTION
+* Destroys the osm_infr_t structure.
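+*
+* Editorial sketch of the typical life cycle (p_subn, p_log and a
+* populated infr_rec are assumed; error handling omitted):
+*
+*	osm_infr_t *p_infr =
+*		osm_infr_get_by_rec( p_subn, p_log, &infr_rec );
+*	if( p_infr == NULL )
+*	{
+*		p_infr = osm_infr_new( &infr_rec );
+*		osm_infr_insert_to_db( p_subn, p_log, p_infr );
+*	}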
+*
+* SYNOPSIS
+*/
+void
+osm_infr_destroy(
+ IN osm_infr_t* const p_infr );
+/*
+* PARAMETERS
+* p_infr
+* [in] Pointer to osm_infr_t structure
+*
+* SEE ALSO
+* Inform Record, osm_infr_construct, osm_infr_destroy
+*********/
+
+/****f* OpenSM: Inform Record/osm_infr_get_by_rec
+* NAME
+* osm_infr_get_by_rec
+*
+* DESCRIPTION
+* Finds a matching osm_infr_t in the subnet DB by inform_info_record
+*
+* SYNOPSIS
+*/
+osm_infr_t*
+osm_infr_get_by_rec(
+ IN osm_subn_t const *p_subn,
+ IN osm_log_t *p_log,
+ IN osm_infr_t* const p_infr_rec );
+/*
+* PARAMETERS
+* p_subn
+* [in] Pointer to the subnet object
+*
+* p_log
+* [in] Pointer to the log object
+*
+* p_infr_rec
+* [in] Pointer to an inform_info record
+*
+* RETURN
+* The matching osm_infr_t, or NULL if no match exists.
+*
+* SEE ALSO
+* Inform Record, osm_infr_construct, osm_infr_destroy
+*********/
+
+void
+osm_infr_insert_to_db(
+ IN osm_subn_t *p_subn,
+ IN osm_log_t *p_log,
+ IN osm_infr_t *p_infr);
+
+void
+osm_infr_remove_from_db(
+ IN osm_subn_t *p_subn,
+ IN osm_log_t *p_log,
+ IN osm_infr_t *p_infr);
+
+/****f* OpenSM: Inform Record/osm_report_notice
+* NAME
+* osm_report_notice
+*
+* DESCRIPTION
+* Once a Trap has been received by the osm_trap_rcv, or a Trap sourced
+* in the SM has been sent (Traps 64-67), this routine is called with a
+* copy of the notice data.
+* Given a notice attribute, it compares the notice against each
+* registered InformInfo element and, on a match, sends a Report(Notice)
+* to the target QP registered by the address stored in the InformInfo
+* element.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_report_notice(
+ IN osm_log_t* const p_log,
+ IN osm_subn_t* p_subn,
+ IN ib_mad_notice_attr_t* p_ntc );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the log object.
+*
+* p_subn
+* [in] Pointer to the subnet object.
+*
+* p_ntc
+* [in] Pointer to a copy of the incoming trap notice attribute.
+*
+* RETURN
+* IB_SUCCESS on good completion
+*
+* SEE ALSO
+* Inform Record, osm_trap_rcv
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_INFR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_lid_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lid_mgr.h
new file mode 100644
index 00000000..0d63d2e5
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lid_mgr.h
@@ -0,0 +1,322 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_lid_mgr_t.
+ * This object represents the LID Manager object.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LID_MGR_H_
+#define _OSM_LID_MGR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+#define OSM_LID_MGR_LIST_SIZE_MIN 256
+
+/****h* OpenSM/LID Manager
+* NAME
+* LID Manager
+*
+* DESCRIPTION
+* The LID Manager object encapsulates the information
+* needed to control LID assignments on the subnet.
+*
+* The LID Manager object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: LID Manager/osm_lid_mgr_t
+* NAME
+* osm_lid_mgr_t
+*
+* DESCRIPTION
+* LID Manager structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lid_mgr
+{
+ osm_subn_t *p_subn;
+ osm_db_t *p_db;
+ osm_req_t *p_req;
+ osm_log_t *p_log;
+ cl_plock_t *p_lock;
+ boolean_t send_set_reqs;
+ osm_db_domain_t *p_g2l;
+ cl_ptr_vector_t used_lids;
+ cl_qlist_t free_ranges;
+} osm_lid_mgr_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_db
+* Pointer to the database (persistency) object.
+*
+* p_req
+* Pointer to the Requester object sending SMPs.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* send_set_reqs
+* Internal flag indicating whether Set requests were sent
+* during the last processing pass.
+*
+* p_g2l
+* Pointer to the database domain storing the GUID-to-LID mapping.
+*
+* used_lids
+* A vector that maps a LID to its GUID; it keeps track of
+* existing and non-existing GUID-to-LID mappings.
+*
+* free_ranges
+* A list of available free lid ranges. The list is initialized
+* by the code that initializes the lid assignment and is consumed
+* by the procedure that finds a free range. It holds elements of
+* type osm_lid_mgr_range_t.
+*
+* SEE ALSO
+* LID Manager object
+*********/
+
+/****f* OpenSM: LID Manager/osm_lid_mgr_construct
+* NAME
+* osm_lid_mgr_construct
+*
+* DESCRIPTION
+* This function constructs a LID Manager object.
+*
+* SYNOPSIS
+*/
+void
+osm_lid_mgr_construct(
+ IN osm_lid_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to a LID Manager object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_lid_mgr_destroy
+*
+* Calling osm_lid_mgr_construct is a prerequisite to calling any other
+* method except osm_lid_mgr_init.
+*
+* SEE ALSO
+* LID Manager object, osm_lid_mgr_init,
+* osm_lid_mgr_destroy
+*********/
+
+/****f* OpenSM: LID Manager/osm_lid_mgr_destroy
+* NAME
+* osm_lid_mgr_destroy
+*
+* DESCRIPTION
+* The osm_lid_mgr_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_lid_mgr_destroy(
+ IN osm_lid_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* LID Manager object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to +* osm_lid_mgr_construct or osm_lid_mgr_init. +* +* SEE ALSO +* LID Manager object, osm_lid_mgr_construct, +* osm_lid_mgr_init +*********/ + +/****f* OpenSM: LID Manager/osm_lid_mgr_init +* NAME +* osm_lid_mgr_init +* +* DESCRIPTION +* The osm_lid_mgr_init function initializes a +* LID Manager object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_lid_mgr_init( + IN osm_lid_mgr_t* const p_mgr, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_db_t* const p_db, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_lid_mgr_t object to initialize. +* +* p_req +* [in] Pointer to the attribute Requester object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_db +* [in] Pointer to the database object. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the LID Manager object was initialized +* successfully. +* +* NOTES +* Allows calling other LID Manager methods. +* +* SEE ALSO +* LID Manager object, osm_lid_mgr_construct, +* osm_lid_mgr_destroy +*********/ + +/****f* OpenSM: LID Manager/osm_lid_mgr_process_sm +* NAME +* osm_lid_mgr_process_sm +* +* DESCRIPTION +* Configures the SM's port with its designated LID values. +* +* SYNOPSIS +*/ +osm_signal_t +osm_lid_mgr_process_sm( + IN osm_lid_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_lid_mgr_t object. +* +* RETURN VALUES +* Returns the appropriate signal to the caller: +* OSM_SIGNAL_DONE - operation is complete +* OSM_SIGNAL_DONE_PENDING - local operations are complete, but +* transactions are still pending on the wire. +* +* NOTES +* +* SEE ALSO +* LID Manager +*********/ + +/****f* OpenSM: LID Manager/osm_lid_mgr_process_subnet +* NAME +* osm_lid_mgr_process_subnet +* +* DESCRIPTION +* Configures subnet ports (except the SM port itself) with their +* designated LID values. +* +* SYNOPSIS +*/ +osm_signal_t +osm_lid_mgr_process_subnet( + IN osm_lid_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_lid_mgr_t object. +* +* RETURN VALUES +* Returns the appropriate signal to the caller: +* OSM_SIGNAL_DONE - operation is complete +* OSM_SIGNAL_DONE_PENDING - local operations are complete, but +* transactions are still pending on the wire. +* +* NOTES +* +* SEE ALSO +* LID Manager +*********/ + +END_C_DECLS + +#endif /* _OSM_LID_MGR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv.h new file mode 100644 index 00000000..3ba174d0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_lft_rcv_t. + * This object represents the LFT Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_LFT_RCV_H_ +#define _OSM_LFT_RCV_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/LFT Receiver +* NAME +* LFT Receiver +* +* DESCRIPTION +* The LFT Receiver object encapsulates the information +* needed to receive the LFT attribute from a node. +* +* The LFT Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: LFT Receiver/osm_lft_rcv_t +* NAME +* osm_lft_rcv_t +* +* DESCRIPTION +* LFT Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_lft_rcv +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + +} osm_lft_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* LFT Receiver object +*********/ + +/****f* OpenSM: LFT Receiver/osm_lft_rcv_construct +* NAME +* osm_lft_rcv_construct +* +* DESCRIPTION +* This function constructs a LFT Receiver object. +* +* SYNOPSIS +*/ +void osm_lft_rcv_construct( + IN osm_lft_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a LFT Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_lft_rcv_init, osm_lft_rcv_destroy +* +* Calling osm_lft_rcv_construct is a prerequisite to calling any other +* method except osm_lft_rcv_init. +* +* SEE ALSO +* LFT Receiver object, osm_lft_rcv_init, +* osm_lft_rcv_destroy +*********/ + +/****f* OpenSM: LFT Receiver/osm_lft_rcv_destroy +* NAME +* osm_lft_rcv_destroy +* +* DESCRIPTION +* The osm_lft_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_lft_rcv_destroy( + IN osm_lft_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* LFT Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_lft_rcv_construct or osm_lft_rcv_init. 
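+*
+* Editorial sketch of the full life cycle (not from the original
+* header; p_subn, p_log and p_lock are assumed valid):
+*
+*	osm_lft_rcv_t rcv;
+*	osm_lft_rcv_construct( &rcv );
+*	if( osm_lft_rcv_init( &rcv, p_subn, p_log, p_lock ) == IB_SUCCESS )
+*	{
+*		-- ... dispatch LFT MADs to osm_lft_rcv_process ...
+*		osm_lft_rcv_destroy( &rcv );
+*	}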
+* +* SEE ALSO +* LFT Receiver object, osm_lft_rcv_construct, +* osm_lft_rcv_init +*********/ + +/****f* OpenSM: LFT Receiver/osm_lft_rcv_init +* NAME +* osm_lft_rcv_init +* +* DESCRIPTION +* The osm_lft_rcv_init function initializes a +* LFT Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_lft_rcv_init( + IN osm_lft_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_lft_rcv_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the LFT Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other LFT Receiver methods. +* +* SEE ALSO +* LFT Receiver object, osm_lft_rcv_construct, +* osm_lft_rcv_destroy +*********/ + +/****f* OpenSM: LFT Receiver/osm_lft_rcv_process +* NAME +* osm_lft_rcv_process +* +* DESCRIPTION +* Process the LFT attribute. +* +* SYNOPSIS +*/ +void osm_lft_rcv_process( + IN const osm_lft_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_lft_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's LFT attribute. +* +* RETURN VALUES +* CL_SUCCESS if the LFT processing was successful. +* +* NOTES +* This function processes a LFT attribute. +* +* SEE ALSO +* LFT Receiver, Node Description Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_LFT_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv_ctrl.h new file mode 100644 index 00000000..907034d4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_rcv_ctrl.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_lft_rcv_ctrl_t. + * This object represents a controller that receives the IBA + * LFT attribute from a node. + * This object is part of the OpenSM family of objects. 
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LFT_RCV_CTRL_H_
+#define _OSM_LFT_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/LFT Receive Controller
+* NAME
+* LFT Receive Controller
+*
+* DESCRIPTION
+* The LFT Receive Controller object
+* encapsulates the information
+* needed to receive the LFT attribute from a node.
+*
+* The LFT Receive Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: LFT Receive Controller/osm_lft_rcv_ctrl_t
+* NAME
+* osm_lft_rcv_ctrl_t
+*
+* DESCRIPTION
+* LFT Receive Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lft_rcv_ctrl
+{
+ osm_lft_rcv_t *p_rcv;
+ osm_log_t *p_log;
+ cl_dispatcher_t *p_disp;
+ cl_disp_reg_handle_t h_disp;
+
+} osm_lft_rcv_ctrl_t;
+/*
+* FIELDS
+* p_rcv
+* Pointer to the LFT Receiver object.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
+*
+* SEE ALSO
+* LFT Receive Controller object
+*********/
+
+/****f* OpenSM: LFT Receive Controller/osm_lft_rcv_ctrl_construct
+* NAME
+* osm_lft_rcv_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a LFT Receive Controller object.
+*
+* SYNOPSIS
+*/
+void
+osm_lft_rcv_ctrl_construct(
+ IN osm_lft_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a LFT Receive Controller object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_lft_rcv_ctrl_init, osm_lft_rcv_ctrl_destroy
+*
+* Calling osm_lft_rcv_ctrl_construct is a prerequisite to calling any other
+* method except osm_lft_rcv_ctrl_init.
+*
+* SEE ALSO
+* LFT Receive Controller object, osm_lft_rcv_ctrl_init,
+* osm_lft_rcv_ctrl_destroy
+*********/
+
+/****f* OpenSM: LFT Receive Controller/osm_lft_rcv_ctrl_destroy
+* NAME
+* osm_lft_rcv_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_lft_rcv_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_lft_rcv_ctrl_destroy(
+ IN osm_lft_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* LFT Receive Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_lft_rcv_ctrl_construct or osm_lft_rcv_ctrl_init.
+*
+* SEE ALSO
+* LFT Receive Controller object, osm_lft_rcv_ctrl_construct,
+* osm_lft_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: LFT Receive Controller/osm_lft_rcv_ctrl_init
+* NAME
+* osm_lft_rcv_ctrl_init
+*
+* DESCRIPTION
+* The osm_lft_rcv_ctrl_init function initializes a
+* LFT Receive Controller object for use.
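+*
+* Editorial sketch (assumes valid p_rcv, p_log and p_disp; the init
+* call registers the controller with the Dispatcher):
+*
+*	osm_lft_rcv_ctrl_t ctrl;
+*	osm_lft_rcv_ctrl_construct( &ctrl );
+*	if( osm_lft_rcv_ctrl_init( &ctrl, p_rcv, p_log, p_disp ) !=
+*		IB_SUCCESS )
+*	{
+*		-- ... handle the error ...
+*	}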
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_lft_rcv_ctrl_init( + IN osm_lft_rcv_ctrl_t* const p_ctrl, + IN osm_lft_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_lft_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_lft_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the LFT Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other LFT Receive Controller methods. +* +* SEE ALSO +* LFT Receive Controller object, osm_lft_rcv_ctrl_construct, +* osm_lft_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* OSM_LFT_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_tbl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_tbl.h new file mode 100644 index 00000000..b7763aba --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_lin_fwd_tbl.h @@ -0,0 +1,379 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_lin_fwd_tbl_t. + * This object represents a linear forwarding table. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_LIN_FWD_TBL_H_ +#define _OSM_LIN_FWD_TBL_H_ + +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Linear Forwarding Table +* NAME +* Linear Forwarding Table +* +* DESCRIPTION +* The Linear Forwarding Table objects encapsulate the information +* needed by the OpenSM to manage linear forwarding tables. The OpenSM +* allocates one Linear Forwarding Table object per switch in the +* IBA subnet, if that switch uses a linear table. +* +* The Linear Forwarding Table objects are not thread safe, thus +* callers must provide serialization. 
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Forwarding Table/osm_lin_fwd_tbl_t
+* NAME
+* osm_lin_fwd_tbl_t
+*
+* DESCRIPTION
+* Linear Forwarding Table structure.
+*
+* Callers may directly access this object.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lin_fwd_tbl
+{
+ uint16_t size;
+ uint8_t port_tbl[1];
+
+} osm_lin_fwd_tbl_t;
+/*
+* FIELDS
+* size
+* Number of entries in the linear forwarding table. This value
+* is taken from the SwitchInfo attribute.
+*
+* port_tbl
+* The array that specifies the port number which routes the
+* corresponding LID. Index is by LID.
+*
+* SEE ALSO
+* Forwarding Table object, Random Forwarding Table object.
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_lin_tbl_new
+* NAME
+* osm_lin_tbl_new
+*
+* DESCRIPTION
+* This function creates and initializes a Linear Forwarding Table object.
+*
+* SYNOPSIS
+*/
+osm_lin_fwd_tbl_t*
+osm_lin_tbl_new(
+ IN uint16_t const size );
+/*
+* PARAMETERS
+* size
+* [in] Number of entries in the Linear Forwarding Table.
+*
+* RETURN VALUE
+* On success, returns a pointer to a new Linear Forwarding Table object
+* of the specified size.
+* NULL otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_lin_tbl_delete
+* NAME
+* osm_lin_tbl_delete
+*
+* DESCRIPTION
+* This destroys and deallocates a Linear Forwarding Table object.
+*
+* SYNOPSIS
+*/
+void
+osm_lin_tbl_delete(
+ IN osm_lin_fwd_tbl_t** const pp_tbl );
+/*
+* PARAMETERS
+* pp_tbl
+* [in] Pointer to a pointer to the Linear Forwarding Table object.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_set
+* NAME
+* osm_lin_fwd_tbl_set
+*
+* DESCRIPTION
+* Sets the port to route the specified LID.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_lin_fwd_tbl_set(
+ IN osm_lin_fwd_tbl_t* const p_tbl,
+ IN const uint16_t lid_ho,
+ IN const uint8_t port )
+{
+ CL_ASSERT( lid_ho < p_tbl->size );
+ if( lid_ho < p_tbl->size )
+ p_tbl->port_tbl[lid_ho] = port;
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Linear Forwarding Table object.
+*
+* lid_ho
+* [in] LID value (host order) for which to set the route.
+*
+* port
+* [in] Port to route the specified LID value.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_get
+* NAME
+* osm_lin_fwd_tbl_get
+*
+* DESCRIPTION
+* Returns the port that routes the specified LID.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_lin_fwd_tbl_get(
+ IN const osm_lin_fwd_tbl_t* const p_tbl,
+ IN const uint16_t lid_ho )
+{
+ if( lid_ho < p_tbl->size )
+ return( p_tbl->port_tbl[lid_ho] );
+ else
+ return( 0xFF );
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Linear Forwarding Table object.
+*
+* lid_ho
+* [in] LID value (host order) for which to get the route.
+*
+* RETURN VALUE
+* Returns the port that routes the specified LID,
+* or 0xFF if the LID is outside the table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_get_size
+* NAME
+* osm_lin_fwd_tbl_get_size
+*
+* DESCRIPTION
+* Returns the number of entries available in the forwarding table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_lin_fwd_tbl_get_size(
+ IN const osm_lin_fwd_tbl_t* const p_tbl )
+{
+ return( p_tbl->size );
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Forwarding Table object.
+* +* RETURN VALUE +* Returns the number of entries available in the forwarding table. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_get_lids_per_block +* NAME +* osm_lin_fwd_tbl_get_lids_per_block +* +* DESCRIPTION +* Returns the number of LIDs per LID block. +* +* SYNOPSIS +*/ +static inline uint16_t +osm_lin_fwd_tbl_get_lids_per_block( + IN const osm_lin_fwd_tbl_t* const p_tbl ) +{ + UNUSED_PARAM( p_tbl ); + return( 64 ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Forwarding Table object. +* +* RETURN VALUE +* Returns the number of LIDs per LID block. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_get_max_block_id_in_use +* NAME +* osm_lin_fwd_tbl_get_max_block_id_in_use +* +* DESCRIPTION +* Returns the maximum block ID in actual use by the forwarding table. +* +* SYNOPSIS +*/ +static inline uint16_t +osm_lin_fwd_tbl_get_max_block_id_in_use( + IN const osm_lin_fwd_tbl_t* const p_tbl, + IN const uint16_t lid_top_ho ) +{ + return( (uint16_t)(lid_top_ho / + osm_lin_fwd_tbl_get_lids_per_block( p_tbl ) ) ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Forwarding Table object. +* +* RETURN VALUE +* Returns the maximum block ID in actual use by the forwarding table. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_lin_fwd_tbl_set_block +* NAME +* osm_lin_fwd_tbl_set_block +* +* DESCRIPTION +* Copies the specified block into the Linear Forwarding Table. +* +* SYNOPSIS +*/ +static inline ib_api_status_t +osm_lin_fwd_tbl_set_block( + IN osm_lin_fwd_tbl_t* const p_tbl, + IN const uint8_t* const p_block, + IN const uint32_t block_num ) +{ + uint16_t lid_start; + uint16_t num_lids; + + CL_ASSERT( p_tbl ); + CL_ASSERT( p_block ); + + num_lids = osm_lin_fwd_tbl_get_lids_per_block( p_tbl ); + lid_start = (uint16_t)(block_num * num_lids); + + if( lid_start + num_lids > p_tbl->size ) + return( IB_INVALID_PARAMETER ); + + memcpy( &p_tbl->port_tbl[lid_start], p_block, num_lids ); + return( IB_SUCCESS ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Linear Forwarding Table object. +* +* p_block +* [in] Pointer to the Forwarding Table block. +* +* block_num +* [in] Block number of this block. +* +* RETURN VALUE +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_LIN_FWD_TBL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_link_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_link_mgr.h new file mode 100644 index 00000000..c61b8a16 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_link_mgr.h @@ -0,0 +1,271 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_link_mgr_t. + * This object represents the Link Manager object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_LINK_MGR_H_ +#define _OSM_LINK_MGR_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Link Manager +* NAME +* Link Manager +* +* DESCRIPTION +* The Link Manager object encapsulates the information +* needed to control unicast LID forwarding on the subnet. +* +* The Link Manager object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Link Manager/osm_link_mgr_t +* NAME +* osm_link_mgr_t +* +* DESCRIPTION +* Link Manager structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_link_mgr +{ + osm_subn_t *p_subn; + osm_req_t *p_req; + osm_log_t *p_log; + cl_plock_t *p_lock; + boolean_t send_set_reqs; + +} osm_link_mgr_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_req +* Pointer to the Requester object sending SMPs. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Link Manager object +*********/ + +/****f* OpenSM: Link Manager/osm_link_mgr_construct +* NAME +* osm_link_mgr_construct +* +* DESCRIPTION +* This function constructs a Link Manager object. +* +* SYNOPSIS +*/ +void +osm_link_mgr_construct( + IN osm_link_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to a Link Manager object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows osm_link_mgr_destroy +* +* Calling osm_link_mgr_construct is a prerequisite to calling any other +* method except osm_link_mgr_init. +* +* SEE ALSO +* Link Manager object, osm_link_mgr_init, +* osm_link_mgr_destroy +*********/ + +/****f* OpenSM: Link Manager/osm_link_mgr_destroy +* NAME +* osm_link_mgr_destroy +* +* DESCRIPTION +* The osm_link_mgr_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_link_mgr_destroy( + IN osm_link_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Link Manager object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_link_mgr_construct or osm_link_mgr_init. 
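+*
+* Editorial sketch of driving the manager with osm_link_mgr_process
+* (documented below); IB_LINK_ARMED is assumed to be the armed port
+* state constant from ib_types.h:
+*
+*	osm_signal_t sig = osm_link_mgr_process( &mgr, IB_LINK_ARMED );
+*	if( sig == OSM_SIGNAL_DONE_PENDING )
+*	{
+*		-- ... wait for the wire transactions to complete ...
+*	}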
+*
+* SEE ALSO
+* Link Manager object, osm_link_mgr_construct,
+* osm_link_mgr_init
+*********/
+
+/****f* OpenSM: Link Manager/osm_link_mgr_init
+* NAME
+* osm_link_mgr_init
+*
+* DESCRIPTION
+* The osm_link_mgr_init function initializes a
+* Link Manager object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_link_mgr_init(
+ IN osm_link_mgr_t* const p_mgr,
+ IN osm_req_t* const p_req,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to an osm_link_mgr_t object to initialize.
+*
+* p_req
+* [in] Pointer to the attribute Requester object.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Link Manager object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Link Manager methods.
+*
+* SEE ALSO
+* Link Manager object, osm_link_mgr_construct,
+* osm_link_mgr_destroy
+*********/
+
+/****f* OpenSM: Link Manager/osm_link_mgr_process
+* NAME
+* osm_link_mgr_process
+*
+* DESCRIPTION
+* Processes all ports in the subnet per the link manager command.
+*
+* SYNOPSIS
+*/
+osm_signal_t
+osm_link_mgr_process(
+ IN osm_link_mgr_t* const p_mgr,
+ IN const uint8_t link_state );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to an osm_link_mgr_t object.
+*
+* link_state
+* [in] State to which to set the ports.
+*
+* RETURN VALUES
+* Returns the appropriate signal to the caller:
+* OSM_SIGNAL_DONE - operation is complete
+* OSM_SIGNAL_DONE_PENDING - local operations are complete, but
+* transactions are still pending on the wire.
+*
+* NOTES
+*
+* SEE ALSO
+* Link Manager, Node Info Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_LINK_MGR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_log.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_log.h
new file mode 100644
index 00000000..f111a6e7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_log.h
@@ -0,0 +1,474 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_log_t.
+ * This object represents the log file.
+ * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#ifndef _OSM_LOG_H_ +#define _OSM_LOG_H_ + +#ifndef __WIN__ +#include +#endif +#include +#include +#include +#include +#include + +#ifdef __GNUC__ +#define STRICT_OSM_LOG_FORMAT __attribute__((format(printf, 3, 4))) +#else +#define STRICT_OSM_LOG_FORMAT +#endif + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +#define LOG_ENTRY_SIZE_MAX 4096 +#define BUF_SIZE LOG_ENTRY_SIZE_MAX + +#define __func__ __FUNCTION__ + +#define OSM_LOG_ENTER( OSM_LOG_PTR, NAME ) \ + osm_log( OSM_LOG_PTR, OSM_LOG_FUNCS, \ + "%s: [\n", __func__); + +#define OSM_LOG_EXIT( OSM_LOG_PTR ) \ + osm_log( OSM_LOG_PTR, OSM_LOG_FUNCS, \ + "%s: ]\n", __func__); + +/****h* OpenSM/Log +* NAME +* Log +* +* DESCRIPTION +* +* AUTHOR +* +*********/ +typedef uint8_t osm_log_level_t; + +#define OSM_LOG_NONE 0x00 +#define OSM_LOG_ERROR 0x01 +#define OSM_LOG_INFO 0x02 +#define OSM_LOG_VERBOSE 0x04 +#define OSM_LOG_DEBUG 0x08 +#define OSM_LOG_FUNCS 0x10 +#define OSM_LOG_FRAMES 0x20 +#define OSM_LOG_ROUTING 0x40 +#define OSM_LOG_SYS 0x80 + +/* + DEFAULT - turn on ERROR and INFO only +*/ +#define OSM_LOG_DEFAULT_LEVEL OSM_LOG_ERROR | OSM_LOG_INFO + +/****s* OpenSM: MAD Wrapper/osm_log_t +* NAME +* osm_log_t +* +* DESCRIPTION +* +* SYNOPSIS +*/ +typedef struct _osm_log +{ + osm_log_level_t level; + cl_spinlock_t lock; + unsigned long count; + unsigned long max_size; + boolean_t flush; + FILE* out_port; +} osm_log_t; +/*********/ + +/****f* OpenSM: Log/osm_log_construct +* NAME +* osm_log_construct +* +* DESCRIPTION +* This function constructs a Log object. +* +* SYNOPSIS +*/ +static inline void +osm_log_construct( + IN osm_log_t* const p_log ) +{ + cl_spinlock_construct( &p_log->lock ); +} +/* +* PARAMETERS +* p_log +* [in] Pointer to a Log object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_log_init, osm_log_init_v2, osm_log_destroy +* +* Calling osm_log_construct is a prerequisite to calling any other +* method except osm_log_init or osm_log_init_v2. +* +* SEE ALSO +* Log object, osm_log_init, osm_log_init_v2, +* osm_log_destroy +*********/ + +/****f* OpenSM: Log/osm_log_destroy +* NAME +* osm_log_destroy +* +* DESCRIPTION +* The osm_log_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +static inline void +osm_log_destroy( + IN osm_log_t* const p_log ) +{ + cl_spinlock_destroy( &p_log->lock ); + if (p_log->out_port != stdout) { + fclose(p_log->out_port); + p_log->out_port = stdout; + } + closelog(); +} +/* +* PARAMETERS +* p_log +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Log object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_log_construct, osm_log_init, or osm_log_init_v2. +* +* SEE ALSO +* Log object, osm_log_construct, +* osm_log_init, osm_log_init_v2 +*********/ + +/****f* OpenSM: Log/osm_log_init_v2 +* NAME +* osm_log_init_v2 +* +* DESCRIPTION +* The osm_log_init_v2 function initializes a +* Log object for use. 
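+*
+* Editorial sketch (file name illustrative; parameters are documented
+* below):
+*
+*	osm_log_t log;
+*	osm_log_construct( &log );
+*	if( osm_log_init_v2( &log, FALSE, OSM_LOG_DEFAULT_LEVEL,
+*		"/tmp/osm.log", 0, TRUE ) == IB_SUCCESS )
+*		osm_log( &log, OSM_LOG_INFO, "log is up\n" );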
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_log_init_v2( + IN osm_log_t* const p_log, + IN const boolean_t flush, + IN const uint8_t log_flags, + IN const char *log_file, + IN const unsigned long max_size, + IN const boolean_t accum_log_file ); +/* +* PARAMETERS +* p_log +* [in] Pointer to the log object. +* +* flush +* [in] Set to TRUE directs the log to flush all log messages +* immediately. This severely degrades log performance, +* and is normally used for debugging only. +* +* log_flags +* [in] The log verbosity level to be used. +* +* log_file +* [in] if not NULL defines the name of the log file. Otherwise it is stdout. +* +* RETURN VALUES +* CL_SUCCESS if the Log object was initialized +* successfully. +* +* NOTES +* Allows calling other Log methods. +* +* SEE ALSO +* Log object, osm_log_construct, +* osm_log_destroy +*********/ + +/****f* OpenSM: Log/osm_log_init +* NAME +* osm_log_init +* +* DESCRIPTION +* The osm_log_init function initializes a +* Log object for use. It is a wrapper for osm_log_init_v2(). +* +* SYNOPSIS +*/ +ib_api_status_t +osm_log_init( + IN osm_log_t* const p_log, + IN const boolean_t flush, + IN const uint8_t log_flags, + IN const char *log_file, + IN const boolean_t accum_log_file ); +/* + * Same as osm_log_init_v2() but without max_size parameter + */ + +/****f* OpenSM: Log/osm_log_get_level +* NAME +* osm_log_get_level +* +* DESCRIPTION +* Returns the current log level. +* +* SYNOPSIS +*/ +static inline osm_log_level_t +osm_log_get_level( + IN const osm_log_t* const p_log ) +{ + return( p_log->level ); +} +/* +* PARAMETERS +* p_log +* [in] Pointer to the log object. +* +* RETURN VALUES +* Returns the current log level. +* +* NOTES +* +* SEE ALSO +* Log object, osm_log_construct, +* osm_log_destroy +*********/ + +/****f* OpenSM: Log/osm_log_set_level +* NAME +* osm_log_set_level +* +* DESCRIPTION +* Sets the current log level. +* +* SYNOPSIS +*/ +static inline void +osm_log_set_level( + IN osm_log_t* const p_log, + IN const osm_log_level_t level ) +{ + p_log->level = level; +} +/* +* PARAMETERS +* p_log +* [in] Pointer to the log object. +* +* level +* [in] New level to set. +* +* RETURN VALUES +* Returns the current log level. +* +* NOTES +* +* SEE ALSO +* Log object, osm_log_construct, +* osm_log_destroy +*********/ + +/****f* OpenSM: Log/osm_log_is_active +* NAME +* osm_log_is_active +* +* DESCRIPTION +* Returns TRUE if the specified log level would be logged. +* FALSE otherwise. +* +* SYNOPSIS +*/ +static inline boolean_t +osm_log_is_active( + IN const osm_log_t* const p_log, + IN const osm_log_level_t level ) +{ + return( (p_log->level & level) != 0 ); +} +/* +* PARAMETERS +* p_log +* [in] Pointer to the log object. +* +* level +* [in] Level to check. +* +* RETURN VALUES +* Returns TRUE if the specified log level would be logged. +* FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* Log object, osm_log_construct, +* osm_log_destroy +*********/ + +extern int osm_log_printf(osm_log_t *p_log, osm_log_level_t level, + const char *fmt, ...); + +void +osm_log( + IN osm_log_t* const p_log, + IN const osm_log_level_t verbosity, + IN const char *p_str, ... 
) STRICT_OSM_LOG_FORMAT; + +void +osm_log_raw( + IN osm_log_t* const p_log, + IN const osm_log_level_t verbosity, + IN const char *p_buf ); + +#define DBG_CL_LOCK 0 + +#define CL_PLOCK_EXCL_ACQUIRE( __exp__ ) \ +{ \ + if (DBG_CL_LOCK) \ + printf("cl_plock_excl_acquire: Acquiring %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ + cl_plock_excl_acquire( __exp__ ); \ + if (DBG_CL_LOCK) \ + printf("cl_plock_excl_acquire: Acquired %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ +} + +#define CL_PLOCK_ACQUIRE( __exp__ ) \ +{ \ + if (DBG_CL_LOCK) \ + printf("cl_plock_acquire: Acquiring %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ + cl_plock_acquire( __exp__ ); \ + if (DBG_CL_LOCK) \ + printf("cl_plock_acquire: Acquired %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ +} + +#define CL_PLOCK_RELEASE( __exp__ ) \ +{ \ + if (DBG_CL_LOCK) \ + printf("cl_plock_release: Releasing %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ + cl_plock_release( __exp__ ); \ + if (DBG_CL_LOCK) \ + printf("cl_plock_release: Released %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ +} + +#define DBG_CL_SPINLOCK 0 +#define CL_SPINLOCK_RELEASE( __exp__ ) \ +{ \ + if (DBG_CL_SPINLOCK) \ + printf("cl_spinlock_release: Releasing %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ + cl_spinlock_release( __exp__ ); \ + if (DBG_CL_SPINLOCK) \ + printf("cl_spinlock_release: Released %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ +} + +#define CL_SPINLOCK_ACQUIRE( __exp__ ) \ +{ \ + if (DBG_CL_SPINLOCK) \ + printf("cl_spinlock_acquire: Acquiring %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ + cl_spinlock_acquire( __exp__ ); \ + if (DBG_CL_SPINLOCK) \ + printf("cl_spinlock_acquire: Acquired %p file %s, line %d\n", \ + __exp__,__FILE__, __LINE__); \ +} + +/****f* OpenSM: Helper/osm_is_debug +* NAME +* osm_is_debug +* +* DESCRIPTION +* The osm_is_debug function returns TRUE if the opensm was compiled +* in debug mode, and FALSE otherwise. +* +* SYNOPSIS +*/ +boolean_t +osm_is_debug(void); +/* +* PARAMETERS +* None +* +* RETURN VALUE +* TRUE if compiled in debug version. FALSE otherwise. +* +* NOTES +* +*********/ + +END_C_DECLS + +#endif /* _OSM_LOG_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mad_pool.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mad_pool.h new file mode 100644 index 00000000..c2512088 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mad_pool.h @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mad_pool_t. + * This object represents a pool of management datagram (MAD) objects. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#ifndef _OSM_MAD_POOL_H_ +#define _OSM_MAD_POOL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/MAD Pool +* NAME +* MAD Pool +* +* DESCRIPTION +* The MAD Pool encapsulates the information needed by the +* OpenSM to manage a pool of MAD objects. The OpenSM allocates +* one MAD Pool per IBA subnet. +* +* The MAD Pool is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: MAD Pool/osm_mad_pool_t +* NAME +* osm_mad_pool_t +* +* DESCRIPTION +* MAD Pool structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_mad_pool +{ + osm_log_t *p_log; + cl_qlock_pool_t madw_pool; + atomic32_t mads_out; +} osm_mad_pool_t; +/* +* FIELDS +* p_log +* Pointer to the log object. +* +* lock +* Spinlock guarding the pool. +* +* mads_out +* Running total of the number of MADs outstanding. +* +* SEE ALSO +* MAD Pool +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_construct +* NAME +* osm_mad_pool_construct +* +* DESCRIPTION +* This function constructs a MAD Pool. +* +* SYNOPSIS +*/ +void osm_mad_pool_construct( + IN osm_mad_pool_t* const p_pool ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to a MAD Pool to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_mad_pool_init, osm_mad_pool_destroy +* +* Calling osm_mad_pool_construct is a prerequisite to calling any other +* method except osm_mad_pool_init. +* +* SEE ALSO +* MAD Pool, osm_mad_pool_init, osm_mad_pool_destroy +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_destroy +* NAME +* osm_mad_pool_destroy +* +* DESCRIPTION +* The osm_mad_pool_destroy function destroys a node, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_mad_pool_destroy( + IN osm_mad_pool_t* const p_pool ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to a MAD Pool to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified MAD Pool. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to osm_mad_pool_construct or +* osm_mad_pool_init. 
+* +* SEE ALSO +* MAD Pool, osm_mad_pool_construct, osm_mad_pool_init +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_init +* NAME +* osm_mad_pool_init +* +* DESCRIPTION +* The osm_mad_pool_init function initializes a MAD Pool for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_mad_pool_init( + IN osm_mad_pool_t* const p_pool, + IN osm_log_t* const p_log ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to an osm_mad_pool_t object to initialize. +* +* p_log +* [in] Pointer to the log object. +* +* +* RETURN VALUES +* CL_SUCCESS if the MAD Pool was initialized successfully. +* +* NOTES +* Allows calling other MAD Pool methods. +* +* SEE ALSO +* MAD Pool, osm_mad_pool_construct, osm_mad_pool_destroy +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_get +* NAME +* osm_mad_pool_get +* +* DESCRIPTION +* Gets a MAD wrapper and wire MAD from the pool. +* +* SYNOPSIS +*/ +osm_madw_t* +osm_mad_pool_get( + IN osm_mad_pool_t* const p_pool, + IN osm_bind_handle_t h_bind, + IN const uint32_t total_size, + IN const osm_mad_addr_t* const p_mad_addr ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to an osm_mad_pool_t object. +* +* h_bind +* [in] Handle returned from osm_vendor_bind() call to the +* port over which this mad will be sent. +* +* total_size +* [in] Total size, including MAD header of the requested MAD. +* +* p_mad_addr +* [in] Pointer to the MAD address structure. This parameter +* may be NULL for directed route MADs. +* +* RETURN VALUES +* Returns a pointer to a MAD wrapper containing the MAD. +* A return value of NULL means no MADs are available. +* +* NOTES +* The MAD must eventually be returned to the pool with a call to +* osm_mad_pool_put. +* +* The osm_mad_pool_construct or osm_mad_pool_init must be called before +* using this function. +* +* SEE ALSO +* MAD Pool, osm_mad_pool_put +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_put +* NAME +* osm_mad_pool_put +* +* DESCRIPTION +* Returns a MAD to the pool. +* +* SYNOPSIS +*/ +void osm_mad_pool_put( + IN osm_mad_pool_t* const p_pool, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to an osm_mad_pool_t object. +* +* p_madw +* [in] Pointer to a MAD Wrapper for a MAD that was previously +* retrieved from the pool. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* The osm_mad_pool_construct or osm_mad_pool_init must be called before +* using this function. +* +* SEE ALSO +* MAD Pool, osm_mad_pool_get +*********/ + +/****f* OpenSM: MAD Pool/osm_mad_pool_get_wrapper +* NAME +* osm_mad_pool_get_wrapper +* +* DESCRIPTION +* Gets a only MAD wrapper from the pool (no wire MAD). +* +* SYNOPSIS +*/ +osm_madw_t* +osm_mad_pool_get_wrapper( + IN osm_mad_pool_t* const p_pool, + IN osm_bind_handle_t h_bind, + IN const uint32_t total_size, + IN const ib_mad_t* const p_mad, + IN const osm_mad_addr_t* const p_mad_addr ); +/* +* PARAMETERS +* p_pool +* [in] Pointer to an osm_mad_pool_t object. +* +* h_bind +* [in] Handle returned from osm_vendor_bind() call to the +* port for which this mad wrapper will be used. +* +* total_size +* [in] Total size, including MAD header of the MAD that will +* be attached to this wrapper. +* +* p_mad +* [in] Pointer to the MAD to attach to this wrapper. +* +* p_mad_addr +* [in] Pointer to the MAD address structure. This parameter +* may be NULL for directed route MADs. +* +* RETURN VALUES +* Returns a pointer to a MAD wrapper. +* A return value of NULL means no MAD wrappers are available. 
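+*
+*	A usage sketch (illustrative only; the wire MAD, bind handle, and
+*	address variables are assumed to come from the caller): wrap a
+*	received wire MAD, then return the wrapper when processing is done:
+*
+*		osm_madw_t *p_madw;
+*
+*		p_madw = osm_mad_pool_get_wrapper( p_pool, h_bind,
+*			MAD_BLOCK_SIZE, p_wire_mad, &mad_addr );
+*		if( p_madw == NULL )
+*			return;
+*		... process the MAD, then ...
+*		osm_mad_pool_put( p_pool, p_madw );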
+*
+* NOTES
+*	The MAD must eventually be returned to the pool with a call to
+*	osm_mad_pool_put.
+*
+*	The osm_mad_pool_construct or osm_mad_pool_init must be called before
+*	using this function.
+*
+* SEE ALSO
+*	MAD Pool, osm_mad_pool_put
+*********/
+
+/****f* OpenSM: MAD Pool/osm_mad_pool_get_wrapper_raw
+* NAME
+*	osm_mad_pool_get_wrapper_raw
+*
+* DESCRIPTION
+*	Gets only an uninitialized MAD wrapper from the pool (no wire MAD).
+*
+* SYNOPSIS
+*/
+osm_madw_t*
+osm_mad_pool_get_wrapper_raw(
+	IN osm_mad_pool_t* const p_pool );
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to an osm_mad_pool_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to a MAD wrapper.
+*	A return value of NULL means no MAD wrappers are available.
+*
+* NOTES
+*	The MAD must eventually be returned to the pool with a call to
+*	osm_mad_pool_put.
+*
+*	The osm_mad_pool_construct or osm_mad_pool_init must be called before
+*	using this function.
+*
+* SEE ALSO
+*	MAD Pool, osm_mad_pool_put
+*********/
+
+/****f* OpenSM: MAD Pool/osm_mad_pool_get_outstanding
+* NAME
+*	osm_mad_pool_get_outstanding
+*
+* DESCRIPTION
+*	Returns the running count of MADs currently outstanding from the pool.
+*
+* SYNOPSIS
+*/
+static inline uint32_t
+osm_mad_pool_get_outstanding(
+	IN const osm_mad_pool_t* const p_pool )
+{
+	return( p_pool->mads_out );
+}
+/*
+* PARAMETERS
+*	p_pool
+*		[in] Pointer to an osm_mad_pool_t object.
+*
+* RETURN VALUES
+*	Returns the running count of MADs currently outstanding from the pool.
+*
+* NOTES
+*	The osm_mad_pool_construct or osm_mad_pool_init must be called before
+*	using this function.
+*
+* SEE ALSO
+*	MAD Pool, osm_mad_pool_get
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_MAD_POOL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_madw.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_madw.h
new file mode 100644
index 00000000..a0aa29ea
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_madw.h
@@ -0,0 +1,1160 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * 	Declaration of osm_mad_wrapper_t.
+ * 	This object represents the context wrapper for OpenSM MAD processing.
+ * 	This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#ifndef _OSM_MADW_H_ +#define _OSM_MADW_H_ + +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****s* OpenSM: MAD Wrapper/osm_bind_info_t +* NAME +* osm_bind_info_t +* +* DESCRIPTION +* +* SYNOPSIS +*/ +typedef struct _osm_bind_info +{ + ib_net64_t port_guid; + uint8_t mad_class; + uint8_t class_version; + boolean_t is_responder; + boolean_t is_trap_processor; + boolean_t is_report_processor; + uint32_t send_q_size; + uint32_t recv_q_size; +} osm_bind_info_t; +/* +* FIELDS +* portguid +* PortGuid of local port +* +* class +* Mgmt Class ID +* +* class_version +* Mgmt Class version +* +* is_responder +* True if this is a GSI Agent +* +* is_trap_processor +* True if GSI Trap msgs are handled +* +* is_report_processo +* True if GSI Report msgs are handled +* +* send_q_size +* SendQueueSize +* +* recv_q_size +* Receive Queue Size +* +* SEE ALSO +*********/ + +/****h* OpenSM/MAD Wrapper +* NAME +* MAD Wrapper +* +* DESCRIPTION +* The MAD Wrapper object encapsulates the information needed by the +* OpenSM to manage individual MADs. The OpenSM allocates one MAD Wrapper +* per MAD. +* +* The MAD Wrapper is not thread safe, thus callers must provide +* serialization. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: MAD Wrapper/osm_ni_context_t +* NAME +* osm_ni_context_t +* +* DESCRIPTION +* Context needed by recipient of NodeInfo attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_ni_context +{ + ib_net64_t node_guid; + uint8_t port_num; +} osm_ni_context_t; +/* +* FIELDS +* p_node +* Pointer to the node thru which we got to this node. +* +* p_sw +* Pointer to the switch object (if any) of the switch +* thru which we got to this node. +* +* port_num +* Port number on the node or switch thru which we got +* to this node. +* +* SEE ALSO +*********/ + +/****s* OpenSM: MAD Wrapper/osm_pi_context_t +* NAME +* osm_pi_context_t +* +* DESCRIPTION +* Context needed by recipient of PortInfo attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_pi_context +{ + ib_net64_t node_guid; + ib_net64_t port_guid; + boolean_t set_method; + boolean_t light_sweep; + boolean_t update_master_sm_base_lid; + boolean_t ignore_errors; + boolean_t active_transition; +} osm_pi_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_nd_context_t +* NAME +* osm_nd_context_t +* +* DESCRIPTION +* Context needed by recipient of NodeDescription attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_nd_context +{ + ib_net64_t node_guid; +} osm_nd_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_si_context_t +* NAME +* osm_si_context_t +* +* DESCRIPTION +* Context needed by recipient of SwitchInfo attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_si_context +{ + ib_net64_t node_guid; + boolean_t set_method; + boolean_t light_sweep; +} osm_si_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_lft_context_t +* NAME +* osm_lft_context_t +* +* DESCRIPTION +* Context needed by recipient of LinearForwardingTable attribute. 
+* +* SYNOPSIS +*/ +typedef struct _osm_lft_context +{ + ib_net64_t node_guid; + boolean_t set_method; +} osm_lft_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_mft_context_t +* NAME +* osm_mft_context_t +* +* DESCRIPTION +* Context needed by recipient of MulticastForwardingTable attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_mft_context +{ + ib_net64_t node_guid; + boolean_t set_method; +} osm_mft_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_smi_context_t +* NAME +* osm_smi_context_t +* +* DESCRIPTION +* Context needed by recipient of SMInfo attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_smi_context +{ + ib_net64_t port_guid; + boolean_t set_method; +} osm_smi_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_pkey_context_t +* NAME +* osm_pkey_context_t +* +* DESCRIPTION +* Context needed by recipient of P_Key attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_pkey_context +{ + ib_net64_t node_guid; + ib_net64_t port_guid; + boolean_t set_method; +} osm_pkey_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_slvl_context_t +* NAME +* osm_slvl_context_t +* +* DESCRIPTION +* Context needed by recipient of PortInfo attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_slvl_context +{ + ib_net64_t node_guid; + ib_net64_t port_guid; + boolean_t set_method; +} osm_slvl_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_vla_context_t +* NAME +* osm_vla_context_t +* +* DESCRIPTION +* Context needed by recipient of VL Arb attribute. +* +* SYNOPSIS +*/ +typedef struct _osm_vla_context +{ + ib_net64_t node_guid; + ib_net64_t port_guid; + boolean_t set_method; +} osm_vla_context_t; +/*********/ + +#ifndef OSM_VENDOR_INTF_OPENIB +/****s* OpenSM: MAD Wrapper/osm_arbitrary_context_t +* NAME +* osm_arbitrary_context_t +* +* DESCRIPTION +* Context needed by arbitrary recipient. +* +* SYNOPSIS +*/ +typedef struct _osm_arbitrary_context +{ + void* context1; + void* context2; +} osm_arbitrary_context_t; +/*********/ +#endif + +/****s* OpenSM: MAD Wrapper/osm_madw_context_t +* NAME +* osm_madw_context_t +* +* DESCRIPTION +* Context needed by recipients of MAD responses. 
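+*	The union overlays one context structure per attribute type, so the
+*	originator of a request fills in exactly one member before sending.
+*	For example, for a NodeInfo query (sketch; the wrapper and GUID
+*	variables are assumed to exist):
+*
+*		osm_madw_context_t context;
+*
+*		context.ni_context.node_guid = node_guid;
+*		context.ni_context.port_num = port_num;
+*		p_madw->context = context;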
+* +* SYNOPSIS +*/ +typedef union _osm_madw_context +{ + osm_ni_context_t ni_context; + osm_pi_context_t pi_context; + osm_nd_context_t nd_context; + osm_si_context_t si_context; + osm_lft_context_t lft_context; + osm_mft_context_t mft_context; + osm_smi_context_t smi_context; + osm_slvl_context_t slvl_context; + osm_pkey_context_t pkey_context; + osm_vla_context_t vla_context; +#ifndef OSM_VENDOR_INTF_OPENIB + osm_arbitrary_context_t arb_context; +#endif +} osm_madw_context_t; +/*********/ + +/****s* OpenSM: MAD Wrapper/osm_mad_addr_t +* NAME +* osm_mad_addr_t +* +* DESCRIPTION +* +* SYNOPSIS +*/ +typedef struct _osm_mad_addr +{ + ib_net16_t dest_lid; + uint8_t path_bits; + uint8_t static_rate; + + union addr_type + { + struct _smi + { + ib_net16_t source_lid; + uint8_t port_num; + } smi; + + struct _gsi + { + ib_net32_t remote_qp; + ib_net32_t remote_qkey; + ib_net16_t pkey; + uint8_t service_level; + boolean_t global_route; + ib_grh_t grh_info; + } gsi; + } addr_type; + +} osm_mad_addr_t; +/* +* FIELDS +* +* SEE ALSO +*********/ + +/****s* OpenSM: MAD Wrapper/osm_madw_t +* NAME +* osm_madw_t +* +* DESCRIPTION +* Context needed for processing individual MADs +* +* SYNOPSIS +*/ +typedef struct _osm_madw +{ + cl_pool_item_t pool_item; + osm_bind_handle_t h_bind; + osm_vend_wrap_t vend_wrap; + osm_mad_addr_t mad_addr; + osm_bind_info_t bind_info; + osm_madw_context_t context; + uint32_t mad_size; + ib_api_status_t status; + cl_disp_msgid_t fail_msg; + boolean_t resp_expected; + const ib_mad_t *p_mad; +} osm_madw_t; +/* +* FIELDS +* pool_item +* List linkage for pools and lists. MUST BE FIRST MEMBER! +* +* h_bind +* Bind handle for the port on which this MAD will be sent +* or was received. +* +* vend_wrap +* Transport vendor specific context. This structure is not +* used outside MAD transport vendor specific code. +* +* context +* Union of controller specific contexts needed for this MAD. +* This structure allows controllers to indirectly communicate +* with each other through the dispatcher. +* +* mad_size +* Size of this MAD in bytes. +* +* status +* Status of completed operation on the MAD. +* CL_SUCCESS if the operation was successful. +* +* fail_msg +* Dispatcher message with which to post this MAD on failure. +* This value is set by the originator of the MAD. +* If an operation on this MAD fails, for example due to a timeout, +* then the transport layer will dispose of the MAD by sending +* it through the Dispatcher with this message type. Presumably, +* there is a controller listening for the failure message that can +* properly clean up. +* +* resp_expected +* TRUE if a response is expected to this MAD. +* FALSE otherwise. +* +* p_mad +* Pointer to the wire MAD. The MAD itself cannot be part of the +* wrapper, since wire MADs typically reside in special memory +* registered with the local HCA. +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_construct +* NAME +* osm_madw_construct +* +* DESCRIPTION +* This function constructs a MAD Wrapper object. +* +* SYNOPSIS +*/ +static inline void +osm_madw_construct( + IN osm_madw_t* const p_madw ) +{ + /* + Don't touch the pool_item since that is an opaque object. + Clear all other objects in the mad wrapper. + */ + memset( ((uint8_t *)p_madw) + sizeof( cl_pool_item_t ), 0, + sizeof(*p_madw) - sizeof( cl_pool_item_t ) ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to a MAD Wrapper object to construct. +* +* RETURN VALUE +* This function does not return a value. 
+*
+* NOTES
+*	Allows calling osm_madw_init, osm_madw_destroy
+*
+*	Calling osm_madw_construct is a prerequisite to calling any other
+*	method except osm_madw_init.
+*
+* SEE ALSO
+*	MAD Wrapper object, osm_madw_init, osm_madw_destroy
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_destroy
+* NAME
+*	osm_madw_destroy
+*
+* DESCRIPTION
+*	The osm_madw_destroy function destroys a MAD Wrapper, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_madw_destroy(
+	IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to a MAD Wrapper object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified MAD Wrapper object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_madw_construct or
+*	osm_madw_init.
+*
+* SEE ALSO
+*	MAD Wrapper object, osm_madw_construct, osm_madw_init
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_init
+* NAME
+*	osm_madw_init
+*
+* DESCRIPTION
+*	Initializes a MAD Wrapper object for use.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_madw_init(
+	IN osm_madw_t* const p_madw,
+	IN osm_bind_handle_t h_bind,
+	IN const uint32_t mad_size,
+	IN const osm_mad_addr_t* const p_mad_addr )
+{
+	osm_madw_construct( p_madw );
+	p_madw->h_bind = h_bind;
+	p_madw->fail_msg = CL_DISP_MSGID_NONE;
+	p_madw->mad_size = mad_size;
+	if( p_mad_addr )
+		p_madw->mad_addr = *p_mad_addr;
+	p_madw->resp_expected = FALSE;
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object to initialize.
+*
+*	h_bind
+*		[in] Bind handle for the port on which this MAD
+*		will be sent or was received.
+*
+*	mad_size
+*		[in] Size of the wire MAD in bytes.
+*
+*	p_mad_addr
+*		[in] Pointer to the MAD address structure. This parameter may
+*		be NULL for directed route MADs.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_smp_ptr
+* NAME
+*	osm_madw_get_smp_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the SMP in this MAD.
+*
+* SYNOPSIS
+*/
+static inline ib_smp_t*
+osm_madw_get_smp_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (ib_smp_t*)p_madw->p_mad );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the SMP MAD.
+*
+* NOTES
+*
+* SEE ALSO
+*	MAD Wrapper object, osm_madw_construct, osm_madw_destroy
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_sa_mad_ptr
+* NAME
+*	osm_madw_get_sa_mad_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the SA MAD in this MAD wrapper.
+*
+* SYNOPSIS
+*/
+static inline ib_sa_mad_t*
+osm_madw_get_sa_mad_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (ib_sa_mad_t*)p_madw->p_mad );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the SA MAD.
+*
+* NOTES
+*
+* SEE ALSO
+*	MAD Wrapper object, osm_madw_construct, osm_madw_destroy
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_ni_context_ptr
+* NAME
+*	osm_madw_get_ni_context_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the NodeInfo context in this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_ni_context_t*
+osm_madw_get_ni_context_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_ni_context_t*)&p_madw->context );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the context structure.
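+*
+*	For example, a NodeInfo receiver can recover the context it stored
+*	when the request was sent (sketch; variable names are illustrative):
+*
+*		osm_ni_context_t *p_ni_context;
+*
+*		p_ni_context = osm_madw_get_ni_context_ptr( p_madw );
+*		node_guid = p_ni_context->node_guid;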
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_pi_context_ptr +* NAME +* osm_madw_get_pi_context_ptr +* +* DESCRIPTION +* Gets a pointer to the PortInfo context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_pi_context_t* +osm_madw_get_pi_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_pi_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_nd_context_ptr +* NAME +* osm_madw_get_nd_context_ptr +* +* DESCRIPTION +* Gets a pointer to the NodeDescription context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_nd_context_t* +osm_madw_get_nd_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_nd_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_lft_context_ptr +* NAME +* osm_madw_get_lft_context_ptr +* +* DESCRIPTION +* Gets a pointer to the LFT context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_lft_context_t* +osm_madw_get_lft_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_lft_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_mft_context_ptr +* NAME +* osm_madw_get_mft_context_ptr +* +* DESCRIPTION +* Gets a pointer to the MFT context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_mft_context_t* +osm_madw_get_mft_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_mft_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_si_context_ptr +* NAME +* osm_madw_get_si_context_ptr +* +* DESCRIPTION +* Gets a pointer to the SwitchInfo context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_si_context_t* +osm_madw_get_si_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_si_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_smi_context_ptr +* NAME +* osm_madw_get_smi_context_ptr +* +* DESCRIPTION +* Gets a pointer to the SMInfo context in this MAD. +* +* SYNOPSIS +*/ +static inline osm_smi_context_t* +osm_madw_get_smi_context_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (osm_smi_context_t*)&p_madw->context ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Pointer to the start of the context structure. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_pkey_context_ptr +* NAME +* osm_madw_get_pkey_context_ptr +* +* DESCRIPTION +* Gets a pointer to the P_Key context in this MAD. 
+*
+* SYNOPSIS
+*/
+static inline osm_pkey_context_t*
+osm_madw_get_pkey_context_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_pkey_context_t*)&p_madw->context );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the context structure.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_slvl_context_ptr
+* NAME
+*	osm_madw_get_slvl_context_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the SLtoVL Mapping Table context in this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_slvl_context_t*
+osm_madw_get_slvl_context_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_slvl_context_t*)&p_madw->context );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the context structure.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_vla_context_ptr
+* NAME
+*	osm_madw_get_vla_context_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the Vl Arb context in this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_vla_context_t*
+osm_madw_get_vla_context_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_vla_context_t*)&p_madw->context );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the context structure.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+#ifndef OSM_VENDOR_INTF_OPENIB
+/****f* OpenSM: MAD Wrapper/osm_madw_get_arbitrary_context_ptr
+* NAME
+*	osm_madw_get_arbitrary_context_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the arbitrary context in this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_arbitrary_context_t*
+osm_madw_get_arbitrary_context_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_arbitrary_context_t*)&p_madw->context );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Pointer to the start of the context structure.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+#endif
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_vend_ptr
+* NAME
+*	osm_madw_get_vend_ptr
+*
+* DESCRIPTION
+*	Gets a pointer to the vendor specific MAD wrapper component.
+*
+* SYNOPSIS
+*/
+static inline osm_vend_wrap_t*
+osm_madw_get_vend_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_vend_wrap_t*)&p_madw->vend_wrap );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Gets a pointer to the vendor specific MAD wrapper component.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_bind_handle
+* NAME
+*	osm_madw_get_bind_handle
+*
+* DESCRIPTION
+*	Returns the bind handle associated with this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_bind_handle_t
+osm_madw_get_bind_handle(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_bind_handle_t)p_madw->h_bind );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Returns the bind handle associated with this MAD.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: MAD Wrapper/osm_madw_get_mad_addr_ptr
+* NAME
+*	osm_madw_get_mad_addr_ptr
+*
+* DESCRIPTION
+*	Returns the mad address structure associated with this MAD.
+*
+* SYNOPSIS
+*/
+static inline osm_mad_addr_t*
+osm_madw_get_mad_addr_ptr(
+	IN const osm_madw_t* const p_madw )
+{
+	return( (osm_mad_addr_t*)&p_madw->mad_addr );
+}
+/*
+* PARAMETERS
+*	p_madw
+*		[in] Pointer to an osm_madw_t object.
+*
+* RETURN VALUES
+*	Returns the mad address structure associated with this MAD.
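+*
+*	For example, the source LID of a received SMP can be read through
+*	this pointer (sketch; the destination variable is illustrative):
+*
+*		osm_mad_addr_t *p_addr;
+*
+*		p_addr = osm_madw_get_mad_addr_ptr( p_madw );
+*		src_lid = p_addr->addr_type.smi.source_lid;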
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_mad_ptr +* NAME +* osm_madw_get_mad_ptr +* +* DESCRIPTION +* Returns the mad address structure associated with this MAD. +* +* SYNOPSIS +*/ +static inline ib_mad_t* +osm_madw_get_mad_ptr( + IN const osm_madw_t* const p_madw ) +{ + return( (ib_mad_t*)p_madw->p_mad ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Returns the mad address structure associated with this MAD. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_get_err_msg +* NAME +* osm_madw_get_err_msg +* +* DESCRIPTION +* Returns the message with which to post this mad wrapper if +* an error occurs during processing the mad. +* +* SYNOPSIS +*/ +static inline cl_disp_msgid_t +osm_madw_get_err_msg( + IN const osm_madw_t* const p_madw ) +{ + return( (cl_disp_msgid_t)p_madw->fail_msg ); +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* RETURN VALUES +* Returns the message with which to post this mad wrapper if +* an error occurs during processing the mad. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_set_mad +* NAME +* osm_madw_set_mad +* +* DESCRIPTION +* Associates a wire MAD with this MAD Wrapper object. +* +* SYNOPSIS +*/ +static inline void +osm_madw_set_mad( + IN osm_madw_t* const p_madw, + IN const ib_mad_t* const p_mad ) +{ + p_madw->p_mad = p_mad; +} +/* +* PARAMETERS +* p_madw +* [in] Pointer to an osm_madw_t object. +* +* p_mad +* [in] Pointer to the wire MAD to attach to this wrapper. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: MAD Wrapper/osm_madw_copy_context +* NAME +* osm_madw_copy_context +* +* DESCRIPTION +* Copies the controller context from one MAD Wrapper to another. +* +* SYNOPSIS +*/ +static inline void +osm_madw_copy_context( + IN osm_madw_t* const p_dest, + IN const osm_madw_t* const p_src ) +{ + p_dest->context = p_src->context; +} +/* +* PARAMETERS +* p_dest +* [in] Pointer to the destination osm_madw_t object. +* +* p_src +* [in] Pointer to the source osm_madw_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_MADW_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_matrix.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_matrix.h new file mode 100644 index 00000000..a6de3854 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_matrix.h @@ -0,0 +1,454 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_lid_matrix_t. + * This object represents a two dimensional array of port numbers + * and LID values. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + + +#ifndef _OSM_MATRIX_H_ +#define _OSM_MATRIX_H_ + +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/LID Matrix +* NAME +* LID Matrix +* +* DESCRIPTION +* The LID Matrix object encapsulates the information needed by the +* OpenSM to manage fabric routes. It is a two dimensional array +* index by LID value and Port Number. Each element contains the +* number of hops from that Port Number to the LID. +* Every Switch object contains a LID Matrix. +* +* The LID Matrix is not thread safe, thus callers must provide +* serialization. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: LID Matrix/osm_lid_matrix_t +* NAME +* osm_lid_matrix_t +* +* DESCRIPTION +* +* The LID Matrix object encapsulates the information needed by the +* OpenSM to manage fabric routes. It is a two dimensional array +* index by LID value and Port Number. Each element contains the +* number of hops from that Port Number to the LID. +* Every Switch object contains a LID Matrix. +* +* The LID Matrix is not thread safe, thus callers must provide +* serialization. +* +* The num_ports index into the matrix serves a special purpose, in that it +* contains the shortest hop path for that LID through any port. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_lid_matrix_t +{ + cl_vector_t lid_vec; + uint8_t num_ports; + +} osm_lid_matrix_t; +/* +* FIELDS +* lid_vec +* Vector (indexed by LID) of port arrays (indexed by port number) +* +* num_ports +* Number of ports at each entry in the LID vector. +* +* SEE ALSO +*********/ + +/****f* OpenSM: LID Matrix/osm_lid_matrix_construct +* NAME +* osm_lid_matrix_construct +* +* DESCRIPTION +* This function constructs a LID Matrix object. +* +* SYNOPSIS +*/ +static inline void +osm_lid_matrix_construct( + IN osm_lid_matrix_t* const p_lmx ) +{ + p_lmx->num_ports = 0; + cl_vector_construct( &p_lmx->lid_vec ); +} +/* +* PARAMETERS +* p_lmx +* [in] Pointer to a LID Matrix object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_lid_matrix_init, osm_lid_matrix_destroy +* +* Calling osm_lid_matrix_construct is a prerequisite to calling any other +* method except osm_lid_matrix_init. 
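+*
+*	A typical lifecycle (sketch; the port count of 24 is an
+*	illustrative assumption):
+*
+*		osm_lid_matrix_t lmx;
+*
+*		osm_lid_matrix_construct( &lmx );
+*		if( osm_lid_matrix_init( &lmx, 24 ) == IB_SUCCESS )
+*		{
+*			... use the matrix ...
+*		}
+*		osm_lid_matrix_destroy( &lmx );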
+*
+* SEE ALSO
+*	LID Matrix object, osm_lid_matrix_init, osm_lid_matrix_destroy
+*********/
+
+/****f* OpenSM: LID Matrix/osm_lid_matrix_destroy
+* NAME
+*	osm_lid_matrix_destroy
+*
+* DESCRIPTION
+*	The osm_lid_matrix_destroy function destroys a LID Matrix, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_lid_matrix_destroy(
+	IN osm_lid_matrix_t* const p_lmx );
+/*
+* PARAMETERS
+*	p_lmx
+*		[in] Pointer to a LID Matrix object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified LID Matrix object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_lid_matrix_construct or
+*	osm_lid_matrix_init.
+*
+* SEE ALSO
+*	LID Matrix object, osm_lid_matrix_construct, osm_lid_matrix_init
+*********/
+
+/****f* OpenSM: LID Matrix/osm_lid_matrix_init
+* NAME
+*	osm_lid_matrix_init
+*
+* DESCRIPTION
+*	Initializes a LID Matrix object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_lid_matrix_init(
+	IN osm_lid_matrix_t* const p_lmx,
+	IN const uint8_t num_ports );
+/*
+* PARAMETERS
+*	p_lmx
+*		[in] Pointer to an osm_lid_matrix_t object to initialize.
+*
+*	num_ports
+*		[in] Number of ports at each LID index. This value is fixed
+*		at initialization time.
+*
+* RETURN VALUES
+*	IB_SUCCESS on success
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: LID Matrix/osm_lid_matrix_get
+* NAME
+*	osm_lid_matrix_get
+*
+* DESCRIPTION
+*	Returns the hop count at the specified LID/Port intersection.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_lid_matrix_get(
+	IN const osm_lid_matrix_t* const p_lmx,
+	IN const uint16_t lid_ho,
+	IN const uint8_t port_num )
+{
+	CL_ASSERT( port_num < p_lmx->num_ports );
+	CL_ASSERT( lid_ho < cl_vector_get_size( &p_lmx->lid_vec ) );
+	return( ((uint8_t *)cl_vector_get_ptr(
+		&p_lmx->lid_vec, lid_ho ))[port_num] );
+}
+/*
+* PARAMETERS
+*	p_lmx
+*		[in] Pointer to an osm_lid_matrix_t object.
+*
+*	lid_ho
+*		[in] LID value (host order) for which to return the hop count
+*
+*	port_num
+*		[in] Port number in the switch
+*
+* RETURN VALUES
+*	Returns the hop count at the specified LID/Port intersection.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: LID Matrix/osm_lid_matrix_get_max_lid_ho
+* NAME
+*	osm_lid_matrix_get_max_lid_ho
+*
+* DESCRIPTION
+*	Returns the maximum LID (host order) value contained
+*	in the matrix.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_lid_matrix_get_max_lid_ho(
+	IN const osm_lid_matrix_t* const p_lmx )
+{
+	return( (uint16_t)(cl_vector_get_size( &p_lmx->lid_vec ) - 1 ) );
+}
+/*
+* PARAMETERS
+*	p_lmx
+*		[in] Pointer to an osm_lid_matrix_t object.
+*
+* RETURN VALUES
+*	Returns the maximum LID (host order) value contained
+*	in the matrix.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: LID Matrix/osm_lid_matrix_get_num_ports
+* NAME
+*	osm_lid_matrix_get_num_ports
+*
+* DESCRIPTION
+*	Returns the number of ports in this lid matrix.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_lid_matrix_get_num_ports(
+	IN const osm_lid_matrix_t* const p_lmx )
+{
+	return( p_lmx->num_ports );
+}
+/*
+* PARAMETERS
+*	p_lmx
+*		[in] Pointer to an osm_lid_matrix_t object.
+*
+* RETURN VALUES
+*	Returns the number of ports in this lid matrix.
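+*
+*	Together with osm_lid_matrix_get, this allows scanning every port
+*	for a given LID (sketch; loop variables are illustrative):
+*
+*		for( port = 0; port < osm_lid_matrix_get_num_ports( p_lmx );
+*		     port++ )
+*			hops = osm_lid_matrix_get( p_lmx, lid_ho, port );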
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: LID Matrix/osm_lid_matrix_get_least_hops +* NAME +* osm_lid_matrix_get_least_hops +* +* DESCRIPTION +* Returns the least number of hops for specified lid +* +* SYNOPSIS +*/ +static inline uint8_t +osm_lid_matrix_get_least_hops( + IN const osm_lid_matrix_t* const p_lmx, + IN const uint16_t lid_ho ) +{ + if( lid_ho > osm_lid_matrix_get_max_lid_ho( p_lmx ) ) + return( OSM_NO_PATH ); + + return( ((uint8_t *)cl_vector_get_ptr( + &p_lmx->lid_vec, lid_ho ))[p_lmx->num_ports] ); +} +/* +* PARAMETERS +* p_lmx +* [in] Pointer to an osm_lid_matrix_t object. +* +* lid_ho +* [in] LID (host order) for which to retrieve the shortest hop count. +* +* RETURN VALUES +* Returns the least number of hops for specified lid +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: LID Matrix/osm_lid_matrix_set +* NAME +* osm_lid_matrix_set +* +* DESCRIPTION +* Sets the hop count at the specified LID/Port intersection. +* +* SYNOPSIS +*/ +cl_status_t +osm_lid_matrix_set( + IN osm_lid_matrix_t* const p_lmx, + IN const uint16_t lid_ho, + IN const uint8_t port_num, + IN const uint8_t val ); +/* +* PARAMETERS +* p_lmx +* [in] Pointer to an osm_lid_matrix_t object. +* +* lid_ho +* [in] LID value (host order) to index into the vector. +* +* port_num +* [in] port number index into the vector entry. +* +* val +* [in] value (number of hops) to assign to this entry. +* +* RETURN VALUES +* Returns the hop count at the specified LID/Port intersection. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: LID Matrix/osm_lid_matrix_set_min_lid_size +* NAME +* osm_lid_matrix_set_min_lid_size +* +* DESCRIPTION +* Sets the size of the matrix to at least accomodate the +* specified LID value (host ordered) +* +* SYNOPSIS +*/ +static inline cl_status_t +osm_lid_matrix_set_min_lid_size( + IN osm_lid_matrix_t* const p_lmx, + IN const uint16_t lid_ho ) +{ + return( cl_vector_set_min_size( &p_lmx->lid_vec, lid_ho + 1 ) ); +} +/* +* PARAMETERS +* p_lmx +* [in] Pointer to an osm_lid_matrix_t object. +* +* lid_ho +* [in] Minimum LID value (host order) to accomodate. +* +* RETURN VALUES +* Sets the size of the matrix to at least accomodate the +* specified LID value (host ordered) +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: LID Matrix/osm_lid_matrix_clear +* NAME +* osm_lid_matrix_clear +* +* DESCRIPTION +* Clears a LID Matrix object in anticipation of a rebuild. +* +* SYNOPSIS +*/ +void +osm_lid_matrix_clear( + IN osm_lid_matrix_t* const p_lmx ); +/* +* PARAMETERS +* p_lmx +* [in] Pointer to an osm_lid_matrix_t object to clear. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_MATRIX_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_config_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_config_ctrl.h new file mode 100644 index 00000000..17390bac --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_config_ctrl.h @@ -0,0 +1,288 @@ +/* + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mcast_conf_ctrl_t. + * This object represents a controller that performs a + * Set(MulticastForwardingTable) for the specified port. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_MCAST_CONFIG_CTRL_H_ +#define _OSM_MCAST_CONFIG_CTRL_H_ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Multicast Forwarding Table Configuration Controller +* NAME +* Multicast Forwarding Table Configuration Controller +* +* DESCRIPTION +* The Multicast Forwarding Table Configuration Controller object +* encapsulates the information needed to Set(MulticastForwardingTable) +* at the specified port. +* +* The Multicast Forwarding Table Configuration Controller object +* is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ +/****s* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_t +* NAME +* osm_mcast_conf_ctrl_t +* +* DESCRIPTION +* Multicast Forwarding Table Configuration Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_mcast_conf_ctrl +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_dispatcher_t *p_disp; + +} osm_mcast_conf_ctrl_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* p_disp +* Pointer to the Dispatcher. +* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller object +*********/ +/****f* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_construct +* NAME +* osm_mcast_conf_ctrl_construct +* +* DESCRIPTION +* This function constructs a Multicast Forwarding Table Configuration Controller object. +* +* SYNOPSIS +*/ +void osm_mcast_conf_ctrl_construct( + IN osm_mcast_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Multicast Forwarding Table Configuration Controller object to construct. 
+* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_mcast_conf_ctrl_init, osm_mcast_conf_ctrl_destroy, +* and osm_mcast_conf_ctrl_is_inited. +* +* Calling osm_mcast_conf_ctrl_construct is a prerequisite to calling any other +* method except osm_mcast_conf_ctrl_init. +* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller object, osm_mcast_conf_ctrl_init, +* osm_mcast_conf_ctrl_destroy, osm_mcast_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_destroy +* NAME +* osm_mcast_conf_ctrl_destroy +* +* DESCRIPTION +* The osm_mcast_conf_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_mcast_conf_ctrl_destroy( + IN osm_mcast_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Multicast Forwarding Table Configuration Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_mcast_conf_ctrl_construct or osm_mcast_conf_ctrl_init. +* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller object, osm_mcast_conf_ctrl_construct, +* osm_mcast_conf_ctrl_init +*********/ + +/****f* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_init +* NAME +* osm_mcast_conf_ctrl_init +* +* DESCRIPTION +* The osm_mcast_conf_ctrl_init function initializes a +* Multicast Forwarding Table Configuration Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_mcast_conf_ctrl_init( + IN osm_mcast_conf_ctrl_t* const p_ctrl, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_mcast_conf_ctrl_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Multicast Forwarding Table Configuration Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Multicast Forwarding Table Configuration Controller methods. +* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller object, osm_mcast_conf_ctrl_construct, +* osm_mcast_conf_ctrl_destroy, osm_mcast_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_is_inited +* NAME +* osm_mcast_conf_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_mcast_conf_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_mcast_conf_ctrl_is_inited( + IN const osm_mcast_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_mcast_conf_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_mcast_conf_ctrl_construct or osm_mcast_conf_ctrl_init must be +* called before using this function. 
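+*
+*	A caller can use it as a guard before initiating configuration
+*	(sketch):
+*
+*		if( osm_mcast_conf_ctrl_is_inited( p_ctrl ) )
+*			status = osm_mcast_conf_ctrl_process( p_ctrl, guid );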
+* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller object, osm_mcast_conf_ctrl_construct, +* osm_mcast_conf_ctrl_init +*********/ + +/****f* OpenSM: Multicast Forwarding Table Configuration Controller/osm_mcast_conf_ctrl_process +* NAME +* osm_mcast_conf_ctrl_process +* +* DESCRIPTION +* Initiate a MulticastForwardingTable configuration. +* +* SYNOPSIS +*/ +ib_api_status_t osm_mcast_conf_ctrl_process( + IN const osm_mcast_conf_ctrl_t* const p_ctrl, + IN const ib_guid_t guid ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_mcast_conf_ctrl_t object. +* +* guid +* [in] Node GUID of switch to configure. +* +* RETURN VALUES +* CL_SUCCESS if configuration processing was successfully +* initiated. +* +* NOTES +* A success status here does not indicate that +* the Multicast Forwarding Table configuration completed successfully. +* +* SEE ALSO +* Multicast Forwarding Table Configuration Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_MCAST_CONFIG_CTRL_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv.h new file mode 100644 index 00000000..0a812167 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv.h @@ -0,0 +1,259 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mft_rcv_t. + * This object represents the Multicast Forwarding Table Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_MFT_RCV_H_ +#define _OSM_MFT_RCV_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/MFT Receiver +* NAME +* MFT Receiver +* +* DESCRIPTION +* The MFT Receiver object encapsulates the information +* needed to receive the MFT attribute from a node. +* +* The MFT Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. 
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: MFT Receiver/osm_mft_rcv_t
+* NAME
+*	osm_mft_rcv_t
+*
+* DESCRIPTION
+*	MFT Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mft_rcv
+{
+	osm_subn_t *p_subn;
+	osm_log_t *p_log;
+	cl_plock_t *p_lock;
+
+} osm_mft_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+* SEE ALSO
+*	MFT Receiver object
+*********/
+
+/****f* OpenSM: MFT Receiver/osm_mft_rcv_construct
+* NAME
+*	osm_mft_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs an MFT Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_mft_rcv_construct(
+	IN osm_mft_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an MFT Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mft_rcv_init, osm_mft_rcv_destroy.
+*
+*	Calling osm_mft_rcv_construct is a prerequisite to calling any other
+*	method except osm_mft_rcv_init.
+*
+* SEE ALSO
+*	MFT Receiver object, osm_mft_rcv_init,
+*	osm_mft_rcv_destroy
+*********/
+
+/****f* OpenSM: MFT Receiver/osm_mft_rcv_destroy
+* NAME
+*	osm_mft_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_mft_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mft_rcv_destroy(
+	IN osm_mft_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	MFT Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mft_rcv_construct or osm_mft_rcv_init.
+*
+* SEE ALSO
+*	MFT Receiver object, osm_mft_rcv_construct,
+*	osm_mft_rcv_init
+*********/
+
+/****f* OpenSM: MFT Receiver/osm_mft_rcv_init
+* NAME
+*	osm_mft_rcv_init
+*
+* DESCRIPTION
+*	The osm_mft_rcv_init function initializes an
+*	MFT Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mft_rcv_init(
+	IN osm_mft_rcv_t* const p_rcv,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_mft_rcv_t object to initialize.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the MFT Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other MFT Receiver methods.
+*
+* SEE ALSO
+*	MFT Receiver object, osm_mft_rcv_construct,
+*	osm_mft_rcv_destroy
+*********/
+
+/****f* OpenSM: MFT Receiver/osm_mft_rcv_process
+* NAME
+*	osm_mft_rcv_process
+*
+* DESCRIPTION
+*	Process the MFT attribute.
+*
+* SYNOPSIS
+*/
+void
+osm_mft_rcv_process(
+	IN const osm_mft_rcv_t* const p_rcv,
+	IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_mft_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's MFT attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes an MFT attribute.
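+*
+*	For illustration, the receiver is normally driven from a dispatcher
+*	callback similar to the sketch below (the callback name and its
+*	registration are hypothetical, not part of this header):
+*
+*		static void
+*		__mft_rcv_callback( IN void* context, IN void* p_data )
+*		{
+*			osm_mft_rcv_process( (osm_mft_rcv_t*)context,
+*				(osm_madw_t*)p_data );
+*		}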
+*
+* SEE ALSO
+*	MFT Receiver object, MFT Receive Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MFT_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv_ctrl.h
new file mode 100644
index 00000000..c664ecb6
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_fwd_rcv_ctrl.h
@@ -0,0 +1,235 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mft_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA
+ *	Multicast Forwarding Table attribute from a node (specifically, a
+ *	switch).
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MFT_RCV_CTRL_H_
+#define _OSM_MFT_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/MFT Receive Controller
+* NAME
+*	MFT Receive Controller
+*
+* DESCRIPTION
+*	The MFT Receive Controller object
+*	encapsulates the information
+*	needed to receive the Multicast Forwarding Table
+*	attribute from a node.
+*
+*	The MFT Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: MFT Receive Controller/osm_mft_rcv_ctrl_t
+* NAME
+*	osm_mft_rcv_ctrl_t
+*
+* DESCRIPTION
+*	MFT Receive Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mft_rcv_ctrl
+{
+	osm_mft_rcv_t *p_rcv;
+	osm_log_t *p_log;
+	cl_dispatcher_t *p_disp;
+	cl_disp_reg_handle_t h_disp;
+
+} osm_mft_rcv_ctrl_t;
+/*
+* FIELDS
+*	p_rcv
+*		Pointer to the MFT Receiver object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_disp
+*		Pointer to the Dispatcher.
+*
+*	h_disp
+*		Handle returned from dispatcher registration.
+*
+* SEE ALSO
+*	MFT Receive Controller object
+*********/
+
+/****f* OpenSM: MFT Receive Controller/osm_mft_rcv_ctrl_construct
+* NAME
+*	osm_mft_rcv_ctrl_construct
+*
+* DESCRIPTION
+*	This function constructs an MFT Receive Controller object.
+*
+* SYNOPSIS
+*/
+void
+osm_mft_rcv_ctrl_construct(
+	IN osm_mft_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an MFT Receive Controller object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mft_rcv_ctrl_init, osm_mft_rcv_ctrl_destroy.
+*
+*	Calling osm_mft_rcv_ctrl_construct is a prerequisite to calling any other
+*	method except osm_mft_rcv_ctrl_init.
+*
+* SEE ALSO
+*	MFT Receive Controller object, osm_mft_rcv_ctrl_init,
+*	osm_mft_rcv_ctrl_destroy
+*********/
+
+/****f* OpenSM: MFT Receive Controller/osm_mft_rcv_ctrl_destroy
+* NAME
+*	osm_mft_rcv_ctrl_destroy
+*
+* DESCRIPTION
+*	The osm_mft_rcv_ctrl_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mft_rcv_ctrl_destroy(
+	IN osm_mft_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	MFT Receive Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mft_rcv_ctrl_construct or osm_mft_rcv_ctrl_init.
+*
+* SEE ALSO
+*	MFT Receive Controller object, osm_mft_rcv_ctrl_construct,
+*	osm_mft_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: MFT Receive Controller/osm_mft_rcv_ctrl_init
+* NAME
+*	osm_mft_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_mft_rcv_ctrl_init function initializes an
+*	MFT Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mft_rcv_ctrl_init(
+	IN osm_mft_rcv_ctrl_t* const p_ctrl,
+	IN osm_mft_rcv_t* const p_rcv,
+	IN osm_log_t* const p_log,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_mft_rcv_ctrl_t object to initialize.
+*
+*	p_rcv
+*		[in] Pointer to an osm_mft_rcv_t object.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the MFT Receive Controller object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other MFT Receive Controller methods.
+*
+* SEE ALSO
+*	MFT Receive Controller object, osm_mft_rcv_ctrl_construct,
+*	osm_mft_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MFT_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_mgr.h
new file mode 100644
index 00000000..c975b96d
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_mgr.h
@@ -0,0 +1,339 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mcast_mgr_t.
+ *	This object represents the Multicast Manager object.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MCAST_MGR_H_
+#define _OSM_MCAST_MGR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+#define OSM_MCAST_MGR_LIST_SIZE_MIN 256
+
+/****h* OpenSM/Multicast Manager
+* NAME
+*	Multicast Manager
+*
+* DESCRIPTION
+*	The Multicast Manager object encapsulates the information
+*	needed to control multicast LID forwarding on the subnet.
+*
+*	The Multicast Manager object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Multicast Manager/osm_mcast_mgr_t
+* NAME
+*	osm_mcast_mgr_t
+*
+* DESCRIPTION
+*	Multicast Manager structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcast_mgr
+{
+	osm_subn_t *p_subn;
+	osm_req_t *p_req;
+	osm_log_t *p_log;
+	cl_plock_t *p_lock;
+
+} osm_mcast_mgr_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_req
+*		Pointer to the Requester object for sending SMPs.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+* SEE ALSO
+*	Multicast Manager object
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_construct
+* NAME
+*	osm_mcast_mgr_construct
+*
+* DESCRIPTION
+*	This function constructs a Multicast Manager object.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_mgr_construct(
+	IN osm_mcast_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+*	p_mgr
+*		[in] Pointer to a Multicast Manager object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mcast_mgr_destroy.
+*
+*	Calling osm_mcast_mgr_construct is a prerequisite to calling any other
+*	method except osm_mcast_mgr_init.
+*
+* SEE ALSO
+*	Multicast Manager object, osm_mcast_mgr_init,
+*	osm_mcast_mgr_destroy
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_destroy
+* NAME
+*	osm_mcast_mgr_destroy
+*
+* DESCRIPTION
+*	The osm_mcast_mgr_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_mgr_destroy(
+	IN osm_mcast_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+*	p_mgr
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
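+*
+*	For illustration, a typical lifecycle looks roughly like the sketch
+*	below (p_req, p_subn, p_log and p_lock are assumed to be valid
+*	caller-owned objects):
+*
+*		osm_mcast_mgr_t mgr;
+*
+*		osm_mcast_mgr_construct( &mgr );
+*		if( osm_mcast_mgr_init( &mgr, p_req, p_subn, p_log,
+*			p_lock ) == IB_SUCCESS )
+*		{
+*			... e.g. osm_mcast_mgr_process( &mgr ); ...
+*			osm_mcast_mgr_destroy( &mgr );
+*		}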
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	Multicast Manager object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mcast_mgr_construct or osm_mcast_mgr_init.
+*
+* SEE ALSO
+*	Multicast Manager object, osm_mcast_mgr_construct,
+*	osm_mcast_mgr_init
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_init
+* NAME
+*	osm_mcast_mgr_init
+*
+* DESCRIPTION
+*	The osm_mcast_mgr_init function initializes a
+*	Multicast Manager object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcast_mgr_init(
+	IN osm_mcast_mgr_t* const p_mgr,
+	IN osm_req_t* const p_req,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_mgr
+*		[in] Pointer to an osm_mcast_mgr_t object to initialize.
+*
+*	p_req
+*		[in] Pointer to the attribute Requester object.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Multicast Manager object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other Multicast Manager methods.
+*
+* SEE ALSO
+*	Multicast Manager object, osm_mcast_mgr_construct,
+*	osm_mcast_mgr_destroy
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_process
+* NAME
+*	osm_mcast_mgr_process
+*
+* DESCRIPTION
+*	Process and configure the subnet's multicast forwarding tables.
+*
+* SYNOPSIS
+*/
+osm_signal_t
+osm_mcast_mgr_process(
+	IN osm_mcast_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+*	p_mgr
+*		[in] Pointer to an osm_mcast_mgr_t object.
+*
+* RETURN VALUES
+*	Returns the appropriate signal to the caller:
+*	OSM_SIGNAL_DONE - operation is complete.
+*	OSM_SIGNAL_DONE_PENDING - local operations are complete, but
+*	transactions are still pending on the wire.
+*
+* NOTES
+*	This function processes the subnet, configuring switch
+*	multicast forwarding tables.
+*
+* SEE ALSO
+*	Multicast Manager, Node Info Response Controller
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_process_mgrp_cb
+* NAME
+*	osm_mcast_mgr_process_mgrp_cb
+*
+* DESCRIPTION
+*	Callback entry point for the osm_mcast_mgr_process_mgrp function.
+*
+* SYNOPSIS
+*/
+osm_signal_t
+osm_mcast_mgr_process_mgrp_cb(
+	IN void* const Context1,
+	IN void* const Context2 );
+/*
+* PARAMETERS
+*	(Context1) p_mgr
+*		[in] Pointer to an osm_mcast_mgr_t object.
+*
+*	(Context2) p_mgrp
+*		[in] Pointer to the multicast group to process.
+*
+* RETURN VALUES
+*	Returns the appropriate osm_signal_t value for the group processing.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Manager/osm_mcast_mgr_process_single
+* NAME
+*	osm_mcast_mgr_process_single
+*
+* DESCRIPTION
+*	Attempts to add a single port to an existing multicast spanning tree.
+*	This function can only succeed if the port to be added is connected
+*	to a switch that is already routing traffic for this multicast group.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcast_mgr_process_single(
+	IN osm_mcast_mgr_t* const p_mgr,
+	IN const ib_net16_t mlid,
+	IN const ib_net64_t port_guid,
+	IN const uint8_t join_state );
+/*
+* PARAMETERS
+*	p_mgr
+*		[in] Pointer to an osm_mcast_mgr_t object.
+*
+*	mlid
+*		[in] Multicast LID of the relevant multicast group.
+*
+*	port_guid
+*		[in] GUID of the port to attempt to add to the group.
+*
+*	join_state
+*		[in] Specifies the join state for this port per the spec.
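+*
+*		(For illustration: join_state is the IBA MCMemberRecord
+*		JoinState bit field, where 0x1 requests full membership,
+*		0x2 non membership, and 0x4 send-only non membership;
+*		e.g. join_state = 0x1 adds the port as a full member.)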
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*	IB_NOT_DONE
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MCAST_MGR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_tbl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_tbl.h
new file mode 100644
index 00000000..f4adad3c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcast_tbl.h
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mcast_tbl_t.
+ *	This object represents a multicast forwarding table.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#ifndef _OSM_MCAST_TBL_H_
+#define _OSM_MCAST_TBL_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****s* OpenSM: Forwarding Table/osm_mcast_tbl_t
+* NAME
+*	osm_mcast_tbl_t
+*
+* DESCRIPTION
+*	Multicast Forwarding Table structure.
+*
+*	Callers may directly access this object.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcast_fwd_tbl
+{
+	uint8_t num_ports;
+	uint8_t max_position;
+	uint16_t max_block;
+	int16_t max_block_in_use;
+	uint16_t num_entries;
+	uint16_t max_mlid_ho;
+	uint16_t (*p_mask_tbl)[][IB_MCAST_POSITION_MAX];
+} osm_mcast_tbl_t;
+/*
+* FIELDS
+*	num_ports
+*		The number of ports in the port mask. This value
+*		is the same as the number of ports on the switch.
+*
+*	max_position
+*		Maximum bit mask position for this table. This value
+*		is computed from the number of ports on the switch.
+*
+*	max_block
+*		Maximum block number supported in the table. This value
+*		is approximately the number of MLID entries divided by the
+*		number of MLIDs per block.
+*
+*	num_entries
+*		Number of entries in the table (aka number of MLIDs supported).
+*
+*	max_mlid_ho
+*		Maximum MLID value (host order).
+*
+*	p_mask_tbl
+*		Pointer to a two dimensional array of port masks for this switch.
+*		The first dimension is MLID, the second dimension is mask position.
+*		This pointer is null for switches that do not support multicast.
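+*
+*	For illustration, a set operation on this table conceptually reduces
+*	to the sketch below (16-bit port masks assumed, per the IBA MFT
+*	definition; this is not the literal implementation):
+*
+*		mlid_offset = mlid_ho - 0xC000;
+*		position = port_num / 16;
+*		bit = port_num % 16;
+*		(*p_mask_tbl)[mlid_offset][position] |= (uint16_t)(1 << bit);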
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_init
+* NAME
+*	osm_mcast_tbl_init
+*
+* DESCRIPTION
+*	This function initializes a Multicast Forwarding Table object.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcast_tbl_init(
+	IN osm_mcast_tbl_t* const p_tbl,
+	IN uint8_t const num_ports,
+	IN uint16_t const capacity );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object to initialize.
+*
+*	num_ports
+*		[in] Number of ports in the switch owning this table.
+*
+*	capacity
+*		[in] The number of MLID entries (starting at 0xC000) supported
+*		by this switch.
+*
+* RETURN VALUE
+*	IB_SUCCESS on success.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_delete
+* NAME
+*	osm_mcast_tbl_delete
+*
+* DESCRIPTION
+*	This destroys and deallocates a Multicast Forwarding Table object.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_tbl_delete(
+	IN osm_mcast_tbl_t** const pp_tbl );
+/*
+* PARAMETERS
+*	pp_tbl
+*		[in] Pointer to a pointer to the Multicast Forwarding Table object.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_destroy
+* NAME
+*	osm_mcast_tbl_destroy
+*
+* DESCRIPTION
+*	This destroys and deallocates a Multicast Forwarding Table object.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_tbl_destroy(
+	IN osm_mcast_tbl_t* const p_tbl );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_set
+* NAME
+*	osm_mcast_tbl_set
+*
+* DESCRIPTION
+*	Adds the port to the multicast group.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_tbl_set(
+	IN osm_mcast_tbl_t* const p_tbl,
+	IN const uint16_t mlid_ho,
+	IN const uint8_t port_num );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+*	mlid_ho
+*		[in] MLID value (host order) for which to set the route.
+*
+*	port_num
+*		[in] Port to add to the multicast group.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_clear_mlid
+* NAME
+*	osm_mcast_tbl_clear_mlid
+*
+* DESCRIPTION
+*	Removes all multicast paths for the specified MLID.
+*
+* SYNOPSIS
+*/
+void
+osm_mcast_tbl_clear_mlid(
+	IN osm_mcast_tbl_t* const p_tbl,
+	IN const uint16_t mlid_ho );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+*	mlid_ho
+*		[in] MLID value (host order) to clear.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_is_port
+* NAME
+*	osm_mcast_tbl_is_port
+*
+* DESCRIPTION
+*	Returns TRUE if the port is in the multicast group.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_mcast_tbl_is_port(
+	IN const osm_mcast_tbl_t* const p_tbl,
+	IN const uint16_t mlid_ho,
+	IN const uint8_t port_num );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+*	mlid_ho
+*		[in] MLID value (host order).
+*
+*	port_num
+*		[in] Port number on the switch.
+*
+* RETURN VALUE
+*	TRUE if the port is in the multicast group for the specified MLID,
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_is_any_port
+* NAME
+*	osm_mcast_tbl_is_any_port
+*
+* DESCRIPTION
+*	Returns TRUE if any port is in the multicast group.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_mcast_tbl_is_any_port(
+	IN const osm_mcast_tbl_t* const p_tbl,
+	IN const uint16_t mlid_ho );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+*	mlid_ho
+*		[in] MLID value (host order).
+*
+* RETURN VALUE
+*	Returns TRUE if any port is in the multicast group.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_set_block
+* NAME
+*	osm_mcast_tbl_set_block
+*
+* DESCRIPTION
+*	Copies the specified block into the Multicast Forwarding Table.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcast_tbl_set_block(
+	IN osm_mcast_tbl_t* const p_tbl,
+	IN const ib_net16_t* const p_block,
+	IN const int16_t block_num,
+	IN const uint8_t position );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to the Multicast Forwarding Table object.
+*
+*	p_block
+*		[in] Pointer to the Forwarding Table block.
+*
+*	block_num
+*		[in] Block number of this block.
+*
+*	position
+*		[in] Mask position of this block.
+*
+* RETURN VALUE
+*	IB_SUCCESS on success.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_get_block
+* NAME
+*	osm_mcast_tbl_get_block
+*
+* DESCRIPTION
+*	Retrieve a multicast forwarding table block.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_mcast_tbl_get_block(
+	IN osm_mcast_tbl_t* const p_tbl,
+	IN const int16_t block_num,
+	IN const uint8_t position,
+	OUT ib_net16_t* const p_block );
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to an osm_mcast_tbl_t object.
+*
+*	block_num
+*		[in] Block number of the block to retrieve.
+*
+*	position
+*		[in] Mask position of the block to retrieve.
+*
+*	p_block
+*		[out] Pointer to the 32 entry array in which to store the
+*		forwarding table block specified by block_num.
+*
+* RETURN VALUES
+*	Returns TRUE if there are more blocks necessary to
+*	configure all the MLIDs reachable from this switch.
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_get_max_block
+* NAME
+*	osm_mcast_tbl_get_max_block
+*
+* DESCRIPTION
+*	Returns the maximum block ID in this table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_mcast_tbl_get_max_block(
+	IN osm_mcast_tbl_t* const p_tbl )
+{
+	return( p_tbl->max_block );
+}
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to an osm_mcast_tbl_t object.
+*
+* RETURN VALUES
+*	Returns the maximum block ID in this table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_get_max_block_in_use
+* NAME
+*	osm_mcast_tbl_get_max_block_in_use
+*
+* DESCRIPTION
+*	Returns the maximum block ID in use in this table.
+*	A value of -1 indicates no blocks are in use.
+*
+* SYNOPSIS
+*/
+static inline int16_t
+osm_mcast_tbl_get_max_block_in_use(
+	IN osm_mcast_tbl_t* const p_tbl )
+{
+	return( p_tbl->max_block_in_use );
+}
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to an osm_mcast_tbl_t object.
+*
+* RETURN VALUES
+*	Returns the maximum block ID in use in this table.
+*	A value of -1 indicates no blocks are in use.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_mcast_tbl_get_max_position
+* NAME
+*	osm_mcast_tbl_get_max_position
+*
+* DESCRIPTION
+*	Returns the maximum position in this table.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_mcast_tbl_get_max_position(
+	IN osm_mcast_tbl_t* const p_tbl )
+{
+	return( p_tbl->max_position );
+}
+/*
+* PARAMETERS
+*	p_tbl
+*		[in] Pointer to an osm_mcast_tbl_t object.
+*
+* RETURN VALUES
+*	Returns the maximum position in this table.
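+*
+*	For illustration, a caller downloading the table to a switch might
+*	iterate blocks and positions as sketched below (block_array is a
+*	hypothetical 32 entry ib_net16_t buffer):
+*
+*		for( pos = 0; pos <= osm_mcast_tbl_get_max_position( p_tbl ); pos++ )
+*			for( block = 0;
+*			     block <= osm_mcast_tbl_get_max_block_in_use( p_tbl );
+*			     block++ )
+*				osm_mcast_tbl_get_block( p_tbl, (int16_t)block,
+*					(uint8_t)pos, block_array );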
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MCAST_TBL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_info.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_info.h
new file mode 100644
index 00000000..6f6e864c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_info.h
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mcm_info_t.
+ *	This object represents a Multicast Forwarding Information object.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MCM_INFO_H_
+#define _OSM_MCM_INFO_H_
+
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****s* OpenSM: Multicast Member Info/osm_mcm_info_t
+* NAME
+*	osm_mcm_info_t
+*
+* DESCRIPTION
+*	Multicast Membership Info object.
+*	This object contains information about a node's membership
+*	in a particular multicast group.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcm_info
+{
+	cl_list_item_t list_item;
+	ib_net16_t mlid;
+
+} osm_mcm_info_t;
+/*
+* FIELDS
+*	list_item
+*		Linkage structure for cl_qlist. MUST BE FIRST MEMBER!
+*
+*	mlid
+*		MLID of this multicast group.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Member Info/osm_mcm_info_construct
+* NAME
+*	osm_mcm_info_construct
+*
+* DESCRIPTION
+*	This function constructs a Multicast Member Info object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_mcm_info_construct(
+	IN osm_mcm_info_t* const p_mcm )
+{
+	memset( p_mcm, 0, sizeof(*p_mcm) );
+}
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to a Multicast Member Info object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
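+*
+*	For illustration, callers more commonly use osm_mcm_info_new (below),
+*	which allocates and initializes in one step; the list_item linkage
+*	then lets the object live on a cl_qlist (p_list is hypothetical):
+*
+*		osm_mcm_info_t *p_mcm = osm_mcm_info_new( mlid );
+*		if( p_mcm )
+*			cl_qlist_insert_tail( p_list, &p_mcm->list_item );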
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Member Info/osm_mcm_info_destroy
+* NAME
+*	osm_mcm_info_destroy
+*
+* DESCRIPTION
+*	The osm_mcm_info_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_info_destroy(
+	IN osm_mcm_info_t* const p_mcm );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to a Multicast Member Info object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Multicast Member Info object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_mcm_info_construct or
+*	osm_mcm_info_init.
+*
+* SEE ALSO
+*	Multicast Member Info object, osm_mcm_info_construct, osm_mcm_info_init
+*********/
+
+/****f* OpenSM: Multicast Member Info/osm_mcm_info_init
+* NAME
+*	osm_mcm_info_init
+*
+* DESCRIPTION
+*	Initializes a Multicast Member Info object for use.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_info_init(
+	IN osm_mcm_info_t* const p_mcm,
+	IN const ib_net16_t mlid );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to an osm_mcm_info_t object to initialize.
+*
+*	mlid
+*		[in] MLID value for this multicast group.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Member Info/osm_mcm_info_new
+* NAME
+*	osm_mcm_info_new
+*
+* DESCRIPTION
+*	Returns an initialized Multicast Member Info object for use.
+*
+* SYNOPSIS
+*/
+osm_mcm_info_t*
+osm_mcm_info_new(
+	IN const ib_net16_t mlid );
+/*
+* PARAMETERS
+*	mlid
+*		[in] MLID value for this multicast group.
+*
+* RETURN VALUES
+*	Pointer to an initialized Multicast Member Info object.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Member Info/osm_mcm_info_delete
+* NAME
+*	osm_mcm_info_delete
+*
+* DESCRIPTION
+*	Destroys and deallocates the specified object.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_info_delete(
+	IN osm_mcm_info_t* const p_mcm );
+/*
+* PARAMETERS
+*	p_mcm
+*		Pointer to the object to destroy.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MCM_INFO_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_port.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_port.h
new file mode 100644
index 00000000..78232dd3
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcm_port.h
@@ -0,0 +1,269 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mcm_port_t.
+ *	This object represents the membership of a port in a multicast group.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MCM_PORT_H_
+#define _OSM_MCM_PORT_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****s* OpenSM: MCM Port Object/osm_mcm_port_t
+* NAME
+*	osm_mcm_port_t
+*
+* DESCRIPTION
+*	This object represents a particular port as a member of a
+*	multicast group.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcm_port
+{
+	cl_map_item_t map_item;
+	ib_gid_t port_gid;
+	uint8_t scope_state;
+	boolean_t proxy_join;
+} osm_mcm_port_t;
+/*
+* FIELDS
+*	map_item
+*		Map Item for qmap linkage. Must be first element!!
+*
+*	port_gid
+*		GID of the member port.
+*
+*	scope_state
+*		Scope and join state of this port's membership, as encoded
+*		in the MCMemberRecord ScopeState field.
+*
+*	proxy_join
+*		If FALSE - Join was performed by the endport identified
+*		by PortGID. If TRUE - Join was performed on behalf of
+*		the endport identified by PortGID by another port within
+*		the same partition.
+*
+* SEE ALSO
+*	MCM Port Object
+*********/
+
+/****f* OpenSM: MCM Port Object/osm_mcm_port_construct
+* NAME
+*	osm_mcm_port_construct
+*
+* DESCRIPTION
+*	This function constructs an MCM Port object.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_port_construct(
+	IN osm_mcm_port_t* const p_mcm );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to an MCM Port Object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mcm_port_init, osm_mcm_port_destroy.
+*
+*	Calling osm_mcm_port_construct is a prerequisite to calling any other
+*	method except osm_mcm_port_init.
+*
+* SEE ALSO
+*	MCM Port Object, osm_mcm_port_init, osm_mcm_port_destroy
+*********/
+
+/****f* OpenSM: MCM Port Object/osm_mcm_port_destroy
+* NAME
+*	osm_mcm_port_destroy
+*
+* DESCRIPTION
+*	The osm_mcm_port_destroy function destroys an MCM Port Object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_port_destroy(
+	IN osm_mcm_port_t* const p_mcm );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to an MCM Port Object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified MCM Port Object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mcm_port_construct or osm_mcm_port_init.
+*
+* SEE ALSO
+*	MCM Port Object, osm_mcm_port_construct, osm_mcm_port_init
+*********/
+
+/****f* OpenSM: MCM Port Object/osm_mcm_port_init
+* NAME
+*	osm_mcm_port_init
+*
+* DESCRIPTION
+*	The osm_mcm_port_init function initializes an MCM Port Object for use.
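+*
+*	For illustration, callers that prefer dynamic allocation can use
+*	osm_mcm_port_new below, which combines allocation with this
+*	initialization (the arguments shown are placeholders):
+*
+*		p_mcm_port = osm_mcm_port_new( p_port_gid, scope_state, FALSE );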
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_port_init(
+	IN osm_mcm_port_t* const p_mcm,
+	IN const ib_gid_t* const p_port_gid,
+	IN const uint8_t scope_state,
+	IN const boolean_t proxy_join );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to an osm_mcm_port_t object to initialize.
+*
+*	p_port_gid
+*		[in] Pointer to the GID of the port to add to the multicast group.
+*
+*	scope_state
+*		[in] Scope state of the join request.
+*
+*	proxy_join
+*		[in] Proxy join state analyzed from the request.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*	Allows calling other MCM Port Object methods.
+*
+* SEE ALSO
+*	MCM Port Object, osm_mcm_port_construct, osm_mcm_port_destroy
+*********/
+
+/****f* OpenSM: MCM Port Object/osm_mcm_port_new
+* NAME
+*	osm_mcm_port_new
+*
+* DESCRIPTION
+*	The osm_mcm_port_new function allocates and initializes an
+*	MCM Port Object for use.
+*
+* SYNOPSIS
+*/
+osm_mcm_port_t*
+osm_mcm_port_new(
+	IN const ib_gid_t* const p_port_gid,
+	IN const uint8_t scope_state,
+	IN const boolean_t proxy_join );
+/*
+* PARAMETERS
+*	p_port_gid
+*		[in] Pointer to the GID of the port to add to the multicast group.
+*
+*	scope_state
+*		[in] Scope state of the join request.
+*
+*	proxy_join
+*		[in] Proxy join state analyzed from the request.
+*
+* RETURN VALUES
+*	Pointer to the allocated and initialized MCM Port object.
+*
+* NOTES
+*
+* SEE ALSO
+*	MCM Port Object, osm_mcm_port_construct, osm_mcm_port_destroy
+*********/
+
+/****f* OpenSM: MCM Port Object/osm_mcm_port_delete
+* NAME
+*	osm_mcm_port_delete
+*
+* DESCRIPTION
+*	The osm_mcm_port_delete function destroys and deallocates an
+*	MCM Port Object, releasing all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mcm_port_delete(
+	IN osm_mcm_port_t* const p_mcm );
+/*
+* PARAMETERS
+*	p_mcm
+*		[in] Pointer to an MCM Port Object to delete.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	MCM Port Object, osm_mcm_port_construct, osm_mcm_port_init
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MCM_PORT_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcmember.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcmember.h
new file mode 100644
index 00000000..ab47ea2a
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mcmember.h
@@ -0,0 +1,157 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mcmember_t.
+ *	This object represents an IBA mcmember.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MCMEMBER_H_
+#define _OSM_MCMEMBER_H_
+
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/MCMember
+* NAME
+*	MCMember
+*
+* DESCRIPTION
+*	The MCMember object encapsulates the information needed by the
+*	OpenSM to manage mcmembers. The OpenSM allocates one MCMember object
+*	per mcmember in the IBA subnet.
+*
+*	The MCMember object is not thread safe, thus callers must provide
+*	serialization.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Ranjit Pandit, Intel
+*
+*********/
+/****s* OpenSM: MCMember/osm_mcmember_t
+* NAME
+*	osm_mcmember_t
+*
+* DESCRIPTION
+*	MCMember structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcmember
+{
+	cl_map_item_t map_item;
+	ib_member_rec_t mcmember_rec;
+	cl_qlist_t mcmember_port_list;
+
+} osm_mcmember_t;
+
+/*
+* FIELDS
+*	map_item
+*		Linkage structure for cl_qmap. MUST BE FIRST MEMBER!
+*
+*	mcmember_rec
+*		The IBA defined MCMemberRecord data for this mcmember.
+*
+*	mcmember_port_list
+*		List of per-port information for each port that is a
+*		member of the multicast group.
+*
+* SEE ALSO
+*	MCMember object
+*********/
+
+/****s* OpenSM: MCMember/osm_mcmember_port_t
+* NAME
+*	osm_mcmember_port_t
+*
+* DESCRIPTION
+*	MCMember port structure, describing one port's membership.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcmember_port
+{
+	cl_list_item_t list_item;
+	ib_gid_t port_gid;
+	uint8_t scope_state;
+
+} osm_mcmember_port_t;
+
+/*
+* FIELDS
+*	list_item
+*		Linkage structure for cl_qlist. MUST BE FIRST MEMBER!
+*
+*	port_gid
+*		GID of the member port.
+*
+*	scope_state
+*		Scope and join state of this port's membership.
+*
+* SEE ALSO
+*	MCMember object
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MCMEMBER_H_ */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_msgdef.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_msgdef.h
new file mode 100644
index 00000000..8b591d1a
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_msgdef.h
@@ -0,0 +1,208 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of Dispatcher message values. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#ifndef _OSM_MSGDEF_H_ +#define _OSM_MSGDEF_H_ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Dispatcher Messages +* NAME +* Dispatcher Messages +* +* DESCRIPTION +* These constants define the messages sent between OpenSM controllers +* attached to the Dispatcher. +* +* Each message description contains the following information: +* Sent by: which controller(s) send this message +* Received by: which controller receives this message +* Delivery notice: Indicates if the sender requires confirmation +* that the message has been delivered. Typically a "yes" here +* means that some resources associated with sending the +* message must be freed. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Dispatcher Messages/OSM_MSG_REQ +* NAME +* OSM_MSG_REQ +* +* DESCRIPTION +* Initiates a QP0 attribute request. +* +* NOTES +* Sent by: osm_sm_t +* Received by: osm_req_ctrl_t +* Delivery notice: yes +* +***********/ + +/****s* OpenSM: Dispatcher Messages/OSM_MSG_MAD_NODE_INFO +* NAME +* OSM_MSG_MAD_NODE_INFO +* +* DESCRIPTION +* Message for received NodeInfo MADs. +* +* NOTES +* Sent by: osm_mad_ctrl_t +* Received by: osm_ni_rcv_ctrl_t +* Delivery notice: yes +* +* +***********/ + +/****s* OpenSM: Dispatcher Messages/OSM_MSG_MAD_PORT_INFO +* NAME +* OSM_MSG_MAD_PORT_INFO +* +* DESCRIPTION +* Message for received PortInfo MADs. +* +* NOTES +* Sent by: osm_mad_ctrl_t +* Received by: osm_pi_rcv_ctrl_t +* Delivery notice: yes +* +* +***********/ + +/****s* OpenSM: Dispatcher Messages/OSM_MSG_MAD_SWITCH_INFO +* NAME +* OSM_MSG_MAD_SWITCH_INFO +* +* DESCRIPTION +* Message for received SwitchInfo MADs. +* +* NOTES +* Sent by: osm_mad_ctrl_t +* Received by: osm_si_rcv_ctrl_t +* Delivery notice: yes +* +***********/ + +/****s* OpenSM: Dispatcher Messages/OSM_MSG_MAD_NODE_DESC +* NAME +* OSM_MSG_MAD_NODE_DESC +* +* DESCRIPTION +* Message for received NodeDescription MADs. +* +* NOTES +* Sent by: osm_mad_ctrl_t +* Received by: osm_nd_rcv_ctrl_t +* Delivery notice: yes +* +* SOURCE +***********/ + +/****d* OpenSM: Dispatcher Messages/OSM_MSG_NO_SMPS_OUTSTANDING +* NAME +* OSM_MSG_NO_SMPS_OUTSTANDING +* +* DESCRIPTION +* Message indicating that there are no outstanding SMPs on the subnet. 
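+*
+*	For illustration, a sender posts such a message through the central
+*	dispatcher roughly as sketched below (the handle, data and callback
+*	arguments are placeholders; consult the complib dispatcher for the
+*	exact contract):
+*
+*		cl_disp_post( h_disp_reg, OSM_MSG_NO_SMPS_OUTSTANDING,
+*			p_data, pfn_msg_done_cb, context );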
+* +* NOTES +* Sent by: osm_mad_ctrl_t +* Received by: osm_state_mgr_ctrl_t +* Delivery notice: no +* +* SOURCE +***********/ +enum +{ + OSM_MSG_REQ = 0, + OSM_MSG_MAD_NODE_INFO, + OSM_MSG_MAD_PORT_INFO, + OSM_MSG_MAD_SWITCH_INFO, + OSM_MSG_MAD_NODE_DESC, + OSM_MSG_NO_SMPS_OUTSTANDING, + OSM_MSG_MAD_NODE_RECORD, + OSM_MSG_MAD_PORTINFO_RECORD, + OSM_MSG_MAD_SERVICE_RECORD, + OSM_MSG_MAD_PATH_RECORD, + OSM_MSG_MAD_MCMEMBER_RECORD, + OSM_MSG_MAD_LINK_RECORD, + OSM_MSG_MAD_SMINFO_RECORD, + OSM_MSG_MAD_CLASS_PORT_INFO, + OSM_MSG_MAD_INFORM_INFO, + OSM_MSG_MAD_LFT_RECORD, + OSM_MSG_MAD_LFT, + OSM_MSG_MAD_SM_INFO, + OSM_MSG_MAD_NOTICE, + OSM_MSG_LIGHT_SWEEP_FAIL, + OSM_MSG_MAD_MFT, + OSM_MSG_MAD_PKEY_TBL_RECORD, + OSM_MSG_MAD_VL_ARB_RECORD, + OSM_MSG_MAD_SLVL_TBL_RECORD, + OSM_MSG_MAD_PKEY, + OSM_MSG_MAD_VL_ARB, + OSM_MSG_MAD_SLVL, + OSM_MSG_MAD_GUIDINFO_RECORD, + OSM_MSG_MAD_INFORM_INFO_RECORD, + OSM_MSG_MAD_SWITCH_INFO_RECORD, + OSM_MSG_MAD_MFT_RECORD, +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + OSM_MSG_MAD_MULTIPATH_RECORD, +#endif + OSM_MSG_MAX +}; + +END_C_DECLS + +#endif /* _OSM_MSGDEF_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtl_bind.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtl_bind.h new file mode 100644 index 00000000..4394ff22 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtl_bind.h @@ -0,0 +1,144 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _OSM_BIND_H_ +#define _OSM_BIND_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****s* OpenSM: Vendor/osm_vendor_mgt_bind +* NAME +* osm_vendor_mgt_bind_t +* +* DESCRIPTION +* Tracks the handles returned by IB_MGT to the SMI and GSI +* Nulled on init of the vendor obj. Populated on first bind. 
+*
+* SYNOPSIS
+*/
+typedef struct _osm_vendor_mgt_bind
+{
+	boolean_t smi_init, gsi_init;
+	IB_MGT_mad_hndl_t smi_mads_hdl;
+	IB_MGT_mad_hndl_t gsi_mads_hdl;
+	struct _osm_mtl_bind_info *smi_p_bind;
+}
+osm_vendor_mgt_bind_t;
+
+/*
+* FIELDS
+*	smi_init, gsi_init
+*		TRUE once the corresponding SMI/GSI handle has been obtained
+*		from IB_MGT.
+*
+*	smi_mads_hdl
+*		Handle returned by IB_MGT_get_handle to the IB_MGT_SMI.
+*
+*	gsi_mads_hdl
+*		Handle returned by IB_MGT_get_handle to the IB_MGT_GSI.
+*
+*	smi_p_bind
+*		Pointer to the bind info used for the SMI.
+*
+* SEE ALSO
+*********/
+
+/****s* OpenSM: Vendor/osm_mtl_bind_info_t
+* NAME
+*	osm_mtl_bind_info_t
+*
+* DESCRIPTION
+*	Handle to the result of binding class callbacks to IB_MGT.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mtl_bind_info
+{
+	IB_MGT_mad_hndl_t mad_hndl;
+	osm_vendor_t *p_vend;
+	void *client_context;
+	VAPI_hca_hndl_t hca_hndl;
+	VAPI_hca_id_t hca_id;
+	uint8_t port_num;
+	osm_vend_mad_recv_callback_t rcv_callback;
+	osm_vend_mad_send_err_callback_t send_err_callback;
+	osm_mad_pool_t *p_osm_pool;
+}
+osm_mtl_bind_info_t;
+
+/*
+* FIELDS
+*	mad_hndl
+*		The handle returned from the registration in IB_MGT.
+*
+*	p_vend
+*		Pointer to the vendor object.
+*
+*	client_context
+*		User's context passed during osm_bind.
+*
+*	hca_hndl
+*		VAPI handle of the HCA we bind to.
+*
+*	hca_id
+*		HCA Id we bind to.
+*
+*	port_num
+*		Port number (within the HCA) of the bound port.
+*
+*	rcv_callback
+*		OSM Callback function to be called on receive of MAD.
+*
+*	send_err_callback
+*		OSM Callback to be called on send error.
+*
+*	p_osm_pool
+*		Points to the MAD pool used by OSM.
+*
+* SEE ALSO
+*********/
+ib_api_status_t
+osm_mtl_send_mad(
+	IN osm_mtl_bind_info_t *p_bind,
+	IN osm_madw_t * const p_madw);
+
+END_C_DECLS
+
+#endif	// _OSM_BIND_H_
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtree.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtree.h
new file mode 100644
index 00000000..24e4a857
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_mtree.h
@@ -0,0 +1,378 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mtree_t.
+ *	This object represents a multicast spanning tree.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_MTREE_H_
+#define _OSM_MTREE_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+#define OSM_MTREE_LEAF ((void*)-1)
+
+/****h* OpenSM/Multicast Tree
+* NAME
+*	Multicast Tree
+*
+* DESCRIPTION
+*	The Multicast Tree object encapsulates the information needed by the
+*	OpenSM to manage multicast fabric routes. It is a tree structure
+*	in which each node in the tree represents a switch, and may have a
+*	varying number of children.
+*
+*	Multicast trees do not contain loops.
+*
+*	The Multicast Tree is not thread safe, thus callers must provide
+*	serialization.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Multicast Tree/osm_mtree_node_t
+* NAME
+*	osm_mtree_node_t
+*
+* DESCRIPTION
+*	The MTree Node object encapsulates the information needed by the
+*	OpenSM for a particular switch in the multicast tree.
+*
+*	The MTree Node object is not thread safe, thus callers must provide
+*	serialization.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mtree_node
+{
+	cl_map_item_t map_item;
+	osm_switch_t *p_sw;
+	uint8_t max_children;
+	struct _osm_mtree_node *p_up;
+	struct _osm_mtree_node *child_array[1];
+} osm_mtree_node_t;
+/*
+* FIELDS
+*	map_item
+*		Linkage for quick map. MUST BE FIRST ELEMENT!!!
+*
+*	p_sw
+*		Pointer to the switch represented by this tree node.
+*
+*	max_children
+*		Maximum number of child nodes of this node. Equal to the
+*		number of ports on the switch if the switch supports
+*		multicast. Equal to 1 (default route) if the switch does
+*		not support multicast.
+*
+*	p_up
+*		Pointer to the parent of this node. If this pointer is
+*		NULL, the node is at the root of the tree.
+*
+*	child_array
+*		Array (indexed by port number) of pointers to the
+*		child osm_mtree_node_t objects of this tree node, if any.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Tree/osm_mtree_node_construct
+* NAME
+*	osm_mtree_node_construct
+*
+* DESCRIPTION
+*	This function constructs a Multicast Tree Node object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_mtree_node_construct(
+	IN osm_mtree_node_t* const p_mtn )
+{
+	memset( p_mtn, 0, sizeof(*p_mtn) );
+}
+/*
+* PARAMETERS
+*	p_mtn
+*		[in] Pointer to a Multicast Tree Node object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Tree/osm_mtree_node_destroy
+* NAME
+*	osm_mtree_node_destroy
+*
+* DESCRIPTION
+*	The osm_mtree_node_destroy function destroys a node, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mtree_node_destroy(
+	IN osm_mtree_node_t* const p_mtn );
+/*
+* PARAMETERS
+*	p_mtn
+*		[in] Pointer to a Multicast Tree Node object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Multicast Tree object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mtree_node_construct or osm_mtree_node_init.
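+*
+*	For illustration, a spanning tree is assembled from nodes roughly as
+*	sketched below (the switch pointers and port number are placeholders):
+*
+*		p_root = osm_mtree_node_new( p_root_sw );
+*		p_child = osm_mtree_node_new( p_child_sw );
+*		p_child->p_up = p_root;
+*		p_root->child_array[port_num] = p_child;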
+* +* SEE ALSO +* Multicast Tree object, osm_mtree_construct, osm_mtree_init +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_node_init +* NAME +* osm_mtree_node_init +* +* DESCRIPTION +* Initializes a Multicast Tree Node object for use. +* +* SYNOPSIS +*/ +void +osm_mtree_node_init( + IN osm_mtree_node_t* const p_mtn, + IN const osm_switch_t* const p_sw ); +/* +* PARAMETERS +* p_mtn +* [in] Pointer to an osm_mtree_node_t object to initialize. +* +* p_sw +* [in] Pointer to the switch represented by this node. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_node_new +* NAME +* osm_mtree_node_new +* +* DESCRIPTION +* Returns an initialized a Multicast Tree object for use. +* +* SYNOPSIS +*/ +osm_mtree_node_t* +osm_mtree_node_new( + IN const osm_switch_t* const p_sw ); +/* +* PARAMETERS +* p_sw +* [in] Pointer to the switch represented by this node. +* +* RETURN VALUES +* Pointer to an initialized tree node. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_destroy +* NAME +* osm_mtree_destroy +* +* DESCRIPTION +* Destroys a Multicast Tree object given by the p_mtn +* +* SYNOPSIS +*/ +void +osm_mtree_destroy( + IN osm_mtree_node_t *p_mtn ); +/* +* PARAMETERS +* p_mtn +* [in] Pointer to an osm_mtree_node_t object to destroy. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_node_get_max_children +* NAME +* osm_mtree_node_get_max_children +* +* DESCRIPTION +* Returns the number maximum number of children of this node. +* The return value is 1 greater than the highest valid port +* number on the switch. +* +* +* SYNOPSIS +*/ +static inline uint8_t +osm_mtree_node_get_max_children( + IN const osm_mtree_node_t* const p_mtn ) +{ + return( p_mtn->max_children ); +} +/* +* PARAMETERS +* p_mtn +* [in] Pointer to the multicast tree node. +* +* RETURN VALUES +* See description. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_node_get_child +* NAME +* osm_mtree_node_get_child +* +* DESCRIPTION +* Returns the specified child node of this node. +* +* SYNOPSIS +*/ +static inline osm_mtree_node_t* +osm_mtree_node_get_child( + IN const osm_mtree_node_t* const p_mtn, + IN const uint8_t child ) +{ + CL_ASSERT( child < p_mtn->max_children ); + return( p_mtn->child_array[child] ); +} +/* +* PARAMETERS +* p_mtn +* [in] Pointer to the multicast tree node. +* +* child +* [in] Index of the child to retrieve. +* +* RETURN VALUES +* Returns the specified child node of this node. +* +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Multicast Tree/osm_mtree_node_get_switch_ptr +* NAME +* osm_mtree_node_get_switch_ptr +* +* DESCRIPTION +* Returns a pointer to the switch object represented by this tree node. +* +* SYNOPSIS +*/ +static inline osm_switch_t* +osm_mtree_node_get_switch_ptr( + IN const osm_mtree_node_t* const p_mtn ) +{ + return( p_mtn->p_sw ); +} +/* +* PARAMETERS +* p_mtn +* [in] Pointer to the multicast tree node. +* +* child +* [in] Index of the child to retrieve. +* +* RETURN VALUES +* Returns a pointer to the switch object represented by this tree node. 
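+*
+* EXAMPLE
+* A sketch of a depth-first walk that counts the switches in a
+* multicast tree. It assumes each child slot holds either NULL,
+* OSM_MTREE_LEAF (a member port hangs off that switch port), or a
+* pointer to a child tree node:
+*
+*    static void count_switches( IN const osm_mtree_node_t *p_mtn,
+*                                IN OUT uint32_t *p_count )
+*    {
+*        uint8_t i;
+*
+*        if( p_mtn == NULL || (const void*)p_mtn == OSM_MTREE_LEAF )
+*            return;
+*        (*p_count)++;    /* every tree node represents one switch */
+*        for( i = 0; i < osm_mtree_node_get_max_children( p_mtn ); i++ )
+*            count_switches( osm_mtree_node_get_child( p_mtn, i ),
+*                            p_count );
+*    }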
+* +* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_MTREE_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_multicast.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_multicast.h new file mode 100644 index 00000000..b182d3b7 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_multicast.h @@ -0,0 +1,769 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mgrp_t. + * This object represents an IBA Multicast Group. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_MULTICAST_H_ +#define _OSM_MULTICAST_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Multicast Group +* NAME +* Multicast Group +* +* DESCRIPTION +* The Multicast Group encapsulates the information needed by the +* OpenSM to manage Multicast Groups. The OpenSM allocates one +* Multicast Group object per Multicast Group in the IBA subnet. +* +* The Multicast Group is not thread safe, thus callers must provide +* serialization. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****f* IBA Base: OpneSM: Multicast Group/osm_get_mcast_req_type_str +* NAME +* osm_get_mcast_req_type_str +* +* DESCRIPTION +* Returns a string for the specified osm_mcast_req_type_t value. +* +* SYNOPSIS +*/ +const char* +osm_get_mcast_req_type_str( + IN osm_mcast_req_type_t req_type ); +/* +* PARAMETERS +* req_type +* [in] osm_mcast_req_type value +* +* RETURN VALUES +* Pointer to the request type description string. +* +* NOTES +* +* SEE ALSO +*********/ + +/****s* OpenSM: Multicast Group/osm_mcast_mgr_ctxt_t +* NAME +* osm_mcast_mgr_ctxt_t +* +* DESCRIPTION +* Struct for passing context arguments to the multicast manager. 
+*
+* The osm_mcast_mgr_ctxt_t object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct osm_mcast_mgr_ctxt
+{
+    ib_net16_t              mlid;
+    osm_mcast_req_type_t    req_type;
+    ib_net64_t              port_guid;
+} osm_mcast_mgr_ctxt_t;
+/*
+* FIELDS
+*
+* mlid
+* The network ordered LID of this Multicast Group (must be >= 0xC000).
+*
+* req_type
+* The type of the request that caused this call
+* (multicast create/join/leave).
+*
+* port_guid
+* The port GUID of the port that is being added to or removed from
+* the multicast group due to this call.
+*
+* SEE ALSO
+*********/
+
+/****s* OpenSM: Multicast Group/osm_mgrp_t
+* NAME
+* osm_mgrp_t
+*
+* DESCRIPTION
+* Multicast Group structure.
+*
+* The osm_mgrp_t object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mgrp
+{
+    cl_map_item_t       map_item;
+    ib_net16_t          mlid;
+    osm_mtree_node_t    *p_root;
+    cl_qmap_t           mcm_port_tbl;
+    ib_member_rec_t     mcmember_rec;
+    boolean_t           well_known;
+    boolean_t           to_be_deleted;
+    uint32_t            last_change_id;
+    uint32_t            last_tree_id;
+} osm_mgrp_t;
+/*
+* FIELDS
+* map_item
+* Map Item for qmap linkage. Must be first element!!
+*
+* mlid
+* The network ordered LID of this Multicast Group (must be
+* >= 0xC000).
+*
+* p_root
+* Pointer to the root "tree node" in the single spanning tree
+* for this multicast group. The nodes of the tree represent
+* switches. Member ports are not represented in the tree.
+*
+* mcm_port_tbl
+* Table (sorted by port GUID) of osm_mcm_port_t objects
+* representing the member ports of this multicast group.
+*
+* mcmember_rec
+* Holds the parameters of the Multicast Group.
+*
+* well_known
+* Indicates that this is the well-known multicast group, which
+* is created during the initialization of SM/SA and remains
+* present even if there are no member ports for this group.
+*
+* to_be_deleted
+* Since groups are deleted only after re-routing, this flag
+* marks a group that is about to be deleted, so that a new
+* join can be recognized as a create request.
+*
+* last_change_id
+* A counter for the number of changes applied to the group.
+* This counter should be incremented on any modification
+* to the group: joining or leaving of ports.
+*
+* last_tree_id
+* The last change id used for building the current tree.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_func_t
+* NAME
+* osm_mgrp_func_t
+*
+* DESCRIPTION
+* Callback for the osm_mgrp_apply_func function.
+* The callback function must not modify the tree linkage.
+*
+* SYNOPSIS
+*/
+typedef void (*osm_mgrp_func_t)(
+    IN const osm_mgrp_t* const p_mgrp,
+    IN const osm_mtree_node_t* const p_mtn,
+    IN void* context );
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to the multicast group object.
+*
+* p_mtn
+* [in] Pointer to the multicast tree node.
+*
+* context
+* [in] User context.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_construct
+* NAME
+* osm_mgrp_construct
+*
+* DESCRIPTION
+* This function constructs a Multicast Group.
+*
+* SYNOPSIS
+*/
+void
+osm_mgrp_construct(
+    IN osm_mgrp_t* const p_mgrp );
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to a Multicast Group to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_mgrp_init, osm_mgrp_destroy.
+*
+* Calling osm_mgrp_construct is a prerequisite to calling any other
+* method except osm_mgrp_init.
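+*
+* EXAMPLE
+* A minimal lifecycle sketch for a statically allocated group (the
+* MLID 0xC001 is an arbitrary illustrative value; error handling is
+* elided):
+*
+*    osm_mgrp_t mgrp;
+*
+*    osm_mgrp_construct( &mgrp );
+*    osm_mgrp_init( &mgrp, cl_hton16( 0xC001 ) );
+*    /* ... add/remove member ports, build the tree ... */
+*    osm_mgrp_destroy( &mgrp );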
+*
+* SEE ALSO
+* Multicast Group, osm_mgrp_init, osm_mgrp_destroy
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_destroy
+* NAME
+* osm_mgrp_destroy
+*
+* DESCRIPTION
+* The osm_mgrp_destroy function destroys a Multicast Group, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mgrp_destroy(
+    IN osm_mgrp_t* const p_mgrp );
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to a Multicast Group to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified Multicast Group.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to osm_mgrp_construct or
+* osm_mgrp_init.
+*
+* SEE ALSO
+* Multicast Group, osm_mgrp_construct, osm_mgrp_init
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_init
+* NAME
+* osm_mgrp_init
+*
+* DESCRIPTION
+* The osm_mgrp_init function initializes a Multicast Group for use.
+*
+* SYNOPSIS
+*/
+void
+osm_mgrp_init(
+    IN osm_mgrp_t* const p_mgrp,
+    IN const ib_net16_t mlid );
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object to initialize.
+*
+* mlid
+* [in] Multicast LID for this multicast group.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+* Allows calling other Multicast Group methods.
+*
+* SEE ALSO
+* Multicast Group, osm_mgrp_construct, osm_mgrp_destroy
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_new
+* NAME
+* osm_mgrp_new
+*
+* DESCRIPTION
+* Allocates and initializes a Multicast Group for use.
+*
+* SYNOPSIS
+*/
+osm_mgrp_t*
+osm_mgrp_new(
+    IN const ib_net16_t mlid );
+/*
+* PARAMETERS
+* mlid
+* [in] Multicast LID for this multicast group.
+*
+* RETURN VALUES
+* Pointer to the new, initialized Multicast Group on success.
+* NULL otherwise.
+*
+* NOTES
+* Allows calling other Multicast Group methods.
+*
+* SEE ALSO
+* Multicast Group, osm_mgrp_construct, osm_mgrp_destroy
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_delete
+* NAME
+* osm_mgrp_delete
+*
+* DESCRIPTION
+* Destroys and de-allocates a Multicast Group.
+*
+* SYNOPSIS
+*/
+void
+osm_mgrp_delete(
+    IN osm_mgrp_t* const p_mgrp );
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* Multicast Group, osm_mgrp_construct, osm_mgrp_destroy
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_is_guid
+* NAME
+* osm_mgrp_is_guid
+*
+* DESCRIPTION
+* Indicates if the specified port GUID is a member of the Multicast Group.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_mgrp_is_guid(
+    IN const osm_mgrp_t* const p_mgrp,
+    IN const ib_net64_t port_guid )
+{
+    return( cl_qmap_get( &p_mgrp->mcm_port_tbl, port_guid ) !=
+        cl_qmap_end( &p_mgrp->mcm_port_tbl ) );
+}
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* port_guid
+* [in] Port GUID.
+*
+* RETURN VALUES
+* TRUE if the port GUID is a member of the group,
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* Multicast Group
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_is_empty
+* NAME
+* osm_mgrp_is_empty
+*
+* DESCRIPTION
+* Indicates if the multicast group has any member ports.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_mgrp_is_empty(
+    IN const osm_mgrp_t* const p_mgrp )
+{
+    return( cl_qmap_count( &p_mgrp->mcm_port_tbl ) == 0 );
+}
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* RETURN VALUES
+* TRUE if there are no ports in the multicast group.
+* FALSE otherwise.
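+*
+* EXAMPLE
+* A sketch combining the two predicates above; port_guid is assumed
+* to be a network-ordered GUID taken from an incoming MCMemberRecord
+* request:
+*
+*    if( osm_mgrp_is_empty( p_mgrp ) )
+*    {
+*        /* no members left - candidate for deletion after re-route */
+*    }
+*    else if( osm_mgrp_is_guid( p_mgrp, port_guid ) )
+*    {
+*        /* port is already a member - treat the join as a re-join */
+*    }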
+*
+* NOTES
+*
+* SEE ALSO
+* Multicast Group
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_get_mlid
+* NAME
+* osm_mgrp_get_mlid
+*
+* DESCRIPTION
+* The osm_mgrp_get_mlid function returns the multicast LID of this group.
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t
+osm_mgrp_get_mlid(
+    IN const osm_mgrp_t* const p_mgrp )
+{
+    return( p_mgrp->mlid );
+}
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* RETURN VALUES
+* MLID of the Multicast Group.
+*
+* NOTES
+*
+* SEE ALSO
+* Multicast Group
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_add_port
+* NAME
+* osm_mgrp_add_port
+*
+* DESCRIPTION
+* Adds a port to the multicast group.
+*
+* SYNOPSIS
+*/
+osm_mcm_port_t*
+osm_mgrp_add_port(
+    IN osm_mgrp_t* const p_mgrp,
+    IN const ib_gid_t* const p_port_gid,
+    IN const uint8_t join_state,
+    IN boolean_t proxy_join);
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* p_port_gid
+* [in] Pointer to the GID of the port to add to the multicast group.
+*
+* join_state
+* [in] The join state for this port in the group.
+*
+* proxy_join
+* [in] TRUE if the join is performed on behalf of another port
+* (proxy join), FALSE otherwise.
+*
+* RETURN VALUES
+* Pointer to the new osm_mcm_port_t member object on success.
+* NULL otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_is_port_present
+* NAME
+* osm_mgrp_is_port_present
+*
+* DESCRIPTION
+* Checks whether a port is present in the multicast group.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_mgrp_is_port_present(
+    IN const osm_mgrp_t* const p_mgrp,
+    IN const ib_net64_t port_guid,
+    OUT osm_mcm_port_t ** const pp_mcm_port);
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* port_guid
+* [in] Port GUID of the port to look up.
+*
+* pp_mcm_port
+* [out] Pointer to a pointer to osm_mcm_port_t.
+* Updated to point to the member on success; set to NULL otherwise.
+*
+* RETURN VALUES
+* TRUE if the port is present,
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_remove_port
+* NAME
+* osm_mgrp_remove_port
+*
+* DESCRIPTION
+* Removes a port from the multicast group.
+*
+* SYNOPSIS
+*/
+void
+osm_mgrp_remove_port(
+    IN osm_subn_t* const p_subn,
+    IN osm_log_t* const p_log,
+    IN osm_mgrp_t* const p_mgrp,
+    IN const ib_net64_t port_guid );
+/*
+* PARAMETERS
+*
+* p_subn
+* [in] Pointer to the subnet object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* port_guid
+* [in] Port GUID of the departing port.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_get_root_switch
+* NAME
+* osm_mgrp_get_root_switch
+*
+* DESCRIPTION
+* Returns the "root" switch of this multicast group. The root switch
+* is at the trunk of the multicast single spanning tree.
+*
+* SYNOPSIS
+*/
+static inline osm_switch_t*
+osm_mgrp_get_root_switch(
+    IN const osm_mgrp_t* const p_mgrp )
+{
+    if( p_mgrp->p_root )
+        return( p_mgrp->p_root->p_sw );
+    else
+        return( NULL );
+}
+/*
+* PARAMETERS
+* p_mgrp
+* [in] Pointer to an osm_mgrp_t object.
+*
+* RETURN VALUES
+* Returns the "root" switch of this multicast group. The root switch
+* is at the trunk of the multicast single spanning tree.
+*
+* NOTES
+*
+* SEE ALSO
+* Multicast Group
+*********/
+
+/****f* OpenSM: Multicast Group/osm_mgrp_compute_avg_hops
+* NAME
+* osm_mgrp_compute_avg_hops
+*
+* DESCRIPTION
+* Returns the average number of hops from the given switch
+* to all members of the multicast group.
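+* For example, a switch that is 2 hops away from one member port and
+* 4 hops away from the only other member port yields an average of
+* 3.0; a routing engine can use this value to pick a well-centered
+* root switch for the spanning tree.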
+* +* SYNOPSIS +*/ +float +osm_mgrp_compute_avg_hops( + const osm_mgrp_t* const p_mgrp, + const osm_switch_t* const p_sw ); +/* +* PARAMETERS +* p_mgrp +* [in] Pointer to an osm_mgrp_t object. +* +* p_sw +* [in] Pointer to the switch from which to measure. +* +* RETURN VALUES +* Returns the average number of hops from the given to switch +* to all member of a multicast group. +* +* NOTES +* +* SEE ALSO +* Multicast Group +*********/ + +/****f* OpenSM: Multicast Group/osm_mgrp_apply_func +* NAME +* osm_mgrp_apply_func +* +* DESCRIPTION +* Calls the specified function for each element in the tree. +* Elements are passed to the callback function in no particular order. +* +* SYNOPSIS +*/ +void +osm_mgrp_apply_func( + const osm_mgrp_t* const p_mgrp, + osm_mgrp_func_t p_func, + void* context ); +/* +* PARAMETERS +* p_mgrp +* [in] Pointer to an osm_mgrp_t object. +* +* p_func +* [in] Pointer to the users callback function. +* +* context +* [in] User context passed to the callback function. +* +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Multicast Group +*********/ + +/****f* OpenSM: Multicast Group/osm_mgrp_send_delete_notice +* NAME +* osm_mgrp_send_delete_notice +* +* DESCRIPTION +* Sends a notice that the given multicast group is now deleted. +* +* SYNOPSIS +*/ +void +osm_mgrp_send_delete_notice( + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_mgrp_t *p_mgrp ); +/* +* PARAMETERS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_mgrp +* [in] Pointer to an osm_mgrp_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Multicast Group +*********/ + +/****f* OpenSM: Multicast Group/osm_mgrp_send_create_notice +* NAME +* osm_mgrp_send_create_notice +* +* DESCRIPTION +* Sends a notice that the given multicast group is now created. +* +* SYNOPSIS +*/ +void +osm_mgrp_send_create_notice( + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_mgrp_t *p_mgrp ); +/* +* PARAMETERS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_mgrp +* [in] Pointer to an osm_mgrp_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Multicast Group +*********/ + +END_C_DECLS + +#endif /* _OSM_MULTICAST_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_node.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node.h new file mode 100644 index 00000000..db249543 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node.h @@ -0,0 +1,957 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_node_t.
+ * This object represents an IBA node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_NODE_H_
+#define _OSM_NODE_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+struct _osm_switch;
+
+/****h* OpenSM/Node
+* NAME
+* Node
+*
+* DESCRIPTION
+* The Node object encapsulates the information needed by the
+* OpenSM to manage nodes. The OpenSM allocates one Node object
+* per node in the IBA subnet.
+*
+* The Node object is not thread safe, thus callers must provide
+* serialization.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Node/osm_node_t
+* NAME
+* osm_node_t
+*
+* DESCRIPTION
+* Node structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_node
+{
+    cl_map_item_t       map_item;
+    struct _osm_switch  *sw;
+    ib_node_info_t      node_info;
+    ib_node_desc_t      node_desc;
+    uint32_t            discovery_count;
+    uint32_t            physp_tbl_size;
+    osm_physp_t         physp_table[1];
+} osm_node_t;
+/*
+* FIELDS
+* map_item
+* Linkage structure for cl_qmap. MUST BE FIRST MEMBER!
+*
+* sw
+* For a switch node, contains a pointer to the corresponding
+* osm_switch structure. NULL for non-switch nodes. Can be used
+* for fast access to the switch object and for simple node type
+* detection.
+*
+* node_info
+* The IBA defined NodeInfo data for this node.
+*
+* node_desc
+* The IBA defined NodeDescription data for this node.
+*
+* discovery_count
+* The number of times this node has been discovered
+* during the current fabric sweep. This number is reset
+* to zero at the start of a sweep.
+*
+* physp_tbl_size
+* The size of the physp_table array. This value is one greater
+* than the number of ports in the node, since port numbers
+* start with 1 for some bizarre reason.
+*
+* physp_table
+* Array of physical port objects belonging to this node.
+* Index is contiguous by local port number.
+* For switches, port 0 is always the management port (14.2.5.6).
+* MUST BE LAST MEMBER! - since the array grows with the port count!
+*
+* SEE ALSO
+* Node object
+*********/
+
+/****f* OpenSM: Node/osm_node_destroy
+* NAME
+* osm_node_destroy
+*
+* DESCRIPTION
+* The osm_node_destroy function destroys a node, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_node_destroy(
+    IN osm_node_t *p_node );
+/*
+* PARAMETERS
+* p_node
+* [in] Pointer to a Node object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified Node object.
+* This function should only be called after a call to osm_node_new.
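+*
+* EXAMPLE
+* A typical lifetime sketch (p_madw is assumed to wrap a received
+* NodeInfo MAD; osm_node_new and osm_node_delete are declared below):
+*
+*    osm_node_t *p_node = osm_node_new( p_madw );
+*    if( p_node != NULL )
+*    {
+*        /* ... link the node into the subnet tables and use it ... */
+*        osm_node_delete( &p_node );    /* also sets p_node to NULL */
+*    }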
+* +* SEE ALSO +* Node object, osm_node_new +*********/ + +/****f* OpenSM: Node/osm_node_delete +* NAME +* osm_node_delete +* +* DESCRIPTION +* The osm_node_delete function destroys a node, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_node_delete( + IN OUT osm_node_t** const p_node ); +/* +* PARAMETERS +* p_node +* [in][out] Pointer to a Pointer a Node object to destroy. +* On return, the pointer to set to NULL. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified Node object. +* This function should only be called after a call to osm_node_new. +* +* SEE ALSO +* Node object, osm_node_new +*********/ + +/****f* OpenSM: Node/osm_node_new +* NAME +* osm_node_new +* +* DESCRIPTION +* The osm_node_new function initializes a Node object for use. +* +* SYNOPSIS +*/ +osm_node_t* +osm_node_new( + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_madw +* [in] Pointer to a osm_madw_t object containing a mad with +* the node's NodeInfo attribute. The caller may discard the +* osm_madw_t structure after calling osm_node_new. +* +* RETURN VALUES +* On success, a pointer to the new initialized osm_node_t structure. +* NULL otherwise. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_is_lid +* NAME +* osm_node_is_lid +* +* DESCRIPTION +* Indicates if the specified LID belongs to this node. +* +* SYNOPSIS +*/ +boolean_t osm_node_is_lid( + IN const osm_node_t* const p_node, + IN const uint16_t lid ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* lid +* [in] LID value. +* +* RETURN VALUES +* TRUE if the specified LID belongs to the node, +* FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_physp_ptr +* NAME +* osm_node_get_physp_ptr +* +* DESCRIPTION +* Returns a pointer to the physical port object at the +* specified local port number. +* +* SYNOPSIS +*/ +static inline osm_physp_t* +osm_node_get_physp_ptr( + IN const osm_node_t* const p_node, + IN const uint32_t port_num ) +{ + + CL_ASSERT( port_num < p_node->physp_tbl_size ); + return( (osm_physp_t*)&p_node->physp_table[port_num] ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns a pointer to the physical port object at the +* specified local port number. +* A return value of zero means the port number was out of range. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_any_physp_ptr +* NAME +* osm_node_get_any_physp_ptr +* +* DESCRIPTION +* Returns a pointer to any valid physical port object associated +* with this node. This operation is mostly meaningful for switches, +* in which case all the Physical Ports share the same GUID. +* +* SYNOPSIS +*/ +static inline osm_physp_t* +osm_node_get_any_physp_ptr( + IN const osm_node_t* const p_node ) +{ + CL_ASSERT( p_node ); + return( (osm_physp_t*)&p_node->physp_table[ + ib_node_info_get_local_port_num( &p_node->node_info )] ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* Returns a pointer to any valid physical port object associated +* with this node. This operation is mostly meaningful for switches, +* in which case all the Physical Ports share the same GUID. 
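+*
+* EXAMPLE
+* A sketch that visits every physical port of a node. It relies on
+* the physp_tbl_size bound documented above and assumes an
+* osm_physp_is_valid()-style predicate is available for slots that
+* were never initialized:
+*
+*    uint8_t i;
+*
+*    for( i = 0; i < osm_node_get_num_physp( p_node ); i++ )
+*    {
+*        osm_physp_t *p_physp = osm_node_get_physp_ptr( p_node, i );
+*        if( !osm_physp_is_valid( p_physp ) )
+*            continue;    /* skip uninitialized port slots */
+*        /* ... inspect p_physp ... */
+*    }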
+* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_any_path +* NAME +* osm_node_get_any_path +* +* DESCRIPTION +* Returns a pointer to the physical port object at the +* specified local port number. +* +* SYNOPSIS +*/ +static inline osm_dr_path_t* +osm_node_get_any_dr_path_ptr( + IN const osm_node_t* const p_node ) +{ + CL_ASSERT( p_node ); + return( osm_physp_get_dr_path_ptr( &p_node->physp_table[ + ib_node_info_get_local_port_num( &p_node->node_info )] ) ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns a pointer to the physical port object at the +* specified local port number. +* A return value of zero means the port number was out of range. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_type +* NAME +* osm_node_get_type +* +* DESCRIPTION +* Returns the type of this node. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_node_get_type( + IN const osm_node_t* const p_node ) +{ + return( p_node->node_info.node_type ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* Returns the IBA defined type of this node. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_num_physp +* NAME +* osm_node_get_num_physp +* +* DESCRIPTION +* Returns the type of this node. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_node_get_num_physp( + IN const osm_node_t* const p_node ) +{ + return( (uint8_t)p_node->physp_tbl_size ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* Returns the IBA defined type of this node. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_remote_node +* NAME +* osm_node_get_remote_node +* +* DESCRIPTION +* Returns a pointer to the node on the other end of the +* specified port. +* Returns NULL if no remote node exists. +* +* SYNOPSIS +*/ +osm_node_t* +osm_node_get_remote_node( + IN const osm_node_t* const p_node, + IN const uint8_t port_num, + OUT uint8_t *p_remote_port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to get the remote node. +* +* p_remote_port_num +* [out] Port number in the remote's node through which this +* link exists. The caller may specify NULL for this pointer +* if the port number isn't needed. +* +* RETURN VALUES +* Returns a pointer to the node on the other end of the +* specified port. +* Returns NULL if no remote node exists. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_base_lid +* NAME +* osm_node_get_base_lid +* +* DESCRIPTION +* Returns the LID value of the specified port on this node. +* +* SYNOPSIS +*/ +static inline ib_net16_t +osm_node_get_base_lid( + IN const osm_node_t* const p_node, + IN const uint32_t port_num ) +{ + CL_ASSERT( port_num < p_node->physp_tbl_size ); + return( osm_physp_get_base_lid( &p_node->physp_table[port_num] ) ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns a pointer to the physical port object at the +* specified local port number. +* A return value of zero means the port number was out of range. 
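+*
+* EXAMPLE
+* For a switch the base LID lives on port 0, while for a CA or router
+* it lives on the specific port (a sketch; port_num is assumed to be
+* a valid local port number):
+*
+*    uint8_t port = ( osm_node_get_type( p_node ) ==
+*                     IB_NODE_TYPE_SWITCH ) ? 0 : port_num;
+*
+*    printf( "base LID 0x%04X\n",
+*        cl_ntoh16( osm_node_get_base_lid( p_node, port ) ) );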
+* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_remote_base_lid +* NAME +* osm_node_get_remote_base_lid +* +* DESCRIPTION +* Returns the base LID value of the port on the other side +* of the wire from the specified port on this node. +* +* SYNOPSIS +*/ +ib_net16_t +osm_node_get_remote_base_lid( + IN const osm_node_t* const p_node, + IN const uint32_t port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns a pointer to the physical port object at the +* specified local port number. +* A return value of zero means the port number was out of range. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_remote_type +* NAME +* osm_node_get_remote_type +* +* DESCRIPTION +* Returns the type of the node on the other side +* of the wire from the specified port on this node. +* The remote node must exist. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_node_get_remote_type( + IN const osm_node_t* const p_node, + IN const uint8_t port_num ) +{ + osm_node_t *p_remote_node; + + p_remote_node = osm_node_get_remote_node( p_node, port_num, NULL ); + CL_ASSERT( p_remote_node ); + return( osm_node_get_type( p_remote_node ) ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns the type of the node on the other side +* of the wire from the specified port on this node. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_lmc +* NAME +* osm_node_get_lmc +* +* DESCRIPTION +* Returns the LMC value of the specified port on this node. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_node_get_lmc( + IN const osm_node_t* const p_node, + IN const uint32_t port_num ) +{ + CL_ASSERT( port_num < p_node->physp_tbl_size ); + return( osm_physp_get_lmc( &p_node->physp_table[port_num] ) ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Local port number. +* +* RETURN VALUES +* Returns the LMC value of the specified port on this node. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_init_physp +* NAME +* osm_node_init_physp +* +* DESCRIPTION +* Initializes a physical port for the given node. +* +* SYNOPSIS +*/ +void +osm_node_init_physp( + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* p_madw +* [in] Pointer to a osm_madw_t object containing a mad with +* the node's NodeInfo attribute as discovered through the +* Physical Port to add to the node. The caller may discard the +* osm_madw_t structure after calling osm_node_new. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Node object, Physical Port object. +*********/ + +/****f* OpenSM: Node/osm_node_discovery_count_get +* NAME +* osm_node_discovery_count_get +* +* DESCRIPTION +* Returns a pointer to the physical port object at the +* specified local port number. +* +* SYNOPSIS +*/ +static inline uint32_t +osm_node_discovery_count_get( + IN const osm_node_t* const p_node ) +{ + return( p_node->discovery_count ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* Returns the discovery count for this node. 
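+*
+* EXAMPLE
+* The intended sweep pattern (a sketch; the reset and increment
+* helpers are declared below):
+*
+*    osm_node_discovery_count_reset( p_node );    /* sweep start */
+*    /* ... discovery runs, calling osm_node_discovery_count_inc() ... */
+*    if( osm_node_discovery_count_get( p_node ) == 0 )
+*    {
+*        /* node was not seen during this sweep - possibly removed */
+*    }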
+* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_discovery_count_reset +* NAME +* osm_node_discovery_count_reset +* +* DESCRIPTION +* Resets the discovery count for this node to zero. +* This operation should be performed at the start of a sweep. +* +* SYNOPSIS +*/ +static inline void +osm_node_discovery_count_reset( + IN osm_node_t* const p_node ) +{ + p_node->discovery_count = 0; +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_discovery_count_inc +* NAME +* osm_node_discovery_count_inc +* +* DESCRIPTION +* Increments the discovery count for this node. +* +* SYNOPSIS +*/ +static inline void +osm_node_discovery_count_inc( + IN osm_node_t* const p_node ) +{ + p_node->discovery_count++; +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_get_node_guid +* NAME +* osm_node_get_node_guid +* +* DESCRIPTION +* Returns the node GUID of this node. +* +* SYNOPSIS +*/ +static inline ib_net64_t +osm_node_get_node_guid( + IN const osm_node_t* const p_node ) +{ + return( p_node->node_info.node_guid ); +} +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* RETURN VALUES +* Returns the node GUID of this node. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_link +* NAME +* osm_node_link +* +* DESCRIPTION +* Logically connects a node to another node through the specified port. +* +* SYNOPSIS +*/ +void +osm_node_link( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to create the link. +* +* p_remote_node +* [in] Pointer to the remote port object. +* +* remote_port_num +* [in] Port number in the remote's node through which to +* create this link. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_unlink +* NAME +* osm_node_unlink +* +* DESCRIPTION +* Logically disconnects a node from another node through +* the specified port. +* +* SYNOPSIS +*/ +void +osm_node_unlink( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to unlink. +* +* p_remote_node +* [in] Pointer to the remote port object. +* +* remote_port_num +* [in] Port number in the remote's node through which to unlink. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_link_exists +* NAME +* osm_node_link_exists +* +* DESCRIPTION +* Return TRUE if a link exists between the specified nodes on +* the specified ports. +* Returns FALSE otherwise. +* +* SYNOPSIS +*/ +boolean_t +osm_node_link_exists( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to check the link. +* +* p_remote_node +* [in] Pointer to the remote port object. 
+* +* remote_port_num +* [in] Port number in the remote's node through which to +* check this link. +* +* RETURN VALUES +* Return TRUE if a link exists between the specified nodes on +* the specified ports. +* Returns FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_has_any_link +* NAME +* osm_node_has_any_link +* +* DESCRIPTION +* Return TRUE if a any link exists from the specified nodes on +* the specified port. +* Returns FALSE otherwise. +* +* SYNOPSIS +*/ +boolean_t +osm_node_has_any_link( + IN osm_node_t* const p_node, + IN const uint8_t port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to check the link. +* +* RETURN VALUES +* Return TRUE if a any link exists from the specified nodes on +* the specified port. +* Returns FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +/****f* OpenSM: Node/osm_node_link_has_valid_ports +* NAME +* osm_node_link_has_valid_ports +* +* DESCRIPTION +* Return TRUE if both ports in the link are valid (initialized). +* Returns FALSE otherwise. +* +* SYNOPSIS +*/ +boolean_t +osm_node_link_has_valid_ports( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ); +/* +* PARAMETERS +* p_node +* [in] Pointer to an osm_node_t object. +* +* port_num +* [in] Port number in p_node through which to check the link. +* +* RETURN VALUES +* Return TRUE if both ports in the link are valid (initialized). +* Returns FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* Node object +*********/ + +END_C_DECLS + +#endif /* _OSM_NODE_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv.h new file mode 100644 index 00000000..6e48cd27 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv.h @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_nd_rcv_t. + * This object represents the NodeInfo Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_ND_RCV_H_ +#define _OSM_ND_RCV_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Node Description Receiver +* NAME +* Node Description Receiver +* +* DESCRIPTION +* The Node Description Receiver object encapsulates the information +* needed to receive the NodeInfo attribute from a node. +* +* The Node Description Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Node Description Receiver/osm_nd_rcv_t +* NAME +* osm_nd_rcv_t +* +* DESCRIPTION +* Node Description Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_nd_rcv +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + +} osm_nd_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Node Description Receiver object +*********/ + +/****f* OpenSM: Node Description Receiver/osm_nd_rcv_construct +* NAME +* osm_nd_rcv_construct +* +* DESCRIPTION +* This function constructs a Node Description Receiver object. +* +* SYNOPSIS +*/ +void osm_nd_rcv_construct( + IN osm_nd_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a Node Description Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_nd_rcv_init, osm_nd_rcv_destroy +* +* Calling osm_nd_rcv_construct is a prerequisite to calling any other +* method except osm_nd_rcv_init. +* +* SEE ALSO +* Node Description Receiver object, osm_nd_rcv_init, +* osm_nd_rcv_destroy +*********/ + +/****f* OpenSM: Node Description Receiver/osm_nd_rcv_destroy +* NAME +* osm_nd_rcv_destroy +* +* DESCRIPTION +* The osm_nd_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_nd_rcv_destroy( + IN osm_nd_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Description Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_nd_rcv_construct or osm_nd_rcv_init. +* +* SEE ALSO +* Node Description Receiver object, osm_nd_rcv_construct, +* osm_nd_rcv_init +*********/ + +/****f* OpenSM: Node Description Receiver/osm_nd_rcv_init +* NAME +* osm_nd_rcv_init +* +* DESCRIPTION +* The osm_nd_rcv_init function initializes a +* Node Description Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_nd_rcv_init( + IN osm_nd_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_nd_rcv_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. 
+* +* RETURN VALUES +* CL_SUCCESS if the Node Description Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Node Description Receiver methods. +* +* SEE ALSO +* Node Description Receiver object, osm_nd_rcv_construct, +* osm_nd_rcv_destroy +*********/ + +/****f* OpenSM: Node Description Receiver/osm_nd_rcv_process +* NAME +* osm_nd_rcv_process +* +* DESCRIPTION +* Process the NodeInfo attribute. +* +* SYNOPSIS +*/ +void osm_nd_rcv_process( + IN const osm_nd_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_nd_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's NodeInfo attribute. +* +* RETURN VALUES +* CL_SUCCESS if the NodeInfo processing was successful. +* +* NOTES +* This function processes a NodeInfo attribute. +* +* SEE ALSO +* Node Description Receiver, Node Description Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_ND_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv_ctrl.h new file mode 100644 index 00000000..d838a60c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_desc_rcv_ctrl.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_nd_rcv_ctrl_t. + * This object represents a controller that receives the IBA NodeDescription + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_ND_RCV_CTRL_H_ +#define _OSM_ND_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Node Description Receive Controller +* NAME +* Node Description Receive Controller +* +* DESCRIPTION +* The Node Description Receive Controller object encapsulates the information +* needed to receive the NodeDescription attribute from a node. 
+* +* The Node Description Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Node Description Receive Controller/osm_nd_rcv_ctrl_t +* NAME +* osm_nd_rcv_ctrl_t +* +* DESCRIPTION +* Node Description Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_nd_rcv_ctrl +{ + osm_nd_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_nd_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Node Description Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Node Description Receive Controller object +*********/ + +/****f* OpenSM: Node Description Receive Controller/osm_nd_rcv_ctrl_construct +* NAME +* osm_nd_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Node Description Receive Controller object. +* +* SYNOPSIS +*/ +void +osm_nd_rcv_ctrl_construct( + IN osm_nd_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Node Description Receive Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_nd_rcv_ctrl_init, osm_nd_rcv_ctrl_destroy +* +* Calling osm_nd_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_nd_rcv_ctrl_init. +* +* SEE ALSO +* Node Description Receive Controller object, osm_nd_rcv_ctrl_init, +* osm_nd_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: Node Description Receive Controller/osm_nd_rcv_ctrl_destroy +* NAME +* osm_nd_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_nd_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_nd_rcv_ctrl_destroy( + IN osm_nd_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Description Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_nd_rcv_ctrl_construct or osm_nd_rcv_ctrl_init. +* +* SEE ALSO +* Node Description Receive Controller object, osm_nd_rcv_ctrl_construct, +* osm_nd_rcv_ctrl_init +*********/ + +/****f* OpenSM: Node Description Receive Controller/osm_nd_rcv_ctrl_init +* NAME +* osm_nd_rcv_ctrl_init +* +* DESCRIPTION +* The osm_nd_rcv_ctrl_init function initializes a +* Node Description Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_nd_rcv_ctrl_init( + IN osm_nd_rcv_ctrl_t* const p_ctrl, + IN osm_nd_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_nd_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_nd_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Node Description Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Node Description Receive Controller methods. 
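+*
+* EXAMPLE
+* The usual construct-then-init sequence (a sketch; p_rcv, p_log and
+* p_disp are assumed to be already-initialized OpenSM objects, and
+* IB_SUCCESS is used since the function returns ib_api_status_t):
+*
+*    osm_nd_rcv_ctrl_t ctrl;
+*    ib_api_status_t   status;
+*
+*    osm_nd_rcv_ctrl_construct( &ctrl );
+*    status = osm_nd_rcv_ctrl_init( &ctrl, p_rcv, p_log, p_disp );
+*    if( status != IB_SUCCESS )
+*        osm_nd_rcv_ctrl_destroy( &ctrl );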
+* +* SEE ALSO +* Node Description Receive Controller object, osm_nd_rcv_ctrl_construct, +* osm_nd_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* OSM_ND_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv.h new file mode 100644 index 00000000..ca202fd9 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv.h @@ -0,0 +1,305 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_ni_rcv_t. + * This object represents the NodeInfo Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_NI_RCV_H_ +#define _OSM_NI_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Node Info Receiver +* NAME +* Node Info Receiver +* +* DESCRIPTION +* The Node Info Receiver object encapsulates the information +* needed to receive the NodeInfo attribute from a node. +* +* The Node Info Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Node Info Receiver/osm_ni_rcv_t +* NAME +* osm_ni_rcv_t +* +* DESCRIPTION +* Node Info Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_ni_rcv +{ + osm_subn_t *p_subn; + osm_req_t *p_gen_req; + osm_log_t *p_log; + osm_state_mgr_t *p_state_mgr; + cl_plock_t *p_lock; + +} osm_ni_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_gen_req_ctrl +* Pointer to the generic request controller. +* +* p_log +* Pointer to the log object. +* +* p_state_mgr +* Pointer to the State Manager object. +* +* p_lock +* Pointer to the serializing lock. 
+* +* SEE ALSO +* Node Info Receiver object +*********/ + +/****f* OpenSM: Node Info Receiver/osm_ni_rcv_construct +* NAME +* osm_ni_rcv_construct +* +* DESCRIPTION +* This function constructs a Node Info Receiver object. +* +* SYNOPSIS +*/ +void osm_ni_rcv_construct( + IN osm_ni_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Node Info Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_ni_rcv_init, osm_ni_rcv_destroy, +* and osm_ni_rcv_is_inited. +* +* Calling osm_ni_rcv_construct is a prerequisite to calling any other +* method except osm_ni_rcv_init. +* +* SEE ALSO +* Node Info Receiver object, osm_ni_rcv_init, +* osm_ni_rcv_destroy, osm_ni_rcv_is_inited +*********/ + +/****f* OpenSM: Node Info Receiver/osm_ni_rcv_destroy +* NAME +* osm_ni_rcv_destroy +* +* DESCRIPTION +* The osm_ni_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_ni_rcv_destroy( + IN osm_ni_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Info Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_ni_rcv_construct or osm_ni_rcv_init. +* +* SEE ALSO +* Node Info Receiver object, osm_ni_rcv_construct, +* osm_ni_rcv_init +*********/ + +/****f* OpenSM: Node Info Receiver/osm_ni_rcv_init +* NAME +* osm_ni_rcv_init +* +* DESCRIPTION +* The osm_ni_rcv_init function initializes a +* Node Info Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_ni_rcv_init( + IN osm_ni_rcv_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_ni_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_state_mgr +* [in] Pointer to the State Manager object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the Node Info Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Node Info Receiver methods. +* +* SEE ALSO +* Node Info Receiver object, osm_ni_rcv_construct, +* osm_ni_rcv_destroy, osm_ni_rcv_is_inited +*********/ + +/****f* OpenSM: Node Info Receiver/osm_ni_rcv_is_inited +* NAME +* osm_ni_rcv_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_ni_rcv_init. +* +* SYNOPSIS +*/ +boolean_t osm_ni_rcv_is_inited( + IN const osm_ni_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_ni_rcv_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_ni_rcv_construct or osm_ni_rcv_init must be +* called before using this function. +* +* SEE ALSO +* Node Info Receiver object, osm_ni_rcv_construct, +* osm_ni_rcv_init +*********/ + +/****f* OpenSM: Node Info Receiver/osm_ni_rcv_process +* NAME +* osm_ni_rcv_process +* +* DESCRIPTION +* Process the NodeInfo attribute. 
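+* It is normally invoked by the Node Info Receive Controller when the
+* central dispatcher delivers a NodeInfo MAD, rather than being called
+* directly.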
+*
+* SYNOPSIS
+*/
+void osm_ni_rcv_process(
+	IN const osm_ni_rcv_t* const p_ctrl,
+	IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_ni_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's NodeInfo attribute.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a NodeInfo attribute.
+*
+* SEE ALSO
+*	Node Info Receiver, Node Info Receive Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_NI_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv_ctrl.h
new file mode 100644
index 00000000..72b1f671
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_node_info_rcv_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_ni_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA NodeInfo
+ *	attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_NI_RCV_CTRL_H_
+#define _OSM_NI_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Node Info Receive Controller
+* NAME
+*	Node Info Receive Controller
+*
+* DESCRIPTION
+*	The Node Info Receive Controller object encapsulates
+*	the information needed to receive the NodeInfo attribute from a node.
+*
+*	The Node Info Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Node Info Receive Controller/osm_ni_rcv_ctrl_t
+* NAME
+*	osm_ni_rcv_ctrl_t
+*
+* DESCRIPTION
+*	Node Info Receive Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+* +* SYNOPSIS +*/ +typedef struct _osm_ni_rcv_ctrl +{ + osm_ni_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_ni_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Node Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Node Info Receive Controller object +* Node Info Receiver object +*********/ + +/****f* OpenSM: Node Info Receive Controller/osm_ni_rcv_ctrl_construct +* NAME +* osm_ni_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Node Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_ni_rcv_ctrl_construct( + IN osm_ni_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Node Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_ni_rcv_ctrl_init, osm_ni_rcv_ctrl_destroy, +* and osm_ni_rcv_ctrl_is_inited. +* +* Calling osm_ni_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_ni_rcv_ctrl_init. +* +* SEE ALSO +* Node Info Receive Controller object, osm_ni_rcv_ctrl_init, +* osm_ni_rcv_ctrl_destroy, osm_ni_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Node Info Receive Controller/osm_ni_rcv_ctrl_destroy +* NAME +* osm_ni_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_ni_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_ni_rcv_ctrl_destroy( + IN osm_ni_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Info Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_ni_rcv_ctrl_construct or osm_ni_rcv_ctrl_init. +* +* SEE ALSO +* Node Info Receive Controller object, osm_ni_rcv_ctrl_construct, +* osm_ni_rcv_ctrl_init +*********/ + +/****f* OpenSM: Node Info Receive Controller/osm_ni_rcv_ctrl_init +* NAME +* osm_ni_rcv_ctrl_init +* +* DESCRIPTION +* The osm_ni_rcv_ctrl_init function initializes a +* Node Info Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_ni_rcv_ctrl_init( + IN osm_ni_rcv_ctrl_t* const p_ctrl, + IN osm_ni_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_ni_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_ni_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Node Info Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Node Info Receive Controller methods. +* +* SEE ALSO +* Node Info Receive Controller object, osm_ni_rcv_ctrl_construct, +* osm_ni_rcv_ctrl_destroy, osm_ni_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Node Info Receive Controller/osm_ni_rcv_ctrl_is_inited +* NAME +* osm_ni_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_ni_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_ni_rcv_ctrl_is_inited( + IN const osm_ni_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_ni_rcv_ctrl_t object. 
+* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_ni_rcv_ctrl_construct or osm_ni_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Node Info Receive Controller object, osm_ni_rcv_ctrl_construct, +* osm_ni_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_NI_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_opensm.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_opensm.h new file mode 100644 index 00000000..9f20488a --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_opensm.h @@ -0,0 +1,440 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_opensm_t. + * This object represents the OpenSM super object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#ifndef _OSM_OPENSM_H_ +#define _OSM_OPENSM_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/OpenSM +* NAME +* OpenSM +* +* DESCRIPTION +* The OpenSM object encapsulates the information needed by the +* OpenSM to govern itself. The OpenSM is one OpenSM object. +* +* The OpenSM object is thread safe. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: OpenSM/osm_routing_engine +* NAME +* struct osm_routing_engine +* +* DESCRIPTION +* OpenSM routing engine module definition. 
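+*
+* EXAMPLE
+*	A hypothetical sketch (the my_* callbacks below are invented for
+*	illustration) of how a module implementing its own unicast routing
+*	might fill in the callback table defined next:
+*
+*	static int my_build_lid_matrices( void *context );
+*	static int my_build_fwd_tables( void *context );
+*	static void my_dump_tables( void *context );
+*	static void my_delete( void *context );
+*
+*	struct osm_routing_engine my_engine = {
+*		"my_engine",		/* name, used in logs */
+*		NULL,			/* context passed to each callback */
+*		my_build_lid_matrices,
+*		my_build_fwd_tables,
+*		my_dump_tables,
+*		my_delete
+*	};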
+* NOTES
+*	The routing engine structure is currently limited to the
+*	build_lid_matrices and ucast_build_fwd_tables callbacks
+*	(multicast callbacks may be added later).
+*/
+struct osm_routing_engine {
+	const char *name;
+	void *context;
+	int (*build_lid_matrices)(void *context);
+	int (*ucast_build_fwd_tables)(void *context);
+	void (*ucast_dump_tables)(void *context);
+	void (*delete)(void *context);
+};
+/*
+* FIELDS
+*	name
+*		The routing engine name (will be used in logs).
+*
+*	context
+*		The routing engine context. Will be passed as parameter
+*		to the callback functions.
+*
+*	build_lid_matrices
+*		The callback for lid matrices generation.
+*
+*	ucast_build_fwd_tables
+*		The callback for unicast forwarding table generation.
+*
+*	ucast_dump_tables
+*		The callback for dumping unicast routing tables.
+*
+*	delete
+*		The delete method; may be used to clean up routing engine
+*		internals.
+*/
+
+/****s* OpenSM: OpenSM/osm_opensm_t
+* NAME
+*	osm_opensm_t
+*
+* DESCRIPTION
+*	OpenSM structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_opensm_t
+{
+	osm_subn_t		subn;
+	osm_sm_t		sm;
+	osm_sa_t		sa;
+	osm_db_t		db;
+	osm_mad_pool_t		mad_pool;
+	osm_vendor_t		*p_vendor;
+	osm_vl15_t		vl15;
+	osm_log_t		log;
+	cl_dispatcher_t		disp;
+	cl_plock_t		lock;
+	struct osm_routing_engine routing_engine;
+	osm_stats_t		stats;
+} osm_opensm_t;
+/*
+* FIELDS
+*	subn
+*		Subnet object for this subnet.
+*
+*	sm
+*		The Subnet Manager (SM) object for this subnet.
+*
+*	sa
+*		The Subnet Administration (SA) object for this subnet.
+*
+*	db
+*		Persistent storage of some data required between sessions.
+*
+*	mad_pool
+*		Pool of Management Datagram (MAD) objects.
+*
+*	p_vendor
+*		Pointer to the Vendor specific adapter for various
+*		transport interfaces, such as UMADT, AL, etc. The
+*		particular interface is set at compile time.
+*
+*	vl15
+*		The VL15 interface.
+*
+*	log
+*		Log facility used by all OpenSM components.
+*
+*	disp
+*		Central dispatcher containing the OpenSM worker threads.
+*
+*	lock
+*		Shared lock guarding most OpenSM structures.
+*
+*	routing_engine
+*		Routing engine; initialized once and then used during
+*		subnet sweeps.
+*
+*	stats
+*		OpenSM statistics block.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_construct
+* NAME
+*	osm_opensm_construct
+*
+* DESCRIPTION
+*	This function constructs an OpenSM object.
+*
+* SYNOPSIS
+*/
+void osm_opensm_construct(
+	IN osm_opensm_t* const p_osm );
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an OpenSM object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_opensm_init and osm_opensm_destroy.
+*
+*	Calling osm_opensm_construct is a prerequisite to calling any other
+*	method except osm_opensm_init.
+*
+* SEE ALSO
+*	SM object, osm_opensm_init, osm_opensm_destroy
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_destroy
+* NAME
+*	osm_opensm_destroy
+*
+* DESCRIPTION
+*	The osm_opensm_destroy function destroys an OpenSM object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_opensm_destroy(
+	IN osm_opensm_t* const p_osm );
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an OpenSM object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified OpenSM object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_opensm_construct or
+*	osm_opensm_init.
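+*
+*	As an illustration, the usual lifecycle is sketched below (option
+*	setup is elided; this is a hedged sketch, not a complete program):
+*
+*	osm_opensm_t osm;
+*	osm_subn_opt_t opt;	/* assume opt was filled in elsewhere */
+*
+*	osm_opensm_construct( &osm );
+*	if( osm_opensm_init( &osm, &opt ) == IB_SUCCESS )
+*	{
+*		/* ... bind to a port and run ... */
+*	}
+*	osm_opensm_destroy( &osm );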
+*
+* SEE ALSO
+*	SM object, osm_opensm_construct, osm_opensm_init
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_init
+* NAME
+*	osm_opensm_init
+*
+* DESCRIPTION
+*	The osm_opensm_init function initializes an OpenSM object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_opensm_init(
+	IN osm_opensm_t* const p_osm,
+	IN const osm_subn_opt_t* const p_opt );
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object to initialize.
+*
+*	p_opt
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the OpenSM object was initialized successfully.
+*
+* NOTES
+*	Allows calling other OpenSM methods.
+*
+* SEE ALSO
+*	SM object, osm_opensm_construct, osm_opensm_destroy
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_sweep
+* NAME
+*	osm_opensm_sweep
+*
+* DESCRIPTION
+*	Initiates a subnet sweep.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_opensm_sweep(
+	IN osm_opensm_t* const p_osm )
+{
+	osm_sm_sweep( &p_osm->sm );
+}
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object on which to
+*		initiate a sweep.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*	If the OpenSM object is not bound to a port, this function
+*	does nothing.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_set_log_flags
+* NAME
+*	osm_opensm_set_log_flags
+*
+* DESCRIPTION
+*	Sets the log level.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_opensm_set_log_flags(
+	IN osm_opensm_t* const p_osm,
+	IN const osm_log_level_t log_flags )
+{
+	osm_log_set_level( &p_osm->log, log_flags );
+}
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object.
+*
+*	log_flags
+*		[in] Log level flags to set.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_bind
+* NAME
+*	osm_opensm_bind
+*
+* DESCRIPTION
+*	Binds the OpenSM object to a port GUID.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_opensm_bind(
+	IN osm_opensm_t* const p_osm,
+	IN const ib_net64_t guid );
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object to bind.
+*
+*	guid
+*		[in] Local port GUID with which to bind.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the OpenSM object was successfully bound to the port.
+*
+* NOTES
+*	A given opensm object can only be bound to one port at a time.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: OpenSM/osm_opensm_wait_for_subnet_up
+* NAME
+*	osm_opensm_wait_for_subnet_up
+*
+* DESCRIPTION
+*	Blocks the calling thread until the subnet is up.
+*
+* SYNOPSIS
+*/
+static inline cl_status_t
+osm_opensm_wait_for_subnet_up(
+	IN osm_opensm_t* const p_osm,
+	IN uint32_t const wait_us,
+	IN boolean_t const interruptible )
+{
+	return( osm_sm_wait_for_subnet_up( &p_osm->sm, wait_us, interruptible ) );
+}
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object.
+*
+*	wait_us
+*		[in] Number of microseconds to wait.
+*
+*	interruptible
+*		[in] Indicates whether the wait operation can be interrupted
+*		by external signals.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the wait operation succeeded in response to the event
+*	being set.
+*
+*	CL_TIMEOUT if the specified time period elapses.
+*
+*	CL_NOT_DONE if the wait was interrupted by an external signal.
+*
+*	CL_ERROR if the wait operation failed.
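+*
+* EXAMPLE
+*	A minimal sketch of a caller that binds and then blocks until the
+*	subnet is up (the GUID value and the 10 second timeout are
+*	placeholders, not recommendations):
+*
+*	if( osm_opensm_bind( &osm, port_guid ) == IB_SUCCESS )
+*	{
+*		if( osm_opensm_wait_for_subnet_up(
+*			&osm, 10 * 1000000, FALSE ) == CL_SUCCESS )
+*		{
+*			/* the subnet is up */
+*		}
+*	}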
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****v* OpenSM/osm_exit_flag
+*/
+extern volatile unsigned int osm_exit_flag;
+/*
+* DESCRIPTION
+*	Set to one to cause all threads to exit.
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_OPENSM_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_partition.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_partition.h
new file mode 100644
index 00000000..0076a349
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_partition.h
@@ -0,0 +1,256 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_prtn_t.
+ *	This object represents an IBA Partition.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_PARTITION_H_
+#define _OSM_PARTITION_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Partition
+* NAME
+*	Partition
+*
+* DESCRIPTION
+*	The Partition object encapsulates the information needed by the
+*	OpenSM to manage Partitions.  The OpenSM allocates one Partition
+*	object per Partition in the IBA subnet.
+*
+*	The Partition is not thread safe, thus callers must provide
+*	serialization.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Partition/osm_prtn_t
+* NAME
+*	osm_prtn_t
+*
+* DESCRIPTION
+*	Partition structure.
+*
+*	The osm_prtn_t object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_prtn
+{
+	cl_map_item_t		map_item;
+	uint16_t		pkey;
+	uint8_t			sl;
+	cl_map_t		full_guid_tbl;
+	cl_map_t		part_guid_tbl;
+	char			name[32];
+} osm_prtn_t;
+/*
+* FIELDS
+*	map_item
+*		Linkage structure for cl_qmap.  MUST BE FIRST MEMBER!
+*
+*	pkey
+*		The IBA defined P_KEY of this Partition.
+*
+*	sl
+*		The Service Level (SL) associated with this Partition.
+*
+*	full_guid_tbl
+*		Container of pointers to all Port objects in the Partition
+*		with full membership, indexed by port GUID.
+*
+*	part_guid_tbl
+*		Container of pointers to all Port objects in the Partition
+*		with limited membership, indexed by port GUID.
+*
+*	name
+*		Name of the Partition as specified in partition
+*		configuration.
+*
+* SEE ALSO
+*	Partition
+*********/
+
+/****f* OpenSM: Partition/osm_prtn_delete
+* NAME
+*	osm_prtn_delete
+*
+* DESCRIPTION
+*	This function destroys and deallocates a Partition object.
+*
+* SYNOPSIS
+*/
+void osm_prtn_delete(
+	IN OUT osm_prtn_t** const pp_prtn );
+/*
+* PARAMETERS
+*	pp_prtn
+*		[in][out] Pointer to a pointer to a Partition object to
+*		delete. On return, this pointer is NULL.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Partition object.
+*
+* SEE ALSO
+*	Partition, osm_prtn_new
+*********/
+
+/****f* OpenSM: Partition/osm_prtn_new
+* NAME
+*	osm_prtn_new
+*
+* DESCRIPTION
+*	This function allocates and initializes a Partition object.
+*
+* SYNOPSIS
+*/
+osm_prtn_t* osm_prtn_new(
+	IN const char *name,
+	IN const uint16_t pkey );
+/*
+* PARAMETERS
+*	name
+*		[in] Partition name string
+*
+*	pkey
+*		[in] Partition P_Key value
+*
+* RETURN VALUE
+*	Pointer to the initialized Partition object.
+*
+* NOTES
+*	Allows calling other partition methods.
+*
+* SEE ALSO
+*	Partition
+*********/
+
+/****f* OpenSM: Partition/osm_prtn_is_guid
+* NAME
+*	osm_prtn_is_guid
+*
+* DESCRIPTION
+*	Indicates if a port is a member of the partition.
+*
+* SYNOPSIS
+*/
+static inline
+boolean_t osm_prtn_is_guid(
+	IN const osm_prtn_t* const p_prtn,
+	IN const ib_net64_t guid )
+{
+	return (cl_map_get(&p_prtn->full_guid_tbl, guid) != NULL) ||
+		(cl_map_get(&p_prtn->part_guid_tbl, guid) != NULL);
+}
+/*
+* PARAMETERS
+*	p_prtn
+*		[in] Pointer to an osm_prtn_t object.
+*
+*	guid
+*		[in] Port GUID.
+*
+* RETURN VALUES
+*	TRUE if the specified port GUID is a member of the partition,
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Partition/osm_prtn_make_partitions
+* NAME
+*	osm_prtn_make_partitions
+*
+* DESCRIPTION
+*	Creates all partitions in the subnet.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_prtn_make_partitions(
+	IN osm_log_t * const p_log,
+	IN osm_subn_t * const p_subn);
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to subnet object.
+*
+* RETURN VALUES
+*	IB_SUCCESS on success.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PARTITION_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_path.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_path.h
new file mode 100644
index 00000000..3cc435a7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_path.h
@@ -0,0 +1,267 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_PATH_H_
+#define _OSM_PATH_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/*
+ * Abstract:
+ *	Declaration of path related objects.
+ *	These objects are part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+/****h* OpenSM/DR Path
+* NAME
+*	DR Path
+*
+* DESCRIPTION
+*	The DR Path structure encapsulates a directed route through the subnet.
+*
+*	This structure allows direct access to member variables.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: DR Path/osm_dr_path_t
+* NAME
+*	osm_dr_path_t
+*
+* DESCRIPTION
+*	Directed Route structure.
+*
+*	This structure allows direct access to member variables.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_dr_path
+{
+	osm_bind_handle_t	h_bind;
+	uint8_t			hop_count;
+	uint8_t			path[IB_SUBNET_PATH_HOPS_MAX];
+
+} osm_dr_path_t;
+/*
+* FIELDS
+*	h_bind
+*		Bind handle for port to which this path applies.
+*
+*	hop_count
+*		The number of hops in this path.
+*
+*	path
+*		The array of port numbers that comprise this path.
+*
+* SEE ALSO
+*	DR Path structure
+*********/
+/****f* OpenSM: DR Path/osm_dr_path_construct
+* NAME
+*	osm_dr_path_construct
+*
+* DESCRIPTION
+*	This function constructs a directed route path object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_dr_path_construct(
+	IN osm_dr_path_t* const p_path )
+{
+	/* The first location in the path array is reserved. */
+	memset( p_path, 0, sizeof(*p_path) );
+	p_path->h_bind = OSM_BIND_INVALID_HANDLE;
+}
+
+/*
+* PARAMETERS
+*	p_path
+*		[in] Pointer to a directed route path object to construct.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: DR Path/osm_dr_path_init
+* NAME
+*	osm_dr_path_init
+*
+* DESCRIPTION
+*	This function initializes a directed route path object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_dr_path_init(
+	IN osm_dr_path_t* const p_path,
+	IN const osm_bind_handle_t h_bind,
+	IN const uint8_t hop_count,
+	IN const uint8_t path[IB_SUBNET_PATH_HOPS_MAX] )
+{
+	/* The first location in the path array is reserved. */
+	CL_ASSERT( path[0] == 0 );
+	CL_ASSERT( hop_count < IB_SUBNET_PATH_HOPS_MAX );
+	p_path->h_bind = h_bind;
+	p_path->hop_count = hop_count;
+	memcpy( p_path->path, path, IB_SUBNET_PATH_HOPS_MAX );
+}
+
+/*
+* PARAMETERS
+*	p_path
+*		[in] Pointer to a directed route path object to initialize.
+*
+*	h_bind
+*		[in] Bind handle for the port on which this path applies.
+*
+*	hop_count
+*		[in] Hop count needed to reach this node.
+*
+*	path
+*		[in] Directed route path to reach this node.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+/****f* OpenSM: DR Path/osm_dr_path_extend
+* NAME
+*	osm_dr_path_extend
+*
+* DESCRIPTION
+*	Adds a new hop to a path.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_dr_path_extend(
+	IN osm_dr_path_t* const p_path,
+	IN const uint8_t port_num )
+{
+	p_path->hop_count++;
+	CL_ASSERT( p_path->hop_count < IB_SUBNET_PATH_HOPS_MAX );
+	/*
+	   Location 0 in the path array is reserved per IB spec.
+	*/
+	p_path->path[p_path->hop_count] = port_num;
+}
+
+/*
+* PARAMETERS
+*	p_path
+*		[in] Pointer to a directed route path object to extend.
+*
+*	port_num
+*		[in] Additional port to add to the DR path.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: DR Path/osm_dr_path_get_bind_handle
+* NAME
+*	osm_dr_path_get_bind_handle
+*
+* DESCRIPTION
+*	Gets the bind handle from a path.
+*
+* SYNOPSIS
+*/
+static inline osm_bind_handle_t
+osm_dr_path_get_bind_handle(
+	IN const osm_dr_path_t* const p_path )
+{
+	return( p_path->h_bind );
+}
+
+/*
+* PARAMETERS
+*	p_path
+*		[in] Pointer to a directed route path object to query.
+*
+* RETURN VALUE
+*	The bind handle associated with this path.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PATH_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pi_config_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pi_config_ctrl.h
new file mode 100644
index 00000000..ab5ea3a6
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pi_config_ctrl.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_pi_conf_ctrl_t.
+ *	This object represents a controller that performs a
+ *	Set(PortInfo) for the specified port.
+ *	This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_PI_CONFIG_CTRL_H_ +#define _OSM_PI_CONFIG_CTRL_H_ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Port Info Configuration Controller +* NAME +* Port Info Configuration Controller +* +* DESCRIPTION +* The Port Info Configuration Controller object encapsulates the +* information needed to Set(PortInfo) at the specified port. +* +* The Port Info Configuration Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ +/****s* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_t +* NAME +* osm_pi_conf_ctrl_t +* +* DESCRIPTION +* Port Info Configuration Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pi_conf_ctrl +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_dispatcher_t *p_disp; + +} osm_pi_conf_ctrl_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* p_disp +* Pointer to the Dispatcher. +* +* SEE ALSO +* Port Info Configuration Controller object +*********/ +/****f* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_construct +* NAME +* osm_pi_conf_ctrl_construct +* +* DESCRIPTION +* This function constructs a Port Info Configuration Controller object. +* +* SYNOPSIS +*/ +void osm_pi_conf_ctrl_construct( + IN osm_pi_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Port Info Configuration Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pi_conf_ctrl_init, osm_pi_conf_ctrl_destroy, +* and osm_pi_conf_ctrl_is_inited. +* +* Calling osm_pi_conf_ctrl_construct is a prerequisite to calling any other +* method except osm_pi_conf_ctrl_init. +* +* SEE ALSO +* Port Info Configuration Controller object, osm_pi_conf_ctrl_init, +* osm_pi_conf_ctrl_destroy, osm_pi_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_destroy +* NAME +* osm_pi_conf_ctrl_destroy +* +* DESCRIPTION +* The osm_pi_conf_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pi_conf_ctrl_destroy( + IN osm_pi_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Port Info Configuration Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pi_conf_ctrl_construct or osm_pi_conf_ctrl_init. +* +* SEE ALSO +* Port Info Configuration Controller object, osm_pi_conf_ctrl_construct, +* osm_pi_conf_ctrl_init +*********/ + +/****f* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_init +* NAME +* osm_pi_conf_ctrl_init +* +* DESCRIPTION +* The osm_pi_conf_ctrl_init function initializes a +* Port Info Configuration Controller object for use. 
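+*
+* EXAMPLE
+*	For illustration, a hedged sketch of typical initialization followed
+*	by a configuration request (the pointed-to objects and port_guid are
+*	assumed to have been set up elsewhere):
+*
+*	osm_pi_conf_ctrl_t ctrl;
+*
+*	osm_pi_conf_ctrl_construct( &ctrl );
+*	if( osm_pi_conf_ctrl_init( &ctrl, p_subn, p_log,
+*		p_lock, p_disp ) == IB_SUCCESS )
+*	{
+*		osm_pi_conf_ctrl_process( &ctrl, port_guid );
+*	}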
+* +* SYNOPSIS +*/ +ib_api_status_t osm_pi_conf_ctrl_init( + IN osm_pi_conf_ctrl_t* const p_ctrl, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_conf_ctrl_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Port Info Configuration Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Port Info Configuration Controller methods. +* +* SEE ALSO +* Port Info Configuration Controller object, osm_pi_conf_ctrl_construct, +* osm_pi_conf_ctrl_destroy, osm_pi_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_is_inited +* NAME +* osm_pi_conf_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_pi_conf_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_pi_conf_ctrl_is_inited( + IN const osm_pi_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_conf_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_pi_conf_ctrl_construct or osm_pi_conf_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Port Info Configuration Controller object, osm_pi_conf_ctrl_construct, +* osm_pi_conf_ctrl_init +*********/ + +/****f* OpenSM: Port Info Configuration Controller/osm_pi_conf_ctrl_process +* NAME +* osm_pi_conf_ctrl_process +* +* DESCRIPTION +* Initiate a PortInfo configuration. +* +* SYNOPSIS +*/ +ib_api_status_t osm_pi_conf_ctrl_process( + IN const osm_pi_conf_ctrl_t* const p_ctrl, + IN const ib_guid_t guid ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_conf_ctrl_t object. +* +* guid +* [in] GUID of port to configure. +* +* RETURN VALUES +* CL_SUCCESS if configuration processing was successfully +* initiated. +* +* NOTES +* A success status here does not indicate that +* the PortInfo configuration process completed successfully. +* +* SEE ALSO +* Port Info Configuration Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_PI_CONFIG_CTRL_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey.h new file mode 100644 index 00000000..ef8771c4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey.h @@ -0,0 +1,755 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_PKEY_H_
+#define _OSM_PKEY_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/*
+   Forward references.
+*/
+struct _osm_physp;
+struct _osm_port;
+struct _osm_subn;
+struct _osm_node;
+
+/*
+ * Abstract:
+ *	Declaration of pkey manipulation functions.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.1 $
+ */
+
+/****s* OpenSM: osm_pkey_tbl_t
+* NAME
+*	osm_pkey_tbl_t
+*
+* DESCRIPTION
+*	This object represents a pkey table. A dedicated object is required
+*	to optimize P_Key lookups in the unsorted table defined by the IB
+*	standard.
+*
+*	The osm_pkey_tbl_t object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_pkey_tbl
+{
+	cl_ptr_vector_t		blocks;
+	cl_ptr_vector_t		new_blocks;
+	cl_map_t		keys;
+	cl_qlist_t		pending;
+	uint16_t		used_blocks;
+	uint16_t		max_blocks;
+} osm_pkey_tbl_t;
+/*
+* FIELDS
+*	blocks
+*		The IBA defined blocks of pkey values, updated from the subnet.
+*
+*	new_blocks
+*		The blocks of pkey values to be used for updates by the SM.
+*
+*	keys
+*		A set holding all keys.
+*
+*	pending
+*		A list of osm_pending_pkey structs that is temporarily set by
+*		the pkey manager and used during the pkey manager algorithm
+*		only.
+*
+*	used_blocks
+*		Tracks the number of blocks having non-zero pkeys.
+*
+*	max_blocks
+*		The maximal number of blocks this partition table might hold.
+*		This value is based on node_info (for port 0 or a CA) or
+*		switch_info, updated on receiving the node_info or switch_info
+*		GetResp.
+*
+* NOTES
+*	The 'blocks' vector should be used to store pkey values obtained from
+*	the port; the SM pkey manager should not change it directly. For that
+*	purpose 'new_blocks' should be used.
+*
+*	Only pkey values stored in the 'blocks' vector are mapped by the
+*	'keys' map.
+*
+*********/
+
+/****s* OpenSM: osm_pending_pkey_t
+* NAME
+*	osm_pending_pkey_t
+*
+* DESCRIPTION
+*	This object stores temporary information on a pkey, its target block,
+*	and its index during pkey manager operation.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_pending_pkey {
+	cl_list_item_t	list_item;
+	uint16_t	pkey;
+	uint16_t	block;
+	uint8_t		index;
+	boolean_t	is_new;
+} osm_pending_pkey_t;
+/*
+* FIELDS
+*	pkey
+*		The actual P_Key.
+*
+*	block
+*		The block index based on the previous table extracted from the
+*		device.
+*
+*	index
+*		The index of the pkey within the block.
+*
+*	is_new
+*		TRUE for new P_Keys; in that case the block and index are
+*		invalid.
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_construct
+* NAME
+*	osm_pkey_tbl_construct
+*
+* DESCRIPTION
+*	Constructs the PKey table object.
+*
+* SYNOPSIS
+*/
+void osm_pkey_tbl_construct(
+	IN osm_pkey_tbl_t *p_pkey_tbl);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
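+*
+* EXAMPLE
+*	An illustrative lifecycle sketch (error handling elided; this is a
+*	hedged usage example, not part of the API contract):
+*
+*	osm_pkey_tbl_t tbl;
+*
+*	osm_pkey_tbl_construct( &tbl );
+*	if( osm_pkey_tbl_init( &tbl ) == IB_SUCCESS )
+*	{
+*		/* ... use the table ... */
+*	}
+*	osm_pkey_tbl_destroy( &tbl );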
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_init
+* NAME
+*	osm_pkey_tbl_init
+*
+* DESCRIPTION
+*	Initializes the PKey table object.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pkey_tbl_init(
+	IN osm_pkey_tbl_t *p_pkey_tbl);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_destroy
+* NAME
+*	osm_pkey_tbl_destroy
+*
+* DESCRIPTION
+*	Destroys the PKey table object.
+*
+* SYNOPSIS
+*/
+void osm_pkey_tbl_destroy(
+	IN osm_pkey_tbl_t *p_pkey_tbl);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_get_num_blocks
+* NAME
+*	osm_pkey_tbl_get_num_blocks
+*
+* DESCRIPTION
+*	Obtains the number of blocks in the IB PKey table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_pkey_tbl_get_num_blocks(
+	IN const osm_pkey_tbl_t *p_pkey_tbl )
+{
+	return((uint16_t)(cl_ptr_vector_get_size( &p_pkey_tbl->blocks )));
+}
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+* RETURN VALUES
+*	The number of blocks in the P_Key table.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_block_get
+* NAME
+*	osm_pkey_tbl_block_get
+*
+* DESCRIPTION
+*	Obtains the pointer to the IB PKey table block stored in the object.
+*
+* SYNOPSIS
+*/
+static inline ib_pkey_table_t *osm_pkey_tbl_block_get(
+	const osm_pkey_tbl_t *p_pkey_tbl, uint16_t block)
+{
+	return( (block < cl_ptr_vector_get_size(&p_pkey_tbl->blocks)) ?
+		cl_ptr_vector_get(&p_pkey_tbl->blocks, block) : NULL );
+};
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+*	block
+*		[in] The block number to get.
+*
+* RETURN VALUES
+*	Pointer to the IB PKey table block, or NULL if the block number is
+*	out of range.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_new_block_get
+* NAME
+*	osm_pkey_tbl_new_block_get
+*
+* DESCRIPTION
+*	Same as osm_pkey_tbl_block_get, but returns a block from the 'new'
+*	blocks vector.
+*
+* SYNOPSIS
+*/
+static inline ib_pkey_table_t *osm_pkey_tbl_new_block_get(
+	const osm_pkey_tbl_t *p_pkey_tbl, uint16_t block)
+{
+	return (block < cl_ptr_vector_get_size(&p_pkey_tbl->new_blocks)) ?
+		cl_ptr_vector_get(&p_pkey_tbl->new_blocks, block) : NULL;
+};
+
+/****f* OpenSM: osm_pkey_tbl_set_new_entry
+* NAME
+*	osm_pkey_tbl_set_new_entry
+*
+* DESCRIPTION
+*	Stores the given pkey in the "new" blocks array and updates the
+*	pkey map accordingly.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pkey_tbl_set_new_entry(
+	IN osm_pkey_tbl_t *p_pkey_tbl,
+	IN uint16_t block_idx,
+	IN uint8_t pkey_idx,
+	IN uint16_t pkey);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to the PKey table.
+*
+*	block_idx
+*		[in] The block index to use.
+*
+*	pkey_idx
+*		[in] The index within the block.
+*
+*	pkey
+*		[in] PKey to store.
+*
+* RETURN VALUES
+*	IB_SUCCESS if OK
+*	IB_ERROR if failed
+*
+*********/
+
+/****f* OpenSM: osm_pkey_find_next_free_entry
+* NAME
+*	osm_pkey_find_next_free_entry
+*
+* DESCRIPTION
+*	Finds the next free entry in the PKey table, starting at the given
+*	block number and index. Inspects the "new" blocks array for empty
+*	space. The caller should increment pkey_idx before the next call.
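+*
+*	For illustration, a sketch of the intended scan-and-fill usage
+*	(pkey_to_add is a placeholder value; treat this as a hedged sketch,
+*	not part of the API):
+*
+*	uint16_t block_idx = 0;
+*	uint8_t pkey_idx = 0;
+*
+*	if( osm_pkey_find_next_free_entry( p_pkey_tbl,
+*		&block_idx, &pkey_idx ) )
+*	{
+*		osm_pkey_tbl_set_new_entry( p_pkey_tbl, block_idx,
+*			pkey_idx, pkey_to_add );
+*		pkey_idx++;	/* advance before searching again */
+*	}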
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_pkey_find_next_free_entry(
+	IN osm_pkey_tbl_t *p_pkey_tbl,
+	OUT uint16_t *p_block_idx,
+	OUT uint8_t *p_pkey_idx);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to the PKey table.
+*
+*	p_block_idx
+*		[out] The block index to use.
+*
+*	p_pkey_idx
+*		[out] The index within the block to use.
+*
+* RETURN VALUES
+*	TRUE if a free entry was found.
+*	FALSE otherwise.
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_init_new_blocks
+* NAME
+*	osm_pkey_tbl_init_new_blocks
+*
+* DESCRIPTION
+*	Initializes the new_blocks vector content (allocate and clear).
+*
+* SYNOPSIS
+*/
+void osm_pkey_tbl_init_new_blocks(
+	const osm_pkey_tbl_t *p_pkey_tbl);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_get_block_and_idx
+* NAME
+*	osm_pkey_tbl_get_block_and_idx
+*
+* DESCRIPTION
+*	Sets the block index and pkey index at which the given pkey was
+*	found. Returns IB_NOT_FOUND if the pkey could not be found,
+*	IB_SUCCESS otherwise.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pkey_tbl_get_block_and_idx(
+	IN osm_pkey_tbl_t *p_pkey_tbl,
+	IN uint16_t *p_pkey,
+	OUT uint16_t *block_idx,
+	OUT uint8_t *pkey_index);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+*	p_pkey
+*		[in] Pointer to the P_Key entry searched.
+*
+*	block_idx
+*		[out] Pointer to the block index to be updated.
+*
+*	pkey_index
+*		[out] Pointer to the pkey index (in the block) to be updated.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_tbl_set
+* NAME
+*	osm_pkey_tbl_set
+*
+* DESCRIPTION
+*	Sets the PKey table block provided in the PKey object.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pkey_tbl_set(
+	IN osm_pkey_tbl_t *p_pkey_tbl,
+	IN uint16_t block,
+	IN ib_pkey_table_t *p_tbl);
+/*
+*	p_pkey_tbl
+*		[in] Pointer to osm_pkey_tbl_t object.
+*
+*	block
+*		[in] The block number to set.
+*
+*	p_tbl
+*		[in] The IB PKey block to copy to the object.
+*
+* RETURN VALUES
+*	IB_SUCCESS or IB_ERROR
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_physp_share_this_pkey
+* NAME
+*	osm_physp_share_this_pkey
+*
+* DESCRIPTION
+*	Checks if the given physical ports share the specified pkey.
+*
+* SYNOPSIS
+*/
+boolean_t osm_physp_share_this_pkey(
+	IN const struct _osm_physp * const p_physp1,
+	IN const struct _osm_physp * const p_physp2,
+	IN const ib_net16_t pkey);
+/*
+* PARAMETERS
+*
+*	p_physp1
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_physp2
+*		[in] Pointer to an osm_physp_t object.
+*
+*	pkey
+*		[in] value of P_Key to check.
+*
+* RETURN VALUES
+*	Returns TRUE if the two ports share the specified pkey,
+*	FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_physp_find_common_pkey
+* NAME
+*	osm_physp_find_common_pkey
+*
+* DESCRIPTION
+*	Returns the first matching P_Key value for the specified physical
+*	ports.
+*
+* SYNOPSIS
+*/
+ib_net16_t osm_physp_find_common_pkey(
+	IN const struct _osm_physp * const p_physp1,
+	IN const struct _osm_physp * const p_physp2 );
+/*
+* PARAMETERS
+*
+*	p_physp1
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_physp2
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the value of the first shared P_Key, or the invalid P_Key
+*	(0x0) if none is found.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_physp_share_pkey
+* NAME
+*	osm_physp_share_pkey
+*
+* DESCRIPTION
+*	Checks if the given physical ports share a pkey. A non-normative
+*	sketch of the matching rule appears below, followed by the formal
+*	statement of the rule.
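+*
+*	The following C sketch illustrates the rule (an illustration only;
+*	it assumes host byte order, while the functions below take
+*	ib_net16_t values):
+*
+*	static boolean_t pkeys_match( uint16_t pkey1, uint16_t pkey2 )
+*	{
+*		uint16_t base1 = pkey1 & 0x7FFF;
+*		uint16_t base2 = pkey2 & 0x7FFF;
+*
+*		if( base1 == 0 || base2 == 0 )
+*			return FALSE;	/* invalid P_Key */
+*		if( base1 != base2 )
+*			return FALSE;	/* different partitions */
+*		/* at least one side must be a full member (high bit set) */
+*		return (boolean_t)( ( (pkey1 | pkey2) & 0x8000 ) != 0 );
+*	}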
+*	The meaning of P_Key matching, per IBA 10.9.3, is as follows.
+*	In the following, let M_P_Key (Message P_Key) be the P_Key in the
+*	incoming packet and E_P_Key (Endnode P_Key) be the P_Key it is being
+*	compared against in the packet's destination endnode.
+*
+*	If:
+*	* neither M_P_Key nor E_P_Key are the invalid P_Key
+*	* and the low-order 15 bits of the M_P_Key match the low-order 15
+*	  bits of the E_P_Key
+*	* and the high-order bits (membership type) of the M_P_Key and
+*	  E_P_Key are not both 0 (i.e., both are not Limited members of
+*	  the partition)
+*
+*	then the P_Keys are said to match.
+*
+* SYNOPSIS
+*/
+boolean_t osm_physp_share_pkey(
+	IN osm_log_t* p_log,
+	IN const struct _osm_physp* const p_physp_1,
+	IN const struct _osm_physp* const p_physp_2 );
+
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_physp_1
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_physp_2
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the two physical ports match,
+*	FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_port_share_pkey
+* NAME
+*	osm_port_share_pkey
+*
+* DESCRIPTION
+*	Checks if the given ports (on their default physical port) share a
+*	pkey, using the IBA 10.9.3 matching rule stated above for
+*	osm_physp_share_pkey.
+*
+* SYNOPSIS
+*/
+boolean_t osm_port_share_pkey(
+	IN osm_log_t* p_log,
+	IN const struct _osm_port* const p_port_1,
+	IN const struct _osm_port* const p_port_2 );
+
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_port_1
+*		[in] Pointer to an osm_port_t object.
+*
+*	p_port_2
+*		[in] Pointer to an osm_port_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the two ports match,
+*	FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_lid_share_pkey
+* NAME
+*	osm_lid_share_pkey
+*
+* DESCRIPTION
+*	Checks if the given lids and port_numbers share a pkey, using the
+*	IBA 10.9.3 matching rule stated above for osm_physp_share_pkey.
+*
+* SYNOPSIS
+*/
+boolean_t osm_lid_share_pkey(
+	IN osm_log_t* p_log,
+	IN const struct _osm_subn* const p_subn,
+	IN const ib_net16_t lid1,
+	IN const uint8_t port_num1,
+	IN const ib_net16_t lid2,
+	IN const uint8_t port_num2 );
+
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to the subnet object for accessing of the options.
+*
+*	lid1
+*		[in] lid number of first port.
+*
+*	port_num1
+*		[in] port number of first port.
+*
+*	lid2
+*		[in] lid number of second port.
+*
+*	port_num2
+*		[in] port number of second port.
+*
+* RETURN VALUES
+*	Returns TRUE if the two physical ports identified by these
+*	lids/port_numbers match, FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_physp_has_pkey
+* NAME
+*	osm_physp_has_pkey
+*
+* DESCRIPTION
+*	Checks if the given pkey is present in the P_Key table of the given
+*	physical port, using the IBA 10.9.3 matching rule stated above for
+*	osm_physp_share_pkey.
+*
+* SYNOPSIS
+*/
+boolean_t osm_physp_has_pkey(
+	IN osm_log_t* p_log,
+	IN const ib_net16_t pkey,
+	IN const struct _osm_physp* const p_physp );
+
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	pkey
+*		[in] pkey number to look for.
+*
+*	p_physp
+*		[in] Pointer to osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if p_physp has the given pkey, FALSE otherwise.
+*
+* NOTES
+*
+*********/
+
+/****f* OpenSM: osm_pkey_get_tables
+* NAME
+*	osm_pkey_get_tables
+*
+* DESCRIPTION
+*	Sends a request to retrieve the pkey tables of the given physical
+*	port.
+*
+* SYNOPSIS
+*/
+void osm_pkey_get_tables(
+	IN osm_log_t *p_log,
+	IN osm_req_t *p_req,
+	IN osm_subn_t* const p_subn,
+	IN struct _osm_node* const p_node,
+	IN struct _osm_physp* const p_physp );
+
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to osm_log object.
+*
+*	p_req
+*		[in] Pointer to osm_req object.
+*
+*	p_subn
+*		[in] Pointer to osm_subn object.
+*
+*	p_node
+*		[in] Pointer to osm_node object.
+*
+*	p_physp
+*		[in] Pointer to osm_physp_t object.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PKEY_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_config_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_config_ctrl.h
new file mode 100644
index 00000000..070399d7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_config_ctrl.h
@@ -0,0 +1,286 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_pkey_conf_ctrl_t.
+ *	This object represents a controller that performs a
+ *	Set(P_KeyTable) for the specified port.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_PKEY_CONFIG_CTRL_H_
+#define _OSM_PKEY_CONFIG_CTRL_H_
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/P_Key Table Configuration Controller
+* NAME
+*	P_Key Table Configuration Controller
+*
+* DESCRIPTION
+*	The P_Key Table Configuration Controller object encapsulates the
+*	information needed to Set(P_KeyTable) at the specified port.
+*
+*	The P_Key Table Configuration Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+/****s* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_t
+* NAME
+*	osm_pkey_conf_ctrl_t
+*
+* DESCRIPTION
+*	P_Key Table Configuration Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_pkey_conf_ctrl
+{
+	osm_subn_t		*p_subn;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+	cl_dispatcher_t		*p_disp;
+
+} osm_pkey_conf_ctrl_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+*	p_disp
+*		Pointer to the Dispatcher.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller object
+*********/
+/****f* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_construct
+* NAME
+*	osm_pkey_conf_ctrl_construct
+*
+* DESCRIPTION
+*	This function constructs a P_Key Table Configuration Controller object.
+*
+* SYNOPSIS
+*/
+void osm_pkey_conf_ctrl_construct(
+	IN osm_pkey_conf_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to a P_Key Table Configuration Controller object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_pkey_conf_ctrl_init, osm_pkey_conf_ctrl_destroy,
+*	and osm_pkey_conf_ctrl_is_inited.
+*
+*	Calling osm_pkey_conf_ctrl_construct is a prerequisite to calling any other
+*	method except osm_pkey_conf_ctrl_init.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller object, osm_pkey_conf_ctrl_init,
+*	osm_pkey_conf_ctrl_destroy, osm_pkey_conf_ctrl_is_inited
+*********/
+
+/****f* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_destroy
+* NAME
+*	osm_pkey_conf_ctrl_destroy
+*
+* DESCRIPTION
+*	The osm_pkey_conf_ctrl_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_pkey_conf_ctrl_destroy(
+	IN osm_pkey_conf_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	P_Key Table Configuration Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_pkey_conf_ctrl_construct or osm_pkey_conf_ctrl_init.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller object, osm_pkey_conf_ctrl_construct,
+*	osm_pkey_conf_ctrl_init
+*********/
+
+/****f* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_init
+* NAME
+*	osm_pkey_conf_ctrl_init
+*
+* DESCRIPTION
+*	The osm_pkey_conf_ctrl_init function initializes a
+*	P_Key Table Configuration Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_pkey_conf_ctrl_init(
+	IN osm_pkey_conf_ctrl_t* const p_ctrl,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_pkey_conf_ctrl_t object to initialize.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the P_Key Table Configuration Controller object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other P_Key Table Configuration Controller methods.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller object, osm_pkey_conf_ctrl_construct,
+*	osm_pkey_conf_ctrl_destroy, osm_pkey_conf_ctrl_is_inited
+*********/
+
+/****f* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_is_inited
+* NAME
+*	osm_pkey_conf_ctrl_is_inited
+*
+* DESCRIPTION
+*	Indicates if the object has been initialized with osm_pkey_conf_ctrl_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_pkey_conf_ctrl_is_inited(
+	IN const osm_pkey_conf_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_pkey_conf_ctrl_t object.
+*
+* RETURN VALUES
+*	TRUE if the object was initialized successfully,
+*	FALSE otherwise.
+*
+* NOTES
+*	The osm_pkey_conf_ctrl_construct or osm_pkey_conf_ctrl_init must be
+*	called before using this function.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller object, osm_pkey_conf_ctrl_construct,
+*	osm_pkey_conf_ctrl_init
+*********/
+
+/****f* OpenSM: P_Key Table Configuration Controller/osm_pkey_conf_ctrl_process
+* NAME
+*	osm_pkey_conf_ctrl_process
+*
+* DESCRIPTION
+*	Initiate a P_KeyTable configuration.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_pkey_conf_ctrl_process(
+	IN const osm_pkey_conf_ctrl_t* const p_ctrl,
+	IN const ib_guid_t guid );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_pkey_conf_ctrl_t object.
+*
+*	guid
+*		[in] GUID of port to configure.
+*
+* RETURN VALUES
+*	CL_SUCCESS if configuration processing was successfully
+*	initiated.
+*
+* NOTES
+*	A success status here does not indicate that
+*	the P_Key Table configuration completed successfully.
+*
+* SEE ALSO
+*	P_Key Table Configuration Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_PKEY_CONFIG_CTRL_H_ */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_mgr.h
new file mode 100644
index 00000000..b750503c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_mgr.h
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * 	Prototype for osm_pkey_mgr_process() function
+ *	This is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * 	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_PKEY_MGR_H_
+#define _OSM_PKEY_MGR_H_
+
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****f* OpenSM: P_Key Manager/osm_pkey_mgr_process
+* NAME
+*	osm_pkey_mgr_process
+*
+* DESCRIPTION
+*	This function enforces the P_Key rules on the SM database.
+*
+* SYNOPSIS
+*/
+osm_signal_t
+osm_pkey_mgr_process(
+	IN osm_opensm_t *p_osm );
+/*
+* PARAMETERS
+*	p_osm
+*		[in] Pointer to an osm_opensm_t object.
+*
+* RETURN VALUES
+*	An osm_signal_t value indicating the outcome of the pkey
+*	manager pass.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_PKEY_MGR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv.h
new file mode 100644
index 00000000..5099e0e3
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_PKEY_RCV_H_
+#define _OSM_PKEY_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/P_Key Receiver
+* NAME
+*	P_Key Receiver
+*
+* DESCRIPTION
+*	The P_Key Receiver object encapsulates the information
+*	needed to receive the P_KeyTable attribute from a port.
+*
+*	The P_Key Receiver object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Yael Kalka, Mellanox
+*
+*********/
+
+/****s* OpenSM: P_Key Receiver/osm_pkey_rcv_t
+* NAME
+*	osm_pkey_rcv_t
+*
+* DESCRIPTION
+*	P_Key Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_pkey_rcv
+{
+	osm_subn_t		*p_subn;
+	osm_req_t		*p_req;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+
+} osm_pkey_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_req
+*		Pointer to the generic attribute request object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+* SEE ALSO
+*	P_Key Receiver object
+*********/
+
+/****f* OpenSM: P_Key Receiver/osm_pkey_rcv_construct
+* NAME
+*	osm_pkey_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs a P_Key Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_pkey_rcv_construct(
+	IN osm_pkey_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to a P_Key Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_pkey_rcv_destroy.
+*
+*	Calling osm_pkey_rcv_construct is a prerequisite to calling any other
+*	method except osm_pkey_rcv_init.
+*
+* SEE ALSO
+*	P_Key Receiver object, osm_pkey_rcv_init,
+*	osm_pkey_rcv_destroy
+*********/
+
+/****f* OpenSM: P_Key Receiver/osm_pkey_rcv_destroy
+* NAME
+*	osm_pkey_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_pkey_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_pkey_rcv_destroy(
+	IN osm_pkey_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	P_Key Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_pkey_rcv_construct or osm_pkey_rcv_init.
+*
+* SEE ALSO
+*	P_Key Receiver object, osm_pkey_rcv_construct,
+*	osm_pkey_rcv_init
+*********/
+
+/****f* OpenSM: P_Key Receiver/osm_pkey_rcv_init
+* NAME
+*	osm_pkey_rcv_init
+*
+* DESCRIPTION
+*	The osm_pkey_rcv_init function initializes a
+*	P_Key Receiver object for use.
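+*
+*	A minimal usage sketch (illustrative only, not part of the original
+*	header; it assumes p_req, p_subn, p_log and p_lock already point to
+*	initialized objects elsewhere in OpenSM):
+*
+*		osm_pkey_rcv_t rcv;
+*
+*		osm_pkey_rcv_construct( &rcv );
+*		if( osm_pkey_rcv_init( &rcv, p_req, p_subn,
+*			p_log, p_lock ) != IB_SUCCESS )
+*			osm_pkey_rcv_destroy( &rcv );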
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_pkey_rcv_init(
+	IN osm_pkey_rcv_t* const p_ctrl,
+	IN osm_req_t* const p_req,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_pkey_rcv_t object to initialize.
+*
+*	p_req
+*		[in] Pointer to an osm_req_t object.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	CL_SUCCESS if the P_Key Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other P_Key Receiver methods.
+*
+* SEE ALSO
+*	P_Key Receiver object, osm_pkey_rcv_construct,
+*	osm_pkey_rcv_destroy
+*********/
+
+/****f* OpenSM: P_Key Receiver/osm_pkey_rcv_process
+* NAME
+*	osm_pkey_rcv_process
+*
+* DESCRIPTION
+*	Process the P_KeyTable attribute.
+*
+* SYNOPSIS
+*/
+void osm_pkey_rcv_process(
+	IN const osm_pkey_rcv_t* const p_ctrl,
+	IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_pkey_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the port's P_KeyTable attribute.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*	This function processes a P_KeyTable attribute.
+*
+* SEE ALSO
+*	P_Key Receiver, P_Key Response Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_PKEY_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv_ctrl.h
new file mode 100644
index 00000000..20700d36
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_pkey_rcv_ctrl.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_PKEY_RCV_CTRL_H_
+#define _OSM_PKEY_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/P_Key Table Receive Controller
+* NAME
+*	P_Key Receive Controller
+*
+* DESCRIPTION
+*	The P_Key Receive Controller object encapsulates
+*	the information needed to get or set the P_Key table of a port.
+*
+*	The P_Key Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Yael Kalka, Mellanox
+*
+*********/
+
+/****s* OpenSM: P_Key Receive Controller/osm_pkey_rcv_ctrl_t
+* NAME
+*	osm_pkey_rcv_ctrl_t
+*
+* DESCRIPTION
+*	P_Key Receive Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_pkey_rcv_ctrl
+{
+	osm_pkey_rcv_t		*p_rcv;
+	osm_log_t		*p_log;
+	cl_dispatcher_t		*p_disp;
+	cl_disp_reg_handle_t	h_disp;
+
+} osm_pkey_rcv_ctrl_t;
+/*
+* FIELDS
+*	p_rcv
+*		Pointer to the P_Key Receiver object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_disp
+*		Pointer to the Dispatcher.
+*
+*	h_disp
+*		Handle returned from dispatcher registration.
+*
+* SEE ALSO
+*	P_Key Receive Controller object
+*	P_Key Receiver object
+*********/
+
+/****f* OpenSM: P_Key Receive Controller/osm_pkey_rcv_ctrl_construct
+* NAME
+*	osm_pkey_rcv_ctrl_construct
+*
+* DESCRIPTION
+*	This function constructs a P_Key Receive Controller object.
+*
+* SYNOPSIS
+*/
+void osm_pkey_rcv_ctrl_construct(
+	IN osm_pkey_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to a P_Key Receive Controller
+*		object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_pkey_rcv_ctrl_init, osm_pkey_rcv_ctrl_destroy,
+*	and osm_pkey_rcv_ctrl_is_inited.
+*
+*	Calling osm_pkey_rcv_ctrl_construct is a prerequisite to calling any other
+*	method except osm_pkey_rcv_ctrl_init.
+*
+* SEE ALSO
+*	P_Key Receive Controller object, osm_pkey_rcv_ctrl_init,
+*	osm_pkey_rcv_ctrl_destroy, osm_pkey_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: P_Key Receive Controller/osm_pkey_rcv_ctrl_destroy
+* NAME
+*	osm_pkey_rcv_ctrl_destroy
+*
+* DESCRIPTION
+*	The osm_pkey_rcv_ctrl_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_pkey_rcv_ctrl_destroy(
+	IN osm_pkey_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	P_Key Receive Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_pkey_rcv_ctrl_construct or osm_pkey_rcv_ctrl_init.
+*
+* SEE ALSO
+*	P_Key Receive Controller object, osm_pkey_rcv_ctrl_construct,
+*	osm_pkey_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: P_Key Receive Controller/osm_pkey_rcv_ctrl_init
+* NAME
+*	osm_pkey_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_pkey_rcv_ctrl_init function initializes a
+*	P_Key Receive Controller object for use.
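+*
+*	Illustrative sketch only (not part of the original header): the
+*	controller is typically brought up right after its receiver, with
+*	p_rcv, p_log and p_disp assumed to be initialized elsewhere.
+*
+*		osm_pkey_rcv_ctrl_t ctrl;
+*
+*		osm_pkey_rcv_ctrl_construct( &ctrl );
+*		if( osm_pkey_rcv_ctrl_init( &ctrl, p_rcv,
+*			p_log, p_disp ) != IB_SUCCESS )
+*			osm_pkey_rcv_ctrl_destroy( &ctrl );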
+* +* SYNOPSIS +*/ +ib_api_status_t osm_pkey_rcv_ctrl_init( + IN osm_pkey_rcv_ctrl_t* const p_ctrl, + IN osm_pkey_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pkey_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_pkey_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the P_Key Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other P_Key Receive Controller methods. +* +* SEE ALSO +* P_Key Receive Controller object, osm_pkey_rcv_ctrl_construct, +* osm_pkey_rcv_ctrl_destroy, osm_pkey_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: P_Key Receive Controller/osm_pkey_rcv_ctrl_is_inited +* NAME +* osm_pkey_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_pkey_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_pkey_rcv_ctrl_is_inited( + IN const osm_pkey_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pkey_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_pkey_rcv_ctrl_construct or osm_pkey_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* P_Key Receive Controller object, osm_pkey_rcv_ctrl_construct, +* osm_pkey_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_PKEY_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_port.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port.h new file mode 100644 index 00000000..b72b16d0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port.h @@ -0,0 +1,2122 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of port related objects. + * These objects comprise an IBA port. + * These objects are part of the OpenSM family of objects. 
+ *
+ * Environment:
+ * 	Linux User Mode
+ *
+ * $Revision: 1.8 $
+ */
+
+#ifndef _OSM_PORT_H_
+#define _OSM_PORT_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/*
+	Forward references.
+*/
+struct _osm_port;
+struct _osm_node;
+
+/****h* OpenSM/Physical Port
+* NAME
+*	Physical Port
+*
+* DESCRIPTION
+*	The Physical Port object encapsulates the information needed by the
+*	OpenSM to manage physical ports. The OpenSM allocates one Physical Port
+*	per physical port in the IBA subnet.
+*
+*	In a switch, multiple Physical Port objects share the same port GUID.
+*	In an end-point, Physical Ports do not share GUID values.
+*
+*	The Physical Port is not thread safe, thus callers must provide
+*	serialization.
+*
+*	These objects should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Physical Port/osm_physp_t
+* NAME
+*	osm_physp_t
+*
+* DESCRIPTION
+*	This object represents a physical port on a switch, router or end-point.
+*
+*	The osm_physp_t object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_physp
+{
+	ib_port_info_t		port_info;
+	ib_net64_t		port_guid;
+	uint8_t			port_num;
+	struct _osm_node	*p_node;
+	struct _osm_physp	*p_remote_physp;
+	boolean_t		healthy;
+	osm_dr_path_t		dr_path;
+	osm_pkey_tbl_t		pkeys;
+	ib_vl_arb_table_t	vl_arb[4];
+	cl_ptr_vector_t		slvl_by_port;
+	boolean_t		got_set_resp;
+} osm_physp_t;
+/*
+* FIELDS
+*	port_info
+*		The IBA defined PortInfo data for this port.
+*
+*	port_guid
+*		Port GUID value of this port. For switches,
+*		all ports share the same GUID value.
+*
+*	port_num
+*		The port number of this port. The PortInfo also
+*		contains a port_number, but that number is not
+*		the port number of this port, but rather the number
+*		of the port that received the SMP during discovery.
+*		Therefore, we must keep a separate record for this
+*		port's port number.
+*
+*	p_node
+*		Pointer to the parent Node object of this Physical Port.
+*
+*	p_remote_physp
+*		Pointer to the Physical Port on the other side of the wire.
+*		If this pointer is NULL no link exists at this port.
+*
+*	healthy
+*		Tracks the health of the port. Normally should be TRUE but
+*		might change as a result of incoming traps indicating that
+*		the port's health is questionable.
+*
+*	dr_path
+*		The directed route path to this port.
+*
+*	pkeys
+*		osm_pkey_tbl_t object holding the port PKeys.
+*
+*	vl_arb[]
+*		Each Physical Port has 4 sections of VL Arbitration table.
+*
+*	slvl_by_port
+*		A vector of pointers to the sl2vl tables (ordered by input port).
+*		On switches there is an entry for every input port (including
+*		SMA port 0). On CAs there is only one entry per port.
+*
+*	got_set_resp
+*		Marks whether we got a PortInfoSetResp from this port.
+*		This is used for minimizing the number of PortInfoSet requests sent.
+*		If we already got a set response from this port, then we will
+*		send a PortInfoSet only if the values we are updating are
+*		different than the ones on the port. If we haven't gotten a set
+*		response, then we want to send the request anyway, since a new
+*		SM needs to send at least one PortInfoSet request to every port.
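+*
+*		For illustration only (hypothetical logic, not code from this
+*		header), a sweep could use got_set_resp to skip redundant sets,
+*		where send_set_request stands in for whatever issues the MAD:
+*
+*			if( !p_physp->got_set_resp ||
+*				memcmp( &new_pi, &p_physp->port_info,
+*					sizeof(new_pi) ) )
+*				send_set_request( p_physp );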
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_construct
+* NAME
+*	osm_physp_construct
+*
+* DESCRIPTION
+*	Constructs a Physical Port.
+*
+* SYNOPSIS
+*/
+void
+osm_physp_construct(
+	IN osm_physp_t* const p_physp );
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object to initialize.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_init
+* NAME
+*	osm_physp_init
+*
+* DESCRIPTION
+*	Initializes a Physical Port for use.
+*
+* SYNOPSIS
+*/
+void
+osm_physp_init(
+	IN osm_physp_t* const p_physp,
+	IN const ib_net64_t port_guid,
+	IN const uint8_t port_num,
+	IN const struct _osm_node* const p_node,
+	IN const osm_bind_handle_t h_bind,
+	IN const uint8_t hop_count,
+	IN const uint8_t* const p_initial_path );
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object to initialize.
+*
+*	port_guid
+*		[in] GUID value of this port. Switch ports all share
+*		the same value.
+*		Caller should use 0 if the guid is unknown.
+*
+*	port_num
+*		[in] The port number of this port.
+*
+*	p_node
+*		[in] Pointer to the parent Node object of this Physical Port.
+*
+*	h_bind
+*		[in] Bind handle on which this port is accessed.
+*		Caller should use OSM_INVALID_BIND_HANDLE if the bind
+*		handle to this port is unknown.
+*
+*	hop_count
+*		[in] Directed route hop count to reach this port.
+*		Caller should use 0 if the hop count is unknown.
+*
+*	p_initial_path
+*		[in] Pointer to the directed route path to reach this node.
+*		Caller should use NULL if the path is unknown.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_destroy
+* NAME
+*	osm_physp_destroy
+*
+* DESCRIPTION
+*	This function destroys a Physical Port object.
+*
+* SYNOPSIS
+*/
+void
+osm_physp_destroy(
+	IN osm_physp_t* const p_physp );
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to a PhysPort object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified PhysPort object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_physp_construct or
+*	osm_physp_init.
+*
+* SEE ALSO
+*	Physical Port, osm_physp_construct, osm_physp_init
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_is_valid
+* NAME
+*	osm_physp_is_valid
+*
+* DESCRIPTION
+*	Returns TRUE if the Physical Port has been successfully initialized.
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_physp_is_valid(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	return( p_physp->port_guid != 0 );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the Physical Port has been successfully initialized.
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_is_healthy
+* NAME
+*	osm_physp_is_healthy
+*
+* DESCRIPTION
+*	Returns TRUE if the Physical Port has been marked as healthy.
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_physp_is_healthy(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	return( p_physp->healthy );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the Physical Port has been marked as healthy.
+*	FALSE otherwise.
+*	All physical ports are initialized as "healthy" but may be marked
+*	otherwise if a received trap claims otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_link_is_healthy
+* NAME
+*	osm_link_is_healthy
+*
+* DESCRIPTION
+*	Returns TRUE if the link given by the physical port is healthy,
+*	and FALSE otherwise. A link is healthy if both of its physical
+*	ports are healthy.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_link_is_healthy(
+	IN const osm_physp_t* const p_physp );
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	TRUE if both physical ports on the link are healthy, and FALSE otherwise.
+*	All physical ports are initialized as "healthy" but may be marked
+*	otherwise if a received trap claims otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_set_health
+* NAME
+*	osm_physp_set_health
+*
+* DESCRIPTION
+*	Sets the port health flag. TRUE means the port is healthy and
+*	should be used for packet routing. FALSE means it should be avoided.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_set_health(
+	IN osm_physp_t* const p_physp,
+	IN boolean_t is_healthy )
+{
+	CL_ASSERT( p_physp );
+	p_physp->healthy = is_healthy;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	is_healthy
+*		[in] The health value to be assigned to the port.
+*		TRUE if the Physical Port should be marked as healthy,
+*		FALSE otherwise.
+*
+* RETURN VALUES
+*	NONE
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_set_port_info
+* NAME
+*	osm_physp_set_port_info
+*
+* DESCRIPTION
+*	Copies the PortInfo attribute into the Physical Port object
+*	based on the PortState.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_set_port_info(
+	IN osm_physp_t* const p_physp,
+	IN const ib_port_info_t* const p_pi )
+{
+	CL_ASSERT( p_pi );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+
+	if (ib_port_info_get_port_state(p_pi) == IB_LINK_DOWN)
+	{
+		/* If PortState is down, only copy PortState */
+		/* and PortPhysicalState per C14-24-2.1 */
+		ib_port_info_set_port_state(&p_physp->port_info, IB_LINK_DOWN);
+		ib_port_info_set_port_phys_state(
+			ib_port_info_get_port_phys_state(p_pi), &p_physp->port_info);
+	}
+	else
+	{
+		p_physp->port_info = *p_pi;
+	}
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_pi
+*		[in] Pointer to the IBA defined PortInfo at this port number.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_trim_base_lid_to_valid_range
+* NAME
+*	osm_physp_trim_base_lid_to_valid_range
+*
+* DESCRIPTION
+*	Validates the base LID in the Physical Port object
+*	and resets it if the base LID is invalid.
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t
+osm_physp_trim_base_lid_to_valid_range(
+	IN osm_physp_t* const p_physp )
+{
+	ib_net16_t orig_lid = 0;
+
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	if ( ( cl_ntoh16( p_physp->port_info.base_lid ) > IB_LID_UCAST_END_HO ) ||
+	     ( cl_ntoh16( p_physp->port_info.base_lid ) < IB_LID_UCAST_START_HO ) )
+	{
+		orig_lid = p_physp->port_info.base_lid;
+		p_physp->port_info.base_lid = 0;
+	}
+	return orig_lid;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns 0 if the base LID in the Physical Port object is valid.
+*	Returns the original invalid LID otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_set_pkey_tbl
+* NAME
+*	osm_physp_set_pkey_tbl
+*
+* DESCRIPTION
+*	Copies the P_Key table into the Physical Port object.
+*
+* SYNOPSIS
+*/
+void
+osm_physp_set_pkey_tbl( IN osm_log_t* p_log,
+			IN const osm_subn_t* p_subn,
+			IN osm_physp_t* const p_physp,
+			IN ib_pkey_table_t *p_pkey_tbl,
+			IN uint16_t block_num );
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to the subnet data structure.
+*
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_pkey_tbl
+*		[in] Pointer to the IBA defined P_Key table for this port
+*		number.
+*
+*	block_num
+*		[in] The part of the P_Key table as defined in the IBA
+*		(valid values 0-2047, further limited by the PartitionCap).
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_pkey_tbl
+* NAME
+*	osm_physp_get_pkey_tbl
+*
+* DESCRIPTION
+*	Returns a pointer to the P_Key table object of the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline const osm_pkey_tbl_t *
+osm_physp_get_pkey_tbl( IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	/*
+	   (14.2.5.7) - the block number valid values are 0-2047, and are further
+	   limited by the size of the P_Key table specified by the PartitionCap on the node.
+	*/
+	return( &p_physp->pkeys );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	The pointer to the P_Key table object.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_mod_pkey_tbl
+* NAME
+*	osm_physp_get_mod_pkey_tbl
+*
+* DESCRIPTION
+*	Returns a NON CONST pointer to the P_Key table object of the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline osm_pkey_tbl_t *
+osm_physp_get_mod_pkey_tbl( IN osm_physp_t* const p_physp )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	/*
+	   (14.2.5.7) - the block number valid values are 0-2047, and are further
+	   limited by the size of the P_Key table specified by the PartitionCap on the node.
+	*/
+	return( &p_physp->pkeys );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	The pointer to the P_Key table object.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_set_slvl_tbl
+* NAME
+*	osm_physp_set_slvl_tbl
+*
+* DESCRIPTION
+*	Copies the SLtoVL attribute into the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_set_slvl_tbl( IN osm_physp_t* const p_physp,
+			IN ib_slvl_table_t *p_slvl_tbl,
+			IN uint8_t in_port_num )
+{
+	ib_slvl_table_t *p_tbl;
+
+	CL_ASSERT( p_slvl_tbl );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	p_tbl = cl_ptr_vector_get(&p_physp->slvl_by_port, in_port_num);
+	*p_tbl = *p_slvl_tbl;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_slvl_tbl
+*		[in] Pointer to the IBA defined SLtoVL map table for this port number.
+*
+*	in_port_num
+*		[in] Input Port Number for this SLtoVL.
+*
+* RETURN VALUES
+*	This function does not return a value.
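+*
+*	Sketch (illustrative only, not part of the original header): copying
+*	a received SL2VL map into the physp for input port 1, then reading
+*	it back with the getter declared below:
+*
+*		osm_physp_set_slvl_tbl( p_physp, p_slvl_tbl, 1 );
+*		p_tbl = osm_physp_get_slvl_tbl( p_physp, 1 );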
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_slvl_tbl
+* NAME
+*	osm_physp_get_slvl_tbl
+*
+* DESCRIPTION
+*	Returns a pointer to the SLtoVL attribute of the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline ib_slvl_table_t *
+osm_physp_get_slvl_tbl( IN const osm_physp_t* const p_physp,
+			IN uint8_t in_port_num )
+{
+	ib_slvl_table_t *p_tbl;
+
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	p_tbl = cl_ptr_vector_get(&p_physp->slvl_by_port, in_port_num);
+	return(p_tbl);
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	in_port_num
+*		[in] Input Port Number for this SLtoVL.
+*
+* RETURN VALUES
+*	The pointer to the slvl table.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_set_vla_tbl
+* NAME
+*	osm_physp_set_vla_tbl
+*
+* DESCRIPTION
+*	Copies the VL Arbitration attribute into the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_set_vla_tbl( IN osm_physp_t* const p_physp,
+			IN ib_vl_arb_table_t *p_vla_tbl,
+			IN uint8_t block_num )
+{
+	CL_ASSERT( p_vla_tbl );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	CL_ASSERT( (1 <= block_num) && (block_num <= 4));
+	p_physp->vl_arb[block_num - 1] = *p_vla_tbl;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_vla_tbl
+*		[in] Pointer to the IBA defined VL Arbitration table for this port number.
+*
+*	block_num
+*		[in] The part of the VL arbitration as defined in the IBA
+*		(valid values 1-4)
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_vla_tbl
+* NAME
+*	osm_physp_get_vla_tbl
+*
+* DESCRIPTION
+*	Returns a pointer to the VL Arbitration table of the Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline ib_vl_arb_table_t *
+osm_physp_get_vla_tbl( IN osm_physp_t* const p_physp,
+			IN uint8_t block_num )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	CL_ASSERT( (1 <= block_num) && (block_num <= 4));
+	return(& (p_physp->vl_arb[block_num - 1]));
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	block_num
+*		[in] The part of the VL arbitration as defined in the IBA
+*		(valid values 1-4)
+*
+* RETURN VALUES
+*	The pointer to the VL Arbitration table.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_remote
+* NAME
+*	osm_physp_get_remote
+*
+* DESCRIPTION
+*	Returns a pointer to the Physical Port on the other side of the wire.
+*
+* SYNOPSIS
+*/
+static inline osm_physp_t*
+osm_physp_get_remote(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( p_physp->p_remote_physp );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to the Physical Port on the other side of
+*	the wire. A return value of NULL means there is no link at this port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_port_guid
+* NAME
+*	osm_physp_get_port_guid
+*
+* DESCRIPTION
+*	Returns the port guid of this physical port.
+*
+* SYNOPSIS
+*/
+static inline ib_net64_t
+osm_physp_get_port_guid(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( p_physp->port_guid );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the port guid of this physical port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_subnet_prefix
+* NAME
+*	osm_physp_get_subnet_prefix
+*
+* DESCRIPTION
+*	Returns the subnet prefix for this physical port.
+*
+* SYNOPSIS
+*/
+static inline ib_net64_t
+osm_physp_get_subnet_prefix(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( p_physp->port_info.subnet_prefix );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the subnet prefix for this physical port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_link_exists
+* NAME
+*	osm_physp_link_exists
+*
+* DESCRIPTION
+*	Returns TRUE if the Physical Port has a link to the specified port.
+*	FALSE otherwise.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_physp_link_exists(
+	IN const osm_physp_t* const p_physp,
+	IN const osm_physp_t* const p_remote_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	CL_ASSERT( p_remote_physp );
+	CL_ASSERT( osm_physp_is_valid( p_remote_physp ) );
+	return( (p_physp->p_remote_physp == p_remote_physp ) &&
+		(p_remote_physp->p_remote_physp == p_physp ) );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+*	p_remote_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the Physical Port has a link to the specified port.
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_link
+* NAME
+*	osm_physp_link
+*
+* DESCRIPTION
+*	Sets the pointers to the Physical Ports on the other side of the wire.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_link(
+	IN osm_physp_t* const p_physp,
+	IN osm_physp_t* const p_remote_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( p_remote_physp );
+	p_physp->p_remote_physp = p_remote_physp;
+	p_remote_physp->p_remote_physp = p_physp;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object to link.
+*
+*	p_remote_physp
+*		[in] Pointer to the adjacent osm_physp_t object to link.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_unlink
+* NAME
+*	osm_physp_unlink
+*
+* DESCRIPTION
+*	Clears the pointers to the Physical Port on the other side of the wire.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_physp_unlink(
+	IN osm_physp_t* const p_physp,
+	IN osm_physp_t* const p_remote_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( p_remote_physp );
+	CL_ASSERT( osm_physp_link_exists( p_physp, p_remote_physp ) );
+	p_physp->p_remote_physp = NULL;
+	p_remote_physp->p_remote_physp = NULL;
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object to unlink.
+*
+*	p_remote_physp
+*		[in] Pointer to the adjacent osm_physp_t object to unlink.
+*
+* RETURN VALUES
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_has_any_link
+* NAME
+*	osm_physp_has_any_link
+*
+* DESCRIPTION
+*	Returns TRUE if the Physical Port has a link to another port.
+*	FALSE otherwise.
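+*
+*	For example (illustrative only, not part of the original header),
+*	a discovery pass that has matched the two ends of a cable might
+*	record and later test the link as:
+*
+*		osm_physp_link( p_physp, p_remote_physp );
+*		CL_ASSERT( osm_physp_link_exists( p_physp, p_remote_physp ) );
+*		if( osm_physp_has_any_link( p_physp ) )
+*			p_far_end = osm_physp_get_remote( p_physp );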
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_physp_has_any_link(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	if( osm_physp_is_valid( p_physp ) )
+		return( p_physp->p_remote_physp != NULL );
+	else
+		return( FALSE );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns TRUE if the Physical Port has a link to another port.
+*	FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port, Physical Port
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_port_num
+* NAME
+*	osm_physp_get_port_num
+*
+* DESCRIPTION
+*	Returns the local port number of this Physical Port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_physp_get_port_num(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( p_physp->port_num );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the local port number of this Physical Port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_port_info_ptr
+* NAME
+*	osm_physp_get_port_info_ptr
+*
+* DESCRIPTION
+*	Returns a pointer to the PortInfo attribute for this port.
+*
+* SYNOPSIS
+*/
+static inline ib_port_info_t*
+osm_physp_get_port_info_ptr(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( (ib_port_info_t*)&p_physp->port_info );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to the PortInfo attribute for this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_node_ptr
+* NAME
+*	osm_physp_get_node_ptr
+*
+* DESCRIPTION
+*	Returns a pointer to the parent Node object for this port.
+*
+* SYNOPSIS
+*/
+static inline struct _osm_node*
+osm_physp_get_node_ptr(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( (struct _osm_node*)p_physp->p_node );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to the parent Node object for this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_port_state
+* NAME
+*	osm_physp_get_port_state
+*
+* DESCRIPTION
+*	Returns the port state of this Physical Port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_physp_get_port_state(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( ib_port_info_get_port_state( &p_physp->port_info ));
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the port state of this Physical Port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_base_lid
+* NAME
+*	osm_physp_get_base_lid
+*
+* DESCRIPTION
+*	Returns the base lid of this Physical Port.
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t
+osm_physp_get_base_lid(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( p_physp->port_info.base_lid );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the base lid of this Physical Port.
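+*
+*	Note (illustrative only, not part of the original header): together
+*	with the LMC, the base LID defines the range of LIDs the port
+*	answers to, e.g.:
+*
+*		base = cl_ntoh16( osm_physp_get_base_lid( p_physp ) );
+*		top  = base + (1 << osm_physp_get_lmc( p_physp )) - 1;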
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_lmc
+* NAME
+*	osm_physp_get_lmc
+*
+* DESCRIPTION
+*	Returns the LMC value of this Physical Port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_physp_get_lmc(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( ib_port_info_get_lmc( &p_physp->port_info ) );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+*	Returns the LMC value of this Physical Port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_get_dr_path_ptr
+* NAME
+*	osm_physp_get_dr_path_ptr
+*
+* DESCRIPTION
+*	Returns a pointer to the directed route path for this port.
+*
+* SYNOPSIS
+*/
+static inline osm_dr_path_t*
+osm_physp_get_dr_path_ptr(
+	IN const osm_physp_t* const p_physp )
+{
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( (osm_dr_path_t*)&p_physp->dr_path );
+}
+/*
+* PARAMETERS
+*	p_physp
+*		[in] Pointer to a Physical Port object.
+*
+* RETURN VALUES
+*	Returns a pointer to the directed route path for this port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Physical Port object
+*********/
+
+/****h* OpenSM/Port
+* NAME
+*	Port
+*
+* DESCRIPTION
+*	The Port object encapsulates the information needed by the
+*	OpenSM to manage ports. The OpenSM allocates one Port object
+*	per port in the IBA subnet.
+*
+*	Each Port object is associated with a single port GUID. A Port object
+*	contains 1 or more Physical Port objects. An end point node has
+*	one Physical Port per Port. A switch node has more than
+*	one Physical Port per Port.
+*
+*	The Port object is not thread safe, thus callers must provide
+*	serialization.
+*
+*	These objects should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****d* OpenSM: Port/osm_port_lid_category_t
+* NAME
+*	osm_port_lid_category_t
+*
+* DESCRIPTION
+*	Enumerated values for LID disposition.
+*
+* SYNOPSIS
+*/
+typedef enum _osm_port_lid_category
+{
+	OSM_PORT_LID_ASSIGNED = 0,
+	OSM_PORT_LID_UNASSIGNED,
+	OSM_PORT_LID_CONFLICT,
+	OSM_PORT_LID_FOREIGN,
+
+} osm_port_lid_category_t;
+/*
+* FIELDS
+*	OSM_PORT_LID_ASSIGNED
+*		Indicates the Port has a known LID value.
+*
+*	OSM_PORT_LID_UNASSIGNED
+*		Indicates the Port does not have a LID value.
+*
+*	OSM_PORT_LID_CONFLICT
+*		Indicates the Port's LID conflicts with an assigned LID.
+*
+*	OSM_PORT_LID_FOREIGN
+*		Indicates the Port has a LID value not currently known in
+*		the OpenSM LID database.
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****s* OpenSM: Port/osm_port_t
+* NAME
+*	osm_port_t
+*
+* DESCRIPTION
+*	This object represents a logical port on a switch, router or end-point.
+*
+*	The osm_port_t object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_port
+{
+	cl_map_item_t		map_item;
+	struct _osm_node	*p_node;
+	ib_net64_t		guid;
+	uint32_t		discovery_count;
+	uint8_t			default_port_num;
+	uint8_t			physp_tbl_size;
+	cl_qlist_t		mcm_list;
+	osm_physp_t		*tbl[1];
+} osm_port_t;
+/*
+* FIELDS
+*	map_item
+*		Linkage structure for cl_qmap. MUST BE FIRST MEMBER!
+*
+*	p_node
+*		Points to the Node object that owns this port.
+*
+*	guid
+*		Manufacturer assigned GUID for this port.
+*
+*	discovery_count
+*		The number of times this port has been discovered
+*		during the current fabric sweep. This number is reset
+*		to zero at the start of a sweep.
+*
+*	default_port_num
+*		Index of the physical port used when physical characteristics
+*		contained in the Physical Port are needed.
+*
+*	physp_tbl_size
+*		Number of physical ports associated with this logical port.
+*
+*	mcm_list
+*		Multicast member list.
+*
+*	tbl
+*		Array of pointers to Physical Port objects contained by this node.
+*		MUST BE LAST ELEMENT SINCE IT CAN GROW !!!
+*
+* SEE ALSO
+*	Port, Physical Port, Physical Port Table
+*********/
+
+/****f* OpenSM: Port/osm_port_construct
+* NAME
+*	osm_port_construct
+*
+* DESCRIPTION
+*	This function constructs a Port object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_port_construct(
+	IN osm_port_t* const p_port )
+{
+	memset( p_port, 0, sizeof(*p_port) );
+	cl_qlist_init( &p_port->mcm_list );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_port_init, and osm_port_destroy.
+*
+*	Calling osm_port_construct is a prerequisite to calling any other
+*	method except osm_port_init.
+*
+* SEE ALSO
+*	Port, osm_port_init, osm_port_destroy
+*********/
+
+/****f* OpenSM: Port/osm_port_destroy
+* NAME
+*	osm_port_destroy
+*
+* DESCRIPTION
+*	This function destroys a Port object.
+*
+* SYNOPSIS
+*/
+void
+osm_port_destroy(
+	IN osm_port_t* const p_port );
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Port object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_port_construct
+*	or osm_port_init.
+*
+* SEE ALSO
+*	Port, osm_port_init, osm_port_destroy
+*********/
+
+/****f* OpenSM: Port/osm_port_delete
+* NAME
+*	osm_port_delete
+*
+* DESCRIPTION
+*	This function destroys and deallocates a Port object.
+*
+* SYNOPSIS
+*/
+inline static void
+osm_port_delete(
+	IN OUT osm_port_t** const pp_port )
+{
+	osm_port_destroy( *pp_port );
+	free( *pp_port );
+	*pp_port = NULL;
+}
+/*
+* PARAMETERS
+*	pp_port
+*		[in][out] Pointer to a pointer to a Port object to delete.
+*		On return, this pointer is NULL.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Port object.
+*
+* SEE ALSO
+*	Port, osm_port_init, osm_port_destroy
+*********/
+
+/****f* OpenSM: Port/osm_port_init
+* NAME
+*	osm_port_init
+*
+* DESCRIPTION
+*	This function initializes a Port object.
+*
+* SYNOPSIS
+*/
+void
+osm_port_init(
+	IN osm_port_t* const p_port,
+	IN const ib_node_info_t* p_ni,
+	IN const struct _osm_node* const p_parent_node );
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object to initialize.
+*
+*	p_ni
+*		[in] Pointer to the NodeInfo attribute relevant for this port.
+*
+*	p_parent_node
+*		[in] Pointer to the initialized parent osm_node_t object
+*		that owns this port.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	Allows calling other port methods.
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_new
+* NAME
+*	osm_port_new
+*
+* DESCRIPTION
+*	This function allocates and initializes a Port object.
+*
+* SYNOPSIS
+*/
+osm_port_t*
+osm_port_new(
+	IN const ib_node_info_t* p_ni,
+	IN const struct _osm_node* const p_parent_node );
+/*
+* PARAMETERS
+*	p_ni
+*		[in] Pointer to the NodeInfo attribute relevant for this port.
+*
+*	p_parent_node
+*		[in] Pointer to the initialized parent osm_node_t object
+*		that owns this port.
+*
+* RETURN VALUE
+*	Pointer to the initialized Port object.
+*
+* NOTES
+*	Allows calling other port methods.
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_base_lid
+* NAME
+*	osm_port_get_base_lid
+*
+* DESCRIPTION
+*	Gets the base LID of a port.
+*
+* SYNOPSIS
+*/
+static inline ib_net16_t
+osm_port_get_base_lid(
+	IN const osm_port_t* const p_port )
+{
+	const osm_physp_t* const p_physp = p_port->tbl[p_port->default_port_num];
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( osm_physp_get_base_lid( p_physp ));
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	Base LID of the port.
+*	If the return value is 0, then this port has no assigned LID.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_lmc
+* NAME
+*	osm_port_get_lmc
+*
+* DESCRIPTION
+*	Gets the LMC value of a port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_port_get_lmc(
+	IN const osm_port_t* const p_port )
+{
+	const osm_physp_t* const p_physp = p_port->tbl[p_port->default_port_num];
+	CL_ASSERT( p_physp );
+	CL_ASSERT( osm_physp_is_valid( p_physp ) );
+	return( osm_physp_get_lmc( p_physp ));
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	The LMC value of the port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_guid
+* NAME
+*	osm_port_get_guid
+*
+* DESCRIPTION
+*	Gets the GUID of a port.
+*
+* SYNOPSIS
+*/
+static inline ib_net64_t
+osm_port_get_guid(
+	IN const osm_port_t* const p_port )
+{
+	return( p_port->guid );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	Manufacturer assigned GUID of the port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_num_physp
+* NAME
+*	osm_port_get_num_physp
+*
+* DESCRIPTION
+*	Returns the number of Physical Port objects associated with this port.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_port_get_num_physp(
+	IN const osm_port_t* const p_port )
+{
+	return( p_port->physp_tbl_size );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	Returns the number of Physical Port objects associated with this port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_phys_ptr
+* NAME
+*	osm_port_get_phys_ptr
+*
+* DESCRIPTION
+*	Gets the pointer to the specified Physical Port object.
+*
+* SYNOPSIS
+*/
+static inline osm_physp_t*
+osm_port_get_phys_ptr(
+	IN const osm_port_t* const p_port,
+	IN const uint8_t port_num )
+{
+	CL_ASSERT( port_num < p_port->physp_tbl_size );
+	return( p_port->tbl[port_num] );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+*	port_num
+*		[in] Number of the physical port for which to return the
+*		osm_physp_t object. If this port is on an HCA, then
+*		this value is ignored.
+*
+* RETURN VALUE
+*	Pointer to the Physical Port object.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_default_phys_ptr
+* NAME
+*	osm_port_get_default_phys_ptr
+*
+* DESCRIPTION
+*	Gets the pointer to the default Physical Port object.
+*	This call should only be used for non-switch ports, in which there
+*	is a one-for-one mapping of port to physp.
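+*
+*	For a CA port (illustrative sketch only, not part of the original
+*	header), this collapses the physp table lookup to a single call:
+*
+*		p_physp = osm_port_get_default_phys_ptr( p_port );
+*		state = osm_physp_get_port_state( p_physp );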
+*
+* SYNOPSIS
+*/
+static inline
+osm_physp_t*
+osm_port_get_default_phys_ptr(
+	IN const osm_port_t* const p_port )
+{
+	CL_ASSERT( p_port->tbl[p_port->default_port_num] );
+	CL_ASSERT( osm_physp_is_valid( p_port->tbl[p_port->default_port_num] ) );
+	return( p_port->tbl[p_port->default_port_num] );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	Pointer to the Physical Port object.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_parent_node
+* NAME
+*	osm_port_get_parent_node
+*
+* DESCRIPTION
+*	Gets the pointer to this port's Node object.
+*
+* SYNOPSIS
+*/
+static inline struct _osm_node*
+osm_port_get_parent_node(
+	IN const osm_port_t* const p_port )
+{
+	return( p_port->p_node );
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+* RETURN VALUE
+*	Pointer to the parent Node object of this port.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_get_lid_range_ho
+* NAME
+*	osm_port_get_lid_range_ho
+*
+* DESCRIPTION
+*	Returns the HOST ORDER lid min and max values for this port,
+*	based on the lmc value.
+*
+* SYNOPSIS
+*/
+void
+osm_port_get_lid_range_ho(
+	IN const osm_port_t* const p_port,
+	OUT uint16_t* const p_min_lid,
+	OUT uint16_t* const p_max_lid );
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+*	p_min_lid
+*		[out] Pointer to the minimum LID value occupied by this port.
+*
+*	p_max_lid
+*		[out] Pointer to the maximum LID value occupied by this port.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_get_port_by_base_lid
+* NAME
+*	osm_get_port_by_base_lid
+*
+* DESCRIPTION
+*	Returns a status on whether a Port was able to be
+*	determined based on the LID supplied and if so, returns the Port.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_get_port_by_base_lid(
+	IN const osm_subn_t* const p_subn,
+	IN const ib_net16_t lid,
+	IN OUT const osm_port_t** const pp_port );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to the subnet data structure.
+*
+*	lid
+*		[in] LID requested.
+*
+*	pp_port
+*		[in][out] Pointer to a pointer to a Port object.
+*
+* RETURN VALUES
+*	IB_SUCCESS
+*	IB_NOT_FOUND
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_add_new_physp
+* NAME
+*	osm_port_add_new_physp
+*
+* DESCRIPTION
+*	Adds a new physical port to the logical collection owned by the Port.
+*	Physical Ports added here must share the same GUID as the Port.
+*
+* SYNOPSIS
+*/
+void
+osm_port_add_new_physp(
+	IN osm_port_t* const p_port,
+	IN const uint8_t port_num );
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to a Port object.
+*
+*	port_num
+*		[in] Port number to add.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	Port
+*********/
+
+/****f* OpenSM: Port/osm_port_discovery_count_reset
+* NAME
+*	osm_port_discovery_count_reset
+*
+* DESCRIPTION
+*	Resets the discovery count for this Port to zero.
+*	This operation should be performed at the start of a sweep.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_port_discovery_count_reset(
+	IN osm_port_t* const p_port )
+{
+	p_port->discovery_count = 0;
+}
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to an osm_port_t object.
+*
+* RETURN VALUES
+*	None.
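+*
+*	The intended sweep pattern (illustrative sketch only, not part of
+*	the original header):
+*
+*		osm_port_discovery_count_reset( p_port );   at sweep start
+*		osm_port_discovery_count_inc( p_port );     on each discovery
+*		osm_port_discovery_count_get( p_port );     to inspect afterwards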
+* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Port/osm_port_discovery_count_get +* NAME +* osm_port_discovery_count_get +* +* DESCRIPTION +* Returns the number of times this port has been discovered +* since the last time the discovery count was reset. +* +* SYNOPSIS +*/ +static inline uint32_t +osm_port_discovery_count_get( + IN const osm_port_t* const p_port ) +{ + return( p_port->discovery_count ); +} +/* +* PARAMETERS +* p_port +* [in] Pointer to an osm_port_t object. +* +* RETURN VALUES +* Returns the number of times this port has been discovered +* since the last time the discovery count was reset. +* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Port/osm_port_discovery_count_inc +* NAME +* osm_port_discovery_count_inc +* +* DESCRIPTION +* Increments the discovery count for this Port. +* +* SYNOPSIS +*/ +static inline void +osm_port_discovery_count_inc( + IN osm_port_t* const p_port ) +{ + p_port->discovery_count++; +} +/* +* PARAMETERS +* p_port +* [in] Pointer to an osm_port_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Port/osm_port_add_mgrp +* NAME +* osm_port_add_mgrp +* +* DESCRIPTION +* Logically connects a port to a multicast group. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_port_add_mgrp( + IN osm_port_t* const p_port, + IN const ib_net16_t mlid ); +/* +* PARAMETERS +* p_port +* [in] Pointer to an osm_port_t object. +* +* mlid +* [in] MLID of the multicast group. +* +* RETURN VALUES +* IB_SUCCESS +* IB_INSUFFICIENT_MEMORY +* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Port/osm_port_remove_mgrp +* NAME +* osm_port_remove_mgrp +* +* DESCRIPTION +* Logically disconnects a port from a multicast group. +* +* SYNOPSIS +*/ +void +osm_port_remove_mgrp( + IN osm_port_t* const p_port, + IN const ib_net16_t mlid ); +/* +* PARAMETERS +* p_port +* [in] Pointer to an osm_port_t object. +* +* mlid +* [in] MLID of the multicast group. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Port/osm_port_remove_all_mgrp +* NAME +* osm_port_remove_all_mgrp +* +* DESCRIPTION +* Logically disconnects a port from all its multicast groups. +* +* SYNOPSIS +*/ +void +osm_port_remove_all_mgrp( + IN osm_port_t* const p_port ); +/* +* PARAMETERS +* p_port +* [in] Pointer to an osm_port_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* Port object +*********/ + +/****f* OpenSM: Physical Port/osm_physp_calc_link_mtu +* NAME +* osm_physp_calc_link_mtu +* +* DESCRIPTION +* Calculate the Port MTU based on current and remote +* physical ports MTU CAP values. +* +* SYNOPSIS +*/ +uint8_t +osm_physp_calc_link_mtu( + IN osm_log_t* p_log, + IN const osm_physp_t* p_physp ); +/* +* PARAMETERS +* p_log +* [in] Pointer to a log object. +* +* p_physp +* [in] Pointer to an osm_physp_t object. +* +* RETURN VALUES +* The MTU of the link to be used. +* +* NOTES +* +* SEE ALSO +* PhysPort object +*********/ + +/****f* OpenSM: Physical Port/osm_physp_calc_link_op_vls +* NAME +* osm_physp_calc_link_op_vls +* +* DESCRIPTION +* Calculate the Port OP_VLS based on current and remote +* physical ports VL CAP values. Allowing user option for a max limit. +* +* SYNOPSIS +*/ +uint8_t +osm_physp_calc_link_op_vls( + IN osm_log_t* p_log, + IN const osm_subn_t * p_subn, + IN const osm_physp_t* p_physp ); +/* +* PARAMETERS +* p_log +* [in] Pointer to a log object. 
+*
+* p_subn
+* [in] Pointer to the subnet object for accessing the options.
+*
+* p_physp
+* [in] Pointer to an osm_physp_t object.
+*
+* RETURN VALUES
+* The OP_VLS of the link to be used.
+*
+* NOTES
+*
+* SEE ALSO
+* PhysPort object
+*********/
+
+/****f* OpenSM: Physical Port/osm_physp_replace_dr_path_with_alternate_dr_path
+* NAME
+* osm_physp_replace_dr_path_with_alternate_dr_path
+*
+* DESCRIPTION
+* Replaces the direct route path for the given phys port with an
+* alternate path going through a foreign set of phys ports.
+*
+* SYNOPSIS
+*/
+void
+osm_physp_replace_dr_path_with_alternate_dr_path(
+ IN osm_log_t *p_log,
+ IN osm_subn_t const *p_subn,
+ IN osm_physp_t const *p_physp,
+ IN osm_bind_handle_t *h_bind );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to a log object.
+*
+* p_subn
+* [in] Pointer to the subnet object for accessing the options.
+*
+* p_physp
+* [in] Pointer to an osm_physp_t object.
+*
+* h_bind
+* [in] Pointer to an osm_bind_handle_t object.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* PhysPort object
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PORT_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv.h
new file mode 100644
index 00000000..67b2a164
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_pi_rcv_t.
+ * This object represents the PortInfo Receiver object,
+ * which receives the PortInfo attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_PI_RCV_H_
+#define _OSM_PI_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Port Info Receiver
+* NAME
+* Port Info Receiver
+*
+* DESCRIPTION
+* The Port Info Receiver object encapsulates the information
+* needed to receive the PortInfo attribute from a node.
+* +* The Port Info Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Port Info Receiver/osm_pi_rcv_t +* NAME +* osm_pi_rcv_t +* +* DESCRIPTION +* Port Info Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pi_rcv +{ + osm_subn_t *p_subn; + osm_req_t *p_req; + osm_log_t *p_log; + osm_state_mgr_t *p_state_mgr; + cl_plock_t *p_lock; +} osm_pi_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_req +* Pointer to the generic attribute request object. +* +* p_log +* Pointer to the log object. +* +* p_state_mgr +* Pointer to the State Manager object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Port Info Receiver object +*********/ + +/****f* OpenSM: Port Info Receiver/osm_pi_rcv_construct +* NAME +* osm_pi_rcv_construct +* +* DESCRIPTION +* This function constructs a Port Info Receiver object. +* +* SYNOPSIS +*/ +void osm_pi_rcv_construct( + IN osm_pi_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Port Info Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pi_rcv_destroy +* +* Calling osm_pi_rcv_construct is a prerequisite to calling any other +* method except osm_pi_rcv_init. +* +* SEE ALSO +* Port Info Receiver object, osm_pi_rcv_init, +* osm_pi_rcv_destroy +*********/ + +/****f* OpenSM: Port Info Receiver/osm_pi_rcv_destroy +* NAME +* osm_pi_rcv_destroy +* +* DESCRIPTION +* The osm_pi_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pi_rcv_destroy( + IN osm_pi_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Port Info Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pi_rcv_construct or osm_pi_rcv_init. +* +* SEE ALSO +* Port Info Receiver object, osm_pi_rcv_construct, +* osm_pi_rcv_init +*********/ + +/****f* OpenSM: Port Info Receiver/osm_pi_rcv_init +* NAME +* osm_pi_rcv_init +* +* DESCRIPTION +* The osm_pi_rcv_init function initializes a +* Port Info Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_pi_rcv_init( + IN osm_pi_rcv_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_state_mgr +* [in] Pointer to the state manager object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the Port Info Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Port Info Receiver methods. 
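+*
+* A typical lifecycle, sketched here under the assumption that the
+* supplied objects are already initialized (error handling omitted):
+*
+* osm_pi_rcv_t rcv;
+* ib_api_status_t status;
+* osm_pi_rcv_construct( &rcv );
+* status = osm_pi_rcv_init( &rcv, p_req, p_subn, p_log,
+* p_state_mgr, p_lock );
+* /* ... dispatch PortInfo MADs to osm_pi_rcv_process ... */
+* osm_pi_rcv_destroy( &rcv );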
+*
+* SEE ALSO
+* Port Info Receiver object, osm_pi_rcv_construct,
+* osm_pi_rcv_destroy
+*********/
+
+/****f* OpenSM: Port Info Receiver/osm_pi_rcv_process
+* NAME
+* osm_pi_rcv_process
+*
+* DESCRIPTION
+* Process the PortInfo attribute.
+*
+* SYNOPSIS
+*/
+void osm_pi_rcv_process(
+ IN const osm_pi_rcv_t* const p_ctrl,
+ IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_pi_rcv_t object.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the MAD
+* that contains the node's PortInfo attribute.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+* This function processes a PortInfo attribute.
+*
+* SEE ALSO
+* Port Info Receiver, Port Info Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PI_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv_ctrl.h
new file mode 100644
index 00000000..47bf31ea
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_info_rcv_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_pi_rcv_ctrl_t.
+ * This object represents a controller that receives the IBA PortInfo
+ * attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_PI_RCV_CTRL_H_
+#define _OSM_PI_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Port Info Receive Controller
+* NAME
+* Port Info Receive Controller
+*
+* DESCRIPTION
+* The Port Info Receive Controller object encapsulates
+* the information needed to receive the PortInfo attribute from a node.
+*
+* The Port Info Receive Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
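+*
+* The controller's role, as suggested by the h_disp handle below, is
+* to register with the Dispatcher and hand incoming PortInfo MADs to
+* the Port Info Receiver for processing.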
+* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Port Info Receive Controller/osm_pi_rcv_ctrl_t +* NAME +* osm_pi_rcv_ctrl_t +* +* DESCRIPTION +* Port Info Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pi_rcv_ctrl +{ + osm_pi_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_pi_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Port Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Port Info Receive Controller object +* Port Info Receiver object +*********/ + +/****f* OpenSM: Port Info Receive Controller/osm_pi_rcv_ctrl_construct +* NAME +* osm_pi_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Port Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_pi_rcv_ctrl_construct( + IN osm_pi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Port Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pi_rcv_ctrl_init, osm_pi_rcv_ctrl_destroy, +* and osm_pi_rcv_ctrl_is_inited. +* +* Calling osm_pi_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_pi_rcv_ctrl_init. +* +* SEE ALSO +* Port Info Receive Controller object, osm_pi_rcv_ctrl_init, +* osm_pi_rcv_ctrl_destroy, osm_pi_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Port Info Receive Controller/osm_pi_rcv_ctrl_destroy +* NAME +* osm_pi_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_pi_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pi_rcv_ctrl_destroy( + IN osm_pi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Port Info Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pi_rcv_ctrl_construct or osm_pi_rcv_ctrl_init. +* +* SEE ALSO +* Port Info Receive Controller object, osm_pi_rcv_ctrl_construct, +* osm_pi_rcv_ctrl_init +*********/ + +/****f* OpenSM: Port Info Receive Controller/osm_pi_rcv_ctrl_init +* NAME +* osm_pi_rcv_ctrl_init +* +* DESCRIPTION +* The osm_pi_rcv_ctrl_init function initializes a +* Port Info Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_pi_rcv_ctrl_init( + IN osm_pi_rcv_ctrl_t* const p_ctrl, + IN osm_pi_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_pi_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Port Info Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Port Info Receive Controller methods. 
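+*
+* A minimal initialization sketch (assumes p_rcv, p_log and p_disp
+* are already initialized; error handling omitted):
+*
+* osm_pi_rcv_ctrl_t ctrl;
+* ib_api_status_t status;
+* osm_pi_rcv_ctrl_construct( &ctrl );
+* status = osm_pi_rcv_ctrl_init( &ctrl, p_rcv, p_log, p_disp );
+* CL_ASSERT( osm_pi_rcv_ctrl_is_inited( &ctrl ) );
+* /* ... */
+* osm_pi_rcv_ctrl_destroy( &ctrl );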
+* +* SEE ALSO +* Port Info Receive Controller object, osm_pi_rcv_ctrl_construct, +* osm_pi_rcv_ctrl_destroy, osm_pi_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Port Info Receive Controller/osm_pi_rcv_ctrl_is_inited +* NAME +* osm_pi_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_pi_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_pi_rcv_ctrl_is_inited( + IN const osm_pi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pi_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_pi_rcv_ctrl_construct or osm_pi_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Port Info Receive Controller object, osm_pi_rcv_ctrl_construct, +* osm_pi_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_PI_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_profile.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_profile.h new file mode 100644 index 00000000..cf55b094 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_port_profile.h @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of Switch/osm_port_profile_t. + * This object represents a port profile for an IBA switch. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#ifndef _OSM_PORT_PROFILE_H_ +#define _OSM_PORT_PROFILE_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Port Profile +* NAME +* Port Profile +* +* DESCRIPTION +* The Port Profile object contains profiling information for +* each Physical Port on a switch. The profile information +* may be used to optimize path selection. 
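+*
+* For example, a routing engine can increment a profile each time it
+* assigns a path through a port, and then prefer the least-loaded
+* port (a sketch using the functions declared below):
+*
+* osm_port_profile_t prof;
+* osm_port_prof_construct( &prof );
+* osm_port_prof_path_count_inc( &prof );
+* num = osm_port_prof_path_count_get( &prof );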
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Switch/osm_port_profile_t
+* NAME
+* osm_port_profile_t
+*
+* DESCRIPTION
+* The Port Profile object contains profiling information for
+* each Physical Port on the switch. The profile information
+* may be used to optimize path selection.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_port_profile
+{
+ uint32_t num_paths;
+} osm_port_profile_t;
+/*
+* FIELDS
+* num_paths
+* The number of paths using this port.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Port Profile/osm_port_prof_construct
+* NAME
+* osm_port_prof_construct
+*
+* DESCRIPTION
+* This function constructs a Port Profile object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_port_prof_construct(
+ IN osm_port_profile_t* const p_prof )
+{
+ CL_ASSERT( p_prof );
+ memset( p_prof, 0, sizeof(*p_prof) );
+}
+/*
+* PARAMETERS
+* p_prof
+* [in] Pointer to the Port Profile object to construct.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Port Profile/osm_port_prof_path_count_inc
+* NAME
+* osm_port_prof_path_count_inc
+*
+* DESCRIPTION
+* Increments the count of the number of paths going through this port.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_port_prof_path_count_inc(
+ IN osm_port_profile_t* const p_prof )
+{
+ CL_ASSERT( p_prof );
+ p_prof->num_paths++;
+}
+/*
+* PARAMETERS
+* p_prof
+* [in] Pointer to the Port Profile object.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Port Profile/osm_port_prof_path_count_get
+* NAME
+* osm_port_prof_path_count_get
+*
+* DESCRIPTION
+* Returns the count of the number of paths going through this port.
+*
+* SYNOPSIS
+*/
+static inline uint32_t
+osm_port_prof_path_count_get(
+ IN const osm_port_profile_t* const p_prof )
+{
+ return( p_prof->num_paths );
+}
+/*
+* PARAMETERS
+* p_prof
+* [in] Pointer to the Port Profile object.
+*
+* RETURN VALUE
+* The number of paths going through this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Port Profile Opt/osm_port_prof_is_ignored_port
+* NAME
+* osm_port_prof_is_ignored_port
+*
+* DESCRIPTION
+* Check to see if this port is to be ignored in path counting.
+* This is done by examining the optional list of port_prof_ignore_guids.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_port_prof_is_ignored_port(
+ IN const osm_subn_t *p_subn,
+ IN uint64_t port_guid,
+ IN uint8_t port_num )
+{
+ const cl_map_t *p_map = &(p_subn->opt.port_prof_ignore_guids);
+ const void *p_obj = cl_map_get(p_map, port_guid);
+ size_t res;
+
+ /* HACK: we currently support ignoring ports 0 - 31 */
+ if (p_obj != NULL) {
+ res = (size_t)p_obj & (size_t)(1 << port_num);
+ return (res != 0);
+ }
+ return FALSE;
+}
+/*
+* PARAMETERS
+* p_subn
+* [in] Pointer to the OSM Subnet object.
+*
+* port_guid
+* [in] The port GUID.
+*
+* port_num
+* [in] The port number.
+*
+* RETURN VALUE
+* TRUE if the port is to be ignored in path counting;
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Port Profile Opt/osm_port_prof_set_ignored_port
+* NAME
+* osm_port_prof_set_ignored_port
+*
+* DESCRIPTION
+* Set the ignored property of the port.
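+* The ignored ports of a GUID are encoded as a bitmask kept as the
+* map value, so marking ports 1 and 3 of the same GUID yields the
+* value (1 << 1) | (1 << 3) = 0x0A. Only ports 0 - 31 are currently
+* supported; see the HACK note in the code below.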
+*
+* SYNOPSIS
+*/
+static inline void
+osm_port_prof_set_ignored_port(
+ IN osm_subn_t *p_subn,
+ IN uint64_t port_guid,
+ IN uint8_t port_num )
+{
+ cl_map_t *p_map = &(p_subn->opt.port_prof_ignore_guids);
+ const void *p_obj = cl_map_get(p_map, port_guid);
+ size_t value = 0;
+
+ /* HACK: we currently support ignoring ports 0 - 31 */
+ CL_ASSERT(port_num < 32);
+
+ if (p_obj != NULL) {
+ value = (size_t)p_obj;
+ }
+
+ value = value | (1 << port_num);
+ cl_map_insert(&(p_subn->opt.port_prof_ignore_guids),
+ port_guid,
+ (void *)value);
+}
+/*
+* PARAMETERS
+* p_subn
+* [in] Pointer to the OSM Subnet object.
+*
+* port_guid
+* [in] The port GUID.
+*
+* port_num
+* [in] The port number to mark as ignored.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_PORT_PROFILE_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_rand_fwd_tbl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_rand_fwd_tbl.h
new file mode 100644
index 00000000..bc362adc
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_rand_fwd_tbl.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_rand_fwd_tbl_t.
+ * This object represents a random forwarding table for an IBA switch.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_RAND_FWD_TBL_H_
+#define _OSM_RAND_FWD_TBL_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Random Forwarding Table
+* NAME
+* Random Forwarding Table
+*
+* DESCRIPTION
+* The Random Forwarding Table objects encapsulate the information
+* needed by the OpenSM to manage random forwarding tables. The OpenSM
+* allocates one Random Forwarding Table object per switch in the
+* IBA subnet, if that switch uses a random forwarding table.
+*
+* The Random Forwarding Table objects are not thread safe, thus
+* callers must provide serialization.
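+*
+* Note that the functions declared below are stubs: they assert in
+* debug builds and return error values (e.g. IB_ERROR, OSM_NO_PATH)
+* until random forwarding table support is implemented.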
+*
+* ** RANDOM FORWARDING TABLES ARE NOT SUPPORTED IN THE CURRENT VERSION **
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Forwarding Table/osm_rand_fwd_tbl_t
+* NAME
+* osm_rand_fwd_tbl_t
+*
+* DESCRIPTION
+* Random Forwarding Table structure.
+*
+* THIS OBJECT IS A PLACE HOLDER. SUPPORT FOR SWITCHES WITH
+* RANDOM FORWARDING TABLES HAS NOT BEEN IMPLEMENTED YET.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_rand_fwd_tbl
+{
+ /* PLACE HOLDER STRUCTURE ONLY!! */
+ uint32_t size;
+} osm_rand_fwd_tbl_t;
+/*
+* FIELDS
+* RANDOM FORWARDING TABLES ARE NOT SUPPORTED YET!!
+*
+* SEE ALSO
+* Forwarding Table object, Random Forwarding Table object.
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_rand_tbl_delete
+* NAME
+* osm_rand_tbl_delete
+*
+* DESCRIPTION
+* This destroys and deallocates a Random Forwarding Table object.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_rand_tbl_delete(
+ IN osm_rand_fwd_tbl_t** const pp_tbl )
+{
+ /*
+ TO DO - This is a place holder function only!
+ */
+ free( *pp_tbl );
+ *pp_tbl = NULL;
+}
+/*
+* PARAMETERS
+* pp_tbl
+* [in] Pointer to a pointer to the Random Forwarding Table object.
+*
+* RETURN VALUE
+* None. On return, *pp_tbl is set to NULL.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_set
+* NAME
+* osm_rand_fwd_tbl_set
+*
+* DESCRIPTION
+* Sets the port to route the specified LID.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_rand_fwd_tbl_set(
+ IN osm_rand_fwd_tbl_t* const p_tbl,
+ IN const uint16_t lid_ho,
+ IN const uint8_t port )
+{
+ /* Random forwarding tables not supported yet. */
+ UNUSED_PARAM( p_tbl );
+ UNUSED_PARAM( lid_ho );
+ UNUSED_PARAM( port );
+ CL_ASSERT( FALSE );
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Random Forwarding Table object.
+*
+* lid_ho
+* [in] LID value (host order) for which to set the route.
+*
+* port
+* [in] Port to route the specified LID value.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_set_block
+* NAME
+* osm_rand_fwd_tbl_set_block
+*
+* DESCRIPTION
+* Copies the specified block into the Random Forwarding Table.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+osm_rand_fwd_tbl_set_block(
+ IN osm_rand_fwd_tbl_t* const p_tbl,
+ IN const uint8_t* const p_block,
+ IN const uint32_t block_num )
+{
+ /* Random forwarding tables not supported yet. */
+ UNUSED_PARAM( p_tbl );
+ UNUSED_PARAM( p_block );
+ UNUSED_PARAM( block_num );
+ CL_ASSERT( FALSE );
+ return( IB_ERROR );
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Random Forwarding Table object.
+*
+* p_block
+* [in] Pointer to the Forwarding Table block.
+*
+* block_num
+* [in] Block number of this block.
+*
+* RETURN VALUE
+* IB_ERROR, since random forwarding tables are not yet supported.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_get
+* NAME
+* osm_rand_fwd_tbl_get
+*
+* DESCRIPTION
+* Returns the port that routes the specified LID.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_rand_fwd_tbl_get(
+ IN const osm_rand_fwd_tbl_t* const p_tbl,
+ IN const uint16_t lid_ho )
+{
+ CL_ASSERT( FALSE );
+ UNUSED_PARAM( p_tbl );
+ UNUSED_PARAM( lid_ho );
+
+ return( OSM_NO_PATH );
+}
+/*
+* PARAMETERS
+* p_tbl
+* [in] Pointer to the Random Forwarding Table object.
+*
+* lid_ho
+* [in] LID value (host order) for which to get the route.
+*
+* RETURN VALUE
+* Returns the port that routes the specified LID.
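+* In the current place-holder implementation this is always
+* OSM_NO_PATH.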
+* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_get_lids_per_block +* NAME +* osm_rand_fwd_tbl_get_lids_per_block +* +* DESCRIPTION +* Returns the number of LIDs per LID block. +* +* SYNOPSIS +*/ +static inline uint16_t +osm_rand_fwd_tbl_get_lids_per_block( + IN const osm_rand_fwd_tbl_t* const p_tbl ) +{ + UNUSED_PARAM( p_tbl ); + return( 16 ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Forwarding Table object. +* +* RETURN VALUE +* Returns the number of LIDs per LID block. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_get_max_block_id_in_use +* NAME +* osm_rand_fwd_tbl_get_max_block_id_in_use +* +* DESCRIPTION +* Returns the maximum block ID in actual use by the forwarding table. +* +* SYNOPSIS +*/ +static inline uint16_t +osm_rand_fwd_tbl_get_max_block_id_in_use( + IN const osm_rand_fwd_tbl_t* const p_tbl, + IN const uint16_t lid_top_ho ) +{ + UNUSED_PARAM( p_tbl ); + UNUSED_PARAM( lid_top_ho ); + CL_ASSERT( FALSE ); + return( 0 ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Forwarding Table object. +* +* RETURN VALUE +* Returns the maximum block ID in actual use by the forwarding table. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: Forwarding Table/osm_rand_fwd_tbl_get_size +* NAME +* osm_rand_fwd_tbl_get_size +* +* DESCRIPTION +* Returns the number of entries available in the forwarding table. +* +* SYNOPSIS +*/ +static inline uint16_t +osm_rand_fwd_tbl_get_size( + IN const osm_rand_fwd_tbl_t* const p_tbl ) +{ + UNUSED_PARAM( p_tbl ); + CL_ASSERT( FALSE ); + return( 0 ); +} +/* +* PARAMETERS +* p_tbl +* [in] Pointer to the Forwarding Table object. +* +* RETURN VALUE +* Returns the number of entries available in the forwarding table. +* +* NOTES +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_RAND_FWD_TBL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_remote_sm.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_remote_sm.h new file mode 100644 index 00000000..da4b6f71 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_remote_sm.h @@ -0,0 +1,212 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_remote_sm_t.
+ * This object represents a remote Subnet Manager on the subnet.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_REMOTE_SM_H_
+#define _OSM_REMOTE_SM_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Remote SM
+* NAME
+* Remote SM
+*
+* DESCRIPTION
+* The Remote SM object encapsulates the information tracked for
+* other SM ports on the subnet.
+*
+* The Remote SM object is thread safe.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Remote SM/osm_remote_sm_t
+* NAME
+* osm_remote_sm_t
+*
+* DESCRIPTION
+* Remote Subnet Manager structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_remote_sm
+{
+ cl_map_item_t map_item;
+ const osm_port_t *p_port;
+ ib_sm_info_t smi;
+} osm_remote_sm_t;
+/*
+* FIELDS
+* map_item
+* Linkage for the cl_qmap container. MUST BE FIRST ELEMENT!!
+*
+* p_port
+* Pointer to the port object for this SM.
+*
+* smi
+* The SMInfo attribute for this SM.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SM/osm_remote_sm_construct
+* NAME
+* osm_remote_sm_construct
+*
+* DESCRIPTION
+* This function constructs a Remote SM object.
+*
+* SYNOPSIS
+*/
+void
+osm_remote_sm_construct(
+ IN osm_remote_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to a Remote SM object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_remote_sm_init, osm_remote_sm_destroy
+*
+* Calling osm_remote_sm_construct is a prerequisite to calling any other
+* method except osm_remote_sm_init.
+*
+* SEE ALSO
+* SM object, osm_remote_sm_init, osm_remote_sm_destroy
+*********/
+
+/****f* OpenSM: SM/osm_remote_sm_destroy
+* NAME
+* osm_remote_sm_destroy
+*
+* DESCRIPTION
+* The osm_remote_sm_destroy function destroys a Remote SM object,
+* releasing all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_remote_sm_destroy(
+ IN osm_remote_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to a Remote SM object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified Remote SM object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_remote_sm_construct or osm_remote_sm_init.
+*
+* SEE ALSO
+* Remote SM object, osm_remote_sm_construct, osm_remote_sm_init
+*********/
+
+/****f* OpenSM: SM/osm_remote_sm_init
+* NAME
+* osm_remote_sm_init
+*
+* DESCRIPTION
+* The osm_remote_sm_init function initializes a Remote SM object for use.
+*
+* SYNOPSIS
+*/
+void
+osm_remote_sm_init(
+ IN osm_remote_sm_t* const p_sm,
+ IN const osm_port_t* const p_port,
+ IN const ib_sm_info_t* const p_smi );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_remote_sm_t object to initialize.
+*
+* p_port
+* [in] Pointer to the Remote SM's port object.
+*
+* p_smi
+* [in] Pointer to the SMInfo attribute for this SM.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* Allows calling other Remote SM methods.
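+*
+* A minimal usage sketch (assumes p_port and p_smi are valid):
+*
+* osm_remote_sm_t sm;
+* osm_remote_sm_construct( &sm );
+* osm_remote_sm_init( &sm, p_port, p_smi );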
+*
+* SEE ALSO
+* Remote SM object, osm_remote_sm_construct, osm_remote_sm_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_REMOTE_SM_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_req.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_req.h
new file mode 100644
index 00000000..aed298c1
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_req.h
@@ -0,0 +1,354 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_req_t.
+ * This object represents an object that generically requests
+ * attributes from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_REQ_H_
+#define _OSM_REQ_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Generic Requester
+* NAME
+* Generic Requester
+*
+* DESCRIPTION
+* The Generic Requester object encapsulates the information
+* needed to request an attribute from a node.
+*
+* The Generic Requester object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Generic Requester/osm_req_t
+* NAME
+* osm_req_t
+*
+* DESCRIPTION
+* Generic Requester structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_req
+{
+ osm_mad_pool_t *p_pool;
+ osm_vl15_t *p_vl15;
+ osm_log_t *p_log;
+ osm_subn_t *p_subn;
+ atomic32_t *p_sm_trans_id;
+
+} osm_req_t;
+/*
+* FIELDS
+* p_pool
+* Pointer to the MAD pool.
+*
+* p_vl15
+* Pointer to the VL15 interface.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_subn
+* Pointer to the subnet object.
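+*
+* p_sm_trans_id
+* Pointer to the atomic SM transaction ID.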
+* +* SEE ALSO +* Generic Requester object +*********/ + +/****f* OpenSM: Generic Requester/osm_req_construct +* NAME +* osm_req_construct +* +* DESCRIPTION +* This function constructs a Generic Requester object. +* +* SYNOPSIS +*/ +void +osm_req_construct( + IN osm_req_t* const p_req ); +/* +* PARAMETERS +* p_req +* [in] Pointer to a Generic Requester object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_req_init, and osm_req_destroy. +* +* Calling osm_req_construct is a prerequisite to calling any other +* method except osm_req_init. +* +* SEE ALSO +* Generic Requester object, osm_req_init, +* osm_req_destroy +*********/ + +/****f* OpenSM: Generic Requester/osm_req_destroy +* NAME +* osm_req_destroy +* +* DESCRIPTION +* The osm_req_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_req_destroy( + IN osm_req_t* const p_req ); +/* +* PARAMETERS +* p_req +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Generic Requester object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_req_construct or osm_req_init. +* +* SEE ALSO +* Generic Requester object, osm_req_construct, +* osm_req_init +*********/ + +/****f* OpenSM: Generic Requester/osm_req_init +* NAME +* osm_req_init +* +* DESCRIPTION +* The osm_req_init function initializes a +* Generic Requester object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_req_init( + IN osm_req_t* const p_req, + IN osm_mad_pool_t* const p_pool, + IN osm_vl15_t* const p_vl15, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN atomic32_t* const p_sm_trans_id ); +/* +* PARAMETERS +* p_req +* [in] Pointer to an osm_req_t object to initialize. +* +* p_mad_pool +* [in] Pointer to the MAD pool. +* +* p_vl15 +* [in] Pointer to the VL15 interface. +* +* p_subn +* [in] Pointer to the subnet object. +* +* p_log +* [in] Pointer to the log object. +* +* p_sm_trans_id +* [in] Pointer to the atomic SM transaction ID. +* +* RETURN VALUES +* IB_SUCCESS if the Generic Requester object was initialized +* successfully. +* +* NOTES +* Allows calling other Generic Requester methods. +* +* SEE ALSO +* Generic Requester object, osm_req_construct, +* osm_req_destroy +*********/ + +/****f* OpenSM: Generic Requester/osm_req_get +* NAME +* osm_req_get +* +* DESCRIPTION +* Starts the process to transmit a directed route request for +* the attribute. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_req_get( + IN const osm_req_t* const p_req, + IN const osm_dr_path_t* const p_path, + IN const uint16_t attr_id, + IN const uint32_t attr_mod, + IN const cl_disp_msgid_t err_msg, + IN const osm_madw_context_t* const p_context ); +/* +* PARAMETERS +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_path +* [in] Pointer to the directed route path to the node +* from which to retrieve the attribute. +* +* attr_id +* [in] Attribute ID to request. +* +* attr_mod +* [in] Attribute modifier for this request. +* +* err_msg +* [in] Message id with which to post this MAD if an error occurs. +* +* p_context +* [in] Mad wrapper context structure to be copied into the wrapper +* context, and thus visible to the recipient of the response. +* +* RETURN VALUES +* IB_SUCCESS if the request was successful. +* +* NOTES +* This function asynchronously requests the specified attribute. 
+* The response from the node will be routed through the Dispatcher +* to the appropriate receive controller object. +* +* SEE ALSO +* Generic Requester +*********/ +/****f* OpenSM: Generic Requester/osm_req_set +* NAME +* osm_req_set +* +* DESCRIPTION +* Starts the process to transmit a directed route Set() request. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_req_set( + IN const osm_req_t* const p_req, + IN const osm_dr_path_t* const p_path, + IN const uint8_t* const p_payload, + IN const size_t payload_size, + IN const uint16_t attr_id, + IN const uint32_t attr_mod, + IN const cl_disp_msgid_t err_msg, + IN const osm_madw_context_t* const p_context ); +/* +* PARAMETERS +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_path +* [in] Pointer to the directed route path of the recipient. +* +* p_payload +* [in] Pointer to the SMP payload to send. +* +* payload_size +* [in] The size of the payload to be copied to the SMP data field. +* +* attr_id +* [in] Attribute ID to request. +* +* attr_mod +* [in] Attribute modifier for this request. +* +* err_msg +* [in] Message id with which to post this MAD if an error occurs. +* +* p_context +* [in] Mad wrapper context structure to be copied into the wrapper +* context, and thus visible to the recipient of the response. +* +* RETURN VALUES +* IB_SUCCESS if the request was successful. +* +* NOTES +* This function asynchronously requests the specified attribute. +* The response from the node will be routed through the Dispatcher +* to the appropriate receive controller object. +* +* SEE ALSO +* Generic Requester +*********/ + +END_C_DECLS + +#endif /* _OSM_REQ_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_req_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_req_ctrl.h new file mode 100644 index 00000000..1113d056 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_req_ctrl.h @@ -0,0 +1,228 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_req_ctrl_t. + * This object represents a controller that calls the + * generic requester object to retrieve attributes from a node. + * This object is part of the OpenSM family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_REQ_CTRL_H_ +#define _OSM_REQ_CTRL_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Generic Request Controller +* NAME +* Generic Request Controller +* +* DESCRIPTION +* The Generic Request Controller object encapsulates the information +* needed to request an attribute from a node. +* +* The Generic Request Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Generic Request Controller/osm_req_ctrl_t +* NAME +* osm_req_ctrl_t +* +* DESCRIPTION +* Generic Request Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_req_ctrl +{ + osm_req_t *p_req; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_req_ctrl_t; +/* +* FIELDS +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Generic Request Controller object +*********/ + +/****f* OpenSM: Generic Request Controller/osm_req_ctrl_construct +* NAME +* osm_req_ctrl_construct +* +* DESCRIPTION +* This function constructs a Generic Request Controller object. +* +* SYNOPSIS +*/ +void +osm_req_ctrl_construct( + IN osm_req_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Generic Request Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_req_ctrl_init, and osm_req_ctrl_destroy. +* +* Calling osm_req_ctrl_construct is a prerequisite to calling any other +* method except osm_req_ctrl_init. +* +* SEE ALSO +* Generic Request Controller object, osm_req_ctrl_init, +* osm_req_ctrl_destroy +*********/ + +/****f* OpenSM: Generic Request Controller/osm_req_ctrl_destroy +* NAME +* osm_req_ctrl_destroy +* +* DESCRIPTION +* The osm_req_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_req_ctrl_destroy( + IN osm_req_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Generic Request Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_req_ctrl_construct or osm_req_ctrl_init. +* +* SEE ALSO +* Generic Request Controller object, osm_req_ctrl_construct, +* osm_req_ctrl_init +*********/ + +/****f* OpenSM: Generic Request Controller/osm_req_ctrl_init +* NAME +* osm_req_ctrl_init +* +* DESCRIPTION +* The osm_req_ctrl_init function initializes a +* Generic Request Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_req_ctrl_init( + IN osm_req_ctrl_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_req_ctrl_t object to initialize. +* +* p_req +* [in] Pointer to a Generic Requester object. 
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* CL_SUCCESS if the Generic Request Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Generic Request Controller methods.
+*
+* SEE ALSO
+* Generic Request Controller object, osm_req_ctrl_construct,
+* Generic Requester object, osm_req_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_REQ_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_resp.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_resp.h
new file mode 100644
index 00000000..9a5977a0
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_resp.h
@@ -0,0 +1,279 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_resp_t.
+ * This object represents an object that generically responds to
+ * attribute requests from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_RESP_H_
+#define _OSM_RESP_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Generic Responder
+* NAME
+* Generic Responder
+*
+* DESCRIPTION
+* The Generic Responder object encapsulates the information
+* needed to respond to an attribute from a node.
+*
+* The Generic Responder object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Generic Responder/osm_resp_t
+* NAME
+* osm_resp_t
+*
+* DESCRIPTION
+* Generic Responder structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_resp
+{
+ osm_mad_pool_t *p_pool;
+ osm_vl15_t *p_vl15;
+ osm_log_t *p_log;
+ osm_subn_t *p_subn;
+
+} osm_resp_t;
+/*
+* FIELDS
+* p_pool
+* Pointer to the MAD pool.
+* +* p_vl15 +* Pointer to the VL15 interface. +* +* p_log +* Pointer to the log object. +* +* p_subn +* Pointer to the subnet object. +* +* SEE ALSO +* Generic Responder object +*********/ + +/****f* OpenSM: Generic Responder/osm_resp_construct +* NAME +* osm_resp_construct +* +* DESCRIPTION +* This function constructs a Generic Responder object. +* +* SYNOPSIS +*/ +void +osm_resp_construct( + IN osm_resp_t* const p_resp ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to a Generic Responder object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_resp_init, osm_resp_destroy +* +* Calling osm_resp_construct is a prerequisite to calling any other +* method except osm_resp_init. +* +* SEE ALSO +* Generic Responder object, osm_resp_init, +* osm_resp_destroy +*********/ + +/****f* OpenSM: Generic Responder/osm_resp_destroy +* NAME +* osm_resp_destroy +* +* DESCRIPTION +* The osm_resp_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_resp_destroy( + IN osm_resp_t* const p_resp ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Generic Responder object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_resp_construct or osm_resp_init. +* +* SEE ALSO +* Generic Responder object, osm_resp_construct, +* osm_resp_init +*********/ + +/****f* OpenSM: Generic Responder/osm_resp_init +* NAME +* osm_resp_init +* +* DESCRIPTION +* The osm_resp_init function initializes a +* Generic Responder object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_resp_init( + IN osm_resp_t* const p_resp, + IN osm_mad_pool_t* const p_pool, + IN osm_vl15_t* const p_vl15, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to an osm_resp_t object to initialize. +* +* p_mad_pool +* [in] Pointer to the MAD pool. +* +* p_vl15 +* [in] Pointer to the VL15 interface. +* +* p_subn +* [in] Pointer to the subnet object. +* +* p_log +* [in] Pointer to the log object. +* +* RETURN VALUES +* IB_SUCCESS if the Generic Responder object was initialized +* successfully. +* +* NOTES +* Allows calling other Generic Responder methods. +* +* SEE ALSO +* Generic Responder object, osm_resp_construct, +* osm_resp_destroy +*********/ + +/****f* OpenSM: Generic Responder/osm_resp_send +* NAME +* osm_resp_send +* +* DESCRIPTION +* Starts the process to transmit a directed route response. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_resp_send( + IN const osm_resp_t* const p_resp, + IN const osm_madw_t* const p_req_madw, + IN const ib_net16_t status, + IN const uint8_t* const p_payload ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to an osm_resp_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper object for the requesting MAD +* to which this response is generated. +* +* status +* [in] Status for this response. +* +* p_payload +* [in] Pointer to the payload of the response MAD. +* +* RETURN VALUES +* IB_SUCCESS if the response was successful. 
+* +* NOTES +* +* SEE ALSO +* Generic Responder +*********/ + +END_C_DECLS + +#endif /* _OSM_RESP_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_router.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_router.h new file mode 100644 index 00000000..91b04166 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_router.h @@ -0,0 +1,323 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_router_t. + * This object represents an IBA router. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#ifndef _OSM_ROUTER_H_ +#define _OSM_ROUTER_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Router +* NAME +* Router +* +* DESCRIPTION +* The Router object encapsulates the information needed by the +* OpenSM to manage routers. The OpenSM allocates one router object +* per router in the IBA subnet. +* +* The Router object is not thread safe, thus callers must provide +* serialization. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Hal Rosenstock, Voltaire +* +*********/ + +/****s* OpenSM: Router/osm_router_t +* NAME +* osm_router_t +* +* DESCRIPTION +* Router structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_router +{ + cl_map_item_t map_item; + osm_port_t *p_port; +} osm_router_t; +/* +* FIELDS +* map_item +* Linkage structure for cl_qmap. MUST BE FIRST MEMBER! +* +* p_port +* Pointer to the Port object for this router. +* +* SEE ALSO +* Router object +*********/ + +/****f* OpenSM: Router/osm_router_construct +* NAME +* osm_router_construct +* +* DESCRIPTION +* This function constructs a Router object. 
+*
+* SYNOPSIS
+*/
+void
+osm_router_construct(
+	IN osm_router_t* const p_rtr );
+/*
+* PARAMETERS
+*	p_rtr
+*		[in] Pointer to a Router object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_router_init, and osm_router_destroy.
+*
+*	Calling osm_router_construct is a prerequisite to calling any other
+*	method except osm_router_init.
+*
+* SEE ALSO
+*	Router object, osm_router_init, osm_router_destroy
+*********/
+
+/****f* OpenSM: Router/osm_router_destroy
+* NAME
+*	osm_router_destroy
+*
+* DESCRIPTION
+*	The osm_router_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_router_destroy(
+	IN osm_router_t* const p_rtr );
+/*
+* PARAMETERS
+*	p_rtr
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_router_construct
+*	or osm_router_init.
+*
+* SEE ALSO
+*	Router object, osm_router_construct, osm_router_init
+*********/
+
+/****f* OpenSM: Router/osm_router_delete
+* NAME
+*	osm_router_delete
+*
+* DESCRIPTION
+*	Destroys and deallocates the object.
+*
+* SYNOPSIS
+*/
+void
+osm_router_delete(
+	IN OUT osm_router_t** const pp_rtr );
+/*
+* PARAMETERS
+*	pp_rtr
+*		[in/out] Pointer to a pointer to the object to destroy.
+*
+* RETURN VALUE
+*	None.
+*
+* NOTES
+*
+* SEE ALSO
+*	Router object, osm_router_construct, osm_router_init
+*********/
+
+/****f* OpenSM: Router/osm_router_init
+* NAME
+*	osm_router_init
+*
+* DESCRIPTION
+*	The osm_router_init function initializes a Router object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_router_init(
+	IN osm_router_t* const p_rtr,
+	IN osm_port_t* const p_port );
+/*
+* PARAMETERS
+*	p_rtr
+*		[in] Pointer to an osm_router_t object to initialize.
+*
+*	p_port
+*		[in] Pointer to the port object of this router.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Router object was initialized successfully.
+*
+* NOTES
+*	Allows calling other router methods.
+*
+* SEE ALSO
+*	Router object, osm_router_construct, osm_router_destroy
+*********/
+
+/****f* OpenSM: Router/osm_router_new
+* NAME
+*	osm_router_new
+*
+* DESCRIPTION
+*	The osm_router_new function allocates and initializes a Router
+*	object for use.
+*
+* SYNOPSIS
+*/
+osm_router_t*
+osm_router_new(
+	IN osm_port_t* const p_port );
+/*
+* PARAMETERS
+*	p_port
+*		[in] Pointer to the port object of this router.
+*
+* RETURN VALUES
+*	Pointer to the newly initialized router object.
+*
+* NOTES
+*
+* SEE ALSO
+*	Router object, osm_router_construct, osm_router_destroy
+*********/
+
+/****f* OpenSM: Router/osm_router_get_port_ptr
+* NAME
+*	osm_router_get_port_ptr
+*
+* DESCRIPTION
+*	Returns a pointer to the Port object for this router.
+*
+* SYNOPSIS
+*/
+static inline osm_port_t*
+osm_router_get_port_ptr(
+	IN const osm_router_t* const p_rtr )
+{
+	return( p_rtr->p_port );
+}
+/*
+* PARAMETERS
+*	p_rtr
+*		[in] Pointer to an osm_router_t object.
+*
+* RETURN VALUES
+*	Returns a pointer to the Port object for this router.
+*
+* NOTES
+*
+* SEE ALSO
+*	Router object
+*********/
+
+/****f* OpenSM: Router/osm_router_get_node_ptr
+* NAME
+*	osm_router_get_node_ptr
+*
+* DESCRIPTION
+*	Returns a pointer to the Node object for this router.
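+*
+*	A hedged end-to-end sketch tying the calls above together
+*	(p_port is an assumed, already-initialized osm_port_t):
+*
+*		osm_router_t *p_rtr = osm_router_new( p_port );
+*		if( p_rtr )
+*		{
+*			osm_node_t *p_node = osm_router_get_node_ptr( p_rtr );
+*			/* ... examine the router's node ... */
+*			osm_router_delete( &p_rtr );
+*		}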
+* +* SYNOPSIS +*/ +static inline osm_node_t* +osm_router_get_node_ptr( + IN const osm_router_t* const p_rtr ) +{ + return( p_rtr->p_port->p_node ); +} +/* +* PARAMETERS +* p_rtr +* [in] Pointer to an osm_router_t object. +* +* RETURN VALUES +* Returns a pointer to the Node object for this router. +* +* NOTES +* +* SEE ALSO +* Router object +*********/ + +END_C_DECLS + +#endif /* _OSM_ROUTER_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa.h new file mode 100644 index 00000000..de0f64d0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa.h @@ -0,0 +1,500 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sa_t. + * This object represents an IBA subnet. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#ifndef _OSM_SA_H_ +#define _OSM_SA_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SA +* NAME +* SA +* +* DESCRIPTION +* The SA object encapsulates the information needed by the +* OpenSM to instantiate a subnet administrator. The OpenSM allocates +* one SA object per subnet manager. +* +* The SA object is thread safe. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* Anil Keshavamurthy, Intel +* +*********/ + +/****d* OpenSM: SA/osm_sa_state_t +* NAME +* osm_sa_state_t +* +* DESCRIPTION +* Enumerates the possible states of SA object. 
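+*
+*	A hedged guard built on these states (the state field comes from
+*	the osm_sa_t structure declared below; the caller is hypothetical):
+*
+*		if( p_sa->state != OSM_SA_STATE_READY )
+*			return;	/* the SA is not ready to serve queries */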
+*
+* SYNOPSIS
+*/
+typedef enum _osm_sa_state
+{
+	OSM_SA_STATE_INIT = 0,
+	OSM_SA_STATE_READY
+
+} osm_sa_state_t;
+/***********/
+
+/****s* OpenSM: SA/osm_sa_t
+* NAME
+*	osm_sa_t
+*
+* DESCRIPTION
+*	Subnet Administration structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_sa
+{
+	osm_sa_state_t		state;
+	osm_subn_t		*p_subn;
+	osm_vendor_t		*p_vendor;
+	osm_log_t		*p_log;
+	osm_mad_pool_t		*p_mad_pool;
+	cl_dispatcher_t		*p_disp;
+	cl_plock_t		*p_lock;
+	atomic32_t		sa_trans_id;
+	osm_sa_mad_ctrl_t	mad_ctrl;
+	osm_sa_resp_t		resp;
+	osm_cpi_rcv_t		cpi_rcv;
+	osm_cpi_rcv_ctrl_t	cpi_rcv_ctrl;
+	osm_nr_rcv_t		nr_rcv;
+	osm_nr_rcv_ctrl_t	nr_rcv_ctrl;
+	osm_pir_rcv_t		pir_rcv;
+	osm_pir_rcv_ctrl_t	pir_rcv_ctrl;
+	osm_gir_rcv_t		gir_rcv;
+	osm_gir_rcv_ctrl_t	gir_rcv_ctrl;
+	osm_lr_rcv_t		lr_rcv;
+	osm_lr_rcv_ctrl_t	lr_rcv_ctrl;
+	osm_pr_rcv_t		pr_rcv;
+	osm_pr_rcv_ctrl_t	pr_rcv_ctrl;
+	osm_smir_rcv_t		smir_rcv;
+	osm_smir_ctrl_t		smir_ctrl;
+	osm_mcmr_recv_t		mcmr_rcv;
+	osm_mcmr_rcv_ctrl_t	mcmr_rcv_ctlr;
+	osm_sr_rcv_t		sr_rcv;
+	osm_sr_rcv_ctrl_t	sr_rcv_ctrl;
+#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP)
+	osm_mpr_rcv_t		mpr_rcv;
+	osm_mpr_rcv_ctrl_t	mpr_rcv_ctrl;
+#endif
+
+	/* InformInfo Receiver */
+	osm_infr_rcv_t		infr_rcv;
+	osm_infr_rcv_ctrl_t	infr_rcv_ctrl;
+
+	/* VL Arbitration Query */
+	osm_vlarb_rec_rcv_t	vlarb_rec_rcv;
+	osm_vlarb_rec_rcv_ctrl_t vlarb_rec_rcv_ctrl;
+
+	/* SLtoVL Map Query */
+	osm_slvl_rec_rcv_t	slvl_rec_rcv;
+	osm_slvl_rec_rcv_ctrl_t	slvl_rec_rcv_ctrl;
+
+	/* P_Key table Query */
+	osm_pkey_rec_rcv_t	pkey_rec_rcv;
+	osm_pkey_rec_rcv_ctrl_t	pkey_rec_rcv_ctrl;
+
+	/* LinearForwardingTable Query */
+	osm_lftr_rcv_t		lftr_rcv;
+	osm_lftr_rcv_ctrl_t	lftr_rcv_ctrl;
+
+	/* SwitchInfo Query */
+	osm_sir_rcv_t		sir_rcv;
+	osm_sir_rcv_ctrl_t	sir_rcv_ctrl;
+
+	/* MulticastForwardingTable Query */
+	osm_mftr_rcv_t		mftr_rcv;
+	osm_mftr_rcv_ctrl_t	mftr_rcv_ctrl;
+} osm_sa_t;
+/*
+* FIELDS
+*	state
+*		State of this SA object.
+*
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_vendor
+*		Pointer to the vendor specific interfaces object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_mad_pool
+*		Pointer to the MAD pool.
+*
+*	p_disp
+*		Pointer to the dispatcher.
+*
+*	p_lock
+*		Pointer to the lock used for serialization.
+*
+*	sa_trans_id
+*		Transaction ID.
+*
+*	mad_ctrl
+*		MAD Controller.
+*
+*	resp
+*		Response object.
+*
+*	nr_rcv
+*		NodeRecord Receiver.
+*
+*	nr_rcv_ctrl
+*		NodeRecord Receiver Controller.
+*
+*	pir_rcv
+*		PortInfoRecord Receiver.
+*
+*	pir_rcv_ctrl
+*		PortInfoRecord Receiver Controller.
+*
+*	lr_rcv
+*		LinkRecord Receiver.
+*
+*	lr_rcv_ctrl
+*		LinkRecord Receiver Controller.
+*
+*	pr_rcv
+*		PathRecord Receiver.
+*
+*	pr_rcv_ctrl
+*		PathRecord Receiver Controller.
+*
+*	smir_rcv
+*		SMInfoRecord Receiver.
+*
+*	smir_ctrl
+*		SMInfoRecord Receiver Controller.
+*
+* SEE ALSO
+*	SA object
+*********/
+
+/****f* OpenSM: SA/osm_sa_construct
+* NAME
+*	osm_sa_construct
+*
+* DESCRIPTION
+*	This function constructs an SA object.
+*
+* SYNOPSIS
+*/
+void osm_sa_construct(
+	IN osm_sa_t* const p_sa );
+/*
+* PARAMETERS
+*	p_sa
+*		[in] Pointer to an SA object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_sa_init, osm_sa_destroy, and osm_sa_is_inited.
+*
+*	Calling osm_sa_construct is a prerequisite to calling any other
+*	method except osm_sa_init.
+*
+* SEE ALSO
+*	SA object, osm_sa_init, osm_sa_destroy, osm_sa_is_inited
+*********/
+
+/****f* OpenSM: SA/osm_sa_shutdown
+* NAME
+*	osm_sa_shutdown
+*
+* DESCRIPTION
+*	The osm_sa_shutdown function shuts down an SA, unregistering it from
+*	all dispatcher messages and unbinding its QP1 MAD service.
+*
+* SYNOPSIS
+*/
+void osm_sa_shutdown(
+	IN osm_sa_t* const p_sa );
+/*
+* PARAMETERS
+*	p_sa
+*		[in] Pointer to an SA object to shut down.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*	SA object, osm_sa_construct, osm_sa_init
+*********/
+
+/****f* OpenSM: SA/osm_sa_destroy
+* NAME
+*	osm_sa_destroy
+*
+* DESCRIPTION
+*	The osm_sa_destroy function destroys an SA, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_sa_destroy(
+	IN osm_sa_t* const p_sa );
+/*
+* PARAMETERS
+*	p_sa
+*		[in] Pointer to an SA object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified SA object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to osm_sa_construct or
+*	osm_sa_init.
+*
+* SEE ALSO
+*	SA object, osm_sa_construct, osm_sa_init
+*********/
+
+/****f* OpenSM: SA/osm_sa_init
+* NAME
+*	osm_sa_init
+*
+* DESCRIPTION
+*	The osm_sa_init function initializes an SA object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_sa_init(
+	IN osm_sm_t* const p_sm,
+	IN osm_sa_t* const p_sa,
+	IN osm_subn_t* const p_subn,
+	IN osm_vendor_t* const p_vendor,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN osm_log_t* const p_log,
+	IN osm_stats_t* const p_stats,
+	IN cl_dispatcher_t* const p_disp,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_sm
+*		[in] Pointer to the SM object for this subnet.
+*
+*	p_sa
+*		[in] Pointer to an osm_sa_t object to initialize.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_vendor
+*		[in] Pointer to the vendor specific interfaces object.
+*
+*	p_mad_pool
+*		[in] Pointer to the MAD pool.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_stats
+*		[in] Pointer to the statistics object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the SA object was initialized successfully.
+*
+* NOTES
+*	Allows calling other SA methods.
+*
+* SEE ALSO
+*	SA object, osm_sa_construct, osm_sa_destroy,
+*	osm_sa_is_inited
+*********/
+
+/****f* OpenSM: SA/osm_sa_is_inited
+* NAME
+*	osm_sa_is_inited
+*
+* DESCRIPTION
+*	Indicates if the object has been initialized with osm_sa_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_sa_is_inited(
+	IN const osm_sa_t* const p_sa );
+/*
+* PARAMETERS
+*	p_sa
+*		[in] Pointer to an osm_sa_t object.
+*
+* RETURN VALUES
+*	TRUE if the object was initialized successfully,
+*	FALSE otherwise.
+*
+* NOTES
+*	The osm_sa_construct or osm_sa_init must be called before using
+*	this function.
+*
+* SEE ALSO
+*	SA object, osm_sa_construct, osm_sa_init
+*********/
+
+/****f* OpenSM: SA/osm_sa_bind
+* NAME
+*	osm_sa_bind
+*
+* DESCRIPTION
+*	Binds the SA object to a port guid.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sa_bind(
+	IN osm_sa_t* const p_sa,
+	IN const ib_net64_t port_guid );
+/*
+* PARAMETERS
+*	p_sa
+*		[in] Pointer to an osm_sa_t object to bind.
+*
+*	port_guid
+*		[in] Local port GUID with which to bind.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the SA object was bound to the port successfully.
+*
+* NOTES
+*	A given SA object can only be bound to one port at a time.
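+*
+*	A hedged bring-up sketch (every pointer argument is assumed to
+*	have been initialized elsewhere; error handling abbreviated):
+*
+*		ib_api_status_t status;
+*
+*		osm_sa_construct( &sa );
+*		status = osm_sa_init( p_sm, &sa, p_subn, p_vendor,
+*			p_mad_pool, p_log, p_stats, p_disp, p_lock );
+*		if( status == IB_SUCCESS )
+*			status = osm_sa_bind( &sa, port_guid );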
+* +* SEE ALSO +*********/ + +struct _osm_opensm_t; +/****f* OpenSM: SA/osm_sa_db_file_dump +* NAME +* osm_sa_db_file_dump +* +* DESCRIPTION +* Dumps the SA DB to the dump file. +* +* SYNOPSIS +*/ +int osm_sa_db_file_dump(struct _osm_opensm_t *p_osm); +/* +* PARAMETERS +* p_osm +* [in] Pointer to an osm_opensm_t object. +* +* RETURN VALUES +* None +* +*********/ + +/****f* OpenSM: SA/osm_sa_db_file_load +* NAME +* osm_sa_db_file_load +* +* DESCRIPTION +* Loads SA DB from the file. +* +* SYNOPSIS +*/ +int osm_sa_db_file_load(struct _osm_opensm_t *p_osm); +/* +* PARAMETERS +* p_osm +* [in] Pointer to an osm_opensm_t object. +* +* RETURN VALUES +* 0 on success, other value on failure. +* +*********/ + +END_C_DECLS + +#endif /* _OSM_SA_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info.h new file mode 100644 index 00000000..3869dbce --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_cpi_rcv_t. + * This object represents the ClassPortInfo Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.2 $ + */ + +#ifndef _OSM_CPI_H_ +#define _OSM_CPI_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/ClassPort Info Receiver +* NAME +* ClassPort Info Receiver +* +* DESCRIPTION +* The ClassPort Info Receiver object encapsulates the information +* needed to receive the ClassPortInfo request from a node. +* +* The ClassPort Info Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. 
+*
+* AUTHOR
+*	Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: ClassPort Info Receiver/osm_cpi_rcv_t
+* NAME
+*	osm_cpi_rcv_t
+*
+* DESCRIPTION
+*	ClassPort Info Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_cpi_rcv
+{
+	osm_subn_t		*p_subn;
+	osm_sa_resp_t		*p_resp;
+	osm_mad_pool_t		*p_mad_pool;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+
+} osm_cpi_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_resp
+*		Pointer to the SA responder.
+*
+*	p_mad_pool
+*		Pointer to the MAD pool.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+* SEE ALSO
+*	ClassPort Info Receiver object
+*********/
+
+/****f* OpenSM: ClassPort Info Receiver/osm_cpi_rcv_construct
+* NAME
+*	osm_cpi_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs a ClassPort Info Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_cpi_rcv_construct(
+	IN osm_cpi_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to a ClassPort Info Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_cpi_rcv_init, osm_cpi_rcv_destroy.
+*
+*	Calling osm_cpi_rcv_construct is a prerequisite to calling any other
+*	method except osm_cpi_rcv_init.
+*
+* SEE ALSO
+*	ClassPort Info Receiver object, osm_cpi_rcv_init, osm_cpi_rcv_destroy
+*********/
+
+/****f* OpenSM: ClassPort Info Receiver/osm_cpi_rcv_destroy
+* NAME
+*	osm_cpi_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_cpi_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_cpi_rcv_destroy(
+	IN osm_cpi_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	ClassPort Info Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_cpi_rcv_construct or osm_cpi_rcv_init.
+*
+* SEE ALSO
+*	ClassPort Info Receiver object, osm_cpi_rcv_construct,
+*	osm_cpi_rcv_init
+*********/
+
+/****f* OpenSM: ClassPort Info Receiver/osm_cpi_rcv_init
+* NAME
+*	osm_cpi_rcv_init
+*
+* DESCRIPTION
+*	The osm_cpi_rcv_init function initializes a
+*	ClassPort Info Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_cpi_rcv_init(
+	IN osm_cpi_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_cpi_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder.
+*
+*	p_mad_pool
+*		[in] Pointer to the MAD pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the ClassPort Info Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other ClassPort Info Receiver methods.
+*
+* SEE ALSO
+*	ClassPort Info Receiver object, osm_cpi_rcv_construct,
+*	osm_cpi_rcv_destroy
+*********/
+
+/****f* OpenSM: ClassPort Info Receiver/osm_cpi_rcv_process
+* NAME
+*	osm_cpi_rcv_process
+*
+* DESCRIPTION
+*	Process the ClassPortInfo request.
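+*
+*	In practice this handler runs from a dispatcher callback; a hedged
+*	sketch of such a wrapper (the (context, data) callback signature
+*	follows the complib dispatcher convention and is an assumption
+*	here, as is the wrapper name):
+*
+*		static void
+*		__osm_cpi_rcv_callback(
+*			IN void *context,
+*			IN void *p_data )
+*		{
+*			osm_cpi_rcv_process( (osm_cpi_rcv_t*)context,
+*				(osm_madw_t*)p_data );
+*		}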
+*
+* SYNOPSIS
+*/
+void
+osm_cpi_rcv_process(
+	IN osm_cpi_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_cpi_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the ClassPortInfo attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a ClassPortInfo attribute.
+*
+* SEE ALSO
+*	ClassPort Info Receiver, ClassPort Info Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_CPI_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info_ctrl.h
new file mode 100644
index 00000000..58c3c91d
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_class_port_info_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_cpi_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA ClassPortInfo
+ *	attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+
+#ifndef _OSM_CPICTRL_H_
+#define _OSM_CPICTRL_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Class Port Info Receive Controller
+* NAME
+*	Class Port Info Receive Controller
+*
+* DESCRIPTION
+*	The Class Port Info Receive Controller object encapsulates
+*	the information needed to receive the ClassPortInfo attribute
+*	query from a node.
+*
+*	The ClassPortInfo Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: ClassPort Info Receive Controller/osm_cpi_rcv_ctrl_t
+* NAME
+*	osm_cpi_rcv_ctrl_t
+*
+* DESCRIPTION
+*	ClassPort Info Receive Controller structure.
+* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_cpi_rcv_ctrl +{ + osm_cpi_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_cpi_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the ClassPort Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Class Port Info Receive Controller object +* Class Port Info Receiver object +*********/ + +/****f* OpenSM: Class Port Info Receive Controller/osm_cpi_rcv_ctrl_construct +* NAME +* osm_cpi_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Class Port Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_cpi_rcv_ctrl_construct( + IN osm_cpi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Class Port Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_cpi_rcv_ctrl_init, osm_cpi_rcv_ctrl_destroy, +* and osm_cpi_rcv_ctrl_is_inited. +* +* Calling osm_cpi_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_cpi_rcv_ctrl_init. +* +* SEE ALSO +* Class Port Info Receive Controller object, osm_cpi_rcv_ctrl_init, +* osm_cpi_rcv_ctrl_destroy, osm_cpi_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Class Port Info Receive Controller/osm_cpi_rcv_ctrl_destroy +* NAME +* osm_cpi_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_cpi_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_cpi_rcv_ctrl_destroy( + IN osm_cpi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Class Port Info Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_cpi_rcv_ctrl_construct or osm_cpi_rcv_ctrl_init. +* +* SEE ALSO +* Class Port Info Receive Controller object, osm_cpi_rcv_ctrl_construct, +* osm_cpi_rcv_ctrl_init +*********/ + +/****f* OpenSM: Class Port Info Receive Controller/osm_cpi_rcv_ctrl_init +* NAME +* osm_cpi_rcv_ctrl_init +* +* DESCRIPTION +* The osm_cpi_rcv_ctrl_init function initializes a +* Class Port Info Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_cpi_rcv_ctrl_init( + IN osm_cpi_rcv_ctrl_t* const p_ctrl, + IN osm_cpi_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_cpi_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_cpi_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Class Port Info Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Class Port Info Receive Controller methods. 
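+*
+*	A hedged wiring sketch pairing the controller with its receiver
+*	(the receiver, log, and dispatcher are assumed to be set up
+*	elsewhere):
+*
+*		ib_api_status_t status;
+*
+*		osm_cpi_rcv_ctrl_construct( &cpi_ctrl );
+*		status = osm_cpi_rcv_ctrl_init( &cpi_ctrl, &cpi_rcv,
+*			p_log, p_disp );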
+* +* SEE ALSO +* Class Port Info Receive Controller object, osm_cpi_rcv_ctrl_construct, +* osm_cpi_rcv_ctrl_destroy, osm_cpi_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Class Port Info Receive Controller/osm_cpi_rcv_ctrl_is_inited +* NAME +* osm_cpi_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_cpi_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_cpi_rcv_ctrl_is_inited( + IN const osm_cpi_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_cpi_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_cpi_rcv_ctrl_construct or osm_cpi_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Class Port Info Receive Controller object, osm_cpi_rcv_ctrl_construct, +* osm_cpi_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_CPICTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record.h new file mode 100644 index 00000000..d9bd48e3 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record.h @@ -0,0 +1,279 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_gir_rcv_t. + * This object represents the GUIDInfo Record Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#ifndef _OSM_GIR_RCV_H_ +#define _OSM_GIR_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/GUIDInfo Record Receiver +* NAME +* GUIDInfo Record Receiver +* +* DESCRIPTION +* The GUIDInfo Record Receiver object encapsulates the information +* needed to receive the GUIDInfoRecord attribute from a node. +* +* The GUIDInfo Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. 
+*
+* AUTHOR
+*	Hal Rosenstock, Voltaire
+*
+*********/
+
+/****s* OpenSM: GUIDInfo Record Receiver/osm_gir_rcv_t
+* NAME
+*	osm_gir_rcv_t
+*
+* DESCRIPTION
+*	GUIDInfo Record Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_gir_rcv
+{
+	const osm_subn_t	*p_subn;
+	osm_sa_resp_t		*p_resp;
+	osm_mad_pool_t		*p_mad_pool;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+	cl_qlock_pool_t		pool;
+
+} osm_gir_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_resp
+*		Pointer to the SA responder.
+*
+*	p_mad_pool
+*		Pointer to the mad pool.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+*	pool
+*		Pool of linkable GUIDInfo Record objects used to generate
+*		the query response.
+*
+* SEE ALSO
+*
+*********/
+
+/****f* OpenSM: GUIDInfo Record Receiver/osm_gir_rcv_construct
+* NAME
+*	osm_gir_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs a GUIDInfo Record Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_gir_rcv_construct(
+	IN osm_gir_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to a GUIDInfo Record Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_gir_rcv_init, osm_gir_rcv_destroy.
+*
+*	Calling osm_gir_rcv_construct is a prerequisite to calling any other
+*	method except osm_gir_rcv_init.
+*
+* SEE ALSO
+*	GUIDInfo Record Receiver object, osm_gir_rcv_init,
+*	osm_gir_rcv_destroy
+*********/
+
+/****f* OpenSM: GUIDInfo Record Receiver/osm_gir_rcv_destroy
+* NAME
+*	osm_gir_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_gir_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_gir_rcv_destroy(
+	IN osm_gir_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	GUIDInfo Record Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_gir_rcv_construct or osm_gir_rcv_init.
+*
+* SEE ALSO
+*	GUIDInfo Record Receiver object, osm_gir_rcv_construct,
+*	osm_gir_rcv_init
+*********/
+
+/****f* OpenSM: GUIDInfo Record Receiver/osm_gir_rcv_init
+* NAME
+*	osm_gir_rcv_init
+*
+* DESCRIPTION
+*	The osm_gir_rcv_init function initializes a
+*	GUIDInfo Record Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_gir_rcv_init(
+	IN osm_gir_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN const osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_gir_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder.
+*
+*	p_mad_pool
+*		[in] Pointer to the MAD pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the GUIDInfo Record Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other GUIDInfo Record Receiver methods.
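+*
+*	A hedged initialization sketch (arguments assumed valid):
+*
+*		ib_api_status_t status;
+*
+*		osm_gir_rcv_construct( &gir_rcv );
+*		status = osm_gir_rcv_init( &gir_rcv, p_resp, p_mad_pool,
+*			p_subn, p_log, p_lock );
+*		if( status != IB_SUCCESS )
+*			osm_gir_rcv_destroy( &gir_rcv );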
+*
+* SEE ALSO
+*	GUIDInfo Record Receiver object, osm_gir_rcv_construct,
+*	osm_gir_rcv_destroy
+*********/
+
+/****f* OpenSM: GUIDInfo Record Receiver/osm_gir_rcv_process
+* NAME
+*	osm_gir_rcv_process
+*
+* DESCRIPTION
+*	Process the GUIDInfoRecord attribute.
+*
+* SYNOPSIS
+*/
+void
+osm_gir_rcv_process(
+	IN osm_gir_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_gir_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's GUIDInfoRecord attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a GUIDInfoRecord attribute.
+*
+* SEE ALSO
+*	GUIDInfo Record Receiver, GUIDInfo Record Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_GIR_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record_ctrl.h
new file mode 100644
index 00000000..f92b5a71
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_guidinfo_record_ctrl.h
@@ -0,0 +1,230 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_gir_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA
+ *	GUIDInfoRecord query from an SA client.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ */
+
+#ifndef _OSM_GIR_CTRL_H_
+#define _OSM_GIR_CTRL_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/GUID Info Record Receive Controller
+* NAME
+*	GUID Info Record Receive Controller
+*
+* DESCRIPTION
+*	The GUID Info Record Receive Controller object encapsulates
+*	the information needed to handle the GUID Info record query from
+*	an SA client.
+*
+*	The GUID Info Record Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+* +* AUTHOR +* Hal Rosenstock, Voltaire +* +*********/ + +/****s* OpenSM: GUID Info Record Receive Controller/osm_gir_rcv_ctrl_t +* NAME +* osm_gir_rcv_ctrl_t +* +* DESCRIPTION +* GUID Info Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_gir_rcv_ctrl +{ + osm_gir_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_gir_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the GUID Info Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* GUID Info Record Receive Controller object +* GUID Info Record Receiver object +*********/ + +/****f* OpenSM: GUID Info Record Receive Controller/osm_gir_rec_rcv_ctrl_construct +* NAME +* osm_gir_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a GUID Info Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_gir_rcv_ctrl_construct( + IN osm_gir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a GUID Info Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_gir_rcv_ctrl_init, osm_gir_rcv_ctrl_destroy +* +* Calling osm_gir_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_gir_rcv_ctrl_init. +* +* SEE ALSO +* GUID Info Record Receive Controller object, osm_gir_rcv_ctrl_init, +* osm_gir_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: GUID Info Record Receive Controller/osm_gir_rcv_ctrl_destroy +* NAME +* osm_gir_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_gir_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_gir_rcv_ctrl_destroy( + IN osm_gir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* GUIDInfo Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_gir_rcv_ctrl_construct or osm_gir_rcv_ctrl_init. +* +* SEE ALSO +* GUIDInfo Record Receive Controller object, osm_gir_rcv_ctrl_construct, +* osm_gir_rcv_ctrl_init +*********/ + +/****f* OpenSM: GUID Info Record Receive Controller/osm_gir_rcv_ctrl_init +* NAME +* osm_gir_rcv_ctrl_init +* +* DESCRIPTION +* The osm_gir_rcv_ctrl_init function initializes a +* GUID Info Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_gir_rcv_ctrl_init( + IN osm_gir_rcv_ctrl_t* const p_ctrl, + IN osm_gir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_gir_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_gir_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the GUID Info Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other GUID Info Record Receive Controller methods. 
+*
+* SEE ALSO
+*	GUID Info Record Receive Controller object, osm_gir_rcv_ctrl_construct,
+*	osm_gir_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_GIR_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo.h
new file mode 100644
index 00000000..ac12208e
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_infr_rcv_t.
+ *	This object represents the InformInfo Receiver object, which handles
+ *	the InformInfo and InformInfoRecord attributes from a node.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#ifndef _OSM_SA_INFR_H_
+#define _OSM_SA_INFR_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/InformInfo Receiver
+* NAME
+*	InformInfo Receiver
+*
+* DESCRIPTION
+*	The InformInfo Receiver object encapsulates the information
+*	needed to receive the InformInfo request from a node.
+*
+*	The InformInfo Receiver object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: InformInfo Receiver/osm_infr_rcv_t
+* NAME
+*	osm_infr_rcv_t
+*
+* DESCRIPTION
+*	InformInfo Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_infr_rcv
+{
+	osm_subn_t		*p_subn;
+	osm_sa_resp_t		*p_resp;
+	osm_mad_pool_t		*p_mad_pool;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+	cl_qlock_pool_t		pool;
+} osm_infr_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_resp
+*		Pointer to the osm_sa_resp_t object.
+*
+*	p_mad_pool
+*		Pointer to the MAD pool.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+*	pool
+*		Pool of linkable InformInfo Record objects used to
+*		generate the query response.
+*
+* SEE ALSO
+*	InformInfo Receiver object
+*********/
+
+/****f* OpenSM: InformInfo Receiver/osm_infr_rcv_construct
+* NAME
+*	osm_infr_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs an InformInfo Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_infr_rcv_construct(
+	IN osm_infr_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an InformInfo Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_infr_rcv_init, osm_infr_rcv_destroy.
+*
+*	Calling osm_infr_rcv_construct is a prerequisite to calling any other
+*	method except osm_infr_rcv_init.
+*
+* SEE ALSO
+*	InformInfo Receiver object, osm_infr_rcv_init, osm_infr_rcv_destroy
+*********/
+
+/****f* OpenSM: InformInfo Receiver/osm_infr_rcv_destroy
+* NAME
+*	osm_infr_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_infr_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_infr_rcv_destroy(
+	IN osm_infr_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	InformInfo Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_infr_rcv_construct or osm_infr_rcv_init.
+*
+* SEE ALSO
+*	InformInfo Receiver object, osm_infr_rcv_construct,
+*	osm_infr_rcv_init
+*********/
+
+/****f* OpenSM: InformInfo Receiver/osm_infr_rcv_init
+* NAME
+*	osm_infr_rcv_init
+*
+* DESCRIPTION
+*	The osm_infr_rcv_init function initializes an
+*	InformInfo Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_infr_rcv_init(
+	IN osm_infr_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_infr_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder.
+*
+*	p_mad_pool
+*		[in] Pointer to the MAD pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the InformInfo Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other InformInfo Receiver methods.
+*
+* SEE ALSO
+*	InformInfo Receiver object, osm_infr_rcv_construct,
+*	osm_infr_rcv_destroy
+*********/
+
+/****f* OpenSM: InformInfo Receiver/osm_infr_rcv_process
+* NAME
+*	osm_infr_rcv_process
+*
+* DESCRIPTION
+*	Process the InformInfo request.
+*
+* SYNOPSIS
+*/
+void
+osm_infr_rcv_process(
+	IN osm_infr_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_infr_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's InformInfo attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes an InformInfo attribute.
+*
+* SEE ALSO
+*	InformInfo Receiver
+*********/
+
+/****f* OpenSM: InformInfo Record Receiver/osm_infir_rcv_process
+* NAME
+*	osm_infir_rcv_process
+*
+* DESCRIPTION
+*	Process the InformInfo Record request.
+*
+* SYNOPSIS
+*/
+void
+osm_infir_rcv_process(
+	IN osm_infr_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_infr_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's InformInfo Record attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes an InformInfo Record attribute.
+*
+* SEE ALSO
+*	InformInfo Receiver
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SA_INFR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo_ctrl.h
new file mode 100644
index 00000000..4c23bd90
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_informinfo_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_infr_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA InformInfo
+ *	Set method attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#ifndef _OSM_INFR_RCV_CTRL_H_
+#define _OSM_INFR_RCV_CTRL_H_
+
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/InformInfo Receive Controller
+* NAME
+*	InformInfo Receive Controller
+*
+* DESCRIPTION
+*	The InformInfo Receive Controller object encapsulates
+*	the information needed to receive the InformInfo attribute from a node.
+*
+*	The InformInfo Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: InformInfo Receive Controller/osm_infr_rcv_ctrl_t
+* NAME
+*	osm_infr_rcv_ctrl_t
+*
+* DESCRIPTION
+*	InformInfo Receive Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_infr_rcv_ctrl
+{
+	osm_infr_rcv_t		*p_rcv;
+	osm_log_t		*p_log;
+	cl_dispatcher_t		*p_disp;
+	cl_disp_reg_handle_t	h_disp;
+	cl_disp_reg_handle_t	h_disp2;
+} osm_infr_rcv_ctrl_t;
+/*
+* FIELDS
+*	p_rcv
+*		Pointer to the InformInfo Receiver object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_disp
+*		Pointer to the Dispatcher.
+*
+*	h_disp
+*		Handle returned from dispatcher registration.
+*
+*	h_disp2
+*		Handle returned from the second dispatcher registration
+*		(for the InformInfoRecord query).
+*
+* SEE ALSO
+*	InformInfo Receive Controller object
+*	InformInfo Receiver object
+*********/
+
+/****f* OpenSM: InformInfo Receive Controller/osm_infr_rcv_ctrl_construct
+* NAME
+*	osm_infr_rcv_ctrl_construct
+*
+* DESCRIPTION
+*	This function constructs an InformInfo Receive Controller object.
+*
+* SYNOPSIS
+*/
+void osm_infr_rcv_ctrl_construct(
+	IN osm_infr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an InformInfo Receive Controller
+*		object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_infr_rcv_ctrl_init, osm_infr_rcv_ctrl_destroy,
+*	and osm_infr_rcv_ctrl_is_inited.
+*
+*	Calling osm_infr_rcv_ctrl_construct is a prerequisite to calling any
+*	other method except osm_infr_rcv_ctrl_init.
+*
+* SEE ALSO
+*	InformInfo Receive Controller object, osm_infr_rcv_ctrl_init,
+*	osm_infr_rcv_ctrl_destroy, osm_infr_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: InformInfo Receive Controller/osm_infr_rcv_ctrl_destroy
+* NAME
+*	osm_infr_rcv_ctrl_destroy
+*
+* DESCRIPTION
+*	The osm_infr_rcv_ctrl_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_infr_rcv_ctrl_destroy(
+	IN osm_infr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	InformInfo Receive Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_infr_rcv_ctrl_construct or osm_infr_rcv_ctrl_init.
+*
+* SEE ALSO
+*	InformInfo Receive Controller object, osm_infr_rcv_ctrl_construct,
+*	osm_infr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: InformInfo Receive Controller/osm_infr_rcv_ctrl_init
+* NAME
+*	osm_infr_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_infr_rcv_ctrl_init function initializes an
+*	InformInfo Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_infr_rcv_ctrl_init(
+	IN osm_infr_rcv_ctrl_t* const p_ctrl,
+	IN osm_infr_rcv_t* const p_rcv,
+	IN osm_log_t* const p_log,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_infr_rcv_ctrl_t object to initialize.
+*
+*	p_rcv
+*		[in] Pointer to an osm_infr_rcv_t object.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the InformInfo Receive Controller object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other InformInfo Receive Controller methods.
+*
+* SEE ALSO
+*	InformInfo Receive Controller object, osm_infr_rcv_ctrl_construct,
+*	osm_infr_rcv_ctrl_destroy, osm_infr_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: InformInfo Receive Controller/osm_infr_rcv_ctrl_is_inited
+* NAME
+*	osm_infr_rcv_ctrl_is_inited
+*
+* DESCRIPTION
+*	Indicates if the object has been initialized with osm_infr_rcv_ctrl_init.
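+*
+*	A hedged guard built on this predicate (the caller and the
+*	IB_ERROR return are illustrative assumptions):
+*
+*		if( !osm_infr_rcv_ctrl_is_inited( &infr_ctrl ) )
+*			return( IB_ERROR );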
+*
+* SYNOPSIS
+*/
+boolean_t osm_infr_rcv_ctrl_is_inited(
+ IN const osm_infr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_infr_rcv_ctrl_t object.
+*
+* RETURN VALUES
+* TRUE if the object was initialized successfully,
+* FALSE otherwise.
+*
+* NOTES
+* Either osm_infr_rcv_ctrl_construct or osm_infr_rcv_ctrl_init must be
+* called before using this function.
+*
+* SEE ALSO
+* InformInfo Receive Controller object, osm_infr_rcv_ctrl_construct,
+* osm_infr_rcv_ctrl_init
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_INFR_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record.h
new file mode 100644
index 00000000..6b543e66
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record.h
@@ -0,0 +1,281 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_lftr_rcv_t.
+ * This object represents the LinearForwardingTable Receiver object,
+ * which receives the LinearForwardingTable attribute from a switch node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LFTR_H_
+#define _OSM_LFTR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Linear Forwarding Table Receiver
+* NAME
+* Linear Forwarding Table Receiver
+*
+* DESCRIPTION
+* The Linear Forwarding Table Receiver object encapsulates the information
+* needed to receive the LinearForwardingTable attribute from a switch node.
+*
+* The Linear Forwarding Table Receiver object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Eitan Zahavi, Mellanox Technologies LTD
+*
+*********/
+
+/****s* OpenSM: Linear Forwarding Table Receiver/osm_lftr_rcv_t
+* NAME
+* osm_lftr_rcv_t
+*
+* DESCRIPTION
+* Linear Forwarding Table Receiver structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lft
+{
+ osm_subn_t* p_subn;
+ osm_stats_t* p_stats;
+ osm_sa_resp_t* p_resp;
+ osm_mad_pool_t* p_mad_pool;
+ osm_log_t* p_log;
+ cl_plock_t* p_lock;
+ cl_qlock_pool_t pool;
+} osm_lftr_rcv_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_stats
+* Pointer to the statistics.
+*
+* p_resp
+* Pointer to the SA responder.
+*
+* p_mad_pool
+* Pointer to the mad pool.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* pool
+* Pool of linkable Linear Forwarding Table Record objects used to
+* generate the query response.
+*
+* SEE ALSO
+* Linear Forwarding Table Receiver object
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receiver/osm_lftr_rcv_construct
+* NAME
+* osm_lftr_rcv_construct
+*
+* DESCRIPTION
+* This function constructs a Linear Forwarding Table Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_lftr_rcv_construct(
+ IN osm_lftr_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Linear Forwarding Table Receiver object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_lftr_rcv_init, osm_lftr_rcv_destroy
+*
+* Calling osm_lftr_rcv_construct is a prerequisite to calling any other
+* method except osm_lftr_rcv_init.
+*
+* SEE ALSO
+* Linear Forwarding Table Receiver object, osm_lftr_rcv_init,
+* osm_lftr_rcv_destroy
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receiver/osm_lftr_rcv_destroy
+* NAME
+* osm_lftr_rcv_destroy
+*
+* DESCRIPTION
+* The osm_lftr_rcv_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_lftr_rcv_destroy(
+ IN osm_lftr_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Linear Forwarding Table Receiver object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_lftr_rcv_construct or osm_lftr_rcv_init.
+*
+* SEE ALSO
+* Linear Forwarding Table Receiver object, osm_lftr_rcv_construct,
+* osm_lftr_rcv_init
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receiver/osm_lftr_rcv_init
+* NAME
+* osm_lftr_rcv_init
+*
+* DESCRIPTION
+* The osm_lftr_rcv_init function initializes a
+* Linear Forwarding Table Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_lftr_rcv_init(
+ IN osm_lftr_rcv_t* const p_rcv,
+ IN osm_sa_resp_t* const p_resp,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+* p_rcv
+* [in] Pointer to an osm_lftr_rcv_t object to initialize.
+*
+* p_resp
+* [in] Pointer to the SA Responder object.
+*
+* p_mad_pool
+* [in] Pointer to the mad pool.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Linear Forwarding Table Receiver object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Linear Forwarding Table Receiver methods.
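+*
+* A typical lifecycle, matching the SYNOPSIS above (sketch; the
+* surrounding objects are assumed to already exist):
+*
+* osm_lftr_rcv_t lftr;
+* osm_lftr_rcv_construct( &lftr );
+* if( osm_lftr_rcv_init( &lftr, p_resp, p_mad_pool,
+* p_subn, p_log, p_lock ) != IB_SUCCESS )
+* osm_lftr_rcv_destroy( &lftr );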
+*
+* SEE ALSO
+* Linear Forwarding Table Receiver object, osm_lftr_rcv_construct,
+* osm_lftr_rcv_destroy
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receiver/osm_lftr_rcv_process
+* NAME
+* osm_lftr_rcv_process
+*
+* DESCRIPTION
+* Process the LinearForwardingTable attribute.
+*
+* SYNOPSIS
+*/
+void osm_lftr_rcv_process(
+ IN osm_lftr_rcv_t* const p_ctrl,
+ IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_lftr_rcv_t object.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the MAD
+* that contains the switch node's LinearForwardingTable attribute.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function processes a LinearForwardingTable attribute.
+*
+* SEE ALSO
+* Linear Forwarding Table Receiver, Linear Forwarding Table Response
+* Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_LFTR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record_ctrl.h
new file mode 100644
index 00000000..1dd3a837
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_lft_record_ctrl.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_lftr_rcv_ctrl_t.
+ * This object represents a controller that receives the IBA
+ * LinearForwardingTable attribute from a switch.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LFTR_RCV_CTRL_H_
+#define _OSM_LFTR_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Linear Forwarding Table Receive Controller
+* NAME
+* Linear Forwarding Table Receive Controller
+*
+* DESCRIPTION
+* The Linear Forwarding Table Receive Controller object encapsulates
+* the information needed to receive the LinearForwardingTable attribute
+* from a switch node.
+*
+* The Linear Forwarding Table Receive Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Eitan Zahavi, Mellanox Technologies LTD
+*
+*********/
+
+/****s* OpenSM: Linear Forwarding Table Receive Controller/osm_lftr_rcv_ctrl_t
+* NAME
+* osm_lftr_rcv_ctrl_t
+*
+* DESCRIPTION
+* Linear Forwarding Table Receive Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lftr_rcv_ctrl
+{
+ osm_lftr_rcv_t *p_rcv;
+ osm_log_t *p_log;
+ cl_dispatcher_t *p_disp;
+ cl_disp_reg_handle_t h_disp;
+
+} osm_lftr_rcv_ctrl_t;
+/*
+* FIELDS
+* p_rcv
+* Pointer to the Linear Forwarding Table Receiver object.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
+*
+* SEE ALSO
+* Linear Forwarding Table Receive Controller object
+* Linear Forwarding Table Receiver object
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receive Controller/osm_lftr_rcv_ctrl_construct
+* NAME
+* osm_lftr_rcv_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a Linear Forwarding Table Receive
+* Controller object.
+*
+* SYNOPSIS
+*/
+void osm_lftr_rcv_ctrl_construct(
+ IN osm_lftr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Linear Forwarding Table Receive Controller
+* object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_lftr_rcv_ctrl_init, osm_lftr_rcv_ctrl_destroy
+*
+* Calling osm_lftr_rcv_ctrl_construct is a prerequisite to calling any other
+* method except osm_lftr_rcv_ctrl_init.
+*
+* SEE ALSO
+* Linear Forwarding Table Receive Controller object, osm_lftr_rcv_ctrl_init,
+* osm_lftr_rcv_ctrl_destroy
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receive Controller/osm_lftr_rcv_ctrl_destroy
+* NAME
+* osm_lftr_rcv_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_lftr_rcv_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_lftr_rcv_ctrl_destroy(
+ IN osm_lftr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Linear Forwarding Table Receive Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_lftr_rcv_ctrl_construct or osm_lftr_rcv_ctrl_init.
+*
+* SEE ALSO
+* Linear Forwarding Table Receive Controller object, osm_lftr_rcv_ctrl_construct,
+* osm_lftr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Linear Forwarding Table Receive Controller/osm_lftr_rcv_ctrl_init
+* NAME
+* osm_lftr_rcv_ctrl_init
+*
+* DESCRIPTION
+* The osm_lftr_rcv_ctrl_init function initializes a
+* Linear Forwarding Table Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_lftr_rcv_ctrl_init(
+ IN osm_lftr_rcv_ctrl_t* const p_ctrl,
+ IN osm_lftr_rcv_t* const p_rcv,
+ IN osm_log_t* const p_log,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_lftr_rcv_ctrl_t object to initialize.
+*
+* p_rcv
+* [in] Pointer to an osm_lftr_rcv_t object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Linear Forwarding Table Receive Controller object
+* was initialized successfully.
+*
+* NOTES
+* Allows calling other Linear Forwarding Table Receive Controller methods.
+*
+* SEE ALSO
+* Linear Forwarding Table Receive Controller object,
+* osm_lftr_rcv_ctrl_construct, osm_lftr_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_LFTR_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record.h
new file mode 100644
index 00000000..5c9b291c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_lr_rcv_t.
+ * This object represents the Link Record Receiver object,
+ * which receives the Link Record attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LR_RCV_H_
+#define _OSM_LR_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Link Record Receiver
+* NAME
+* Link Record Receiver
+*
+* DESCRIPTION
+* The Link Record Receiver object encapsulates the information
+* needed to receive the Link Record attribute from a node.
+*
+* The Link Record Receiver object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Ranjit Pandit, Intel
+*
+*********/
+
+/****s* OpenSM: Link Record Receiver/osm_lr_rcv_t
+* NAME
+* osm_lr_rcv_t
+*
+* DESCRIPTION
+* Link Record Receiver structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+* +* SYNOPSIS +*/ +typedef struct _osm_lr_rcv +{ + osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t lr_pool; + +} osm_lr_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the SA responder. +* +* p_mad_pool +* Pointer to the mad pool. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* lr_pool +* Pool of link record objects used to generate the query response. +* +* SEE ALSO +*********/ + +/****f* OpenSM: Link Record Receiver/osm_lr_rcv_construct +* NAME +* osm_lr_rcv_construct +* +* DESCRIPTION +* This function constructs a Link Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_lr_rcv_construct( + IN osm_lr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a Link Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_lr_rcv_init, osm_lr_rcv_destroy +* +* Calling osm_lr_rcv_construct is a prerequisite to calling any other +* method except osm_lr_rcv_init. +* +* SEE ALSO +* Link Record Receiver object, osm_lr_rcv_init, osm_lr_rcv_destroy +*********/ + +/****f* OpenSM: Link Record Receiver/osm_lr_rcv_destroy +* NAME +* osm_lr_rcv_destroy +* +* DESCRIPTION +* The osm_lr_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_lr_rcv_destroy( + IN osm_lr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Link Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_lr_rcv_construct or osm_lr_rcv_init. +* +* SEE ALSO +* Link Record Receiver object, osm_lr_rcv_construct, +* osm_lr_rcv_init +*********/ + +/****f* OpenSM: Link Record Receiver/osm_lr_rcv_init +* NAME +* osm_lr_rcv_init +* +* DESCRIPTION +* The osm_lr_rcv_init function initializes a +* Link Record Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_lr_rcv_init( + IN osm_lr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_lr_rcv_t object to initialize. +* +* p_resp +* [in] Pointer to the SA Responder object. +* +* p_mad_pool +* [in] Pointer to the mad pool. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the Link Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Link Record Receiver methods. +* +* SEE ALSO +* Link Record Receiver object, osm_lr_rcv_construct, osm_lr_rcv_destroy +*********/ + +/****f* OpenSM: Link Record Receiver/osm_lr_rcv_process +* NAME +* osm_lr_rcv_process +* +* DESCRIPTION +* Process the Link Record attribute. +* +* SYNOPSIS +*/ +void osm_lr_rcv_process( + IN osm_lr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_lr_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's Link Record attribute. 
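+*
+* A Dispatcher callback typically just forwards the wrapper (sketch;
+* the callback name is hypothetical, the signature is complib's
+* cl_pfn_msgrcv_cb_t):
+*
+* static void __osm_lr_rcv_process_cb( IN void* context, IN void* p_data )
+* {
+* osm_lr_rcv_process( (osm_lr_rcv_t*)context, (osm_madw_t*)p_data );
+* }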
+*
+* NOTES
+* This function processes a Link Record attribute.
+*
+* SEE ALSO
+* Link Record Receiver, Link Record Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_LR_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record_ctrl.h
new file mode 100644
index 00000000..2c9355d3
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_link_record_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_lr_rcv_ctrl_t.
+ * This object represents a controller that receives the IBA Link Record
+ * attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_LR_CTRL_H_
+#define _OSM_LR_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Link Record Receive Controller
+* NAME
+* Link Record Receive Controller
+*
+* DESCRIPTION
+* The Link Record Receive Controller object encapsulates
+* the information needed to receive the LinkRecord attribute from a node.
+*
+* The Link Record Receive Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Ranjit Pandit, Intel
+*
+*********/
+
+/****s* OpenSM: Link Record Receive Controller/osm_lr_rcv_ctrl_t
+* NAME
+* osm_lr_rcv_ctrl_t
+*
+* DESCRIPTION
+* Link Record Receive Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_lr_rcv_ctrl
+{
+ osm_lr_rcv_t *p_rcv;
+ osm_log_t *p_log;
+ cl_dispatcher_t *p_disp;
+ cl_disp_reg_handle_t h_disp;
+
+} osm_lr_rcv_ctrl_t;
+/*
+* FIELDS
+* p_rcv
+* Pointer to the Link Record Receiver object.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
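+*
+* The handle is obtained when the controller registers with the
+* Dispatcher, roughly as follows (sketch; the callback name is
+* hypothetical, cl_disp_register and the message ID come from
+* complib and osm_msgdef.h):
+*
+* p_ctrl->h_disp = cl_disp_register( p_disp, OSM_MSG_MAD_LINK_RECORD,
+* __osm_lr_rcv_ctrl_disp_cb, p_ctrl );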
+*
+* SEE ALSO
+* Link Record Receive Controller object
+* Link Record Receiver object
+*********/
+
+/****f* OpenSM: Link Record Receive Controller/osm_lr_rcv_ctrl_construct
+* NAME
+* osm_lr_rcv_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a Link Record Receive Controller object.
+*
+* SYNOPSIS
+*/
+void osm_lr_rcv_ctrl_construct(
+ IN osm_lr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Link Record Receive Controller
+* object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_lr_rcv_ctrl_init, osm_lr_rcv_ctrl_destroy,
+* and osm_lr_rcv_ctrl_is_inited.
+*
+* Calling osm_lr_rcv_ctrl_construct is a prerequisite to calling any other
+* method except osm_lr_rcv_ctrl_init.
+*
+* SEE ALSO
+* Link Record Receive Controller object, osm_lr_rcv_ctrl_init,
+* osm_lr_rcv_ctrl_destroy, osm_lr_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: Link Record Receive Controller/osm_lr_rcv_ctrl_destroy
+* NAME
+* osm_lr_rcv_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_lr_rcv_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_lr_rcv_ctrl_destroy(
+ IN osm_lr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Link Record Receive Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_lr_rcv_ctrl_construct or osm_lr_rcv_ctrl_init.
+*
+* SEE ALSO
+* Link Record Receive Controller object, osm_lr_rcv_ctrl_construct,
+* osm_lr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Link Record Receive Controller/osm_lr_rcv_ctrl_init
+* NAME
+* osm_lr_rcv_ctrl_init
+*
+* DESCRIPTION
+* The osm_lr_rcv_ctrl_init function initializes a
+* Link Record Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_lr_rcv_ctrl_init(
+ IN osm_lr_rcv_ctrl_t* const p_ctrl,
+ IN osm_lr_rcv_t* const p_rcv,
+ IN osm_log_t* const p_log,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_lr_rcv_ctrl_t object to initialize.
+*
+* p_rcv
+* [in] Pointer to an osm_lr_rcv_t object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Link Record Receive Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Link Record Receive Controller methods.
+*
+* SEE ALSO
+* Link Record Receive Controller object, osm_lr_rcv_ctrl_construct,
+* osm_lr_rcv_ctrl_destroy, osm_lr_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: Link Record Receive Controller/osm_lr_rcv_ctrl_is_inited
+* NAME
+* osm_lr_rcv_ctrl_is_inited
+*
+* DESCRIPTION
+* Indicates if the object has been initialized with osm_lr_rcv_ctrl_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_lr_rcv_ctrl_is_inited(
+ IN const osm_lr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_lr_rcv_ctrl_t object.
+*
+* RETURN VALUES
+* TRUE if the object was initialized successfully,
+* FALSE otherwise.
+*
+* NOTES
+* Either osm_lr_rcv_ctrl_construct or osm_lr_rcv_ctrl_init must be
+* called before using this function.
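+*
+* e.g. (sketch):
+*
+* CL_ASSERT( osm_lr_rcv_ctrl_is_inited( p_ctrl ) );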
+* +* SEE ALSO +* Link Record Receive Controller object, osm_lr_rcv_ctrl_construct, +* osm_lr_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_LR_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mad_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mad_ctrl.h new file mode 100644 index 00000000..378618e5 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mad_ctrl.h @@ -0,0 +1,352 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sa_mad_ctrl_t. + * This object represents a controller that receives the IBA SA + * attributes from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SA_MAD_CTRL_H_ +#define _OSM_SA_MAD_CTRL_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SA MAD Controller +* NAME +* SA MAD Controller +* +* DESCRIPTION +* The SA MAD Controller object encapsulates +* the information needed to receive MADs from the transport layer. +* +* The SA MAD Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: SA MAD Controller/osm_sa_mad_ctrl_t +* NAME +* osm_sa_mad_ctrl_t +* +* DESCRIPTION +* SA MAD Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sa_mad_ctrl +{ + osm_log_t *p_log; + osm_mad_pool_t *p_mad_pool; + osm_vendor_t *p_vendor; + osm_bind_handle_t h_bind; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + osm_stats_t *p_stats; + osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; +} osm_sa_mad_ctrl_t; +/* +* FIELDS +* p_log +* Pointer to the log object. +* +* p_mad_pool +* Pointer to the MAD pool. +* +* p_vendor +* Pointer to the vendor specific interfaces object. 
+*
+* h_bind
+* Bind handle returned by the transport layer.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
+*
+* p_stats
+* Pointer to the OpenSM statistics block.
+*
+* p_resp
+* Pointer to the SA response manager.
+*
+* SEE ALSO
+* SA MAD Controller object
+* SA MADr object
+*********/
+
+/****f* OpenSM: SA MAD Controller/osm_sa_mad_ctrl_construct
+* NAME
+* osm_sa_mad_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a SA MAD Controller object.
+*
+* SYNOPSIS
+*/
+void osm_sa_mad_ctrl_construct(
+ IN osm_sa_mad_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a SA MAD Controller
+* object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_sa_mad_ctrl_init and osm_sa_mad_ctrl_destroy.
+*
+* Calling osm_sa_mad_ctrl_construct is a prerequisite to calling any other
+* method except osm_sa_mad_ctrl_init.
+*
+* SEE ALSO
+* SA MAD Controller object, osm_sa_mad_ctrl_init,
+* osm_sa_mad_ctrl_destroy
+*********/
+
+/****f* OpenSM: SA MAD Controller/osm_sa_mad_ctrl_destroy
+* NAME
+* osm_sa_mad_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_sa_mad_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_sa_mad_ctrl_destroy(
+ IN osm_sa_mad_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* SA MAD Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_sa_mad_ctrl_construct or osm_sa_mad_ctrl_init.
+*
+* SEE ALSO
+* SA MAD Controller object, osm_sa_mad_ctrl_construct,
+* osm_sa_mad_ctrl_init
+*********/
+
+/****f* OpenSM: SA MAD Controller/osm_sa_mad_ctrl_init
+* NAME
+* osm_sa_mad_ctrl_init
+*
+* DESCRIPTION
+* The osm_sa_mad_ctrl_init function initializes a
+* SA MAD Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_sa_mad_ctrl_init(
+ IN osm_sa_mad_ctrl_t* const p_ctrl,
+ IN osm_sa_resp_t* const p_resp,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_vendor_t* const p_vendor,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN osm_stats_t* const p_stats,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sa_mad_ctrl_t object to initialize.
+*
+* p_resp
+* [in] Pointer to the SA response manager object.
+*
+* p_mad_pool
+* [in] Pointer to the MAD pool.
+*
+* p_vendor
+* [in] Pointer to the vendor specific interfaces object.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_stats
+* [in] Pointer to the OpenSM statistics block.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SA MAD Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other SA MAD Controller methods.
+*
+* SEE ALSO
+* SA MAD Controller object, osm_sa_mad_ctrl_construct,
+* osm_sa_mad_ctrl_destroy
+*********/
+
+/****f* OpenSM: SA/osm_sa_mad_ctrl_bind
+* NAME
+* osm_sa_mad_ctrl_bind
+*
+* DESCRIPTION
+* Binds the SA MAD Controller object to a port GUID.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sa_mad_ctrl_bind(
+ IN osm_sa_mad_ctrl_t* const p_ctrl,
+ IN const ib_net64_t port_guid );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sa_mad_ctrl_t object to bind.
+*
+* port_guid
+* [in] Local port GUID with which to bind.
+*
+* RETURN VALUES
+* IB_SUCCESS if the controller was successfully bound to the port;
+* an error status otherwise.
+*
+* NOTES
+* A given SA MAD Controller object can only be bound to one
+* port at a time.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SA/osm_sa_mad_ctrl_unbind
+* NAME
+* osm_sa_mad_ctrl_unbind
+*
+* DESCRIPTION
+* Unbinds the SA MAD Controller object from the IB port.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sa_mad_ctrl_unbind(
+ IN osm_sa_mad_ctrl_t* const p_ctrl);
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sa_mad_ctrl_t object to unbind.
+*
+* RETURN VALUES
+* IB_SUCCESS if the controller was successfully unbound;
+* an error status otherwise.
+*
+* NOTES
+* A given SA MAD Controller must previously have been bound to an
+* IB port.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SA/osm_sa_mad_ctrl_get_bind_handle
+* NAME
+* osm_sa_mad_ctrl_get_bind_handle
+*
+* DESCRIPTION
+* Returns the bind handle.
+*
+* SYNOPSIS
+*/
+static inline osm_bind_handle_t
+osm_sa_mad_ctrl_get_bind_handle(
+ IN const osm_sa_mad_ctrl_t* const p_ctrl )
+{
+ return( p_ctrl->h_bind );
+}
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sa_mad_ctrl_t object.
+*
+* RETURN VALUES
+* Returns the bind handle, which may be OSM_BIND_INVALID_HANDLE
+* if no port has been bound.
+*
+* NOTES
+* A given SA MAD Controller object can only be bound to one
+* port at a time.
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SA_MAD_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record.h
new file mode 100644
index 00000000..7603f76b
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record.h
@@ -0,0 +1,420 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_mcmr_recv_t.
+ * This object represents the MCMemberRecord Receiver object,
+ * which receives the MCMemberRecord attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.7 $
+ */
+
+#ifndef _OSM_MCMR_H_
+#define _OSM_MCMR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/MCMember Receiver
+* NAME
+* MCMember Receiver
+*
+* DESCRIPTION
+* The MCMember Receiver object encapsulates the information
+* needed to receive the MCMemberRecord attribute from a node.
+*
+* The MCMember Receiver object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Anil Keshavamurthy, Intel
+*
+*********/
+
+/****s* OpenSM: MCMember Receiver/osm_mcmr_recv_t
+* NAME
+* osm_mcmr_recv_t
+*
+* DESCRIPTION
+* MCMember Receiver structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mcmr
+{
+ osm_subn_t *p_subn;
+ osm_sm_t *p_sm;
+ osm_sa_resp_t *p_resp;
+ osm_mad_pool_t *p_mad_pool;
+ osm_log_t *p_log;
+ cl_plock_t *p_lock;
+ uint16_t mlid_ho;
+ cl_qlock_pool_t pool;
+} osm_mcmr_recv_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_sm
+* Pointer to the SM object.
+*
+* p_resp
+* Pointer to the SA responder.
+*
+* p_mad_pool
+* Pointer to the mad pool.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* mlid_ho
+* MLID (in host byte order) used when creating new multicast groups.
+*
+* pool
+* Pool of linkable MCMember Record objects used to generate the
+* query response.
+*
+* SEE ALSO
+* MCMember Receiver object
+*********/
+
+/****f* OpenSM: MCMember Receiver/osm_mcmr_rcv_construct
+* NAME
+* osm_mcmr_rcv_construct
+*
+* DESCRIPTION
+* This function constructs a MCMember Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_mcmr_rcv_construct(
+ IN osm_mcmr_recv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a MCMember Receiver object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_mcmr_rcv_init, osm_mcmr_rcv_destroy
+*
+* Calling osm_mcmr_rcv_construct is a prerequisite to calling any other
+* method except osm_mcmr_rcv_init.
+*
+* SEE ALSO
+* MCMember Receiver object, osm_mcmr_rcv_init,
+* osm_mcmr_rcv_destroy
+*********/
+
+/****f* OpenSM: MCMember Receiver/osm_mcmr_rcv_destroy
+* NAME
+* osm_mcmr_rcv_destroy
+*
+* DESCRIPTION
+* The osm_mcmr_rcv_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_mcmr_rcv_destroy(
+ IN osm_mcmr_recv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* MCMember Receiver object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_mcmr_rcv_construct or osm_mcmr_rcv_init.
+*
+* SEE ALSO
+* MCMember Receiver object, osm_mcmr_rcv_construct,
+* osm_mcmr_rcv_init
+*********/
+
+/****f* OpenSM: MCMember Receiver/osm_mcmr_rcv_init
+* NAME
+* osm_mcmr_rcv_init
+*
+* DESCRIPTION
+* The osm_mcmr_rcv_init function initializes a
+* MCMember Receiver object for use.
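+*
+* A typical call, matching the SYNOPSIS below (sketch; the surrounding
+* objects are assumed to already exist):
+*
+* status = osm_mcmr_rcv_init( p_sm, &mcmr, p_resp, p_mad_pool,
+* p_subn, p_log, p_lock );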
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_mcmr_rcv_init(
+ IN osm_sm_t * const p_sm,
+ IN osm_mcmr_recv_t* const p_ctrl,
+ IN osm_sa_resp_t* const p_resp,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to the osm_sm_t object.
+*
+* p_ctrl
+* [in] Pointer to an osm_mcmr_recv_t object to initialize.
+*
+* p_resp
+* [in] Pointer to the SA Responder object.
+*
+* p_mad_pool
+* [in] Pointer to the mad pool.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+* IB_SUCCESS if the MCMember Receiver object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other MCMember Receiver methods.
+*
+* SEE ALSO
+* MCMember Receiver object, osm_mcmr_rcv_construct,
+* osm_mcmr_rcv_destroy
+*********/
+
+/****f* OpenSM: MCMember Receiver/osm_mcmr_rcv_process
+* NAME
+* osm_mcmr_rcv_process
+*
+* DESCRIPTION
+* Process the MCMemberRecord attribute.
+*
+* SYNOPSIS
+*/
+void osm_mcmr_rcv_process(
+ IN osm_mcmr_recv_t* const p_ctrl,
+ IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_mcmr_recv_t object.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the MAD
+* that contains the node's MCMemberRecord attribute.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* This function processes a MCMemberRecord attribute.
+*
+* SEE ALSO
+* MCMember Receiver, MCMember Response Controller
+*********/
+
+/****f* OpenSM: MC Member Record Receiver/osm_mcmr_rcv_create_new_mgrp
+* NAME
+* osm_mcmr_rcv_create_new_mgrp
+*
+* DESCRIPTION
+* Create a new Multicast group.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcmr_rcv_create_new_mgrp(
+ IN osm_mcmr_recv_t* const p_mcmr,
+ IN uint64_t comp_mask,
+ IN const ib_member_rec_t* const p_recvd_mcmember_rec,
+ IN const osm_physp_t* const p_req_physp,
+ OUT osm_mgrp_t **pp_mgrp);
+/*
+* PARAMETERS
+* p_mcmr
+* [in] Pointer to an osm_mcmr_recv_t object.
+*
+* comp_mask
+* [in] Component mask of the received MCMemberRecord.
+*
+* p_recvd_mcmember_rec
+* [in] Received Multicast member record.
+*
+* p_req_physp
+* [in] The requesting osm_physp_t object.
+* NULL if the creation is without a requesting port
+* (e.g. ipoib known mcgroups).
+*
+* pp_mgrp
+* [out] Pointer to the osm_mgrp_t object.
+*
+* RETURN VALUES
+* IB_SUCCESS, IB_ERROR
+*
+* NOTES
+*
+* SEE ALSO
+*
+*********/
+
+/****f* OpenSM: MC Member Record Receiver/osm_mcmr_rcv_find_or_create_new_mgrp
+* NAME
+* osm_mcmr_rcv_find_or_create_new_mgrp
+*
+* DESCRIPTION
+* Find an existing Multicast group or create a new one.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_mcmr_rcv_find_or_create_new_mgrp(
+ IN osm_mcmr_recv_t* const p_mcmr,
+ IN uint64_t comp_mask,
+ IN ib_member_rec_t* const p_recvd_mcmember_rec,
+ OUT osm_mgrp_t **pp_mgrp);
+/*
+* PARAMETERS
+* p_mcmr
+* [in] Pointer to an osm_mcmr_recv_t object.
+*
+* comp_mask
+* [in] Component mask of the received MCMemberRecord.
+*
+* p_recvd_mcmember_rec
+* [in] Received Multicast member record.
+*
+* pp_mgrp
+* [out] Pointer to the osm_mgrp_t object.
+*
+* RETURN VALUES
+* IB_SUCCESS, IB_ERROR
+*
+* NOTES
+*
+* SEE ALSO
+*
+*********/
+
+#define JOIN_MC_COMP_MASK (IB_MCR_COMPMASK_MGID | \
+ IB_MCR_COMPMASK_PORT_GID | \
+ IB_MCR_COMPMASK_JOIN_STATE)
+
+#define REQUIRED_MC_CREATE_COMP_MASK (IB_MCR_COMPMASK_MGID | \
+ IB_MCR_COMPMASK_PORT_GID | \
+ IB_MCR_COMPMASK_JOIN_STATE | \
+ IB_MCR_COMPMASK_QKEY | \
+ IB_MCR_COMPMASK_TCLASS | \
+ IB_MCR_COMPMASK_PKEY | \
+ IB_MCR_COMPMASK_FLOW | \
+ IB_MCR_COMPMASK_SL)
+
+/****d* OpenSM: MC Member Record Receiver/OSM_DEFAULT_MGRP_MTU
+* Name
+* OSM_DEFAULT_MGRP_MTU
+*
+* DESCRIPTION
+* Default MTU used for new MGRP creation (2048 bytes).
+* Note it includes the MTUSelector, which is set to "Greater Than".
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_MGRP_MTU 0x04
+/***********/
+
+/****d* OpenSM: MC Member Record Receiver/OSM_DEFAULT_MGRP_RATE
+* Name
+* OSM_DEFAULT_MGRP_RATE
+*
+* DESCRIPTION
+* Default RATE used for new MGRP creation (10 Gb/sec).
+* Note it includes the RateSelector, which is set to "Greater Than".
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_MGRP_RATE 0x03
+/***********/
+
+/* Scope component definitions from IBA 1.2 (Table 3 p. 146) */
+#define MC_SCOPE_LINK_LOCAL 0x2
+#define MC_SCOPE_SITE_LOCAL 0x5
+#define MC_SCOPE_ORG_LOCAL 0x8
+#define MC_SCOPE_GLOBAL 0xE
+
+/****d* OpenSM: MC Member Record Receiver/OSM_DEFAULT_MGRP_SCOPE
+* Name
+* OSM_DEFAULT_MGRP_SCOPE
+*
+* DESCRIPTION
+* Default SCOPE used for new MGRP creation (link local).
+*
+* SYNOPSIS
+*/
+#define OSM_DEFAULT_MGRP_SCOPE MC_SCOPE_LINK_LOCAL
+/***********/
+
+/* JoinState definitions from IBA 1.2 */
+#define MC_FULL_MEMBER 0x1
+#define MC_NON_MEMBER 0x2
+#define MC_SENDONLY_NON_MEMBER 0x4
+
+END_C_DECLS
+
+#endif /* _OSM_MCMR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record_ctrl.h
new file mode 100644
index 00000000..22c5606b
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mcmember_record_ctrl.h
@@ -0,0 +1,262 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mcmr_rcv_ctrl_t. + * This object represents a controller that receives the IBA MCMemberRecord + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + + +#ifndef _OSM_MCMRCTRL_H +#define _OSM_MCMRCTRL_H + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/MCMember Receive Controller +* NAME +* MCMember Receive Controller +* +* DESCRIPTION +* The MCMember Receive Controller object encapsulates +* the information needed to receive the MCMemberRecord attribute from a node. +* +* The MCMember Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: MCMember Receive Controller/osm_mcmr_rcv_ctrl_t +* NAME +* osm_mcmr_rcv_ctrl_t +* +* DESCRIPTION +* MCMember Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_mcmr_rcv_ctrl +{ + osm_mcmr_recv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_mcmr_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the MCMember Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* MCMember Receive Controller object +* MCMember Receiver object +*********/ + +/****f* OpenSM: MCMember Receive Controller/osm_mcmr_rcv_ctrl_construct +* NAME +* osm_mcmr_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a MCMember Receive Controller object. +* +* SYNOPSIS +*/ +void osm_mcmr_rcv_ctrl_construct( + IN osm_mcmr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a MCMember Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_mcmr_rcv_ctrl_init, osm_mcmr_rcv_ctrl_destroy, +* and osm_mcmr_ctrl_is_inited. +* +* Calling osm_mcmr_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_mcmr_rcv_ctrl_init. +* +* SEE ALSO +* MCMember Receive Controller object, osm_mcmr_rcv_ctrl_init, +* osm_mcmr_rcv_ctrl_destroy, osm_mcmr_ctrl_is_inited +*********/ + +/****f* OpenSM: MCMember Receive Controller/osm_mcmr_rcv_ctrl_destroy +* NAME +* osm_mcmr_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_mcmr_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_mcmr_rcv_ctrl_destroy( + IN osm_mcmr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* MCMember Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_mcmr_rcv_ctrl_construct or osm_mcmr_rcv_ctrl_init. 
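+*
+* Destroying after a failed init is safe as long as the object was
+* constructed first (sketch; "ctrl" is illustrative):
+*
+* osm_mcmr_rcv_ctrl_construct( &ctrl );
+* if( osm_mcmr_rcv_ctrl_init( &ctrl, p_mcmr, p_log, p_disp ) != IB_SUCCESS )
+* osm_mcmr_rcv_ctrl_destroy( &ctrl );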
+*
+* SEE ALSO
+* MCMember Receive Controller object, osm_mcmr_rcv_ctrl_construct,
+* osm_mcmr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: MCMember Receive Controller/osm_mcmr_rcv_ctrl_init
+* NAME
+* osm_mcmr_rcv_ctrl_init
+*
+* DESCRIPTION
+* The osm_mcmr_rcv_ctrl_init function initializes a
+* MCMember Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_mcmr_rcv_ctrl_init(
+ IN osm_mcmr_rcv_ctrl_t* const p_ctrl,
+ IN osm_mcmr_recv_t* const p_mcmr,
+ IN osm_log_t* const p_log,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_mcmr_rcv_ctrl_t object to initialize.
+*
+* p_mcmr
+* [in] Pointer to an osm_mcmr_recv_t object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the MCMember Receive Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other MCMember Receive Controller methods.
+*
+* SEE ALSO
+* MCMember Receive Controller object, osm_mcmr_rcv_ctrl_construct,
+* osm_mcmr_rcv_ctrl_destroy, osm_mcmr_ctrl_is_inited
+*********/
+
+/****f* OpenSM: MCMember Receive Controller/osm_mcmr_ctrl_is_inited
+* NAME
+* osm_mcmr_ctrl_is_inited
+*
+* DESCRIPTION
+* Indicates if the object has been initialized with osm_mcmr_rcv_ctrl_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_mcmr_ctrl_is_inited(
+ IN const osm_mcmr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_mcmr_rcv_ctrl_t object.
+*
+* RETURN VALUES
+* TRUE if the object was initialized successfully,
+* FALSE otherwise.
+*
+* NOTES
+* Either osm_mcmr_rcv_ctrl_construct or osm_mcmr_rcv_ctrl_init must be
+* called before using this function.
+*
+* SEE ALSO
+* MCMember Receive Controller object, osm_mcmr_rcv_ctrl_construct,
+* osm_mcmr_rcv_ctrl_init
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_MCMRCTRL_H */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record.h
new file mode 100644
index 00000000..a4e42483
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record.h
@@ -0,0 +1,280 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_mftr_rcv_t.
+ * This object represents the MulticastForwardingTable Receiver object,
+ * which receives the MulticastForwardingTable attribute from a switch node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ */
+
+#ifndef _OSM_MFTR_H_
+#define _OSM_MFTR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Multicast Forwarding Table Receiver
+* NAME
+* Multicast Forwarding Table Receiver
+*
+* DESCRIPTION
+* The Multicast Forwarding Table Receiver object encapsulates the information
+* needed to receive the MulticastForwardingTable attribute from a switch node.
+*
+* The Multicast Forwarding Table Receiver object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Hal Rosenstock, Voltaire
+*
+*********/
+
+/****s* OpenSM: Multicast Forwarding Table Receiver/osm_mftr_rcv_t
+* NAME
+* osm_mftr_rcv_t
+*
+* DESCRIPTION
+* Multicast Forwarding Table Receiver structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mft
+{
+ osm_subn_t* p_subn;
+ osm_stats_t* p_stats;
+ osm_sa_resp_t* p_resp;
+ osm_mad_pool_t* p_mad_pool;
+ osm_log_t* p_log;
+ cl_plock_t* p_lock;
+ cl_qlock_pool_t pool;
+} osm_mftr_rcv_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_stats
+* Pointer to the statistics.
+*
+* p_resp
+* Pointer to the SA responder.
+*
+* p_mad_pool
+* Pointer to the mad pool.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* pool
+* Pool of linkable Multicast Forwarding Table Record objects used to
+* generate the query response.
+*
+* SEE ALSO
+* Multicast Forwarding Table Receiver object
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receiver/osm_mftr_rcv_construct
+* NAME
+* osm_mftr_rcv_construct
+*
+* DESCRIPTION
+* This function constructs a Multicast Forwarding Table Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_mftr_rcv_construct(
+ IN osm_mftr_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Multicast Forwarding Table Receiver object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_mftr_rcv_init, osm_mftr_rcv_destroy
+*
+* Calling osm_mftr_rcv_construct is a prerequisite to calling any other
+* method except osm_mftr_rcv_init.
+*
+* SEE ALSO
+* Multicast Forwarding Table Receiver object, osm_mftr_rcv_init,
+* osm_mftr_rcv_destroy
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receiver/osm_mftr_rcv_destroy
+* NAME
+* osm_mftr_rcv_destroy
+*
+* DESCRIPTION
+* The osm_mftr_rcv_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_mftr_rcv_destroy(
+ IN osm_mftr_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Multicast Forwarding Table Receiver object.
+* Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mftr_rcv_construct or osm_mftr_rcv_init.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receiver object, osm_mftr_rcv_construct,
+*	osm_mftr_rcv_init
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receiver/osm_mftr_rcv_init
+* NAME
+*	osm_mftr_rcv_init
+*
+* DESCRIPTION
+*	The osm_mftr_rcv_init function initializes a
+*	Multicast Forwarding Table Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_mftr_rcv_init(
+	IN osm_mftr_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_mftr_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder object.
+*
+*	p_mad_pool
+*		[in] Pointer to the mad pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Multicast Forwarding Table Receiver object was
+*	initialized successfully.
+*
+* NOTES
+*	Allows calling other Multicast Forwarding Table Receiver methods.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receiver object, osm_mftr_rcv_construct,
+*	osm_mftr_rcv_destroy
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receiver/osm_mftr_rcv_process
+* NAME
+*	osm_mftr_rcv_process
+*
+* DESCRIPTION
+*	Process the MulticastForwardingTable attribute.
+*
+* SYNOPSIS
+*/
+void osm_mftr_rcv_process(
+	IN osm_mftr_rcv_t* const p_ctrl,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_mftr_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the switch node's MulticastForwardingTable attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a MulticastForwardingTable attribute.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receiver, Multicast Forwarding Table Response
+*	Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MFTR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record_ctrl.h
new file mode 100644
index 00000000..8f815ff5
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_mft_record_ctrl.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mftr_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA
+ *	MulticastForwardingTable attribute from a switch.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ */
+
+#ifndef _OSM_MFTR_RCV_CTRL_H_
+#define _OSM_MFTR_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+#  define BEGIN_C_DECLS extern "C" {
+#  define END_C_DECLS   }
+#else /* !__cplusplus */
+#  define BEGIN_C_DECLS
+#  define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Multicast Forwarding Table Receive Controller
+* NAME
+*	Multicast Forwarding Table Receive Controller
+*
+* DESCRIPTION
+*	The Multicast Forwarding Table Receive Controller object encapsulates
+*	the information needed to receive the MulticastForwardingTable attribute
+*	from a switch node.
+*
+*	The Multicast Forwarding Table Receive Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Hal Rosenstock, Voltaire
+*
+*********/
+
+/****s* OpenSM: Multicast Forwarding Table Receive Controller/osm_mftr_rcv_ctrl_t
+* NAME
+*	osm_mftr_rcv_ctrl_t
+*
+* DESCRIPTION
+*	Multicast Forwarding Table Receive Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mftr_rcv_ctrl
+{
+	osm_mftr_rcv_t		*p_rcv;
+	osm_log_t		*p_log;
+	cl_dispatcher_t		*p_disp;
+	cl_disp_reg_handle_t	h_disp;
+} osm_mftr_rcv_ctrl_t;
+/*
+* FIELDS
+*	p_rcv
+*		Pointer to the Multicast Forwarding Table Receiver object.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_disp
+*		Pointer to the Dispatcher.
+*
+*	h_disp
+*		Handle returned from dispatcher registration.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receive Controller object
+*	Multicast Forwarding Table Receiver object
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receive Controller/osm_mftr_rcv_ctrl_construct
+* NAME
+*	osm_mftr_rcv_ctrl_construct
+*
+* DESCRIPTION
+*	This function constructs a Multicast Forwarding Table Receive
+*	Controller object.
+*
+* SYNOPSIS
+*/
+void osm_mftr_rcv_ctrl_construct(
+	IN osm_mftr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to a Multicast Forwarding Table Receive Controller
+*		object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mftr_rcv_ctrl_init, osm_mftr_rcv_ctrl_destroy
+*
+*	Calling osm_mftr_rcv_ctrl_construct is a prerequisite to calling any other
+*	method except osm_mftr_rcv_ctrl_init.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receive Controller object, osm_mftr_rcv_ctrl_init,
+*	osm_mftr_rcv_ctrl_destroy
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receive Controller/osm_mftr_rcv_ctrl_destroy
+* NAME
+*	osm_mftr_rcv_ctrl_destroy
+*
+* DESCRIPTION
+*	The osm_mftr_rcv_ctrl_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void osm_mftr_rcv_ctrl_destroy(
+	IN osm_mftr_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	Multicast Forwarding Table Receive Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mftr_rcv_ctrl_construct or osm_mftr_rcv_ctrl_init.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receive Controller object, osm_mftr_rcv_ctrl_construct,
+*	osm_mftr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Multicast Forwarding Table Receive Controller/osm_mftr_rcv_ctrl_init
+* NAME
+*	osm_mftr_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_mftr_rcv_ctrl_init function initializes a
+*	Multicast Forwarding Table Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_mftr_rcv_ctrl_init(
+	IN osm_mftr_rcv_ctrl_t* const p_ctrl,
+	IN osm_mftr_rcv_t* const p_rcv,
+	IN osm_log_t* const p_log,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_mftr_rcv_ctrl_t object to initialize.
+*
+*	p_rcv
+*		[in] Pointer to an osm_mftr_rcv_t object.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Multicast Forwarding Table Receive Controller object
+*	was initialized successfully.
+*
+* NOTES
+*	Allows calling other Multicast Forwarding Table Receive Controller methods.
+*
+* SEE ALSO
+*	Multicast Forwarding Table Receive Controller object,
+*	osm_mftr_rcv_ctrl_construct, osm_mftr_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_MFTR_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record.h
new file mode 100644
index 00000000..b55cfb09
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record.h
@@ -0,0 +1,273 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_mpr_rcv_t.
+ *	This object represents the MultiPathRecord Receiver object,
+ *	which receives the MultiPathRecord attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ */
+
+#ifndef _OSM_MPR_RCV_H_
+#define _OSM_MPR_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+#  define BEGIN_C_DECLS extern "C" {
+#  define END_C_DECLS   }
+#else /* !__cplusplus */
+#  define BEGIN_C_DECLS
+#  define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/MultiPath Record Receiver
+* NAME
+*	MultiPath Record Receiver
+*
+* DESCRIPTION
+*	The MultiPath Record Receiver object encapsulates the information
+*	needed to receive the MultiPathRecord request from a node.
+*
+*	The MultiPath Record Receiver object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Hal Rosenstock, Voltaire
+*
+*********/
+
+/****s* OpenSM: MultiPath Record Receiver/osm_mpr_rcv_t
+* NAME
+*	osm_mpr_rcv_t
+*
+* DESCRIPTION
+*	MultiPath Record Receiver structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_mpr_rcv
+{
+	osm_subn_t		*p_subn;
+	osm_sa_resp_t		*p_resp;
+	osm_mad_pool_t		*p_mad_pool;
+	osm_log_t		*p_log;
+	cl_plock_t		*p_lock;
+	cl_qlock_pool_t		pr_pool;
+} osm_mpr_rcv_t;
+/*
+* FIELDS
+*	p_subn
+*		Pointer to the Subnet object for this subnet.
+*
+*	p_resp
+*		Pointer to the SA responder.
+*
+*	p_mad_pool
+*		Pointer to the mad pool.
+*
+*	p_log
+*		Pointer to the log object.
+*
+*	p_lock
+*		Pointer to the serializing lock.
+*
+*	pr_pool
+*		Pool of multipath record objects used to generate query responses.
+*
+* SEE ALSO
+*	MultiPath Record Receiver object
+*********/
+
+/****f* OpenSM: MultiPath Record Receiver/osm_mpr_rcv_construct
+* NAME
+*	osm_mpr_rcv_construct
+*
+* DESCRIPTION
+*	This function constructs a MultiPath Record Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_mpr_rcv_construct(
+	IN osm_mpr_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to a MultiPath Record Receiver object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_mpr_rcv_init, osm_mpr_rcv_destroy
+*
+*	Calling osm_mpr_rcv_construct is a prerequisite to calling any other
+*	method except osm_mpr_rcv_init.
+*
+* SEE ALSO
+*	MultiPath Record Receiver object, osm_mpr_rcv_init, osm_mpr_rcv_destroy
+*********/
+
+/****f* OpenSM: MultiPath Record Receiver/osm_mpr_rcv_destroy
+* NAME
+*	osm_mpr_rcv_destroy
+*
+* DESCRIPTION
+*	The osm_mpr_rcv_destroy function destroys the object, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_mpr_rcv_destroy(
+	IN osm_mpr_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	MultiPath Record Receiver object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_mpr_rcv_construct or osm_mpr_rcv_init.
+*
+* SEE ALSO
+*	MultiPath Record Receiver object, osm_mpr_rcv_construct,
+*	osm_mpr_rcv_init
+*********/
+
+/****f* OpenSM: MultiPath Record Receiver/osm_mpr_rcv_init
+* NAME
+*	osm_mpr_rcv_init
+*
+* DESCRIPTION
+*	The osm_mpr_rcv_init function initializes a
+*	MultiPath Record Receiver object for use.
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_mpr_rcv_init( + IN osm_mpr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_mpr_rcv_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the MultiPath Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other MultiPath Record Receiver methods. +* +* SEE ALSO +* MultiPath Record Receiver object, osm_mpr_rcv_construct, +* osm_mpr_rcv_destroy +*********/ + +/****f* OpenSM: MultiPath Record Receiver/osm_mpr_rcv_process +* NAME +* osm_mpr_rcv_process +* +* DESCRIPTION +* Process the MultiPathRecord request. +* +* SYNOPSIS +*/ +void +osm_mpr_rcv_process( + IN osm_mpr_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_mpr_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's MultiPathRecord attribute. +* +* RETURN VALUES +* IB_SUCCESS if the MultiPathRecord processing was successful. +* +* NOTES +* This function processes a MultiPathRecord attribute. +* +* SEE ALSO +* MultiPath Record Receiver, Node Info Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_MPR_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record_ctrl.h new file mode 100644 index 00000000..090dc9e8 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_multipath_record_ctrl.h @@ -0,0 +1,260 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mpr_rcv_ctrl_t. + * This object represents a controller that receives the IBA + * MultiPathRecord attribute from a node. + * This object is part of the OpenSM family of objects. 
+ * + * Environment: + * Linux User Mode + * + */ + +#ifndef _OSM_MPRCTRL_H_ +#define _OSM_MPRCTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/MultiPath Record Receive Controller +* NAME +* MultiPath Record Receive Controller +* +* DESCRIPTION +* The MultiPath Record Receive Controller object encapsulates +* the information needed to receive the MultiPathRecord attribute from a node. +* +* The MultiPath record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Hal Rosenstock, Voltaire +* +*********/ + +/****s* OpenSM: MultiPath Record Receive Controller/osm_mpr_rcv_ctrl_t +* NAME +* osm_mpr_rcv_ctrl_t +* +* DESCRIPTION +* MultiPath Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_mpr_rcv_ctrl +{ + osm_mpr_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_mpr_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the MultiPath Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* MultiPath Record Receive Controller object +* MultiPath Record Receiver object +*********/ + +/****f* OpenSM: MultiPath Record Receive Controller/osm_pr_rcv_ctrl_construct +* NAME +* osm_mpr_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a MultiPath Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_mpr_rcv_ctrl_construct( + IN osm_mpr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a MultiPath Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_mpr_rcv_ctrl_init, osm_mpr_rcv_ctrl_destroy, +* and osm_mpr_rcv_ctrl_is_inited. +* +* Calling osm_mpr_rcv_ctrl_construct is a prerequisite to calling any +* other method except osm_mpr_rcv_ctrl_init. +* +* SEE ALSO +* MultiPath Record Receive Controller object, osm_mpr_rcv_ctrl_init, +* osm_mpr_rcv_ctrl_destroy, osm_mpr_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: MultiPath Record Receive Controller/osm_mpr_rcv_ctrl_destroy +* NAME +* osm_mpr_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_mpr_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_mpr_rcv_ctrl_destroy( + IN osm_mpr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* MultiPath Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_mpr_rcv_ctrl_construct or osm_mpr_rcv_ctrl_init. 
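+*
+*	A minimal lifecycle sketch (illustrative only, not part of the
+*	original header; the receiver, log, and dispatcher pointers are
+*	assumed to have been initialized elsewhere in OpenSM):
+*
+*		osm_mpr_rcv_ctrl_t ctrl;
+*		ib_api_status_t status;
+*
+*		osm_mpr_rcv_ctrl_construct( &ctrl );
+*		status = osm_mpr_rcv_ctrl_init( &ctrl, p_rcv, p_log, p_disp );
+*		if( status == IB_SUCCESS )
+*		{
+*			/* the controller now routes dispatcher messages */
+*			osm_mpr_rcv_ctrl_destroy( &ctrl );
+*		}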
+* +* SEE ALSO +* MultiPath Record Receive Controller object, osm_mpr_rcv_ctrl_construct, +* osm_mpr_rcv_ctrl_init +*********/ + +/****f* OpenSM: MultiPath Record Receive Controller/osm_mpr_rcv_ctrl_init +* NAME +* osm_mpr_rcv_ctrl_init +* +* DESCRIPTION +* The osm_mpr_rcv_ctrl_init function initializes a +* MultiPath Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_mpr_rcv_ctrl_init( + IN osm_mpr_rcv_ctrl_t* const p_ctrl, + IN osm_mpr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_mpr_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_mpr_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the MultiPath Record Receive Controller object was +* initialized successfully. +* +* NOTES +* Allows calling other MultiPath Record Receive Controller methods. +* +* SEE ALSO +* MultiPath Record Receive Controller object, osm_pr_rcv_ctrl_construct, +* osm_mpr_rcv_ctrl_destroy, osm_mpr_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: MultiPath Record Receive Controller/osm_mpr_rcv_ctrl_is_inited +* NAME +* osm_mpr_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_mpr_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_mpr_rcv_ctrl_is_inited( + IN const osm_mpr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_mpr_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_mpr_rcv_ctrl_construct or osm_mpr_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* MultiPath Record Receive Controller object, osm_mpr_rcv_ctrl_construct, +* osm_mpr_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_MPRCTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record.h new file mode 100644 index 00000000..6af9e2bc --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record.h @@ -0,0 +1,275 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_nr_rcv_t. + * This object represents the NodeRecord Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_NR_H_ +#define _OSM_NR_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Node Record Receiver +* NAME +* Node Record Receiver +* +* DESCRIPTION +* The Node Record Receiver object encapsulates the information +* needed to receive the NodeRecord attribute from a node. +* +* The Node record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Anil S Keshavamurthy, Intel +* +*********/ + +/****s* OpenSM: Node Record Receiver/osm_nr_rcv_t +* NAME +* osm_nr_rcv_t +* +* DESCRIPTION +* Node Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_nr_recv +{ + const osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t pool; + +} osm_nr_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the SA responder. +* +* p_mad_pool +* Pointer to the mad pool. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* pool +* Pool of linkable node record objects used to generate +* the query response. +* +* SEE ALSO +* +*********/ + +/****f* OpenSM: Node Record Receiver/osm_nr_rcv_construct +* NAME +* osm_nr_rcv_construct +* +* DESCRIPTION +* This function constructs a Node Record Receiver object. +* +* SYNOPSIS +*/ +void osm_nr_rcv_construct( + IN osm_nr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a Node Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_nr_rcv_init, osm_nr_rcv_destroy +* +* Calling osm_nr_rcv_construct is a prerequisite to calling any other +* method except osm_nr_rcv_init. +* +* SEE ALSO +* Node Record Receiver object, osm_nr_rcv_init, osm_lr_rcv_destroy +*********/ + +/****f* OpenSM: Node Record Receiver/osm_nr_rcv_destroy +* NAME +* osm_nr_rcv_destroy +* +* DESCRIPTION +* The osm_nr_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_nr_rcv_destroy( + IN osm_nr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_nr_rcv_construct or osm_nr_rcv_init. 
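+*
+*	A usage sketch (illustrative only, not part of the original header;
+*	all pointer arguments are assumed to be initialized elsewhere, and
+*	p_madw to wrap an incoming NodeRecord query):
+*
+*		osm_nr_rcv_t rcv;
+*
+*		osm_nr_rcv_construct( &rcv );
+*		if( osm_nr_rcv_init( &rcv, p_resp, p_mad_pool, p_subn,
+*				p_log, p_lock ) == IB_SUCCESS )
+*		{
+*			osm_nr_rcv_process( &rcv, p_madw );
+*			osm_nr_rcv_destroy( &rcv );
+*		}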
+* +* SEE ALSO +* Node Record Receiver object, osm_nr_rcv_construct, +* osm_nr_rcv_init +*********/ + +/****f* OpenSM: Node Record Receiver/osm_nr_rcv_init +* NAME +* osm_nr_rcv_init +* +* DESCRIPTION +* The osm_nr_rcv_init function initializes a +* Node Record Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_nr_rcv_init( + IN osm_nr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_nr_rcv_t object to initialize. +* +* p_resp +* [in] Pointer to the SA Responder object. +* +* p_mad_pool +* [in] Pointer to the mad pool. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the Node Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Link Record Receiver methods. +* +* SEE ALSO +* Node Record Receiver object, osm_nr_rcv_construct, osm_nr_rcv_destroy +*********/ + + +/****f* OpenSM: Node Record Receiver/osm_nr_rcv_process +* NAME +* osm_nr_rcv_process +* +* DESCRIPTION +* Process the NodeRecord attribute. +* +* SYNOPSIS +*/ +void osm_nr_rcv_process( + IN osm_nr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_nr_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's NodeRecord attribute. +* +* NOTES +* This function processes a NodeRecord attribute. +* +* SEE ALSO +* Node Record Receiver, Node Record Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_NR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record_ctrl.h new file mode 100644 index 00000000..681e20ff --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_node_record_ctrl.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_nr_rcv_ctrl_t. 
+ * This object represents a controller that receives the IBA NodeInfo + * record from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_NR_CTRL_H_ +#define _OSM_NR_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Node Record Receive Controller +* NAME +* Node Record Receive Controller +* +* DESCRIPTION +* The Node Record Receive Controller object encapsulates +* the information needed to receive the NodeInfo attribute from a node. +* +* The Node Record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Anil S Keshavamurthy, Intel +* +*********/ + +/****s* OpenSM: Node Record Receive Controller/osm_nr_rcv_ctrl_t +* NAME +* osm_nr_rcv_ctrl_t +* +* DESCRIPTION +* Node Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_nr_ctrl +{ + osm_nr_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_nr_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Node Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Node Record Receive Controller object +* Node Record Receiver object +*********/ + +/****f* OpenSM: Node Record Receive Controller/osm_nr_rcv_ctrl_construct +* NAME +* osm_nr_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Node Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_nr_rcv_ctrl_construct( + IN osm_nr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Node Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_nr_rcv_ctrl_init, osm_nr_rcv_ctrl_destroy, +* +* Calling osm_nr_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_nr_rcv_ctrl_init. +* +* SEE ALSO +* Node Record Receive Controller object, osm_nr_rcv_ctrl_init, +* osm_nr_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: Node Record Receive Controller/osm_nr_rcv_ctrl_destroy +* NAME +* osm_nr_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_nr_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_nr_rcv_ctrl_destroy( + IN osm_nr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Node Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_nr_rcv_ctrl_construct or osm_nr_rcv_ctrl_init. 
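+*
+*	Sketch of the intended receiver/controller pairing (illustrative
+*	only; error handling elided and all pointers assumed valid): the
+*	receiver is initialized first, then the controller is bound to it
+*	and to the dispatcher:
+*
+*		osm_nr_rcv_construct( &nr_rcv );
+*		osm_nr_rcv_ctrl_construct( &nr_ctrl );
+*		osm_nr_rcv_init( &nr_rcv, p_resp, p_mad_pool, p_subn,
+*			p_log, p_lock );
+*		osm_nr_rcv_ctrl_init( &nr_ctrl, &nr_rcv, p_log, p_disp );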
+*
+* SEE ALSO
+*	Node Record Receive Controller object, osm_nr_rcv_ctrl_construct,
+*	osm_nr_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Node Record Receive Controller/osm_nr_rcv_ctrl_init
+* NAME
+*	osm_nr_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_nr_rcv_ctrl_init function initializes a
+*	Node Record Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_nr_rcv_ctrl_init(
+	IN osm_nr_rcv_ctrl_t* const p_ctrl,
+	IN osm_nr_rcv_t* const p_nr,
+	IN osm_log_t* const p_log,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_nr_rcv_ctrl_t object to initialize.
+*
+*	p_nr
+*		[in] Pointer to an osm_nr_rcv_t object.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Node Record Receive Controller object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other Node Record Receive Controller methods.
+*
+* SEE ALSO
+*	Node Record Receive Controller object, osm_nr_rcv_ctrl_construct,
+*	osm_nr_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_NR_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record.h
new file mode 100644
index 00000000..30a6289e
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_pr_rcv_t.
+ *	This object represents the PathRecord Receiver object,
+ *	which receives the PathRecord attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_PR_H_ +#define _OSM_PR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Path Record Receiver +* NAME +* Path Record Receiver +* +* DESCRIPTION +* The Path Record Receiver object encapsulates the information +* needed to receive the PathRecord request from a node. +* +* The Path Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Path Record Receiver/osm_pr_rcv_t +* NAME +* osm_pr_rcv_t +* +* DESCRIPTION +* Path Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pr_rcv +{ + osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t pr_pool; +} osm_pr_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_gen_req_ctrl +* Pointer to the generic request controller. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* pr_pool +* Pool of path record objects used to generate query responses. +* +* SEE ALSO +* Path Record Receiver object +*********/ + +/****f* OpenSM: Path Record Receiver/osm_pr_rcv_construct +* NAME +* osm_pr_rcv_construct +* +* DESCRIPTION +* This function constructs a Path Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_pr_rcv_construct( + IN osm_pr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a Path Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pr_rcv_init, osm_pr_rcv_destroy +* +* Calling osm_pr_rcv_construct is a prerequisite to calling any other +* method except osm_pr_rcv_init. +* +* SEE ALSO +* Path Record Receiver object, osm_pr_rcv_init, osm_pr_rcv_destroy +*********/ + +/****f* OpenSM: Path Record Receiver/osm_pr_rcv_destroy +* NAME +* osm_pr_rcv_destroy +* +* DESCRIPTION +* The osm_pr_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_pr_rcv_destroy( + IN osm_pr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Path Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pr_rcv_construct or osm_pr_rcv_init. +* +* SEE ALSO +* Path Record Receiver object, osm_pr_rcv_construct, +* osm_pr_rcv_init +*********/ + +/****f* OpenSM: Path Record Receiver/osm_pr_rcv_init +* NAME +* osm_pr_rcv_init +* +* DESCRIPTION +* The osm_pr_rcv_init function initializes a +* Path Record Receiver object for use. 
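+*
+*	A call sketch (illustrative only, not part of the original header;
+*	the responder, MAD pool, subnet, log, and lock pointers are assumed
+*	to be initialized elsewhere in OpenSM):
+*
+*		ib_api_status_t status;
+*
+*		status = osm_pr_rcv_init( &pr_rcv, p_resp, p_mad_pool,
+*				p_subn, p_log, p_lock );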
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pr_rcv_init(
+	IN osm_pr_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_pr_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder object.
+*
+*	p_mad_pool
+*		[in] Pointer to the mad pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Path Record Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other Path Record Receiver methods.
+*
+* SEE ALSO
+*	Path Record Receiver object, osm_pr_rcv_construct,
+*	osm_pr_rcv_destroy
+*********/
+
+/****f* OpenSM: Path Record Receiver/osm_pr_rcv_process
+* NAME
+*	osm_pr_rcv_process
+*
+* DESCRIPTION
+*	Process the PathRecord request.
+*
+* SYNOPSIS
+*/
+void
+osm_pr_rcv_process(
+	IN osm_pr_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_pr_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's PathRecord attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a PathRecord attribute.
+*
+* SEE ALSO
+*	Path Record Receiver, Path Record Response Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_PR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record_ctrl.h
new file mode 100644
index 00000000..1b7f9052
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_path_record_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_pr_rcv_ctrl_t.
+ *	This object represents a controller that receives the IBA PathRecord
+ *	attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_PRCTRL_H_ +#define _OSM_PRCTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Path Record Receive Controller +* NAME +* Path Record Receive Controller +* +* DESCRIPTION +* The Path Record Receive Controller object encapsulates +* the information needed to receive the PathRecord attribute from a node. +* +* The Path record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: Path Record Receive Controller/osm_pr_rcv_ctrl_t +* NAME +* osm_pr_rcv_ctrl_t +* +* DESCRIPTION +* Path Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pr_rcv_ctrl +{ + osm_pr_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_pr_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Path Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Path Record Receive Controller object +* Path Record Receiver object +*********/ + +/****f* OpenSM: Path Record Receive Controller/osm_pr_rcv_ctrl_construct +* NAME +* osm_pr_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Path Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_pr_rcv_ctrl_construct( + IN osm_pr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Path Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pr_rcv_ctrl_init, osm_pr_rcv_ctrl_destroy, +* and osm_pr_rcv_ctrl_is_inited. +* +* Calling osm_pr_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_pr_rcv_ctrl_init. +* +* SEE ALSO +* Path Record Receive Controller object, osm_pr_rcv_ctrl_init, +* osm_pr_rcv_ctrl_destroy, osm_pr_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Path Record Receive Controller/osm_pr_rcv_ctrl_destroy +* NAME +* osm_pr_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_pr_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pr_rcv_ctrl_destroy( + IN osm_pr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Path Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pr_rcv_ctrl_construct or osm_pr_rcv_ctrl_init. +* +* SEE ALSO +* Path Record Receive Controller object, osm_pr_rcv_ctrl_construct, +* osm_pr_rcv_ctrl_init +*********/ + +/****f* OpenSM: Path Record Receive Controller/osm_pr_rcv_ctrl_init +* NAME +* osm_pr_rcv_ctrl_init +* +* DESCRIPTION +* The osm_pr_rcv_ctrl_init function initializes a +* Path Record Receive Controller object for use. 
+* +* SYNOPSIS +*/ +ib_api_status_t osm_pr_rcv_ctrl_init( + IN osm_pr_rcv_ctrl_t* const p_ctrl, + IN osm_pr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pr_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_pr_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Path Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Path Record Receive Controller methods. +* +* SEE ALSO +* Path Record Receive Controller object, osm_pr_rcv_ctrl_construct, +* osm_pr_rcv_ctrl_destroy, osm_pr_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Path Record Receive Controller/osm_pr_rcv_ctrl_is_inited +* NAME +* osm_pr_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_pr_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_pr_rcv_ctrl_is_inited( + IN const osm_pr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pr_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_pr_rcv_ctrl_construct or osm_pr_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Path Record Receive Controller object, osm_pr_rcv_ctrl_construct, +* osm_pr_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_PRCTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record.h new file mode 100644 index 00000000..a3c016bc --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record.h @@ -0,0 +1,268 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#ifndef _OSM_PKEY_REC_RCV_H_ +#define _OSM_PKEY_REC_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/P_Key Record Receiver +* NAME +* P_Key Record Receiver +* +* DESCRIPTION +* The P_Key Record Receiver object encapsulates the information +* needed to handle P_Key Record query from a SA. +* +* The P_Key Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Yael Kalka, Mellanox +* +*********/ + +/****s* OpenSM: P_Key Record Receiver/osm_pkey_rec_rcv_t +* NAME +* osm_pkey_rec_rcv_t +* +* DESCRIPTION +* P_Key Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pkey_rec_rcv +{ + const osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t pool; + +} osm_pkey_rec_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the SA responder. +* +* p_mad_pool +* Pointer to the mad pool. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* pool +* Pool of linkable P_Key Record objects used to generate +* the query response. +* +* SEE ALSO +* +*********/ + +/****f* OpenSM: P_Key Record Receiver/osm_vlarb_rec_rcv_construct +* NAME +* osm_pkey_rec_rcv_construct +* +* DESCRIPTION +* This function constructs a P_Key Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_pkey_rec_rcv_construct( + IN osm_pkey_rec_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a P_Key Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pkey_rec_rcv_init, osm_pkey_rec_rcv_destroy +* +* Calling osm_pkey_rec_rcv_construct is a prerequisite to calling any other +* method except osm_pkey_rec_rcv_init. +* +* SEE ALSO +* P_Key Record Receiver object, osm_pkey_rec_rcv_init, +* osm_pkey_rec_rcv_destroy +*********/ + +/****f* OpenSM: P_Key Record Receiver/osm_pkey_rec_rcv_destroy +* NAME +* osm_pkey_rec_rcv_destroy +* +* DESCRIPTION +* The osm_pkey_rec_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_pkey_rec_rcv_destroy( + IN osm_pkey_rec_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* P_Key Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pkey_rec_rcv_construct or osm_pkey_rec_rcv_init. +* +* SEE ALSO +* P_Key Record Receiver object, osm_pkey_rec_rcv_construct, +* osm_pkey_rec_rcv_init +*********/ + +/****f* OpenSM: P_Key Record Receiver/osm_pkey_rec_rcv_init +* NAME +* osm_pkey_rec_rcv_init +* +* DESCRIPTION +* The osm_pkey_rec_rcv_init function initializes a +* P_Key Record Receiver object for use. 
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_pkey_rec_rcv_init(
+	IN osm_pkey_rec_rcv_t* const p_rcv,
+	IN osm_sa_resp_t* const p_resp,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN const osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_pkey_rec_rcv_t object to initialize.
+*
+*	p_resp
+*		[in] Pointer to the SA responder object.
+*
+*	p_mad_pool
+*		[in] Pointer to the mad pool.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the P_Key Record Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other P_Key Record Receiver methods.
+*
+* SEE ALSO
+*	P_Key Record Receiver object, osm_pkey_rec_rcv_construct,
+*	osm_pkey_rec_rcv_destroy
+*********/
+
+/****f* OpenSM: P_Key Record Receiver/osm_pkey_rec_rcv_process
+* NAME
+*	osm_pkey_rec_rcv_process
+*
+* DESCRIPTION
+*	Process the P_Key Table query.
+*
+* SYNOPSIS
+*/
+void
+osm_pkey_rec_rcv_process(
+	IN osm_pkey_rec_rcv_t* const p_rcv,
+	IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_rcv
+*		[in] Pointer to an osm_pkey_rec_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the P_Key Record Query attribute.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes an SA P_Key Record attribute.
+*
+* SEE ALSO
+*	P_Key Record Receiver, P_Key Record Response Controller
+*********/
+
+END_C_DECLS
+
+#endif	/* _OSM_PKEY_REC_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record_ctrl.h
new file mode 100644
index 00000000..3c861b20
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_pkey_record_ctrl.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + +#ifndef _OSM_PKEY_REC_CTRL_H_ +#define _OSM_PKEY_REC_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/P_Key Record Receive Controller +* NAME +* P_Key Record Receive Controller +* +* DESCRIPTION +* The P_Key Record Receive Controller object encapsulates +* the information needed to handle P_Key record query from SA client. +* +* The P_Key Record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Yael Kalka, Mellanox +* +*********/ + +/****s* OpenSM: P_Key Record Receive Controller/osm_pkey_rec_rcv_ctrl_t +* NAME +* osm_pkey_rec_rcv_ctrl_t +* +* DESCRIPTION +* P_Key Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pkey_rec_rcv_ctrl +{ + osm_pkey_rec_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_pkey_rec_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the P_Key Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* P_Key Record Receive Controller object +* P_Key Record Receiver object +*********/ + +/****f* OpenSM: P_Key Record Receive Controller/osm_pkey_rec_rcv_ctrl_construct +* NAME +* osm_pkey_rec_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a P_Key Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_pkey_rec_rcv_ctrl_construct( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a P_Key Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pkey_rec_rcv_ctrl_init, osm_pkey_rec_rcv_ctrl_destroy +* +* Calling osm_pkey_rec_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_pkey_rec_rcv_ctrl_init. +* +* SEE ALSO +* P_Key Record Receive Controller object, osm_pkey_rec_rcv_ctrl_init, +* osm_pkey_rec_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: P_Key Record Receive Controller/osm_pkey_rec_rcv_ctrl_destroy +* NAME +* osm_pkey_rec_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_pkey_rec_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pkey_rec_rcv_ctrl_destroy( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* P_Key Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pkey_rec_rcv_ctrl_construct or osm_pkey_rec_rcv_ctrl_init. 
+* +* SEE ALSO +* P_Key Record Receive Controller object, osm_pkey_rec_rcv_ctrl_construct, +* osm_pkey_rec_rcv_ctrl_init +*********/ + +/****f* OpenSM: P_Key Record Receive Controller/osm_pkey_rec_rcv_ctrl_init +* NAME +* osm_pkey_rec_rcv_ctrl_init +* +* DESCRIPTION +* The osm_pkey_rec_rcv_ctrl_init function initializes a +* P_Key Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_pkey_rec_rcv_ctrl_init( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl, + IN osm_pkey_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pkey_rec_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_pkey_rec_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the P_Key Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other P_Key Record Receive Controller methods. +* +* SEE ALSO +* P_Key Record Receive Controller object, osm_pkey_rec_rcv_ctrl_construct, +* osm_pkey_rec_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_PKEY_REC_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record.h new file mode 100644 index 00000000..242d52eb --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_pir_rcv_t. + * This object represents the PortInfo Record Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. 
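The controller declared above is thin glue between the central Dispatcher and the receiver. As a hedged sketch of how such a controller's init and destroy are conventionally written: register a callback that forwards the dispatched MAD wrapper to the receiver, and unregister it on teardown. The message-ID constant OSM_MSG_MAD_PKEY_TBL_RECORD, the callback, and the __example_* function names are assumptions here, not taken from this patch.

static void
__example_pkey_rec_rcv_ctrl_disp_callback(
	IN void* context,
	IN void* p_data )
{
	/* The dispatcher hands us the MAD wrapper; forward it. */
	osm_pkey_rec_rcv_ctrl_t* const p_ctrl =
		(osm_pkey_rec_rcv_ctrl_t*)context;
	osm_pkey_rec_rcv_process( p_ctrl->p_rcv, (osm_madw_t*)p_data );
}

static ib_api_status_t
__example_pkey_rec_rcv_ctrl_init(
	IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl,
	IN osm_pkey_rec_rcv_t* const p_rcv,
	IN osm_log_t* const p_log,
	IN cl_dispatcher_t* const p_disp )
{
	osm_pkey_rec_rcv_ctrl_construct( p_ctrl );
	p_ctrl->p_rcv = p_rcv;
	p_ctrl->p_log = p_log;
	p_ctrl->p_disp = p_disp;

	/* Route P_Key table record queries to this controller. */
	p_ctrl->h_disp = cl_disp_register( p_disp,
		OSM_MSG_MAD_PKEY_TBL_RECORD,
		__example_pkey_rec_rcv_ctrl_disp_callback, p_ctrl );

	return ( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) ?
		IB_INSUFFICIENT_RESOURCES : IB_SUCCESS;
}

static void
__example_pkey_rec_rcv_ctrl_destroy(
	IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl )
{
	/* Stop deliveries before the receiver underneath goes away. */
	if( p_ctrl->h_disp != CL_DISP_INVALID_HANDLE )
		cl_disp_unregister( p_ctrl->h_disp );
	p_ctrl->h_disp = CL_DISP_INVALID_HANDLE;
}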
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_PIR_RCV_H_ +#define _OSM_PIR_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/PortInfo Record Receiver +* NAME +* PortInfo Record Receiver +* +* DESCRIPTION +* The PortInfo Record Receiver object encapsulates the information +* needed to receive the PortInfoRecord attribute from a node. +* +* The PortInfo Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: PortInfo Record Receiver/osm_pir_rcv_t +* NAME +* osm_pir_rcv_t +* +* DESCRIPTION +* PortInfo Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pir_rcv +{ + osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t pool; +} osm_pir_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the SA responder. +* +* p_mad_pool +* Pointer to the mad pool. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* pool +* Pool of linkable PortInfo Record objects used to generate +* the query response. +* +* SEE ALSO +* +*********/ + +/****f* OpenSM: PortInfo Record Receiver/osm_pir_rcv_construct +* NAME +* osm_pir_rcv_construct +* +* DESCRIPTION +* This function constructs a PortInfo Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_pir_rcv_construct( + IN osm_pir_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a PortInfo Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pir_rcv_init, osm_pir_rcv_destroy +* +* Calling osm_pir_rcv_construct is a prerequisite to calling any other +* method except osm_pir_rcv_init. +* +* SEE ALSO +* PortInfo Record Receiver object, osm_pir_rcv_init, +* osm_pir_rcv_destroy +*********/ + +/****f* OpenSM: PortInfo Record Receiver/osm_pir_rcv_destroy +* NAME +* osm_pir_rcv_destroy +* +* DESCRIPTION +* The osm_pir_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_pir_rcv_destroy( + IN osm_pir_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* PortInfo Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pir_rcv_construct or osm_pir_rcv_init. +* +* SEE ALSO +* PortInfo Record Receiver object, osm_pir_rcv_construct, +* osm_pir_rcv_init +*********/ + +/****f* OpenSM: PortInfo Record Receiver/osm_pir_rcv_init +* NAME +* osm_pir_rcv_init +* +* DESCRIPTION +* The osm_pir_rcv_init function initializes a +* PortInfo Record Receiver object for use. 
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_pir_rcv_init( + IN osm_pir_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_pir_rcv_t object to initialize. +* +* p_resp +* [in] Pointer to the SA responder object. +* +* p_mad_pool +* [in] Pointer to the MAD pool. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the PortInfo Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other PortInfo Record Receiver methods. +* +* SEE ALSO +* PortInfo Record Receiver object, osm_pir_rcv_construct, +* osm_pir_rcv_destroy +*********/ + +/****f* OpenSM: PortInfo Record Receiver/osm_pir_rcv_process +* NAME +* osm_pir_rcv_process +* +* DESCRIPTION +* Process the PortInfoRecord attribute. +* +* SYNOPSIS +*/ +void +osm_pir_rcv_process( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_pir_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's PortInfoRecord attribute. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* This function processes a PortInfoRecord attribute. +* +* SEE ALSO +* PortInfo Record Receiver, PortInfo Record Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_PIR_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record_ctrl.h new file mode 100644 index 00000000..a8ee2b80 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_portinfo_record_ctrl.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_pir_rcv_ctrl_t. + * This object represents a controller that receives the IBA PortInfo + * attribute from a node. + * This object is part of the OpenSM family of objects.
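A process routine such as osm_pir_rcv_process typically begins by unwrapping the SA MAD from the MAD wrapper and viewing its payload as the queried record type. The sketch below uses the standard accessors osm_madw_get_sa_mad_ptr and ib_sa_mad_get_payload_ptr from osm_madw.h/ib_types.h; the function name is hypothetical and the body is only the first steps, not the full matching logic.

static void
__example_unwrap_pir_query(
	IN osm_pir_rcv_t* const p_rcv,
	IN const osm_madw_t* const p_madw )
{
	const ib_sa_mad_t* p_sa_mad;
	const ib_portinfo_record_t* p_rcvd_rec;

	/* The MAD wrapper carries the raw SA MAD. */
	p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );

	/* The query payload is itself a PortInfoRecord; comp_mask
	   says which of its fields the client constrained. */
	p_rcvd_rec = (const ib_portinfo_record_t*)
		ib_sa_mad_get_payload_ptr( p_sa_mad );

	osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
		"__example_unwrap_pir_query: method = 0x%X\n",
		p_sa_mad->method );

	/* ... here real code matches p_rcvd_rec against the subnet ... */
	UNUSED_PARAM( p_rcvd_rec );
}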
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_PIR_CTRL_H_ +#define _OSM_PIR_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/PortInfo Record Receive Controller +* NAME +* PortInfo Record Receive Controller +* +* DESCRIPTION +* The PortInfo Record Receive Controller object encapsulates +* the information needed to receive the PortInfo attribute from a node. +* +* The PortInfo Record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: PortInfo Record Receive Controller/osm_pir_rcv_ctrl_t +* NAME +* osm_pir_rcv_ctrl_t +* +* DESCRIPTION +* PortInfo Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_pir_rcv_ctrl +{ + osm_pir_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_pir_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the PortInfo Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* PortInfo Record Receive Controller object +* PortInfo Record Receiver object +*********/ + +/****f* OpenSM: PortInfo Record Receive Controller/osm_pir_rcv_ctrl_construct +* NAME +* osm_pir_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a PortInfo Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_pir_rcv_ctrl_construct( + IN osm_pir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a PortInfo Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_pir_rcv_ctrl_init, osm_pir_rcv_ctrl_destroy +* +* Calling osm_pir_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_pir_rcv_ctrl_init. +* +* SEE ALSO +* PortInfo Record Receive Controller object, osm_pir_rcv_ctrl_init, +* osm_pir_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: PortInfo Record Receive Controller/osm_pir_rcv_ctrl_destroy +* NAME +* osm_pir_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_pir_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_pir_rcv_ctrl_destroy( + IN osm_pir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* PortInfo Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_pir_rcv_ctrl_construct or osm_pir_rcv_ctrl_init. +* +* SEE ALSO +* PortInfo Record Receive Controller object, osm_pir_rcv_ctrl_construct, +* osm_pir_rcv_ctrl_init +*********/ + +/****f* OpenSM: PortInfo Record Receive Controller/osm_pir_rcv_ctrl_init +* NAME +* osm_pir_rcv_ctrl_init +* +* DESCRIPTION +* The osm_pir_rcv_ctrl_init function initializes a +* PortInfo Record Receive Controller object for use. 
+* +* SYNOPSIS +*/ +ib_api_status_t osm_pir_rcv_ctrl_init( + IN osm_pir_rcv_ctrl_t* const p_ctrl, + IN osm_pir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_pir_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_pir_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the PortInfo Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other PortInfo Record Receive Controller methods. +* +* SEE ALSO +* PortInfo Record Receive Controller object, osm_pir_rcv_ctrl_construct, +* osm_pir_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_PIR_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_response.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_response.h new file mode 100644 index 00000000..7d691609 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_response.h @@ -0,0 +1,255 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sa_resp_t. + * This object represents an object that responds to SA queries. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SA_RESP_H_ +#define _OSM_SA_RESP_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SA Response +* NAME +* SA Response +* +* DESCRIPTION +* The SA Response object encapsulates the information +* needed to respond to an SA query. +* +* The SA Response object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* Steve King, Intel +* +*********/ + +/****s* OpenSM: SA Response/osm_sa_resp_t +* NAME +* osm_sa_resp_t +* +* DESCRIPTION +* SA Response structure. 
+* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sa_resp +{ + osm_mad_pool_t *p_pool; + osm_log_t *p_log; +} osm_sa_resp_t; +/* +* FIELDS +* p_pool +* Pointer to the MAD pool. +* +* p_log +* Pointer to the log object. +* +* SEE ALSO +* SA Response object +*********/ + +/****f* OpenSM: SA Response/osm_sa_resp_construct +* NAME +* osm_sa_resp_construct +* +* DESCRIPTION +* This function constructs a SA Response object. +* +* SYNOPSIS +*/ +void +osm_sa_resp_construct( + IN osm_sa_resp_t* const p_resp ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to a SA Response object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sa_resp_init, and osm_sa_resp_destroy. +* +* Calling osm_sa_resp_construct is a prerequisite to calling any other +* method except osm_sa_resp_init. +* +* SEE ALSO +* SA Response object, osm_sa_resp_init, +* osm_sa_resp_destroy +*********/ + +/****f* OpenSM: SA Response/osm_sa_resp_destroy +* NAME +* osm_sa_resp_destroy +* +* DESCRIPTION +* The osm_sa_resp_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_sa_resp_destroy( + IN osm_sa_resp_t* const p_resp ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SA Response object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sa_resp_construct or osm_sa_resp_init. +* +* SEE ALSO +* SA Response object, osm_sa_resp_construct, +* osm_sa_resp_init +*********/ + +/****f* OpenSM: SA Response/osm_sa_resp_init +* NAME +* osm_sa_resp_init +* +* DESCRIPTION +* The osm_sa_resp_init function initializes a +* SA Response object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_sa_resp_init( + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_pool, + IN osm_log_t* const p_log ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to an osm_sa_resp_t object to initialize. +* +* p_pool +* [in] Pointer to the MAD pool. +* +* p_log +* [in] Pointer to the log object. +* +* RETURN VALUES +* IB_SUCCESS if the SA Response object was initialized +* successfully. +* +* NOTES +* Allows calling other SA Response methods. +* +* SEE ALSO +* SA Response object, osm_sa_resp_construct, +* osm_sa_resp_destroy +*********/ + +/****f* OpenSM: SA Response/osm_sa_send_error +* NAME +* osm_sa_send_error +* +* DESCRIPTION +* Sends a generic SA response with the specified error status. +* The payload is simply replicated from the request MAD. +* +* SYNOPSIS +*/ +void +osm_sa_send_error( + IN osm_sa_resp_t* const p_resp, + IN const osm_madw_t* const p_madw, + IN const ib_net16_t sa_status ); +/* +* PARAMETERS +* p_resp +* [in] Pointer to an osm_sa_resp_t object. +* +* p_madw +* [in] Original MAD to which the response must be sent. +* +* sa_status +* [in] Status to send in the response. +* +* RETURN VALUES +* None. +* +* NOTES +* Used by the SA receivers to reject malformed or unsupported +* requests, as in the sketch below.
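A sketch of the typical osm_sa_send_error call site, assuming the method check shown and a hypothetical helper name. IB_SA_MAD_STATUS_REQ_INVALID comes from ib_types.h; a receiver that finds no matching records would pass IB_SA_MAD_STATUS_NO_RECORDS the same way.

static void
__example_validate_sa_request(
	IN osm_sa_resp_t* const p_resp,
	IN const osm_madw_t* const p_madw )
{
	const ib_sa_mad_t* const p_sa_mad =
		osm_madw_get_sa_mad_ptr( p_madw );

	/* Record queries only support SubnAdmGet/SubnAdmGetTable;
	   bounce anything else straight back to the client. */
	if( ( p_sa_mad->method != IB_MAD_METHOD_GET ) &&
	    ( p_sa_mad->method != IB_MAD_METHOD_GETTABLE ) )
		osm_sa_send_error( p_resp, p_madw,
			IB_SA_MAD_STATUS_REQ_INVALID );
}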
+* +* SEE ALSO +* SA Response object, osm_sa_resp_construct, +* osm_sa_resp_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_SA_RESP_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record.h new file mode 100644 index 00000000..baa15fae --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record.h @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sr_rcv_t. + * This object represents the ServiceRecord Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SR_H_ +#define _OSM_SR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Service Record Receiver +* NAME +* Service Record Receiver +* +* DESCRIPTION +* The Service Record Receiver object encapsulates the information +* needed to receive the ServiceRecord request from a node. +* +* The Service Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Anil S Keshavamurthy +* +*********/ + +/****s* OpenSM: Service Record Receiver/osm_sr_rcv_t +* NAME +* osm_sr_rcv_t +* +* DESCRIPTION +* Service Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sr_rcv +{ + osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t sr_pool; + cl_timer_t sr_timer; + +} osm_sr_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the osm_sa_resp_t object. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. 
+* +* sr_pool +* Pool of Service Record objects used to generate query responses. +* +* SEE ALSO +* Service Record Receiver object +*********/ + +/****f* OpenSM: Service Record Receiver/osm_sr_rcv_construct +* NAME +* osm_sr_rcv_construct +* +* DESCRIPTION +* This function constructs a Service Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_sr_rcv_construct( + IN osm_sr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a Service Record Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sr_rcv_init, osm_sr_rcv_destroy +* +* Calling osm_sr_rcv_construct is a prerequisite to calling any other +* method except osm_sr_rcv_init. +* +* SEE ALSO +* Service Record Receiver object, osm_sr_rcv_init, osm_sr_rcv_destroy +*********/ + +/****f* OpenSM: Service Record Receiver/osm_sr_rcv_destroy +* NAME +* osm_sr_rcv_destroy +* +* DESCRIPTION +* The osm_sr_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_sr_rcv_destroy( + IN osm_sr_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Service Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sr_rcv_construct or osm_sr_rcv_init. +* +* SEE ALSO +* Service Record Receiver object, osm_sr_rcv_construct, +* osm_sr_rcv_init +*********/ + +/****f* OpenSM: Service Record Receiver/osm_sr_rcv_init +* NAME +* osm_sr_rcv_init +* +* DESCRIPTION +* The osm_sr_rcv_init function initializes a +* Service Record Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_sr_rcv_init( + IN osm_sr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_sr_rcv_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the Service Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other Service Record Receiver methods. +* +* SEE ALSO +* Service Record Receiver object, osm_sr_rcv_construct, +* osm_sr_rcv_destroy +*********/ + +/****f* OpenSM: Service Record Receiver/osm_sr_rcv_process +* NAME +* osm_sr_rcv_process +* +* DESCRIPTION +* Process the ServiceRecord request. +* +* SYNOPSIS +*/ +void +osm_sr_rcv_process( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_sr_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's ServiceRecord attribute. +* NOTES +* This function processes a ServiceRecord attribute. +* +* SEE ALSO +* Service Record Receiver +*********/ + +/****f* OpenSM: Service Record Receiver/osm_sr_rcv_lease_cb +* NAME +* osm_sr_rcv_lease_cb +* +* DESCRIPTION +* Timer Callback function which is executed to check the lease period +* expiration +* +* SYNOPSIS +*/ + +void +osm_sr_rcv_lease_cb( + IN void* context ); +/* +* PARAMETERS +* context +* [in] Pointer to osm_sa_db_t object. 
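The sr_timer field and osm_sr_rcv_lease_cb above pair through complib's timer object. Below is a minimal sketch of arming that timer; the one-second first expiration and the helper name are assumptions (in practice the period would be derived from the shortest outstanding lease), and the callback is expected to re-arm the timer while any finite lease remains.

#include <complib/cl_timer.h>

static ib_api_status_t
__example_arm_lease_timer(
	IN osm_sr_rcv_t* const p_rcv,
	IN void* const context )
{
	cl_status_t status;

	/* Bind the lease-expiry callback and its context to the timer. */
	status = cl_timer_init( &p_rcv->sr_timer,
		osm_sr_rcv_lease_cb, context );
	if( status != CL_SUCCESS )
		return IB_ERROR;

	/* Fire once after 1000 ms. */
	status = cl_timer_start( &p_rcv->sr_timer, 1000 );
	return ( status == CL_SUCCESS ) ? IB_SUCCESS : IB_ERROR;
}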
+* +* NOTES +* This function processes a ServiceRecord attribute. +* +* SEE ALSO +* Service Record Receiver +*********/ + +END_C_DECLS + +#endif /* _OSM_SR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record_ctrl.h new file mode 100644 index 00000000..f784090e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_service_record_ctrl.h @@ -0,0 +1,230 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sr_rcv_ctrl_t. + * This object represents a controller that receives the IBA Service + * record attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SRCTRL_H_ +#define _OSM_SRCTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Service Record Receive Controller +* NAME +* Service Record Receive Controller +* +* DESCRIPTION +* The Service Record Receive Controller object encapsulates +* the information needed to receive the Service Record attribute from a node. +* +* The Service Record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Anil S Keshavamurthy, Intel +* +*********/ + +/****s* OpenSM: Service Record Receive Controller/osm_sr_rcv_ctrl_t +* NAME +* osm_sr_rcv_ctrl_t +* +* DESCRIPTION +* Service Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sr_rcv_ctrl +{ + osm_sr_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_sr_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Service Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. 
+* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Service Record Receiver object +*********/ + +/****f* OpenSM: Service Record Receive Controller/osm_sr_rcv_ctrl_construct +* NAME +* osm_sr_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Service Record Receive Controller object. +* +* SYNOPSIS +*/ +void osm_sr_rcv_ctrl_construct( + IN osm_sr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Service Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sr_rcv_ctrl_init, osm_sr_rcv_ctrl_destroy +* +* Calling osm_sr_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_sr_rcv_ctrl_init. +* +* SEE ALSO +* Service Record Receive Controller object, osm_sr_rcv_ctrl_init, +* osm_sr_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: Service Record Receive Controller/osm_sr_rcv_ctrl_destroy +* NAME +* osm_sr_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_sr_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_sr_rcv_ctrl_destroy( + IN osm_sr_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Service Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sr_rcv_ctrl_construct or osm_sr_rcv_ctrl_init. +* +* SEE ALSO +* Service Record Receive Controller object, osm_sr_rcv_ctrl_construct, +* osm_sr_rcv_ctrl_init +*********/ + +/****f* OpenSM: Service Record Receive Controller/osm_sr_rcv_ctrl_init +* NAME +* osm_sr_rcv_ctrl_init +* +* DESCRIPTION +* The osm_sr_rcv_ctrl_init function initializes a +* Service Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_sr_rcv_ctrl_init( + IN osm_sr_rcv_ctrl_t* const p_ctrl, + IN osm_sr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sr_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_sr_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* IB_SUCCESS if the Service Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Service Record Receive Controller methods. +* +* SEE ALSO +* Service Record Receive Controller object, osm_sr_rcv_ctrl_construct, +* osm_sr_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_SRCTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record.h new file mode 100644 index 00000000..3f1abfff --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record.h @@ -0,0 +1,281 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_slvl_rec_rcv_t. + * This object represents the SLtoVL Mapping Table Receiver object. + * attribute from a SA query. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_SLVL_REC_RCV_H_ +#define _OSM_SLVL_REC_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SLtoVL Mapping Record Receiver +* NAME +* SLtoVL Mapping Record Receiver +* +* DESCRIPTION +* The SLtoVL Mapping Record Receiver object encapsulates the information +* needed to handle SLtoVL Mapping Record query from a SA. +* +* The SLtoVL Mapping Record Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: SLtoVL Mapping Record Receiver/osm_slvl_rec_rcv_t +* NAME +* osm_slvl_rec_rcv_t +* +* DESCRIPTION +* SLtoVL Mapping Record Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_slvl_rec_rcv +{ + const osm_subn_t *p_subn; + osm_sa_resp_t *p_resp; + osm_mad_pool_t *p_mad_pool; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_qlock_pool_t pool; + +} osm_slvl_rec_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_resp +* Pointer to the SA responder. +* +* p_mad_pool +* Pointer to the mad pool. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* pool +* Pool of linkable SLtoVL Mapping Record objects used to generate +* the query response. +* +* SEE ALSO +* +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receiver/osm_slvl_rec_rcv_construct +* NAME +* osm_slvl_rec_rcv_construct +* +* DESCRIPTION +* This function constructs a SLtoVL Mapping Record Receiver object. +* +* SYNOPSIS +*/ +void +osm_slvl_rec_rcv_construct( + IN osm_slvl_rec_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a SLtoVL Mapping Record Receiver object to construct. 
+* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_slvl_rec_rcv_init, osm_slvl_rec_rcv_destroy +* +* Calling osm_slvl_rec_rcv_construct is a prerequisite to calling any other +* method except osm_slvl_rec_rcv_init. +* +* SEE ALSO +* SLtoVL Mapping Record Receiver object, osm_slvl_rec_rcv_init, +* osm_slvl_rec_rcv_destroy +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receiver/osm_slvl_rec_rcv_destroy +* NAME +* osm_slvl_rec_rcv_destroy +* +* DESCRIPTION +* The osm_slvl_rec_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_slvl_rec_rcv_destroy( + IN osm_slvl_rec_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SLtoVL Mapping Record Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_slvl_rec_rcv_construct or osm_slvl_rec_rcv_init. +* +* SEE ALSO +* SLtoVL Mapping Record Receiver object, osm_slvl_rec_rcv_construct, +* osm_slvl_rec_rcv_init +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receiver/osm_slvl_rec_rcv_init +* NAME +* osm_slvl_rec_rcv_init +* +* DESCRIPTION +* The osm_slvl_rec_rcv_init function initializes a +* SLtoVL Mapping Record Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_slvl_rec_rcv_init( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_slvl_rec_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the SLtoVL Mapping Record Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other SLtoVL Mapping Record Receiver methods. +* +* SEE ALSO +* SLtoVL Mapping Record Receiver object, osm_slvl_rec_rcv_construct, +* osm_slvl_rec_rcv_destroy +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receiver/osm_slvl_rec_rcv_process +* NAME +* osm_slvl_rec_rcv_process +* +* DESCRIPTION +* Process the SLtoVL Map Table Query . +* +* SYNOPSIS +*/ +void +osm_slvl_rec_rcv_process( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_slvl_rec_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the SLtoVL Map Record Query attribute. +* +* RETURN VALUES +* CL_SUCCESS if the Query processing was successful. +* +* NOTES +* This function processes a SA SLtoVL Map Record attribute. +* +* SEE ALSO +* SLtoVL Mapping Record Receiver, SLtoVL Mapping Record Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_SLVL_REC_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record_ctrl.h new file mode 100644 index 00000000..ae02bba1 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_slvl_record_ctrl.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. 
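Each receiver above keeps a cl_qlock_pool_t of "linkable" record objects; the point of the embedded pool item is that one allocation can sit in the pool and, while in use, on the response list. The sketch below illustrates that idea under stated assumptions: the wrapper struct and helper names are hypothetical, and the pool sizing (preallocate 32, grow by 32, no upper bound) is illustrative only.

typedef struct _example_slvl_item
{
	cl_pool_item_t pool_item;	/* linkage for pool and list */
	ib_slvl_table_record_t rec;	/* record returned to the client */
} example_slvl_item_t;

static cl_status_t
__example_slvl_pool_init(
	IN osm_slvl_rec_rcv_t* const p_rcv )
{
	/* min 32 items, no upper bound, grow 32 at a time. */
	return cl_qlock_pool_init( &p_rcv->pool, 32, 0, 32,
		sizeof(example_slvl_item_t), NULL, NULL, NULL );
}

static void
__example_slvl_add_record(
	IN osm_slvl_rec_rcv_t* const p_rcv,
	IN cl_qlist_t* const p_list,
	IN const ib_slvl_table_record_t* const p_rec )
{
	example_slvl_item_t* const p_item = (example_slvl_item_t*)
		cl_qlock_pool_get( &p_rcv->pool );

	if( !p_item )
		return;	/* pool exhausted; real code reports this */

	p_item->rec = *p_rec;
	cl_qlist_insert_tail( p_list, &p_item->pool_item.list_item );
}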
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sa_slvl_rec_rcv_ctrl_t. + * This object represents a controller that receives the IBA VL Arbitration + * record query from SA client. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_SLVL_REC_CTRL_H_ +#define _OSM_SLVL_REC_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SLtoVL Record Receive Controller +* NAME +* SLtoVL Record Receive Controller +* +* DESCRIPTION +* The SLtoVL Mapping Record Receive Controller object encapsulates +* the information needed to handle SLtoVL Mapping record query from SA client. +* +* The SLtoVL Mapping Record Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: SLtoVL Mapping Record Receive Controller/osm_slvl_rec_rcv_ctrl_t +* NAME +* osm_slvl_rec_rcv_ctrl_t +* +* DESCRIPTION +* SLtoVL Mapping Record Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_slvl_rec_rcv_ctrl +{ + osm_slvl_rec_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_slvl_rec_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the SLtoVL Mapping Record Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* SLtoVL Mapping Record Receive Controller object +* SLtoVL Mapping Record Receiver object +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receive Controller/osm_slvl_rec_rcv_ctrl_construct +* NAME +* osm_slvl_rec_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a SLtoVL Mapping Record Receive Controller object. 
+* +* SYNOPSIS +*/ +void osm_slvl_rec_rcv_ctrl_construct( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a SLtoVL Mapping Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_slvl_rec_rcv_ctrl_init, osm_slvl_rec_rcv_ctrl_destroy +* +* Calling osm_slvl_rec_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_slvl_rec_rcv_ctrl_init. +* +* SEE ALSO +* SLtoVL Mapping Record Receive Controller object, osm_slvl_rec_rcv_ctrl_init, +* osm_slvl_rec_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receive Controller/osm_slvl_rec_rcv_ctrl_destroy +* NAME +* osm_slvl_rec_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_slvl_rec_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_slvl_rec_rcv_ctrl_destroy( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SLtoVL Mapping Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_slvl_rec_rcv_ctrl_construct or osm_slvl_rec_rcv_ctrl_init. +* +* SEE ALSO +* SLtoVL Mapping Record Receive Controller object, osm_slvl_rec_rcv_ctrl_construct, +* osm_slvl_rec_rcv_ctrl_init +*********/ + +/****f* OpenSM: SLtoVL Mapping Record Receive Controller/osm_slvl_rec_rcv_ctrl_init +* NAME +* osm_slvl_rec_rcv_ctrl_init +* +* DESCRIPTION +* The osm_slvl_rec_rcv_ctrl_init function initializes a +* SLtoVL Mapping Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_slvl_rec_rcv_ctrl_init( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl, + IN osm_slvl_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_slvl_rec_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_slvl_rec_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the SLtoVL Mapping Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other SLtoVL Mapping Record Receive Controller methods. +* +* SEE ALSO +* SLtoVL Mapping Record Receive Controller object, osm_slvl_rec_rcv_ctrl_construct, +* osm_slvl_rec_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_SLVL_REC_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record.h new file mode 100644 index 00000000..d9dabcb3 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
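The "thread safe" claims in these receivers rest on the shared p_lock: SA record queries only read subnet state, so they take the reader side of the passive lock while walking the tables, and only the SM's writers (for example, a sweep updating the subnet) exclude them. A minimal sketch with a hypothetical helper name:

static void
__example_locked_slvl_lookup(
	IN osm_slvl_rec_rcv_t* const p_rcv )
{
	/* Readers may run concurrently under the passive lock. */
	cl_plock_acquire( p_rcv->p_lock );

	/* ... walk p_rcv->p_subn and collect matching records ... */

	cl_plock_release( p_rcv->p_lock );
}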
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_smir_rcv_t. + * This object represents the SMInfo Receiver object. + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SMIR_H_ +#define _OSM_SMIR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SM Info Receiver +* NAME +* SM Info Receiver +* +* DESCRIPTION +* The SM Info Receiver object encapsulates the information +* needed to receive the SMInfoRecord attribute from a node. +* +* The SM Info Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ +/****s* OpenSM: SM Info Receiver/osm_smir_rcv_t +* NAME +* osm_smir_rcv_t +* +* DESCRIPTION +* SM Info Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_smir +{ + osm_subn_t* p_subn; + osm_stats_t* p_stats; + osm_sa_resp_t* p_resp; + osm_mad_pool_t* p_mad_pool; + osm_log_t* p_log; + cl_plock_t* p_lock; + cl_qlock_pool_t pool; +} osm_smir_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* SEE ALSO +* SM Info Receiver object +*********/ + +/****f* OpenSM: SM Info Receiver/osm_smir_rcv_construct +* NAME +* osm_smir_rcv_construct +* +* DESCRIPTION +* This function constructs a SM Info Receiver object. +* +* SYNOPSIS +*/ +void osm_smir_rcv_construct( + IN osm_smir_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a SM Info Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_smir_rcv_init, osm_smir_rcv_destroy +* +* Calling osm_smir_rcv_construct is a prerequisite to calling any other +* method except osm_smir_rcv_init. 
+* +* SEE ALSO +* SM Info Receiver object, osm_smir_rcv_init, osm_smir_rcv_destroy +*********/ + +/****f* OpenSM: SM Info Receiver/osm_smir_rcv_destroy +* NAME +* osm_smir_rcv_destroy +* +* DESCRIPTION +* The osm_smir_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_smir_rcv_destroy( + IN osm_smir_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SM Info Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_smir_rcv_construct or osm_smir_rcv_init. +* +* SEE ALSO +* SM Info Receiver object, osm_smir_rcv_construct, +* osm_smir_rcv_init +*********/ + +/****f* OpenSM: SM Info Receiver/osm_smir_rcv_init +* NAME +* osm_smir_rcv_init +* +* DESCRIPTION +* The osm_smir_rcv_init function initializes a +* SM Info Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_smir_rcv_init( + IN osm_smir_rcv_t* const p_ctrl, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_stats_t* const p_stats, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_smir_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_stats +* [in] Pointer to the Statistics object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the SM Info Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other SM Info Receiver methods. +* +* SEE ALSO +* SM Info Receiver object, osm_smir_rcv_construct, osm_smir_rcv_destroy +*********/ + +/****f* OpenSM: SM Info Receiver/osm_smir_rcv_process +* NAME +* osm_smir_rcv_process +* +* DESCRIPTION +* Process the SMInfoRecord attribute. +* +* SYNOPSIS +*/ +void osm_smir_rcv_process( + IN osm_smir_rcv_t* const p_ctrl, + IN const osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_smir_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's SMInfoRecord attribute. +* +* RETURN VALUES +* CL_SUCCESS if the SMInfoRecord processing was successful. +* +* NOTES +* This function processes a SMInfoRecord attribute. +* +* SEE ALSO +* SM Info Receiver, SM Info Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_SMIR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record_ctrl.h new file mode 100644 index 00000000..28aae4dd --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sminfo_record_ctrl.h @@ -0,0 +1,231 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
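For GetTable queries such as the SMInfoRecord one above, the response carries no explicit record count; clients divide the response payload by attr_offset, which is expressed in 8-byte units. A sketch of stamping those response fields with the ib_get_attr_offset helper from ib_types.h (the function name is hypothetical):

static void
__example_finish_gettable_resp(
	IN ib_sa_mad_t* const p_resp_sa_mad )
{
	p_resp_sa_mad->method = IB_MAD_METHOD_GETTABLE_RESP;

	/* Per-record stride in units of 8 bytes; together with the
	   payload length this implies the number of records returned. */
	p_resp_sa_mad->attr_offset =
		ib_get_attr_offset( sizeof(ib_sminfo_record_t) );
}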
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_smir_ctrl_t. + * This object represents a controller that receives the IBA SMInfo + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SMIR_CTRL_H_ +#define _OSM_SMIR_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SM Info Receive Controller +* NAME +* SM Info Receive Controller +* +* DESCRIPTION +* The SM Info Receive Controller object encapsulates +* the information needed to receive the SMInfo attribute from a node. +* +* The SM Info Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Ranjit Pandit, Intel +* +*********/ + +/****s* OpenSM: SM Info Receive Controller/osm_smir_ctrl_t +* NAME +* osm_smir_ctrl_t +* +* DESCRIPTION +* SM Info Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_smir_ctrl +{ + osm_smir_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_smir_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the SM Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* SM Info Receive Controller object +* SM Info Receiver object +*********/ + +/****f* OpenSM: SM Info Receive Controller/osm_smir_ctrl_construct +* NAME +* osm_smir_ctrl_construct +* +* DESCRIPTION +* This function constructs a SM Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_smir_ctrl_construct( + IN osm_smir_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a SM Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_smir_ctrl_init, osm_smir_ctrl_destroy +* +* Calling osm_smir_ctrl_construct is a prerequisite to calling any other +* method except osm_smir_ctrl_init. 
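+*
+*    A minimal wiring sketch (illustrative only: rcv, p_log and p_disp
+*    are assumed to be owned and initialized by the enclosing SA object;
+*    judging by the h_disp field, osm_smir_ctrl_init is expected to
+*    register the controller with the Dispatcher):
+*
+*        osm_smir_ctrl_t ctrl;
+*
+*        osm_smir_ctrl_construct( &ctrl );
+*        if( osm_smir_ctrl_init( &ctrl, &rcv, p_log, p_disp ) != IB_SUCCESS )
+*            ... handle the error ...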
+*
+* SEE ALSO
+*    SM Info Receive Controller object, osm_smir_ctrl_init,
+*    osm_smir_ctrl_destroy
+*********/
+
+/****f* OpenSM: SM Info Receive Controller/osm_smir_ctrl_destroy
+* NAME
+*    osm_smir_ctrl_destroy
+*
+* DESCRIPTION
+*    The osm_smir_ctrl_destroy function destroys the object, releasing
+*    all resources.
+*
+* SYNOPSIS
+*/
+void osm_smir_ctrl_destroy(
+    IN osm_smir_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    Performs any necessary cleanup of the specified
+*    SM Info Receive Controller object.
+*    Further operations should not be attempted on the destroyed object.
+*    This function should only be called after a call to
+*    osm_smir_ctrl_construct or osm_smir_ctrl_init.
+*
+* SEE ALSO
+*    SM Info Receive Controller object, osm_smir_ctrl_construct,
+*    osm_smir_ctrl_init
+*********/
+
+/****f* OpenSM: SM Info Receive Controller/osm_smir_ctrl_init
+* NAME
+*    osm_smir_ctrl_init
+*
+* DESCRIPTION
+*    The osm_smir_ctrl_init function initializes a
+*    SM Info Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_smir_ctrl_init(
+    IN osm_smir_ctrl_t* const p_ctrl,
+    IN osm_smir_rcv_t* const p_rcv,
+    IN osm_log_t* const p_log,
+    IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to an osm_smir_ctrl_t object to initialize.
+*
+*    p_rcv
+*        [in] Pointer to an osm_smir_rcv_t object.
+*
+*    p_log
+*        [in] Pointer to the log object.
+*
+*    p_disp
+*        [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*    IB_SUCCESS if the SM Info Receive Controller object was initialized
+*    successfully.
+*
+* NOTES
+*    Allows calling other SM Info Receive Controller methods.
+*
+* SEE ALSO
+*    SM Info Receive Controller object, osm_smir_ctrl_construct,
+*    osm_smir_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SMIR_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record.h
new file mode 100644
index 00000000..e8cf0bb0
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record.h
@@ -0,0 +1,306 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_sir_rcv_t.
+ *    This object represents the SwitchInfo Receiver object, which
+ *    receives the SwitchInfo attribute from a switch node.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ */
+
+#ifndef _OSM_SIR_RCV_H_
+#define _OSM_SIR_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Switch Info Receiver
+* NAME
+*    Switch Info Receiver
+*
+* DESCRIPTION
+*    The Switch Info Receiver object encapsulates the information
+*    needed to receive the SwitchInfo attribute from a switch node.
+*
+*    The Switch Info Receiver object is thread safe.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
+*
+* AUTHOR
+*    Hal Rosenstock, Voltaire
+*
+*********/
+
+/****s* OpenSM: Switch Info Receiver/osm_sir_rcv_t
+* NAME
+*    osm_sir_rcv_t
+*
+* DESCRIPTION
+*    Switch Info Receiver structure.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_sir_rcv
+{
+    osm_subn_t         *p_subn;
+    osm_sa_resp_t      *p_resp;
+    osm_mad_pool_t     *p_mad_pool;
+    osm_log_t          *p_log;
+    osm_req_t          *p_req;
+    osm_state_mgr_t    *p_state_mgr;
+    cl_plock_t         *p_lock;
+    cl_qlock_pool_t    pool;
+} osm_sir_rcv_t;
+/*
+* FIELDS
+*    p_subn
+*        Pointer to the Subnet object for this subnet.
+*
+*    p_resp
+*        Pointer to the SA Responder object.
+*
+*    p_mad_pool
+*        Pointer to the mad pool.
+*
+*    p_log
+*        Pointer to the log object.
+*
+*    p_req
+*        Pointer to the Request object.
+*
+*    p_state_mgr
+*        Pointer to the State Manager object.
+*
+*    p_lock
+*        Pointer to the serializing lock.
+*
+*    pool
+*        Pool of linkable Switch Info Record objects used to
+*        generate the query response.
+*
+* SEE ALSO
+*    Switch Info Receiver object
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_sir_rcv_construct
+* NAME
+*    osm_sir_rcv_construct
+*
+* DESCRIPTION
+*    This function constructs a Switch Info Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_sir_rcv_construct(
+    IN osm_sir_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to a Switch Info Receiver object to construct.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    Allows calling osm_sir_rcv_init, osm_sir_rcv_destroy,
+*    and osm_sir_rcv_is_inited.
+*
+*    Calling osm_sir_rcv_construct is a prerequisite to calling any other
+*    method except osm_sir_rcv_init.
+*
+* SEE ALSO
+*    Switch Info Receiver object, osm_sir_rcv_init,
+*    osm_sir_rcv_destroy, osm_sir_rcv_is_inited
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_sir_rcv_destroy
+* NAME
+*    osm_sir_rcv_destroy
+*
+* DESCRIPTION
+*    The osm_sir_rcv_destroy function destroys the object, releasing
+*    all resources.
+*
+* SYNOPSIS
+*/
+void osm_sir_rcv_destroy(
+    IN osm_sir_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    Performs any necessary cleanup of the specified
+*    Switch Info Receiver object.
+*    Further operations should not be attempted on the destroyed object.
+*    This function should only be called after a call to
+*    osm_sir_rcv_construct or osm_sir_rcv_init.
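+*
+*    A guarded teardown sketch (illustrative only; it relies on the
+*    osm_sir_rcv_is_inited helper declared below to skip destruction of
+*    an object whose initialization failed or was never attempted):
+*
+*        if( osm_sir_rcv_is_inited( &rcv ) )
+*            osm_sir_rcv_destroy( &rcv );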
+*
+* SEE ALSO
+*    Switch Info Receiver object, osm_sir_rcv_construct,
+*    osm_sir_rcv_init
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_sir_rcv_init
+* NAME
+*    osm_sir_rcv_init
+*
+* DESCRIPTION
+*    The osm_sir_rcv_init function initializes a
+*    Switch Info Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_sir_rcv_init(
+    IN osm_sir_rcv_t* const p_rcv,
+    IN osm_sa_resp_t* const p_resp,
+    IN osm_mad_pool_t* const p_mad_pool,
+    IN osm_subn_t* const p_subn,
+    IN osm_log_t* const p_log,
+    IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*    p_rcv
+*        [in] Pointer to an osm_sir_rcv_t object to initialize.
+*
+*    p_resp
+*        [in] Pointer to the SA Responder object.
+*
+*    p_mad_pool
+*        [in] Pointer to the mad pool.
+*
+*    p_subn
+*        [in] Pointer to the Subnet object for this subnet.
+*
+*    p_log
+*        [in] Pointer to the log object.
+*
+*    p_lock
+*        [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*    IB_SUCCESS if the Switch Info Receiver object was initialized
+*    successfully.
+*
+* NOTES
+*    Allows calling other Switch Info Receiver methods.
+*
+* SEE ALSO
+*    Switch Info Receiver object, osm_sir_rcv_construct,
+*    osm_sir_rcv_destroy, osm_sir_rcv_is_inited
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_sir_rcv_is_inited
+* NAME
+*    osm_sir_rcv_is_inited
+*
+* DESCRIPTION
+*    Indicates if the object has been initialized with osm_sir_rcv_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_sir_rcv_is_inited(
+    IN const osm_sir_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to an osm_sir_rcv_t object.
+*
+* RETURN VALUES
+*    TRUE if the object was initialized successfully,
+*    FALSE otherwise.
+*
+* NOTES
+*    osm_sir_rcv_construct or osm_sir_rcv_init must be
+*    called before using this function.
+*
+* SEE ALSO
+*    Switch Info Receiver object, osm_sir_rcv_construct,
+*    osm_sir_rcv_init
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_sir_rcv_process
+* NAME
+*    osm_sir_rcv_process
+*
+* DESCRIPTION
+*    Process the SwitchInfo attribute.
+*
+* SYNOPSIS
+*/
+void osm_sir_rcv_process(
+    IN osm_sir_rcv_t* const p_ctrl,
+    IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to an osm_sir_rcv_t object.
+*
+*    p_madw
+*        [in] Pointer to the MAD Wrapper containing the MAD
+*        that contains the node's SwitchInfo attribute.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    This function processes a SwitchInfo attribute.
+*
+* SEE ALSO
+*    Switch Info Receiver, Switch Info Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SIR_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record_ctrl.h
new file mode 100644
index 00000000..86b9674f
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_sw_info_record_ctrl.h
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sir_rcv_ctrl_t. + * This object represents a controller that receives the IBA SwitchInfo + * attribute from a switch node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#ifndef _OSM_SIR_RCV_CTRL_H_ +#define _OSM_SIR_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Switch Info Receive Controller +* NAME +* Switch Info Receive Controller +* +* DESCRIPTION +* The Switch Info Receive Controller object encapsulates the information +* needed to receive the SwitchInfo attribute from a switch node. +* +* The Switch Info Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Hal Rosenstock, Voltaire +* +*********/ + +/****s* OpenSM: Switch Info Receive Controller/osm_sir_rcv_ctrl_t +* NAME +* osm_sir_rcv_ctrl_t +* +* DESCRIPTION +* Switch Info Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sir_rcv_ctrl +{ + osm_sir_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; +} osm_sir_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Switch Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Switch Info Receive Controller object +* Switch Info Receiver object +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_sir_rcv_ctrl_construct +* NAME +* osm_sir_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Switch Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_sir_rcv_ctrl_construct( + IN osm_sir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Switch Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sir_rcv_ctrl_init, osm_sir_rcv_ctrl_destroy, +* and osm_sir_rcv_ctrl_is_inited. +* +* Calling osm_sir_rcv_ctrl_construct is a prerequisite to calling any +* other method except osm_sir_rcv_ctrl_init. 
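+*
+*    A teardown-order sketch (an assumption for illustration, not a
+*    documented requirement: destroying the controller before the
+*    receiver it points to avoids dispatcher callbacks reaching a
+*    destroyed receiver):
+*
+*        osm_sir_rcv_ctrl_destroy( &ctrl );
+*        osm_sir_rcv_destroy( &rcv );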
+* +* SEE ALSO +* Switch Info Receive Controller object, osm_sir_rcv_ctrl_init, +* osm_sir_rcv_ctrl_destroy, osm_sir_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_sir_rcv_ctrl_destroy +* NAME +* osm_sir_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_sir_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_sir_rcv_ctrl_destroy( + IN osm_sir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Switch Info Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sir_rcv_ctrl_construct or osm_sir_rcv_ctrl_init. +* +* SEE ALSO +* Switch Info Receive Controller object, osm_sir_rcv_ctrl_construct, +* osm_sir_rcv_ctrl_init +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_sir_rcv_ctrl_init +* NAME +* osm_sir_rcv_ctrl_init +* +* DESCRIPTION +* The osm_sir_rcv_ctrl_init function initializes a +* Switch Info Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_sir_rcv_ctrl_init( + IN osm_sir_rcv_ctrl_t* const p_ctrl, + IN osm_sir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sir_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_sir_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Switch Info Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Switch Info Receive Controller methods. +* +* SEE ALSO +* Switch Info Receive Controller object, osm_sir_rcv_ctrl_construct, +* osm_sir_rcv_ctrl_destroy, osm_sir_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_sir_rcv_ctrl_is_inited +* NAME +* osm_sir_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_sir_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_sir_rcv_ctrl_is_inited( + IN const osm_sir_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sir_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_sir_rcv_ctrl_construct or osm_sir_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Switch Info Receive Controller object, osm_sir_rcv_ctrl_construct, +* osm_sir_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_SIR_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record.h new file mode 100644 index 00000000..c5f3766e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_vlarb_rec_rcv_t.
+ *    This object represents the VLArbitration Record Receiver object,
+ *    which handles the VLArbitration Record query from an SA client.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#ifndef _OSM_VLARB_REC_RCV_H_
+#define _OSM_VLARB_REC_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/VLArbitration Record Receiver
+* NAME
+*    VLArbitration Record Receiver
+*
+* DESCRIPTION
+*    The VLArbitration Record Receiver object encapsulates the information
+*    needed to handle a VL Arbitration Record query from an SA client.
+*
+*    The VLArbitration Record Receiver object is thread safe.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
+*
+* AUTHOR
+*    Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: VLArbitration Record Receiver/osm_vlarb_rec_rcv_t
+* NAME
+*    osm_vlarb_rec_rcv_t
+*
+* DESCRIPTION
+*    VLArbitration Record Receiver structure.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_vlarb_rec_rcv
+{
+    const osm_subn_t    *p_subn;
+    osm_sa_resp_t       *p_resp;
+    osm_mad_pool_t      *p_mad_pool;
+    osm_log_t           *p_log;
+    cl_plock_t          *p_lock;
+    cl_qlock_pool_t     pool;
+} osm_vlarb_rec_rcv_t;
+/*
+* FIELDS
+*    p_subn
+*        Pointer to the Subnet object for this subnet.
+*
+*    p_resp
+*        Pointer to the SA responder.
+*
+*    p_mad_pool
+*        Pointer to the mad pool.
+*
+*    p_log
+*        Pointer to the log object.
+*
+*    p_lock
+*        Pointer to the serializing lock.
+*
+*    pool
+*        Pool of linkable VLArbitration Record objects used to generate
+*        the query response.
+*
+* SEE ALSO
+*
+*********/
+
+/****f* OpenSM: VLArbitration Record Receiver/osm_vlarb_rec_rcv_construct
+* NAME
+*    osm_vlarb_rec_rcv_construct
+*
+* DESCRIPTION
+*    This function constructs a VLArbitration Record Receiver object.
+*
+* SYNOPSIS
+*/
+void
+osm_vlarb_rec_rcv_construct(
+    IN osm_vlarb_rec_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*    p_rcv
+*        [in] Pointer to a VLArbitration Record Receiver object to construct.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    Allows calling osm_vlarb_rec_rcv_init, osm_vlarb_rec_rcv_destroy.
+*
+*    Calling osm_vlarb_rec_rcv_construct is a prerequisite to calling any other
+*    method except osm_vlarb_rec_rcv_init.
+*
+* SEE ALSO
+*    VLArbitration Record Receiver object, osm_vlarb_rec_rcv_init,
+*    osm_vlarb_rec_rcv_destroy
+*********/
+
+/****f* OpenSM: VLArbitration Record Receiver/osm_vlarb_rec_rcv_destroy
+* NAME
+*    osm_vlarb_rec_rcv_destroy
+*
+* DESCRIPTION
+*    The osm_vlarb_rec_rcv_destroy function destroys the object, releasing
+*    all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_vlarb_rec_rcv_destroy(
+    IN osm_vlarb_rec_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+*    p_rcv
+*        [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    Performs any necessary cleanup of the specified
+*    VLArbitration Record Receiver object.
+*    Further operations should not be attempted on the destroyed object.
+*    This function should only be called after a call to
+*    osm_vlarb_rec_rcv_construct or osm_vlarb_rec_rcv_init.
+*
+* SEE ALSO
+*    VLArbitration Record Receiver object, osm_vlarb_rec_rcv_construct,
+*    osm_vlarb_rec_rcv_init
+*********/
+
+/****f* OpenSM: VLArbitration Record Receiver/osm_vlarb_rec_rcv_init
+* NAME
+*    osm_vlarb_rec_rcv_init
+*
+* DESCRIPTION
+*    The osm_vlarb_rec_rcv_init function initializes a
+*    VLArbitration Record Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_vlarb_rec_rcv_init(
+    IN osm_vlarb_rec_rcv_t* const p_rcv,
+    IN osm_sa_resp_t* const p_resp,
+    IN osm_mad_pool_t* const p_mad_pool,
+    IN const osm_subn_t* const p_subn,
+    IN osm_log_t* const p_log,
+    IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*    p_rcv
+*        [in] Pointer to an osm_vlarb_rec_rcv_t object to initialize.
+*
+*    p_resp
+*        [in] Pointer to the SA responder object.
+*
+*    p_mad_pool
+*        [in] Pointer to the mad pool.
+*
+*    p_subn
+*        [in] Pointer to the Subnet object for this subnet.
+*
+*    p_log
+*        [in] Pointer to the log object.
+*
+*    p_lock
+*        [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*    IB_SUCCESS if the VLArbitration Record Receiver object was initialized
+*    successfully.
+*
+* NOTES
+*    Allows calling other VLArbitration Record Receiver methods.
+*
+* SEE ALSO
+*    VLArbitration Record Receiver object, osm_vlarb_rec_rcv_construct,
+*    osm_vlarb_rec_rcv_destroy
+*********/
+
+/****f* OpenSM: VLArbitration Record Receiver/osm_vlarb_rec_rcv_process
+* NAME
+*    osm_vlarb_rec_rcv_process
+*
+* DESCRIPTION
+*    Process the VL Arbitration Table Query.
+*
+* SYNOPSIS
+*/
+void
+osm_vlarb_rec_rcv_process(
+    IN osm_vlarb_rec_rcv_t* const p_rcv,
+    IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*    p_rcv
+*        [in] Pointer to an osm_vlarb_rec_rcv_t object.
+*
+*    p_madw
+*        [in] Pointer to the MAD Wrapper containing the MAD
+*        that contains the VL Arbitration Record Query attribute.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    This function processes a SA VL Arbitration Record attribute.
+*
+* SEE ALSO
+*    VLArbitration Record Receiver, VLArbitration Record Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_VLARB_REC_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record_ctrl.h
new file mode 100644
index 00000000..3cc07568
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sa_vlarb_record_ctrl.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_vlarb_rec_rcv_ctrl_t.
+ *    This object represents a controller that receives the IBA VL Arbitration
+ *    Record query from an SA client.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#ifndef _OSM_VLARB_REC_CTRL_H_
+#define _OSM_VLARB_REC_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/VLArbitration Record Receive Controller
+* NAME
+*    VLArbitration Record Receive Controller
+*
+* DESCRIPTION
+*    The VLArbitration Record Receive Controller object encapsulates
+*    the information needed to handle VLArbitration record query from an SA client.
+*
+*    The VLArbitration Record Receive Controller object is thread safe.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
+*
+* AUTHOR
+*    Eitan Zahavi, Mellanox
+*
+*********/
+
+/****s* OpenSM: VLArbitration Record Receive Controller/osm_vlarb_rec_rcv_ctrl_t
+* NAME
+*    osm_vlarb_rec_rcv_ctrl_t
+*
+* DESCRIPTION
+*    VLArbitration Record Receive Controller structure.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_vlarb_rec_rcv_ctrl
+{
+    osm_vlarb_rec_rcv_t    *p_rcv;
+    osm_log_t              *p_log;
+    cl_dispatcher_t        *p_disp;
+    cl_disp_reg_handle_t   h_disp;
+
+} osm_vlarb_rec_rcv_ctrl_t;
+/*
+* FIELDS
+*    p_rcv
+*        Pointer to the VLArbitration Record Receiver object.
+*
+*    p_log
+*        Pointer to the log object.
+*
+*    p_disp
+*        Pointer to the Dispatcher.
+*
+*    h_disp
+*        Handle returned from dispatcher registration.
+*
+* SEE ALSO
+*    VLArbitration Record Receive Controller object
+*    VLArbitration Record Receiver object
+*********/
+
+/****f* OpenSM: VLArbitration Record Receive Controller/osm_vlarb_rec_rcv_ctrl_construct
+* NAME
+*    osm_vlarb_rec_rcv_ctrl_construct
+*
+* DESCRIPTION
+*    This function constructs a VLArbitration Record Receive Controller object.
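+*
+*    A minimal sketch of the construct phase (illustrative only; the
+*    receiver and controller are assumed to be embedded side by side in
+*    a larger SA aggregate and constructed before either is initialized):
+*
+*        osm_vlarb_rec_rcv_t      rcv;
+*        osm_vlarb_rec_rcv_ctrl_t ctrl;
+*
+*        osm_vlarb_rec_rcv_construct( &rcv );
+*        osm_vlarb_rec_rcv_ctrl_construct( &ctrl );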
+* +* SYNOPSIS +*/ +void osm_vlarb_rec_rcv_ctrl_construct( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a VLArbitration Record Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_vlarb_rec_rcv_ctrl_init, osm_vlarb_rec_rcv_ctrl_destroy +* +* Calling osm_vlarb_rec_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_vlarb_rec_rcv_ctrl_init. +* +* SEE ALSO +* VLArbitration Record Receive Controller object, osm_vlarb_rec_rcv_ctrl_init, +* osm_vlarb_rec_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: VLArbitration Record Receive Controller/osm_vlarb_rec_rcv_ctrl_destroy +* NAME +* osm_vlarb_rec_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_vlarb_rec_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_vlarb_rec_rcv_ctrl_destroy( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* VLArbitration Record Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_vlarb_rec_rcv_ctrl_construct or osm_vlarb_rec_rcv_ctrl_init. +* +* SEE ALSO +* VLArbitration Record Receive Controller object, osm_vlarb_rec_rcv_ctrl_construct, +* osm_vlarb_rec_rcv_ctrl_init +*********/ + +/****f* OpenSM: VLArbitration Record Receive Controller/osm_vlarb_rec_rcv_ctrl_init +* NAME +* osm_vlarb_rec_rcv_ctrl_init +* +* DESCRIPTION +* The osm_vlarb_rec_rcv_ctrl_init function initializes a +* VLArbitration Record Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_vlarb_rec_rcv_ctrl_init( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl, + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_vlarb_rec_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_vlarb_rec_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the VLArbitration Record Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other VLArbitration Record Receive Controller methods. +* +* SEE ALSO +* VLArbitration Record Receive Controller object, osm_vlarb_rec_rcv_ctrl_construct, +* osm_vlarb_rec_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* _OSM_VLARB_REC_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_service.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_service.h new file mode 100644 index 00000000..7c09a4f6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_service.h @@ -0,0 +1,241 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef _OSM_SVCR_H_
+#define _OSM_SVCR_H_
+
+/*
+ * Abstract:
+ *    Declaration of osm_svcr_t.
+ *    This object represents an IBA Service Record.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Service Record
+* NAME
+*    Service Record
+*
+* DESCRIPTION
+*    The service record encapsulates the information needed by the
+*    SA to manage service registrations.
+*
+*    The service record is not thread safe; callers must provide
+*    serialization.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
+*
+* AUTHOR
+*    Anil S Keshavamurthy, Intel
+*
+*********/
+
+/****s* OpenSM: Service Record/osm_svcr_t
+* NAME
+*    osm_svcr_t
+*
+* DESCRIPTION
+*    Service Record structure.
+*
+*    The osm_svcr_t object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+
+typedef struct _osm_svcr_t
+{
+    cl_list_item_t         list_item;
+    ib_service_record_t    service_record;
+    uint32_t               modified_time;
+    uint32_t               lease_period;
+} osm_svcr_t;
+/*
+* FIELDS
+*    list_item
+*        List item for qlist linkage. Must be first element!!
+*
+*    service_record
+*        IB Service record structure
+*
+*    modified_time
+*        Last modified time of this record in milliseconds
+*
+*    lease_period
+*        Remaining lease period for this record
+*
+*
+* SEE ALSO
+*********/
+
+
+/****f* OpenSM: Service Record/osm_svcr_new
+* NAME
+*    osm_svcr_new
+*
+* DESCRIPTION
+*    Allocates and initializes a Service Record for use.
+*
+* SYNOPSIS
+*/
+osm_svcr_t*
+osm_svcr_new(
+    IN const ib_service_record_t *p_svc_rec );
+/*
+* PARAMETERS
+*    p_svc_rec
+*        [in] Pointer to IB Service Record
+*
+* RETURN VALUES
+*    Pointer to the new osm_svcr_t structure.
+*
+* NOTES
+*    Allows calling other service record methods.
+*
+* SEE ALSO
+*    Service Record, osm_svcr_construct, osm_svcr_destroy
+*********/
+
+
+/****f* OpenSM: Service Record/osm_svcr_init
+* NAME
+*    osm_svcr_init
+*
+* DESCRIPTION
+*    Initializes the osm_svcr_t structure.
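+*
+*    A usage sketch (illustrative only: it assumes osm_svcr_new combines
+*    allocation with this initialization, and that p_subn, p_log and
+*    p_svc_rec come from the enclosing OpenSM context):
+*
+*        osm_svcr_t *p_svcr = osm_svcr_get_by_rid( p_subn, p_log, p_svc_rec );
+*        if( !p_svcr )
+*        {
+*            p_svcr = osm_svcr_new( p_svc_rec );
+*            if( p_svcr )
+*                osm_svcr_insert_to_db( p_subn, p_log, p_svcr );
+*        }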
+*
+* SYNOPSIS
+*/
+void
+osm_svcr_init(
+    IN osm_svcr_t* const p_svcr,
+    IN const ib_service_record_t *p_svc_rec );
+/*
+* PARAMETERS
+*    p_svcr
+*        [in] Pointer to the osm_svcr_t structure to initialize
+*    p_svc_rec
+*        [in] Pointer to the ib_service_record_t
+*
+* SEE ALSO
+*    Service Record, osm_svcr_construct, osm_svcr_destroy
+*********/
+
+/****f* OpenSM: Service Record/osm_svcr_construct
+* NAME
+*    osm_svcr_construct
+*
+* DESCRIPTION
+*    Constructs the osm_svcr_t structure.
+*
+* SYNOPSIS
+*/
+void
+osm_svcr_construct(
+    IN osm_svcr_t* const p_svcr );
+/*
+* PARAMETERS
+*    p_svcr
+*        [in] Pointer to the osm_svcr_t structure
+*
+* SEE ALSO
+*    Service Record, osm_svcr_construct, osm_svcr_destroy
+*********/
+
+/****f* OpenSM: Service Record/osm_svcr_destroy
+* NAME
+*    osm_svcr_destroy
+*
+* DESCRIPTION
+*    Destroys the osm_svcr_t structure.
+*
+* SYNOPSIS
+*/
+void
+osm_svcr_destroy(
+    IN osm_svcr_t* const p_svcr );
+/*
+* PARAMETERS
+*    p_svcr
+*        [in] Pointer to the osm_svcr_t structure
+*
+* SEE ALSO
+*    Service Record, osm_svcr_construct, osm_svcr_destroy
+*********/
+
+
+/* Looks up a service record in the subnet database by its RID. */
+osm_svcr_t*
+osm_svcr_get_by_rid(
+    IN osm_subn_t const *p_subn,
+    IN osm_log_t *p_log,
+    IN ib_service_record_t* const p_svc_rec );
+
+/* Inserts a service record into the subnet database. */
+void
+osm_svcr_insert_to_db(
+    IN osm_subn_t *p_subn,
+    IN osm_log_t *p_log,
+    IN osm_svcr_t *p_svcr);
+
+/* Removes a service record from the subnet database. */
+void
+osm_svcr_remove_from_db(
+    IN osm_subn_t *p_subn,
+    IN osm_log_t *p_log,
+    IN osm_svcr_t *p_svcr);
+
+
+END_C_DECLS
+
+#endif /* _OSM_SVCR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv.h
new file mode 100644
index 00000000..9ab13bf7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv.h
@@ -0,0 +1,265 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_slvl_rcv_t.
+ *    This object represents the SLtoVL Map Receiver object.
+ *    This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_SLVL_RCV_H_ +#define _OSM_SLVL_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Slvl Map Receiver +* NAME +* Slvl Map Receiver +* +* DESCRIPTION +* The Slvl Map Receiver object encapsulates the information +* needed to set or get the SLtoVL map attribute from a port. +* +* The Slvl Map Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: Slvl Map Receiver/osm_slvl_rcv_t +* NAME +* osm_slvl_rcv_t +* +* DESCRIPTION +* Slvl Map Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_slvl_rcv +{ + osm_subn_t *p_subn; + osm_req_t *p_req; + osm_log_t *p_log; + cl_plock_t *p_lock; + +} osm_slvl_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_req +* Pointer to the generic attribute request object. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Slvl Map Receiver object +*********/ + +/****f* OpenSM: Slvl Map Receiver/osm_slvl_rcv_construct +* NAME +* osm_slvl_rcv_construct +* +* DESCRIPTION +* This function constructs a Slvl Map Receiver object. +* +* SYNOPSIS +*/ +void osm_slvl_rcv_construct( + IN osm_slvl_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Slvl Map Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_slvl_rcv_destroy +* +* Calling osm_slvl_rcv_construct is a prerequisite to calling any other +* method except osm_slvl_rcv_init. +* +* SEE ALSO +* Slvl Map Receiver object, osm_slvl_rcv_init, +* osm_slvl_rcv_destroy +*********/ + +/****f* OpenSM: Slvl Map Receiver/osm_slvl_rcv_destroy +* NAME +* osm_slvl_rcv_destroy +* +* DESCRIPTION +* The osm_slvl_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_slvl_rcv_destroy( + IN osm_slvl_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Slvl Map Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_slvl_rcv_construct or osm_slvl_rcv_init. +* +* SEE ALSO +* Slvl Map Receiver object, osm_slvl_rcv_construct, +* osm_slvl_rcv_init +*********/ + +/****f* OpenSM: Slvl Map Receiver/osm_slvl_rcv_init +* NAME +* osm_slvl_rcv_init +* +* DESCRIPTION +* The osm_slvl_rcv_init function initializes a +* Slvl Map Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_slvl_rcv_init( + IN osm_slvl_rcv_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_slvl_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. 
+*
+*    p_log
+*        [in] Pointer to the log object.
+*
+*    p_lock
+*        [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*    IB_SUCCESS if the Slvl Map Receiver object was initialized
+*    successfully.
+*
+* NOTES
+*    Allows calling other Slvl Map Receiver methods.
+*
+* SEE ALSO
+*    Slvl Map Receiver object, osm_slvl_rcv_construct,
+*    osm_slvl_rcv_destroy
+*********/
+
+/****f* OpenSM: Slvl Map Receiver/osm_slvl_rcv_process
+* NAME
+*    osm_slvl_rcv_process
+*
+* DESCRIPTION
+*    Process the SLtoVL map attribute.
+*
+* SYNOPSIS
+*/
+void osm_slvl_rcv_process(
+    IN const osm_slvl_rcv_t* const p_ctrl,
+    IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*    p_ctrl
+*        [in] Pointer to an osm_slvl_rcv_t object.
+*
+*    p_madw
+*        [in] Pointer to the MAD Wrapper containing the MAD
+*        that contains the node's SLtoVL attribute.
+*
+* RETURN VALUE
+*    This function does not return a value.
+*
+* NOTES
+*    This function processes a SLtoVL attribute.
+*
+* SEE ALSO
+*    Slvl Map Receiver, Slvl Map Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SLVL_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv_ctrl.h
new file mode 100644
index 00000000..26f4776f
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_slvl_map_rcv_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_slvl_rcv_ctrl_t.
+ *    This object represents a controller that receives the IBA SLtoVL
+ *    map attribute from a port.
+ *    This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_SLVL_RCV_CTRL_H_ +#define _OSM_SLVL_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SltoVL Map Table Receive Controller +* NAME +* Slvl Map Receive Controller +* +* DESCRIPTION +* The Slvl Map Receive Controller object encapsulates +* the information needed to get or set SLtoVL Map of a port. +* +* The Slvl Map Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: Slvl Map Receive Controller/osm_slvl_rcv_ctrl_t +* NAME +* osm_slvl_rcv_ctrl_t +* +* DESCRIPTION +* Slvl Map Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_slvl_rcv_ctrl +{ + osm_slvl_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_slvl_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Slvl Map Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Slvl Map Receive Controller object +* Slvl Map Receiver object +*********/ + +/****f* OpenSM: Slvl Map Receive Controller/osm_slvl_rcv_ctrl_construct +* NAME +* osm_slvl_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Slvl Map Receive Controller object. +* +* SYNOPSIS +*/ +void osm_slvl_rcv_ctrl_construct( + IN osm_slvl_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Slvl Map Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_slvl_rcv_ctrl_init, osm_slvl_rcv_ctrl_destroy, +* and osm_slvl_rcv_ctrl_is_inited. +* +* Calling osm_slvl_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_slvl_rcv_ctrl_init. +* +* SEE ALSO +* Slvl Map Receive Controller object, osm_slvl_rcv_ctrl_init, +* osm_slvl_rcv_ctrl_destroy, osm_slvl_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Slvl Map Receive Controller/osm_slvl_rcv_ctrl_destroy +* NAME +* osm_slvl_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_slvl_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_slvl_rcv_ctrl_destroy( + IN osm_slvl_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Slvl Map Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_slvl_rcv_ctrl_construct or osm_slvl_rcv_ctrl_init. +* +* SEE ALSO +* Slvl Map Receive Controller object, osm_slvl_rcv_ctrl_construct, +* osm_slvl_rcv_ctrl_init +*********/ + +/****f* OpenSM: Slvl Map Receive Controller/osm_slvl_rcv_ctrl_init +* NAME +* osm_slvl_rcv_ctrl_init +* +* DESCRIPTION +* The osm_slvl_rcv_ctrl_init function initializes a +* Slvl Map Receive Controller object for use. 
+* +* SYNOPSIS +*/ +ib_api_status_t osm_slvl_rcv_ctrl_init( + IN osm_slvl_rcv_ctrl_t* const p_ctrl, + IN osm_slvl_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_slvl_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_slvl_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the Slvl Map Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Slvl Map Receive Controller methods. +* +* SEE ALSO +* Slvl Map Receive Controller object, osm_slvl_rcv_ctrl_construct, +* osm_slvl_rcv_ctrl_destroy, osm_slvl_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Slvl Map Receive Controller/osm_slvl_rcv_ctrl_is_inited +* NAME +* osm_slvl_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_slvl_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_slvl_rcv_ctrl_is_inited( + IN const osm_slvl_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_slvl_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_slvl_rcv_ctrl_construct or osm_slvl_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* Slvl Map Receive Controller object, osm_slvl_rcv_ctrl_construct, +* osm_slvl_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_SLVL_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm.h new file mode 100644 index 00000000..07025554 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm.h @@ -0,0 +1,566 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sm_t, osm_other_sm_t. + * This object represents an IBA subnet. + * This object is part of the OpenSM family of objects. 
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#ifndef _OSM_SM_H_
+#define _OSM_SM_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/SM
+* NAME
+*    SM
+*
+* DESCRIPTION
+*    The SM object encapsulates the information needed by the
+*    OpenSM to instantiate a subnet manager. The OpenSM allocates
+*    one SM object per subnet manager.
+*
+*    The SM object is thread safe.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* AUTHOR
+*    Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: SM/osm_sm_t
+* NAME
+*    osm_sm_t
+*
+* DESCRIPTION
+*    Subnet Manager structure.
+*
+*    This object should be treated as opaque and should
+*    be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_sm
+{
+    osm_thread_state_t       thread_state;
+    cl_event_t               signal;
+    cl_event_t               subnet_up_event;
+    cl_thread_t              sweeper;
+    osm_subn_t               *p_subn;
+    osm_db_t                 *p_db;
+    osm_vendor_t             *p_vendor;
+    osm_log_t                *p_log;
+    osm_mad_pool_t           *p_mad_pool;
+    osm_vl15_t               *p_vl15;
+    cl_dispatcher_t          *p_disp;
+    cl_plock_t               *p_lock;
+    atomic32_t               sm_trans_id;
+    osm_req_t                req;
+    osm_req_ctrl_t           req_ctrl;
+    osm_resp_t               resp;
+    osm_ni_rcv_t             ni_rcv;
+    osm_ni_rcv_ctrl_t        ni_rcv_ctrl;
+    osm_pi_rcv_t             pi_rcv;
+    osm_pi_rcv_ctrl_t        pi_rcv_ctrl;
+    osm_nd_rcv_t             nd_rcv;
+    osm_nd_rcv_ctrl_t        nd_rcv_ctrl;
+    osm_sm_mad_ctrl_t        mad_ctrl;
+    osm_si_rcv_t             si_rcv;
+    osm_si_rcv_ctrl_t        si_rcv_ctrl;
+    osm_state_mgr_ctrl_t     state_mgr_ctrl;
+    osm_lid_mgr_t            lid_mgr;
+    osm_ucast_mgr_t          ucast_mgr;
+    osm_link_mgr_t           link_mgr;
+    osm_state_mgr_t          state_mgr;
+    osm_drop_mgr_t           drop_mgr;
+    osm_lft_rcv_t            lft_rcv;
+    osm_lft_rcv_ctrl_t       lft_rcv_ctrl;
+    osm_mft_rcv_t            mft_rcv;
+    osm_mft_rcv_ctrl_t       mft_rcv_ctrl;
+    osm_sweep_fail_ctrl_t    sweep_fail_ctrl;
+    osm_sminfo_rcv_t         sm_info_rcv;
+    osm_sminfo_rcv_ctrl_t    sm_info_rcv_ctrl;
+    osm_trap_rcv_t           trap_rcv;
+    osm_trap_rcv_ctrl_t      trap_rcv_ctrl;
+    osm_sm_state_mgr_t       sm_state_mgr;
+    osm_mcast_mgr_t          mcast_mgr;
+    osm_slvl_rcv_t           slvl_rcv;
+    osm_slvl_rcv_ctrl_t      slvl_rcv_ctrl;
+    osm_vla_rcv_t            vla_rcv;
+    osm_vla_rcv_ctrl_t       vla_rcv_ctrl;
+    osm_pkey_rcv_t           pkey_rcv;
+    osm_pkey_rcv_ctrl_t      pkey_rcv_ctrl;
+} osm_sm_t;
+/*
+* FIELDS
+*    p_subn
+*        Pointer to the Subnet object for this subnet.
+*
+*    p_db
+*        Pointer to the database (persistency) object
+*
+*    p_vendor
+*        Pointer to the vendor specific interfaces object.
+*
+*    p_log
+*        Pointer to the log object.
+*
+*    p_mad_pool
+*        Pointer to the MAD pool.
+*
+*    p_vl15
+*        Pointer to the VL15 interface.
+*
+*    req
+*        Generic MAD attribute requester.
+*
+*    req_ctrl
+*        Controller for the generic requester.
+*
+*    resp
+*        MAD attribute responder.
+*
+*    nd_rcv_ctrl
+*        Node Description Receive Controller.
+*
+*    ni_rcv_ctrl
+*        Node Info Receive Controller.
+*
+*    pi_rcv_ctrl
+*        Port Info Receive Controller.
+*
+*    si_rcv_ctrl
+*        Switch Info Receive Controller.
+*
+*    mad_ctrl
+*        MAD Controller.
+*
+*    sm_info_rcv_ctrl
+*        SM Info Receive Controller.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* SEE ALSO
+* SM object
+*********/
+
+/****f* OpenSM: SM/osm_sm_construct
+* NAME
+* osm_sm_construct
+*
+* DESCRIPTION
+* This function constructs an SM object.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_construct(
+ IN osm_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an SM object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_sm_init and osm_sm_destroy.
+*
+* Calling osm_sm_construct is a prerequisite to calling any other
+* method except osm_sm_init.
+*
+* SEE ALSO
+* SM object, osm_sm_init, osm_sm_destroy
+*********/
+
+/****f* OpenSM: SM/osm_sm_shutdown
+* NAME
+* osm_sm_shutdown
+*
+* DESCRIPTION
+* The osm_sm_shutdown function shuts down an SM, stopping the sweeper
+* and unregistering all messages from the dispatcher.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_shutdown(
+ IN osm_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an SM object to shut down.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* SEE ALSO
+* SM object, osm_sm_construct, osm_sm_init
+*********/
+
+/****f* OpenSM: SM/osm_sm_destroy
+* NAME
+* osm_sm_destroy
+*
+* DESCRIPTION
+* The osm_sm_destroy function destroys an SM, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_destroy(
+ IN osm_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an SM object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified SM object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to osm_sm_construct or
+* osm_sm_init.
+*
+* SEE ALSO
+* SM object, osm_sm_construct, osm_sm_init
+*********/
+
+/****f* OpenSM: SM/osm_sm_init
+* NAME
+* osm_sm_init
+*
+* DESCRIPTION
+* The osm_sm_init function initializes an SM object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_init(
+ IN osm_sm_t* const p_sm,
+ IN osm_subn_t* const p_subn,
+ IN osm_db_t* const p_db,
+ IN osm_vendor_t* const p_vendor,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_vl15_t* const p_vl15,
+ IN osm_log_t* const p_log,
+ IN osm_stats_t* const p_stats,
+ IN cl_dispatcher_t* const p_disp,
+ IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object to initialize.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_db
+* [in] Pointer to the database (persistency) object.
+*
+* p_vendor
+* [in] Pointer to the vendor specific interfaces object.
+*
+* p_mad_pool
+* [in] Pointer to the MAD pool.
+*
+* p_vl15
+* [in] Pointer to the VL15 interface.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_stats
+* [in] Pointer to the statistics object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SM object was initialized successfully.
+*
+* NOTES
+* Allows calling other SM methods.
+*
+* SEE ALSO
+* SM object, osm_sm_construct, osm_sm_destroy
+*********/
+
+/****f* OpenSM: SM/osm_sm_sweep
+* NAME
+* osm_sm_sweep
+*
+* DESCRIPTION
+* Initiates a subnet sweep.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_sweep(
+ IN osm_sm_t* const p_sm );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object.
+*
+* RETURN VALUES
+* This function does not return a value.
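Taken together, the routines above prescribe a fixed bring-up and tear-down order. The following is a minimal sketch, not the canonical OpenSM main path: it assumes the conventional <opensm/osm_sm.h> include path, caller-owned and already-initialized dependency objects, and uses illustrative example_* names; osm_sm_bind is declared later in this header.

    #include <opensm/osm_sm.h>  /* assumed include path */

    /* Sketch: construct, init, bind, sweep, then shut down an osm_sm_t
     * in the order the NOTES above prescribe. */
    static ib_api_status_t
    example_sm_run(
        IN osm_sm_t* const p_sm,
        IN osm_subn_t* const p_subn,
        IN osm_db_t* const p_db,
        IN osm_vendor_t* const p_vendor,
        IN osm_mad_pool_t* const p_mad_pool,
        IN osm_vl15_t* const p_vl15,
        IN osm_log_t* const p_log,
        IN osm_stats_t* const p_stats,
        IN cl_dispatcher_t* const p_disp,
        IN cl_plock_t* const p_lock,
        IN const ib_net64_t port_guid )
    {
        ib_api_status_t status;

        /* construct must precede every other call except init */
        osm_sm_construct( p_sm );

        status = osm_sm_init( p_sm, p_subn, p_db, p_vendor, p_mad_pool,
                              p_vl15, p_log, p_stats, p_disp, p_lock );
        if( status != IB_SUCCESS )
            return( status );

        /* an SM binds to exactly one local port at a time */
        status = osm_sm_bind( p_sm, port_guid );
        if( status == IB_SUCCESS )
            osm_sm_sweep( p_sm );    /* kick off a subnet sweep */

        osm_sm_shutdown( p_sm );     /* stop sweeper, unregister messages */
        osm_sm_destroy( p_sm );      /* legal after construct or init */
        return( status );
    }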
+*
+* NOTES
+*
+* SEE ALSO
+* SM object
+*********/
+
+/****f* OpenSM: SM/osm_sm_bind
+* NAME
+* osm_sm_bind
+*
+* DESCRIPTION
+* Binds the SM object to a port GUID.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_bind(
+ IN osm_sm_t* const p_sm,
+ IN const ib_net64_t port_guid );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object to bind.
+*
+* port_guid
+* [in] Local port GUID with which to bind.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SM object was bound to the port successfully.
+*
+* NOTES
+* A given SM object can only be bound to one port at a time.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SM/osm_sm_mcgrp_join
+* NAME
+* osm_sm_mcgrp_join
+*
+* DESCRIPTION
+* Adds a port to the multicast group. Creates the multicast group
+* if necessary.
+*
+* This function is called by the SA.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_mcgrp_join(
+ IN osm_sm_t* const p_sm,
+ IN const ib_net16_t mlid,
+ IN const ib_net64_t port_guid,
+ IN osm_mcast_req_type_t req_type );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object.
+*
+* mlid
+* [in] Multicast LID.
+*
+* port_guid
+* [in] Port GUID to add to the group.
+*
+* req_type
+* [in] Type of the MC request that caused this join
+* (MC create/join).
+*
+* RETURN VALUES
+* IB_SUCCESS if the port was added to the multicast group
+* successfully.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SM/osm_sm_mcgrp_leave
+* NAME
+* osm_sm_mcgrp_leave
+*
+* DESCRIPTION
+* Removes a port from the multicast group.
+*
+* This function is called by the SA.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_mcgrp_leave(
+ IN osm_sm_t* const p_sm,
+ IN const ib_net16_t mlid,
+ IN const ib_net64_t port_guid );
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object.
+*
+* mlid
+* [in] Multicast LID.
+*
+* port_guid
+* [in] Port GUID to remove from the group.
+*
+* RETURN VALUES
+* IB_SUCCESS if the port was removed from the multicast group
+* successfully.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: SM/osm_sm_wait_for_subnet_up
+* NAME
+* osm_sm_wait_for_subnet_up
+*
+* DESCRIPTION
+* Blocks the calling thread until the subnet is up.
+*
+* SYNOPSIS
+*/
+static inline cl_status_t
+osm_sm_wait_for_subnet_up(
+ IN osm_sm_t* const p_sm,
+ IN uint32_t const wait_us,
+ IN boolean_t const interruptible )
+{
+ return( cl_event_wait_on( &p_sm->subnet_up_event,
+ wait_us, interruptible ) );
+}
+/*
+* PARAMETERS
+* p_sm
+* [in] Pointer to an osm_sm_t object.
+*
+* wait_us
+* [in] Number of microseconds to wait.
+*
+* interruptible
+* [in] Indicates whether the wait operation can be interrupted
+* by external signals.
+*
+* RETURN VALUES
+* CL_SUCCESS if the wait operation succeeded in response to the event
+* being set.
+*
+* CL_TIMEOUT if the specified time period elapses.
+*
+* CL_NOT_DONE if the wait was interrupted by an external signal.
+*
+* CL_ERROR if the wait operation failed.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SM_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_info_get_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_info_get_ctrl.h
new file mode 100644
index 00000000..52a43ce6
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_info_get_ctrl.h
@@ -0,0 +1,294 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_smi_get_ctrl_t.
+ * This object represents a controller that receives the IBA SMInfo
+ * query from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_SM_INFO_GET_CTRL_H_
+#define _OSM_SM_INFO_GET_CTRL_H_
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/SM Info Get Controller
+* NAME
+* SM Info Get Controller
+*
+* DESCRIPTION
+* The SM Info Get Controller object encapsulates the information
+* needed to handle the SMInfo query from a node.
+*
+* The SM Info Get Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+/****s* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_t
+* NAME
+* osm_smi_get_ctrl_t
+*
+* DESCRIPTION
+* SM Info Get Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_smi_get_ctrl
+{
+ osm_sminfo_t *p_smi;
+ osm_mad_pool_t *p_pool;
+ osm_log_t *p_log;
+ cl_plock_t *p_lock;
+ cl_dispatcher_t *p_disp;
+
+} osm_smi_get_ctrl_t;
+/*
+* FIELDS
+* p_smi
+* Pointer to the SM Info object of the SM.
+*
+* p_pool
+* Pointer to the MAD pool.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* SEE ALSO
+* SM Info Get Controller object
+*********/
+
+/****f* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_construct
+* NAME
+* osm_smi_get_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs an SM Info Get Controller object.
+*
+* SYNOPSIS
+*/
+void osm_smi_get_ctrl_construct(
+ IN osm_smi_get_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an SM Info Get Controller object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_smi_get_ctrl_init, osm_smi_get_ctrl_destroy,
+* and osm_smi_get_ctrl_is_inited.
+*
+* Calling osm_smi_get_ctrl_construct is a prerequisite to calling any other
+* method except osm_smi_get_ctrl_init.
+*
+* SEE ALSO
+* SM Info Get Controller object, osm_smi_get_ctrl_init,
+* osm_smi_get_ctrl_destroy, osm_smi_get_ctrl_is_inited
+*********/
+
+/****f* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_destroy
+* NAME
+* osm_smi_get_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_smi_get_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_smi_get_ctrl_destroy(
+ IN osm_smi_get_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* SM Info Get Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_smi_get_ctrl_construct or osm_smi_get_ctrl_init.
+*
+* SEE ALSO
+* SM Info Get Controller object, osm_smi_get_ctrl_construct,
+* osm_smi_get_ctrl_init
+*********/
+
+/****f* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_init
+* NAME
+* osm_smi_get_ctrl_init
+*
+* DESCRIPTION
+* The osm_smi_get_ctrl_init function initializes an
+* SM Info Get Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_smi_get_ctrl_init(
+ IN osm_smi_get_ctrl_t* const p_ctrl,
+ IN osm_sminfo_t* const p_smi,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_smi_get_ctrl_t object to initialize.
+*
+* p_smi
+* [in] Pointer to the SMInfo object of the SM.
+*
+* p_mad_pool
+* [in] Pointer to the MAD pool.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SM Info Get Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other SM Info Get Controller methods.
+*
+* SEE ALSO
+* SM Info Get Controller object, osm_smi_get_ctrl_construct,
+* osm_smi_get_ctrl_destroy, osm_smi_get_ctrl_is_inited
+*********/
+
+/****f* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_is_inited
+* NAME
+* osm_smi_get_ctrl_is_inited
+*
+* DESCRIPTION
+* Indicates if the object has been initialized with osm_smi_get_ctrl_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_smi_get_ctrl_is_inited(
+ IN const osm_smi_get_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_smi_get_ctrl_t object.
+*
+* RETURN VALUES
+* TRUE if the object was initialized successfully,
+* FALSE otherwise.
+*
+* NOTES
+* osm_smi_get_ctrl_construct or osm_smi_get_ctrl_init must be
+* called before using this function.
+*
+* SEE ALSO
+* SM Info Get Controller object, osm_smi_get_ctrl_construct,
+* osm_smi_get_ctrl_init
+*********/
+
+/****f* OpenSM: SM Info Get Controller/osm_smi_get_ctrl_process
+* NAME
+* osm_smi_get_ctrl_process
+*
+* DESCRIPTION
+* Processes the SMInfo attribute.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_smi_get_ctrl_process(
+ IN const osm_smi_get_ctrl_t* const p_ctrl,
+ IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_smi_get_ctrl_t object.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the MAD
+* that contains the node's SMInfo attribute.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SMInfo processing was successful.
+*
+* NOTES
+* This function processes an SMInfo attribute.
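The controller follows the same construct-then-init bracket as the rest of the object family. A hedged sketch follows; the example_* wrapper name is illustrative only and all dependency objects are assumed to be initialized by the caller:

    /* Sketch: bring up an SM Info Get Controller.  Once init returns
     * IB_SUCCESS, SMInfo MADs can be handed to
     * osm_smi_get_ctrl_process. */
    static ib_api_status_t
    example_smi_get_ctrl_start(
        IN osm_smi_get_ctrl_t* const p_ctrl,
        IN osm_sminfo_t* const p_smi,
        IN osm_mad_pool_t* const p_mad_pool,
        IN osm_log_t* const p_log,
        IN cl_plock_t* const p_lock,
        IN cl_dispatcher_t* const p_disp )
    {
        /* construct is a prerequisite to every method except init */
        osm_smi_get_ctrl_construct( p_ctrl );

        return( osm_smi_get_ctrl_init( p_ctrl, p_smi, p_mad_pool,
                                       p_log, p_lock, p_disp ) );
    }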
+* +* SEE ALSO +* SM Info Get Controller, SM Info Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_SM_INFO_GET_CTRL_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_mad_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_mad_ctrl.h new file mode 100644 index 00000000..ffba2c75 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_mad_ctrl.h @@ -0,0 +1,339 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sm_mad_ctrl_t. + * This object represents a controller that receives the IBA NodeInfo + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SM_MAD_CTRL_H_ +#define _OSM_SM_MAD_CTRL_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SM MAD Controller +* NAME +* SM MAD Controller +* +* DESCRIPTION +* The SM MAD Controller object encapsulates +* the information needed to receive MADs from the transport layer. +* +* The SM MAD Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: SM MAD Controller/osm_sm_mad_ctrl_t +* NAME +* osm_sm_mad_ctrl_t +* +* DESCRIPTION +* SM MAD Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sm_mad_ctrl +{ + osm_log_t *p_log; + osm_subn_t *p_subn; + osm_mad_pool_t *p_mad_pool; + osm_vl15_t *p_vl15; + osm_vendor_t *p_vendor; + osm_bind_handle_t h_bind; + cl_plock_t *p_lock; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + osm_stats_t *p_stats; + +} osm_sm_mad_ctrl_t; +/* +* FIELDS +* p_log +* Pointer to the log object. +* +* p_subn +* Pointer to the subnet object. +* +* p_mad_pool +* Pointer to the MAD pool. 
+*
+* p_vl15
+* Pointer to the VL15 interface.
+*
+* p_vendor
+* Pointer to the vendor specific interfaces object.
+*
+* h_bind
+* Bind handle returned by the transport layer.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
+*
+* p_stats
+* Pointer to the OpenSM statistics block.
+*
+* SEE ALSO
+* SM MAD Controller object
+*********/
+
+/****f* OpenSM: SM MAD Controller/osm_sm_mad_ctrl_construct
+* NAME
+* osm_sm_mad_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs an SM MAD Controller object.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_mad_ctrl_construct(
+ IN osm_sm_mad_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an SM MAD Controller
+* object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_sm_mad_ctrl_init and osm_sm_mad_ctrl_destroy.
+*
+* Calling osm_sm_mad_ctrl_construct is a prerequisite to calling any other
+* method except osm_sm_mad_ctrl_init.
+*
+* SEE ALSO
+* SM MAD Controller object, osm_sm_mad_ctrl_init,
+* osm_sm_mad_ctrl_destroy
+*********/
+
+/****f* OpenSM: SM MAD Controller/osm_sm_mad_ctrl_destroy
+* NAME
+* osm_sm_mad_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_sm_mad_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_mad_ctrl_destroy(
+ IN osm_sm_mad_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* SM MAD Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_sm_mad_ctrl_construct or osm_sm_mad_ctrl_init.
+*
+* SEE ALSO
+* SM MAD Controller object, osm_sm_mad_ctrl_construct,
+* osm_sm_mad_ctrl_init
+*********/
+
+/****f* OpenSM: SM MAD Controller/osm_sm_mad_ctrl_init
+* NAME
+* osm_sm_mad_ctrl_init
+*
+* DESCRIPTION
+* The osm_sm_mad_ctrl_init function initializes an
+* SM MAD Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_mad_ctrl_init(
+ IN osm_sm_mad_ctrl_t* const p_ctrl,
+ IN osm_subn_t* const p_subn,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_vl15_t* const p_vl15,
+ IN osm_vendor_t* const p_vendor,
+ IN osm_log_t* const p_log,
+ IN osm_stats_t* const p_stats,
+ IN cl_plock_t* const p_lock,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sm_mad_ctrl_t object to initialize.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_mad_pool
+* [in] Pointer to the MAD pool.
+*
+* p_vl15
+* [in] Pointer to the VL15 interface object.
+*
+* p_vendor
+* [in] Pointer to the vendor specific interfaces object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_stats
+* [in] Pointer to the OpenSM statistics block.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SM MAD Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other SM MAD Controller methods.
+*
+* SEE ALSO
+* SM MAD Controller object, osm_sm_mad_ctrl_construct,
+* osm_sm_mad_ctrl_destroy
+*********/
+
+/****f* OpenSM: SM/osm_sm_mad_ctrl_bind
+* NAME
+* osm_sm_mad_ctrl_bind
+*
+* DESCRIPTION
+* Binds the SM MAD Controller object to a port GUID.
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_sm_mad_ctrl_bind( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN const ib_net64_t port_guid ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sm_mad_ctrl_t object to initialize. +* +* port_guid +* [in] Local port GUID with which to bind. +* +* +* RETURN VALUES +* None +* +* NOTES +* A given SM MAD Controller object can only be bound to one +* port at a time. +* +* SEE ALSO +*********/ + +/****f* OpenSM: SM/osm_sm_mad_ctrl_get_bind_handle +* NAME +* osm_sm_mad_ctrl_get_bind_handle +* +* DESCRIPTION +* Returns the bind handle. +* +* SYNOPSIS +*/ +static inline osm_bind_handle_t +osm_sm_mad_ctrl_get_bind_handle( + IN const osm_sm_mad_ctrl_t* const p_ctrl ) +{ + return( p_ctrl->h_bind ); +} +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sm_mad_ctrl_t object. +* +* RETURN VALUES +* Returns the bind handle, which may be OSM_BIND_INVALID_HANDLE +* if no port has been bound. +* +* NOTES +* A given SM MAD Controller object can only be bound to one +* port at a time. +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /* _OSM_SM_MAD_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_state_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_state_mgr.h new file mode 100644 index 00000000..2d8282a9 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sm_state_mgr.h @@ -0,0 +1,353 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sm_state_mgr_t. + * This object represents the SM State Manager object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.2 $ + */ + +#ifndef _OSM_SM_STATE_MGR_H_ +#define _OSM_SM_STATE_MGR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SM State Manager +* NAME +* SM State Manager +* +* DESCRIPTION +* The SM State Manager object encapsulates the information +* needed to control the state of the SM. 
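For the SM MAD Controller interface above, a small hedged sketch of the bind-then-check pattern. OSM_BIND_INVALID_HANDLE comes from the osm_sm_mad_ctrl_get_bind_handle notes; the example_* name is illustrative:

    /* Sketch: bind the SM MAD controller to a local port and sanity
     * check the transport handle it obtained. */
    static ib_api_status_t
    example_mad_ctrl_bind(
        IN osm_sm_mad_ctrl_t* const p_ctrl,
        IN const ib_net64_t port_guid )
    {
        ib_api_status_t status;

        status = osm_sm_mad_ctrl_bind( p_ctrl, port_guid );
        if( status != IB_SUCCESS )
            return( status );

        /* the handle stays valid only while the controller is bound */
        if( osm_sm_mad_ctrl_get_bind_handle( p_ctrl ) ==
            OSM_BIND_INVALID_HANDLE )
            return( IB_ERROR );

        return( IB_SUCCESS );
    }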
+* +* The SM State Manager object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Yael Kalka, Mellanox +* +*********/ + +/****s* OpenSM: SM State Manager/osm_sm_state_mgr_t +* NAME +* osm_sm_state_mgr_t +* +* DESCRIPTION +* SM State Manager structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sm_state_mgr +{ + cl_spinlock_t state_lock; + cl_timer_t polling_timer; + uint32_t retry_number; + ib_net64_t master_guid; + osm_state_mgr_t* p_state_mgr; + osm_subn_t* p_subn; + osm_req_t* p_req; + osm_log_t* p_log; + osm_remote_sm_t* p_polling_sm; +} osm_sm_state_mgr_t; + +/* +* FIELDS +* state_lock +* Spinlock guarding the state and processes. +* +* retry_number +* Used on Standby state - to count the number of retries +* of queries to the master SM. +* +* polling_timer +* Timer for polling +* +* p_state_mgr +* Point to the state manager object +* +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_req +* Pointer to the generic attribute request object. +* +* p_log +* Pointer to the log object. +* +* p_polling_sm +* Pointer to a osm_remote_sm_t object. When our SM needs +* to poll on a remote sm, this will be the pointer of the +* polled SM. +* +* SEE ALSO +* SM State Manager object +*********/ + + +/****f* OpenSM: SM State Manager/osm_sm_state_mgr_construct +* NAME +* osm_sm_state_mgr_construct +* +* DESCRIPTION +* This function constructs a SM State Manager object. +* +* SYNOPSIS +*/ +void +osm_sm_state_mgr_construct( + IN osm_sm_state_mgr_t* const p_sm_mgr ); +/* +* PARAMETERS +* p_sm_mgr +* [in] Pointer to a SM State Manager object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows osm_sm_state_mgr_destroy +* +* Calling osm_sm_state_mgr_construct is a prerequisite to calling any other +* method except osm_sm_state_mgr_init. +* +* SEE ALSO +* SM State Manager object, osm_sm_state_mgr_init, +* osm_sm_state_mgr_destroy +*********/ + +/****f* OpenSM: SM State Manager/osm_sm_state_mgr_destroy +* NAME +* osm_sm_state_mgr_destroy +* +* DESCRIPTION +* The osm_sm_state_mgr_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_sm_state_mgr_destroy( + IN osm_sm_state_mgr_t* const p_sm_mgr ); +/* +* PARAMETERS +* p_sm_mgr +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SM State Manager object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sm_state_mgr_construct or osm_sm_state_mgr_init. +* +* SEE ALSO +* SM State Manager object, osm_sm_state_mgr_construct, +* osm_sm_state_mgr_init +*********/ + +/****f* OpenSM: SM State Manager/osm_sm_state_mgr_init +* NAME +* osm_sm_state_mgr_init +* +* DESCRIPTION +* The osm_sm_state_mgr_init function initializes a +* SM State Manager object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_sm_state_mgr_init( + IN osm_sm_state_mgr_t* const p_sm_mgr, + IN osm_state_mgr_t* const p_state_mgr, + IN osm_subn_t* const p_subn, + IN osm_req_t* const p_req, + IN osm_log_t* const p_log ); +/* +* PARAMETERS +* p_sm_mgr +* [in] Pointer to an osm_sm_state_mgr_t object to initialize. +* +* +* p_state_mgr +* [in] Pointer to the State Manager object. 
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_req
+* [in] Pointer to an osm_req_t object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* RETURN VALUES
+* IB_SUCCESS if the SM State Manager object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other SM State Manager methods.
+*
+* SEE ALSO
+* SM State Manager object, osm_sm_state_mgr_construct,
+* osm_sm_state_mgr_destroy
+*********/
+
+/****f* OpenSM: SM State Manager/osm_sm_state_mgr_process
+* NAME
+* osm_sm_state_mgr_process
+*
+* DESCRIPTION
+* Processes and maintains the states of the SM.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_state_mgr_process(
+ IN osm_sm_state_mgr_t* const p_sm_mgr,
+ IN osm_sm_signal_t signal );
+/*
+* PARAMETERS
+* p_sm_mgr
+* [in] Pointer to an osm_sm_state_mgr_t object.
+*
+* signal
+* [in] Signal to the SM state engine.
+*
+* RETURN VALUES
+* IB_SUCCESS if the signal was processed successfully.
+*
+* NOTES
+*
+* SEE ALSO
+* State Manager
+*********/
+
+/****f* OpenSM: SM State Manager/osm_sm_state_mgr_signal_master_is_alive
+* NAME
+* osm_sm_state_mgr_signal_master_is_alive
+*
+* DESCRIPTION
+* Signals that the remote master SM is alive and clears the
+* retry_number counter.
+*
+* SYNOPSIS
+*/
+void
+osm_sm_state_mgr_signal_master_is_alive(
+ IN osm_sm_state_mgr_t* const p_sm_mgr );
+/*
+* PARAMETERS
+* p_sm_mgr
+* [in] Pointer to an osm_sm_state_mgr_t object.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* State Manager
+*********/
+
+/****f* OpenSM: SM State Manager/osm_sm_state_mgr_check_legality
+* NAME
+* osm_sm_state_mgr_check_legality
+*
+* DESCRIPTION
+* Checks the legality of the received signal according to the
+* current state of the SM state machine.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sm_state_mgr_check_legality(
+ IN osm_sm_state_mgr_t* const p_sm_mgr,
+ IN osm_sm_signal_t signal );
+/*
+* PARAMETERS
+* p_sm_mgr
+* [in] Pointer to an osm_sm_state_mgr_t object.
+*
+* signal
+* [in] Signal to the SM state engine.
+*
+* RETURN VALUES
+* IB_SUCCESS if the signal is legal in the current SM state.
+*
+* NOTES
+*
+* SEE ALSO
+* State Manager
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SM_STATE_MGR_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv.h
new file mode 100644
index 00000000..51b5f985
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv.h
@@ -0,0 +1,291 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
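A hedged sketch of the intended call order for the SM State Manager routines above; both calls are documented in this header, and the example_* name is illustrative:

    /* Sketch: screen a signal for legality in the current SM state
     * before handing it to the SM state machine. */
    static ib_api_status_t
    example_sm_signal(
        IN osm_sm_state_mgr_t* const p_sm_mgr,
        IN osm_sm_signal_t signal )
    {
        ib_api_status_t status;

        status = osm_sm_state_mgr_check_legality( p_sm_mgr, signal );
        if( status != IB_SUCCESS )
            return( status );    /* illegal in the current state */

        return( osm_sm_state_mgr_process( p_sm_mgr, signal ) );
    }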
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sminfo_rcv_t. + * This object represents the SMInfo Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SMINFO_RCV_H_ +#define _OSM_SMINFO_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SMInfo Receiver +* NAME +* SMInfo Receiver +* +* DESCRIPTION +* The SMInfo Receiver object encapsulates the information +* needed to receive the SMInfo attribute from a node. +* +* The SMInfo Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: SMInfo Receiver/osm_sminfo_rcv_t +* NAME +* osm_sminfo_rcv_t +* +* DESCRIPTION +* SMInfo Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sminfo_rcv +{ + osm_subn_t *p_subn; + osm_stats_t *p_stats; + osm_log_t *p_log; + osm_resp_t *p_resp; + osm_state_mgr_t *p_state_mgr; + struct _osm_sm_state_mgr *p_sm_state_mgr; + cl_plock_t *p_lock; + +} osm_sminfo_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_stats +* Pointer to the OpenSM statistics block. +* +* p_log +* Pointer to the log object. +* +* p_resp +* Pointer to the generic MAD responder object. +* +* p_state_mgr +* Pointer to the State Manager object. +* +* p_sm_state_mgr +* Pointer to the SM State Manager object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* SMInfo Receiver object +*********/ + +/****f* OpenSM: SMInfo Receiver/osm_sminfo_rcv_construct +* NAME +* osm_sminfo_rcv_construct +* +* DESCRIPTION +* This function constructs a SMInfo Receiver object. +* +* SYNOPSIS +*/ +void osm_sminfo_rcv_construct( + IN osm_sminfo_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to a SMInfo Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sminfo_rcv_init, osm_sminfo_rcv_destroy +* +* Calling osm_sminfo_rcv_construct is a prerequisite to calling any other +* method except osm_sminfo_rcv_init. +* +* SEE ALSO +* SMInfo Receiver object, osm_sminfo_rcv_init, +* osm_sminfo_rcv_destroy +*********/ + +/****f* OpenSM: SMInfo Receiver/osm_sminfo_rcv_destroy +* NAME +* osm_sminfo_rcv_destroy +* +* DESCRIPTION +* The osm_sminfo_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_sminfo_rcv_destroy( + IN osm_sminfo_rcv_t* const p_rcv ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* SMInfo Receiver object. +* Further operations should not be attempted on the destroyed object. 
+* This function should only be called after a call to +* osm_sminfo_rcv_construct or osm_sminfo_rcv_init. +* +* SEE ALSO +* SMInfo Receiver object, osm_sminfo_rcv_construct, +* osm_sminfo_rcv_init +*********/ + +/****f* OpenSM: SMInfo Receiver/osm_sminfo_rcv_init +* NAME +* osm_sminfo_rcv_init +* +* DESCRIPTION +* The osm_sminfo_rcv_init function initializes a +* SMInfo Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_sminfo_rcv_init( + IN osm_sminfo_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_stats_t* const p_stats, + IN osm_resp_t* const p_resp, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN struct _osm_sm_state_mgr* const p_sm_state_mgr, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_sminfo_rcv_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_stats +* [in] Pointer to the OpenSM statistics block. +* +* p_resp +* [in] Pointer to the generic MAD Responder object. +* +* p_log +* [in] Pointer to the log object. +* +* p_state_mgr +* [in] Pointer to the State Manager object. +* +* p_sm_state_mgr +* [in] Pointer to the SM State Manager object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the SMInfo Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other SMInfo Receiver methods. +* +* SEE ALSO +* SMInfo Receiver object, osm_sminfo_rcv_construct, +* osm_sminfo_rcv_destroy +*********/ + +/****f* OpenSM: SMInfo Receiver/osm_sminfo_rcv_process +* NAME +* osm_sminfo_rcv_process +* +* DESCRIPTION +* Process the SMInfo attribute. +* +* SYNOPSIS +*/ +void osm_sminfo_rcv_process( + IN const osm_sminfo_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_rcv +* [in] Pointer to an osm_sminfo_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's SMInfo attribute. +* +* RETURN VALUES +* IB_SUCCESS if the SMInfo processing was successful. +* +* NOTES +* This function processes a SMInfo attribute. +* +* SEE ALSO +* SMInfo Receiver, SMInfo Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_SMINFO_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv_ctrl.h new file mode 100644 index 00000000..08d5dd0b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sminfo_rcv_ctrl.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
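The SMInfo Receiver above uses the same construct/init/destroy bracket as its peers. A minimal hedged sketch, assuming all dependency objects are initialized by the caller and using an illustrative example_* name:

    /* Sketch: construct and initialize the SMInfo Receiver, tearing it
     * back down if init fails (destroy is legal after construct). */
    static ib_api_status_t
    example_sminfo_rcv_start(
        IN osm_sminfo_rcv_t* const p_rcv,
        IN osm_subn_t* const p_subn,
        IN osm_stats_t* const p_stats,
        IN osm_resp_t* const p_resp,
        IN osm_log_t* const p_log,
        IN osm_state_mgr_t* const p_state_mgr,
        IN struct _osm_sm_state_mgr* const p_sm_state_mgr,
        IN cl_plock_t* const p_lock )
    {
        ib_api_status_t status;

        osm_sminfo_rcv_construct( p_rcv );
        status = osm_sminfo_rcv_init( p_rcv, p_subn, p_stats, p_resp,
                                      p_log, p_state_mgr,
                                      p_sm_state_mgr, p_lock );
        if( status != IB_SUCCESS )
            osm_sminfo_rcv_destroy( p_rcv );
        return( status );
    }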
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_sminfo_rcv_ctrl_t. + * This object represents a controller that receives the IBA SMInfo + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SMINFO_RCV_CTRL_H_ +#define _OSM_SMINFO_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/SMInfo Receive Controller +* NAME +* SMInfo Receive Controller +* +* DESCRIPTION +* The SMInfo Receive Controller object encapsulates the information +* needed to receive the SMInfo attribute from a node. +* +* The SMInfo Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: SMInfo Receive Controller/osm_sminfo_rcv_ctrl_t +* NAME +* osm_sminfo_rcv_ctrl_t +* +* DESCRIPTION +* SMInfo Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_sminfo_rcv_ctrl +{ + osm_sminfo_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_sminfo_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the SMInfo Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* SMInfo Receive Controller object +*********/ + +/****f* OpenSM: SMInfo Receive Controller/osm_sminfo_rcv_ctrl_construct +* NAME +* osm_sminfo_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a SMInfo Receive Controller object. +* +* SYNOPSIS +*/ +void +osm_sminfo_rcv_ctrl_construct( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a SMInfo Receive Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_sminfo_rcv_ctrl_init, osm_sminfo_rcv_ctrl_destroy +* +* Calling osm_sminfo_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_sminfo_rcv_ctrl_init. +* +* SEE ALSO +* SMInfo Receive Controller object, osm_sminfo_rcv_ctrl_init, +* osm_sminfo_rcv_ctrl_destroy +*********/ + +/****f* OpenSM: SMInfo Receive Controller/osm_sminfo_rcv_ctrl_destroy +* NAME +* osm_sminfo_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_sminfo_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_sminfo_rcv_ctrl_destroy( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. 
+* +* NOTES +* Performs any necessary cleanup of the specified +* SMInfo Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_sminfo_rcv_ctrl_construct or osm_sminfo_rcv_ctrl_init. +* +* SEE ALSO +* SMInfo Receive Controller object, osm_sminfo_rcv_ctrl_construct, +* osm_sminfo_rcv_ctrl_init +*********/ + +/****f* OpenSM: SMInfo Receive Controller/osm_sminfo_rcv_ctrl_init +* NAME +* osm_sminfo_rcv_ctrl_init +* +* DESCRIPTION +* The osm_sminfo_rcv_ctrl_init function initializes a +* SMInfo Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_sminfo_rcv_ctrl_init( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl, + IN osm_sminfo_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_sminfo_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_sminfo_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* IB_SUCCESS if the SMInfo Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other SMInfo Receive Controller methods. +* +* SEE ALSO +* SMInfo Receive Controller object, osm_sminfo_rcv_ctrl_construct, +* osm_sminfo_rcv_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* OSM_SMINFO_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr.h new file mode 100644 index 00000000..7e701796 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr.h @@ -0,0 +1,518 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_state_mgr_t. + * This object represents the State Manager object. + * This object is part of the OpenSM family of objects. 
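A hedged sketch of the full life cycle of the SMInfo Receive Controller declared above. The h_disp field suggests init registers with the dispatcher and destroy releases that registration, so the two should always be paired; the example_* name is illustrative:

    /* Sketch: construct, init, and destroy an SMInfo Receive
     * Controller as a matched bracket. */
    static void
    example_sminfo_rcv_ctrl_cycle(
        IN osm_sminfo_rcv_ctrl_t* const p_ctrl,
        IN osm_sminfo_rcv_t* const p_rcv,
        IN osm_log_t* const p_log,
        IN cl_dispatcher_t* const p_disp )
    {
        osm_sminfo_rcv_ctrl_construct( p_ctrl );
        if( osm_sminfo_rcv_ctrl_init( p_ctrl, p_rcv, p_log,
                                      p_disp ) != IB_SUCCESS )
            return;

        /* ... dispatcher now routes SMInfo MADs to the receiver ... */

        osm_sminfo_rcv_ctrl_destroy( p_ctrl );
    }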
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#ifndef _OSM_STATE_MGR_H_ +#define _OSM_STATE_MGR_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/State Manager +* NAME +* State Manager +* +* DESCRIPTION +* The State Manager object encapsulates the information +* needed to control subnet sweeps and configuration. +* +* The State Manager object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: State Manager/osm_state_mgr_t +* NAME +* osm_state_mgr_t +* +* DESCRIPTION +* State Manager structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_state_mgr +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + osm_lid_mgr_t *p_lid_mgr; + osm_ucast_mgr_t *p_ucast_mgr; + osm_mcast_mgr_t *p_mcast_mgr; + osm_link_mgr_t *p_link_mgr; + osm_drop_mgr_t *p_drop_mgr; + osm_req_t *p_req; + osm_stats_t *p_stats; + struct _osm_sm_state_mgr *p_sm_state_mgr; + const osm_sm_mad_ctrl_t *p_mad_ctrl; + cl_spinlock_t state_lock; + cl_spinlock_t idle_lock; + cl_qlist_t idle_time_list; + cl_plock_t *p_lock; + cl_event_t *p_subnet_up_event; + osm_sm_state_t state; + osm_state_mgr_mode_t state_step_mode; + osm_signal_t next_stage_signal; +} osm_state_mgr_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lid_mgr +* Pointer to the LID Manager object. +* +* p_ucast_mgr +* Pointer to the Unicast Manager object. +* +* p_mcast_mgr +* Pointer to the Multicast Manager object. +* +* p_link_mgr +* Pointer to the Link Manager object. +* +* p_drop_mgr +* Pointer to the Drop Manager object. +* +* p_req +* Pointer to the Requester object sending SMPs. +* +* p_stats +* Pointer to the OpenSM statistics block. +* +* p_sm_state_mgr +* Pointer to the SM state mgr object. +* +* p_mad_ctrl +* Pointer to the SM's MAD Controller object. +* +* state_lock +* Spinlock guarding the state and processes. +* +* p_lock +* lock guarding the subnet object. +* +* p_subnet_up_event +* Pointer to the event to set if/when the subnet comes up. +* +* state +* State of the SM. +* +* state_step_mode +* Controls the mode of progressing to next stage: +* OSM_STATE_STEP_CONTINUOUS - normal automatic progress mode +* OSM_STATE_STEP_TAKE_ONE - do one step and stop +* OSM_STATE_STEP_BREAK - stop before taking next step +* +* next_stage_signal +* Stores the signal to be provided when running the next stage. +* +* SEE ALSO +* State Manager object +*********/ + +/****s* OpenSM: State Manager/_osm_idle_item +* NAME +* _osm_idle_item +* +* DESCRIPTION +* Idle item. +* +* SYNOPSIS +*/ + +typedef osm_signal_t +(*osm_pfn_start_t)( + IN void *context1, + IN void *context2 ); + +typedef void +(*osm_pfn_done_t)( + IN void *context1, + IN void *context2 ); + +typedef struct _osm_idle_item +{ + cl_list_item_t list_item; + void* context1; + void* context2; + osm_pfn_start_t pfn_start; + osm_pfn_done_t pfn_done; +}osm_idle_item_t; + +/* +* FIELDS +* list_item +* list item. 
+*
+* context1
+* Caller context pointer passed to the callbacks.
+*
+* context2
+* Caller context pointer passed to the callbacks.
+*
+* pfn_start
+* Pointer to the start function.
+*
+* pfn_done
+* Pointer to the done function.
+*
+* SEE ALSO
+* State Manager object
+*********/
+
+/****f* OpenSM: State Manager/osm_state_mgr_process_idle
+* NAME
+* osm_state_mgr_process_idle
+*
+* DESCRIPTION
+* Builds an osm_idle_item, inserts it into the idle-time queue, and
+* signals the state manager.
+*
+* SYNOPSIS
+*/
+
+ib_api_status_t
+osm_state_mgr_process_idle(
+ IN osm_state_mgr_t* const p_mgr,
+ IN osm_pfn_start_t pfn_start,
+ IN osm_pfn_done_t pfn_done,
+ void* context1,
+ void* context2
+ );
+
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to a State Manager object.
+*
+* pfn_start
+* [in] Pointer to the start function, which will be called at
+* idle time.
+*
+* pfn_done
+* [in] Pointer to the done function, which will be called
+* once the count of outstanding SMPs reaches zero.
+*
+* context1
+* [in] Caller context passed to both callbacks.
+*
+* context2
+* [in] Caller context passed to both callbacks.
+*
+* RETURN VALUE
+* IB_SUCCESS or IB_ERROR
+*
+* NOTES
+*
+* SEE ALSO
+* State Manager object, osm_state_mgr_init,
+* osm_state_mgr_destroy
+*********/
+
+/****f* OpenSM: State Manager/osm_state_mgr_construct
+* NAME
+* osm_state_mgr_construct
+*
+* DESCRIPTION
+* This function constructs a State Manager object.
+*
+* SYNOPSIS
+*/
+void
+osm_state_mgr_construct(
+ IN osm_state_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to a State Manager object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_state_mgr_init and osm_state_mgr_destroy.
+*
+* Calling osm_state_mgr_construct is a prerequisite to calling any other
+* method except osm_state_mgr_init.
+*
+* SEE ALSO
+* State Manager object, osm_state_mgr_init,
+* osm_state_mgr_destroy
+*********/
+
+/****f* OpenSM: State Manager/osm_state_mgr_destroy
+* NAME
+* osm_state_mgr_destroy
+*
+* DESCRIPTION
+* The osm_state_mgr_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_state_mgr_destroy(
+ IN osm_state_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* State Manager object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_state_mgr_construct or osm_state_mgr_init.
+*
+* SEE ALSO
+* State Manager object, osm_state_mgr_construct,
+* osm_state_mgr_init
+*********/
+
+/****f* OpenSM: State Manager/osm_state_mgr_init
+* NAME
+* osm_state_mgr_init
+*
+* DESCRIPTION
+* The osm_state_mgr_init function initializes a
+* State Manager object for use.
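A hedged sketch of the idle-item mechanism documented above. The example_* names are illustrative, and OSM_SIGNAL_NONE is assumed to be the "no further work" member of osm_signal_t:

    /* Sketch: queue deferred work for the state manager's idle time.
     * pfn_start runs when the state manager goes idle and returns the
     * signal to run next; pfn_done runs once no SMPs are outstanding. */
    static osm_signal_t
    example_idle_start(
        IN void *context1,
        IN void *context2 )
    {
        /* start the deferred work using the caller's contexts */
        return( OSM_SIGNAL_NONE );   /* assumed signal name */
    }

    static void
    example_idle_done(
        IN void *context1,
        IN void *context2 )
    {
        /* the deferred work has drained */
    }

    static ib_api_status_t
    example_queue_idle_work(
        IN osm_state_mgr_t* const p_mgr,
        IN void* const p_work_ctx )
    {
        return( osm_state_mgr_process_idle( p_mgr, example_idle_start,
                                            example_idle_done,
                                            p_work_ctx, NULL ) );
    }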
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_state_mgr_init( + IN osm_state_mgr_t* const p_mgr, + IN osm_subn_t* const p_subn, + IN osm_lid_mgr_t* const p_lid_mgr, + IN osm_ucast_mgr_t* const p_ucast_mgr, + IN osm_mcast_mgr_t* const p_mcast_mgr, + IN osm_link_mgr_t* const p_link_mgr, + IN osm_drop_mgr_t* const p_drop_mgr, + IN osm_req_t* const p_req, + IN osm_stats_t* const p_stats, + IN struct _osm_sm_state_mgr* const p_sm_state_mgr, + IN const osm_sm_mad_ctrl_t* const p_mad_ctrl, + IN cl_plock_t* const p_lock, + IN cl_event_t* const p_subnet_up_event, + IN osm_log_t* const p_log ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_state_mgr_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_lid_mgr +* [in] Pointer to the LID Manager object. +* +* p_ucast_mgr +* [in] Pointer to the Unicast Manager object. +* +* p_mcast_mgr +* [in] Pointer to the Multicast Manager object. +* +* p_link_mgr +* [in] Pointer to the Link Manager object. +* +* p_drop_mgr +* [in] Pointer to the Drop Manager object. +* +* p_req +* [in] Pointer to the Request Controller object. +* +* p_stats +* [in] Pointer to the OpenSM statistics block. +* +* p_sm_state_mgr +* [in] Pointer to the SM state mgr object. +* +* p_mad_ctrl +* [in] Pointer to the SM's mad controller. +* +* p_subnet_up_event +* [in] Pointer to the event to set if/when the subnet comes up. +* +* p_log +* [in] Pointer to the log object. +* +* RETURN VALUES +* IB_SUCCESS if the State Manager object was initialized +* successfully. +* +* NOTES +* Allows calling other State Manager methods. +* +* SEE ALSO +* State Manager object, osm_state_mgr_construct, +* osm_state_mgr_destroy +*********/ + +/****f* OpenSM: State Manager/osm_sm_is_greater_than +* NAME +* osm_sm_is_greater_than +* +* DESCRIPTION +* Compares two SM's (14.4.1.2) +* +* SYNOPSIS +*/ +static inline boolean_t +osm_sm_is_greater_than ( + IN const uint8_t l_priority, + IN const ib_net64_t l_guid, + IN const uint8_t r_priority, + IN const ib_net64_t r_guid ) +{ + if( l_priority > r_priority ) + { + return( TRUE ); + } + else + { + if( l_priority == r_priority ) + { + if( cl_ntoh64(l_guid) < cl_ntoh64(r_guid) ) + { + return( TRUE ); + } + } + } + return( FALSE ); +} +/* +* PARAMETERS +* l_priority +* [in] Priority of the SM on the "left" +* +* l_guid +* [in] GUID of the SM on the "left" +* +* r_priority +* [in] Priority of the SM on the "right" +* +* r_guid +* [in] GUID of the SM on the "right" +* +* RETURN VALUES +* Return TRUE if an sm with l_priority and l_guid is higher than an sm +* with r_priority and r_guid, +* return FALSE otherwise. +* +* NOTES +* +* SEE ALSO +* State Manager +*********/ + +/****f* OpenSM: State Manager/osm_state_mgr_process +* NAME +* osm_state_mgr_process +* +* DESCRIPTION +* Processes and maintains the states of the SM. +* +* SYNOPSIS +*/ +void +osm_state_mgr_process( + IN osm_state_mgr_t* const p_mgr, + IN osm_signal_t signal ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_state_mgr_t object. +* +* signal +* [in] Signal to the state engine. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* State Manager +*********/ + +END_C_DECLS + +#endif /* _OSM_STATE_MGR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr_ctrl.h new file mode 100644 index 00000000..86043a58 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_state_mgr_ctrl.h @@ -0,0 +1,233 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. 
All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_state_mgr_ctrl_t. + * This object represents a controller that receives the + * State indication after a subnet sweep. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_STATE_MGR_CTRL_H_ +#define _OSM_STATE_MGR_CTRL_H_ + +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/State Manager Controller +* NAME +* State Manager Controller +* +* DESCRIPTION +* The State Manager Controller object encapsulates the information +* needed to pass the dispatcher message from the dispatcher +* to the State Manager. +* +* The State Manager Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: State Manager Controller/osm_state_mgr_ctrl_t +* NAME +* osm_state_mgr_ctrl_t +* +* DESCRIPTION +* State Manager Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_state_mgr_ctrl +{ + osm_state_mgr_t *p_mgr; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_state_mgr_ctrl_t; +/* +* FIELDS +* p_mgr +* Pointer to the State Manager object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* State Manager Controller object +*********/ + +/****f* OpenSM: State Manager Controller/osm_state_mgr_ctrl_construct +* NAME +* osm_state_mgr_ctrl_construct +* +* DESCRIPTION +* This function constructs a State Manager Controller object. +* +* SYNOPSIS +*/ +void +osm_state_mgr_ctrl_construct( + IN osm_state_mgr_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a State Manager Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. 
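A worked example for osm_sm_is_greater_than, defined inline in osm_state_mgr.h above (IBA 14.4.1.2): higher priority wins, and on a priority tie the numerically lower GUID wins. The example_* wrapper is illustrative:

    /* Returns TRUE when the remote SM outranks the local one, i.e.
     * when the remote SM should become (or remain) master. */
    static boolean_t
    example_remote_sm_should_be_master(
        IN const uint8_t local_pri,
        IN const ib_net64_t local_guid,
        IN const uint8_t remote_pri,
        IN const ib_net64_t remote_guid )
    {
        return( osm_sm_is_greater_than( remote_pri, remote_guid,
                                        local_pri, local_guid ) );
    }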
+* +* NOTES +* Allows calling osm_state_mgr_ctrl_init, and osm_state_mgr_ctrl_destroy. +* +* Calling osm_state_mgr_ctrl_construct is a prerequisite to calling any +* other method except osm_state_mgr_ctrl_init. +* +* SEE ALSO +* State Manager Controller object, osm_state_mgr_ctrl_init, +* osm_state_mgr_ctrl_destroy +*********/ + +/****f* OpenSM: State Manager Controller/osm_state_mgr_ctrl_destroy +* NAME +* osm_state_mgr_ctrl_destroy +* +* DESCRIPTION +* The osm_state_mgr_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_state_mgr_ctrl_destroy( + IN osm_state_mgr_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* State Manager Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_state_mgr_ctrl_construct or osm_state_mgr_ctrl_init. +* +* SEE ALSO +* State Manager Controller object, osm_state_mgr_ctrl_construct, +* osm_state_mgr_ctrl_init +*********/ + +/****f* OpenSM: State Manager Controller/osm_state_mgr_ctrl_init +* NAME +* osm_state_mgr_ctrl_init +* +* DESCRIPTION +* The osm_state_mgr_ctrl_init function initializes a +* State Manager Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_state_mgr_ctrl_init( + IN osm_state_mgr_ctrl_t* const p_ctrl, + IN osm_state_mgr_t* const p_mgr, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_state_mgr_ctrl_t object to initialize. +* +* p_mgr +* [in] Pointer to an osm_state_mgr_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* IB_SUCCESS if the State Manager Controller object +* was initialized successfully. +* +* NOTES +* Allows calling other State Manager Controller methods. +* +* SEE ALSO +* State Manager Controller object, osm_state_mgr_ctrl_construct, +* osm_state_mgr_ctrl_destroy +*********/ + +END_C_DECLS + +#endif /* OSM_STATE_MGR_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_stats.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_stats.h new file mode 100644 index 00000000..07c64e1a --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_stats.h @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_stats_t.
+ *	This object represents the OpenSM statistics object.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_STATS_H_
+#define _OSM_STATS_H_
+
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Statistics
+* NAME
+*	Statistics
+*
+* DESCRIPTION
+*	The Statistics object encapsulates the information needed by the
+*	OpenSM to track interesting traffic and internal statistics.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Statistics/osm_stats_t
+* NAME
+*	osm_stats_t
+*
+* DESCRIPTION
+*	OpenSM statistics block.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_stats
+{
+	atomic32_t qp0_mads_outstanding;
+	atomic32_t qp0_mads_outstanding_on_wire;
+	atomic32_t qp0_mads_rcvd;
+	atomic32_t qp0_mads_sent;
+	atomic32_t qp0_unicasts_sent;
+	atomic32_t qp1_mads_outstanding;
+	atomic32_t qp1_mads_rcvd;
+	atomic32_t qp1_mads_sent;
+
+} osm_stats_t;
+/*
+* FIELDS
+*	qp0_mads_outstanding
+*		Contains the number of MADs outstanding on QP0.
+*		When this value reaches zero, OpenSM has discovered all
+*		nodes on the subnet, and finished retrieving attributes.
+*		At that time, subnet configuration may begin.
+*		This variable must be manipulated using atomic instructions.
+*
+*	qp0_mads_outstanding_on_wire
+*		The number of MADs outstanding on the wire at any moment.
+*
+*	qp0_mads_rcvd
+*		Total number of QP0 MADs received.
+*
+*	qp0_mads_sent
+*		Total number of QP0 MADs sent.
+*
+*	qp0_unicasts_sent
+*		Total number of response-less MADs sent on the wire. This count
+*		includes getresp(), send() and trap() methods.
+*
+*	qp1_mads_outstanding
+*		Contains the number of MADs outstanding on QP1.
+*
+*	qp1_mads_rcvd
+*		Total number of QP1 MADs received.
+*
+*	qp1_mads_sent
+*		Total number of QP1 MADs sent.
+*
+* SEE ALSO
+***************/
+
+END_C_DECLS
+
+#endif /* _OSM_STATS_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet.h
new file mode 100644
index 00000000..49a03260
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet.h
@@ -0,0 +1,1161 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_subn_t.
+ *	This object represents an IBA subnet.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.12 $
+ */
+
+#ifndef _OSM_SUBNET_H_
+#define _OSM_SUBNET_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+#define OSM_SUBNET_VECTOR_MIN_SIZE	0
+#define OSM_SUBNET_VECTOR_GROW_SIZE	1
+#define OSM_SUBNET_VECTOR_CAPACITY	256
+
+struct _osm_opensm_t;
+
+/****h* OpenSM/Subnet
+* NAME
+*	Subnet
+*
+* DESCRIPTION
+*	The Subnet object encapsulates the information needed by the
+*	OpenSM to manage a subnet. The OpenSM allocates one Subnet object
+*	per IBA subnet.
+*
+*	The Subnet object is not thread safe, thus callers must provide
+*	serialization.
+*
+*	This object is essentially a container for the various components
+*	of a subnet. Callers may directly access the member variables.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+
+/****f* OpenSM: Subnet/osm_pfn_ui_extension
+* NAME
+*	osm_pfn_ui_extension
+*
+* DESCRIPTION
+*	This typedef defines the prototype for UI extension functions
+*	that may be registered in the subnet options to handle
+*	pre_lid_assign and ui_ucast_fwd_assign.
+*
+* SYNOPSIS
+*/
+typedef int
+(*osm_pfn_ui_extension_t)(
+	IN void* context );
+/*
+* PARAMETERS
+*	context
+*		[in] Client-specific context specified in the subnet options.
+*		It uses the same prefix as the UI function, suffixed by _ctx.
+*
+* RETURN VALUE
+*	This function returns an int whose meaning depends on the
+*	specific call.
+*
+* SEE ALSO
+*
+*********/
+
+/****f* OpenSM: Subnet/osm_pfn_ui_mcast_extension
+* NAME
+*	osm_pfn_ui_mcast_extension
+*
+* DESCRIPTION
+*	This typedef defines the prototype for UI extension functions
+*	that may be registered in the subnet options to handle
+*	ui_mcast_fwd_assign.
+*
+* SYNOPSIS
+*/
+typedef void
+(*osm_pfn_ui_mcast_extension_t)(
+	IN void *context,
+	IN ib_net16_t mlid,
+	IN osm_mcast_req_type_t request_type,
+	IN ib_net64_t port_guid );
+/*
+* PARAMETERS
+*	context
+*		[in] Client-specific context specified in the subnet options.
+*		It uses the same prefix as the UI function, suffixed by _ctx.
+*
+*	mlid
+*		[in] Multicast LID of the group being handled.
+*
+*	request_type
+*		[in] Type of MC request being handled (create/join/leave).
+*
+*	port_guid
+*		[in] Port GUID of the port that was added to or removed from
+*		the multicast group being handled.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* SEE ALSO
+*
+*********/
+
+/****d* OpenSM: Subnet/osm_testability_modes_t
+* NAME
+*	osm_testability_modes_t
+*
+* DESCRIPTION
+*	Enumerates the possible testability modes.
+*
+* SYNOPSIS
+*/
+typedef enum _osm_testability_modes
+{
+	OSM_TEST_MODE_NONE = 0,
+	OSM_TEST_MODE_EXIT_BEFORE_SEND_HANDOVER,
+	OSM_TEST_MODE_MAX
+} osm_testability_modes_t;
+/***********/
+
+/****s* OpenSM: Subnet/osm_qos_options_t
+* NAME
+*	osm_qos_options_t
+*
+* DESCRIPTION
+*	Subnet QoS options structure. This structure contains the various
+*	QoS-specific configuration parameters for the subnet.
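+*
+*	For illustration only, with hypothetical values (not the shipped
+*	defaults): a VL arbitration template is a comma-separated list of
+*	VL:weight pairs, and an SL2VL template lists the VL assigned to
+*	each of the 16 SLs in order, e.g.
+*
+*		vlarb_high: "0:4,1:0,2:0,3:0"
+*		sl2vl:      "0,1,2,3,0,1,2,3,0,1,2,3,0,1,2,3"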
+*
+* SYNOPSIS
+*/
+typedef struct _osm_qos_options_t {
+	unsigned max_vls;
+	unsigned high_limit;
+	char *vlarb_high;
+	char *vlarb_low;
+	char *sl2vl;
+} osm_qos_options_t;
+/*
+* FIELDS
+*
+*	max_vls
+*		The maximum number of VLs on the subnet.
+*
+*	high_limit
+*		The limit of the high-priority component of the VL Arbitration
+*		table (IBA 7.6.9).
+*
+*	vlarb_high
+*		High-priority VL Arbitration table template.
+*
+*	vlarb_low
+*		Low-priority VL Arbitration table template.
+*
+*	sl2vl
+*		SL2VL Mapping table (IBA 7.6.6) template.
+*
+*********/
+
+/****s* OpenSM: Subnet/osm_subn_opt_t
+* NAME
+*	osm_subn_opt_t
+*
+* DESCRIPTION
+*	Subnet options structure. This structure contains the various
+*	site-specific configuration parameters for the subnet.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_subn_opt
+{
+	ib_net64_t guid;
+	ib_net64_t m_key;
+	ib_net64_t sm_key;
+	ib_net64_t subnet_prefix;
+	ib_net16_t m_key_lease_period;
+	uint32_t sweep_interval;
+	uint32_t max_wire_smps;
+	uint32_t transaction_timeout;
+	uint8_t sm_priority;
+	uint8_t lmc;
+	boolean_t lmc_esp0;
+	uint8_t max_op_vls;
+	uint8_t force_link_speed;
+	boolean_t reassign_lids;
+	boolean_t reassign_lfts;
+	boolean_t ignore_other_sm;
+	boolean_t single_thread;
+	boolean_t no_multicast_option;
+	boolean_t disable_multicast;
+	boolean_t force_log_flush;
+	uint8_t subnet_timeout;
+	uint8_t packet_life_time;
+	uint8_t vl_stall_count;
+	uint8_t leaf_vl_stall_count;
+	uint8_t head_of_queue_lifetime;
+	uint8_t leaf_head_of_queue_lifetime;
+	uint8_t local_phy_errors_threshold;
+	uint8_t overrun_errors_threshold;
+	uint32_t sminfo_polling_timeout;
+	uint32_t polling_retry_number;
+	uint32_t max_msg_fifo_timeout;
+	boolean_t force_heavy_sweep;
+	uint8_t log_flags;
+	char * dump_files_dir;
+	char * log_file;
+	unsigned long log_max_size;
+	char * partition_config_file;
+	boolean_t no_partition_enforcement;
+	boolean_t no_qos;
+	boolean_t accum_log_file;
+	boolean_t console;
+	cl_map_t port_prof_ignore_guids;
+	boolean_t port_profile_switch_nodes;
+	osm_pfn_ui_extension_t pfn_ui_pre_lid_assign;
+	void * ui_pre_lid_assign_ctx;
+	osm_pfn_ui_mcast_extension_t pfn_ui_mcast_fdb_assign;
+	void * ui_mcast_fdb_assign_ctx;
+	boolean_t sweep_on_trap;
+	osm_testability_modes_t testability_mode;
+	char * routing_engine_name;
+	char * lid_matrix_dump_file;
+	char * ucast_dump_file;
+	char * updn_guid_file;
+	char * sa_db_file;
+	boolean_t exit_on_fatal;
+	boolean_t honor_guid2lid_file;
+	osm_qos_options_t qos_options;
+	osm_qos_options_t qos_ca_options;
+	osm_qos_options_t qos_sw0_options;
+	osm_qos_options_t qos_swe_options;
+	osm_qos_options_t qos_rtr_options;
+	boolean_t enable_quirks;
+	boolean_t no_clients_rereg;
+} osm_subn_opt_t;
+/*
+* FIELDS
+*
+*	guid
+*		The port GUID that the SM is binding to.
+*
+*	m_key
+*		M_Key value sent to all ports, qualifying all Set(PortInfo)
+*		requests.
+*
+*	sm_key
+*		SM_Key value of the SM, used to qualify received SA queries
+*		as "trusted".
+*
+*	subnet_prefix
+*		Subnet prefix used on this subnet.
+*
+*	m_key_lease_period
+*		The lease period used for the M_Key on this subnet.
+*
+*	sweep_interval
+*		The number of seconds between subnet sweeps. A value of 0
+*		disables sweeping.
+*
+*	sm_priority
+*		The priority of this SM as specified by the user. This
+*		value is made available in the SMInfo attribute.
+*
+*	lmc
+*		The LMC value used on this subnet.
+*
+*	lmc_esp0
+*		Whether the subnet LMC value should also be used for
+*		Enhanced Switch Port 0. If TRUE, it is used.
+*		Otherwise (the default), LMC is set to 0 for ESP0.
+*
+*	max_op_vls
+*		Limits the maximal operational VLs. The default is 1.
+*
+*	reassign_lids
+*		If TRUE, causes all LIDs to be reassigned.
+*		Otherwise (the default), OpenSM tries to preserve existing
+*		LIDs as much as possible.
+*
+*	reassign_lfts
+*		If TRUE (the default), existing LFT entries are ignored on the
+*		first sweep; otherwise only non-minimal-hop cases are modified.
+*		NOTE: a standby SM clears its first-sweep flag, since the
+*		master SM already sweeps.
+*
+*	ignore_other_sm
+*		This flag is TRUE if other SMs on the subnet should be ignored.
+*
+*	no_multicast_option
+*		This flag is TRUE if OpenSM should disable multicast support.
+*
+*	max_msg_fifo_timeout
+*		The maximal time a message can stay in the incoming message
+*		queue. If there is more than one message in the queue and the
+*		last message stayed in the queue longer than this value, the
+*		SA request is immediately returned with a BUSY status.
+*
+*	subnet_timeout
+*		The subnet_timeout value that will be set for all ports on the
+*		subnet via SubnMgt.Set(PortInfo.subnet_timeout).
+*
+*	vl_stall_count
+*		The number of sequential packets dropped that cause the port
+*		to enter the VLStalled state.
+*
+*	leaf_vl_stall_count
+*		The number of sequential packets dropped that cause the port
+*		to enter the VLStalled state. This is for switch ports driving
+*		a CA or router port.
+*
+*	head_of_queue_lifetime
+*		The maximal time a packet can live at the head of a VL queue
+*		on any port not driving a CA or router port.
+*
+*	leaf_head_of_queue_lifetime
+*		The maximal time a packet can live at the head of a VL queue
+*		on switch ports driving a CA or router.
+*
+*	local_phy_errors_threshold
+*		Threshold of local phy errors for sending Trap 129.
+*
+*	overrun_errors_threshold
+*		Threshold of credit overrun errors for sending Trap 129.
+*
+*	sminfo_polling_timeout
+*		Specifies the polling timeout (in milliseconds), i.e. the
+*		timeout between one poll and the next.
+*
+*	packet_life_time
+*		The maximal time a packet can stay in a switch. The value is
+*		sent to all switches via SubnMgt.Set(SwitchInfo.life_state).
+*
+*	dump_files_dir
+*		The directory to be used for subnet.lst, osm.fdbs, osm.mcfdbs
+*		and the default log file (the latter for Windows, not Linux).
+*
+*	log_file
+*		Name of the log file, or NULL for stdout.
+*
+*	log_max_size
+*		This option defines the maximal log file size in MB. When
+*		specified, the log file will be truncated upon reaching
+*		this limit.
+*
+*	accum_log_file
+*		If TRUE (the default), the log file is accumulated across runs.
+*		If FALSE, the log file is erased before the current opensm
+*		run starts.
+*
+*	port_prof_ignore_guids
+*		A map of GUIDs to be ignored by port profiling.
+*
+*	port_profile_switch_nodes
+*		If TRUE, the number of switch nodes routed through the link is
+*		counted. If FALSE, only CA/router nodes are counted.
+*
+*	pfn_ui_pre_lid_assign
+*		A UI function to be invoked prior to LID assignment. It should
+*		return 1 if any change was made to any LID, or 0 otherwise.
+*
+*	ui_pre_lid_assign_ctx
+*		A UI context (void *) to be provided to pfn_ui_pre_lid_assign.
+*
+*	pfn_ui_mcast_fdb_assign
+*		A UI function to be called inside the mcast manager instead of
+*		the call that builds the spanning tree. It is called on every
+*		multicast create, join and leave, and is responsible for the
+*		mcast FDB configuration.
+*
+*	ui_mcast_fdb_assign_ctx
+*		A UI context (void *) to be provided to pfn_ui_mcast_fdb_assign.
+*
+*	sweep_on_trap
+*		Received traps will initiate a new sweep.
+*
+*	testability_mode
+*		Object that indicates if we are running in a special
+*		testability mode.
+*
+*	routing_engine_name
+*		Name of the routing engine to use
+*		(other than the default Min Hop algorithm).
+*
+*	lid_matrix_dump_file
+*		Name of the LID matrix dump file from which switch
+*		LID matrices (min hops tables) will be loaded.
+*
+*	ucast_dump_file
+*		Name of the unicast routing dump file from which switch
+*		forwarding tables will be loaded.
+*
+*	updn_guid_file
+*		Pointer to the name of the user-supplied UPDN guid file.
+*
+*	sa_db_file
+*		Name of the SA database file.
+*
+*	exit_on_fatal
+*		If TRUE (the default), the SM exits on fatal subnet
+*		initialization issues; if FALSE, it does not exit.
+*		Fatal initialization issues are, for example:
+*		(a) the SM recognizes 2 different nodes with the same GUID, or
+*		(b) a 12x link with badly configured lane reversal.
+*
+*	honor_guid2lid_file
+*		Always honor the guid2lid file if it exists and is valid. This
+*		means that the file will be honored when the SM is coming out
+*		of STANDBY. By default this is FALSE.
+*
+*	qos_options
+*		Default set of QoS options.
+*
+*	qos_ca_options
+*		QoS options for CA ports.
+*
+*	qos_sw0_options
+*		QoS options for switches' port 0.
+*
+*	qos_swe_options
+*		QoS options for switches' external ports.
+*
+*	qos_rtr_options
+*		QoS options for router ports.
+*
+*	enable_quirks
+*		Enables high-risk new features and hardware-specific
+*		workarounds that are not fully qualified.
+*
+*	no_clients_rereg
+*		When TRUE, disables client reregistration requests.
+*
+* SEE ALSO
+*	Subnet object
+*********/
+
+/****s* OpenSM: Subnet/osm_subn_t
+* NAME
+*	osm_subn_t
+*
+* DESCRIPTION
+*	Subnet structure. Callers may directly access member components,
+*	after grabbing a lock.
+*
+* TO DO
+*	This structure should probably be volatile.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_subn
+{
+	struct _osm_opensm_t *p_osm;
+	cl_qmap_t sw_guid_tbl;
+	cl_qmap_t node_guid_tbl;
+	cl_qmap_t port_guid_tbl;
+	cl_qmap_t rtr_guid_tbl;
+	cl_qmap_t prtn_pkey_tbl;
+	cl_qmap_t mgrp_mlid_tbl;
+	cl_qmap_t sm_guid_tbl;
+	cl_list_t light_sweep_physp_list;
+	cl_qlist_t sa_sr_list;
+	cl_qlist_t sa_infr_list;
+	cl_ptr_vector_t node_lid_tbl;
+	cl_ptr_vector_t port_lid_tbl;
+	ib_net16_t master_sm_base_lid;
+	ib_net16_t sm_base_lid;
+	ib_net64_t sm_port_guid;
+	uint8_t sm_state;
+	osm_subn_opt_t opt;
+	uint16_t max_unicast_lid_ho;
+	uint16_t max_multicast_lid_ho;
+	uint8_t min_ca_mtu;
+	uint8_t min_ca_rate;
+	boolean_t ignore_existing_lfts;
+	boolean_t subnet_initialization_error;
+	boolean_t force_immediate_heavy_sweep;
+	boolean_t force_delayed_heavy_sweep;
+	cl_list_t new_ports_list;
+	boolean_t in_sweep_hop_0;
+	boolean_t moved_to_master_state;
+	boolean_t first_time_master_sweep;
+	boolean_t coming_out_of_standby;
+} osm_subn_t;
+/*
+* FIELDS
+*	p_osm
+*		Pointer to the parent OpenSM object.
+*
+*	sw_guid_tbl
+*		Container of pointers to all Switch objects in the subnet.
+*		Indexed by node GUID.
+*
+*	node_guid_tbl
+*		Container of pointers to all Node objects in the subnet.
+*		Indexed by node GUID.
+*
+*	port_guid_tbl
+*		Container of pointers to all Port objects in the subnet.
+*		Indexed by port GUID - network order!
+*
+*	rtr_guid_tbl
+*		Container of pointers to all Router objects in the subnet.
+*		Indexed by node GUID.
+*
+*	prtn_pkey_tbl
+*		Container of pointers to all Partition objects in the subnet.
+*		Indexed by P_KEY.
+*
+*	mgrp_mlid_tbl
+*		Container of pointers to all Multicast Group objects in the
+*		subnet. Indexed by MLID.
+*
+*	sm_guid_tbl
+*		Container of pointers to SM objects representing other SMs
+*		on the subnet.
+*
+*	light_sweep_physp_list
+*		A list of all physical ports to scan for a change in the
+*		remote side's state on the next light sweep. These ports are
+*		not down, but for some reason the remote side did not answer.
+*
+*	node_lid_tbl
+*		Container of pointers to all Node objects in the subnet.
+*		Indexed by node LID.
+*
+*	port_lid_tbl
+*		Container of pointers to all Port objects in the subnet.
+*		Indexed by port LID.
+*
+*	master_sm_base_lid
+*		The base LID owned by the subnet's master SM.
+*
+*	sm_base_lid
+*		The base LID of the local port where the SM is.
+*
+*	sm_port_guid
+*		This SM's own port GUID.
+*
+*	sm_state
+*		The high-level state of the SM. This value is made available
+*		in the SMInfo attribute.
+*
+*	opt
+*		Subnet options structure containing site-specific
+*		configuration.
+*
+*	max_unicast_lid_ho
+*		The minimal max unicast LID reported by all switches.
+*
+*	max_multicast_lid_ho
+*		The minimal max multicast LID reported by all switches.
+*
+*	min_ca_mtu
+*		The minimal MTU reported by all CA ports on the subnet.
+*
+*	min_ca_rate
+*		The minimal rate reported by all CA ports on the subnet.
+*
+*	ignore_existing_lfts
+*		This is a dynamic flag instructing the LFT assignment to
+*		ignore existing legal LFT settings.
+*		The value is set as follows:
+*		- During SM init, set to the reassign_lfts flag value.
+*		- Coming out of STANDBY, it is cleared (another SM worked).
+*		- Any change to the list of switches sets it to TRUE.
+*		- Set to FALSE at the end of all LFT assignments.
+*
+*	subnet_initialization_error
+*		Similar to the force_immediate_heavy_sweep flag. If TRUE,
+*		errors occurred during initialization (due to SubnSet requests
+*		that failed). We want to declare the subnet unhealthy and
+*		force another heavy sweep.
+*
+*	force_immediate_heavy_sweep
+*		If TRUE, we want to force a heavy sweep. This can happen
+*		either due to a received trap, meaning something changed on
+*		the subnet, or due to a handover received from a remote SM.
+*		In this case we want to sweep and reconfigure the entire
+*		subnet, causing another heavy sweep to occur when the
+*		current sweep is done.
+*
+*	force_delayed_heavy_sweep
+*		Similar in spirit to force_immediate_heavy_sweep, except that
+*		it causes a heavy sweep on the next sweep. Note that when
+*		running with -s 0 (no sweeps), this forced heavy sweep will
+*		not occur. If some trouble on the subnet caused a strange
+*		dropping of ports, we will try another heavy sweep on our
+*		next sweep.
+*
+*	new_ports_list
+*		Container of pointers to port objects that were discovered
+*		for the first time during the current sweep.
+*
+*	in_sweep_hop_0
+*		When TRUE, we are in sweep_hop_0, meaning discovery should
+*		not continue beyond the current node. This is relevant when
+*		the SM runs on a switch, since the SwitchInfo handling needs
+*		to signal somehow not to continue the sweep.
+*
+*	moved_to_master_state
+*		Used for writing "SUBNET UP" into /var/log/messages.
+*		Set to TRUE when the SM switches to Master state, and back
+*		to FALSE once the subnet is up.
+*
+*	first_time_master_sweep
+*		This flag is used for the PortInfo setting. On the first sweep
+*		as master (i.e. after moving from the Standby or Discovering
+*		state), the SM must send a PortInfoSet to all ports.
+*		After that, we want to minimize the number of PortInfoSet
+*		requests sent, sending only requests that change a value
+*		already recorded for the port (or a first request if this is
+*		a new port). We set this flag to TRUE when entering the master
+*		state, and set it back to FALSE at the end of the Drop
+*		Manager, since by that point all reachable ports have been
+*		updated and these are the only ports we have data for; no
+*		extra Set requests should be sent to them anymore.
+*
+*	coming_out_of_standby
+*		TRUE on the first sweep after the SM was in standby.
+*		Used for invalidating any cached LID and routing information.
+*		The flag is set to TRUE when the SM state changes from
+*		STANDBY to MASTER, and is reset at the end of the sweep.
+*
+* SEE ALSO
+*	Subnet object
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_construct
+* NAME
+*	osm_subn_construct
+*
+* DESCRIPTION
+*	This function constructs a Subnet object.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_construct(
+	IN osm_subn_t* const p_subn );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to a Subnet object to construct.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Allows calling osm_subn_init, and osm_subn_destroy.
+*
+*	Calling osm_subn_construct is a prerequisite to calling any other
+*	method except osm_subn_init.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_init, osm_subn_destroy
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_destroy
+* NAME
+*	osm_subn_destroy
+*
+* DESCRIPTION
+*	The osm_subn_destroy function destroys a subnet, releasing
+*	all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_destroy(
+	IN osm_subn_t* const p_subn );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to a Subnet object to destroy.
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified Subnet object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_subn_construct or osm_subn_init.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_init
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_init
+* NAME
+*	osm_subn_init
+*
+* DESCRIPTION
+*	The osm_subn_init function initializes a Subnet object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_subn_init(
+	IN osm_subn_t* const p_subn,
+	IN struct _osm_opensm_t * const p_osm,
+	IN const osm_subn_opt_t* const p_opt );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to an osm_subn_t object to initialize.
+*
+*	p_osm
+*		[in] Pointer to the parent OpenSM object.
+*
+*	p_opt
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Subnet object was initialized successfully.
+*
+* NOTES
+*	Allows calling other Subnet methods.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy
+*********/
+
+/*
+  Forward references.
+*/
+struct _osm_mad_addr;
+struct _osm_log;
+struct _osm_switch;
+struct _osm_physp;
+struct _osm_port;
+
+/****f* OpenSM: Helper/osm_get_gid_by_mad_addr
+* NAME
+*	osm_get_gid_by_mad_addr
+*
+* DESCRIPTION
+*	Looks for the requester GID in the mad address.
+*
+*	Note: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_get_gid_by_mad_addr(
+	IN struct _osm_log *p_log,
+	IN const osm_subn_t *p_subn,
+	IN const struct _osm_mad_addr *p_mad_addr,
+	OUT ib_gid_t *p_gid);
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to subnet object.
+*
+*	p_mad_addr
+*		[in] Pointer to mad address object.
+*
+*	p_gid
+*		[out] Pointer to the GID structure to fill in.
+*
+* RETURN VALUES
+*	IB_SUCCESS if able to find the GID by the given address.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_get_physp_by_mad_addr
+* NAME
+*	osm_get_physp_by_mad_addr
+*
+* DESCRIPTION
+*	Looks for the requester physical port in the mad address.
+*
+*	Note: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+struct _osm_physp *
+osm_get_physp_by_mad_addr(
+	IN struct _osm_log *p_log,
+	IN const osm_subn_t *p_subn,
+	IN struct _osm_mad_addr *p_mad_addr );
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to subnet object.
+*
+*	p_mad_addr
+*		[in] Pointer to mad address object.
+*
+* RETURN VALUES
+*	Pointer to the requester physical port object if found.
+*	NULL otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Helper/osm_get_port_by_mad_addr
+* NAME
+*	osm_get_port_by_mad_addr
+*
+* DESCRIPTION
+*	Looks for the requester port in the mad address.
+*
+*	Note: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+struct _osm_port *
+osm_get_port_by_mad_addr(
+	IN struct _osm_log *p_log,
+	IN const osm_subn_t *p_subn,
+	IN struct _osm_mad_addr *p_mad_addr );
+/*
+* PARAMETERS
+*	p_log
+*		[in] Pointer to a log object.
+*
+*	p_subn
+*		[in] Pointer to subnet object.
+*
+*	p_mad_addr
+*		[in] Pointer to mad address object.
+*
+* RETURN VALUES
+*	Pointer to the requester port object if found. NULL otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Subnet/osm_get_switch_by_guid
+* NAME
+*	osm_get_switch_by_guid
+*
+* DESCRIPTION
+*	Looks for the given switch GUID in the subnet's table of switches
+*	by GUID.
+*	NOTE: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+struct _osm_switch *
+osm_get_switch_by_guid(
+	IN const osm_subn_t *p_subn,
+	IN uint64_t guid );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to an osm_subn_t object.
+*
+*	guid
+*		[in] The node GUID in host order.
+*
+* RETURN VALUES
+*	The switch structure pointer if found. NULL otherwise.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy,
+*	osm_switch_t
+*********/
+
+/****f* OpenSM: Subnet/osm_get_node_by_guid
+* NAME
+*	osm_get_node_by_guid
+*
+* DESCRIPTION
+*	Looks for the given node GUID in the subnet's table of nodes
+*	by GUID.
+*	NOTE: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+struct _osm_node *
+osm_get_node_by_guid(
+	IN osm_subn_t const *p_subn,
+	IN uint64_t guid );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to an osm_subn_t object.
+*
+*	guid
+*		[in] The node GUID in host order.
+*
+* RETURN VALUES
+*	The node structure pointer if found. NULL otherwise.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy,
+*	osm_node_t
+*********/
+
+/****f* OpenSM: Subnet/osm_get_port_by_guid
+* NAME
+*	osm_get_port_by_guid
+*
+* DESCRIPTION
+*	Looks for the given port GUID in the subnet's table of ports
+*	by GUID.
+*	NOTE: this code is not thread safe; grab the lock before
+*	calling it.
+*
+* SYNOPSIS
+*/
+struct _osm_port *
+osm_get_port_by_guid(
+	IN osm_subn_t const *p_subn,
+	IN uint64_t guid );
+/*
+* PARAMETERS
+*	p_subn
+*		[in] Pointer to an osm_subn_t object.
+*
+*	guid
+*		[in] The port GUID in host order.
+*
+* RETURN VALUES
+*	The port structure pointer if found. NULL otherwise.
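+*
+* EXAMPLE
+*	A lookup sketch (illustrative only; handle_unknown_port is a
+*	hypothetical helper). Note that the guid argument is in host
+*	order, so a network-order GUID taken off the wire must be
+*	converted first:
+*
+*		struct _osm_port *p_port = osm_get_port_by_guid(
+*			p_subn, cl_ntoh64( net_port_guid ) );
+*		if( p_port == NULL )
+*			handle_unknown_port();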
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy,
+*	osm_port_t
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_set_default_opt
+* NAME
+*	osm_subn_set_default_opt
+*
+* DESCRIPTION
+*	The osm_subn_set_default_opt function sets the default options.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_set_default_opt(
+	IN osm_subn_opt_t* const p_opt );
+/*
+* PARAMETERS
+*
+*	p_opt
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_parse_conf_file
+* NAME
+*	osm_subn_parse_conf_file
+*
+* DESCRIPTION
+*	The osm_subn_parse_conf_file function parses the configuration file
+*	and sets the defaults accordingly.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_parse_conf_file(
+	IN osm_subn_opt_t* const p_opt );
+/*
+* PARAMETERS
+*
+*	p_opt
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*	Assumes the conf file resides in the cache directory, which
+*	defaults to OSM_DEFAULT_CACHE_DIR and may be overridden by
+*	OSM_CACHE_DIR; the file name is opensm.opts.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_rescan_conf_file
+* NAME
+*	osm_subn_rescan_conf_file
+*
+* DESCRIPTION
+*	The osm_subn_rescan_conf_file function parses the configuration
+*	file and updates selected subnet options.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_rescan_conf_file(
+	IN osm_subn_opt_t* const p_opts );
+/*
+* PARAMETERS
+*
+*	p_opts
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*	This uses the same file as osm_subn_parse_conf_file().
+*
+*********/
+
+/****f* OpenSM: Subnet/osm_subn_write_conf_file
+* NAME
+*	osm_subn_write_conf_file
+*
+* DESCRIPTION
+*	Writes the configuration file into the cache.
+*
+* SYNOPSIS
+*/
+void
+osm_subn_write_conf_file(
+	IN osm_subn_opt_t* const p_opt );
+/*
+* PARAMETERS
+*
+*	p_opt
+*		[in] Pointer to the subnet options structure.
+*
+* RETURN VALUES
+*	None
+*
+* NOTES
+*	Assumes the conf file resides in the cache directory, which
+*	defaults to OSM_DEFAULT_CACHE_DIR and may be overridden by
+*	OSM_CACHE_DIR; the file name is opensm.opts.
+*
+* SEE ALSO
+*	Subnet object, osm_subn_construct, osm_subn_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SUBNET_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet_config_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet_config_ctrl.h
new file mode 100644
index 00000000..18fddb8c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_subnet_config_ctrl.h
@@ -0,0 +1,287 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_subn_conf_ctrl_t.
+ *	This object represents a controller that initiates configuration
+ *	of the subnet after the discovery phase is complete.
+ *	This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *	Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_SUBNET_CONFIG_CTRL_H_
+#define _OSM_SUBNET_CONFIG_CTRL_H_
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Subnet Configuration Controller
+* NAME
+*	Subnet Configuration Controller
+*
+* DESCRIPTION
+*	The Subnet Configuration Controller object encapsulates the
+*	information needed to initiate a subnet configuration pass.
+*	The Subnet Configuration Controller sends Dispatcher messages
+*	to controllers responsible for configuring LIDs,
+*	switch forwarding tables, etc.
+*
+*	The Subnet Configuration Controller object is thread safe.
+*
+*	This object should be treated as opaque and should be
+*	manipulated only through the provided functions.
+*
+* AUTHOR
+*	Steve King, Intel
+*
+*********/
+/****s* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_t
+* NAME
+*	osm_subn_conf_ctrl_t
+*
+* DESCRIPTION
+*	Subnet Configuration Controller structure.
+*
+*	This object should be treated as opaque and should
+*	be manipulated only through the provided functions.
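+*
+*	A construction sketch (illustrative only; the Subnet, log, lock
+*	and Dispatcher objects are assumed to be owned by the enclosing
+*	OpenSM object):
+*
+*		osm_subn_conf_ctrl_construct( &conf_ctrl );
+*		status = osm_subn_conf_ctrl_init( &conf_ctrl, p_subn,
+*			p_log, p_lock, p_disp );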
+* +* SYNOPSIS +*/ +typedef struct _osm_subn_conf_ctrl +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + cl_plock_t *p_lock; + cl_dispatcher_t *p_disp; + +} osm_subn_conf_ctrl_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* p_disp +* Pointer to the Dispatcher. +* +* SEE ALSO +* Subnet Configuration Controller object +*********/ + +/****f* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_construct +* NAME +* osm_subn_conf_ctrl_construct +* +* DESCRIPTION +* This function constructs a Subnet Configuration Controller object. +* +* SYNOPSIS +*/ +void osm_subn_conf_ctrl_construct( + IN osm_subn_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Subnet Configuration Controller object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_subn_conf_ctrl_init, osm_subn_conf_ctrl_destroy, +* and osm_subn_conf_ctrl_is_inited. +* +* Calling osm_subn_conf_ctrl_construct is a prerequisite to calling any other +* method except osm_subn_conf_ctrl_init. +* +* SEE ALSO +* Subnet Configuration Controller object, osm_subn_conf_ctrl_init, +* osm_subn_conf_ctrl_destroy, osm_subn_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_destroy +* NAME +* osm_subn_conf_ctrl_destroy +* +* DESCRIPTION +* The osm_subn_conf_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_subn_conf_ctrl_destroy( + IN osm_subn_conf_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Subnet Configuration Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_subn_conf_ctrl_construct or osm_subn_conf_ctrl_init. +* +* SEE ALSO +* Subnet Configuration Controller object, osm_subn_conf_ctrl_construct, +* osm_subn_conf_ctrl_init +*********/ + +/****f* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_init +* NAME +* osm_subn_conf_ctrl_init +* +* DESCRIPTION +* The osm_subn_conf_ctrl_init function initializes a +* Subnet Configuration Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_subn_conf_ctrl_init( + IN osm_subn_conf_ctrl_t* const p_ctrl, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_subn_conf_ctrl_t object to initialize. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* IB_SUCCESS if the Subnet Configuration Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other Subnet Configuration Controller methods. +* +* SEE ALSO +* Subnet Configuration Controller object, osm_subn_conf_ctrl_construct, +* osm_subn_conf_ctrl_destroy, osm_subn_conf_ctrl_is_inited +*********/ + +/****f* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_is_inited +* NAME +* osm_subn_conf_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_subn_conf_ctrl_init. 
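+*
+*	For example (illustrative sketch), a caller may guard against use
+*	of an uninitialized controller:
+*
+*		if( !osm_subn_conf_ctrl_is_inited( &conf_ctrl ) )
+*			return( IB_ERROR );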
+*
+* SYNOPSIS
+*/
+boolean_t osm_subn_conf_ctrl_is_inited(
+	IN const osm_subn_conf_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_subn_conf_ctrl_t object.
+*
+* RETURN VALUES
+*	TRUE if the object was initialized successfully,
+*	FALSE otherwise.
+*
+* NOTES
+*	osm_subn_conf_ctrl_construct or osm_subn_conf_ctrl_init must be
+*	called before using this function.
+*
+* SEE ALSO
+*	Subnet Configuration Controller object, osm_subn_conf_ctrl_construct,
+*	osm_subn_conf_ctrl_init
+*********/
+
+/****f* OpenSM: Subnet Configuration Controller/osm_subn_conf_ctrl_process
+* NAME
+*	osm_subn_conf_ctrl_process
+*
+* DESCRIPTION
+*	Initiates a subnet configuration pass.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_subn_conf_ctrl_process(
+	IN const osm_subn_conf_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_subn_conf_ctrl_t object.
+*
+* RETURN VALUES
+*	IB_SUCCESS if configuration processing was successfully
+*	initiated.
+*
+* NOTES
+*	A success status here does not indicate that
+*	the entire subnet configuration process completed successfully.
+*	Only one configuration pass is active at one time.
+*
+* SEE ALSO
+*	Subnet Configuration Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SUBNET_CONFIG_CTRL_H_ */
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv.h
new file mode 100644
index 00000000..3695e3db
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv.h
@@ -0,0 +1,304 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *	Declaration of osm_si_rcv_t.
+ *	This object represents the SwitchInfo Receiver object,
+ *	which receives the SwitchInfo attribute from a node.
+ *	This object is part of the OpenSM family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SI_RCV_H_ +#define _OSM_SI_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Switch Info Receiver +* NAME +* Switch Info Receiver +* +* DESCRIPTION +* The Switch Info Receiver object encapsulates the information +* needed to receive the SwitchInfo attribute from a node. +* +* The Switch Info Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Switch Info Receiver/osm_si_rcv_t +* NAME +* osm_si_rcv_t +* +* DESCRIPTION +* Switch Info Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_si_rcv +{ + osm_subn_t *p_subn; + osm_log_t *p_log; + osm_req_t *p_req; + osm_state_mgr_t *p_state_mgr; + cl_plock_t *p_lock; + +} osm_si_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_log +* Pointer to the log object. +* +* p_req +* Pointer to the Request object. +* +* p_state_mgr +* Pointer to the State Manager object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* Switch Info Receiver object +*********/ + +/****f* OpenSM: Switch Info Receiver/osm_si_rcv_construct +* NAME +* osm_si_rcv_construct +* +* DESCRIPTION +* This function constructs a Switch Info Receiver object. +* +* SYNOPSIS +*/ +void osm_si_rcv_construct( + IN osm_si_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Switch Info Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_si_rcv_init, osm_si_rcv_destroy, +* and osm_si_rcv_is_inited. +* +* Calling osm_si_rcv_construct is a prerequisite to calling any other +* method except osm_si_rcv_init. +* +* SEE ALSO +* Switch Info Receiver object, osm_si_rcv_init, +* osm_si_rcv_destroy, osm_si_rcv_is_inited +*********/ + +/****f* OpenSM: Switch Info Receiver/osm_si_rcv_destroy +* NAME +* osm_si_rcv_destroy +* +* DESCRIPTION +* The osm_si_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_si_rcv_destroy( + IN osm_si_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* Switch Info Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_si_rcv_construct or osm_si_rcv_init. +* +* SEE ALSO +* Switch Info Receiver object, osm_si_rcv_construct, +* osm_si_rcv_init +*********/ + +/****f* OpenSM: Switch Info Receiver/osm_si_rcv_init +* NAME +* osm_si_rcv_init +* +* DESCRIPTION +* The osm_si_rcv_init function initializes a +* Switch Info Receiver object for use. 
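+*
+*	A typical call (illustrative sketch; the core objects are assumed
+*	to be created by the enclosing OpenSM object) wires the receiver
+*	to them:
+*
+*		status = osm_si_rcv_init( &si_rcv, p_subn, p_log,
+*			p_req, p_state_mgr, p_lock );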
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_si_rcv_init(
+	IN osm_si_rcv_t* const p_ctrl,
+	IN osm_subn_t* const p_subn,
+	IN osm_log_t* const p_log,
+	IN osm_req_t* const p_req,
+	IN osm_state_mgr_t* const p_state_mgr,
+	IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_si_rcv_t object to initialize.
+*
+*	p_subn
+*		[in] Pointer to the Subnet object for this subnet.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_req
+*		[in] Pointer to an osm_req_t object.
+*
+*	p_state_mgr
+*		[in] Pointer to the State Manager object.
+*
+*	p_lock
+*		[in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Switch Info Receiver object was initialized
+*	successfully.
+*
+* NOTES
+*	Allows calling other Switch Info Receiver methods.
+*
+* SEE ALSO
+*	Switch Info Receiver object, osm_si_rcv_construct,
+*	osm_si_rcv_destroy, osm_si_rcv_is_inited
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_si_rcv_is_inited
+* NAME
+*	osm_si_rcv_is_inited
+*
+* DESCRIPTION
+*	Indicates if the object has been initialized with osm_si_rcv_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_si_rcv_is_inited(
+	IN const osm_si_rcv_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_si_rcv_t object.
+*
+* RETURN VALUES
+*	TRUE if the object was initialized successfully,
+*	FALSE otherwise.
+*
+* NOTES
+*	osm_si_rcv_construct or osm_si_rcv_init must be
+*	called before using this function.
+*
+* SEE ALSO
+*	Switch Info Receiver object, osm_si_rcv_construct,
+*	osm_si_rcv_init
+*********/
+
+/****f* OpenSM: Switch Info Receiver/osm_si_rcv_process
+* NAME
+*	osm_si_rcv_process
+*
+* DESCRIPTION
+*	Processes the SwitchInfo attribute.
+*
+* SYNOPSIS
+*/
+void osm_si_rcv_process(
+	IN const osm_si_rcv_t* const p_ctrl,
+	IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_si_rcv_t object.
+*
+*	p_madw
+*		[in] Pointer to the MAD Wrapper containing the MAD
+*		that contains the node's SwitchInfo attribute.
+*
+* RETURN VALUES
+*	This function does not return a value.
+*
+* NOTES
+*	This function processes a SwitchInfo attribute.
+*
+* SEE ALSO
+*	Switch Info Receiver, Switch Info Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SI_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv_ctrl.h
new file mode 100644
index 00000000..5e745960
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sw_info_rcv_ctrl.h
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_si_rcv_ctrl_t. + * This object represents a controller that receives the IBA SwitchInfo + * attribute from a node. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_SI_RCV_CTRL_H_ +#define _OSM_SI_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/Switch Info Receive Controller +* NAME +* Switch Info Receive Controller +* +* DESCRIPTION +* The Switch Info Receive Controller object encapsulates +* the information needed to receive the SwitchInfo attribute from a node. +* +* The Switch Info Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****s* OpenSM: Switch Info Receive Controller/osm_si_rcv_ctrl_t +* NAME +* osm_si_rcv_ctrl_t +* +* DESCRIPTION +* Switch Info Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_si_rcv_ctrl +{ + osm_si_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_si_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the Switch Info Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* Switch Info Receive Controller object +* Switch Info Receiver object +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_si_rcv_ctrl_construct +* NAME +* osm_si_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a Switch Info Receive Controller object. +* +* SYNOPSIS +*/ +void osm_si_rcv_ctrl_construct( + IN osm_si_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a Switch Info Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_si_rcv_ctrl_init, osm_si_rcv_ctrl_destroy, +* and osm_si_rcv_ctrl_is_inited. +* +* Calling osm_si_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_si_rcv_ctrl_init. +* +* SEE ALSO +* Switch Info Receive Controller object, osm_si_rcv_ctrl_init, +* osm_si_rcv_ctrl_destroy, osm_si_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: Switch Info Receive Controller/osm_si_rcv_ctrl_destroy +* NAME +* osm_si_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_si_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_si_rcv_ctrl_destroy( + IN osm_si_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. 
+*
+* RETURN VALUE
+*	This function does not return a value.
+*
+* NOTES
+*	Performs any necessary cleanup of the specified
+*	Switch Info Receive Controller object.
+*	Further operations should not be attempted on the destroyed object.
+*	This function should only be called after a call to
+*	osm_si_rcv_ctrl_construct or osm_si_rcv_ctrl_init.
+*
+* SEE ALSO
+*	Switch Info Receive Controller object, osm_si_rcv_ctrl_construct,
+*	osm_si_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Switch Info Receive Controller/osm_si_rcv_ctrl_init
+* NAME
+*	osm_si_rcv_ctrl_init
+*
+* DESCRIPTION
+*	The osm_si_rcv_ctrl_init function initializes a
+*	Switch Info Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_si_rcv_ctrl_init(
+	IN osm_si_rcv_ctrl_t* const p_ctrl,
+	IN osm_si_rcv_t* const p_rcv,
+	IN osm_log_t* const p_log,
+	IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_si_rcv_ctrl_t object to initialize.
+*
+*	p_rcv
+*		[in] Pointer to an osm_si_rcv_t object.
+*
+*	p_log
+*		[in] Pointer to the log object.
+*
+*	p_disp
+*		[in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+*	IB_SUCCESS if the Switch Info Receive Controller object was
+*	initialized successfully.
+*
+* NOTES
+*	Allows calling other Switch Info Receive Controller methods.
+*
+* SEE ALSO
+*	Switch Info Receive Controller object, osm_si_rcv_ctrl_construct,
+*	osm_si_rcv_ctrl_destroy, osm_si_rcv_ctrl_is_inited
+*********/
+
+/****f* OpenSM: Switch Info Receive Controller/osm_si_rcv_ctrl_is_inited
+* NAME
+*	osm_si_rcv_ctrl_is_inited
+*
+* DESCRIPTION
+*	Indicates if the object has been initialized with
+*	osm_si_rcv_ctrl_init.
+*
+* SYNOPSIS
+*/
+boolean_t osm_si_rcv_ctrl_is_inited(
+	IN const osm_si_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+*	p_ctrl
+*		[in] Pointer to an osm_si_rcv_ctrl_t object.
+*
+* RETURN VALUES
+*	TRUE if the object was initialized successfully,
+*	FALSE otherwise.
+*
+* NOTES
+*	osm_si_rcv_ctrl_construct or osm_si_rcv_ctrl_init must be
+*	called before using this function.
+*
+* SEE ALSO
+*	Switch Info Receive Controller object, osm_si_rcv_ctrl_construct,
+*	osm_si_rcv_ctrl_init
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SI_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_sweep_fail_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sweep_fail_ctrl.h
new file mode 100644
index 00000000..3d0d35ec
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_sweep_fail_ctrl.h
@@ -0,0 +1,239 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_sweep_fail_ctrl_t.
+ * This object represents a controller that
+ * handles transport failures during sweeps.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_SWEEP_FAIL_CTRL_H_
+#define _OSM_SWEEP_FAIL_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Sweep Fail Controller
+* NAME
+* Sweep Fail Controller
+*
+* DESCRIPTION
+* The Sweep Fail Controller object encapsulates
+* the information needed to handle transport failures during
+* sweeps.
+*
+* The Sweep Fail Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Sweep Fail Controller/osm_sweep_fail_ctrl_t
+* NAME
+* osm_sweep_fail_ctrl_t
+*
+* DESCRIPTION
+* Sweep Fail Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_sweep_fail_ctrl
+{
+ osm_log_t *p_log;
+ osm_state_mgr_t *p_state_mgr;
+ cl_dispatcher_t *p_disp;
+ cl_disp_reg_handle_t h_disp;
+
+} osm_sweep_fail_ctrl_t;
+/*
+* FIELDS
+* p_log
+* Pointer to the log object.
+*
+* p_state_mgr
+* Pointer to the state manager object.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
+*
+* SEE ALSO
+* Sweep Fail Controller object
+* State Manager object
+*********/
+
+/****f* OpenSM: Sweep Fail Controller/osm_sweep_fail_ctrl_construct
+* NAME
+* osm_sweep_fail_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a Sweep Fail Controller object.
+*
+* SYNOPSIS
+*/
+void
+osm_sweep_fail_ctrl_construct(
+ IN osm_sweep_fail_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Sweep Fail Controller
+* object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_sweep_fail_ctrl_init and osm_sweep_fail_ctrl_destroy.
+*
+* Calling osm_sweep_fail_ctrl_construct is a prerequisite to calling any other
+* method except osm_sweep_fail_ctrl_init.
+*
+* SEE ALSO
+* Sweep Fail Controller object, osm_sweep_fail_ctrl_init,
+* osm_sweep_fail_ctrl_destroy
+*********/
+
+/****f* OpenSM: Sweep Fail Controller/osm_sweep_fail_ctrl_destroy
+* NAME
+* osm_sweep_fail_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_sweep_fail_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_sweep_fail_ctrl_destroy(
+ IN osm_sweep_fail_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
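+*
+* EXAMPLE
+* A minimal lifecycle sketch for this controller, not taken from the
+* OpenSM sources; it assumes p_log, p_state_mgr and p_disp were already
+* initialized elsewhere.
+*
+* osm_sweep_fail_ctrl_t ctrl;
+* ib_api_status_t status;
+*
+* osm_sweep_fail_ctrl_construct( &ctrl );
+* status = osm_sweep_fail_ctrl_init( &ctrl, p_log, p_state_mgr, p_disp );
+* if( status != IB_SUCCESS )
+* ... handle the error; calling destroy afterwards is still safe ...
+* osm_sweep_fail_ctrl_destroy( &ctrl );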
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Sweep Fail Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_sweep_fail_ctrl_construct or osm_sweep_fail_ctrl_init.
+*
+* SEE ALSO
+* Sweep Fail Controller object, osm_sweep_fail_ctrl_construct,
+* osm_sweep_fail_ctrl_init
+*********/
+
+/****f* OpenSM: Sweep Fail Controller/osm_sweep_fail_ctrl_init
+* NAME
+* osm_sweep_fail_ctrl_init
+*
+* DESCRIPTION
+* The osm_sweep_fail_ctrl_init function initializes a
+* Sweep Fail Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_sweep_fail_ctrl_init(
+ IN osm_sweep_fail_ctrl_t* const p_ctrl,
+ IN osm_log_t* const p_log,
+ IN osm_state_mgr_t* const p_state_mgr,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_sweep_fail_ctrl_t object to initialize.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_state_mgr
+* [in] Pointer to the state manager object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Sweep Fail Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Sweep Fail Controller methods.
+*
+* SEE ALSO
+* Sweep Fail Controller object, osm_sweep_fail_ctrl_construct,
+* osm_sweep_fail_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SWEEP_FAIL_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_switch.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_switch.h
new file mode 100644
index 00000000..2f7ca2f1
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_switch.h
@@ -0,0 +1,1552 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_switch_t.
+ * This object represents an IBA switch.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.6 $
+ */
+
+#ifndef _OSM_SWITCH_H_
+#define _OSM_SWITCH_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Switch
+* NAME
+* Switch
+*
+* DESCRIPTION
+* The Switch object encapsulates the information needed by the
+* OpenSM to manage switches. The OpenSM allocates one switch object
+* per switch in the IBA subnet.
+*
+* The Switch object is not thread safe, thus callers must provide
+* serialization.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Switch/osm_switch_t
+* NAME
+* osm_switch_t
+*
+* DESCRIPTION
+* Switch structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_switch
+{
+ cl_map_item_t map_item;
+ osm_node_t *p_node;
+ ib_switch_info_t switch_info;
+ osm_fwd_tbl_t fwd_tbl;
+ osm_lid_matrix_t lmx;
+ uint16_t max_lid_ho;
+ osm_port_profile_t *p_prof;
+ osm_mcast_tbl_t mcast_tbl;
+ uint32_t discovery_count;
+} osm_switch_t;
+/*
+* FIELDS
+* map_item
+* Linkage structure for cl_qmap. MUST BE FIRST MEMBER!
+*
+* p_node
+* Pointer to the Node object for this switch.
+*
+* switch_info
+* IBA defined SwitchInfo structure for this switch.
+*
+* fwd_tbl
+* This switch's forwarding table.
+*
+* lmx
+* LID Matrix for this switch containing the hop count
+* to every LID from every port.
+*
+* max_lid_ho
+* Max LID that is accessible from this switch.
+*
+* p_prof
+* Pointer to array of Port Profile objects for this switch.
+*
+* mcast_tbl
+* Multicast forwarding table for this switch.
+*
+* discovery_count
+* The number of times this switch has been discovered
+* during the current fabric sweep. This number is reset
+* to zero at the start of a sweep.
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_construct
+* NAME
+* osm_switch_construct
+*
+* DESCRIPTION
+* This function constructs a Switch object.
+*
+* SYNOPSIS
+*/
+void
+osm_switch_construct(
+ IN osm_switch_t* const p_sw );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_switch_init, and osm_switch_destroy.
+*
+* Calling osm_switch_construct is a prerequisite to calling any other
+* method except osm_switch_init.
+*
+* SEE ALSO
+* Switch object, osm_switch_init, osm_switch_destroy
+*********/
+
+/****f* OpenSM: Switch/osm_switch_destroy
+* NAME
+* osm_switch_destroy
+*
+* DESCRIPTION
+* The osm_switch_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_switch_destroy(
+ IN osm_switch_t* const p_sw );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+* Performs any necessary cleanup of the specified object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to osm_switch_construct
+* or osm_switch_init.
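+*
+* EXAMPLE
+* An illustrative allocation/teardown pairing (not from the OpenSM
+* sources); p_node and p_madw are assumed to come from the discovery
+* path that received the SwitchInfo attribute.
+*
+* osm_switch_t *p_sw = osm_switch_new( p_node, p_madw );
+* if( p_sw == NULL )
+* return IB_INSUFFICIENT_MEMORY;
+* ... use the switch object ...
+* osm_switch_delete( &p_sw );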
+*
+* SEE ALSO
+* Switch object, osm_switch_construct, osm_switch_init
+*********/
+
+/****f* OpenSM: Switch/osm_switch_delete
+* NAME
+* osm_switch_delete
+*
+* DESCRIPTION
+* Destroys and deallocates the object.
+*
+* SYNOPSIS
+*/
+void
+osm_switch_delete(
+ IN OUT osm_switch_t** const pp_sw );
+/*
+* PARAMETERS
+* pp_sw
+* [in out] Pointer to a pointer to the object to destroy.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object, osm_switch_construct, osm_switch_init
+*********/
+
+/****f* OpenSM: Switch/osm_switch_init
+* NAME
+* osm_switch_init
+*
+* DESCRIPTION
+* The osm_switch_init function initializes a Switch object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_switch_init(
+ IN osm_switch_t* const p_sw,
+ IN osm_node_t* const p_node,
+ IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object to initialize.
+*
+* p_node
+* [in] Pointer to the node object of this switch.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the switch's
+* SwitchInfo attribute.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Switch object was initialized successfully.
+*
+* NOTES
+* Allows calling other switch methods.
+*
+* SEE ALSO
+* Switch object, osm_switch_construct, osm_switch_destroy
+*********/
+
+/****f* OpenSM: Switch/osm_switch_new
+* NAME
+* osm_switch_new
+*
+* DESCRIPTION
+* The osm_switch_new function allocates and initializes a Switch
+* object for use.
+*
+* SYNOPSIS
+*/
+osm_switch_t*
+osm_switch_new(
+ IN osm_node_t* const p_node,
+ IN const osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_node
+* [in] Pointer to the node object of this switch.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the switch's
+* SwitchInfo attribute.
+*
+* RETURN VALUES
+* Pointer to the newly initialized switch object.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object, osm_switch_construct, osm_switch_destroy
+*********/
+
+/****f* OpenSM: Switch/osm_switch_is_leaf_lid
+* NAME
+* osm_switch_is_leaf_lid
+*
+* DESCRIPTION
+* Indicates if the specified LID is the switch's LID, or is a leaf
+* of the switch.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_switch_is_leaf_lid(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho )
+{
+ return( osm_lid_matrix_get_least_hops( &p_sw->lmx, lid_ho ) <= 1 );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* lid_ho
+* [in] LID (host order) to compare.
+*
+* RETURN VALUES
+* TRUE if the LID is the switch's LID or is a leaf of the switch,
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_hop_count
+* NAME
+* osm_switch_get_hop_count
+*
+* DESCRIPTION
+* Returns the hop count at the specified LID/Port intersection.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_switch_get_hop_count(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho,
+ IN const uint8_t port_num )
+{
+ return( osm_lid_matrix_get( &p_sw->lmx, lid_ho, port_num ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object.
+*
+* lid_ho
+* [in] LID value (host order) for which to return the hop count.
+*
+* port_num
+* [in] Port number in the switch.
+*
+* RETURN VALUES
+* Returns the hop count at the specified LID/Port intersection.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_fwd_tbl_ptr
+* NAME
+* osm_switch_get_fwd_tbl_ptr
+*
+* DESCRIPTION
+* Returns a pointer to the switch's forwarding table.
+*
+* SYNOPSIS
+*/
+static inline osm_fwd_tbl_t*
+osm_switch_get_fwd_tbl_ptr(
+ IN const osm_switch_t* const p_sw )
+{
+ return( (osm_fwd_tbl_t*)&p_sw->fwd_tbl );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object.
+*
+* RETURN VALUES
+* Returns a pointer to the switch's forwarding table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_set_hops
+* NAME
+* osm_switch_set_hops
+*
+* DESCRIPTION
+* Sets the hop count at the specified LID/Port intersection.
+*
+* SYNOPSIS
+*/
+static inline cl_status_t
+osm_switch_set_hops(
+ IN osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho,
+ IN const uint8_t port_num,
+ IN const uint8_t num_hops )
+{
+ return( osm_lid_matrix_set( &p_sw->lmx, lid_ho, port_num, num_hops ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object.
+*
+* lid_ho
+* [in] LID value (host order) for which to set the count.
+*
+* port_num
+* [in] Port number for which to set the count.
+*
+* num_hops
+* [in] Value to assign to this entry.
+*
+* RETURN VALUES
+* Returns CL_SUCCESS if the hop count was set successfully.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_set_min_lid_size
+* NAME
+* osm_switch_set_min_lid_size
+*
+* DESCRIPTION
+* Sets the size of the switch's routing table to at least accommodate the
+* specified LID value (host ordered).
+*
+* SYNOPSIS
+*/
+static inline cl_status_t
+osm_switch_set_min_lid_size(
+ IN osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho )
+{
+ return( osm_lid_matrix_set_min_lid_size( &p_sw->lmx, lid_ho ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object.
+*
+* lid_ho
+* [in] LID value (host order) that the routing table must accommodate.
+*
+* RETURN VALUES
+* Returns CL_SUCCESS if the routing table size was set successfully.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_least_hops
+* NAME
+* osm_switch_get_least_hops
+*
+* DESCRIPTION
+* Returns the number of hops in the shortest path to this LID from
+* any port on the switch.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_switch_get_least_hops(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho )
+{
+ return( osm_lid_matrix_get_least_hops( &p_sw->lmx, lid_ho ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* lid_ho
+* [in] LID (host order) for which to retrieve the shortest hop count.
+*
+* RETURN VALUES
+* Returns the number of hops in the shortest path to this LID from
+* any port on the switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_port_by_lid
+* NAME
+* osm_switch_get_port_by_lid
+*
+* DESCRIPTION
+* Returns the switch port number on which the specified LID is routed.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_switch_get_port_by_lid(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho )
+{
+ return( osm_fwd_tbl_get( &p_sw->fwd_tbl, lid_ho ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* lid_ho
+* [in] LID (host order) for which to retrieve the route port.
+*
+* RETURN VALUES
+* Returns the switch port on which the specified LID is routed.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_physp_ptr
+* NAME
+* osm_switch_get_physp_ptr
+*
+* DESCRIPTION
+* Gets the Physical Port Object at the specified port number.
+*
+* SYNOPSIS
+*/
+osm_physp_t*
+osm_switch_get_physp_ptr(
+ IN const osm_switch_t* const p_sw,
+ IN const uint32_t port_num );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* port_num
+* [in] Port number for which to retrieve the Physical Port Object.
+*
+* RETURN VALUES
+* Returns a pointer to the Physical Port object at the specified
+* port number.
+* A NULL return value means the port number was out of range.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_route_by_lid
+* NAME
+* osm_switch_get_route_by_lid
+*
+* DESCRIPTION
+* Gets the physical port object that routes the specified LID.
+*
+* SYNOPSIS
+*/
+static inline osm_physp_t*
+osm_switch_get_route_by_lid(
+ IN const osm_switch_t* const p_sw,
+ IN const ib_net16_t lid )
+{
+ uint8_t port_num;
+
+ CL_ASSERT( p_sw );
+ CL_ASSERT( lid );
+
+ port_num = osm_fwd_tbl_get( &p_sw->fwd_tbl, cl_ntoh16( lid ) );
+ /*
+ In order to avoid holes in the subnet (which usually happen when
+ running the UPDN algorithm), i.e. cases where a port is
+ unreachable through a switch (we put an OSM_NO_PATH value at
+ the port entry), we do not assert on unreachable LID entries
+ in the forwarding table but return NULL.
+ */
+ if (port_num != OSM_NO_PATH)
+ return( osm_node_get_physp_ptr( p_sw->p_node, port_num ) );
+ else
+ return NULL;
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* lid
+* [in] LID for which to find a route. This must be a unicast
+* LID value < 0xC000.
+*
+* RETURN VALUES
+* Returns a pointer to the Physical Port object that
+* routes the specified LID. A NULL return value means
+* there is no route for the LID through this switch.
+* The LID value must be a unicast LID.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_si_ptr
+* NAME
+* osm_switch_get_si_ptr
+*
+* DESCRIPTION
+* Returns a pointer to the SwitchInfo for this switch.
+*
+* SYNOPSIS
+*/
+static inline ib_switch_info_t*
+osm_switch_get_si_ptr(
+ IN const osm_switch_t* const p_sw )
+{
+ return( (ib_switch_info_t*)&p_sw->switch_info );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns a pointer to the SwitchInfo for this switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_sp0_is_lmc_capable
+* NAME
+* osm_switch_sp0_is_lmc_capable
+*
+* DESCRIPTION
+* Returns whether switch port 0 (SP0) can support LMC.
+*
+* SYNOPSIS
+*/
+static inline unsigned
+osm_switch_sp0_is_lmc_capable(
+ IN const osm_switch_t* const p_sw,
+ IN osm_subn_t *p_subn)
+{
+ return (p_subn->opt.lmc_esp0 &&
+ ib_switch_info_is_enhanced_port0(&p_sw->switch_info)) ? 1 : 0;
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* p_subn
+* [in] Pointer to an osm_subn_t object.
+*
+* RETURN VALUES
+* 1 if SP0 is enhanced and globally enabled, 0 otherwise.
+*
+* NOTES
+* This is a workaround function; it takes into account the user-defined
+* p_subn->opt.lmc_esp0 parameter.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_max_block_id
+* NAME
+* osm_switch_get_max_block_id
+*
+* DESCRIPTION
+* Returns the maximum block ID (host order) of this switch.
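+*
+* Linear forwarding tables are programmed in fixed-size blocks of LIDs,
+* so the block that covers a given LID can be computed as in the sketch
+* below (illustrative only, assuming the block size reported by the
+* forwarding table object):
+*
+* uint16_t lids_per_block =
+* osm_fwd_tbl_get_lids_per_block( &p_sw->fwd_tbl );
+* uint32_t block_id = lid_ho / lids_per_block;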
+*
+* SYNOPSIS
+*/
+static inline uint32_t
+osm_switch_get_max_block_id(
+ IN const osm_switch_t* const p_sw )
+{
+ return( (uint32_t)(osm_fwd_tbl_get_size( &p_sw->fwd_tbl ) /
+ osm_fwd_tbl_get_lids_per_block( &p_sw->fwd_tbl ) ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns the maximum block ID (host order) of this switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_max_block_id_in_use
+* NAME
+* osm_switch_get_max_block_id_in_use
+*
+* DESCRIPTION
+* Returns the maximum block ID (host order) of this switch that
+* is used for unicast routing.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_switch_get_max_block_id_in_use(
+ IN const osm_switch_t* const p_sw )
+{
+ return( osm_fwd_tbl_get_max_block_id_in_use( &p_sw->fwd_tbl,
+ cl_ntoh16( p_sw->switch_info.lin_top ) ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns the maximum block ID (host order) in use by this switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_node_ptr
+* NAME
+* osm_switch_get_node_ptr
+*
+* DESCRIPTION
+* Returns a pointer to the Node object for this switch.
+*
+* SYNOPSIS
+*/
+static inline osm_node_t*
+osm_switch_get_node_ptr(
+ IN const osm_switch_t* const p_sw )
+{
+ return( p_sw->p_node );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns a pointer to the Node object for this switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_max_lid_ho
+* NAME
+* osm_switch_get_max_lid_ho
+*
+* DESCRIPTION
+* Returns the maximum LID (host order) value contained
+* in the switch routing tables.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_switch_get_max_lid_ho(
+ IN const osm_switch_t* const p_sw )
+{
+ if (p_sw->max_lid_ho != 0)
+ return p_sw->max_lid_ho;
+ return( osm_lid_matrix_get_max_lid_ho( &p_sw->lmx ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a switch object.
+*
+* RETURN VALUES
+* Returns the maximum LID (host order) value contained
+* in the switch routing tables.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_num_ports
+* NAME
+* osm_switch_get_num_ports
+*
+* DESCRIPTION
+* Returns the number of ports in this switch.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_switch_get_num_ports(
+ IN const osm_switch_t* const p_sw )
+{
+ return( osm_lid_matrix_get_num_ports( &p_sw->lmx ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns the number of ports in this switch.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_fwd_tbl_block
+* NAME
+* osm_switch_get_fwd_tbl_block
+*
+* DESCRIPTION
+* Retrieve a forwarding table block.
+*
+* SYNOPSIS
+*/
+boolean_t
+osm_switch_get_fwd_tbl_block(
+ IN const osm_switch_t* const p_sw,
+ IN const uint32_t block_id,
+ OUT uint8_t* const p_block );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* block_id
+* [in] The block ID to retrieve.
+*
+* p_block
+* [out] Pointer to the 64 byte array to store the
+* forwarding table block specified by block_id.
+*
+* RETURN VALUES
+* Returns TRUE if there are more blocks necessary to
+* configure all the LIDs reachable from this switch,
+* FALSE otherwise.
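+*
+* EXAMPLE
+* An illustrative loop (not from the OpenSM sources) that walks the
+* forwarding table blocks, e.g. to build LinearForwardingTable MADs;
+* send_lft_block is a hypothetical sender supplied by the caller.
+*
+* uint8_t block[IB_SMP_DATA_SIZE];
+* uint32_t block_id = 0;
+* while( osm_switch_get_fwd_tbl_block( p_sw, block_id, block ) )
+* {
+* send_lft_block( p_sw, block_id, block );
+* block_id++;
+* }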
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_supports_mcast
+* NAME
+* osm_switch_supports_mcast
+*
+* DESCRIPTION
+* Indicates if a switch supports multicast.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_switch_supports_mcast(
+ IN const osm_switch_t* const p_sw )
+{
+ return( p_sw->switch_info.mcast_cap != 0 );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns TRUE if the switch supports multicast,
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_set_switch_info
+* NAME
+* osm_switch_set_switch_info
+*
+* DESCRIPTION
+* Updates the switch info attribute of this switch.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_switch_set_switch_info(
+ IN osm_switch_t* const p_sw,
+ IN const ib_switch_info_t* const p_si )
+{
+ CL_ASSERT( p_sw );
+ CL_ASSERT( p_si );
+ p_sw->switch_info = *p_si;
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to a Switch object.
+*
+* p_si
+* [in] Pointer to the SwitchInfo attribute for this switch.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_count_path
+* NAME
+* osm_switch_count_path
+*
+* DESCRIPTION
+* Counts this path in the port profile.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_switch_count_path(
+ IN osm_switch_t* const p_sw,
+ IN const uint8_t port
+ )
+{
+ osm_port_prof_path_count_inc( &p_sw->p_prof[port] );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* port
+* [in] Port on which to count the path.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_set_ft_block
+* NAME
+* osm_switch_set_ft_block
+*
+* DESCRIPTION
+* Copies in the specified block into the switch's Forwarding Table object.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+osm_switch_set_ft_block(
+ IN osm_switch_t* const p_sw,
+ IN const uint8_t* const p_block,
+ IN const uint32_t block_num )
+{
+ CL_ASSERT( p_sw );
+ return( osm_fwd_tbl_set_block( &p_sw->fwd_tbl, p_block, block_num ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* p_block
+* [in] Pointer to the forwarding table block.
+*
+* block_num
+* [in] Block number for this block.
+*
+* RETURN VALUE
+* IB_SUCCESS on success.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_set_mft_block
+* NAME
+* osm_switch_set_mft_block
+*
+* DESCRIPTION
+* Sets a block of multicast port masks into the multicast table.
+*
+* SYNOPSIS
+*/
+static inline ib_api_status_t
+osm_switch_set_mft_block(
+ IN osm_switch_t* const p_sw,
+ IN const ib_net16_t* const p_block,
+ IN const uint16_t block_num,
+ IN const uint8_t position )
+{
+ CL_ASSERT( p_sw );
+ return( osm_mcast_tbl_set_block( &p_sw->mcast_tbl, p_block,
+ block_num, position ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* p_block
+* [in] Pointer to the block of port masks to set.
+*
+* block_num
+* [in] Block number (0-511) to set.
+*
+* position
+* [in] Port mask position (0-15) to set.
+*
+* RETURN VALUE
+* IB_SUCCESS on success.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mft_block
+* NAME
+* osm_switch_get_mft_block
+*
+* DESCRIPTION
+* Retrieve a block of multicast port masks from the multicast table.
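+*
+* An illustrative dump loop over port-mask positions and blocks (a
+* sketch only, not taken from the OpenSM sources):
+*
+* ib_net16_t block[IB_MCAST_BLOCK_SIZE];
+* uint8_t position;
+* uint16_t block_num;
+* for( position = 0;
+* position <= osm_switch_get_mft_max_position( p_sw ); position++ )
+* for( block_num = 0;
+* osm_switch_get_mft_block( p_sw, block_num, position, block );
+* block_num++ )
+* ... program this block into the switch ...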
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_switch_get_mft_block(
+ IN osm_switch_t* const p_sw,
+ IN const uint16_t block_num,
+ IN const uint8_t position,
+ OUT ib_net16_t* const p_block )
+{
+ CL_ASSERT( p_sw );
+ return( osm_mcast_tbl_get_block( &p_sw->mcast_tbl,
+ block_num, position, p_block ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* block_num
+* [in] Block number (0-511) to get.
+*
+* position
+* [in] Port mask position (0-15) to get.
+*
+* p_block
+* [out] Pointer to the block of port masks stored.
+*
+* RETURN VALUES
+* Returns TRUE if there are more blocks necessary to
+* configure all the MLIDs reachable from this switch,
+* FALSE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mft_max_block
+* NAME
+* osm_switch_get_mft_max_block
+*
+* DESCRIPTION
+* Get the max_block from the associated multicast table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_switch_get_mft_max_block(
+ IN osm_switch_t* const p_sw )
+{
+ CL_ASSERT( p_sw );
+ return( osm_mcast_tbl_get_max_block( &p_sw->mcast_tbl ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* RETURN VALUE
+* Returns the max_block of this switch's multicast table.
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mft_max_block_in_use
+* NAME
+* osm_switch_get_mft_max_block_in_use
+*
+* DESCRIPTION
+* Get the max_block_in_use from the associated multicast table.
+*
+* SYNOPSIS
+*/
+static inline int16_t
+osm_switch_get_mft_max_block_in_use(
+ IN osm_switch_t* const p_sw )
+{
+ CL_ASSERT( p_sw );
+ return( osm_mcast_tbl_get_max_block_in_use( &p_sw->mcast_tbl ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* RETURN VALUES
+* Returns the maximum block ID in use in this switch's mcast table.
+* A value of -1 indicates no blocks are in use.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mft_max_position
+* NAME
+* osm_switch_get_mft_max_position
+*
+* DESCRIPTION
+* Get the max_position from the associated multicast table.
+*
+* SYNOPSIS
+*/
+static inline uint8_t
+osm_switch_get_mft_max_position(
+ IN osm_switch_t* const p_sw )
+{
+ CL_ASSERT( p_sw );
+ return( osm_mcast_tbl_get_max_position( &p_sw->mcast_tbl ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* RETURN VALUE
+* Returns the max_position of this switch's multicast table.
+*********/
+
+/****f* OpenSM: Switch/osm_switch_recommend_path
+* NAME
+* osm_switch_recommend_path
+*
+* DESCRIPTION
+* Returns the recommended port on which to route this LID.
+* When LMC > 0, the remote systems and nodes already used for
+* routing are tracked in the provided arrays (and counts) so that
+* routing for the other LIDs of the same target port tries to
+* avoid going through the same remote system/node.
+*
+* SYNOPSIS
+*/
+uint8_t
+osm_switch_recommend_path(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho,
+ IN const boolean_t ignore_existing,
+ IN OUT uint64_t *remote_sys_guids,
+ IN OUT uint16_t *p_num_used_sys,
+ IN OUT uint64_t *remote_node_guids,
+ IN OUT uint16_t *p_num_used_nodes
+ );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* lid_ho
+* [in] LID value (host order) for which to get a path advisory.
+*
+* ignore_existing
+* [in] Set to cause the switch to choose the optimal route
+* regardless of existing paths.
+* If false, the switch will choose an existing route if one
+* exists, otherwise it will choose the optimal route.
+*
+* remote_sys_guids
+* [in out] The array of remote system GUIDs already used to
+* route the other LIDs of the same target port (if LMC > 0).
+*
+* p_num_used_sys
+* [in out] The number of remote systems used for routing to
+* the port.
+*
+* remote_node_guids
+* [in out] The array of remote node GUIDs already used to route
+* the other LIDs of the same target port (if LMC > 0).
+*
+* p_num_used_nodes
+* [in out] The number of remote nodes used for routing to
+* the port.
+*
+* RETURN VALUE
+* Returns the recommended port on which to route this LID.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_recommend_mcast_path
+* NAME
+* osm_switch_recommend_mcast_path
+*
+* DESCRIPTION
+* Returns the recommended port on which to route this LID.
+*
+* SYNOPSIS
+*/
+uint8_t
+osm_switch_recommend_mcast_path(
+ IN osm_switch_t* const p_sw,
+ IN const uint16_t lid_ho,
+ IN const uint16_t mlid_ho,
+ IN const boolean_t ignore_existing );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch object.
+*
+* lid_ho
+* [in] LID value (host order) of the node for which to get
+* the multicast path.
+*
+* mlid_ho
+* [in] MLID for the multicast group in question.
+*
+* ignore_existing
+* [in] Set to cause the switch to choose the optimal route
+* regardless of existing paths.
+* If false, the switch will choose an existing route if one exists,
+* otherwise it will choose the optimal route.
+*
+* RETURN VALUE
+* Returns the recommended port on which to route this LID.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_fwd_tbl_size
+* NAME
+* osm_switch_get_fwd_tbl_size
+*
+* DESCRIPTION
+* Returns the number of entries available in the forwarding table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_switch_get_fwd_tbl_size(
+ IN const osm_switch_t* const p_sw )
+{
+ return( osm_fwd_tbl_get_size( &p_sw->fwd_tbl ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch.
+*
+* RETURN VALUE
+* Returns the number of entries available in the forwarding table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mcast_fwd_tbl_size
+* NAME
+* osm_switch_get_mcast_fwd_tbl_size
+*
+* DESCRIPTION
+* Returns the number of entries available in the multicast forwarding table.
+*
+* SYNOPSIS
+*/
+static inline uint16_t
+osm_switch_get_mcast_fwd_tbl_size(
+ IN const osm_switch_t* const p_sw )
+{
+ return( cl_ntoh16( p_sw->switch_info.mcast_cap ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch.
+*
+* RETURN VALUE
+* Returns the number of entries available in the multicast forwarding table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_path_count_get
+* NAME
+* osm_switch_path_count_get
+*
+* DESCRIPTION
+* Returns the count of the number of paths going through this port.
+*
+* SYNOPSIS
+*/
+static inline uint32_t
+osm_switch_path_count_get(
+ IN const osm_switch_t* const p_sw,
+ IN const uint8_t port_num )
+{
+ return( osm_port_prof_path_count_get( &p_sw->p_prof[port_num] ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the Switch object.
+*
+* port_num
+* [in] Port number for which to get the path count.
+*
+* RETURN VALUE
+* Returns the count of the number of paths going through this port.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_prepare_path_rebuild
+* NAME
+* osm_switch_prepare_path_rebuild
+*
+* DESCRIPTION
+* Prepares a switch to rebuild pathing information.
+*
+* SYNOPSIS
+*/
+void
+osm_switch_prepare_path_rebuild(
+ IN osm_switch_t* const p_sw );
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the Switch object.
+*
+* RETURN VALUE
+* None.
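+*
+* EXAMPLE
+* A simplified, illustrative routing pass (not the actual OpenSM
+* algorithm): clear the old port profiles, then ask the switch for a
+* port per LID. NULL GUID-tracking arrays are passed here; a real
+* caller supplies them when LMC > 0.
+*
+* uint16_t lid_ho;
+* osm_switch_prepare_path_rebuild( p_sw );
+* for( lid_ho = 1; lid_ho <= osm_switch_get_max_lid_ho( p_sw ); lid_ho++ )
+* {
+* uint8_t port = osm_switch_recommend_path( p_sw, lid_ho, FALSE,
+* NULL, NULL, NULL, NULL );
+* if( port != OSM_NO_PATH )
+* osm_switch_count_path( p_sw, port );
+* }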
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_get_mcast_tbl_ptr
+* NAME
+* osm_switch_get_mcast_tbl_ptr
+*
+* DESCRIPTION
+* Returns a pointer to the switch's multicast table.
+*
+* SYNOPSIS
+*/
+static inline osm_mcast_tbl_t*
+osm_switch_get_mcast_tbl_ptr(
+ IN const osm_switch_t* const p_sw )
+{
+ return( (osm_mcast_tbl_t*)&p_sw->mcast_tbl );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch.
+*
+* RETURN VALUE
+* Returns a pointer to the switch's multicast table.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_is_in_mcast_tree
+* NAME
+* osm_switch_is_in_mcast_tree
+*
+* DESCRIPTION
+* Returns TRUE if this switch already belongs in the tree for the specified
+* multicast group.
+*
+* SYNOPSIS
+*/
+static inline boolean_t
+osm_switch_is_in_mcast_tree(
+ IN const osm_switch_t* const p_sw,
+ IN const uint16_t mlid_ho )
+{
+ /* Any port set for this MLID means the switch is already in the tree.
+ The mcast_tbl member is embedded in the switch, so no NULL check
+ is needed. */
+ return( osm_mcast_tbl_is_any_port( &p_sw->mcast_tbl, mlid_ho ) );
+}
+/*
+* PARAMETERS
+* p_sw
+* [in] Pointer to the switch.
+*
+* mlid_ho
+* [in] MLID (host order) of the multicast tree to check.
+*
+* RETURN VALUE
+* Returns TRUE if this switch already belongs in the tree for the specified
+* multicast group.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM: Switch/osm_switch_discovery_count_get
+* NAME
+* osm_switch_discovery_count_get
+*
+* DESCRIPTION
+* Returns the number of times this switch has been discovered
+* during the current fabric sweep.
+*
+* SYNOPSIS
+*/
+static inline uint32_t
+osm_switch_discovery_count_get(
+ IN const osm_switch_t* const p_switch )
+{
+ return( p_switch->discovery_count );
+}
+/*
+* PARAMETERS
+* p_switch
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* Returns the discovery count for this switch.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_discovery_count_reset
+* NAME
+* osm_switch_discovery_count_reset
+*
+* DESCRIPTION
+* Resets the discovery count for this switch to zero.
+* This operation should be performed at the start of a sweep.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_switch_discovery_count_reset(
+ IN osm_switch_t* const p_switch )
+{
+ p_switch->discovery_count = 0;
+}
+/*
+* PARAMETERS
+* p_switch
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+/****f* OpenSM: Switch/osm_switch_discovery_count_inc
+* NAME
+* osm_switch_discovery_count_inc
+*
+* DESCRIPTION
+* Increments the discovery count for this switch.
+*
+* SYNOPSIS
+*/
+static inline void
+osm_switch_discovery_count_inc(
+ IN osm_switch_t* const p_switch )
+{
+ p_switch->discovery_count++;
+}
+/*
+* PARAMETERS
+* p_switch
+* [in] Pointer to an osm_switch_t object.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+* Switch object
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_SWITCH_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv.h
new file mode 100644
index 00000000..5f8e7274
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv.h
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_trap_rcv_t.
+ * This object represents the Trap Receiver object.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#ifndef _OSM_TRAP_RCV_H_
+#define _OSM_TRAP_RCV_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Trap Receiver
+* NAME
+* Trap Receiver
+*
+* DESCRIPTION
+* The Trap Receiver object encapsulates the information
+* needed to receive the Trap attribute from a node.
+*
+* The Trap Receiver object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Trap Receiver/osm_trap_rcv_t
+* NAME
+* osm_trap_rcv_t
+*
+* DESCRIPTION
+* Trap Receiver structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_trap_rcv
+{
+ osm_subn_t *p_subn;
+ osm_stats_t *p_stats;
+ osm_log_t *p_log;
+ osm_resp_t *p_resp;
+ osm_state_mgr_t *p_state_mgr;
+ cl_plock_t *p_lock;
+ cl_event_wheel_t trap_aging_tracker;
+} osm_trap_rcv_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_stats
+* Pointer to the OpenSM statistics block.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_resp
+* Pointer to the generic MAD responder object.
+*
+* p_state_mgr
+* Pointer to the State Manager object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* trap_aging_tracker
+* An event wheel tracking received traps and their aging.
+* A timer is started each time a specific trap is received, so
+* the next occurrence of that trap can be checked against its
+* expiration.
+*
+* SEE ALSO
+* Trap Receiver object
+*********/
+
+/****f* OpenSM: Trap Receiver/osm_trap_rcv_construct
+* NAME
+* osm_trap_rcv_construct
+*
+* DESCRIPTION
+* This function constructs a Trap Receiver object.
+*
+* SYNOPSIS
+*/
+void osm_trap_rcv_construct(
+ IN osm_trap_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+* p_rcv
+* [in] Pointer to a Trap Receiver object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_trap_rcv_init and osm_trap_rcv_destroy.
+*
+* Calling osm_trap_rcv_construct is a prerequisite to calling any other
+* method except osm_trap_rcv_init.
+*
+* SEE ALSO
+* Trap Receiver object, osm_trap_rcv_init,
+* osm_trap_rcv_destroy
+*********/
+
+/****f* OpenSM: Trap Receiver/osm_trap_rcv_destroy
+* NAME
+* osm_trap_rcv_destroy
+*
+* DESCRIPTION
+* The osm_trap_rcv_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void osm_trap_rcv_destroy(
+ IN osm_trap_rcv_t* const p_rcv );
+/*
+* PARAMETERS
+* p_rcv
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Trap Receiver object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_trap_rcv_construct or osm_trap_rcv_init.
+*
+* SEE ALSO
+* Trap Receiver object, osm_trap_rcv_construct,
+* osm_trap_rcv_init
+*********/
+
+/****f* OpenSM: Trap Receiver/osm_trap_rcv_init
+* NAME
+* osm_trap_rcv_init
+*
+* DESCRIPTION
+* The osm_trap_rcv_init function initializes a
+* Trap Receiver object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t osm_trap_rcv_init(
+ IN osm_trap_rcv_t* const p_rcv,
+ IN osm_subn_t* const p_subn,
+ IN osm_stats_t* const p_stats,
+ IN osm_resp_t* const p_resp,
+ IN osm_log_t* const p_log,
+ IN osm_state_mgr_t* const p_state_mgr,
+ IN cl_plock_t* const p_lock );
+/*
+* PARAMETERS
+* p_rcv
+* [in] Pointer to an osm_trap_rcv_t object to initialize.
+*
+* p_subn
+* [in] Pointer to the Subnet object for this subnet.
+*
+* p_stats
+* [in] Pointer to the OpenSM statistics block.
+*
+* p_resp
+* [in] Pointer to the generic MAD Responder object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_state_mgr
+* [in] Pointer to the State Manager object.
+*
+* p_lock
+* [in] Pointer to the OpenSM serializing lock.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Trap Receiver object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Trap Receiver methods.
+*
+* SEE ALSO
+* Trap Receiver object, osm_trap_rcv_construct,
+* osm_trap_rcv_destroy
+*********/
+
+/****f* OpenSM: Trap Receiver/osm_trap_rcv_process
+* NAME
+* osm_trap_rcv_process
+*
+* DESCRIPTION
+* Process the Trap attribute.
+*
+* SYNOPSIS
+*/
+void osm_trap_rcv_process(
+ IN osm_trap_rcv_t* const p_rcv,
+ IN osm_madw_t* const p_madw );
+/*
+* PARAMETERS
+* p_rcv
+* [in] Pointer to an osm_trap_rcv_t object.
+*
+* p_madw
+* [in] Pointer to the MAD Wrapper containing the MAD
+* that contains the node's Trap attribute.
+*
+* RETURN VALUES
+* This function does not return a value.
+*
+* NOTES
+* This function processes a Trap attribute.
+*
+* SEE ALSO
+* Trap Receiver, Trap Response Controller
+*********/
+
+/****f* OpenSM: Trap Receiver/osm_trap_rcv_aging_tracker_callback
+* NAME
+* osm_trap_rcv_aging_tracker_callback
+*
+* DESCRIPTION
+* Callback function called by the aging tracker mechanism.
+*
+* SYNOPSIS
+*/
+uint64_t
+osm_trap_rcv_aging_tracker_callback(
+ IN uint64_t key,
+ IN uint32_t num_regs,
+ IN void* context );
+
+/*
+* PARAMETERS
+* key
+* [in] The key by which the event was inserted.
+*
+* num_regs
+* [in] The number of times the same event (key) was registered.
+*
+* context
+* [in] Pointer to the context given in the registering of the event.
+*
+* RETURN VALUES
+* The aging time with which to re-register the event; zero means
+* the event is not re-registered.
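+*
+* EXAMPLE
+* An illustrative callback body (an assumption of this sketch is that
+* returning zero lets the event age out instead of re-registering it):
+*
+* static uint64_t my_trap_aged( uint64_t key, uint32_t num_regs,
+* void *context )
+* {
+* osm_trap_rcv_t *p_rcv = (osm_trap_rcv_t*)context;
+* osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+* "my_trap_aged: trap aged out\n" );
+* return 0;
+* }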
+*
+* NOTES
+* This function is called by the cl_event_wheel when the aging tracker
+* event has ended.
+*
+* SEE ALSO
+* Trap Receiver, Trap Response Controller
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_TRAP_RCV_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv_ctrl.h
new file mode 100644
index 00000000..c46be6a3
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_trap_rcv_ctrl.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_trap_rcv_ctrl_t.
+ * This object represents a controller that receives the IBA Trap
+ * attribute from a node.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+
+#ifndef _OSM_TRAP_RCV_CTRL_H_
+#define _OSM_TRAP_RCV_CTRL_H_
+
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Trap Receive Controller
+* NAME
+* Trap Receive Controller
+*
+* DESCRIPTION
+* The Trap Receive Controller object encapsulates the information
+* needed to receive the Trap attribute from a node.
+*
+* The Trap Receive Controller object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Yael Kalka, Mellanox
+*
+*********/
+
+/****s* OpenSM: Trap Receive Controller/osm_trap_rcv_ctrl_t
+* NAME
+* osm_trap_rcv_ctrl_t
+*
+* DESCRIPTION
+* Trap Receive Controller structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_trap_rcv_ctrl
+{
+ osm_trap_rcv_t *p_rcv;
+ osm_log_t *p_log;
+ cl_dispatcher_t *p_disp;
+ cl_disp_reg_handle_t h_disp;
+
+} osm_trap_rcv_ctrl_t;
+/*
+* FIELDS
+* p_rcv
+* Pointer to the Trap Receiver object.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_disp
+* Pointer to the Dispatcher.
+*
+* h_disp
+* Handle returned from dispatcher registration.
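+*
+* EXAMPLE
+* The controller couples the receiver to the central dispatcher; a
+* hedged sketch of the registration an init routine typically performs
+* (the message ID and callback name here are assumptions, not taken
+* from the sources):
+*
+* p_ctrl->h_disp = cl_disp_register( p_disp, OSM_MSG_MAD_NOTICE,
+* trap_rcv_ctrl_disp_callback, p_ctrl );
+* if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE )
+* return IB_ERROR;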
+*
+* SEE ALSO
+* Trap Receive Controller object
+*********/
+
+/****f* OpenSM: Trap Receive Controller/osm_trap_rcv_ctrl_construct
+* NAME
+* osm_trap_rcv_ctrl_construct
+*
+* DESCRIPTION
+* This function constructs a Trap Receive Controller object.
+*
+* SYNOPSIS
+*/
+void
+osm_trap_rcv_ctrl_construct(
+ IN osm_trap_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to a Trap Receive Controller object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_trap_rcv_ctrl_init and osm_trap_rcv_ctrl_destroy.
+*
+* Calling osm_trap_rcv_ctrl_construct is a prerequisite to calling any other
+* method except osm_trap_rcv_ctrl_init.
+*
+* SEE ALSO
+* Trap Receive Controller object, osm_trap_rcv_ctrl_init,
+* osm_trap_rcv_ctrl_destroy
+*********/
+
+/****f* OpenSM: Trap Receive Controller/osm_trap_rcv_ctrl_destroy
+* NAME
+* osm_trap_rcv_ctrl_destroy
+*
+* DESCRIPTION
+* The osm_trap_rcv_ctrl_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_trap_rcv_ctrl_destroy(
+ IN osm_trap_rcv_ctrl_t* const p_ctrl );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Trap Receive Controller object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_trap_rcv_ctrl_construct or osm_trap_rcv_ctrl_init.
+*
+* SEE ALSO
+* Trap Receive Controller object, osm_trap_rcv_ctrl_construct,
+* osm_trap_rcv_ctrl_init
+*********/
+
+/****f* OpenSM: Trap Receive Controller/osm_trap_rcv_ctrl_init
+* NAME
+* osm_trap_rcv_ctrl_init
+*
+* DESCRIPTION
+* The osm_trap_rcv_ctrl_init function initializes a
+* Trap Receive Controller object for use.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_trap_rcv_ctrl_init(
+ IN osm_trap_rcv_ctrl_t* const p_ctrl,
+ IN osm_trap_rcv_t* const p_rcv,
+ IN osm_log_t* const p_log,
+ IN cl_dispatcher_t* const p_disp );
+/*
+* PARAMETERS
+* p_ctrl
+* [in] Pointer to an osm_trap_rcv_ctrl_t object to initialize.
+*
+* p_rcv
+* [in] Pointer to an osm_trap_rcv_t object.
+*
+* p_log
+* [in] Pointer to the log object.
+*
+* p_disp
+* [in] Pointer to the OpenSM central Dispatcher.
+*
+* RETURN VALUES
+* IB_SUCCESS if the Trap Receive Controller object was initialized
+* successfully.
+*
+* NOTES
+* Allows calling other Trap Receive Controller methods.
+*
+* SEE ALSO
+* Trap Receive Controller object, osm_trap_rcv_ctrl_construct,
+* osm_trap_rcv_ctrl_destroy
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_TRAP_RCV_CTRL_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_ts_useraccess.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ts_useraccess.h
new file mode 100644
index 00000000..39df01ba
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ts_useraccess.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include "ts_ib_useraccess.h"
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+typedef struct ib_user_mad_filter osm_ts_user_mad_filter;
+typedef struct ib_set_port_info_ioctl osm_ts_set_port_info_ioctl;
+typedef struct ib_get_port_info_ioctl osm_ts_get_port_info_ioctl;
+typedef struct ib_gid_entry_ioctl osm_ts_gid_entry_ioctl;
+
+END_C_DECLS
+
+
diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_mgr.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_mgr.h
new file mode 100644
index 00000000..25d19d55
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_mgr.h
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Declaration of osm_ucast_mgr_t.
+ * This object represents the Unicast Manager object.
+ * This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_UCAST_MGR_H_
+#define _OSM_UCAST_MGR_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+#define OSM_UCAST_MGR_LIST_SIZE_MIN 256
+
+/****h* OpenSM/Unicast Manager
+* NAME
+* Unicast Manager
+*
+* DESCRIPTION
+* The Unicast Manager object encapsulates the information
+* needed to control unicast LID forwarding on the subnet.
+*
+* The Unicast Manager object is thread safe.
+*
+* This object should be treated as opaque and should be
+* manipulated only through the provided functions.
+*
+* AUTHOR
+* Steve King, Intel
+*
+*********/
+
+/****s* OpenSM: Unicast Manager/osm_ucast_mgr_t
+* NAME
+* osm_ucast_mgr_t
+*
+* DESCRIPTION
+* Unicast Manager structure.
+*
+* This object should be treated as opaque and should
+* be manipulated only through the provided functions.
+*
+* SYNOPSIS
+*/
+typedef struct _osm_ucast_mgr
+{
+ osm_subn_t *p_subn;
+ osm_req_t *p_req;
+ osm_log_t *p_log;
+ cl_plock_t *p_lock;
+ boolean_t any_change;
+ uint8_t *lft_buf;
+} osm_ucast_mgr_t;
+/*
+* FIELDS
+* p_subn
+* Pointer to the Subnet object for this subnet.
+*
+* p_req
+* Pointer to the Requester object sending SMPs.
+*
+* p_log
+* Pointer to the log object.
+*
+* p_lock
+* Pointer to the serializing lock.
+*
+* any_change
+* Initialized to FALSE at the beginning of the algorithm,
+* set to TRUE by osm_ucast_mgr_set_fwd_table() if any MAD
+* was sent.
+*
+* lft_buf
+* LFT buffer - used during LFT calculation/setup.
+*
+* SEE ALSO
+* Unicast Manager object
+*********/
+
+/****f* OpenSM: Unicast Manager/osm_ucast_mgr_construct
+* NAME
+* osm_ucast_mgr_construct
+*
+* DESCRIPTION
+* This function constructs a Unicast Manager object.
+*
+* SYNOPSIS
+*/
+void
+osm_ucast_mgr_construct(
+ IN osm_ucast_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to a Unicast Manager object to construct.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Allows calling osm_ucast_mgr_destroy.
+*
+* Calling osm_ucast_mgr_construct is a prerequisite to calling any other
+* method except osm_ucast_mgr_init.
+*
+* SEE ALSO
+* Unicast Manager object, osm_ucast_mgr_init,
+* osm_ucast_mgr_destroy
+*********/
+
+/****f* OpenSM: Unicast Manager/osm_ucast_mgr_destroy
+* NAME
+* osm_ucast_mgr_destroy
+*
+* DESCRIPTION
+* The osm_ucast_mgr_destroy function destroys the object, releasing
+* all resources.
+*
+* SYNOPSIS
+*/
+void
+osm_ucast_mgr_destroy(
+ IN osm_ucast_mgr_t* const p_mgr );
+/*
+* PARAMETERS
+* p_mgr
+* [in] Pointer to the object to destroy.
+*
+* RETURN VALUE
+* This function does not return a value.
+*
+* NOTES
+* Performs any necessary cleanup of the specified
+* Unicast Manager object.
+* Further operations should not be attempted on the destroyed object.
+* This function should only be called after a call to
+* osm_ucast_mgr_construct or osm_ucast_mgr_init.
+*
+* SEE ALSO
+* Unicast Manager object, osm_ucast_mgr_construct,
+* osm_ucast_mgr_init
+*********/
+
+/****f* OpenSM: Unicast Manager/osm_ucast_mgr_init
+* NAME
+* osm_ucast_mgr_init
+*
+* DESCRIPTION
+* The osm_ucast_mgr_init function initializes a
+* Unicast Manager object for use.
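+*
+* A typical call sequence after initialization (illustrative only):
+* build the LID matrices, then compute and push the forwarding tables.
+*
+* osm_ucast_mgr_build_lid_matrices( p_mgr );
+* if( osm_ucast_mgr_process( p_mgr ) == OSM_SIGNAL_DONE_PENDING )
+* ... wait for the outstanding transactions to complete ...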
+* +* SYNOPSIS +*/ +ib_api_status_t +osm_ucast_mgr_init( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_ucast_mgr_t object to initialize. +* +* p_req +* [in] Pointer to the attribute Requester object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the Unicast Manager object was initialized +* successfully. +* +* NOTES +* Allows calling other Unicast Manager methods. +* +* SEE ALSO +* Unicast Manager object, osm_ucast_mgr_construct, +* osm_ucast_mgr_destroy +*********/ + +/****f* OpenSM: Unicast Manager/osm_ucast_mgr_set_fwd_table +* NAME +* osm_ucast_mgr_set_fwd_table +* +* DESCRIPTION +* Setup forwarding table for the switch (from prepared lft_buf). +* +* SYNOPSIS +*/ +void +osm_ucast_mgr_set_fwd_table( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_switch_t* const p_sw ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_ucast_mgr_t object. +* +* p_mgr +* [in] Pointer to an osm_switch_t object. +* +* SEE ALSO +* Unicast Manager +*********/ + +/****f* OpenSM: Unicast Manager/osm_ucast_mgr_build_lid_matrices +* NAME +* osm_ucast_mgr_build_lid_matrices +* +* DESCRIPTION +* Build switches's lid matrices. +* +* SYNOPSIS +*/ +void +osm_ucast_mgr_build_lid_matrices( + IN osm_ucast_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_ucast_mgr_t object. +* +* NOTES +* This function processes the subnet, configuring switches' +* min hops tables (aka lid matrices). +* +* SEE ALSO +* Unicast Manager +*********/ + +/****f* OpenSM: Unicast Manager/osm_ucast_mgr_process +* NAME +* osm_ucast_mgr_process +* +* DESCRIPTION +* Process and configure the subnet's unicast forwarding tables. +* +* SYNOPSIS +*/ +osm_signal_t +osm_ucast_mgr_process( + IN osm_ucast_mgr_t* const p_mgr ); +/* +* PARAMETERS +* p_mgr +* [in] Pointer to an osm_ucast_mgr_t object. +* +* RETURN VALUES +* Returns the appropriate signal to the caller: +* OSM_SIGNAL_DONE - operation is complete +* OSM_SIGNAL_DONE_PENDING - local operations are complete, but +* transactions are still pending on the wire. +* +* NOTES +* This function processes the subnet, configuring switch +* unicast forwarding tables. +* +* SEE ALSO +* Unicast Manager, Node Info Response Controller +*********/ +END_C_DECLS + +#endif /* _OSM_UCAST_MGR_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_updn.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_updn.h new file mode 100644 index 00000000..b82582e4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_ucast_updn.h @@ -0,0 +1,472 @@ +/* + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _OSM_UCAST_UPDN_H_ +#define _OSM_UCAST_UPDN_H_ + + +/* + * Abstract: + * Implementation of Up Down Algorithm using ranking & Min Hop + * Calculation functions + * + * Environment: + * Linux User Mode + * + * $Revision: 1.0 $ + */ +/* LS : This code is useless since we integrate it with opensm */ +/* +#include +#include +#include +#include +#include +#include +*/ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/* //////////////////////////// */ +/* ENUM TypeDefs */ +/* /////////////////////////// */ + +/* +* DESCRIPTION +* This enum respresent available directions of arcs in the graph +* SYNOPSIS +*/ +typedef enum _updn_switch_dir + { + UP = 0, + DOWN + + } updn_switch_dir_t; + +/* + * TYPE DEFINITIONS + * UP + * Current switch direction in propogating the subnet is up + * DOWN + * Current switch direction in propogating the subnet is down + * + */ + + +/* +* DESCRIPTION +* This enum respresent available states in the UPDN algorithm +* SYNOPSIS +*/ +typedef enum _updn_state + { + UPDN_INIT = 0, + UPDN_RANK, + UPDN_MIN_HOP_CALC, + } updn_state_t; + +/* + * TYPE DEFINITIONS + * UPDN_INIT - loading the package but still not performing anything + * UPDN_RANK - post ranking algorithm + * UPDN_MIN_HOP_CALC - post min hop table calculation + */ + + +/* ////////////////////////////////// */ +/* Struct TypeDefs */ +/* ///////////////////////////////// */ + +/****s* UPDN: Rank element/updn_rank_t +* NAME +* updn_rank_t +* +* DESCRIPTION +* This object represents a rank type element in a list +* +* The updn_rank_t object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ + +typedef struct _updn_rank +{ + cl_map_item_t map_item; + uint8_t rank; +} updn_rank_t; + +/* +* FIELDS +* map_item +* Linkage structure for cl_qmap. MUST BE FIRST MEMBER! +* +* rank +* Rank value of this node +* +*/ + +/****s* UPDN: Histogram element/updn_hist_t +* NAME +* updn_hist_t +* +* DESCRIPTION +* This object represents a histogram type element in a list +* +* The updn_hist_t object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ + +typedef struct _updn_hist +{ + cl_map_item_t map_item; + uint32_t bar_value; +} updn_hist_t; + +/* +* FIELDS +* map_item +* Linkage structure for cl_qmap. MUST BE FIRST MEMBER! +* +* bar_value +* The number of occurences of the same hop value +* +*/ + +typedef struct _updn_next_step +{ + updn_switch_dir_t state; + osm_switch_t *p_sw; +} updn_next_step_t; + +/*****s* updn: updn/updn_input_t +* NAME updn_t +* +* +* DESCRIPTION +* updn input fields structure. 
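Editor's note: the rank element above is designed to live in a complib cl_qmap keyed by node GUID (the updn_t structure below holds such a map in guid_rank_tbl). A hypothetical sketch of the access pattern follows; the cl_qmap calls and PARENT_STRUCT are standard complib, while the two helpers and the 0xFF "unranked" marker are invented for illustration.

#include <stdlib.h>
#include <complib/cl_qmap.h>

static void
example_set_rank( IN cl_qmap_t* const p_tbl, IN uint64_t guid, IN uint8_t rank )
{
    updn_rank_t *p_rank = (updn_rank_t*)malloc( sizeof(updn_rank_t) );
    if( !p_rank )
        return;
    p_rank->rank = rank;
    /* map_item must be the first member, as the FIELDS note requires */
    cl_qmap_insert( p_tbl, guid, &p_rank->map_item );
}

static uint8_t
example_get_rank( IN const cl_qmap_t* const p_tbl, IN uint64_t guid )
{
    cl_map_item_t *p_item = cl_qmap_get( p_tbl, guid );
    if( p_item == cl_qmap_end( p_tbl ) )
        return 0xFF;    /* invented "unranked" marker */
    return PARENT_STRUCT( p_item, updn_rank_t, map_item )->rank;
}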
+* +* SYNOPSIS +*/ + +typedef struct _updn_input +{ + uint32_t num_guids; + uint64_t * guid_list; +} updn_input_t; + +/* +* FIELDS +* num_guids +* number of guids given at the UI +* +* guid_list +* guids specified as an array (converted from a list given in the UI) +* +* +* SEE ALSO +* +*********/ + + +/*****s* updn: updn/updn_t +* NAME updn_t +* +* +* DESCRIPTION +* updn structure. +* +* SYNOPSIS +*/ + +typedef struct _updn +{ + updn_state_t state; + boolean_t auto_detect_root_nodes; + cl_qmap_t guid_rank_tbl; + updn_input_t updn_ucast_reg_inputs; + cl_list_t * p_root_nodes; +} updn_t; + +/* +* FIELDS +* state +* state of the updn algorithm which basically should pass through Init +* - Ranking - UpDn algorithm +* +* guid_rank_tbl +* guid 2 rank mapping vector , indexed by guid in network order +* +* +* SEE ALSO +* +*********/ + + +/* ////////////////////////////// */ +/* Function */ +/* ////////////////////////////// */ + +/***f** Osmsh: Updn/updn_construct +* NAME +* updn_construct +* +* DESCRIPTION +* Allocation of updn_t struct +* +* SYNOPSIS +*/ + +updn_t* +updn_construct(void); + +/* +* PARAMETERS +* +* +* RETURN VALUE +* Return a pointer to an updn struct. Null if fails to do so. +* +* NOTES +* First step of the creation of updn_t +*/ + +/****s* Osmsh: Updn/updn_destroy +* NAME +* updn_destroy +* +* DESCRIPTION +* release of updn_t struct +* +* SYNOPSIS +*/ + +void +updn_destroy( + IN updn_t* const p_updn ); + +/* +* PARAMETERS +* p_updn +* A pointer to the updn_t struct that is goining to be released +* +* RETURN VALUE +* +* NOTES +* Final step of the releasing of updn_t +* +* SEE ALSO +* updn_construct +*********/ + +/****f* Osmsh: Updn/updn_init +* NAME +* updn_init +* +* DESCRIPTION +* Initialization of an updn_t struct +* +* SYNOPSIS +*/ +cl_status_t +updn_init( + IN updn_t* const p_updn ); + +/* +* PARAMETERS +* p_updn +* A pointer to the updn_t struct that is goining to be initilized +* +* RETURN VALUE +* The status of the function. +* +* NOTES +* +* SEE ALSO +* updn_construct +********/ + + + + +/****** Osmsh: Updn/updn_subn_rank +* NAME +* updn_subn_rank +* +* DESCRIPTION +* This function ranks the subnet for credit loop free algorithm +* +* SYNOPSIS +*/ + +int +updn_subn_rank( + IN uint64_t root_guid , + IN uint8_t base_rank, + IN updn_t* p_updn); + +/* +* PARAMETERS +* p_subn +* [in] Pointer to a Subnet object to construct. +* +* base_rank +* [in] The base ranking value (lowest value) +* +* p_updn +* [in] Pointer to updn structure which includes state & lid2rank table +* +* RETURN VALUE +* This function returns 0 when rankning has succeded , otherwise 1. +******/ + + +/****** Osmsh: UpDown/osm_subn_set_up_down_min_hop_table +* NAME +* osm_subn_set_up_down_min_hop_table +* +* DESCRIPTION +* This function set min hop table of all switches by BFS through each +* port guid at the subnet using ranking done before. +* +* SYNOPSIS +*/ + +int +osm_subn_set_up_down_min_hop_table( + IN updn_t* p_updn); + +/* +* PARAMETERS +* p_updn +* [in] Pointer to updn structure which includes state & lid2rank table +* +* RETURN VALUE +* This function returns 0 when rankning has succeded , otherwise 1. 
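Editor's note: the call order implied by the declarations above, written out as a hypothetical driver. It assumes the subnet has already been discovered so that ranking has topology to work on; both routing routines return 0 on success and 1 on failure, per their RETURN VALUE notes.

static int
example_run_updn( IN uint64_t root_guid )
{
    updn_t *p_updn = updn_construct();
    if( !p_updn )
        return 1;

    if( updn_init( p_updn ) != CL_SUCCESS )
    {
        updn_destroy( p_updn );
        return 1;
    }

    /* rank the subnet starting from the root, base rank 0 */
    if( updn_subn_rank( root_guid, 0, p_updn ) ||
        osm_subn_set_up_down_min_hop_table( p_updn ) )
    {
        updn_destroy( p_updn );
        return 1;   /* both routines return 0 on success, 1 on failure */
    }

    updn_destroy( p_updn );
    return 0;
}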
+******/ + + + + +/****** Osmsh: UpDown/osm_subn_calc_up_down_min_hop_table +* NAME +* osm_subn_calc_up_down_min_hop_table +* +* DESCRIPTION +* This function perform ranking and setting of all switches' min hop table +* by UP DOWN algorithm +* +* SYNOPSIS +*/ + +int +osm_subn_calc_up_down_min_hop_table( + IN uint32_t num_guids, + IN uint64_t* guid_list, + IN updn_t* p_updn); + +/* +* PARAMETERS +* +* guid_list +* [in] Guid list from which to start ranking . +* +* p_updn +* [in] Pointer to updn structure which includes state & lid2rank table +* RETURN VALUE +* This function returns 0 when rankning has succeded , otherwise 1. +******/ + +/****f* OpenSM: OpenSM/osm_updn_reg_calc_min_hop_table +* NAME +* osm_updn_reg_calc_min_hop_table +* +* DESCRIPTION +* Registration function to ucast routing manager (instead of +* Min Hop Algorithm) +* +* SYNOPSIS +*/ +int +osm_updn_reg_calc_min_hop_table( + IN updn_t * p_updn, + IN osm_subn_opt_t* p_opt ); +/* +* PARAMETERS +* +* RETURN VALUES +* 0 - on success , 1 - on failure +* +* NOTES +* +* SEE ALSO +* osm_subn_calc_up_down_min_hop_table +*********/ + +/****** Osmsh: UpDown/osm_updn_find_root_nodes_by_min_hop +* NAME +* osm_updn_find_root_nodes_by_min_hop +* +* DESCRIPTION +* This function perform auto identification of root nodes for UPDN ranking phase +* +* SYNOPSIS +*/ +int +osm_updn_find_root_nodes_by_min_hop( OUT updn_t * p_updn ); + +/* +* PARAMETERS +* p_root_nodes_list +* +* [out] Pointer to the root nodes list found in the subnet +* +* RETURN VALUE +* This function returns 0 when auto identification had succeeded +******/ + + +END_C_DECLS + +#endif /* _OSM_UCAST_UPDN_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_umadt.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_umadt.h new file mode 100644 index 00000000..80c969e0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_umadt.h @@ -0,0 +1,143 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_mad_wrapper_t. + * This object represents the context wrapper for OpenSM MAD processing. + * This object is part of the OpenSM family of objects. 
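Editor's note: returning to the UPDN entry points above, the two root-selection modes contrast as in this sketch. That osm_updn_find_root_nodes_by_min_hop deposits the detected roots in updn_ucast_reg_inputs is an assumption made here for illustration, not something the header states.

static int
example_updn_roots( IN updn_t* p_updn,
                    IN uint32_t num_guids,
                    IN uint64_t* guid_list )
{
    if( num_guids )
        /* roots supplied by the user, e.g. from the command line */
        return osm_subn_calc_up_down_min_hop_table(
            num_guids, guid_list, p_updn );

    /* no list given - let UPDN auto-detect the roots first */
    if( osm_updn_find_root_nodes_by_min_hop( p_updn ) )
        return 1;

    /* assumption: the detected roots now sit in updn_ucast_reg_inputs */
    return osm_subn_calc_up_down_min_hop_table(
        p_updn->updn_ucast_reg_inputs.num_guids,
        p_updn->updn_ucast_reg_inputs.guid_list,
        p_updn );
}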
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_UMADT_h_ +#define _OSM_UMADT_h_ + +#include "iba/ib_types.h" +#include +#include +#include "umadt.h" +#include "ibt.h" + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +typedef struct _umadt_obj_t{ + void* umadt_handle; + UMADT_INTERFACE uMadtInterface; + IBT_INTERFACE IbtInterface; + boolean init_done; + cl_spinlock_t register_lock; + cl_qlist_t register_list; + osm_log_t *p_log; + uint32_t timeout; + +}umadt_obj_t; +/*********/ + +/****s* OpenSM: Umadt MAD Wrapper/osm_bind_info +* NAME +* osm_bind_info +* +* DESCRIPTION +* Context needed for processing individual MADs +* +* SYNOPSIS +*/ + +typedef struct _mad_bind_info_t{ + cl_list_item_t list_item; + umadt_obj_t *p_umadt_obj; + osm_mad_pool_t *p_mad_pool; + osm_vend_mad_recv_callback_t mad_recv_callback; + void *client_context; + cl_thread_t recv_processor_thread; + cl_spinlock_t trans_ctxt_lock; + cl_qlist_t trans_ctxt_list; + cl_timer_t timeout_timer; + cl_spinlock_t timeout_list_lock; + cl_qlist_t timeout_list; + RegisterClassStruct umadt_reg_class; + MADT_HANDLE umadt_handle; /* Umadt type */ + +}mad_bind_info_t; + +typedef struct _trans_context_t { + cl_list_item_t list_item; + uint64_t trans_id; + uint64_t sent_time; /* micro secs */ + void* context; +} trans_context_t; + +/* +* FIELDS +* list_item +* List linkage for pools and lists. MUST BE FIRST MEMBER! +* +* p_mad_pool +* Pointer to the MAD pool to be used by mads with this bind handle. +* +* mad_recv_callback +* Callback function called by the mad receive processor. +* +* client_context +* context to be passed to the receive callback. +* +* recv_processor_thread +* Thread structure for the receive processor thread. +* +* umadt_reg_class +* Umadt register class struct used to register with Umadt. +* +* umadt_handle +* Umadt returns this handle from a registration call. The transport layer +* uses this handle to talk to Umadt. +* +* SEE ALSO +*********/ + +END_C_DECLS + +#endif /*_OSM_UMADT_h_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_version.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_version.h new file mode 100644 index 00000000..7be8e07f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_version.h @@ -0,0 +1,63 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _OSM_VERSION_H_ +#define _OSM_VERSION_H_ + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****s* OpenSM: Base/OSM_VERSION +* NAME +* OSM_VERSION +* +* DESCRIPTION +* The version string for OpenSM +* +* SYNOPSIS +*/ +#define OSM_VERSION "OpenSM Rev:openib-3.0.0" +/********/ + +END_C_DECLS + +#endif /* _OSM_VERSION_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl15intf.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl15intf.h new file mode 100644 index 00000000..f56a6ca3 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl15intf.h @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_vl15_t. + * This object represents an IBA subnet. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#ifndef _OSM_VL15INTF_H_ +#define _OSM_VL15INTF_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/VL15 +* NAME +* VL15 +* +* DESCRIPTION +* The VL15 object encapsulates the information needed by the +* OpenSM to instantiate the VL15 interface. The OpenSM allocates +* one VL15 object per subnet. +* +* The VL15 object transmits MADs to the wire at a throttled rate, +* so as to not overload the VL15 buffering of subnet components. +* OpenSM modules may post VL15 MADs to the VL15 interface as fast +* as possible. +* +* The VL15 object is thread safe. 
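Editor's note: before the field-level declarations that follow, a usage sketch of the interface declared in the rest of this header. The caller is hypothetical, every pointed-to object is assumed to be the live one owned by the OpenSM core, and the limit of 4 outstanding SMPs is an arbitrary illustration.

static ib_api_status_t
example_start_vl15( osm_vl15_t *p_vl15, osm_vendor_t *p_vend,
                    osm_log_t *p_log, osm_stats_t *p_stats,
                    osm_subn_t *p_subn, cl_dispatcher_t *p_disp,
                    cl_plock_t *p_lock, osm_madw_t *p_madw )
{
    ib_api_status_t status;

    osm_vl15_construct( p_vl15 );
    /* allow at most 4 SMPs outstanding on the wire at once */
    status = osm_vl15_init( p_vl15, p_vend, p_log, p_stats,
                            4, p_subn, p_disp, p_lock );
    if( status != IB_SUCCESS )
        return status;

    /* queue one MAD; the poller thread drains the fifos at its own rate */
    osm_vl15_post( p_vl15, p_madw );
    return IB_SUCCESS;
}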
+* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* AUTHOR +* Steve King, Intel +* +*********/ + +/****d* OpenSM: SM/osm_vl15_state_t +* NAME +* osm_vl15_state_t +* +* DESCRIPTION +* Enumerates the possible states of SM object. +* +* SYNOPSIS +*/ +typedef enum _osm_vl15_state +{ + OSM_VL15_STATE_INIT = 0, + OSM_VL15_STATE_READY + +} osm_vl15_state_t; +/***********/ + +/****s* OpenSM: VL15/osm_vl15_t +* NAME +* osm_vl15_t +* +* DESCRIPTION +* VL15 structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_vl15 +{ + osm_thread_state_t thread_state; + osm_vl15_state_t state; + uint32_t max_wire_smps; + cl_event_t signal; + cl_thread_t poller; + cl_qlist_t rfifo; + cl_qlist_t ufifo; + cl_spinlock_t lock; + osm_vendor_t *p_vend; + osm_log_t *p_log; + osm_stats_t *p_stats; + osm_subn_t *p_subn; + cl_disp_reg_handle_t h_disp; + cl_plock_t *p_lock; + +} osm_vl15_t; +/* +* FIELDS +* thread_state +* Tracks the thread state of the poller thread. +* +* state +* Tracks the state of the VL15 interface itself. +* +* max_wire_smps +* Maximum number of VL15 MADs allowed on the wire at one time. +* +* signal +* Event on which the poller sleeps. +* +* rfifo +* First-in First-out queue for outbound VL15 MADs for which +* a response is expected, aka the "response fifo" +* +* ufifo +* First-in First-out queue for outbound VL15 MADs for which +* no response is expected, aka the "unicast fifo". +* +* poller +* Worker thread pool that services the fifo to transmit VL15 MADs +* +* lock +* Spinlock guarding the FIFO. +* +* p_vend +* Pointer to the vendor transport object. +* +* p_log +* Pointer to the log object. +* +* p_stats +* Pointer to the OpenSM statistics block. +* +* p_subn +* Pointer to the Subnet object for this subnet. +* +* h_disp +* Handle returned from dispatcher registration. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* VL15 object +*********/ + +/****f* OpenSM: VL15/osm_vl15_construct +* NAME +* osm_vl15_construct +* +* DESCRIPTION +* This function constructs an VL15 object. +* +* SYNOPSIS +*/ +void +osm_vl15_construct( + IN osm_vl15_t* const p_vl15 ); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to a VL15 object to construct. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Allows calling osm_vl15_init, osm_vl15_destroy, and osm_vl15_is_inited. +* +* Calling osm_vl15_construct is a prerequisite to calling any other +* method except osm_vl15_init. +* +* SEE ALSO +* VL15 object, osm_vl15_init, osm_vl15_destroy, osm_vl15_is_inited +*********/ + +/****f* OpenSM: VL15/osm_vl15_destroy +* NAME +* osm_vl15_destroy +* +* DESCRIPTION +* The osm_vl15_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void +osm_vl15_destroy( + IN osm_vl15_t* const p_vl15, + IN struct _osm_mad_pool *p_pool); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to a VL15 object to destroy. +* +* p_pool +* [in] The pointer to the mad pool to return outstanding mads to +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified VL15 object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to osm_vl15_construct or +* osm_vl15_init. +* +* SEE ALSO +* VL15 object, osm_vl15_construct, osm_vl15_init +*********/ + +/* + Initialization. 
+ Rate specifies the minimum number of microseconds between transmissions + on VL15. +*/ +/****f* OpenSM: VL15/osm_vl15_init +* NAME +* osm_vl15_init +* +* DESCRIPTION +* The osm_vl15_init function initializes a VL15 object for use. +* +* SYNOPSIS +*/ +ib_api_status_t +osm_vl15_init( + IN osm_vl15_t* const p_vl15, + IN osm_vendor_t* const p_vend, + IN osm_log_t* const p_log, + IN osm_stats_t* const p_stats, + IN const int32_t max_wire_smps, + IN osm_subn_t* const p_subn, + IN cl_dispatcher_t* const p_disp, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to an osm_vl15_t object to initialize. +* +* p_vend +* [in] Pointer to the vendor transport object. +* +* p_log +* [in] Pointer to the log object. +* +* p_stats +* [in] Pointer to the OpenSM stastics block. +* +* max_wire_smps +* [in] Maximum number of MADs allowed on the wire at one time. +* +* p_subn +* [in] Pointer to the subnet object. +* +* p_disp +* [in] Pointer to the dispatcher object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* IB_SUCCESS if the VL15 object was initialized successfully. +* +* NOTES +* Allows calling other VL15 methods. +* +* SEE ALSO +* VL15 object, osm_vl15_construct, osm_vl15_destroy, +* osm_vl15_is_inited +*********/ + +/****f* OpenSM: VL15/osm_vl15_post +* NAME +* osm_vl15_post +* +* DESCRIPTION +* Posts a MAD to the VL15 interface for transmission. +* +* SYNOPSIS +*/ +void +osm_vl15_post( + IN osm_vl15_t* const p_vl15, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to an osm_vl15_t object. +* +* p_madw +* [in] Pointer to a MAD wrapper structure containing the MAD. +* +* RETURN VALUES +* This function does not return a value. +* +* NOTES +* The osm_vl15_construct or osm_vl15_init must be called before using +* this function. +* +* SEE ALSO +* VL15 object, osm_vl15_construct, osm_vl15_init +*********/ + +/****f* OpenSM: VL15/osm_vl15_poll +* NAME +* osm_vl15_poll +* +* DESCRIPTION +* Causes the VL15 Interface to consider sending another QP0 MAD. +* +* SYNOPSIS +*/ +void +osm_vl15_poll( + IN osm_vl15_t* const p_vl ); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to an osm_vl15_t object. +* +* RETURN VALUES +* None. +* +* NOTES +* This function signals the VL15 that it may be possible to send +* a SMP. This function checks three criteria before sending a SMP: +* 1) The VL15 worker is IDLE +* 2) There are no QP0 SMPs currently outstanding +* 3) There is something on the VL15 FIFO to send +* +* SEE ALSO +* VL15 object, osm_vl15_construct, osm_vl15_init +*********/ + +/****f* OpenSM: VL15/osm_vl15_shutdown +* NAME +* osm_vl15_shutdown +* +* DESCRIPTION +* Cleanup all outstanding MADs on both fifo's. +* This is required to return all outstanding MAD resources. +* +* SYNOPSIS +*/ +void +osm_vl15_shutdown( + IN osm_vl15_t* const p_vl, + IN osm_mad_pool_t* const p_mad_pool); +/* +* PARAMETERS +* p_vl15 +* [in] Pointer to an osm_vl15_t object. +* +* p_mad_pool +* [in] The MAD pool owning the mads. +* +* RETURN VALUES +* None. +* +* NOTES +* +* SEE ALSO +* VL15 object, osm_vl15_construct, osm_vl15_init +*********/ + +END_C_DECLS + +#endif /* _OSM_VL15INTF_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv.h new file mode 100644 index 00000000..9b57aeef --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv.h @@ -0,0 +1,265 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. 
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_vla_rcv_t. + * This object represents the VL Arbitration Receiver object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_VLA_RCV_H_ +#define _OSM_VLA_RCV_H_ + +#include +#include +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/VL Arbitration Receiver +* NAME +* VL Arbitration Receiver +* +* DESCRIPTION +* The VL Arbitration Receiver object encapsulates the information +* needed to set or get the vl arbitration attribute from a port. +* +* The VL Arbitration Receiver object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: VL Arbitration Receiver/osm_vla_rcv_t +* NAME +* osm_vla_rcv_t +* +* DESCRIPTION +* VL Arbitration Receiver structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_vla_rcv +{ + osm_subn_t *p_subn; + osm_req_t *p_req; + osm_log_t *p_log; + cl_plock_t *p_lock; + +} osm_vla_rcv_t; +/* +* FIELDS +* p_subn +* Pointer to the Subnet object for this subnet. +* +* p_req +* Pointer to the generic attribute request object. +* +* p_log +* Pointer to the log object. +* +* p_lock +* Pointer to the serializing lock. +* +* SEE ALSO +* VL Arbitration Receiver object +*********/ + +/****f* OpenSM: VL Arbitration Receiver/osm_vla_rcv_construct +* NAME +* osm_vla_rcv_construct +* +* DESCRIPTION +* This function constructs a VL Arbitration Receiver object. +* +* SYNOPSIS +*/ +void osm_vla_rcv_construct( + IN osm_vla_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a VL Arbitration Receiver object to construct. +* +* RETURN VALUE +* This function does not return a value. 
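Editor's note: a lifecycle sketch for the receiver, hypothetical caller only. osm_vla_rcv_init and osm_vla_rcv_process are declared further down in this header; in the real flow, process is invoked from the dispatcher when a VLArbitrationTable MAD arrives.

static ib_api_status_t
example_vla_rcv( osm_req_t *p_req, osm_subn_t *p_subn,
                 osm_log_t *p_log, cl_plock_t *p_lock,
                 osm_madw_t *p_madw )
{
    osm_vla_rcv_t rcv;
    ib_api_status_t status;

    osm_vla_rcv_construct( &rcv );
    status = osm_vla_rcv_init( &rcv, p_req, p_subn, p_log, p_lock );
    if( status != IB_SUCCESS )
        return status;

    /* normally called from the dispatcher on MAD arrival */
    osm_vla_rcv_process( &rcv, p_madw );

    osm_vla_rcv_destroy( &rcv );
    return IB_SUCCESS;
}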
+* +* NOTES +* Allows calling osm_vla_rcv_destroy +* +* Calling osm_vla_rcv_construct is a prerequisite to calling any other +* method except osm_vla_rcv_init. +* +* SEE ALSO +* VL Arbitration Receiver object, osm_vla_rcv_init, +* osm_vla_rcv_destroy +*********/ + +/****f* OpenSM: VL Arbitration Receiver/osm_vla_rcv_destroy +* NAME +* osm_vla_rcv_destroy +* +* DESCRIPTION +* The osm_vla_rcv_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_vla_rcv_destroy( + IN osm_vla_rcv_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* VL Arbitration Receiver object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_vla_rcv_construct or osm_vla_rcv_init. +* +* SEE ALSO +* VL Arbitration Receiver object, osm_vla_rcv_construct, +* osm_vla_rcv_init +*********/ + +/****f* OpenSM: VL Arbitration Receiver/osm_vla_rcv_init +* NAME +* osm_vla_rcv_init +* +* DESCRIPTION +* The osm_vla_rcv_init function initializes a +* VL Arbitration Receiver object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_vla_rcv_init( + IN osm_vla_rcv_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_vla_rcv_t object to initialize. +* +* p_req +* [in] Pointer to an osm_req_t object. +* +* p_subn +* [in] Pointer to the Subnet object for this subnet. +* +* p_log +* [in] Pointer to the log object. +* +* p_lock +* [in] Pointer to the OpenSM serializing lock. +* +* RETURN VALUES +* CL_SUCCESS if the VL Arbitration Receiver object was initialized +* successfully. +* +* NOTES +* Allows calling other VL Arbitration Receiver methods. +* +* SEE ALSO +* VL Arbitration Receiver object, osm_vla_rcv_construct, +* osm_vla_rcv_destroy +*********/ + +/****f* OpenSM: VL Arbitration Receiver/osm_vla_rcv_process +* NAME +* osm_vla_rcv_process +* +* DESCRIPTION +* Process the vl arbitration attribute. +* +* SYNOPSIS +*/ +void osm_vla_rcv_process( + IN const osm_vla_rcv_t* const p_ctrl, + IN osm_madw_t* const p_madw ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_vla_rcv_t object. +* +* p_madw +* [in] Pointer to the MAD Wrapper containing the MAD +* that contains the node's SLtoVL attribute. +* +* RETURN VALUES +* CL_SUCCESS if the SLtoVL processing was successful. +* +* NOTES +* This function processes a SLtoVL attribute. +* +* SEE ALSO +* VL Arbitration Receiver, VL Arbitration Response Controller +*********/ + +END_C_DECLS + +#endif /* _OSM_VLA_RCV_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv_ctrl.h b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv_ctrl.h new file mode 100644 index 00000000..bca54d61 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/osm_vl_arb_rcv_ctrl.h @@ -0,0 +1,261 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osm_vla_rcv_ctrl_t. + * This object represents a controller that set or get resp the + * IBA VL Arbitration Table attribute from a port. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#ifndef _OSM_VLA_RCV_CTRL_H_ +#define _OSM_VLA_RCV_CTRL_H_ + +#include +#include +#include +#include +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****h* OpenSM/VL Arbitration Table Receive Controller +* NAME +* VL Arbitration Receive Controller +* +* DESCRIPTION +* The VL Arbitration Receive Controller object encapsulates +* the information needed to get or set VL Arbitration of a port. +* +* The VL Arbitration Receive Controller object is thread safe. +* +* This object should be treated as opaque and should be +* manipulated only through the provided functions. +* +* AUTHOR +* Eitan Zahavi, Mellanox +* +*********/ + +/****s* OpenSM: VL Arbitration Receive Controller/osm_vla_rcv_ctrl_t +* NAME +* osm_vla_rcv_ctrl_t +* +* DESCRIPTION +* VL Arbitration Receive Controller structure. +* +* This object should be treated as opaque and should +* be manipulated only through the provided functions. +* +* SYNOPSIS +*/ +typedef struct _osm_vla_rcv_ctrl +{ + osm_vla_rcv_t *p_rcv; + osm_log_t *p_log; + cl_dispatcher_t *p_disp; + cl_disp_reg_handle_t h_disp; + +} osm_vla_rcv_ctrl_t; +/* +* FIELDS +* p_rcv +* Pointer to the VL Arbitration Receiver object. +* +* p_log +* Pointer to the log object. +* +* p_disp +* Pointer to the Dispatcher. +* +* h_disp +* Handle returned from dispatcher registration. +* +* SEE ALSO +* VL Arbitration Receive Controller object +* VL Arbitration Receiver object +*********/ + +/****f* OpenSM: VL Arbitration Receive Controller/osm_vla_rcv_ctrl_construct +* NAME +* osm_vla_rcv_ctrl_construct +* +* DESCRIPTION +* This function constructs a VL Arbitration Receive Controller object. +* +* SYNOPSIS +*/ +void osm_vla_rcv_ctrl_construct( + IN osm_vla_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to a VL Arbitration Receive Controller +* object to construct. +* +* RETURN VALUE +* This function does not return a value. 
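Editor's note: wiring the controller to its receiver and the central dispatcher looks like the following sketch. It is hypothetical; osm_vla_rcv_ctrl_init and osm_vla_rcv_ctrl_is_inited are declared further down in this header.

static ib_api_status_t
example_vla_ctrl( osm_vla_rcv_ctrl_t *p_ctrl, osm_vla_rcv_t *p_rcv,
                  osm_log_t *p_log, cl_dispatcher_t *p_disp )
{
    ib_api_status_t status;

    osm_vla_rcv_ctrl_construct( p_ctrl );
    status = osm_vla_rcv_ctrl_init( p_ctrl, p_rcv, p_log, p_disp );
    if( status != IB_SUCCESS )
        return status;

    CL_ASSERT( osm_vla_rcv_ctrl_is_inited( p_ctrl ) );
    return IB_SUCCESS;
}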
+* +* NOTES +* Allows calling osm_vla_rcv_ctrl_init, osm_vla_rcv_ctrl_destroy, +* and osm_vla_rcv_ctrl_is_inited. +* +* Calling osm_vla_rcv_ctrl_construct is a prerequisite to calling any other +* method except osm_vla_rcv_ctrl_init. +* +* SEE ALSO +* VL Arbitration Receive Controller object, osm_vla_rcv_ctrl_init, +* osm_vla_rcv_ctrl_destroy, osm_vla_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: VL Arbitration Receive Controller/osm_vla_rcv_ctrl_destroy +* NAME +* osm_vla_rcv_ctrl_destroy +* +* DESCRIPTION +* The osm_vla_rcv_ctrl_destroy function destroys the object, releasing +* all resources. +* +* SYNOPSIS +*/ +void osm_vla_rcv_ctrl_destroy( + IN osm_vla_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to the object to destroy. +* +* RETURN VALUE +* This function does not return a value. +* +* NOTES +* Performs any necessary cleanup of the specified +* VL Arbitration Receive Controller object. +* Further operations should not be attempted on the destroyed object. +* This function should only be called after a call to +* osm_vla_rcv_ctrl_construct or osm_vla_rcv_ctrl_init. +* +* SEE ALSO +* VL Arbitration Receive Controller object, osm_vla_rcv_ctrl_construct, +* osm_vla_rcv_ctrl_init +*********/ + +/****f* OpenSM: VL Arbitration Receive Controller/osm_vla_rcv_ctrl_init +* NAME +* osm_vla_rcv_ctrl_init +* +* DESCRIPTION +* The osm_vla_rcv_ctrl_init function initializes a +* VL Arbitration Receive Controller object for use. +* +* SYNOPSIS +*/ +ib_api_status_t osm_vla_rcv_ctrl_init( + IN osm_vla_rcv_ctrl_t* const p_ctrl, + IN osm_vla_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_vla_rcv_ctrl_t object to initialize. +* +* p_rcv +* [in] Pointer to an osm_vla_rcv_t object. +* +* p_log +* [in] Pointer to the log object. +* +* p_disp +* [in] Pointer to the OpenSM central Dispatcher. +* +* RETURN VALUES +* CL_SUCCESS if the VL Arbitration Receive Controller object was initialized +* successfully. +* +* NOTES +* Allows calling other VL Arbitration Receive Controller methods. +* +* SEE ALSO +* VL Arbitration Receive Controller object, osm_vla_rcv_ctrl_construct, +* osm_vla_rcv_ctrl_destroy, osm_vla_rcv_ctrl_is_inited +*********/ + +/****f* OpenSM: VL Arbitration Receive Controller/osm_vla_rcv_ctrl_is_inited +* NAME +* osm_vla_rcv_ctrl_is_inited +* +* DESCRIPTION +* Indicates if the object has been initialized with osm_vla_rcv_ctrl_init. +* +* SYNOPSIS +*/ +boolean_t osm_vla_rcv_ctrl_is_inited( + IN const osm_vla_rcv_ctrl_t* const p_ctrl ); +/* +* PARAMETERS +* p_ctrl +* [in] Pointer to an osm_vla_rcv_ctrl_t object. +* +* RETURN VALUES +* TRUE if the object was initialized successfully, +* FALSE otherwise. +* +* NOTES +* The osm_vla_rcv_ctrl_construct or osm_vla_rcv_ctrl_init must be +* called before using this function. +* +* SEE ALSO +* VL Arbitration Receive Controller object, osm_vla_rcv_ctrl_construct, +* osm_vla_rcv_ctrl_init +*********/ + +END_C_DECLS + +#endif /* _OSM_VLA_RCV_CTRL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/opensm/st.h b/branches/Ndi/ulp/opensm/user/include/opensm/st.h new file mode 100644 index 00000000..ee0ba918 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/opensm/st.h @@ -0,0 +1,107 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* @(#) st.h 5.1 89/12/14 */ + +#ifndef ST_INCLUDED +#define ST_INCLUDED + +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +#if (__WORDSIZE == 64) || defined (_WIN64) +#define st_ptr_t unsigned long long +#else +#define st_ptr_t unsigned long +#endif + +typedef st_ptr_t st_data_t; + +#define ST_DATA_T_DEFINED + +typedef struct st_table st_table; + +struct st_hash_type { + int (*compare)(void *, void *); + st_ptr_t (*hash)(void *); +}; + +struct st_table { + struct st_hash_type *type; + int num_bins; + int num_entries; + struct st_table_entry **bins; +}; + +#define st_is_member(table,key) st_lookup(table,key,(st_data_t *)0) + +enum st_retval {ST_CONTINUE, ST_STOP, ST_DELETE}; + +st_table *st_init_table(struct st_hash_type *); +st_table *st_init_table_with_size(struct st_hash_type *, size_t); +st_table *st_init_numtable(void); +st_table *st_init_numtable_with_size(size_t); +st_table *st_init_strtable(void); +st_table *st_init_strtable_with_size(size_t); +int st_delete(st_table *, st_data_t *, st_data_t *); +int st_delete_safe(st_table *, st_data_t *, st_data_t *, st_data_t); +int st_insert(st_table *, st_data_t, st_data_t); +int st_lookup(st_table *, st_data_t, st_data_t *); +void st_foreach(st_table *, int (*)(st_data_t key, st_data_t val, st_data_t arg), st_data_t); +void st_add_direct(st_table *, st_data_t, st_data_t); +void st_free_table(st_table *); +void st_cleanup_safe(st_table *, st_data_t); +st_table *st_copy(st_table *); + +#define ST_NUMCMP ((int (*)()) 0) +#define ST_NUMHASH ((int (*)()) -2) + +#define st_numcmp ST_NUMCMP +#define st_numhash ST_NUMHASH + +/* int st_strhash(void); */ + +END_C_DECLS + +#endif /* ST_INCLUDED */ + diff --git a/branches/Ndi/ulp/opensm/user/include/unistd.h b/branches/Ndi/ulp/opensm/user/include/unistd.h new file mode 100644 index 00000000..08fb5372 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/unistd.h @@ -0,0 +1,38 @@ +/* + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. 
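Editor's note: the st hash-table API declared above is self-contained enough for a short sketch. It builds a numeric table, where keys and values are st_data_t integers; the lid/port pairing is invented for illustration and the include path is assumed.

#include <stdio.h>
#include <opensm/st.h>   /* path assumed */

static int
example_print_entry( st_data_t key, st_data_t val, st_data_t arg )
{
    printf( "lid %lu -> port %lu\n",
            (unsigned long)key, (unsigned long)val );
    return ST_CONTINUE;   /* keep iterating; ST_STOP/ST_DELETE also allowed */
}

static void
example_st_usage( void )
{
    st_data_t val;
    st_table *tbl = st_init_numtable();

    st_insert( tbl, (st_data_t)1, (st_data_t)13 );
    st_insert( tbl, (st_data_t)2, (st_data_t)7 );

    if( st_lookup( tbl, (st_data_t)1, &val ) )
        printf( "key 1 maps to %lu\n", (unsigned long)val );

    st_foreach( tbl, example_print_entry, (st_data_t)0 );
    st_free_table( tbl );
}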
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id: osm_base.h 1689 2006-09-12 20:55:47Z eitan $ + */ + + + +/* + * Abstract: + * Work around for cmpatibility mode with Linux. + * + */ diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor.h b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor.h new file mode 100644 index 00000000..2df637e7 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor.h @@ -0,0 +1,78 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Include file used by OpenSM to pull in the correct vendor file. 
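Editor's note: in practice the selection in the #if chain that follows is made at build time. Exactly one OSM_VENDOR_INTF_* macro is defined, normally in osm_vendor_select.h or on the compiler command line, before this header is included. A sketch; the include path is an assumption, since the diff lost the original include targets.

/* pick the AL transport; osm_vendor.h then resolves to osm_vendor_al.h */
#define OSM_VENDOR_INTF_AL 1
#include <vendor/osm_vendor.h>   /* path assumed */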
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.6 $
+ */
+
+/*
+  This is the generic include file that includes
+  the proper vendor-specific file.
+*/
+#include
+
+#if defined( OSM_VENDOR_INTF_TEST )
+#include
+#elif defined( OSM_VENDOR_INTF_UMADT )
+#include
+#elif defined( OSM_VENDOR_INTF_MTL )
+/* HACK - I do not know how to prevent complib from loading kernel H files */
+#undef __init
+#include
+#elif defined( OSM_VENDOR_INTF_TS )
+#undef __init
+#include
+#elif defined( OSM_VENDOR_INTF_ANAFA )
+#undef __init
+#include
+#elif defined( OSM_VENDOR_INTF_SIM )
+#undef __init
+#include
+#elif defined( OSM_VENDOR_INTF_OPENIB )
+#include
+#elif defined( OSM_VENDOR_INTF_AL )
+#include
+#else
+#error No MAD Interface selected!
+#error Choose an interface in osm_vendor_select.h
+#endif
+
+
diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_al.h b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_al.h
new file mode 100644
index 00000000..80293c43
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_al.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ *    Declaration of osm_mad_wrapper_t.
+ *    This object represents the context wrapper for OpenSM MAD processing.
+ *    This object is part of the OpenSM family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+
+
+
+#ifndef _OSM_VENDOR_AL_H_
+#define _OSM_VENDOR_AL_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****h* OpenSM/Vendor Access Layer (AL)
+* NAME
+*    Vendor AL
+*
+* DESCRIPTION
+*    This file is the vendor-specific file for the AL InfiniBand API.
+*
+*    The Vendor AL object is thread safe.
+*
+*    This object should be treated as opaque and should be
+*    manipulated only through the provided functions.
+*
+*    Enable various hacks to compensate for bugs in external code...
+* +* AUTHOR +* Steve King, Intel +* +*********/ + +#define OSM_AL_SQ_SGE 256 +#define OSM_AL_RQ_SGE 256 +#define OSM_DEFAULT_RETRY_COUNT 3 + +/* AL supports RMPP */ +#define VENDOR_RMPP_SUPPORT 1 + +/****s* OpenSM: Vendor AL/osm_ca_info_t +* NAME +* osm_ca_info_t +* +* DESCRIPTION +* Structure containing information about local Channle Adapters. +* +* SYNOPSIS +*/ +typedef struct _osm_ca_info +{ + ib_net64_t guid; + size_t attr_size; + ib_ca_attr_t *p_attr; + +} osm_ca_info_t; +/* +* FIELDS +* guid +* Node GUID of the local CA. +* +* attr_size +* Size of the CA attributes for this CA. +* +* p_attr +* Pointer to dynamicly allocated CA Attribute structure. +* +* SEE ALSO +*********/ + +/****f* OpenSM: CA Info/osm_ca_info_get_num_ports +* NAME +* osm_ca_info_get_num_ports +* +* DESCRIPTION +* Returns the number of ports owned by this CA. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_ca_info_get_num_ports( + IN const osm_ca_info_t* const p_ca_info ) +{ + return( p_ca_info->p_attr->num_ports ); +} +/* +* PARAMETERS +* p_ca_info +* [in] Pointer to a CA Info object. +* +* RETURN VALUE +* Returns the number of ports owned by this CA. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: CA Info/osm_ca_info_get_port_guid +* NAME +* osm_ca_info_get_port_guid +* +* DESCRIPTION +* Returns the port GUID of the specified port owned by this CA. +* +* SYNOPSIS +*/ +static inline ib_net64_t +osm_ca_info_get_port_guid( + IN const osm_ca_info_t* const p_ca_info, + IN const uint8_t index ) +{ + return( p_ca_info->p_attr->p_port_attr[index].port_guid ); +} +/* +* PARAMETERS +* p_ca_info +* [in] Pointer to a CA Info object. +* +* index +* [in] Port "index" for which to retrieve the port GUID. +* The index is the offset into the ca's internal array +* of port attributes. +* +* RETURN VALUE +* Returns the port GUID of the specified port owned by this CA. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: CA Info/osm_ca_info_get_port_num +* NAME +* osm_ca_info_get_port_num +* +* DESCRIPTION +* Returns the port number of the specified port owned by this CA. +* Port numbers start with 1 for HCA's. +* +* SYNOPSIS +*/ +static inline uint8_t +osm_ca_info_get_port_num( + IN const osm_ca_info_t* const p_ca_info, + IN const uint8_t index ) +{ + return( p_ca_info->p_attr->p_port_attr[index].port_num ); +} +/* +* PARAMETERS +* p_ca_info +* [in] Pointer to a CA Info object. +* +* index +* [in] Port "index" for which to retrieve the port GUID. +* The index is the offset into the ca's internal array +* of port attributes. +* +* RETURN VALUE +* Returns the port GUID of the specified port owned by this CA. +* +* NOTES +* +* SEE ALSO +*********/ + +/****f* OpenSM: CA Info/osm_ca_info_get_ca_guid +* NAME +* osm_ca_info_get_ca_guid +* +* DESCRIPTION +* Returns the GUID of the specified CA. +* +* SYNOPSIS +*/ +static inline ib_net64_t +osm_ca_info_get_ca_guid( + IN const osm_ca_info_t* const p_ca_info ) +{ + return( p_ca_info->p_attr->ca_guid ); +} +/* +* PARAMETERS +* p_ca_info +* [in] Pointer to a CA Info object. +* +* RETURN VALUE +* Returns the GUID of the specified CA. +* +* NOTES +* +* SEE ALSO +*********/ + +/****s* OpenSM: Vendor AL/osm_bind_handle_t +* NAME +* osm_bind_handle_t +* +* DESCRIPTION +* handle returned by the vendor transport bind call. 
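Editor's note: the inline accessors above combine naturally into a port dump. A hypothetical helper follows; cl_ntoh64 is standard complib byte-swapping, used here on the assumption that the GUIDs are stored in network order.

#include <stdio.h>
#include <complib/cl_byteswap.h>

static void
example_dump_ca( IN const osm_ca_info_t* const p_ca_info )
{
    uint8_t i;

    printf( "CA 0x%016llx, %u ports\n",
            (unsigned long long)cl_ntoh64( osm_ca_info_get_ca_guid( p_ca_info ) ),
            osm_ca_info_get_num_ports( p_ca_info ) );

    for( i = 0; i < osm_ca_info_get_num_ports( p_ca_info ); i++ )
        printf( "  port %u: guid 0x%016llx\n",
                osm_ca_info_get_port_num( p_ca_info, i ),
                (unsigned long long)cl_ntoh64(
                    osm_ca_info_get_port_guid( p_ca_info, i ) ) );
}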
+* +* SYNOPSIS +*/ +typedef struct _osm_vendor +{ + ib_al_handle_t h_al; + osm_log_t *p_log; + uint32_t ca_count; + osm_ca_info_t *p_ca_info; + uint32_t timeout; + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + +} osm_vendor_t; +/* +* FIELDS +* h_al +* Handle returned by AL open call (ib_open_al). +* +* p_log +* Pointer to the log object. +* +* ca_count +* Number of CA's in the array pointed to by p_ca_info. +* +* p_ca_info +* Pointer to dynamically allocated array of CA info objects. +* +* h_pool +* MAD Pool handle returned by ib_create_mad_pool at init time. +* +* timeout +* Transaction timeout time in milliseconds. +* +* SEE ALSO +*********/ + + +#define OSM_BIND_INVALID_HANDLE 0 + + +/****s* OpenSM: Vendor AL/osm_bind_handle_t +* NAME +* osm_bind_handle_t +* +* DESCRIPTION +* handle returned by the vendor transport bind call. +* +* SYNOPSIS +*/ +typedef void* osm_bind_handle_t; +/***********/ + +/****s* OpenSM/osm_vend_wrap_t +* NAME +* AL Vendor MAD Wrapper +* +* DESCRIPTION +* AL specific MAD wrapper. AL transport layer uses this for +* housekeeping. +* +* SYNOPSIS +*********/ +typedef struct _osm_vend_wrap_t +{ + uint32_t size; + osm_bind_handle_t h_bind; + ib_mad_element_t *p_elem; + ib_av_handle_t h_av; + void* p_resp_madw; + +}osm_vend_wrap_t; +/* +* FIELDS +* size +* Size of the allocated MAD +* +* h_bind +* Bind handle used on this transaction +* +* p_elem +* Pointer to the mad element structure associated with +* this mad. +* +* h_av +* Address vector handle used for this transaction. +* +* p_resp_madw +* Pointer to the mad wrapper structure used to hold the pending +* reponse to the mad, if any. If a response is expected, the +* wrapper for the reponse is allocated during the send call. +* +* SEE ALSO +*********/ + + +END_C_DECLS + +#endif /* _OSM_VENDOR_AL_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_api.h b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_api.h new file mode 100644 index 00000000..e4c61284 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_api.h @@ -0,0 +1,519 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Specification of the OpenSM transport API. 
+ * This API is OpenSM's view of the InfiniBand transport.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.4 $
+ */
+
+#ifndef _OSM_VENDOR_API_H_
+#define _OSM_VENDOR_API_H_
+
+#include
+#include
+#include
+
+#ifdef __cplusplus
+# define BEGIN_C_DECLS extern "C" {
+# define END_C_DECLS }
+#else /* !__cplusplus */
+# define BEGIN_C_DECLS
+# define END_C_DECLS
+#endif /* __cplusplus */
+
+BEGIN_C_DECLS
+
+/****s* OpenSM Vendor API/osm_vend_mad_recv_callback_t
+* NAME
+* osm_vend_mad_recv_callback_t
+*
+* DESCRIPTION
+* Function prototype for the vendor MAD receive callback.
+* The vendor layer calls this function for MAD receives.
+*
+* SYNOPSIS
+*/
+typedef void (*osm_vend_mad_recv_callback_t)(
+ IN osm_madw_t *p_madw,
+ IN void* bind_context,
+ IN osm_madw_t *p_req_madw );
+/*
+* PARAMETERS
+* p_madw
+* [in] The received MAD wrapper.
+*
+* bind_context
+* [in] User context supplied during the bind call.
+*
+* p_req_madw
+* [in] Pointer to the request mad wrapper that generated this response.
+* If the inbound MAD is not a response, this field is NULL.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****s* OpenSM Vendor API/osm_vend_mad_send_err_callback_t
+* NAME
+* osm_vend_mad_send_err_callback_t
+*
+* DESCRIPTION
+* Function prototype for the vendor send failure callback.
+* The vendor layer calls this function when MADs expecting
+* a response are completed in error, most likely due to a
+* timeout.
+*
+* SYNOPSIS
+*/
+typedef void (*osm_vend_mad_send_err_callback_t)(
+ IN void* bind_context,
+ IN osm_madw_t *p_madw );
+/*
+* PARAMETERS
+* bind_context
+* [in] User context supplied during the bind call.
+*
+* p_madw
+* [in] Pointer to the request mad that failed.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+* The vendor layer does not call this function (or any other)
+* for MADs that were not expecting a response.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_new
+* NAME
+* osm_vendor_new
+*
+* DESCRIPTION
+* Allocates and initializes a new osm_vendor_t object.
+* OpenSM calls this function before any other in the vendor API.
+* This object is passed as a parameter to all other vendor functions.
+*
+* SYNOPSIS
+*/
+osm_vendor_t*
+osm_vendor_new(
+ IN osm_log_t* const p_log,
+ IN const uint32_t timeout );
+/*
+* PARAMETERS
+* p_log
+* [in] Pointer to the log object to use.
+*
+* timeout
+* [in] Transaction timeout in milliseconds.
+*
+* RETURN VALUES
+* Returns a pointer to the vendor object.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_delete
+* NAME
+* osm_vendor_delete
+*
+* DESCRIPTION
+* Deallocates the vendor object.
+*
+* SYNOPSIS
+*/
+void
+osm_vendor_delete(
+ IN osm_vendor_t** const pp_vend );
+/*
+* PARAMETERS
+* pp_vend
+* [in/out] Pointer to a pointer to the vendor object to be deleted.
+*
+* RETURN VALUES
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_get_all_port_attr
+* NAME
+* osm_vendor_get_all_port_attr
+*
+* DESCRIPTION
+* Returns an array of available port attribute structures.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_vendor_get_all_port_attr(
+ IN osm_vendor_t* const p_vend,
+ IN ib_port_attr_t* const p_attr_array,
+ IN uint32_t* const p_num_ports );
+/*
+* PARAMETERS
+* p_vend
+* [in] Pointer to the vendor object.
+*
+* p_attr_array
+* [in/out] Pointer to a pre-allocated array of port attributes.
+* If it is NULL, the call only updates p_num_ports
+* and returns IB_INSUFFICIENT_MEMORY.
+*
+* p_num_ports
+* [in/out] Pointer to a variable to hold the total number of ports
+* available on the local machine.
+*
+* RETURN VALUES
+* IB_SUCCESS on success.
+* IB_INSUFFICIENT_MEMORY if the attribute array was not large enough.
+* The number of attributes needed is returned in p_num_ports.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_init
+* NAME
+* osm_vendor_init
+*
+* DESCRIPTION
+* The osm_vendor_init function initializes the vendor transport layer.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_vendor_init(
+ IN osm_vendor_t* const p_vend,
+ IN osm_log_t * const p_log,
+ IN const uint32_t timeout );
+/*
+* PARAMETERS
+* p_vend
+* [in] Pointer to the vendor object to initialize.
+*
+* p_log
+* [in] Pointer to OpenSM's log object. Vendor code may
+* use the log object to send messages to OpenSM's log.
+*
+* timeout
+* [in] Transaction timeout value in milliseconds.
+* A value of 0 disables timeouts.
+*
+* RETURN VALUE
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_bind
+* NAME
+* osm_vendor_bind
+*
+* DESCRIPTION
+* The osm_vendor_bind function registers with the vendor transport layer,
+* per MAD class and per port GUID, for MAD transport capability.
+*
+* SYNOPSIS
+*/
+osm_bind_handle_t
+osm_vendor_bind(
+ IN osm_vendor_t* const p_vend,
+ IN osm_bind_info_t* const p_bind_info,
+ IN osm_mad_pool_t* const p_mad_pool,
+ IN osm_vend_mad_recv_callback_t mad_recv_callback,
+ IN osm_vend_mad_send_err_callback_t send_err_callback,
+ IN void* context );
+/*
+* PARAMETERS
+* p_vend
+* [in] Pointer to the vendor object.
+*
+* p_bind_info
+* [in] Pointer to a struct defining the type of bind to perform.
+*
+* p_mad_pool
+* [in] Pointer to a mad wrappers pool to be used for allocating
+* mad wrappers on send and receive.
+*
+* mad_recv_callback
+* [in] The callback function to be invoked on mad receive.
+*
+* send_err_callback
+* [in] The callback function to be invoked on mad transaction errors.
+*
+* context
+* [in] The context to be provided to the callbacks as bind_ctx.
+*
+* RETURN VALUE
+* On success, a valid bind handle.
+* OSM_BIND_INVALID_HANDLE otherwise.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_unbind
+* NAME
+* osm_vendor_unbind
+*
+* DESCRIPTION
+* Unbinds the given bind handle (obtained by osm_vendor_bind).
+*
+* SYNOPSIS
+*/
+void
+osm_vendor_unbind(
+ IN osm_bind_handle_t h_bind );
+/*
+* PARAMETERS
+* h_bind
+* [in] The bind handle to release.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_get
+* NAME
+* osm_vendor_get
+*
+* DESCRIPTION
+* Obtains a mad wrapper holding the actual mad buffer to be sent via
+* the transport.
+*
+* SYNOPSIS
+*/
+ib_mad_t*
+osm_vendor_get(
+ IN osm_bind_handle_t h_bind,
+ IN const uint32_t mad_size,
+ IN osm_vend_wrap_t* const p_vend_wrap );
+/*
+* PARAMETERS
+* h_bind
+* [in] The bind handle obtained by calling osm_vendor_bind.
+*
+* mad_size
+* [in] The actual mad size required.
+*
+* p_vend_wrap
+* [out] The returned mad vendor wrapper.
+*
+* RETURN VALUE
+* A pointer to the MAD buffer on successful completion.
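+*
+* As an illustrative sketch (assuming a valid h_bind from
+* osm_vendor_bind, and using MAD_BLOCK_SIZE as the usual 256-byte
+* MAD size), a caller might allocate a MAD as follows:
+*
+* osm_vend_wrap_t vw;
+* ib_mad_t *p_mad = osm_vendor_get( h_bind, MAD_BLOCK_SIZE, &vw );
+* if( p_mad == NULL )
+* ; /* handle allocation failure */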
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_send
+* NAME
+* osm_vendor_send
+*
+* DESCRIPTION
+* Sends the given MAD wrapper through the vendor transport.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_vendor_send(
+ IN osm_bind_handle_t h_bind,
+ IN osm_madw_t* const p_madw,
+ IN boolean_t const resp_expected );
+/*
+* PARAMETERS
+* h_bind
+* [in] The bind handle obtained by calling osm_vendor_bind.
+*
+* p_madw
+* [in] Pointer to the Mad Wrapper structure for the MAD to be sent.
+*
+* resp_expected
+* [in] Boolean value declaring the mad as a request (expecting a response).
+*
+* RETURN VALUE
+* IB_SUCCESS on successful completion.
+*
+* NOTES
+* 1. Only mads that expect a response are tracked for transaction completion.
+* 2. A mad that does not expect a response is put back immediately
+* after being sent.
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_put
+* NAME
+* osm_vendor_put
+*
+* DESCRIPTION
+* Returns a mad vendor wrapper to the mad pool. This also returns the
+* mad buffer to the transport.
+*
+* SYNOPSIS
+*/
+void
+osm_vendor_put(
+ IN osm_bind_handle_t h_bind,
+ IN osm_vend_wrap_t* const p_vend_wrap );
+/*
+* PARAMETERS
+* h_bind
+* [in] The bind handle obtained by calling osm_vendor_bind.
+*
+* p_vend_wrap
+* [in] Pointer to the mad vendor wrapper to put back into the pool.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****i* OpenSM Vendor API/osm_vendor_local_lid_change
+* NAME
+* osm_vendor_local_lid_change
+*
+* DESCRIPTION
+* Notifies the vendor transport layer that the local address
+* has changed. This allows the vendor layer to perform housekeeping
+* functions such as address vector updates.
+*
+* SYNOPSIS
+*/
+ib_api_status_t
+osm_vendor_local_lid_change(
+ IN osm_bind_handle_t h_bind );
+/*
+* PARAMETERS
+* h_bind
+* [in] The bind handle obtained by calling osm_vendor_bind.
+*
+* RETURN VALUE
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_set_sm
+* NAME
+* osm_vendor_set_sm
+*
+* DESCRIPTION
+* Modifies the port info for the bound port to set the "IS_SM" bit
+* according to the value given (TRUE or FALSE).
+*
+* SYNOPSIS
+*/
+void
+osm_vendor_set_sm(
+ IN osm_bind_handle_t h_bind,
+ IN boolean_t is_sm_val );
+/*
+* PARAMETERS
+* h_bind
+* [in] Bind handle for this port.
+*
+* is_sm_val
+* [in] If TRUE, sets the IS_SM bit; if FALSE, clears it.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+/****f* OpenSM Vendor API/osm_vendor_set_debug
+* NAME
+* osm_vendor_set_debug
+*
+* DESCRIPTION
+* Modifies the vendor specific debug level.
+*
+* SYNOPSIS
+*/
+void
+osm_vendor_set_debug(
+ IN osm_vendor_t* const p_vend,
+ IN int32_t level );
+/*
+* PARAMETERS
+* p_vend
+* [in] Vendor handle.
+*
+* level
+* [in] Vendor specific debug level.
+*
+* RETURN VALUE
+* None.
+*
+* NOTES
+*
+* SEE ALSO
+*********/
+
+END_C_DECLS
+
+#endif /* _OSM_VENDOR_API_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_sa_api.h b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_sa_api.h
new file mode 100644
index 00000000..734d83fb
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_sa_api.h
@@ -0,0 +1,879 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Specification of the OpenSM SA Client API. This API uses the basic osm + * vendor API to provide SA Client interface. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#ifndef _OSM_VENDOR_SA_API_H_ +#define _OSM_VENDOR_SA_API_H_ + +#include + +#ifdef __cplusplus +# define BEGIN_C_DECLS extern "C" { +# define END_C_DECLS } +#else /* !__cplusplus */ +# define BEGIN_C_DECLS +# define END_C_DECLS +#endif /* __cplusplus */ + +BEGIN_C_DECLS + +/****d* OpenSM Vendor SA Client/osmv_flags_t +* NAME +* osmv_flags_t +* +* DESCRIPTION +* Access layer flags used to direct the operation of various calls. +* +* SYNOPSIS +*/ +typedef uint32_t osmv_flags_t; +#define OSM_SA_FLAGS_SYNC 0x00000001 +/* +* VALUES +* OSM_SA_FLAGS_SYNC +* Indicates that the given operation should be performed synchronously. +* The call will block until it completes. Callbacks will still be +* invoked. +* +* SEE ALSO +* osmv_query_sa +*****/ + +/****d* OpenSM Vendor SA Client/osmv_query_type_t +* NAME +* osmv_query_type_t +* +* DESCRIPTION +* Abstracted queries supported by the access layer. +* +* SYNOPSIS +*/ +typedef enum _osmv_query_type +{ + OSMV_QUERY_USER_DEFINED, + + OSMV_QUERY_ALL_SVC_RECS, + OSMV_QUERY_SVC_REC_BY_NAME, + OSMV_QUERY_SVC_REC_BY_ID, + + OSMV_QUERY_CLASS_PORT_INFO, + + OSMV_QUERY_NODE_REC_BY_NODE_GUID, + OSMV_QUERY_PORT_REC_BY_LID, + OSMV_QUERY_PORT_REC_BY_LID_AND_NUM, + + OSMV_QUERY_VLARB_BY_LID_PORT_BLOCK, + OSMV_QUERY_SLVL_BY_LID_AND_PORTS, + + OSMV_QUERY_PATH_REC_BY_PORT_GUIDS, + OSMV_QUERY_PATH_REC_BY_GIDS, + OSMV_QUERY_PATH_REC_BY_LIDS, + + OSMV_QUERY_UD_MULTICAST_SET, + OSMV_QUERY_UD_MULTICAST_DELETE, + + OSMV_QUERY_MULTIPATH_REC, + +} osmv_query_type_t; +/* +* VALUES +* OSMV_QUERY_USER_DEFINED +* Query the SA based on user-defined input. Queries of this type +* should reference an osmv_user_query_t structure as input to the +* query. +* +* OSMV_QUERY_SVC_REC_BY_NAME +* Query for service records based on the service name. Queries of +* this type should reference an ib_svc_name_t structure as input +* to the query. +* +* OSMV_QUERY_SVC_REC_BY_ID +* Query for service records based on the service ID. Queries of +* this type should reference an ib_net64_t value that indicates +* the ID of the service being requested. +* +* OSMV_QUERY_NODE_REC_BY_NODE_GUID +* Query for node information based on the node's GUID. 
Queries of +* this type should reference an ib_net64_t value that indicates +* the GUID of the node being requested. +* +* OSMV_QUERY_PORT_REC_BY_LID +* Query for port information based on the port's base LID. Queries +* of this type should reference an ib_net16_t value that indicates +* the base LID of the port being requested. +* +* OSMV_QUERY_PORT_REC_BY_LID_AND_NUM +* Query for port information based on the port's LID and port num. +* Queries of this type should reference an osmv_user_query_t +* structure as input to the query. The port num and lid should +* be provided by it. +* +* OSMV_QUERY_PATH_REC_BY_PORT_GUIDS +* Query for path records between the specified pair of port GUIDs. +* Queries of this type should reference an osmv_guid_pair_t +* structure that indicates the GUIDs of the path being requested. +* +* OSMV_QUERY_PATH_REC_BY_GIDS +* Query for path records between the specified pair of port GIDs. +* Queries of this type should reference an osmv_gid_pair_t +* structure that indicates the GIDs of the path being requested. +* +* OSMV_QUERY_PATH_REC_BY_LIDS +* Query for path records between the specified pair of port LIDs. +* Queries of this type should reference an osmv_lid_pair_t +* structure that indicates the LIDs of the path being requested. +* +* NOTES +* This enum is used to define abstracted queries provided by the access +* layer. Users may issue queries not listed here by sending MADs directly +* to subnet administration or a class manager. These queries are +* intended to represent those most often used by clients. +* +* SEE ALSO +* osmv_query, osmv_query_req_t, osmv_user_query_t, osmv_gid_pair_t, +* osmv_lid_pair_t osmv_guid_pair_t +*****/ + +/****s* OpenSM Vendor SA Client/osmv_user_query_t +* NAME +* osmv_user_query_t +* +* DESCRIPTION +* User-defined query information. +* +* SYNOPSIS +*/ +typedef struct _osmv_user_query +{ + uint8_t method; + ib_net16_t attr_id; + ib_net16_t attr_offset; + ib_net32_t attr_mod; + ib_net64_t comp_mask; + void *p_attr; +} osmv_user_query_t; +/* +* FIELDS +* +* method +* Method to be used +* +* attr_id +* Attribute identifier of query data. +* +* attr_offset +* Size of the query attribute, in 8-byte words. Users can set +* this value by passing in the sizeof( attribute ) into the +* ib_get_attr_offset() routine. +* +* attr_mod +* Attribute modifier for query request. +* +* comp_mask +* Indicates the attribute components that are specified for the +* query. +* +* p_attr +* References the attribute structure used as input into the query. +* This field is ignored if comp_mask is set to 0. +* +* NOTES +* This structure is used to describe a user-defined query. The attribute +* ID, attribute offset, component mask, and attribute structure must match +* those defined by the IBA specification. Users should refer to chapter +* 15 of the IBA specification for additional details. +* +* SEE ALSO +* osmv_query_type_t, ib_get_attr_offset, ib_get_attr_size, osmv_query_sa +*****/ + +/****s* OpenSM Vendor SA Client/osmv_gid_pair_t +* NAME +* osmv_gid_pair_t +* +* DESCRIPTION +* Source and destination GIDs. +* +* SYNOPSIS +*/ +typedef struct _osmv_gid_pair +{ + ib_gid_t src_gid; + ib_gid_t dest_gid; +} osmv_gid_pair_t; +/* +* FIELDS +* src_gid +* Source GID of a path. +* +* dest_gid +* Destination GID of a path. +* +* NOTES +* This structure is used to describe the endpoints of a path. +* +* SEE ALSO +* ib_gid_t +*****/ + +/****s* OpenSM Vendor SA Client/osmv_lid_pair_t +* NAME +* osmv_lid_pair_t +* +* DESCRIPTION +* Source and destination LIDs. 
+* +* SYNOPSIS +*/ +typedef struct _osmv_lid_pair +{ + ib_net16_t src_lid; + ib_net16_t dest_lid; +} osmv_lid_pair_t; +/* +* FIELDS +* src_lid +* Source LID of a path. +* +* dest_lid +* Destination LID of a path. +* +* NOTES +* This structure is used to describe the endpoints of a path. +*****/ + +/****s* OpenSM Vendor SA Client/osmv_guid_pair_t +* NAME +* osmv_guid_pair_t +* +* DESCRIPTION +* Source and destination GUIDs. These may be port or channel adapter +* GUIDs, depending on the context in which this structure is used. +* +* SYNOPSIS +*/ +typedef struct _osmv_guid_pair +{ + ib_net64_t src_guid; + ib_net64_t dest_guid; +} osmv_guid_pair_t; +/* +* FIELDS +* src_guid +* Source GUID of a path. +* +* dest_guid +* Destination GUID of a path. +* +* NOTES +* This structure is used to describe the endpoints of a path. The given +* GUID pair may belong to either ports or channel adapters. +* +* SEE ALSO +* ib_guid_t +*****/ + +/****s* OpenSM Vendor SA Client/osmv_multipath_req_t +* NAME +* osmv_multipath_req_t +* +* DESCRIPTION +* Fields from which to generate a MultiPathRecord request. +* +* SYNOPSIS +*/ +typedef struct _osmv_multipath_req_t +{ + ib_net64_t comp_mask; + uint16_t pkey; + boolean_t reversible; + uint8_t num_path; + uint8_t sl; + uint8_t independence; + uint8_t sgid_count; + uint8_t dgid_count; + ib_gid_t gids[IB_MULTIPATH_MAX_GIDS]; +} osmv_multipath_req_t; +/* +* FIELDS +* +* NOTES +* This structure is used to describe a multipath request. +* +* SEE ALSO +*****/ + +/****s* OpenSM Vendor SA Client/osmv_query_res_t +* NAME +* osmv_query_res_t +* +* DESCRIPTION +* Contains the results of a subnet administration query. +* +* SYNOPSIS +*/ +typedef struct _osmv_query_res +{ + const void *query_context; + ib_api_status_t status; + osmv_query_type_t query_type; + uint32_t result_cnt; + osm_madw_t *p_result_madw; +} osmv_query_res_t; +/* +* FIELDS +* query_context +* User-defined context information associated with the query +* through the osm_vendor_query_sa call. +* +* status +* Indicates the success of the query operation. +* +* query_type +* Indicates the type of query for which the results are being +* returned. This matches the query_type specified through the +* osm_vendor_query_sa call. +* +* result_cnt +* The number of result structures that were returned by the query. +* +* p_result_madw +* For queries returning IB_SUCCESS or IB_REMOTE_ERROR, this +* references the MAD wrapper returned by subnet administration +* containing the list of results or the returned error code. +* +* NOTES +* A query result structure is returned to a client through their +* osmv_pfn_query_cb_t routine to notify them of the results of a subnet +* administration query. If the query was successful or received an error +* from subnet administration, p_result_madw will reference a MAD wrapper +* containing the results. The MAD referenced by p_result_madw is owned by +* the user and remains available even after their callback returns. Users +* must call osm_mad_pool_put() to return the MAD wrapper back to the +* mad pool when they are done accessing the results. +* +* To retrieve individual result structures from the p_result_madw, users +* may call osmv_get_query_result(). 
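+*
+* For illustration, a sketch of a callback draining a node record
+* query result (p_query_res is the result delivered to the callback;
+* p_pool is an assumed pointer to the mad pool supplied at bind time):
+*
+* uint32_t i;
+* for( i = 0; i < p_query_res->result_cnt; i++ )
+* {
+* ib_node_record_t *p_rec = osmv_get_query_node_rec(
+* p_query_res->p_result_madw, i );
+* /* process p_rec */
+* }
+* osm_mad_pool_put( p_pool, p_query_res->p_result_madw );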
+* +* SEE ALSO +* osmv_query_sa, osmv_pfn_query_cb_t, ib_api_status_t, +* osmv_query_status_t, osmv_query_type_t, +* osmv_get_query_result +*****/ + +/****f* OpenSM Vendor SA Client/osmv_get_query_result +* NAME +* osmv_get_query_result +* +* DESCRIPTION +* Retrieves a result structure from a MADW returned by a call to +* osmv_query_sa(). +* +* SYNOPSIS +*/ +static inline void* +osmv_get_query_result( + IN osm_madw_t *p_result_madw, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_madw ); + p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw ); + CL_ASSERT( p_sa_mad ); + CL_ASSERT( ib_get_attr_size( p_sa_mad->attr_offset ) * (result_index + 1) + + IB_SA_MAD_HDR_SIZE <= p_result_madw->mad_size ); + + return( p_sa_mad->data + + (ib_get_attr_size( p_sa_mad->attr_offset ) * result_index) ); +} +/* +* PARAMETERS +* p_result_madw +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a result structure from a +* call to osmv_query_sa(). The type of result structure must be known to +* the user either through the user's context or the query_type returned as +* part of the osmv_query_res_t structure. +* +* SEE ALSO +* osmv_query_res_t, osm_madw_t +*****/ + +/****f* OpenSM Vendor SA Client/osmv_get_query_path_rec +* NAME +* osmv_get_query_path_rec +* +* DESCRIPTION +* Retrieves a path record result from a MAD returned by a call to +* osmv_query_sa(). +* +* SYNOPSIS +*/ +static inline ib_path_rec_t* +osmv_get_query_path_rec( + IN osm_madw_t *p_result_madw, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_madw ); + p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PATH_RECORD ); + + return( (ib_path_rec_t*)osmv_get_query_result( p_result_madw, result_index ) ); +} +/* +* PARAMETERS +* p_result_madw +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a path record result from +* a call to osmv_query_sa(). +* +* SEE ALSO +* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_path_rec_t +*****/ + +/****f* OpenSM Vendor SA Client/osmv_get_query_portinfo_rec +* NAME +* osmv_get_query_portinfo_rec +* +* DESCRIPTION +* Retrieves a port info record result from a MAD returned by a call to +* osmv_query_sa(). +* +* SYNOPSIS +*/ +static inline ib_portinfo_record_t* +osmv_get_query_portinfo_rec( + IN osm_madw_t *p_result_madw, + IN uint32_t result_index ) +{ + ib_sa_mad_t *p_sa_mad; + + CL_ASSERT( p_result_madw ); + p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw ); + CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_PORTINFO_RECORD ); + + return( (ib_portinfo_record_t*)osmv_get_query_result( p_result_madw, + result_index ) ); +} +/* +* PARAMETERS +* p_result_madw +* [in] This is a reference to the MAD returned as a result of the +* query. +* +* result_index +* [in] A zero-based index indicating which result to return. +* +* NOTES +* This call returns a pointer to the start of a port info record result +* from a call to osmv_query_sa(). 
+*
+* SEE ALSO
+* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_portinfo_record_t
+*****/
+
+/****f* OpenSM Vendor SA Client/osmv_get_query_node_rec
+* NAME
+* osmv_get_query_node_rec
+*
+* DESCRIPTION
+* Retrieves a node record result from a MAD returned by a call to
+* osmv_query_sa().
+*
+* SYNOPSIS
+*/
+static inline ib_node_record_t*
+osmv_get_query_node_rec(
+ IN osm_madw_t *p_result_madw,
+ IN uint32_t result_index )
+{
+ ib_sa_mad_t *p_sa_mad;
+
+ CL_ASSERT( p_result_madw );
+ p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw );
+ CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_NODE_RECORD );
+
+ return( (ib_node_record_t*)osmv_get_query_result( p_result_madw,
+ result_index ) );
+}
+/*
+* PARAMETERS
+* p_result_madw
+* [in] This is a reference to the MAD returned as a result of the
+* query.
+*
+* result_index
+* [in] A zero-based index indicating which result to return.
+*
+* NOTES
+* This call returns a pointer to the start of a node record result from
+* a call to osmv_query_sa().
+*
+* SEE ALSO
+* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_node_record_t
+*****/
+
+/****f* OpenSM Vendor SA Client/osmv_get_query_svc_rec
+* NAME
+* osmv_get_query_svc_rec
+*
+* DESCRIPTION
+* Retrieves a service record result from a MAD returned by a call to
+* osmv_query_sa().
+*
+* SYNOPSIS
+*/
+static inline ib_service_record_t*
+osmv_get_query_svc_rec(
+ IN osm_madw_t *p_result_madw,
+ IN uint32_t result_index )
+{
+ ib_sa_mad_t *p_sa_mad;
+
+ CL_ASSERT( p_result_madw );
+ p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw );
+ CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_SERVICE_RECORD );
+
+ return( (ib_service_record_t*)osmv_get_query_result( p_result_madw,
+ result_index ) );
+}
+/*
+* PARAMETERS
+* p_result_madw
+* [in] This is a reference to the MAD returned as a result of the
+* query.
+*
+* result_index
+* [in] A zero-based index indicating which result to return.
+*
+* NOTES
+* This call returns a pointer to the start of a service record result from
+* a call to osmv_query_sa().
+*
+* SEE ALSO
+* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_service_record_t
+*****/
+
+/****f* OpenSM Vendor SA Client/osmv_get_query_mc_rec
+* NAME
+* osmv_get_query_mc_rec
+*
+* DESCRIPTION
+* Retrieves a multicast record result from a MAD returned by a call to
+* osmv_query_sa().
+*
+* SYNOPSIS
+*/
+static inline ib_member_rec_t*
+osmv_get_query_mc_rec(
+ IN osm_madw_t *p_result_madw,
+ IN uint32_t result_index )
+{
+ ib_sa_mad_t *p_sa_mad;
+
+ CL_ASSERT( p_result_madw );
+ p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw );
+ CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_MCMEMBER_RECORD );
+
+ return( (ib_member_rec_t*)osmv_get_query_result( p_result_madw,
+ result_index ) );
+}
+/*
+* PARAMETERS
+* p_result_madw
+* [in] This is a reference to the MAD returned as a result of the
+* query.
+*
+* result_index
+* [in] A zero-based index indicating which result to return.
+*
+* NOTES
+* This call returns a pointer to the start of a multicast member record
+* result from a call to osmv_query_sa().
+*
+* SEE ALSO
+* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_member_rec_t
+*****/
+
+/****f* OpenSM Vendor SA Client/osmv_get_query_inform_info_rec
+* NAME
+* osmv_get_query_inform_info_rec
+*
+* DESCRIPTION
+* Retrieves an InformInfo record result from a MAD returned by
+* a call to osmv_query_sa().
+*
+* SYNOPSIS
+*/
+static inline ib_inform_info_record_t*
+osmv_get_query_inform_info_rec(
+ IN osm_madw_t *p_result_madw,
+ IN uint32_t result_index )
+{
+ ib_sa_mad_t *p_sa_mad;
+
+ CL_ASSERT( p_result_madw );
+ p_sa_mad = (ib_sa_mad_t*)osm_madw_get_mad_ptr( p_result_madw );
+ CL_ASSERT( p_sa_mad && p_sa_mad->attr_id == IB_MAD_ATTR_INFORM_INFO_RECORD );
+
+ return( (ib_inform_info_record_t*)osmv_get_query_result( p_result_madw,
+ result_index ) );
+}
+/*
+* PARAMETERS
+* p_result_madw
+* [in] This is a reference to the MAD returned as a result of the
+* query.
+*
+* result_index
+* [in] A zero-based index indicating which result to return.
+*
+* NOTES
+* This call returns a pointer to the start of an InformInfo record result
+* from a call to osmv_query_sa().
+*
+* SEE ALSO
+* osmv_query_res_t, osm_madw_t, osmv_get_query_result, ib_inform_info_record_t
+*****/
+
+/****f* OpenSM Vendor SA Client/osmv_pfn_query_cb_t
+* NAME
+* osmv_pfn_query_cb_t
+*
+* DESCRIPTION
+* User-defined callback invoked on completion of subnet administration
+* query.
+*
+* SYNOPSIS
+*/
+typedef void
+(*osmv_pfn_query_cb_t)(
+ IN osmv_query_res_t *p_query_res );
+/*
+* PARAMETERS
+* p_query_res
+* [in] This is a reference to a structure containing the result of
+* the query.
+*
+* NOTES
+* This routine is invoked to notify a client of the result of a subnet
+* administration query. The p_query_res parameter references the result
+* of the query and, in the case of a successful query, any information
+* returned by subnet administration.
+*
+* In the kernel, this callback is usually invoked using a tasklet,
+* dependent on the implementation of the underlying verbs provider driver.
+*
+* SEE ALSO
+* osmv_query_res_t
+*****/
+
+/****s* OpenSM Vendor SA Client/osmv_query_req_t
+* NAME
+* osmv_query_req_t
+*
+* DESCRIPTION
+* Information used to request an access layer provided query of subnet
+* administration.
+*
+* SYNOPSIS
+*/
+typedef struct _osmv_query_req
+{
+ osmv_query_type_t query_type;
+ const void *p_query_input;
+ ib_net64_t sm_key;
+
+ uint32_t timeout_ms;
+ uint32_t retry_cnt;
+ osmv_flags_t flags;
+
+ const void *query_context;
+ osmv_pfn_query_cb_t pfn_query_cb;
+} osmv_query_req_t;
+/*
+* FIELDS
+* query_type
+* Indicates the type of query that the access layer should
+* perform.
+*
+* p_query_input
+* A pointer to the input for the query. The data referenced by
+* this structure is dependent on the type of query being requested
+* and is determined by the specified query_type.
+*
+* sm_key
+* The SM_Key to be provided with the SA MAD for authentication.
+* Normally 0 is used.
+*
+* timeout_ms
+* Specifies the number of milliseconds to wait for a response for
+* this query until retrying or timing out the request.
+*
+* retry_cnt
+* Specifies the number of times that the query will be retried
+* before failing the request.
+*
+* flags
+* Used to describe the mode of operation. Set to OSM_SA_FLAGS_SYNC to
+* process the called routine synchronously.
+*
+* query_context
+* User-defined context information associated with this query.
+* The context data is returned to the user as a part of their
+* query callback.
+*
+* pfn_query_cb
+* A user-defined callback that is invoked upon completion of the
+* query.
+*
+* NOTES
+* This structure is used when requesting an osm vendor provided query
+* of subnet administration. Clients specify the type of query through
+* the query_type field. Based on the type of query, the p_query_input
+* field is set to reference the appropriate data structure.
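+*
+* As an illustrative sketch (assuming h_bind was obtained from
+* osmv_bind_sa, my_query_cb is a user-defined callback, and node_guid
+* is an ib_net64_t), a synchronous node record query might be issued
+* as follows:
+*
+* osmv_query_req_t req;
+* memset( &req, 0, sizeof( req ) );
+* req.query_type = OSMV_QUERY_NODE_REC_BY_NODE_GUID;
+* req.p_query_input = &node_guid;
+* req.timeout_ms = 1000;
+* req.retry_cnt = 3;
+* req.flags = OSM_SA_FLAGS_SYNC;
+* req.pfn_query_cb = my_query_cb;
+* osmv_query_sa( h_bind, &req );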
+* +* The information referenced by the p_query_input field is one of the +* following: +* +* -- a NULL terminated service name +* -- a service id +* -- a single GUID +* -- a pair of GUIDs specified through an osmv_guid_pair_t structure +* -- a pair of GIDs specified through an osmv_gid_pair_t structure +* +* SEE ALSO +* osmv_query_type_t, osmv_pfn_query_cb_t, osmv_guid_pair_t, +* osmv_gid_pair_t +*****/ + +/****f* OpenSM Vendor SA Client/osmv_bind_sa +* NAME +* osmv_bind_sa +* +* DESCRIPTION +* Bind to the SA service and return a handle to be used for later +* queries. +* +* +* SYNOPSIS +*/ +osm_bind_handle_t +osmv_bind_sa( + IN osm_vendor_t * const p_vend, + IN osm_mad_pool_t * const p_mad_pool, + IN ib_net64_t port_guid + ); +/* +* PARAMETERS +* p_vend +* [in] an osm_vendor object to work with +* +* p_mad_pool +* [in] mad pool to obtain madw from +* +* port_guid +* [in] the port guid to attach to. +* +* RETURN VALUE +* Bind handle to be used for later SA queries or OSM_BIND_INVALID_HANDLE +* +* NOTES +* +* SEE ALSO +* osmv_query_sa +*********/ + +/****f* OpenSM Vendor SA Client/osmv_query_sa +* NAME +* osmv_query_sa +* +* DESCRIPTION +* Query the SA given an SA query request (similar to IBAL ib_query). +* +* SYNOPSIS +*/ +ib_api_status_t +osmv_query_sa( + IN osm_bind_handle_t h_bind, + IN const osmv_query_req_t * const p_query_req + ); +/* +* PARAMETERS +* h_bind +* [in] bind handle for this port. Should be previously +* obtained by calling osmv_bind_sa +* +* p_query_req +* [in] an SA query request structure. +* +* RETURN VALUE +* IB_SUCCESS if completed successfuly (or in ASYNC mode +* if the request was sent). +* +* NOTES +* +* SEE ALSO +* osmv_bind_sa +*********/ + +END_C_DECLS + +#endif /* _OSM_VENDOR_SA_API_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_select.h b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_select.h new file mode 100644 index 00000000..b766b67b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/vendor/osm_vendor_select.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Include file that defines which vendor files to compile. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#ifndef _OSM_VENDOR_SELECT_H_ +#define _OSM_VENDOR_SELECT_H_ + +///////////////////////////////////////////////////// +// +// MAD INTERFACE SELECTION +// +///////////////////////////////////////////////////// + +/* + TEST and UMADT must be specified in the 'make' command line, + with VENDOR=test or VENDOR=umadt. +*/ +#ifndef OSM_VENDOR_INTF_OPENIB +#ifndef OSM_VENDOR_INTF_TEST +#ifndef OSM_VENDOR_INTF_UMADT +#ifndef OSM_VENDOR_INTF_MTL +#ifndef OSM_VENDOR_INTF_TS +#ifndef OSM_VENDOR_INTF_SIM +#ifndef OSM_VENDOR_INTF_AL +#define OSM_VENDOR_INTF_AL +#endif /* AL */ +#endif /* TS */ +#endif /* SIM */ +#endif /* MTL */ +#endif /* UMADT */ +#endif /* TEST */ +#endif /* OPENIB */ + +#endif /* _OSM_VENDOR_SELECT_H_ */ + diff --git a/branches/Ndi/ulp/opensm/user/include/vendor/winosm_common.h b/branches/Ndi/ulp/opensm/user/include/vendor/winosm_common.h new file mode 100644 index 00000000..75738f90 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/include/vendor/winosm_common.h @@ -0,0 +1,246 @@ +#ifndef _OSM_COMMON_H_ +#define _OSM_COMMON_H_ + +#include +#include +#include +#include +#include +#include +#include + +#pragma warning(disable : 4996) +#pragma warning(disable : 4100) + +struct timezone { + int tz_minuteswest; /* minutes west of Greenwich */ + int tz_dsttime; /* type of dst correction */ +}; + + +# define no_argument 0 +# define required_argument 1 +# define optional_argument 2 +/* Global variables for getopt_long */ +char *optarg; +/* +extern int optind; +extern int opterr; +extern int optopt; +extern int iArg; + */ +struct option +{ + const char *name; + int has_arg; + int *flag; + int val; +}; + + +/************************************************************************/ +static char* +get_char_option(const char* optstring, + char*const* argv,int argc, + int iArg, int* opt_ind,char* opt_p); +int +getopt_long_only(int argc, char *const*argv, + const char *optstring, + const struct option *longopts, int *longindex); +/**************************************************************************/ +static __inline +void FileTimeToTimeval(LPFILETIME pft, struct timeval * ptv) +{ /* Note that LONGLONG is a 64-bit value */ +LONGLONG ll; + +if(!pft || !ptv) +goto Exit; + +ll = ((LONGLONG) pft->dwHighDateTime << 32); +ll += (LONGLONG) pft->dwLowDateTime; +#ifdef __GNUC__ +ll -= 116444736000000000ll; +#else +ll -= 116444736000000000; +#endif + +ptv->tv_sec = (long) (ll / 10000000); +ptv->tv_usec = (long) (ll - ((LONGLONG)(ptv->tv_sec) * 10000000)) / 10; + +Exit:; +}/* FileTimeToTimeval */ + +/********************************************************************************/ +static __inline +int gettimeofday(struct timeval *ptv, struct timezone *tzp) +{ +static int QueryCounter = 2; +FILETIME CurrentTime; +/* TODO : We need to add it , since in DDK - compiler does not like vars that are not in use */ +UNREFERENCED_PARAMETER(tzp); +if(!ptv) +goto Exit; + +if(QueryCounter) +{ +static LARGE_INTEGER Frequency; +static LARGE_INTEGER Offset; /* counter offset for right time*/ +static LARGE_INTEGER LastCounter; +LARGE_INTEGER Time; +LARGE_INTEGER Counter; +/* HANDLE hThread = GetCurrentThread(); +int ThreadPrio = GetThreadPriority(hThread); + +SetThreadPriority(hThread, THREAD_PRIORITY_TIME_CRITICAL); */ +GetSystemTimeAsFileTime(&CurrentTime); +QueryPerformanceCounter(&Counter); +/* SetThreadPriority(hThread, ThreadPrio); */ + +if(QueryCounter == 2) +{ +QueryCounter = 1; +if(!QueryPerformanceFrequency(&Frequency)) +{ +QueryCounter 
= 0;
+Frequency.QuadPart = 10000000; /* prevent division by 0 */
+}
+
+/* get time as a large integer */
+Counter.HighPart &= 0x7fl; /* Clear the highest bits to prevent overflows */
+Offset.LowPart = CurrentTime.dwLowDateTime;
+Offset.HighPart = (LONG) CurrentTime.dwHighDateTime;
+Offset.QuadPart -= Counter.QuadPart * 10000000 / Frequency.QuadPart;
+}
+
+/* Convert counter to a 100 nanoseconds resolution timer value. */
+
+Counter.HighPart &= 0x7fl; /* Clear the highest bits to prevent overflows */
+Counter.QuadPart *= 10000000; /* Because we need time stamp in units of 100 ns */
+Counter.QuadPart /= Frequency.QuadPart; /* counter of 0.1 microseconds */
+
+if(LastCounter.QuadPart > Counter.QuadPart)
+{ /* Counter value wrapped */
+#ifdef __GNUC__
+Offset.QuadPart += (0x7f00000000ll * 10000000ll) / Frequency.QuadPart;
+#else
+Offset.QuadPart += (0x7f00000000 * 10000000) / Frequency.QuadPart;
+#endif
+}
+LastCounter = Counter;
+
+/* Add the offset calculated in the previous call */
+Counter.QuadPart += Offset.QuadPart;
+
+/* get time as a large integer */
+Time.LowPart = CurrentTime.dwLowDateTime;
+Time.HighPart = (LONG) CurrentTime.dwHighDateTime;
+
+/* keep time difference within an interval of +- 0.1 seconds
+relative to the time function by adjusting the counters offset */
+
+if(((Time.QuadPart + 1000000) < Counter.QuadPart) ||
+((Time.QuadPart - 1000000) > Counter.QuadPart))
+{ /* Adjust the offset */
+Offset.QuadPart += Time.QuadPart - Counter.QuadPart;
+Counter.QuadPart = Time.QuadPart;
+}
+
+/* Now let's use the adjusted performance counter time for the time stamp */
+CurrentTime.dwLowDateTime = Counter.LowPart;
+CurrentTime.dwHighDateTime = Counter.HighPart;
+}
+else
+{
+GetSystemTimeAsFileTime(&CurrentTime);
+}
+
+FileTimeToTimeval(&CurrentTime,ptv);
+
+Exit:;
+return(0);
+}/* int gettimeofday(struct timeval *ptv, void *tzp) */
+/*****************************************************************************/
+
+
+
+
+#define getpid() GetCurrentProcessId()
+#define sleep(sec) SleepEx((sec)*1000,TRUE)
+#define usleep(usec) SleepEx(usec/1000,TRUE)
+//#define MT_ALIGN8 __declspec(align(8))
+/* Verify the correct ETIMEDOUT value is defined in all compiled files */
+#ifndef ETIMEDOUT
+#define ETIMEDOUT (10060)
+#endif
+#define strtoull _strtoui64
+#define OSM_MAX_LOG_NAME_SIZE 2048
+#define unlink(str) _unlink(str)
+#define strnicmp _strnicmp
+
+/* The following defines replace syslog.h */
+#define openlog(a,b,c)
+
+#define closelog()
+
+static __inline void
+syslog(int priority, ...) {}
+
+#define LOG_INFO 0
+#define LOG_WARNING 1
+#define LOG_ERR 2
+/*****************************************/
+
+/****f* OpenSM: osm_common/GetOsmTempPath
+* NAME
+* GetOsmTempPath
+*
+* DESCRIPTION
+* The function retrieves the temp path defined in Windows using its API
+*
+* SYNOPSIS
+*/
+char*
+GetOsmTempPath(void);
+/*
+* PARAMETERS
+* NONE
+*
+* RETURN VALUE
+* This function returns a string containing the default temp path in Windows
+*
+* NOTES
+*/
+
+/****f* OpenSM: osm_common/GetOsmCachePath
+* NAME
+* GetOsmCachePath
+*
+* DESCRIPTION
+* The function retrieves the path of the cache directory. This directory is
+* the etc dir under the installation directory of the Mellanox stack.
+* The installation directory should be pointed to by the WinIB_HOME variable.
+* If the WinIB_HOME variable is missing, or there is no /etc/ dir under it,
+* then the function will return the GetOsmTempPath() value.
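+*
+* For example (an illustrative sketch; the opensm.opts file name is
+* hypothetical):
+*
+* char path[OSM_MAX_LOG_NAME_SIZE];
+* sprintf( path, "%s\\opensm.opts", GetOsmCachePath() );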
+* +* SYNOPSIS +*/ +char* +GetOsmCachePath(void); +/* +* PARAMETERS +* NONE +* +* RETURN VALUE +* This function returns string containing the default cache path for osm use. +* +* NOTES +*/ + +/* Implementation of strtok_r for windows: since strtok in windows is safe, + just ignore the last variable, and call strtok. */ +static inline +char *strtok_r(char *s1, const char *s2, char **lasts) +{ + return strtok(s1, s2); +} +#endif /* _OSM_COMMON_H_ */ diff --git a/branches/Ndi/ulp/opensm/user/libopensm/Makefile b/branches/Ndi/ulp/opensm/user/libopensm/Makefile new file mode 100755 index 00000000..58189757 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libopensm/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/ulp/opensm/user/libopensm/SOURCES b/branches/Ndi/ulp/opensm/user/libopensm/SOURCES new file mode 100644 index 00000000..7acd92f2 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libopensm/SOURCES @@ -0,0 +1,64 @@ +!if $(FREEBUILD) +TARGETNAME=opensm_ibal +!else +TARGETNAME=opensm_ibald +!endif +TARGETTYPE=LIBRARY + +!if !defined(WINIBHOME) +WINIBHOME=..\..\..\.. +!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +!if defined(OSM_TARGET) +TARGETPATH=$(OSM_TARGET)\bin\user\obj$(BUILD_ALT_DIR) +!else +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) +!endif + +USE_NTDLL=1 +OVR_DIR=..\addon + + +SOURCES=\ + osm_log.c \ + osm_mad_pool.c \ + osm_helper.c + +OSM_HOME=.. + +TARGETLIBS=\ +!if $(FREEBUILD) + $(LIBPATH)\*\ibal.lib \ + $(LIBPATH)\*\complib.lib \ + $(TARGETPATH)\*\osmv_ibal.lib \ + $(CRT_LIB_PATH)\msvcrt.lib + +!else + $(LIBPATH)\*\ibald.lib \ + $(LIBPATH)\*\complibd.lib \ + $(TARGETPATH)\*\osmv_ibald.lib \ + $(CRT_LIB_PATH)\msvcrt.lib +!endif + +#DO NOT TOUCH the order of search path , until ib_types.h merging process will be done +INCLUDES= \ + $(OSM_HOME)\include; \ + $(OSM_HOME); \ + $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; + +# Could be any special flag needed for this project +USER_C_FLAGS=$(USER_C_FLAGS) /MD +#Add preproccessor definitions +C_DEFINES=$(C_DEFINES) -DWIN32 -D__WIN__ -D__i386__ -Dinline=__inline -DMT_LITTLE_ENDIAN -DOSM_VENDOR_INTF_AL +C_DEFINES=$(C_DEFINES) -I.. -DHAVE_CONFIG_H +!if !$(FREEBUILD) +#C_DEFINES=$(C_DEFINES) -D_DEBUG -DDEBUG -DDBG +C_DEFINES=$(C_DEFINES) +!endif + +LINKER_FLAGS= $(LINKER_FLAGS) +MSC_WARNING_LEVEL= /W3 +#MSC_OPTIMIZATION= /O0 diff --git a/branches/Ndi/ulp/opensm/user/libopensm/osm_helper.c b/branches/Ndi/ulp/opensm/user/libopensm/osm_helper.c new file mode 100644 index 00000000..55c01c9f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libopensm/osm_helper.c @@ -0,0 +1,2534 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of opensm helper functions. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.19 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include + +#define LINE_LENGTH 256 + +/* we use two tables - one for queries and one for responses */ +const char* const __ib_sa_method_str[] = +{ + "RESERVED", /* 0 */ + "SubnAdmGet", /* 1 */ + "SubnAdmSet", /* 2 */ + "RESERVED", /* 3 */ + "RESERVED", /* 4 */ + "RESERVED", /* 5 */ + "SubnAdmReport", /* 6 */ + "RESERVED", /* 7 */ + "RESERVED", /* 8 */ + "RESERVED", /* 9 */ + "RESERVED", /* A */ + "RESERVED", /* B */ + "RESERVED", /* C */ + "RESERVED", /* D */ + "RESERVED", /* E */ + "RESERVED", /* F */ + "RESERVED", /* 10 */ + "RESERVED", /* 11 */ + "SubnAdmGetTable", /* 12 */ + "SubnAdmGetTraceTable", /* 13 */ + "SubnAdmGetMulti", /* 14 */ + "SubnAdmDelete", /* 15 */ + "UNKNOWN" /* 16 */ +}; + +const char* const __ib_sa_resp_method_str[] = +{ + "RESERVED", /* 80 */ + "SubnAdmGetResp", /* 81 */ + "RESERVED (SetResp?)", /* 82 */ + "RESERVED", /* 83 */ + "RESERVED", /* 84 */ + "RESERVED", /* 85 */ + "SubnAdmReportResp", /* 86 */ + "RESERVED", /* 87 */ + "RESERVED", /* 88 */ + "RESERVED", /* 89 */ + "RESERVED", /* 8A */ + "RESERVED", /* 8B */ + "RESERVED", /* 8C */ + "RESERVED", /* 8D */ + "RESERVED", /* 8E */ + "RESERVED", /* 8F */ + "RESERVED", /* 90 */ + "RESERVED", /* 91 */ + "SubnAdmGetTableResp", /* 92 */ + "RESERVED", /* 93 */ + "SubnAdmGetMultiResp", /* 94 */ + "SubnAdmDeleteResp", /* 95 */ + "UNKNOWN" +}; + +#define OSM_SA_METHOD_STR_UNKNOWN_VAL 0x16 + +const char* const __ib_sm_method_str[] = +{ + "RESERVED0", /* 0 */ + "SubnGet", /* 1 */ + "SubnSet", /* 2 */ + "RESERVED3", /* 3 */ + "RESERVED4", /* 4 */ + "SubnTrap", /* 5 */ + "RESERVED6", /* 6 */ + "SubnTrapRepress", /* 7 */ + "RESERVED8", /* 8 */ + "RESERVED9", /* 9 */ + "RESERVEDA", /* A */ + "RESERVEDB", /* B */ + "RESERVEDC", /* C */ + "RESERVEDD", /* D */ + "RESERVEDE", /* E */ + "RESERVEDF", /* F */ + "RESERVED10", /* 10 */ + "SubnGetResp", /* 11 */ + "RESERVED12", /* 12 */ + "RESERVED13", /* 13 */ + "RESERVED14", /* 14 */ + "RESERVED15", /* 15 */ + "RESERVED16", /* 16 */ + "RESERVED17", /* 17 */ + "RESERVED18", /* 18 */ + "RESERVED19", /* 19 */ + "RESERVED1A", /* 1A */ + "RESERVED1B", /* 1B */ + "RESERVED1C", /* 1C */ + "RESERVED1D", /* 1D */ + "RESERVED1E", /* 1E */ + "RESERVED1F", /* 1F */ + "UNKNOWN" /* 20 */ +}; + +#define OSM_SM_METHOD_STR_UNKNOWN_VAL 0x21 + +const char* const __ib_sm_attr_str[] = +{ + "RESERVED", /* 0 */ + "ClassPortInfo", /* 1 */ + "Notice", /* 2 */ + "InformInfo", /* 3 */ + "RESERVED", /* 4 */ + "RESERVED", /* 5 */ + "RESERVED", /* 6 */ + "RESERVED", /* 7 */ + "RESERVED", /* 8 */ + "RESERVED", /* 9 */ + 
"RESERVED", /* A */ + "RESERVED", /* B */ + "RESERVED", /* C */ + "RESERVED", /* D */ + "RESERVED", /* E */ + "RESERVED", /* F */ + "NodeDescription", /* 10 */ + "NodeInfo", /* 11 */ + "SwitchInfo", /* 12 */ + "UNKNOWN", /* 13 */ + "GUIDInfo", /* 14 */ + "PortInfo", /* 15 */ + "P_KeyTable", /* 16 */ + "SLtoVLMappingTable", /* 17 */ + "VLArbitrationTable", /* 18 */ + "LinearForwardingTable", /* 19 */ + "RandomForwardingTable", /* 1A */ + "MulticastForwardingTable", /* 1B */ + "UNKNOWN", /* 1C */ + "UNKNOWN", /* 1D */ + "UNKNOWN", /* 1E */ + "UNKNOWN", /* 1F */ + "SMInfo", /* 20 */ + "UNKNOWN" /* 21 - always highest value */ +}; + +#define OSM_SM_ATTR_STR_UNKNOWN_VAL 0x21 + +const char* const __ib_sa_attr_str[] = +{ + "RESERVED", /* 0 */ + "ClassPortInfo", /* 1 */ + "Notice", /* 2 */ + "InformInfo", /* 3 */ + "RESERVED", /* 4 */ + "RESERVED", /* 5 */ + "RESERVED", /* 6 */ + "RESERVED", /* 7 */ + "RESERVED", /* 8 */ + "RESERVED", /* 9 */ + "RESERVED", /* A */ + "RESERVED", /* B */ + "RESERVED", /* C */ + "RESERVED", /* D */ + "RESERVED", /* E */ + "RESERVED", /* F */ + "RESERVED", /* 10 */ + "NodeRecord", /* 11 */ + "PortInfoRecord", /* 12 */ + "SLtoVLMappingTableRecord", /* 13 */ + "SwitchInfoRecord", /* 14 */ + "LinearForwardingTableRecord", /* 15 */ + "RandomForwardingTableRecord", /* 16 */ + "MulticastForwardingTableRecord", /* 17 */ + "SMInfoRecord", /* 18 */ + "RESERVED", /* 19 */ + "RandomForwardingTable", /* 1A */ + "MulticastForwardingTable", /* 1B */ + "UNKNOWN", /* 1C */ + "UNKNOWN", /* 1D */ + "UNKNOWN", /* 1E */ + "UNKNOWN", /* 1F */ + "LinkRecord", /* 20 */ + "UNKNOWN", /* 21 */ + "UNKNOWN", /* 22 */ + "UNKNOWN", /* 23 */ + "UNKNOWN", /* 24 */ + "UNKNOWN", /* 25 */ + "UNKNOWN", /* 26 */ + "UNKNOWN", /* 27 */ + "UNKNOWN", /* 28 */ + "UNKNOWN", /* 29 */ + "UNKNOWN", /* 2A */ + "UNKNOWN", /* 2B */ + "UNKNOWN", /* 2C */ + "UNKNOWN", /* 2D */ + "UNKNOWN", /* 2E */ + "UNKNOWN", /* 2F */ + "GuidInfoRecord", /* 30 */ + "ServiceRecord", /* 31 */ + "UNKNOWN", /* 32 */ + "P_KeyTableRecord", /* 33 */ + "UNKNOWN", /* 34 */ + "PathRecord", /* 35 */ + "VLArbitrationTableRecord", /* 36 */ + "UNKNOWN", /* 37 */ + "MCMemberRecord", /* 38 */ + "TraceRecord", /* 39 */ + "MultiPathRecord", /* 3A */ + "ServiceAssociationRecord", /* 3B */ + "UNKNOWN", /* 3C */ + "UNKNOWN", /* 3D */ + "UNKNOWN", /* 3E */ + "UNKNOWN", /* 3F */ + "UNKNOWN", /* 40 */ + "UNKNOWN", /* 41 */ + "UNKNOWN", /* 42 */ + "UNKNOWN", /* 43 */ + "UNKNOWN", /* 44 */ + "UNKNOWN", /* 45 */ + "UNKNOWN", /* 46 */ + "UNKNOWN", /* 47 */ + "UNKNOWN", /* 48 */ + "UNKNOWN", /* 49 */ + "UNKNOWN", /* 4A */ + "UNKNOWN", /* 4B */ + "UNKNOWN", /* 4C */ + "UNKNOWN", /* 4D */ + "UNKNOWN", /* 4E */ + "UNKNOWN", /* 4F */ + "UNKNOWN", /* 50 */ + "UNKNOWN", /* 51 */ + "UNKNOWN", /* 52 */ + "UNKNOWN", /* 53 */ + "UNKNOWN", /* 54 */ + "UNKNOWN", /* 55 */ + "UNKNOWN", /* 56 */ + "UNKNOWN", /* 57 */ + "UNKNOWN", /* 58 */ + "UNKNOWN", /* 59 */ + "UNKNOWN", /* 5A */ + "UNKNOWN", /* 5B */ + "UNKNOWN", /* 5C */ + "UNKNOWN", /* 5D */ + "UNKNOWN", /* 5E */ + "UNKNOWN", /* 5F */ + "UNKNOWN", /* 60 */ + "UNKNOWN", /* 61 */ + "UNKNOWN", /* 62 */ + "UNKNOWN", /* 63 */ + "UNKNOWN", /* 64 */ + "UNKNOWN", /* 65 */ + "UNKNOWN", /* 66 */ + "UNKNOWN", /* 67 */ + "UNKNOWN", /* 68 */ + "UNKNOWN", /* 69 */ + "UNKNOWN", /* 6A */ + "UNKNOWN", /* 6B */ + "UNKNOWN", /* 6C */ + "UNKNOWN", /* 6D */ + "UNKNOWN", /* 6E */ + "UNKNOWN", /* 6F */ + "UNKNOWN", /* 70 */ + "UNKNOWN", /* 71 */ + "UNKNOWN", /* 72 */ + "UNKNOWN", /* 73 */ + "UNKNOWN", /* 74 */ + "UNKNOWN", /* 75 */ + 
"UNKNOWN", /* 76 */ + "UNKNOWN", /* 77 */ + "UNKNOWN", /* 78 */ + "UNKNOWN", /* 79 */ + "UNKNOWN", /* 7A */ + "UNKNOWN", /* 7B */ + "UNKNOWN", /* 7C */ + "UNKNOWN", /* 7D */ + "UNKNOWN", /* 7E */ + "UNKNOWN", /* 7F */ + "UNKNOWN", /* 80 */ + "UNKNOWN", /* 81 */ + "UNKNOWN", /* 82 */ + "UNKNOWN", /* 83 */ + "UNKNOWN", /* 84 */ + "UNKNOWN", /* 85 */ + "UNKNOWN", /* 86 */ + "UNKNOWN", /* 87 */ + "UNKNOWN", /* 88 */ + "UNKNOWN", /* 89 */ + "UNKNOWN", /* 8A */ + "UNKNOWN", /* 8B */ + "UNKNOWN", /* 8C */ + "UNKNOWN", /* 8D */ + "UNKNOWN", /* 8E */ + "UNKNOWN", /* 8F */ + "UNKNOWN", /* 90 */ + "UNKNOWN", /* 91 */ + "UNKNOWN", /* 92 */ + "UNKNOWN", /* 93 */ + "UNKNOWN", /* 94 */ + "UNKNOWN", /* 95 */ + "UNKNOWN", /* 96 */ + "UNKNOWN", /* 97 */ + "UNKNOWN", /* 98 */ + "UNKNOWN", /* 99 */ + "UNKNOWN", /* 9A */ + "UNKNOWN", /* 9B */ + "UNKNOWN", /* 9C */ + "UNKNOWN", /* 9D */ + "UNKNOWN", /* 9E */ + "UNKNOWN", /* 9F */ + "UNKNOWN", /* A0 */ + "UNKNOWN", /* A1 */ + "UNKNOWN", /* A2 */ + "UNKNOWN", /* A3 */ + "UNKNOWN", /* A4 */ + "UNKNOWN", /* A5 */ + "UNKNOWN", /* A6 */ + "UNKNOWN", /* A7 */ + "UNKNOWN", /* A8 */ + "UNKNOWN", /* A9 */ + "UNKNOWN", /* AA */ + "UNKNOWN", /* AB */ + "UNKNOWN", /* AC */ + "UNKNOWN", /* AD */ + "UNKNOWN", /* AE */ + "UNKNOWN", /* AF */ + "UNKNOWN", /* B0 */ + "UNKNOWN", /* B1 */ + "UNKNOWN", /* B2 */ + "UNKNOWN", /* B3 */ + "UNKNOWN", /* B4 */ + "UNKNOWN", /* B5 */ + "UNKNOWN", /* B6 */ + "UNKNOWN", /* B7 */ + "UNKNOWN", /* B8 */ + "UNKNOWN", /* B9 */ + "UNKNOWN", /* BA */ + "UNKNOWN", /* BB */ + "UNKNOWN", /* BC */ + "UNKNOWN", /* BD */ + "UNKNOWN", /* BE */ + "UNKNOWN", /* BF */ + "UNKNOWN", /* C0 */ + "UNKNOWN", /* C1 */ + "UNKNOWN", /* C2 */ + "UNKNOWN", /* C3 */ + "UNKNOWN", /* C4 */ + "UNKNOWN", /* C5 */ + "UNKNOWN", /* C6 */ + "UNKNOWN", /* C7 */ + "UNKNOWN", /* C8 */ + "UNKNOWN", /* C9 */ + "UNKNOWN", /* CA */ + "UNKNOWN", /* CB */ + "UNKNOWN", /* CC */ + "UNKNOWN", /* CD */ + "UNKNOWN", /* CE */ + "UNKNOWN", /* CF */ + "UNKNOWN", /* D0 */ + "UNKNOWN", /* D1 */ + "UNKNOWN", /* D2 */ + "UNKNOWN", /* D3 */ + "UNKNOWN", /* D4 */ + "UNKNOWN", /* D5 */ + "UNKNOWN", /* D6 */ + "UNKNOWN", /* D7 */ + "UNKNOWN", /* D8 */ + "UNKNOWN", /* D9 */ + "UNKNOWN", /* DA */ + "UNKNOWN", /* DB */ + "UNKNOWN", /* DC */ + "UNKNOWN", /* DD */ + "UNKNOWN", /* DE */ + "UNKNOWN", /* DF */ + "UNKNOWN", /* E0 */ + "UNKNOWN", /* E1 */ + "UNKNOWN", /* E2 */ + "UNKNOWN", /* E3 */ + "UNKNOWN", /* E4 */ + "UNKNOWN", /* E5 */ + "UNKNOWN", /* E6 */ + "UNKNOWN", /* E7 */ + "UNKNOWN", /* E8 */ + "UNKNOWN", /* E9 */ + "UNKNOWN", /* EA */ + "UNKNOWN", /* EB */ + "UNKNOWN", /* EC */ + "UNKNOWN", /* ED */ + "UNKNOWN", /* EE */ + "UNKNOWN", /* EF */ + "UNKNOWN", /* F0 */ + "UNKNOWN", /* F1 */ + "UNKNOWN", /* F2 */ + "InformInfoRecord", /* F3 */ + "UNKNOWN" /* F4 - always highest value */ +}; + +#define OSM_SA_ATTR_STR_UNKNOWN_VAL 0xF4 + + +/********************************************************************** + **********************************************************************/ +const char* +ib_get_sa_method_str( + IN uint8_t method ) +{ + if (method & 0x80) + { + method = method & 0x7f; + if( method >= OSM_SA_METHOD_STR_UNKNOWN_VAL ) + method = OSM_SA_METHOD_STR_UNKNOWN_VAL; + /* it is a response - use the response table */ + return( __ib_sa_resp_method_str[method] ); + } + else + { + if( method >= OSM_SA_METHOD_STR_UNKNOWN_VAL ) + method = OSM_SA_METHOD_STR_UNKNOWN_VAL; + return( __ib_sa_method_str[method] ); + } +} + +/********************************************************************** + 
**********************************************************************/ +const char* +ib_get_sm_method_str( + IN uint8_t method ) +{ + if (method & 0x80) method = (method & 0x0F) | 0x10; + if( method >= OSM_SM_METHOD_STR_UNKNOWN_VAL ) + method = OSM_SM_METHOD_STR_UNKNOWN_VAL; + return( __ib_sm_method_str[method] ); +} + +/********************************************************************** + **********************************************************************/ +const char* +ib_get_sm_attr_str( + IN ib_net16_t attr ) +{ + uint16_t host_attr; + host_attr = cl_ntoh16( attr ); + + if( host_attr >= OSM_SM_ATTR_STR_UNKNOWN_VAL ) + host_attr = OSM_SM_ATTR_STR_UNKNOWN_VAL; + + return( __ib_sm_attr_str[host_attr] ); +} + +/********************************************************************** + **********************************************************************/ +const char* +ib_get_sa_attr_str( + IN ib_net16_t attr ) +{ + uint16_t host_attr; + host_attr = cl_ntoh16( attr ); + + if( host_attr >= OSM_SA_ATTR_STR_UNKNOWN_VAL ) + host_attr = OSM_SA_ATTR_STR_UNKNOWN_VAL; + + return( __ib_sa_attr_str[host_attr] ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_dbg_do_line( + IN char** pp_local, + IN const uint32_t buf_size, + IN const char* const p_prefix_str, + IN const char* const p_new_str, + IN uint32_t* const p_total_len ) +{ + char line[LINE_LENGTH]; + uint32_t len; + + sprintf( line, "%s%s", p_prefix_str, p_new_str ); + len = (uint32_t) strlen( line ); + *p_total_len += len; + if( *p_total_len + sizeof('\0') > buf_size ) + return( IB_INSUFFICIENT_MEMORY ); + + strcpy( *pp_local, line ); + *pp_local += len; + return( IB_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_dbg_get_capabilities_str( + IN char* p_buf, + IN const uint32_t buf_size, + IN const char* const p_prefix_str, + IN const ib_port_info_t* const p_pi ) +{ + uint32_t total_len = 0; + char *p_local = p_buf; + + strcpy( p_local, "Capability Mask:\n" ); + p_local += strlen( p_local ); + + if( p_pi->capability_mask & IB_PORT_CAP_RESV0 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV0\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_IS_SM ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_IS_SM\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_NOTICE ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_NOTICE\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_TRAP ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_TRAP\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_IPD ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_IPD\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_AUTO_MIG ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_AUTO_MIG\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_SL_MAP ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_SL_MAP\n", &total_len ) != IB_SUCCESS ) + return; + } + if( 
p_pi->capability_mask & IB_PORT_CAP_HAS_NV_MKEY ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_NV_MKEY\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_NV_PKEY ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_NV_PKEY\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_LED_INFO ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_LED_INFO\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_SM_DISAB ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_SM_DISAB\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_SYS_IMG_GUID ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_SYS_IMG_GUID\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_PKEY_SW_EXT_PORT_TRAP ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_PKEY_SW_EXT_PORT_TRAP\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV13 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV13\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV14 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV14\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV15 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV15\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_COM_MGT ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_COM_MGT\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_SNMP ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_SNMP\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_REINIT ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_REINIT\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_DEV_MGT ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_DEV_MGT\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_VEND_CLS ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_VEND_CLS\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_DR_NTC ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_DR_NTC\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_CAP_NTC ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_CAP_NTC\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_BM ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_BM\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_LINK_RT_LATENCY ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_HAS_LINK_RT_LATENCY\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_HAS_CLIENT_REREG ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + 
"IB_PORT_CAP_HAS_CLIENT_REREG\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV26 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV26\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV27 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV27\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV28) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV28\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV29 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV29\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV30 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV30\n", &total_len ) != IB_SUCCESS ) + return; + } + if( p_pi->capability_mask & IB_PORT_CAP_RESV31 ) + { + if( osm_dbg_do_line( &p_local, buf_size, p_prefix_str, + "IB_PORT_CAP_RESV31\n", &total_len ) != IB_SUCCESS ) + return; + } + + return; +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_port_info( + IN osm_log_t* const p_log, + IN const ib_net64_t node_guid, + IN const ib_net64_t port_guid, + IN const uint8_t port_num, + IN const ib_port_info_t* const p_pi, + IN const osm_log_level_t log_level ) +{ + char buf[BUF_SIZE]; + + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "PortInfo dump:\n" + "\t\t\t\tport number.............0x%X\n" + "\t\t\t\tnode_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tport_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tm_key...................0x%016" PRIx64 "\n" + "\t\t\t\tsubnet_prefix...........0x%016" PRIx64 "\n" + "\t\t\t\tbase_lid................0x%X\n" + "\t\t\t\tmaster_sm_base_lid......0x%X\n" + "\t\t\t\tcapability_mask.........0x%X\n" + "\t\t\t\tdiag_code...............0x%X\n" + "\t\t\t\tm_key_lease_period......0x%X\n" + "\t\t\t\tlocal_port_num..........0x%X\n" + "\t\t\t\tlink_width_enabled......0x%X\n" + "\t\t\t\tlink_width_supported....0x%X\n" + "\t\t\t\tlink_width_active.......0x%X\n" + "\t\t\t\tlink_speed_supported....0x%X\n" + "\t\t\t\tport_state..............%s\n" + "\t\t\t\tstate_info2.............0x%X\n" + "\t\t\t\tm_key_protect_bits......0x%X\n" + "\t\t\t\tlmc.....................0x%X\n" + "\t\t\t\tlink_speed..............0x%X\n" + "\t\t\t\tmtu_smsl................0x%X\n" + "\t\t\t\tvl_cap_init_type........0x%X\n" + "\t\t\t\tvl_high_limit...........0x%X\n" + "\t\t\t\tvl_arb_high_cap.........0x%X\n" + "\t\t\t\tvl_arb_low_cap..........0x%X\n" + "\t\t\t\tinit_rep_mtu_cap........0x%X\n" + "\t\t\t\tvl_stall_life...........0x%X\n" + "\t\t\t\tvl_enforce..............0x%X\n" + "\t\t\t\tm_key_violations........0x%X\n" + "\t\t\t\tp_key_violations........0x%X\n" + "\t\t\t\tq_key_violations........0x%X\n" + "\t\t\t\tguid_cap................0x%X\n" + "\t\t\t\tclient_reregister.......0x%X\n" + "\t\t\t\tsubnet_timeout..........0x%X\n" + "\t\t\t\tresp_time_value.........0x%X\n" + "\t\t\t\terror_threshold.........0x%X\n" + "", + port_num, + cl_ntoh64( node_guid ), + cl_ntoh64( port_guid ), + cl_ntoh64( p_pi->m_key ), + cl_ntoh64( p_pi->subnet_prefix ), + cl_ntoh16( p_pi->base_lid ), + cl_ntoh16( p_pi->master_sm_base_lid ), + cl_ntoh32( p_pi->capability_mask ), + cl_ntoh16( p_pi->diag_code ), + 
cl_ntoh16( p_pi->m_key_lease_period ), + p_pi->local_port_num, + p_pi->link_width_enabled, + p_pi->link_width_supported, + p_pi->link_width_active, + ib_port_info_get_link_speed_sup( p_pi ), + ib_get_port_state_str( ib_port_info_get_port_state( p_pi ) ), + p_pi->state_info2, + ib_port_info_get_mpb( p_pi ), + ib_port_info_get_lmc( p_pi ), + p_pi->link_speed, + p_pi->mtu_smsl, + p_pi->vl_cap, + p_pi->vl_high_limit, + p_pi->vl_arb_high_cap, + p_pi->vl_arb_low_cap, + p_pi->mtu_cap, + p_pi->vl_stall_life, + p_pi->vl_enforce, + cl_ntoh16( p_pi->m_key_violations ), + cl_ntoh16( p_pi->p_key_violations ), + cl_ntoh16( p_pi->q_key_violations ), + p_pi->guid_cap, + ib_port_info_get_client_rereg( p_pi ), + ib_port_info_get_timeout( p_pi ), + p_pi->resp_time_value, + p_pi->error_threshold + ); + + /* show the capabilities mask */ + osm_dbg_get_capabilities_str( buf, BUF_SIZE, "\t\t\t\t", p_pi ); + + osm_log( p_log, log_level, "%s", buf ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_portinfo_record( + IN osm_log_t* const p_log, + IN const ib_portinfo_record_t* const p_pir, + IN const osm_log_level_t log_level ) +{ + char buf[BUF_SIZE]; + const ib_port_info_t * const p_pi = &p_pir->port_info; + + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "PortInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tEndPortLid..............0x%X\n" + "\t\t\t\tPortNum.................0x%X\n" + "\t\t\t\tReserved................0x%X\n" + "\t\t\t\tPortInfo dump:\n" + "\t\t\t\tm_key...................0x%016" PRIx64 "\n" + "\t\t\t\tsubnet_prefix...........0x%016" PRIx64 "\n" + "\t\t\t\tbase_lid................0x%X\n" + "\t\t\t\tmaster_sm_base_lid......0x%X\n" + "\t\t\t\tcapability_mask.........0x%X\n" + "\t\t\t\tdiag_code...............0x%X\n" + "\t\t\t\tm_key_lease_period......0x%X\n" + "\t\t\t\tlocal_port_num..........0x%X\n" + "\t\t\t\tlink_width_enabled......0x%X\n" + "\t\t\t\tlink_width_supported....0x%X\n" + "\t\t\t\tlink_width_active.......0x%X\n" + "\t\t\t\tlink_speed_supported....0x%X\n" + "\t\t\t\tport_state..............%s\n" + "\t\t\t\tstate_info2.............0x%X\n" + "\t\t\t\tm_key_protect_bits......0x%X\n" + "\t\t\t\tlmc.....................0x%X\n" + "\t\t\t\tlink_speed..............0x%X\n" + "\t\t\t\tmtu_smsl................0x%X\n" + "\t\t\t\tvl_cap_init_type........0x%X\n" + "\t\t\t\tvl_high_limit...........0x%X\n" + "\t\t\t\tvl_arb_high_cap.........0x%X\n" + "\t\t\t\tvl_arb_low_cap..........0x%X\n" + "\t\t\t\tinit_rep_mtu_cap........0x%X\n" + "\t\t\t\tvl_stall_life...........0x%X\n" + "\t\t\t\tvl_enforce..............0x%X\n" + "\t\t\t\tm_key_violations........0x%X\n" + "\t\t\t\tp_key_violations........0x%X\n" + "\t\t\t\tq_key_violations........0x%X\n" + "\t\t\t\tguid_cap................0x%X\n" + "\t\t\t\tsubnet_timeout..........0x%X\n" + "\t\t\t\tresp_time_value.........0x%X\n" + "\t\t\t\terror_threshold.........0x%X\n" + "", + cl_ntoh16(p_pir->lid), + p_pir->port_num, + p_pir->resv, + cl_ntoh64( p_pi->m_key ), + cl_ntoh64( p_pi->subnet_prefix ), + cl_ntoh16( p_pi->base_lid ), + cl_ntoh16( p_pi->master_sm_base_lid ), + cl_ntoh32( p_pi->capability_mask ), + cl_ntoh16( p_pi->diag_code ), + cl_ntoh16( p_pi->m_key_lease_period ), + p_pi->local_port_num, + p_pi->link_width_enabled, + p_pi->link_width_supported, + p_pi->link_width_active, + ib_port_info_get_link_speed_sup( p_pi ), + ib_get_port_state_str( ib_port_info_get_port_state( p_pi ) ), + 
p_pi->state_info2, + ib_port_info_get_mpb( p_pi ), + ib_port_info_get_lmc( p_pi ), + p_pi->link_speed, + p_pi->mtu_smsl, + p_pi->vl_cap, + p_pi->vl_high_limit, + p_pi->vl_arb_high_cap, + p_pi->vl_arb_low_cap, + p_pi->mtu_cap, + p_pi->vl_stall_life, + p_pi->vl_enforce, + cl_ntoh16( p_pi->m_key_violations ), + cl_ntoh16( p_pi->p_key_violations ), + cl_ntoh16( p_pi->q_key_violations ), + p_pi->guid_cap, + ib_port_info_get_timeout( p_pi ), + p_pi->resp_time_value, + p_pi->error_threshold + ); + + /* show the capabilities mask */ + osm_dbg_get_capabilities_str( buf, BUF_SIZE, "\t\t\t\t", p_pi ); + + osm_log( p_log, log_level, "%s", buf ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_guidinfo_record( + IN osm_log_t* const p_log, + IN const ib_guidinfo_record_t* const p_gir, + IN const osm_log_level_t log_level ) +{ + const ib_guid_info_t * const p_gi = &p_gir->guid_info; + + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "GUIDInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tLid.....................0x%X\n" + "\t\t\t\tBlockNum................0x%X\n" + "\t\t\t\tReserved................0x%X\n" + "\t\t\t\tGUIDInfo dump:\n" + "\t\t\t\tReserved................0x%X\n" + "\t\t\t\tGUID 0..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 1..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 2..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 3..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 4..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 5..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 6..................0x%016" PRIx64 "\n" + "\t\t\t\tGUID 7..................0x%016" PRIx64 "\n", + cl_ntoh16(p_gir->lid), + p_gir->block_num, + p_gir->resv, + cl_ntoh32(p_gir->reserved), + cl_ntoh64(p_gi->guid[0]), + cl_ntoh64(p_gi->guid[1]), + cl_ntoh64(p_gi->guid[2]), + cl_ntoh64(p_gi->guid[3]), + cl_ntoh64(p_gi->guid[4]), + cl_ntoh64(p_gi->guid[5]), + cl_ntoh64(p_gi->guid[6]), + cl_ntoh64(p_gi->guid[7]) + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_node_info( + IN osm_log_t* const p_log, + IN const ib_node_info_t* const p_ni, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "NodeInfo dump:\n" + "\t\t\t\tbase_version............0x%X\n" + "\t\t\t\tclass_version...........0x%X\n" + "\t\t\t\tnode_type...............%s\n" + "\t\t\t\tnum_ports...............0x%X\n" + "\t\t\t\tsys_guid................0x%016" PRIx64 "\n" + "\t\t\t\tnode_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tport_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tpartition_cap...........0x%X\n" + "\t\t\t\tdevice_id...............0x%X\n" + "\t\t\t\trevision................0x%X\n" + "\t\t\t\tport_num................0x%X\n" + "\t\t\t\tvendor_id...............0x%X\n" + "", + p_ni->base_version, + p_ni->class_version, + ib_get_node_type_str( p_ni->node_type ), + p_ni->num_ports, + cl_ntoh64( p_ni->sys_guid ), + cl_ntoh64( p_ni->node_guid ), + cl_ntoh64( p_ni->port_guid ), + cl_ntoh16( p_ni->partition_cap ), + cl_ntoh16( p_ni->device_id ), + cl_ntoh32( p_ni->revision ), + ib_node_info_get_local_port_num( p_ni ), + cl_ntoh32( ib_node_info_get_vendor_id( p_ni ) ) + ); + } +} + +/********************************************************************** + 
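Every dump routine here follows the same shape: test osm_log_is_active() first and format only if the level is enabled, so a filtered-out verbosity costs a single bitmask test instead of a multi-kilobyte formatting pass. A minimal sketch of that guard, assuming a hypothetical my_log_t whose level field is a bitmask like osm_log_t's (names are illustrative):

    #include <stdint.h>
    #include <stdio.h>
    #include <inttypes.h>

    typedef struct { uint8_t level; } my_log_t;  // stand-in for the real log object

    static void dump_guid(const my_log_t *p_log, uint8_t level, uint64_t guid)
    {
        if (!(p_log->level & level))  // the cheap test osm_log_is_active() performs
            return;                   // skip all formatting when filtered out
        printf("guid 0x%016" PRIx64 "\n", guid);
    }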
**********************************************************************/ +void +osm_dump_node_record( + IN osm_log_t* const p_log, + IN const ib_node_record_t* const p_nr, + IN const osm_log_level_t log_level ) +{ + const ib_node_info_t * const p_ni = &p_nr->node_info; + + if( osm_log_is_active( p_log, log_level ) ) + { + char desc[sizeof(p_nr->node_desc.description) + 1]; + + memcpy(desc, p_nr->node_desc.description, + sizeof(p_nr->node_desc.description)); + desc[sizeof(desc) - 1] = '\0'; + osm_log( p_log, log_level, + "Node Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tLid.....................0x%X\n" + "\t\t\t\tReserved................0x%X\n" + "\t\t\t\tNodeInfo dump:\n" + "\t\t\t\tbase_version............0x%X\n" + "\t\t\t\tclass_version...........0x%X\n" + "\t\t\t\tnode_type...............%s\n" + "\t\t\t\tnum_ports...............0x%X\n" + "\t\t\t\tsys_guid................0x%016" PRIx64 "\n" + "\t\t\t\tnode_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tport_guid...............0x%016" PRIx64 "\n" + "\t\t\t\tpartition_cap...........0x%X\n" + "\t\t\t\tdevice_id...............0x%X\n" + "\t\t\t\trevision................0x%X\n" + "\t\t\t\tport_num................0x%X\n" + "\t\t\t\tvendor_id...............0x%X\n" + "\t\t\t\tNodeDescription\n" + "\t\t\t\t%s\n" + "", + cl_ntoh16(p_nr->lid), + cl_ntoh16(p_nr->resv), + p_ni->base_version, + p_ni->class_version, + ib_get_node_type_str( p_ni->node_type ), + p_ni->num_ports, + cl_ntoh64( p_ni->sys_guid ), + cl_ntoh64( p_ni->node_guid ), + cl_ntoh64( p_ni->port_guid ), + cl_ntoh16( p_ni->partition_cap ), + cl_ntoh16( p_ni->device_id ), + cl_ntoh32( p_ni->revision ), + ib_node_info_get_local_port_num( p_ni ), + cl_ntoh32( ib_node_info_get_vendor_id( p_ni )), + desc + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_path_record( + IN osm_log_t* const p_log, + IN const ib_path_rec_t* const p_pr, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "PathRecord dump:\n" + "\t\t\t\tresv0...................0x%016" PRIx64 "\n" + "\t\t\t\tdgid....................0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tsgid....................0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tdlid....................0x%X\n" + "\t\t\t\tslid....................0x%X\n" + "\t\t\t\thop_flow_raw............0x%X\n" + "\t\t\t\ttclass..................0x%X\n" + "\t\t\t\tnum_path_revers.........0x%X\n" + "\t\t\t\tpkey....................0x%X\n" + "\t\t\t\tsl......................0x%X\n" + "\t\t\t\tmtu.....................0x%X\n" + "\t\t\t\trate....................0x%X\n" + "\t\t\t\tpkt_life................0x%X\n" + "\t\t\t\tpreference..............0x%X\n" + "\t\t\t\tresv2...................0x%X\n" + "\t\t\t\tresv3...................0x%X\n" + "", + *(uint64_t*)p_pr->resv0, + cl_ntoh64( p_pr->dgid.unicast.prefix ), + cl_ntoh64( p_pr->dgid.unicast.interface_id ), + cl_ntoh64( p_pr->sgid.unicast.prefix ), + cl_ntoh64( p_pr->sgid.unicast.interface_id ), + cl_ntoh16( p_pr->dlid ), + cl_ntoh16( p_pr->slid ), + cl_ntoh32( p_pr->hop_flow_raw ), + p_pr->tclass, + p_pr->num_path, + cl_ntoh16( p_pr->pkey ), + cl_ntoh16( p_pr->sl ), + p_pr->mtu, + p_pr->rate, + p_pr->pkt_life, + p_pr->preference, + *(uint32_t*)&p_pr->resv2, + *((uint16_t*)&p_pr->resv2 + 2) + ); + } +} + +/********************************************************************** + 
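osm_dump_path_record above reads the reserved bytes with a direct cast, *(uint64_t*)p_pr->resv0, which is safe only where the byte array happens to be suitably aligned; on strict-alignment targets a memcpy into a local is the portable spelling. A sketch of that alternative (read_resv0 is an illustrative helper, not part of the code here):

    #include <stdint.h>
    #include <string.h>

    static uint64_t read_resv0(const uint8_t resv0[8])
    {
        uint64_t v;
        memcpy(&v, resv0, sizeof(v));  // byte-wise copy, no alignment assumption
        return v;                      // still raw/unswapped, as in the dump above
    }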
**********************************************************************/ +void +osm_dump_multipath_record( + IN osm_log_t* const p_log, + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_log_level_t log_level ) +{ + int i; + char buf_line[1024]; + ib_gid_t const *p_gid; + + if( osm_log_is_active( p_log, log_level ) ) + { + memset(buf_line, 0, sizeof(buf_line)); + p_gid = p_mpr->gids; + if ( p_mpr->sgid_count ) + { + for (i = 0; i < p_mpr->sgid_count; i++) + { + sprintf( buf_line, "%s\t\t\t\tsgid%02d.................." + "0x%016" PRIx64 " : 0x%016" PRIx64 "\n", + buf_line, i + 1, cl_ntoh64( p_gid->unicast.prefix ), + cl_ntoh64( p_gid->unicast.interface_id ) ); + p_gid++; + } + } + if ( p_mpr->dgid_count ) + { + for (i = 0; i < p_mpr->dgid_count; i++) + { + sprintf( buf_line, "%s\t\t\t\tdgid%02d.................." + "0x%016" PRIx64 " : 0x%016" PRIx64 "\n", + buf_line, i + 1, cl_ntoh64( p_gid->unicast.prefix ), + cl_ntoh64( p_gid->unicast.interface_id ) ); + p_gid++; + } + } + osm_log( p_log, log_level, + "MultiPath Record dump:\n" + "\t\t\t\thop_flow_raw............0x%X\n" + "\t\t\t\ttclass..................0x%X\n" + "\t\t\t\tnum_path_revers.........0x%X\n" + "\t\t\t\tpkey....................0x%X\n" + "\t\t\t\tresv0...................0x%X\n" + "\t\t\t\tsl......................0x%X\n" + "\t\t\t\tmtu.....................0x%X\n" + "\t\t\t\trate....................0x%X\n" + "\t\t\t\tpkt_life................0x%X\n" + "\t\t\t\tresv1...................0x%X\n" + "\t\t\t\tindependence............0x%X\n" + "\t\t\t\tsgid_count..............0x%X\n" + "\t\t\t\tdgid_count..............0x%X\n" + "%s\n" + "", + cl_ntoh32( p_mpr->hop_flow_raw ), + p_mpr->tclass, + p_mpr->num_path, + cl_ntoh16( p_mpr->pkey ), + p_mpr->resv0, + cl_ntoh16( p_mpr->sl ), + p_mpr->mtu, + p_mpr->rate, + p_mpr->pkt_life, + p_mpr->resv1, + p_mpr->independence, + p_mpr->sgid_count, + p_mpr->dgid_count, + buf_line + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_mc_record( + IN osm_log_t* const p_log, + IN const ib_member_rec_t* const p_mcmr, + IN const osm_log_level_t log_level ) +{ + + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "MCMember Record dump:\n" + "\t\t\t\tMGID....................0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tPortGid.................0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tqkey....................0x%X\n" + "\t\t\t\tmlid....................0x%X\n" + "\t\t\t\tmtu.....................0x%X\n" + "\t\t\t\tTClass..................0x%X\n" + "\t\t\t\tpkey....................0x%X\n" + "\t\t\t\trate....................0x%X\n" + "\t\t\t\tpkt_life................0x%X\n" + "\t\t\t\tSLFlowLabelHopLimit.....0x%X\n" + "\t\t\t\tScopeState..............0x%X\n" + "\t\t\t\tProxyJoin...............0x%X\n" + "", + cl_ntoh64( p_mcmr->mgid.unicast.prefix ), + cl_ntoh64( p_mcmr->mgid.unicast.interface_id ), + cl_ntoh64( p_mcmr->port_gid.unicast.prefix ), + cl_ntoh64( p_mcmr->port_gid.unicast.interface_id ), + cl_ntoh32( p_mcmr->qkey ), + cl_ntoh16( p_mcmr->mlid ), + p_mcmr->mtu, + p_mcmr->tclass, + cl_ntoh16( p_mcmr->pkey ), + p_mcmr->rate, + p_mcmr->pkt_life, + cl_ntoh32( p_mcmr->sl_flow_hop ), + p_mcmr->scope_state, + p_mcmr->proxy_join + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_service_record( + IN osm_log_t* 
const p_log, + IN const ib_service_record_t* const p_sr, + IN const osm_log_level_t log_level ) +{ + char buf_service_key[35]; + char buf_service_name[65]; + + if( osm_log_is_active( p_log, log_level ) ) + { + sprintf(buf_service_key, + "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", + p_sr->service_key[0], + p_sr->service_key[1], + p_sr->service_key[2], + p_sr->service_key[3], + p_sr->service_key[4], + p_sr->service_key[5], + p_sr->service_key[6], + p_sr->service_key[7], + p_sr->service_key[8], + p_sr->service_key[9], + p_sr->service_key[10], + p_sr->service_key[11], + p_sr->service_key[12], + p_sr->service_key[13], + p_sr->service_key[14], + p_sr->service_key[15]); + strncpy(buf_service_name, (char *)p_sr->service_name, 64); + buf_service_name[64] = '\0'; + + osm_log( p_log, log_level, + "Service Record dump:\n" + "\t\t\t\tServiceID...............0x%016" PRIx64 "\n" + "\t\t\t\tServiceGID..............0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tServiceP_Key............0x%X\n" + "\t\t\t\tServiceLease............0x%X\n" + "\t\t\t\tServiceKey..............%s\n" + "\t\t\t\tServiceName.............%s\n" + "\t\t\t\tServiceData8.1..........0x%X\n" + "\t\t\t\tServiceData8.2..........0x%X\n" + "\t\t\t\tServiceData8.3..........0x%X\n" + "\t\t\t\tServiceData8.4..........0x%X\n" + "\t\t\t\tServiceData8.5..........0x%X\n" + "\t\t\t\tServiceData8.6..........0x%X\n" + "\t\t\t\tServiceData8.7..........0x%X\n" + "\t\t\t\tServiceData8.8..........0x%X\n" + "\t\t\t\tServiceData8.9..........0x%X\n" + "\t\t\t\tServiceData8.10.........0x%X\n" + "\t\t\t\tServiceData8.11.........0x%X\n" + "\t\t\t\tServiceData8.12.........0x%X\n" + "\t\t\t\tServiceData8.13.........0x%X\n" + "\t\t\t\tServiceData8.14.........0x%X\n" + "\t\t\t\tServiceData8.15.........0x%X\n" + "\t\t\t\tServiceData8.16.........0x%X\n" + "\t\t\t\tServiceData16.1.........0x%X\n" + "\t\t\t\tServiceData16.2.........0x%X\n" + "\t\t\t\tServiceData16.3.........0x%X\n" + "\t\t\t\tServiceData16.4.........0x%X\n" + "\t\t\t\tServiceData16.5.........0x%X\n" + "\t\t\t\tServiceData16.6.........0x%X\n" + "\t\t\t\tServiceData16.7.........0x%X\n" + "\t\t\t\tServiceData16.8.........0x%X\n" + "\t\t\t\tServiceData32.1.........0x%X\n" + "\t\t\t\tServiceData32.2.........0x%X\n" + "\t\t\t\tServiceData32.3.........0x%X\n" + "\t\t\t\tServiceData32.4.........0x%X\n" + "\t\t\t\tServiceData64.1.........0x%016" PRIx64 "\n" + "\t\t\t\tServiceData64.2.........0x%016" PRIx64 "\n" + "", + cl_ntoh64( p_sr->service_id ), + cl_ntoh64( p_sr->service_gid.unicast.prefix ), + cl_ntoh64( p_sr->service_gid.unicast.interface_id ), + cl_ntoh16( p_sr->service_pkey ), + cl_ntoh32( p_sr->service_lease ), + buf_service_key, + buf_service_name, + p_sr->service_data8[0], p_sr->service_data8[1], + p_sr->service_data8[2], p_sr->service_data8[3], + p_sr->service_data8[4], p_sr->service_data8[5], + p_sr->service_data8[6], p_sr->service_data8[7], + p_sr->service_data8[8], p_sr->service_data8[9], + p_sr->service_data8[10], p_sr->service_data8[11], + p_sr->service_data8[12], p_sr->service_data8[13], + p_sr->service_data8[14], p_sr->service_data8[15], + cl_ntoh16(p_sr->service_data16[0]), + cl_ntoh16(p_sr->service_data16[1]), + cl_ntoh16(p_sr->service_data16[2]), + cl_ntoh16(p_sr->service_data16[3]), + cl_ntoh16(p_sr->service_data16[4]), + cl_ntoh16(p_sr->service_data16[5]), + cl_ntoh16(p_sr->service_data16[6]), + cl_ntoh16(p_sr->service_data16[7]), + cl_ntoh32(p_sr->service_data32[0]), + cl_ntoh32(p_sr->service_data32[1]), + cl_ntoh32(p_sr->service_data32[2]), + 
cl_ntoh32(p_sr->service_data32[3]), + cl_ntoh64(p_sr->service_data64[0]), + cl_ntoh64(p_sr->service_data64[1]) + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_inform_info( + IN osm_log_t* const p_log, + IN const ib_inform_info_t* const p_ii, + IN const osm_log_level_t log_level ) +{ + uint32_t qpn; + uint8_t resp_time_val; + + if( osm_log_is_active( p_log, log_level ) ) + { + + ib_inform_info_get_qpn_resp_time(p_ii->g_or_v.generic.qpn_resp_time_val, + &qpn, &resp_time_val); + + if (p_ii->is_generic) + { + osm_log( p_log, log_level, + "InformInfo dump:\n" + "\t\t\t\tgid.....................0x%016" PRIx64 " : 0x%016" PRIx64 "\n" + "\t\t\t\tlid_range_begin.........0x%X\n" + "\t\t\t\tlid_range_end...........0x%X\n" + "\t\t\t\tis_generic..............0x%X\n" + "\t\t\t\tsubscribe...............0x%X\n" + "\t\t\t\ttrap_type...............0x%X\n" + "\t\t\t\ttrap_num................%u\n" + "\t\t\t\tqpn.....................0x%06X\n" + "\t\t\t\tresp_time_val...........0x%X\n" + "\t\t\t\tnode_type...............0x%06X\n" + "", + cl_ntoh64( p_ii->gid.unicast.prefix ), + cl_ntoh64( p_ii->gid.unicast.interface_id ), + cl_ntoh16( p_ii->lid_range_begin ), + cl_ntoh16( p_ii->lid_range_end ), + p_ii->is_generic, + p_ii->subscribe, + cl_ntoh16( p_ii->trap_type ), + cl_ntoh16( p_ii->g_or_v.generic.trap_num ), + cl_ntoh32(qpn), + resp_time_val, + cl_ntoh32(ib_inform_info_get_node_type( p_ii )) + ); + } + else + { + osm_log( p_log, log_level, + "InformInfo dump:\n" + "\t\t\t\tgid.....................0x%016" PRIx64 " : 0x%016" PRIx64 "\n" + "\t\t\t\tlid_range_begin.........0x%X\n" + "\t\t\t\tlid_range_end...........0x%X\n" + "\t\t\t\tis_generic..............0x%X\n" + "\t\t\t\tsubscribe...............0x%X\n" + "\t\t\t\ttrap_type...............0x%X\n" + "\t\t\t\tdev_id..................0x%X\n" + "\t\t\t\tqpn.....................0x%06X\n" + "\t\t\t\tresp_time_val...........0x%X\n" + "\t\t\t\tvendor_id...............0x%06X\n" + "", + cl_ntoh64( p_ii->gid.unicast.prefix ), + cl_ntoh64( p_ii->gid.unicast.interface_id ), + cl_ntoh16( p_ii->lid_range_begin ), + cl_ntoh16( p_ii->lid_range_end ), + p_ii->is_generic, + p_ii->subscribe, + cl_ntoh16( p_ii->trap_type ), + cl_ntoh16( p_ii->g_or_v.vend.dev_id ), + cl_ntoh32(qpn), + resp_time_val, + cl_ntoh32(ib_inform_info_get_node_type( p_ii )) + ); + } + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_inform_info_record( + IN osm_log_t* const p_log, + IN const ib_inform_info_record_t* const p_iir, + IN const osm_log_level_t log_level ) +{ + uint32_t qpn; + uint8_t resp_time_val; + + ib_inform_info_get_qpn_resp_time(p_iir->inform_info.g_or_v.generic.qpn_resp_time_val, + &qpn, &resp_time_val); + + if( osm_log_is_active( p_log, log_level ) ) + { + + ib_inform_info_get_qpn_resp_time(p_iir->inform_info.g_or_v.generic.qpn_resp_time_val, + &qpn, &resp_time_val); + + if (p_iir->inform_info.is_generic) + { + osm_log( p_log, log_level, + "InformInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tSubscriberGID...........0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tSubscriberEnum..........0x%X\n" + "\t\t\t\tInformInfo dump:\n" + "\t\t\t\tgid.....................0x%016" PRIx64 " : 0x%016" PRIx64 "\n" + "\t\t\t\tlid_range_begin.........0x%X\n" + "\t\t\t\tlid_range_end...........0x%X\n" + "\t\t\t\tis_generic..............0x%X\n" + 
"\t\t\t\tsubscribe...............0x%X\n" + "\t\t\t\ttrap_type...............0x%X\n" + "\t\t\t\ttrap_num................%u\n" + "\t\t\t\tqpn.....................0x%06X\n" + "\t\t\t\tresp_time_val...........0x%X\n" + "\t\t\t\tnode_type...............0x%06X\n" + "", + cl_ntoh64( p_iir->subscriber_gid.unicast.prefix ), + cl_ntoh64( p_iir->subscriber_gid.unicast.interface_id ), + cl_ntoh16( p_iir->subscriber_enum ), + cl_ntoh64( p_iir->inform_info.gid.unicast.prefix ), + cl_ntoh64( p_iir->inform_info.gid.unicast.interface_id ), + cl_ntoh16( p_iir->inform_info.lid_range_begin ), + cl_ntoh16( p_iir->inform_info.lid_range_end ), + p_iir->inform_info.is_generic, + p_iir->inform_info.subscribe, + cl_ntoh16( p_iir->inform_info.trap_type ), + cl_ntoh16( p_iir->inform_info.g_or_v.generic.trap_num ), + cl_ntoh32(qpn), + resp_time_val, + cl_ntoh32(ib_inform_info_get_node_type( &p_iir->inform_info )) + ); + } + else + { + osm_log( p_log, log_level, + "InformInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tSubscriberGID...........0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n" + "\t\t\t\tSubscriberEnum..........0x%X\n" + "\t\t\t\tInformInfo dump:\n" + "\t\t\t\tgid.....................0x%016" PRIx64 " : 0x%016" PRIx64 "\n" + "\t\t\t\tlid_range_begin.........0x%X\n" + "\t\t\t\tlid_range_end...........0x%X\n" + "\t\t\t\tis_generic..............0x%X\n" + "\t\t\t\tsubscribe...............0x%X\n" + "\t\t\t\ttrap_type...............0x%X\n" + "\t\t\t\tdev_id..................0x%X\n" + "\t\t\t\tqpn.....................0x%06X\n" + "\t\t\t\tresp_time_val...........0x%X\n" + "\t\t\t\tvendor_id...............0x%06X\n" + "", + cl_ntoh64( p_iir->subscriber_gid.unicast.prefix ), + cl_ntoh64( p_iir->subscriber_gid.unicast.interface_id ), + cl_ntoh16( p_iir->subscriber_enum ), + cl_ntoh64( p_iir->inform_info.gid.unicast.prefix ), + cl_ntoh64( p_iir->inform_info.gid.unicast.interface_id ), + cl_ntoh16( p_iir->inform_info.lid_range_begin ), + cl_ntoh16( p_iir->inform_info.lid_range_end ), + p_iir->inform_info.is_generic, + p_iir->inform_info.subscribe, + cl_ntoh16( p_iir->inform_info.trap_type ), + cl_ntoh16( p_iir->inform_info.g_or_v.vend.dev_id ), + cl_ntoh32(qpn), + resp_time_val, + cl_ntoh32(ib_inform_info_get_node_type( &p_iir->inform_info )) + ); + } + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_link_record( + IN osm_log_t* const p_log, + IN const ib_link_record_t* const p_lr, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "Link Record dump:\n" + "\t\t\t\tfrom_lid................0x%X\n" + "\t\t\t\tfrom_port_num...........0x%X\n" + "\t\t\t\tto_port_num.............0x%X\n" + "\t\t\t\tto_lid..................0x%X\n" + "", + cl_ntoh16( p_lr->from_lid ), + p_lr->from_port_num, + p_lr->to_port_num, + cl_ntoh16( p_lr->to_lid ) + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_switch_info( + IN osm_log_t* const p_log, + IN const ib_switch_info_t* const p_si, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, OSM_LOG_VERBOSE, + "SwitchInfo dump:\n" + "\t\t\t\tlin_cap.................0x%X\n" + "\t\t\t\trand_cap................0x%X\n" + "\t\t\t\tmcast_cap...............0x%X\n" + "\t\t\t\tlin_top.................0x%X\n" + 
"\t\t\t\tdef_port................0x%X\n" + "\t\t\t\tdef_mcast_pri_port......0x%X\n" + "\t\t\t\tdef_mcast_not_port......0x%X\n" + "\t\t\t\tlife_state..............0x%X\n" + "\t\t\t\tlids_per_port...........0x%X\n" + "\t\t\t\tpartition_enf_cap.......0x%X\n" + "\t\t\t\tflags...................0x%X\n" + "", + cl_ntoh16( p_si->lin_cap ), + cl_ntoh16( p_si->rand_cap ), + cl_ntoh16( p_si->mcast_cap ), + cl_ntoh16( p_si->lin_top ), + p_si->def_port, + p_si->def_mcast_pri_port, + p_si->def_mcast_not_port, + p_si->life_state, + cl_ntoh16( p_si->lids_per_port ), + cl_ntoh16( p_si->enforce_cap ), + p_si->flags + ); + } +} + + +/********************************************************************** + **********************************************************************/ +void +osm_dump_switch_info_record( + IN osm_log_t* const p_log, + IN const ib_switch_info_record_t* const p_sir, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, log_level, + "SwitchInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tlid.....................0x%X\n" + "\t\t\t\tSwitchInfo dump:\n" + "\t\t\t\tlin_cap.................0x%X\n" + "\t\t\t\trand_cap................0x%X\n" + "\t\t\t\tmcast_cap...............0x%X\n" + "\t\t\t\tlin_top.................0x%X\n" + "\t\t\t\tdef_port................0x%X\n" + "\t\t\t\tdef_mcast_pri_port......0x%X\n" + "\t\t\t\tdef_mcast_not_port......0x%X\n" + "\t\t\t\tlife_state..............0x%X\n" + "\t\t\t\tlids_per_port...........0x%X\n" + "\t\t\t\tpartition_enf_cap.......0x%X\n" + "\t\t\t\tflags...................0x%X\n" + "", + cl_ntoh16( p_sir->lid ), + cl_ntoh16( p_sir->switch_info.lin_cap ), + cl_ntoh16( p_sir->switch_info.rand_cap ), + cl_ntoh16( p_sir->switch_info.mcast_cap ), + cl_ntoh16( p_sir->switch_info.lin_top ), + p_sir->switch_info.def_port, + p_sir->switch_info.def_mcast_pri_port, + p_sir->switch_info.def_mcast_not_port, + p_sir->switch_info.life_state, + cl_ntoh16( p_sir->switch_info.lids_per_port ), + cl_ntoh16( p_sir->switch_info.enforce_cap ), + p_sir->switch_info.flags + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_pkey_block( + IN osm_log_t* const p_log, + IN uint64_t port_guid, + IN uint16_t block_num, + IN uint8_t port_num, + IN const ib_pkey_table_t* const p_pkey_tbl, + IN const osm_log_level_t log_level ) +{ + int i; + char buf_line[1024]; + + if( osm_log_is_active( p_log, log_level ) ) + { + buf_line[0] = '\0'; + for (i = 0; i < 32; i++) + sprintf( buf_line,"%s 0x%04x |", + buf_line, cl_ntoh16(p_pkey_tbl->pkey_entry[i])); + + osm_log( p_log, log_level, + "P_Key table dump:\n" + "\t\t\tport_guid...........0x%016" PRIx64 "\n" + "\t\t\tblock_num...........0x%X\n" + "\t\t\tport_num............0x%X\n\tP_Key Table: %s\n", + cl_ntoh64( port_guid ), + block_num, + port_num, + buf_line + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_slvl_map_table( + IN osm_log_t* const p_log, + IN uint64_t port_guid, + IN uint8_t in_port_num, + IN uint8_t out_port_num, + IN const ib_slvl_table_t* const p_slvl_tbl, + IN const osm_log_level_t log_level ) +{ + uint8_t i; + char buf_line1[1024]; + char buf_line2[1024]; + + if( osm_log_is_active( p_log, log_level ) ) + { + buf_line1[0] = '\0'; + buf_line2[0] = '\0'; + for (i = 0; i < 16; i++) + sprintf( buf_line1,"%s %-2u |", buf_line1, 
i); + for (i = 0; i < 16; i++) + sprintf( buf_line2,"%s0x%01X |", + buf_line2, ib_slvl_table_get(p_slvl_tbl, i)); + osm_log( p_log, log_level, + "SLtoVL dump:\n" + "\t\t\tport_guid............0x%016" PRIx64 "\n" + "\t\t\tin_port_num..........0x%X\n" + "\t\t\tout_port_num.........0x%X\n\tSL: | %s\n\tVL: | %s\n", + cl_ntoh64( port_guid ), + in_port_num, + out_port_num, + buf_line1, buf_line2 + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_vl_arb_table( + IN osm_log_t* const p_log, + IN uint64_t port_guid, + IN uint8_t block_num, + IN uint8_t port_num, + IN const ib_vl_arb_table_t* const p_vla_tbl, + IN const osm_log_level_t log_level ) +{ + int i; + char buf_line1[1024]; + char buf_line2[1024]; + + if( osm_log_is_active( p_log, log_level ) ) + { + buf_line1[0] = '\0'; + buf_line2[0] = '\0'; + for (i = 0; i < 32; i++) + sprintf( buf_line1,"%s 0x%01X |", + buf_line1, p_vla_tbl->vl_entry[i].vl); + for (i = 0; i < 32; i++) + sprintf( buf_line2,"%s 0x%01X |", + buf_line2, p_vla_tbl->vl_entry[i].weight); + osm_log( p_log, log_level, + "VlArb dump:\n" + "\t\t\tport_guid...........0x%016" PRIx64 "\n" + "\t\t\tblock_num...........0x%X\n" + "\t\t\tport_num............0x%X\n\tVL : | %s\n\tWEIGHT:| %s\n", + cl_ntoh64( port_guid ), + block_num, + port_num, + buf_line1, buf_line2 + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_sm_info( + IN osm_log_t* const p_log, + IN const ib_sm_info_t* const p_smi, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "SMInfo dump:\n" + "\t\t\t\tguid....................0x%016" PRIx64 "\n" + "\t\t\t\tsm_key..................0x%016" PRIx64 "\n" + "\t\t\t\tact_count...............%u\n" + "\t\t\t\tpriority................%u\n" + "\t\t\t\tsm_state................%u\n" + "", + cl_ntoh64( p_smi->guid ), + cl_ntoh64( p_smi->sm_key ), + cl_ntoh32( p_smi->act_count ), + ib_sminfo_get_priority( p_smi ), + ib_sminfo_get_state( p_smi ) + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_sm_info_record( + IN osm_log_t* const p_log, + IN const ib_sminfo_record_t* const p_smir, + IN const osm_log_level_t log_level ) +{ + if( osm_log_is_active( p_log, log_level ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "SMInfo Record dump:\n" + "\t\t\t\tRID\n" + "\t\t\t\tLid.....................0x%X\n" + "\t\t\t\tReserved................0x%X\n" + "\t\t\t\tSMInfo dump:\n" + "\t\t\t\tguid....................0x%016" PRIx64 "\n" + "\t\t\t\tsm_key..................0x%016" PRIx64 "\n" + "\t\t\t\tact_count...............%u\n" + "\t\t\t\tpriority................%u\n" + "\t\t\t\tsm_state................%u\n" + "", + cl_ntoh16( p_smir->lid ), + cl_ntoh16( p_smir->resv0 ), + cl_ntoh64( p_smir->sm_info.guid ), + cl_ntoh64( p_smir->sm_info.sm_key ), + cl_ntoh32( p_smir->sm_info.act_count ), + ib_sminfo_get_priority( &p_smir->sm_info ), + ib_sminfo_get_state( &p_smir->sm_info ) + ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_notice( + IN osm_log_t* const p_log, + IN const ib_mad_notice_attr_t *p_ntci, + IN const osm_log_level_t log_level ) 
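/* The body that follows decodes the trap-specific data_details union by
 * switching on the generic trap number, so each trap prints only the
 * fields defined for it. A minimal sketch of that dispatch, reduced to
 * two traps from the switch below (printf stands in for the buffered
 * sprintf used there):
 *
 *   switch (cl_ntoh16(p_ntci->g_or_v.generic.trap_num)) {
 *   case 128:  // link state change: only the switch LID is reported
 *       printf("sw_lid 0x%04X\n",
 *              cl_ntoh16(p_ntci->data_details.ntc_128.sw_lid));
 *       break;
 *   case 144:  // capability mask change: LID plus the new mask
 *       printf("lid 0x%04X cap 0x%08X\n",
 *              cl_ntoh16(p_ntci->data_details.ntc_144.lid),
 *              cl_ntoh32(p_ntci->data_details.ntc_144.new_cap_mask));
 *       break;
 *   default:   // traps carrying no extra detail add nothing
 *       break;
 *   }
 */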
+{ + char buff[1024]; + buff[0] = '\0'; + + if( osm_log_is_active( p_log, log_level ) ) + { + if (ib_notice_is_generic(p_ntci)) + { + /* immediate data based on the trap */ + switch (cl_ntoh16(p_ntci->g_or_v.generic.trap_num)) { + case 64: + case 65: + case 66: + case 67: + sprintf(buff, + "\t\t\t\tsrc_gid..................0x%016" PRIx64 + ":0x%016" PRIx64 "\n", + cl_ntoh64(p_ntci->data_details.ntc_64_67.gid.unicast.prefix), + cl_ntoh64(p_ntci->data_details.ntc_64_67.gid.unicast.interface_id)); + break; + case 128: + sprintf(buff, + "\t\t\t\tsw_lid...................0x%04X\n", + cl_ntoh16(p_ntci->data_details.ntc_128.sw_lid)); + break; + case 129: + case 130: + case 131: + sprintf(buff, + "\t\t\t\tlid......................0x%04X\n" + "\t\t\t\tport_num.................%u\n", + cl_ntoh16(p_ntci->data_details.ntc_129_131.lid), + p_ntci->data_details.ntc_129_131.port_num); + break; + case 144: + sprintf(buff, + "\t\t\t\tlid......................0x%04x\n" + "\t\t\t\tnew_cap_mask.............0x%08x\n", + cl_ntoh16(p_ntci->data_details.ntc_144.lid), + cl_ntoh32(p_ntci->data_details.ntc_144.new_cap_mask)); + break; + case 145: + sprintf(buff, + "\t\t\t\tlid......................0x%04X\n" + "\t\t\t\tnew_sys_guid.............0x%016" PRIx64 "\n", + cl_ntoh16(p_ntci->data_details.ntc_145.lid), + cl_ntoh64(p_ntci->data_details.ntc_145.new_sys_guid)); + break; + } + + osm_log( p_log, log_level, + "Generic Notice dump:\n" + "\t\t\t\ttype.....................0x%02X\n" + "\t\t\t\tprod_type................%u\n" + "\t\t\t\ttrap_num.................%u\n%s" + "", + ib_notice_get_type(p_ntci), + cl_ntoh32(ib_notice_get_prod_type(p_ntci)), + cl_ntoh16(p_ntci->g_or_v.generic.trap_num), + buff + ); + } + else + { + osm_log( p_log, log_level, + "Vendor Notice dump:\n" + "\t\t\t\ttype.....................0x%04x\n" + "\t\t\t\tvendor...................%u\n" + "\t\t\t\tdevice_id................%u\n" + "", + cl_ntoh16(ib_notice_get_type(p_ntci)), + cl_ntoh32(ib_notice_get_vend_id(p_ntci)), + cl_ntoh16(p_ntci->g_or_v.vend.dev_id) + ); + } + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_dr_smp( + IN osm_log_t* const p_log, + IN const ib_smp_t* const p_smp, + IN const osm_log_level_t log_level ) +{ + uint32_t i; + char buf[BUF_SIZE]; + char line[BUF_SIZE]; + + if( osm_log_is_active( p_log, log_level ) ) + { + sprintf( buf, + "SMP dump:\n" + "\t\t\t\tbase_ver................0x%X\n" + "\t\t\t\tmgmt_class..............0x%X\n" + "\t\t\t\tclass_ver...............0x%X\n" + "\t\t\t\tmethod..................0x%X (%s)\n", + p_smp->base_ver, + p_smp->mgmt_class, + p_smp->class_ver, + p_smp->method, ib_get_sm_method_str(p_smp->method)); + + if (p_smp->mgmt_class == IB_MCLASS_SUBN_DIR) + { + sprintf( line, + "\t\t\t\tD bit...................0x%X\n" + "\t\t\t\tstatus..................0x%X\n", + ib_smp_is_d(p_smp), + ib_smp_get_status(p_smp)); + } + else + { + sprintf( line, + "\t\t\t\tstatus..................0x%X\n", + cl_ntoh16(p_smp->status)); + } + strcat( buf, line ); + + sprintf( line, + "\t\t\t\thop_ptr.................0x%X\n" + "\t\t\t\thop_count...............0x%X\n" + "\t\t\t\ttrans_id................0x%" PRIx64 "\n" + "\t\t\t\tattr_id.................0x%X (%s)\n" + "\t\t\t\tresv....................0x%X\n" + "\t\t\t\tattr_mod................0x%X\n" + "\t\t\t\tm_key...................0x%016" PRIx64 "\n", + p_smp->hop_ptr, + p_smp->hop_count, + cl_ntoh64(p_smp->trans_id), + cl_ntoh16(p_smp->attr_id), + 
ib_get_sm_attr_str( p_smp->attr_id ), + cl_ntoh16(p_smp->resv), + cl_ntoh32(p_smp->attr_mod), + cl_ntoh64(p_smp->m_key) + ); + strcat( buf, line ); + + if (p_smp->mgmt_class == IB_MCLASS_SUBN_DIR) + { + sprintf( line, + "\t\t\t\tdr_slid.................0x%X\n" + "\t\t\t\tdr_dlid.................0x%X\n", + cl_ntoh16(p_smp->dr_slid), + cl_ntoh16(p_smp->dr_dlid) + ); + strcat( buf, line ); + + strcat( buf, "\n\t\t\t\tInitial path: " ); + + for( i = 0; i <= p_smp->hop_count; i++ ) + { + sprintf( line, "[%X]", p_smp->initial_path[i] ); + strcat( buf, line ); + } + + strcat( buf, "\n\t\t\t\tReturn path: " ); + + for( i = 0; i <= p_smp->hop_count; i++ ) + { + sprintf( line, "[%X]", p_smp->return_path[i] ); + strcat( buf, line ); + } + + strcat( buf, "\n\t\t\t\tReserved: " ); + + for( i = 0; i < 7; i++ ) + { + sprintf( line, "[%0X]", p_smp->resv1[i] ); + strcat( buf, line ); + } + + strcat( buf, "\n" ); + + for( i = 0; i < 64; i += 16 ) + { + sprintf( line, "\n\t\t\t\t%02X %02X %02X %02X " + "%02X %02X %02X %02X" + " %02X %02X %02X %02X %02X %02X %02X %02X\n", + p_smp->data[i], + p_smp->data[i+1], + p_smp->data[i+2], + p_smp->data[i+3], + p_smp->data[i+4], + p_smp->data[i+5], + p_smp->data[i+6], + p_smp->data[i+7], + p_smp->data[i+8], + p_smp->data[i+9], + p_smp->data[i+10], + p_smp->data[i+11], + p_smp->data[i+12], + p_smp->data[i+13], + p_smp->data[i+14], + p_smp->data[i+15] ); + + strcat( buf, line ); + } + } + else + { + /* not a Direct Route so provide source and destination lids */ + strcat(buf, "\t\t\t\tMAD IS LID ROUTED\n"); + } + + osm_log( p_log, log_level, + "%s\n", buf ); + + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_sa_mad( + IN osm_log_t* const p_log, + IN const ib_sa_mad_t* const p_mad, + IN const osm_log_level_t log_level ) +{ + char buf[BUF_SIZE]; + + /* make sure the mad is valid */ + if (p_mad == NULL) + { + osm_log( p_log, log_level, + "NULL MAD POINTER\n"); + return; + } + + if( osm_log_is_active( p_log, log_level ) ) + { + sprintf( buf, + "SA MAD dump:\n" + "\t\t\t\tbase_ver................0x%X\n" + "\t\t\t\tmgmt_class..............0x%X\n" + "\t\t\t\tclass_ver...............0x%X\n" + "\t\t\t\tmethod..................0x%X (%s)\n" + "\t\t\t\tstatus..................0x%X\n" + "\t\t\t\tresv....................0x%X\n" + "\t\t\t\ttrans_id................0x%" PRIx64 "\n" + "\t\t\t\tattr_id.................0x%X (%s)\n" + "\t\t\t\tresv1...................0x%X\n" + "\t\t\t\tattr_mod................0x%X\n" + "\t\t\t\trmpp_version............0x%X\n" + "\t\t\t\trmpp_type...............0x%X\n" + "\t\t\t\trmpp_flags..............0x%X\n" + "\t\t\t\trmpp_status.............0x%X\n" + "\t\t\t\tseg_num.................0x%X\n" + "\t\t\t\tpayload_len/new_win.....0x%X\n" + "\t\t\t\tsm_key..................0x%016" PRIx64 "\n" + "\t\t\t\tattr_offset.............0x%X\n" + "\t\t\t\tresv2...................0x%X\n" + "\t\t\t\tcomp_mask...............0x%016" PRIx64 "\n", + p_mad->base_ver, + p_mad->mgmt_class, + p_mad->class_ver, + p_mad->method, ib_get_sa_method_str(p_mad->method), + cl_ntoh16(p_mad->status), + cl_ntoh16(p_mad->resv), + cl_ntoh64(p_mad->trans_id), + cl_ntoh16(p_mad->attr_id), + ib_get_sa_attr_str( p_mad->attr_id ), + cl_ntoh16(p_mad->resv1), + cl_ntoh32(p_mad->attr_mod), + p_mad->rmpp_version, + p_mad->rmpp_type, + p_mad->rmpp_flags, + p_mad->rmpp_status, + cl_ntoh32(p_mad->seg_num), + cl_ntoh32(p_mad->paylen_newwin), + cl_ntoh64(p_mad->sm_key), + 
cl_ntoh16(p_mad->attr_offset), + cl_ntoh16(p_mad->resv3), + cl_ntoh64(p_mad->comp_mask) + ); + + strcat( buf, "\n" ); + + osm_log( p_log, log_level, + "%s\n", buf ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_dr_path( + IN osm_log_t* const p_log, + IN const osm_dr_path_t* const p_path, + IN const osm_log_level_t log_level) +{ + uint32_t i; + char buf[BUF_SIZE]; + char line[BUF_SIZE]; + + if( osm_log_is_active( p_log, log_level) ) + { + sprintf( buf, "Directed Path Dump of %u hop path:" + "\n\t\t\t\tPath = ", p_path->hop_count ); + + for( i = 0; i <= p_path->hop_count; i++ ) + { + sprintf( line, "[%X]", p_path->path[i] ); + strcat( buf, line ); + } + osm_log( p_log, log_level, + "%s\n", buf ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_dump_smp_dr_path( + IN osm_log_t* const p_log, + IN const ib_smp_t* const p_smp, + IN const osm_log_level_t log_level + ) +{ + uint32_t i; + char buf[BUF_SIZE]; + char line[BUF_SIZE]; + + if( osm_log_is_active( p_log, log_level) ) + { + sprintf( buf, "Received SMP on a %u hop path:" + "\n\t\t\t\tInitial path = ", p_smp->hop_count ); + + for( i = 0; i <= p_smp->hop_count; i++ ) + { + sprintf( line, "[%X]", p_smp->initial_path[i] ); + strcat( buf, line ); + } + + strcat( buf, "\n\t\t\t\tReturn path = " ); + + for( i = 0; i <= p_smp->hop_count; i++ ) + { + sprintf( line, "[%X]", p_smp->return_path[i] ); + strcat( buf, line ); + } + + osm_log( p_log, log_level, + "%s\n", buf ); + } +} + +const char* const __osm_sm_state_str[] = +{ + "OSM_SM_STATE_NO_STATE", /* 0 */ + "OSM_SM_STATE_INIT", /* 1 */ + "OSM_SM_STATE_IDLE", /* 2 */ + "OSM_SM_STATE_SWEEP_LIGHT", /* 3 */ + "OSM_SM_STATE_SWEEP_LIGHT_WAIT", /* 4 */ + "OSM_SM_STATE_SWEEP_HEAVY_SELF", /* 5 */ + "OSM_SM_STATE_SWEEP_HEAVY_SUBNET", /* 6 */ + "OSM_SM_STATE_SET_SM_UCAST_LID", /* 7 */ + "OSM_SM_STATE_SET_SM_UCAST_LID_WAIT", /* 8 */ + "OSM_SM_STATE_SET_SM_UCAST_LID_DONE", /* 9 */ + "OSM_SM_STATE_SET_SUBNET_UCAST_LIDS", /* 10 */ + "OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_WAIT", /* 11 */ + "OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_DONE", /* 12 */ + "OSM_SM_STATE_SET_UCAST_TABLES", /* 13 */ + "OSM_SM_STATE_SET_UCAST_TABLES_WAIT", /* 14 */ + "OSM_SM_STATE_SET_UCAST_TABLES_DONE", /* 15 */ + "OSM_SM_STATE_SET_MCAST_TABLES", /* 16 */ + "OSM_SM_STATE_SET_MCAST_TABLES_WAIT", /* 17 */ + "OSM_SM_STATE_SET_MCAST_TABLES_DONE", /* 18 */ + "OSM_SM_STATE_SET_LINK_PORTS", /* 19 */ + "OSM_SM_STATE_SET_LINK_PORTS_WAIT", /* 20 */ + "OSM_SM_STATE_SET_LINK_PORTS_DONE", /* 21 */ + "OSM_SM_STATE_SET_ARMED", /* 22 */ + "OSM_SM_STATE_SET_ARMED_WAIT", /* 23 */ + "OSM_SM_STATE_SET_ARMED_DONE", /* 24 */ + "OSM_SM_STATE_SET_ACTIVE", /* 25 */ + "OSM_SM_STATE_SET_ACTIVE_WAIT", /* 26 */ + "OSM_SM_STATE_LOST_NEGOTIATION", /* 27 */ + "OSM_SM_STATE_STANDBY", /* 28 */ + "OSM_SM_STATE_SUBNET_UP", /* 29 */ + "OSM_SM_STATE_PROCESS_REQUEST", /* 30 */ + "OSM_SM_STATE_PROCESS_REQUEST_WAIT", /* 31 */ + "OSM_SM_STATE_PROCESS_REQUEST_DONE", /* 32 */ + "OSM_SM_STATE_MASTER_OR_HIGHER_SM_DETECTED",/* 33 */ + "OSM_SM_STATE_SET_PKEY", /* 34 */ + "OSM_SM_STATE_SET_PKEY_WAIT", /* 35 */ + "OSM_SM_STATE_SET_PKEY_DONE", /* 36 */ + "UNKNOWN STATE!!" 
/* 37 */ +}; + +const char* const __osm_sm_signal_str[] = +{ + "OSM_SIGNAL_NONE", /* 0 */ + "OSM_SIGNAL_SWEEP", /* 1 */ + "OSM_SIGNAL_CHANGE_DETECTED", /* 2 */ + "OSM_SIGNAL_NO_PENDING_TRANSACTIONS", /* 3 */ + "OSM_SIGNAL_DONE", /* 4 */ + "OSM_SIGNAL_DONE_PENDING", /* 5 */ + "OSM_SIGNAL_LOST_SM_NEGOTIATION", /* 6 */ + "OSM_SIGNAL_LIGHT_SWEEP_FAIL", /* 7 */ + "OSM_SIGNAL_IDLE_TIME_PROCESS", /* 8 */ + "OSM_SIGNAL_IDLE_TIME_PROCESS_REQUEST", /* 9 */ + "OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED", /* 10 */ + "OSM_SIGNAL_EXIT_STBY", /* 11 */ + "UNKNOWN SIGNAL!!" /* 12 */ +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_sm_state_str( + IN osm_sm_state_t state ) +{ + if( state > OSM_SM_STATE_MAX ) + state = OSM_SM_STATE_MAX; + return( __osm_sm_state_str[state] ); +} + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_sm_signal_str( + IN osm_signal_t signal ) +{ + if( signal > OSM_SIGNAL_MAX ) + signal = OSM_SIGNAL_MAX; + return( __osm_sm_signal_str[signal] ); +} + +/********************************************************************** + **********************************************************************/ + +static const char* const __osm_disp_msg_str[] = +{ + "OSM_MSG_REQ", + "OSM_MSG_MAD_NODE_INFO", + "OSM_MSG_MAD_PORT_INFO,", + "OSM_MSG_MAD_SWITCH_INFO", + "OSM_MSG_MAD_NODE_DESC", + "OSM_MSG_NO_SMPS_OUTSTANDING", + "OSM_MSG_MAD_NODE_RECORD", + "OSM_MSG_MAD_PORTINFO_RECORD", + "OSM_MSG_MAD_SERVICE_RECORD", + "OSM_MSG_MAD_PATH_RECORD", + "OSM_MSG_MAD_MCMEMBER_RECORD", + "OSM_MSG_MAD_LINK_RECORD", + "OSM_MSG_MAD_SMINFO_RECORD", + "OSM_MSG_MAD_CLASS_PORT_INFO", + "OSM_MSG_MAD_INFORM_INFO", + "OSM_MSG_MAD_LFT_RECORD", + "OSM_MSG_MAD_LFT", + "OSM_MSG_MAD_SM_INFO", + "OSM_MSG_MAD_NOTICE", + "OSM_MSG_LIGHT_SWEEP_FAIL", + "OSM_MSG_MAD_MFT", + "OSM_MSG_MAD_PKEY_TBL_RECORD", + "OSM_MSG_MAD_VL_ARB_RECORD", + "OSM_MSG_MAD_SLVL_TBL_RECORD", + "OSM_MSG_MAD_PKEY", + "OSM_MSG_MAD_VL_ARB", + "OSM_MSG_MAD_SLVL", + "OSM_MSG_MAD_GUIDINFO_RECORD", + "OSM_MSG_MAD_INFORM_INFO_RECORD", +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + "OSM_MSG_MAD_MULTIPATH_RECORD", +#endif + "UNKNOWN!!" +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_disp_msg_str( + IN cl_disp_msgid_t msg ) +{ + if( msg > OSM_MSG_MAX ) + msg = OSM_MSG_MAX; + return( __osm_disp_msg_str[msg] ); +} + +static const char* const __osm_port_state_str_fixed_width[] = +{ + "NOC", + "DWN", + "INI", + "ARM", + "ACT", + "???" 
+}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_port_state_str_fixed_width( + IN uint8_t port_state ) +{ + if( port_state > IB_LINK_ACTIVE ) + port_state = IB_LINK_ACTIVE + 1; + return( __osm_port_state_str_fixed_width[port_state] ); +} + +static const char* const __osm_node_type_str_fixed_width[] = +{ + "??", + "CA", + "SW", + "RT", +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_node_type_str_fixed_width( + IN uint32_t node_type ) +{ + if( node_type >= IB_NOTICE_NODE_TYPE_ROUTER ) + node_type = 0; + return( __osm_node_type_str_fixed_width[node_type] ); +} + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_manufacturer_str( + IN uint64_t const guid_ho ) +{ + static const char* intel_str = "Intel "; + static const char* mellanox_str = "Mellanox "; + static const char* redswitch_str = "Redswitch "; + static const char* silverstorm_str = "SilverStorm"; + static const char* topspin_str = "Topspin "; + static const char* fujitsu_str = "Fujitsu "; + static const char* voltaire_str = "Voltaire "; + static const char* yotta_str = "YottaYotta "; + static const char* pathscale_str = "PathScale "; + static const char* ibm_str = "IBM "; + static const char* divergenet_str = "DivergeNet "; + static const char* flextronics_str = "Flextronics"; + static const char* agilent_str = "Agilent "; + static const char* obsidian_str = "Obsidian "; + static const char* baymicro_str = "BayMicro "; + static const char* lsilogic_str = "LSILogic "; + static const char* ddn_str = "DataDirect "; + static const char* panta_str = "Panta "; + static const char* hp_str = "HP "; + static const char* rioworks_str = "Rioworks "; + static const char* unknown_str = "Unknown "; + + switch( (uint32_t)(guid_ho >> (5 * 8)) ) + { + case OSM_VENDOR_ID_INTEL: + return( intel_str ); + case OSM_VENDOR_ID_MELLANOX: + return( mellanox_str ); + case OSM_VENDOR_ID_REDSWITCH: + return( redswitch_str ); + case OSM_VENDOR_ID_SILVERSTORM: + return( silverstorm_str ); + case OSM_VENDOR_ID_TOPSPIN: + return( topspin_str ); + case OSM_VENDOR_ID_FUJITSU: + case OSM_VENDOR_ID_FUJITSU2: + return( fujitsu_str ); + case OSM_VENDOR_ID_VOLTAIRE: + return( voltaire_str ); + case OSM_VENDOR_ID_YOTTAYOTTA: + return( yotta_str ); + case OSM_VENDOR_ID_PATHSCALE: + return( pathscale_str ); + case OSM_VENDOR_ID_IBM: + return( ibm_str ); + case OSM_VENDOR_ID_DIVERGENET: + return( divergenet_str ); + case OSM_VENDOR_ID_FLEXTRONICS: + return( flextronics_str ); + case OSM_VENDOR_ID_AGILENT: + return( agilent_str ); + case OSM_VENDOR_ID_OBSIDIAN: + return( obsidian_str ); + case OSM_VENDOR_ID_BAYMICRO: + return( baymicro_str ); + case OSM_VENDOR_ID_LSILOGIC: + return( lsilogic_str ); + case OSM_VENDOR_ID_DDN: + return( ddn_str ); + case OSM_VENDOR_ID_PANTA: + return( panta_str ); + case OSM_VENDOR_ID_HP: + return( hp_str ); + case OSM_VENDOR_ID_RIOWORKS: + return( rioworks_str ); + default: + return( unknown_str ); + } +} + +static const char* const __osm_mtu_str_fixed_width[] = +{ + "??? 
", + "256 ", + "512 ", + "1024", + "2048", + "4096" +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_mtu_str( + IN uint8_t const mtu ) +{ + if( mtu > IB_MTU_LEN_4096 ) + return( __osm_mtu_str_fixed_width[0] ); + else + return( __osm_mtu_str_fixed_width[mtu] ); +} + +static const char* const __osm_lwa_str_fixed_width[] = +{ + "???", + "1x ", + "4x ", + "???", + "???", + "???", + "???", + "???", + "12x" +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_lwa_str( + IN uint8_t const lwa ) +{ + if( lwa > 8 ) + return( __osm_lwa_str_fixed_width[0] ); + else + return( __osm_lwa_str_fixed_width[lwa] ); +} + +/********************************************************************** + **********************************************************************/ +static const char* const __osm_lsa_str_fixed_width[] = +{ + "???", + "2.5", + "5 ", + "???", + "10 " +}; + +const char* +osm_get_lsa_str( + IN uint8_t const lsa ) +{ + if( lsa > 4 ) + return( __osm_lsa_str_fixed_width[0] ); + else + return( __osm_lsa_str_fixed_width[lsa] ); +} + +/********************************************************************** + **********************************************************************/ + +const char* const __osm_sm_mgr_signal_str[] = +{ + "OSM_SM_SIGNAL_INIT", /* 0 */ + "OSM_SM_SIGNAL_DISCOVERY_COMPLETED", /* 2 */ + "OSM_SM_SIGNAL_POLLING_TIMEOUT", /* 3 */ + "OSM_SM_SIGNAL_DISCOVER", /* 4 */ + "OSM_SM_SIGNAL_DISABLE", /* 5 */ + "OSM_SM_SIGNAL_HANDOVER", /* 6 */ + "OSM_SM_SIGNAL_HANDOVER_SENT", /* 7 */ + "OSM_SM_SIGNAL_ACKNOWLEDGE", /* 8 */ + "OSM_SM_SIGNAL_STANDBY", /* 9 */ + "OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED", /* 10 */ + "OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED_DONE", /* 11 */ + "OSM_SM_SIGNAL_WAIT_FOR_HANDOVER", /* 12 */ + "UNKNOWN STATE!!" /* 13 */ + +}; + +/********************************************************************** + **********************************************************************/ +const char* +osm_get_sm_mgr_signal_str( + IN osm_sm_signal_t signal ) +{ + if( signal > OSM_SM_SIGNAL_MAX ) + signal = OSM_SM_SIGNAL_MAX; + return( __osm_sm_mgr_signal_str[signal] ); +} + +const char* const __osm_sm_mgr_state_str[] = +{ + "IB_SMINFO_STATE_NOTACTIVE", /* 0 */ + "IB_SMINFO_STATE_DISCOVERING", /* 1 */ + "IB_SMINFO_STATE_STANDBY", /* 2 */ + "IB_SMINFO_STATE_MASTER", /* 3 */ + "IB_SMINFO_STATE_INIT", /* 4 */ + "UNKNOWN STATE!!" /* 5 */ + +}; + +const char* +osm_get_sm_mgr_state_str( + IN uint16_t state ) +{ + if( state > IB_SMINFO_STATE_INIT ) + state = IB_SMINFO_STATE_INIT + 1; + return( __osm_sm_mgr_state_str[state] ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/libopensm/osm_log.c b/branches/Ndi/ulp/opensm/user/libopensm/osm_log.c new file mode 100644 index 00000000..e7bc851f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libopensm/osm_log.c @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementaion of osm_log_t. + * This object represents the log file. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include + +static int log_exit_count = 0; + +#ifndef WIN32 +#include +#include +#include + +static char *month_str[] = { + "Jan", + "Feb", + "Mar", + "Apr", + "May", + "Jun", + "Jul", + "Aug", + "Sep", + "Oct", + "Nov", + "Dec" +}; +#else +void +OsmReportState( + IN const char *p_str); +#endif /* ndef WIN32 */ + +#ifndef WIN32 + +static void truncate_log_file(osm_log_t* const p_log) +{ + int fd = fileno(p_log->out_port); + if (ftruncate(fd, 0) < 0) + fprintf(stderr, "truncate_log_file: cannot truncate: %s\n", + strerror(errno)); + if (lseek(fd, 0, SEEK_SET) < 0) + fprintf(stderr, "truncate_log_file: cannot rewind: %s\n", + strerror(errno)); + p_log->count = 0; +} + +#else /* Windows */ + +static void truncate_log_file(osm_log_t* const p_log) +{ + fprintf(stderr, "truncate_log_file: cannot truncate on windows system (yet)\n"); +} +#endif /* ndef WIN32 */ + +int osm_log_printf(osm_log_t *p_log, osm_log_level_t level, + const char *fmt, ...) +{ + va_list args; + int ret; + + if (!(p_log->level&level)) + return 0; + + va_start(args, fmt); + ret = vfprintf(stdout, fmt, args); + va_end(args); + + if (p_log->flush || level&OSM_LOG_ERROR) + fflush( stdout ); + + return ret; +} + +void +osm_log( + IN osm_log_t* const p_log, + IN const osm_log_level_t verbosity, + IN const char *p_str, ... 
) +{ + char buffer[LOG_ENTRY_SIZE_MAX]; + va_list args; + int ret; + +#ifdef WIN32 + SYSTEMTIME st; + uint32_t pid = GetCurrentThreadId(); +#else + pid_t pid = 0; + time_t tim; + struct tm result; + uint64_t time_usecs; + uint32_t usecs; + + time_usecs = cl_get_time_stamp(); + tim = time_usecs/1000000; + usecs = time_usecs % 1000000; + localtime_r(&tim, &result); +#endif /* WIN32 */ + + /* If this is a call to syslog - always print it */ + if ( verbosity & (OSM_LOG_SYS | p_log->level) ) + { + va_start( args, p_str ); + vsprintf( buffer, p_str, args ); + va_end(args); + + /* this is a call to the syslog */ + if (verbosity & OSM_LOG_SYS) + { + cl_log_event("OpenSM", LOG_INFO, buffer , NULL, 0); + + /* SYSLOG should go to stdout too */ + if (p_log->out_port != stdout) + { + printf("%s\n", buffer); + fflush( stdout ); + } +#ifdef WIN32 + OsmReportState(buffer); +#endif /* WIN32 */ + } + + /* regular log to default out_port */ + cl_spinlock_acquire( &p_log->lock ); + + if (p_log->max_size && p_log->count > p_log->max_size) + { + /* truncate here */ + fprintf(stderr, "osm_log: log file exceeds the limit %lu. Truncating.\n", + p_log->max_size); + truncate_log_file(p_log); + } + +#ifdef WIN32 + GetLocalTime(&st); + _retry: + ret = fprintf( p_log->out_port, "[%02d:%02d:%02d:%03d][%04X] -> %s", + st.wHour, st.wMinute, st.wSecond, st.wMilliseconds, + pid, buffer ); +#else + pid = pthread_self(); + _retry: + ret = fprintf( p_log->out_port, "%s %02d %02d:%02d:%02d %06d [%04X] -> %s", + (result.tm_mon < 12 ? month_str[result.tm_mon] : "???"), + result.tm_mday, result.tm_hour, + result.tm_min, result.tm_sec, + usecs, pid, buffer ); +#endif + + /* flush log */ + if (ret > 0 && (p_log->flush || (verbosity & OSM_LOG_ERROR)) && + fflush( p_log->out_port ) < 0) + ret = -1; + + if (ret >= 0) + { + log_exit_count = 0; + p_log->count += ret; + } + else if (log_exit_count < 3) + { + log_exit_count++; + if (errno == ENOSPC && p_log->max_size) { + fprintf(stderr, "osm_log: write failed: %s. Truncating log file.\n", + strerror(errno)); + truncate_log_file(p_log); + goto _retry; + } + fprintf(stderr, "osm_log: write failed: %s\n", strerror(errno)); + } + + cl_spinlock_release( &p_log->lock ); + } +} + +void +osm_log_raw( + IN osm_log_t* const p_log, + IN const osm_log_level_t verbosity, + IN const char *p_buf ) +{ + if( p_log->level & verbosity ) + { + cl_spinlock_acquire( &p_log->lock ); + printf( "%s", p_buf ); + cl_spinlock_release( &p_log->lock ); + + /* + Flush log on errors too. + */ + if( p_log->flush || (verbosity & OSM_LOG_ERROR) ) + fflush( stdout ); + } +} + +boolean_t +osm_is_debug(void) +{ +#if defined( _DEBUG_ ) + return TRUE; +#else + return FALSE; +#endif /* defined( _DEBUG_ ) */ +} + +ib_api_status_t +osm_log_init_v2( + IN osm_log_t* const p_log, + IN const boolean_t flush, + IN const uint8_t log_flags, + IN const char *log_file, + IN const unsigned long max_size, + IN const boolean_t accum_log_file ) +{ + struct stat st; + + p_log->level = log_flags; + p_log->flush = flush; + p_log->count = 0; + p_log->max_size = 0; + + if (log_file == NULL || !strcmp(log_file, "-") || + !strcmp(log_file, "stdout")) + { + p_log->out_port = stdout; + } + else if (!strcmp(log_file, "stderr")) + { + p_log->out_port = stderr; + } + else + { + if (accum_log_file) + p_log->out_port = fopen(log_file, "a+"); + else + p_log->out_port = fopen(log_file, "w+"); + + if (!p_log->out_port) + { + if (accum_log_file) + printf("Cannot open %s for appending. Permission denied\n", log_file); + else + printf("Cannot open %s for writing. 
Permission denied\n", log_file); + + return(IB_UNKNOWN_ERROR); + } + + if (fstat(fileno(p_log->out_port), &st) == 0) + p_log->count = st.st_size; + + p_log->max_size = max_size; + } + + openlog("OpenSM", LOG_CONS | LOG_PID, LOG_USER); + + if (cl_spinlock_init( &p_log->lock ) == CL_SUCCESS) + return IB_SUCCESS; + else + return IB_ERROR; +} + +ib_api_status_t +osm_log_init( + IN osm_log_t* const p_log, + IN const boolean_t flush, + IN const uint8_t log_flags, + IN const char *log_file, + IN const boolean_t accum_log_file ) +{ + return osm_log_init_v2( p_log, flush, log_flags, log_file, 0, accum_log_file ); +} + diff --git a/branches/Ndi/ulp/opensm/user/libopensm/osm_mad_pool.c b/branches/Ndi/ulp/opensm/user/libopensm/osm_mad_pool.c new file mode 100644 index 00000000..03ccd341 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libopensm/osm_mad_pool.c @@ -0,0 +1,302 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_mad_pool_t. + * This object represents a pool of management datagram (MAD) objects. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include + +#define OSM_MAD_POOL_MIN_SIZE 256 +#define OSM_MAD_POOL_GROW_SIZE 256 + + +/********************************************************************** + **********************************************************************/ +cl_status_t +__osm_mad_pool_ctor( + IN void* const p_object, + IN void* context, + OUT cl_pool_item_t** const pp_pool_item ) +{ + osm_madw_t *p_madw = p_object; + + UNUSED_PARAM( context ); + osm_madw_construct( p_madw ); + /* CHECK THIS. DOCS DON'T DESCRIBE THIS OUT PARAM. 
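+	   (The complib pool contract, as far as its sources show: the
+	   constructor must return, through pp_pool_item, the address of
+	   the cl_pool_item_t embedded in the object. The pool links free
+	   objects through that item, so returning anything else would
+	   corrupt its free list.)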
*/ + *pp_pool_item = &p_madw->pool_item; + return( CL_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mad_pool_construct( + IN osm_mad_pool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + memset( p_pool, 0, sizeof(*p_pool) ); + cl_qlock_pool_construct( &p_pool->madw_pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mad_pool_destroy( + IN osm_mad_pool_t* const p_pool ) +{ + CL_ASSERT( p_pool ); + + /* HACK: we still rarely see some mads leaking - so ignore this */ + /* cl_qlock_pool_destroy( &p_pool->madw_pool ); */ +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mad_pool_init( + IN osm_mad_pool_t* const p_pool, + IN osm_log_t* const p_log ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_mad_pool_init ); + + p_pool->p_log = p_log; + + status = cl_qlock_pool_init( + &p_pool->madw_pool, + OSM_MAD_POOL_MIN_SIZE, + 0, + OSM_MAD_POOL_GROW_SIZE, + sizeof( osm_madw_t ), + __osm_mad_pool_ctor, + NULL, + p_pool ); + if( status != IB_SUCCESS ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mad_pool_init: ERR 0702: " + "Grow pool initialization failed (%s)\n", + ib_get_err_str(status) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +osm_madw_t* +osm_mad_pool_get( + IN osm_mad_pool_t* const p_pool, + IN osm_bind_handle_t h_bind, + IN const uint32_t total_size, + IN const osm_mad_addr_t* const p_mad_addr ) +{ + osm_madw_t *p_madw; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( p_pool->p_log, osm_mad_pool_get ); + + CL_ASSERT( h_bind != OSM_BIND_INVALID_HANDLE ); + CL_ASSERT( total_size ); + + /* + First, acquire a mad wrapper from the mad wrapper pool. + */ + p_madw = (osm_madw_t*)cl_qlock_pool_get( &p_pool->madw_pool ); + if( p_madw == NULL ) + { + osm_log( p_pool->p_log, OSM_LOG_ERROR, + "osm_mad_pool_get: ERR 0703: " + "Unable to acquire MAD wrapper object\n" ); + goto Exit; + } + + osm_madw_init( p_madw, h_bind, total_size, p_mad_addr ); + + /* + Next, acquire a wire mad of the specified size. + */ + p_mad = osm_vendor_get( h_bind, total_size, &p_madw->vend_wrap ); + if( p_mad == NULL ) + { + osm_log( p_pool->p_log, OSM_LOG_ERROR, + "osm_mad_pool_get: ERR 0704: " + "Unable to acquire wire MAD\n" ); + + /* Don't leak wrappers! */ + cl_qlock_pool_put( &p_pool->madw_pool, (cl_pool_item_t*)p_madw ); + p_madw = NULL; + goto Exit; + } + + cl_atomic_inc( &p_pool->mads_out ); + /* + Finally, attach the wire MAD to this wrapper. 
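+	   From this point the wrapper owns both resources: a later
+	   osm_mad_pool_put hands the wire MAD back through osm_vendor_put
+	   and only then returns the wrapper itself, decrementing mads_out.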
+ */ + osm_madw_set_mad( p_madw, p_mad ); + + osm_log( p_pool->p_log, OSM_LOG_DEBUG, + "osm_mad_pool_get: Acquired p_madw = %p, p_mad = %p, " + "size = %u\n", p_madw, p_madw->p_mad, total_size ); + + Exit: + OSM_LOG_EXIT( p_pool->p_log ); + return( p_madw ); +} + +/********************************************************************** + **********************************************************************/ +osm_madw_t* +osm_mad_pool_get_wrapper( + IN osm_mad_pool_t* const p_pool, + IN osm_bind_handle_t h_bind, + IN const uint32_t total_size, + IN const ib_mad_t* const p_mad, + IN const osm_mad_addr_t* const p_mad_addr ) +{ + osm_madw_t *p_madw; + + OSM_LOG_ENTER( p_pool->p_log, osm_mad_pool_get_wrapper ); + + CL_ASSERT( h_bind != OSM_BIND_INVALID_HANDLE ); + CL_ASSERT( total_size ); + CL_ASSERT( p_mad ); + + /* + First, acquire a mad wrapper from the mad wrapper pool. + */ + p_madw = (osm_madw_t*)cl_qlock_pool_get( &p_pool->madw_pool ); + if( p_madw == NULL ) + { + osm_log( p_pool->p_log, OSM_LOG_ERROR, + "osm_mad_pool_get_wrapper: ERR 0705: " + "Unable to acquire MAD wrapper object\n" ); + goto Exit; + } + + /* + Finally, initialize the wrapper object. + */ + cl_atomic_inc( &p_pool->mads_out ); + osm_madw_init( p_madw, h_bind, total_size, p_mad_addr ); + osm_madw_set_mad( p_madw, p_mad ); + + osm_log( p_pool->p_log, OSM_LOG_DEBUG, + "osm_mad_pool_get_wrapper: Acquired p_madw = %p, p_mad = %p " + "size = %u\n", p_madw, p_madw->p_mad, total_size ); + + Exit: + OSM_LOG_EXIT( p_pool->p_log ); + return( p_madw ); +} + +/********************************************************************** + **********************************************************************/ +osm_madw_t* +osm_mad_pool_get_wrapper_raw( + IN osm_mad_pool_t* const p_pool ) +{ + osm_madw_t *p_madw; + + OSM_LOG_ENTER( p_pool->p_log, osm_mad_pool_get_wrapper_raw ); + + p_madw = (osm_madw_t*)cl_qlock_pool_get( &p_pool->madw_pool ); + + osm_log( p_pool->p_log, OSM_LOG_DEBUG, + "osm_mad_pool_get_wrapper_raw: " + "Getting p_madw = %p\n", p_madw ); + + osm_madw_init( p_madw, 0, 0, 0 ); + osm_madw_set_mad( p_madw, 0 ); + cl_atomic_inc( &p_pool->mads_out ); + + OSM_LOG_EXIT( p_pool->p_log ); + return( p_madw ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mad_pool_put( + IN osm_mad_pool_t* const p_pool, + IN osm_madw_t* const p_madw ) +{ + OSM_LOG_ENTER( p_pool->p_log, osm_mad_pool_put ); + + CL_ASSERT( p_madw ); + + osm_log( p_pool->p_log, OSM_LOG_DEBUG, + "osm_mad_pool_put: Releasing p_madw = %p, p_mad = %p\n", + p_madw, p_madw->p_mad ); + + /* + First, return the wire mad to the pool + */ + if( p_madw->p_mad ) + osm_vendor_put( p_madw->h_bind, &p_madw->vend_wrap ); + + /* + Return the mad wrapper to the wrapper pool + */ + cl_qlock_pool_put( &p_pool->madw_pool, (cl_pool_item_t*)p_madw ); + cl_atomic_dec( &p_pool->mads_out ); + + OSM_LOG_EXIT( p_pool->p_log ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/libvendor/Makefile b/branches/Ndi/ulp/opensm/user/libvendor/Makefile new file mode 100644 index 00000000..58189757 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libvendor/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. 
This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/ulp/opensm/user/libvendor/SOURCES b/branches/Ndi/ulp/opensm/user/libvendor/SOURCES new file mode 100644 index 00000000..929dc1ae --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libvendor/SOURCES @@ -0,0 +1,62 @@ +!if $(FREEBUILD) +TARGETNAME=osmv_ibal +!else +TARGETNAME=osmv_ibald +!endif +TARGETTYPE=LIBRARY + +!if !defined(WINIBHOME) +WINIBHOME=..\..\..\.. +!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +!if defined(OSM_TARGET) +TARGETPATH=$(OSM_TARGET)\bin\user\obj$(BUILD_ALT_DIR) +!else +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) +!endif + +USE_NTDLL=1 +OVR_DIR=..\addon + + +SOURCES=\ + osm_vendor_al.c \ + osm_vendor_mlx_sa.c \ + winosm_common.c + +OSM_HOME=.. + +TARGETLIBS=\ +!if $(FREEBUILD) + $(LIBPATH)\*\ibal.lib \ + $(LIBPATH)\*\complib.lib \ + $(CRT_LIB_PATH)\msvcrt.lib + +!else + $(LIBPATH)\*\ibald.lib \ + $(LIBPATH)\*\complibd.lib \ + $(CRT_LIB_PATH)\msvcrt.lib +!endif + +#DO NOT TOUCH the order of search path , until ib_types.h merging process will be done +INCLUDES= \ + $(OSM_HOME)\include; \ + $(OSM_HOME); \ + $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; + +# Could be any special flag needed for this project +USER_C_FLAGS=$(USER_C_FLAGS) /Ze +#Add preproccessor definitions +C_DEFINES=$(C_DEFINES) -DWIN32 -D__WIN__ -D__i386__ -Dinline=__inline -DMT_LITTLE_ENDIAN -DOSM_VENDOR_INTF_AL +C_DEFINES=$(C_DEFINES) -I.. -DHAVE_CONFIG_H +!if !$(FREEBUILD) +#C_DEFINES=$(C_DEFINES) -D_DEBUG -DDEBUG -DDBG +C_DEFINES=$(C_DEFINES) +!endif + +LINKER_FLAGS= $(LINKER_FLAGS) +MSC_WARNING_LEVEL= /W3 +#MSC_OPTIMIZATION= /O0 diff --git a/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_al.c b/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_al.c new file mode 100644 index 00000000..a450e4f0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_al.c @@ -0,0 +1,1532 @@ +/* + * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_req_t. + * This object represents the generic attribute requester. + * This object is part of the opensm family of objects. 
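+ *	(In this branch the same skeleton carries the IBAL vendor
+ *	transport: osm_vendor_bind and friends below map OpenSM's generic
+ *	MAD interface onto AL QP aliases and MAD services.)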
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +/* + Next available error code: 0x300 +*/ +#ifdef __WIN__ +/* Suppress all warning regarding casting void* to specific pointer object */ +#pragma warning(disable : 4305) +#endif + +#include + +#ifdef OSM_VENDOR_INTF_AL + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/****s* OpenSM: Vendor AL/osm_al_bind_info_t + * NAME + * osm_al_bind_info_t + * + * DESCRIPTION + * Structure containing bind information. + * + * SYNOPSIS + */ +typedef struct _osm_al_bind_info +{ + osm_vendor_t *p_vend; + void *client_context; + ib_qp_handle_t h_qp; + ib_mad_svc_handle_t h_svc; + uint8_t port_num; + ib_pool_key_t pool_key; + osm_vend_mad_recv_callback_t rcv_callback; + osm_vend_mad_send_err_callback_t send_err_callback; + osm_mad_pool_t *p_osm_pool; + ib_av_handle_t h_dr_av; + +} osm_al_bind_info_t; +/* + * FIELDS + * p_vend + * Pointer to the vendor object. + * + * client_context + * User's context passed during osm_bind + * + * h_qp + * Handle the QP for this bind. + * + * h_qp_svc + * Handle the QP mad service for this bind. + * + * port_num + * Port number (within the HCA) of the bound port. + * + * pool_key + * Pool key returned by all for this QP. + * + * h_dr_av + * Address vector handle used for all directed route SMPs. + * + * SEE ALSO + *********/ + +/********************************************************************** + **********************************************************************/ +inline static ib_api_status_t +__osm_al_convert_wcs( + IN ib_wc_status_t const wc_status ) +{ + switch( wc_status ) + { + case IB_WCS_SUCCESS: + return( IB_SUCCESS ); + + case IB_WCS_TIMEOUT_RETRY_ERR: + return( IB_TIMEOUT ); + + default: + return( IB_ERROR ); + } +} + +/********************************************************************** + **********************************************************************/ +void AL_API +__osm_set_vend_wrap( IN osm_al_bind_info_t* const p_bind, + IN ib_mad_element_t* const p_elem, + OUT osm_vend_wrap_t* p_vw) +{ + p_vw->h_bind = p_bind; + p_vw->size = p_elem->size; + p_vw->p_elem = p_elem; + p_vw->h_av = 0; + p_vw->p_resp_madw = NULL; + +} + + +static void AL_API +__osm_al_ca_err_callback( + IN ib_async_event_rec_t *p_async_rec ) +{ + osm_vendor_t *p_vend = (osm_vendor_t*)p_async_rec->context; + OSM_LOG_ENTER( p_vend->p_log, __osm_al_ca_err_callback ); + + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_al_ca_err_callback: ERR 3B01: " + "Event on channel adapter (%s).\n", + ib_get_async_event_str( p_async_rec->code ) ); + + OSM_LOG_EXIT( p_vend->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void AL_API +__osm_al_ca_destroy_callback( + IN void *context ) +{ + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t*)context; + osm_vendor_t *p_vend = p_bind->p_vend; + OSM_LOG_ENTER( p_vend->p_log, __osm_al_ca_destroy_callback ); + + osm_log( p_vend->p_log, OSM_LOG_INFO, + "__osm_al_ca_destroy_callback: " + "Closing local channel adapter.\n" ); + + OSM_LOG_EXIT( p_vend->p_log ); +} + +/********************************************************************** + **********************************************************************/ + +static void AL_API +__osm_al_err_callback( + IN ib_async_event_rec_t *p_async_rec ) +{ + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t*)p_async_rec->context; + osm_vendor_t *p_vend = p_bind->p_vend; + 
+	OSM_LOG_ENTER( p_vend->p_log, __osm_al_err_callback );
+
+	osm_log( p_vend->p_log, OSM_LOG_ERROR,
+		"__osm_al_err_callback: ERR 3B02: "
+		"Error on QP (%s).\n",
+		ib_get_async_event_str( p_async_rec->code ) );
+
+	OSM_LOG_EXIT( p_vend->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+/*
+  Send_cb handles the following cases:
+
+  Element Status | Send Response (no response expected) | Send Request (response expected)
+  ===============|======================================|=================================
+  ERROR          | Free: AV, madw (send_err_cb)         | Free: AV, madw, resp_madw
+  SUCCESS        | Free: AV, madw                       | Free: AV, madw
+
+  Element Status | Receive Response (no response expected)
+  ===============|========================================
+  ERROR          | Free: AV, madw (send_err_cb)
+  SUCCESS        | Free: AV, madw, resp_madw (both through rcv_callback)
+*/
+static void AL_API
+__osm_al_send_callback(
+	IN const ib_mad_svc_handle_t h_mad_svc,
+	IN void *mad_svc_context,
+	IN ib_mad_element_t *p_elem )
+{
+	osm_al_bind_info_t* const p_bind = (osm_al_bind_info_t*)mad_svc_context;
+	osm_vendor_t* const p_vend = p_bind->p_vend;
+	osm_madw_t* const p_madw = (osm_madw_t*)p_elem->context1;
+	osm_vend_wrap_t* p_vw = osm_madw_get_vend_ptr( p_madw );
+	ib_mad_t *p_mad = ib_get_mad_buf( p_elem );
+	ib_api_status_t status_elem;
+	osm_madw_t *p_new_madw;
+
+	OSM_LOG_ENTER( p_vend->p_log, __osm_al_send_callback );
+	UNUSED_PARAM( h_mad_svc );
+	CL_ASSERT( p_vw );
+	CL_ASSERT( p_vw->h_av );
+	/* context1 always carries the request madw, and this is the only
+	   place that cleans up p_elem, p_madw and h_av, so no further
+	   checks are required. */
+	status_elem = __osm_al_convert_wcs( p_elem->status );
+	osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+		"__osm_al_send_callback: "
+		"Destroying av handle %p.\n", p_elem->h_av );
+	/* Check first whether this is the shared directed route AV handle;
+	   if so, skip the destroy. */
+	if (p_elem->h_av != p_bind->h_dr_av)
+	{
+		ib_destroy_av( p_elem->h_av );
+	}
+	/* Since the free order is first resp_madw and then madw (the parent),
+	   we must check this case first. */
+	if (p_elem->resp_expected)
+	{
+		p_madw->status = status_elem;
+		if ( status_elem != IB_SUCCESS )
+		{
+			/*
+			  Return any wrappers to the pool that may have been
+			  pre-emptively allocated to handle a receive.
+			*/
+			osm_log( p_vend->p_log, OSM_LOG_ERROR,
+				"__osm_al_send_callback: ERR 3333: "
+				"MAD completed with work completion error: %s.\n",
+				ib_get_wc_status_str( p_elem->status ) );
+			if( p_vw->p_resp_madw )
+			{
+				osm_mad_pool_put( p_bind->p_osm_pool, p_vw->p_resp_madw );
+				p_vw->p_resp_madw = NULL;
+			}
+
+			p_bind->send_err_callback( p_bind->client_context, p_madw );
+		}
+		else
+		{
+			/* We are in the response flow of a receive and need to apply
+			   rcv_callback, which frees the resp_madw, the req_madw and
+			   the p_elem of both the receive and the request. */
+			osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+				"__osm_al_send_callback: "
+				"This MAD is a response, thus handled in __osm_al_send_callback.\n" );
+			p_new_madw = p_vw->p_resp_madw;
+			p_bind->rcv_callback( p_new_madw, p_bind->client_context,
+				p_madw );
+		}
+	}
+	else
+	{
+		osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+			"__osm_al_send_callback: "
+			"Returning MAD to pool, TID = 0x%" PRIx64 ".\n",
+			cl_ntoh64( p_mad->trans_id ) );
+		osm_mad_pool_put( p_bind->p_osm_pool, p_madw );
+		if ( status_elem != IB_SUCCESS )
+			osm_log( p_vend->p_log, OSM_LOG_ERROR,
+				"__osm_al_send_callback: ERR 3B0B: "
+				"Request MAD failed.\n" );
+		goto Exit;
+	}
+
+  Exit:
+	OSM_LOG_EXIT( p_vend->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+/*
+  Receive_cb is applied in the following cases:
+
+  Element Status | Receive Response (no response expected) | Receive Request (response expected)
+  ===============|=========================================|====================================
+  ERROR          | NOT APPLIED                             | NOT APPLIED
+  SUCCESS        | Free: resp_madw, copy of req_madw       | Allocate new_madw (freed later in send_cb)
+*/
+static void AL_API
+__osm_al_rcv_callback(
+	IN const ib_mad_svc_handle_t h_mad_svc,
+	IN void *mad_svc_context,
+	IN ib_mad_element_t *p_elem )
+{
+	osm_al_bind_info_t* const p_bind = (osm_al_bind_info_t*)mad_svc_context;
+	osm_vendor_t* const p_vend = p_bind->p_vend;
+	osm_madw_t *p_old_madw;
+	osm_madw_t *p_new_madw;
+	osm_vend_wrap_t* p_old_vw;
+	osm_vend_wrap_t* p_new_vw;
+	ib_mad_t *p_new_mad;
+	osm_mad_addr_t mad_addr;
+
+	OSM_LOG_ENTER( p_vend->p_log, __osm_al_rcv_callback );
+	UNUSED_PARAM( h_mad_svc );
+	CL_ASSERT( p_elem->context1 == NULL );
+	CL_ASSERT( p_elem->context2 == NULL );
+	/*
+	osm_log( p_vend->p_log, OSM_LOG_VERBOSE,
+		"__osm_al_rcv_callback: "
+		"Handling Transaction : 0x%" PRIx64 " .\n",
+		cl_ntoh64(p_elem->p_mad_buf->trans_id));
+	*/
+	p_new_mad = ib_get_mad_buf( p_elem );
+	osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+		"__osm_al_rcv_callback: "
+		"Implicitly acquired MAD %p.\n", p_new_mad );
+
+	/*
+	  In preparation for initializing the new MAD wrapper,
+	  initialize the mad_addr structure for the received wire MAD.
+	*/
+	mad_addr.dest_lid = p_elem->remote_lid;
+	mad_addr.path_bits = p_elem->path_bits;
+
+	/* TO DO - figure out which #define to use for the 2.5 Gb rate... */
+	mad_addr.static_rate = 0;
+
+	if( p_new_mad->mgmt_class == IB_MCLASS_SUBN_LID ||
+	    p_new_mad->mgmt_class == IB_MCLASS_SUBN_DIR )
+	{
+		mad_addr.addr_type.smi.source_lid = p_elem->remote_lid;
+	}
+	else
+	{
+		mad_addr.addr_type.gsi.remote_qp = p_elem->remote_qp;
+		mad_addr.addr_type.gsi.remote_qkey = p_elem->remote_qkey;
+		mad_addr.addr_type.gsi.pkey = p_elem->pkey_index;
+		mad_addr.addr_type.gsi.service_level = p_elem->remote_sl;
+		mad_addr.addr_type.gsi.global_route = FALSE;
+	}
+
+	/*
+	  If this MAD is a response to a previous request,
+	  then grab our pre-allocated MAD wrapper.
+	  Otherwise, allocate a new MAD wrapper.
+	  context1 - contains the request madw.
+	*/
+	if( ib_mad_is_response( p_new_mad ) )
+	{
+		/*
+		  The MAD itself was implicitly acquired in ib_get_mad_buf;
+		  for a request it is allocated in the lower layer. The debug
+		  message above exists to track down how/where each MAD was
+		  allocated.
+		*/
+		osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+			"__osm_al_rcv_callback: "
+			"This MAD is a response, thus handled in __osm_al_send_callback.\n" );
+		CL_ASSERT( p_elem->send_context1 != NULL );
+		CL_ASSERT( p_elem->send_context2 == NULL );
+
+		p_old_madw = (osm_madw_t*)p_elem->send_context1;
+		p_old_vw = osm_madw_get_vend_ptr( p_old_madw );
+		p_new_madw = p_old_vw->p_resp_madw;
+
+		CL_ASSERT( p_new_madw );
+		osm_madw_init( p_new_madw, p_bind, p_elem->size,
+			&mad_addr );
+		osm_madw_set_mad( p_new_madw, p_new_mad );
+		p_new_vw = osm_madw_get_vend_ptr( p_new_madw );
+		__osm_set_vend_wrap( p_bind, p_elem, p_new_vw );
+		goto Exit;
+	}
+	else
+	{
+		osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+			"__osm_al_rcv_callback: "
+			"This MAD is a request, thus handled in __osm_al_rcv_callback.\n" );
+		CL_ASSERT( p_elem->send_context1 == NULL );
+		CL_ASSERT( p_elem->send_context2 == NULL );
+
+		p_new_madw = osm_mad_pool_get_wrapper( p_bind->p_osm_pool,
+			p_bind, p_elem->size, p_new_mad, &mad_addr );
+		CL_ASSERT( p_new_madw );
+		p_new_vw = osm_madw_get_vend_ptr( p_new_madw );
+
+		__osm_set_vend_wrap( p_bind, p_elem, p_new_vw );
+		osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+			"__osm_al_rcv_callback: "
+			"Calling receive callback function %p.\n",
+			p_bind->rcv_callback );
+
+		p_bind->rcv_callback( p_new_madw, p_bind->client_context,
+			((osm_madw_t*)p_elem->send_context1) );
+	}
+
+  Exit:
+	OSM_LOG_EXIT( p_vend->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_vendor_init(
+	IN osm_vendor_t* const p_vend,
+	IN osm_log_t* const p_log,
+	IN const uint32_t timeout )
+{
+	ib_api_status_t status;
+	OSM_LOG_ENTER( p_log, osm_vendor_init );
+
+	p_vend->p_log = p_log;
+
+	/*
+	  Open our instance of AL.
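+	  The handle obtained below is passed to every subsequent AL call
+	  in this file (ib_get_ca_guids, ib_query_ca_by_guid, ib_open_ca)
+	  and is released again by osm_vendor_delete via ib_close_al.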
+ */ + status = ib_open_al( &p_vend->h_al ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_init: ERR 3B03: " + "Error opening AL (%s).\n", + ib_get_err_str( status ) ); + + goto Exit; + } + + p_vend->timeout = timeout; + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +osm_vendor_t* +osm_vendor_new( + IN osm_log_t* const p_log, + IN const uint32_t timeout ) +{ + ib_api_status_t status; + osm_vendor_t *p_vend; + + OSM_LOG_ENTER( p_log, osm_vendor_new ); + + p_vend = cl_zalloc( sizeof(*p_vend) ); + if( p_vend == NULL ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_new: ERR 3B04: " + "Unable to allocate vendor object.\n" ); + goto Exit; + } + + status = osm_vendor_init( p_vend, p_log, timeout ); + if( status != IB_SUCCESS ) + { + cl_free( p_vend ); + p_vend = NULL; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( p_vend ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vendor_delete( + IN osm_vendor_t** const pp_vend ) +{ + /* TO DO - fill this in */ + ib_close_al( (*pp_vend)->h_al ); + cl_free( *pp_vend ); + *pp_vend = NULL; +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_ca_info_init( + IN osm_vendor_t* const p_vend, + IN osm_ca_info_t* const p_ca_info, + IN const ib_net64_t ca_guid ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_vend->p_log, __osm_ca_info_init ); + + p_ca_info->guid = ca_guid; + + if( osm_log_is_active( p_vend->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_vend->p_log, OSM_LOG_VERBOSE, + "__osm_ca_info_init: " + "Querying CA 0x%" PRIx64 ".\n", + cl_ntoh64( ca_guid ) ); + } +/* attr size by verbs definition is required to be (uint32_t *) under opensm is only being set as 1 */ + status = ib_query_ca_by_guid( p_vend->h_al, ca_guid, NULL, + (uint32_t*)&p_ca_info->attr_size ); + if( (status != IB_INSUFFICIENT_MEMORY ) && (status != IB_SUCCESS ) ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_ca_info_init: ERR 3B05: " + "Unexpected status getting CA attributes (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + + CL_ASSERT( p_ca_info->attr_size ); + + p_ca_info->p_attr = cl_malloc( p_ca_info->attr_size ); + if( p_ca_info->p_attr == NULL ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_ca_info_init: ERR 3B06: " + "Unable to allocate attribute storage.\n" ); + goto Exit; + } + + status = ib_query_ca_by_guid( p_vend->h_al, ca_guid, p_ca_info->p_attr, + (uint32_t*)&p_ca_info->attr_size ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_ca_info_init: ERR 3B07: " + "Unexpected status getting CA attributes (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( status ); +} + + +/********************************************************************** + **********************************************************************/ +void +osm_ca_info_destroy( + IN osm_vendor_t* const p_vend, + IN osm_ca_info_t* const p_ca_info ) +{ + OSM_LOG_ENTER( p_vend->p_log, osm_ca_info_destroy ); + + if( p_ca_info->p_attr ) + cl_free( p_ca_info->p_attr ); + + cl_free( p_ca_info ); + + OSM_LOG_EXIT( p_vend->p_log ); +} + 
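The two-step query in __osm_ca_info_init above follows a common sizing idiom that is easy to get wrong, so here is a minimal, self-contained sketch of the same pattern. The helper name example_fetch_ca_attr is hypothetical and not part of this patch; the ib_query_ca_by_guid, cl_malloc and cl_free calls mirror the ones used above.

/* Sketch only: query once for the required size, then again for the data. */
static ib_ca_attr_t*
example_fetch_ca_attr(
	IN ib_al_handle_t h_al,
	IN ib_net64_t ca_guid )
{
	uint32_t size = 0;
	ib_ca_attr_t *p_attr;
	ib_api_status_t status;

	/* First call: NULL buffer; AL reports the size it needs and fails
	   with IB_INSUFFICIENT_MEMORY, which is the expected outcome here. */
	status = ib_query_ca_by_guid( h_al, ca_guid, NULL, &size );
	if( status != IB_INSUFFICIENT_MEMORY && status != IB_SUCCESS )
		return NULL;	/* a real failure, not just "buffer too small" */

	p_attr = (ib_ca_attr_t*)cl_malloc( size );
	if( p_attr == NULL )
		return NULL;

	/* Second call: same GUID, now with a buffer of the right size. */
	if( ib_query_ca_by_guid( h_al, ca_guid, p_attr, &size ) != IB_SUCCESS )
	{
		cl_free( p_attr );
		return NULL;
	}
	return p_attr;	/* caller releases with cl_free() */
}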
+/********************************************************************** + **********************************************************************/ +osm_ca_info_t* +osm_ca_info_new( + IN osm_vendor_t* const p_vend, + IN const ib_net64_t ca_guid ) +{ + ib_api_status_t status; + osm_ca_info_t *p_ca_info; + + OSM_LOG_ENTER( p_vend->p_log, osm_ca_info_new ); + + CL_ASSERT( ca_guid ); + + p_ca_info = cl_zalloc( sizeof(*p_ca_info) ); + if( p_ca_info == NULL ) + goto Exit; + + status = __osm_ca_info_init( p_vend, p_ca_info, ca_guid ); + if( status != IB_SUCCESS ) + { + osm_ca_info_destroy( p_vend, p_ca_info ); + p_ca_info = NULL; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( p_ca_info ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_vendor_get_ca_guids( + IN osm_vendor_t* const p_vend, + IN ib_net64_t** const p_guids, + IN size_t* const p_num_guids ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_vend->p_log, __osm_vendor_get_ca_guids ); + + CL_ASSERT( p_guids ); + CL_ASSERT( p_num_guids ); + + status = ib_get_ca_guids( p_vend->h_al, NULL, p_num_guids ); + if( (status != IB_INSUFFICIENT_MEMORY ) && (status != IB_SUCCESS ) ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_get_ca_guids: ERR 3B08: " + "Unexpected status getting CA GUID array (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + + if( *p_num_guids == 0 ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_get_ca_guids: ERR 3B09: " + "No available channel adapters.\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + *p_guids = cl_malloc( *p_num_guids * sizeof(**p_guids) ); + if( *p_guids == NULL ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_get_ca_guids: ERR 3B10: " + "Unable to allocate CA GUID array.\n" ); + goto Exit; + } + + status = ib_get_ca_guids( p_vend->h_al, *p_guids, p_num_guids ); + CL_ASSERT( *p_num_guids ); + + if( osm_log_is_active( p_vend->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_vend->p_log, OSM_LOG_VERBOSE, + "__osm_vendor_get_ca_guids: " + "Detected %u local channel adapters.\n", *p_num_guids ); + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( status ); +} + +/****f* OpenSM: CA Info/osm_ca_info_get_pi_ptr + * NAME + * osm_ca_info_get_pi_ptr + * + * DESCRIPTION + * Returns a pointer to the port attribute of the specified port + * owned by this CA. + * + * SYNOPSIS + */ +static ib_port_attr_t* +__osm_ca_info_get_port_attr_ptr( + IN const osm_ca_info_t* const p_ca_info, + IN const uint8_t index ) +{ + return( &p_ca_info->p_attr->p_port_attr[index] ); +} +/* + * PARAMETERS + * p_ca_info + * [in] Pointer to a CA Info object. + * + * index + * [in] Port "index" for which to retrieve the port attribute. + * The index is the offset into the ca's internal array + * of port attributes. + * + * RETURN VALUE + * Returns a pointer to the port attribute of the specified port + * owned by this CA. + * Also allocate p_vend->p_ca_info if not allocated and init it . 
+ * + * NOTES + * + * SEE ALSO + *********/ + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vendor_get_all_port_attr( + IN osm_vendor_t* const p_vend, + IN ib_port_attr_t* const p_attr_array, + IN uint32_t* const p_num_ports ) +{ + ib_api_status_t status; + + uint32_t ca; + size_t ca_count; + uint32_t port_count = 0; + uint8_t port_num; + uint32_t total_ports = 0; + ib_net64_t *p_ca_guid = NULL; + osm_ca_info_t *p_ca_info; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_get_all_port_attr ); + + CL_ASSERT( p_vend ); + + /* + 1) Determine the number of CA's + 2) If not allready allocated - allocate an array big enough to hold the + ca info object , the ca info will be overwrite to contain the new ca info + 3) Call again to retrieve the guids. + */ + status = __osm_vendor_get_ca_guids( p_vend, &p_ca_guid, &ca_count ); + + if (p_vend->p_ca_info == NULL) + { + p_vend->p_ca_info = cl_zalloc( ca_count * sizeof(*(p_vend->p_ca_info)) ); + + if( p_vend->p_ca_info == NULL ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_get_all_port_attr: ERR 3B11: " + "Unable to allocate CA information array.\n" ); + goto Exit; + } + } + + p_vend->ca_count = ca_count; + + /* + For each CA, retrieve the port info attributes + */ + for( ca = 0; ca < ca_count; ca++ ) + { + p_ca_info = &p_vend->p_ca_info[ca]; + + status = __osm_ca_info_init( + p_vend, + p_ca_info, + p_ca_guid[ca] ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_get_all_port_attr: ERR 3B12: " + "Unable to initialize CA Info object (%s).\n", + ib_get_err_str( status ) ); + } + + total_ports += osm_ca_info_get_num_ports( p_ca_info ); + } + + /* + If the user supplied enough storage, return the port guids, + otherwise, return the appropriate error. + */ + if( *p_num_ports >= total_ports ) + { + for( ca = 0; ca < ca_count; ca++ ) + { + uint32_t num_ports; + + p_ca_info = &p_vend->p_ca_info[ca]; + + num_ports = osm_ca_info_get_num_ports( p_ca_info ); + + for( port_num = 0; port_num < num_ports; port_num++ ) + { + p_attr_array[port_count] = + *__osm_ca_info_get_port_attr_ptr( p_ca_info, port_num ); + /* convert lid to host order */ + p_attr_array[port_count].lid = cl_ntoh16(p_attr_array[port_count].lid); + port_count++; + } + } + } + else + { + status = IB_INSUFFICIENT_MEMORY; + } + + + *p_num_ports = total_ports; + + + Exit: + if( p_ca_guid ) + cl_free( p_ca_guid ); + + OSM_LOG_EXIT( p_vend->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_net64_t +osm_vendor_get_ca_guid( + IN osm_vendor_t* const p_vend, + IN const ib_net64_t port_guid ) +{ + uint8_t index; + uint8_t num_ports; + uint32_t num_guids = 0; + osm_ca_info_t *p_ca_info; + uint32_t ca; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_get_ca_guid ); + + CL_ASSERT( port_guid ); + /* + First, locate the HCA that owns this port. + */ + if( p_vend->p_ca_info == NULL ) + { + /* + Initialize the osm_ca_info_t array which allows + us to match port GUID to CA. 
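+	   Passing NULL with num_guids == 0 deliberately takes the
+	   "insufficient memory" path inside osm_vendor_get_all_port_attr:
+	   the CA info table is still built even though no port attributes
+	   are copied out.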
+ */ + osm_vendor_get_all_port_attr( p_vend, NULL, &num_guids ); + } + + CL_ASSERT( p_vend->p_ca_info ); + CL_ASSERT( p_vend->ca_count ); + + for( ca = 0; ca < p_vend->ca_count; ca++ ) + { + p_ca_info = &p_vend->p_ca_info[ca]; + + num_ports = osm_ca_info_get_num_ports( p_ca_info ); + CL_ASSERT( num_ports ); + + for( index = 0; index < num_ports; index++ ) + { + if( port_guid == + osm_ca_info_get_port_guid( p_ca_info, index ) ) + { + OSM_LOG_EXIT( p_vend->p_log ); + return( osm_ca_info_get_ca_guid( p_ca_info ) ); + } + } + } + + /* + No local CA owns this guid! + */ + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_get_ca_guid: ERR 3B13: " + "Unable to determine CA guid.\n" ); + + OSM_LOG_EXIT( p_vend->p_log ); + return( 0 ); +} + +/********************************************************************** + **********************************************************************/ +uint8_t +osm_vendor_get_port_num( + IN osm_vendor_t* const p_vend, + IN const ib_net64_t port_guid ) +{ + uint8_t index; + uint8_t num_ports; + uint32_t num_guids = 0; + osm_ca_info_t *p_ca_info; + uint32_t ca; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_get_port_num ); + + CL_ASSERT( port_guid ); + /* + First, locate the HCA that owns this port. + */ + if( p_vend->p_ca_info == NULL ) + { + /* + Initialize the osm_ca_info_t array which allows + us to match port GUID to CA. + */ + osm_vendor_get_all_port_attr( p_vend, NULL, &num_guids ); + } + + CL_ASSERT( p_vend->p_ca_info ); + CL_ASSERT( p_vend->ca_count ); + + for( ca = 0; ca < p_vend->ca_count; ca++ ) + { + p_ca_info = &p_vend->p_ca_info[ca]; + + num_ports = osm_ca_info_get_num_ports( p_ca_info ); + CL_ASSERT( num_ports ); + + for( index = 0; index < num_ports; index++ ) + { + if( port_guid == + osm_ca_info_get_port_guid( p_ca_info, index ) ) + { + OSM_LOG_EXIT( p_vend->p_log ); + return( osm_ca_info_get_port_num( p_ca_info, index ) ); + } + } + } + + /* + No local CA owns this guid! 
+ */ + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_get_port_num: ERR 3B30: " + "Unable to determine CA guid.\n" ); + + OSM_LOG_EXIT( p_vend->p_log ); + return( 0 ); +} + + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_vendor_open_ca( + IN osm_vendor_t* const p_vend, + IN const ib_net64_t port_guid ) +{ + ib_net64_t ca_guid; + ib_api_status_t status; + + OSM_LOG_ENTER( p_vend->p_log, __osm_vendor_open_ca ); + + ca_guid = osm_vendor_get_ca_guid( p_vend, port_guid ); + if( ca_guid == 0 ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_open_ca: ERR 3B31: " + "Bad port GUID value 0x%" PRIx64 ".\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + osm_log( p_vend->p_log, OSM_LOG_VERBOSE, + "__osm_vendor_open_ca: " + "Opening HCA 0x%" PRIx64 ".\n", cl_ntoh64( ca_guid ) ); + + status = ib_open_ca( p_vend->h_al, + ca_guid, + __osm_al_ca_err_callback, + p_vend, + &p_vend->h_ca ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_open_ca: ERR 3B15: " + "Unable to open CA (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + + CL_ASSERT( p_vend->h_ca ); + + status = ib_alloc_pd( + p_vend->h_ca, + IB_PDT_ALIAS, + p_vend, + &p_vend->h_pd ); + + if( status != IB_SUCCESS ) + { + ib_close_ca( p_vend->h_ca,__osm_al_ca_destroy_callback ); + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "__osm_vendor_open_ca: ERR 3B16: " + "Unable to allocate protection domain (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + + CL_ASSERT( p_vend->h_pd ); + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_vendor_init_av( + IN const osm_al_bind_info_t* p_bind, + IN ib_av_attr_t* p_av ) +{ + cl_memclr( p_av, sizeof(*p_av) ); + p_av->port_num = p_bind->port_num; + p_av->dlid = IB_LID_PERMISSIVE; +} + +/********************************************************************** + **********************************************************************/ +osm_bind_handle_t +osm_vendor_bind( + IN osm_vendor_t* const p_vend, + IN osm_bind_info_t* const p_user_bind, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_vend_mad_recv_callback_t mad_recv_callback, + IN osm_vend_mad_send_err_callback_t send_err_callback, + IN void* context ) +{ + ib_net64_t port_guid; + osm_al_bind_info_t *p_bind = 0; + ib_api_status_t status; + ib_qp_create_t qp_create; + ib_mad_svc_t mad_svc; + ib_av_attr_t av; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_bind ); + + CL_ASSERT( p_user_bind ); + CL_ASSERT( p_mad_pool ); + CL_ASSERT( mad_recv_callback ); + CL_ASSERT( send_err_callback ); + + port_guid = p_user_bind->port_guid; + osm_log( p_vend->p_log, OSM_LOG_INFO, + "osm_vendor_bind: " + "Binding to port 0x%" PRIx64 ".\n", + cl_ntoh64( port_guid ) ); + + if( p_vend->h_ca == 0 ) + { + osm_log( p_vend->p_log, OSM_LOG_DEBUG, + "osm_vendor_bind: " + "Opening CA that owns port 0x%" PRIx64 ".\n", cl_ntoh64( port_guid )); + + status = __osm_vendor_open_ca( p_vend, port_guid ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_bind: ERR 3B17: " + "Unable to Open CA (%s).\n", + ib_get_err_str( status ) ); + goto Exit; + } + } + + p_bind = cl_zalloc( sizeof(*p_bind) ); + if( p_bind == NULL ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, 
+ "osm_vendor_bind: ERR 3B18: " + "Unable to allocate internal bind object.\n" ); + goto Exit; + } + + p_bind->p_vend = p_vend; + p_bind->client_context = context; + p_bind->port_num = osm_vendor_get_port_num( p_vend, port_guid ); + p_bind->rcv_callback = mad_recv_callback; + p_bind->send_err_callback = send_err_callback; + p_bind->p_osm_pool = p_mad_pool; + + CL_ASSERT( p_bind->port_num ); + + /* + Get the proper QP. + */ + cl_memclr( &qp_create, sizeof(qp_create) ); + + switch( p_user_bind->mad_class ) + { + case IB_MCLASS_SUBN_LID: + case IB_MCLASS_SUBN_DIR: + qp_create.qp_type = IB_QPT_QP0_ALIAS; + break; + + case IB_MCLASS_SUBN_ADM: + default: + qp_create.qp_type = IB_QPT_QP1_ALIAS; + break; + } + + qp_create.sq_depth = p_user_bind->send_q_size; + qp_create.rq_depth = p_user_bind->recv_q_size; + qp_create.sq_sge = OSM_AL_SQ_SGE; + qp_create.rq_sge = OSM_AL_RQ_SGE; + status = ib_get_spl_qp( + p_vend->h_pd, + port_guid, + &qp_create, + p_bind, + __osm_al_err_callback, + &p_bind->pool_key, + &p_bind->h_qp ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_bind: ERR 3B19: " + "Unable to get QP handle (%s).\n", + ib_get_err_str( status ) ); + cl_free( p_bind ); + p_bind = 0; + goto Exit; + } + + CL_ASSERT( p_bind->h_qp ); + CL_ASSERT( p_bind->pool_key ); + + cl_memclr( &mad_svc, sizeof(mad_svc) ); + + mad_svc.mad_svc_context = p_bind; + mad_svc.pfn_mad_send_cb = __osm_al_send_callback; + mad_svc.pfn_mad_recv_cb = __osm_al_rcv_callback; + mad_svc.mgmt_class = p_user_bind->mad_class; + mad_svc.mgmt_version = p_user_bind->class_version; + mad_svc.support_unsol = p_user_bind->is_responder; + mad_svc.method_array[IB_MAD_METHOD_GET] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_SET] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_DELETE] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_TRAP] = TRUE; + mad_svc.method_array[IB_MAD_METHOD_GETTABLE] = TRUE; + + + status = ib_reg_mad_svc( + p_bind->h_qp, + &mad_svc, + &p_bind->h_svc ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_bind: ERR 3B21: " + "Unable to register QP0 MAD service (%s).\n", + ib_get_err_str( status ) ); + cl_free( p_bind ); + p_bind = 0; + goto Exit; + } + + __osm_vendor_init_av( p_bind, &av ); + + status = ib_create_av( p_vend->h_pd, &av, &(p_bind->h_dr_av) ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_bind: ERR 3B22: " + "Unable to create address vector (%s).\n", + ib_get_err_str( status ) ); + + cl_free( p_bind ); + p_bind = 0; + goto Exit; + } + + if( osm_log_is_active( p_vend->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vend->p_log, OSM_LOG_DEBUG, + "osm_vendor_bind: " + "Allocating av handle %p.\n", p_bind->h_dr_av ); + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( (osm_bind_handle_t)p_bind ); +} + +/********************************************************************** + **********************************************************************/ +/* osm_vendor_unbind is added due to OSM-1.8.0 gen2 merging + The functionality will be added when the Gen2 osm_vendor_unbind + will be implemented. 
+*/ +void +osm_vendor_unbind( + IN osm_bind_handle_t h_bind) +{ + osm_al_bind_info_t *p_bind = ( osm_al_bind_info_t * ) h_bind; + osm_vendor_t *p_vend = p_bind->p_vend; + + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_unbind ); + + OSM_LOG_EXIT( p_vend->p_log); +} + +/********************************************************************** + **********************************************************************/ +ib_mad_t* +osm_vendor_get( + IN osm_bind_handle_t h_bind, + IN const uint32_t mad_size, + IN osm_vend_wrap_t* const p_vw ) +{ + ib_mad_t *p_mad; + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t *)h_bind; + osm_vendor_t *p_vend = p_bind->p_vend; + ib_api_status_t status; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_get ); + + CL_ASSERT( p_vw ); + + p_vw->size = mad_size; + p_vw->h_bind = h_bind; + + /* + Retrieve a MAD element from the pool and give the user direct + access to its buffer. + */ + status = ib_get_mad( p_bind->pool_key, mad_size, &p_vw->p_elem ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_get: ERR 3B25: " + "Unable to acquire MAD (%s).\n", + ib_get_err_str( status ) ); + + p_mad = NULL; + goto Exit; + } + + CL_ASSERT( p_vw->p_elem ); + p_mad = ib_get_mad_buf( p_vw->p_elem ); + + if( osm_log_get_level( p_vend->p_log ) >= OSM_LOG_DEBUG ) + { + osm_log( p_vend->p_log, OSM_LOG_DEBUG, + "osm_vendor_get: " + "Acquired MAD %p, size = %u.\n", p_mad, mad_size ); + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( p_mad ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vendor_put( + IN osm_bind_handle_t h_bind, + IN osm_vend_wrap_t* const p_vw ) +{ + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t *)h_bind; + osm_vendor_t *p_vend = p_bind->p_vend; + ib_api_status_t status; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_put ); + + CL_ASSERT( p_vw ); + CL_ASSERT( p_vw->p_elem ); + CL_ASSERT( p_vw->h_bind == h_bind ); + + if( osm_log_get_level( p_vend->p_log ) >= OSM_LOG_DEBUG ) + { + + osm_log( p_vend->p_log, OSM_LOG_DEBUG, + "osm_vendor_put: " + "Retiring MAD %p.\n", ib_get_mad_buf( p_vw->p_elem ) ); + // "Retiring MAD %p.\n", p_mad); + } + + status = ib_put_mad( p_vw->p_elem ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_put: ERR 3B26: " + "Unable to retire MAD (%s).\n", + ib_get_err_str( status ) ); + } + + OSM_LOG_EXIT( p_vend->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vendor_send( + IN osm_bind_handle_t h_bind, + IN osm_madw_t* const p_madw, + IN boolean_t const resp_expected ) +{ + osm_al_bind_info_t* const p_bind = h_bind; + osm_vendor_t* const p_vend = p_bind->p_vend; + osm_vend_wrap_t* const p_vw = osm_madw_get_vend_ptr( p_madw ); + osm_mad_addr_t* const p_mad_addr = osm_madw_get_mad_addr_ptr( p_madw ); + ib_mad_t* const p_mad = osm_madw_get_mad_ptr( p_madw ); + ib_api_status_t status; + ib_mad_element_t *p_elem; + ib_av_attr_t av; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_send ); + + CL_ASSERT( p_vw->h_bind == h_bind ); + CL_ASSERT( p_vw->p_elem ); + + p_elem = p_vw->p_elem; + + /* + If a response is expected to this MAD, then preallocate + a mad wrapper to contain the wire MAD received in the + response. Allocating a wrapper here allows for easier + failure paths than after we already received the wire mad. 
+	  In order to separate the receive callback from the send callback,
+	  we copy the request madw and send it as context2; in the receive
+	  callback this replaces the req_madw, which avoids races with the
+	  send callback.
+	*/
+	if( resp_expected )
+	{
+		p_vw->p_resp_madw = osm_mad_pool_get_wrapper_raw(
+			p_bind->p_osm_pool );
+		if( p_vw->p_resp_madw == NULL )
+		{
+			osm_log( p_vend->p_log, OSM_LOG_ERROR,
+				"osm_vendor_send: ERR 3B27: "
+				"Unable to allocate MAD wrapper.\n" );
+			status = IB_INSUFFICIENT_RESOURCES;
+			goto Exit;
+		}
+	}
+	else
+		p_vw->p_resp_madw = NULL;
+
+	/*
+	  For all sends other than directed route SM MADs,
+	  acquire an address vector for the destination.
+	*/
+	if( p_mad->mgmt_class != IB_MCLASS_SUBN_DIR )
+	{
+		cl_memclr( &av, sizeof(av) );
+		av.port_num = p_bind->port_num;
+		av.dlid = p_mad_addr->dest_lid;
+		av.static_rate = p_mad_addr->static_rate;
+		av.path_bits = p_mad_addr->path_bits;
+
+		if( (p_mad->mgmt_class != IB_MCLASS_SUBN_LID) &&
+		    (p_mad->mgmt_class != IB_MCLASS_SUBN_DIR) )
+		{
+			av.sl = p_mad_addr->addr_type.gsi.service_level;
+
+			if( p_mad_addr->addr_type.gsi.global_route )
+			{
+				av.grh_valid = TRUE;
+				/* ANIL */
+				/* av.grh = p_mad_addr->addr_type.gsi.grh_info; */
+			}
+		}
+
+		if( osm_log_is_active( p_vend->p_log, OSM_LOG_DEBUG ) )
+		{
+			osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+				"osm_vendor_send: "
+				"av.port_num 0x%X, "
+				"av.dlid 0x%X, "
+				"av.static_rate %d, "
+				"av.path_bits %d.\n",
+				av.port_num, cl_ntoh16(av.dlid),
+				av.static_rate, av.path_bits );
+		}
+
+		status = ib_create_av( p_vend->h_pd, &av, &(p_vw->h_av) );
+		if( status != IB_SUCCESS )
+		{
+			osm_log( p_vend->p_log, OSM_LOG_ERROR,
+				"osm_vendor_send: ERR 3B28: "
+				"Unable to create address vector (%s).\n",
+				ib_get_err_str( status ) );
+
+			if( p_vw->p_resp_madw )
+				osm_mad_pool_put( p_bind->p_osm_pool, p_vw->p_resp_madw );
+			/* On an immediate error the vendor layer is expected to
+			   handle the rollback, i.e. to free the madw. */
+			if (p_madw)
+				osm_mad_pool_put( p_bind->p_osm_pool, p_madw );
+			goto Exit;
+		}
+
+		if( osm_log_is_active( p_vend->p_log, OSM_LOG_DEBUG ) )
+		{
+			osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+				"osm_vendor_send: "
+				"Allocating av handle %p.\n", p_vw->h_av );
+		}
+	}
+	else
+	{
+		p_vw->h_av = p_bind->h_dr_av;
+	}
+
+	p_elem->h_av = p_vw->h_av;
+
+	p_elem->context1 = p_madw;
+	p_elem->context2 = NULL;
+
+	p_elem->immediate_data = 0;
+	p_elem->p_grh = NULL;
+	p_elem->resp_expected = resp_expected;
+	p_elem->retry_cnt = OSM_DEFAULT_RETRY_COUNT;
+
+	p_elem->send_opt = IB_SEND_OPT_SIGNALED;
+	p_elem->timeout_ms = p_vend->timeout;
+
+	/* Completion information.
*/ + p_elem->status = 0; /* Not trusting AL */ + + + if( (p_mad->mgmt_class == IB_MCLASS_SUBN_LID) || + (p_mad->mgmt_class == IB_MCLASS_SUBN_DIR) ) + { + p_elem->remote_qp = 0; + p_elem->remote_qkey = 0; + } + else + { + p_elem->remote_qp = p_mad_addr->addr_type.gsi.remote_qp; + p_elem->remote_qkey = p_mad_addr->addr_type.gsi.remote_qkey; + osm_log(p_vend->p_log, OSM_LOG_DEBUG, + "osm_vendor_send: " + "remote qp = 0x%X, remote qkey = 0x%X.\n", + cl_ntoh32(p_elem->remote_qp), + cl_ntoh32(p_elem->remote_qkey) ); + } + + status = ib_send_mad( p_bind->h_svc, p_elem, NULL ); + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_send: ERR 3B29: " + "Send failed , cleaning up (%s).\n", + ib_get_err_str( status ) ); + /* When we destroy the av - we should take the pointer from local allocation since + we do not "trust" IBAL to keep track in p_elem */ + + if (p_vw->h_av && (p_vw->h_av != p_bind->h_dr_av) ) + { + osm_log( p_vend->p_log, OSM_LOG_DEBUG, + "__osm_al_send: " + "Destroying av handle %p.\n", p_vw->h_av ); + ib_destroy_av( p_vw->h_av ); + } + if( p_vw->p_resp_madw ) + osm_mad_pool_put( p_bind->p_osm_pool, p_vw->p_resp_madw ); + /* Since we in immediate error the vendor layer is expected to handle the rollback , i.e free of madw */ + if (p_madw) + osm_mad_pool_put( p_bind->p_osm_pool, p_madw ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_vend->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vendor_local_lid_change( + IN osm_bind_handle_t h_bind ) +{ + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t *)h_bind; + osm_vendor_t *p_vend = p_bind->p_vend; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_local_lid_change ); + + OSM_LOG_EXIT( p_vend->p_log ); + return( IB_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vendor_set_sm( + IN osm_bind_handle_t h_bind, + IN boolean_t is_sm_val ) +{ + osm_al_bind_info_t *p_bind = (osm_al_bind_info_t *)h_bind; + osm_vendor_t *p_vend = p_bind->p_vend; + ib_api_status_t status; + ib_port_attr_mod_t attr_mod; + + OSM_LOG_ENTER( p_vend->p_log, osm_vendor_set_sm ); + + cl_memclr( &attr_mod, sizeof(attr_mod) ); + + attr_mod.cap.sm = is_sm_val; + + status = ib_modify_ca( p_vend->h_ca, p_bind->port_num, + IB_CA_MOD_IS_SM, &attr_mod ); + + if( status != IB_SUCCESS ) + { + osm_log( p_vend->p_log, OSM_LOG_ERROR, + "osm_vendor_set_sm: ERR 3B34: " + "Unable set 'IS_SM' bit to:%u in port attributes (%s).\n", + is_sm_val, ib_get_err_str( status ) ); + } + + OSM_LOG_EXIT( p_vend->p_log ); +} + +#endif /* OSM_VENDOR_INTF_AL */ diff --git a/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_mlx_sa.c b/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_mlx_sa.c new file mode 100644 index 00000000..ed123c67 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libvendor/osm_vendor_mlx_sa.c @@ -0,0 +1,879 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif	/* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*****************************************************************************
+ *****************************************************************************/
+
+/* this struct is the internal rep of the bind handle */
+typedef struct _osmv_sa_bind_info {
+	osm_bind_handle_t h_bind;
+	osm_log_t *p_log;
+	osm_vendor_t *p_vendor;
+	osm_mad_pool_t *p_mad_pool;
+	uint64_t port_guid;
+	cl_event_t sync_event;
+	uint64_t last_lids_update_sec;
+	uint16_t lid;
+	uint16_t sm_lid;
+} osmv_sa_bind_info_t;
+
+/*****************************************************************************
+ *****************************************************************************/
+
+/*
+  Callback invoked on a newly received MAD:
+
+  We basically only need to set the context of the query,
+  or report an error.
+
+  A pointer to the actual context of the request (a copy of the original
+  request structure) is attached as p_madw->context.arb_context.context1
+*/
+void
+__osmv_sa_mad_rcv_cb(
+	IN osm_madw_t *p_madw,
+	IN void* bind_context,
+	IN osm_madw_t *p_req_madw )
+{
+	osmv_sa_bind_info_t *p_bind = (osmv_sa_bind_info_t *)bind_context;
+	osmv_query_req_t *p_query_req_copy = NULL;
+	osmv_query_res_t query_res;
+	ib_sa_mad_t *p_sa_mad;
+	ib_net16_t mad_status;
+
+	OSM_LOG_ENTER( p_bind->p_log, __osmv_sa_mad_rcv_cb );
+	if (!p_req_madw)
+	{
+		osm_log( p_bind->p_log, OSM_LOG_DEBUG,
+			"__osmv_sa_mad_rcv_cb: "
+			"Ignoring a non-response mad\n" );
+		osm_mad_pool_put( p_bind->p_mad_pool, p_madw );
+		goto Exit;
+	}
+
+	/* obtain the sent context */
+	p_query_req_copy =
+		(osmv_query_req_t *)(p_req_madw->context.arb_context.context1);
+
+	/* provide the context of the original request in the result */
+	query_res.query_context = p_query_req_copy->query_context;
+
+	/* provide the resulting madw */
+	query_res.p_result_madw = p_madw;
+
+	/* update the req fields */
+	p_sa_mad = ( ib_sa_mad_t * ) p_madw->p_mad;
+
+	/* if we got a remote error, track it in the status */
+	mad_status = ( ib_net16_t ) ( p_sa_mad->status & IB_SMP_STATUS_MASK );
+	if (mad_status != IB_SUCCESS)
+	{
+		osm_log( p_bind->p_log, OSM_LOG_ERROR,
+			"__osmv_sa_mad_rcv_cb: ERR 0501: "
+			"Remote error:0x%04X.\n", mad_status );
+		query_res.status = IB_REMOTE_ERROR;
+	}
+	else
+	{
+		query_res.status = IB_SUCCESS;
+	}
+
+	/* what if we got back an empty mad? */
+	if (! p_madw->mad_size)
+	{
+		osm_log( p_bind->p_log, OSM_LOG_ERROR,
+			"__osmv_sa_mad_rcv_cb: ERR 0502: "
+			"Got an empty mad.\n" );
+		query_res.status = IB_ERROR;
+	}
+
+	if (IB_SUCCESS == mad_status)
+	{
+		/* if we are not in a method response of an RMPP nature,
+		   we must get exactly one record */
+		/* HACK: in the future we might need to be smarter for other methods... */
+		if (p_sa_mad->method != IB_MAD_METHOD_GETTABLE_RESP)
+		{
+			query_res.result_cnt = 1;
+		}
+		else
+		{
+#ifndef VENDOR_RMPP_SUPPORT
+			if (mad_status != IB_SUCCESS)
+				query_res.result_cnt = 0;
+			else
+				query_res.result_cnt = 1;
+#else
+			/* we use the attr_offset value to calculate the number
+			   of records returned */
+			query_res.result_cnt =
+				(uintn_t)
+				( ( p_madw->mad_size - IB_SA_MAD_HDR_SIZE ) /
+					ib_get_attr_size( p_sa_mad->attr_offset ) );
+			osm_log( p_bind->p_log, OSM_LOG_DEBUG,
+				"__osmv_sa_mad_rcv_cb: Count = %u = %u / %u (%u)\n",
+				query_res.result_cnt, p_madw->mad_size - IB_SA_MAD_HDR_SIZE,
+				ib_get_attr_size( p_sa_mad->attr_offset ),
+				( p_madw->mad_size - IB_SA_MAD_HDR_SIZE ) %
+				ib_get_attr_size( p_sa_mad->attr_offset ) );
+#endif
+		}
+	}
+
+	query_res.query_type = p_query_req_copy->query_type;
+
+	p_query_req_copy->pfn_query_cb( &query_res );
+
+	if ((p_query_req_copy->flags & OSM_SA_FLAGS_SYNC) == OSM_SA_FLAGS_SYNC)
+		cl_event_signal( &p_bind->sync_event );
+
+ Exit:
+	/* free the copied query request if found */
+	if (p_query_req_copy) free(p_query_req_copy);
+
+	/* put back the request madw */
+	if (p_req_madw)
+		osm_mad_pool_put( p_bind->p_mad_pool, p_req_madw );
+
+	OSM_LOG_EXIT( p_bind->p_log );
+}
+
+/*****************************************************************************
+ ****************************************************************************/
+/*
+  Send Error Callback:
+
+  Only report the error and get rid of the mad wrapper
+*/
+void
+__osmv_sa_mad_err_cb(
+	IN void* bind_context,
+	IN osm_madw_t *p_madw )
+{
+	osmv_sa_bind_info_t *p_bind = (osmv_sa_bind_info_t *)bind_context;
+	osmv_query_req_t *p_query_req_copy = NULL;
+	osmv_query_res_t query_res;
+
+	OSM_LOG_ENTER( p_bind->p_log, __osmv_sa_mad_err_cb );
+
+	/* Obtain the sent context etc. */
+	p_query_req_copy =
+		(osmv_query_req_t *)(p_madw->context.arb_context.context1);
+
+	/* provide the context of the original request in the result */
+	query_res.query_context = p_query_req_copy->query_context;
+
+	query_res.p_result_madw = p_madw;
+
+	query_res.status = IB_TIMEOUT;
+	query_res.result_cnt = 0;
+	query_res.p_result_madw->status = IB_TIMEOUT;
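+	/*
+	 * Editor's note: the timeout status is stored both in query_res
+	 * (consumed by the query callback) and in p_madw itself, because a
+	 * synchronous caller blocked in __osmv_send_sa_req() reads
+	 * p_madw->status after the sync_event is signalled below.
+	 */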
+	p_madw->status = IB_TIMEOUT;
+	query_res.query_type = p_query_req_copy->query_type;
+
+	p_query_req_copy->pfn_query_cb( &query_res );
+
+	if ((p_query_req_copy->flags & OSM_SA_FLAGS_SYNC) == OSM_SA_FLAGS_SYNC)
+		cl_event_signal( &p_bind->sync_event );
+
+	if (p_query_req_copy) free(p_query_req_copy);
+	OSM_LOG_EXIT( p_bind->p_log );
+}
+
+/*****************************************************************************
+  This routine needs to be invoked on every send, since the SM LID and local
+  LID might change. To do that without any major performance impact, we cache
+  the results and the time they were obtained, and refresh only twice a
+  minute. To avoid the need for statics (and the risk of a race) we require
+  the refresh time to be stored in the context of the results. This also
+  covers cases where we query for multiple GUIDs.
+ *****************************************************************************/
+ib_api_status_t
+__osmv_get_lid_and_sm_lid_by_port_guid(
+	IN osm_vendor_t* const p_vend,
+	IN ib_net64_t port_guid,
+	IN OUT uint64_t* p_lids_update_time_sec,
+	OUT uint16_t* lid,
+	OUT uint16_t* sm_lid )
+{
+	ib_api_status_t status;
+	ib_port_attr_t *p_attr_array;
+	uint32_t num_ports;
+	uint32_t port_num;
+
+	OSM_LOG_ENTER( p_vend->p_log, __osmv_get_lid_and_sm_lid_by_port_guid );
+
+	/* use the previous values if the current time is close enough to the
+	   previous query */
+	if (cl_get_time_stamp_sec() <= *p_lids_update_time_sec + 30)
+	{
+		osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+			"__osmv_get_lid_and_sm_lid_by_port_guid: "
+			"Using previously stored lid:0x%04x sm_lid:0x%04x\n",
+			*lid, *sm_lid );
+		status = IB_SUCCESS;
+		goto Exit;
+	}
+
+	/* obtain the number of available ports */
+	num_ports = 0;
+	status = osm_vendor_get_all_port_attr( p_vend, NULL, &num_ports );
+	if (status != IB_INSUFFICIENT_MEMORY)
+	{
+		osm_log( p_vend->p_log, OSM_LOG_ERROR,
+			"__osmv_get_lid_and_sm_lid_by_port_guid: ERR 0503: "
+			"expected to get IB_INSUFFICIENT_MEMORY but got: %s\n",
+			ib_get_err_str(status) );
+		status = IB_ERROR;
+		goto Exit;
+	}
+
+	osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+		"__osmv_get_lid_and_sm_lid_by_port_guid: "
+		"Found total of %u ports. Looking for guid:0x%016" PRIx64 "\n",
+		num_ports, cl_ntoh64(port_guid) );
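+	/*
+	 * Editor's note (sketch of the sizing idiom used below, not part of
+	 * the original patch): osm_vendor_get_all_port_attr() is called twice;
+	 * the first call with a NULL array only learns the port count, the
+	 * second fills a buffer of that size:
+	 *
+	 *	num_ports = 0;
+	 *	osm_vendor_get_all_port_attr( p_vend, NULL, &num_ports );
+	 *	p_attr_array = malloc( sizeof(ib_port_attr_t) * num_ports );
+	 *	osm_vendor_get_all_port_attr( p_vend, p_attr_array, &num_ports );
+	 */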
+	/* allocate the attributes */
+	p_attr_array =
+		(ib_port_attr_t *)malloc(sizeof(ib_port_attr_t)*num_ports);
+
+	/* obtain the attributes */
+	status = osm_vendor_get_all_port_attr( p_vend, p_attr_array, &num_ports );
+	if (status != IB_SUCCESS)
+	{
+		osm_log( p_vend->p_log, OSM_LOG_ERROR,
+			"__osmv_get_lid_and_sm_lid_by_port_guid: ERR 0504: "
+			"Fail to get port attributes (error: %s)\n",
+			ib_get_err_str(status) );
+		free(p_attr_array);
+		goto Exit;
+	}
+
+	status = IB_ERROR;
+	/* find the requested port in the list */
+	for (port_num = 0; (port_num < num_ports) && (status == IB_ERROR); port_num++)
+	{
+		if (p_attr_array[port_num].port_guid == port_guid)
+		{
+			*lid = p_attr_array[port_num].lid;
+			*sm_lid = p_attr_array[port_num].sm_lid;
+			*p_lids_update_time_sec = cl_get_time_stamp_sec();
+			status = IB_SUCCESS;
+			osm_log( p_vend->p_log, OSM_LOG_DEBUG,
+				"__osmv_get_lid_and_sm_lid_by_port_guid: "
+				"Found guid:0x%016" PRIx64 " with idx:%d\n",
+				cl_ntoh64(port_guid), port_num );
+		}
+	}
+
+	free(p_attr_array);
+
+ Exit:
+	OSM_LOG_EXIT( p_vend->p_log );
+	return ( status );
+}
+
+/*****************************************************************************
+ *****************************************************************************/
+osm_bind_handle_t
+osmv_bind_sa(
+	IN osm_vendor_t* const p_vend,
+	IN osm_mad_pool_t* const p_mad_pool,
+	IN ib_net64_t port_guid )
+{
+	osm_bind_info_t bind_info;
+	osm_log_t *p_log = p_vend->p_log;
+	ib_api_status_t status = IB_SUCCESS;
+	osmv_sa_bind_info_t *p_sa_bind_info;
+	cl_status_t cl_status;
+
+	OSM_LOG_ENTER( p_log, osmv_bind_sa );
+
+	osm_log( p_log, OSM_LOG_DEBUG,
+		"osmv_bind_sa: "
+		"Binding to port 0x%" PRIx64 ".\n",
+		cl_ntoh64( port_guid ) );
+
+	bind_info.port_guid = port_guid;
+	bind_info.mad_class = IB_MCLASS_SUBN_ADM;
+	bind_info.class_version = 2;
+	bind_info.is_responder = TRUE;
+	bind_info.is_trap_processor = FALSE;
+	bind_info.is_report_processor = TRUE;
+	bind_info.send_q_size = 256;
+	bind_info.recv_q_size = 256;
+
+	/* allocate the new sa bind info */
+	p_sa_bind_info =
+		(osmv_sa_bind_info_t *)malloc(sizeof(osmv_sa_bind_info_t));
+	if (!p_sa_bind_info)
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"osmv_bind_sa: ERR 0505: "
+			"Fail to allocate new bind structure\n" );
+		p_sa_bind_info = OSM_BIND_INVALID_HANDLE;
+		goto Exit;
+	}
+
+	/* store some important context */
+	p_sa_bind_info->p_log = p_log;
+	p_sa_bind_info->port_guid = port_guid;
+	p_sa_bind_info->p_mad_pool = p_mad_pool;
+	p_sa_bind_info->p_vendor = p_vend;
+	p_sa_bind_info->last_lids_update_sec = 0;
+
+	/* Bind to the lower level */
+	p_sa_bind_info->h_bind =
+		osm_vendor_bind( p_vend,
+			&bind_info,
+			p_mad_pool,
+			__osmv_sa_mad_rcv_cb,
+			__osmv_sa_mad_err_cb,
+			p_sa_bind_info );	/* context provided to CBs */
+
+	if (p_sa_bind_info->h_bind == OSM_BIND_INVALID_HANDLE)
+	{
+		free(p_sa_bind_info);
+		p_sa_bind_info = OSM_BIND_INVALID_HANDLE;
+		osm_log( p_log, OSM_LOG_ERROR,
+			"osmv_bind_sa: ERR 0506: "
+			"Fail to bind to vendor SMI.\n" );
+		goto Exit;
+	}
+
+	/* obtain the sm_lid from the vendor */
+	status =
+		__osmv_get_lid_and_sm_lid_by_port_guid(
+			p_vend, port_guid,
+			&p_sa_bind_info->last_lids_update_sec,
+			&p_sa_bind_info->lid,
+			&p_sa_bind_info->sm_lid );
+	if (status != IB_SUCCESS)
+	{
+		free(p_sa_bind_info);
+		p_sa_bind_info = OSM_BIND_INVALID_HANDLE;
+		osm_log( p_log, OSM_LOG_ERROR,
+			"osmv_bind_sa: ERR 0507: "
+			"Fail to obtain the sm lid.\n" );
+		goto Exit;
+	}
+
+	/* initialize the sync_event */
+	cl_event_construct( &p_sa_bind_info->sync_event );
+	cl_status = cl_event_init( &p_sa_bind_info->sync_event, TRUE );
+	if( cl_status != CL_SUCCESS )
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"osmv_bind_sa: ERR 0508: "
+			"cl_event_init failed: %s\n",
+			ib_get_err_str(cl_status) );
+		free(p_sa_bind_info);
+		p_sa_bind_info = OSM_BIND_INVALID_HANDLE;
+	}
+
+ Exit:
+	OSM_LOG_EXIT( p_log );
+	return ( p_sa_bind_info );
+}
+
+/*****************************************************************************
+ *****************************************************************************/
+
+/****t* OSM Vendor SA Client/osmv_sa_mad_data
+ * NAME
+ *	osmv_sa_mad_data
+ *
+ * DESCRIPTION
+ *	Extra fields required to perform a mad query.
+ *	This struct is passed to the actual send method.
+ *
+ * SYNOPSIS
+ */
+typedef struct _osmv_sa_mad_data
+{
+	/* MAD data. */
+	uint8_t method;
+	ib_net16_t attr_id;
+	ib_net16_t attr_offset;
+	ib_net32_t attr_mod;
+	ib_net64_t comp_mask;
+	void *p_attr;
+} osmv_sa_mad_data_t;
+/*
+ * method
+ *	The method of the mad to be sent
+ *
+ * attr_id
+ *	Attribute ID
+ *
+ * attr_offset
+ *	Offset as defined by RMPP
+ *
+ * attr_mod
+ *	Attribute modifier
+ *
+ * comp_mask
+ *	The component mask of the query
+ *
+ * p_attr
+ *	A pointer to the record of the attribute to be sent.
+ *
+ *****/
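+/*
+ * Editor's note (not part of the original patch): the struct above is
+ * filled by osmv_query_sa() below. As a minimal sketch only, using the
+ * constants that appear later in this file, a NodeRecord table query
+ * would be prepared roughly like this:
+ *
+ *	ib_node_record_t node_rec;
+ *	osmv_sa_mad_data_t sa_mad_data;
+ *
+ *	memset( &node_rec, 0, sizeof(node_rec) );
+ *	node_rec.node_info.node_guid = port_guid;        // GUID to look up
+ *	sa_mad_data.method = IB_MAD_METHOD_GETTABLE;     // RMPP table response
+ *	sa_mad_data.attr_id = IB_MAD_ATTR_NODE_RECORD;
+ *	sa_mad_data.attr_mod = 0;
+ *	sa_mad_data.attr_offset =
+ *		ib_get_attr_offset( sizeof(ib_node_record_t) );
+ *	sa_mad_data.comp_mask = IB_NR_COMPMASK_NODEGUID; // match on GUID only
+ *	sa_mad_data.p_attr = &node_rec;
+ */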
+/*****************************************************************************
+ *****************************************************************************/
+/* Send a MAD out on the GSI interface */
+ib_api_status_t
+__osmv_send_sa_req(
+	IN osmv_sa_bind_info_t* p_bind,
+	IN const osmv_sa_mad_data_t * const p_sa_mad_data,
+	IN const osmv_query_req_t * const p_query_req )
+{
+	ib_api_status_t status;
+	ib_mad_t *p_mad_hdr;
+	ib_sa_mad_t *p_sa_mad;
+	osm_madw_t *p_madw;
+	osm_log_t *p_log = p_bind->p_log;
+	static atomic32_t trans_id;
+	boolean_t sync;
+	osmv_query_req_t *p_query_req_copy;
+
+	OSM_LOG_ENTER( p_log, __osmv_send_sa_req );
+
+	/*
+	  Since the sm_lid might change, we obtain it on every send
+	  (it is actually cached in the bind object and refreshed
+	  every 30 seconds by this call).
+	*/
+	status =
+		__osmv_get_lid_and_sm_lid_by_port_guid(
+			p_bind->p_vendor, p_bind->port_guid,
+			&p_bind->last_lids_update_sec,
+			&p_bind->lid,
+			&p_bind->sm_lid );
+	if (status != IB_SUCCESS)
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"__osmv_send_sa_req: ERR 0509: "
+			"Fail to obtain the sm lid.\n" );
+		goto Exit;
+	}
+
+	/* Get a MAD wrapper for the send */
+	p_madw = osm_mad_pool_get(
+		p_bind->p_mad_pool,
+		p_bind->h_bind,
+		MAD_BLOCK_SIZE,
+		NULL );
+
+	if( p_madw == NULL )
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"__osmv_send_sa_req: ERR 0510: "
+			"Unable to acquire MAD.\n" );
+		status = IB_INSUFFICIENT_RESOURCES;
+		goto Exit;
+	}
+
+	/* Initialize the Sent MAD: */
+
+	/* Initialize the MAD buffer for the send operation. */
+	p_mad_hdr = osm_madw_get_mad_ptr( p_madw );
+	p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+
+	/* Get a new transaction Id */
+	cl_atomic_inc( &trans_id );
+
+	/* Clean the MAD of any residue */
+	memset( p_sa_mad, 0, MAD_BLOCK_SIZE );
+
+	/* Initialize the standard MAD header. */
+	ib_mad_init_new(
+		p_mad_hdr,			/* mad pointer */
+		IB_MCLASS_SUBN_ADM,		/* class */
+		( uint8_t ) 2,			/* version */
+		p_sa_mad_data->method,		/* method */
+		cl_hton64( ( uint64_t ) trans_id ),	/* tid */
+		p_sa_mad_data->attr_id,		/* attr id */
+		p_sa_mad_data->attr_mod		/* attr mod */
+		);
+
+	/* Set the query information. */
+	p_sa_mad->sm_key = p_query_req->sm_key;
+	p_sa_mad->attr_offset = 0;
+	p_sa_mad->comp_mask = p_sa_mad_data->comp_mask;
+	if( p_sa_mad->comp_mask )
+	{
+		memcpy( p_sa_mad->data, p_sa_mad_data->p_attr,
+			ib_get_attr_size(p_sa_mad_data->attr_offset) );
+	}
+
+	/*
+	  Provide the address to send to.
+	*/
+	/* Patch to handle IBAL, which takes the destination lid in host
+	   order where it should take it in network order. */
+#ifdef OSM_VENDOR_INTF_AL
+	p_madw->mad_addr.dest_lid = p_bind->sm_lid;
+#else
+	p_madw->mad_addr.dest_lid = cl_hton16(p_bind->sm_lid);
+#endif
+	p_madw->mad_addr.addr_type.smi.source_lid =
+		cl_hton16(p_bind->lid);
+	p_madw->mad_addr.addr_type.gsi.remote_qp = CL_HTON32(1);
+	p_madw->mad_addr.addr_type.gsi.remote_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+	p_madw->mad_addr.addr_type.gsi.pkey = IB_DEFAULT_PKEY;
+	p_madw->resp_expected = TRUE;
+	p_madw->fail_msg = CL_DISP_MSGID_NONE;
+
+	/*
+	  Provide MAD context such that the callback will know what to do.
+	  We have to keep the entire request structure so we know the CB.
+	  Since we cannot rely on the client to keep it around until the
+	  response, we duplicate it and will later dispose of it (in the CB).
+	  To store it on the MADW we cast it into what opensm has:
+	  p_madw->context.arb_context.context1
+	*/
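+	/*
+	 * Editor's note (illustration only, not part of the original patch):
+	 * from the caller's side, the synchronous flow handled at the bottom
+	 * of this function is selected through the request flags, e.g.:
+	 *
+	 *	osmv_query_req_t req;
+	 *	memset( &req, 0, sizeof(req) );
+	 *	req.query_type = OSMV_QUERY_NODE_REC_BY_NODE_GUID;
+	 *	req.p_query_input = &node_guid;   // hypothetical local variable
+	 *	req.flags = OSM_SA_FLAGS_SYNC;    // block until the CB signals
+	 *	req.pfn_query_cb = my_query_cb;   // hypothetical callback
+	 *	osmv_query_sa( h_bind, &req );    // returns after the CB ran
+	 */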
+	p_query_req_copy = malloc( sizeof(*p_query_req_copy) );
+	*p_query_req_copy = *p_query_req;
+	p_madw->context.arb_context.context1 = p_query_req_copy;
+
+	/* we can support async as well as sync calls */
+	sync = ((p_query_req->flags & OSM_SA_FLAGS_SYNC) == OSM_SA_FLAGS_SYNC);
+
+	/* send the mad asynchronously */
+	status = osm_vendor_send(
+		osm_madw_get_bind_handle( p_madw ),
+		p_madw,
+		p_madw->resp_expected );
+
+	/* if synchronous, wait on the event */
+	if (sync)
+	{
+		osm_log( p_log, OSM_LOG_DEBUG,
+			"__osmv_send_sa_req: "
+			"Waiting for async event.\n" );
+		cl_event_wait_on( &p_bind->sync_event, EVENT_NO_TIMEOUT, FALSE );
+		cl_event_reset( &p_bind->sync_event );
+		status = p_madw->status;
+	}
+
+ Exit:
+	OSM_LOG_EXIT( p_log );
+	return status;
+}
+
+/*****************************************************************************
+ *****************************************************************************/
+/*
+ * Query the SA based on the user's request.
+ */
+ib_api_status_t
+osmv_query_sa(
+	IN osm_bind_handle_t h_bind,
+	IN const osmv_query_req_t * const p_query_req )
+{
+	osmv_sa_bind_info_t *p_bind = (osmv_sa_bind_info_t *)h_bind;
+	osmv_sa_mad_data_t sa_mad_data;
+	osmv_user_query_t *p_user_query;
+	ib_service_record_t svc_rec;
+	ib_node_record_t node_rec;
+	ib_portinfo_record_t port_info;
+	ib_path_rec_t path_rec;
+	ib_class_port_info_t class_port_info;
+	osm_log_t *p_log = p_bind->p_log;
+	ib_api_status_t status;
+
+	OSM_LOG_ENTER( p_log, osmv_query_sa );
+
+	/* Set the request information. */
+	sa_mad_data.method = IB_MAD_METHOD_GETTABLE;
+	sa_mad_data.attr_mod = 0;
+
+	/* Set the MAD attributes and component mask correctly. */
+	switch ( p_query_req->query_type )
+	{
+	case OSMV_QUERY_USER_DEFINED:
+		osm_log( p_log, OSM_LOG_DEBUG,
+			"osmv_query_sa DBG:001 %s", "USER_DEFINED\n" );
+		p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input;
+		if (p_user_query->method) sa_mad_data.method = p_user_query->method;
+		sa_mad_data.attr_offset = p_user_query->attr_offset;
+		sa_mad_data.attr_id = p_user_query->attr_id;
+		sa_mad_data.attr_mod = p_user_query->attr_mod;
+		sa_mad_data.comp_mask = p_user_query->comp_mask;
+		sa_mad_data.p_attr = p_user_query->p_attr;
+#if (0)
+#ifdef OSM_VENDOR_INTF_AL
+		/* HACK for OFED OSM: convert lid order from network to
+		   host for IB_MAD_ATTR_PORTINFO_RECORD */
+		if ( (sa_mad_data.attr_id == IB_MAD_ATTR_PORTINFO_RECORD) &&
+			(sa_mad_data.comp_mask & IB_PIR_COMPMASK_LID) )
+		{
+			ib_portinfo_record_t * p_pir = (ib_portinfo_record_t *)sa_mad_data.p_attr;
+			p_pir->lid = cl_ntoh16(p_pir->lid);
+		}
+#endif
+#endif
+		break;
+
+	case OSMV_QUERY_ALL_SVC_RECS:
+		osm_log( p_log, OSM_LOG_DEBUG,
+			"osmv_query_sa DBG:001 %s", "ALL_SVC_RECS\n" );
+		sa_mad_data.method = IB_MAD_METHOD_GETTABLE;
+		sa_mad_data.attr_id = IB_MAD_ATTR_SERVICE_RECORD;
+		sa_mad_data.attr_offset =
+			ib_get_attr_offset( sizeof( ib_service_record_t ) );
+		sa_mad_data.comp_mask = 0;
+		sa_mad_data.p_attr = &svc_rec;
+		break;
+
+	case OSMV_QUERY_SVC_REC_BY_NAME:
+		osm_log( p_log, OSM_LOG_DEBUG,
+			"osmv_query_sa DBG:001 %s", "SVC_REC_BY_NAME\n" );
+		sa_mad_data.method = IB_MAD_METHOD_GET;
+		sa_mad_data.attr_id = IB_MAD_ATTR_SERVICE_RECORD;
+		sa_mad_data.comp_mask = IB_SR_COMPMASK_SNAME;
+		sa_mad_data.attr_offset =
+			ib_get_attr_offset( sizeof( ib_service_record_t ) );
+		sa_mad_data.p_attr = &svc_rec;
+		memcpy( svc_rec.service_name, p_query_req->p_query_input,
+			sizeof( ib_svc_name_t
) ); + break; + + case OSMV_QUERY_SVC_REC_BY_ID: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s", "SVC_REC_BY_ID\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + sa_mad_data.comp_mask = IB_SR_COMPMASK_SID; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_service_record_t ) ); + sa_mad_data.p_attr = &svc_rec; + svc_rec.service_id = *( ib_net64_t * ) ( p_query_req->p_query_input ); + break; + + case OSMV_QUERY_CLASS_PORT_INFO: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","CLASS_PORT_INFO\n" ); + sa_mad_data.method = IB_MAD_METHOD_GET; + sa_mad_data.attr_id = IB_MAD_ATTR_CLASS_PORT_INFO; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_class_port_info_t ) ); + sa_mad_data.comp_mask = 0; + sa_mad_data.p_attr = &class_port_info; + + break; + + case OSMV_QUERY_NODE_REC_BY_NODE_GUID: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","NODE_REC_BY_NODE_GUID\n" ); + sa_mad_data.method = IB_MAD_METHOD_GETTABLE; + sa_mad_data.attr_id = IB_MAD_ATTR_NODE_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_node_record_t ) ); + sa_mad_data.comp_mask = IB_NR_COMPMASK_NODEGUID; + sa_mad_data.p_attr = &node_rec; + node_rec.node_info.node_guid = + *( ib_net64_t * ) ( p_query_req->p_query_input ); + + break; + + case OSMV_QUERY_PORT_REC_BY_LID: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","PORT_REC_BY_LID\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_PORTINFO_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_portinfo_record_t ) ); + sa_mad_data.comp_mask = IB_PIR_COMPMASK_LID; + sa_mad_data.p_attr = &port_info; + port_info.lid = *( ib_net16_t * ) ( p_query_req->p_query_input ); + break; + + case OSMV_QUERY_PORT_REC_BY_LID_AND_NUM: + sa_mad_data.method = IB_MAD_METHOD_GET; + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","PORT_REC_BY_LID_AND_NUM\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_PORTINFO_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_portinfo_record_t ) ); + sa_mad_data.comp_mask = IB_PIR_COMPMASK_LID | IB_PIR_COMPMASK_PORTNUM; + sa_mad_data.p_attr = p_user_query->p_attr; + break; + + case OSMV_QUERY_VLARB_BY_LID_PORT_BLOCK: + sa_mad_data.method = IB_MAD_METHOD_GET; + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","OSMV_QUERY_VLARB_BY_LID_PORT_BLOCK\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_VLARB_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_vl_arb_table_record_t ) ); + sa_mad_data.comp_mask = IB_VLA_COMPMASK_LID | IB_VLA_COMPMASK_OUT_PORT | IB_VLA_COMPMASK_BLOCK; + sa_mad_data.p_attr = p_user_query->p_attr; + break; + + case OSMV_QUERY_SLVL_BY_LID_AND_PORTS: + sa_mad_data.method = IB_MAD_METHOD_GET; + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","OSMV_QUERY_VLARB_BY_LID_PORT_BLOCK\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_SLVL_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_slvl_table_record_t ) ); + sa_mad_data.comp_mask = IB_SLVL_COMPMASK_LID | IB_SLVL_COMPMASK_OUT_PORT | IB_SLVL_COMPMASK_IN_PORT; + sa_mad_data.p_attr = p_user_query->p_attr; + break; + + case OSMV_QUERY_PATH_REC_BY_PORT_GUIDS: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","PATH_REC_BY_PORT_GUIDS\n" ); + memset(&path_rec, 0, sizeof(ib_path_rec_t )); + sa_mad_data.attr_id = 
IB_MAD_ATTR_PATH_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_path_rec_t ) ); + sa_mad_data.comp_mask = ( IB_PR_COMPMASK_DGID | IB_PR_COMPMASK_SGID ); + sa_mad_data.p_attr = &path_rec; + ib_gid_set_default( &path_rec.dgid, + ( ( osmv_guid_pair_t * ) ( p_query_req-> + p_query_input ) )-> + dest_guid ); + ib_gid_set_default( &path_rec.sgid, + ( ( osmv_guid_pair_t * ) ( p_query_req-> + p_query_input ) )-> + src_guid ); + break; + + case OSMV_QUERY_PATH_REC_BY_GIDS: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","PATH_REC_BY_GIDS\n" ); + memset(&path_rec, 0, sizeof(ib_path_rec_t )); + sa_mad_data.attr_id = IB_MAD_ATTR_PATH_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_path_rec_t ) ); + sa_mad_data.comp_mask = ( IB_PR_COMPMASK_DGID | IB_PR_COMPMASK_SGID ); + sa_mad_data.p_attr = &path_rec; + memcpy( &path_rec.dgid, + &( ( osmv_gid_pair_t * ) ( p_query_req->p_query_input ) )-> + dest_gid, sizeof( ib_gid_t ) ); + memcpy( &path_rec.sgid, + &( ( osmv_gid_pair_t * ) ( p_query_req->p_query_input ) )-> + src_gid, sizeof( ib_gid_t ) ); + break; + + case OSMV_QUERY_PATH_REC_BY_LIDS: + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","PATH_REC_BY_LIDS\n" ); + memset(&path_rec, 0, sizeof(ib_path_rec_t )); + sa_mad_data.method = IB_MAD_METHOD_GET; + sa_mad_data.attr_id = IB_MAD_ATTR_PATH_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_path_rec_t ) ); + sa_mad_data.comp_mask = ( IB_PR_COMPMASK_DLID | IB_PR_COMPMASK_SLID ); + sa_mad_data.p_attr = &path_rec; + path_rec.dlid = + ( ( osmv_lid_pair_t * ) ( p_query_req->p_query_input ) )->dest_lid; + path_rec.slid = + ( ( osmv_lid_pair_t * ) ( p_query_req->p_query_input ) )->src_lid; + break; + + case OSMV_QUERY_UD_MULTICAST_SET: + sa_mad_data.method = IB_MAD_METHOD_SET; + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","OSMV_QUERY_UD_MULTICAST_SET\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_member_rec_t ) ); + sa_mad_data.comp_mask = p_user_query->comp_mask; + sa_mad_data.p_attr = p_user_query->p_attr; + break; + + case OSMV_QUERY_UD_MULTICAST_DELETE: + sa_mad_data.method = IB_MAD_METHOD_DELETE; + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + osm_log( p_log, OSM_LOG_DEBUG, + "osmv_query_sa DBG:001 %s","OSMV_QUERY_UD_MULTICAST_DELETE\n" ); + sa_mad_data.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + sa_mad_data.attr_offset = + ib_get_attr_offset( sizeof( ib_member_rec_t ) ); + sa_mad_data.comp_mask = p_user_query->comp_mask; + sa_mad_data.p_attr = p_user_query->p_attr; + break; + + default: + osm_log( p_log, OSM_LOG_ERROR, + "osmv_query_sa DBG:001 %s","UNKNOWN\n" ); + CL_ASSERT( 0 ); + return IB_ERROR; + } + + status = __osmv_send_sa_req( h_bind, &sa_mad_data, p_query_req ); + + OSM_LOG_EXIT( p_log ); + return status; +} + + +/***************************************************************************** + *****************************************************************************/ + + + diff --git a/branches/Ndi/ulp/opensm/user/libvendor/winosm_common.c b/branches/Ndi/ulp/opensm/user/libvendor/winosm_common.c new file mode 100644 index 00000000..361d2c00 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/libvendor/winosm_common.c @@ -0,0 +1,249 @@ +#include +#include +#include +#include + +int optind=1; +int opterr=1; +int optopt='?'; +int iArg=1; + +char* +GetOsmTempPath(void) +{ + char* temp_path; 
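+	/* Editor's note (an assumption about the Win32 API, not from the
+	   patch): GetTempPath returns the required buffer length, including
+	   the terminating NUL, when the supplied buffer is too small; that is
+	   what makes the allocate-and-retry sequence below work:
+
+		length = GetTempPath(size, buf);
+		if (length > size)
+			grow buf to length+1 and call GetTempPath again
+	 */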
+	int length;
+	temp_path = (char*)cl_malloc(OSM_MAX_LOG_NAME_SIZE);
+	length = GetTempPath(OSM_MAX_LOG_NAME_SIZE, temp_path);
+	if (length > OSM_MAX_LOG_NAME_SIZE)
+	{
+		cl_free(temp_path);
+		temp_path = (char*)cl_malloc(length+1);
+		GetTempPath(length+1, temp_path);
+	}
+	return temp_path;
+}
+
+char*
+GetOsmCachePath(void)
+{
+	char* cache_path;
+	char* tmp_file_name;
+	char* winib_home;
+	HANDLE hFile;
+
+	/* getenv returns a pointer into the environment; it must not be
+	   allocated or freed by the caller. */
+	winib_home = getenv("WinIB_HOME");
+	if (winib_home == NULL)
+	{
+		/* The WinIB_HOME variable isn't defined. Use the
+		   default temp path */
+		return GetOsmTempPath();
+	}
+	cache_path = (char*)cl_malloc(OSM_MAX_LOG_NAME_SIZE);
+	strcpy(cache_path, winib_home);
+
+	strcat(cache_path, "\\etc\\");
+	tmp_file_name = (char*)cl_malloc(OSM_MAX_LOG_NAME_SIZE);
+	strcpy(tmp_file_name, cache_path);
+	strcat(tmp_file_name, "opensm.opts");
+	hFile = CreateFile(tmp_file_name,
+		GENERIC_READ,
+		0,
+		NULL,
+		OPEN_EXISTING,
+		FILE_ATTRIBUTE_NORMAL,
+		NULL);
+	if (hFile == INVALID_HANDLE_VALUE)
+	{
+		cl_free(cache_path);
+		cl_free(tmp_file_name);
+		return GetOsmTempPath();
+	}
+	/* Such a file exists. This means the directory is usable */
+	CloseHandle(hFile);
+	cl_free(tmp_file_name);
+
+	return cache_path;
+}
+
+/****************************************************************************/
+int getopt_long_only(int argc, char *const*argv,
+	const char *optstring,
+	const struct option *longopts, int *longindex)
+{
+	char chOpt;
+	char tmp_str[256];
+	char* tmp_arg = NULL;
+	char* tok = NULL;
+	int i;
+	char tokens[2] = {'='};
+
+	if (iArg == argc)
+	{
+		return (EOF);
+	}
+
+	if (argv[iArg][0] != '-')
+	{
+		/* Does not start with a '-'; we are done scanning */
+		return (EOF);
+	}
+
+	/* "--" terminates the scan */
+	if (!strcmp("--", argv[iArg])) {
+		return EOF;
+	}
+
+	/* char option, e.g. "-d 5" */
+	if ((argv[iArg][0] == '-') && (argv[iArg][1] != '-')) {
+		optarg = get_char_option(optstring, argv, argc, iArg, &optind, &chOpt);
+		iArg = optind;
+		return chOpt;
+	}
+
+	/* Look for this string in longopts */
+	strcpy(tmp_str, &(argv[iArg][2]));
+
+	/* get the option */
+	tok = strtok(tmp_str, tokens);
+
+	for (i = 0; longopts[i].name; i++)
+	{
+		if (strcmp(tok, longopts[i].name) == 0)
+		{
+			/* We have a match */
+			if (longindex != NULL)
+				*longindex = i;
+
+			if (longopts[i].flag != NULL) {
+				*(longopts[i].flag) = longopts[i].val;
+			}
+
+			if (longopts[i].has_arg != no_argument)
+			{
+				/* get the argument */
+				if (strchr(argv[iArg], '=') != NULL)
+				{
+					optarg = strtok(NULL, tokens);
+				} else { /* the next arg on the cmd line is the param */
+					tmp_arg = argv[iArg+1];
+					if (*tmp_arg == '-') {
+						/* no param was found */
+						chOpt = '?';
+						if ((longopts[i].has_arg == required_argument) && opterr)
+						{
+							fprintf(stderr, "Option %s requires an argument\n", tok);
+						}
+					} else
+					{
+						optarg = tmp_arg;
+						iArg++;
+						optind++;
+					}
+				}
+			} /* has_arg */
+
+			iArg++;
+			optind++;
+			if (longopts[i].flag == 0)
+				return (longopts[i].val);
+			else return 0;
+		} /* end if strcmp */
+	}
+
+	return ('?');
+}
+
+/******************************************************************************/
+static char* get_char_option(const char* optstring, char*const* argv, int argc,
+	int iArg, int* opt_ind, char* opt_p)
+{
+	char chOpt;
+	char* tmp_str;
+	char* prm = NULL;
+
+	chOpt = argv[iArg][1];
+
+	/* not a valid option character */
+	if (!isalpha(chOpt))
+	{
+		chOpt = EOF;
+		goto end;
+	}
+
+	tmp_str = strchr(optstring, chOpt);
+
+	/* the option wasn't found in optstring */
+	if (tmp_str == NULL) {
+		optopt = chOpt;	/* remember the offending option character */
+		chOpt = EOF;
+		goto end;
+	}
+
+	/*
don't need argument */ + if (tmp_str[1]!= ':' ) { + goto end; + } + + if (argv[iArg][2] != '\0') + { + // param is attached to option: -po8889 + prm = &(argv[iArg][2]); + goto end; + } + + // must look at next argv for param + /*at the end of arg list */ + if ((iArg)+1 == argc) { + /* no param will be found */ + if (tmp_str[2]== ':' ) { + /* optional argument ::*/ + goto end; + } + else + { + chOpt = EOF; + goto end; + } + } + + prm = &(argv[(iArg)+1][0]); + if (*prm == '-' ) + { + // next argv is a new option, so param + // not given for current option + if (tmp_str[2]== ':' ) { + /* optional argument ::*/ + goto end; + } + else + { + chOpt = EOF; + goto end; + } + } + + // next argv is the param + (*opt_ind)++; + + +end: + (*opt_ind)++; + *opt_p = chOpt; + return prm; +} + + +/******************************************************************************/ diff --git a/branches/Ndi/ulp/opensm/user/opensm/Makefile b/branches/Ndi/ulp/opensm/user/opensm/Makefile new file mode 100644 index 00000000..a0c06273 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/opensm/user/opensm/SOURCES b/branches/Ndi/ulp/opensm/user/opensm/SOURCES new file mode 100644 index 00000000..f1bd9326 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/SOURCES @@ -0,0 +1,164 @@ +TARGETNAME=opensm + +!if !defined(WINIBHOME) +WINIBHOME=..\..\..\.. +!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +!if defined(OSM_TARGET) +TARGETPATH=$(OSM_TARGET)\bin\user\obj$(BUILD_ALT_DIR) +!else +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) +!endif + +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 + +SOURCES=\ + osm.mc \ + osm.rc \ + opensm.rc \ + osm_db_files.c \ + osm_db_pack.c \ + osm_drop_mgr.c \ + osm_fwd_tbl.c \ + osm_inform.c \ + osm_lid_mgr.c \ + osm_lin_fwd_rcv.c \ + osm_lin_fwd_rcv_ctrl.c \ + osm_lin_fwd_tbl.c \ + osm_link_mgr.c \ + osm_matrix.c \ + osm_mcast_fwd_rcv.c \ + osm_mcast_fwd_rcv_ctrl.c \ + osm_mcast_mgr.c \ + osm_mcast_tbl.c \ + osm_mcm_info.c \ + osm_mcm_port.c \ + osm_mtree.c \ + osm_multicast.c \ + osm_node.c \ + osm_node_desc_rcv.c \ + osm_node_desc_rcv_ctrl.c \ + osm_node_info_rcv.c \ + osm_node_info_rcv_ctrl.c \ + osm_opensm.c \ + osm_pkey.c \ + osm_pkey_mgr.c \ + osm_prtn.c \ + osm_prtn_config.c \ + osm_pkey_rcv.c \ + osm_pkey_rcv_ctrl.c \ + osm_qos.c \ + osm_port.c \ + osm_port_info_rcv.c \ + osm_port_info_rcv_ctrl.c \ + osm_remote_sm.c \ + osm_req.c \ + osm_req_ctrl.c \ + osm_resp.c \ + osm_sa.c \ + osm_router.c \ + osm_sa_class_port_info.c \ + osm_sa_class_port_info_ctrl.c \ + osm_sa_guidinfo_record.c \ + osm_sa_guidinfo_record_ctrl.c \ + osm_sa_informinfo.c \ + osm_sa_informinfo_ctrl.c \ + osm_sa_lft_record.c \ + osm_sa_lft_record_ctrl.c \ + osm_sa_link_record.c \ + osm_sa_link_record_ctrl.c \ + osm_sa_mad_ctrl.c \ + osm_sa_mcmember_record.c \ + osm_sa_mcmember_record_ctrl.c \ + osm_sa_mft_record.c \ + osm_sa_mft_record_ctrl.c \ + osm_sa_node_record.c \ + osm_sa_node_record_ctrl.c \ + osm_sa_path_record.c \ + osm_sa_path_record_ctrl.c \ + osm_sa_pkey_record.c \ + osm_sa_pkey_record_ctrl.c \ + osm_sa_portinfo_record.c \ + osm_sa_portinfo_record_ctrl.c \ + osm_sa_response.c \ + osm_sa_service_record.c \ + osm_sa_service_record_ctrl.c \ + 
osm_sa_slvl_record.c \ + osm_sa_slvl_record_ctrl.c \ + osm_sa_sminfo_record.c \ + osm_sa_sminfo_record_ctrl.c \ + osm_sa_sw_info_record.c \ + osm_sa_sw_info_record_ctrl.c \ + osm_sa_vlarb_record.c \ + osm_sa_vlarb_record_ctrl.c \ + osm_service.c \ + osm_slvl_map_rcv.c \ + osm_slvl_map_rcv_ctrl.c \ + osm_sm.c \ + osm_sminfo_rcv.c \ + osm_sminfo_rcv_ctrl.c \ + osm_sm_mad_ctrl.c \ + osm_sm_state_mgr.c \ + osm_state_mgr.c \ + osm_state_mgr_ctrl.c \ + osm_subnet.c \ + osm_sweep_fail_ctrl.c \ + osm_sw_info_rcv.c \ + osm_sw_info_rcv_ctrl.c \ + osm_switch.c \ + osm_trap_rcv.c \ + osm_trap_rcv_ctrl.c \ + osm_ucast_mgr.c \ + osm_ucast_updn.c \ + osm_ucast_file.c \ + osm_ucast_ftree.c \ + osm_vl15intf.c \ + osm_vl_arb_rcv.c \ + osm_vl_arb_rcv_ctrl.c \ + st.c \ + main.c \ + cl_event_wheel.c \ + cl_dispatcher.c + +OSM_HOME=.. + +TARGETLIBS=\ +!if $(FREEBUILD) + $(LIBPATH)\*\ibal.lib \ + $(LIBPATH)\*\complib.lib \ + $(TARGETPATH)\*\osmv_ibal.lib \ + $(TARGETPATH)\*\opensm_ibal.lib \ + $(CRT_LIB_PATH)\msvcrt.lib + +!else + $(LIBPATH)\*\ibald.lib \ + $(LIBPATH)\*\complibd.lib \ + $(TARGETPATH)\*\osmv_ibald.lib \ + $(TARGETPATH)\*\opensm_ibald.lib \ + $(CRT_LIB_PATH)\msvcrt.lib +!endif + +#DO NOT TOUCH the order of search path , until ib_types.h merging process will be done +INCLUDES= \ + $(OSM_HOME)\include; \ + $(OSM_HOME); \ + $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; + +# Could be any special flag needed for this project +USER_C_FLAGS=$(USER_C_FLAGS) /MD +#Add preproccessor definitions +C_DEFINES=$(C_DEFINES) -DWIN32 -D__WIN__ -D__i386__ -Dinline=__inline -DMT_LITTLE_ENDIAN -DOSM_VENDOR_INTF_AL +C_DEFINES=$(C_DEFINES) -I.. -DHAVE_CONFIG_H +!if !$(FREEBUILD) +#C_DEFINES=$(C_DEFINES) -D_DEBUG -DDEBUG -DDBG +C_DEFINES=$(C_DEFINES) +!endif + +LINKER_FLAGS= $(LINKER_FLAGS) +MSC_WARNING_LEVEL= /W3 +#MSC_OPTIMIZATION= /O0 diff --git a/branches/Ndi/ulp/opensm/user/opensm/cl_dispatcher.c b/branches/Ndi/ulp/opensm/user/opensm/cl_dispatcher.c new file mode 100644 index 00000000..dd2f78f6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/cl_dispatcher.c @@ -0,0 +1,405 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of Dispatcher abstraction. 
+ *
+ * Environment:
+ *	All
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif	/* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+
+/* give some guidance when we build our cl_pool of messages */
+#define CL_DISP_INITIAL_MSG_COUNT	256
+#define CL_DISP_MSG_GROW_SIZE		64
+
+/* give some guidance when we build our cl_pool of registration elements */
+#define CL_DISP_INITIAL_REG_COUNT	16
+#define CL_DISP_REG_GROW_SIZE		16
+
+/********************************************************************
+   __cl_disp_worker
+
+   Description:
+	This function takes messages off the FIFO and calls Processmsg().
+	This function executes at passive level.
+
+   Inputs:
+	p_disp - Pointer to Dispatcher object
+
+   Outputs:
+	None
+
+   Returns:
+	None
+********************************************************************/
+void
+__cl_disp_worker(
+	IN void* context )
+{
+	cl_disp_msg_t *p_msg;
+	cl_dispatcher_t *p_disp = (cl_dispatcher_t*)context;
+
+	cl_spinlock_acquire( &p_disp->lock );
+
+	/* Process the FIFO until we drain it dry. */
+	while( cl_qlist_count( &p_disp->msg_fifo ) )
+	{
+		/* Pop the message at the head of the FIFO. */
+		p_msg = (cl_disp_msg_t*)cl_qlist_remove_head( &p_disp->msg_fifo );
+
+		/* we track the time the last message spent in the queue */
+		p_disp->last_msg_queue_time_us = cl_get_time_stamp() - p_msg->in_time;
+
+		/*
+		 * Release the spinlock while the message is processed.
+		 * The user's callback may reenter the dispatcher
+		 * and cause the lock to be reacquired.
+		 */
+		cl_spinlock_release( &p_disp->lock );
+		p_msg->p_dest_reg->pfn_rcv_callback(
+			(void*)p_msg->p_dest_reg->context, (void*)p_msg->p_data );
+
+		cl_atomic_dec( &p_msg->p_dest_reg->ref_cnt );
+
+		/* The client has seen the data. Notify the sender as appropriate. */
+		if( p_msg->pfn_xmt_callback )
+		{
+			p_msg->pfn_xmt_callback( (void*)p_msg->context,
+				(void*)p_msg->p_data );
+			cl_atomic_dec( &p_msg->p_src_reg->ref_cnt );
+		}
+
+		/* Grab the lock for the next iteration through the list. */
+		cl_spinlock_acquire( &p_disp->lock );
+
+		/* Return this message to the pool. */
+		cl_qpool_put( &p_disp->msg_pool, (cl_pool_item_t *)p_msg );
+	}
+
+	cl_spinlock_release( &p_disp->lock );
+}
+
+/********************************************************************
+ ********************************************************************/
+void
+cl_disp_construct(
+	IN cl_dispatcher_t* const p_disp )
+{
+	CL_ASSERT( p_disp );
+
+	cl_qlist_init( &p_disp->reg_list );
+	cl_ptr_vector_construct( &p_disp->reg_vec );
+	cl_thread_pool_construct( &p_disp->worker_threads );
+	cl_qlist_init( &p_disp->msg_fifo );
+	cl_spinlock_construct( &p_disp->lock );
+	cl_qpool_construct( &p_disp->msg_pool );
+}
+
+/********************************************************************
+ ********************************************************************/
+void
+cl_disp_shutdown(
+	IN cl_dispatcher_t* const p_disp )
+{
+	CL_ASSERT( p_disp );
+
+	/* Stop the thread pool. */
+	cl_thread_pool_destroy( &p_disp->worker_threads );
+
+	/* Process all outstanding callbacks. */
+	__cl_disp_worker( p_disp );
+
+	/* Free all registration info.
*/ + while( !cl_is_qlist_empty( &p_disp->reg_list ) ) + free( cl_qlist_remove_head( &p_disp->reg_list ) ); +} + +/******************************************************************** + ********************************************************************/ +void +cl_disp_destroy( + IN cl_dispatcher_t* const p_disp ) +{ + CL_ASSERT( p_disp ); + + cl_spinlock_destroy( &p_disp->lock ); + /* Destroy the message pool */ + cl_qpool_destroy( &p_disp->msg_pool ); + /* Destroy the pointer vector of registrants. */ + cl_ptr_vector_destroy( &p_disp->reg_vec ); +} + +/******************************************************************** + ********************************************************************/ +cl_status_t +cl_disp_init( + IN cl_dispatcher_t* const p_disp, + IN const uint32_t thread_count, + IN const char* const name ) +{ + cl_status_t status; + + CL_ASSERT( p_disp ); + + cl_disp_construct( p_disp ); + + status = cl_spinlock_init( &p_disp->lock ); + if( status != CL_SUCCESS ) + { + cl_disp_destroy( p_disp ); + return( status ); + } + + /* Specify no upper limit to the number of messages in the pool */ + status = cl_qpool_init( &p_disp->msg_pool, CL_DISP_INITIAL_MSG_COUNT, + 0, CL_DISP_MSG_GROW_SIZE, sizeof(cl_disp_msg_t), NULL, + NULL, NULL ); + if( status != CL_SUCCESS ) + { + cl_disp_destroy( p_disp ); + return( status ); + } + + status = cl_ptr_vector_init( &p_disp->reg_vec, CL_DISP_INITIAL_REG_COUNT, + CL_DISP_REG_GROW_SIZE ); + if( status != CL_SUCCESS ) + { + cl_disp_destroy( p_disp ); + return( status ); + } + + status = cl_thread_pool_init( &p_disp->worker_threads, thread_count, + __cl_disp_worker, p_disp, name ); + if( status != CL_SUCCESS ) + cl_disp_destroy( p_disp ); + + return( status ); +} + +/******************************************************************** + ********************************************************************/ +cl_disp_reg_handle_t +cl_disp_register( + IN cl_dispatcher_t* const p_disp, + IN const cl_disp_msgid_t msg_id, + IN cl_pfn_msgrcv_cb_t pfn_callback OPTIONAL, + IN const void* const context OPTIONAL ) +{ + cl_disp_reg_info_t *p_reg; + cl_status_t status; + + CL_ASSERT( p_disp ); + + /* Check that the requested registrant ID is available. */ + cl_spinlock_acquire( &p_disp->lock ); + if( (msg_id != CL_DISP_MSGID_NONE ) && + (msg_id < cl_ptr_vector_get_size( &p_disp->reg_vec )) && + (cl_ptr_vector_get( &p_disp->reg_vec, msg_id )) ) + { + cl_spinlock_release( &p_disp->lock ); + return( NULL ); + } + + /* Get a registration info from the pool. */ + p_reg = (cl_disp_reg_info_t*)malloc( sizeof(cl_disp_reg_info_t) ); + if( !p_reg ) + { + cl_spinlock_release( &p_disp->lock ); + return( NULL ); + } + else + { + memset( p_reg, 0, sizeof(cl_disp_reg_info_t) ); + } + + p_reg->p_disp = p_disp; + p_reg->ref_cnt = 0; + p_reg->pfn_rcv_callback = pfn_callback; + p_reg->context = context; + p_reg->msg_id = msg_id; + + /* Insert the registration in the list. */ + cl_qlist_insert_tail( &p_disp->reg_list, (cl_list_item_t*)p_reg ); + + /* Set the array entry to the registrant. */ + /* The ptr_vector grow automatically as necessary. 
*/ + if( msg_id != CL_DISP_MSGID_NONE ) + { + status = cl_ptr_vector_set( &p_disp->reg_vec, msg_id, p_reg ); + if( status != CL_SUCCESS ) + { + free( p_reg ); + cl_spinlock_release( &p_disp->lock ); + return( NULL ); + } + } + + cl_spinlock_release( &p_disp->lock ); + + return( p_reg ); +} + +/******************************************************************** + ********************************************************************/ +void +cl_disp_unregister( + IN const cl_disp_reg_handle_t handle ) +{ + cl_disp_reg_info_t *p_reg; + cl_dispatcher_t *p_disp; + + if( handle == CL_DISP_INVALID_HANDLE ) + return; + + p_reg = (cl_disp_reg_info_t*)handle; + p_disp = p_reg->p_disp; + CL_ASSERT( p_disp ); + + cl_spinlock_acquire( &p_disp->lock ); + /* + * Clear the registrant vector entry. This will cause any further + * post calls to fail. + */ + if( p_reg->msg_id != CL_DISP_MSGID_NONE ) + { + CL_ASSERT( p_reg->msg_id < cl_ptr_vector_get_size(&p_disp->reg_vec)); + cl_ptr_vector_set( &p_disp->reg_vec, p_reg->msg_id, NULL ); + } + cl_spinlock_release( &p_disp->lock ); + + while( p_reg->ref_cnt > 0) + cl_thread_suspend( 1 ); + + cl_spinlock_acquire(&p_disp->lock); + /* Remove the registrant from the list. */ + cl_qlist_remove_item( &p_disp->reg_list, (cl_list_item_t*)p_reg ); + /* Return the registration info to the pool */ + free( p_reg ); + + cl_spinlock_release( &p_disp->lock ); +} + +/******************************************************************** + ********************************************************************/ +cl_status_t +cl_disp_post( + IN const cl_disp_reg_handle_t handle, + IN const cl_disp_msgid_t msg_id, + IN const void* const p_data, + IN cl_pfn_msgdone_cb_t pfn_callback OPTIONAL, + IN const void* const context OPTIONAL ) +{ + cl_disp_reg_info_t *p_src_reg = (cl_disp_reg_info_t*)handle; + cl_disp_reg_info_t *p_dest_reg; + cl_dispatcher_t *p_disp; + cl_disp_msg_t *p_msg; + + p_disp = handle->p_disp; + CL_ASSERT( p_disp ); + CL_ASSERT( msg_id != CL_DISP_MSGID_NONE ); + + cl_spinlock_acquire( &p_disp->lock ); + /* Check that the recipient exists. */ + p_dest_reg = cl_ptr_vector_get( &p_disp->reg_vec, msg_id ); + if( !p_dest_reg ) + { + cl_spinlock_release( &p_disp->lock ); + return( CL_NOT_FOUND ); + } + + /* Get a free message from the pool. */ + p_msg = (cl_disp_msg_t*)cl_qpool_get( &p_disp->msg_pool ); + if( !p_msg ) + { + cl_spinlock_release( &p_disp->lock ); + return( CL_INSUFFICIENT_MEMORY ); + } + + /* Initialize the message */ + p_msg->p_src_reg = p_src_reg; + p_msg->p_dest_reg = p_dest_reg; + p_msg->p_data = p_data; + p_msg->pfn_xmt_callback = pfn_callback; + p_msg->context = context; + p_msg->in_time = cl_get_time_stamp(); + + /* + * Increment the sender's reference count if they request a completion + * notification. + */ + if( pfn_callback ) + cl_atomic_inc( &p_src_reg->ref_cnt ); + + /* Increment the recipient's reference count. */ + cl_atomic_inc( &p_dest_reg->ref_cnt ); + + /* Queue the message in the FIFO. */ + cl_qlist_insert_tail( &p_disp->msg_fifo, (cl_list_item_t *)p_msg ); + cl_spinlock_release( &p_disp->lock ); + + /* Signal the thread pool that there is work to be done. 
*/ + cl_thread_pool_signal( &p_disp->worker_threads ); + return( CL_SUCCESS ); +} + +void +cl_disp_get_queue_status( + IN const cl_disp_reg_handle_t handle, + OUT uint32_t *p_num_queued_msgs, + OUT uint64_t *p_last_msg_queue_time_ms) +{ + cl_dispatcher_t *p_disp = ((cl_disp_reg_info_t*)handle)->p_disp; + + cl_spinlock_acquire( &p_disp->lock ); + + if (p_last_msg_queue_time_ms) + *p_last_msg_queue_time_ms = p_disp->last_msg_queue_time_us/1000; + + if (p_num_queued_msgs) + *p_num_queued_msgs = cl_qlist_count( &p_disp->msg_fifo ); + + cl_spinlock_release( &p_disp->lock ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/cl_event_wheel.c b/branches/Ndi/ulp/opensm/user/opensm/cl_event_wheel.c new file mode 100644 index 00000000..a4ec7aa5 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/cl_event_wheel.c @@ -0,0 +1,663 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include + +cl_status_t +__event_will_age_before( + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + uint64_t aging_time = *((uint64_t*)context); + cl_event_wheel_reg_info_t *p_event; + + p_event = + PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + + if (p_event->aging_time < aging_time) + return CL_SUCCESS; + else + return CL_NOT_FOUND; +} + +void +__cl_event_wheel_callback( IN void* context ) +{ + cl_event_wheel_t *p_event_wheel = (cl_event_wheel_t *)context; + cl_list_item_t *p_list_item, *p_prev_event_list_item; + cl_list_item_t *p_list_next_item; + cl_event_wheel_reg_info_t *p_event; + uint64_t current_time; + uint64_t next_aging_time; + uint32_t new_timeout; + cl_status_t cl_status; + + OSM_LOG_ENTER( p_event_wheel->p_log, __cl_event_wheel_callback); + + /* might be during closing ... */ + if (p_event_wheel->closing) + { + goto JustExit; + } + + current_time = cl_get_time_stamp(); + + if (NULL != p_event_wheel->p_external_lock) { + + /* Take care of the order of acquiring locks to avoid the deadlock! + * The external lock goes first. 
+ */ + CL_SPINLOCK_ACQUIRE(p_event_wheel->p_external_lock); + } + + CL_SPINLOCK_ACQUIRE(&p_event_wheel->lock); + + p_list_item = cl_qlist_head(&p_event_wheel->events_wheel); + if (p_list_item == cl_qlist_end(&p_event_wheel->events_wheel)) + { + /* the list is empty - nothing to do */ + goto Exit; + } + + /* we found such an item. get the p_event */ + p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + + while (p_event->aging_time <= current_time) + { + /* this object has aged - invoke it's callback */ + if (p_event->pfn_aged_callback) + { + next_aging_time = + p_event->pfn_aged_callback( + p_event->key, p_event->num_regs, p_event->context); + } + else + { + next_aging_time = 0; + } + + /* point to the next object in the wheel */ + p_list_next_item = cl_qlist_next(p_list_item); + + /* We need to retire the event if the next aging time passed */ + if (next_aging_time < current_time) + { + /* remove it from the map */ + cl_qmap_remove_item(&p_event_wheel->events_map, &(p_event->map_item)); + + /* pop p_event from the wheel */ + cl_qlist_remove_head(&p_event_wheel->events_wheel); + + /* delete the event info object - allocated by cl_event_wheel_reg */ + free(p_event); + } + else + { + /* update the required aging time */ + p_event->aging_time = next_aging_time; + p_event->num_regs++; + + /* do not remove from the map - but remove from the list head and + place in the correct position */ + + /* pop p_event from the wheel */ + cl_qlist_remove_head(&p_event_wheel->events_wheel); + + /* find the event that ages just before */ + p_prev_event_list_item = cl_qlist_find_from_tail( + &p_event_wheel->events_wheel, + __event_will_age_before, + &p_event->aging_time); + + /* insert just after */ + cl_qlist_insert_next( + &p_event_wheel->events_wheel, + p_prev_event_list_item , + &p_event->list_item); + + /* as we have modified the list - restart from first item: */ + p_list_next_item = cl_qlist_head(&p_event_wheel->events_wheel); + } + + /* advance to next event */ + p_list_item = p_list_next_item; + if (p_list_item == cl_qlist_end(&p_event_wheel->events_wheel)) + { + /* the list is empty - nothing to do */ + break; + } + + /* get the p_event */ + p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + } + + /* We need to restart the timer only if the list is not empty now */ + if (p_list_item != cl_qlist_end(&p_event_wheel->events_wheel)) + { + /* get the p_event */ + p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + + /* start the timer to the timeout [msec] */ + new_timeout = (uint32_t)(((p_event->aging_time - current_time)/1000)+0.5); + osm_log (p_event_wheel->p_log, OSM_LOG_DEBUG, + "__cl_event_wheel_callback : " + "Restart timer in : %u [msec]\n", + new_timeout); + cl_status = cl_timer_start(&p_event_wheel->timer, new_timeout); + if (cl_status != CL_SUCCESS) + { + osm_log (p_event_wheel->p_log, OSM_LOG_ERROR, + "__cl_event_wheel_callback : ERR 6100: " + "Failed to start timer\n" ); + } + } + + /* release the lock */ + Exit: + CL_SPINLOCK_RELEASE( &p_event_wheel->lock ); + if (NULL != p_event_wheel->p_external_lock) { + CL_SPINLOCK_RELEASE(p_event_wheel->p_external_lock); + } + JustExit: + OSM_LOG_EXIT( p_event_wheel->p_log ); +} + +/* + * Construct and Initialize + */ +void +cl_event_wheel_construct( + IN cl_event_wheel_t* const p_event_wheel ) +{ + cl_spinlock_construct( &(p_event_wheel->lock) ); + cl_timer_construct( &(p_event_wheel->timer) ); +} + +cl_status_t +cl_event_wheel_init( + IN cl_event_wheel_t* const 
p_event_wheel, + IN osm_log_t *p_log) +{ + cl_status_t cl_status = CL_SUCCESS; + + OSM_LOG_ENTER( p_log, cl_event_wheel_init ); + + /* initialize */ + p_event_wheel->p_log = p_log; + p_event_wheel->p_external_lock = NULL; + p_event_wheel->closing = FALSE; + cl_status = cl_spinlock_init( &(p_event_wheel->lock) ); + if (cl_status != CL_SUCCESS) + { + osm_log (p_event_wheel->p_log, OSM_LOG_ERROR, + "cl_event_wheel_init : ERR 6101: " + "Failed to initialize cl_spinlock\n" ); + goto Exit; + } + cl_qlist_init( &p_event_wheel->events_wheel); + cl_qmap_init( &p_event_wheel->events_map ); + + /* init the timer with timeout */ + cl_status = cl_timer_init(&p_event_wheel->timer, + __cl_event_wheel_callback, + p_event_wheel ); /* cb context */ + + if (cl_status != CL_SUCCESS) + { + osm_log (p_event_wheel->p_log, OSM_LOG_ERROR, + "cl_event_wheel_init : ERR 6102: " + "Failed to initialize cl_timer\n" ); + goto Exit; + } + Exit: + OSM_LOG_EXIT( p_event_wheel->p_log ); + return(cl_status); +} + +cl_status_t +cl_event_wheel_init_ex( + IN cl_event_wheel_t* const p_event_wheel, + IN osm_log_t *p_log, + IN cl_spinlock_t *p_external_lock) +{ + cl_status_t cl_status; + + cl_status = cl_event_wheel_init(p_event_wheel, p_log); + if (CL_SUCCESS != cl_status) + { + return cl_status; + } + + p_event_wheel->p_external_lock = p_external_lock; + return cl_status; +} + +void +cl_event_wheel_dump( + IN cl_event_wheel_t* const p_event_wheel ) +{ + cl_list_item_t *p_list_item; + cl_event_wheel_reg_info_t *p_event; + + OSM_LOG_ENTER( p_event_wheel->p_log, cl_event_wheel_dump ); + + p_list_item = cl_qlist_head(&p_event_wheel->events_wheel); + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_dump: " + "event_wheel ptr:%p\n", + p_event_wheel); + + while (p_list_item != cl_qlist_end(&p_event_wheel->events_wheel) ) + { + p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_dump: " + "Found event key:<0x%"PRIx64">, aging time:%" PRIu64"\n", + p_event->key, p_event->aging_time ); + p_list_item = cl_qlist_next( p_list_item ); + } + OSM_LOG_EXIT( p_event_wheel->p_log ); +} + +void +cl_event_wheel_destroy( + IN cl_event_wheel_t* const p_event_wheel ) +{ + cl_list_item_t *p_list_item; + cl_map_item_t *p_map_item; + cl_event_wheel_reg_info_t *p_event; + + OSM_LOG_ENTER( p_event_wheel->p_log, cl_event_wheel_destroy ); + + /* we need to get a lock */ + CL_SPINLOCK_ACQUIRE( &p_event_wheel->lock ); + + cl_event_wheel_dump( p_event_wheel); + + /* go over all the items in the list and remove them */ + p_list_item = cl_qlist_remove_head(&p_event_wheel->events_wheel); + while ( p_list_item != cl_qlist_end(&p_event_wheel->events_wheel) ) + { + p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item); + + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_destroy: " + "Found outstanding event key:<0x%"PRIx64">\n", + p_event->key ); + + /* remove it from the map */ + p_map_item = &(p_event->map_item); + cl_qmap_remove_item(&p_event_wheel->events_map, p_map_item); + free(p_event); /* allocated by cl_event_wheel_reg */ + p_list_item = cl_qlist_remove_head(&p_event_wheel->events_wheel); + } + + /* destroy the timer */ + cl_timer_destroy( &p_event_wheel->timer ); + + /* destroy the lock (this should be done without releasing - we don't want + any other run to grab the lock at this point. 
*/ + CL_SPINLOCK_RELEASE( &p_event_wheel->lock ); + cl_spinlock_destroy( &(p_event_wheel->lock) ); + + OSM_LOG_EXIT( p_event_wheel->p_log ); +} + +cl_status_t +cl_event_wheel_reg( + IN cl_event_wheel_t* const p_event_wheel, + IN const uint64_t key, + IN const uint64_t aging_time_usec, + IN cl_pfn_event_aged_cb_t pfn_callback, + IN void* const context ) +{ + cl_event_wheel_reg_info_t *p_event; + uint64_t timeout; + uint32_t to; + cl_status_t cl_status = CL_SUCCESS; + cl_list_item_t *prev_event_list_item; + cl_map_item_t *p_map_item; + + OSM_LOG_ENTER( p_event_wheel->p_log, cl_event_wheel_reg ); + + /* Get the lock on the manager */ + CL_SPINLOCK_ACQUIRE( &(p_event_wheel->lock) ); + + cl_event_wheel_dump( p_event_wheel); + + /* Make sure such a key does not exists */ + p_map_item = cl_qmap_get( &p_event_wheel->events_map, key ); + if (p_map_item != cl_qmap_end( &p_event_wheel->events_map )) + { + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_reg: " + "Already exists key:0x%"PRIx64"\n", key); + + /* already there - remove it from the list as it is getting a new time */ + p_event = PARENT_STRUCT(p_map_item, cl_event_wheel_reg_info_t, map_item); + + /* remove the item from the qlist */ + cl_qlist_remove_item( &p_event_wheel->events_wheel, &p_event->list_item ); + /* and the qmap */ + cl_qmap_remove_item( &p_event_wheel->events_map, &p_event->map_item ); + } + else + { + /* make a new one */ + p_event = (cl_event_wheel_reg_info_t *) + malloc( sizeof (cl_event_wheel_reg_info_t) ); + p_event->num_regs = 0; + } + + p_event->key = key; + p_event->aging_time = aging_time_usec; + p_event->pfn_aged_callback = pfn_callback; + p_event->context = context; + p_event->num_regs++; + + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_reg: " + "Registering event key:0x%"PRIx64" aging in %u [msec]\n", + p_event->key, + (uint32_t)((p_event->aging_time - cl_get_time_stamp()) / 1000 )); + + /* If the list is empty - need to start the timer */ + if (cl_is_qlist_empty(&p_event_wheel->events_wheel)) + { + /* Edward Bortnikov 03/29/2003 + * ++TBD Consider moving the timer manipulation behind the list manipulation. + */ + + /* calculate the new timeout */ + timeout = (p_event->aging_time - cl_get_time_stamp() + 500) / 1000; + + /* stop the timer if it is running */ + + /* Edward Bortnikov 03/29/2003 + * Don't call cl_timer_stop() because it spins forever. + * cl_timer_start() will invoke cl_timer_stop() by itself. + * + * The problematic scenario is when __cl_event_wheel_callback() + * is in race condition with this code. It sets timer.in_timer_cb + * to TRUE and then blocks on p_event_wheel->lock. Following this, + * the call to cl_timer_stop() hangs. Following this, the whole system + * enters into a deadlock. + * + * cl_timer_stop(&p_event_wheel->timer); + */ + + /* The timeout for the cl_timer_start should be given as uint32_t. + if there is an overflow - warn about it. */ + to = (uint32_t)timeout; + if ( timeout > (uint32_t)timeout ) + { + to = 0xffffffff; /* max 32 bit timer */ + osm_log (p_event_wheel->p_log, OSM_LOG_INFO, + "cl_event_wheel_reg: " + "timeout requested is too large. 
Using timeout: %u\n", + to ); + } + + /* start the timer to the timeout [msec] */ + cl_status = cl_timer_start(&p_event_wheel->timer, to); + if (cl_status != CL_SUCCESS) + { + osm_log (p_event_wheel->p_log, OSM_LOG_ERROR, + "cl_event_wheel_reg : ERR 6103: " + "Failed to start timer\n" ); + goto Exit; + } + } + + /* insert the object to the qlist and the qmap */ + + /* BUT WE MUST INSERT IT IN A SORTED MANNER */ + prev_event_list_item = cl_qlist_find_from_tail( + &p_event_wheel->events_wheel, + __event_will_age_before, + &p_event->aging_time); + + cl_qlist_insert_next( + &p_event_wheel->events_wheel, + prev_event_list_item , + &p_event->list_item); + + cl_qmap_insert( &p_event_wheel->events_map, key, &(p_event->map_item)); + + Exit: + CL_SPINLOCK_RELEASE( &p_event_wheel->lock ); + OSM_LOG_EXIT( p_event_wheel->p_log ); + + return cl_status; +} + +void +cl_event_wheel_unreg( + IN cl_event_wheel_t* const p_event_wheel, + IN uint64_t key ) +{ + cl_event_wheel_reg_info_t *p_event; + cl_map_item_t* p_map_item; + + OSM_LOG_ENTER( p_event_wheel->p_log, cl_event_wheel_unreg ); + + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_unreg: " + "Removing key:0x%"PRIx64"\n", key ); + + CL_SPINLOCK_ACQUIRE( &p_event_wheel->lock ); + p_map_item = cl_qmap_get( &p_event_wheel->events_map, key ); + if (p_map_item != cl_qmap_end( &p_event_wheel->events_map )) + { + /* we found such an item. */ + p_event = PARENT_STRUCT(p_map_item, cl_event_wheel_reg_info_t, map_item); + + /* remove the item from the qlist */ + cl_qlist_remove_item( &p_event_wheel->events_wheel, &(p_event->list_item)); + /* remove the item from the qmap */ + cl_qmap_remove_item( &p_event_wheel->events_map, &(p_event->map_item) ); + + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_unreg: " + "Removed key:0x%"PRIx64"\n", key ); + + /* free the item */ + free(p_event); + } + else + { + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_unreg: " + "Did not find key:0x%"PRIx64"\n", key ); + } + + CL_SPINLOCK_RELEASE( &p_event_wheel->lock ); + OSM_LOG_EXIT( p_event_wheel->p_log ); + +} + +uint32_t +cl_event_wheel_num_regs( + IN cl_event_wheel_t* const p_event_wheel, + IN uint64_t key ) +{ + + cl_event_wheel_reg_info_t *p_event; + cl_map_item_t* p_map_item; + uint32_t num_regs = 0; + + OSM_LOG_ENTER( p_event_wheel->p_log, cl_event_wheel_num_regs ); + + /* try to find the key in the map */ + osm_log( p_event_wheel->p_log, OSM_LOG_DEBUG, + "cl_event_wheel_num_regs: " + "Looking for key:0x%"PRIx64"\n", key ); + + CL_SPINLOCK_ACQUIRE( &p_event_wheel->lock ); + p_map_item = cl_qmap_get( &p_event_wheel->events_map, key ); + if (p_map_item != cl_qmap_end( &p_event_wheel->events_map )) + { + /* ok so we can simply return it's num_regs */ + p_event = PARENT_STRUCT(p_map_item, cl_event_wheel_reg_info_t, map_item); + num_regs = p_event->num_regs; + } + + CL_SPINLOCK_RELEASE( &p_event_wheel->lock ); + OSM_LOG_EXIT( p_event_wheel->p_log ); + return(num_regs); +} + +#ifdef __CL_EVENT_WHEEL_TEST__ + +/* Dump out the complete state of the event wheel */ +void __cl_event_wheel_dump( + IN cl_event_wheel_t* const p_event_wheel) +{ + cl_list_item_t *p_list_item; + cl_map_item_t *p_map_item; + cl_event_wheel_reg_info_t *p_event; + + printf("************** Event Wheel Dump ***********************\n"); + printf("Event Wheel List has %u items:\n", + cl_qlist_count( &p_event_wheel->events_wheel )); + + p_list_item = cl_qlist_head(&p_event_wheel->events_wheel); + while (p_list_item != cl_qlist_end(&p_event_wheel->events_wheel)) + { + 
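+    /* each entry on the wheel is a cl_event_wheel_reg_info_t embedded
+       via its list_item field; PARENT_STRUCT recovers the container */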
+    p_event = PARENT_STRUCT(p_list_item, cl_event_wheel_reg_info_t, list_item);
+    printf("Event key:0x%"PRIx64" Context:%s NumRegs:%u\n",
+           p_event->key, (char *)p_event->context, p_event->num_regs);
+
+    /* next */
+    p_list_item = cl_qlist_next(p_list_item);
+  }
+
+  printf("Event Map has %u items:\n",
+         cl_qmap_count( &p_event_wheel->events_map ));
+
+  p_map_item = cl_qmap_head(&p_event_wheel->events_map);
+  while (p_map_item != cl_qmap_end(&p_event_wheel->events_map))
+  {
+    p_event = PARENT_STRUCT(p_map_item, cl_event_wheel_reg_info_t, map_item);
+    printf("Event key:0x%"PRIx64" Context:%s NumRegs:%u\n",
+           p_event->key, (char *)p_event->context, p_event->num_regs);
+
+    /* next */
+    p_map_item = cl_qmap_next(p_map_item);
+  }
+
+}
+
+/* The callback for an aging event - must match cl_pfn_event_aged_cb_t:
+   it receives the key, the registration count and the context, and
+   returns the next aging time (0 retires the event).
+   We assume we pass a text context. */
+uint64_t __test_event_aging(uint64_t key, uint32_t num_regs, void *context)
+{
+  printf("*****************************************************\n");
+  printf("Aged key: 0x%"PRIx64" Context:%s NumRegs:%u\n",
+         key, (char *)context, num_regs);
+  return 0;
+}
+
+int
+main ()
+{
+  osm_log_t log;
+  cl_event_wheel_t event_wheel;
+  /* uint64_t key; */
+
+  /* construct */
+  osm_log_construct( &log );
+  cl_event_wheel_construct( &event_wheel );
+
+  /* init */
+  osm_log_init_v2( &log, TRUE, 0xff, NULL, 0, FALSE);
+  cl_event_wheel_init( &event_wheel, &log );
+
+  /* Start Playing */
+  cl_event_wheel_reg( &event_wheel,
+                      1,                              /* key */
+                      cl_get_time_stamp() + 3000000,  /* 3 sec lifetime */
+                      __test_event_aging,             /* cb */
+                      "The first Aging Event"
+                      );
+
+  cl_event_wheel_reg( &event_wheel,
+                      2,                              /* key */
+                      cl_get_time_stamp() + 3000000,  /* 3 sec lifetime */
+                      __test_event_aging,             /* cb */
+                      "The Second Aging Event"
+                      );
+
+  cl_event_wheel_reg( &event_wheel,
+                      3,                              /* key */
+                      cl_get_time_stamp() + 3500000,  /* 3.5 sec lifetime */
+                      __test_event_aging,             /* cb */
+                      "The Third Aging Event"
+                      );
+
+  __cl_event_wheel_dump(&event_wheel);
+
+  sleep(2);
+  cl_event_wheel_reg( &event_wheel,
+                      2,                              /* key */
+                      cl_get_time_stamp() + 8000000,  /* 8 sec lifetime */
+                      __test_event_aging,             /* cb */
+                      "The Second Aging Event Moved"
+                      );
+
+  __cl_event_wheel_dump(&event_wheel);
+
+  sleep(1);
+  /* remove the third event */
+  cl_event_wheel_unreg( &event_wheel,
+                        3); /* key */
+
+  /* get the number of registrations for the keys */
+  printf("Event 1 Registered: %u\n", cl_event_wheel_num_regs(&event_wheel, 1));
+  printf("Event 2 Registered: %u\n", cl_event_wheel_num_regs(&event_wheel, 2));
+
+  sleep(5);
+  /* destroy */
+  cl_event_wheel_destroy( &event_wheel );
+
+  return(0);
+}
+
+#endif /* __CL_EVENT_WHEEL_TEST__ */
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/main.c b/branches/Ndi/ulp/opensm/user/opensm/main.c
new file mode 100644
index 00000000..77552a63
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/main.c
@@ -0,0 +1,1158 @@
+/*
+ * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Command line interface for opensm. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.23 $ + */ +#pragma warning(disable : 4996) + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include "stdio.h" +#include +#include +#include +#include +#include +#include +#include + +/******************************************************************** + D E F I N E G L O B A L V A R I A B L E S +*********************************************************************/ +/* + This is the global opensm object. + One opensm object is required per subnet. + Future versions could support multiple subnets by + instantiating more than one opensm object. +*/ +osm_opensm_t osm; +volatile unsigned int osm_exit_flag = 0; +HANDLE osm_exit_event = NULL; + +#define GUID_ARRAY_SIZE 64 +#define INVALID_GUID (0xFFFFFFFFFFFFFFFFULL) + +typedef struct _osm_main_args_t +{ + int argc; + char** argv; + boolean_t is_service; +} osm_main_args_t; + +osm_main_args_t osm_main_args; + +enum service_state { + SERVICE_STATE_STARTING, + SERVICE_STATE_STARTED_OK, + SERVICE_STATE_START_FAILED +}; + +enum service_state g_service_state = SERVICE_STATE_STARTING; + +void OsmReportState(IN const char *p_str) +{ + if ( !strcmp(p_str, "SUBNET UP\n") || + !strcmp(p_str, "SM port is down\n") || + !strcmp(p_str, "Errors during initialization\n") || + !strcmp(p_str, "Entering STANDBY state\n") ) + { + InterlockedCompareExchange((LONG *)&g_service_state , SERVICE_STATE_STARTED_OK, SERVICE_STATE_STARTING); + return; + } + + if ( !strcmp(p_str, "Found remote SM with non-matching sm_key. Exiting\n") || + !strcmp(p_str, "Errors on subnet. Duplicate GUID found ""by link from a port to itself. ""See osm log for more details\n") || + !strcmp(p_str, "Errors on subnet. SM found duplicated guids or 12x ""link with lane reversal badly configured. 
""See osm log for more details\n") || + !strcmp(p_str, "Fatal: Error restoring Guid-to-Lid persistent database\n") ) + + { + InterlockedCompareExchange((LONG *)&g_service_state , SERVICE_STATE_START_FAILED, SERVICE_STATE_STARTING); + return; + } + + if( !strcmp(p_str, "OpenSM Rev:openib-1.2.0\n") || + !strcmp(p_str, "OpenSM Rev:openib-2.0.3\n") || + !strcmp(p_str, "OpenSM Rev:openib-3.0.0\n") || + !strcmp(p_str, "Entering MASTER state\n") || + !strcmp(p_str, "Exiting SM\n") ) + { + // This are messages that it is safe to ignore + return; + } + CL_ASSERT(FALSE); + +} + +/********************************************************************** + **********************************************************************/ +void show_usage(void); + +void +show_usage(void) +{ + printf( "\n------- OpenSM - Usage and options ----------------------\n" ); + printf( "Usage: opensm [options]\n"); + printf( "Options:\n" ); + printf( "-c\n" + "--cache-options\n" + " Cache the given command line options into the file\n" + " /var/cache/osm/opensm.opts for use next invocation\n" + " The cache directory can be changed by the environment\n" + " variable OSM_CACHE_DIR\n\n"); + printf( "-g[=]\n" + "--guid[=]\n" + " This option specifies the local port GUID value\n" + " with which OpenSM should bind. OpenSM may be\n" + " bound to 1 port at a time.\n" + " If GUID given is 0, OpenSM displays a list\n" + " of possible port GUIDs and waits for user input.\n" + " Without -g, OpenSM tries to use the default port.\n\n"); + printf( "-l \n" + "--lmc \n" + " This option specifies the subnet's LMC value.\n" + " The number of LIDs assigned to each port is 2^LMC.\n" + " The LMC value must be in the range 0-7.\n" + " LMC values > 0 allow multiple paths between ports.\n" + " LMC values > 0 should only be used if the subnet\n" + " topology actually provides multiple paths between\n" + " ports, i.e. multiple interconnects between switches.\n" + " Without -l, OpenSM defaults to LMC = 0, which allows\n" + " one path between any two ports.\n\n" ); + printf( "-p \n" + "--priority \n" + " This option specifies the SM's PRIORITY.\n" + " This will effect the handover cases, where master\n" + " is chosen by priority and GUID.\n\n" ); + printf( "-smkey \n" + " This option specifies the SM's SM_Key (64 bits).\n" + " This will effect SM authentication.\n\n" ); + printf( "-r\n" + "--reassign_lids\n" + " This option causes OpenSM to reassign LIDs to all\n" + " end nodes. Specifying -r on a running subnet\n" + " may disrupt subnet traffic.\n" + " Without -r, OpenSM attempts to preserve existing\n" + " LID assignments resolving multiple use of same LID.\n\n"); + printf( "-R\n" + "--routing_engine \n" + " This option chooses routing engine instead of Min Hop\n" + " algorithm (default). Supported engines: updn, file\n\n"); + printf( "-U\n" + "--ucast_file \n" + " This option specifies name of the unicast dump file\n" + " from where switch forwarding tables will be loaded.\n\n"); + printf ("-a\n" + "--add_guid_file \n" + " Set the root nodes for the Up/Down routing algorithm\n" + " to the guids provided in the given file (one to a line)\n" + "\n"); + printf( "-o\n" + "--once\n" + " This option causes OpenSM to configure the subnet\n" + " once, then exit. Ports remain in the ACTIVE state.\n\n" ); + printf( "-s \n" + "--sweep \n" + " This option specifies the number of seconds between\n" + " subnet sweeps. 
Specifying -s 0 disables sweeping.\n" + " Without -s, OpenSM defaults to a sweep interval of\n" + " 10 seconds.\n\n" ); + printf( "-t \n" + "--timeout \n" + " This option specifies the time in milliseconds\n" + " used for transaction timeouts.\n" + " Specifying -t 0 disables timeouts.\n" + " Without -t, OpenSM defaults to a timeout value of\n" + " 200 milliseconds.\n\n" ); + printf( "-maxsmps \n" + " This option specifies the number of VL15 SMP MADs\n" + " allowed on the wire at any one time.\n" + " Specifying -maxsmps 0 allows unlimited outstanding\n" + " SMPs.\n" + " Without -maxsmps, OpenSM defaults to a maximum of\n" + " 4 outstanding SMPs.\n\n" ); + printf( "-i \n" + "-ignore-guids \n" + " This option provides the means to define a set of ports\n" + " (by guid) that will be ignored by the link load\n" + " equalization algorithm.\n\n" ); + printf( "-x\n" + "--honor_guid2lid\n" + " This option forces OpenSM to honor the guid2lid file,\n" + " when it comes out of Standby state, if such file exists\n" + " under OSM_CACHE_DIR, and is valid. By default, this is FALSE.\n\n" ); + printf( "-f\n" + "--log_file\n" + " This option defines the log to be the given file.\n" + " By default, the log goes to %temp%/log/osm.log.\n" + " For the log to go to standard output use -f stdout.\n\n"); + printf( "-e\n" + "--erase_log_file\n" + " This option will cause deletion of the log file\n" + " (if it previously exists). By default, the log file\n" + " is accumulative.\n\n"); + printf( "-P\n" + "--Pconfig\n" + " This option defines the optional partition configuration file.\n" + " The default name is '%s'.\n\n", OSM_DEFAULT_PARTITION_CONFIG_FILE); + printf( "-Q\n" + "--no_qos\n" + " This option disables QoS setup.\n\n"); + printf( "-N\n" + "--no_part_enforce\n" + " This option disables partition enforcement on switch external ports.\n\n"); + printf( "-y\n" + "--stay_on_fatal\n" + " This option will cause SM not to exit on fatal initialization\n" + " issues: if SM discovers duplicated guids or 12x link with\n" + " lane reversal badly configured.\n" + " By default, the SM will exit on these errors.\n\n"); + printf( "-v\n" + "--verbose\n" + " This option increases the log verbosity level.\n" + " The -v option may be specified multiple times\n" + " to further increase the verbosity level.\n" + " See the -D option for more information about\n" + " log verbosity.\n\n" ); + printf( "-V\n" + " This option sets the maximum verbosity level and\n" + " forces log flushing.\n" + " The -V is equivalent to '-D 0xFF -d 2'.\n" + " See the -D option for more information about\n" + " log verbosity.\n\n" ); + printf( "-D \n" + " This option sets the log verbosity level.\n" + " A flags field must follow the -D option.\n" + " A bit set/clear in the flags enables/disables a\n" + " specific log level as follows:\n" + " BIT LOG LEVEL ENABLED\n" + " ---- -----------------\n" + " 0x01 - ERROR (error messages)\n" + " 0x02 - INFO (basic messages, low volume)\n" + " 0x04 - VERBOSE (interesting stuff, moderate volume)\n" + " 0x08 - DEBUG (diagnostic, high volume)\n" + " 0x10 - FUNCS (function entry/exit, very high volume)\n" + " 0x20 - FRAMES (dumps all SMP and GMP frames)\n" + " 0x40 - ROUTING (dump FDB routing information)\n" + " 0x80 - currently unused.\n" + " Without -D, OpenSM defaults to ERROR + INFO (0x3).\n" + " Specifying -D 0 disables all messages.\n" + " Specifying -D 0xFF enables all messages (see -V).\n" + " High verbosity levels may require increasing\n" + " the transaction timeout with the -t option.\n\n" ); + printf( "-d 
\n" + "--debug \n" + " This option specifies a debug option.\n" + " These options are not normally needed.\n" + " The number following -d selects the debug\n" + " option to enable as follows:\n" + " OPT Description\n" + " --- -----------------\n" + " -d0 - Ignore other SM nodes\n" + " -d1 - Force single threaded dispatching\n" + " -d2 - Force log flushing after each log message\n" + " -d3 - Disable multicast support\n" + " -d10 - Put OpenSM in testability mode\n" + " Without -d, no debug options are enabled\n\n" ); + printf( "-h\n" + "--help\n" + " Display this usage info then exit.\n\n" ); + printf( "-?\n" + " Display this usage info then exit.\n\n" ); + fflush( stdout ); + osm_exit_flag = TRUE; +} + +/********************************************************************** + **********************************************************************/ +void show_menu(void); + +void +show_menu(void) +{ + printf("\n------- Interactive Menu -------\n"); + printf("X - Exit.\n\n"); +} + +/********************************************************************** + **********************************************************************/ +ib_net64_t +get_port_guid( + IN osm_opensm_t *p_osm, uint64_t port_guid, + IN boolean_t is_service) +{ + uint32_t i; + uint32_t choice = 0; + char junk[128]; + boolean_t done_flag = FALSE; + ib_api_status_t status; + uint32_t num_ports = GUID_ARRAY_SIZE; + ib_port_attr_t attr_array[GUID_ARRAY_SIZE]; + + /* + Call the transport layer for a list of local port + GUID values. + */ + status = osm_vendor_get_all_port_attr( p_osm->p_vendor, attr_array, &num_ports ); + if( status != IB_SUCCESS ) + { + printf( "\nError from osm_vendor_get_all_port_attr (%x)\n", status); + return( 0 ); + } + + /* if num_ports is 0 - return 0 */ + if( num_ports == 0 ) + { + printf( "\nNo local ports detected!\n" ); + return( 0 ); + } + /* If num_ports is 1, then there is only one possible port to use. Use it. */ + if ( num_ports == 1 ) + { + printf("Using default GUID 0x%" PRIx64 "\n", cl_hton64(attr_array[0].port_guid)); + return( attr_array[0].port_guid ); + } + +#if defined ( OSM_VENDOR_INTF_OPENIB ) + /* If port_guid is 0, and this is gen2 - use the default port whose info is in attr_array[0] */ + if ( port_guid == 0 ) + { + printf("Using default GUID 0x%" PRIx64 "\n", cl_hton64(attr_array[0].port_guid)); + return( attr_array[0].port_guid ); + } +#endif /* OSM_VENDOR_INTF_OPENIB */ + + /* If port_guid is 0, and we are in windows - find the first port with link_state != DOWN and + use it as default port. */ + if ( port_guid == 0 ) + { + for ( i = 0; i < num_ports; i++ ) + { + if (attr_array[i].link_state > IB_LINK_DOWN) + { + /* Use this port */ + printf("Using default guid 0x%" PRIx64 "\n", cl_hton64(attr_array[i].port_guid)); + return( attr_array[i].port_guid ); + } + } + /* If we are running as a service, and all ports are doen we return the + first port (we can't open a window, as a service)*/ + if (is_service) { + return( attr_array[0].port_guid ); + } + } + + /* More than one possible port - list all ports and let the user to choose. */ + while( done_flag == FALSE ) + { + printf( "\nChoose a local port number with which to bind:\n\n" ); + /* If this is gen2 code - then port 0 has details of the default port used. + no need to print it. + If this is not gen2 code - need to print details of all ports. 
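+     Either way the menu is 1-based: with OSM_VENDOR_INTF_OPENIB the
+     entries map 1:1 onto attr_array[1..num_ports-1] (slot 0 holding the
+     default port), while without it entry i maps to attr_array[i-1],
+     which is why only the non-gen2 branch below subtracts 1 from the
+     choice.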
*/ +#if defined ( OSM_VENDOR_INTF_OPENIB ) + for( i = 1; i < num_ports; i++ ) + { + printf("\t%u: GUID = 0x%8" PRIx64 ", lid = 0x%04X, state = %s\n", + i, cl_ntoh64( attr_array[i].port_guid ), + attr_array[i].lid, + ib_get_port_state_str( attr_array[i].link_state ) ); + } + printf( "\nEnter choice (1-%u): ", i-1 ); +# else + for( i = 0; i < num_ports; i++ ) + { + /* + Print the index + 1 since by convention, port numbers + start with 1 on host channel adapters. + */ + + printf("\t%u: GUID = 0x%8" PRIx64 ", lid = 0x%04X, state = %s\n", + i+1, cl_ntoh64( attr_array[i].port_guid ), + attr_array[i].lid, + ib_get_port_state_str( attr_array[i].link_state ) ); + } + printf( "\nEnter choice (1-%u): ", i ); +#endif /* OSM_VENDOR_INTF_OPENIB */ + + fflush( stdout ); + if (scanf( "%u", &choice )) + { + /* If gen2 code - choice can be between 1 to num_ports-1 + if not gen2 code - choice can be between 1 to num_ports */ +#if defined ( OSM_VENDOR_INTF_OPENIB ) + if( choice >= num_ports ) +# else + if( choice > num_ports || choice < 1 ) +#endif /* OSM_VENDOR_INTF_OPENIB */ + { + printf("\nError: Lame choice!\n"); + fflush( stdin ); + } + else + { + done_flag = TRUE; + } + } + else + { + /* get rid of the junk in the selection line */ + scanf( "%s", junk ); + printf("\nError: Lame choice!\n"); + fflush( stdin ); + } + } +#if defined ( OSM_VENDOR_INTF_OPENIB ) + printf("Choice guid=0x%8" PRIx64 "\n", cl_ntoh64( attr_array[choice].port_guid )); + return( attr_array[choice].port_guid ); +# else + return( attr_array[choice - 1].port_guid ); +#endif /* OSM_VENDOR_INTF_OPENIB */ +} + +/********************************************************************** + **********************************************************************/ +#define OSM_MAX_IGNORE_GUID_LINES_LEN 128 +int +parse_ignore_guids_file(IN char *guids_file_name, + IN osm_opensm_t *p_osm) +{ + FILE *fh; + char line[OSM_MAX_IGNORE_GUID_LINES_LEN]; + char *p_c, *p_ec; + uint32_t line_num = 0; + uint64_t port_guid; + ib_api_status_t status = IB_SUCCESS; + unsigned int port_num; + + OSM_LOG_ENTER( &p_osm->log, parse_ignore_guids_file ); + + fh = fopen( guids_file_name, "r" ); + if( fh == NULL ) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "parse_ignore_guids_file: ERR 0601: " + "Unable to open ignore guids file (%s)\n" ); + status = IB_ERROR; + goto Exit; + } + + /* + * Parse the file and add to the ignore guids map. + */ + while( fgets( line, OSM_MAX_IGNORE_GUID_LINES_LEN, fh ) != NULL ) + { + line_num++; + p_c = line; + while ( (*p_c == ' ') && (*p_c != '\0')) p_c++ ; + port_guid = cl_hton64( strtoull( p_c, &p_ec, 16 ) ); + if (p_ec == p_c) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "parse_ignore_guids_file: ERR 0602: " + "Error in line (%u): %s\n" , + line_num, line + ); + status = IB_ERROR; + goto Exit; + } + + while ( (*p_ec == ' ') && (*p_ec != '\0')) p_ec++ ; + if (! sscanf(p_ec, "%d", &port_num)) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "parse_ignore_guids_file: ERR 0603: " + "Error in line (%u): %s\n" , + line_num, p_ec + ); + status = IB_ERROR; + goto Exit; + } + /* Make sure the port_num isn't greater than 256 */ + if (port_num > 256) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "parse_ignore_guids_file: ERR 0604: " + "Error in line (%u): %s. 
" + "port number is greater than 256 (%d) \n", + line_num, p_ec, port_num + ); + status = IB_ERROR; + goto Exit; + } + /* ok insert it */ + osm_port_prof_set_ignored_port(&p_osm->subn, port_guid, (uint8_t)port_num); + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "parse_ignore_guids_file: " + "Inserted Port: 0x%" PRIx64 " into ignored guids list\n" , + port_guid + ); + + } + + fclose( fh ); + + Exit: + OSM_LOG_EXIT( &p_osm->log ); + return ( status ); + +} + +/********************************************************************** + **********************************************************************/ +int +opensm_main( + void *osm_main_args) +{ + int argc = ((osm_main_args_t*)osm_main_args)->argc; + char** argv = ((osm_main_args_t*)osm_main_args)->argv; + osm_subn_opt_t opt; + ib_net64_t sm_key = 0; + ib_api_status_t status; + uint32_t log_flags = OSM_LOG_DEFAULT_LEVEL; + int long temp; + uint32_t dbg_lvl; + boolean_t run_once_flag = FALSE; + boolean_t mem_track = FALSE; + uint32_t next_option; + uint32_t exitTimeout; + boolean_t cache_options = FALSE; + char *ignore_guids_file_name = NULL; + uint32_t val; + const char * const short_option = "i:f:ed:g:l:L:s:t:a:R:U:P:NQvVhorcyx"; + + + /* + In the array below, the 2nd parameter specified the number + of arguments as follows: + 0: no arguments + 1: argument + 2: optional + */ + const struct option long_option[] = + { + { "service", 0, NULL, 'Z'}, + { "debug", 1, NULL, 'd'}, + { "guid", 1, NULL, 'g'}, + { "ignore_guids", 1, NULL, 'i'}, + { "lmc", 1, NULL, 'l'}, + { "sweep", 1, NULL, 's'}, + { "timeout", 1, NULL, 't'}, + { "verbose", 0, NULL, 'v'}, + { "D", 1, NULL, 'D'}, + { "log_file", 1, NULL, 'f'}, + { "log_limit", 1, NULL, 'L'}, + { "erase_log_file",0, NULL, 'e'}, + { "Pconfig", 1, NULL, 'P'}, + { "no_part_enforce",0,NULL, 'N'}, + { "no_qos", 0, NULL, 'Q'}, + { "maxsmps", 1, NULL, 'n'}, + { "console", 0, NULL, 'q'}, + { "V", 0, NULL, 'V'}, + { "help", 0, NULL, 'h'}, + { "once", 0, NULL, 'o'}, + { "reassign_lids", 0, NULL, 'r'}, + { "priority", 1, NULL, 'p'}, + { "smkey", 1, NULL, 'k'}, + { "routing_engine",1, NULL, 'R'}, + { "ucast_file" ,1, NULL, 'U'}, + { "add_guid_file", 1, NULL, 'a'}, + { "cache-options", 0, NULL, 'c'}, + { "stay_on_fatal", 0, NULL, 'y'}, + { "honor_guid2lid", 0, NULL, 'x'}, + { NULL, 0, NULL, 0 } /* Required at the end of the array */ + }; + printf("-------------------------------------------------\n"); + printf("%s\n", OSM_VERSION); + + osm_subn_set_default_opt(&opt); + osm_subn_parse_conf_file(&opt); + + printf("Command Line Arguments:\n"); + do + { + next_option = getopt_long_only(argc, argv, short_option, + long_option, NULL); + switch(next_option) + { + case 'Z': + /* + service option - nothing to do + */ + break; + + case 'o': + /* + Run once option. + */ + run_once_flag = TRUE; + printf(" Run Once\n"); + break; + + case 'r': + /* + Reassign LIDs subnet option. + */ + opt.reassign_lids = TRUE; + printf(" Reassign LIDs\n"); + break; + + case 'i': + /* + Specifies ignore guids file. + */ + ignore_guids_file_name = optarg; + printf(" Ignore Guids File = %s\n", ignore_guids_file_name); + break; + + case 'g': + /* + Specifies port guid with which to bind. + */ + opt.guid = cl_hton64( strtoull( optarg, NULL, 16 )); + if (! 
opt.guid) + { + /* If guid is 0 - need to display the guid list */ + opt.guid = INVALID_GUID; + } + else + printf(" Guid <0x%"PRIx64">\n", cl_hton64( opt.guid )); + break; + + case 's': + val = strtol(optarg, NULL, 0); + /* Check that the number is not too large */ + if ( ((uint32_t)(val * 1000000)) / 1000000 != val ) + fprintf(stderr, "ERROR: sweep interval given is too large. Ignoring it.\n"); + else + { + opt.sweep_interval = val; + printf(" sweep interval = %d\n", opt.sweep_interval); + } + break; + + case 't': + opt.transaction_timeout = strtol(optarg, NULL, 0); + printf(" Transaction timeout = %d\n", opt.transaction_timeout); + break; + + case 'n': + opt.max_wire_smps = strtol(optarg, NULL, 0); + if( opt.max_wire_smps <= 0 ) + opt.max_wire_smps = 0x7FFFFFFF; + printf(" Max wire smp's = %d\n", opt.max_wire_smps); + break; + + case 'q': + /* + * OpenSM interactive console + */ + opt.console = TRUE; + printf(" Enabling OpenSM interactive console\n"); + break; + + case 'd': + dbg_lvl = strtol(optarg, NULL, 0); + printf(" d level = 0x%x\n", dbg_lvl); + if (dbg_lvl == 0) + { + printf(" Debug mode: Ignore Other SMs\n"); + opt.ignore_other_sm = TRUE; + } + else if(dbg_lvl == 1) + { + printf(" Debug mode: Forcing Single Thread\n"); + opt.single_thread = TRUE; + } + else if(dbg_lvl == 2) + { + printf(" Debug mode: Force Log Flush\n"); + opt.force_log_flush = TRUE; + } + else if(dbg_lvl == 3) + { + printf(" Debug mode: Disable multicast support\n"); + opt.disable_multicast = TRUE; + } + /* + * NOTE: Debug level 4 used to be used for memory tracking + * but this is now deprecated + */ + else if(dbg_lvl >= 10) + { + /* Please look at osm_subnet.h for list of testability modes. */ + opt.testability_mode = dbg_lvl - 9; + } + else + printf( " OpenSM: Unknown debug option %d ignored\n", + dbg_lvl ); + break; + + case 'l': + temp = strtol(optarg, NULL, 0); + if( temp > 7 ) + { + fprintf(stderr, "ERROR: LMC must be 7 or less."); + return( -1 ); + } + opt.lmc = (uint8_t)temp; + printf(" LMC = %d\n", temp); + break; + + case 'D': + log_flags = strtol(optarg, NULL, 0); + printf(" verbose option -D = 0x%x\n", log_flags); + break; + + case 'f': + if (!strcmp(optarg, "stdout")) + /* output should be to standard output */ + opt.log_file = NULL; + else + opt.log_file = optarg; + break; + + case 'L': + opt.log_max_size = strtoul(optarg, NULL, 0) * (1024*1024); + printf(" Log file max size is %lu bytes\n", opt.log_max_size); + break; + + case 'e': + opt.accum_log_file = FALSE; + printf(" Creating new log file\n"); + break; + + case 'P': + opt.partition_config_file = optarg; + break; + + case 'N': + opt.no_partition_enforcement = TRUE; + break; + + case 'Q': + opt.no_qos = TRUE; + break; + + case 'y': + opt.exit_on_fatal = FALSE; + printf(" Staying on fatal initialization errors\n"); + break; + + case 'v': + log_flags = (log_flags <<1 )|1; + printf(" Verbose option -v (log flags = 0x%X)\n", log_flags ); + break; + + case 'V': + log_flags = 0xFFFFFFFF; + opt.force_log_flush = TRUE; + printf(" Big V selected\n"); + break; + + case 'p': + temp = strtol(optarg, NULL, 0); + if (0 > temp || 15 < temp) { + fprintf(stderr, "ERROR: priority must be between 0 and 15\n"); + return (-1); + } + opt.sm_priority = (uint8_t)temp; + printf(" Priority = %d\n", temp); + break; + + case 'k': + sm_key = cl_hton64( strtoull( optarg, NULL, 16 )); + printf(" SM Key <0x%"PRIx64">\n", cl_hton64( sm_key )); + opt.sm_key = sm_key; + break; + + case 'R': + opt.routing_engine_name = optarg; + printf(" Activate \'%s\' routing engine\n", 
optarg); + break; + + case 'U': + opt.ucast_dump_file = optarg; + printf(" Ucast dump file is \'%s\'\n", optarg); + break; + + case 'a': + /* + Specifies port guids file + */ + opt.updn_guid_file = optarg; + printf (" UPDN Guid File: %s\n", opt.updn_guid_file ); + break; + + case 'c': + cache_options = TRUE; + printf (" Caching command line options\n"); + break; + + case 'x': + opt.honor_guid2lid_file = TRUE; + printf (" Honor guid2lid file, if possible\n"); + break; + + case 'h': + case '?': + case ':': + show_usage(); + return 0; + break; + + case -1: + break; /* done with option */ + default: /* something wrong */ + abort(); + } + } + while(next_option != -1); + + if (opt.log_file != NULL ) + printf(" Log File: %s\n", opt.log_file ); + /* Done with options description */ + printf("-------------------------------------------------\n"); + + opt.log_flags = (uint8_t)log_flags; + + status = osm_opensm_init( &osm, &opt ); + if( status != IB_SUCCESS ) + { + char buffer[LOG_ENTRY_SIZE_MAX]; + const char *err_str = ib_get_err_str( status ); + if (err_str == NULL) + { + err_str = "Unknown Error Type"; + } + printf( "\nError from osm_opensm_init: %s.\n", + err_str); + sprintf(buffer, "Error from osm_opensm_init: %s. See opensm log file for more details", err_str); + /* We will just exit, and not go to Exit, since we don't + want the destroy to be called. */ + cl_log_event("OpenSM", LOG_ERR, buffer , NULL, 0); + g_service_state = SERVICE_STATE_START_FAILED; + return( status ); + } + + /* + If the user didn't specify a GUID on the command line, + then get a port GUID value with which to bind. + */ + if( opt.guid == 0 || cl_hton64(opt.guid) == CL_HTON64(INVALID_GUID)) + opt.guid = get_port_guid( &osm, opt.guid, ((osm_main_args_t*)osm_main_args)->is_service ); + + if ( opt.guid == 0 ) + { + printf( "Error: Could not get port guid \n" ); + status = IB_ERROR; + goto Exit; + } + + if ( cache_options == TRUE ) + osm_subn_write_conf_file( &opt ); + + status = osm_opensm_bind( &osm, opt.guid ); + if( status != IB_SUCCESS ) + { + printf( "\nError from osm_opensm_bind (0x%X)\n", status ); + goto Exit; + } + + /* + * Define some port guids to ignore during path equalization + */ + if (ignore_guids_file_name != NULL) + { + status = parse_ignore_guids_file(ignore_guids_file_name, &osm); + if( status != IB_SUCCESS ) + { + printf( "\nError from parse_ignore_guids_file (0x%X)\n", status ); + goto Exit; + } + } + + osm_opensm_sweep( &osm ); + + status = osm_opensm_wait_for_subnet_up( + &osm, EVENT_NO_TIMEOUT, TRUE ); + + if( status != CL_SUCCESS ) + { + printf( "\nError from osm_opensm_wait_for_subnet_up (0x%X)\n", status ); + goto Exit; + } + + if( run_once_flag == FALSE ) + { + /* + Sit here forever + In the future, some sort of console interactivity could + be implemented in this loop. + */ + WaitForSingleObject(osm_exit_event, INFINITE); + osm_exit_flag = TRUE; + } + + /* wait for all transactions to end */ + CL_ASSERT( ((opt.polling_retry_number + 1) * opt.transaction_timeout / 1000.0) < 0x100000000ULL ); + exitTimeout = + (uint32_t) ((opt.polling_retry_number + 1) * opt.transaction_timeout / 1000.0); + + if (exitTimeout < 3) exitTimeout = 3; + + /* + printf( "\n------- OpenSM Exiting (in %u seconds) -------\n", + exitTimeout); + sleep(exitTimeout); + */ + + if (osm.mad_pool.mads_out) + fprintf(stdout, + "There are still %u MADs out. 
Forcing the exit of the OpenSM application...\n", + osm.mad_pool.mads_out); + + Exit: + g_service_state = SERVICE_STATE_START_FAILED; + osm_opensm_destroy( &osm ); + + exit( 0 ); +} + +SERVICE_STATUS OsmServiceStatus; +SERVICE_STATUS_HANDLE OsmServiceStatusHandle; + +VOID SvcDebugOut(LPSTR String, DWORD Status); +VOID WINAPI OsmServiceCtrlHandler (DWORD opcode); +__stdcall OsmServiceStart (DWORD argc, LPTSTR *argv); + +DWORD OsmServiceInitialization (DWORD argc, LPTSTR *argv, + DWORD *specificError); + +int __cdecl +main ( + int argc, + char* argv[] ) +{ + int i; + boolean_t run_as_service = FALSE; + osm_main_args.argc = argc; + osm_main_args.argv = argv; + /* If there are arguments that the executable is ran with, then this is + not running as service - just run the opensm_main. */ + for (i = 0 ; i< argc ; i++) + if (!strcmp(argv[i], "--service")) + { + run_as_service = TRUE; + osm_main_args.is_service = TRUE; + break; + } + osm_exit_event = CreateEvent(NULL, FALSE, FALSE, NULL); + if(osm_exit_event == NULL) + { + printf( "\nCreateEvent failed gle=%d\n", GetLastError()); + return( 1 ); + } + + if (!run_as_service) + { + /* Running as executable */ + osm_main_args.is_service = FALSE; + return opensm_main(&osm_main_args); + } + else + { + /* Running as service */ + SERVICE_TABLE_ENTRY DispatchTable[] = + { + { "OsmService", OsmServiceStart }, + { NULL, NULL } + }; + // Give older versions of opensm a chance to stop + Sleep(3000); + + if (!StartServiceCtrlDispatcher( DispatchTable)) + { + SvcDebugOut(" [OSM_SERVICE] StartServiceCtrlDispatcher (%d)\n", + GetLastError()); + } + } +} + +VOID SvcDebugOut(LPSTR String, DWORD Status) +{ + CHAR Buffer[1024]; + if (strlen(String) < 1000) + { + sprintf(Buffer, String, Status); + OutputDebugStringA(Buffer); + } +} + +VOID WINAPI OsmServiceCtrlHandler (DWORD Opcode) +{ + DWORD status; + + switch(Opcode) + { + case SERVICE_CONTROL_SHUTDOWN: + case SERVICE_CONTROL_STOP: + // Do whatever it takes to stop here. + osm_exit_flag = TRUE; + SetEvent(osm_exit_event); + OsmServiceStatus.dwWin32ExitCode = 0; + OsmServiceStatus.dwCurrentState = SERVICE_STOPPED; + OsmServiceStatus.dwCheckPoint = 0; + OsmServiceStatus.dwWaitHint = 0; + + if (!SetServiceStatus (OsmServiceStatusHandle, + &OsmServiceStatus)) + { + status = GetLastError(); + SvcDebugOut(" [OSM_SERVICE] SetServiceStatus error %ld\n", + status); + } + + SvcDebugOut(" [OSM_SERVICE] Leaving OsmService \n",0); + return; + + case SERVICE_CONTROL_INTERROGATE: + // Fall through to send current status. + break; + + default: + SvcDebugOut(" [OSM_SERVICE] Unrecognized opcode %ld\n", + Opcode); + } + + // Send current status. 
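+    // (SERVICE_CONTROL_INTERROGATE deliberately falls through to this
+    // point: the SCM only expects the current, unmodified status to be
+    // re-reported, so no fields are changed before the call below.)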
+ if (!SetServiceStatus (OsmServiceStatusHandle, &OsmServiceStatus)) + { + status = GetLastError(); + SvcDebugOut(" [OSM_SERVICE] SetServiceStatus error %ld\n", + status); + } + return; +} + +__stdcall OsmServiceStart (DWORD argc, LPTSTR *argv) +{ + DWORD status; + DWORD specificError; + + OsmServiceStatus.dwServiceType = SERVICE_WIN32; + OsmServiceStatus.dwCurrentState = SERVICE_START_PENDING; + OsmServiceStatus.dwControlsAccepted = SERVICE_ACCEPT_STOP | + SERVICE_ACCEPT_PAUSE_CONTINUE | SERVICE_ACCEPT_SHUTDOWN; + OsmServiceStatus.dwWin32ExitCode = 0; + OsmServiceStatus.dwServiceSpecificExitCode = 0; + OsmServiceStatus.dwCheckPoint = 0; + OsmServiceStatus.dwWaitHint = 2000; + + OsmServiceStatusHandle = RegisterServiceCtrlHandler( + "OsmService", + OsmServiceCtrlHandler); + + if (OsmServiceStatusHandle == (SERVICE_STATUS_HANDLE)0) + { + SvcDebugOut(" [OSM_SERVICE] RegisterServiceCtrlHandler failed %d\n", GetLastError()); + return 0; + } + + // Initialization code goes here. + status = OsmServiceInitialization(argc,argv, &specificError); + while (status == NO_ERROR && g_service_state == SERVICE_STATE_STARTING) + { + Sleep(1000); + OsmServiceStatus.dwCheckPoint++; + if (!SetServiceStatus (OsmServiceStatusHandle, &OsmServiceStatus)) + { + status = GetLastError(); + SvcDebugOut(" [OSM_SERVICE] SetServiceStatus error %ld\n",status); + } + + } + CL_ASSERT(g_service_state == SERVICE_STATE_STARTED_OK || + g_service_state == SERVICE_STATE_START_FAILED || + status != NO_ERROR); + + // Handle error condition + if (status != NO_ERROR || g_service_state == SERVICE_STATE_START_FAILED) + { + OsmServiceStatus.dwCurrentState = SERVICE_STOPPED; + OsmServiceStatus.dwCheckPoint = 0; + OsmServiceStatus.dwWaitHint = 0; + OsmServiceStatus.dwWin32ExitCode = status; + OsmServiceStatus.dwServiceSpecificExitCode = specificError; + + SetServiceStatus (OsmServiceStatusHandle, &OsmServiceStatus); + return 0; + } + + // Initialization complete - report running status. + OsmServiceStatus.dwCurrentState = SERVICE_RUNNING; + OsmServiceStatus.dwCheckPoint = 0; + OsmServiceStatus.dwWaitHint = 0; + + if (!SetServiceStatus (OsmServiceStatusHandle, &OsmServiceStatus)) + { + status = GetLastError(); + SvcDebugOut(" [OSM_SERVICE] SetServiceStatus error %ld\n",status); + } + + // This is where the service does its work. + SvcDebugOut(" [OSM_SERVICE] Returning the Main Thread \n",0); + + return 1; +} + +// Stub initialization function. +DWORD OsmServiceInitialization(DWORD argc, LPTSTR *argv, + DWORD *specificError) +{ + if (CreateThread(NULL, 0, opensm_main, &osm_main_args, 0, NULL) == NULL) + { + SvcDebugOut(" [OSM_SERVICE] failed to create thread (%d)\n", + GetLastError()); + return(1); + } + return(0); +} diff --git a/branches/Ndi/ulp/opensm/user/opensm/opensm.opts b/branches/Ndi/ulp/opensm/user/opensm/opensm.opts new file mode 100644 index 00000000..122e151e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/opensm.opts @@ -0,0 +1,139 @@ +# +# DEVICE ATTRIBUTES OPTIONS +# +# The port GUID on which the OpenSM is running. +#guid + +# M_Key value sent to all ports qualifing all Set(PortInfo). 
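+# (Each option in this file is a "<name> <value>" pair; a leading '#'
+# keeps the built-in default shown. For example, uncommenting the next
+# line as "m_key 0x0000000000000001" would push that M_Key to every
+# port - an illustrative value only, not a recommendation.)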
+#m_key 0x0000000000000000
+
+# The lease period used for the M_Key on this subnet in [msec]
+#m_key_lease_period 0
+
+# SM_Key value of the SM to qualify rcv SA queries as 'trusted'
+#sm_key 0x0100000000000000
+
+# Subnet prefix used on this subnet
+#subnet_prefix 0xfe80000000000000
+
+# The LMC value used on this subnet
+#lmc 0
+
+# The code for the maximal time a packet can live in a switch
+# The actual time is 4.096usec * 2^<packet_life_time>
+# The value 0x14 disables this mechanism
+#packet_life_time 0x12
+
+# The code for the maximal time a packet can wait at the head of
+# the transmission queue.
+# The actual time is 4.096usec * 2^<head_of_queue_lifetime>
+# The value 0x14 disables this mechanism
+#head_of_queue_lifetime 0x12
+
+# The maximal time a packet can wait at the head of queue on a
+# switch port connected to a HCA
+#leaf_head_of_queue_lifetime 0x0c
+
+# Limit the maximal operational VLs
+#max_op_vls 5
+
+# The subnet_timeout code that will be set for all the ports
+# The actual timeout is 4.096usec * 2^<subnet_timeout>
+#subnet_timeout 18
+
+# Threshold of local phy errors for sending Trap 129
+#local_phy_errors_threshold 0x08
+
+# Threshold of credits over-run errors for sending Trap 129
+#overrun_errors_threshold 0x08
+
+#
+# SWEEP OPTIONS
+#
+# The number of seconds between subnet sweeps (0 disables it)
+#sweep_interval 10
+
+# If TRUE cause all lids to be re-assigned
+#reassign_lids FALSE
+
+# If TRUE ignore existing LFT entries on first sweep (default).
+# Otherwise only non-minimal hop cases are modified.
+# NOTE: A standby SM clears its first sweep flag - since the
+# master SM already sweeps...
+#reassign_lfts TRUE
+
+# If TRUE forces every sweep to be a heavy sweep
+#force_heavy_sweep FALSE
+
+# If TRUE every trap will cause a heavy sweep.
+# NOTE: successive identical traps (>10) are suppressed
+#sweep_on_trap TRUE
+
+#
+# ROUTING OPTIONS
+#
+# If TRUE do not count switches as link subscriptions
+#port_profile_switch_nodes FALSE
+
+# Activate the Up/Down routing algorithm
+#updn_activate FALSE
+
+#
+# HANDOVER - MULTIPLE SM's OPTIONS
+#
+# SM priority used for deciding who is the master
+#sm_priority 1
+
+# If TRUE other SM's on the subnet should be ignored
+#ignore_other_sm FALSE
+
+# Timeout in [msec] between two polls of active master SM
+#sminfo_polling_timeout 10000
+
+# Number of failing polls of remote SM that declares it dead
+#polling_retry_number 4
+
+#
+# TIMING AND THREADING OPTIONS
+#
+# Number of MADs sent in parallel
+#max_wire_smps 4
+
+# The time allowed for a transaction to finish in [msec]
+#transaction_timeout 200
+
+# Maximal time in [msec] a message can stay in the incoming message queue.
+# If there is more than one message in the queue and the last message
+# has stayed in the queue longer than this value, any SA request will be
+# immediately returned with a BUSY status.
+#max_msg_fifo_timeout 10000 + +# Use a single thread for handling SA queries +#single_thread FALSE + +# +# DEBUG FEATURES +# +# The log flags used +#log_flags 0x03 + +# Force flush of the log file after each log message +#force_log_flush TRUE + +# Log file to be used +#log_file + +#accum_log_file TRUE + +# The directory to hold the file OpenSM dumps +#dump_files_dir + +# If TRUE if OpenSM should disable multicast support +#no_multicast_option FALSE + +# No multicast routing is performed if TRUE +#disable_multicast FALSE + +# If TRUE opensm will exit on fatal initialization issues +#exit_on_fatal TRUE + diff --git a/branches/Ndi/ulp/opensm/user/opensm/opensm.rc b/branches/Ndi/ulp/opensm/user/opensm/opensm.rc new file mode 100644 index 00000000..525ef064 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/opensm.rc @@ -0,0 +1,46 @@ +/* + * Copyright (c) 2005 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_APP +#define VER_FILESUBTYPE VFT2_UNKNOWN +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Debug OpenSM Subnet Manager Application" +#define VER_INTERNALNAME_STR "opensm.exe" +#define VER_ORIGINALFILENAME_STR "opensm.exe" +#else +#define VER_FILEDESCRIPTION_STR "OpenSM Subnet Manager Application" +#define VER_INTERNALNAME_STR "opensm.exe" +#define VER_ORIGINALFILENAME_STR "opensm.exe" +#endif +#include diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm.h b/branches/Ndi/ulp/opensm/user/opensm/osm.h new file mode 100644 index 00000000..2caab674 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm.h @@ -0,0 +1,68 @@ +/*++ +============================================================================= +Copyright (c) 2005 Mellanox Technologies + +Module Name: + + osm.mc + +Abstract: + + OpenSM event log messages + +Authors: + + Leonid Keller + +Environment: + + Kernel Mode . 
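+
+Notes:
+
+    This header pairs with osm.mc below: the message-code layout block
+    that follows is the standard message-compiler (mc.exe) documentation
+    stub, so the file is best regenerated from osm.mc with the message
+    compiler rather than edited by hand.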
+ +============================================================================= +--*/ + +// +// Values are 32 bit values layed out as follows: +// +// 3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1 +// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 +// +---+-+-+-----------------------+-------------------------------+ +// |Sev|C|R| Facility | Code | +// +---+-+-+-----------------------+-------------------------------+ +// +// where +// +// Sev - is the severity code +// +// 00 - Success +// 01 - Informational +// 10 - Warning +// 11 - Error +// +// C - is the Customer code flag +// +// R - is a reserved bit +// +// Facility - is the facility code +// +// Code - is the facility's status code +// +// +// Define the facility codes +// + + +// +// Define the severity codes +// + + +// +// MessageId: EVENT_OSM_ANY_INFO +// +// MessageText: +// +// %1 +// +#define EVENT_OSM_ANY_INFO 0x00000000L + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm.mc b/branches/Ndi/ulp/opensm/user/opensm/osm.mc new file mode 100644 index 00000000..3030f718 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm.mc @@ -0,0 +1,29 @@ +;/*++ +;============================================================================= +;Copyright (c) 2005 Mellanox Technologies +; +;Module Name: +; +; osm.mc +; +;Abstract: +; +; OpenSM event log messages +; +;Authors: +; +; Leonid Keller +; +;Environment: +; +; Kernel Mode . +; +;============================================================================= +;--*/ +; + +MessageId=0x0000 SymbolicName=EVENT_OSM_ANY_INFO +Language=English +%1 +. + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm.rc b/branches/Ndi/ulp/opensm/user/opensm/osm.rc new file mode 100644 index 00000000..116522b7 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm.rc @@ -0,0 +1,2 @@ +LANGUAGE 0x9,0x1 +1 11 MSG00001.bin diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_console.c b/branches/Ndi/ulp/opensm/user/opensm/osm_console.c new file mode 100644 index 00000000..9eadd40d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_console.c @@ -0,0 +1,226 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#define _GNU_SOURCE /* for getline */ +#include +#include +#include +#include + +#define OSM_COMMAND_LINE_LEN 120 +#define OSM_COMMAND_PROMPT "$ " + +struct command { + char *name; + void (*help_function)(void); + void (*parse_function)(char **p_last, osm_opensm_t *p_osm); +}; + +static const struct command console_cmds[]; + +static inline char *next_token(char **p_last) +{ + return strtok_r(NULL, " \t\n", p_last); +} + +static void help_command() +{ + int i; + + printf("Supported commands and syntax:\n"); + printf("help []\n"); + /* skip help command */ + for (i = 1; console_cmds[i].name; i++) + console_cmds[i].help_function(); +} + +static void help_loglevel() +{ + printf("loglevel []\n"); +} + +static void help_priority() +{ + printf("priority []\n"); +} + +/* more help routines go here */ + +static void help_parse(char **p_last, osm_opensm_t *p_osm) +{ + char *p_cmd; + int i, found = 0; + + p_cmd = next_token(p_last); + if (!p_cmd) + help_command(); + else { + for (i = 1; console_cmds[i].name; i++) { + if (!strcmp(p_cmd, console_cmds[i].name)) { + found = 1; + console_cmds[i].help_function(); + break; + } + } + if (!found) { + printf("Command %s not found\n\n", p_cmd); + help_command(); + } + } +} + +static void loglevel_parse(char **p_last, osm_opensm_t *p_osm) +{ + char *p_cmd; + int level; + + p_cmd = next_token(p_last); + if (!p_cmd) + printf("Current log level is 0x%x\n", osm_log_get_level(&p_osm->log)); + else { + /* Handle x, 0x, and decimal specification of log level */ + if (!strncmp(p_cmd, "x", 1)) { + p_cmd++; + level = strtoul(p_cmd, NULL, 16); + } else { + if (!strncmp(p_cmd, "0x", 2)) { + p_cmd += 2; + level = strtoul(p_cmd, NULL, 16); + } else + level = strtol(p_cmd, NULL, 10); + } + if ((level >= 0) && (level < 256)) { + printf("Setting log level to 0x%x\n", level); + osm_log_set_level(&p_osm->log, level); + } else + printf("Invalid log level 0x%x\n", level); + } +} + +static void priority_parse(char **p_last, osm_opensm_t *p_osm) +{ + char *p_cmd; + int priority; + + p_cmd = next_token(p_last); + if (!p_cmd) + printf("Current sm-priority is %d\n", p_osm->subn.opt.sm_priority); + else { + priority = strtol(p_cmd, NULL, 0); + if (0 > priority || 15 < priority) + printf("Invalid sm-priority %d; must be between 0 and 15\n", priority); + else { + printf("Setting sm-priority to %d\n", priority); + p_osm->subn.opt.sm_priority = (uint8_t)priority; + /* Does the SM state machine need a kick now ? 
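+         (As written, the new priority simply takes effect whenever the
+         SM next rebuilds its SMInfo; whether an immediate heavy sweep
+         should be forced here is left as an open question.)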
+      */
+    }
+  }
+}
+
+/* more parse routines go here */
+
+static const struct command console_cmds[] =
+{
+  { "help",     &help_command,  &help_parse},
+  { "loglevel", &help_loglevel, &loglevel_parse},
+  { "priority", &help_priority, &priority_parse},
+  { NULL,       NULL,           NULL}  /* end of array */
+};
+
+static void parse_cmd_line(char *line, osm_opensm_t *p_osm)
+{
+  char *p_cmd, *p_last;
+  int i, found = 0;
+
+  /* find first token which is the command */
+  p_cmd = strtok_r(line, " \t\n", &p_last);
+  if (p_cmd) {
+    for (i = 0; console_cmds[i].name; i++) {
+      if (!strcmp(p_cmd, console_cmds[i].name)) {
+        found = 1;
+        console_cmds[i].parse_function(&p_last, p_osm);
+        break;
+      }
+    }
+    if (!found) {
+      printf("Command %s not found\n\n", p_cmd);
+      help_command();
+    }
+  } else {
+    printf("Error parsing command line: %s\n", line);
+    return;
+  }
+}
+
+void osm_console_prompt(void)
+{
+  printf("%s", OSM_COMMAND_PROMPT);
+  fflush(stdout);
+}
+
+void osm_console(osm_opensm_t *p_osm)
+{
+  struct pollfd pollfd;
+  char *p_line;
+  size_t len;
+  ssize_t n;
+
+  pollfd.fd = 0;
+  pollfd.events = POLLIN;
+  pollfd.revents = 0;
+
+  if (poll(&pollfd, 1, 10000) <= 0)
+    return;
+
+  /* test the POLLIN bit explicitly; a bitwise OR here would always
+     evaluate true, even when no input is pending */
+  if (pollfd.revents & POLLIN) {
+    p_line = NULL;
+    /* Get input line */
+    n = getline(&p_line, &len, stdin);
+    if (n > 0) {
+      /* Parse and act on input */
+      parse_cmd_line(p_line, p_osm);
+      free(p_line);
+    } else {
+      printf("Input error\n");
+      fflush(stdin);
+    }
+    osm_console_prompt();
+  }
+}
+
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_db_files.c b/branches/Ndi/ulp/opensm/user/opensm/osm_db_files.c
new file mode 100644
index 00000000..43a90d10
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_db_files.c
@@ -0,0 +1,796 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of the osm_db interface using simple text files + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include + +/****d* Database/OSM_DB_MAX_LINE_LEN + * NAME + * OSM_DB_MAX_LINE_LEN + * + * DESCRIPTION + * The Maximal line length allowed for the file + * + * SYNOPSIS + */ +#define OSM_DB_MAX_LINE_LEN 1024 +/**********/ + +/****d* Database/OSM_DB_MAX_GUID_LEN + * NAME + * OSM_DB_MAX_GUID_LEN + * + * DESCRIPTION + * The Maximal word length allowed for the file (guid or lid) + * + * SYNOPSIS + */ +#define OSM_DB_MAX_GUID_LEN 32 +/**********/ + +/****s* OpenSM: Database/osm_db_domain_imp + * NAME + * osm_db_domain_imp + * + * DESCRIPTION + * An implementation for domain of the database based on text files and + * hash tables. + * + * SYNOPSIS + */ +typedef struct _osm_db_domain_imp { + char *file_name; + st_table *p_hash; + cl_spinlock_t lock; +} osm_db_domain_imp_t; +/* + * FIELDS + * + * SEE ALSO + * osm_db_domain_t + *********/ + +/****s* OpenSM: Database/osm_db_imp_t + * NAME + * osm_db_imp_t + * + * DESCRIPTION + * An implementation for file based database + * + * SYNOPSIS + */ +typedef struct _osm_db_imp { + char *db_dir_name; +} osm_db_imp_t; +/* + * FIELDS + * + * db_dir_name + * The directory holding the database + * + * SEE ALSO + * osm_db_t + *********/ + +/*************************************************************************** + ***************************************************************************/ +void +osm_db_construct( + IN osm_db_t* const p_db ) +{ + memset(p_db, 0, sizeof(osm_db_t)); + cl_list_construct( &p_db->domains ); +} + +/*************************************************************************** + ***************************************************************************/ +void +osm_db_domain_destroy( + IN osm_db_domain_t* const p_db_domain) +{ + osm_db_domain_imp_t *p_domain_imp; + p_domain_imp = (osm_db_domain_imp_t *)p_db_domain->p_domain_imp; + + osm_db_clear( p_db_domain ); + + cl_spinlock_destroy( &p_domain_imp->lock ); + + st_free_table( p_domain_imp->p_hash ); + free( p_domain_imp->file_name ); + free( p_domain_imp ); +} + +/*************************************************************************** + ***************************************************************************/ +void +osm_db_destroy( + IN osm_db_t* const p_db ) +{ + osm_db_domain_t *p_domain; + + while ((p_domain = cl_list_remove_head( &p_db->domains )) != NULL ) + { + osm_db_domain_destroy( p_domain ); + free( p_domain ); + } + cl_list_destroy( &p_db->domains ); + free( p_db->p_db_imp ); +} + +/*************************************************************************** + ***************************************************************************/ +int +osm_db_init( + IN osm_db_t* const p_db, + IN osm_log_t *p_log ) +{ + osm_db_imp_t *p_db_imp; + struct stat dstat; + + OSM_LOG_ENTER( p_log, osm_db_init ); + + p_db_imp = (osm_db_imp_t *)malloc(sizeof(osm_db_imp_t)); + CL_ASSERT( p_db_imp != NULL); + + p_db_imp->db_dir_name = getenv("OSM_CACHE_DIR"); + if (!p_db_imp->db_dir_name || !(*p_db_imp->db_dir_name)) + p_db_imp->db_dir_name = OSM_DEFAULT_CACHE_DIR; + + /* Create the directory if it doesn't exist */ + /* There is a difference in creating directory between windows and linux */ +#ifdef __WIN__ + /* Check if the directory exists. If not - create it.
*/ + CreateDirectory(p_db_imp->db_dir_name, NULL); +#else /* __WIN__ */ + /* make sure the directory exists */ + if (lstat(p_db_imp->db_dir_name, &dstat)) + { + if (mkdir(p_db_imp->db_dir_name, 0755)) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_init: ERR 6101: " + " Failed to create the db directory:%s\n", + p_db_imp->db_dir_name); + OSM_LOG_EXIT( p_log ); + return 1; + } + } +#endif + + p_db->p_log = p_log; + p_db->p_db_imp = (void*)p_db_imp; + + cl_list_init( &p_db->domains, 5 ); + + OSM_LOG_EXIT( p_log ); + + return 0; +} + +/*************************************************************************** + ***************************************************************************/ +osm_db_domain_t* +osm_db_domain_init( + IN osm_db_t* const p_db, + IN char *domain_name) +{ + osm_db_domain_t *p_domain; + osm_db_domain_imp_t *p_domain_imp; + int dir_name_len; + osm_log_t *p_log = p_db->p_log; + FILE *p_file; + + OSM_LOG_ENTER( p_log, osm_db_domain_init ); + + /* allocate a new domain object */ + p_domain = (osm_db_domain_t *)malloc(sizeof(osm_db_domain_t)); + CL_ASSERT( p_domain != NULL ); + + p_domain_imp = + (osm_db_domain_imp_t *)malloc(sizeof(osm_db_domain_imp_t)); + CL_ASSERT( p_domain_imp != NULL ); + + dir_name_len = strlen(((osm_db_imp_t*)p_db->p_db_imp)->db_dir_name); + + /* set the domain file name */ + p_domain_imp->file_name = + (char *)malloc(sizeof(char)*(dir_name_len) + strlen(domain_name) + 2); + CL_ASSERT(p_domain_imp->file_name != NULL); + strcpy(p_domain_imp->file_name,((osm_db_imp_t*)p_db->p_db_imp)->db_dir_name); + strcat(p_domain_imp->file_name,domain_name); + + /* make sure the file exists - or exit if not writable */ + p_file = fopen(p_domain_imp->file_name, "a+"); + if (! p_file) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_domain_init: ERR 6102: " + " Failed to open the db file:%s\n", + p_domain_imp->file_name); + free(p_domain_imp); + free(p_domain); + p_domain = NULL; + goto Exit; + } + fclose(p_file); + + /* initialize the hash table object */ + p_domain_imp->p_hash = st_init_strtable(); + CL_ASSERT( p_domain_imp->p_hash != NULL ); + + p_domain->p_db = p_db; + cl_list_insert_tail( &p_db->domains, p_domain ); + p_domain->p_domain_imp = p_domain_imp; + cl_spinlock_construct( &p_domain_imp->lock ); + cl_spinlock_init( &p_domain_imp->lock ); + + Exit: + OSM_LOG_EXIT( p_log ); + return p_domain; +} + +/*************************************************************************** + ***************************************************************************/ +int +osm_db_restore( + IN osm_db_domain_t *p_domain) +{ + + osm_log_t *p_log = p_domain->p_db->p_log; + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + FILE *p_file; + int status; + char sLine[OSM_DB_MAX_LINE_LEN]; + boolean_t before_key; + char *p_first_word, *p_rest_of_line, *p_last; + char *p_key = NULL; + char *p_prev_val, *p_accum_val = NULL; + unsigned int line_num; + + OSM_LOG_ENTER( p_log, osm_db_restore ); + + /* take the lock on the domain */ + cl_spinlock_acquire( &p_domain_imp->lock ); + + /* open the file - read mode */ + p_file = fopen(p_domain_imp->file_name, "r"); + + if (! p_file) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_restore: ERR 6103: " + " Failed to open the db file:%s\n", + p_domain_imp->file_name); + status = 1; + goto Exit; + } + + /* parse the file allocating new hash tables as required */ + /* + states: + before_key (0) -> in_key (1) + + before_key: if a word on the first byte - it is the key. 
state=in_key + the rest of the line is start of the value. + in_key: unless the line is empty - add it (with newlines) to the value. + if empty: state=before_key + */ + status = 0; + before_key = TRUE; + line_num = 0; + /* if we got to EOF in the middle of a key we add a last newline */ + while ( + (fgets(sLine, OSM_DB_MAX_LINE_LEN, p_file) != NULL) || + ((before_key == FALSE) && strcpy(sLine,"\n")) + ) + { + line_num++; + if (before_key) + { + if ((sLine[0] != ' ') && (sLine[0] != '\t') && (sLine[0] != '\n')) + { + /* we got a new key */ + before_key = FALSE; + + /* handle the key */ + p_first_word = strtok_r(sLine, " \t\n", &p_last); + if (! p_first_word) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_restore: ERR 6104: " + " Failed to get key from line:%u : %s (file:%s)\n", + line_num, sLine, p_domain_imp->file_name); + status = 1; + goto EndParsing; + } + if (strlen(p_first_word) > OSM_DB_MAX_GUID_LEN) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_restore: ERR 610A: " + " Illegal key from line:%u : %s (file:%s)\n", + line_num, sLine, p_domain_imp->file_name); + status = 1; + goto EndParsing; + } + + p_key = (char *)malloc(sizeof(char)*(strlen(p_first_word) + 1)); + strcpy(p_key, p_first_word); + + p_rest_of_line = strtok_r(NULL, "\n", &p_last); + if (p_rest_of_line != NULL) + { + p_accum_val = + (char*)malloc(sizeof(char)*(strlen(p_rest_of_line) + 1)); + strcpy(p_accum_val, p_rest_of_line); + } + else + { + p_accum_val = (char*)malloc(2); + strcpy(p_accum_val, "\0"); + } + } + else if (sLine[0] != '\n') + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_restore: ERR 6105: " + " How did we get here? line:%u : %s (file:%s)\n", + line_num, sLine, p_domain_imp->file_name); + status = 1; + goto EndParsing; + } + } /* before key */ + else + { + /* we already have a key */ + + if (sLine[0] == '\n') + { + /* got an end of key */ + before_key = TRUE; + + /* make sure the key was not previously used */ + if (st_lookup(p_domain_imp->p_hash, + (st_data_t)p_key, + (st_data_t*)&p_prev_val)) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_restore: ERR 6106: " + " Key:%s already exists in:%s with value:%s." 
+ " Removing it\n", + p_key, + p_domain_imp->file_name, + p_prev_val); + } + else + { + p_prev_val = NULL; + } + + /* store our key and value */ + st_insert(p_domain_imp->p_hash, + (st_data_t)p_key, (st_data_t)p_accum_val); + osm_log( p_log, OSM_LOG_DEBUG, + "osm_db_restore: " + "Got key:%s value:%s\n", p_key, p_accum_val); + } + else + { + /* accumulate into the value */ + p_prev_val = p_accum_val; + p_accum_val = + (char *)malloc(strlen(p_prev_val) + strlen(sLine) + 1); + strcpy(p_accum_val, p_prev_val); + free(p_prev_val); + strcat(p_accum_val, sLine); + } + } /* in key */ + } /* while lines or last line */ + + EndParsing: + fclose(p_file); + + Exit: + cl_spinlock_release( &p_domain_imp->lock ); + OSM_LOG_EXIT( p_log ); + return status; +} + +/*************************************************************************** + ***************************************************************************/ +int +__osm_dump_tbl_entry(st_data_t key, st_data_t val, st_data_t arg) +{ + FILE *p_file = (FILE*)arg; + char *p_key = (char*)key; + char *p_val = (char *)val; + + fprintf(p_file, "%s %s\n\n", p_key, p_val); + return ST_CONTINUE; +} + +int +osm_db_store( + IN osm_db_domain_t *p_domain) +{ + osm_log_t *p_log = p_domain->p_db->p_log; + osm_db_domain_imp_t *p_domain_imp; + FILE *p_file; + int status = 0; + char *p_tmp_file_name; + + OSM_LOG_ENTER( p_log, osm_db_store ); + + p_domain_imp = (osm_db_domain_imp_t *)p_domain->p_domain_imp; + p_tmp_file_name = + (char *)malloc(sizeof(char)*(strlen(p_domain_imp->file_name)+8)); + strcpy(p_tmp_file_name, p_domain_imp->file_name); + strcat(p_tmp_file_name,".tmp"); + + cl_spinlock_acquire( &p_domain_imp->lock ); + + /* open up the output file */ + p_file = fopen(p_tmp_file_name, "w"); + if (! p_file) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_store: ERR 6107: " + " Failed to open the db file:%s for writing\n", + p_domain_imp->file_name); + status = 1; + goto Exit; + } + + st_foreach(p_domain_imp->p_hash, __osm_dump_tbl_entry, (st_data_t)p_file); + fclose(p_file); + + /* move the domain file */ + status = remove(p_domain_imp->file_name); + if (status) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_store: ERR 6109: " + " Failed to remove file:%s (err:%u)\n", + p_domain_imp->file_name, status); + } + + status = rename(p_tmp_file_name, p_domain_imp->file_name); + if (status) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_store: ERR 6108: " + " Failed to rename the db file to:%s (err:%u)\n", + p_domain_imp->file_name, status); + } + Exit: + cl_spinlock_release( &p_domain_imp->lock ); + free(p_tmp_file_name); + OSM_LOG_EXIT( p_log ); + return status; +} + +/*************************************************************************** + ***************************************************************************/ +/* simply de-allocate the key and the value and return the code + that makes the st_foreach delete the entry */ +int +__osm_clear_tbl_entry(st_data_t key, st_data_t val, st_data_t arg) +{ + free((char*)key); + free((char*)val); + return ST_DELETE; +} + +int +osm_db_clear( + IN osm_db_domain_t *p_domain) +{ + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + + cl_spinlock_acquire( &p_domain_imp->lock ); + st_foreach(p_domain_imp->p_hash, __osm_clear_tbl_entry, (st_data_t)NULL); + cl_spinlock_release( &p_domain_imp->lock ); + + return 0; +} + +/*************************************************************************** + ***************************************************************************/ +int 
+__osm_get_key_of_tbl_entry(st_data_t key, st_data_t val, st_data_t arg) +{ + cl_list_t *p_list = (cl_list_t *)arg; + cl_list_insert_tail(p_list, (void*)key); + return ST_CONTINUE; +} + +int +osm_db_keys( + IN osm_db_domain_t *p_domain, + OUT cl_list_t* p_key_list) +{ + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + + cl_spinlock_acquire( &p_domain_imp->lock ); + + st_foreach(p_domain_imp->p_hash, + __osm_get_key_of_tbl_entry, (st_data_t)p_key_list); + + cl_spinlock_release( &p_domain_imp->lock ); + + return 0; +} + +/*************************************************************************** + ***************************************************************************/ +char * +osm_db_lookup( + IN osm_db_domain_t *p_domain, + IN char *const p_key) +{ + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + char *p_val = NULL; + + cl_spinlock_acquire( &p_domain_imp->lock ); + + if (!st_lookup(p_domain_imp->p_hash, (st_data_t)p_key, (st_data_t*)&p_val)) + p_val = NULL; + + cl_spinlock_release( &p_domain_imp->lock ); + + return p_val; +} + +/*************************************************************************** + ***************************************************************************/ +int +osm_db_update( + IN osm_db_domain_t *p_domain, + IN char *const p_key, + IN char *const p_val) +{ + osm_log_t *p_log = p_domain->p_db->p_log; + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + char *p_prev_val = NULL; + char *p_new_key; + char *p_new_val; + + cl_spinlock_acquire( &p_domain_imp->lock ); + + if (st_lookup(p_domain_imp->p_hash, + (st_data_t)p_key, (st_data_t*)&p_prev_val)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_db_update: " + " Key:%s previously exists in:%s with value:%s\n", + p_key, + p_domain_imp->file_name, + p_prev_val); + p_new_key = p_key; + } + else + { + /* need to allocate the key */ + p_new_key = malloc(sizeof(char)*(strlen(p_key) + 1)); + strcpy(p_new_key, p_key); + } + + /* need to arrange a new copy of the value */ + p_new_val = malloc(sizeof(char)*(strlen(p_val) + 1)); + strcpy(p_new_val, p_val); + + st_insert(p_domain_imp->p_hash, (st_data_t)p_new_key, (st_data_t)p_new_val); + + if (p_prev_val) + free(p_prev_val); + + cl_spinlock_release( &p_domain_imp->lock ); + + return 0; +} + +/*************************************************************************** + ***************************************************************************/ +int +osm_db_delete( + IN osm_db_domain_t *p_domain, + IN char *const p_key) +{ + osm_log_t *p_log = p_domain->p_db->p_log; + osm_db_domain_imp_t *p_domain_imp = + (osm_db_domain_imp_t *)p_domain->p_domain_imp; + char *p_prev_val = NULL; + int res; + + OSM_LOG_ENTER( p_log, osm_db_delete ); + + cl_spinlock_acquire( &p_domain_imp->lock ); + if (st_delete(p_domain_imp->p_hash, + (st_data_t*)&p_key, (st_data_t*)&p_prev_val)) + { + if (st_lookup(p_domain_imp->p_hash, + (st_data_t)p_key, (st_data_t*)&p_prev_val)) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_db_delete: " + " key:%s still exists in:%s with value:%s\n", + p_key, + p_domain_imp->file_name, + p_prev_val); + res = 1; + } + else + { + free(p_key); + free(p_prev_val); + res = 0; + } + } + else + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_db_delete: " + " failed to find key:%s.
delete failed\n", + p_key); + res = 1; + } + cl_spinlock_release( &p_domain_imp->lock ); + + OSM_LOG_EXIT( p_log ); + return res; +} + +#ifdef TEST_OSMDB +#include +#include + +int +main(int argc, char **argv) +{ + osm_db_t db; + osm_log_t log; + osm_db_domain_t *p_dbd; + cl_list_t keys; + cl_list_iterator_t kI; + char *p_key; + char *p_val; + int i; + + cl_list_construct( &keys ); + cl_list_init( &keys, 10 ); + + osm_log_init_v2( &log, TRUE, 0xff, "/var/log/osm_db_test.log", 0, FALSE); + + osm_db_construct(&db); + if (osm_db_init(&db, &log)) + { + printf("db init failed\n"); + exit(1); + } + + p_dbd = osm_db_domain_init(&db, "lid_by_guid"); + + if (osm_db_restore(p_dbd)) + { + printf("failed to restore\n"); + } + + if (osm_db_keys(p_dbd, &keys)) + { + printf("failed to get keys\n"); + } + else + { + kI = cl_list_head( &keys ); + while (kI != cl_list_end( & keys )) + { + p_key = cl_list_obj(kI); + kI = cl_list_next( kI ); + + p_val = osm_db_lookup(p_dbd, p_key); + printf("key = %s val = %s\n", p_key, p_val); + } + } + + cl_list_remove_all(&keys); + + /* randomly add and remove numbers */ + for (i = 0; i < 10; i++) + { + int k; + float v; + int is_add; + char val_buf[16]; + char key_buf[16]; + + k = floor(1.0 * rand()/ RAND_MAX * 100); + v = rand(); + sprintf(key_buf, "%u", k); + sprintf(val_buf, "%u", v); + + is_add = (rand() < RAND_MAX/ 2); + + if (is_add) + { + osm_db_update(p_dbd, key_buf, val_buf); + } + else + { + osm_db_delete(p_dbd, key_buf); + } + } + if (osm_db_keys(p_dbd, &keys)) + { + printf("failed to get keys\n"); + } + else + { + kI = cl_list_head( &keys ); + while (kI != cl_list_end( & keys )) + { + p_key = cl_list_obj(kI); + kI = cl_list_next( kI ); + + p_val = osm_db_lookup(p_dbd, p_key); + printf("key = %s val = %s\n", p_key, p_val); + } + } + if (osm_db_store(p_dbd)) + printf("failed to store\n"); + + osm_db_destroy( &db ); + cl_list_destroy( &keys ); +} +#endif + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_db_pack.c b/branches/Ndi/ulp/opensm/user/opensm/osm_db_pack.c new file mode 100644 index 00000000..14b923e8 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_db_pack.c @@ -0,0 +1,172 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +static inline void +__osm_pack_guid(uint64_t guid, char *p_guid_str) +{ + sprintf(p_guid_str, "0x%016" PRIx64, guid); +} + +static inline uint64_t +__osm_unpack_guid(char *p_guid_str) +{ +#if __WORDSIZE == 64 + return (strtoul(p_guid_str, NULL, 0)); +#else + return (strtoull(p_guid_str, NULL, 0)); +#endif +} + +static inline void +__osm_pack_lids(uint16_t min_lid, uint16_t max_lid, char *p_lid_str) +{ + sprintf(p_lid_str, "0x%04x 0x%04x", min_lid, max_lid); +} + +static inline int +__osm_unpack_lids( + IN char *p_lid_str, + OUT uint16_t *p_min_lid, + OUT uint16_t *p_max_lid ) +{ + unsigned long tmp; + char *p_next; + char *p_num; + char lids_str[24]; + + strncpy(lids_str, p_lid_str, 23); + lids_str[23] = '\0'; + p_num = strtok_r(lids_str, " \t", &p_next); + if (! p_num) return 1; + tmp = strtoul(p_num, NULL, 0); + CL_ASSERT( tmp < 0x10000 ); + *p_min_lid = (uint16_t)tmp; + + p_num = strtok_r(NULL, " \t", &p_next); + if (! p_num) return 1; + tmp = strtoul(p_num, NULL, 0); + CL_ASSERT( tmp < 0x10000 ); + *p_max_lid = (uint16_t)tmp; + + return 0; +} + +int +osm_db_guid2lid_guids( + IN osm_db_domain_t* const p_g2l, + OUT cl_qlist_t* p_guid_list ) +{ + char *p_key; + cl_list_t keys; + osm_db_guid_elem_t *p_guid_elem; + + cl_list_construct( &keys ); + cl_list_init( &keys , 10); + + if (osm_db_keys(p_g2l, &keys)) + return 1; + + while ( (p_key = cl_list_remove_head( &keys )) != NULL ) + { + p_guid_elem = (osm_db_guid_elem_t*)malloc(sizeof(osm_db_guid_elem_t)); + CL_ASSERT( p_guid_elem != NULL ); + + p_guid_elem->guid = __osm_unpack_guid(p_key); + cl_qlist_insert_head(p_guid_list, &p_guid_elem->item); + } + + cl_list_destroy( &keys ); + return 0; +} + +int +osm_db_guid2lid_get( + IN osm_db_domain_t* const p_g2l, + IN uint64_t guid, + OUT uint16_t *p_min_lid, + OUT uint16_t *p_max_lid) +{ + char guid_str[20]; + char *p_lid_str; + uint16_t min_lid, max_lid; + + __osm_pack_guid(guid, guid_str); + p_lid_str = osm_db_lookup(p_g2l, guid_str); + if (! p_lid_str) + return 1; + if (__osm_unpack_lids(p_lid_str, &min_lid, &max_lid)) + return 1; + + if (p_min_lid) *p_min_lid = min_lid; + if (p_max_lid) *p_max_lid = max_lid; + + return 0; +} + +int +osm_db_guid2lid_set( + IN osm_db_domain_t* const p_g2l, + IN uint64_t guid, + IN uint16_t min_lid, + IN uint16_t max_lid) +{ + char guid_str[20]; + char lid_str[16]; + + __osm_pack_guid(guid, guid_str); + __osm_pack_lids(min_lid, max_lid, lid_str); + + return( osm_db_update( p_g2l, guid_str, lid_str) ); +} + +int +osm_db_guid2lid_delete( + IN osm_db_domain_t* const p_g2l, + IN uint64_t guid ) +{ + char guid_str[20]; + __osm_pack_guid(guid, guid_str); + return( osm_db_delete( p_g2l, guid_str) ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_drop_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_drop_mgr.c new file mode 100644 index 00000000..eb8bbd4b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_drop_mgr.c @@ -0,0 +1,721 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
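The buffer sizes used by the guid2lid callers above can be checked against the pack format: a GUID packs to "0x" plus 16 hex digits (18 characters, so guid_str[20] has room to spare) and a LID pair packs to "0x%04x 0x%04x" (13 characters, within lid_str[16]). A self-contained round-trip of the encoding, with a made-up GUID value:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	uint64_t guid = 0x0002c90200212345ULL; /* arbitrary sample value */
	char guid_str[20];
	char lid_str[16];
	uint16_t min_lid = 0x0010, max_lid = 0x0013;

	sprintf(guid_str, "0x%016" PRIx64, guid);
	sprintf(lid_str, "0x%04x 0x%04x", min_lid, max_lid);
	printf("%s -> %s\n", guid_str, lid_str);

	/* base 0 makes strtoull() honour the 0x prefix, mirroring the
	 * 32-bit branch of __osm_unpack_guid() above */
	return strtoull(guid_str, NULL, 0) == guid ? 0 : 1;
}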
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_drop_mgr_t. + * This object represents the Drop Manager object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_drop_mgr_construct( + IN osm_drop_mgr_t* const p_mgr ) +{ + CL_ASSERT( p_mgr ); + memset( p_mgr, 0, sizeof(*p_mgr) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_drop_mgr_destroy( + IN osm_drop_mgr_t* const p_mgr ) +{ + CL_ASSERT( p_mgr ); + + OSM_LOG_ENTER( p_mgr->p_log, osm_drop_mgr_destroy ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_drop_mgr_init( + IN osm_drop_mgr_t* const p_mgr, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_req_t* const p_req, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_drop_mgr_init ); + + osm_drop_mgr_construct( p_mgr ); + + p_mgr->p_log = p_log; + p_mgr->p_subn = p_subn; + p_mgr->p_lock = p_lock; + p_mgr->p_req = p_req; + + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_drop_mgr_remove_router( + IN const osm_drop_mgr_t* const p_mgr, + IN const ib_net64_t portguid ) +{ + osm_router_t *p_rtr; + cl_qmap_t* p_rtr_guid_tbl; + + p_rtr_guid_tbl = &p_mgr->p_subn->rtr_guid_tbl; + p_rtr = (osm_router_t*)cl_qmap_remove( p_rtr_guid_tbl, portguid ); + if( p_rtr != (osm_router_t*)cl_qmap_end( p_rtr_guid_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_router: " + "Cleaned router for port guid 0x%016" PRIx64 "\n", + cl_ntoh64( portguid ) ); + osm_router_delete( &p_rtr ); + } +} + 
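__osm_drop_mgr_remove_router() just above shows a complib idiom that recurs through the rest of this file: cl_qmap_remove() never returns NULL; absence is signalled by the map-end sentinel, and the returned item is cast back to the enclosing object. A fragment distilling the pattern follows; my_object_t and my_object_delete() are hypothetical, and the cast is valid only because the cl_map_item_t is the first member of the object.

#include <stdlib.h>
#include <complib/cl_qmap.h>

typedef struct my_object {
	cl_map_item_t map_item; /* must remain the first member */
	/* ... payload ... */
} my_object_t;

static void my_object_delete(my_object_t *p_obj)
{
	free(p_obj);
}

static void remove_if_present(cl_qmap_t *p_map, uint64_t key)
{
	cl_map_item_t *p_item = cl_qmap_remove(p_map, key);

	if (p_item == cl_qmap_end(p_map))
		return; /* key absent: the end item is returned, not NULL */
	my_object_delete((my_object_t *)p_item);
}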
+/********************************************************************** + **********************************************************************/ +static void +__osm_drop_mgr_remove_port( + IN const osm_drop_mgr_t* const p_mgr, + IN osm_port_t* p_port ) +{ + ib_net64_t port_guid; + osm_port_t *p_port_check; + cl_list_t* p_new_ports_list; + cl_list_iterator_t cl_list_item; + cl_qmap_t* p_port_guid_tbl; + cl_qmap_t* p_sm_guid_tbl; + osm_mcm_info_t* p_mcm; + osm_mgrp_t* p_mgrp; + cl_ptr_vector_t* p_port_lid_tbl; + uint16_t min_lid_ho; + uint16_t max_lid_ho; + uint16_t lid_ho; + uint32_t port_num; + uint32_t remote_port_num; + uint32_t num_physp; + osm_node_t *p_node; + osm_node_t *p_remote_node; + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + osm_remote_sm_t *p_sm; + ib_gid_t port_gid; + ib_mad_notice_attr_t notice; + ib_api_status_t status; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_drop_mgr_remove_port ); + + port_guid = osm_port_get_guid( p_port ); + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_port: " + "Unreachable port 0x%016" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + + /* + Remove this port from the new_ports_list, if it exists there. + Remove this port from the guid and LID tables. + Remove also from the sm guid table - if the object + exists there. + */ + p_new_ports_list = &p_mgr->p_subn->new_ports_list; + cl_list_item = cl_list_head(p_new_ports_list); + while( cl_list_item != cl_list_end(p_new_ports_list) ) + { + if ( (osm_port_t*)(cl_list_obj(cl_list_item)) == p_port ) + { + /* Found the port in the new_ports_list. Remove it from there. */ + cl_list_remove_item(p_new_ports_list, cl_list_item); + break; + } + cl_list_item = cl_list_next(cl_list_item); + } + + p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl; + p_port_check = (osm_port_t*)cl_qmap_remove( p_port_guid_tbl, port_guid ); + if( p_port_check != p_port ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_drop_mgr_remove_port: ERR 0101: " + "Port 0x%016" PRIx64 " not in guid table\n", + cl_ntoh64( port_guid ) ); + goto Exit; + } + + p_sm_guid_tbl = &p_mgr->p_subn->sm_guid_tbl; + p_sm = (osm_remote_sm_t*)cl_qmap_remove( p_sm_guid_tbl, port_guid ); + if( p_sm != (osm_remote_sm_t*)cl_qmap_end( p_sm_guid_tbl ) ) + { + /* need to remove this item */ + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_port: " + "Cleaned SM for port guid\n" ); + + free(p_sm); + } + + __osm_drop_mgr_remove_router( p_mgr, port_guid ); + + osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho ); + + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_port: " + "Clearing abandoned LID range [0x%X,0x%X]\n", + min_lid_ho, max_lid_ho ); + + p_port_lid_tbl = &p_mgr->p_subn->port_lid_tbl; + for( lid_ho = min_lid_ho; lid_ho <= max_lid_ho; lid_ho++ ) + cl_ptr_vector_set( p_port_lid_tbl, lid_ho, NULL ); + + /* + For each Physical Port associated with this port: + Unlink the remote Physical Port, if any + Re-initialize each Physical Port. 
+ */ + + num_physp = osm_port_get_num_physp( p_port ); + for( port_num = 0; port_num < num_physp; port_num++ ) + { + p_physp = osm_port_get_phys_ptr( p_port, (uint8_t)port_num ); + + if( p_physp ) + { + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp && osm_physp_is_valid( p_remote_physp ) ) + { + osm_port_t* p_remote_port; + + p_node = osm_physp_get_node_ptr( p_physp ); + p_remote_node = osm_physp_get_node_ptr( p_remote_physp ); + remote_port_num = osm_physp_get_port_num( p_remote_physp ); + p_remote_port = (osm_port_t*)cl_qmap_get( p_port_guid_tbl, p_remote_physp->port_guid ); + + if ( p_remote_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl ) ) + { + /* Let's check if this is a case of link that is lost (both ports + weren't recognized), or a "hiccup" in the subnet - in which case + the remote port was recognized, and its state is ACTIVE. + If this is just a "hiccup" - force a heavy sweep in the next sweep. + We don't want to lose that part of the subnet. */ + if (osm_port_discovery_count_get( p_remote_port ) && + osm_physp_get_port_state( p_remote_physp ) == IB_LINK_ACTIVE ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_port: " + "Forcing delayed heavy sweep. Remote " + "port 0x%016" PRIx64 " port num: 0x%X " + "was recognized in ACTIVE state\n", + cl_ntoh64( p_remote_physp->port_guid ), + remote_port_num ); + p_mgr->p_subn->force_delayed_heavy_sweep = TRUE; + } + } + + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_remove_port: " + "Unlinking local node 0x%016" PRIx64 ", port 0x%X" + "\n\t\t\t\tand remote node 0x%016" PRIx64 + ", port 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + port_num, + cl_ntoh64( osm_node_get_node_guid( p_remote_node ) ), + remote_port_num ); + + osm_node_unlink( p_node, (uint8_t)port_num, + p_remote_node, (uint8_t)remote_port_num ); + + /* If the remote node is ca or router - need to remove the remote port, + since it is no longer reachable. This can be done if we reset the + discovery count of the remote port. 
*/ + if ( osm_node_get_type( p_remote_node ) != IB_NODE_TYPE_SWITCH ) + { + if ( p_remote_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl ) ) + { + osm_port_discovery_count_reset( p_remote_port ); + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_drop_mgr_remove_port: " + "Resetting discovery count of node: " + "0x%016" PRIx64 " port num:0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_remote_node ) ), + remote_port_num ); + } + } + } + + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_drop_mgr_remove_port: " + "Clearing physical port number 0x%X\n", + port_num ); + + osm_physp_destroy( p_physp ); + } + } + + p_mcm = (osm_mcm_info_t*)cl_qlist_remove_head( &p_port->mcm_list ); + while( p_mcm != (osm_mcm_info_t *)cl_qlist_end( &p_port->mcm_list ) ) + { + p_mgrp = (osm_mgrp_t *)cl_qmap_get( &p_mgr->p_subn->mgrp_mlid_tbl, + p_mcm->mlid ); + if(p_mgrp != (osm_mgrp_t *)cl_qmap_end( &p_mgr->p_subn->mgrp_mlid_tbl ) ) + { + osm_mgrp_remove_port(p_mgr->p_subn, p_mgr->p_log, p_mgrp, p_port->guid ); + osm_mcm_info_delete( (osm_mcm_info_t*)p_mcm ); + } + p_mcm = (osm_mcm_info_t*)cl_qlist_remove_head( &p_port->mcm_list ); + } + + /* initialize the p_node - may need to get node_desc later */ + p_node = p_port->p_node; + + osm_port_delete( &p_port ); + + /* issue a notice - trap 65 */ + + /* details of the notice */ + notice.generic_type = 0x83; /* is generic subn mgt type */ + ib_notice_set_prod_type_ho(¬ice, 4); /* A class manager generator */ + /* endport ceases to be reachable */ + notice.g_or_v.generic.trap_num = CL_HTON16(65); + /* The sm_base_lid is saved in network order already. */ + notice.issuer_lid = p_mgr->p_subn->sm_base_lid; + /* following C14-72.1.2 and table 119 p725 */ + /* we need to provide the GID */ + port_gid.unicast.prefix = p_mgr->p_subn->opt.subnet_prefix; + port_gid.unicast.interface_id = port_guid; + memcpy(&(notice.data_details.ntc_64_67.gid), + &(port_gid), + sizeof(ib_gid_t)); + + /* According to page 653 - the issuer gid in this case of trap + is the SM gid, since the SM is the initiator of this trap. */ + notice.issuer_gid.unicast.prefix = p_mgr->p_subn->opt.subnet_prefix; + notice.issuer_gid.unicast.interface_id = p_mgr->p_subn->sm_port_guid; + + status = osm_report_notice(p_mgr->p_log, p_mgr->p_subn, ¬ice); + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_drop_mgr_remove_port: ERR 0103: " + "Error sending trap reports (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + if (osm_log_is_active( p_mgr->p_log, OSM_LOG_INFO )) + { + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + + if (p_node) + { + memcpy(desc, p_node->node_desc.description, IB_NODE_DESCRIPTION_SIZE); + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + } + osm_log( p_mgr->p_log, OSM_LOG_INFO, + "__osm_drop_mgr_remove_port: " + "Removed port with GUID:0x%016" PRIx64 + " LID range [0x%X,0x%X] of node:%s\n", + cl_ntoh64( port_gid.unicast.interface_id ), + min_lid_ho, max_lid_ho, p_node ? 
desc : "UNKNOWN" ); + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_drop_mgr_remove_switch( + IN const osm_drop_mgr_t* const p_mgr, + IN osm_node_t* p_node ) +{ + osm_switch_t *p_sw; + cl_qmap_t* p_sw_guid_tbl; + ib_net64_t node_guid; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_drop_mgr_remove_switch ); + + node_guid = osm_node_get_node_guid( p_node ); + p_sw_guid_tbl = &p_mgr->p_subn->sw_guid_tbl; + + p_sw = (osm_switch_t*)cl_qmap_remove( p_sw_guid_tbl, node_guid ); + if( p_sw == (osm_switch_t*)cl_qmap_end( p_sw_guid_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_drop_mgr_remove_switch: ERR 0102: " + "Node 0x%016" PRIx64 " not in switch table\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + } + else + { + p_node->sw = NULL; + osm_switch_delete( &p_sw ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static boolean_t +__osm_drop_mgr_process_node( + IN const osm_drop_mgr_t* const p_mgr, + IN osm_node_t* p_node ) +{ + osm_physp_t *p_physp; + osm_port_t *p_port; + osm_node_t *p_node_check; + cl_qmap_t *p_node_guid_tbl; + uint32_t port_num; + uint32_t max_ports; + ib_net64_t port_guid; + cl_qmap_t* p_port_guid_tbl; + boolean_t return_val = FALSE; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_drop_mgr_process_node ); + + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_process_node: " + "Unreachable node 0x%016" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + + /* + Delete all the logical and physical port objects + associated with this node. 
+ */ + p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl; + + max_ports = osm_node_get_num_physp( p_node ); + for( port_num = 0; port_num < max_ports; port_num++ ) + { + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + if( osm_physp_is_valid( p_physp ) ) + { + port_guid = osm_physp_get_port_guid( p_physp ); + + p_port = (osm_port_t*)cl_qmap_get( p_port_guid_tbl, port_guid ); + + if( p_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl ) ) + __osm_drop_mgr_remove_port( p_mgr, p_port ); + } + } + + return_val = TRUE; + + if (p_node->sw) + __osm_drop_mgr_remove_switch( p_mgr, p_node ); + + p_node_guid_tbl = &p_mgr->p_subn->node_guid_tbl; + p_node_check = (osm_node_t*)cl_qmap_remove( p_node_guid_tbl, + osm_node_get_node_guid( p_node ) ); + if( p_node_check != p_node ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_drop_mgr_process_node: ERR 0105: " + "Node 0x%016" PRIx64 " not in guid table\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + } + + /* free memory allocated to node */ + osm_node_delete( &p_node ); + + OSM_LOG_EXIT( p_mgr->p_log ); + return( return_val ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_drop_mgr_check_node( + IN const osm_drop_mgr_t* const p_mgr, + IN osm_node_t* p_node ) +{ + ib_net64_t node_guid; + osm_physp_t *p_physp; + osm_port_t *p_port; + cl_qmap_t* p_port_guid_tbl; + ib_net64_t port_guid; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_drop_mgr_check_node ); + + node_guid = osm_node_get_node_guid( p_node ); + + if ( osm_node_get_type( p_node ) != IB_NODE_TYPE_SWITCH ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_drop_mgr_check_node: ERR 0107: " + "Node 0x%016" PRIx64 " is not a switch node\n", + cl_ntoh64( node_guid ) ); + goto Exit; + } + + /* Make sure we have a switch object for this node */ + if (!p_node->sw) + { + /* We do not have switch info for this node */ + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_check_node: " + "Node 0x%016" PRIx64 " no switch in table\n", + cl_ntoh64( node_guid ) ); + + __osm_drop_mgr_process_node( p_mgr, p_node ); + goto Exit; + } + + /* Make sure we have a port object for port zero */ + p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl; + p_physp = osm_node_get_physp_ptr( p_node, 0 ); + if ( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_check_node: " + "Node 0x%016" PRIx64 " no valid physical port 0\n", + cl_ntoh64( node_guid ) ); + + __osm_drop_mgr_process_node( p_mgr, p_node ); + goto Exit; + } + + port_guid = osm_physp_get_port_guid( p_physp ); + + p_port = (osm_port_t*)cl_qmap_get( + p_port_guid_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_port_guid_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_check_node: " + "Node 0x%016" PRIx64 " has no port object\n", + cl_ntoh64( node_guid ) ); + + __osm_drop_mgr_process_node( p_mgr, p_node ); + goto Exit; + } + + if ( osm_port_discovery_count_get( p_port ) == 0 ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_drop_mgr_check_node: " + "Node 0x%016" PRIx64 " port has discovery count zero\n", + cl_ntoh64( node_guid ) ); + + __osm_drop_mgr_process_node( p_mgr, p_node ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +void +osm_drop_mgr_process( + IN const 
osm_drop_mgr_t* const p_mgr ) +{ + cl_qmap_t *p_node_guid_tbl; + cl_qmap_t *p_port_guid_tbl; + cl_list_t *p_lsweep_ports; + osm_port_t *p_port; + osm_port_t *p_next_port; + osm_node_t *p_node; + osm_node_t *p_next_node; + ib_net64_t port_guid; + ib_net64_t node_guid; + uint8_t port_num; + osm_physp_t *p_physp; + + CL_ASSERT( p_mgr ); + + OSM_LOG_ENTER( p_mgr->p_log, osm_drop_mgr_process ); + + p_node_guid_tbl = &p_mgr->p_subn->node_guid_tbl; + p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl; + p_lsweep_ports = &p_mgr->p_subn->light_sweep_physp_list; + + CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock ); + + p_next_node = (osm_node_t*)cl_qmap_head( p_node_guid_tbl ); + while( p_next_node != (osm_node_t*)cl_qmap_end( p_node_guid_tbl ) ) + { + p_node = p_next_node; + p_next_node = (osm_node_t*)cl_qmap_next( &p_next_node->map_item ); + + CL_ASSERT( cl_qmap_key( &p_node->map_item ) == + osm_node_get_node_guid( p_node ) ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + node_guid = osm_node_get_node_guid( p_node ); + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_drop_mgr_process: " + "Checking node 0x%016" PRIx64 "\n", + cl_ntoh64( node_guid ) ); + } + + /* + Check if this node was discovered during the last sweep. + If not, it is unreachable in the current subnet, and + should therefore be removed from the subnet object. + */ + if( osm_node_discovery_count_get( p_node ) == 0 ) + __osm_drop_mgr_process_node( p_mgr, p_node ); + } + + /* + Go over all the nodes. If the node is a switch - make sure + there is also a switch record for it, and a portInfo record for + port zero of of the node. + If not - this means that there was some error in getting the data + of this node. Drop the node. + */ + p_next_node = (osm_node_t*)cl_qmap_head( p_node_guid_tbl ); + while( p_next_node != (osm_node_t*)cl_qmap_end( p_node_guid_tbl ) ) + { + p_node = p_next_node; + p_next_node = (osm_node_t*)cl_qmap_next( &p_next_node->map_item ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + node_guid = osm_node_get_node_guid( p_node ); + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_drop_mgr_process: " + "Checking full discovery of node 0x%016" PRIx64 "\n", + cl_ntoh64( node_guid ) ); + } + + if ( osm_node_get_type( p_node ) != IB_NODE_TYPE_SWITCH ) + continue; + + /* We are handling a switch node */ + __osm_drop_mgr_check_node( p_mgr, p_node ); + } + + p_next_port = (osm_port_t*)cl_qmap_head( p_port_guid_tbl ); + while( p_next_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl ) ) + { + p_port = p_next_port; + p_next_port = (osm_port_t*)cl_qmap_next( &p_next_port->map_item ); + + CL_ASSERT( cl_qmap_key( &p_port->map_item ) == + osm_port_get_guid( p_port ) ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + port_guid = osm_port_get_guid( p_port ); + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_drop_mgr_process: " + "Checking port 0x%016" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + } + + /* + If the port is unreachable, remove it from the guid table. + */ + if( osm_port_discovery_count_get( p_port ) == 0 ) + __osm_drop_mgr_remove_port( p_mgr, p_port ); + } + + /* + scan through all the ports left - if the port is not DOWN and + it does not have a valid remote port - we need to track it for + next light sweep scan... 
+ */ + cl_list_remove_all( p_lsweep_ports ); + p_next_node = (osm_node_t*)cl_qmap_head( p_node_guid_tbl ); + while( p_next_node != (osm_node_t*)cl_qmap_end( p_node_guid_tbl ) ) + { + p_node = p_next_node; + p_next_node = (osm_node_t*)cl_qmap_next( &p_next_node->map_item ); + + for (port_num = 1; port_num < osm_node_get_num_physp(p_node); port_num++) + { + p_physp = osm_node_get_physp_ptr(p_node, port_num); + if (osm_physp_is_valid(p_physp) && + (osm_physp_get_port_state(p_physp) != IB_LINK_DOWN) && + ! osm_physp_get_remote(p_physp)) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_drop_mgr_process: ERR 0108: " + "Unknown remote side for node 0x%016" PRIx64 + " port %u. Adding to light sweep sampling list\n", + cl_ntoh64( osm_node_get_node_guid( p_node )), + port_num); + + osm_dump_dr_path(p_mgr->p_log, + osm_physp_get_dr_path_ptr( p_physp ), + OSM_LOG_ERROR); + + cl_list_insert_head( p_lsweep_ports, p_physp ); + } + } + } + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + OSM_LOG_EXIT( p_mgr->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_fwd_tbl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_fwd_tbl.c new file mode 100644 index 00000000..1365f385 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_fwd_tbl.c @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_fwd_tbl_t. + * This object represents a unicast forwarding table. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_fwd_tbl_init( + IN osm_fwd_tbl_t* const p_tbl, + IN const ib_switch_info_t* const p_si ) +{ + uint16_t tbl_cap; + ib_api_status_t status = IB_SUCCESS; + + /* + Determine the type and size of the forwarding table + used by this switch, then initialize accordingly. + The current implementation only supports switches + with linear forwarding tables. 
+ */ + tbl_cap = cl_ntoh16( p_si->lin_cap ); + + if( tbl_cap == 0 ) + { + /* + This switch does not support linear forwarding + tables. Error out for now. + */ + status = IB_UNSUPPORTED; + goto Exit; + } + + p_tbl->p_rnd_tbl = NULL; + + p_tbl->p_lin_tbl = osm_lin_tbl_new( tbl_cap ); + + if( p_tbl->p_lin_tbl == NULL ) + { + status = IB_INSUFFICIENT_MEMORY; + goto Exit; + } + + Exit: + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_fwd_tbl_destroy( + IN osm_fwd_tbl_t* const p_tbl ) +{ + if( p_tbl->p_lin_tbl ) + { + CL_ASSERT( p_tbl->p_rnd_tbl == NULL ); + osm_lin_tbl_delete( &p_tbl->p_lin_tbl ); + } + else + { + osm_rand_tbl_delete( &p_tbl->p_rnd_tbl ); + } +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_inform.c b/branches/Ndi/ulp/opensm/user/opensm/osm_inform.c new file mode 100644 index 00000000..2bd875c1 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_inform.c @@ -0,0 +1,763 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of inform record functions. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.18 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +typedef struct _osm_infr_match_ctxt +{ + cl_list_t *p_remove_infr_list; + ib_mad_notice_attr_t *p_ntc; +} osm_infr_match_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_infr_construct( + IN osm_infr_t* const p_infr ) +{ + memset( p_infr, 0, sizeof(osm_infr_t) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_destroy( + IN osm_infr_t* const p_infr ) +{ + free( p_infr ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_init( + IN osm_infr_t* const p_infr, + IN const osm_infr_t *p_infr_rec ) +{ + CL_ASSERT( p_infr ); + + /* what else do we need in the inform_record ??? */ + + /* copy the contents of the provided informinfo */ + memcpy( p_infr, p_infr_rec, sizeof(osm_infr_t) ); +} + +/********************************************************************** + **********************************************************************/ +osm_infr_t* +osm_infr_new( + IN const osm_infr_t *p_infr_rec ) +{ + osm_infr_t* p_infr; + + CL_ASSERT(p_infr_rec); + + p_infr = (osm_infr_t*)malloc( sizeof(osm_infr_t) ); + if( p_infr ) + { + osm_infr_init( p_infr, p_infr_rec ); + } + + return( p_infr ); +} + +/********************************************************************** + **********************************************************************/ +void +__dump_all_informs( + IN osm_subn_t const *p_subn, + IN osm_log_t *p_log) +{ + cl_list_item_t* p_list_item; + + OSM_LOG_ENTER( p_log, __dump_all_informs ); + + if( !osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + goto Exit; + + p_list_item = cl_qlist_head( &p_subn->sa_infr_list ); + while (p_list_item != cl_qlist_end( &p_subn->sa_infr_list )) + { + osm_dump_inform_info( p_log, + &((osm_infr_t*)p_list_item)->inform_record.inform_info, + OSM_LOG_DEBUG ); + p_list_item = cl_qlist_next( p_list_item ); + } + + Exit: + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + * Match an infr by the InformInfo and Address vector + **********************************************************************/ +static cl_status_t +__match_inf_rec( + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + osm_infr_t* p_infr_rec = (osm_infr_t *)context; + osm_infr_t* p_infr = (osm_infr_t*)p_list_item; + osm_log_t *p_log = p_infr_rec->p_infr_rcv->p_log; + cl_status_t status = CL_NOT_FOUND; + ib_gid_t all_zero_gid; + + OSM_LOG_ENTER( p_log, __match_inf_rec); + + if ( memcmp( &p_infr->report_addr, + &p_infr_rec->report_addr, + sizeof(p_infr_rec->report_addr)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by Address\n" ); + goto Exit; + } + + memset( &all_zero_gid, 0, sizeof(ib_gid_t) ); + + /* if inform_info.gid is not zero, ignore lid range */ + if ( !memcmp( &p_infr_rec->inform_record.inform_info.gid, + &all_zero_gid, + sizeof(p_infr_rec->inform_record.inform_info.gid)) ) + { + if ( memcmp( &p_infr->inform_record.inform_info.gid, + &p_infr_rec->inform_record.inform_info.gid, + 
sizeof(p_infr->inform_record.inform_info.gid)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.gid\n" ); + goto Exit; + } + } + else + { + if ( (p_infr->inform_record.inform_info.lid_range_begin != + p_infr_rec->inform_record.inform_info.lid_range_begin) || + (p_infr->inform_record.inform_info.lid_range_end != + p_infr_rec->inform_record.inform_info.lid_range_end) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.LIDRange\n" ); + goto Exit; + } + } + + if ( p_infr->inform_record.inform_info.trap_type != + p_infr_rec->inform_record.inform_info.trap_type ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.TrapType\n" ); + goto Exit; + } + + if ( p_infr->inform_record.inform_info.is_generic != + p_infr_rec->inform_record.inform_info.is_generic ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.IsGeneric\n" ); + goto Exit; + } + + if (p_infr->inform_record.inform_info.is_generic) + { + if ( p_infr->inform_record.inform_info.g_or_v.generic.trap_num != + p_infr_rec->inform_record.inform_info.g_or_v.generic.trap_num ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Generic.TrapNumber\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.generic.qpn_resp_time_val != + p_infr_rec->inform_record.inform_info.g_or_v.generic.qpn_resp_time_val ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Generic.QPNRespTimeVal\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.generic.node_type_msb != + p_infr_rec->inform_record.inform_info.g_or_v.generic.node_type_msb ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Generic.NodeTypeMSB\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.generic.node_type_lsb != + p_infr_rec->inform_record.inform_info.g_or_v.generic.node_type_lsb ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Generic.NodeTypeLSB\n" ); + else + status = CL_SUCCESS; + } + else + { + if ( p_infr->inform_record.inform_info.g_or_v.vend.dev_id != + p_infr_rec->inform_record.inform_info.g_or_v.vend.dev_id ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Vendor.DeviceID\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.vend.qpn_resp_time_val != + p_infr_rec->inform_record.inform_info.g_or_v.vend.qpn_resp_time_val ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Vendor.QPNRespTimeVal\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.vend.vendor_id_msb != + p_infr_rec->inform_record.inform_info.g_or_v.vend.vendor_id_msb ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Vendor.VendorIdMSB\n" ); + else if ( p_infr->inform_record.inform_info.g_or_v.vend.vendor_id_lsb != + p_infr_rec->inform_record.inform_info.g_or_v.vend.vendor_id_lsb ) + osm_log( p_log, OSM_LOG_DEBUG, + "__match_inf_rec: " + "Differ by InformInfo.Vendor.VendorIdLSB\n" ); + else + status = CL_SUCCESS; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return status; +} + +/********************************************************************** + **********************************************************************/ +osm_infr_t* +osm_infr_get_by_rec( + IN osm_subn_t const *p_subn, + IN osm_log_t *p_log, + IN osm_infr_t* const p_infr_rec ) +{ + cl_list_item_t* p_list_item; + + OSM_LOG_ENTER( p_log, osm_infr_get_by_rec ); + + __dump_all_informs( p_subn, p_log ); + 
+ osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_get_by_rec: " + "Looking for Inform Record\n" ); + osm_dump_inform_info( p_log, &(p_infr_rec->inform_record.inform_info), + OSM_LOG_DEBUG ); + osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_get_by_rec: " + "InformInfo list size %d\n", + cl_qlist_count(&p_subn->sa_infr_list) ); + + p_list_item = cl_qlist_find_from_head( + &p_subn->sa_infr_list, + __match_inf_rec, + p_infr_rec ); + + if( p_list_item == cl_qlist_end( &p_subn->sa_infr_list ) ) + p_list_item = NULL; + + OSM_LOG_EXIT( p_log ); + return (osm_infr_t*)p_list_item; +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_insert_to_db( + IN osm_subn_t *p_subn, + IN osm_log_t *p_log, + IN osm_infr_t *p_infr) +{ + OSM_LOG_ENTER( p_log, osm_infr_insert_to_db ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_insert_to_db: " + "Inserting new InformInfo Record into Database\n" ); + osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_insert_to_db: " + "Dump before insertion (size %d)\n", + cl_qlist_count(&p_subn->sa_infr_list) ); + __dump_all_informs( p_subn, p_log ); + +#if 0 + osm_dump_inform_info( p_log, + &(p_infr->inform_record.inform_info), OSM_LOG_DEBUG ); +#endif + + cl_qlist_insert_head( &p_subn->sa_infr_list, + &p_infr->list_item ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_insert_to_db: " + "Dump after insertion (size %d)\n", + cl_qlist_count(&p_subn->sa_infr_list) ); + __dump_all_informs( p_subn, p_log ); + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_remove_from_db( + IN osm_subn_t *p_subn, + IN osm_log_t *p_log, + IN osm_infr_t *p_infr) +{ + OSM_LOG_ENTER( p_log, osm_infr_remove_from_db ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_infr_remove_from_db: " + "Removing InformInfo Subscribing GID:0x%016" PRIx64 " : 0x%016" PRIx64 + " Enum:0x%X from Database\n", + cl_ntoh64(p_infr->inform_record.subscriber_gid.unicast.prefix), + cl_ntoh64(p_infr->inform_record.subscriber_gid.unicast.interface_id), + p_infr->inform_record.subscriber_enum + ); + + osm_dump_inform_info( p_log, &(p_infr->inform_record.inform_info), OSM_LOG_DEBUG ); + + cl_qlist_remove_item( &p_subn->sa_infr_list, + &p_infr->list_item ); + + osm_infr_destroy( p_infr ); + + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + * Send a report: + * Given a target address to send to and the notice. + * We need to send SubnAdmReport + **********************************************************************/ +static ib_api_status_t +__osm_send_report( + IN osm_infr_t* p_infr_rec, /* the informinfo */ + IN ib_mad_notice_attr_t* p_ntc /* notice to send */ + ) +{ + osm_madw_t* p_report_madw; + ib_mad_notice_attr_t* p_report_ntc; + ib_mad_t* p_mad; + ib_sa_mad_t* p_sa_mad; + static atomic32_t trap_fwd_trans_id = 0x02DAB000; + ib_api_status_t status; + osm_log_t * p_log = p_infr_rec->p_infr_rcv->p_log; + + OSM_LOG_ENTER( p_log, __osm_send_report ); + + /* HACK: who switches or uses the src and dest GIDs in the grh_info ?? 
*/
+
+	/* it is better to use LIDs since the GIDs might not be there for SMI traps */
+	osm_log( p_log, OSM_LOG_DEBUG,
+		"__osm_send_report: "
+		"Forwarding Notice Event from LID:0x%X"
+		" to InformInfo LID: 0x%X TID:0x%X\n",
+		cl_ntoh16(p_ntc->issuer_lid),
+		cl_ntoh16(p_infr_rec->report_addr.dest_lid),
+		trap_fwd_trans_id
+		);
+
+	/* get the MAD to send */
+	p_report_madw = osm_mad_pool_get( p_infr_rec->p_infr_rcv->p_mad_pool,
+		p_infr_rec->h_bind,
+		MAD_BLOCK_SIZE,
+		&(p_infr_rec->report_addr) );
+
+	if( !p_report_madw )
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"__osm_send_report: ERR 0203: "
+			"osm_mad_pool_get failed\n" );
+		status = IB_ERROR;
+		goto Exit;
+	}
+
+	/* touch the MAD wrapper only after the NULL check above */
+	p_report_madw->resp_expected = TRUE;
+
+	/* advance trap trans id (can't simply ++ on some systems inside ntoh) */
+	p_mad = osm_madw_get_mad_ptr( p_report_madw );
+	ib_mad_init_new( p_mad,
+		IB_MCLASS_SUBN_ADM,
+		2,
+		IB_MAD_METHOD_REPORT,
+		cl_hton64( (uint64_t)cl_atomic_inc( &trap_fwd_trans_id ) ),
+		IB_MAD_ATTR_NOTICE,
+		0 );
+
+	p_sa_mad = osm_madw_get_sa_mad_ptr( p_report_madw );
+
+	p_report_ntc = (ib_mad_notice_attr_t*)&(p_sa_mad->data);
+
+	/* copy the notice */
+	*p_report_ntc = *p_ntc;
+
+	/* The TRUE is for: response is expected */
+	status = osm_vendor_send( p_report_madw->h_bind, p_report_madw, TRUE );
+	if ( status != IB_SUCCESS )
+	{
+		osm_log( p_log, OSM_LOG_ERROR,
+			"__osm_send_report: ERR 0204: "
+			"osm_vendor_send status = %s\n",
+			ib_get_err_str(status) );
+		goto Exit;
+	}
+
+ Exit:
+	OSM_LOG_EXIT( p_log );
+	return(status);
+}
+
+/**********************************************************************
+ * This routine compares a given Notice and a ListItem of InformInfo type.
+ * PREREQUISITE:
+ * The Notice.GID should be pre-filled with the trap generator GID
+ **********************************************************************/
+static void
+__match_notice_to_inf_rec(
+	IN cl_list_item_t* const p_list_item,
+	IN void* context )
+{
+	osm_infr_match_ctxt_t* p_infr_match = (osm_infr_match_ctxt_t *)context;
+	ib_mad_notice_attr_t* p_ntc = p_infr_match->p_ntc;
+	cl_list_t* p_infr_to_remove_list = p_infr_match->p_remove_infr_list;
+	osm_infr_t* p_infr_rec = (osm_infr_t*)p_list_item;
+	ib_inform_info_t *p_ii = &(p_infr_rec->inform_record.inform_info);
+	cl_status_t status = CL_NOT_FOUND;
+	osm_log_t *p_log = p_infr_rec->p_infr_rcv->p_log;
+	osm_subn_t *p_subn = p_infr_rec->p_infr_rcv->p_subn;
+	ib_gid_t source_gid;
+	osm_port_t* p_src_port;
+	osm_port_t* p_dest_port;
+
+	OSM_LOG_ENTER( p_log, __match_notice_to_inf_rec );
+
+	/* matching rules
+	 * InformInfo   Notice
+	 * GID          IssuerGID    if non zero must match the trap
+	 * LIDRange     IssuerLID    apply only if GID=0
+	 * IsGeneric    IsGeneric    is compulsory and must match the trap
+	 * Type         Type         if not 0xFFFF must match
+	 * TrapNumber   TrapNumber   if not 0xFFFF must match
+	 * DeviceId     DeviceID     if not 0xFFFF must match
+	 * QPN          don't care
+	 * ProducerType ProducerType match or 0xFFFFFF // EZ: actually my interpretation
+	 * VendorID     VendorID     match or 0xFFFFFF
+	 */
+
+	/* GID IssuerGID if non zero must match the trap */
+	if ( p_ii->gid.unicast.prefix != 0 || p_ii->gid.unicast.interface_id != 0 )
+	{
+		/* match by GID */
+		if ( memcmp(&(p_ii->gid), &(p_ntc->issuer_gid), sizeof(ib_gid_t)) )
+		{
+			osm_log( p_log, OSM_LOG_DEBUG,
+				"__match_notice_to_inf_rec: "
+				"Mismatch by GID\n" );
+			goto Exit;
+		}
+	}
+	else
+	{
+		/* LIDRange IssuerLID apply only if GID=0 */
+		/* If lid_range_begin of the informInfo is 0xFFFF - then it should be ignored.
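+		   (A lid_range_begin of 0xFFFF is the sentinel meaning the
+		   subscription applies regardless of the issuer's LID, which is
+		   why no range check is performed in that case.)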
*/ + if ( p_ii->lid_range_begin != 0xFFFF ) + { + /* a real lid range is given - check it */ + if ( (cl_hton16(p_ii->lid_range_begin) > cl_hton16(p_ntc->issuer_lid)) || + (cl_hton16(p_ntc->issuer_lid) > cl_hton16(p_ii->lid_range_end)) ) { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by LID Range. Needed: 0x%X <= 0x%X <= 0x%X\n", + cl_hton16(p_ii->lid_range_begin), + cl_hton16(p_ntc->issuer_lid), + cl_hton16(p_ii->lid_range_end) + ); + goto Exit; + } + } + } + + /* IsGeneric IsGeneric is compulsory and must match the trap */ + if ( (p_ii->is_generic && ! ib_notice_is_generic(p_ntc)) || + (!p_ii->is_generic && ib_notice_is_generic(p_ntc)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Generic/Vendor\n" ); + goto Exit; + } + + /* Type Type if not 0xFFFF must match */ + if ( (p_ii->trap_type != 0xFFFF) && + (cl_ntoh16(p_ii->trap_type) != ib_notice_get_type(p_ntc)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Type\n" ); + goto Exit; + } + + /* based on generic type */ + if ( p_ii->is_generic ) + { + /* TrapNumber TrapNumber if not 0xFFFF must match */ + if ( (p_ii->g_or_v.generic.trap_num != 0xFFFF) && + (p_ii->g_or_v.generic.trap_num != p_ntc->g_or_v.generic.trap_num) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Trap Num\n" ); + goto Exit; + } + + /* ProducerType ProducerType match or 0xFFFFFF */ + if ( (cl_ntoh32(ib_inform_info_get_node_type(p_ii)) != 0xFFFFFF) && + (ib_inform_info_get_node_type(p_ii) != ib_notice_get_prod_type(p_ntc)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Node Type: II=0x%06X Trap=0x%06X\n", + cl_ntoh32(ib_inform_info_get_node_type(p_ii)), + cl_ntoh32(ib_notice_get_prod_type(p_ntc)) + ); + goto Exit; + } + } + else + { + /* DeviceId DeviceID if not 0xFFFF must match */ + if ( (p_ii->g_or_v.vend.dev_id != 0xFFFF) && + (p_ii->g_or_v.vend.dev_id != p_ntc->g_or_v.vend.dev_id) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Dev Id\n" ); + goto Exit; + } + + /* VendorID VendorID match or 0xFFFFFF */ + if ( (ib_inform_info_get_vend_id(p_ii) != CL_HTON32(0xFFFFFF)) && + (ib_inform_info_get_vend_id(p_ii) != ib_notice_get_vend_id(p_ntc)) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Vendor ID\n" ); + goto Exit; + } + } + + /* Check if there is a pkey match. o13-17.1.1 */ + /* Check if the issuer of the trap is the SM. If it is, then the gid + comparison should be done on the trap source (saved as the gid in the + data details field). 
+ If the issuer gid is not the SM - then it is the guid of the trap + source */ + if ( (cl_ntoh64(p_ntc->issuer_gid.unicast.prefix) == p_subn->opt.subnet_prefix) && + (cl_ntoh64(p_ntc->issuer_gid.unicast.interface_id) == p_subn->sm_port_guid) ) + { + /* The issuer is the SM then this is trap 64-67 - compare the gid + with the gid saved on the data details */ + source_gid = p_ntc->data_details.ntc_64_67.gid; + } + else + { + source_gid = p_ntc->issuer_gid; + } + p_src_port = (osm_port_t*)cl_qmap_get( &p_subn->port_guid_tbl, + source_gid.unicast.interface_id ); + + if( p_src_port == (osm_port_t*)cl_qmap_end( &(p_subn->port_guid_tbl)) ) + { + osm_log( p_log, OSM_LOG_INFO, + "__match_notice_to_inf_rec: " + "Cannot find source port with GUID:0x%016" PRIx64 "\n", + cl_ntoh64(source_gid.unicast.interface_id) ); + goto Exit; + } + + p_dest_port = + cl_ptr_vector_get( &p_subn->port_lid_tbl, + cl_ntoh16(p_infr_rec->report_addr.dest_lid) ); + if( !p_dest_port ) + { + osm_log( p_log, OSM_LOG_INFO, + "__match_notice_to_inf_rec: " + "Cannot find destination port with LID:0x%04x\n", + cl_ntoh16(p_infr_rec->report_addr.dest_lid) ); + goto Exit; + } + + if (osm_port_share_pkey( p_log, p_src_port, p_dest_port ) == FALSE ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "Mismatch by Pkey\n" ); + /* According to o13-17.1.2 - If this informInfo does not have + lid_range_begin of 0xFFFF, then this informInfo request + should be removed from database */ + if ( p_ii->lid_range_begin != 0xFFFF ) + { + osm_log( p_log, OSM_LOG_VERBOSE, + "__match_notice_to_inf_rec: " + "Pkey mismatch on lid_range_begin != 0xFFFF. " + "Need to remove this informInfo from db\n" ); + /* add the informInfo record to the remove_infr list */ + cl_list_insert_tail( p_infr_to_remove_list, p_infr_rec ); + } + goto Exit; + } + + /* send the report to the address provided in the inform record */ + osm_log( p_log, OSM_LOG_DEBUG, + "__match_notice_to_inf_rec: " + "MATCH! Sending Report...\n" ); + __osm_send_report( p_infr_rec, p_ntc ); + status = CL_SUCCESS; + + Exit: + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + * Once a Trap was received by osm_trap_rcv, or a Trap sourced by + * the SM was sent (Traps 64-67), this routine is called with a copy of + * the notice data. + * Given a notice attribute - compare and see if it matches the InformInfo + * element and if it does - call the Report(Notice) for the + * target QP registered by the address stored in the InformInfo element + **********************************************************************/ +ib_api_status_t +osm_report_notice( + IN osm_log_t* const p_log, + IN osm_subn_t* p_subn, + IN ib_mad_notice_attr_t *p_ntc ) +{ + osm_infr_match_ctxt_t context; + cl_list_t infr_to_remove_list; + osm_infr_t* p_infr_rec; + osm_infr_t* p_next_infr_rec; + + OSM_LOG_ENTER( p_log, osm_report_notice ); + + /* + * we must make sure we are ready for this... + * note that the trap receivers might be initialized before + * the osm_infr_init call is performed. 
+ */ + if ( p_subn->sa_infr_list.state != CL_INITIALIZED ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_report_notice: " + "Ignoring Notice Reports since Inform List is not initialized yet!\n" ); + return IB_ERROR; + } + + /* an official Event information log */ + if ( ib_notice_is_generic(p_ntc) ) + { + osm_log( p_log, OSM_LOG_INFO, + "osm_report_notice: " + "Reporting Generic Notice type:%u num:%u" + " from LID:0x%04X GID:0x%016" PRIx64 + ",0x%016" PRIx64 "\n", + ib_notice_get_type(p_ntc), + cl_ntoh16(p_ntc->g_or_v.generic.trap_num), + cl_ntoh16(p_ntc->issuer_lid), + cl_ntoh64(p_ntc->issuer_gid.unicast.prefix), + cl_ntoh64(p_ntc->issuer_gid.unicast.interface_id) + ); + } + else + { + osm_log( p_log, OSM_LOG_INFO, + "osm_report_notice: " + "Reporting Vendor Notice type:%u vend:%u dev:%u" + " from LID:0x%04X GID:0x%016" PRIx64 + ",0x%016" PRIx64 "\n", + ib_notice_get_type(p_ntc), + cl_ntoh32(ib_notice_get_vend_id(p_ntc)), + cl_ntoh16(p_ntc->g_or_v.vend.dev_id), + cl_ntoh16(p_ntc->issuer_lid), + cl_ntoh64(p_ntc->issuer_gid.unicast.prefix), + cl_ntoh64(p_ntc->issuer_gid.unicast.interface_id) + ); + } + + /* Create a list that will hold all the infr records that should + be removed due to violation. o13-17.1.2 */ + cl_list_construct( &infr_to_remove_list ); + cl_list_init( &infr_to_remove_list, 5 ); + context.p_remove_infr_list = &infr_to_remove_list; + context.p_ntc = p_ntc; + + /* go over all inform info available at the subnet */ + /* try match to the given notice and send if match */ + cl_qlist_apply_func(&(p_subn->sa_infr_list), + __match_notice_to_inf_rec, + &context ); + + /* If we inserted items into the infr_to_remove_list - we need to + remove them */ + p_infr_rec = (osm_infr_t*)cl_list_remove_head(&infr_to_remove_list); + while ( p_infr_rec != NULL ) + { + p_next_infr_rec = (osm_infr_t*)cl_list_remove_head(&infr_to_remove_list); + osm_infr_remove_from_db ( p_subn, p_log, p_infr_rec ); + p_infr_rec = p_next_infr_rec; + } + cl_list_destroy(&infr_to_remove_list); + + OSM_LOG_EXIT( p_log ); + + return(IB_SUCCESS); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_lid_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_lid_mgr.c new file mode 100644 index 00000000..9217ea00 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_lid_mgr.c @@ -0,0 +1,1486 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_lid_mgr_t.
+ *    This file implements the LID Manager object which is responsible for
+ *    assigning LIDs to all ports on the subnet.
+ *
+ * DATA STRUCTURES:
+ *    p_subn->port_lid_tbl : a vector pointing from lid to its port.
+ *    osm db guid2lid domain : a hash from guid to lid (min lid).
+ *    p_subn->port_guid_tbl : a map from guid to discovered port obj.
+ *
+ * ALGORITHM:
+ *
+ * 0. we define a function to obtain the correct port lid:
+ *    __osm_lid_mgr_get_port_lid( p_mgr, port, &min_lid ):
+ *    0.1 if the port info lid matches the guid2lid return 0
+ *    0.2 if the port info has a lid and that range is empty in
+ *        port_lid_tbl, return 0 and update the port_lid_tbl and
+ *        guid2lid
+ *    0.3 else find an empty space in port_lid_tbl, update the
+ *        port_lid_tbl and guid2lid, return 1 to flag a change required.
+ *
+ * 1. During initialization:
+ *    1.1 initialize the guid2lid database domain.
+ *    1.2 if reassign_lid is not set:
+ *        1.2.1 read the persistent data for the domain.
+ *        1.2.2 validate that no lid is used twice and that lid ranges
+ *              are aligned to 2^lmc
+ *
+ * 2. During SM port lid assignment:
+ *    2.1 if reassign_lids is set, make it 2^lmc
+ *    2.2 cleanup all port_lid_tbl and re-fill it according to guid2lid
+ *    2.3 call __osm_lid_mgr_get_port_lid for the SM port
+ *    2.4 set the port info
+ *
+ * 3. During all other ports lid assignment:
+ *    3.1 go through all ports in the subnet
+ *        3.1.1 call __osm_lid_mgr_get_port_lid
+ *        3.1.2 if a change is required send the port info
+ *    3.2 if any change send the signal PENDING...
+ *
+ * 4. Store the guid2lid
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.15 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/**********************************************************************
+  lid range item of qlist
+ **********************************************************************/
+typedef struct _osm_lid_mgr_range {
+	cl_list_item_t item;
+	uint16_t min_lid;
+	uint16_t max_lid;
+} osm_lid_mgr_range_t;
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lid_mgr_construct(
+	IN osm_lid_mgr_t* const p_mgr )
+{
+	memset( p_mgr, 0, sizeof(*p_mgr) );
+	cl_ptr_vector_construct( &p_mgr->used_lids );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lid_mgr_destroy(
+	IN osm_lid_mgr_t* const p_mgr )
+{
+	cl_list_item_t *p_item;
+
+	OSM_LOG_ENTER( p_mgr->p_log, osm_lid_mgr_destroy );
+
+	cl_ptr_vector_destroy( &p_mgr->used_lids );
+	p_item = cl_qlist_remove_head( &p_mgr->free_ranges );
+	while ( p_item != cl_qlist_end( &p_mgr->free_ranges ) )
+	{
+		free((osm_lid_mgr_range_t *)p_item);
+		p_item = cl_qlist_remove_head( &p_mgr->free_ranges );
+	}
+	OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+Validate the guid to lid data by making sure that under the current
+LMC we did not get duplicates. If we do, flag them as errors and remove
+the entry.
+**********************************************************************/
+static void
+__osm_lid_mgr_validate_db(
+	IN osm_lid_mgr_t* p_mgr)
+{
+	cl_qlist_t guids;
+	osm_db_guid_elem_t *p_item;
+	uint16_t lid;
+	uint16_t min_lid;
+	uint16_t max_lid;
+	uint16_t lmc_mask;
+	boolean_t lids_ok;
+
+	OSM_LOG_ENTER( p_mgr->p_log, __osm_lid_mgr_validate_db );
+
+	if (p_mgr->p_subn->opt.lmc)
+		lmc_mask = ~((1 << p_mgr->p_subn->opt.lmc) - 1);
+	else
+		lmc_mask = 0xffff;
+
+	cl_qlist_init( &guids );
+
+	if (osm_db_guid2lid_guids( p_mgr->p_g2l, &guids ))
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+			"__osm_lid_mgr_validate_db: ERR 0310: "
+			"could not get guid list\n");
+		goto Exit;
+	}
+
+	p_item = (osm_db_guid_elem_t*)cl_qlist_remove_head(&guids);
+	while ((cl_list_item_t*)p_item != cl_qlist_end(&guids))
+	{
+		if (osm_db_guid2lid_get( p_mgr->p_g2l, p_item->guid, &min_lid, &max_lid ))
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+				"__osm_lid_mgr_validate_db: ERR 0311: "
+				"could not get lid for guid:0x%016" PRIx64 "\n",
+				p_item->guid
+				);
+		}
+		else
+		{
+			lids_ok = TRUE;
+
+			if ((min_lid > max_lid) || (min_lid == 0) || (p_item->guid == 0) ||
+			    (max_lid > p_mgr->p_subn->max_unicast_lid_ho))
+			{
+				osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+					"__osm_lid_mgr_validate_db: ERR 0312: "
+					"Illegal LID range [0x%x:0x%x] for guid:0x%016" PRIx64
+					"\n",
+					min_lid, max_lid, p_item->guid
+					);
+				lids_ok = FALSE;
+			}
+			else if ((min_lid != max_lid) && ((min_lid & lmc_mask) != min_lid))
+			{
+				/* check that the lids define a range that is valid
+				   for the current LMC mask */
+				osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+					"__osm_lid_mgr_validate_db: ERR 0313: "
+					"LID range [0x%x:0x%x] for guid:0x%016" PRIx64
+					" is not aligned according to mask:0x%04x\n",
+					min_lid, max_lid, p_item->guid, lmc_mask
+					);
+				lids_ok = FALSE;
+			}
+			else
+			{
+				/* check if the lids were not previously assigned */
+				for (lid = min_lid; lid <= max_lid; lid++)
+				{
+					if (( cl_ptr_vector_get_size( &p_mgr->used_lids ) > lid ) &&
+					    ( cl_ptr_vector_get( &p_mgr->used_lids, lid ) ) )
+					{
+						osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+							"__osm_lid_mgr_validate_db: ERR 0314: "
+							"0x%04x for guid:0x%016" PRIx64
+							" was previously used\n",
+							lid, p_item->guid
+							);
+						lids_ok = FALSE;
+					}
+				}
+			}
+
+			if (!lids_ok)
+			{
+				if (osm_db_guid2lid_delete( p_mgr->p_g2l, p_item->guid ))
+					osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+						"__osm_lid_mgr_validate_db: ERR 0315: "
+						"failed to delete entry for guid:0x%016" PRIx64
+						"\n",
+						p_item->guid
+						);
+			}
+			else
+			{
+				/* mark these lids as visited (used) */
+				for (lid = min_lid; lid <= max_lid; lid++)
+					cl_ptr_vector_set( &p_mgr->used_lids, lid, (void *)1);
+			}
+		} /* got a lid */
+		p_item = (osm_db_guid_elem_t*)cl_qlist_remove_head(&guids);
+	} /* all guids */
+ Exit:
+	OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_lid_mgr_init(
+	IN osm_lid_mgr_t* const p_mgr,
+	IN osm_req_t* const p_req,
+	IN osm_subn_t* const p_subn,
+	IN osm_db_t* const p_db,
+	IN osm_log_t* const p_log,
+	IN cl_plock_t* const p_lock )
+{
+	ib_api_status_t status = IB_SUCCESS;
+
+	OSM_LOG_ENTER( p_log, osm_lid_mgr_init );
+
+	CL_ASSERT( p_req );
+	CL_ASSERT( p_subn );
+	CL_ASSERT( p_lock );
+	CL_ASSERT( p_db );
+
+	osm_lid_mgr_construct( p_mgr );
+
+	p_mgr->p_log = p_log;
+	p_mgr->p_subn = p_subn;
+	p_mgr->p_db = p_db;
+	p_mgr->p_lock = p_lock;
+	p_mgr->p_req = p_req;
+
+	/*
we initialize and restore the db domain of guid to lid map */ + p_mgr->p_g2l = osm_db_domain_init(p_mgr->p_db, "guid2lid"); + if (! p_mgr->p_g2l) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_lid_mgr_init: ERR 0316: " + "Error initializing Guid-to-Lid persistent database\n"); + status = IB_ERROR; + goto Exit; + } + + cl_ptr_vector_init( &p_mgr->used_lids, 100, 40 ); + cl_qlist_init( &p_mgr->free_ranges ); + + /* we use the stored guid to lid table if not forced to reassign */ + if (!p_mgr->p_subn->opt.reassign_lids) + { + if (osm_db_restore(p_mgr->p_g2l)) + { + if (p_subn->opt.exit_on_fatal) + { + osm_log( p_mgr->p_log, OSM_LOG_SYS, + "FATAL: Error restoring Guid-to-Lid persistent database\n" ); + status = IB_ERROR; + goto Exit; + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_lid_mgr_init: ERR 0317: " + "Error restoring Guid-to-Lid persistent database\n"); + } + } + + /* we need to make sure we did not get duplicates with + current lmc */ + __osm_lid_mgr_validate_db(p_mgr); + } + +Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} + +static uint16_t +__osm_trim_lid( + IN uint16_t lid ) +{ + if ((lid > IB_LID_UCAST_END_HO) || + (lid < IB_LID_UCAST_START_HO)) + return 0; + return lid; +} + +/********************************************************************** + initialize the manager for a new sweep: + scans the known persistent assignment and port_lid_tbl + re-calculate all empty ranges. + cleanup invalid port_lid_tbl entries +**********************************************************************/ +static int +__osm_lid_mgr_init_sweep( + IN osm_lid_mgr_t* const p_mgr ) +{ + cl_ptr_vector_t *p_discovered_vec = &p_mgr->p_subn->port_lid_tbl; + cl_ptr_vector_t *p_persistent_vec = &p_mgr->used_lids; + uint16_t max_defined_lid; + uint16_t max_persistent_lid; + uint16_t max_discovered_lid; + uint16_t lid; + uint16_t disc_min_lid; + uint16_t disc_max_lid; + uint16_t db_min_lid; + uint16_t db_max_lid; + int status = 0; + cl_list_item_t *p_item; + boolean_t is_free; + osm_lid_mgr_range_t *p_range = NULL; + osm_port_t *p_port; + cl_qmap_t *p_port_guid_tbl; + uint8_t lmc_num_lids = (uint8_t)(1 << p_mgr->p_subn->opt.lmc); + uint16_t lmc_mask; + uint16_t req_lid, num_lids; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_lid_mgr_init_sweep ); + + if (p_mgr->p_subn->opt.lmc) + lmc_mask = ~((1 << p_mgr->p_subn->opt.lmc) - 1); + else + lmc_mask = 0xffff; + + /* if we came out of standby we need to discard any previous guid2lid + info we might have. + Do this only if the honor_guid2lid_file option is FALSE. If not, then + need to honor this file. */ + if ( p_mgr->p_subn->coming_out_of_standby == TRUE ) + { + if ( p_mgr->p_subn->opt.honor_guid2lid_file == FALSE ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_lid_mgr_init_sweep: " + "Ignore guid2lid file when coming out of standby\n"); + osm_db_clear( p_mgr->p_g2l ); + for (lid = 0; lid < cl_ptr_vector_get_size(&p_mgr->used_lids); lid++) + cl_ptr_vector_set(p_persistent_vec, lid, NULL); + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_lid_mgr_init_sweep: " + "Honor current guid2lid file when coming out of standby\n"); + osm_db_clear( p_mgr->p_g2l ); + if (osm_db_restore(p_mgr->p_g2l)) + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_lid_mgr_init_sweep: ERR 0306: " + "Error restoring Guid-to-Lid persistent database. 
Ignoring it\n");
+		}
+	}
+
+	/* we need to cleanup the empty ranges list */
+	p_item = cl_qlist_remove_head( &p_mgr->free_ranges );
+	while ( p_item != cl_qlist_end( &p_mgr->free_ranges ) )
+	{
+		free( (osm_lid_mgr_range_t *)p_item );
+		p_item = cl_qlist_remove_head( &p_mgr->free_ranges );
+	}
+
+	/* first clean up the port_lid_tbl */
+	for (lid = 0; lid < cl_ptr_vector_get_size(p_discovered_vec); lid++)
+		cl_ptr_vector_set(p_discovered_vec, lid, NULL);
+
+	/* if we are in the first sweep and in reassign lids mode
+	   we should ignore all the available info and simply define one
+	   huge empty range */
+	if ((p_mgr->p_subn->first_time_master_sweep == TRUE) &&
+	    (p_mgr->p_subn->opt.reassign_lids == TRUE ))
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+			"__osm_lid_mgr_init_sweep: "
+			"Skipping all lids as we are reassigning them\n");
+		p_range =
+			(osm_lid_mgr_range_t *)malloc(sizeof(osm_lid_mgr_range_t));
+		p_range->min_lid = 1;
+		goto AfterScanningLids;
+	}
+
+	/* go over all discovered ports and mark their entries */
+	p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl;
+
+	for( p_port = (osm_port_t*)cl_qmap_head( p_port_guid_tbl );
+	     p_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl );
+	     p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ) )
+	{
+		osm_port_get_lid_range_ho(p_port, &disc_min_lid, &disc_max_lid);
+		disc_min_lid = __osm_trim_lid(disc_min_lid);
+		disc_max_lid = __osm_trim_lid(disc_max_lid);
+		for (lid = disc_min_lid; lid <= disc_max_lid; lid++)
+			cl_ptr_vector_set(p_discovered_vec, lid, p_port );
+		/* make sure the guid2lid entry is valid. If not, clean it. */
+		if (!osm_db_guid2lid_get( p_mgr->p_g2l,
+					  cl_ntoh64(osm_port_get_guid(p_port)),
+					  &db_min_lid, &db_max_lid))
+		{
+			if ( !p_port->p_node->sw ||
+			     osm_switch_sp0_is_lmc_capable(p_port->p_node->sw, p_mgr->p_subn))
+				num_lids = lmc_num_lids;
+			else
+				num_lids = 1;
+
+			if ((num_lids != 1) &&
+			    (((db_min_lid & lmc_mask) != db_min_lid) ||
+			     (db_max_lid - db_min_lid + 1 < num_lids)) )
+			{
+				/* Not aligned, or not wide enough, then remove the entry */
+				osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+					"__osm_lid_mgr_init_sweep: "
+					"Cleaning persistent entry for guid:0x%016" PRIx64
+					" illegal range:[0x%x:0x%x]\n",
+					cl_ntoh64(osm_port_get_guid(p_port)), db_min_lid,
+					db_max_lid );
+				osm_db_guid2lid_delete( p_mgr->p_g2l,
+					cl_ntoh64(osm_port_get_guid(p_port)));
+				for ( lid = db_min_lid ; lid <= db_max_lid ; lid++ )
+					cl_ptr_vector_set(p_persistent_vec, lid, NULL);
+			}
+		}
+	}
+
+	/*
+	   Our task is to find free lid ranges.
+	   A lid is considered used if:
+	   1. a persistent assignment exists for it, or
+	   2. the lid is used by a discovered port that does not have a
+	      persistent assignment.
+
+	   Scan through all lid values of both the persistent table and the
+	   discovered table.
+	   If the lid has an assigned port in the discovered table:
+	   * make sure the lid matches the persistent table, or
+	   * there is no other persistent assignment for that lid,
+	   * else clean up the port_lid_tbl and mark this as an empty range.
+	   Else, if the lid does not have an entry in the persistent table,
+	   mark it as free.
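+
+	   Worked example (hypothetical values): with LMC=1 every port owns
+	   two lids. If the persistent table maps lids 2-3 and a discovered
+	   port without a persistent entry sits on lids 6-7, the scan below
+	   yields the free ranges [1:1] and [4:5], and the range opened at
+	   lid 8 is later extended up to max_unicast_lid_ho - 1.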
+	*/
+
+	/* find the range of lids to scan */
+	max_discovered_lid = (uint16_t)cl_ptr_vector_get_size(p_discovered_vec);
+	max_persistent_lid = (uint16_t)cl_ptr_vector_get_size(p_persistent_vec);
+
+	/* but the vectors have one extra entry for lid=0 */
+	if (max_discovered_lid) max_discovered_lid--;
+	if (max_persistent_lid) max_persistent_lid--;
+
+	if (max_persistent_lid > max_discovered_lid)
+		max_defined_lid = max_persistent_lid;
+	else
+		max_defined_lid = max_discovered_lid;
+
+	for (lid = 1; lid <= max_defined_lid ; lid++)
+	{
+		is_free = TRUE;
+		/* first check to see if the lid is used by a persistent assignment */
+		if ((lid <= max_persistent_lid) && cl_ptr_vector_get(p_persistent_vec, lid))
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"__osm_lid_mgr_init_sweep: "
+				"0x%04x is not free as it's mapped by the persistent db\n",
+				lid);
+			is_free = FALSE;
+		}
+		else
+		{
+			/* check this is a discovered port */
+			if (lid <= max_discovered_lid && (p_port = (osm_port_t *)cl_ptr_vector_get(p_discovered_vec, lid)))
+			{
+				/* we have a port. Now let's see if we can preserve its lid range. */
+				/* For that, we need to make sure:
+				   1. The port has a (legal) persistency entry. Then the local lid
+				      is free (we will use the persistency value).
+				   2. Can the port keep its local assignment?
+				      a. Make sure the lid is aligned.
+				      b. Make sure all needed lids (for the lmc) are free according
+				         to the persistency table.
+				*/
+				/* verify that the guid of the port is not persistently mapped to
+				   another range */
+				if (!osm_db_guid2lid_get( p_mgr->p_g2l,
+							  cl_ntoh64(osm_port_get_guid(p_port)),
+							  &db_min_lid, &db_max_lid))
+				{
+					osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+						"__osm_lid_mgr_init_sweep: "
+						"0x%04x is free as it was discovered "
+						"but mapped by the persistent db to [0x%04x:0x%04x]\n",
+						lid, db_min_lid, db_max_lid);
+				}
+				else
+				{
+					/* can the port keep its assignment? */
+					/* get the lid range of that port, and the required number
+					   of lids we are about to assign to it */
+					osm_port_get_lid_range_ho(p_port, &disc_min_lid, &disc_max_lid);
+					if ( !p_port->p_node->sw ||
+					     osm_switch_sp0_is_lmc_capable(p_port->p_node->sw, p_mgr->p_subn))
+					{
+						disc_max_lid = disc_min_lid + lmc_num_lids - 1;
+						num_lids = lmc_num_lids;
+					}
+					else
+						num_lids = 1;
+
+					/* Make sure the lid is aligned */
+					if ((num_lids != 1) && ((disc_min_lid & lmc_mask) != disc_min_lid))
+					{
+						/* The lid cannot be used */
+						osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+							"__osm_lid_mgr_init_sweep: "
+							"0x%04x is free as it was discovered "
+							"but not aligned\n",
+							lid );
+					}
+					else
+					{
+						/* check that all needed lids are not persistently mapped */
+						is_free = FALSE;
+						for ( req_lid = disc_min_lid + 1 ; req_lid <= disc_max_lid ; req_lid++ )
+						{
+							if ((req_lid <= max_persistent_lid) && cl_ptr_vector_get(p_persistent_vec, req_lid))
+							{
+								osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+									"__osm_lid_mgr_init_sweep: "
+									"0x%04x is free as it was discovered "
+									"but mapped\n",
+									lid );
+								is_free = TRUE;
+								break;
+							}
+						}
+
+						if (is_free == FALSE)
+						{
+							/* This port will use its local lid, and consume the entire
+							   required lid range. Thus we can skip that range. */
+							/* If the disc_max_lid is greater than lid, we can skip right to it,
+							   since we've done all necessary checks on the lids in between.
*/
+							if (disc_max_lid > lid)
+								lid = disc_max_lid;
+						}
+					}
+				}
+			}
+		}
+
+		if (is_free)
+		{
+			if (p_range)
+			{
+				p_range->max_lid = lid;
+			}
+			else
+			{
+				p_range =
+					(osm_lid_mgr_range_t *)malloc(sizeof(osm_lid_mgr_range_t));
+				p_range->min_lid = lid;
+				p_range->max_lid = lid;
+			}
+		}
+		else
+		{
+			/* this lid is used so we need to finalize the previous free range */
+			if (p_range)
+			{
+				cl_qlist_insert_tail( &p_mgr->free_ranges, &p_range->item );
+				osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+					"__osm_lid_mgr_init_sweep: "
+					"new free lid range [0x%x:0x%x]\n",
+					p_range->min_lid, p_range->max_lid);
+				p_range = NULL;
+			}
+		}
+	}
+
+ AfterScanningLids:
+	/* after scanning all known lids we need to extend the last range
+	   to the max allowed lid */
+	/*
+	   The p_range can be NULL in one of 2 cases:
+	   1. If max_defined_lid == 0. In this case, we want the entire range.
+	   2. If all lids discovered in the loop were mapped. In this case,
+	      no free range exists and we want to define it after the last
+	      mapped lid.
+	*/
+	if (!p_range)
+	{
+		p_range =
+			(osm_lid_mgr_range_t *)malloc(sizeof(osm_lid_mgr_range_t));
+		p_range->min_lid = lid;
+	}
+	p_range->max_lid = p_mgr->p_subn->max_unicast_lid_ho - 1;
+	cl_qlist_insert_tail( &p_mgr->free_ranges, &p_range->item );
+	osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+		"__osm_lid_mgr_init_sweep: "
+		"final free lid range [0x%x:0x%x]\n",
+		p_range->min_lid, p_range->max_lid );
+
+	OSM_LOG_EXIT( p_mgr->p_log );
+	return status;
+}
+
+/**********************************************************************
+ check if the given range of lids is free
+**********************************************************************/
+static boolean_t
+__osm_lid_mgr_is_range_not_persistent(
+	IN osm_lid_mgr_t* const p_mgr,
+	IN const uint16_t lid,
+	IN const uint16_t num_lids )
+{
+	uint16_t i;
+	cl_status_t status;
+	osm_port_t *p_port;
+	const uint8_t start_lid = (uint8_t)(1 << p_mgr->p_subn->opt.lmc);
+	const cl_ptr_vector_t* const p_tbl = &p_mgr->used_lids;
+
+	if( lid < start_lid )
+		return( FALSE );
+
+	for( i = lid; i < lid + num_lids; i++ )
+	{
+		status = cl_ptr_vector_at( p_tbl, i, (void*)&p_port );
+		if( status == CL_SUCCESS )
+		{
+			if(p_port != NULL)
+				return( FALSE );
+		}
+		else
+		{
+			/*
+			   We are out of range in the array.
+			   Consider all further entries "free".
+			*/
+			return( TRUE );
+		}
+	}
+
+	return( TRUE );
+}
+
+/**********************************************************************
+find a free lid range
+**********************************************************************/
+static void
+__osm_lid_mgr_find_free_lid_range(
+	IN osm_lid_mgr_t* const p_mgr,
+	IN const uint8_t num_lids,
+	OUT uint16_t* const p_min_lid,
+	OUT uint16_t* const p_max_lid )
+{
+	uint16_t lid;
+	cl_list_item_t *p_item;
+	cl_list_item_t *p_next_item;
+	osm_lid_mgr_range_t *p_range = NULL;
+	uint8_t lmc_num_lids;
+	uint16_t lmc_mask;
+
+	osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+		"__osm_lid_mgr_find_free_lid_range: "
+		"LMC = %u, number LIDs = %u\n",
+		p_mgr->p_subn->opt.lmc, num_lids );
+
+	lmc_num_lids = (1 << p_mgr->p_subn->opt.lmc );
+	if (p_mgr->p_subn->opt.lmc)
+		lmc_mask = ~((1 << p_mgr->p_subn->opt.lmc) - 1);
+	else
+		lmc_mask = 0xffff;
+
+	/*
+	   Search the list of free lid ranges for a range which is big enough
+	*/
+	p_item = cl_qlist_head( &p_mgr->free_ranges );
+	while( p_item != cl_qlist_end( &p_mgr->free_ranges ) )
+	{
+		p_next_item = cl_qlist_next( p_item );
+		p_range = (osm_lid_mgr_range_t *)p_item;
+
+		lid = p_range->min_lid;
+
+		/* if we require more than one lid we must align to LMC */
+		if (num_lids > 1)
+		{
+			if ((lid & lmc_mask) != lid)
+				lid = (lid + lmc_num_lids) & lmc_mask;
+		}
+
+		/* but the aligned lid might now fall outside the range */
+		if (lid + num_lids - 1 <= p_range->max_lid)
+		{
+			/* ok let us use that range */
+			if (lid + num_lids - 1 == p_range->max_lid)
+			{
+				/* we consumed the entire range */
+				cl_qlist_remove_item( &p_mgr->free_ranges, p_item );
+			}
+			else
+			{
+				/* only update the available range */
+				p_range->min_lid = lid + num_lids;
+			}
+
+			*p_min_lid = lid;
+			*p_max_lid = (uint16_t)(lid + num_lids - 1);
+			return;
+		}
+		p_item = p_next_item;
+	}
+
+	/*
+	   Couldn't find a free range of lids.
+	*/
+	*p_min_lid = *p_max_lid = 0;
+	/* if we run out of lids, give an error and abort! */
+	osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+		"__osm_lid_mgr_find_free_lid_range: ERR 0307: "
+		"OPENSM RAN OUT OF LIDS!!!\n" );
+	CL_ASSERT( 0 );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_lid_mgr_cleanup_discovered_port_lid_range(
+	IN osm_lid_mgr_t* p_mgr,
+	IN osm_port_t *p_port )
+{
+	cl_ptr_vector_t *p_discovered_vec = &p_mgr->p_subn->port_lid_tbl;
+	uint16_t lid, min_lid, max_lid;
+	uint16_t max_tbl_lid = (uint16_t)(cl_ptr_vector_get_size( p_discovered_vec ));
+
+	osm_port_get_lid_range_ho(p_port, &min_lid, &max_lid);
+	min_lid = __osm_trim_lid(min_lid);
+	max_lid = __osm_trim_lid(max_lid);
+	for (lid = min_lid; lid <= max_lid; lid++)
+	{
+		if ((lid < max_tbl_lid ) &&
+		    (p_port == (osm_port_t*)cl_ptr_vector_get(p_discovered_vec, lid)))
+			cl_ptr_vector_set(p_discovered_vec, lid, NULL );
+	}
+}
+
+/**********************************************************************
+ 0.1 if the port info lid matches the guid2lid return 0
+ 0.2 if the port info has a lid and that range is empty in
+     port_lid_tbl, return 0 and update the port_lid_tbl and
+     guid2lid
+ 0.3 else find an empty space in port_lid_tbl, update the
+     port_lid_tbl and guid2lid, return 1 to flag a change required.
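+
+ Worked example (hypothetical values): with LMC=2 a port needs
+ num_lids = 1<<2 = 4 lids and lmc_mask = 0xFFFC; a discovered base lid
+ of 0x0C is aligned (0x0C & 0xFFFC == 0x0C) and may keep lids
+ 0x0C..0x0F, while a base lid of 0x0E fails the alignment test and is
+ reassigned from the free-range list.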
+**********************************************************************/
+static int
+__osm_lid_mgr_get_port_lid(
+	IN osm_lid_mgr_t* const p_mgr,
+	IN osm_port_t * const p_port,
+	OUT uint16_t* const p_min_lid,
+	OUT uint16_t* const p_max_lid)
+{
+	uint16_t lid, min_lid, max_lid;
+	uint64_t guid;
+	uint8_t num_lids = (1 << p_mgr->p_subn->opt.lmc);
+	int lid_changed = 0;
+	uint16_t lmc_mask;
+
+	OSM_LOG_ENTER( p_mgr->p_log, __osm_lid_mgr_get_port_lid );
+
+	if (p_mgr->p_subn->opt.lmc)
+		lmc_mask = ~((1 << p_mgr->p_subn->opt.lmc) - 1);
+	else
+		lmc_mask = 0xffff;
+
+	/* get the lid from the guid2lid */
+	guid = cl_ntoh64( osm_port_get_guid( p_port ) );
+
+	/* if the port is a base switch port 0 then we only need one lid */
+	if( p_port->p_node->sw &&
+	    !osm_switch_sp0_is_lmc_capable(p_port->p_node->sw, p_mgr->p_subn))
+		num_lids = 1;
+
+	/* if the port matches the guid2lid */
+	if (!osm_db_guid2lid_get( p_mgr->p_g2l, guid, &min_lid, &max_lid))
+	{
+		*p_min_lid = min_lid;
+		*p_max_lid = min_lid + num_lids - 1;
+		if (min_lid == cl_ntoh16(osm_port_get_base_lid(p_port)))
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"__osm_lid_mgr_get_port_lid: "
+				"0x%016" PRIx64" matches its known lid:0x%04x\n",
+				guid, min_lid );
+			goto Exit;
+		}
+		else
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"__osm_lid_mgr_get_port_lid: "
+				"0x%016" PRIx64
+				" with lid:0x%04x does not match its known lid:0x%04x\n",
+				guid, cl_ntoh16(osm_port_get_base_lid(p_port)), min_lid);
+			__osm_lid_mgr_cleanup_discovered_port_lid_range( p_mgr, p_port );
+			/* we still need to send the setting to the target port */
+			lid_changed = 1;
+			goto Exit;
+		}
+	}
+	else
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+			"__osm_lid_mgr_get_port_lid: "
+			"0x%016" PRIx64" has no persistent lid assigned\n",
+			guid );
+	}
+
+	/* if the port info carries a lid it must be lmc aligned and not mapped
+	   by the persistent storage */
+	min_lid = cl_ntoh16(osm_port_get_base_lid(p_port));
+
+	/* we want to ignore the discovered lid if we are also on the first
+	   sweep of the reassign-lids flow */
+	if (min_lid &&
+	    !((p_mgr->p_subn->first_time_master_sweep == TRUE) &&
+	      (p_mgr->p_subn->opt.reassign_lids == TRUE )))
+	{
+		/* make sure the lid is valid */
+		if ((num_lids == 1) || ((min_lid & lmc_mask) == min_lid))
+		{
+			/* is it free */
+			if (__osm_lid_mgr_is_range_not_persistent(p_mgr, min_lid, num_lids))
+			{
+				*p_min_lid = min_lid;
+				*p_max_lid = min_lid + num_lids - 1;
+				osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+					"__osm_lid_mgr_get_port_lid: "
+					"0x%016" PRIx64" lid range:[0x%x-0x%x] is free\n",
+					guid, *p_min_lid, *p_max_lid );
+				goto NewLidSet;
+			}
+			else
+			{
+				osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+					"__osm_lid_mgr_get_port_lid: "
+					"0x%016" PRIx64
+					" existing lid range:[0x%x:0x%x] is not free\n",
+					guid, min_lid, min_lid + num_lids - 1 );
+			}
+		}
+		else
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"__osm_lid_mgr_get_port_lid: "
+				"0x%016" PRIx64
+				" existing lid range:[0x%x:0x%x] is not lmc aligned\n",
+				guid, min_lid, min_lid + num_lids - 1 );
+		}
+	}
+
+	/* first clean up the existing discovered lid range */
+	__osm_lid_mgr_cleanup_discovered_port_lid_range( p_mgr, p_port );
+
+	/* find an empty space */
+	__osm_lid_mgr_find_free_lid_range(p_mgr, num_lids, p_min_lid, p_max_lid);
+	osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+		"__osm_lid_mgr_get_port_lid: "
+		"0x%016" PRIx64" assigned a new lid range:[0x%x-0x%x]\n",
+		guid, *p_min_lid, *p_max_lid );
+	lid_changed = 1;
+
+ NewLidSet:
+	/* update the guid2lid db and used_lids */
+	osm_db_guid2lid_set(p_mgr->p_g2l, guid, *p_min_lid, *p_max_lid);
+	for (lid = *p_min_lid; lid <= *p_max_lid; lid++)
+		cl_ptr_vector_set(&p_mgr->used_lids, lid, (void*)1);
+
+ Exit:
+	/* make sure the assigned lids are marked in port_lid_tbl */
+	for (lid = *p_min_lid; lid <= *p_max_lid; lid++)
+		cl_ptr_vector_set(&p_mgr->p_subn->port_lid_tbl, lid, p_port);
+
+	OSM_LOG_EXIT( p_mgr->p_log );
+	return lid_changed;
+}
+
+/**********************************************************************
+ Set to INIT the remote port of the given physical port
+ **********************************************************************/
+static void
+__osm_lid_mgr_set_remote_pi_state_to_init(
+	IN osm_lid_mgr_t * const p_mgr,
+	IN osm_physp_t* const p_physp)
+{
+	ib_port_info_t *p_pi;
+	osm_physp_t *p_rem_physp = osm_physp_get_remote(p_physp);
+
+	if ( p_rem_physp == NULL )
+		return;
+
+	if (osm_physp_is_valid( p_rem_physp ))
+	{
+		p_pi = osm_physp_get_port_info_ptr( p_rem_physp );
+		/* but in some rare cases the remote side might be unresponsive */
+		if (p_pi)
+			ib_port_info_set_port_state( p_pi, IB_LINK_INIT );
+	}
+}
+
+/**********************************************************************
+ **********************************************************************/
+static boolean_t
+__osm_lid_mgr_set_physp_pi(
+	IN osm_lid_mgr_t * const p_mgr,
+	IN osm_port_t* const p_port,
+	IN osm_physp_t* const p_physp,
+	IN ib_net16_t const lid )
+{
+	uint8_t payload[IB_SMP_DATA_SIZE];
+	ib_port_info_t* p_pi = (ib_port_info_t*)payload;
+	const ib_port_info_t* p_old_pi;
+	osm_madw_context_t context;
+	osm_node_t* p_node;
+	ib_api_status_t status;
+	uint8_t mtu;
+	uint8_t op_vls;
+	uint8_t port_num;
+	boolean_t send_set = FALSE;
+	boolean_t new_port = FALSE;
+
+	OSM_LOG_ENTER( p_mgr->p_log, __osm_lid_mgr_set_physp_pi );
+
+	/*
+	   Don't bother doing anything if this Physical Port is not valid.
+	   This allows simplified code in the caller.
+	*/
+	if( p_physp == NULL )
+		goto Exit;
+
+	if( !osm_physp_is_valid( p_physp ) )
+		goto Exit;
+
+	port_num = osm_physp_get_port_num( p_physp );
+	p_node = osm_physp_get_node_ptr( p_physp );
+
+	if( (osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ) &&
+	    (port_num != 0) )
+	{
+		/*
+		   Switch ports that are not numbered 0 should not be set with the
+		   following attributes as they are set later (during NO_CHANGE state
+		   in link mgr).
+		*/
+		if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"__osm_lid_mgr_set_physp_pi: "
+				"Skipping switch port %u, GUID 0x%016" PRIx64 "\n",
+				port_num,
+				cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) );
+		}
+		goto Exit;
+	}
+
+	p_old_pi = osm_physp_get_port_info_ptr( p_physp );
+
+	/*
+	   First, copy existing parameters from the PortInfo attribute we
+	   already have for this node.
+
+	   Second, update with default values that we know must be set for
+	   every Physical Port and the LID and set the neighbor MTU field
+	   appropriately.
+
+	   Third, send the SMP to this physical port.
+	*/
+
+	memset( payload, 0, IB_SMP_DATA_SIZE );
+
+	/* Correction by FUJITSU */
+	if( port_num != 0 )
+	{
+		memcpy( payload, p_old_pi, sizeof(ib_port_info_t) );
+	}
+
+	/*
+	   Correction following a bug injected by the previous
+	   FUJITSU line:
+
+	   Should never write back a value that is bigger than 3 in
+	   the PortPhysicalState field, so cannot simply copy!
+
+	   Actually we want to write there:
+	   port physical state - no change
+	   link down default state = polling
+	   port state - no change
+	*/
+	/* these values can be set only for HCA ports, so if we are
+	   on a switch node, set these values to zero */
+	if ( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH )
+		p_pi->state_info2 = 0x0;
+	else
+	{
+		p_pi->state_info2 = 0x02;
+		/* Check to see if the value we are setting is different than
+		   the value in the port_info. If it is, turn on send_set flag */
+		if ( ib_port_info_get_link_down_def_state(p_pi) !=
+		     ib_port_info_get_link_down_def_state(p_old_pi) )
+			send_set = TRUE;
+	}
+
+	ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE );
+
+	p_pi->m_key = p_mgr->p_subn->opt.m_key;
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (memcmp( &p_pi->m_key, &p_old_pi->m_key, sizeof(p_pi->m_key) ))
+		send_set = TRUE;
+
+	p_pi->subnet_prefix = p_mgr->p_subn->opt.subnet_prefix;
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (memcmp( &p_pi->subnet_prefix, &p_old_pi->subnet_prefix,
+		    sizeof(p_pi->subnet_prefix) ))
+		send_set = TRUE;
+
+	p_pi->base_lid = lid;
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (memcmp( &p_pi->base_lid, &p_old_pi->base_lid,
+		    sizeof(p_pi->base_lid) ))
+		send_set = TRUE;
+
+	/* we are updating the ports with our local sm_base_lid */
+	p_pi->master_sm_base_lid = p_mgr->p_subn->sm_base_lid;
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (memcmp( &p_pi->master_sm_base_lid, &p_old_pi->master_sm_base_lid,
+		    sizeof(p_pi->master_sm_base_lid) ))
+		send_set = TRUE;
+
+	p_pi->m_key_lease_period = p_mgr->p_subn->opt.m_key_lease_period;
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (memcmp( &p_pi->m_key_lease_period, &p_old_pi->m_key_lease_period,
+		    sizeof(p_pi->m_key_lease_period) ))
+		send_set = TRUE;
+
+	/*
+	   we want to set the timeout for both the switch port 0
+	   and the HCA ports
+	*/
+	ib_port_info_set_timeout( p_pi, p_mgr->p_subn->opt.subnet_timeout );
+	/* Check to see if the value we are setting is different than
+	   the value in the port_info. If it is, turn on send_set flag */
+	if (ib_port_info_get_timeout( p_pi ) != ib_port_info_get_timeout( p_old_pi ))
+		send_set = TRUE;
+
+	if( port_num != 0 )
+	{
+		/*
+		   HCAs don't have a port 0, and for switch port 0,
+		   the state bits are ignored.
+		   This is not the switch management port
+		*/
+		p_pi->link_width_enabled = p_old_pi->link_width_supported;
+		/* Check to see if the value we are setting is different than
+		   the value in the port_info. If it is, turn on send_set flag */
+		if (memcmp( &p_pi->link_width_enabled, &p_old_pi->link_width_enabled,
+			    sizeof(p_pi->link_width_enabled) ))
+			send_set = TRUE;
+
+		if ( p_mgr->p_subn->opt.force_link_speed )
+			ib_port_info_set_link_speed_enabled( p_pi, IB_LINK_SPEED_ACTIVE_2_5 );
+		else if (ib_port_info_get_link_speed_enabled( p_old_pi ) != ib_port_info_get_link_speed_sup( p_pi ))
+			ib_port_info_set_link_speed_enabled( p_pi, IB_PORT_LINK_SPEED_ENABLED_MASK );
+		else
+			ib_port_info_set_link_speed_enabled( p_pi, ib_port_info_get_link_speed_enabled( p_old_pi ));
+		if (memcmp( &p_pi->link_speed, &p_old_pi->link_speed,
+			    sizeof(p_pi->link_speed) ))
+			send_set = TRUE;
+
+		/* M_KeyProtectBits are always zero */
+		p_pi->mkey_lmc = p_mgr->p_subn->opt.lmc;
+		/* Check to see if the value we are setting is different than
+		   the value in the port_info. If it is, turn on send_set flag */
+		if (memcmp( &p_pi->mkey_lmc, &p_old_pi->mkey_lmc, sizeof(p_pi->mkey_lmc) ))
+			send_set = TRUE;
+
+		/* calc new op_vls and mtu */
+		op_vls = osm_physp_calc_link_op_vls(
+			p_mgr->p_log, p_mgr->p_subn, p_physp );
+		mtu = osm_physp_calc_link_mtu( p_mgr->p_log, p_physp );
+
+		ib_port_info_set_neighbor_mtu( p_pi, mtu );
+
+		if ( ib_port_info_get_neighbor_mtu(p_pi) !=
+		     ib_port_info_get_neighbor_mtu(p_old_pi) )
+			send_set = TRUE;
+
+		ib_port_info_set_op_vls( p_pi, op_vls );
+		if ( ib_port_info_get_op_vls(p_pi) !=
+		     ib_port_info_get_op_vls(p_old_pi) )
+			send_set = TRUE;
+
+		/*
+		   Several timeout mechanisms:
+		*/
+		ib_port_info_set_phy_and_overrun_err_thd(
+			p_pi,
+			p_mgr->p_subn->opt.local_phy_errors_threshold,
+			p_mgr->p_subn->opt.overrun_errors_threshold);
+
+		if (memcmp( &p_pi->error_threshold, &p_old_pi->error_threshold,
+			    sizeof(p_pi->error_threshold) ))
+			send_set = TRUE;
+
+		/*
+		   To reset the port state machine we can send PortInfo.State = DOWN.
+		   (see: 7.2.7 p171 lines:10-19)
+		*/
+		if ( (mtu != ib_port_info_get_neighbor_mtu(p_old_pi)) ||
+		     (op_vls != ib_port_info_get_op_vls(p_old_pi)))
+		{
+			if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+			{
+				osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+					"__osm_lid_mgr_set_physp_pi: "
+					"Sending Link Down due to op_vls or mtu change. MTU:%u,%u VL_CAP:%u,%u\n",
+					mtu, ib_port_info_get_neighbor_mtu(p_old_pi),
+					op_vls, ib_port_info_get_op_vls(p_old_pi)
+					);
+			}
+
+			/*
+			   we need to make sure the internal DB will follow the fact
+			   that the remote port is also going through "down" state into
+			   "init"...
+ */ + __osm_lid_mgr_set_remote_pi_state_to_init(p_mgr, p_physp); + + ib_port_info_set_port_state( p_pi, IB_LINK_DOWN ); + if ( ib_port_info_get_port_state(p_pi) != + ib_port_info_get_port_state(p_old_pi) ) + send_set = TRUE; + } + } + else + { + /* + For Port 0, NeighborMTU is relevant only for Enh. SP0. + In this case, we'll set the MTU according to the mtu_cap + */ + ib_port_info_set_neighbor_mtu( p_pi, ib_port_info_get_mtu_cap( p_old_pi ) ); + if ( ib_port_info_get_neighbor_mtu(p_pi) != + ib_port_info_get_neighbor_mtu(p_old_pi) ) + send_set = TRUE; + + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_lid_mgr_set_physp_pi: " + "Updating neighbor_mtu on switch port 0 to:%u\n", + ib_port_info_get_neighbor_mtu( p_pi ) ); + + /* Determine if enhanced switch port 0 and if so set LMC */ + if (osm_switch_sp0_is_lmc_capable(p_node->sw, p_mgr->p_subn)) + { + /* M_KeyProtectBits are always zero */ + p_pi->mkey_lmc = p_mgr->p_subn->opt.lmc; + /* Check to see if the value we are setting is different than + the value in the port_info. If it is, turn on send_set flag */ + if (memcmp( &p_pi->mkey_lmc, &p_old_pi->mkey_lmc, sizeof(p_pi->mkey_lmc) )) + send_set = TRUE; + } + } + + context.pi_context.node_guid = osm_node_get_node_guid( p_node ); + context.pi_context.port_guid = osm_physp_get_port_guid( p_physp ); + context.pi_context.set_method = TRUE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + /* + We need to set the cli_rereg bit when we are in first_time_master_sweep for + ports supporting the ClientReregistration Vol1 (v1.2) p811 14.4.11 + Also, if this port was just now discovered, then we should also set the + cli_rereg bit. We know that the port was just discovered if it is in + the p_subn->new_ports_list list. + */ + if ( cl_is_object_in_list(&p_mgr->p_subn->new_ports_list, p_port) ) + { + /* p_port is in new_ports_list, mark new_port as TRUE */ + new_port = TRUE; + } + + if ( ( p_mgr->p_subn->first_time_master_sweep == TRUE || + new_port == TRUE ) && + !p_mgr->p_subn->opt.no_clients_rereg && + ( (p_old_pi->capability_mask & IB_PORT_CAP_HAS_CLIENT_REREG) != 0 ) ) + ib_port_info_set_client_rereg( p_pi, 1 ); + else + ib_port_info_set_client_rereg( p_pi, 0 ); + + /* We need to send the PortInfo Set request with the new sm_lid + in the following cases: + 1. There is a change in the values (send_set == TRUE) + 2. first_time_master_sweep flag on the subnet is TRUE. This means the + SM just became master, and it then needs to send a PortInfo Set to + every port. + 3. got_set_resp on the physical port is FALSE. This means we haven't seen + this port before and we need to send Set of PortInfo to it. + */ + if (send_set || p_mgr->p_subn->first_time_master_sweep == TRUE || + p_physp->got_set_resp == FALSE) + { + + p_mgr->send_set_reqs = TRUE; + status = osm_req_set( p_mgr->p_req, + osm_physp_get_dr_path_ptr( p_physp ), + payload, + sizeof(payload), + IB_MAD_ATTR_PORT_INFO, + cl_hton32(osm_physp_get_port_num( p_physp )), + CL_DISP_MSGID_NONE, + &context ); + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return (send_set || p_mgr->p_subn->first_time_master_sweep == TRUE || + p_physp->got_set_resp == FALSE); +} + +/********************************************************************** + Processes our own node + Lock must already be held. 
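+ (The SM port is deliberately handled before any other port: the lid
+ chosen here is advertised as master_sm_base_lid in every PortInfo Set
+ that osm_lid_mgr_process_subnet later issues.)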
+**********************************************************************/
+static boolean_t
+__osm_lid_mgr_process_our_sm_node(
+	IN osm_lid_mgr_t* const p_mgr )
+{
+	osm_port_t *p_port;
+	uint16_t min_lid_ho;
+	uint16_t max_lid_ho;
+	osm_physp_t *p_physp;
+	boolean_t res = TRUE;
+
+	OSM_LOG_ENTER( p_mgr->p_log, __osm_lid_mgr_process_our_sm_node );
+
+	/*
+	   Acquire our own port object.
+	*/
+	p_port = (osm_port_t*)cl_qmap_get( &p_mgr->p_subn->port_guid_tbl,
+		p_mgr->p_subn->sm_port_guid );
+
+	if( p_port == (osm_port_t*)cl_qmap_end( &p_mgr->p_subn->port_guid_tbl ) )
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+			"__osm_lid_mgr_process_our_sm_node: ERR 0308: "
+			"Can't acquire SM's port object, GUID 0x%016" PRIx64 "\n",
+			cl_ntoh64( p_mgr->p_subn->sm_port_guid ) );
+		res = FALSE;
+		goto Exit;
+	}
+
+	/*
+	   Determine the LID this SM will use for its own port.
+	   Be careful. With an LMC > 0, the bottom of the LID range becomes
+	   unusable, since port hardware will mask off least significant bits,
+	   leaving a LID of 0 (invalid). Therefore, make sure that we always
+	   configure the SM with a LID that has non-zero bits, even after
+	   LMC masking by hardware.
+	*/
+	__osm_lid_mgr_get_port_lid(p_mgr, p_port, &min_lid_ho, &max_lid_ho);
+	osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+		"__osm_lid_mgr_process_our_sm_node: "
+		"Current base LID is 0x%X\n", min_lid_ho );
+	/*
+	   Update subnet object.
+	*/
+	p_mgr->p_subn->master_sm_base_lid = cl_hton16( min_lid_ho );
+	p_mgr->p_subn->sm_base_lid = cl_hton16( min_lid_ho );
+
+	if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) )
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+			"__osm_lid_mgr_process_our_sm_node: "
+			"Assigning SM's port 0x%016" PRIx64
+			"\n\t\t\t\tto LID range [0x%X,0x%X]\n",
+			cl_ntoh64( osm_port_get_guid( p_port ) ),
+			min_lid_ho, max_lid_ho );
+	}
+
+	/*
+	   Set the PortInfo for the Physical Port associated
+	   with this Port.
+	*/
+	p_physp = osm_port_get_default_phys_ptr( p_port );
+
+	__osm_lid_mgr_set_physp_pi( p_mgr, p_port, p_physp, cl_hton16( min_lid_ho ) );
+
+ Exit:
+	OSM_LOG_EXIT( p_mgr->p_log );
+	return res;
+}
+
+/**********************************************************************
+ **********************************************************************/
+osm_signal_t
+osm_lid_mgr_process_sm(
+	IN osm_lid_mgr_t* const p_mgr )
+{
+	osm_signal_t signal = OSM_SIGNAL_DONE_PENDING;
+
+	OSM_LOG_ENTER( p_mgr->p_log, osm_lid_mgr_process_sm );
+
+	CL_ASSERT( p_mgr->p_subn->sm_port_guid );
+
+	CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock );
+
+	/* initialize the port_lid_tbl and empty ranges list following the
+	   persistent db */
+	__osm_lid_mgr_init_sweep( p_mgr );
+
+	if (p_mgr->p_subn->opt.pfn_ui_pre_lid_assign)
+	{
+		osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+			"osm_lid_mgr_process_sm: "
+			"Invoking UI function pfn_ui_pre_lid_assign\n" );
+		p_mgr->p_subn->opt.pfn_ui_pre_lid_assign(
+			p_mgr->p_subn->opt.ui_pre_lid_assign_ctx );
+	}
+
+	/* Set the send_set_reqs of the p_mgr to FALSE, and
+	   we'll see if any set requests were sent. If not -
+	   can signal OSM_SIGNAL_DONE */
+	p_mgr->send_set_reqs = FALSE;
+	if ( __osm_lid_mgr_process_our_sm_node( p_mgr ) == FALSE )
+		/* The initialization failed */
+		signal = OSM_SIGNAL_DONE;
+
+	if ( p_mgr->send_set_reqs == FALSE )
+		signal = OSM_SIGNAL_DONE;
+
+	CL_PLOCK_RELEASE( p_mgr->p_lock );
+
+	OSM_LOG_EXIT( p_mgr->p_log );
+	return( signal );
+}
+
+/**********************************************************************
+ 1 go through all ports in the subnet.
+ 1.1 call __osm_lid_mgr_get_port_lid
+ 1.2 if a change is required send the port info
+ 2 if any change send the signal PENDING...
+**********************************************************************/
+osm_signal_t
+osm_lid_mgr_process_subnet(
+	IN osm_lid_mgr_t* const p_mgr )
+{
+	osm_signal_t signal;
+	cl_qmap_t *p_port_guid_tbl;
+	osm_port_t *p_port;
+	ib_net64_t port_guid;
+	uint16_t min_lid_ho, max_lid_ho;
+	osm_physp_t *p_physp;
+	int lid_changed;
+
+	CL_ASSERT( p_mgr );
+
+	OSM_LOG_ENTER( p_mgr->p_log, osm_lid_mgr_process_subnet );
+
+	CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock );
+
+	CL_ASSERT( p_mgr->p_subn->sm_port_guid );
+
+	/* Set the send_set_reqs of the p_mgr to FALSE, and
+	   we'll see if any set requests were sent. If not -
+	   can signal OSM_SIGNAL_DONE */
+	p_mgr->send_set_reqs = FALSE;
+
+	p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl;
+
+	for( p_port = (osm_port_t*)cl_qmap_head( p_port_guid_tbl );
+	     p_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl );
+	     p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ) )
+	{
+		port_guid = osm_port_get_guid( p_port );
+
+		/*
+		   Our own port is a special case in that we want to
+		   assign a LID to ourselves first, since we have to
+		   advertise that LID value to the other ports.
+
+		   For that reason, our node is treated separately and
+		   we will not add it to any of these lists.
+		*/
+		if( port_guid == p_mgr->p_subn->sm_port_guid )
+		{
+			osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+				"osm_lid_mgr_process_subnet: "
+				"Skipping our own port 0x%016" PRIx64 "\n",
+				cl_ntoh64( port_guid ) );
+		}
+		else
+		{
+			/*
+			   get the port lid range - we need to send it on first active sweep or
+			   if there was a change (the result of the __osm_lid_mgr_get_port_lid)
+			*/
+			lid_changed =
+				__osm_lid_mgr_get_port_lid(p_mgr, p_port, &min_lid_ho, &max_lid_ho);
+
+			/* we can call the function to update the port info as it is known
+			   to look for any field change and will only send an update if
+			   required */
+			osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+				"osm_lid_mgr_process_subnet: "
+				"Assigned port 0x%016" PRIx64
+				", LID [0x%X,0x%X]\n", cl_ntoh64( port_guid ),
+				min_lid_ho, max_lid_ho );
+
+			p_physp = osm_port_get_default_phys_ptr( p_port );
+			/* the routine reports whether it actually sent a Set(PortInfo) */
+			if (__osm_lid_mgr_set_physp_pi( p_mgr, p_port, p_physp, cl_hton16( min_lid_ho )))
+				p_mgr->send_set_reqs = TRUE;
+		}
+	} /* all ports */
+
+	/* store the guid to lid table in persistent db */
+	osm_db_store( p_mgr->p_g2l );
+
+	if ( p_mgr->send_set_reqs == FALSE )
+		signal = OSM_SIGNAL_DONE;
+	else
+		signal = OSM_SIGNAL_DONE_PENDING;
+
+	CL_PLOCK_RELEASE( p_mgr->p_lock );
+
+	OSM_LOG_EXIT( p_mgr->p_log );
+	return( signal );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv.c
new file mode 100644
index 00000000..70e9673d
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv.c
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ * Implementation of osm_lft_rcv_t.
+ * This object represents the Linear Forwarding Table (LFT) Receiver object.
+ * This object is part of the opensm family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lft_rcv_construct(
+ IN osm_lft_rcv_t* const p_rcv )
+{
+ memset( p_rcv, 0, sizeof(*p_rcv) );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lft_rcv_destroy(
+ IN osm_lft_rcv_t* const p_rcv )
+{
+ CL_ASSERT( p_rcv );
+
+ OSM_LOG_ENTER( p_rcv->p_log, osm_lft_rcv_destroy );
+
+ OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_lft_rcv_init(
+ IN osm_lft_rcv_t* const p_rcv,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock )
+{
+ ib_api_status_t status = IB_SUCCESS;
+
+ OSM_LOG_ENTER( p_log, osm_lft_rcv_init );
+
+ osm_lft_rcv_construct( p_rcv );
+
+ p_rcv->p_log = p_log;
+ p_rcv->p_subn = p_subn;
+ p_rcv->p_lock = p_lock;
+
+ OSM_LOG_EXIT( p_rcv->p_log );
+ return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lft_rcv_process(
+ IN const osm_lft_rcv_t* const p_rcv,
+ IN osm_madw_t* const p_madw )
+{
+ ib_smp_t *p_smp;
+ uint32_t block_num;
+ osm_switch_t *p_sw;
+ osm_lft_context_t *p_lft_context;
+ uint8_t *p_block;
+ ib_net64_t node_guid;
+ ib_api_status_t status;
+
+ CL_ASSERT( p_rcv );
+
+ OSM_LOG_ENTER( p_rcv->p_log, osm_lft_rcv_process );
+
+ CL_ASSERT( p_madw );
+
+ p_smp = osm_madw_get_smp_ptr( p_madw );
+ p_block = (uint8_t*)ib_smp_get_payload_ptr( p_smp );
+ block_num = cl_ntoh32( p_smp->attr_mod );
+
+ /*
+ Acquire the switch object for this switch.
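+ The LFT context stored with the MAD when the request was issued
+ carries the node GUID of the target switch; it is used below to
+ look the switch object up by GUID.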
+ */ + p_lft_context = osm_madw_get_lft_context_ptr( p_madw ); + node_guid = p_lft_context->node_guid; + + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + p_sw = osm_get_switch_by_guid( p_rcv->p_subn, node_guid ); + + if( !p_sw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lft_rcv_process: ERR 0401: " + "LFT received for nonexistent node " + "0x%" PRIx64 "\n", cl_ntoh64( node_guid ) ); + } + else + { + status = osm_switch_set_ft_block( p_sw, p_block, block_num ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lft_rcv_process: ERR 0402: " + "Setting forwarding table block failed (%s)" + "\n\t\t\t\tSwitch 0x%" PRIx64 "\n", + ib_get_err_str( status ), + cl_ntoh64( node_guid ) ); + } + } + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv_ctrl.c new file mode 100644 index 00000000..b3071b60 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_rcv_ctrl.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_lft_rcv_ctrl_t. + * This object represents the LFT Receive controller object. + * This object is part of the opensm family of objects. 
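+ * The controller registers a callback with the dispatcher and simply
+ * forwards incoming LFT MADs to the osm_lft_rcv_t receiver.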
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_lft_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_lft_rcv_process( ((osm_lft_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lft_rcv_ctrl_construct( + IN osm_lft_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_lft_rcv_ctrl_destroy( + IN osm_lft_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_lft_rcv_ctrl_init( + IN osm_lft_rcv_ctrl_t* const p_ctrl, + IN osm_lft_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_lft_rcv_ctrl_init ); + + osm_lft_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_LFT, + __osm_lft_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_lft_rcv_ctrl_init: ERR 1601: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_tbl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_tbl.c new file mode 100644 index 00000000..9a8d8034 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_lin_fwd_tbl.c @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ * Implementation of osm_lin_fwd_tbl_t.
+ * This object represents a linear forwarding table.
+ * This object is part of the opensm family of objects.
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+
+
+inline size_t
+__osm_lin_tbl_compute_obj_size(
+ IN const uint32_t num_ports )
+{
+ return( sizeof(osm_lin_fwd_tbl_t) + (num_ports - 1) );
+}
+
+/**********************************************************************
+ **********************************************************************/
+osm_lin_fwd_tbl_t*
+osm_lin_tbl_new(
+ IN uint16_t const size )
+{
+ osm_lin_fwd_tbl_t* p_tbl;
+
+ /*
+ The capacity reported by the switch includes LID 0,
+ so add 1 to the end of the range here for this assert.
+ */
+ CL_ASSERT( size <= IB_LID_UCAST_END_HO + 1 );
+ p_tbl = (osm_lin_fwd_tbl_t*)malloc(
+ __osm_lin_tbl_compute_obj_size( size ) );
+
+ /*
+ Initialize the table to OSM_NO_PATH, which means "invalid port".
+ Check the allocation before touching the memory.
+ */
+ if( p_tbl != NULL )
+ {
+ memset( p_tbl, OSM_NO_PATH, __osm_lin_tbl_compute_obj_size( size ) );
+ p_tbl->size = (uint16_t)size;
+ }
+ return( p_tbl );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lin_tbl_delete(
+ IN osm_lin_fwd_tbl_t** const pp_tbl )
+{
+ free( *pp_tbl );
+ *pp_tbl = NULL;
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_link_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_link_mgr.c
new file mode 100644
index 00000000..4eddf18c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_link_mgr.c
@@ -0,0 +1,514 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ * Implementation of osm_link_mgr_t.
+ * This file implements the Link Manager object.
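+ * The link manager walks every port in the subnet and sends the
+ * PortInfo Set requests needed to move ports toward the desired
+ * link state.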
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.15 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include + + +/********************************************************************** + **********************************************************************/ +void +osm_link_mgr_construct( + IN osm_link_mgr_t* const p_mgr ) +{ + memset( p_mgr, 0, sizeof(*p_mgr) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_link_mgr_destroy( + IN osm_link_mgr_t* const p_mgr ) +{ + OSM_LOG_ENTER( p_mgr->p_log, osm_link_mgr_destroy ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_link_mgr_init( + IN osm_link_mgr_t* const p_mgr, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_link_mgr_init ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_subn ); + CL_ASSERT( p_lock ); + + osm_link_mgr_construct( p_mgr ); + + p_mgr->p_log = p_log; + p_mgr->p_subn = p_subn; + p_mgr->p_lock = p_lock; + p_mgr->p_req = p_req; + + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_link_mgr_set_physp_pi( + IN osm_link_mgr_t* const p_mgr, + IN osm_physp_t* const p_physp, + IN uint8_t const port_state ) +{ + uint8_t payload[IB_SMP_DATA_SIZE]; + ib_port_info_t* const p_pi = (ib_port_info_t*)payload; + const ib_port_info_t* p_old_pi; + osm_madw_context_t context; + osm_node_t* p_node; + ib_api_status_t status; + uint8_t port_num; + uint8_t mtu; + uint8_t op_vls; + boolean_t esp0 = FALSE; + boolean_t send_set = FALSE; + osm_physp_t *p_remote_physp; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_link_mgr_set_physp_pi ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + p_node = osm_physp_get_node_ptr( p_physp ); + + port_num = osm_physp_get_port_num( p_physp ); + + if( port_num == 0 ) + { + ib_switch_info_t* p_sw_info; + + /* + HCA's don't have a port 0, and for switch port 0, + we need to check if this is enhanced port 0 or base port 0. + For base port 0 the following parameters are not valid. (p824, table 145) + */ + if (!p_node->sw) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_link_mgr_set_physp_pi: ERR 4201: " + "Cannot find switch by guid: 0x%" PRIx64 "\n", + cl_ntoh64( p_node->node_info.node_guid ) ); + goto Exit; + } + + p_sw_info = osm_switch_get_si_ptr(p_node->sw); + if (ib_switch_info_is_enhanced_port0( p_sw_info ) == FALSE) + { + /* This means the switch doesn't support enhanced port zero. + Can skip it. 
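+ Base port 0 carries none of the PortInfo fields we manage here,
+ so there is nothing for us to set.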
*/
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_link_mgr_set_physp_pi: "
+ "Skipping port 0, GUID 0x%016" PRIx64 "\n",
+ cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) );
+ }
+ goto Exit;
+ }
+ esp0 = TRUE;
+ }
+
+ /*
+ PAST THIS POINT WE ARE HANDLING EITHER A NON PORT 0 OR ENHANCED PORT 0
+ */
+
+ p_old_pi = osm_physp_get_port_info_ptr( p_physp );
+
+ memset( payload, 0, IB_SMP_DATA_SIZE );
+
+ /* Correction by FUJITSU */
+ memcpy( payload, p_old_pi, sizeof(ib_port_info_t) );
+
+ /*
+ Correction for a bug introduced by the previous
+ FUJITSU line:
+
+ We must never write back a value bigger than 3 in
+ the PortPhysicalState field - so we cannot simply copy!
+
+ What we actually want to write there is:
+ port physical state - no change,
+ link down default state = polling,
+ port state - no change
+ */
+ p_pi->state_info2 = 0x02;
+ ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE );
+ if ( ib_port_info_get_link_down_def_state(p_pi) !=
+ ib_port_info_get_link_down_def_state(p_old_pi) )
+ send_set = TRUE;
+
+ /* we only change port fields if we do not change state */
+ if (port_state == IB_LINK_NO_CHANGE)
+ {
+ /* The following fields are relevant only for CA port or Enh. SP0 */
+ if( osm_node_get_type( p_node ) != IB_NODE_TYPE_SWITCH ||
+ port_num == 0 )
+ {
+ p_pi->m_key = p_mgr->p_subn->opt.m_key;
+ if (memcmp( &p_pi->m_key, &p_old_pi->m_key, sizeof(p_pi->m_key) ))
+ send_set = TRUE;
+
+ p_pi->subnet_prefix = p_mgr->p_subn->opt.subnet_prefix;
+ if (memcmp( &p_pi->subnet_prefix, &p_old_pi->subnet_prefix,
+ sizeof(p_pi->subnet_prefix) ))
+ send_set = TRUE;
+
+ p_pi->base_lid = osm_physp_get_base_lid( p_physp );
+ if (memcmp( &p_pi->base_lid, &p_old_pi->base_lid,
+ sizeof(p_pi->base_lid) ))
+ send_set = TRUE;
+
+ /* we are initializing the ports with our local sm_base_lid */
+ p_pi->master_sm_base_lid = p_mgr->p_subn->sm_base_lid;
+ if (memcmp( &p_pi->master_sm_base_lid, &p_old_pi->master_sm_base_lid,
+ sizeof(p_pi->master_sm_base_lid) ))
+ send_set = TRUE;
+
+ p_pi->m_key_lease_period = p_mgr->p_subn->opt.m_key_lease_period;
+ if (memcmp( &p_pi->m_key_lease_period, &p_old_pi->m_key_lease_period,
+ sizeof(p_pi->m_key_lease_period) ))
+ send_set = TRUE;
+
+ if (esp0 == FALSE)
+ p_pi->mkey_lmc = p_mgr->p_subn->opt.lmc;
+ else
+ {
+ if (p_mgr->p_subn->opt.lmc_esp0)
+ p_pi->mkey_lmc = p_mgr->p_subn->opt.lmc;
+ else
+ p_pi->mkey_lmc = 0;
+ }
+ if (memcmp( &p_pi->mkey_lmc, &p_old_pi->mkey_lmc, sizeof(p_pi->mkey_lmc) ))
+ send_set = TRUE;
+
+ ib_port_info_set_timeout( p_pi, p_mgr->p_subn->opt.subnet_timeout );
+ if (ib_port_info_get_timeout( p_pi ) != ib_port_info_get_timeout( p_old_pi ))
+ send_set = TRUE;
+ }
+
+ /*
+ Several timeout mechanisms:
+ */
+ p_remote_physp = osm_physp_get_remote( p_physp );
+ if (port_num != 0 && p_remote_physp &&
+ osm_physp_is_valid(p_remote_physp)) {
+ if (osm_node_get_type(osm_physp_get_node_ptr(p_physp)) ==
+ IB_NODE_TYPE_ROUTER)
+ {
+ ib_port_info_set_hoq_lifetime(
+ p_pi, p_mgr->p_subn->opt.leaf_head_of_queue_lifetime);
+ }
+ else if (osm_node_get_type(osm_physp_get_node_ptr(p_physp)) ==
+ IB_NODE_TYPE_SWITCH)
+ {
+ /* Is remote end CA or router (a leaf port) ?
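+ If so, apply the leaf variants of the HoQ lifetime
+ and VL stall count options.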
*/ + if (osm_node_get_type(osm_physp_get_node_ptr(p_remote_physp)) != + IB_NODE_TYPE_SWITCH) + { + ib_port_info_set_hoq_lifetime( + p_pi, p_mgr->p_subn->opt.leaf_head_of_queue_lifetime); + ib_port_info_set_vl_stall_count( + p_pi, p_mgr->p_subn->opt.leaf_vl_stall_count); + } + else + { + ib_port_info_set_hoq_lifetime( + p_pi, p_mgr->p_subn->opt.head_of_queue_lifetime); + ib_port_info_set_vl_stall_count( + p_pi, p_mgr->p_subn->opt.vl_stall_count); + } + } + if ( ib_port_info_get_hoq_lifetime(p_pi) != + ib_port_info_get_hoq_lifetime(p_old_pi) || + ib_port_info_get_vl_stall_count(p_pi) != + ib_port_info_get_vl_stall_count(p_old_pi) ) + send_set = TRUE; + } + + ib_port_info_set_phy_and_overrun_err_thd( + p_pi, + p_mgr->p_subn->opt.local_phy_errors_threshold, + p_mgr->p_subn->opt.overrun_errors_threshold); + if (memcmp( &p_pi->error_threshold, &p_old_pi->error_threshold, + sizeof(p_pi->error_threshold) )) + send_set = TRUE; + + /* + Set the easy common parameters for all port types, + then determine the neighbor MTU. + */ + p_pi->link_width_enabled = p_old_pi->link_width_supported; + if (memcmp( &p_pi->link_width_enabled, &p_old_pi->link_width_enabled, + sizeof(p_pi->link_width_enabled) )) + send_set = TRUE; + + if ( p_mgr->p_subn->opt.force_link_speed ) + ib_port_info_set_link_speed_enabled( p_pi, IB_LINK_SPEED_ACTIVE_2_5 ); + else if (ib_port_info_get_link_speed_enabled( p_old_pi ) != ib_port_info_get_link_speed_sup( p_pi )) + ib_port_info_set_link_speed_enabled( p_pi, IB_PORT_LINK_SPEED_ENABLED_MASK ); + else + ib_port_info_set_link_speed_enabled( p_pi, ib_port_info_get_link_speed_enabled( p_old_pi )); + if (memcmp( &p_pi->link_speed, &p_old_pi->link_speed, + sizeof(p_pi->link_speed) )) + send_set = TRUE; + + /* calc new op_vls and mtu */ + op_vls = + osm_physp_calc_link_op_vls( p_mgr->p_log, p_mgr->p_subn, p_physp ); + mtu = osm_physp_calc_link_mtu( p_mgr->p_log, p_physp ); + + ib_port_info_set_neighbor_mtu( p_pi, mtu ); + if ( ib_port_info_get_neighbor_mtu(p_pi) != + ib_port_info_get_neighbor_mtu(p_old_pi) ) + send_set = TRUE; + + ib_port_info_set_op_vls( p_pi, op_vls ); + if ( ib_port_info_get_op_vls(p_pi) != + ib_port_info_get_op_vls(p_old_pi) ) + send_set = TRUE; + + /* also the context can flag the need to check for errors. */ + context.pi_context.ignore_errors = FALSE; + } + else + { + /* + Since the only change we try to do is to modify the port + state we can ignore the errors that might be caused by a + race in setting the state and the actual state the port is + in. + */ + context.pi_context.ignore_errors = FALSE; + } + + ib_port_info_set_port_state( p_pi, port_state ); + if (port_state != IB_LINK_NO_CHANGE && + ib_port_info_get_port_state(p_pi) != + ib_port_info_get_port_state(p_old_pi) ) + { + send_set = TRUE; + if (port_state == IB_LINK_ACTIVE) + context.pi_context.active_transition = TRUE; + else + context.pi_context.active_transition = FALSE; + } + + context.pi_context.node_guid = osm_node_get_node_guid( p_node ); + context.pi_context.port_guid = osm_physp_get_port_guid( p_physp ); + context.pi_context.set_method = TRUE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.light_sweep = FALSE; + + /* We need to send the PortInfoSet request with the new sm_lid + in the following cases: + 1. There is a change in the values (send_set == TRUE) + 2. This is an hca port or a switch port zero and got_set_resp is FALSE + (in this case we sent a PortInfoSet in the osm_lid_mgr, but for some + reason we didn't get a response) - try and re-send. + 3. 
This is a switch port and:
+ a. first_time_master_sweep flag on the subnet is TRUE. This means the
+ SM just became master, and it then needs to send a PortInfoSet to
+ every port (and this is the first time we can send a PortInfoSet to
+ switch external ports).
+ b. got_set_resp on the physical port is FALSE. This means we haven't
+ seen this port before - need to send PortInfoSet to it.
+ */
+ if (send_set ||
+ (osm_node_get_type(p_node) != IB_NODE_TYPE_SWITCH && p_physp->got_set_resp == FALSE) ||
+ (osm_node_get_type(p_node) == IB_NODE_TYPE_SWITCH && port_num == 0 &&
+ p_physp->got_set_resp == FALSE) ||
+ (osm_node_get_type(p_node) == IB_NODE_TYPE_SWITCH && port_num != 0 &&
+ (p_mgr->p_subn->first_time_master_sweep == TRUE || p_physp->got_set_resp == FALSE)))
+ {
+ p_mgr->send_set_reqs = TRUE;
+ status = osm_req_set( p_mgr->p_req,
+ osm_physp_get_dr_path_ptr( p_physp ),
+ payload,
+ sizeof(payload),
+ IB_MAD_ATTR_PORT_INFO,
+ cl_hton32(port_num),
+ CL_DISP_MSGID_NONE,
+ &context );
+ }
+
+ Exit:
+ OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static osm_signal_t
+__osm_link_mgr_process_port(
+ IN osm_link_mgr_t* const p_mgr,
+ IN osm_port_t* const p_port,
+ IN const uint8_t link_state )
+{
+ uint32_t i;
+ uint32_t num_physp;
+ osm_physp_t *p_physp;
+ uint8_t current_state;
+ osm_signal_t signal = OSM_SIGNAL_DONE;
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_link_mgr_process_port );
+
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_link_mgr_process_port: "
+ "Port 0x%" PRIx64 " going to %s\n",
+ cl_ntoh64( osm_port_get_guid( p_port ) ),
+ ib_get_port_state_str( link_state ) );
+ }
+
+ /*
+ Set the PortInfo for every Physical Port associated
+ with this Port. The loop starts at port 0; base switch port 0,
+ where the link state is not applicable, is skipped inside
+ __osm_link_mgr_set_physp_pi.
+ */
+ num_physp = osm_port_get_num_physp( p_port );
+ for( i = 0; i < num_physp; i ++ )
+ {
+ /*
+ Don't bother doing anything if this Physical Port is not valid,
+ or if the state of the port is already better than the
+ specified state.
+ */
+ p_physp = osm_port_get_phys_ptr( p_port, (uint8_t)i );
+ if( p_physp && osm_physp_is_valid( p_physp ) )
+ {
+ current_state = osm_physp_get_port_state( p_physp );
+
+ if( current_state == IB_LINK_DOWN )
+ continue;
+
+ /*
+ Normally we only send a state update if the current state is
+ lower than the required state. However, we also need to send
+ the update when no state change is required (IB_LINK_NO_CHANGE).
+ */
+ if( (link_state == IB_LINK_NO_CHANGE) ||
+ (current_state < link_state) )
+ {
+ p_mgr->send_set_reqs = FALSE;
+ __osm_link_mgr_set_physp_pi(
+ p_mgr,
+ p_physp,
+ link_state );
+
+ if ( p_mgr->send_set_reqs == TRUE )
+ signal = OSM_SIGNAL_DONE_PENDING;
+ }
+ else
+ {
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_link_mgr_process_port: "
+ "Physical port 0x%X already %s.
Skipping\n", + osm_physp_get_port_num( p_physp ), + ib_get_port_state_str( current_state ) ); + } + } + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); + return( signal ); +} + +/********************************************************************** + **********************************************************************/ +osm_signal_t +osm_link_mgr_process( + IN osm_link_mgr_t* const p_mgr, + IN const uint8_t link_state ) +{ + cl_qmap_t *p_port_guid_tbl; + osm_port_t *p_port; + osm_signal_t signal = OSM_SIGNAL_DONE; + + OSM_LOG_ENTER( p_mgr->p_log, osm_link_mgr_process ); + + p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl; + + CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock ); + + for( p_port = (osm_port_t*)cl_qmap_head( p_port_guid_tbl ); + p_port != (osm_port_t*)cl_qmap_end( p_port_guid_tbl ); + p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ) ) + { + if( __osm_link_mgr_process_port( p_mgr, p_port, link_state ) == + OSM_SIGNAL_DONE_PENDING ) + signal = OSM_SIGNAL_DONE_PENDING; + } + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + OSM_LOG_EXIT( p_mgr->p_log ); + return( signal ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_matrix.c b/branches/Ndi/ulp/opensm/user/opensm/osm_matrix.c new file mode 100644 index 00000000..8fb8e128 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_matrix.c @@ -0,0 +1,156 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_lid_matrix_t. + * This file implements the LID Matrix object. 
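+ * Each LID maps to a vector holding one hop count per port, plus one
+ * extra entry caching the least-hops value across all ports for that
+ * LID.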
+ *
+ * Environment:
+ * Linux User Mode
+ *
+ * $Revision: 1.7 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lid_matrix_destroy(
+ IN osm_lid_matrix_t* const p_lmx )
+{
+ cl_vector_destroy( &p_lmx->lid_vec );
+}
+
+/**********************************************************************
+ Initializer function called by cl_vector
+**********************************************************************/
+cl_status_t
+__osm_lid_matrix_vec_init(
+ IN void* const p_elem,
+ IN void* context )
+{
+ osm_lid_matrix_t* const p_lmx = (osm_lid_matrix_t*)context;
+
+ memset( p_elem, OSM_NO_PATH, p_lmx->num_ports + 1);
+ return( CL_SUCCESS );
+}
+
+/**********************************************************************
+ Clear function called via cl_vector_apply_func
+**********************************************************************/
+void
+__osm_lid_matrix_vec_clear(
+ IN const size_t index,
+ IN void* const p_elem,
+ IN void* context )
+{
+ osm_lid_matrix_t* const p_lmx = (osm_lid_matrix_t*)context;
+
+ UNUSED_PARAM( index );
+ memset( p_elem, OSM_NO_PATH, p_lmx->num_ports + 1);
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lid_matrix_clear(
+ IN osm_lid_matrix_t* const p_lmx )
+{
+ cl_vector_apply_func( &p_lmx->lid_vec,
+ __osm_lid_matrix_vec_clear, p_lmx );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_lid_matrix_init(
+ IN osm_lid_matrix_t* const p_lmx,
+ IN const uint8_t num_ports )
+{
+ cl_vector_t *p_vec;
+ cl_status_t status;
+
+ CL_ASSERT( p_lmx );
+ CL_ASSERT( num_ports );
+
+ p_lmx->num_ports = num_ports;
+
+ p_vec = &p_lmx->lid_vec;
+ /*
+ Initialize the vector for the number of ports plus an
+ extra entry to hold the "least-hops" count for that LID.
+ */
+ status = cl_vector_init( p_vec,
+ 0, /* min_size, */
+ 1, /* grow_size */
+ sizeof(uint8_t)*(num_ports + 1), /* element size */
+ __osm_lid_matrix_vec_init, /* init function */
+ NULL, /* destroy func */
+ p_lmx /* context */
+ );
+
+ return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+cl_status_t
+osm_lid_matrix_set(
+ IN osm_lid_matrix_t* const p_lmx,
+ IN const uint16_t lid_ho,
+ IN const uint8_t port_num,
+ IN const uint8_t val )
+{
+ uint8_t *p_port_array;
+ cl_status_t status;
+
+ CL_ASSERT( port_num < p_lmx->num_ports );
+ status = cl_vector_set_min_size( &p_lmx->lid_vec, lid_ho + 1 );
+ if( status == CL_SUCCESS )
+ {
+ p_port_array = (uint8_t *)cl_vector_get_ptr( &p_lmx->lid_vec, lid_ho );
+ p_port_array[port_num] = val;
+ if( p_port_array[p_lmx->num_ports] > val )
+ p_port_array[p_lmx->num_ports] = val;
+ }
+ return( status );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv.c
new file mode 100644
index 00000000..54f9dba7
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_mft_rcv_t. + * This object represents the Multicast Forwarding Table Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_mft_rcv_construct( + IN osm_mft_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mft_rcv_destroy( + IN osm_mft_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_mft_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mft_rcv_init( + IN osm_mft_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_mft_rcv_init ); + + osm_mft_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mft_rcv_process( + IN const osm_mft_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + ib_smp_t *p_smp; + uint32_t block_num; + uint8_t position; + osm_switch_t *p_sw; + osm_mft_context_t *p_mft_context; + uint16_t *p_block; + ib_net64_t node_guid; + ib_api_status_t status; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_mft_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_block = (uint16_t*)ib_smp_get_payload_ptr( p_smp ); + block_num = cl_ntoh32( p_smp->attr_mod ) & IB_MCAST_BLOCK_ID_MASK_HO; + position = (uint8_t)((cl_ntoh32( p_smp->attr_mod ) & + 
IB_MCAST_POSITION_MASK_HO) >> IB_MCAST_POSITION_SHIFT);
+
+ /*
+ Acquire the switch object for this switch.
+ */
+ p_mft_context = osm_madw_get_mft_context_ptr( p_madw );
+ node_guid = p_mft_context->node_guid;
+
+ if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+ "osm_mft_rcv_process: "
+ "Setting MFT block %u, position %u"
+ "\n\t\t\t\tSwitch 0x%016" PRIx64 ", TID 0x%" PRIx64 "\n",
+ block_num, position, cl_ntoh64( node_guid ),
+ cl_ntoh64( p_smp->trans_id ) );
+ }
+
+ CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock );
+ p_sw = osm_get_switch_by_guid( p_rcv->p_subn, node_guid );
+
+ if( !p_sw )
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "osm_mft_rcv_process: ERR 0801: "
+ "MFT received for nonexistent node "
+ "0x%016" PRIx64 "\n", cl_ntoh64( node_guid ) );
+ }
+ else
+ {
+ status = osm_switch_set_mft_block( p_sw, p_block,
+ (uint16_t)block_num, position );
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+ "osm_mft_rcv_process: ERR 0802: "
+ "Setting MFT block failed (%s)"
+ "\n\t\t\t\tSwitch 0x%016" PRIx64
+ ", block %u, position %u\n",
+ ib_get_err_str( status ),
+ cl_ntoh64( node_guid ),
+ block_num, position );
+ }
+ }
+
+ CL_PLOCK_RELEASE( p_rcv->p_lock );
+ OSM_LOG_EXIT( p_rcv->p_log );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv_ctrl.c
new file mode 100644
index 00000000..0a492180
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_fwd_rcv_ctrl.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ * Implementation of osm_mft_rcv_ctrl_t.
+ * This object represents the MFT Receive controller object.
+ * This object is part of the opensm family of objects.
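+ * Like its LFT counterpart, it registers with the dispatcher and hands
+ * incoming MFT MADs to the osm_mft_rcv_t receiver.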
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_mft_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_mft_rcv_process( ((osm_mft_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mft_rcv_ctrl_construct( + IN osm_mft_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_mft_rcv_ctrl_destroy( + IN osm_mft_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mft_rcv_ctrl_init( + IN osm_mft_rcv_ctrl_t* const p_ctrl, + IN osm_mft_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_mft_rcv_ctrl_init ); + + osm_mft_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_MFT, + __osm_mft_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mft_rcv_ctrl_init: ERR 0901: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_mgr.c new file mode 100644 index 00000000..b40b64dc --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_mgr.c @@ -0,0 +1,1726 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_mcast_mgr_t. + * This file implements the Multicast Manager object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.9 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LINE_LENGTH 256 + +/********************************************************************** + **********************************************************************/ +typedef struct _osm_mcast_work_obj +{ + cl_list_item_t list_item; + osm_port_t* p_port; +} osm_mcast_work_obj_t; + +/********************************************************************** + **********************************************************************/ +static osm_mcast_work_obj_t* +__osm_mcast_work_obj_new( + IN const osm_port_t* const p_port ) +{ + /* + TO DO - get these objects from a lockpool. + */ + osm_mcast_work_obj_t* p_obj; + + /* + clean allocated memory to avoid assertion when trying to insert to + qlist. + see cl_qlist_insert_tail(): CL_ASSERT(p_list_item->p_list != p_list) + */ + p_obj = malloc( sizeof( *p_obj ) ); + if( p_obj ) + { + memset( p_obj, 0, sizeof( *p_obj ) ); + p_obj->p_port = (osm_port_t*)p_port; + } + + return( p_obj ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mcast_work_obj_delete( + IN osm_mcast_work_obj_t* p_wobj ) +{ + free( p_wobj ); +} + +/********************************************************************** + Recursively remove nodes from the tree +**********************************************************************/ +static void +__osm_mcast_mgr_purge_tree_node( + IN osm_mtree_node_t* p_mtn ) +{ + uint8_t i; + + for( i = 0; i < p_mtn->max_children; i++ ) + { + if( p_mtn->child_array[i] && + (p_mtn->child_array[i] != OSM_MTREE_LEAF) ) + __osm_mcast_mgr_purge_tree_node( p_mtn->child_array[i] ); + + p_mtn->child_array[i] = NULL; + + } + + free( p_mtn ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mcast_mgr_purge_tree( + IN osm_mcast_mgr_t* const p_mgr, + IN osm_mgrp_t* const p_mgrp ) +{ + OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_purge_tree ); + + if( p_mgrp->p_root ) + __osm_mcast_mgr_purge_tree_node( p_mgrp->p_root ); + + p_mgrp->p_root = NULL; + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static float +osm_mcast_mgr_compute_avg_hops( + osm_mcast_mgr_t* const p_mgr, + const osm_mgrp_t* const p_mgrp, + const osm_switch_t* const p_sw ) +{ + float avg_hops = 0; + uint32_t hops = 0; + uint32_t num_ports = 0; + uint16_t base_lid_ho; + const osm_port_t* p_port; + const osm_mcm_port_t* p_mcm_port; + const cl_qmap_t* p_mcm_tbl; + const cl_qmap_t* p_port_tbl; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_compute_avg_hops ); + + p_mcm_tbl = &p_mgrp->mcm_port_tbl; + p_port_tbl = &p_mgr->p_subn->port_guid_tbl; + + /* + For each member 
of the multicast group, compute the
+ number of hops to its base LID.
+ */
+ for( p_mcm_port = (osm_mcm_port_t*)cl_qmap_head( p_mcm_tbl );
+ p_mcm_port != (osm_mcm_port_t*)cl_qmap_end( p_mcm_tbl );
+ p_mcm_port = (osm_mcm_port_t*)cl_qmap_next(&p_mcm_port->map_item))
+ {
+ /*
+ Acquire the port object for this port guid.
+ */
+ p_port = (osm_port_t*)cl_qmap_get( p_port_tbl,
+ ib_gid_get_guid( &p_mcm_port->port_gid ) );
+
+ if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "osm_mcast_mgr_compute_avg_hops: ERR 0A18: "
+ "No port object for port 0x%016" PRIx64 "\n",
+ cl_ntoh64( ib_gid_get_guid( &p_mcm_port->port_gid ) ) );
+ continue;
+ }
+
+ base_lid_ho = cl_ntoh16( osm_port_get_base_lid( p_port ) );
+ hops += osm_switch_get_least_hops( p_sw, base_lid_ho );
+ num_ports++;
+ }
+
+ /*
+ We should not be here if there aren't any ports in the group.
+ */
+ CL_ASSERT( num_ports );
+
+ if( num_ports != 0 )
+ {
+ avg_hops = (float)hops / num_ports;
+ }
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return( avg_hops );
+}
+
+/**********************************************************************
+ Calculate the maximal "min hops" from the given switch to any
+ of the group HCAs
+ **********************************************************************/
+static float
+osm_mcast_mgr_compute_max_hops(
+ osm_mcast_mgr_t* const p_mgr,
+ const osm_mgrp_t* const p_mgrp,
+ const osm_switch_t* const p_sw )
+{
+ uint32_t max_hops = 0;
+ uint32_t hops = 0;
+ uint16_t base_lid_ho;
+ const osm_port_t* p_port;
+ const osm_mcm_port_t* p_mcm_port;
+ const cl_qmap_t* p_mcm_tbl;
+ const cl_qmap_t* p_port_tbl;
+
+ OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_compute_max_hops );
+
+ p_mcm_tbl = &p_mgrp->mcm_port_tbl;
+ p_port_tbl = &p_mgr->p_subn->port_guid_tbl;
+
+ /*
+ For each member of the multicast group, compute the
+ number of hops to its base LID.
+ */
+ for( p_mcm_port = (osm_mcm_port_t*)cl_qmap_head( p_mcm_tbl );
+ p_mcm_port != (osm_mcm_port_t*)cl_qmap_end( p_mcm_tbl );
+ p_mcm_port = (osm_mcm_port_t*)cl_qmap_next(&p_mcm_port->map_item))
+ {
+ /*
+ Acquire the port object for this port guid.
+ */
+ p_port = (osm_port_t*)cl_qmap_get(
+ p_port_tbl,
+ ib_gid_get_guid( &p_mcm_port->port_gid ) );
+
+ if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "osm_mcast_mgr_compute_max_hops: ERR 0A1A: "
+ "No port object for port 0x%016" PRIx64 "\n",
+ cl_ntoh64( ib_gid_get_guid( &p_mcm_port->port_gid ) ) );
+ continue;
+ }
+
+ base_lid_ho = cl_ntoh16( osm_port_get_base_lid( p_port ) );
+ hops = osm_switch_get_least_hops( p_sw, base_lid_ho );
+ if (hops > max_hops) max_hops = hops;
+ }
+
+ if( max_hops == 0 )
+ {
+ /*
+ We only get here if there aren't any ports in the group.
+ */
+ max_hops = 10001; /* see later - we use it to indicate there were no hops */
+ }
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return(float)(max_hops);
+}
+
+/**********************************************************************
+ This function attempts to locate the optimal switch for the
+ center of the spanning tree. The current algorithm chooses
+ the switch with the lowest maximal hop count (or, when built with
+ OSM_VENDOR_INTF_ANAFA, the lowest average hop count) to the members
+ of the multicast group.
+**********************************************************************/
+static osm_switch_t*
+__osm_mcast_mgr_find_optimal_switch(
+ osm_mcast_mgr_t* const p_mgr,
+ const osm_mgrp_t* const p_mgrp )
+{
+ cl_qmap_t* p_sw_tbl;
+ const osm_switch_t* p_sw;
+ const osm_switch_t* p_best_sw = NULL;
+ float hops = 0;
+ float best_hops = 10000; /* any big # will do */
+ uint64_t sw_guid_ho;
+#ifdef OSM_VENDOR_INTF_ANAFA
+ boolean_t use_avg_hops = TRUE; /* anafa2 - bug hca on switch */ /* use max hops for root */
+#else
+ boolean_t use_avg_hops = FALSE; /* use max hops for root */
+#endif
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_find_optimal_switch );
+
+ p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl;
+
+ CL_ASSERT( !osm_mgrp_is_empty( p_mgrp ) );
+
+ for( p_sw = (osm_switch_t*)cl_qmap_head( p_sw_tbl );
+ p_sw != (osm_switch_t*)cl_qmap_end( p_sw_tbl );
+ p_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ) )
+ {
+ if( !osm_switch_supports_mcast( p_sw ) )
+ continue;
+
+ if (use_avg_hops)
+ hops = osm_mcast_mgr_compute_avg_hops( p_mgr, p_mgrp, p_sw );
+ else
+ hops = osm_mcast_mgr_compute_max_hops( p_mgr, p_mgrp, p_sw );
+
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+ {
+ sw_guid_ho = cl_ntoh64( osm_node_get_node_guid(
+ osm_switch_get_node_ptr( p_sw ) ) );
+
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_mcast_mgr_find_optimal_switch: "
+ "Switch 0x%016" PRIx64 ", hops = %f\n",
+ sw_guid_ho, hops );
+ }
+
+ if( hops < best_hops )
+ {
+ p_best_sw = p_sw;
+ best_hops = hops;
+ }
+ }
+
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) )
+ {
+ if( p_best_sw )
+ {
+ sw_guid_ho = cl_ntoh64( osm_node_get_node_guid(
+ osm_switch_get_node_ptr( p_best_sw ) ) );
+
+ osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+ "__osm_mcast_mgr_find_optimal_switch: "
+ "Best switch is 0x%" PRIx64 ", hops = %f\n",
+ sw_guid_ho, best_hops );
+ }
+ else
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+ "__osm_mcast_mgr_find_optimal_switch: "
+ "No multicast capable switches detected\n" );
+ }
+ }
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return( (osm_switch_t*)p_best_sw );
+}
+
+/**********************************************************************
+ This function returns the existing or optimal root switch for the tree.
+**********************************************************************/
+static osm_switch_t*
+__osm_mcast_mgr_find_root_switch(
+ osm_mcast_mgr_t* const p_mgr,
+ const osm_mgrp_t* const p_mgrp )
+{
+ const osm_switch_t* p_sw = NULL;
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_find_root_switch );
+
+ /*
+ We always look for the best multicast tree root switch.
+ Otherwise, since we always start with a single join,
+ the root would always be on the first switch attached to it
+ - very bad ...
+ */
+ p_sw = __osm_mcast_mgr_find_optimal_switch( p_mgr, p_mgrp );
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return( (osm_switch_t*)p_sw );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mcast_mgr_construct(
+ IN osm_mcast_mgr_t* const p_mgr )
+{
+ memset( p_mgr, 0, sizeof(*p_mgr) );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mcast_mgr_destroy(
+ IN osm_mcast_mgr_t* const p_mgr )
+{
+ CL_ASSERT( p_mgr );
+
+ OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_destroy );
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_mcast_mgr_init(
+ IN osm_mcast_mgr_t* const p_mgr,
+ IN osm_req_t* const p_req,
+ IN osm_subn_t* const p_subn,
+ IN osm_log_t* const p_log,
+ IN cl_plock_t* const p_lock )
+{
+ ib_api_status_t status = IB_SUCCESS;
+
+ OSM_LOG_ENTER( p_log, osm_mcast_mgr_init );
+
+ CL_ASSERT( p_req );
+ CL_ASSERT( p_subn );
+ CL_ASSERT( p_lock );
+
+ osm_mcast_mgr_construct( p_mgr );
+
+ p_mgr->p_log = p_log;
+ p_mgr->p_subn = p_subn;
+ p_mgr->p_lock = p_lock;
+ p_mgr->p_req = p_req;
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static osm_signal_t
+__osm_mcast_mgr_set_tbl(
+ IN osm_mcast_mgr_t* const p_mgr,
+ IN osm_switch_t* const p_sw )
+{
+ osm_node_t* p_node;
+ osm_dr_path_t* p_path;
+ osm_madw_context_t mad_context;
+ ib_api_status_t status;
+ uint32_t block_id_ho = 0;
+ int16_t block_num = 0;
+ uint32_t position = 0;
+ uint32_t max_position;
+ osm_mcast_tbl_t* p_tbl;
+ ib_net16_t block[IB_MCAST_BLOCK_SIZE];
+ osm_signal_t signal = OSM_SIGNAL_DONE;
+
+ CL_ASSERT( p_mgr );
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_set_tbl );
+
+ CL_ASSERT( p_sw );
+
+ p_node = osm_switch_get_node_ptr( p_sw );
+
+ CL_ASSERT( p_node );
+
+ p_path = osm_node_get_any_dr_path_ptr( p_node );
+
+ CL_ASSERT( p_path );
+
+ /*
+ Send multicast forwarding table blocks to the switch
+ as long as the switch indicates it has blocks needing
+ configuration.
+ */
+
+ mad_context.mft_context.node_guid = osm_node_get_node_guid( p_node );
+ mad_context.mft_context.set_method = TRUE;
+
+ p_tbl = osm_switch_get_mcast_tbl_ptr( p_sw );
+ max_position = p_tbl->max_position;
+
+ while( osm_mcast_tbl_get_block( p_tbl, block_num,
+ (uint8_t)position, block ) )
+ {
+ block_id_ho = block_num + (position << 28);
+
+ if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_mcast_mgr_set_tbl: "
+ "Writing MFT block 0x%X\n", block_id_ho );
+ }
+
+ status = osm_req_set( p_mgr->p_req,
+ p_path,
+ (void*)block,
+ sizeof(block),
+ IB_MAD_ATTR_MCAST_FWD_TBL,
+ cl_hton32( block_id_ho ),
+ CL_DISP_MSGID_NONE,
+ &mad_context );
+
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_mcast_mgr_set_tbl: ERR 0A02: "
+ "Sending multicast fwd. tbl. block failed (%s)\n",
+ ib_get_err_str( status ) );
+ }
+
+ signal = OSM_SIGNAL_DONE_PENDING;
+
+ if( ++position > max_position )
+ {
+ position = 0;
+ block_num++;
+ }
+ }
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return( signal );
+}
+
+/**********************************************************************
+ This is part of the recursive function to compute the paths in the
+ spanning tree that emanate from this switch. On input, the p_list
+ contains the group members that must be routed from this switch.
+**********************************************************************/
+static void
+__osm_mcast_mgr_subdivide(
+ osm_mcast_mgr_t* const p_mgr,
+ osm_mgrp_t* const p_mgrp,
+ osm_switch_t* const p_sw,
+ cl_qlist_t* const p_list,
+ cl_qlist_t* const list_array,
+ uint8_t const array_size )
+{
+ uint8_t port_num;
+ uint16_t mlid_ho;
+ uint16_t lid_ho;
+ boolean_t ignore_existing;
+ osm_mcast_work_obj_t* p_wobj;
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_subdivide );
+
+ mlid_ho = cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) );
+
+ /*
+ For multicast groups, we do not want to count on previous
+ configurations, since loops could easily generate a storm.
+ */
+ ignore_existing = TRUE;
+
+ /*
+ Subdivide the set of ports into non-overlapping subsets
+ that will be routed to other switches.
+ */
+ while( (p_wobj = (osm_mcast_work_obj_t*)cl_qlist_remove_head( p_list )) !=
+ (osm_mcast_work_obj_t*)cl_qlist_end( p_list ) )
+ {
+ lid_ho = cl_ntoh16( osm_port_get_base_lid( p_wobj->p_port ) );
+
+ port_num = osm_switch_recommend_mcast_path(
+ p_sw, lid_ho, mlid_ho, ignore_existing );
+
+ if( port_num == OSM_NO_PATH )
+ {
+ /*
+ This typically occurs if the switch does not support
+ multicast and the multicast tree must branch at this
+ switch.
+ */
+ uint64_t node_guid_ho = cl_ntoh64( osm_node_get_node_guid(
+ osm_switch_get_node_ptr( p_sw ) ) );
+
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_mcast_mgr_subdivide: ERR 0A03: "
+ "Error routing MLID 0x%X through switch 0x%" PRIx64 "\n"
+ "\t\t\t\tNo multicast paths from this switch for port "
+ "with LID 0x%X\n",
+ mlid_ho, node_guid_ho, lid_ho );
+
+ __osm_mcast_work_obj_delete( p_wobj );
+ continue;
+ }
+
+ if( port_num > array_size )
+ {
+ uint64_t node_guid_ho = cl_ntoh64( osm_node_get_node_guid(
+ osm_switch_get_node_ptr( p_sw ) ) );
+
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_mcast_mgr_subdivide: ERR 0A04: "
+ "Error routing MLID 0x%X through switch 0x%" PRIx64 "\n"
+ "\t\t\t\tNo multicast paths from this switch to port "
+ "with LID 0x%X\n",
+ mlid_ho, node_guid_ho, lid_ho );
+
+ __osm_mcast_work_obj_delete( p_wobj );
+
+ /* This means OpenSM has a bug. */
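+ /* recommend_mcast_path returned a port number beyond the
+ list array allocated for this switch. */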
*/ + CL_ASSERT( FALSE ); + continue; + } + + cl_qlist_insert_tail( &list_array[port_num], &p_wobj->list_item ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mcast_mgr_purge_list( + osm_mcast_mgr_t* const p_mgr, + cl_qlist_t* const p_list ) +{ + osm_mcast_work_obj_t* p_wobj; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_purge_list ); + + while( (p_wobj = (osm_mcast_work_obj_t*)cl_qlist_remove_head( p_list ) ) + != (osm_mcast_work_obj_t*)cl_qlist_end( p_list ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_purge_list: ERR 0A06: " + "Unable to route for port 0x%" PRIx64 "\n", + osm_port_get_guid( p_wobj->p_port ) ); + __osm_mcast_work_obj_delete( p_wobj ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + This is the recursive function to compute the paths in the spanning + tree that emanate from this switch. On input, the p_list contains + the group members that must be routed from this switch. + + The function returns the newly created mtree node element. +**********************************************************************/ +static osm_mtree_node_t* +__osm_mcast_mgr_branch( + osm_mcast_mgr_t* const p_mgr, + osm_mgrp_t* const p_mgrp, + osm_switch_t* const p_sw, + cl_qlist_t* const p_list, + uint8_t depth, + uint8_t const upstream_port, + uint8_t* const p_max_depth ) +{ + uint8_t max_children; + osm_mtree_node_t* p_mtn = NULL; + cl_qlist_t* list_array = NULL; + uint8_t i; + ib_net64_t node_guid; + uint64_t node_guid_ho; + osm_mcast_work_obj_t* p_wobj; + cl_qlist_t* p_port_list; + size_t count; + uint16_t mlid_ho; + osm_mcast_tbl_t* p_tbl; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_branch ); + + CL_ASSERT( p_sw ); + CL_ASSERT( p_list ); + CL_ASSERT( p_max_depth ); + + node_guid = osm_node_get_node_guid( osm_switch_get_node_ptr( p_sw ) ); + node_guid_ho = cl_ntoh64( node_guid ); + mlid_ho = cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_mcast_mgr_branch: " + "Routing MLID 0x%X through switch 0x%" PRIx64 "\n" + "\t\t\t\t%u nodes at depth %u\n", + mlid_ho, + node_guid_ho, + cl_qlist_count( p_list ), depth ); + } + + CL_ASSERT( cl_qlist_count( p_list ) > 0 ); + + depth++; + + if( depth > *p_max_depth ) + { + CL_ASSERT( depth == *p_max_depth + 1 ); + *p_max_depth = depth; + } + + if( osm_switch_supports_mcast( p_sw ) == FALSE ) + { + /* + This switch doesn't do multicast. Clean-up. + */ + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_branch: ERR 0A14: " + "Switch 0x%" PRIx64 " does not support multicast\n", + node_guid_ho ); + + /* + Deallocate all the work objects on this branch of the tree. + */ + __osm_mcast_mgr_purge_list( p_mgr, p_list ); + goto Exit; + } + + p_mtn = osm_mtree_node_new( p_sw ); + if( p_mtn == NULL ) + { + /* + We are unable to continue routing down this + leg of the tree. Clean-up. + */ + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_branch: ERR 0A15: " + "Insufficient memory to build multicast tree\n" ); + + /* + Deallocate all the work objects on this branch of the tree. 
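To make the partition concrete (all values illustrative): if members with LIDs 5 and 6 are reached through switch port 1 and a member with LID 9 through port 3, the subdivide step above leaves two work objects in list_array[1] and one in list_array[3], and __osm_mcast_mgr_branch then recurses once per non-empty bucket, so the tree forks exactly where the recommended egress ports diverge:

  // After __osm_mcast_mgr_subdivide(), under the assumption above:
  CL_ASSERT( cl_qlist_count( &list_array[1] ) == 2 );  // LIDs 5 and 6
  CL_ASSERT( cl_qlist_count( &list_array[3] ) == 1 );  // LID 9
  CL_ASSERT( cl_qlist_count( &list_array[2] ) == 0 );  // no members here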
+ */ + __osm_mcast_mgr_purge_list( p_mgr, p_list ); + goto Exit; + } + + max_children = osm_mtree_node_get_max_children( p_mtn ); + + CL_ASSERT( max_children > 1 ); + + /* + Prepare an empty list for each port in the switch. + TO DO - this list array could probably be moved + inside the switch element to save on malloc thrashing. + */ + list_array = malloc( sizeof(cl_qlist_t) * max_children ); + if( list_array == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_branch: ERR 0A16: " + "Unable to allocate list array\n" ); + __osm_mcast_mgr_purge_list( p_mgr, p_list ); + goto Exit; + } + + memset( list_array, 0, sizeof(cl_qlist_t) * max_children ); + + for( i = 0; i < max_children; i++ ) + cl_qlist_init( &list_array[i] ); + + __osm_mcast_mgr_subdivide( p_mgr, p_mgrp, p_sw, p_list, list_array, + max_children ); + + p_tbl = osm_switch_get_mcast_tbl_ptr( p_sw ); + + /* + Add the upstream port to the forwarding table unless + we're at the root of the spanning tree. + */ + if( depth > 1 ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_mcast_mgr_branch: " + "Adding upstream port 0x%X\n", upstream_port ); + } + + CL_ASSERT( upstream_port ); + osm_mcast_tbl_set( p_tbl, mlid_ho, upstream_port ); + } + + /* + For each port that was allocated some routes, + recurse into this function to continue building the tree + if the node on the other end of that port is another switch. + Otherwise, the node is an endpoint, and we've found a leaf + of the tree. Mark leaves with our special pointer value. + */ + + for( i = 0; i < max_children; i++ ) + { + const osm_physp_t *p_physp; + const osm_physp_t *p_remote_physp; + const osm_node_t *p_node; + const osm_node_t *p_remote_node; + + p_port_list = &list_array[i]; + + count = cl_qlist_count( p_port_list ); + + /* + There should be no children routed through the upstream port! + */ + CL_ASSERT( ( upstream_port == 0 ) || ( i != upstream_port) || + ( (i == upstream_port) && (count == 0)) ); + + if( count == 0) + continue; /* No routes down this port. */ + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_mcast_mgr_branch: " + "Routing %zu destination(s) via switch port 0x%X\n", + count, i ); + } + + /* + This port routes frames for this mcast group. Therefore, + set the appropriate bit in the multicast forwarding + table for this switch. + */ + osm_mcast_tbl_set( p_tbl, mlid_ho, i ); + if (i == 0) + /* This means we are adding the switch to the MC group. + We do not need to continue looking at the remote port, just + needed to add the port to the table */ + continue; + + p_node = osm_switch_get_node_ptr( p_sw ); + p_remote_node = osm_node_get_remote_node( p_node, i, NULL ); + + if( osm_node_get_type( p_remote_node ) == IB_NODE_TYPE_SWITCH ) + { + /* + Acquire a pointer to the remote switch then recurse. + */ + CL_ASSERT( p_remote_node->sw ); + + p_physp = osm_node_get_physp_ptr( p_node, i ); + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + p_remote_physp = osm_physp_get_remote( p_physp ); + CL_ASSERT( p_remote_physp ); + CL_ASSERT( osm_physp_is_valid( p_remote_physp ) ); + + p_mtn->child_array[i] = __osm_mcast_mgr_branch( + p_mgr, p_mgrp, p_remote_node->sw, + p_port_list, depth, + osm_physp_get_port_num( p_remote_physp), + p_max_depth ); + } + else + { + /* + The neighbor node is not a switch, so this + must be a leaf. 
+ */ + CL_ASSERT( count == 1 ); + + p_mtn->child_array[i] = OSM_MTREE_LEAF; + p_wobj = (osm_mcast_work_obj_t*)cl_qlist_remove_head( + p_port_list ); + + CL_ASSERT( cl_is_qlist_empty( p_port_list ) ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_mcast_mgr_branch: " + "Found leaf for port 0x%016" PRIx64 "\n" + "\t\t\t\ton switch port 0x%X\n", + cl_ntoh64( osm_port_get_guid( p_wobj->p_port ) ), i ); + } + + __osm_mcast_work_obj_delete( p_wobj ); + } + } + + free( list_array ); + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return( p_mtn ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_mcast_mgr_build_spanning_tree( + osm_mcast_mgr_t* const p_mgr, + osm_mgrp_t* const p_mgrp ) +{ + const cl_qmap_t* p_mcm_tbl; + const cl_qmap_t* p_port_tbl; + const osm_port_t* p_port; + const osm_mcm_port_t* p_mcm_port; + uint32_t num_ports; + cl_qlist_t port_list; + osm_switch_t* p_sw; + osm_mcast_work_obj_t* p_wobj; + ib_api_status_t status = IB_SUCCESS; + uint8_t max_depth = 0; + uint32_t count; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_build_spanning_tree ); + + cl_qlist_init( &port_list ); + + /* + TO DO - for now, just blow away the old tree. + In the future we'll need to construct the tree based + on multicast forwarding table information if the user wants to + preserve existing multicast routes. + */ + __osm_mcast_mgr_purge_tree( p_mgr, p_mgrp ); + + p_mcm_tbl = &p_mgrp->mcm_port_tbl; + p_port_tbl = &p_mgr->p_subn->port_guid_tbl; + num_ports = cl_qmap_count( p_mcm_tbl ); + if( num_ports == 0 ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_mcast_mgr_build_spanning_tree: " + "MLID 0x%X has no members--nothing to do\n", + cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) ) ); + } + goto Exit; + } + + /* + This function builds the single spanning tree recursively. + At each stage, the ports to be reached are divided into + non-overlapping subsets of member ports that can be reached through + a given switch port. Construction then moves down each + branch, and the process starts again with each branch computing + for its own subset of the member ports. + + The maximum recursion depth is at worst the maximum hop count in the + subnet, which is spec limited to 64. + */ + + /* + Locate the switch around which to create the spanning + tree for this multicast group. + */ + p_sw = __osm_mcast_mgr_find_root_switch( p_mgr, p_mgrp ); + if( p_sw == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_build_spanning_tree: ERR 0A08: " + "Unable to locate a suitable switch for group 0x%X\n", + cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) )); + status = IB_ERROR; + goto Exit; + } + + /* + Build the first "subset" containing all member ports. + */ + for( p_mcm_port = (osm_mcm_port_t*)cl_qmap_head( p_mcm_tbl ); + p_mcm_port != (osm_mcm_port_t*)cl_qmap_end( p_mcm_tbl ); + p_mcm_port = (osm_mcm_port_t*)cl_qmap_next(&p_mcm_port->map_item)) + { + /* + Acquire the port object for this port guid, then create + the new worker object to build the list. 
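The work-object type and its new/delete helpers are defined earlier in this file, outside the hunk shown here. From the calls above, a minimal sketch of the assumed shape, with field names inferred from usage, so treat it as illustration only:

  // Assumed shape, reconstructed from usage in this file; the real
  // definition appears earlier in osm_mcast_mgr.c and may differ.
  typedef struct _osm_mcast_work_obj
  {
    cl_list_item_t   list_item;   // linkage for the cl_qlist calls
    osm_port_t      *p_port;      // member port this object represents
  } osm_mcast_work_obj_t;

  static osm_mcast_work_obj_t *
  __osm_mcast_work_obj_new( IN const osm_port_t* const p_port )
  {
    osm_mcast_work_obj_t *p_wobj = malloc( sizeof(*p_wobj) );
    if( p_wobj )
    {
      memset( p_wobj, 0, sizeof(*p_wobj) );
      p_wobj->p_port = (osm_port_t*)p_port;
    }
    return( p_wobj );
  }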
+ */ + p_port = (osm_port_t*)cl_qmap_get( p_port_tbl, + ib_gid_get_guid( &p_mcm_port->port_gid ) ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_build_spanning_tree: ERR 0A09: " + "No port object for port 0x%016" PRIx64 "\n", + cl_ntoh64( ib_gid_get_guid( &p_mcm_port->port_gid ) ) ); + continue; + } + + p_wobj = __osm_mcast_work_obj_new( p_port ); + if( p_wobj == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_mcast_mgr_build_spanning_tree: ERR 0A10: " + "Insufficient memory to route port 0x%016" PRIx64 "\n", + cl_ntoh64( osm_port_get_guid( p_port ) ) ); + continue; + } + + cl_qlist_insert_tail( &port_list, &p_wobj->list_item ); + } + + count = cl_qlist_count( &port_list ); + p_mgrp->p_root = __osm_mcast_mgr_branch( p_mgr, p_mgrp, p_sw, + &port_list, 0, 0, &max_depth ); + + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_mcast_mgr_build_spanning_tree: " + "Configured MLID 0x%X for %u ports, max tree depth = %u\n", + cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) ), + count, max_depth ); + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} + +#if 0 +/* unused */ +/********************************************************************** + **********************************************************************/ +void +osm_mcast_mgr_set_table( + IN osm_mcast_mgr_t* const p_mgr, + IN const osm_mgrp_t* const p_mgrp, + IN const osm_mtree_node_t* const p_mtn ) +{ + uint8_t i; + uint8_t max_children; + osm_mtree_node_t* p_child_mtn; + uint16_t mlid_ho; + osm_mcast_tbl_t* p_tbl; + osm_switch_t* p_sw; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_set_table ); + + mlid_ho = cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) ); + p_sw = osm_mtree_node_get_switch_ptr( p_mtn ); + + CL_ASSERT( p_sw ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "osm_mcast_mgr_set_table: " + "Configuring MLID 0x%X on switch 0x%" PRIx64 "\n", + mlid_ho, osm_node_get_node_guid( + osm_switch_get_node_ptr( p_sw ) ) ); + } + + /* + For every child of this tree node, set the corresponding + bit in the switch's mcast table. + */ + p_tbl = osm_switch_get_mcast_tbl_ptr( p_sw ); + max_children = osm_mtree_node_get_max_children( p_mtn ); + + CL_ASSERT( max_children <= osm_switch_get_num_ports( p_sw ) ); + + osm_mcast_tbl_clear_mlid( p_tbl, mlid_ho ); + + for( i = 0; i < max_children; i++ ) + { + p_child_mtn = osm_mtree_node_get_child( p_mtn, i ); + if( p_child_mtn == NULL ) + continue; + + osm_mcast_tbl_set( p_tbl, mlid_ho, i ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} +#endif + +/********************************************************************** + **********************************************************************/ +static void +__osm_mcast_mgr_clear( + IN osm_mcast_mgr_t* const p_mgr, + IN osm_mgrp_t* const p_mgrp ) +{ + osm_switch_t* p_sw; + cl_qmap_t* p_sw_tbl; + osm_mcast_tbl_t* p_mcast_tbl; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_mcast_mgr_clear ); + + /* + Walk the switches and clear the routing entries for + this MLID. 
+ */ + p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl; + p_sw = (osm_switch_t*)cl_qmap_head( p_sw_tbl ); + while( p_sw != (osm_switch_t*)cl_qmap_end( p_sw_tbl ) ) + { + p_mcast_tbl = osm_switch_get_mcast_tbl_ptr( p_sw ); + osm_mcast_tbl_clear_mlid( p_mcast_tbl, cl_ntoh16(p_mgrp->mlid) ); + p_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +#if 0 +/* TO DO - make this real -- at least update spanning tree */ +/********************************************************************** + Lock must be held on entry. +**********************************************************************/ +ib_api_status_t +osm_mcast_mgr_process_single( + IN osm_mcast_mgr_t* const p_mgr, + IN ib_net16_t const mlid, + IN ib_net64_t const port_guid, + IN uint8_t const join_state ) +{ + uint8_t port_num; + uint16_t mlid_ho; + osm_switch_t* p_sw; + ib_net64_t sw_guid; + osm_port_t* p_port; + osm_physp_t* p_physp; + osm_physp_t* p_remote_physp; + osm_node_t* p_remote_node; + cl_qmap_t* p_port_tbl; + osm_mcast_tbl_t* p_mcast_tbl; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process_single ); + + CL_ASSERT( mlid ); + CL_ASSERT( port_guid ); + + p_port_tbl = &p_mgr->p_subn->port_guid_tbl; + mlid_ho = cl_ntoh16( mlid ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_mcast_mgr_process_single: " + "Attempting to add port 0x%" PRIx64 " to MLID 0x%X, " + "\n\t\t\t\tjoin state = 0x%X\n", + cl_ntoh64( port_guid ), mlid_ho, join_state ); + } + + /* + Acquire the Port object. + */ + p_port = (osm_port_t*)cl_qmap_get( p_port_tbl, port_guid ); + if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A01: " + "Unable to acquire port object for 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + p_physp = osm_port_get_default_phys_ptr( p_port ); + if( p_physp == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A05: " + "Unable to acquire phsyical port object for 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A07: " + "Unable to acquire valid physical port object " + "for 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A11: " + "Unable to acquire remote phsyical port object " + "for 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( !osm_physp_is_valid( p_remote_physp ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A21: " + "Unable to acquire valid remote physical port object " + "for 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + p_remote_node = osm_physp_get_node_ptr( p_remote_physp ); + + CL_ASSERT( p_remote_node ); + + sw_guid = osm_node_get_node_guid( p_remote_node ); + + if( osm_node_get_type( p_remote_node ) != IB_NODE_TYPE_SWITCH ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A22: " + "Remote node not a switch node 0x%" PRIx64 "\n", + cl_ntoh64( sw_guid ) ); + status = IB_ERROR; + goto Exit; + } + + p_sw = 
p_remote_node->sw; + if( !p_sw ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A12: " + "No switch object 0x%" PRIx64 "\n", + cl_ntoh64( sw_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( osm_switch_is_in_mcast_tree( p_sw, mlid_ho ) ) + { + /* + We're in luck. The switch attached to this port + is already in the multicast group, so we can just + add the specified port as a new leaf of the tree. + */ + if( join_state & (IB_JOIN_STATE_FULL | IB_JOIN_STATE_NON ) ) + { + /* + This node wants to receive multicast frames. + Get the switch port number to which the new member port + is attached, then configure this single mcast table. + */ + port_num = osm_physp_get_port_num( p_remote_physp ); + CL_ASSERT( port_num ); + + p_mcast_tbl = osm_switch_get_mcast_tbl_ptr( p_sw ); + osm_mcast_tbl_set( p_mcast_tbl, mlid_ho, port_num ); + } + else + { + if( join_state & IB_JOIN_STATE_SEND_ONLY ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_mcast_mgr_process_single: " + "Success. Nothing to do for send" + "only member\n" ); + } + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_single: ERR 0A13: " + "Unknown join state 0x%X\n", join_state ); + status = IB_ERROR; + goto Exit; + } + } + } + else + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_mcast_mgr_process_single: " + "Unable to add port\n" ); + } + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} +#endif + +/********************************************************************** + lock must already be held on entry +**********************************************************************/ +static ib_api_status_t +osm_mcast_mgr_process_tree( + IN osm_mcast_mgr_t* const p_mgr, + IN osm_mgrp_t* const p_mgrp, + IN osm_mcast_req_type_t req_type, + ib_net64_t port_guid ) +{ + ib_api_status_t status = IB_SUCCESS; + ib_net16_t mlid; + boolean_t ui_mcast_fdb_assign_func_defined; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process_tree ); + + mlid = osm_mgrp_get_mlid( p_mgrp ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_mcast_mgr_process_tree: " + "Processing multicast group 0x%X\n", cl_ntoh16( mlid )); + } + + /* + If there are no switches in the subnet, then we have nothing to do. + */ + if( cl_qmap_count( &p_mgr->p_subn->sw_guid_tbl ) == 0 ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_mcast_mgr_process_tree: " + "No switches in subnet. Nothing to do\n" ); + } + goto Exit; + } + + if (p_mgr->p_subn->opt.pfn_ui_mcast_fdb_assign) + ui_mcast_fdb_assign_func_defined = TRUE; + else + ui_mcast_fdb_assign_func_defined = FALSE; + + /* + Clear the multicast tables to start clean, then build + the spanning tree which sets the mcast table bits for each + port in the group. + We will clean the multicast tables if a ui_mcast function isn't + defined, or if such function is defined, but we got here + through a MC_CREATE request - this means we are creating a new + multicast group - clean all old data. + */ + if ( ui_mcast_fdb_assign_func_defined == FALSE || + req_type == OSM_MCAST_REQ_TYPE_CREATE ) + __osm_mcast_mgr_clear( p_mgr, p_mgrp ); + + /* If a UI function is defined, then we will call it here. 
+ If not, use the regular spanning-tree build function. */
+  if ( ui_mcast_fdb_assign_func_defined == FALSE )
+  {
+    status = __osm_mcast_mgr_build_spanning_tree( p_mgr, p_mgrp );
+    if( status != IB_SUCCESS )
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+        "osm_mcast_mgr_process_tree: ERR 0A17: "
+        "Unable to create spanning tree (%s)\n",
+        ib_get_err_str( status ) );
+      goto Exit;
+    }
+  }
+  else
+  {
+    if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) )
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+        "osm_mcast_mgr_process_tree: "
+        "Invoking UI function pfn_ui_mcast_fdb_assign\n");
+    }
+
+    p_mgr->p_subn->opt.pfn_ui_mcast_fdb_assign(
+      p_mgr->p_subn->opt.ui_mcast_fdb_assign_ctx,
+      mlid, req_type, port_guid );
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_mgr->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+mcast_mgr_dump_sw_routes(
+  IN const osm_mcast_mgr_t* const p_mgr,
+  IN const osm_switch_t* const p_sw,
+  IN FILE *file )
+{
+  osm_mcast_tbl_t* p_tbl;
+  uint16_t mlid_ho = 0;
+  uint16_t mlid_start_ho;
+  uint8_t position = 0;
+  int16_t block_num = 0;
+  boolean_t first_mlid;
+  boolean_t first_port;
+  const osm_node_t* p_node;
+  uint16_t i, j;
+  uint16_t mask_entry;
+  char sw_hdr[256];
+  char mlid_hdr[32];
+
+  OSM_LOG_ENTER( p_mgr->p_log, mcast_mgr_dump_sw_routes );
+
+  if( !osm_log_is_active( p_mgr->p_log, OSM_LOG_ROUTING ) )
+    goto Exit;
+
+  p_node = osm_switch_get_node_ptr( p_sw );
+
+  p_tbl = osm_switch_get_mcast_tbl_ptr( p_sw );
+
+  sprintf( sw_hdr, "\nSwitch 0x%016" PRIx64 "\n"
+    "LID : Out Port(s)\n",
+    cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+  first_mlid = TRUE;
+  while ( block_num <= p_tbl->max_block_in_use )
+  {
+    mlid_start_ho = (uint16_t)(block_num * IB_MCAST_BLOCK_SIZE);
+    for (i = 0 ; i < IB_MCAST_BLOCK_SIZE ; i++)
+    {
+      mlid_ho = mlid_start_ho + i;
+      position = 0;
+      first_port = TRUE;
+      sprintf( mlid_hdr, "0x%04X :", mlid_ho + IB_LID_MCAST_START_HO );
+      while ( position <= p_tbl->max_position )
+      {
+        mask_entry = cl_ntoh16((*p_tbl->p_mask_tbl)[mlid_ho][position]);
+        if (mask_entry == 0)
+        {
+          position++;
+          continue;
+        }
+        for (j = 0 ; j < 16 ; j++)
+        {
+          if ( (1 << j) & mask_entry )
+          {
+            if (first_mlid)
+            {
+              fprintf( file,"%s", sw_hdr );
+              first_mlid = FALSE;
+            }
+            if (first_port)
+            {
+              fprintf( file,"%s", mlid_hdr );
+              first_port = FALSE;
+            }
+            fprintf( file, " 0x%03X ", j+(position*16) );
+          }
+        }
+        position++;
+      }
+      if (first_port == FALSE)
+      {
+        fprintf( file, "\n" );
+      }
+    }
+    block_num++;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+struct mcast_mgr_dump_context {
+  osm_mcast_mgr_t *p_mgr;
+  FILE *file;
+};
+
+static void
+mcast_mgr_dump_table(cl_map_item_t *p_map_item, void *context)
+{
+  osm_switch_t *p_sw = (osm_switch_t *)p_map_item;
+  struct mcast_mgr_dump_context *cxt = context;
+
+  mcast_mgr_dump_sw_routes(cxt->p_mgr, p_sw, cxt->file);
+}
"mcast_dump_mcast_routes: ERR 0A18: " + "cannot create mcfdb file \'%s\': %s\n", + file_name, strerror(errno)); + return; + } + + dump_context.p_mgr = p_mgr; + dump_context.file = file; + + cl_qmap_apply_func(&p_mgr->p_subn->sw_guid_tbl, + mcast_mgr_dump_table, &dump_context); + + fclose(file); +} + +/********************************************************************** + Process the entire group. + + NOTE : The lock should be held externally! + **********************************************************************/ +static osm_signal_t +osm_mcast_mgr_process_mgrp( + IN osm_mcast_mgr_t* const p_mgr, + IN osm_mgrp_t* const p_mgrp, + IN osm_mcast_req_type_t req_type, + IN ib_net64_t port_guid ) +{ + osm_signal_t signal = OSM_SIGNAL_DONE; + ib_api_status_t status; + osm_switch_t* p_sw; + cl_qmap_t* p_sw_tbl; + boolean_t pending_transactions = FALSE; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process_mgrp ); + + p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl; + + status = osm_mcast_mgr_process_tree( p_mgr, p_mgrp, req_type, port_guid ); + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process_mgrp: ERR 0A19: " + "Unable to create spanning tree (%s)\n", + ib_get_err_str( status ) ); + + goto Exit; + } + + /* + Walk the switches and download the tables for each. + */ + p_sw = (osm_switch_t*)cl_qmap_head( p_sw_tbl ); + while( p_sw != (osm_switch_t*)cl_qmap_end( p_sw_tbl ) ) + { + signal = __osm_mcast_mgr_set_tbl( p_mgr, p_sw ); + if( signal == OSM_SIGNAL_DONE_PENDING ) + pending_transactions = TRUE; + + p_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ); + } + + mcast_mgr_dump_mcast_routes( p_mgr ); + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + + if( pending_transactions == TRUE ) + return( OSM_SIGNAL_DONE_PENDING ); + else + return( OSM_SIGNAL_DONE ); +} + +/********************************************************************** + **********************************************************************/ +osm_signal_t +osm_mcast_mgr_process( + IN osm_mcast_mgr_t* const p_mgr ) +{ + osm_signal_t signal; + osm_switch_t* p_sw; + cl_qmap_t* p_sw_tbl; + cl_qmap_t* p_mcast_tbl; + osm_mgrp_t* p_mgrp; + ib_api_status_t status; + boolean_t pending_transactions = FALSE; + + OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process ); + + p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl; + + p_mcast_tbl = &p_mgr->p_subn->mgrp_mlid_tbl; + /* + While holding the lock, iterate over all the established + multicast groups, servicing each in turn. + + Then, download the multicast tables to the switches. + */ + CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock ); + + p_mgrp = (osm_mgrp_t*)cl_qmap_head( p_mcast_tbl ); + while( p_mgrp != (osm_mgrp_t*)cl_qmap_end( p_mcast_tbl ) ) + { + /* We reached here due to some change that caused a heavy sweep + of the subnet. Not due to a specific multicast request. + So the request type is subnet_change and the port guid is 0. */ + status = osm_mcast_mgr_process_tree( p_mgr, p_mgrp, + OSM_MCAST_REQ_TYPE_SUBNET_CHANGE, 0); + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_mcast_mgr_process: ERR 0A20: " + "Unable to create spanning tree (%s)\n", + ib_get_err_str( status ) ); + } + + p_mgrp = (osm_mgrp_t*)cl_qmap_next( &p_mgrp->map_item ); + } + + /* + Walk the switches and download the tables for each. 
+
+/**********************************************************************
+ Process the entire group.
+
+ NOTE: The lock should be held externally!
+ **********************************************************************/
+static osm_signal_t
+osm_mcast_mgr_process_mgrp(
+  IN osm_mcast_mgr_t* const p_mgr,
+  IN osm_mgrp_t* const p_mgrp,
+  IN osm_mcast_req_type_t req_type,
+  IN ib_net64_t port_guid )
+{
+  osm_signal_t signal = OSM_SIGNAL_DONE;
+  ib_api_status_t status;
+  osm_switch_t* p_sw;
+  cl_qmap_t* p_sw_tbl;
+  boolean_t pending_transactions = FALSE;
+
+  OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process_mgrp );
+
+  p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl;
+
+  status = osm_mcast_mgr_process_tree( p_mgr, p_mgrp, req_type, port_guid );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+      "osm_mcast_mgr_process_mgrp: ERR 0A19: "
+      "Unable to create spanning tree (%s)\n",
+      ib_get_err_str( status ) );
+
+    goto Exit;
+  }
+
+  /*
+    Walk the switches and download the tables for each.
+  */
+  p_sw = (osm_switch_t*)cl_qmap_head( p_sw_tbl );
+  while( p_sw != (osm_switch_t*)cl_qmap_end( p_sw_tbl ) )
+  {
+    signal = __osm_mcast_mgr_set_tbl( p_mgr, p_sw );
+    if( signal == OSM_SIGNAL_DONE_PENDING )
+      pending_transactions = TRUE;
+
+    p_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item );
+  }
+
+  mcast_mgr_dump_mcast_routes( p_mgr );
+
+ Exit:
+  OSM_LOG_EXIT( p_mgr->p_log );
+
+  if( pending_transactions == TRUE )
+    return( OSM_SIGNAL_DONE_PENDING );
+  else
+    return( OSM_SIGNAL_DONE );
+}
+
+/**********************************************************************
+ **********************************************************************/
+osm_signal_t
+osm_mcast_mgr_process(
+  IN osm_mcast_mgr_t* const p_mgr )
+{
+  osm_signal_t signal;
+  osm_switch_t* p_sw;
+  cl_qmap_t* p_sw_tbl;
+  cl_qmap_t* p_mcast_tbl;
+  osm_mgrp_t* p_mgrp;
+  ib_api_status_t status;
+  boolean_t pending_transactions = FALSE;
+
+  OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process );
+
+  p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl;
+
+  p_mcast_tbl = &p_mgr->p_subn->mgrp_mlid_tbl;
+  /*
+    While holding the lock, iterate over all the established
+    multicast groups, servicing each in turn.
+
+    Then, download the multicast tables to the switches.
+  */
+  CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock );
+
+  p_mgrp = (osm_mgrp_t*)cl_qmap_head( p_mcast_tbl );
+  while( p_mgrp != (osm_mgrp_t*)cl_qmap_end( p_mcast_tbl ) )
+  {
+    /* We got here because some change caused a heavy sweep of the
+       subnet, not because of a specific multicast request, so the
+       request type is subnet_change and the port GUID is 0. */
+    status = osm_mcast_mgr_process_tree( p_mgr, p_mgrp,
+                 OSM_MCAST_REQ_TYPE_SUBNET_CHANGE, 0);
+    if( status != IB_SUCCESS )
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+        "osm_mcast_mgr_process: ERR 0A20: "
+        "Unable to create spanning tree (%s)\n",
+        ib_get_err_str( status ) );
+    }
+
+    p_mgrp = (osm_mgrp_t*)cl_qmap_next( &p_mgrp->map_item );
+  }
+
+  /*
+    Walk the switches and download the tables for each.
+  */
+  p_sw = (osm_switch_t*)cl_qmap_head( p_sw_tbl );
+  while( p_sw != (osm_switch_t*)cl_qmap_end( p_sw_tbl ) )
+  {
+    signal = __osm_mcast_mgr_set_tbl( p_mgr, p_sw );
+    if( signal == OSM_SIGNAL_DONE_PENDING )
+      pending_transactions = TRUE;
+
+    p_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item );
+  }
+
+  mcast_mgr_dump_mcast_routes( p_mgr );
+
+  CL_PLOCK_RELEASE( p_mgr->p_lock );
+
+  OSM_LOG_EXIT( p_mgr->p_log );
+
+  if( pending_transactions == TRUE )
+    return( OSM_SIGNAL_DONE_PENDING );
+  else
+    return( OSM_SIGNAL_DONE );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static
+osm_mgrp_t *
+__get_mgrp_by_mlid(
+  IN osm_mcast_mgr_t* const p_mgr,
+  IN ib_net16_t const mlid)
+{
+  cl_map_item_t *map_item;
+
+  map_item = cl_qmap_get(&p_mgr->p_subn->mgrp_mlid_tbl, mlid);
+  if(map_item == cl_qmap_end(&p_mgr->p_subn->mgrp_mlid_tbl))
+  {
+    return NULL;
+  }
+  return (osm_mgrp_t *)map_item;
+}
+
+/**********************************************************************
+ This is the function that is invoked during idle time to handle the
+ process request. Context1 is simply the osm_mcast_mgr_t*; Context2
+ holds the MLID, port GUID and action (join/leave/delete) required.
+ **********************************************************************/
+osm_signal_t
+osm_mcast_mgr_process_mgrp_cb(
+  IN void* const Context1,
+  IN void* const Context2 )
+{
+  osm_mcast_mgr_t* p_mgr = (osm_mcast_mgr_t*)Context1;
+  osm_mgrp_t* p_mgrp;
+  ib_net16_t mlid;
+  osm_signal_t signal = OSM_SIGNAL_DONE;
+  osm_mcast_mgr_ctxt_t* p_ctxt = (osm_mcast_mgr_ctxt_t*)Context2;
+  osm_mcast_req_type_t req_type = p_ctxt->req_type;
+  ib_net64_t port_guid = p_ctxt->port_guid;
+
+  OSM_LOG_ENTER( p_mgr->p_log, osm_mcast_mgr_process_mgrp_cb );
+
+  /* copy the MLID via memcpy to avoid a warning about a size mismatch */
+  memcpy(&mlid, &p_ctxt->mlid, sizeof(mlid));
+
+  /* we can destroy the context now */
+  free(p_ctxt);
+
+  /* take the lock to make sure p_mgrp is not changed by another thread */
+  CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock );
+  p_mgrp = __get_mgrp_by_mlid( p_mgr, mlid);
+
+  /* Since execution was deferred, we passed the MLID as the group
+     identifier; look the group up again, or abort if it is gone. */
+
+  if (p_mgrp)
+  {
+
+    /* if there was no change since the last time we processed the
+       group, we can skip doing anything
+    */
+    if ( p_mgrp->last_change_id == p_mgrp->last_tree_id)
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+        "osm_mcast_mgr_process_mgrp_cb: "
+        "Skip processing mgrp with lid:0x%X change id:%u\n",
+        cl_ntoh16(mlid), p_mgrp->last_change_id );
+    }
+    else
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+        "osm_mcast_mgr_process_mgrp_cb: "
+        "Processing mgrp with lid:0x%X change id:%u\n",
+        cl_ntoh16(mlid), p_mgrp->last_change_id );
+
+      signal =
+        osm_mcast_mgr_process_mgrp( p_mgr, p_mgrp, req_type, port_guid );
+      p_mgrp->last_tree_id = p_mgrp->last_change_id;
+    }
+
+    /* Remove the MGRP only if the osm_mcm_port_t count is 0 and
+     * it is not a well-known group
+     */
+    if((0x0 == cl_qmap_count(&p_mgrp->mcm_port_tbl)) &&
+       (p_mgrp->well_known == FALSE))
+    {
+      osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+        "osm_mcast_mgr_process_mgrp_cb: "
+        "Destroying mgrp with lid:0x%X\n",
+        cl_ntoh16(mlid) );
+
+      /* Send a Report to any InformInfo registered for
+         Trap 67 : MCGroup delete */
+      osm_mgrp_send_delete_notice( p_mgr->p_subn, p_mgr->p_log, p_mgrp );
+
+      cl_qmap_remove_item(&p_mgr->p_subn->mgrp_mlid_tbl,
+            (cl_map_item_t *)p_mgrp );
+
+      osm_mgrp_destroy(p_mgrp);
+    }
+  }
+
+  CL_PLOCK_RELEASE( p_mgr->p_lock );
+  OSM_LOG_EXIT( p_mgr->p_log );
+  return signal;
+}
+
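osm_mcast_mgr_process_mgrp_cb above consumes a heap-allocated osm_mcast_mgr_ctxt_t and frees it itself. A sketch of the producer side; only the mlid/req_type/port_guid field names are evidenced by the callback, while the surrounding variables and the idle-queue hand-off are assumptions:

  /* Hypothetical producer of the deferred-request context. */
  osm_mcast_mgr_ctxt_t *p_ctxt = malloc( sizeof(*p_ctxt) );
  if( p_ctxt )
  {
    memset( p_ctxt, 0, sizeof(*p_ctxt) );
    p_ctxt->mlid      = osm_mgrp_get_mlid( p_mgrp );  /* network order */
    p_ctxt->req_type  = OSM_MCAST_REQ_TYPE_JOIN;
    p_ctxt->port_guid = port_guid;
    /* Hand Context1 = p_mgr, Context2 = p_ctxt to the idle queue;
       the callback frees p_ctxt when it runs. */
  }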
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_tbl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_tbl.c
new file mode 100644
index 00000000..bb175802
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcast_tbl.c
@@ -0,0 +1,302 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_mcast_tbl_t.
+ *    This object represents a multicast forwarding table.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include <stdlib.h>
+#include <string.h>
+#include <complib/cl_debug.h>
+#include <iba/ib_types.h>
+#include <opensm/osm_mcast_tbl.h>
+
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_mcast_tbl_init(
+  IN osm_mcast_tbl_t* const p_tbl,
+  IN uint8_t const num_ports,
+  IN uint16_t const capacity )
+{
+  CL_ASSERT( p_tbl );
+  CL_ASSERT( num_ports );
+
+  memset( p_tbl, 0, sizeof(*p_tbl) );
+
+  p_tbl->max_block_in_use = -1;
+
+  if( capacity == 0 )
+  {
+    /*
+      This switch apparently doesn't support multicast.
+      Everything is initialized to zero already, so return.
+    */
+    return( IB_SUCCESS );
+  }
+
+  p_tbl->num_entries = capacity;
+  p_tbl->num_ports = num_ports;
+  p_tbl->max_position = (uint8_t)((ROUNDUP( num_ports, IB_MCAST_MASK_SIZE) /
+      IB_MCAST_MASK_SIZE) - 1);
+
+  p_tbl->max_block = (uint16_t)((ROUNDUP( p_tbl->num_entries,
+      IB_MCAST_BLOCK_SIZE ) / IB_MCAST_BLOCK_SIZE) - 1);
+
+  p_tbl->max_mlid_ho = (uint16_t)(IB_LID_MCAST_START_HO + capacity);
+
+  /*
+    The number of bytes needed in the mask table is:
+    the (maximum bit-mask 'position' + 1) times the
+    number of bytes in each bit mask times the
+    number of MLIDs supported by the table.
+
+    We must always allocate the array with the maximum position,
+    since the table structure is (and must be) declared that way
+    in order to form a pointer to a two-dimensional array.
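    A worked instance of that sizing (capacity value illustrative):
    with capacity = 1024 MLIDs, IB_MCAST_POSITION_MAX = 0xF and
    IB_MCAST_MASK_SIZE = 16 bits, the malloc below reserves
    1024 x 16 x 2 = 32768 bytes, i.e. sixteen 2-byte port masks
    per supported MLID.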
+ */ + p_tbl->p_mask_tbl = malloc( p_tbl->num_entries * + (IB_MCAST_POSITION_MAX + 1) * IB_MCAST_MASK_SIZE / 8 ); + + if( p_tbl->p_mask_tbl == NULL ) + return( IB_INSUFFICIENT_MEMORY ); + + memset(p_tbl->p_mask_tbl, 0, + p_tbl->num_entries * (IB_MCAST_POSITION_MAX + 1) * IB_MCAST_MASK_SIZE / 8 ); + return( IB_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcast_tbl_destroy( + IN osm_mcast_tbl_t* const p_tbl ) +{ + free( p_tbl->p_mask_tbl ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcast_tbl_set( + IN osm_mcast_tbl_t* const p_tbl, + IN const uint16_t mlid_ho, + IN const uint8_t port ) +{ + uintn_t mlid_offset; + uintn_t mask_offset; + uintn_t bit_mask; + int16_t block_num; + + CL_ASSERT( p_tbl ); + CL_ASSERT( mlid_ho >= IB_LID_MCAST_START_HO ); + CL_ASSERT( mlid_ho <= p_tbl->max_mlid_ho ); + CL_ASSERT( p_tbl->p_mask_tbl ); + + mlid_offset = mlid_ho - IB_LID_MCAST_START_HO; + mask_offset = port / IB_MCAST_MASK_SIZE; + bit_mask = cl_ntoh16( (uint16_t)( 1 << (port % IB_MCAST_MASK_SIZE) ) ); + (*p_tbl->p_mask_tbl)[mlid_offset][mask_offset] |= bit_mask; + + block_num = (int16_t)(mlid_offset / IB_MCAST_BLOCK_SIZE); + + if( block_num > p_tbl->max_block_in_use ) + p_tbl->max_block_in_use = (uint16_t)block_num; +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_mcast_tbl_is_port( + IN const osm_mcast_tbl_t* const p_tbl, + IN const uint16_t mlid_ho, + IN const uint8_t port_num ) +{ + uintn_t mlid_offset; + uintn_t mask_offset; + uintn_t bit_mask; + + CL_ASSERT( p_tbl ); + + if( p_tbl->p_mask_tbl ) + { + CL_ASSERT( port_num <= (p_tbl->max_position + 1) * IB_MCAST_MASK_SIZE ); + CL_ASSERT( mlid_ho >= IB_LID_MCAST_START_HO ); + CL_ASSERT( mlid_ho <= p_tbl->max_mlid_ho ); + + mlid_offset = mlid_ho - IB_LID_MCAST_START_HO; + mask_offset = port_num / IB_MCAST_MASK_SIZE; + bit_mask = cl_ntoh16( + (uint16_t)( 1 << (port_num % IB_MCAST_MASK_SIZE) ) ); + return( ((*p_tbl->p_mask_tbl)[mlid_offset][mask_offset] & bit_mask) == + bit_mask ); + } + + return( FALSE ); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_mcast_tbl_is_any_port( + IN const osm_mcast_tbl_t* const p_tbl, + IN const uint16_t mlid_ho ) +{ + uintn_t mlid_offset; + uint8_t position; + uint16_t result = 0; + + CL_ASSERT( p_tbl ); + + if( p_tbl->p_mask_tbl ) + { + CL_ASSERT( mlid_ho >= IB_LID_MCAST_START_HO ); + CL_ASSERT( mlid_ho <= p_tbl->max_mlid_ho ); + + mlid_offset = mlid_ho - IB_LID_MCAST_START_HO; + + for( position = 0; position <= p_tbl->max_position; position++ ) + result |= (*p_tbl->p_mask_tbl)[mlid_offset][position]; + } + + return( result != 0 ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mcast_tbl_set_block( + IN osm_mcast_tbl_t* const p_tbl, + IN const ib_net16_t* const p_block, + IN const int16_t block_num, + IN const uint8_t position ) +{ + uint32_t i; + uint16_t mlid_start_ho; + + CL_ASSERT( p_tbl ); + CL_ASSERT( p_block ); + + if( block_num > p_tbl->max_block ) + return( IB_INVALID_PARAMETER ); + + if( position > 
p_tbl->max_position )
+    return( IB_INVALID_PARAMETER );
+
+  mlid_start_ho = (uint16_t)(block_num * IB_MCAST_BLOCK_SIZE);
+
+  if( mlid_start_ho + IB_MCAST_BLOCK_SIZE > p_tbl->max_mlid_ho )
+    return( IB_INVALID_PARAMETER );
+
+  for( i = 0; i < IB_MCAST_BLOCK_SIZE; i++ )
+    (*p_tbl->p_mask_tbl)[mlid_start_ho + i][position] = p_block[i];
+
+  if( block_num > p_tbl->max_block_in_use )
+    p_tbl->max_block_in_use = (uint16_t)block_num;
+
+  return( IB_SUCCESS );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mcast_tbl_clear_mlid(
+  IN osm_mcast_tbl_t* const p_tbl,
+  IN const uint16_t mlid_ho )
+{
+  uint8_t i;
+  uintn_t mlid_offset;
+
+  CL_ASSERT( p_tbl );
+  CL_ASSERT( mlid_ho >= IB_LID_MCAST_START_HO );
+
+  if( p_tbl->p_mask_tbl && (mlid_ho <= p_tbl->max_mlid_ho) )
+  {
+    mlid_offset = mlid_ho - IB_LID_MCAST_START_HO;
+    for( i = 0; i <= p_tbl->max_position; i++ )
+      (*p_tbl->p_mask_tbl)[mlid_offset][i] = 0;
+  }
+}
+
+/**********************************************************************
+ **********************************************************************/
+boolean_t
+osm_mcast_tbl_get_block(
+  IN osm_mcast_tbl_t* const p_tbl,
+  IN int16_t const block_num,
+  IN uint8_t const position,
+  OUT ib_net16_t* const p_block )
+{
+  uint32_t i;
+  uint16_t mlid_start_ho;
+
+  CL_ASSERT( p_tbl );
+  CL_ASSERT( p_block );
+
+  if( block_num > p_tbl->max_block_in_use )
+    return( FALSE );
+
+  if( position > p_tbl->max_position )
+  {
+    /*
+      Caller shouldn't do this for efficiency's sake...
+    */
+    memset( p_block, 0, IB_SMP_DATA_SIZE );
+    return( TRUE );
+  }
+
+  mlid_start_ho = (uint16_t)(block_num * IB_MCAST_BLOCK_SIZE);
+
+  /* This function returns boolean_t, so do not leak an
+     ib_api_status_t value on the out-of-range path. */
+  if( mlid_start_ho + IB_MCAST_BLOCK_SIZE > p_tbl->max_mlid_ho )
+    return( FALSE );
+
+  for( i = 0; i < IB_MCAST_BLOCK_SIZE; i++ )
+    p_block[i] = (*p_tbl->p_mask_tbl)[mlid_start_ho + i][position];
+
+  return( TRUE );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_info.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_info.c
new file mode 100644
index 00000000..47eeb2e6
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_info.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ * + * $Id$ + */ + + + +/* + * Abstract: + * Declaration of osm_mcm_info_t. + * This object represents a Multicast Forwarding Information object. + * This object is part of the OpenSM family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_info_destroy( + IN osm_mcm_info_t* const p_mcm ) +{ + CL_ASSERT( p_mcm ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_info_init( + IN osm_mcm_info_t* const p_mcm, + IN const ib_net16_t mlid ) +{ + CL_ASSERT( p_mcm ); + p_mcm->mlid = mlid; +} + +/********************************************************************** + **********************************************************************/ +osm_mcm_info_t* +osm_mcm_info_new( + IN const ib_net16_t mlid ) +{ + osm_mcm_info_t* p_mcm; + + p_mcm = (osm_mcm_info_t*)malloc( sizeof(*p_mcm) ); + if( p_mcm ) + { + memset(p_mcm, 0, sizeof(*p_mcm) ); + osm_mcm_info_init( p_mcm, mlid ); + } + + return( p_mcm ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_info_delete( + IN osm_mcm_info_t* const p_mcm ) +{ + osm_mcm_info_destroy( p_mcm ); + free( p_mcm ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_port.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_port.c new file mode 100644 index 00000000..af15ef4b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mcm_port.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_mcm_port_t. + * This object represents the membership of a port in a multicast group. + * This object is part of the OpenSM family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_port_construct( + IN osm_mcm_port_t* const p_mcm ) +{ + memset( p_mcm, 0, sizeof(*p_mcm) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_port_destroy( + IN osm_mcm_port_t* const p_mcm ) +{ + /* + Nothing to do? + */ + UNUSED_PARAM( p_mcm ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_port_init( + IN osm_mcm_port_t* const p_mcm, + IN const ib_gid_t* const p_port_gid, + IN const uint8_t scope_state, + IN const boolean_t proxy_join ) +{ + CL_ASSERT( p_mcm ); + CL_ASSERT( p_port_gid ); + CL_ASSERT( scope_state ); + + osm_mcm_port_construct( p_mcm ); + p_mcm->port_gid = *p_port_gid; + p_mcm->scope_state = scope_state; + p_mcm->proxy_join = proxy_join; +} + +/********************************************************************** + **********************************************************************/ +osm_mcm_port_t* +osm_mcm_port_new( + IN const ib_gid_t* const p_port_gid, + IN const uint8_t scope_state, + IN const boolean_t proxy_join ) +{ + osm_mcm_port_t* p_mcm; + + p_mcm = malloc( sizeof(*p_mcm) ); + if( p_mcm ) + { + osm_mcm_port_init( p_mcm, p_port_gid, + scope_state, proxy_join ); + } + + return( p_mcm ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcm_port_delete( + IN osm_mcm_port_t* const p_mcm ) +{ + CL_ASSERT( p_mcm ); + + osm_mcm_port_destroy( p_mcm ); + free( p_mcm ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_mtree.c b/branches/Ndi/ulp/opensm/user/opensm/osm_mtree.c new file mode 100644 index 00000000..256ec281 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_mtree.c @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_mtree_node_t. + * This file implements the Multicast Tree object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_mtree_node_init( + IN osm_mtree_node_t* const p_mtn, + IN const osm_switch_t* const p_sw ) +{ + uint32_t i; + + CL_ASSERT( p_mtn ); + CL_ASSERT( p_sw ); + + osm_mtree_node_construct( p_mtn ); + + p_mtn->p_sw = (osm_switch_t*)p_sw; + p_mtn->max_children = osm_switch_get_num_ports( p_sw ); + + for( i = 0; i < p_mtn->max_children; i++ ) + p_mtn->child_array[i] = NULL; +} + +/********************************************************************** + **********************************************************************/ +osm_mtree_node_t* +osm_mtree_node_new( + IN const osm_switch_t* const p_sw ) +{ + osm_mtree_node_t *p_mtn; + + p_mtn = malloc( sizeof(osm_mtree_node_t) + + sizeof(void*) * (osm_switch_get_num_ports( p_sw ) - 1) ); + + if( p_mtn != NULL ) + osm_mtree_node_init( p_mtn, p_sw ); + + return( p_mtn ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mtree_destroy( + IN osm_mtree_node_t *p_mtn ) +{ + uint32_t i; + + if (p_mtn == NULL) + return; + + if ( p_mtn->child_array != NULL ) + for (i = 0 ; i< p_mtn->max_children; i++ ) + if ( (p_mtn->child_array[i] != NULL) && + (p_mtn->child_array[i] != OSM_MTREE_LEAF) ) + osm_mtree_destroy(p_mtn->child_array[i]); + + free( p_mtn ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_mtree_dump( + IN osm_mtree_node_t *p_mtn ) +{ + uint32_t i; + + if (p_mtn == NULL) + return; + + printf("GUID:0x%016" PRIx64 " max_children:%u\n", + cl_ntoh64(p_mtn->p_sw->p_node->node_info.node_guid), + p_mtn->max_children ); + if ( p_mtn->child_array != NULL ) + { + for (i = 0 ; i< p_mtn->max_children; i++ ) + { + printf("i=%d\n", i); + if ( (p_mtn->child_array[i] != NULL) && (p_mtn->child_array[i] != OSM_MTREE_LEAF) ) + __osm_mtree_dump(p_mtn->child_array[i]); + } + } +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_multicast.c b/branches/Ndi/ulp/opensm/user/opensm/osm_multicast.c new file mode 100644 index 00000000..b12bcbe3 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_multicast.c @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
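A note on the allocation idiom in osm_mtree_node_new above: the node is over-allocated by (num_ports - 1) pointers so that a trailing one-element array can serve as a per-port child table. A sketch of the assumed layout and of the sentinel test used when walking the tree; the real declaration lives in opensm/osm_mtree.h and may differ:

  // Assumed layout; over-allocation grows child_array to num_ports slots.
  typedef struct _osm_mtree_node
  {
    osm_switch_t           *p_sw;
    uint8_t                 max_children;
    struct _osm_mtree_node *child_array[1];
  } osm_mtree_node_t;

  // Walking: NULL means no route via that port, OSM_MTREE_LEAF marks an
  // end port, anything else is a downstream switch to recurse into.
  for( i = 0; i < p_mtn->max_children; i++ )
    if( p_mtn->child_array[i] && p_mtn->child_array[i] != OSM_MTREE_LEAF )
      visit( p_mtn->child_array[i] );  // 'visit' is a hypothetical helper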
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of multicast functions.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include <stdlib.h>
+#include <string.h>
+#include <complib/cl_debug.h>
+#include <opensm/osm_multicast.h>
+#include <opensm/osm_mtree.h>
+#include <opensm/osm_inform.h>
+
+/**********************************************************************
+ **********************************************************************/
+/* osm_mcast_req_type_t values converted to text for easier printing. */
+const char* osm_mcast_req_type_str[] =
+{
+  "OSM_MCAST_REQ_TYPE_CREATE",
+  "OSM_MCAST_REQ_TYPE_JOIN",
+  "OSM_MCAST_REQ_TYPE_LEAVE",
+  "OSM_MCAST_REQ_TYPE_SUBNET_CHANGE"
+};
+
+const char*
+osm_get_mcast_req_type_str(
+  IN osm_mcast_req_type_t req_type )
+{
+  if ( req_type > OSM_MCAST_REQ_TYPE_SUBNET_CHANGE )
+    req_type = OSM_MCAST_REQ_TYPE_SUBNET_CHANGE;
+  return( osm_mcast_req_type_str[req_type] );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mgrp_construct(
+  IN osm_mgrp_t* const p_mgrp )
+{
+  memset( p_mgrp, 0, sizeof(*p_mgrp) );
+  cl_qmap_init( &p_mgrp->mcm_port_tbl );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mgrp_destroy(
+  IN osm_mgrp_t* const p_mgrp )
+{
+  osm_mcm_port_t *p_mcm_port;
+  osm_mcm_port_t *p_next_mcm_port;
+
+  CL_ASSERT(p_mgrp);
+
+  p_next_mcm_port = (osm_mcm_port_t*)cl_qmap_head( &p_mgrp->mcm_port_tbl );
+  while( p_next_mcm_port != (osm_mcm_port_t*)cl_qmap_end( &p_mgrp->mcm_port_tbl ) )
+  {
+    p_mcm_port = p_next_mcm_port;
+    p_next_mcm_port = (osm_mcm_port_t*)cl_qmap_next( &p_mcm_port->map_item );
+    osm_mcm_port_delete( p_mcm_port );
+  }
+  /* destroy the mtree_node structure */
+  osm_mtree_destroy(p_mgrp->p_root);
+
+  free(p_mgrp);
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mgrp_init(
+  IN osm_mgrp_t* const p_mgrp,
+  IN const ib_net16_t mlid )
+{
+  CL_ASSERT( p_mgrp );
+  CL_ASSERT( cl_ntoh16( mlid ) >= IB_LID_MCAST_START_HO );
+
+  osm_mgrp_construct( p_mgrp );
+  p_mgrp->mlid = mlid;
+  p_mgrp->last_change_id = 0;
+  p_mgrp->last_tree_id = 0;
+  p_mgrp->to_be_deleted = FALSE;
+}
+
+/**********************************************************************
+ **********************************************************************/
+osm_mgrp_t*
+osm_mgrp_new(
+  IN const ib_net16_t mlid )
+{
+  osm_mgrp_t* p_mgrp;
+
+  p_mgrp = (osm_mgrp_t*)malloc( sizeof(*p_mgrp) );
+  if( p_mgrp )
+    osm_mgrp_init( p_mgrp, mlid );
+
+  return( p_mgrp );
+}
+
+/**********************************************************************
+ 
**********************************************************************/ +osm_mcm_port_t* +osm_mgrp_add_port( + IN osm_mgrp_t* const p_mgrp, + IN const ib_gid_t* const p_port_gid, + IN const uint8_t join_state, + IN boolean_t proxy_join ) +{ + ib_net64_t port_guid; + osm_mcm_port_t *p_mcm_port; + cl_map_item_t *prev_item; + uint8_t prev_join_state; + uint8_t prev_scope; + + p_mcm_port = osm_mcm_port_new( p_port_gid, join_state, proxy_join ); + if( p_mcm_port ) + { + port_guid = p_port_gid->unicast.interface_id; + + /* + prev_item = cl_qmap_insert(...) + Pointer to the item in the map with the specified key. If insertion + was successful, this is the pointer to the item. If an item with the + specified key already exists in the map, the pointer to that item is + returned. + */ + prev_item = cl_qmap_insert( &p_mgrp->mcm_port_tbl, + port_guid, &p_mcm_port->map_item ); + + /* if already exists - revert the insertion and only update join state */ + if( prev_item != &p_mcm_port->map_item ) + { + + osm_mcm_port_delete( p_mcm_port ); + p_mcm_port =(osm_mcm_port_t *) prev_item; + + /* + o15.0.1.11 + Join state of the end port should be the or of the + previous setting with the current one + */ + ib_member_get_scope_state(p_mcm_port->scope_state, &prev_scope, &prev_join_state); + p_mcm_port->scope_state = + ib_member_set_scope_state(prev_scope, prev_join_state | join_state); + + } + else + { + /* track the fact we modified the group ports */ + p_mgrp->last_change_id++; + } + } + + return( p_mcm_port ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mgrp_remove_port( + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_mgrp_t* const p_mgrp, + IN const ib_net64_t port_guid ) +{ + cl_map_item_t *p_map_item; + + CL_ASSERT(p_mgrp); + + p_map_item = cl_qmap_get( &p_mgrp->mcm_port_tbl, port_guid ); + + if( p_map_item != cl_qmap_end( &p_mgrp->mcm_port_tbl ) ) + { + cl_qmap_remove_item( &p_mgrp->mcm_port_tbl, + p_map_item ); + osm_mcm_port_delete((osm_mcm_port_t*)p_map_item); + + /* track the fact we modified the group */ + p_mgrp->last_change_id++; + } + + /* + no more ports so the group will be deleted after re-route + but only if it is not a well known group and not already deleted + */ + if ((cl_is_qmap_empty( &p_mgrp->mcm_port_tbl )) && + (p_mgrp->well_known == FALSE) && + (p_mgrp->to_be_deleted == FALSE)) + { + p_mgrp->to_be_deleted = TRUE; + + /* Send a Report to any InformInfo registered for + Trap 67 : MCGroup delete */ + osm_mgrp_send_delete_notice( p_subn, p_log, p_mgrp ); + } +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_mgrp_is_port_present( + IN const osm_mgrp_t* const p_mgrp, + IN const ib_net64_t port_guid, + OUT osm_mcm_port_t ** const pp_mcm_port ) +{ + cl_map_item_t *p_map_item; + + CL_ASSERT(p_mgrp); + + p_map_item = cl_qmap_get(&p_mgrp->mcm_port_tbl, + port_guid); + + if (p_map_item != cl_qmap_end(&p_mgrp->mcm_port_tbl)) + { + if (pp_mcm_port) + *pp_mcm_port = (osm_mcm_port_t *)p_map_item; + return TRUE; + } + if (pp_mcm_port) + *pp_mcm_port = NULL; + return FALSE; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mgrp_apply_func_sub( + const osm_mgrp_t* const p_mgrp, + const osm_mtree_node_t* const p_mtn, + 
osm_mgrp_func_t p_func, + void* context ) +{ + uint8_t i = 0; + uint8_t max_children; + osm_mtree_node_t* p_child_mtn; + + /* + Call the user, then recurse. + */ + p_func( p_mgrp, p_mtn, context ); + + max_children = osm_mtree_node_get_max_children( p_mtn ); + for( i = 0; i < max_children; i++ ) + { + p_child_mtn = osm_mtree_node_get_child( p_mtn, i ); + if( p_child_mtn ) + __osm_mgrp_apply_func_sub( p_mgrp, p_child_mtn, p_func, context ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_mgrp_apply_func( + const osm_mgrp_t* const p_mgrp, + osm_mgrp_func_t p_func, + void* context ) +{ + osm_mtree_node_t* p_mtn; + + CL_ASSERT( p_mgrp ); + CL_ASSERT( p_func ); + + p_mtn = p_mgrp->p_root; + + if( p_mtn ) + __osm_mgrp_apply_func_sub( p_mgrp, p_mtn, p_func, context ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mgrp_send_delete_notice( + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_mgrp_t *p_mgrp ) +{ + ib_mad_notice_attr_t notice; + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_mgrp_send_delete_notice ); + + /* prepare the needed info */ + + /* details of the notice */ + notice.generic_type = 0x83; /* is generic subn mgt type */ + ib_notice_set_prod_type_ho(¬ice, 4); /* A Class Manager generator */ + notice.g_or_v.generic.trap_num = CL_HTON16(67); /* delete of mcg */ + /* The sm_base_lid is saved in network order already. */ + notice.issuer_lid = p_subn->sm_base_lid; + /* following o14-12.1.11 and table 120 p726 */ + /* we need to provide the MGID */ + memcpy(&(notice.data_details.ntc_64_67.gid), + &(p_mgrp->mcmember_rec.mgid), + sizeof(ib_gid_t)); + + /* According to page 653 - the issuer gid in this case of trap + is the SM gid, since the SM is the initiator of this trap. */ + notice.issuer_gid.unicast.prefix = p_subn->opt.subnet_prefix; + notice.issuer_gid.unicast.interface_id = p_subn->sm_port_guid; + + status = osm_report_notice(p_log, p_subn, ¬ice); + if( status != IB_SUCCESS ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mgrp_send_delete_notice: ERR 7601: " + "Error sending trap reports (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mgrp_send_create_notice( + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_mgrp_t *p_mgrp ) +{ + ib_mad_notice_attr_t notice; + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_mgrp_send_create_notice ); + + /* prepare the needed info */ + + /* details of the notice */ + notice.generic_type = 0x83; /* Generic SubnMgt type */ + ib_notice_set_prod_type_ho(¬ice, 4); /* A Class Manager generator */ + notice.g_or_v.generic.trap_num = CL_HTON16(66); /* create of mcg */ + /* The sm_base_lid is saved in network order already. */ + notice.issuer_lid = p_subn->sm_base_lid; + /* following o14-12.1.11 and table 120 p726 */ + /* we need to provide the MGID */ + memcpy(&(notice.data_details.ntc_64_67.gid), + &(p_mgrp->mcmember_rec.mgid), + sizeof(ib_gid_t)); + + /* According to page 653 - the issuer gid in this case of trap + is the SM gid, since the SM is the initiator of this trap. 
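+
+     For reference, the report built here differs from the delete
+     report above only in the trap number; both use:
+       generic_type   = 0x83 (generic, SubnMgt class)
+       producer type  = 4    (Class Manager)
+       trap_num       = 66 for MC group create, 67 for delete
+       issuer LID/GID = the SM's own LID and GID
+       ntc_64_67.gid  = the MGID of the group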
*/ + notice.issuer_gid.unicast.prefix = p_subn->opt.subnet_prefix; + notice.issuer_gid.unicast.interface_id = p_subn->sm_port_guid; + + status = osm_report_notice(p_log, p_subn, ¬ice); + if( status != IB_SUCCESS ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mgrp_send_create_notice: ERR 7602: " + "Error sending trap reports (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_node.c b/branches/Ndi/ulp/opensm/user/opensm/osm_node.c new file mode 100644 index 00000000..064c1db5 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_node.c @@ -0,0 +1,335 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_node_t. + * This object represents an Infiniband Node. + * This object is part of the opensm family of objects. 
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_node_init_physp(
+  IN osm_node_t* const p_node,
+  IN const osm_madw_t* const p_madw )
+{
+  osm_physp_t *p_physp;
+  ib_net64_t port_guid;
+  ib_smp_t *p_smp;
+  ib_node_info_t *p_ni;
+  uint8_t port_num;
+
+  CL_ASSERT( p_node );
+  CL_ASSERT( p_madw );
+
+  p_smp = osm_madw_get_smp_ptr( p_madw );
+
+  CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_NODE_INFO );
+
+  p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );
+  port_guid = p_ni->port_guid;
+  port_num = ib_node_info_get_local_port_num( p_ni );
+
+  CL_ASSERT( port_num < p_node->physp_tbl_size );
+
+  p_physp = osm_node_get_physp_ptr( p_node, port_num );
+
+  osm_physp_init( p_physp, port_guid, port_num, p_node,
+                  osm_madw_get_bind_handle( p_madw ),
+                  p_smp->hop_count, p_smp->initial_path );
+}
+
+/**********************************************************************
+ **********************************************************************/
+osm_node_t*
+osm_node_new(
+  IN const osm_madw_t* const p_madw )
+{
+  osm_node_t *p_node;
+  ib_smp_t *p_smp;
+  ib_node_info_t *p_ni;
+  uint8_t i;
+  uint32_t size;
+
+  CL_ASSERT( p_madw );
+
+  p_smp = osm_madw_get_smp_ptr( p_madw );
+
+  CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_NODE_INFO );
+
+  p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp );
+
+  /*
+    The node object already contains one physical port object.
+    Therefore, subtract 1 from the number of physical ports
+    used by the switch.  This is not done for CAs, since they
+    need to occupy 1 more physp than they physically have, since
+    we still reserve room for a "port 0".
+  */
+  size = p_ni->num_ports;
+
+  p_node = malloc( sizeof(*p_node) + sizeof(osm_physp_t) * size );
+  if( p_node != NULL )
+  {
+    memset( p_node, 0, sizeof(*p_node) + sizeof(osm_physp_t) * size );
+    p_node->node_info = *p_ni;
+    p_node->physp_tbl_size = size + 1;
+
+    /*
+      Construct the Physical Port objects owned by this Node.
+      Then, initialize the Physical Port through which we
+      discovered this port.
+      For switches, all ports have the same GUID.
+      For HCAs, each port has a different GUID, so we only
+      know the GUID for the port that responded to our
+      Get(NodeInfo).
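+
+      As a worked example of the sizing above (the values are
+      illustrative): a NodeInfo reporting num_ports == 8 gives
+      size == 8, an allocation of the node object plus 8 extra
+      osm_physp_t slots (one slot is already embedded in osm_node_t),
+      and physp_tbl_size == 9, i.e. valid port indices 0..8 with
+      index 0 reserved for "port 0".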
+ */ + for( i = 0; i < p_node->physp_tbl_size; i++ ) + osm_physp_construct( osm_node_get_physp_ptr( p_node, i ) ); + + osm_node_init_physp( p_node, p_madw ); + } + + return( p_node ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_node_destroy( + IN osm_node_t *p_node ) +{ + uint16_t i; + + /* Cleanup all PhysPorts */ + if( p_node != NULL ) + { + /* + Cleanup all physports + */ + for( i = 0; i < p_node->physp_tbl_size; i++ ) + { + osm_physp_t *p_physp = osm_node_get_physp_ptr( p_node, i ); + if (p_physp) + osm_physp_destroy( p_physp ); + } + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_node_delete( + IN OUT osm_node_t** const p_node ) +{ + osm_node_destroy( *p_node ); + free( *p_node ); + *p_node = NULL; +} + +/********************************************************************** + **********************************************************************/ +void +osm_node_link( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + + CL_ASSERT( port_num < p_node->physp_tbl_size ); + CL_ASSERT( remote_port_num < p_remote_node->physp_tbl_size ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + p_remote_physp = osm_node_get_physp_ptr( p_remote_node, + remote_port_num ); + + if (p_physp->p_remote_physp) + p_physp->p_remote_physp->p_remote_physp = NULL; + if (p_remote_physp->p_remote_physp) + p_remote_physp->p_remote_physp->p_remote_physp = NULL; + + osm_physp_link( p_physp, p_remote_physp ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_node_unlink( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + + CL_ASSERT( port_num < p_node->physp_tbl_size ); + CL_ASSERT( remote_port_num < p_remote_node->physp_tbl_size ); + + if( osm_node_link_exists( p_node, port_num, + p_remote_node, remote_port_num ) ) + { + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + p_remote_physp = osm_node_get_physp_ptr( p_remote_node, + remote_port_num ); + + osm_physp_unlink( p_physp, p_remote_physp ); + } +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_node_link_exists( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + + CL_ASSERT( port_num < p_node->physp_tbl_size ); + CL_ASSERT( remote_port_num < p_remote_node->physp_tbl_size ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + p_remote_physp = osm_node_get_physp_ptr( p_remote_node, + remote_port_num ); + + return( osm_physp_link_exists( p_physp, p_remote_physp ) ); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_node_link_has_valid_ports( + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN 
const uint8_t remote_port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + + CL_ASSERT( port_num < p_node->physp_tbl_size ); + CL_ASSERT( remote_port_num < p_remote_node->physp_tbl_size ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + p_remote_physp = osm_node_get_physp_ptr( p_remote_node, + remote_port_num ); + + return( osm_physp_is_valid( p_physp ) && + osm_physp_is_valid( p_remote_physp ) ); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_node_has_any_link( + IN osm_node_t* const p_node, + IN const uint8_t port_num ) +{ + osm_physp_t *p_physp; + CL_ASSERT( port_num < p_node->physp_tbl_size ); + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + return( osm_physp_has_any_link( p_physp ) ); +} + +/********************************************************************** + **********************************************************************/ +osm_node_t* +osm_node_get_remote_node( + IN const osm_node_t* const p_node, + IN const uint8_t port_num, + OUT uint8_t *p_remote_port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + if( !osm_physp_has_any_link( p_physp ) ) + return( NULL ); + + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_port_num ) + *p_remote_port_num = osm_physp_get_port_num( p_remote_physp ); + + return( osm_physp_get_node_ptr( p_remote_physp ) ); +} + +/********************************************************************** + The lock must be held before calling this function. +**********************************************************************/ +ib_net16_t +osm_node_get_remote_base_lid( + IN const osm_node_t* const p_node, + IN const uint32_t port_num ) +{ + osm_physp_t *p_physp; + osm_physp_t *p_remote_physp; + CL_ASSERT( port_num < p_node->physp_tbl_size ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + if( osm_physp_is_valid( p_physp ) ) + { + p_remote_physp = osm_physp_get_remote( p_physp ); + return( osm_physp_get_base_lid( p_remote_physp ) ); + } + + return( 0 ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv.c new file mode 100644 index 00000000..d3e8cfbd --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv.c @@ -0,0 +1,182 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_nd_rcv_t. + * This object represents the NodeDescription Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_nd_rcv_process_nd( + IN const osm_nd_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const ib_node_desc_t* const p_nd ) +{ + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + OSM_LOG_ENTER( p_rcv->p_log, __osm_nd_rcv_process_nd ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + memcpy( desc, p_nd, sizeof(*p_nd) ); + /* Guarantee null termination before printing. */ + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_nd_rcv_process_nd: " + "Node 0x%" PRIx64 "\n\t\t\t\tDescription = %s\n", + cl_ntoh64( osm_node_get_node_guid( p_node )), desc ); + } + + memcpy( &p_node->node_desc.description, p_nd, sizeof(*p_nd) ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_nd_rcv_construct( + IN osm_nd_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_nd_rcv_destroy( + IN osm_nd_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_nd_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_nd_rcv_init( + IN osm_nd_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_nd_rcv_init ); + + osm_nd_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_nd_rcv_process( + IN const osm_nd_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_node_desc_t *p_nd; + ib_smp_t *p_smp; + osm_node_t *p_node; + ib_net64_t node_guid; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_nd_rcv_process ); + + CL_ASSERT( p_madw ); + + p_guid_tbl = &p_rcv->p_subn->node_guid_tbl; + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_nd = (ib_node_desc_t*)ib_smp_get_payload_ptr( p_smp ); + + /* + Acquire the node object and add the node description. 
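+
+     Both the lookup and the description update are performed under
+     the exclusive plock (CL_PLOCK_EXCL_ACQUIRE ... CL_PLOCK_RELEASE
+     below), so the node cannot disappear while its node_desc field
+     is being overwritten.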
+ */ + + node_guid = osm_madw_get_nd_context_ptr( p_madw )->node_guid; + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + p_node = (osm_node_t*)cl_qmap_get( p_guid_tbl, node_guid ); + + if( p_node == (osm_node_t*)cl_qmap_end( p_guid_tbl) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_nd_rcv_process: ERR 0B01: " + "NodeDescription received for nonexistent node " + "0x%" PRIx64 "\n", cl_ntoh64(node_guid) ); + } + else + { + __osm_nd_rcv_process_nd( p_rcv, p_node, p_nd ); + } + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv_ctrl.c new file mode 100644 index 00000000..f038e7b2 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_node_desc_rcv_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_nd_rcv_ctrl_t. + * This object represents the NodeDescription request controller object. + * This object is part of the opensm family of objects. 
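+ *
+ * The controller is a thin shim between the dispatcher and the
+ * receiver: at init time it registers a callback for one message ID,
+ * and the callback simply forwards the dispatched MAD wrapper.  In
+ * outline (the exact calls appear in the code below):
+ *
+ *   h_disp = cl_disp_register( p_disp, OSM_MSG_MAD_NODE_DESC,
+ *                              __osm_nd_rcv_ctrl_disp_callback, p_ctrl );
+ *   callback body: osm_nd_rcv_process( p_ctrl->p_rcv, (osm_madw_t*)p_data );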
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_nd_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_nd_rcv_process( ((osm_nd_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_nd_rcv_ctrl_construct( + IN osm_nd_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_nd_rcv_ctrl_destroy( + IN osm_nd_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_nd_rcv_ctrl_init( + IN osm_nd_rcv_ctrl_t* const p_ctrl, + IN osm_nd_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_nd_rcv_ctrl_init ); + + osm_nd_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_NODE_DESC, + __osm_nd_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_nd_rcv_ctrl_init: ERR 0C01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv.c new file mode 100755 index 00000000..76151f2c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv.c @@ -0,0 +1,1090 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_ni_rcv_t. + * This object represents the NodeInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.9 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_set_links( + IN const osm_ni_rcv_t* const p_rcv, + osm_node_t* p_node, + const uint8_t port_num, + const osm_ni_context_t* const p_ni_context ) +{ + cl_qmap_t *p_guid_tbl; + osm_node_t *p_neighbor_node; + osm_node_t *p_old_neighbor_node; + uint8_t old_neighbor_port_num; + osm_physp_t *p_physp, *p_old_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_set_links ); + + /* + A special case exists in which the node we're trying to + link is our own node. In this case, the guid value in + the ni_context will be zero. + */ + if( p_ni_context->node_guid != 0 ) + { + p_guid_tbl = &p_rcv->p_subn->node_guid_tbl; + p_neighbor_node = (osm_node_t*)cl_qmap_get( p_guid_tbl, + p_ni_context->node_guid ); + if( p_neighbor_node == (osm_node_t*)cl_qmap_end( p_guid_tbl ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_set_links: ERR 0D10: " + "Unexpected removal of neighbor node " + "0x%" PRIx64 "\n", + cl_ntoh64( p_ni_context->node_guid ) ); + } + else + { + /* + We have seen this neighbor node before, but we might + not have seen this port on the neighbor node before. + We should not set links to an uninitialized port on the + neighbor, so check validity up front. If it's not + valid, do nothing, since we'll see this link again + when we probe the neighbor. + */ + if( osm_node_link_has_valid_ports( p_node, port_num, + p_neighbor_node, p_ni_context->port_num ) ) + { + if( osm_node_link_exists( p_node, port_num, + p_neighbor_node, p_ni_context->port_num ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_set_links: " + "Link already exists\n" ); + } + else + { + if( osm_node_has_any_link( p_node, port_num ) && + p_rcv->p_subn->force_immediate_heavy_sweep == FALSE ) + { + /* + Uh oh... + This means that we found 2 nodes with the same guid, + or a 12x link with lane reversal that is not configured correctly. + If the force_immediate_heavy_sweep == TRUE, then this might be a case + of port being moved (causing trap 128), and thus rediscovered. + In this case, just continue. There will be another heavy sweep + immediately after, when the subnet is stable again. 
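+
+    To summarize, the cases distinguished here are:
+      1. force_immediate_heavy_sweep == TRUE: assume a moved port
+         (trap 128) and let the imminent heavy sweep resolve it;
+      2. otherwise, an existing link on this port means duplicated
+         GUIDs or a miswired 12x link: log ERR 0D01 and, when
+         exit_on_fatal is set, exit;
+      3. a link from a port back to itself (checked further below)
+         is also reported as a duplicate GUID, with Enhanced Port 0
+         as the one legitimate exception.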
+ */ + char line[BUF_SIZE]; + char dr_new_path[BUF_SIZE]; + char dr_old_path[BUF_SIZE]; + uint32_t i; + osm_dr_path_t *p_path = NULL, *p_old_path = NULL; + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + sprintf( dr_new_path, "no_path_available" ); + if (p_physp) + { + p_path = osm_physp_get_dr_path_ptr( p_physp ); + if ( p_path ) + { + sprintf( dr_new_path, "new path:" ); + for (i = 0; i <= p_path->hop_count; i++ ) + { + sprintf( line, "[%X]", p_path->path[i] ); + strcat( dr_new_path, line ); + } + } + } + + p_old_neighbor_node = osm_node_get_remote_node( + p_node, port_num, &old_neighbor_port_num ); + p_old_physp = osm_node_get_physp_ptr( + p_old_neighbor_node, + old_neighbor_port_num); + sprintf( dr_old_path, "no_path_available" ); + if (p_old_physp) + { + p_old_path = osm_physp_get_dr_path_ptr( p_old_physp ); + if ( p_old_path ) + { + sprintf( dr_old_path, "old_path:" ); + for (i = 0; i <= p_old_path->hop_count; i++ ) + { + sprintf( line, "[%X]", p_old_path->path[i] ); + strcat( dr_old_path, line ); + } + } + } + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_set_links: ERR 0D01: " + "Found duplicated guids or 12x link " + "with lane reversal badly configured.\n" + "Overriding existing link to:" + "node 0x%" PRIx64 ", port number 0x%X connected to:\n" + "\t\t\t\told node 0x%" PRIx64 ", " + "port number 0x%X %s\n" + "\t\t\t\tnew node 0x%" PRIx64 ", " + "port number 0x%X %s\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + port_num, + cl_ntoh64( osm_node_get_node_guid( + p_old_neighbor_node ) ), + old_neighbor_port_num , + dr_old_path, + cl_ntoh64( p_ni_context->node_guid ), + p_ni_context->port_num, + dr_new_path + ); + + osm_log( p_rcv->p_log, OSM_LOG_SYS, + "FATAL: duplicated guids or 12x lane reversal\n"); + + if ( p_rcv->p_subn->opt.exit_on_fatal == TRUE ) + { + osm_log( p_rcv->p_log, OSM_LOG_SYS, "Exiting\n"); + exit( 1 ); + } + } + + /* + When there are only two nodes with exact same guids (connected back + to back) - the previous check for duplicated guid will not catch + them. But the link will be from the port to itself... + Enhanced Port 0 is an exception to this + */ + if ((osm_node_get_node_guid( p_node ) == p_ni_context->node_guid) && + (port_num == p_ni_context->port_num) && + (port_num != 0)) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_set_links: ERR 0D18: " + "Duplicate GUID found by link from a port to itself:" + "node 0x%" PRIx64 ", port number 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + port_num ); + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + if (p_physp) + osm_dump_dr_path(p_rcv->p_log, + osm_physp_get_dr_path_ptr(p_physp), + OSM_LOG_ERROR); + + osm_log( p_rcv->p_log, OSM_LOG_SYS, + "Errors on subnet. Duplicate GUID found " + "by link from a port to itself. 
" + "See osm log for more details\n"); + + if ( p_rcv->p_subn->opt.exit_on_fatal == TRUE ) + exit( 1 ); + } + else + { + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_set_links: " + "Creating new link between: " + "\n\t\t\t\tnode 0x%" PRIx64 ", " + "port number 0x%X and" + "\n\t\t\t\tnode 0x%" PRIx64 ", " + "port number 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + port_num, + cl_ntoh64( p_ni_context->node_guid ), + p_ni_context->port_num ); + } + + CL_ASSERT( osm_node_get_node_guid( p_neighbor_node ) == + p_ni_context->node_guid ); + + osm_node_link( p_node, port_num, p_neighbor_node, + p_ni_context->port_num ); + } + } + } + } + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_set_links: " + "Nothing to link for our own node 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_new_node( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_madw_context_t context; + osm_physp_t *p_physp; + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + uint8_t port_num; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_new_node ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + port_num = ib_node_info_get_local_port_num( p_ni ); + + /* + Request PortInfo & NodeDescription attributes for the port + that responded to the NodeInfo attribute. + Because this is a channel adapter or router, we are + not allowed to request PortInfo for the other ports. + Set the context union properly, so the recipient + knows which node & port are relevant. + */ + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + CL_ASSERT( osm_madw_get_bind_handle( p_madw ) == + osm_dr_path_get_bind_handle( + osm_physp_get_dr_path_ptr( p_physp ) ) ); + + context.pi_context.node_guid = p_ni->node_guid; + context.pi_context.port_guid = p_ni->port_guid; + context.pi_context.set_method = FALSE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + status = osm_req_get( p_rcv->p_gen_req, + osm_physp_get_dr_path_ptr( p_physp ), + IB_MAD_ATTR_PORT_INFO, + cl_hton32( port_num ), + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new_node: ERR 0D02: " + "Failure initiating PortInfo request (%s)\n", + ib_get_err_str(status)); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. 
+**********************************************************************/ +static void +__osm_ni_rcv_get_node_desc( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_madw_context_t context; + osm_physp_t *p_physp; + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + uint8_t port_num; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_get_node_desc ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + port_num = ib_node_info_get_local_port_num( p_ni ); + + /* + Request PortInfo & NodeDescription attributes for the port + that responded to the NodeInfo attribute. + Because this is a channel adapter or router, we are + not allowed to request PortInfo for the other ports. + Set the context union properly, so the recipient + knows which node & port are relevant. + */ + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + CL_ASSERT( osm_madw_get_bind_handle( p_madw ) == + osm_dr_path_get_bind_handle( + osm_physp_get_dr_path_ptr( p_physp ) ) ); + + context.nd_context.node_guid = osm_node_get_node_guid( p_node ); + + status = osm_req_get( p_rcv->p_gen_req, + osm_physp_get_dr_path_ptr( p_physp ), + IB_MAD_ATTR_NODE_DESC, + 0, + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_get_node_desc: ERR 0D03: " + "Failure initiating NodeDescription request (%s)\n", + ib_get_err_str(status)); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_new_ca_or_router( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_new_ca_or_router ); + + __osm_ni_rcv_process_new_node( p_rcv, p_node, p_madw ); + + /* + A node guid of 0 is the corner case that indicates + we discovered our own node. Initialize the subnet + object with the SM's own port guid. + */ + if( osm_madw_get_ni_context_ptr( p_madw )->node_guid == 0 ) + { + p_rcv->p_subn->sm_port_guid = p_node->node_info.port_guid; + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. 
+**********************************************************************/ +static void +__osm_ni_rcv_process_existing_ca_or_router( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + osm_port_t *p_port; + osm_port_t *p_port_check; + cl_qmap_t *p_guid_tbl; + osm_madw_context_t context; + uint8_t port_num; + osm_physp_t *p_physp; + ib_api_status_t status; + osm_dr_path_t *p_dr_path; + osm_bind_handle_t h_bind; + cl_status_t cl_status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_existing_ca_or_router ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + port_num = ib_node_info_get_local_port_num( p_ni ); + p_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + h_bind = osm_madw_get_bind_handle( p_madw ); + + /* + Determine if we have encountered this node through a + previously undiscovered port. If so, build the new + port object. + */ + p_port = (osm_port_t*)cl_qmap_get( p_guid_tbl, p_ni->port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_guid_tbl ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_ni_rcv_process_existing_ca_or_router: " + "Creating new port object with GUID 0x%" PRIx64 "\n", + cl_ntoh64( p_ni->port_guid ) ); + + osm_node_init_physp( p_node, p_madw ); + + p_port = osm_port_new( p_ni, p_node ); + if( p_port == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing_ca_or_router: ERR 0D04: " + "Unable to create new port object\n" ); + goto Exit; + } + + /* + Add the new port object to the database. + */ + p_port_check = (osm_port_t*)cl_qmap_insert( p_guid_tbl, + p_ni->port_guid, &p_port->map_item ); + if( p_port_check != p_port ) + { + /* + We should never be here! + Somehow, this port GUID already exists in the table. + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing_ca_or_router: ERR 0D12: " + "Port 0x%" PRIx64 " already in the database!\n", + cl_ntoh64( p_ni->port_guid ) ); + + osm_port_delete( &p_port ); + goto Exit; + } + + /* If we are a master, then this means the port is new on the subnet. + Add it to the new_ports_list - need to send trap 64 on these ports. + The condition that we are master is true, since if we are in discovering + state (meaning we woke up from standby or we are just initializing), + then these ports may be new to us, but are not new on the subnet. + If we are master, then the subnet as we know it is the updated one, + and any new ports we encounter should cause trap 64. C14-72.1.1 */ + if ( p_rcv->p_subn->sm_state == IB_SMINFO_STATE_MASTER ) + { + cl_status = cl_list_insert_tail( &p_rcv->p_subn->new_ports_list, p_port ); + if( cl_status != CL_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing_ca_or_router: ERR 0D08: " + "Error %s adding to list\n", + CL_STATUS_MSG( cl_status ) ); + osm_port_delete( &p_port ); + goto Exit; + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_process_existing_ca_or_router: " + "Adding port GUID:0x%016" PRIx64 " to new_ports_list\n", + cl_ntoh64(osm_node_get_node_guid( p_port->p_node )) ); + } + } + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + } + else + { + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + CL_ASSERT( p_physp ); + + if ( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing_ca_or_router: ERR 0D19: " + "Invalid physical port. 
Aborting discovery\n"); + goto Exit; + } + + /* + Update the DR Path to the port, + in case the old one is no longer available. + */ + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + + osm_dr_path_init( p_dr_path, h_bind, p_smp->hop_count, + p_smp->initial_path ); + } + + context.pi_context.node_guid = p_ni->node_guid; + context.pi_context.port_guid = p_ni->port_guid; + context.pi_context.set_method = FALSE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + + status = osm_req_get( p_rcv->p_gen_req, + osm_physp_get_dr_path_ptr( p_physp ), + IB_MAD_ATTR_PORT_INFO, + cl_hton32( port_num ), + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing_ca_or_router: ERR 0D13: " + "Failure initiating PortInfo request (%s)\n", + ib_get_err_str(status)); + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ni_rcv_process_switch( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_madw_context_t context; + osm_dr_path_t dr_path; + ib_smp_t *p_smp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_switch ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + osm_dr_path_init( &dr_path, + osm_madw_get_bind_handle( p_madw ), + p_smp->hop_count, + p_smp->initial_path ); + + context.si_context.node_guid = osm_node_get_node_guid( p_node ); + context.si_context.set_method = FALSE; + context.si_context.light_sweep = FALSE; + + /* Request a SwitchInfo attribute */ + status = osm_req_get( p_rcv->p_gen_req, + &dr_path, + IB_MAD_ATTR_SWITCH_INFO, + 0, + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + /* continue despite error */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_switch: ERR 0D06: " + "Failure initiating SwitchInfo request (%s)\n", + ib_get_err_str( status ) ); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_existing_switch( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_existing_switch ); + + /* + If this switch has already been probed during this sweep, + then don't bother reprobing it. + There is one exception - if the node has been visited, but + for some reason we don't have the switch object (this can happen + if the SwitchInfo mad didn't reach the SM) then we want + to retry to probe the switch. 
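+
+    In outline, the decision below is:
+      discovery count == 1                      -> probe SwitchInfo
+      discovery count > 1 and no SwitchInfo yet -> retry SwitchInfo
+      discovery count > 1 and SwitchInfo known  -> nothing to do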
+ */ + if( osm_node_discovery_count_get( p_node ) == 1 ) + __osm_ni_rcv_process_switch( p_rcv, p_node, p_madw ); + else + { + /* Make sure we have SwitchInfo on this node */ + if( !p_node->sw || osm_switch_discovery_count_get( p_node->sw ) == 0 ) + { + /* we don't have the SwitchInfo - retry to get it */ + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_process_existing_switch: " + "Retry to get SwitchInfo on node GUID:0x%" + PRIx64 "\n", cl_ntoh64(osm_node_get_node_guid(p_node)) ); + __osm_ni_rcv_process_switch( p_rcv, p_node, p_madw ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_new_switch( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_new_switch ); + + __osm_ni_rcv_process_switch( p_rcv, p_node, p_madw ); + + /* + A node guid of 0 is the corner case that indicates + we discovered our own node. Initialize the subnet + object with the SM's own port guid. + */ + if( osm_madw_get_ni_context_ptr( p_madw )->node_guid == 0 ) + { + p_rcv->p_subn->sm_port_guid = p_node->node_info.port_guid; + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must NOT be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_new( + IN const osm_ni_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + osm_node_t *p_node; + osm_node_t *p_node_check; + osm_port_t *p_port; + osm_port_t *p_port_check; + osm_router_t *p_rtr = NULL; + osm_router_t *p_rtr_check; + cl_qmap_t *p_node_guid_tbl; + cl_qmap_t *p_port_guid_tbl; + cl_qmap_t *p_rtr_guid_tbl; + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + osm_ni_context_t *p_ni_context; + uint8_t port_num; + cl_status_t status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_new ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + p_ni_context = osm_madw_get_ni_context_ptr( p_madw ); + port_num = ib_node_info_get_local_port_num( p_ni ); + + osm_dump_smp_dr_path( p_rcv->p_log, p_smp, OSM_LOG_VERBOSE ); + + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_ni_rcv_process_new: " + "Discovered new %s node," + "\n\t\t\t\tGUID 0x%" PRIx64 ", TID 0x%" PRIx64 "\n", + ib_get_node_type_str( p_ni->node_type ), + cl_ntoh64( p_ni->node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + + p_node = osm_node_new( p_madw ); + if( p_node == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D07: " + "Unable to create new node object\n" ); + goto Exit; + } + + /* + Create a new port object to represent this node's physical + ports in the port table. 
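+
+    Note on the error paths that follow: partially built objects are
+    torn down in reverse order of creation (router, then port, then
+    node), so a failure at any step leaves no orphaned table entries.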
+ */ + p_port = osm_port_new( p_ni, p_node ); + if( p_port == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D14: " + "Unable to create new port object\n" ); + osm_node_delete( &p_node ); + goto Exit; + } + + /* If there were RouterInfo or other router attribute, + this would be elsewhere */ + if ( p_ni->node_type == IB_NODE_TYPE_ROUTER ) + { + p_rtr = osm_router_new( p_port ); + if ( p_rtr == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D1A: " + "Unable to create new router object\n" ); + } + } + + /* + Add the new port object to the database. + */ + p_port_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + p_port_check = (osm_port_t*)cl_qmap_insert( p_port_guid_tbl, + p_ni->port_guid, + &p_port->map_item ); + if( p_port_check != p_port ) + { + /* + We should never be here! + Somehow, this port GUID already exists in the table. + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D15: " + "Duplicate Port GUID 0x%" PRIx64 "! Found by the two directed routes:\n", + cl_ntoh64( p_ni->port_guid ) ); + osm_dump_dr_path(p_rcv->p_log, + osm_physp_get_dr_path_ptr( + osm_port_get_default_phys_ptr ( p_port) ), + OSM_LOG_ERROR); + osm_dump_dr_path(p_rcv->p_log, + osm_physp_get_dr_path_ptr( + osm_port_get_default_phys_ptr ( p_port_check) ), + OSM_LOG_ERROR); + if ( p_rtr ) + osm_router_delete( &p_rtr ); + osm_port_delete( &p_port ); + osm_node_delete( &p_node ); + goto Exit; + } + + /* If we are a master, then this means the port is new on the subnet. + Add it to the new_ports_list - need to send trap 64 on these ports. + The condition that we are master is true, since if we are in discovering + state (meaning we woke up from standby or we are just initializing), + then these ports may be new to us, but are not new on the subnet. + If we are master, then the subnet as we know it is the updated one, + and any new ports we encounter should cause trap 64. C14-72.1.1 */ + if ( p_rcv->p_subn->sm_state == IB_SMINFO_STATE_MASTER ) + { + status = cl_list_insert_tail( &p_rcv->p_subn->new_ports_list, p_port ); + if( status != CL_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D05: " + "Error %s adding to new_ports_list\n", + CL_STATUS_MSG( status ) ); + if ( p_rtr ) + osm_router_delete( &p_rtr ); + osm_port_delete( &p_port ); + osm_node_delete( &p_node ); + goto Exit; + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_ni_rcv_process_new: " + "Adding port GUID:0x%016" PRIx64 " to new_ports_list\n", + cl_ntoh64( osm_node_get_node_guid( p_port->p_node ) ) ); + } + } + + if ( p_rtr && p_ni->node_type == IB_NODE_TYPE_ROUTER ) + { + p_rtr_guid_tbl = &p_rcv->p_subn->rtr_guid_tbl; + p_rtr_check = (osm_router_t*)cl_qmap_insert( p_rtr_guid_tbl, + p_ni->port_guid, + &p_rtr->map_item ); + if( p_rtr_check != p_rtr ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D1B: " + "Unable to add port GUID:0x%016" PRIx64 " to router table\n", + cl_ntoh64( p_ni->port_guid ) ); + } + } + + p_node_guid_tbl = &p_rcv->p_subn->node_guid_tbl; + p_node_check = (osm_node_t*)cl_qmap_insert( p_node_guid_tbl, + p_ni->node_guid, + &p_node->map_item ); + if( p_node_check != p_node ) + { + /* + This node must have been inserted by another thread. + This is unexpected, but is not an error. + We can simply clean-up, since the other thread will + see this processing through to completion. 
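+
+    The race is detected purely from the cl_qmap_insert() return
+    value: when it differs from the item we tried to insert, another
+    thread has already registered this node GUID, so the freshly
+    built node object is deleted and the existing one is used for
+    the link setup instead.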
+ */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_ni_rcv_process_new: " + "Discovery race detected at node 0x%" PRIx64 "\n", + cl_ntoh64( p_ni->node_guid ) ); + osm_node_delete( &p_node ); + p_node = p_node_check; + __osm_ni_rcv_set_links( p_rcv, p_node, port_num, p_ni_context ); + goto Exit; + } + else + __osm_ni_rcv_set_links( p_rcv, p_node, port_num, p_ni_context ); + + osm_node_discovery_count_inc( p_node ); + __osm_ni_rcv_get_node_desc( p_rcv, p_node, p_madw ); + + switch( p_ni->node_type ) + { + case IB_NODE_TYPE_CA: + case IB_NODE_TYPE_ROUTER: + __osm_ni_rcv_process_new_ca_or_router( p_rcv, p_node, p_madw ); + break; + case IB_NODE_TYPE_SWITCH: + __osm_ni_rcv_process_new_switch( p_rcv, p_node, p_madw ); + break; + default: + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_new: ERR 0D16: " + "Unknown node type %u with GUID 0x%" PRIx64 "\n", + p_ni->node_type, cl_ntoh64( p_ni->node_guid ) ); + break; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_ni_rcv_process_existing( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + osm_ni_context_t *p_ni_context; + uint8_t port_num; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_ni_rcv_process_existing ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + p_ni_context = osm_madw_get_ni_context_ptr( p_madw ); + port_num = ib_node_info_get_local_port_num( p_ni ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_ni_rcv_process_existing: " + "Rediscovered %s node 0x%" PRIx64 + "\n\t\t\t\tTID 0x%" PRIx64 + ", discovered %u times already\n", + ib_get_node_type_str(p_ni->node_type), + cl_ntoh64( p_ni->node_guid ), + cl_ntoh64( p_smp->trans_id ), + osm_node_discovery_count_get( p_node ) ); + } + + /* + If we haven't already encountered this existing node + on this particular sweep, then process further. 
+ */ + osm_node_discovery_count_inc( p_node ); + + switch( p_ni->node_type ) + { + case IB_NODE_TYPE_CA: + case IB_NODE_TYPE_ROUTER: + __osm_ni_rcv_process_existing_ca_or_router( p_rcv, p_node, p_madw ); + break; + + case IB_NODE_TYPE_SWITCH: + __osm_ni_rcv_process_existing_switch( p_rcv, p_node, p_madw ); + break; + + default: + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_ni_rcv_process_existing: ERR 0D09: " + "Unknown node type %u with GUID 0x%" PRIx64 "\n", + p_ni->node_type, cl_ntoh64( p_ni->node_guid ) ); + break; + } + + __osm_ni_rcv_set_links( p_rcv, p_node, port_num, p_ni_context ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ni_rcv_construct( + IN osm_ni_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ni_rcv_destroy( + IN osm_ni_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_ni_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_ni_rcv_init( + IN osm_ni_rcv_t* const p_rcv, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_ni_rcv_init ); + + osm_ni_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_gen_req = p_req; + p_rcv->p_state_mgr = p_state_mgr; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ni_rcv_process( + IN const osm_ni_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_node_info_t *p_ni; + ib_smp_t *p_smp; + osm_node_t *p_node; + boolean_t process_new_flag = FALSE; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_ni_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_ni = (ib_node_info_t*)ib_smp_get_payload_ptr( p_smp ); + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_NODE_INFO ); + + if (p_ni->node_guid == 0) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_ni_rcv_process: ERR 0D16: " + "Got Zero Node GUID! Found on the directed route:\n"); + osm_dump_smp_dr_path(p_rcv->p_log, p_smp, OSM_LOG_ERROR); + goto Exit; + } + + if (p_ni->port_guid == 0) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_ni_rcv_process: ERR 0D17: " + "Got Zero Port GUID! Found on the directed route:\n"); + osm_dump_smp_dr_path(p_rcv->p_log, p_smp, OSM_LOG_ERROR); + goto Exit; + } + + p_guid_tbl = &p_rcv->p_subn->node_guid_tbl; + + /* + Determine if this node has already been discovered, + and process accordingly. + During processing of this node, hold the shared lock. 
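+
+    Note that the lock is taken in exclusive mode
+    (CL_PLOCK_EXCL_ACQUIRE below), since discovering a new node
+    inserts into the subnet tables.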
+ */ + + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + p_node = (osm_node_t*)cl_qmap_get( p_guid_tbl, p_ni->node_guid ); + + osm_dump_node_info( p_rcv->p_log, p_ni, OSM_LOG_DEBUG ); + + if( p_node == (osm_node_t*)cl_qmap_end(p_guid_tbl) ) + { + __osm_ni_rcv_process_new( p_rcv, p_madw ); + process_new_flag = TRUE; + } + else + __osm_ni_rcv_process_existing( p_rcv, p_node, p_madw ); + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + /* + * If we processed a new node - need to signal to the state_mgr that + * change detected. BUT - we cannot call the osm_state_mgr_process + * from within the lock of p_rcv->p_lock (can cause a deadlock). + */ + if ( process_new_flag ) + osm_state_mgr_process( p_rcv->p_state_mgr, OSM_SIGNAL_CHANGE_DETECTED ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv_ctrl.c new file mode 100644 index 00000000..8633dd3e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_node_info_rcv_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_ni_rcv_ctrl_t. + * This object represents the NodeInfo request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_ni_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_ni_rcv_process( ((osm_ni_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ni_rcv_ctrl_construct( + IN osm_ni_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_ni_rcv_ctrl_destroy( + IN osm_ni_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_ni_rcv_ctrl_init( + IN osm_ni_rcv_ctrl_t* const p_ctrl, + IN osm_ni_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_ni_rcv_ctrl_init ); + + osm_ni_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_NODE_INFO, + __osm_ni_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_ni_rcv_ctrl_init: ERR 0E01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_opensm.c b/branches/Ndi/ulp/opensm/user/opensm/osm_opensm.c new file mode 100644 index 00000000..e80d6c34 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_opensm.c @@ -0,0 +1,320 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_opensm_t. + * This object represents the opensm super object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +struct routing_engine_module { + const char *name; + int (*setup)(osm_opensm_t *p_osm); +}; + +extern int osm_ucast_updn_setup(osm_opensm_t *p_osm); +extern int osm_ucast_file_setup(osm_opensm_t *p_osm); +extern int osm_ucast_ftree_setup(osm_opensm_t *p_osm); + +static int osm_ucast_null_setup(osm_opensm_t *p_osm); + +const static struct routing_engine_module routing_modules[] = { + { "null", osm_ucast_null_setup }, + { "updn", osm_ucast_updn_setup }, + { "file", osm_ucast_file_setup }, + { "ftree", osm_ucast_ftree_setup }, + { NULL, NULL } +}; + +static int setup_routing_engine(osm_opensm_t *p_osm, const char *name) +{ + const struct routing_engine_module *r; + + for (r = routing_modules; r->name && *r->name; r++) { + if(!strcmp(r->name, name)) { + p_osm->routing_engine.name = r->name; + if (r->setup(p_osm)) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "setup_routing_engine: setup of routing" + " engine \'%s\' failed\n", name); + return -2; + } + osm_log (&p_osm->log, OSM_LOG_DEBUG, + "setup_routing_engine: " + "\'%s\' routing engine set up\n", + p_osm->routing_engine.name); + return 0; + } + } + return -1; +} + +static int osm_ucast_null_setup(osm_opensm_t *p_osm) +{ + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "osm_ucast_null_setup: nothing yet - " + "will use default routing engine\n"); + return 0; +} + +/********************************************************************** + **********************************************************************/ +void +osm_opensm_construct( + IN osm_opensm_t * const p_osm ) +{ + memset( p_osm, 0, sizeof( *p_osm ) ); + osm_subn_construct( &p_osm->subn ); + osm_sm_construct( &p_osm->sm ); + osm_sa_construct( &p_osm->sa ); + osm_db_construct( &p_osm->db ); + osm_mad_pool_construct( &p_osm->mad_pool ); + osm_vl15_construct( &p_osm->vl15 ); + osm_log_construct( &p_osm->log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_opensm_destroy( + IN osm_opensm_t * const p_osm ) +{ + + /* in case of shutdown through exit proc - no ^C */ + osm_exit_flag = TRUE; + + /* + * First of all - Clear the is_sm bit. 
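+ * Clearing it (via osm_vendor_set_sm() below) stops the local port from + * advertising the IsSM capability while the SA and SM services are being + * torn down.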
+ */ + if( p_osm->sm.mad_ctrl.h_bind ) + osm_vendor_set_sm( p_osm->sm.mad_ctrl.h_bind, FALSE ); + + /* shut down the SA + * - unbind from QP1 messages + */ + osm_sa_shutdown( &p_osm->sa ); + + /* shut down the SM + * - make sure the SM sweeper thread exited + * - unbind from QP0 messages + */ + osm_sm_shutdown( &p_osm->sm ); + + /* cleanup all messages on VL15 fifo that were not sent yet */ + osm_vl15_shutdown( &p_osm->vl15, &p_osm->mad_pool ); + + /* shut down the dispatcher - so no new messages cross */ + cl_disp_shutdown( &p_osm->disp ); + + /* dump SA DB */ + osm_sa_db_file_dump(p_osm); + + /* do the destruction in reverse order as init */ + if (p_osm->routing_engine.delete) + p_osm->routing_engine.delete(p_osm->routing_engine.context); + osm_sa_destroy( &p_osm->sa ); + osm_sm_destroy( &p_osm->sm ); + osm_db_destroy( &p_osm->db ); + osm_vl15_destroy( &p_osm->vl15, &p_osm->mad_pool ); + osm_mad_pool_destroy( &p_osm->mad_pool ); + osm_vendor_delete( &p_osm->p_vendor ); + osm_subn_destroy( &p_osm->subn ); + cl_disp_destroy( &p_osm->disp ); + + cl_plock_destroy( &p_osm->lock ); + + osm_log_destroy( &p_osm->log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_opensm_init( + IN osm_opensm_t * const p_osm, + IN const osm_subn_opt_t * const p_opt ) +{ + ib_api_status_t status; + + /* Can't use log macros here, since we're initializing the log. */ + osm_opensm_construct( p_osm ); + + status = osm_log_init_v2( &p_osm->log, p_opt->force_log_flush, + p_opt->log_flags, p_opt->log_file, + p_opt->log_max_size, p_opt->accum_log_file ); + if( status != IB_SUCCESS ) + return ( status ); + + /* If there is a log level defined - add the OSM_VERSION to it. */ + osm_log( &p_osm->log, + osm_log_get_level( &p_osm->log ) & ( OSM_LOG_SYS ^ 0xFF ), "%s\n", + OSM_VERSION ); + /* Write the OSM_VERSION to the SYS_LOG */ + osm_log( &p_osm->log, OSM_LOG_SYS, "%s\n", OSM_VERSION ); /* Format Waived */ + + osm_log( &p_osm->log, OSM_LOG_FUNCS, "osm_opensm_init: [\n" ); /* Format Waived */ + + status = cl_plock_init( &p_osm->lock ); + if( status != IB_SUCCESS ) + goto Exit; + + if( p_opt->single_thread ) + { + osm_log( &p_osm->log, OSM_LOG_INFO, + "osm_opensm_init: Forcing single threaded dispatcher.\n" ); + status = cl_disp_init( &p_osm->disp, 1, "opensm" ); + } + else + { + /* + * Normal behavior is to initialize the dispatcher with + * one thread per CPU, as specified by a thread count of '0'. 
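+ * In other words, the two branches reduce to: + * cl_disp_init( &p_osm->disp, 1, "opensm" ); - a single worker thread + * cl_disp_init( &p_osm->disp, 0, "opensm" ); - one worker per CPU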
+ */ + status = cl_disp_init( &p_osm->disp, 0, "opensm" ); + } + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_subn_init( &p_osm->subn, p_osm, p_opt ); + if( status != IB_SUCCESS ) + goto Exit; + + p_osm->p_vendor = + osm_vendor_new( &p_osm->log, p_opt->transaction_timeout ); + if( p_osm->p_vendor == NULL ) + { + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + status = osm_mad_pool_init( &p_osm->mad_pool, &p_osm->log ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_vl15_init( &p_osm->vl15, + p_osm->p_vendor, + &p_osm->log, &p_osm->stats, p_opt->max_wire_smps, + &p_osm->subn, &p_osm->disp, &p_osm->lock ); + if( status != IB_SUCCESS ) + goto Exit; + + /* the DB is in use by the SM and SA so init before */ + status = osm_db_init( &p_osm->db, &p_osm->log ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sm_init( &p_osm->sm, + &p_osm->subn, + &p_osm->db, + p_osm->p_vendor, + &p_osm->mad_pool, + &p_osm->vl15, + &p_osm->log, + &p_osm->stats, &p_osm->disp, &p_osm->lock ); + + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sa_init( &p_osm->sm, + &p_osm->sa, + &p_osm->subn, + p_osm->p_vendor, + &p_osm->mad_pool, + &p_osm->log, + &p_osm->stats, &p_osm->disp, &p_osm->lock ); + + if( status != IB_SUCCESS ) + goto Exit; + + if( p_opt->routing_engine_name && + setup_routing_engine(p_osm, p_opt->routing_engine_name)) { + osm_log( &p_osm->log, OSM_LOG_VERBOSE, + "osm_opensm_init: cannot find or setup routing engine" + " \'%s\'. Default will be used instead\n", + p_opt->routing_engine_name); + goto Exit; + } + + Exit: + osm_log( &p_osm->log, OSM_LOG_FUNCS, "osm_opensm_init: ]\n" ); /* Format Waived */ + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_opensm_bind( + IN osm_opensm_t * const p_osm, + IN const ib_net64_t guid ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( &p_osm->log, osm_opensm_bind ); + + status = osm_sm_bind( &p_osm->sm, guid ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sa_bind( &p_osm->sa, guid ); + if( status != IB_SUCCESS ) + goto Exit; + Exit: + OSM_LOG_EXIT( &p_osm->log ); + return ( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_pkey.c b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey.c new file mode 100644 index 00000000..49edbdf6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey.c @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of opensm pkey manipulation functions. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.1 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void osm_pkey_tbl_construct( + IN osm_pkey_tbl_t *p_pkey_tbl) +{ + cl_ptr_vector_construct( &p_pkey_tbl->blocks ); + cl_ptr_vector_construct( &p_pkey_tbl->new_blocks ); + cl_map_construct( &p_pkey_tbl->keys ); +} + +/********************************************************************** + **********************************************************************/ +void osm_pkey_tbl_destroy( + IN osm_pkey_tbl_t *p_pkey_tbl) +{ + ib_pkey_table_t *p_block; + uint16_t num_blocks, i; + + num_blocks = (uint16_t)(cl_ptr_vector_get_size( &p_pkey_tbl->blocks )); + for (i = 0; i < num_blocks; i++) + if ((p_block = cl_ptr_vector_get( &p_pkey_tbl->blocks, i ))) + free(p_block); + cl_ptr_vector_destroy( &p_pkey_tbl->blocks ); + + num_blocks = (uint16_t)(cl_ptr_vector_get_size( &p_pkey_tbl->new_blocks )); + for (i = 0; i < num_blocks; i++) + if ((p_block = cl_ptr_vector_get( &p_pkey_tbl->new_blocks, i ))) + free(p_block); + cl_ptr_vector_destroy( &p_pkey_tbl->new_blocks ); + + cl_map_remove_all( &p_pkey_tbl->keys ); + cl_map_destroy( &p_pkey_tbl->keys ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_tbl_init( + IN osm_pkey_tbl_t *p_pkey_tbl) +{ + cl_ptr_vector_init(&p_pkey_tbl->blocks, 0, 1); + cl_ptr_vector_init(&p_pkey_tbl->new_blocks, 0, 1); + cl_map_init(&p_pkey_tbl->keys, 1); + cl_qlist_init(&p_pkey_tbl->pending); + p_pkey_tbl->used_blocks = 0; + p_pkey_tbl->max_blocks = 0; + return(IB_SUCCESS); +} + +/********************************************************************** + **********************************************************************/ +void osm_pkey_tbl_init_new_blocks( + IN const osm_pkey_tbl_t *p_pkey_tbl) +{ + ib_pkey_table_t *p_block; + size_t b, num_blocks = cl_ptr_vector_get_size(&p_pkey_tbl->new_blocks); + + for (b = 0; b < num_blocks; b++) + if ((p_block = cl_ptr_vector_get(&p_pkey_tbl->new_blocks, b))) + memset(p_block, 0, sizeof(*p_block)); +} + +/********************************************************************** + **********************************************************************/ +void osm_pkey_tbl_cleanup_pending( + IN osm_pkey_tbl_t *p_pkey_tbl) +{ + cl_list_item_t *p_item; + + /* drain the pending list; the head must be re-fetched after each free or the loop never advances */ + p_item = cl_qlist_remove_head(&p_pkey_tbl->pending); + while (p_item != cl_qlist_end(&p_pkey_tbl->pending)) + { + free((osm_pending_pkey_t *)p_item); + p_item = cl_qlist_remove_head(&p_pkey_tbl->pending); + } +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_tbl_set( + IN osm_pkey_tbl_t *p_pkey_tbl, + IN uint16_t block, + IN ib_pkey_table_t *p_tbl) +{ + uint16_t b, i; + ib_pkey_table_t *p_pkey_block; + ib_net16_t *p_prev_pkey; + ib_net16_t
pkey; + + /* make sure the block is allocated */ + if (cl_ptr_vector_get_size( &p_pkey_tbl->blocks ) > block) + p_pkey_block = + (ib_pkey_table_t *)cl_ptr_vector_get( &p_pkey_tbl->blocks, block ); + else + p_pkey_block = NULL; + + if ( !p_pkey_block ) + { + p_pkey_block = (ib_pkey_table_t *)malloc(sizeof(ib_pkey_table_t)); + /* bail out on allocation failure rather than memcpy into NULL below */ + if (!p_pkey_block) + return(IB_ERROR); + memset(p_pkey_block, 0, sizeof(ib_pkey_table_t)); + cl_ptr_vector_set( &p_pkey_tbl->blocks, block, p_pkey_block ); + } + + /* set the block values */ + memcpy( p_pkey_block, p_tbl, sizeof(ib_pkey_table_t) ); + + /* + NOTE: as the spec does not require uniqueness of PKeys in + tables, there is no other way but to refresh the entire keys map. + + Moreover, if the same key exists but with full membership, it should take + precedence over the key with limited membership! + */ + cl_map_remove_all( &p_pkey_tbl->keys ); + + for (b = 0; b < cl_ptr_vector_get_size( &p_pkey_tbl->blocks ); b++) + { + + p_pkey_block = cl_ptr_vector_get( &p_pkey_tbl->blocks, b ); + if (! p_pkey_block) + continue; + + for (i = 0; i < IB_NUM_PKEY_ELEMENTS_IN_BLOCK; i++) + { + pkey = p_pkey_block->pkey_entry[i]; + if (ib_pkey_is_invalid(pkey)) + continue; + + /* + ignore the PKey Full Member bit in the key but store + the pointer to the table element as the map value + */ + p_prev_pkey = + cl_map_get( &p_pkey_tbl->keys, ib_pkey_get_base(pkey)); + + /* insert only if there is no previous entry, or the previous entry is not a full member */ + if ((p_prev_pkey == NULL) || + (cl_ntoh16(*p_prev_pkey) < cl_ntoh16(pkey))) + cl_map_insert( &p_pkey_tbl->keys, + ib_pkey_get_base(pkey), + &(p_pkey_block->pkey_entry[i]) + ); + } + } + return(IB_SUCCESS); +} + +/********************************************************************** + **********************************************************************/ +/* + Store the given pkey in the "new" blocks array. + Also, make sure the regular block exists.
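+ + Each ib_pkey_table_t block holds IB_NUM_PKEY_ELEMENTS_IN_BLOCK (32) + entries, so, for example, a pkey stored at overall index 37 of a port + lands in block_idx 1, pkey_idx 5.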
+*/ +ib_api_status_t +osm_pkey_tbl_set_new_entry( + IN osm_pkey_tbl_t *p_pkey_tbl, + IN uint16_t block_idx, + IN uint8_t pkey_idx, + IN uint16_t pkey) +{ + ib_pkey_table_t *p_block; + + if (!(p_block = osm_pkey_tbl_new_block_get(p_pkey_tbl, block_idx))) { + p_block = (ib_pkey_table_t *)malloc(sizeof(ib_pkey_table_t)); + if (!p_block) + return(IB_ERROR); + memset(p_block, 0, sizeof(ib_pkey_table_t)); + cl_ptr_vector_set(&p_pkey_tbl->new_blocks, block_idx, p_block); + } + + p_block->pkey_entry[pkey_idx] = pkey; + if (p_pkey_tbl->used_blocks <= block_idx) + p_pkey_tbl->used_blocks = block_idx + 1; + + return(IB_SUCCESS); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_pkey_find_next_free_entry( + IN osm_pkey_tbl_t *p_pkey_tbl, + OUT uint16_t *p_block_idx, + OUT uint8_t *p_pkey_idx) +{ + ib_pkey_table_t *p_new_block; + + CL_ASSERT(p_block_idx); + CL_ASSERT(p_pkey_idx); + + while (*p_block_idx < p_pkey_tbl->max_blocks) + { + if (*p_pkey_idx > IB_NUM_PKEY_ELEMENTS_IN_BLOCK - 1) + { + *p_pkey_idx = 0; + (*p_block_idx)++; + if (*p_block_idx >= p_pkey_tbl->max_blocks) + return FALSE; + } + + p_new_block = osm_pkey_tbl_new_block_get(p_pkey_tbl, *p_block_idx); + + if (!p_new_block || + ib_pkey_is_invalid(p_new_block->pkey_entry[*p_pkey_idx])) + return TRUE; + else + (*p_pkey_idx)++; + } + return FALSE; +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_tbl_get_block_and_idx( + IN osm_pkey_tbl_t *p_pkey_tbl, + IN uint16_t *p_pkey, + OUT uint16_t *p_block_idx, + OUT uint8_t *p_pkey_idx) +{ + uint16_t num_of_blocks; + uint16_t block_index; + ib_pkey_table_t *block; + + CL_ASSERT( p_block_idx != NULL ); + CL_ASSERT( p_pkey_idx != NULL ); + + num_of_blocks = (uint16_t)cl_ptr_vector_get_size( &p_pkey_tbl->blocks ); + for (block_index = 0; block_index < num_of_blocks; block_index++) + { + block = osm_pkey_tbl_block_get(p_pkey_tbl, block_index); + if ((block->pkey_entry <= p_pkey) && + (p_pkey < block->pkey_entry + IB_NUM_PKEY_ELEMENTS_IN_BLOCK)) + { + *p_block_idx = block_index; + *p_pkey_idx = (uint8_t)(p_pkey - block->pkey_entry); + return(IB_SUCCESS); + } + } + return(IB_NOT_FOUND); +} + +/********************************************************************** + **********************************************************************/ +static boolean_t +__osm_match_pkey ( + IN const ib_net16_t *pkey1, + IN const ib_net16_t *pkey2 ) +{ + + /* if both pkeys are not full member - this is not a match */ + if (!(ib_pkey_is_full_member(*pkey1) || ib_pkey_is_full_member(*pkey2))) + return(FALSE); + + /* compare if the bases are the same. 
if they are - then + this is a match */ + if (ib_pkey_get_base(*pkey1) != ib_pkey_get_base(*pkey2)) + return(FALSE); + + return(TRUE); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_physp_share_this_pkey( + IN const osm_physp_t* const p_physp1, + IN const osm_physp_t* const p_physp2, + IN const ib_net16_t pkey ) +{ + ib_net16_t *pkey1, *pkey2; + + pkey1 = cl_map_get( &(osm_physp_get_pkey_tbl(p_physp1))->keys, + ib_pkey_get_base(pkey)); + pkey2 = cl_map_get( &(osm_physp_get_pkey_tbl(p_physp2))->keys, + ib_pkey_get_base(pkey)); + return (pkey1 && pkey2 && __osm_match_pkey(pkey1, pkey2)); +} + +/********************************************************************** + **********************************************************************/ +ib_net16_t +osm_physp_find_common_pkey( + IN const osm_physp_t* const p_physp1, + IN const osm_physp_t* const p_physp2 ) +{ + ib_net16_t *pkey1, *pkey2; + uint64_t pkey1_base, pkey2_base; + const osm_pkey_tbl_t *pkey_tbl1, *pkey_tbl2; + cl_map_iterator_t map_iter1, map_iter2; + + pkey_tbl1 = osm_physp_get_pkey_tbl(p_physp1); + pkey_tbl2 = osm_physp_get_pkey_tbl(p_physp2); + + map_iter1 = cl_map_head(&pkey_tbl1->keys); + map_iter2 = cl_map_head(&pkey_tbl2->keys); + + /* we rely on the fact the map are sorted by pkey */ + while ( (map_iter1 != cl_map_end( &pkey_tbl1->keys )) && + (map_iter2 != cl_map_end( &pkey_tbl2->keys ))) + { + pkey1 = (ib_net16_t *)cl_map_obj( map_iter1 ); + pkey2 = (ib_net16_t *)cl_map_obj( map_iter2 ); + + if (__osm_match_pkey(pkey1, pkey2)) + return *pkey1; + + /* advance the lower value if they are not equal */ + pkey1_base = cl_map_key( map_iter1 ); + pkey2_base = cl_map_key( map_iter2 ); + if (pkey2_base == pkey1_base) + { + map_iter1 = cl_map_next( map_iter1 ); + map_iter2 = cl_map_next( map_iter2 ); + } + else if (pkey2_base < pkey1_base) + map_iter2 = cl_map_next( map_iter2 ); + else + map_iter1 = cl_map_next( map_iter1 ); + } + + return 0; +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_physp_share_pkey( + IN osm_log_t* p_log, + IN const osm_physp_t* const p_physp_1, + IN const osm_physp_t* const p_physp_2 ) +{ + const osm_pkey_tbl_t *pkey_tbl1, *pkey_tbl2; + + if (p_physp_1 == p_physp_2) + return TRUE; + + pkey_tbl1 = osm_physp_get_pkey_tbl(p_physp_1); + pkey_tbl2 = osm_physp_get_pkey_tbl(p_physp_2); + + /* + The spec: 10.9.2 does not require each phys port to have PKey Table. + So actually if it does not, we need to use the default port instead. 
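+ + Until that is implemented, an empty keys map is treated as matching + everything - see the cl_is_map_empty() test below.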
+ + HACK: meanwhile we will ignore the check + */ + if (cl_is_map_empty(&pkey_tbl1->keys) || cl_is_map_empty(&pkey_tbl2->keys)) + return TRUE; + + return !ib_pkey_is_invalid(osm_physp_find_common_pkey(p_physp_1, p_physp_2)); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_port_share_pkey( + IN osm_log_t* p_log, + IN const osm_port_t* const p_port_1, + IN const osm_port_t* const p_port_2 ) { + + osm_physp_t *p_physp1, *p_physp2; + boolean_t ret; + + OSM_LOG_ENTER( p_log, osm_port_share_pkey ); + + if (!p_port_1 || !p_port_2) + { + ret = FALSE; + goto Exit; + } + + p_physp1 = osm_port_get_default_phys_ptr(p_port_1); + p_physp2 = osm_port_get_default_phys_ptr(p_port_2); + + if (!p_physp1 || !p_physp2) + { + ret = FALSE; + goto Exit; + } + + ret = osm_physp_share_pkey(p_log, p_physp1, p_physp2); + +Exit: + OSM_LOG_EXIT(p_log); + return ret; +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_lid_share_pkey( + IN osm_log_t* p_log, + IN const osm_subn_t * const p_subn, + IN const ib_net16_t lid1, + IN const uint8_t port_num1, + IN const ib_net16_t lid2, + IN const uint8_t port_num2 ) { + + osm_physp_t *p_physp1, *p_physp2; + osm_port_t *p_port1, *p_port2; + osm_node_t *p_node1, *p_node2; + const cl_ptr_vector_t* const p_port_lid_tbl = &(p_subn->port_lid_tbl); + + OSM_LOG_ENTER( p_log, osm_lid_share_pkey ); + + p_port1 = cl_ptr_vector_get(p_port_lid_tbl, lid1); + p_port2 = cl_ptr_vector_get(p_port_lid_tbl, lid2); + + p_node1 = p_port1->p_node; + p_node2 = p_port2->p_node; + + if (osm_node_get_type( p_node1 ) == IB_NODE_TYPE_SWITCH) + { + p_physp1 = osm_node_get_physp_ptr( p_node1, port_num1 ); + } + else + { + p_physp1 = osm_port_get_default_phys_ptr(p_port1); + } + + if (osm_node_get_type( p_node2 ) == IB_NODE_TYPE_SWITCH) + { + p_physp2 = osm_node_get_physp_ptr( p_node2, port_num2 ); + } + else + { + p_physp2 = osm_port_get_default_phys_ptr(p_port2); + } + + return(osm_physp_share_pkey(p_log, p_physp1, p_physp2)); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_physp_has_pkey( + IN osm_log_t* p_log, + IN const ib_net16_t pkey, + IN const osm_physp_t* const p_physp ) { + + ib_net16_t *p_pkey, pkey_base; + const osm_pkey_tbl_t *pkey_tbl; + boolean_t res = FALSE; + + OSM_LOG_ENTER( p_log, osm_physp_has_pkey ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_has_pkey: " + "Search for PKey: 0x%4x\n", + cl_ntoh16(pkey) ); + + /* if the pkey given is an invalid pkey - return TRUE. 
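+ An "invalid" PKey here is one whose 15-bit base value is the reserved + 0x0000, which is never assigned to any partition.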
*/ + if(ib_pkey_is_invalid(pkey)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_has_pkey: " + "Given invalid PKey - we treat it loosely and allow it\n"); + res = TRUE; + goto Exit; + } + + pkey_base = ib_pkey_get_base(pkey); + + pkey_tbl = osm_physp_get_pkey_tbl(p_physp); + + p_pkey = cl_map_get( &pkey_tbl->keys, pkey_base); + if (p_pkey) + { + res = TRUE; + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_has_pkey: " + "PKey 0x%04x was found\n", cl_ntoh16(pkey)); + } + else + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_has_pkey: " + "PKey 0x%04x was not found\n", cl_ntoh16(pkey)); + } + + Exit: + OSM_LOG_EXIT( p_log ); + return res; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_mgr.c new file mode 100644 index 00000000..a03f7a4b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_mgr.c @@ -0,0 +1,596 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of the P_Key Manager (Partition Manager). + * This is part of the OpenSM. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +/* + The max number of pkey blocks for a physical port is located in + a different place for switch external ports (SwitchInfo) and the + rest of the ports (NodeInfo).
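+ + Either way the capability is counted in pkeys, not blocks, hence the + (num_pkeys + 31) / 32 rounding below; e.g. partition_cap == 64 yields + 2 blocks of 32 entries each.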
+*/ +static uint16_t +pkey_mgr_get_physp_max_blocks( + IN const osm_subn_t *p_subn, + IN const osm_physp_t *p_physp ) +{ + osm_node_t *p_node = osm_physp_get_node_ptr( p_physp ); + uint16_t num_pkeys = 0; + + if ( !p_node->sw || + ( osm_physp_get_port_num( p_physp ) == 0 ) ) + num_pkeys = cl_ntoh16( p_node->node_info.partition_cap ); + else + num_pkeys = cl_ntoh16( p_node->sw->switch_info.enforce_cap ); + return((num_pkeys + 31) / 32); +} + +/********************************************************************** + **********************************************************************/ +/* + * Insert new pending pkey entry to the specific port pkey table + * pending pkeys. New entries are inserted at the back. + */ +static void +pkey_mgr_process_physical_port( + IN osm_log_t *p_log, + IN const osm_req_t *p_req, + IN const ib_net16_t pkey, + IN osm_physp_t *p_physp ) +{ + osm_node_t *p_node = osm_physp_get_node_ptr( p_physp ); + osm_pkey_tbl_t *p_pkey_tbl; + ib_net16_t *p_orig_pkey; + char *stat = NULL; + osm_pending_pkey_t *p_pending; + + p_pkey_tbl = osm_physp_get_mod_pkey_tbl( p_physp ); + p_pending = (osm_pending_pkey_t *)malloc( sizeof( osm_pending_pkey_t ) ); + if (!p_pending) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_process_physical_port: ERR 0502: " + "Failed to allocate new pending pkey entry for node " + "0x%016" PRIx64 " port %u\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + return; + } + p_pending->pkey = pkey; + p_orig_pkey = cl_map_get( &p_pkey_tbl->keys, ib_pkey_get_base( pkey ) ); + if (!p_orig_pkey) + { + p_pending->is_new = TRUE; + cl_qlist_insert_tail( &p_pkey_tbl->pending, (cl_list_item_t*)p_pending ); + stat = "inserted"; + } + else + { + CL_ASSERT( ib_pkey_get_base( *p_orig_pkey ) == ib_pkey_get_base( pkey ) ); + p_pending->is_new = FALSE; + if (osm_pkey_tbl_get_block_and_idx( + p_pkey_tbl, p_orig_pkey, + &p_pending->block, &p_pending->index ) != IB_SUCCESS) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_process_physical_port: ERR 0503: " + "Failed to obtain P_Key 0x%04x block and index for node " + "0x%016" PRIx64 " port %u\n", + ib_pkey_get_base( pkey ), + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + return; + } + cl_qlist_insert_head( &p_pkey_tbl->pending, (cl_list_item_t*)p_pending ); + stat = "updated"; + } + + osm_log( p_log, OSM_LOG_DEBUG, + "pkey_mgr_process_physical_port: " + "pkey 0x%04x was %s for node 0x%016" PRIx64 + " port %u\n", + cl_ntoh16( pkey ), stat, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); +} + +/********************************************************************** + **********************************************************************/ +static void +pkey_mgr_process_partition_table( + osm_log_t *p_log, + const osm_req_t *p_req, + const osm_prtn_t *p_prtn, + const boolean_t full ) +{ + const cl_map_t *p_tbl = + full ? 
&p_prtn->full_guid_tbl : &p_prtn->part_guid_tbl; + cl_map_iterator_t i, i_next; + ib_net16_t pkey = p_prtn->pkey; + osm_physp_t *p_physp; + + if (full) + pkey |= cl_hton16( 0x8000 ); + + i_next = cl_map_head( p_tbl ); + while ( i_next != cl_map_end( p_tbl ) ) + { + i = i_next; + i_next = cl_map_next( i ); + p_physp = cl_map_obj( i ); + if ( p_physp && osm_physp_is_valid( p_physp ) ) + pkey_mgr_process_physical_port( p_log, p_req, pkey, p_physp ); + } +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +pkey_mgr_update_pkey_entry( + IN const osm_req_t *p_req, + IN const osm_physp_t *p_physp, + IN const ib_pkey_table_t *block, + IN const uint16_t block_index ) +{ + osm_madw_context_t context; + osm_node_t *p_node = osm_physp_get_node_ptr( p_physp ); + uint32_t attr_mod; + + context.pkey_context.node_guid = osm_node_get_node_guid( p_node ); + context.pkey_context.port_guid = osm_physp_get_port_guid( p_physp ); + context.pkey_context.set_method = TRUE; + attr_mod = block_index; + if ( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ) + attr_mod |= osm_physp_get_port_num( p_physp ) << 16; + return osm_req_set( p_req, osm_physp_get_dr_path_ptr( p_physp ), + ( uint8_t * ) block, sizeof( *block ), + IB_MAD_ATTR_P_KEY_TABLE, + cl_hton32( attr_mod ), CL_DISP_MSGID_NONE, &context ); +} + +/********************************************************************** + **********************************************************************/ +static boolean_t +pkey_mgr_enforce_partition( + IN osm_log_t *p_log, + IN const osm_req_t *p_req, + IN const osm_physp_t *p_physp, + IN const boolean_t enforce) +{ + osm_madw_context_t context; + uint8_t payload[IB_SMP_DATA_SIZE]; + ib_port_info_t *p_pi; + ib_api_status_t status; + + if (!(p_pi = osm_physp_get_port_info_ptr( p_physp ))) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_enforce_partition: ERR 0507: " + "No port info for " + "node 0x%016" PRIx64 " port %u\n", + cl_ntoh64( + osm_node_get_node_guid( + osm_physp_get_node_ptr( p_physp ))), + osm_physp_get_port_num( p_physp ) ); + return FALSE; + } + + if ((p_pi->vl_enforce & 0xc) == (0xc)*(enforce == TRUE)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "pkey_mgr_enforce_partition: " + "No need to update PortInfo for " + "node 0x%016" PRIx64 " port %u\n", + cl_ntoh64( + osm_node_get_node_guid( + osm_physp_get_node_ptr( p_physp ))), + osm_physp_get_port_num( p_physp ) ); + return FALSE; + } + + memset( payload, 0, IB_SMP_DATA_SIZE ); + memcpy( payload, p_pi, sizeof(ib_port_info_t) ); + + p_pi = (ib_port_info_t*)payload; + if (enforce == TRUE) + p_pi->vl_enforce |= 0xc; + else + p_pi->vl_enforce &= ~0xc; + p_pi->state_info2 = 0; + ib_port_info_set_port_state( p_pi, IB_LINK_NO_CHANGE ); + + context.pi_context.node_guid = + osm_node_get_node_guid( osm_physp_get_node_ptr( p_physp ) ); + context.pi_context.port_guid = osm_physp_get_port_guid( p_physp ); + context.pi_context.set_method = TRUE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + status = osm_req_set( p_req, osm_physp_get_dr_path_ptr( p_physp ), + payload, sizeof(payload), + IB_MAD_ATTR_PORT_INFO, + cl_hton32( osm_physp_get_port_num( p_physp ) ), + CL_DISP_MSGID_NONE, &context ); + if (status != IB_SUCCESS) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_enforce_partition: ERR 0511: " + "Failed to 
set PortInfo for " + "node 0x%016" PRIx64 " port %u\n", + cl_ntoh64( + osm_node_get_node_guid( + osm_physp_get_node_ptr( p_physp ))), + osm_physp_get_port_num( p_physp ) ); + return FALSE; + } + else + { + osm_log( p_log, OSM_LOG_DEBUG, + "pkey_mgr_enforce_partition: " + "Set PortInfo for " + "node 0x%016" PRIx64 " port %u\n", + cl_ntoh64( + osm_node_get_node_guid( + osm_physp_get_node_ptr( p_physp ))), + osm_physp_get_port_num( p_physp ) ); + return TRUE; + } +} + +/********************************************************************** + **********************************************************************/ +static boolean_t pkey_mgr_update_port( + osm_log_t *p_log, + osm_req_t *p_req, + const osm_port_t * const p_port ) +{ + osm_physp_t *p_physp; + osm_node_t *p_node; + ib_pkey_table_t *block, *new_block; + osm_pkey_tbl_t *p_pkey_tbl; + uint16_t block_index; + uint8_t pkey_index; + uint16_t last_free_block_index = 0; + uint8_t last_free_pkey_index = 0; + uint16_t num_of_blocks; + uint16_t max_num_of_blocks; + ib_api_status_t status; + boolean_t ret_val = FALSE; + osm_pending_pkey_t *p_pending; + boolean_t found; + ib_pkey_table_t empty_block; + + memset(&empty_block, 0, sizeof(ib_pkey_table_t)); + + p_physp = osm_port_get_default_phys_ptr( p_port ); + if ( !osm_physp_is_valid( p_physp ) ) + return FALSE; + + p_node = osm_physp_get_node_ptr( p_physp ); + p_pkey_tbl = osm_physp_get_mod_pkey_tbl( p_physp ); + num_of_blocks = osm_pkey_tbl_get_num_blocks( p_pkey_tbl ); + max_num_of_blocks = pkey_mgr_get_physp_max_blocks( p_req->p_subn, p_physp ); + if ( p_pkey_tbl->max_blocks > max_num_of_blocks ) + { + osm_log( p_log, OSM_LOG_INFO, + "pkey_mgr_update_port: " + "Max number of blocks reduced from %u to %u " + "for node 0x%016" PRIx64 " port %u\n", + p_pkey_tbl->max_blocks, max_num_of_blocks, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + } + p_pkey_tbl->max_blocks = max_num_of_blocks; + + osm_pkey_tbl_init_new_blocks( p_pkey_tbl ); + p_pkey_tbl->used_blocks = 0; + + /* + process every pending pkey in order - + first must be "updated" last are "new" + */ + p_pending = (osm_pending_pkey_t *)cl_qlist_remove_head( &p_pkey_tbl->pending ); + while ( p_pending != (osm_pending_pkey_t *)cl_qlist_end( &p_pkey_tbl->pending )) + { + if (p_pending->is_new == FALSE) + { + block_index = p_pending->block; + pkey_index = p_pending->index; + found = TRUE; + } + else + { + found = osm_pkey_find_next_free_entry( p_pkey_tbl, + &last_free_block_index, + &last_free_pkey_index ); + if (!found) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_update_port: ERR 0504: " + "Failed to find empty space for new pkey 0x%04x " + "for node 0x%016" PRIx64 " port %u\n", + cl_ntoh16( p_pending->pkey ), + cl_ntoh64( osm_node_get_node_guid( p_node )), + osm_physp_get_port_num( p_physp ) ); + } + else + { + block_index = last_free_block_index; + pkey_index = last_free_pkey_index++; + } + } + + if (found) + { + if (IB_SUCCESS != osm_pkey_tbl_set_new_entry( p_pkey_tbl, block_index, + pkey_index, p_pending->pkey )) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_update_port: ERR 0505: " + "Failed to set PKey 0x%04x in block %u idx %u " + "for node 0x%016" PRIx64 " port %u\n", + p_pending->pkey, block_index, pkey_index, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + free(p_pending); + p_pending = (osm_pending_pkey_t *)cl_qlist_remove_head( &p_pkey_tbl->pending ); + } + + /* now look for changes and store */ + for (block_index = 0; 
block_index < num_of_blocks; block_index++) + { + block = osm_pkey_tbl_block_get( p_pkey_tbl, block_index ); + new_block = osm_pkey_tbl_new_block_get( p_pkey_tbl, block_index ); + if (!new_block) + new_block = &empty_block; + if (block && !memcmp( new_block, block, sizeof( *block ) )) + continue; + + status = pkey_mgr_update_pkey_entry( p_req, p_physp, new_block, block_index ); + if (status == IB_SUCCESS) + { + osm_log( p_log, OSM_LOG_DEBUG, + "pkey_mgr_update_port: " + "Updated " + "pkey table block %d for node 0x%016" PRIx64 " port %u\n", + block_index, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + ret_val = TRUE; + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_update_port: ERR 0506: " + "pkey_mgr_update_pkey_entry() failed to update " + "pkey table block %d for node 0x%016" PRIx64 " port %u\n", + block_index, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + return ret_val; +} + +/********************************************************************** + **********************************************************************/ +static boolean_t +pkey_mgr_update_peer_port( + osm_log_t *p_log, + const osm_req_t *p_req, + const osm_subn_t *p_subn, + const osm_port_t * const p_port, + boolean_t enforce ) +{ + osm_physp_t *p_physp, *peer; + osm_node_t *p_node; + ib_pkey_table_t *block, *peer_block; + const osm_pkey_tbl_t *p_pkey_tbl; + osm_pkey_tbl_t *p_peer_pkey_tbl; + uint16_t block_index; + uint16_t num_of_blocks; + uint16_t peer_max_blocks; + ib_api_status_t status = IB_SUCCESS; + boolean_t ret_val = FALSE; + boolean_t port_info_set = FALSE; + ib_pkey_table_t empty_block; + + memset(&empty_block, 0, sizeof(ib_pkey_table_t)); + + p_physp = osm_port_get_default_phys_ptr( p_port ); + if (!osm_physp_is_valid( p_physp )) + return FALSE; + peer = osm_physp_get_remote( p_physp ); + if ( !peer || !osm_physp_is_valid( peer ) ) + return FALSE; + p_node = osm_physp_get_node_ptr( peer ); + if ( !p_node->sw || !p_node->sw->switch_info.enforce_cap ) + return FALSE; + + p_pkey_tbl = osm_physp_get_pkey_tbl( p_physp ); + p_peer_pkey_tbl = osm_physp_get_mod_pkey_tbl( peer ); + num_of_blocks = osm_pkey_tbl_get_num_blocks( p_pkey_tbl ); + peer_max_blocks = pkey_mgr_get_physp_max_blocks( p_subn, peer ); + if (peer_max_blocks < p_pkey_tbl->used_blocks) + { + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_update_peer_port: ERR 0508: " + "Not enough pkey entries (%u < %u) on switch 0x%016" PRIx64 + " port %u. 
Clearing Enforcement bit\n", + peer_max_blocks, num_of_blocks, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( peer ) ); + enforce = FALSE; + } + + if ( pkey_mgr_enforce_partition( p_log, p_req, peer, enforce ) ) + port_info_set = TRUE; + + if (enforce == FALSE) + return port_info_set; + + p_peer_pkey_tbl->used_blocks = p_pkey_tbl->used_blocks; + for (block_index = 0; block_index < p_pkey_tbl->used_blocks; block_index++) + { + block = osm_pkey_tbl_new_block_get( p_pkey_tbl, block_index ); + if (!block) + block = &empty_block; + + peer_block = osm_pkey_tbl_block_get( p_peer_pkey_tbl, block_index ); + if ( !peer_block || memcmp( peer_block, block, sizeof( *peer_block ) ) ) + { + status = pkey_mgr_update_pkey_entry( p_req, peer, block, block_index ); + if ( status == IB_SUCCESS ) + ret_val = TRUE; + else + osm_log( p_log, OSM_LOG_ERROR, + "pkey_mgr_update_peer_port: ERR 0509: " + "pkey_mgr_update_pkey_entry() failed to update " + "pkey table block %d for node 0x%016" PRIx64 + " port %u\n", + block_index, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( peer ) ); + } + } + + if ( (ret_val == TRUE) && osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "pkey_mgr_update_peer_port: " + "Pkey table was updated for node 0x%016" PRIx64 + " port %u\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + osm_physp_get_port_num( peer ) ); + } + + if (port_info_set) + return TRUE; + return ret_val; +} + +/********************************************************************** + **********************************************************************/ +osm_signal_t +osm_pkey_mgr_process( + IN osm_opensm_t *p_osm ) +{ + cl_qmap_t *p_tbl; + cl_map_item_t *p_next; + osm_prtn_t *p_prtn; + osm_port_t *p_port; + osm_signal_t signal = OSM_SIGNAL_DONE; + osm_node_t *p_node; + + CL_ASSERT( p_osm ); + + OSM_LOG_ENTER( &p_osm->log, osm_pkey_mgr_process ); + + CL_PLOCK_EXCL_ACQUIRE( &p_osm->lock ); + + if ( osm_prtn_make_partitions( &p_osm->log, &p_osm->subn ) != IB_SUCCESS ) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "osm_pkey_mgr_process: ERR 0510: " + "osm_prtn_make_partitions() failed\n" ); + goto _err; + } + + /* populate the pending pkey entries by scanning all partitions */ + p_tbl = &p_osm->subn.prtn_pkey_tbl; + p_next = cl_qmap_head( p_tbl ); + while ( p_next != cl_qmap_end( p_tbl ) ) + { + p_prtn = ( osm_prtn_t * ) p_next; + p_next = cl_qmap_next( p_next ); + pkey_mgr_process_partition_table( &p_osm->log, &p_osm->sm.req, + p_prtn, FALSE ); + pkey_mgr_process_partition_table( &p_osm->log, &p_osm->sm.req, + p_prtn, TRUE ); + } + + /* calculate and set new pkey tables */ + p_tbl = &p_osm->subn.port_guid_tbl; + p_next = cl_qmap_head( p_tbl ); + while ( p_next != cl_qmap_end( p_tbl ) ) + { + p_port = ( osm_port_t * ) p_next; + p_next = cl_qmap_next( p_next ); + if ( pkey_mgr_update_port( &p_osm->log, &p_osm->sm.req, p_port ) ) + signal = OSM_SIGNAL_DONE_PENDING; + p_node = osm_port_get_parent_node( p_port ); + if ( ( osm_node_get_type( p_node ) != IB_NODE_TYPE_SWITCH ) && + pkey_mgr_update_peer_port( &p_osm->log, &p_osm->sm.req, + &p_osm->subn, p_port, + !p_osm->subn.opt.no_partition_enforcement ) ) + signal = OSM_SIGNAL_DONE_PENDING; + } + + _err: + CL_PLOCK_RELEASE( &p_osm->lock ); + OSM_LOG_EXIT( &p_osm->log ); + return ( signal ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv.c new file mode 100644 index 00000000..47cbac6e --- /dev/null +++ 
b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv.c @@ -0,0 +1,220 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rcv_construct( + IN osm_pkey_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rcv_destroy( + IN osm_pkey_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_pkey_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_rcv_init( + IN osm_pkey_rcv_t* const p_rcv, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_pkey_rcv_init ); + + osm_pkey_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_req = p_req; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +/* + * WE MIGHT ONLY RECEIVE GET or SET responses + */ +void +osm_pkey_rcv_process( + IN const osm_pkey_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_pkey_table_t *p_pkey_tbl; + ib_smp_t *p_smp; + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_node_t *p_node; + osm_pkey_context_t *p_context; + ib_net64_t port_guid; + ib_net64_t node_guid; + uint8_t port_num; + uint16_t block_num; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_pkey_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = 
osm_madw_get_smp_ptr( p_madw ); + + p_context = osm_madw_get_pkey_context_ptr( p_madw ); + p_pkey_tbl = (ib_pkey_table_t*)ib_smp_get_payload_ptr( p_smp ); + + port_guid = p_context->port_guid; + node_guid = p_context->node_guid; + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_P_KEY_TABLE ); + + p_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + cl_plock_excl_acquire( p_rcv->p_lock ); + p_port = (osm_port_t*)cl_qmap_get( p_guid_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_guid_tbl) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rcv_process: ERR 4806: " + "No port object for port with GUID 0x%" PRIx64 + "\n\t\t\t\tfor parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + goto Exit; + } + + p_node = osm_port_get_parent_node( p_port ); + CL_ASSERT( p_node ); + + block_num = (uint16_t)((cl_ntoh32(p_smp->attr_mod)) & 0x0000FFFF); + /* in case of a non switch node the attr modifier should be ignored */ + if (osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH) + { + port_num = (uint8_t)(((cl_ntoh32( p_smp->attr_mod)) & 0x00FF0000) >> 16 ); + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + } + else + { + p_physp = osm_port_get_default_phys_ptr(p_port); + port_num = p_port->default_port_num; + } + + CL_ASSERT( p_physp ); + + /* + We do not mind if this is a result of a set or get - all we want is to + update the subnet. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pkey_rcv_process: " + "Got GetResp(PKey) block:%u port_num %u with GUID 0x%" PRIx64 + " for parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + block_num, port_num, + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + } + + /* + Determine if we encountered a new Physical Port. + If so, ignore it. + */ + if( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rcv_process: ERR 4807: " + "Got invalid port number 0x%X\n", + port_num ); + goto Exit; + } + + osm_dump_pkey_block( p_rcv->p_log, + port_guid, block_num, + port_num, p_pkey_tbl, + OSM_LOG_DEBUG ); + + osm_physp_set_pkey_tbl( p_rcv->p_log, p_rcv->p_subn, + p_physp, p_pkey_tbl, block_num ); + + Exit: + cl_plock_release( p_rcv->p_lock ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv_ctrl.c new file mode 100644 index 00000000..ca7fe0ad --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_pkey_rcv_ctrl.c @@ -0,0 +1,116 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_pkey_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_pkey_rcv_process( ((osm_pkey_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rcv_ctrl_construct( + IN osm_pkey_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rcv_ctrl_destroy( + IN osm_pkey_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_rcv_ctrl_init( + IN osm_pkey_rcv_ctrl_t* const p_ctrl, + IN osm_pkey_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_pkey_rcv_ctrl_init ); + + osm_pkey_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_PKEY, + __osm_pkey_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_pkey_rcv_ctrl_init: ERR 4901: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_port.c b/branches/Ndi/ulp/opensm/user/opensm/osm_port.c new file mode 100644 index 00000000..9afd2e4c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_port.c @@ -0,0 +1,936 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_physp_t. + * This object represents an Infiniband Port. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_physp_construct( + IN osm_physp_t* const p_physp ) +{ + memset( p_physp, 0, sizeof(*p_physp) ); + osm_dr_path_construct( &p_physp->dr_path ); + cl_ptr_vector_construct( &p_physp->slvl_by_port ); + osm_pkey_tbl_construct( &p_physp->pkeys ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_physp_destroy( + IN osm_physp_t* const p_physp ) +{ + size_t num_slvl, i; + + /* the physp might be un-initialized */ + if (p_physp->port_guid) + { + /* free the SL2VL Tables */ + num_slvl = cl_ptr_vector_get_size(&p_physp->slvl_by_port); + for (i = 0; i < num_slvl; i++) + free(cl_ptr_vector_get(&p_physp->slvl_by_port, i)); + cl_ptr_vector_destroy(&p_physp->slvl_by_port); + + /* free the P_Key Tables */ + osm_pkey_tbl_destroy( &p_physp->pkeys ); + + memset( p_physp, 0, sizeof(*p_physp) ); + osm_dr_path_construct( &p_physp->dr_path ); /* clear dr_path */ + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_physp_init( + IN osm_physp_t* const p_physp, + IN const ib_net64_t port_guid, + IN const uint8_t port_num, + IN const struct _osm_node* const p_node, + IN const osm_bind_handle_t h_bind, + IN const uint8_t hop_count, + IN const uint8_t* const p_initial_path ) +{ + uint16_t num_slvl, i; + ib_slvl_table_t *p_slvl; + + CL_ASSERT( p_node ); + + osm_physp_construct( p_physp ); + p_physp->port_guid = port_guid; + p_physp->port_num = port_num; + p_physp->healthy = TRUE; + p_physp->p_node = (struct _osm_node*)p_node; + + osm_dr_path_init( + &p_physp->dr_path, + h_bind, + hop_count, + p_initial_path ); + + /* allocate enough SL2VL tables */ + if (osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH) + { + /* we need node num ports + 1 SL2VL tables */ + num_slvl = osm_node_get_num_physp( p_node ) + 1; + } + else + { + /* An end node - we need only one SL2VL */ + num_slvl = 1; + } + + cl_ptr_vector_init( &p_physp->slvl_by_port, num_slvl, 1); + for (i = 0; i < num_slvl; i++) + { + p_slvl = (ib_slvl_table_t *)malloc(sizeof(ib_slvl_table_t)); + if (p_slvl) + memset(p_slvl, 0, sizeof(ib_slvl_table_t)); + 
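+ /* a failed allocation leaves a NULL slot here; that is tolerated, since osm_physp_destroy() above frees every slot and free(NULL) is a no-op */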
cl_ptr_vector_set(&p_physp->slvl_by_port, i, p_slvl); + } + + /* initialize the pkey table */ + osm_pkey_tbl_init( &p_physp->pkeys ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_destroy( + IN osm_port_t* const p_port ) +{ + /* cleanup all mcm recs attached */ + osm_port_remove_all_mgrp( p_port ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_init( + IN osm_port_t* const p_port, + IN const ib_node_info_t* p_ni, + IN const osm_node_t* const p_parent_node ) +{ + uint32_t port_index; + ib_net64_t port_guid; + osm_physp_t *p_physp; + uint32_t size; + + CL_ASSERT( p_port ); + CL_ASSERT( p_ni ); + CL_ASSERT( p_parent_node ); + + osm_port_construct( p_port ); + + p_port->p_node = (struct _osm_node *)p_parent_node; + port_guid = p_ni->port_guid; + p_port->guid = port_guid; + + /* + See comment in port_new for info about this... + */ + size = p_ni->num_ports; + + p_port->physp_tbl_size = (uint8_t)(size + 1); + + /* + Get the pointers to the physical node objects "owned" by this + logical port GUID. + For switches, all the ports are owned; for HCA's and routers, + only the singular part that has this GUID is owned. + */ + p_port->default_port_num = 0xFF; + for( port_index = 0; port_index < p_port->physp_tbl_size; port_index++ ) + { + p_physp = osm_node_get_physp_ptr( p_parent_node, port_index ); + if( osm_physp_is_valid( p_physp ) && + port_guid == osm_physp_get_port_guid( p_physp ) ) + { + p_port->tbl[port_index] = p_physp; + /* + Because much of the PortInfo data is only valid + for port 0 on switches, try to keep the lowest + possible value of default_port_num. + */ + if( port_index < p_port->default_port_num ) + p_port->default_port_num = (uint8_t)port_index; + } + else + p_port->tbl[port_index] = NULL; + } + + CL_ASSERT( p_port->default_port_num < 0xFF ); +} + +/********************************************************************** + **********************************************************************/ +osm_port_t* +osm_port_new( + IN const ib_node_info_t* p_ni, + IN const osm_node_t* const p_parent_node ) +{ + osm_port_t* p_port; + uint32_t size; + + /* + The port object already contains one physical port object pointer. + Therefore, subtract 1 from the number of physical ports + used by the switch. This is not done for CA's since they + need to occupy 1 more physp pointer than they physically have since + we still reserve room for a "port 0". 
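That sizing rule is easier to see as a stand-alone sketch of the same trailing-table allocation. The struct below is a hypothetical miniature for illustration, not the real osm_port_t layout:

#include <stdlib.h>
#include <string.h>

/* Hypothetical miniature: one pointer slot lives inside the struct,
   and the malloc below grows the table past the end of the struct. */
typedef struct mini_port {
    unsigned physp_tbl_size;
    void *tbl[1];            /* slot 0; slots 1..num_ports follow */
} mini_port_t;

static mini_port_t *mini_port_new(unsigned num_ports)
{
    /* num_ports extra slots make indices 0..num_ports all valid,
       mirroring physp_tbl_size = num_ports + 1 in the code below */
    size_t bytes = sizeof(mini_port_t) + sizeof(void *) * num_ports;
    mini_port_t *p = malloc(bytes);

    if (p) {
        memset(p, 0, bytes);
        p->physp_tbl_size = num_ports + 1;
    }
    return p;
}

int main(void)
{
    mini_port_t *p = mini_port_new(8);   /* an 8-port device */
    int ok = p && p->physp_tbl_size == 9;
    free(p);
    return ok ? 0 : 1;
}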
+ */ + size = p_ni->num_ports; + + p_port = malloc( sizeof(*p_port) + sizeof(void *) * size ); + if( p_port != NULL ) + { + memset( p_port, 0, sizeof(*p_port) + sizeof(void *) * size ); + osm_port_init( p_port, p_ni, p_parent_node ); + } + + return( p_port ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_get_lid_range_ho( + IN const osm_port_t* const p_port, + IN uint16_t* const p_min_lid, + IN uint16_t* const p_max_lid ) +{ + uint8_t lmc; + + *p_min_lid = cl_ntoh16( osm_port_get_base_lid( p_port ) ); + lmc = osm_port_get_lmc( p_port ); + *p_max_lid = (uint16_t)(*p_min_lid + (1 << lmc) - 1); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_get_port_by_base_lid( + IN const osm_subn_t* const p_subn, + IN const ib_net16_t lid, + IN OUT const osm_port_t** const pp_port ) +{ + ib_api_status_t status; + uint16_t base_lid; + uint8_t lmc; + + *pp_port = NULL; + + /* Loop on lmc from 0 up through max LMC possible */ + for (lmc = 0; lmc <= IB_PORT_LMC_MAX; lmc++) + { + /* Calculate a base LID assuming this is the real LMC */ + base_lid = cl_ntoh16(lid) & ~((1 << lmc) - 1); + + /* Look for a match */ + status = cl_ptr_vector_at( &p_subn->port_lid_tbl, + base_lid, + (void**)pp_port ); + if ((status == CL_SUCCESS) && (*pp_port != NULL)) + { + /* Determine if base LID "tested" is the real base LID */ + /* This is true if the LMC "tested" is the port's actual LMC */ + if (lmc == osm_port_get_lmc( *pp_port ) ) + { + status = IB_SUCCESS; + goto Found; + } + } + } + *pp_port = NULL; + status = IB_NOT_FOUND; + + Found: + return status; +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_add_new_physp( + IN osm_port_t* const p_port, + IN const uint8_t port_num ) +{ + osm_node_t *p_node; + osm_physp_t *p_physp; + + CL_ASSERT( port_num < p_port->physp_tbl_size ); + + p_node = p_port->p_node; + CL_ASSERT( p_node ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + CL_ASSERT( osm_physp_get_port_guid( p_physp ) == p_port->guid ); + p_port->tbl[port_num] = p_physp; + + /* + For switches, we generally want to use Port 0, which is + the management port as the default Physical Port. + The LID value in the PortInfo for example, is only valid + for port 0 on switches. 
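The LMC arithmetic used above by osm_port_get_lid_range_ho and by the probe loop in osm_get_port_by_base_lid is worth a worked example; the numbers here are invented:

#include <stdio.h>

int main(void)
{
    unsigned base_lid = 0x10;   /* assumed base LID, aligned to 1 << lmc */
    unsigned lmc = 2;           /* port answers to 2^2 = 4 LIDs */
    unsigned max_lid = base_lid + (1u << lmc) - 1;   /* 0x13 */

    /* recovering the base from any LID in the range, as the
       osm_get_port_by_base_lid loop does for each candidate lmc */
    unsigned lid = 0x12;
    unsigned candidate = lid & ~((1u << lmc) - 1);   /* back to 0x10 */

    printf("range 0x%x..0x%x, base of 0x%x is 0x%x\n",
           base_lid, max_lid, lid, candidate);
    return candidate == base_lid ? 0 : 1;
}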
+ */ + if( !osm_physp_is_valid( p_port->tbl[p_port->default_port_num] ) ) + { + p_port->default_port_num = port_num; + } + else + { + if( port_num < p_port->default_port_num ) + { + p_port->default_port_num = port_num; + } + } +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_port_add_mgrp( + IN osm_port_t* const p_port, + IN const ib_net16_t mlid ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_mcm_info_t *p_mcm; + + p_mcm = osm_mcm_info_new( mlid ); + if( p_mcm ) + cl_qlist_insert_tail( &p_port->mcm_list, (cl_list_item_t*)p_mcm ); + else + status = IB_INSUFFICIENT_MEMORY; + + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static cl_status_t +__osm_port_mgrp_find_func( + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + if( *((ib_net16_t*)context) == ((osm_mcm_info_t*)p_list_item)->mlid ) + return( CL_SUCCESS ); + else + return( CL_NOT_FOUND ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_remove_mgrp( + IN osm_port_t* const p_port, + IN const ib_net16_t mlid ) +{ + cl_list_item_t *p_mcm; + + p_mcm = cl_qlist_find_from_head( &p_port->mcm_list, + __osm_port_mgrp_find_func, &mlid ); + + if( p_mcm != cl_qlist_end( &p_port->mcm_list ) ) + { + cl_qlist_remove_item( &p_port->mcm_list, p_mcm ); + osm_mcm_info_delete( (osm_mcm_info_t*)p_mcm ); + } +} + +/********************************************************************** + **********************************************************************/ +void +osm_port_remove_all_mgrp( + IN osm_port_t* const p_port ) +{ + cl_list_item_t *p_mcm; + + p_mcm = cl_qlist_remove_head( &p_port->mcm_list ); + while( p_mcm != cl_qlist_end( &p_port->mcm_list ) ) + { + osm_mcm_info_delete( (osm_mcm_info_t*)p_mcm ); + p_mcm = cl_qlist_remove_head( &p_port->mcm_list ); + } +} + +/********************************************************************** + **********************************************************************/ +uint8_t +osm_physp_calc_link_mtu( + IN osm_log_t* p_log, + IN const osm_physp_t* p_physp ) +{ + const ib_port_info_t* p_old_pi; + const ib_port_info_t* p_remote_pi; + const osm_physp_t* p_remote_physp; + uint8_t mtu; + uint8_t remote_mtu; + + OSM_LOG_ENTER( p_log, osm_physp_calc_link_mtu ); + p_old_pi = osm_physp_get_port_info_ptr( p_physp ); + + /* use the available MTU */ + mtu = ib_port_info_get_mtu_cap( p_old_pi ); + + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp && osm_physp_is_valid( p_remote_physp ) ) + { + p_remote_pi = osm_physp_get_port_info_ptr( p_remote_physp ); + remote_mtu = ib_port_info_get_mtu_cap( p_remote_pi ); + + if( osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_calc_link_mtu: " + "Remote port 0x%016" PRIx64 " port# = 0x%X : " + "MTU = %u. This Port MTU: %u\n", + cl_ntoh64( osm_physp_get_port_guid( p_remote_physp ) ), + osm_physp_get_port_num( p_remote_physp ), + remote_mtu, mtu ); + } + + if( mtu != remote_mtu ) + { + if( mtu > remote_mtu ) + mtu = remote_mtu; + + if( osm_log_is_active( p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_log, OSM_LOG_VERBOSE, + "osm_physp_calc_link_mtu: " + "MTU mismatch between ports." 
+ "\n\t\t\t\tPort 0x%016" PRIx64 ", port# 0x%X" + " and port 0x%016" PRIx64 ", port# 0x%X." + "\n\t\t\t\tUsing lower MTU of %u\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ), + cl_ntoh64( osm_physp_get_port_guid( p_remote_physp ) ), + osm_physp_get_port_num( p_remote_physp ), + mtu ); + } + } + } + + if( mtu == 0 ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_calc_link_mtu: ERR 4101: " + "Invalid MTU = 0. Forcing correction to 256\n" ); + mtu = 1; + } + + OSM_LOG_EXIT( p_log ); + return(mtu); +} + +/********************************************************************** + **********************************************************************/ +uint8_t +osm_physp_calc_link_op_vls( + IN osm_log_t* p_log, + IN const osm_subn_t* p_subn, + IN const osm_physp_t* p_physp ) +{ + const ib_port_info_t* p_old_pi; + const ib_port_info_t* p_remote_pi; + const osm_physp_t* p_remote_physp; + uint8_t op_vls; + uint8_t remote_op_vls; + + OSM_LOG_ENTER( p_log, osm_physp_calc_link_op_vls ); + p_old_pi = osm_physp_get_port_info_ptr( p_physp ); + + /* use the available VL CAP */ + op_vls = ib_port_info_get_vl_cap(p_old_pi); + + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp && osm_physp_is_valid( p_remote_physp ) ) + { + p_remote_pi = osm_physp_get_port_info_ptr( p_remote_physp ); + remote_op_vls = ib_port_info_get_vl_cap(p_remote_pi); + + if( osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_calc_link_op_vls: " + "Remote port 0x%016" PRIx64 " port# = 0x%X : " + "VL_CAP = %u. This port VL_CAP:%u\n", + cl_ntoh64( osm_physp_get_port_guid( p_remote_physp ) ), + osm_physp_get_port_num( p_remote_physp ), + remote_op_vls, + op_vls + ); + } + + if( op_vls != remote_op_vls ) + { + if( op_vls > remote_op_vls ) + op_vls = remote_op_vls; + + if( osm_log_is_active( p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_log, OSM_LOG_VERBOSE, + "osm_physp_calc_link_op_vls: " + "OP_VLS mismatch between ports." + "\n\t\t\t\tPort 0x%016" PRIx64 ", port# 0x%X" + " and port 0x%016" PRIx64 ", port# 0x%X." + "\n\t\t\t\tUsing lower OP_VLS of %u\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ), + cl_ntoh64( osm_physp_get_port_guid( p_remote_physp ) ), + osm_physp_get_port_num( p_remote_physp ), + op_vls ); + } + } + } + + /* support user limitation of max_op_vls */ + if (op_vls > p_subn->opt.max_op_vls) + op_vls = p_subn->opt.max_op_vls; + + if( op_vls == 0 ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_physp_calc_link_op_vls: ERR 4102: " + "Invalid OP_VLS = 0. Forcing correction to 1 (VL0)\n" ); + op_vls = 1; + } + + OSM_LOG_EXIT( p_log ); + return(op_vls); +} + +inline +uint64_t +__osm_ptr_to_key(void const *p) +{ + uint64_t k = 0; + + memcpy(&k, p, sizeof(void *)); + return k; +} + +inline +void * +__osm_key_to_ptr(uint64_t k) +{ + void *p = 0; + + memcpy(&p, &k, sizeof(void *)); + return p; +} + +/********************************************************************** + Traverse the fabric from the SM node following the DR path given and + add every phys port traversed to the map. Avoid tracking the first and + last phys ports (going into the first switch and into the target port). 
+ **********************************************************************/ +cl_status_t +__osm_physp_get_dr_physp_set( + IN osm_log_t* p_log, + IN osm_subn_t const *p_subn, + IN osm_dr_path_t const *p_path, + OUT cl_map_t* p_physp_map) +{ + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_node_t *p_node; + uint8_t hop; + cl_status_t status = CL_SUCCESS; + + OSM_LOG_ENTER( p_log, __osm_physp_get_dr_physp_set ); + + /* find the OSM node */ + p_port = osm_get_port_by_guid(p_subn, p_subn->sm_port_guid); + if (! p_port) + { + osm_log( p_log, OSM_LOG_ERROR, + "__osm_physp_get_dr_physp_set: ERR 4103: " + "Failed to find the SM's own port by GUID\n"); + status = CL_ERROR; + goto Exit; + } + + /* get the node of the SM */ + p_node = osm_port_get_parent_node(p_port); + + /* + traverse the path adding the nodes to the table + start after the first dummy hop and stop just before the + last one + */ + for (hop = 1; hop < p_path->hop_count - 1; hop++) + { + /* go out using the phys port of the path */ + p_physp = osm_node_get_physp_ptr(p_node, p_path->path[hop]); + + /* we track the ports we go out along the path */ + if (hop > 1) + cl_map_insert(p_physp_map, __osm_ptr_to_key(p_physp), NULL); + + osm_log( p_log, OSM_LOG_DEBUG, + "__osm_physp_get_dr_physp_set: " + "Traversed through node: 0x%016" PRIx64 + " port:%u\n", + cl_ntoh64(p_node->node_info.node_guid), + p_path->path[hop]); + + /* make sure we got a valid port and it has a remote port */ + if (!(p_physp && osm_physp_is_valid( p_physp ))) + { + osm_log( p_log, OSM_LOG_ERROR, + "__osm_physp_get_dr_physp_set: ERR 4104: " + "DR Traversal stopped on invalid port at hop:%u\n", + hop); + status = CL_ERROR; + goto Exit; + } + + if (! (p_physp = osm_physp_get_remote(p_physp))) + { + osm_log( p_log, OSM_LOG_ERROR, + "__osm_physp_get_dr_physp_set: ERR 4106: " + "DR Traversal stopped on missing remote physp at hop:%u\n", + hop); + status = CL_ERROR; + goto Exit; + } + + p_node = osm_physp_get_node_ptr(p_physp); + } + + Exit: + OSM_LOG_EXIT( p_log ); + return status; +} + +/********************************************************************** + **********************************************************************/ +void +__osm_physp_update_new_dr_path( + IN osm_physp_t const *p_dest_physp, + IN cl_map_t *p_visited_map, + IN osm_bind_handle_t *h_bind ) +{ + cl_list_t tmpPortsList; + osm_physp_t *p_physp, *p_src_physp = NULL; + uint8_t path_array[IB_SUBNET_PATH_HOPS_MAX]; + uint8_t i = 0; + osm_dr_path_t *p_dr_path; + + cl_list_construct( &tmpPortsList ); + cl_list_init( &tmpPortsList, 10 ); + + cl_list_insert_head( &tmpPortsList, p_dest_physp ); + /* get the output port where we need to come from */ + p_physp = (osm_physp_t*)cl_map_get( p_visited_map, + __osm_ptr_to_key(p_dest_physp) ); + while ( p_physp != NULL ) + { + cl_list_insert_head( &tmpPortsList, p_physp ); + /* get the input port through which we reached the output port */ + p_src_physp = p_physp; + p_physp = (osm_physp_t*)cl_map_get( p_visited_map, + __osm_ptr_to_key(p_physp) ); + /* if we reached a null p_physp - this means we are at the beginning + of the path. Break. 
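The loop above is a predecessor walk: each visited port maps back to the port it was reached from, and draining the temporary list head-first afterwards yields the hops in forward order. The same idea, reduced to plain arrays (node 0 is the assumed source, graph invented):

#include <stdio.h>

int main(void)
{
    /* pred[v] = node v was first reached from; -1 marks the source */
    int pred[] = { -1, 0, 1, 2 };
    int dest = 3, path[8], n = 0;
    int v, i;

    for (v = dest; v != -1; v = pred[v])   /* walk back toward the source */
        path[n++] = v;

    for (i = n - 1; i >= 0; i--)           /* emit hops in forward order */
        printf("%d ", path[i]);
    printf("\n");                          /* prints: 0 1 2 3 */
    return 0;
}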
*/ + if ( p_physp == NULL ) + break; + /* get the output port */ + p_physp = (osm_physp_t*)cl_map_get( p_visited_map, + __osm_ptr_to_key(p_physp) ); + } + + memset( path_array, 0, sizeof(path_array) ); + p_physp = (osm_physp_t*)cl_list_remove_head( &tmpPortsList ); + while ( p_physp != NULL ) + { + i++; + path_array[i] = p_physp->port_num; + p_physp = (osm_physp_t*)cl_list_remove_head( &tmpPortsList ); + } + if (p_src_physp) + { + p_dr_path = osm_physp_get_dr_path_ptr( p_src_physp ); + osm_dr_path_init( p_dr_path, h_bind, i, path_array ); + } + + cl_list_destroy( &tmpPortsList ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_physp_replace_dr_path_with_alternate_dr_path( + IN osm_log_t *p_log, + IN osm_subn_t const *p_subn, + IN osm_physp_t const *p_dest_physp, + IN osm_bind_handle_t *h_bind + ) +{ + cl_map_t physp_map; + cl_map_t visited_map; + osm_dr_path_t * p_dr_path; + cl_list_t *p_currPortsList; + cl_list_t *p_nextPortsList; + cl_qmap_t const *p_port_tbl; + osm_port_t *p_port; + osm_physp_t *p_physp, *p_remote_physp; + ib_net64_t port_guid; + boolean_t next_list_is_full = TRUE, reached_dest = FALSE; + uint8_t num_ports, port_num; + + /* + initialize the map of all port participating in current dr path + not including first and last switches + */ + cl_map_construct( &physp_map ); + cl_map_init( &physp_map, 4 ); + cl_map_construct( &visited_map ); + cl_map_init( &visited_map, 4 ); + p_dr_path = osm_physp_get_dr_path_ptr( p_dest_physp ); + __osm_physp_get_dr_physp_set(p_log, p_subn, p_dr_path, &physp_map); + + /* + BFS from OSM port until we find the target physp but avoid + going through mapped ports + */ + p_nextPortsList = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_nextPortsList ); + cl_list_init( p_nextPortsList, 10 ); + + p_port_tbl = &p_subn->port_guid_tbl; + port_guid = p_subn->sm_port_guid; + + CL_ASSERT( port_guid ); + + p_port = (osm_port_t*)cl_qmap_get( p_port_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_physp_replace_dr_path_with_alternate_dr_path: ERR 4105: " + "No SM port object\n" ); + goto Exit; + } + + /* + HACK: We are assuming SM is running on HCA, so when getting the default + port we'll get the port connected to the rest of the subnet. If SM is + running on SWITCH - we should try to get a dr path from all switch ports. 
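The search below is a textbook two-frontier BFS: a current list, a next list, and a visited map that also records how each port was reached. The discipline in miniature, over an invented adjacency matrix (node 0 stands in for the SM port):

#include <stdio.h>
#include <string.h>

#define N 5

int main(void)
{
    /* invented topology: links 0-1, 1-2, 1-3, 2-4 */
    int adj[N][N] = { {0,1,0,0,0}, {1,0,1,1,0}, {0,1,0,0,1},
                      {0,1,0,0,0}, {0,0,1,0,0} };
    int curr[N], next[N], visited[N] = { 0 };
    int nc = 0, nn, i, v;

    curr[nc++] = 0;
    visited[0] = 1;
    while (nc) {                        /* one iteration per BFS level */
        nn = 0;
        for (i = 0; i < nc; i++)
            for (v = 0; v < N; v++)
                if (adj[curr[i]][v] && !visited[v]) {
                    visited[v] = 1;     /* remember v was reached */
                    printf("reached %d via %d\n", v, curr[i]);
                    next[nn++] = v;
                }
        memcpy(curr, next, nn * sizeof(int));
        nc = nn;                        /* swap in the next frontier */
    }
    return 0;
}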
+ */ + p_physp = osm_port_get_default_phys_ptr( p_port ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + cl_list_insert_tail( p_nextPortsList, p_physp ); + + while (next_list_is_full == TRUE) + { + next_list_is_full = FALSE; + p_currPortsList = p_nextPortsList; + p_nextPortsList = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_nextPortsList ); + cl_list_init( p_nextPortsList, 10 ); + p_physp = (osm_physp_t*)cl_list_remove_head( p_currPortsList ); + while ( p_physp != NULL ) + { + /* If we are in a switch - need to go out through all the other + physical ports of the switch */ + num_ports = osm_node_get_num_physp( p_physp->p_node ); + + for (port_num = 1 ; port_num < num_ports ; port_num++) + { + if (osm_node_get_type( p_physp->p_node ) == IB_NODE_TYPE_SWITCH) + p_remote_physp = osm_node_get_physp_ptr( p_physp->p_node, port_num ); + else + /* this is HCA or router - the remote port is just the port connected + on the other side */ + p_remote_physp = p_physp->p_remote_physp; + + /* + make sure that all of the following occurred: + 1. The port isn't NULL + 2. The port is a valid port + 3. This is not the port we came from + 4. The port is not in the physp_map + 5. This port haven't been visited before + */ + if ( p_remote_physp && + osm_physp_is_valid ( p_remote_physp ) && + p_remote_physp != p_physp && + cl_map_get( &physp_map, __osm_ptr_to_key(p_remote_physp)) == NULL && + cl_map_get( &visited_map, __osm_ptr_to_key(p_remote_physp)) == NULL ) + { + /* Insert the port into the visited_map, and save its source port */ + cl_map_insert( &visited_map, __osm_ptr_to_key(p_remote_physp), p_physp ); + + /* Is this the p_dest_physp? */ + if ( p_remote_physp == p_dest_physp ) + { + /* update the new dr path */ + __osm_physp_update_new_dr_path( p_dest_physp, &visited_map, h_bind ); + reached_dest = TRUE; + break; + } + + /* add the p_remote_physp to the nextPortsList */ + cl_list_insert_tail( p_nextPortsList, p_remote_physp ); + next_list_is_full = TRUE; + } + } + + p_physp = (osm_physp_t*)cl_list_remove_head( p_currPortsList ); + if ( reached_dest == TRUE ) + { + /* free the rest of the currPortsList */ + while ( p_physp != NULL ) + p_physp = (osm_physp_t*)cl_list_remove_head( p_currPortsList ); + /* free the nextPortsList, if items were added to it */ + p_physp = (osm_physp_t*)cl_list_remove_head( p_nextPortsList ); + while ( p_physp != NULL ) + p_physp = (osm_physp_t*)cl_list_remove_head( p_nextPortsList ); + next_list_is_full = FALSE; + } + } + cl_list_destroy( p_currPortsList ); + free(p_currPortsList); + } + + /* cleanup */ + Exit: + cl_list_destroy( p_nextPortsList ); + free( p_nextPortsList ); + cl_map_destroy( &physp_map ); + cl_map_destroy( &visited_map ); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_link_is_healthy( + IN const osm_physp_t* const p_physp ) +{ + osm_physp_t* p_remote_physp; + CL_ASSERT( p_physp ); + p_remote_physp = p_physp->p_remote_physp; + if (p_remote_physp != NULL && osm_physp_is_valid(p_remote_physp) ) + return( (p_physp->healthy)&(p_remote_physp->healthy) ); + /* the other side is not known - consider the link as healthy */ + return(TRUE); +} + +/********************************************************************** + **********************************************************************/ +void +osm_physp_set_pkey_tbl( + IN osm_log_t* p_log, + IN const osm_subn_t* p_subn, + IN osm_physp_t* const p_physp, + 
IN ib_pkey_table_t *p_pkey_tbl, + IN uint16_t block_num ) +{ + uint16_t max_blocks; + + CL_ASSERT( p_pkey_tbl ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + /* + (14.2.5.7) - the block number valid values are 0-2047, and are further + limited by the size of the P_Key table specified by the PartitionCap on the + node. + */ + if (!p_physp->p_node->sw || p_physp->port_num == 0 ) + { + /* + The maximum blocks is defined in the node info: partition cap for CA, + routers and switch management ports. + */ + max_blocks = (cl_ntoh16(p_physp->p_node->node_info.partition_cap) + + IB_NUM_PKEY_ELEMENTS_IN_BLOCK - 1) + / IB_NUM_PKEY_ELEMENTS_IN_BLOCK; + } + else + { + /* + This is a switch, and not a management port. The maximum blocks is defined + in the switch info: partition enforcement cap. + */ + max_blocks = + (cl_ntoh16(p_physp->p_node->sw->switch_info.enforce_cap) + + IB_NUM_PKEY_ELEMENTS_IN_BLOCK - 1) / IB_NUM_PKEY_ELEMENTS_IN_BLOCK; + } + + if ( block_num >= max_blocks ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_physp_set_pkey_tbl: ERR 4108: " + "Got illegal set for block number:%u " + "For GUID: %" PRIx64 " port number:0x%X\n", + block_num, + cl_ntoh64(p_physp->p_node->node_info.node_guid), + p_physp->port_num ); + return; + } + + osm_pkey_tbl_set( &p_physp->pkeys, block_num, p_pkey_tbl); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv.c new file mode 100644 index 00000000..0b4c3462 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv.c @@ -0,0 +1,870 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pi_rcv_t. + * This object represents the PortInfo Receiver object. + * This object is part of the opensm family of objects. 
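The block ceiling used by osm_physp_set_pkey_tbl above, and again by osm_pkey_get_tables later in this file, is plain round-up division over the IBA's 32 P_Keys per block (the PartitionCap value here is invented):

#include <stdio.h>

int main(void)
{
    unsigned partition_cap = 66;   /* invented PartitionCap value */
    unsigned per_block = 32;       /* IB_NUM_PKEY_ELEMENTS_IN_BLOCK */
    unsigned max_blocks = (partition_cap + per_block - 1) / per_block;

    printf("%u P_Key entries -> %u blocks\n", partition_cap, max_blocks);
    return 0;   /* prints: 66 P_Key entries -> 3 blocks */
}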
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_pi_rcv_set_sm( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp ) +{ + osm_bind_handle_t h_bind; + osm_dr_path_t *p_dr_path; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_set_sm ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_pi_rcv_set_sm: " + "Setting 'IS_SM' bit in port attributes\n" ); + } + + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + h_bind = osm_dr_path_get_bind_handle( p_dr_path ); + /* + The 'IS_SM' bit isn't already set, so set it. + */ + osm_vendor_set_sm( h_bind, TRUE ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pi_rcv_process_endport( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp, + IN const ib_port_info_t* const p_pi ) +{ + osm_madw_context_t context; + ib_api_status_t status; + ib_net64_t port_guid; + uint8_t rate, mtu; + cl_qmap_t* p_sm_tbl; + osm_remote_sm_t* p_sm; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_process_endport ); + + port_guid = osm_physp_get_port_guid( p_physp ); + + /* HACK extended port 0 should be handled too! */ + if (osm_physp_get_port_num( p_physp ) != 0) + { + /* track the minimal endport MTU and rate */ + mtu = ib_port_info_get_mtu_cap(p_pi); + if (mtu < p_rcv->p_subn->min_ca_mtu) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pi_rcv_process_endport: " + "Setting endport minimal MTU to:%u defined by port:0x%" + PRIx64 "\n", + mtu, + cl_ntoh64( port_guid ) ); + p_rcv->p_subn->min_ca_mtu = mtu; + } + + rate = ib_port_info_compute_rate( p_pi ); + if (rate < p_rcv->p_subn->min_ca_rate) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pi_rcv_process_endport: " + "Setting endport minimal rate to:%u defined by port:0x%" + PRIx64 "\n", + rate, + cl_ntoh64( port_guid ) ); + p_rcv->p_subn->min_ca_rate = rate; + } + } + + if( port_guid == p_rcv->p_subn->sm_port_guid ) + { + /* + We received the PortInfo for our own port. + */ + if( !(p_pi->capability_mask & IB_PORT_CAP_IS_SM ) ) + { + /* + Set the IS_SM bit to indicate our port hosts an SM. + */ + __osm_pi_rcv_set_sm( p_rcv, p_physp ); + } + } + else + { + /* + Before querying the SM - we want to make sure we clean its state, so + if the querying fails we recognize that this SM is not active. 
+ */ + p_sm_tbl = &p_rcv->p_subn->sm_guid_tbl; + p_sm = (osm_remote_sm_t*)cl_qmap_get( p_sm_tbl, port_guid ); + if( p_sm != (osm_remote_sm_t*)cl_qmap_end( p_sm_tbl ) ) + { + /* clean it up */ + p_sm->smi.pri_state = 0xF0 & p_sm->smi.pri_state; + } + + if( p_pi->capability_mask & IB_PORT_CAP_IS_SM ) + { + if( p_rcv->p_subn->opt.ignore_other_sm ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pi_rcv_process_endport: " + "Ignoring SM on port 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + } + else + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pi_rcv_process_endport: " + "Detected another SM. Requesting SMInfo" + "\n\t\t\t\tPort 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + } + + /* + This port indicates it's an SM and it's not our own port. + Acquire the SMInfo Attribute. + */ + memset( &context, 0, sizeof(context) ); + context.smi_context.set_method = FALSE; + status = osm_req_get( p_rcv->p_req, + osm_physp_get_dr_path_ptr( p_physp ), + IB_MAD_ATTR_SM_INFO, + 0, + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_endport: ERR 0F05: " + "Failure requesting SMInfo (%s)\n", + ib_get_err_str( status ) ); + } + } + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_pi_rcv_process_switch_port( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN osm_physp_t* const p_physp, + IN const ib_port_info_t* const p_pi ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_madw_context_t context; + osm_physp_t *p_remote_physp; + osm_node_t *p_remote_node; + ib_net16_t orig_lid; + uint8_t port_num; + uint8_t remote_port_num; + osm_dr_path_t path; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_process_switch_port ); + + /* + Check the state of the physical port. + If there appears to be something on the other end of the wire, + then ask for NodeInfo. Ignore the switch management port. + */ + port_num = osm_physp_get_port_num( p_physp ); + /* if in_sweep_hop_0 is TRUE, then this means the SM in on the switch, + and we got switchInfo of our local switch. Do not continue + probing through the switch. */ + if( port_num != 0 && p_rcv->p_subn->in_sweep_hop_0 == FALSE) + { + switch( ib_port_info_get_port_state( p_pi ) ) + { + case IB_LINK_DOWN: + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp && osm_physp_is_valid( p_remote_physp ) ) + { + p_remote_node = osm_physp_get_node_ptr( p_remote_physp ); + remote_port_num = osm_physp_get_port_num( p_remote_physp ); + + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pi_rcv_process_switch_port: " + "Unlinking local node 0x%" PRIx64 ", port 0x%X" + "\n\t\t\t\tand remote node 0x%" PRIx64 + ", port 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + port_num, + cl_ntoh64( osm_node_get_node_guid( p_remote_node ) ), + remote_port_num ); + + osm_node_unlink( p_node, (uint8_t)port_num, + p_remote_node, (uint8_t)remote_port_num ); + + } + break; + + case IB_LINK_INIT: + case IB_LINK_ARMED: + case IB_LINK_ACTIVE: + /* + To avoid looping forever, only probe the port if it + is NOT the port that responded to the SMP. + + Request node info from the other end of this link: + 1) Copy the current path from the parent node. 
+ 2) Extend the path to the next hop thru this port. + 3) Request node info with the new path + + */ + if( p_pi->local_port_num != osm_physp_get_port_num( p_physp ) ) + { + path = *osm_physp_get_dr_path_ptr( p_physp ); + + osm_dr_path_extend( &path, osm_physp_get_port_num( p_physp ) ); + + context.ni_context.node_guid = osm_node_get_node_guid( p_node ); + context.ni_context.port_num = osm_physp_get_port_num( p_physp ); + + status = osm_req_get( p_rcv->p_req, + &path, + IB_MAD_ATTR_NODE_INFO, + 0, + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_switch_port: ERR 0F02: " + "Failure initiating NodeInfo request (%s)\n", + ib_get_err_str(status) ); + } + } + else + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_pi_rcv_process_switch_port: " + "Skipping SMP responder port 0x%X\n", + p_pi->local_port_num ); + } + } + break; + + default: + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_switch_port: ERR 0F03: " + "Unknown link state = %u, port = 0x%X\n", + ib_port_info_get_port_state( p_pi ), + p_pi->local_port_num ); + break; + } + } + + /* + Update the PortInfo attribute. + */ + osm_physp_set_port_info( p_physp, p_pi ); + + if (port_num == 0) + { + /* This is switch management port 0 */ + if ( ( orig_lid = osm_physp_trim_base_lid_to_valid_range( p_physp ) ) ) + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_switch_port: ERR 0F04: " + "Invalid base LID 0x%x corrected\n", + cl_ntoh16( orig_lid ) ); + /* Determine if base switch port 0 */ + if (p_node->sw && + !ib_switch_info_is_enhanced_port0(&p_node->sw->switch_info)) + { + /* PortState is not used on BSP0 but just in case it is DOWN */ + p_physp->port_info = *p_pi; + } + __osm_pi_rcv_process_endport(p_rcv, p_physp, p_pi); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pi_rcv_process_ca_port( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN osm_physp_t* const p_physp, + IN const ib_port_info_t* const p_pi ) +{ + ib_net16_t orig_lid; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_process_ca_port ); + + UNUSED_PARAM( p_node ); + + osm_physp_set_port_info( p_physp, p_pi ); + + if ( (orig_lid = osm_physp_trim_base_lid_to_valid_range( p_physp ) ) ) + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_ca_port: ERR 0F08: " + "Invalid base LID 0x%x corrected\n", + cl_ntoh16 ( orig_lid ) ); + + __osm_pi_rcv_process_endport(p_rcv, p_physp, p_pi); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pi_rcv_process_router_port( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN osm_physp_t* const p_physp, + IN const ib_port_info_t* const p_pi ) +{ + ib_net16_t orig_lid; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_process_router_port ); + + UNUSED_PARAM( p_node ); + + /* + Update the PortInfo attribute. 
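The probe above clones the parent's directed route and extends it by one hop before requesting NodeInfo. In miniature; the bounds check and the unused slot-0 convention are assumptions of this sketch, not the real osm_dr_path_t API:

#include <stdio.h>

#define HOPS_MAX 64   /* stands in for IB_SUBNET_PATH_HOPS_MAX */

typedef struct {
    unsigned hop_count;
    unsigned char path[HOPS_MAX];   /* path[0] is unused by convention */
} mini_dr_path_t;

static int mini_dr_path_extend(mini_dr_path_t *p, unsigned char egress_port)
{
    if (p->hop_count + 1 >= HOPS_MAX)
        return -1;                          /* no room for another hop */
    p->path[++p->hop_count] = egress_port;  /* append the new egress port */
    return 0;
}

int main(void)
{
    mini_dr_path_t path = { 2, { 0, 1, 7 } };  /* entered via ports 1, 7 */
    unsigned i;

    mini_dr_path_extend(&path, 3);  /* probe onward through port 3 */
    for (i = 1; i <= path.hop_count; i++)
        printf("hop %u: port %u\n", i, path.path[i]);
    return 0;   /* hops: 1, 7, 3 */
}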
+ */ + osm_physp_set_port_info( p_physp, p_pi ); + + if ( (orig_lid = osm_physp_trim_base_lid_to_valid_range( p_physp ) ) ) + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pi_rcv_process_router_port: ERR 0F09: " + "Invalid base LID 0x%x corrected\n", + cl_ntoh16 ( orig_lid) ); + + __osm_pi_rcv_process_endport(p_rcv, p_physp, p_pi); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +#define IBM_VENDOR_ID (0x5076) +/********************************************************************** + **********************************************************************/ +void osm_pkey_get_tables( + IN osm_log_t *p_log, + IN osm_req_t *p_req, + IN osm_subn_t* const p_subn, + IN osm_node_t* const p_node, + IN osm_physp_t* const p_physp ) { + + osm_madw_context_t context; + ib_api_status_t status; + osm_dr_path_t path; + uint8_t port_num; + uint16_t block_num, max_blocks; + uint32_t attr_mod_ho; + + OSM_LOG_ENTER( p_log, osm_pkey_get_tables ); + + path = *osm_physp_get_dr_path_ptr( p_physp ); + + context.pkey_context.node_guid = + osm_node_get_node_guid( p_node ); + context.pkey_context.port_guid = + osm_physp_get_port_guid( p_physp ); + context.pkey_context.set_method = FALSE; + + port_num = p_physp->port_num; + + if (!p_node->sw || port_num == 0) + { + /* The maximum blocks is defined by the node info partition cap for CA, + router, and switch management ports. */ + max_blocks = (cl_ntoh16(p_node->node_info.partition_cap)+IB_NUM_PKEY_ELEMENTS_IN_BLOCK -1) + / IB_NUM_PKEY_ELEMENTS_IN_BLOCK ; + } + else + { + /* This is a switch, and not a management port. The maximum blocks + is defined in the switch info partition enforcement cap. */ + + /* Check for IBM eHCA firmware defect in reporting partition enforcement cap */ + if (cl_ntoh32(ib_node_info_get_vendor_id(&p_node->node_info)) == IBM_VENDOR_ID) + p_node->sw->switch_info.enforce_cap = 0; + + /* Bail out if this is a switch with no partition enforcement capability */ + if (cl_ntoh16(p_node->sw->switch_info.enforce_cap) == 0) + goto Exit; + + max_blocks = (cl_ntoh16(p_node->sw->switch_info.enforce_cap) + + IB_NUM_PKEY_ELEMENTS_IN_BLOCK -1) / IB_NUM_PKEY_ELEMENTS_IN_BLOCK ; + } + + for (block_num = 0 ; block_num < max_blocks ; block_num++) + { + if (osm_node_get_type( p_node ) != IB_NODE_TYPE_SWITCH) + attr_mod_ho = block_num; + else + attr_mod_ho = block_num | (port_num << 16); + status = osm_req_get( p_req, + &path, + IB_MAD_ATTR_P_KEY_TABLE, + cl_hton32(attr_mod_ho), + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_physp_has_pkey: ERR 0F12: " + "Failure initiating PKeyTable request (%s)\n", + ib_get_err_str(status)); + goto Exit; + } + } + + Exit: + OSM_LOG_EXIT( p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pi_rcv_get_pkey_slvl_vla_tables( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN osm_physp_t* const p_physp ) +{ + OSM_LOG_ENTER( p_rcv->p_log, __osm_pi_rcv_get_pkey_slvl_vla_tables ); + + osm_pkey_get_tables( p_rcv->p_log, p_rcv->p_req, p_rcv->p_subn, + p_node, p_physp ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pi_rcv_construct( + IN osm_pi_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + 
**********************************************************************/ +void +osm_pi_rcv_destroy( + IN osm_pi_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_pi_rcv_destroy ); + + CL_ASSERT( p_rcv ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pi_rcv_init( + IN osm_pi_rcv_t* const p_rcv, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + OSM_LOG_ENTER( p_log, osm_pi_rcv_init ); + + osm_pi_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_req = p_req; + p_rcv->p_state_mgr = p_state_mgr; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pi_rcv_process_set( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_port_t* const p_port, + IN const uint8_t port_num, + IN osm_madw_t* const p_madw ) +{ + osm_physp_t *p_physp; + osm_node_t *p_node; + ib_net64_t port_guid; + ib_smp_t *p_smp; + ib_port_info_t *p_pi; + osm_pi_context_t *p_context; + osm_log_level_t level; + + OSM_LOG_ENTER( p_rcv->p_log, osm_pi_rcv_process_set ); + + p_context = osm_madw_get_pi_context_ptr( p_madw ); + + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + port_guid = osm_physp_get_port_guid( p_physp ); + p_node = osm_port_get_parent_node( p_port ); + CL_ASSERT( p_node ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_pi = (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp ); + + /* check for error */ + if (!p_context->ignore_errors && (cl_ntoh16(p_smp->status) & 0x7fff)) + { + /* If port already ACTIVE, don't treat status 7 as error */ + if (p_context->active_transition && + (cl_ntoh16(p_smp->status) & 0x7fff) == 0x1c) + { + level = OSM_LOG_INFO; + osm_log( p_rcv->p_log, OSM_LOG_INFO, + "osm_pi_rcv_process_set: " + "Received error status 0x%x for SetResp() during ACTIVE transition\n", + cl_ntoh16(p_smp->status) & 0x7fff); + /* Should there be a subsequent Get to validate that port is ACTIVE ? 
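The status tests above mask with 0x7fff because, for directed-route SMPs, bit 15 of the MAD status is the direction bit rather than an error flag; 0x1c is then code 7 (invalid value in attribute or attribute modifier) carried in status bits 2..4. Decoding it by hand:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t status = 0x801c;            /* sample: D bit set + code 7 */
    uint16_t masked = status & 0x7fff;   /* strip the direction bit */
    unsigned code = (masked >> 2) & 0x7; /* invalid-field code, bits 2..4 */

    printf("masked 0x%04x, code %u\n", masked, code);   /* 0x001c, 7 */
    return 0;
}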
*/ + } + else + { + level = OSM_LOG_ERROR; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pi_rcv_process_set: ERR 0F10: " + "Received error status for SetResp()\n"); + } + osm_dump_port_info( + p_rcv->p_log, + osm_node_get_node_guid( p_node ), + port_guid, + port_num, + p_pi, + level); + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pi_rcv_process_set: " + "Received logical SetResp() for GUID 0x%" PRIx64 + ", port num 0x%X" + "\n\t\t\t\tfor parent node GUID 0x%" PRIx64 + " TID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ), + port_num, + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + cl_ntoh64( p_smp->trans_id ) ); + } + + osm_physp_set_port_info( p_physp, p_pi ); + + /* We got a PortInfoSetResp - set the got_set_resp flag to TRUE */ + p_physp->got_set_resp = TRUE; + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pi_rcv_process( + IN const osm_pi_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_port_info_t *p_pi; + ib_smp_t *p_smp; + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_dr_path_t *p_dr_path; + osm_node_t *p_node; + osm_pi_context_t *p_context; + ib_net64_t port_guid; + ib_net64_t node_guid; + uint8_t port_num; + + OSM_LOG_ENTER( p_rcv->p_log, osm_pi_rcv_process ); + + CL_ASSERT( p_rcv ); + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_context = osm_madw_get_pi_context_ptr( p_madw ); + p_pi = (ib_port_info_t*)ib_smp_get_payload_ptr( p_smp ); + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO ); + + /* On receipt of client reregister, clear the reregister bit so + reregistering won't be sent again and again */ + if ( ib_port_info_get_client_rereg( p_pi ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pi_rcv_process: " + "Client reregister received on response\n"); + ib_port_info_set_client_rereg( p_pi, 0 ); + } + + port_num = (uint8_t)cl_ntoh32( p_smp->attr_mod ); + + port_guid = p_context->port_guid; + node_guid = p_context->node_guid; + + osm_dump_port_info( p_rcv->p_log, + node_guid, port_guid, port_num, p_pi, + OSM_LOG_DEBUG ); + + /* + we might get a response during a light sweep looking for a change in + the status of a remote port that did not respond in earlier sweeps. 
+ So if the context of the Get was light_sweep - we do not need to + do anything with the response - just flag that we need a heavy sweep + */ + if (p_context->light_sweep == TRUE) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pi_rcv_process: " + "Got light sweep response from remote port of parent node " + "GUID 0x%" PRIx64 " port 0x%016" PRIx64 + ", Commencing heavy sweep\n", + cl_ntoh64( node_guid ), + cl_ntoh64( port_guid ) ); + osm_state_mgr_process( p_rcv->p_state_mgr, + OSM_SIGNAL_CHANGE_DETECTED ); + goto Exit; + } + + p_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + p_port = (osm_port_t*)cl_qmap_get( p_guid_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_guid_tbl) ) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pi_rcv_process: ERR 0F06: " + "No port object for port with GUID 0x%" PRIx64 + "\n\t\t\t\tfor parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + goto Exit; + } + + /* + If we were setting the PortInfo, then receiving + this attribute was not part of sweeping the subnet. + In this case, just update the PortInfo attribute. + + In an unfortunate blunder, the IB spec defines the + return method for Set() as a GetResp(). Thus, we can't + use the method (what would have been SetResp()) to determine + our course of action. So, we have to carry this extra + boolean around to determine if we were doing Get() or Set(). + */ + if( p_context->set_method ) + { + osm_pi_rcv_process_set( p_rcv, p_port, port_num, p_madw ); + } + else + { + osm_port_discovery_count_inc( p_port ); + + /* + This PortInfo arrived because we did a Get() method, + most likely due to a subnet sweep in progress. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pi_rcv_process: " + "Discovered port num 0x%X with GUID 0x%" PRIx64 + " for parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + port_num, + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + } + + p_node = osm_port_get_parent_node( p_port ); + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_physp ); + + /* + Determine if we encountered a new Physical Port. + If so, initialize the new Physical Port then + continue processing as normal. + */ + if( !osm_physp_is_valid( p_physp ) ) + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pi_rcv_process: " + "Initializing port number 0x%X\n", + port_num ); + } + + osm_physp_init( p_physp, + port_guid, + port_num, + p_node, + osm_madw_get_bind_handle( p_madw ), + p_smp->hop_count, + p_smp->initial_path ); + + osm_port_add_new_physp( p_port, port_num ); + } + else + { + /* + Update the directed route path to this port + in case the old path is no longer usable. + */ + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + osm_dr_path_init( p_dr_path, + osm_madw_get_bind_handle( p_madw ), + p_smp->hop_count, p_smp->initial_path ); + } + + /* + Check if the update_sm_base_lid in the context is TRUE. + If it is - then update the master_sm_base_lid of the variable + in the subnet. + */ + if (p_context->update_master_sm_base_lid == TRUE) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pi_rcv_process: " + "update_master_sm is TRUE. 
" + "Updating master_sm_base_lid to:%u\n", + p_pi->master_sm_base_lid ); + + p_rcv->p_subn->master_sm_base_lid = p_pi->master_sm_base_lid; + } + + switch( osm_node_get_type( p_node ) ) + { + case IB_NODE_TYPE_CA: + __osm_pi_rcv_process_ca_port( p_rcv, + p_node, p_physp, p_pi ); + break; + case IB_NODE_TYPE_ROUTER: + __osm_pi_rcv_process_router_port( p_rcv, + p_node, p_physp, p_pi ); + break; + case IB_NODE_TYPE_SWITCH: + __osm_pi_rcv_process_switch_port( p_rcv, + p_node, p_physp, p_pi ); + break; + default: + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pi_rcv_process: ERR 0F07: " + "Unknown node type %u with GUID 0x%" PRIx64 "\n", + osm_node_get_type( p_node ), + cl_ntoh64( node_guid ) ); + break; + } + + /* + Get the tables on the physp. + */ + __osm_pi_rcv_get_pkey_slvl_vla_tables( p_rcv, p_node, p_physp ); + + } + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + Exit: + /* + Release the lock before jumping here!! + */ + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv_ctrl.c new file mode 100644 index 00000000..ff6a8a1c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_port_info_rcv_ctrl.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pi_rcv_ctrl_t. + * This object represents the PortInfo request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_pi_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_pi_rcv_process( ((osm_pi_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pi_rcv_ctrl_construct( + IN osm_pi_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_pi_rcv_ctrl_destroy( + IN osm_pi_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pi_rcv_ctrl_init( + IN osm_pi_rcv_ctrl_t* const p_ctrl, + IN osm_pi_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_pi_rcv_ctrl_init ); + + osm_pi_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_PORT_INFO, + __osm_pi_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_pi_rcv_ctrl_init: ERR 1001: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_prtn.c b/branches/Ndi/ulp/opensm/user/opensm/osm_prtn.c new file mode 100644 index 00000000..4440553d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_prtn.c @@ -0,0 +1,399 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_prtn_t. + * This object represents an IBA partition. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision$ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern int osm_prtn_config_parse_file(osm_log_t * const p_log, + osm_subn_t * const p_subn, + const char *file_name); + +static uint16_t global_pkey_counter; + +osm_prtn_t* osm_prtn_new( + IN const char *name, + IN const uint16_t pkey ) +{ + osm_prtn_t *p = malloc(sizeof(*p)); + if (!p) + return NULL; + + memset(p, 0, sizeof(*p)); + p->pkey = pkey; + p->sl = OSM_DEFAULT_SL; + cl_map_construct(&p->full_guid_tbl); + cl_map_init(&p->full_guid_tbl, 32); + cl_map_construct(&p->part_guid_tbl); + cl_map_init(&p->part_guid_tbl, 32); + + if (name && *name) + strncpy(p->name, name, sizeof(p->name)); + else + snprintf(p->name, sizeof(p->name), "%04x", cl_ntoh16(pkey)); + + return p; +} + +void osm_prtn_delete( + IN OUT osm_prtn_t** const pp_prtn ) +{ + osm_prtn_t *p = *pp_prtn; + + cl_map_remove_all(&p->full_guid_tbl); + cl_map_destroy(&p->full_guid_tbl); + cl_map_remove_all(&p->part_guid_tbl); + cl_map_destroy(&p->part_guid_tbl); + free(p); + *pp_prtn = NULL; +} + +ib_api_status_t osm_prtn_add_port(osm_log_t *p_log, osm_subn_t *p_subn, + osm_prtn_t *p, ib_net64_t guid, boolean_t full) +{ + cl_qmap_t *p_port_tbl = &p_subn->port_guid_tbl; + ib_api_status_t status = IB_SUCCESS; + cl_map_t *p_tbl; + osm_port_t *p_port; + osm_physp_t *p_physp; + + p_port = (osm_port_t *)cl_qmap_get(p_port_tbl, guid); + if (!p_port || p_port == (osm_port_t *)cl_qmap_end(p_port_tbl)) { + osm_log(p_log, OSM_LOG_VERBOSE, "osm_prtn_add_port: " + "port 0x%" PRIx64 " not found\n", + cl_ntoh64(guid)); + return status; + } + + p_physp = osm_port_get_default_phys_ptr(p_port); + if (!p_physp) { + osm_log(p_log, OSM_LOG_VERBOSE, "osm_prtn_add_port: " + "no physical for port 0x%" PRIx64 "\n", + cl_ntoh64(guid)); + return status; + } + + if (cl_map_remove(&p->part_guid_tbl, guid) || + cl_map_remove(&p->full_guid_tbl, guid)) { + osm_log(p_log, OSM_LOG_VERBOSE, "osm_prtn_add_port: " + "port 0x%" PRIx64 " already in " + "partition \'%s\' (0x%04x). Will overwrite\n", + cl_ntoh64(guid), p->name, cl_ntoh16(p->pkey)); + } + + p_tbl = (full == TRUE) ? 
&p->full_guid_tbl : &p->part_guid_tbl ; + + if (cl_map_insert(p_tbl, guid, p_physp) == NULL) + return IB_INSUFFICIENT_MEMORY; + + return status; +} + +ib_api_status_t osm_prtn_add_all(osm_log_t *p_log, osm_subn_t *p_subn, + osm_prtn_t *p, boolean_t full) +{ + cl_qmap_t *p_port_tbl = &p_subn->port_guid_tbl; + cl_map_item_t *p_item; + osm_port_t *p_port; + ib_api_status_t status = IB_SUCCESS; + + p_item = cl_qmap_head(p_port_tbl); + while (p_item != cl_qmap_end(p_port_tbl)) { + p_port = (osm_port_t *)p_item; + p_item = cl_qmap_next(p_item); + status = osm_prtn_add_port(p_log, p_subn, p, + osm_port_get_guid(p_port), full); + if (status != IB_SUCCESS) + goto _err; + } + + _err: + return status; +} + +static const ib_gid_t osm_ipoib_mgid = { + { + 0xff, /* multicast field */ + 0x12, /* non-permanent bit, link local scope */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0xff, 0xff, 0xff, 0xff, /* 32 bit IPv4 broadcast address */ + }, +}; + +/* + * HACK: Until TS resolves their noncompliant join compmask, + * we have to pre-define the MGID + */ +static const ib_gid_t osm_ts_ipoib_mgid = { + { + 0xff, /* multicast field */ + 0x12, /* non-permanent bit, link local scope */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0x00, 0x00, 0x00, 0x01, /* 32 bit IPv4 broadcast address */ + }, +}; + +ib_api_status_t osm_prtn_add_mcgroup(osm_log_t *p_log, + osm_subn_t *p_subn, osm_prtn_t *p, + unsigned is_ipoib, uint8_t rate, + uint8_t mtu, uint8_t scope) +{ + ib_member_rec_t mc_rec; + ib_net64_t comp_mask; + ib_net16_t pkey; + osm_mgrp_t *p_mgrp = NULL; + osm_sa_t *p_sa = &p_subn->p_osm->sa; + ib_api_status_t status = IB_SUCCESS; + uint8_t ts_scope; + + pkey = p->pkey | cl_hton16(0x8000); + + memset(&mc_rec, 0, sizeof(mc_rec)); + + mc_rec.mgid = osm_ipoib_mgid; /* ipv4 broadcast group */ + memcpy(&mc_rec.mgid.raw[4], &pkey, sizeof(pkey)); + + mc_rec.qkey = CL_HTON32(0x0b1b); + mc_rec.mtu = (mtu ? mtu : OSM_DEFAULT_MGRP_MTU) | (2 << 6); /* 2048 Bytes */ + mc_rec.tclass = 0; + mc_rec.pkey = pkey; + mc_rec.rate = (rate ? rate : OSM_DEFAULT_MGRP_RATE) | (2 << 6); /* 10Gb/sec */ + mc_rec.pkt_life = OSM_DEFAULT_SUBNET_TIMEOUT; + mc_rec.sl_flow_hop = ib_member_set_sl_flow_hop(p->sl, 0, 0); + /* Scope in MCMemberRecord needs to be consistent with MGID */ + mc_rec.scope_state = ib_member_set_scope_state(scope ? scope : OSM_DEFAULT_MGRP_SCOPE, MC_FULL_MEMBER); + ib_mgid_set_scope(&mc_rec.mgid, scope ? 
scope : OSM_DEFAULT_MGRP_SCOPE); + + /* don't update rate, mtu, scope */ + comp_mask = IB_MCR_COMPMASK_MTU | IB_MCR_COMPMASK_MTU_SEL + | IB_MCR_COMPMASK_RATE | IB_MCR_COMPMASK_RATE_SEL + | IB_MCR_COMPMASK_SCOPE; + status = osm_mcmr_rcv_find_or_create_new_mgrp(&p_sa->mcmr_rcv, + comp_mask, &mc_rec, &p_mgrp); + if (!p_mgrp || status != IB_SUCCESS) + osm_log( p_log, OSM_LOG_ERROR, + "osm_prtn_add_mcgroup: " + "Failed to create MC group with pkey 0x%04x\n", + cl_ntoh16(pkey)); + if (p_mgrp) + p_mgrp->well_known = TRUE; + + /* workaround for TS */ + /* FIXME: remove this upon TS fixes */ + mc_rec.mgid = osm_ts_ipoib_mgid; + memcpy(&mc_rec.mgid.raw[4], &pkey, sizeof(pkey)); + /* Scope in MCMemberRecord needs to be consistent with MGID */ + ts_scope = ib_mgid_get_scope(&osm_ts_ipoib_mgid); /* get scope from MGID */ + mc_rec.scope_state = ib_member_set_scope_state(ts_scope, MC_FULL_MEMBER); + status = osm_mcmr_rcv_find_or_create_new_mgrp(&p_sa->mcmr_rcv, + comp_mask, &mc_rec, &p_mgrp); + if (p_mgrp) + p_mgrp->well_known = TRUE; + + return status; +} + +static uint16_t __generate_pkey(osm_subn_t *p_subn) +{ + uint16_t pkey; + + cl_qmap_t *m = &p_subn->prtn_pkey_tbl; + while (global_pkey_counter < cl_ntoh16(IB_DEFAULT_PARTIAL_PKEY) - 1) { + pkey = ++global_pkey_counter; + pkey = cl_hton16(pkey); + if (cl_qmap_get(m, pkey) == cl_qmap_end(m)) + return pkey; + } + return 0; +} + +static osm_prtn_t *find_prtn_by_name(osm_subn_t *p_subn, const char *name) +{ + cl_map_item_t *p_next; + osm_prtn_t *p; + + p_next = cl_qmap_head(&p_subn->prtn_pkey_tbl); + while (p_next != cl_qmap_end(&p_subn->prtn_pkey_tbl)) { + p = (osm_prtn_t *)p_next; + p_next = cl_qmap_next(&p->map_item); + if (!strncmp(p->name, name, sizeof(p->name))) + return p; + } + + return NULL; +} + +osm_prtn_t *osm_prtn_make_new(osm_log_t *p_log, osm_subn_t *p_subn, + const char *name, uint16_t pkey) +{ + osm_prtn_t *p = NULL, *p_check; + + pkey &= cl_hton16((uint16_t)~0x8000); + + if (!pkey) { + if (name && (p = find_prtn_by_name(p_subn, name))) + return p; + if(!(pkey = __generate_pkey(p_subn))) + return NULL; + } + + p = osm_prtn_new(name, pkey); + if (!p) { + osm_log(p_log, OSM_LOG_ERROR, + "osm_prtn_make_new: Unable to create" + " partition \'%s\' (0x%04x)\n", + name, cl_ntoh16(pkey)); + return NULL; + } + + p_check = (osm_prtn_t *)cl_qmap_insert(&p_subn->prtn_pkey_tbl, + p->pkey, &p->map_item); + if (p != p_check) { + osm_log(p_log, OSM_LOG_VERBOSE, + "osm_prtn_make_new: Duplicated partition" + " definition: \'%s\' (0x%04x) prev name \'%s\'" + ". 
Will use it\n", + name, cl_ntoh16(pkey), p_check->name); + osm_prtn_delete(&p); + p = p_check; + } + + return p; +} + +static ib_api_status_t osm_prtn_make_default(osm_log_t * const p_log, + osm_subn_t * const p_subn, + boolean_t no_config) +{ + ib_api_status_t status = IB_UNKNOWN_ERROR; + osm_prtn_t *p; + + p = osm_prtn_make_new(p_log, p_subn, "Default", IB_DEFAULT_PARTIAL_PKEY); + if (!p) + goto _err; + status = osm_prtn_add_all(p_log, p_subn, p, no_config); + if (status != IB_SUCCESS) + goto _err; + cl_map_remove(&p->part_guid_tbl, p_subn->sm_port_guid); + status = osm_prtn_add_port(p_log, p_subn, p, p_subn->sm_port_guid, TRUE); + + if (no_config) + osm_prtn_add_mcgroup(p_log, p_subn, p, 1, 0, 0, 0); + + _err: + return status; +} + +ib_api_status_t osm_prtn_make_partitions(osm_log_t * const p_log, + osm_subn_t * const p_subn) +{ + struct stat statbuf; + const char *file_name; + boolean_t is_config = TRUE; + ib_api_status_t status = IB_SUCCESS; + cl_map_item_t *p_next; + osm_prtn_t *p; + + file_name = p_subn->opt.partition_config_file ? + p_subn->opt.partition_config_file : + "/etc/osm-partitions.conf"; + if (stat(file_name, &statbuf)) + is_config = FALSE; + + /* clean up current port maps */ + p_next = cl_qmap_head(&p_subn->prtn_pkey_tbl); + while (p_next != cl_qmap_end(&p_subn->prtn_pkey_tbl)) { + p = (osm_prtn_t *)p_next; + p_next = cl_qmap_next(&p->map_item); + cl_map_remove_all(&p->part_guid_tbl); + cl_map_remove_all(&p->full_guid_tbl); + } + + global_pkey_counter = 0; + + status = osm_prtn_make_default(p_log, p_subn, !is_config); + if (status != IB_SUCCESS) + goto _err; + + if (is_config && osm_prtn_config_parse_file(p_log, p_subn, file_name)) { + osm_log(p_log, OSM_LOG_VERBOSE, + "osm_prtn_make_partitions: Partition configuration " + "was not fully processed\n"); + } + + /* and now clean up empty partitions */ + p_next = cl_qmap_head(&p_subn->prtn_pkey_tbl); + while (p_next != cl_qmap_end(&p_subn->prtn_pkey_tbl)) { + p = (osm_prtn_t *)p_next; + p_next = cl_qmap_next(&p->map_item); + if (cl_map_count(&p->part_guid_tbl) == 0 && + cl_map_count(&p->full_guid_tbl) == 0) { + cl_qmap_remove_item(&p_subn->prtn_pkey_tbl, + (cl_map_item_t *)p); + osm_prtn_delete(&p); + } + } + + _err: + return status; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_prtn_config.c b/branches/Ndi/ulp/opensm/user/opensm/osm_prtn_config.c new file mode 100644 index 00000000..3d4411b2 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_prtn_config.c @@ -0,0 +1,447 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of opensm partition management configuration + * + * Environment: + * Linux User Mode + * + * $Revision$ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#if __WORDSIZE == 64 +#define STRTO_IB_NET64(str, end, base) strtoul(str, end, base) +#else +#define STRTO_IB_NET64(str, end, base) strtoull(str, end, base) +#endif + +/* + */ +struct part_conf { + osm_log_t *p_log; + osm_subn_t *p_subn; + osm_prtn_t *p_prtn; + unsigned is_ipoib, mtu, rate, sl, scope; +}; + +extern osm_prtn_t *osm_prtn_make_new(osm_log_t *p_log, osm_subn_t *p_subn, + const char *name, uint16_t pkey); +extern ib_api_status_t osm_prtn_add_all(osm_log_t *p_log, + osm_subn_t *p_subn, + osm_prtn_t *p, boolean_t full); +extern ib_api_status_t osm_prtn_add_port(osm_log_t *p_log, + osm_subn_t *p_subn, osm_prtn_t *p, + ib_net64_t guid, boolean_t full); +extern ib_api_status_t osm_prtn_add_mcgroup(osm_log_t *p_log, + osm_subn_t *p_subn, osm_prtn_t *p, + unsigned is_ipoib, uint8_t rate, + uint8_t mtu, uint8_t scope); + +static int partition_create(unsigned lineno, struct part_conf *conf, + char *name, char *id, char *flag, char *flag_val) +{ + uint16_t pkey; + + if (!id && name && isdigit(*name)) { + id = name; + name = NULL; + } + + if (id) { + char *end; + + pkey = (uint16_t)strtoul(id, &end, 0); + if (end == id || *end) + return -1; + } else + pkey = 0; + + conf->p_prtn = osm_prtn_make_new(conf->p_log, conf->p_subn, + name, cl_hton16(pkey)); + if (!conf->p_prtn) + return -1; + + if (conf->p_subn->opt.no_qos) { + if (conf->sl != OSM_DEFAULT_SL) { + osm_log(conf->p_log, OSM_LOG_ERROR, + "partition_create: Overriding SL %d to default SL %d on partition %s as QoS not enabled\n", + conf->sl, OSM_DEFAULT_SL, name); + conf->sl = OSM_DEFAULT_SL; + } + } + conf->p_prtn->sl = (uint8_t)conf->sl; + + if (conf->is_ipoib) + osm_prtn_add_mcgroup(conf->p_log, conf->p_subn, conf->p_prtn, + conf->is_ipoib, (uint8_t)conf->rate, + (uint8_t)conf->mtu, (uint8_t)conf->scope); + + return 0; +} + +static int partition_add_flag(unsigned lineno, struct part_conf *conf, + char *flag, char *val) +{ + int len = strlen(flag); + if (!strncmp(flag, "ipoib", len)) { + conf->is_ipoib = 1; + } else if (!strncmp(flag, "mtu", len)) { + if (!val || (conf->mtu = strtoul(val, NULL, 0)) == 0) + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "flag \'mtu\' requires valid value" + " - skipped\n", lineno); + } else if (!strncmp(flag, "rate", len)) { + if (!val || (conf->rate = strtoul(val, NULL, 0)) == 0) + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "flag \'rate\' requires valid value" + " - skipped\n", lineno); + } else if (!strncmp(flag, "scope", len)) { + if (!val || (conf->scope = strtoul(val, NULL, 0)) == 0) + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "flag \'scope\' requires valid value" + " - skipped\n", lineno); + } else if (!strncmp(flag, "sl", len)) { + unsigned sl; + char *end; + + if (!val || !*val || (sl = strtoul(val, &end, 0)) > 15 || + (*end && !isspace(*end))) + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "flag \'sl\' 
requires valid value" + " - skipped\n", lineno); + else + conf->sl = sl; + } else { + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "unrecognized partition flag \'%s\'" + " - ignored\n", lineno, flag); + } + return 0; +} + +static int partition_add_port(unsigned lineno, struct part_conf *conf, + char *name, char *flag) +{ + osm_prtn_t *p = conf->p_prtn; + ib_net64_t guid; + boolean_t full = FALSE; + + if (!name || !*name || !strncmp(name, "NONE", strlen(name))) + return 0; + + if (flag) { + if (!strncmp(flag, "full", strlen(flag))) + full = TRUE; + else if (strncmp(flag, "limited", strlen(flag))) { + osm_log(conf->p_log, OSM_LOG_VERBOSE, + "PARSE WARN: line %d: " + "unrecognized port flag \'%s\'." + " Assume \'limited\'\n", lineno, flag); + } + } + + if (!strncmp(name, "ALL", strlen(name))) { + return osm_prtn_add_all(conf->p_log, conf->p_subn, p, + full) == IB_SUCCESS ? 0 : -1; + } else if (!strncmp(name, "SELF", strlen(name))) { + guid = cl_ntoh64(conf->p_subn->sm_port_guid); + } else { + char *end; + guid = STRTO_IB_NET64(name, &end, 0); + if (!guid || *end) + return -1; + } + + if (osm_prtn_add_port(conf->p_log, conf->p_subn, p, + cl_hton64(guid), full) != IB_SUCCESS) + return -1; + + return 0; +} + +/* conf file parser */ + +#define STRIP_HEAD_SPACES(p) while (*(p) == ' ' || *(p) == '\t' || \ + *(p) == '\n') { (p)++; } +#define STRIP_TAIL_SPACES(p) { char *q = (p) + strlen(p); \ + while ( q != (p) && ( *q == '\0' || \ + *q == ' ' || *q == '\t' || \ + *q == '\n')) { *q-- = '\0'; }; } + +static int parse_name_token(char *str, char **name, char **val) +{ + int len = 0; + char *p, *q; + + *name = *val = NULL; + + p = str; + + while (*p == ' ' || *p == '\t' || *p == '\n') + p++; + + q = strchr(p, '='); + if (q) + *q++ = '\0'; + + len = strlen(str) + 1; + str = q; + + q = p + strlen(p); + while ( q != p && + ( *q == '\0' || *q == ' ' || *q == '\t' || *q == '\n')) + *q-- = '\0'; + + *name = p; + + p = str; + if (!p) + return len; + + while (*p == ' ' || *p == '\t' || *p == '\n') + p++; + + q = p + strlen(p); + len += (int)(q - str) + 1; + while ( q != p && + ( *q == '\0' || *q == ' ' || *q == '\t' || *q == '\n')) + *q-- = '\0'; + *val = p; + + return len; +} + +static struct part_conf *new_part_conf(osm_log_t *p_log, osm_subn_t *p_subn) +{ + static struct part_conf part; + struct part_conf *conf = ∂ + + memset(conf, 0, sizeof(*conf)); + conf->p_log = p_log; + conf->p_subn = p_subn; + conf->p_prtn = NULL; + conf->is_ipoib = 0; + conf->sl = OSM_DEFAULT_SL; + return conf; +} + +static int flush_part_conf(struct part_conf *conf) +{ + memset(conf, 0, sizeof(*conf)); + return 0; +} + +static int parse_part_conf(struct part_conf *conf, char *str, int lineno) +{ + int ret, len = 0; + char *name, *id, *flag, *flval; + char *q, *p; + + p = str; + if (*p == '\t' || *p == '\0' || *p == '\n') + p++; + + len += (int)(p - str); + str = p; + + if (conf->p_prtn) + goto skip_header; + + q = strchr(p, ':'); + if (!q) { + osm_log(conf->p_log, OSM_LOG_ERROR, + "PARSE ERROR: line %d: " + "no partition definition found\n", lineno); + fprintf(stderr, "\nPARSE ERROR: line %d: " + "no partition definition found\n", lineno); + return -1; + } + + *q++ = '\0'; + str = q; + + name = id = flag = flval = NULL; + + q = strchr(p, ','); + if (q) + *q = '\0'; + + ret = parse_name_token(p, &name, &id); + p += ret; + len += ret; + + while (q) { + flag = flval = NULL; + q = strchr(p, ','); + if (q) + *q++ = '\0'; + ret = parse_name_token(p, &flag, &flval); + if (!flag) { + osm_log(conf->p_log, OSM_LOG_ERROR, + 
"PARSE ERROR: line %d: " + "bad partition flags\n",lineno); + fprintf(stderr, "\nPARSE ERROR: line %d: " + "bad partition flags\n",lineno); + return -1; + } + p += ret; + len += ret; + partition_add_flag(lineno, conf, flag, flval); + } + + if (p != str || (partition_create(lineno, conf, + name, id, flag, flval) < 0)) { + osm_log(conf->p_log, OSM_LOG_ERROR, + "PARSE ERROR: line %d: " + "bad partition definition\n", lineno); + fprintf(stderr, "\nPARSE ERROR: line %d: " + "bad partition definition\n", lineno); + return -1; + } + + skip_header: + do { + name = flag = NULL; + q = strchr(p, ','); + if (q) + *q++ = '\0'; + ret = parse_name_token(p, &name, &flag); + if (partition_add_port(lineno, conf, name, flag) < 0) { + osm_log(conf->p_log, OSM_LOG_ERROR, + "PARSE ERROR: line %d: " + "bad PortGUID\n", lineno); + fprintf(stderr, "PARSE ERROR: line %d: " + "bad PortGUID\n", lineno); + return -1; + } + p += ret; + len += ret; + } while (q); + + return len; +} + +int osm_prtn_config_parse_file(osm_log_t *p_log, osm_subn_t *p_subn, + const char *file_name) +{ + char line[1024]; + struct part_conf *conf = NULL; + FILE *file; + int lineno; + + file = fopen(file_name, "r"); + if (!file) { + osm_log(p_log, OSM_LOG_VERBOSE, + "osm_prtn_config_parse_file: " + "Cannot open config file \'%s\': %s\n", + file_name, strerror(errno)); + return -1; + } + + lineno = 0; + + while (fgets(line, sizeof(line) - 1, file) != NULL) { + char *q, *p = line; + + lineno++; + + p = line; + + q = strchr(p, '#'); + if (q) + *q = '\0'; + + do { + int len; + while (*p == ' ' || *p == '\t' || *p == '\n') + p++; + if (*p == '\0') + break; + + if (!conf && + !(conf = new_part_conf(p_log, p_subn))) { + osm_log(conf->p_log, OSM_LOG_ERROR, + "PARSE ERROR: line %d: " + "internal: cannot create config\n", lineno); + fprintf(stderr, "PARSE ERROR: line %d: " + "internal: cannot create config\n", lineno); + break; + } + + q = strchr(p, ';'); + if (q) + *q = '\0'; + + len = parse_part_conf(conf, p, lineno); + if (len < 0) { + break; + } + + p += len; + + if (q) { + flush_part_conf(conf); + conf = NULL; + } + } while (q); + } + + fclose(file); + + return 0; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_qos.c b/branches/Ndi/ulp/opensm/user/opensm/osm_qos.c new file mode 100644 index 00000000..fe3b9534 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_qos.c @@ -0,0 +1,451 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of OpenSM QoS infrastructure primitives + * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include + +#include +#include +#include +#include +#include + +struct qos_config { + uint8_t max_vls; + uint8_t vl_high_limit; + ib_vl_arb_table_t vlarb_high[2]; + ib_vl_arb_table_t vlarb_low[2]; + ib_slvl_table_t sl2vl; +}; + +static void qos_build_config(struct qos_config * cfg, + osm_qos_options_t * opt, osm_qos_options_t * dflt); + +/* + * QoS primitives + */ +static ib_api_status_t vlarb_update_table_block(osm_req_t * p_req, + osm_physp_t * p, + uint8_t port_num, + const ib_vl_arb_table_t *table_block, + unsigned block_length, + unsigned block_num) +{ + ib_vl_arb_table_t block; + osm_madw_context_t context; + uint32_t attr_mod; + ib_port_info_t *p_pi; + unsigned vl_mask, i; + + if (!(p_pi = osm_physp_get_port_info_ptr(p))) + return IB_ERROR; + + vl_mask = (1 << (ib_port_info_get_op_vls(p_pi) - 1)) - 1; + + memset(&block, 0, sizeof(block)); + memcpy(&block, table_block, + block_length * sizeof(block.vl_entry[0])); + for (i = 0; i < block_length; i++) + block.vl_entry[i].vl &= vl_mask; + + if (!memcmp(&p->vl_arb[block_num], &block, + block_length * sizeof(block.vl_entry[0]))) + return IB_SUCCESS; + + context.vla_context.node_guid = + osm_node_get_node_guid(osm_physp_get_node_ptr(p)); + context.vla_context.port_guid = osm_physp_get_port_guid(p); + context.vla_context.set_method = TRUE; + attr_mod = ((block_num + 1) << 16) | port_num; + + return osm_req_set(p_req, osm_physp_get_dr_path_ptr(p), + (uint8_t *) & block, sizeof(block), + IB_MAD_ATTR_VL_ARBITRATION, + cl_hton32(attr_mod), CL_DISP_MSGID_NONE, &context); +} + +static ib_api_status_t vlarb_update(osm_req_t * p_req, + osm_physp_t * p, uint8_t port_num, + const struct qos_config *qcfg) +{ + ib_api_status_t status = IB_SUCCESS; + ib_port_info_t *p_pi; + unsigned len; + + if (!(p_pi = osm_physp_get_port_info_ptr(p))) + return IB_ERROR; + + if (p_pi->vl_arb_low_cap > 0) { + len = p_pi->vl_arb_low_cap < IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK ? + p_pi->vl_arb_low_cap : IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; + if ((status = vlarb_update_table_block(p_req, p, port_num, + &qcfg->vlarb_low[0], + len, 0)) != IB_SUCCESS) + return status; + } + if (p_pi->vl_arb_low_cap > IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK) { + len = p_pi->vl_arb_low_cap % IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; + if ((status = vlarb_update_table_block(p_req, p, port_num, + &qcfg->vlarb_low[1], + len, 1)) != IB_SUCCESS) + return status; + } + if (p_pi->vl_arb_high_cap > 0) { + len = p_pi->vl_arb_high_cap < IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK ? 
+ p_pi->vl_arb_high_cap : IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; + if ((status = vlarb_update_table_block(p_req, p, port_num, + &qcfg->vlarb_high[0], + len, 2)) != IB_SUCCESS) + return status; + } + if (p_pi->vl_arb_high_cap > IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK) { + len = p_pi->vl_arb_high_cap % IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; + if ((status = vlarb_update_table_block(p_req, p, port_num, + &qcfg->vlarb_high[1], + len, 3)) != IB_SUCCESS) + return status; + } + + return status; +} + +static ib_api_status_t sl2vl_update_table(osm_req_t * p_req, + osm_physp_t * p, uint8_t in_port, + uint8_t out_port, + const ib_slvl_table_t * sl2vl_table) +{ + osm_madw_context_t context; + ib_slvl_table_t tbl, *p_tbl; + osm_node_t *p_node = osm_physp_get_node_ptr(p); + uint32_t attr_mod; + ib_port_info_t *p_pi; + unsigned vl_mask; + uint8_t vl1, vl2; + int i; + + if (!(p_pi = osm_physp_get_port_info_ptr(p))) + return IB_ERROR; + + vl_mask = (1 << (ib_port_info_get_op_vls(p_pi) - 1)) - 1; + + for (i = 0; i < IB_MAX_NUM_VLS / 2; i++) { + vl1 = sl2vl_table->raw_vl_by_sl[i] >> 4; + vl2 = sl2vl_table->raw_vl_by_sl[i] & 0xf; + if (vl1 != 15) + vl1 &= vl_mask; + if (vl2 != 15) + vl2 &= vl_mask; + tbl.raw_vl_by_sl[i] = (vl1 << 4 ) | vl2 ; + } + + p_tbl = osm_physp_get_slvl_tbl(p, in_port); + if (p_tbl && !memcmp(p_tbl, &tbl, sizeof(tbl))) + return IB_SUCCESS; + + context.slvl_context.node_guid = osm_node_get_node_guid(p_node); + context.slvl_context.port_guid = osm_physp_get_port_guid(p); + context.slvl_context.set_method = TRUE; + attr_mod = in_port << 8 | out_port; + return osm_req_set(p_req, osm_physp_get_dr_path_ptr(p), + (uint8_t *) & tbl, sizeof(tbl), + IB_MAD_ATTR_SLVL_TABLE, + cl_hton32(attr_mod), CL_DISP_MSGID_NONE, &context); +} + +static ib_api_status_t sl2vl_update(osm_req_t * p_req, osm_port_t * p_port, + osm_physp_t * p, uint8_t port_num, + const struct qos_config *qcfg) +{ + ib_api_status_t status; + uint8_t i, num_ports; + ib_port_info_t *p_pi = osm_physp_get_port_info_ptr(p); + osm_physp_t *p_physp; + + if (!p_pi) + return IB_ERROR; + + if (osm_node_get_type(osm_physp_get_node_ptr(p)) == IB_NODE_TYPE_SWITCH) { + if (ib_port_info_get_vl_cap(p_pi) == 1) { + /* Check port 0's capability mask */ + p_physp = osm_port_get_default_phys_ptr(p_port); + p_pi = osm_physp_get_port_info_ptr(p_physp); + if (!(p_pi->capability_mask & IB_PORT_CAP_HAS_SL_MAP)) + return IB_SUCCESS; + } + num_ports = osm_node_get_num_physp(osm_physp_get_node_ptr(p)); + } else { + if (!(p_pi->capability_mask & IB_PORT_CAP_HAS_SL_MAP)) + return IB_SUCCESS; + num_ports = 1; + } + + for (i = 0; i < num_ports; i++) { + status = + sl2vl_update_table(p_req, p, i, port_num, &qcfg->sl2vl); + if (status != IB_SUCCESS) + return status; + } + + return IB_SUCCESS; +} + +static ib_api_status_t vl_high_limit_update(osm_req_t * p_req, + osm_physp_t * p, + const struct qos_config *qcfg) +{ + uint8_t payload[IB_SMP_DATA_SIZE]; + osm_madw_context_t context; + ib_port_info_t *p_pi; + + if (!(p_pi = osm_physp_get_port_info_ptr(p))) + return IB_ERROR; + + if (p_pi->vl_high_limit == qcfg->vl_high_limit) + return IB_SUCCESS; + + memset(payload, 0, IB_SMP_DATA_SIZE); + memcpy(payload, p_pi, sizeof(ib_port_info_t)); + + p_pi = (ib_port_info_t *) payload; + ib_port_info_set_state_no_change(p_pi); + + p_pi->vl_high_limit = qcfg->vl_high_limit; + + context.pi_context.node_guid = + osm_node_get_node_guid(osm_physp_get_node_ptr(p)); + context.pi_context.port_guid = osm_physp_get_port_guid(p); + context.pi_context.set_method = TRUE; + 
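/* the remaining pi_context flags stay FALSE: the PortInfo receive + path should treat this Set purely as a VLHighLimit update, with no + master SM base LID tracking, error suppression, light sweep or + active transition handling */ + 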
context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + return osm_req_set(p_req, osm_physp_get_dr_path_ptr(p), + payload, sizeof(payload), IB_MAD_ATTR_PORT_INFO, + cl_hton32(osm_physp_get_port_num(p)), + CL_DISP_MSGID_NONE, &context); +} + +static ib_api_status_t qos_physp_setup(osm_log_t * p_log, osm_req_t * p_req, + osm_port_t * p_port, osm_physp_t * p, + uint8_t port_num, + const struct qos_config *qcfg) +{ + ib_api_status_t status; + + /* OpVLs should be ok at this moment - just use it */ + + /* setup VL high limit */ + status = vl_high_limit_update(p_req, p, qcfg); + if (status != IB_SUCCESS) { + osm_log(p_log, OSM_LOG_ERROR, + "qos_physp_setup: ERR 6201 : " + "failed to update VLHighLimit " + "for port %" PRIx64 " #%d\n", + cl_ntoh64(p->port_guid), port_num); + return status; + } + + /* setup VLArbitration */ + status = vlarb_update(p_req, p, port_num, qcfg); + if (status != IB_SUCCESS) { + osm_log(p_log, OSM_LOG_ERROR, + "qos_physp_setup: ERR 6202 : " + "failed to update VLArbitration tables " + "for port %" PRIx64 " #%d\n", + cl_ntoh64(p->port_guid), port_num); + return status; + } + + /* setup SL2VL tables */ + status = sl2vl_update(p_req, p_port, p, port_num, qcfg); + if (status != IB_SUCCESS) { + osm_log(p_log, OSM_LOG_ERROR, + "qos_physp_setup: ERR 6203 : " + "failed to update SL2VLMapping tables " + "for port %" PRIx64 " #%d\n", + cl_ntoh64(p->port_guid), port_num); + return status; + } + + return IB_SUCCESS; +} + +osm_signal_t osm_qos_setup(osm_opensm_t * p_osm) +{ + struct qos_config ca_config, sw0_config, swe_config, rtr_config; + struct qos_config *cfg; + cl_qmap_t *p_tbl; + cl_map_item_t *p_next; + osm_port_t *p_port; + uint32_t num_physp; + osm_physp_t *p_physp; + osm_node_t *p_node; + ib_api_status_t status; + uint8_t i; + + if (p_osm->subn.opt.no_qos) + return OSM_SIGNAL_DONE; + + OSM_LOG_ENTER(&p_osm->log, osm_qos_setup); + + qos_build_config(&ca_config, &p_osm->subn.opt.qos_ca_options, + &p_osm->subn.opt.qos_options); + qos_build_config(&sw0_config, &p_osm->subn.opt.qos_sw0_options, + &p_osm->subn.opt.qos_options); + qos_build_config(&swe_config, &p_osm->subn.opt.qos_swe_options, + &p_osm->subn.opt.qos_options); + qos_build_config(&rtr_config, &p_osm->subn.opt.qos_rtr_options, + &p_osm->subn.opt.qos_options); + + cl_plock_excl_acquire(&p_osm->lock); + + p_tbl = &p_osm->subn.port_guid_tbl; + p_next = cl_qmap_head(p_tbl); + while (p_next != cl_qmap_end(p_tbl)) { + p_port = (osm_port_t *) p_next; + p_next = cl_qmap_next(p_next); + + p_node = p_port->p_node; + if (p_node->sw) { + num_physp = osm_port_get_num_physp(p_port); + for (i = 1; i < num_physp; i++) { + p_physp = osm_port_get_phys_ptr(p_port, i); + if (!p_physp || !osm_physp_is_valid(p_physp)) + continue; + status = + qos_physp_setup(&p_osm->log, &p_osm->sm.req, + p_port, p_physp, i, &swe_config); + } + /* skip base port 0 */ + if (!ib_switch_info_is_enhanced_port0(&p_node->sw->switch_info)) + continue; + + cfg = &sw0_config; + } else if (osm_node_get_type(p_node) == IB_NODE_TYPE_ROUTER) + cfg = &rtr_config; + else + cfg = &ca_config; + + p_physp = osm_port_get_default_phys_ptr(p_port); + if (!osm_physp_is_valid(p_physp)) + continue; + + status = qos_physp_setup(&p_osm->log, &p_osm->sm.req, + p_port, p_physp, 0, cfg); + } + + cl_plock_release(&p_osm->lock); + OSM_LOG_EXIT(&p_osm->log); + + return OSM_SIGNAL_DONE; +} + +/* + * QoS config stuff + */ +static int 
parse_one_unsigned(char *str, char delim, unsigned *val) +{ + char *end; + /* delim is informational only: whatever single character follows + the parsed number is skipped */ + *val = strtoul(str, &end, 0); + if (*end) + end++; + return (int)(end - str); +} + +static int parse_vlarb_entry(char *str, ib_vl_arb_element_t * e) +{ + unsigned val; + char *p = str; + p += parse_one_unsigned(p, ':', &val); + e->vl = val % 15; + p += parse_one_unsigned(p, ',', &val); + e->weight = (uint8_t)val; + return (int)(p - str); +} + +static int parse_sl2vl_entry(char *str, uint8_t * raw) +{ + unsigned val1, val2; + char *p = str; + p += parse_one_unsigned(p, ',', &val1); + p += parse_one_unsigned(p, ',', &val2); + *raw = (val1 << 4) | (val2 & 0xf); + return (int)(p - str); +} + +static void qos_build_config(struct qos_config *cfg, + osm_qos_options_t * opt, osm_qos_options_t * dflt) +{ + int i; + char *p; + + memset(cfg, 0, sizeof(*cfg)); + + cfg->max_vls = opt->max_vls > 0 ? opt->max_vls : dflt->max_vls; + cfg->vl_high_limit = (uint8_t)opt->high_limit; + + p = opt->vlarb_high ? opt->vlarb_high : dflt->vlarb_high; + for (i = 0; i < 2 * IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; i++) { + p += parse_vlarb_entry(p, + &cfg->vlarb_high[i/IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]. + vl_entry[i%IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]); + } + + p = opt->vlarb_low ? opt->vlarb_low : dflt->vlarb_low; + for (i = 0; i < 2 * IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK; i++) { + p += parse_vlarb_entry(p, + &cfg->vlarb_low[i/IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]. + vl_entry[i%IB_NUM_VL_ARB_ELEMENTS_IN_BLOCK]); + } + + p = opt->sl2vl ? opt->sl2vl : dflt->sl2vl; + for (i = 0; i < IB_MAX_NUM_VLS / 2; i++) + p += parse_sl2vl_entry(p, &cfg->sl2vl.raw_vl_by_sl[i]); + +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_remote_sm.c b/branches/Ndi/ulp/opensm/user/opensm/osm_remote_sm.c new file mode 100644 index 00000000..4b0cefeb --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_remote_sm.c @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_remote_sm_t. + * This object represents the remote SM object. + * This object is part of the opensm family of objects.
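+ * It caches the osm_port_t and the SMInfo attribute of another SM + * discovered on the subnet.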
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_remote_sm_construct( + IN osm_remote_sm_t* const p_sm ) +{ + memset( p_sm, 0, sizeof(*p_sm) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_remote_sm_destroy( + IN osm_remote_sm_t* const p_sm ) +{ + memset( p_sm, 0, sizeof(*p_sm) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_remote_sm_init( + IN osm_remote_sm_t* const p_sm, + IN const osm_port_t* const p_port, + IN const ib_sm_info_t* const p_smi ) +{ + CL_ASSERT( p_sm ); + CL_ASSERT( p_port ); + + osm_remote_sm_construct( p_sm ); + + p_sm->p_port = p_port; + p_sm->smi = *p_smi; + return; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_req.c b/branches/Ndi/ulp/opensm/user/opensm/osm_req.c new file mode 100644 index 00000000..f428315f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_req.c @@ -0,0 +1,298 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_req_t. + * This object represents the generic attribute requester. + * This object is part of the opensm family of objects. 
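+ * It allocates SMP MADs from the MAD pool, initializes them as + * directed route Get/Set requests, and posts them to the VL15 + * interface for transmission.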
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_req_construct( + IN osm_req_t* const p_req ) +{ + CL_ASSERT( p_req ); + + memset( p_req, 0, sizeof(*p_req) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_req_destroy( + IN osm_req_t* const p_req ) +{ + CL_ASSERT( p_req ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_req_init( + IN osm_req_t* const p_req, + IN osm_mad_pool_t* const p_pool, + IN osm_vl15_t* const p_vl15, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN atomic32_t* const p_sm_trans_id ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_req_init ); + + osm_req_construct( p_req ); + p_req->p_log = p_log; + + p_req->p_pool = p_pool; + p_req->p_vl15 = p_vl15; + p_req->p_subn = p_subn; + p_req->p_sm_trans_id = p_sm_trans_id; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + The plock MAY or MAY NOT be held before calling this function. +**********************************************************************/ +ib_api_status_t +osm_req_get( + IN const osm_req_t* const p_req, + IN const osm_dr_path_t* const p_path, + IN const uint16_t attr_id, + IN const uint32_t attr_mod, + IN const cl_disp_msgid_t err_msg, + IN const osm_madw_context_t* const p_context ) +{ + osm_madw_t *p_madw; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t tid; + + CL_ASSERT( p_req ); + + OSM_LOG_ENTER( p_req->p_log, osm_req_get ); + + CL_ASSERT( p_path ); + CL_ASSERT( attr_id ); + + /* do nothing if we are exiting ... */ + if (osm_exit_flag) + goto Exit; + + /* p_context may be NULL. */ + + p_madw = osm_mad_pool_get( + p_req->p_pool, + p_path->h_bind, + MAD_BLOCK_SIZE, + NULL ); + + if( p_madw == NULL ) + { + osm_log( p_req->p_log, OSM_LOG_ERROR, + "osm_req_get: ERR 1101: " + "Unable to acquire MAD\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + tid = cl_hton64( (uint64_t)cl_atomic_inc( p_req->p_sm_trans_id ) ); + + if( osm_log_is_active( p_req->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_req->p_log, OSM_LOG_DEBUG, + "osm_req_get: " + "Getting %s (0x%X), modifier 0x%X, TID 0x%" PRIx64 "\n", + ib_get_sm_attr_str( attr_id ), + cl_ntoh16( attr_id ), + cl_ntoh32( attr_mod ), + cl_ntoh64( tid ) ); + } + + ib_smp_init_new( + osm_madw_get_smp_ptr( p_madw ), + IB_MAD_METHOD_GET, + tid, + attr_id, + attr_mod, + p_path->hop_count, + p_req->p_subn->opt.m_key, + p_path->path, + IB_LID_PERMISSIVE, + IB_LID_PERMISSIVE ); + + p_madw->mad_addr.dest_lid = IB_LID_PERMISSIVE; + p_madw->mad_addr.addr_type.smi.source_lid = IB_LID_PERMISSIVE; + p_madw->resp_expected = TRUE; + p_madw->fail_msg = err_msg; + + /* + Fill in the mad wrapper context for the recipient. + In this case, the only thing the recipient needs is the + guid value. 
+ */ + + if( p_context ) + p_madw->context = *p_context; + + osm_vl15_post( p_req->p_vl15, p_madw ); + + Exit: + OSM_LOG_EXIT( p_req->p_log ); + return( status ); +} + +/********************************************************************** + The plock MAY or MAY NOT be held before calling this function. +**********************************************************************/ +ib_api_status_t +osm_req_set( + IN const osm_req_t* const p_req, + IN const osm_dr_path_t* const p_path, + IN const uint8_t* const p_payload, + IN const size_t payload_size, + IN const uint16_t attr_id, + IN const uint32_t attr_mod, + IN const cl_disp_msgid_t err_msg, + IN const osm_madw_context_t* const p_context ) +{ + osm_madw_t *p_madw; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t tid; + + CL_ASSERT( p_req ); + + OSM_LOG_ENTER( p_req->p_log, osm_req_set ); + + CL_ASSERT( p_path ); + CL_ASSERT( attr_id ); + CL_ASSERT( p_payload ); + + /* do nothing if we are exiting ... */ + if (osm_exit_flag) + goto Exit; + + /* p_context may be NULL. */ + + p_madw = osm_mad_pool_get( + p_req->p_pool, + p_path->h_bind, + MAD_BLOCK_SIZE, + NULL ); + + if( p_madw == NULL ) + { + osm_log( p_req->p_log, OSM_LOG_ERROR, + "osm_req_set: ERR 1102: " + "Unable to acquire MAD\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + tid = cl_hton64( (uint64_t)cl_atomic_inc( p_req->p_sm_trans_id ) ); + + if( osm_log_is_active( p_req->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_req->p_log, OSM_LOG_DEBUG, + "osm_req_set: " + "Setting %s (0x%X), modifier 0x%X, TID 0x%" PRIx64 "\n", + ib_get_sm_attr_str( attr_id ), + cl_ntoh16( attr_id ), + cl_ntoh32( attr_mod ), + cl_ntoh64( tid ) ); + } + + ib_smp_init_new( + osm_madw_get_smp_ptr( p_madw ), + IB_MAD_METHOD_SET, + tid, + attr_id, + attr_mod, + p_path->hop_count, + p_req->p_subn->opt.m_key, + p_path->path, + IB_LID_PERMISSIVE, + IB_LID_PERMISSIVE ); + + p_madw->mad_addr.dest_lid = IB_LID_PERMISSIVE; + p_madw->mad_addr.addr_type.smi.source_lid = IB_LID_PERMISSIVE; + p_madw->resp_expected = TRUE; + p_madw->fail_msg = err_msg; + + /* + Fill in the mad wrapper context for the recipient. + In this case, the only thing the recipient needs is the + guid value. + */ + + if( p_context ) + p_madw->context = *p_context; + + memcpy( osm_madw_get_smp_ptr( p_madw )->data, + p_payload, payload_size ); + + osm_vl15_post( p_req->p_vl15, p_madw ); + + Exit: + OSM_LOG_EXIT( p_req->p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_req_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_req_ctrl.c new file mode 100644 index 00000000..97274389 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_req_ctrl.c @@ -0,0 +1,136 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_req_ctrl_t. + * This object represents the request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_req_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_req_get( ((osm_req_ctrl_t*)context)->p_req, + (&((osm_attrib_req_t*)p_data)->path), + ((osm_attrib_req_t*)p_data)->attrib_id, + ((osm_attrib_req_t*)p_data)->attrib_mod, + ((osm_attrib_req_t*)p_data)->err_msg, + (&((osm_attrib_req_t*)p_data)->context) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_req_ctrl_construct( + IN osm_req_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_req_ctrl_destroy( + IN osm_req_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_req_ctrl_init( + IN osm_req_ctrl_t* const p_ctrl, + IN osm_req_t* const p_req, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_req_ctrl_init ); + + osm_req_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_disp = p_disp; + p_ctrl->p_req = p_req; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_REQ, + __osm_req_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_req_ctrl_init: ERR 1202: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_resp.c b/branches/Ndi/ulp/opensm/user/opensm/osm_resp.c new file mode 100644 index 00000000..aff8198a --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_resp.c @@ -0,0 +1,227 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_resp_t. + * This object represents the generic attribute responder. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_resp_construct( + IN osm_resp_t* const p_resp ) +{ + memset( p_resp, 0, sizeof(*p_resp) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_resp_destroy( + IN osm_resp_t* const p_resp ) +{ + CL_ASSERT( p_resp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_resp_init( + IN osm_resp_t* const p_resp, + IN osm_mad_pool_t* const p_pool, + IN osm_vl15_t* const p_vl15, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_resp_init ); + + osm_resp_construct( p_resp ); + + p_resp->p_log = p_log; + p_resp->p_pool = p_pool; + p_resp->p_vl15 = p_vl15; + p_resp->p_subn = p_subn; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_resp_make_resp_smp( + IN const osm_resp_t* const p_resp, + IN const ib_smp_t* const p_src_smp, + IN const ib_net16_t status, + IN const uint8_t* const p_payload, + OUT ib_smp_t* const p_dest_smp ) +{ + OSM_LOG_ENTER( p_resp->p_log, osm_resp_make_resp_smp ); + + CL_ASSERT( p_dest_smp ); + CL_ASSERT( p_src_smp ); + CL_ASSERT( !ib_smp_is_response( p_src_smp ) ); + + *p_dest_smp = *p_src_smp; + if (p_src_smp->method == IB_MAD_METHOD_GET || + p_src_smp->method == IB_MAD_METHOD_SET ) { + p_dest_smp->method = IB_MAD_METHOD_GET_RESP; + p_dest_smp->status = status; + } + else if (p_src_smp->method == IB_MAD_METHOD_TRAP) + { + p_dest_smp->method = IB_MAD_METHOD_TRAP_REPRESS; + p_dest_smp->status = 0; + } + else + { + osm_log( 
p_resp->p_log, OSM_LOG_ERROR, + "osm_resp_make_resp_smp: ERR 1302: " + "src smp method unsupported 0x%X\n", + p_src_smp->method ); + goto Exit; + } + + if (p_src_smp->mgmt_class == IB_MCLASS_SUBN_DIR) + p_dest_smp->status |= IB_SMP_DIRECTION; + + /* swap the directed route LIDs, reading them from the request SMP: + after the struct copy above, reading them back from p_dest_smp + would leave both fields equal to the original dr_slid */ + p_dest_smp->dr_dlid = p_src_smp->dr_slid; + p_dest_smp->dr_slid = p_src_smp->dr_dlid; + memcpy( &p_dest_smp->data, p_payload, IB_SMP_DATA_SIZE ); + + Exit: + OSM_LOG_EXIT( p_resp->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_resp_send( + IN const osm_resp_t* const p_resp, + IN const osm_madw_t* const p_req_madw, + IN const ib_net16_t mad_status, + IN const uint8_t* const p_payload ) +{ + const ib_smp_t* p_req_smp; + ib_smp_t* p_smp; + osm_madw_t* p_madw; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_resp->p_log, osm_resp_send ); + + CL_ASSERT( p_req_madw ); + CL_ASSERT( p_payload ); + + /* do nothing if we are exiting ... */ + if (osm_exit_flag) + goto Exit; + + p_madw = osm_mad_pool_get( + p_resp->p_pool, + osm_madw_get_bind_handle( p_req_madw ), + MAD_BLOCK_SIZE, + NULL ); + + if( p_madw == NULL ) + { + osm_log( p_resp->p_log, OSM_LOG_ERROR, + "osm_resp_send: ERR 1301: " + "Unable to acquire MAD\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + /* + Copy the request smp to the response smp, then just + update the necessary fields. + */ + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_req_smp = osm_madw_get_smp_ptr( p_req_madw ); + osm_resp_make_resp_smp( p_resp, p_req_smp, mad_status, + p_payload, p_smp ); + p_madw->mad_addr.dest_lid = + p_req_madw->mad_addr.addr_type.smi.source_lid; + p_madw->mad_addr.addr_type.smi.source_lid = + p_req_madw->mad_addr.dest_lid; + + p_madw->resp_expected = FALSE; + p_madw->fail_msg = CL_DISP_MSGID_NONE; + + if( osm_log_is_active( p_resp->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_resp->p_log, OSM_LOG_DEBUG, + "osm_resp_send: " + "Responding to %s (0x%X)" + "\n\t\t\t\tattribute modifier 0x%X, TID 0x%" PRIx64 "\n", + ib_get_sm_attr_str( p_smp->attr_id ), + cl_ntoh16( p_smp->attr_id ), + cl_ntoh32( p_smp->attr_mod ), + cl_ntoh64( p_smp->trans_id ) ); + } + + osm_vl15_post( p_resp->p_vl15, p_madw ); + + Exit: + OSM_LOG_EXIT( p_resp->p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_router.c b/branches/Ndi/ulp/opensm/user/opensm/osm_router.c new file mode 100644 index 00000000..0f478b65 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_router.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_router_t. + * This object represents an Infiniband router. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_router_construct( + IN osm_router_t* const p_rtr ) +{ + CL_ASSERT( p_rtr ); + memset( p_rtr, 0, sizeof(*p_rtr) ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_router_init( + IN osm_router_t* const p_rtr, + IN osm_port_t* const p_port ) +{ + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT( p_rtr ); + CL_ASSERT( p_port ); + + osm_router_construct( p_rtr ); + + p_rtr->p_port = p_port; + + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_router_destroy( + IN osm_router_t* const p_rtr ) +{ +} + +/********************************************************************** + **********************************************************************/ +void +osm_router_delete( + IN OUT osm_router_t** const pp_rtr ) +{ + osm_router_destroy( *pp_rtr ); + free( *pp_rtr ); + *pp_rtr = NULL; +} + +/********************************************************************** + **********************************************************************/ +osm_router_t* +osm_router_new( + IN osm_port_t* const p_port ) +{ + ib_api_status_t status; + osm_router_t *p_rtr; + + p_rtr = (osm_router_t*)malloc( sizeof(*p_rtr) ); + if( p_rtr ) + { + memset( p_rtr, 0, sizeof(*p_rtr) ); + status = osm_router_init( p_rtr, p_port ); + if( status != IB_SUCCESS ) + osm_router_delete( &p_rtr ); + } + + return( p_rtr ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa.c new file mode 100644 index 00000000..3512cb27 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa.c @@ -0,0 +1,1207 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sa_t. + * This object represents the Subnet Administration object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.14 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_SA_INITIAL_TID_VALUE 0xabc + +/********************************************************************** + **********************************************************************/ +void +osm_sa_construct( + IN osm_sa_t* const p_sa ) +{ + memset( p_sa, 0, sizeof(*p_sa) ); + p_sa->state = OSM_SA_STATE_INIT; + p_sa->sa_trans_id = OSM_SA_INITIAL_TID_VALUE; + + osm_sa_mad_ctrl_construct( &p_sa->mad_ctrl ); + osm_sa_resp_construct( &p_sa->resp ); + + osm_nr_rcv_construct( &p_sa->nr_rcv); + osm_nr_rcv_ctrl_construct( &p_sa->nr_rcv_ctrl ); + + osm_pir_rcv_construct( &p_sa->pir_rcv ); + osm_pir_rcv_ctrl_construct( &p_sa->pir_rcv_ctrl ); + + osm_gir_rcv_construct( &p_sa->gir_rcv ); + osm_gir_rcv_ctrl_construct( &p_sa->gir_rcv_ctrl ); + + osm_lr_rcv_construct( &p_sa->lr_rcv ); + osm_lr_rcv_ctrl_construct( &p_sa->lr_rcv_ctrl ); + + osm_pr_rcv_construct( &p_sa->pr_rcv ); + osm_pr_rcv_ctrl_construct( &p_sa->pr_rcv_ctrl ); + +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + osm_mpr_rcv_construct( &p_sa->mpr_rcv ); + osm_mpr_rcv_ctrl_construct( &p_sa->mpr_rcv_ctrl ); +#endif + + osm_smir_rcv_construct( &p_sa->smir_rcv ); + osm_smir_ctrl_construct( &p_sa->smir_ctrl ); + + osm_mcmr_rcv_construct(&p_sa->mcmr_rcv ); + osm_mcmr_rcv_ctrl_construct(&p_sa->mcmr_rcv_ctlr); + + osm_sr_rcv_construct( &p_sa->sr_rcv ); + osm_sr_rcv_ctrl_construct( &p_sa->sr_rcv_ctrl ); + + osm_infr_rcv_construct( &p_sa->infr_rcv ); + osm_infr_rcv_ctrl_construct( &p_sa->infr_rcv_ctrl ); + + osm_vlarb_rec_rcv_construct( &p_sa->vlarb_rec_rcv ); + osm_vlarb_rec_rcv_ctrl_construct( &p_sa->vlarb_rec_rcv_ctrl ); + + osm_slvl_rec_rcv_construct( &p_sa->slvl_rec_rcv ); + osm_slvl_rec_rcv_ctrl_construct( &p_sa->slvl_rec_rcv_ctrl ); + + osm_pkey_rec_rcv_construct( &p_sa->pkey_rec_rcv ); + osm_pkey_rec_rcv_ctrl_construct( &p_sa->pkey_rec_rcv_ctrl ); + + osm_lftr_rcv_construct( &p_sa->lftr_rcv ); + osm_lftr_rcv_ctrl_construct( &p_sa->lftr_rcv_ctrl ); + + osm_sir_rcv_construct( &p_sa->sir_rcv ); + osm_sir_rcv_ctrl_construct( &p_sa->sir_rcv_ctrl ); + + osm_mftr_rcv_construct( &p_sa->mftr_rcv ); + osm_mftr_rcv_ctrl_construct( &p_sa->mftr_rcv_ctrl ); +} + +/********************************************************************** + **********************************************************************/ 
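+/* Note: shutdown unbinds the SA MAD service before destroying the + dispatcher registrations below, so no new SA queries are dispatched + during teardown. */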
+void +osm_sa_shutdown( + IN osm_sa_t* const p_sa ) +{ + ib_api_status_t status; + OSM_LOG_ENTER( p_sa->p_log, osm_sa_shutdown ); + + /* unbind from the mad service */ + status = osm_sa_mad_ctrl_unbind( &p_sa->mad_ctrl ); + + /* remove any registered dispatcher message */ + osm_nr_rcv_ctrl_destroy( &p_sa->nr_rcv_ctrl ); + osm_pir_rcv_ctrl_destroy( &p_sa->pir_rcv_ctrl ); + osm_gir_rcv_ctrl_destroy( &p_sa->gir_rcv_ctrl ); + osm_lr_rcv_ctrl_destroy( &p_sa->lr_rcv_ctrl ); + osm_pr_rcv_ctrl_destroy( &p_sa->pr_rcv_ctrl ); +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + osm_mpr_rcv_ctrl_destroy( &p_sa->mpr_rcv_ctrl ); +#endif + osm_smir_ctrl_destroy( &p_sa->smir_ctrl ); + osm_mcmr_rcv_ctrl_destroy( &p_sa->mcmr_rcv_ctlr); + osm_sr_rcv_ctrl_destroy( &p_sa->sr_rcv_ctrl ); + osm_infr_rcv_ctrl_destroy( &p_sa->infr_rcv_ctrl ); + osm_vlarb_rec_rcv_ctrl_destroy( &p_sa->vlarb_rec_rcv_ctrl ); + osm_slvl_rec_rcv_ctrl_destroy( &p_sa->slvl_rec_rcv_ctrl ); + osm_pkey_rec_rcv_ctrl_destroy( &p_sa->pkey_rec_rcv_ctrl ); + osm_lftr_rcv_ctrl_destroy( &p_sa->lftr_rcv_ctrl ); + osm_sir_rcv_ctrl_destroy( &p_sa->sir_rcv_ctrl ); + osm_mftr_rcv_ctrl_destroy( &p_sa->mftr_rcv_ctrl ); + osm_sa_mad_ctrl_destroy( &p_sa->mad_ctrl ); + + OSM_LOG_EXIT( p_sa->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sa_destroy( + IN osm_sa_t* const p_sa ) +{ + OSM_LOG_ENTER( p_sa->p_log, osm_sa_destroy ); + + p_sa->state = OSM_SA_STATE_INIT; + + osm_nr_rcv_destroy( &p_sa->nr_rcv ); + osm_pir_rcv_destroy( &p_sa->pir_rcv ); + osm_gir_rcv_destroy( &p_sa->gir_rcv ); + osm_lr_rcv_destroy( &p_sa->lr_rcv ); + osm_pr_rcv_destroy( &p_sa->pr_rcv ); +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + osm_mpr_rcv_destroy( &p_sa->mpr_rcv ); +#endif + osm_smir_rcv_destroy( &p_sa->smir_rcv ); + osm_mcmr_rcv_destroy(&p_sa->mcmr_rcv); + osm_sr_rcv_destroy( &p_sa->sr_rcv ); + osm_infr_rcv_destroy( &p_sa->infr_rcv ); + osm_vlarb_rec_rcv_destroy( &p_sa->vlarb_rec_rcv ); + osm_slvl_rec_rcv_destroy( &p_sa->slvl_rec_rcv ); + osm_pkey_rec_rcv_destroy( &p_sa->pkey_rec_rcv ); + osm_lftr_rcv_destroy( &p_sa->lftr_rcv ); + osm_sir_rcv_destroy( &p_sa->sir_rcv ); + osm_mftr_rcv_destroy( &p_sa->mftr_rcv ); + osm_sa_resp_destroy( &p_sa->resp ); + + OSM_LOG_EXIT( p_sa->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sa_init( + IN osm_sm_t* const p_sm, + IN osm_sa_t* const p_sa, + IN osm_subn_t* const p_subn, + IN osm_vendor_t* const p_vendor, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_log_t* const p_log, + IN osm_stats_t* const p_stats, + IN cl_dispatcher_t* const p_disp, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sa_init ); + + p_sa->p_subn = p_subn; + p_sa->p_vendor = p_vendor; + p_sa->p_mad_pool = p_mad_pool; + p_sa->p_log = p_log; + p_sa->p_disp = p_disp; + p_sa->p_lock = p_lock; + + p_sa->state = OSM_SA_STATE_READY; + + status = osm_sa_resp_init(&p_sa->resp, + p_sa->p_mad_pool, + p_log); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sa_mad_ctrl_init( + &p_sa->mad_ctrl, + &p_sa->resp, + p_sa->p_mad_pool, + p_sa->p_vendor, + p_subn, + p_log, + p_stats, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_cpi_rcv_init( + &p_sa->cpi_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + 
p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_cpi_rcv_ctrl_init( + &p_sa->cpi_rcv_ctrl, + &p_sa->cpi_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_nr_rcv_init( + &p_sa->nr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_nr_rcv_ctrl_init( + &p_sa->nr_rcv_ctrl, + &p_sa->nr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pir_rcv_init( + &p_sa->pir_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pir_rcv_ctrl_init( + &p_sa->pir_rcv_ctrl, + &p_sa->pir_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_gir_rcv_init( + &p_sa->gir_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_gir_rcv_ctrl_init( + &p_sa->gir_rcv_ctrl, + &p_sa->gir_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lr_rcv_init( + &p_sa->lr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lr_rcv_ctrl_init( + &p_sa->lr_rcv_ctrl, + &p_sa->lr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pr_rcv_init( + &p_sa->pr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pr_rcv_ctrl_init( + &p_sa->pr_rcv_ctrl, + &p_sa->pr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + status = osm_mpr_rcv_init( + &p_sa->mpr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mpr_rcv_ctrl_init( + &p_sa->mpr_rcv_ctrl, + &p_sa->mpr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; +#endif + + status = osm_smir_rcv_init( + &p_sa->smir_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_stats, + p_log, + p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_smir_ctrl_init( + &p_sa->smir_ctrl, + &p_sa->smir_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mcmr_rcv_init( + p_sm, + &p_sa->mcmr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mcmr_rcv_ctrl_init( + &p_sa->mcmr_rcv_ctlr, + &p_sa->mcmr_rcv, + p_log, + p_disp); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sr_rcv_init( + &p_sa->sr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sr_rcv_ctrl_init( + &p_sa->sr_rcv_ctrl, + &p_sa->sr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_infr_rcv_init( + &p_sa->infr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_infr_rcv_ctrl_init( + &p_sa->infr_rcv_ctrl, + &p_sa->infr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_vlarb_rec_rcv_init( + &p_sa->vlarb_rec_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_vlarb_rec_rcv_ctrl_init( + &p_sa->vlarb_rec_rcv_ctrl, + &p_sa->vlarb_rec_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + 
goto Exit; + + status = osm_slvl_rec_rcv_init( + &p_sa->slvl_rec_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_slvl_rec_rcv_ctrl_init( + &p_sa->slvl_rec_rcv_ctrl, + &p_sa->slvl_rec_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pkey_rec_rcv_init( + &p_sa->pkey_rec_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pkey_rec_rcv_ctrl_init( + &p_sa->pkey_rec_rcv_ctrl, + &p_sa->pkey_rec_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lftr_rcv_init( + &p_sa->lftr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lftr_rcv_ctrl_init( + &p_sa->lftr_rcv_ctrl, + &p_sa->lftr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sir_rcv_init( + &p_sa->sir_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sir_rcv_ctrl_init( + &p_sa->sir_rcv_ctrl, + &p_sa->sir_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mftr_rcv_init( + &p_sa->mftr_rcv, + &p_sa->resp, + p_sa->p_mad_pool, + p_subn, + p_log, + p_lock); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mftr_rcv_ctrl_init( + &p_sa->mftr_rcv_ctrl, + &p_sa->mftr_rcv, + p_log, + p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sa_bind( + IN osm_sa_t* const p_sa, + IN const ib_net64_t port_guid ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_sa->p_log, osm_sa_bind ); + + status = osm_sa_mad_ctrl_bind( + &p_sa->mad_ctrl, port_guid ); + + OSM_LOG_EXIT( p_sa->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +/* + * SA DB Dumper + * + */ + +struct opensm_dump_context { + osm_opensm_t *p_osm; + FILE *file; +}; + +static int +opensm_dump_to_file(osm_opensm_t *p_osm, const char *file_name, + void (*dump_func)(osm_opensm_t *p_osm, FILE *file)) +{ + char path[1024]; + FILE *file; + + snprintf(path, sizeof(path), "%s/%s", + p_osm->subn.opt.dump_files_dir, file_name); + + file = fopen(path, "w"); + if (!file) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "opensm_dump_to_file: ERR 0000: " + "cannot open file \'%s\': %s\n", + file_name, strerror(errno)); + return -1; + } + + chmod(path, S_IRUSR|S_IWUSR); + + dump_func(p_osm, file); + + fclose(file); + return 0; +} + +static void +mcast_mgr_dump_one_port(cl_map_item_t *p_map_item, void *cxt) +{ + FILE *file = ((struct opensm_dump_context *)cxt)->file; + osm_mcm_port_t *p_mcm_port = (osm_mcm_port_t *)p_map_item; + + fprintf(file, "mcm_port: " + "port_gid=0x%016" PRIx64 ":0x%016" PRIx64 " " + "scope_state=0x%02x proxy_join=0x%x" "\n\n", + cl_ntoh64(p_mcm_port->port_gid.unicast.prefix), + cl_ntoh64(p_mcm_port->port_gid.unicast.interface_id), + p_mcm_port->scope_state, + p_mcm_port->proxy_join); +} + +static void +sa_dump_one_mgrp(cl_map_item_t *p_map_item, void *cxt) +{ + struct opensm_dump_context dump_context; + osm_opensm_t *p_osm = ((struct opensm_dump_context *)cxt)->p_osm; + FILE *file = ((struct 
opensm_dump_context *)cxt)->file; + osm_mgrp_t *p_mgrp = (osm_mgrp_t *)p_map_item; + + fprintf(file, "MC Group 0x%04x %s:" + " mgid=0x%016" PRIx64 ":0x%016" PRIx64 + " port_gid=0x%016" PRIx64 ":0x%016" PRIx64 + " qkey=0x%08x mlid=0x%04x mtu=0x%02x tclass=0x%02x" + " pkey=0x%04x rate=0x%02x pkt_life=0x%02x sl_flow_hop=0x%08x" + " scope_state=0x%02x proxy_join=0x%x" "\n\n", + cl_ntoh16(p_mgrp->mlid), + p_mgrp->well_known ? " (well known)" : "", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id), + cl_ntoh64(p_mgrp->mcmember_rec.port_gid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.port_gid.unicast.interface_id), + cl_ntoh32(p_mgrp->mcmember_rec.qkey), + cl_ntoh16(p_mgrp->mcmember_rec.mlid), + p_mgrp->mcmember_rec.mtu, + p_mgrp->mcmember_rec.tclass, + cl_ntoh16(p_mgrp->mcmember_rec.pkey), + p_mgrp->mcmember_rec.rate, + p_mgrp->mcmember_rec.pkt_life, + cl_ntoh32(p_mgrp->mcmember_rec.sl_flow_hop), + p_mgrp->mcmember_rec.scope_state, + p_mgrp->mcmember_rec.proxy_join + ); + + dump_context.p_osm = p_osm; + dump_context.file = file; + + cl_qmap_apply_func(&p_mgrp->mcm_port_tbl, + mcast_mgr_dump_one_port, &dump_context); +} + +static void +sa_dump_one_inform(cl_list_item_t *p_list_item, void *cxt) +{ + FILE *file = ((struct opensm_dump_context *)cxt)->file; + osm_infr_t *p_infr = (osm_infr_t *)p_list_item; + ib_inform_info_record_t *p_iir = &p_infr->inform_record; + + fprintf(file, "InformInfo Record:" + " subscriber_gid=0x%016" PRIx64 ":0x%016" PRIx64 + " subscriber_enum=0x%x" + " InformInfo:" + " gid=0x%016" PRIx64 ":0x%016" PRIx64 + " lid_range_begin=0x%x" + " lid_range_end=0x%x" + " is_generic=0x%x" + " subscribe=0x%x" + " trap_type=0x%x" + " trap_num=0x%x" + " qpn_resp_time_val=0x%x" + " node_type=0x%06x" + " rep_addr: lid=0x%04x path_bits=0x%02x static_rate=0x%02x" + " remote_qp=0x%08x remote_qkey=0x%08x pkey=0x%04x sl=0x%02x" + "\n\n", + cl_ntoh64(p_iir->subscriber_gid.unicast.prefix), + cl_ntoh64(p_iir->subscriber_gid.unicast.interface_id), + cl_ntoh16(p_iir->subscriber_enum), + cl_ntoh64(p_iir->inform_info.gid.unicast.prefix), + cl_ntoh64(p_iir->inform_info.gid.unicast.interface_id), + cl_ntoh16(p_iir->inform_info.lid_range_begin), + cl_ntoh16(p_iir->inform_info.lid_range_end), + p_iir->inform_info.is_generic, + p_iir->inform_info.subscribe, + cl_ntoh16(p_iir->inform_info.trap_type), + cl_ntoh16(p_iir->inform_info.g_or_v.generic.trap_num), + cl_ntoh32(p_iir->inform_info.g_or_v.generic.qpn_resp_time_val), + cl_ntoh32(ib_inform_info_get_node_type(&p_iir->inform_info)), + cl_ntoh16(p_infr->report_addr.dest_lid), + p_infr->report_addr.path_bits, + p_infr->report_addr.static_rate, + cl_ntoh32(p_infr->report_addr.addr_type.gsi.remote_qp), + cl_ntoh32(p_infr->report_addr.addr_type.gsi.remote_qkey), + cl_ntoh16(p_infr->report_addr.addr_type.gsi.pkey), + p_infr->report_addr.addr_type.gsi.service_level); +} + +static void +sa_dump_one_service(cl_list_item_t *p_list_item, void *cxt) +{ + FILE *file = ((struct opensm_dump_context *)cxt)->file; + osm_svcr_t *p_svcr = (osm_svcr_t *)p_list_item; + ib_service_record_t *p_sr = &p_svcr->service_record; + + fprintf(file, "Service Record: id=0x%016" PRIx64 + " gid=0x%016" PRIx64 ":0x%016" PRIx64 + " pkey=0x%x" + " lease=0x%x" + " key=0x%02x%02x%02x%02x%02x%02x%02x%02x" + ":0x%02x%02x%02x%02x%02x%02x%02x%02x" + " name=\'%s\'" + " data8=0x%02x%02x%02x%02x%02x%02x%02x%02x" + ":0x%02x%02x%02x%02x%02x%02x%02x%02x" + " data16=0x%04x%04x%04x%04x:0x%04x%04x%04x%04x" + " data32=0x%08x%08x:0x%08x%08x" 
+ " data64=0x%016" PRIx64 ":0x%016" PRIx64 + " modified_time=0x%x lease_period=0x%x\n\n", + cl_ntoh64( p_sr->service_id ), + cl_ntoh64( p_sr->service_gid.unicast.prefix ), + cl_ntoh64( p_sr->service_gid.unicast.interface_id ), + cl_ntoh16( p_sr->service_pkey ), + cl_ntoh32( p_sr->service_lease ), + p_sr->service_key[0], p_sr->service_key[1], + p_sr->service_key[2], p_sr->service_key[3], + p_sr->service_key[4], p_sr->service_key[5], + p_sr->service_key[6], p_sr->service_key[7], + p_sr->service_key[8], p_sr->service_key[9], + p_sr->service_key[10], p_sr->service_key[11], + p_sr->service_key[12], p_sr->service_key[13], + p_sr->service_key[14], p_sr->service_key[15], + p_sr->service_name, + p_sr->service_data8[0], p_sr->service_data8[1], + p_sr->service_data8[2], p_sr->service_data8[3], + p_sr->service_data8[4], p_sr->service_data8[5], + p_sr->service_data8[6], p_sr->service_data8[7], + p_sr->service_data8[8], p_sr->service_data8[9], + p_sr->service_data8[10], p_sr->service_data8[11], + p_sr->service_data8[12], p_sr->service_data8[13], + p_sr->service_data8[14], p_sr->service_data8[15], + cl_ntoh16(p_sr->service_data16[0]), + cl_ntoh16(p_sr->service_data16[1]), + cl_ntoh16(p_sr->service_data16[2]), + cl_ntoh16(p_sr->service_data16[3]), + cl_ntoh16(p_sr->service_data16[4]), + cl_ntoh16(p_sr->service_data16[5]), + cl_ntoh16(p_sr->service_data16[6]), + cl_ntoh16(p_sr->service_data16[7]), + cl_ntoh32(p_sr->service_data32[0]), + cl_ntoh32(p_sr->service_data32[1]), + cl_ntoh32(p_sr->service_data32[2]), + cl_ntoh32(p_sr->service_data32[3]), + cl_ntoh64(p_sr->service_data64[0]), + cl_ntoh64(p_sr->service_data64[1]), + p_svcr->modified_time, p_svcr->lease_period); +} + +static void +sa_dump_all_sa(osm_opensm_t *p_osm, FILE *file) +{ + struct opensm_dump_context dump_context; + + dump_context.p_osm = p_osm; + dump_context.file = file; + osm_log(&p_osm->log, OSM_LOG_DEBUG, "sa_dump_all_sa: Dump multicast:\n"); + cl_plock_acquire(&p_osm->lock); + cl_qmap_apply_func(&p_osm->subn.mgrp_mlid_tbl, + sa_dump_one_mgrp, &dump_context); + osm_log(&p_osm->log, OSM_LOG_DEBUG, "sa_dump_all_sa: Dump inform:\n"); + cl_qlist_apply_func(&p_osm->subn.sa_infr_list, + sa_dump_one_inform, &dump_context); + osm_log(&p_osm->log, OSM_LOG_DEBUG, "sa_dump_all_sa: Dump services:\n"); + cl_qlist_apply_func(&p_osm->subn.sa_sr_list, + sa_dump_one_service, &dump_context); + cl_plock_release(&p_osm->lock); +} + +int osm_sa_db_file_dump(osm_opensm_t *p_osm) +{ + return opensm_dump_to_file(p_osm, "opensm-sa.dump", sa_dump_all_sa); +} + +/* + * SA DB Loader + * + */ + +osm_mgrp_t *load_mcgroup(osm_opensm_t *p_osm, ib_net16_t mlid, + ib_member_rec_t *p_mcm_rec, unsigned well_known) +{ + ib_net64_t comp_mask; + cl_map_item_t *p_next; + osm_mgrp_t *p_mgrp; + + cl_plock_excl_acquire(&p_osm->lock); + + if ((p_next = cl_qmap_get(&p_osm->subn.mgrp_mlid_tbl, mlid)) != + cl_qmap_end(&p_osm->subn.mgrp_mlid_tbl)) { + p_mgrp = (osm_mgrp_t *)p_next; + if (!memcmp(&p_mgrp->mcmember_rec.mgid, &p_mcm_rec->mgid, + sizeof(ib_gid_t))) { + osm_log(&p_osm->log, OSM_LOG_DEBUG, + "load_mcgroup: mgrp %04x is already here.", + cl_ntoh16(mlid)); + goto _out; + } + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "load_mcgroup: mlid %04x is already used by another " + "MC group. 
Will request clients reregistration.\n", + cl_ntoh16(mlid)); + p_mgrp = NULL; + goto _out; + } + + comp_mask = IB_MCR_COMPMASK_MTU | IB_MCR_COMPMASK_MTU_SEL + | IB_MCR_COMPMASK_RATE | IB_MCR_COMPMASK_RATE_SEL; + if (osm_mcmr_rcv_find_or_create_new_mgrp(&p_osm->sa.mcmr_rcv, + comp_mask, p_mcm_rec, + &p_mgrp) != IB_SUCCESS || + !p_mgrp || p_mgrp->mlid != mlid) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "load_mcgroup: cannot create MC group with mlid " + "0x%04x and mgid 0x%016" PRIx64 ":0x%016" PRIx64 "\n", + cl_ntoh16(mlid), + cl_ntoh64(p_mcm_rec->mgid.unicast.prefix), + cl_ntoh64(p_mcm_rec->mgid.unicast.interface_id)); + p_mgrp=NULL; + } + else if (well_known) + p_mgrp->well_known = TRUE; + + _out: + cl_plock_release(&p_osm->lock); + + return p_mgrp; +} + +static int load_svcr(osm_opensm_t *p_osm, ib_service_record_t *sr, + uint32_t modified_time, uint32_t lease_period) +{ + osm_svcr_t *p_svcr; + int ret = 0; + + cl_plock_excl_acquire(&p_osm->lock); + + if(osm_svcr_get_by_rid(&p_osm->subn, &p_osm->log, sr)) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "load_svcr ServiceRecord already exists.\n"); + goto _out; + } + + if (!(p_svcr = osm_svcr_new(sr))) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "load_svcr: cannot allocate new service struct\n"); + ret = -1; + goto _out; + } + + p_svcr->modified_time = modified_time; + p_svcr->lease_period = lease_period; + + osm_log(&p_osm->log, OSM_LOG_DEBUG, + "load_svcr: adding ServiceRecord...\n"); + + osm_svcr_insert_to_db(&p_osm->subn, &p_osm->log, p_svcr); + + if (lease_period != 0xffffffff) + cl_timer_trim(&p_osm->sa.sr_rcv.sr_timer, 1000); + + _out: + cl_plock_release(&p_osm->lock); + + return ret; +} + +static int load_infr(osm_opensm_t *p_osm, ib_inform_info_record_t *iir, + osm_mad_addr_t *addr) +{ + osm_infr_t infr, *p_infr; + int ret = 0; + + infr.h_bind = p_osm->sa.mad_ctrl.h_bind; + infr.p_infr_rcv = &p_osm->sa.infr_rcv; + /* other possible way to restore mad_addr partially is + to extract qpn from InformInfo and to find lid by gid */ + infr.report_addr = *addr; + infr.inform_record = *iir; + + cl_plock_excl_acquire(&p_osm->lock); + if (osm_infr_get_by_rec(&p_osm->subn, &p_osm->log, &infr)) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "load_infr: InformInfo Record already exists\n"); + goto _out; + } + + if (!(p_infr = osm_infr_new(&infr))) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "load_infr: cannot allocate new infr struct\n"); + ret = -1; + goto _out; + } + + osm_log(&p_osm->log, OSM_LOG_DEBUG, + "load_infr: adding InformInfo Record...\n"); + + osm_infr_insert_to_db(&p_osm->subn, &p_osm->log, p_infr); + + _out: + cl_plock_release(&p_osm->lock); + + return ret; +} + + +#define UNPACK_FUNC(name,x) \ +int unpack_##name##x(char *p, uint##x##_t *val_ptr) \ +{ \ + char *q; \ + unsigned long long num; \ + num = strtoull(p, &q, 16); \ + if (num > ~((uint##x##_t)0x0) \ + || q == p || (!isspace(*q) && *q != ':')) { \ + *val_ptr = 0; \ + return -1; \ + } \ + *val_ptr = cl_hton##x((uint##x##_t)num); \ + return (int)(q - p); \ +} + +#define cl_hton8(x) (x) + +UNPACK_FUNC(net,8); +UNPACK_FUNC(net,16); +UNPACK_FUNC(net,32); +UNPACK_FUNC(net,64); + +static int unpack_string(char *p, uint8_t *buf, unsigned len) +{ + char *q = p; + char delim = ' '; + + if (*q == '\'' || *q == '\"') + delim = *q++; + while (--len && *q && *q != delim) + *buf++ = *q++; + *buf = '\0'; + if (*q == delim && delim != ' ') + q++; + return (int)(q - p); +} + +static int unpack_string64(char *p, uint8_t *buf) +{ + return unpack_string(p, buf, 64); +} + +#define PARSE_AHEAD(p, x, name, 
val_ptr) { int _ret; \ + p = strstr(p, name); \ + if (!p) { \ + osm_log(&p_osm->log, OSM_LOG_ERROR, \ + "PARSE ERROR: %s:%u: cannot find \"%s\" string\n", \ + file_name, lineno, (name)); \ + ret = -2; \ + goto _error; \ + } \ + p += strlen(name); \ + _ret = unpack_##x(p, (val_ptr)); \ + if (_ret < 0) { \ + osm_log(&p_osm->log, OSM_LOG_ERROR, \ + "PARSE ERROR: %s:%u: cannot parse "#x" value " \ + "after \"%s\"\n", file_name, lineno, (name)); \ + ret = _ret; \ + goto _error; \ + } \ + p += _ret; \ +} + +int osm_sa_db_file_load(osm_opensm_t *p_osm) +{ + char line[1024]; + char *file_name; + FILE *file; + int ret = 0; + osm_mgrp_t *p_mgrp = NULL; + unsigned rereg_clients = 0; + unsigned lineno; + + file_name = p_osm->subn.opt.sa_db_file; + if (!file_name) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "osm_sa_db_file_load: sa db file name is not " + "specifed. Skip restore\n"); + return 0; + } + + file = fopen(file_name, "r"); + if (!file) { + osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS, + "osm_sa_db_file_load: ERR 0000: " + "cannot open sa db file \'%s\'. " + "Skip restoring\n", file_name); + return -1; + } + + lineno = 0; + + while (fgets(line, sizeof(line) - 1, file) != NULL) { + char *p; + uint8_t val; + + lineno++; + + p = line; + while (isspace(*p)) + p++; + + if (*p == '#') + continue; + + if (!strncmp(p, "MC Group", 8)) { + ib_member_rec_t mcm_rec; + ib_net16_t mlid; + unsigned well_known = 0; + + p_mgrp = NULL; + memset(&mcm_rec, 0, sizeof(mcm_rec)); + + PARSE_AHEAD(p, net16, " 0x", &mlid); + if(strstr(p, "well known")) + well_known = 1; + PARSE_AHEAD(p, net64, " mgid=0x", + &mcm_rec.mgid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &mcm_rec.mgid.unicast.interface_id); + PARSE_AHEAD(p, net64, " port_gid=0x", + &mcm_rec.port_gid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &mcm_rec.port_gid.unicast.interface_id); + PARSE_AHEAD(p, net32, " qkey=0x", &mcm_rec.qkey); + PARSE_AHEAD(p, net16, " mlid=0x", &mcm_rec.mlid); + PARSE_AHEAD(p, net8, " mtu=0x", &mcm_rec.mtu); + PARSE_AHEAD(p, net8, " tclass=0x", &mcm_rec.tclass); + PARSE_AHEAD(p, net16, " pkey=0x", &mcm_rec.pkey); + PARSE_AHEAD(p, net8, " rate=0x", &mcm_rec.rate); + PARSE_AHEAD(p, net8, " pkt_life=0x", &mcm_rec.pkt_life); + PARSE_AHEAD(p, net32, " sl_flow_hop=0x", + &mcm_rec.sl_flow_hop); + PARSE_AHEAD(p, net8, " scope_state=0x", + &mcm_rec.scope_state); + PARSE_AHEAD(p, net8, " proxy_join=0x", &val); + mcm_rec.proxy_join = val; + + p_mgrp = load_mcgroup(p_osm, mlid, &mcm_rec, + well_known); + if (!p_mgrp) + rereg_clients = 1; + } + else if (p_mgrp && !strncmp(p, "mcm_port", 8)) { + ib_gid_t port_gid; + ib_net64_t guid; + uint8_t scope_state; + boolean_t proxy_join; + + PARSE_AHEAD(p, net64, " port_gid=0x", + &port_gid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &port_gid.unicast.interface_id); + PARSE_AHEAD(p, net8, " scope_state=0x", &scope_state); + PARSE_AHEAD(p, net8, " proxy_join=0x", &val); + proxy_join = val; + + guid = port_gid.unicast.interface_id; + if (cl_qmap_get(&p_mgrp->mcm_port_tbl, + port_gid.unicast.interface_id) == + cl_qmap_end(&p_mgrp->mcm_port_tbl)) + osm_mgrp_add_port(p_mgrp, &port_gid, + scope_state, proxy_join); + } + else if (!strncmp(p, "Service Record:", 15)) { + ib_service_record_t s_rec; + uint32_t modified_time, lease_period; + + p_mgrp = NULL; + memset(&s_rec, 0, sizeof(s_rec)); + + PARSE_AHEAD(p, net64, " id=0x", &s_rec.service_id); + PARSE_AHEAD(p, net64, " gid=0x", + &s_rec.service_gid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &s_rec.service_gid.unicast.interface_id); + 
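+			/* The service_key and the data8/16/32 arrays below are
+			   re-assembled 64 bits at a time from the hex chunks
+			   written out by sa_dump_one_service() above. */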
PARSE_AHEAD(p, net16, " pkey=0x", &s_rec.service_pkey); + PARSE_AHEAD(p, net32, " lease=0x", &s_rec.service_lease); + PARSE_AHEAD(p, net64, " key=0x", + (ib_net64_t *)(&s_rec.service_key[0])); + PARSE_AHEAD(p, net64, ":0x", + (ib_net64_t *)(&s_rec.service_key[8])); + PARSE_AHEAD(p, string64, " name=", s_rec.service_name); + PARSE_AHEAD(p, net64, " data8=0x", + (ib_net64_t *)(&s_rec.service_data8[0])); + PARSE_AHEAD(p, net64, ":0x", + (ib_net64_t *)(&s_rec.service_data8[8])); + PARSE_AHEAD(p, net64, " data16=0x", + (ib_net64_t *)(&s_rec.service_data16[0])); + PARSE_AHEAD(p, net64, ":0x", + (ib_net64_t *)(&s_rec.service_data16[4])); + PARSE_AHEAD(p, net64, " data32=0x", + (ib_net64_t *)(&s_rec.service_data32[0])); + PARSE_AHEAD(p, net64, ":0x", + (ib_net64_t *)(&s_rec.service_data32[2])); + PARSE_AHEAD(p, net64, " data64=0x", &s_rec.service_data64[0]); + PARSE_AHEAD(p, net64, ":0x", &s_rec.service_data64[1]); + PARSE_AHEAD(p, net32, " modified_time=0x", + &modified_time); + PARSE_AHEAD(p, net32, " lease_period=0x", + &lease_period); + + if (load_svcr(p_osm, &s_rec, cl_ntoh32(modified_time), + cl_ntoh32(lease_period))) + rereg_clients = 1; + } + else if (!strncmp(p, "InformInfo Record:", 18)) { + ib_inform_info_record_t i_rec; + osm_mad_addr_t rep_addr; + + p_mgrp = NULL; + memset(&i_rec, 0, sizeof(i_rec)); + memset(&rep_addr, 0, sizeof(rep_addr)); + + PARSE_AHEAD(p, net64, " subscriber_gid=0x", + &i_rec.subscriber_gid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &i_rec.subscriber_gid.unicast.interface_id); + PARSE_AHEAD(p, net16, " subscriber_enum=0x", + &i_rec.subscriber_enum); + PARSE_AHEAD(p, net64, " gid=0x", + &i_rec.inform_info.gid.unicast.prefix); + PARSE_AHEAD(p, net64, ":0x", + &i_rec.inform_info.gid.unicast.interface_id); + PARSE_AHEAD(p, net16, " lid_range_begin=0x", + &i_rec.inform_info.lid_range_begin); + PARSE_AHEAD(p, net16, " lid_range_end=0x", + &i_rec.inform_info.lid_range_end); + PARSE_AHEAD(p, net8, " is_generic=0x", + &i_rec.inform_info.is_generic); + PARSE_AHEAD(p, net8, " subscribe=0x", + &i_rec.inform_info.subscribe); + PARSE_AHEAD(p, net16, " trap_type=0x", + &i_rec.inform_info.trap_type); + PARSE_AHEAD(p, net16, " trap_num=0x", + &i_rec.inform_info.g_or_v.generic.trap_num); + PARSE_AHEAD(p, net32, " qpn_resp_time_val=0x", + &i_rec.inform_info.g_or_v.generic.qpn_resp_time_val); + PARSE_AHEAD(p, net32, " node_type=0x", + (uint32_t *)&i_rec.inform_info.g_or_v.generic.reserved2); + + PARSE_AHEAD(p, net16, " rep_addr: lid=0x", + &rep_addr.dest_lid); + PARSE_AHEAD(p, net8, " path_bits=0x", + &rep_addr.path_bits); + PARSE_AHEAD(p, net8, " static_rate=0x", + &rep_addr.static_rate); + PARSE_AHEAD(p, net32, " remote_qp=0x", + &rep_addr.addr_type.gsi.remote_qp); + PARSE_AHEAD(p, net32, " remote_qkey=0x", + &rep_addr.addr_type.gsi.remote_qkey); + PARSE_AHEAD(p, net16, " pkey=0x", + &rep_addr.addr_type.gsi.pkey); + PARSE_AHEAD(p, net8, " sl=0x", + &rep_addr.addr_type.gsi.service_level); + + if (load_infr(p_osm, &i_rec, &rep_addr)) + rereg_clients = 1; + } + } + + if (!rereg_clients) + p_osm->subn.opt.no_clients_rereg = TRUE; + + _error: + fclose(file); + return ret; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info.c new file mode 100644 index 00000000..493fd157 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info.c @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. 
All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_cpi_rcv_t. + * This object represents the ClassPortInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define MAX_MSECS_TO_RTV 24 +/* Precalculated table in msec (index is related to encoded value) */ +/* 4.096 usec * 2 ** n (where n = 8 - 31) */ +static uint32_t __msecs_to_rtv_table[MAX_MSECS_TO_RTV] = + { 1, 2, 4, 8, + 16, 33, 67, 134, + 268, 536, 1073, 2147, + 4294, 8589, 17179, 34359, + 68719, 137438, 274877, 549755, + 1099511, 2199023, 4398046, 8796093 }; + +/********************************************************************** + **********************************************************************/ +void +osm_cpi_rcv_construct( + IN osm_cpi_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_cpi_rcv_destroy( + IN osm_cpi_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_cpi_rcv_destroy ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_cpi_rcv_init( + IN osm_cpi_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_cpi_rcv_init ); + + osm_cpi_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_cpi_rcv_respond( + IN osm_cpi_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + osm_madw_t* p_resp_madw; + const 
ib_sa_mad_t* p_sa_mad;
+  ib_sa_mad_t* p_resp_sa_mad;
+  ib_class_port_info_t *p_resp_cpi;
+  ib_api_status_t status;
+  ib_gid_t zero_gid;
+  uint8_t rtv;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_cpi_rcv_respond );
+
+  memset(&zero_gid, 0, sizeof(ib_gid_t));
+
+  /*
+     Get a MAD to reply. Address of Mad is in the received mad_wrapper
+  */
+  p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool,
+                                  p_madw->h_bind,
+                                  MAD_BLOCK_SIZE,
+                                  &p_madw->mad_addr );
+  if( !p_resp_madw )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_cpi_rcv_respond: ERR 1408: "
+             "Unable to allocate MAD\n" );
+    goto Exit;
+  }
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw );
+
+  memcpy( p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE );
+  p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
+  /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
+  p_resp_sa_mad->sm_key = 0;
+
+  p_resp_cpi = (ib_class_port_info_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad );
+
+  /* Now fill in the response ClassPortInfo */
+  p_resp_cpi->base_ver = 1;
+  p_resp_cpi->class_ver = 2;
+  /* Calculate encoded response time value */
+  /* transaction timeout is in msec;
+     the table holds MAX_MSECS_TO_RTV entries, so clamp at the last one */
+  if (p_rcv->p_subn->opt.transaction_timeout > __msecs_to_rtv_table[MAX_MSECS_TO_RTV - 1])
+    rtv = MAX_MSECS_TO_RTV - 1;
+  else
+  {
+    for (rtv = 0; rtv < MAX_MSECS_TO_RTV; rtv++) {
+      if (p_rcv->p_subn->opt.transaction_timeout <= __msecs_to_rtv_table[rtv])
+        break;
+    }
+  }
+  rtv += 8;
+  p_resp_cpi->resp_time_val = rtv;
+  p_resp_cpi->redir_gid = zero_gid;
+  p_resp_cpi->redir_tc_sl_fl = 0;
+  p_resp_cpi->redir_lid = 0;
+  p_resp_cpi->redir_pkey = 0;
+  p_resp_cpi->redir_qp = CL_NTOH32(1);
+  p_resp_cpi->redir_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+  p_resp_cpi->trap_gid = zero_gid;
+  p_resp_cpi->trap_tc_sl_fl = 0;
+  p_resp_cpi->trap_lid = 0;
+  p_resp_cpi->trap_pkey = 0;
+  p_resp_cpi->trap_hop_qp = 0;
+  p_resp_cpi->trap_qkey = IB_QP1_WELL_KNOWN_Q_KEY;
+
+  /* set specific capability mask bits */
+  /* we do not support the following optional records:
+     OSM_CAP_IS_SUBN_OPT_RECS_SUP :
+       RandomForwardingTableRecord,
+       ServiceAssociationRecord
+       other optional records supported "under the table"
+
+     OSM_CAP_IS_MULTIPATH_SUP:
+       TraceRecord
+
+     OSM_CAP_IS_REINIT_SUP:
+       For reinitialization functionality.
+
+     So not sending traps, but supporting Get(Notice) and Set(Notice).
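+
+     For reference, the resp_time_val computed above is the ClassPortInfo
+     encoded response time: a wire value v means 4.096 usec * 2^v.
+     Assuming the default transaction_timeout of 200 msec, the table scan
+     stops at index 8 (268 msec) and 8 + 8 = 16 is returned, i.e.
+     4.096 usec * 2^16 is roughly 268 msec.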
+ */ + + /* Note host notation replaced later */ +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + p_resp_cpi->cap_mask = OSM_CAP_IS_SUBN_GET_SET_NOTICE_SUP | + OSM_CAP_IS_PORT_INFO_CAPMASK_MATCH_SUPPORTED | + OSM_CAP_IS_MULTIPATH_SUP; +#else + p_resp_cpi->cap_mask = OSM_CAP_IS_SUBN_GET_SET_NOTICE_SUP | + OSM_CAP_IS_PORT_INFO_CAPMASK_MATCH_SUPPORTED; +#endif + if (p_rcv->p_subn->opt.no_multicast_option != TRUE) + p_resp_cpi->cap_mask |= OSM_CAP_IS_UD_MCAST_SUP; + p_resp_cpi->cap_mask = cl_hton16(p_resp_cpi->cap_mask); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_FRAMES ) ) + osm_dump_sa_mad( p_rcv->p_log, p_resp_sa_mad, OSM_LOG_FRAMES ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_cpi_rcv_respond: ERR 1409: " + "Unable to send MAD (%s)\n", ib_get_err_str( status ) ); + /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */ + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + * This code actually handles the call + **********************************************************************/ +void +osm_cpi_rcv_process( + IN osm_cpi_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + + OSM_LOG_ENTER( p_rcv->p_log, osm_cpi_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + /* we only support GET */ + if (p_sa_mad->method != IB_MAD_METHOD_GET) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_cpi_rcv_process: ERR 1403: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID); + goto Exit; + } + + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_CLASS_PORT_INFO ); + + /* + CLASS PORT INFO does not really look on the SMDB - no lock required. + */ + + __osm_cpi_rcv_respond( p_rcv, p_madw); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info_ctrl.c new file mode 100644 index 00000000..dbd55dca --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_class_port_info_ctrl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pr_rcv_ctrl_t. + * This object represents the ClassPortInfo request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_cpi_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_cpi_rcv_process( ((osm_cpi_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_cpi_rcv_ctrl_construct( + IN osm_cpi_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_cpi_rcv_ctrl_destroy( + IN osm_cpi_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_cpi_rcv_ctrl_init( + IN osm_cpi_rcv_ctrl_t* const p_ctrl, + IN osm_cpi_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_cpi_rcv_ctrl_init ); + + osm_cpi_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_CLASS_PORT_INFO, + __osm_cpi_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_cpi_rcv_ctrl_init: ERR 1501: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record.c new file mode 100644 index 00000000..2c0bce9f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record.c @@ -0,0 +1,611 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_gir_rcv_t. + * This object represents the GUIDInfoRecord Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_GIR_RCV_POOL_MIN_SIZE 32 +#define OSM_GIR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_gir_item +{ + cl_pool_item_t pool_item; + ib_guidinfo_record_t rec; +} osm_gir_item_t; + +typedef struct _osm_gir_search_ctxt +{ + const ib_guidinfo_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_gir_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_gir_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_gir_rcv_construct( + IN osm_gir_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_gir_rcv_destroy( + IN osm_gir_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_gir_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_gir_rcv_init( + IN osm_gir_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_gir_rcv_init ); + + osm_gir_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_GIR_RCV_POOL_MIN_SIZE, + 0, + OSM_GIR_RCV_POOL_GROW_SIZE, + sizeof(osm_gir_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_gir_rcv_new_gir( + IN osm_gir_rcv_t* const p_rcv, + IN const osm_node_t* const p_node, + IN cl_qlist_t* const p_list, + IN ib_net64_t const match_port_guid, + IN ib_net16_t const match_lid, + IN const osm_physp_t* const p_req_physp, + IN uint8_t const block_num ) +{ + osm_gir_item_t* p_rec_item; + ib_api_status_t status = 
IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_gir_rcv_new_gir ); + + p_rec_item = (osm_gir_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_gir_rcv_new_gir: ERR 5102: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_gir_rcv_new_gir: " + "New GUIDInfoRecord: lid 0x%X, block num %d\n", + cl_ntoh16( match_lid ), block_num ); + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + p_rec_item->rec.lid = match_lid; + p_rec_item->rec.block_num = block_num; + if (!block_num) + p_rec_item->rec.guid_info.guid[0] = osm_physp_get_port_guid( p_req_physp ); + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_gir_create_gir( + IN osm_gir_rcv_t* const p_rcv, + IN const osm_node_t* const p_node, + IN cl_qlist_t* const p_list, + IN ib_net64_t const match_port_guid, + IN ib_net16_t const match_lid, + IN const osm_physp_t* const p_req_physp, + IN uint8_t const match_block_num ) +{ + const osm_physp_t* p_physp; + uint8_t port_num; + uint8_t num_ports; + uint16_t match_lid_ho; + ib_net16_t base_lid_ho; + ib_net16_t max_lid_ho; + uint8_t lmc; + ib_net64_t port_guid; + const ib_port_info_t* p_pi; + uint8_t block_num, start_block_num, end_block_num, num_blocks; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_gir_create_gir ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_gir_create_gir: " + "Looking for GUIDRecord with LID: 0x%X GUID:0x%016" PRIx64 "\n", + cl_ntoh16( match_lid ), + cl_ntoh64( match_port_guid ) + ); + } + + /* + For switches, do not return the GUIDInfo record(s) + for each port on the switch, just for port 0. + */ + if( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ) + num_ports = 1; + else + num_ports = osm_node_get_num_physp( p_node ); + + for( port_num = 0; port_num < num_ports; port_num++ ) + { + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + if( !osm_physp_is_valid( p_physp ) ) + continue; + + /* Check to see if the found p_physp and the requester physp + share a pkey. If not, continue */ + if (!osm_physp_share_pkey( p_rcv->p_log, p_physp, p_req_physp ) ) + continue; + + port_guid = osm_physp_get_port_guid( p_physp ); + + if( match_port_guid && ( port_guid != match_port_guid ) ) + continue; + + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + /* + Note: the following check is a temporary workaround + Since 1. GUIDCap should never be 0 on ports where this applies + and 2. GUIDCap should not be used on ports where it doesn't apply + So this should really be a check for whether the port is a + switch external port or not! 
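+
+     Each GUIDInfo block holds 8 GUIDs, so a port advertising guid_cap
+     GUIDs spans ceil(guid_cap / 8) blocks; e.g. guid_cap = 9 occupies
+     blocks 0 and 1.  That is the num_blocks computation below.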
+ */ + if ( p_pi->guid_cap == 0 ) + continue; + + num_blocks = p_pi->guid_cap / 8; + if ( p_pi->guid_cap % 8 ) + num_blocks++; + if ( match_block_num == 255 ) + { + start_block_num = 0; + end_block_num = num_blocks - 1; + } + else + { + if ( match_block_num >= num_blocks ) + continue; + end_block_num = start_block_num = match_block_num; + } + + base_lid_ho = cl_ntoh16( osm_physp_get_base_lid( p_physp ) ); + match_lid_ho = cl_ntoh16( match_lid ); + if( match_lid_ho ) + { + lmc = osm_physp_get_lmc( p_physp ); + max_lid_ho = (uint16_t)( base_lid_ho + (1 << lmc) - 1 ); + + /* + We validate that the lid belongs to this node. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_gir_create_gir: " + "Comparing LID: 0x%X <= 0x%X <= 0x%X\n", + base_lid_ho, match_lid_ho, max_lid_ho + ); + } + + if ( match_lid_ho < base_lid_ho || match_lid_ho > max_lid_ho ) + continue; + } + + for (block_num = start_block_num; block_num <= end_block_num; block_num++) + __osm_gir_rcv_new_gir( p_rcv, p_node, p_list, + port_guid, cl_ntoh16(base_lid_ho), + p_physp, block_num ); + + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_gir_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_gir_search_ctxt_t* const p_ctxt = (osm_gir_search_ctxt_t *)context; + const osm_node_t* const p_node = (osm_node_t*)p_map_item; + const ib_guidinfo_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec; + const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp; + osm_gir_rcv_t* const p_rcv = p_ctxt->p_rcv; + const ib_guid_info_t* p_comp_gi; + ib_net64_t const comp_mask = p_ctxt->comp_mask; + ib_net64_t match_port_guid = 0; + ib_net16_t match_lid = 0; + uint8_t match_block_num = 255; + + OSM_LOG_ENTER( p_ctxt->p_rcv->p_log, __osm_sa_gir_by_comp_mask_cb); + + if( comp_mask & IB_GIR_COMPMASK_LID ) + match_lid = p_rcvd_rec->lid; + + if( comp_mask & IB_GIR_COMPMASK_BLOCKNUM ) + match_block_num = p_rcvd_rec->block_num; + + p_comp_gi = &p_rcvd_rec->guid_info; + /* Different rule for block 0 v. 
other blocks */ + if( comp_mask & IB_GIR_COMPMASK_GID0 ) + { + if ( !p_rcvd_rec->block_num ) + match_port_guid = osm_physp_get_port_guid( p_req_physp ); + if ( p_comp_gi->guid[0] != match_port_guid ) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID1 ) + { + if ( p_comp_gi->guid[1] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID2 ) + { + if ( p_comp_gi->guid[2] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID3 ) + { + if ( p_comp_gi->guid[3] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID4 ) + { + if ( p_comp_gi->guid[4] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID5 ) + { + if ( p_comp_gi->guid[5] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID6 ) + { + if ( p_comp_gi->guid[6] != 0) + goto Exit; + } + + if( comp_mask & IB_GIR_COMPMASK_GID7 ) + { + if ( p_comp_gi->guid[7] != 0) + goto Exit; + } + + __osm_sa_gir_create_gir( p_rcv, p_node, p_ctxt->p_list, + match_port_guid, match_lid, p_req_physp, + match_block_num ); + + Exit: + OSM_LOG_EXIT( p_ctxt->p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_gir_rcv_process( + IN osm_gir_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_guidinfo_record_t* p_rcvd_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_guidinfo_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_gir_search_ctxt_t context; + osm_gir_item_t* p_rec_item; + ib_api_status_t status; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_gir_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_guidinfo_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_GUIDINFO_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_gir_rcv_process: ERR 5105: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_gir_rcv_process: ERR 5104: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_guidinfo_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + cl_qmap_apply_func( &p_rcv->p_subn->node_guid_tbl, + __osm_sa_gir_by_comp_mask_cb, + &context ); + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! 
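+ * In that case the code below replies with status
+ * IB_SA_MAD_STATUS_TOO_MANY_RECORDS and returns every record prepared
+ * so far to the pool.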
+ */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_gir_rcv_process: ERR 5103: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... */ + p_rec_item = (osm_gir_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_gir_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_gir_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_guidinfo_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_gir_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_gir_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_guidinfo_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_gir_rcv_process: ERR 5106: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_gir_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
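+
+     The payload is thus laid out as consecutive ib_guidinfo_record_t
+     entries directly after the SA MAD header; attr_offset advertises
+     the record stride in units of 8 bytes, which is what
+     ib_get_attr_offset( sizeof(ib_guidinfo_record_t) ) encodes.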
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_guidinfo_record_t) ); + + p_resp_rec = (ib_guidinfo_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_gir_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_gir_rcv_process: ERR 5107: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record_ctrl.c new file mode 100644 index 00000000..6e0b9e20 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_guidinfo_record_ctrl.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_gir_rcv_ctrl_t. + * This object represents the GUIDInfoRecord request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_gir_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_gir_rcv_process( ((osm_gir_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_gir_rcv_ctrl_construct( + IN osm_gir_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_gir_rcv_ctrl_destroy( + IN osm_gir_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_gir_rcv_ctrl_init( + IN osm_gir_rcv_ctrl_t* const p_ctrl, + IN osm_gir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_gir_rcv_ctrl_init ); + + osm_gir_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_GUIDINFO_RECORD, + __osm_gir_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_gir_rcv_ctrl_init: ERR 5201: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo.c new file mode 100644 index 00000000..56619130 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo.c @@ -0,0 +1,922 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_infr_rcv_t. + * This object represents the InformInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_IIR_RCV_POOL_MIN_SIZE 32 +#define OSM_IIR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_iir_item +{ + cl_pool_item_t pool_item; + ib_inform_info_record_t rec; +} osm_iir_item_t; + +typedef struct _osm_iir_search_ctxt +{ + const ib_inform_info_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + ib_gid_t subscriber_gid; + ib_net16_t subscriber_enum; + osm_infr_rcv_t* p_rcv; + osm_physp_t* p_req_physp; +} osm_iir_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_infr_rcv_construct( + IN osm_infr_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_rcv_destroy( + IN osm_infr_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_infr_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_infr_rcv_init( + IN osm_infr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_ERROR; + + OSM_LOG_ENTER( p_log, osm_infr_rcv_init ); + + osm_infr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_IIR_RCV_POOL_MIN_SIZE, + 0, + OSM_IIR_RCV_POOL_GROW_SIZE, + sizeof(osm_iir_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** +o13-14.1.1: Except for Set(InformInfo) requests with +InformInfo:LIDRangeBegin=0xFFFF, managers that support event forwarding +shall, upon receiving a Set(InformInfo), verify that the requester +originating the Set(InformInfo) and a Trap() source identified by the +InformInfo can access each other - a path record can be used to verify +that.
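+
+In outline, the check implemented below is:
+
+  if (InformInfo.GID != 0)
+      require a PKey shared by the requester and the port named by GID
+  else if (InformInfo.LIDRangeBegin != 0xFFFF)
+      for (lid = LIDRangeBegin; lid <= max(LIDRangeEnd, LIDRangeBegin); lid++)
+          require a PKey shared by the requester and the port at lid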
+**********************************************************************/ +static +boolean_t +__validate_ports_access_rights( + IN osm_infr_rcv_t* const p_rcv, + IN osm_infr_t* p_infr_rec ) +{ + boolean_t valid = TRUE; + osm_physp_t* p_requester_physp; + osm_port_t* p_port; + osm_physp_t* p_physp; + ib_net64_t portguid; + ib_net16_t lid_range_begin; + ib_net16_t lid_range_end; + ib_net16_t lid; + const cl_ptr_vector_t* p_tbl; + ib_gid_t zero_gid; + + OSM_LOG_ENTER( p_rcv->p_log, __validate_ports_access_rights ); + + /* get the requester physp from the request address */ + p_requester_physp = osm_get_physp_by_mad_addr( p_rcv->p_log, + p_rcv->p_subn, + &p_infr_rec->report_addr ); + + memset( &zero_gid, 0, sizeof(zero_gid) ); + if ( memcmp (&(p_infr_rec->inform_record.inform_info.gid), + &zero_gid, sizeof(ib_gid_t) ) ) + { + /* a gid is defined */ + portguid = p_infr_rec->inform_record.inform_info.gid.unicast.interface_id; + + p_port = osm_get_port_by_guid( p_rcv->p_subn, portguid ); + + if ( p_port == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__validate_ports_access_rights: ERR 4301: " + "Invalid port guid: 0x%016" PRIx64 "\n", + cl_ntoh64(portguid) ); + valid = FALSE; + goto Exit; + } + + /* get the destination InformInfo physical port */ + p_physp = osm_port_get_default_phys_ptr(p_port); + + /* make sure that the requester and destination port can access each other + according to the current partitioning. */ + if (! osm_physp_share_pkey( p_rcv->p_log, p_physp, p_requester_physp)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_ports_access_rights: " + "port and requester don't share pkey\n" ); + valid = FALSE; + goto Exit; + } + } + else + { + /* gid is zero - check if LID range is defined */ + lid_range_begin = cl_ntoh16(p_infr_rec->inform_record.inform_info.lid_range_begin); + /* if lid is 0xFFFF - meaning all endports managed by the manager */ + if ( lid_range_begin == 0xFFFF ) + goto Exit; + + lid_range_end = cl_ntoh16(p_infr_rec->inform_record.inform_info.lid_range_end); + + /* lid_range_end is set to zero if no range desired. In this case - + just make it equal to the lid_range_begin. */ + if (lid_range_end == 0) + lid_range_end = lid_range_begin; + + /* go over all defined lids within the range and make sure that the + requester port can access them according to current partitioning. */ + for ( lid = lid_range_begin; lid <= lid_range_end; lid++ ) + { + p_tbl = &p_rcv->p_subn->port_lid_tbl; + if ( cl_ptr_vector_get_size( p_tbl ) > lid ) + { + p_port = cl_ptr_vector_get( p_tbl, lid ); + } + else + { + /* lid requested is out of range */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__validate_ports_access_rights: ERR 4302: " + "Given LID (0x%X) is out of range:0x%X\n", + lid, cl_ptr_vector_get_size(p_tbl) ); + valid = FALSE; + goto Exit; + } + if ( p_port == NULL ) + continue; + + p_physp = osm_port_get_default_phys_ptr(p_port); + /* make sure that the requester and destination port can access + each other according to the current partitioning. */ + if (! 
osm_physp_share_pkey( p_rcv->p_log, p_physp, p_requester_physp)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_ports_access_rights: " + "port and requester don't share pkey\n" ); + valid = FALSE; + goto Exit; + } + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return valid; +} + +/********************************************************************** + **********************************************************************/ +static +boolean_t +__validate_infr( + IN osm_infr_rcv_t* const p_rcv, + IN osm_infr_t* p_infr_rec ) +{ + boolean_t valid = TRUE; + + OSM_LOG_ENTER( p_rcv->p_log, __validate_infr ); + + valid = __validate_ports_access_rights( p_rcv, p_infr_rec ); + if (!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_infr: " + "Invalid Access for InformInfo\n" ); + valid = FALSE; + } + + OSM_LOG_EXIT( p_rcv->p_log ); + return valid; +} + +/********************************************************************** +o13-12.1.1: Confirm a valid request for event subscription by responding +with an InformInfo attribute that is a copy of the data in the +Set(InformInfo) request. +**********************************************************************/ +static void +__osm_infr_rcv_respond( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + osm_madw_t* p_resp_madw; + const ib_sa_mad_t* p_sa_mad; + ib_sa_mad_t* p_resp_sa_mad; + ib_inform_info_t* p_resp_infr; + ib_api_status_t status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_infr_rcv_respond ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_infr_rcv_respond: " + "Generating successful InformInfo response\n"); + } + + /* + Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + MAD_BLOCK_SIZE, + &p_madw->mad_addr ); + if ( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_infr_rcv_respond: ERR 4303: " + "Unable to allocate MAD\n" ); + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* copy the request InformInfo */ + memcpy( p_resp_sa_mad, p_sa_mad, MAD_BLOCK_SIZE ); + p_resp_sa_mad->method = IB_MAD_METHOD_GET_RESP; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + p_resp_infr = (ib_inform_info_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + + if ( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_infr_rcv_respond: ERR 4304: " + "Unable to send MAD (%s)\n", ib_get_err_str( status ) ); + /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */ + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_inform_info_rec_by_comp_mask( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_infr_t* const p_infr, + osm_iir_search_ctxt_t* const p_ctxt ) +{ + const ib_inform_info_record_t* p_rcvd_rec = NULL; + ib_net64_t comp_mask; + ib_net64_t portguid; + osm_port_t * p_subscriber_port; + osm_physp_t * p_subscriber_physp; + const osm_physp_t* p_req_physp; + osm_iir_item_t* p_rec_item; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_inform_info_rec_by_comp_mask ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + p_req_physp = 
p_ctxt->p_req_physp; + + if (comp_mask & IB_IIR_COMPMASK_SUBSCRIBERGID) + { + if (memcmp(&p_infr->inform_record.subscriber_gid, + &p_ctxt->subscriber_gid, + sizeof(p_infr->inform_record.subscriber_gid))) + goto Exit; + } + + if (comp_mask & IB_IIR_COMPMASK_ENUM) + { + if (p_infr->inform_record.subscriber_enum != p_ctxt->subscriber_enum) + goto Exit; + } + + /* Implement any other needed search cases */ + + /* Ensure pkey is shared before returning any records */ + portguid = p_infr->inform_record.subscriber_gid.unicast.interface_id; + p_subscriber_port = osm_get_port_by_guid( p_rcv->p_subn, portguid ); + if ( p_subscriber_port == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_inform_info_rec_by_comp_mask: ERR 430D: " + "Invalid subscriber port guid: 0x%016" PRIx64 "\n", + cl_ntoh64(portguid) ); + goto Exit; + } + + /* get the subscriber InformInfo physical port */ + p_subscriber_physp = osm_port_get_default_phys_ptr(p_subscriber_port); + /* make sure that the requester and subscriber port can access each other + according to the current partitioning. */ + if (! osm_physp_share_pkey( p_rcv->p_log, p_req_physp, p_subscriber_physp )) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_inform_info_rec_by_comp_mask: " + "requester and subscriber ports don't share pkey\n" ); + goto Exit; + } + + p_rec_item = (osm_iir_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_inform_info_rec_by_comp_mask: ERR 430E: " + "cl_qlock_pool_get failed\n" ); + goto Exit; + } + + memcpy((void *)&p_rec_item->rec, (void *)&p_infr->inform_record, sizeof(ib_inform_info_record_t)); + cl_qlist_insert_tail( p_ctxt->p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + +Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_inform_info_rec_by_comp_mask_cb( + IN cl_list_item_t* const p_list_item, + IN void* context ) +{ + const osm_infr_t* const p_infr = (osm_infr_t *)p_list_item; + osm_iir_search_ctxt_t* const p_ctxt = (osm_iir_search_ctxt_t *)context; + + __osm_sa_inform_info_rec_by_comp_mask( p_ctxt->p_rcv, p_infr, p_ctxt ); +} + +/********************************************************************** +Received a Get(InformInfoRecord) or GetTable(InformInfoRecord) MAD +**********************************************************************/ +static void +osm_infr_rcv_process_get_method( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t* p_rcvd_mad; + const ib_inform_info_record_t* p_rcvd_rec; + ib_inform_info_record_t* p_resp_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i, j; + osm_iir_search_ctxt_t context; + osm_iir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, osm_infr_rcv_process_get_method ); + + CL_ASSERT( p_madw ); + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = + (ib_inform_info_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + /* update the requester physical port. 
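+ it is used below to screen matches, so that only subscriptions whose
+ subscriber port shares a PKey with this requester are returned.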
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_get_method: ERR 4309: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_inform_info_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.subscriber_gid = p_rcvd_rec->subscriber_gid; + context.subscriber_enum = p_rcvd_rec->subscriber_enum; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infr_rcv_process_get_method: " + "Query Subscriber GID:0x%016" PRIx64 " : 0x%016" PRIx64 "(%02X) Enum:0x%X(%02X)\n", + cl_ntoh64(p_rcvd_rec->subscriber_gid.unicast.prefix), + cl_ntoh64(p_rcvd_rec->subscriber_gid.unicast.interface_id), + (p_rcvd_mad->comp_mask & IB_IIR_COMPMASK_SUBSCRIBERGID) != 0, + cl_ntoh16(p_rcvd_rec->subscriber_enum), + (p_rcvd_mad->comp_mask & IB_IIR_COMPMASK_ENUM) != 0 ); + + cl_plock_acquire( p_rcv->p_lock ); + + cl_qlist_apply_func( &p_rcv->p_subn->sa_infr_list, + __osm_sa_inform_info_rec_by_comp_mask_cb, + &context ); + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_get_method: ERR 430A: " + "More than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... */ + p_rec_item = (osm_iir_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_iir_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_iir_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + /* we limit the number of records to a single packet */ + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_inform_info_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_infr_rcv_process_get_method: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infr_rcv_process_get_method: " + "Returning %u records\n", num_rec ); + + /* + * Get a MAD to reply. 
Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_inform_info_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_get_method: ERR 430B: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_iir_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RESOURCES ); + + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. + */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_inform_info_record_t) ); + + p_resp_rec = (ib_inform_info_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_iir_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + /* clear reserved and pad fields in InformInfoRecord */ + for (j = 0; j < 6; j++) + p_resp_rec->reserved[j] = 0; + for (j = 0; j < 4; j++) + p_resp_rec->pad[j] = 0; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_get_method: ERR 430C: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************* +Received a Set(InformInfo) MAD +**********************************************************************/ +static void +osm_infr_rcv_process_set_method( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t *p_sa_mad; + ib_inform_info_t *p_recvd_inform_info; + osm_infr_t inform_info_rec; /* actual inform record to be stored for reports */ + osm_infr_t *p_infr; + ib_net32_t qpn; + uint8_t resp_time_val; + ib_api_status_t res; + + OSM_LOG_ENTER( p_rcv->p_log, osm_infr_rcv_process_set_method ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_inform_info = + (ib_inform_info_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + +#if 0 + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_inform_info( p_rcv->p_log, p_recvd_inform_info, OSM_LOG_DEBUG ); +#endif + + 
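+ /*
+ * Outline of the Set(InformInfo) handling below: build an osm_infr_t
+ * from the request, derive the subscriber GID and QPN from the MAD
+ * source address, validate access rights, then insert or update the
+ * record for subscribe == 1 and remove it for subscribe == 0; success
+ * is confirmed by echoing the InformInfo back (o13-12.1.1).
+ */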
/* Grab the lock */ + cl_plock_excl_acquire( p_rcv->p_lock ); + + /* define the inform record */ + inform_info_rec.inform_record.inform_info = *p_recvd_inform_info; + + /* following C13-32.1.2 Tbl 120: we only copy the source address vector */ + inform_info_rec.report_addr = p_madw->mad_addr; + + /* we will need to know the mad srvc to send back through */ + inform_info_rec.h_bind = p_madw->h_bind; + inform_info_rec.p_infr_rcv = p_rcv; + + /* update the subscriber GID according to mad address */ + res = osm_get_gid_by_mad_addr( + p_rcv->p_log, + p_rcv->p_subn, + &p_madw->mad_addr, + &inform_info_rec.inform_record.subscriber_gid ); + if ( res != IB_SUCCESS ) + { + cl_plock_release( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_set_method: ERR 4308 " + "Subscribe Request from unknown LID: 0x%04X\n", + cl_ntoh16(p_madw->mad_addr.dest_lid) + ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID ); + goto Exit; + } + + /* HACK: enum is always 0 (currently) */ + inform_info_rec.inform_record.subscriber_enum = 0; + + /* Subscribe values above 1 are undefined */ + if ( p_recvd_inform_info->subscribe > 1 ) + { + cl_plock_release( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_set_method: ERR 4308 " + "Invalid subscribe: %d\n", + p_recvd_inform_info->subscribe + ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID ); + goto Exit; + } + + /* + * MODIFICATIONS DONE ON INCOMING REQUEST: + * + * QPN: + * Internally we keep the QPN field of the InformInfo updated + * so we can simply compare it in the record - when finding such. + */ + if ( p_recvd_inform_info->subscribe ) + { + ib_inform_info_set_qpn( + &inform_info_rec.inform_record.inform_info, + inform_info_rec.report_addr.addr_type.gsi.remote_qp ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infr_rcv_process_set_method: " + "Subscribe Request with QPN: 0x%06X\n", + cl_ntoh32(inform_info_rec.report_addr.addr_type.gsi.remote_qp) + ); + } + else + { + ib_inform_info_get_qpn_resp_time( + p_recvd_inform_info->g_or_v.generic.qpn_resp_time_val, + &qpn, &resp_time_val ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infr_rcv_process_set_method: " + "UnSubscribe Request with QPN: 0x%06X\n", + cl_ntoh32(qpn) + ); + } + + /* If record exists with matching InformInfo */ + p_infr = osm_infr_get_by_rec( p_rcv->p_subn, p_rcv->p_log, &inform_info_rec ); + + /* check to see if the request was for subscribe */ + if ( p_recvd_inform_info->subscribe ) + { + /* validate the request for a new or update InformInfo */ + if ( __validate_infr( p_rcv, &inform_info_rec ) != TRUE ) + { + cl_plock_release( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_set_method: ERR 4305: " + "Failed to validate a new inform object\n"); + + /* o13-13.1.1: we need to set the subscribe bit to 0 */ + p_recvd_inform_info->subscribe = 0; + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID ); + goto Exit; + } + + /* ok - we can try and create a new entry */ + if (p_infr == NULL) + { + /* Create the instance of the osm_infr_t object */ + p_infr = osm_infr_new( &inform_info_rec ); + if (p_infr == NULL) + { + cl_plock_release( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_set_method: ERR 4306: " + "Failed to create a new inform object\n"); + + /* o13-13.1.1: we need to set the subscribe bit to 0 */ + p_recvd_inform_info->subscribe = 0; + osm_sa_send_error( p_rcv->p_resp, p_madw, + 
IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + /* Add this new osm_infr_t object to subnet object */ + osm_infr_insert_to_db( p_rcv->p_subn, p_rcv->p_log, p_infr ); + } + else + { + /* Update the old instance of the osm_infr_t object */ + p_infr->inform_record = inform_info_rec.inform_record; + } + } + else + { + /* We got an UnSubscribe request */ + if (p_infr == NULL) + { + cl_plock_release( p_rcv->p_lock ); + + /* No Such Item - So Error */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_infr_rcv_process_set_method: ERR 4307: " + "Failed to UnSubscribe to non existing inform object\n"); + + /* o13-13.1.1: we need to set the subscribe bit to 0 */ + p_recvd_inform_info->subscribe = 0; + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID ); + goto Exit; + } + else + { + /* Delete this object from the subnet list of informs */ + osm_infr_remove_from_db( p_rcv->p_subn, p_rcv->p_log, p_infr ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + /* send the success response */ + __osm_infr_rcv_respond( p_rcv, p_madw ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************* +**********************************************************************/ +void +osm_infr_rcv_process( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t *p_sa_mad; + + OSM_LOG_ENTER( p_rcv->p_log, osm_infr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_INFORM_INFO ); + + if (p_sa_mad->method != IB_MAD_METHOD_SET) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infr_rcv_process: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + osm_infr_rcv_process_set_method( p_rcv, p_madw ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************* +**********************************************************************/ +void +osm_infir_rcv_process( + IN osm_infr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t *p_sa_mad; + + OSM_LOG_ENTER( p_rcv->p_log, osm_infr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_INFORM_INFO_RECORD ); + + if ( (p_sa_mad->method != IB_MAD_METHOD_GET) && + (p_sa_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_infir_rcv_process: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + osm_infr_rcv_process_get_method( p_rcv, p_madw ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo_ctrl.c new file mode 100644 index 00000000..94760b6e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_informinfo_ctrl.c @@ -0,0 +1,154 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_infr_rcv_ctrl_t. + * This object represents the InformInfo set request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_infr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_infr_rcv_process( ((osm_infr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_infir_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_infir_rcv_process( ((osm_infr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_rcv_ctrl_construct( + IN osm_infr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; + p_ctrl->h_disp2 = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_infr_rcv_ctrl_destroy( + IN osm_infr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp2 ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_infr_rcv_ctrl_init( + IN osm_infr_rcv_ctrl_t* const p_ctrl, + IN osm_infr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_infr_rcv_ctrl_init ); + + osm_infr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + 
p_disp, + OSM_MSG_MAD_INFORM_INFO, + __osm_infr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_infr_rcv_ctrl_init: ERR 1701: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + p_ctrl->h_disp2 = cl_disp_register( + p_disp, + OSM_MSG_MAD_INFORM_INFO_RECORD, + __osm_infir_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp2 == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_infr_rcv_ctrl_init: ERR 1702: " + "Dispatcher registration failed\n" ); + cl_disp_unregister( p_ctrl->h_disp ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record.c new file mode 100644 index 00000000..8034febe --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record.c @@ -0,0 +1,515 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_lftr_rcv_t. + * This object represents the LinearForwardingTable Receiver object. + * This object is part of the opensm family of objects. 
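+ * It serves SubnAdmGet/SubnAdmGetTable(LFTRecord) queries by walking
+ * the subnet's switch table and returning the in-use 64-LID linear
+ * forwarding table blocks that match the component mask.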
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_LFTR_RCV_POOL_MIN_SIZE 32 +#define OSM_LFTR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_lftr_item +{ + cl_pool_item_t pool_item; + ib_lft_record_t rec; +} osm_lftr_item_t; + +typedef struct _osm_lftr_search_ctxt +{ + const ib_lft_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_lftr_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_lftr_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_lftr_rcv_construct( + IN osm_lftr_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lftr_rcv_destroy( + IN osm_lftr_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_lftr_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_lftr_rcv_init( + IN osm_lftr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_lftr_rcv_init ); + + osm_lftr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_LFTR_RCV_POOL_MIN_SIZE, + 0, + OSM_LFTR_RCV_POOL_GROW_SIZE, + sizeof(osm_lftr_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_lftr_rcv_new_lftr( + IN osm_lftr_rcv_t* const p_rcv, + IN const osm_switch_t* const p_sw, + IN cl_qlist_t* const p_list, + IN ib_net16_t const lid, + IN ib_net16_t const block ) +{ + osm_lftr_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_lftr_rcv_new_lftr ); + + p_rec_item = (osm_lftr_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_lftr_rcv_new_lftr: ERR 4402: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lftr_rcv_new_lftr: " + "New LinearForwardingTable: sw 0x%016" PRIx64 + "\n\t\t\t\tblock 0x%02X lid 0x%02X\n", + cl_ntoh64( osm_node_get_node_guid( p_sw->p_node ) ), + cl_ntoh16( block ), cl_ntoh16( lid ) + ); + } + + memset( &p_rec_item->rec, 0, sizeof(ib_lft_record_t) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.block_num = block; + + /* copy the lft block */ + osm_switch_get_fwd_tbl_block( p_sw, cl_ntoh16(block), p_rec_item->rec.lft ); + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( 
p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static osm_port_t* +__osm_lftr_get_port_by_guid( + IN osm_lftr_rcv_t* const p_rcv, + IN uint64_t port_guid ) +{ + osm_port_t* p_port; + + CL_PLOCK_ACQUIRE(p_rcv->p_lock); + + p_port = (osm_port_t *)cl_qmap_get(&p_rcv->p_subn->port_guid_tbl, + port_guid); + if (p_port == (osm_port_t *)cl_qmap_end(&p_rcv->p_subn->port_guid_tbl)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lftr_get_port_by_guid ERR 4404: " + "Invalid port GUID 0x%016" PRIx64 "\n", + port_guid ); + p_port = NULL; + } + + CL_PLOCK_RELEASE(p_rcv->p_lock); + return p_port; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_lftr_rcv_by_comp_mask( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_lftr_search_ctxt_t* const p_ctxt = + (osm_lftr_search_ctxt_t *)context; + const osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + const ib_lft_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec; + osm_lftr_rcv_t* const p_rcv = p_ctxt->p_rcv; + ib_net64_t const comp_mask = p_ctxt->comp_mask; + const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp; + osm_port_t* p_port; + uint16_t min_lid_ho, max_lid_ho; + uint16_t min_block, max_block, block; + const osm_physp_t* p_physp; + + /* In switches, the port guid is the node guid. */ + p_port = + __osm_lftr_get_port_by_guid( p_rcv, p_sw->p_node->node_info.port_guid ); + if (! p_port) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_lftr_rcv_by_comp_mask: ERR 4405: " + "Failed to find Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + return; + } + + /* check that the requester physp and the current physp are under + the same partition. */ + p_physp = osm_port_get_default_phys_ptr( p_port ); + if (! p_physp) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_lftr_rcv_by_comp_mask: ERR 4406: " + "Failed to find default physical Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + return; + } + if (! osm_physp_share_pkey( p_rcv->p_log, p_req_physp, p_physp )) + return; + + /* get the port 0 of the switch */ + osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho ); + + /* compare the lids - if required */ + if( comp_mask & IB_LFTR_COMPMASK_LID ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lftr_rcv_by_comp_mask: " + "Comparing lid:0x%02X to port lid range: 0x%02X .. 0x%02X\n", + cl_ntoh16( p_rcvd_rec->lid ), min_lid_ho, max_lid_ho + ); + /* ok we are ready for range check */ + if (min_lid_ho > cl_ntoh16(p_rcvd_rec->lid) || + max_lid_ho < cl_ntoh16(p_rcvd_rec->lid)) + return; + } + + /* now we need to decide which blocks to output */ + if( comp_mask & IB_LFTR_COMPMASK_BLOCK ) + { + max_block = min_block = cl_ntoh16(p_rcvd_rec->block_num); + } + else + { + /* use as many blocks as "in use" */ + min_block = 0; + max_block = osm_switch_get_max_block_id_in_use(p_sw); + } + + /* so we can add these blocks one by one ... 
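+ (each block covers 64 LIDs, so, for example, a switch whose table is
+ populated up to LID 300 reports blocks 0 through 4)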
*/ + for (block = min_block; block <= max_block; block++) + __osm_lftr_rcv_new_lftr( p_rcv, p_sw, p_ctxt->p_list, + osm_port_get_base_lid(p_port), + cl_hton16(block) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lftr_rcv_process( + IN osm_lftr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_lft_record_t* p_rcvd_rec; + ib_lft_record_t* p_resp_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_lftr_search_ctxt_t context; + osm_lftr_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_lftr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_lft_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_LFT_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_process: ERR 4408: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_process: ERR 4407: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + /* Go over all switches */ + cl_qmap_apply_func( &p_rcv->p_subn->sw_guid_tbl, + __osm_lftr_rcv_by_comp_mask, + &context ); + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_process: ERR 4409: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... 
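+ i.e. drain the list and return every item to the pool before bailing
+ out.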
*/ + p_rec_item = (osm_lftr_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_lftr_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_lftr_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + /* we limit the number of records to a single packet */ + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_lft_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_lftr_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_lftr_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) && + (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_lft_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_process: ERR 4410: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_lftr_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RESOURCES ); + + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
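+
+ (with sizeof(ib_lft_record_t) = 72, assuming the standard layout of
+ LID, block number, reserved bytes and a 64-entry block, a non-RMPP
+ reply likewise carries at most (256 - 56) / 72 = 2 records)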
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_lft_record_t) ); + + p_resp_rec = (ib_lft_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_lftr_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_process: ERR 4411: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record_ctrl.c new file mode 100644 index 00000000..c39efe2d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_lft_record_ctrl.c @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_lftr_rcv_ctrl_t. + * This object represents the LinearForwardingTable request controller object. + * This object is part of the opensm family of objects. 
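+ * As with the other SA record controllers, its only job is to register
+ * a dispatcher callback (OSM_MSG_MAD_LFT_RECORD) that forwards each
+ * MAD wrapper to osm_lftr_rcv_process.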
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_lftr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_lftr_rcv_process( ((osm_lftr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lftr_rcv_ctrl_construct( + IN osm_lftr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_lftr_rcv_ctrl_destroy( + IN osm_lftr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_lftr_rcv_ctrl_init( + IN osm_lftr_rcv_ctrl_t* const p_ctrl, + IN osm_lftr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_lftr_rcv_ctrl_init ); + + osm_lftr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_LFT_RECORD, + __osm_lftr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_lftr_rcv_ctrl_init: ERR 4501: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record.c new file mode 100644 index 00000000..8e242b08 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record.c @@ -0,0 +1,777 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_lr_rcv_t. + * This object represents the LinkRecord Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.8 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_LR_RCV_POOL_MIN_SIZE 64 +#define OSM_LR_RCV_POOL_GROW_SIZE 64 + +typedef struct _osm_lr_item +{ + cl_pool_item_t pool_item; + ib_link_record_t link_rec; +} osm_lr_item_t; + +/********************************************************************** + **********************************************************************/ +void +osm_lr_rcv_construct( + IN osm_lr_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->lr_pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lr_rcv_destroy( + IN osm_lr_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_lr_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->lr_pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_lr_rcv_init( + IN osm_lr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_lr_rcv_init ); + + osm_lr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->lr_pool, + OSM_LR_RCV_POOL_MIN_SIZE, + 0, + OSM_LR_RCV_POOL_GROW_SIZE, + sizeof(osm_lr_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_lr_rcv_build_physp_link( + IN osm_lr_rcv_t* const p_rcv, + IN const ib_net16_t from_lid, + IN const ib_net16_t to_lid, + IN const uint8_t from_port, + IN const uint8_t to_port, + IN cl_qlist_t* p_list ) +{ + osm_lr_item_t* p_lr_item; + + p_lr_item = (osm_lr_item_t*)cl_qlock_pool_get( &p_rcv->lr_pool ); + if( p_lr_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_lr_rcv_build_physp_link: ERR 1801: " + "Unable to acquire link record\n" + "\t\t\t\tFrom port 0x%u\n" + "\t\t\t\tTo port 0x%u\n" + "\t\t\t\tFrom lid 0x%X\n" + "\t\t\t\tTo lid 0x%X\n", + from_port, to_port, + cl_ntoh16(from_lid), + cl_ntoh16(to_lid) ); + return; + } + + p_lr_item->link_rec.from_port_num = from_port; + p_lr_item->link_rec.to_port_num = to_port; + p_lr_item->link_rec.to_lid = to_lid; + p_lr_item->link_rec.from_lid = from_lid; + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_lr_item->pool_item ); +} + +/********************************************************************** + 
**********************************************************************/ +static void +__get_base_lid( + IN const osm_physp_t* p_physp, + OUT uint16_t * p_base_lid ) +{ + if(p_physp->p_node->node_info.node_type == IB_NODE_TYPE_SWITCH) + { + *p_base_lid = + cl_ntoh16( + osm_physp_get_base_lid( + osm_node_get_physp_ptr(p_physp->p_node, 0)) + ); + } + else + { + *p_base_lid = + cl_ntoh16(osm_physp_get_base_lid(p_physp)); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_lr_rcv_get_physp_link( + IN osm_lr_rcv_t* const p_rcv, + IN const ib_link_record_t* const p_lr, + IN const osm_physp_t* p_src_physp, + IN const osm_physp_t* p_dest_physp, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list, + IN const osm_physp_t* p_req_physp ) +{ + uint8_t src_port_num; + uint8_t dest_port_num; + ib_net16_t from_base_lid_ho; + ib_net16_t to_base_lid_ho; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_lr_rcv_get_physp_link ); + + /* + If only one end of the link is specified, determine + the other side. + */ + if( p_src_physp ) + { + if( !osm_physp_is_valid( p_src_physp ) ) + goto Exit; + + if( p_dest_physp ) + { + if( !osm_physp_is_valid( p_dest_physp ) ) + goto Exit; + /* + Ensure the two physp's are actually connected. + If not, bail out. + */ + if( osm_physp_get_remote( p_src_physp ) != p_dest_physp ) + goto Exit; + } + else + { + p_dest_physp = osm_physp_get_remote( p_src_physp ); + + if(p_dest_physp == NULL) + goto Exit; + + if( !osm_physp_is_valid( p_dest_physp ) ) + goto Exit; + } + } + else + { + if( p_dest_physp ) + { + if( !osm_physp_is_valid( p_dest_physp ) ) + goto Exit; + + p_src_physp = osm_physp_get_remote( p_dest_physp ); + + if(p_src_physp == NULL) + goto Exit; + } + else + goto Exit; /* no physp's, so nothing to do */ + } + + CL_ASSERT( p_src_physp ); + CL_ASSERT( p_dest_physp ); + CL_ASSERT( osm_physp_is_valid( p_src_physp ) ); + CL_ASSERT( osm_physp_is_valid( p_dest_physp ) ); + + /* Check that the p_src_physp, p_dest_physp and p_req_physp + all share a pkey (doesn't have to be the same p_key). */ + if (! osm_physp_share_pkey(p_rcv->p_log, p_src_physp, p_dest_physp)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lr_rcv_get_physp_link: " + "Source and Dest PhysPorts do not share PKey\n"); + goto Exit; + } + if (! osm_physp_share_pkey(p_rcv->p_log, p_src_physp, p_req_physp)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lr_rcv_get_physp_link: " + "Source and Requester PhysPorts do not share PKey\n"); + goto Exit; + } + if (! 
osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_dest_physp) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lr_rcv_get_physp_link: " + "Requester and Dest PhysPorts do not share PKey\n"); + goto Exit; + } + + src_port_num = osm_physp_get_port_num( p_src_physp ); + dest_port_num = osm_physp_get_port_num( p_dest_physp ); + + if( comp_mask & IB_LR_COMPMASK_FROM_PORT ) + if( src_port_num != p_lr->from_port_num ) + goto Exit; + + if( comp_mask & IB_LR_COMPMASK_TO_PORT ) + if( dest_port_num != p_lr->to_port_num ) + goto Exit; + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_lr_rcv_get_physp_link: " + "Acquiring link record\n" + "\t\t\t\tsrc port 0x%" PRIx64 " (port 0x%X)" + ", dest port 0x%" PRIx64 " (port 0x%X)\n", + cl_ntoh64( osm_physp_get_port_guid( p_src_physp ) ), + src_port_num, + cl_ntoh64( osm_physp_get_port_guid( p_dest_physp ) ), + dest_port_num ); + } + + __get_base_lid(p_src_physp, &from_base_lid_ho); + __get_base_lid(p_dest_physp, &to_base_lid_ho); + + __osm_lr_rcv_build_physp_link(p_rcv, cl_ntoh16(from_base_lid_ho), + cl_ntoh16(to_base_lid_ho), + src_port_num, dest_port_num, p_list); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_lr_rcv_get_port_links( + IN osm_lr_rcv_t* const p_rcv, + IN const ib_link_record_t* const p_lr, + IN const osm_port_t* p_src_port, + IN const osm_port_t* p_dest_port, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list, + IN const osm_physp_t* p_req_physp ) +{ + const osm_physp_t* p_src_physp; + const osm_physp_t* p_dest_physp; + const cl_qmap_t* p_port_tbl; + uint8_t port_num; + uint8_t num_ports; + uint8_t dest_num_ports; + uint8_t dest_port_num; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_lr_rcv_get_port_links ); + + if( p_src_port ) + { + if( p_dest_port ) + { + /* + Build an LR for every link connected between both ports. + The inner function will discard physp combinations + that do not actually connect. Don't bother screening + for that here. + */ + num_ports = osm_port_get_num_physp( p_src_port ); + dest_num_ports = osm_port_get_num_physp( p_dest_port ); + for( port_num = 1; port_num < num_ports; port_num++ ) + { + p_src_physp = osm_port_get_phys_ptr( p_src_port, port_num ); + for( dest_port_num = 1; dest_port_num < dest_num_ports; + dest_port_num++ ) + { + p_dest_physp = osm_port_get_phys_ptr( p_dest_port, + dest_port_num ); + /* both physical ports should be with data */ + if (p_src_physp && p_dest_physp) + __osm_lr_rcv_get_physp_link( p_rcv, p_lr, p_src_physp, + p_dest_physp, comp_mask, + p_list, p_req_physp ); + } + } + } + else + { + /* + Build an LR for every link connected from the source port. + */ + if( comp_mask & IB_LR_COMPMASK_FROM_PORT ) + { + port_num = p_lr->from_port_num; + /* If the port number is out of the range of the p_src_port, then + this couldn't be a relevant record. 
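(from_port_num is taken straight from the request, so the bounds check
+         below keeps untrusted input from indexing past the physp table.)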
*/ + if (port_num < p_src_port->physp_tbl_size) + { + p_src_physp = osm_port_get_phys_ptr( p_src_port, port_num ); + if (p_src_physp) + __osm_lr_rcv_get_physp_link( p_rcv, p_lr, p_src_physp, + NULL, comp_mask, p_list, + p_req_physp ); + } + } + else + { + num_ports = osm_port_get_num_physp( p_src_port ); + for( port_num = 1; port_num < num_ports; port_num++ ) + { + p_src_physp = osm_port_get_phys_ptr( p_src_port, port_num ); + if (p_src_physp) + __osm_lr_rcv_get_physp_link( p_rcv, p_lr, p_src_physp, + NULL, comp_mask, p_list, + p_req_physp ); + } + } + } + } + else + { + if( p_dest_port ) + { + /* + Build an LR for every link connected to the dest port. + */ + if( comp_mask & IB_LR_COMPMASK_TO_PORT ) + { + port_num = p_lr->to_port_num; + /* If the port number is out of the range of the p_dest_port, then + this couldn't be a relevant record. */ + if (port_num < p_dest_port->physp_tbl_size ) + { + p_dest_physp = osm_port_get_phys_ptr( + p_dest_port, port_num ); + if (p_dest_physp) + __osm_lr_rcv_get_physp_link( p_rcv, p_lr, NULL, + p_dest_physp, comp_mask, + p_list, p_req_physp ); + } + } + else + { + num_ports = osm_port_get_num_physp( p_dest_port ); + for( port_num = 1; port_num < num_ports; port_num++ ) + { + p_dest_physp = osm_port_get_phys_ptr( + p_dest_port, port_num ); + if (p_dest_physp) + __osm_lr_rcv_get_physp_link( p_rcv, p_lr, NULL, + p_dest_physp, comp_mask, + p_list, p_req_physp ); + } + } + } + else + { + /* + Process the world (recurse once back into this function). + */ + p_port_tbl = &p_rcv->p_subn->port_guid_tbl; + p_src_port = (osm_port_t*)cl_qmap_head( p_port_tbl ); + + while( p_src_port != (osm_port_t*)cl_qmap_end( p_port_tbl ) ) + { + __osm_lr_rcv_get_port_links( p_rcv, p_lr, p_src_port, + NULL, comp_mask, p_list, + p_req_physp ); + + p_src_port = (osm_port_t*)cl_qmap_next( + &p_src_port->map_item ); + } + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + Returns the SA status to return to the client. + **********************************************************************/ +static ib_net16_t +__osm_lr_rcv_get_end_points( + IN osm_lr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + OUT const osm_port_t** const pp_src_port, + OUT const osm_port_t** const pp_dest_port ) +{ + const ib_link_record_t* p_lr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + ib_api_status_t status; + ib_net16_t sa_status = IB_SA_MAD_STATUS_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_lr_rcv_get_end_points ); + + /* + Determine what fields are valid and then get a pointer + to the source and destination port objects, if possible. + */ + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_lr = (ib_link_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + *pp_src_port = NULL; + *pp_dest_port = NULL; + + if( p_sa_mad->comp_mask & IB_LR_COMPMASK_FROM_LID ) + { + status = osm_get_port_by_base_lid( p_rcv->p_subn, + p_lr->from_lid, + pp_src_port ); + + if( (status != IB_SUCCESS) || (*pp_src_port == NULL) ) + { + /* + This 'error' is the client's fault (bad lid) so + don't enter it as an error in our own log. + Return an error response to the client. 
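+        (IB_SA_MAD_STATUS_NO_RECORDS is the SA class-specific status for
+        "no matching record"; it is deliberately not logged as an error.)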
+      */
+      osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+               "__osm_lr_rcv_get_end_points: "
+               "No source port with LID = 0x%X\n",
+               cl_ntoh16( p_lr->from_lid ) );
+
+      sa_status = IB_SA_MAD_STATUS_NO_RECORDS;
+      goto Exit;
+    }
+  }
+
+  if( p_sa_mad->comp_mask & IB_LR_COMPMASK_TO_LID )
+  {
+    status = osm_get_port_by_base_lid( p_rcv->p_subn,
+                                       p_lr->to_lid,
+                                       pp_dest_port );
+
+    if( (status != IB_SUCCESS) || (*pp_dest_port == NULL) )
+    {
+      /*
+        This 'error' is the client's fault (bad LID), so
+        don't enter it as an error in our own log.
+        Return an error response to the client.
+      */
+      osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+               "__osm_lr_rcv_get_end_points: "
+               "No dest port with LID = 0x%X\n",
+               cl_ntoh16( p_lr->to_lid ) );
+
+      sa_status = IB_SA_MAD_STATUS_NO_RECORDS;
+      goto Exit;
+    }
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( sa_status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_lr_rcv_respond(
+  IN osm_lr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw,
+  IN cl_qlist_t* const p_list )
+{
+  osm_madw_t* p_resp_madw;
+  const ib_sa_mad_t* p_sa_mad;
+  ib_sa_mad_t* p_resp_sa_mad;
+  size_t num_rec, num_copied;
+#ifndef VENDOR_RMPP_SUPPORT
+  size_t trim_num_rec;
+#endif
+  ib_link_record_t* p_resp_lr;
+  ib_api_status_t status;
+  osm_lr_item_t* p_lr_item;
+  const ib_sa_mad_t* p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw );
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_lr_rcv_respond );
+
+  num_rec = cl_qlist_count( p_list );
+  /*
+   * C15-0.1.30:
+   * If we do a SubnAdmGet and got more than one record, it is an error!
+   */
+  if ( (p_rcvd_mad->method == IB_MAD_METHOD_GET) &&
+       (num_rec > 1)) {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_lr_rcv_respond: ERR 1806: "
+             "Got more than one record for SubnAdmGet (%zu)\n",
+             num_rec );
+    osm_sa_send_error( p_rcv->p_resp, p_madw,
+                       IB_SA_MAD_STATUS_TOO_MANY_RECORDS );
+
+    /* return the pool items we already acquired */
+    p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    while( p_lr_item != (osm_lr_item_t*)cl_qlist_end( p_list ) )
+    {
+      cl_qlock_pool_put( &p_rcv->lr_pool, &p_lr_item->pool_item );
+      p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    }
+
+    goto Exit;
+  }
+
+#ifndef VENDOR_RMPP_SUPPORT
+  trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_link_record_t);
+  if (trim_num_rec < num_rec)
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+             "__osm_lr_rcv_respond: "
+             "Number of records:%zu trimmed to:%zu to fit in one MAD\n",
+             num_rec, trim_num_rec );
+    num_rec = trim_num_rec;
+  }
+#endif
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_lr_rcv_respond: "
+             "Generating response with %zu records\n", num_rec );
+  }
+
+  /*
+    Get a MAD to reply.  The address of the MAD is in the received
+    mad_wrapper.
+  */
+  p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool,
+                                  p_madw->h_bind,
+                                  num_rec * sizeof(ib_link_record_t) + IB_SA_MAD_HDR_SIZE,
+                                  &p_madw->mad_addr );
+  if( !p_resp_madw )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_lr_rcv_respond: ERR 1802: "
+             "Unable to allocate MAD\n" );
+    /* Release the quick pool items */
+    p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    while( p_lr_item != (osm_lr_item_t*)cl_qlist_end( p_list ) )
+    {
+      cl_qlock_pool_put( &p_rcv->lr_pool, &p_lr_item->pool_item );
+      p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    }
+
+    goto Exit;
+  }
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw );
+
+  /* Copy the header from the request to the response */
+  memcpy( p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE );
+  p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
+  p_resp_sa_mad->attr_offset =
+    ib_get_attr_offset( sizeof(ib_link_record_t) );
+  /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
+  p_resp_sa_mad->sm_key = 0;
+
+#ifndef VENDOR_RMPP_SUPPORT
+  /* we support single-packet RMPP only, so set the first and
+     last flags for GetTable responses */
+  if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP)
+  {
+    p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA;
+    p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE;
+  }
+#else
+  /* forcefully mark the packet as an RMPP one */
+  if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP)
+    p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
+#endif
+
+  p_resp_lr = (ib_link_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad );
+
+  if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0))
+  {
+    p_resp_sa_mad->status = IB_SA_MAD_STATUS_NO_RECORDS;
+    memset( p_resp_lr, 0, sizeof(*p_resp_lr) );
+  }
+  else
+  {
+    p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    /* track the number of copied items so we can stop copying,
+     * but still return every item to the pool
+     */
+    num_copied = 0;
+    while( p_lr_item != (osm_lr_item_t*)cl_qlist_end( p_list ) )
+    {
+      /* Copy the Link Records from the list into the MAD, but only
+         while they fit in the MAD (since we may have trimmed num_rec) */
+      if (num_copied < num_rec)
+      {
+        *p_resp_lr = p_lr_item->link_rec;
+        num_copied++;
+      }
+      cl_qlock_pool_put( &p_rcv->lr_pool, &p_lr_item->pool_item );
+      p_resp_lr++;
+      p_lr_item = (osm_lr_item_t*)cl_qlist_remove_head( p_list );
+    }
+  }
+
+  status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE );
+  if (status != IB_SUCCESS)
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_lr_rcv_respond: ERR 1803: "
+             "Unable to send MAD (%s)\n", ib_get_err_str( status ) );
+    /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_lr_rcv_process(
+  IN osm_lr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw )
+{
+  const ib_link_record_t* p_lr;
+  const ib_sa_mad_t* p_sa_mad;
+  const osm_port_t* p_src_port;
+  const osm_port_t* p_dest_port;
+  cl_qlist_t lr_list;
+  ib_net16_t sa_status;
+  osm_physp_t* p_req_physp;
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_lr_rcv_process );
+
+  CL_ASSERT( p_madw );
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_lr = (ib_link_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+  CL_ASSERT( p_sa_mad->attr_id ==
IB_MAD_ATTR_LINK_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_sa_mad->method != IB_MAD_METHOD_GET) && + (p_sa_mad->method != IB_MAD_METHOD_GETTABLE)) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lr_rcv_process: ERR 1804: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_lr_rcv_process: ERR 1805: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_link_record( p_rcv->p_log, p_lr, OSM_LOG_DEBUG ); + + cl_qlist_init( &lr_list ); + + /* + Most SA functions (including this one) are read-only on the + subnet object, so we grab the lock non-exclusively. + */ + cl_plock_acquire( p_rcv->p_lock ); + + sa_status = __osm_lr_rcv_get_end_points( p_rcv, p_madw, + &p_src_port, &p_dest_port ); + + if( sa_status == IB_SA_MAD_STATUS_SUCCESS ) + { + __osm_lr_rcv_get_port_links( p_rcv, p_lr, p_src_port, p_dest_port, + p_sa_mad->comp_mask, &lr_list, p_req_physp ); + } + + cl_plock_release( p_rcv->p_lock ); + + if( (cl_qlist_count( &lr_list ) == 0) && + (p_sa_mad->method == IB_MAD_METHOD_GET) ) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + __osm_lr_rcv_respond( p_rcv, p_madw, &lr_list ); + + Exit: + + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record_ctrl.c new file mode 100644 index 00000000..ee517abf --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_link_record_ctrl.c @@ -0,0 +1,128 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_lr_rcv_ctrl_t. + * This object represents the link record controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_lr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_lr_rcv_process( ((osm_lr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_lr_rcv_ctrl_construct( + IN osm_lr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_lr_rcv_ctrl_destroy( + IN osm_lr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_lr_rcv_ctrl_init( + IN osm_lr_rcv_ctrl_t* const p_ctrl, + IN osm_lr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_lr_rcv_ctrl_init ); + + osm_lr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_LINK_RECORD, + __osm_lr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_lr_rcv_ctrl_init: ERR 1901: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mad_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mad_ctrl.c new file mode 100644 index 00000000..55c10f8e --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mad_ctrl.c @@ -0,0 +1,651 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_sa_mad_ctrl_t. + * This object is part of the SA object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include + +/****f* opensm: SA/__osm_sa_mad_ctrl_disp_done_callback + * NAME + * __osm_sa_mad_ctrl_disp_done_callback + * + * DESCRIPTION + * This function is the Dispatcher callback that indicates + * a received MAD has been processed by the recipient. + * + * SYNOPSIS + */ +static void +__osm_sa_mad_ctrl_disp_done_callback( + IN void* context, + IN void* p_data ) +{ + osm_sa_mad_ctrl_t* const p_ctrl = (osm_sa_mad_ctrl_t*)context; + osm_madw_t* const p_madw = (osm_madw_t*)p_data; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sa_mad_ctrl_disp_done_callback ); + + CL_ASSERT( p_madw ); + /* + Return the MAD & wrapper to the pool. + */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/************/ + +/****f* opensm: SA/__osm_sa_mad_ctrl_process + * NAME + * __osm_sa_mad_ctrl_process + * + * DESCRIPTION + * This function handles known methods for received MADs. + * + * SYNOPSIS + */ +static void +__osm_sa_mad_ctrl_process( + IN osm_sa_mad_ctrl_t* const p_ctrl, + IN osm_madw_t *p_madw ) +{ + ib_sa_mad_t* p_sa_mad; + cl_status_t status; + cl_disp_msgid_t msg_id = CL_DISP_MSGID_NONE; + uint64_t last_dispatched_msg_queue_time_msec; + uint32_t num_messages; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sa_mad_ctrl_process ); + + /* + If the dispatcher is showing us that it is overloaded + there is no point in placing the request in. We should instead provide + immediate response - IB_RESOURCE_BUSY + But how do we know? + The dispatcher reports back the number of outstanding messages and the + time the last message stayed in the queue. + HACK: Actually, we cannot send a mad from within the receive callback; + thus - we will just drop it. + */ + cl_disp_get_queue_status(p_ctrl->h_disp, + &num_messages, + &last_dispatched_msg_queue_time_msec); + if ((num_messages > 1) && + (p_ctrl->p_subn->opt.max_msg_fifo_timeout) && + (last_dispatched_msg_queue_time_msec > + p_ctrl->p_subn->opt.max_msg_fifo_timeout)) + { + osm_log( p_ctrl->p_log, OSM_LOG_INFO, + "__osm_sa_mad_ctrl_process: " + /* "Responding BUSY status since the dispatcher is already"*/ + "Dropping MAD since the dispatcher is already" + " overloaded with %u messages and queue time of:" + "%" PRIu64 "[msec]\n", + num_messages, last_dispatched_msg_queue_time_msec ); + + /* send a busy response */ + /* osm_sa_send_error( p_ctrl->p_resp, p_madw, IB_RESOURCE_BUSY ); */ + + /* return the request to the pool */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + /* + Note that attr_id (like the rest of the MAD) is in + network byte order. 
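+    The case labels below are network-order constants as well, so the
+    switch itself needs no byte swap; only the error log in the default
+    case converts with cl_ntoh16 for readability.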
+ */ + switch( p_sa_mad->attr_id ) + { + case IB_MAD_ATTR_CLASS_PORT_INFO: + msg_id = OSM_MSG_MAD_CLASS_PORT_INFO; + break; + + case IB_MAD_ATTR_NODE_RECORD: + msg_id = OSM_MSG_MAD_NODE_RECORD; + break; + + case IB_MAD_ATTR_PORTINFO_RECORD: + msg_id = OSM_MSG_MAD_PORTINFO_RECORD; + break; + + case IB_MAD_ATTR_LINK_RECORD: + msg_id = OSM_MSG_MAD_LINK_RECORD; + break; + + case IB_MAD_ATTR_SMINFO_RECORD: + msg_id = OSM_MSG_MAD_SMINFO_RECORD; + break; + + case IB_MAD_ATTR_SERVICE_RECORD: + msg_id = OSM_MSG_MAD_SERVICE_RECORD; + break; + + case IB_MAD_ATTR_PATH_RECORD: + msg_id = OSM_MSG_MAD_PATH_RECORD; + break; + + case IB_MAD_ATTR_MCMEMBER_RECORD: + msg_id = OSM_MSG_MAD_MCMEMBER_RECORD; + break; + + case IB_MAD_ATTR_INFORM_INFO: + msg_id = OSM_MSG_MAD_INFORM_INFO; + break; + + case IB_MAD_ATTR_VLARB_RECORD: + msg_id = OSM_MSG_MAD_VL_ARB_RECORD; + break; + + case IB_MAD_ATTR_SLVL_RECORD: + msg_id = OSM_MSG_MAD_SLVL_TBL_RECORD; + break; + + case IB_MAD_ATTR_PKEY_TBL_RECORD: + msg_id = OSM_MSG_MAD_PKEY_TBL_RECORD; + break; + + case IB_MAD_ATTR_LFT_RECORD: + msg_id = OSM_MSG_MAD_LFT_RECORD; + break; + + case IB_MAD_ATTR_GUIDINFO_RECORD: + msg_id = OSM_MSG_MAD_GUIDINFO_RECORD; + break; + + case IB_MAD_ATTR_INFORM_INFO_RECORD: + msg_id = OSM_MSG_MAD_INFORM_INFO_RECORD; + break; + + case IB_MAD_ATTR_SWITCH_INFO_RECORD: + msg_id = OSM_MSG_MAD_SWITCH_INFO_RECORD; + break; + + case IB_MAD_ATTR_MFT_RECORD: + msg_id = OSM_MSG_MAD_MFT_RECORD; + break; + +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + case IB_MAD_ATTR_MULTIPATH_RECORD: + msg_id = OSM_MSG_MAD_MULTIPATH_RECORD; + break; +#endif + + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_process: ERR 1A01: " + "Unsupported attribute = 0x%X\n", + cl_ntoh16( p_sa_mad->attr_id ) ); + osm_dump_sa_mad( p_ctrl->p_log, p_sa_mad, OSM_LOG_ERROR ); + } + + if( msg_id != CL_DISP_MSGID_NONE ) + { + /* + Post this MAD to the dispatcher for asynchronous + processing by the appropriate controller. + */ + + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sa_mad_ctrl_process: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( msg_id ) ); + + status = cl_disp_post( p_ctrl->h_disp, + msg_id, + p_madw, + __osm_sa_mad_ctrl_disp_done_callback, + p_ctrl ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_process: ERR 1A02: " + "Dispatcher post message failed (%s) for attribute = 0x%X\n", + CL_STATUS_MSG( status ), + cl_ntoh16( p_sa_mad->attr_id ) ); + + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + } + else + { + /* + There is an unknown MAD attribute type for which there is + no recipient. Simply retire the MAD here. + */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SA/__osm_sa_mad_ctrl_rcv_callback + * NAME + * __osm_sa_mad_ctrl_rcv_callback + * + * DESCRIPTION + * This is the callback from the transport layer for received MADs. + * + * SYNOPSIS + */ +static void +__osm_sa_mad_ctrl_rcv_callback( + IN osm_madw_t *p_madw, + IN void *bind_context, + IN osm_madw_t *p_req_madw ) +{ + osm_sa_mad_ctrl_t* p_ctrl = (osm_sa_mad_ctrl_t*)bind_context; + ib_sa_mad_t* p_sa_mad; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sa_mad_ctrl_rcv_callback ); + + CL_ASSERT( p_madw ); + + /* + A MAD was received from the wire, possibly in response to a request. 
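+     SA requests arrive on QP1 (the GSI), hence the qp1_mads_rcvd counter
+     below; the increment is atomic because receive callbacks may run
+     concurrently with other threads reading the statistics.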
+ */ + cl_atomic_inc( &p_ctrl->p_stats->qp1_mads_rcvd ); + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sa_mad_ctrl_rcv_callback: " + "%u QP1 MADs received\n", + p_ctrl->p_stats->qp1_mads_rcvd ); + } + + /* + * C15-0.1.3 requires not responding to any MAD if the SM is + * not in active state! + * We will not respond if the sm_state is not MASTER, or if the + * first_time_master_sweep flag (of the subnet) is TRUE - this + * flag indicates that the master still didn't finish its first + * sweep, so the subnet is not up and stable yet. + */ + if ( p_ctrl->p_subn->sm_state != IB_SMINFO_STATE_MASTER ) + { + osm_log( p_ctrl->p_log, OSM_LOG_VERBOSE, + "__osm_sa_mad_ctrl_rcv_callback: " + "Received SA MAD while SM not MASTER. MAD ignored\n"); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + if ( p_ctrl->p_subn->first_time_master_sweep == TRUE ) + { + osm_log( p_ctrl->p_log, OSM_LOG_VERBOSE, + "__osm_sa_mad_ctrl_rcv_callback: " + "Received SA MAD while SM in first sweep. MAD ignored\n"); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_FRAMES ) ) + osm_dump_sa_mad( p_ctrl->p_log, p_sa_mad, OSM_LOG_FRAMES ); + + /* + * C15-0.1.5 - Table 185: SA Header - p884 + * SM_key should be either 0 or match the current SM_Key + * otherwise discard the MAD. + */ + if ((p_sa_mad->sm_key != 0) && + (p_sa_mad->sm_key != p_ctrl->p_subn->opt.sm_key)) { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_rcv_callback: ERR 1A04: " + "Non-Zero SA MAD SM_Key: 0x%" PRIx64 " != SM_Key: 0x%" PRIx64 + "; MAD ignored\n", + cl_ntoh64(p_sa_mad->sm_key), + cl_ntoh64(p_ctrl->p_subn->opt.sm_key) + ); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + + switch( p_sa_mad->method ) + { + case IB_MAD_METHOD_REPORT_RESP: + /* we do not really do anything with report represses - + just retire the transaction */ + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sa_mad_ctrl_rcv_callback: " + "Received Report Repress. Retiring the transaction\n"); + + if (p_req_madw) + osm_mad_pool_put( p_ctrl->p_mad_pool, p_req_madw ); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + + break; + + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_GETTABLE: +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + case IB_MAD_METHOD_GETMULTI: +#endif + case IB_MAD_METHOD_SET: + case IB_MAD_METHOD_DELETE: + __osm_sa_mad_ctrl_process( p_ctrl, p_madw ); + break; + + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_rcv_callback: ERR 1A05: " + "Unsupported method = 0x%X\n", + p_sa_mad->method ); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SA/__osm_sa_mad_ctrl_send_err_callback + * NAME + * __osm_sa_mad_ctrl_send_err_callback + * + * DESCRIPTION + * This is the callback from the transport layer for send errors + * on MADs that were expecting a response. 
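+ *  In practice the SA originates requests only for Report(Notice), so
+ *  this callback is expected to fire rarely.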
+ * + * SYNOPSIS + */ +void +__osm_sa_mad_ctrl_send_err_callback( + IN void *bind_context, + IN osm_madw_t *p_madw ) +{ + osm_sa_mad_ctrl_t* p_ctrl = (osm_sa_mad_ctrl_t*)bind_context; + cl_status_t status; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sa_mad_ctrl_send_err_callback ); + + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_send_err_callback: ERR 1A06: " + "MAD transaction completed in error\n" ); + + /* + We should never be here since the SA never originates a request. + Unless we generated a Report(Notice) + */ + + CL_ASSERT( p_madw ); + + /* + An error occurred. No response was received to a request MAD. + Retire the original request MAD. + */ + + osm_dump_sa_mad(p_ctrl->p_log, osm_madw_get_sa_mad_ptr( p_madw ), + OSM_LOG_ERROR ); + + /* __osm_sm_mad_ctrl_update_wire_stats( p_ctrl ); */ + + if( osm_madw_get_err_msg( p_madw ) != CL_DISP_MSGID_NONE ) + { + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sa_mad_ctrl_send_err_callback: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( osm_madw_get_err_msg( p_madw ) ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + osm_madw_get_err_msg( p_madw ), + p_madw, + __osm_sa_mad_ctrl_disp_done_callback, + p_ctrl ); + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sa_mad_ctrl_send_err_callback: ERR 1A07: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + } + } + else + { + /* + No error message was provided, just retire the MAD. + */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/********************************************************************** + **********************************************************************/ +void +osm_sa_mad_ctrl_construct( + IN osm_sa_mad_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sa_mad_ctrl_destroy( + IN osm_sa_mad_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sa_mad_ctrl_init( + IN osm_sa_mad_ctrl_t* const p_ctrl, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_vendor_t* const p_vendor, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_stats_t* const p_stats, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sa_mad_ctrl_init ); + + osm_sa_mad_ctrl_construct( p_ctrl ); + + p_ctrl->p_log = p_log; + p_ctrl->p_disp = p_disp; + p_ctrl->p_mad_pool = p_mad_pool; + p_ctrl->p_vendor = p_vendor; + p_ctrl->p_stats = p_stats; + p_ctrl->p_subn = p_subn; + p_ctrl->p_resp = p_resp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + CL_DISP_MSGID_NONE, + NULL, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sa_mad_ctrl_init: ERR 1A08: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + 
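+/*
+  Illustrative bring-up sketch (compiled out; the wrapper name and the
+  calling site are assumptions, not part of this file's API): the
+  intended order is init at startup, bind once the port GUID is known,
+  and destroy on shutdown.
+*/
+#if 0
+static ib_api_status_t
+__example_sa_mad_ctrl_start(
+  IN osm_sa_mad_ctrl_t* const p_ctrl,
+  IN osm_sa_resp_t* const p_resp,
+  IN osm_mad_pool_t* const p_mad_pool,
+  IN osm_vendor_t* const p_vendor,
+  IN osm_subn_t* const p_subn,
+  IN osm_log_t* const p_log,
+  IN osm_stats_t* const p_stats,
+  IN cl_dispatcher_t* const p_disp,
+  IN const ib_net64_t port_guid )
+{
+  ib_api_status_t status;
+
+  /* Register with the dispatcher (no inbound message IDs are claimed;
+     the controller only posts messages to the other SA controllers). */
+  status = osm_sa_mad_ctrl_init( p_ctrl, p_resp, p_mad_pool, p_vendor,
+                                 p_subn, p_log, p_stats, p_disp );
+  if( status != IB_SUCCESS )
+    return( status );
+
+  /* Bind QP1 on the given port; once this returns, the receive and
+     send-error callbacks above may be invoked. */
+  status = osm_sa_mad_ctrl_bind( p_ctrl, port_guid );
+  if( status != IB_SUCCESS )
+    osm_sa_mad_ctrl_destroy( p_ctrl );
+
+  return( status );
+}
+#endif
+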
+/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sa_mad_ctrl_bind( + IN osm_sa_mad_ctrl_t* const p_ctrl, + IN const ib_net64_t port_guid ) +{ + osm_bind_info_t bind_info; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_ctrl->p_log, osm_sa_mad_ctrl_bind ); + + if( p_ctrl->h_bind != OSM_BIND_INVALID_HANDLE ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "osm_sa_mad_ctrl_bind: ERR 1A09: " + "Multiple binds not allowed\n" ); + status = IB_ERROR; + goto Exit; + } + + bind_info.class_version = 2; + bind_info.is_responder = TRUE; + bind_info.is_report_processor = FALSE; + bind_info.is_trap_processor = FALSE; + bind_info.mad_class = IB_MCLASS_SUBN_ADM; + bind_info.port_guid = port_guid; + bind_info.recv_q_size = OSM_SM_DEFAULT_QP1_RCV_SIZE; + bind_info.send_q_size = OSM_SM_DEFAULT_QP1_SEND_SIZE; + + osm_log( p_ctrl->p_log, OSM_LOG_VERBOSE, + "osm_sa_mad_ctrl_bind: " + "Binding to port GUID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + + p_ctrl->h_bind = osm_vendor_bind( p_ctrl->p_vendor, + &bind_info, + p_ctrl->p_mad_pool, + __osm_sa_mad_ctrl_rcv_callback, + __osm_sa_mad_ctrl_send_err_callback, + p_ctrl ); + + if( p_ctrl->h_bind == OSM_BIND_INVALID_HANDLE ) + { + status = IB_ERROR; + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "osm_sa_mad_ctrl_bind: ERR 1A10: " + "Vendor specific bind failed (%s)\n", + ib_get_err_str(status) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sa_mad_ctrl_unbind( + IN osm_sa_mad_ctrl_t* const p_ctrl) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_ctrl->p_log, osm_sa_mad_ctrl_unbind ); + + if( p_ctrl->h_bind == OSM_BIND_INVALID_HANDLE ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "osm_sa_mad_ctrl_unbind: ERR 1A11: " + "No previous bind\n" ); + status = IB_ERROR; + goto Exit; + } + + osm_vendor_unbind( p_ctrl->h_bind ); + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record.c new file mode 100644 index 00000000..e5875d12 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record.c @@ -0,0 +1,2383 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_mcmr_recv_t. + * This object represents the MCMemberRecord Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.15 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_MCMR_RCV_POOL_MIN_SIZE 32 +#define OSM_MCMR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_mcmr_item +{ + cl_pool_item_t pool_item; + ib_member_rec_t rec; +} osm_mcmr_item_t; + +typedef struct osm_sa_mcmr_search_ctxt { + const ib_member_rec_t *p_mcmember_rec; + osm_mgrp_t *p_mgrp; + osm_mcmr_recv_t *p_rcv; + cl_qlist_t *p_list; /* hold results */ + ib_net64_t comp_mask; + const osm_physp_t* p_req_physp; + boolean_t trusted_req; +} osm_sa_mcmr_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_mcmr_rcv_construct( + IN osm_mcmr_recv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcmr_rcv_destroy( + IN osm_mcmr_recv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_mcmr_rcv_destroy ); + + cl_qlock_pool_destroy( &p_rcv->pool ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mcmr_rcv_init( + IN osm_sm_t * const p_sm, + IN osm_mcmr_recv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_mcmr_rcv_init ); + + osm_mcmr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_sm = p_sm; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + p_rcv->mlid_ho = 0xC000; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_MCMR_RCV_POOL_MIN_SIZE, + 0, + OSM_MCMR_RCV_POOL_GROW_SIZE, + sizeof(osm_mcmr_item_t), + NULL, NULL, NULL ); + if (status != CL_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_init: ERR 1B02: " + "qlock pool init failed (%d)\n", + status ); + } + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + A search function that compares the given mgrp with the search context + if there is a match by mgid the p_mgrp is copied to the search context + p_mgrp component + + Inputs: + p_map_item - which is part of a mgrp object + context - points to the osm_sa_mcmr_search_ctxt_t including the mgid + looked for and the result p_mgrp +**********************************************************************/ +static void 
+__search_mgrp_by_mgid( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_mgrp_t* p_mgrp = (osm_mgrp_t*)p_map_item; + osm_sa_mcmr_search_ctxt_t *p_ctxt = (osm_sa_mcmr_search_ctxt_t *) context; + const ib_member_rec_t *p_recvd_mcmember_rec; + osm_mcmr_recv_t *p_rcv; + + p_recvd_mcmember_rec = p_ctxt->p_mcmember_rec; + p_rcv = p_ctxt->p_rcv; + + /* ignore groups marked for deletion */ + if (p_mgrp->to_be_deleted) + return; + + /* compare entire MGID so different scope will not sneak in for + the same MGID */ + if (memcmp(&p_mgrp->mcmember_rec.mgid, + &p_recvd_mcmember_rec->mgid, + sizeof(ib_gid_t))) + return; + + if (p_ctxt->p_mgrp) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__search_mgrp_by_mgid: ERR 1B03: " + "Multiple MC groups for same MGID\n" ); + return; + } + + p_ctxt->p_mgrp = p_mgrp; + +} + +/********************************************************************** + Look for a MGRP in the mgrp_mlid_tbl by mlid +**********************************************************************/ +static osm_mgrp_t * +__get_mgrp_by_mlid( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net16_t const mlid) +{ + cl_map_item_t *map_item; + + map_item = cl_qmap_get(&p_rcv->p_subn->mgrp_mlid_tbl, mlid); + if (map_item == cl_qmap_end(&p_rcv->p_subn->mgrp_mlid_tbl)) + { + return NULL; + } + return (osm_mgrp_t *)map_item; + +} + +/********************************************************************** +Look for a MGRP in the mgrp_mlid_tbl by mgid +***********************************************************************/ +static ib_api_status_t +__get_mgrp_by_mgid( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_member_rec_t* p_recvd_mcmember_rec, + OUT osm_mgrp_t **pp_mgrp) +{ + osm_sa_mcmr_search_ctxt_t mcmr_search_context; + + mcmr_search_context.p_mcmember_rec = p_recvd_mcmember_rec; + mcmr_search_context.p_rcv = p_rcv; + mcmr_search_context.p_mgrp = NULL; + + cl_qmap_apply_func( &p_rcv->p_subn->mgrp_mlid_tbl, + __search_mgrp_by_mgid, + &mcmr_search_context); + + if (mcmr_search_context.p_mgrp == NULL) + { + return IB_NOT_FOUND; + } + + *pp_mgrp = mcmr_search_context.p_mgrp; + return IB_SUCCESS; +} + +/********************************************************************* +Copy certain fields between two mcmember records +used during the process of join request to copy data from the mgrp to the +port record. +**********************************************************************/ +static inline void +__copy_from_create_mc_rec( + IN ib_member_rec_t * const dest, + IN const ib_member_rec_t *const src) +{ + dest->qkey = src->qkey; + dest->mlid = src->mlid; + dest->tclass = src->tclass; + dest->pkey = src->pkey; + dest->sl_flow_hop = src->sl_flow_hop; + dest->mtu = src->mtu; + dest->rate = src->rate; + dest->pkt_life = src->pkt_life; +} + +/********************************************************************* +Return an mlid to the pool of free mlids. +But this implementation is not a pool - it is simply scanning through +the MGRP database for unused mlids... +*********************************************************************/ +static void +__free_mlid( + IN osm_mcmr_recv_t* const p_rcv, + IN uint16_t mlid) +{ + UNUSED_PARAM(p_rcv); + UNUSED_PARAM(mlid); +} + +/********************************************************************* +Get a new unused mlid by scanning all the used ones in the subnet. +TODO: Implement a more scalable - O(1) solution based on pool of +available mlids. 
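+(One possible scheme, sketch only: a bitmap of
+max_multicast_lid_ho - IB_LID_MCAST_START_HO + 1 bits, set when a group
+is created, cleared in __free_mlid, and allocated by find-first-zero -
+or a free list for true O(1) - would avoid the full table scan below.)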
+**********************************************************************/ +static ib_net16_t +__get_new_mlid( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net16_t requested_mlid) +{ + osm_subn_t *p_subn = p_rcv->p_subn; + osm_mgrp_t *p_mgrp; + uint8_t *used_mlids_array; + uint16_t idx; + uint16_t mlid; /* the result */ + uint16_t max_num_mlids; + + OSM_LOG_ENTER(p_rcv->p_log, __get_new_mlid); + + if (requested_mlid && cl_ntoh16(requested_mlid) >= IB_LID_MCAST_START_HO && + cl_ntoh16(requested_mlid) < p_subn->max_multicast_lid_ho && + cl_qmap_get(&p_subn->mgrp_mlid_tbl, requested_mlid) == + cl_qmap_end(&p_subn->mgrp_mlid_tbl) ) { + mlid = cl_ntoh16(requested_mlid); + goto Exit; + } + + /* If MCGroups table empty, first return the min mlid */ + p_mgrp = (osm_mgrp_t*)cl_qmap_head( &p_subn->mgrp_mlid_tbl ); + if (p_mgrp == (osm_mgrp_t*)cl_qmap_end( &p_subn->mgrp_mlid_tbl )) + { + mlid = IB_LID_MCAST_START_HO; + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__get_new_mlid: " + "No multicast groups found using minimal mlid:0x%04X\n", + mlid ); + goto Exit; + } + + max_num_mlids = + p_rcv->p_subn->max_multicast_lid_ho - IB_LID_MCAST_START_HO; + + /* track all used mlids in the array (by mlid index) */ + used_mlids_array = + (uint8_t *)malloc(sizeof(uint8_t)*max_num_mlids); + if (used_mlids_array) + memset(used_mlids_array, 0, sizeof(uint8_t)*max_num_mlids); + if (!used_mlids_array) + return 0; + + /* scan all available multicast groups in the DB and fill in the table */ + while( p_mgrp != (osm_mgrp_t*)cl_qmap_end( &p_subn->mgrp_mlid_tbl ) ) + { + /* ignore mgrps marked for deletion */ + if (p_mgrp->to_be_deleted == FALSE) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__get_new_mlid: " + "Found mgrp with lid:0x%X MGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n", + cl_ntoh16( p_mgrp->mlid), + cl_ntoh64( p_mgrp->mcmember_rec.mgid.unicast.prefix ), + cl_ntoh64( p_mgrp->mcmember_rec.mgid.unicast.interface_id ) ); + + /* Map in table */ + if (cl_ntoh16(p_mgrp->mlid) > p_rcv->p_subn->max_multicast_lid_ho) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__get_new_mlid: ERR 1B27: " + "Found mgrp with mlid:0x%04X > max allowed mlid:0x%04X\n", + cl_ntoh16(p_mgrp->mlid), + max_num_mlids + IB_LID_MCAST_START_HO ); + } + else + { + used_mlids_array[cl_ntoh16(p_mgrp->mlid) - IB_LID_MCAST_START_HO] = 1; + } + } + p_mgrp = (osm_mgrp_t*)cl_qmap_next( &p_mgrp->map_item ); + } + + /* Find "mlid holes" in the mgrp table */ + for (idx = 0; + (idx < max_num_mlids) && (used_mlids_array[idx] == 1); + idx++); + + /* did it go above the maximal mlid allowed */ + if ( idx < max_num_mlids ) + { + mlid = idx + IB_LID_MCAST_START_HO; + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__get_new_mlid: " + "Found available mlid:0x%04X at idx:%u\n", + mlid, idx ); + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__get_new_mlid: ERR 1B23: " + "All available:%u mlids are taken\n", + max_num_mlids ); + mlid = 0; + } + + free(used_mlids_array); + + Exit: + OSM_LOG_EXIT(p_rcv->p_log); + return cl_hton16(mlid); +} + +/********************************************************************* +This procedure is only invoked to cleanup an INTERMEDIATE mgrp. +If there is only one port on the mgrp it means that the current +request was the only member and the group is not really needed. So we +silently drop it. Since it was an intermediate group no need to +re-route it. 
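+("Intermediate" means a group created earlier in this same join flow
+that was never committed to the multicast routing tables, so dropping
+it is invisible to the rest of the fabric.)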
+**********************************************************************/ +static void +__cleanup_mgrp( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net16_t const mlid) +{ + osm_mgrp_t *p_mgrp; + + p_mgrp = __get_mgrp_by_mlid(p_rcv, mlid); + if(p_mgrp) + { + /* Remove MGRP only if osm_mcm_port_t count is 0 and + * Not a well known group + */ + if(cl_is_qmap_empty(&p_mgrp->mcm_port_tbl) && + (p_mgrp->well_known == FALSE)) + { + cl_qmap_remove_item(&p_rcv->p_subn->mgrp_mlid_tbl, + (cl_map_item_t *)p_mgrp ); + osm_mgrp_destroy(p_mgrp); + } + } +} + +/********************************************************************* +Add a port to the group. Calculating its PROXY_JOIN by the Port and +requester gids. +**********************************************************************/ +static ib_api_status_t +__add_new_mgrp_port( + IN osm_mcmr_recv_t *p_rcv, + IN osm_mgrp_t *p_mgrp, + IN ib_member_rec_t *p_recvd_mcmember_rec, + IN osm_mad_addr_t *p_mad_addr, + OUT osm_mcm_port_t **pp_mcmr_port) +{ + boolean_t proxy_join; + ib_gid_t requester_gid; + ib_api_status_t res; + + /* set the proxy_join if the requester gid is not identical to the + joined gid */ + res = osm_get_gid_by_mad_addr( p_rcv->p_log, + p_rcv->p_subn, + p_mad_addr, &requester_gid ); + if ( res != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__add_new_mgrp_port: ERR 1B29: " + "Could not find GID for requester\n" ); + + return IB_INVALID_PARAMETER; + } + + if (!memcmp(&p_recvd_mcmember_rec->port_gid, &requester_gid, + sizeof(ib_gid_t))) + { + proxy_join = FALSE; + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__add_new_mgrp_port: " + "Create new port with proxy_join FALSE\n" ); + } + else + { + /* The port is not the one specified in PortGID. + The check that the requester is in the same partition as + the PortGID is done before - just need to update the proxy_join. */ + proxy_join = TRUE; + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__add_new_mgrp_port: " + "Create new port with proxy_join TRUE\n" ); + } + + *pp_mcmr_port = osm_mgrp_add_port( p_mgrp, + &p_recvd_mcmember_rec->port_gid, + p_recvd_mcmember_rec->scope_state, + proxy_join ); + if(*pp_mcmr_port == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__add_new_mgrp_port: ERR 1B06: " + "osm_mgrp_add_port failed\n" ); + + return IB_INSUFFICIENT_MEMORY; + } + + return IB_SUCCESS; +} + +/********************************************************************** + **********************************************************************/ +static inline boolean_t +__check_join_comp_mask(ib_net64_t comp_mask) +{ + return( (comp_mask & JOIN_MC_COMP_MASK) == JOIN_MC_COMP_MASK ); +} + +/********************************************************************** + **********************************************************************/ +static inline boolean_t +__check_create_comp_mask(ib_net64_t comp_mask, + ib_member_rec_t *p_recvd_mcmember_rec) +{ + return( + ((comp_mask & REQUIRED_MC_CREATE_COMP_MASK) == REQUIRED_MC_CREATE_COMP_MASK) + ); +} + +/********************************************************************** +Generate the response MAD +**********************************************************************/ +static void +__osm_mcmr_rcv_respond( + IN const osm_mcmr_recv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN ib_member_rec_t *p_mcmember_rec ) +{ + osm_madw_t *p_resp_madw; + ib_sa_mad_t *p_sa_mad, *p_resp_sa_mad; + ib_member_rec_t *p_resp_mcmember_rec; + ib_api_status_t status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mcmr_rcv_respond ); + + /* + * Get a MAD to reply. 
Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get(p_rcv->p_mad_pool, + p_madw->h_bind, + sizeof(ib_member_rec_t)+IB_SA_MAD_HDR_SIZE, + osm_madw_get_mad_addr_ptr(p_madw) ); + if ( !p_resp_madw ) + { + goto Exit; + } + + p_resp_sa_mad = (ib_sa_mad_t*)p_resp_madw->p_mad; + p_sa_mad = (ib_sa_mad_t*)p_madw->p_mad; + /* Copy the MAD header back into the response mad */ + memcpy(p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE); + /* based on the current method decide about the response: */ + if ((p_resp_sa_mad->method == IB_MAD_METHOD_GET) || + (p_resp_sa_mad->method == IB_MAD_METHOD_SET)) { + p_resp_sa_mad->method = IB_MAD_METHOD_GET_RESP; + } + else if (p_resp_sa_mad->method == IB_MAD_METHOD_DELETE) { + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + } + else + { + CL_ASSERT( p_resp_sa_mad->method == 0); + } + + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_member_rec_t) ); + p_resp_mcmember_rec = (ib_member_rec_t*)&p_resp_sa_mad->data; + + *p_resp_mcmember_rec = *p_mcmember_rec; + + /* Fill in the mtu, rate, and packet lifetime selectors */ + p_resp_mcmember_rec->mtu &= 0x3f; + p_resp_mcmember_rec->mtu |= 2<<6; /* exactly */ + p_resp_mcmember_rec->rate &= 0x3f; + p_resp_mcmember_rec->rate |= 2<<6; /* exactly */ + p_resp_mcmember_rec->pkt_life &= 0x3f; + p_resp_mcmember_rec->pkt_life |= 2<<6; /* exactly */ + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + + if(status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_respond: ERR 1B07: " + "Unable to send MAD (%s) for TID <0x%"PRIx64">\n", + ib_get_err_str( status ), + p_resp_sa_mad->trans_id ); + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************* +In joining an existing group, or when querying the mc groups, +we make sure the following components provided match: MTU and RATE +HACK: Currently we ignore the PKT_LIFETIME field. 
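+(Each field packs a 2-bit selector in its top bits - 0 = greater than,
+1 = less than, 2 = exactly, 3 = best available - over a 6-bit value,
+so e.g. rate = (1 << 6) | IB_PATH_RECORD_RATE_10_GBS asks for a group
+rate below 10 Gb/s.)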
+**********************************************************************/ +static boolean_t +__validate_more_comp_fields( + osm_log_t *p_log, + const osm_mgrp_t *p_mgrp, + const ib_member_rec_t *p_recvd_mcmember_rec, + ib_net64_t comp_mask) +{ + uint8_t mtu_sel; + uint8_t mtu_required; + uint8_t mtu_mgrp; + uint8_t rate_sel; + uint8_t rate_required; + uint8_t rate_mgrp; + + if ( comp_mask & IB_MCR_COMPMASK_MTU_SEL) + { + mtu_sel = (uint8_t)(p_recvd_mcmember_rec->mtu >> 6); + /* Clearing last 2 bits */ + mtu_required = (uint8_t)(p_recvd_mcmember_rec->mtu & 0x3F); + mtu_mgrp = (uint8_t)(p_mgrp->mcmember_rec.mtu & 0x3F); + switch (mtu_sel) + { + case 0: /* Greater than MTU specified */ + if(mtu_mgrp <= mtu_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested MTU %x is not greater than %x\n", + mtu_mgrp, mtu_required ); + return FALSE; + } + break; + case 1: /* Less than MTU specified */ + if(mtu_mgrp >= mtu_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested MTU %x is not less than %x\n", + mtu_mgrp, mtu_required ); + return FALSE; + } + break; + case 2: /* Exactly MTU specified */ + if(mtu_mgrp != mtu_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested MTU %x is not equal to %x\n", + mtu_mgrp, mtu_required ); + return FALSE; + } + break; + default: + break; + } + } + + /* what about rate ? */ + if ( comp_mask & IB_MCR_COMPMASK_RATE_SEL) + { + rate_sel = (uint8_t)(p_recvd_mcmember_rec->rate >> 6); + /* Clearing last 2 bits */ + rate_required = (uint8_t)(p_recvd_mcmember_rec->rate & 0x3F); + rate_mgrp = (uint8_t)(p_mgrp->mcmember_rec.rate & 0x3F); + switch (rate_sel) + { + case 0: /* Greater than RATE specified */ + if(rate_mgrp <= rate_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested RATE %x is not greater than %x\n", + rate_mgrp, rate_required ); + return FALSE; + } + break; + case 1: /* Less than RATE specified */ + if(rate_mgrp >= rate_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested RATE %x is not less than %x\n", + rate_mgrp, rate_required ); + return FALSE; + } + break; + case 2: /* Exactly RATE specified */ + if(rate_mgrp != rate_required) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_more_comp_fields: " + "Requested RATE %x is not equal to %x\n", + rate_mgrp, rate_required ); + return FALSE; + } + break; + default: + break; + } + } + + return TRUE; +} + +/********************************************************************* +In joining an existing group, we make sure the following components +are physically realizable: MTU and RATE +**********************************************************************/ +static boolean_t +__validate_port_caps( + osm_log_t * const p_log, + const osm_mgrp_t *p_mgrp, + const osm_physp_t *p_physp) +{ + ib_port_info_t *p_pi; + uint8_t mtu_required; + uint8_t mtu_mgrp; + uint8_t rate_required; + uint8_t rate_mgrp; + + p_pi = osm_physp_get_port_info_ptr(p_physp); + if (!p_pi) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_port_caps: " + "Cannot get Port's 0x%016" PRIx64 " PortInfo\n", + cl_ntoh64( osm_physp_get_port_guid(p_physp) ) ); + return FALSE; + } + + mtu_required = ib_port_info_get_mtu_cap(p_pi); + mtu_mgrp = (uint8_t)(p_mgrp->mcmember_rec.mtu & 0x3F); + if (mtu_required < mtu_mgrp) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_port_caps: " + "Port's MTU %x is less than %x\n", + mtu_required, mtu_mgrp ); + return FALSE; + } + 
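+  /* Same test for rate: the port's static rate, computed from its link
+     width and speed, must reach the group's rate. */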
+ rate_required = ib_port_info_compute_rate(p_pi); + rate_mgrp = (uint8_t)(p_mgrp->mcmember_rec.rate & 0x3F); + if (rate_required < rate_mgrp) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__validate_port_caps: " + "Port's RATE %x is less than %x\n", + rate_required, rate_mgrp ); + return FALSE; + } + + return TRUE; +} + +/********************************************************************** + * o15-0.2.1: If SA supports UD multicast, then if SA receives a SubnAdmSet() + * or SubnAdmDelete() method that would modify an existing + * MCMemberRecord, SA shall not modify that MCMemberRecord and shall + * return an error status of ERR_REQ_INVALID in response in the + * following cases: + * 1. Saved MCMemberRecord.ProxyJoin is not set and the request is + * issued by a requester with a GID other than the Port-GID. + * 2. Saved MCMemberRecord.ProxyJoin is set and the requester is not + * part of the partition for that MCMemberRecord. + **********************************************************************/ +static boolean_t +__validate_modify(IN osm_mcmr_recv_t* const p_rcv, + IN osm_mgrp_t* p_mgrp, + IN osm_mad_addr_t* p_mad_addr, + IN ib_member_rec_t* p_recvd_mcmember_rec, + OUT osm_mcm_port_t **pp_mcm_port) +{ + ib_net64_t portguid; + ib_gid_t request_gid; + osm_physp_t* p_request_physp; + ib_api_status_t res; + + portguid = p_recvd_mcmember_rec->port_gid.unicast.interface_id; + + *pp_mcm_port = NULL; + + /* o15-0.2.1: If this is a new port being added - nothing to check */ + if (!osm_mgrp_is_port_present(p_mgrp, portguid, pp_mcm_port)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_modify: " + "This is a new port in the MC group\n" ); + return TRUE; + } + + /* We validate the request according to the proxy_join. Check if the proxy_join is set or not */ + if ( (*pp_mcm_port)->proxy_join == FALSE ) + { + /* The proxy_join is not set. Modifying can be done only if the requester GID == PortGID */ + res = osm_get_gid_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + p_mad_addr, + &request_gid); + + if ( res != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_modify: " + "Could not find port for requested address\n" + ); + return FALSE; + } + + if (memcmp(&((*pp_mcm_port)->port_gid), &request_gid, sizeof(ib_gid_t))) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_modify: " + "No ProxyJoin but different ports: stored:0x%016"PRIx64 + " request:0x%016"PRIx64"\n", + cl_ntoh64((*pp_mcm_port)->port_gid.unicast.interface_id), + cl_ntoh64(p_mad_addr->addr_type.gsi.grh_info.src_gid.unicast.interface_id) + ); + return FALSE; + } + } + else + { + /* the proxy_join is set. Modification allowed only if the requester is part of the partition for this MCMemberRecord */ + p_request_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + p_mad_addr); + if (p_request_physp == NULL) + return FALSE; + + if (!osm_physp_has_pkey(p_rcv->p_log, p_mgrp->mcmember_rec.pkey, + p_request_physp)) + { + /* the request port is not part of the partition for this mgrp */ + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_modify: " + "ProxyJoin but port not in partition. 
stored:0x%016"PRIx64 + " request:0x%016"PRIx64"\n", + cl_ntoh64((*pp_mcm_port)->port_gid.unicast.interface_id), + cl_ntoh64(p_mad_addr->addr_type.gsi.grh_info.src_gid.unicast.interface_id) + ); + return FALSE; + } + } + return TRUE; +} + +/********************************************************************** + **********************************************************************/ +/* + * Check legality of the requested MGID DELETE + * o15-0.1.14 = VALID DELETE: + * To be a valid delete, the MAD needs to meet these conditions: + * 1 the MAD's PortGID and MGID components match the PortGID and + * MGID of a stored MCMemberRecord; + * 2 the MAD's JoinState component contains at least one bit set to 1 + * in the same position as that stored MCMemberRecord's JoinState + * has a bit set to 1, + * i.e., the logical AND of the two JoinState components + * is not all zeros; + * 3 the MAD's JoinState component does not have any bits set + * which are not set in the stored MCMemberRecord's JoinState component; + * 4 either the stored MCMemberRecord:ProxyJoin is reset (0), and the + * MAD's source is the stored PortGID; + * OR + * the stored MCMemberRecord:ProxyJoin is set (1), (see o15- + * 0.1.2:); and the MAD's source is a member of the partition indicated + * by the stored MCMemberRecord:P_Key. + */ +static boolean_t +__validate_delete(IN osm_mcmr_recv_t* const p_rcv, + IN osm_mgrp_t *p_mgrp, + IN osm_mad_addr_t* p_mad_addr, + IN ib_member_rec_t* p_recvd_mcmember_rec, + OUT osm_mcm_port_t **pp_mcm_port) +{ + ib_net64_t portguid; + + portguid = p_recvd_mcmember_rec->port_gid.unicast.interface_id; + + *pp_mcm_port = NULL; + + /* 1 */ + if (!osm_mgrp_is_port_present(p_mgrp, portguid, pp_mcm_port)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_delete: " + "Failed to find the port in the MC group\n" ); + return FALSE; + } + + /* 2 */ + if (!(p_recvd_mcmember_rec->scope_state & 0x0F & (*pp_mcm_port)->scope_state)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_delete: " + "Could not find any matching bits in the stored and requested JoinStates\n" ); + return FALSE; + } + + /* 3 */ + if ( ((p_recvd_mcmember_rec->scope_state & 0x0F) | + (0x0F & (*pp_mcm_port)->scope_state)) != + (0x0F & (*pp_mcm_port)->scope_state)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_delete: " + "Some bits in the request JoinState (0x%X) are not set in the stored port (0x%X)\n", + (p_recvd_mcmember_rec->scope_state & 0x0F), + (0x0F & (*pp_mcm_port)->scope_state) + ); + return FALSE; + } + + /* 4 */ + /* Validate according to the proxy_join (o15-0.1.2) */ + if ( __validate_modify( p_rcv, p_mgrp, p_mad_addr, p_recvd_mcmember_rec, + pp_mcm_port ) == FALSE ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_delete: " + "proxy_join validation failure\n" ); + return FALSE; + } + return TRUE; +} + +/********************************************************************** + **********************************************************************/ +/* + * Check legality of the requested MGID (note this does not hold for SA + * created MGIDs) + * + * Implementing o15-0.1.5: + * A multicast GID is considered to be invalid if: + * 1. It does not comply with the rules as specified in 4.1.1 "GID Usage and + * Properties" on page 145: + * + * 14) The multicast GID format is (bytes are comma sep): + * 0xff,<Fl><Sc>,<Si>,<Si>,<P>,<P>,<P>,<P>,<P>,<P>,<P>,<P>,<Id>,<Id>,<Id>,<Id>
+ * Fl 4bit = Flags (b) + * Sc 4bit = Scope (c) + * Si 16bit = Signature (2) + * P 64bit = GID Prefix (should be a subnet unique ID - normally Subnet Prefix) + * Id 32bit = Unique ID in the Subnet (might be MLID or Pkey ?) + * + * a) 8-bits of 11111111 at the start of the GID identifies this as being a + * multicast GID. + * b) Flags is a set of four 1-bit flags: 000T with three flags reserved + * and defined as zero (0). The T flag is defined as follows: + * i) T = 0 indicates this is a permanently assigned (i.e. well-known) + * multicast GID. See RFC 2373 and RFC 2375 as reference + * for these permanently assigned GIDs. + * ii) T = 1 indicates this is a non-permanently assigned (i.e. transient) + * multicast GID. + * c) Scope is a 4-bit multicast scope value used to limit the scope of + * the multicast group. The following table defines scope value and + * interpretation. + * + * Multicast Address Scope Values: + * 0x2 Link-local + * 0x5 Site-local + * 0x8 Organization-local + * 0xE Global + * + * 2. It contains the SA-specific signature of 0xA01B and has the link-local + * scope bits set. (EZ: the idea here is that SA created MGIDs are the + * only source for this signature with link-local scope) + */ +ib_api_status_t +__validate_requested_mgid(IN osm_mcmr_recv_t* const p_rcv, + IN const ib_member_rec_t* p_mcm_rec) +{ + uint16_t signature; + boolean_t valid = TRUE; + + OSM_LOG_ENTER( p_rcv->p_log, __validate_requested_mgid ); + + /* 14-a: mcast GID must start with 0xFF */ + if (p_mcm_rec->mgid.multicast.header[0] != 0xFF) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__validate_requested_mgid: ERR 1B01: " + "Wrong MGID Prefix 0x%02X must be 0xFF\n", + cl_ntoh16(p_mcm_rec->mgid.multicast.header[0]) + ); + valid = FALSE; + goto Exit; + } + + /* the MGID signature can mark IPoIB or SA assigned MGIDs */ + memcpy(&signature, &(p_mcm_rec->mgid.multicast.raw_group_id), sizeof(signature)); + signature = cl_ntoh16(signature); + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_requested_mgid: " + "MGID Signed as 0x%04X\n", + signature + ); + + /* + * We skip any checks for MGIDs that follow IPoIB + * GID structure as defined by the IETF ipoib-link-multicast. + * + * For IPv4 over IB, the signature will be "0x401B". + * + * | 8 | 4 | 4 | 16 bits | 16 bits | 48 bits | 32 bits | + * +--------+----+----+-----------------+---------+----------+---------+ + * |11111111|0001|scop|<IPoIB signature>|< P_Key >|00.......0|<IPv4 address>| + * +--------+----+----+-----------------+---------+----------+---------+ + * + * For IPv6 over IB, the signature will be "0x601B".
+ * + * | 8 | 4 | 4 | 16 bits | 16 bits | 80 bits | + * +--------+----+----+-----------------+---------+--------------------+ + * |11111111|0001|scop|<IPoIB signature>|< P_Key >|000.............0001| + * +--------+----+----+-----------------+---------+--------------------+ + * + */ + if (signature == 0x401B || signature == 0x601B) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_requested_mgid: " + "Skipping MGID Validation for IPoIB Signed (0x%04X) MGIDs\n", + signature + ); + goto Exit; + } + + /* 14-b: the 3 upper bits in the "flags" should be zero: */ + if ( p_mcm_rec->mgid.multicast.header[1] & 0xE0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__validate_requested_mgid: ERR 1B28: " + "MGID uses Reserved Flags: flags=0x%X\n", + (p_mcm_rec->mgid.multicast.header[1] & 0xE0) >> 4 + ); + valid = FALSE; + goto Exit; + } + + /* 2 - now what if the link-local format 0xA01B is used - the scope should not be link-local */ + if ( ( signature == 0xA01B ) && + ((p_mcm_rec->mgid.multicast.header[1] & 0x0F) == MC_SCOPE_LINK_LOCAL) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__validate_requested_mgid: ERR 1B24: " + "MGID uses 0xA01B signature but with link-local scope\n" ); + valid = FALSE; + goto Exit; + } + + /* + * For SA assigned MGIDs (signature 0xA01B): + * There is no real way to make sure the Unique MGID Prefix is really unique. + * If we could enforce using the Subnet Prefix for that purpose it would + * have been nice. But the spec does not require it. + */ + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return (valid); +} + +/********************************************************************** + Check if the requested new MC group parameters are realizable. + Also set the default MTU and Rate if not provided by the user. +**********************************************************************/ +boolean_t +__mgrp_request_is_realizable( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net64_t comp_mask, + IN ib_member_rec_t * p_mcm_rec, + IN const osm_physp_t* const p_physp) +{ + uint8_t mtu_sel = 2; /* exactly */ + uint8_t mtu_required, mtu, port_mtu; + uint8_t rate_sel = 2; /* exactly */ + uint8_t rate_required, rate, port_rate; + osm_log_t *p_log = p_rcv->p_log; + ib_port_info_t *p_pi = NULL; + + OSM_LOG_ENTER( p_rcv->p_log, __mgrp_request_is_realizable ); + + if (p_physp != NULL) + p_pi = osm_physp_get_port_info_ptr(p_physp); + + /* + * End of o15-0.2.3 specifies: + * .... + * The entity may also supply the other components such as HopLimit, MTU, + * etc. during group creation time. If these components are not provided + * during group creation time, SA will provide them for the group. The values + * chosen are vendor-dependent and beyond the scope of the specification. + * + * so we might also need to assign RATE/MTU if they are not comp masked in. + */ + + port_mtu = p_pi ? ib_port_info_get_mtu_cap(p_pi) : 0; + if (!(comp_mask & IB_MCR_COMPMASK_MTU) || + !(comp_mask & IB_MCR_COMPMASK_MTU_SEL) || + (mtu_sel = (p_mcm_rec->mtu >> 6)) == 3) + mtu = port_mtu ?
port_mtu : p_rcv->p_subn->min_ca_mtu; + else + { + mtu_required = (uint8_t)(p_mcm_rec->mtu & 0x3F); + mtu = mtu_required; + switch (mtu_sel) + { + case 0: /* Greater than MTU specified */ + if (port_mtu && mtu_required >= port_mtu) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__mgrp_request_is_realizable: " + "Requested MTU %x >= the port\'s mtu:%x\n", + mtu_required, port_mtu ); + return FALSE; + } + /* we provide the largest MTU possible if we can */ + if (port_mtu) + mtu = port_mtu; + else if (mtu_required < p_rcv->p_subn->min_ca_mtu) + mtu = p_rcv->p_subn->min_ca_mtu; + else + mtu++; + break; + case 1: /* Less than MTU specified */ + /* use the smaller of the two: a. one lower than the required b. the mtu of the requesting port (if exists) */ + if (port_mtu && mtu_required > port_mtu) + mtu = port_mtu; + else + mtu--; + break; + case 2: /* Exactly MTU specified */ + default: + break; + } + /* make sure it still is in the range */ + if (mtu < IB_MIN_MTU || mtu > IB_MAX_MTU) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__mgrp_request_is_realizable: " + "Calculated MTU %x is out of range\n", + mtu ); + return FALSE; + } + } + p_mcm_rec->mtu = (mtu_sel<<6) | mtu; + + port_rate = p_pi ? ib_port_info_compute_rate(p_pi) : 0; + if (!(comp_mask & IB_MCR_COMPMASK_RATE) || + !(comp_mask & IB_MCR_COMPMASK_RATE_SEL) || + (rate_sel = (p_mcm_rec->rate >> 6)) == 3) + rate = port_rate ? port_rate : p_rcv->p_subn->min_ca_rate; + else + { + rate_required = (uint8_t)(p_mcm_rec->rate & 0x3F); + rate = rate_required; + switch (rate_sel) + { + case 0: /* Greater than RATE specified */ + if (port_rate && rate_required >= port_rate) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__mgrp_request_is_realizable: " + "Requested RATE %x >= the port\'s rate:%x\n", + rate_required, port_rate ); + return FALSE; + } + /* we provide the largest RATE possible if we can */ + if (port_rate) + rate = port_rate; + else if (rate_required < p_rcv->p_subn->min_ca_rate) + rate = p_rcv->p_subn->min_ca_rate; + else + rate++; + break; + case 1: /* Less than RATE specified */ + /* use the smaller of the two: a. one lower than the required b. the rate of the requesting port (if exists) */ + if (port_rate && rate_required > port_rate) + rate = port_rate; + else + rate--; + break; + case 2: /* Exactly RATE specified */ + default: + break; + } + /* make sure it still is in the range */ + if (rate < IB_MIN_RATE || rate > IB_MAX_RATE) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__mgrp_request_is_realizable: " + "Calculated RATE %x is out of range\n", + rate ); + return FALSE; + } + } + p_mcm_rec->rate = (rate_sel<<6) | rate; + + OSM_LOG_EXIT( p_rcv->p_log ); + return TRUE; +} + +/********************************************************************** + Call this function to find or create a new mgrp. +**********************************************************************/ +ib_api_status_t +osm_mcmr_rcv_find_or_create_new_mgrp( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net64_t comp_mask, + IN ib_member_rec_t* const p_recvd_mcmember_rec, + OUT osm_mgrp_t **pp_mgrp) +{ + ib_api_status_t status; + + status = __get_mgrp_by_mgid(p_rcv, p_recvd_mcmember_rec, pp_mgrp); + if (status == IB_SUCCESS) + return status; + return osm_mcmr_rcv_create_new_mgrp(p_rcv, comp_mask, + p_recvd_mcmember_rec, NULL, pp_mgrp); +} + +/********************************************************************** + Call this function to create a new mgrp.
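
[Editor's note - a condensed standalone sketch of the "greater than" selector branch above: grant the port's MtuCap when port info exists, reject when nothing greater is realizable. The no-port-info fallback is simplified (it ignores min_ca_mtu); names and enum values are illustrative, not OpenSM API.]

#include <stdint.h>
#include <stdio.h>

/* Selector 0 ("greater than"): returns the granted MTU enum, or 0 on failure. */
static uint8_t pick_mtu_greater(uint8_t mtu_required, uint8_t port_mtu_cap)
{
	if (port_mtu_cap && mtu_required >= port_mtu_cap)
		return 0;                /* port cannot do anything greater */
	return port_mtu_cap ? port_mtu_cap : (uint8_t)(mtu_required + 1);
}

int main(void)
{
	printf("%u\n", pick_mtu_greater(2, 4));   /* 4 - grant the port's cap */
	printf("%u\n", pick_mtu_greater(4, 4));   /* 0 - request is rejected  */
	return 0;
}
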
+**********************************************************************/ +ib_api_status_t +osm_mcmr_rcv_create_new_mgrp( + IN osm_mcmr_recv_t* const p_rcv, + IN ib_net64_t comp_mask, + IN const ib_member_rec_t* const p_recvd_mcmember_rec, + IN const osm_physp_t* const p_physp, + OUT osm_mgrp_t **pp_mgrp) +{ + ib_net16_t mlid; + uint8_t zero_mgid, valid; + uint8_t scope, i; + ib_gid_t *p_mgid; + osm_mgrp_t *p_prev_mgrp; + ib_api_status_t status = IB_SUCCESS; + ib_member_rec_t mcm_rec = *p_recvd_mcmember_rec; /* copy for modifications */ + + OSM_LOG_ENTER( p_rcv->p_log, osm_mcmr_rcv_create_new_mgrp ); + + /* but what if the given MGID was not 0 ? */ + zero_mgid = 1; + for ( i = 0 ; i < sizeof(p_recvd_mcmember_rec->mgid); i++ ) + { + if (p_recvd_mcmember_rec->mgid.raw[i] != 0) + { + zero_mgid = 0; + break; + } + } + + /* + we allocate a new mlid number before we might use it + for MGID ... + */ + mlid = __get_new_mlid(p_rcv, mcm_rec.mlid); + if ( mlid == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_create_new_mgrp: ERR 1B19: " + "__get_new_mlid failed\n" ); + status = IB_SA_MAD_STATUS_NO_RESOURCES; + goto Exit; + } + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_mcmr_rcv_create_new_mgrp: " + "Obtained new mlid 0x%X\n", cl_ntoh16(mlid) ); + + /* we need to create the new MGID if it was not defined */ + if (zero_mgid) + { + /* create a new MGID */ + + /* use the given scope state only if requested! */ + if (comp_mask & IB_MCR_COMPMASK_SCOPE) + { + ib_member_get_scope_state( + p_recvd_mcmember_rec->scope_state, &scope, NULL); + } + else + { + /* to guarantee no collision with other subnets use local scope! */ + scope = MC_SCOPE_LINK_LOCAL; + } + + p_mgid = &(mcm_rec.mgid); + p_mgid->raw[0] = 0xFF; + p_mgid->raw[1] = 0x10 | scope; + p_mgid->raw[2] = 0xA0; + p_mgid->raw[3] = 0x1B; + + /* HACK: use the SA port gid to make it globally unique */ + memcpy((&p_mgid->raw[4]), + &p_rcv->p_subn->opt.subnet_prefix, sizeof(uint64_t)); + + /* HACK: how do we get a unique number - use the mlid twice */ + memcpy(&p_mgid->raw[10], &mlid, sizeof(uint16_t)); + memcpy(&p_mgid->raw[12], &mlid, sizeof(uint16_t)); + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_mcmr_rcv_create_new_mgrp: " + "Allocated new MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n", + cl_ntoh64(p_mgid->unicast.prefix), + cl_ntoh64(p_mgid->unicast.interface_id) ); + } + else + { + /* a specific MGID was requested so validate the resulting MGID */ + valid = __validate_requested_mgid(p_rcv, &mcm_rec); + if (!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_create_new_mgrp: ERR 1B22: " + "Invalid requested MGID\n" ); + __free_mlid(p_rcv, mlid); + status = IB_SA_MAD_STATUS_REQ_INVALID; + goto Exit; + } + } + + /* check the requested parameters are realizable */ + if (__mgrp_request_is_realizable(p_rcv, comp_mask, &mcm_rec, p_physp) == FALSE) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_create_new_mgrp: ERR 1B26: " + "Requested MGRP parameters are not realizable\n" ); + __free_mlid(p_rcv, mlid); + status = IB_SA_MAD_STATUS_REQ_INVALID; + goto Exit; + } + + /* create a new MC Group */ + *pp_mgrp = osm_mgrp_new(mlid); + if (*pp_mgrp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_create_new_mgrp: ERR 1B08: " + "osm_mgrp_new failed\n" ); + __free_mlid(p_rcv, mlid); + status = IB_SA_MAD_STATUS_NO_RESOURCES; + goto Exit; + } + + /* Initialize the mgrp */ + (*pp_mgrp)->mcmember_rec = mcm_rec; + (*pp_mgrp)->mcmember_rec.mlid = mlid; + + /* the mcmember_record should have mtu_sel, 
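
[Editor's note - a standalone sketch of the MGID layout assembled above: 0xFF, flags|scope, the 0xA01B SA signature, the subnet prefix, and the MLID copied in twice as the unique ID. Buffer-based for illustration; the patch itself writes into an ib_gid_t.]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

/* Build an SA-assigned transient MGID into mgid[16]; inputs are big-endian. */
static void build_sa_mgid(uint8_t mgid[16], uint8_t scope,
                          const uint8_t prefix_be[8], const uint8_t mlid_be[2])
{
	memset(mgid, 0, 16);
	mgid[0] = 0xFF;                       /* multicast prefix              */
	mgid[1] = (uint8_t)(0x10 | scope);    /* flags T=1 (transient) | scope */
	mgid[2] = 0xA0;                       /* SA signature 0xA01B           */
	mgid[3] = 0x1B;
	memcpy(&mgid[4], prefix_be, 8);       /* subnet prefix, bytes 4..11    */
	memcpy(&mgid[10], mlid_be, 2);        /* MLID twice as unique ID; this */
	memcpy(&mgid[12], mlid_be, 2);        /* overlays prefix bytes 10..11, as in the patch */
}

int main(void)
{
	uint8_t prefix[8] = { 0xFE, 0x80 }, mlid[2] = { 0xC0, 0x01 }, mgid[16];
	int i;
	build_sa_mgid(mgid, 0x2 /* link-local */, prefix, mlid);
	for (i = 0; i < 16; i++)
		printf("%02x%s", mgid[i], i == 15 ? "\n" : ":");
	return 0;
}
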
rate_sel, and pkt_lifetime_sel = 2 */ + (*pp_mgrp)->mcmember_rec.mtu &= 0x3f; + (*pp_mgrp)->mcmember_rec.mtu |= 2<<6; /* exactly */ + (*pp_mgrp)->mcmember_rec.rate &= 0x3f; + (*pp_mgrp)->mcmember_rec.rate |= 2<<6; /* exactly */ + (*pp_mgrp)->mcmember_rec.pkt_life &= 0x3f; + (*pp_mgrp)->mcmember_rec.pkt_life |= 2<<6; /* exactly */ + + /* Insert the new group in the data base */ + + /* since we might have an old group by that mlid + one whose deletion was delayed for an idle time + we need to deallocate it first */ + p_prev_mgrp = (osm_mgrp_t *)cl_qmap_get(&p_rcv->p_subn->mgrp_mlid_tbl, mlid); + if (p_prev_mgrp != (osm_mgrp_t *)cl_qmap_end(&p_rcv->p_subn->mgrp_mlid_tbl)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_mcmr_rcv_create_new_mgrp: " + "Found previous group for mlid:0x%04x - Need to destroy it\n", + cl_ntoh16(mlid) ); + cl_qmap_remove_item(&p_rcv->p_subn->mgrp_mlid_tbl, + (cl_map_item_t *)p_prev_mgrp ); + osm_mgrp_destroy( p_prev_mgrp ); + } + + cl_qmap_insert(&p_rcv->p_subn->mgrp_mlid_tbl, + mlid, + &(*pp_mgrp)->map_item); + + /* Send a Report to any InformInfo registerd for + Trap 66: MCGroup create */ + osm_mgrp_send_create_notice(p_rcv->p_subn, p_rcv->p_log, *pp_mgrp); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return status; + +} + +/********************************************************************* +Process a request for leaving the group +**********************************************************************/ +static void +__osm_mcmr_rcv_leave_mgrp( + IN osm_mcmr_recv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + boolean_t valid; + osm_mgrp_t *p_mgrp; + ib_api_status_t status; + ib_sa_mad_t *p_sa_mad; + ib_member_rec_t*p_recvd_mcmember_rec; + ib_member_rec_t mcmember_rec; + ib_net16_t mlid; + ib_net16_t sa_status; + ib_net64_t portguid; + osm_mcm_port_t *p_mcm_port; + uint8_t port_join_state; + uint8_t new_join_state; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mcmr_rcv_leave_mgrp ); + + p_mgrp = NULL; + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_mcmember_rec = + (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + mcmember_rec = *p_recvd_mcmember_rec; + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_leave_mgrp: Dump of record\n" ); + osm_dump_mc_record( p_rcv->p_log, &mcmember_rec, OSM_LOG_DEBUG ); + } + + CL_PLOCK_EXCL_ACQUIRE(p_rcv->p_lock); + status = __get_mgrp_by_mgid(p_rcv,p_recvd_mcmember_rec, &p_mgrp); + if (status == IB_SUCCESS) + { + mlid = p_mgrp->mlid; + portguid = p_recvd_mcmember_rec->port_gid.unicast.interface_id; + + /* check validity of the delete request o15-0.1.14 */ + valid = __validate_delete(p_rcv, + p_mgrp, + osm_madw_get_mad_addr_ptr(p_madw), + p_recvd_mcmember_rec, + &p_mcm_port); + + if (valid) + { + /* + * according to the same o15-0.1.14 we get the stored JoinState and the + * request JoinState and they must be opposite to leave - + * otherwise just update it + */ + port_join_state = p_mcm_port->scope_state & 0x0F; + new_join_state = + port_join_state & ~(p_recvd_mcmember_rec->scope_state & 0x0F); + if (new_join_state) + { + /* Just update the result JoinState */ + p_mcm_port->scope_state = + new_join_state | (p_mcm_port->scope_state & 0xf0); + + mcmember_rec.scope_state = p_mcm_port->scope_state; + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_leave_mgrp: " + "After update JoinState != 0. 
Updating from 0x%X to 0x%X\n", + port_join_state, + new_join_state + ); + } + else + { + /* we need to return the stored scope state */ + mcmember_rec.scope_state = p_mcm_port->scope_state; + + /* OK we can leave */ + /* note: osm_sm_mcgrp_leave() will release p_rcv->p_lock */ + + status = osm_sm_mcgrp_leave(p_rcv->p_sm, mlid, portguid); + if(status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_leave_mgrp: ERR 1B09: " + "osm_sm_mcgrp_leave failed\n" ); + } + } + } + else + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_leave_mgrp: ERR 1B25: " + "Received an invalid delete request for " + "MGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 " for " + "PortGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n", + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.interface_id ), + cl_ntoh64( p_recvd_mcmember_rec->port_gid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->port_gid.unicast.interface_id ) ); + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + } + else + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_leave_mgrp: " + "Failed since multicast group not present\n" ); + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* Send an SA response */ + __osm_mcmr_rcv_respond( p_rcv, p_madw, &mcmember_rec ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + Handle a join (or create) request +**********************************************************************/ +static void +__osm_mcmr_rcv_join_mgrp( + IN osm_mcmr_recv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + boolean_t valid; + osm_mgrp_t *p_mgrp = NULL; + ib_api_status_t status; + ib_sa_mad_t *p_sa_mad; + ib_member_rec_t*p_recvd_mcmember_rec; + ib_member_rec_t mcmember_rec; + ib_net16_t sa_status; + ib_net16_t mlid; + osm_mcm_port_t *p_mcmr_port; + ib_net64_t portguid; + osm_port_t * p_port; + osm_physp_t* p_physp; + osm_physp_t* p_request_physp; + uint8_t is_new_group; /* TRUE = there is a need to create a group */ + osm_mcast_req_type_t req_type; + uint8_t join_state; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mcmr_rcv_join_mgrp ); + + p_mgrp = NULL; + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_mcmember_rec = + (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + portguid = p_recvd_mcmember_rec->port_gid.unicast.interface_id; + + mcmember_rec = *p_recvd_mcmember_rec; + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_join_mgrp: " + "Dump of incoming record\n" ); + osm_dump_mc_record( p_rcv->p_log, &mcmember_rec, OSM_LOG_DEBUG ); + } + + CL_PLOCK_EXCL_ACQUIRE(p_rcv->p_lock); + + /* make sure the requested port guid is known to the SM */ + p_port = (osm_port_t *)cl_qmap_get(&p_rcv->p_subn->port_guid_tbl, + portguid); + + if (p_port == (osm_port_t *)cl_qmap_end(&p_rcv->p_subn->port_guid_tbl)) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_join_mgrp: " + "Unknown port GUID 0x%016" PRIx64 "\n", + portguid ); + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + p_physp = osm_port_get_default_phys_ptr(p_port); + /* Check that the 
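
[Editor's note - the leave arithmetic above in isolation: clear the requested JoinState bits from the stored ones; only when nothing remains does the port actually leave. A standalone sketch; bit meanings follow the usual JoinState convention (bit 0 FullMember, bit 1 NonMember, bit 2 SendOnlyNonMember).]

#include <stdint.h>
#include <stdio.h>

/* Returns the JoinState bits that remain after a delete request. */
static uint8_t leave_join_state(uint8_t stored, uint8_t requested)
{
	return (uint8_t)(stored & ~(requested & 0x0F));
}

int main(void)
{
	/* FullMember|SendOnlyNonMember (0x5), request removes FullMember (0x1) */
	printf("0x%X\n", leave_join_state(0x5, 0x1));   /* 0x4 - update only */
	printf("0x%X\n", leave_join_state(0x1, 0x1));   /* 0x0 - real leave  */
	return 0;
}
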
p_physp and the requester physp are in the same + partition. */ + p_request_physp = + osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_request_physp == NULL) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + goto Exit; + } + + if (!osm_physp_share_pkey( p_rcv->p_log, p_physp, p_request_physp)) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_rcv_join_mgrp: " + "Port and requester don't share pkey\n" ); + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + ib_member_get_scope_state( + p_recvd_mcmember_rec->scope_state, NULL, &join_state); + + /* do we need to create a new group? */ + status = __get_mgrp_by_mgid(p_rcv, p_recvd_mcmember_rec, &p_mgrp); + if ((status == IB_NOT_FOUND) || p_mgrp->to_be_deleted) + { + /* check for JoinState.FullMember = 1 o15.0.1.9 */ + if ((join_state & 0x01) != 0x01) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_join_mgrp: ERR 1B10: " + "Provided Join State != FullMember - required for create, " + "MGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n", + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.interface_id ) ); + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* check for the comp_mask */ + valid = __check_create_comp_mask(p_sa_mad->comp_mask, + p_recvd_mcmember_rec); + if (valid) + { + status = osm_mcmr_rcv_create_new_mgrp(p_rcv, + p_sa_mad->comp_mask, + p_recvd_mcmember_rec, + p_physp, + &p_mgrp); + if (status != IB_SUCCESS) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + sa_status = status; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + /* copy the MGID to the result */ + mcmember_rec.mgid = p_mgrp->mcmember_rec.mgid; + } + else + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_join_mgrp: ERR 1B11: " + "method = %s, " + "scope_state = 0x%x, " + "component mask = 0x%016" PRIx64 ", " + "expected comp mask = 0x%016" PRIx64 ", " + "MGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 " from port 0x%016" PRIx64 "\n", + ib_get_sa_method_str(p_sa_mad->method), + p_recvd_mcmember_rec->scope_state, + cl_ntoh64(p_sa_mad->comp_mask), + CL_NTOH64(REQUIRED_MC_CREATE_COMP_MASK), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.interface_id ), + cl_ntoh64( portguid ) ); + + sa_status = IB_SA_MAD_STATUS_INSUF_COMPS; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + is_new_group = 1; + req_type = OSM_MCAST_REQ_TYPE_CREATE; + } + else + { + /* no need for a new group */ + is_new_group = 0; + req_type = OSM_MCAST_REQ_TYPE_JOIN; + } + + CL_ASSERT(p_mgrp); + mlid = p_mgrp->mlid; + + /* + * o15-0.2.4: If SA supports UD multicast, then SA shall cause an + * endport to join an existing multicast group if: + * 1. It receives a SubnAdmSet() method for a MCMemberRecord, and + * - WE KNOW THAT ALREADY + * 2. The MGID is specified and matches an existing multicast + * group, and + * - WE KNOW THAT ALREADY + * 3. The MCMemberRecord:JoinState is not all 0s, and + * 4. PortGID is specified and + * - WE KNOW THAT ALREADY (as it matched a real one) + * 5. 
All other components match that existing group, either by + * being wildcarded or by having values identical to those specified + * by the component mask and in use by the group with the exception + * of components such as ProxyJoin and Reserved, which are ignored + * by SA. + * + * We need to check #3 and #5 here: + */ + valid = __validate_more_comp_fields( + p_rcv->p_log, + p_mgrp, + p_recvd_mcmember_rec, + p_sa_mad->comp_mask) && __validate_port_caps( + p_rcv->p_log, + p_mgrp, + p_physp) && (join_state != 0); + + if (!valid) + { + /* since we might have created the new group we need to cleanup */ + __cleanup_mgrp(p_rcv, mlid); + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_join_mgrp: ERR 1B12: " + "__validate_more_comp_fields, __validate_port_caps, " + "or JoinState = 0 failed from port 0x%016" PRIx64 ", " + "sending IB_SA_MAD_STATUS_REQ_INVALID\n", + cl_ntoh64( portguid ) ); + + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* + * Do some validation of the modification + */ + if (!is_new_group) + { + /* + * o15-0.2.1 requires validation of the requesting port + * in the case of modification: + */ + valid = __validate_modify(p_rcv, + p_mgrp, + osm_madw_get_mad_addr_ptr(p_madw), + p_recvd_mcmember_rec, + &p_mcmr_port); + if (!valid) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_join_mgrp: ERR 1B13: " + "__validate_modify failed, " + "sending IB_SA_MAD_STATUS_REQ_INVALID\n" ); + + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + } + + /* create or update existing port (join-state will be updated) */ + status = __add_new_mgrp_port( + p_rcv, + p_mgrp, + p_recvd_mcmember_rec, + osm_madw_get_mad_addr_ptr(p_madw), + &p_mcmr_port); + + if (status != IB_SUCCESS) + { + /* we fail to add the port so we might need to delete the group */ + __cleanup_mgrp(p_rcv, mlid); + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + if (status == IB_INVALID_PARAMETER) + sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + else + sa_status = IB_SA_MAD_STATUS_NO_RESOURCES; + + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* o15.0.1.11: copy the join state */ + mcmember_rec.scope_state = p_mcmr_port->scope_state; + + /* copy qkey mlid tclass pkey sl_flow_hop mtu rate pkt_life sl_flow_hop */ + __copy_from_create_mc_rec(&mcmember_rec, &p_mgrp->mcmember_rec); + + /* Release the lock as we don't need it. 
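
[Editor's note - a standalone sketch of the scope_state byte the join path keys on, assuming the packing the patch's 0xF0/0x0F masks imply: scope in the high nibble, JoinState in the low nibble. Group creation additionally requires the FullMember bit (o15.0.1.9).]

#include <stdint.h>
#include <stdio.h>

static void get_scope_state(uint8_t ss, uint8_t *p_scope, uint8_t *p_state)
{
	*p_scope = (uint8_t)(ss >> 4);     /* multicast scope */
	*p_state = (uint8_t)(ss & 0x0F);   /* JoinState bits  */
}

int main(void)
{
	uint8_t scope, state;
	get_scope_state(0x21, &scope, &state);  /* link-local, FullMember */
	printf("scope=0x%X state=0x%X create_ok=%d\n",
	       scope, state, (state & 0x01) == 0x01);   /* 0x2 0x1 1 */
	return 0;
}
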
*/ + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + /* do the actual routing (actually schedule the update) */ + status = + osm_sm_mcgrp_join(p_rcv->p_sm, + mlid, + p_recvd_mcmember_rec->port_gid.unicast.interface_id, + req_type); + + if (status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_join_mgrp: ERR 1B14: " + "osm_sm_mcgrp_join failed, " + "sending IB_SA_MAD_STATUS_NO_RESOURCES\n" ); + + CL_PLOCK_EXCL_ACQUIRE(p_rcv->p_lock); + + /* the request for routing failed so we need to remove the port */ + p_mgrp = __get_mgrp_by_mlid(p_rcv, mlid); + if (p_mgrp != NULL) + { + osm_mgrp_remove_port( + p_rcv->p_subn, + p_rcv->p_log, + p_mgrp, + p_recvd_mcmember_rec->port_gid.unicast.interface_id); + __cleanup_mgrp(p_rcv, mlid); + } + CL_PLOCK_RELEASE( p_rcv->p_lock ); + sa_status = IB_SA_MAD_STATUS_NO_RESOURCES; + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + + } /* failed to route */ + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_mc_record( p_rcv->p_log, &mcmember_rec, OSM_LOG_DEBUG ); + + __osm_mcmr_rcv_respond( p_rcv, p_madw, &mcmember_rec ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + Add a patched multicast group to the results list +**********************************************************************/ +static ib_api_status_t +__osm_mcmr_rcv_new_mcmr( + IN osm_mcmr_recv_t* const p_rcv, + IN const ib_member_rec_t* p_rcvd_rec, + IN cl_qlist_t* const p_list ) +{ + osm_mcmr_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mcmr_rcv_new_mcmr ); + + p_rec_item = (osm_mcmr_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_rcv_new_mcmr: ERR 1B15: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + /* HACK: Not trusted requesters should result with 0 Join + State, Port Guid, and Proxy */ + p_rec_item->rec = *p_rcvd_rec; + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + Match the given mgrp to the requested mcmr +**********************************************************************/ +void +__osm_sa_mcm_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_mgrp_t * const p_mgrp = (osm_mgrp_t *)p_map_item; + osm_sa_mcmr_search_ctxt_t* const p_ctxt = + (osm_sa_mcmr_search_ctxt_t *)context; + osm_mcmr_recv_t* const p_rcv = p_ctxt->p_rcv; + const ib_member_rec_t* p_rcvd_rec = p_ctxt->p_mcmember_rec; + const osm_physp_t* p_req_physp = p_ctxt->p_req_physp; + + /* since we might change scope_state */ + ib_member_rec_t match_rec; + ib_net64_t comp_mask = p_ctxt->comp_mask; + osm_mcm_port_t* p_mcm_port; + ib_net64_t portguid = p_rcvd_rec->port_gid.unicast.interface_id; + /* will be used for group or port info */ + uint8_t scope_state; + uint8_t scope_state_mask = 0; + cl_map_item_t *p_item; + ib_gid_t port_gid; + boolean_t proxy_join; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_mcm_by_comp_mask_cb ); + + osm_log(p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_mcm_by_comp_mask_cb: " + "Checking mlid:0x%X\n", + cl_ntoh16(p_mgrp->mlid)); + + /* the group might be marked for deletion */ + if (p_mgrp->to_be_deleted) + { + 
osm_log(p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_mcm_by_comp_mask_cb: " + "Group mlid:0x%X is marked to be deleted\n", + cl_ntoh16(p_mgrp->mlid)); + goto Exit; + } + + /* first try to eliminate the group by MGID, MLID, or P_Key */ + if ((IB_MCR_COMPMASK_MGID & comp_mask) && + memcmp(&p_rcvd_rec->mgid, &p_mgrp->mcmember_rec.mgid, sizeof(ib_gid_t))) + goto Exit; + + if ((IB_MCR_COMPMASK_MLID & comp_mask) && + memcmp(&p_rcvd_rec->mlid, &p_mgrp->mcmember_rec.mlid, sizeof(uint16_t))) + goto Exit; + + /* if the requester physical port doesn't have the pkey that is defined for + the group - exit. */ + if (!osm_physp_has_pkey( p_rcv->p_log, p_mgrp->mcmember_rec.pkey, + p_req_physp )) + goto Exit; + + /* now do the rest of the match */ + if ((IB_MCR_COMPMASK_QKEY & comp_mask) && + (p_rcvd_rec->qkey != p_mgrp->mcmember_rec.qkey)) + goto Exit; + + if ((IB_MCR_COMPMASK_PKEY & comp_mask) && + (p_rcvd_rec->pkey != p_mgrp->mcmember_rec.pkey)) + goto Exit; + + if ((IB_MCR_COMPMASK_TCLASS & comp_mask) && + (p_rcvd_rec->tclass != p_mgrp->mcmember_rec.tclass)) + goto Exit; + + /* check SL, Flow, and Hop limit */ + { + uint8_t mgrp_sl, query_sl; + uint32_t mgrp_flow, query_flow; + uint8_t mgrp_hop, query_hop; + + ib_member_get_sl_flow_hop(p_rcvd_rec->sl_flow_hop, + &query_sl, &query_flow, &query_hop); + + ib_member_get_sl_flow_hop(p_mgrp->mcmember_rec.sl_flow_hop, + &mgrp_sl, &mgrp_flow, &mgrp_hop); + + if (IB_MCR_COMPMASK_SL & comp_mask ) + if (query_sl != mgrp_sl) + goto Exit; + + if (IB_MCR_COMPMASK_FLOW & comp_mask) + if (query_flow != mgrp_flow) + goto Exit; + + if (IB_MCR_COMPMASK_HOP & comp_mask) + if (query_hop != mgrp_hop) + goto Exit; + } + + if ((IB_MCR_COMPMASK_PROXY & comp_mask) && + (p_rcvd_rec->proxy_join != p_mgrp->mcmember_rec.proxy_join)) + goto Exit; + + if (IB_MCR_COMPMASK_SCOPE & comp_mask) + scope_state_mask = 0xF0; + + if (IB_MCR_COMPMASK_JOIN_STATE & comp_mask) + scope_state_mask = scope_state_mask | 0x0F; + + /* need to validate mtu, rate, and pkt_lifetime fields. 
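
[Editor's note - a sketch of the 32-bit sl_flow_hop word that ib_member_get_sl_flow_hop() unpacks above. The layout assumed here is the MCMemberRecord one - SL in the top 4 bits, FlowLabel in the next 20, HopLimit in the low 8 - in host byte order; treat both as assumptions, not a statement about the IBAL accessor.]

#include <stdint.h>
#include <stdio.h>

static void get_sl_flow_hop(uint32_t w, uint8_t *p_sl, uint32_t *p_flow,
                            uint8_t *p_hop)
{
	*p_sl = (uint8_t)(w >> 28);        /* 4-bit service level */
	*p_flow = (w >> 8) & 0xFFFFF;      /* 20-bit flow label   */
	*p_hop = (uint8_t)(w & 0xFF);      /* 8-bit hop limit     */
}

int main(void)
{
	uint8_t sl, hop;
	uint32_t flow;
	get_sl_flow_hop(0x10000240u, &sl, &flow, &hop);
	printf("sl=%u flow=0x%X hop=%u\n", sl, flow, hop); /* sl=1 flow=0x2 hop=64 */
	return 0;
}
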
*/ + if (__validate_more_comp_fields( p_rcv->p_log, + p_mgrp, + p_rcvd_rec, + comp_mask ) == FALSE) + goto Exit; + + /* Port specific fields */ + /* so did we get the PortGUID mask */ + if (IB_MCR_COMPMASK_PORT_GID & comp_mask) + { + /* try to find this port */ + if (osm_mgrp_is_port_present(p_mgrp, portguid, &p_mcm_port)) + { + scope_state = p_mcm_port->scope_state; + memcpy(&port_gid, &(p_mcm_port->port_gid), sizeof(ib_gid_t)); + proxy_join = p_mcm_port->proxy_join; + } + else + { + /* port not in group */ + goto Exit; + } + } + else + { + /* point to the group information */ + scope_state = p_mgrp->mcmember_rec.scope_state; + } + + /* Many MC records returned */ + if ( (p_ctxt->trusted_req == TRUE) && !(IB_MCR_COMPMASK_PORT_GID & comp_mask) ) + { + osm_log(p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_mcm_by_comp_mask_cb: " + "Trusted req is TRUE and no specific port defined\n"); + + /* return all the ports that match in this MC group */ + p_item = cl_qmap_head(&(p_mgrp->mcm_port_tbl)); + while( p_item != cl_qmap_end(&(p_mgrp->mcm_port_tbl)) ) + { + p_mcm_port=(osm_mcm_port_t *)p_item; + + if ((scope_state_mask & p_rcvd_rec->scope_state) == + (scope_state_mask & p_mcm_port->scope_state)) + { + /* add to the list */ + match_rec = p_mgrp->mcmember_rec; + match_rec.scope_state = p_mcm_port->scope_state; + memcpy( &(match_rec.port_gid), &(p_mcm_port->port_gid), + sizeof(ib_gid_t)); + osm_log(p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_mcm_by_comp_mask_cb: " + "Record of port_gid: 0x%016" PRIx64 "0x%016" PRIx64 + " in multicast_lid: 0x%X is returned\n", + cl_ntoh64(match_rec.port_gid.unicast.prefix), + cl_ntoh64(match_rec.port_gid.unicast.interface_id), + cl_ntoh16(p_mgrp->mlid) + ); + + match_rec.proxy_join = (uint8_t)(p_mcm_port->proxy_join); + + __osm_mcmr_rcv_new_mcmr(p_rcv, &match_rec, p_ctxt->p_list); + } + p_item = cl_qmap_next(p_item); + } + } + /* One MC record returned */ + else + { + if ((scope_state_mask & p_rcvd_rec->scope_state) != + (scope_state_mask & scope_state)) + goto Exit; + + /* add to the list */ + match_rec = p_mgrp->mcmember_rec; + match_rec.scope_state = scope_state; + memcpy(&(match_rec.port_gid), &port_gid, sizeof(ib_gid_t)); + match_rec.proxy_join = (uint8_t)proxy_join; + + __osm_mcmr_rcv_new_mcmr(p_rcv, &match_rec, p_ctxt->p_list); + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + Handle a query request +**********************************************************************/ +static void +__osm_mcmr_query_mgrp(IN osm_mcmr_recv_t* const p_rcv, + IN const osm_madw_t* const p_madw) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_member_rec_t* p_rcvd_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_member_rec_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_sa_mcmr_search_ctxt_t context; + osm_mcmr_item_t* p_rec_item; + ib_api_status_t status; + ib_net64_t comp_mask; + osm_physp_t* p_req_physp; + boolean_t trusted_req; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mcmr_query_mgrp ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + /* + if sm_key is not zero and does not match we never get here + see main SA receiver + */ + trusted_req = (p_rcvd_mad->sm_key != 0); + + /* update the requester physical port. 
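
[Editor's note - the masked comparison used above, standalone: only the nibbles the query's component mask selected take part in the match. Names are illustrative.]

#include <stdint.h>
#include <stdio.h>

static int scope_state_matches(uint8_t requested, uint8_t stored,
                               int scope_in_mask, int join_state_in_mask)
{
	uint8_t mask = 0;
	if (scope_in_mask)
		mask |= 0xF0;              /* compare the scope nibble     */
	if (join_state_in_mask)
		mask |= 0x0F;              /* compare the JoinState nibble */
	return (requested & mask) == (stored & mask);
}

int main(void)
{
	/* JoinState masked in, scope not: differing scopes still match */
	printf("%d\n", scope_state_matches(0x21, 0x51, 0, 1));   /* 1 */
	printf("%d\n", scope_state_matches(0x21, 0x51, 1, 1));   /* 0 */
	return 0;
}
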
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_query_mgrp: ERR 1B04: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + cl_qlist_init( &rec_list ); + + context.p_mcmember_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + context.trusted_req = trusted_req; + + CL_PLOCK_ACQUIRE( p_rcv->p_lock ); + + /* simply go over all MCGs and match */ + cl_qmap_apply_func( &p_rcv->p_subn->mgrp_mlid_tbl, + __osm_sa_mcm_by_comp_mask_cb, + &context); + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if ( (p_rcvd_mad->method == IB_MAD_METHOD_GET) && + (num_rec > 1)) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_query_mgrp: ERR 1B05: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... */ + p_rec_item = (osm_mcmr_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_mcmr_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_mcmr_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_member_rec_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_mcmr_query_mgrp: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mcmr_query_mgrp: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_member_rec_t) + IB_SA_MAD_HDR_SIZE, + osm_madw_get_mad_addr_ptr(p_madw) ); + + if( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_query_mgrp: ERR 1B16: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_mcmr_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
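
[Editor's note - worked numbers for the non-RMPP trim above, under assumed sizes that are conventional for IBA but not defined in this hunk: a 256-byte MAD block, a 56-byte SA header, and a 52-byte MCMemberRecord. That leaves (256 - 56) / 52 = 3 records per single-MAD response.]

#include <stdio.h>

int main(void)
{
	/* assumed sizes - see the note above */
	const unsigned mad_block_size = 256;
	const unsigned sa_hdr_size = 56;
	const unsigned member_rec_size = 52;
	printf("records per MAD = %u\n",
	       (mad_block_size - sa_hdr_size) / member_rec_size);   /* 3 */
	return 0;
}
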
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_member_rec_t) ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + p_resp_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + + /* + p923 - The PortGID, JoinState and ProxyJoin shall be zero, + except in the case of a trusted request. + Note: In the mad controller we check that the SM_Key received on + the mad is valid. Meaning - is either zero or equal to the local + sm_key. + */ + + for ( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_mcmr_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + if (trusted_req == FALSE) + { + memset(&p_resp_rec->port_gid, 0, sizeof(ib_gid_t)); + ib_member_set_join_state(p_resp_rec, 0); + p_resp_rec->proxy_join = 0; + } + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if(status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mcmr_query_mgrp: ERR 1B17: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcmr_rcv_process( + IN osm_mcmr_recv_t* const p_rcv, + const IN osm_madw_t* const p_madw ) +{ + ib_sa_mad_t *p_sa_mad; + ib_net16_t sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + ib_member_rec_t *p_recvd_mcmember_rec; + boolean_t valid; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_mcmr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_mcmember_rec = + (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_MCMEMBER_RECORD ); + + switch (p_sa_mad->method) + { + case IB_MAD_METHOD_SET: + valid = __check_join_comp_mask(p_sa_mad->comp_mask); + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_process: ERR 1B18: " + "component mask = 0x%016" PRIx64 ", " + "expected comp mask = 0x%016" PRIx64 " ," + "MGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 " for " + "PortGID: 0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n", + cl_ntoh64(p_sa_mad->comp_mask), + CL_NTOH64(JOIN_MC_COMP_MASK), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->mgid.unicast.interface_id ), + cl_ntoh64( p_recvd_mcmember_rec->port_gid.unicast.prefix ), + cl_ntoh64( p_recvd_mcmember_rec->port_gid.unicast.interface_id ) ); + + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* + * Join or Create 
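
[Editor's note - the untrusted-response scrubbing above, in isolation: PortGID, JoinState and ProxyJoin are blanked unless the requester proved itself trusted via SM_Key. The struct is an illustrative stand-in for ib_member_rec_t, and keeping the scope nibble while zeroing JoinState is an assumption about ib_member_set_join_state().]

#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct mc_rec {
	uint8_t port_gid[16];
	uint8_t scope_state;   /* scope nibble | JoinState nibble */
	uint8_t proxy_join;
};

static void scrub_for_untrusted(struct mc_rec *p_rec)
{
	memset(p_rec->port_gid, 0, sizeof(p_rec->port_gid));
	p_rec->scope_state &= 0xF0;   /* zero JoinState, keep scope */
	p_rec->proxy_join = 0;
}

int main(void)
{
	struct mc_rec rec = { { 0xFE, 0x80 }, 0x21, 1 };
	scrub_for_untrusted(&rec);
	printf("gid[0]=%u scope_state=0x%X proxy=%u\n",
	       rec.port_gid[0], rec.scope_state, rec.proxy_join); /* 0 0x20 0 */
	return 0;
}
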
Multicast Group + */ + __osm_mcmr_rcv_join_mgrp(p_rcv, p_madw); + break; + case IB_MAD_METHOD_DELETE: + valid = __check_join_comp_mask(p_sa_mad->comp_mask); + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_process: ERR 1B20: " + "component mask = 0x%016" PRIx64 ", " + "expected comp mask = 0x%016" PRIx64 "\n", + cl_ntoh64(p_sa_mad->comp_mask), + CL_NTOH64(JOIN_MC_COMP_MASK) ); + + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* + * Leave Multicast Group + */ + __osm_mcmr_rcv_leave_mgrp(p_rcv, p_madw); + break; + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_GETTABLE: + /* + * Querying a Multicast Group + */ + __osm_mcmr_query_mgrp(p_rcv, p_madw); + break; + default: + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_process: ERR 1B21: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + break; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record_ctrl.c new file mode 100644 index 00000000..19285e73 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mcmember_record_ctrl.c @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_mcmr_rcv_ctrl_t. + * This object represents the Multicast member record controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_mcmr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_mcmr_rcv_process( ((osm_mcmr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcmr_rcv_ctrl_construct( + IN osm_mcmr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_mcmr_rcv_ctrl_destroy( + IN osm_mcmr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mcmr_rcv_ctrl_init( + IN osm_mcmr_rcv_ctrl_t* const p_ctrl, + IN osm_mcmr_recv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_mcmr_rcv_ctrl_init ); + + osm_mcmr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_MCMEMBER_RECORD, + __osm_mcmr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mcmr_rcv_ctrl_init: ERR 1C01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record.c new file mode 100644 index 00000000..5cd6efad --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record.c @@ -0,0 +1,547 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
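
[Editor's note - the controller pattern just defined, reduced to its shape: a context struct plus a dispatcher callback that forwards each dispatched MAD to the receiver. A standalone sketch with illustrative types; the real registration goes through cl_disp_register() as above.]

#include <stdio.h>

typedef void (*disp_cb_t)(void *context, void *p_data);

struct my_ctrl {
	void *p_rcv;                  /* receiver the callback forwards to */
};

static void my_disp_callback(void *context, void *p_data)
{
	struct my_ctrl *p_ctrl = (struct my_ctrl *)context;
	printf("forward MAD %p to receiver %p\n", p_data, p_ctrl->p_rcv);
}

int main(void)
{
	struct my_ctrl ctrl = { (void *)0x1 };
	disp_cb_t cb = my_disp_callback;  /* what cl_disp_register() stores   */
	cb(&ctrl, (void *)0x2);           /* what the dispatcher does on post */
	return 0;
}
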
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_mftr_rcv_t. + * This object represents the MulticastForwardingTable Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_MFTR_RCV_POOL_MIN_SIZE 32 +#define OSM_MFTR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_mftr_item +{ + cl_pool_item_t pool_item; + ib_mft_record_t rec; +} osm_mftr_item_t; + +typedef struct _osm_mftr_search_ctxt +{ + const ib_mft_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_mftr_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_mftr_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_mftr_rcv_construct( + IN osm_mftr_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mftr_rcv_destroy( + IN osm_mftr_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_mftr_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mftr_rcv_init( + IN osm_mftr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_mftr_rcv_init ); + + osm_mftr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_MFTR_RCV_POOL_MIN_SIZE, + 0, + OSM_MFTR_RCV_POOL_GROW_SIZE, + sizeof(osm_mftr_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_mftr_rcv_new_mftr( + IN osm_mftr_rcv_t* const p_rcv, + IN osm_switch_t* const p_sw, + IN cl_qlist_t* const p_list, + IN ib_net16_t const lid, + IN uint16_t const block, + IN uint8_t const position ) +{ + osm_mftr_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + uint16_t position_block_num; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mftr_rcv_new_mftr ); + + p_rec_item = (osm_mftr_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mftr_rcv_new_mftr: ERR 4A02: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mftr_rcv_new_mftr: " + "New 
MulticastForwardingTable: sw 0x%016" PRIx64 + "\n\t\t\t\tblock %u position %u lid 0x%02X\n", + cl_ntoh64( osm_node_get_node_guid( p_sw->p_node ) ), + block, position, cl_ntoh16( lid ) + ); + } + + position_block_num = ((uint16_t)position << 12) | + (block & IB_MCAST_BLOCK_ID_MASK_HO); + + memset( &p_rec_item->rec, 0, sizeof(ib_mft_record_t) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.position_block_num = cl_hton16( position_block_num ); + + /* copy the mft block */ + osm_switch_get_mft_block( p_sw, block, position, p_rec_item->rec.mft ); + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static osm_port_t* +__osm_mftr_get_port_by_guid( + IN osm_mftr_rcv_t* const p_rcv, + IN uint64_t port_guid ) +{ + osm_port_t* p_port; + + CL_PLOCK_ACQUIRE(p_rcv->p_lock); + + p_port = (osm_port_t *)cl_qmap_get(&p_rcv->p_subn->port_guid_tbl, + port_guid); + if (p_port == (osm_port_t *)cl_qmap_end(&p_rcv->p_subn->port_guid_tbl)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mftr_get_port_by_guid ERR 4A04: " + "Invalid port GUID 0x%016" PRIx64 "\n", + port_guid ); + p_port = NULL; + } + + CL_PLOCK_RELEASE(p_rcv->p_lock); + return p_port; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mftr_rcv_by_comp_mask( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_mftr_search_ctxt_t* const p_ctxt = + (osm_mftr_search_ctxt_t *)context; + osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + const ib_mft_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec; + osm_mftr_rcv_t* const p_rcv = p_ctxt->p_rcv; + ib_net64_t const comp_mask = p_ctxt->comp_mask; + const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp; + osm_port_t* p_port; + uint16_t min_lid_ho, max_lid_ho; + uint16_t position_block_num_ho; + uint16_t min_block, max_block, block; + const osm_physp_t* p_physp; + uint8_t min_position, max_position, position; + + /* In switches, the port guid is the node guid. */ + p_port = + __osm_mftr_get_port_by_guid( p_rcv, p_sw->p_node->node_info.port_guid ); + if (! p_port) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mftr_rcv_by_comp_mask: ERR 4A05: " + "Failed to find Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + return; + } + + /* check that the requester physp and the current physp are under + the same partition. */ + p_physp = osm_port_get_default_phys_ptr( p_port ); + if (! p_physp) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mftr_rcv_by_comp_mask: ERR 4A06: " + "Failed to find default physical Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + return; + } + if (! osm_physp_share_pkey( p_rcv->p_log, p_req_physp, p_physp )) + return; + + /* get the port 0 of the switch */ + osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho ); + + /* compare the lids - if required */ + if( comp_mask & IB_MFTR_COMPMASK_LID ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mftr_rcv_by_comp_mask: " + "Comparing lid:0x%02X to port lid range: 0x%02X .. 
0x%02X\n", + cl_ntoh16( p_rcvd_rec->lid ), min_lid_ho, max_lid_ho + ); + /* ok we are ready for range check */ + if (min_lid_ho > cl_ntoh16(p_rcvd_rec->lid) || + max_lid_ho < cl_ntoh16(p_rcvd_rec->lid)) + return; + } + + if ( !osm_switch_supports_mcast ( p_sw ) ) + return; + + /* Are there any blocks in use ? */ + if ( osm_switch_get_mft_max_block_in_use( p_sw ) == -1 ) + return; + + position_block_num_ho = cl_ntoh16( p_rcvd_rec->position_block_num ); + + /* now we need to decide which blocks to output */ + if( comp_mask & IB_MFTR_COMPMASK_BLOCK ) + { + max_block = min_block = position_block_num_ho & IB_MCAST_BLOCK_ID_MASK_HO; + if ( max_block > osm_switch_get_mft_max_block_in_use( p_sw ) ) + return; + } + else + { + /* use as many blocks as needed */ + min_block = 0; + max_block = osm_switch_get_mft_max_block_in_use( p_sw ); + } + + /* need to decide which positions to output */ + if ( comp_mask & IB_MFTR_COMPMASK_POSITION ) + { + min_position = max_position = (position_block_num_ho & 0xF000) >> 12; + if (max_position > osm_switch_get_mft_max_position( p_sw ) ) + return; + } + else + { + /* use as many positions as needed */ + min_position = 0; + max_position = osm_switch_get_mft_max_position( p_sw ); + } + + /* so we can add these one by one ... */ + for (block = min_block; block <= max_block; block++) + for (position = min_position; position <= max_position; position++) + __osm_mftr_rcv_new_mftr( p_rcv, p_sw, p_ctxt->p_list, + osm_port_get_base_lid(p_port), + block, position ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mftr_rcv_process( + IN osm_mftr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_mft_record_t* p_rcvd_rec; + ib_mft_record_t* p_resp_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_mftr_search_ctxt_t context; + osm_mftr_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_mftr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_mft_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_MFT_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_process: ERR 4A08: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
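it is then checked against each switch's port 0 so that only switches sharing a PKey with the requester are reported.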
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_process: ERR 4A07: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + /* Go over all switches */ + cl_qmap_apply_func( &p_rcv->p_subn->sw_guid_tbl, + __osm_mftr_rcv_by_comp_mask, + &context ); + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_process: ERR 4A09: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... */ + p_rec_item = (osm_mftr_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_mftr_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_mftr_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + /* we limit the number of records to a single packet */ + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_mft_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_mftr_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_mftr_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) && + (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_mft_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_process: ERR 4A10: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_mftr_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RESOURCES ); + + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
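Splitting the payload across multiple MADs, when needed, is left to the vendor RMPP SAR (hence only attr_offset is filled in below).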
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_mft_record_t) ); + + p_resp_rec = (ib_mft_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_mftr_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_process: ERR 4A11: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record_ctrl.c new file mode 100644 index 00000000..3c0c39c7 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_mft_record_ctrl.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_mftr_rcv_ctrl_t. + * This object represents the MulticastForwardingTable request controller object. + * This object is part of the opensm family of objects. 
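+ * The controller's sole task is to register the receiver's entry point with the dispatcher.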
+ * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_mftr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_mftr_rcv_process( ((osm_mftr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mftr_rcv_ctrl_construct( + IN osm_mftr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_mftr_rcv_ctrl_destroy( + IN osm_mftr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mftr_rcv_ctrl_init( + IN osm_mftr_rcv_ctrl_t* const p_ctrl, + IN osm_mftr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_mftr_rcv_ctrl_init ); + + osm_mftr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_MFT_RECORD, + __osm_mftr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_mftr_rcv_ctrl_init: ERR 4A01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record.c new file mode 100644 index 00000000..83404e0b --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record.c @@ -0,0 +1,1652 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_mpr_rcv_t. + * This object represents the MultiPath Record Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_MPR_RCV_POOL_MIN_SIZE 64 +#define OSM_MPR_RCV_POOL_GROW_SIZE 64 + +#define OSM_SA_MPR_MAX_NUM_PATH 127 + +typedef struct _osm_mpr_item +{ + cl_pool_item_t pool_item; + const osm_port_t *p_src_port; + const osm_port_t *p_dest_port; + int hops; + ib_path_rec_t path_rec; +} osm_mpr_item_t; + +typedef struct _osm_path_parms +{ + ib_net16_t pkey; + uint8_t mtu; + uint8_t rate; + uint8_t sl; + uint8_t pkt_life; + boolean_t reversible; + int hops; +} osm_path_parms_t; + +/********************************************************************** + **********************************************************************/ +void +osm_mpr_rcv_construct( + IN osm_mpr_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pr_pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_mpr_rcv_destroy( + IN osm_mpr_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_mpr_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pr_pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_mpr_rcv_init( + IN osm_mpr_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_mpr_rcv_init ); + + osm_mpr_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pr_pool, + OSM_MPR_RCV_POOL_MIN_SIZE, + 0, + OSM_MPR_RCV_POOL_GROW_SIZE, + sizeof(osm_mpr_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static inline boolean_t +__osm_sa_multipath_rec_is_tavor_port( + IN const osm_port_t* const p_port) +{ + osm_node_t const* p_node; + ib_net32_t vend_id; + + p_node = osm_port_get_parent_node( p_port ); + vend_id = ib_node_info_get_vendor_id( &p_node->node_info ); + + return( (p_node->node_info.device_id == CL_HTON16(23108)) && + ((vend_id == CL_HTON32(OSM_VENDOR_ID_MELLANOX)) || + (vend_id == CL_HTON32(OSM_VENDOR_ID_TOPSPIN)) || + (vend_id == CL_HTON32(OSM_VENDOR_ID_SILVERSTORM)) || + (vend_id == CL_HTON32(OSM_VENDOR_ID_VOLTAIRE))) ); +} + +/********************************************************************** + 
**********************************************************************/ +boolean_t + __osm_sa_multipath_rec_apply_tavor_mtu_limit( + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const ib_net64_t comp_mask) +{ + uint8_t required_mtu; + + /* only if at least one of the ports is a Tavor device */ + if (! __osm_sa_multipath_rec_is_tavor_port(p_src_port) && + ! __osm_sa_multipath_rec_is_tavor_port(p_dest_port) ) + return( FALSE ); + + /* + we can apply the patch if either: + 1. No MTU required + 2. Required MTU < + 3. Required MTU = 1K or 512 or 256 + 4. Required MTU > 256 or 512 + */ + required_mtu = ib_multipath_rec_mtu( p_mpr ); + if ( ( comp_mask & IB_MPR_COMPMASK_MTUSELEC ) && + ( comp_mask & IB_PR_COMPMASK_MTU ) ) + { + switch( ib_multipath_rec_mtu_sel( p_mpr ) ) + { + case 0: /* must be greater than */ + case 2: /* exact match */ + if( IB_MTU_LEN_1024 < required_mtu ) + return(FALSE); + break; + + case 1: /* must be less than */ + /* can't be disqualified by this one */ + break; + + case 3: /* largest available */ + /* the ULP intentionally requested */ + /* the largest MTU possible */ + return(FALSE); + break; + + default: + /* if we're here, there's a bug in ib_multipath_rec_mtu_sel() */ + CL_ASSERT( FALSE ); + break; + } + } + + return(TRUE); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_mpr_rcv_get_path_parms( + IN osm_mpr_rcv_t* const p_rcv, + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const uint16_t dest_lid_ho, + IN const ib_net64_t comp_mask, + OUT osm_path_parms_t* const p_parms ) +{ + const osm_node_t* p_node; + const osm_physp_t* p_physp; + const osm_physp_t* p_dest_physp; + const osm_prtn_t* p_prtn; + const ib_port_info_t* p_pi; + ib_slvl_table_t* p_slvl_tbl; + ib_api_status_t status = IB_SUCCESS; + uint8_t mtu; + uint8_t rate; + uint8_t pkt_life; + uint8_t required_mtu; + uint8_t required_rate; + uint16_t required_pkey; + uint8_t required_sl; + uint8_t required_pkt_life; + ib_net16_t dest_lid; + int hops = 0; + int in_port_num = 0; + uint8_t vl; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_path_parms ); + + dest_lid = cl_hton16( dest_lid_ho ); + + p_dest_physp = osm_port_get_default_phys_ptr( p_dest_port ); + p_physp = osm_port_get_default_phys_ptr( p_src_port ); + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + mtu = ib_port_info_get_mtu_cap( p_pi ); + rate = ib_port_info_compute_rate( p_pi ); + + /* + Mellanox Tavor device performance is better using 1K MTU. + If required MTU and MTU selector are such that 1K is OK + and at least one end of the path is Tavor we override the + port MTU with 1K. 
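+    A Tavor port is recognized by device ID 23108 under the Mellanox, Topspin, SilverStorm or Voltaire vendor IDs (see __osm_sa_multipath_rec_is_tavor_port above).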
+ */ + if ( p_rcv->p_subn->opt.enable_quirks && + __osm_sa_multipath_rec_apply_tavor_mtu_limit( + p_mpr, p_src_port, p_dest_port, comp_mask) ) + if (mtu > IB_MTU_LEN_1024) + { + mtu = IB_MTU_LEN_1024; + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "Optimized Path MTU to 1K for Mellanox Tavor device\n"); + } + + if ( comp_mask & IB_MPR_COMPMASK_RAWTRAFFIC && + cl_ntoh32( p_mpr->hop_flow_raw ) & ( 1<<31 ) ) + required_pkey = osm_physp_find_common_pkey( p_physp, p_dest_physp ); + else if ( comp_mask & IB_MPR_COMPMASK_PKEY ) { + required_pkey = p_mpr->pkey; + if( !osm_physp_share_this_pkey( p_physp, p_dest_physp, required_pkey ) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4518: " + "Ports do not share specified PKey 0x%04x\n" + "\t\tsrc %" PRIx64 " dst %" PRIx64 "\n", + cl_ntoh16( required_pkey ), + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh64( osm_physp_get_port_guid( p_dest_physp ) ) ); + status = IB_NOT_FOUND; + goto Exit; + } + } else { + required_pkey = osm_physp_find_common_pkey( p_physp, p_dest_physp ); + if ( !required_pkey ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4519: " + "Ports do not have any shared PKeys\n" + "\t\tsrc %" PRIx64 " dst %" PRIx64 "\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh64( osm_physp_get_port_guid( p_dest_physp ) ) ); + status = IB_NOT_FOUND; + goto Exit; + } + } + + required_sl = OSM_DEFAULT_SL; + + if (required_pkey) { + p_prtn = (osm_prtn_t *)cl_qmap_get(&p_rcv->p_subn->prtn_pkey_tbl, + required_pkey & cl_ntoh16((uint16_t)~0x8000)); + if ( p_prtn == (osm_prtn_t *)cl_qmap_end(&p_rcv->p_subn->prtn_pkey_tbl) ) + { + /* this may be possible when pkey tables are created somehow in + previous runs or things are going wrong here */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 451A: " + "No partition found for PKey 0x%04x - using default SL %d\n", cl_ntoh16(required_pkey), required_sl ); + } + else + required_sl = p_prtn->sl; + + /* reset pkey when raw traffic */ + if( comp_mask & IB_PR_COMPMASK_RAWTRAFFIC && + cl_ntoh32( p_mpr->hop_flow_raw ) & ( 1<<31 ) ) + required_pkey = 0; + } + + if ( ( comp_mask & IB_MPR_COMPMASK_SL ) && ib_multipath_rec_sl( p_mpr ) != required_sl ) + { + status = IB_NOT_FOUND; + goto Exit; + } + + /* + Walk the subnet object from source to destination, + tracking the most restrictive rate and mtu values along the way... + + If source port node is a switch, then p_physp should + point to the port that routes the destination lid + */ + + p_node = osm_physp_get_node_ptr( p_physp ); + + if ( p_node->sw ) + { + + /* + * If the dest_lid_ho is equal to the lid of the switch pointed by + * p_sw then p_physp will be the physical port of the switch port zero. 
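+ * Otherwise osm_switch_get_route_by_lid() returns the egress physical port toward the destination LID.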
+ */ + p_physp = osm_switch_get_route_by_lid( p_node->sw, cl_ntoh16( dest_lid_ho ) ); + if ( p_physp == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4514: " + "Can't find routing to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n", + dest_lid_ho, + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + status = IB_ERROR; + goto Exit; + } + } + + /* + * Same as above + */ + p_node = osm_physp_get_node_ptr( p_dest_physp ); + + if ( p_node->sw ) + { + + p_dest_physp = osm_switch_get_route_by_lid( p_node->sw, cl_ntoh16( dest_lid_ho ) ); + + if ( p_dest_physp == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4515: " + "Can't find routing to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n", + dest_lid_ho, + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + status = IB_ERROR; + goto Exit; + } + + } + + while ( p_physp != p_dest_physp ) + { + p_physp = osm_physp_get_remote( p_physp ); + + if ( p_physp == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4505: " + "Can't find remote phys port when routing to LID 0x%X from node GUID 0x%016" PRIx64 "\n", + dest_lid_ho, + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + status = IB_ERROR; + goto Exit; + } + + hops++; + + /* + This is point to point case (no switch in between) + */ + if ( p_physp == p_dest_physp ) + break; + + p_node = osm_physp_get_node_ptr( p_physp ); + + if ( !p_node->sw ) + { + /* + There is some sort of problem in the subnet object! + If this isn't a switch, we should have reached + the destination by now! + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4503: " + "Internal error, bad path\n" ); + status = IB_ERROR; + goto Exit; + } + + /* + Check parameters for the ingress port in this switch. + */ + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + if ( mtu > ib_port_info_get_mtu_cap( p_pi ) ) + { + mtu = ib_port_info_get_mtu_cap( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest MTU = %u at intervening port 0x%016" PRIx64 + " port num 0x%X\n", + mtu, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + if ( rate > ib_port_info_compute_rate( p_pi ) ) + { + rate = ib_port_info_compute_rate( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest rate = %u at intervening port 0x%016" PRIx64 + " port num 0x%X\n", + rate, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + /* + Continue with the egress port on this switch. 
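+    Its MTU and rate caps are folded into the running path minima just as for the ingress port.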
+ */ + p_physp = osm_switch_get_route_by_lid( p_node->sw, dest_lid ); + + if ( p_physp == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_path_parms: ERR 4516: " + "Dead end on path to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n", + dest_lid_ho, + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + status = IB_ERROR; + goto Exit; + } + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + if ( comp_mask & IB_MPR_COMPMASK_SL ) { + in_port_num = osm_physp_get_port_num( p_physp ); + p_slvl_tbl = osm_physp_get_slvl_tbl( p_physp, in_port_num ); + vl = ib_slvl_table_get( p_slvl_tbl, required_sl ); + if (vl == IB_DROP_VL) { /* discard packet */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_mpr_rcv_get_path_parms: Path not found for SL %d\n" + "\t\tin_port_num %d port_guid %" PRIx64 "\n", + required_sl, in_port_num, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) ); + status = IB_NOT_FOUND; + goto Exit; + } + } + + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + if ( mtu > ib_port_info_get_mtu_cap( p_pi ) ) + { + mtu = ib_port_info_get_mtu_cap( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest MTU = %u at intervening port 0x%016" PRIx64 + " port num 0x%X\n", + mtu, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + if ( rate > ib_port_info_compute_rate( p_pi ) ) + { + rate = ib_port_info_compute_rate( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest rate = %u at intervening port 0x%016" PRIx64 + " port num 0x%X\n", + rate, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + osm_physp_get_port_num( p_physp ) ); + } + } + + } + + /* + p_physp now points to the destination + */ + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + if ( mtu > ib_port_info_get_mtu_cap( p_pi ) ) + { + mtu = ib_port_info_get_mtu_cap( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest MTU = %u at destination port 0x%016" PRIx64 "\n", + mtu, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) ); + } + } + + if ( rate > ib_port_info_compute_rate( p_pi ) ) + { + rate = ib_port_info_compute_rate( p_pi ); + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "New smallest rate = %u at destination port 0x%016" PRIx64 "\n", + rate, + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) ); + } + } + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_path_parms: " + "Path min MTU = %u, min rate = %u\n", mtu, rate ); + } + + /* + Determine if these values meet the user criteria + */ + + /* we silently ignore cases where only the MTU selector is defined */ + if ( ( comp_mask & IB_MPR_COMPMASK_MTUSELEC ) && + ( comp_mask & IB_MPR_COMPMASK_MTU ) ) + { + required_mtu = ib_multipath_rec_mtu( p_mpr ); + switch ( ib_multipath_rec_mtu_sel( p_mpr ) ) + { + case 0: /* must be greater than */ + if ( mtu <= required_mtu ) + status = IB_NOT_FOUND; + break; + + case 1: /* must be less than */ + if ( mtu >= required_mtu ) + { + /* adjust to use the highest mtu + lower then the required one */ + if ( required_mtu > 1 ) + mtu = required_mtu - 1; + else + status = 
IB_NOT_FOUND; + } + break; + + case 2: /* exact match */ + if ( mtu < required_mtu ) + status = IB_NOT_FOUND; + else + mtu = required_mtu; + break; + + case 3: /* largest available */ + /* can't be disqualified by this one */ + break; + + default: + /* if we're here, there's a bug in ib_multipath_rec_mtu_sel() */ + CL_ASSERT( FALSE ); + status = IB_ERROR; + break; + } + } + + /* we silently ignore cases where only the Rate selector is defined */ + if ( ( comp_mask & IB_MPR_COMPMASK_RATESELEC ) && + ( comp_mask & IB_PR_COMPMASK_RATE ) ) + { + required_rate = ib_multipath_rec_rate( p_mpr ); + switch ( ib_multipath_rec_rate_sel( p_mpr ) ) + { + case 0: /* must be greater than */ + if ( rate <= required_rate ) + status = IB_NOT_FOUND; + break; + + case 1: /* must be less than */ + if ( rate >= required_rate ) + { + /* adjust the rate to use the highest rate + lower then the required one */ + if ( required_rate > 2 ) + rate = required_rate - 1; + else + status = IB_NOT_FOUND; + } + break; + + case 2: /* exact match */ + if ( rate < required_rate ) + status = IB_NOT_FOUND; + else + rate = required_rate; + break; + + case 3: /* largest available */ + /* can't be disqualified by this one */ + break; + + default: + /* if we're here, there's a bug in ib_multipath_rec_mtu_sel() */ + CL_ASSERT( FALSE ); + status = IB_ERROR; + break; + } + } + + /* Verify the pkt_life_time */ + /* According to spec definition IBA 1.2 Table 205 PacketLifeTime description, + for loopback paths, packetLifeTime shall be zero. */ + if ( p_src_port == p_dest_port ) + pkt_life = 0; /* loopback */ + else + pkt_life = OSM_DEFAULT_SUBNET_TIMEOUT; + + /* we silently ignore cases where only the PktLife selector is defined */ + if ( ( comp_mask & IB_MPR_COMPMASK_PKTLIFETIMESELEC ) && + ( comp_mask & IB_MPR_COMPMASK_PKTLIFETIME ) ) + { + required_pkt_life = ib_multipath_rec_pkt_life( p_mpr ); + switch ( ib_multipath_rec_pkt_life_sel( p_mpr ) ) + { + case 0: /* must be greater than */ + if ( pkt_life <= required_pkt_life ) + status = IB_NOT_FOUND; + break; + + case 1: /* must be less than */ + if ( pkt_life >= required_pkt_life ) + { + /* adjust the lifetime to use the highest possible + lower then the required one */ + if ( required_pkt_life > 1 ) + pkt_life = required_pkt_life - 1; + else + status = IB_NOT_FOUND; + } + break; + + case 2: /* exact match */ + if ( pkt_life < required_pkt_life ) + status = IB_NOT_FOUND; + else + pkt_life = required_pkt_life; + break; + + case 3: /* smallest available */ + /* can't be disqualified by this one */ + break; + + default: + /* if we're here, there's a bug in ib_path_rec_pkt_life_sel() */ + CL_ASSERT( FALSE ); + status = IB_ERROR; + break; + } + } + + if (status != IB_SUCCESS) + goto Exit; + + p_parms->mtu = mtu; + p_parms->rate = rate; + p_parms->pkey = required_pkey; + p_parms->pkt_life = pkt_life; + p_parms->sl = required_sl; + p_parms->hops = hops; + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_mpr_rcv_build_pr( + IN osm_mpr_rcv_t* const p_rcv, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const uint16_t src_lid_ho, + IN const uint16_t dest_lid_ho, + IN const uint8_t preference, + IN const osm_path_parms_t* const p_parms, + OUT ib_path_rec_t* const p_pr ) +{ + const osm_physp_t* p_src_physp; + const osm_physp_t* p_dest_physp; + + OSM_LOG_ENTER( p_rcv->p_log, 
__osm_mpr_rcv_build_pr ); + + p_src_physp = osm_port_get_default_phys_ptr( p_src_port ); + p_dest_physp = osm_port_get_default_phys_ptr( p_dest_port ); + + p_pr->dgid.unicast.prefix = osm_physp_get_subnet_prefix( p_dest_physp ); + p_pr->dgid.unicast.interface_id = osm_physp_get_port_guid( p_dest_physp ); + + p_pr->sgid.unicast.prefix = osm_physp_get_subnet_prefix( p_src_physp ); + p_pr->sgid.unicast.interface_id = osm_physp_get_port_guid( p_src_physp ); + + p_pr->dlid = cl_hton16( dest_lid_ho ); + p_pr->slid = cl_hton16( src_lid_ho ); + + p_pr->hop_flow_raw &= cl_hton32(1<<31); + + p_pr->pkey = p_parms->pkey; + p_pr->sl = cl_hton16( p_parms->sl ); + p_pr->mtu = (uint8_t)( p_parms->mtu | 0x80 ); + p_pr->rate = (uint8_t)( p_parms->rate | 0x80 ); + + /* According to 1.2 spec definition Table 205 PacketLifeTime description, + for loopback paths, packetLifeTime shall be zero. */ + if ( p_src_port == p_dest_port ) + p_pr->pkt_life = 0x80; /* loopback */ + else + p_pr->pkt_life = (uint8_t)( p_parms->pkt_life | 0x80 ); + + p_pr->preference = preference; + + /* always return num_path = 0 so this is only the reversible component */ + if ( p_parms->reversible ) + p_pr->num_path = 0x80; + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static osm_mpr_item_t* +__osm_mpr_rcv_get_lid_pair_path( + IN osm_mpr_rcv_t* const p_rcv, + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const uint16_t src_lid_ho, + IN const uint16_t dest_lid_ho, + IN const ib_net64_t comp_mask, + IN const uint8_t preference ) +{ + osm_path_parms_t path_parms; + osm_path_parms_t rev_path_parms; + osm_mpr_item_t *p_pr_item; + ib_api_status_t status, rev_path_status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_lid_pair_path ); + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_lid_pair_path: " + "Src LID 0x%X, Dest LID 0x%X\n", + src_lid_ho, dest_lid_ho ); + } + + p_pr_item = (osm_mpr_item_t*)cl_qlock_pool_get( &p_rcv->pr_pool ); + if ( p_pr_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_lid_pair_path: ERR 4501: " + "Unable to allocate path record\n" ); + goto Exit; + } + + status = __osm_mpr_rcv_get_path_parms( p_rcv, p_mpr, p_src_port, + p_dest_port, dest_lid_ho, + comp_mask, &path_parms ); + + if ( status != IB_SUCCESS ) + { + cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item ); + p_pr_item = NULL; + goto Exit; + } + + /* now try the reversible path */ + rev_path_status = __osm_mpr_rcv_get_path_parms( p_rcv, p_mpr, p_dest_port, + p_src_port, src_lid_ho, + comp_mask, &rev_path_parms ); + path_parms.reversible = ( rev_path_status == IB_SUCCESS ); + + /* did we get a Reversible Path compmask ? */ + /* + NOTE that if the reversible component = 0, it is a don't care + rather then requiring non-reversible paths ... + see Vol1 Ver1.2 p900 l16 + */ + if ( comp_mask & IB_MPR_COMPMASK_REVERSIBLE ) + { + if ( (! 
path_parms.reversible && ( p_mpr->num_path & 0x80 ) ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+        "__osm_mpr_rcv_get_lid_pair_path: "
+        "Requested reversible path but failed to get one\n");
+
+      cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item );
+      p_pr_item = NULL;
+      goto Exit;
+    }
+  }
+
+  p_pr_item->p_src_port = p_src_port;
+  p_pr_item->p_dest_port = p_dest_port;
+  p_pr_item->hops = path_parms.hops;
+
+  __osm_mpr_rcv_build_pr( p_rcv, p_src_port, p_dest_port, src_lid_ho,
+                          dest_lid_ho, preference, &path_parms,
+                          &p_pr_item->path_rec );
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( p_pr_item );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static uint32_t
+__osm_mpr_rcv_get_port_pair_paths(
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN const ib_multipath_rec_t* const p_mpr,
+  IN const osm_port_t* const p_req_port,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const uint32_t rem_paths,
+  IN const ib_net64_t comp_mask,
+  IN cl_qlist_t* const p_list )
+{
+  osm_mpr_item_t* p_pr_item;
+  uint16_t src_lid_min_ho;
+  uint16_t src_lid_max_ho;
+  uint16_t dest_lid_min_ho;
+  uint16_t dest_lid_max_ho;
+  uint16_t src_lid_ho;
+  uint16_t dest_lid_ho;
+  uint32_t path_num = 0;
+  uint8_t preference;
+  uintn_t src_offset;
+  uintn_t dest_offset;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_port_pair_paths );
+
+  if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+      "__osm_mpr_rcv_get_port_pair_paths: "
+      "Src port 0x%016" PRIx64 ", "
+      "Dst port 0x%016" PRIx64 "\n",
+      cl_ntoh64( osm_port_get_guid( p_src_port ) ),
+      cl_ntoh64( osm_port_get_guid( p_dest_port ) ) );
+  }
+
+  /* Check that the req_port, src_port and dest_port all share a
+     pkey. The check is done on the default physical port of the ports. */
+  if ( osm_port_share_pkey(p_rcv->p_log, p_req_port, p_src_port ) == FALSE ||
+       osm_port_share_pkey(p_rcv->p_log, p_req_port, p_dest_port ) == FALSE ||
+       osm_port_share_pkey(p_rcv->p_log, p_src_port, p_dest_port ) == FALSE )
+  {
+    /* One of the pairs doesn't share a pkey so the path is disqualified. */
+    goto Exit;
+  }
+
+  /*
+    We shouldn't be here if the paths are disqualified in some way...
+    Thus, we assume every possible connection is valid.
+
+    We desire to return high-quality paths first.
+    In OpenSM, higher quality means least overlap with other paths.
+    This is achieved in practice by returning paths with
+    different LID values on each end, which means these
+    paths are more redundant than paths with the same LID repeated
+    on one side. For example, in OpenSM the paths between two
+    endpoints with LMC = 1 might be as follows:
+
+    Port A, LID 1 <-> Port B, LID 3
+    Port A, LID 1 <-> Port B, LID 4
+    Port A, LID 2 <-> Port B, LID 3
+    Port A, LID 2 <-> Port B, LID 4
+
+    The OpenSM unicast routing algorithms attempt to disperse each path
+    to as varied a physical path as is reasonable. 1<->3 and 1<->4 have
+    more physical overlap (hence less redundancy) than 1<->3 and 2<->4.
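+    (With LMC = 1 each endpoint answers on two LIDs, which is what yields the four combinations above.)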
+ + OpenSM ranks paths in three preference groups: + + Preference Value Description + ---------------- ------------------------------------------- + 0 Redundant in both directions with other + pref value = 0 paths + + 1 Redundant in one direction with other + pref value = 0 and pref value = 1 paths + + 2 Not redundant in either direction with + other paths + + 3-FF Unused + + + SA clients don't need to know these details, only that the lower + preference paths are preferred, as stated in the spec. The paths + may not actually be physically redundant depending on the topology + of the subnet, but the point of LMC > 0 is to offer redundancy, + so I assume the subnet is physically appropriate for the specified + LMC value. A more advanced implementation could inspect for physical + redundancy, but I'm not going to bother with that now. + */ + + osm_port_get_lid_range_ho( p_src_port, &src_lid_min_ho, &src_lid_max_ho ); + osm_port_get_lid_range_ho( p_dest_port, &dest_lid_min_ho, &dest_lid_max_ho ); + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_port_pair_paths: " + "Src LID [0x%X-0x%X], " + "Dest LID [0x%X-0x%X]\n", + src_lid_min_ho, src_lid_max_ho, + dest_lid_min_ho, dest_lid_max_ho ); + } + + src_lid_ho = src_lid_min_ho; + dest_lid_ho = dest_lid_min_ho; + + /* + Preferred paths come first in OpenSM + */ + preference = 0; + + while ( path_num < rem_paths ) + { + /* + These paths are "fully redundant" + */ + p_pr_item = __osm_mpr_rcv_get_lid_pair_path( p_rcv, p_mpr, + p_src_port, p_dest_port, + src_lid_ho, dest_lid_ho, + comp_mask, preference ); + + if ( p_pr_item ) + { + cl_qlist_insert_tail( p_list, + (cl_list_item_t*)&p_pr_item->pool_item ); + ++path_num; + } + + if ( ++src_lid_ho > src_lid_max_ho ) + break; + + if ( ++dest_lid_ho > dest_lid_max_ho ) + break; + } + + /* + Check if we've accumulated all the paths that the user cares to see + */ + if ( path_num == rem_paths ) + goto Exit; + + /* + Don't bother reporting preference 1 paths for now. + It's more trouble than it's worth and can only occur + if ports have different LMC values, which isn't supported + by OpenSM right now anyway. + */ + preference = 2; + src_lid_ho = src_lid_min_ho; + dest_lid_ho = dest_lid_min_ho; + src_offset = 0; + dest_offset = 0; + + /* + Iterate over the remaining paths + */ + while ( path_num < rem_paths ) + { + dest_offset++; + dest_lid_ho++; + + if ( dest_lid_ho > dest_lid_max_ho ) + { + src_offset++; + src_lid_ho++; + + if ( src_lid_ho > src_lid_max_ho ) + break; /* done */ + + dest_offset = 0; + dest_lid_ho = dest_lid_min_ho; + } + + /* + These paths are "fully non-redundant" with paths already + identified above and consequently not of much value. + + Don't return paths we already identified above, as indicated + by the offset values being equal. + */ + if ( src_offset == dest_offset ) + continue; /* already reported */ + + p_pr_item = __osm_mpr_rcv_get_lid_pair_path( p_rcv, p_mpr, + p_src_port, p_dest_port, + src_lid_ho, dest_lid_ho, + comp_mask, preference ); + + if ( p_pr_item ) + { + cl_qlist_insert_tail( p_list, + (cl_list_item_t*)&p_pr_item->pool_item ); + ++path_num; + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return path_num; +} + +#undef min +#define min(x,y) (((x) < (y)) ? 
(x) : (y)) + +/********************************************************************** + **********************************************************************/ +static osm_mpr_item_t* +__osm_mpr_rcv_get_apm_port_pair_paths( + IN osm_mpr_rcv_t* const p_rcv, + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN int base_offs, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list ) +{ + osm_mpr_item_t* p_pr_item = 0; + uint16_t src_lid_min_ho; + uint16_t src_lid_max_ho; + uint16_t dest_lid_min_ho; + uint16_t dest_lid_max_ho; + uint16_t src_lid_ho; + uint16_t dest_lid_ho; + uintn_t iterations; + int src_lids, dest_lids; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_apm_port_pair_paths ); + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_apm_port_pair_paths: " + "Src port 0x%016" PRIx64 ", " + "Dst port 0x%016" PRIx64 ", base offs %d\n", + cl_ntoh64( osm_port_get_guid( p_src_port ) ), + cl_ntoh64( osm_port_get_guid( p_dest_port ) ), + base_offs ); + } + + osm_port_get_lid_range_ho( p_src_port, &src_lid_min_ho, &src_lid_max_ho ); + osm_port_get_lid_range_ho( p_dest_port, &dest_lid_min_ho, &dest_lid_max_ho ); + + src_lid_ho = src_lid_min_ho; + dest_lid_ho = dest_lid_min_ho; + + src_lids = src_lid_max_ho - src_lid_min_ho + 1; + dest_lids = dest_lid_max_ho - dest_lid_min_ho + 1; + + src_lid_ho += base_offs % src_lids; + dest_lid_ho += base_offs % dest_lids; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_apm_port_pair_paths: " + "Src LIDs [0x%X-0x%X] hashed %d, " + "Dest LIDs [0x%X-0x%X] hashed %d\n", + src_lid_min_ho, src_lid_max_ho, src_lid_ho, + dest_lid_min_ho, dest_lid_max_ho, dest_lid_ho ); + + iterations = min( src_lids, dest_lids ); + + while ( iterations-- ) + { + /* + These paths are "fully redundant" + */ + p_pr_item = __osm_mpr_rcv_get_lid_pair_path( p_rcv, p_mpr, + p_src_port, p_dest_port, + src_lid_ho, dest_lid_ho, + comp_mask, 0 ); + + if ( p_pr_item ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_mpr_rcv_get_apm_port_pair_paths: " + "Found matching path from Src LID 0x%X to Dest LID 0x%X with %d hops\n", + src_lid_ho, dest_lid_ho, p_pr_item->hops); + break; + } + + if ( ++src_lid_ho > src_lid_max_ho ) + src_lid_ho = src_lid_min_ho; + + if ( ++dest_lid_ho > dest_lid_max_ho ) + dest_lid_ho = dest_lid_min_ho; + } + + OSM_LOG_EXIT( p_rcv->p_log ); + return p_pr_item; +} + +/********************************************************************** + **********************************************************************/ +static ib_net16_t +__osm_mpr_rcv_get_gids( + IN osm_mpr_rcv_t* const p_rcv, + IN const ib_gid_t * gids, + IN int ngids, + IN int is_sgid, + OUT osm_port_t** pp_port ) +{ + osm_port_t *p_port; + ib_net16_t ib_status = IB_SUCCESS; + int i; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_gids ); + + for ( i = 0; i < ngids; i++, gids++ ) { + if ( !ib_gid_is_link_local ( gids ) ) { + if ( ( is_sgid && ib_gid_is_multicast( gids ) ) || + ( ib_gid_get_subnet_prefix ( gids ) != p_rcv->p_subn->opt.subnet_prefix ) ) { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_mpr_rcv_get_gids: ERR 451B: " + "Non local %sGID subnet prefix 0x%016" PRIx64 "\n", + is_sgid ? 
"S" : "D", + cl_ntoh64( gids->unicast.prefix ) ); + + ib_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + } + + p_port = (osm_port_t *)cl_qmap_get( &p_rcv->p_subn->port_guid_tbl, + gids->unicast.interface_id ); + if ( !p_port || + p_port == (osm_port_t *)cl_qmap_end( &p_rcv->p_subn->port_guid_tbl ) ) { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_mpr_rcv_get_gids: ERR 4506: " + "No port with GUID 0x%016" PRIx64 "\n", + cl_ntoh64( gids->unicast.interface_id ) ); + + ib_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + + pp_port[i] = p_port; + } + + Exit: + OSM_LOG_EXIT(p_rcv->p_log); + + return ib_status; +} + +/********************************************************************** + **********************************************************************/ +static ib_net16_t +__osm_mpr_rcv_get_end_points( + IN osm_mpr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + OUT osm_port_t ** pp_ports, + OUT int * nsrc, + OUT int * ndest ) +{ + const ib_multipath_rec_t* p_mpr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + ib_net16_t sa_status = IB_SA_MAD_STATUS_SUCCESS; + ib_gid_t * gids; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_end_points ); + + /* + Determine what fields are valid and then get a pointer + to the source and destination port objects, if possible. + */ + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_mpr = (ib_multipath_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + gids = (ib_gid_t *)p_mpr->gids; + + comp_mask = p_sa_mad->comp_mask; + + /* + Check a few easy disqualifying cases up front before getting + into the endpoints. + */ + *nsrc = *ndest = 0; + + if ( comp_mask & IB_MPR_COMPMASK_SGIDCOUNT ) { + *nsrc = p_mpr->sgid_count; + if ( *nsrc > IB_MULTIPATH_MAX_GIDS ) + *nsrc = IB_MULTIPATH_MAX_GIDS; + sa_status = __osm_mpr_rcv_get_gids( p_rcv, gids, *nsrc, 1, pp_ports ); + if ( sa_status != IB_SUCCESS ) + goto Exit; + } + + if ( comp_mask & IB_MPR_COMPMASK_DGIDCOUNT ) { + *ndest = p_mpr->dgid_count; + if ( *ndest + *nsrc > IB_MULTIPATH_MAX_GIDS ) + *ndest = IB_MULTIPATH_MAX_GIDS - *nsrc; + sa_status = __osm_mpr_rcv_get_gids( p_rcv, gids + *nsrc, *ndest, 0, + pp_ports + *nsrc ); + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( sa_status ); +} + +#define __hash_lids(a, b, lmc) \ + (((((a) >> (lmc)) << 4) | ((b) >> (lmc))) % 103) + +/********************************************************************** + **********************************************************************/ +static void +__osm_mpr_rcv_get_apm_paths( + IN osm_mpr_rcv_t* const p_rcv, + IN const ib_multipath_rec_t* const p_mpr, + IN const osm_port_t* const p_req_port, + IN osm_port_t ** _pp_ports, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list ) +{ + osm_port_t *pp_ports[4]; + osm_mpr_item_t *matrix[2][2]; + int base_offs, src_lid_ho, dest_lid_ho; + int sumA, sumB, minA, minB; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_get_apm_paths ); + + /* + * We want to: + * 1. use different lid offsets (from base) for the resultant paths + * to increase the probability of redundant paths or in case + * of Clos - to ensure it (different offset => different spine!) + * 2. keep consistent paths no matter of direction and order of ports + * 3. 
distribute the lid offsets to balance the load
+ * So, we sort the ports (within the srcs, and within the dests),
+ * hash the lids of S0, D0 (after the sort), and call __osm_mpr_rcv_get_apm_port_pair_paths
+ * with base_lid for S0, D0 and base_lid + 1 for S1, D1. This way we will always get
+ * the same offsets - order independent - and make sure different spines are used.
+ * Note that the diagonals on a Clos have the same number of hops, so it doesn't
+ * really matter which diagonal we use.
+ */
+  if ( _pp_ports[0]->guid < _pp_ports[1]->guid ) {
+    pp_ports[0] = _pp_ports[0];
+    pp_ports[1] = _pp_ports[1];
+  }
+  else
+  {
+    pp_ports[0] = _pp_ports[1];
+    pp_ports[1] = _pp_ports[0];
+  }
+  if ( _pp_ports[2]->guid < _pp_ports[3]->guid ) {
+    pp_ports[2] = _pp_ports[2];
+    pp_ports[3] = _pp_ports[3];
+  }
+  else
+  {
+    pp_ports[2] = _pp_ports[3];
+    pp_ports[3] = _pp_ports[2];
+  }
+
+  src_lid_ho = osm_port_get_base_lid( pp_ports[0] );
+  dest_lid_ho = osm_port_get_base_lid( pp_ports[2] );
+
+  base_offs = src_lid_ho < dest_lid_ho ?
+    __hash_lids( src_lid_ho, dest_lid_ho, p_rcv->p_subn->opt.lmc ) :
+    __hash_lids( dest_lid_ho, src_lid_ho, p_rcv->p_subn->opt.lmc );
+
+  matrix[0][0] = __osm_mpr_rcv_get_apm_port_pair_paths( p_rcv, p_mpr, pp_ports[0],
+    pp_ports[2], base_offs, comp_mask, p_list );
+  matrix[0][1] = __osm_mpr_rcv_get_apm_port_pair_paths( p_rcv, p_mpr, pp_ports[0],
+    pp_ports[3], base_offs, comp_mask, p_list );
+  matrix[1][0] = __osm_mpr_rcv_get_apm_port_pair_paths( p_rcv, p_mpr, pp_ports[1],
+    pp_ports[2], base_offs+1, comp_mask, p_list );
+  matrix[1][1] = __osm_mpr_rcv_get_apm_port_pair_paths( p_rcv, p_mpr, pp_ports[1],
+    pp_ports[3], base_offs+1, comp_mask, p_list );
+
+  osm_log( p_rcv->p_log, OSM_LOG_DEBUG, "__osm_mpr_rcv_get_apm_paths: "
+    "APM matrix:\n"
+    "\t{0,0} 0x%X->0x%X (%d)\t| {0,1} 0x%X->0x%X (%d)\n"
+    "\t{1,0} 0x%X->0x%X (%d)\t| {1,1} 0x%X->0x%X (%d)\n",
+    matrix[0][0]->path_rec.slid, matrix[0][0]->path_rec.dlid, matrix[0][0]->hops,
+    matrix[0][1]->path_rec.slid, matrix[0][1]->path_rec.dlid, matrix[0][1]->hops,
+    matrix[1][0]->path_rec.slid, matrix[1][0]->path_rec.dlid, matrix[1][0]->hops,
+    matrix[1][1]->path_rec.slid, matrix[1][1]->path_rec.dlid, matrix[1][1]->hops );
+
+  /* check diagonal A {(0,0), (1,1)} */
+  sumA = matrix[0][0]->hops + matrix[1][1]->hops;
+  minA = min( matrix[0][0]->hops, matrix[1][1]->hops );
+
+  /* check diagonal B {(0,1), (1,0)} */
+  sumB = matrix[0][1]->hops + matrix[1][0]->hops;
+  minB = min( matrix[0][1]->hops, matrix[1][0]->hops );
+
+  /* and the winner is...
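the diagonal whose best path has fewer hops; on equal minima the smaller hop total wins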
*/
+  if ( minA < minB || ( minA == minB && sumA <= sumB ) ) {
+    /* Diag A */
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG, "__osm_mpr_rcv_get_apm_paths: "
+      "Diag {0,0} & {1,1} is the best:\n"
+      "\t{0,0} 0x%X->0x%X (%d)\t & {1,1} 0x%X->0x%X (%d)\n",
+      matrix[0][0]->path_rec.slid, matrix[0][0]->path_rec.dlid, matrix[0][0]->hops,
+      matrix[1][1]->path_rec.slid, matrix[1][1]->path_rec.dlid, matrix[1][1]->hops );
+    cl_qlist_insert_tail( p_list,
+      (cl_list_item_t*)&matrix[0][0]->pool_item );
+    cl_qlist_insert_tail( p_list,
+      (cl_list_item_t*)&matrix[1][1]->pool_item );
+    cl_qlock_pool_put( &p_rcv->pr_pool, &matrix[0][1]->pool_item );
+    cl_qlock_pool_put( &p_rcv->pr_pool, &matrix[1][0]->pool_item );
+  }
+  else
+  {
+    /* Diag B */
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG, "__osm_mpr_rcv_get_apm_paths: "
+      "Diag {0,1} & {1,0} is the best:\n"
+      "\t{0,1} 0x%X->0x%X (%d)\t & {1,0} 0x%X->0x%X (%d)\n",
+      matrix[0][1]->path_rec.slid, matrix[0][1]->path_rec.dlid, matrix[0][1]->hops,
+      matrix[1][0]->path_rec.slid, matrix[1][0]->path_rec.dlid, matrix[1][0]->hops );
+    cl_qlist_insert_tail( p_list,
+      (cl_list_item_t*)&matrix[0][1]->pool_item );
+    cl_qlist_insert_tail( p_list,
+      (cl_list_item_t*)&matrix[1][0]->pool_item );
+    cl_qlock_pool_put( &p_rcv->pr_pool, &matrix[0][0]->pool_item );
+    cl_qlock_pool_put( &p_rcv->pr_pool, &matrix[1][1]->pool_item );
+  }
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_mpr_rcv_process_pairs(
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN const ib_multipath_rec_t* const p_mpr,
+  IN osm_port_t* const p_req_port,
+  IN osm_port_t ** pp_ports,
+  IN const int nsrc,
+  IN const int ndest,
+  IN const ib_net64_t comp_mask,
+  IN cl_qlist_t* const p_list )
+{
+  osm_port_t **pp_src_port, **pp_es;
+  osm_port_t **pp_dest_port, **pp_ed;
+  uint32_t max_paths, num_paths, total_paths = 0;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_process_pairs );
+
+  if ( comp_mask & IB_MPR_COMPMASK_NUMBPATH )
+    max_paths = p_mpr->num_path & 0x7F;
+  else
+    max_paths = OSM_SA_MPR_MAX_NUM_PATH;
+
+  for ( pp_src_port = pp_ports, pp_es = pp_ports + nsrc; pp_src_port < pp_es; pp_src_port++ )
+  {
+    for ( pp_dest_port = pp_es, pp_ed = pp_es + ndest; pp_dest_port < pp_ed; pp_dest_port++ )
+    {
+      num_paths = __osm_mpr_rcv_get_port_pair_paths( p_rcv, p_mpr, p_req_port,
+        *pp_src_port, *pp_dest_port,
+        max_paths - total_paths,
+        comp_mask, p_list );
+      total_paths += num_paths;
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+        "__osm_mpr_rcv_process_pairs: "
+        "%d paths %d total paths %d max paths\n",
+        num_paths, total_paths, max_paths );
+      /* Just take first NumbPaths found */
+      if (total_paths >= max_paths)
+        goto Exit;
+    }
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_mpr_rcv_respond(
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw,
+  IN cl_qlist_t* const p_list )
+{
+  osm_madw_t* p_resp_madw;
+  const ib_sa_mad_t* p_sa_mad;
+  ib_sa_mad_t* p_resp_sa_mad;
+  size_t num_rec;
+  size_t mad_size;
+  ib_path_rec_t* p_resp_pr;
+  ib_multipath_rec_t* p_mpr;
+  ib_api_status_t status;
+  osm_mpr_item_t* p_mpr_item;
+  uint32_t i;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_respond );
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_mpr = (ib_multipath_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+
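/* every item queued by the pair/APM stage carries one ready-made ib_path_rec_t */ +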
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_mpr_rcv_respond(
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw,
+  IN cl_qlist_t* const p_list )
+{
+  osm_madw_t* p_resp_madw;
+  const ib_sa_mad_t* p_sa_mad;
+  ib_sa_mad_t* p_resp_sa_mad;
+  size_t num_rec;
+  size_t mad_size;
+  ib_path_rec_t* p_resp_pr;
+  ib_multipath_rec_t* p_mpr;
+  ib_api_status_t status;
+  osm_mpr_item_t* p_mpr_item;
+  uint32_t i;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_mpr_rcv_respond );
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_mpr = (ib_multipath_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+  num_rec = cl_qlist_count( p_list );
+
+  osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+           "__osm_mpr_rcv_respond: "
+           "Generating response with %zu records\n", num_rec );
+
+  mad_size = IB_SA_MAD_HDR_SIZE + num_rec * sizeof(ib_path_rec_t);
+
+  /*
+    Get a MAD to reply. Address of Mad is in the received mad_wrapper
+  */
+  p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, p_madw->h_bind,
+                                  mad_size, &p_madw->mad_addr );
+
+  if ( !p_resp_madw )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_mpr_rcv_respond: "
+             "ERR 4502: Unable to allocate MAD\n" );
+
+    for ( i = 0; i < num_rec; i++ )
+    {
+      p_mpr_item = (osm_mpr_item_t*)cl_qlist_remove_head( p_list );
+      cl_qlock_pool_put( &p_rcv->pr_pool, &p_mpr_item->pool_item );
+    }
+
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES );
+    goto Exit;
+  }
+
+  p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw );
+
+  memcpy( p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE );
+  p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
+  /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
+  p_resp_sa_mad->sm_key = 0;
+
+  /*
+    o15-0.2.7: If MultiPath is supported, then SA shall respond to a
+    SubnAdmGetMulti() containing a valid MultiPathRecord attribute with
+    a set of zero or more PathRecords satisfying the constraints indicated
+    in the MultiPathRecord received. The PathRecord Attribute ID shall be
+    used in the response.
+  */
+  p_resp_sa_mad->attr_id = IB_MAD_ATTR_PATH_RECORD;
+  p_resp_sa_mad->attr_offset = ib_get_attr_offset( sizeof(ib_path_rec_t) );
+
+  p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
+
+  p_resp_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad );
+
+  for ( i = 0; i < num_rec; i++ )
+  {
+    p_mpr_item = (osm_mpr_item_t*)cl_qlist_remove_head( p_list );
+
+    /* Copy the Path Records from the list into the MAD */
+    *p_resp_pr = p_mpr_item->path_rec;
+
+    cl_qlock_pool_put( &p_rcv->pr_pool, &p_mpr_item->pool_item );
+    p_resp_pr++;
+  }
+
+  CL_ASSERT( cl_is_qlist_empty( p_list ) );
+
+  osm_dump_sa_mad( p_rcv->p_log, p_resp_sa_mad, OSM_LOG_FRAMES );
+
+  status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE );
+
+  if ( status != IB_SUCCESS )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_mpr_rcv_respond: ERR 4507: "
+             "Unable to send MAD (%s)\n", ib_get_err_str( status ) );
+    /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mpr_rcv_process(
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN osm_madw_t* const p_madw )
+{
+  const ib_multipath_rec_t* p_mpr;
+  const ib_sa_mad_t* p_sa_mad;
+  osm_port_t* requester_port;
+  osm_port_t* pp_ports[IB_MULTIPATH_MAX_GIDS];
+  cl_qlist_t pr_list;
+  ib_net16_t sa_status;
+  int nsrc, ndest;
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_mpr_rcv_process );
+
+  CL_ASSERT( p_madw );
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_mpr = (ib_multipath_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+  CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_MULTIPATH_RECORD );
+
+  if ( ( p_sa_mad->rmpp_flags & IB_RMPP_FLAG_ACTIVE ) != IB_RMPP_FLAG_ACTIVE )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_mpr_rcv_process: ERR 4510: "
+             "Invalid request since RMPP_FLAG_ACTIVE is not set\n" );
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID );
+    goto Exit;
+  }
+
+  /* we only support the SubnAdmGetMulti method */
+  if ( p_sa_mad->method != IB_MAD_METHOD_GETMULTI ) {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_mpr_rcv_process: ERR 4513: "
+             "Unsupported Method (%s)\n",
+             ib_get_sa_method_str( p_sa_mad->method ) );
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR );
+    goto Exit;
+  }
+
+  /* update the requester physical port. */
+  requester_port = osm_get_port_by_mad_addr( p_rcv->p_log, p_rcv->p_subn,
+                     osm_madw_get_mad_addr_ptr( p_madw ) );
+  if ( requester_port == NULL )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_mpr_rcv_process: ERR 4517: "
+             "Cannot find requester physical port\n" );
+    goto Exit;
+  }
+
+  if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+    osm_dump_multipath_record( p_rcv->p_log, p_mpr, OSM_LOG_DEBUG );
+
+  cl_qlist_init( &pr_list );
+
+  /*
+    Most SA functions (including this one) are read-only on the
+    subnet object, so we grab the lock non-exclusively.
+  */
+  cl_plock_acquire( p_rcv->p_lock );
+
+  sa_status = __osm_mpr_rcv_get_end_points( p_rcv, p_madw, pp_ports,
+                                            &nsrc, &ndest );
+
+  if ( sa_status != IB_SA_MAD_STATUS_SUCCESS || !nsrc || !ndest )
+  {
+    if ( sa_status == IB_SA_MAD_STATUS_SUCCESS && ( !nsrc || !ndest ) )
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "osm_mpr_rcv_process: ERR 4512: "
+               "__osm_mpr_rcv_get_end_points failed, not enough GIDs "
+               "(nsrc %d ndest %d)\n",
+               nsrc, ndest );
+    cl_plock_release( p_rcv->p_lock );
+    if ( sa_status == IB_SA_MAD_STATUS_SUCCESS )
+      osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID );
+    else
+      osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status );
+    goto Exit;
+  }
+
+  /* APM request */
+  if ( nsrc == 2 && ndest == 2 && ( p_mpr->num_path & 0x7F ) == 2 )
+    __osm_mpr_rcv_get_apm_paths( p_rcv, p_mpr, requester_port, pp_ports,
+                                 p_sa_mad->comp_mask, &pr_list );
+  else
+    __osm_mpr_rcv_process_pairs( p_rcv, p_mpr, requester_port, pp_ports,
+                                 nsrc, ndest,
+                                 p_sa_mad->comp_mask, &pr_list );
+
+  cl_plock_release( p_rcv->p_lock );
+  __osm_mpr_rcv_respond( p_rcv, p_madw, &pr_list );
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+#endif
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record_ctrl.c
new file mode 100644
index 00000000..6a30be92
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_multipath_record_ctrl.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_mpr_rcv_ctrl_t.
+ *    This object represents the MultiPathRecord request controller object.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ */
+
+#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP)
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_mpr_rcv_ctrl_disp_callback(
+  IN void *context,
+  IN void *p_data )
+{
+  /* ignore the return status when invoked via the dispatcher */
+  osm_mpr_rcv_process( ((osm_mpr_rcv_ctrl_t*)context)->p_rcv,
+                       (osm_madw_t*)p_data );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mpr_rcv_ctrl_construct(
+  IN osm_mpr_rcv_ctrl_t* const p_ctrl )
+{
+  memset( p_ctrl, 0, sizeof(*p_ctrl) );
+  p_ctrl->h_disp = CL_DISP_INVALID_HANDLE;
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_mpr_rcv_ctrl_destroy(
+  IN osm_mpr_rcv_ctrl_t* const p_ctrl )
+{
+  CL_ASSERT( p_ctrl );
+  cl_disp_unregister( p_ctrl->h_disp );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_mpr_rcv_ctrl_init(
+  IN osm_mpr_rcv_ctrl_t* const p_ctrl,
+  IN osm_mpr_rcv_t* const p_rcv,
+  IN osm_log_t* const p_log,
+  IN cl_dispatcher_t* const p_disp )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_log, osm_mpr_rcv_ctrl_init );
+
+  osm_mpr_rcv_ctrl_construct( p_ctrl );
+  p_ctrl->p_log = p_log;
+  p_ctrl->p_rcv = p_rcv;
+  p_ctrl->p_disp = p_disp;
+
+  p_ctrl->h_disp = cl_disp_register(
+    p_disp,
+    OSM_MSG_MAD_MULTIPATH_RECORD,
+    __osm_mpr_rcv_ctrl_disp_callback,
+    p_ctrl );
+
+  if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE )
+  {
+    osm_log( p_log, OSM_LOG_ERROR,
+             "osm_mpr_rcv_ctrl_init: ERR 4B01: "
+             "Dispatcher registration failed\n" );
+    status = IB_INSUFFICIENT_RESOURCES;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_log );
+  return( status );
+}
+
+#endif
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record.c
new file mode 100644
index 00000000..0c752afe
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_nr_rcv_t.
+ *    This object represents the NodeRecord Receiver object.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.8 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define OSM_NR_RCV_POOL_MIN_SIZE 32
+#define OSM_NR_RCV_POOL_GROW_SIZE 32
+
+typedef struct _osm_nr_item
+{
+  cl_pool_item_t pool_item;
+  ib_node_record_t rec;
+} osm_nr_item_t;
+
+typedef struct _osm_nr_search_ctxt
+{
+  const ib_node_record_t* p_rcvd_rec;
+  ib_net64_t comp_mask;
+  cl_qlist_t* p_list;
+  osm_nr_rcv_t* p_rcv;
+  const osm_physp_t* p_req_physp;
+} osm_nr_search_ctxt_t;
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_nr_rcv_construct(
+  IN osm_nr_rcv_t* const p_rcv )
+{
+  memset( p_rcv, 0, sizeof(*p_rcv) );
+  cl_qlock_pool_construct( &p_rcv->pool );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_nr_rcv_destroy(
+  IN osm_nr_rcv_t* const p_rcv )
+{
+  OSM_LOG_ENTER( p_rcv->p_log, osm_nr_rcv_destroy );
+  cl_qlock_pool_destroy( &p_rcv->pool );
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_nr_rcv_init(
+  IN osm_nr_rcv_t* const p_rcv,
+  IN osm_sa_resp_t* const p_resp,
+  IN osm_mad_pool_t* const p_mad_pool,
+  IN const osm_subn_t* const p_subn,
+  IN osm_log_t* const p_log,
+  IN cl_plock_t* const p_lock )
+{
+  ib_api_status_t status;
+
+  OSM_LOG_ENTER( p_log, osm_nr_rcv_init );
+
+  osm_nr_rcv_construct( p_rcv );
+
+  p_rcv->p_log = p_log;
+  p_rcv->p_subn = p_subn;
+  p_rcv->p_lock = p_lock;
+  p_rcv->p_resp = p_resp;
+  p_rcv->p_mad_pool = p_mad_pool;
+
+  status = cl_qlock_pool_init( &p_rcv->pool,
+                               OSM_NR_RCV_POOL_MIN_SIZE,
+                               0,
+                               OSM_NR_RCV_POOL_GROW_SIZE,
+                               sizeof(osm_nr_item_t),
+                               NULL, NULL, NULL );
+
+  OSM_LOG_EXIT( p_log );
+  return( status );
+}
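Each queued record is carried by a wrapper whose first member is the complib pool item; because cl_pool_item_t itself begins with a cl_list_item_t, one pointer serves the pool and the result list with no offset arithmetic. A minimal sketch of the pattern (illustrative only, assuming the complib headers):

    #include <complib/cl_qpool.h>
    #include <complib/cl_qlist.h>

    typedef struct _example_item
    {
        cl_pool_item_t pool_item;  /* must stay the first member */
        int            payload;    /* stands in for ib_node_record_t */
    } example_item_t;

    /* enqueue: cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_item->pool_item );
       release: cl_qlock_pool_put( p_pool, &p_item->pool_item );                    */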
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+__osm_nr_rcv_new_nr(
+  IN osm_nr_rcv_t* const p_rcv,
+  IN const osm_node_t* const p_node,
+  IN cl_qlist_t* const p_list,
+  IN ib_net64_t const port_guid,
+  IN ib_net16_t const lid )
+{
+  osm_nr_item_t* p_rec_item;
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_nr_rcv_new_nr );
+
+  p_rec_item = (osm_nr_item_t*)cl_qlock_pool_get( &p_rcv->pool );
+  if( p_rec_item == NULL )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_nr_rcv_new_nr: ERR 1D02: "
+             "cl_qlock_pool_get failed\n" );
+    status = IB_INSUFFICIENT_RESOURCES;
+    goto Exit;
+  }
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_nr_rcv_new_nr: "
+             "New NodeRecord: node 0x%016" PRIx64
+             "\n\t\t\t\tport 0x%016" PRIx64 ", lid 0x%X\n",
+             cl_ntoh64( osm_node_get_node_guid( p_node ) ),
+             cl_ntoh64( port_guid ), cl_ntoh16( lid ) );
+  }
+
+  memset( &p_rec_item->rec, 0, sizeof(ib_node_record_t) );
+
+  p_rec_item->rec.lid = lid;
+
+  p_rec_item->rec.node_info = p_node->node_info;
+  p_rec_item->rec.node_info.port_guid = port_guid;
+  memcpy( &(p_rec_item->rec.node_desc), &(p_node->node_desc),
+          IB_NODE_DESCRIPTION_SIZE );
+  cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item );
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_nr_rcv_create_nr(
+  IN osm_nr_rcv_t* const p_rcv,
+  IN const osm_node_t* const p_node,
+  IN cl_qlist_t* const p_list,
+  IN ib_net64_t const match_port_guid,
+  IN ib_net16_t const match_lid,
+  IN const osm_physp_t* const p_req_physp )
+{
+  const osm_physp_t* p_physp;
+  uint8_t port_num;
+  uint8_t num_ports;
+  uint16_t match_lid_ho;
+  ib_net16_t base_lid;
+  uint16_t base_lid_ho;
+  uint16_t max_lid_ho;
+  uint8_t lmc;
+  ib_net64_t port_guid;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_nr_rcv_create_nr );
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_nr_rcv_create_nr: "
+             "Looking for NodeRecord with LID: 0x%X GUID:0x%016" PRIx64 "\n",
+             cl_ntoh16( match_lid ),
+             cl_ntoh64( match_port_guid ) );
+  }
+
+  /*
+    For switches, do not return the NodeInfo record
+    for each port on the switch, just for port 0.
+  */
+  if( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH )
+    num_ports = 1;
+  else
+    num_ports = osm_node_get_num_physp( p_node );
+
+  for( port_num = 0; port_num < num_ports; port_num++ )
+  {
+    p_physp = osm_node_get_physp_ptr( p_node, port_num );
+
+    if( !osm_physp_is_valid( p_physp ) )
+      continue;
+
+    /* Check whether the found p_physp and the requester physp
+       share a pkey. If not - continue */
+    if( !osm_physp_share_pkey( p_rcv->p_log, p_physp, p_req_physp ) )
+      continue;
+
+    port_guid = osm_physp_get_port_guid( p_physp );
+
+    if( match_port_guid && ( port_guid != match_port_guid ) )
+      continue;
+
+    base_lid = osm_physp_get_base_lid( p_physp );
+    base_lid_ho = cl_ntoh16( base_lid );
+    lmc = osm_physp_get_lmc( p_physp );
+    max_lid_ho = (uint16_t)( base_lid_ho + (1 << lmc) - 1 );
+    match_lid_ho = cl_ntoh16( match_lid );
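When the client filters by LID, a LID matches a port if it falls anywhere inside the port's LMC window, i.e. base_lid <= lid <= base_lid + 2^lmc - 1, all in host byte order. The same test in isolation (hypothetical helper, not part of this patch):

    #include <stdint.h>

    static int lid_in_lmc_range( uint16_t base_lid_ho, uint8_t lmc, uint16_t lid_ho )
    {
        /* with LMC = lmc, the port owns 2^lmc consecutive LIDs */
        uint16_t max_lid_ho = (uint16_t)( base_lid_ho + ( 1 << lmc ) - 1 );
        return lid_ho >= base_lid_ho && lid_ho <= max_lid_ho;
    }

For example, base LID 0x10 with LMC 2 owns 0x10-0x13, so lid_in_lmc_range( 0x10, 2, 0x12 ) returns 1.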
+    if( match_lid_ho )
+    {
+      /*
+        We validate that the lid belongs to this node.
+      */
+      if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_nr_rcv_create_nr: "
+                 "Comparing LID: 0x%X <= 0x%X <= 0x%X\n",
+                 base_lid_ho, match_lid_ho, max_lid_ho );
+      }
+
+      if( match_lid_ho < base_lid_ho || match_lid_ho > max_lid_ho )
+        continue;
+    }
+
+    __osm_nr_rcv_new_nr( p_rcv, p_node, p_list, port_guid, base_lid );
+  }
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_nr_rcv_by_comp_mask(
+  IN cl_map_item_t* const p_map_item,
+  IN void* context )
+{
+  const osm_nr_search_ctxt_t* const p_ctxt = (osm_nr_search_ctxt_t *)context;
+  const osm_node_t* const p_node = (osm_node_t*)p_map_item;
+  const ib_node_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec;
+  const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp;
+  osm_nr_rcv_t* const p_rcv = p_ctxt->p_rcv;
+  ib_net64_t const comp_mask = p_ctxt->comp_mask;
+  ib_net64_t match_port_guid = 0;
+  ib_net16_t match_lid = 0;
+
+  OSM_LOG_ENTER( p_ctxt->p_rcv->p_log, __osm_nr_rcv_by_comp_mask );
+
+  osm_dump_node_info( p_ctxt->p_rcv->p_log,
+                      &p_node->node_info,
+                      OSM_LOG_VERBOSE );
+
+  if( comp_mask & IB_NR_COMPMASK_LID )
+    match_lid = p_rcvd_rec->lid;
+
+  if( comp_mask & IB_NR_COMPMASK_NODEGUID )
+  {
+    if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_nr_rcv_by_comp_mask: "
+               "Looking for node 0x%016" PRIx64
+               ", found 0x%016" PRIx64 "\n",
+               cl_ntoh64( p_rcvd_rec->node_info.node_guid ),
+               cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+    }
+
+    if( p_node->node_info.node_guid != p_rcvd_rec->node_info.node_guid )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_PORTGUID )
+  {
+    match_port_guid = p_rcvd_rec->node_info.port_guid;
+  }
+  if( comp_mask & IB_NR_COMPMASK_SYSIMAGEGUID )
+  {
+    if( p_node->node_info.sys_guid != p_rcvd_rec->node_info.sys_guid )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_BASEVERSION )
+  {
+    if( p_node->node_info.base_version != p_rcvd_rec->node_info.base_version )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_CLASSVERSION )
+  {
+    if( p_node->node_info.class_version != p_rcvd_rec->node_info.class_version )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_NODETYPE )
+  {
+    if( p_node->node_info.node_type != p_rcvd_rec->node_info.node_type )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_NUMPORTS )
+  {
+    if( p_node->node_info.num_ports != p_rcvd_rec->node_info.num_ports )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_PARTCAP )
+  {
+    if( p_node->node_info.partition_cap != p_rcvd_rec->node_info.partition_cap )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_DEVID )
+  {
+    if( p_node->node_info.device_id != p_rcvd_rec->node_info.device_id )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_REV )
+  {
+    if( p_node->node_info.revision != p_rcvd_rec->node_info.revision )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_PORTNUM )
+  {
+    if( ib_node_info_get_local_port_num( &p_node->node_info ) !=
+        ib_node_info_get_local_port_num( &p_rcvd_rec->node_info ) )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_VENDID )
+  {
+    if( ib_node_info_get_vendor_id( &p_node->node_info ) !=
+        ib_node_info_get_vendor_id( &p_rcvd_rec->node_info ) )
+      goto Exit;
+  }
+  if( comp_mask & IB_NR_COMPMASK_NODEDESC )
+  {
+    if( strncmp( (char*) &p_node->node_desc,
+                 (char*) &p_rcvd_rec->node_desc,
+                 sizeof(ib_node_desc_t) ) )
+      goto Exit;
+  }
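Every check above follows the same comp_mask convention: a field participates in the match only when its bit is set, and unset bits are wildcards. A minimal sketch of the convention with a stand-in record and mask bit (hypothetical names, not part of this patch):

    #include <stdint.h>

    typedef struct { uint8_t node_type; } rec_t;  /* stand-in record */
    #define MASK_NODETYPE ( 1u << 4 )             /* stand-in mask bit */

    static int field_matches( uint32_t comp_mask, const rec_t *p_query, const rec_t *p_node )
    {
        /* a requested field that differs disqualifies the node... */
        if( ( comp_mask & MASK_NODETYPE ) && p_query->node_type != p_node->node_type )
            return 0;
        /* ...while an unrequested field is simply ignored */
        return 1;
    }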
+  __osm_nr_rcv_create_nr( p_rcv, p_node, p_ctxt->p_list,
+                          match_port_guid, match_lid, p_req_physp );
+
+ Exit:
+  OSM_LOG_EXIT( p_ctxt->p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_nr_rcv_process(
+  IN osm_nr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw )
+{
+  const ib_sa_mad_t* p_rcvd_mad;
+  const ib_node_record_t* p_rcvd_rec;
+  ib_node_record_t* p_resp_rec;
+  cl_qlist_t rec_list;
+  osm_madw_t* p_resp_madw;
+  ib_sa_mad_t* p_resp_sa_mad;
+  uint32_t num_rec, pre_trim_num_rec;
+#ifndef VENDOR_RMPP_SUPPORT
+  uint32_t trim_num_rec;
+#endif
+  uint32_t i;
+  osm_nr_search_ctxt_t context;
+  osm_nr_item_t* p_rec_item;
+  ib_api_status_t status;
+  osm_physp_t* p_req_physp;
+
+  CL_ASSERT( p_rcv );
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_nr_rcv_process );
+
+  CL_ASSERT( p_madw );
+
+  p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_rcvd_rec = (ib_node_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad );
+
+  CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_NODE_RECORD );
+
+  /* we only support the SubnAdmGet and SubnAdmGetTable methods */
+  if( (p_rcvd_mad->method != IB_MAD_METHOD_GET) &&
+      (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_process: ERR 1D05: "
+             "Unsupported Method (%s)\n",
+             ib_get_sa_method_str( p_rcvd_mad->method ) );
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR );
+    goto Exit;
+  }
+
+  /* update the requester physical port. */
+  p_req_physp = osm_get_physp_by_mad_addr( p_rcv->p_log,
+                                           p_rcv->p_subn,
+                                           osm_madw_get_mad_addr_ptr( p_madw ) );
+  if( p_req_physp == NULL )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_process: ERR 1D04: "
+             "Cannot find requester physical port\n" );
+    goto Exit;
+  }
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+    osm_dump_node_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG );
+
+  cl_qlist_init( &rec_list );
+
+  context.p_rcvd_rec = p_rcvd_rec;
+  context.p_list = &rec_list;
+  context.comp_mask = p_rcvd_mad->comp_mask;
+  context.p_rcv = p_rcv;
+  context.p_req_physp = p_req_physp;
+
+  cl_plock_acquire( p_rcv->p_lock );
+
+  cl_qmap_apply_func( &p_rcv->p_subn->node_guid_tbl,
+                      __osm_nr_rcv_by_comp_mask,
+                      &context );
+
+  cl_plock_release( p_rcv->p_lock );
+
+  num_rec = cl_qlist_count( &rec_list );
+
+  /*
+   * C15-0.1.30:
+   * If we do a SubnAdmGet and got more than one record it is an error!
+   */
+  if( (p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec > 1) ) {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_process: ERR 1D03: "
+             "Got more than one record for SubnAdmGet (%u)\n",
+             num_rec );
+    osm_sa_send_error( p_rcv->p_resp, p_madw,
+                       IB_SA_MAD_STATUS_TOO_MANY_RECORDS );
+
+    /* the records cannot be returned, so free them here */
+    p_rec_item = (osm_nr_item_t*)cl_qlist_remove_head( &rec_list );
+    while( p_rec_item != (osm_nr_item_t*)cl_qlist_end( &rec_list ) )
+    {
+      cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item );
+      p_rec_item = (osm_nr_item_t*)cl_qlist_remove_head( &rec_list );
+    }
+
+    goto Exit;
+  }
+
+  pre_trim_num_rec = num_rec;
+#ifndef VENDOR_RMPP_SUPPORT
+  /* we limit the number of records to a single packet */
+  trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_node_record_t);
+  if( trim_num_rec < num_rec )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+             "osm_nr_rcv_process: "
+             "Number of records:%u trimmed to:%u to fit in one MAD\n",
+             num_rec, trim_num_rec );
+    num_rec = trim_num_rec;
+  }
+#endif
+
+  osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+           "osm_nr_rcv_process: "
+           "Returning %u records\n", num_rec );
+
+  if( (p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0) )
+  {
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS );
+    goto Exit;
+  }
+
+  /*
+   * Get a MAD to reply. Address of Mad is in the received mad_wrapper
+   */
+  p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool,
+                                  p_madw->h_bind,
+                                  num_rec * sizeof(ib_node_record_t) + IB_SA_MAD_HDR_SIZE,
+                                  &p_madw->mad_addr );
+
+  if( !p_resp_madw )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_process: ERR 1D06: "
+             "osm_mad_pool_get failed\n" );
+
+    for( i = 0; i < num_rec; i++ )
+    {
+      p_rec_item = (osm_nr_item_t*)cl_qlist_remove_head( &rec_list );
+      cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item );
+    }
+
+    osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES );
+    goto Exit;
+  }
+
+  p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw );
+
+  /*
+    Copy the MAD header back into the response mad, set the 'R' bit
+    and the payload length, then copy all records from the list into
+    the response payload.
+  */
+  memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE );
+  p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
+  /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */
+  p_resp_sa_mad->sm_key = 0;
+  /* Fill in the offset (paylen will be done by the rmpp SAR) */
+  p_resp_sa_mad->attr_offset =
+    ib_get_attr_offset( sizeof(ib_node_record_t) );
+
+  p_resp_rec = (ib_node_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad );
+
+#ifndef VENDOR_RMPP_SUPPORT
+  /* we support only one packet RMPP - so we will set the first and
+     last flags for gettable */
+  if( p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP )
+  {
+    p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA;
+    p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE;
+  }
+#else
+  /* forcibly mark the packet as an RMPP packet */
+  if( p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP )
+    p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE;
+#endif
+
+  for( i = 0; i < pre_trim_num_rec; i++ )
+  {
+    p_rec_item = (osm_nr_item_t*)cl_qlist_remove_head( &rec_list );
+    /* copy only if not trimmed */
+    if( i < num_rec )
+    {
+      *p_resp_rec = p_rec_item->rec;
+    }
+    cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item );
+    p_resp_rec++;
+  }
+
+  CL_ASSERT( cl_is_qlist_empty( &rec_list ) );
+
+  status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_process: ERR 1D07: "
+             "osm_vendor_send status = %s\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record_ctrl.c
new file mode 100644
index 00000000..5712c83c
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_node_record_ctrl.c
@@ -0,0 +1,125 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_nr_rcv_ctrl_t.
+ *    This object represents the NodeRecord controller object.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_nr_ctrl_disp_callback(
+  IN void *context,
+  IN void *p_data )
+{
+  /* ignore the return status when invoked via the dispatcher */
+  osm_nr_rcv_process( ((osm_nr_rcv_ctrl_t*)context)->p_rcv,
+                      (osm_madw_t*)p_data );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_nr_rcv_ctrl_construct(
+  IN osm_nr_rcv_ctrl_t* const p_ctrl )
+{
+  memset( p_ctrl, 0, sizeof(*p_ctrl) );
+  p_ctrl->h_disp = CL_DISP_INVALID_HANDLE;
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_nr_rcv_ctrl_destroy(
+  IN osm_nr_rcv_ctrl_t* const p_ctrl )
+{
+  CL_ASSERT( p_ctrl );
+  cl_disp_unregister( p_ctrl->h_disp );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_nr_rcv_ctrl_init(
+  IN osm_nr_rcv_ctrl_t* const p_ctrl,
+  IN osm_nr_rcv_t* const p_rcv,
+  IN osm_log_t* const p_log,
+  IN cl_dispatcher_t* const p_disp )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_log, osm_nr_rcv_ctrl_init );
+
+  osm_nr_rcv_ctrl_construct( p_ctrl );
+  p_ctrl->p_log = p_log;
+  p_ctrl->p_rcv = p_rcv;
+  p_ctrl->p_disp = p_disp;
+
+  p_ctrl->h_disp = cl_disp_register(
+    p_disp,
+    OSM_MSG_MAD_NODE_RECORD,
+    __osm_nr_ctrl_disp_callback,
+    p_ctrl );
+
+  if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE )
+  {
+    osm_log( p_log, OSM_LOG_ERROR,
+             "osm_nr_rcv_ctrl_init: ERR 1E01: "
+             "Dispatcher registration failed\n" );
+    status = IB_INSUFFICIENT_RESOURCES;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_log );
+  return( status );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record.c
new file mode 100644
index 00000000..a8e60cc3
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record.c
@@ -0,0 +1,2006 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_pr_rcv_t.
+ *    This object represents the PathRecord Receiver object.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.10 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#define OSM_PR_RCV_POOL_MIN_SIZE 64
+#define OSM_PR_RCV_POOL_GROW_SIZE 64
+
+typedef struct _osm_pr_item
+{
+  cl_pool_item_t pool_item;
+  ib_path_rec_t path_rec;
+} osm_pr_item_t;
+
+typedef struct _osm_path_parms
+{
+  ib_net16_t pkey;
+  uint8_t mtu;
+  uint8_t rate;
+  uint8_t sl;
+  uint8_t pkt_life;
+  boolean_t reversible;
+} osm_path_parms_t;
+
+typedef struct osm_sa_pr_mcmr_search_ctxt {
+  ib_gid_t *p_mgid;
+  osm_mgrp_t *p_mgrp;
+  osm_pr_rcv_t *p_rcv;
+} osm_sa_pr_mcmr_search_ctxt_t;
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_pr_rcv_construct(
+  IN osm_pr_rcv_t* const p_rcv )
+{
+  memset( p_rcv, 0, sizeof(*p_rcv) );
+  cl_qlock_pool_construct( &p_rcv->pr_pool );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_pr_rcv_destroy(
+  IN osm_pr_rcv_t* const p_rcv )
+{
+  OSM_LOG_ENTER( p_rcv->p_log, osm_pr_rcv_destroy );
+  cl_qlock_pool_destroy( &p_rcv->pr_pool );
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_pr_rcv_init(
+  IN osm_pr_rcv_t* const p_rcv,
+  IN osm_sa_resp_t* const p_resp,
+  IN osm_mad_pool_t* const p_mad_pool,
+  IN osm_subn_t* const p_subn,
+  IN osm_log_t* const p_log,
+  IN cl_plock_t* const p_lock )
+{
+  ib_api_status_t status;
+
+  OSM_LOG_ENTER( p_log, osm_pr_rcv_init );
+
+  osm_pr_rcv_construct( p_rcv );
+
+  p_rcv->p_log = p_log;
+  p_rcv->p_subn = p_subn;
+  p_rcv->p_lock = p_lock;
+  p_rcv->p_resp = p_resp;
+  p_rcv->p_mad_pool = p_mad_pool;
+
+  status = cl_qlock_pool_init( &p_rcv->pr_pool,
+                               OSM_PR_RCV_POOL_MIN_SIZE,
+                               0,
+                               OSM_PR_RCV_POOL_GROW_SIZE,
+                               sizeof(osm_pr_item_t),
+                               NULL, NULL, NULL );
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static inline boolean_t
+__osm_sa_path_rec_is_tavor_port(
+  IN const osm_port_t* const p_port )
+{
+  osm_node_t const* p_node;
+  ib_net32_t vend_id;
+
+  p_node = osm_port_get_parent_node( p_port );
+  vend_id = ib_node_info_get_vendor_id( &p_node->node_info );
+
+  return( (p_node->node_info.device_id == CL_HTON16(23108)) &&
+          ((vend_id == CL_HTON32(OSM_VENDOR_ID_MELLANOX)) ||
+           (vend_id == CL_HTON32(OSM_VENDOR_ID_TOPSPIN)) ||
+           (vend_id == CL_HTON32(OSM_VENDOR_ID_SILVERSTORM)) ||
+           (vend_id == CL_HTON32(OSM_VENDOR_ID_VOLTAIRE))) );
+}
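The MTU, rate and lifetime handling below all interpret the IBA selector encoding the same way: 0 means "greater than", 1 "less than", 2 "exactly", and 3 "best available". Treating the computed path value as the maximum the path supports, a standalone restatement of when a requirement is satisfiable (hypothetical helper, not part of this patch):

    #include <stdint.h>

    /* non-zero when a path whose best value is 'actual' can satisfy
       'required' under selector 'sel' */
    static int selector_satisfiable( uint8_t sel, uint8_t required, uint8_t actual )
    {
        switch( sel ) {
        case 0:  return actual > required;   /* must be greater than */
        case 1:  return required > 1;        /* must be less than: use required - 1 */
        case 2:  return actual >= required;  /* exact match: use required */
        case 3:  return 1;                   /* largest/smallest available: use actual */
        default: return 0;                   /* invalid selector */
        }
    }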
+/**********************************************************************
+ **********************************************************************/
+static boolean_t
+__osm_sa_path_rec_apply_tavor_mtu_limit(
+  IN const ib_path_rec_t* const p_pr,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const ib_net64_t comp_mask )
+{
+  uint8_t required_mtu;
+
+  /* only if at least one of the ports is a Tavor device */
+  if( !__osm_sa_path_rec_is_tavor_port( p_src_port ) &&
+      !__osm_sa_path_rec_is_tavor_port( p_dest_port ) )
+    return( FALSE );
+
+  /*
+    we can apply the 1K patch if either:
+    1. No MTU is required, or
+    2. The MTU selector is "less than", or
+    3. The selector is "greater than" or "exact" and the required
+       MTU fits in 1K (i.e. 256, 512 or 1K)
+  */
+  required_mtu = ib_path_rec_mtu( p_pr );
+  if( ( comp_mask & IB_PR_COMPMASK_MTUSELEC ) &&
+      ( comp_mask & IB_PR_COMPMASK_MTU ) )
+  {
+    switch( ib_path_rec_mtu_sel( p_pr ) )
+    {
+    case 0: /* must be greater than */
+    case 2: /* exact match */
+      if( IB_MTU_LEN_1024 < required_mtu )
+        return( FALSE );
+      break;
+
+    case 1: /* must be less than */
+      /* can't be disqualified by this one */
+      break;
+
+    case 3: /* largest available */
+      /* the ULP intentionally requested the largest MTU possible */
+      return( FALSE );
+      break;
+
+    default:
+      /* if we're here, there's a bug in ib_path_rec_mtu_sel() */
+      CL_ASSERT( FALSE );
+      break;
+    }
+  }
+
+  return( TRUE );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+__osm_pr_rcv_get_path_parms(
+  IN osm_pr_rcv_t* const p_rcv,
+  IN const ib_path_rec_t* const p_pr,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const uint16_t dest_lid_ho,
+  IN const ib_net64_t comp_mask,
+  OUT osm_path_parms_t* const p_parms )
+{
+  const osm_node_t* p_node;
+  const osm_physp_t* p_physp;
+  const osm_physp_t* p_dest_physp;
+  const osm_prtn_t* p_prtn;
+  const ib_port_info_t* p_pi;
+  ib_api_status_t status = IB_SUCCESS;
+  ib_net16_t pkey;
+  uint8_t mtu;
+  uint8_t rate;
+  uint8_t pkt_life;
+  uint8_t required_mtu;
+  uint8_t required_rate;
+  uint8_t required_pkt_life;
+  uint8_t sl;
+  ib_net16_t dest_lid;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_get_path_parms );
+
+  dest_lid = cl_hton16( dest_lid_ho );
+
+  p_dest_physp = osm_port_get_default_phys_ptr( p_dest_port );
+  p_physp = osm_port_get_default_phys_ptr( p_src_port );
+  p_pi = osm_physp_get_port_info_ptr( p_physp );
+
+  mtu = ib_port_info_get_mtu_cap( p_pi );
+  rate = ib_port_info_compute_rate( p_pi );
+
+  /*
+    Mellanox Tavor device performance is better using 1K MTU.
+    If the required MTU and MTU selector are such that 1K is OK
+    and at least one end of the path is a Tavor, we override the
+    port MTU with 1K.
+  */
+  if( p_rcv->p_subn->opt.enable_quirks &&
+      __osm_sa_path_rec_apply_tavor_mtu_limit(
+        p_pr, p_src_port, p_dest_port, comp_mask ) )
+  {
+    if( mtu > IB_MTU_LEN_1024 )
+    {
+      mtu = IB_MTU_LEN_1024;
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_pr_rcv_get_path_parms: "
+               "Optimized Path MTU to 1K for Mellanox Tavor device\n" );
+    }
+  }
+
+  /*
+    Walk the subnet object from source to destination,
+    tracking the most restrictive rate and mtu values along the way...
+
+    If the source port node is a switch, then p_physp should
+    point to the port that routes the destination lid
+  */
+
+  p_node = osm_physp_get_node_ptr( p_physp );
+
+  if( p_node->sw )
+  {
+    /*
+      If dest_lid_ho is equal to the lid of the switch pointed to by
+      p_sw, then p_physp will be the physical port of switch port zero.
+    */
+    p_physp = osm_switch_get_route_by_lid( p_node->sw, cl_ntoh16( dest_lid_ho ) );
+    if( p_physp == 0 )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F02: "
+               "Cannot find routing to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n",
+               dest_lid_ho,
+               cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+  }
+
+  /*
+    Same as above, but for the destination port.
+  */
+  p_node = osm_physp_get_node_ptr( p_dest_physp );
+
+  if( p_node->sw )
+  {
+    p_dest_physp = osm_switch_get_route_by_lid( p_node->sw, cl_ntoh16( dest_lid_ho ) );
+
+    if( p_dest_physp == 0 )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F03: "
+               "Cannot find routing to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n",
+               dest_lid_ho,
+               cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+  }
+
+  while( p_physp != p_dest_physp )
+  {
+    p_physp = osm_physp_get_remote( p_physp );
+
+    if( p_physp == 0 )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F05: "
+               "Cannot find remote phys port when routing to LID 0x%X from node GUID 0x%016" PRIx64 "\n",
+               dest_lid_ho,
+               cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+
+    /*
+      This is the point-to-point case (no switch in between)
+    */
+    if( p_physp == p_dest_physp )
+      break;
+
+    p_node = osm_physp_get_node_ptr( p_physp );
+
+    if( !p_node->sw )
+    {
+      /*
+        There is some sort of problem in the subnet object!
+        If this isn't a switch, we should have reached
+        the destination by now!
+      */
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F06: "
+               "Internal error, bad path\n" );
+      status = IB_ERROR;
+      goto Exit;
+    }
+
+    /*
+      Check parameters for the ingress port in this switch.
+    */
+    p_pi = osm_physp_get_port_info_ptr( p_physp );
+
+    if( mtu > ib_port_info_get_mtu_cap( p_pi ) )
+    {
+      mtu = ib_port_info_get_mtu_cap( p_pi );
+      if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_pr_rcv_get_path_parms: "
+                 "New smallest MTU = %u at intervening port 0x%016" PRIx64
+                 " port num 0x%X\n",
+                 mtu,
+                 cl_ntoh64( osm_physp_get_port_guid( p_physp ) ),
+                 osm_physp_get_port_num( p_physp ) );
+      }
+    }
+
+    if( rate > ib_port_info_compute_rate( p_pi ) )
+    {
+      rate = ib_port_info_compute_rate( p_pi );
+      if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_pr_rcv_get_path_parms: "
+                 "New smallest rate = %u at intervening port 0x%016" PRIx64
+                 " port num 0x%X\n",
+                 rate,
+                 cl_ntoh64( osm_physp_get_port_guid( p_physp ) ),
+                 osm_physp_get_port_num( p_physp ) );
+      }
+    }
+
+    /*
+      Continue with the egress port on this switch.
+    */
+    p_physp = osm_switch_get_route_by_lid( p_node->sw, dest_lid );
+
+    if( p_physp == 0 )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F07: "
+               "Dead end on path to LID 0x%X from switch for GUID 0x%016" PRIx64 "\n",
+               dest_lid_ho,
+               cl_ntoh64( osm_node_get_node_guid( p_node ) ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+
+    CL_ASSERT( p_physp );
+    CL_ASSERT( osm_physp_is_valid( p_physp ) );
+
+    p_pi = osm_physp_get_port_info_ptr( p_physp );
+
+    if( mtu > ib_port_info_get_mtu_cap( p_pi ) )
+    {
+      mtu = ib_port_info_get_mtu_cap( p_pi );
+      if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_pr_rcv_get_path_parms: "
+                 "New smallest MTU = %u at intervening port 0x%016" PRIx64
+                 " port num 0x%X\n",
+                 mtu,
+                 cl_ntoh64( osm_physp_get_port_guid( p_physp ) ),
+                 osm_physp_get_port_num( p_physp ) );
+      }
+    }
+
+    if( rate > ib_port_info_compute_rate( p_pi ) )
+    {
+      rate = ib_port_info_compute_rate( p_pi );
+      if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_pr_rcv_get_path_parms: "
+                 "New smallest rate = %u at intervening port 0x%016" PRIx64
+                 " port num 0x%X\n",
+                 rate,
+                 cl_ntoh64( osm_physp_get_port_guid( p_physp ) ),
+                 osm_physp_get_port_num( p_physp ) );
+      }
+    }
+  }
+
+  /*
+    p_physp now points to the destination
+  */
+  p_pi = osm_physp_get_port_info_ptr( p_physp );
+
+  if( mtu > ib_port_info_get_mtu_cap( p_pi ) )
+  {
+    mtu = ib_port_info_get_mtu_cap( p_pi );
+    if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_pr_rcv_get_path_parms: "
+               "New smallest MTU = %u at destination port 0x%016" PRIx64 "\n",
+               mtu,
+               cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) );
+    }
+  }
+
+  if( rate > ib_port_info_compute_rate( p_pi ) )
+  {
+    rate = ib_port_info_compute_rate( p_pi );
+    if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_pr_rcv_get_path_parms: "
+               "New smallest rate = %u at destination port 0x%016" PRIx64 "\n",
+               rate,
+               cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) );
+    }
+  }
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_pr_rcv_get_path_parms: "
+             "Path min MTU = %u, min rate = %u\n", mtu, rate );
+  }
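The walk just completed is a running minimum: each traversed port can only lower the path-wide MTU and rate, never raise them. The same fold in isolation (hypothetical arrays, not part of this patch):

    #include <stdint.h>

    static void path_min_caps( const uint8_t *port_mtu, const uint8_t *port_rate,
                               unsigned nports, uint8_t *p_mtu, uint8_t *p_rate )
    {
        unsigned i;

        /* start from the source port's capabilities, then clamp per hop */
        for( i = 0; i < nports; i++ )
        {
            if( port_mtu[i] < *p_mtu )
                *p_mtu = port_mtu[i];
            if( port_rate[i] < *p_rate )
                *p_rate = port_rate[i];
        }
    }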
+  /*
+    Determine whether these values meet the user criteria
+    and adjust them appropriately
+  */
+
+  /* we silently ignore cases where only the MTU selector is defined */
+  if( ( comp_mask & IB_PR_COMPMASK_MTUSELEC ) &&
+      ( comp_mask & IB_PR_COMPMASK_MTU ) )
+  {
+    required_mtu = ib_path_rec_mtu( p_pr );
+    switch( ib_path_rec_mtu_sel( p_pr ) )
+    {
+    case 0: /* must be greater than */
+      if( mtu <= required_mtu )
+        status = IB_NOT_FOUND;
+      break;
+
+    case 1: /* must be less than */
+      if( mtu >= required_mtu )
+      {
+        /* adjust to use the highest mtu lower than the required one */
+        if( required_mtu > 1 )
+          mtu = required_mtu - 1;
+        else
+          status = IB_NOT_FOUND;
+      }
+      break;
+
+    case 2: /* exact match */
+      if( mtu < required_mtu )
+        status = IB_NOT_FOUND;
+      else
+        mtu = required_mtu;
+      break;
+
+    case 3: /* largest available */
+      /* can't be disqualified by this one */
+      break;
+
+    default:
+      /* if we're here, there's a bug in ib_path_rec_mtu_sel() */
+      CL_ASSERT( FALSE );
+      status = IB_ERROR;
+      break;
+    }
+  }
+
+  /* we silently ignore cases where only the Rate selector is defined */
+  if( ( comp_mask & IB_PR_COMPMASK_RATESELEC ) &&
+      ( comp_mask & IB_PR_COMPMASK_RATE ) )
+  {
+    required_rate = ib_path_rec_rate( p_pr );
+    switch( ib_path_rec_rate_sel( p_pr ) )
+    {
+    case 0: /* must be greater than */
+      if( rate <= required_rate )
+        status = IB_NOT_FOUND;
+      break;
+
+    case 1: /* must be less than */
+      if( rate >= required_rate )
+      {
+        /* adjust the rate to use the highest rate lower than the required one */
+        if( required_rate > 2 )
+          rate = required_rate - 1;
+        else
+          status = IB_NOT_FOUND;
+      }
+      break;
+
+    case 2: /* exact match */
+      if( rate < required_rate )
+        status = IB_NOT_FOUND;
+      else
+        rate = required_rate;
+      break;
+
+    case 3: /* largest available */
+      /* can't be disqualified by this one */
+      break;
+
+    default:
+      /* if we're here, there's a bug in ib_path_rec_rate_sel() */
+      CL_ASSERT( FALSE );
+      status = IB_ERROR;
+      break;
+    }
+  }
+
+  /* Verify the pkt_life_time */
+  /* According to the spec definition, IBA 1.2 Table 205 (PacketLifeTime
+     description), for loopback paths packetLifeTime shall be zero. */
+  if( p_src_port == p_dest_port )
+    pkt_life = 0; /* loopback */
+  else
+    pkt_life = OSM_DEFAULT_SUBNET_TIMEOUT;
+
+  /* we silently ignore cases where only the PktLife selector is defined */
+  if( ( comp_mask & IB_PR_COMPMASK_PKTLIFETIMESELEC ) &&
+      ( comp_mask & IB_PR_COMPMASK_PKTLIFETIME ) )
+  {
+    required_pkt_life = ib_path_rec_pkt_life( p_pr );
+    switch( ib_path_rec_pkt_life_sel( p_pr ) )
+    {
+    case 0: /* must be greater than */
+      if( pkt_life <= required_pkt_life )
+        status = IB_NOT_FOUND;
+      break;
+
+    case 1: /* must be less than */
+      if( pkt_life >= required_pkt_life )
+      {
+        /* adjust the lifetime to use the highest value lower than the required one */
+        if( required_pkt_life > 1 )
+          pkt_life = required_pkt_life - 1;
+        else
+          status = IB_NOT_FOUND;
+      }
+      break;
+
+    case 2: /* exact match */
+      if( pkt_life < required_pkt_life )
+        status = IB_NOT_FOUND;
+      else
+        pkt_life = required_pkt_life;
+      break;
+
+    case 3: /* smallest available */
+      /* can't be disqualified by this one */
+      break;
+
+    default:
+      /* if we're here, there's a bug in ib_path_rec_pkt_life_sel() */
+      CL_ASSERT( FALSE );
+      status = IB_ERROR;
+      break;
+    }
+  }
+
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  p_parms->mtu = mtu;
+  p_parms->rate = rate;
+  p_parms->pkt_life = pkt_life;
+
+  if( comp_mask & IB_PR_COMPMASK_RAWTRAFFIC &&
+      cl_ntoh32( p_pr->hop_flow_raw ) & ( 1<<31 ) )
+    pkey = osm_physp_find_common_pkey( p_physp, p_dest_physp );
+  else if( comp_mask & IB_PR_COMPMASK_PKEY )
+  {
+    pkey = p_pr->pkey;
+    if( !osm_physp_share_this_pkey( p_physp, p_dest_physp, pkey ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F1A: "
+               "Ports do not share specified PKey 0x%04x\n", cl_ntoh16( pkey ) );
+      status = IB_NOT_FOUND;
+      goto Exit;
+    }
+  }
+  else
+  {
+    pkey = osm_physp_find_common_pkey( p_physp, p_dest_physp );
+    if( !pkey )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F1B: "
+               "Ports do not have any shared PKeys\n" );
+      status = IB_NOT_FOUND;
+      goto Exit;
+    }
+  }
+
+  sl = OSM_DEFAULT_SL;
+
+  if( pkey ) {
+    p_prtn = (osm_prtn_t *)cl_qmap_get( &p_rcv->p_subn->prtn_pkey_tbl,
+                                        pkey & cl_ntoh16( (uint16_t)~0x8000 ) );
+    if( p_prtn == (osm_prtn_t *)cl_qmap_end( &p_rcv->p_subn->prtn_pkey_tbl ) )
+    {
+      /* this may happen if the pkey tables were created by previous
+         runs, or if something is going wrong here */
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_pr_rcv_get_path_parms: ERR 1F1C: "
+               "No partition found for PKey 0x%04x - using default SL %d\n",
+               cl_ntoh16( pkey ), sl );
+    }
+    else
+      sl = p_prtn->sl;
+
+    /* reset pkey when raw traffic */
+    if( comp_mask & IB_PR_COMPMASK_RAWTRAFFIC &&
+        cl_ntoh32( p_pr->hop_flow_raw ) & ( 1<<31 ) )
+      pkey = 0;
+  }
+
+  if( ( comp_mask & IB_PR_COMPMASK_SL ) && ib_path_rec_sl( p_pr ) != sl )
+  {
+    status = IB_NOT_FOUND;
+    goto Exit;
+  }
+
+  p_parms->pkey = pkey;
+  p_parms->sl = sl;
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_pr_rcv_build_pr(
+  IN osm_pr_rcv_t* const p_rcv,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const uint16_t src_lid_ho,
+  IN const uint16_t dest_lid_ho,
+  IN const uint8_t preference,
+  IN const osm_path_parms_t* const p_parms,
+  OUT ib_path_rec_t* const p_pr )
+{
+  const osm_physp_t* p_src_physp;
+  const osm_physp_t* p_dest_physp;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_build_pr );
+
+  p_src_physp = osm_port_get_default_phys_ptr( p_src_port );
+  p_dest_physp = osm_port_get_default_phys_ptr( p_dest_port );
+
+  p_pr->dgid.unicast.prefix = osm_physp_get_subnet_prefix( p_dest_physp );
+  p_pr->dgid.unicast.interface_id = osm_physp_get_port_guid( p_dest_physp );
+
+  p_pr->sgid.unicast.prefix = osm_physp_get_subnet_prefix( p_src_physp );
+  p_pr->sgid.unicast.interface_id = osm_physp_get_port_guid( p_src_physp );
+
+  p_pr->dlid = cl_hton16( dest_lid_ho );
+  p_pr->slid = cl_hton16( src_lid_ho );
+
+  p_pr->hop_flow_raw &= cl_hton32( 1<<31 );
+
+  p_pr->pkey = p_parms->pkey;
+  p_pr->sl = cl_hton16( p_parms->sl );
+  p_pr->mtu = (uint8_t)( p_parms->mtu | 0x80 );
+  p_pr->rate = (uint8_t)( p_parms->rate | 0x80 );
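The 0x80 OR above is field packing: a PathRecord stores a 2-bit selector in the top bits of the mtu/rate/pkt_life bytes and the value in the low 6 bits, and 0x80 is selector 2 ("exactly") shifted into place. A tiny restatement (hypothetical helper, not part of this patch):

    #include <stdint.h>

    static uint8_t pack_sel_val( uint8_t sel, uint8_t val )
    {
        /* 2-bit selector above a 6-bit value */
        return (uint8_t)( ( ( sel & 0x3 ) << 6 ) | ( val & 0x3F ) );
    }

    /* pack_sel_val( 2, mtu ) == (uint8_t)( mtu | 0x80 ) for any 6-bit mtu */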
+  /* According to the 1.2 spec definition, Table 205 (PacketLifeTime
+     description), for loopback paths packetLifeTime shall be zero. */
+  if( p_src_port == p_dest_port )
+    p_pr->pkt_life = 0x80; /* loopback */
+  else
+    p_pr->pkt_life = (uint8_t)( p_parms->pkt_life | 0x80 );
+
+  p_pr->preference = preference;
+
+  /* always return num_path = 0, so only the reversible component is reported */
+  if( p_parms->reversible )
+    p_pr->num_path = 0x80;
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static osm_pr_item_t*
+__osm_pr_rcv_get_lid_pair_path(
+  IN osm_pr_rcv_t* const p_rcv,
+  IN const ib_path_rec_t* const p_pr,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const uint16_t src_lid_ho,
+  IN const uint16_t dest_lid_ho,
+  IN const ib_net64_t comp_mask,
+  IN const uint8_t preference )
+{
+  osm_path_parms_t path_parms;
+  osm_path_parms_t rev_path_parms;
+  osm_pr_item_t *p_pr_item;
+  ib_api_status_t status, rev_path_status;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_get_lid_pair_path );
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_pr_rcv_get_lid_pair_path: "
+             "Src LID 0x%X, Dest LID 0x%X\n",
+             src_lid_ho, dest_lid_ho );
+  }
+
+  p_pr_item = (osm_pr_item_t*)cl_qlock_pool_get( &p_rcv->pr_pool );
+  if( p_pr_item == NULL )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_pr_rcv_get_lid_pair_path: ERR 1F01: "
+             "Unable to allocate path record\n" );
+    goto Exit;
+  }
+
+  status = __osm_pr_rcv_get_path_parms( p_rcv, p_pr, p_src_port,
+                                        p_dest_port, dest_lid_ho,
+                                        comp_mask, &path_parms );
+
+  if( status != IB_SUCCESS )
+  {
+    cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item );
+    p_pr_item = NULL;
+    goto Exit;
+  }
+
+  /* now try the reverse path */
+  rev_path_status = __osm_pr_rcv_get_path_parms( p_rcv, p_pr, p_dest_port,
+                                                 p_src_port, src_lid_ho,
+                                                 comp_mask, &rev_path_parms );
+  path_parms.reversible = ( rev_path_status == IB_SUCCESS );
+
+  /* did we get a Reversible Path compmask? */
+  /*
+    NOTE that if the reversible component = 0, it is a don't care
+    rather than requiring non-reversible paths...
+    see Vol1 Ver1.2 p900 l16
+  */
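Per that note, only the combination "reversible bit selected in comp_mask AND num_path bit 0x80 set" actually demands a reversible path; a zero reversible component is a wildcard. As a one-line predicate (stand-in mask value, hypothetical helper, not part of this patch):

    #include <stdint.h>

    #define COMPMASK_REVERSIBLE ( 1ull << 13 )  /* stand-in for IB_PR_COMPMASK_REVERSIBLE */

    static int must_be_reversible( uint64_t comp_mask, uint8_t num_path )
    {
        /* comp_mask selects the field; num_path bit 0x80 carries the request */
        return ( comp_mask & COMPMASK_REVERSIBLE ) && ( num_path & 0x80 );
    }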
+  if( comp_mask & IB_PR_COMPMASK_REVERSIBLE )
+  {
+    if( !path_parms.reversible && ( p_pr->num_path & 0x80 ) )
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_pr_rcv_get_lid_pair_path: "
+               "Requested reversible path but failed to get one\n" );
+
+      cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item );
+      p_pr_item = NULL;
+      goto Exit;
+    }
+  }
+
+  __osm_pr_rcv_build_pr( p_rcv, p_src_port, p_dest_port, src_lid_ho,
+                         dest_lid_ho, preference, &path_parms,
+                         &p_pr_item->path_rec );
+
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( p_pr_item );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static void
+__osm_pr_rcv_get_port_pair_paths(
+  IN osm_pr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw,
+  IN const osm_port_t* const p_req_port,
+  IN const osm_port_t* const p_src_port,
+  IN const osm_port_t* const p_dest_port,
+  IN const ib_net64_t comp_mask,
+  IN cl_qlist_t* const p_list )
+{
+  const ib_path_rec_t* p_pr;
+  const ib_sa_mad_t* p_sa_mad;
+  osm_pr_item_t* p_pr_item;
+  uint16_t src_lid_min_ho;
+  uint16_t src_lid_max_ho;
+  uint16_t dest_lid_min_ho;
+  uint16_t dest_lid_max_ho;
+  uint16_t src_lid_ho;
+  uint16_t dest_lid_ho;
+  uint32_t path_num;
+  uint8_t preference;
+  uintn_t iterations;
+  uintn_t src_offset;
+  uintn_t dest_offset;
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_get_port_pair_paths );
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_pr_rcv_get_port_pair_paths: "
+             "Src port 0x%016" PRIx64 ", "
+             "Dst port 0x%016" PRIx64 "\n",
+             cl_ntoh64( osm_port_get_guid( p_src_port ) ),
+             cl_ntoh64( osm_port_get_guid( p_dest_port ) ) );
+  }
+
+  /* Check that the req_port, src_port and dest_port all share a
+     pkey. The check is done on the default physical port of the ports. */
+  if( osm_port_share_pkey( p_rcv->p_log, p_req_port, p_src_port ) == FALSE ||
+      osm_port_share_pkey( p_rcv->p_log, p_req_port, p_dest_port ) == FALSE ||
+      osm_port_share_pkey( p_rcv->p_log, p_src_port, p_dest_port ) == FALSE )
+  {
+    /* One of the pairs doesn't share a pkey, so the path is disqualified. */
+    goto Exit;
+  }
+
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+  p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad );
+
+  /*
+    We shouldn't be here if the paths are disqualified in some way...
+    Thus, we assume every possible connection is valid.
+
+    We desire to return high-quality paths first.
+    In OpenSM, higher quality means least overlap with other paths.
+    This is achieved in practice by returning paths with
+    different LID values on each end, which means these
+    paths are more redundant than paths with the same LID repeated
+    on one side. For example, in OpenSM the paths between two
+    endpoints with LMC = 1 might be as follows:
+
+    Port A, LID 1 <-> Port B, LID 3
+    Port A, LID 1 <-> Port B, LID 4
+    Port A, LID 2 <-> Port B, LID 3
+    Port A, LID 2 <-> Port B, LID 4
+
+    The OpenSM unicast routing algorithms attempt to disperse each path
+    to as varied a physical path as is reasonable. 1<->3 and 1<->4 have
+    more physical overlap (hence less redundancy) than 1<->3 and 2<->4.
+  */
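Restating that dispersal rule as a loop (hypothetical sketch, not part of this patch): pair equal LID offsets first, since those paths overlap the least, then pair the remaining unequal-offset combinations.

    #include <stdio.h>

    static void list_lid_pairs( unsigned src_base, unsigned dst_base, unsigned lmc )
    {
        unsigned n = 1u << lmc;  /* each port owns 2^lmc LIDs */
        unsigned i, j;

        for( i = 0; i < n; i++ )  /* fully redundant pairs first */
            printf( "pref 0: 0x%X <-> 0x%X\n", src_base + i, dst_base + i );

        for( i = 0; i < n; i++ )  /* overlapping pairs afterwards */
            for( j = 0; j < n; j++ )
                if( i != j )
                    printf( "pref 2: 0x%X <-> 0x%X\n", src_base + i, dst_base + j );
    }

With LMC = 1 and base LIDs 1 and 3 this prints 1<->3 and 2<->4 first, then 1<->4 and 2<->3, matching the preference groups described next.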
+  /*
+    OpenSM ranks paths in three preference groups:
+
+    Preference Value    Description
+    ----------------    -------------------------------------------
+    0                   Redundant in both directions with other
+                        pref value = 0 paths
+
+    1                   Redundant in one direction with other
+                        pref value = 0 and pref value = 1 paths
+
+    2                   Not redundant in either direction with
+                        other paths
+
+    3-FF                Unused
+
+    SA clients don't need to know these details, only that the lower
+    preference paths are preferred, as stated in the spec. The paths
+    may not actually be physically redundant depending on the topology
+    of the subnet, but the point of LMC > 0 is to offer redundancy,
+    so it is assumed that the subnet is physically appropriate for the
+    specified LMC value. A more advanced implementation would inspect for
+    physical redundancy, but I'm not going to bother with that now.
+  */
+
+  /*
+    Refine our search if the client specified end-point LIDs
+  */
+  if( comp_mask & IB_PR_COMPMASK_DLID )
+  {
+    dest_lid_min_ho = cl_ntoh16( p_pr->dlid );
+    dest_lid_max_ho = cl_ntoh16( p_pr->dlid );
+  }
+  else
+  {
+    osm_port_get_lid_range_ho( p_dest_port, &dest_lid_min_ho,
+                               &dest_lid_max_ho );
+  }
+
+  if( comp_mask & IB_PR_COMPMASK_SLID )
+  {
+    src_lid_min_ho = cl_ntoh16( p_pr->slid );
+    src_lid_max_ho = cl_ntoh16( p_pr->slid );
+  }
+  else
+  {
+    osm_port_get_lid_range_ho( p_src_port, &src_lid_min_ho,
+                               &src_lid_max_ho );
+  }
+
+  if( src_lid_min_ho == 0 )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_pr_rcv_get_port_pair_paths: ERR 1F20: "
+             "Obtained source LID of 0. No such LID possible\n" );
+    goto Exit;
+  }
+
+  if( dest_lid_min_ho == 0 )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_pr_rcv_get_port_pair_paths: ERR 1F21: "
+             "Obtained destination LID of 0. No such LID possible\n" );
+    goto Exit;
+  }
+
+  if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+             "__osm_pr_rcv_get_port_pair_paths: "
+             "Src LIDs [0x%X-0x%X], "
+             "Dest LIDs [0x%X-0x%X]\n",
+             src_lid_min_ho, src_lid_max_ho,
+             dest_lid_min_ho, dest_lid_max_ho );
+  }
+
+  src_lid_ho = src_lid_min_ho;
+  dest_lid_ho = dest_lid_min_ho;
+
+  /*
+    Preferred paths come first in OpenSM
+  */
+  preference = 0;
+  path_num = 0;
+
+  /* If SubnAdmGet, assume NumbPaths 1 (1.2 erratum) */
+  if( p_sa_mad->method != IB_MAD_METHOD_GET )
+  {
+    if( comp_mask & IB_PR_COMPMASK_NUMBPATH )
+      iterations = ib_path_rec_num_path( p_pr );
+    else
+      iterations = (uintn_t)(-1);
+  }
+  else
+    iterations = 1;
+
+  while( path_num < iterations )
+  {
+    /*
+      These paths are "fully redundant"
+    */
+    p_pr_item = __osm_pr_rcv_get_lid_pair_path( p_rcv, p_pr,
+                                                p_src_port, p_dest_port,
+                                                src_lid_ho, dest_lid_ho,
+                                                comp_mask, preference );
+
+    if( p_pr_item )
+    {
+      cl_qlist_insert_tail( p_list,
+                            (cl_list_item_t*)&p_pr_item->pool_item );
+      ++path_num;
+    }
+
+    if( ++src_lid_ho > src_lid_max_ho )
+      break;
+
+    if( ++dest_lid_ho > dest_lid_max_ho )
+      break;
+  }
+
+  /*
+    Check if we've accumulated all the paths that the user cares to see
+  */
+  if( path_num == iterations )
+    goto Exit;
+
+  /*
+    Don't bother reporting preference 1 paths for now.
+    It's more trouble than it's worth and can only occur
+    if ports have different LMC values, which isn't supported
+    by OpenSM right now anyway.
+  */
+ */ + preference = 2; + src_lid_ho = src_lid_min_ho; + dest_lid_ho = dest_lid_min_ho; + src_offset = 0; + dest_offset = 0; + + /* + Iterate over the remaining paths + */ + while( path_num < iterations ) + { + dest_offset++; + dest_lid_ho++; + + if( dest_lid_ho > dest_lid_max_ho ) + { + src_offset++; + src_lid_ho++; + + if( src_lid_ho > src_lid_max_ho ) + break; /* done */ + + dest_offset = 0; + dest_lid_ho = dest_lid_min_ho; + } + + /* + These paths are "fully non-redundant" with paths already + identified above and consequently not of much value. + + Don't return paths we already identified above, as indicated + by the offset values being equal. + */ + if( src_offset == dest_offset ) + continue; /* already reported */ + + p_pr_item = __osm_pr_rcv_get_lid_pair_path( p_rcv, p_pr, + p_src_port, p_dest_port, + src_lid_ho, dest_lid_ho, + comp_mask, preference ); + + if( p_pr_item ) + { + cl_qlist_insert_tail( p_list, + (cl_list_item_t*)&p_pr_item->pool_item ); + ++path_num; + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static ib_net16_t +__osm_pr_rcv_get_end_points( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + OUT const osm_port_t** const pp_src_port, + OUT const osm_port_t** const pp_dest_port ) +{ + const ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + ib_api_status_t status; + ib_net16_t sa_status = IB_SA_MAD_STATUS_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_get_end_points ); + + /* + Determine what fields are valid and then get a pointer + to the source and destination port objects, if possible. + */ + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + /* + Check a few easy disqualifying cases up front before getting + into the endpoints. + */ + + if( comp_mask & IB_PR_COMPMASK_SGID ) + { + if ( ! ib_gid_is_link_local ( &p_pr->sgid ) ) + { + if ( ib_gid_get_subnet_prefix ( &p_pr->sgid ) != p_rcv->p_subn->opt.subnet_prefix ) + { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "Non local SGID subnet prefix 0x%016" PRIx64 "\n", + cl_ntoh64( p_pr->sgid.unicast.prefix ) ); + + sa_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + } + + *pp_src_port = (osm_port_t*)cl_qmap_get( + &p_rcv->p_subn->port_guid_tbl, + p_pr->sgid.unicast.interface_id ); + + if( *pp_src_port == (osm_port_t*)cl_qmap_end( + &p_rcv->p_subn->port_guid_tbl ) ) + { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "No source port with GUID 0x%016" PRIx64 "\n", + cl_ntoh64( p_pr->sgid.unicast.interface_id) ); + + sa_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + } + else + { + *pp_src_port = 0; + if( comp_mask & IB_PR_COMPMASK_SLID ) + { + status = cl_ptr_vector_at( &p_rcv->p_subn->port_lid_tbl, + cl_ntoh16(p_pr->slid), (void**)pp_src_port ); + + if( (status != CL_SUCCESS) || (*pp_src_port == NULL) ) + { + /* + This 'error' is the client's fault (bad lid) so + don't enter it as an error in our own log. + Return an error response to the client. 
+ */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "No source port with LID = 0x%X\n", + cl_ntoh16( p_pr->slid) ); + + sa_status = IB_SA_MAD_STATUS_NO_RECORDS; + goto Exit; + } + } + } + + if( comp_mask & IB_PR_COMPMASK_DGID ) + { + if ( ! ib_gid_is_link_local ( &p_pr->dgid ) ) + { + if ( ! ib_gid_is_multicast ( &p_pr->dgid ) && + ib_gid_get_subnet_prefix ( &p_pr->dgid ) != p_rcv->p_subn->opt.subnet_prefix ) + { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "Non local DGID subnet prefix 0x%016" PRIx64 "\n", + cl_ntoh64( p_pr->dgid.unicast.prefix ) ); + + sa_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + } + + *pp_dest_port = (osm_port_t*)cl_qmap_get( + &p_rcv->p_subn->port_guid_tbl, + p_pr->dgid.unicast.interface_id ); + + if( *pp_dest_port == (osm_port_t*)cl_qmap_end( + &p_rcv->p_subn->port_guid_tbl ) ) + { + /* + This 'error' is the client's fault (bad gid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "No dest port with GUID 0x%016" PRIx64 "\n", + cl_ntoh64( p_pr->dgid.unicast.interface_id) ); + + sa_status = IB_SA_MAD_STATUS_INVALID_GID; + goto Exit; + } + } + else + { + *pp_dest_port = 0; + if( comp_mask & IB_PR_COMPMASK_DLID ) + { + status = cl_ptr_vector_at( &p_rcv->p_subn->port_lid_tbl, + cl_ntoh16(p_pr->dlid), (void**)pp_dest_port ); + + if( (status != CL_SUCCESS) || (*pp_dest_port == NULL) ) + { + /* + This 'error' is the client's fault (bad lid) so + don't enter it as an error in our own log. + Return an error response to the client. + */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_get_end_points: " + "No dest port with LID = 0x%X\n", + cl_ntoh16( p_pr->dlid) ); + + sa_status = IB_SA_MAD_STATUS_NO_RECORDS; + goto Exit; + } + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( sa_status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pr_rcv_process_world( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN const osm_port_t* const requester_port, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list ) +{ + const cl_qmap_t* p_tbl; + const osm_port_t* p_dest_port; + const osm_port_t* p_src_port; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_process_world ); + + /* + Iterate the entire port space over itself. + A path record from a port to itself is legit, so no + need for a special case there. + + We compute both A -> B and B -> A, since we don't have + any check to determine the reversibility of the paths.
+ */ + p_tbl = &p_rcv->p_subn->port_guid_tbl; + + p_dest_port = (osm_port_t*)cl_qmap_head( p_tbl ); + while( p_dest_port != (osm_port_t*)cl_qmap_end( p_tbl ) ) + { + p_src_port = (osm_port_t*)cl_qmap_head( p_tbl ); + while( p_src_port != (osm_port_t*)cl_qmap_end( p_tbl ) ) + { + __osm_pr_rcv_get_port_pair_paths( p_rcv, p_madw, requester_port, p_src_port, + p_dest_port, comp_mask, p_list ); + + p_src_port = (osm_port_t*)cl_qmap_next( &p_src_port->map_item ); + } + + p_dest_port = (osm_port_t*)cl_qmap_next( &p_dest_port->map_item ); + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pr_rcv_process_half( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN const osm_port_t* const requester_port, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list ) +{ + const cl_qmap_t* p_tbl; + const osm_port_t* p_port; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_process_half ); + + /* + Iterate over every port, looking for matches... + A path record from a port to itself is legit, so no + need to special case that one. + */ + p_tbl = &p_rcv->p_subn->port_guid_tbl; + + if( p_src_port ) + { + /* + The src port is fixed, so iterate over destination ports. + */ + p_port = (osm_port_t*)cl_qmap_head( p_tbl ); + while( p_port != (osm_port_t*)cl_qmap_end( p_tbl ) ) + { + __osm_pr_rcv_get_port_pair_paths( p_rcv, p_madw , requester_port, p_src_port, + p_port, comp_mask, p_list ); + p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ); + } + } + else + { + /* + The dest port is fixed, so iterate over source ports.
+ */ + p_port = (osm_port_t*)cl_qmap_head( p_tbl ); + while( p_port != (osm_port_t*)cl_qmap_end( p_tbl ) ) + { + __osm_pr_rcv_get_port_pair_paths( p_rcv, p_madw, requester_port, p_port, + p_dest_port, comp_mask, p_list ); + p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pr_rcv_process_pair( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN const osm_port_t* const requester_port, + IN const osm_port_t* const p_src_port, + IN const osm_port_t* const p_dest_port, + IN const ib_net64_t comp_mask, + IN cl_qlist_t* const p_list ) +{ + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_process_pair ); + + __osm_pr_rcv_get_port_pair_paths( p_rcv, p_madw, requester_port, p_src_port, + p_dest_port, comp_mask, p_list ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + *********************************************************************/ +static void +__search_mgrp_by_mgid( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_mgrp_t* p_mgrp = (osm_mgrp_t*)p_map_item; + osm_sa_pr_mcmr_search_ctxt_t *p_ctxt = (osm_sa_pr_mcmr_search_ctxt_t *) context; + const ib_gid_t *p_recvd_mgid; + osm_pr_rcv_t *p_rcv; + /* uint32_t i; */ + + p_recvd_mgid = p_ctxt->p_mgid; + p_rcv = p_ctxt->p_rcv; + + /* ignore groups marked for deletion */ + if ( p_mgrp->to_be_deleted ) + return; + + /* compare entire MGID so different scope will not sneak in for + the same MGID */ + if ( memcmp( &p_mgrp->mcmember_rec.mgid, + p_recvd_mgid, + sizeof(ib_gid_t) ) ) + return; + +#if 0 + for ( i = 0 ; i < sizeof(p_mgrp->mcmember_rec.mgid.multicast.raw_group_id); i++) + { + if ( p_mgrp->mcmember_rec.mgid.multicast.raw_group_id[i] != + p_recvd_mgid->mgid.multicast.raw_group_id[i] ) + return; + } +#endif + + if( p_ctxt->p_mgrp ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__search_mgrp_by_mgid: ERR 1F08: " + "Multiple MC groups for same MGID\n" ); + return; + } + p_ctxt->p_mgrp = p_mgrp; +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__get_mgrp_by_mgid( + IN osm_pr_rcv_t* const p_rcv, + IN ib_path_rec_t* p_recvd_path_rec, + OUT osm_mgrp_t ** pp_mgrp ) +{ + osm_sa_pr_mcmr_search_ctxt_t mcmr_search_context; + + mcmr_search_context.p_mgid = &p_recvd_path_rec->dgid; + mcmr_search_context.p_rcv = p_rcv; + mcmr_search_context.p_mgrp = NULL; + + cl_qmap_apply_func( &p_rcv->p_subn->mgrp_mlid_tbl, + __search_mgrp_by_mgid, + &mcmr_search_context); + + if( mcmr_search_context.p_mgrp == NULL ) + { + return IB_NOT_FOUND; + } + + *pp_mgrp = mcmr_search_context.p_mgrp; + return IB_SUCCESS; +} + +/********************************************************************** + **********************************************************************/ +static osm_mgrp_t * +__get_mgrp_by_mlid( + IN const osm_pr_rcv_t* const p_rcv, + IN ib_net16_t const mlid ) +{ + cl_map_item_t * map_item; + + map_item = cl_qmap_get( &p_rcv->p_subn->mgrp_mlid_tbl, mlid ); + + if( map_item == cl_qmap_end(&p_rcv->p_subn->mgrp_mlid_tbl) ) + { + return NULL; + } + + return (osm_mgrp_t *)map_item; +} + +/********************************************************************** + **********************************************************************/ 
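
The MGID lookup above has to walk the whole table because mgrp_mlid_tbl is keyed by MLID, not MGID; cl_qmap_apply_func visits every item with a caller-owned context. The idiom, reduced to a sketch (struct and function names here are illustrative, not from the patch):

    #include <string.h>

    typedef struct gid_search {
        const ib_gid_t *p_wanted;   /* the key we really want to match */
        osm_mgrp_t     *p_found;    /* out: the matching group, if any */
    } gid_search_t;

    static void visit_mgrp(cl_map_item_t *p_item, void *context)
    {
        osm_mgrp_t   *p_grp  = (osm_mgrp_t *)p_item;  /* map item is first member */
        gid_search_t *p_srch = (gid_search_t *)context;

        if (!memcmp(&p_grp->mcmember_rec.mgid, p_srch->p_wanted,
                    sizeof(ib_gid_t)))
            p_srch->p_found = p_grp;
    }

    /* usage: cl_qmap_apply_func(&p_subn->mgrp_mlid_tbl, visit_mgrp, &srch); */
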
+static void +__osm_pr_get_mgrp( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + OUT osm_mgrp_t **pp_mgrp ) +{ + ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + ib_api_status_t status; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_get_mgrp ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + if( comp_mask & IB_PR_COMPMASK_DGID ) + { + status = __get_mgrp_by_mgid( p_rcv, p_pr, pp_mgrp ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_get_mgrp: ERR 1F09: " + "No MC group found for PathRecord destination GID\n" ); + goto Exit; + } + } + + if( comp_mask & IB_PR_COMPMASK_DLID ) + { + if( *pp_mgrp) + { + /* check that the MLID in the MC group is */ + /* the same as the DLID in the PathRecord */ + if( (*pp_mgrp)->mlid != p_pr->dlid ) + { + /* Note: perhaps this might be better indicated as an invalid request */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_get_mgrp: ERR 1F10: " + "MC group MLID does not match PathRecord destination LID\n" ); + *pp_mgrp = NULL; + goto Exit; + } + } + else + { + *pp_mgrp = __get_mgrp_by_mlid( p_rcv, p_pr->dlid ); + if( *pp_mgrp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_get_mgrp: ERR 1F11: " + "No MC group found for PathRecord destination LID\n" ); + } + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_pr_match_mgrp_attributes( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN const osm_mgrp_t* const p_mgrp ) +{ + const ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + ib_api_status_t status = IB_ERROR; + uint32_t flow_label; + uint8_t sl; + uint8_t hop_limit; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_match_mgrp_attributes ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + /* If SGID and/or SLID specified, should validate as member of MC group */ + /* Also, MTU, rate, packet lifetime, and raw traffic requested are not currently checked */ + if( comp_mask & IB_PR_COMPMASK_PKEY ) + { + if( p_pr->pkey != p_mgrp->mcmember_rec.pkey ) + goto Exit; + } + + ib_member_get_sl_flow_hop( p_mgrp->mcmember_rec.sl_flow_hop, + &sl, &flow_label, &hop_limit ); + + if( comp_mask & IB_PR_COMPMASK_SL ) + { + if( ib_path_rec_sl( p_pr ) != sl ) + goto Exit; + } + + /* If SubnAdmGet, assume NumbPaths of 1 (1.2 erratum) */ + if( ( comp_mask & IB_PR_COMPMASK_NUMBPATH ) && + ( p_sa_mad->method != IB_MAD_METHOD_GET ) ) + { + if( ib_path_rec_num_path( p_pr ) == 0 ) + goto Exit; + } + + if( comp_mask & IB_PR_COMPMASK_FLOWLABEL ) + { + if( ib_path_rec_flow_lbl( p_pr ) != flow_label ) + goto Exit; + } + + if( comp_mask & IB_PR_COMPMASK_HOPLIMIT ) + { + if( ib_path_rec_hop_limit( p_pr ) != hop_limit ) + goto Exit; + } + + if( comp_mask & IB_PR_COMPMASK_TCLASS ) + { + if( p_pr->tclass != p_mgrp->mcmember_rec.tclass ) + goto Exit; + } + + status = IB_SUCCESS; + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static int +__osm_pr_rcv_check_mcast_dest( + IN osm_pr_rcv_t* const p_rcv, + IN 
const osm_madw_t* const p_madw ) +{ + const ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + ib_net64_t comp_mask; + int is_multicast = 0; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_check_mcast_dest ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + if( comp_mask & IB_PR_COMPMASK_DGID ) + { + is_multicast = ib_gid_is_multicast( &p_pr->dgid ); + if( !is_multicast ) + goto Exit; + } + + if( comp_mask & IB_PR_COMPMASK_DLID ) + { + if( cl_ntoh16( p_pr->dlid ) >= IB_LID_MCAST_START_HO && + cl_ntoh16( p_pr->dlid ) <= IB_LID_MCAST_END_HO ) + is_multicast = 1; + else if( is_multicast ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_rcv_check_mcast_dest: ERR 1F12: " + "PathRecord request indicates MGID but not MLID\n" ); + is_multicast = -1; + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( is_multicast ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_pr_rcv_respond( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN cl_qlist_t* const p_list ) +{ + osm_madw_t* p_resp_madw; + const ib_sa_mad_t* p_sa_mad; + ib_sa_mad_t* p_resp_sa_mad; + size_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + size_t trim_num_rec; +#endif + ib_path_rec_t* p_resp_pr; + ib_api_status_t status; + const ib_sa_mad_t* p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + osm_pr_item_t* p_pr_item; + uint32_t i; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pr_rcv_respond ); + + num_rec = cl_qlist_count( p_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_rcv_respond: ERR 1F13: " + "Got more than one record for SubnAdmGet (%zu)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + /* need to set the mem free ... */ + p_pr_item = (osm_pr_item_t*)cl_qlist_remove_head( p_list ); + while( p_pr_item != (osm_pr_item_t*)cl_qlist_end( p_list ) ) + { + cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item ); + p_pr_item = (osm_pr_item_t*)cl_qlist_remove_head( p_list ); + } + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_path_rec_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_pr_rcv_respond: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec,trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_pr_rcv_respond: " + "Generating response with %zu records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. 
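
The SubnAdmGet branch above enforces compliance statement C15-0.1.30: a Get must resolve to exactly one record. Reduced to a helper (hypothetical; the patch inlines these checks):

    /* Map a match count to the status a SubnAdmGet response must carry. */
    static ib_net16_t get_resp_status(size_t num_rec)
    {
        if (num_rec == 0)
            return IB_SA_MAD_STATUS_NO_RECORDS;
        if (num_rec > 1)
            return IB_SA_MAD_STATUS_TOO_MANY_RECORDS;
        return IB_SA_MAD_STATUS_SUCCESS;
    }

Note that on the too-many-records path every queued record still has to go back to pr_pool before bailing out, which is what the drain loop above does.
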
Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, p_madw->h_bind, + num_rec * sizeof(ib_path_rec_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + if( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_rcv_respond: ERR 1F14: " + "Unable to allocate MAD\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_pr_item = (osm_pr_item_t*)cl_qlist_remove_head( p_list ); + cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + memcpy( p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = ib_get_attr_offset( sizeof(ib_path_rec_t) ); + + p_resp_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for ( i = 0; i < pre_trim_num_rec; i++ ) + { + p_pr_item = (osm_pr_item_t*)cl_qlist_remove_head( p_list ); + /* copy only if not trimmed */ + if (i < num_rec) + *p_resp_pr = p_pr_item->path_rec; + + cl_qlock_pool_put( &p_rcv->pr_pool, &p_pr_item->pool_item ); + p_resp_pr++; + } + + CL_ASSERT( cl_is_qlist_empty( p_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pr_rcv_respond: ERR 1F15: " + "Unable to send MAD (%s)\n", ib_get_err_str( status ) ); + /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */ + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pr_rcv_process( + IN osm_pr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_path_rec_t* p_pr; + const ib_sa_mad_t* p_sa_mad; + const osm_port_t* p_src_port; + const osm_port_t* p_dest_port; + cl_qlist_t pr_list; + ib_net16_t sa_status; + osm_port_t* requester_port; + int ret; + + OSM_LOG_ENTER( p_rcv->p_log, osm_pr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_PATH_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ((p_sa_mad->method != IB_MAD_METHOD_GET) && + (p_sa_mad->method != IB_MAD_METHOD_GETTABLE)) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pr_rcv_process: ERR 1F17: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
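
A note on the attr_offset assignment above: the SA AttributeOffset field counts the record stride in units of 8 bytes, so the 64-byte PathRecord yields an offset of 8. Assuming that semantic for ib_get_attr_offset, the conversion is simply:

    /* Record stride in 8-byte units; shown in host order, while the real
       ib_get_attr_offset presumably also swaps to network order. */
    static uint16_t attr_offset_ho(size_t rec_size)
    {
        return (uint16_t)(rec_size >> 3);   /* 64-byte PathRecord -> 8 */
    }
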
*/ + requester_port = osm_get_port_by_mad_addr( p_rcv->p_log, p_rcv->p_subn, + osm_madw_get_mad_addr_ptr( p_madw ) ); + if( requester_port == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pr_rcv_process: ERR 1F16: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_path_record( p_rcv->p_log, p_pr, OSM_LOG_DEBUG ); + + cl_qlist_init( &pr_list ); + + /* + Most SA functions (including this one) are read-only on the + subnet object, so we grab the lock non-exclusively. + */ + cl_plock_acquire( p_rcv->p_lock ); + + /* Handle multicast destinations separately */ + if( (ret = __osm_pr_rcv_check_mcast_dest( p_rcv, p_madw )) < 0 ) + { + /* Multicast DGID with unicast DLID */ + cl_plock_release( p_rcv->p_lock ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_INVALID_FIELD ); + goto Exit; + } + + if(ret > 0) + goto McastDest; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pr_rcv_process: " + "Unicast destination requested\n" ); + + sa_status = __osm_pr_rcv_get_end_points( p_rcv, p_madw, + &p_src_port, &p_dest_port ); + + if( sa_status == IB_SA_MAD_STATUS_SUCCESS ) + { + /* + What happens next depends on the type of endpoint information + that was specified.... + */ + if( p_src_port ) + { + if( p_dest_port ) + __osm_pr_rcv_process_pair( p_rcv, p_madw, requester_port, + p_src_port, p_dest_port, + p_sa_mad->comp_mask, &pr_list ); + else + __osm_pr_rcv_process_half( p_rcv, p_madw, requester_port, + p_src_port, NULL, + p_sa_mad->comp_mask, &pr_list ); + } + else + { + if( p_dest_port ) + __osm_pr_rcv_process_half( p_rcv, p_madw, requester_port, + NULL, p_dest_port, + p_sa_mad->comp_mask, &pr_list ); + else + /* + Katie, bar the door! + */ + __osm_pr_rcv_process_world( p_rcv, p_madw, requester_port, + p_sa_mad->comp_mask, &pr_list ); + } + } + goto Unlock; + + McastDest: + osm_log(p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pr_rcv_process: " + "Multicast destination requested\n" ); + { + osm_mgrp_t *p_mgrp = NULL; + ib_api_status_t status; + osm_pr_item_t* p_pr_item; + uint32_t flow_label; + uint8_t sl; + uint8_t hop_limit; + + /* First, get the MC info */ + __osm_pr_get_mgrp( p_rcv, p_madw, &p_mgrp ); + + if ( p_mgrp ) + { + /* Make sure the rest of the PathRecord matches the MC group attributes */ + status = __osm_pr_match_mgrp_attributes( p_rcv, p_madw, p_mgrp ); + if ( status == IB_SUCCESS ) + { + p_pr_item = (osm_pr_item_t*)cl_qlock_pool_get( &p_rcv->pr_pool ); + if( p_pr_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pr_rcv_process: ERR 1F18: " + "Unable to allocate path record for MC group\n" ); + } + else + { + /* Copy PathRecord request into response */ + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_pr = (ib_path_rec_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + p_pr_item->path_rec = *p_pr; + + /* Now, use the MC info to cruft up the PathRecord response */ + p_pr_item->path_rec.dgid = p_mgrp->mcmember_rec.mgid; + p_pr_item->path_rec.dlid = p_mgrp->mcmember_rec.mlid; + p_pr_item->path_rec.tclass = p_mgrp->mcmember_rec.tclass; + p_pr_item->path_rec.num_path = 1; + p_pr_item->path_rec.pkey = p_mgrp->mcmember_rec.pkey; + + /* MTU, rate, and packet lifetime should be exactly */ + p_pr_item->path_rec.mtu = (2<<6) | p_mgrp->mcmember_rec.mtu; + p_pr_item->path_rec.rate = (2<<6) | p_mgrp->mcmember_rec.rate; + p_pr_item->path_rec.pkt_life = (2<<6) | p_mgrp->mcmember_rec.pkt_life; + + /* SL, Hop Limit, and Flow Label */ + ib_member_get_sl_flow_hop( p_mgrp->mcmember_rec.sl_flow_hop, + &sl, 
&flow_label, &hop_limit ); + p_pr_item->path_rec.sl = cl_hton16( sl ); + p_pr_item->path_rec.hop_flow_raw = (uint32_t)(hop_limit) | + (flow_label << 8); + + cl_qlist_insert_tail( &pr_list, + (cl_list_item_t*)&p_pr_item->pool_item ); + + } + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pr_rcv_process: ERR 1F19: " + "MC group attributes don't match PathRecord request\n" ); + } + } + } + + Unlock: + cl_plock_release( p_rcv->p_lock ); + + /* Now, (finally) respond to the PathRecord request */ + __osm_pr_rcv_respond( p_rcv, p_madw, &pr_list ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record_ctrl.c new file mode 100644 index 00000000..ec13fdc2 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_path_record_ctrl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pr_rcv_ctrl_t. + * This object represents the PathRecord request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_pr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_pr_rcv_process( ((osm_pr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pr_rcv_ctrl_construct( + IN osm_pr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_pr_rcv_ctrl_destroy( + IN osm_pr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pr_rcv_ctrl_init( + IN osm_pr_rcv_ctrl_t* const p_ctrl, + IN osm_pr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_pr_rcv_ctrl_init ); + + osm_pr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_PATH_RECORD, + __osm_pr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_pr_rcv_ctrl_init: ERR 2001: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record.c new file mode 100644 index 00000000..60d8d937 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record.c @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_PKEY_REC_RCV_POOL_MIN_SIZE 32 +#define OSM_PKEY_REC_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_pkey_item +{ + cl_pool_item_t pool_item; + ib_pkey_table_record_t rec; +} osm_pkey_item_t; + +typedef struct _osm_pkey_search_ctxt +{ + const ib_pkey_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + uint16_t block_num; + cl_qlist_t* p_list; + osm_pkey_rec_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_pkey_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rec_rcv_construct( + IN osm_pkey_rec_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rec_rcv_destroy( + IN osm_pkey_rec_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_pkey_rec_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_rec_rcv_init( + IN osm_pkey_rec_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_pkey_rec_rcv_init ); + + osm_pkey_rec_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + /* used for matching records collection */ + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_PKEY_REC_RCV_POOL_MIN_SIZE, + 0, + OSM_PKEY_REC_RCV_POOL_GROW_SIZE, + sizeof(osm_pkey_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_pkey_create( + IN osm_pkey_rec_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp, + IN osm_pkey_search_ctxt_t* const p_ctxt, + IN uint16_t block ) +{ + osm_pkey_item_t* p_rec_item; + uint16_t lid; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pkey_create ); + + p_rec_item = (osm_pkey_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_pkey_create: ERR 4602: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if (p_physp->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH) + { + lid = osm_physp_get_port_info_ptr( p_physp )->base_lid; + } + else + { + lid = osm_node_get_base_lid( p_physp->p_node, 0 ); + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + 
"__osm_sa_pkey_create: " + "New P_Key table for: port 0x%016" PRIx64 + ", lid 0x%X, port# 0x%X Block:%u\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh16( lid ), osm_physp_get_port_num( p_physp ), + block + ); + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.block_num = block; + p_rec_item->rec.port_num = osm_physp_get_port_num( p_physp ); + p_rec_item->rec.pkey_tbl = + *(osm_pkey_tbl_block_get(osm_physp_get_pkey_tbl(p_physp), block)); + + cl_qlist_insert_tail( p_ctxt->p_list, + (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_pkey_check_physp( + IN osm_pkey_rec_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp, + osm_pkey_search_ctxt_t* const p_ctxt ) +{ + ib_net64_t comp_mask = p_ctxt->comp_mask; + uint16_t block, num_blocks; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pkey_check_physp ); + + /* we got here with the phys port - all is left is to get the right block */ + if ( comp_mask & IB_PKEY_COMPMASK_BLOCK ) + { + __osm_sa_pkey_create( p_rcv, p_physp, p_ctxt, p_ctxt->block_num); + } + else + { + num_blocks = + osm_pkey_tbl_get_num_blocks( osm_physp_get_pkey_tbl( p_physp )); + for (block = 0; block < num_blocks; block++) { + __osm_sa_pkey_create( p_rcv, p_physp, p_ctxt, block ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_pkey_by_comp_mask( + IN osm_pkey_rec_rcv_t* const p_rcv, + IN const osm_port_t* const p_port, + osm_pkey_search_ctxt_t* const p_ctxt ) +{ + const ib_pkey_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + osm_physp_t * p_physp; + uint8_t port_num; + uint8_t num_ports; + const osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pkey_by_comp_mask ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + port_num = p_rcvd_rec->port_num; + p_req_physp = p_ctxt->p_req_physp; + + /* if this is a switch port we can search all ports + otherwise we must be looking on port 0 */ + if ( p_port->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH ) + { + /* we put it in the comp mask and port num */ + port_num = p_port->default_port_num; + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_pkey_by_comp_mask: " + "Using Physical Default Port Number: 0x%X (for End Node)\n", + port_num); + comp_mask |= IB_PKEY_COMPMASK_PORT; + } + + if( comp_mask & IB_PKEY_COMPMASK_PORT ) + { + if (port_num < osm_port_get_num_physp( p_port )) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + /* Check that the p_physp is valid, and that is shares a pkey + with the p_req_physp. 
*/ + if( p_physp && osm_physp_is_valid( p_physp ) && + (osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp)) ) + __osm_sa_pkey_check_physp( p_rcv, p_physp, p_ctxt ); + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_pkey_by_comp_mask: ERR 4603: " + "Given Physical Port Number: 0x%X is out of range should be < 0x%X\n", + port_num, osm_port_get_num_physp( p_port )); + goto Exit; + } + } + else + { + num_ports = osm_port_get_num_physp( p_port ); + for( port_num = 0; port_num < num_ports; port_num++ ) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + if( p_physp == NULL ) + continue; + + if( !osm_physp_is_valid( p_physp ) ) + continue; + + /* if the requester and the p_physp don't share a pkey - + continue */ + if (!osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp ) ) + continue; + + __osm_sa_pkey_check_physp( p_rcv, p_physp, p_ctxt ); + } + } + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_pkey_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_port_t* const p_port = (osm_port_t*)p_map_item; + osm_pkey_search_ctxt_t* const p_ctxt = (osm_pkey_search_ctxt_t *)context; + + __osm_sa_pkey_by_comp_mask( p_ctxt->p_rcv, p_port, p_ctxt ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rec_rcv_process( + IN osm_pkey_rec_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_pkey_table_record_t* p_rcvd_rec; + const cl_ptr_vector_t* p_tbl; + const osm_port_t* p_port = NULL; + const ib_pkey_table_t* p_pkey; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_pkey_table_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_pkey_search_ctxt_t context; + osm_pkey_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t comp_mask; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_pkey_rec_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_pkey_table_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_PKEY_TBL_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 4605: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* + p922 - P_KeyTableRecords shall only be provided in response + to trusted requests. + Check that the requester is a trusted one. + */ + if ( p_rcvd_mad->sm_key != p_rcv->p_subn->opt.sm_key ) + { + /* This is not a trusted requester! 
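
The trust gate above reduces to a single comparison; as a sketch (illustrative helper, the patch inlines it), equality of two network-order values needs no byte swap:

    static boolean_t sa_req_is_trusted(const ib_sa_mad_t *p_mad,
                                       ib_net64_t subnet_sm_key)
    {
        /* both fields are network order; equality is order-agnostic */
        return (p_mad->sm_key == subnet_sm_key) ? TRUE : FALSE;
    }
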
*/ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process ERR 4608: " + "Request from non-trusted requester: " + "Given SM_Key:0x%016" PRIx64 "\n", + cl_ntoh64(p_rcvd_mad->sm_key) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_REQ_INVALID ); + goto Exit; + } + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 4604: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + p_pkey = (ib_pkey_table_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.block_num = p_rcvd_rec->block_num; + context.p_req_physp = p_req_physp; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pkey_rec_rcv_process: " + "Got Query Lid:0x%04X(%02X), Block:0x%02X(%02X), Port:0x%02X(%02X)\n", + cl_ntoh16(p_rcvd_rec->lid), (comp_mask & IB_PKEY_COMPMASK_LID) != 0, + p_rcvd_rec->port_num, (comp_mask & IB_PKEY_COMPMASK_PORT) != 0, + p_rcvd_rec->block_num, (comp_mask & IB_PKEY_COMPMASK_BLOCK) != 0 ); + + cl_plock_acquire( p_rcv->p_lock ); + + /* + If the user specified a LID, it obviously narrows our + work load, since we don't have to search every port + */ + if( comp_mask & IB_PKEY_COMPMASK_LID ) + { + + p_tbl = &p_rcv->p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + status = osm_get_port_by_base_lid( p_rcv->p_subn, p_rcvd_rec->lid, &p_port ); + if ( ( status != IB_SUCCESS ) || ( p_port == NULL ) ) + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 460B: " + "No port found with LID 0x%x\n", + cl_ntoh16(p_rcvd_rec->lid) ); + } + } + + if ( status == IB_SUCCESS ) + { + /* if we got a unique port - no need for a port search */ + if( p_port ) + /* this does the loop on all the port phys ports */ + __osm_sa_pkey_by_comp_mask( p_rcv, p_port, &context ); + else + { + cl_qmap_apply_func( &p_rcv->p_subn->port_guid_tbl, + __osm_sa_pkey_by_comp_mask_cb, + &context ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 460A: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... 
*/ + p_rec_item = (osm_pkey_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_pkey_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_pkey_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_pkey_table_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pkey_rec_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pkey_rec_rcv_process: " + "Returning %u records\n", num_rec ); + + if((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_pkey_table_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 4606: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_pkey_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
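
The trimming above is the price of the no-RMPP build: only whole records that fit after the SA header in a single 256-byte MAD survive. With the usual sizes (MAD_BLOCK_SIZE of 256, IB_SA_MAD_HDR_SIZE of 56, and a 72-byte ib_pkey_table_record_t; sizes assumed here for illustration) that is two records per response:

    /* Whole records per single-MAD response: (256 - 56) / 72 == 2. */
    static size_t max_recs_per_mad(size_t rec_size)
    {
        return (size_t)(MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / rec_size;
    }
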
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_pkey_table_record_t) ); + + p_resp_rec = (ib_pkey_table_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_pkey_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_process: ERR 4607: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record_ctrl.c new file mode 100644 index 00000000..324b6edf --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_pkey_record_ctrl.c @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_pkey_rec_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_pkey_rec_rcv_process( ((osm_pkey_rec_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rec_rcv_ctrl_construct( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_pkey_rec_rcv_ctrl_destroy( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pkey_rec_rcv_ctrl_init( + IN osm_pkey_rec_rcv_ctrl_t* const p_ctrl, + IN osm_pkey_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_pkey_rec_rcv_ctrl_init ); + + osm_pkey_rec_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_PKEY_TBL_RECORD, + __osm_pkey_rec_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_pkey_rec_rcv_ctrl_init: ERR 4701: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record.c new file mode 100644 index 00000000..bf868991 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record.c @@ -0,0 +1,878 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pir_rcv_t. + * This object represents the PortInfoRecord Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.10 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_PIR_RCV_POOL_MIN_SIZE 32 +#define OSM_PIR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_pir_item +{ + cl_pool_item_t pool_item; + ib_portinfo_record_t rec; +} osm_pir_item_t; + +typedef struct _osm_pir_search_ctxt +{ + const ib_portinfo_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_pir_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; + boolean_t is_enhanced_comp_mask; +} osm_pir_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_pir_rcv_construct( + IN osm_pir_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pir_rcv_destroy( + IN osm_pir_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_pir_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_pir_rcv_init( + IN osm_pir_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_pir_rcv_init ); + + osm_pir_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_PIR_RCV_POOL_MIN_SIZE, + 0, + OSM_PIR_RCV_POOL_GROW_SIZE, + sizeof(osm_pir_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_pir_rcv_new_pir( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_physp_t* const p_physp, + IN cl_qlist_t* const p_list, + IN ib_net16_t const lid ) +{ + osm_pir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_pir_rcv_new_pir ); + + p_rec_item = (osm_pir_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_pir_rcv_new_pir: ERR 2102: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_pir_rcv_new_pir: " + "New PortInfoRecord: port 0x%016" 
PRIx64 + ", lid 0x%X, port# 0x%X\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh16( lid ), osm_physp_get_port_num( p_physp ) ); + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.port_info = *osm_physp_get_port_info_ptr( p_physp ); + p_rec_item->rec.port_num = osm_physp_get_port_num( p_physp ); + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_pir_create( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_physp_t* const p_physp, + IN osm_pir_search_ctxt_t* const p_ctxt ) +{ + uint8_t lmc; + uint16_t max_lid_ho; + uint16_t base_lid_ho; + uint16_t match_lid_ho; + osm_physp_t *p_node_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pir_create ); + + if (p_physp->p_node->sw) + { + p_node_physp = osm_node_get_physp_ptr( p_physp->p_node, 0 ); + base_lid_ho = cl_ntoh16( osm_physp_get_base_lid( p_node_physp ) ); + lmc = osm_switch_sp0_is_lmc_capable(p_physp->p_node->sw, p_rcv->p_subn) ? + osm_physp_get_lmc( p_node_physp ) : 0; + } + else + { + lmc = osm_physp_get_lmc( p_physp ); + base_lid_ho = cl_ntoh16( osm_physp_get_base_lid( p_physp ) ); + } + max_lid_ho = (uint16_t)( base_lid_ho + (1 << lmc) - 1 ); + + if( p_ctxt->comp_mask & IB_PIR_COMPMASK_LID ) + { + match_lid_ho = cl_ntoh16( p_ctxt->p_rcvd_rec->lid ); + + /* + We validate that the lid belongs to this node. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_pir_create: " + "Comparing LID: 0x%X <= 0x%X <= 0x%X\n", + base_lid_ho, match_lid_ho, max_lid_ho + ); + } + + if ( match_lid_ho < base_lid_ho || match_lid_ho > max_lid_ho ) + goto Exit; + } + + __osm_pir_rcv_new_pir( p_rcv, p_physp, p_ctxt->p_list, + cl_hton16( base_lid_ho ) ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_pir_check_physp( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_physp_t* const p_physp, + osm_pir_search_ctxt_t* const p_ctxt ) +{ + const ib_portinfo_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + const ib_port_info_t* p_comp_pi; + const ib_port_info_t* p_pi; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pir_check_physp ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + p_comp_pi = &p_rcvd_rec->port_info; + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + osm_dump_port_info( + p_rcv->p_log, + osm_node_get_node_guid( p_physp->p_node ), + p_physp->port_guid, + p_physp->port_num, + &p_physp->port_info, + OSM_LOG_DEBUG ); + + /* We have to re-check the base_lid, since if the given + base_lid in p_pi is zero - we are comparing on all ports. 
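
The LID comparison logged in __osm_sa_pir_create above is a containment test over the port's LID span; for a switch the span is anchored at port 0's base LID, and LMC widens it only when enhanced switch port 0 supports LMC. Condensed into an illustrative form (helper name hypothetical):

    static boolean_t lid_matches_physp(uint16_t match_lid_ho,
                                       uint16_t base_lid_ho, uint8_t lmc)
    {
        uint16_t max_lid_ho = (uint16_t)(base_lid_ho + (1 << lmc) - 1);

        return (match_lid_ho >= base_lid_ho &&
                match_lid_ho <= max_lid_ho) ? TRUE : FALSE;
    }
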
*/ + if( comp_mask & IB_PIR_COMPMASK_BASELID ) + { + if( p_comp_pi->base_lid != p_pi->base_lid ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_MKEY ) + { + if( p_comp_pi->m_key != p_pi->m_key ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_GIDPRE ) + { + if( p_comp_pi->subnet_prefix != p_pi->subnet_prefix ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_SMLID ) + { + if( p_comp_pi->master_sm_base_lid != p_pi->master_sm_base_lid ) + goto Exit; + } + + /* IBTA 1.2 errata provides support for bitwise compare if the bit 31 + of the attribute modifier of the Get/GetTable is set */ + if( comp_mask & IB_PIR_COMPMASK_CAPMASK ) + { + if (p_ctxt->is_enhanced_comp_mask) + { + if ( ( ( p_comp_pi->capability_mask & p_pi->capability_mask ) != p_comp_pi->capability_mask) ) + goto Exit; + } + else + { + if( p_comp_pi->capability_mask != p_pi->capability_mask ) + goto Exit; + } + } + + if( comp_mask & IB_PIR_COMPMASK_DIAGCODE ) + { + if( p_comp_pi->diag_code != p_pi->diag_code ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_MKEYLEASEPRD ) + { + if( p_comp_pi->m_key_lease_period != p_pi->m_key_lease_period ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LOCALPORTNUM ) + { + if( p_comp_pi->local_port_num != p_pi->local_port_num ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LNKWIDTHSUPPORT ) + { + if( p_comp_pi->link_width_supported != p_pi->link_width_supported ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LNKWIDTHACTIVE ) + { + if( p_comp_pi->link_width_active != p_pi->link_width_active ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LINKWIDTHENABLED ) + { + if( p_comp_pi->link_width_enabled != p_pi->link_width_enabled ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LNKSPEEDSUPPORT ) + { + if( ib_port_info_get_link_speed_sup( p_comp_pi )!= + ib_port_info_get_link_speed_sup( p_pi) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_PORTSTATE ) + { + if( ib_port_info_get_port_state( p_comp_pi ) != + ib_port_info_get_port_state( p_pi ) ) + goto Exit; + } + if ( comp_mask & IB_PIR_COMPMASK_PORTPHYSTATE ) + { + if ( ib_port_info_get_port_phys_state( p_comp_pi ) != + ib_port_info_get_port_phys_state( p_pi ) ) + goto Exit; + } + if ( comp_mask & IB_PIR_COMPMASK_LINKDWNDFLTSTATE ) + { + if ( ib_port_info_get_link_down_def_state( p_comp_pi ) != + ib_port_info_get_link_down_def_state( p_pi ) ) + goto Exit; + } + if ( comp_mask & IB_PIR_COMPMASK_MKEYPROTBITS ) + { + if( ib_port_info_get_mpb( p_comp_pi ) != + ib_port_info_get_mpb( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_LMC ) + { + if( ib_port_info_get_lmc( p_comp_pi ) != + ib_port_info_get_lmc( p_pi ) ) + goto Exit; + } + if ( comp_mask & IB_PIR_COMPMASK_LINKSPEEDACTIVE ) + { + if ( ib_port_info_get_link_speed_active( p_comp_pi ) != + ib_port_info_get_link_speed_active( p_pi ) ) + goto Exit; + } + if ( comp_mask & IB_PIR_COMPMASK_LINKSPEEDENABLE ) + { + if ( ib_port_info_get_link_speed_enabled( p_comp_pi ) != + ib_port_info_get_link_speed_enabled( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_NEIGHBORMTU ) + { + if( ib_port_info_get_neighbor_mtu( p_comp_pi ) != + ib_port_info_get_neighbor_mtu( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_MASTERSMSL ) + { + if( ib_port_info_get_master_smsl( p_comp_pi ) != + ib_port_info_get_master_smsl( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_VLCAP ) + { + if( ib_port_info_get_vl_cap( p_comp_pi ) != + ib_port_info_get_vl_cap( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_INITTYPE ) + { + if( 
ib_port_info_get_init_type( p_comp_pi ) != + ib_port_info_get_init_type( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_VLHIGHLIMIT ) + { + if( p_comp_pi->vl_high_limit != p_pi->vl_high_limit ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_VLARBHIGHCAP ) + { + if( p_comp_pi->vl_arb_high_cap != p_pi->vl_arb_high_cap ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_VLARBLOWCAP ) + { + if( p_comp_pi->vl_arb_low_cap != p_pi->vl_arb_low_cap ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_MTUCAP ) + { + if( ib_port_info_get_mtu_cap( p_comp_pi ) != + ib_port_info_get_mtu_cap( p_pi ) ) + goto Exit; + } + if( comp_mask & IB_PIR_COMPMASK_VLSTALLCNT ) + { + if( ib_port_info_get_vl_stall_count( p_comp_pi ) != + ib_port_info_get_vl_stall_count( p_pi ) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_HOQLIFE ) + { + if ((p_comp_pi->vl_stall_life & 0x1F) != (p_pi->vl_stall_life & 0x1F) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_OPVLS ) + { + if ((p_comp_pi->vl_enforce & 0xF0) != (p_pi->vl_enforce & 0xF0) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_PARENFIN ) + { + if ((p_comp_pi->vl_enforce & 0x08) != (p_pi->vl_enforce & 0x08) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_PARENFOUT ) + { + if ((p_comp_pi->vl_enforce & 0x04) != (p_pi->vl_enforce & 0x04) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_FILTERRAWIN ) + { + if ((p_comp_pi->vl_enforce & 0x02) != (p_pi->vl_enforce & 0x02) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_FILTERRAWOUT ) + { + if ((p_comp_pi->vl_enforce & 0x01) != (p_pi->vl_enforce & 0x01) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_MKEYVIO ) + { + if (p_comp_pi->m_key_violations != p_pi->m_key_violations ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_PKEYVIO ) + { + if (p_comp_pi->p_key_violations != p_pi->p_key_violations ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_QKEYVIO ) + { + if (p_comp_pi->q_key_violations != p_pi->q_key_violations ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_GUIDCAP ) + { + if (p_comp_pi->guid_cap != p_pi->guid_cap ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_SUBNTO ) + { + if (ib_port_info_get_timeout(p_comp_pi) != ib_port_info_get_timeout(p_pi)) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_RESPTIME ) + { + if ((p_comp_pi->resp_time_value & 0x1F) != (p_pi->resp_time_value &0x1F) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_LOCALPHYERR ) + { + if( ib_port_info_get_local_phy_err_thd( p_comp_pi ) != + ib_port_info_get_local_phy_err_thd( p_pi ) ) + goto Exit; + } + if (comp_mask & IB_PIR_COMPMASK_OVERRUNERR) + { + if( ib_port_info_get_overrun_err_thd( p_comp_pi ) != + ib_port_info_get_overrun_err_thd( p_pi ) ) + goto Exit; + } + + __osm_sa_pir_create( p_rcv, p_physp, p_ctxt ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_pir_by_comp_mask( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_port_t* const p_port, + osm_pir_search_ctxt_t* const p_ctxt ) +{ + const ib_portinfo_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + const osm_physp_t* p_physp; + uint8_t port_num; + uint8_t num_ports; + const osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_pir_by_comp_mask ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + p_req_physp = p_ctxt->p_req_physp; + + num_ports = osm_port_get_num_physp( p_port ); + + if( comp_mask & 
IB_PIR_COMPMASK_PORTNUM ) + { + if (p_rcvd_rec->port_num < num_ports) + { + p_physp = osm_port_get_phys_ptr( p_port, p_rcvd_rec->port_num ); + /* Check that the p_physp is valid, and that the p_physp and the + p_req_physp share a pkey. */ + if( p_physp && osm_physp_is_valid( p_physp ) && + osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp)) + __osm_sa_pir_check_physp( p_rcv, p_physp, p_ctxt ); + } + } + else + { + for( port_num = 0; port_num < num_ports; port_num++ ) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + if( p_physp == NULL ) + continue; + + if( !osm_physp_is_valid( p_physp ) ) + continue; + + /* if the requester and the p_physp don't share a pkey - + continue */ + if (!osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp ) ) + continue; + + __osm_sa_pir_check_physp( p_rcv, p_physp, p_ctxt ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_pir_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_port_t* const p_port = (osm_port_t*)p_map_item; + osm_pir_search_ctxt_t* const p_ctxt = (osm_pir_search_ctxt_t *)context; + + __osm_sa_pir_by_comp_mask( p_ctxt->p_rcv, p_port, p_ctxt ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_pir_rcv_process( + IN osm_pir_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_portinfo_record_t* p_rcvd_rec; + const cl_ptr_vector_t* p_tbl; + const osm_port_t* p_port = NULL; + const ib_port_info_t* p_pi; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_portinfo_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_pir_search_ctxt_t context; + osm_pir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t comp_mask; + osm_physp_t* p_req_physp; + boolean_t trusted_req = TRUE; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_pir_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_portinfo_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_PORTINFO_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2105: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
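+     This resolves the physical port the MAD arrived from; it is used
+     below so that only records for ports sharing a partition with the
+     requester are returned.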
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2104: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_portinfo_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG ); + + p_tbl = &p_rcv->p_subn->port_lid_tbl; + p_pi = &p_rcvd_rec->port_info; + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + context.is_enhanced_comp_mask = cl_ntoh32(p_rcvd_mad->attr_mod) & (1 << 31); + + cl_plock_acquire( p_rcv->p_lock ); + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + /* + If the user specified a LID, it obviously narrows our + work load, since we don't have to search every port + */ + if( comp_mask & IB_PIR_COMPMASK_LID ) + { + status = osm_get_port_by_base_lid( p_rcv->p_subn, p_rcvd_rec->lid, &p_port ); + if ( ( status != IB_SUCCESS ) || ( p_port == NULL ) ) + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2109: " + "No port found with LID 0x%x\n", + cl_ntoh16(p_rcvd_rec->lid) ); + } + } + else + { + if( comp_mask & IB_PIR_COMPMASK_BASELID ) + { + if ((uint16_t)cl_ptr_vector_get_size(p_tbl) > cl_ntoh16(p_pi->base_lid)) + { + p_port = cl_ptr_vector_get( p_tbl, cl_ntoh16(p_pi->base_lid) ); + } + else + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2103: " + "Given LID (0x%X) is out of range:0x%X\n", + cl_ntoh16(p_pi->base_lid), cl_ptr_vector_get_size(p_tbl)); + } + } + } + + if ( status == IB_SUCCESS ) + { + if( p_port ) + __osm_sa_pir_by_comp_mask( p_rcv, p_port, &context ); + else + { + cl_qmap_apply_func( &p_rcv->p_subn->port_guid_tbl, + __osm_sa_pir_by_comp_mask_cb, + &context ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2108: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... 
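+     i.e. drain the remaining records back into the pool before
+     returning, so nothing leaks on this error path.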
*/ + p_rec_item = (osm_pir_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_pir_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_pir_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_portinfo_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_pir_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_pir_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_portinfo_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2106: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_pir_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RESOURCES ); + + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. + */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_portinfo_record_t) ); + + p_resp_rec = (ib_portinfo_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + /* + p922 - The M_Key returned shall be zero, except in the case of a + trusted request. + Note: In the mad controller we check that the SM_Key received on + the mad is valid. Meaning - is either zero or equal to the local + sm_key. 
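+     So at this point a non-zero SM_Key implies the correct local
+     sm_key, which is exactly what the trusted_req flag below encodes.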
+ */ + if (p_rcvd_mad->sm_key == 0) + trusted_req = FALSE; + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_pir_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + if (trusted_req == FALSE) + p_resp_rec->port_info.m_key = 0; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_pir_rcv_process: ERR 2107: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record_ctrl.c new file mode 100644 index 00000000..a097cb14 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_portinfo_record_ctrl.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_pir_rcv_ctrl_t. + * This object represents the PortInfoRecord request controller object. + * This object is part of the opensm family of objects. 
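+ *    The controller's only role is to register the receiver with the
+ *    dispatcher so that PortInfoRecord queries are routed to it.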
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.5 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include <string.h>
+#include <opensm/osm_sa_portinfo_record_ctrl.h>
+#include <opensm/osm_msgdef.h>
+
+/**********************************************************************
+ **********************************************************************/
+void
+__osm_pir_rcv_ctrl_disp_callback(
+  IN void *context,
+  IN void *p_data )
+{
+  /* ignore return status when invoked via the dispatcher */
+  osm_pir_rcv_process( ((osm_pir_rcv_ctrl_t*)context)->p_rcv,
+                       (osm_madw_t*)p_data );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_pir_rcv_ctrl_construct(
+  IN osm_pir_rcv_ctrl_t* const p_ctrl )
+{
+  memset( p_ctrl, 0, sizeof(*p_ctrl) );
+  p_ctrl->h_disp = CL_DISP_INVALID_HANDLE;
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_pir_rcv_ctrl_destroy(
+  IN osm_pir_rcv_ctrl_t* const p_ctrl )
+{
+  CL_ASSERT( p_ctrl );
+  cl_disp_unregister( p_ctrl->h_disp );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_pir_rcv_ctrl_init(
+  IN osm_pir_rcv_ctrl_t* const p_ctrl,
+  IN osm_pir_rcv_t* const p_rcv,
+  IN osm_log_t* const p_log,
+  IN cl_dispatcher_t* const p_disp )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_log, osm_pir_rcv_ctrl_init );
+
+  osm_pir_rcv_ctrl_construct( p_ctrl );
+  p_ctrl->p_log = p_log;
+  p_ctrl->p_rcv = p_rcv;
+  p_ctrl->p_disp = p_disp;
+
+  p_ctrl->h_disp = cl_disp_register(
+    p_disp,
+    OSM_MSG_MAD_PORTINFO_RECORD,
+    __osm_pir_rcv_ctrl_disp_callback,
+    p_ctrl );
+
+  if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE )
+  {
+    osm_log( p_log, OSM_LOG_ERROR,
+             "osm_pir_rcv_ctrl_init: ERR 2201: "
+             "Dispatcher registration failed\n" );
+    status = IB_INSUFFICIENT_RESOURCES;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( p_log );
+  return( status );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_response.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_response.c
new file mode 100644
index 00000000..9106aa1f
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_response.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_sa_resp_t.
+ *    This object represents the SA query responder.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.6 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include <string.h>
+#include <iba/ib_types.h>
+#include <complib/cl_debug.h>
+#include <opensm/osm_sa_response.h>
+#include <opensm/osm_madw.h>
+#include <opensm/osm_helper.h>
+#include <vendor/osm_vendor_api.h>
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_sa_resp_construct(
+  IN osm_sa_resp_t* const p_resp )
+{
+  memset( p_resp, 0, sizeof(*p_resp) );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_sa_resp_destroy(
+  IN osm_sa_resp_t* const p_resp )
+{
+  CL_ASSERT( p_resp );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_sa_resp_init(
+  IN osm_sa_resp_t* const p_resp,
+  IN osm_mad_pool_t* const p_pool,
+  IN osm_log_t* const p_log )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_log, osm_sa_resp_init );
+
+  osm_sa_resp_construct( p_resp );
+
+  p_resp->p_log = p_log;
+  p_resp->p_pool = p_pool;
+
+  OSM_LOG_EXIT( p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_sa_send_error(
+  IN osm_sa_resp_t* const p_resp,
+  IN const osm_madw_t* const p_madw,
+  IN const ib_net16_t sa_status )
+{
+  osm_madw_t* p_resp_madw;
+  ib_sa_mad_t* p_resp_sa_mad;
+  ib_sa_mad_t* p_sa_mad;
+  ib_api_status_t status;
+
+  OSM_LOG_ENTER( p_resp->p_log, osm_sa_send_error );
+
+  /* avoid races - if we are exiting - exit */
+  if (osm_exit_flag)
+  {
+    osm_log( p_resp->p_log, OSM_LOG_DEBUG,
+             "osm_sa_send_error: "
+             "Ignoring requested send after exit\n" );
+    goto Exit;
+  }
+
+  p_resp_madw = osm_mad_pool_get( p_resp->p_pool,
+                                  p_madw->h_bind, MAD_BLOCK_SIZE, &p_madw->mad_addr );
+
+  if( p_resp_madw == NULL )
+  {
+    osm_log( p_resp->p_log, OSM_LOG_ERROR,
+             "osm_sa_send_error: ERR 2301: "
+             "Unable to acquire response MAD\n" );
+    goto Exit;
+  }
+
+  p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw );
+  p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw );
+
+  /* Copy the MAD header back into the response mad */
+  *p_resp_sa_mad = *p_sa_mad;
+  p_resp_sa_mad->status = sa_status;
+
+  if( p_resp_sa_mad->method == IB_MAD_METHOD_SET )
+    p_resp_sa_mad->method = IB_MAD_METHOD_GET;
+
+  p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK;
+
+  /*
+   * C15-0.1.5 - always return SM_Key = 0 (table 185 p 884)
+   */
+  p_resp_sa_mad->sm_key = 0;
+
+  /*
+   * o15-0.2.7 - The PathRecord Attribute ID shall be used in
+   * the response (to a SubnAdmGetMulti(MultiPathRecord))
+   */
+  if( p_resp_sa_mad->attr_id == IB_MAD_ATTR_MULTIPATH_RECORD )
+    p_resp_sa_mad->attr_id = IB_MAD_ATTR_PATH_RECORD;
+
+  if( osm_log_is_active( p_resp->p_log, OSM_LOG_FRAMES ) )
+    osm_dump_sa_mad( p_resp->p_log, p_resp_sa_mad, OSM_LOG_FRAMES );
+
+  status = osm_vendor_send( osm_madw_get_bind_handle( p_resp_madw ),
+                            p_resp_madw, FALSE );
+
+  if(
status != IB_SUCCESS ) + { + osm_log( p_resp->p_log, OSM_LOG_ERROR, + "osm_sa_send_error: ERR 2302: " + "Error sending MAD (%s)\n", ib_get_err_str( status ) ); + /* osm_mad_pool_put( p_resp->p_pool, p_resp_madw ); */ + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_resp->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record.c new file mode 100644 index 00000000..44ab006f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record.c @@ -0,0 +1,1204 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sr_rcv_t. + * This object represents the ServiceRecord Receiver object. + * This object is part of the opensm family of objects. 
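+ *    The receiver serves Get/GetTable queries and also handles Set and
+ *    Delete registrations, aging registered services by their lease.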
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.9 $
+ */
+
+#if HAVE_CONFIG_H
+# include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include <string.h>
+#include <iba/ib_types.h>
+#include <complib/cl_qmap.h>
+#include <complib/cl_passivelock.h>
+#include <complib/cl_debug.h>
+#include <complib/cl_qlist.h>
+#include <complib/cl_timer.h>
+#include <vendor/osm_vendor_api.h>
+#include <opensm/osm_service.h>
+#include <opensm/osm_helper.h>
+#include <opensm/osm_port.h>
+#include <opensm/osm_node.h>
+#include <opensm/osm_pkey.h>
+#include <opensm/osm_madw.h>
+#include <opensm/osm_sa_response.h>
+#include <opensm/osm_sa_service_record.h>
+
+#define OSM_SR_RCV_POOL_MIN_SIZE 64
+#define OSM_SR_RCV_POOL_GROW_SIZE 64
+
+typedef struct _osm_sr_item
+{
+  cl_pool_item_t pool_item;
+  ib_service_record_t service_rec;
+} osm_sr_item_t;
+
+typedef struct osm_sr_match_item
+{
+  cl_qlist_t sr_list;
+  ib_service_record_t* p_service_rec;
+  ib_net64_t comp_mask;
+  osm_sr_rcv_t* p_rcv;
+} osm_sr_match_item_t;
+
+typedef struct _osm_sr_search_ctxt
+{
+  osm_sr_match_item_t* p_sr_item;
+  const osm_physp_t* p_req_physp;
+} osm_sr_search_ctxt_t;
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_sr_rcv_construct(
+  IN osm_sr_rcv_t* const p_rcv )
+{
+  memset( p_rcv, 0, sizeof(*p_rcv) );
+  cl_qlock_pool_construct( &p_rcv->sr_pool );
+  cl_timer_construct( &p_rcv->sr_timer );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_sr_rcv_destroy(
+  IN osm_sr_rcv_t* const p_rcv )
+{
+  OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_destroy );
+  cl_qlock_pool_destroy( &p_rcv->sr_pool );
+  cl_timer_trim( &p_rcv->sr_timer, 1 );
+  cl_timer_destroy( &p_rcv->sr_timer );
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_sr_rcv_init(
+  IN osm_sr_rcv_t* const p_rcv,
+  IN osm_sa_resp_t* const p_resp,
+  IN osm_mad_pool_t* const p_mad_pool,
+  IN osm_subn_t* const p_subn,
+  IN osm_log_t* const p_log,
+  IN cl_plock_t* const p_lock )
+{
+  ib_api_status_t status = IB_ERROR;
+  cl_status_t cl_status;
+
+  OSM_LOG_ENTER( p_log, osm_sr_rcv_init );
+
+  osm_sr_rcv_construct( p_rcv );
+
+  p_rcv->p_log = p_log;
+  p_rcv->p_subn = p_subn;
+  p_rcv->p_lock = p_lock;
+  p_rcv->p_resp = p_resp;
+  p_rcv->p_mad_pool = p_mad_pool;
+
+  cl_status = cl_qlock_pool_init( &p_rcv->sr_pool,
+                                  OSM_SR_RCV_POOL_MIN_SIZE,
+                                  0,
+                                  OSM_SR_RCV_POOL_GROW_SIZE,
+                                  sizeof(osm_sr_item_t),
+                                  NULL, NULL, NULL );
+  if( cl_status != CL_SUCCESS )
+    goto Exit;
+
+  cl_status = cl_timer_init( &p_rcv->sr_timer,
+                             osm_sr_rcv_lease_cb,
+                             p_rcv );
+  if( cl_status != CL_SUCCESS )
+    goto Exit;
+
+  status = IB_SUCCESS;
+ Exit:
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static boolean_t
+__match_service_pkey_with_ports_pkey(
+  IN osm_sr_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw,
+  ib_service_record_t* const p_service_rec,
+  ib_net64_t const comp_mask )
+{
+  boolean_t valid = TRUE;
+  osm_physp_t* p_req_physp;
+  ib_net64_t service_guid;
+  osm_port_t* service_port;
+
+  /* update the requester physical port.
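+     i.e. resolve the physical port the MAD arrived from, so that the
+     ServiceP_Key (when present) can be checked against its pkey table.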
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw)); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__match_service_pkey_with_ports_pkey: ERR 2404: " + "Cannot find requester physical port\n" ); + valid = FALSE; + goto Exit; + } + + if((comp_mask & IB_SR_COMPMASK_SPKEY) == IB_SR_COMPMASK_SPKEY) + { + /* We have a ServiceP_Key - check matching on requester port, and + ServiceGid port (if such exists) */ + /* Make sure it matches the p_req_physp */ + if (!osm_physp_has_pkey(p_rcv->p_log, p_service_rec->service_pkey, p_req_physp)) + { + valid = FALSE; + goto Exit; + } + + /* Make sure it matches the port of the ServiceGid */ + if((comp_mask & IB_SR_COMPMASK_SGID) == IB_SR_COMPMASK_SGID) + { + service_guid = p_service_rec->service_gid.unicast.interface_id; + service_port = (osm_port_t*)cl_qmap_get( &p_rcv->p_subn->port_guid_tbl, service_guid ); + if (service_port == (osm_port_t*)cl_qmap_end( &p_rcv->p_subn->port_guid_tbl )) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__match_service_pkey_with_ports_pkey: ERR 2405: " + "No port object for port 0x%016" PRIx64 "\n", + cl_ntoh64( service_guid ) ); + valid = FALSE; + goto Exit; + } + /* check on the table of the default physical port of the service port */ + if ( !osm_physp_has_pkey( p_rcv->p_log, + p_service_rec->service_pkey, + osm_port_get_default_phys_ptr(service_port) ) ) + { + valid = FALSE; + goto Exit; + } + } + } + + Exit: + return valid; +} + +/********************************************************************** + **********************************************************************/ +boolean_t +__match_name_to_key_association( + IN osm_sr_rcv_t* const p_rcv, + ib_service_record_t* p_service_rec, + ib_net64_t comp_mask ) +{ + UNUSED_PARAM( p_service_rec ); + UNUSED_PARAM( p_rcv ); + + if( (comp_mask & (IB_SR_COMPMASK_SKEY | IB_SR_COMPMASK_SNAME)) == + (IB_SR_COMPMASK_SKEY | IB_SR_COMPMASK_SNAME) ) + { + /* For now, we are not maintaining the ServiceAssociation record + * so just return TRUE + */ + return TRUE; + } + + return TRUE; +} + +/********************************************************************** + **********************************************************************/ +static boolean_t +__validate_sr( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + boolean_t valid = TRUE; + ib_sa_mad_t * p_sa_mad; + ib_service_record_t* p_recvd_service_rec; + + OSM_LOG_ENTER( p_rcv->p_log, __validate_sr ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_service_rec = + (ib_service_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + valid = __match_service_pkey_with_ports_pkey( + p_rcv, + p_madw, + p_recvd_service_rec, + p_sa_mad->comp_mask ); + + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_sr: " + "No Match for Service Pkey\n" ); + valid = FALSE; + goto Exit; + } + + valid = __match_name_to_key_association( + p_rcv, + p_recvd_service_rec, + p_sa_mad->comp_mask ); + + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__validate_sr: " + "Service Record Name to key matching failed\n" ); + valid = FALSE; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return valid; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sr_rcv_respond( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw, + IN cl_qlist_t* const p_list ) +{ + osm_madw_t* 
p_resp_madw; + const ib_sa_mad_t* p_sa_mad; + ib_sa_mad_t* p_resp_sa_mad; + uint32_t num_rec, num_copied; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + ib_service_record_t* p_resp_sr; + ib_api_status_t status; + osm_sr_item_t* p_sr_item; + const ib_sa_mad_t* p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + boolean_t trusted_req = TRUE; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sr_rcv_respond ); + + num_rec = cl_qlist_count( p_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if ( (p_rcvd_mad->method == IB_MAD_METHOD_GET) && + (num_rec > 1)) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sr_rcv_respond: ERR 2406: " + "Got more than one record for SubnAdmGet (%u).\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... */ + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + while( p_sr_item != (osm_sr_item_t*)cl_qlist_end( p_list ) ) + { + cl_qlock_pool_put( &p_rcv->sr_pool, &p_sr_item->pool_item ); + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + } + + goto Exit; + } + +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_service_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_sr_rcv_respond: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sr_rcv_respond: " + "Generating response with %u records\n", num_rec ); + } + + /* + Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_service_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + if( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sr_rcv_respond: ERR 2402: " + "Unable to allocate MAD\n" ); + /* Release the quick pool items */ + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + while( p_sr_item != (osm_sr_item_t*)cl_qlist_end( p_list ) ) + { + cl_qlock_pool_put( &p_rcv->sr_pool, &p_sr_item->pool_item ); + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + } + + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + memcpy( p_resp_sa_mad, p_sa_mad, IB_SA_MAD_HDR_SIZE ); + + /* but what if it was a SET ? 
setting the response bit is not enough */ + if (p_rcvd_mad->method == IB_MAD_METHOD_SET) + { + p_resp_sa_mad->method = IB_MAD_METHOD_GET; + } + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_service_record_t) ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + p_resp_sr = (ib_service_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + + if( (p_resp_sa_mad->method != IB_MAD_METHOD_GETTABLE_RESP) && + (num_rec == 0)) + { + p_resp_sa_mad->status = IB_SA_MAD_STATUS_NO_RECORDS; + memset( p_resp_sr, 0, sizeof(*p_resp_sr) ); + } + else + { + /* + p923 - The ServiceKey shall be set to 0, except in the case of a trusted + request. + Note: In the mad controller we check that the SM_Key received on + the mad is valid. Meaning - is either zero or equal to the local + sm_key. + */ + if (p_sa_mad->sm_key == 0) + trusted_req = FALSE; + + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + + /* we need to track the number of copied items so we can + * stop the copy - but clear them all + */ + num_copied = 0; + while( p_sr_item != (osm_sr_item_t*)cl_qlist_end( p_list ) ) + { + /* Copy the Link Records from the list into the MAD */ + if (num_copied < num_rec) + { + *p_resp_sr = p_sr_item->service_rec; + if (trusted_req == FALSE) + memset(p_resp_sr->service_key, 0, sizeof(p_resp_sr->service_key)); + + num_copied++; + } + cl_qlock_pool_put( &p_rcv->sr_pool, &p_sr_item->pool_item ); + p_resp_sr++; + p_sr_item = (osm_sr_item_t*)cl_qlist_remove_head( p_list ); + } + } + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sr_rcv_respond: ERR 2407: " + "Unable to send MAD (%s)\n", ib_get_err_str( status ) ); + /* osm_mad_pool_put( p_rcv->p_mad_pool, p_resp_madw ); */ + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__get_matching_sr( + IN cl_list_item_t* const p_list_item, + IN void* context ) +{ + osm_sr_search_ctxt_t* const p_ctxt = (osm_sr_search_ctxt_t*)context; + osm_svcr_t * p_svcr = (osm_svcr_t*)p_list_item; + osm_sr_item_t* p_sr_pool_item; + osm_sr_match_item_t* p_sr_item =p_ctxt->p_sr_item; + ib_net64_t comp_mask = p_sr_item->comp_mask; + const osm_physp_t* p_req_physp = p_ctxt->p_req_physp; + + if((comp_mask & IB_SR_COMPMASK_SID) == IB_SR_COMPMASK_SID) + { + if(p_sr_item->p_service_rec->service_id != + p_svcr->service_record.service_id) + return; + } + if((comp_mask & IB_SR_COMPMASK_SGID) == IB_SR_COMPMASK_SGID) + { + if( + memcmp(&p_sr_item->p_service_rec->service_gid, + &p_svcr->service_record.service_gid, + sizeof(p_svcr->service_record.service_gid)) != 0) + return; + } + if((comp_mask & IB_SR_COMPMASK_SPKEY) == IB_SR_COMPMASK_SPKEY ) + 
{ + if(p_sr_item->p_service_rec->service_pkey != + p_svcr->service_record.service_pkey) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SKEY) == IB_SR_COMPMASK_SKEY) + { + if(memcmp(p_sr_item->p_service_rec->service_key , + p_svcr->service_record.service_key, + 16*sizeof(uint8_t))) + return; + } + if((comp_mask & IB_SR_COMPMASK_SNAME) == IB_SR_COMPMASK_SNAME) + { + if( + memcmp(p_sr_item->p_service_rec->service_name, + p_svcr->service_record.service_name, + sizeof(p_svcr->service_record.service_name)) != 0 + ) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_0) == IB_SR_COMPMASK_SDATA8_0) + { + if(p_sr_item->p_service_rec->service_data8[0] != + p_svcr->service_record.service_data8[0]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_1) == IB_SR_COMPMASK_SDATA8_1) + { + if(p_sr_item->p_service_rec->service_data8[1] != + p_svcr->service_record.service_data8[1]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_2) == IB_SR_COMPMASK_SDATA8_2) + { + if(p_sr_item->p_service_rec->service_data8[2] != + p_svcr->service_record.service_data8[2]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_3) == IB_SR_COMPMASK_SDATA8_3) + { + if(p_sr_item->p_service_rec->service_data8[3] != + p_svcr->service_record.service_data8[3]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_4)== IB_SR_COMPMASK_SDATA8_4) + { + if(p_sr_item->p_service_rec->service_data8[4] != + p_svcr->service_record.service_data8[4]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_5)== IB_SR_COMPMASK_SDATA8_5) + { + if(p_sr_item->p_service_rec->service_data8[5] != + p_svcr->service_record.service_data8[5]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_6)== IB_SR_COMPMASK_SDATA8_6) + { + if(p_sr_item->p_service_rec->service_data8[6]!= + p_svcr->service_record.service_data8[6]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_7)== IB_SR_COMPMASK_SDATA8_7) + { + if(p_sr_item->p_service_rec->service_data8[7]!= + p_svcr->service_record.service_data8[7]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_8)== IB_SR_COMPMASK_SDATA8_8) + { + if(p_sr_item->p_service_rec->service_data8[8]!= + p_svcr->service_record.service_data8[8]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_9)== IB_SR_COMPMASK_SDATA8_9) + { + if(p_sr_item->p_service_rec->service_data8[9]!= + p_svcr->service_record.service_data8[9]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_10)== IB_SR_COMPMASK_SDATA8_10) + { + if(p_sr_item->p_service_rec->service_data8[10]!= + p_svcr->service_record.service_data8[10]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_11)== IB_SR_COMPMASK_SDATA8_11) + { + if(p_sr_item->p_service_rec->service_data8[11]!= + p_svcr->service_record.service_data8[11]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA8_12)== IB_SR_COMPMASK_SDATA8_12) + { + if(p_sr_item->p_service_rec->service_data8[12]!= + p_svcr->service_record.service_data8[12]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_13)== IB_SR_COMPMASK_SDATA8_13) + { + if(p_sr_item->p_service_rec->service_data8[13]!= + p_svcr->service_record.service_data8[13]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_14)== IB_SR_COMPMASK_SDATA8_14) + { + if(p_sr_item->p_service_rec->service_data8[14]!= + p_svcr->service_record.service_data8[14]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA8_15)== IB_SR_COMPMASK_SDATA8_15) + { + if(p_sr_item->p_service_rec->service_data8[15]!= + p_svcr->service_record.service_data8[15]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_0)== IB_SR_COMPMASK_SDATA16_0) + { + 
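+    /* the data16/32/64 elements below are matched verbatim, one
+       component-mask bit per array element, just like data8 above */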
if(p_sr_item->p_service_rec->service_data16[0]!= + p_svcr->service_record.service_data16[0]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_1)== IB_SR_COMPMASK_SDATA16_1) + { + if(p_sr_item->p_service_rec->service_data16[1]!= + p_svcr->service_record.service_data16[1]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_2)== IB_SR_COMPMASK_SDATA16_2) + { + if(p_sr_item->p_service_rec->service_data16[2]!= + p_svcr->service_record.service_data16[2]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_3)== IB_SR_COMPMASK_SDATA16_3) + { + if(p_sr_item->p_service_rec->service_data16[3]!= + p_svcr->service_record.service_data16[3]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_4)== IB_SR_COMPMASK_SDATA16_4) + { + if(p_sr_item->p_service_rec->service_data16[4]!= + p_svcr->service_record.service_data16[4]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_5)== IB_SR_COMPMASK_SDATA16_5) + { + if(p_sr_item->p_service_rec->service_data16[5]!= + p_svcr->service_record.service_data16[5]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_6)== IB_SR_COMPMASK_SDATA16_6) + { + if(p_sr_item->p_service_rec->service_data16[6]!= + p_svcr->service_record.service_data16[6]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA16_7)== IB_SR_COMPMASK_SDATA16_7) + { + if(p_sr_item->p_service_rec->service_data16[7]!= + p_svcr->service_record.service_data16[7]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA32_0)== IB_SR_COMPMASK_SDATA32_0) + { + if(p_sr_item->p_service_rec->service_data32[0]!= + p_svcr->service_record.service_data32[0]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA32_1)== IB_SR_COMPMASK_SDATA32_1) + { + if(p_sr_item->p_service_rec->service_data32[1]!= + p_svcr->service_record.service_data32[1]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA32_2)== IB_SR_COMPMASK_SDATA32_2) + { + if(p_sr_item->p_service_rec->service_data32[2]!= + p_svcr->service_record.service_data32[2]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA32_3)== IB_SR_COMPMASK_SDATA32_3) + { + if(p_sr_item->p_service_rec->service_data32[3]!= + p_svcr->service_record.service_data32[3]) + return; + } + + if((comp_mask & IB_SR_COMPMASK_SDATA64_0)== IB_SR_COMPMASK_SDATA64_0) + { + if(p_sr_item->p_service_rec->service_data64[0]!= + p_svcr->service_record.service_data64[0]) + return; + } + if((comp_mask & IB_SR_COMPMASK_SDATA64_1)== IB_SR_COMPMASK_SDATA64_1) + { + if(p_sr_item->p_service_rec->service_data64[1]!= + p_svcr->service_record.service_data64[1]) + return; + } + + /* Check that the requester port has the pkey which is the service_pkey. + If not - then it cannot receive this ServiceRecord. 
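+     This is the same partition screening applied to the other SA
+     record types: a requester outside the partition must not see it.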
*/ + /* The check is relevant only if the service_pkey is valid */ + if (!ib_pkey_is_invalid(p_svcr->service_record.service_pkey)) + { + if (!osm_physp_has_pkey( p_sr_item->p_rcv->p_log, + p_svcr->service_record.service_pkey, + p_req_physp ) ) + { + osm_log( p_sr_item->p_rcv->p_log, OSM_LOG_VERBOSE, + "__get_matching_sr: " + "requester port doesn't have the service_pkey: 0x%X\n", + cl_ntoh16(p_svcr->service_record.service_pkey) ); + return; + } + } + + p_sr_pool_item = (osm_sr_item_t*)cl_qlock_pool_get( &p_sr_item->p_rcv->sr_pool ); + + if( p_sr_pool_item == NULL ) + { + osm_log( p_sr_item->p_rcv->p_log, OSM_LOG_ERROR, + "__get_matching_sr: ERR 2408: " + "Unable to acquire Service Record from pool\n" ); + goto Exit; + } + + p_sr_pool_item->service_rec = p_svcr->service_record; + + cl_qlist_insert_tail( &p_sr_item->sr_list, + (cl_list_item_t*)&p_sr_pool_item->pool_item ); + + Exit: + return; +} + +/********************************************************************** + **********************************************************************/ +static void +osm_sr_rcv_process_get_method( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t * p_sa_mad; + ib_service_record_t* p_recvd_service_rec; + osm_sr_match_item_t sr_match_item; + osm_sr_search_ctxt_t context; + osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_process_get_method ); + + CL_ASSERT( p_madw ); + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sr_rcv_process_get_method: ERR 2409: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_service_rec = + (ib_service_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_dump_service_record( p_rcv->p_log, + p_recvd_service_rec, + OSM_LOG_DEBUG ); + } + + cl_qlist_init(&sr_match_item.sr_list); + sr_match_item.p_service_rec = p_recvd_service_rec; + sr_match_item.comp_mask = p_sa_mad->comp_mask; + sr_match_item.p_rcv = p_rcv; + + context.p_sr_item = &sr_match_item; + context.p_req_physp = p_req_physp; + + /* Grab the lock */ + cl_plock_excl_acquire(p_rcv->p_lock); + + cl_qlist_apply_func(&p_rcv->p_subn->sa_sr_list, + __get_matching_sr, + &context); + + cl_plock_release(p_rcv->p_lock); + + if ((p_sa_mad->method == IB_MAD_METHOD_GET) && + (cl_qlist_count( &sr_match_item.sr_list ) == 0)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_process_get_method: " + "No records matched the Service Record query\n"); + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + __osm_sr_rcv_respond( p_rcv, p_madw, &sr_match_item.sr_list ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +static void +osm_sr_rcv_process_set_method( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t * p_sa_mad; + ib_net16_t sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + ib_service_record_t* p_recvd_service_rec; + ib_net64_t comp_mask; + osm_svcr_t* p_svcr; + osm_sr_item_t* p_sr_item; + cl_qlist_t sr_list; + + OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_process_set_method ); + + CL_ASSERT( p_madw ); + + p_sa_mad = 
osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_service_rec = + (ib_service_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_dump_service_record( p_rcv->p_log, + p_recvd_service_rec, + OSM_LOG_DEBUG ); + } + + if( (comp_mask & ( IB_SR_COMPMASK_SID | IB_SR_COMPMASK_SGID )) != + (IB_SR_COMPMASK_SID | IB_SR_COMPMASK_SGID )) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_sr_rcv_process_set_method: " + "Component Mask RID check failed for METHOD_SET\n"); + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + + /* if we were not provided with a service lease make it + infinite */ + if( (comp_mask & IB_SR_COMPMASK_SLEASE) != IB_SR_COMPMASK_SLEASE) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_process_set_method: " + "ServiceLease Component Mask not set - using infinite lease\n"); + p_recvd_service_rec->service_lease = 0xFFFFFFFF; + } + + /* Grab the lock */ + cl_plock_excl_acquire(p_rcv->p_lock); + + /* If Record exists with matching RID */ + p_svcr = osm_svcr_get_by_rid( + p_rcv->p_subn, + p_rcv->p_log, + p_recvd_service_rec ); + + if(p_svcr == NULL) + { + /* Create the instance of the osm_svcr_t object */ + p_svcr = osm_svcr_new(p_recvd_service_rec); + if(p_svcr == NULL) + { + cl_plock_release(p_rcv->p_lock); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sr_rcv_process_set_method: ERR 2411: " + "osm_svcr_get_by_rid failed\n" ); + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + /* Add this new osm_svcr_t object to subnet object */ + osm_svcr_insert_to_db( + p_rcv->p_subn, + p_rcv->p_log, + p_svcr ); + + } + else + { + /* Update the old instance of the osm_svcr_t object */ + osm_svcr_init(p_svcr, p_recvd_service_rec); + } + + cl_plock_release(p_rcv->p_lock); + + if( p_recvd_service_rec->service_lease != 0xFFFFFFFF ) + { +#if 0 + cl_timer_trim(&p_rcv->sr_timer, + p_recvd_service_rec->service_lease * 1000); +#endif + /* This was a bug since no check was made to see if too long */ + /* just make sure the timer works - get a call back within a second */ + cl_timer_trim(&p_rcv->sr_timer, 1000); + p_svcr->modified_time = cl_get_time_stamp_sec(); + } + + p_sr_item = (osm_sr_item_t*)cl_qlock_pool_get( &p_rcv->sr_pool ); + if( p_sr_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sr_rcv_process_set_method: ERR 2412: " + "Unable to acquire Service record\n" ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + if( (comp_mask & IB_SR_COMPMASK_SPKEY) != IB_SR_COMPMASK_SPKEY) + { + /* Set the Default Service P_Key in the response */ + p_recvd_service_rec->service_pkey = IB_DEFAULT_PKEY; + } + + p_sr_item->service_rec = *p_recvd_service_rec; + cl_qlist_init(&sr_list); + + cl_qlist_insert_tail( &sr_list, (cl_list_item_t*)&p_sr_item->pool_item ); + + __osm_sr_rcv_respond( p_rcv, p_madw, &sr_list ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +static void +osm_sr_rcv_process_delete_method( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t * p_sa_mad; + ib_service_record_t* p_recvd_service_rec; + ib_net64_t comp_mask; + osm_svcr_t* p_svcr; + osm_sr_item_t* p_sr_item; + cl_qlist_t sr_list; + + OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_process_delete_method ); + + 
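+  /* Flow: look the record up by its RID, remove it from the subnet
+     database, and return a copy of the removed record in the response. */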
CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_recvd_service_rec = + (ib_service_record_t*)ib_sa_mad_get_payload_ptr( p_sa_mad ); + + comp_mask = p_sa_mad->comp_mask; + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_dump_service_record( p_rcv->p_log, + p_recvd_service_rec, + OSM_LOG_DEBUG ); + } + + /* Grab the lock */ + cl_plock_excl_acquire(p_rcv->p_lock); + + /* If Record exists with matching RID */ + p_svcr = osm_svcr_get_by_rid( + p_rcv->p_subn, + p_rcv->p_log, + p_recvd_service_rec ); + + if(p_svcr == NULL) + { + cl_plock_release(p_rcv->p_lock); + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_process_delete_method: " + "No records matched the RID\n"); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + else + { + osm_svcr_remove_from_db(p_rcv->p_subn, + p_rcv->p_log, + p_svcr ); + } + + cl_plock_release(p_rcv->p_lock); + + p_sr_item = (osm_sr_item_t*)cl_qlock_pool_get( &p_rcv->sr_pool ); + if( p_sr_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sr_rcv_process_delete_method: ERR 2413: " + "Unable to acquire Service record\n"); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + /* provide back the copy of the record */ + p_sr_item->service_rec = p_svcr->service_record; + cl_qlist_init(&sr_list); + + cl_qlist_insert_tail( &sr_list, (cl_list_item_t*)&p_sr_item->pool_item ); + + if(p_svcr) + osm_svcr_destroy(p_svcr); + + __osm_sr_rcv_respond( p_rcv, p_madw, &sr_list ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sr_rcv_process( + IN osm_sr_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + ib_sa_mad_t * p_sa_mad; + ib_net16_t sa_status = IB_SA_MAD_STATUS_REQ_INVALID; + boolean_t valid; + + OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_process ); + + CL_ASSERT( p_madw ); + + p_sa_mad = osm_madw_get_sa_mad_ptr( p_madw ); + + CL_ASSERT( p_sa_mad->attr_id == IB_MAD_ATTR_SERVICE_RECORD ); + + switch (p_sa_mad->method) + { + case IB_MAD_METHOD_SET: + valid = __validate_sr(p_rcv, p_madw); + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_sr_rcv_process: " + "Component Mask check failed for set request\n" ); + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + osm_sr_rcv_process_set_method(p_rcv, p_madw); + break; + case IB_MAD_METHOD_DELETE: + valid = __validate_sr(p_rcv, p_madw); + if(!valid) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_process: " + "Component Mask check failed for delete request\n" ); + osm_sa_send_error( p_rcv->p_resp, p_madw, sa_status ); + goto Exit; + } + osm_sr_rcv_process_delete_method(p_rcv, p_madw); + break; + case IB_MAD_METHOD_GET: + case IB_MAD_METHOD_GETTABLE: + osm_sr_rcv_process_get_method(p_rcv, p_madw); + break; + default: + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_process: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_sa_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + break; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sr_rcv_lease_cb( + IN void* context ) +{ + osm_sr_rcv_t* p_rcv = (osm_sr_rcv_t*)context; + cl_list_item_t* 
p_list_item; + cl_list_item_t* p_next_list_item; + osm_svcr_t* p_svcr; + uint32_t curr_time; + uint32_t elapsed_time; + uint32_t trim_time = 20; /* maximal timer refresh is 20 seconds */ + + OSM_LOG_ENTER( p_rcv->p_log, osm_sr_rcv_lease_cb ); + + cl_plock_excl_acquire(p_rcv->p_lock); + + p_list_item = cl_qlist_head(&p_rcv->p_subn->sa_sr_list); + + while( p_list_item != cl_qlist_end(&p_rcv->p_subn->sa_sr_list) ) + { + p_svcr = (osm_svcr_t*)p_list_item; + + if(p_svcr->service_record.service_lease == 0xFFFFFFFF) + { + p_list_item = cl_qlist_next(p_list_item); + continue; + } + + /* current time in seconds */ + curr_time = cl_get_time_stamp_sec(); + /* elapsed time since the last modification */ + elapsed_time = curr_time - p_svcr->modified_time; + /* but it cannot be less than 1 */ + if (elapsed_time < 1) elapsed_time = 1; + + if(elapsed_time < p_svcr->lease_period) + { + /* + Just update the service lease period + note: for simplicity we work with a uint32_t field + external to the network order lease_period of the MAD + */ + p_svcr->lease_period -= elapsed_time; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sr_rcv_lease_cb: " + "Remaining time for Service Name:%s is:0x%X\n", + p_svcr->service_record.service_name, + p_svcr->lease_period ); + + p_svcr->modified_time = curr_time; + + /* Update the trim timer */ + if(trim_time > p_svcr->lease_period) + { + trim_time = p_svcr->lease_period; + if (trim_time < 1) trim_time = 1; + } + + p_list_item = cl_qlist_next(p_list_item); + continue; + + } + else + { + p_next_list_item = cl_qlist_next(p_list_item); + + /* Remove the service Record */ + osm_svcr_remove_from_db(p_rcv->p_subn, + p_rcv->p_log, + p_svcr); + + osm_svcr_destroy(p_svcr); + + p_list_item = p_next_list_item; + continue; + } + } + + /* Release the Lock */ + cl_plock_release(p_rcv->p_lock); + + if(trim_time != 0xFFFFFFFF) + { + cl_timer_trim(&p_rcv->sr_timer, + trim_time * 1000); /* Convert to milliseconds */ + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record_ctrl.c new file mode 100644 index 00000000..003a1f45 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_service_record_ctrl.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_sr_rcv_ctrl_t. + * This object represents the ServiceRecord request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_sr_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_sr_rcv_process( ((osm_sr_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sr_rcv_ctrl_construct( + IN osm_sr_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sr_rcv_ctrl_destroy( + IN osm_sr_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sr_rcv_ctrl_init( + IN osm_sr_rcv_ctrl_t* const p_ctrl, + IN osm_sr_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sr_rcv_ctrl_init ); + + osm_sr_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SERVICE_RECORD, + __osm_sr_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sr_rcv_ctrl_init: ERR 2501: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record.c new file mode 100644 index 00000000..4895b6be --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record.c @@ -0,0 +1,557 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_slvl_rec_rcv_t. + * This object represents the SLtoVL Mapping Query Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_SLVL_REC_RCV_POOL_MIN_SIZE 32 +#define OSM_SLVL_REC_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_slvl_item +{ + cl_pool_item_t pool_item; + ib_slvl_table_record_t rec; +} osm_slvl_item_t; + +typedef struct _osm_slvl_search_ctxt +{ + const ib_slvl_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + uint8_t in_port_num; + cl_qlist_t* p_list; + osm_slvl_rec_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_slvl_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rec_rcv_construct( + IN osm_slvl_rec_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rec_rcv_destroy( + IN osm_slvl_rec_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_slvl_rec_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_slvl_rec_rcv_init( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_slvl_rec_rcv_init ); + + osm_slvl_rec_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + /* used for matching records collection */ + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_SLVL_REC_RCV_POOL_MIN_SIZE, + 0, + OSM_SLVL_REC_RCV_POOL_GROW_SIZE, + sizeof(osm_slvl_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_slvl_create( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN const osm_physp_t* const p_physp, + IN osm_slvl_search_ctxt_t* const p_ctxt, + IN uint8_t in_port_idx ) +{ + 
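+ /*
+  * (Annotation - not part of the original patch.) This helper emits a
+  * single SLtoVL table record: the LID is the port's own base LID for
+  * end nodes and the switch port 0 LID for switches, and the record
+  * carries the out/in port pair plus a copy of that pair's SL->VL
+  * mapping table.
+  */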
osm_slvl_item_t* p_rec_item; + uint16_t lid; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_slvl_create ); + + p_rec_item = (osm_slvl_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_slvl_create: ERR 2602: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if (p_physp->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH) + { + lid = osm_physp_get_port_info_ptr( p_physp )->base_lid; + } + else + { + lid = osm_node_get_base_lid( p_physp->p_node, 0 ); + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_slvl_create: " + "New SLtoVL Map for: OUT port 0x%016" PRIx64 + ", lid 0x%X, port# 0x%X to In Port:%u\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh16( lid ), osm_physp_get_port_num( p_physp ), + in_port_idx + ); + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.out_port_num = osm_physp_get_port_num( p_physp ); + p_rec_item->rec.in_port_num = in_port_idx; + p_rec_item->rec.slvl_tbl = *(osm_physp_get_slvl_tbl(p_physp,in_port_idx)); + + cl_qlist_insert_tail( p_ctxt->p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_slvl_by_comp_mask( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN const osm_port_t* const p_port, + osm_slvl_search_ctxt_t* const p_ctxt ) +{ + const ib_slvl_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + const osm_physp_t* p_out_physp, *p_in_physp; + uint8_t in_port_num, out_port_num; + uint8_t num_ports; + uint8_t in_port_start, in_port_end; + uint8_t out_port_start, out_port_end; + const osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_slvl_by_comp_mask ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + num_ports = osm_port_get_num_physp( p_port ); + in_port_start = 0; + in_port_end = num_ports; + out_port_start = 0; + out_port_end = num_ports; + p_req_physp = p_ctxt->p_req_physp; + + if ( p_port->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_slvl_by_comp_mask: " + "Using Physical Default Port Number: 0x%X (for End Node)\n", + p_port->default_port_num ); + p_out_physp = osm_port_get_phys_ptr( p_port, p_port->default_port_num ); + /* check that the p_out_physp and the p_req_physp share a pkey */ + if (osm_physp_share_pkey( p_rcv->p_log, p_req_physp, p_out_physp )) + __osm_sa_slvl_create( p_rcv, p_out_physp, p_ctxt, 0 ); + } + else + { + if ( comp_mask & IB_SLVL_COMPMASK_OUT_PORT ) + { + out_port_start = out_port_end = p_rcvd_rec->out_port_num; + } + if ( comp_mask & IB_SLVL_COMPMASK_IN_PORT ) + { + in_port_start = in_port_end = p_rcvd_rec->in_port_num; + } + + for( out_port_num = out_port_start; out_port_num <= out_port_end; out_port_num++ ) { + p_out_physp = osm_port_get_phys_ptr( p_port, out_port_num ); + if( p_out_physp == NULL ) + continue; + + if( !osm_physp_is_valid( p_out_physp ) ) + continue; + + for( in_port_num = in_port_start; in_port_num <= in_port_end; in_port_num++ ) { +#if 0 + if (out_port_num && out_port_num == in_port_num) + continue; +#endif + + p_in_physp = osm_port_get_phys_ptr( p_port, in_port_num ); + if( p_in_physp == NULL ) + continue; + + if( 
!osm_physp_is_valid( p_in_physp ) ) + continue; + + /* if the requester and the p_out_physp don't share a pkey - + continue */ + if (!osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_out_physp ) ) + continue; + + __osm_sa_slvl_create(p_rcv, p_out_physp, p_ctxt, in_port_num); + } + } + } + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_slvl_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_port_t* const p_port = (osm_port_t*)p_map_item; + osm_slvl_search_ctxt_t* const p_ctxt = (osm_slvl_search_ctxt_t *)context; + + __osm_sa_slvl_by_comp_mask( p_ctxt->p_rcv, p_port, p_ctxt ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rec_rcv_process( + IN osm_slvl_rec_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_slvl_table_record_t* p_rcvd_rec; + const cl_ptr_vector_t* p_tbl; + const osm_port_t* p_port = NULL; + const ib_slvl_table_t* p_slvl_tbl; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_slvl_table_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_slvl_search_ctxt_t context; + osm_slvl_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t comp_mask; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_slvl_rec_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_slvl_table_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_SLVL_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2604: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
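+ (Annotation - not part of the original patch.) The requester physp,
+ resolved from the MAD's source address, is what makes partition
+ enforcement possible: a record is only returned when the requesting
+ port shares a pkey with the port being reported.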
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2603: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + p_slvl_tbl = (ib_slvl_table_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.in_port_num = p_rcvd_rec->in_port_num; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_slvl_rec_rcv_process: " + "Got Query Lid:0x%04X(%02X), In-Port:0x%02X(%02X), Out-Port:0x%02X(%02X)\n", + cl_ntoh16(p_rcvd_rec->lid), (comp_mask & IB_SLVL_COMPMASK_LID) != 0, + p_rcvd_rec->in_port_num, (comp_mask & IB_SLVL_COMPMASK_IN_PORT) != 0, + p_rcvd_rec->out_port_num, (comp_mask & IB_SLVL_COMPMASK_OUT_PORT) != 0 ); + + /* + If the user specified a LID, it obviously narrows our + work load, since we don't have to search every port + */ + if( comp_mask & IB_SLVL_COMPMASK_LID ) + { + + p_tbl = &p_rcv->p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + status = osm_get_port_by_base_lid( p_rcv->p_subn, p_rcvd_rec->lid, &p_port ); + if ( ( status != IB_SUCCESS ) || ( p_port == NULL ) ) + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2608: " + "No port found with LID 0x%x\n", + cl_ntoh16(p_rcvd_rec->lid) ); + } + } + + if ( status == IB_SUCCESS ) + { + /* if we have a unique port - no need for a port search */ + if( p_port ) + /* this does the loop on all the port phys ports */ + __osm_sa_slvl_by_comp_mask( p_rcv, p_port, &context ); + else + { + cl_qmap_apply_func( &p_rcv->p_subn->port_guid_tbl, + __osm_sa_slvl_by_comp_mask_cb, + &context ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2607: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... 
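+ (Annotation - not part of the original patch.) On this error path each
+ item already pulled from the pool must be handed back via
+ cl_qlock_pool_put() before bailing out; dropping the list on the floor
+ would leak pool entries.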
*/ + p_rec_item = (osm_slvl_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_slvl_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_slvl_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_slvl_table_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_slvl_rec_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_slvl_rec_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_slvl_table_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2605: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_slvl_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
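+ (Annotation - not part of the original patch.) OR-ing
+ IB_MAD_METHOD_RESP_MASK into the method is what turns SubnAdmGetTable
+ into SubnAdmGetTableResp, i.e. it sets the response ('R') bit. The
+ attr_offset field is expressed in 8-byte units; a sketch of the
+ helper, assuming the usual ib_types definition:
+     ib_get_attr_offset(size) ~ cl_hton16( (uint16_t)(size >> 3) )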
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_slvl_table_record_t) ); + + p_resp_rec = (ib_slvl_table_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_slvl_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE); + if(status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_process: ERR 2606: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record_ctrl.c new file mode 100644 index 00000000..5faee333 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_slvl_record_ctrl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_slvl_rec_rcv_ctrl_t. + * This object represents the SLtoVL Map Record SA request controller object. + * This object is part of the opensm family of objects. 
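+ * (Annotation - not part of the original patch.) All of these *_ctrl
+ * objects follow one pattern: init() registers a dispatcher callback
+ * for a single message type, destroy() unregisters it, and the
+ * callback merely unwraps its context and forwards the MAD wrapper:
+ *     cl_disp_register( p_disp, OSM_MSG_MAD_SLVL_TBL_RECORD,
+ *                       __osm_slvl_rec_rcv_ctrl_disp_callback, p_ctrl );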
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_slvl_rec_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_slvl_rec_rcv_process( ((osm_slvl_rec_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rec_rcv_ctrl_construct( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rec_rcv_ctrl_destroy( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_slvl_rec_rcv_ctrl_init( + IN osm_slvl_rec_rcv_ctrl_t* const p_ctrl, + IN osm_slvl_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_slvl_rec_rcv_ctrl_init ); + + osm_slvl_rec_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SLVL_TBL_RECORD, + __osm_slvl_rec_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_slvl_rec_rcv_ctrl_init: ERR 2701: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record.c new file mode 100644 index 00000000..258cd2a5 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record.c @@ -0,0 +1,583 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_smir_rcv_t. + * This object represents the SMInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_SMIR_RCV_POOL_MIN_SIZE 32 +#define OSM_SMIR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_smir_item +{ + cl_pool_item_t pool_item; + ib_sminfo_record_t rec; +} osm_smir_item_t; + +typedef struct _osm_smir_search_ctxt +{ + const ib_sminfo_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_smir_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_smir_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_smir_rcv_construct( + IN osm_smir_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_smir_rcv_destroy( + IN osm_smir_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_smir_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_smir_rcv_init( + IN osm_smir_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_stats_t* const p_stats, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_smir_rcv_init ); + + osm_smir_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_stats = p_stats; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_SMIR_RCV_POOL_MIN_SIZE, + 0, + OSM_SMIR_RCV_POOL_GROW_SIZE, + sizeof(osm_smir_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +static ib_api_status_t +__osm_smir_rcv_new_smir( + IN osm_smir_rcv_t* const p_rcv, + IN const osm_port_t* const p_port, + IN cl_qlist_t* const p_list, + IN ib_net64_t const guid, + IN ib_net32_t const act_count, + IN uint8_t const pri_state, + IN const osm_physp_t* const p_req_physp ) +{ + osm_smir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_smir_rcv_new_smir ); + + p_rec_item = (osm_smir_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_smir_rcv_new_smir: ERR 2801: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + 
osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_smir_rcv_new_smir: " + "New SMInfo: GUID 0x%016" PRIx64 "\n", + cl_ntoh64( guid ) + ); + } + + memset( &p_rec_item->rec, 0, sizeof(ib_sminfo_record_t) ); + + p_rec_item->rec.lid = osm_port_get_base_lid( p_port ); + p_rec_item->rec.sm_info.guid = guid; + p_rec_item->rec.sm_info.act_count = act_count; + p_rec_item->rec.sm_info.pri_state = pri_state; + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_smir_by_comp_mask( + IN osm_smir_rcv_t* const p_rcv, + IN const osm_remote_sm_t* const p_rem_sm, + osm_smir_search_ctxt_t* const p_ctxt ) +{ + const ib_sminfo_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec; + const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp; + ib_net64_t const comp_mask = p_ctxt->comp_mask; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_smir_by_comp_mask ); + + if ( comp_mask & IB_SMIR_COMPMASK_GUID ) + { + if ( p_rem_sm->smi.guid != p_rcvd_rec->sm_info.guid ) + goto Exit; + } + + if ( comp_mask & IB_SMIR_COMPMASK_PRIORITY ) + { + if ( ib_sminfo_get_priority( &p_rem_sm->smi ) != + ib_sminfo_get_priority( &p_rcvd_rec->sm_info ) ) + goto Exit; + } + + if ( comp_mask & IB_SMIR_COMPMASK_SMSTATE ) + { + if ( ib_sminfo_get_state( &p_rem_sm->smi ) != + ib_sminfo_get_state( &p_rcvd_rec->sm_info ) ) + goto Exit; + } + + /* Implement any other needed search cases */ + + __osm_smir_rcv_new_smir( p_rcv, p_rem_sm->p_port, p_ctxt->p_list, + p_rem_sm->smi.guid, + p_rem_sm->smi.act_count, + p_rem_sm->smi.pri_state, + p_req_physp ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sa_smir_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_remote_sm_t* const p_rem_sm = (osm_remote_sm_t*)p_map_item; + osm_smir_search_ctxt_t* const p_ctxt = (osm_smir_search_ctxt_t *)context; + + __osm_sa_smir_by_comp_mask( p_ctxt->p_rcv, p_rem_sm, p_ctxt ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_smir_rcv_process( + IN osm_smir_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_sminfo_record_t* p_rcvd_rec; + const cl_qmap_t* p_tbl; + const osm_port_t* p_port = NULL; + const ib_sm_info_t* p_smi; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_sminfo_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_smir_search_ctxt_t context; + osm_smir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t comp_mask; + ib_net64_t port_guid; + osm_physp_t* p_req_physp; + osm_port_t* local_port; + osm_remote_sm_t* p_rem_sm; + cl_qmap_t* p_sm_guid_tbl; + uint8_t pri_state; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_smir_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_sminfo_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_SMINFO_RECORD ); + 
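+ /*
+  * (Annotation - not part of the original patch.) SMInfoRecord queries
+  * are answered from two sources: the local SM, which is handled
+  * first, and the table of known remote SMs. For the local SM the
+  * priority/state byte is packed by hand further down:
+  *     pri_state = (sm_state & 0x0F) | ((priority & 0x0F) << 4);
+  */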
+ /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2804: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. */ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2803: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_sm_info_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG ); + + p_tbl = &p_rcv->p_subn->sm_guid_tbl; + p_smi = &p_rcvd_rec->sm_info; + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + /* + If the user specified a LID, it obviously narrows our + work load, since we don't have to search every port + */ + if( comp_mask & IB_SMIR_COMPMASK_LID ) + { + status = osm_get_port_by_base_lid( p_rcv->p_subn, p_rcvd_rec->lid, &p_port ); + if ( ( status != IB_SUCCESS ) || ( p_port == NULL ) ) + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2806: " + "No port found with LID 0x%x\n", + cl_ntoh16(p_rcvd_rec->lid) ); + } + } + + if ( status == IB_SUCCESS ) + { + /* Handle our own SM first */ + local_port = osm_get_port_by_guid( p_rcv->p_subn, p_rcv->p_subn->sm_port_guid ); + if ( !local_port ) + { + cl_plock_release( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2809: " + "No port found with GUID 0x%016" PRIx64 "\n", + cl_ntoh64(p_rcv->p_subn->sm_port_guid ) ); + goto Exit; + } + + if ( !p_port || local_port == p_port ) + { + if (FALSE == + osm_physp_share_pkey( p_rcv->p_log, p_req_physp, + osm_port_get_default_phys_ptr( local_port ) ) ) + { + cl_plock_release( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2805: " + "Cannot get SMInfo record due to pkey violation\n" ); + goto Exit; + } + + /* Check that other search components specified match */ + if ( comp_mask & IB_SMIR_COMPMASK_GUID ) + { + if ( p_rcv->p_subn->sm_port_guid != p_smi->guid ) + goto Remotes; + } + if ( comp_mask & IB_SMIR_COMPMASK_PRIORITY ) + { + if ( p_rcv->p_subn->opt.sm_priority != ib_sminfo_get_priority( p_smi ) ) + goto Remotes; + } + if ( comp_mask & IB_SMIR_COMPMASK_SMSTATE ) + { + if ( p_rcv->p_subn->sm_state != ib_sminfo_get_state( p_smi ) ) + goto Remotes; + } + + /* Now, add local SMInfo to list */ + pri_state = p_rcv->p_subn->sm_state & 0x0F; + pri_state |= (p_rcv->p_subn->opt.sm_priority & 0x0F) << 4; + __osm_smir_rcv_new_smir( p_rcv, local_port, context.p_list, + p_rcv->p_subn->sm_port_guid, + cl_ntoh32( p_rcv->p_stats->qp0_mads_sent ), + pri_state, + p_req_physp ); + } + + Remotes: + if( p_port && p_port != local_port ) + { + /* Find remote SM corresponding to p_port */ + port_guid = osm_port_get_guid( p_port ); + p_sm_guid_tbl = &p_rcv->p_subn->sm_guid_tbl; + p_rem_sm = (osm_remote_sm_t*)cl_qmap_get( p_sm_guid_tbl, port_guid ); + if (p_rem_sm != (osm_remote_sm_t*)cl_qmap_end( p_sm_guid_tbl ) ) + 
__osm_sa_smir_by_comp_mask( p_rcv, p_rem_sm, &context ); + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 280A: " + "No remote SM for GUID 0x%016" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + } + } + else + { + /* Go over all other known (remote) SMs */ + cl_qmap_apply_func( &p_rcv->p_subn->sm_guid_tbl, + __osm_sa_smir_by_comp_mask_cb, + &context ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2808: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS); + + /* need to set the mem free ... */ + p_rec_item = (osm_smir_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_smir_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_smir_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_sminfo_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_smir_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_smir_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_sminfo_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2807: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_smir_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_NO_RESOURCES ); + + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
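+ (Annotation - not part of the original patch.) Besides zeroing the SA
+ header SM_Key, the copy loop below also clears the SM_Key inside every
+ returned SMInfo, so no SM key material ever reaches the requester.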
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_sminfo_record_t) ); + + p_resp_rec = (ib_sminfo_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_smir_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + p_resp_rec->sm_info.sm_key = 0; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if( status != IB_SUCCESS ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_smir_rcv_process: ERR 2802: " + "Error sending MAD (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record_ctrl.c new file mode 100644 index 00000000..de6a7c74 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sminfo_record_ctrl.c @@ -0,0 +1,125 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_smir_ctrl_t. + * This object represents the SMInfo request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_smir_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_smir_rcv_process( ((osm_smir_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_smir_ctrl_construct( + IN osm_smir_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_smir_ctrl_destroy( + IN osm_smir_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_smir_ctrl_init( + IN osm_smir_ctrl_t* const p_ctrl, + IN osm_smir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_smir_ctrl_init ); + + osm_smir_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SMINFO_RECORD, + __osm_smir_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_smir_ctrl_init: ERR 2901: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record.c new file mode 100644 index 00000000..8c893ad3 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record.c @@ -0,0 +1,535 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sir_rcv_t. + * This object represents the SwitchInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_SIR_RCV_POOL_MIN_SIZE 32 +#define OSM_SIR_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_sir_item +{ + cl_pool_item_t pool_item; + ib_switch_info_record_t rec; +} osm_sir_item_t; + +typedef struct _osm_sir_search_ctxt +{ + const ib_switch_info_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + cl_qlist_t* p_list; + osm_sir_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_sir_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_sir_rcv_construct( + IN osm_sir_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sir_rcv_destroy( + IN osm_sir_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_sir_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sir_rcv_init( + IN osm_sir_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_sir_rcv_init ); + + osm_sir_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_SIR_RCV_POOL_MIN_SIZE, + 0, + OSM_SIR_RCV_POOL_GROW_SIZE, + sizeof(osm_sir_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_sir_rcv_new_sir( + IN osm_sir_rcv_t* const p_rcv, + IN const osm_switch_t* const p_sw, + IN cl_qlist_t* const p_list, + IN ib_net16_t const lid ) +{ + osm_sir_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sir_rcv_new_sir ); + + p_rec_item = (osm_sir_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sir_rcv_new_sir: ERR 5308: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sir_rcv_new_sir: " + "New SwitchInfoRecord: lid 0x%X\n", + cl_ntoh16( lid ) + ); + } + + memset( &p_rec_item->rec, 0, sizeof(ib_switch_info_record_t) ); + 
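+ /*
+  * (Annotation - not part of the original patch.) Only two fields are
+  * meaningful after the memset: the switch's LID (the base LID of its
+  * port 0) and a verbatim copy of the cached SwitchInfo attribute.
+  */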
+ p_rec_item->rec.lid = lid; + p_rec_item->rec.switch_info = p_sw->switch_info; + + cl_qlist_insert_tail( p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +static osm_port_t* +__osm_sir_get_port_by_guid( + IN osm_sir_rcv_t* const p_rcv, + IN uint64_t port_guid ) +{ + osm_port_t* p_port; + + CL_PLOCK_ACQUIRE(p_rcv->p_lock); + + p_port = (osm_port_t *)cl_qmap_get(&p_rcv->p_subn->port_guid_tbl, + port_guid); + if (p_port == (osm_port_t *)cl_qmap_end(&p_rcv->p_subn->port_guid_tbl)) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sir_get_port_by_guid ERR 5309: " + "Invalid port GUID 0x%016" PRIx64 "\n", + port_guid ); + p_port = NULL; + } + + CL_PLOCK_RELEASE(p_rcv->p_lock); + return p_port; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sir_rcv_create_sir( + IN osm_sir_rcv_t* const p_rcv, + IN const osm_switch_t* const p_sw, + IN cl_qlist_t* const p_list, + IN ib_net16_t const match_lid, + IN const osm_physp_t* const p_req_physp ) +{ + osm_port_t* p_port; + const osm_physp_t* p_physp; + uint16_t match_lid_ho; + ib_net16_t min_lid_ho; + ib_net16_t max_lid_ho; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sir_rcv_create_sir ); + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sir_rcv_create_sir: " + "Looking for SwitchInfoRecord with LID: 0x%X\n", + cl_ntoh16( match_lid ) + ); + } + + /* In switches, the port guid is the node guid. */ + p_port = + __osm_sir_get_port_by_guid( p_rcv, p_sw->p_node->node_info.port_guid ); + if (! p_port) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sir_rcv_create_sir: ERR 530A: " + "Failed to find Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + goto Exit; + } + + /* check that the requester physp and the current physp are under + the same partition. */ + p_physp = osm_port_get_default_phys_ptr( p_port ); + if (! p_physp) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sir_rcv_create_sir: ERR 530B: " + "Failed to find default physical Port by Node Guid:0x%016" PRIx64 + "\n", + cl_ntoh64( p_sw->p_node->node_info.node_guid ) + ); + goto Exit; + } + if (! osm_physp_share_pkey( p_rcv->p_log, p_req_physp, p_physp )) + goto Exit; + + /* get the port 0 of the switch */ + osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho ); + + match_lid_ho = cl_ntoh16( match_lid ); + if( match_lid_ho ) + { + /* + We validate that the lid belongs to this switch. 
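+ (Annotation - not part of the original patch.) A switch is addressed
+ through its port 0 LID; with a non-zero LMC that port owns a whole
+ range of LIDs, hence the range test below:
+     min_lid_ho <= match_lid_ho <= max_lid_ho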
+ */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sir_rcv_create_sir: " + "Comparing LID: 0x%X <= 0x%X <= 0x%X\n", + min_lid_ho, match_lid_ho, max_lid_ho + ); + } + + if ( match_lid_ho < min_lid_ho || match_lid_ho > max_lid_ho ) + goto Exit; + + } + + __osm_sir_rcv_new_sir( p_rcv, p_sw, p_list, osm_port_get_base_lid(p_port) ); + +Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sir_rcv_by_comp_mask( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_sir_search_ctxt_t* const p_ctxt = (osm_sir_search_ctxt_t *)context; + const osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + const ib_switch_info_record_t* const p_rcvd_rec = p_ctxt->p_rcvd_rec; + const osm_physp_t* const p_req_physp = p_ctxt->p_req_physp; + osm_sir_rcv_t* const p_rcv = p_ctxt->p_rcv; + ib_net64_t const comp_mask = p_ctxt->comp_mask; + ib_net16_t match_lid = 0; + + OSM_LOG_ENTER( p_ctxt->p_rcv->p_log, __osm_sir_rcv_by_comp_mask ); + + osm_dump_switch_info( + p_ctxt->p_rcv->p_log, + &p_sw->switch_info, + OSM_LOG_VERBOSE ); + + if( comp_mask & IB_SWIR_COMPMASK_LID ) + { + match_lid = p_rcvd_rec->lid; + if (!match_lid) + goto Exit; + } + + __osm_sir_rcv_create_sir( p_rcv, p_sw, p_ctxt->p_list, + match_lid, p_req_physp ); + +Exit: + OSM_LOG_EXIT( p_ctxt->p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sir_rcv_process( + IN osm_sir_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_switch_info_record_t* p_rcvd_rec; + ib_switch_info_record_t* p_resp_rec; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_sir_search_ctxt_t context; + osm_sir_item_t* p_rec_item; + ib_api_status_t status; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_sir_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_switch_info_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_SWITCH_INFO_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sir_rcv_process: ERR 5305: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
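+ (Annotation - not part of the original patch.) As in the other SA
+ handlers, the requester physp feeds the pkey check inside
+ __osm_sir_rcv_create_sir(), which silently drops records the
+ requester is not allowed to see.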
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sir_rcv_process: ERR 5304: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + if ( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + osm_dump_switch_info_record( p_rcv->p_log, p_rcvd_rec, OSM_LOG_DEBUG ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.p_req_physp = p_req_physp; + + cl_plock_acquire( p_rcv->p_lock ); + + /* Go over all switches */ + cl_qmap_apply_func( &p_rcv->p_subn->sw_guid_tbl, + __osm_sir_rcv_by_comp_mask, + &context ); + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if ( (p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec > 1) ) { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_sir_rcv_process: ERR 5303: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... */ + p_rec_item = (osm_sir_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_sir_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_sir_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + /* we limit the number of records to a single packet */ + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_switch_info_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_sir_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_sir_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_switch_info_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_sir_rcv_process: ERR 5306: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_sir_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
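+ An illustrative sketch of the resulting reply (not normative):
+ [ SA MAD header | rec 0 | rec 1 | ... | rec num_rec-1 ]
+ with consecutive records spaced attr_offset units of 8 bytes
+ apart, as computed by ib_get_attr_offset() below.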
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_switch_info_record_t) ); + + p_resp_rec = (ib_switch_info_record_t*)ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_sir_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if (status != IB_SUCCESS) + { + osm_log(p_rcv->p_log, OSM_LOG_ERROR, + "osm_sir_rcv_process: ERR 5307: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status)); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record_ctrl.c new file mode 100644 index 00000000..726cd40a --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_sw_info_record_ctrl.c @@ -0,0 +1,123 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sir_rcv_ctrl_t. + * This object represents the SwitchInfo Record controller object. + * This object is part of the opensm family of objects. 
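+ *
+ * The controller is thin dispatcher glue: osm_sir_rcv_ctrl_init()
+ * registers __osm_sir_ctrl_disp_callback() for
+ * OSM_MSG_MAD_SWITCH_INFO_RECORD messages, and the callback simply
+ * forwards each MAD wrapper to osm_sir_rcv_process().
+ *
+ * A minimal usage sketch, assuming p_rcv, p_log and p_disp were
+ * initialized elsewhere:
+ *
+ * osm_sir_rcv_ctrl_t ctrl;
+ * status = osm_sir_rcv_ctrl_init( &ctrl, p_rcv, p_log, p_disp );
+ * if( status != IB_SUCCESS )
+ * ...dispatcher registration failed, bail out...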
+ * + * Environment: + * Linux User Mode + * + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_sir_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_sir_rcv_process( ((osm_sir_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sir_rcv_ctrl_construct( + IN osm_sir_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sir_rcv_ctrl_destroy( + IN osm_sir_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sir_rcv_ctrl_init( + IN osm_sir_rcv_ctrl_t* const p_ctrl, + IN osm_sir_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sir_rcv_ctrl_init ); + + osm_sir_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SWITCH_INFO_RECORD, + __osm_sir_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sir_rcv_ctrl_init: ERR 5301: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record.c new file mode 100644 index 00000000..1bad3db6 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record.c @@ -0,0 +1,577 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_vlarb_rec_rcv_t. + * This object represents the VLArbitrationRecord Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_VLARB_REC_RCV_POOL_MIN_SIZE 32 +#define OSM_VLARB_REC_RCV_POOL_GROW_SIZE 32 + +typedef struct _osm_vl_arb_item +{ + cl_pool_item_t pool_item; + ib_vl_arb_table_record_t rec; +} osm_vl_arb_item_t; + +typedef struct _osm_vl_arb_search_ctxt +{ + const ib_vl_arb_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + uint8_t block_num; + cl_qlist_t* p_list; + osm_vlarb_rec_rcv_t* p_rcv; + const osm_physp_t* p_req_physp; +} osm_vl_arb_search_ctxt_t; + +/********************************************************************** + **********************************************************************/ +void +osm_vlarb_rec_rcv_construct( + IN osm_vlarb_rec_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); + cl_qlock_pool_construct( &p_rcv->pool ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vlarb_rec_rcv_destroy( + IN osm_vlarb_rec_rcv_t* const p_rcv ) +{ + OSM_LOG_ENTER( p_rcv->p_log, osm_vlarb_rec_rcv_destroy ); + cl_qlock_pool_destroy( &p_rcv->pool ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vlarb_rec_rcv_init( + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN osm_sa_resp_t* const p_resp, + IN osm_mad_pool_t* const p_mad_pool, + IN const osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osm_vlarb_rec_rcv_init ); + + osm_vlarb_rec_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_resp = p_resp; + p_rcv->p_mad_pool = p_mad_pool; + + /* used for matching records collection */ + status = cl_qlock_pool_init( &p_rcv->pool, + OSM_VLARB_REC_RCV_POOL_MIN_SIZE, + 0, + OSM_VLARB_REC_RCV_POOL_GROW_SIZE, + sizeof(osm_vl_arb_item_t), + NULL, NULL, NULL ); + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_vl_arb_create( + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp, + IN osm_vl_arb_search_ctxt_t* const p_ctxt, + IN uint8_t block ) +{ + osm_vl_arb_item_t* p_rec_item; + uint16_t lid; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_vl_arb_create ); + + p_rec_item = (osm_vl_arb_item_t*)cl_qlock_pool_get( &p_rcv->pool ); + if( p_rec_item == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_vl_arb_create: ERR 2A02: " + "cl_qlock_pool_get failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + if 
(p_physp->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH) + { + lid = osm_physp_get_port_info_ptr( p_physp )->base_lid; + } + else + { + lid = osm_node_get_base_lid( p_physp->p_node, 0 ); + } + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_vl_arb_create: " + "New VLArbitration for: port 0x%016" PRIx64 + ", lid 0x%X, port# 0x%X Block:%u\n", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ), + cl_ntoh16( lid ), osm_physp_get_port_num( p_physp ), + block + ); + } + + memset( &p_rec_item->rec, 0, sizeof( p_rec_item->rec ) ); + + p_rec_item->rec.lid = lid; + p_rec_item->rec.port_num = osm_physp_get_port_num( p_physp ); + p_rec_item->rec.block_num = block; + p_rec_item->rec.vl_arb_tbl = *(osm_physp_get_vla_tbl(p_physp, block)); + + cl_qlist_insert_tail( p_ctxt->p_list, (cl_list_item_t*)&p_rec_item->pool_item ); + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_vl_arb_check_physp( + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN osm_physp_t* const p_physp, + osm_vl_arb_search_ctxt_t* const p_ctxt ) +{ + ib_net64_t comp_mask = p_ctxt->comp_mask; + uint8_t block; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_vl_arb_check_physp ); + + /* we got here with the phys port - all that's left is to get the right block */ + for (block = 1; block <= 4; block++) { + if (!(comp_mask & IB_VLA_COMPMASK_BLOCK) || block == p_ctxt->block_num) + { + __osm_sa_vl_arb_create( p_rcv, p_physp, p_ctxt, block ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_vl_arb_by_comp_mask( + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN const osm_port_t* const p_port, + osm_vl_arb_search_ctxt_t* const p_ctxt ) +{ + const ib_vl_arb_table_record_t* p_rcvd_rec; + ib_net64_t comp_mask; + osm_physp_t * p_physp; + uint8_t port_num; + uint8_t num_ports; + const osm_physp_t* p_req_physp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sa_vl_arb_by_comp_mask ); + + p_rcvd_rec = p_ctxt->p_rcvd_rec; + comp_mask = p_ctxt->comp_mask; + port_num = p_rcvd_rec->port_num; + p_req_physp = p_ctxt->p_req_physp; + + /* if this is a switch port we can search all ports + otherwise we must be looking on port 0 */ + if ( p_port->p_node->node_info.node_type != IB_NODE_TYPE_SWITCH) + { + /* we put it in the comp mask and port num */ + port_num = p_port->default_port_num; + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sa_vl_arb_by_comp_mask: " + "Using Physical Default Port Number: 0x%X (for End Node)\n", + port_num ); + comp_mask |= IB_VLA_COMPMASK_OUT_PORT; + } + + if( comp_mask & IB_VLA_COMPMASK_OUT_PORT ) + { + if (port_num < osm_port_get_num_physp( p_port )) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + /* check that the p_physp is valid, and that the requester + and the p_physp share a pkey. 
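+ Ports failing either test are skipped silently rather than
+ reported as an error: a requester outside the port's partitions
+ simply sees no VLArbitration records for it.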
*/ + if( p_physp && osm_physp_is_valid( p_physp ) && + osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp) ) + __osm_sa_vl_arb_check_physp( p_rcv, p_physp, p_ctxt ); + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sa_vl_arb_by_comp_mask: ERR 2A03: " + "Given Physical Port Number: 0x%X is out of range should be < 0x%X\n", + port_num, osm_port_get_num_physp( p_port ) ); + goto Exit; + } + } + else + { + num_ports = osm_port_get_num_physp( p_port ); + for( port_num = 0; port_num < num_ports; port_num++ ) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + if( p_physp == NULL ) + continue; + + if( !osm_physp_is_valid( p_physp ) ) + continue; + + /* if the requester and the p_physp don't share a pkey - + continue */ + if (!osm_physp_share_pkey(p_rcv->p_log, p_req_physp, p_physp)) + continue; + + __osm_sa_vl_arb_check_physp( p_rcv, p_physp, p_ctxt ); + } + } + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sa_vl_arb_by_comp_mask_cb( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + const osm_port_t* const p_port = (osm_port_t*)p_map_item; + osm_vl_arb_search_ctxt_t* const p_ctxt = (osm_vl_arb_search_ctxt_t *)context; + + __osm_sa_vl_arb_by_comp_mask( p_ctxt->p_rcv, p_port, p_ctxt ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vlarb_rec_rcv_process( + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_sa_mad_t* p_rcvd_mad; + const ib_vl_arb_table_record_t* p_rcvd_rec; + const cl_ptr_vector_t* p_tbl; + const osm_port_t* p_port = NULL; + const ib_vl_arb_table_t* p_vl_arb; + cl_qlist_t rec_list; + osm_madw_t* p_resp_madw; + ib_sa_mad_t* p_resp_sa_mad; + ib_vl_arb_table_record_t* p_resp_rec; + uint32_t num_rec, pre_trim_num_rec; +#ifndef VENDOR_RMPP_SUPPORT + uint32_t trim_num_rec; +#endif + uint32_t i; + osm_vl_arb_search_ctxt_t context; + osm_vl_arb_item_t* p_rec_item; + ib_api_status_t status = IB_SUCCESS; + ib_net64_t comp_mask; + osm_physp_t* p_req_physp; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_vlarb_rec_rcv_process ); + + CL_ASSERT( p_madw ); + + p_rcvd_mad = osm_madw_get_sa_mad_ptr( p_madw ); + p_rcvd_rec = (ib_vl_arb_table_record_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + comp_mask = p_rcvd_mad->comp_mask; + + CL_ASSERT( p_rcvd_mad->attr_id == IB_MAD_ATTR_VLARB_RECORD ); + + /* we only support SubnAdmGet and SubnAdmGetTable methods */ + if ( (p_rcvd_mad->method != IB_MAD_METHOD_GET) && + (p_rcvd_mad->method != IB_MAD_METHOD_GETTABLE) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A05: " + "Unsupported Method (%s)\n", + ib_get_sa_method_str( p_rcvd_mad->method ) ); + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_MAD_STATUS_UNSUP_METHOD_ATTR ); + goto Exit; + } + + /* update the requester physical port. 
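+ (the same partition-enforcement lookup used by the
+ SwitchInfoRecord receiver above)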
*/ + p_req_physp = osm_get_physp_by_mad_addr(p_rcv->p_log, + p_rcv->p_subn, + osm_madw_get_mad_addr_ptr(p_madw) ); + if (p_req_physp == NULL) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A04: " + "Cannot find requester physical port\n" ); + goto Exit; + } + + p_vl_arb = (ib_vl_arb_table_t*)ib_sa_mad_get_payload_ptr( p_rcvd_mad ); + + cl_qlist_init( &rec_list ); + + context.p_rcvd_rec = p_rcvd_rec; + context.p_list = &rec_list; + context.comp_mask = p_rcvd_mad->comp_mask; + context.p_rcv = p_rcv; + context.block_num = p_rcvd_rec->block_num; + context.p_req_physp = p_req_physp; + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_vlarb_rec_rcv_process: " + "Got Query Lid:0x%04X(%02X), Port:0x%02X(%02X), Block:0x%02X(%02X)\n", + cl_ntoh16(p_rcvd_rec->lid), (comp_mask & IB_VLA_COMPMASK_LID) != 0, + p_rcvd_rec->port_num, (comp_mask & IB_VLA_COMPMASK_OUT_PORT) != 0, + p_rcvd_rec->block_num, (comp_mask & IB_VLA_COMPMASK_BLOCK) != 0 ); + + cl_plock_acquire( p_rcv->p_lock ); + + /* + If the user specified a LID, it obviously narrows our + work load, since we don't have to search every port + */ + if( comp_mask & IB_VLA_COMPMASK_LID ) + { + + p_tbl = &p_rcv->p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + status = osm_get_port_by_base_lid( p_rcv->p_subn, p_rcvd_rec->lid, &p_port ); + if ( ( status != IB_SUCCESS ) || ( p_port == NULL ) ) + { + status = IB_NOT_FOUND; + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A09: " + "No port found with LID 0x%x\n", + cl_ntoh16(p_rcvd_rec->lid) ); + } + } + + if ( status == IB_SUCCESS ) + { + /* if we got a unique port - no need for a port search */ + if( p_port ) + /* this does the loop on all the port phys ports */ + __osm_sa_vl_arb_by_comp_mask( p_rcv, p_port, &context ); + else + { + cl_qmap_apply_func( &p_rcv->p_subn->port_guid_tbl, + __osm_sa_vl_arb_by_comp_mask_cb, + &context ); + } + } + + cl_plock_release( p_rcv->p_lock ); + + num_rec = cl_qlist_count( &rec_list ); + + /* + * C15-0.1.30: + * If we do a SubnAdmGet and got more than one record it is an error ! + */ + if (p_rcvd_mad->method == IB_MAD_METHOD_GET) + { + if (num_rec == 0) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + if (num_rec > 1) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A08: " + "Got more than one record for SubnAdmGet (%u)\n", + num_rec ); + osm_sa_send_error( p_rcv->p_resp, p_madw, + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ); + + /* need to set the mem free ... 
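+ The items came from the cl_qlock_pool set up in
+ osm_vlarb_rec_rcv_init(), so each must be returned with
+ cl_qlock_pool_put() rather than free(); the loop below drains
+ the list until cl_qlist_remove_head() hits the list end.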
*/ + p_rec_item = (osm_vl_arb_item_t*)cl_qlist_remove_head( &rec_list ); + while( p_rec_item != (osm_vl_arb_item_t*)cl_qlist_end( &rec_list ) ) + { + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_rec_item = (osm_vl_arb_item_t*)cl_qlist_remove_head( &rec_list ); + } + + goto Exit; + } + } + + pre_trim_num_rec = num_rec; +#ifndef VENDOR_RMPP_SUPPORT + trim_num_rec = (MAD_BLOCK_SIZE - IB_SA_MAD_HDR_SIZE) / sizeof(ib_vl_arb_table_record_t); + if (trim_num_rec < num_rec) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_vlarb_rec_rcv_process: " + "Number of records:%u trimmed to:%u to fit in one MAD\n", + num_rec, trim_num_rec ); + num_rec = trim_num_rec; + } +#endif + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_vlarb_rec_rcv_process: " + "Returning %u records\n", num_rec ); + + if ((p_rcvd_mad->method == IB_MAD_METHOD_GET) && (num_rec == 0)) + { + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RECORDS ); + goto Exit; + } + + /* + * Get a MAD to reply. Address of Mad is in the received mad_wrapper + */ + p_resp_madw = osm_mad_pool_get( p_rcv->p_mad_pool, + p_madw->h_bind, + num_rec * sizeof(ib_vl_arb_table_record_t) + IB_SA_MAD_HDR_SIZE, + &p_madw->mad_addr ); + + if( !p_resp_madw ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A06: " + "osm_mad_pool_get failed\n" ); + + for( i = 0; i < num_rec; i++ ) + { + p_rec_item = (osm_vl_arb_item_t*)cl_qlist_remove_head( &rec_list ); + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + } + + osm_sa_send_error( p_rcv->p_resp, p_madw, IB_SA_MAD_STATUS_NO_RESOURCES ); + goto Exit; + } + + p_resp_sa_mad = osm_madw_get_sa_mad_ptr( p_resp_madw ); + + /* + Copy the MAD header back into the response mad. + Set the 'R' bit and the payload length, + Then copy all records from the list into the response payload. 
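+ Note the copy loop below still walks all pre_trim_num_rec
+ items: records beyond num_rec were trimmed from the reply but
+ must still be returned to the pool.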
+ */ + + memcpy( p_resp_sa_mad, p_rcvd_mad, IB_SA_MAD_HDR_SIZE ); + p_resp_sa_mad->method |= IB_MAD_METHOD_RESP_MASK; + /* C15-0.1.5 - always return SM_Key = 0 (table 185 p 884) */ + p_resp_sa_mad->sm_key = 0; + + /* Fill in the offset (paylen will be done by the rmpp SAR) */ + p_resp_sa_mad->attr_offset = + ib_get_attr_offset( sizeof(ib_vl_arb_table_record_t) ); + + p_resp_rec = (ib_vl_arb_table_record_t*) + ib_sa_mad_get_payload_ptr( p_resp_sa_mad ); + +#ifndef VENDOR_RMPP_SUPPORT + /* we support only one packet RMPP - so we will set the first and + last flags for gettable */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + { + p_resp_sa_mad->rmpp_type = IB_RMPP_TYPE_DATA; + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_FIRST | IB_RMPP_FLAG_LAST | IB_RMPP_FLAG_ACTIVE; + } +#else + /* forcefully define the packet as RMPP one */ + if (p_resp_sa_mad->method == IB_MAD_METHOD_GETTABLE_RESP) + p_resp_sa_mad->rmpp_flags = IB_RMPP_FLAG_ACTIVE; +#endif + + for( i = 0; i < pre_trim_num_rec; i++ ) + { + p_rec_item = (osm_vl_arb_item_t*)cl_qlist_remove_head( &rec_list ); + /* copy only if not trimmed */ + if (i < num_rec) + { + *p_resp_rec = p_rec_item->rec; + } + cl_qlock_pool_put( &p_rcv->pool, &p_rec_item->pool_item ); + p_resp_rec++; + } + + CL_ASSERT( cl_is_qlist_empty( &rec_list ) ); + + status = osm_vendor_send( p_resp_madw->h_bind, p_resp_madw, FALSE ); + if(status != IB_SUCCESS) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_process: ERR 2A07: " + "osm_vendor_send status = %s\n", + ib_get_err_str(status) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record_ctrl.c new file mode 100644 index 00000000..94a607a8 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sa_vlarb_record_ctrl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_vlarb_rec_rcv_ctrl_t. + * This object represents the VL Arbitration Record SA request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_vlarb_rec_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_vlarb_rec_rcv_process( ((osm_vlarb_rec_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vlarb_rec_rcv_ctrl_construct( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_vlarb_rec_rcv_ctrl_destroy( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vlarb_rec_rcv_ctrl_init( + IN osm_vlarb_rec_rcv_ctrl_t* const p_ctrl, + IN osm_vlarb_rec_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_vlarb_rec_rcv_ctrl_init ); + + osm_vlarb_rec_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_VL_ARB_RECORD, + __osm_vlarb_rec_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_vlarb_rec_rcv_ctrl_init: ERR 2B01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_service.c b/branches/Ndi/ulp/opensm/user/opensm/osm_service.c new file mode 100644 index 00000000..2f236b3c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_service.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of service record functions. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_svcr_construct( + IN osm_svcr_t* const p_svcr ) +{ + memset( p_svcr, 0, sizeof(*p_svcr) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_svcr_destroy( + IN osm_svcr_t* const p_svcr ) +{ + free( p_svcr); +} + +/********************************************************************** + **********************************************************************/ +void +osm_svcr_init( + IN osm_svcr_t* const p_svcr, + IN const ib_service_record_t *p_svc_rec ) +{ + CL_ASSERT( p_svcr ); + + p_svcr->modified_time = cl_get_time_stamp_sec(); + + /* We track the time left for this service in + an external field to avoid extra cl_ntoh/hton + required for working with the MAD field */ + p_svcr->lease_period = cl_ntoh32(p_svc_rec->service_lease); + p_svcr->service_record = *p_svc_rec; +} + +/********************************************************************** + **********************************************************************/ +osm_svcr_t* +osm_svcr_new( + IN const ib_service_record_t *p_svc_rec ) +{ + osm_svcr_t* p_svcr; + + CL_ASSERT(p_svc_rec); + + p_svcr = (osm_svcr_t*)malloc( sizeof(*p_svcr) ); + if( p_svcr ) + { + osm_svcr_construct( p_svcr ); + osm_svcr_init( p_svcr, p_svc_rec ); + } + + return( p_svcr ); +} + +/********************************************************************** + **********************************************************************/ +static +cl_status_t +__match_rid_of_svc_rec( + + IN const cl_list_item_t* const p_list_item, + IN void* context ) +{ + ib_service_record_t* p_svc_rec = (ib_service_record_t *)context; + osm_svcr_t* p_svcr = (osm_svcr_t*)p_list_item; + int32_t count; + + count = memcmp( + &p_svcr->service_record, + p_svc_rec, + sizeof(p_svc_rec->service_id) + + sizeof(p_svc_rec->service_gid) + + sizeof(p_svc_rec->service_pkey) ); + + if(count == 0) + return CL_SUCCESS; + else + return CL_NOT_FOUND; + +} + +/********************************************************************** + **********************************************************************/ +osm_svcr_t* +osm_svcr_get_by_rid( + IN osm_subn_t const *p_subn, + IN osm_log_t *p_log, + IN ib_service_record_t* const p_svc_rec ) +{ + cl_list_item_t* p_list_item; + + OSM_LOG_ENTER( p_log, osm_svcr_get_by_rid ); + + p_list_item = cl_qlist_find_from_head( + &p_subn->sa_sr_list, + __match_rid_of_svc_rec, + p_svc_rec); + + if( p_list_item == cl_qlist_end( &p_subn->sa_sr_list ) ) + p_list_item = NULL; + + OSM_LOG_EXIT( p_log ); + return (osm_svcr_t*)p_list_item; +} + +/********************************************************************** + **********************************************************************/ +void +osm_svcr_insert_to_db( + IN osm_subn_t *p_subn, + IN osm_log_t *p_log, + IN osm_svcr_t *p_svcr) +{ + 
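/* new records go to the head of the subnet-wide sa_sr_list;
+ osm_svcr_get_by_rid() above scans that list from the head, so
+ the most recently inserted record is matched first */
+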
OSM_LOG_ENTER( p_log, osm_svcr_insert_to_db ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_svcr_insert_to_db: " + "Inserting new Service Record into Database\n"); + + cl_qlist_insert_head(&p_subn->sa_sr_list, + &p_svcr->list_item); + + OSM_LOG_EXIT( p_log ); +} + +void +osm_svcr_remove_from_db( + IN osm_subn_t *p_subn, + IN osm_log_t *p_log, + IN osm_svcr_t *p_svcr) +{ + OSM_LOG_ENTER( p_log, osm_svcr_remove_from_db ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osm_svcr_remove_from_db: " + "Removing Service Record Name:%s ID:0x%016" PRIx64" from Database\n", + p_svcr->service_record.service_name, p_svcr->service_record.service_id + ); + + cl_qlist_remove_item(&p_subn->sa_sr_list, + &p_svcr->list_item); + + OSM_LOG_EXIT( p_log ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv.c new file mode 100644 index 00000000..7b5623fb --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv.c @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_slvl_rcv_t. + * This object represents the SLtoVL Receiver object. + * This object is part of the opensm family of objects. 
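+ *
+ * The receiver consumes SLtoVL mapping table responses (Get or
+ * Set, as noted in osm_slvl_rcv_process() below) and mirrors each
+ * table into the matching osm_physp_t via osm_physp_set_slvl_tbl().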
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rcv_construct( + IN osm_slvl_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rcv_destroy( + IN osm_slvl_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_slvl_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_slvl_rcv_init( + IN osm_slvl_rcv_t* const p_rcv, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_slvl_rcv_init ); + + osm_slvl_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_req = p_req; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +/* + * WE MIGHT ONLY RECEIVE A GET or SET responses + */ +void +osm_slvl_rcv_process( + IN const osm_slvl_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_slvl_table_t *p_slvl_tbl; + ib_smp_t *p_smp; + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_node_t *p_node; + osm_slvl_context_t *p_context; + ib_net64_t port_guid; + ib_net64_t node_guid; + uint8_t out_port_num, in_port_num; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_slvl_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_context = osm_madw_get_slvl_context_ptr( p_madw ); + p_slvl_tbl = (ib_slvl_table_t*)ib_smp_get_payload_ptr( p_smp ); + + port_guid = p_context->port_guid; + node_guid = p_context->node_guid; + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_SLVL_TABLE ); + + p_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + cl_plock_excl_acquire( p_rcv->p_lock ); + p_port = (osm_port_t*)cl_qmap_get( p_guid_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_guid_tbl) ) + { + cl_plock_release( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rcv_process: ERR 2C06: " + "No port object for port with GUID 0x%" PRIx64 + "\n\t\t\t\tfor parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + goto Exit; + } + + p_node = osm_port_get_parent_node( p_port ); + CL_ASSERT( p_node ); + + /* in case of a non switch node the attr modifier should be ignored */ + if (osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH) + { + out_port_num = (uint8_t)cl_ntoh32( p_smp->attr_mod & 0xFF000000); + in_port_num = (uint8_t)cl_ntoh32( (p_smp->attr_mod & 0x00FF0000) << 8); + p_physp = osm_node_get_physp_ptr( p_node, out_port_num ); + } + else + { + p_physp = osm_port_get_default_phys_ptr(p_port); + out_port_num = p_port->default_port_num; + 
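/* only one SLtoVL table applies to a CA or router port, so
+ the input port is fixed at 0 below */
+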
in_port_num = 0; + } + + CL_ASSERT( p_physp ); + + /* + We do not mind if this is a result of a set or get - all we want is to update + the subnet. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_slvl_rcv_process: " + "Got SLtoVL get response in_port_num %u out_port_num %u with GUID 0x%" PRIx64 + " for parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + in_port_num, out_port_num, + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + } + + /* + Determine if we encountered a new Physical Port. + If so, Ignore it. + */ + if( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_slvl_rcv_process: " + "Got invalid port number 0x%X\n", + out_port_num ); + goto Exit; + } + + osm_dump_slvl_map_table( p_rcv->p_log, + port_guid, in_port_num, + out_port_num, p_slvl_tbl, + OSM_LOG_DEBUG ); + + osm_physp_set_slvl_tbl( p_physp, p_slvl_tbl, in_port_num); + + Exit: + cl_plock_release( p_rcv->p_lock ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv_ctrl.c new file mode 100644 index 00000000..cb96cbe0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_slvl_map_rcv_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_slvl_rcv_ctrl_t. + * This object represents the SLtoVL request controller object. + * This object is part of the opensm family of objects. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_slvl_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_slvl_rcv_process( ((osm_slvl_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rcv_ctrl_construct( + IN osm_slvl_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_slvl_rcv_ctrl_destroy( + IN osm_slvl_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_slvl_rcv_ctrl_init( + IN osm_slvl_rcv_ctrl_t* const p_ctrl, + IN osm_slvl_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_slvl_rcv_ctrl_init ); + + osm_slvl_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SLVL, + __osm_slvl_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_slvl_rcv_ctrl_init: ERR 2D01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sm.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sm.c new file mode 100644 index 00000000..ea8352eb --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sm.c @@ -0,0 +1,824 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sm_t. + * This object represents the SM Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.9 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define OSM_SM_INITIAL_TID_VALUE 0x1233 + +/********************************************************************** + **********************************************************************/ +void +__osm_sm_sweeper( + IN void *p_ptr ) +{ + ib_api_status_t status; + osm_sm_t *const p_sm = ( osm_sm_t * ) p_ptr; + + OSM_LOG_ENTER( p_sm->p_log, __osm_sm_sweeper ); + + if( p_sm->thread_state == OSM_THREAD_STATE_INIT ) + { + p_sm->thread_state = OSM_THREAD_STATE_RUN; + } + + /* If the sweep interval was updated before - then run only if + * it is not zero. */ + while( p_sm->thread_state == OSM_THREAD_STATE_RUN && + p_sm->p_subn->opt.sweep_interval != 0 ) + { + /* do the sweep only if we are in MASTER state */ + if( p_sm->p_subn->sm_state == IB_SMINFO_STATE_MASTER || + p_sm->p_subn->sm_state == IB_SMINFO_STATE_DISCOVERING ) + osm_state_mgr_process( &p_sm->state_mgr, OSM_SIGNAL_SWEEP ); + + /* + * Wait on the event with a timeout. + * Sweeps may be initiated "off schedule" by simply + * signaling the event. + */ + status = cl_event_wait_on( &p_sm->signal, + p_sm->p_subn->opt.sweep_interval * 1000000, + TRUE ); + + if( status == CL_SUCCESS ) + { + if( osm_log_is_active( p_sm->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_sm->p_log, OSM_LOG_DEBUG, + "__osm_sm_sweeper: " "Off schedule sweep signalled\n" ); + } + } + else + { + if( status != CL_TIMEOUT ) + { + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "__osm_sm_sweeper: ERR 2E01: " + "Event wait failed (%s)\n", CL_STATUS_MSG( status ) ); + } + } + } + + OSM_LOG_EXIT( p_sm->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_construct( + IN osm_sm_t * const p_sm ) +{ + memset( p_sm, 0, sizeof( *p_sm ) ); + p_sm->thread_state = OSM_THREAD_STATE_NONE; + p_sm->sm_trans_id = OSM_SM_INITIAL_TID_VALUE; + cl_event_construct( &p_sm->signal ); + cl_event_construct( &p_sm->subnet_up_event ); + cl_thread_construct( &p_sm->sweeper ); + osm_req_construct( &p_sm->req ); + osm_req_ctrl_construct( &p_sm->req_ctrl ); + osm_resp_construct( &p_sm->resp ); + osm_ni_rcv_construct( &p_sm->ni_rcv ); + osm_ni_rcv_ctrl_construct( &p_sm->ni_rcv_ctrl ); + osm_pi_rcv_construct( &p_sm->pi_rcv ); + osm_pi_rcv_ctrl_construct( &p_sm->pi_rcv_ctrl ); + osm_nd_rcv_construct( &p_sm->nd_rcv ); + osm_nd_rcv_ctrl_construct( &p_sm->nd_rcv_ctrl ); + osm_sm_mad_ctrl_construct( &p_sm->mad_ctrl ); + osm_si_rcv_construct( &p_sm->si_rcv ); + osm_si_rcv_ctrl_construct( &p_sm->si_rcv_ctrl ); + osm_lid_mgr_construct( &p_sm->lid_mgr ); + osm_ucast_mgr_construct( &p_sm->ucast_mgr ); + osm_link_mgr_construct( &p_sm->link_mgr ); + osm_state_mgr_construct( &p_sm->state_mgr ); + osm_state_mgr_ctrl_construct( &p_sm->state_mgr_ctrl ); + osm_drop_mgr_construct( 
&p_sm->drop_mgr ); + osm_lft_rcv_construct( &p_sm->lft_rcv ); + osm_lft_rcv_ctrl_construct( &p_sm->lft_rcv_ctrl ); + osm_mft_rcv_construct( &p_sm->mft_rcv ); + osm_mft_rcv_ctrl_construct( &p_sm->mft_rcv_ctrl ); + osm_sweep_fail_ctrl_construct( &p_sm->sweep_fail_ctrl ); + osm_sminfo_rcv_construct( &p_sm->sm_info_rcv ); + osm_sminfo_rcv_ctrl_construct( &p_sm->sm_info_rcv_ctrl ); + osm_trap_rcv_construct( &p_sm->trap_rcv ); + osm_trap_rcv_ctrl_construct( &p_sm->trap_rcv_ctrl ); + osm_sm_state_mgr_construct( &p_sm->sm_state_mgr ); + osm_slvl_rcv_construct( &p_sm->slvl_rcv ); + osm_slvl_rcv_ctrl_construct( &p_sm->slvl_rcv_ctrl ); + osm_vla_rcv_construct( &p_sm->vla_rcv ); + osm_vla_rcv_ctrl_construct( &p_sm->vla_rcv_ctrl ); + osm_pkey_rcv_construct( &p_sm->pkey_rcv ); + osm_pkey_rcv_ctrl_construct( &p_sm->pkey_rcv_ctrl ); + osm_mcast_mgr_construct( &p_sm->mcast_mgr ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_shutdown( + IN osm_sm_t * const p_sm ) +{ + boolean_t signal_event = FALSE; + + OSM_LOG_ENTER( p_sm->p_log, osm_sm_shutdown ); + + /* + * Signal our threads that we're leaving. + */ + if( p_sm->thread_state != OSM_THREAD_STATE_NONE ) + signal_event = TRUE; + + p_sm->thread_state = OSM_THREAD_STATE_EXIT; + + /* + * Don't trigger unless event has been initialized. + * Destroy the thread before we tear down the other objects. + */ + if( signal_event ) + cl_event_signal( &p_sm->signal ); + + cl_thread_destroy( &p_sm->sweeper ); + + /* + * Always destroy controllers before the corresponding + * receiver to guarantee that all callbacks from the + * dispatcher are complete. + */ + osm_sm_mad_ctrl_destroy( &p_sm->mad_ctrl ); + osm_trap_rcv_ctrl_destroy( &p_sm->trap_rcv_ctrl ); + osm_sminfo_rcv_ctrl_destroy( &p_sm->sm_info_rcv_ctrl ); + osm_req_ctrl_destroy( &p_sm->req_ctrl ); + osm_ni_rcv_ctrl_destroy( &p_sm->ni_rcv_ctrl ); + osm_pi_rcv_ctrl_destroy( &p_sm->pi_rcv_ctrl ); + osm_si_rcv_ctrl_destroy( &p_sm->si_rcv_ctrl ); + osm_nd_rcv_ctrl_destroy( &p_sm->nd_rcv_ctrl ); + osm_lft_rcv_ctrl_destroy( &p_sm->lft_rcv_ctrl ); + osm_mft_rcv_ctrl_destroy( &p_sm->mft_rcv_ctrl ); + osm_slvl_rcv_ctrl_destroy( &p_sm->slvl_rcv_ctrl ); + osm_vla_rcv_ctrl_destroy( &p_sm->vla_rcv_ctrl ); + osm_pkey_rcv_ctrl_destroy( &p_sm->pkey_rcv_ctrl ); + osm_sweep_fail_ctrl_destroy( &p_sm->sweep_fail_ctrl ); + osm_state_mgr_ctrl_destroy( &p_sm->state_mgr_ctrl ); + + OSM_LOG_EXIT( p_sm->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_destroy( + IN osm_sm_t * const p_sm ) +{ + OSM_LOG_ENTER( p_sm->p_log, osm_sm_destroy ); + osm_trap_rcv_destroy( &p_sm->trap_rcv ); + osm_sminfo_rcv_destroy( &p_sm->sm_info_rcv ); + osm_req_destroy( &p_sm->req ); + osm_resp_destroy( &p_sm->resp ); + osm_ni_rcv_destroy( &p_sm->ni_rcv ); + osm_pi_rcv_destroy( &p_sm->pi_rcv ); + osm_si_rcv_destroy( &p_sm->si_rcv ); + osm_nd_rcv_destroy( &p_sm->nd_rcv ); + osm_lid_mgr_destroy( &p_sm->lid_mgr ); + osm_ucast_mgr_destroy( &p_sm->ucast_mgr ); + osm_link_mgr_destroy( &p_sm->link_mgr ); + osm_drop_mgr_destroy( &p_sm->drop_mgr ); + osm_lft_rcv_destroy( &p_sm->lft_rcv ); + osm_mft_rcv_destroy( &p_sm->mft_rcv ); + osm_slvl_rcv_destroy( &p_sm->slvl_rcv ); + osm_vla_rcv_destroy( &p_sm->vla_rcv ); + osm_pkey_rcv_destroy( &p_sm->pkey_rcv ); + osm_state_mgr_destroy( &p_sm->state_mgr ); + osm_sm_state_mgr_destroy( 
&p_sm->sm_state_mgr ); + osm_mcast_mgr_destroy( &p_sm->mcast_mgr ); + cl_event_destroy( &p_sm->signal ); + cl_event_destroy( &p_sm->subnet_up_event ); + + osm_log( p_sm->p_log, OSM_LOG_SYS, "Exiting SM\n" ); /* Format Waived */ + OSM_LOG_EXIT( p_sm->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_init( + IN osm_sm_t * const p_sm, + IN osm_subn_t * const p_subn, + IN osm_db_t * const p_db, + IN osm_vendor_t * const p_vendor, + IN osm_mad_pool_t * const p_mad_pool, + IN osm_vl15_t * const p_vl15, + IN osm_log_t * const p_log, + IN osm_stats_t * const p_stats, + IN cl_dispatcher_t * const p_disp, + IN cl_plock_t * const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sm_init ); + + p_sm->p_subn = p_subn; + p_sm->p_db = p_db; + p_sm->p_vendor = p_vendor; + p_sm->p_mad_pool = p_mad_pool; + p_sm->p_vl15 = p_vl15; + p_sm->p_log = p_log; + p_sm->p_disp = p_disp; + p_sm->p_lock = p_lock; + + status = cl_event_init( &p_sm->signal, FALSE ); + if( status != CL_SUCCESS ) + goto Exit; + + status = cl_event_init( &p_sm->subnet_up_event, FALSE ); + if( status != CL_SUCCESS ) + goto Exit; + + status = osm_sm_mad_ctrl_init( &p_sm->mad_ctrl, + p_sm->p_subn, + p_sm->p_mad_pool, + p_sm->p_vl15, + p_sm->p_vendor, + p_log, p_stats, p_lock, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_req_init( &p_sm->req, + p_mad_pool, + p_vl15, p_subn, p_log, &p_sm->sm_trans_id ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_req_ctrl_init( &p_sm->req_ctrl, &p_sm->req, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_resp_init( &p_sm->resp, p_mad_pool, p_vl15, p_subn, p_log ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_ni_rcv_init( &p_sm->ni_rcv, + &p_sm->req, + p_subn, p_log, &p_sm->state_mgr, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_ni_rcv_ctrl_init( &p_sm->ni_rcv_ctrl, + &p_sm->ni_rcv, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pi_rcv_init( &p_sm->pi_rcv, + &p_sm->req, + p_subn, p_log, &p_sm->state_mgr, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pi_rcv_ctrl_init( &p_sm->pi_rcv_ctrl, + &p_sm->pi_rcv, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_si_rcv_init( &p_sm->si_rcv, + p_sm->p_subn, + p_sm->p_log, + &p_sm->req, &p_sm->state_mgr, p_sm->p_lock ); + + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_si_rcv_ctrl_init( &p_sm->si_rcv_ctrl, + &p_sm->si_rcv, p_sm->p_log, p_sm->p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_nd_rcv_init( &p_sm->nd_rcv, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_nd_rcv_ctrl_init( &p_sm->nd_rcv_ctrl, + &p_sm->nd_rcv, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lid_mgr_init( &p_sm->lid_mgr, + &p_sm->req, + p_sm->p_subn, + p_sm->p_db, p_sm->p_log, p_sm->p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_ucast_mgr_init( &p_sm->ucast_mgr, + &p_sm->req, + p_sm->p_subn, + p_sm->p_log, p_sm->p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_link_mgr_init( &p_sm->link_mgr, + &p_sm->req, + p_sm->p_subn, p_sm->p_log, p_sm->p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_state_mgr_init( &p_sm->state_mgr, + p_sm->p_subn, + &p_sm->lid_mgr, + &p_sm->ucast_mgr, + &p_sm->mcast_mgr, + 
&p_sm->link_mgr, + &p_sm->drop_mgr, + &p_sm->req, + p_stats, + &p_sm->sm_state_mgr, + &p_sm->mad_ctrl, + p_sm->p_lock, + &p_sm->subnet_up_event, + p_sm->p_log ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_state_mgr_ctrl_init( &p_sm->state_mgr_ctrl, + &p_sm->state_mgr, + p_sm->p_log, p_sm->p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_drop_mgr_init( &p_sm->drop_mgr, + p_sm->p_subn, + p_sm->p_log, &p_sm->req, p_sm->p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lft_rcv_init( &p_sm->lft_rcv, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_lft_rcv_ctrl_init( &p_sm->lft_rcv_ctrl, + &p_sm->lft_rcv, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mft_rcv_init( &p_sm->mft_rcv, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mft_rcv_ctrl_init( &p_sm->mft_rcv_ctrl, + &p_sm->mft_rcv, p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sweep_fail_ctrl_init( &p_sm->sweep_fail_ctrl, + p_log, &p_sm->state_mgr, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sminfo_rcv_init( &p_sm->sm_info_rcv, + p_subn, + p_stats, + &p_sm->resp, + p_log, + &p_sm->state_mgr, + &p_sm->sm_state_mgr, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sminfo_rcv_ctrl_init( &p_sm->sm_info_rcv_ctrl, + &p_sm->sm_info_rcv, + p_sm->p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_trap_rcv_init( &p_sm->trap_rcv, + p_subn, + p_stats, + &p_sm->resp, p_log, &p_sm->state_mgr, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_trap_rcv_ctrl_init( &p_sm->trap_rcv_ctrl, + &p_sm->trap_rcv, p_sm->p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_sm_state_mgr_init( &p_sm->sm_state_mgr, + &p_sm->state_mgr, + p_sm->p_subn, &p_sm->req, p_sm->p_log ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_mcast_mgr_init( &p_sm->mcast_mgr, + &p_sm->req, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_slvl_rcv_init( &p_sm->slvl_rcv, + &p_sm->req, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_slvl_rcv_ctrl_init( &p_sm->slvl_rcv_ctrl, + &p_sm->slvl_rcv, p_sm->p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_vla_rcv_init( &p_sm->vla_rcv, + &p_sm->req, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_vla_rcv_ctrl_init( &p_sm->vla_rcv_ctrl, + &p_sm->vla_rcv, p_sm->p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pkey_rcv_init( &p_sm->pkey_rcv, + &p_sm->req, p_subn, p_log, p_lock ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_pkey_rcv_ctrl_init( &p_sm->pkey_rcv_ctrl, + &p_sm->pkey_rcv, p_sm->p_log, p_disp ); + if( status != IB_SUCCESS ) + goto Exit; + + /* + * Now that the component objects are initialized, start + * the sweeper thread if the user wants sweeping. 
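osm_sm_init above strings a long sequence of sub-object initializations together with one error convention: test each returned status and jump to a single Exit label, leaving all cleanup to the destroy path. A minimal standalone sketch of that idiom, with toy init functions standing in for the OpenSM calls (names here are illustrative only):

    #include <stdio.h>

    typedef int api_status_t;
    #define API_SUCCESS 0
    #define API_ERROR   1

    static api_status_t init_event(void)    { return API_SUCCESS; }
    static api_status_t init_mad_ctrl(void) { return API_SUCCESS; }
    static api_status_t init_req(void)      { return API_ERROR; } /* simulated failure */

    static api_status_t sm_init_sketch(void)
    {
        api_status_t status;

        /* Initialize sub-objects in dependency order; the first failure
         * jumps to the single Exit label with its status intact. */
        status = init_event();
        if (status != API_SUCCESS)
            goto Exit;

        status = init_mad_ctrl();
        if (status != API_SUCCESS)
            goto Exit;

        status = init_req();
        if (status != API_SUCCESS)
            goto Exit;

    Exit:
        /* No unwinding here: the destroy path must cope with
         * partially constructed state. */
        return status;
    }

    int main(void)
    {
        printf("sm_init_sketch -> %d\n", sm_init_sketch());
        return 0;
    }

Because nothing is unwound on the error path, osm_sm_destroy and the routines it calls must tolerate partially constructed state, which is presumably why the construct routines zero every member first.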
+ */ + if( p_sm->p_subn->opt.sweep_interval ) + { + p_sm->thread_state = OSM_THREAD_STATE_INIT; + status = cl_thread_init( &p_sm->sweeper, __osm_sm_sweeper, p_sm, + "opensm sweeper" ); + if( status != IB_SUCCESS ) + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_sweep( + IN osm_sm_t * const p_sm ) +{ + OSM_LOG_ENTER( p_sm->p_log, osm_sm_sweep ); + osm_state_mgr_process( &p_sm->state_mgr, OSM_SIGNAL_SWEEP ); + OSM_LOG_EXIT( p_sm->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_bind( + IN osm_sm_t * const p_sm, + IN const ib_net64_t port_guid ) +{ + ib_api_status_t status; + + OSM_LOG_ENTER( p_sm->p_log, osm_sm_bind ); + + status = osm_sm_mad_ctrl_bind( &p_sm->mad_ctrl, port_guid ); + + if( status != IB_SUCCESS ) + { + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_bind: ERR 2E10: " + "SM MAD Controller bind failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_sm->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +__osm_sm_mgrp_connect( + IN osm_sm_t * const p_sm, + IN osm_mgrp_t * const p_mgrp, + IN const ib_net64_t port_guid, + IN osm_mcast_req_type_t req_type ) +{ + ib_api_status_t status; + osm_mcast_mgr_ctxt_t *ctx2; + + OSM_LOG_ENTER( p_sm->p_log, __osm_sm_mgrp_connect ); + + /* + * 'Schedule' all the QP0 traffic for when the state manager + * isn't busy trying to do something else. + */ + ctx2 = + ( osm_mcast_mgr_ctxt_t * ) malloc( sizeof( osm_mcast_mgr_ctxt_t ) ); + memcpy( &ctx2->mlid, &p_mgrp->mlid, sizeof( p_mgrp->mlid ) ); + ctx2->req_type = req_type; + ctx2->port_guid = port_guid; + + status = osm_state_mgr_process_idle( &p_sm->state_mgr, + osm_mcast_mgr_process_mgrp_cb, + NULL, &p_sm->mcast_mgr, + ( void * )ctx2 ); + + OSM_LOG_EXIT( p_sm->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_mgrp_disconnect( + IN osm_sm_t * const p_sm, + IN osm_mgrp_t * const p_mgrp, + IN const ib_net64_t port_guid ) +{ + ib_api_status_t status; + osm_mcast_mgr_ctxt_t *ctx2; + + OSM_LOG_ENTER( p_sm->p_log, __osm_sm_mgrp_disconnect ); + + /* + * 'Schedule' all the QP0 traffic for when the state manager + * isn't busy trying to do something else. 
+ */ + ctx2 = + ( osm_mcast_mgr_ctxt_t * ) malloc( sizeof( osm_mcast_mgr_ctxt_t ) ); + memcpy( &ctx2->mlid, &p_mgrp->mlid, sizeof( p_mgrp->mlid ) ); + ctx2->req_type = OSM_MCAST_REQ_TYPE_LEAVE; + ctx2->port_guid = port_guid; + + status = osm_state_mgr_process_idle( &p_sm->state_mgr, + osm_mcast_mgr_process_mgrp_cb, + NULL, &p_sm->mcast_mgr, ctx2 ); + if( status != IB_SUCCESS ) + { + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "__osm_sm_mgrp_disconnect: ERR 2E11: " + "Failure processing multicast group (%s)\n", + ib_get_err_str( status ) ); + } + + OSM_LOG_EXIT( p_sm->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_mcgrp_join( + IN osm_sm_t * const p_sm, + IN const ib_net16_t mlid, + IN const ib_net64_t port_guid, + IN osm_mcast_req_type_t req_type ) +{ + osm_mgrp_t *p_mgrp; + osm_port_t *p_port; + cl_qmap_t *p_tbl; + ib_api_status_t status = IB_SUCCESS; + osm_mcm_info_t *p_mcm; + + OSM_LOG_ENTER( p_sm->p_log, osm_sm_mcgrp_join ); + + osm_log( p_sm->p_log, OSM_LOG_VERBOSE, + "osm_sm_mcgrp_join: " + "Port 0x%016" PRIx64 " joining MLID 0x%X\n", + cl_ntoh64( port_guid ), cl_ntoh16( mlid ) ); + + /* + * Acquire the port object for the port joining this group. + */ + CL_PLOCK_EXCL_ACQUIRE( p_sm->p_lock ); + p_port = ( osm_port_t * ) cl_qmap_get( &p_sm->p_subn->port_guid_tbl, + port_guid ); + if( p_port == + ( osm_port_t * ) cl_qmap_end( &p_sm->p_subn->port_guid_tbl ) ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_join: ERR 2E05: " + "No port object for port 0x%016" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_INVALID_PARAMETER; + goto Exit; + } + + /* + * If this multicast group does not already exist, create it. + */ + p_tbl = &p_sm->p_subn->mgrp_mlid_tbl; + p_mgrp = ( osm_mgrp_t * ) cl_qmap_get( p_tbl, mlid ); + if( p_mgrp == ( osm_mgrp_t * ) cl_qmap_end( p_tbl ) ) + { + osm_log( p_sm->p_log, OSM_LOG_VERBOSE, + "osm_sm_mcgrp_join: " + "Creating group, MLID 0x%X\n", cl_ntoh16( mlid ) ); + + p_mgrp = osm_mgrp_new( mlid ); + if( p_mgrp == NULL ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_join: ERR 2E06: " + "Unable to allocate multicast group object\n" ); + status = IB_INSUFFICIENT_MEMORY; + goto Exit; + } + + cl_qmap_insert( p_tbl, mlid, &p_mgrp->map_item ); + } + else + { + /* + * The group already exists. If the port is not a + * member of the group, then fail immediately. + * This can happen since the spinlock is released briefly + * before the SA calls this function. + */ + if( !osm_mgrp_is_guid( p_mgrp, port_guid ) ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_join: ERR 2E12: " + "Port 0x%016" PRIx64 " not in mcast group 0x%X\n", + cl_ntoh64( port_guid ), cl_ntoh16( mlid ) ); + status = IB_NOT_FOUND; + goto Exit; + } + } + + /* + * Check if the object (according to mlid) already exists on this port. + * If it does - then no need to update it again, and no need to + * create the mc tree again. Just goto Exit. 
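__osm_sm_mgrp_connect and __osm_sm_mgrp_disconnect both allocate a small context on the heap and hand it to osm_state_mgr_process_idle; from that point on the idle-queue callback owns the allocation. A self-contained sketch of that hand-off with toy types and a toy one-slot queue (note the malloc check is an addition in this sketch; the originals use the result unchecked):

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy context, standing in for osm_mcast_mgr_ctxt_t. */
    typedef struct {
        unsigned short     mlid;  /* group MLID (network order in the real code) */
        unsigned long long guid;  /* joining/leaving port GUID */
    } mgrp_ctxt_t;

    /* Toy "idle queue" slot: run fn(ctx) later; fn owns and frees ctx. */
    typedef struct {
        void (*fn)(void *ctx);
        void  *ctx;
    } idle_work_t;

    static void process_mgrp_cb(void *context)
    {
        mgrp_ctxt_t *p_ctxt = context;
        printf("processing MLID 0x%x for GUID 0x%llx\n",
               (unsigned)p_ctxt->mlid, p_ctxt->guid);
        free(p_ctxt);  /* the callback owns the context */
    }

    static int schedule_mgrp_work(idle_work_t *slot,
                                  unsigned short mlid, unsigned long long guid)
    {
        mgrp_ctxt_t *p_ctxt = malloc(sizeof(*p_ctxt));
        if (p_ctxt == NULL)  /* added check; the originals do not test malloc */
            return -1;
        p_ctxt->mlid = mlid;
        p_ctxt->guid = guid;
        slot->fn = process_mgrp_cb;
        slot->ctx = p_ctxt;
        return 0;
    }

    int main(void)
    {
        idle_work_t slot;
        if (schedule_mgrp_work(&slot, 0xC000, 0x1234ULL) == 0)
            slot.fn(slot.ctx);  /* OpenSM runs this later, from the idle queue */
        return 0;
    }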
+ */ + p_mcm = ( osm_mcm_info_t * ) cl_qlist_head( &p_port->mcm_list ); + while( p_mcm != ( osm_mcm_info_t * ) cl_qlist_end( &p_port->mcm_list ) ) + { + if( p_mcm->mlid == mlid ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_DEBUG, + "osm_sm_mcgrp_join: " + "Found mlid object for Port:" + "0x%016" PRIx64 " lid:0x%X\n", + cl_ntoh64( port_guid ), cl_ntoh16( mlid ) ); + goto Exit; + } + p_mcm = ( osm_mcm_info_t * ) cl_qlist_next( &p_mcm->list_item ); + } + + status = osm_port_add_mgrp( p_port, mlid ); + if( status != IB_SUCCESS ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_join: ERR 2E03: " + "Unable to associate port 0x%" PRIx64 " with mlid 0x%X\n", + cl_ntoh64( osm_port_get_guid( p_port ) ), + cl_ntoh16( osm_mgrp_get_mlid( p_mgrp ) ) ); + goto Exit; + } + + CL_PLOCK_RELEASE( p_sm->p_lock ); + status = __osm_sm_mgrp_connect( p_sm, p_mgrp, port_guid, req_type ); + + Exit: + OSM_LOG_EXIT( p_sm->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_mcgrp_leave( + IN osm_sm_t * const p_sm, + IN const ib_net16_t mlid, + IN const ib_net64_t port_guid ) +{ + osm_mgrp_t *p_mgrp; + osm_port_t *p_port; + cl_qmap_t *p_tbl; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_sm->p_log, osm_sm_mcgrp_leave ); + + osm_log( p_sm->p_log, OSM_LOG_VERBOSE, + "osm_sm_mcgrp_leave: " + "Port 0x%" PRIx64 " leaving MLID 0x%X\n", + cl_ntoh64( port_guid ), cl_ntoh16( mlid ) ); + + /* + * Acquire the port object for the port leaving this group. + */ + /* note: p_sm->p_lock is locked by the caller, but will be released later in + this function */ + p_port = ( osm_port_t * ) cl_qmap_get( &p_sm->p_subn->port_guid_tbl, + port_guid ); + if( p_port == + ( osm_port_t * ) cl_qmap_end( &p_sm->p_subn->port_guid_tbl ) ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_leave: ERR 2E04: " + "No port object for port 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + status = IB_INVALID_PARAMETER; + goto Exit; + } + + /* + * Get the multicast group object for this group. + */ + p_tbl = &p_sm->p_subn->mgrp_mlid_tbl; + p_mgrp = ( osm_mgrp_t * ) cl_qmap_get( p_tbl, mlid ); + if( p_mgrp == ( osm_mgrp_t * ) cl_qmap_end( p_tbl ) ) + { + CL_PLOCK_RELEASE( p_sm->p_lock ); + osm_log( p_sm->p_log, OSM_LOG_ERROR, + "osm_sm_mcgrp_leave: ERR 2E08: " + "No multicast group for MLID 0x%X\n", cl_ntoh16( mlid ) ); + status = IB_INVALID_PARAMETER; + goto Exit; + } + + /* + * Walk the list of ports in the group, and remove the appropriate one. + */ + osm_mgrp_remove_port( p_sm->p_subn, p_sm->p_log, p_mgrp, port_guid ); + + osm_port_remove_mgrp( p_port, mlid ); + + CL_PLOCK_RELEASE( p_sm->p_lock ); + + __osm_sm_mgrp_disconnect( p_sm, p_mgrp, port_guid ); + + Exit: + OSM_LOG_EXIT( p_sm->p_log ); + return ( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sm_mad_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sm_mad_ctrl.c new file mode 100644 index 00000000..a79e50b7 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sm_mad_ctrl.c @@ -0,0 +1,1049 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sm_mad_ctrl_t. + * This object represents the SM MAD request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/****f* opensm: SM/__osm_sm_mad_ctrl_retire_trans_mad + * NAME + * __osm_sm_mad_ctrl_retire_trans_mad + * + * DESCRIPTION + * This function handles clean-up of MADs associated with the SM's + * outstanding transactions on the wire. + * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_retire_trans_mad( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_madw_t* const p_madw ) +{ + uint32_t outstanding; + cl_status_t status; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_retire_trans_mad ); + + CL_ASSERT( p_madw ); + /* + Return the MAD & wrapper to the pool. + */ + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_retire_trans_mad: " + "Retiring MAD with TID 0x%" PRIx64 "\n", + cl_ntoh64( osm_madw_get_smp_ptr( p_madw )->trans_id ) ); + } + + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + + outstanding = cl_atomic_dec( &p_ctrl->p_stats->qp0_mads_outstanding ); + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_retire_trans_mad: " + "%u QP0 MADs outstanding\n", + p_ctrl->p_stats->qp0_mads_outstanding ); + } + + if( outstanding == 0 ) + { + /* + The wire is clean. + Signal the state manager. 
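__osm_sm_mad_ctrl_retire_trans_mad hinges on the atomic decrement: the thread whose decrement takes qp0_mads_outstanding to zero is the one that posts OSM_MSG_NO_SMPS_OUTSTANDING. A compact C11 sketch of the same handshake; note that cl_atomic_dec evidently returns the new value (the code above tests it against zero), while C11 atomic_fetch_sub returns the previous one, hence the prev == 1 test:

    #include <stdio.h>
    #include <stdatomic.h>

    /* Stand-in for p_stats->qp0_mads_outstanding. */
    static atomic_uint outstanding = 3;

    static void retire_one_mad(void)
    {
        /* atomic_fetch_sub returns the value BEFORE the subtraction,
         * so prev == 1 means this call emptied the wire. */
        unsigned prev = atomic_fetch_sub(&outstanding, 1u);

        printf("%u QP0 MADs outstanding\n", prev - 1);
        if (prev == 1)
            printf("wire is clean: post OSM_MSG_NO_SMPS_OUTSTANDING\n");
    }

    int main(void)
    {
        retire_one_mad();
        retire_one_mad();
        retire_one_mad();  /* only this call signals */
        return 0;
    }

Exactly one retiring thread observes the transition to zero, so the "no pending transactions" message cannot be posted twice for the same quiescent point.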
+ */ + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_retire_trans_mad: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( OSM_MSG_NO_SMPS_OUTSTANDING ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + OSM_MSG_NO_SMPS_OUTSTANDING, + (void *)OSM_SIGNAL_NO_PENDING_TRANSACTIONS, + NULL, + NULL ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_retire_trans_mad: ERR 3101: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + goto Exit; + } + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/************/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_disp_done_callback + * NAME + * __osm_sm_mad_ctrl_disp_done_callback + * + * DESCRIPTION + * This function is the Dispatcher callback that indicates + * a received MAD has been processed by the recipient. + * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_disp_done_callback( + IN void* context, + IN void* p_data ) +{ + osm_sm_mad_ctrl_t* const p_ctrl = (osm_sm_mad_ctrl_t*)context; + osm_madw_t* const p_madw = (osm_madw_t*)p_data; + ib_smp_t *p_smp; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_disp_done_callback ); + + /* + If the MAD that just finished processing was a response, + then retire the transaction, since we must have generated + the request. + + Otherwise, retire the transaction if a response was expected, + as in the case of a send failure. If a response was not expected, + just put the MAD back in the pool, because the MAD was a query + from some outside agent, e.g. Get(SMInfo) from another SM. + */ + p_smp = osm_madw_get_smp_ptr( p_madw ); + if( ib_smp_is_response( p_smp ) ) + { + CL_ASSERT( p_madw->resp_expected == FALSE ); + __osm_sm_mad_ctrl_retire_trans_mad( p_ctrl, p_madw ); + } + else + { + if( p_madw->resp_expected == TRUE ) + __osm_sm_mad_ctrl_retire_trans_mad( p_ctrl, p_madw ); + else + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/************/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_update_wire_stats + * NAME + * __osm_sm_mad_ctrl_update_wire_stats + * + * DESCRIPTION + * Updates wire stats for outstanding MADs and calls the VL15 poller. + * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_update_wire_stats( + IN osm_sm_mad_ctrl_t* const p_ctrl ) +{ + uint32_t mads_on_wire; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_update_wire_stats ); + + mads_on_wire = cl_atomic_dec( + &p_ctrl->p_stats->qp0_mads_outstanding_on_wire ); + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_update_wire_stats: " + "%u SMPs on the wire, %u outstanding\n", mads_on_wire, + p_ctrl->p_stats->qp0_mads_outstanding ); + } + + /* + We can signal the VL15 controller to send another MAD + if any are waiting for transmission. + */ + osm_vl15_poll( p_ctrl->p_vl15 ); + OSM_LOG_EXIT( p_ctrl->p_log ); +} + +/****f* opensm: SM/__osm_sm_mad_ctrl_process_get_resp + * NAME + * __osm_sm_mad_ctrl_process_get_resp + * + * DESCRIPTION + * This function handles method GetResp() for received MADs. + * This is the most common path for QP0 MADs. 
+ * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_process_get_resp( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_madw_t *p_madw, + IN void* transaction_context ) +{ + ib_smp_t* p_smp; + cl_status_t status; + osm_madw_t* p_old_madw; + cl_disp_msgid_t msg_id = CL_DISP_MSGID_NONE; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_process_get_resp ); + + CL_ASSERT( p_madw ); + CL_ASSERT( transaction_context ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + if( !ib_smp_is_d( p_smp ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_get_resp: ERR 3102: " + "'D' bit not set in returned SMP\n" ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + } + } + + p_old_madw = (osm_madw_t*)transaction_context; + + __osm_sm_mad_ctrl_update_wire_stats( p_ctrl ); + + /* + Copy the MAD Wrapper context from the requesting MAD + to the new MAD. This mechanism allows the recipient + controller to recover its own context regarding this + MAD transaction. Once we've copied the context, we + can return the original MAD to the pool. + */ + osm_madw_copy_context( p_madw, p_old_madw ); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_old_madw ); + + /* + Note that attr_id (like the rest of the MAD) is in + network byte order. + */ + switch( p_smp->attr_id ) + { + case IB_MAD_ATTR_NODE_DESC: + msg_id = OSM_MSG_MAD_NODE_DESC; + break; + case IB_MAD_ATTR_NODE_INFO: + msg_id = OSM_MSG_MAD_NODE_INFO; + break; + case IB_MAD_ATTR_SWITCH_INFO: + msg_id = OSM_MSG_MAD_SWITCH_INFO; + break; + case IB_MAD_ATTR_PORT_INFO: + msg_id = OSM_MSG_MAD_PORT_INFO; + break; + case IB_MAD_ATTR_LIN_FWD_TBL: + msg_id = OSM_MSG_MAD_LFT; + break; + case IB_MAD_ATTR_MCAST_FWD_TBL: + msg_id = OSM_MSG_MAD_MFT; + break; + case IB_MAD_ATTR_SM_INFO: + msg_id = OSM_MSG_MAD_SM_INFO; + break; + case IB_MAD_ATTR_SLVL_TABLE: + msg_id = OSM_MSG_MAD_SLVL; + break; + case IB_MAD_ATTR_VL_ARBITRATION: + msg_id = OSM_MSG_MAD_VL_ARB; + break; + case IB_MAD_ATTR_P_KEY_TABLE: + msg_id = OSM_MSG_MAD_PKEY; + break; + + case IB_MAD_ATTR_GUID_INFO: + case IB_MAD_ATTR_CLASS_PORT_INFO: + case IB_MAD_ATTR_NOTICE: + case IB_MAD_ATTR_INFORM_INFO: + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_get_resp: ERR 3103: " + "Unsupported attribute = 0x%X\n", + cl_ntoh16( p_smp->attr_id ) ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + goto Exit; + } + + if( msg_id != CL_DISP_MSGID_NONE ) + { + /* + Post this MAD to the dispatcher for asynchronous + processing by the appropriate controller. + */ + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_process_get_resp: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( msg_id ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + msg_id, + p_madw, + __osm_sm_mad_ctrl_disp_done_callback, + p_ctrl ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_get_resp: ERR 3104: " + "Dispatcher post message failed (%s) for attribute = 0x%X\n", + CL_STATUS_MSG( status ), + cl_ntoh16( p_smp->attr_id ) ); + goto Exit; + } + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} + +/****f* opensm: SM/__osm_sm_mad_ctrl_process_get + * NAME + * __osm_sm_mad_ctrl_process_get + * + * DESCRIPTION + * This function handles method Get() for received MADs. 
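The attr_id switch in __osm_sm_mad_ctrl_process_get_resp is a pure attribute-to-message mapping, and the Get/Set/Trap handlers that follow repeat the pattern with smaller attribute sets. A sketch of a table-driven alternative that keeps the mapping in one place (the constants here are illustrative stand-ins, not the real ib_types.h or OSM_MSG values):

    #include <stdio.h>
    #include <stddef.h>

    /* Illustrative stand-ins for the network-order attribute IDs and the
     * dispatcher message IDs. */
    enum attr { ATTR_NODE_INFO = 1, ATTR_PORT_INFO = 2, ATTR_SM_INFO = 3 };
    enum msg  { MSG_NONE = 0, MSG_NODE_INFO, MSG_PORT_INFO, MSG_SM_INFO };

    static const struct { enum attr attr; enum msg msg; } attr_to_msg[] = {
        { ATTR_NODE_INFO, MSG_NODE_INFO },
        { ATTR_PORT_INFO, MSG_PORT_INFO },
        { ATTR_SM_INFO,   MSG_SM_INFO   },
    };

    static enum msg lookup_msg(enum attr attr)
    {
        size_t i;

        for (i = 0; i < sizeof(attr_to_msg) / sizeof(attr_to_msg[0]); i++)
            if (attr_to_msg[i].attr == attr)
                return attr_to_msg[i].msg;
        return MSG_NONE;  /* unknown attribute: log and retire the MAD */
    }

    int main(void)
    {
        printf("PORT_INFO -> msg %d\n", lookup_msg(ATTR_PORT_INFO));
        printf("unknown   -> msg %d\n", lookup_msg((enum attr)99));
        return 0;
    }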
+ * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_process_get( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_madw_t *p_madw ) +{ + ib_smp_t* p_smp; + cl_status_t status; + cl_disp_msgid_t msg_id = CL_DISP_MSGID_NONE; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_process_get ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + /* + Note that attr_id (like the rest of the MAD) is in + network byte order. + */ + switch( p_smp->attr_id ) + { + case IB_MAD_ATTR_SM_INFO: + msg_id = OSM_MSG_MAD_SM_INFO; + break; + + default: + osm_log( p_ctrl->p_log, OSM_LOG_VERBOSE, + "__osm_sm_mad_ctrl_process_get: " + "Ignoring SubnGet MAD - unsupported attribute = 0x%X\n", + cl_ntoh16( p_smp->attr_id ) ); + break; + } + + if( msg_id != CL_DISP_MSGID_NONE ) + { + /* + Post this MAD to the dispatcher for asynchronous + processing by the appropriate controller. + */ + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_process_get: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( msg_id ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + msg_id, + p_madw, + __osm_sm_mad_ctrl_disp_done_callback, + p_ctrl ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_get: ERR 3106: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + goto Exit; + } + } + else + { + /* + There is an unknown MAD attribute type for which there is + no recipient. Simply retire the MAD here. + */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_process_set + * NAME + * __osm_sm_mad_ctrl_process_set + * + * DESCRIPTION + * This function handles method Set() for received MADs. + * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_process_set( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_madw_t *p_madw ) +{ + ib_smp_t* p_smp; + cl_status_t status; + cl_disp_msgid_t msg_id = CL_DISP_MSGID_NONE; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_process_set ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + /* + Note that attr_id (like the rest of the MAD) is in + network byte order. + */ + switch( p_smp->attr_id ) + { + case IB_MAD_ATTR_SM_INFO: + msg_id = OSM_MSG_MAD_SM_INFO; + break; + + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_set: ERR 3107: " + "Unsupported attribute = 0x%X\n", + cl_ntoh16( p_smp->attr_id ) ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + break; + } + + if( msg_id != CL_DISP_MSGID_NONE ) + { + /* + Post this MAD to the dispatcher for asynchronous + processing by the appropriate controller. + */ + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_process_set: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( msg_id ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + msg_id, + p_madw, + __osm_sm_mad_ctrl_disp_done_callback, + p_ctrl ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_set: ERR 3108: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + goto Exit; + } + } + else + { + /* + There is an unknown MAD attribute type for which there is + no recipient. Simply retire the MAD here. 
+ */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_process_trap + * NAME + * __osm_sm_mad_ctrl_process_trap + * + * DESCRIPTION + * This function handles method Trap() for received MADs. + * + * SYNOPSIS + */ +static void +__osm_sm_mad_ctrl_process_trap( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_madw_t *p_madw ) +{ + ib_smp_t* p_smp; + cl_status_t status; + cl_disp_msgid_t msg_id = CL_DISP_MSGID_NONE; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_process_trap ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + /* Make sure OpenSM is master. If not - then we should not process the trap */ + if (p_ctrl->p_subn->sm_state != IB_SMINFO_STATE_MASTER) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_process_trap: " + "Received trap but OpenSM is not in MASTER state. " + "Dropping mad\n"); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + + /* + Note that attr_id (like the rest of the MAD) is in + network byte order. + */ + switch( p_smp->attr_id ) + { + case IB_MAD_ATTR_NOTICE: + msg_id = OSM_MSG_MAD_NOTICE; + break; + + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_trap: ERR 3109: " + "Unsupported attribute = 0x%X\n", + cl_ntoh16( p_smp->attr_id ) ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + break; + } + + if( msg_id != CL_DISP_MSGID_NONE ) + { + /* + Post this MAD to the dispatcher for asynchronous + processing by the appropriate controller. + */ + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_process_trap: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( msg_id ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + msg_id, + p_madw, + __osm_sm_mad_ctrl_disp_done_callback, + p_ctrl ); + + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_process_trap: ERR 3110: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + goto Exit; + } + } + else + { + /* + There is an unknown MAD attribute type for which there is + no recipient. Simply retire the MAD here. + */ + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_rcv_callback + * NAME + * __osm_sm_mad_ctrl_rcv_callback + * + * DESCRIPTION + * This is the callback from the transport layer for received MADs. + * + * SYNOPSIS + */ +void +__osm_sm_mad_ctrl_rcv_callback( + IN osm_madw_t *p_madw, + IN void *bind_context, + IN osm_madw_t *p_req_madw ) +{ + osm_sm_mad_ctrl_t* p_ctrl = (osm_sm_mad_ctrl_t*)bind_context; + ib_smp_t* p_smp; + ib_net16_t status; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_rcv_callback ); + + CL_ASSERT( p_madw ); + + /* + A MAD was received from the wire, possibly in response to a request. 
+ */ + cl_atomic_inc( &p_ctrl->p_stats->qp0_mads_rcvd ); + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_rcv_callback: " + "%u QP0 MADs received\n", + p_ctrl->p_stats->qp0_mads_rcvd ); + } + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + /* if we are closing down simply do nothing */ + if (osm_exit_flag) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_rcv_callback: " + "Ignoring received mad - since we are exiting\n"); + + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_DEBUG ); + + /* retire the mad or put it back */ + if( ib_smp_is_response( p_smp ) || + (p_smp->method == IB_MAD_METHOD_TRAP_REPRESS)) + { + CL_ASSERT( p_madw->resp_expected == FALSE ); + __osm_sm_mad_ctrl_retire_trans_mad( p_ctrl, p_madw ); + } + else + { + if( p_madw->resp_expected == TRUE ) + __osm_sm_mad_ctrl_retire_trans_mad( p_ctrl, p_madw ); + else + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + } + + goto Exit; + } + + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_FRAMES ) ) + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_FRAMES ); + + if( p_smp->mgmt_class == IB_MCLASS_SUBN_DIR ) + { + status = ib_smp_get_status( p_smp ); + } + else + { + status = p_smp->status; + } + + if( status != 0 ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_rcv_callback: ERR 3111: " + "Error status = 0x%X\n", status ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + } + + switch( p_smp->method ) + { + case IB_MAD_METHOD_GET_RESP: + CL_ASSERT( p_req_madw != NULL ); + __osm_sm_mad_ctrl_process_get_resp( p_ctrl, p_madw, p_req_madw ); + break; + + case IB_MAD_METHOD_GET: + CL_ASSERT( p_req_madw == NULL ); + __osm_sm_mad_ctrl_process_get( p_ctrl, p_madw ); + break; + + case IB_MAD_METHOD_TRAP: + CL_ASSERT( p_req_madw == NULL ); + __osm_sm_mad_ctrl_process_trap( p_ctrl, p_madw ); + break; + + case IB_MAD_METHOD_SET: + CL_ASSERT( p_req_madw == NULL ); + __osm_sm_mad_ctrl_process_set( p_ctrl, p_madw ); + break; + + case IB_MAD_METHOD_SEND: + case IB_MAD_METHOD_REPORT: + case IB_MAD_METHOD_REPORT_RESP: + case IB_MAD_METHOD_TRAP_REPRESS: + default: + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_rcv_callback: ERR 3112: " + "Unsupported method = 0x%X\n", p_smp->method ); + osm_dump_dr_smp( p_ctrl->p_log, p_smp, OSM_LOG_ERROR ); + osm_mad_pool_put( p_ctrl->p_mad_pool, p_madw ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* opensm: SM/__osm_sm_mad_ctrl_send_err_cb + * NAME + * __osm_sm_mad_ctrl_send_err_cb + * + * DESCRIPTION + * This is the callback from the transport layer for send errors + * on MADs that were expecting a response. + * + * SYNOPSIS + */ +void +__osm_sm_mad_ctrl_send_err_cb( + IN void *bind_context, + IN osm_madw_t *p_madw ) +{ + osm_sm_mad_ctrl_t* p_ctrl = (osm_sm_mad_ctrl_t*)bind_context; +#if 0 + osm_physp_t* p_physp; +#endif + ib_api_status_t status; + ib_smp_t* p_smp; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sm_mad_ctrl_send_err_cb ); + + CL_ASSERT( p_madw ); + + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_send_err_cb: ERR 3113: " + "MAD completed in error (%s)\n", + ib_get_err_str( p_madw->status ) ); + + /* + If this was a SubnSet MAD, then this error might indicate a problem + in configuring the subnet. In this case - need to mark that there was + such a problem. The subnet will not be up, and the next sweep should + be a heavy sweep as well. 
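The method switch in __osm_sm_mad_ctrl_rcv_callback above encodes a pairing rule: only GetResp() arrives with the original request wrapper (p_req_madw), while Get, Set, and Trap must arrive without one. A toy dispatch making that rule explicit with assertions (method codes are illustrative, not the IB_MAD_METHOD_* values):

    #include <assert.h>
    #include <stdio.h>

    enum method { M_GET, M_SET, M_GET_RESP, M_TRAP, M_SEND };

    static void rcv_dispatch(enum method m, const void *p_req_madw)
    {
        switch (m) {
        case M_GET_RESP:
            assert(p_req_madw != NULL);  /* a response is always paired */
            printf("-> process_get_resp\n");
            break;
        case M_GET:
            assert(p_req_madw == NULL);  /* unsolicited query, e.g. Get(SMInfo) */
            printf("-> process_get\n");
            break;
        case M_SET:
            assert(p_req_madw == NULL);
            printf("-> process_set\n");
            break;
        case M_TRAP:
            assert(p_req_madw == NULL);
            printf("-> process_trap\n");
            break;
        default:
            printf("-> unsupported method, retire the MAD\n");
            break;
        }
    }

    int main(void)
    {
        int request;  /* dummy request wrapper */

        rcv_dispatch(M_GET_RESP, &request);
        rcv_dispatch(M_TRAP, NULL);
        rcv_dispatch(M_SEND, NULL);
        return 0;
    }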
+ */ + p_smp = osm_madw_get_smp_ptr( p_madw ); + if (p_smp->method == IB_MAD_METHOD_SET && + ( p_smp->attr_id == IB_MAD_ATTR_PORT_INFO || + p_smp->attr_id == IB_MAD_ATTR_MCAST_FWD_TBL || + p_smp->attr_id == IB_MAD_ATTR_SWITCH_INFO || + p_smp->attr_id == IB_MAD_ATTR_LIN_FWD_TBL ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_send_err_cb: ERR 3119: " + "Set method failed\n" ); + p_ctrl->p_subn->subnet_initialization_error = TRUE; + } + + /* + Since we did not get any response we suspect the DR path + used for the target port. + Find it and replace it with an alternate path. + This is true only if the destination lid is not 0xFFFF, since + then we are aiming for a specific path and not specific destination + lid. + */ + /* For now - do not add the alternate dr path to the release */ +#if 0 + if ( p_madw->mad_addr.dest_lid != 0xFFFF ) + { + p_physp = + osm_get_physp_by_mad_addr(p_ctrl->p_log, + p_ctrl->p_subn, + &(p_madw->mad_addr)); + if (!p_physp) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_send_err_cb: ERR 3114: " + "Failed to find the corresponding phys port\n"); + } + else + { + osm_physp_replace_dr_path_with_alternate_dr_path( + p_ctrl->p_log, p_ctrl->p_subn, p_physp, p_madw->h_bind ); + } + } +#endif + + /* + An error occurred. No response was received to a request MAD. + Retire the original request MAD. + */ + + osm_dump_dr_smp( p_ctrl->p_log, osm_madw_get_smp_ptr( p_madw ), + OSM_LOG_ERROR ); + + __osm_sm_mad_ctrl_update_wire_stats( p_ctrl ); + + if( osm_madw_get_err_msg( p_madw ) != CL_DISP_MSGID_NONE ) + { + if( osm_log_is_active( p_ctrl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_ctrl->p_log, OSM_LOG_DEBUG, + "__osm_sm_mad_ctrl_send_err_cb: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( osm_madw_get_err_msg( p_madw ) ) ); + } + + status = cl_disp_post( p_ctrl->h_disp, + osm_madw_get_err_msg( p_madw ), + p_madw, + __osm_sm_mad_ctrl_disp_done_callback, + p_ctrl ); + if( status != CL_SUCCESS ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "__osm_sm_mad_ctrl_send_err_cb: ERR 3115: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( status ) ); + } + } + else + { + /* + No error message was provided, just retire the MAD. 
+ */ + __osm_sm_mad_ctrl_retire_trans_mad( p_ctrl, p_madw ); + } + + OSM_LOG_EXIT( p_ctrl->p_log ); +} +/* + * PARAMETERS + * + * RETURN VALUES + * + * NOTES + * + * SEE ALSO + *********/ + +/********************************************************************** + **********************************************************************/ +void +osm_sm_mad_ctrl_construct( + IN osm_sm_mad_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_mad_ctrl_destroy( + IN osm_sm_mad_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + + if (p_ctrl->h_bind != CL_DISP_INVALID_HANDLE) + { + osm_vendor_unbind( p_ctrl->h_bind ); + } + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_mad_ctrl_init( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN osm_subn_t* const p_subn, + IN osm_mad_pool_t* const p_mad_pool, + IN osm_vl15_t* const p_vl15, + IN osm_vendor_t* const p_vendor, + IN osm_log_t* const p_log, + IN osm_stats_t* const p_stats, + IN cl_plock_t* const p_lock, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sm_mad_ctrl_init ); + + osm_sm_mad_ctrl_construct( p_ctrl ); + + p_ctrl->p_subn = p_subn; + p_ctrl->p_log = p_log; + p_ctrl->p_disp = p_disp; + p_ctrl->p_mad_pool = p_mad_pool; + p_ctrl->p_vendor = p_vendor; + p_ctrl->p_stats = p_stats; + p_ctrl->p_lock = p_lock; + p_ctrl->p_vl15 = p_vl15; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + CL_DISP_MSGID_NONE, + NULL, + NULL ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sm_mad_ctrl_init: ERR 3116: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_mad_ctrl_bind( + IN osm_sm_mad_ctrl_t* const p_ctrl, + IN const ib_net64_t port_guid ) +{ + osm_bind_info_t bind_info; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_ctrl->p_log, osm_sm_mad_ctrl_bind ); + + if( p_ctrl->h_bind != OSM_BIND_INVALID_HANDLE ) + { + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "osm_sm_mad_ctrl_bind: ERR 3117: " + "Multiple binds not allowed\n" ); + status = IB_ERROR; + goto Exit; + } + + bind_info.class_version = 1; + bind_info.is_report_processor = FALSE; + bind_info.is_responder = TRUE; + bind_info.is_trap_processor = TRUE; + bind_info.mad_class = IB_MCLASS_SUBN_DIR; + bind_info.port_guid = port_guid; + bind_info.recv_q_size = OSM_SM_DEFAULT_QP0_RCV_SIZE; + bind_info.send_q_size = OSM_SM_DEFAULT_QP0_SEND_SIZE; + + osm_log( p_ctrl->p_log, OSM_LOG_VERBOSE, + "osm_sm_mad_ctrl_bind: " + "Binding to port 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + + p_ctrl->h_bind = osm_vendor_bind( p_ctrl->p_vendor, + &bind_info, + p_ctrl->p_mad_pool, + __osm_sm_mad_ctrl_rcv_callback, + __osm_sm_mad_ctrl_send_err_cb, + p_ctrl ); + + if( p_ctrl->h_bind == OSM_BIND_INVALID_HANDLE ) + { + status = IB_ERROR; + osm_log( p_ctrl->p_log, OSM_LOG_ERROR, + "osm_sm_mad_ctrl_bind: ERR 3118: " + "Vendor specific bind 
failed\n" ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_ctrl->p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sm_state_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sm_state_mgr.c new file mode 100644 index 00000000..cf37caae --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sm_state_mgr.c @@ -0,0 +1,872 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_sm_state_mgr_t. + * This file implements the SM State Manager object. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.7 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_standby_msg( + IN const osm_sm_state_mgr_t * p_sm_mgr ) +{ + osm_log( p_sm_mgr->p_log, OSM_LOG_SYS, "Entering STANDBY state\n" ); /* Format Waived */ + + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_standby_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************** ENTERING SM STANDBY" + " STATE *******************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_master_msg( + IN const osm_sm_state_mgr_t * p_sm_mgr ) +{ + osm_log( p_sm_mgr->p_log, OSM_LOG_SYS, "Entering MASTER state\n" ); /* Format Waived */ + + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_master_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************** ENTERING SM MASTER" + " STATE ********************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_discovering_msg( + IN const osm_sm_state_mgr_t * p_sm_mgr ) +{ + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_discovering_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************** ENTERING SM DISCOVERING" + " STATE ***************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_notactive_msg( + IN const osm_sm_state_mgr_t * p_sm_mgr ) +{ + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_notactive_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************** ENTERING SM NOT-ACTIVE" + " STATE **********************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +#if 0 +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_send_local_port_info_req( + IN osm_sm_state_mgr_t * p_sm_mgr ) +{ + osm_madw_context_t context; + osm_port_t *p_port; + ib_net64_t port_guid = p_sm_mgr->p_subn->sm_port_guid; + ib_api_status_t status; + + OSM_LOG_ENTER( p_sm_mgr->p_log, + 
__osm_sm_state_mgr_send_local_port_info_req ); + /* + * Send a query of SubnGet(PortInfo) to our own port, in order to + * update the master_sm_base_lid of the subnet. + */ + memset( &context, 0, sizeof( context ) ); + p_port = ( osm_port_t * ) cl_qmap_get( &p_sm_mgr->p_subn->port_guid_tbl, + port_guid ); + if( p_port == + ( osm_port_t * ) cl_qmap_end( &p_sm_mgr->p_subn->port_guid_tbl ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_send_local_port_info_req: ERR 3205: " + "No port object for port 0x%016" PRIx64 "\n", + cl_ntoh64( port_guid ) ); + goto Exit; + } + + context.pi_context.port_guid = port_guid; + context.pi_context.node_guid = p_port->p_node->node_info.node_guid; + context.pi_context.set_method = FALSE; + context.pi_context.ignore_errors = FALSE; + /* mark the update_master_sm_base_lid with TRUE - we want to update it */ + /* with the new master lid value. */ + context.pi_context.update_master_sm_base_lid = TRUE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + status = osm_req_get( p_sm_mgr->p_req, + osm_physp_get_dr_path_ptr + ( osm_port_get_default_phys_ptr( p_port ) ), + IB_MAD_ATTR_PORT_INFO, + cl_hton32( p_port->default_port_num ), + CL_DISP_MSGID_NONE, &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_send_local_port_info_req: ERR 3202: " + "Failure requesting PortInfo (%s)\n", + ib_get_err_str( status ) ); + } + + Exit: + OSM_LOG_EXIT( p_sm_mgr->p_log ); +} +#endif + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_send_master_sm_info_req( + IN osm_sm_state_mgr_t * p_sm_mgr ) +{ + osm_madw_context_t context; + const osm_port_t *p_port; + ib_api_status_t status; + + OSM_LOG_ENTER( p_sm_mgr->p_log, + __osm_sm_state_mgr_send_master_sm_info_req ); + + memset( &context, 0, sizeof( context ) ); + if( p_sm_mgr->p_subn->sm_state == IB_SMINFO_STATE_STANDBY ) + { + /* + * We are in STANDBY state - this means we need to poll on the master + * SM (according to master_guid) + * Send a query of SubnGet(SMInfo) to the subn master_sm_base_lid object. + */ + p_port = ( osm_port_t * ) cl_qmap_get( &p_sm_mgr->p_subn->port_guid_tbl, + p_sm_mgr->master_guid ); + } + else + { + /* + * We are not in STANDBY - this means we are in MASTER state - so we need + * to poll on the SM that is saved in p_polling_sm under p_sm_mgr. + * Send a query of SubnGet(SMInfo) to that SM. 
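The polling machinery defined next (__osm_sm_state_mgr_start_polling and its timer callback) re-sends SubnGet(SMInfo) on every timer expiry and gives up after polling_retry_number attempts. A timer-free sketch of that bounded-retry control flow, with one loop iteration standing in for one cl_timer expiry (constants and names are illustrative):

    #include <stdio.h>

    #define POLLING_RETRY_NUMBER 4  /* stand-in for opt.polling_retry_number */

    static void send_sminfo_query(void)
    {
        printf("SubnGet(SMInfo) -> polled SM\n");
    }

    int main(void)
    {
        unsigned retry_number = 0;  /* reset whenever polling (re)starts */

        send_sminfo_query();  /* start_polling sends one query up front */

        for (;;) {
            /* One iteration == one polling-timer expiry. */
            retry_number++;
            printf("retry number: %u\n", retry_number);

            if (retry_number >= POLLING_RETRY_NUMBER) {
                printf("no answer: signal OSM_SM_SIGNAL_POLLING_TIMEOUT\n");
                break;  /* the state manager falls back to DISCOVERING */
            }
            send_sminfo_query();  /* re-arm and poll again */
        }
        return 0;
    }

In the real code an SMInfo response resets the counter through osm_sm_state_mgr_signal_master_is_alive, so the retry budget only runs out when the polled SM stays silent.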
+ */ + p_port = p_sm_mgr->p_polling_sm->p_port; + } + if( p_port == NULL ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_send_master_sm_info_req: ERR 3203: " + "No port object for GUID 0x%016" PRIx64 "\n", + cl_ntoh64(p_sm_mgr->master_guid) ); + goto Exit; + } + + context.smi_context.port_guid = p_port->guid; + context.smi_context.set_method = FALSE; + + status = osm_req_get( p_sm_mgr->p_req, + osm_physp_get_dr_path_ptr + ( osm_port_get_default_phys_ptr( p_port ) ), + IB_MAD_ATTR_SM_INFO, 0, CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_send_master_sm_info_req: ERR 3204: " + "Failure requesting SMInfo (%s)\n", ib_get_err_str( status ) ); + } + + Exit: + OSM_LOG_EXIT( p_sm_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_start_polling( + IN osm_sm_state_mgr_t * p_sm_mgr ) +{ + uint32_t sminfo_polling_timeout = + p_sm_mgr->p_subn->opt.sminfo_polling_timeout; + cl_status_t cl_status; + + OSM_LOG_ENTER( p_sm_mgr->p_log, __osm_sm_state_mgr_start_polling ); + + /* + * Reset the retry_number back to zero - need to restart counting + */ + p_sm_mgr->retry_number = 0; + + /* + * Send a SubnGet(SMInfo) query to the current (or new) master found. + */ + __osm_sm_state_mgr_send_master_sm_info_req( p_sm_mgr ); + + /* + * Start a timer that will wake up every sminfo_polling_timeout milliseconds. + * The callback of the timer will send a SubnGet(SMInfo) to the Master SM + * and restart the timer. + */ + cl_status = cl_timer_start( &p_sm_mgr->polling_timer, + sminfo_polling_timeout ); + if( cl_status != CL_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_start_polling : ERR 3210: " + "Failed to start timer\n" ); + } + + OSM_LOG_EXIT( p_sm_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sm_state_mgr_polling_callback( + IN void *context ) +{ + osm_sm_state_mgr_t *p_sm_mgr = ( osm_sm_state_mgr_t * ) context; + uint32_t sminfo_polling_timeout = + p_sm_mgr->p_subn->opt.sminfo_polling_timeout; + cl_status_t cl_status; + + OSM_LOG_ENTER( p_sm_mgr->p_log, __osm_sm_state_mgr_polling_callback ); + + /* + * We can be here in one of two cases: + * 1. We are a STANDBY sm polling on the master SM. + * 2. We are a MASTER sm, waiting for a handover from a remote master sm. + * If we are not in one of these cases - don't need to restart the poller. + */ + if( !( ( p_sm_mgr->p_subn->sm_state == IB_SMINFO_STATE_MASTER && + p_sm_mgr->p_polling_sm != NULL ) || + ( p_sm_mgr->p_subn->sm_state == IB_SMINFO_STATE_STANDBY ) ) ) + { + goto Exit; + } + + /* + * If we are a STANDBY sm and the osm_exit_flag is 1, then let's signal + * the subnet_up. This is relevant for the case of running only once. In that + * case - the program is stuck until this signal is received. In other + * cases - it is not relevant whether or not the signal is on - since we are + * currently in exit flow + */ + if( p_sm_mgr->p_subn->sm_state == IB_SMINFO_STATE_STANDBY && + osm_exit_flag == 1 ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_polling_callback : " + "Signalling subnet_up_event\n" ); + cl_event_signal( p_sm_mgr->p_state_mgr->p_subnet_up_event ); + goto Exit; + } + + /* + * Increment the retry number. 
+ * If it reached the max_retry_number in the subnet opt - call + * osm_sm_state_mgr_process with signal OSM_SM_SIGNAL_POLLING_TIMEOUT + */ + p_sm_mgr->retry_number++; + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_sm_state_mgr_polling_callback : " + "Retry number:%d\n", p_sm_mgr->retry_number ); + + if( p_sm_mgr->retry_number >= p_sm_mgr->p_subn->opt.polling_retry_number ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_DEBUG, + "__osm_sm_state_mgr_polling_callback : " + "Reached polling_retry_number value in retry_number. " + "Go to DISCOVERY state\n" ); + osm_sm_state_mgr_process( p_sm_mgr, OSM_SM_SIGNAL_POLLING_TIMEOUT ); + goto Exit; + } + + /* Send a SubnGet(SMInfo) request to the remote sm (depends on our state) */ + __osm_sm_state_mgr_send_master_sm_info_req( p_sm_mgr ); + + /* restart the timer */ + cl_status = cl_timer_start( &p_sm_mgr->polling_timer, + sminfo_polling_timeout ); + if( cl_status != CL_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_polling_callback : ERR 3211: " + "Failed to restart timer\n" ); + } + + Exit: + OSM_LOG_EXIT( p_sm_mgr->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_state_mgr_construct( + IN osm_sm_state_mgr_t * const p_sm_mgr ) +{ + memset( p_sm_mgr, 0, sizeof( *p_sm_mgr ) ); + cl_spinlock_construct( &p_sm_mgr->state_lock ); + cl_timer_construct( &p_sm_mgr->polling_timer ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_state_mgr_destroy( + IN osm_sm_state_mgr_t * const p_sm_mgr ) +{ + CL_ASSERT( p_sm_mgr ); + + OSM_LOG_ENTER( p_sm_mgr->p_log, osm_sm_state_mgr_destroy ); + + cl_spinlock_destroy( &p_sm_mgr->state_lock ); + cl_timer_destroy( &p_sm_mgr->polling_timer ); + + OSM_LOG_EXIT( p_sm_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_state_mgr_init( + IN osm_sm_state_mgr_t * const p_sm_mgr, + IN osm_state_mgr_t * const p_state_mgr, + IN osm_subn_t * const p_subn, + IN osm_req_t * const p_req, + IN osm_log_t * const p_log ) +{ + cl_status_t status; + + OSM_LOG_ENTER( p_log, osm_sm_state_mgr_init ); + + CL_ASSERT( p_subn ); + CL_ASSERT( p_state_mgr ); + CL_ASSERT( p_req ); + + osm_sm_state_mgr_construct( p_sm_mgr ); + + p_sm_mgr->p_log = p_log; + p_sm_mgr->p_req = p_req; + p_sm_mgr->p_subn = p_subn; + p_sm_mgr->p_state_mgr = p_state_mgr; + + /* init the state of the SM to idle */ + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_INIT; + + status = cl_spinlock_init( &p_sm_mgr->state_lock ); + if( status != CL_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "osm_sm_state_mgr_init: ERR 3201: " + "Spinlock init failed (%s)\n", CL_STATUS_MSG( status ) ); + } + + status = cl_timer_init( &p_sm_mgr->polling_timer, + __osm_sm_state_mgr_polling_callback, p_sm_mgr ); + + if( status != CL_SUCCESS ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "osm_sm_state_mgr_init: ERR 3206: " + "Timer init failed (%s)\n", CL_STATUS_MSG( status ) ); + } + + OSM_LOG_EXIT( p_sm_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +void +__osm_sm_state_mgr_signal_error( + IN const osm_sm_state_mgr_t * const 
p_sm_mgr, + IN const osm_sm_signal_t signal ) +{ + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "__osm_sm_state_mgr_signal_error: ERR 3207: " + "Invalid signal %s in state %s\n", + osm_get_sm_mgr_signal_str( signal ), + osm_get_sm_mgr_state_str( p_sm_mgr->p_subn->sm_state ) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sm_state_mgr_signal_master_is_alive( + IN osm_sm_state_mgr_t * const p_sm_mgr ) +{ + OSM_LOG_ENTER( p_sm_mgr->p_log, osm_sm_state_mgr_signal_master_is_alive ); + p_sm_mgr->retry_number = 0; + OSM_LOG_EXIT( p_sm_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_state_mgr_process( + IN osm_sm_state_mgr_t * const p_sm_mgr, + IN osm_sm_signal_t signal ) +{ + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT( p_sm_mgr ); + + OSM_LOG_ENTER( p_sm_mgr->p_log, osm_sm_state_mgr_process ); + + /* + * The state lock prevents many race conditions from screwing + * up the state transition process. + */ + cl_spinlock_acquire( &p_sm_mgr->state_lock ); + + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_DEBUG, + "osm_sm_state_mgr_process: " + "Received signal %s in state %s\n", + osm_get_sm_mgr_signal_str( signal ), + osm_get_sm_mgr_state_str( p_sm_mgr->p_subn->sm_state ) ); + } + + switch ( p_sm_mgr->p_subn->sm_state ) + { + case IB_SMINFO_STATE_INIT: + switch ( signal ) + { + case OSM_SM_SIGNAL_INIT: + /* + * Update the state of the SM to DISCOVERING + */ + __osm_sm_state_mgr_discovering_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_DISCOVERING; + break; + + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_DISCOVERING: + switch ( signal ) + { + case OSM_SM_SIGNAL_DISCOVERY_COMPLETED: + /* + * Update the state of the SM to MASTER + */ + __osm_sm_state_mgr_master_msg( p_sm_mgr ); + /* Turn on the moved_to_master_state flag */ + p_sm_mgr->p_subn->moved_to_master_state = TRUE; + /* Turn on the first_time_master_sweep flag */ + if( p_sm_mgr->p_subn->first_time_master_sweep == FALSE ) + p_sm_mgr->p_subn->first_time_master_sweep = TRUE; + + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_MASTER; + /* + * Make sure to set the subnet master_sm_base_lid + * to the sm_base_lid value + */ + p_sm_mgr->p_subn->master_sm_base_lid = p_sm_mgr->p_subn->sm_base_lid; + break; + case OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED: + /* + * Stop the discovering + */ + osm_state_mgr_process( p_sm_mgr->p_state_mgr, + OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED ); + break; + case OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED_DONE: + /* + * Finished all discovery actions - move to STANDBY + * start the polling + */ + __osm_sm_state_mgr_standby_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_STANDBY; + /* + * Since another SM is doing the LFT config - we should not + * ignore the results of it + */ + p_sm_mgr->p_subn->ignore_existing_lfts = FALSE; + + __osm_sm_state_mgr_start_polling( p_sm_mgr ); + break; + case OSM_SM_SIGNAL_HANDOVER: + /* + * Do nothing. We will discover it later on. If we already discovered + * this SM, and got the HANDOVER - this means the remote SM is of + * lower priority. In this case we will stop polling it (since it is + * a lower priority SM in STANDBY state). 
+ */ + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_STANDBY: + switch ( signal ) + { + case OSM_SM_SIGNAL_POLLING_TIMEOUT: + case OSM_SM_SIGNAL_DISCOVER: + /* + * case 1: Polling timeout occurred - this means that the Master SM + * is no longer alive. + * case 2: Got a signal to move to DISCOVERING + * Move to DISCOVERING state, and start sweeping + */ + __osm_sm_state_mgr_discovering_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_DISCOVERING; + p_sm_mgr->p_subn->coming_out_of_standby = TRUE; + osm_state_mgr_process( p_sm_mgr->p_state_mgr, OSM_SIGNAL_EXIT_STBY ); + break; + case OSM_SM_SIGNAL_DISABLE: + /* + * Update the state to NOT_ACTIVE + */ + __osm_sm_state_mgr_notactive_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_NOTACTIVE; + break; + case OSM_SM_SIGNAL_HANDOVER: + /* + * Update state to MASTER, and start sweeping + * OPTIONAL: send ACKNOWLEDGE + */ + __osm_sm_state_mgr_master_msg( p_sm_mgr ); + /* Turn on the moved_to_master_state flag */ + p_sm_mgr->p_subn->moved_to_master_state = TRUE; + /* Turn on the first_time_master_sweep flag */ + if( p_sm_mgr->p_subn->first_time_master_sweep == FALSE ) + p_sm_mgr->p_subn->first_time_master_sweep = TRUE; + /* Turn on the force_immediate_heavy_sweep - we want a + * heavy sweep to occur on the first sweep of this SM. */ + p_sm_mgr->p_subn->force_immediate_heavy_sweep = TRUE; + + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_MASTER; + /* + * Make sure to set the subnet master_sm_base_lid + * to the sm_base_lid value + */ + p_sm_mgr->p_subn->master_sm_base_lid = p_sm_mgr->p_subn->sm_base_lid; + p_sm_mgr->p_subn->coming_out_of_standby = TRUE; + osm_state_mgr_process( p_sm_mgr->p_state_mgr, OSM_SIGNAL_EXIT_STBY ); + break; + case OSM_SM_SIGNAL_ACKNOWLEDGE: + /* + * Do nothing - already moved to STANDBY + */ + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_NOTACTIVE: + switch ( signal ) + { + case OSM_SM_SIGNAL_STANDBY: + /* + * Update the state to STANDBY + * start the polling + */ + __osm_sm_state_mgr_standby_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_STANDBY; + __osm_sm_state_mgr_start_polling( p_sm_mgr ); + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_MASTER: + switch ( signal ) + { + case OSM_SM_SIGNAL_POLLING_TIMEOUT: + /* + * We received a polling timeout - this means that we waited for + * a remote master sm to send us a handover, but didn't get it, and + * didn't get a response from that remote sm. + * We want to force a heavy sweep - hopefully this occurred because + * the remote sm died, and we'll find this out and configure the + * subnet after a heavy sweep. + * We also want to clear the p_polling_sm object - since we are + * done polling on that remote sm - we are sweeping again. + */ + case OSM_SM_SIGNAL_HANDOVER: + /* + * If we received a handover in a master state - then we want to + * force a heavy sweep. This means that either we are in a sweep + * currently - in this case - no change, or we are in idle state - + * since we recognized a master SM before - so we want to make a + * heavy sweep and reconfigure the new subnet. + * We also want to clear the p_polling_sm object - since we are + * done polling on that remote sm - we got a handover from it. 
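osm_sm_state_mgr_process above and osm_sm_state_mgr_check_legality below each re-enumerate which signals are valid in which state, so the two switches can drift apart. A sketch of a shared state-by-signal legality table that both could consult (the states and signals shown are an illustrative subset, not the full IB_SMINFO_STATE_*/OSM_SM_SIGNAL_* sets):

    #include <stdio.h>

    typedef enum { ST_INIT, ST_DISCOVERING, ST_STANDBY, ST_NOTACTIVE,
                   ST_MASTER, ST_MAX } sm_state_t;
    typedef enum { SIG_INIT, SIG_DISCOVERY_COMPLETED, SIG_HANDOVER,
                   SIG_STANDBY, SIG_POLLING_TIMEOUT, SIG_MAX } sm_signal_t;

    /* One table answers "is this signal legal in this state?", so the
     * transition code and the legality check cannot disagree. */
    static const unsigned char legal[ST_MAX][SIG_MAX] = {
        [ST_INIT]        = { [SIG_INIT] = 1 },
        [ST_DISCOVERING] = { [SIG_DISCOVERY_COMPLETED] = 1, [SIG_HANDOVER] = 1 },
        [ST_STANDBY]     = { [SIG_POLLING_TIMEOUT] = 1, [SIG_HANDOVER] = 1 },
        [ST_NOTACTIVE]   = { [SIG_STANDBY] = 1 },
        [ST_MASTER]      = { [SIG_HANDOVER] = 1, [SIG_POLLING_TIMEOUT] = 1 },
    };

    static int is_legal(sm_state_t s, sm_signal_t sig)
    {
        return legal[s][sig];
    }

    int main(void)
    {
        printf("HANDOVER in STANDBY: %s\n",
               is_legal(ST_STANDBY, SIG_HANDOVER) ? "legal" : "illegal");
        printf("INIT in MASTER:      %s\n",
               is_legal(ST_MASTER, SIG_INIT) ? "legal" : "illegal");
        return 0;
    }

With such a table, the process routine would consult is_legal before acting and check_legality would reduce to a single lookup under the same spinlock.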
+ */ + osm_log( p_sm_mgr->p_log, OSM_LOG_VERBOSE, + "osm_sm_state_mgr_process: " + "Forcing immediate heavy sweep. " + "Received OSM_SM_SIGNAL_HANDOVER or OSM_SM_SIGNAL_POLLING_TIMEOUT\n" ); + p_sm_mgr->p_polling_sm = NULL; + p_sm_mgr->p_subn->force_immediate_heavy_sweep = TRUE; + osm_state_mgr_process( p_sm_mgr->p_state_mgr, OSM_SIGNAL_SWEEP ); + break; + case OSM_SM_SIGNAL_HANDOVER_SENT: + /* + * Just sent a HANDOVER signal - move to STANDBY + * start the polling + */ + __osm_sm_state_mgr_standby_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_STANDBY; + __osm_sm_state_mgr_start_polling( p_sm_mgr ); + break; + case OSM_SM_SIGNAL_WAIT_FOR_HANDOVER: + /* + * We found a remote master SM, and we are waiting for it + * to handover the mastership to us. Need to start polling + * on that SM, to make sure it is alive, if it isn't - then + * we should move back to discovering, since something must + * have happened to it. + */ + __osm_sm_state_mgr_start_polling( p_sm_mgr ); + break; + case OSM_SM_SIGNAL_DISCOVER: + __osm_sm_state_mgr_discovering_msg( p_sm_mgr ); + p_sm_mgr->p_subn->sm_state = IB_SMINFO_STATE_DISCOVERING; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + default: + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "osm_sm_state_mgr_process: ERR 3208: " + "Invalid state %s\n", + osm_get_sm_mgr_state_str( p_sm_mgr->p_subn->sm_state ) ); + + } + + cl_spinlock_release( &p_sm_mgr->state_lock ); + + OSM_LOG_EXIT( p_sm_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sm_state_mgr_check_legality( + IN osm_sm_state_mgr_t * const p_sm_mgr, + IN osm_sm_signal_t signal ) +{ + ib_api_status_t status = IB_SUCCESS; + + CL_ASSERT( p_sm_mgr ); + + OSM_LOG_ENTER( p_sm_mgr->p_log, osm_sm_state_mgr_check_legality ); + + /* + * The state lock prevents many race conditions from screwing + * up the state transition process. 
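osm_sm_state_mgr_check_legality(), whose body follows, is the side-effect-free companion to osm_sm_state_mgr_process(): it validates a signal without performing the transition. The SMInfo Set handler later in this patch uses the two together; the essential call sequence, condensed:

    /* Condensed usage sketch (error logging and MAD responses omitted):
     * validate a remotely requested transition, then commit it. */
    ib_api_status_t
    apply_remote_signal(
        IN osm_sm_state_mgr_t * const p_sm_mgr,
        IN osm_sm_signal_t signal )
    {
        ib_api_status_t status;

        /* side-effect-free legality check (takes and drops state_lock) */
        status = osm_sm_state_mgr_check_legality( p_sm_mgr, signal );
        if( status != IB_SUCCESS )
            return status;   /* caller answers the MAD with an error status */

        /* commit the transition (takes state_lock again) */
        return osm_sm_state_mgr_process( p_sm_mgr, signal );
    }

Since each call takes and drops state_lock independently, there is a small window in which the state can change between the check and the commit; the switch in osm_sm_state_mgr_process() re-validates, so an interleaved transition degrades to a logged invalid-signal error rather than a corrupt state.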
+ */ + cl_spinlock_acquire( &p_sm_mgr->state_lock ); + + if( osm_log_is_active( p_sm_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_sm_mgr->p_log, OSM_LOG_DEBUG, + "osm_sm_state_mgr_check_legality: " + "Received signal %s in state %s\n", + osm_get_sm_mgr_signal_str( signal ), + osm_get_sm_mgr_state_str( p_sm_mgr->p_subn->sm_state ) ); + } + + switch ( p_sm_mgr->p_subn->sm_state ) + { + case IB_SMINFO_STATE_INIT: + switch ( signal ) + { + case OSM_SM_SIGNAL_INIT: + status = IB_SUCCESS; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_DISCOVERING: + switch ( signal ) + { + case OSM_SM_SIGNAL_DISCOVERY_COMPLETED: + case OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED: + case OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED_DONE: + case OSM_SM_SIGNAL_HANDOVER: + status = IB_SUCCESS; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_STANDBY: + switch ( signal ) + { + case OSM_SM_SIGNAL_POLLING_TIMEOUT: + case OSM_SM_SIGNAL_DISCOVER: + case OSM_SM_SIGNAL_DISABLE: + case OSM_SM_SIGNAL_HANDOVER: + case OSM_SM_SIGNAL_ACKNOWLEDGE: + status = IB_SUCCESS; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_NOTACTIVE: + switch ( signal ) + { + case OSM_SM_SIGNAL_STANDBY: + status = IB_SUCCESS; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + case IB_SMINFO_STATE_MASTER: + switch ( signal ) + { + case OSM_SM_SIGNAL_HANDOVER: + case OSM_SM_SIGNAL_HANDOVER_SENT: + status = IB_SUCCESS; + break; + default: + __osm_sm_state_mgr_signal_error( p_sm_mgr, signal ); + status = IB_INVALID_PARAMETER; + break; + } + break; + + default: + osm_log( p_sm_mgr->p_log, OSM_LOG_ERROR, + "osm_sm_state_mgr_check_legality: ERR 3209: " + "Invalid state %s\n", + osm_get_sm_mgr_state_str( p_sm_mgr->p_subn->sm_state ) ); + status = IB_INVALID_PARAMETER; + + } + + cl_spinlock_release( &p_sm_mgr->state_lock ); + + OSM_LOG_EXIT( p_sm_mgr->p_log ); + return ( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv.c new file mode 100755 index 00000000..770b0ca9 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv.c @@ -0,0 +1,768 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_sminfo_rcv_t. + * This object represents the SMInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_sminfo_rcv_construct( + IN osm_sminfo_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sminfo_rcv_destroy( + IN osm_sminfo_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_sminfo_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sminfo_rcv_init( + IN osm_sminfo_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_stats_t* const p_stats, + IN osm_resp_t* const p_resp, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN osm_sm_state_mgr_t* const p_sm_state_mgr, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sminfo_rcv_init ); + + osm_sminfo_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_stats = p_stats; + p_rcv->p_resp = p_resp; + p_rcv->p_state_mgr = p_state_mgr; + p_rcv->p_sm_state_mgr = p_sm_state_mgr; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + Return TRUE if the remote sm given (by ib_sm_info_t) is higher, + return FALSE otherwise. + By higher - we mean: SM with higher priority or with same priority + and lower GUID. +**********************************************************************/ +static inline boolean_t +__osm_sminfo_rcv_remote_sm_is_higher( + IN const osm_sminfo_rcv_t* p_rcv, + IN const ib_sm_info_t* p_remote_sm ) +{ + + return( osm_sm_is_greater_than( ib_sminfo_get_priority( p_remote_sm ), + p_remote_sm->guid, + p_rcv->p_subn->opt.sm_priority, + p_rcv->p_subn->sm_port_guid) ); + +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sminfo_rcv_process_get_request( + IN const osm_sminfo_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + uint8_t payload[IB_SMP_DATA_SIZE]; + ib_smp_t* p_smp; + ib_sm_info_t* p_smi = (ib_sm_info_t*)payload; + ib_api_status_t status; + ib_sm_info_t* p_remote_smi; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sminfo_rcv_process_get_request ); + + CL_ASSERT( p_madw ); + + /* + No real need to grab the lock for this function. 
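__osm_sminfo_rcv_remote_sm_is_higher() above delegates to osm_sm_is_greater_than(). The precedence rule it relies on, restated as a self-contained sketch (host-byte-order arguments are assumed here; the real helper takes network-order ib_net64_t GUIDs):

    #include <stdbool.h>
    #include <stdint.h>

    /* An SM "wins" on higher priority; equal priorities are broken in
     * favor of the numerically lower GUID. */
    static bool sm_is_greater_than(uint8_t pri1, uint64_t guid1,
                                   uint8_t pri2, uint64_t guid2)
    {
        if (pri1 != pri2)
            return pri1 > pri2;
        return guid1 < guid2;   /* lower GUID takes precedence */
    }

Because the rule is a strict total order over (priority, GUID) pairs, every SM in the subnet converges on the same winner regardless of discovery order.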
+ */ + memset( payload, 0, sizeof( payload ) ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + CL_ASSERT( p_smp->method == IB_MAD_METHOD_GET ); + + p_smi->guid = p_rcv->p_subn->sm_port_guid; + p_smi->act_count = cl_hton32( p_rcv->p_stats->qp0_mads_sent ); + p_smi->pri_state = (uint8_t)(p_rcv->p_subn->sm_state | + p_rcv->p_subn->opt.sm_priority << 4); + /* + p.750 row 11 - Return 0 for the SM key unless we authenticate the + requester as the master SM. + */ + p_remote_smi = ib_smp_get_payload_ptr ( osm_madw_get_smp_ptr (p_madw) ); + if (ib_sminfo_get_state( p_remote_smi ) == IB_SMINFO_STATE_MASTER ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sminfo_rcv_process_get_request: " + "Responding to master SM with real sm_key\n" ); + p_smi->sm_key = p_rcv->p_subn->opt.sm_key; + } + else + { + /* The requester is not authenticated as master - set sm_key to zero. */ + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sminfo_rcv_process_get_request: " + "Responding to SM not master with zero sm_key\n" ); + p_smi->sm_key = 0; + } + + status = osm_resp_send( p_rcv->p_resp, p_madw, 0, payload ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_request: ERR 2F02: " + "Error sending response (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + * Check if the p_smp received is legal. + * Current checks: + * MADHeader:AttributeModifiers of ACKNOWLEDGE that was not sent by a + * Standby SM. + * MADHeader:AttributeModifiers of HANDOVER/DISABLE/STANDBY/DISCOVER + * that was not sent by a Master SM. + * FUTURE - TO DO: + * Check that the SM_Key is matching. + **********************************************************************/ +static ib_api_status_t +__osm_sminfo_rcv_check_set_req_legality( + IN const ib_smp_t* const p_smp ) +{ + ib_sm_info_t* p_smi; + + p_smi = ib_smp_get_payload_ptr( p_smp ); + + if (p_smp->attr_mod == IB_SMINFO_ATTR_MOD_ACKNOWLEDGE) + { + if ( ib_sminfo_get_state( p_smi ) == IB_SMINFO_STATE_STANDBY ) + { + return( IB_SUCCESS ); + } + } + else if ( p_smp->attr_mod == IB_SMINFO_ATTR_MOD_HANDOVER || + p_smp->attr_mod == IB_SMINFO_ATTR_MOD_DISABLE || + p_smp->attr_mod == IB_SMINFO_ATTR_MOD_STANDBY || + p_smp->attr_mod == IB_SMINFO_ATTR_MOD_DISCOVER ) + { + if ( ib_sminfo_get_state( p_smi ) == IB_SMINFO_STATE_MASTER ) + { + return( IB_SUCCESS ); + } + } + + return( IB_INVALID_PARAMETER ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sminfo_rcv_process_set_request( + IN const osm_sminfo_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + uint8_t payload[IB_SMP_DATA_SIZE]; + ib_smp_t* p_smp; + ib_sm_info_t* p_smi = (ib_sm_info_t*)payload; + ib_sm_info_t* p_rcv_smi; + ib_api_status_t status; + osm_sm_signal_t sm_signal; + ib_sm_info_t* p_remote_smi; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sminfo_rcv_process_set_request ); + + CL_ASSERT( p_madw ); + + /* + No real need to grab the lock for this function. 
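The Get handler above discloses the real SM_Key only when the incoming SMInfo claims MASTER state (the "p.750 row 11" rule cited in the comment). The decision, isolated into a sketch (reply_sm_key is a hypothetical helper; the constant is a stand-in for IB_SMINFO_STATE_MASTER):

    #include <stdint.h>

    #define SMINFO_STATE_MASTER 3   /* stand-in constant */

    /* Only a requester recognized as the master SM sees the real key;
     * everyone else gets zero. */
    static uint64_t reply_sm_key(uint8_t requester_sm_state, uint64_t our_key)
    {
        return (requester_sm_state == SMINFO_STATE_MASTER) ? our_key : 0;
    }

As written, the handler keys off the SMState field carried in the request itself, so "authenticate" here means no more than trusting the claimed state; the FUTURE note in __osm_sminfo_rcv_check_set_req_legality() about matching SM_Keys points at the stronger check.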
+ */ + memset( payload, 0, sizeof( payload ) ); + + /* get the lock */ + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_rcv_smi = ib_smp_get_payload_ptr( p_smp ); + + if( p_smp->method != IB_MAD_METHOD_SET ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F03: " + "Unsupported method 0x%X\n", + p_smp->method ); + CL_PLOCK_RELEASE( p_rcv->p_lock ); + goto Exit; + } + + p_smi->guid = p_rcv->p_subn->sm_port_guid; + p_smi->act_count = cl_hton32( p_rcv->p_stats->qp0_mads_sent ); + p_smi->pri_state = (uint8_t)(p_rcv->p_subn->sm_state | + p_rcv->p_subn->opt.sm_priority << 4); + /* + p.750 row 11 - Return 0 for the SM key unless we authenticate the + requester as the master SM. + */ + p_remote_smi = ib_smp_get_payload_ptr ( osm_madw_get_smp_ptr (p_madw) ); + if (ib_sminfo_get_state( p_remote_smi ) == IB_SMINFO_STATE_MASTER ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sminfo_rcv_process_set_request: " + "Responding to master SM with real sm_key\n" ); + p_smi->sm_key = p_rcv->p_subn->opt.sm_key; + } + else + { + /* The requester is not authenticated as master - set sm_key to zero. */ + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_sminfo_rcv_process_set_request: " + "Responding to SM not master with zero sm_key\n" ); + p_smi->sm_key = 0; + } + + /* Check the legality of the packet */ + status = __osm_sminfo_rcv_check_set_req_legality( p_smp ); + if ( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F04: " + "Check legality failed. AttributeModifier:0x%X RemoteState:%s\n", + p_smp->attr_mod, + osm_get_sm_mgr_state_str(ib_sminfo_get_state( p_rcv_smi ) ) ); + /* send a response with error code */ + status = osm_resp_send( p_rcv->p_resp, p_madw, 7, payload ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F05: " + "Error sending response (%s)\n", + ib_get_err_str( status ) ); + } + CL_PLOCK_RELEASE( p_rcv->p_lock ); + goto Exit; + } + + /* translate from IB_SMINFO_ATTR to OSM_SM_SIGNAL */ + switch (p_smp->attr_mod) + { + case IB_SMINFO_ATTR_MOD_HANDOVER: + sm_signal = OSM_SM_SIGNAL_HANDOVER; + break; + case IB_SMINFO_ATTR_MOD_ACKNOWLEDGE: + sm_signal = OSM_SM_SIGNAL_ACKNOWLEDGE; + break; + case IB_SMINFO_ATTR_MOD_DISABLE: + sm_signal = OSM_SM_SIGNAL_DISABLE; + break; + case IB_SMINFO_ATTR_MOD_STANDBY: + sm_signal = OSM_SM_SIGNAL_STANDBY; + break; + case IB_SMINFO_ATTR_MOD_DISCOVER: + sm_signal = OSM_SM_SIGNAL_DISCOVER; + break; + default: + /* + This code shouldn't be reached - checked in the + check legality + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F06: " + "THIS CODE SHOULD NOT BE REACHED!!\n"); + CL_PLOCK_RELEASE( p_rcv->p_lock ); + goto Exit; + } + + /* check legality of the needed transition in the SM state machine */ + status = osm_sm_state_mgr_check_legality( p_rcv->p_sm_state_mgr, + sm_signal ); + if ( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F07: " + "Check legality of SM needed transition. 
AttributeModifier:0x%X RemoteState:%s\n", + p_smp->attr_mod, + osm_get_sm_mgr_state_str(ib_sminfo_get_state( p_rcv_smi ) ) ); + /* send a response with error code */ + status = osm_resp_send( p_rcv->p_resp, p_madw, 7, payload ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F08: " + "Error sending response (%s)\n", + ib_get_err_str( status ) ); + } + CL_PLOCK_RELEASE( p_rcv->p_lock ); + goto Exit; + } + + /* the SubnSet(SMInfo) command is ok. Send a response. */ + status = osm_resp_send( p_rcv->p_resp, p_madw, 0, payload ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F09: " + "Error sending response (%s)\n", + ib_get_err_str( status ) ); + } + + /* it is a legal packet - act according to it */ + + /* if the AttributeModifier is STANDBY - need to save on the */ + /* p_sm_state_mgr in the master_guid variable - the guid of the */ + /* current master. */ + if ( p_smp->attr_mod == IB_SMINFO_ATTR_MOD_STANDBY ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_sminfo_rcv_process_set_request: " + "Received a STANDBY signal. Updating " + "sm_state_mgr master_guid: 0x%016" PRIx64 "\n", + cl_ntoh64(p_rcv_smi->guid) ); + p_rcv->p_sm_state_mgr->master_guid = p_rcv_smi->guid; + } + + /* call osm_sm_state_mgr_process with the received signal. */ + CL_PLOCK_RELEASE( p_rcv->p_lock ); + status = osm_sm_state_mgr_process( p_rcv->p_sm_state_mgr, + sm_signal ); + + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_request: ERR 2F10: " + "Error in SM state transition (%s)\n", + ib_get_err_str( status ) ); + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + * Return a signal with which to call the osm_state_mgr_process. + * This is done since we are locked by p_rcv->p_lock in this function, + * and thus cannot call osm_state_mgr_process (that locks the state_lock). + * If return OSM_SIGNAL_NONE - do not call osm_state_mgr_process. + **********************************************************************/ +static osm_signal_t +__osm_sminfo_rcv_process_get_sm( + IN const osm_sminfo_rcv_t* const p_rcv, + IN const osm_remote_sm_t* const p_sm ) +{ + const ib_sm_info_t* p_smi; + osm_signal_t ret_val = OSM_SIGNAL_NONE; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sminfo_rcv_process_get_sm ); + + p_smi = &p_sm->smi; + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_sminfo_rcv_process_get_sm: " + "Detected SM 0x%016" PRIx64 " in state %u\n", + cl_ntoh64( p_smi->guid ), ib_sminfo_get_state( p_smi ) ); + } + + /* + Check the state of this SM vs. our own. + */ + switch( p_rcv->p_subn->sm_state ) + { + case IB_SMINFO_STATE_NOTACTIVE: + break; + + case IB_SMINFO_STATE_DISCOVERING: + switch( ib_sminfo_get_state( p_smi ) ) + { + case IB_SMINFO_STATE_NOTACTIVE: + break; + case IB_SMINFO_STATE_MASTER: + ret_val = OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED; + /* save on the p_sm_state_mgr the guid of the current master. */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_sminfo_rcv_process_get_sm: " + "Found master SM. 
Updating sm_state_mgr master_guid: 0x%016" PRIx64 "\n", + cl_ntoh64( p_sm->p_port->guid ) ); + p_rcv->p_sm_state_mgr->master_guid = p_sm->p_port->guid; + break; + case IB_SMINFO_STATE_DISCOVERING: + case IB_SMINFO_STATE_STANDBY: + if ( __osm_sminfo_rcv_remote_sm_is_higher(p_rcv, p_smi) == TRUE ) + { + /* the remote is a higher sm - need to stop sweeping */ + ret_val = OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED; + /* save on the p_sm_state_mgr the guid of the higher SM we found - */ + /* we will poll it - as long as it lives - we should be in Standby. */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_sminfo_rcv_process_get_sm: " + "Found higher SM. Updating sm_state_mgr master_guid:" + " 0x%016" PRIx64 "\n", + cl_ntoh64(p_sm->p_port->guid) ); + p_rcv->p_sm_state_mgr->master_guid = p_sm->p_port->guid; + } + break; + default: + break; + } + break; + + case IB_SMINFO_STATE_STANDBY: + /* if the guid of the SM that sent us this response is equal to the */ + /* p_sm_mgr->master_guid - then this is a signal that the polled */ + /* master SM is still alive. */ + switch( ib_sminfo_get_state( p_smi ) ) + { + case IB_SMINFO_STATE_MASTER: + /* This means the master is alive */ + /* Signal that to the SM state mgr */ + osm_sm_state_mgr_signal_master_is_alive( p_rcv->p_sm_state_mgr ); + break; + case IB_SMINFO_STATE_STANDBY: + /* This should be the response from the sm we are polling. */ + /* If it is - then signal master is alive */ + if (p_rcv->p_sm_state_mgr->master_guid == p_sm->p_port->guid) + { + /* Make sure that it is an SM with higher priority than us. + If we started polling it when it was master, and it moved + to standby - then it might have a lower priority than + us - and then we don't want to continue polling it. */ + if ( __osm_sminfo_rcv_remote_sm_is_higher(p_rcv, p_smi) == TRUE ) + osm_sm_state_mgr_signal_master_is_alive( p_rcv->p_sm_state_mgr ); + } + break; + default: + /* any other state - do nothing */ + break; + } + break; + + case IB_SMINFO_STATE_MASTER: + switch( ib_sminfo_get_state( p_smi ) ) + { + case IB_SMINFO_STATE_MASTER: + /* If this is a response due to our polling, this means that we are + waiting for a handover from this SM, and it is still alive - + signal that. */ + if ( p_rcv->p_sm_state_mgr->p_polling_sm != NULL ) + { + osm_sm_state_mgr_signal_master_is_alive( p_rcv->p_sm_state_mgr ); + } + else + { + /* This is a response we got while sweeping the subnet. + We will handle a case of handover needed later on, when the sweep + is done and all SMs are recognized. 
*/ + } + break; + default: + /* any other state - do nothing */ + break; + } + break; + + default: + break; + } + + OSM_LOG_EXIT( p_rcv->p_log ); + return ret_val; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sminfo_rcv_process_get_response( + IN const osm_sminfo_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_smp_t* p_smp; + const ib_sm_info_t* p_smi; + cl_qmap_t* p_sm_tbl; + cl_qmap_t* p_port_tbl; + osm_port_t* p_port; + ib_net64_t port_guid; + osm_remote_sm_t* p_sm; + osm_signal_t process_get_sm_ret_val = OSM_SIGNAL_NONE; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sminfo_rcv_process_get_response ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + if( p_smp->method != IB_MAD_METHOD_GET_RESP ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_response: ERR 2F11: " + "Unsupported method 0x%X\n", + p_smp->method ); + goto Exit; + } + + p_smi = ib_smp_get_payload_ptr( p_smp ); + p_sm_tbl = &p_rcv->p_subn->sm_guid_tbl; + p_port_tbl = &p_rcv->p_subn->port_guid_tbl; + port_guid = p_smi->guid; + + osm_dump_sm_info( p_rcv->p_log, p_smi, OSM_LOG_DEBUG ); + + /* + Check that the sm_key of the found SM is the same as ours, + or is zero. If not - OpenSM cannot continue with configuration!. */ + if ( p_smi->sm_key != 0 && + p_smi->sm_key != p_rcv->p_subn->opt.sm_key ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_response: ERR 2F18: " + "Got SM with sm_key that doesn't match our " + "local key. Exiting\n" ); + osm_log( p_rcv->p_log, OSM_LOG_SYS, + "Found remote SM with non-matching sm_key. Exiting\n" ); + osm_exit_flag = TRUE; + goto Exit; + } + + /* + Determine if we already have another SM object for this SM. + */ + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + + p_port = (osm_port_t*)cl_qmap_get( p_port_tbl, port_guid ); + if( p_port == (osm_port_t*)cl_qmap_end( p_port_tbl ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_response: ERR 2F12: " + "No port object for this SM\n" ); + goto Exit; + } + + if( osm_port_get_guid( p_port ) != p_smi->guid ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_response: ERR 2F13: " + "Bogus SM port GUID" + "\n\t\t\t\tExpected 0x%016" PRIx64 + ", Received 0x%016" PRIx64 "\n", + cl_ntoh64( osm_port_get_guid( p_port ) ), + cl_ntoh64( p_smi->guid ) ); + goto Exit; + } + + p_sm = (osm_remote_sm_t*)cl_qmap_get( p_sm_tbl, port_guid ); + if( p_sm == (osm_remote_sm_t*)cl_qmap_end( p_sm_tbl ) ) + { + p_sm = malloc( sizeof(*p_sm) ); + if( p_sm == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_get_response: ERR 2F14: " + "Unable to allocate SM object\n" ); + goto Exit; + } + + osm_remote_sm_init( p_sm, p_port, p_smi ); + + cl_qmap_insert( p_sm_tbl, port_guid, &p_sm->map_item ); + } + else + { + /* + We already know this SM. + Update the SMInfo attribute. + */ + p_sm->smi = *p_smi; + } + + process_get_sm_ret_val = __osm_sminfo_rcv_process_get_sm( p_rcv, p_sm ); + + Exit: + CL_PLOCK_RELEASE( p_rcv->p_lock ); + + /* If process_get_sm_ret_val != OSM_SIGNAL_NONE then we have to signal + * to the state_mgr with that signal. 
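__osm_sminfo_rcv_process_get_sm() returns a signal instead of dispatching it precisely because its caller still holds p_rcv->p_lock, while osm_state_mgr_process() takes its own state_lock. The pattern, condensed from the surrounding code (a fragment, not a complete function):

    /* Compute under the object lock, dispatch only after releasing it,
     * so the two locks are never held together. */
    osm_signal_t sig;

    CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock );
    sig = __osm_sminfo_rcv_process_get_sm( p_rcv, p_sm );   /* no dispatch here */
    CL_PLOCK_RELEASE( p_rcv->p_lock );

    if( sig != OSM_SIGNAL_NONE )
        osm_state_mgr_process( p_rcv->p_state_mgr, sig );   /* takes state_lock */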
*/ + if (process_get_sm_ret_val != OSM_SIGNAL_NONE) + osm_state_mgr_process( p_rcv->p_state_mgr, + process_get_sm_ret_val ); + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_sminfo_rcv_process_set_response( + IN const osm_sminfo_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + const ib_smp_t* p_smp; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_sminfo_rcv_process_set_response ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + if( p_smp->method != IB_MAD_METHOD_GET_RESP ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_response: ERR 2F16: " + "Unsupported method 0x%X\n", + p_smp->method ); + goto Exit; + } + + /* Check the AttributeModifier */ + if ( p_smp->attr_mod != IB_SMINFO_ATTR_MOD_HANDOVER ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_sminfo_rcv_process_set_response: ERR 2F17: " + "Unsupported attribute modifier 0x%X\n", + p_smp->attr_mod ); + goto Exit; + } + + /* + This is a response on a HANDOVER request - + Nothing to do. + */ + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sminfo_rcv_process( + IN const osm_sminfo_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + ib_smp_t *p_smp; + osm_smi_context_t *p_smi_context; + + OSM_LOG_ENTER( p_rcv->p_log, osm_sminfo_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + /* + Determine if this is a request for our own SMInfo + or if this is a response to our request for another + SM's SMInfo. + */ + if( ib_smp_is_response( p_smp ) ) + { + /* Get the context - to see if this is a response to a Get or Set method */ + p_smi_context = osm_madw_get_smi_context_ptr( p_madw ); + if ( p_smi_context->set_method == FALSE ) + { + /* this is a response to a Get method */ + __osm_sminfo_rcv_process_get_response( p_rcv, p_madw ); + } + else + { + /* this is a response to a Set method */ + __osm_sminfo_rcv_process_set_response( p_rcv, p_madw ); + } + } + else + { + /* This is a request */ + if ( p_smp->method == IB_MAD_METHOD_GET ) + { + /* This is a SubnGet request */ + __osm_sminfo_rcv_process_get_request( p_rcv, p_madw ); + } + else + { + /* This is a SubnSet request */ + __osm_sminfo_rcv_process_set_request( p_rcv, p_madw ); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv_ctrl.c new file mode 100644 index 00000000..61f444f0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sminfo_rcv_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_sminfo_rcv_ctrl_t. + * This object represents the SMInfo request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_sminfo_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_sminfo_rcv_process( ((osm_sminfo_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sminfo_rcv_ctrl_construct( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sminfo_rcv_ctrl_destroy( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sminfo_rcv_ctrl_init( + IN osm_sminfo_rcv_ctrl_t* const p_ctrl, + IN osm_sminfo_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sminfo_rcv_ctrl_init ); + + osm_sminfo_rcv_ctrl_construct( p_ctrl ); + + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SM_INFO, + __osm_sminfo_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sminfo_rcv_ctrl_init: ERR 3001: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr.c new file mode 100644 index 00000000..19ec46f0 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr.c @@ -0,0 +1,2986 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
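The controller above is the whole glue layer: it binds the receiver to a dispatcher message at init and unbinds at destroy. A reusable skeleton with hypothetical my_* names; the handle type name is assumed from complib's dispatcher header, and cl_disp_register() returns CL_DISP_INVALID_HANDLE on failure exactly as checked above:

    /* Skeleton of an opensm-style dispatcher controller
     * (assumes complib's dispatcher declarations are in scope). */
    typedef struct my_ctrl {
        cl_dispatcher_t      *p_disp;
        cl_disp_reg_handle_t h_disp;    /* assumed handle type */
    } my_ctrl_t;

    static void my_disp_cb( IN void *context, IN void *p_data )
    {
        /* forward the MAD wrapper to the owning receiver object */
        my_ctrl_t *p_ctrl = (my_ctrl_t*)context;
        (void)p_ctrl; (void)p_data;
    }

    ib_api_status_t my_ctrl_init( my_ctrl_t *p_ctrl, cl_dispatcher_t *p_disp )
    {
        p_ctrl->p_disp = p_disp;
        p_ctrl->h_disp = cl_disp_register( p_disp, OSM_MSG_MAD_SM_INFO,
                                           my_disp_cb, p_ctrl );
        return ( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) ?
               IB_INSUFFICIENT_RESOURCES : IB_SUCCESS;
    }

    void my_ctrl_destroy( my_ctrl_t *p_ctrl )
    {
        cl_disp_unregister( p_ctrl->h_disp );
    }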
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_state_mgr_t. + * This file implements the State Manager object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.13 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define SUBNET_LIST_FILENAME "/osm-subnet.lst" + +osm_signal_t osm_qos_setup(IN osm_opensm_t * p_osm); + +/********************************************************************** + **********************************************************************/ +void +osm_state_mgr_construct( + IN osm_state_mgr_t * const p_mgr ) +{ + memset( p_mgr, 0, sizeof( *p_mgr ) ); + cl_spinlock_construct( &p_mgr->state_lock ); + cl_spinlock_construct( &p_mgr->idle_lock ); + p_mgr->state = OSM_SM_STATE_INIT; +} + +/********************************************************************** + **********************************************************************/ +void +osm_state_mgr_destroy( + IN osm_state_mgr_t * const p_mgr ) +{ + CL_ASSERT( p_mgr ); + + OSM_LOG_ENTER( p_mgr->p_log, osm_state_mgr_destroy ); + + /* destroy the locks */ + cl_spinlock_destroy( &p_mgr->state_lock ); + cl_spinlock_destroy( &p_mgr->idle_lock ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_state_mgr_init( + IN osm_state_mgr_t * const p_mgr, + IN osm_subn_t * const p_subn, + IN osm_lid_mgr_t * const p_lid_mgr, + IN osm_ucast_mgr_t * const p_ucast_mgr, + IN osm_mcast_mgr_t * const p_mcast_mgr, + IN osm_link_mgr_t * const p_link_mgr, + IN osm_drop_mgr_t * const p_drop_mgr, + IN osm_req_t * const p_req, + IN osm_stats_t * const p_stats, + IN osm_sm_state_mgr_t * const p_sm_state_mgr, + IN const osm_sm_mad_ctrl_t * const p_mad_ctrl, + IN cl_plock_t * const p_lock, + IN cl_event_t * const p_subnet_up_event, + IN osm_log_t * const p_log ) +{ + cl_status_t status; + + OSM_LOG_ENTER( p_log, osm_state_mgr_init ); + + CL_ASSERT( p_subn ); + CL_ASSERT( p_lid_mgr ); + CL_ASSERT( p_ucast_mgr ); + CL_ASSERT( p_mcast_mgr ); + CL_ASSERT( p_link_mgr ); + CL_ASSERT( p_drop_mgr ); + CL_ASSERT( p_req ); + CL_ASSERT( p_stats ); + CL_ASSERT( p_sm_state_mgr ); + 
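osm_state_mgr_construct()/init()/destroy() below follow the lifecycle convention used across opensm and complib: construct() only zeroes and prepares members so destroy() is always safe to call, while init() is where failure-prone resource acquisition happens. The convention on a toy object (toy_* names are illustrative; the complib spinlock calls are the ones used in this file):

    #include <string.h>

    typedef struct toy_mgr {
        cl_spinlock_t lock;
    } toy_mgr_t;

    /* cheap, cannot fail: leaves the object safely destroyable */
    void toy_mgr_construct( toy_mgr_t *p )
    {
        memset( p, 0, sizeof(*p) );
        cl_spinlock_construct( &p->lock );
    }

    /* acquires real resources; may fail */
    cl_status_t toy_mgr_init( toy_mgr_t *p )
    {
        toy_mgr_construct( p );
        return cl_spinlock_init( &p->lock );
    }

    /* safe after construct(), whether or not init() succeeded */
    void toy_mgr_destroy( toy_mgr_t *p )
    {
        cl_spinlock_destroy( &p->lock );
    }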
CL_ASSERT( p_mad_ctrl ); + CL_ASSERT( p_lock ); + + osm_state_mgr_construct( p_mgr ); + + p_mgr->p_log = p_log; + p_mgr->p_subn = p_subn; + p_mgr->p_lid_mgr = p_lid_mgr; + p_mgr->p_ucast_mgr = p_ucast_mgr; + p_mgr->p_mcast_mgr = p_mcast_mgr; + p_mgr->p_link_mgr = p_link_mgr; + p_mgr->p_drop_mgr = p_drop_mgr; + p_mgr->p_mad_ctrl = p_mad_ctrl; + p_mgr->p_req = p_req; + p_mgr->p_stats = p_stats; + p_mgr->p_sm_state_mgr = p_sm_state_mgr; + p_mgr->state = OSM_SM_STATE_IDLE; + p_mgr->p_lock = p_lock; + p_mgr->p_subnet_up_event = p_subnet_up_event; + p_mgr->state_step_mode = OSM_STATE_STEP_CONTINUOUS; + p_mgr->next_stage_signal = OSM_SIGNAL_NONE; + + status = cl_spinlock_init( &p_mgr->state_lock ); + if( status != CL_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_state_mgr_init: ERR 3301: " + "Spinlock init failed (%s)\n", CL_STATUS_MSG( status ) ); + } + + cl_qlist_init( &p_mgr->idle_time_list ); + + status = cl_spinlock_init( &p_mgr->idle_lock ); + if( status != CL_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_state_mgr_init: ERR 3302: " + "Spinlock init failed (%s)\n", CL_STATUS_MSG( status ) ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_up_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + /* + * This message should be written only once - when the + * SM moves to Master state and the subnet is up for + * the first time. The change of state is marked with + * the subnet flag moved_to_master_state + */ + if( p_mgr->p_subn->moved_to_master_state == TRUE ) + { + osm_log( p_mgr->p_log, OSM_LOG_SYS, "SUBNET UP\n" ); /* Format Waived */ + /* clear the signal */ + p_mgr->p_subn->moved_to_master_state = FALSE; + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_INFO, "SUBNET UP\n" ); /* Format Waived */ + } + + if( p_mgr->p_subn->opt.sweep_interval ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_up_msg: " + "\n\n\n********************************" + "**********************************\n" + "**************************** SUBNET UP " + "***************************\n" + "**************************************" + "****************************\n\n\n" ); + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_up_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************* SUBNET UP " + "(sweep disabled) *******************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_init_errors_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + osm_log( p_mgr->p_log, OSM_LOG_SYS, "Errors during initialization\n" ); /* Format Waived */ + + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_init_errors_msg: " + "\n\n\n********************************" + "**********************************\n" + "****************** ERRORS DURING INITI" + "ALIZATION ******************\n" + "**************************************" + "****************************\n\n\n" ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_light_sweep_done_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + 
if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_light_sweep_done_msg: " + "\n\n\n********************************" + "**********************************\n" + "********************** LIGHT SWEEP " + "COMPLETE **********************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_standby_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_standby_msg: " + "\n\n\n********************************" + "**********************************\n" + "******************** ENTERING STANDBY" + " STATE **********************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_sm_port_down_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + osm_log( p_mgr->p_log, OSM_LOG_SYS, "SM port is down\n" ); /* Format Waived */ + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_sm_port_down_msg: " + "\n\n\n********************************" + "**********************************\n" + "************************** SM PORT DOWN " + "**************************\n" + "**************************************" + "****************************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_lid_assign_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_lid_assign_msg: " + "\n\n\n**************************************" + "****************************\n" + "***** LID ASSIGNMENT COMPLETE - STARTING SWITC" + "H TABLE CONFIG *****\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_set_sm_lid_done_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_set_sm_lid_done_msg: " + "\n\n\n**************************************" + "****************************\n" + "**** SM LID ASSIGNMENT COMPLETE - STARTING SUBN" + "ET LID CONFIG *****\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_switch_config_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_switch_config_msg: " + "\n\n\n**************************************" + "****************************\n" + "***************** SWITCHES CONFIGURED FOR UNICAST " + 
"****************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_multicast_config_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_multicast_config_msg: " + "\n\n\n**************************************" + "****************************\n" + "**************** SWITCHES CONFIGURED FOR MULTICAST " + "***************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_links_ports_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_links_ports_msg: " + "\n\n\n**************************************" + "****************************\n" + "******* LINKS PORTS CONFIGURED - SET LINKS TO ARMED " + "STATE ********\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_links_armed_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_links_armed_msg: " + "\n\n\n**************************************" + "****************************\n" + "************* LINKS ARMED - SET LINKS TO ACTIVE " + "STATE ************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_sweep_heavy_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_sweep_heavy_msg: " + "\n\n\n**************************************" + "****************************\n" + "******************** INITIATING HEAVY SWEEP " + "**********************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_sweep_heavy_done_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_sweep_heavy_done_msg: " + "\n\n\n**************************************" + "****************************\n" + "********************* HEAVY SWEEP COMPLETE " + "***********************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_sweep_light_msg( + IN const osm_state_mgr_t * p_mgr ) +{ + if( 
osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_sweep_light_msg: " + "\n\n\n**************************************" + "****************************\n" + "******************** INITIATING LIGHT SWEEP " + "**********************\n" + "*********************************************" + "*********************\n\n\n" ); + } +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_signal_warning( + IN const osm_state_mgr_t * const p_mgr, + IN const osm_signal_t signal ) +{ + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_signal_warning: " + "Invalid signal %s(%lu) in state %s\n", + osm_get_sm_signal_str( signal ), + signal, osm_get_sm_state_str( p_mgr->state ) ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_signal_error( + IN const osm_state_mgr_t * const p_mgr, + IN const osm_signal_t signal ) +{ + /* the Request for IDLE processing can come async to the state so it + * really is just verbose ... */ + if( signal == OSM_SIGNAL_IDLE_TIME_PROCESS_REQUEST ) + __osm_state_mgr_signal_warning( p_mgr, signal ); + else + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_signal_error: ERR 3303: " + "Invalid signal %s(%lu) in state %s\n", + osm_get_sm_signal_str( signal ), + signal, osm_get_sm_state_str( p_mgr->state ) ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_reset_node_count( + IN cl_map_item_t * const p_map_item, + IN void *context ) +{ + osm_node_t *p_node = ( osm_node_t * ) p_map_item; + osm_state_mgr_t *const p_mgr = ( osm_state_mgr_t * ) context; + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_reset_node_count: " + "Resetting discovery count for node 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + } + + osm_node_discovery_count_reset( p_node ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_reset_port_count( + IN cl_map_item_t * const p_map_item, + IN void *context ) +{ + osm_port_t *p_port = ( osm_port_t * ) p_map_item; + osm_state_mgr_t *const p_mgr = ( osm_state_mgr_t * ) context; + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_reset_port_count: " + "Resetting discovery count for port 0x%" PRIx64 "\n", + cl_ntoh64( osm_port_get_guid( p_port ) ) ); + } + + osm_port_discovery_count_reset( p_port ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_reset_switch_count( + IN cl_map_item_t * const p_map_item, + IN void *context ) +{ + osm_switch_t *p_sw = ( osm_switch_t * ) p_map_item; + osm_state_mgr_t *const p_mgr = ( osm_state_mgr_t * ) context; + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_reset_switch_count: " + "Resetting discovery count for switch 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( 
p_sw->p_node ) ) ); + } + + osm_switch_discovery_count_reset( p_sw ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_get_sw_info( + IN cl_map_item_t * const p_object, + IN void *context ) +{ + osm_node_t *p_node; + osm_dr_path_t *p_dr_path; + osm_madw_context_t mad_context; + osm_switch_t *const p_sw = ( osm_switch_t * ) p_object; + osm_state_mgr_t *const p_mgr = ( osm_state_mgr_t * ) context; + ib_api_status_t status; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_get_sw_info ); + + p_node = osm_switch_get_node_ptr( p_sw ); + p_dr_path = osm_node_get_any_dr_path_ptr( p_node ); + + memset( &context, 0, sizeof( context ) ); + + mad_context.si_context.node_guid = osm_node_get_node_guid( p_node ); + mad_context.si_context.set_method = FALSE; + mad_context.si_context.light_sweep = TRUE; + + status = osm_req_get( p_mgr->p_req, + p_dr_path, + IB_MAD_ATTR_SWITCH_INFO, + 0, OSM_MSG_LIGHT_SWEEP_FAIL, &mad_context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_get_sw_info: ERR 3304: " + "Request SwitchInfo failed\n" ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + Initiate a remote port info request for the given physical port + **********************************************************************/ +static void +__osm_state_mgr_get_remote_port_info( + IN osm_state_mgr_t * const p_mgr, + IN osm_physp_t * const p_physp ) +{ + osm_dr_path_t *p_dr_path; + osm_dr_path_t rem_node_dr_path; + osm_madw_context_t mad_context; + ib_api_status_t status; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_get_remote_port_info ); + + /* generate a dr path leaving on the physp to the remote node */ + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + memcpy( &rem_node_dr_path, p_dr_path, sizeof( osm_dr_path_t ) ); + osm_dr_path_extend( &rem_node_dr_path, osm_physp_get_port_num( p_physp ) ); + + memset( &mad_context, 0, sizeof( mad_context ) ); + + mad_context.pi_context.node_guid = + osm_node_get_node_guid( osm_physp_get_node_ptr( p_physp ) ); + mad_context.pi_context.port_guid = + cl_hton64( osm_physp_get_port_num( p_physp ) ); + mad_context.pi_context.set_method = FALSE; + mad_context.pi_context.light_sweep = TRUE; + mad_context.pi_context.ignore_errors = FALSE; + mad_context.pi_context.update_master_sm_base_lid = FALSE; + mad_context.pi_context.active_transition = FALSE; + + /* note that with some negative logic - if the query failed it means that + * there is no point in going to heavy sweep */ + status = osm_req_get( p_mgr->p_req, + &rem_node_dr_path, + IB_MAD_ATTR_PORT_INFO, + 0, CL_DISP_MSGID_NONE, &mad_context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_get_remote_port_info: ERR 332E: " + "Request for PortInfo failed\n" ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + Initiates a thorough sweep of the subnet. + Used when there is suspicion that something on the subnet has changed. 
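__osm_state_mgr_get_remote_port_info() above copies the local directed-route path and calls osm_dr_path_extend() to aim one hop past the local port. What extending means, sketched with an illustrative struct (not the real osm_dr_path_t layout; IB directed routes leave entry 0 of the initial path unused):

    #include <stdint.h>

    #define MAX_HOPS 64   /* stand-in for IB_SUBNET_PATH_HOPS_MAX */

    typedef struct dr_path {
        uint8_t hop_count;
        uint8_t path[MAX_HOPS];   /* hops live in path[1..hop_count] */
    } dr_path_t;

    /* append the egress port; returns -1 if the path is full */
    static int dr_path_extend(dr_path_t *p, uint8_t egress_port)
    {
        if (p->hop_count + 1 >= MAX_HOPS)
            return -1;
        p->path[++p->hop_count] = egress_port;
        return 0;
    }

The hop-1 probes in __osm_state_mgr_sweep_hop_1() below build the same shape directly: hop count 1 with path_array[1] set to the egress port number.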
+**********************************************************************/ +static ib_api_status_t +__osm_state_mgr_sweep_hop_0( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_api_status_t status; + osm_dr_path_t dr_path; + osm_bind_handle_t h_bind; + osm_ni_context_t ni_context; + uint8_t path_array[IB_SUBNET_PATH_HOPS_MAX]; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_sweep_hop_0 ); + + memset( path_array, 0, sizeof( path_array ) ); + + /* + * First, get the bind handle. + */ + h_bind = osm_sm_mad_ctrl_get_bind_handle( p_mgr->p_mad_ctrl ); + if( h_bind != OSM_BIND_INVALID_HANDLE ) + { + __osm_state_mgr_sweep_heavy_msg( p_mgr ); + + /* + * Start the sweep by clearing the port counts, then + * get our own NodeInfo at 0 hops. + */ + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + + cl_qmap_apply_func( &p_mgr->p_subn->node_guid_tbl, + __osm_state_mgr_reset_node_count, p_mgr ); + + cl_qmap_apply_func( &p_mgr->p_subn->port_guid_tbl, + __osm_state_mgr_reset_port_count, p_mgr ); + + cl_qmap_apply_func( &p_mgr->p_subn->sw_guid_tbl, + __osm_state_mgr_reset_switch_count, p_mgr ); + + /* Set the in_sweep_hop_0 flag in subn to be TRUE. + * This will indicate the sweeping not to continue beyond + * the current node. + * This is relevant for the case of SM on switch, since in the + * switch info we need to signal somehow not to continue + * the sweeping. */ + p_mgr->p_subn->in_sweep_hop_0 = TRUE; + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + memset( &ni_context, 0, sizeof( ni_context ) ); + osm_dr_path_init( &dr_path, h_bind, 0, path_array ); + status = osm_req_get( p_mgr->p_req, + &dr_path, + IB_MAD_ATTR_NODE_INFO, + 0, CL_DISP_MSGID_NONE, NULL ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_sweep_hop_0: ERR 3305: " + "Request NodeInfo failed\n" ); + } + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_sweep_hop_0: " + "No bound ports. Deferring sweep...\n" ); + status = IB_INVALID_STATE; + } + + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + Clear out all existing port lid assignments +**********************************************************************/ +static ib_api_status_t +__osm_state_mgr_clean_known_lids( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_api_status_t status = IB_SUCCESS; + cl_ptr_vector_t *p_vec = &( p_mgr->p_subn->port_lid_tbl ); + uint32_t i; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_clean_known_lids ); + + /* we need a lock here! */ + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + + for( i = 0; i < cl_ptr_vector_get_size( p_vec ); i++ ) + cl_ptr_vector_set( p_vec, i, NULL ); + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + Notifies the transport layer that the local LID has changed, + which gives it a chance to update address vectors, etc. +**********************************************************************/ +static ib_api_status_t +__osm_state_mgr_notify_lid_change( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_api_status_t status; + osm_bind_handle_t h_bind; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_notify_lid_change ); + + /* + * First, get the bind handle. 
+ */ + h_bind = osm_sm_mad_ctrl_get_bind_handle( p_mgr->p_mad_ctrl ); + if( h_bind == OSM_BIND_INVALID_HANDLE ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_notify_lid_change: ERR 3306: " + "No bound ports\n" ); + status = IB_ERROR; + goto Exit; + } + + /* + * Notify the transport layer that we changed the local LID. + */ + status = osm_vendor_local_lid_change( h_bind ); + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_notify_lid_change: ERR 3307: " + "Vendor LID update failed (%s)\n", ib_get_err_str( status ) ); + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + Returns true if the SM port is down. + The SM's port object must exist in the port_guid table. +**********************************************************************/ +static boolean_t +__osm_state_mgr_is_sm_port_down( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_net64_t port_guid; + osm_port_t *p_port; + osm_physp_t *p_physp; + cl_qmap_t *p_tbl; + uint8_t state; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_is_sm_port_down ); + + port_guid = p_mgr->p_subn->sm_port_guid; + + /* + * If we don't know our own port guid yet, assume the port is down. + */ + if( port_guid == 0 ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_is_sm_port_down: ERR 3308: " + "SM port GUID unknown\n" ); + state = IB_LINK_DOWN; + goto Exit; + } + + p_tbl = &p_mgr->p_subn->port_guid_tbl; + + CL_ASSERT( port_guid ); + + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + p_port = ( osm_port_t * ) cl_qmap_get( p_tbl, port_guid ); + if( p_port == ( osm_port_t * ) cl_qmap_end( p_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_is_sm_port_down: ERR 3309: " + "SM port with GUID:%016" PRIx64 " is unknown\n", + cl_ntoh64( port_guid ) ); + state = IB_LINK_DOWN; + CL_PLOCK_RELEASE( p_mgr->p_lock ); + goto Exit; + } + + p_physp = osm_port_get_default_phys_ptr( p_port ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + state = osm_physp_get_port_state( p_physp ); + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return ( state == IB_LINK_DOWN ); +} + +/********************************************************************** + Sweeps the node 1 hop away. + This sets off a "chain reaction" that causes discovery of the subnet. + Used when there is suspicion that something on the subnet has changed. +**********************************************************************/ +static ib_api_status_t +__osm_state_mgr_sweep_hop_1( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_bind_handle_t h_bind; + osm_madw_context_t context; + osm_node_t *p_node; + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_dr_path_t *p_dr_path; + osm_dr_path_t hop_1_path; + ib_net64_t port_guid; + uint8_t port_num; + cl_qmap_t *p_port_tbl; + uint8_t path_array[IB_SUBNET_PATH_HOPS_MAX]; + uint8_t num_ports; + osm_physp_t *p_ext_physp; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_sweep_hop_1 ); + + /* + * First, get our own port and node objects. + */ + p_port_tbl = &p_mgr->p_subn->port_guid_tbl; + port_guid = p_mgr->p_subn->sm_port_guid; + + CL_ASSERT( port_guid ); + + /* Set the in_sweep_hop_0 flag in subn to be FALSE. + * This will indicate the sweeping to continue beyond + * the current node. 
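The liveness test __osm_state_mgr_is_sm_port_down() above is deliberately pessimistic: any lookup failure is reported as a down link so the sweep logic stalls safely instead of chasing stale pointers. Condensed, without the locking and logging of the original:

    /* Condensed sketch of the decision only; the real function holds
     * p_lock around the table lookup and logs each failure path. */
    static boolean_t sm_port_is_down( osm_state_mgr_t * const p_mgr )
    {
        cl_qmap_t  *p_tbl = &p_mgr->p_subn->port_guid_tbl;
        osm_port_t *p_port;

        if( p_mgr->p_subn->sm_port_guid == 0 )
            return TRUE;    /* own GUID unknown yet: assume down */

        p_port = (osm_port_t*)cl_qmap_get( p_tbl, p_mgr->p_subn->sm_port_guid );
        if( p_port == (osm_port_t*)cl_qmap_end( p_tbl ) )
            return TRUE;    /* no port object: assume down */

        return osm_physp_get_port_state(
                   osm_port_get_default_phys_ptr( p_port ) ) == IB_LINK_DOWN;
    }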
+ * This is relevant for the case of SM on switch, since in the + * switch info we need to signal that the sweeping should + * continue through the switch. */ + p_mgr->p_subn->in_sweep_hop_0 = FALSE; + + p_port = ( osm_port_t * ) cl_qmap_get( p_port_tbl, port_guid ); + if( p_port == ( osm_port_t * ) cl_qmap_end( p_port_tbl ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_sweep_hop_1: ERR 3310: " + "No SM port object\n" ); + status = IB_ERROR; + goto Exit; + } + + p_node = osm_port_get_parent_node( p_port ); + CL_ASSERT( p_node ); + + port_num = ib_node_info_get_local_port_num( &p_node->node_info ); + + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_sweep_hop_1: " + "Probing hop 1 on local port %u\n", port_num ); + + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + h_bind = osm_dr_path_get_bind_handle( p_dr_path ); + + CL_ASSERT( h_bind != OSM_BIND_INVALID_HANDLE ); + + memset( path_array, 0, sizeof( path_array ) ); + /* the hop_1 operations depend on the type of our node. + * Currently - legal nodes that can host SM are SW and CA */ + switch ( osm_node_get_type( p_node ) ) + { + case IB_NODE_TYPE_CA: + case IB_NODE_TYPE_ROUTER: + context.ni_context.node_guid = osm_node_get_node_guid( p_node ); + context.ni_context.port_num = port_num; + + path_array[1] = port_num; + + osm_dr_path_init( &hop_1_path, h_bind, 1, path_array ); + status = osm_req_get( p_mgr->p_req, + &hop_1_path, + IB_MAD_ATTR_NODE_INFO, + 0, CL_DISP_MSGID_NONE, &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_sweep_hop_1: ERR 3311: " + "Request NodeInfo failed\n" ); + } + break; + + case IB_NODE_TYPE_SWITCH: + /* Need to go over all the ports of the switch, and send a node_info + * from them. This doesn't include the port 0 of the switch, which + * hosts the SM. + * Note: We'll send another switchInfo on port 0, since if no ports + * are connected, we still want to get some response, and have the + * subnet come up. + */ + num_ports = osm_node_get_num_physp( p_node ); + for( port_num = 0; port_num < num_ports; port_num++ ) + { + /* go through the port only if the port is not DOWN */ + p_ext_physp = osm_node_get_physp_ptr( p_node, port_num ); + /* Make sure the physp object exists */ + if( !p_ext_physp ) + continue; + if( ib_port_info_get_port_state( &( p_ext_physp->port_info ) ) > + IB_LINK_DOWN ) + { + context.ni_context.node_guid = osm_node_get_node_guid( p_node ); + context.ni_context.port_num = port_num; + + path_array[1] = port_num; + + osm_dr_path_init( &hop_1_path, h_bind, 1, path_array ); + status = osm_req_get( p_mgr->p_req, + &hop_1_path, + IB_MAD_ATTR_NODE_INFO, + 0, CL_DISP_MSGID_NONE, &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_sweep_hop_1: ERR 3312: " + "Request NodeInfo failed\n" ); + } + } + } + break; + + default: + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_sweep_hop_1: ERR 3313: Unknown node type %d\n", + osm_node_get_type( p_node ) ); + } + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + Initiates a lightweight sweep of the subnet. + Used during normal sweeps after the subnet is up. 
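+ A light sweep re-reads SwitchInfo from all known switches and probes
+ ports that were up but had no known remote port; any change detected
+ escalates to a heavy sweep.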
+**********************************************************************/ +static ib_api_status_t +__osm_state_mgr_light_sweep_start( + IN osm_state_mgr_t * const p_mgr ) +{ + ib_api_status_t status = IB_SUCCESS; + osm_bind_handle_t h_bind; + cl_qmap_t *p_sw_tbl; + cl_list_t *p_no_rem_port_list; + cl_list_iterator_t list_iter; + uint8_t path_array[IB_SUBNET_PATH_HOPS_MAX]; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_light_sweep_start ); + + p_sw_tbl = &p_mgr->p_subn->sw_guid_tbl; + + memset( path_array, 0, sizeof( path_array ) ); + + /* + * First, get the bind handle. + */ + h_bind = osm_sm_mad_ctrl_get_bind_handle( p_mgr->p_mad_ctrl ); + if( h_bind != OSM_BIND_INVALID_HANDLE ) + { + __osm_state_mgr_sweep_light_msg( p_mgr ); + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + cl_qmap_apply_func( p_sw_tbl, __osm_state_mgr_get_sw_info, p_mgr ); + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + /* now scan the list of physical ports that were not down but have no remote port */ + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + p_no_rem_port_list = &p_mgr->p_subn->light_sweep_physp_list; + list_iter = cl_list_head( p_no_rem_port_list ); + while( list_iter != cl_list_end( p_no_rem_port_list ) ) + { + __osm_state_mgr_get_remote_port_info( p_mgr, + ( osm_physp_t * ) + cl_list_obj( list_iter ) ); + list_iter = cl_list_next( list_iter ); + } + CL_PLOCK_RELEASE( p_mgr->p_lock ); + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_light_sweep_start: " + "No bound ports. Deferring sweep...\n" ); + status = IB_INVALID_STATE; + } + + OSM_LOG_EXIT( p_mgr->p_log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_topology_file_create( + IN osm_state_mgr_t * const p_mgr ) +{ + const osm_node_t *p_node; + char *file_name; + FILE *rc; + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_topology_file_create ); + + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + + file_name = + ( char * )malloc( strlen( p_mgr->p_subn->opt.dump_files_dir ) + + strlen(SUBNET_LIST_FILENAME) + 1 ); + + CL_ASSERT( file_name ); + + strcpy( file_name, p_mgr->p_subn->opt.dump_files_dir ); + strcat( file_name, SUBNET_LIST_FILENAME ); + + if( ( rc = fopen( file_name, "w" ) ) == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_topology_file_create: " + "fopen failed for file:%s\n", file_name ); + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + goto Exit; + } + + p_node = ( osm_node_t * ) cl_qmap_head( &p_mgr->p_subn->node_guid_tbl ); + while( p_node != + ( osm_node_t * ) cl_qmap_end( &p_mgr->p_subn->node_guid_tbl ) ) + { + if( p_node->node_info.num_ports ) + { + uint32_t cPort; + osm_node_t *p_nbnode; + osm_physp_t *p_physp; + osm_physp_t *p_default_physp; + osm_physp_t *p_rphysp; + uint8_t link_speed_act; + + for( cPort = 1; cPort < osm_node_get_num_physp( p_node ); cPort++ ) + { + uint8_t port_state; + + p_physp = osm_node_get_physp_ptr( p_node, cPort ); + + if( ( p_physp == NULL ) || ( !osm_physp_is_valid( p_physp ) ) ) + continue; + + p_rphysp = p_physp->p_remote_physp; + + if( ( p_rphysp == NULL ) || ( !osm_physp_is_valid( p_rphysp ) ) ) + continue; + + CL_ASSERT( cPort == p_physp->port_num ); + + if( p_node->node_info.node_type == IB_NODE_TYPE_SWITCH ) + { + p_default_physp = osm_node_get_physp_ptr( p_node, 0 ); + } + else + { + p_default_physp = p_physp; + } + + memcpy(desc, p_node->node_desc.description, + IB_NODE_DESCRIPTION_SIZE); + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + 
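+ /* Write the local end of the link first; the remote end and the
+ * PHY/LOG/SPD link summary follow. */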
+ fprintf( rc, "{ %s%s Ports:%02X"
+ " SystemGUID:%016" PRIx64
+ " NodeGUID:%016" PRIx64
+ " PortGUID:%016" PRIx64
+ " VenID:%06X DevID:%04X Rev:%08X {%s} LID:%04X PN:%02X } ",
+ ( p_node->node_info.node_type ==
+ IB_NODE_TYPE_SWITCH ) ? "SW" : ( p_node->node_info.
+ node_type ==
+ IB_NODE_TYPE_CA ) ?
+ "CA" : ( p_node->node_info.node_type ==
+ IB_NODE_TYPE_ROUTER ) ? "Rt" : "**",
+ ( p_default_physp->port_info.base_lid ==
+ p_default_physp->port_info.
+ master_sm_base_lid ) ? "-SM" : "",
+ p_node->node_info.num_ports,
+ cl_ntoh64( p_node->node_info.sys_guid ),
+ cl_ntoh64( p_node->node_info.node_guid ),
+ cl_ntoh64( p_physp->port_guid ),
+ cl_ntoh32( ib_node_info_get_vendor_id
+ ( &p_node->node_info ) ),
+ cl_ntoh16( p_node->node_info.device_id ),
+ cl_ntoh32( p_node->node_info.revision ),
+ desc,
+ cl_ntoh16( p_default_physp->port_info.base_lid ),
+ cPort );
+
+ p_nbnode = p_rphysp->p_node;
+
+ if( p_nbnode->node_info.node_type == IB_NODE_TYPE_SWITCH )
+ {
+ p_default_physp = osm_node_get_physp_ptr( p_nbnode, 0 );
+ }
+ else
+ {
+ p_default_physp = p_rphysp;
+ }
+
+ memcpy(desc, p_nbnode->node_desc.description,
+ IB_NODE_DESCRIPTION_SIZE);
+ desc[IB_NODE_DESCRIPTION_SIZE] = '\0';
+
+ fprintf( rc, "{ %s%s Ports:%02X"
+ " SystemGUID:%016" PRIx64
+ " NodeGUID:%016" PRIx64
+ " PortGUID:%016" PRIx64
+ " VenID:%06X DevID:%04X Rev:%08X {%s} LID:%04X PN:%02X } ",
+ ( p_nbnode->node_info.node_type ==
+ IB_NODE_TYPE_SWITCH ) ? "SW" : ( p_nbnode->node_info.
+ node_type ==
+ IB_NODE_TYPE_CA ) ?
+ "CA" : ( p_nbnode->node_info.node_type ==
+ IB_NODE_TYPE_ROUTER ) ? "Rt" : "**",
+ ( p_default_physp->port_info.base_lid ==
+ p_default_physp->port_info.
+ master_sm_base_lid ) ? "-SM" : "",
+ p_nbnode->node_info.num_ports,
+ cl_ntoh64( p_nbnode->node_info.sys_guid ),
+ cl_ntoh64( p_nbnode->node_info.node_guid ),
+ cl_ntoh64( p_rphysp->port_guid ),
+ cl_ntoh32( ib_node_info_get_vendor_id
+ ( &p_nbnode->node_info ) ),
+ cl_ntoh16( p_nbnode->node_info.device_id ),
+ cl_ntoh32( p_nbnode->node_info.revision ),
+ desc,
+ cl_ntoh16( p_default_physp->port_info.base_lid ),
+ p_rphysp->port_num );
+
+ port_state = ib_port_info_get_port_state( &p_physp->port_info );
+ link_speed_act =
+ ib_port_info_get_link_speed_active( &p_physp->port_info );
+
+ fprintf( rc, "PHY=%s LOG=%s SPD=%s\n",
+ ( p_physp->port_info.link_width_active == 1 ) ? "1x" :
+ ( p_physp->port_info.link_width_active == 2 ) ? "4x" :
+ ( p_physp->port_info.link_width_active == 8 ) ? "12x" :
+ "??",
+ ( ( port_state == IB_LINK_ACTIVE ) ? "ACT" :
+ ( port_state == IB_LINK_ARMED ) ? "ARM" :
+ ( port_state == IB_LINK_INIT ) ? "INI" : "DWN" ),
+ ( link_speed_act == 1 ) ? "2.5" :
+ ( link_speed_act == 2 ) ? "5" :
+ ( link_speed_act == 4 ) ? "10" : "??" 
); + } + } + p_node = ( osm_node_t * ) cl_qmap_next( &p_node->map_item ); + } + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + + fclose( rc ); + + Exit: + free( file_name ); + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_state_mgr_report( + IN osm_state_mgr_t * const p_mgr ) +{ + const cl_qmap_t *p_tbl; + const osm_port_t *p_port; + const osm_node_t *p_node; + const osm_physp_t *p_physp; + const osm_physp_t *p_remote_physp; + const ib_port_info_t *p_pi; + uint8_t port_num; + uint8_t start_port; + uint32_t num_ports; + uint8_t node_type; + + if( !osm_log_is_active( p_mgr->p_log, OSM_LOG_VERBOSE ) ) + return; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_report ); + + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, + "\n===================================================" + "====================================================" + "\nVendor : Ty " + ": # : Sta : LID : LMC : MTU : LWA : LSA : Port GUID " + " : Neighbor Port (Port #)\n" ); + + p_tbl = &p_mgr->p_subn->port_guid_tbl; + + /* + * Hold lock non-exclusively while we perform these read-only operations. + */ + + CL_PLOCK_ACQUIRE( p_mgr->p_lock ); + p_port = ( osm_port_t * ) cl_qmap_head( p_tbl ); + while( p_port != ( osm_port_t * ) cl_qmap_end( p_tbl ) ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_report: " + "Processing port 0x%016" PRIx64 "\n", + cl_ntoh64( osm_port_get_guid( p_port ) ) ); + } + + p_node = osm_port_get_parent_node( p_port ); + node_type = osm_node_get_type( p_node ); + if( node_type == IB_NODE_TYPE_SWITCH ) + start_port = 0; + else + start_port = 1; + + num_ports = osm_port_get_num_physp( p_port ); + for( port_num = start_port; port_num < num_ports; port_num++ ) + { + p_physp = osm_port_get_phys_ptr( p_port, port_num ); + if( ( p_physp == NULL ) || ( !osm_physp_is_valid( p_physp ) ) ) + continue; + + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, "%s : %s : %02X :", + osm_get_manufacturer_str( cl_ntoh64 + ( osm_node_get_node_guid + ( p_node ) ) ), + osm_get_node_type_str_fixed_width( node_type ), + port_num ); + + p_pi = osm_physp_get_port_info_ptr( p_physp ); + + /* + * Port state is not defined for switch port 0 + */ + if( port_num == 0 ) + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " :" ); + else + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " %s :", + osm_get_port_state_str_fixed_width + ( ib_port_info_get_port_state( p_pi ) ) ); + + /* + * LID values are only meaningful in select cases. 
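+ * A switch reports its one LID through port 0, while CA and
+ * router ports each carry a LID of their own.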
+ */ + if( ib_port_info_get_port_state( p_pi ) != IB_LINK_DOWN + && ( ( node_type == IB_NODE_TYPE_SWITCH && port_num == 0 ) + || node_type != IB_NODE_TYPE_SWITCH ) ) + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " %04X : %01X :", + cl_ntoh16( p_pi->base_lid ), + ib_port_info_get_lmc( p_pi ) ); + else + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " : :" ); + + if( port_num != 0 ) + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " %s : %s : %s ", + osm_get_mtu_str( ib_port_info_get_neighbor_mtu( p_pi ) ), + osm_get_lwa_str( p_pi->link_width_active ), + osm_get_lsa_str( ib_port_info_get_link_speed_active + ( p_pi ) ) ); + else + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " : : " ); + + if( osm_physp_get_port_guid( p_physp ) == + p_mgr->p_subn->sm_port_guid ) + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, "* %016" PRIx64 " *", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) ); + else + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, ": %016" PRIx64 " :", + cl_ntoh64( osm_physp_get_port_guid( p_physp ) ) ); + + if( port_num && + ( ib_port_info_get_port_state( p_pi ) != IB_LINK_DOWN ) ) + { + p_remote_physp = osm_physp_get_remote( p_physp ); + if( p_remote_physp && osm_physp_is_valid( p_remote_physp ) ) + { + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, + " %016" PRIx64 " (%02X)", + cl_ntoh64( osm_physp_get_port_guid + ( p_remote_physp ) ), + osm_physp_get_port_num( p_remote_physp ) ); + } + else + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, " UNKNOWN" ); + } + + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, "\n" ); + } + + osm_log_printf( p_mgr->p_log, OSM_LOG_VERBOSE, + "------------------------------------------------------" + "------------------------------------------------\n" ); + p_port = ( osm_port_t * ) cl_qmap_next( &p_port->map_item ); + } + + CL_PLOCK_RELEASE( p_mgr->p_lock ); + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__process_idle_time_queue_done( + IN osm_state_mgr_t * const p_mgr ) +{ + cl_qlist_t *p_list = &p_mgr->idle_time_list; + cl_list_item_t *p_list_item; + osm_idle_item_t *p_process_item; + + OSM_LOG_ENTER( p_mgr->p_log, __process_idle_time_queue_done ); + + cl_spinlock_acquire( &p_mgr->idle_lock ); + p_list_item = cl_qlist_remove_head( p_list ); + + if( p_list_item == cl_qlist_end( p_list ) ) + { + cl_spinlock_release( &p_mgr->idle_lock ); + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__process_idle_time_queue_done: ERR 3314: " + "Idle time queue is empty\n" ); + return; + } + cl_spinlock_release( &p_mgr->idle_lock ); + + p_process_item = ( osm_idle_item_t * ) p_list_item; + + if( p_process_item->pfn_done ) + { + + p_process_item->pfn_done( p_process_item->context1, + p_process_item->context2 ); + } + + free( p_process_item ); + + OSM_LOG_EXIT( p_mgr->p_log ); + return; +} + +/********************************************************************** + **********************************************************************/ +static osm_signal_t +__process_idle_time_queue_start( + IN osm_state_mgr_t * const p_mgr ) +{ + cl_qlist_t *p_list = &p_mgr->idle_time_list; + cl_list_item_t *p_list_item; + osm_idle_item_t *p_process_item; + osm_signal_t signal; + + OSM_LOG_ENTER( p_mgr->p_log, __process_idle_time_queue_start ); + + cl_spinlock_acquire( &p_mgr->idle_lock ); + + p_list_item = cl_qlist_head( p_list ); + if( p_list_item == cl_qlist_end( p_list ) ) + { + cl_spinlock_release( &p_mgr->idle_lock ); + 
OSM_LOG_EXIT( p_mgr->p_log );
+ return OSM_SIGNAL_NONE;
+ }
+
+ cl_spinlock_release( &p_mgr->idle_lock );
+
+ p_process_item = ( osm_idle_item_t * ) p_list_item;
+
+ CL_ASSERT( p_process_item->pfn_start );
+
+ signal = p_process_item->pfn_start( p_process_item->context1,
+ p_process_item->context2 );
+
+ CL_ASSERT( signal != OSM_SIGNAL_NONE );
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return signal;
+}
+
+/**********************************************************************
+ * Go over all the remote SMs (as updated in the sm_guid_tbl).
+ * Find whether there is a remote SM in MASTER state.
+ * If there is a remote master SM - return a pointer to it,
+ * else - return NULL.
+ **********************************************************************/
+static osm_remote_sm_t *
+__osm_state_mgr_exists_other_master_sm(
+ IN osm_state_mgr_t * const p_mgr )
+{
+ cl_qmap_t *p_sm_tbl;
+ osm_remote_sm_t *p_sm;
+ osm_remote_sm_t *p_sm_res = NULL;
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_exists_other_master_sm );
+
+ p_sm_tbl = &p_mgr->p_subn->sm_guid_tbl;
+
+ /* go over all the remote SMs */
+ for( p_sm = ( osm_remote_sm_t * ) cl_qmap_head( p_sm_tbl );
+ p_sm != ( osm_remote_sm_t * ) cl_qmap_end( p_sm_tbl );
+ p_sm = ( osm_remote_sm_t * ) cl_qmap_next( &p_sm->map_item ) )
+ {
+ /* If the sm is in MASTER state - remember it and stop */
+ if( ib_sminfo_get_state( &p_sm->smi ) == IB_SMINFO_STATE_MASTER )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_VERBOSE,
+ "__osm_state_mgr_exists_other_master_sm: "
+ "Found remote master SM with guid:0x%016" PRIx64 "\n",
+ cl_ntoh64(p_sm->smi.guid) );
+ p_sm_res = p_sm;
+ goto Exit;
+ }
+ }
+
+ Exit:
+ OSM_LOG_EXIT( p_mgr->p_log );
+ return ( p_sm_res );
+}
+
+/**********************************************************************
+ * Go over all remote SMs (as updated in the sm_guid_tbl).
+ * Find the one with the highest priority (a lower GUID wins ties).
+ * Compare this SM to the local SM. If the local SM is higher -
+ * return NULL, if the remote SM is higher - return a pointer to it.
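+ * An SM in NOTACTIVE state is ignored by the comparison loop.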
+ **********************************************************************/ +static osm_remote_sm_t * +__osm_state_mgr_get_highest_sm( + IN osm_state_mgr_t * const p_mgr ) +{ + cl_qmap_t *p_sm_tbl; + osm_remote_sm_t *p_sm = NULL; + osm_remote_sm_t *p_highest_sm; + uint8_t highest_sm_priority; + ib_net64_t highest_sm_guid; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_get_highest_sm ); + + p_sm_tbl = &p_mgr->p_subn->sm_guid_tbl; + + /* Start with the local sm as the standard */ + p_highest_sm = NULL; + highest_sm_priority = p_mgr->p_subn->opt.sm_priority; + highest_sm_guid = p_mgr->p_subn->sm_port_guid; + + /* go over all the remote SMs */ + for( p_sm = ( osm_remote_sm_t * ) cl_qmap_head( p_sm_tbl ); + p_sm != ( osm_remote_sm_t * ) cl_qmap_end( p_sm_tbl ); + p_sm = ( osm_remote_sm_t * ) cl_qmap_next( &p_sm->map_item ) ) + { + + /* If the sm is in NOTACTIVE state - continue */ + if( ib_sminfo_get_state( &p_sm->smi ) == IB_SMINFO_STATE_NOTACTIVE ) + continue; + + if( osm_sm_is_greater_than( ib_sminfo_get_priority( &p_sm->smi ), + p_sm->smi.guid, highest_sm_priority, + highest_sm_guid ) ) + { + /* the new p_sm is with higher priority - update the highest_sm */ + /* to this sm */ + p_highest_sm = p_sm; + highest_sm_priority = ib_sminfo_get_priority( &p_sm->smi ); + highest_sm_guid = p_sm->smi.guid; + } + } + + if( p_highest_sm != NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_state_mgr_get_highest_sm: " + "Found higher SM with guid: %016" PRIx64 "\n", + cl_ntoh64( p_highest_sm->smi.guid ) ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); + return ( p_highest_sm ); +} + +/********************************************************************** + * Send SubnSet(SMInfo) SMP with HANDOVER attribute to the + * remote_sm indicated. + **********************************************************************/ +static void +__osm_state_mgr_send_handover( + IN osm_state_mgr_t * const p_mgr, + IN osm_remote_sm_t * const p_sm ) +{ + uint8_t payload[IB_SMP_DATA_SIZE]; + ib_sm_info_t *p_smi = ( ib_sm_info_t * ) payload; + osm_madw_context_t context; + const osm_port_t *p_port; + ib_api_status_t status; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_send_handover ); + + if( p_mgr->p_subn->opt.testability_mode == + OSM_TEST_MODE_EXIT_BEFORE_SEND_HANDOVER ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_send_handover: ERR 3315: " + "Exit on testability mode OSM_TEST_MODE_EXIT_BEFORE_SEND_HANDOVER\n" ); + osm_exit_flag = TRUE; + sleep( 3 ); + exit( 1 ); + } + + /* + * Send a query of SubnSet(SMInfo) HANDOVER to the remote sm given. + */ + + memset( &context, 0, sizeof( context ) ); + p_port = p_sm->p_port; + if( p_port == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_state_mgr_send_handover: ERR 3316: " + "No port object on given remote_sm object\n" ); + goto Exit; + } + + /* update the master_guid in the p_sm_state_mgr object according to */ + /* the guid of the port where the new Master SM should reside. */ + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "__osm_state_mgr_send_handover: " + "Handing over mastership. 
Updating sm_state_mgr master_guid: %016"
+ PRIx64 "\n", cl_ntoh64( p_port->guid ) );
+ p_mgr->p_sm_state_mgr->master_guid = p_port->guid;
+
+ context.smi_context.port_guid = p_port->guid;
+ context.smi_context.set_method = TRUE;
+
+ p_smi->guid = p_mgr->p_subn->sm_port_guid;
+ p_smi->act_count = cl_hton32( p_mgr->p_stats->qp0_mads_sent );
+ p_smi->pri_state = ( uint8_t ) ( p_mgr->p_subn->sm_state |
+ p_mgr->p_subn->opt.sm_priority << 4 );
+ /*
+ * Return 0 for the SM key unless we authenticate the requester
+ * as the master SM.
+ */
+ if( ib_sminfo_get_state( &p_sm->smi ) == IB_SMINFO_STATE_MASTER )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_state_mgr_send_handover: "
+ "Responding to master SM with real sm_key\n" );
+ p_smi->sm_key = p_mgr->p_subn->opt.sm_key;
+ }
+ else
+ {
+ /* The requester is not authenticated as master - set sm_key to zero */
+ osm_log( p_mgr->p_log, OSM_LOG_DEBUG,
+ "__osm_state_mgr_send_handover: "
+ "Responding to non-master SM with zero sm_key\n" );
+ p_smi->sm_key = 0;
+ }
+
+ status = osm_req_set( p_mgr->p_req,
+ osm_physp_get_dr_path_ptr
+ ( osm_port_get_default_phys_ptr( p_port ) ), payload,
+ sizeof(payload),
+ IB_MAD_ATTR_SM_INFO, IB_SMINFO_ATTR_MOD_HANDOVER,
+ CL_DISP_MSGID_NONE, &context );
+
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_state_mgr_send_handover: ERR 3317: "
+ "Failure requesting SMInfo (%s)\n", ib_get_err_str( status ) );
+ }
+
+ Exit:
+ OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ * Send Trap 64 on all ports in new_ports_list.
+ **********************************************************************/
+static void
+__osm_state_mgr_report_new_ports(
+ IN osm_state_mgr_t * const p_mgr )
+{
+ osm_port_t *p_port;
+ ib_gid_t port_gid;
+ ib_mad_notice_attr_t notice;
+ ib_api_status_t status;
+ ib_net64_t port_guid;
+ uint16_t min_lid_ho;
+ uint16_t max_lid_ho;
+ char desc[IB_NODE_DESCRIPTION_SIZE + 1];
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_report_new_ports );
+
+ CL_PLOCK_ACQUIRE( p_mgr->p_lock );
+ p_port =
+ ( osm_port_t
+ * ) ( cl_list_remove_head( &p_mgr->p_subn->new_ports_list ) );
+ while( p_port != NULL )
+ {
+ port_guid = osm_port_get_guid( p_port );
+ /* issue a notice - trap 64 */
+
+ /* details of the notice */
+ notice.generic_type = 0x83; /* is generic subn mgt type */
+ ib_notice_set_prod_type_ho( &notice, 4 ); /* A Class Manager generator */
+ /* the endport has become reachable */
+ notice.g_or_v.generic.trap_num = CL_HTON16( 64 );
+ /* The sm_base_lid is saved in network order already. */
+ notice.issuer_lid = p_mgr->p_subn->sm_base_lid;
+ /* following C14-72.1.1 and table 119 p725 */
+ /* we need to provide the GID */
+ port_gid.unicast.prefix = p_mgr->p_subn->opt.subnet_prefix;
+ port_gid.unicast.interface_id = port_guid;
+ memcpy( &( notice.data_details.ntc_64_67.gid ),
+ &( port_gid ), sizeof( ib_gid_t ) );
+
+ /* According to page 653 - the issuer gid in this case of trap
+ * is the SM gid, since the SM is the initiator of this trap.
*/
+ notice.issuer_gid.unicast.prefix = p_mgr->p_subn->opt.subnet_prefix;
+ notice.issuer_gid.unicast.interface_id = p_mgr->p_subn->sm_port_guid;
+
+ status = osm_report_notice( p_mgr->p_log, p_mgr->p_subn, &notice );
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_state_mgr_report_new_ports: ERR 3318: "
+ "Error sending trap reports on GUID:0x%016" PRIx64
+ " (%s)\n", cl_ntoh64( port_gid.unicast.interface_id ),
+ ib_get_err_str( status ) );
+ }
+ osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho );
+ if (p_port->p_node)
+ {
+ memcpy(desc, p_port->p_node->node_desc.description,
+ IB_NODE_DESCRIPTION_SIZE);
+ desc[IB_NODE_DESCRIPTION_SIZE] = '\0';
+ }
+ osm_log( p_mgr->p_log, OSM_LOG_INFO,
+ "Discovered new port with GUID:0x%016" PRIx64
+ " LID range [0x%X,0x%X] of node:%s\n",
+ cl_ntoh64( port_gid.unicast.interface_id ),
+ min_lid_ho, max_lid_ho,
+ p_port->p_node ? desc : "UNKNOWN" );
+
+ p_port =
+ ( osm_port_t
+ * ) ( cl_list_remove_head( &p_mgr->p_subn->new_ports_list ) );
+ }
+ CL_PLOCK_RELEASE( p_mgr->p_lock );
+
+ OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ * Make sure that the port_lid_tbl of the subnet has only the ports
+ * that are recognized, and in the correct lid place. There could be
+ * errors if we wanted to assign a certain port with lid X, but that
+ * request didn't reach the port. In this case port_lid_tbl will have
+ * the port under lid X, though the port isn't updated with this lid.
+ * We will run a new heavy sweep (since there were errors in the
+ * initialization), but here we'll clean the database from incorrect
+ * information.
+ **********************************************************************/
+static void
+__osm_state_mgr_check_tbl_consistency(
+ IN osm_state_mgr_t * const p_mgr )
+{
+ cl_qmap_t *p_port_guid_tbl;
+ osm_port_t *p_port;
+ osm_port_t *p_next_port;
+ cl_ptr_vector_t *p_port_lid_tbl;
+ size_t max_lid, ref_size, curr_size, lid;
+ osm_port_t *p_port_ref, *p_port_stored;
+ cl_ptr_vector_t ref_port_lid_tbl;
+ uint16_t min_lid_ho;
+ uint16_t max_lid_ho;
+ uint16_t lid_ho;
+
+ OSM_LOG_ENTER( p_mgr->p_log, __osm_state_mgr_check_tbl_consistency );
+
+ cl_ptr_vector_construct( &ref_port_lid_tbl );
+ cl_ptr_vector_init( &ref_port_lid_tbl,
+ cl_ptr_vector_get_size( &p_mgr->p_subn->port_lid_tbl ),
+ OSM_SUBNET_VECTOR_GROW_SIZE );
+
+ p_port_guid_tbl = &p_mgr->p_subn->port_guid_tbl;
+
+ /* Let's go over all the ports according to port_guid_tbl,
+ * and add the port to a reference port_lid_tbl. */
+ p_next_port = ( osm_port_t * ) cl_qmap_head( p_port_guid_tbl );
+ while( p_next_port != ( osm_port_t * ) cl_qmap_end( p_port_guid_tbl ) )
+ {
+ p_port = p_next_port;
+ p_next_port = ( osm_port_t * ) cl_qmap_next( &p_next_port->map_item );
+
+ osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho );
+ for( lid_ho = min_lid_ho; lid_ho <= max_lid_ho; lid_ho++ )
+ cl_ptr_vector_set( &ref_port_lid_tbl, lid_ho, p_port );
+ }
+
+ p_port_lid_tbl = &p_mgr->p_subn->port_lid_tbl;
+
+ ref_size = cl_ptr_vector_get_size( &ref_port_lid_tbl );
+ curr_size = cl_ptr_vector_get_size( p_port_lid_tbl );
+ /* They should be the same, but compare it anyway */
+ max_lid = ( ref_size > curr_size ) ?
ref_size : curr_size;
+
+ for( lid = 1; lid <= max_lid; lid++ )
+ {
+ p_port_ref = NULL;
+ p_port_stored = NULL;
+ cl_ptr_vector_at( p_port_lid_tbl, lid, ( void * )&p_port_stored );
+ cl_ptr_vector_at( &ref_port_lid_tbl, lid, ( void * )&p_port_ref );
+
+ if( p_port_stored == p_port_ref )
+ /* This is the "good" case - both entries are the same for this lid.
+ * Nothing to do. */
+ continue;
+
+ if( p_port_ref == NULL )
+ {
+ /* There is an object in the subnet database for this lid,
+ * but no such object exists in the reference port_lid_tbl.
+ * This can occur if we wanted to assign a certain port with some
+ * lid (different than the one pre-assigned to it), and the port
+ * didn't get the PortInfo Set request. Due to this, the port
+ * is updated with its original lid in our database, but with the
+ * new lid we wanted to give it in our port_lid_tbl. */
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_state_mgr_check_tbl_consistency: ERR 3322: "
+ "lid 0x%zX is wrongly assigned to port 0x%016" PRIx64
+ " in port_lid_tbl\n",
+ lid, cl_ntoh64( osm_port_get_guid( p_port_stored ) ) );
+ }
+ else
+ {
+ if( p_port_stored == NULL )
+ {
+ /* There is an object in the new database, but no object in our subnet
+ * database. This is the matching case of the prior check - the port
+ * still has its original lid. */
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_state_mgr_check_tbl_consistency: ERR 3323: "
+ "port 0x%016" PRIx64 " exists in new port_lid_tbl under "
+ "lid 0x%zX, but missing in subnet port_lid_tbl db\n",
+ cl_ntoh64( osm_port_get_guid( p_port_ref ) ), lid );
+ }
+ else
+ {
+
+ /* if we reached here then p_port_stored != p_port_ref.
+ * We were trying to set a lid to p_port_stored, but it didn't reach it,
+ * and p_port_ref also didn't get the lid update. */
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "__osm_state_mgr_check_tbl_consistency: ERR 3324: "
+ "lid 0x%zX has port 0x%016" PRIx64
+ " in new port_lid_tbl db, " "and port 0x%016" PRIx64
+ " in subnet port_lid_tbl db\n", lid,
+ cl_ntoh64( osm_port_get_guid( p_port_ref ) ),
+ cl_ntoh64( osm_port_get_guid( p_port_stored ) ) );
+ }
+ }
+ /* In any of these cases we want to set NULL in the port_lid_tbl, since this
+ * entry is invalid. Also, make sure we'll do another heavy sweep. */
+ cl_ptr_vector_set( p_port_lid_tbl, lid, NULL );
+ p_mgr->p_subn->subnet_initialization_error = TRUE;
+ }
+
+ cl_ptr_vector_destroy( &ref_port_lid_tbl );
+ OSM_LOG_EXIT( p_mgr->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_state_mgr_process(
+ IN osm_state_mgr_t * const p_mgr,
+ IN osm_signal_t signal )
+{
+ ib_api_status_t status;
+ osm_remote_sm_t *p_remote_sm;
+ osm_signal_t tmp_signal;
+
+ CL_ASSERT( p_mgr );
+
+ OSM_LOG_ENTER( p_mgr->p_log, osm_state_mgr_process );
+
+ /* if we are exiting do nothing */
+ if( osm_exit_flag )
+ signal = OSM_SIGNAL_NONE;
+
+ /*
+ * The state lock prevents many race conditions from screwing
+ * up the state transition process. For example, if a function
+ * puts transactions on the wire, the state lock guarantees this
+ * loop will see the return code ("DONE PENDING") of the function
+ * before the "NO OUTSTANDING TRANSACTIONS" signal is asynchronously
+ * received.
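+ * The dispatcher's OSM_MSG_NO_SMPS_OUTSTANDING event re-enters this
+ * routine once the wire clears (see osm_state_mgr_ctrl below).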
+ */ + cl_spinlock_acquire( &p_mgr->state_lock ); + + while( signal != OSM_SIGNAL_NONE ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_state_mgr_process: " + "Received signal %s in state %s\n", + osm_get_sm_signal_str( signal ), + osm_get_sm_state_str( p_mgr->state ) ); + } + + /* + * If we're already sweeping and we get the signal to sweep, + * just ignore it harmlessly. + */ + if( ( p_mgr->state != OSM_SM_STATE_IDLE ) && + ( p_mgr->state != OSM_SM_STATE_STANDBY ) && + ( signal == OSM_SIGNAL_SWEEP ) ) + { + break; + } + + switch ( p_mgr->state ) + { + case OSM_SM_STATE_IDLE: + switch ( signal ) + { + case OSM_SIGNAL_SWEEP: + /* + * If the osm_sm_state_mgr is in INIT state - signal + * it with a INIT signal to move it to DISCOVERY state. + */ + if( p_mgr->p_subn->sm_state == IB_SMINFO_STATE_INIT ) + osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr, + OSM_SM_SIGNAL_INIT ); + + /* + * If we already have switches, then try a light sweep. + * Otherwise, this is probably our first discovery pass + * or we are connected in loopback. In both cases do a + * heavy sweep. + * Note: If we are connected in loopback we want a heavy + * sweep, since we will not be getting any traps if there is + * a lost connection. + */ + /* if we are in DISCOVERING state - this means it is either in + * initializing or wake up from STANDBY - run the heavy sweep */ + if( cl_qmap_count( &p_mgr->p_subn->sw_guid_tbl ) && + p_mgr->p_subn->sm_state != IB_SMINFO_STATE_DISCOVERING && + p_mgr->p_subn->opt.force_heavy_sweep == FALSE && + p_mgr->p_subn->force_immediate_heavy_sweep == FALSE && + p_mgr->p_subn->force_delayed_heavy_sweep == FALSE && + p_mgr->p_subn->subnet_initialization_error == FALSE ) + { + if( __osm_state_mgr_light_sweep_start( p_mgr ) == IB_SUCCESS ) + { + p_mgr->state = OSM_SM_STATE_SWEEP_LIGHT; + } + } + else + { + /* First of all - if force_immediate_heavy_sweep is TRUE then + * need to unset it */ + p_mgr->p_subn->force_immediate_heavy_sweep = FALSE; + /* If force_delayed_heavy_sweep is TRUE then + * need to unset it */ + p_mgr->p_subn->force_delayed_heavy_sweep = FALSE; + /* If subnet_initialization_error is TRUE then + * need to unset it. 
*/ + p_mgr->p_subn->subnet_initialization_error = FALSE; + + /* rescan configuration updates */ + osm_subn_rescan_conf_file(&p_mgr->p_subn->opt); + + status = __osm_state_mgr_sweep_hop_0( p_mgr ); + if( status == IB_SUCCESS ) + { + p_mgr->state = OSM_SM_STATE_SWEEP_HEAVY_SELF; + } + } + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SIGNAL_IDLE_TIME_PROCESS_REQUEST: + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST; + signal = OSM_SIGNAL_IDLE_TIME_PROCESS; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_PROCESS_REQUEST: + switch ( signal ) + { + case OSM_SIGNAL_IDLE_TIME_PROCESS: + signal = __process_idle_time_queue_start( p_mgr ); + switch ( signal ) + { + case OSM_SIGNAL_NONE: + p_mgr->state = OSM_SM_STATE_IDLE; + break; + + case OSM_SIGNAL_DONE_PENDING: + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_PROCESS_REQUEST_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_PROCESS_REQUEST_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + /* CALL the done function */ + __process_idle_time_queue_done( p_mgr ); + + /* + * Set the signal to OSM_SIGNAL_IDLE_TIME_PROCESS + * so that the next element in the queue gets processed + */ + + signal = OSM_SIGNAL_IDLE_TIME_PROCESS; + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SWEEP_LIGHT: + switch ( signal ) + { + case OSM_SIGNAL_LIGHT_SWEEP_FAIL: + case OSM_SIGNAL_CHANGE_DETECTED: + /* + * Nothing else to do yet except change state. + */ + p_mgr->state = OSM_SM_STATE_SWEEP_LIGHT_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + /* + * No change was detected on the subnet. + * We can return to the idle state. + */ + __osm_state_mgr_light_sweep_done_msg( p_mgr ); + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST; + signal = OSM_SIGNAL_IDLE_TIME_PROCESS; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SWEEP_LIGHT_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_LIGHT_SWEEP_FAIL: + case OSM_SIGNAL_CHANGE_DETECTED: + /* + * Nothing to do here. One subnet change typcially + * begets another.... But needs to wait for all transactions to + * complete + */ + break; + + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + /* + * A change was detected on the subnet. + * Initiate a heavy sweep. + */ + if( __osm_state_mgr_sweep_hop_0( p_mgr ) == IB_SUCCESS ) + { + p_mgr->state = OSM_SM_STATE_SWEEP_HEAVY_SELF; + } + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + break; + } + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SM_STATE_SWEEP_HEAVY_SELF: + switch ( signal ) + { + case OSM_SIGNAL_CHANGE_DETECTED: + /* + * Nothing to do here. 
One subnet change typcially + * begets another.... But needs to wait for all transactions + */ + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + if( __osm_state_mgr_is_sm_port_down( p_mgr ) == TRUE ) + { + __osm_state_mgr_sm_port_down_msg( p_mgr ); + + /* Run the drop manager - we want to clear all records */ + osm_drop_mgr_process( p_mgr->p_drop_mgr ); + + /* Move to DISCOVERING state */ + osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr, + OSM_SM_SIGNAL_DISCOVER ); + + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST; + signal = OSM_SIGNAL_IDLE_TIME_PROCESS; + } + else + { + if( __osm_state_mgr_sweep_hop_1( p_mgr ) == IB_SUCCESS ) + { + p_mgr->state = OSM_SM_STATE_SWEEP_HEAVY_SUBNET; + } + signal = OSM_SIGNAL_NONE; + } + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + /* + * There is no 'OSM_SM_STATE_SWEEP_HEAVY_WAIT' state since we + * know that there are outstanding transactions on the wire already... + */ + case OSM_SM_STATE_SWEEP_HEAVY_SUBNET: + switch ( signal ) + { + case OSM_SIGNAL_CHANGE_DETECTED: + /* + * Nothing to do here. One subnet change typically + * begets another.... + */ + signal = OSM_SIGNAL_NONE; + break; + + case OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED: + p_mgr->state = OSM_SM_STATE_MASTER_OR_HIGHER_SM_DETECTED; + break; + + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + __osm_state_mgr_sweep_heavy_done_msg( p_mgr ); + + /* If we are MASTER - get the highest remote_sm, and + * see if it is higher than our local sm. If + */ + if( p_mgr->p_subn->sm_state == IB_SMINFO_STATE_MASTER ) + { + p_remote_sm = __osm_state_mgr_get_highest_sm( p_mgr ); + if( p_remote_sm != NULL ) + { + /* need to handover the mastership + * to the remote sm, and move to standby */ + __osm_state_mgr_send_handover( p_mgr, p_remote_sm ); + osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr, + OSM_SM_SIGNAL_HANDOVER_SENT ); + p_mgr->state = OSM_SM_STATE_STANDBY; + signal = OSM_SIGNAL_NONE; + break; + } + else + { + /* We are the highest sm - check to see if there is + * a remote SM that is in master state. */ + p_remote_sm = __osm_state_mgr_exists_other_master_sm( p_mgr ); + if( p_remote_sm != NULL ) + { + /* There is a remote SM that is master. + * need to wait for that SM to relinquish control + * of its portion of the subnet. C14-60. + * Also - need to start polling on that SM. */ + p_mgr->p_sm_state_mgr->p_polling_sm = p_remote_sm; + osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr, + OSM_SM_SIGNAL_WAIT_FOR_HANDOVER ); + p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST; + signal = OSM_SIGNAL_IDLE_TIME_PROCESS; + break; + } + } + } + + /* Need to continue with lid assignment */ + osm_drop_mgr_process( p_mgr->p_drop_mgr ); + + p_mgr->state = OSM_SM_STATE_SET_PKEY; + + /* + * If we are not MASTER already - this means that we are + * in discovery state. 
call osm_sm_state_mgr with signal + * DISCOVERY_COMPLETED + */ + if( p_mgr->p_subn->sm_state == IB_SMINFO_STATE_DISCOVERING ) + osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr, + OSM_SM_SIGNAL_DISCOVERY_COMPLETED ); + + /* the returned signal might be DONE or DONE_PENDING */ + signal = osm_pkey_mgr_process( p_mgr->p_subn->p_osm ); + + /* the returned signal is always DONE */ + tmp_signal = osm_qos_setup(p_mgr->p_subn->p_osm); + + if (tmp_signal == OSM_SIGNAL_DONE_PENDING) + signal = OSM_SIGNAL_DONE_PENDING; + + /* try to restore SA DB (this should be before lid_mgr + because we may want to disable clients reregistration + when SA DB is restored) */ + osm_sa_db_file_load(p_mgr->p_subn->p_osm); + + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_PKEY: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_PKEY_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * There are outstanding transactions, so we + * must wait for the wire to clear. + */ + p_mgr->state = OSM_SM_STATE_SET_PKEY_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_PKEY_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_PKEY_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_PKEY_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_SM_UCAST_LID; + signal = osm_lid_mgr_process_sm( p_mgr->p_lid_mgr ); + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_SM_UCAST_LID: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_SM_UCAST_LID_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * There are outstanding transactions, so we + * must wait for the wire to clear. 
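+ * The WAIT state below turns the eventual
+ * NO_PENDING_TRANSACTIONS signal into the same DONE transition.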
+ */ + p_mgr->state = OSM_SM_STATE_SET_SM_UCAST_LID_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_SM_UCAST_LID_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_SM_UCAST_LID_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_SM_UCAST_LID_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + /* If we run single step we have already done this */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_TAKE_ONE ) + { + __osm_state_mgr_set_sm_lid_done_msg( p_mgr ); + __osm_state_mgr_notify_lid_change( p_mgr ); + } + + /* Break on single step mode - if not continuous */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_BREAK ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + break; + } + + p_mgr->state = OSM_SM_STATE_SET_SUBNET_UCAST_LIDS; + signal = osm_lid_mgr_process_subnet( p_mgr->p_lid_mgr ); + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + + case OSM_SM_STATE_SET_SUBNET_UCAST_LIDS: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + /* + * The LID Manager is done processing. + * There are no outstanding transactions, so we + * can move on to configuring the forwarding tables. + */ + p_mgr->state = OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The LID Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. + */ + p_mgr->state = OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + /* + * In this state, the Unicast Manager has completed processing, + * but there are still transactions on the wire. Therefore, + * wait here until the wire clears. + */ + case OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + /* + * The LID Manager is done processing. + * There are no outstanding transactions, so we + * can move on to configuring the forwarding tables. + */ + p_mgr->state = OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_SUBNET_UCAST_LIDS_DONE: + + switch ( signal ) + { + case OSM_SIGNAL_DONE: + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + /* At this point we need to check the consistency of + * the port_lid_tbl under the subnet. There might be + * errors in it if PortInfo Set reqeusts didn't reach + * their destination. */ + __osm_state_mgr_check_tbl_consistency( p_mgr ); + + /* If we run single step we have already done this */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_TAKE_ONE ) + __osm_state_mgr_lid_assign_msg( p_mgr ); + + /* Break on single step mode - just before taking next step */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_BREAK ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + break; + } + + /* + * OK, the wire is clear, so proceed with + * unicast forwarding table configuration. 
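+ * LID assignment was just verified, so the traps carry final LIDs.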
+ * First - send trap 64 on newly discovered endports + */ + __osm_state_mgr_report_new_ports( p_mgr ); + + p_mgr->state = OSM_SM_STATE_SET_UCAST_TABLES; + signal = osm_ucast_mgr_process( p_mgr->p_ucast_mgr ); + + /* Break on single step mode */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_CONTINUOUS ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + } + + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_UCAST_TABLES: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_UCAST_TABLES_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The Unicast Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. + */ + p_mgr->state = OSM_SM_STATE_SET_UCAST_TABLES_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_UCAST_TABLES_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_UCAST_TABLES_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_UCAST_TABLES_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + /* we are done setting all LFTs so clear the ignore existing. + * From now on, as long as we are still master, we want to + * take into account these lfts. */ + p_mgr->p_subn->ignore_existing_lfts = FALSE; + + /* If we run single step we have already done this */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_TAKE_ONE ) + __osm_state_mgr_switch_config_msg( p_mgr ); + + /* Break on single step mode - just before taking next step */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_BREAK ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + break; + } + + if( !p_mgr->p_subn->opt.disable_multicast ) + { + p_mgr->state = OSM_SM_STATE_SET_MCAST_TABLES; + signal = osm_mcast_mgr_process( p_mgr->p_mcast_mgr ); + } + else + { + p_mgr->state = OSM_SM_STATE_SET_LINK_PORTS; + signal = osm_link_mgr_process( p_mgr->p_link_mgr, + IB_LINK_NO_CHANGE ); + } + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_MCAST_TABLES: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_MCAST_TABLES_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The Multicast Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. 
+ */ + p_mgr->state = OSM_SM_STATE_SET_MCAST_TABLES_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_MCAST_TABLES_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_MCAST_TABLES_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_MCAST_TABLES_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + /* If we run single step we have already done this */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_TAKE_ONE ) + __osm_state_mgr_multicast_config_msg( p_mgr ); + + /* Break on single step mode - just before taking next step */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_BREAK ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + break; + } + + p_mgr->state = OSM_SM_STATE_SET_LINK_PORTS; + signal = osm_link_mgr_process( p_mgr->p_link_mgr, + IB_LINK_NO_CHANGE ); + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + /* + * The LINK_PORTS state is required since we can not count on + * the port state change MADs to succeed. This is an artifact + * of the spec defining state change from state X to state X + * as an error. The hardware then is not required to process + * other parameters provided by the Set(PortInfo) Packet. + */ + case OSM_SM_STATE_SET_LINK_PORTS: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_LINK_PORTS_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The Link Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. + */ + p_mgr->state = OSM_SM_STATE_SET_LINK_PORTS_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_LINK_PORTS_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_LINK_PORTS_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_LINK_PORTS_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + + __osm_state_mgr_links_ports_msg( p_mgr ); + + p_mgr->state = OSM_SM_STATE_SET_ARMED; + signal = osm_link_mgr_process( p_mgr->p_link_mgr, IB_LINK_ARMED ); + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_ARMED: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + p_mgr->state = OSM_SM_STATE_SET_ARMED_DONE; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The Link Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. 
+ */ + p_mgr->state = OSM_SM_STATE_SET_ARMED_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_ARMED_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + p_mgr->state = OSM_SM_STATE_SET_ARMED_DONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_ARMED_DONE: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + + /* If we run single step we have already done this */ + if( p_mgr->state_step_mode != OSM_STATE_STEP_TAKE_ONE ) + __osm_state_mgr_links_armed_msg( p_mgr ); + + /* Break on single step mode - just before taking next step */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_BREAK ) + { + p_mgr->next_stage_signal = signal; + signal = OSM_SIGNAL_NONE; + break; + } + + p_mgr->state = OSM_SM_STATE_SET_ACTIVE; + signal = osm_link_mgr_process( p_mgr->p_link_mgr, + IB_LINK_ACTIVE ); + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_ACTIVE: + switch ( signal ) + { + case OSM_SIGNAL_DONE: + /* + * Don't change the signal, just the state. + */ + p_mgr->state = OSM_SM_STATE_SUBNET_UP; + break; + + case OSM_SIGNAL_DONE_PENDING: + /* + * The Link Manager is done processing. + * There are outstanding transactions, so we + * must wait for the wire to clear. + */ + p_mgr->state = OSM_SM_STATE_SET_ACTIVE_WAIT; + signal = OSM_SIGNAL_NONE; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SET_ACTIVE_WAIT: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + /* + * Don't change the signal, just the state. + */ + p_mgr->state = OSM_SM_STATE_SUBNET_UP; + break; + + default: + __osm_state_mgr_signal_error( p_mgr, signal ); + signal = OSM_SIGNAL_NONE; + break; + } + break; + + case OSM_SM_STATE_SUBNET_UP: + switch ( signal ) + { + case OSM_SIGNAL_NO_PENDING_TRANSACTIONS: + case OSM_SIGNAL_DONE: + /* + * The sweep completed! + */ + + /* in any case we zero this flag */ + p_mgr->p_subn->coming_out_of_standby = FALSE; + + /* If there were errors - then the subnet is not really up */ + if( p_mgr->p_subn->subnet_initialization_error == TRUE ) + { + __osm_state_mgr_init_errors_msg( p_mgr ); + } + else + { + /* The subnet is up correctly - set the first_time_master_sweep flag + * (if it is on) to FALSE. 
*/
+ if( p_mgr->p_subn->first_time_master_sweep == TRUE )
+ {
+ p_mgr->p_subn->first_time_master_sweep = FALSE;
+ }
+
+ __osm_topology_file_create( p_mgr );
+ __osm_state_mgr_report( p_mgr );
+ __osm_state_mgr_up_msg( p_mgr );
+
+ if( osm_log_is_active(p_mgr->p_log, OSM_LOG_VERBOSE) )
+ osm_sa_db_file_dump(p_mgr->p_subn->p_osm);
+ }
+ p_mgr->state = OSM_SM_STATE_PROCESS_REQUEST;
+ signal = OSM_SIGNAL_IDLE_TIME_PROCESS;
+
+ /*
+ * Finally signal the subnet up event
+ */
+ status = cl_event_signal( p_mgr->p_subnet_up_event );
+ if( status != IB_SUCCESS )
+ {
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "osm_state_mgr_process: ERR 3319: "
+ "Failed to signal the subnet up event\n" );
+ }
+ break;
+
+ default:
+ __osm_state_mgr_signal_error( p_mgr, signal );
+ signal = OSM_SIGNAL_NONE;
+ break;
+ }
+ break;
+
+ case OSM_SM_STATE_MASTER_OR_HIGHER_SM_DETECTED:
+ switch ( signal )
+ {
+ case OSM_SIGNAL_CHANGE_DETECTED:
+ /*
+ * Nothing to do here. One subnet change typically
+ * begets another....
+ */
+ break;
+
+ case OSM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED:
+ /*
+ * If we lost once, we might lose again. Nothing to do.
+ */
+ break;
+
+ case OSM_SIGNAL_NO_PENDING_TRANSACTIONS:
+ p_mgr->state = OSM_SM_STATE_STANDBY;
+ /*
+ * Call the sm_state_mgr with signal
+ * MASTER_OR_HIGHER_SM_DETECTED_DONE
+ */
+ osm_sm_state_mgr_process( p_mgr->p_sm_state_mgr,
+ OSM_SM_SIGNAL_MASTER_OR_HIGHER_SM_DETECTED_DONE );
+ __osm_state_mgr_standby_msg( p_mgr );
+ break;
+
+ default:
+ __osm_state_mgr_signal_error( p_mgr, signal );
+ break;
+ }
+ signal = OSM_SIGNAL_NONE;
+ break;
+
+ case OSM_SM_STATE_STANDBY:
+ switch ( signal )
+ {
+ case OSM_SIGNAL_EXIT_STBY:
+ /*
+ * Need to force re-write of sm_base_lid to all ports.
+ * To do that we want all the ports to be considered
+ * foreign.
+ */
+ signal = OSM_SIGNAL_SWEEP;
+ __osm_state_mgr_clean_known_lids( p_mgr );
+ p_mgr->state = OSM_SM_STATE_IDLE;
+ break;
+
+ case OSM_SIGNAL_NO_PENDING_TRANSACTIONS:
+ /*
+ * Nothing to do here - need to stay at this state
+ */
+ signal = OSM_SIGNAL_NONE;
+ break;
+
+ default:
+ __osm_state_mgr_signal_error( p_mgr, signal );
+ signal = OSM_SIGNAL_NONE;
+ break;
+ }
+ /* stay with the same signal - so we can start the sweep */
+ break;
+
+ default:
+ CL_ASSERT( FALSE );
+ osm_log( p_mgr->p_log, OSM_LOG_ERROR,
+ "osm_state_mgr_process: ERR 3320: "
+ "Invalid SM state %u\n", p_mgr->state );
+ p_mgr->state = OSM_SM_STATE_IDLE;
+ signal = OSM_SIGNAL_NONE;
+ break;
+ }
+
+ /* if we got a signal to force immediate heavy sweep in the middle of the sweep -
+ * try another sweep. */
+ if( ( p_mgr->p_subn->force_immediate_heavy_sweep ) &&
+ ( p_mgr->state == OSM_SM_STATE_IDLE ) )
+ {
+ signal = OSM_SIGNAL_SWEEP;
+ }
+ /* if we got errors during the initialization in the middle of the sweep -
+ * try another sweep. */
+ if( ( p_mgr->p_subn->subnet_initialization_error ) &&
+ ( p_mgr->state == OSM_SM_STATE_IDLE ) )
+ {
+ signal = OSM_SIGNAL_SWEEP;
+ }
+
+ /*
+ * for single step mode - some stages need to break only
+ * after evaluating a single step.
+ * For those we track the fact we have already performed + * a single loop + */ + if( p_mgr->state_step_mode == OSM_STATE_STEP_TAKE_ONE ) + p_mgr->state_step_mode = OSM_STATE_STEP_BREAK; + } + + cl_spinlock_release( &p_mgr->state_lock ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_state_mgr_process_idle( + IN osm_state_mgr_t * const p_mgr, + IN osm_pfn_start_t pfn_start, + IN osm_pfn_done_t pfn_done, + void *context1, + void *context2 ) +{ + osm_idle_item_t *p_idle_item; + + OSM_LOG_ENTER( p_mgr->p_log, osm_state_mgr_process_idle ); + + p_idle_item = malloc( sizeof( osm_idle_item_t ) ); + if( p_idle_item == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_state_mgr_process_idle: ERR 3321: " + "insufficient memory\n" ); + return IB_ERROR; + } + + memset( p_idle_item, 0, sizeof( osm_idle_item_t ) ); + p_idle_item->pfn_start = pfn_start; + p_idle_item->pfn_done = pfn_done; + p_idle_item->context1 = context1; + p_idle_item->context2 = context2; + + cl_spinlock_acquire( &p_mgr->idle_lock ); + cl_qlist_insert_tail( &p_mgr->idle_time_list, &p_idle_item->list_item ); + cl_spinlock_release( &p_mgr->idle_lock ); + + osm_state_mgr_process( p_mgr, OSM_SIGNAL_IDLE_TIME_PROCESS_REQUEST ); + + OSM_LOG_EXIT( p_mgr->p_log ); + + return IB_SUCCESS; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr_ctrl.c new file mode 100644 index 00000000..21aed85f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_state_mgr_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_state_mgr_ctrl_t. + * This object represents the State Manager Controller object. + * This object is part of the opensm family of objects. 
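+ * Its only role is to forward dispatcher events into
+ * osm_state_mgr_process.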
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_state_mgr_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_state_mgr_process( ((osm_state_mgr_ctrl_t*)context)->p_mgr, + (osm_signal_t)(p_data) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_state_mgr_ctrl_construct( + IN osm_state_mgr_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_state_mgr_ctrl_destroy( + IN osm_state_mgr_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_state_mgr_ctrl_init( + IN osm_state_mgr_ctrl_t* const p_ctrl, + IN osm_state_mgr_t* const p_mgr, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_state_mgr_ctrl_init ); + + osm_state_mgr_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_mgr = p_mgr; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_NO_SMPS_OUTSTANDING, + __osm_state_mgr_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_state_mgr_ctrl_init: ERR 3401: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_subnet.c b/branches/Ndi/ulp/opensm/user/opensm/osm_subnet.c new file mode 100644 index 00000000..5d3cb3e1 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_subnet.c @@ -0,0 +1,1272 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_subn_t. + * This object represents an IBA subnet. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.9 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_subn_construct( + IN osm_subn_t* const p_subn ) +{ + memset( p_subn, 0, sizeof(*p_subn) ); + cl_ptr_vector_construct( &p_subn->node_lid_tbl ); + cl_ptr_vector_construct( &p_subn->port_lid_tbl ); + cl_qmap_init( &p_subn->sw_guid_tbl ); + cl_qmap_init( &p_subn->node_guid_tbl ); + cl_qmap_init( &p_subn->port_guid_tbl ); + cl_qmap_init( &p_subn->sm_guid_tbl ); + cl_qlist_init( &p_subn->sa_sr_list ); + cl_qlist_init( &p_subn->sa_infr_list ); + cl_qmap_init( &p_subn->rtr_guid_tbl ); + cl_qmap_init( &p_subn->prtn_pkey_tbl ); + cl_qmap_init( &p_subn->mgrp_mlid_tbl ); + cl_list_construct( &p_subn->new_ports_list ); + cl_list_init( &p_subn->new_ports_list, 10 ); + cl_list_construct( &p_subn->light_sweep_physp_list ); + cl_list_init( &p_subn->light_sweep_physp_list, 5 ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_subn_destroy( + IN osm_subn_t* const p_subn ) +{ + osm_node_t *p_node, *p_next_node; + osm_port_t *p_port, *p_next_port; + osm_switch_t *p_sw, *p_next_sw; + osm_remote_sm_t *p_rsm, *p_next_rsm; + osm_prtn_t *p_prtn, *p_next_prtn; + osm_mgrp_t *p_mgrp, *p_next_mgrp; + osm_infr_t *p_infr, *p_next_infr; + + /* it might be a good idea to de-allocate all known objects */ + p_next_node = (osm_node_t*)cl_qmap_head( &p_subn->node_guid_tbl ); + while( p_next_node != (osm_node_t*)cl_qmap_end( &p_subn->node_guid_tbl ) ) + { + p_node = p_next_node; + p_next_node = (osm_node_t*)cl_qmap_next( &p_node->map_item ); + osm_node_delete( &p_node ); + } + + cl_ptr_vector_destroy( &p_subn->node_lid_tbl ); + + p_next_port = (osm_port_t*)cl_qmap_head( &p_subn->port_guid_tbl ); + while( p_next_port != (osm_port_t*)cl_qmap_end( &p_subn->port_guid_tbl ) ) + { + p_port = p_next_port; + p_next_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ); + osm_port_delete( &p_port ); + } + + p_next_sw = (osm_switch_t*)cl_qmap_head( &p_subn->sw_guid_tbl ); + while( p_next_sw != (osm_switch_t*)cl_qmap_end( &p_subn->sw_guid_tbl ) ) + { + p_sw = p_next_sw; + p_next_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ); + osm_switch_delete( &p_sw ); + } + + p_next_rsm = (osm_remote_sm_t*)cl_qmap_head( &p_subn->sm_guid_tbl ); + while( p_next_rsm != (osm_remote_sm_t*)cl_qmap_end( &p_subn->sm_guid_tbl ) ) + { + p_rsm = p_next_rsm; + p_next_rsm = (osm_remote_sm_t*)cl_qmap_next( &p_rsm->map_item ); + free( p_rsm ); + } + + p_next_prtn = (osm_prtn_t*)cl_qmap_head( &p_subn->prtn_pkey_tbl ); + while( p_next_prtn != (osm_prtn_t*)cl_qmap_end( &p_subn->prtn_pkey_tbl ) ) + { + p_prtn = p_next_prtn; + p_next_prtn = (osm_prtn_t*)cl_qmap_next( &p_prtn->map_item ); + 
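/* p_next_prtn was advanced first, so deleting p_prtn cannot break the iteration */
+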
osm_prtn_delete( &p_prtn ); + } + + p_next_mgrp = (osm_mgrp_t*)cl_qmap_head( &p_subn->mgrp_mlid_tbl ); + while( p_next_mgrp != (osm_mgrp_t*)cl_qmap_end( &p_subn->mgrp_mlid_tbl ) ) + { + p_mgrp = p_next_mgrp; + p_next_mgrp = (osm_mgrp_t*)cl_qmap_next( &p_mgrp->map_item ); + osm_mgrp_destroy( p_mgrp ); + } + + p_next_infr = (osm_infr_t*)cl_qlist_head( &p_subn->sa_infr_list ); + while (p_next_infr != (osm_infr_t*)cl_qlist_end( &p_subn->sa_infr_list ) ) + { + p_infr = p_next_infr; + p_next_infr = (osm_infr_t*)cl_qlist_next( &p_infr->list_item ); + osm_infr_destroy( p_infr ); + } + + cl_list_remove_all( &p_subn->new_ports_list ); + cl_list_destroy( &p_subn->new_ports_list ); + + cl_list_remove_all( &p_subn->light_sweep_physp_list ); + cl_list_destroy( &p_subn->light_sweep_physp_list ); + + cl_ptr_vector_destroy( &p_subn->port_lid_tbl ); + cl_map_remove_all(&(p_subn->opt.port_prof_ignore_guids)); + cl_map_destroy(&(p_subn->opt.port_prof_ignore_guids)); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_subn_init( + IN osm_subn_t* const p_subn, + IN osm_opensm_t * const p_osm, + IN const osm_subn_opt_t* const p_opt ) +{ + cl_status_t status; + + p_subn->p_osm = p_osm; + + status = cl_ptr_vector_init( &p_subn->node_lid_tbl, + OSM_SUBNET_VECTOR_MIN_SIZE, + OSM_SUBNET_VECTOR_GROW_SIZE ); + if( status != CL_SUCCESS ) + return( status ); + + status = cl_ptr_vector_init( &p_subn->port_lid_tbl, + OSM_SUBNET_VECTOR_MIN_SIZE, + OSM_SUBNET_VECTOR_GROW_SIZE ); + if( status != CL_SUCCESS ) + return( status ); + + status = cl_ptr_vector_set_capacity( &p_subn->node_lid_tbl, + OSM_SUBNET_VECTOR_CAPACITY ); + if( status != CL_SUCCESS ) + return( status ); + + status = cl_ptr_vector_set_capacity( &p_subn->port_lid_tbl, + OSM_SUBNET_VECTOR_CAPACITY ); + if( status != CL_SUCCESS ) + return( status ); + + /* + LID zero is not valid. NULL out this entry for the + convenience of other code. 
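+ (Unicast LIDs start at 1 in IBA, so index 0 never names a real port.)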
+ */ + cl_ptr_vector_set( &p_subn->node_lid_tbl, 0, NULL ); + cl_ptr_vector_set( &p_subn->port_lid_tbl, 0, NULL ); + + p_subn->opt = *p_opt; + p_subn->max_unicast_lid_ho = IB_LID_UCAST_END_HO; + p_subn->max_multicast_lid_ho = IB_LID_MCAST_END_HO; + p_subn->min_ca_mtu = IB_MAX_MTU; + p_subn->min_ca_rate = IB_MAX_RATE; + + /* note that insert and remove are part of the port_profile thing */ + cl_map_init(&(p_subn->opt.port_prof_ignore_guids), 10); + + /* ignore_existing_lfts follows reassign_lfts on first sweep */ + p_subn->ignore_existing_lfts = p_subn->opt.reassign_lfts; + + /* we assume master by default - so we only need to set it true if STANDBY */ + p_subn->coming_out_of_standby = FALSE; + + return( IB_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_get_gid_by_mad_addr( + IN osm_log_t* p_log, + IN const osm_subn_t *p_subn, + IN const osm_mad_addr_t *p_mad_addr, + OUT ib_gid_t *p_gid) +{ + const cl_ptr_vector_t* p_tbl; + const osm_port_t* p_port = NULL; + const osm_physp_t* p_physp = NULL; + + if ( p_gid == NULL ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_get_gid_by_mad_addr: ERR 7505: " + "Provided output GID is NULL\n"); + return(IB_INVALID_PARAMETER); + } + + /* Find the port gid of the request in the subnet */ + p_tbl = &p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + if ((uint16_t)cl_ptr_vector_get_size(p_tbl) > + cl_ntoh16(p_mad_addr->dest_lid)) + { + p_port = cl_ptr_vector_get( p_tbl, cl_ntoh16(p_mad_addr->dest_lid) ); + if ( p_port == NULL ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osm_get_gid_by_mad_addr: " + "Did not find any port with LID: 0x%X\n", + cl_ntoh16(p_mad_addr->dest_lid) + ); + return(IB_INVALID_PARAMETER); + } + p_physp = osm_port_get_phys_ptr( p_port, p_port->default_port_num); + p_gid->unicast.interface_id = p_physp->port_guid; + p_gid->unicast.prefix = p_subn->opt.subnet_prefix; + } + else + { + /* The dest_lid is not in the subnet table - this is an error */ + osm_log( p_log, OSM_LOG_ERROR, + "osm_get_gid_by_mad_addr: ERR 7501: " + "LID is out of range: 0x%X\n", + cl_ntoh16(p_mad_addr->dest_lid) + ); + return(IB_INVALID_PARAMETER); + } + + return( IB_SUCCESS ); +} + +/********************************************************************** + **********************************************************************/ +osm_physp_t* +osm_get_physp_by_mad_addr( + IN osm_log_t* p_log, + IN const osm_subn_t *p_subn, + IN osm_mad_addr_t *p_mad_addr ) +{ + const cl_ptr_vector_t* p_port_lid_tbl; + osm_port_t* p_port = NULL; + osm_physp_t* p_physp = NULL; + + /* Find the port gid of the request in the subnet */ + p_port_lid_tbl = &p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_port_lid_tbl) < 0x10000 ); + + if ((uint16_t)cl_ptr_vector_get_size(p_port_lid_tbl) > + cl_ntoh16(p_mad_addr->dest_lid)) + { + p_port = cl_ptr_vector_get( p_port_lid_tbl, cl_ntoh16(p_mad_addr->dest_lid) ); + if (p_port == NULL) + { + /* The port is not in the port_lid table - this is an error */ + osm_log( p_log, OSM_LOG_ERROR, + "osm_get_physp_by_mad_addr: ERR 7502: " + "Cannot locate port object by lid: 0x%X\n", + cl_ntoh16(p_mad_addr->dest_lid) + ); + + goto Exit; + } + p_physp = osm_port_get_phys_ptr( p_port, p_port->default_port_num); + } + else + { + /* The dest_lid is not in the subnet table - this is an error */ + osm_log( p_log, OSM_LOG_ERROR, + "osm_get_physp_by_mad_addr: ERR 7503: " + "Lid is out of range: 
0x%X\n", + cl_ntoh16(p_mad_addr->dest_lid) + ); + } + + Exit: + return p_physp; +} + +/********************************************************************** + **********************************************************************/ +osm_port_t* +osm_get_port_by_mad_addr( + IN osm_log_t* p_log, + IN const osm_subn_t *p_subn, + IN osm_mad_addr_t *p_mad_addr ) +{ + const cl_ptr_vector_t* p_port_lid_tbl; + osm_port_t* p_port = NULL; + + /* Find the port gid of the request in the subnet */ + p_port_lid_tbl = &p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_port_lid_tbl) < 0x10000 ); + + if ((uint16_t)cl_ptr_vector_get_size(p_port_lid_tbl) > + cl_ntoh16(p_mad_addr->dest_lid)) + { + p_port = + cl_ptr_vector_get( p_port_lid_tbl, cl_ntoh16(p_mad_addr->dest_lid) ); + } + else + { + /* The dest_lid is not in the subnet table - this is an error */ + osm_log( p_log, OSM_LOG_ERROR, + "osm_get_port_by_mad_addr: ERR 7504: " + "Lid is out of range: 0x%X\n", + cl_ntoh16(p_mad_addr->dest_lid) + ); + } + + return p_port; +} + +/********************************************************************** + **********************************************************************/ +osm_switch_t * +osm_get_switch_by_guid( + IN const osm_subn_t *p_subn, + IN uint64_t guid) +{ + osm_switch_t *p_switch; + + p_switch = (osm_switch_t*)cl_qmap_get( &(p_subn->sw_guid_tbl), guid ); + if( p_switch == (osm_switch_t*)cl_qmap_end( &(p_subn->sw_guid_tbl)) ) + p_switch = NULL; + return p_switch; +} + +/********************************************************************** + **********************************************************************/ +osm_node_t * +osm_get_node_by_guid( + IN osm_subn_t const *p_subn, + IN uint64_t guid) +{ + osm_node_t *p_node; + + p_node = (osm_node_t*)cl_qmap_get( &(p_subn->node_guid_tbl), guid ); + if( p_node == (osm_node_t*)cl_qmap_end( &(p_subn->node_guid_tbl)) ) + p_node = NULL; + return p_node; +} + +/********************************************************************** + **********************************************************************/ +osm_port_t * +osm_get_port_by_guid( + IN osm_subn_t const *p_subn, + IN uint64_t guid) +{ + osm_port_t *p_port; + + p_port = (osm_port_t*)cl_qmap_get( &(p_subn->port_guid_tbl), guid ); + if( p_port == (osm_port_t*)cl_qmap_end( &(p_subn->port_guid_tbl)) ) + p_port = NULL; + return p_port; +} + +/********************************************************************** + **********************************************************************/ +static void +subn_set_default_qos_options( + IN osm_qos_options_t *opt) +{ + opt->max_vls = 15; + opt->high_limit = 0; + opt->vlarb_high = "0:4,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0,10:0,11:0,12:0,13:0,14:0"; + opt->vlarb_low = "0:0,1:4,2:4,3:4,4:4,5:4,6:4,7:4,8:4,9:4,10:4,11:4,12:4,13:4,14:4"; + opt->sl2vl = "0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,7"; +} + +/********************************************************************** + **********************************************************************/ +void +osm_subn_set_default_opt( + IN osm_subn_opt_t* const p_opt ) +{ + memset(p_opt, 0, sizeof(osm_subn_opt_t)); + p_opt->guid = 0; + p_opt->m_key = OSM_DEFAULT_M_KEY; + p_opt->sm_key = OSM_DEFAULT_SM_KEY; + p_opt->subnet_prefix = IB_DEFAULT_SUBNET_PREFIX; + p_opt->m_key_lease_period = 0; + p_opt->sweep_interval = OSM_DEFAULT_SWEEP_INTERVAL_SECS; + p_opt->max_wire_smps = OSM_DEFAULT_SMP_MAX_ON_WIRE; + p_opt->console = FALSE; + p_opt->transaction_timeout = OSM_DEFAULT_TRANS_TIMEOUT_MILLISEC; + /* by default we will 
consider waiting for 50x transaction timeout normal */ + p_opt->max_msg_fifo_timeout = 50*OSM_DEFAULT_TRANS_TIMEOUT_MILLISEC; + p_opt->sm_priority = OSM_DEFAULT_SM_PRIORITY; + p_opt->lmc = OSM_DEFAULT_LMC; + p_opt->lmc_esp0 = FALSE; + p_opt->max_op_vls = OSM_DEFAULT_MAX_OP_VLS; + p_opt->force_link_speed = 0; + p_opt->reassign_lids = FALSE; + p_opt->reassign_lfts = TRUE; + p_opt->ignore_other_sm = FALSE; + p_opt->single_thread = FALSE; + p_opt->no_multicast_option = FALSE; + p_opt->disable_multicast = FALSE; + p_opt->force_log_flush = FALSE; + p_opt->subnet_timeout = OSM_DEFAULT_SUBNET_TIMEOUT; + p_opt->packet_life_time = OSM_DEFAULT_SWITCH_PACKET_LIFE; + p_opt->vl_stall_count = OSM_DEFAULT_VL_STALL_COUNT; + p_opt->leaf_vl_stall_count = OSM_DEFAULT_LEAF_VL_STALL_COUNT; + p_opt->head_of_queue_lifetime = OSM_DEFAULT_HEAD_OF_QUEUE_LIFE; + p_opt->leaf_head_of_queue_lifetime = OSM_DEFAULT_LEAF_HEAD_OF_QUEUE_LIFE; + p_opt->local_phy_errors_threshold = OSM_DEFAULT_ERROR_THRESHOLD; + p_opt->overrun_errors_threshold = OSM_DEFAULT_ERROR_THRESHOLD; + p_opt->sminfo_polling_timeout = OSM_SM_DEFAULT_POLLING_TIMEOUT_MILLISECS; + p_opt->polling_retry_number = OSM_SM_DEFAULT_POLLING_RETRY_NUMBER; + p_opt->force_heavy_sweep = FALSE; + p_opt->log_flags = 0; + p_opt->honor_guid2lid_file = FALSE; + + p_opt->dump_files_dir = getenv("OSM_TMP_DIR"); + if (!p_opt->dump_files_dir || !(*p_opt->dump_files_dir)) + p_opt->dump_files_dir = OSM_DEFAULT_TMP_DIR; + + p_opt->log_file = OSM_DEFAULT_LOG_FILE; + p_opt->log_max_size = 0; + p_opt->partition_config_file = OSM_DEFAULT_PARTITION_CONFIG_FILE; + p_opt->no_partition_enforcement = FALSE; + p_opt->no_qos = TRUE; + p_opt->accum_log_file = TRUE; + p_opt->port_profile_switch_nodes = FALSE; + p_opt->pfn_ui_pre_lid_assign = NULL; + p_opt->ui_pre_lid_assign_ctx = NULL; + p_opt->pfn_ui_mcast_fdb_assign = NULL; + p_opt->ui_mcast_fdb_assign_ctx = NULL; + p_opt->sweep_on_trap = TRUE; + p_opt->testability_mode = OSM_TEST_MODE_NONE; + p_opt->routing_engine_name = NULL; + p_opt->lid_matrix_dump_file = NULL; + p_opt->ucast_dump_file = NULL; + p_opt->updn_guid_file = NULL; + p_opt->sa_db_file = NULL; + p_opt->exit_on_fatal = TRUE; + p_opt->enable_quirks = FALSE; + p_opt->no_clients_rereg = FALSE; + subn_set_default_qos_options(&p_opt->qos_options); + subn_set_default_qos_options(&p_opt->qos_ca_options); + subn_set_default_qos_options(&p_opt->qos_sw0_options); + subn_set_default_qos_options(&p_opt->qos_swe_options); + subn_set_default_qos_options(&p_opt->qos_rtr_options); +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_net64( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN uint64_t *p_val) +{ + uint64_t val; + if (!strcmp(p_req_key, p_key)) + { +#if __WORDSIZE == 64 + val = strtoul(p_val_str, NULL, 0); +#else + val = strtoull(p_val_str, NULL, 0); +#endif + if (cl_hton64(val) != *p_val) + { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = 0x%016" PRIx64 "\n", + p_key, val); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + *p_val = cl_ntoh64(val); + } + } +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_uint32( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN uint32_t *p_val) +{ + uint32_t val; + if (!strcmp(p_req_key, p_key)) + { + val = strtoul(p_val_str, NULL, 0); + if (val != *p_val) + { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = %u\n", + p_key, val); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + *p_val = val; + } + } +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_net16( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN uint16_t *p_val) +{ + if (!strcmp(p_req_key, p_key)) + { + uint32_t val; + val = strtoul(p_val_str, NULL, 0); + CL_ASSERT( val < 0x10000 ); + if (cl_hton16((uint16_t)val) != *p_val) + { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = 0x%04x\n", + p_key, val); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + *p_val = cl_hton16((uint16_t)val); + } + } +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_uint8( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN uint8_t *p_val) +{ + if (!strcmp(p_req_key, p_key)) + { + uint32_t val; + val = strtoul(p_val_str, NULL, 0); + CL_ASSERT( val < 0x100 ); + if (val != *p_val) + { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = %u\n", + p_key, val); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + *p_val = (uint8_t)val; + } + } +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_boolean( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN boolean_t *p_val) +{ + if (!strcmp(p_req_key, p_key) && p_val_str) + { + boolean_t val; + if (strcmp("TRUE", p_val_str)) + val = FALSE; + else + val = TRUE; + + if (val != *p_val) { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = %s\n", + p_key, p_val_str); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + *p_val = val; + } + } +} + +/********************************************************************** + **********************************************************************/ +static inline void +__osm_subn_opts_unpack_charp( + IN char *p_req_key, + IN char *p_key, + IN char *p_val_str, + IN char **p_val) +{ + if (!strcmp(p_req_key, p_key) && p_val_str) + { + if ((*p_val == NULL) || strcmp(p_val_str, *p_val)) + { + char buff[128]; + snprintf(buff, sizeof(buff), " Using Cached Option:%s = %s\n", + p_key, p_val_str); + printf("%s", buff); + cl_log_event("OpenSM", LOG_INFO, buff, NULL, 0); + + /* + Ignore the possible memory leak here; + the pointer may be to a static default. 
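+ Copying the string keeps ownership uniform whether the previous
+ value was a static default or heap-allocated.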
+ */ + *p_val = (char *)malloc( strlen(p_val_str) +1 ); + strcpy( *p_val, p_val_str); + } + } +} + +/********************************************************************** + **********************************************************************/ +static void +subn_parse_qos_options( + IN const char *prefix, + IN char *p_key, + IN char *p_val_str, + IN osm_qos_options_t *opt) +{ + char name[256]; + + snprintf(name, sizeof(name), "%s_max_vls", prefix); + __osm_subn_opts_unpack_uint32(name, p_key, p_val_str, &opt->max_vls); + snprintf(name, sizeof(name), "%s_high_limit", prefix); + __osm_subn_opts_unpack_uint32(name, p_key, p_val_str, &opt->high_limit); + snprintf(name, sizeof(name), "%s_vlarb_high", prefix); + __osm_subn_opts_unpack_charp(name, p_key, p_val_str, &opt->vlarb_high); + snprintf(name, sizeof(name), "%s_vlarb_low", prefix); + __osm_subn_opts_unpack_charp(name, p_key, p_val_str, &opt->vlarb_low); + snprintf(name, sizeof(name), "%s_sl2vl", prefix); + __osm_subn_opts_unpack_charp(name, p_key, p_val_str, &opt->sl2vl); +} + +static int +subn_dump_qos_options( + FILE *file, + const char *set_name, + const char *prefix, + osm_qos_options_t *opt) +{ + return fprintf(file, "# %s\n" + "%s_max_vls %u\n" + "%s_high_limit %u\n" + "%s_vlarb_high %s\n" + "%s_vlarb_low %s\n" + "%s_sl2vl %s\n", + set_name, + prefix, opt->max_vls, + prefix, opt->high_limit, + prefix, opt->vlarb_high, + prefix, opt->vlarb_low, + prefix, opt->sl2vl); +} + +/********************************************************************** + **********************************************************************/ +void +osm_subn_rescan_conf_file( + IN osm_subn_opt_t* const p_opts ) +{ + char *p_cache_dir = getenv("OSM_CACHE_DIR"); + char file_name[256]; + FILE *opts_file; + char line[1024]; + char *p_key, *p_val ,*p_last; + + /* try to open the options file from the cache dir */ + if (!p_cache_dir || !(*p_cache_dir)) + p_cache_dir = OSM_DEFAULT_CACHE_DIR; + + strcpy(file_name, p_cache_dir); + strcat(file_name, "opensm.opts"); + + opts_file = fopen(file_name, "r"); + if (!opts_file) + return; + + while (fgets(line, 1023, opts_file) != NULL) + { + /* get the first token */ + p_key = strtok_r(line, " \t\n", &p_last); + if (p_key) + { + p_val = strtok_r(NULL, " \t\n", &p_last); + + subn_parse_qos_options("qos", + p_key, p_val, &p_opts->qos_options); + + subn_parse_qos_options("qos_ca", + p_key, p_val, &p_opts->qos_ca_options); + + subn_parse_qos_options("qos_sw0", + p_key, p_val, &p_opts->qos_sw0_options); + + subn_parse_qos_options("qos_swe", + p_key, p_val, &p_opts->qos_swe_options); + + subn_parse_qos_options("qos_rtr", + p_key, p_val, &p_opts->qos_rtr_options); + + } + } + fclose(opts_file); +} + +/********************************************************************** + **********************************************************************/ +void +osm_subn_parse_conf_file( + IN osm_subn_opt_t* const p_opts ) +{ + char *p_cache_dir = getenv("OSM_CACHE_DIR"); + char file_name[256]; + FILE *opts_file; + char line[1024]; + char *p_key, *p_val ,*p_last; + + /* try to open the options file from the cache dir */ + if (!p_cache_dir || !(*p_cache_dir)) + p_cache_dir = OSM_DEFAULT_CACHE_DIR; + + strcpy(file_name, p_cache_dir); + strcat(file_name, "opensm.opts"); + + opts_file = fopen(file_name, "r"); + if (!opts_file) return; + + while (fgets(line, 1023, opts_file) != NULL) + { + /* get the first token */ + p_key = strtok_r(line, " \t\n", &p_last); + if (p_key) + { + p_val = strtok_r(NULL, " \t\n", &p_last); + + 
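/*
+ Each unpack helper below is a no-op unless p_key matches its
+ option name. For example, a (hypothetical) opensm.opts line
+ "sweep_interval 10" arrives here as p_key = "sweep_interval" and
+ p_val = "10", and only the matching
+ __osm_subn_opts_unpack_uint32() call consumes it.
+ */
+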
__osm_subn_opts_unpack_net64( + "guid", p_key, p_val, &p_opts->guid); + + __osm_subn_opts_unpack_net64( + "m_key", p_key, p_val, &p_opts->m_key); + + __osm_subn_opts_unpack_net64( + "sm_key", p_key, p_val, &p_opts->sm_key); + + __osm_subn_opts_unpack_net64( + "subnet_prefix", + p_key, p_val, &p_opts->subnet_prefix); + + __osm_subn_opts_unpack_net16( + "m_key_lease_period", + p_key, p_val, &p_opts->m_key_lease_period); + + __osm_subn_opts_unpack_uint32( + "sweep_interval", + p_key, p_val, &p_opts->sweep_interval); + + __osm_subn_opts_unpack_uint32( + "max_wire_smps", + p_key, p_val, &p_opts->max_wire_smps); + + __osm_subn_opts_unpack_uint32( + "transaction_timeout", + p_key, p_val, &p_opts->transaction_timeout); + + __osm_subn_opts_unpack_uint32( + "max_msg_fifo_timeout", + p_key, p_val, &p_opts->max_msg_fifo_timeout); + + __osm_subn_opts_unpack_uint8( + "sm_priority", + p_key, p_val, &p_opts->sm_priority); + + __osm_subn_opts_unpack_uint8( + "lmc", + p_key, p_val, &p_opts->lmc); + + __osm_subn_opts_unpack_boolean( + "lmc_esp0", + p_key, p_val, &p_opts->lmc_esp0); + + __osm_subn_opts_unpack_uint8( + "max_op_vls", + p_key, p_val, &p_opts->max_op_vls); + + __osm_subn_opts_unpack_uint8( + "force_link_speed", + p_key, p_val, &p_opts->force_link_speed); + + __osm_subn_opts_unpack_boolean( + "reassign_lids", + p_key, p_val, &p_opts->reassign_lids); + + __osm_subn_opts_unpack_boolean( + "reassign_lfts", + p_key, p_val, &p_opts->reassign_lfts); + + __osm_subn_opts_unpack_boolean( + "ignore_other_sm", + p_key, p_val, &p_opts->ignore_other_sm); + + __osm_subn_opts_unpack_boolean( + "single_thread", + p_key, p_val, &p_opts->single_thread); + + __osm_subn_opts_unpack_boolean( + "no_multicast_option", + p_key, p_val, &p_opts->no_multicast_option); + + __osm_subn_opts_unpack_boolean( + "disable_multicast", + p_key, p_val, &p_opts->disable_multicast); + + __osm_subn_opts_unpack_boolean( + "force_log_flush", + p_key, p_val, &p_opts->force_log_flush); + + __osm_subn_opts_unpack_uint8( + "subnet_timeout", + p_key, p_val, &p_opts->subnet_timeout); + + __osm_subn_opts_unpack_uint8( + "packet_life_time", + p_key, p_val, &p_opts->packet_life_time); + + __osm_subn_opts_unpack_uint8( + "vl_stall_count", + p_key, p_val, &p_opts->vl_stall_count); + + __osm_subn_opts_unpack_uint8( + "leaf_vl_stall_count", + p_key, p_val, &p_opts->leaf_vl_stall_count); + + __osm_subn_opts_unpack_uint8( + "head_of_queue_lifetime", + p_key, p_val, &p_opts->head_of_queue_lifetime); + + __osm_subn_opts_unpack_uint8( + "leaf_head_of_queue_lifetime", + p_key, p_val, &p_opts->leaf_head_of_queue_lifetime); + + __osm_subn_opts_unpack_uint8( + "local_phy_errors_threshold", + p_key, p_val, &p_opts->local_phy_errors_threshold); + + __osm_subn_opts_unpack_uint8( + "overrun_errors_threshold", + p_key, p_val, &p_opts->overrun_errors_threshold); + + __osm_subn_opts_unpack_uint32( + "sminfo_polling_timeout", + p_key, p_val, &p_opts->sminfo_polling_timeout); + + __osm_subn_opts_unpack_uint32( + "polling_retry_number", + p_key, p_val, &p_opts->polling_retry_number); + + __osm_subn_opts_unpack_boolean( + "force_heavy_sweep", + p_key, p_val, &p_opts->force_heavy_sweep); + + __osm_subn_opts_unpack_uint8( + "log_flags", + p_key, p_val, &p_opts->log_flags); + + __osm_subn_opts_unpack_boolean( + "port_profile_switch_nodes", + p_key, p_val, &p_opts->port_profile_switch_nodes); + + __osm_subn_opts_unpack_boolean( + "sweep_on_trap", + p_key, p_val, &p_opts->sweep_on_trap); + + __osm_subn_opts_unpack_charp( + "routing_engine", + p_key, p_val, 
&p_opts->routing_engine_name); + + __osm_subn_opts_unpack_charp( + "log_file", p_key, p_val, &p_opts->log_file); + + __osm_subn_opts_unpack_uint32( + "log_max_size", + p_key, p_val, (uint32_t *)&p_opts->log_max_size); + + __osm_subn_opts_unpack_charp( + "partition_config_file", + p_key, p_val, &p_opts->partition_config_file); + + __osm_subn_opts_unpack_boolean( + "no_partition_enforcement", + p_key, p_val, &p_opts->no_partition_enforcement); + + __osm_subn_opts_unpack_boolean( + "no_qos", + p_key, p_val, &p_opts->no_qos); + + __osm_subn_opts_unpack_boolean( + "accum_log_file", + p_key, p_val, &p_opts->accum_log_file); + + __osm_subn_opts_unpack_charp( + "dump_files_dir", + p_key, p_val, &p_opts->dump_files_dir); + + __osm_subn_opts_unpack_charp( + "lid_matrix_dump_file", + p_key, p_val, &p_opts->lid_matrix_dump_file); + + __osm_subn_opts_unpack_charp( + "ucast_dump_file", + p_key, p_val, &p_opts->ucast_dump_file); + + __osm_subn_opts_unpack_charp( + "updn_guid_file", + p_key, p_val, &p_opts->updn_guid_file); + + __osm_subn_opts_unpack_charp( + "sa_db_file", + p_key, p_val, &p_opts->sa_db_file); + + __osm_subn_opts_unpack_boolean( + "exit_on_fatal", + p_key, p_val, &p_opts->exit_on_fatal); + + __osm_subn_opts_unpack_boolean( + "honor_guid2lid_file", + p_key, p_val, &p_opts->honor_guid2lid_file); + + subn_parse_qos_options("qos", + p_key, p_val, &p_opts->qos_options); + + subn_parse_qos_options("qos_ca", + p_key, p_val, &p_opts->qos_ca_options); + + subn_parse_qos_options("qos_sw0", + p_key, p_val, &p_opts->qos_sw0_options); + + subn_parse_qos_options("qos_swe", + p_key, p_val, &p_opts->qos_swe_options); + + subn_parse_qos_options("qos_rtr", + p_key, p_val, &p_opts->qos_rtr_options); + + __osm_subn_opts_unpack_boolean( + "enable_quirks", + p_key, p_val, &p_opts->enable_quirks); + + } + } + fclose(opts_file); +} + +/********************************************************************** + **********************************************************************/ +void +osm_subn_write_conf_file( + IN osm_subn_opt_t* const p_opts ) +{ + char *p_cache_dir = getenv("OSM_CACHE_DIR"); + char file_name[256]; + FILE *opts_file; + + /* try to open the options file from the cache dir */ + if (!p_cache_dir || !(*p_cache_dir)) + p_cache_dir = OSM_DEFAULT_CACHE_DIR; + + strcpy(file_name, p_cache_dir); + strcat(file_name, "opensm.opts"); + + opts_file = fopen(file_name, "w"); + if (!opts_file) return; + + fprintf( + opts_file, + "#\n# DEVICE ATTRIBUTES OPTIONS\n#\n" + "# The port GUID on which the OpenSM is running\n" + "guid 0x%016" PRIx64 "\n\n" + "# M_Key value sent to all ports qualifying all Set(PortInfo)\n" + "m_key 0x%016" PRIx64 "\n\n" + "# The lease period used for the M_Key on this subnet in [msec]\n" + "m_key_lease_period %u\n\n" + "# SM_Key value of the SM to qualify rcv SA queries as 'trusted'\n" + "sm_key 0x%016" PRIx64 "\n\n" + "# Subnet prefix used on this subnet\n" + "subnet_prefix 0x%016" PRIx64 "\n\n" + "# The LMC value used on this subnet\n" + "lmc %u\n\n" + "# lmc_esp0 determines whether LMC value used on subnet is used for\n" + "#enhanced switch port 0. If TRUE, LMC value for subnet is used for\n" + "#ESP0. Otherwise, LMC value for ESP0s is 0.\n" + "lmc_esp0 %s\n\n" + "# The code of maximal time a packet can live in a switch\n" + "# The actual time is 4.096usec * 2^\n" + "# The value 0x14 disables this mechanism\n" + "packet_life_time 0x%02x\n\n" + "# The number of sequential packets dropped that cause the port\n" + "# to enter the VLStalled state. 
The result of setting this value to\n" + "# zero is undefined.\n" + "vl_stall_count 0x%02x\n\n" + "# The number of sequential packets dropped that cause the port\n" + "# to enter the VLStalled state. This value is for switch ports\n" + "# driving a CA or router port. The result of setting this value\n" + "# to zero is undefined.\n" + "leaf_vl_stall_count 0x%02x\n\n" + "# The code of maximal time a packet can wait at the head of\n" + "# transmission queue. \n" + "# The actual time is 4.096usec * 2^\n" + "# The value 0x14 disables this mechanism\n" + "head_of_queue_lifetime 0x%02x\n\n" + "# The maximal time a packet can wait at the head of queue on \n" + "# switch port connected to a CA or router port\n" + "leaf_head_of_queue_lifetime 0x%02x\n\n" + "# Limit the maximal operational VLs\n" + "max_op_vls %u\n\n" + "# Force switch links which are more than SDR capable to \n" + "# operate at SDR speed\n\n" + "force_link_speed %u\n\n" + "# The subnet_timeout code that will be set for all the ports\n" + "# The actual timeout is 4.096usec * 2^\n" + "subnet_timeout %u\n\n" + "# Threshold of local phy errors for sending Trap 129\n" + "local_phy_errors_threshold 0x%02x\n\n" + "# Threshold of credits over-run errors for sending Trap 129\n" + "overrun_errors_threshold 0x%02x\n\n", + cl_ntoh64(p_opts->guid), + cl_ntoh64(p_opts->m_key), + cl_ntoh16(p_opts->m_key_lease_period), + cl_ntoh64(p_opts->sm_key), + cl_ntoh64(p_opts->subnet_prefix), + p_opts->lmc, + p_opts->lmc_esp0 ? "TRUE" : "FALSE", + p_opts->packet_life_time, + p_opts->vl_stall_count, + p_opts->leaf_vl_stall_count, + p_opts->head_of_queue_lifetime, + p_opts->leaf_head_of_queue_lifetime, + p_opts->max_op_vls, + p_opts->force_link_speed, + p_opts->subnet_timeout, + p_opts->local_phy_errors_threshold, + p_opts->overrun_errors_threshold + ); + + fprintf( + opts_file, + "#\n# PARTITIONING OPTIONS\n#\n" + "# Partition configuration file to be used\n" + "partition_config_file %s\n\n" + "# Disable partition enforcement by switches\n" + "no_partition_enforcement %s\n\n", + p_opts->partition_config_file, + p_opts->no_partition_enforcement ? "TRUE" : "FALSE"); + + fprintf( + opts_file, + "#\n# SWEEP OPTIONS\n#\n" + "# The number of seconds between subnet sweeps (0 disables it)\n" + "sweep_interval %u\n\n" + "# If TRUE cause all lids to be reassigned\n" + "reassign_lids %s\n\n" + "# If TRUE ignore existing LFT entries on first sweep (default).\n" + "# Otherwise only non minimal hop cases are modified.\n" + "# NOTE: A standby SM clears its first sweep flag - since the\n" + "# master SM already sweeps...\n" + "reassign_lfts %s\n\n" + "# If TRUE forces every sweep to be a heavy sweep\n" + "force_heavy_sweep %s\n\n" + "# If TRUE every trap will cause a heavy sweep.\n" + "# NOTE: successive identical traps (>10) are suppressed\n" + "sweep_on_trap %s\n\n", + p_opts->sweep_interval, + p_opts->reassign_lids ? "TRUE" : "FALSE", + p_opts->reassign_lfts ? "TRUE" : "FALSE", + p_opts->force_heavy_sweep ? "TRUE" : "FALSE", + p_opts->sweep_on_trap ? "TRUE" : "FALSE" + ); + + fprintf( + opts_file, + "#\n# ROUTING OPTIONS\n#\n" + "# If TRUE count switches as link subscriptions\n" + "port_profile_switch_nodes %s\n\n", + p_opts->port_profile_switch_nodes ? 
"TRUE" : "FALSE"); + + if (p_opts->routing_engine_name) + fprintf( opts_file, + "# Routing engine\n" + "routing_engine %s\n\n", + p_opts->routing_engine_name); + if (p_opts->lid_matrix_dump_file) + fprintf( opts_file, + "# Lid matrix dump file name\n" + "lid_matrix_dump_file %s\n\n", + p_opts->lid_matrix_dump_file); + if (p_opts->ucast_dump_file) + fprintf( opts_file, + "# Ucast dump file name\n" + "ucast_dump_file %s\n\n", + p_opts->ucast_dump_file); + if (p_opts->updn_guid_file) + fprintf( opts_file, + "# The file holding the Up/Down root node guids\n" + "# One guid in each line\n" + "updn_guid_file %s\n\n", + p_opts->updn_guid_file); + if (p_opts->sa_db_file) + fprintf( opts_file, + "# SA database file name\n" + "sa_db_file %s\n\n", + p_opts->sa_db_file); + + fprintf( + opts_file, + "#\n# HANDOVER - MULTIPLE SMs OPTIONS\n#\n" + "# SM priority used for deciding who is the master\n" + "sm_priority %u\n\n" + "# If TRUE other SMs on the subnet should be ignored\n" + "ignore_other_sm %s\n\n" + "# Timeout in [msec] between two polls of active master SM\n" + "sminfo_polling_timeout %u\n\n" + "# Number of failing polls of remote SM that declares it dead\n" + "polling_retry_number %u\n\n" + "# If TRUE honor the guid2lid file when coming out of standby\n" + "# state, if such file exists and is valid\n" + "honor_guid2lid_file %s\n\n", + p_opts->sm_priority, + p_opts->ignore_other_sm ? "TRUE" : "FALSE", + p_opts->sminfo_polling_timeout, + p_opts->polling_retry_number, + p_opts->honor_guid2lid_file ? "TRUE" : "FALSE" + ); + + fprintf( + opts_file, + "#\n# TIMING AND THREADING OPTIONS\n#\n" + "# Number of MADs sent in parallel\n" + "max_wire_smps %u\n\n" + "# The time taken to a transaction to finish in [msec]\n" + "transaction_timeout %u\n\n" + "# Maximal time in [msec] a message can stay in the incoming message queue.\n" + "# If there is more than one message in the queue and the last message\n" + "# stayed in the queue more than this value any SA request will be \n" + "# immediately returned with a BUSY status.\n" + "max_msg_fifo_timeout %u\n\n" + "# Use a single thread for handling SA queries\n" + "single_thread %s\n\n", + p_opts->max_wire_smps, + p_opts->transaction_timeout, + p_opts->max_msg_fifo_timeout, + p_opts->single_thread ? "TRUE" : "FALSE" + ); + + fprintf( + opts_file, + "#\n# DEBUG FEATURES\n#\n" + "# The log flags used\n" + "log_flags 0x%02x\n\n" + "# Force flush of the log file after each log message\n" + "force_log_flush %s\n\n" + "# Log file to be used\n" + "log_file %s\n\n" + "# Limit the size of the log file. If overrun, log is restarted\n" + "log_max_size %lu\n\n" + "# If TRUE will accumulate the log over multiple OpenSM sessions\n" + "accum_log_file %s\n\n" + "# The directory to hold the file OpenSM dumps\n" + "dump_files_dir %s\n\n" + "# If TRUE enables new high risk options and hardware specific quirks\n" + "enable_quirks %s\n\n" + "# If TRUE OpenSM should disable multicast support\n" + "no_multicast_option %s\n\n" + "# No multicast routing is performed if TRUE\n" + "disable_multicast %s\n\n" + "# If TRUE opensm will exit on fatal initialization issues\n" + "exit_on_fatal %s\n\n", + p_opts->log_flags, + p_opts->force_log_flush ? "TRUE" : "FALSE", + p_opts->log_file, + p_opts->log_max_size, + p_opts->accum_log_file ? "TRUE" : "FALSE", + p_opts->dump_files_dir, + p_opts->enable_quirks ? "TRUE" : "FALSE", + p_opts->no_multicast_option ? "TRUE" : "FALSE", + p_opts->disable_multicast ? "TRUE" : "FALSE", + p_opts->exit_on_fatal ? 
"TRUE" : "FALSE" + ); + + fprintf( + opts_file, + "#\n# QoS OPTIONS\n#\n" + "# Disable QoS setup\n" + "no_qos %s\n\n", + p_opts->no_qos ? "TRUE" : "FALSE"); + + subn_dump_qos_options(opts_file, + "QoS default options", "qos", &p_opts->qos_options); + fprintf(opts_file, "\n"); + subn_dump_qos_options(opts_file, + "QoS CA options", "qos_ca", &p_opts->qos_ca_options); + fprintf(opts_file, "\n"); + subn_dump_qos_options(opts_file, + "QoS Switch Port 0 options", "qos_sw0", &p_opts->qos_sw0_options); + fprintf(opts_file, "\n"); + subn_dump_qos_options(opts_file, + "QoS Switch external ports options", "qos_swe", &p_opts->qos_swe_options); + fprintf(opts_file, "\n"); + subn_dump_qos_options(opts_file, + "QoS Router ports options", "qos_rtr", &p_opts->qos_rtr_options); + + /* optional string attributes ... */ + + fclose(opts_file); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv.c new file mode 100644 index 00000000..9fbdb145 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv.c @@ -0,0 +1,681 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_si_rcv_t. + * This object represents the SwitchInfo Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + The plock must be held before calling this function. 
+**********************************************************************/ +static void +__osm_si_rcv_get_port_info( + IN const osm_si_rcv_t* const p_rcv, + IN osm_switch_t* const p_sw, + IN const osm_madw_t* const p_madw ) +{ + osm_madw_context_t context; + uint8_t port_num; + osm_physp_t *p_physp; + osm_node_t *p_node; + uint8_t num_ports; + osm_dr_path_t dr_path; + const ib_smp_t* p_smp; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_si_rcv_get_port_info ); + + CL_ASSERT( p_sw ); + + p_node = osm_switch_get_node_ptr( p_sw ); + p_smp = osm_madw_get_smp_ptr( p_madw ); + + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + /* + Request PortInfo attribute for each port on the switch. + Don't trust the port's own DR Path, since it may no longer + be a legitimate path through the subnet. + Build a path from the mad instead, since we know that path works. + The port's DR Path info gets updated when the PortInfo + attribute is received. + */ + p_physp = osm_node_get_any_physp_ptr( p_node ); + + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + context.pi_context.node_guid = osm_node_get_node_guid( p_node ); + context.pi_context.port_guid = osm_physp_get_port_guid( p_physp ); + context.pi_context.set_method = FALSE; + context.pi_context.update_master_sm_base_lid = FALSE; + context.pi_context.ignore_errors = FALSE; + context.pi_context.light_sweep = FALSE; + context.pi_context.active_transition = FALSE; + + num_ports = osm_node_get_num_physp( p_node ); + osm_dr_path_init( &dr_path, + osm_madw_get_bind_handle( p_madw ), + p_smp->hop_count, p_smp->initial_path ); + + for( port_num = 0; port_num < num_ports; port_num++) + { + status = osm_req_get( + p_rcv->p_req, + &dr_path, + IB_MAD_ATTR_PORT_INFO, + cl_hton32( port_num ), + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + /* continue the loop despite the error */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_get_port_info: ERR 3602: " + "Failure initiating PortInfo request (%s)\n", + ib_get_err_str(status)); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + The plock must be held before calling this function. 
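+ (Each LinearForwardingTable block holds 64 entries, so the loop below issues one Get per in-use block.)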
+**********************************************************************/ +static void +__osm_si_rcv_get_fwd_tbl( + IN const osm_si_rcv_t* const p_rcv, + IN osm_switch_t* const p_sw ) +{ + osm_madw_context_t context; + osm_dr_path_t *p_dr_path; + osm_physp_t *p_physp; + osm_node_t *p_node; + uint32_t block_id_ho; + uint32_t max_block_id_ho; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_si_rcv_get_fwd_tbl ); + + CL_ASSERT( p_sw ); + + p_node = osm_switch_get_node_ptr( p_sw ); + + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + p_physp = osm_node_get_any_physp_ptr( p_node ); + + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + context.lft_context.node_guid = osm_node_get_node_guid( p_node ); + context.lft_context.set_method = FALSE; + + max_block_id_ho = osm_switch_get_max_block_id_in_use( p_sw ); + + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + + for( block_id_ho = 0; block_id_ho <= max_block_id_ho; block_id_ho++) + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_get_fwd_tbl: " + "Retrieving FT block %u\n", block_id_ho ); + } + + status = osm_req_get( + p_rcv->p_req, + p_dr_path, + IB_MAD_ATTR_LIN_FWD_TBL, + cl_hton32( block_id_ho ), + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + /* continue the loop despite the error */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_get_fwd_tbl: ERR 3603: " + "Failure initiating PortInfo request (%s)\n", + ib_get_err_str(status)); + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +#if 0 +/********************************************************************** + The plock must be held before calling this function. +**********************************************************************/ +static void +__osm_si_rcv_get_mcast_fwd_tbl( + IN const osm_si_rcv_t* const p_rcv, + IN osm_switch_t* const p_sw ) +{ + osm_madw_context_t context; + osm_dr_path_t *p_dr_path; + osm_physp_t *p_physp; + osm_node_t *p_node; + osm_mcast_tbl_t* p_tbl; + uint32_t block_id_ho; + uint32_t max_block_id_ho; + uint32_t position; + uint32_t max_position; + uint32_t attr_mod_ho; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_si_rcv_get_mcast_fwd_tbl ); + + CL_ASSERT( p_sw ); + + p_node = osm_switch_get_node_ptr( p_sw ); + + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + if( osm_switch_get_mcast_fwd_tbl_size( p_sw ) == 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_get_mcast_fwd_tbl: " + "Multicast not supported by switch 0x%016" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + goto Exit; + } + + p_physp = osm_node_get_any_physp_ptr( p_node ); + p_tbl = osm_switch_get_mcast_tbl_ptr( p_sw ); + + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + + context.mft_context.node_guid = osm_node_get_node_guid( p_node ); + context.mft_context.set_method = FALSE; + + max_block_id_ho = osm_mcast_tbl_get_max_block( p_tbl ); + + if( max_block_id_ho > IB_MCAST_MAX_BLOCK_ID ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_get_mcast_fwd_tbl: ERR 3609: " + "Out-of-range mcast block size = %u on switch 0x%016" PRIx64 + "\n", max_block_id_ho, + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + goto Exit; + } + + max_position = osm_mcast_tbl_get_max_position( p_tbl ); + + CL_ASSERT( max_position <= IB_MCAST_POSITION_MAX ); + + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_get_mcast_fwd_tbl: " + "Max MFT block = %u, Max position = %u\n", 
max_block_id_ho, + max_position ); + + p_dr_path = osm_physp_get_dr_path_ptr( p_physp ); + + for( block_id_ho = 0; block_id_ho <= max_block_id_ho; block_id_ho++) + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_get_mcast_fwd_tbl: " + "Retrieving MFT block %u\n", block_id_ho ); + } + + for( position = 0; position <= max_position; position++ ) + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_get_mcast_fwd_tbl: " + "Retrieving MFT position %u\n", position ); + } + + attr_mod_ho = block_id_ho | position << IB_MCAST_POSITION_SHIFT; + status = osm_req_get( + p_rcv->p_req, + p_dr_path, + IB_MAD_ATTR_MCAST_FWD_TBL, + cl_hton32( attr_mod_ho ), + CL_DISP_MSGID_NONE, + &context ); + if( status != IB_SUCCESS ) + { + /* continue the loop despite the error */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_get_mcast_fwd_tbl: ERR 3607: " + "Failure initiating PortInfo request (%s)\n", + ib_get_err_str(status)); + } + } + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} +#endif + +/********************************************************************** + Lock must be held on entry to this function. +**********************************************************************/ +static void +__osm_si_rcv_process_new( + IN const osm_si_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + osm_switch_t *p_sw; + osm_switch_t *p_check; + ib_switch_info_t *p_si; + ib_smp_t *p_smp; + cl_qmap_t *p_sw_guid_tbl; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, __osm_si_rcv_process_new ); + + CL_ASSERT( p_madw ); + + p_sw_guid_tbl = &p_rcv->p_subn->sw_guid_tbl; + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_si = (ib_switch_info_t*)ib_smp_get_payload_ptr( p_smp ); + + osm_dump_switch_info( p_rcv->p_log, p_si, OSM_LOG_DEBUG ); + + /* + Allocate a new switch object for this switch, + and place it in the switch table. + */ + p_sw = osm_switch_new( p_node, p_madw ); + if( p_sw == NULL ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_process_new: ERR 3608: " + "Unable to allocate new switch object\n" ); + goto Exit; + } + + /* set subnet max mlid to the minimum MulticastFDBCap of all switches */ + if ( p_sw->mcast_tbl.max_mlid_ho < p_rcv->p_subn->max_multicast_lid_ho ) + { + p_rcv->p_subn->max_multicast_lid_ho = p_sw->mcast_tbl.max_mlid_ho; + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_si_rcv_process_new: " + "Subnet max multicast lid is 0x%X\n", + p_rcv->p_subn->max_multicast_lid_ho ); + } + + /* set subnet max unicast lid to the minimum LinearFDBCap of all switches */ + if ( p_sw->fwd_tbl.p_lin_tbl->size < p_rcv->p_subn->max_unicast_lid_ho ) + { + p_rcv->p_subn->max_unicast_lid_ho = p_sw->fwd_tbl.p_lin_tbl->size; + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_si_rcv_process_new: " + "Subnet max unicast lid is 0x%X\n", + p_rcv->p_subn->max_unicast_lid_ho ); + } + + p_check = (osm_switch_t*)cl_qmap_insert( p_sw_guid_tbl, + osm_node_get_node_guid( p_node ), + &p_sw->map_item ); + + if( p_check != p_sw ) + { + /* + This shouldn't happen since we hold the lock! + */ + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_si_rcv_process_new: ERR 3605: " + "Unable to add new switch object to database\n" ); + osm_switch_delete( &p_sw ); + goto Exit; + } + + p_node->sw = p_sw; + + /* + Update the switch info according to the + info we just received. 
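+ Then kick off retrieval of per-port PortInfo and the linear forwarding table.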
+ */ + osm_switch_set_switch_info( p_sw, p_si ); + osm_switch_discovery_count_inc( p_sw ); + + /* + Get the PortInfo attribute for every port. + */ + __osm_si_rcv_get_port_info( p_rcv, p_sw, p_madw ); + __osm_si_rcv_get_fwd_tbl( p_rcv, p_sw ); + + /* + Don't bother retrieving the current multicast tables + from the switches. The current version of SM does + not support silent take-over of an existing multicast + configuration. + + Gathering the multicast tables can also generate large amounts + of extra subnet-init traffic. + + The code to retrieve the tables was fully debugged. + */ +#if 0 + if( !p_rcv->p_subn->opt.disable_multicast ) + __osm_si_rcv_get_mcast_fwd_tbl( p_rcv, p_sw ); +#endif + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + Lock must be held on entry to this function. + Return 1 if the caller is expected to send a change_detected event. + this can not be done internally as the event needs the lock... +**********************************************************************/ +static boolean_t +__osm_si_rcv_process_existing( + IN const osm_si_rcv_t* const p_rcv, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + osm_switch_t *p_sw = p_node->sw; + ib_switch_info_t *p_si; + osm_si_context_t *p_si_context; + ib_smp_t *p_smp; + boolean_t is_change_detected = FALSE; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_si_rcv_process_existing ); + + CL_ASSERT( p_madw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_si = (ib_switch_info_t*)ib_smp_get_payload_ptr( p_smp ); + p_si_context = osm_madw_get_si_context_ptr( p_madw ); + + if( p_si_context->set_method ) + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_process_existing: " + "Received logical SetResp()\n" ); + } + + osm_switch_set_switch_info( p_sw, p_si ); + } + else + { + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_process_existing: " + "Received logical GetResp()\n" ); + } + + osm_switch_set_switch_info( p_sw, p_si ); + + /* + Check the port state change bit. If true, then this switch + has seen a port state transition, so continue probing. + */ + if( p_si_context->light_sweep == TRUE ) + { + /* This is a light sweep */ + /* If the mad was returned with an error - + signal a change to the state manager. */ + if ( ib_smp_get_status( p_smp ) != 0 ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_si_rcv_process_existing: " + "GetResp() received with error in light sweep. " + "Commencing heavy sweep\n" ); + is_change_detected = TRUE; + } + else + { + /* + If something changed, then just signal the state + manager. Don't attempt to probe further during + a light sweep. + */ + if( ib_switch_info_get_state_change( p_si ) ) + { + osm_dump_switch_info( p_rcv->p_log, p_si, OSM_LOG_DEBUG ); + is_change_detected = TRUE; + } + } + } + else + { + /* + This is a heavy sweep. Get information regardless + of the state change bit. 
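+ PortInfo is re-fetched only on the switch's first discovery in this sweep (tracked by discovery_count below).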
+ */ + osm_switch_discovery_count_inc( p_sw ); + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_si_rcv_process_existing: " + "discovery_count is:%u\n", + osm_switch_discovery_count_get( p_sw ) ); + + /* If this is the first discovery - then get the port_info */ + if ( osm_switch_discovery_count_get( p_sw ) == 1 ) + __osm_si_rcv_get_port_info( p_rcv, p_sw, p_madw ); + else + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "__osm_si_rcv_process_existing: " + "Not discovering again through switch:0x%" + PRIx64 "\n", + osm_node_get_node_guid( p_sw->p_node) ); + } + } + } + + OSM_LOG_EXIT( p_rcv->p_log ); + return is_change_detected; +} + +/********************************************************************** + **********************************************************************/ +void +osm_si_rcv_construct( + IN osm_si_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_si_rcv_destroy( + IN osm_si_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_si_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_si_rcv_init( + IN osm_si_rcv_t* const p_rcv, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN osm_req_t* const p_req, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + OSM_LOG_ENTER( p_log, osm_si_rcv_init ); + + osm_si_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_req = p_req; + p_rcv->p_state_mgr = p_state_mgr; + + OSM_LOG_EXIT( p_rcv->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_si_rcv_process( + IN const osm_si_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_node_guid_tbl; + ib_switch_info_t *p_si; + ib_smp_t *p_smp; + osm_node_t *p_node; + ib_net64_t node_guid; + osm_si_context_t *p_context; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_si_rcv_process ); + + CL_ASSERT( p_madw ); + + p_node_guid_tbl = &p_rcv->p_subn->node_guid_tbl; + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_si = (ib_switch_info_t*)ib_smp_get_payload_ptr( p_smp ); + + /* + Acquire the switch object and add the switch info. 
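+ The owning node is found by the GUID saved in the MAD context when the SwitchInfo request was sent.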
+ */ + + p_context = osm_madw_get_si_context_ptr( p_madw ); + + node_guid = p_context->node_guid; + + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_DEBUG, + "osm_si_rcv_process: " + "Switch GUID 0x%016" PRIx64 + ", TID 0x%" PRIx64 "\n", + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + } + + CL_PLOCK_EXCL_ACQUIRE( p_rcv->p_lock ); + + p_node = (osm_node_t*)cl_qmap_get( p_node_guid_tbl, node_guid ); + if( p_node == (osm_node_t*)cl_qmap_end( p_node_guid_tbl ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_si_rcv_process: ERR 3606: " + "SwitchInfo received for nonexistent node " + "with GUID 0x%" PRIx64 "\n", + cl_ntoh64( node_guid ) ); + } + else + { + + /* + Hack for bad value in Mellanox switch + */ + if( cl_ntoh16( p_si->lin_top ) > IB_LID_UCAST_END_HO ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_si_rcv_process: ERR 3610: " + "\n\t\t\t\tBad LinearFDBTop value = 0x%X " + "on switch 0x%" PRIx64 + "\n\t\t\t\tForcing correction to 0x%X\n", + cl_ntoh16( p_si->lin_top ), + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + 0 ); + + p_si->lin_top = 0; + } + + /* + Acquire the switch object for this switch. + */ + if( !p_node->sw ) + { + __osm_si_rcv_process_new( p_rcv, p_node, p_madw ); + /* + A new switch was found during the sweep so we need + to ignore the current LFT settings. + */ + p_rcv->p_subn->ignore_existing_lfts = TRUE; + } + else + { + /* we might get back a request for signaling change was detected */ + if (__osm_si_rcv_process_existing( p_rcv, p_node, p_madw )) + { + CL_PLOCK_RELEASE( p_rcv->p_lock ); + osm_state_mgr_process( p_rcv->p_state_mgr, + OSM_SIGNAL_CHANGE_DETECTED ); + goto Exit; + } + } + } + + CL_PLOCK_RELEASE( p_rcv->p_lock ); + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv_ctrl.c new file mode 100644 index 00000000..4b2170de --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sw_info_rcv_ctrl.c @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_si_rcv_ctrl_t. + * This object represents the SwitchInfo request controller object. 
+ * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_si_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_si_rcv_process( ((osm_si_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_si_rcv_ctrl_construct( + IN osm_si_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_si_rcv_ctrl_destroy( + IN osm_si_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_si_rcv_ctrl_init( + IN osm_si_rcv_ctrl_t* const p_ctrl, + IN osm_si_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_si_rcv_ctrl_init ); + + osm_si_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_SWITCH_INFO, + __osm_si_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_si_rcv_ctrl_init: ERR 3701: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_sweep_fail_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_sweep_fail_ctrl.c new file mode 100644 index 00000000..547825cc --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_sweep_fail_ctrl.c @@ -0,0 +1,133 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_sweep_fail_ctrl_t. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.5 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_sweep_fail_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + osm_sweep_fail_ctrl_t* const p_ctrl = (osm_sweep_fail_ctrl_t*)context; + + OSM_LOG_ENTER( p_ctrl->p_log, __osm_sweep_fail_ctrl_disp_callback ); + + UNUSED_PARAM( p_data ); + /* + Notify the state manager that we had a light sweep failure. + */ + osm_state_mgr_process( p_ctrl->p_state_mgr, + OSM_SIGNAL_LIGHT_SWEEP_FAIL ); + + OSM_LOG_EXIT( p_ctrl->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_sweep_fail_ctrl_construct( + IN osm_sweep_fail_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_sweep_fail_ctrl_destroy( + IN osm_sweep_fail_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_sweep_fail_ctrl_init( + IN osm_sweep_fail_ctrl_t* const p_ctrl, + IN osm_log_t* const p_log, + IN osm_state_mgr_t* const p_state_mgr, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_sweep_fail_ctrl_init ); + + osm_sweep_fail_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + p_ctrl->p_disp = p_disp; + p_ctrl->p_state_mgr = p_state_mgr; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_LIGHT_SWEEP_FAIL, + __osm_sweep_fail_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_sweep_fail_ctrl_init: ERR 3501: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_switch.c b/branches/Ndi/ulp/opensm/user/opensm/osm_switch.c new file mode 100644 index 00000000..23bb8b68 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_switch.c @@ -0,0 +1,550 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_switch_t. + * This object represents an Infiniband switch. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.13 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_switch_construct( + IN osm_switch_t* const p_sw ) +{ + CL_ASSERT( p_sw ); + memset( p_sw, 0, sizeof(*p_sw) ); + osm_lid_matrix_construct( &p_sw->lmx ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_switch_init( + IN osm_switch_t* const p_sw, + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_api_status_t status = IB_SUCCESS; + ib_switch_info_t *p_si; + ib_smp_t *p_smp; + uint8_t num_ports; + uint32_t port_num; + + CL_ASSERT( p_sw ); + CL_ASSERT( p_madw ); + CL_ASSERT( p_node ); + + osm_switch_construct( p_sw ); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + p_si = (ib_switch_info_t*)ib_smp_get_payload_ptr( p_smp ); + num_ports = osm_node_get_num_physp( p_node ); + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_SWITCH_INFO ); + + p_sw->p_node = p_node; + p_sw->switch_info = *p_si; + + status = osm_lid_matrix_init( &p_sw->lmx, num_ports ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osm_fwd_tbl_init( &p_sw->fwd_tbl, p_si ); + if( status != IB_SUCCESS ) + goto Exit; + + p_sw->p_prof = malloc( sizeof(*p_sw->p_prof) * num_ports ); + if( p_sw->p_prof == NULL ) + { + status = IB_INSUFFICIENT_MEMORY; + goto Exit; + } + + memset( p_sw->p_prof, 0, sizeof(*p_sw->p_prof) * num_ports ); + + status = osm_mcast_tbl_init( &p_sw->mcast_tbl, + osm_node_get_num_physp( p_node ), cl_ntoh16( p_si->mcast_cap ) ); + if( status != IB_SUCCESS ) + goto Exit; + + for( port_num = 0; port_num < num_ports; port_num++ ) + osm_port_prof_construct( &p_sw->p_prof[port_num] ); + + Exit: + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_switch_destroy( + IN osm_switch_t* const p_sw ) +{ + /* free memory to avoid leaks */ + 
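+  /* Resources are released in the reverse order of their
+     initialization in osm_switch_init(): mcast table, port
+     profiles, forwarding table, then the lid matrix. */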
osm_mcast_tbl_destroy( &p_sw->mcast_tbl ); + free( p_sw->p_prof ); + osm_fwd_tbl_destroy( &p_sw->fwd_tbl ); + osm_lid_matrix_destroy( &p_sw->lmx ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_switch_delete( + IN OUT osm_switch_t** const pp_sw ) +{ + osm_switch_destroy( *pp_sw ); + free( *pp_sw ); + *pp_sw = NULL; +} + +/********************************************************************** + **********************************************************************/ +osm_switch_t* +osm_switch_new( + IN osm_node_t* const p_node, + IN const osm_madw_t* const p_madw ) +{ + ib_api_status_t status; + osm_switch_t *p_sw; + + p_sw = (osm_switch_t*)malloc( sizeof(*p_sw) ); + if( p_sw ) + { + memset( p_sw, 0, sizeof(*p_sw) ); + status = osm_switch_init( p_sw, p_node, p_madw ); + if( status != IB_SUCCESS ) + osm_switch_delete( &p_sw ); + } + + return( p_sw ); +} + +/********************************************************************** + **********************************************************************/ +boolean_t +osm_switch_get_fwd_tbl_block( + IN const osm_switch_t* const p_sw, + IN const uint32_t block_id, + OUT uint8_t* const p_block ) +{ + uint16_t base_lid_ho; + uint16_t max_lid_ho; + uint16_t lid_ho; + uint16_t block_top_lid_ho; + uint32_t lids_per_block; + osm_fwd_tbl_t *p_tbl; + boolean_t return_flag = FALSE; + + CL_ASSERT( p_sw ); + CL_ASSERT( p_block ); + + p_tbl = osm_switch_get_fwd_tbl_ptr( p_sw ); + max_lid_ho = osm_switch_get_max_lid_ho( p_sw ); + lids_per_block = osm_fwd_tbl_get_lids_per_block( &p_sw->fwd_tbl ); + base_lid_ho = (uint16_t)(block_id * lids_per_block); + + if( base_lid_ho <= max_lid_ho ) + { + /* Initialize LIDs in block to invalid port number. */ + memset( p_block, 0xff, IB_SMP_DATA_SIZE ); + /* + Determine the range of LIDs we can return with this block. + */ + block_top_lid_ho = (uint16_t)(base_lid_ho + lids_per_block - 1); + if( block_top_lid_ho > max_lid_ho ) + block_top_lid_ho = max_lid_ho; + + /* + Configure the forwarding table with the routing + information for the specified block of LIDs. + */ + for( lid_ho = base_lid_ho; lid_ho <= block_top_lid_ho; lid_ho++ ) + { + p_block[lid_ho - base_lid_ho] = osm_fwd_tbl_get( p_tbl, lid_ho ); + } + + return_flag = TRUE; + } + + return( return_flag ); +} + +/********************************************************************** + **********************************************************************/ +uint8_t +osm_switch_recommend_path( + IN const osm_switch_t* const p_sw, + IN const uint16_t lid_ho, + IN const boolean_t ignore_existing, + IN OUT uint64_t *remote_sys_guids, + IN OUT uint16_t *p_num_used_sys, + IN OUT uint64_t *remote_node_guids, + IN OUT uint16_t *p_num_used_nodes + ) +{ + /* + We support an enhanced LMC aware routing mode: + In the case of LMC > 0, we can track the remote side + system and node for all of the lids of the target + and try and avoid routing again through the same + system / node. + + If the procedure is provided with the tracking arrays + and counters we can conduct this algorithm. 
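+    For illustration: with LMC = 2 the target port owns four
+    consecutive LIDs; when the second of those LIDs is routed,
+    a least-hop port whose remote system (and, failing that,
+    remote node) was not used for the first LID is preferred,
+    so the lid-mask paths spread across distinct hardware.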
+  */
+  boolean_t routing_for_lmc = remote_sys_guids && remote_node_guids &&
+    p_num_used_sys && p_num_used_nodes;
+  boolean_t sys_used, node_used;
+  uint16_t i;
+  uint8_t hops;
+  uint8_t least_hops;
+  uint8_t port_num;
+  uint8_t num_ports;
+  uint32_t least_paths = 0xFFFFFFFF;
+  /*
+    The following track the least paths in case the
+    route should go through a new system/node.
+  */
+  uint32_t least_paths_other_sys = 0xFFFFFFFF;
+  uint32_t least_paths_other_nodes = 0xFFFFFFFF;
+  uint32_t check_count;
+  uint8_t best_port = 0;
+  /*
+    These variables track the best port if it connects to
+    an as-yet unused system/node.
+  */
+  uint8_t best_port_other_sys = 0;
+  uint8_t best_port_other_node = 0;
+  boolean_t port_found = FALSE;
+  osm_physp_t *p_physp;
+  osm_physp_t *p_rem_physp;
+  osm_node_t *p_rem_node;
+
+  CL_ASSERT( lid_ho > 0 );
+
+  num_ports = osm_switch_get_num_ports( p_sw );
+
+  least_hops = osm_switch_get_least_hops( p_sw, lid_ho );
+  if ( least_hops == OSM_NO_PATH )
+    return (OSM_NO_PATH);
+
+  /*
+    First, inquire with the forwarding table for an existing
+    route. If one is found, honor it unless:
+    1. the ignore-existing flag is set,
+    2. the physical port is invalid or unhealthy,
+    3. the physical port has no remote port (the link is down), or
+    4. the path through it is not a min-hop path (avoid loops).
+  */
+  if( !ignore_existing )
+  {
+    port_num = osm_fwd_tbl_get( &p_sw->fwd_tbl, lid_ho );
+
+    if (port_num != OSM_NO_PATH)
+    {
+      p_physp = osm_node_get_physp_ptr(p_sw->p_node, port_num);
+      /*
+        Don't be too trusting of the current forwarding table!
+        Verify that the port number is legal and that the
+        LID is reachable through this port.
+      */
+      if( (port_num < num_ports ) &&
+          osm_physp_is_valid(p_physp) &&
+          osm_physp_is_healthy(p_physp) &&
+          osm_physp_get_remote(p_physp) )
+      {
+        hops = osm_switch_get_hop_count( p_sw, lid_ho, port_num );
+        /*
+          If we aren't using a pre-defined user routing function,
+          we need to make sure that the current path is a minimum
+          one. When such a user function is provided, this check is
+          skipped and the old routing is used.
+          Note: this means it is the user's job to clean any data
+          in the forwarding tables that should be overridden by the
+          minimum-hop function.
+        */
+        if ( hops == least_hops )
+        {
+          return( port_num );
+        }
+      }
+    }
+  }
+
+  /*
+    This algorithm selects a port based on a statically
+    load-balanced selection across equal hop-count ports.
+    There is lots of room for improved sophistication here,
+    possibly guided by user configuration info.
+  */
+
+  /*
+    OpenSM routing is "local" - it does not consider the full
+    lid-to-lid path. As such we cannot guarantee a path will not
+    loop if we do not always follow least hops.
+    So we must abort if not least hops.
+  */
+
+  /* port number starts with zero and num_ports is 1 + num phys ports */
+  for ( port_num = 0; port_num < num_ports; port_num++ )
+  {
+    if ( osm_switch_get_hop_count( p_sw, lid_ho, port_num ) == least_hops)
+    {
+      /* Make sure it is not down or unhealthy. */
+      p_physp = osm_node_get_physp_ptr(p_sw->p_node, port_num);
+      if (osm_physp_is_valid(p_physp) &&
+          osm_physp_is_healthy(p_physp) &&
+          /*
+            we require all non-SMA ports (port_num != 0) to be
+            linked in order to be routed through
+          */
+          (!port_num || osm_physp_get_remote(p_physp)))
+      {
+
+        /*
+          We located a least-hop port, possibly one of many.
+          For this port, check the running total count of
+          the number of paths through this port. Select
+          the port routing the least number of paths.
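+          (For example, if two least-hop candidate ports currently
+          carry 12 and 9 paths respectively, the port carrying 9
+          paths is chosen - illustrative numbers.)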
+        */
+        check_count = osm_port_prof_path_count_get(
+          &p_sw->p_prof[port_num] );
+
+        /*
+          Advanced LMC routing requires tracking of the
+          best port by the node connected to the other side of
+          it.
+        */
+        if (routing_for_lmc && port_num)
+        {
+#if 0
+          printf("LID:0x%X SYS:%d NODE:%d\n", lid_ho, *p_num_used_sys, *p_num_used_nodes);
+#endif
+
+          /* Get the remote node. */
+          p_rem_physp = osm_physp_get_remote(p_physp);
+          p_rem_node = osm_physp_get_node_ptr(p_rem_physp);
+
+          /* Is the system GUID already used? */
+          sys_used = FALSE;
+          for (i = 0; !sys_used && (i < *p_num_used_sys); i++)
+            if (!memcmp(&p_rem_node->node_info.sys_guid,
+                        &remote_sys_guids[i],
+                        sizeof(uint64_t)))
+              sys_used = TRUE;
+
+          /* If not, update the least paths for this case. */
+          if (!sys_used)
+          {
+            if (check_count < least_paths_other_sys)
+            {
+              least_paths_other_sys = check_count;
+              best_port_other_sys = port_num;
+            }
+          }
+          else
+          { /* same system found - try the node */
+
+            /* Is the node GUID already used? */
+            node_used = FALSE;
+            for (i = 0; !node_used && (i < *p_num_used_nodes); i++)
+              if (!memcmp(&p_rem_node->node_info.node_guid,
+                          &remote_node_guids[i],
+                          sizeof(uint64_t)))
+                node_used = TRUE;
+
+            /* If not, update the least paths for this case. */
+            if (!node_used)
+            {
+              if (check_count < least_paths_other_nodes)
+              {
+                least_paths_other_nodes = check_count;
+                best_port_other_node = port_num;
+              }
+            }
+
+          } /* same system found */
+        } /* routing for LMC mode */
+
+        /*
+          The count is the minimum seen so far, and also lower
+          than the maximum subscribed.
+        */
+        if( check_count < least_paths )
+        {
+          port_found = TRUE;
+          best_port = port_num;
+          least_paths = check_count;
+        }
+      }
+    }
+  }
+
+  if ( port_found == FALSE )
+    return (OSM_NO_PATH);
+
+  /*
+    If we are in enhanced routing mode and the best port is not
+    the local port 0:
+  */
+  if (routing_for_lmc && best_port)
+  {
+    /* Select the least-hop port of an unused system first. */
+    if (best_port_other_sys)
+      best_port = best_port_other_sys;
+    else if (best_port_other_node)
+      best_port = best_port_other_node;
+
+    /* Track the remote node and system of the port used. */
+    p_physp = osm_node_get_physp_ptr(p_sw->p_node, best_port);
+    p_rem_physp = osm_physp_get_remote(p_physp);
+    p_rem_node = osm_physp_get_node_ptr(p_rem_physp);
+    memcpy(&remote_node_guids[*p_num_used_nodes],
+           &(p_rem_node->node_info.node_guid),
+           sizeof(uint64_t));
+    (*p_num_used_nodes)++;
+    memcpy(&remote_sys_guids[*p_num_used_sys],
+           &(p_rem_node->node_info.sys_guid),
+           sizeof(uint64_t));
+    (*p_num_used_sys)++;
+  }
+
+  return( best_port );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_switch_prepare_path_rebuild(
+  IN osm_switch_t* const p_sw )
+{
+  uint8_t port_num;
+  uint8_t num_ports;
+
+  num_ports = osm_switch_get_num_ports( p_sw );
+  osm_lid_matrix_clear( &p_sw->lmx );
+  for( port_num = 0; port_num < num_ports; port_num++ )
+    osm_port_prof_construct( &p_sw->p_prof[port_num] );
+}
+
+/**********************************************************************
+ **********************************************************************/
+uint8_t
+osm_switch_recommend_mcast_path(
+  IN osm_switch_t* const p_sw,
+  IN uint16_t const lid_ho,
+  IN uint16_t const mlid_ho,
+  IN boolean_t const ignore_existing )
+{
+  uint8_t hops;
+  uint8_t port_num;
+  uint8_t num_ports;
+  uint8_t least_hops;
+
+  CL_ASSERT( lid_ho > 0 );
+  CL_ASSERT( mlid_ho >= IB_LID_MCAST_START_HO );
+
+  num_ports = osm_switch_get_num_ports( p_sw );
+
+  /*
+    If the user wants us to ignore existing multicast routes,
+    then simply return the shortest hop count path to the
+    target port.
+
+    Otherwise, return the first port that has a path to the target,
+    picking from the ports that are already in the multicast group.
+  */
+  if( !ignore_existing )
+  {
+    for( port_num = 1; port_num < num_ports; port_num++ )
+    {
+      if( osm_mcast_tbl_is_port( &p_sw->mcast_tbl, mlid_ho, port_num ) )
+      {
+        /*
+          Don't be too trusting of the current forwarding table!
+          Verify that the LID is reachable through this port.
+        */
+        hops = osm_switch_get_hop_count( p_sw, lid_ho, port_num );
+        if( hops != OSM_NO_PATH )
+        {
+          return( port_num );
+        }
+      }
+    }
+  }
+
+  /*
+    Either no existing mcast paths reach this port or we are
+    ignoring existing paths.
+
+    Determine the best multicast path to the target. Note that this
+    algorithm is slightly different from the one used for unicast route
+    recommendation. In this case (multicast), we must NOT
+    perform any sort of load balancing. We MUST take the FIRST
+    port found that has the lowest hop count path. This prevents
+    more than one multicast path to the same remote switch, which
+    prevents a multicast loop. Multicast loops are bad, since the same
+    multicast packet will go around and around, inevitably creating
+    a black hole that will destroy the Earth in a fiery conflagration.
+  */
+  least_hops = osm_switch_get_least_hops( p_sw, lid_ho );
+  for( port_num = 0; port_num < num_ports; port_num++ )
+  {
+    if( osm_switch_get_hop_count( p_sw, lid_ho, port_num ) == least_hops )
+      break;
+  }
+
+  CL_ASSERT( port_num < num_ports );
+  return( port_num );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv.c
new file mode 100644
index 00000000..3fe79324
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv.c
@@ -0,0 +1,771 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_trap_rcv_t.
+ *    This object represents the Trap Receiver object.
+ *    This object is part of the opensm family of objects.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.12 $
+ */
+
+#if HAVE_CONFIG_H
+# include
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/**********************************************************************
+ *
+ * TRAP HANDLING:
+ *
+ * Assuming traps can be caused by bad hardware, we should provide
+ * a mechanism for filtering their propagation into the actual logic
+ * of OpenSM, such that it is not overloaded by them.
+ *
+ * We will provide a trap filtering mechanism with "aging" capability.
+ * This mechanism will track incoming traps, classify them by their
+ * source and content, and provide back their age.
+ *
+ * A timer running in the background will toggle a timer counter
+ * that should be referenced by the aging algorithm.
+ * To provide efficient handling of aging, we also track all traps
+ * in a list sorted by their age.
+ *
+ * The generic aging tracker mechanism is implemented in the
+ * cl_aging_tracker object.
+ *
+ **********************************************************************/
+
+typedef struct _osm_trap_aging_tracker_context
+{
+  osm_log_t*   p_log;
+  osm_physp_t* p_physp;
+} osm_trap_aging_tracker_context_t;
+
+/**********************************************************************
+ **********************************************************************/
+osm_physp_t *
+__get_physp_by_lid_and_num(
+  IN osm_trap_rcv_t* const p_rcv,
+  IN uint16_t lid,
+  IN uint8_t num)
+{
+  cl_ptr_vector_t *p_vec = &(p_rcv->p_subn->port_lid_tbl);
+  osm_port_t *p_port;
+
+  if (lid > cl_ptr_vector_get_size(p_vec))
+    return NULL;
+
+  p_port = (osm_port_t *)cl_ptr_vector_get(p_vec, lid);
+  if (!p_port)
+    return NULL;
+
+  if (osm_port_get_num_physp(p_port) < num)
+    return NULL;
+
+  return( osm_port_get_phys_ptr(p_port, num) );
+}
+
+/**********************************************************************
+ **********************************************************************/
+uint64_t
+osm_trap_rcv_aging_tracker_callback(
+  IN uint64_t key,
+  IN uint32_t num_regs,
+  IN void* context )
+{
+  osm_trap_rcv_t* p_rcv = (osm_trap_rcv_t*)context;
+  uint16_t lid;
+  uint8_t port_num;
+  osm_physp_t* p_physp;
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_trap_rcv_aging_tracker_callback );
+
+  if (osm_exit_flag)
+    /* We got an exit flag - do nothing. */
+    return 0;
+
+  lid = cl_ntoh16((uint16_t)(( key & 0x0000FFFF00000000ULL) >> 32));
+  port_num = (uint8_t)(( key & 0x00FF000000000000ULL) >> 48);
+
+  p_physp = __get_physp_by_lid_and_num( p_rcv, lid, port_num );
+  if (!p_physp)
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+             "osm_trap_rcv_aging_tracker_callback: "
+             "Cannot find port num:0x%X with lid:%u\n",
+             port_num, lid );
+  }
+  else
+  {
+    /* Make sure the physp is still valid. */
+    if ( osm_physp_is_valid(p_physp) )
+    {
+      /* If the port was marked unhealthy, restore it to healthy. */
+      if (!osm_physp_is_healthy(p_physp) )
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+                 "osm_trap_rcv_aging_tracker_callback: "
+                 "Restoring health of port num:%u with lid:%u\n",
+                 port_num, lid );
+
+        /* Set its health bit back to TRUE. */
+        osm_physp_set_health(p_physp, TRUE);
+      }
+    }
+  }
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+
+  /* We want to remove the event from the tracker, so
+     we need to return zero. */
+  return 0;
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_trap_rcv_construct(
+  IN osm_trap_rcv_t* const p_rcv )
+{
+  memset( p_rcv, 0, sizeof(*p_rcv) );
+  cl_event_wheel_construct( &p_rcv->trap_aging_tracker );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_trap_rcv_destroy(
+  IN osm_trap_rcv_t* const p_rcv )
+{
+  CL_ASSERT( p_rcv );
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_trap_rcv_destroy );
+
+  cl_event_wheel_destroy( &p_rcv->trap_aging_tracker );
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osm_trap_rcv_init(
+  IN osm_trap_rcv_t* const p_rcv,
+  IN osm_subn_t* const p_subn,
+  IN osm_stats_t* const p_stats,
+  IN osm_resp_t* const p_resp,
+  IN osm_log_t* const p_log,
+  IN osm_state_mgr_t* const p_state_mgr,
+  IN cl_plock_t* const p_lock )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( p_log, osm_trap_rcv_init );
+
+  osm_trap_rcv_construct( p_rcv );
+
+  p_rcv->p_log = p_log;
+  p_rcv->p_subn = p_subn;
+  p_rcv->p_lock = p_lock;
+  p_rcv->p_stats = p_stats;
+  p_rcv->p_resp = p_resp;
+  p_rcv->p_state_mgr = p_state_mgr;
+
+  cl_event_wheel_init( &p_rcv->trap_aging_tracker, p_log );
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+  return( status );
+}
+
+/**********************************************************************
+ * CRC calculation for notice identification
+ **********************************************************************/
+
+#define CRC32_POLYNOMIAL 0xEDB88320L
+
+/* Calculate the CRC for a given buffer. */
+static uint32_t
+__osm_trap_calc_crc32(void *buffer, uint32_t count)
+{
+  uint32_t temp1, temp2;
+  uint32_t crc = -1L;
+  unsigned char *p =
(unsigned char *)buffer; + /* pre - calculated table for faster crc calculation */ + static uint32_t crc_table[256]; + static boolean_t first = TRUE; + int i, j; + + /* if we need to initialize the lookup table */ + if (first) + { + /* calc the CRC table */ + for (i = 0; i <= 255; i++) + { + crc = i; + for (j = 8; j > 0; j--) + if (crc & 1) + crc = (crc >> 1) ^ CRC32_POLYNOMIAL; + else + crc >>= 1; + crc_table[i] = crc; + } + first = FALSE; + } + + crc = -1L; + /* do the calculation */ + while (count-- != 0) + { + temp1 = (crc >> 8) & 0x00FFFFFFL; + temp2 = crc_table[((int)crc ^ *p++) & 0xFF]; + crc = temp1 ^ temp2; + } + return crc; +} + +/******************************************************************** + ********************************************************************/ + +/* The key is created in the following manner: + port_num lid crc + \______/ \___/ \___/ + 16b 16b 32b +*/ +static void +__osm_trap_get_key( + IN uint16_t lid, + IN uint8_t port_num, + IN ib_mad_notice_attr_t* p_ntci, + OUT uint64_t *trap_key) +{ + uint32_t crc = 0; + + CL_ASSERT(trap_key); + + crc = __osm_trap_calc_crc32(p_ntci, sizeof(ib_mad_notice_attr_t)); + *trap_key = ((uint64_t)port_num << 48) | ((uint64_t)lid << 32) | crc; +} + +/********************************************************************** + **********************************************************************/ +static int +__print_num_received( + IN uint32_t num_received ) +{ + uint32_t i; + + /* Series is 10, 20, 50, 100, 200, 500, ... */ + i = num_received; + while (i >= 10) { + if (i % 10) + break; + i = i / 10; + } + if (i == 1 || i == 2 || i == 5) + return 1; + else + return 0; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_trap_rcv_process_request( + IN osm_trap_rcv_t* const p_rcv, + IN const osm_madw_t* const p_madw ) +{ + uint8_t payload[sizeof(ib_mad_notice_attr_t)]; + ib_smp_t* p_smp; + ib_mad_notice_attr_t* p_ntci = (ib_mad_notice_attr_t*)payload; + ib_api_status_t status; + osm_madw_t tmp_madw; /* we need a copy to last after repress */ + uint64_t trap_key; + uint32_t num_received; + osm_physp_t* p_physp; + cl_ptr_vector_t* p_tbl; + osm_port_t* p_port; + ib_net16_t source_lid = 0; + boolean_t is_gsi = TRUE; + uint8_t port_num = 0; + boolean_t physp_change_trap = FALSE; + uint64_t event_wheel_timeout = OSM_DEFAULT_TRAP_SUPRESSION_TIMEOUT; + boolean_t run_heavy_sweep = FALSE; + + OSM_LOG_ENTER( p_rcv->p_log, __osm_trap_rcv_process_request ); + + CL_ASSERT( p_madw ); + + if (osm_exit_flag) + { + /* + We got an exit flag - do nothing + Otherwise we start a sweep on the trap 144 caused by cleaning up + SM Cap bit... + */ + goto Exit; + } + + /* update the is_gsi flag according to the mgmt_class field */ + if (p_madw->p_mad->mgmt_class == IB_MCLASS_SUBN_LID || + p_madw->p_mad->mgmt_class == IB_MCLASS_SUBN_DIR ) + is_gsi = FALSE; + + /* + No real need to grab the lock for this function. + */ + memset( payload, 0, sizeof( payload ) ); + memset( &tmp_madw, 0, sizeof( tmp_madw )); + + p_smp = osm_madw_get_smp_ptr( p_madw ); + + if( p_smp->method != IB_MAD_METHOD_TRAP ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_trap_rcv_process_request: ERR 3801: " + "Unsupported method 0x%X\n", + p_smp->method ); + goto Exit; + } + + /* + * The NOTICE Attribute is part of the SMP CLASS attributes + * As such the actual attribute data resides inside the SMP + * payload. 
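+ * (The code below therefore copies IB_SMP_DATA_SIZE bytes out of
+ * the SMP data area into a local buffer before the notice fields
+ * are examined.)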
+ */
+
+  memcpy(payload, &(p_smp->data), IB_SMP_DATA_SIZE);
+  memcpy(&tmp_madw, p_madw, sizeof( tmp_madw ));
+
+  if (is_gsi == FALSE)
+  {
+    /* We are in the SMI flow. */
+    /*
+     * When we receive a trap with SLID = 0, it means it came
+     * from our own node. So we need to fix it.
+     */
+
+    if (p_madw->mad_addr.addr_type.smi.source_lid == 0)
+    {
+      /* Check if the sm_base_lid is 0. If yes, this means that
+         the local lid wasn't configured yet. Don't send a response
+         to the trap. */
+      if (p_rcv->p_subn->sm_base_lid == 0)
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+                 "__osm_trap_rcv_process_request: "
+                 "Received SLID=0 Trap with local LID=0. Ignoring MAD\n");
+        goto Exit;
+      }
+      osm_log( p_rcv->p_log, OSM_LOG_DEBUG,
+               "__osm_trap_rcv_process_request: "
+               "Received SLID=0 Trap. Using local LID:0x%04X instead\n",
+               cl_ntoh16(p_rcv->p_subn->sm_base_lid)
+               );
+      tmp_madw.mad_addr.addr_type.smi.source_lid = p_rcv->p_subn->sm_base_lid;
+    }
+
+    source_lid = tmp_madw.mad_addr.addr_type.smi.source_lid;
+
+    /* Print some info about the incoming trap. */
+    if (ib_notice_is_generic(p_ntci))
+    {
+      if ((p_ntci->g_or_v.generic.trap_num == CL_HTON16(129)) ||
+          (p_ntci->g_or_v.generic.trap_num == CL_HTON16(130)) ||
+          (p_ntci->g_or_v.generic.trap_num == CL_HTON16(131)))
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+                 "__osm_trap_rcv_process_request: "
+                 "Received Generic Notice type:0x%02X num:%u Producer:%u "
+                 "from LID:0x%04X Port %d TID:0x%016" PRIx64 "\n",
+                 ib_notice_get_type(p_ntci),
+                 cl_ntoh16(p_ntci->g_or_v.generic.trap_num),
+                 cl_ntoh32(ib_notice_get_prod_type(p_ntci)),
+                 cl_hton16(source_lid),
+                 p_ntci->data_details.ntc_129_131.port_num,
+                 cl_ntoh64(p_smp->trans_id)
+                 );
+      }
+      else
+      {
+        osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+                 "__osm_trap_rcv_process_request: "
+                 "Received Generic Notice type:0x%02X num:%u Producer:%u "
+                 "from LID:0x%04X TID:0x%016" PRIx64 "\n",
+                 ib_notice_get_type(p_ntci),
+                 cl_ntoh16(p_ntci->g_or_v.generic.trap_num),
+                 cl_ntoh32(ib_notice_get_prod_type(p_ntci)),
+                 cl_hton16(source_lid),
+                 cl_ntoh64(p_smp->trans_id)
+                 );
+      }
+    }
+    else
+    {
+      osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+               "__osm_trap_rcv_process_request: "
+               "Received Vendor Notice type:0x%02X vend:0x%06X dev:%u "
+               "from LID:0x%04X TID:0x%016" PRIx64 "\n",
+               ib_notice_get_type(p_ntci),
+               cl_ntoh32(ib_notice_get_vend_id(p_ntci)),
+               cl_ntoh16(p_ntci->g_or_v.vend.dev_id),
+               cl_ntoh16(source_lid),
+               cl_ntoh64(p_smp->trans_id)
+               );
+    }
+  }
+
+  osm_dump_notice( p_rcv->p_log, p_ntci, OSM_LOG_VERBOSE );
+
+  status = osm_resp_send( p_rcv->p_resp, &tmp_madw, 0, payload );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+             "__osm_trap_rcv_process_request: ERR 3802: "
+             "Error sending response (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  /*
+   * We would like to filter out recurring traps, so we track them by
+   * their source lid and content. If the same trap was already
+   * received within the aging time window more than 10 times,
+   * we simply ignore it. This is done only if we are in SMI mode.
+   */
+
+  if (is_gsi == FALSE)
+  {
+    if (ib_notice_is_generic(p_ntci) &&
+        ((p_ntci->g_or_v.generic.trap_num == CL_HTON16(129)) ||
+         (p_ntci->g_or_v.generic.trap_num == CL_HTON16(130)) ||
+         (p_ntci->g_or_v.generic.trap_num == CL_HTON16(131))) )
+    {
+      /* If this is a trap 129, 130, or 131, then this is a trap
+         signaling a change on a physical port.
+         Mark the flag physp_change_trap as TRUE.
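+         (Per the IBA spec these are the port-level urgent traps:
+         129 = local link integrity threshold exceeded,
+         130 = excessive buffer overrun threshold exceeded,
+         131 = flow control update watchdog timer expired.)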
+      */
+      physp_change_trap = TRUE;
+      /* The source_lid should be based on the source_lid from the trap. */
+      source_lid = p_ntci->data_details.ntc_129_131.lid;
+    }
+
+    /* If physp_change_trap is TRUE, the key will include the port number.
+       If not, the port number in the key will be zero. */
+    if ( physp_change_trap == TRUE )
+    {
+      port_num = p_ntci->data_details.ntc_129_131.port_num;
+      __osm_trap_get_key(source_lid, port_num, p_ntci, &trap_key);
+    }
+    else
+      __osm_trap_get_key(source_lid, 0, p_ntci, &trap_key);
+
+    /* Try to find it in the aging tracker. */
+    num_received = cl_event_wheel_num_regs(&p_rcv->trap_aging_tracker,
+                                           trap_key);
+
+    /* Now we know how many times this trap has been received. */
+    if (num_received > 10)
+    {
+      if (__print_num_received(num_received))
+        osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+                 "__osm_trap_rcv_process_request: ERR 3804: "
+                 "Received trap %u times consecutively\n",
+                 num_received);
+      /*
+       * If the trap provides info about a bad port,
+       * we mark it as unhealthy.
+       */
+      if (physp_change_trap == TRUE)
+      {
+        /* Get the port. */
+        p_physp = __get_physp_by_lid_and_num(
+          p_rcv,
+          cl_ntoh16(p_ntci->data_details.ntc_129_131.lid),
+          port_num
+          );
+
+        if (!p_physp)
+        {
+          osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+                   "__osm_trap_rcv_process_request: ERR 3805: "
+                   "Failed to find physical port by lid:0x%02X num:%u\n",
+                   cl_ntoh16(p_ntci->data_details.ntc_129_131.lid),
+                   p_ntci->data_details.ntc_129_131.port_num
+                   );
+        }
+        else
+        {
+          osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+                   "__osm_trap_rcv_process_request: "
+                   "Marking unhealthy physical port by lid:0x%02X num:%u\n",
+                   cl_ntoh16(p_ntci->data_details.ntc_129_131.lid),
+                   p_ntci->data_details.ntc_129_131.port_num
+                   );
+          /* Check if the current state of the p_physp is healthy.
+             If it is, this is the first change of state: run a heavy
+             sweep. If it is not, there is no need to mark it again -
+             just restart the timer. */
+          if ( osm_physp_is_healthy(p_physp) )
+          {
+            osm_physp_set_health(p_physp, FALSE);
+            /* Make sure we sweep again - force a heavy sweep. */
+            /* The sweep should be done only after the re-registration,
+               or else we'll lose track of the timer. */
+            run_heavy_sweep = TRUE;
+          }
+          /* If we are marking the port as unhealthy, we want to
+             keep this for a longer period of time than the
+             OSM_DEFAULT_TRAP_SUPRESSION_TIMEOUT. Use the
+             OSM_DEFAULT_UNHEALTHY_TIMEOUT. */
+          event_wheel_timeout = OSM_DEFAULT_UNHEALTHY_TIMEOUT;
+        }
+      }
+    }
+
+    /* Restart the aging in any case. */
+    /* If physp_change_trap is TRUE, register a callback to restore
+       the health bit. If not, no callback is needed. */
+    if (physp_change_trap == TRUE )
+      cl_event_wheel_reg(&p_rcv->trap_aging_tracker,
+                         trap_key,
+                         cl_get_time_stamp() + event_wheel_timeout,
+                         osm_trap_rcv_aging_tracker_callback, /* aging callback */
+                         p_rcv                                /* context */
+                         );
+    else
+      cl_event_wheel_reg(&p_rcv->trap_aging_tracker,
+                         trap_key,
+                         cl_get_time_stamp() + event_wheel_timeout,
+                         NULL, /* no callback */
+                         NULL  /* no context */
+                         );
+
+    /* If it was already registered, do nothing more. */
+    if ( num_received > 10 && run_heavy_sweep == FALSE )
+    {
+      if (__print_num_received(num_received))
+        osm_log( p_rcv->p_log, OSM_LOG_VERBOSE,
+                 "__osm_trap_rcv_process_request: "
+                 "Continuously received this trap %u times. Ignoring it\n",
+                 num_received);
+      goto Exit;
+    }
+  }
+
+  /* Do a sweep if we received a trap. */
+  if (p_rcv->p_subn->opt.sweep_on_trap)
+  {
+    /* If this is trap number 128 or run_heavy_sweep is TRUE, update the
+       force_immediate_heavy_sweep flag of the subnet.
+ Sweep also on traps 144/145 - these traps signal a change on a certain + port capability/system image guid. + TODO: In the future we can change this to just getting PortInfo on + this port instead of sweeping the entire subnet. */ + if (ib_notice_is_generic(p_ntci) && + ( (cl_ntoh16(p_ntci->g_or_v.generic.trap_num) == 128) || + (cl_ntoh16(p_ntci->g_or_v.generic.trap_num) == 144) || + (cl_ntoh16(p_ntci->g_or_v.generic.trap_num) == 145) || + run_heavy_sweep )) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_trap_rcv_process_request: " + "Forcing immediate heavy sweep. " + "Received trap:%u\n", + cl_ntoh16(p_ntci->g_or_v.generic.trap_num) ); + + p_rcv->p_subn->force_immediate_heavy_sweep = TRUE; + } + osm_state_mgr_process( p_rcv->p_state_mgr, OSM_SIGNAL_SWEEP ); + } + + /* If we reached here due to trap 129/130/131 - do not need to do + the notice report. Just goto exit. We know this is the case + if physp_change_trap is TRUE. */ + if ( physp_change_trap == TRUE ) + goto Exit; + + /* Add a call to osm_report_notice */ + /* We are going to report the notice - so need to fix the IssuerGID + accordingly. See IBA 1.1 P.653 or IBA 1.2 P.739 for details. */ + if (is_gsi) + { + if (tmp_madw.mad_addr.addr_type.gsi.global_route) + { + memcpy(&(p_ntci->issuer_gid), + &(tmp_madw.mad_addr.addr_type.gsi.grh_info.src_gid), + sizeof(ib_gid_t)); + } + else + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_trap_rcv_process_request: ERR 3806: " + "Received gsi trap with global_route FALSE. " + "Cannot update issuer_gid!\n" ); + goto Exit; + } + } + else + { + /* Need to use the IssuerLID */ + p_tbl = &p_rcv->p_subn->port_lid_tbl; + + CL_ASSERT( cl_ptr_vector_get_size(p_tbl) < 0x10000 ); + + if ((uint16_t)cl_ptr_vector_get_size(p_tbl) <= cl_ntoh16(source_lid) ) + { + /* the source lid is out of range */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_trap_rcv_process_request: " + "source lid is out of range:0x%X\n", + cl_ntoh16(source_lid) ); + + goto Exit; + } + p_port = cl_ptr_vector_get( p_tbl, cl_ntoh16(source_lid) ); + if ( p_port == 0) + { + /* We have the lid - but no corresponding port */ + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "__osm_trap_rcv_process_request: " + "Cannot find port according to lid:0x%X\n", + cl_ntoh16(source_lid) ); + + goto Exit; + } + + p_ntci->issuer_gid.unicast.prefix = p_rcv->p_subn->opt.subnet_prefix; + p_ntci->issuer_gid.unicast.interface_id = p_port->guid; + } + + /* we need a lock here as the InformInfo DB must be stable */ + CL_PLOCK_ACQUIRE( p_rcv->p_lock ); + status = osm_report_notice(p_rcv->p_log, p_rcv->p_subn, p_ntci); + CL_PLOCK_RELEASE( p_rcv->p_lock ); + if( status != IB_SUCCESS ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_trap_rcv_process_request: ERR 3803: " + "Error sending trap reports (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_rcv->p_log ); +} + +#if 0 +/********************************************************************** + CURRENTLY WE ARE NOT CREATING TRAPS - SO THIS CALL IS AN ERROR +**********************************************************************/ +static void +__osm_trap_rcv_process_sm( + IN const osm_trap_rcv_t* const p_rcv, + IN const osm_remote_sm_t* const p_sm ) +{ + /* const ib_sm_info_t* p_smi; */ + OSM_LOG_ENTER( p_rcv->p_log, __osm_trap_rcv_process_sm ); + + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "__osm_trap_rcv_process_sm: ERR 3807: " + "This function is not supported yet\n"); + + OSM_LOG_EXIT( p_rcv->p_log ); +} +#endif + 
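+#if 0
+/*
+ Illustrative helper (not built): decompose a trap key produced by
+ __osm_trap_get_key() back into its components. The masks mirror
+ the ones used in osm_trap_rcv_aging_tracker_callback(). Note that
+ the extracted lid is still in network byte order, exactly as it
+ was packed.
+*/
+static void
+__osm_trap_key_decompose(
+  IN uint64_t trap_key,
+  OUT ib_net16_t* p_lid,
+  OUT uint8_t* p_port_num,
+  OUT uint32_t* p_crc )
+{
+  /* port_num occupies bits 48-55, lid bits 32-47, crc bits 0-31 */
+  *p_port_num = (uint8_t)(( trap_key & 0x00FF000000000000ULL ) >> 48);
+  *p_lid = (ib_net16_t)(( trap_key & 0x0000FFFF00000000ULL ) >> 32);
+  *p_crc = (uint32_t)( trap_key & 0x00000000FFFFFFFFULL );
+}
+#endif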
+/**********************************************************************
+ CURRENTLY WE ARE NOT CREATING TRAPS - SO THIS CALL IS AN ERROR
+**********************************************************************/
+static void
+__osm_trap_rcv_process_response(
+  IN const osm_trap_rcv_t* const p_rcv,
+  IN const osm_madw_t* const p_madw )
+{
+
+  OSM_LOG_ENTER( p_rcv->p_log, __osm_trap_rcv_process_response );
+
+  osm_log( p_rcv->p_log, OSM_LOG_ERROR,
+           "__osm_trap_rcv_process_response: ERR 3808: "
+           "This function is not supported yet\n");
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+void
+osm_trap_rcv_process(
+  IN osm_trap_rcv_t* const p_rcv,
+  IN osm_madw_t* const p_madw )
+{
+  ib_smp_t *p_smp;
+
+  OSM_LOG_ENTER( p_rcv->p_log, osm_trap_rcv_process );
+
+  CL_ASSERT( p_madw );
+
+  p_smp = osm_madw_get_smp_ptr( p_madw );
+
+  /*
+    Determine if this is a request for our own Trap
+    or if this is a response to our request for another
+    SM's Trap.
+  */
+  if( ib_smp_is_response( p_smp ) )
+  {
+    __osm_trap_rcv_process_response( p_rcv, p_madw );
+  }
+  else
+  {
+    __osm_trap_rcv_process_request( p_rcv, p_madw );
+  }
+
+  OSM_LOG_EXIT( p_rcv->p_log );
+}
+
diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv_ctrl.c
new file mode 100644
index 00000000..d7803506
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/opensm/osm_trap_rcv_ctrl.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+/*
+ * Abstract:
+ *    Implementation of osm_trap_rcv_ctrl_t.
+ *    This object represents the Trap request controller object.
+ *    This object is part of the opensm family of objects.
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.3 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_trap_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_trap_rcv_process( ((osm_trap_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_trap_rcv_ctrl_construct( + IN osm_trap_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_trap_rcv_ctrl_destroy( + IN osm_trap_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_trap_rcv_ctrl_init( + IN osm_trap_rcv_ctrl_t* const p_ctrl, + IN osm_trap_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_trap_rcv_ctrl_init ); + + osm_trap_rcv_ctrl_construct( p_ctrl ); + + p_ctrl->p_log = p_log; + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_NOTICE, + __osm_trap_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_trap_rcv_ctrl_init: ERR 3901: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_file.c b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_file.c new file mode 100644 index 00000000..424fed24 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_file.c @@ -0,0 +1,413 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of the OpenSM unicast routing module that loads
+ *    routes from a dump file.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ */
+
+#if HAVE_CONFIG_H
+# include
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+static uint16_t remap_lid(osm_opensm_t *p_osm, uint16_t lid, ib_net64_t guid)
+{
+    osm_port_t *p_port;
+    uint16_t min_lid, max_lid;
+    uint8_t lmc;
+
+    p_port = (osm_port_t *)cl_qmap_get(&p_osm->subn.port_guid_tbl, guid);
+    if (!p_port ||
+        p_port == (osm_port_t *)cl_qmap_end(&p_osm->subn.port_guid_tbl)) {
+        osm_log(&p_osm->log, OSM_LOG_VERBOSE,
+                "remap_lid: cannot find port guid 0x%016" PRIx64
+                ", will use the same lid\n", cl_ntoh64(guid));
+        return lid;
+    }
+
+    osm_port_get_lid_range_ho(p_port, &min_lid, &max_lid);
+    if (min_lid <= lid && lid <= max_lid)
+        return lid;
+
+    lmc = osm_port_get_lmc(p_port);
+    return min_lid + (lid & ((1 << lmc) - 1));
+}
+
+static void add_path(osm_opensm_t * p_osm,
+                     osm_switch_t * p_sw, uint16_t lid, uint8_t port_num,
+                     ib_net64_t port_guid)
+{
+    uint16_t new_lid;
+    uint8_t old_port;
+
+    new_lid = port_guid ? remap_lid(p_osm, lid, port_guid) : lid;
+    old_port = osm_fwd_tbl_get(osm_switch_get_fwd_tbl_ptr(p_sw), new_lid);
+    if (old_port != OSM_NO_PATH && old_port != port_num) {
+        osm_log(&p_osm->log, OSM_LOG_VERBOSE,
+                "add_path: LID collision is detected on switch "
+                "0x%016" PRIx64 ", will overwrite LID 0x%x entry\n",
+                cl_ntoh64(osm_node_get_node_guid
+                          (osm_switch_get_node_ptr(p_sw))), new_lid);
+    }
+
+    p_osm->sm.ucast_mgr.lft_buf[new_lid] = port_num;
+    if (!(p_osm->subn.opt.port_profile_switch_nodes && port_guid &&
+          osm_get_switch_by_guid(&p_osm->subn, port_guid)))
+        osm_switch_count_path(p_sw, port_num);
+
+    osm_log(&p_osm->log, OSM_LOG_DEBUG,
+            "add_path: route 0x%04x(was 0x%04x) %u 0x%016" PRIx64
+            " is added to switch 0x%016" PRIx64 "\n",
+            new_lid, lid, port_num, cl_ntoh64(port_guid),
+            cl_ntoh64(osm_node_get_node_guid
+                      (osm_switch_get_node_ptr(p_sw))));
+}
+
+static void add_lid_hops(osm_opensm_t *p_osm, osm_switch_t *p_sw,
+                         uint16_t lid, ib_net64_t guid,
+                         uint8_t hops[], unsigned len)
+{
+    uint16_t new_lid;
+    uint8_t i;
+
+    new_lid = guid ? remap_lid(p_osm, lid, guid) : lid;
+    if (len > osm_switch_get_num_ports(p_sw))
+        len = osm_switch_get_num_ports(p_sw);
+
+    for (i = 0 ; i < len ; i++)
+        osm_switch_set_hops(p_sw, new_lid, i, hops[i]);
+}
+
+static int do_ucast_file_load(void *context)
+{
+    char line[1024];
+    char *file_name;
+    FILE *file;
+    ib_net64_t sw_guid, port_guid;
+    osm_opensm_t *p_osm = context;
+    osm_switch_t *p_sw;
+    uint16_t lid;
+    uint8_t port_num;
+    unsigned lineno;
+
+    file_name = p_osm->subn.opt.ucast_dump_file;
+    if (!file_name) {
+        osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS,
+                "do_ucast_file_load: ERR 6301: "
+                "ucast dump file name is not defined; "
+                "using default routing algorithm\n");
+        return -1;
+    }
+
+    file = fopen(file_name, "r");
+    if (!file) {
+        osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS,
+                "do_ucast_file_load: ERR 6302: "
+                "cannot open ucast dump file \'%s\'; "
+                "using default routing algorithm\n", file_name);
+        return -1;
+    }
+
+    lineno = 0;
+    p_sw = NULL;
+
+    while (fgets(line, sizeof(line) - 1, file) != NULL) {
+        char *p, *q;
+        lineno++;
+
+        p = line;
+        while (isspace(*p))
+            p++;
+
+        if (*p == '#')
+            continue;
+
+        if (!strncmp(p, "Multicast mlids", 15)) {
+            osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS,
+                    "do_ucast_file_load: ERR 6303: "
+                    "Multicast dump file detected; "
+                    "skipping parsing. Using default "
+                    "routing algorithm\n");
+        } else if (!strncmp(p, "Unicast lids", 12)) {
+            if (p_sw)
+                osm_ucast_mgr_set_fwd_table(&p_osm->sm.ucast_mgr, p_sw);
+            q = strstr(p, " guid 0x");
+            if (!q) {
+                osm_log(&p_osm->log, OSM_LOG_ERROR,
+                        "PARSE ERROR: %s:%u: "
+                        "cannot parse switch definition\n",
+                        file_name, lineno);
+                return -1;
+            }
+            p = q + 8;
+            sw_guid = strtoull(p, &q, 16);
+            if (q == p || !isspace(*q)) {
+                osm_log(&p_osm->log, OSM_LOG_ERROR,
+                        "PARSE ERROR: %s:%u: "
+                        "cannot parse switch guid: \'%s\'\n",
+                        file_name, lineno, p);
+                return -1;
+            }
+            sw_guid = cl_hton64(sw_guid);
+
+            p_sw = osm_get_switch_by_guid(&p_osm->subn, sw_guid);
+            if (!p_sw) {
+                osm_log(&p_osm->log, OSM_LOG_VERBOSE,
+                        "do_ucast_file_load: "
+                        "cannot find switch %016" PRIx64 "\n",
+                        cl_ntoh64(sw_guid));
+                continue;
+            }
+            memset(p_osm->sm.ucast_mgr.lft_buf, 0xff, IB_LID_UCAST_END_HO + 1);
+        } else if (p_sw && !strncmp(p, "0x", 2)) {
+            p += 2;
+            lid = (uint16_t)strtoul(p, &q, 16);
+            if (q == p || !isspace(*q)) {
+                osm_log(&p_osm->log, OSM_LOG_ERROR,
+                        "PARSE ERROR: %s:%u: "
+                        "cannot parse lid: \'%s\'\n",
+                        file_name, lineno, p);
+                return -1;
+            }
+            p = q;
+            while (isspace(*p))
+                p++;
+            port_num = (uint8_t)strtoul(p, &q, 10);
+            if (q == p || !isspace(*q)) {
+                osm_log(&p_osm->log, OSM_LOG_ERROR,
+                        "PARSE ERROR: %s:%u: "
+                        "cannot parse port: \'%s\'\n",
+                        file_name, lineno, p);
+                return -1;
+            }
+            p = q;
+            /* additionally try to extract the guid */
+            q = strstr(p, " portguid 0x");
+            if (!q) {
+                osm_log(&p_osm->log, OSM_LOG_VERBOSE,
+                        "PARSE WARNING: %s:%u: "
+                        "cannot find port guid "
+                        "(maybe broken dump): \'%s\'\n",
+                        file_name, lineno, p);
+                port_guid = 0;
+            }
+            else {
+                p = q + 12;
+                port_guid = strtoull(p, &q, 16);
+                if (q == p || (!isspace(*q) && *q != ':')) {
+                    osm_log(&p_osm->log, OSM_LOG_VERBOSE,
+                            "PARSE WARNING: %s:%u: "
+                            "cannot parse port guid "
+                            "(maybe broken dump): \'%s\'\n",
+                            file_name, lineno, p);
+                    port_guid = 0;
+                }
+            }
+            port_guid = cl_hton64(port_guid);
+            add_path(p_osm, p_sw, lid, port_num, port_guid);
+        }
+    }
+
+    if (p_sw)
+        osm_ucast_mgr_set_fwd_table(&p_osm->sm.ucast_mgr, p_sw);
+
+    fclose(file);
+    return 0;
+}
+
+static int do_lid_matrix_file_load(void *context)
+{
+    char line[1024];
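+    /* One hop value per switch port is parsed into hops[] below, from
+       dump lines of the form (illustrative):
+         0x0001: 00 01 01 02 portguid 0x0002c90200000001
+       i.e. a hex LID, a colon, one hex hop value per port, and an
+       optional port guid. */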
uint8_t hops[256]; + char *file_name; + FILE *file; + ib_net64_t guid; + osm_opensm_t *p_osm = context; + osm_switch_t *p_sw; + unsigned lineno; + uint16_t lid; + + file_name = p_osm->subn.opt.lid_matrix_dump_file; + if (!file_name) { + osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS, + "do_lid_matrix_file_load: ERR 6304: " + "lid matrix file name is not defined; " + "using default lid matrix generation algorithm\n"); + return -1; + } + + file = fopen(file_name, "r"); + if (!file) { + osm_log(&p_osm->log, OSM_LOG_ERROR|OSM_LOG_SYS, + "do_lid_matrix_file_load: ERR 6305: " + "cannot open lid matrix file \'%s\'; " + "using default lid matrix generation algorithm\n", + file_name); + return -1; + } + + lineno = 0; + p_sw = NULL; + + while (fgets(line, sizeof(line) - 1, file) != NULL) { + char *p, *q; + lineno++; + + p = line; + while (isspace(*p)) + p++; + + if (*p == '#') + continue; + + if (!strncmp(p, "Switch", 6)) { + q = strstr(p, " guid 0x"); + if (!q) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "PARSE ERROR: %s:%u: " + "cannot parse switch definition\n", + file_name, lineno); + return -1; + } + p = q + 8; + guid = strtoull(p, &q, 16); + if (q == p || !isspace(*q)) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "PARSE ERROR: %s:%u: " + "cannot parse switch guid: \'%s\'\n", + file_name, lineno, p); + return -1; + } + guid = cl_hton64(guid); + + p_sw = osm_get_switch_by_guid(&p_osm->subn, guid); + if (!p_sw) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "do_lid_matrix_file_load: " + "cannot find switch %016" PRIx64 "\n", + cl_ntoh64(guid)); + continue; + } + } else if (p_sw && !strncmp(p, "0x", 2)) { + unsigned long num; + unsigned len = 0; + + memset(hops, 0xff, sizeof(hops)); + + p += 2; + num = strtoul(p, &q, 16); + if (num > 0xffff || q == p || + (*q != ':' && !isspace(*q))) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "PARSE ERROR: %s:%u: " + "cannot parse lid: \'%s\'\n", + file_name, lineno, p); + return -1; + } + /* Just checked the range, so casting is safe */ + lid = (uint16_t)num; + p = q; + while (isspace(*p) || *p == ':') + p++; + while (len < 256 && *p && *p != '#') { + num = strtoul(p, &q, 16); + if (num > 0xff || q == p) { + osm_log(&p_osm->log, OSM_LOG_ERROR, + "PARSE ERROR: %s:%u: " + "cannot parse hops number: \'%s\'\n", + file_name, lineno, p); + return -1; + } + /* Just checked the range, so casting is safe */ + hops[len++] = (uint8_t)num; + p = q; + while (isspace(*p)) + p++; + } + /* additionally try to extract guid */ + q = strstr(p, " portguid 0x"); + if (!q) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "PARSE WARNING: %s:%u: " + "cannot find port guid " + "(maybe broken dump): \'%s\'\n", + file_name, lineno, p); + guid = 0; + } + else { + p = q + 12; + guid = strtoull(p, &q, 16); + if (q == p || !isspace(*q)) { + osm_log(&p_osm->log, OSM_LOG_VERBOSE, + "PARSE WARNING: %s:%u: " + "cannot parse port guid " + "(maybe broken dump): \'%s\'\n", + file_name, lineno, p); + guid = 0; + } + } + guid = cl_hton64(guid); + add_lid_hops(p_osm, p_sw, lid, guid, hops, len); + } + } + + fclose(file); + return 0; +} + +int osm_ucast_file_setup(osm_opensm_t * p_osm) +{ + p_osm->routing_engine.context = (void *)p_osm; + p_osm->routing_engine.build_lid_matrices = do_lid_matrix_file_load; + p_osm->routing_engine.ucast_build_fwd_tables = do_ucast_file_load; + return 0; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_ftree.c b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_ftree.c new file mode 100644 index 00000000..6a8eae2d --- /dev/null +++ 
b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_ftree.c
@@ -0,0 +1,3141 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of OpenSM FatTree routing
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+/*
+ * FatTree rank is bounded between 2 and 8:
+ *  - A tree of rank 1 has only trivial routing paths,
+ *    so there is no need to use FatTree routing.
+ *  - Why the maximum rank is 8:
+ *    Each node (switch) is assigned a unique tuple.
+ *    Switches are stored in two cl_qmaps - one is
+ *    ordered by guid, and the other by a key that is
+ *    generated from the tuple. Since cl_qmap supports only
+ *    a 64-bit key, the maximal tuple length is 8 bytes,
+ *    which means that the maximal tree rank is 8.
+ *    Note that the above also implies that each switch
+ *    can have at most 255 up/down ports.
+ */
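The 8-level ceiling above is a direct consequence of the 64-bit cl_qmap key: one byte per tree level, eight levels total. A minimal sketch of the packing this implies (it mirrors __osm_ftree_tuple_to_key() defined later in this file; the standalone names are illustrative):

    #include <stdint.h>
    #include <string.h>

    typedef uint8_t tuple8_t[8];    /* one byte per tree level */

    static uint64_t tuple8_to_key(const tuple8_t tuple)
    {
        uint64_t key;
        /* eight one-byte levels fit exactly into one 64-bit map key */
        memcpy(&key, tuple, sizeof(key));
        return key;
    }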
+
+#define FAT_TREE_MIN_RANK 2
+#define FAT_TREE_MAX_RANK 8
+
+typedef enum {
+    FTREE_DIRECTION_DOWN = -1,
+    FTREE_DIRECTION_SAME,
+    FTREE_DIRECTION_UP
+} ftree_direction_t;
+
+
+/***************************************************
+ **
+ **  Forward references
+ **
+ ***************************************************/
+
+struct ftree_sw_t_;
+struct ftree_hca_t_;
+struct ftree_port_t_;
+struct ftree_port_group_t_;
+struct ftree_fabric_t_;
+
+/***************************************************
+ **
+ **  ftree_tuple_t definition
+ **
+ ***************************************************/
+
+#define FTREE_TUPLE_BUFF_LEN 1024
+#define FTREE_TUPLE_LEN 8
+
+typedef uint8_t ftree_tuple_t[FTREE_TUPLE_LEN];
+typedef uint64_t ftree_tuple_key_t;
+
+/***************************************************
+ **
+ **  ftree_sw_table_element_t definition
+ **
+ ***************************************************/
+
+typedef struct {
+    cl_map_item_t map_item;
+    struct ftree_sw_t_ * p_sw;
+} ftree_sw_tbl_element_t;
+
+/***************************************************
+ **
+ **  ftree_fwd_tbl_t definition
+ **
+ ***************************************************/
+
+typedef uint8_t * ftree_fwd_tbl_t;
+#define FTREE_FWD_TBL_LEN (IB_LID_UCAST_END_HO + 1)
+
+/***************************************************
+ **
+ **  ftree_port_t definition
+ **
+ ***************************************************/
+
+typedef struct ftree_port_t_
+{
+    cl_map_item_t map_item;
+    uint8_t  port_num;          /* port number on the current node      */
+    uint8_t  remote_port_num;   /* port number on the remote node       */
+    uint32_t counter_up;        /* number of allocated routes upwards   */
+    uint32_t counter_down;      /* number of allocated routes downwards */
+} ftree_port_t;
+
+/***************************************************
+ **
+ **  ftree_port_group_t definition
+ **
+ ***************************************************/
+
+typedef struct ftree_port_group_t_
+{
+    cl_map_item_t map_item;
+    ib_net16_t base_lid;          /* base lid of the current node        */
+    ib_net16_t remote_base_lid;   /* base lid of the remote node         */
+    ib_net64_t port_guid;         /* port guid of this port              */
+    ib_net64_t remote_port_guid;  /* port guid of the remote port        */
+    ib_net64_t remote_node_guid;  /* node guid of the remote node        */
+    uint8_t remote_node_type;     /* IB_NODE_TYPE_{CA,SWITCH,ROUTER,...} */
+    union remote_hca_or_sw_
+    {
+        struct ftree_hca_t_ * remote_hca;
+        struct ftree_sw_t_ * remote_sw;
+    } remote_hca_or_sw;           /* pointer to remote hca/switch        */
+    cl_ptr_vector_t ports;        /* vector of ports to the same lid     */
+} ftree_port_group_t;
+
+/***************************************************
+ **
+ **  ftree_sw_t definition
+ **
+ ***************************************************/
+
+typedef struct ftree_sw_t_
+{
+    cl_map_item_t map_item;
+    osm_switch_t * p_osm_sw;
+    uint8_t rank;
+    ftree_tuple_t tuple;
+    ib_net16_t base_lid;
+    ftree_port_group_t ** down_port_groups;
+    uint8_t down_port_groups_num;
+    ftree_port_group_t ** up_port_groups;
+    uint8_t up_port_groups_num;
+    ftree_fwd_tbl_t lft_buf;
+} ftree_sw_t;
+
+/***************************************************
+ **
+ **  ftree_hca_t definition
+ **
+ ***************************************************/
+
+typedef struct ftree_hca_t_ {
+    cl_map_item_t map_item;
+    osm_node_t * p_osm_node;
+    ftree_port_group_t ** up_port_groups;
+    uint16_t up_port_groups_num;
+} ftree_hca_t;
+
+/***************************************************
+ **
+ **  ftree_fabric_t definition
+ **
+ ***************************************************/
+
+typedef
struct ftree_fabric_t_ +{ + osm_opensm_t * p_osm; + cl_qmap_t hca_tbl; + cl_qmap_t sw_tbl; + cl_qmap_t sw_by_tuple_tbl; + uint8_t tree_rank; + ftree_sw_t ** leaf_switches; + uint32_t leaf_switches_num; + uint16_t max_hcas_per_leaf; + cl_pool_t sw_fwd_tbl_pool; + uint16_t lft_max_lid_ho; + boolean_t fabric_built; +} ftree_fabric_t; + +/*************************************************** + ** + ** comparators + ** + ***************************************************/ + +static int OSM_CDECL +__osm_ftree_compare_switches_by_index( + IN const void * p1, + IN const void * p2) +{ + ftree_sw_t ** pp_sw1 = (ftree_sw_t **)p1; + ftree_sw_t ** pp_sw2 = (ftree_sw_t **)p2; + + uint16_t i; + for (i = 0; i < FTREE_TUPLE_LEN; i++) + { + if ((*pp_sw1)->tuple[i] > (*pp_sw2)->tuple[i]) + return 1; + if ((*pp_sw1)->tuple[i] < (*pp_sw2)->tuple[i]) + return -1; + } + return 0; +} + +/***************************************************/ + +static int OSM_CDECL +__osm_ftree_compare_port_groups_by_remote_switch_index( + IN const void * p1, + IN const void * p2) +{ + ftree_port_group_t ** pp_g1 = (ftree_port_group_t **)p1; + ftree_port_group_t ** pp_g2 = (ftree_port_group_t **)p2; + + return __osm_ftree_compare_switches_by_index( + &((*pp_g1)->remote_hca_or_sw.remote_sw), + &((*pp_g2)->remote_hca_or_sw.remote_sw) ); +} + +/***************************************************/ + +boolean_t +__osm_ftree_sw_less_by_index( + IN ftree_sw_t * p_sw1, + IN ftree_sw_t * p_sw2) +{ + if (__osm_ftree_compare_switches_by_index((void *)&p_sw1, + (void *)&p_sw2) < 0) + return TRUE; + return FALSE; +} + +/***************************************************/ + +boolean_t +__osm_ftree_sw_greater_by_index( + IN ftree_sw_t * p_sw1, + IN ftree_sw_t * p_sw2) +{ + if (__osm_ftree_compare_switches_by_index((void *)&p_sw1, + (void *)&p_sw2) > 0) + return TRUE; + return FALSE; +} + +/*************************************************** + ** + ** ftree_tuple_t functions + ** + ***************************************************/ + +static void +__osm_ftree_tuple_init( + IN ftree_tuple_t tuple) +{ + memset(tuple, 0xFF, FTREE_TUPLE_LEN); +} + +/***************************************************/ + +static inline boolean_t +__osm_ftree_tuple_assigned( + IN ftree_tuple_t tuple) +{ + return (tuple[0] != 0xFF); +} + +/***************************************************/ + +#define FTREE_TUPLE_BUFFERS_NUM 6 + +static char * +__osm_ftree_tuple_to_str( + IN ftree_tuple_t tuple) +{ + static char buffer[FTREE_TUPLE_BUFFERS_NUM][FTREE_TUPLE_BUFF_LEN]; + static uint8_t ind = 0; + char * ret_buffer; + uint32_t i; + + if (!__osm_ftree_tuple_assigned(tuple)) + return "INDEX.NOT.ASSIGNED"; + + buffer[ind][0] = '\0'; + + for(i = 0; (i < FTREE_TUPLE_LEN) && (tuple[i] != 0xFF); i++) + { + if ((strlen(buffer[ind]) + 10) > FTREE_TUPLE_BUFF_LEN) + return "INDEX.TOO.LONG"; + if (i != 0) + strcat(buffer[ind],"."); + sprintf(&buffer[ind][strlen(buffer[ind])], "%u", tuple[i]); + } + + ret_buffer = buffer[ind]; + ind = (ind + 1) % FTREE_TUPLE_BUFFERS_NUM; + return ret_buffer; +} /* __osm_ftree_tuple_to_str() */ + +/***************************************************/ + +static inline ftree_tuple_key_t +__osm_ftree_tuple_to_key( + IN ftree_tuple_t tuple) +{ + ftree_tuple_key_t key; + memcpy(&key, tuple, FTREE_TUPLE_LEN); + return key; +} + +/***************************************************/ + +static inline void +__osm_ftree_tuple_from_key( + IN ftree_tuple_t tuple, + IN ftree_tuple_key_t key) +{ + memcpy(tuple, &key, FTREE_TUPLE_LEN); +} + 
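Taken together, the tuple helpers above give a lossless tuple/key round trip plus a printable form. A usage sketch (the example_tuple_usage wrapper and the tuple values are hypothetical; it relies only on the functions defined above):

    static ftree_tuple_key_t example_tuple_usage(void)
    {
        ftree_tuple_t tuple;
        ftree_tuple_key_t key;

        __osm_ftree_tuple_init(tuple);           /* FF.FF... == "not assigned"  */
        tuple[0] = 2;                            /* byte 0 holds the rank...    */
        tuple[1] = 0;                            /* ...then one byte per level  */
        tuple[2] = 1;                            /* to_str() would give "2.0.1" */
        key = __osm_ftree_tuple_to_key(tuple);   /* usable as a cl_qmap key     */
        __osm_ftree_tuple_from_key(tuple, key);  /* lossless round trip         */
        return key;
    }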
+/*************************************************** + ** + ** ftree_sw_tbl_element_t functions + ** + ***************************************************/ + +static ftree_sw_tbl_element_t * +__osm_ftree_sw_tbl_element_create( + IN ftree_sw_t * p_sw) +{ + ftree_sw_tbl_element_t * p_element = + (ftree_sw_tbl_element_t *) malloc(sizeof(ftree_sw_tbl_element_t)); + if (!p_element) + return NULL; + memset(p_element, 0,sizeof(ftree_sw_tbl_element_t)); + + if (p_element) + p_element->p_sw = p_sw; + return p_element; +} + +/***************************************************/ + +static void +__osm_ftree_sw_tbl_element_destroy( + IN ftree_sw_tbl_element_t * p_element) +{ + if (!p_element) + return; + free(p_element); +} + +/*************************************************** + ** + ** ftree_port_t functions + ** + ***************************************************/ + +static ftree_port_t * +__osm_ftree_port_create( + IN uint8_t port_num, + IN uint8_t remote_port_num) +{ + ftree_port_t * p_port = (ftree_port_t *)malloc(sizeof(ftree_port_t)); + if (!p_port) + return NULL; + memset(p_port,0,sizeof(ftree_port_t)); + + p_port->port_num = port_num; + p_port->remote_port_num = remote_port_num; + + return p_port; +} + +/***************************************************/ + +static void +__osm_ftree_port_destroy( + IN ftree_port_t * p_port) +{ + if(p_port) + free(p_port); +} + +/*************************************************** + ** + ** ftree_port_group_t functions + ** + ***************************************************/ + +static ftree_port_group_t * +__osm_ftree_port_group_create( + IN ib_net16_t base_lid, + IN ib_net16_t remote_base_lid, + IN ib_net64_t * p_port_guid, + IN ib_net64_t * p_remote_port_guid, + IN ib_net64_t * p_remote_node_guid, + IN uint8_t remote_node_type, + IN void * p_remote_hca_or_sw) +{ + ftree_port_group_t * p_group = + (ftree_port_group_t *)malloc(sizeof(ftree_port_group_t)); + if (p_group == NULL) + return NULL; + memset(p_group, 0, sizeof(ftree_port_group_t)); + + p_group->base_lid = base_lid; + p_group->remote_base_lid = remote_base_lid; + memcpy(&p_group->port_guid, p_port_guid, sizeof(ib_net64_t)); + memcpy(&p_group->remote_port_guid, p_remote_port_guid, sizeof(ib_net64_t)); + memcpy(&p_group->remote_node_guid, p_remote_node_guid, sizeof(ib_net64_t)); + + p_group->remote_node_type = remote_node_type; + switch (remote_node_type) + { + case IB_NODE_TYPE_CA: + p_group->remote_hca_or_sw.remote_hca = (ftree_hca_t *)p_remote_hca_or_sw; + break; + case IB_NODE_TYPE_SWITCH: + p_group->remote_hca_or_sw.remote_sw = (ftree_sw_t *)p_remote_hca_or_sw; + break; + default: + /* we shouldn't get here - port is created only in hca or switch */ + CL_ASSERT(0); + } + + cl_ptr_vector_init(&p_group->ports, + 0, /* min size */ + 8); /* grow size */ + return p_group; +} /* __osm_ftree_port_group_create() */ + +/***************************************************/ + +static void +__osm_ftree_port_group_destroy( + IN ftree_port_group_t * p_group) +{ + uint32_t i; + uint32_t size; + ftree_port_t * p_port; + + if (!p_group) + return; + + /* remove all the elements of p_group->ports vector */ + size = cl_ptr_vector_get_size(&p_group->ports); + for (i = 0; i < size; i++) + { + cl_ptr_vector_at(&p_group->ports, i, (void **)&p_port); + __osm_ftree_port_destroy(p_port); + } + cl_ptr_vector_destroy(&p_group->ports); + free(p_group); +} /* __osm_ftree_port_group_destroy() */ + +/***************************************************/ + +static void +__osm_ftree_port_group_dump( + IN ftree_fabric_t 
*p_ftree, + IN ftree_port_group_t * p_group, + IN ftree_direction_t direction) +{ + ftree_port_t * p_port; + uint32_t size; + uint32_t i; + char buff[10*1024]; + + if (!p_group) + return; + + if (!osm_log_is_active(&p_ftree->p_osm->log, OSM_LOG_DEBUG)) + return; + + size = cl_ptr_vector_get_size(&p_group->ports); + buff[0] = '\0'; + + for (i = 0; i < size; i++) + { + cl_ptr_vector_at(&p_group->ports, i, (void **)&p_port); + CL_ASSERT(p_port); + + if (i != 0) + strcat(buff,", "); + sprintf(buff + strlen(buff), "%u", p_port->port_num); + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_port_group_dump:" + " Port Group of size %u, port(s): %s, direction: %s\n" + " Local <--> Remote GUID (LID):" + "0x%016" PRIx64 " (0x%x) <--> 0x%016" PRIx64 " (0x%x)\n", + size, + buff, + (direction == FTREE_DIRECTION_DOWN)? "DOWN" : "UP", + cl_ntoh64(p_group->port_guid), + cl_ntoh16(p_group->base_lid), + cl_ntoh64(p_group->remote_port_guid), + cl_ntoh16(p_group->remote_base_lid)); + +} /* __osm_ftree_port_group_dump() */ + +/***************************************************/ + +static void +__osm_ftree_port_group_add_port( + IN ftree_port_group_t * p_group, + IN uint8_t port_num, + IN uint8_t remote_port_num) +{ + uint16_t i; + ftree_port_t * p_port; + + for (i = 0; i < cl_ptr_vector_get_size(&p_group->ports); i++) + { + cl_ptr_vector_at(&p_group->ports, i, (void **)&p_port); + if (p_port->port_num == port_num) + return; + } + + p_port = __osm_ftree_port_create(port_num,remote_port_num); + cl_ptr_vector_insert(&p_group->ports, p_port, NULL); +} + +/*************************************************** + ** + ** ftree_sw_t functions + ** + ***************************************************/ + +static ftree_sw_t * +__osm_ftree_sw_create( + IN ftree_fabric_t * p_ftree, + IN osm_switch_t * p_osm_sw) +{ + ftree_sw_t * p_sw; + uint8_t ports_num; + + /* make sure that the switch has ports */ + if (osm_switch_get_num_ports(p_osm_sw) == 1) + return NULL; + + p_sw = (ftree_sw_t *)malloc(sizeof(ftree_sw_t)); + if (p_sw == NULL) + return NULL; + memset(p_sw, 0, sizeof(ftree_sw_t)); + + p_sw->p_osm_sw = p_osm_sw; + p_sw->rank = 0xFF; + __osm_ftree_tuple_init(p_sw->tuple); + + p_sw->base_lid = osm_node_get_base_lid(osm_switch_get_node_ptr(p_sw->p_osm_sw),0); + + ports_num = osm_node_get_num_physp(osm_switch_get_node_ptr(p_sw->p_osm_sw)); + p_sw->down_port_groups = + (ftree_port_group_t **) malloc(ports_num * sizeof(ftree_port_group_t *)); + p_sw->up_port_groups = + (ftree_port_group_t **) malloc(ports_num * sizeof(ftree_port_group_t *)); + if (!p_sw->down_port_groups || !p_sw->up_port_groups) + return NULL; + p_sw->down_port_groups_num = 0; + p_sw->up_port_groups_num = 0; + + /* initialize lft buffer */ + p_sw->lft_buf = (ftree_fwd_tbl_t)cl_pool_get(&p_ftree->sw_fwd_tbl_pool); + memset(p_sw->lft_buf, OSM_NO_PATH, FTREE_FWD_TBL_LEN); + + return p_sw; +} /* __osm_ftree_sw_create() */ + +/***************************************************/ + +static void +__osm_ftree_sw_destroy( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw) +{ + uint8_t i; + + if (!p_sw) + return; + + for (i = 0; i < p_sw->down_port_groups_num; i++) + __osm_ftree_port_group_destroy(p_sw->down_port_groups[i]); + for (i = 0; i < p_sw->up_port_groups_num; i++) + __osm_ftree_port_group_destroy(p_sw->up_port_groups[i]); + if (p_sw->down_port_groups) + free(p_sw->down_port_groups); + if (p_sw->up_port_groups) + free(p_sw->up_port_groups); + + /* return switch fwd_tbl to pool */ + if (p_sw->lft_buf) + 
cl_pool_put(&p_ftree->sw_fwd_tbl_pool, (void *)p_sw->lft_buf); + + free(p_sw); +} /* __osm_ftree_sw_destroy() */ + +/***************************************************/ + +static void +__osm_ftree_sw_dump( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw) +{ + uint32_t i; + + if (!p_sw) + return; + + if (!osm_log_is_active(&p_ftree->p_osm->log, OSM_LOG_DEBUG)) + return; + + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_sw_dump: " + "Switch index: %s, GUID: 0x%016" PRIx64 ", Ports: %u DOWN, %u UP\n", + __osm_ftree_tuple_to_str(p_sw->tuple), + cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))), + p_sw->down_port_groups_num, + p_sw->up_port_groups_num); + + for( i = 0; i < p_sw->down_port_groups_num; i++ ) + __osm_ftree_port_group_dump(p_ftree, + p_sw->down_port_groups[i], + FTREE_DIRECTION_DOWN); + for( i = 0; i < p_sw->up_port_groups_num; i++ ) + __osm_ftree_port_group_dump(p_ftree, + p_sw->up_port_groups[i], + FTREE_DIRECTION_UP); + +} /* __osm_ftree_sw_dump() */ + +/***************************************************/ + +static boolean_t +__osm_ftree_sw_ranked( + IN ftree_sw_t * p_sw) +{ + return (p_sw->rank != 0xFF); +} + +/***************************************************/ + +static ftree_port_group_t * +__osm_ftree_sw_get_port_group_by_remote_lid( + IN ftree_sw_t * p_sw, + IN ib_net16_t remote_base_lid, + IN ftree_direction_t direction) +{ + uint32_t i; + uint32_t size; + ftree_port_group_t ** port_groups; + + if (direction == FTREE_DIRECTION_UP) + { + port_groups = p_sw->up_port_groups; + size = p_sw->up_port_groups_num; + } + else + { + port_groups = p_sw->down_port_groups; + size = p_sw->down_port_groups_num; + } + + for (i = 0; i < size; i++) + if (remote_base_lid == port_groups[i]->remote_base_lid) + return port_groups[i]; + + return NULL; +} /* __osm_ftree_sw_get_port_group_by_remote_lid() */ + +/***************************************************/ + +static void +__osm_ftree_sw_add_port( + IN ftree_sw_t * p_sw, + IN uint8_t port_num, + IN uint8_t remote_port_num, + IN ib_net16_t base_lid, + IN ib_net16_t remote_base_lid, + IN ib_net64_t port_guid, + IN ib_net64_t remote_port_guid, + IN ib_net64_t remote_node_guid, + IN uint8_t remote_node_type, + IN void * p_remote_hca_or_sw, + IN ftree_direction_t direction) +{ + ftree_port_group_t * p_group = + __osm_ftree_sw_get_port_group_by_remote_lid(p_sw,remote_base_lid,direction); + + if (!p_group) + { + p_group = __osm_ftree_port_group_create( + base_lid, + remote_base_lid, + &port_guid, + &remote_port_guid, + &remote_node_guid, + remote_node_type, + p_remote_hca_or_sw); + CL_ASSERT(p_group); + + if (direction == FTREE_DIRECTION_UP) + p_sw->up_port_groups[p_sw->up_port_groups_num++] = p_group; + else + p_sw->down_port_groups[p_sw->down_port_groups_num++] = p_group; + } + __osm_ftree_port_group_add_port(p_group,port_num,remote_port_num); + +} /* __osm_ftree_sw_add_port() */ + +/***************************************************/ + +static inline void +__osm_ftree_sw_set_fwd_table_block( + IN ftree_sw_t * p_sw, + IN uint16_t lid_ho, + IN uint8_t port_num) +{ + p_sw->lft_buf[lid_ho] = port_num; +} + +/***************************************************/ + +static inline uint8_t +__osm_ftree_sw_get_fwd_table_block( + IN ftree_sw_t * p_sw, + IN uint16_t lid_ho) +{ + return p_sw->lft_buf[lid_ho]; +} + +/***************************************************/ + +static inline cl_status_t +__osm_ftree_sw_set_hops( + IN ftree_sw_t * p_sw, + IN uint16_t max_lid_ho, + IN uint16_t lid_ho, + IN uint8_t 
port_num, + IN uint8_t hops) +{ + /* make sure the lid matrix has enough room */ + osm_switch_set_min_lid_size(p_sw->p_osm_sw, max_lid_ho); + + /* set local min hop table(LID) */ + return osm_switch_set_hops(p_sw->p_osm_sw, + lid_ho, + port_num, + hops); +} + +/*************************************************** + ** + ** ftree_hca_t functions + ** + ***************************************************/ + +static ftree_hca_t * +__osm_ftree_hca_create( + IN osm_node_t * p_osm_node) +{ + ftree_hca_t * p_hca = (ftree_hca_t *)malloc(sizeof(ftree_hca_t)); + if (p_hca == NULL) + return NULL; + memset(p_hca,0,sizeof(ftree_hca_t)); + + p_hca->p_osm_node = p_osm_node; + p_hca->up_port_groups = (ftree_port_group_t **) + malloc(osm_node_get_num_physp(p_hca->p_osm_node) * sizeof (ftree_port_group_t *)); + if (!p_hca->up_port_groups) + return NULL; + p_hca->up_port_groups_num = 0; + return p_hca; +} + +/***************************************************/ + +static void +__osm_ftree_hca_destroy( + IN ftree_hca_t * p_hca) +{ + uint32_t i; + + if (!p_hca) + return; + + for (i = 0; i < p_hca->up_port_groups_num; i++) + __osm_ftree_port_group_destroy(p_hca->up_port_groups[i]); + + if (p_hca->up_port_groups) + free(p_hca->up_port_groups); + + free(p_hca); +} + +/***************************************************/ + +static void +__osm_ftree_hca_dump( + IN ftree_fabric_t * p_ftree, + IN ftree_hca_t * p_hca) +{ + uint32_t i; + + if (!p_hca) + return; + + if (!osm_log_is_active(&p_ftree->p_osm->log,OSM_LOG_DEBUG)) + return; + + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_hca_dump: " + "HCA GUID: 0x%016" PRIx64 ", Ports: %u UP\n", + cl_ntoh64(osm_node_get_node_guid(p_hca->p_osm_node)), + p_hca->up_port_groups_num); + + for( i = 0; i < p_hca->up_port_groups_num; i++ ) + __osm_ftree_port_group_dump(p_ftree, + p_hca->up_port_groups[i], + FTREE_DIRECTION_UP); +} + +/***************************************************/ + +static ftree_port_group_t * +__osm_ftree_hca_get_port_group_by_remote_lid( + IN ftree_hca_t * p_hca, + IN ib_net16_t remote_base_lid) +{ + uint32_t i; + for (i = 0; i < p_hca->up_port_groups_num; i++) + if (remote_base_lid == p_hca->up_port_groups[i]->remote_base_lid) + return p_hca->up_port_groups[i]; + + return NULL; +} + +/***************************************************/ + +static void +__osm_ftree_hca_add_port( + IN ftree_hca_t * p_hca, + IN uint8_t port_num, + IN uint8_t remote_port_num, + IN ib_net16_t base_lid, + IN ib_net16_t remote_base_lid, + IN ib_net64_t port_guid, + IN ib_net64_t remote_port_guid, + IN ib_net64_t remote_node_guid, + IN uint8_t remote_node_type, + IN void * p_remote_hca_or_sw) +{ + ftree_port_group_t * p_group; + + /* this function is supposed to be called only for adding ports + in hca's that lead to switches */ + CL_ASSERT(remote_node_type == IB_NODE_TYPE_SWITCH); + + p_group = __osm_ftree_hca_get_port_group_by_remote_lid(p_hca,remote_base_lid); + + if (!p_group) + { + p_group = __osm_ftree_port_group_create( + base_lid, + remote_base_lid, + &port_guid, + &remote_port_guid, + &remote_node_guid, + remote_node_type, + p_remote_hca_or_sw); + p_hca->up_port_groups[p_hca->up_port_groups_num++] = p_group; + } + __osm_ftree_port_group_add_port(p_group, port_num, remote_port_num); + +} /* __osm_ftree_hca_add_port() */ + +/*************************************************** + ** + ** ftree_fabric_t functions + ** + ***************************************************/ + +static ftree_fabric_t * +__osm_ftree_fabric_create() +{ + cl_status_t status; + 
ftree_fabric_t * p_ftree = (ftree_fabric_t *)malloc(sizeof(ftree_fabric_t));
+    if (p_ftree == NULL)
+        return NULL;
+
+    memset(p_ftree, 0, sizeof(ftree_fabric_t));
+
+    cl_qmap_init(&p_ftree->hca_tbl);
+    cl_qmap_init(&p_ftree->sw_tbl);
+    cl_qmap_init(&p_ftree->sw_by_tuple_tbl);
+
+    status = cl_pool_init(&p_ftree->sw_fwd_tbl_pool,
+                          8,                  /* min pool size */
+                          0,                  /* max pool size - unlimited */
+                          8,                  /* grow size */
+                          FTREE_FWD_TBL_LEN,  /* object_size */
+                          NULL,               /* object initializer */
+                          NULL,               /* object destructor */
+                          NULL);              /* context */
+    if (status != CL_SUCCESS) {
+        free(p_ftree);
+        return NULL;
+    }
+
+    p_ftree->tree_rank = 1;
+    return p_ftree;
+}
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_clear(ftree_fabric_t * p_ftree)
+{
+    ftree_hca_t * p_hca;
+    ftree_hca_t * p_next_hca;
+    ftree_sw_t * p_sw;
+    ftree_sw_t * p_next_sw;
+    ftree_sw_tbl_element_t * p_element;
+    ftree_sw_tbl_element_t * p_next_element;
+
+    if (!p_ftree)
+        return;
+
+    /* remove all the elements of hca_tbl */
+
+    p_next_hca = (ftree_hca_t *)cl_qmap_head(&p_ftree->hca_tbl);
+    while (p_next_hca != (ftree_hca_t *)cl_qmap_end(&p_ftree->hca_tbl))
+    {
+        p_hca = p_next_hca;
+        p_next_hca = (ftree_hca_t *)cl_qmap_next(&p_hca->map_item);
+        __osm_ftree_hca_destroy(p_hca);
+    }
+    cl_qmap_remove_all(&p_ftree->hca_tbl);
+
+    /* remove all the elements of sw_tbl */
+
+    p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl);
+    while (p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl))
+    {
+        p_sw = p_next_sw;
+        p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item);
+        __osm_ftree_sw_destroy(p_ftree, p_sw);
+    }
+    cl_qmap_remove_all(&p_ftree->sw_tbl);
+
+    /* remove all the elements of sw_by_tuple_tbl */
+
+    p_next_element =
+        (ftree_sw_tbl_element_t *)cl_qmap_head(&p_ftree->sw_by_tuple_tbl);
+    while (p_next_element !=
+           (ftree_sw_tbl_element_t *)cl_qmap_end(&p_ftree->sw_by_tuple_tbl))
+    {
+        p_element = p_next_element;
+        p_next_element =
+            (ftree_sw_tbl_element_t *)cl_qmap_next(&p_element->map_item);
+        __osm_ftree_sw_tbl_element_destroy(p_element);
+    }
+    cl_qmap_remove_all(&p_ftree->sw_by_tuple_tbl);
+
+    /* free the leaf switches array */
+    if ((p_ftree->leaf_switches_num > 0) && (p_ftree->leaf_switches))
+        free(p_ftree->leaf_switches);
+
+    p_ftree->leaf_switches_num = 0;
+    p_ftree->leaf_switches = NULL;
+    p_ftree->fabric_built = FALSE;
+
+} /* __osm_ftree_fabric_clear() */
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_destroy(ftree_fabric_t * p_ftree)
+{
+    if (!p_ftree)
+        return;
+    __osm_ftree_fabric_clear(p_ftree);
+    cl_pool_destroy(&p_ftree->sw_fwd_tbl_pool);
+    free(p_ftree);
+}
+
+/***************************************************/
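Note that per-switch forwarding tables are recycled through the complib pool initialized above rather than malloc'ed individually. A minimal sketch of that lifecycle, using only the pool calls that appear in this file (the get_fresh_lft helper is illustrative, not part of the patch):

    static uint8_t *get_fresh_lft(cl_pool_t *p_pool)
    {
        /* grab a pooled table and reset it to "no route" everywhere;
           cl_pool_put(p_pool, (void *)lft) later returns it for reuse */
        uint8_t *lft = (uint8_t *)cl_pool_get(p_pool);
        if (lft)
            memset(lft, OSM_NO_PATH, FTREE_FWD_TBL_LEN);
        return lft;
    }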
+
+static void
+__osm_ftree_fabric_set_rank(ftree_fabric_t * p_ftree, uint8_t rank)
+{
+    if (rank > p_ftree->tree_rank)
+        p_ftree->tree_rank = rank;
+}
+
+/***************************************************/
+
+static uint8_t
+__osm_ftree_fabric_get_rank(ftree_fabric_t * p_ftree)
+{
+    return p_ftree->tree_rank;
+}
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_add_hca(ftree_fabric_t * p_ftree, osm_node_t * p_osm_node)
+{
+    ftree_hca_t * p_hca = __osm_ftree_hca_create(p_osm_node);
+
+    CL_ASSERT(osm_node_get_type(p_osm_node) == IB_NODE_TYPE_CA);
+
+    cl_qmap_insert(&p_ftree->hca_tbl,
+                   p_osm_node->node_info.node_guid,
+                   &p_hca->map_item);
+}
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_add_sw(ftree_fabric_t * p_ftree, osm_switch_t * p_osm_sw)
+{
+    ftree_sw_t * p_sw = __osm_ftree_sw_create(p_ftree, p_osm_sw);
+
+    CL_ASSERT(osm_node_get_type(p_osm_sw->p_node) == IB_NODE_TYPE_SWITCH);
+
+    cl_qmap_insert(&p_ftree->sw_tbl,
+                   p_osm_sw->p_node->node_info.node_guid,
+                   &p_sw->map_item);
+
+    /* track the max lid (in host order) that exists in the fabric */
+    if (cl_ntoh16(p_sw->base_lid) > p_ftree->lft_max_lid_ho)
+        p_ftree->lft_max_lid_ho = cl_ntoh16(p_sw->base_lid);
+}
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_add_sw_by_tuple(
+    IN ftree_fabric_t * p_ftree,
+    IN ftree_sw_t * p_sw)
+{
+    CL_ASSERT(__osm_ftree_tuple_assigned(p_sw->tuple));
+
+    cl_qmap_insert(&p_ftree->sw_by_tuple_tbl,
+                   __osm_ftree_tuple_to_key(p_sw->tuple),
+                   &__osm_ftree_sw_tbl_element_create(p_sw)->map_item);
+}
+
+/***************************************************/
+
+static ftree_sw_t *
+__osm_ftree_fabric_get_sw_by_tuple(
+    IN ftree_fabric_t * p_ftree,
+    IN ftree_tuple_t tuple)
+{
+    ftree_sw_tbl_element_t * p_element;
+
+    CL_ASSERT(__osm_ftree_tuple_assigned(tuple));
+
+    p_element = (ftree_sw_tbl_element_t *)cl_qmap_get(&p_ftree->sw_by_tuple_tbl,
+                                                      __osm_ftree_tuple_to_key(tuple));
+    if (p_element == (ftree_sw_tbl_element_t *)cl_qmap_end(&p_ftree->sw_by_tuple_tbl))
+        return NULL;
+
+    return p_element->p_sw;
+}
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_dump(ftree_fabric_t * p_ftree)
+{
+    uint32_t i;
+    ftree_hca_t * p_hca;
+    ftree_sw_t * p_sw;
+
+    if (!osm_log_is_active(&p_ftree->p_osm->log, OSM_LOG_DEBUG))
+        return;
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, "__osm_ftree_fabric_dump: \n"
+            "  |-------------------------------|\n"
+            "  |-  Full fabric topology dump  -|\n"
+            "  |-------------------------------|\n\n");
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG,
+            "__osm_ftree_fabric_dump: -- HCAs:\n");
+
+    for (p_hca = (ftree_hca_t *)cl_qmap_head(&p_ftree->hca_tbl);
+         p_hca != (ftree_hca_t *)cl_qmap_end(&p_ftree->hca_tbl);
+         p_hca = (ftree_hca_t *)cl_qmap_next(&p_hca->map_item))
+    {
+        __osm_ftree_hca_dump(p_ftree, p_hca);
+    }
+
+    for (i = 0; i < __osm_ftree_fabric_get_rank(p_ftree); i++)
+    {
+        osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG,
+                "__osm_ftree_fabric_dump: -- Rank %u switches\n", i);
+        for (p_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl);
+             p_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl);
+             p_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item))
+        {
+            if (p_sw->rank == i)
+                __osm_ftree_sw_dump(p_ftree, p_sw);
+        }
+    }
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, "__osm_ftree_fabric_dump: \n"
+            "  |---------------------------------------|\n"
+            "  |- Full fabric topology dump completed -|\n"
+            "  |---------------------------------------|\n\n");
+} /* __osm_ftree_fabric_dump() */
+
+/***************************************************/
+
+static void
+__osm_ftree_fabric_dump_general_info(
+    IN ftree_fabric_t * p_ftree)
+{
+    uint32_t i, j;
+    ftree_sw_t * p_sw;
+    char * addition_str;
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO,
+            "__osm_ftree_fabric_dump_general_info: "
+            "General fabric topology info\n");
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO,
+            "__osm_ftree_fabric_dump_general_info: "
+            "============================\n");
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO,
+            "__osm_ftree_fabric_dump_general_info: "
+            " - FatTree rank (switches only): %u\n",
+            p_ftree->tree_rank);
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO,
+            "__osm_ftree_fabric_dump_general_info: "
+            " - Fabric has %u HCAs, %u
switches\n", + cl_qmap_count(&p_ftree->hca_tbl), + cl_qmap_count(&p_ftree->sw_tbl)); + + for (i = 0; i < __osm_ftree_fabric_get_rank(p_ftree); i++) + { + j = 0; + for ( p_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl); + p_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl); + p_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item) ) + { + if (p_sw->rank == i) + j++; + } + if (i == 0) + addition_str = " (root) "; + else + if (i == (__osm_ftree_fabric_get_rank(p_ftree) - 1)) + addition_str = " (leaf) "; + else + addition_str = " "; + osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO, + "__osm_ftree_fabric_dump_general_info: " + " - Fabric has %u rank %u%s switches\n", + j, i, addition_str); + } + + if (osm_log_is_active(&p_ftree->p_osm->log, OSM_LOG_VERBOSE)) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_fabric_dump_general_info: " + " - Root switches:\n"); + for ( p_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl); + p_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl); + p_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item) ) + { + if (p_sw->rank == 0) + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_fabric_dump_general_info: " + " GUID: 0x%016" PRIx64 ", LID: 0x%x, Index %s\n", + cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))), + cl_ntoh16(p_sw->base_lid), + __osm_ftree_tuple_to_str(p_sw->tuple)); + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_fabric_dump_general_info: " + " - Leaf switches (sorted by index):\n"); + for (i = 0; i < p_ftree->leaf_switches_num; i++) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_fabric_dump_general_info: " + " GUID: 0x%016" PRIx64 ", LID: 0x%x, Index %s\n", + cl_ntoh64(osm_node_get_node_guid( + osm_switch_get_node_ptr( + p_ftree->leaf_switches[i]->p_osm_sw))), + cl_ntoh16(p_ftree->leaf_switches[i]->base_lid), + __osm_ftree_tuple_to_str(p_ftree->leaf_switches[i]->tuple)); + } + } +} /* __osm_ftree_fabric_dump_general_info() */ + +/***************************************************/ + +static void +__osm_ftree_fabric_dump_hca_ordering( + IN ftree_fabric_t * p_ftree) +{ + ftree_hca_t * p_hca; + ftree_sw_t * p_sw; + ftree_port_group_t * p_group; + uint32_t i; + uint32_t j; + + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + char path[1024]; + FILE * p_hca_ordering_file; + char * filename = "osm-ftree-ca-order.dump"; + + snprintf(path, sizeof(path), "%s/%s", + p_ftree->p_osm->subn.opt.dump_files_dir, filename); + p_hca_ordering_file = fopen(path, "w"); + if (!p_hca_ordering_file) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_dump_hca_ordering: ERR AB01: " + "cannot open file \'%s\': %s\n", + filename, strerror(errno)); + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return; + } + + /* for each leaf switch (in indexing order) */ + for(i = 0; i < p_ftree->leaf_switches_num; i++) + { + p_sw = p_ftree->leaf_switches[i]; + /* for each real HCA connected to this switch */ + for (j = 0; j < p_sw->down_port_groups_num; j++) + { + p_group = p_sw->down_port_groups[j]; + p_hca = p_group->remote_hca_or_sw.remote_hca; + memcpy(desc,p_hca->p_osm_node->node_desc.description,IB_NODE_DESCRIPTION_SIZE); + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + + fprintf(p_hca_ordering_file,"0x%x\t%s\n", + cl_ntoh16(p_group->remote_base_lid), desc); + } + + /* now print dummy HCAs */ + for (j = p_sw->down_port_groups_num; j < p_ftree->max_hcas_per_leaf; j++) + { + fprintf(p_hca_ordering_file,"0xFFFF\tDUMMY\n"); + } + + } + /* done going through all the leaf switches */ + + 
fclose(p_hca_ordering_file); +} /* __osm_ftree_fabric_dump_hca_ordering() */ + +/***************************************************/ + +static void +__osm_ftree_fabric_assign_tuple( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw, + IN ftree_tuple_t new_tuple) +{ + memcpy(p_sw->tuple, new_tuple, FTREE_TUPLE_LEN); + __osm_ftree_fabric_add_sw_by_tuple(p_ftree,p_sw); +} + +/***************************************************/ + +static void +__osm_ftree_fabric_assign_first_tuple( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw) +{ + uint8_t i; + ftree_tuple_t new_tuple; + + __osm_ftree_tuple_init(new_tuple); + new_tuple[0] = p_sw->rank; + for (i = 1; i <= p_sw->rank; i++) + new_tuple[i] = 0; + + __osm_ftree_fabric_assign_tuple(p_ftree,p_sw,new_tuple); +} + +/***************************************************/ + +static void +__osm_ftree_fabric_get_new_tuple( + IN ftree_fabric_t * p_ftree, + OUT ftree_tuple_t new_tuple, + IN ftree_tuple_t from_tuple, + IN ftree_direction_t direction) +{ + ftree_sw_t * p_sw; + ftree_tuple_t temp_tuple; + uint8_t var_index; + uint8_t i; + + __osm_ftree_tuple_init(new_tuple); + memcpy(temp_tuple, from_tuple, FTREE_TUPLE_LEN); + + if (direction == FTREE_DIRECTION_DOWN) + { + temp_tuple[0] ++; + var_index = from_tuple[0] + 1; + } + else + { + temp_tuple[0] --; + var_index = from_tuple[0]; + } + + for (i = 0; i < 0xFF; i++) + { + temp_tuple[var_index] = i; + p_sw = __osm_ftree_fabric_get_sw_by_tuple(p_ftree,temp_tuple); + if (p_sw == NULL) /* found free tuple */ + break; + } + + if (i == 0xFF) + { + /* new tuple not found - there are more than 255 ports in one direction */ + return; + } + memcpy(new_tuple, temp_tuple, FTREE_TUPLE_LEN); + +} /* __osm_ftree_fabric_get_new_tuple() */ + +/***************************************************/ + +static void +__osm_ftree_fabric_calculate_rank( + IN ftree_fabric_t * p_ftree) +{ + ftree_sw_t * p_sw; + ftree_sw_t * p_next_sw; + uint16_t max_rank = 0; + + /* go over all the switches and find maximal switch rank */ + + p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl); + while( p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl) ) + { + p_sw = p_next_sw; + if(p_sw->rank > max_rank) + max_rank = p_sw->rank; + p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item ); + } + + /* set FatTree rank */ + __osm_ftree_fabric_set_rank(p_ftree, max_rank + 1); +} + +/***************************************************/ + +static void +__osm_ftree_fabric_make_indexing( + IN ftree_fabric_t * p_ftree) +{ + ftree_sw_t * p_remote_sw; + ftree_sw_t * p_sw; + ftree_sw_t * p_next_sw; + ftree_tuple_t new_tuple; + uint32_t i; + cl_list_t bfs_list; + ftree_sw_tbl_element_t * p_sw_tbl_element; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_make_indexing); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_fabric_make_indexing: " + "Starting FatTree indexing\n"); + + /* create array of leaf switches */ + p_ftree->leaf_switches = (ftree_sw_t **) + malloc(cl_qmap_count(&p_ftree->sw_tbl) * sizeof(ftree_sw_t *)); + + /* Looking for a leaf switch - the one that has rank equal to (tree_rank - 1). + This switch will be used as a starting point for indexing algorithm. 
   */
+
+    p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl);
+    while (p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl))
+    {
+        p_sw = p_next_sw;
+        if (p_sw->rank == (__osm_ftree_fabric_get_rank(p_ftree) - 1))
+            break;
+        p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item);
+    }
+
+    CL_ASSERT(p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl));
+
+    /* Assign the first tuple to the switch that is used as BFS starting point.
+       The tuple will be as follows: [rank].0.0.0...
+       This function also adds the switch into the switch_by_tuple table. */
+    __osm_ftree_fabric_assign_first_tuple(p_ftree, p_sw);
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,
+            "__osm_ftree_fabric_make_indexing: Indexing starting point:\n"
+            "  - Switch rank  : %u\n"
+            "  - Switch index : %s\n"
+            "  - Node LID     : 0x%x\n"
+            "  - Node GUID    : 0x%016" PRIx64 "\n",
+            p_sw->rank,
+            __osm_ftree_tuple_to_str(p_sw->tuple),
+            cl_ntoh16(p_sw->base_lid),
+            cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))));
+
+    /*
+     * Now run BFS and assign indexes to all switches
+     * Pseudo code of the algorithm is as follows:
+     *
+     *  * Add first switch to BFS queue
+     *  * While (BFS queue not empty)
+     *      - Pop the switch from the head of the queue
+     *      - Scan all the downward and upward ports
+     *      - For each port
+     *          + Get the remote switch
+     *          + Assign index to the remote switch
+     *          + Add remote switch to the BFS queue
+     */
+
+    cl_list_init(&bfs_list, cl_qmap_count(&p_ftree->sw_tbl));
+    cl_list_insert_tail(&bfs_list, &__osm_ftree_sw_tbl_element_create(p_sw)->map_item);
+
+    while (!cl_is_list_empty(&bfs_list))
+    {
+        p_sw_tbl_element = (ftree_sw_tbl_element_t *)cl_list_remove_head(&bfs_list);
+        p_sw = p_sw_tbl_element->p_sw;
+        __osm_ftree_sw_tbl_element_destroy(p_sw_tbl_element);
+
+        /* Discover all the nodes from ports that are pointing down */
+
+        if (p_sw->rank == (__osm_ftree_fabric_get_rank(p_ftree) - 1))
+        {
+            /* add switch to leaf switches array */
+            p_ftree->leaf_switches[p_ftree->leaf_switches_num++] = p_sw;
+            /* update the max_hcas_per_leaf value */
+            if (p_sw->down_port_groups_num > p_ftree->max_hcas_per_leaf)
+                p_ftree->max_hcas_per_leaf = p_sw->down_port_groups_num;
+        }
+        else
+        {
+            /* This is not a leaf switch, which means that all the
+               ports that point down are taking us to other switches.
+               No need to assign indexing to HCAs */
+            for (i = 0; i < p_sw->down_port_groups_num; i++)
+            {
+                p_remote_sw = p_sw->down_port_groups[i]->remote_hca_or_sw.remote_sw;
+                if (__osm_ftree_tuple_assigned(p_remote_sw->tuple))
+                {
+                    /* this switch has already been indexed */
+                    continue;
+                }
+                /* allocate new tuple */
+                __osm_ftree_fabric_get_new_tuple(p_ftree,
+                                                 new_tuple,
+                                                 p_sw->tuple,
+                                                 FTREE_DIRECTION_DOWN);
+                /* Assign the new tuple to the remote switch.
+                   This function also adds the switch into the switch_by_tuple table. */
+                __osm_ftree_fabric_assign_tuple(p_ftree,
+                                                p_remote_sw,
+                                                new_tuple);
+
+                /* add the newly discovered switch to the BFS queue */
+                cl_list_insert_tail(&bfs_list,
+                                    &__osm_ftree_sw_tbl_element_create(p_remote_sw)->map_item);
+            }
+            /* Done assigning indexes to all the remote switches
+               that are pointed by the downgoing ports.
+               Now sort port groups according to remote index. */
+            qsort(p_sw->down_port_groups,        /* array              */
+                  p_sw->down_port_groups_num,    /* number of elements */
+                  sizeof(ftree_port_group_t *),  /* size of each element */
+                  __osm_ftree_compare_port_groups_by_remote_switch_index); /* comparator */
+        }
+
+        /* Done indexing switches from ports that go down.
+           Now do the same with ports that are pointing up. */
+
+        if (p_sw->rank != 0)
+        {
+            /* This is not the root switch, which means that all the ports
+               that are pointing up are taking us to other switches. */
+            for (i = 0; i < p_sw->up_port_groups_num; i++)
+            {
+                p_remote_sw = p_sw->up_port_groups[i]->remote_hca_or_sw.remote_sw;
+                if (__osm_ftree_tuple_assigned(p_remote_sw->tuple))
+                    continue;
+                /* allocate new tuple */
+                __osm_ftree_fabric_get_new_tuple(p_ftree,
+                                                 new_tuple,
+                                                 p_sw->tuple,
+                                                 FTREE_DIRECTION_UP);
+                /* Assign the new tuple to the remote switch.
+                   This function also adds the switch to the
+                   switch_by_tuple table. */
+                __osm_ftree_fabric_assign_tuple(p_ftree,
+                                                p_remote_sw,
+                                                new_tuple);
+                /* add the newly discovered switch to the BFS queue */
+                cl_list_insert_tail(&bfs_list,
+                                    &__osm_ftree_sw_tbl_element_create(p_remote_sw)->map_item);
+            }
+            /* Done assigning indexes to all the remote switches
+               that are pointed by the upgoing ports.
+               Now sort port groups according to remote index. */
+            qsort(p_sw->up_port_groups,          /* array              */
+                  p_sw->up_port_groups_num,      /* number of elements */
+                  sizeof(ftree_port_group_t *),  /* size of each element */
+                  __osm_ftree_compare_port_groups_by_remote_switch_index); /* comparator */
+        }
+        /* Done assigning indexes to all the switches that are directly connected
+           to the current switch - go to the next switch in the BFS queue */
+    }
+
+    /* sort array of leaf switches by index */
+    qsort(p_ftree->leaf_switches,        /* array              */
+          p_ftree->leaf_switches_num,    /* number of elements */
+          sizeof(ftree_sw_t *),          /* size of each element */
+          __osm_ftree_compare_switches_by_index); /* comparator */
+
+    OSM_LOG_EXIT(&p_ftree->p_osm->log);
+} /* __osm_ftree_fabric_make_indexing() */
+
+/***************************************************/
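A worked example of the tuple derivation the BFS above performs, reduced to the up-direction rule from __osm_ftree_fabric_get_new_tuple() (a standalone sketch with toy two-level values; it simply enumerates candidates and ignores the occupied-tuple lookup that the real function does):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* leaf "1.0": byte 0 is the rank, the rest of the index is zeroed */
        uint8_t from[8] = { 1, 0, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
        uint8_t up[8];
        unsigned i;

        for (i = 0; i < 2; i++) {
            memcpy(up, from, sizeof(up));
            up[0]--;             /* a parent lives one rank higher      */
            up[from[0]] = i;     /* vary the byte chosen by the UP rule */
            printf("%u.%u\n", up[0], up[1]);   /* "0.0", then "0.1"     */
        }
        return 0;
    }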
+
+static boolean_t
+__osm_ftree_fabric_validate_topology(
+    IN ftree_fabric_t * p_ftree)
+{
+    ftree_port_group_t * p_group;
+    ftree_port_group_t * p_ref_group;
+    ftree_sw_t * p_sw;
+    ftree_sw_t * p_next_sw;
+    ftree_sw_t ** reference_sw_arr;
+    uint16_t tree_rank = __osm_ftree_fabric_get_rank(p_ftree);
+    boolean_t res = TRUE;
+    uint8_t i;
+
+    OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_validate_topology);
+
+    osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,
+            "__osm_ftree_fabric_validate_topology: "
+            "Validating fabric topology\n");
+
+    reference_sw_arr = (ftree_sw_t **)malloc(tree_rank * sizeof(ftree_sw_t *));
+    if (reference_sw_arr == NULL)
+    {
+        osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS,
+                "Fat-tree routing: Memory allocation failed\n");
+        return FALSE;
+    }
+    memset(reference_sw_arr, 0, tree_rank * sizeof(ftree_sw_t *));
+
+    p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl);
+    while (res &&
+           p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl))
+    {
+        p_sw = p_next_sw;
+        p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item);
+
+        if (!reference_sw_arr[p_sw->rank])
+        {
+            /* This is the first switch in the current level that
+               we're checking - use it as a reference */
+            reference_sw_arr[p_sw->rank] = p_sw;
+        }
+        else
+        {
+            /* compare this switch properties to the reference switch */
+
+            if (reference_sw_arr[p_sw->rank]->up_port_groups_num != p_sw->up_port_groups_num)
+            {
+                osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR,
+                        "__osm_ftree_fabric_validate_topology: "
+                        "ERR AB09: Different number of upward port groups on switches:\n"
+                        "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u groups\n"
+                        "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u groups\n",
+                        cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(reference_sw_arr[p_sw->rank]->p_osm_sw))),
+                        cl_ntoh16(reference_sw_arr[p_sw->rank]->base_lid),
+                        __osm_ftree_tuple_to_str(reference_sw_arr[p_sw->rank]->tuple),
+                        reference_sw_arr[p_sw->rank]->up_port_groups_num,
+                        cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))),
+                        cl_ntoh16(p_sw->base_lid),
+                        __osm_ftree_tuple_to_str(p_sw->tuple),
+                        p_sw->up_port_groups_num);
+                res = FALSE;
+                break;
+            }
+
+            if (p_sw->rank != (__osm_ftree_fabric_get_rank(p_ftree) - 1) &&
+                reference_sw_arr[p_sw->rank]->down_port_groups_num != p_sw->down_port_groups_num)
+            {
+                /* we're allowing some HCAs to be missing */
+                osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR,
+                        "__osm_ftree_fabric_validate_topology: "
+                        "ERR AB0A: Different number of downward port groups on switches:\n"
+                        "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u port groups\n"
+                        "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u port groups\n",
+                        cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(reference_sw_arr[p_sw->rank]->p_osm_sw))),
+                        cl_ntoh16(reference_sw_arr[p_sw->rank]->base_lid),
+                        __osm_ftree_tuple_to_str(reference_sw_arr[p_sw->rank]->tuple),
+                        reference_sw_arr[p_sw->rank]->down_port_groups_num,
+                        cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))),
+                        cl_ntoh16(p_sw->base_lid),
+                        __osm_ftree_tuple_to_str(p_sw->tuple),
+                        p_sw->down_port_groups_num);
+                res = FALSE;
+                break;
+            }
+
+            if (reference_sw_arr[p_sw->rank]->up_port_groups_num != 0)
+            {
+                p_ref_group = reference_sw_arr[p_sw->rank]->up_port_groups[0];
+                for (i = 0; i < p_sw->up_port_groups_num; i++)
+                {
+                    p_group = p_sw->up_port_groups[i];
+                    if (cl_ptr_vector_get_size(&p_ref_group->ports) != cl_ptr_vector_get_size(&p_group->ports))
+                    {
+                        osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR,
+                                "__osm_ftree_fabric_validate_topology: "
+                                "ERR AB0B: Different number of ports in an upward port group on switches:\n"
+                                "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u ports\n"
+                                "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u ports\n",
+                                cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(reference_sw_arr[p_sw->rank]->p_osm_sw))),
+                                cl_ntoh16(reference_sw_arr[p_sw->rank]->base_lid),
+                                __osm_ftree_tuple_to_str(reference_sw_arr[p_sw->rank]->tuple),
+                                cl_ptr_vector_get_size(&p_ref_group->ports),
+                                cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))),
+                                cl_ntoh16(p_sw->base_lid),
+                                __osm_ftree_tuple_to_str(p_sw->tuple),
+                                cl_ptr_vector_get_size(&p_group->ports));
+                        res = FALSE;
+                        break;
+                    }
+                }
+            }
+            if (reference_sw_arr[p_sw->rank]->down_port_groups_num != 0 &&
+                p_sw->rank != (tree_rank - 1))
+            {
+                /* we're allowing some HCAs to be missing */
+                p_ref_group = reference_sw_arr[p_sw->rank]->down_port_groups[0];
+                for (i = 0; i < p_sw->down_port_groups_num; i++)
+                {
+                    p_group = p_sw->down_port_groups[i];
+                    if (cl_ptr_vector_get_size(&p_ref_group->ports) != cl_ptr_vector_get_size(&p_group->ports))
+                    {
+                        osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR,
+                                "__osm_ftree_fabric_validate_topology: "
+                                "ERR AB0C: Different number of ports in a downward port group on switches:\n"
+                                "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u ports\n"
+                                "  GUID 0x%016" PRIx64 ", LID 0x%x, Index %s - %u ports\n",
+                                cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(reference_sw_arr[p_sw->rank]->p_osm_sw))),
+                                cl_ntoh16(reference_sw_arr[p_sw->rank]->base_lid),
+                                __osm_ftree_tuple_to_str(reference_sw_arr[p_sw->rank]->tuple),
+                                cl_ptr_vector_get_size(&p_ref_group->ports),
+                                cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))),
+                                cl_ntoh16(p_sw->base_lid),
+                                __osm_ftree_tuple_to_str(p_sw->tuple),
+                                cl_ptr_vector_get_size(&p_group->ports));
+                        res = FALSE;
+                        break;
+                    }
+                }
+            }
+        } /* end of else */
+    } /* end of while */
+
+    if (res == TRUE)
+        osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,
+                "__osm_ftree_fabric_validate_topology: "
+                "Fabric topology has been identified as FatTree\n");
+    else
+        osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR,
+                "__osm_ftree_fabric_validate_topology: "
+                "ERR AB0D: Fabric topology hasn't been identified as FatTree\n");
+
+    free(reference_sw_arr);
+    OSM_LOG_EXIT(&p_ftree->p_osm->log);
+    return res;
+} /* __osm_ftree_fabric_validate_topology() */
+
+/***************************************************
+ ***************************************************/
+
+static void
+__osm_ftree_set_sw_fwd_table(
+    IN cl_map_item_t * const p_map_item,
+    IN void *context)
+{
+    ftree_sw_t * p_sw = (ftree_sw_t * const)p_map_item;
+    ftree_fabric_t * p_ftree = (ftree_fabric_t *)context;
+
+    /* calculate lft length rounded up to a multiple of 64 (block length) */
+    uint16_t lft_len = 64 * ((p_ftree->lft_max_lid_ho + 1 + 63) / 64);
+
+    p_sw->p_osm_sw->max_lid_ho = p_ftree->lft_max_lid_ho;
+
+    memcpy(p_ftree->p_osm->sm.ucast_mgr.lft_buf,
+           p_sw->lft_buf,
+           lft_len);
+    osm_ucast_mgr_set_fwd_table(&p_ftree->p_osm->sm.ucast_mgr, p_sw->p_osm_sw);
+}
+
+/***************************************************
+ ***************************************************/
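LFTs are programmed in 64-entry blocks, which is why the copy length above is the fabric's max LID rounded up to the next block boundary; with lft_max_lid_ho = 200, for instance, 64 * ((200 + 1 + 63) / 64) = 64 * 4 = 256 entries are copied. The call site is not part of this hunk, but given the (cl_map_item_t *, void *) signature this callback is presumably applied to every switch through the complib map iterator, along these lines (a sketch, an assumption rather than code from the patch):

    /* hypothetical call site, after all per-switch LFTs are filled: */
    cl_qmap_apply_func(&p_ftree->sw_tbl,
                       __osm_ftree_set_sw_fwd_table,
                       (void *)p_ftree);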
+
+/*
+ * Function: assign-up-going-port-by-descending-down
+ * Given   : a switch and a LID
+ * Pseudo code:
+ *    foreach down-going-port-group (in indexing order)
+ *        skip this group if the LFT(LID) port is part of this group
+ *        find the least loaded port of the group (scan in indexing order)
+ *        r-port is the remote port connected to it
+ *        assign the remote switch node LFT(LID) to r-port
+ *        increase r-port usage counter
+ *        assign-up-going-port-by-descending-down to r-port node (recursion)
+ */
+
+static void
+__osm_ftree_fabric_route_upgoing_by_going_down(
+    IN ftree_fabric_t * p_ftree,
+    IN ftree_sw_t * p_sw,
+    IN ftree_sw_t * p_prev_sw,
+    IN ib_net16_t target_lid,
+    IN uint8_t target_rank,
+    IN boolean_t is_real_lid,
+    IN boolean_t is_main_path,
+    IN uint8_t highest_rank_in_route)
+{
+    ftree_sw_t * p_remote_sw;
+    uint16_t ports_num;
+    ftree_port_group_t * p_group;
+    ftree_port_t * p_port;
+    ftree_port_t * p_min_port;
+    uint16_t i;
+    uint16_t j;
+
+    /* we shouldn't enter here if both real_lid and main_path are false */
+    CL_ASSERT(is_real_lid || is_main_path);
+
+    /* can't be here for a leaf switch */
+    CL_ASSERT(p_sw->rank != (__osm_ftree_fabric_get_rank(p_ftree) - 1));
+
+    /* if there are no down-going ports */
+    if (p_sw->down_port_groups_num == 0)
+        return;
+
+    /* foreach down-going port group (in indexing order) */
+    for (i = 0; i < p_sw->down_port_groups_num; i++)
+    {
+        p_group = p_sw->down_port_groups[i];
+
+        if (p_prev_sw && (p_group->remote_base_lid == p_prev_sw->base_lid))
+        {
+            /* This port group has a port that was used when we entered this switch,
+               which means that the current group points to the switch where we were
+               at the previous step of the algorithm (before going up).
+               Skipping this group. */
+            continue;
+        }
+
+        /* find the least loaded port of the group (in indexing order) */
+        p_min_port = NULL;
+        ports_num = (uint16_t)cl_ptr_vector_get_size(&p_group->ports);
+        /* ToDo: no need to select a least loaded port for non-main path.
+           Think about optimization.
*/ + for (j = 0; j < ports_num; j++) + { + cl_ptr_vector_at(&p_group->ports, j, (void **)&p_port); + if (!p_min_port) + { + /* first port that we're checking - set as port with the lowest load */ + p_min_port = p_port; + } + else if (p_port->counter_up < p_min_port->counter_up) + { + /* this port is less loaded - use it as min */ + p_min_port = p_port; + } + } + /* At this point we have selected a port in this group with the + lowest load of upgoing routes. + Set on the remote switch how to get to the target_lid - + set LFT(target_lid) on the remote switch to the remote port */ + p_remote_sw = p_group->remote_hca_or_sw.remote_sw; + + /* Four possible cases: + * + * 1. is_real_lid == TRUE && is_main_path == TRUE: + * - going DOWN(TRUE,TRUE) through ALL the groups + * + promoting port counter + * + setting path in remote switch fwd tbl + * + setting hops in remote switch on all the ports of each group + * + * 2. is_real_lid == TRUE && is_main_path == FALSE: + * - going DOWN(TRUE,FALSE) through ALL the groups but only if + * the remote (upper) switch hasn't been already configured + * for this target LID + * + NOT promoting port counter + * + setting path in remote switch fwd tbl if it hasn't been set yet + * + setting hops in remote switch on all the ports of each group + * if it hasn't been set yet + * + * 3. is_real_lid == FALSE && is_main_path == TRUE: + * - going DOWN(FALSE,TRUE) through ALL the groups + * + promoting port counter + * + NOT setting path in remote switch fwd tbl + * + NOT setting hops in remote switch + * + * 4. is_real_lid == FALSE && is_main_path == FALSE: + * - illegal state - we shouldn't get here + */ + + /* second case: skip the port group if the remote (upper) + switch has been already configured for this target LID */ + if ( is_real_lid && !is_main_path && + __osm_ftree_sw_get_fwd_table_block(p_remote_sw, + cl_ntoh16(target_lid)) != OSM_NO_PATH ) + continue; + + /* setting fwd tbl port only if this is real LID */ + if (is_real_lid) + { + __osm_ftree_sw_set_fwd_table_block(p_remote_sw, + cl_ntoh16(target_lid), + p_min_port->remote_port_num); + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_upgoing_by_going_down: " + "Switch %s: set path to HCA LID 0x%x through port %u\n", + __osm_ftree_tuple_to_str(p_remote_sw->tuple), + cl_ntoh16(target_lid), + p_min_port->remote_port_num); + + /* On the remote switch that is pointed by the p_group, + set hops for ALL the ports in the remote group. */ + + for (j = 0; j < ports_num; j++) + { + cl_ptr_vector_at(&p_group->ports, j, (void **)&p_port); + + __osm_ftree_sw_set_hops(p_remote_sw, + p_ftree->lft_max_lid_ho, + cl_ntoh16(target_lid), + p_port->remote_port_num, + ( (target_rank - highest_rank_in_route) + + (p_remote_sw->rank - highest_rank_in_route) )); + } + + + } + + /* The number of upgoing routes is tracked in the + p_port->counter_up counter of the port that belongs to + the upper side of the link (on switch with lower rank). + Counter is promoted only if we're routing LID on the main + path (whether it's a real LID or a dummy one). */ + if (is_main_path) + p_min_port->counter_up++; + + /* Recursion step: + Assign upgoing ports by stepping down, starting on REMOTE switch. + Recursion stop condition - if the REMOTE switch is a leaf switch. */ + if (p_remote_sw->rank != (__osm_ftree_fabric_get_rank(p_ftree) - 1)) + { + __osm_ftree_fabric_route_upgoing_by_going_down( + p_ftree, + p_remote_sw, /* remote switch - used as a route-upgoing alg. start point */ + NULL, /* prev. 
position - NULL to mark that we went down and not up */ + target_lid, /* LID that we're routing to */ + target_rank, /* rank of the LID that we're routing to */ + is_real_lid, /* whether the target LID is real or dummy */ + is_main_path, /* whether this is path to HCA that should by tracked by counters */ + highest_rank_in_route); /* highest visited point in the tree before going down */ + } + } + /* done scanning all the down-going port groups */ + +} /* __osm_ftree_fabric_route_upgoing_by_going_down() */ + +/***************************************************/ + +/* + * Function: assign-down-going-port-by-descending-up + * Given : a switch and a LID + * Pseudo code: + * find the least loaded port of all the upgoing groups (scan in indexing order) + * assign the LFT(LID) of remote switch to that port + * track that port usage + * assign-up-going-port-by-descending-down on CURRENT switch + * assign-down-going-port-by-descending-up on REMOTE switch (recursion) + */ + +static void +__osm_ftree_fabric_route_downgoing_by_going_up( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw, + IN ftree_sw_t * p_prev_sw, + IN ib_net16_t target_lid, + IN uint8_t target_rank, + IN boolean_t is_real_lid, + IN boolean_t is_main_path) +{ + ftree_sw_t * p_remote_sw; + uint16_t ports_num; + ftree_port_group_t * p_group; + ftree_port_t * p_port; + ftree_port_group_t * p_min_group; + ftree_port_t * p_min_port; + uint16_t i; + uint16_t j; + + /* we shouldn't enter here if both real_lid and main_path are false */ + CL_ASSERT(is_real_lid || is_main_path); + + /* If this switch isn't a leaf switch: + Assign upgoing ports by stepping down, starting on THIS switch. */ + if (p_sw->rank != (__osm_ftree_fabric_get_rank(p_ftree) - 1)) + { + __osm_ftree_fabric_route_upgoing_by_going_down( + p_ftree, + p_sw, /* local switch - used as a route-upgoing alg. start point */ + p_prev_sw, /* switch that we went up from (NULL means that we went down) */ + target_lid, /* LID that we're routing to */ + target_rank, /* rank of the LID that we're routing to */ + is_real_lid, /* whether this target LID is real or dummy */ + is_main_path, /* whether this path to HCA should by tracked by counters */ + p_sw->rank); /* the highest visited point in the tree before going down */ + } + + /* recursion stop condition - if it's a root switch, */ + if (p_sw->rank == 0) + return; + + /* Find the least loaded port of all the upgoing port groups + (in indexing order of the remote switches). */ + p_min_group = NULL; + p_min_port = NULL; + for (i = 0; i < p_sw->up_port_groups_num; i++) + { + p_group = p_sw->up_port_groups[i]; + + ports_num = (uint16_t)cl_ptr_vector_get_size(&p_group->ports); + for (j = 0; j < ports_num; j++) + { + cl_ptr_vector_at(&p_group->ports, j, (void **)&p_port); + if (!p_min_group) + { + /* first port that we're checking - use + it as a port with the lowest load */ + p_min_group = p_group; + p_min_port = p_port; + } + else + { + if ( p_port->counter_down < p_min_port->counter_down ) + { + /* this port is less loaded - use it as min */ + p_min_group = p_group; + p_min_port = p_port; + } + } + } + } + + /* At this point we have selected a group and port with the + lowest load of downgoing routes. + Set on the remote switch how to get to the target_lid - + set LFT(target_lid) on the remote switch to the remote port */ + p_remote_sw = p_min_group->remote_hca_or_sw.remote_sw; + + /* Four possible cases: + * + * 1. 
is_real_lid == TRUE && is_main_path == TRUE: + * - going UP(TRUE,TRUE) on selected min_group and min_port + * + promoting port counter + * + setting path in remote switch fwd tbl + * + setting hops in remote switch on all the ports of selected group + * - going UP(TRUE,FALSE) on rest of the groups, each time on port 0 + * + NOT promoting port counter + * + setting path in remote switch fwd tbl if it hasn't been set yet + * + setting hops in remote switch on all the ports of each group + * if it hasn't been set yet + * + * 2. is_real_lid == TRUE && is_main_path == FALSE: + * - going UP(TRUE,FALSE) on ALL the groups, each time on port 0, + * but only if the remote (upper) switch hasn't been already + * configured for this target LID + * + NOT promoting port counter + * + setting path in remote switch fwd tbl if it hasn't been set yet + * + setting hops in remote switch on all the ports of each group + * if it hasn't been set yet + * + * 3. is_real_lid == FALSE && is_main_path == TRUE: + * - going UP(FALSE,TRUE) ONLY on selected min_group and min_port + * + promoting port counter + * + NOT setting path in remote switch fwd tbl + * + NOT setting hops in remote switch + * + * 4. is_real_lid == FALSE && is_main_path == FALSE: + * - illegal state - we shouldn't get here + */ + + /* covering first half of case 1, and case 3 */ + if (is_main_path) + { + if (p_sw->rank == (__osm_ftree_fabric_get_rank(p_ftree) - 1)) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_downgoing_by_going_up: " + " - Routing MAIN path for %s HCA LID 0x%x: %s --> %s\n", + (is_real_lid)? "real" : "DUMMY", + cl_ntoh16(target_lid), + __osm_ftree_tuple_to_str(p_sw->tuple), + __osm_ftree_tuple_to_str(p_remote_sw->tuple)); + } + /* The number of downgoing routes is tracked in the + p_port->counter_down counter of the port that belongs to + the lower side of the link (on switch with higher rank) */ + p_min_port->counter_down++; + if (is_real_lid) + { + __osm_ftree_sw_set_fwd_table_block(p_remote_sw, + cl_ntoh16(target_lid), + p_min_port->remote_port_num); + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_downgoing_by_going_up: " + "Switch %s: set path to HCA LID 0x%x through port %u\n", + __osm_ftree_tuple_to_str(p_remote_sw->tuple), + cl_ntoh16(target_lid),p_min_port->remote_port_num); + + /* On the remote switch that is pointed by the min_group, + set hops for ALL the ports in the remote group. */ + + ports_num = (uint16_t)cl_ptr_vector_get_size(&p_min_group->ports); + for (j = 0; j < ports_num; j++) + { + cl_ptr_vector_at(&p_min_group->ports, j, (void **)&p_port); + __osm_ftree_sw_set_hops(p_remote_sw, + p_ftree->lft_max_lid_ho, + cl_ntoh16(target_lid), + p_port->remote_port_num, + target_rank - p_remote_sw->rank); + } + } + + /* Recursion step: + Assign downgoing ports by stepping up, starting on REMOTE switch. */ + __osm_ftree_fabric_route_downgoing_by_going_up( + p_ftree, + p_remote_sw, /* remote switch - used as a route-downgoing alg. next step point */ + p_sw, /* this switch - prev. position switch for the function */ + target_lid, /* LID that we're routing to */ + target_rank, /* rank of the LID that we're routing to */ + is_real_lid, /* whether this target LID is real or dummy */ + is_main_path); /* whether this is path to HCA that should by tracked by counters */ + } + + /* we're done for the third case */ + if (!is_real_lid) + return; + + /* What's left to do at this point: + * + * 1. 
is_real_lid == TRUE && is_main_path == TRUE: + * - going UP(TRUE,FALSE) on rest of the groups, each time on port 0, + * but only if the remote (upper) switch hasn't been already + * configured for this target LID + * + NOT promoting port counter + * + setting path in remote switch fwd tbl if it hasn't been set yet + * + setting hops in remote switch on all the ports of each group + * if it hasn't been set yet + * + * 2. is_real_lid == TRUE && is_main_path == FALSE: + * - going UP(TRUE,FALSE) on ALL the groups, each time on port 0, + * but only if the remote (upper) switch hasn't been already + * configured for this target LID + * + NOT promoting port counter + * + setting path in remote switch fwd tbl if it hasn't been set yet + * + setting hops in remote switch on all the ports of each group + * if it hasn't been set yet + * + * These two rules can be rephrased this way: + * - foreach UP port group + * + if remote switch has been set with the target LID + * - skip this port group + * + else + * - select port 0 + * - do NOT promote port counter + * - set path in remote switch fwd tbl + * - set hops in remote switch on all the ports of this group + * - go UP(TRUE,FALSE) to the remote switch + */ + + for (i = 0; i < p_sw->up_port_groups_num; i++) + { + p_group = p_sw->up_port_groups[i]; + p_remote_sw = p_group->remote_hca_or_sw.remote_sw; + + /* skip if target lid has been already set on remote switch fwd tbl */ + if (__osm_ftree_sw_get_fwd_table_block( + p_remote_sw,cl_ntoh16(target_lid)) != OSM_NO_PATH) + continue; + + if (p_sw->rank == (__osm_ftree_fabric_get_rank(p_ftree) - 1)) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_downgoing_by_going_up: " + " - Routing SECONDARY path for LID 0x%x: %s --> %s\n", + cl_ntoh16(target_lid), + __osm_ftree_tuple_to_str(p_sw->tuple), + __osm_ftree_tuple_to_str(p_remote_sw->tuple)); + } + + cl_ptr_vector_at(&p_group->ports, 0, (void **)&p_port); + __osm_ftree_sw_set_fwd_table_block(p_remote_sw, + cl_ntoh16(target_lid), + p_port->remote_port_num); + + /* On the remote switch that is pointed by the p_group, + set hops for ALL the ports in the remote group. */ + + ports_num = (uint16_t)cl_ptr_vector_get_size(&p_group->ports); + for (j = 0; j < ports_num; j++) + { + cl_ptr_vector_at(&p_group->ports, j, (void **)&p_port); + + __osm_ftree_sw_set_hops(p_remote_sw, + p_ftree->lft_max_lid_ho, + cl_ntoh16(target_lid), + p_port->remote_port_num, + target_rank - p_remote_sw->rank); + } + + /* Recursion step: + Assign downgoing ports by stepping up, starting on REMOTE switch. */ + __osm_ftree_fabric_route_downgoing_by_going_up( + p_ftree, + p_remote_sw, /* remote switch - used as a route-downgoing alg. next step point */ + p_sw, /* this switch - prev. 
position switch for the function */ + target_lid, /* LID that we're routing to */ + target_rank, /* rank of the LID that we're routing to */ + TRUE, /* whether the target LID is real or dummy */ + FALSE); /* whether this is path to HCA that should by tracked by counters */ + } + +} /* ftree_fabric_route_downgoing_by_going_up() */ + +/***************************************************/ + +/* + * Pseudo code: + * foreach leaf switch (in indexing order) + * for each compute node (in indexing order) + * obtain the LID of the compute node + * set local LFT(LID) of the port connecting to compute node + * call assign-down-going-port-by-descending-up(TRUE,TRUE) on CURRENT switch + * for each MISSING compute node + * call assign-down-going-port-by-descending-up(FALSE,TRUE) on CURRENT switch + */ + +static void +__osm_ftree_fabric_route_to_hcas( + IN ftree_fabric_t * p_ftree) +{ + ftree_sw_t * p_sw; + ftree_port_group_t * p_group; + ftree_port_t * p_port; + uint32_t i; + uint32_t j; + ib_net16_t remote_lid; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_route_to_hcas); + + /* for each leaf switch (in indexing order) */ + for(i = 0; i < p_ftree->leaf_switches_num; i++) + { + p_sw = p_ftree->leaf_switches[i]; + + /* for each HCA connected to this switch */ + for (j = 0; j < p_sw->down_port_groups_num; j++) + { + /* obtain the LID of HCA port */ + p_group = p_sw->down_port_groups[j]; + remote_lid = p_group->remote_base_lid; + + /* set local LFT(LID) to the port that is connected to HCA */ + cl_ptr_vector_at(&p_group->ports, 0, (void **)&p_port); + __osm_ftree_sw_set_fwd_table_block(p_sw, + cl_ntoh16(remote_lid), + p_port->port_num); + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_to_hcas: " + "Switch %s: set path to HCA LID 0x%x through port %u\n", + __osm_ftree_tuple_to_str(p_sw->tuple), + cl_ntoh16(remote_lid), + p_port->port_num); + + /* set local min hop table(LID) to route to the CA */ + __osm_ftree_sw_set_hops(p_sw, + p_ftree->lft_max_lid_ho, + cl_ntoh16(remote_lid), + p_port->port_num, + 1); + + /* assign downgoing ports by stepping up */ + __osm_ftree_fabric_route_downgoing_by_going_up( + p_ftree, + p_sw, /* local switch - used as a route-downgoing alg. start point */ + NULL, /* prev. position switch */ + remote_lid, /* LID that we're routing to */ + __osm_ftree_fabric_get_rank(p_ftree), /* rank of the LID that we're routing to */ + TRUE, /* whether this HCA LID is real or dummy */ + TRUE); /* whether this path to HCA should by tracked by counters */ + } + + /* We're done with the real HCAs. Now route the dummy HCAs that are missing. + When routing to dummy HCAs we don't fill lid matrices. */ + + if (p_ftree->max_hcas_per_leaf > p_sw->down_port_groups_num) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG,"__osm_ftree_fabric_route_to_hcas: " + "Routing %u dummy HCAs\n", + p_ftree->max_hcas_per_leaf - p_sw->down_port_groups_num); + for ( j = 0; + ((int)j) < (p_ftree->max_hcas_per_leaf - p_sw->down_port_groups_num); + j++) + { + /* assign downgoing ports by stepping up */ + __osm_ftree_fabric_route_downgoing_by_going_up( + p_ftree, + p_sw, /* local switch - used as a route-downgoing alg. start point */ + NULL, /* prev. 
position switch */ + 0, /* LID that we're routing to - ignored for dummy HCA */ + 0, /* rank of the LID that we're routing to - ignored for dummy HCA */ + FALSE, /* whether this HCA LID is real or dummy */ + TRUE); /* whether this path to HCA should be tracked by counters */ + } + } + } + /* done going through all the leaf switches */ + OSM_LOG_EXIT(&p_ftree->p_osm->log); +} /* __osm_ftree_fabric_route_to_hcas() */ + +/***************************************************/ + +/* + * Pseudo code: + * foreach switch in fabric + * obtain its LID + * set local LFT(LID) to port 0 + * call assign-down-going-port-by-descending-up(TRUE,FALSE) on CURRENT switch + * + * Routing to switch is similar to routing a REAL HCA LID on SECONDARY path: + * - we should set fwd tables + * - we should NOT update port counters + */ + +static void +__osm_ftree_fabric_route_to_switches( + IN ftree_fabric_t * p_ftree) +{ + ftree_sw_t * p_sw; + ftree_sw_t * p_next_sw; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_route_to_switches); + + p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl); + while( p_next_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl) ) + { + p_sw = p_next_sw; + p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item ); + + /* set local LFT(LID) to 0 (route to itself) */ + __osm_ftree_sw_set_fwd_table_block(p_sw, + cl_ntoh16(p_sw->base_lid), + 0); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_fabric_route_to_switches: " + "Switch %s (LID 0x%x): routing switch-to-switch paths\n", + __osm_ftree_tuple_to_str(p_sw->tuple), + cl_ntoh16(p_sw->base_lid)); + + /* set min hop table of the switch to itself */ + __osm_ftree_sw_set_hops(p_sw, + p_ftree->lft_max_lid_ho, + cl_ntoh16(p_sw->base_lid), + 0, /* port_num */ + 0);/* hops */ + + __osm_ftree_fabric_route_downgoing_by_going_up( + p_ftree, + p_sw, /* local switch - used as a route-downgoing alg. start point */ + NULL, /* prev.
position switch */ + p_sw->base_lid, /* LID that we're routing to */ + p_sw->rank, /* rank of the LID that we're routing to */ + TRUE, /* whether the target LID is a real or dummy */ + FALSE); /* whether this path should by tracked by counters */ + } + + OSM_LOG_EXIT(&p_ftree->p_osm->log); +} /* __osm_ftree_fabric_route_to_switches() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_fabric_populate_switches( + IN ftree_fabric_t * p_ftree) +{ + osm_switch_t * p_osm_sw; + osm_switch_t * p_next_osm_sw; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_populate_switches); + + p_next_osm_sw = (osm_switch_t *)cl_qmap_head(&p_ftree->p_osm->subn.sw_guid_tbl); + while( p_next_osm_sw != (osm_switch_t *)cl_qmap_end(&p_ftree->p_osm->subn.sw_guid_tbl) ) + { + p_osm_sw = p_next_osm_sw; + p_next_osm_sw = (osm_switch_t *)cl_qmap_next(&p_osm_sw->map_item ); + __osm_ftree_fabric_add_sw(p_ftree,p_osm_sw); + } + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return 0; +} /* __osm_ftree_fabric_populate_switches() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_fabric_populate_hcas( + IN ftree_fabric_t * p_ftree) +{ + osm_node_t * p_osm_node; + osm_node_t * p_next_osm_node; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_populate_hcas); + + p_next_osm_node = (osm_node_t *)cl_qmap_head(&p_ftree->p_osm->subn.node_guid_tbl); + while( p_next_osm_node != (osm_node_t *)cl_qmap_end(&p_ftree->p_osm->subn.node_guid_tbl) ) + { + p_osm_node = p_next_osm_node; + p_next_osm_node = (osm_node_t *)cl_qmap_next(&p_osm_node->map_item); + switch (osm_node_get_type(p_osm_node)) + { + case IB_NODE_TYPE_CA: + __osm_ftree_fabric_add_hca(p_ftree,p_osm_node); + break; + case IB_NODE_TYPE_ROUTER: + break; + case IB_NODE_TYPE_SWITCH: + /* all the switches added separately */ + break; + default: + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_populate_hcas: ERR AB0E: " + "Node GUID 0x%016" PRIx64 " - Unknown node type: %s\n", + cl_ntoh64(osm_node_get_node_guid(p_osm_node)), + ib_get_node_type_str(osm_node_get_type(p_osm_node))); + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return -1; + } + } + + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return 0; +} /* __osm_ftree_fabric_populate_hcas() */ + +/*************************************************** + ***************************************************/ + +static void +__osm_ftree_rank_from_switch( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_starting_sw) +{ + ftree_sw_t * p_sw; + ftree_sw_t * p_remote_sw; + osm_node_t * p_node; + osm_node_t * p_remote_node; + osm_physp_t * p_osm_port; + uint8_t i; + cl_list_t bfs_list; + ftree_sw_tbl_element_t * p_sw_tbl_element = NULL; + + p_starting_sw->rank = 0; + + /* Run BFS scan of the tree, starting from this switch */ + + cl_list_init(&bfs_list, cl_qmap_count(&p_ftree->sw_tbl)); + cl_list_insert_tail(&bfs_list, &__osm_ftree_sw_tbl_element_create(p_starting_sw)->map_item); + + while (!cl_is_list_empty(&bfs_list)) + { + p_sw_tbl_element = (ftree_sw_tbl_element_t *)cl_list_remove_head(&bfs_list); + p_sw = p_sw_tbl_element->p_sw; + __osm_ftree_sw_tbl_element_destroy(p_sw_tbl_element); + + p_node = osm_switch_get_node_ptr(p_sw->p_osm_sw); + + /* note: skipping port 0 on switches */ + for (i = 1; i < osm_node_get_num_physp(p_node); i++) + { + p_osm_port = osm_node_get_physp_ptr(p_node,i); + if (!osm_physp_is_valid(p_osm_port)) + continue; + if 
(!osm_link_is_healthy(p_osm_port)) + continue; + + p_remote_node = osm_node_get_remote_node(p_node,i,NULL); + if (!p_remote_node) + continue; + if (osm_node_get_type(p_remote_node) != IB_NODE_TYPE_SWITCH) + continue; + + p_remote_sw = (ftree_sw_t *)cl_qmap_get(&p_ftree->sw_tbl, + osm_node_get_node_guid(p_remote_node)); + if (p_remote_sw == (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl)) + { + /* remote node is not a switch */ + continue; + } + if (__osm_ftree_sw_ranked(p_remote_sw) && p_remote_sw->rank <= (p_sw->rank + 1)) + continue; + + /* rank the remote switch and add it to the BFS list */ + p_remote_sw->rank = p_sw->rank + 1; + cl_list_insert_tail(&bfs_list, + &__osm_ftree_sw_tbl_element_create(p_remote_sw)->map_item); + } + } +} /* __osm_ftree_rank_from_switch() */ + + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_rank_switches_from_hca( + IN ftree_fabric_t * p_ftree, + IN ftree_hca_t * p_hca) +{ + ftree_sw_t * p_sw; + osm_node_t * p_osm_node = p_hca->p_osm_node; + osm_node_t * p_remote_osm_node; + osm_physp_t * p_osm_port; + uint8_t i; + int res = 0; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_rank_switches_from_hca); + + for (i = 0; i < osm_node_get_num_physp(p_osm_node); i++) + { + p_osm_port = osm_node_get_physp_ptr(p_osm_node,i); + if (!osm_physp_is_valid(p_osm_port)) + continue; + if (!osm_link_is_healthy(p_osm_port)) + continue; + + p_remote_osm_node = osm_node_get_remote_node(p_osm_node,i,NULL); + + switch (osm_node_get_type(p_remote_osm_node)) + { + case IB_NODE_TYPE_CA: + /* HCA connected directly to another HCA - not FatTree */ + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_rank_switches_from_hca: ERR AB0F: " + "HCA connected directly to another HCA: " + "0x%016" PRIx64 " <---> 0x%016" PRIx64 "\n", + cl_ntoh64(osm_node_get_node_guid(p_hca->p_osm_node)), + cl_ntoh64(osm_node_get_node_guid(p_remote_osm_node))); + res = -1; + goto Exit; + + case IB_NODE_TYPE_ROUTER: + /* leaving this port - proceeding to the next one */ + continue; + + case IB_NODE_TYPE_SWITCH: + /* continue with this port */ + break; + + default: + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_rank_switches_from_hca: ERR AB10: " + "Node GUID 0x%016" PRIx64 " - Unknown node type: %s\n", + cl_ntoh64(osm_node_get_node_guid(p_remote_osm_node)), + ib_get_node_type_str(osm_node_get_type(p_remote_osm_node))); + res = -1; + goto Exit; + } + + /* remote node is switch */ + + p_sw = (ftree_sw_t *)cl_qmap_get(&p_ftree->sw_tbl, + p_osm_port->p_remote_physp->p_node->node_info.node_guid); + + CL_ASSERT(p_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl)); + + if (__osm_ftree_sw_ranked(p_sw) && p_sw->rank == 0) + continue; + + osm_log(&p_ftree->p_osm->log, OSM_LOG_DEBUG, + "__osm_ftree_rank_switches_from_hca: " + "Marking rank of switch that is directly connected to HCA:\n" + " - HCA guid : 0x%016" PRIx64 "\n" + " - Switch guid: 0x%016" PRIx64 "\n" + " - Switch LID : 0x%x\n", + cl_ntoh64(osm_node_get_node_guid(p_hca->p_osm_node)), + cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))), + cl_ntoh16(p_sw->base_lid)); + __osm_ftree_rank_from_switch(p_ftree, p_sw); + } + + Exit: + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return res; +} /* __osm_ftree_rank_switches_from_hca() */ + +/***************************************************/ + +static void +__osm_ftree_sw_reverse_rank( + IN cl_map_item_t* const p_map_item, + IN void *context) +{ + ftree_fabric_t * p_ftree =
(ftree_fabric_t *)context; + ftree_sw_t * p_sw = (ftree_sw_t * const) p_map_item; + p_sw->rank = __osm_ftree_fabric_get_rank(p_ftree) - p_sw->rank - 1; +} + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_fabric_construct_hca_ports( + IN ftree_fabric_t * p_ftree, + IN ftree_hca_t * p_hca) +{ + ftree_sw_t * p_remote_sw; + osm_node_t * p_node = p_hca->p_osm_node; + osm_node_t * p_remote_node; + uint8_t remote_node_type; + ib_net64_t remote_node_guid; + osm_physp_t * p_remote_osm_port; + uint8_t i; + uint8_t remote_port_num; + int res = 0; + + for (i = 0; i < osm_node_get_num_physp(p_node); i++) + { + osm_physp_t * p_osm_port = osm_node_get_physp_ptr(p_node,i); + + if (!osm_physp_is_valid(p_osm_port)) + continue; + if (!osm_link_is_healthy(p_osm_port)) + continue; + + p_remote_osm_port = osm_physp_get_remote(p_osm_port); + p_remote_node = osm_node_get_remote_node(p_node,i,&remote_port_num); + + if (!p_remote_osm_port) + continue; + + remote_node_type = osm_node_get_type(p_remote_node); + remote_node_guid = osm_node_get_node_guid(p_remote_node); + + switch (remote_node_type) + { + case IB_NODE_TYPE_ROUTER: + /* leaving this port - proceeding to the next one */ + continue; + + case IB_NODE_TYPE_CA: + /* HCA connected directly to another HCA - not FatTree */ + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_construct_hca_ports: ERR AB11: " + "HCA conected directly to another HCA: " + "0x%016" PRIx64 " <---> 0x%016" PRIx64 "\n", + cl_ntoh64(osm_node_get_node_guid(p_node)), + cl_ntoh64(remote_node_guid)); + res = -1; + goto Exit; + + case IB_NODE_TYPE_SWITCH: + /* continue with this port */ + break; + + default: + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_construct_hca_ports: ERR AB12: " + "Node GUID 0x%016" PRIx64 " - Unknown node type: %s\n", + cl_ntoh64(remote_node_guid), + ib_get_node_type_str(remote_node_type)); + res = -1; + goto Exit; + } + + /* remote node is switch */ + + p_remote_sw = (ftree_sw_t *)cl_qmap_get(&p_ftree->sw_tbl,remote_node_guid); + CL_ASSERT( p_remote_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl) ); + CL_ASSERT( (p_remote_sw->rank + 1) == __osm_ftree_fabric_get_rank(p_ftree) ); + + __osm_ftree_hca_add_port( + p_hca, /* local ftree_hca object */ + i, /* local port number */ + remote_port_num, /* remote port number */ + osm_node_get_base_lid(p_node, i), /* local lid */ + osm_node_get_base_lid(p_remote_node, 0), /* remote lid */ + osm_physp_get_port_guid(p_osm_port), /* local port guid */ + osm_physp_get_port_guid(p_remote_osm_port),/* remote port guid */ + remote_node_guid, /* remote node guid */ + remote_node_type, /* remote node type */ + (void *) p_remote_sw); /* remote ftree_hca/sw object */ + } + + Exit: + return res; +} /* __osm_ftree_fabric_construct_hca_ports() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_fabric_construct_sw_ports( + IN ftree_fabric_t * p_ftree, + IN ftree_sw_t * p_sw) +{ + ftree_hca_t * p_remote_hca; + ftree_sw_t * p_remote_sw; + osm_node_t * p_node = osm_switch_get_node_ptr(p_sw->p_osm_sw); + osm_node_t * p_remote_node; + ib_net16_t remote_base_lid; + uint8_t remote_node_type; + ib_net64_t remote_node_guid; + osm_physp_t * p_remote_osm_port; + ftree_direction_t direction; + void * p_remote_hca_or_sw; + uint8_t i; + uint8_t remote_port_num; + int res = 0; + + CL_ASSERT(osm_node_get_type(p_node) == IB_NODE_TYPE_SWITCH); + 
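+ /*
+ * A minimal sketch of the rank rule that the loop below enforces for
+ * every switch-to-switch link: in a fat tree a link may only connect
+ * switches of adjacent ranks, and the port direction follows from
+ * which side is closer to the roots (rank 0).
+ * sketch_classify_link() is a hypothetical helper, assuming ranks
+ * were already assigned by the BFS ranking above:
+ *
+ * static int
+ * sketch_classify_link(int local_rank, int remote_rank,
+ * ftree_direction_t * p_direction)
+ * {
+ * if (abs(local_rank - remote_rank) != 1)
+ * return -1; - not a legal fat-tree link
+ * if (local_rank > remote_rank)
+ * *p_direction = FTREE_DIRECTION_UP; - toward the roots
+ * else
+ * *p_direction = FTREE_DIRECTION_DOWN; - toward the leaves
+ * return 0;
+ * }
+ */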
+ for (i = 0; i < osm_node_get_num_physp(p_node); i++) + { + osm_physp_t * p_osm_port = osm_node_get_physp_ptr(p_node,i); + + if (!osm_physp_is_valid(p_osm_port)) + continue; + if (!osm_link_is_healthy(p_osm_port)) + continue; + + p_remote_osm_port = osm_physp_get_remote(p_osm_port); + p_remote_node = osm_node_get_remote_node(p_node,i,&remote_port_num); + + if (!p_remote_osm_port) + continue; + + remote_node_type = osm_node_get_type(p_remote_node); + remote_node_guid = osm_node_get_node_guid(p_remote_node); + + switch (remote_node_type) + { + case IB_NODE_TYPE_ROUTER: + /* leaving this port - proceeding to the next one */ + continue; + + case IB_NODE_TYPE_CA: + /* switch connected to hca */ + + CL_ASSERT((p_sw->rank + 1) == __osm_ftree_fabric_get_rank(p_ftree)); + + p_remote_hca = (ftree_hca_t *)cl_qmap_get(&p_ftree->hca_tbl,remote_node_guid); + CL_ASSERT(p_remote_hca != (ftree_hca_t *)cl_qmap_end(&p_ftree->hca_tbl)); + + p_remote_hca_or_sw = (void *)p_remote_hca; + direction = FTREE_DIRECTION_DOWN; + + remote_base_lid = osm_physp_get_base_lid(p_remote_osm_port); + break; + + case IB_NODE_TYPE_SWITCH: + /* switch connected to another switch */ + + p_remote_sw = (ftree_sw_t *)cl_qmap_get(&p_ftree->sw_tbl,remote_node_guid); + CL_ASSERT(p_remote_sw != (ftree_sw_t *)cl_qmap_end(&p_ftree->sw_tbl)); + p_remote_hca_or_sw = (void *)p_remote_sw; + + if (abs(p_sw->rank - p_remote_sw->rank) != 1) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_construct_sw_ports: ERR AB16: " + "Illegal link between switches with ranks %u and %u:\n" + " GUID 0x%016" PRIx64 ", LID 0x%x, rank %u\n" + " GUID 0x%016" PRIx64 ", LID 0x%x, rank %u\n", + p_sw->rank, + p_remote_sw->rank, + cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_sw->p_osm_sw))), + cl_ntoh16(p_sw->base_lid), + p_sw->rank, + cl_ntoh64(osm_node_get_node_guid(osm_switch_get_node_ptr(p_remote_sw->p_osm_sw))), + cl_ntoh16(p_remote_sw->base_lid), + p_remote_sw->rank); + res = -1; + goto Exit; + } + + if (p_sw->rank > p_remote_sw->rank) + direction = FTREE_DIRECTION_UP; + else + direction = FTREE_DIRECTION_DOWN; + + /* switch LID is only in port 0 port_info structure */ + remote_base_lid = osm_node_get_base_lid(p_remote_node, 0); + + break; + + default: + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_construct_sw_ports: ERR AB13: " + "Node GUID 0x%016" PRIx64 " - Unknown node type: %s\n", + cl_ntoh64(remote_node_guid), + ib_get_node_type_str(remote_node_type)); + res = -1; + goto Exit; + } + __osm_ftree_sw_add_port( + p_sw, /* local ftree_sw object */ + i, /* local port number */ + remote_port_num, /* remote port number */ + p_sw->base_lid, /* local lid */ + remote_base_lid, /* remote lid */ + osm_physp_get_port_guid(p_osm_port), /* local port guid */ + osm_physp_get_port_guid(p_remote_osm_port), /* remote port guid */ + remote_node_guid, /* remote node guid */ + remote_node_type, /* remote node type */ + p_remote_hca_or_sw, /* remote ftree_hca/sw object */ + direction); /* port direction (up or down) */ + + /* Track the max lid (in host order) that exists in the fabric */ + if (cl_ntoh16(remote_base_lid) > p_ftree->lft_max_lid_ho) + p_ftree->lft_max_lid_ho = cl_ntoh16(remote_base_lid); + } + + Exit: + return res; +} /* __osm_ftree_fabric_construct_sw_ports() */ + +/*************************************************** + ***************************************************/ + +/* ToDo: improve ranking algorithm complexity + by propogating BFS from more nodes */ +static int 
+__osm_ftree_fabric_perform_ranking( + IN ftree_fabric_t * p_ftree) +{ + ftree_hca_t * p_hca; + ftree_hca_t * p_next_hca; + int res = 0; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_perform_ranking); + + /* Mark REVERSED rank of all the switches in the subnet. + Start from switches that are connected to hca's, and + scan all the switches in the subnet. */ + p_next_hca = (ftree_hca_t *)cl_qmap_head(&p_ftree->hca_tbl); + while( p_next_hca != (ftree_hca_t *)cl_qmap_end( &p_ftree->hca_tbl ) ) + { + p_hca = p_next_hca; + p_next_hca = (ftree_hca_t *)cl_qmap_next(&p_hca->map_item ); + if (__osm_ftree_rank_switches_from_hca(p_ftree,p_hca) != 0) + { + res = -1; + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_perform_ranking: ERR AB14: " + "Subnet ranking failed - subnet is not FatTree"); + goto Exit; + } + } + + /* calculate and set FatTree rank */ + __osm_ftree_fabric_calculate_rank(p_ftree); + osm_log(&p_ftree->p_osm->log, OSM_LOG_INFO, + "__osm_ftree_fabric_perform_ranking: " + "FatTree rank is %u\n", __osm_ftree_fabric_get_rank(p_ftree)); + + /* fix ranking of the switches by reversing the ranking direction */ + cl_qmap_apply_func(&p_ftree->sw_tbl, __osm_ftree_sw_reverse_rank, (void *)p_ftree); + + if ( __osm_ftree_fabric_get_rank(p_ftree) > FAT_TREE_MAX_RANK || + __osm_ftree_fabric_get_rank(p_ftree) < FAT_TREE_MIN_RANK ) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_ERROR, + "__osm_ftree_fabric_perform_ranking: ERR AB15: " + "Tree rank is %u (should be between %u and %u)\n", + __osm_ftree_fabric_get_rank(p_ftree), + FAT_TREE_MIN_RANK, + FAT_TREE_MAX_RANK); + res = -1; + goto Exit; + } + + Exit: + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return res; +} /* __osm_ftree_fabric_perform_ranking() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_fabric_populate_ports( + IN ftree_fabric_t * p_ftree) +{ + ftree_hca_t * p_hca; + ftree_hca_t * p_next_hca; + ftree_sw_t * p_sw; + ftree_sw_t * p_next_sw; + int res = 0; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_fabric_populate_ports); + + p_next_hca = (ftree_hca_t *)cl_qmap_head(&p_ftree->hca_tbl); + while( p_next_hca != (ftree_hca_t *)cl_qmap_end( &p_ftree->hca_tbl ) ) + { + p_hca = p_next_hca; + p_next_hca = (ftree_hca_t *)cl_qmap_next(&p_hca->map_item ); + if (__osm_ftree_fabric_construct_hca_ports(p_ftree,p_hca) != 0) + { + res = -1; + goto Exit; + } + } + + p_next_sw = (ftree_sw_t *)cl_qmap_head(&p_ftree->sw_tbl); + while( p_next_sw != (ftree_sw_t *)cl_qmap_end( &p_ftree->sw_tbl ) ) + { + p_sw = p_next_sw; + p_next_sw = (ftree_sw_t *)cl_qmap_next(&p_sw->map_item ); + if (__osm_ftree_fabric_construct_sw_ports(p_ftree,p_sw) != 0) + { + res = -1; + goto Exit; + } + } + Exit: + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return res; +} /* __osm_ftree_fabric_populate_ports() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_construct_fabric( + IN void * context) +{ + ftree_fabric_t * p_ftree = context; + int status = 0; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_construct_fabric); + + if (p_ftree->p_osm->subn.opt.lmc > 0) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "LMC > 0 is not supported by fat-tree routing.\n" + "Falling back to default routing.\n"); + status = -1; + goto Exit; + } + + if ( cl_qmap_count(&p_ftree->p_osm->subn.sw_guid_tbl) < 2 ) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric has %u switches - 
topology is not fat-tree.\n" + "Falling back to default routing.\n", + cl_qmap_count(&p_ftree->p_osm->subn.sw_guid_tbl)); + status = -1; + goto Exit; + } + + if ( (cl_qmap_count(&p_ftree->p_osm->subn.node_guid_tbl) - + cl_qmap_count(&p_ftree->p_osm->subn.sw_guid_tbl)) < 2) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric has %u nodes (%u switches) - topology is not fat-tree.\n" + "Falling back to default routing.\n", + cl_qmap_count(&p_ftree->p_osm->subn.node_guid_tbl), + cl_qmap_count(&p_ftree->p_osm->subn.sw_guid_tbl)); + status = -1; + goto Exit; + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_construct_fabric: \n" + " |----------------------------------------|\n" + " |- Starting FatTree fabric construction -|\n" + " |----------------------------------------|\n\n"); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: " + "Populating FatTree switch table\n"); + /* ToDo: now that the pointer from node to switch exists, + no need to fill the switch table in a separate loop */ + if (__osm_ftree_fabric_populate_switches(p_ftree) != 0) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric topology is not fat-tree - " + "falling back to default routing\n"); + status = -1; + goto Exit; + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: " + "Populating FatTree HCA table\n"); + if (__osm_ftree_fabric_populate_hcas(p_ftree) != 0) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric topology is not fat-tree - " + "falling back to default routing\n"); + status = -1; + goto Exit; + } + + if (cl_qmap_count(&p_ftree->hca_tbl) < 2) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric has %u HCAs - topology is not fat-tree.\n" + "Falling back to default routing.\n", + cl_qmap_count(&p_ftree->hca_tbl)); + status = -1; + goto Exit; + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: Ranking FatTree\n"); + + if (__osm_ftree_fabric_perform_ranking(p_ftree) != 0) + { + if (__osm_ftree_fabric_get_rank(p_ftree) > FAT_TREE_MAX_RANK) + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric rank is %u (>%u) - " + "fat-tree routing falls back to default routing\n", + __osm_ftree_fabric_get_rank(p_ftree), FAT_TREE_MAX_RANK); + else if (__osm_ftree_fabric_get_rank(p_ftree) < FAT_TREE_MIN_RANK) + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric rank is %u (<%u) - " + "fat-tree routing falls back to default routing\n", + __osm_ftree_fabric_get_rank(p_ftree), FAT_TREE_MIN_RANK); + status = -1; + goto Exit; + } + + /* For each HCA and switch, construct an array of ports. + This is done after the whole FatTree data structure is ready, because + we want the ports to have pointers to ftree_{sw,hca}_t objects.*/ + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: " + "Populating HCA & switch ports\n"); + if (__osm_ftree_fabric_populate_ports(p_ftree) != 0) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric topology is not a fat-tree - " + "routing falls back to default routing\n"); + status = -1; + goto Exit; + } + + /* Assign index to all the switches and HCAs in the fabric. + This function also sorts all the port arrays of the switches + by the remote switch index, creates a leaf switch array + sorted by the switch index, and tracks the maximal number of + HCAs per leaf switch.
*/ + __osm_ftree_fabric_make_indexing(p_ftree); + + /* print general info about fabric topology */ + __osm_ftree_fabric_dump_general_info(p_ftree); + + /* dump full tree topology */ + if (osm_log_is_active(&p_ftree->p_osm->log, OSM_LOG_DEBUG)) + __osm_ftree_fabric_dump(p_ftree); + + if (! __osm_ftree_fabric_validate_topology(p_ftree)) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_SYS, + "Fabric topology is not a fat-tree - " + "routing falls back to default routing\n"); + status = -1; + goto Exit; + } + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: " + "Max LID in switch LFTs (in host order): 0x%x\n", + p_ftree->lft_max_lid_ho); + + Exit: + if (status != 0) + { + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: " + "Clearing FatTree Fabric data structures\n"); + __osm_ftree_fabric_clear(p_ftree); + } + else + p_ftree->fabric_built = TRUE; + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE, + "__osm_ftree_construct_fabric: \n" + " |--------------------------------------------------|\n" + " |- Done constructing FatTree fabric (status = %d) -|\n" + " |--------------------------------------------------|\n\n", + status); + + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return status; +} /* __osm_ftree_construct_fabric() */ + +/*************************************************** + ***************************************************/ + +static int +__osm_ftree_do_routing( + IN void * context) +{ + ftree_fabric_t * p_ftree = context; + + OSM_LOG_ENTER(&p_ftree->p_osm->log, __osm_ftree_do_routing); + + if (!p_ftree->fabric_built) + goto Exit; + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_do_routing: " + "Starting FatTree routing\n"); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_do_routing: " + "Filling switch forwarding tables for routes to HCAs\n"); + __osm_ftree_fabric_route_to_hcas(p_ftree); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_do_routing: " + "Filling switch forwarding tables for switch-to-switch paths\n"); + __osm_ftree_fabric_route_to_switches(p_ftree); + + /* for each switch, set its fwd table */ + cl_qmap_apply_func(&p_ftree->sw_tbl, __osm_ftree_set_sw_fwd_table, (void *)p_ftree); + + /* write out hca ordering file */ + __osm_ftree_fabric_dump_hca_ordering(p_ftree); + + osm_log(&p_ftree->p_osm->log, OSM_LOG_VERBOSE,"__osm_ftree_do_routing: " + "FatTree routing is done\n"); + + Exit: + OSM_LOG_EXIT(&p_ftree->p_osm->log); + return 0; +} + +/*************************************************** + ***************************************************/ + +static void +__osm_ftree_delete( + IN void * context) +{ + if (!context) + return; + __osm_ftree_fabric_destroy((ftree_fabric_t *)context); +} + +/*************************************************** + ***************************************************/ + +int osm_ucast_ftree_setup(osm_opensm_t * p_osm) +{ + ftree_fabric_t * p_ftree = __osm_ftree_fabric_create(); + if (!p_ftree) + return -1; + + p_ftree->p_osm = p_osm; + + p_osm->routing_engine.context = (void *)p_ftree; + p_osm->routing_engine.build_lid_matrices = __osm_ftree_construct_fabric; + p_osm->routing_engine.ucast_build_fwd_tables = __osm_ftree_do_routing; + p_osm->routing_engine.delete = __osm_ftree_delete; + return 0; +} + +/*************************************************** + ***************************************************/ + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_mgr.c b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_mgr.c new file mode
100644 index 00000000..27c87680 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_mgr.c @@ -0,0 +1,1277 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of osm_ucast_mgr_t. + * This file implements the Unicast Manager object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.14 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#define LINE_LENGTH 256 + +/********************************************************************** + **********************************************************************/ +/* + * This flag is used for stopping the relaxation algorithm if no + * change detected during the fabric scan + */ +static boolean_t __some_hop_count_set; + +/********************************************************************** + **********************************************************************/ +void +osm_ucast_mgr_construct( + IN osm_ucast_mgr_t* const p_mgr ) +{ + memset( p_mgr, 0, sizeof(*p_mgr) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ucast_mgr_destroy( + IN osm_ucast_mgr_t* const p_mgr ) +{ + CL_ASSERT( p_mgr ); + + OSM_LOG_ENTER( p_mgr->p_log, osm_ucast_mgr_destroy ); + + if (p_mgr->lft_buf) + free(p_mgr->lft_buf); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_ucast_mgr_init( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_ucast_mgr_init ); + + CL_ASSERT( p_req ); + CL_ASSERT( p_subn ); + CL_ASSERT( p_lock ); + + osm_ucast_mgr_construct( p_mgr ); + + p_mgr->p_log = p_log; + p_mgr->p_subn = p_subn; + p_mgr->p_lock = p_lock; + p_mgr->p_req = p_req; + + p_mgr->lft_buf = malloc(IB_LID_UCAST_END_HO + 1); + if (!p_mgr->lft_buf) + return 
IB_INSUFFICIENT_MEMORY; + + OSM_LOG_EXIT( p_mgr->p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +struct ucast_mgr_dump_context { + osm_ucast_mgr_t *p_mgr; + FILE *file; +}; + +static void +ucast_mgr_dump(osm_ucast_mgr_t *p_mgr, FILE *file, + void (*func)(cl_map_item_t *, void *)) +{ + struct ucast_mgr_dump_context dump_context; + + dump_context.p_mgr = p_mgr; + dump_context.file = file; + + cl_qmap_apply_func(&p_mgr->p_subn->sw_guid_tbl, func, &dump_context); +} + +void +ucast_mgr_dump_to_file(osm_ucast_mgr_t *p_mgr, const char *file_name, + void (*func)(cl_map_item_t *, void *)) +{ + char path[1024]; + FILE *file; + + snprintf(path, sizeof(path), "%s/%s", + p_mgr->p_subn->opt.dump_files_dir, file_name); + + file = fopen(path, "w"); + if (!file) { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "ucast_mgr_dump_to_file: ERR 3A12: " + "Failed to open fdb file (%s)\n", path ); + return; + } + + ucast_mgr_dump(p_mgr, file, func); + + fclose(file); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_dump_path_distribution( + IN cl_map_item_t *p_map_item, + IN void *cxt) +{ + osm_node_t *p_node; + osm_node_t *p_remote_node; + uint8_t i; + uint8_t num_ports; + uint32_t num_paths; + ib_net64_t remote_guid_ho; + osm_switch_t* p_sw = (osm_switch_t *)p_map_item; + osm_ucast_mgr_t* p_mgr = ((struct ucast_mgr_dump_context *)cxt)->p_mgr; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_dump_path_distribution ); + + p_node = osm_switch_get_node_ptr( p_sw ); + num_ports = osm_switch_get_num_ports( p_sw ); + + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_dump_path_distribution: " + "Switch 0x%" PRIx64 "\n" + "Port : Path Count Through Port", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + + for( i = 0; i < num_ports; i++ ) + { + num_paths = osm_switch_path_count_get( p_sw , i ); + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG,"\n %03u : %u", i, num_paths ); + if( i == 0 ) + { + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " (switch management port)" ); + continue; + } + + p_remote_node = osm_node_get_remote_node( p_node, i, NULL ); + if( p_remote_node == NULL ) + continue; + + remote_guid_ho = cl_ntoh64( osm_node_get_node_guid( p_remote_node ) ); + + switch( osm_node_get_remote_type( p_node, i ) ) + { + case IB_NODE_TYPE_SWITCH: + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " (link to switch" ); + break; + case IB_NODE_TYPE_ROUTER: + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " (link to router" ); + break; + case IB_NODE_TYPE_CA: + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " (link to CA" ); + break; + default: + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " (link to unknown node type" ); + break; + } + + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, " 0x%" PRIx64 ")", + remote_guid_ho ); + } + + osm_log_printf( p_mgr->p_log, OSM_LOG_DEBUG, "\n" ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_dump_ucast_routes( + IN cl_map_item_t *p_map_item, + IN void *cxt ) +{ + const osm_node_t* p_node; + uint8_t port_num; + uint8_t num_hops; + uint8_t best_hops; + uint8_t best_port; + uint16_t max_lid_ho; + uint16_t lid_ho; + osm_switch_t* p_sw = (osm_switch_t 
*)p_map_item; + osm_ucast_mgr_t* p_mgr = ((struct ucast_mgr_dump_context *)cxt)->p_mgr; + FILE *file = ((struct ucast_mgr_dump_context *)cxt)->file; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_dump_ucast_routes ); + + p_node = osm_switch_get_node_ptr( p_sw ); + + max_lid_ho = osm_switch_get_max_lid_ho( p_sw ); + + fprintf( file, "__osm_ucast_mgr_dump_ucast_routes: " + "Switch 0x%016" PRIx64 "\n" + "LID : Port : Hops : Optimal\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + for( lid_ho = 1; lid_ho <= max_lid_ho; lid_ho++ ) + { + fprintf(file, "0x%04X : ", lid_ho); + + port_num = osm_switch_get_port_by_lid( p_sw, lid_ho ); + if( port_num == OSM_NO_PATH ) + { + /* + This may occur if there are 'holes' in the existing + LID assignments. Running SM with --reassign_lids + will reassign and compress the LID range. The + subnet should work fine either way. + */ + fprintf( file, "UNREACHABLE\n" ); + continue; + } + /* + Switches can lie about which port routes a given + lid due to a recent reconfiguration of the subnet. + Therefore, ensure that the hop count is better than + OSM_NO_PATH. + */ + num_hops = osm_switch_get_hop_count( p_sw, lid_ho, port_num ); + if( num_hops == OSM_NO_PATH ) + { + fprintf( file, "UNREACHABLE\n" ); + continue; + } + + best_hops = osm_switch_get_least_hops( p_sw, lid_ho ); + fprintf( file, "%03u : %02u : ", port_num, num_hops ); + + if( best_hops == num_hops ) + fprintf( file, "yes" ); + else + { + best_port = osm_switch_recommend_path( + p_sw, lid_ho, TRUE, + NULL, NULL, NULL, NULL ); /* No LMC Optimization */ + fprintf( file, "No %u hop path possible via port %u!", + best_hops, best_port ); + } + + fprintf( file, "\n" ); + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +ucast_mgr_dump_lid_matrix(cl_map_item_t *p_map_item, void *cxt) +{ + osm_switch_t* p_sw = (osm_switch_t *)p_map_item; + osm_ucast_mgr_t* p_mgr = ((struct ucast_mgr_dump_context *)cxt)->p_mgr; + FILE *file = ((struct ucast_mgr_dump_context *)cxt)->file; + osm_node_t *p_node = osm_switch_get_node_ptr(p_sw); + unsigned max_lid = osm_switch_get_max_lid_ho(p_sw); + unsigned max_port = osm_switch_get_num_ports(p_sw); + uint16_t lid; + uint8_t port; + + fprintf(file, "Switch: guid 0x%016" PRIx64 "\n", + cl_ntoh64(osm_node_get_node_guid(p_node))); + for (lid = 1; lid <= max_lid; lid++) { + osm_port_t *p_port; + + fprintf(file, "0x%04x:", lid); + for (port = 0 ; port < max_port ; port++) + fprintf(file, " %02x", + osm_switch_get_hop_count(p_sw, lid, port)); + p_port = cl_ptr_vector_get(&p_mgr->p_subn->port_lid_tbl, lid); + if (p_port) + fprintf(file, " # portguid 0x%" PRIx64, + cl_ntoh64(osm_port_get_guid(p_port))); + fprintf(file, "\n"); + } +} + +/********************************************************************** + **********************************************************************/ +void +ucast_mgr_dump_lfts(cl_map_item_t *p_map_item, void *cxt) +{ + osm_switch_t* p_sw = (osm_switch_t *)p_map_item; + osm_ucast_mgr_t* p_mgr = ((struct ucast_mgr_dump_context *)cxt)->p_mgr; + FILE *file = ((struct ucast_mgr_dump_context *)cxt)->file; + osm_node_t *p_node = osm_switch_get_node_ptr(p_sw); + unsigned max_lid = osm_switch_get_max_lid_ho(p_sw); + unsigned max_port = osm_switch_get_num_ports(p_sw); + uint16_t lid; + uint8_t port; + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + + memcpy(desc, p_node->node_desc.description, IB_NODE_DESCRIPTION_SIZE); 
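+ /*
+ * The copy above and the termination below implement the standard
+ * idiom for IB NodeDescription: it is a fixed 64-byte field that is
+ * not guaranteed to be NUL-terminated on the wire, so it is copied
+ * into a buffer of IB_NODE_DESCRIPTION_SIZE + 1 bytes and terminated
+ * explicitly. The same idiom as a stand-alone sketch (hypothetical
+ * helper name):
+ *
+ * static void
+ * sketch_copy_node_desc(char dst[IB_NODE_DESCRIPTION_SIZE + 1],
+ * const uint8_t * src)
+ * {
+ * memcpy(dst, src, IB_NODE_DESCRIPTION_SIZE);
+ * dst[IB_NODE_DESCRIPTION_SIZE] = '\0';
+ * }
+ */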
+ desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + fprintf(file, "Unicast lids [0x0-0x%x] of switch Lid %u guid 0x%016" + PRIx64 " (\'%s\'):\n", + max_lid, cl_ntoh16(osm_node_get_base_lid(p_node, 0)), + cl_ntoh64(osm_node_get_node_guid(p_node)), desc); + for (lid = 0; lid <= max_lid; lid++) { + osm_port_t *p_port; + port = osm_switch_get_port_by_lid(p_sw, lid); + + if (port >= max_port) + continue; + + fprintf(file, "0x%04x %03u # ", lid, port); + + p_port = cl_ptr_vector_get(&p_mgr->p_subn->port_lid_tbl, lid); + if (p_port) { + p_node = osm_port_get_parent_node(p_port); + memcpy(desc, p_node->node_desc.description, + IB_NODE_DESCRIPTION_SIZE); + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + fprintf(file, "%s portguid 0x%016" PRIx64 ": \'%s\'", + ib_get_node_type_str(osm_node_get_type(p_node)), + cl_ntoh64(osm_port_get_guid(p_port)), desc); + } + else + fprintf(file, "unknown node and type"); + fprintf(file, "\n"); + } + fprintf(file, "%u lids dumped\n", max_lid); +} + +/********************************************************************** + **********************************************************************/ +static void __osm_ucast_mgr_dump_tables(osm_ucast_mgr_t *p_mgr) +{ + ucast_mgr_dump_to_file(p_mgr, "opensm-lid-matrix.dump", + ucast_mgr_dump_lid_matrix); + ucast_mgr_dump_to_file(p_mgr, "opensm-lfts.dump", ucast_mgr_dump_lfts); + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + ucast_mgr_dump(p_mgr, NULL, __osm_ucast_mgr_dump_path_distribution); + ucast_mgr_dump_to_file(p_mgr, "osm.fdbs", __osm_ucast_mgr_dump_ucast_routes); +} + +/********************************************************************** + Starting a rebuild, so notify the switch so it can clear tables, etc... +**********************************************************************/ +static void +__osm_ucast_mgr_clean_switch( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_switch_prepare_path_rebuild((osm_switch_t *)p_map_item); +} + +/********************************************************************** + Add each switch's own LID(s) to its LID matrix.
+**********************************************************************/ +static void +__osm_ucast_mgr_process_hop_0( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + osm_ucast_mgr_t* const p_mgr = (osm_ucast_mgr_t*)context; + osm_node_t *p_node; + uint16_t lid_ho, base_lid_ho, max_lid_ho; + cl_status_t status; + uint8_t lmc; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_hop_0 ); + + p_node = p_sw->p_node; + + CL_ASSERT( p_node ); + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + base_lid_ho = cl_ntoh16( osm_node_get_base_lid( p_node, 0 ) ); + if (osm_switch_sp0_is_lmc_capable( p_sw, p_mgr->p_subn )) + lmc = osm_node_get_lmc( p_node, 0 ); + else + lmc = 0; + max_lid_ho = (uint16_t)( base_lid_ho + (1 << lmc) - 1 ); + + for (lid_ho = base_lid_ho; lid_ho <= max_lid_ho; lid_ho++) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_hop_0: " + "Processing switch GUID 0x%" PRIx64 ", LID 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + lid_ho ); + } + + status = osm_switch_set_hops( p_sw, lid_ho, 0, 0 ); + if( status != CL_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_hop_0: ERR 3A02: " + "Setting hop count failed (%s) for " + "switch GUID 0x%" PRIx64 ", LID 0x%X\n", + CL_STATUS_MSG( status ), + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + lid_ho ); + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_neighbor( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_switch_t* const p_sw, + IN osm_switch_t* const p_remote_sw, + IN const uint8_t port_num, + IN const uint8_t remote_port_num ) +{ + uint16_t lid_ho; + uint16_t max_lid_ho; + osm_node_t* p_node; + const osm_node_t* p_remote_node; + uint8_t hops; + cl_status_t status; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_neighbor ); + + CL_ASSERT( p_sw ); + CL_ASSERT( p_remote_sw ); + CL_ASSERT( port_num ); + CL_ASSERT( remote_port_num ); + + p_node = osm_switch_get_node_ptr( p_sw ); + p_remote_node = osm_switch_get_node_ptr( p_remote_sw ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_remote_node ); + + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + CL_ASSERT( osm_node_get_type( p_remote_node ) == IB_NODE_TYPE_SWITCH ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_neighbor: " + "Node 0x%" PRIx64 ", remote node 0x%" PRIx64 "\n" + "\t\t\t\tport 0x%X, remote port 0x%X\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ), + cl_ntoh64( osm_node_get_node_guid( p_remote_node ) ), + port_num, remote_port_num ); + } + + /* + Iterate through all the LIDs in the neighbor switch. + */ + max_lid_ho = osm_switch_get_max_lid_ho( p_remote_sw ); + + /* + Make sure the local lid matrix has enough room to hold + all the LID info coming from the remote LID matrix. + */ + osm_switch_set_min_lid_size( p_sw, max_lid_ho ); + + hops = OSM_NO_PATH; + for( lid_ho = 1; lid_ho <= max_lid_ho; lid_ho++ ) + { + /* + Find the lowest hop count value to this LID. + */ + hops = osm_switch_get_least_hops( p_remote_sw, lid_ho ); + + if( hops != OSM_NO_PATH ) + { + /* + Increment hop count of the neighbor by 1, since it + takes 1 hop to get to the neighbor. 
+ */ + hops++; + + CL_ASSERT( hops <= osm_switch_get_hop_count( p_sw, lid_ho, + port_num ) ); + if( osm_switch_get_hop_count( p_sw, lid_ho, + port_num ) > hops ) + { + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_neighbor: " + "New best path is %u hops for LID 0x%X\n", + hops, lid_ho ); + } + + /* mark the fact we have got to change anything */ + __some_hop_count_set = TRUE; + + status = osm_switch_set_hops( p_sw, lid_ho, + port_num, hops ); + if( status != CL_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_neighbor: ERR 3A03: " + "Setting hop count failed (%s)\n", + CL_STATUS_MSG( status ) ); + } + } + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_leaf( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_switch_t* const p_sw, + IN osm_node_t* const p_node, + IN const uint8_t port_num, + IN osm_node_t* const p_remote_node, + IN const uint8_t remote_port_num ) +{ + uint16_t i; + uint16_t base_lid_ho; + uint16_t max_lid_ho; + uint8_t lmc; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_leaf ); + + CL_ASSERT( p_node ); + CL_ASSERT( p_remote_node ); + CL_ASSERT( port_num ); + CL_ASSERT( remote_port_num ); + + switch( osm_node_get_type( p_remote_node ) ) + { + case IB_NODE_TYPE_CA: + case IB_NODE_TYPE_ROUTER: + base_lid_ho = cl_ntoh16( osm_node_get_base_lid( + p_remote_node, remote_port_num ) ); + lmc = osm_node_get_lmc( p_remote_node, remote_port_num ); + break; +#if 0 + case IB_NODE_TYPE_SWITCH: + base_lid_ho = cl_ntoh16( osm_node_get_base_lid( + p_remote_node, 0 ) ); + lmc = 0; + break; +#endif + + default: + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_leaf: ERR 3A01: " + "Bad node type %u, GUID 0x%" PRIx64 "\n", + osm_node_get_type( p_remote_node ), + cl_ntoh64( osm_node_get_node_guid( p_node ) )); + goto Exit; + } + + max_lid_ho = (uint16_t)(base_lid_ho + (1 << lmc) - 1 ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_leaf: " + "Discovered LIDs [0x%X,0x%X]\n" + "\t\t\t\tport number 0x%X, node 0x%" PRIx64 "\n", + base_lid_ho, max_lid_ho, + port_num, cl_ntoh64( osm_node_get_node_guid( p_node ) )); + } + + for( i = base_lid_ho; i <= max_lid_ho; i++ ) + osm_switch_set_hops( p_sw, i, port_num, 1 ); + + Exit: + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_leaves( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + osm_ucast_mgr_t* const p_mgr = (osm_ucast_mgr_t*)context; + osm_node_t *p_node; + osm_node_t *p_remote_node; + uint32_t port_num; + uint8_t remote_port_num; + uint32_t num_ports; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_leaves ); + + p_node = p_sw->p_node; + + CL_ASSERT( p_node ); + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_leaves: " + "Processing switch 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) )); + } + + /* + Add the LIDs of all leaves of this switch to the LID 
matrix. + Don't bother processing loopback paths from one port of + this switch to the another port. + Don't process neighbor switches yet. + Start with port 1 to skip the switch's management port. + */ + num_ports = osm_node_get_num_physp( p_node ); + + for( port_num = 1; port_num < num_ports; port_num++ ) + { + p_remote_node = osm_node_get_remote_node( p_node, + (uint8_t)port_num, &remote_port_num ); + + if( p_remote_node && (p_remote_node != p_node ) + && (osm_node_get_type( p_remote_node ) != IB_NODE_TYPE_SWITCH ) ) + { + __osm_ucast_mgr_process_leaf( + p_mgr, + p_sw, + p_node, + (uint8_t)port_num, + p_remote_node, + remote_port_num ); + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_port( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_switch_t* const p_sw, + IN const osm_port_t* const p_port ) +{ + uint16_t min_lid_ho; + uint16_t max_lid_ho; + uint16_t lid_ho; + uint8_t port; + boolean_t is_ignored_by_port_prof; + ib_net64_t node_guid; + /* + The following are temporary structures that will aid + in providing better routing in LMC > 0 situations + */ + uint16_t lids_per_port = 1 << p_mgr->p_subn->opt.lmc; + uint64_t *remote_sys_guids = NULL; + uint64_t *remote_node_guids = NULL; + uint16_t num_used_sys = 0; + uint16_t num_used_nodes = 0; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_port ); + + remote_sys_guids = malloc( sizeof(uint64_t) * lids_per_port ); + if( remote_sys_guids == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_port: ERR 3A09: " + "Cannot allocate array. Insufficient memory\n"); + goto Exit; + } + + memset( remote_sys_guids, 0, sizeof(uint64_t) * lids_per_port ); + + remote_node_guids = malloc( sizeof(uint64_t) * lids_per_port ); + if( remote_node_guids == NULL ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_port: ERR 3A0A: " + "Cannot allocate array. Insufficient memory\n"); + goto Exit; + } + + memset( remote_node_guids, 0, sizeof(uint64_t) * lids_per_port ); + + osm_port_get_lid_range_ho( p_port, &min_lid_ho, &max_lid_ho ); + + /* If the lids are zero - then there was some problem with the initialization. + Don't handle this port. */ + if ( min_lid_ho == 0 || max_lid_ho == 0 ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_port: ERR 3A04: " + "Port 0x%" PRIx64 " has LID 0. An initialization " + "error occurred. Ignoring port\n", + cl_ntoh64( osm_port_get_guid( p_port ) ) ); + goto Exit; + } + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_port: " + "Processing port 0x%" PRIx64 + ", LIDs [0x%X,0x%X]\n", + cl_ntoh64( osm_port_get_guid( p_port ) ), + min_lid_ho, max_lid_ho ); + } + + /* + TO DO - This should be runtime error, not a CL_ASSERT() + */ + CL_ASSERT( max_lid_ho < osm_switch_get_fwd_tbl_size( p_sw ) ); + + node_guid = osm_node_get_node_guid(osm_switch_get_node_ptr( p_sw ) ); + + /* + The lid matrix contains the number of hops to each + lid from each port. From this information we determine + how best to distribute the LID range across the ports + that can reach those LIDs. 
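+
+    For example (illustrative numbers): with LMC = 1 a CA owns two
+    consecutive LIDs, say 0x10 and 0x11. If this switch reaches that CA
+    at equal hop count through ports 3 and 5, osm_switch_recommend_path()
+    may hand LID 0x10 to port 3 and LID 0x11 to port 5, spreading the
+    lid range over both links instead of loading one of them twice.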
+ */ + for( lid_ho = min_lid_ho; lid_ho <= max_lid_ho; lid_ho++ ) + { + /* Use the enhanced algorithm only for LMC > 0 */ + if (lids_per_port > 1) + port = osm_switch_recommend_path( p_sw, lid_ho, + p_mgr->p_subn->ignore_existing_lfts, + remote_sys_guids, &num_used_sys, + remote_node_guids, &num_used_nodes ); + else + port = osm_switch_recommend_path( p_sw, lid_ho, + p_mgr->p_subn->ignore_existing_lfts, + NULL, NULL, NULL, NULL ); + + /* + There might be no path to the target + */ + if (port == OSM_NO_PATH) + { + /* do not try to overwrite the ppro of non existing port ... */ + is_ignored_by_port_prof = TRUE; + + /* Up/Down routing can cause unreachable routes between some + switches so we do not report that as an error in that case */ + if (!p_mgr->p_subn->p_osm->routing_engine.build_lid_matrices) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "__osm_ucast_mgr_process_port: ERR 3A08: " + "No path to get to LID 0x%X from switch 0x%" PRIx64 "\n", + lid_ho, cl_ntoh64( node_guid ) ); + /* trigger a new sweep - try again ... */ + p_mgr->p_subn->subnet_initialization_error = TRUE; + } + else + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_port: " + "No path to get to LID 0x%X from switch 0x%" PRIx64 "\n", + lid_ho, cl_ntoh64( node_guid ) ); + } + else + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_port: " + "Routing LID 0x%X to port 0x%X" + "\n\t\t\t\tFor switch 0x%" PRIx64 "\n", + lid_ho, port, cl_ntoh64( node_guid ) ); + + /* + we would like to optionally ignore this port in equalization + like in the case of the Mellanox Anafa Internal PCI TCA port + */ + is_ignored_by_port_prof = + osm_port_prof_is_ignored_port(p_mgr->p_subn, cl_ntoh64(node_guid), port); + + /* + We also would ignore this route if the target lid is of a switch + and the port_profile_switch_node is not TRUE + */ + if (! p_mgr->p_subn->opt.port_profile_switch_nodes) + { + is_ignored_by_port_prof |= + (osm_node_get_type(osm_port_get_parent_node(p_port)) == + IB_NODE_TYPE_SWITCH); + } + } + + /* + We have selected the port for this LID. + Write it to the forwarding tables. + */ + p_mgr->lft_buf[lid_ho] = port; + if (!is_ignored_by_port_prof) + osm_switch_count_path(p_sw, port); + } + + Exit: + if (remote_sys_guids) + free(remote_sys_guids); + if (remote_node_guids) + free(remote_node_guids); + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ucast_mgr_set_fwd_table( + IN osm_ucast_mgr_t* const p_mgr, + IN osm_switch_t* const p_sw ) +{ + osm_node_t *p_node; + osm_dr_path_t *p_path; + osm_madw_context_t context; + ib_api_status_t status; + ib_switch_info_t si; + uint32_t block_id_ho = 0; + uint8_t block[IB_SMP_DATA_SIZE]; + boolean_t set_swinfo_require = FALSE; + uint16_t lin_top; + uint8_t life_state; + + CL_ASSERT( p_mgr ); + + OSM_LOG_ENTER( p_mgr->p_log, osm_ucast_mgr_set_fwd_table ); + + CL_ASSERT( p_sw ); + + p_node = osm_switch_get_node_ptr( p_sw ); + + CL_ASSERT( p_node ); + + p_path = osm_node_get_any_dr_path_ptr( p_node ); + + CL_ASSERT( p_path ); + + /* + Set the top of the unicast forwarding table. + */ + si = *osm_switch_get_si_ptr( p_sw ); + lin_top = cl_hton16( osm_switch_get_max_lid_ho( p_sw ) ); + if (lin_top != si.lin_top) + { + set_swinfo_require = TRUE; + si.lin_top = lin_top; + } + + /* check to see if the change state bit is on. If it is - then we + need to clear it. 
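+
+    A sketch of the byte layout assumed here: bits 7:3 of life_state hold
+    the LifeTimeValue and bit 2 (IB_SWITCH_PSC) is the PortStateChange
+    indication. E.g. with opt.packet_life_time = 0x12, (0x12 << 3) gives
+    0x90; if PSC is currently set we OR it back in (0x90 | 0x04 = 0x94),
+    since writing the bit back as 1 in the Set is what resets it.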
*/ + if ( ib_switch_info_get_state_change( &si ) ) + life_state = ( (p_mgr->p_subn->opt.packet_life_time <<3 ) + | ( si.life_state & IB_SWITCH_PSC ) ) & 0xfc; + else + life_state = (p_mgr->p_subn->opt.packet_life_time <<3 ) & 0xf8; + + if ( (life_state != si.life_state) || ib_switch_info_get_state_change( &si ) ) + { + set_swinfo_require = TRUE; + si.life_state = life_state; + } + + if ( set_swinfo_require ) + { + if ( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_ucast_mgr_set_fwd_table: " + "Setting switch FT top to LID 0x%X\n", + osm_switch_get_max_lid_ho( p_sw ) ); + } + + context.si_context.light_sweep = FALSE; + context.si_context.node_guid = osm_node_get_node_guid( p_node ); + context.si_context.set_method = TRUE; + + status = osm_req_set( p_mgr->p_req, + p_path, + (uint8_t*)&si, + sizeof(si), + IB_MAD_ATTR_SWITCH_INFO, + 0, + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_ucast_mgr_set_fwd_table: ERR 3A06: " + "Sending SwitchInfo attribute failed (%s)\n", + ib_get_err_str( status ) ); + } + else + p_mgr->any_change = TRUE; + } + + /* + Send linear forwarding table blocks to the switch + as long as the switch indicates it has blocks needing + configuration. + */ + + context.lft_context.node_guid = osm_node_get_node_guid( p_node ); + context.lft_context.set_method = TRUE; + + for (block_id_ho = 0; + osm_switch_get_fwd_tbl_block( p_sw, block_id_ho, block ) ; + block_id_ho++ ) + { + if (!memcmp(block, p_mgr->lft_buf + block_id_ho * 64, 64)) + continue; + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_ucast_mgr_set_fwd_table: " + "Writing FT block %u\n", block_id_ho ); + } + + status = osm_req_set( p_mgr->p_req, + p_path, + p_mgr->lft_buf + block_id_ho * 64, + sizeof(block), + IB_MAD_ATTR_LIN_FWD_TBL, + cl_hton32( block_id_ho ), + CL_DISP_MSGID_NONE, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( p_mgr->p_log, OSM_LOG_ERROR, + "osm_ucast_mgr_set_fwd_table: ERR 3A05: " + "Sending linear fwd. tbl. block failed (%s)\n", + ib_get_err_str( status ) ); + } + else + { + p_mgr->any_change = TRUE; + /* + HACK: for now we will assume we succeeded to send + and set the local DB based on it. This should allow + us to immediatly dump out our routing. + */ + osm_switch_set_ft_block( + p_sw, p_mgr->lft_buf + block_id_ho * 64, block_id_ho ); + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_tbl( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + osm_ucast_mgr_t* const p_mgr = (osm_ucast_mgr_t*)context; + osm_node_t *p_node; + const osm_port_t *p_port; + const cl_qmap_t* p_port_tbl; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_tbl ); + + p_node = p_sw->p_node; + + CL_ASSERT( p_node ); + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_tbl: " + "Processing switch 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) )); + } + + /* Initialize LIDs in buffer to invalid port number. 
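+     0xff is the same value as OSM_NO_PATH, so any LID that no
+     __osm_ucast_mgr_process_port() call below claims stays recognizably
+     unrouted when the blocks are compared and written out.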
*/ + memset(p_mgr->lft_buf, 0xff, IB_LID_UCAST_END_HO + 1); + + p_port_tbl = &p_mgr->p_subn->port_guid_tbl; + + /* + Iterate through every port setting LID routes for each + port based on base LID and LMC value. + */ + + for( p_port = (osm_port_t*)cl_qmap_head( p_port_tbl ); + p_port != (osm_port_t*)cl_qmap_end( p_port_tbl ); + p_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ) ) + { + __osm_ucast_mgr_process_port( p_mgr, p_sw, p_port ); + } + + osm_ucast_mgr_set_fwd_table( p_mgr, p_sw ); + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_ucast_mgr_process_neighbors( + IN cl_map_item_t* const p_map_item, + IN void* context ) +{ + osm_switch_t* const p_sw = (osm_switch_t*)p_map_item; + osm_ucast_mgr_t* const p_mgr = (osm_ucast_mgr_t*)context; + osm_node_t *p_node; + osm_node_t *p_remote_node; + uint32_t port_num; + uint8_t remote_port_num; + uint32_t num_ports; + osm_physp_t* p_physp; + + OSM_LOG_ENTER( p_mgr->p_log, __osm_ucast_mgr_process_neighbors ); + + p_node = p_sw->p_node; + + CL_ASSERT( p_node ); + CL_ASSERT( osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH ); + + if( osm_log_is_active( p_mgr->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "__osm_ucast_mgr_process_neighbors: " + "Processing switch with GUID 0x%" PRIx64 "\n", + cl_ntoh64( osm_node_get_node_guid( p_node ) ) ); + } + + num_ports = osm_node_get_num_physp( p_node ); + + /* + Start with port 1 to skip the switch's management port. + */ + for( port_num = 1; port_num < num_ports; port_num++ ) + { + p_remote_node = osm_node_get_remote_node( p_node, + (uint8_t)port_num, &remote_port_num ); + + if( p_remote_node && (p_remote_node != p_node ) + && p_remote_node->sw ) + { + /* make sure the link is healthy. If it is not - don't + propagate through it. */ + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + if (!osm_link_is_healthy( p_physp ) ) + continue; + + __osm_ucast_mgr_process_neighbor(p_mgr, p_sw, p_remote_node->sw, + (uint8_t)port_num, remote_port_num ); + + } + } + + OSM_LOG_EXIT( p_mgr->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_ucast_mgr_build_lid_matrices( + IN osm_ucast_mgr_t* const p_mgr ) +{ + uint32_t i; + uint32_t iteration_max; + cl_qmap_t *p_sw_guid_tbl; + + p_sw_guid_tbl = &p_mgr->p_subn->sw_guid_tbl; + + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "osm_ucast_mgr_build_lid_matrices: " + "Starting switches Min Hop Table Assignment\n" ); + + /* + Set the switch matrices for each switch's own port 0 LID(s) + then set the lid matrices for the each switch's leaf nodes. + */ + cl_qmap_apply_func( p_sw_guid_tbl, + __osm_ucast_mgr_process_hop_0, p_mgr ); + + cl_qmap_apply_func( p_sw_guid_tbl, + __osm_ucast_mgr_process_leaves, p_mgr ); + + /* + Get the switch matrices for each switch's neighbors. + This process requires a number of iterations equal to + the number of switches in the subnet minus 1. + + In each iteration, a switch learns the lid/port/hop + information (as contained by a switch's lid matrix) from + its immediate neighbors. After each iteration, a switch + (and it's neighbors) know more routing information than + it did on the previous iteration. 
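+
+    For example, in a chain of switches A - B - C - D, after one
+    iteration B has learned 1-hop entries for A's and C's LIDs; after
+    the second, A picks up 2-hop entries to C through B, and so on, so
+    a chain of N switches settles in at most N-1 iterations.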
+ Thus, by repeatedly absorbing the routing information of + neighbor switches, every switch eventually learns how to + route all LIDs on the subnet. + + Note that there may not be any switches in the subnet if + we are in simple p2p configuration. + */ + iteration_max = cl_qmap_count( p_sw_guid_tbl ); + + /* + If there are switches in the subnet, iterate until the lid + matrix has been constructed. Otherwise, just immediately + indicate we're done if no switches exist. + */ + if( iteration_max ) + { + iteration_max--; + + /* + we need to find out when the propagation of + hop counts has relaxed. So this global variable + is preset to 0 on each iteration and if + if non of the switches was set will exit the + while loop + */ + __some_hop_count_set = TRUE; + for( i = 0; (i < iteration_max) && __some_hop_count_set; i++ ) + { + __some_hop_count_set = FALSE; + cl_qmap_apply_func( p_sw_guid_tbl, + __osm_ucast_mgr_process_neighbors, p_mgr ); + } + osm_log( p_mgr->p_log, OSM_LOG_DEBUG, + "osm_ucast_mgr_build_lid_matrices: " + "Min-hop propagated in %d steps\n", i ); + } +} + +/********************************************************************** + **********************************************************************/ +osm_signal_t +osm_ucast_mgr_process( + IN osm_ucast_mgr_t* const p_mgr ) +{ + struct osm_routing_engine *p_routing_eng; + osm_signal_t signal = OSM_SIGNAL_DONE; + cl_qmap_t *p_sw_guid_tbl; + boolean_t default_routing = TRUE; + + OSM_LOG_ENTER( p_mgr->p_log, osm_ucast_mgr_process ); + + p_sw_guid_tbl = &p_mgr->p_subn->sw_guid_tbl; + p_routing_eng = &p_mgr->p_subn->p_osm->routing_engine; + + CL_PLOCK_EXCL_ACQUIRE( p_mgr->p_lock ); + + /* + If there are no switches in the subnet, we are done. + */ + if (cl_qmap_count( p_sw_guid_tbl ) == 0) + goto Exit; + + p_mgr->any_change = FALSE; + cl_qmap_apply_func(p_sw_guid_tbl, __osm_ucast_mgr_clean_switch, NULL); + + if (!p_routing_eng->build_lid_matrices || + p_routing_eng->build_lid_matrices(p_routing_eng->context) != 0) + osm_ucast_mgr_build_lid_matrices(p_mgr); + + osm_log( p_mgr->p_log, OSM_LOG_INFO, + "osm_ucast_mgr_process: " + "Min Hop Tables configured on all switches\n" ); + + /* + Now that the lid matrices have been built, we can + build and download the switch forwarding tables. + */ + + if ( p_routing_eng->ucast_build_fwd_tables && + (p_routing_eng->ucast_build_fwd_tables(p_routing_eng->context) == 0) ) + default_routing = FALSE; + else + cl_qmap_apply_func( p_sw_guid_tbl, __osm_ucast_mgr_process_tbl, p_mgr ); + + /* dump fdb into file: */ + if ( osm_log_is_active( p_mgr->p_log, OSM_LOG_ROUTING ) ) + { + if ( !default_routing && p_routing_eng->ucast_dump_tables != 0 ) + p_routing_eng->ucast_dump_tables(p_routing_eng->context); + else + __osm_ucast_mgr_dump_tables( p_mgr ); + } + + if (p_mgr->any_change) + { + signal = OSM_SIGNAL_DONE_PENDING; + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "osm_ucast_mgr_process: " + "LFT Tables configured on all switches\n"); + } + else + { + signal = OSM_SIGNAL_DONE; + osm_log( p_mgr->p_log, OSM_LOG_VERBOSE, + "osm_ucast_mgr_process: " + "No need to set any LFT Tables on any switches\n"); + } + + Exit: + CL_PLOCK_RELEASE( p_mgr->p_lock ); + OSM_LOG_EXIT( p_mgr->p_log ); + return( signal ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_updn.c b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_updn.c new file mode 100644 index 00000000..d23b52c4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_ucast_updn.c @@ -0,0 +1,1281 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. 
All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *    Implementation of the Up/Down algorithm using ranking & Min Hop
+ *    calculation functions
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.0 $
+ */
+
+#if HAVE_CONFIG_H
+#  include <config.h>
+#endif /* HAVE_CONFIG_H */
+
+#include
+#include
+#include
+#include
+#include
+
+/* //////////////////////////// */
+/* Local types */
+/* //////////////////////////// */
+
+/* direction */
+typedef enum _updn_switch_dir
+{
+  UP = 0,
+  DOWN
+} updn_switch_dir_t;
+
+/* This enum represents the available states of the UPDN algorithm */
+typedef enum _updn_state
+{
+  UPDN_INIT = 0,
+  UPDN_RANK,
+  UPDN_MIN_HOP_CALC,
+} updn_state_t;
+
+/* Rank value of this node */
+typedef struct _updn_rank
+{
+  cl_map_item_t map_item;
+  uint8_t rank;
+} updn_rank_t;
+
+/* Histogram element - the number of occurrences of the same hop value */
+typedef struct _updn_hist
+{
+  cl_map_item_t map_item;
+  uint32_t bar_value;
+} updn_hist_t;
+
+typedef struct _updn_next_step
+{
+  updn_switch_dir_t state;
+  osm_switch_t *p_sw;
+} updn_next_step_t;
+
+/* guid list */
+typedef struct _updn_input
+{
+  uint32_t num_guids;
+  uint64_t * guid_list;
+} updn_input_t;
+
+/* updn structure */
+typedef struct _updn
+{
+  updn_state_t state;
+  boolean_t auto_detect_root_nodes;
+  cl_qmap_t guid_rank_tbl;
+  updn_input_t updn_ucast_reg_inputs;
+  cl_list_t * p_root_nodes;
+  osm_opensm_t *p_osm;
+} updn_t;
+
+/* ///////////////////////////////// */
+/* Statics */
+/* ///////////////////////////////// */
+static int __osm_updn_find_root_nodes_by_min_hop(OUT updn_t *p_updn);
+
+/**********************************************************************
+ **********************************************************************/
+/* This function returns a direction based on the rank and guid info of
+   the current & remote ports */
+static updn_switch_dir_t
+__updn_get_dir(
+  IN updn_t *p_updn,
+  IN uint8_t cur_rank,
+  IN uint8_t rem_rank,
+  IN uint64_t cur_guid,
+  IN uint64_t rem_guid )
+{
+  uint32_t i = 0, max_num_guids = p_updn->updn_ucast_reg_inputs.num_guids;
+  uint64_t *p_guid = p_updn->updn_ucast_reg_inputs.guid_list;
+  boolean_t cur_is_root = FALSE, rem_is_root = FALSE;
+
+  /* HACK: this solves the case of connected root nodes. In a classic subnet
+     root nodes are not connected directly, but in case they are, we assign
+     the root nodes an UP direction to let UPDN discover the subnet correctly
+     (and not from the point of view of the last root node).
+  */
+  for ( i = 0; i < max_num_guids; i++ )
+  {
+    if (cur_guid == p_guid[i])
+      cur_is_root = TRUE;
+    if (rem_guid == p_guid[i])
+      rem_is_root = TRUE;
+  }
+  if (cur_is_root && rem_is_root)
+    return UP;
+
+  if (cur_rank < rem_rank)
+    return DOWN;
+  else if (cur_rank > rem_rank)
+    return UP;
+  else
+  {
+    /* Equal rank - decide by guid number, bigger == UP direction */
+    if (cur_guid > rem_guid)
+      return UP;
+    else
+      return DOWN;
+  }
+}
+
+/**********************************************************************
+ **********************************************************************/
+/* This function creates a new element of updn_next_step_t type and returns
+   a pointer to it, or NULL if malloc failed */
+static updn_next_step_t*
+__updn_create_updn_next_step_t(
+  IN updn_switch_dir_t state,
+  IN osm_switch_t* const p_sw )
+{
+  updn_next_step_t *p_next_step;
+
+  p_next_step = (updn_next_step_t*) malloc(sizeof(*p_next_step));
+  if (p_next_step)
+  {
+    memset(p_next_step, 0, sizeof(*p_next_step));
+    p_next_step->state = state;
+    p_next_step->p_sw = p_sw;
+  }
+
+  return p_next_step;
+}
+
+/**********************************************************************
+ **********************************************************************/
+/* This function updates an element in the qmap list by guid index and rank value */
+/* Returns 0 if no further update was needed, 1 if it brought a new value */
+static int
+__updn_update_rank(
+  IN cl_qmap_t *p_guid_rank_tbl,
+  IN ib_net64_t guid,
+  IN uint8_t rank )
+{
+  updn_rank_t *p_updn_rank;
+
+  p_updn_rank = (updn_rank_t*) cl_qmap_get(p_guid_rank_tbl, guid);
+  if (p_updn_rank == (updn_rank_t*) cl_qmap_end(p_guid_rank_tbl))
+  {
+    p_updn_rank = (updn_rank_t*) malloc(sizeof(updn_rank_t));
+
+    CL_ASSERT (p_updn_rank);
+
+    p_updn_rank->rank = rank;
+
+    cl_qmap_insert(p_guid_rank_tbl, guid, &p_updn_rank->map_item);
+    return 1;
+  }
+  else
+  {
+    if (p_updn_rank->rank > rank)
+    {
+      p_updn_rank->rank = rank;
+      return 1;
+    }
+  }
+  return 0;
+}
+
+/**********************************************************************
+ * This function does the BFS of the min hop table calculation by guid index
+ * as a starting point.
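+ *
+ * A sketch of the traversal: the current list holds switches at BFS
+ * distance d from the port we started at, and the next list collects
+ * their unvisited switch neighbors at distance d+1, each labelled UP or
+ * DOWN by __updn_get_dir(). The one transition treated as illegal below
+ * is DOWN followed by UP; pruning it is what restricts every route to
+ * an up-then-down shape and keeps the resulting routing deadlock-free.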
+ **********************************************************************/ +static int +__updn_bfs_by_node( + IN updn_t *p_updn, + IN osm_subn_t *p_subn, + IN osm_port_t *p_port, + IN cl_qmap_t *p_guid_rank_tbl ) +{ + /* Init local vars */ + osm_switch_t *p_self_node = NULL; + uint8_t pn, pn_rem; + osm_physp_t *p_physp, *p_remote_physp; + cl_list_t *p_currList, *p_nextList; + uint16_t root_lid, max_sw_lid; + updn_next_step_t *p_updn_switch, *p_tmp; + updn_switch_dir_t next_dir, current_dir; + osm_log_t *p_log = &p_updn->p_osm->log; + + OSM_LOG_ENTER( p_log, __updn_bfs_by_node ); + + /* Init the list pointers */ + p_nextList = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_nextList ); + cl_list_init( p_nextList, 10 ); + p_currList = p_nextList; + + p_physp = osm_port_get_default_phys_ptr(p_port); + /* Check valid pointer */ + if (!p_physp || !osm_physp_is_valid(p_physp )) + { + OSM_LOG_EXIT( p_log ); + return 1; + } + /* The Root BFS - lid */ + root_lid = cl_ntoh16(osm_physp_get_base_lid( p_physp )); + /* printf ("-V- BFS through lid : 0x%x\n", root_lid); */ + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Starting lid : 0x%x \n", root_lid ); + + if (p_port->p_node->sw) + { + p_self_node = p_port->p_node->sw; + /* Update its Min Hop Table */ + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Update Min Hop Table of GUID 0x%" PRIx64 "\n", + cl_ntoh64(p_port->guid) ); + osm_switch_set_hops(p_self_node, root_lid, 0, 0); + } + else + { + /* This is a CA or router - need to take its remote port */ + p_remote_physp = p_physp->p_remote_physp; + /* + make sure that the following occur: + 1. The port isn't NULL + 2. The port is a valid port + */ + if ( p_remote_physp && osm_physp_is_valid ( p_remote_physp )) + { + /* Check if the remote port is a switch, if it is update root_lid, + Min Hop Table */ + if (!p_remote_physp->p_node->sw) + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node: ERR AA07: " + "This is a non switched subnet OR non valid connection, cannot perform UPDN algorithm\n" ); + OSM_LOG_EXIT( p_log ); + return 1; + } + else + { + p_self_node = p_remote_physp->p_node->sw; + max_sw_lid = osm_switch_get_max_lid_ho(p_self_node); + if ((1 <= root_lid) && (root_lid <= max_sw_lid)) + /* Update its Min Hop Table */ + { + /* NOTE : Check if there is a function which prints the Min Hop Table */ + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Update Min Hop Table of GUID 0x%" PRIx64 "\n", + cl_ntoh64(p_remote_physp->port_guid) ); + osm_switch_set_hops(p_self_node, root_lid, + p_remote_physp->port_num, 1); + + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node: ERR AA09: " + " Invalid lid value 0x%x for switch 0x%" PRIx64 "\n", + root_lid, + cl_ntoh64(p_self_node->p_node->node_info.port_guid) ); + OSM_LOG_EXIT( p_log ); + return 1; + } + } + } + } + + CL_ASSERT(p_self_node); + + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Starting from switch - port GUID 0x%" PRIx64 "\n", + cl_ntoh64(p_self_node->p_node->node_info.port_guid) ); + + /* Update list with the updn_next_step_t new element */ + /* NOTE : When inserting an item which is a pointer to a struct, does remove + action also free its memory */ + if (!(p_tmp=__updn_create_updn_next_step_t(UP, p_self_node))) + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node: ERR AA08: " + "Could not create updn_next_step_t\n" ); + return 1; + } + + cl_list_insert_tail(p_currList, p_tmp); + + /* BFS the list till no next element */ + osm_log( p_log, OSM_LOG_VERBOSE, + 
"__updn_bfs_by_node:" + "BFS the subnet [\n" ); + + while (!cl_is_list_empty(p_currList)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Starting a new iteration with %zu elements in current list\n", + cl_list_count(p_currList) ); + /* Init the switch directed list */ + p_nextList = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_nextList ); + cl_list_init( p_nextList, 10 ); + /* Go over all current list items till it's empty */ + /* printf ("-V- In inner while\n"); */ + p_updn_switch = (updn_next_step_t*)cl_list_remove_head( p_currList ); + /* While there is a pointer to updn struct we continue to BFS */ + while (p_updn_switch) + { + current_dir = p_updn_switch->state; + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Visiting port GUID 0x%" PRIx64 "\n", + cl_ntoh64(p_updn_switch->p_sw->p_node->node_info.port_guid) ); + /* Go over all ports of the switch and find unvisited remote nodes */ + for ( pn = 0; pn < osm_switch_get_num_ports(p_updn_switch->p_sw); pn++ ) + { + /* printf("-V- Inner for in port num 0x%X\n", pn); */ + osm_node_t *p_remote_node; + cl_list_iterator_t updn_switch_iterator; + boolean_t HasVisited = FALSE; + ib_net64_t remote_guid,current_guid; + updn_rank_t *p_rem_rank, *p_cur_rank; + uint8_t current_min_hop, remote_min_hop, set_hop_return_value; + osm_switch_t *p_remote_sw; + + current_guid = osm_node_get_node_guid(p_updn_switch->p_sw->p_node); + p_remote_node = osm_node_get_remote_node( p_updn_switch->p_sw->p_node + , pn, &pn_rem ); + /* If no remote node OR remote node is not a SWITCH + continue to next pn */ + if( !p_remote_node || + (osm_node_get_type(p_remote_node) != IB_NODE_TYPE_SWITCH) ) + continue; + /* Fetch remote guid only after validation of remote node */ + remote_guid = osm_node_get_node_guid(p_remote_node); + /* printf ("-V- Current guid : 0x%" PRIx64 " Remote guid : 0x%" PRIx64 "\n", */ + /* cl_ntoh64(current_guid), cl_ntoh64(remote_guid)); */ + p_remote_sw = p_remote_node->sw; + p_rem_rank = (updn_rank_t*)cl_qmap_get(p_guid_rank_tbl, remote_guid); + p_cur_rank = (updn_rank_t*)cl_qmap_get(p_guid_rank_tbl, current_guid); + /* Decide which direction to mark it (UP/DOWN) */ + next_dir = __updn_get_dir (p_updn, p_cur_rank->rank, p_rem_rank->rank, + current_guid, remote_guid); + + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "move from 0x%016" PRIx64 " rank: %u " + "to 0x%016" PRIx64" rank: %u\n", + cl_ntoh64(current_guid), p_cur_rank->rank, + cl_ntoh64(remote_guid), p_rem_rank->rank ); + /* Check if this is a legal step : the only illegal step is going + from DOWN to UP */ + if ((current_dir == DOWN) && (next_dir == UP)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node:" + "Avoiding move from 0x%016" PRIx64 " to 0x%016" PRIx64"\n", + cl_ntoh64(current_guid), cl_ntoh64(remote_guid) ); + /* Illegal step */ + continue; + } + /* Set MinHop value for the current lid */ + current_min_hop = osm_switch_get_least_hops(p_updn_switch->p_sw,root_lid); + /* Check hop count if better insert into NextState list && update + the remote node Min Hop Table */ + remote_min_hop = osm_switch_get_hop_count(p_remote_sw, root_lid, pn_rem); + if (current_min_hop + 1 < remote_min_hop) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node (less):" + "Setting Min Hop Table of switch: 0x%" PRIx64 + "\n\t\tCurrent hop count is: %d, next hop count: %d" + "\n\tlid to set: 0x%x" + "\n\tport number: 0x%X" + " \n\thops number: %d\n", + cl_ntoh64(remote_guid), remote_min_hop,current_min_hop + 1, + root_lid, pn_rem, 
current_min_hop + 1 ); + set_hop_return_value = osm_switch_set_hops(p_remote_sw, root_lid, pn_rem, current_min_hop + 1); + if (set_hop_return_value) + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node (less) ERR AA01: " + "Invalid value returned from set min hop is: %d\n", + set_hop_return_value ); + } + /* Check if remote port is allready has been visited */ + updn_switch_iterator = cl_list_head(p_nextList); + while( updn_switch_iterator != cl_list_end(p_nextList) ) + { + updn_next_step_t *p_updn; + p_updn = (updn_next_step_t*)cl_list_obj(updn_switch_iterator); + /* Mark HasVisited only if: + 1. Same node guid + 2. Same direction + */ + if ((p_updn->p_sw->p_node == p_remote_node) && (p_updn->state == next_dir)) + HasVisited = TRUE; + updn_switch_iterator = cl_list_next(updn_switch_iterator); + } + if (!HasVisited) + { + /* Insert updn_switch item into the next list */ + if(!(p_tmp=__updn_create_updn_next_step_t(next_dir, p_remote_sw))) + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node: ERR AA11: " + "Could not create updn_next_step_t\n" ); + return 1; + } + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node: " + "Inserting new element to the next list: guid=0x%" PRIx64 " %s\n", + cl_ntoh64(p_tmp->p_sw->p_node->node_info.port_guid), + (p_tmp->state == UP ? "UP" : "DOWN") + ); + cl_list_insert_tail(p_nextList, p_tmp); + } + /* If the same value only update entry - at the min hop table */ + } else if (current_min_hop + 1 == osm_switch_get_hop_count(p_remote_sw, + root_lid, + pn_rem)) + { + osm_log( p_log, OSM_LOG_DEBUG, + "__updn_bfs_by_node (equal):" + "Setting Min Hop Table of switch: 0x%" PRIx64 + "\n\t\tCurrent hop count is: %d, next hop count: %d" + "\n\tlid to set: 0x%x" + "\n\tport number: 0x%X" + "\n\thops number: %d\n", + cl_ntoh64(remote_guid), + osm_switch_get_hop_count(p_remote_sw, root_lid, pn_rem), + current_min_hop + 1, root_lid, pn_rem, current_min_hop + 1 ); + set_hop_return_value = osm_switch_set_hops(p_remote_sw, root_lid, pn_rem, current_min_hop + 1); + + if (set_hop_return_value) + { + osm_log( p_log, OSM_LOG_ERROR, + "__updn_bfs_by_node (less) ERR AA12: " + "Invalid value returned from set min hop is: %d\n", + set_hop_return_value ); + } + } + } + free (p_updn_switch); + p_updn_switch = (updn_next_step_t*)cl_list_remove_head( p_currList ); + } + /* Cleanup p_currList */ + cl_list_destroy( p_currList ); + free (p_currList); + + /* Reassign p_currList to p_nextList */ + p_currList = p_nextList; + } + /* Cleanup p_currList - Had the pointer to cl_list_t */ + cl_list_destroy( p_currList ); + free (p_currList); + osm_log( p_log, OSM_LOG_VERBOSE, + "__updn_bfs_by_node:" + "BFS the subnet ]\n" ); + OSM_LOG_EXIT( p_log ); + return 0; +} + +/********************************************************************** + **********************************************************************/ +static void +updn_destroy( + IN updn_t* const p_updn ) +{ + cl_map_item_t *p_map_item; + uint64_t *p_guid_list_item; + + /* Destroy the updn struct */ + p_map_item = cl_qmap_head( &p_updn->guid_rank_tbl); + while( p_map_item != cl_qmap_end( &p_updn->guid_rank_tbl)) + { + osm_log ( &p_updn->p_osm->log, OSM_LOG_DEBUG, + "osm_subn_calc_up_down_min_hop_table: " + "guid = 0x%" PRIx64 " rank = %u\n", + cl_ntoh64(cl_qmap_key(p_map_item)), + ((updn_rank_t *)p_map_item)->rank ); + cl_qmap_remove_item( &p_updn->guid_rank_tbl, p_map_item); + free( (updn_rank_t *)p_map_item); + p_map_item = cl_qmap_head( &p_updn->guid_rank_tbl); + } + + /* free the array of guids */ + if 
(p_updn->updn_ucast_reg_inputs.guid_list) + free(p_updn->updn_ucast_reg_inputs.guid_list); + + /* destroy the list of root nodes */ + while ((p_guid_list_item = cl_list_remove_head( p_updn->p_root_nodes ))) + free( p_guid_list_item ); + + cl_list_remove_all( p_updn->p_root_nodes ); + cl_list_destroy( p_updn->p_root_nodes ); + free ( p_updn->p_root_nodes ); + free (p_updn); +} + +/********************************************************************** + **********************************************************************/ +static updn_t* +updn_construct(osm_log_t *p_log) +{ + updn_t* p_updn; + + OSM_LOG_ENTER( p_log, updn_construct ); + p_updn = malloc(sizeof(updn_t)); + if (p_updn) + memset(p_updn, 0, sizeof(updn_t)); + OSM_LOG_EXIT( p_log ); + return(p_updn); +} + +/********************************************************************** + **********************************************************************/ +static cl_status_t +updn_init( + IN updn_t* const p_updn, + IN osm_opensm_t *p_osm ) +{ + cl_list_t * p_list; + FILE* p_updn_guid_file; + char line[MAX_UPDN_GUID_FILE_LINE_LENGTH]; + uint64_t * p_tmp; + cl_list_iterator_t guid_iterator; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( &p_osm->log, updn_init ); + + p_updn->p_osm = p_osm; + p_updn->state = UPDN_INIT; + cl_qmap_init( &p_updn->guid_rank_tbl ); + p_list = (cl_list_t*)malloc(sizeof(cl_list_t)); + if (!p_list) + { + status = IB_ERROR; + goto Exit; + } + + cl_list_construct( p_list ); + cl_list_init( p_list, 10 ); + p_updn->p_root_nodes = p_list; + p_updn->updn_ucast_reg_inputs.num_guids = 0; + p_updn->updn_ucast_reg_inputs.guid_list = NULL; + p_updn->auto_detect_root_nodes = FALSE; + + /* + Check the source for root node list, if file parse it, otherwise + wait for a callback to activate auto detection + */ + if (p_osm->subn.opt.updn_guid_file) + { + /* Now parse guid from file */ + p_updn_guid_file = fopen(p_osm->subn.opt.updn_guid_file, "r"); + if (p_updn_guid_file == NULL) + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "osm_opensm_init : ERR AA02: " + "Failed to open guid list file (%s)\n", + p_osm->subn.opt.updn_guid_file ); + status = IB_NOT_FOUND; + goto Exit; + } + + while ( fgets(line, MAX_UPDN_GUID_FILE_LINE_LENGTH, p_updn_guid_file) ) + { + if (strcspn(line, " ,;.") == strlen(line)) + { + /* Skip empty lines anywhere in the file - only one char means the Null termination */ + if (strlen(line) > 1) + { + p_tmp = malloc(sizeof(uint64_t)); + *p_tmp = strtoull(line, NULL, 16); + cl_list_insert_tail(p_updn->p_root_nodes, p_tmp); + } + } + else + { + osm_log( &p_osm->log, OSM_LOG_ERROR, + "osm_opensm_init: ERR AA03: " + "Bad formatted guid in file (%s): %s\n", + p_osm->subn.opt.updn_guid_file, line ); + status = IB_NOT_FOUND; + break; + } + } + + /* For Debug Purposes ... 
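+     (A guid file line is expected to hold a single hexadecimal node
+     GUID and nothing else, e.g. "0x0008f10400411f56"; strtoull() with
+     base 16 also accepts the value without the 0x prefix. Any line
+     containing a space, comma, semicolon or dot is rejected above.)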
*/
+  osm_log( &p_osm->log, OSM_LOG_DEBUG,
+           "osm_opensm_init: "
+           "UPDN - Root nodes fetched from file %s\n",
+           p_osm->subn.opt.updn_guid_file );
+  guid_iterator = cl_list_head(p_updn->p_root_nodes);
+  while( guid_iterator != cl_list_end(p_updn->p_root_nodes) )
+  {
+    osm_log( &p_osm->log, OSM_LOG_DEBUG,
+             "osm_opensm_init: "
+             "Inserting GUID 0x%" PRIx64 " as root node\n",
+             *((uint64_t*)cl_list_obj(guid_iterator)) );
+    guid_iterator = cl_list_next(guid_iterator);
+  }
+ }
+ else
+ {
+  p_updn->auto_detect_root_nodes = TRUE;
+ }
+ /* If auto detect mode is required, it will be executed in main before
+    the assignment of UI Ucast */
+
+Exit:
+ OSM_LOG_EXIT( &p_osm->log );
+ return (status);
+}
+
+/**********************************************************************
+ **********************************************************************/
+/* NOTE : Please check whether we need to require that the first */
+/* ranked node is a SWITCH, for BFS purposes */
+static int
+updn_subn_rank(
+  IN uint64_t root_guid,
+  IN uint8_t base_rank,
+  IN updn_t* p_updn )
+{
+  /* Init local vars */
+  osm_port_t *p_root_port = NULL;
+  uint16_t tbl_size;
+  uint8_t rank = base_rank;
+  osm_physp_t *p_physp, *p_remote_physp, *p_physp_temp;
+  cl_list_t *p_currList,*p_nextList;
+  cl_status_t did_cause_update;
+  uint8_t num_ports, port_num;
+  osm_log_t *p_log = &p_updn->p_osm->log;
+
+  OSM_LOG_ENTER( p_log, updn_subn_rank );
+
+  osm_log( p_log, OSM_LOG_VERBOSE,
+           "updn_subn_rank: "
+           "Ranking starts from GUID 0x%" PRIx64 "\n", root_guid );
+
+  /* Init the list pointers */
+  p_nextList = (cl_list_t*)malloc(sizeof(cl_list_t));
+  cl_list_construct( p_nextList );
+  cl_list_init( p_nextList, 10 );
+  p_currList = p_nextList;
+
+  /* Check valid subnet & guid */
+  tbl_size = (uint16_t)(cl_qmap_count(&p_updn->p_osm->subn.port_guid_tbl));
+  if (tbl_size == 0)
+  {
+    osm_log( p_log, OSM_LOG_ERROR,
+             "updn_subn_rank: ERR AA04: "
+             "Port guid table is empty, cannot perform ranking\n" );
+    OSM_LOG_EXIT( p_log );
+    return 1;
+  }
+
+  p_root_port = (osm_port_t*) cl_qmap_get(&p_updn->p_osm->subn.port_guid_tbl,
+                                          cl_ntoh64(root_guid));
+  if( p_root_port == (osm_port_t*)cl_qmap_end( &p_updn->p_osm->subn.port_guid_tbl ) )
+  {
+    osm_log( p_log, OSM_LOG_ERROR,
+             "updn_subn_rank: ERR AA05: "
+             "Wrong guid value: 0x%" PRIx64 "\n", root_guid );
+    OSM_LOG_EXIT( p_log );
+    return 1;
+  }
+
+  /* Rank the first chosen guid unconditionally, since it carries the base rank */
+  osm_log( p_log, OSM_LOG_DEBUG,
+           "updn_subn_rank: "
+           "Ranking port GUID 0x%" PRIx64 "\n", root_guid );
+
+  __updn_update_rank(&p_updn->guid_rank_tbl, cl_ntoh64(root_guid), rank);
+  /*
+    HACK: We are assuming the SM is running on an HCA, so when getting the
+    default port we'll get the port connected to the rest of the subnet.
+    If the SM is running on a SWITCH - we should try to get a DR path from
+    all switch ports.
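+
+    For illustration: starting from a root port with base_rank 0, the BFS
+    below assigns rank 1 to everything one hop away, rank 2 at two hops,
+    and so on; since __updn_update_rank() only ever lowers an existing
+    rank, a node reachable over several paths keeps its smallest distance
+    to any root.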
+ */ + p_physp = osm_port_get_default_phys_ptr( p_root_port ); + CL_ASSERT( p_physp ); + CL_ASSERT( osm_physp_is_valid( p_physp ) ); + /* We can safely add the node to the list */ + cl_list_insert_tail(p_nextList, p_physp); + /* Assign pointer to the list for BFS */ + p_currList = p_nextList; + + /* BFS the list till its empty */ + osm_log( p_log, OSM_LOG_VERBOSE, + "updn_subn_rank: " + "BFS the subnet [\n" ); + + while (!cl_is_list_empty(p_currList)) + { + rank++; + p_nextList = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_nextList ); + cl_list_init( p_nextList, 10 ); + p_physp = (osm_physp_t*)cl_list_remove_head( p_currList ); + /* Go over all remote nodes and rank them (if not allready visited) till + no elemtent in the list p_currList */ + while ( p_physp != NULL ) + { + num_ports = osm_node_get_num_physp( p_physp->p_node ); + osm_log( p_log, OSM_LOG_DEBUG, + "updn_subn_rank: " + "Handling port GUID 0x%" PRIx64 "\n", + cl_ntoh64(p_physp->port_guid) ); + for (port_num = 1; port_num < num_ports; port_num++) + { + ib_net64_t port_guid; + + /* Current port fetched in order to get remote side */ + p_physp_temp = osm_node_get_physp_ptr( p_physp->p_node, port_num ); + p_remote_physp = p_physp_temp->p_remote_physp; + + /* + make sure that all the following occur on p_remote_physp: + 1. The port isn't NULL + 2. The port is a valid port + */ + if ( p_remote_physp && + osm_physp_is_valid ( p_remote_physp )) + { + port_guid = p_remote_physp->port_guid; + osm_log( p_log, OSM_LOG_DEBUG, + "updn_subn_rank: " + "Visiting remote port GUID 0x%" PRIx64 "\n", + cl_ntoh64(port_guid) ); + /* Was it visited ? + Only if the pointer equal to cl_qmap_end its not + found in the list */ + osm_log( p_log, OSM_LOG_DEBUG, + "updn_subn_rank: " + "Ranking port GUID 0x%" PRIx64 "\n", cl_ntoh64(port_guid) ); + did_cause_update = __updn_update_rank(&p_updn->guid_rank_tbl, port_guid, rank); + + osm_log( p_log, OSM_LOG_VERBOSE, + "updn_subn_rank: " + "Rank of port GUID 0x%" PRIx64 " = %u\n", cl_ntoh64(port_guid), + ((updn_rank_t*)cl_qmap_get(&p_updn->guid_rank_tbl, port_guid))->rank + ); + + if (did_cause_update) + { + cl_list_insert_tail(p_nextList, p_remote_physp); + } + } + } + /* Propagte through the next item in the p_currList */ + p_physp = (osm_physp_t*)cl_list_remove_head( p_currList ); + } + /* First free the allocation of cl_list pointer then reallocate */ + cl_list_destroy( p_currList ); + free(p_currList); + /* p_currList is empty - need to assign it to p_nextList */ + p_currList = p_nextList; + } + + osm_log( p_log, OSM_LOG_VERBOSE, + "updn_subn_rank: " + "BFS the subnet ]\n" ); + + cl_list_destroy( p_currList ); + free(p_currList); + + /* Print Summary of ranking */ + osm_log( p_log, OSM_LOG_VERBOSE, + "updn_subn_rank: " + "Rank Info :\n\t Root Guid = 0x%" PRIx64 "\n\t Max Node Rank = %d\n", + cl_ntoh64(p_root_port->guid), rank ); + p_updn->state = UPDN_RANK; + OSM_LOG_EXIT( p_log ); + return 0; +} + +/********************************************************************** + **********************************************************************/ +static int +__osm_subn_set_up_down_min_hop_table( + IN updn_t* p_updn ) +{ + /* Init local vars */ + osm_subn_t *p_subn = &p_updn->p_osm->subn; + osm_log_t *p_log = &p_updn->p_osm->log; + osm_switch_t *p_next_sw,*p_sw; + osm_port_t *p_next_port,*p_port; + ib_net64_t port_guid; + + OSM_LOG_ENTER( p_log, __osm_subn_set_up_down_min_hop_table ); + if (p_updn->state == UPDN_INIT) + { + osm_log( p_log, OSM_LOG_ERROR, + "__osm_subn_set_up_down_min_hop_table: 
ERR AA06: " + "Calculating Min Hop only allowed after ranking\n" ); + OSM_LOG_EXIT( p_log ); + return 1; + } + + /* Check if its a non switched subnet .. */ + if ( cl_is_qmap_empty( &p_subn->sw_guid_tbl ) ) + { + osm_log( p_log, OSM_LOG_ERROR, + "__osm_subn_set_up_down_min_hop_table: ERR AA10: " + "This is a non switched subnet, cannot perform UPDN algorithm\n" ); + OSM_LOG_EXIT( p_log ); + return 1; + } + /* Go over all the switches in the subnet - for each init their Min Hop + Table */ + osm_log( p_log, OSM_LOG_VERBOSE, + "__osm_subn_set_up_down_min_hop_table: " + "Init Min Hop Table of all switches [\n" ); + + p_next_sw = (osm_switch_t*)cl_qmap_head( &p_subn->sw_guid_tbl ); + while( p_next_sw != (osm_switch_t*)cl_qmap_end( &p_subn->sw_guid_tbl ) ) + { + p_sw = p_next_sw; + p_next_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ); + /* Clear Min Hop Table */ + osm_lid_matrix_clear(&(p_sw->lmx)); + } + + osm_log( p_log, OSM_LOG_VERBOSE, + "__osm_subn_set_up_down_min_hop_table: " + "Init Min Hop Table of all switches ]\n" ); + + /* Now do the BFS for each port in the subnet */ + osm_log( p_log, OSM_LOG_VERBOSE, + "__osm_subn_set_up_down_min_hop_table: " + "BFS through all port guids in the subnet [\n" ); + p_next_port = (osm_port_t*)cl_qmap_head( &p_subn->port_guid_tbl ); + while( p_next_port != (osm_port_t*)cl_qmap_end( &p_subn->port_guid_tbl ) ) + { + p_port = p_next_port; + p_next_port = (osm_port_t*)cl_qmap_next( &p_port->map_item ); + port_guid = cl_qmap_key(&(p_port->map_item)); + osm_log( p_log, OSM_LOG_DEBUG, + "__osm_subn_set_up_down_min_hop_table: " + "BFS through port GUID 0x%" PRIx64 "\n", + cl_ntoh64(port_guid) ); + if(__updn_bfs_by_node(p_updn, p_subn, p_port, + &p_updn->guid_rank_tbl)) + { + OSM_LOG_EXIT( p_log ); + return 1; + } + } + + osm_log( p_log, OSM_LOG_VERBOSE, + "__osm_subn_set_up_down_min_hop_table: " + "BFS through all port guids in the subnet ]\n" ); + /* Cleanup */ + OSM_LOG_EXIT( p_log ); + return 0; +} + +/********************************************************************** + **********************************************************************/ +static int +__osm_subn_calc_up_down_min_hop_table( + IN uint32_t num_guids, + IN uint64_t * guid_list, + IN updn_t* p_updn ) +{ + uint8_t idx = 0; + cl_map_item_t *p_map_item; + int status; + + OSM_LOG_ENTER( &p_updn->p_osm->log, osm_subn_calc_up_down_min_hop_table ); + osm_log( &p_updn->p_osm->log, OSM_LOG_VERBOSE, + "__osm_subn_calc_up_down_min_hop_table: " + "Ranking all port guids in the list\n" ); + if (num_guids == 0) + { + osm_log( &p_updn->p_osm->log, OSM_LOG_ERROR, + "__osm_subn_calc_up_down_min_hop_table: ERR AA0A: " + "No guids were given or number of guids is 0\n" ); + return 1; + } + + for (idx = 0; idx < num_guids; idx++) + { + /* Apply the ranking for each guid given by user - bypass illegal ones */ + updn_subn_rank(guid_list[idx], 0, p_updn); + } + /* After multiple ranking need to set Min Hop Table by UpDn algorithm */ + osm_log( &p_updn->p_osm->log, OSM_LOG_VERBOSE, + "__osm_subn_calc_up_down_min_hop_table: " + "Setting all switches' Min Hop Table\n" ); + + status = __osm_subn_set_up_down_min_hop_table (p_updn); + + /* Cleanup updn rank tbl */ + p_map_item = cl_qmap_head( &p_updn->guid_rank_tbl); + while( p_map_item != cl_qmap_end( &p_updn->guid_rank_tbl)) + { + osm_log( &p_updn->p_osm->log, OSM_LOG_DEBUG, + "__osm_subn_calc_up_down_min_hop_table: " + "guid = 0x%" PRIx64 " rank = %u\n", + cl_ntoh64(cl_qmap_key(p_map_item)), + ((updn_rank_t *)p_map_item)->rank ); + cl_qmap_remove_item( 
&p_updn->guid_rank_tbl, p_map_item); + free( (updn_rank_t *)p_map_item); + p_map_item = cl_qmap_head( &p_updn->guid_rank_tbl); + } + + OSM_LOG_EXIT( &p_updn->p_osm->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ +/* UPDN callback function */ +static int +__osm_updn_call( + void *ctx ) +{ + updn_t *p_updn = ctx; + + OSM_LOG_ENTER( &p_updn->p_osm->log, __osm_updn_call ); + + /* First auto detect root nodes - if required */ + if ( p_updn->auto_detect_root_nodes ) + { + osm_ucast_mgr_build_lid_matrices( &p_updn->p_osm->sm.ucast_mgr ); + /* printf ("-V- b4 osm_updn_find_root_nodes_by_min_hop\n"); */ + __osm_updn_find_root_nodes_by_min_hop( p_updn ); + } + /* printf ("-V- after osm_updn_find_root_nodes_by_min_hop\n"); */ + /* Only if there are assigned root nodes do the algorithm , otherwise perform do nothing */ + if ( p_updn->updn_ucast_reg_inputs.num_guids > 0) + { + osm_log( &(p_updn->p_osm->log), OSM_LOG_DEBUG, + "__osm_updn_call: " + "activating UPDN algorithm\n" ); + __osm_subn_calc_up_down_min_hop_table( p_updn->updn_ucast_reg_inputs.num_guids, + p_updn->updn_ucast_reg_inputs.guid_list, + p_updn ); + } + else + osm_log( &p_updn->p_osm->log, OSM_LOG_INFO, + "__osm_updn_call: " + "disable UPDN algorithm, no root nodes were found\n" ); + + OSM_LOG_EXIT( &p_updn->p_osm->log ); + return 0; +} + +/********************************************************************** + **********************************************************************/ +/* UPDN convert cl_list to guid array in updn struct */ +static void +__osm_updn_convert_list2array( + IN updn_t * p_updn ) +{ + uint32_t i = 0, max_num = 0; + uint64_t *p_guid; + + OSM_LOG_ENTER( &p_updn->p_osm->log, __osm_updn_convert_list2array ); + + p_updn->updn_ucast_reg_inputs.num_guids = cl_list_count( + p_updn->p_root_nodes); + if (p_updn->updn_ucast_reg_inputs.guid_list) + free(p_updn->updn_ucast_reg_inputs.guid_list); + p_updn->updn_ucast_reg_inputs.guid_list = (uint64_t *)malloc( + p_updn->updn_ucast_reg_inputs.num_guids*sizeof(uint64_t)); + if (p_updn->updn_ucast_reg_inputs.guid_list) + memset(p_updn->updn_ucast_reg_inputs.guid_list, 0, + p_updn->updn_ucast_reg_inputs.num_guids*sizeof(uint64_t)); + if (!cl_is_list_empty(p_updn->p_root_nodes)) + { + while( (p_guid = (uint64_t*)cl_list_remove_head(p_updn->p_root_nodes)) ) + { + p_updn->updn_ucast_reg_inputs.guid_list[i] = *p_guid; + free(p_guid); + i++; + } + max_num = i; + for (i = 0; i < max_num; i++ ) + osm_log( &p_updn->p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_convert_list2array: " + "Map GUID 0x%" PRIx64 " into UPDN array\n", + p_updn->updn_ucast_reg_inputs.guid_list[i] ); + } + /* Since we need the template list for other sweeps, we wont destroy & free it */ + OSM_LOG_EXIT( &p_updn->p_osm->log ); +} + +/********************************************************************** + **********************************************************************/ +/* Find Root nodes automatically by Min Hop Table info */ +static int +__osm_updn_find_root_nodes_by_min_hop( + OUT updn_t * p_updn ) +{ + osm_opensm_t *p_osm = p_updn->p_osm; + osm_switch_t *p_next_sw, *p_sw; + osm_port_t *p_next_port, *p_port; + osm_physp_t *p_physp; + uint32_t numCas = 0; + uint32_t numSws = cl_qmap_count(&p_osm->subn.sw_guid_tbl); + cl_qmap_t min_hop_hist; /* Histogram container */ + updn_hist_t *p_updn_hist, *p_up_ht; + uint8_t maxHops = 0; /* contain the max histogram index */ + uint64_t *p_guid; + 
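+  /*
+    A rough sketch of the heuristic applied below, with illustrative
+    numbers: each switch's min-hop distances to all CAs are put into a
+    histogram. With numCas = 100, thd1 = 90 and thd2 = 5; a spine whose
+    histogram is { 3 hops: 96 CAs, 4 hops: 4 CAs } has exactly one bar
+    above each threshold and is declared a root, while a leaf switch
+    seeing { 1: 8, 3: 40, 5: 52 } is not.
+  */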
cl_list_t *p_root_nodes_list = p_updn->p_root_nodes; + cl_map_t ca_by_lid_map; /* map holding all CA lids */ + uint16_t self_lid_ho; + + OSM_LOG_ENTER( &p_osm->log, osm_updn_find_root_nodes_by_min_hop ); + + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "current number of ports in the subnet is %d\n", + cl_qmap_count(&p_osm->subn.port_guid_tbl) ); + /* Init the required vars */ + cl_qmap_init( &min_hop_hist ); + cl_map_construct( &ca_by_lid_map ); + cl_map_init( &ca_by_lid_map, 10 ); + + /* EZ: + p_ca_list = (cl_list_t*)malloc(sizeof(cl_list_t)); + cl_list_construct( p_ca_list ); + cl_list_init( p_ca_list, 10 ); + */ + + /* Find the Maximum number of CAs (and routers) for histogram normalization */ + osm_log( &p_osm->log, OSM_LOG_VERBOSE, + "__osm_updn_find_root_nodes_by_min_hop: " + "Find the number of CAs and store them in cl_list\n" ); + p_next_port = (osm_port_t*)cl_qmap_head( &p_osm->subn.port_guid_tbl ); + while( p_next_port != (osm_port_t*)cl_qmap_end( &p_osm->subn.port_guid_tbl ) ) { + p_port = p_next_port; + p_next_port = (osm_port_t*)cl_qmap_next( &p_next_port->map_item ); + if ( osm_node_get_type(p_port->p_node) != IB_NODE_TYPE_SWITCH ) + { + p_physp = osm_port_get_default_phys_ptr(p_port); + self_lid_ho = cl_ntoh16( osm_physp_get_base_lid(p_physp) ); + numCas++; + /* EZ: + self = malloc(sizeof(uint16_t)); + *self = self_lid_ho; + cl_list_insert_tail(p_ca_list, self); + */ + cl_map_insert( &ca_by_lid_map, self_lid_ho, (void *)0x1); + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Inserting into array GUID 0x%" PRIx64 ", Lid: 0x%X\n", + cl_ntoh64(osm_port_get_guid(p_port)), self_lid_ho ); + } + } + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Found %u CA, %u SW in the subnet\n", numCas, numSws ); + p_next_sw = (osm_switch_t*)cl_qmap_head( &p_osm->subn.sw_guid_tbl ); + osm_log( &p_osm->log, OSM_LOG_VERBOSE, + "__osm_updn_find_root_nodes_by_min_hop: " + "Passing through all switches to collect Min Hop info\n" ); + while( p_next_sw != (osm_switch_t*)cl_qmap_end( &p_osm->subn.sw_guid_tbl ) ) + { + uint16_t max_lid_ho, lid_ho; + uint8_t hop_val; + uint16_t numHopBarsOverThd1 = 0; + uint16_t numHopBarsOverThd2 = 0; + double thd1, thd2; + + p_sw = p_next_sw; + /* Roll to the next switch */ + p_next_sw = (osm_switch_t*)cl_qmap_next( &p_sw->map_item ); + + /* Clear Min Hop Table && FWD Tbls - This should caused opensm to + rebuild its FWD tables , post setting Min Hop Tables */ + max_lid_ho = osm_switch_get_max_lid_ho(p_sw); + /* Get base lid of switch by retrieving port 0 lid of node pointer */ + self_lid_ho = cl_ntoh16( osm_node_get_base_lid( p_sw->p_node, 0 ) ); + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Passing through switch lid 0x%X\n", self_lid_ho ); + for (lid_ho = 1; lid_ho <= max_lid_ho; lid_ho++) + { + /* Skip lids which are not CAs - + for the histogram purposes we care only about CAs */ + + /* EZ: + boolean_t LidFound = FALSE; + cl_list_iterator_t ca_lid_iterator= cl_list_head(p_ca_list); + while( (ca_lid_iterator != cl_list_end(p_ca_list)) && !LidFound ) + { + uint16_t *p_lid; + + p_lid = (uint16_t*)cl_list_obj(ca_lid_iterator); + if ( *p_lid == lid_ho ) + LidFound = TRUE; + ca_lid_iterator = cl_list_next(ca_lid_iterator); + + } + if ( LidFound ) + */ + if (cl_map_get( &ca_by_lid_map, lid_ho )) + { + hop_val = osm_switch_get_least_hops( p_sw, lid_ho ); + if (hop_val > maxHops) + maxHops = hop_val; + p_updn_hist = + 
(updn_hist_t*)cl_qmap_get( &min_hop_hist , (uint64_t)hop_val ); + if ( p_updn_hist == (updn_hist_t*)cl_qmap_end( &min_hop_hist)) + { + /* New entry in the histogram , first create it */ + p_updn_hist = (updn_hist_t*) malloc(sizeof(updn_hist_t)); + CL_ASSERT (p_updn_hist); + p_updn_hist->bar_value = 1; + cl_qmap_insert(&min_hop_hist, (uint64_t)hop_val, &p_updn_hist->map_item); + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Creating new entry in histogram %u with bar value 1\n", + hop_val ); + } + else + { + /* Entry exist in the table , just increment the value */ + p_updn_hist->bar_value++; + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Updating entry in histogram %u with bar value %d\n", + hop_val, p_updn_hist->bar_value ); + } + } + } + + /* Now recognize the spines by requiring one bar to be above 90% of the + number of CAs */ + thd1 = numCas * 0.9; + thd2 = numCas * 0.05; + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Pass over the histogram value and find only one root node above " + "thd1 = %f && thd2 = %f\n", thd1, thd2 ); + + p_updn_hist = (updn_hist_t*) cl_qmap_head( &min_hop_hist ); + while( p_updn_hist != (updn_hist_t*)cl_qmap_end( &min_hop_hist ) ) + { + p_up_ht = p_updn_hist; + p_updn_hist = (updn_hist_t*)cl_qmap_next( &p_updn_hist->map_item ) ; + if ( p_up_ht->bar_value > thd1 ) + numHopBarsOverThd1++; + if ( p_up_ht->bar_value > thd2 ) + numHopBarsOverThd2++; + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Passing through histogram - Hop Index %u: " + "numHopBarsOverThd1 = %u, numHopBarsOverThd2 = %u\n", + (uint16_t)cl_qmap_key((cl_map_item_t*)p_up_ht), + numHopBarsOverThd1, numHopBarsOverThd2 ); + } + + /* destroy the qmap table and all its content - no longer needed */ + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Cleanup: delete histogram " + "UPDN - Root nodes fetching by auto detect\n" ); + p_updn_hist = (updn_hist_t*) cl_qmap_head( &min_hop_hist ); + while ( p_updn_hist != (updn_hist_t*)cl_qmap_end( &min_hop_hist ) ) + { + cl_qmap_remove_item( &min_hop_hist, (cl_map_item_t*)p_updn_hist ); + free( p_updn_hist ); + p_updn_hist = (updn_hist_t*) cl_qmap_head( &min_hop_hist ); + } + + /* If thd conditions are valid insert the root node to the list */ + if ( (numHopBarsOverThd1 == 1) && (numHopBarsOverThd2 == 1) ) + { + p_guid = malloc(sizeof(uint64_t)); + *p_guid = cl_ntoh64(osm_node_get_node_guid(p_sw->p_node)); + osm_log( &p_osm->log, OSM_LOG_DEBUG, + "__osm_updn_find_root_nodes_by_min_hop: " + "Inserting GUID 0x%" PRIx64 " as root node\n", + *p_guid ); + cl_list_insert_tail(p_root_nodes_list, p_guid); + } + } + + /* destroy the map of CA lids */ + cl_map_remove_all( &ca_by_lid_map ); + cl_map_destroy( &ca_by_lid_map ); + + /* Now convert the cl_list to array */ + __osm_updn_convert_list2array(p_updn); + + OSM_LOG_EXIT( &p_osm->log ); + return 0; +} + +/********************************************************************** + **********************************************************************/ +static void +__osm_updn_delete( + void *context ) +{ + updn_t *p_updn = context; + + updn_destroy(p_updn); +} + +int +osm_ucast_updn_setup( + osm_opensm_t *p_osm ) +{ + updn_t *p_updn; + + p_updn = updn_construct(&p_osm->log); + if (!p_updn) + return -1; + p_osm->routing_engine.context = p_updn; + p_osm->routing_engine.delete = __osm_updn_delete; + p_osm->routing_engine.build_lid_matrices 
= __osm_updn_call; + + if (updn_init(p_updn, p_osm) != IB_SUCCESS) + return -1; + if (!p_updn->auto_detect_root_nodes) + __osm_updn_convert_list2array(p_updn); + + return 0; +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_vl15intf.c b/branches/Ndi/ulp/opensm/user/opensm/osm_vl15intf.c new file mode 100644 index 00000000..7548789a --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_vl15intf.c @@ -0,0 +1,544 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_vl15_t. + * This object represents the VL15 Interface object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +/********************************************************************** + **********************************************************************/ +void +__osm_vl15_poller( + IN void *p_ptr ) +{ + ib_api_status_t status; + osm_madw_t *p_madw; + uint32_t mads_sent; + uint32_t unicasts_sent; + uint32_t mads_on_wire; + osm_vl15_t* const p_vl = (osm_vl15_t*)p_ptr; + cl_qlist_t* p_fifo; + + OSM_LOG_ENTER( p_vl->p_log, __osm_vl15_poller ); + + if ( p_vl->thread_state == OSM_THREAD_STATE_NONE) + { + p_vl->thread_state = OSM_THREAD_STATE_RUN; + } + + while( p_vl->thread_state == OSM_THREAD_STATE_RUN ) + { + /* + Start servicing the FIFOs by pulling off MAD wrappers + and passing them to the transport interface. + There are lots of corner cases here so tread carefully. + + The unicast FIFO has priority, since somebody is waiting + for a timely response. 
+ */ + cl_spinlock_acquire( &p_vl->lock ); + + if( cl_qlist_count( &p_vl->ufifo ) != 0 ) + p_fifo = &p_vl->ufifo; + else + p_fifo = &p_vl->rfifo; + + p_madw = (osm_madw_t*)cl_qlist_remove_head( p_fifo ); + + cl_spinlock_release( &p_vl->lock ); + + if( p_madw != (osm_madw_t*)cl_qlist_end( p_fifo ) ) + { + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "__osm_vl15_poller: " + "Servicing p_madw = %p\n", p_madw ); + } + + if( osm_log_is_active( p_vl->p_log, OSM_LOG_FRAMES ) ) + { + osm_dump_dr_smp( p_vl->p_log, + osm_madw_get_smp_ptr( p_madw ), OSM_LOG_FRAMES ); + } + + /* + Non-response-expected mads are not throttled on the wire + since we can have no confirmation that they arrived + at their destination. + */ + if( p_madw->resp_expected == TRUE ) + { + /* + Note that other threads may not see the response MAD + arrive before send() even returns. + In that case, the wire count would temporarily go negative. + To avoid this confusion, preincrement the counts on the + assumption that send() will succeed. + */ + mads_on_wire = cl_atomic_inc( + &p_vl->p_stats->qp0_mads_outstanding_on_wire ); + CL_ASSERT( mads_on_wire <= p_vl->max_wire_smps ); + } + else + { + unicasts_sent = cl_atomic_inc( + &p_vl->p_stats->qp0_unicasts_sent ); + } + + mads_sent = cl_atomic_inc( &p_vl->p_stats->qp0_mads_sent ); + + status = osm_vendor_send( + osm_madw_get_bind_handle( p_madw ), + p_madw, p_madw->resp_expected ); + + if( status != IB_SUCCESS ) + { + uint32_t outstanding; + cl_status_t cl_status; + + osm_log( p_vl->p_log, OSM_LOG_ERROR, + "__osm_vl15_poller: ERR 3E03: " + "MAD send failed (%s)\n", + ib_get_err_str( status ) ); + + /* + The MAD was never successfully sent, so + fix up the pre-incremented count values. + */ + + /* Decrement qp0_mads_sent and qp0_mads_outstanding_on_wire + that was incremented in the code above. */ + mads_sent = cl_atomic_dec( &p_vl->p_stats->qp0_mads_sent ); + if( p_madw->resp_expected == TRUE ) + cl_atomic_dec( &p_vl->p_stats->qp0_mads_outstanding_on_wire ); + + /* + The following code is similar to the code in + __osm_sm_mad_ctrl_retire_trans_mad. We need to decrement the + qp0_mads_outstanding counter, and if we reached 0 - need to call + the cl_disp_post with OSM_SIGNAL_NO_PENDING_TRANSACTION (in order + to wake up the state mgr). + There is one difference from the code in __osm_sm_mad_ctrl_retire_trans_mad. + This code is called on all mads, if osm_vendor_send() failed, unlike + __osm_sm_mad_ctrl_retire_trans_mad which is called only on mads where + resp_expected == TRUE. As a result, the qp0_mads_outstanding counter + should be decremented and handled accordingly only if this is a mad + with resp_expected == TRUE. + */ + if ( p_madw->resp_expected == TRUE ) + { + outstanding = cl_atomic_dec( &p_vl->p_stats->qp0_mads_outstanding ); + + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "__osm_vl15_poller: " + "%u QP0 MADs outstanding\n", + p_vl->p_stats->qp0_mads_outstanding ); + + if( outstanding == 0 ) + { + /* + The wire is clean. + Signal the state manager. 
+ */ + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "__osm_vl15_poller: " + "Posting Dispatcher message %s\n", + osm_get_disp_msg_str( OSM_MSG_NO_SMPS_OUTSTANDING ) ); + } + + cl_status = cl_disp_post( p_vl->h_disp, + OSM_MSG_NO_SMPS_OUTSTANDING, + (void *)OSM_SIGNAL_NO_PENDING_TRANSACTIONS, + NULL, + NULL ); + + if( cl_status != CL_SUCCESS ) + { + osm_log( p_vl->p_log, OSM_LOG_ERROR, + "__osm_vl15_poller: ERR 3E06: " + "Dispatcher post message failed (%s)\n", + CL_STATUS_MSG( cl_status ) ); + } + } + } + } + else + { + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "__osm_vl15_poller: " + "%u QP0 MADs on wire, %u outstanding, %u unicasts sent, " + "%u total sent\n", + p_vl->p_stats->qp0_mads_outstanding_on_wire, + p_vl->p_stats->qp0_mads_outstanding, + p_vl->p_stats->qp0_unicasts_sent, + p_vl->p_stats->qp0_mads_sent ); + } + } + } + else + { + /* + The VL15 FIFO is empty, so we have nothing left to do. + */ + status = cl_event_wait_on( &p_vl->signal, + EVENT_NO_TIMEOUT, TRUE ); + } + + while( (p_vl->p_stats->qp0_mads_outstanding_on_wire >= + (int32_t)p_vl->max_wire_smps ) && + (p_vl->thread_state == OSM_THREAD_STATE_RUN ) ) + { + status = cl_event_wait_on( &p_vl->signal, + EVENT_NO_TIMEOUT, TRUE ); + } + + if( status != CL_SUCCESS ) + { + osm_log( p_vl->p_log, OSM_LOG_ERROR, + "__osm_vl15_poller: ERR 3E02: " + "Event wait failed (%s)\n", + CL_STATUS_MSG( status ) ); + } + } + + /* + since we abort immediately when the state != OSM_THREAD_STATE_RUN + we might have some mads on the queues. After the thread exits + the vl15 destroy routine should put back these mads... + */ + + OSM_LOG_EXIT( p_vl->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vl15_construct( + IN osm_vl15_t* const p_vl ) +{ + memset( p_vl, 0, sizeof(*p_vl) ); + p_vl->state = OSM_VL15_STATE_INIT; + p_vl->thread_state = OSM_THREAD_STATE_NONE; + cl_event_construct( &p_vl->signal ); + cl_spinlock_construct( &p_vl->lock ); + cl_qlist_init( &p_vl->rfifo ); + cl_qlist_init( &p_vl->ufifo ); + cl_thread_construct( &p_vl->poller ); + p_vl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_vl15_destroy( + IN osm_vl15_t* const p_vl, + IN struct _osm_mad_pool *p_pool) +{ + osm_madw_t* p_madw; + + OSM_LOG_ENTER( p_vl->p_log, osm_vl15_destroy ); + + /* + Signal our threads that we're leaving. + */ + p_vl->thread_state = OSM_THREAD_STATE_EXIT; + + /* + Don't trigger unless event has been initialized. + Destroy the thread before we tear down the other objects. 
+ */ + if( p_vl->state != OSM_VL15_STATE_INIT ) + cl_event_signal( &p_vl->signal ); + + cl_thread_destroy( &p_vl->poller ); + + /* + Return the outstanding messages to the pool + */ + + cl_spinlock_acquire( &p_vl->lock ); + + while (!cl_is_qlist_empty( &p_vl->rfifo)) + { + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->rfifo); + osm_mad_pool_put( p_pool, p_madw ); + } + while (!cl_is_qlist_empty( &p_vl->ufifo)) + { + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->ufifo); + osm_mad_pool_put( p_pool, p_madw ); + } + + cl_spinlock_release( &p_vl->lock ); + + cl_event_destroy( &p_vl->signal ); + p_vl->state = OSM_VL15_STATE_INIT; + cl_spinlock_destroy( &p_vl->lock ); + + OSM_LOG_EXIT( p_vl->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vl15_init( + IN osm_vl15_t* const p_vl, + IN osm_vendor_t* const p_vend, + IN osm_log_t* const p_log, + IN osm_stats_t* const p_stats, + IN const int32_t max_wire_smps, + IN osm_subn_t* const p_subn, + IN cl_dispatcher_t* const p_disp, + IN cl_plock_t* const p_lock + ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_vl15_init ); + + p_vl->p_vend = p_vend; + p_vl->p_log = p_log; + p_vl->p_stats = p_stats; + p_vl->max_wire_smps = max_wire_smps; + p_vl->p_subn = p_subn; + p_vl->p_lock = p_lock; + + status = cl_event_init( &p_vl->signal, FALSE ); + if( status != IB_SUCCESS ) + goto Exit; + + p_vl->state = OSM_VL15_STATE_READY; + + status = cl_spinlock_init( &p_vl->lock ); + if( status != IB_SUCCESS ) + goto Exit; + + /* + Initialize the thread after all other dependent objects + have been initialized. + */ + status = cl_thread_init( &p_vl->poller, __osm_vl15_poller, p_vl, + "opensm poller" ); + if( status != IB_SUCCESS ) + goto Exit; + + p_vl->h_disp = cl_disp_register( + p_disp, + CL_DISP_MSGID_NONE, + NULL, + NULL ); + + if( p_vl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_vl15_init: ERR 3E01: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vl15_poll( + IN osm_vl15_t* const p_vl ) +{ + OSM_LOG_ENTER( p_vl->p_log, osm_vl15_poll ); + + CL_ASSERT( p_vl->state == OSM_VL15_STATE_READY ); + + /* + If we have room for more VL15 MADs on the wire, + then signal the poller thread. + + This is not an airtight check, since the poller thread + could be just about to send another MAD as we signal + the event here. To cover this rare case, the poller + thread checks for a spurious wake-up. 
+ */ + if( p_vl->p_stats->qp0_mads_outstanding_on_wire < + (int32_t)p_vl->max_wire_smps ) + { + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "osm_vl15_poll: " + "Signalling poller thread\n" ); + } + + cl_event_signal( &p_vl->signal ); + } + + OSM_LOG_EXIT( p_vl->p_log ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vl15_post( + IN osm_vl15_t* const p_vl, + IN osm_madw_t* const p_madw ) +{ + OSM_LOG_ENTER( p_vl->p_log, osm_vl15_post ); + + CL_ASSERT( p_vl->state == OSM_VL15_STATE_READY ); + + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "osm_vl15_post: " + "Posting p_madw = 0x%p\n", p_madw ); + } + + /* + Determine in which fifo to place the pending madw. + */ + cl_spinlock_acquire( &p_vl->lock ); + if( p_madw->resp_expected == TRUE ) + { + cl_qlist_insert_tail( &p_vl->rfifo, (cl_list_item_t*)p_madw ); + cl_atomic_inc( &p_vl->p_stats->qp0_mads_outstanding ); + } + else + { + cl_qlist_insert_tail( &p_vl->ufifo, (cl_list_item_t*)p_madw ); + } + cl_spinlock_release( &p_vl->lock ); + + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "osm_vl15_post: " + "%u QP0 MADs on wire, %u QP0 MADs outstanding\n", + p_vl->p_stats->qp0_mads_outstanding_on_wire, + p_vl->p_stats->qp0_mads_outstanding ); + } + + osm_vl15_poll( p_vl ); + + OSM_LOG_EXIT( p_vl->p_log ); +} + +void +osm_vl15_shutdown( + IN osm_vl15_t* const p_vl, + IN osm_mad_pool_t* const p_mad_pool) +{ + osm_madw_t* p_madw; + + OSM_LOG_ENTER( p_vl->p_log, osm_vl15_shutdown ); + + /* we should only get here after the VL15 interface was initialized */ + CL_ASSERT( p_vl->state == OSM_VL15_STATE_READY ); + + /* grab a lock on the object */ + cl_spinlock_acquire( &p_vl->lock ); + + cl_disp_unregister( p_vl->h_disp ); + + /* go over all outstanding MADs and retire their transactions */ + + /* first we handle the list of response MADs */ + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->ufifo ); + while ( p_madw != (osm_madw_t*)cl_qlist_end( &p_vl->ufifo ) ) + { + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "osm_vl15_shutdown: " + "Releasing Response p_madw = %p\n", p_madw ); + } + + osm_mad_pool_put( p_mad_pool, p_madw ); + + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->ufifo ); + } + + /* Request MADs we send out */ + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->rfifo ); + while ( p_madw != (osm_madw_t*)cl_qlist_end( &p_vl->rfifo ) ) + { + if( osm_log_is_active( p_vl->p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_vl->p_log, OSM_LOG_DEBUG, + "osm_vl15_shutdown: " + "Releasing Request p_madw = %p\n", p_madw ); + } + + osm_mad_pool_put( p_mad_pool, p_madw ); + cl_atomic_dec( &p_vl->p_stats->qp0_mads_outstanding ); + + p_madw = (osm_madw_t*)cl_qlist_remove_head( &p_vl->rfifo ); + } + + /* release the lock */ + cl_spinlock_release( &p_vl->lock ); + + OSM_LOG_EXIT( p_vl->p_log ); +} + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv.c b/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv.c new file mode 100644 index 00000000..2bb9690f --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv.c @@ -0,0 +1,240 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. 
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_vla_rcv_t. + * This object represents the Vl Arbitration Receiver object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +osm_vla_rcv_construct( + IN osm_vla_rcv_t* const p_rcv ) +{ + memset( p_rcv, 0, sizeof(*p_rcv) ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vla_rcv_destroy( + IN osm_vla_rcv_t* const p_rcv ) +{ + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_vla_rcv_destroy ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vla_rcv_init( + IN osm_vla_rcv_t* const p_rcv, + IN osm_req_t* const p_req, + IN osm_subn_t* const p_subn, + IN osm_log_t* const p_log, + IN cl_plock_t* const p_lock ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_vla_rcv_init ); + + osm_vla_rcv_construct( p_rcv ); + + p_rcv->p_log = p_log; + p_rcv->p_subn = p_subn; + p_rcv->p_lock = p_lock; + p_rcv->p_req = p_req; + + OSM_LOG_EXIT( p_log ); + return( status ); +} + +/********************************************************************** + **********************************************************************/ +/* + * WE MIGHT ONLY RECEIVE GET or SET responses + */ +void +osm_vla_rcv_process( + IN const osm_vla_rcv_t* const p_rcv, + IN osm_madw_t* const p_madw ) +{ + cl_qmap_t *p_guid_tbl; + ib_vl_arb_table_t *p_vla_tbl; + ib_smp_t *p_smp; + osm_port_t *p_port; + osm_physp_t *p_physp; + osm_node_t *p_node; + osm_vla_context_t *p_context; + ib_net64_t port_guid; + ib_net64_t node_guid; + uint8_t port_num, block_num; + + CL_ASSERT( p_rcv ); + + OSM_LOG_ENTER( p_rcv->p_log, osm_vla_rcv_process ); + + CL_ASSERT( p_madw ); + + p_smp = 
osm_madw_get_smp_ptr( p_madw ); + + p_context = osm_madw_get_vla_context_ptr( p_madw ); + p_vla_tbl = (ib_vl_arb_table_t*)ib_smp_get_payload_ptr( p_smp ); + + port_guid = p_context->port_guid; + node_guid = p_context->node_guid; + + CL_ASSERT( p_smp->attr_id == IB_MAD_ATTR_VL_ARBITRATION ); + + p_guid_tbl = &p_rcv->p_subn->port_guid_tbl; + cl_plock_excl_acquire( p_rcv->p_lock ); + p_port = (osm_port_t*)cl_qmap_get( p_guid_tbl, port_guid ); + + if( p_port == (osm_port_t*)cl_qmap_end( p_guid_tbl ) ) + { + cl_plock_release( p_rcv->p_lock ); + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vla_rcv_process: ERR 3F06: " + "No port object for port with GUID 0x%" PRIx64 + "\n\t\t\t\tfor parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + goto Exit; + } + + p_node = osm_port_get_parent_node( p_port ); + CL_ASSERT( p_node ); + + block_num = (uint8_t)(cl_ntoh32(p_smp->attr_mod) >> 16); + /* in case of a non switch node the attr modifier should be ignored */ + if (osm_node_get_type( p_node ) == IB_NODE_TYPE_SWITCH) + { + port_num = (uint8_t)(cl_ntoh32(p_smp->attr_mod) & 0x000000FF); + p_physp = osm_node_get_physp_ptr( p_node, port_num ); + } + else + { + p_physp = osm_port_get_default_phys_ptr(p_port); + port_num = p_port->default_port_num; + } + + CL_ASSERT( p_physp ); + + /* + We do not mind if this is a result of a set or get - all we want is to update + the subnet. + */ + if( osm_log_is_active( p_rcv->p_log, OSM_LOG_VERBOSE ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_VERBOSE, + "osm_vla_rcv_process: " + "Got GetResp(VLArb) block:%u port_num %u with GUID 0x%" PRIx64 + " for parent node GUID 0x%" PRIx64 + ", TID 0x%" PRIx64 "\n", + block_num, port_num, + cl_ntoh64( port_guid ), + cl_ntoh64( node_guid ), + cl_ntoh64( p_smp->trans_id ) ); + } + + /* + Determine if we encountered a new Physical Port. + If so, Ignore it. + */ + if( !osm_physp_is_valid( p_physp ) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vla_rcv_process: " + "Got invalid port number 0x%X\n", + port_num ); + goto Exit; + } + + osm_dump_vl_arb_table( p_rcv->p_log, + port_guid, block_num, + port_num, p_vla_tbl, + OSM_LOG_DEBUG ); + + if ( (block_num < 1) || (block_num > 4) ) + { + osm_log( p_rcv->p_log, OSM_LOG_ERROR, + "osm_vla_rcv_process: " + "Got invalid block number 0x%X\n", + block_num ); + goto Exit; + } + osm_physp_set_vla_tbl( p_physp, p_vla_tbl, block_num); + + Exit: + cl_plock_release( p_rcv->p_lock ); + + OSM_LOG_EXIT( p_rcv->p_log ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv_ctrl.c b/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv_ctrl.c new file mode 100644 index 00000000..8dde5a87 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/osm_vl_arb_rcv_ctrl.c @@ -0,0 +1,127 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* + * Abstract: + * Implementation of osm_vla_rcv_ctrl_t. + * This object represents the Vl Arbitration request controller object. + * This object is part of the opensm family of objects. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.4 $ + */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +/********************************************************************** + **********************************************************************/ +void +__osm_vla_rcv_ctrl_disp_callback( + IN void *context, + IN void *p_data ) +{ + /* ignore return status when invoked via the dispatcher */ + osm_vla_rcv_process( ((osm_vla_rcv_ctrl_t*)context)->p_rcv, + (osm_madw_t*)p_data ); +} + +/********************************************************************** + **********************************************************************/ +void +osm_vla_rcv_ctrl_construct( + IN osm_vla_rcv_ctrl_t* const p_ctrl ) +{ + memset( p_ctrl, 0, sizeof(*p_ctrl) ); + p_ctrl->h_disp = CL_DISP_INVALID_HANDLE; +} + +/********************************************************************** + **********************************************************************/ +void +osm_vla_rcv_ctrl_destroy( + IN osm_vla_rcv_ctrl_t* const p_ctrl ) +{ + CL_ASSERT( p_ctrl ); + cl_disp_unregister( p_ctrl->h_disp ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osm_vla_rcv_ctrl_init( + IN osm_vla_rcv_ctrl_t* const p_ctrl, + IN osm_vla_rcv_t* const p_rcv, + IN osm_log_t* const p_log, + IN cl_dispatcher_t* const p_disp ) +{ + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osm_vla_rcv_ctrl_init ); + + osm_vla_rcv_ctrl_construct( p_ctrl ); + p_ctrl->p_log = p_log; + + p_ctrl->p_rcv = p_rcv; + p_ctrl->p_disp = p_disp; + + p_ctrl->h_disp = cl_disp_register( + p_disp, + OSM_MSG_MAD_VL_ARB, + __osm_vla_rcv_ctrl_disp_callback, + p_ctrl ); + + if( p_ctrl->h_disp == CL_DISP_INVALID_HANDLE ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osm_vla_rcv_ctrl_init: ERR 4001: " + "Dispatcher registration failed\n" ); + status = IB_INSUFFICIENT_RESOURCES; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return( status ); +} + + diff --git a/branches/Ndi/ulp/opensm/user/opensm/st.c b/branches/Ndi/ulp/opensm/user/opensm/st.c new file mode 100644 index 00000000..c6d1d998 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/opensm/st.c @@ -0,0 +1,625 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +/* static char sccsid[] = "@(#) st.c 5.1 89/12/14 Crucible"; */ + +#if HAVE_CONFIG_H +# include +#endif /* HAVE_CONFIG_H */ + +#include +#include +#include + +#ifdef _WIN32 +#include +#endif + +typedef struct st_table_entry st_table_entry; + +struct st_table_entry { + unsigned int hash; + st_data_t key; + st_data_t record; + st_table_entry *next; +}; + +#define ST_DEFAULT_MAX_DENSITY 5 +#define ST_DEFAULT_INIT_TABLE_SIZE 11 + +/* + * DEFAULT_MAX_DENSITY is the default for the largest we allow the + * average number of items per bin before increasing the number of + * bins + * + * DEFAULT_INIT_TABLE_SIZE is the default for the number of bins + * allocated initially + * + */ +static int numcmp(void *, void *); +static st_ptr_t numhash(void *); +static struct st_hash_type type_numhash = { + numcmp, + numhash, +}; + + +/* extern int strcmp(const char *, const char *); */ +static int strhash(const char *); + +static inline st_ptr_t st_strhash(void *key) +{ + return strhash((const char *)key); +} + +static inline int st_strcmp(void *key1, void *key2) +{ + return strcmp((const char *)key1, (const char *)key2); +} + +static struct st_hash_type type_strhash = { + st_strcmp, + st_strhash +}; + + +#define xmalloc malloc +#define xcalloc calloc +#define xrealloc realloc +#define xfree free + +static void rehash(st_table *); + +#define alloc(type) (type*)xmalloc(sizeof(type)) +#define Calloc(n,s) (char*)xcalloc((n), (s)) + +#define EQUAL(table,x,y) ((x)==(y) || (*table->type->compare)(((void*)x),((void *)y)) == 0) + +#define do_hash(key,table) (unsigned int)(*(table)->type->hash)(((void*)key)) +#define do_hash_bin(key,table) (do_hash(key, table)%(table)->num_bins) + +/* + * MINSIZE is the minimum size of a dictionary. + */ + +#define MINSIZE 8 + +/* + Table of prime numbers 2^n+a, 2<=n<=30. 
+*/ +static long primes[] = { + 8 + 3, + 16 + 3, + 32 + 5, + 64 + 3, + 128 + 3, + 256 + 27, + 512 + 9, + 1024 + 9, + 2048 + 5, + 4096 + 3, + 8192 + 27, + 16384 + 43, + 32768 + 3, + 65536 + 45, + 131072 + 29, + 262144 + 3, + 524288 + 21, + 1048576 + 7, + 2097152 + 17, + 4194304 + 15, + 8388608 + 9, + 16777216 + 43, + 33554432 + 35, + 67108864 + 15, + 134217728 + 29, + 268435456 + 3, + 536870912 + 11, + 1073741824 + 85, + 0 +}; + +static int +new_size(int size) +{ + int i; + +#if 0 + for (i=3; i<31; i++) { + if ((1<<i) > size) return 1<<i; + } + return -1; +#else + int newsize; + + for (i = 0, newsize = MINSIZE; + i < sizeof(primes)/sizeof(primes[0]); + i++, newsize <<= 1) + { + if (newsize > size) return primes[i]; + } + /* Ran out of polynomials */ + return -1; /* should raise exception */ +#endif +} + +#ifdef HASH_LOG +static int collision = 0; +static int init_st = 0; + +static void +stat_col() +{ + FILE *f = fopen("/var/log/osm_st_col", "w"); + fprintf(f, "collision: %d\n", collision); + fclose(f); +} +#endif + +st_table* +st_init_table_with_size(type, size) + struct st_hash_type *type; + size_t size; +{ + st_table *tbl; + +#ifdef HASH_LOG + if (init_st == 0) + { + init_st = 1; + atexit(stat_col); + } +#endif + + size = new_size(size); /* round up to prime number */ + + tbl = alloc(st_table); + tbl->type = type; + tbl->num_entries = 0; + tbl->num_bins = size; + tbl->bins = (st_table_entry **)Calloc(size, sizeof(st_table_entry*)); + + return tbl; +} + +st_table* +st_init_table(type) + struct st_hash_type *type; +{ + return st_init_table_with_size(type, 0); +} + +st_table* +st_init_numtable(void) +{ + return st_init_table(&type_numhash); +} + +st_table* +st_init_numtable_with_size(size) + size_t size; +{ + return st_init_table_with_size(&type_numhash, size); +} + +st_table* +st_init_strtable(void) +{ + return st_init_table(&type_strhash); +} + +st_table* +st_init_strtable_with_size(size) + size_t size; +{ + return st_init_table_with_size(&type_strhash, size); +} + +void +st_free_table(table) + st_table *table; +{ + register st_table_entry *ptr, *next; + int i; + + for (i = 0; i < table->num_bins; i++) { + ptr = table->bins[i]; + while (ptr != 0) { + next = ptr->next; + free(ptr); + ptr = next; + } + } + free(table->bins); + free(table); +} + +#define PTR_NOT_EQUAL(table, ptr, hash_val, key) \ +((ptr) != 0 && (ptr->hash != (hash_val) || !EQUAL((table), (key), (ptr)->key))) + +#ifdef HASH_LOG +#define COLLISION collision++ +#else +#define COLLISION +#endif + +#define FIND_ENTRY(table, ptr, hash_val, bin_pos) do {\ + bin_pos = hash_val%(table)->num_bins;\ + ptr = (table)->bins[bin_pos];\ + if (PTR_NOT_EQUAL(table, ptr, hash_val, key)) \ + {\ + COLLISION;\ + while (PTR_NOT_EQUAL(table, ptr->next, hash_val, key)) {\ + ptr = ptr->next;\ + }\ + ptr = ptr->next;\ + }\ +} while (0) + +int +st_lookup(table, key, value) + st_table *table; + register st_data_t key; + st_data_t *value; +{ + unsigned int hash_val, bin_pos; + register st_table_entry *ptr; + + hash_val = do_hash(key, table); + FIND_ENTRY(table, ptr, hash_val, bin_pos); + + if (ptr == 0) + { + return 0; + } + else + { + if (value != 0) + *value = ptr->record; + return 1; + } +} + +#define ADD_DIRECT(table, key, value, hash_val, bin_pos)\ +do {\ + st_table_entry *entry;\ + if (table->num_entries/(table->num_bins) > ST_DEFAULT_MAX_DENSITY) \ + {\ + rehash(table);\ + bin_pos = hash_val % table->num_bins;\ + }\ + \ + entry = alloc(st_table_entry);\ + \ + entry->hash = hash_val;\ + entry->key = key;\ + entry->record = value;\ + entry->next = table->bins[bin_pos];\ + table->bins[bin_pos] = entry;\ + table->num_entries++;\ +} while (0); + + +int +st_insert(table, key, value) + register st_table *table; + 
register st_data_t key; + st_data_t value; +{ + unsigned int hash_val, bin_pos; + register st_table_entry *ptr; + + hash_val = do_hash(key, table); + FIND_ENTRY(table, ptr, hash_val, bin_pos); + + if (ptr == 0) + { + ADD_DIRECT(table, key, value, hash_val, bin_pos); + return 0; + } + else + { + ptr->record = value; + return 1; + } +} + +void +st_add_direct(table, key, value) + st_table *table; + st_data_t key; + st_data_t value; +{ + unsigned int hash_val, bin_pos; + + hash_val = do_hash(key, table); + bin_pos = hash_val % table->num_bins; + ADD_DIRECT(table, key, value, hash_val, bin_pos); +} + +static void +rehash(table) + register st_table *table; +{ + register st_table_entry *ptr, *next, **new_bins; + int i, old_num_bins = table->num_bins, new_num_bins; + unsigned int hash_val; + + new_num_bins = new_size(old_num_bins+1); + new_bins = (st_table_entry**)Calloc(new_num_bins, sizeof(st_table_entry*)); + + for (i = 0; i < old_num_bins; i++) { + ptr = table->bins[i]; + while (ptr != 0) { + next = ptr->next; + hash_val = ptr->hash % new_num_bins; + ptr->next = new_bins[hash_val]; + new_bins[hash_val] = ptr; + ptr = next; + } + } + free(table->bins); + table->num_bins = new_num_bins; + table->bins = new_bins; +} + +st_table* +st_copy(old_table) + st_table *old_table; +{ + st_table *new_table; + st_table_entry *ptr, *entry; + size_t i, num_bins = old_table->num_bins; + + new_table = alloc(st_table); + if (new_table == 0) + { + return 0; + } + + *new_table = *old_table; + new_table->bins = (st_table_entry**) + Calloc(num_bins, sizeof(st_table_entry*)); + + if (new_table->bins == 0) + { + free(new_table); + return 0; + } + + for (i = 0; i < num_bins; i++) { + new_table->bins[i] = 0; + ptr = old_table->bins[i]; + while (ptr != 0) { + entry = alloc(st_table_entry); + if (entry == 0) + { + free(new_table->bins); + free(new_table); + return 0; + } + *entry = *ptr; + entry->next = new_table->bins[i]; + new_table->bins[i] = entry; + ptr = ptr->next; + } + } + return new_table; +} + +int +st_delete(table, key, value) + register st_table *table; + register st_data_t *key; + st_data_t *value; +{ + unsigned int hash_val; + st_table_entry *tmp; + register st_table_entry *ptr; + + hash_val = do_hash_bin(*key, table); + ptr = table->bins[hash_val]; + + if (ptr == 0) + { + if (value != 0) *value = 0; + return 0; + } + + if (EQUAL(table, *key, ptr->key)) + { + table->bins[hash_val] = ptr->next; + table->num_entries--; + if (value != 0) *value = ptr->record; + *key = ptr->key; + free(ptr); + return 1; + } + + for (; ptr->next != 0; ptr = ptr->next) { + if (EQUAL(table, ptr->next->key, *key)) + { + tmp = ptr->next; + ptr->next = ptr->next->next; + table->num_entries--; + if (value != 0) *value = tmp->record; + *key = tmp->key; + free(tmp); + return 1; + } + } + + return 0; +} + +int +st_delete_safe(table, key, value, never) + register st_table *table; + register st_data_t *key; + st_data_t *value; + st_data_t never; +{ + unsigned int hash_val; + register st_table_entry *ptr; + + hash_val = do_hash_bin(*key, table); + ptr = table->bins[hash_val]; + + if (ptr == 0) + { + if (value != 0) *value = 0; + return 0; + } + + for (; ptr != 0; ptr = ptr->next) { + if ((ptr->key != never) && EQUAL(table, ptr->key, *key)) + { + table->num_entries--; + *key = ptr->key; + if (value != 0) *value = ptr->record; + ptr->key = ptr->record = never; + return 1; + } + } + + return 0; +} + +static int +delete_never(st_data_t key, st_data_t value, st_data_t never) +{ + if (value == never) return ST_DELETE; + return ST_CONTINUE; +} + 
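The delete_never helper just defined has exactly the callback shape st_foreach expects, and the functions below complete the table's public surface. As a hedged illustration of how the pieces fit together — a minimal sketch, not part of the patch; the <opensm/st.h> include path and the unsigned-long casts are assumptions — a numeric table built from this file's own API can be exercised like this:

/* Illustrative usage sketch of the st.c numeric hash table (not part of
   the patch). Assumes <opensm/st.h> declares st_table, st_data_t and the
   functions used here, and that st_data_t is wide enough for the values. */
#include <stdio.h>
#include <opensm/st.h>

static int print_pair(st_data_t key, st_data_t val, st_data_t arg)
{
	/* same contract as delete_never above: return an st_retval to
	   continue, stop, or delete the current entry */
	printf("key=%lu val=%lu\n", (unsigned long)key, (unsigned long)val);
	return ST_CONTINUE;
}

int main(void)
{
	st_table *t = st_init_numtable();
	st_data_t out = 0;

	st_insert(t, 10, 100);      /* returns 0: key was absent, entry added */
	st_insert(t, 10, 200);      /* returns 1: key existed, record replaced */

	if (st_lookup(t, 10, &out)) /* out now holds 200 */
		printf("lookup -> %lu\n", (unsigned long)out);

	st_foreach(t, print_pair, (st_data_t)0);
	st_free_table(t);
	return 0;
}

st_init_strtable() works the same way but hashes NUL-terminated strings through strhash()/strcmp(), and since keys are stored as raw st_data_t pointers rather than copied, string keys must stay valid for the life of the table.
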
+void +st_cleanup_safe(table, never) + st_table *table; + st_data_t never; +{ + int num_entries = table->num_entries; + + st_foreach(table, delete_never, never); + table->num_entries = num_entries; +} + +void +st_foreach(table, func, arg) + st_table *table; + int (*func)(st_data_t key, st_data_t val, st_data_t arg); + st_data_t arg; +{ + st_table_entry *ptr, *last, *tmp; + enum st_retval retval; + int i; + + for (i = 0; i < table->num_bins; i++) { + last = 0; + for (ptr = table->bins[i]; ptr != 0;) { + retval = (*func)(ptr->key, ptr->record, arg); + switch (retval) { + case ST_CONTINUE: + last = ptr; + ptr = ptr->next; + break; + case ST_STOP: + return; + case ST_DELETE: + tmp = ptr; + if (last == 0) + { + table->bins[i] = ptr->next; + } + else + { + last->next = ptr->next; + } + ptr = ptr->next; + free(tmp); + table->num_entries--; + } + } + } +} + +static int +strhash(string) + register const char *string; +{ + register int c; + +#ifdef HASH_ELFHASH + register unsigned int h = 0, g; + + while ((c = *string++) != '\0') { + h = ( h << 4 ) + c; + if ( g = h & 0xF0000000 ) + h ^= g >> 24; + h &= ~g; + } + return h; +#elif HASH_PERL + register int val = 0; + + while ((c = *string++) != '\0') { + val = val*33 + c; + } + + return val + (val>>5); +#else + register int val = 0; + + while ((c = *string++) != '\0') { + val = val*997 + c; + } + + return val + (val>>5); +#endif +} + +static int +numcmp(x, y) + void *x, *y; +{ + return (st_ptr_t)x != (st_ptr_t)y; +} + +static st_ptr_t +numhash(n) + void *n; +{ + return (st_ptr_t)n; +} + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/Makefile b/branches/Ndi/ulp/opensm/user/osmtest/Makefile new file mode 100644 index 00000000..9c985f57 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/Makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the Windows NT DDK +# + +!INCLUDE $(NTMAKEENV)\makefile.def diff --git a/branches/Ndi/ulp/opensm/user/osmtest/SOURCES b/branches/Ndi/ulp/opensm/user/osmtest/SOURCES new file mode 100644 index 00000000..0fe94590 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/SOURCES @@ -0,0 +1,73 @@ +!if $(FREEBUILD) +TARGETNAME=osmtest +!else +TARGETNAME=osmtestd +!endif + +!if !defined(WINIBHOME) +WINIBHOME=..\..\..\.. +!endif + +LIBPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) + +!if defined(OSM_TARGET) +TARGETPATH=$(OSM_TARGET)\bin\user\obj$(BUILD_ALT_DIR) +!else +TARGETPATH=$(WINIBHOME)\bin\user\obj$(BUILD_ALT_DIR) +!endif + +TARGETTYPE=PROGRAM +UMTYPE=console +USE_CRTDLL=1 +OVR_DIR=..\addon + + +SOURCES=\ + osmt_slvl_vl_arb.c \ + osmt_service.c \ + osmt_multicast.c \ + osmt_inform.c \ + osmtest.c \ + main.c \ + + + +OSM_HOME=.. 
+ +TARGETLIBS=\ +!if $(FREEBUILD) + $(LIBPATH)\*\ibal.lib \ + $(LIBPATH)\*\complib.lib \ + $(TARGETPATH)\*\osmv_ibal.lib \ + $(TARGETPATH)\*\opensm_ibal.lib \ + $(CRT_LIB_PATH)\msvcrt.lib + +!else + $(LIBPATH)\*\ibald.lib \ + $(LIBPATH)\*\complibd.lib \ + $(TARGETPATH)\*\osmv_ibald.lib \ + $(TARGETPATH)\*\opensm_ibald.lib \ + $(CRT_LIB_PATH)\msvcrt.lib +!endif + +# DO NOT TOUCH the order of the search paths until the ib_types.h merging process is done +INCLUDES= \ + $(OSM_HOME)\osmtest\include; \ + $(OSM_HOME)\include; \ + $(OSM_HOME); \ + $(WINIBHOME)\inc; \ + $(WINIBHOME)\inc\user; + +# Could be any special flag needed for this project +USER_C_FLAGS=$(USER_C_FLAGS) /MD +# Add preprocessor definitions +C_DEFINES=$(C_DEFINES) -DWIN32 -D__WIN__ -D__i386__ -Dinline=__inline -DMT_LITTLE_ENDIAN -DOSM_VENDOR_INTF_AL +C_DEFINES=$(C_DEFINES) -I.. -DHAVE_CONFIG_H +!if !$(FREEBUILD) +#C_DEFINES=$(C_DEFINES) -D_DEBUG -DDEBUG -DDBG +C_DEFINES=$(C_DEFINES) +!endif + +LINKER_FLAGS= $(LINKER_FLAGS) +MSC_WARNING_LEVEL= /W3 + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/error.h b/branches/Ndi/ulp/opensm/user/osmtest/include/error.h new file mode 100644 index 00000000..ef80b337 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/include/error.h @@ -0,0 +1,57 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of error code ranges for the various osmtest modules. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.2 $ + */ + + +/* + osmtest object + 0x0100 - 0x01FF + + parser object + 0x0200 - 0x02FF + + osmtest object + 0x0300 - 0x03FF + +*/ + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_inform.h b/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_inform.h new file mode 100644 index 00000000..f0262541 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_inform.h @@ -0,0 +1,88 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef __OSMT_INFORM__ +#define __OSMT_INFORM__ + +#ifdef OSM_VENDOR_INTF_MTL +#include +#include +#include "osmt_mtl_regular_qp.h" +#endif + +typedef struct _osmt_qp_ctx +{ +#ifdef OSM_VENDOR_INTF_MTL + osmt_mtl_mad_res_t qp_bind_hndl; +#endif + uint8_t *p_send_buf; + uint8_t *p_recv_buf; +#ifdef OSM_VENDOR_INTF_MTL + IB_MGT_mad_hndl_t ib_mgt_qp0_handle; +#endif +} +osmt_qp_ctx_t; + +ib_api_status_t +osmt_bind_inform_qp( IN osmtest_t * const p_osmt, + OUT osmt_qp_ctx_t *p_qp_ctx); + +void +osmt_unbind_inform_qp( IN osmtest_t * const p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx); + + +ib_api_status_t +osmt_reg_unreg_inform_info( IN osmtest_t *p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx, + IN ib_inform_info_t *p_inform_info, + IN uint8_t reg_flag + ); + +ib_api_status_t +osmt_trap_wait( IN osmtest_t * const p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx + ); + +ib_api_status_t +osmt_init_inform_info(IN osmtest_t * const p_osmt, + OUT ib_inform_info_t* p_ii); + +ib_api_status_t +osmt_init_inform_info_by_trap (IN osmtest_t * const p_osmt, + IN ib_net16_t trap_num, + OUT ib_inform_info_t* p_ii); + +#endif /* __OSMT_INFORM__ */ + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_mtl_regular_qp.h b/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_mtl_regular_qp.h new file mode 100644 index 00000000..0466b76c --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/include/osmt_mtl_regular_qp.h @@ -0,0 +1,187 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * mad.h - + * Header file for common special QP resources creation code. + * + * Creation date: + * + * Version: osmt_mtl_regular_qp.h,v 1.2 2003/03/20 16:05:10 eitan + * + * Authors: + * Elazar Raab + * + * Changes: + */ + +#ifndef H_MAD_H +#define H_MAD_H + +#include +#include +#include +#include + + +#if defined(MAD_IN) || defined(MAD_OUT) +#error MACROS MAD_IN and MAD_OUT are in use, do not override +#endif +#define MAD_IN +#define MAD_OUT + + + +/* HCA Constants */ +#define HCA_ID "mt21108_pci0" +#define GRH_LEN 40 +#define KNOWN_QP1_QKEY 0x80010000 + + +#define MAX_OUTS_SQ 2 /* Max. buffers posted for requests in SQ */ +#define MAX_OUTS_RQ 5 /* Max. buffers posted for responses in RQ */ + +#define MAX_POLL_CNT 300 +#define POLL_SLEEP 1 /* for usleep */ + +#define MAD_SIZE 256 /* MADs are always 256B */ +#define MAD_ATTR_OFFSET 16 +#define MAD_TID_OFFSET 8 + + + + +/* Verbs SQP resources handles */ +typedef struct { + VAPI_hca_id_t hca_id; /*id of HCA*/ + u_int8_t port_num; /* the port num to use */ + VAPI_hca_hndl_t hca_hndl; /*handle of HCA*/ + VAPI_qp_hndl_t qp_hndl; /*handle of QP I use*/ + VAPI_mr_hndl_t mr_hndl; /*handle of memory region*/ + VAPI_cq_hndl_t rq_cq_hndl, sq_cq_hndl; /*handle of send & receive completion Queues*/ + VAPI_pd_hndl_t pd_hndl; /*handle of Partition Domain*/ + /* VAPI_ud_av_hndl_t av_hndl;*/ + IB_lid_t slid; /*LID*/ + void *buf_ptr; /*mem buffer for outstanding pkts*/ + MT_size_t buf_size; /*size of mem buffer for outstanding pkts*/ + + u_int32_t max_outs_sq; /*max # of outstanding pkts in send queue*/ + u_int32_t max_outs_rq; /*max # of outstanding pkts in receive queue*/ + + IB_rkey_t l_key; /*my l_key for memory regions*/ + VAPI_qkey_t qkey; /*my qkey*/ + + EVAPI_compl_handler_hndl_t rq_cq_eventh, sq_cq_eventh; /* event handlers for polling */ + + bool is_sqp; /* relate to union below - my QP*/ + union { + VAPI_special_qp_t sqp_type; + VAPI_qp_num_t qp_num; + } qp_id; + void *wait_q; +} osmt_mtl_mad_res_t; + + +/* init an osmt_mtl_mad_res_t with all resources initialized (use functions below) */ +VAPI_ret_t osmt_mtl_init( + osmt_mtl_mad_res_t* res /*pointer to res (resources) struct*/ + ); +VAPI_ret_t osmt_mtl_init_opened_hca( + osmt_mtl_mad_res_t* res /*pointer to res (resources) struct*/ + ); + +/* Cleanup all resources of (which are valid) in res */ +VAPI_ret_t osmt_mtl_mad_cleanup( + osmt_mtl_mad_res_t *res /*pointer to res (resources) struct*/ + ); + + +/* create CQs and QP as given in res->is_sqp (if TRUE, get special QP) */ +VAPI_ret_t osmt_mtl_get_qp_resources( + osmt_mtl_mad_res_t *res /*pointer to res (resources) struct*/ +); + + +/* move QP to RTS state */ +VAPI_ret_t osmt_mtl_mad_qp_init( + osmt_mtl_mad_res_t *res /*max number of outstanding packets allowed in send queue*/ + ); + + +/* create and register res->buf_ptr */ +VAPI_ret_t osmt_mtl_mad_create_mr( + osmt_mtl_mad_res_t *res /*pointer to res (resources) struct*/ + ); + +VAPI_ret_t osmt_mtl_create_av( + osmt_mtl_mad_res_t* res, /* pointer to res (resources) struct*/ + 
int16_t dlid, /*destination lid */ + VAPI_ud_av_hndl_t *avh_p /* address vector handle to update */ +); + +/* Send MAD to given dest QP*/ +VAPI_ret_t osmt_mtl_mad_send( + osmt_mtl_mad_res_t* res, /*pointer to res (resources) struct*/ + VAPI_wr_id_t id, /*wqe ID*/ + void* mad, /*mad buffer to send*/ + VAPI_qp_num_t dest_qp, /*destination QP*/ + IB_sl_t sl, /*Service Level*/ + u_int32_t dest_qkey, /*Destination QP KEY*/ + VAPI_ud_av_hndl_t avh /* address vector handle to use */ +); + + +/* post buffers to RQ. returns num of buffers actually posted */ +int osmt_mtl_mad_post_recv_bufs( + osmt_mtl_mad_res_t* res, /*pointer to res (resources) struct*/ + void *buf_array, /*array of receive buffers*/ + u_int32_t num_o_bufs, /*number of receive buffers*/ + u_int32_t size, /* size of expected receive packet - MAD*/ + VAPI_wr_id_t start_id /* start id for receive buffers*/ +); + + +/* Poll given CQ for completion max_poll times (POLL_SLEEP [usec] delays). Result is returned in wc_desc_p. */ +VAPI_ret_t osmt_mtl_mad_poll4cqe( + VAPI_hca_hndl_t hca, /*handle for HCA*/ + VAPI_cq_hndl_t cq, /*handle for Completion Queue - Rcv/Send */ + VAPI_wc_desc_t *wc_desc_p, /*handle of cqe */ + u_int32_t max_poll, /*number of polling iterations*/ + u_int32_t poll_sleep, /*timeout for each polling */ + VAPI_ud_av_hndl_t *avh_p /* address vector handle to cleanup */ + ); + + +#endif + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest.h b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest.h new file mode 100644 index 00000000..58feec98 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest.h @@ -0,0 +1,517 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Declaration of osmtest_t. + * This object represents the OSMTest Test object. + * + * Environment: + * Linux User Mode + * + * $Revision: 1.6 $ + */ + +#ifndef _OSMTEST_H_ +#define _OSMTEST_H_ + +#include +#include +#include +#include +#include +#include +#include "osmtest_base.h" +#include "osmtest_subnet.h" + +/****s* OpenSM: Subnet/osmtest_opt_t + * NAME + * osmtest_opt_t + * + * DESCRIPTION + * Subnet options structure. This structure contains the various + * site-specific configuration parameters for osmtest. 
+ * + * SYNOPSIS + */ +typedef struct _osmtest_opt +{ + uint32_t transaction_timeout; + boolean_t force_log_flush; + boolean_t create; + uint32_t retry_count; + uint32_t stress; + uint32_t mmode; + char file_name[OSMTEST_FILE_PATH_MAX]; + uint8_t flow; + uint8_t wait_time; + char *log_file; + boolean_t ignore_path_records; +} osmtest_opt_t; + +/* + * FIELDS + * + * SEE ALSO + *********/ + +/****h* OSMTest/OSMTest + * NAME + * OSMTest + * + * DESCRIPTION + * The OSMTest object tests an SM/SA for conformance to a known + * set of data about an InfiniBand subnet. + * + * AUTHOR + * Steve King, Intel + * + *********/ + + +/****s* OSMTest/osmtest_t + * NAME + * osmtest_t + * + * DESCRIPTION + * OSMTest structure. + * + * This object should be treated as opaque and should + * be manipulated only through the provided functions. + * + * SYNOPSIS + */ +typedef struct _osmtest +{ + osm_log_t log; + struct _osm_vendor *p_vendor; + osm_bind_handle_t h_bind; + osm_mad_pool_t mad_pool; + + osmtest_opt_t opt; + ib_port_attr_t local_port; + subnet_t exp_subn; + cl_qpool_t node_pool; + cl_qpool_t port_pool; + cl_qpool_t link_pool; + + uint16_t max_lid; +} osmtest_t; + +/* + * FIELDS + * log + * Log facility used by all OSMTest components. + * + * p_vendor + * Pointer to the vendor transport layer. + * + * h_bind + * The bind handle obtained by osm_vendor_sa_api/osmv_bind_sa + * + * mad_pool + * The mad pool provided for the vendor layer to allocate mad wrappers in + * + * opt + * osmtest options structure + * + * local_port + * Port attributes for the port over which osmtest is running. + * + * exp_subn + * Subnet object representing the expected subnet + * + * node_pool + * Pool of objects for use in populating the subnet databases. + * + * port_pool + * Pool of objects for use in populating the subnet databases. + * + * link_pool + * Pool of objects for use in populating the subnet databases. + * + * SEE ALSO + *********/ + +/****s* OpenSM: Subnet/osmtest_req_context_t + * NAME + * osmtest_req_context_t + * + * DESCRIPTION + * Query context for ib_query callback function. + * + * SYNOPSIS + */ +typedef struct _osmtest_req_context +{ + osmtest_t *p_osmt; + osmv_query_res_t result; +} osmtest_req_context_t; + +typedef struct _osmtest_mgrp_t +{ + cl_map_item_t map_item; + ib_member_rec_t mcmember_rec; +} osmtest_mgrp_t; + + +/* + * FIELDS + * + * SEE ALSO + *********/ + +/****f* OSMTest/osmtest_construct + * NAME + * osmtest_construct + * + * DESCRIPTION + * This function constructs an OSMTest object. + * + * SYNOPSIS + */ +void osmtest_construct( IN osmtest_t * const p_osmt ); + +/* + * PARAMETERS + * p_osmt + * [in] Pointer to an OSMTest object to construct. + * + * RETURN VALUE + * This function does not return a value. + * + * NOTES + * Allows calling osmtest_init, osmtest_destroy. + * + * Calling osmtest_construct is a prerequisite to calling any other + * method except osmtest_init. + * + * SEE ALSO + * SM object, osmtest_init, osmtest_destroy + *********/ + +/****f* OSMTest/osmtest_destroy + * NAME + * osmtest_destroy + * + * DESCRIPTION + * The osmtest_destroy function destroys an osmtest object, releasing + * all resources. + * + * SYNOPSIS + */ +void osmtest_destroy( IN osmtest_t * const p_osmt ); + +/* + * PARAMETERS + * p_osmt + * [in] Pointer to an OSMTest object to destroy. + * + * RETURN VALUE + * This function does not return a value. + * + * NOTES + * Performs any necessary cleanup of the specified OSMTest object. + * Further operations should not be attempted on the destroyed object. 
+ * This function should only be called after a call to osmtest_construct or + * osmtest_init. + * + * SEE ALSO + * SM object, osmtest_construct, osmtest_init + *********/ + +/****f* OSMTest/osmtest_init + * NAME + * osmtest_init + * + * DESCRIPTION + * The osmtest_init function initializes an OSMTest object for use. + * + * SYNOPSIS + */ +ib_api_status_t osmtest_init( IN osmtest_t * const p_osmt, + IN const osmtest_opt_t * const p_opt, + IN const osm_log_level_t log_flags + ); + +/* + * PARAMETERS + * p_osmt + * [in] Pointer to an osmtest_t object to initialize. + * + * p_opt + * [in] Pointer to the options structure. + * + * log_flags + * [in] Log level flags to set. + * + * RETURN VALUES + * IB_SUCCESS if the OSMTest object was initialized successfully. + * + * NOTES + * Allows calling other OSMTest methods. + * + * SEE ALSO + * SM object, osmtest_construct, osmtest_destroy + *********/ + +/****f* OSMTest/osmtest_run + * NAME + * osmtest_run + * + * DESCRIPTION + * Runs the osmtest suite. + * + * SYNOPSIS + */ +ib_api_status_t osmtest_run( IN osmtest_t * const p_osmt ); + +/* + * PARAMETERS + * p_osmt + * [in] Pointer to an osmtest_t object. + * + * RETURN VALUES + * IB_SUCCESS + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* OSMTest/osmtest_bind + * NAME + * osmtest_bind + * + * DESCRIPTION + * Binds osmtest to a local port. + * + * SYNOPSIS + */ +ib_api_status_t osmtest_bind( IN osmtest_t * p_osmt, + IN uint16_t max_lid, + IN ib_net64_t guid OPTIONAL ); + +/* + * PARAMETERS + * p_osmt + * [in] Pointer to an osmtest_t object. + * + * max_lid + * [in] The maximal lid to query about (if RMPP is not supported) + * + * guid + * [in] Port GUID over which to run the test suite. + * If zero, the bind function will display a menu of local + * port guids and wait for user input. + * + * RETURN VALUES + * IB_SUCCESS + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* OSMTest/osmtest_query_res_cb + * NAME + * osmtest_query_res_cb + * + * DESCRIPTION + * A callback for the query to invoke on completion + * + * SYNOPSIS + */ +void +osmtest_query_res_cb( IN osmv_query_res_t * p_rec ); +/* + * PARAMETERS + * p_rec + * [in] Pointer to an ib_query_rec_t object used for the query. + * + * RETURN VALUES + * NONE + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* OSMTest/ib_get_mad_status_str + * NAME + * ib_get_mad_status_str + * + * DESCRIPTION + * Return the string representing the given MAD status + * + * SYNOPSIS + */ +const char * +ib_get_mad_status_str( IN const ib_mad_t * const p_mad ); +/* + * PARAMETERS + * p_mad + * [in] Pointer to the mad payload + * + * RETURN VALUES + * NONE + * + * NOTES + * + * SEE ALSO + *********/ + +/****f* OSMTest/osmt_run_service_records_flow + * NAME + * osmt_run_service_records_flow + * + * DESCRIPTION + * Run the service record testing flow. + * + * SYNOPSIS + */ +ib_api_status_t osmt_run_service_records_flow( IN osmtest_t * const p_osmt ); +/* + * PARAMETERS + * p_osmt + * [in] Pointer to the osmtest obj + * + * RETURN VALUES + * IB_SUCCESS if PASS + * + * NOTES + * + * SEE ALSO + *********/ + +ib_api_status_t +osmt_run_inform_info_flow( IN osmtest_t * const p_osmt ); + +/****f* OSMTest/osmt_run_slvl_and_vlarb_records_flow + * NAME + * osmt_run_slvl_and_vlarb_records_flow + * + * DESCRIPTION + * Run the sl2vl and vlarb tables testing flow. 
+
+/****f* OSMTest/osmt_run_slvl_and_vlarb_records_flow
+ * NAME
+ *   osmt_run_slvl_and_vlarb_records_flow
+ *
+ * DESCRIPTION
+ *   Run the SL2VL and VLArb tables testing flow.
+ *
+ * SYNOPSIS
+ */
+ib_api_status_t
+osmt_run_slvl_and_vlarb_records_flow( IN osmtest_t * const p_osmt );
+/*
+ * PARAMETERS
+ *   p_osmt
+ *      [in] Pointer to the osmtest object.
+ *
+ * RETURN VALUES
+ *   IB_SUCCESS if the flow passed.
+ *
+ * NOTES
+ *
+ * SEE ALSO
+ *********/
+
+/****f* OSMTest/osmt_run_mcast_flow
+ * NAME
+ *   osmt_run_mcast_flow
+ *
+ * DESCRIPTION
+ *   Run the multicast test flow.
+ *
+ * SYNOPSIS
+ */
+ib_api_status_t
+osmt_run_mcast_flow( IN osmtest_t * const p_osmt );
+/*
+ * PARAMETERS
+ *   p_osmt
+ *      [in] Pointer to the osmtest object.
+ *
+ * RETURN VALUES
+ *   IB_SUCCESS if the flow passed.
+ *
+ * NOTES
+ *
+ * SEE ALSO
+ *********/
+
+/****f* OSMTest/osmt_run_trap64_65_flow
+ * NAME
+ *   osmt_run_trap64_65_flow
+ *
+ * DESCRIPTION
+ *   Run the trap 64/65 test flow.  This test is run with the aid of
+ *   an external tool.
+ *
+ * SYNOPSIS
+ */
+ib_api_status_t
+osmt_run_trap64_65_flow( IN osmtest_t * const p_osmt );
+/*
+ * PARAMETERS
+ *   p_osmt
+ *      [in] Pointer to the osmtest object.
+ *
+ * RETURN VALUES
+ *   IB_SUCCESS if the flow passed.
+ *
+ * NOTES
+ *
+ * SEE ALSO
+ *********/
+
+ib_api_status_t
+osmtest_get_all_recs( IN osmtest_t * const p_osmt,
+                      IN ib_net16_t const attr_id,
+                      IN size_t const attr_size,
+                      IN OUT osmtest_req_context_t * const p_context );
+
+ib_api_status_t
+osmtest_get_local_port_lmc( IN osmtest_t * const p_osmt,
+                            IN ib_net16_t lid,
+                            OUT uint8_t * const p_lmc );
+
+
+/*
+ * A few auxiliary macros for logging
+ */
+
+#define EXPECTING_ERRORS_START "[[ ===== Expecting Errors - START ===== "
+#define EXPECTING_ERRORS_END " ===== Expecting Errors - END ===== ]]"
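+
+/*
+ * Illustrative sketch (not in the original header): negative tests bracket
+ * the errors they deliberately provoke with these markers, so a reader of
+ * the log can tell expected failures from real ones, e.g.:
+ *
+ *   osm_log( &p_osmt->log, OSM_LOG_INFO, EXPECTING_ERRORS_START "\n" );
+ *   ... run a request that the SA is expected to reject ...
+ *   osm_log( &p_osmt->log, OSM_LOG_INFO, EXPECTING_ERRORS_END "\n" );
+ */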
+
+#endif /* _OSMTEST_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_base.h b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_base.h
new file mode 100644
index 00000000..f3716d3d
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_base.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *   Basic definitions and constants used throughout osmtest.
+ *
+ * Environment:
+ *   Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+#ifndef _OSMTEST_BASE_H_
+#define _OSMTEST_BASE_H_
+
+#ifndef __WIN__
+#include
+#else
+#include
+#endif
+
+#define OSMTEST_MAX_LINE_LEN 120
+#ifdef WIN32
+#define OSMTEST_FILE_PATH_MAX 4096
+#else
+#define OSMTEST_FILE_PATH_MAX PATH_MAX
+#endif
+
+#define STRESS_SMALL_RMPP_THR 100000
+/*
+   Queries of big clusters (over 40 nodes) take a long time: on average
+   about 0.25 sec per query, with each query returning 1000 records.
+*/
+#define STRESS_LARGE_RMPP_THR 4000
+#define STRESS_LARGE_PR_RMPP_THR 20000
+
+extern const char *const p_file;
+
+#endif /* _OSMTEST_BASE_H_ */
+
diff --git a/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_subnet.h b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_subnet.h
new file mode 100644
index 00000000..418dd366
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/osmtest/include/osmtest_subnet.h
@@ -0,0 +1,349 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/*
+ * Abstract:
+ *   Declaration of the subnet database objects used by osmtest.
+ *
+ * Environment:
+ *   Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+
+#ifndef _OSMTEST_SUBNET_H_
+#define _OSMTEST_SUBNET_H_
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+/****s* Subnet Database/generic_t
+* NAME
+*   generic_t
+*
+* DESCRIPTION
+*   Subnet database object for fields common to all record types.
+*   All other database types must be castable to this type.
+*
+* SYNOPSIS
+*/
+typedef struct _generic
+{
+   cl_map_item_t map_item;   /* must be first element! */
+   uint32_t count;           /* must be second element! */
+} generic_t;
+
+/*
+* FIELDS
+*
+* SEE ALSO
+*********/
+
+/****s* Subnet Database/node_t
+* NAME
+*   node_t
+*
+* DESCRIPTION
+*   Subnet database object for nodes.
+*   Must be castable to generic_t.
+*
+* SYNOPSIS
+*/
+typedef struct _node
+{
+   cl_map_item_t map_item;   /* must be first element! */
+   uint32_t count;           /* must be second element! */
+   ib_node_record_t rec;
+   ib_node_record_t comp;
+} node_t;
+
+/*
+* FIELDS
+*   map_item
+*      Provides linkage for the qmap container.
+*
+*   rec
+*      NodeRecord for this node as read from the database file.
+*
+*   comp
+*      NodeRecord indicating which fields should be compared against rec.
+*      Bits set in the comp NodeRecord indicate that the corresponding
+*      field in the rec structure should be compared against real-time
+*      data from the SA.
+*
+*   count
+*      Utility counter used by the validation logic.  Typically used
+*      to indicate the number of times a matching node was received
+*      from the SA.
+*
+* SEE ALSO
+*********/
+
+static inline node_t *
+node_new( void )
+{
+   node_t *p_obj;
+
+   p_obj = malloc( sizeof( *p_obj ) );
+   if( p_obj )
+      memset( p_obj, 0, sizeof( *p_obj ) );
+   return ( p_obj );
+}
+
+static inline void
+node_delete( IN node_t * p_obj )
+{
+   free( p_obj );
+}
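+
+/*
+ * Illustrative sketch (not in the original header): how the rec/comp pair
+ * is meant to be used.  A field takes part in validation only where comp
+ * has bits set; the hypothetical helper below checks the node GUID that
+ * way against a record freshly received from the SA.
+ */
+static inline boolean_t
+node_guid_matches( IN const node_t * const p_node,
+                   IN const ib_node_record_t * const p_live )
+{
+   /* compare only the bits selected by the comp mask */
+   return ( ( p_node->rec.node_info.node_guid &
+              p_node->comp.node_info.node_guid ) ==
+            ( p_live->node_info.node_guid &
+              p_node->comp.node_info.node_guid ) );
+}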
+/****s* Subnet Database/port_t
+* NAME
+*   port_t
+*
+* DESCRIPTION
+*   Subnet database object for ports.
+*   Must be castable to generic_t.
+*
+* SYNOPSIS
+*/
+typedef struct _port
+{
+   cl_map_item_t map_item;   /* must be first element! */
+   uint32_t count;           /* must be second element! */
+   /* Ports have no single unique identifier, so we build a
+      key for them from the LID and the port number. */
+   uint64_t port_id;
+   ib_portinfo_record_t rec;
+   ib_portinfo_record_t comp;
+} port_t;
+
+/*
+* FIELDS
+*
+*   map_item
+*      Provides linkage for the qmap container.
+*
+*   rec
+*      PortInfoRecord for this port as read from the database file.
+*
+*   comp
+*      PortInfoRecord indicating which fields should be compared against
+*      rec.  Bits set in the comp PortInfoRecord indicate that the
+*      corresponding field in the rec structure should be compared
+*      against real-time data from the SA.
+*
+*   count
+*      Utility counter used by the validation logic.  Typically used
+*      to indicate the number of times a matching port was received
+*      from the SA.
+*
+* SEE ALSO
+*********/
+
+static inline port_t *
+port_new( void )
+{
+   port_t *p_obj;
+
+   p_obj = malloc( sizeof( *p_obj ) );
+   if( p_obj )
+      memset( p_obj, 0, sizeof( *p_obj ) );
+   return ( p_obj );
+}
+
+static inline void
+port_delete( IN port_t * p_obj )
+{
+   free( p_obj );
+}
+
+static inline uint64_t
+port_gen_id(
+   IN ib_net16_t const lid,
+   IN uint8_t const port_num )
+{
+   return ( lid << 8 | port_num );
+}
+
+static inline void
+port_ext_id( IN uint64_t id,
+             IN ib_net16_t *p_lid,
+             IN uint8_t *p_port_num )
+{
+   CL_ASSERT( (id & 0xFF) < 0x100 );
+   *p_port_num = (uint8_t)(id & 0xFF);
+   CL_ASSERT( ((id >> 8) & 0xFFFF) < 0x10000 );
+   *p_lid = (uint16_t)((id >> 8) & 0xFFFF);
+}
+
+static inline void
+port_set_id( IN port_t * p_obj,
+             IN ib_net16_t const lid,
+             IN uint8_t const port_num )
+{
+   p_obj->port_id = port_gen_id(lid, port_num);
+}
+
+static inline void
+port_get_id( IN port_t * p_obj,
+             IN ib_net16_t *p_lid,
+             IN uint8_t *p_port_num )
+{
+   port_ext_id(p_obj->port_id, p_lid, p_port_num);
+}
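+
+/*
+ * Illustrative sketch (not in the original header): packing and unpacking
+ * the synthetic (LID, port number) key.  The LID is used in network byte
+ * order exactly as stored, which is fine for a lookup key as long as both
+ * directions use the same convention, as port_gen_id/port_ext_id do.
+ *
+ *   ib_net16_t lid;
+ *   uint8_t num;
+ *
+ *   port_set_id( p_port, cl_hton16( 12 ), 3 );
+ *   port_get_id( p_port, &lid, &num );
+ *   CL_ASSERT( lid == cl_hton16( 12 ) && num == 3 );
+ */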
+/****s* Subnet Database/path_t
+* NAME
+*   path_t
+*
+* DESCRIPTION
+*   Subnet database object for paths.
+*   Must be castable to generic_t.
+*
+* SYNOPSIS
+*/
+typedef struct _path
+{
+   cl_map_item_t map_item;   /* must be first element! */
+   uint32_t count;           /* must be second element! */
+   ib_path_rec_t rec;
+   ib_path_rec_t comp;
+} path_t;
+
+/*
+* FIELDS
+*   map_item
+*      Provides linkage for the qmap container.
+*
+*   rec
+*      PathRecord for this path as read from the database file.
+*
+*   comp
+*      PathRecord indicating which fields should be compared against rec.
+*      Bits set in the comp PathRecord indicate that the corresponding
+*      field in the rec structure should be compared against real-time
+*      data from the SA.
+*
+*   count
+*      Utility counter used by the validation logic.  Typically used
+*      to indicate the number of times a matching path was received
+*      from the SA.
+*
+* SEE ALSO
+*********/
+
+static inline path_t *
+path_new( void )
+{
+   path_t *p_obj;
+
+   p_obj = malloc( sizeof( *p_obj ) );
+   if( p_obj )
+      memset( p_obj, 0, sizeof( *p_obj ) );
+   return ( p_obj );
+}
+
+static inline void
+path_delete( IN path_t * p_obj )
+{
+   free( p_obj );
+}
+
+/****s* Subnet Database/subnet_t
+* NAME
+*   subnet_t
+*
+* DESCRIPTION
+*   Subnet database object.
+*
+* SYNOPSIS
+*/
+typedef struct _subnet
+{
+   cl_qmap_t node_lid_tbl;
+   cl_qmap_t node_guid_tbl;
+   cl_qmap_t mgrp_mlid_tbl;
+   /* cl_qmap_t port_lid_tbl; */
+   /* cl_qmap_t port_guid_tbl; */
+   cl_qmap_t port_key_tbl;
+   cl_qmap_t link_tbl;
+   cl_qmap_t path_tbl;
+} subnet_t;
+
+/*
+* FIELDS
+*
+* SEE ALSO
+*********/
+
+/****f* Subnet Database/subnet_construct
+* NAME
+*   subnet_construct
+*
+* DESCRIPTION
+*   This function constructs a subnet database object.
+*   This function cannot fail.
+*
+* SYNOPSIS
+*/
+void subnet_construct( IN subnet_t * const p_subn );
+
+/*
+* FIELDS
+*
+* SEE ALSO
+*********/
+
+/****f* Subnet Database/subnet_init
+* NAME
+*   subnet_init
+*
+* DESCRIPTION
+*   This function initializes a subnet database object.
+*
+* SYNOPSIS
+*/
+cl_status_t subnet_init( IN subnet_t * const p_subn );
+
+/*
+* FIELDS
+*
+* SEE ALSO
+*********/
+
+#endif
+
diff --git a/branches/Ndi/ulp/opensm/user/osmtest/main.c b/branches/Ndi/ulp/opensm/user/osmtest/main.c
new file mode 100644
index 00000000..f1c8403e
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/osmtest/main.c
@@ -0,0 +1,534 @@
+/*
+ * Copyright (c) 2004,2005 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2005 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+/*
+ * Abstract:
+ *   Command line interface for osmtest.
+ *
+ * Environment:
+ *   Linux User Mode
+ *
+ * $Revision: 1.3 $
+ */
+
+#include
+#include
+#include
+#include "osmtest.h"
+
+/********************************************************************
+       D E F I N E    G L O B A L    V A R I A B L E S
+*********************************************************************/
+
+/*
+   This is the global osmtest object.
+   One osmtest object is required per subnet.
+   Future versions could support multiple subnets by
+   instantiating more than one osmtest object.
+*/
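+
+/*
+ * Example invocations (illustrative only, based on the usage text below):
+ *
+ *   osmtest -f c -i inventory.dat    create an inventory file
+ *   osmtest -f a -i inventory.dat    run all validation tests against it
+ *   osmtest -f m -M2                 short multicast flow, multiple mode
+ */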
+#define GUID_ARRAY_SIZE 64
+#define OSMT_DEFAULT_RETRY_COUNT 3
+#define OSMT_DEFAULT_TRANS_TIMEOUT_MILLISEC 1000
+#define OSMT_DEFAULT_TRAP_WAIT_TIMEOUT_SEC 10
+
+/**********************************************************************
+ **********************************************************************/
+boolean_t
+osmt_is_debug(void)
+{
+#if defined( _DEBUG_ )
+   return TRUE;
+#else
+   return FALSE;
+#endif /* defined( _DEBUG_ ) */
+}
+
+/**********************************************************************
+ **********************************************************************/
+void show_usage(void);
+
+void
+show_usage(  )
+{
+   printf
+      ( "\n------- osmtest - Usage and options ----------------------\n" );
+   printf( "Usage:   osmtest [options]\n" );
+   printf( "Options:\n" );
+   printf( "-f \n"
+           "--flow \n"
+           "          This option directs osmtest to run a specific flow:\n"
+           "          FLOW  DESCRIPTION\n"
+           "          c = create an inventory file with all nodes, ports and paths.\n"
+           "          a = run all validation tests (expecting an input inventory)\n"
+           "          v = only validate the given inventory file.\n"
+           "          s = run service registration, un-registration and lease.\n"
+           "          e = run event forwarding test.\n"
+           "          f = flood the SA with queries according to the stress mode.\n"
+           "          m = multicast flow.\n"
+           "          q = QoS info - VLArb and SLtoVL tables.\n"
+           "          t = run trap 64/65 flow. This flow requires running an external tool.\n"
+           "          (default is all but QoS).\n\n" );
+   printf( "-w \n"
+           "--wait \n"
+           "          This option specifies the wait time for trap 64/65 in seconds.\n"
+           "          It is used only when running -f t - the trap 64/65 flow\n"
+           "          (defaults to 10 sec).\n" );
+   printf( "-d \n"
+           "--debug \n"
+           "          This option specifies a debug option.\n"
+           "          These options are not normally needed.\n"
+           "          The number following -d selects the debug\n"
+           "          option to enable as follows:\n"
+           "          OPT   Description\n"
+           "          ---    -----------------\n"
+           "          -d0  - Unused.\n"
+           "          -d1  - Do not scan/compare path records.\n"
+           "          -d2  - Force log flushing after each log message.\n"
+           "          -d3  - Use mem tracking.\n"
+           "          Without -d, no debug options are enabled.\n\n" );
+   printf( "-m \n"
+           "--max_lid \n"
+           "          This option specifies the maximal LID number to be searched\n"
+           "          for during inventory file build (defaults to 100).\n" );
+   printf( "-g \n"
+           "--guid \n"
+           "          This option specifies the local port GUID value\n"
+           "          with which osmtest should bind.  osmtest may be\n"
+           "          bound to 1 port at a time.\n"
+           "          Without -g, osmtest displays a menu of possible\n"
+           "          port GUIDs and waits for user input.\n\n" );
+   printf( "-h\n"
+           "--help\n" "          Display this usage info then exit.\n\n" );
+   printf( "-i \n"
+           "--inventory \n"
+           "          This option specifies the name of the inventory file.\n"
+           "          Normally, osmtest expects to find an inventory file,\n"
+           "          which osmtest uses to validate real-time information\n"
+           "          received from the SA during testing.\n"
+           "          If -i is not specified, osmtest defaults to the file\n"
+           "          'osmtest.dat'.\n"
+           "          See the -c option for related information.\n\n" );
+   printf( "-s\n"
+           "--stress\n"
+           "          This option runs the specified stress test instead\n"
+           "          of the normal test suite.\n"
+           "          Stress test options are as follows:\n"
+           "          OPT   Description\n"
+           "          ---    -----------------\n"
+           "          -s1  - Single-MAD response SA queries.\n"
+           "          -s2  - Multi-MAD (RMPP) response SA queries.\n"
+           "          -s3  - Multi-MAD (RMPP) Path Record SA queries.\n"
+           "          Without -s, stress testing is not performed.\n\n" );
+   printf( "-M\n"
+           "--Multicast_Mode\n"
+           "          This option specifies the length of the Multicast test:\n"
+           "          OPT   Description\n"
+           "          ---    -----------------\n"
+           "          -M1  - Short Multicast Flow (default) - single mode.\n"
+           "          -M2  - Short Multicast Flow - multiple mode.\n"
+           "          -M3  - Long Multicast Flow - single mode.\n"
+           "          -M4  - Long Multicast Flow - multiple mode.\n"
+           "          Single mode - osmtest runs alone, with no other\n"
+           "          apps interacting with OpenSM MC.\n"
+           "          Multiple mode - may be run together with other apps\n"
+           "          using MC with OpenSM.\n"
+           "          Without -M, default flow testing is performed.\n\n" );
+
+   printf( "-t \n"
+           "          This option specifies the time in milliseconds\n"
+           "          used for transaction timeouts.\n"
+           "          Specifying -t 0 disables timeouts.\n"
+           "          Without -t, osmtest defaults to a timeout value of\n"
+           "          1 second.\n\n" );
+   printf( "-l\n"
+           "--log_file\n"
+           "          This option defines the log to be the given file.\n"
+           "          By default the log goes to stdout.\n\n" );
+   printf( "-v\n"
+           "          This option increases the log verbosity level.\n"
+           "          The -v option may be specified multiple times\n"
+           "          to further increase the verbosity level.\n"
+           "          See the -vf option for more information about\n"
+           "          log verbosity.\n\n" );
+   printf( "-V\n"
+           "          This option sets the maximum verbosity level and\n"
+           "          forces log flushing.\n"
+           "          The -V is equivalent to '-vf 0xFF -d 2'.\n"
+           "          See the -vf option for more information about\n"
+           "          log verbosity.\n\n" );
+   printf( "-vf \n"
+           "          This option sets the log verbosity level.\n"
+           "          A flags field must follow the -vf option.\n"
+           "          A bit set/clear in the flags enables/disables a\n"
+           "          specific log level as follows:\n"
+           "          BIT    LOG LEVEL ENABLED\n"
+           "          ----   -----------------\n"
+           "          0x01 - ERROR (error messages)\n"
+           "          0x02 - INFO (basic messages, low volume)\n"
+           "          0x04 - VERBOSE (interesting stuff, moderate volume)\n"
+           "          0x08 - DEBUG (diagnostic, high volume)\n"
+           "          0x10 - FUNCS (function entry/exit, very high volume)\n"
+           "          0x20 - FRAMES (dumps all SMP and GMP frames)\n"
+           "          0x40 - currently unused.\n"
+           "          0x80 - currently unused.\n"
+           "          Without -vf, osmtest defaults to ERROR + INFO (0x3).\n"
+           "          Specifying -vf 0 disables all messages.\n"
+           "          Specifying -vf 0xFF enables all messages (see -V).\n"
+           "          High verbosity levels may require increasing\n"
+           "          the transaction timeout with the -t option.\n\n" );
+}
+
+/**********************************************************************
**********************************************************************/ +void show_menu(void); + +void +show_menu( ) +{ + printf( "\n------- Interactive Menu -------\n" ); + printf( "X - Exit.\n\n" ); +} +void OsmReportState(IN const char *p_str) +{ +} + +/********************************************************************** + **********************************************************************/ +int OSM_CDECL +main( int argc, + char *argv[] ) +{ + static osmtest_t osm_test; + osmtest_opt_t opt = { 0 }; + ib_net64_t guid = 0; + int max_lid = 100; + ib_api_status_t status; + uint32_t log_flags = OSM_LOG_ERROR | OSM_LOG_INFO; + char flow_name[64]; + boolean_t mem_track = FALSE; + uint32_t next_option; + const char *const short_option = "f:l:m:M:d:g:s:t:i:cvVh"; + + /* + * In the array below, the 2nd parameter specified the number + * of arguments as follows: + * 0: no arguments + * 1: argument + * 2: optional + */ + const struct option long_option[] = { + {"create", 0, NULL, 'c'}, + {"debug", 1, NULL, 'd'}, + {"flow", 1, NULL, 'f'}, + {"wait", 1, NULL, 'w'}, + {"inventory", 1, NULL, 'i'}, + {"max_lid", 1, NULL, 'm'}, + {"guid", 1, NULL, 'g'}, + {"help", 0, NULL, 'h'}, + {"stress", 1, NULL, 's'}, + {"Multicast_Mode", 1, NULL, 'M'}, + {"timeout", 1, NULL, 't'}, + {"verbose", 0, NULL, 'v'}, + {"log_file", 1, NULL, 'l'}, + {"vf", 1, NULL, 'x'}, + {"V", 0, NULL, 'V'}, + + {NULL, 0, NULL, 0} /* Required at end of array */ + }; + + opt.transaction_timeout = OSMT_DEFAULT_TRANS_TIMEOUT_MILLISEC; + opt.wait_time = OSMT_DEFAULT_TRAP_WAIT_TIMEOUT_SEC; + opt.retry_count = OSMT_DEFAULT_RETRY_COUNT; + opt.force_log_flush = FALSE; + opt.stress = 0; + opt.log_file = NULL; + opt.create = FALSE; + opt.mmode = 1; + opt.ignore_path_records = FALSE; /* Do path Records too. */ + opt.flow = 0; /* run all validation tests */ + strcpy(flow_name, "All Validations"); + strcpy( opt.file_name, "osmtest.dat" ); + + printf( "\nCommand Line Arguments\n" ); + do + { + next_option = getopt_long_only( argc, argv, short_option, + long_option, NULL ); + switch ( next_option ) + { + case 'c': + /* + * Create the inventory file. + */ + opt.create = TRUE; + printf( "\tCreating inventory file\n" ); + break; + + case 'i': + /* + * Specifies inventory file name. + */ + if( strlen( optarg ) > OSMTEST_FILE_PATH_MAX ) + printf( "\nError: path name too long (ignored).\n" ); + else + strcpy( opt.file_name, optarg ); + + printf( "\tFile = %s\n", opt.file_name ); + break; + + case 'f': + /* + * Specifies Flow . 
+ */ + if( strlen( optarg ) > OSMTEST_FILE_PATH_MAX ) + printf( "\nError: path name too long (ignored).\n" ); + else + strcpy( flow_name, optarg ); + + if (!strcmp("c",optarg)) { + strcpy(flow_name, "Create Inventory"); + opt.flow = 1; + } else if (!strcmp("v",optarg)) { + strcpy(flow_name, "Validate Inventory"); + opt.flow = 2; + } else if (!strcmp("s",optarg)) { + strcpy(flow_name, "Services Registration"); + opt.flow = 3; + } else if (!strcmp("e",optarg)) { + strcpy(flow_name, "Event Forwarding"); + opt.flow = 4; + } else if (!strcmp("f",optarg)) { + strcpy(flow_name, "Stress SA"); + opt.flow = 5; + } else if (!strcmp("m",optarg)) { + strcpy(flow_name, "Multicast"); + opt.flow = 6; + } else if (!strcmp("q",optarg)) { + strcpy(flow_name, "QoS: VLArb and SLtoVL"); + opt.flow = 7; + } else if (!strcmp("t", optarg)) { + strcpy(flow_name, "Trap 64/65"); + opt.flow = 8; + } else if (!strcmp("a",optarg)) { + strcpy(flow_name, "All Validations"); + opt.flow = 0; + } else { + printf( "\nError: un-known flow %s.\n",flow_name); + exit(2); + } + break; + + case 'w': + /* + * Specifies trap 64/65 wait time + */ + CL_ASSERT( strtol( optarg, NULL, 0 ) < 0x100 ); + opt.wait_time = (uint8_t)strtol( optarg, NULL, 0 ); + printf( "\tTrap 64/65 wait time = %d\n", opt.wait_time ); + break; + + case 'm': + /* + * Specifies the max LID to search for during exploration. + */ + max_lid = atoi( optarg ); + printf( "\tMAX-LID %u\n", max_lid ); + break; + + case 'g': + /* + * Specifies port guid with which to bind. + */ + guid = cl_hton64( strtoull( optarg, NULL, 16 )); + printf( "\tGUID 0x%016" PRIx64 "\n", guid ); + break; + case 't': + /* + * Specifies transaction timeout. + */ + opt.transaction_timeout = strtol( optarg, NULL, 0 ); + printf( "\tTransaction timeout = %d\n", opt.transaction_timeout ); + break; + + case 'l': + opt.log_file = optarg; + printf("\tLog File:%s\n", opt.log_file ); + break; + + case 'v': + /* + * Increases log verbosity. + */ + log_flags = ( log_flags << 1 ) | 1; + printf( "\tVerbose option -v (log flags = 0x%X)\n", log_flags ); + break; + + case 'V': + /* + * Specifies maximum log verbosity. + */ + log_flags = 0xFFFFFFFF; + opt.force_log_flush = TRUE; + printf( "\tEnabling maximum log verbosity\n" ); + break; + + case 's': + /* + * Perform stress test. + */ + opt.stress = strtol( optarg, NULL, 0 ); + printf( "\tStress test enabled: " ); + switch ( opt.stress ) + { + case 1: + printf( "Small SA queries\n" ); + break; + case 2: + printf( "Large SA queries\n" ); + break; + case 3: + printf( "Large Path Record SA queries\n" ); + break; + default: + printf( "Unknown value %u (ignored)\n", opt.stress ); + opt.stress = 0; + break; + } + break; + + case 'M': + /* + * Perform stress test. 
+ */ + opt.mmode = strtol( optarg, NULL, 0 ); + printf( "\tMulticast test enabled: " ); + switch ( opt.mmode ) + { + case 1: + printf( "Short MC Flow - single mode (default)\n" ); + break; + case 2: + printf( "Short MC Flow - multiple mode\n" ); + break; + case 3: + printf( "Long MC Flow - single mode\n" ); + break; + case 4: + printf( "Long MC Flow - multiple mode\n" ); + break; + default: + printf( "Unknown value %u (ignored)\n", opt.stress ); + opt.mmode = 0; + break; + } + break; + + case 'd': + /* + * Debug Options + */ + printf( "\tDebug Option: " ); + switch ( strtol( optarg, NULL, 0 ) ) + { + case 1: + printf( "Ignore Path Records.\n" ); + opt.ignore_path_records = TRUE; + break; + case 2: + printf( "Force Log Flush.\n" ); + opt.force_log_flush = TRUE; + break; + case 3: + printf( "Use Mem Tracking.\n" ); + mem_track = TRUE; + break; + default: + printf( "Unknown value %ld (ignored)\n", strtol( optarg, NULL, 0 ) ); + break; + } + break; + + case 'h': + show_usage( ); + return 0; + + case 'x': + log_flags = strtol( optarg, NULL, 0 ); + printf( "\t\t\t\tVerbose option -vf (log flags = 0x%X)\n", + log_flags ); + break; + + case -1: + printf( "Done with args\n" ); + break; + + default: /* something wrong */ + abort( ); + } + + } + while( next_option != -1 ); + + printf( "\tFlow = %s\n", flow_name ); + + if (mem_track) __cl_mem_track(TRUE); + + + status = osmtest_init( &osm_test, &opt, ( osm_log_level_t ) log_flags ); + if( status != IB_SUCCESS ) + { + printf( "\nError from osmtest_init: %s.\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Guid may be zero going into this function if the user + * hasn't specified a binding port on the command line. + */ + status = osmtest_bind( &osm_test, (uint16_t)max_lid, guid ); + if (status != IB_SUCCESS) exit(status); + + status = osmtest_run( &osm_test ); + if (status != IB_SUCCESS) { + printf("OSMTEST: TEST \"%s\" FAIL\n", flow_name); + } else { + printf("OSMTEST: TEST \"%s\" PASS\n", flow_name); + } + osmtest_destroy( &osm_test ); + + if (mem_track) cl_mem_display(); + + Exit: + return ( status ); +} diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmt_inform.c b/branches/Ndi/ulp/opensm/user/osmtest/osmt_inform.c new file mode 100644 index 00000000..94c74df4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/osmt_inform.c @@ -0,0 +1,961 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifdef OSM_VENDOR_INTF_MTL +/* + * Abstract: + * Implementation of InformInfo testing flow.. + * Top level is osmt_run_inform_info_flow: + * osmt_bind_inform_qp + * osmt_reg_unreg_inform_info + * osmt_send_trap_wait_for_forward + * + * Environment: + * Linux User Mode + * + * $Revision: 1.2 $ + */ + +#include +#include +#include +#include +#include +#include +#include "osmtest.h" +#include "osmt_inform.h" + +/* + * Prepare an asynchronous QP (rcv) for sending inform info and + * handling the incoming reports. + * + */ +ib_api_status_t +osmt_bind_inform_qp( IN osmtest_t * const p_osmt, + OUT osmt_qp_ctx_t *p_qp_ctx) { + ib_net64_t port_guid; + VAPI_hca_hndl_t hca_hndl; + VAPI_hca_id_t hca_id; + uint32_t port_num; + VAPI_ret_t vapi_ret; + IB_MGT_ret_t mgt_ret; + uint8_t hca_index; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osmt_bind_inform_qp ); + + port_guid = p_osmt->local_port.port_guid; + + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_bind_inform_qp: " + "Binding to port 0x%" PRIx64 "\n", cl_ntoh64( port_guid ) ); + + /* obtain the hca name and port num from the guid */ + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_bind_inform_qp: " + "Finding CA and Port that owns port guid 0x%" PRIx64 "\n", + port_guid ); + + mgt_ret = + osm_vendor_get_guid_ca_and_port( + p_osmt->p_vendor, + port_guid, + &hca_hndl, + &hca_id[0], + &hca_index, + &port_num ); + if( mgt_ret != IB_MGT_OK ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_bind_inform_qp: ERR 0109: " + "Unable to obtain CA and port (%d).\n" ); + status = IB_ERROR; + goto Exit; + } + +#define OSMT_MTL_REVERSE_QP1_WELL_KNOWN_Q_KEY 0x80010000 + + strncpy(p_qp_ctx->qp_bind_hndl.hca_id, hca_id, sizeof(hca_id)); + p_qp_ctx->qp_bind_hndl.hca_hndl = hca_hndl; + p_qp_ctx->qp_bind_hndl.port_num = port_num; + p_qp_ctx->qp_bind_hndl.max_outs_sq = 10; + p_qp_ctx->qp_bind_hndl.max_outs_rq = 10; + p_qp_ctx->qp_bind_hndl.qkey = OSMT_MTL_REVERSE_QP1_WELL_KNOWN_Q_KEY; + + vapi_ret = osmt_mtl_init_opened_hca(&p_qp_ctx->qp_bind_hndl); + if (vapi_ret != VAPI_OK) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_bind_inform_qp: ERR 0114: " + "Error initializing QP.\n" ); + status = IB_ERROR; + goto Exit; + } + + /* we use the pre-allocated buffers for send and receive : + send from buf[0] + receive from buf[2] + */ + p_qp_ctx->p_send_buf = (uint8_t *)p_qp_ctx->qp_bind_hndl.buf_ptr + GRH_LEN; + p_qp_ctx->p_recv_buf = (uint8_t *)p_qp_ctx->qp_bind_hndl.buf_ptr + 2 * (GRH_LEN + MAD_BLOCK_SIZE); + + /* Need to clear assigned memory of p_send_buf - before using it to send any data */ + memset(p_qp_ctx->p_send_buf, 0, MAD_BLOCK_SIZE); + + status = IB_SUCCESS; + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_bind_inform_qp: " + "Initialized QP:0x%X in VAPI Mode\n", + p_qp_ctx->qp_bind_hndl.qp_id + ); + + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_bind_inform_qp: " "Binding to IB_MGT SMI\n" ); + + /* we also need a QP0 handle for sending packets */ + mgt_ret = IB_MGT_get_handle( hca_id, port_num, IB_MGT_SMI, + &( p_qp_ctx->ib_mgt_qp0_handle ) ); + if( IB_MGT_OK != mgt_ret ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_bind_inform_qp: ERR 0115: " + "Error obtaining IB_MGT handle to SMI\n" ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( 
p_log ); + return status; +} + +/* + * Close the QP + */ +void +osmt_unbind_inform_qp( IN osmtest_t * const p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx) { + osm_log_t *p_log = &p_osmt->log; + + OSM_LOG_ENTER( p_log, osmt_unbind_inform_qp ); + + osmt_mtl_mad_cleanup(&p_qp_ctx->qp_bind_hndl); + + IB_MGT_release_handle(p_qp_ctx->ib_mgt_qp0_handle); + + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_unbind_inform_qp: " + "Unbind QP handles\n" ); + OSM_LOG_EXIT( &p_osmt->log ); +} + +/* + * Register/Unregister to receive the given InformInfo + * + * Uses the qp context to send the inform info mad. + * Wait for GetResp(InformInfoResp) + * + */ +ib_api_status_t +osmt_reg_unreg_inform_info( IN osmtest_t *p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx, + IN ib_inform_info_t *p_inform_info, + IN uint8_t reg_flag + ) +{ + ib_sa_mad_t *p_sa_mad = (ib_sa_mad_t *)(p_qp_ctx->p_send_buf); + ib_inform_info_t *p_ii = ib_sa_mad_get_payload_ptr(p_sa_mad); /* SA Payload */ + VAPI_ret_t vapi_ret; + VAPI_wc_desc_t wc_desc; + VAPI_ud_av_hndl_t avh; + static VAPI_wr_id_t wrid = 16198; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( &p_osmt->log, osmt_reg_unreg_inform_info ); + + /* init the MAD */ + ib_mad_init_new( (ib_mad_t*)p_sa_mad, + IB_MCLASS_SUBN_ADM, + ( uint8_t ) 2, + IB_MAD_METHOD_SET, + cl_hton64( wrid ), + ( ib_net16_t ) 0, + 0 ); + wrid++; + p_sa_mad->attr_id = IB_MAD_ATTR_INFORM_INFO; + + /* copy the reference inform info */ + memcpy(p_ii, p_inform_info, sizeof(ib_inform_info_t)); + + if (reg_flag) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_reg_unreg_inform_info: " + "Subscribing InformInfo: Traps from lid:0x%X to 0x%X, trap num :0x%X\n", + p_ii->lid_range_begin,p_ii->lid_range_end,p_ii->g_or_v.generic.trap_num); + } + else + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_reg_unreg_inform_info: " + "UnSubscribing InformInfo: Traps from lid:0x%X to 0x%X\n", + p_ii->lid_range_begin, p_ii->lid_range_end); + } + + /* set the subscribe bit */ + if (reg_flag) + { + p_ii->subscribe = 1; + } + else + { + p_ii->subscribe = 0; + /* + * we need to set the QPN on the mad if we unsubscribe: + * o13-2.1.1 - QPN Field need to be set when unsubscribing. 
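+       *    (The SA identifies the subscription to remove by the
+       *    subscriber's address and this QPN, so the QPN of the QP that
+       *    was registered to receive the Reports is filled in below.)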
+ */ + ib_inform_info_set_qpn(p_ii, + cl_hton32(p_qp_ctx->qp_bind_hndl.qp_id.qp_num)); + } + + osm_dump_inform_info( &p_osmt->log, + p_ii, + OSM_LOG_DEBUG ); + + /* --------------------- PREP ------------------------- */ + if (osmt_mtl_mad_post_recv_bufs(&p_qp_ctx->qp_bind_hndl, + p_qp_ctx->p_recv_buf, + 1, /* but we need only one mad at a time */ + GRH_LEN + MAD_BLOCK_SIZE, + wrid) != 1) { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0120: " + "Error posting recv bufs\n"); + status = IB_ERROR; + goto Exit; + } + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_reg_unreg_inform_info: " + "Posted recv bufs\n"); + + vapi_ret = osmt_mtl_create_av(&p_qp_ctx->qp_bind_hndl, p_osmt->local_port.sm_lid, &avh); + if (vapi_ret != VAPI_OK) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0121: " + "Error Preparing AVH (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + goto Exit; + } + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_reg_unreg_inform_info: " + "Prepared AVH\n"); + + if( osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + { + osm_dump_sa_mad( p_log, (ib_sa_mad_t *)(p_qp_ctx->p_send_buf), OSM_LOG_DEBUG); +#if 0 + for (i = 56; i < 253; i++) { + if ( i % 8 == 0 ) { printf("\n %d : ", i); } + printf("0x%02X ", p_qp_ctx->p_send_buf[i]); + } +#endif + printf("\n"); + } + + /* --------------------- SEND ------------------------- */ + vapi_ret = osmt_mtl_mad_send(&p_qp_ctx->qp_bind_hndl, + wrid, + p_qp_ctx->p_send_buf, + 1, /* SA is QP1 */ + 0, /* SL is 0 */ + OSMT_MTL_REVERSE_QP1_WELL_KNOWN_Q_KEY, + avh + ); + if (vapi_ret != VAPI_OK) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0122: " + "Error sending mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + goto Exit; + } + + vapi_ret = osmt_mtl_mad_poll4cqe(p_qp_ctx->qp_bind_hndl.hca_hndl, + p_qp_ctx->qp_bind_hndl.sq_cq_hndl, + &wc_desc, + 20, + 10000, + NULL); + if (vapi_ret != VAPI_OK) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0123: " + "Error getting send completion (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + goto Exit; + } + + if (wc_desc.status != VAPI_SUCCESS) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0124: " + "Error on send completion (%s) (%d)\n", + VAPI_strerror_sym(wc_desc.status), wc_desc.status ); + status = IB_ERROR; + goto Exit; + } + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_reg_unreg_inform_info: " + "Sent MAD\n"); + + /* --------------------- RECV ------------------------- */ + vapi_ret = osmt_mtl_mad_poll4cqe(p_qp_ctx->qp_bind_hndl.hca_hndl, + p_qp_ctx->qp_bind_hndl.rq_cq_hndl, + &wc_desc, + 20, + 10000, + &avh); + if (vapi_ret != VAPI_SUCCESS) + { + if (vapi_ret == VAPI_CQ_EMPTY) + { + status = IB_TIMEOUT; + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0125: " + "Error receiving mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + } + goto Exit; + } + + /* check to see if successful - by examination of the subscribe bit */ + p_sa_mad = (ib_sa_mad_t *)(p_qp_ctx->p_recv_buf + GRH_LEN); + + if (p_sa_mad->status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: " + "Remote error = %s\n", + ib_get_mad_status_str( (ib_mad_t *)p_sa_mad )); + status = IB_REMOTE_ERROR; + goto Exit; + } + + if (p_sa_mad->method != IB_MAD_METHOD_GET_RESP) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: " + "Expected IB_MAD_METHOD_GET_RESP but got:(%X)\n", + p_sa_mad->method); + 
status = IB_REMOTE_ERROR; + goto Exit; + } + + if (p_sa_mad->attr_id != IB_MAD_ATTR_INFORM_INFO) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: " + "Expected IB_MAD_ATTR_INFORM_INFO but got:(%X)\n", + cl_ntoh16(p_sa_mad->attr_id)); + status = IB_REMOTE_ERROR; + goto Exit; + } + + p_ii = ib_sa_mad_get_payload_ptr(p_sa_mad); + if (!p_ii->subscribe) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_reg_unreg_inform_info: ERR 0126: " + "Subscribe/Unsubscribe Failed\n"); + status = IB_REMOTE_ERROR; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/* + * Send a trap (Subn LID Route) Trap(Notice) through the regular + * connection QP connection (targeted at QP0) + * + * Wait for the trap repress + */ +ib_api_status_t +osmt_send_trap_wait_for_forward( IN osmtest_t * const p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx + ) +{ + ib_smp_t *p_smp = (ib_smp_t *)(p_qp_ctx->p_send_buf); + ib_mad_notice_attr_t *p_ntc = ib_smp_get_payload_ptr(p_smp); + ib_sa_mad_t * p_sa_mad; + IB_MGT_ret_t mgt_res; + VAPI_ret_t vapi_ret; + VAPI_wc_desc_t wc_desc; + VAPI_ud_av_hndl_t avh; + IB_ud_av_t av; + static VAPI_wr_id_t wrid = 2222; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osmt_send_trap_wait_for_forward ); + + osm_log( p_log, OSM_LOG_INFO, + "osmt_send_trap_wait_for_forward: " + "Sending Traps to QP0 of SA LID:0x%X\n", + p_osmt->local_port.sm_lid); + + /* init the MAD */ + memset(p_smp, 0, sizeof(ib_smp_t)); + ib_mad_init_new( (ib_mad_t*)p_smp, + IB_MCLASS_SUBN_LID, + ( uint8_t ) 2, + IB_MAD_METHOD_TRAP, + cl_hton64( wrid), + ( ib_net16_t ) 0, + 0 ); + + wrid++; + p_smp->attr_id = IB_MAD_ATTR_NOTICE; + + /* prepare the notice */ + p_ntc->generic_type = 0x82;/* generic, type = 2 */ + ib_notice_set_prod_type_ho(p_ntc, 1); + p_ntc->g_or_v.generic.trap_num = cl_hton16(0x26); + p_ntc->issuer_lid = cl_hton16(2); + + /* --------------------- PREP ------------------------- */ + if (osmt_mtl_mad_post_recv_bufs(&p_qp_ctx->qp_bind_hndl, + p_qp_ctx->p_recv_buf, + 1, /* we need to receive both trap repress and report */ + GRH_LEN + MAD_BLOCK_SIZE, + wrid) != 1) { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 0127: " + "Error posting recv bufs\n"); + status = IB_ERROR; + goto Exit; + } + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_send_trap_wait_for_forward: " + "Posted recv bufs\n"); + + av.dlid = p_osmt->local_port.sm_lid; + av.grh_flag = FALSE; + + /* EZ: returned in HACK: use constants */ + av.static_rate = 0; /* p_mad_addr->static_rate; */ + av.src_path_bits = 1 ; /* p_mad_addr->path_bits; */ + av.sl = 0 ; /* p_mad_addr->addr_type.gsi.service_level; */ + + if( osm_log_is_active( p_log, OSM_LOG_DEBUG ) ) + { + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_send_trap_wait_for_forward: " + "av.dlid 0x%X, " + "av.static_rate %d, " + "av.path_bits %d\n", + cl_ntoh16( av.dlid ), av.static_rate, av.src_path_bits ); + } + + /* send it */ + mgt_res = IB_MGT_send_mad( p_qp_ctx->ib_mgt_qp0_handle, p_smp, /* actual payload */ + &av, /* address vector */ + wrid, /* casting the mad wrapper pointer for err cb */ + p_osmt->opt.transaction_timeout ); + if( mgt_res != IB_MGT_OK ) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 0128: " + "Error sending mad (%d)\n", mgt_res ); + status = IB_ERROR; + goto Exit; + } + + vapi_ret = osmt_mtl_create_av(&p_qp_ctx->qp_bind_hndl, p_osmt->local_port.sm_lid, &avh); + if (vapi_ret != VAPI_OK) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 
0129: " + "Error Preparing AVH (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + goto Exit; + } + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_send_trap_wait_for_forward: " + "Prepared AVH\n"); + + osm_log( p_log, OSM_LOG_DEBUG, + "osmt_send_trap_wait_for_forward: " + "Trap MAD Sent\n"); + + /* --------------------- RECV ------------------------- */ + vapi_ret = osmt_mtl_mad_poll4cqe(p_qp_ctx->qp_bind_hndl.hca_hndl, + p_qp_ctx->qp_bind_hndl.rq_cq_hndl, + &wc_desc, + 200, + 10000, + &avh); + if (vapi_ret != VAPI_SUCCESS) + { + if (vapi_ret == VAPI_CQ_EMPTY) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 0130: " + "Timeout receiving mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_TIMEOUT; + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 0131: " + "Error receiving mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + } + goto Exit; + } + + /* check to see if successful - by examination of the subscribe bit */ + p_sa_mad = (ib_sa_mad_t *)(p_qp_ctx->p_recv_buf + GRH_LEN); + + if (p_sa_mad->method == IB_MAD_METHOD_REPORT) + { + if (p_sa_mad->attr_id == IB_MAD_ATTR_NOTICE) + { + osm_log( p_log, OSM_LOG_INFO, + "osmt_send_trap_wait_for_forward: " + "Received the Report!\n"); + status = IB_SUCCESS; + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 1020" + "Did not receive a Report(Notice) but attr:%d\n", + cl_ntoh16(p_sa_mad->attr_id) + ); + status = IB_ERROR; + } + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_send_trap_wait_for_forward: ERR 1020" + "Received an Unexpected Method:%d\n", + p_smp->method + ); + status = IB_ERROR; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return status; +} + +/* + * Wait for a trap on QPn + * + */ +ib_api_status_t +osmt_trap_wait( IN osmtest_t * const p_osmt, + IN osmt_qp_ctx_t *p_qp_ctx + ) +{ + ib_smp_t *p_smp = (ib_smp_t *)(p_qp_ctx->p_send_buf); + ib_sa_mad_t * p_sa_mad; + VAPI_ret_t vapi_ret; + VAPI_wc_desc_t wc_desc; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( p_log, osmt_trap_wait ); + + osm_log( p_log, OSM_LOG_INFO, + "osmt_trap_wait: " + "Waiting for Traps under QP:0x%X of SA LID:0x%X\n", + cl_ntoh16(p_osmt->local_port.sm_lid)); + + /* --------------------- RECV ------------------------- */ + vapi_ret = osmt_mtl_mad_poll4cqe(p_qp_ctx->qp_bind_hndl.hca_hndl, + p_qp_ctx->qp_bind_hndl.rq_cq_hndl, + &wc_desc, + // 200, + p_osmt->opt.wait_time * 100, + 10000, + NULL); + if (vapi_ret != VAPI_SUCCESS) + { + if (vapi_ret == VAPI_CQ_EMPTY) + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_trap_wait: ERR 0130: " + "Timeout receiving mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_TIMEOUT; + } + else + { + osm_log( p_log, OSM_LOG_ERROR, + "osmt_trap_wait: ERR 0131: " + "Error receiving mad (%s)\n", + VAPI_strerror_sym(vapi_ret) ); + status = IB_ERROR; + } + goto Exit; + } + + /* check to see if successful - by examination of the subscribe bit */ + p_sa_mad = (ib_sa_mad_t *)(p_qp_ctx->p_recv_buf + GRH_LEN); + + if (p_sa_mad->method == IB_MAD_METHOD_REPORT) + { + if (p_sa_mad->attr_id == IB_MAD_ATTR_NOTICE) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_trap_wait: " + "Received the Report!\n"); + status = IB_SUCCESS; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_trap_wait: ERR 1020" + "Did not receive a Report(Notice) but attr:%d\n", + cl_ntoh16(p_sa_mad->attr_id) + ); + status = IB_ERROR; + } + } + else + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, 
+ "osmt_trap_wait: ERR 1020" + "Received an Unexpected Method:%d\n", + p_smp->method + ); + status = IB_ERROR; + } + + Exit: + OSM_LOG_EXIT( p_log ); + return status; +} + +/* + * Initialize an inform info attribute: + * Catch all traps in the lid range of the p_osmt + * + */ +ib_api_status_t +osmt_init_inform_info(IN osmtest_t * const p_osmt, + OUT ib_inform_info_t* p_ii) { + + memset(p_ii, 0, sizeof(ib_inform_info_t)); + /* p_ii->lid_range_begin = cl_hton16(1); */ + p_ii->lid_range_begin = 0xFFFF; + p_ii->lid_range_end = cl_hton16(p_osmt->max_lid); + p_ii->is_generic = 1; /* have to choose */ + p_ii->trap_type = 0xFFFF; /* ALL */ + p_ii->g_or_v.generic.trap_num = 0xFFFF; /* ALL */ + p_ii->g_or_v.generic.node_type_lsb = 0xFFFF; /* ALL */ + p_ii->g_or_v.generic.node_type_msb = 0xFF; /* ALL */ + return IB_SUCCESS; +} + +ib_api_status_t +osmt_init_inform_info_by_trap (IN osmtest_t * const p_osmt, + IN ib_net16_t trap_num, + OUT ib_inform_info_t* p_ii) { + + memset(p_ii, 0, sizeof(ib_inform_info_t)); + /* p_ii->lid_range_begin = cl_hton16(1); */ + p_ii->lid_range_begin = 0xFFFF; + p_ii->lid_range_end = cl_hton16(p_osmt->max_lid); + p_ii->is_generic = 1; /* have to choose */ + p_ii->trap_type = 0xFFFF; /* ALL */ + p_ii->g_or_v.generic.trap_num = trap_num; /* ALL */ + p_ii->g_or_v.generic.node_type_lsb = 0xFFFF; /* ALL */ + p_ii->g_or_v.generic.node_type_msb = 0xFF; /* ALL */ + return IB_SUCCESS; +} + +/* + * Run a complete inform info test flow: + * - try to unregister inform info (should fail) + * - register an inform info + * - try to unregister inform info (should succeed) + * - register an inform info + * - send a trap - sleep + * - check that a Report(Notice) arrived that match the sent one + * + */ +ib_api_status_t +osmt_run_inform_info_flow( IN osmtest_t * const p_osmt ) { + ib_inform_info_t inform_info; + ib_api_status_t status; + osmt_qp_ctx_t qp_ctx; + + OSM_LOG_ENTER( &p_osmt->log, osmt_run_inform_info_flow); + + /* bind the QP */ + status = osmt_bind_inform_qp( p_osmt, &qp_ctx); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* init the inform info */ + osmt_init_inform_info(p_osmt, &inform_info); + + /* first try to unsubscribe */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 0); + /* WAS IB_REMOTE_ERROR */ + if (status != IB_REMOTE_ERROR) + { + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_inform_info_flow:" + "Error during UnSubscribe: (%s)\n", + ib_get_err_str( status )); + goto Exit; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_inform_info_flow:" + "Expected Failure to UnSubscribe non existing InformInfo\n"); + status = IB_ERROR; + goto Exit; + } + } + + /* send the inform info registration */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 1); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* send a trap through QP0 and wait on QPN */ + status = osmt_send_trap_wait_for_forward(p_osmt, &qp_ctx); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_inform_info_flow:" + "Error during Send Trap and Wait For Report: (%s)\n", + ib_get_err_str( status )); + goto Exit; + } + + /* try to unsubscribe for cleanup */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 0); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_inform_info_flow:" + "Error during UnSubscribe: (%s)\n", + ib_get_err_str( status )); + goto Exit; + } + else + { + if (status == IB_REMOTE_ERROR) + { + osm_log( 
&p_osmt->log, OSM_LOG_INFO, + "osmt_run_inform_info_flow:" + "Remote Error during UnSubscribe\n"); + status = IB_ERROR; + goto Exit; + } + } + + Exit: + osmt_unbind_inform_qp( p_osmt, &qp_ctx); + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/* + * Run a complete inform info test flow: + * - try to unregister inform info (should fail) + * - register an inform info + * - try to unregister inform info (should succeed) + * - register an inform info + * - send a trap - sleep + * - check that a Report(Notice) arrived that match the sent one + * + */ +ib_api_status_t +osmt_run_trap64_65_flow( IN osmtest_t * const p_osmt) { + ib_inform_info_t inform_info; + ib_api_status_t status; + osmt_qp_ctx_t qp_ctx; + + OSM_LOG_ENTER( &p_osmt->log, osmt_run_trap64_65_flow); + + /* bind the QP */ + status = osmt_bind_inform_qp( p_osmt, &qp_ctx); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* init the inform info */ + osmt_init_inform_info_by_trap(p_osmt, + cl_hton16(64), + &inform_info); + + /* send the inform info registration */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 1); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /*--------------------- PREP -------------------------*/ + if (osmt_mtl_mad_post_recv_bufs( + &qp_ctx.qp_bind_hndl, + qp_ctx.p_recv_buf, + 1, /* we need to receive the report */ + GRH_LEN + MAD_BLOCK_SIZE, + 1) != 1) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_trap64_65_flow: ERR 0127: " + "Error posting recv bufs for trap 64\n"); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmt_run_trap64_65_flow: " + "Posted recv bufs for trap 64\n"); + + /* init the inform info */ + osmt_init_inform_info_by_trap(p_osmt, + cl_hton16(65), + &inform_info); + + /* send the inform info registration */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 1); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /*--------------------- PREP -------------------------*/ + if (osmt_mtl_mad_post_recv_bufs( + &qp_ctx.qp_bind_hndl, + qp_ctx.p_recv_buf, + 1, /* we need to reveive the report */ + GRH_LEN + MAD_BLOCK_SIZE, + 1) != 1) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_trap64_65_flow: ERR 0127: " + "Error posting recv bufs for trap 65\n"); + status = IB_ERROR; + goto Exit; + } + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmt_run_trap64_65_flow: " + "Posted recv bufs for trap 65\n"); + + /* Sleep for x seconds in order to allow external script trap generation */ +#if 0 + sleep (p_osmt->opt.wait_time); +#endif + + /* wait for a trap on QPN */ + status = osmt_trap_wait(p_osmt, &qp_ctx); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_trap64_65_flow:" + "Error during Send Trap and Wait For Report: (%s)\n", + ib_get_err_str( status )); + goto Exit; + } + + /* try to unsubscribe for cleanup */ + status = osmt_reg_unreg_inform_info(p_osmt, + &qp_ctx, + &inform_info, + 0); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_trap64_65_flow:" + "Error during UnSubscribe: (%s)\n", + ib_get_err_str( status )); + goto Exit; + } + + Exit: + osmt_unbind_inform_qp( p_osmt, &qp_ctx); + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +#endif /* OSM_VENDOR_INTF_MTL */ + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmt_mtl_regular_qp.c b/branches/Ndi/ulp/opensm/user/osmtest/osmt_mtl_regular_qp.c new file mode 100644 index 00000000..e1f8c3cc --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/osmt_mtl_regular_qp.c @@ 
-0,0 +1,447 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifdef OSM_VENDOR_INTF_MTL + +/* - Mellanox Confidential and Proprietary - + * + * Copyright (C) Jul. 2001, Mellanox Technologies Ltd. ALL RIGHTS RESERVED. + * + * Except as specifically permitted herein, no portion of the information, + * including but not limited to object code and source code, may be reproduced, + * modified, distributed, republished or otherwise exploited in any form or by + * any means for any purpose without the prior written permission of Mellanox + * Technologies Ltd. Use of software subject to the terms and conditions + * detailed in the file "LICENSE.txt". + * + * End of legal section ...................................................... + * + * osmt_mtl_regular_qp.c - + * Provide Simple Interface for Sending and Receiving MADS through a regular QP + * + * Creation date: + * + * Version: $Id$ + * + * Authors: + * Eitan Zahavi + * + * Changes: + */ + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +/* + * Initialize the QP etc. 
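+ *
+ * (Overview, illustrative only: the helpers in this file are typically
+ * driven in the order osmt_mtl_init_opened_hca, then
+ * osmt_mtl_mad_post_recv_bufs, osmt_mtl_create_av, osmt_mtl_mad_send and
+ * osmt_mtl_mad_poll4cqe, as done by osmt_reg_unreg_inform_info() in
+ * osmt_inform.c.)
+ *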
+ * Given in res: port_num, max_outs_sq, max_outs_rq + */ +VAPI_ret_t osmt_mtl_get_qp_resources(IN OUT osmt_mtl_mad_res_t *res) +{ + VAPI_ret_t ret; + VAPI_hca_port_t hca_port_info; + VAPI_qp_init_attr_t qp_init_attr; + VAPI_qp_prop_t qp_prop; + VAPI_cqe_num_t act_num; + + /* Get HCA LID */ + ret = VAPI_query_hca_port_prop(res->hca_hndl, res->port_num, &hca_port_info); VAPI_CHECK_RET; + res->slid = hca_port_info.lid; + + /* Get a PD */ + ret = VAPI_alloc_pd(res->hca_hndl, &(res->pd_hndl)); VAPI_CHECK_RET; + + /* Create CQ for RQ and SQ*/ /* TBD - Check we have enough act nums */ + ret = VAPI_create_cq(res->hca_hndl, res->max_outs_sq + 1, &(res->sq_cq_hndl), &act_num); + VAPI_CHECK_RET; + ret = VAPI_create_cq(res->hca_hndl, res->max_outs_rq + 1, &(res->rq_cq_hndl), &act_num); + VAPI_CHECK_RET; + + + /* register event handlers for polling(block mode) internal use*/ + /* ret= EVAPI_set_comp_eventh(res->hca_hndl,res->rq_cq_hndl, */ + /* EVAPI_POLL_CQ_UNBLOCK_HANDLER,NULL,&(res->rq_cq_eventh)); */ + /* VAPI_CHECK_RET; */ + /* ret= EVAPI_set_comp_eventh(res->hca_hndl,res->sq_cq_hndl, */ + /* EVAPI_POLL_CQ_UNBLOCK_HANDLER,NULL,&(res->sq_cq_eventh)); */ + /* VAPI_CHECK_RET; */ + + /* Create QP */ + qp_init_attr.cap.max_oust_wr_sq = res->max_outs_sq + 1; + qp_init_attr.cap.max_oust_wr_rq = res->max_outs_rq + 1; + qp_init_attr.cap.max_sg_size_sq = 4; + qp_init_attr.cap.max_sg_size_rq = 4; + + qp_init_attr.pd_hndl = res->pd_hndl; + qp_init_attr.rdd_hndl = 0; + qp_init_attr.rq_cq_hndl = res->rq_cq_hndl; + qp_init_attr.rq_sig_type = VAPI_SIGNAL_ALL_WR; /* That's default for IB */ + qp_init_attr.sq_cq_hndl = res->sq_cq_hndl; + qp_init_attr.sq_sig_type = VAPI_SIGNAL_REQ_WR; + qp_init_attr.ts_type = VAPI_TS_UD; + + ret= VAPI_create_qp(res->hca_hndl,&qp_init_attr,&(res->qp_hndl),&qp_prop); + VAPI_CHECK_RET; + res->qp_id.qp_num= qp_prop.qp_num; + + return(VAPI_OK); +} + +VAPI_ret_t osmt_mtl_qp_init(osmt_mtl_mad_res_t *res) +{ + VAPI_ret_t ret; + + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + + /* + * Change QP to INIT + * + */ + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + qp_attr.qp_state = VAPI_INIT; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + qp_attr.pkey_ix = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PKEY_IX); + qp_attr.port = res->port_num; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_PORT); + qp_attr.qkey = res->qkey; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QKEY); + + /* If I do not set this mask, I get an error from HH. 
QPM should catch it */ + ret = VAPI_modify_qp(res->hca_hndl, res->qp_hndl, &qp_attr, &qp_attr_mask, &qp_cap); VAPI_CHECK_RET; + + return(ret); + +} + +VAPI_ret_t osmt_mtl_qp_2_rtr_rts(osmt_mtl_mad_res_t *res) +{ + VAPI_ret_t ret; + + VAPI_qp_attr_t qp_attr; + VAPI_qp_attr_mask_t qp_attr_mask; + VAPI_qp_cap_t qp_cap; + + /* + * Change QP to RTR + * + */ + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + qp_attr.qp_state = VAPI_RTR; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + /* qp_attr.rq_psn = 0; */ + /* QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_RQ_PSN); */ + + ret = VAPI_modify_qp(res->hca_hndl, res->qp_hndl, &qp_attr, &qp_attr_mask, &qp_cap); VAPI_CHECK_RET; + + /* + * Change QP to RTS + * + */ + QP_ATTR_MASK_CLR_ALL(qp_attr_mask); + qp_attr.qp_state = VAPI_RTS; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_QP_STATE); + qp_attr.sq_psn = 0; + QP_ATTR_MASK_SET(qp_attr_mask,QP_ATTR_SQ_PSN); + + ret = VAPI_modify_qp(res->hca_hndl,res->qp_hndl,&qp_attr,&qp_attr_mask,&qp_cap); VAPI_CHECK_RET; + + return(ret); +} + +VAPI_ret_t osmt_mtl_mad_create_mr(osmt_mtl_mad_res_t *res) +{ + + VAPI_ret_t ret; + + VAPI_mrw_t mr_in, mr_out; + + + res->buf_size = (MAD_SIZE + GRH_LEN) * (res->max_outs_sq + res->max_outs_rq + 1); + + /* Register single memory address region for all buffers */ + res->buf_ptr = VMALLOC(res->buf_size); + + if (res->buf_ptr == ((VAPI_virt_addr_t)NULL)) { + ret = VAPI_EAGAIN; + VAPI_CHECK_RET; + } + + /* Enable local and remote access to memory region */ + mr_in.acl = VAPI_EN_LOCAL_WRITE | VAPI_EN_REMOTE_WRITE; + mr_in.l_key =0; + mr_in.pd_hndl = res->pd_hndl; + mr_in.r_key =0; + mr_in.size = res->buf_size; + ASSERT_VOIDP2UINTN(res->buf_ptr); + mr_in.start = (VAPI_virt_addr_t)(uintn_t)(res->buf_ptr); + mr_in.type = VAPI_MR; + + ret = VAPI_register_mr(res->hca_hndl, &mr_in, &(res->mr_hndl), &mr_out); VAPI_CHECK_RET; + + res->l_key = mr_out.l_key; + + return(ret); +} + +VAPI_ret_t osmt_mtl_init_opened_hca(osmt_mtl_mad_res_t* res) +{ + VAPI_ret_t ret; + + res->pd_hndl= VAPI_INVAL_HNDL; + res->rq_cq_hndl= VAPI_INVAL_HNDL; + res->sq_cq_hndl= VAPI_INVAL_HNDL; + res->sq_cq_eventh= VAPI_INVAL_HNDL; + res->rq_cq_eventh= VAPI_INVAL_HNDL; + res->qp_hndl= VAPI_INVAL_HNDL; + res->mr_hndl= VAPI_INVAL_HNDL; + + /* + * Create QP + * + */ + ret = osmt_mtl_get_qp_resources(res); + if (ret != VAPI_OK) { + return ret; + } + + /* + * Move to init + * + */ + ret = osmt_mtl_qp_init(res); + if (ret != VAPI_OK) { + return ret; + } + + + /* + * Initialize memory regions + * + */ + ret = osmt_mtl_mad_create_mr(res); + if (ret != VAPI_OK) { + return ret; + } + + /* only now move to RTR and RTS */ + ret = osmt_mtl_qp_2_rtr_rts(res); + if (ret != VAPI_OK) { + return ret; + } + + return VAPI_OK; +} + + + +VAPI_ret_t osmt_mtl_mad_cleanup(osmt_mtl_mad_res_t *res) +{ + if (res->qp_hndl != VAPI_INVAL_HNDL) { + VAPI_destroy_qp(res->hca_hndl,res->qp_hndl); + } + if (res->sq_cq_eventh != VAPI_INVAL_HNDL) { + EVAPI_clear_comp_eventh(res->hca_hndl,res->sq_cq_eventh); + } + if (res->rq_cq_eventh != VAPI_INVAL_HNDL) { + EVAPI_clear_comp_eventh(res->hca_hndl,res->rq_cq_eventh); + } + if (res->rq_cq_hndl != VAPI_INVAL_HNDL) { + VAPI_destroy_cq(res->hca_hndl,res->rq_cq_hndl); + } + if (res->sq_cq_hndl != VAPI_INVAL_HNDL) { + VAPI_destroy_cq(res->hca_hndl,res->sq_cq_hndl); + } + if (res->mr_hndl != VAPI_INVAL_HNDL) { + VAPI_deregister_mr(res->hca_hndl,res->mr_hndl); + } + if (res->pd_hndl != VAPI_INVAL_HNDL) { + VAPI_dealloc_pd(res->hca_hndl,res->pd_hndl); + } +#if 0 + /* open/close of HCA should be done system wide - not per application */ 
+  if (res->hca_hndl != VAPI_INVAL_HNDL) {
+    VAPI_close_hca(res->hca_hndl); /* TBD: HCA_open/close should be done on a system wide basis */
+  }
+#endif
+  return VAPI_OK;
+}
+
+VAPI_ret_t osmt_mtl_create_av(osmt_mtl_mad_res_t* res, int16_t dlid, VAPI_ud_av_hndl_t *avh_p)
+{
+  VAPI_ud_av_t av;
+  VAPI_ret_t ret;
+
+  av.dlid= dlid;
+  av.port= res->port_num;
+  av.sl = 0;            /* dest->sl; */
+  av.src_path_bits= 0;  /* dest->ee_dlid.dst_path_bits; */
+  av.static_rate = 0;
+  /* GRH ? */
+  av.grh_flag= 0;
+
+  ret= VAPI_create_addr_hndl(res->hca_hndl,res->pd_hndl, &av,avh_p);
+  if (ret != VAPI_OK) {
+    MTL_ERROR1("%s: failed VAPI_create_addr_hndl (%s)\n", __func__,
+               VAPI_strerror_sym(ret));
+    return ret;
+  }
+  return VAPI_OK;
+}
+
+VAPI_ret_t osmt_mtl_mad_send(osmt_mtl_mad_res_t* res, VAPI_wr_id_t id, void* mad,
+                             VAPI_qp_num_t dest_qp, IB_sl_t sl,u_int32_t dest_qkey, VAPI_ud_av_hndl_t avh)
+{
+  VAPI_sr_desc_t sr;
+  VAPI_sg_lst_entry_t sg_entry;
+  VAPI_ret_t ret;
+
+  /* building SEND request */
+  sr.opcode= VAPI_SEND;
+  sr.remote_ah= avh;
+  sr.remote_qp= dest_qp;
+  sr.remote_qkey= dest_qkey;
+
+  sr.id = id;
+  sr.set_se= FALSE;
+  sr.fence= FALSE;
+  sr.comp_type= VAPI_SIGNALED;
+  sr.sg_lst_len= 1;
+  sr.sg_lst_p= &sg_entry;
+  ASSERT_VOIDP2UINTN(mad);
+  sg_entry.addr= (VAPI_virt_addr_t)(uintn_t)(mad);
+  sg_entry.len= MAD_SIZE;
+  sg_entry.lkey= res->l_key;
+
+  ret= VAPI_post_sr(res->hca_hndl,res->qp_hndl,&sr);
+  if (ret != VAPI_OK) {
+    MTL_ERROR1(__FUNCTION__ ": failed VAPI_post_sr (%s)\n",
+               VAPI_strerror_sym(ret));
+    return ret;
+  }
+
+  return VAPI_OK;
+}
+
+int osmt_mtl_mad_post_recv_bufs(osmt_mtl_mad_res_t* res, void *buf_array,
+                                u_int32_t num_o_bufs,u_int32_t size, VAPI_wr_id_t start_id)
+{
+  uint32_t i;
+  void* cur_buf;
+  VAPI_rr_desc_t rr;
+  VAPI_sg_lst_entry_t sg_entry;
+  VAPI_ret_t ret;
+
+  rr.opcode= VAPI_RECEIVE;
+  rr.comp_type= VAPI_SIGNALED; /* All with CQE (IB compliant) */
+  rr.sg_lst_len= 1;            /* single buffers */
+  rr.sg_lst_p= &sg_entry;
+  sg_entry.lkey= res->l_key;
+  cur_buf = buf_array;
+  for (i= 0; i < num_o_bufs ; i++ ) {
+    rr.id= start_id+i; /* WQE id used is the index to buffers ptr array */
+    ASSERT_VOIDP2UINTN(cur_buf);
+    sg_entry.addr= (VAPI_virt_addr_t)(uintn_t)cur_buf;
+    sg_entry.len= size;
+    memset(cur_buf,0x00,size); /* fill with 0 */
+    ret= VAPI_post_rr(res->hca_hndl,res->qp_hndl,&rr);
+    if (ret != VAPI_OK) {
+      MTL_ERROR1(__FUNCTION__ ": failed posting RQ WQE (%s)\n",VAPI_strerror_sym(ret));
+      return i;
+    }
+    MTL_DEBUG4(__FUNCTION__ ": posted buf at %p\n",cur_buf);
+    cur_buf += size;
+  }
+
+  return i; /* num of buffers posted */
+}
+
+VAPI_ret_t osmt_mtl_mad_poll4cqe(VAPI_hca_hndl_t hca,VAPI_cq_hndl_t cq,
+                                 VAPI_wc_desc_t *wc_desc_p,
+                                 u_int32_t max_poll, u_int32_t poll_sleep,
+                                 VAPI_ud_av_hndl_t *avh_p)
+{
+  VAPI_ret_t ret = VAPI_CQ_EMPTY;
+  u_int32_t poll_cnt= 0;
+
+  /* wait for something to arrive */
+  while ((ret == VAPI_CQ_EMPTY) && (poll_cnt < max_poll)) {
+    ret= VAPI_poll_cq(hca,cq,wc_desc_p);
+    /* don't sleep if we already succeeded */
+    if (ret != VAPI_CQ_EMPTY) {
+      break;
+    }
+    usleep(poll_sleep);
+    poll_cnt++;
+  }
+
+  /* if passed an AVH to destroy - do it */
+  if (avh_p != NULL) {
+    VAPI_destroy_addr_hndl(hca,*avh_p);
+  }
+
+  if ((poll_cnt == max_poll) && (ret == VAPI_CQ_EMPTY)) {
+    MTL_DEBUG1(__FUNCTION__ ": Failed to get completion on wq after %d polls.\n",max_poll);
+    return VAPI_CQ_EMPTY;
+  }
+
+  if (ret != VAPI_OK) {
+    MTL_DEBUG1(__FUNCTION__ ": VAPI_poll_cq failed with ret=%s on sq_cq\n",mtl_strerror_sym(ret));
+    return ret;
+  }
+
+  if (wc_desc_p->status !=
VAPI_SUCCESS) { + MTL_DEBUG1(__FUNCTION__ ": completion error (%d) detected\n",wc_desc_p->status); + } + + return VAPI_OK; +} + +#endif /* OSM_VENDOR_INTF_MTL */ + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmt_multicast.c b/branches/Ndi/ulp/opensm/user/osmtest/osmt_multicast.c new file mode 100644 index 00000000..7082e7d4 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/osmt_multicast.c @@ -0,0 +1,3500 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of Multicast Member testing flow.. 
+ * + * Environment: + * Linux User Mode + * + * $Revision: 1.2 $ + */ + +#ifndef __WIN__ +#include +#endif +#include +#include +#include +#include +#include +#include "osmtest.h" + +/********************************************************************** + **********************************************************************/ + +static void +__osmt_print_all_multicast_records( + IN osmtest_t * const p_osmt ) +{ + uint32_t i; + ib_api_status_t status; + osmv_query_req_t req; + osmv_user_query_t user; + osmtest_req_context_t context; + ib_member_rec_t *mcast_record; + + memset( &context, 0, sizeof( context ) ); + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + + user.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + user.attr_offset = ib_get_attr_offset(sizeof(*mcast_record)); + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = 1; + req.flags = OSM_SA_FLAGS_SYNC; + context.p_osmt = p_osmt; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + + /* UnTrusted (SMKey of 0) - get the multicast groups */ + status = osmv_query_sa(p_osmt->h_bind, &req); + + if (status != IB_SUCCESS || context.result.status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "__osmt_print_all_multicast_records: ERR 02B5: " + "Failed getting the multicast groups records - %s/%s\n", + ib_get_err_str(status), + ib_get_err_str(context.result.status) ); + return; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "\n |------------------------------------------|" + "\n | Remaining Multicast Groups |" + "\n |------------------------------------------|\n" ); + + for (i = 0; i < context.result.result_cnt; i++) { + mcast_record = osmv_get_query_mc_rec(context.result.p_result_madw, i); + osm_dump_mc_record(&p_osmt->log, mcast_record,OSM_LOG_INFO); + } + + /* Trusted - now get the multicast group members */ + req.sm_key = OSM_DEFAULT_SM_KEY; + status = osmv_query_sa(p_osmt->h_bind, &req); + + if (status != IB_SUCCESS || context.result.status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "__osmt_print_all_multicast_records: ERR 02B6: " + "Failed getting the multicast group members records - %s/%s\n", + ib_get_err_str(status), + ib_get_err_str(context.result.status) ); + return; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "\n |--------------------------------------------------|" + "\n | Remaining Multicast Group Members |" + "\n |--------------------------------------------------|\n" ); + + for (i = 0; i < context.result.result_cnt; i++) { + mcast_record = osmv_get_query_mc_rec(context.result.p_result_madw, i); + osm_dump_mc_record(&p_osmt->log, mcast_record,OSM_LOG_INFO); + } + +} + +/********************************************************************** + **********************************************************************/ + +static cl_status_t +__match_mgids( + IN const void* const p_object, + IN void* context ) +{ + ib_gid_t* p_mgid_context = (ib_gid_t *)context; + ib_gid_t* p_mgid_list_item = (ib_gid_t*)p_object; + int32_t count; + + count = memcmp(p_mgid_context, p_mgid_list_item, sizeof(ib_gid_t)); + if(count == 0) + return CL_SUCCESS; + else + return CL_NOT_FOUND; +} + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_query_mcast( IN osmtest_t * const p_osmt ) { + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t 
req; + osmtest_req_context_t context; + ib_member_rec_t *p_rec; + uint32_t i,num_recs = 0; + cl_list_t mgids_list; + cl_list_t* p_mgids_list; + cl_list_iterator_t p_mgids_res; + cl_status_t cl_status; + cl_map_item_t *p_item,*p_next_item; + osmtest_mgrp_t *p_mgrp; + + OSM_LOG_ENTER( &p_osmt->log, osmt_query_mcast ); + + /* + * Do a blocking query for all Multicast Records in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. + */ + + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + + context.p_osmt = p_osmt; + user.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + user.attr_offset = ib_get_attr_offset( sizeof( ib_member_rec_t ) ); + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: ERR 0203: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: ERR 0264: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: " + "Remote error = %s.\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result. + p_result_madw ) ) ); + } + goto Exit; + } + + /* ok we have got something */ + /* First Delete the old MGID Table */ + p_next_item = cl_qmap_head( &p_osmt->exp_subn.mgrp_mlid_tbl ); + while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.mgrp_mlid_tbl ) ) + { + p_item = p_next_item; + p_next_item = cl_qmap_next( p_item ); + cl_qmap_remove_item(&p_osmt->exp_subn.mgrp_mlid_tbl,p_item); + free( p_item ); + + } + + cl_list_construct(&mgids_list); + cl_list_init( &mgids_list, num_recs ); + p_mgids_list = &mgids_list; + num_recs = context.result.result_cnt; + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_query_mcast: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_result( context.result.p_result_madw, i ); + p_mgids_res = cl_list_find_from_head ( p_mgids_list,__match_mgids,&(p_rec->mgid)); + /* If returns iterator other than end of list, same mgid exists already */ + if( p_mgids_res != cl_list_end( p_mgids_list ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: ERR 0265: " + "MCG MGIDs are the same - invalid MGID : 0x%016" PRIx64 " 0x%016" + PRIx64 "\n", + cl_ntoh64(p_rec->mgid.unicast.prefix), + cl_ntoh64(p_rec->mgid.unicast.interface_id)); + status = IB_ERROR; + goto Exit; + + } + osm_dump_mc_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + cl_status = cl_list_insert_head(p_mgids_list,&(p_rec->mgid)); + if (cl_status) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: ERR 0205: " + "Could not add MGID to cl_list\n" ); + status = IB_ERROR; + goto Exit; + } + p_mgrp = (osmtest_mgrp_t*)malloc( sizeof(*p_mgrp) ); + if (!p_mgrp) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_mcast: ERR 0204: " + "Could not allocate new MCG\n" ); + status = IB_ERROR; + goto Exit; + } + memcpy(&p_mgrp->mcmember_rec,p_rec,sizeof(p_mgrp->mcmember_rec)); + 
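/* (Illustrative note, not from the original code: mgrp_mlid_tbl is a
+      * cl_qmap_t keyed by the host-order MLID, so a group stored here can
+      * later be fetched with, e.g.:
+      *   p_item = cl_qmap_get( &p_osmt->exp_subn.mgrp_mlid_tbl,
+      *                         cl_ntoh16( p_rec->mlid ) );
+      *   if( p_item != cl_qmap_end( &p_osmt->exp_subn.mgrp_mlid_tbl ) )
+      *     p_mgrp = (osmtest_mgrp_t*)p_item;
+      * where cl_qmap_get() returns the map-end item when the key is absent.) */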
cl_qmap_insert(&p_osmt->exp_subn.mgrp_mlid_tbl, + cl_ntoh16(p_rec->mlid),&p_mgrp->map_item); + } + + Exit: + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ + +/* given a multicast request send and wait for response. */ +ib_api_status_t +osmt_send_mcast_request( IN osmtest_t * const p_osmt, + IN uint8_t is_set, + IN ib_member_rec_t *p_mc_req, + IN uint64_t comp_mask, + OUT ib_sa_mad_t *p_res + ) { + osmtest_req_context_t context; + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + + OSM_LOG_ENTER( &p_osmt->log, osmt_send_mcast_request ); + + /* + * Do a blocking query for this record in the subnet. + * + * The query structures are locals. + */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &context, 0, sizeof( context ) ); + memset( p_res, 0, sizeof( ib_sa_mad_t ) ); + + context.p_osmt = p_osmt; + + user.p_attr = p_mc_req; + user.comp_mask = comp_mask; + + if (is_set == 1) + { + req.query_type = OSMV_QUERY_UD_MULTICAST_SET; + } else if (is_set == 0) { + req.query_type = OSMV_QUERY_UD_MULTICAST_DELETE; + } else if (is_set == 0xee) { + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_send_mcast_request: Set USER DEFINED QUERY\n" ); + req.query_type = OSMV_QUERY_USER_DEFINED; + user.method = IB_MAD_METHOD_GET; + user.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + user.attr_offset = ib_get_attr_offset( sizeof( ib_member_rec_t ) ); + } else if (is_set == 0xff) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_send_mcast_request: Set USER DEFINED QUERY\n" ); + req.query_type = OSMV_QUERY_USER_DEFINED; + user.method = IB_MAD_METHOD_SET; + user.attr_id = IB_MAD_ATTR_MCMEMBER_RECORD; + user.attr_offset = ib_get_attr_offset( sizeof( ib_member_rec_t ) ); + } + + /* TODO : Check the validity of all user fields in order to use + OSMV_QUERY_USER_DEFINED + p_user_query = ( osmv_user_query_t * ) p_query_req->p_query_input; + if (p_user_query->method) sa_mad_data.method = p_user_query->method; + sa_mad_data.attr_offset = p_user_query->attr_offset; + sa_mad_data.attr_id = p_user_query->attr_id; + sa_mad_data.comp_mask = p_user_query->comp_mask; + sa_mad_data.p_attr = p_user_query->p_attr; + */ + + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + + if ( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_send_mcast_request: ERR 0206: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + /* ok it worked */ + memcpy(p_res, + osm_madw_get_mad_ptr(context.result.p_result_madw), + sizeof(ib_sa_mad_t)); + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_send_mcast_request: ERR 0224: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_send_mcast_request: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result.p_result_madw ) ) ); + } + } + + Exit: + /* + * 
Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ + +void +osmt_init_mc_query_rec(IN osmtest_t * const p_osmt, + IN OUT ib_member_rec_t *p_mc_req) { + /* use default values so we can change only what we want later */ + memset(p_mc_req, 0, sizeof(ib_member_rec_t)); + + /* we leave the MGID to the user */ + memcpy(&p_mc_req->port_gid.unicast.interface_id, + &p_osmt->local_port.port_guid, + sizeof(p_osmt->local_port.port_guid) + ); + + /* use our own subnet prefix: */ + p_mc_req->port_gid.unicast.prefix = CL_HTON64(0xFE80000000000000ULL); + + /* ib_net32_t qkey; */ + /* ib_net16_t mlid; - we keep it zero for upper level to decide. */ + /* uint8_t mtu; - keep it zero means - anything you have please. */ + /* uint8_t tclass; can leave as zero for now (between subnets) */ + /* ib_net16_t pkey; leave as zero */ + p_mc_req->rate = IB_LINK_WIDTH_ACTIVE_4X; + /* uint8_t pkt_life; zero means greater than zero ... */ + /* ib_net32_t sl_flow_hop; keep it all zeros */ + /* we want to use a link local scope: 0x02 */ + p_mc_req->scope_state = ib_member_set_scope_state(0x02, 0); +} + +/*********************************************************************** + * UD Multicast testing flow: + * o15.0.1.3: + * - Request new MCG with not enough components in comp_mask : + * ERR_INSUFFICIENT_COMPONENTS + * o15.0.1.8: + * - Request a join with irrelevant RATE and get a ERR_INVALID_REQ + * o15.0.1.4: + * - Create an MGID by asking for a join with MGID = 0 + * providing P_Key, Q_Key, SL, FlowLabel, Tclass. + * o15.0.1.5: + * - Check the returned MGID is valid. (p 804) + * o15.0.1.6: + * - Create a new MCG with valid requested MGID. + * - Try to create a new MCG with invalid MGID : get back ERR_REQ_INVALID + * - Try again with MGID prefix = 0xA01B (maybe 0x1BA0 little or big ?) + * - Try to create again the already created group: ERR_REQ_INVALID + * o15.0.1.7 - implicitlly checked during the prev steps. + * o15.0.1.9 + * - Create MCG with Invalid JoinState.FullMember != 1 : get ERR_REQ_INVALID + * o15.0.1.10 - can't check on a single client . + * o15.0.1.11: + * - Try to join into a MGID that exists with JoinState=SendOnlyMember - + * see that it updates JoinState. What is the routing change? + * - We can not check simple join since we have only one tester (for now) + * o15.0.1.12: + * - The last join should have a special treatment in the SA (sender only) + * but what is it ? + * o15.0.1.13: + * - Try joining with wrong rate - ERR_REQ_INVALID + * o15.0.1.14: + * - Try partial delete - actually updating the join state. check it. + * - Register by InformInfo flow to receive trap 67 on MCG delete. + * - Try full delete (JoinState and should be 0) + * - Wait for trap 67. + * - Try joining (not full mem) again to see the group was deleted. + * (should fail - o15.0.1.13) + * o15.0.1.15: + * - Try deletion of the IPoIB MCG and get: ERR_REQ_INVALID + * o15.0.1.16: + * - Try GetTable with PortGUID wildcarded and get back some groups. 
+ ***********************************************************************/ + +/* The following macro can be used only within the osmt_run_mcast_flow() function */ +#define IS_IPOIB_MGID(p_mgid) \ + ( !memcmp(&osm_ipoib_good_mgid, (p_mgid), sizeof(osm_ipoib_good_mgid)) || \ + !memcmp(&osm_ts_ipoib_good_mgid, (p_mgid), sizeof(osm_ts_ipoib_good_mgid)) ) + +ib_api_status_t +osmt_run_mcast_flow( IN osmtest_t * const p_osmt ) { + ib_api_status_t status; + ib_member_rec_t mc_req_rec; + ib_member_rec_t *p_mc_res; + ib_sa_mad_t res_sa_mad; + uint64_t comp_mask = 0; + ib_net64_t remote_port_guid = 0x0; + cl_qmap_t *p_mgrp_mlid_tbl; + osmtest_mgrp_t *p_mgrp; + ib_gid_t special_mgid, tmp_mgid, proxy_mgid; + ib_net16_t invalid_mlid = 0x0; + ib_net16_t max_mlid = cl_hton16(0xFFFE), tmp_mlid; + boolean_t ReachedMlidLimit = FALSE; + int start_cnt = 0, cnt, middle_cnt = 0, end_cnt = 0; + int start_ipoib_cnt = 0, end_ipoib_cnt = 0; + int mcg_outside_test_cnt = 0, fail_to_delete_mcg = 0; + osmtest_req_context_t context; + ib_node_record_t *p_rec; + uint32_t num_recs = 0, i; + uint8_t mtu_phys = 0, rate_phys = 0; + cl_map_t test_created_mlids; /* List of all mlids created in this test */ + ib_member_rec_t* p_recvd_rec; + boolean_t got_error = FALSE; + + static ib_gid_t good_mgid = { + { + 0xFF, 0x12, 0xA0, 0x1C, + 0xFE, 0x80, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x12, 0x34, 0x56, 0x78 + } + }; + static ib_gid_t osm_ipoib_mgid = { + { + 0xff, /* multicast field */ + 0x12, /* scope */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0xff, 0xff, 0xff, 0xee, /* 32 bit IPv4 broadcast address */ + }, + }; + static ib_gid_t osm_ts_ipoib_good_mgid = { + { + 0xff, /* multicast field */ + 0x12, /* non-permanent bit,scope */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0x00, 0x00, 0x00, 0x01, /* 32 bit IPv4 broadcast address */ + }, + }; + static ib_gid_t osm_ipoib_good_mgid = { + { + 0xff, /* multicast field */ + 0x12, /* non-permanent bit,scope */ + 0x40, 0x1b, /* IPv4 signature */ + 0xff, 0xff, /* 16 bits of P_Key (to be filled in) */ + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 48 bits of zeros */ + 0xff, 0xff, 0xff, 0xff, /* 32 bit IPv4 broadcast address */ + }, + }; + static ib_gid_t osm_link_local_mgid = { + { + 0xFF, 0x02, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01 + }, + }; + + OSM_LOG_ENTER( &p_osmt->log, osmt_run_mcast_flow ); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "GetTable of all current MCGs...\n" + ); + status = osmt_query_mcast( p_osmt ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 2FF " + "GetTable of all records has failed!\n" ); + goto Exit; + } + + /* Initialize the test_created_mgrps map */ + cl_map_construct(&test_created_mlids); + cl_map_init(&test_created_mlids, 1000); + + p_mgrp_mlid_tbl = &p_osmt->exp_subn.mgrp_mlid_tbl; + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + /* Only when we are on single mode check flow - do the count comparison, otherwise skip */ + if (p_osmt->opt.mmode == 1 || p_osmt->opt.mmode == 3) + { + start_cnt = cl_qmap_count(p_mgrp_mlid_tbl); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow (start): " + "Number of MC Records found in SA DB is %d\n", start_cnt); + } + + 
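/* (Illustrative note, not from the original code: start_cnt taken above is
+   * the baseline for the leak check done after the flow; the closing check
+   * would look roughly like:
+   *   end_cnt = cl_qmap_count( p_mgrp_mlid_tbl );
+   *   if( end_cnt != start_cnt )
+   *     ... report multicast groups leaked or lost by the test ...
+   * using the same cl_qmap_count() accessor as the snapshot.) */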
/* This flow was added due to a bug discovered using the SilverStorm stack -
+   the bug was initializing MCast with MTU & RATE min values that do
+   not match the subnet capability; even though OpenSM
+   responds with the correct value, it does not store it in the MCG.
+   We want to check a join request to an already existing group (ipoib)
+   without using MTU or RATE, then take the correct values from OpenSM's
+   response, then join again with them and get IB_SUCCESS
+   all the way
+*/
+
+  /* First validate an IPoIB group exists in the SA DB */
+  p_mgrp = (osmtest_mgrp_t*)cl_qmap_head( p_mgrp_mlid_tbl );
+  /* scan all available multicast groups in the DB and fill in the table */
+  while( p_mgrp != (osmtest_mgrp_t*)cl_qmap_end( p_mgrp_mlid_tbl ) )
+  {
+    /* search for ipoib mgid */
+    if (IS_IPOIB_MGID(&p_mgrp->mcmember_rec.mgid))
+    {
+      start_ipoib_cnt++;
+    }
+    else
+    {
+      osm_log( &p_osmt->log, OSM_LOG_INFO,
+               "osmt_run_mcast_flow: "
+               "Non-IPoIB MC Groups exist: mgid=0x%016" PRIx64 ":0x%016" PRIx64 "\n",
+               cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix),
+               cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id) );
+      mcg_outside_test_cnt++;
+    }
+
+    p_mgrp = (osmtest_mgrp_t*)cl_qmap_next( &p_mgrp->map_item );
+  }
+
+  osm_log( &p_osmt->log, OSM_LOG_INFO,
+           "osmt_run_mcast_flow: "
+           "Found %d non-IPoIB MC Groups\n", mcg_outside_test_cnt );
+
+  if (start_ipoib_cnt)
+  {
+    /* o15-0.2.4 - Check a join request to an already created MCG */
+    osm_log( &p_osmt->log, OSM_LOG_INFO,
+             "osmt_run_mcast_flow: "
+             "Found IPoIB MC Group, so we run SilverStorm Bug Flow...\n" );
+    /* Try to join first like IPoIB of SilverStorm */
+    memcpy(&mc_req_rec.mgid,&osm_ipoib_good_mgid,sizeof(ib_gid_t));
+    /* Request Join */
+    ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER);
+    comp_mask =
+      IB_MCR_COMPMASK_MGID |
+      IB_MCR_COMPMASK_PORT_GID |
+      IB_MCR_COMPMASK_JOIN_STATE;
+
+    status = osmt_send_mcast_request(
+      p_osmt, 0xff, /* User Defined query Set */
+      &mc_req_rec,
+      comp_mask,
+      &res_sa_mad );
+
+    osm_log(&p_osmt->log, OSM_LOG_INFO,
+            "osmt_run_mcast_flow: "
+            "Joining an existing IPoIB multicast group\n");
+    osm_log(&p_osmt->log, OSM_LOG_INFO,
+            "osmt_run_mcast_flow: "
+            "Sent Join request with :\n\t\tport_gid=0x%016"PRIx64
+            ":0x%016" PRIx64 ", mgid=0x%016" PRIx64 ":0x%016" PRIx64
+            "\n\t\tjoin state= 0x%x, response is : %s\n",
+            cl_ntoh64(mc_req_rec.port_gid.unicast.prefix),
+            cl_ntoh64(mc_req_rec.port_gid.unicast.interface_id),
+            cl_ntoh64(mc_req_rec.mgid.unicast.prefix),
+            cl_ntoh64(mc_req_rec.mgid.unicast.interface_id),
+            (mc_req_rec.scope_state & 0x0F),
+            ib_get_err_str(status));
+    if (status != IB_SUCCESS)
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_run_mcast_flow: ERR 02B3: "
+               "Failed joining existing IPoIB MCGroup - got %s\n",
+               ib_get_err_str(status));
+      goto Exit;
+    }
+    /* Check MTU & Rate Value and resend with SA suggested values */
+    p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad);
+
+    /* Prepare the mc_req_rec for the rest of the flow */
+    osmt_init_mc_query_rec(p_osmt, &mc_req_rec);
+    /*
+       We simulate the same situation as in SilverStorm - a response with the
+       exact RATE & MTU as the SA responded with.
Actually the query + has included some more fields but we know that problem was + genereated by the RATE + */ + osm_log(&p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Received attributes of MCG : \n\t\tMTU=0x%02X, RATE=0x%02X\n" + , p_mc_res->mtu,p_mc_res->rate); + + mc_req_rec.mtu = p_mc_res->mtu; + mc_req_rec.rate = p_mc_res->rate; + /* Set feasible mtu & rate that will allow check the + exact statement of OpenSM */ + mtu_phys = p_mc_res->mtu; + rate_phys = p_mc_res->rate; + + memcpy(&mc_req_rec.mgid, &osm_ipoib_good_mgid, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU | + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log(&p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Sending attributes of MCG : \n\t\tMTU=0x%02X, RATE=0x%02X\n" + , mc_req_rec.mtu, mc_req_rec.rate); + status = osmt_send_mcast_request( p_osmt, 0xff, /* User Defined query */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log(&p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Sent Join request using response values, response is : %s\n" + , ib_get_err_str(status)); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02EF: " + "Query as Full Member of already existing ipoib group 0x%016" + PRIx64 ":0x%016" PRIx64 " has failed\n", + cl_ntoh64(mc_req_rec.mgid.unicast.prefix), + cl_ntoh64(mc_req_rec.mgid.unicast.interface_id)); + + goto Exit; + } + /* We do not want to leave the MCG since its IPoIB */ + } + + /**************************************************************************/ + /* Check Get with invalid mlid */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Get with invalid mlid...\n" + ); + /* Request Get */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + mc_req_rec.mlid = invalid_mlid; + comp_mask = IB_MCR_COMPMASK_MLID; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 0xee, /* User Defined query Get */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status == IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow : ERR 2E0 " + "SubnAdmGet with invalid mlid 0x%x succeeded\n", + cl_ntoh16(mc_req_rec.mlid)); + status = IB_ERROR; + goto Exit; + } + + /* Prepare the mc_req_rec for the rest of the flow */ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + /**************************************************************************/ + /* Check Get with invalid port guid */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Get with invalid port guid (0x0) but valid interface ID : 0x%" + PRIx64 "...\n", + cl_ntoh64(mc_req_rec.port_gid.unicast.interface_id)); + + /* Request Get */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + memset(&mc_req_rec.port_gid.unicast.interface_id, 0, sizeof(ib_net64_t)); + comp_mask = IB_MCR_COMPMASK_GID; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 0xee, /* User Defined query Get */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + 
"osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status == IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow : ERR 2E4 " + "SubnAdmGet with invalid port guid succeeded\n" ); + status = IB_ERROR; + goto Exit; + } + + /* Prepare the mc_req_rec for the rest of the flow */ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + /**************************************************************************/ + + /* o15.0.1.3: */ + /* - Request Join with insufficient comp_mask */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with insufficient comp mask qkey & pkey (o15.0.1.3)...\n" + ); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + /* IB_MCR_COMPMASK_QKEY | */ + /* IB_MCR_COMPMASK_PKEY | intentionaly missed to raise the error */ + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02EE: " + "Expectedd REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with insufficient comp mask - sl (15.0.1.3)...\n" + ); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec,IB_MC_REC_STATE_FULL_MEMBER ); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + /* IB_MCR_COMPMASK_SL | */ + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02ED: " + "Expectedd REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + + mc_req_rec.mgid.raw[15] = 0x01; + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with insufficient comp mask - flow label (o15.0.1.3)...\n" + ); + + /* Request Join */ + 
ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + /* IB_MCR_COMPMASK_FLOW | intentionaly missed to raise the error */ + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02EC: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with insufficient comp mask - tclass (o15.0.1.3)...\n" + ); + + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER) ; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + /* IB_MCR_COMPMASK_TCLASS | Intentionally missed to raise an error */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02EA: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with insufficient comp mask - tclass qkey (o15.0.1.3)...\n" + ); + + /* no MGID */ + /* memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); */ + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + /* IB_MCR_COMPMASK_QKEY | intentionaly missed to raise the error */ + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + /* IB_MCR_COMPMASK_TCLASS | intentionaly missed to raise the error */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != 
IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02E9: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* o15.0.1.8: */ + /* - Request join with irrelevant RATE : get a ERR_INSUFFICIENT_COMPONENTS */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic rate (o15.0.1.8)...\n" + ); + + /* impossible requested rate */ + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_12X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0207: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Check Valid value which is unreasonable now */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic rate 120GB (o15.0.1.8)...\n" + ); + + /* impossible requested rate */ + mc_req_rec.rate = + IB_PATH_RECORD_RATE_120_GBS | + IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0208: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Check Valid value which is unreasonable now */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with less than min rate 2.5GB (o15.0.1.8)...\n" + ); + + /* impossible requested rate */ + mc_req_rec.rate = + IB_PATH_RECORD_RATE_2_5_GBS | + IB_PATH_SELECTOR_LESS_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + 
IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AB: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Checking above max value of MTU which is impossible */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic mtu : \n\t\tmore than 4096 -" + " max (o15.0.1.8)...\n" + ); + + /* impossible requested mtu */ + mc_req_rec.mtu = IB_MTU_LEN_4096 | IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AC: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Checking below min value of MTU which is impossible */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic mtu : \n\t\tless than 256 -" + " min (o15.0.1.8)...\n" + ); + + /* impossible requested mtu */ + mc_req_rec.mtu = IB_MTU_LEN_256 | IB_PATH_SELECTOR_LESS_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AD: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic mtu (o15.0.1.8)...\n" + ); + + /* impossible requested mtu */ + mc_req_rec.mtu = 0x6 | IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + 
IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AE: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + +#if 0 + /* Currently PacketLifeTime isn't checked in opensm */ + /* Check PacketLifeTime as 0 */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create with unrealistic packet life value less than 0 (o15.0.1.8)...\n" + ); + + /* impossible requested packet life */ + mc_req_rec.pkt_life = 0 | IB_PATH_SELECTOR_LESS_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_LIFE | + IB_MCR_COMPMASK_LIFE_SEL; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AF: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_REQ_INVALID got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } +#endif + + /* o15.0.1.4: */ + /* - Create an MGID by asking for a join with MGID = 0 */ + /* providing P_Key, Q_Key, SL, FlowLabel, Tclass. 
*/ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0 skip service level (o15.0.1.4)...\n" + ); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + /* IB_MCR_COMPMASK_SL | Intentionally missed */ + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02A8: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Check that no same MCG in the SMDB */ + status = osmt_query_mcast( p_osmt); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02AA: " + "Could not get all MC Records in subnet, got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Only when we are on single mode check flow - do the count comparison, otherwise skip */ + if (p_osmt->opt.mmode == 1 || p_osmt->opt.mmode == 3) + { + middle_cnt = cl_qmap_count(&p_osmt->exp_subn.mgrp_mlid_tbl); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow (post false create): " + "Number of MC Records found in SA DB is %d\n", middle_cnt); + if (middle_cnt != start_cnt) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Got different number of records stored in SA DB (before any creation)\n" + "Instead of %d got %d\n", start_cnt, middle_cnt); + } + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0 skip Qkey and Pkey (o15.0.1.4)...\n" + ); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + /* IB_MCR_COMPMASK_QKEY | */ + /* IB_MCR_COMPMASK_PKEY | Intentionally missed */ + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + 
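/* (Illustrative note, not from the original code: IB_SMP_STATUS_MASK strips
+     * the class-specific status bits, so the test above effectively compares
+     * only the common MAD status code:
+     *   (res_sa_mad.status & IB_SMP_STATUS_MASK) == IB_SA_MAD_STATUS_INSUF_COMPS
+     * before declaring the expected "insufficient components" error.) */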
osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02A7: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Bad Query o15.0.1.4 */ + + status = osmt_query_mcast( p_osmt); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0 skip TClass (o15.0.1.4)...\n" + ); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + /* IB_MCR_COMPMASK_TCLASS | Intentionally missed */ + /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR || + (( ib_net16_t ) (res_sa_mad.status & IB_SMP_STATUS_MASK )) != + IB_SA_MAD_STATUS_INSUF_COMPS) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02A6: " + "Expected REMOTE ERROR IB_SA_MAD_STATUS_INSUF_COMPS got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0 valid Set several options :\n\t\t" + "First above min RATE, Second less than max RATE\n\t\t" + "Third above min MTU, Second less than max MTU\n\t\t" + "Fifth exact MTU & RATE feasible, Sixth exact RATE feasible\n\t\t" + "Seventh exact MTU feasible (o15.0.1.4)...\n" + ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02A5: " + "Failed to create MCG for MGID=0 with higher than minimum RATE - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while 
giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_12X | + IB_PATH_SELECTOR_LESS_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0211: " + "Failed to create MCG for MGID=0 with less than highest RATE - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + + mc_req_rec.mtu = IB_MTU_LEN_4096 | + IB_PATH_SELECTOR_LESS_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0238: " + "Failed to create MCG for MGID=0 with less than highest MTU - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + mc_req_rec.mtu = IB_MTU_LEN_256 | + IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0239: " + "Failed to create MCG for MGID=0 with higher than lowest MTU - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( 
(ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert( &test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + /* Using Exact feasible MTU & RATE */ + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Using Exact feasible MTU & RATE: " + "MTU = 0x%02X, RATE = 0x%02X\n", + mtu_phys, rate_phys ); + + mc_req_rec.mtu = mtu_phys; + mc_req_rec.rate = rate_phys; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU | + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0240: " + "Failed to create MCG for MGID=0 with exact MTU & RATE - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + /* Using Exact feasible RATE */ + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Using Exact feasible RATE: 0x%02X\n", + rate_phys ); + + mc_req_rec.rate = rate_phys; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0241: " + "Failed to create MCG for MGID=0 with exact RATE - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( 
p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + /* Using Exact feasible MTU */ + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Using Exact feasible MTU: 0x%02X\n", + mtu_phys ); + + mc_req_rec.mtu = mtu_phys; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0242: " + "Failed to create MCG for MGID=0 with exact MTU - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* o15.0.1.5: */ + /* - Check the returned MGID is valid. (p 804) */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating resulting MGID (o15.0.1.5)...\n" + ); + /* prefix 0xFF1 Scope 0xA01B */ + /* Since we did not directly specify SCOPE in the comp mask + we should get back a group with link-local scope */ + if ( (p_mc_res->mgid.multicast.header[0] != 0xFF) || + (p_mc_res->mgid.multicast.header[1] != 0x12) || + (p_mc_res->mgid.multicast.raw_group_id[0] != 0xA0) || + (p_mc_res->mgid.multicast.raw_group_id[1] != 0x1B) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0209: " + "Validating MGID failed. 
MGID:0x%016" PRIx64 ":%016" PRIx64 "\n", + cl_ntoh64( p_mc_res->mgid.unicast.prefix ), + cl_ntoh64( p_mc_res->mgid.unicast.interface_id ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Good Flow - mgid is 0 while giving all required fields for join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + /* Using feasible GREATER_THAN 0 packet lifitime */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0 (o15.0.1.4)...\n" + ); + + status = osmt_query_mcast(p_osmt); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + + /* no MGID */ + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + mc_req_rec.pkt_life = 0 | IB_PATH_SELECTOR_GREATER_THAN << 6; + + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_LIFE | + IB_MCR_COMPMASK_LIFE_SEL; + + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0210: " + "Failed to create MCG for MGID=0 - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* o15.0.1.6: */ + /* - Create a new MCG with valid requested MGID. 
*/ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + mc_req_rec.mgid = good_mgid; + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given valid MGID=0x%016" PRIx64 " : " + "0x%016" PRIx64 " (o15.0.1.6)...\n", + cl_ntoh64(mc_req_rec.mgid.unicast.prefix), + cl_ntoh64(mc_req_rec.mgid.unicast.interface_id)); + + /* Before creation, need to check that this group doesn't exist */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Verifying that MCGroup with this MGID doesn't exist by trying to Join it (o15.0.1.13)...\n" ); + + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_NON_MEMBER); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, /* join */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0301: " + "Tried joining group that shouldn't have existed - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Set State to full member to allow group creation */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Now creating group with given valid MGID=0x%016" PRIx64 " : " + "0x%016" PRIx64 " (o15.0.1.6)...\n", + cl_ntoh64(mc_req_rec.mgid.unicast.prefix), + cl_ntoh64(mc_req_rec.mgid.unicast.interface_id)); + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0211: " + "Failed to create MCG for MGID=0x%016" PRIx64 " : " + "0x%016" PRIx64 " (o15.0.1.6) - got %s/%s\n", + cl_ntoh64(good_mgid.unicast.prefix), + cl_ntoh64(good_mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad)) ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating resulting MGID (o15.0.1.6)...\n" + ); + /* prefix 0xFF1 Scope 0xA01B */ + if ( (p_mc_res->mgid.multicast.header[0] != 0xFF) || + (p_mc_res->mgid.multicast.header[1] != 0x12) || /* HACK hardcoded scope = 0x02 */ + (p_mc_res->mgid.multicast.raw_group_id[0] != 0xA0) || + (p_mc_res->mgid.multicast.raw_group_id[1] != 0x1C) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0212: " + "Validating MGID failed. 
MGID:0x%016" PRIx64 ":%016" PRIx64 "\n", + cl_ntoh64( p_mc_res->mgid.unicast.prefix ), + cl_ntoh64( p_mc_res->mgid.unicast.interface_id ) + ); + status = IB_ERROR; + goto Exit; + } + + /* - Try to create a new MCG with invalid MGID : get back ERR_REQ_INVALID */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD MGID=0xFA..... (o15.0.1.6)...\n" + ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid.raw[0] = 0xFA; + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0213: " + "Failed to recognize MGID error for MGID=0xFA - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* - Try again with MGID prefix = 0xA01B (maybe 0x1BA0 little or big ?) */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD MGID=0xFF12A01B..... with link-local scope (o15.0.1.6)...\n" + ); + + mc_req_rec.mgid.raw[0] = 0xFF; + mc_req_rec.mgid.raw[3] = 0x1B; + comp_mask = comp_mask | IB_MCR_COMPMASK_SCOPE; + mc_req_rec.scope_state = mc_req_rec.scope_state & 0x2F; /* local scope */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0214: " + "Failed to recognize MGID error for A01B with link-local bit (status %s) (rem status %s)\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Change the mgid prefix - get back ERR_REQ_INVALID */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD MGID PREFIX=0xEF... (o15.0.1.6)...\n" + ); + + mc_req_rec.mgid = good_mgid; + + mc_req_rec.mgid.raw[0] = 0xEF; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0215: " + "Failed to recognize MGID PREFIX error for MGID=0xEF - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Change the scope to reserved - get back VALID REQ */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking local scope with full member \n\t\tand valid mgid 0x%016" + PRIx64 ":0x%016" PRIx64 " ... 
(o15.0.1.6)...\n", + cl_ntoh64(mc_req_rec.mgid.unicast.prefix), + cl_ntoh64(mc_req_rec.mgid.unicast.interface_id)); + + mc_req_rec.mgid = good_mgid; + + mc_req_rec.mgid.raw[1] = 0x1F; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0216: " + "Failed to create MCG for MGID=0x%016" PRIx64 " : " + "0x%016" PRIx64 " - got %s/%s\n", + cl_ntoh64(good_mgid.unicast.prefix), + cl_ntoh64(good_mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Change the flags to invalid value 0x2 - get back INVALID REQ */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking invalid flags=0xFF 22 ... (o15.0.1.6)...\n" + ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = good_mgid; + + mc_req_rec.mgid.raw[1] = 0x22; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0217: " + "Failed to recognize create with invalid flags value 0x2 - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Change the MGID to link local MGID - get back VALID REQ */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking link local MGID 0xFF02:0:0:0:0:0:0:1 (o15.0.1.6)...\n" + ); + + mc_req_rec.mgid = osm_link_local_mgid; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0218: " + "Failed to create MCG for MGID=0xFF02:0:0:0:0:0:0:1 - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* o15.0.1.7 - implicitlly checked during the prev steps. */ + /* o15.0.1.8 - implicitlly checked during the prev steps. 
*/ + + /* o15.0.1.9 */ + /* - Create MCG with Invalid JoinState.FullMember != 1 : get ERR_REQ_INVALID */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking new MGID with invalid join state (o15.0.1.9)...\n" + ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = good_mgid; + mc_req_rec.mgid.raw[12] = 0xFF; + mc_req_rec.scope_state = 0x22; /* link-local scope, non-member state */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0219: " + "Failed to recognize create with JoinState != FullMember - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Let's try a valid join scope state */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking new MGID with valid join state (o15.0.1.9)...\n" + ); + + mc_req_rec.mgid = good_mgid; + mc_req_rec.scope_state = 0x23; /* link-local scope, non member and full member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0220: " + "Failed to create MCG with valid join state 0x3 - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid ) ); + cl_map_insert( &test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* Let's try another invalid join scope state */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking new MGID with invalid join state (o15.0.1.9)...\n" + ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + /* We have created a new MCG so now we need a different mgid when creating the group, otherwise it will be counted as a join request. */ + mc_req_rec.mgid = good_mgid; + mc_req_rec.mgid.raw[12] = 0xFC; + + mc_req_rec.scope_state = 0x24; /* link-local scope, send only member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0221: " + "Failed to recognize create with JoinState != FullMember - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Let's try another valid join scope state */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking new MGID creation with valid join state (o15.0.1.9)...\n" + ); + + mc_req_rec.mgid = good_mgid; + 
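/* Editor's note - an illustrative sketch, not part of the original test: + the scope_state byte used throughout this flow packs the 4-bit MGID scope + in the high nibble and the MCMemberRecord JoinState bits in the low nibble: + + scope_state = (uint8_t)( (scope << 4) | join_state ); + + 0x21 == link-local scope (0x2) | FullMember (0x1) + 0x22 == link-local scope (0x2) | NonMember (0x2) + 0x24 == link-local scope (0x2) | SendOnlyNonMember (0x4) + 0x2F == link-local scope (0x2) | all JoinState bits set + + The bit names follow the IBA MCMemberRecord definition; the packing is + inferred from the constants used in this flow. */ +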
mc_req_rec.mgid.raw[12] = 0xFB; + memcpy(&special_mgid, &mc_req_rec.mgid, sizeof(ib_gid_t)); + mc_req_rec.scope_state = 0x2F; /* link-local scope, Full member with all other bits turned on */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0222: " + "Failed to create MCG with valid join state 0xF - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + + /* o15.0.1.10 - can't check on a single client -- obsolete - + checked by SilverStorm bug o15-0.2.4, nevertheless recheck */ + /* o15-0.2.4 - Check a join request to already created MCG */ + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Check o15-0.2.4 statement...\n" ); + /* Try to join */ + memcpy(&mc_req_rec.mgid,&p_mc_res->mgid,sizeof(ib_gid_t)); + /* Request Join */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_NON_MEMBER); + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE; + + status = osmt_send_mcast_request( p_osmt, 0x1, /* User Defined query */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CC: " + "Failed to join MCG with valid req, returned status = %s\n", + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) )); + goto Exit; + } + + /* o15.0.1.11: */ + /* - Try to join into a MGID that exists with JoinState=SendOnlyMember - */ + /* see that it updates JoinState. What is the routing change? */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Retry of existing MGID - See JoinState update (o15.0.1.11)...\n" + ); + + mc_req_rec.mgid = good_mgid; + mc_req_rec.scope_state = 0x22; /* link-local scope, non member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CD: " + "Failed to update existing MGID - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State update with NonMember (o15.0.1.11)...\n" + ); + + if (p_mc_res->scope_state != 0x23) /* scope is MSB */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CE: " + "Validating JoinState update failed. 
Expected 0x23 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + + /* Try to delete the current join state, then update it with another value */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking JoinState update request should return 0x22 (o15.0.1.11)...\n" + ); + + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.mgid = good_mgid; + /* link-local scope, non member (so we should not be able to delete) */ + /* but the FullMember bit should be gone */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Partially delete JoinState (o15.0.1.14)...\n" + ); + mc_req_rec.scope_state = 0x22; + status = osmt_send_mcast_request( p_osmt, 0, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if ((status != IB_SUCCESS) || (p_mc_res->scope_state != 0x21)) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CF: " + "Failed to partially update JoinState : %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* So far we successfully deleted the state - now change it */ + mc_req_rec.mgid = good_mgid; + mc_req_rec.scope_state = 0x24; /* link-local scope, send only member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C0: " + "Failed to update existing MCG - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State update with Send Only Member (o15.0.1.11)...\n" + ); + + if (p_mc_res->scope_state != 0x25) /* scope is MSB */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C1: " + "Validating JoinState update failed. Expected 0x25 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + /* Now try to update value of join state */ + mc_req_rec.scope_state = 0x21; /* link-local scope, full member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C2: " + "Failed to update existing MGID - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State update with Full Member\n\t\t" + "to an existing 0x5 state MCG (o15.0.1.11)...\n" ); + + if (p_mc_res->scope_state != 0x25) /* scope is MSB */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C3: " + "Validating JoinState update failed. 
Expected 0x25 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + + /* Now try to update value of join state */ + mc_req_rec.scope_state = 0x22; /* link-local scope, non member */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C4: " + "Failed to update existing MGID - got %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + goto Exit; + } + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State update with Non Member\n\t\t" + "to an existing 0x5 state MCG (o15.0.1.11)...\n" ); + + if (p_mc_res->scope_state != 0x27) /* scope is MSB */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C5: " + "Validating JoinState update failed. Expected 0x27 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "DEBUG - Current scope_state value : 0x%02X...\n", p_mc_res->scope_state ); + + /* - We cannot check a simple join since we have only one tester (for now) */ + + /* o15.0.1.12: Not Supported */ + /* - The SendOnlyNonMem join should have a special treatment in the + SA but what is it ? */ + + /* o15.0.1.13: */ + /* - Try joining with rate that does not exist in any MCG - + ERR_REQ_INVALID */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD RATE when connecting to existing MGID (o15.0.1.13)...\n" + ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = good_mgid; + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_LESS_THAN << 6; + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C6: " + "Failed to catch BAD RATE joining an existing MGID: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Try MTU that does not exist in any MCG */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD MTU (higher than max) when connecting to " + "existing MGID (o15.0.1.13)...\n" + ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = osm_ipoib_mgid; + mc_req_rec.mtu = + IB_MTU_LEN_4096 | + IB_PATH_SELECTOR_GREATER_THAN << 6; + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " 
EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C7: " + "Failed to catch BAD RATE (higher them max) joining an exiting MGID: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Try another MTU that does not exist in any MCG */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD MTU (less than min) when connecting " + "to existing MGID (o15.0.1.13)...\n" + ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = osm_ipoib_mgid; + mc_req_rec.mtu = + IB_MTU_LEN_256 | + IB_PATH_SELECTOR_LESS_THAN << 6; + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C8: " + "Failed to catch BAD RATE (less them min) joining an exiting MGID: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* o15.0.1.14: */ + /* - Try partial delete - actually updating the join state. check it. */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking partial JoinState delete request - removing NonMember (o15.0.1.14)...\n" + ); + + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.mgid = good_mgid; + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_RATE_SEL | + IB_MCR_COMPMASK_RATE; + /* link-local scope, non member (so we should not be able to delete) */ + /* but the FullMember bit should be gone */ + mc_req_rec.scope_state = 0x22; + + status = osmt_send_mcast_request( p_osmt, 0, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02C9: " + "Fail to partially update JoinState during delete: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State removal of Non Member bit (o15.0.1.14)...\n" + ); + if (p_mc_res->scope_state != 0x25) /* scope is MSB - now only the non member & send only member have left */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CA: " + "Validating JoinState update failed. 
Expected 0x25 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + + /* Now use the same scope_state and delete all JoinState - leave multicast group since state is 0x0 */ + + mc_req_rec.scope_state = 0x25; + status = osmt_send_mcast_request( p_osmt, 0, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02CB: " + "Failed to update JoinState during delete: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Validating Join State update remove (o15.0.1.14)...\n" + ); + + if (p_mc_res->scope_state != 0x25) /* scope is MSB - now only 0x0 so port is removed from MCG */ + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02BF: " + "Validating JoinState update failed. Expected 0x25 got: 0x%02X\n", + p_mc_res->scope_state + ); + status = IB_ERROR; + goto Exit; + } + + /* - Try joining (not full mem) again to see the group was deleted. (should fail) */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Delete by trying to Join deleted group (o15.0.1.13)...\n" + ); + + mc_req_rec.scope_state = 0x22; /* use non member - so the join fails if the group is gone */ + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, /* join */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status != IB_REMOTE_ERROR) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02BC: " + "Succeeded Joining Deleted Group: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* - Try deletion of the IPoIB MCG and get: ERR_REQ_INVALID */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD Delete of Mgid membership (no prev join) (o15.0.1.15)...\n" + ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = osm_ipoib_mgid; + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.scope_state = 0x21; /* delete full member */ + + status = osmt_send_mcast_request( p_osmt, 0, /* delete flag */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02BD: " + "Failed to catch BAD delete from IPoIB: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* Prepare another MCG for the following tests : */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Create given MGID=0x%016" PRIx64 " : " + "0x%016" PRIx64 "\n\t\t(o15.0.1.4)...\n", + cl_ntoh64(osm_ipoib_mgid.unicast.prefix), + cl_ntoh64(osm_ipoib_mgid.unicast.interface_id) ); + + mc_req_rec.mgid = good_mgid; + mc_req_rec.mgid.raw[12] = 0xAA; + mc_req_rec.pkt_life = 0 | IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.scope_state = 0x21; /* Full member */ + comp_mask = + IB_MCR_COMPMASK_GID | + 
IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_LIFE | + IB_MCR_COMPMASK_LIFE_SEL; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02BE: " + "Failed to create MCG for 0x%016" PRIx64 " : " + "0x%016" PRIx64 " - got %s/%s\n", + cl_ntoh64(good_mgid.unicast.prefix), + cl_ntoh64(good_mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + goto Exit; + } + + /* - Try delete with valid join state */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Full Delete of a group (o15.0.1.14)...\n" + ); + mc_req_rec.scope_state = 0x21; /* the FullMember is the current JoinState */ + status = osmt_send_mcast_request( p_osmt, 0, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* o15.0.1.15: */ + /* - Try deletion of the IPoIB MCG and get: ERR_REQ_INVALID */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking BAD Delete of IPoIB membership (no prev join) (o15.0.1.15)...\n" + ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + mc_req_rec.mgid = osm_ipoib_mgid; + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.scope_state = 0x21; /* delete full member */ + + status = osmt_send_mcast_request( p_osmt, 0, /* delete flag */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if ((status != IB_REMOTE_ERROR) || + (res_sa_mad.status != IB_SA_MAD_STATUS_REQ_INVALID)) { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 0223: " + "Failed to catch BAD delete from IPoIB: %s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /**************************************************************************/ + /* Checking join with invalid MTU */ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Join with unrealistic mtu : \n" + "\t\tFirst create new MCG then try to join it \n" + "\t\twith unrealistic MTU greater than 4096 (o15.0.1.8)...\n" + ); + + /* First create new mgrp */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + mc_req_rec.mtu = IB_MTU_LEN_1024 | IB_PATH_SELECTOR_EXACTLY << 6; + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02EB: " + "Failed to create new mgrp\n" ); + goto Exit; + } + memcpy(&tmp_mgid,&p_mc_res->mgid,sizeof(ib_gid_t)); + osm_dump_mc_record( &p_osmt->log, p_mc_res, OSM_LOG_INFO ); + /* tmp_mtu = 
p_mc_res->mtu & 0x3F; */ + + /* impossible requested mtu always greater than exist in MCG */ + mc_req_rec.mtu = IB_MTU_LEN_4096 | IB_PATH_SELECTOR_GREATER_THAN << 6; + memcpy(&mc_req_rec.mgid,&tmp_mgid,sizeof(ib_gid_t)); + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + comp_mask = + IB_MCR_COMPMASK_GID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_MTU_SEL | + IB_MCR_COMPMASK_MTU; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status == IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02E4: " + "Expected REMOTE ERROR got:%s/%s\n", + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) + ); + status = IB_ERROR; + goto Exit; + } + + /* - Try GetTable with PortGUID wildcarded and get back some groups. */ + status = osmt_query_mcast( p_osmt); + cnt = cl_qmap_count(&p_osmt->exp_subn.mgrp_mlid_tbl); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow (Before checking Max MCG creation): " + "Number of MC Records found in SA DB is %d\n", cnt ); + + /**************************************************************************/ + /* Checking join on behalf of remote port gid */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Proxy Join...\n" + ); + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for all NodeRecords in the subnet. + */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_NODE_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02E5: " + "osmtest_get_all_recs failed on getting all node records(%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Populate the database with the received records. 
+ */ + num_recs = context.result.result_cnt; + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_node_rec( context.result.p_result_madw, i ); + if (p_rec->node_info.port_guid != p_osmt->local_port.port_guid && + p_rec->node_info.node_type == IB_NODE_TYPE_CA) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow: " + "remote port_guid = 0x%" PRIx64 "\n", + cl_ntoh64(p_rec->node_info.port_guid) ); + + remote_port_guid = p_rec->node_info.port_guid; + i = num_recs; + break; + } + } + + if (remote_port_guid != 0x0) + { + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + mc_req_rec.port_gid.unicast.interface_id = remote_port_guid; + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS; /* all above are required */ + + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02B4: " + "Could not join on behalf of remote port 0x%016" PRIx64 + " remote status: %s\n", + cl_ntoh64(remote_port_guid), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + status = IB_ERROR; + goto Exit; + } + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + memcpy(&proxy_mgid,&p_mc_res->mgid,sizeof(ib_gid_t)); + + /* First try a bad deletion then good one */ + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Trying deletion of remote port with local port guid\n" ); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_START "\n" ); + + status = osmt_send_mcast_request( p_osmt, 0, /* delete flag */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: " EXPECTING_ERRORS_END "\n" ); + + if (status == IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02A9: " + "Successful deletion of remote port guid with local one MGID : " + "0x%016" PRIx64 " : 0x%016" PRIx64 ", Got : %s/%s\n", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + status = IB_ERROR; + goto Exit; + } + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Trying deletion of remote port with the right port guid\n" ); + + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + mc_req_rec.mgid = proxy_mgid; + mc_req_rec.port_gid.unicast.interface_id = remote_port_guid; + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE; + status = osmt_send_mcast_request( p_osmt, 0, /* delete flag */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02B0: " + "Failed to delete mgid with remote port guid MGID : " + "0x%016" PRIx64 " : 0x%016" PRIx64 ", Got : 
%s/%s\n", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + goto Exit; + } + } + else + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Could not check proxy join since could not found remote port, different from local port\n" ); + } + + /* prepare init for next check */ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + /**************************************************************************/ + if (p_osmt->opt.mmode > 2) + { + /* Check invalid Join with max mlid which is more than the + Mellanox switches support 0xC000+0x1000 = 0xd000 */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Checking Creation of Maximum avaliable Groups (MulticastFDBCap)...\n" + ); + tmp_mlid = cl_ntoh16(max_mlid) - cnt; + + while (tmp_mlid > 0 && !ReachedMlidLimit) { + uint16_t cur_mlid = 0; + + /* Request Set */ + ib_member_set_join_state(&mc_req_rec, IB_MC_REC_STATE_FULL_MEMBER); + /* Good Flow - mgid is 0 while giving all required fields for + join : P_Key, Q_Key, SL, FlowLabel, Tclass */ + + mc_req_rec.rate = + IB_LINK_WIDTH_ACTIVE_1X | + IB_PATH_SELECTOR_GREATER_THAN << 6; + mc_req_rec.mlid = max_mlid; + memset(&mc_req_rec.mgid, 0, sizeof(ib_gid_t)); + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_QKEY | + IB_MCR_COMPMASK_PKEY | + IB_MCR_COMPMASK_SL | + IB_MCR_COMPMASK_FLOW | + IB_MCR_COMPMASK_JOIN_STATE | + IB_MCR_COMPMASK_TCLASS | /* all above are required */ + IB_MCR_COMPMASK_MLID; + status = osmt_send_mcast_request( p_osmt, 1, + &mc_req_rec, + comp_mask, + &res_sa_mad ); + + p_mc_res = ib_sa_mad_get_payload_ptr(&res_sa_mad); + if (status != IB_SUCCESS) + { + + if (cur_mlid > cl_ntoh16(max_mlid)) + { + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow : ERR 2E1 " + "Successful Join with greater mlid than switches support (MulticastFDBCap) 0x%04X\n", + cur_mlid ); + status = IB_ERROR; + osm_dump_mc_record( &p_osmt->log, p_mc_res, OSM_LOG_VERBOSE ); + goto Exit; + } else if (( res_sa_mad.status & IB_SMP_STATUS_MASK ) == IB_SA_MAD_STATUS_NO_RESOURCES) { + /* You can quitly exit the loop since no available mlid in SA DB + i.e. reached the maximum valiad avalable mlid */ + ReachedMlidLimit = TRUE; + } + } + else + { + cur_mlid = cl_ntoh16(p_mc_res->mlid); + /* Save the mlid created in test_created_mlids map */ + p_recvd_rec = (ib_member_rec_t*)ib_sa_mad_get_payload_ptr( &res_sa_mad ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow : " + "Created MGID:0x%016" PRIx64 " : " + "0x%016" PRIx64 " MLID:0x%04X\n", + cl_ntoh64( p_recvd_rec->mgid.unicast.prefix ), + cl_ntoh64( p_recvd_rec->mgid.unicast.interface_id ), + cl_ntoh16( p_recvd_rec->mlid )); + cl_map_insert(&test_created_mlids, + cl_ntoh16(p_recvd_rec->mlid), p_recvd_rec ); + } + tmp_mlid--; + } + } + + /* Prepare the mc_req_rec for the rest of the flow */ + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + + /**************************************************************************/ + /* o15.0.1.16: */ + /* - Try GetTable with PortGUID wildcarded and get back some groups. 
*/ + + status = osmt_query_mcast( p_osmt); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02B1: " + "Failed to query multicast groups: %s\n", + ib_get_err_str(status) ); + goto Exit; + } + + cnt = cl_qmap_count(&p_osmt->exp_subn.mgrp_mlid_tbl); + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow (Before Deletion of all MCG): " + "Number of MC Records found in SA DB is %d\n", cnt ); + + /* Delete all MCG that are not IPoIB */ + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow : " + "Cleanup all MCG that are not IPoIB...\n" ); + + p_mgrp_mlid_tbl = &p_osmt->exp_subn.mgrp_mlid_tbl; + p_mgrp = (osmtest_mgrp_t*)cl_qmap_head( p_mgrp_mlid_tbl ); + /* scan all available multicast groups in the DB and fill in the table */ + while( p_mgrp != (osmtest_mgrp_t*)cl_qmap_end( p_mgrp_mlid_tbl ) ) + { + /* Only if different from IPoIB Mgid try to delete */ + if (!IS_IPOIB_MGID(&p_mgrp->mcmember_rec.mgid)) + { + osmt_init_mc_query_rec(p_osmt, &mc_req_rec); + mc_req_rec.mgid = p_mgrp->mcmember_rec.mgid; + + /* o15-0.1.4 - need to specify the opposite state for a valid delete */ + if (!memcmp(&special_mgid, &p_mgrp->mcmember_rec.mgid, sizeof(special_mgid))) + { + mc_req_rec.scope_state = 0x2F; + } + else + { + mc_req_rec.scope_state = 0x21; + } + comp_mask = + IB_MCR_COMPMASK_MGID | + IB_MCR_COMPMASK_PORT_GID | + IB_MCR_COMPMASK_JOIN_STATE; + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_run_mcast_flow : " + "Sending request to delete MGID : 0x%016" PRIx64 + " : 0x%016" PRIx64 ", scope_state : 0x%02X\n", + cl_ntoh64(mc_req_rec.mgid.unicast.prefix), + cl_ntoh64(mc_req_rec.mgid.unicast.interface_id), + mc_req_rec.scope_state ); + status = osmt_send_mcast_request( p_osmt, 0, /* delete flag */ + &mc_req_rec, + comp_mask, + &res_sa_mad ); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02FF: " + "Failed to delete MGID : 0x%016" PRIx64 " : 0x%016" PRIx64 + " ,\n\t\t it is not our MCG, Status : %s/%s\n", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id), + ib_get_err_str( status ), + ib_get_mad_status_str( (ib_mad_t*)(&res_sa_mad) ) ); + fail_to_delete_mcg++; + } + } + else + { + end_ipoib_cnt++; + } + p_mgrp = (osmtest_mgrp_t*)cl_qmap_next( &p_mgrp->map_item ); + } + + status = osmt_query_mcast( p_osmt); + + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02B2 " + "GetTable of all records has failed - got %s\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* If we are in single mode check flow - need to make sure all the multicast groups + that are left are not ones created during the flow. 
+ */ + if (p_osmt->opt.mmode == 1 || p_osmt->opt.mmode == 3) + { + end_cnt = cl_qmap_count(&p_osmt->exp_subn.mgrp_mlid_tbl); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Status of MC Records in SA DB during the test flow:\n" + " Beginning of test\n" + " Unrelated to the test: %d\n" + " IPoIB MC Records : %d\n" + " Total : %d\n" + " End of test\n" + " Failed to delete : %d\n" + " IPoIB MC Records : %d\n" + " Total : %d\n", + mcg_outside_test_cnt, /* Non-IPoIB that existed at the beginning */ + start_ipoib_cnt, /* IPoIB records */ + start_cnt, /* Total: IPoIB and MC Records unrelated to the test */ + fail_to_delete_mcg, /* Failed to delete at the end */ + end_ipoib_cnt, /* IPoIB records */ + end_cnt); /* Total MC Records at the end */ + + /* when we compare num of MCG we should consider an outside source which create other MCGs */ + if ((end_cnt - fail_to_delete_mcg-end_ipoib_cnt) != (start_cnt - mcg_outside_test_cnt - start_ipoib_cnt)) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Got different number of non-IPoIB records stored in SA DB\n\t\t" + "at Start got %d, at End got %d (IPoIB groups only)\n", + (start_cnt - mcg_outside_test_cnt - start_ipoib_cnt), + (end_cnt - fail_to_delete_mcg-end_ipoib_cnt) ); + } + + p_mgrp_mlid_tbl = &p_osmt->exp_subn.mgrp_mlid_tbl; + p_mgrp = (osmtest_mgrp_t*)cl_qmap_head( p_mgrp_mlid_tbl ); + while( p_mgrp != (osmtest_mgrp_t*)cl_qmap_end( p_mgrp_mlid_tbl ) ) + { + uint16_t mlid = (uint16_t)cl_qmap_key((cl_map_item_t*)p_mgrp); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Found MLID:0x%04X\n", + mlid ); + /* Check if the mlid is in the test_created_mlids. If TRUE, then we + didn't delete a MCgroup that was created in this flow. */ + if ( cl_map_get (&test_created_mlids, mlid) != NULL ) + { + /* This means that we still have an mgrp that we created!! */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_mcast_flow: ERR 02FE: " + "Wasn't able to erase mgrp with MGID:0x%016" PRIx64 " : 0x%016" + PRIx64 " MLID:0x%04X\n", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id), + mlid ); + got_error = TRUE; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_run_mcast_flow: " + "Still exists %s MGID:0x%016" PRIx64 " : 0x%016" PRIx64 "\n", + (IS_IPOIB_MGID(&p_mgrp->mcmember_rec.mgid)) ? "IPoIB" : "non-IPoIB", + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.prefix), + cl_ntoh64(p_mgrp->mcmember_rec.mgid.unicast.interface_id) ); + } + p_mgrp = (osmtest_mgrp_t*)cl_qmap_next( &p_mgrp->map_item ); + } + + if (got_error) + { + __osmt_print_all_multicast_records(p_osmt); + status = IB_ERROR; + } + } + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmt_service.c b/branches/Ndi/ulp/opensm/user/osmtest/osmt_service.c new file mode 100644 index 00000000..4325ed5d --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/osmt_service.c @@ -0,0 +1,1833 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of service records testing flow.. + * Top level is osmt_run_service_records_flow: + * osmt_register_service + * osmt_get_service_by_name + * osmt_get_all_services + * osmt_delete_service_by_name + * + * Environment: + * Linux User Mode + * + * $Revision: 1.1 $ + */ + +#ifndef __WIN__ +#include +#else +#include +#endif +#include +#include +#include +#include +#include "osmtest.h" + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_register_service( IN osmtest_t * const p_osmt, + IN ib_net64_t service_id, + IN ib_net16_t service_pkey, + IN ib_net32_t service_lease, + IN uint8_t service_key_lsb, + IN char *service_name) { + osmv_query_req_t req; + osmv_user_query_t user; + osmtest_req_context_t context; + ib_service_record_t svc_rec; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status; + + OSM_LOG_ENTER( p_log, osmt_register_service ); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_register_service: " + "Registering service: name: %s id: 0x%" PRIx64 "\n", + service_name, cl_ntoh64(service_id)); + + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + memset( &user, 0, sizeof( user ) ); + memset( &svc_rec, 0, sizeof( svc_rec ) ); + + /* set the new service record fields */ + svc_rec.service_id = service_id; + svc_rec.service_pkey = service_pkey; + svc_rec.service_gid.unicast.prefix = 0; + svc_rec.service_gid.unicast.interface_id = p_osmt->local_port.port_guid; + svc_rec.service_lease = service_lease; + memset(&svc_rec.service_key, 0, 16*sizeof(uint8_t)); + svc_rec.service_key[0] = service_key_lsb; + memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name)); + memcpy(svc_rec.service_name, service_name, + (strlen(service_name)+1)*sizeof(char)); + + /* prepare the data used for this query */ + /* sa_mad_data.method = IB_MAD_METHOD_SET; */ + /* sa_mad_data.sm_key = 0; */ + + context.p_osmt = p_osmt; + req.query_context = &context; + req.query_type = OSMV_QUERY_USER_DEFINED; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.flags = OSM_SA_FLAGS_SYNC; + req.sm_key = 0; + req.timeout_ms = p_osmt->opt.transaction_timeout; + + user.method = IB_MAD_METHOD_SET; + user.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + if (ib_pkey_is_invalid(service_pkey)) + { + /* if given an 
invalid service_pkey - don't turn the PKEY compmask on */ + user.comp_mask = IB_SR_COMPMASK_SID | + IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SKEY | + IB_SR_COMPMASK_SNAME; + } + else + { + user.comp_mask = IB_SR_COMPMASK_SID | + IB_SR_COMPMASK_SGID | + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SKEY | + IB_SR_COMPMASK_SNAME; + } + user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) ); + user.p_attr = &svc_rec; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_register_service: ERR 4A01: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_register_service: ERR 4A02: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_register_service: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result. + p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_register_service_with_full_key ( IN osmtest_t * const p_osmt, + IN ib_net64_t service_id, + IN ib_net16_t service_pkey, + IN ib_net32_t service_lease, + IN uint8_t *service_key, + IN char *service_name) { + osmv_query_req_t req; + osmv_user_query_t user; + osmtest_req_context_t context; + ib_service_record_t svc_rec,*p_rec; + osm_log_t *p_log = &p_osmt->log; + ib_api_status_t status; + uint8_t i,skey[16]; + + OSM_LOG_ENTER( p_log, osmt_register_service_with_full_key ); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_register_service_with_full_key: " + "Registering service: name: %s id: 0x%" PRIx64 "\n", + service_name, cl_ntoh64(service_id)); + + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + memset( &user, 0, sizeof( user ) ); + memset( &svc_rec, 0, sizeof( svc_rec ) ); + + /* set the new service record fields */ + svc_rec.service_id = service_id; + svc_rec.service_pkey = service_pkey; + svc_rec.service_gid.unicast.prefix = 0; + svc_rec.service_gid.unicast.interface_id = p_osmt->local_port.port_guid; + svc_rec.service_lease = service_lease; + memset(&svc_rec.service_key, 0, 16*sizeof(uint8_t)); + memcpy(svc_rec.service_key,service_key, 16*sizeof(uint8_t)); + memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name)); + memset(skey, 0, 16*sizeof(uint8_t)); + memcpy(svc_rec.service_name, service_name, + (strlen(service_name)+1)*sizeof(char)); + + /* prepare the data used for this query */ + /* sa_mad_data.method = IB_MAD_METHOD_SET; */ + /* sa_mad_data.sm_key = 0; */ + + context.p_osmt = p_osmt; + req.query_context = &context; + req.query_type = OSMV_QUERY_USER_DEFINED; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.flags = OSM_SA_FLAGS_SYNC; + req.sm_key = 0; + req.timeout_ms = p_osmt->opt.transaction_timeout; + + user.method = IB_MAD_METHOD_SET; + user.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + if (ib_pkey_is_invalid(service_pkey)) + { + /* if given an invalid service_pkey - don't turn the 
PKEY compmask on */
+    user.comp_mask = IB_SR_COMPMASK_SID |
+                     IB_SR_COMPMASK_SGID |
+                     IB_SR_COMPMASK_SLEASE |
+                     IB_SR_COMPMASK_SKEY |
+                     IB_SR_COMPMASK_SNAME;
+  }
+  else
+  {
+    user.comp_mask = IB_SR_COMPMASK_SID |
+                     IB_SR_COMPMASK_SGID |
+                     IB_SR_COMPMASK_SPKEY |
+                     IB_SR_COMPMASK_SLEASE |
+                     IB_SR_COMPMASK_SKEY |
+                     IB_SR_COMPMASK_SNAME;
+  }
+  user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) );
+  user.p_attr = &svc_rec;
+
+  status = osmv_query_sa( p_osmt->h_bind, &req );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_full_key: ERR 4A03: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  status = context.result.status;
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_full_key: ERR 4A04: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+
+    if( status == IB_REMOTE_ERROR )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_register_service_with_full_key: "
+               "Remote error = %s\n",
+               ib_get_mad_status_str( osm_madw_get_mad_ptr
+                                      ( context.result.
+                                        p_result_madw ) ) );
+    }
+    goto Exit;
+  }
+
+  /* Compare the returned service key with the one we sent */
+  p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 );
+  osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+           "Comparing service key...\n"
+           "return key is:\n");
+  for (i = 0; i <= 15; i++)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "service_key sent[%u] = %u, service_key returned[%u] = %u\n",
+             i,service_key[i],i,p_rec->service_key[i]);
+  }
+  /* Since key association queries are not supported (C15-0.1.14), the
+     service key returned by the SA is expected to be all zeros */
+  if (memcmp(skey,p_rec->service_key, 16*sizeof(uint8_t)) != 0)
+  {
+    status = IB_REMOTE_ERROR;
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_full_key: ERR 4A33: "
+             "Data mismatch in service_key\n"
+             );
+    goto Exit;
+  }
+
+ Exit:
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return status;
+}
+
+/**********************************************************************
+ **********************************************************************/
+
+ib_api_status_t
+osmt_register_service_with_data( IN osmtest_t * const p_osmt,
+                                 IN ib_net64_t service_id,
+                                 IN ib_net16_t service_pkey,
+                                 IN ib_net32_t service_lease,
+                                 IN uint8_t service_key_lsb,
+                                 IN uint8_t *service_data8,
+                                 IN ib_net16_t *service_data16,
+                                 IN ib_net32_t *service_data32,
+                                 IN ib_net64_t *service_data64,
+                                 IN char *service_name) {
+  osmv_query_req_t req;
+  osmv_user_query_t user;
+  osmtest_req_context_t context;
+  ib_service_record_t svc_rec,*p_rec;
+  osm_log_t *p_log = &p_osmt->log;
+  ib_api_status_t status;
+  /* ib_service_record_t* p_rec; */
+
+  OSM_LOG_ENTER( p_log, osmt_register_service_with_data );
+
+  osm_log( &p_osmt->log, OSM_LOG_INFO,
+           "osmt_register_service_with_data: "
+           "Registering service: name: %s id: 0x%" PRIx64 "\n",
+           service_name, cl_ntoh64(service_id));
+
+  memset( &req, 0, sizeof( req ) );
+  memset( &context, 0, sizeof( context ) );
+  memset( &user, 0, sizeof( user ) );
+  memset( &svc_rec, 0, sizeof( svc_rec ) );
+
+  /* set the new service record fields */
+  svc_rec.service_id = service_id;
+  svc_rec.service_pkey = service_pkey;
+  svc_rec.service_gid.unicast.prefix = 0;
+  svc_rec.service_gid.unicast.interface_id = p_osmt->local_port.port_guid;
+  svc_rec.service_lease = service_lease;
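+  /* ServiceData layout note: the ServiceRecord carries 16 1-byte,
+   * 8 2-byte, 4 4-byte and 2 8-byte data fields, i.e. 16+16+16+16 = 64
+   * bytes in total, which the four memcpy sizes below must match. */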
+  memset(&svc_rec.service_key, 0, 16*sizeof(uint8_t));
+  svc_rec.service_key[0] = service_key_lsb;
+
+  /* Copy data to service_data arrays */
+  memcpy(svc_rec.service_data8, service_data8, 16*sizeof(uint8_t));
+  memcpy(svc_rec.service_data16, service_data16, 8*sizeof(ib_net16_t));
+  memcpy(svc_rec.service_data32, service_data32, 4*sizeof(ib_net32_t));
+  memcpy(svc_rec.service_data64, service_data64, 2*sizeof(ib_net64_t));
+
+  memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name));
+  memcpy(svc_rec.service_name, service_name,
+         (strlen(service_name)+1)*sizeof(char));
+
+  /* prepare the data used for this query */
+  /* sa_mad_data.method = IB_MAD_METHOD_SET; */
+  /* sa_mad_data.sm_key = 0; */
+
+  context.p_osmt = p_osmt;
+  req.query_context = &context;
+  req.query_type = OSMV_QUERY_USER_DEFINED;
+  req.pfn_query_cb = osmtest_query_res_cb;
+  req.p_query_input = &user;
+  req.flags = OSM_SA_FLAGS_SYNC;
+  req.sm_key = 0;
+  req.timeout_ms = p_osmt->opt.transaction_timeout;
+
+  user.method = IB_MAD_METHOD_SET;
+  user.attr_id = IB_MAD_ATTR_SERVICE_RECORD;
+  if (ib_pkey_is_invalid(service_pkey))
+  {
+    /* if given an invalid service_pkey - don't turn the PKEY compmask on */
+    user.comp_mask = IB_SR_COMPMASK_SID |
+                     IB_SR_COMPMASK_SGID |
+                     IB_SR_COMPMASK_SLEASE |
+                     IB_SR_COMPMASK_SKEY |
+                     IB_SR_COMPMASK_SNAME |
+                     IB_SR_COMPMASK_SDATA8_0 |
+                     IB_SR_COMPMASK_SDATA8_1 |
+                     IB_SR_COMPMASK_SDATA16_0 |
+                     IB_SR_COMPMASK_SDATA16_1 |
+                     IB_SR_COMPMASK_SDATA32_0 |
+                     IB_SR_COMPMASK_SDATA32_1 |
+                     IB_SR_COMPMASK_SDATA64_0 |
+                     IB_SR_COMPMASK_SDATA64_1;
+  }
+  else
+  {
+    user.comp_mask = IB_SR_COMPMASK_SID |
+                     IB_SR_COMPMASK_SGID |
+                     IB_SR_COMPMASK_SPKEY |
+                     IB_SR_COMPMASK_SLEASE |
+                     IB_SR_COMPMASK_SKEY |
+                     IB_SR_COMPMASK_SNAME |
+                     IB_SR_COMPMASK_SDATA8_0 |
+                     IB_SR_COMPMASK_SDATA8_1 |
+                     IB_SR_COMPMASK_SDATA16_0 |
+                     IB_SR_COMPMASK_SDATA16_1 |
+                     IB_SR_COMPMASK_SDATA32_0 |
+                     IB_SR_COMPMASK_SDATA32_1 |
+                     IB_SR_COMPMASK_SDATA64_0 |
+                     IB_SR_COMPMASK_SDATA64_1;
+  }
+  user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) );
+  user.p_attr = &svc_rec;
+
+  /* Dump the service data before sending */
+  osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+           "osmt_register_service_with_data: "
+           "Dumping service data before send\n");
+  osm_dump_service_record(&p_osmt->log,&svc_rec,OSM_LOG_VERBOSE);
+
+  status = osmv_query_sa( p_osmt->h_bind, &req );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_data: ERR 4A05: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  status = context.result.status;
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_data: ERR 4A06: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+
+    if( status == IB_REMOTE_ERROR )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_register_service_with_data: "
+               "Remote error = %s\n",
+               ib_get_mad_status_str( osm_madw_get_mad_ptr
+                                      ( context.result.
+                                        p_result_madw ) ) );
+    }
+    goto Exit;
+  }
+
+  /* Compare the returned service data with what was sent */
+  p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 );
+  osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+           "Comparing service data...\n");
+  if (memcmp(service_data8, p_rec->service_data8,16*sizeof(uint8_t)) != 0 ||
+      memcmp(service_data16, p_rec->service_data16,8*sizeof(uint16_t)) != 0 ||
+      memcmp(service_data32, p_rec->service_data32,4*sizeof(uint32_t)) != 0 ||
+      memcmp(service_data64, p_rec->service_data64,2*sizeof(uint64_t)) != 0
+     )
+  {
+    status = IB_REMOTE_ERROR;
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_register_service_with_data: "
+             "Data mismatch in service_data\n"
+             );
+    goto Exit;
+  }
+
+ Exit:
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return status;
+}
+
+/**********************************************************************
+ **********************************************************************/
+
+ib_api_status_t
+osmt_get_service_by_id_and_name ( IN osmtest_t * const p_osmt,
+                                  IN uint32_t rec_num,
+                                  IN ib_net64_t sid,
+                                  IN char *sr_name,
+                                  OUT ib_service_record_t *p_out_rec) {
+
+  ib_api_status_t status = IB_SUCCESS;
+  osmtest_req_context_t context;
+  osmv_query_req_t req;
+  ib_service_record_t svc_rec,*p_rec;
+  uint32_t num_recs = 0;
+  osmv_user_query_t user;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_get_service_by_id_and_name );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmt_get_service_by_id_and_name: "
+             "Getting service record: id: 0x%016" PRIx64 " and name: %s\n",
+             cl_ntoh64(sid),sr_name);
+
+  /*
+   * Do a blocking query for this record in the subnet.
+   * The result is returned in the result field of the caller's
+   * context structure.
+   *
+   * The query structures are locals.
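+   * OSM_SA_FLAGS_SYNC below makes osmv_query_sa() block until the
+   * callback has completed, so these stack variables stay valid for
+   * the whole transaction.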
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + /* prepare the data used for this query */ + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.sm_key = 0; + + memset( &svc_rec, 0, sizeof( svc_rec ) ); + memset( &user, 0, sizeof( user ) ); + /* set the new service record fields */ + memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name)); + memcpy(svc_rec.service_name, sr_name, + (strlen(sr_name)+1)*sizeof(char)); + svc_rec.service_id = sid; + req.p_query_input = &user; + + user.method = IB_MAD_METHOD_GET; + user.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + user.comp_mask = IB_SR_COMPMASK_SID | IB_SR_COMPMASK_SNAME; + user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) ); + user.p_attr = &svc_rec; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id_and_name: ERR 4A07: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + num_recs = context.result.result_cnt; + + if( status != IB_SUCCESS ) + { + char mad_stat_err[256]; + + /* If the failure is due to IB_SA_MAD_STATUS_NO_RECORDS and rec_num is 0, + then this is fine */ + if( status == IB_REMOTE_ERROR ) + strcpy(mad_stat_err, ib_get_mad_status_str( + osm_madw_get_mad_ptr(context.result.p_result_madw) ) ); + else + strcpy(mad_stat_err, ib_get_err_str(status) ); + if( status == IB_REMOTE_ERROR && + !strcmp(mad_stat_err, "IB_SA_MAD_STATUS_NO_RECORDS") && + rec_num == 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id_and_name: " + "IS EXPECTED ERROR ^^^^\n"); + status = IB_SUCCESS; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id_and_name: ERR 4A08: " + "Query failed: %s (%s)\n", + ib_get_err_str(status), + mad_stat_err ); + goto Exit; + } + } + + if ( rec_num && num_recs != rec_num ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id_and_name: " + "Unmatched number of records: expected: %d, received: %d\n", + rec_num, num_recs); + status = IB_REMOTE_ERROR; + goto Exit; + } + + p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 ); + *p_out_rec = *p_rec; + + if (num_recs) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_id_and_name: " + "Found service record: name: %s id: 0x%016" PRIx64 "\n", + p_rec->service_name, cl_ntoh64(p_rec->service_id)); + + osm_dump_service_record(&p_osmt->log, p_rec, OSM_LOG_DEBUG); + } + + Exit: + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_id_and_name: " + "Expected and found %d records\n", + rec_num ); + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_get_service_by_id ( IN osmtest_t * const p_osmt, + IN uint32_t rec_num, + IN ib_net64_t sid, + OUT ib_service_record_t *p_out_rec) { + + ib_api_status_t status = IB_SUCCESS; + osmtest_req_context_t context; + osmv_query_req_t req; + ib_service_record_t svc_rec,*p_rec; + 
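/* svc_rec is the query template; p_rec will point into the result MAD */
+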
uint32_t num_recs = 0; + osmv_user_query_t user; + + OSM_LOG_ENTER( &p_osmt->log, osmt_get_service_by_id ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_id: " + "Getting service record: id: 0x%016" PRIx64 "\n", + cl_ntoh64(sid)); + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. + */ + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + /* prepare the data used for this query */ + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.sm_key = 0; + + memset( &svc_rec, 0, sizeof( svc_rec ) ); + memset( &user, 0, sizeof( user ) ); + /* set the new service record fields */ + svc_rec.service_id = sid; + req.p_query_input = &user; + + user.method = IB_MAD_METHOD_GET; + user.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + user.comp_mask = IB_SR_COMPMASK_SID; + user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) ); + user.p_attr = &svc_rec; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id: ERR 4A09: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + num_recs = context.result.result_cnt; + + if( status != IB_SUCCESS ) + { + char mad_stat_err[256]; + + /* If the failure is due to IB_SA_MAD_STATUS_NO_RECORDS and rec_num is 0, + then this is fine */ + if( status == IB_REMOTE_ERROR ) + strcpy(mad_stat_err, ib_get_mad_status_str( + osm_madw_get_mad_ptr(context.result.p_result_madw) ) ); + else + strcpy(mad_stat_err, ib_get_err_str(status) ); + + if( status == IB_REMOTE_ERROR && + !strcmp(mad_stat_err, "IB_SA_MAD_STATUS_NO_RECORDS") && + rec_num == 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id: " + "IS EXPECTED ERROR ^^^^\n"); + status = IB_SUCCESS; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id: ERR 4A0A: " + "Query failed: %s (%s)\n", + ib_get_err_str(status), + mad_stat_err ); + goto Exit; + } + } + + if ( rec_num && num_recs != rec_num ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_id: ERR 4A0B: " + "Unmatched number of records: expected: %d received: %d\n", + rec_num, num_recs); + status = IB_REMOTE_ERROR; + goto Exit; + } + + p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 ); + *p_out_rec = *p_rec; + + if (num_recs) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_id: " + "Found service record: name: %s id: 0x%016" PRIx64 "\n", + p_rec->service_name, cl_ntoh64(p_rec->service_id)); + + osm_dump_service_record(&p_osmt->log, p_rec, OSM_LOG_DEBUG); + } + + Exit: + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_id: " + "Expected and found %d records\n", + rec_num ); + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ + 
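/* Illustrative usage sketch (compiled out with #if 0): shows how the
+ * lookup helpers above combine in a caller. The service ID and name
+ * literals here are placeholders only - they are not values this test
+ * flow registers anywhere. */
+#if 0
+static ib_api_status_t
+example_service_lookup( IN osmtest_t * const p_osmt )
+{
+  ib_service_record_t rec;
+  ib_api_status_t status;
+
+  /* expect exactly one record for an (id, name) pair known to exist */
+  status = osmt_get_service_by_id_and_name( p_osmt, 1,
+                                            cl_hton64( 0x1234ULL ),
+                                            "example.srvc", &rec );
+  if( status != IB_SUCCESS )
+    return status;
+
+  /* expect zero records for an ID that was never registered */
+  return osmt_get_service_by_id( p_osmt, 0, cl_hton64( 0x5678ULL ), &rec );
+}
+#endif
+
+/**********************************************************************
+ **********************************************************************/
+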
+ib_api_status_t
+osmt_get_service_by_name_and_key ( IN osmtest_t * const p_osmt,
+                                   IN char * sr_name,
+                                   IN uint32_t rec_num,
+                                   IN uint8_t *skey,
+                                   OUT ib_service_record_t *p_out_rec) {
+
+  ib_api_status_t status = IB_SUCCESS;
+  osmtest_req_context_t context;
+  osmv_query_req_t req;
+  ib_service_record_t svc_rec,*p_rec;
+  uint32_t num_recs = 0, i;
+  osmv_user_query_t user;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_get_service_by_name_and_key );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    char buf_service_key[35]; /* "0x" + 32 hex digits + '\0' */
+
+    sprintf(buf_service_key,
+            "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x",
+            skey[0], skey[1], skey[2], skey[3], skey[4], skey[5], skey[6], skey[7],
+            skey[8], skey[9], skey[10], skey[11], skey[12], skey[13], skey[14],
+            skey[15]);
+
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmt_get_service_by_name_and_key: "
+             "Getting service record: name: %s and key: %s\n",
+             sr_name, buf_service_key );
+  }
+
+  /*
+   * Do a blocking query for this record in the subnet.
+   * The result is returned in the result field of the caller's
+   * context structure.
+   *
+   * The query structures are locals.
+   */
+  memset( &req, 0, sizeof( req ) );
+  memset( &context, 0, sizeof( context ) );
+
+  context.p_osmt = p_osmt;
+
+  /* prepare the data used for this query */
+  req.query_type = OSMV_QUERY_USER_DEFINED;
+  req.timeout_ms = p_osmt->opt.transaction_timeout;
+  req.retry_cnt = p_osmt->opt.retry_count;
+  req.flags = OSM_SA_FLAGS_SYNC;
+  req.query_context = &context;
+  req.pfn_query_cb = osmtest_query_res_cb;
+  req.sm_key = 0;
+
+  memset( &svc_rec, 0, sizeof( svc_rec ) );
+  memset( &user, 0, sizeof( user ) );
+  /* set the new service record fields */
+  memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name));
+  memcpy(svc_rec.service_name, sr_name,
+         (strlen(sr_name)+1)*sizeof(char));
+  for (i = 0; i <= 15; i++)
+    svc_rec.service_key[i] = skey[i];
+
+  req.p_query_input = &user;
+
+  user.method = IB_MAD_METHOD_GET;
+  user.attr_id = IB_MAD_ATTR_SERVICE_RECORD;
+  user.comp_mask = IB_SR_COMPMASK_SNAME | IB_SR_COMPMASK_SKEY;
+  user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) );
+  user.p_attr = &svc_rec;
+  status = osmv_query_sa( p_osmt->h_bind, &req );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_get_service_by_name_and_key: ERR 4A0C: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  status = context.result.status;
+  num_recs = context.result.result_cnt;
+
+  if( status != IB_SUCCESS )
+  {
+    char mad_stat_err[256];
+
+    /* If the failure is due to IB_SA_MAD_STATUS_NO_RECORDS and rec_num is 0,
+       then this is fine */
+    if( status == IB_REMOTE_ERROR )
+      strcpy(mad_stat_err, ib_get_mad_status_str(
+               osm_madw_get_mad_ptr(context.result.p_result_madw) ) );
+    else
+      strcpy(mad_stat_err, ib_get_err_str(status) );
+
+    if( status == IB_REMOTE_ERROR &&
+        !strcmp(mad_stat_err, "IB_SA_MAD_STATUS_NO_RECORDS") &&
+        rec_num == 0 )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_get_service_by_name_and_key: "
+               "IS EXPECTED ERROR ^^^^\n");
+      status = IB_SUCCESS;
+    }
+    else
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_get_service_by_name_and_key: ERR 4A0D: "
+               "Query failed: %s (%s)\n",
+               ib_get_err_str(status),
+               mad_stat_err );
+      goto Exit;
+    }
+  }
+
+  if ( rec_num && num_recs != rec_num )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_get_service_by_name_and_key: "
+             "Unmatched number of records: expected: %d, received: %d\n",
+             rec_num, num_recs);
+    status = IB_REMOTE_ERROR;
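/* the SA answered, but with unexpected content - report it as a
+       remote (SA-side) failure rather than a local API error */
+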
goto Exit; + } + + p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 ); + *p_out_rec = *p_rec; + + if ( num_recs ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_name_and_key: " + "Found service record: name: %s id: 0x%016" PRIx64 "\n", + sr_name, cl_ntoh64(p_rec->service_id)); + + osm_dump_service_record(&p_osmt->log, p_rec, OSM_LOG_DEBUG); + } + + Exit: + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_name_and_key: " + "Expected and found %d records\n", + rec_num ); + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_get_service_by_name( IN osmtest_t * const p_osmt, + IN char * sr_name, + IN uint32_t rec_num, + OUT ib_service_record_t *p_out_rec) { + + ib_api_status_t status = IB_SUCCESS; + osmtest_req_context_t context; + osmv_query_req_t req; + ib_service_record_t* p_rec; + ib_svc_name_t service_name; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmt_get_service_by_name ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_name: " + "Getting service record: name: %s\n", + sr_name); + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. + */ + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + /* prepare the data used for this query */ + req.query_type = OSMV_QUERY_SVC_REC_BY_NAME; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.sm_key = 0; + + memset(service_name, 0, sizeof(service_name)); + memcpy(service_name, sr_name, (strlen(sr_name)+1)*sizeof(char)); + req.p_query_input = service_name; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_name: ERR 4A0E: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + num_recs = context.result.result_cnt; + + if( status != IB_SUCCESS ) + { + char mad_stat_err[256]; + + /* If the failure is due to IB_SA_MAD_STATUS_NO_RECORDS and rec_num is 0, + then this is fine */ + if( status == IB_REMOTE_ERROR ) + strcpy(mad_stat_err, ib_get_mad_status_str( + osm_madw_get_mad_ptr(context.result.p_result_madw) ) ); + else + strcpy(mad_stat_err, ib_get_err_str(status) ); + + if( status == IB_REMOTE_ERROR && + !strcmp(mad_stat_err, "IB_SA_MAD_STATUS_NO_RECORDS") && + rec_num == 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_name: " + "IS EXPECTED ERROR ^^^^\n"); + status = IB_SUCCESS; + } + else + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_name: ERR 4A0F: " + "Query failed: %s (%s)\n", + ib_get_err_str(status), + mad_stat_err ); + goto Exit; + } + } + + if ( rec_num && num_recs != rec_num ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_service_by_name: ERR 4A10: " + "Unmatched number of records: expected: %d, received: %d\n", + 
rec_num, num_recs); + status = IB_REMOTE_ERROR; + goto Exit; + } + + p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, 0 ); + *p_out_rec = *p_rec; + + if (num_recs) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_name: " + "Found service record: name: %s id: 0x%016" PRIx64 "\n", + sr_name, cl_ntoh64(p_rec->service_id)); + + osm_dump_service_record(&p_osmt->log, p_rec, OSM_LOG_DEBUG); + } + + Exit: + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_service_by_name: " + "Expected and found %d records\n", + rec_num ); + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + +/********************************************************************** + **********************************************************************/ + +#ifdef VENDOR_RMPP_SUPPORT +ib_api_status_t +osmt_get_all_services_and_check_names( IN osmtest_t * const p_osmt, + IN ib_svc_name_t * const p_valid_service_names_arr, + IN uint8_t num_of_valid_names, + OUT uint32_t *num_services) { + ib_api_status_t status = IB_SUCCESS; + osmtest_req_context_t context; + osmv_query_req_t req; + ib_service_record_t* p_rec; + uint32_t num_recs = 0,i,j; + uint8_t *p_checked_names; + + OSM_LOG_ENTER(&p_osmt->log, osmt_get_all_services_and_check_names ); + + /* Prepare tracker for the checked names */ + p_checked_names = (uint8_t*)malloc(sizeof(uint8_t)*num_of_valid_names); + for (j = 0 ; j < num_of_valid_names ; j++) + { + p_checked_names[j] = 0; + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_all_services_and_check_names: " + "Getting all service records\n"); + } + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. + */ + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + req.query_type = OSMV_QUERY_ALL_SVC_RECS; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_all_services_and_check_names: ERR 4A12: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + if (status != IB_INVALID_PARAMETER) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_all_services_and_check_names: ERR 4A13: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + } + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_all_services_and_check_names: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result. 
+ p_result_madw ) ) ); + } + goto Exit; + } + + num_recs = context.result.result_cnt; + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_all_services_and_check_names: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_svc_rec( context.result.p_result_madw, i ); + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_all_services_and_check_names: " + "Found service record: name: %s id: 0x%016" PRIx64 "\n", + p_rec->service_name, cl_ntoh64(p_rec->service_id)); + osm_dump_service_record(&p_osmt->log, p_rec, OSM_LOG_VERBOSE); + for ( j = 0; j < num_of_valid_names; j++) + { + /* If the service names exist in the record, mark it as checked (1) */ + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_all_services_and_check_names: " + "-I- Comparing source name : >%s<, with record name : >%s<, idx : %d\n", + p_valid_service_names_arr[j], p_rec->service_name, p_checked_names[j]); + if ( strcmp((char *)p_valid_service_names_arr[j], + (char *)p_rec->service_name) == 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_get_all_services_and_check_names: " + "-I- The service %s is valid\n", p_valid_service_names_arr[j]); + p_checked_names[j] = 1; + break; + } + } + } + /* Check that all service names have been identified */ + for ( j = 0; j < num_of_valid_names; j++) + if (p_checked_names[j] == 0) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_get_all_services_and_check_names: ERR 4A14: " + "Missing valid service: name: %s\n", p_valid_service_names_arr[j]); + status = IB_ERROR; + goto Exit; + } + *num_services = num_recs; + + Exit: + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} +#endif + +/********************************************************************** + **********************************************************************/ + +ib_api_status_t +osmt_delete_service_by_name(IN osmtest_t * const p_osmt, + IN uint8_t IsServiceExist, + IN char * sr_name, IN uint32_t rec_num) { + osmv_query_req_t req; + osmv_user_query_t user; + osmtest_req_context_t context; + ib_service_record_t svc_rec; + ib_api_status_t status; + + OSM_LOG_ENTER( &p_osmt->log, osmt_delete_service_by_name); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmt_delete_service_by_name: " + "Trying to Delete service name: %s\n", + sr_name); + + memset( &svc_rec, 0, sizeof( svc_rec ) ); + + status = osmt_get_service_by_name(p_osmt, sr_name,rec_num, &svc_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_delete_service_by_name: ERR 4A15: " + "Failed to get service: name: %s\n", + sr_name ); + goto ExitNoDel; + } + + memset( &req, 0, sizeof( req ) ); + memset( &context, 0, sizeof( context ) ); + memset( &user, 0, sizeof( user ) ); + + /* set the new service record fields */ + memset(svc_rec.service_name, 0, sizeof(svc_rec.service_name)); + memcpy(svc_rec.service_name, sr_name, + (strlen(sr_name)+1)*sizeof(char)); + + /* prepare the data used for this query */ + context.p_osmt = p_osmt; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.query_context = &context; + req.query_type = OSMV_QUERY_USER_DEFINED; /* basically a don't care here */ + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.flags = OSM_SA_FLAGS_SYNC; + req.sm_key = 0; + + user.method = IB_MAD_METHOD_DELETE; + user.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + user.comp_mask = 
IB_SR_COMPMASK_SNAME;
+  user.attr_offset = ib_get_attr_offset( sizeof( ib_service_record_t ) );
+  user.p_attr = &svc_rec;
+
+  status = osmv_query_sa( p_osmt->h_bind, &req );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_delete_service_by_name: ERR 4A16: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  status = context.result.status;
+  if ( IsServiceExist )
+  {
+    /* If IsServiceExist = 1 then we should succeed here */
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_delete_service_by_name: ERR 4A17: "
+               "ib_query failed (%s)\n", ib_get_err_str( status ) );
+
+      if( status == IB_REMOTE_ERROR )
+      {
+        osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                 "osmt_delete_service_by_name: ERR 4A18: "
+                 "Remote error = %s\n",
+                 ib_get_mad_status_str( osm_madw_get_mad_ptr
+                                        ( context.result.
+                                          p_result_madw ) ) );
+      }
+    }
+  }
+  else
+  {
+    /* If IsServiceExist = 0 then we should fail here */
+    if ( status == IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_delete_service_by_name: ERR 4A19: "
+               "Succeeded to delete service: %s which "
+               "shouldn't exist\n",
+               sr_name );
+      status = IB_ERROR;
+    }
+    else
+    {
+      /* The deletion should have failed, since the service_name
+         shouldn't exist. */
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmt_delete_service_by_name: "
+               "IS EXPECTED ERROR ^^^^\n");
+      osm_log( &p_osmt->log, OSM_LOG_INFO,
+               "osmt_delete_service_by_name: "
+               "Failed to delete service_name: %s\n",
+               sr_name );
+      status = IB_SUCCESS;
+    }
+  }
+
+ Exit:
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+ ExitNoDel:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return status;
+}
+
+/**********************************************************************
+ **********************************************************************/
+
+/*
+ * Run a complete service records flow:
+ * - register a service
+ * - register a service (with a lease period)
+ * - get a service by name
+ * - get all services / must be 2
+ * - delete a service
+ * - get all services / must be 1
+ * - wait for the lease to expire
+ * - get all services / must be 0
+ * - get / set service by data
+ */
+ib_api_status_t
+osmt_run_service_records_flow( IN osmtest_t * const p_osmt ) {
+  ib_service_record_t srv_rec;
+  ib_api_status_t status;
+  uint8_t instance,i;
+  uint8_t service_data8[16],service_key[16];
+  ib_net16_t service_data16[8];
+  ib_net32_t service_data32[4];
+  ib_net64_t service_data64[2];
+  uint64_t pid = getpid();
+  uint64_t id[7];
+  /* We use up to seven service names - the extra ones are for bad flow */
+  ib_svc_name_t service_name[7];
+#ifdef VENDOR_RMPP_SUPPORT
+  /* This array contains only the names still valid after registering vs the SM */
+  ib_svc_name_t service_valid_names[3];
+  uint32_t num_recs = 0;
+#endif
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_run_service_records_flow);
+
+  /* Init Service names */
+  for (i = 0; i <= 6; i++) {
+#ifdef __WIN__
+    uint64_t rand_val = rand()-(uint64_t)i;
+#else
+    uint64_t rand_val = random()-(uint64_t)i;
+#endif
+    id[i] = abs((int)(pid - rand_val));
+    /* Just to be unique any place on any host */
+    sprintf((char*)(service_name[i]),
+            "osmt.srvc.%" PRIu64 ".%" PRIu64, rand_val,pid);
+    /*printf("-I- Service Name is : %s, ID is : 0x%" PRIx64 "\n",service_name[i],id[i]);*/
+  }
+
+  status = osmt_register_service(
+    p_osmt,
+    cl_ntoh64(id[0]), /* IN ib_net64_t service_id, */
+    IB_DEFAULT_PKEY,/* IN ib_net16_t
service_pkey, */ + 0xFFFFFFFF, /* IN ib_net32_t service_lease, */ + 11, /* IN uint8_t service_key_lsb, */ + (char*)service_name[0] /* IN char *service_name */ + ); + if (status != IB_SUCCESS) + { + goto Exit; + } + + status = osmt_register_service( + p_osmt, + cl_ntoh64(id[1]), /* IN ib_net64_t service_id, */ + IB_DEFAULT_PKEY,/* IN ib_net16_t service_pkey, */ + cl_hton32(0x00000004), /* IN ib_net32_t service_lease, */ + 11, /* IN uint8_t service_key_lsb, */ + (char*)service_name[1] /* IN char *service_name */ + ); + if (status != IB_SUCCESS) + { + goto Exit; + } + + status = osmt_register_service( + p_osmt, + cl_ntoh64(id[2]), /* IN ib_net64_t service_id, */ + 0, /* IN ib_net16_t service_pkey, */ + 0xFFFFFFFF, /* IN ib_net32_t service_lease, */ + 11, /* Remove Service Record IN uint8_t service_key_lsb, */ + (char*)service_name[2] /* IN char *service_name */ + ); + + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* Generate 2 instances of service record with consecutive data */ + for (instance = 0 ; instance < 2 ; instance++) { + /* First, clear all arrays */ + memset (service_data8, 0, 16*sizeof(uint8_t)); + memset (service_data16, 0, 8*sizeof(uint16_t)); + memset (service_data32, 0, 4*sizeof(uint32_t)); + memset (service_data64, 0, 2*sizeof(uint64_t)); + service_data8[instance] = instance+1; + service_data16[instance] = cl_hton16(instance+2); + service_data32[instance] = cl_hton32(instance+3); + service_data64[instance] = cl_hton64(instance+4); + status = osmt_register_service_with_data( + p_osmt, + cl_ntoh64(id[3]), /* IN ib_net64_t service_id, */ + IB_DEFAULT_PKEY, /* IN ib_net16_t service_pkey, */ + cl_ntoh32(10), /* IN ib_net32_t service_lease, */ + 12, /* IN uint8_t service_key_lsb, */ + service_data8,service_data16,service_data32,service_data64, /* service data structures */ + (char*)service_name[3] /* IN char *service_name */ + ); + + if (status != IB_SUCCESS) + { + goto Exit; + } + + } + + /* Trying to create service with zero key */ + memset (service_key, 0, 16*sizeof(uint8_t)); + status = osmt_register_service_with_full_key( + p_osmt, + cl_ntoh64(id[5]), /* IN ib_net64_t service_id, */ + 0, /* IN ib_net16_t service_pkey, */ + 0xFFFFFFFF, /* IN ib_net32_t service_lease, */ + service_key, /* full service_key, */ + (char*)service_name[5] /* IN char *service_name */ + ); + + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* Now update it with Unique key and different service name */ + for (i = 0; i <= 15; i++) { + service_key[i] = i + 1; + } + status = osmt_register_service_with_full_key( + p_osmt, + cl_ntoh64(id[5]), /* IN ib_net64_t service_id, */ + 0, /* IN ib_net16_t service_pkey, */ + 0xFFFFFFFF, /* IN ib_net32_t service_lease, */ + service_key, /* full service_key, */ + (char*)service_name[6] /* IN char *service_name */ + ); + if (status != IB_SUCCESS) + { + goto Exit; + } + + /* Let OpenSM handle it */ + usleep(100); + + /* Make sure service_name[0] exists */ + status = osmt_get_service_by_name(p_osmt, + (char*)service_name[0], 1, &srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A1A: " + "Fail to find service: name: %s\n", + (char*)service_name[0] ); + status = IB_ERROR; + goto Exit; + } + + /* Make sure service_name[1] exists */ + status = osmt_get_service_by_name(p_osmt, + (char*)service_name[1], 1, &srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A1B: " + "Fail to find service: name: %s\n", + (char*)service_name[1] ); + 
status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Make sure service_name[2] exists */
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[2], 1, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A1C: "
+             "Fail to find service: name: %s\n",
+             (char*)service_name[2] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Make sure service_name[3] exists. */
+  /* After 10 seconds the service should not exist: service_lease = 10 */
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[3], 1, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A1D: "
+             "Fail to find service: name: %s\n",
+             (char*)service_name[3] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  sleep(10);
+
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[3], 0, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A1E: "
+             "Found service: name: %s that should have been "
+             "deleted due to service lease expiring\n",
+             (char*)service_name[3] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Check that for service: id[5] only one record exists */
+  status = osmt_get_service_by_id(p_osmt, 1, cl_ntoh64(id[5]), &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A1F: "
+             "Found number of records != 1 for "
+             "service: id: 0x%016" PRIx64 "\n",
+             id[5] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Bad flow: Get with an unregistered service ID: id[6]
+     (generated above but never registered) */
+  status = osmt_get_service_by_id(p_osmt, 0, cl_ntoh64(id[6]), &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A20: "
+             "Found service: id: 0x%016" PRIx64 " "
+             "that is invalid\n",
+             id[6] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Check by both id and service name: id[0], service_name[0] */
+  status = osmt_get_service_by_id_and_name(p_osmt, 1, cl_ntoh64(id[0]),
+                                           (char*)service_name[0], &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A21: "
+             "Fail to find service: id: 0x%016" PRIx64 " "
+             "name: %s\n",
+             id[0],
+             (char*)service_name[0] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Check by both id and service name: id[5], service_name[6] */
+  status = osmt_get_service_by_id_and_name(p_osmt, 1, cl_ntoh64(id[5]),
+                                           (char*)service_name[6], &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A22: "
+             "Fail to find service: id: 0x%016" PRIx64 " "
+             "name: %s\n",
+             id[5],
+             (char*)service_name[6] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Bad flow: Get with an invalid name (service_name[3]) and a valid ID (id[0]) */
+  status = osmt_get_service_by_id_and_name(p_osmt, 0, cl_ntoh64(id[0]),
+                                           (char*)service_name[3], &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A23: "
+             "Found service: id: 0x%016" PRIx64
+             "name: %s which is an invalid service\n",
+             id[0],
+             (char*)service_name[3] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Bad flow: Get with unmatched name (service_name[5]) and id (id[3]) (both valid) */
+  status = osmt_get_service_by_id_and_name(p_osmt, 0, cl_ntoh64(id[3]),
+                                           (char*)service_name[5], &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+
"osmt_run_service_records_flow: ERR 4A24: " + "Found service: id: 0x%016" PRIx64 + "name: %s which is an invalid service\n", + id[3], + (char*)service_name[5] ); + status = IB_ERROR; + goto Exit; + } + + /* Bad Flow of Get with service name that doesn't exist (service_name[4]) */ + status = osmt_get_service_by_name(p_osmt, + (char*)service_name[4], 0, &srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A25: " + "Found service: name: %s that shouldn't exist\n", + (char*)service_name[4] ); + status = IB_ERROR; + goto Exit; + } + + /* Bad Flow : Check that getting service_name[5] brings no records since another service + has been updated with the same ID (service_name[6] */ + status = osmt_get_service_by_name(p_osmt, + (char*)service_name[5], 0, &srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A26: " + "Found service: name: %s which is an " + "invalid service\n", + (char*)service_name[5] ); + status = IB_ERROR; + goto Exit; + } + + /* Check that getting service_name[6] by name ONLY is valid, + since we do not support key&name association, also trusted queries */ + status = osmt_get_service_by_name(p_osmt, + (char*)service_name[6], 1, &srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A27: " + "Fail to find service: name: %s\n", + (char*)service_name[6] ); + status = IB_ERROR; + goto Exit; + } + + /* Test Service Key */ + memset(service_key, 0, 16*sizeof(uint8_t)); + + /* Check for service_name[5] with service_key=0 - the service shouldn't + exist with this name. */ + status = osmt_get_service_by_name_and_key (p_osmt, + (char*)service_name[5], + 0, service_key,&srv_rec); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A28: " + "Found service: name: %s key:0 which is an " + "invalid service (wrong name)\n", + (char*)service_name[5] ); + status = IB_ERROR; + goto Exit; + } + + /* Check for service_name[6] with service_key=0 - the service should + exist with different key. 
+  */
+  status = osmt_get_service_by_name_and_key (p_osmt,
+                                             (char*)service_name[6],
+                                             0, service_key,&srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A29: "
+             "Found service: name: %s key: 0 which is an "
+             "invalid service (wrong service_key)\n",
+             (char*)service_name[6] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* check for service_name[6] with the correct service_key */
+  for (i = 0; i <= 15; i++)
+    service_key[i]=i + 1;
+  status = osmt_get_service_by_name_and_key (p_osmt,
+                                             (char*)service_name[6],
+                                             1, service_key, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2A: "
+             "Fail to find service: name: %s with "
+             "correct service key\n",
+             (char*)service_name[6] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+#ifdef VENDOR_RMPP_SUPPORT
+  /* These are the only service names that are still valid */
+  memcpy(&service_valid_names[0], &service_name[0], sizeof(uint8_t)*64);
+  memcpy(&service_valid_names[1], &service_name[2], sizeof(uint8_t)*64);
+  memcpy(&service_valid_names[2], &service_name[6], sizeof(uint8_t)*64);
+
+  status = osmt_get_all_services_and_check_names(p_osmt,service_valid_names, 3, &num_recs);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2B: "
+             "Fail to find all services that should exist\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+#endif
+
+  /* Delete service_name[0] */
+  status = osmt_delete_service_by_name(p_osmt, 1,
+                                       (char*)service_name[0], 1);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2C: "
+             "Fail to delete service: name: %s\n",
+             (char*)service_name[0] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Make sure deletion of service_name[0] succeeded */
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[0], 0, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2D: "
+             "Found service: name: %s that was deleted\n",
+             (char*)service_name[0] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Make sure service_name[1] doesn't exist (expired service lease) */
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[1], 0, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2E: "
+             "Found service: name: %s that should have expired\n",
+             (char*)service_name[1] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Make sure service_name[2] exists */
+  status = osmt_get_service_by_name(p_osmt,
+                                    (char*)service_name[2], 1, &srv_rec);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A2F: "
+             "Fail to find service: name: %s\n",
+             (char*)service_name[2] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Bad flow - try to delete the non-existent service_name[5] */
+  status = osmt_delete_service_by_name(p_osmt, 0,
+                                       (char*)service_name[5], 0);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A30: "
+             "Succeeded to delete non-existent service: name: %s\n",
+             (char*)service_name[5] );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* Delete service_name[2] */
+  status = osmt_delete_service_by_name(p_osmt, 1,
+                                       (char*)service_name[2], 1);
+  if (status != IB_SUCCESS)
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_service_records_flow: ERR 4A31: "
"Fail to delete service: name: %s\n", + (char*)service_name[2] ); + status = IB_ERROR; + goto Exit; + } + + /* Delete service_name[6] */ + status = osmt_delete_service_by_name(p_osmt, 1, + (char*)service_name[6], 1); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_run_service_records_flow: ERR 4A32: " + "Failed to delete service name: %s\n", + (char*)service_name[6] ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return status; +} + + diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmt_slvl_vl_arb.c b/branches/Ndi/ulp/opensm/user/osmtest/osmt_slvl_vl_arb.c new file mode 100644 index 00000000..45fe4916 --- /dev/null +++ b/branches/Ndi/ulp/opensm/user/osmtest/osmt_slvl_vl_arb.c @@ -0,0 +1,570 @@ +/* + * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved. + * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved. + * Copyright (c) 1996-2003 Intel Corporation. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* + * Abstract: + * Implementation of SLtoVL and VL Arbitration testing flow.. 
+ *    Top level is osmt_run_slvl_and_vlarb_records_flow:
+ *     osmt_query_all_ports_vl_arb
+ *     osmt_query_all_ports_slvl_map
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.2 $
+ */
+
+#ifndef __WIN__
+#include <unistd.h>
+#endif
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include "osmtest.h"
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_write_vl_arb_table( IN osmtest_t * const p_osmt,
+                            IN FILE * fh,
+                            IN const ib_vl_arb_table_record_t * const p_rec )
+{
+  int result,i;
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_vl_arb_table );
+
+  result = fprintf( fh,
+                    "VL_ARBITRATION_TABLE\n"
+                    "lid 0x%X\n"
+                    "port_num 0x%X\n"
+                    "block 0x%X\n",
+                    cl_ntoh16( p_rec->lid ),
+                    p_rec->port_num,
+                    p_rec->block_num );
+
+  fprintf( fh, " ");
+  for (i = 0; i < 32; i++)
+    fprintf( fh,"| %-2u ", i);
+  fprintf( fh, "|\nVL: ");
+
+  for (i = 0; i < 32; i++)
+    fprintf( fh,"|0x%02X",p_rec->vl_arb_tbl.vl_entry[i].vl);
+  fprintf( fh, "|\nWEIGHT:");
+
+  for (i = 0; i < 32; i++)
+    fprintf( fh,"|0x%02X",p_rec->vl_arb_tbl.vl_entry[i].weight);
+  fprintf( fh,"|\nEND\n\n");
+
+  /* Exit: */
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ * GET A SINGLE VL ARBITRATION TABLE BLOCK BY NODE LID AND PORT NUMBER
+ **********************************************************************/
+ib_api_status_t
+osmt_query_vl_arb(
+  IN osmtest_t * const p_osmt,
+  IN ib_net16_t const lid,
+  IN uint8_t const port_num,
+  IN uint8_t const block_num,
+  IN FILE *fh )
+{
+  osmtest_req_context_t context;
+  ib_api_status_t status = IB_SUCCESS;
+  osmv_user_query_t user;
+  osmv_query_req_t req;
+  ib_vl_arb_table_record_t record, *p_rec;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_query_vl_arb );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmt_query_vl_arb: "
+             "Getting VL_Arbitration Table for port with LID 0x%X Num:0x%X\n",
+             cl_ntoh16( lid ),
+             port_num );
+  }
+
+  /*
+   * Do a blocking query for this record in the subnet.
+   * The result is returned in the result field of the caller's
+   * context structure.
+   *
+   * The query structures are locals.
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + record.lid = lid; + record.port_num = port_num; + record.block_num = block_num; + user.p_attr = &record; + + req.query_type = OSMV_QUERY_VLARB_BY_LID_PORT_BLOCK; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + + if ( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_vl_arb: ERR 0405: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_vl_arb: ERR 0466: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_vl_arb: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result.p_result_madw ) ) ); + } + goto Exit; + } + + /* ok it worked */ + p_rec = osmv_get_query_result( context.result.p_result_madw, 0); + if ( fh ) + { + osmtest_write_vl_arb_table( p_osmt, fh, p_rec ); + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +static ib_api_status_t +osmt_query_all_ports_vl_arb( IN osmtest_t * const p_osmt, + IN FILE * fh ) +{ + cl_status_t status = CL_SUCCESS; + cl_qmap_t *p_tbl; + port_t *p_src_port; + uint8_t block, anyErr = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmt_query_all_ports_vl_arb ); + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_query_all_ports_vl_arb: " + "Obtaining ALL Ports VL Arbitration Tables\n"); + + /* + * Go over all ports that exist in the subnet + * get the relevant VLarbs + */ + + p_tbl = &p_osmt->exp_subn.port_key_tbl; + + p_src_port = ( port_t * ) cl_qmap_head( p_tbl ); + + while( p_src_port != ( port_t * ) cl_qmap_end( p_tbl ) ) + { + + /* HACK we use capability_mask to know diff a CA port from switch port */ + if( p_src_port->rec.port_info.capability_mask ) + { + /* this is an hca port */ + for (block = 1; block <= 4; block++) + { + /* NOTE to comply we must set port number to 0 and the SA should figure it out */ + /* since it is a CA port */ + status = osmt_query_vl_arb(p_osmt, p_src_port->rec.lid, 0, block, fh); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_all_ports_vl_arb: ERR 0467: " + "Failed to get Lid:0x%X Port:0x%X (%s)\n", + cl_ntoh16(p_src_port->rec.lid), 0, + ib_get_err_str( status ) ); + anyErr = 1; + } + } + } + else + { + /* this is a switch port */ + for (block = 1; block <= 4; block++) + { + status = osmt_query_vl_arb(p_osmt, p_src_port->rec.lid, + p_src_port->rec.port_num, block, fh); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_all_ports_vl_arb: ERR 0468: " + "Failed to get Lid:0x%X Port:0x%X (%s)\n", + cl_ntoh16(p_src_port->rec.lid), p_src_port->rec.port_num, + ib_get_err_str( status ) ); + anyErr = 1; + } + } + } + + p_src_port = ( port_t * ) cl_qmap_next( &p_src_port->map_item ); + } + + OSM_LOG_EXIT( &p_osmt->log 
);
+  if (anyErr)
+  {
+    status = IB_ERROR;
+  }
+  return ( status );
+}
+
+/*******************************************************************************
+ SLtoVL
+*******************************************************************************/
+static ib_api_status_t
+osmtest_write_slvl_map_table( IN osmtest_t * const p_osmt,
+                              IN FILE * fh,
+                              IN const ib_slvl_table_record_t * const p_rec )
+{
+  int result, i;
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_slvl_map_table );
+
+  result = fprintf( fh,
+                    "SLtoVL_MAP_TABLE\n"
+                    "lid 0x%X\n"
+                    "in_port_num 0x%X\n"
+                    "out_port_num 0x%X\n",
+                    cl_ntoh16( p_rec->lid ),
+                    p_rec->in_port_num,
+                    p_rec->out_port_num );
+
+  fprintf( fh, "SL:");
+  for (i = 0; i < 16; i++)
+    fprintf( fh,"| %-2u ", i);
+  fprintf( fh, "|\nVL:");
+
+  for (i = 0; i < 16; i++)
+    fprintf( fh,"| 0x%01X ", ib_slvl_table_get( &p_rec->slvl_tbl, (uint8_t)i));
+  fprintf( fh,"|\nEND\n\n");
+
+  /* Exit: */
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ * GET A SINGLE SLtoVL MAP TABLE BY NODE LID AND PORT NUMBERS
+ **********************************************************************/
+ib_api_status_t
+osmt_query_slvl_map(
+  IN osmtest_t * const p_osmt,
+  IN ib_net16_t const lid,
+  IN uint8_t const out_port_num,
+  IN uint8_t const in_port_num,
+  IN FILE *fh )
+{
+  osmtest_req_context_t context;
+  ib_api_status_t status = IB_SUCCESS;
+  osmv_user_query_t user;
+  osmv_query_req_t req;
+  ib_slvl_table_record_t record, *p_rec;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_query_slvl_map );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmt_query_slvl_map: "
+             "Getting SLtoVL Map Table for out-port with LID 0x%X Num:0x%X from In-Port:0x%X\n",
+             cl_ntoh16( lid ),
+             out_port_num, in_port_num );
+  }
+
+  /*
+   * Do a blocking query for this record in the subnet.
+   * The result is returned in the result field of the caller's
+   * context structure.
+   *
+   * The query structures are locals.
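+   * For a CA port both port numbers are passed in as 0 and the SA is
+   * expected to resolve the port from the LID (see the callers below).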
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + record.lid = lid; + record.in_port_num = in_port_num; + record.out_port_num = out_port_num; + user.p_attr = &record; + + req.query_type = OSMV_QUERY_SLVL_BY_LID_AND_PORTS; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + + if ( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_slvl_map: ERR 0469: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_slvl_map: ERR 0470: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_slvl_map: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result.p_result_madw ) ) ); + } + goto Exit; + } + + /* ok it worked */ + p_rec = osmv_get_query_result( context.result.p_result_madw, 0); + if ( fh ) + { + osmtest_write_slvl_map_table( p_osmt, fh, p_rec ); + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +static ib_api_status_t +osmt_query_all_ports_slvl_map( IN osmtest_t * const p_osmt, + IN FILE * fh ) +{ + cl_status_t status = CL_SUCCESS; + cl_qmap_t *p_tbl; + port_t *p_src_port; + uint8_t in_port,anyErr = 0, num_ports; + node_t *p_node; + const cl_qmap_t *p_node_tbl; + + OSM_LOG_ENTER( &p_osmt->log, osmt_query_all_ports_slvl_map ); + + /* + * Go over all ports that exist in the subnet + * get the relevant SLtoVLs + */ + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmt_query_all_ports_slvl_map: " + "Obtaining ALL Ports (to other ports) SLtoVL Maps\n"); + + p_tbl = &p_osmt->exp_subn.port_key_tbl; + p_node_tbl = &p_osmt->exp_subn.node_lid_tbl; + + p_src_port = ( port_t * ) cl_qmap_head( p_tbl ); + + while( p_src_port != ( port_t * ) cl_qmap_end( p_tbl ) ) + { + + /* HACK we use capability_mask to know diff a CA port from switch port */ + if( p_src_port->rec.port_info.capability_mask ) + { + /* this is an hca port */ + /* NOTE to comply we must set port number to 0 and the SA should figure it out */ + /* since it is a CA port */ + status = osmt_query_slvl_map(p_osmt, p_src_port->rec.lid, 0, 0, fh); + if (status != IB_SUCCESS) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_all_ports_slvl_map: ERR 0471: " + "Failed to get Lid:0x%X In-Port:0x%X Out-Port:0x%X(%s)\n", + cl_ntoh16(p_src_port->rec.lid), 0, 0, + ib_get_err_str( status ) ); + anyErr = 1; + } + } + else + { + /* this is a switch port */ + /* get the node */ + p_node = ( node_t * ) cl_qmap_get( p_node_tbl, p_src_port->rec.lid ); + if( p_node == ( node_t * ) cl_qmap_end( p_node_tbl ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmt_query_all_ports_slvl_map: ERR 0472: " + "Failed to get Node by Lid:0x%X\n", + p_src_port->rec.lid ); + goto Exit; + } + + num_ports = p_node->rec.node_info.num_ports; + + for (in_port = 1; in_port <= 
num_ports; in_port++)
+      {
+        status = osmt_query_slvl_map( p_osmt, p_src_port->rec.lid,
+                                      p_src_port->rec.port_num, in_port, fh );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmt_query_all_ports_slvl_map: ERR 0473: "
+                   "Failed to get Lid:0x%X Out-Port:0x%X In-Port:0x%X (%s)\n",
+                   cl_ntoh16( p_src_port->rec.lid ), p_src_port->rec.port_num,
+                   in_port, ib_get_err_str( status ) );
+          anyErr = 1;
+        }
+      }
+    }
+
+    p_src_port = ( port_t * ) cl_qmap_next( &p_src_port->map_item );
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  if( anyErr )
+  {
+    status = IB_ERROR;
+  }
+  return ( status );
+}
+
+/*
+ * Run the VL arbitration and SLtoVL map query flows:
+ * Good flow:
+ * - for each physical port on the network, obtain the VL arbitration table
+ * - for each CA physical port, obtain its SLtoVL map
+ * - for each SW physical out-port, obtain the SLtoVL map to each other port
+ * Bad flow:
+ * - try a Get that matches multiple records
+ * - try a GetTable request
+ * - try providing a non-existent port
+ */
+ib_api_status_t
+osmt_run_slvl_and_vlarb_records_flow( IN osmtest_t * const p_osmt )
+{
+  ib_api_status_t status;
+  FILE *fh;
+  ib_net16_t test_lid;
+  uint8_t lmc;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmt_run_slvl_and_vlarb_records_flow );
+
+  fh = fopen( "qos.txt", "w" );
+  if( fh == NULL )
+  {
+    /* without the output file there is nothing to record; fail early
+       rather than pass a NULL FILE pointer around */
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmt_run_slvl_and_vlarb_records_flow: "
+             "Failed to open qos.txt for writing\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* go over all ports in the subnet */
+  status = osmt_query_all_ports_vl_arb( p_osmt, fh );
+  if( status != IB_SUCCESS )
+  {
+    goto Exit;
+  }
+
+  status = osmt_query_all_ports_slvl_map( p_osmt, fh );
+  if( status != IB_SUCCESS )
+  {
+    goto Exit;
+  }
+
+  /* If LMC > 0, test non-base-LID SA QoS record requests */
+  status = osmtest_get_local_port_lmc( p_osmt, p_osmt->local_port.lid, &lmc );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  if( lmc != 0 )
+  {
+    test_lid = cl_ntoh16( p_osmt->local_port.lid + 1 );
+
+    status = osmt_query_vl_arb( p_osmt, test_lid, 0, 1, NULL );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    status = osmt_query_slvl_map( p_osmt, test_lid, 0, 0, NULL );
+    if( status != IB_SUCCESS )
+      goto Exit;
+  }
+
+ Exit:
+  if( fh )
+    fclose( fh );
+  OSM_LOG_EXIT( &p_osmt->log );
+  return status;
+}
+
+
diff --git a/branches/Ndi/ulp/opensm/user/osmtest/osmtest.c b/branches/Ndi/ulp/opensm/user/osmtest/osmtest.c
new file mode 100644
index 00000000..063f8959
--- /dev/null
+++ b/branches/Ndi/ulp/opensm/user/osmtest/osmtest.c
@@ -0,0 +1,7488 @@
+/*
+ * Copyright (c) 2004-2006 Voltaire, Inc. All rights reserved.
+ * Copyright (c) 2002-2006 Mellanox Technologies LTD. All rights reserved.
+ * Copyright (c) 1996-2003 Intel Corporation. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+/* TODO : Check why we don't free the cl_qmap_items we store when reading the DB */
+
+/*
+ * Abstract:
+ *    Implementation of osmtest_t.
+ *    This object represents the OSMTest Test object.
+ *
+ * Environment:
+ *    Linux User Mode
+ *
+ * $Revision: 1.10 $
+ */
+
+#ifdef __WIN__
+#pragma warning(disable : 4996)
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef __WIN__
+#include <windows.h>
+#else
+#include <unistd.h>
+#include <sys/time.h>
+#endif
+#include <complib/cl_debug.h>
+#include "osmtest.h"
+
+#ifndef __WIN__
+#define strnicmp strncasecmp
+#endif
+
+#define POOL_MIN_ITEMS  64
+#define GUID_ARRAY_SIZE 64
+
+typedef enum _osmtest_token_val
+{
+  OSMTEST_TOKEN_COMMENT = 0,
+  OSMTEST_TOKEN_END,
+  OSMTEST_TOKEN_DEFINE_NODE,
+  OSMTEST_TOKEN_DEFINE_PORT,
+  OSMTEST_TOKEN_DEFINE_PATH,
+  OSMTEST_TOKEN_DEFINE_LINK,
+  OSMTEST_TOKEN_LID,
+  OSMTEST_TOKEN_BASE_VERSION,
+  OSMTEST_TOKEN_CLASS_VERSION,
+  OSMTEST_TOKEN_NODE_TYPE,
+  OSMTEST_TOKEN_NUM_PORTS,
+  OSMTEST_TOKEN_SYS_GUID,
+  OSMTEST_TOKEN_NODE_GUID,
+  OSMTEST_TOKEN_PORT_GUID,
+  OSMTEST_TOKEN_PARTITION_CAP,
+  OSMTEST_TOKEN_DEVICE_ID,
+  OSMTEST_TOKEN_REVISION,
+  OSMTEST_TOKEN_PORT_NUM,
+  OSMTEST_TOKEN_VENDOR_ID,
+  OSMTEST_TOKEN_DGID,
+  OSMTEST_TOKEN_SGID,
+  OSMTEST_TOKEN_DLID,
+  OSMTEST_TOKEN_SLID,
+  OSMTEST_TOKEN_HOP_FLOW_RAW,
+  OSMTEST_TOKEN_TCLASS,
+  OSMTEST_TOKEN_NUM_PATH,
+  OSMTEST_TOKEN_PKEY,
+  OSMTEST_TOKEN_SL,
+  OSMTEST_TOKEN_RATE,
+  OSMTEST_TOKEN_PKT_LIFE,
+  OSMTEST_TOKEN_PREFERENCE,
+  OSMTEST_TOKEN_MKEY,
+  OSMTEST_TOKEN_SUBN_PREF,
+  OSMTEST_TOKEN_BASE_LID,
+  OSMTEST_TOKEN_SM_BASE_LID,
+  OSMTEST_TOKEN_CAP_MASK,
+  OSMTEST_TOKEN_DIAG_CODE,
+  OSMTEST_TOKEN_MKEY_LEASE_PER,
+  OSMTEST_TOKEN_LOC_PORT_NUM,
+  OSMTEST_TOKEN_LINK_WID_EN,
+  OSMTEST_TOKEN_LINK_WID_SUP,
+  OSMTEST_TOKEN_LINK_WID_ACT,
+  OSMTEST_TOKEN_LINK_SPEED_SUP,
+  OSMTEST_TOKEN_PORT_STATE,
+  OSMTEST_TOKEN_STATE_INFO2,
+  OSMTEST_TOKEN_MKEY_PROT_BITS,
+  OSMTEST_TOKEN_LMC,
+  OSMTEST_TOKEN_LINK_SPEED,
+  OSMTEST_TOKEN_MTU_SMSL,
+  OSMTEST_TOKEN_VL_CAP,
+  OSMTEST_TOKEN_VL_HIGH_LIMIT,
+  OSMTEST_TOKEN_VL_ARB_HIGH_CAP,
+  OSMTEST_TOKEN_VL_ARB_LOW_CAP,
+  OSMTEST_TOKEN_MTU_CAP,
+  OSMTEST_TOKEN_VL_STALL_LIFE,
+  OSMTEST_TOKEN_VL_ENFORCE,
+  OSMTEST_TOKEN_MKEY_VIOL,
+  OSMTEST_TOKEN_PKEY_VIOL,
+  OSMTEST_TOKEN_QKEY_VIOL,
+  OSMTEST_TOKEN_GUID_CAP,
+  OSMTEST_TOKEN_SUBN_TIMEOUT,
+  OSMTEST_TOKEN_RESP_TIME_VAL,
+  OSMTEST_TOKEN_ERR_THRESHOLD,
+  OSMTEST_TOKEN_MTU,
+  OSMTEST_TOKEN_FROMLID,
+  OSMTEST_TOKEN_FROMPORTNUM,
+  OSMTEST_TOKEN_TOPORTNUM,
+  OSMTEST_TOKEN_TOLID,
+  OSMTEST_TOKEN_UNKNOWN
+} osmtest_token_val_t;
+
+typedef struct _osmtest_token
+{
+  osmtest_token_val_t val;
+  size_t str_size;
+  const char *str;
+} osmtest_token_t;
+
+const osmtest_token_t token_array[] = {
+  {OSMTEST_TOKEN_COMMENT, 1, "#"},
+  {OSMTEST_TOKEN_END, 3, "END"},
+  {OSMTEST_TOKEN_DEFINE_NODE, 11, "DEFINE_NODE"},
+  {OSMTEST_TOKEN_DEFINE_PORT, 11, "DEFINE_PORT"},
+  {OSMTEST_TOKEN_DEFINE_PATH, 11, "DEFINE_PATH"},
+  {OSMTEST_TOKEN_DEFINE_LINK, 11, "DEFINE_LINK"},
+  {OSMTEST_TOKEN_LID, 3, "LID"},
+  {OSMTEST_TOKEN_BASE_VERSION, 12, "BASE_VERSION"},
+  {OSMTEST_TOKEN_CLASS_VERSION, 13, "CLASS_VERSION"},
+  {OSMTEST_TOKEN_NODE_TYPE, 9, "NODE_TYPE"},
+  {OSMTEST_TOKEN_NUM_PORTS, 9, "NUM_PORTS"},
+  {OSMTEST_TOKEN_SYS_GUID, 8, "SYS_GUID"},
+  {OSMTEST_TOKEN_NODE_GUID, 9, "NODE_GUID"},
+  {OSMTEST_TOKEN_PORT_GUID, 9,
"PORT_GUID"}, + {OSMTEST_TOKEN_PARTITION_CAP, 13, "PARTITION_CAP"}, + {OSMTEST_TOKEN_DEVICE_ID, 9, "DEVICE_ID"}, + {OSMTEST_TOKEN_REVISION, 8, "REVISION"}, + {OSMTEST_TOKEN_PORT_NUM, 8, "PORT_NUM"}, + {OSMTEST_TOKEN_VENDOR_ID, 9, "VENDOR_ID"}, + {OSMTEST_TOKEN_DGID, 4, "DGID"}, + {OSMTEST_TOKEN_SGID, 4, "SGID"}, + {OSMTEST_TOKEN_DLID, 4, "DLID"}, + {OSMTEST_TOKEN_SLID, 4, "SLID"}, + {OSMTEST_TOKEN_HOP_FLOW_RAW, 12, "HOP_FLOW_RAW"}, + {OSMTEST_TOKEN_TCLASS, 6, "TCLASS"}, + {OSMTEST_TOKEN_NUM_PATH, 8, "NUM_PATH"}, + {OSMTEST_TOKEN_PKEY, 4, "PKEY"}, + {OSMTEST_TOKEN_SL, 2, "SL"}, + {OSMTEST_TOKEN_RATE, 4, "RATE"}, + {OSMTEST_TOKEN_PKT_LIFE, 8, "PKT_LIFE"}, + {OSMTEST_TOKEN_PREFERENCE, 10, "PREFERENCE"}, + {OSMTEST_TOKEN_MKEY, 4, "M_KEY"}, + {OSMTEST_TOKEN_SUBN_PREF, 13, "SUBNET_PREFIX"}, + {OSMTEST_TOKEN_BASE_LID, 8, "BASE_LID"}, + {OSMTEST_TOKEN_SM_BASE_LID, 18, "MASTER_SM_BASE_LID"}, + {OSMTEST_TOKEN_CAP_MASK, 15, "CAPABILITY_MASK"}, + {OSMTEST_TOKEN_DIAG_CODE, 9, "DIAG_CODE"}, + {OSMTEST_TOKEN_MKEY_LEASE_PER, 18, "m_key_lease_period"}, + {OSMTEST_TOKEN_LOC_PORT_NUM, 14, "local_port_num"}, + {OSMTEST_TOKEN_LINK_WID_EN, 18, "link_width_enabled"}, + {OSMTEST_TOKEN_LINK_WID_SUP, 20, "link_width_supported"}, + {OSMTEST_TOKEN_LINK_WID_ACT, 17, "link_width_active"}, + {OSMTEST_TOKEN_LINK_SPEED_SUP, 20, "link_speed_supported"}, + {OSMTEST_TOKEN_PORT_STATE, 10, "port_state"}, + {OSMTEST_TOKEN_STATE_INFO2, 10, "state_info2"}, + {OSMTEST_TOKEN_MKEY_PROT_BITS, 3, "mpb"}, + {OSMTEST_TOKEN_LMC, 3, "lmc"}, + {OSMTEST_TOKEN_LINK_SPEED, 10, "link_speed"}, + {OSMTEST_TOKEN_MTU_SMSL, 8, "mtu_smsl"}, + {OSMTEST_TOKEN_VL_CAP, 6, "vl_cap"}, + {OSMTEST_TOKEN_VL_HIGH_LIMIT, 13, "vl_high_limit"}, + {OSMTEST_TOKEN_VL_ARB_HIGH_CAP, 15, "vl_arb_high_cap"}, + {OSMTEST_TOKEN_VL_ARB_LOW_CAP, 14, "vl_arb_low_cap"}, + {OSMTEST_TOKEN_MTU_CAP, 7, "mtu_cap"}, + {OSMTEST_TOKEN_VL_STALL_LIFE, 13, "vl_stall_life"}, + {OSMTEST_TOKEN_VL_ENFORCE, 10, "vl_enforce"}, + {OSMTEST_TOKEN_MKEY_VIOL, 16, "m_key_violations"}, + {OSMTEST_TOKEN_PKEY_VIOL, 16, "p_key_violations"}, + {OSMTEST_TOKEN_QKEY_VIOL, 16, "q_key_violations"}, + {OSMTEST_TOKEN_GUID_CAP, 8, "guid_cap"}, + {OSMTEST_TOKEN_SUBN_TIMEOUT, 14, "subnet_timeout"}, + {OSMTEST_TOKEN_RESP_TIME_VAL, 15, "resp_time_value"}, + {OSMTEST_TOKEN_ERR_THRESHOLD, 15, "error_threshold"}, + {OSMTEST_TOKEN_MTU, 3, "MTU"}, /* must be after the other mtu... tokens. 
*/ + {OSMTEST_TOKEN_FROMLID, 8, "from_lid"}, + {OSMTEST_TOKEN_FROMPORTNUM, 13, "from_port_num"}, + {OSMTEST_TOKEN_TOPORTNUM, 11, "to_port_num"}, + {OSMTEST_TOKEN_TOLID, 6, "to_lid"}, + {OSMTEST_TOKEN_UNKNOWN, 0, ""} /* must be last entry */ +}; + +#define IB_MAD_STATUS_CLASS_MASK (CL_HTON16(0xFF00)) + +static const char ib_mad_status_str_busy[] = "IB_MAD_STATUS_BUSY"; +static const char ib_mad_status_str_redirect[] = "IB_MAD_STATUS_REDIRECT"; +static const char ib_mad_status_str_unsup_class_ver[] = +"IB_MAD_STATUS_UNSUP_CLASS_VER"; +static const char ib_mad_status_str_unsup_method[] = +"IB_MAD_STATUS_UNSUP_METHOD"; +static const char ib_mad_status_str_unsup_method_attr[] = +"IB_MAD_STATUS_UNSUP_METHOD_ATTR"; +static const char ib_mad_status_str_invalid_field[] = +"IB_MAD_STATUS_INVALID_FIELD"; +static const char ib_mad_status_str_no_resources[] = +"IB_SA_MAD_STATUS_NO_RESOURCES"; +static const char ib_mad_status_str_req_invalid[] = +"IB_SA_MAD_STATUS_REQ_INVALID"; +static const char ib_mad_status_str_no_records[] = +"IB_SA_MAD_STATUS_NO_RECORDS"; +static const char ib_mad_status_str_too_many_records[] = +"IB_SA_MAD_STATUS_TOO_MANY_RECORDS"; +static const char ib_mad_status_str_invalid_gid[] = +"IB_SA_MAD_STATUS_INVALID_GID"; +static const char ib_mad_status_str_insuf_comps[] = +"IB_SA_MAD_STATUS_INSUF_COMPS"; +static const char generic_or_str[] = " | "; + +/********************************************************************** + **********************************************************************/ +const char * +ib_get_mad_status_str( IN const ib_mad_t * const p_mad ) +{ + static char line[512]; + uint32_t offset = 0; + ib_net16_t status; + boolean_t first = TRUE; + + line[offset] = '\0'; + + status = ( ib_net16_t ) ( p_mad->status & IB_SMP_STATUS_MASK ); + + if( status == 0 ) + { + strcat( &line[offset], "IB_SUCCESS" ); + return ( line ); + } + + if( status & IB_MAD_STATUS_BUSY ) + { + strcat( &line[offset], ib_mad_status_str_busy ); + offset += sizeof( ib_mad_status_str_busy ); + } + if( status & IB_MAD_STATUS_REDIRECT ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_redirect ); + offset += sizeof( ib_mad_status_str_redirect ) - 1; + } + if( ( status & IB_MAD_STATUS_INVALID_FIELD ) == IB_MAD_STATUS_UNSUP_CLASS_VER ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_unsup_class_ver ); + offset += sizeof( ib_mad_status_str_unsup_class_ver ) - 1; + } + if( ( status & IB_MAD_STATUS_INVALID_FIELD ) == IB_MAD_STATUS_UNSUP_METHOD ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_unsup_method ); + offset += sizeof( ib_mad_status_str_unsup_method ) - 1; + } + if( (status & IB_MAD_STATUS_INVALID_FIELD ) == IB_MAD_STATUS_UNSUP_METHOD_ATTR ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_unsup_method_attr ); + offset += sizeof( ib_mad_status_str_unsup_method_attr ) - 1; + } + if( ( status & IB_MAD_STATUS_INVALID_FIELD ) == IB_MAD_STATUS_INVALID_FIELD ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], 
ib_mad_status_str_invalid_field ); + offset += sizeof( ib_mad_status_str_invalid_field ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == + IB_SA_MAD_STATUS_NO_RESOURCES ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_no_resources ); + offset += sizeof( ib_mad_status_str_no_resources ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == + IB_SA_MAD_STATUS_REQ_INVALID ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_req_invalid ); + offset += sizeof( ib_mad_status_str_req_invalid ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == IB_SA_MAD_STATUS_NO_RECORDS ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_no_records ); + offset += sizeof( ib_mad_status_str_no_records ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == + IB_SA_MAD_STATUS_TOO_MANY_RECORDS ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_too_many_records ); + offset += sizeof( ib_mad_status_str_too_many_records ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == + IB_SA_MAD_STATUS_INVALID_GID ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_invalid_gid ); + offset += sizeof( ib_mad_status_str_invalid_gid ) - 1; + } + if( ( status & IB_MAD_STATUS_CLASS_MASK ) == + IB_SA_MAD_STATUS_INSUF_COMPS ) + { + if( !first ) + { + strcat( &line[offset], generic_or_str ); + offset += sizeof( generic_or_str ) - 1; + } + first = FALSE; + strcat( &line[offset], ib_mad_status_str_insuf_comps ); + offset += sizeof( ib_mad_status_str_insuf_comps ) - 1; + } + + return ( line ); +} + +/********************************************************************** + **********************************************************************/ +void +subnet_construct( IN subnet_t * const p_subn ) +{ + cl_qmap_init( &p_subn->link_tbl ); + cl_qmap_init( &p_subn->node_lid_tbl ); + cl_qmap_init( &p_subn->node_guid_tbl ); + cl_qmap_init( &p_subn->mgrp_mlid_tbl ); + + /* NO WAY TO HAVE UNIQUE PORT BY LID OR GUID */ + /* cl_qmap_init( &p_subn->port_lid_tbl ); */ + /* cl_qmap_init( &p_subn->port_guid_tbl ); */ + + /* port key is a lid and num pair */ + cl_qmap_init( &p_subn->port_key_tbl ); + cl_qmap_init( &p_subn->path_tbl ); +} + +/********************************************************************** + **********************************************************************/ +cl_status_t +subnet_init( IN subnet_t * const p_subn ) +{ + cl_status_t status = IB_SUCCESS; + + subnet_construct( p_subn ); + + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osmtest_construct( IN osmtest_t * const p_osmt ) +{ + memset( p_osmt, 0, sizeof( *p_osmt ) ); + osm_log_construct( &p_osmt->log ); + subnet_construct( &p_osmt->exp_subn ); +} + +/********************************************************************** + **********************************************************************/ +void +osmtest_destroy( IN 
osmtest_t * const p_osmt )
+{
+  cl_map_item_t *p_item, *p_next_item;
+
+  /* There is currently a problem in the IBAL exit flow (a memory
+     overrun), so we bypass the vendor deletion; the Windows OS will
+     reclaim it at process exit anyway. */
+#ifndef __WIN__
+  if( p_osmt->p_vendor )
+  {
+    osm_vendor_delete( &p_osmt->p_vendor );
+  }
+#endif
+  cl_qpool_destroy( &p_osmt->port_pool );
+  cl_qpool_destroy( &p_osmt->node_pool );
+
+  /* destroy the qmap tables */
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.link_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.link_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.mgrp_mlid_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.mgrp_mlid_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.node_guid_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.node_guid_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.node_lid_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.node_lid_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.path_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.path_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  p_next_item = cl_qmap_head( &p_osmt->exp_subn.port_key_tbl );
+  while( p_next_item != cl_qmap_end( &p_osmt->exp_subn.port_key_tbl ) )
+  {
+    p_item = p_next_item;
+    p_next_item = cl_qmap_next( p_item );
+    free( p_item );
+  }
+
+  osm_log_destroy( &p_osmt->log );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osmtest_init( IN osmtest_t * const p_osmt,
+              IN const osmtest_opt_t * const p_opt,
+              IN const osm_log_level_t log_flags )
+{
+  ib_api_status_t status;
+
+  /* Can't use log macros here, since we're initializing the log.
*/ + osmtest_construct( p_osmt ); + + status = osm_log_init_v2( &p_osmt->log, p_opt->force_log_flush, + 0x0001, p_opt->log_file, 0, TRUE ); + if( status != IB_SUCCESS ) + return ( status ); + + /* but we do not want any extra stuff here */ + osm_log_set_level( &p_osmt->log, log_flags ); + + osm_log( &p_osmt->log, OSM_LOG_FUNCS, + "osmtest_init: [\n" ); + + p_osmt->opt = *p_opt; + + status = cl_qpool_init( &p_osmt->node_pool, POOL_MIN_ITEMS, 0, + POOL_MIN_ITEMS, sizeof( node_t ), NULL, NULL, + NULL ); + CL_ASSERT( status == CL_SUCCESS ); + + status = cl_qpool_init( &p_osmt->port_pool, POOL_MIN_ITEMS, 0, + POOL_MIN_ITEMS, sizeof( port_t ), NULL, NULL, + NULL ); + CL_ASSERT( status == CL_SUCCESS ); + + p_osmt->p_vendor = osm_vendor_new( &p_osmt->log, + p_opt->transaction_timeout ); + + if( p_osmt->p_vendor == NULL ) + { + status = IB_INSUFFICIENT_RESOURCES; + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_init: ERR 0001: " + "Unable to allocate vendor object" ); + status = IB_ERROR; + goto Exit; + } + + osm_mad_pool_construct( &p_osmt->mad_pool ); + status = osm_mad_pool_init( &p_osmt->mad_pool, &p_osmt->log ); + if( status != IB_SUCCESS ) + goto Exit; + + Exit: + osm_log( &p_osmt->log, OSM_LOG_FUNCS, + "osmtest_init: ]\n" ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +void +osmtest_query_res_cb( IN osmv_query_res_t * p_rec ) +{ + osmtest_req_context_t *const p_ctxt = + ( osmtest_req_context_t * ) p_rec->query_context; + osmtest_t *const p_osmt = p_ctxt->p_osmt; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_query_res_cb ); + + p_ctxt->result = *p_rec; + + if( p_rec->status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_query_res_cb: ERR 0003: " + "Error on query (%s)\n", ib_get_err_str( p_rec->status ) ); + } + + OSM_LOG_EXIT( &p_osmt->log ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_get_all_recs( IN osmtest_t * const p_osmt, + IN ib_net16_t const attr_id, + IN size_t const attr_size, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_all_recs ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_get_all_recs: " + "Getting all %s records\n", ib_get_sa_attr_str( attr_id ) ); + } + + /* + * Do a blocking query for all records in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
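+ *
+ * attr_offset below is the SA AttributeOffset: the record size
+ * expressed in 8-byte units, hence the (attr_size >> 3). For
+ * example, a 64-byte record yields an attr_offset of 8.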
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + + p_context->p_osmt = p_osmt; + user.attr_id = attr_id; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( attr_size >> 3 ) ); + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_all_recs: ERR 0004: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_all_recs: ERR 0064: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_all_recs: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( p_context->result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_validate_sa_class_port_info( IN osmtest_t * const p_osmt) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_query_req_t req; + ib_class_port_info_t *p_cpi; + osmtest_req_context_t context; + osmtest_req_context_t *p_context = &context; + ib_sa_mad_t *p_resp_sa_madp; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_sa_class_port_info ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_sa_class_port_info: " + "Getting ClassPortInfo\n"); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
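+ *
+ * The returned ClassPortInfo carries the SA's capability mask
+ * (optional-feature bits) and resp_time_val; per the IBA spec the
+ * latter encodes the SA response time as 4.096 usec * 2^resp_time_val.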
+ */ + memset( &req, 0, sizeof( req ) ); + + p_context->p_osmt = p_osmt; + req.query_type = OSMV_QUERY_CLASS_PORT_INFO; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = 0; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_sa_class_port_info: ERR 0065: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_sa_class_port_info: ERR 0070: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_sa_class_port_info: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( p_context->result.p_result_madw ) ) ); + } + goto Exit; + } + + /* ok we got it so please print it out */ + p_resp_sa_madp = (ib_sa_mad_t*)osm_madw_get_mad_ptr(context.result.p_result_madw); + p_cpi = (ib_class_port_info_t*)ib_sa_mad_get_payload_ptr(p_resp_sa_madp ); + + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmtest_validate_sa_class_port_info:\n-----------------------------\nSA Class Port Info:\n" + " base_ver:%u\n class_ver:%u\n cap_mask:0x%X\n resp_time_val:0x%X\n-----------------------------\n", + p_cpi->base_ver, p_cpi->class_ver, cl_ntoh16(p_cpi->cap_mask), p_cpi->resp_time_val + ); + + Exit: +#if 0 + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } +#endif + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_get_node_rec( IN osmtest_t * const p_osmt, + IN ib_net64_t const node_guid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_node_record_t record; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_node_rec ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_node_rec: " + "Getting node record for 0x%016" PRIx64 "\n", + cl_ntoh64( node_guid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
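+ *
+ * Only fields flagged in user.comp_mask take part in the match;
+ * here IB_NR_COMPMASK_NODEGUID selects on node GUID alone. A
+ * hypothetical combined match would simply OR the masks, e.g.:
+ *
+ *   user.comp_mask = IB_NR_COMPMASK_NODEGUID | IB_NR_COMPMASK_LID;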
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.node_info.node_guid = node_guid; + + p_context->p_osmt = p_osmt; + user.comp_mask = IB_NR_COMPMASK_NODEGUID; + user.attr_id = IB_MAD_ATTR_NODE_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec: ERR 0071: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec: ERR 0072: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( p_context->result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Get a node record by node LID + **********************************************************************/ +ib_api_status_t +osmtest_get_node_rec_by_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_node_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_node_rec_by_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_node_rec_by_lid: " + "Getting node record for LID 0x%02X\n", + cl_ntoh16( lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
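+ *
+ * A minimal caller sketch (hypothetical, but mirroring the cleanup
+ * the callers in this file perform on the returned MAD wrapper):
+ *
+ *   osmtest_req_context_t ctx;
+ *   ib_node_record_t *p_rec;
+ *   memset( &ctx, 0, sizeof( ctx ) );
+ *   if( osmtest_get_node_rec_by_lid( p_osmt, cl_hton16( 1 ), &ctx ) == IB_SUCCESS )
+ *     p_rec = osmv_get_query_node_rec( ctx.result.p_result_madw, 0 );
+ *   if( ctx.result.p_result_madw )
+ *     osm_mad_pool_put( &p_osmt->mad_pool, ctx.result.p_result_madw );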
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + + p_context->p_osmt = p_osmt; + user.comp_mask = IB_NR_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_NODE_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec_by_lid: ERR 0073: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec_by_lid: ERR 0074: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_node_rec_by_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_get_path_rec_by_guid_pair( IN osmtest_t * const p_osmt, + IN ib_net64_t sguid, + IN ib_net64_t dguid, + IN osmtest_req_context_t *p_context) +{ + cl_status_t status = IB_SUCCESS; + osmv_query_req_t req; + osmv_guid_pair_t guid_pair; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_path_rec_by_guid_pair); + + memset( &req, 0, sizeof( req ) ); + memset( p_context, 0, sizeof( *p_context ) ); + + p_context->p_osmt = p_osmt; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + + req.query_type = OSMV_QUERY_PATH_REC_BY_PORT_GUIDS; + + guid_pair.dest_guid = dguid; + guid_pair.src_guid = sguid; + + req.p_query_input = &guid_pair; + req.sm_key = 0; + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_path_rec_by_guid_pair: " + "Query for path from 0x%" PRIx64 " to 0x%" PRIx64"\n", + sguid, dguid ); + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_guid_pair: ERR 0063: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = (*p_context).result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_guid_pair: ERR 0066: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_guid_pair: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( (*p_context).result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + 
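* A port GID is the subnet prefix combined with the port GUID, so a
+ * caller could build the pair as follows (illustrative sketch,
+ * hypothetical variable names):
+ *
+ *   sgid.unicast.prefix       = subnet_prefix;
+ *   sgid.unicast.interface_id = src_port_guid;
+ *   dgid.unicast.prefix       = subnet_prefix;
+ *   dgid.unicast.interface_id = dest_port_guid;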
**********************************************************************/ +static ib_api_status_t +osmtest_get_path_rec_by_gid_pair( IN osmtest_t * const p_osmt, + IN ib_gid_t sgid, + IN ib_gid_t dgid, + IN osmtest_req_context_t *p_context) +{ + cl_status_t status = IB_SUCCESS; + osmv_query_req_t req; + osmv_gid_pair_t gid_pair; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_path_rec_by_gid_pair); + + memset( &req, 0, sizeof( req ) ); + memset( p_context, 0, sizeof( *p_context ) ); + + p_context->p_osmt = p_osmt; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + + req.query_type = OSMV_QUERY_PATH_REC_BY_GIDS; + + gid_pair.dest_gid = dgid; + gid_pair.src_gid = sgid; + + req.p_query_input = &gid_pair; + req.sm_key = 0; + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_path_rec_by_gid_pair: " + "Query for path from 0x%016" PRIx64 " 0x%016" PRIx64 " to 0x%016" PRIx64 " 0x%016" PRIx64"\n", + sgid.unicast.prefix, sgid.unicast.interface_id, + dgid.unicast.prefix, dgid.unicast.interface_id ); + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_gid_pair: ERR 006A: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = (*p_context).result.status; + + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_gid_pair: ERR 006B: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_gid_pair: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( (*p_context).result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP) +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_get_multipath_rec( IN osmtest_t * const p_osmt, + IN osmv_multipath_req_t *p_request, + IN osmtest_req_context_t *p_context) +{ + cl_status_t status = IB_SUCCESS; + osmv_query_req_t req; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_multipath_rec ); + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
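+ *
+ * Multipath queries (and the dual-sided GetMulti transaction they
+ * rely on) are only compiled in when the vendor layer supports
+ * RMPP, since the request itself may span multiple MADs.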
+ */ + memset( &req, 0, sizeof( req ) ); + + p_context->p_osmt = p_osmt; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + + req.query_type = OSMV_QUERY_MULTIPATH_REC; + + req.p_query_input = p_request; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: ERR 0068: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: ERR 0069: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( p_context->result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} +#endif + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_get_port_rec( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_portinfo_record_t record; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_port_rec ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_get_port_rec: " + "Getting PortInfoRecord for port with LID 0x%X\n", + cl_ntoh16( lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
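+ *
+ * Note that a PortInfoRecord is keyed by (LID, port number); since
+ * only IB_PIR_COMPMASK_LID is set here, a switch (whose external
+ * ports all share the switch LID) may return more than one record.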
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + + p_context->p_osmt = p_osmt; + user.comp_mask = IB_PIR_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_PORTINFO_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec: ERR 0075: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec: ERR 0076: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( p_context->result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_get_port_rec_by_num( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN uint8_t const port_num, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_portinfo_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_port_rec_by_num ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_get_port_rec_by_num: " + "Getting PortInfoRecord for port with LID 0x%X Num:0x%X\n", + cl_ntoh16( lid ), + port_num); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
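+ *
+ * Unlike the user-defined queries above, the canned
+ * OSMV_QUERY_PORT_REC_BY_LID_AND_NUM type leaves the component
+ * mask to the osm_vendor query layer, so none is set here.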
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + record.port_num = port_num; + user.p_attr = &record; + + p_context->p_osmt = p_osmt; + + req.query_type = OSMV_QUERY_PORT_REC_BY_LID_AND_NUM; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec_by_num: ERR 0077: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec_by_num: ERR 0078: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_port_rec_by_num: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_stress_port_recs_large( IN osmtest_t * const p_osmt, + OUT uint32_t * const p_num_recs, + OUT uint32_t * const p_num_queries ) +{ + osmtest_req_context_t context; + ib_portinfo_record_t *p_rec; + uint32_t i; + cl_status_t status; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_port_recs_large ); + + memset( &context, 0, sizeof( context ) ); + /* + * Do a blocking query for all PortInfoRecords in the subnet. + */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_PORTINFO_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_stress_port_recs_large: ERR 0006: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Populate the database with the received records. + */ + num_recs = context.result.result_cnt; + *p_num_recs += num_recs; + ++*p_num_queries; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_port_recs_large: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, i ); + osm_dump_portinfo_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. 
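+ * The result MAD wrapper is owned by this caller once the query
+ * returns; failing to put it back would leak it from the pool.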
+ */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_stress_node_recs_large( IN osmtest_t * const p_osmt, + OUT uint32_t * const p_num_recs, + OUT uint32_t * const p_num_queries ) +{ + osmtest_req_context_t context; + ib_node_record_t *p_rec; + uint32_t i; + cl_status_t status; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_node_recs_large ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for all NodeRecords in the subnet. + */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_NODE_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_stress_node_recs_large: ERR 0007: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Populate the database with the received records. + */ + num_recs = context.result.result_cnt; + *p_num_recs += num_recs; + ++*p_num_queries; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_node_recs_large: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_node_rec( context.result.p_result_madw, i ); + osm_dump_node_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_stress_path_recs_large( IN osmtest_t * const p_osmt, + OUT uint32_t * const p_num_recs, + OUT uint32_t * const p_num_queries ) +{ + osmtest_req_context_t context; + ib_path_rec_t *p_rec; + uint32_t i; + cl_status_t status; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_path_recs_large ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for all PathRecords in the subnet. + */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_PATH_RECORD, + sizeof( *p_rec ), &context ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_stress_path_recs_large: ERR 0008: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Populate the database with the received records. + */ + num_recs = context.result.result_cnt; + *p_num_recs += num_recs; + ++*p_num_queries; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_path_recs_large: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i ); + osm_dump_path_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. 
+ */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_stress_path_recs_by_guid ( IN osmtest_t * const p_osmt, + OUT uint32_t * const p_num_recs, + OUT uint32_t * const p_num_queries ) +{ + osmtest_req_context_t context; + ib_path_rec_t *p_rec; + uint32_t i; + cl_status_t status = IB_SUCCESS; + uint32_t num_recs = 0; + node_t *p_src_node, *p_dst_node; + cl_qmap_t *p_tbl; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_path_recs_by_guid ); + + memset( &context, 0, sizeof( context ) ); + + context.p_osmt = p_osmt; + + p_tbl = &p_osmt->exp_subn.node_guid_tbl; + + p_src_node = ( node_t * ) cl_qmap_head( p_tbl ); + + /* + * Go over all nodes that exist in the subnet + * for each pair that are not switch nodes get the path record + */ + while( p_src_node != ( node_t * ) cl_qmap_end( p_tbl ) ) + { + p_dst_node = ( node_t * ) cl_qmap_head( p_tbl ); + + while( p_dst_node != ( node_t * ) cl_qmap_end( p_tbl ) ) + { + /* + * Do a blocking query for CA to CA Path Record + */ + osm_log(&p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_path_recs_by_guid:" + "Source : guid = 0x%" PRIx64 " type = %d" + "Target : guid = 0x%" PRIx64 " type = %d\n", + cl_ntoh64(p_src_node->rec.node_info.port_guid), + p_src_node->rec.node_info.node_type, + cl_ntoh64(p_dst_node->rec.node_info.port_guid), + p_dst_node->rec.node_info.node_type); + + if (p_src_node->rec.node_info.node_type == IB_NODE_TYPE_CA && + p_dst_node->rec.node_info.node_type == IB_NODE_TYPE_CA) + { + status = osmtest_get_path_rec_by_guid_pair(p_osmt, + p_src_node->rec.node_info.port_guid, + p_dst_node->rec.node_info.port_guid, + &context); + + /* In a case of TIMEOUT you still can try sending but cant count, maybe its a temporary issue */ + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_stress_path_recs_by_guid: ERR 0009: " + "osmtest_get_path_rec_by_guid_pair failed (%s)\n", + ib_get_err_str( status ) ); + if (status != IB_TIMEOUT) + goto Exit; + } + else + { + /* we might have received several records */ + num_recs = context.result.result_cnt; + /* + * Populate the database with the received records. 
+ */ + *p_num_recs += num_recs; + ++*p_num_queries; + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_path_recs_by_guid: " + "Received %u records\n", num_recs ); + /* Dont waste time if not VERBOSE and above */ + if (p_osmt->log.level & OSM_LOG_VERBOSE) + { + for (i = 0; i < num_recs; i++) + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i); + osm_dump_path_record(&p_osmt->log,p_rec,OSM_LOG_VERBOSE); + } + } + } + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + } + /* next one please */ + p_dst_node = ( node_t * ) cl_qmap_next( &p_dst_node->map_item ); + } + + p_src_node = ( node_t * ) cl_qmap_next( &p_src_node->map_item ); + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_stress_port_recs_small( IN osmtest_t * const p_osmt, + OUT uint32_t * const p_num_recs, + OUT uint32_t * const p_num_queries ) +{ + osmtest_req_context_t context; + ib_portinfo_record_t *p_rec; + uint32_t i; + cl_status_t status; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_port_recs_small ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for our own PortInfoRecord in the subnet. + */ + status = osmtest_get_port_rec( p_osmt, + cl_ntoh16(p_osmt->local_port.lid), + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_stress_port_recs_small: ERR 0010: " + "osmtest_get_port_rec failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Populate the database with the received records. + */ + num_recs = context.result.result_cnt; + *p_num_recs += num_recs; + ++*p_num_queries; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_stress_port_recs_small: " + "Received %u records\n", num_recs ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, i ); + osm_dump_portinfo_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_get_local_port_lmc( IN osmtest_t * const p_osmt, + IN ib_net16_t lid, + OUT uint8_t * const p_lmc ) +{ + osmtest_req_context_t context; + ib_portinfo_record_t *p_rec; + uint32_t i; + cl_status_t status; + uint32_t num_recs = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_local_port_lmc ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for our own PortInfoRecord in the subnet. 
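+ * The LMC makes a port respond to 2^LMC consecutive LIDs (e.g.
+ * LMC = 2 covers base_lid .. base_lid + 3); the QoS flow uses the
+ * value read here to issue requests against a non-base LID.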
+ */ + status = osmtest_get_port_rec( p_osmt, + cl_ntoh16( lid ), + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_local_port_lmc: ERR 001A: " + "osmtest_get_port_rec failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + num_recs = context.result.result_cnt; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_local_port_lmc: " + "Received %u records\n", num_recs ); + } + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, i ); + osm_dump_portinfo_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE ); + if ( p_lmc) + { + *p_lmc = ib_port_info_get_lmc( &p_rec->port_info ); + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_get_local_port_lmc: " + "LMC %d\n", *p_lmc ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Use a wrong SM_Key in a simple port query and report success if + * failed. + **********************************************************************/ +ib_api_status_t +osmtest_wrong_sm_key_ignored( IN osmtest_t * const p_osmt) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_portinfo_record_t record; + osmtest_req_context_t context; + osmtest_req_context_t *p_context = &context; + uint8_t port_num = 1; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_wrong_sm_key_ignored ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_INFO ) ) + { + osm_log( &p_osmt->log, OSM_LOG_INFO, + "osmtest_wrong_sm_key_ignored: " + "Trying PortInfoRecord for port with LID 0x%X Num:0x%X\n", + p_osmt->local_port.sm_lid, + port_num ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = p_osmt->local_port.sm_lid; + record.port_num = port_num; + user.p_attr = &record; + + p_context->p_osmt = p_osmt; + + req.query_type = OSMV_QUERY_PORT_REC_BY_LID_AND_NUM; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 9999; + context.result.p_result_madw = NULL; + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_wrong_sm_key_ignored: " EXPECTING_ERRORS_START "\n" ); + status = osmv_query_sa( p_osmt->h_bind, &req ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_wrong_sm_key_ignored: " EXPECTING_ERRORS_END "\n" ); + + /* since we use a wrong sm_key we should get a timeout */ + if( status != IB_TIMEOUT ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_wrong_sm_key_ignored: ERR 0011: " + "Did not get a timeout but got (%s)\n", ib_get_err_str( status ) ); + if ( status == IB_SUCCESS ) + { + /* assign some error value to status, since IB_SUCCESS is a bad rc */ + status = IB_ERROR; + } + goto Exit; + } + else + { + status = IB_SUCCESS; + } + + Exit: + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_write_port_info( IN osmtest_t * const p_osmt, + IN FILE * fh, + IN const ib_portinfo_record_t * const p_rec ) +{ + int result; + cl_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_port_info ); + + result = fprintf( fh, + "DEFINE_PORT\n" + "lid 0x%X\n" + "port_num 0x%X\n" + "m_key 0x%016" PRIx64 "\n" + "subnet_prefix 0x%016" PRIx64 "\n" + "base_lid 0x%X\n" + "master_sm_base_lid 0x%X\n" + "capability_mask 0x%X\n" + "diag_code 0x%X\n" + "m_key_lease_period 0x%X\n" + "local_port_num 0x%X\n" + "link_width_enabled 0x%X\n" + "link_width_supported 0x%X\n" + "link_width_active 0x%X\n" + "link_speed_supported 0x%X\n" + "port_state %s\n" + "state_info2 0x%X\n" + "mpb 0x%X\n" + "lmc 0x%X\n" + "link_speed 0x%X\n" + "mtu_smsl 0x%X\n" + "vl_cap 0x%X\n" + "vl_high_limit 0x%X\n" + "vl_arb_high_cap 0x%X\n" + "vl_arb_low_cap 0x%X\n" + "mtu_cap 0x%X\n" + "vl_stall_life 0x%X\n" + "vl_enforce 0x%X\n" + "m_key_violations 0x%X\n" + "p_key_violations 0x%X\n" + "q_key_violations 0x%X\n" + "guid_cap 0x%X\n" + "subnet_timeout 0x%X\n" + "resp_time_value 0x%X\n" + "error_threshold 0x%X\n" + "END\n\n", + cl_ntoh16( p_rec->lid ), + p_rec->port_num, + cl_ntoh64( p_rec->port_info.m_key ), + cl_ntoh64( p_rec->port_info.subnet_prefix ), + cl_ntoh16( p_rec->port_info.base_lid ), + cl_ntoh16( p_rec->port_info.master_sm_base_lid ), + cl_ntoh32( p_rec->port_info.capability_mask ), + cl_ntoh16( p_rec->port_info.diag_code ), + cl_ntoh16( p_rec->port_info.m_key_lease_period ), + p_rec->port_info.local_port_num, + p_rec->port_info.link_width_enabled, + p_rec->port_info.link_width_supported, + p_rec->port_info.link_width_active, + ib_port_info_get_link_speed_sup( &p_rec->port_info ), + ib_get_port_state_str( ib_port_info_get_port_state + ( &p_rec->port_info ) ), + p_rec->port_info.state_info2, + ib_port_info_get_mpb( 
&p_rec->port_info ), + ib_port_info_get_lmc( &p_rec->port_info ), + p_rec->port_info.link_speed, p_rec->port_info.mtu_smsl, + p_rec->port_info.vl_cap, p_rec->port_info.vl_high_limit, + p_rec->port_info.vl_arb_high_cap, + p_rec->port_info.vl_arb_low_cap, + p_rec->port_info.mtu_cap, + p_rec->port_info.vl_stall_life, + p_rec->port_info.vl_enforce, + cl_ntoh16( p_rec->port_info.m_key_violations ), + cl_ntoh16( p_rec->port_info.p_key_violations ), + cl_ntoh16( p_rec->port_info.q_key_violations ), + p_rec->port_info.guid_cap, + ib_port_info_get_timeout(&p_rec->port_info), + p_rec->port_info.resp_time_value, + p_rec->port_info.error_threshold ); + + if( result < 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_port_info: ERR 0161: " "Write failed\n" ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_write_path_info( IN osmtest_t * const p_osmt, + IN FILE * fh, + IN const ib_path_rec_t * const p_rec ) +{ + int result; + cl_status_t status = IB_SUCCESS; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_path_info ); + + result = fprintf( fh, + "DEFINE_PATH\n" + "dgid 0x%016" PRIx64 " 0x%016" PRIx64 + "\nsgid 0x%016" PRIx64 " 0x%016" PRIx64 + "\ndlid 0x%X\n" + "slid 0x%X\n" + "# hop_flow_raw 0x%X\n" + "# tclass 0x%X\n" + "# num_path 0x%X\n" + "pkey 0x%X\n" + "# sl 0x%X\n" + "# mtu 0x%X\n" + "# rate 0x%X\n" + "# pkt_life 0x%X\n" + "# preference 0x%X\n" "END\n\n", + cl_ntoh64( p_rec->dgid.unicast.prefix ), + cl_ntoh64( p_rec->dgid.unicast.interface_id ), + cl_ntoh64( p_rec->sgid.unicast.prefix ), + cl_ntoh64( p_rec->sgid.unicast.interface_id ), + cl_ntoh16( p_rec->dlid ), cl_ntoh16( p_rec->slid ), + cl_ntoh32( p_rec->hop_flow_raw ), p_rec->tclass, + p_rec->num_path, cl_ntoh16( p_rec->pkey ), p_rec->sl, + p_rec->mtu, p_rec->rate, p_rec->pkt_life, + p_rec->preference ); + + if( result < 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_path_info: ERR 0162: " "Write failed\n" ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_write_node_info( IN osmtest_t * const p_osmt, + IN FILE * fh, + IN const ib_node_record_t * const p_rec ) +{ + int result; + cl_status_t status = IB_SUCCESS; + char desc[IB_NODE_DESCRIPTION_SIZE + 1]; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_node_info ); + + memcpy(desc, p_rec->node_desc.description, IB_NODE_DESCRIPTION_SIZE); + desc[IB_NODE_DESCRIPTION_SIZE] = '\0'; + + result = fprintf( fh, + "DEFINE_NODE\n" + "lid 0x%X\n" + "base_version 0x%X\n" + "class_version 0x%X\n" + "node_type 0x%X # (%s)\n" + "num_ports 0x%X\n" + "sys_guid 0x%016" PRIx64 "\n" + "node_guid 0x%016" PRIx64 "\n" + "port_guid 0x%016" PRIx64 "\n" + "partition_cap 0x%X\n" + "device_id 0x%X\n" + "revision 0x%X\n" + "# port_num 0x%X\n" + "# vendor_id 0x%X\n" + "# node_desc %s\n" + "END\n\n", + cl_ntoh16( p_rec->lid ), + p_rec->node_info.base_version, + p_rec->node_info.class_version, + p_rec->node_info.node_type, + ib_get_node_type_str( p_rec->node_info.node_type ), + p_rec->node_info.num_ports, + cl_ntoh64( p_rec->node_info.sys_guid ), + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh64( 
p_rec->node_info.port_guid ),
+                    cl_ntoh16( p_rec->node_info.partition_cap ),
+                    cl_ntoh16( p_rec->node_info.device_id ),
+                    cl_ntoh32( p_rec->node_info.revision ),
+                    ib_node_info_get_local_port_num( &p_rec->node_info ),
+                    cl_ntoh32( ib_node_info_get_vendor_id
+                               ( &p_rec->node_info ) ),
+                    desc );
+
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_node_info: ERR 0163: " "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_write_link( IN osmtest_t * const p_osmt,
+                    IN FILE * fh,
+                    IN const ib_link_record_t * const p_rec )
+{
+  int result;
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_link );
+
+  result = fprintf( fh,
+                    "DEFINE_LINK\n"
+                    "from_lid 0x%X\n"
+                    "from_port_num 0x%X\n"
+                    "to_port_num 0x%X\n"
+                    "to_lid 0x%X\n"
+                    "END\n\n",
+                    cl_ntoh16( p_rec->from_lid ),
+                    p_rec->from_port_num,
+                    p_rec->to_port_num, cl_ntoh16( p_rec->to_lid ) );
+
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_link: ERR 0164: " "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_write_all_link_recs( IN osmtest_t * const p_osmt,
+                             IN FILE * fh )
+{
+  osmtest_req_context_t context;
+  const ib_link_record_t *p_rec;
+  uint32_t i;
+  cl_status_t status;
+  size_t num_recs;
+  int result;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_link_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all LinkRecords in the subnet.
+   */
+  status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_LINK_RECORD,
+                                 sizeof( *p_rec ), &context );
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_link_recs: ERR 0165: "
+             "osmtest_get_all_recs failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  /*
+   * Write the received records out to the file.
+   */
+  num_recs = context.result.result_cnt;
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_write_all_link_recs: "
+             "Received %zu records\n", num_recs );
+  }
+
+  result = fprintf( fh, "#\n" "# Link Records\n" "#\n" );
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_link_recs: ERR 0166: "
+             "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  for( i = 0; i < num_recs; i++ )
+  {
+    p_rec = ( ib_link_record_t * ) osmv_get_query_result( context.result.
+                                                          p_result_madw, i );
+
+    osmtest_write_link( p_osmt, fh, p_rec );
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+ */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_get_path_rec_by_lid_pair( IN osmtest_t * const p_osmt, + IN ib_net16_t slid, + IN ib_net16_t dlid, + IN osmtest_req_context_t *p_context ) +{ + cl_status_t status = IB_SUCCESS; + osmv_query_req_t req; + osmv_lid_pair_t lid_pair; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_path_rec_by_lid_pair ); + + memset( &req, 0, sizeof( req ) ); + memset( p_context, 0, sizeof( *p_context ) ); + + p_context->p_osmt = p_osmt; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + + req.query_type = OSMV_QUERY_PATH_REC_BY_LIDS; + + lid_pair.dest_lid = dlid; + lid_pair.src_lid = slid; + + req.p_query_input = &lid_pair; + req.sm_key = 0; + + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_path_rec_by_lid_pair: " + "Query for path from 0x%X to 0x%X\n", + slid,dlid ); + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_lid_pair: ERR 0053: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = (*p_context).result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_lid_pair: ERR 0067: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_path_rec_by_lid_pair: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( (*p_context).result.p_result_madw ) ) ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +#ifdef VENDOR_RMPP_SUPPORT +/********************************************************************** + * ASSUMES RMPP + **********************************************************************/ +static ib_api_status_t +osmtest_write_all_node_recs( IN osmtest_t * const p_osmt, + IN FILE * fh ) +{ + osmtest_req_context_t context; + const ib_node_record_t *p_rec; + uint32_t i; + cl_status_t status; + size_t num_recs; + int result; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_node_recs ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Do a blocking query for all NodeRecords in the subnet. + */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_NODE_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_all_node_recs: ERR 0022: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Write the received records out to the file. 
+ */
+  num_recs = context.result.result_cnt;
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_write_all_node_recs: "
+             "Received %zu records\n", num_recs );
+  }
+
+  result = fprintf( fh, "#\n" "# Node Records\n" "#\n" );
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_node_recs: ERR 0023: "
+             "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  for( i = 0; i < num_recs; i++ )
+  {
+    p_rec = osmv_get_query_node_rec( context.result.p_result_madw, i );
+    osmtest_write_node_info( p_osmt, fh, p_rec );
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ * ASSUMES RMPP
+ **********************************************************************/
+static ib_api_status_t
+osmtest_write_all_port_recs( IN osmtest_t * const p_osmt,
+                             IN FILE * fh )
+{
+  osmtest_req_context_t context;
+  const ib_portinfo_record_t *p_rec;
+  uint32_t i;
+  cl_status_t status;
+  size_t num_recs;
+  int result;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_port_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all PortInfoRecords in the subnet.
+   */
+  status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_PORTINFO_RECORD,
+                                 sizeof( *p_rec ), &context );
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_port_recs: ERR 0167: "
+             "osmtest_get_all_recs failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  /*
+   * Write the received records out to the file.
+   */
+  num_recs = context.result.result_cnt;
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_write_all_port_recs: "
+             "Received %zu records\n", num_recs );
+  }
+
+  result = fprintf( fh, "#\n" "# PortInfo Records\n" "#\n" );
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_port_recs: ERR 0024: " "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  for( i = 0; i < num_recs; i++ )
+  {
+    p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, i );
+    osmtest_write_port_info( p_osmt, fh, p_rec );
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ * ASSUMES RMPP
+ **********************************************************************/
+static ib_api_status_t
+osmtest_write_all_path_recs(
+  IN osmtest_t * const p_osmt,
+  IN FILE * fh )
+{
+  osmtest_req_context_t context;
+  const ib_path_rec_t *p_rec;
+  uint32_t i;
+  cl_status_t status;
+  size_t num_recs;
+  int result;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_path_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all PathRecords in the subnet.
+ */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_PATH_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_all_path_recs: ERR 0025: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Write the received records out to the file. + */ + num_recs = context.result.result_cnt; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_write_all_path_recs: " + "Received %zu records\n", num_recs ); + } + + result = fprintf( fh, "#\n" "# Path Records\n" "#\n" ); + if( result < 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_all_path_recs: ERR 0026: " + "Write failed\n" ); + status = IB_ERROR; + goto Exit; + } + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i ); + osmtest_write_path_info( p_osmt, fh, p_rec ); + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +#else +/* + * NON RMPP BASED QUERY FOR ALL NODES: BASED ON THE MAX LID GIVEN BY THE USER + */ +static ib_api_status_t +osmtest_write_all_node_recs( + IN osmtest_t * const p_osmt, + IN FILE * fh ) +{ + osmtest_req_context_t context; + node_t *p_node; + node_t *p_guid_node; + const ib_node_record_t *p_rec; + cl_status_t status = CL_SUCCESS; + int result; + uint16_t lid; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_node_recs ); + + result = fprintf( fh, "#\n" "# Node Records\n" "#\n" ); + if( result < 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_all_node_recs: ERR 0027: " + "Write failed\n" ); + status = IB_ERROR; + goto Exit; + } + + /* + * Go over all LIDs in the range 1 to max_lid and do a + * NodeRecord query by that lid. 
+ */
+  for( lid = 1; lid <= p_osmt->max_lid; lid++ )
+  {
+    /* prepare the query context */
+    memset( &context, 0, sizeof( context ) );
+
+    status = osmtest_get_node_rec_by_lid( p_osmt, cl_ntoh16( lid ), &context );
+    if( status != IB_SUCCESS )
+    {
+      if( status != IB_SA_MAD_STATUS_NO_RECORDS )
+      {
+        osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                 "osmtest_write_all_node_recs: ERR 0028: "
+                 "failed to get node info for LID:0x%02X (%s)\n",
+                 cl_ntoh16( lid ),
+                 ib_get_err_str( status ) );
+        goto Exit;
+      }
+      else
+      {
+        osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+                 "osmtest_write_all_node_recs: WRN 0121: "
+                 "failed to get node info for LID:0x%02X (%s)\n",
+                 cl_ntoh16( lid ),
+                 ib_get_err_str( status ) );
+        status = IB_SUCCESS;
+      }
+    }
+    else
+    {
+      /* OK we got something */
+      p_rec = osmv_get_query_node_rec( context.result.p_result_madw, 0 );
+      osmtest_write_node_info( p_osmt, fh, p_rec );
+
+      /* create a subnet object */
+      p_node = node_new( );
+      CL_ASSERT( p_node != NULL );
+
+      /* copy the info to the subnet node object */
+      p_node->rec = *p_rec;
+
+      cl_qmap_insert( &p_osmt->exp_subn.node_lid_tbl,
+                      p_node->rec.lid, &p_node->map_item );
+
+      p_guid_node = node_new( );
+      CL_ASSERT( p_guid_node != NULL );
+
+      *p_guid_node = *p_node;
+
+      cl_qmap_insert( &p_osmt->exp_subn.node_guid_tbl,
+                      p_guid_node->rec.node_info.node_guid,
+                      &p_guid_node->map_item );
+    }
+
+    if( context.result.p_result_madw != NULL )
+    {
+      osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+      context.result.p_result_madw = NULL;
+    }
+  }
+
+ Exit:
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/*
+ * GET ALL PORT RECORDS IN THE FABRIC -
+ * one by one by using the node info received
+ */
+static ib_api_status_t
+osmtest_write_all_port_recs( IN osmtest_t * const p_osmt,
+                             IN FILE * fh )
+{
+  osmtest_req_context_t context;
+  const ib_node_record_t *p_node_rec;
+  const ib_portinfo_record_t *p_rec;
+  uint8_t port_num;
+  cl_status_t status = CL_SUCCESS;
+  cl_qmap_t *p_tbl;
+  node_t *p_node;
+  port_t *p_port;
+  int result;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_port_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /* print header */
+  result = fprintf( fh, "#\n" "# PortInfo Records\n" "#\n" );
+  if( result < 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_write_all_port_recs: ERR 0029: " "Write failed\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* use the pre-explored set of nodes */
+  p_tbl = &p_osmt->exp_subn.node_lid_tbl;
+  p_node = ( node_t * ) cl_qmap_head( p_tbl );
+
+  /*
+   * Go over all the nodes found above and query a PortInfoRecord
+   * for each port of every node.
+ */
+  while( p_node != ( node_t * ) cl_qmap_end( p_tbl ) )
+  {
+    p_node_rec = &( p_node->rec );
+
+    /* go through all ports of the node: */
+    for( port_num = 0; port_num <= p_node_rec->node_info.num_ports; port_num++ )
+    {
+      /* prepare the query context */
+      memset( &context, 0, sizeof( context ) );
+
+      status = osmtest_get_port_rec_by_num( p_osmt,
+                                            p_node_rec->lid,
+                                            port_num,
+                                            &context );
+      if( status != IB_SUCCESS )
+      {
+        if( status != IB_SA_MAD_STATUS_NO_RECORDS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_write_all_port_recs: ERR 0122: "
+                   "Error encountered getting port info for LID:0x%04X Num:0x%02X (%s)\n",
+                   p_node_rec->lid, port_num,
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+        else
+        {
+          osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+                   "osmtest_write_all_port_recs: WRN 0123: "
+                   "failed to get port info for LID:0x%04X Num:0x%02X (%s)\n",
+                   p_node_rec->lid, port_num,
+                   ib_get_err_str( status ) );
+          status = IB_SUCCESS;
+        }
+      }
+      else
+      {
+        /* OK we got something */
+        p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, 0 );
+        osmtest_write_port_info( p_osmt, fh, p_rec );
+
+        /* create a subnet object */
+        p_port = port_new( );
+        CL_ASSERT( p_port != NULL );
+
+        /* copy the info to the subnet port object */
+        p_port->rec = *p_rec;
+
+        cl_qmap_insert( &p_osmt->exp_subn.port_key_tbl,
+                        port_gen_id( p_node_rec->lid, port_num ),
+                        &p_port->map_item );
+      }
+
+      if( context.result.p_result_madw != NULL )
+      {
+        osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+        context.result.p_result_madw = NULL;
+      }
+    }
+    p_node = ( node_t * ) cl_qmap_next( &p_node->map_item );
+  }
+
+  /* we must set the exit status here to avoid aborting the overall algorithm */
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+ */ + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * ASSUMES NO RMPP + **********************************************************************/ +static ib_api_status_t +osmtest_write_all_path_recs( IN osmtest_t * const p_osmt, + IN FILE * fh ) +{ + osmtest_req_context_t context; + const ib_path_rec_t *p_rec; + cl_status_t status = CL_SUCCESS; + int num_recs, i; + cl_qmap_t *p_tbl; + node_t *p_src_node, *p_dst_node; + ib_api_status_t got_status = IB_SUCCESS; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_write_all_path_recs ); + + memset( &context, 0, sizeof( context ) ); + + /* + * Go over all nodes that exist in the subnet + * for each pair that are not switch nodes get the path record + */ + + context.p_osmt = p_osmt; + + p_tbl = &p_osmt->exp_subn.node_lid_tbl; + + p_src_node = ( node_t * ) cl_qmap_head( p_tbl ); + + while( p_src_node != ( node_t * ) cl_qmap_end( p_tbl ) ) + { + /* HACK we use capability_mask to know diff a CA node from switch node */ + /* if(p_src_node->rec.node_info.capability_mask ) { */ + p_dst_node = ( node_t * ) cl_qmap_head( p_tbl ); + + while( p_dst_node != ( node_t * ) cl_qmap_end( p_tbl ) ) + { + /* HACK we use capability_mask to know diff a CA node from switch node */ + /* if (p_dst_node->rec.node_info.capability_mask) { */ + + /* query for it: */ + status = osmtest_get_path_rec_by_lid_pair( p_osmt, + p_src_node->rec.lid, + p_dst_node->rec.lid, + &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_write_all_path_recs: ERR 012D: " + "failed to get path info from LID:0x%X To LID:0x%X (%s)\n", + p_src_node->rec.lid, p_dst_node->rec.lid, + ib_get_err_str( status ) ); + /* remember the first error status */ + got_status = ( got_status == IB_SUCCESS ) ? status : got_status; + } + else + { + /* we might have received several records */ + num_recs = context.result.result_cnt; + for (i = 0; i < num_recs; i++) + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i ); + osmtest_write_path_info( p_osmt, fh, p_rec ); + } + } +/* } */ + + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + /* next one please */ + p_dst_node = ( node_t * ) cl_qmap_next( &p_dst_node->map_item ); + } +/* } */ + + p_src_node = ( node_t * ) cl_qmap_next( &p_src_node->map_item ); + } + + if( got_status != IB_SUCCESS ) + status = got_status; + + /* + * Return the IB query MAD to the pool as necessary. 
+ */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+#endif
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_create_inventory_file( IN osmtest_t * const p_osmt )
+{
+  FILE *fh;
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_create_inventory_file );
+
+  fh = fopen( p_osmt->opt.file_name, "w" );
+  if( fh == NULL )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_create_inventory_file: ERR 0079: "
+             "Unable to open inventory file (%s)\n",
+             p_osmt->opt.file_name );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* HACK: the order is important: nodes ports paths */
+  status = osmtest_write_all_node_recs( p_osmt, fh );
+  if( status != IB_SUCCESS )
+    goto CloseExit;
+
+  status = osmtest_write_all_port_recs( p_osmt, fh );
+  if( status != IB_SUCCESS )
+    goto CloseExit;
+
+  if( !p_osmt->opt.ignore_path_records )
+  {
+    status = osmtest_write_all_path_recs( p_osmt, fh );
+    if( status != IB_SUCCESS )
+      goto CloseExit;
+  }
+
+  status = osmtest_write_all_link_recs( p_osmt, fh );
+
+ CloseExit:
+  /* close the file on the failure paths too, so the handle is not leaked */
+  fclose( fh );
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_stress_large_rmpp_pr( IN osmtest_t * const p_osmt )
+{
+  ib_api_status_t status = IB_SUCCESS;
+  uint64_t num_recs = 0;
+  uint64_t num_queries = 0;
+  uint32_t delta_recs;
+  uint32_t delta_queries;
+  uint32_t print_freq = 0;
+  struct timeval start_tv, end_tv;
+  long sec_diff, usec_diff;
+  float ratio;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_large_rmpp_pr );
+  gettimeofday( &start_tv, NULL );
+  printf( "-I- Start time is : %09ld:%06ld [sec:usec]\n",
+          start_tv.tv_sec, (long)start_tv.tv_usec );
+
+  while( num_queries < STRESS_LARGE_PR_RMPP_THR )
+  {
+    delta_recs = 0;
+    delta_queries = 0;
+
+    status = osmtest_stress_path_recs_by_guid( p_osmt, &delta_recs,
+                                               &delta_queries );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    num_recs += delta_recs;
+    num_queries += delta_queries;
+
+    print_freq += delta_recs;
+    if( print_freq > 10000 )
+    {
+      gettimeofday( &end_tv, NULL );
+      if( end_tv.tv_usec >= start_tv.tv_usec )
+      {
+        sec_diff = end_tv.tv_sec - start_tv.tv_sec;
+        usec_diff = end_tv.tv_usec - start_tv.tv_usec;
+      }
+      else
+      {
+        sec_diff = end_tv.tv_sec - start_tv.tv_sec - 1;
+        usec_diff = 1000000 - ( start_tv.tv_usec - end_tv.tv_usec );
+      }
+      printf( "-I- End time is : %09ld:%06ld [sec:usec]\n",
+              end_tv.tv_sec, (long)end_tv.tv_usec );
+      printf( "-I- Querying %" PRId64 " Path Record queries CA to CA (rmpp)\n\ttook %04ld:%06ld [sec:usec]\n",
+              num_queries, sec_diff, usec_diff );
+      if( num_recs == 0 )
+        ratio = 0;
+      else
+        ratio = ( (float)num_queries / (float)num_recs );
+      printf( "-I- Queries to Record Ratio is %" PRIu64 " records, %" PRIu64 " queries : %.2f \n",
+              num_recs, num_queries, ratio );
+      print_freq = 0;
+    }
+  }
+
+ Exit:
+  gettimeofday( &end_tv, NULL );
+  printf( "-I- End time is : %09ld:%06ld [sec:usec]\n",
+          end_tv.tv_sec, (long)end_tv.tv_usec );
+  if( end_tv.tv_usec >= start_tv.tv_usec )
+  {
+    sec_diff = end_tv.tv_sec - start_tv.tv_sec;
+    usec_diff = end_tv.tv_usec - start_tv.tv_usec;
+  }
+  else
+  {
+    sec_diff =
end_tv.tv_sec-start_tv.tv_sec - 1; + usec_diff = 1000000 - (start_tv.tv_usec - end_tv.tv_usec); + } + + printf("-I- Querying %" PRId64 " Path Record queries (rmpp) took %04ld:%06ld [sec:usec]\n", + num_queries, sec_diff, usec_diff); + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_stress_large_rmpp( IN osmtest_t * const p_osmt ) +{ + ib_api_status_t status = IB_SUCCESS; + uint64_t num_recs = 0; + uint64_t num_queries = 0; + uint32_t delta_recs; + uint32_t delta_queries; + uint32_t print_freq = 0; + struct timeval start_tv,end_tv; + long sec_diff,usec_diff; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_large_rmpp ); + gettimeofday( &start_tv, NULL ); + printf("-I- Start time is : %09ld:%06ld [sec:usec]\n", start_tv.tv_sec, (long)start_tv.tv_usec); + + while( num_queries < STRESS_LARGE_RMPP_THR ) + { + delta_recs = 0; + delta_queries = 0; + + status = osmtest_stress_node_recs_large( p_osmt, &delta_recs, + &delta_queries ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osmtest_stress_path_recs_large( p_osmt, &delta_recs, + &delta_queries ); + if( status != IB_SUCCESS ) + goto Exit; + + status = osmtest_stress_port_recs_large( p_osmt, &delta_recs, + &delta_queries ); + if( status != IB_SUCCESS ) + goto Exit; + + num_recs += delta_recs; + num_queries += delta_queries; + + print_freq += delta_recs; + + if( print_freq > 100000 ) + { + gettimeofday( &end_tv, NULL ); + if (end_tv.tv_usec > start_tv.tv_usec) + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec; + usec_diff = end_tv.tv_usec-start_tv.tv_usec; + } + else + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec - 1; + usec_diff = 1000000 - (start_tv.tv_usec - end_tv.tv_usec); + } + printf("-I- End time is : %09ld:%06ld [sec:usec]\n", + end_tv.tv_sec, (long)end_tv.tv_usec); + printf("-I- Querying %" PRId64 " large mixed queries (rmpp) took %04ld:%06ld [sec:usec]\n", + num_queries, sec_diff, usec_diff); + printf("%" PRIu64 " records, %" PRIu64 " queries\n", + num_recs, num_queries); + print_freq = 0; + } + } + + Exit: + gettimeofday( &end_tv, NULL ); + printf("-I- End time is : %09ld:%06ld [sec:usec]\n", + end_tv.tv_sec, (long)end_tv.tv_usec); + if (end_tv.tv_usec > start_tv.tv_usec) + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec; + usec_diff = end_tv.tv_usec-start_tv.tv_usec; + } + else + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec - 1; + usec_diff = 1000000 - (start_tv.tv_usec - end_tv.tv_usec); + } + + printf("-I- Querying %" PRId64 " large mixed queries (rmpp) took %04ld:%06ld [sec:usec]\n", + num_queries, sec_diff, usec_diff); + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_stress_small_rmpp( IN osmtest_t * const p_osmt ) +{ + ib_api_status_t status = IB_SUCCESS; + uint64_t num_recs = 0; + uint64_t num_queries = 0; + uint32_t delta_recs; + uint32_t delta_queries; + uint32_t print_freq = 0; + int num_timeouts = 0; + struct timeval start_tv,end_tv; + long sec_diff,usec_diff; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_stress_small_rmpp ); + gettimeofday( &start_tv, NULL ); + printf("-I- Start time is : %09ld:%06ld [sec:usec]\n", + start_tv.tv_sec, (long)start_tv.tv_usec); + + while( (num_queries < STRESS_SMALL_RMPP_THR) && (num_timeouts < 100) ) + { + delta_recs = 0; 
+ delta_queries = 0; + + status = osmtest_stress_port_recs_small( p_osmt, &delta_recs, + &delta_queries ); + if( status != IB_SUCCESS ) + goto Exit; + + num_recs += delta_recs; + num_queries += delta_queries; + + print_freq += delta_recs; + if( print_freq > 5000 ) + { + gettimeofday( &end_tv, NULL ); + printf( "%" PRIu64 " records, %" PRIu64 " queries\n", + num_recs, num_queries ); + if (end_tv.tv_usec > start_tv.tv_usec) + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec; + usec_diff = end_tv.tv_usec-start_tv.tv_usec; + } + else + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec - 1; + usec_diff = 1000000 - (start_tv.tv_usec - end_tv.tv_usec); + } + printf("-I- End time is : %09ld:%06ld [sec:usec]\n", + end_tv.tv_sec, (long)end_tv.tv_usec); + printf("-I- Querying %" PRId64 " port_info queries (single mad) took %04ld:%06ld [sec:usec]\n", + num_queries, sec_diff, usec_diff); + print_freq = 0; + } + } + + Exit: + gettimeofday( &end_tv, NULL ); + printf("-I- End time is : %09ld:%06ld [sec:usec]\n", + end_tv.tv_sec, (long)end_tv.tv_usec); + if (end_tv.tv_usec > start_tv.tv_usec) + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec; + usec_diff = end_tv.tv_usec-start_tv.tv_usec; + } + else + { + sec_diff = end_tv.tv_sec-start_tv.tv_sec - 1; + usec_diff = 1000000 - (start_tv.tv_usec - end_tv.tv_usec); + } + + printf("-I- Querying %" PRId64 " port_info queries (single mad) took %04ld:%06ld [sec:usec]\n", + num_queries, sec_diff, usec_diff); + if (num_timeouts > 50) + { + status = IB_TIMEOUT; + } + /* Exit: */ + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static void +osmtest_prepare_db_generic( IN osmtest_t * const p_osmt, + IN cl_qmap_t * const p_tbl ) +{ + generic_t *p_generic; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_prepare_db_generic ); + + p_generic = ( generic_t * ) cl_qmap_head( p_tbl ); + + while( p_generic != ( generic_t * ) cl_qmap_end( p_tbl ) ) + { + p_generic->count = 0; + p_generic = ( generic_t * ) cl_qmap_next( &p_generic->map_item ); + } + + OSM_LOG_EXIT( &p_osmt->log ); +} + +/********************************************************************** + **********************************************************************/ +static void +osmtest_prepare_db( IN osmtest_t * const p_osmt ) +{ + OSM_LOG_ENTER( &p_osmt->log, osmtest_prepare_db ); + + osmtest_prepare_db_generic( p_osmt, &p_osmt->exp_subn.node_lid_tbl ); + osmtest_prepare_db_generic( p_osmt, &p_osmt->exp_subn.path_tbl ); + + OSM_LOG_EXIT( &p_osmt->log ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_check_missing_nodes( IN osmtest_t * const p_osmt ) +{ + const node_t *p_node; + cl_status_t status = IB_SUCCESS; + cl_qmap_t *p_tbl; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_check_missing_nodes ); + + p_tbl = &p_osmt->exp_subn.node_lid_tbl; + + p_node = ( node_t * ) cl_qmap_head( p_tbl ); + + while( p_node != ( node_t * ) cl_qmap_end( p_tbl ) ) + { + if( p_node->count == 0 ) + { + /* + * This node was not reported by the SA + */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_check_missing_nodes: ERR 0080: " + "Missing node 0x%016" PRIx64 "\n", + cl_ntoh64( p_node->rec.node_info.node_guid ) ); + status = IB_ERROR; + } + + p_node = ( node_t * ) cl_qmap_next( &p_node->map_item ); + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + 
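+/*
+ * A note on the bookkeeping used by the check_missing_* walkers here
+ * (a summary sketch of the existing behavior, not new logic):
+ * osmtest_prepare_db() zeroes the per-record "count" fields, each
+ * osmtest_validate_* hit increments the matching record's count, and a
+ * count that is still zero after a full sweep means the SA never
+ * returned that record.  The walk idiom, using the generic_t layout
+ * (illustrative only, not compiled):
+ *
+ *   for( p = ( generic_t * ) cl_qmap_head( p_tbl );
+ *        p != ( generic_t * ) cl_qmap_end( p_tbl );
+ *        p = ( generic_t * ) cl_qmap_next( &p->map_item ) )
+ *     if( p->count == 0 )
+ *       report_missing( p );   <-- hypothetical reporter; the walkers
+ *                                  below log ERR 0080/0081/0051 instead
+ */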
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_check_missing_ports( IN osmtest_t * const p_osmt )
+{
+  const port_t *p_port;
+  cl_status_t status = IB_SUCCESS;
+  cl_qmap_t *p_tbl;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_check_missing_ports );
+
+  p_tbl = &p_osmt->exp_subn.port_key_tbl;
+
+  p_port = ( port_t * ) cl_qmap_head( p_tbl );
+
+  while( p_port != ( port_t * ) cl_qmap_end( p_tbl ) )
+  {
+    if( p_port->count == 0 )
+    {
+      /*
+       * This port was not reported by the SA
+       */
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_check_missing_ports: ERR 0081: "
+               "Missing port LID:0x%X Num:0x%X\n",
+               cl_ntoh16( p_port->rec.lid ), p_port->rec.port_num );
+      status = IB_ERROR;
+    }
+
+    p_port = ( port_t * ) cl_qmap_next( &p_port->map_item );
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_check_missing_paths( IN osmtest_t * const p_osmt )
+{
+  const path_t *p_path;
+  cl_status_t status = IB_SUCCESS;
+  cl_qmap_t *p_tbl;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_check_missing_paths );
+
+  p_tbl = &p_osmt->exp_subn.path_tbl;
+
+  p_path = ( path_t * ) cl_qmap_head( p_tbl );
+
+  while( p_path != ( path_t * ) cl_qmap_end( p_tbl ) )
+  {
+    if( p_path->count == 0 )
+    {
+      /*
+       * This path was not reported by the SA
+       */
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_check_missing_paths: ERR 0051: "
+               "SA did not return path SLID 0x%X to DLID 0x%X\n",
+               cl_ntoh16( p_path->rec.slid ),
+               cl_ntoh16( p_path->rec.dlid ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+
+    p_path = ( path_t * ) cl_qmap_next( &p_path->map_item );
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+inline uint32_t
+osmtest_path_rec_key_get( IN const ib_path_rec_t * const p_rec )
+{
+  return ( p_rec->dlid << 16 | p_rec->slid );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static boolean_t
+osmtest_path_rec_kay_is_valid( IN osmtest_t * const p_osmt,
+                               IN const path_t * const p_path )
+{
+  if( ( p_path->comp.dlid == 0 ) || ( p_path->comp.slid == 0 ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_path_rec_kay_is_valid: ERR 0168: "
+             "SLID and DLID must be specified for defined paths\n" );
+    return ( FALSE );
+  }
+
+  return ( TRUE );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_path_data( IN osmtest_t * const p_osmt,
+                            IN path_t * const p_path,
+                            IN const ib_path_rec_t * const p_rec )
+{
+  cl_status_t status = IB_SUCCESS;
+  uint8_t lmc = 0;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_path_data );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmtest_validate_path_data: "
+             "Checking path SLID 0x%X to DLID 0x%X\n",
+             cl_ntoh16( p_rec->slid ), cl_ntoh16( p_rec->dlid ) );
+  }
+
+  status = osmtest_get_local_port_lmc( p_osmt, p_osmt->local_port.lid, &lmc );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
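+  /*
+   * A worked example of the path-count bound applied below (a sketch,
+   * assuming the uniform-LMC HACK that follows): an endport with
+   * LMC = l answers to 2^l consecutive LIDs, so a source/destination
+   * endport pair can be addressed by up to 2^l * 2^l = 2^(2*l)
+   * distinct (SLID, DLID) combinations.  That is the 1 << (2 * lmc)
+   * limit checked below; for lmc == 1, for instance, up to 4 distinct
+   * paths per endport pair are legitimate.
+   */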
+  /* HACK: Assume uniform LMC across endports in the subnet */
+  /* This is the only LMC mode which OpenSM currently supports */
+  /* In the absence of this assumption, validation is much more complicated */
+  if( lmc == 0 )
+  {
+    /*
+     * Has this record already been returned?
+     */
+    if( p_path->count != 0 )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_path_data: ERR 0056: "
+               "Already received path SLID 0x%X to DLID 0x%X\n",
+               cl_ntoh16( p_rec->slid ), cl_ntoh16( p_rec->dlid ) );
+      status = IB_ERROR;
+      goto Exit;
+    }
+  }
+  else
+  {
+    /* Also, this doesn't detect fewer than the correct number of paths being returned */
+    if( p_path->count >= (uint32_t)( 1 << ( 2 * lmc ) ) )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_path_data: ERR 0052: "
+               "Already received path SLID 0x%X to DLID 0x%X count %d LMC %d\n",
+               cl_ntoh16( p_rec->slid ), cl_ntoh16( p_rec->dlid ),
+               p_path->count, lmc );
+      status = IB_ERROR;
+      goto Exit;
+    }
+  }
+
+  ++p_path->count;
+
+  /*
+   * Check the fields the user wants checked.
+   */
+  if( ( p_path->comp.dgid.unicast.interface_id &
+        p_path->rec.dgid.unicast.interface_id ) !=
+      ( p_path->comp.dgid.unicast.interface_id &
+        p_rec->dgid.unicast.interface_id ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_path_data: ERR 0169: "
+             "DGID mismatch on path SLID 0x%X to DLID 0x%X\n"
+             "\t\t\t\tExpected 0x%016" PRIx64 " 0x%016" PRIx64 "\n"
+             "\t\t\t\tReceived 0x%016" PRIx64 " 0x%016" PRIx64 "\n",
+             cl_ntoh16( p_path->rec.slid ),
+             cl_ntoh16( p_path->rec.dlid ),
+             cl_ntoh64( p_path->rec.dgid.unicast.prefix ),
+             cl_ntoh64( p_path->rec.dgid.unicast.interface_id ),
+             cl_ntoh64( p_rec->dgid.unicast.prefix ),
+             cl_ntoh64( p_rec->dgid.unicast.interface_id ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /*
+   * Check the fields the user wants checked.
+   */
+  if( ( p_path->comp.sgid.unicast.interface_id &
+        p_path->rec.sgid.unicast.interface_id ) !=
+      ( p_path->comp.sgid.unicast.interface_id &
+        p_rec->sgid.unicast.interface_id ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_path_data: ERR 0057: "
+             "SGID mismatch on path SLID 0x%X to DLID 0x%X\n"
+             "\t\t\t\tExpected 0x%016" PRIx64 " 0x%016" PRIx64 ",\n"
+             "\t\t\t\tReceived 0x%016" PRIx64 " 0x%016" PRIx64 ".\n",
+             cl_ntoh16( p_path->rec.slid ),
+             cl_ntoh16( p_path->rec.dlid ),
+             cl_ntoh64( p_path->rec.sgid.unicast.prefix ),
+             cl_ntoh64( p_path->rec.sgid.unicast.interface_id ),
+             cl_ntoh64( p_rec->sgid.unicast.prefix ),
+             cl_ntoh64( p_rec->sgid.unicast.interface_id ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /*
+   * Compare the fields the user wishes to validate.
+ */
+  if( ( p_path->comp.pkey & p_path->rec.pkey ) !=
+      ( p_path->comp.pkey & p_rec->pkey ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_path_data: ERR 0012: "
+             "PKEY mismatch on path SLID 0x%X to DLID 0x%X\n"
+             "\t\t\t\tExpected 0x%X, received 0x%X\n",
+             cl_ntoh16( p_path->rec.slid ),
+             cl_ntoh16( p_path->rec.dlid ),
+             cl_ntoh16( p_path->rec.pkey ),
+             cl_ntoh16( p_rec->pkey ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_node_data( IN osmtest_t * const p_osmt,
+                            IN node_t * const p_node,
+                            IN const ib_node_record_t * const p_rec )
+{
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_node_data );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmtest_validate_node_data: "
+             "Checking node 0x%016" PRIx64 ", LID 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ) );
+  }
+
+  /*
+   * Has this record already been returned?
+   */
+  if( p_node->count != 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_data: ERR 0013: "
+             "Already received node 0x%016" PRIx64 "\n",
+             cl_ntoh64( p_node->rec.node_info.node_guid ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  ++p_node->count;
+
+  /*
+   * Compare the fields the user wishes to validate.
+   */
+  if( ( p_node->comp.lid & p_node->rec.lid ) !=
+      ( p_node->comp.lid & p_rec->lid ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_data: ERR 0014: "
+             "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n"
+             "\t\t\t\tExpected LID 0x%X, received 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ), p_node->rec.lid, p_rec->lid );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_node->comp.node_info.base_version &
+        p_node->rec.node_info.base_version ) !=
+      ( p_node->comp.node_info.base_version &
+        p_rec->node_info.base_version ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_data: ERR 0015: "
+             "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n"
+             "\t\t\t\tExpected base_version 0x%X, received 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ),
+             p_node->rec.node_info.base_version,
+             p_rec->node_info.base_version );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_node->comp.node_info.class_version &
+        p_node->rec.node_info.class_version ) !=
+      ( p_node->comp.node_info.class_version &
+        p_rec->node_info.class_version ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_data: ERR 0016: "
+             "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n"
+             "\t\t\t\tExpected class_version 0x%X, received 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ),
+             p_node->rec.node_info.class_version,
+             p_rec->node_info.class_version );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_node->comp.node_info.node_type &
+        p_node->rec.node_info.node_type ) !=
+      ( p_node->comp.node_info.node_type & p_rec->node_info.node_type ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_data: ERR 0017: "
+             "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n"
+             "\t\t\t\tExpected node_type 0x%X, received 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ),
+             p_node->rec.node_info.node_type,
p_rec->node_info.node_type ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.sys_guid & + p_node->rec.node_info.sys_guid ) != + ( p_node->comp.node_info.sys_guid & p_rec->node_info.sys_guid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0018: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected sys_guid 0x%016" PRIx64 + ", received 0x%016" PRIx64 "\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh64( p_node->rec.node_info.sys_guid ), + cl_ntoh64( p_rec->node_info.sys_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.node_guid & + p_node->rec.node_info.node_guid ) != + ( p_node->comp.node_info.node_guid & p_rec->node_info.node_guid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0019: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected node_guid 0x%016" PRIx64 + ", received 0x%016" PRIx64 "\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh64( p_node->rec.node_info.node_guid ), + cl_ntoh64( p_rec->node_info.node_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.port_guid & + p_node->rec.node_info.port_guid ) != + ( p_node->comp.node_info.port_guid & p_rec->node_info.port_guid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0031: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected port_guid 0x%016" PRIx64 + ", received 0x%016" PRIx64 "\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh64( p_node->rec.node_info.port_guid ), + cl_ntoh64( p_rec->node_info.port_guid ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.partition_cap & + p_node->rec.node_info.partition_cap ) != + ( p_node->comp.node_info.partition_cap & + p_rec->node_info.partition_cap ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0032: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected partition_cap 0x%X" + ", received 0x%X\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh16( p_node->rec.node_info.partition_cap ), + cl_ntoh16( p_rec->node_info.partition_cap ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.device_id & + p_node->rec.node_info.device_id ) != + ( p_node->comp.node_info.device_id & p_rec->node_info.device_id ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0033: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected device_id 0x%X" + ", received 0x%X\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh16( p_node->rec.node_info.device_id ), + cl_ntoh16( p_rec->node_info.device_id ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_node->comp.node_info.revision & + p_node->rec.node_info.revision ) != + ( p_node->comp.node_info.revision & p_rec->node_info.revision ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_node_data: ERR 0034: " + "Field mismatch node 0x%016" PRIx64 ", LID 0x%X\n" + "\t\t\t\tExpected revision 0x%X" + ", received 0x%X\n", + cl_ntoh64( p_rec->node_info.node_guid ), + cl_ntoh16( p_rec->lid ), + cl_ntoh32( p_node->rec.node_info.revision ), + cl_ntoh32( p_rec->node_info.revision ) ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + 
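+/*
+ * Note on the comparison idiom used throughout the osmtest_validate_*
+ * routines above and below (a sketch of the existing pattern, not new
+ * logic): each "comp" record is a per-field bit mask that selects which
+ * bits of the inventory value must match the value returned by the SA,
+ * e.g.
+ *
+ *   if( ( p_node->comp.lid & p_node->rec.lid ) !=
+ *       ( p_node->comp.lid & p_rec->lid ) )
+ *       ...mismatch...
+ *
+ * so a comp field of 0 disables the check entirely, and an all-ones
+ * comp field demands an exact match.
+ */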
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_node_rec( IN osmtest_t * const p_osmt,
+                           IN const ib_node_record_t * const p_rec )
+{
+  cl_status_t status = IB_SUCCESS;
+  node_t *p_node;
+  const cl_qmap_t *p_tbl;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_node_rec );
+
+  /*
+   * Find the proper node record in the database.
+   */
+  p_tbl = &p_osmt->exp_subn.node_lid_tbl;
+  p_node = ( node_t * ) cl_qmap_get( p_tbl, p_rec->lid );
+  if( p_node == ( node_t * ) cl_qmap_end( p_tbl ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_node_rec: ERR 0035: "
+             "Unexpected node 0x%016" PRIx64 ", LID 0x%X\n",
+             cl_ntoh64( p_rec->node_info.node_guid ),
+             cl_ntoh16( p_rec->lid ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  status = osmtest_validate_node_data( p_osmt, p_node, p_rec );
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_port_data( IN osmtest_t * const p_osmt,
+                            IN port_t * const p_port,
+                            IN const ib_portinfo_record_t * const p_rec )
+{
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_port_data );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmtest_validate_port_data: "
+             "Checking port LID 0x%X, Num 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num );
+  }
+
+  /*
+   * Has this record already been returned?
+   */
+  if( p_port->count != 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0036: "
+             "Already received port LID 0x%X, Num 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  ++p_port->count;
+
+  /*
+   * Compare the fields the user wishes to validate.
+ */ + if( ( p_port->comp.lid & p_port->rec.lid ) != + ( p_port->comp.lid & p_rec->lid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0037: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected LID 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.lid, p_rec->lid ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_num & p_port->rec.port_num ) != + ( p_port->comp.port_num & p_rec->port_num ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0038: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected port_num 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_num, p_rec->port_num ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.m_key & p_port->rec.port_info.m_key ) != + ( p_port->comp.port_info.m_key & p_rec->port_info.m_key ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0039: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected m_key 0x%016" PRIx64 ", received 0x%016" PRIx64 "\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.m_key, p_rec->port_info.m_key ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.subnet_prefix & p_port->rec.port_info.subnet_prefix ) != + ( p_port->comp.port_info.subnet_prefix & p_rec->port_info.subnet_prefix ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0040: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected subnet_prefix 0x%016" PRIx64 ", received 0x%016" PRIx64 "\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.subnet_prefix, p_rec->port_info.subnet_prefix ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.base_lid & p_port->rec.port_info.base_lid ) != + ( p_port->comp.port_info.base_lid & p_rec->port_info.base_lid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0041: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected base_lid 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.base_lid, p_rec->port_info.base_lid ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.master_sm_base_lid & p_port->rec.port_info.master_sm_base_lid ) != + ( p_port->comp.port_info.master_sm_base_lid & p_rec->port_info.master_sm_base_lid ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0042: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected master_sm_base_lid 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.master_sm_base_lid, p_rec->port_info.master_sm_base_lid ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.capability_mask & p_port->rec.port_info.capability_mask ) != + ( p_port->comp.port_info.capability_mask & p_rec->port_info.capability_mask ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0043: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected capability_mask 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.capability_mask, p_rec->port_info.capability_mask ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.diag_code & p_port->rec.port_info.diag_code ) != + ( p_port->comp.port_info.diag_code & p_rec->port_info.diag_code ) ) + { + osm_log( &p_osmt->log, 
OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0044: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected diag_code 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.diag_code, p_rec->port_info.diag_code ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.m_key_lease_period & p_port->rec.port_info.m_key_lease_period ) != + ( p_port->comp.port_info.m_key_lease_period & p_rec->port_info.m_key_lease_period ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0045: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected m_key_lease_period 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.m_key_lease_period, p_rec->port_info.m_key_lease_period ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.local_port_num & p_port->rec.port_info.local_port_num ) != + ( p_port->comp.port_info.local_port_num & p_rec->port_info.local_port_num ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0046: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected local_port_num 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.local_port_num, p_rec->port_info.local_port_num ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.link_width_enabled & p_port->rec.port_info.link_width_enabled ) != + ( p_port->comp.port_info.link_width_enabled & p_rec->port_info.link_width_enabled ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0047: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected link_width_enabled 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.link_width_enabled, p_rec->port_info.link_width_enabled ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.link_width_supported & p_port->rec.port_info.link_width_supported ) != + ( p_port->comp.port_info.link_width_supported & p_rec->port_info.link_width_supported ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0048: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected link_width_supported 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.link_width_supported, p_rec->port_info.link_width_supported ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.link_width_active & p_port->rec.port_info.link_width_active ) != + ( p_port->comp.port_info.link_width_active & p_rec->port_info.link_width_active ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0049: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected link_width_active 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.link_width_active, p_rec->port_info.link_width_active ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.link_speed & p_port->rec.port_info.link_speed ) != + ( p_port->comp.port_info.link_speed & p_rec->port_info.link_speed ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0054: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected link_speed 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.link_speed, p_rec->port_info.link_speed ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.state_info1 & 
p_port->rec.port_info.state_info1 ) !=
+      ( p_port->comp.port_info.state_info1 & p_rec->port_info.state_info1 ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0055: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected state_info1 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.state_info1, p_rec->port_info.state_info1 );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_port->comp.port_info.state_info2 & p_port->rec.port_info.state_info2 ) !=
+      ( p_port->comp.port_info.state_info2 & p_rec->port_info.state_info2 ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0058: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected state_info2 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.state_info2, p_rec->port_info.state_info2 );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_port->comp.port_info.mkey_lmc & p_port->rec.port_info.mkey_lmc ) !=
+      ( p_port->comp.port_info.mkey_lmc & p_rec->port_info.mkey_lmc ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0059: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected mkey_lmc 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.mkey_lmc, p_rec->port_info.mkey_lmc );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  /* link_speed is already validated once above (ERR 0054) */
+
+  if( ( p_port->comp.port_info.mtu_smsl & p_port->rec.port_info.mtu_smsl ) !=
+      ( p_port->comp.port_info.mtu_smsl & p_rec->port_info.mtu_smsl ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0061: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected mtu_smsl 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.mtu_smsl, p_rec->port_info.mtu_smsl );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_port->comp.port_info.vl_cap & p_port->rec.port_info.vl_cap ) !=
+      ( p_port->comp.port_info.vl_cap & p_rec->port_info.vl_cap ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0062: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected vl_cap 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.vl_cap, p_rec->port_info.vl_cap );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_port->comp.port_info.vl_high_limit & p_port->rec.port_info.vl_high_limit ) !=
+      ( p_port->comp.port_info.vl_high_limit & p_rec->port_info.vl_high_limit ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_data: ERR 0082: "
+             "Field mismatch port LID 0x%X Num:0x%X\n"
+             "\t\t\t\tExpected vl_high_limit 0x%X, received 0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num,
+             p_port->rec.port_info.vl_high_limit, p_rec->port_info.vl_high_limit );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  if( ( p_port->comp.port_info.vl_arb_high_cap & p_port->rec.port_info.vl_arb_high_cap ) !=
+      ( p_port->comp.port_info.vl_arb_high_cap & p_rec->port_info.vl_arb_high_cap ) )
+  {
+
osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0083: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected vl_arb_high_cap 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.vl_arb_high_cap, p_rec->port_info.vl_arb_high_cap ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.vl_arb_low_cap & p_port->rec.port_info.vl_arb_low_cap ) != + ( p_port->comp.port_info.vl_arb_low_cap & p_rec->port_info.vl_arb_low_cap ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0084: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected vl_arb_low_cap 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.vl_arb_low_cap, p_rec->port_info.vl_arb_low_cap ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.mtu_cap & p_port->rec.port_info.mtu_cap ) != + ( p_port->comp.port_info.mtu_cap & p_rec->port_info.mtu_cap ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0085: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected mtu_cap 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.mtu_cap, p_rec->port_info.mtu_cap ); + status = IB_ERROR; + goto Exit; + } + +#if 0 + /* this is a dynamic attribute */ + if( ( p_port->comp.port_info.vl_stall_life & p_port->rec.port_info.vl_stall_life ) != + ( p_port->comp.port_info.vl_stall_life & p_rec->port_info.vl_stall_life ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 012F: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected vl_stall_life 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.vl_stall_life, + p_rec->port_info.vl_stall_life ); + status = IB_ERROR; + goto Exit; + } +#endif + + if( ( p_port->comp.port_info.vl_enforce & p_port->rec.port_info.vl_enforce ) != + ( p_port->comp.port_info.vl_enforce & p_rec->port_info.vl_enforce ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0086: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected vl_enforce 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.vl_enforce, p_rec->port_info.vl_enforce ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.m_key_violations & p_port->rec.port_info.m_key_violations ) != + ( p_port->comp.port_info.m_key_violations & p_rec->port_info.m_key_violations ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0087: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected m_key_violations 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + cl_ntoh16( p_port->rec.port_info.m_key_violations ), + cl_ntoh16( p_rec->port_info.m_key_violations ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.p_key_violations & p_port->rec.port_info.p_key_violations ) != + ( p_port->comp.port_info.p_key_violations & p_rec->port_info.p_key_violations ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0088: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected p_key_violations 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + cl_ntoh16( p_port->rec.port_info.p_key_violations ), + cl_ntoh16( p_rec->port_info.p_key_violations ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( 
p_port->comp.port_info.q_key_violations & p_port->rec.port_info.q_key_violations ) != + ( p_port->comp.port_info.q_key_violations & p_rec->port_info.q_key_violations ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0089: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected q_key_violations 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + cl_ntoh16( p_port->rec.port_info.q_key_violations ), + cl_ntoh16( p_rec->port_info.q_key_violations ) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.guid_cap & p_port->rec.port_info.guid_cap ) != + ( p_port->comp.port_info.guid_cap & p_rec->port_info.guid_cap ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0090: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected guid_cap 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.guid_cap, p_rec->port_info.guid_cap ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.subnet_timeout & p_port->rec.port_info.subnet_timeout ) != + ( p_port->comp.port_info.subnet_timeout & p_rec->port_info.subnet_timeout ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0091: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected subnet_timeout 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + ib_port_info_get_timeout(&p_port->rec.port_info), + ib_port_info_get_timeout(&p_rec->port_info) ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.resp_time_value & p_port->rec.port_info.resp_time_value ) != + ( p_port->comp.port_info.resp_time_value & p_rec->port_info.resp_time_value ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0092: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected resp_time_value 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.resp_time_value, + p_rec->port_info.resp_time_value ); + status = IB_ERROR; + goto Exit; + } + + if( ( p_port->comp.port_info.error_threshold & p_port->rec.port_info.error_threshold ) != + ( p_port->comp.port_info.error_threshold & p_rec->port_info.error_threshold ) ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_port_data: ERR 0093: " + "Field mismatch port LID 0x%X Num:0x%X\n" + "\t\t\t\tExpected error_threshold 0x%X, received 0x%X\n", + cl_ntoh16( p_rec->lid ), p_rec->port_num, + p_port->rec.port_info.error_threshold, + p_rec->port_info.error_threshold ); + status = IB_ERROR; + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_validate_port_rec( IN osmtest_t * const p_osmt, + IN const ib_portinfo_record_t * const p_rec ) +{ + cl_status_t status = IB_SUCCESS; + port_t *p_port; + const cl_qmap_t *p_tbl; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_port_rec ); + + /* + * Find proper port record in the database. 
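All of the PortInfo field checks above share a single masked-compare idiom: the record's component mask selects which bits of a field the inventory actually specified, and only those bits take part in the comparison, so unspecified fields can never produce a false mismatch. A minimal sketch of the idiom as a standalone helper (the helper name is ours, not part of osmtest):

/* Illustrative helper: compare only the bits selected by comp_mask.
 * Bits the inventory never specified are masked out on both sides. */
static inline int
osmt_masked_match( uint8_t comp_mask, uint8_t expected, uint8_t received )
{
    return ( ( comp_mask & expected ) == ( comp_mask & received ) );
}

In the checks above, p_port->comp plays the role of comp_mask; the inventory parser near the end of this hunk arms it field by field as the values are read in.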
+   * We key the lookup by LID and port number, since a LID alone is
+   * not unique.
+   */
+  p_tbl = &p_osmt->exp_subn.port_key_tbl;
+  p_port = ( port_t * ) cl_qmap_get( p_tbl,
+                                     port_gen_id( p_rec->lid, p_rec->port_num ) );
+  if( p_port == ( port_t * ) cl_qmap_end( p_tbl ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_port_rec: ERR 0094: "
+             "Unexpected port LID 0x%X, Num:0x%X\n",
+             cl_ntoh16( p_rec->lid ), p_rec->port_num );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  status = osmtest_validate_port_data( p_osmt, p_port, p_rec );
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_path_rec( IN osmtest_t * const p_osmt,
+                           IN const ib_path_rec_t * const p_rec )
+{
+  cl_status_t status = IB_SUCCESS;
+  path_t *p_path;
+  const cl_qmap_t *p_tbl;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_path_rec );
+
+  /*
+   * Find the proper path record in the database.
+   */
+  p_tbl = &p_osmt->exp_subn.path_tbl;
+  p_path =
+    ( path_t * ) cl_qmap_get( p_tbl, osmtest_path_rec_key_get( p_rec ) );
+  if( p_path == ( path_t * ) cl_qmap_end( p_tbl ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_path_rec: ERR 0095: "
+             "Unexpected path SLID 0x%X to DLID 0x%X\n",
+             cl_ntoh16( p_rec->slid ), cl_ntoh16( p_rec->dlid ) );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  status = osmtest_validate_path_data( p_osmt, p_path, p_rec );
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+#ifdef VENDOR_RMPP_SUPPORT
+ib_net64_t portguid = 0;
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_all_node_recs( IN osmtest_t * const p_osmt )
+{
+  osmtest_req_context_t context;
+  const ib_node_record_t *p_rec;
+  uint32_t i;
+  cl_status_t status;
+  size_t num_recs;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_all_node_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all NodeRecords in the subnet.
+   */
+  status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_NODE_RECORD,
+                                 sizeof( *p_rec ), &context );
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_all_node_recs: ERR 0096: "
+             "osmtest_get_all_recs failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  num_recs = context.result.result_cnt;
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_validate_all_node_recs: "
+             "Received %zu records\n", num_recs );
+  }
+
+  /*
+   * Compare the received records to the database.
+   */
+  osmtest_prepare_db( p_osmt );
+
+  for( i = 0; i < num_recs; i++ )
+  {
+    p_rec = osmv_get_query_node_rec( context.result.p_result_madw, i );
+
+    status = osmtest_validate_node_rec( p_osmt, p_rec );
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_all_node_recs: ERR 0097: "
+               "osmtest_validate_node_rec failed (%s)\n",
+               ib_get_err_str( status ) );
+      goto Exit;
+    }
+    if (!portguid)
+      portguid = p_rec->node_info.port_guid;
+  }
+
+  status = osmtest_check_missing_nodes( p_osmt );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_all_node_recs: ERR 0098: "
+             "osmtest_check_missing_nodes failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_all_guidinfo_recs( IN osmtest_t * const p_osmt )
+{
+  osmtest_req_context_t context;
+  const ib_guidinfo_record_t *p_rec;
+  cl_status_t status;
+  size_t num_recs;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_all_guidinfo_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all GuidInfoRecords in the subnet.
+   */
+  status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_GUIDINFO_RECORD,
+                                 sizeof( *p_rec ), &context );
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_all_guidinfo_recs: ERR 0099: "
+             "osmtest_get_all_recs failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  num_recs = context.result.result_cnt;
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_validate_all_guidinfo_recs: "
+             "Received %zu records\n", num_recs );
+  }
+
+  /* No validation as yet */
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_all_path_recs( IN osmtest_t * const p_osmt )
+{
+  osmtest_req_context_t context;
+  const ib_path_rec_t *p_rec;
+  uint32_t i;
+  cl_status_t status;
+  size_t num_recs;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_all_path_recs );
+
+  memset( &context, 0, sizeof( context ) );
+
+  /*
+   * Do a blocking query for all PathRecords in the subnet.
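Each of the osmtest_validate_all_*_recs routines above and below follows the same shape: one blocking get-all query, a walk over the returned MAD validating every record against the expected database, a check for records the SA failed to report, and an unconditional return of the result MAD to the pool. A condensed sketch of that shared flow, with validate_one and check_missing standing in for the per-attribute callbacks (both names are illustrative):

/* Sketch of the bulk-validation shape shared by the routines above;
 * validate_one() and check_missing() stand in for the per-attribute
 * callbacks, and attr_id/rec_size vary per record type. */
static ib_api_status_t
osmt_validate_all( osmtest_t * const p_osmt, ib_net16_t attr_id,
                   size_t rec_size )
{
    osmtest_req_context_t context;
    ib_api_status_t status;
    uint32_t i;

    memset( &context, 0, sizeof( context ) );
    status = osmtest_get_all_recs( p_osmt, attr_id, rec_size, &context );
    if( status != IB_SUCCESS )
        goto Exit;

    osmtest_prepare_db( p_osmt );
    for( i = 0; i < context.result.result_cnt; i++ )
    {
        status = validate_one( p_osmt, context.result.p_result_madw, i );
        if( status != IB_SUCCESS )
            goto Exit;
    }
    status = check_missing( p_osmt );

 Exit:
    /* Always recycle the result MAD, even on the error paths. */
    if( context.result.p_result_madw != NULL )
    {
        osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
        context.result.p_result_madw = NULL;
    }
    return ( status );
}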
+ */ + status = osmtest_get_all_recs( p_osmt, IB_MAD_ATTR_PATH_RECORD, + sizeof( *p_rec ), &context ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_all_path_recs: ERR 009A: " + "osmtest_get_all_recs failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + num_recs = context.result.result_cnt; + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_all_path_recs: " + "Received %zu records\n", num_recs ); + } + + /* + * Compare the received records to the database. + */ + osmtest_prepare_db( p_osmt ); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i ); + + status = osmtest_validate_path_rec( p_osmt, p_rec ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_all_path_recs: ERR 0100: " + "osmtest_validate_path_rec failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + } + + status = osmtest_check_missing_paths( p_osmt ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_all_path_recs: ERR 0101: " + "osmtest_check_missing_paths failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. + */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Get link record by LID + **********************************************************************/ +ib_api_status_t +osmtest_get_link_rec_by_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const from_lid, + IN ib_net16_t const to_lid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_link_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_link_rec_by_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_link_rec_by_lid: " + "Getting link record from LID 0x%02X to LID 0x%02X\n", + cl_ntoh16( from_lid ), cl_ntoh16( to_lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
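A recurring detail in the user-defined queries that follow: the SA expresses attr_offset in 8-byte units rather than bytes, which is why every request computes sizeof( record ) >> 3 and byte-swaps the result. A hypothetical convenience macro would make the unit conversion explicit:

/* Hypothetical helper: SA attribute offsets count 8-byte words, carried
 * in network byte order; this is the sizeof( ... ) >> 3 computation seen
 * in each query below. */
#define OSMT_ATTR_OFFSET( rec_type ) \
    cl_ntoh16( ( uint16_t ) ( sizeof( rec_type ) >> 3 ) )

/* e.g.  user.attr_offset = OSMT_ATTR_OFFSET( ib_link_record_t ); */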
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.from_lid = from_lid; + record.to_lid = to_lid; + p_context->p_osmt = p_osmt; + if (from_lid) + user.comp_mask |= IB_LR_COMPMASK_FROM_LID; + if (to_lid) + user.comp_mask |= IB_LR_COMPMASK_TO_LID; + user.attr_id = IB_MAD_ATTR_LINK_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_link_rec_by_lid: ERR 007A: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_link_rec_by_lid: ERR 007B: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_link_rec_by_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Get GUIDInfo record by LID + **********************************************************************/ +ib_api_status_t +osmtest_get_guidinfo_rec_by_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_guidinfo_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_guidinfo_rec_by_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_guidinfo_rec_by_lid: " + "Getting GUIDInfo record for LID 0x%02X\n", + cl_ntoh16( lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + p_context->p_osmt = p_osmt; + user.comp_mask = IB_GIR_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_GUIDINFO_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_guidinfo_rec_by_lid: ERR 007C: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_guidinfo_rec_by_lid: ERR 007D: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_guidinfo_rec_by_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Get PKeyTable record by LID + **********************************************************************/ +ib_api_status_t +osmtest_get_pkeytbl_rec_by_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN ib_net64_t const sm_key, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_pkey_table_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_pkeytbl_rec_by_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_pkeytbl_rec_by_lid: " + "Getting PKeyTable record for LID 0x%02X\n", + cl_ntoh16( lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
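Unlike the other fetchers in this group, the PKeyTable request below threads a caller-supplied sm_key into the query, since the SA should disclose partition tables only to a trusted, SM-key-bearing requester. The validation flow later in this hunk relies on exactly that contrast; a sketch of the expected pair of outcomes (test_lid and context stand for the variables used there):

/* Sketch: the trusted query should succeed; the negative test later in
 * this file treats success with sm_key == 0 as a failure of the SA's
 * access check. */
status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, test_lid,
                                         OSM_DEFAULT_SM_KEY, &context );
/* expected: IB_SUCCESS */

status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, test_lid, 0, &context );
/* expected: an error status, since no SM key was presented */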
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + p_context->p_osmt = p_osmt; + user.comp_mask = IB_PKEY_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_PKEY_TBL_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = sm_key; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_pkeytbl_rec_by_lid: ERR 007E: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_pkeytbl_rec_by_lid: ERR 007F: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_pkeytbl_rec_by_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + * Get LFT record by LID + **********************************************************************/ +ib_api_status_t +osmtest_get_lft_rec_by_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_lft_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_get_lft_rec_by_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_get_lft_rec_by_lid: " + "Getting LFT record for LID 0x%02X\n", + cl_ntoh16( lid ) ); + } + + /* + * Do a blocking query for this record in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + p_context->p_osmt = p_osmt; + user.comp_mask = IB_LFTR_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_LFT_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_lft_rec_by_lid: ERR 008A: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_lft_rec_by_lid: ERR 008B: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_lft_rec_by_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +ib_api_status_t +osmtest_sminfo_record_request( + IN osmtest_t * const p_osmt, + IN OUT osmtest_req_context_t * const p_context ) +{ + ib_api_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_sminfo_record_t record; + ib_mad_t *p_mad; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_sminfo_record_request ); + + /* + * Do a blocking query for these records in the subnet. + * The result is returned in the result field of the caller's + * context structure. + * + * The query structures are locals. 
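By this point the pattern is plain: the preceding request helpers differ only in attribute ID, component mask, and the key fields copied into the record; the rest is identical synchronous-query plumbing. A condensed sketch of the shared setup folded into one function (the function name and exact parameter types are illustrative):

/* Sketch only: one synchronous user-defined SA query carrying the knobs
 * the helpers above actually vary. */
static ib_api_status_t
osmt_query_one_attr( IN osmtest_t * const p_osmt,
                     IN ib_net16_t attr_id,
                     IN ib_net64_t comp_mask,
                     IN void *p_attr,
                     IN uint16_t attr_size,
                     IN OUT osmtest_req_context_t * const p_context )
{
    osmv_user_query_t user;
    osmv_query_req_t req;

    memset( &user, 0, sizeof( user ) );
    memset( &req, 0, sizeof( req ) );

    p_context->p_osmt = p_osmt;
    user.comp_mask = comp_mask;
    user.attr_id = attr_id;
    user.attr_offset = cl_ntoh16( ( uint16_t ) ( attr_size >> 3 ) );
    user.p_attr = p_attr;

    req.query_type = OSMV_QUERY_USER_DEFINED;
    req.timeout_ms = p_osmt->opt.transaction_timeout;
    req.retry_cnt = p_osmt->opt.retry_count;
    req.flags = OSM_SA_FLAGS_SYNC;      /* block until the callback fires */
    req.query_context = p_context;
    req.pfn_query_cb = osmtest_query_res_cb;
    req.p_query_input = &user;
    req.sm_key = 0;

    return osmv_query_sa( p_osmt->h_bind, &req );
}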
+ */ + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + p_context->p_osmt = p_osmt; + user.attr_id = IB_MAD_ATTR_SMINFO_RECORD; + user.attr_offset = cl_ntoh16( ( uint16_t ) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = p_context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_sminfo_record_request: ERR 008C: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = p_context->result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_sminfo_record_request: ERR 008D: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + if( status == IB_REMOTE_ERROR ) + { + p_mad = osm_madw_get_mad_ptr( p_context->result.p_result_madw ); + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_sminfo_record_request: " + "Remote error = %s\n", + ib_get_mad_status_str( p_mad )); + + status = (ib_net16_t) (p_mad->status & IB_SMP_STATUS_MASK ); + } + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} +#endif + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_validate_single_path_rec_lid_pair( IN osmtest_t * const p_osmt, + IN path_t * const p_path ) +{ + osmtest_req_context_t context; + const ib_path_rec_t *p_rec; + cl_status_t status = IB_SUCCESS; + size_t num_recs; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_path_rec_lid_pair ); + + memset( &context, 0, sizeof( context ) ); + + status = osmtest_get_path_rec_by_lid_pair( p_osmt, + p_path->rec.slid, + p_path->rec.dlid, + &context ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_path_rec_lid_pair: ERR 0102: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + num_recs = context.result.result_cnt; + if( num_recs != 1 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_path_rec_lid_pair: ERR 0103: " + "Too many records. Expected 1, received %zu\n", num_recs ); + + status = IB_ERROR; + } + else + { + p_rec = osmv_get_query_path_rec( context.result.p_result_madw, 0 ); + + status = osmtest_validate_path_data( p_osmt, p_path, p_rec ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_path_rec_lid_pair: ERR 0104: " + "osmtest_validate_path_data failed (%s)\n", + ib_get_err_str( status ) ); + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. 
+ */ + if( context.result.p_result_madw != NULL ) + { + osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw ); + context.result.p_result_madw = NULL; + } + + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_validate_single_node_rec_lid( IN osmtest_t * const p_osmt, + IN ib_net16_t const lid, + IN node_t * const p_node ) +{ + cl_status_t status = IB_SUCCESS; + osmv_user_query_t user; + osmv_query_req_t req; + ib_node_record_t record; + + osmtest_req_context_t context; + const ib_node_record_t *p_rec; + int num_recs, i; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_node_rec_lid ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_validate_single_node_rec_lid: " + "Getting NodeRecord for node with LID 0x%X\n", + cl_ntoh16( lid ) ); + } + + memset( &context, 0, sizeof( context ) ); + memset( &req, 0, sizeof( req ) ); + memset( &user, 0, sizeof( user ) ); + memset( &record, 0, sizeof( record ) ); + + record.lid = lid; + + context.p_osmt = p_osmt; + user.comp_mask = IB_NR_COMPMASK_LID; + user.attr_id = IB_MAD_ATTR_NODE_RECORD; + user.attr_offset = cl_ntoh16( (uint16_t) ( sizeof( record ) >> 3 ) ); + user.p_attr = &record; + + req.query_type = OSMV_QUERY_USER_DEFINED; + req.timeout_ms = p_osmt->opt.transaction_timeout; + req.retry_cnt = p_osmt->opt.retry_count; + req.flags = OSM_SA_FLAGS_SYNC; + req.query_context = &context; + req.pfn_query_cb = osmtest_query_res_cb; + req.p_query_input = &user; + req.sm_key = 0; + + status = osmv_query_sa( p_osmt->h_bind, &req ); + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_rec_lid: ERR 0105: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + goto Exit; + } + + status = context.result.status; + + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_rec_lid: ERR 0106: " + "ib_query failed (%s)\n", ib_get_err_str( status ) ); + + if( status == IB_REMOTE_ERROR ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_rec_lid: " + "Remote error = %s\n", + ib_get_mad_status_str( osm_madw_get_mad_ptr + ( context.result.p_result_madw ) ) ); + } + goto Exit; + } + + num_recs = context.result.result_cnt; + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_node_rec_lid: " + "Received %d nodes\n", num_recs); + + for( i = 0; i < num_recs; i++ ) + { + p_rec = osmv_get_query_node_rec( context.result.p_result_madw, i ); + + status = osmtest_validate_node_rec( p_osmt, p_rec ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_rec_lid: ERR 0107: " + "osmtest_validate_node_data failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + } + + Exit: + /* + * Return the IB query MAD to the pool as necessary. 
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_single_port_rec_lid( IN osmtest_t * const p_osmt,
+                                      IN port_t * const p_port )
+{
+  osmtest_req_context_t context;
+
+  const ib_portinfo_record_t *p_rec;
+  cl_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_port_rec_lid );
+
+  memset( &context, 0, sizeof( context ) );
+
+  context.p_osmt = p_osmt;
+  status = osmtest_get_port_rec_by_num( p_osmt,
+                                        p_port->rec.lid,
+                                        p_port->rec.port_num,
+                                        &context );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_single_port_rec_lid: ERR 0108: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  /* we should have received exactly one port record */
+  p_rec = osmv_get_query_portinfo_rec( context.result.p_result_madw, 0 );
+  status = osmtest_validate_port_rec( p_osmt, p_rec );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_single_port_rec_lid: ERR 0109: "
+             "osmtest_validate_port_rec failed (%s)\n",
+             ib_get_err_str( status ) );
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_single_path_rec_guid_pair( IN osmtest_t * const p_osmt,
+                                            IN const osmv_guid_pair_t *
+                                            const p_pair )
+{
+  osmtest_req_context_t context;
+  const ib_path_rec_t *p_rec;
+  cl_status_t status = IB_SUCCESS;
+  size_t num_recs;
+  osmv_query_req_t req;
+  uint32_t i;
+  boolean_t got_error = FALSE;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_path_rec_guid_pair );
+
+  memset( &req, 0, sizeof( req ) );
+  memset( &context, 0, sizeof( context ) );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+             "osmtest_validate_single_path_rec_guid_pair: "
+             "\n\t\t\t\tChecking src 0x%016" PRIx64
+             " to dest 0x%016" PRIx64 "\n",
+             cl_ntoh64( p_pair->src_guid ),
+             cl_ntoh64( p_pair->dest_guid ) );
+  }
+
+  context.p_osmt = p_osmt;
+
+  req.timeout_ms = p_osmt->opt.transaction_timeout;
+  req.retry_cnt = p_osmt->opt.retry_count;
+  req.flags = OSM_SA_FLAGS_SYNC;
+  req.query_context = &context;
+  req.pfn_query_cb = osmtest_query_res_cb;
+
+  req.query_type = OSMV_QUERY_PATH_REC_BY_PORT_GUIDS;
+  req.p_query_input = p_pair;
+  req.sm_key = 0;
+
+  status = osmv_query_sa( p_osmt->h_bind, &req );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_single_path_rec_guid_pair: ERR 0110: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  status = context.result.status;
+
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_single_path_rec_guid_pair: ERR 0111: "
+             "ib_query failed (%s)\n", ib_get_err_str( status ) );
+
+    if( status == IB_REMOTE_ERROR )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_single_path_rec_guid_pair: "
+               "Remote error = %s\n",
+               ib_get_mad_status_str( osm_madw_get_mad_ptr
+                                      ( context.result.p_result_madw ) ) );
+    }
+    goto Exit;
+  }
+
+  num_recs = context.result.result_cnt;
+  osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+           "osmtest_validate_single_path_rec_guid_pair: %zu records\n",
+           num_recs );
+
+  for( i = 0; i < num_recs; i++ )
+  {
+    p_rec = osmv_get_query_path_rec( context.result.p_result_madw, i );
+
+    /*
+     * Make sure the GUID values are correct
+     */
+    if( p_rec->dgid.unicast.interface_id != p_pair->dest_guid )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_single_path_rec_guid_pair: ERR 0112: "
+               "Destination GUID mismatch\n"
+               "\t\t\t\texpected 0x%016" PRIx64
+               ", received 0x%016" PRIx64 "\n",
+               cl_ntoh64( p_pair->dest_guid ),
+               cl_ntoh64( p_rec->dgid.unicast.interface_id ) );
+      got_error = TRUE;
+    }
+
+    if( p_rec->sgid.unicast.interface_id != p_pair->src_guid )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_single_path_rec_guid_pair: ERR 0113: "
+               "Source GUID mismatch\n"
+               "\t\t\t\texpected 0x%016" PRIx64
+               ", received 0x%016" PRIx64 "\n",
+               cl_ntoh64( p_pair->src_guid ),
+               cl_ntoh64( p_rec->sgid.unicast.interface_id ) );
+      got_error = TRUE;
+    }
+
+    status = osmtest_validate_path_rec( p_osmt, p_rec );
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_single_path_rec_guid_pair: ERR 0114: "
+               "osmtest_validate_path_rec failed (%s)\n",
+               ib_get_err_str( status ) );
+      got_error = TRUE;
+    }
+    if( got_error || ( status != IB_SUCCESS ) )
+    {
+      osm_dump_path_record( &p_osmt->log, p_rec, OSM_LOG_VERBOSE );
+      if( status == IB_SUCCESS )
+        status = IB_ERROR;
+      goto Exit;
+    }
+  }
+
+ Exit:
+  /*
+   * Return the IB query MAD to the pool as necessary.
+   */
+  if( context.result.p_result_madw != NULL )
+  {
+    osm_mad_pool_put( &p_osmt->mad_pool, context.result.p_result_madw );
+    context.result.p_result_madw = NULL;
+  }
+
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_single_path_recs( IN osmtest_t * const p_osmt )
+{
+  path_t *p_path;
+  cl_status_t status = IB_SUCCESS;
+  const cl_qmap_t *p_path_tbl;
+  /* We skip node-to-node path record validation, since it might contain
+     nonexistent paths (e.g. when UP/DN routing is used). */
+  osmv_guid_pair_t guid_pair;
+  uint16_t cnt;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_path_recs );
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_validate_single_path_recs: "
+             "Validating individual path record queries\n" );
+  }
+  p_path_tbl = &p_osmt->exp_subn.path_tbl;
+
+  osmtest_prepare_db( p_osmt );
+
+  /*
+   * Walk the list of all path records, and ask for each one
+   * specifically. Make sure we get it.
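The per-record walks below all use the standard complib quantum-map traversal: cl_qmap_head to start, cl_qmap_next to advance, cl_qmap_end as the sentinel. For readers new to complib, the skeleton looks like this (item_t and visit are illustrative; the real containers are the path_t, node_t, and port_t records, each of which embeds a cl_map_item_t named map_item):

/* Generic cl_qmap walk skeleton, assuming item_t embeds a
 * cl_map_item_t member named map_item. */
item_t *p_item = ( item_t * ) cl_qmap_head( p_tbl );
while( p_item != ( item_t * ) cl_qmap_end( p_tbl ) )
{
    visit( p_item );
    p_item = ( item_t * ) cl_qmap_next( &p_item->map_item );
}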
+ */ + cnt = 0; + p_path = ( path_t * ) cl_qmap_head( p_path_tbl ); + while( p_path != ( path_t * ) cl_qmap_end( p_path_tbl ) ) + { + status = osmtest_validate_single_path_rec_lid_pair( p_osmt, p_path ); + if( status != IB_SUCCESS ) + goto Exit; + cnt++; + p_path = ( path_t * ) cl_qmap_next( &p_path->map_item ); + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_path_recs: " + "Total of %u path records validated using LID based query\n", cnt ); + } + + status = osmtest_check_missing_paths( p_osmt ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_path_recs: ERR 0115: " + "osmtest_check_missing_paths failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + /* + * Do the whole thing again with port GUID pairs. + * Note that multiple path records may be returned + * for each guid pair if LMC > 0. + */ + osmtest_prepare_db( p_osmt ); + cnt = 0; + p_path = ( path_t * ) cl_qmap_head( p_path_tbl ); + while( p_path != ( path_t * ) cl_qmap_end( p_path_tbl ) ) + { + guid_pair.src_guid = p_path->rec.sgid.unicast.interface_id; + guid_pair.dest_guid = p_path->rec.dgid.unicast.interface_id; + status = osmtest_validate_single_path_rec_guid_pair( p_osmt, + &guid_pair ); + if( status != IB_SUCCESS ) + goto Exit; + cnt++; + p_path = ( path_t * ) cl_qmap_next( &p_path->map_item ); + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_path_recs: " + "Total of %u path records validated using GUID based query\n", cnt ); + } + + status = osmtest_check_missing_paths( p_osmt ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_path_recs: ERR 0116: " + "osmtest_check_missing_paths failed (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_validate_single_node_recs( IN osmtest_t * const p_osmt ) +{ + node_t *p_node; + cl_status_t status = IB_SUCCESS; + const cl_qmap_t *p_node_lid_tbl; + uint16_t cnt = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_node_recs ); + + p_node_lid_tbl = &p_osmt->exp_subn.node_lid_tbl; + + osmtest_prepare_db( p_osmt ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_node_recs: " + "Validating individual node record queries\n" ); + } + + /* + * Walk the list of all node records, and ask for each one + * specifically. Make sure we get it. 
+ */ + p_node = ( node_t * ) cl_qmap_head( p_node_lid_tbl ); + while( p_node != ( node_t * ) cl_qmap_end( p_node_lid_tbl ) ) + { + status = osmtest_validate_single_node_rec_lid( p_osmt, + (ib_net16_t) cl_qmap_key ((cl_map_item_t*)p_node), + p_node ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_recs: ERR 011A: " + "osmtest_validate_single_node_rec_lid (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + cnt++; + p_node = ( node_t * ) cl_qmap_next( &p_node->map_item ); + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_node_recs: " + "Total of %u node records validated\n", cnt ); + } + + status = osmtest_check_missing_nodes( p_osmt ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_validate_single_node_recs: ERR 0117: " + "osmtest_check_missing_nodes (%s)\n", + ib_get_err_str( status ) ); + goto Exit; + } + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_validate_single_port_recs( IN osmtest_t * const p_osmt ) +{ + port_t *p_port; + cl_status_t status = IB_SUCCESS; + const cl_qmap_t *p_port_key_tbl; + uint16_t cnt = 0; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_single_port_recs ); + + p_port_key_tbl = &p_osmt->exp_subn.port_key_tbl; + + osmtest_prepare_db( p_osmt ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) ) + { + osm_log( &p_osmt->log, OSM_LOG_VERBOSE, + "osmtest_validate_single_port_recs: " + "Validating individual port record queries\n" ); + } + + /* + * Walk the list of all port records, and ask for each one + * specifically. Make sure we get it. 
+   */
+  p_port = ( port_t * ) cl_qmap_head( p_port_key_tbl );
+  while( p_port != ( port_t * ) cl_qmap_end( p_port_key_tbl ) )
+  {
+    status = osmtest_validate_single_port_rec_lid( p_osmt, p_port );
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_validate_single_port_recs: ERR 011B: "
+               "osmtest_validate_single_port_rec_lid failed (%s)\n",
+               ib_get_err_str( status ) );
+      goto Exit;
+    }
+    cnt++;
+    p_port = ( port_t * ) cl_qmap_next( &p_port->map_item );
+  }
+
+  if( osm_log_is_active( &p_osmt->log, OSM_LOG_VERBOSE ) )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_VERBOSE,
+             "osmtest_validate_single_port_recs: "
+             "Total of %u port records validated\n", cnt );
+  }
+
+  status = osmtest_check_missing_ports( p_osmt );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_validate_single_port_recs: ERR 0118: "
+             "osmtest_check_missing_ports failed (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_validate_against_db( IN osmtest_t * const p_osmt )
+{
+  ib_api_status_t status = IB_SUCCESS;
+  ib_gid_t portgid, mgid;
+#ifdef VENDOR_RMPP_SUPPORT
+  ib_net64_t sm_key;
+  ib_net16_t test_lid;
+  uint8_t lmc;
+  osmtest_req_context_t context;
+#ifdef DUAL_SIDED_RMPP
+  osmv_multipath_req_t request;
+#endif
+#endif
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_validate_against_db );
+
+#ifdef VENDOR_RMPP_SUPPORT
+  status = osmtest_validate_all_node_recs( p_osmt );
+  if( status != IB_SUCCESS )
+    goto Exit;
+#endif
+
+  status = osmtest_validate_single_node_recs( p_osmt );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* Exercise SA PathRecord multicast destination code */
+  memset( &context, 0, sizeof( context ) );
+  ib_gid_set_default( &portgid, portguid );
+  /* Set IPoIB broadcast MGID */
+  mgid.unicast.prefix = CL_HTON64(0xff12401bffff0000ULL);
+  mgid.unicast.interface_id = CL_HTON64(0x00000000ffffffffULL);
+  /* Can't check status as don't know whether port is running IPoIB */
+  osmtest_get_path_rec_by_gid_pair( p_osmt, portgid, mgid, &context );
+
+#if defined (VENDOR_RMPP_SUPPORT) && defined (DUAL_SIDED_RMPP)
+  memset( &context, 0, sizeof( context ) );
+  memset( &request, 0, sizeof( request ) );
+  request.comp_mask = IB_MPR_COMPMASK_SGIDCOUNT | IB_MPR_COMPMASK_DGIDCOUNT;
+  request.sgid_count = 1;
+  request.dgid_count = 1;
+  ib_gid_set_default( &request.gids[0], portguid );
+  ib_gid_set_default( &request.gids[1], portguid );
+  status = osmtest_get_multipath_rec( p_osmt, &request, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  memset( &context, 0, sizeof( context ) );
+  memset( &request, 0, sizeof( request ) );
+
+  osm_log( &p_osmt->log, OSM_LOG_ERROR,
+           "osmtest_get_multipath_rec: " EXPECTING_ERRORS_START "\n" );
+  status = osmtest_get_multipath_rec( p_osmt, &request, &context );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_get_multipath_rec: "
+             "Got error %s\n", ib_get_err_str(status) );
+  }
+  osm_log( &p_osmt->log, OSM_LOG_ERROR,
+           "osmtest_get_multipath_rec: " EXPECTING_ERRORS_END "\n" );
+
+  if( status == IB_SUCCESS )
+  {
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  memset( &context, 0, sizeof( context ) );
+  memset( &request, 0, sizeof( request ) );
+  request.comp_mask = IB_MPR_COMPMASK_SGIDCOUNT;
+  request.sgid_count = 1;
+  ib_gid_set_default( &request.gids[0],
portguid ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_START "\n" ); + status = osmtest_get_multipath_rec( p_osmt, &request, &context ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " + "Got error %s\n", ib_get_err_str(status) ); + } + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_END "\n" ); + + if( status == IB_SUCCESS ) + { + status = IB_ERROR; + goto Exit; + } + + memset( &context, 0, sizeof( context ) ); + memset( &request, 0, sizeof( request ) ); + request.comp_mask = IB_MPR_COMPMASK_SGIDCOUNT | IB_MPR_COMPMASK_DGIDCOUNT; + request.sgid_count = 1; + request.dgid_count = 1; + ib_gid_set_default( &request.gids[0], portguid ); + /* Set IPoIB broadcast MGID */ + request.gids[1].unicast.prefix = CL_HTON64(0xff12401bffff0000ULL); + request.gids[1].unicast.interface_id = CL_HTON64(0x00000000ffffffffULL); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_START "\n" ); + status = osmtest_get_multipath_rec( p_osmt, &request, &context ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " + "Got error %s\n", ib_get_err_str(status) ); + } + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_END "\n" ); + + if( status == IB_SUCCESS ) + { + status = IB_ERROR; + goto Exit; + } + + memset( &context, 0, sizeof( context ) ); + request.comp_mask = IB_MPR_COMPMASK_SGIDCOUNT | IB_MPR_COMPMASK_DGIDCOUNT; + request.sgid_count = 1; + request.dgid_count = 1; + /* Set IPoIB broadcast MGID */ + request.gids[0].unicast.prefix = CL_HTON64(0xff12401bffff0000ULL); + request.gids[0].unicast.interface_id = CL_HTON64(0x00000000ffffffffULL); + ib_gid_set_default( &request.gids[1], portguid ); + + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_START "\n" ); + status = osmtest_get_multipath_rec( p_osmt, &request, &context ); + if( status != IB_SUCCESS ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " + "Got error %s\n", ib_get_err_str(status) ); + } + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_get_multipath_rec: " EXPECTING_ERRORS_END "\n" ); + + if( status == IB_SUCCESS ) + { + status = IB_ERROR; + goto Exit; + } + + memset( &context, 0, sizeof( context ) ); + memset( &request, 0, sizeof( request ) ); + request.comp_mask = IB_MPR_COMPMASK_SGIDCOUNT | IB_MPR_COMPMASK_DGIDCOUNT | + IB_MPR_COMPMASK_NUMBPATH; + request.sgid_count = 2; + request.dgid_count = 2; + request.num_path = 2; + ib_gid_set_default( &request.gids[0], portguid ); + ib_gid_set_default( &request.gids[1], portguid ); + ib_gid_set_default( &request.gids[2], portguid ); + ib_gid_set_default( &request.gids[3], portguid ); + status = osmtest_get_multipath_rec( p_osmt, &request, &context ); + if( status != IB_SUCCESS ) + goto Exit; +#endif + +#ifdef VENDOR_RMPP_SUPPORT + /* GUIDInfoRecords */ + status = osmtest_validate_all_guidinfo_recs( p_osmt ); + if( status != IB_SUCCESS ) + goto Exit; + + /* If LMC > 0, test non base LID SA PortInfoRecord request */ + status = osmtest_get_local_port_lmc( p_osmt, p_osmt->local_port.lid, &lmc ); + if ( status != IB_SUCCESS ) + goto Exit; + + if (lmc != 0) + { + status = osmtest_get_local_port_lmc( p_osmt, p_osmt->local_port.lid + 1, NULL ); + if ( status != IB_SUCCESS ) + goto Exit; + } + + status = osmtest_get_local_port_lmc( p_osmt, 0xffff, NULL ); + if ( status != IB_SUCCESS 
+  )
+    goto Exit;
+
+  test_lid = cl_ntoh16( p_osmt->local_port.lid );
+
+  /* More GUIDInfo Record tests */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_guidinfo_rec_by_lid( p_osmt, test_lid, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_guidinfo_rec_by_lid( p_osmt, 0xffff, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* Some PKeyTable Record tests */
+  sm_key = OSM_DEFAULT_SM_KEY;
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, test_lid, sm_key, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  memset( &context, 0, sizeof( context ) );
+
+  osm_log( &p_osmt->log, OSM_LOG_ERROR,
+           "osmtest_get_pkeytbl_rec_by_lid: " EXPECTING_ERRORS_START "\n" );
+  status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, test_lid, 0, &context );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_get_pkeytbl_rec_by_lid: "
+             "Got error %s\n", ib_get_err_str(status) );
+  }
+  osm_log( &p_osmt->log, OSM_LOG_ERROR,
+           "osmtest_get_pkeytbl_rec_by_lid: " EXPECTING_ERRORS_END "\n" );
+
+  if( status == IB_SUCCESS )
+  {
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, 0xffff, sm_key, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* LFT Record test */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_lft_rec_by_lid( p_osmt, test_lid, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* Some LinkRecord tests */
+  /* FromLID */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_link_rec_by_lid( p_osmt, test_lid, 0, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* ToLID */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_link_rec_by_lid( p_osmt, 0, test_lid, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* FromLID & ToLID */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_link_rec_by_lid( p_osmt, test_lid, test_lid, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* NodeRecord test */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_get_node_rec_by_lid( p_osmt, 0xffff, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  /* SMInfoRecord test */
+  memset( &context, 0, sizeof( context ) );
+  status = osmtest_sminfo_record_request( p_osmt, &context );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  if (lmc != 0)
+  {
+    test_lid = cl_ntoh16( p_osmt->local_port.lid + 1 );
+
+    /* Another GUIDInfo Record test */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_guidinfo_rec_by_lid( p_osmt, test_lid, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    /* Another PKeyTable Record test */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_pkeytbl_rec_by_lid( p_osmt, test_lid, sm_key, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    /* Another LFT Record test */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_lft_rec_by_lid( p_osmt, test_lid, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    /* More LinkRecord tests */
+    /* FromLID */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_link_rec_by_lid( p_osmt, test_lid, 0, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    /* ToLID */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_link_rec_by_lid( p_osmt, 0, test_lid, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    /* Another NodeRecord test */
+    memset( &context, 0, sizeof( context ) );
+    status = osmtest_get_node_rec_by_lid( p_osmt, test_lid, &context );
+    if( status != IB_SUCCESS )
+      goto Exit;
+  }
+
+  /* PathRecords */
+  if (! p_osmt->opt.ignore_path_records)
+  {
+    status = osmtest_validate_all_path_recs( p_osmt );
+    if( status != IB_SUCCESS )
+      goto Exit;
+
+    if (lmc != 0)
+    {
+      memset( &context, 0, sizeof( context ) );
+      status = osmtest_get_path_rec_by_lid_pair( p_osmt, test_lid,
+                                                 test_lid, &context );
+      if( status != IB_SUCCESS )
+        goto Exit;
+
+      memset( &context, 0, sizeof( context ) );
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_get_path_rec_by_lid_pair: " EXPECTING_ERRORS_START "\n" );
+      status = osmtest_get_path_rec_by_lid_pair( p_osmt, 0xffff,
+                                                 0xffff, &context );
+      if( status != IB_SUCCESS )
+      {
+        osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                 "osmtest_get_path_rec_by_lid_pair: "
+                 "Got error %s\n", ib_get_err_str(status) );
+      }
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_get_path_rec_by_lid_pair: " EXPECTING_ERRORS_END "\n" );
+
+      if( status == IB_SUCCESS )
+      {
+        status = IB_ERROR;
+        goto Exit;
+      }
+
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_get_path_rec_by_lid_pair: " EXPECTING_ERRORS_START "\n" );
+
+      status = osmtest_get_path_rec_by_lid_pair( p_osmt, test_lid,
+                                                 0xffff, &context );
+      if( status != IB_SUCCESS )
+      {
+        osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                 "osmtest_get_path_rec_by_lid_pair: "
+                 "Got error %s\n", ib_get_err_str(status) );
+      }
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_get_path_rec_by_lid_pair: " EXPECTING_ERRORS_END "\n" );
+
+      if( status == IB_SUCCESS )
+      {
+        status = IB_ERROR;
+        goto Exit;
+      }
+    }
+  }
+#endif
+
+  status = osmtest_validate_single_port_recs( p_osmt );
+  if( status != IB_SUCCESS )
+    goto Exit;
+
+  if (! p_osmt->opt.ignore_path_records)
+  {
+    status = osmtest_validate_single_path_recs( p_osmt );
+    if( status != IB_SUCCESS )
+      goto Exit;
+  }
+
+ Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static const osmtest_token_t *
+str_get_token( IN char *const p_str )
+{
+  const osmtest_token_t *p_tok;
+  uint32_t index = 0;
+
+  p_tok = &token_array[index];
+
+  while( p_tok->val != OSMTEST_TOKEN_UNKNOWN )
+  {
+    if( strnicmp( p_str, p_tok->str, p_tok->str_size ) == 0 )
+      return ( p_tok );
+
+    p_tok = &token_array[++index];
+  }
+
+  return ( NULL );
+}
+
+/**********************************************************************
+ Skips leading tabs and spaces; returns TRUE if a non-whitespace
+ character is encountered before end of line.
+**********************************************************************/
+static boolean_t
+str_skip_white( IN char line[],
+                IN OUT uint32_t * const p_offset )
+{
+  while( ( ( line[*p_offset] == '\t' ) ||
+           ( line[*p_offset] == ' ' ) ) &&
+         ( line[*p_offset] != '\n' ) && ( line[*p_offset] != '\0' ) )
+  {
+    ++*p_offset;
+  }
+
+  if( ( line[*p_offset] == '\n' ) || ( line[*p_offset] == '\0' ) )
+    return ( FALSE );
+  else
+    return ( TRUE );
+}
+
+/**********************************************************************
+ Skips past the current token, advancing the offset to the next
+ whitespace character or end of string.
+**********************************************************************/ +static void +str_skip_token( IN char line[], + IN OUT uint32_t * const p_offset ) +{ + while( ( line[*p_offset] != '\t' ) && + ( line[*p_offset] != ' ' ) && ( line[*p_offset] != '\0' ) ) + { + ++*p_offset; + } +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_parse_node( IN osmtest_t * const p_osmt, + IN FILE * const fh, + IN OUT uint32_t * const p_line_num ) +{ + ib_api_status_t status = IB_SUCCESS; + uint32_t offset; + char line[OSMTEST_MAX_LINE_LEN]; + boolean_t done = FALSE; + node_t *p_node; + node_t *p_guid_node; + const osmtest_token_t *p_tok; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_parse_node ); + + p_node = node_new( ); + CL_ASSERT( p_node != NULL ); + + /* + * Parse the inventory file and create the database. + */ + while( !done ) + { + if( fgets( line, OSMTEST_MAX_LINE_LEN, fh ) == NULL ) + { + /* + * End of file in the middle of a definition. + */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_node: ERR 0119: " + "Unexpected end of file\n" ); + status = IB_ERROR; + goto Exit; + } + + ++*p_line_num; + + /* + * Skip whitespace + */ + offset = 0; + if( !str_skip_white( line, &offset ) ) + continue; /* whole line was whitespace */ + + p_tok = str_get_token( &line[offset] ); + if( p_tok == NULL ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_node: ERR 0120: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + continue; + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "Found '%s' (line %u)\n", p_tok->str, *p_line_num ); + } + + str_skip_token( line, &offset ); + + switch ( p_tok->val ) + { + case OSMTEST_TOKEN_COMMENT: + break; + + case OSMTEST_TOKEN_LID: + p_node->comp.lid = 0xFFFF; + p_node->rec.lid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "lid = 0x%X\n", cl_ntoh16( p_node->rec.lid ) ); + } + break; + + case OSMTEST_TOKEN_BASE_VERSION: + p_node->comp.node_info.base_version = 0xFF; + p_node->rec.node_info.base_version = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "base_version = 0x%X\n", + p_node->rec.node_info.base_version ); + } + break; + + case OSMTEST_TOKEN_CLASS_VERSION: + p_node->comp.node_info.class_version = 0xFF; + p_node->rec.node_info.class_version = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "class_version = 0x%X\n", + p_node->rec.node_info.class_version ); + } + break; + + case OSMTEST_TOKEN_NODE_TYPE: + p_node->comp.node_info.node_type = 0xFF; + p_node->rec.node_info.node_type = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "node_type = 0x%X\n", + p_node->rec.node_info.node_type ); + } + break; + + case OSMTEST_TOKEN_NUM_PORTS: + p_node->comp.node_info.num_ports = 0xFF; + p_node->rec.node_info.num_ports = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( 
&p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "num_ports = 0x%X\n", + p_node->rec.node_info.num_ports ); + } + break; + + case OSMTEST_TOKEN_SYS_GUID: + p_node->comp.node_info.sys_guid = 0xFFFFFFFFFFFFFFFFULL; + p_node->rec.node_info.sys_guid = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "sys_guid = 0x%016" PRIx64 "\n", + cl_ntoh64( p_node->rec.node_info.sys_guid ) ); + } + break; + + case OSMTEST_TOKEN_NODE_GUID: + p_node->comp.node_info.node_guid = 0xFFFFFFFFFFFFFFFFULL; + p_node->rec.node_info.node_guid = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "node_guid = 0x%016" PRIx64 "\n", + cl_ntoh64( p_node->rec.node_info.node_guid ) ); + } + break; + + case OSMTEST_TOKEN_PORT_GUID: + p_node->comp.node_info.port_guid = 0xFFFFFFFFFFFFFFFFULL; + p_node->rec.node_info.port_guid = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "port_guid = 0x%016" PRIx64 "\n", + cl_ntoh64( p_node->rec.node_info.port_guid ) ); + } + break; + + case OSMTEST_TOKEN_PARTITION_CAP: + p_node->comp.node_info.partition_cap = 0xFFFF; + p_node->rec.node_info.partition_cap = cl_hton16( ( uint16_t ) + strtoul( &line[offset], + NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "partition_cap = 0x%X\n", + cl_ntoh16( p_node->rec.node_info.partition_cap ) ); + } + break; + + case OSMTEST_TOKEN_DEVICE_ID: + p_node->comp.node_info.device_id = 0xFFFF; + p_node->rec.node_info.device_id = cl_hton16( ( uint16_t ) + strtoul( &line[offset], + NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "device_id = 0x%X\n", + cl_ntoh16( p_node->rec.node_info.device_id ) ); + } + break; + + case OSMTEST_TOKEN_REVISION: + p_node->comp.node_info.revision = 0xFFFFFFFF; + p_node->rec.node_info.revision = + cl_hton32( strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "revision = 0x%X\n", + cl_ntoh32( p_node->rec.node_info.revision ) ); + } + break; + + case OSMTEST_TOKEN_PORT_NUM: + p_node->comp.node_info.port_num_vendor_id |= IB_NODE_INFO_PORT_NUM_MASK; + p_node->rec.node_info.port_num_vendor_id |= + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_node: " + "local_port_num = 0x%X\n", + ib_node_info_get_local_port_num( &p_node->rec. 
+                 node_info ) );
+      }
+      break;
+
+    case OSMTEST_TOKEN_VENDOR_ID:
+      p_node->comp.node_info.port_num_vendor_id |= IB_NODE_INFO_VEND_ID_MASK;
+      p_node->rec.node_info.port_num_vendor_id |=
+        cl_hton32( strtoul( &line[offset], NULL, 0 ) );
+      if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+      {
+        osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+                 "osmtest_parse_node: "
+                 "vendor_id = 0x%X\n",
+                 cl_ntoh32( ib_node_info_get_vendor_id
+                            ( &p_node->rec.node_info ) ) );
+      }
+      break;
+
+    case OSMTEST_TOKEN_END:
+      done = TRUE;
+      break;
+
+    default:
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_parse_node: ERR 0121: "
+               "Ignoring line %u with unknown token: %s\n",
+               *p_line_num, &line[offset] );
+
+      break;
+    }
+  }
+
+  /*
+   * Make sure the user specified enough information, then
+   * add this object to the database.
+   */
+  if( p_node->comp.lid == 0 )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_parse_node: ERR 0122: "
+             "LID must be specified for defined nodes\n" );
+    node_delete( p_node );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  cl_qmap_insert( &p_osmt->exp_subn.node_lid_tbl,
+                  p_node->rec.lid, &p_node->map_item );
+
+  p_guid_node = node_new( );
+  CL_ASSERT( p_guid_node != NULL );
+
+  *p_guid_node = *p_node;
+
+  cl_qmap_insert( &p_osmt->exp_subn.node_guid_tbl,
+                  p_guid_node->rec.node_info.node_guid,
+                  &p_guid_node->map_item );
+
+  Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+static ib_api_status_t
+osmtest_parse_port( IN osmtest_t * const p_osmt,
+                    IN FILE * const fh,
+                    IN OUT uint32_t * const p_line_num )
+{
+  ib_api_status_t status = IB_SUCCESS;
+  uint32_t offset;
+  char line[OSMTEST_MAX_LINE_LEN];
+  boolean_t done = FALSE;
+  port_t *p_port;
+  const osmtest_token_t *p_tok;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_parse_port );
+
+  p_port = port_new( );
+  CL_ASSERT( p_port != NULL );
+
+  /*
+   * Parse the inventory file and create the database.
+   */
+  while( !done )
+  {
+    if( fgets( line, OSMTEST_MAX_LINE_LEN, fh ) == NULL )
+    {
+      /*
+       * End of file in the middle of a definition.
+ */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_port: ERR 0123: " + "Unexpected end of file\n" ); + status = IB_ERROR; + goto Exit; + } + + ++*p_line_num; + + /* + * Skip whitespace + */ + offset = 0; + if( !str_skip_white( line, &offset ) ) + continue; /* whole line was whitespace */ + + p_tok = str_get_token( &line[offset] ); + if( p_tok == NULL ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_port: ERR 0124: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + continue; + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "Found '%s' (line %u)\n", p_tok->str, *p_line_num ); + } + + str_skip_token( line, &offset ); + + switch ( p_tok->val ) + { + case OSMTEST_TOKEN_COMMENT: + break; + + case OSMTEST_TOKEN_LID: + p_port->comp.lid = 0xFFFF; + p_port->rec.lid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "lid = 0x%X\n", cl_ntoh16( p_port->rec.lid ) ); + } + break; + + case OSMTEST_TOKEN_PORT_NUM: + p_port->comp.port_num = 0xFF; + p_port->rec.port_num = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "port_num = 0x%u\n", + p_port->rec.port_num ); + } + break; + + case OSMTEST_TOKEN_MKEY: + p_port->comp.port_info.m_key = 0xFFFFFFFFFFFFFFFFULL; + p_port->rec.port_info.m_key = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "m_key = 0x%016" PRIx64 "\n", + cl_ntoh64( p_port->rec.port_info.m_key ) ); + } + break; + + case OSMTEST_TOKEN_SUBN_PREF: + p_port->comp.port_info.subnet_prefix = 0xFFFFFFFFFFFFFFFFULL; + p_port->rec.port_info.subnet_prefix = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "subnet_prefix = 0x%016" PRIx64 "\n", + cl_ntoh64( p_port->rec.port_info.subnet_prefix ) ); + } + break; + + case OSMTEST_TOKEN_BASE_LID: + p_port->comp.port_info.base_lid = 0xFFFF; + p_port->rec.port_info.base_lid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "base_lid = 0x%X\n", cl_ntoh16( p_port->rec.port_info.base_lid ) ); + } + break; + + case OSMTEST_TOKEN_SM_BASE_LID: + p_port->comp.port_info.master_sm_base_lid = 0xFFFF; + p_port->rec.port_info.master_sm_base_lid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "master_sm_base_lid = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.master_sm_base_lid ) ); + } + break; + + case OSMTEST_TOKEN_CAP_MASK: + p_port->comp.port_info.capability_mask = 0xFFFFFFFF; + p_port->rec.port_info.capability_mask = + cl_hton32( ( uint32_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "capability_mask = 0x%X\n", + cl_ntoh32( p_port->rec.port_info.capability_mask ) ); + } + break; + + case 
OSMTEST_TOKEN_DIAG_CODE: + p_port->comp.port_info.diag_code = 0xFFFF; + p_port->rec.port_info.diag_code = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "diag_code = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.diag_code ) ); + } + break; + + case OSMTEST_TOKEN_MKEY_LEASE_PER: + p_port->comp.port_info.m_key_lease_period = 0xFFFF; + p_port->rec.port_info.m_key_lease_period = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "m_key_lease_period = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.m_key_lease_period ) ); + } + break; + + case OSMTEST_TOKEN_LOC_PORT_NUM: + p_port->comp.port_info.local_port_num = 0xFF; + p_port->rec.port_info.local_port_num = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "local_port_num = 0x%u\n", + p_port->rec.port_info.local_port_num ); + } + break; + + case OSMTEST_TOKEN_LINK_WID_EN: + p_port->comp.port_info.link_width_enabled = 0xFF; + p_port->rec.port_info.link_width_enabled = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "link_width_enabled = 0x%u\n", + p_port->rec.port_info.link_width_enabled ); + } + break; + + case OSMTEST_TOKEN_LINK_WID_SUP: + p_port->comp.port_info.link_width_supported = 0xFF; + p_port->rec.port_info.link_width_supported = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "link_width_supported = 0x%u\n", + p_port->rec.port_info.link_width_supported ); + } + break; + + case OSMTEST_TOKEN_LINK_WID_ACT: + p_port->comp.port_info.link_width_active = 0xFF; + p_port->rec.port_info.link_width_active = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "link_width_active = 0x%u\n", + p_port->rec.port_info.link_width_active ); + } + break; + + case OSMTEST_TOKEN_LINK_SPEED_SUP: + p_port->comp.port_info.state_info1 = 0xFF; + ib_port_info_set_link_speed_sup( ( uint8_t ) strtoul( &line[offset], + NULL, 0 ), + &p_port->rec.port_info); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "link_speed_supported = 0x%u\n", + ib_port_info_get_link_speed_sup(&p_port->rec.port_info)); + } + break; + + case OSMTEST_TOKEN_PORT_STATE: + str_skip_white( line, &offset ); + p_port->comp.port_info.state_info1 = 0xFF; + ib_port_info_set_port_state(&p_port->rec.port_info, + ib_get_port_state_from_str(&line[offset])); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "port_state = 0x%u\n", + ib_port_info_get_port_state(&p_port->rec.port_info)); + } + break; + + case OSMTEST_TOKEN_STATE_INFO2: + p_port->comp.port_info.state_info2 = 0xFF; + p_port->rec.port_info.state_info2 = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + 
"osmtest_parse_port: " + "state_info2 = 0x%u\n", + p_port->rec.port_info.state_info2 ); + } + break; + + case OSMTEST_TOKEN_MKEY_PROT_BITS: + p_port->comp.port_info.mkey_lmc = 0xFF; + ib_port_info_set_mpb( &p_port->rec.port_info, + ( uint8_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "mpb = 0x%u\n", + ib_port_info_get_mpb(&p_port->rec.port_info) ); + } + break; + + case OSMTEST_TOKEN_LMC: + p_port->comp.port_info.mkey_lmc = 0xFF; + ib_port_info_set_lmc( &p_port->rec.port_info, + ( uint8_t ) strtoul( &line[offset], NULL, 0 ) ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "lmc = 0x%u\n", + ib_port_info_get_lmc(&p_port->rec.port_info) ); + } + break; + + case OSMTEST_TOKEN_LINK_SPEED: + p_port->comp.port_info.link_speed = 0xFF; + p_port->rec.port_info.link_speed = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "link_speed = 0x%u\n", + p_port->rec.port_info.link_speed ); + } + break; + + case OSMTEST_TOKEN_MTU_SMSL: + p_port->comp.port_info.mtu_smsl = 0xFF; + p_port->rec.port_info.mtu_smsl = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "mtu_smsl = 0x%u\n", + p_port->rec.port_info.mtu_smsl ); + } + break; + + case OSMTEST_TOKEN_VL_CAP: + p_port->comp.port_info.vl_cap = 0xFF; + p_port->rec.port_info.vl_cap = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_cap = 0x%u\n", + p_port->rec.port_info.vl_cap ); + } + break; + + case OSMTEST_TOKEN_VL_HIGH_LIMIT: + p_port->comp.port_info.vl_high_limit = 0xFF; + p_port->rec.port_info.vl_high_limit = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_high_limit = 0x%u\n", + p_port->rec.port_info.vl_high_limit ); + } + break; + + case OSMTEST_TOKEN_VL_ARB_HIGH_CAP: + p_port->comp.port_info.vl_arb_high_cap = 0xFF; + p_port->rec.port_info.vl_arb_high_cap = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_arb_high_cap = 0x%u\n", + p_port->rec.port_info.vl_arb_high_cap ); + } + break; + + case OSMTEST_TOKEN_VL_ARB_LOW_CAP: + p_port->comp.port_info.vl_arb_low_cap = 0xFF; + p_port->rec.port_info.vl_arb_low_cap = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_arb_low_cap = 0x%u\n", + p_port->rec.port_info.vl_arb_low_cap ); + } + break; + + case OSMTEST_TOKEN_MTU_CAP: + p_port->comp.port_info.mtu_cap = 0xFF; + p_port->rec.port_info.mtu_cap = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "mtu_cap = 0x%u\n", + p_port->rec.port_info.mtu_cap ); + } + break; + + case OSMTEST_TOKEN_VL_STALL_LIFE: + p_port->comp.port_info.vl_stall_life = 0xFF; + 
p_port->rec.port_info.vl_stall_life = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_stall_life = 0x%u\n", + p_port->rec.port_info.vl_stall_life ); + } + break; + + case OSMTEST_TOKEN_VL_ENFORCE: + p_port->comp.port_info.vl_enforce = 0xFF; + p_port->rec.port_info.vl_enforce = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "vl_enforce = 0x%u\n", + p_port->rec.port_info.vl_enforce ); + } + break; + + case OSMTEST_TOKEN_MKEY_VIOL: + p_port->comp.port_info.m_key_violations = 0xFFFF; + p_port->rec.port_info.m_key_violations = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "m_key_violations = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.m_key_violations ) ); + } + break; + + case OSMTEST_TOKEN_PKEY_VIOL: + p_port->comp.port_info.p_key_violations = 0xFFFF; + p_port->rec.port_info.p_key_violations = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "p_key_violations = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.p_key_violations ) ); + } + break; + + case OSMTEST_TOKEN_QKEY_VIOL: + p_port->comp.port_info.q_key_violations = 0xFFFF; + p_port->rec.port_info.q_key_violations = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "q_key_violations = 0x%X\n", + cl_ntoh16( p_port->rec.port_info.q_key_violations ) ); + } + break; + + case OSMTEST_TOKEN_GUID_CAP: + p_port->comp.port_info.guid_cap = 0xFF; + p_port->rec.port_info.guid_cap = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "guid_cap = 0x%u\n", + p_port->rec.port_info.guid_cap ); + } + break; + + case OSMTEST_TOKEN_SUBN_TIMEOUT: + p_port->comp.port_info.subnet_timeout = 0x1F; + p_port->rec.port_info.subnet_timeout = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "subnet_timeout = 0x%u\n", + ib_port_info_get_timeout(&p_port->rec.port_info) ); + } + break; + + case OSMTEST_TOKEN_RESP_TIME_VAL: + p_port->comp.port_info.resp_time_value = 0xFF; + p_port->rec.port_info.resp_time_value = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "resp_time_value = 0x%u\n", + p_port->rec.port_info.resp_time_value ); + } + break; + + case OSMTEST_TOKEN_ERR_THRESHOLD: + p_port->comp.port_info.error_threshold = 0xFF; + p_port->rec.port_info.error_threshold = + ( uint8_t ) strtoul( &line[offset], NULL, 0 ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_port: " + "error_threshold = 0x%u\n", + p_port->rec.port_info.error_threshold ); + } + break; + + case OSMTEST_TOKEN_END: + done = TRUE; + break; + + default: + osm_log( &p_osmt->log, 
OSM_LOG_ERROR, + "osmtest_parse_port: ERR 0125: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + break; + } + } + + /* + * Make sure the user specified enough information, then + * add this object to the database. + */ + if( p_port->comp.lid == 0 ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_port: ERR 0126: " + "LID must be specified for defined ports\n" ); + port_delete( p_port ); + status = IB_ERROR; + goto Exit; + } + + cl_qmap_insert( &p_osmt->exp_subn.port_key_tbl, + port_gen_id(p_port->rec.lid, p_port->rec.port_num), + &p_port->map_item ); + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_parse_path( IN osmtest_t * const p_osmt, + IN FILE * const fh, + IN OUT uint32_t * const p_line_num ) +{ + ib_api_status_t status = IB_SUCCESS; + uint32_t offset; + char line[OSMTEST_MAX_LINE_LEN]; + boolean_t done = FALSE; + path_t *p_path; + const osmtest_token_t *p_tok; + boolean_t got_error = FALSE; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_parse_path ); + + p_path = path_new( ); + CL_ASSERT( p_path != NULL ); + + /* + * Parse the inventory file and create the database. + */ + while( !done ) + { + if( fgets( line, OSMTEST_MAX_LINE_LEN, fh ) == NULL ) + { + /* + * End of file in the middle of a definition. + */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_path: ERR 0127: " + "Unexpected end of file\n" ); + status = IB_ERROR; + goto Exit; + } + + ++*p_line_num; + + /* + * Skip whitespace + */ + offset = 0; + if( !str_skip_white( line, &offset ) ) + continue; /* whole line was whitespace */ + + p_tok = str_get_token( &line[offset] ); + if( p_tok == NULL ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_path: ERR 0128: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + got_error = TRUE; + continue; + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " + "Found '%s' (line %u)\n", p_tok->str, *p_line_num ); + } + + str_skip_token( line, &offset ); + + switch ( p_tok->val ) + { + case OSMTEST_TOKEN_COMMENT: + break; + + case OSMTEST_TOKEN_DGID: + p_path->comp.dgid.unicast.prefix = 0xFFFFFFFFFFFFFFFFULL; + p_path->comp.dgid.unicast.interface_id = 0xFFFFFFFFFFFFFFFFULL; + + str_skip_white( line, &offset ); + p_path->rec.dgid.unicast.prefix = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + str_skip_token( line, &offset ); + p_path->rec.dgid.unicast.interface_id = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " + "dgid = 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + cl_ntoh64( p_path->rec.dgid.unicast.prefix ), + cl_ntoh64( p_path->rec.dgid.unicast.interface_id ) ); + } + break; + + case OSMTEST_TOKEN_SGID: + p_path->comp.sgid.unicast.prefix = 0xFFFFFFFFFFFFFFFFULL; + p_path->comp.sgid.unicast.interface_id = 0xFFFFFFFFFFFFFFFFULL; + + str_skip_white( line, &offset ); + p_path->rec.sgid.unicast.prefix = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + str_skip_token( line, &offset ); + p_path->rec.sgid.unicast.interface_id = + cl_hton64( strtoull( &line[offset], NULL, 0 ) ); + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " 
+ "sgid = 0x%016" PRIx64 " 0x%016" PRIx64 "\n", + cl_ntoh64( p_path->rec.sgid.unicast.prefix ), + cl_ntoh64( p_path->rec.sgid.unicast.interface_id ) ); + } + break; + + case OSMTEST_TOKEN_DLID: + p_path->comp.dlid = 0xFFFF; + p_path->rec.dlid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " + "dlid = 0x%X\n", cl_ntoh16( p_path->rec.dlid ) ); + } + break; + + case OSMTEST_TOKEN_SLID: + p_path->comp.slid = 0xFFFF; + p_path->rec.slid = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " + "slid = 0x%X\n", cl_ntoh16( p_path->rec.slid ) ); + } + break; + + case OSMTEST_TOKEN_PKEY: + p_path->comp.pkey = 0xFFFF; + p_path->rec.pkey = + cl_hton16( ( uint16_t ) strtoul( &line[offset], NULL, 0 ) ); + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_path: " + "pkey = 0x%X\n", cl_ntoh16( p_path->rec.pkey ) ); + } + break; + + case OSMTEST_TOKEN_END: + done = TRUE; + break; + + default: + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_path: ERR 0129: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + got_error = TRUE; + break; + } + } + + if( got_error ) + { + status = IB_ERROR; + goto Exit; + } + /* + * Make sure the user specified enough information, then + * add this object to the database. + */ + if( osmtest_path_rec_kay_is_valid( p_osmt, p_path ) == FALSE ) + { + path_delete( p_path ); + status = IB_ERROR; + goto Exit; + } + + cl_qmap_insert( &p_osmt->exp_subn.path_tbl, + osmtest_path_rec_key_get( &p_path->rec ), + &p_path->map_item ); + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_parse_link( IN osmtest_t * const p_osmt, + IN FILE * const fh, + IN OUT uint32_t * const p_line_num ) +{ + ib_api_status_t status = IB_SUCCESS; + uint32_t offset; + char line[OSMTEST_MAX_LINE_LEN]; + boolean_t done = FALSE; + const osmtest_token_t *p_tok; + boolean_t got_error = FALSE; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_parse_link); + + /* + * Parse the inventory file and create the database. + */ + while( !done ) + { + if( fgets( line, OSMTEST_MAX_LINE_LEN, fh ) == NULL ) + { + /* + * End of file in the middle of a definition. 
+ */ + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_link: ERR 012A: " + "Unexpected end of file\n" ); + status = IB_ERROR; + goto Exit; + } + + ++*p_line_num; + + /* + * Skip whitespace + */ + offset = 0; + if( !str_skip_white( line, &offset ) ) + continue; /* whole line was whitespace */ + + p_tok = str_get_token( &line[offset] ); + if( p_tok == NULL ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_link: ERR 012B: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + got_error = TRUE; + continue; + } + + if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) ) + { + osm_log( &p_osmt->log, OSM_LOG_DEBUG, + "osmtest_parse_link: " + "Found '%s' (line %u)\n", p_tok->str, *p_line_num ); + } + + str_skip_token( line, &offset ); + + switch ( p_tok->val ) + { + case OSMTEST_TOKEN_FROMLID: + case OSMTEST_TOKEN_FROMPORTNUM: + case OSMTEST_TOKEN_TOPORTNUM: + case OSMTEST_TOKEN_TOLID: + /* For now */ + break; + + case OSMTEST_TOKEN_END: + done = TRUE; + break; + + default: + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_parse_link: ERR 012C: " + "Ignoring line %u with unknown token: %s\n", + *p_line_num, &line[offset] ); + got_error = TRUE; + break; + } + } + + if( got_error ) + status = IB_ERROR; + + Exit: + OSM_LOG_EXIT( &p_osmt->log ); + return ( status ); +} + +/********************************************************************** + **********************************************************************/ +static ib_api_status_t +osmtest_create_db( IN osmtest_t * const p_osmt ) +{ + FILE *fh; + ib_api_status_t status = IB_SUCCESS; + uint32_t offset; + char line[OSMTEST_MAX_LINE_LEN]; + uint32_t line_num = 0; + const osmtest_token_t *p_tok; + boolean_t got_error = FALSE; + + OSM_LOG_ENTER( &p_osmt->log, osmtest_create_db ); + + fh = fopen( p_osmt->opt.file_name, "r" ); + if( fh == NULL ) + { + osm_log( &p_osmt->log, OSM_LOG_ERROR, + "osmtest_create_db: ERR 0130: " + "Unable to open inventory file (%s)\n", p_osmt->opt.file_name); + status = IB_ERROR; + goto Exit; + } + + /* + * Parse the inventory file and create the database. 
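+   * The file is a sequence of blocks, each opened by a DEFINE_NODE,
+   * DEFINE_PORT, DEFINE_PATH or DEFINE_LINK token and closed by an
+   * END token; the lines in between each carry one token/value pair,
+   * and comment lines may appear anywhere.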
+   */
+  while( fgets( line, OSMTEST_MAX_LINE_LEN, fh ) != NULL )
+  {
+    line_num++;
+
+    /*
+     * Skip whitespace
+     */
+    offset = 0;
+    if( !str_skip_white( line, &offset ) )
+      continue;               /* whole line was whitespace */
+
+    p_tok = str_get_token( &line[offset] );
+    if( p_tok == NULL )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_create_db: ERR 0131: "
+               "Ignoring line %u: %s\n", line_num, &line[offset] );
+      got_error = TRUE;
+      continue;
+    }
+
+    if( osm_log_is_active( &p_osmt->log, OSM_LOG_DEBUG ) )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+               "osmtest_create_db: "
+               "Found '%s' (line %u)\n", p_tok->str, line_num );
+    }
+
+    switch ( p_tok->val )
+    {
+    case OSMTEST_TOKEN_COMMENT:
+      break;
+
+    case OSMTEST_TOKEN_DEFINE_NODE:
+      status = osmtest_parse_node( p_osmt, fh, &line_num );
+      break;
+
+    case OSMTEST_TOKEN_DEFINE_PORT:
+      status = osmtest_parse_port( p_osmt, fh, &line_num );
+      break;
+
+    case OSMTEST_TOKEN_DEFINE_PATH:
+      status = osmtest_parse_path( p_osmt, fh, &line_num );
+      break;
+
+    case OSMTEST_TOKEN_DEFINE_LINK:
+      status = osmtest_parse_link( p_osmt, fh, &line_num );
+      break;
+
+    default:
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_create_db: ERR 0132: "
+               "Ignoring line %u: %s\n", line_num, &line[offset] );
+      got_error = TRUE;
+      break;
+    }
+
+    if( got_error )
+      status = IB_ERROR;
+
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_create_db: ERR 0133: "
+               "Bad status received during parsing (%s)\n",
+               ib_get_err_str( status ) );
+      fclose( fh );
+      goto Exit;
+    }
+  }
+
+  fclose( fh );
+
+  Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ Returns the index in the local port attribute array for the
+ user's selection.
+**********************************************************************/
+static uint32_t
+osmtest_get_user_port( IN osmtest_t * const p_osmt,
+                       IN const ib_port_attr_t p_attr_array[],
+                       IN uint32_t const num_ports )
+{
+  uint32_t i;
+  uint32_t choice = 0;
+  boolean_t done_flag = FALSE;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_get_user_port );
+
+  /*
+   * User needs prompting for the local port GUID with which
+   * to bind.
+   */
+
+  while( done_flag == FALSE )
+  {
+    printf( "\nChoose a local port number with which to bind:\n\n" );
+    for( i = 0; i < num_ports; i++ )
+    {
+      /*
+       * Print the index + 1 since by convention, port numbers
+       * start with 1 on host channel adapters.
+       */
+
+      printf( "\t%u: GUID = 0x%016" PRIx64 ", lid = 0x%04X, state = %s\n",
+              i + 1, cl_ntoh64( p_attr_array[i].port_guid ),
+              p_attr_array[i].lid,
+              ib_get_port_state_str( p_attr_array[i].link_state ) );
+    }
+
+    printf( "\nEnter choice (1-%u): ", i );
+    scanf( "%u", &choice );
+    if( ( choice < 1 ) || ( choice > num_ports ) )
+      printf( "\nError: Lame choice!\n" );
+    else
+      done_flag = TRUE;
+
+  }
+  printf("\n");
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( choice - 1 );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osmtest_bind( IN osmtest_t * p_osmt,
+              IN uint16_t max_lid,
+              IN ib_net64_t guid OPTIONAL )
+{
+  uint32_t port_index;
+  ib_api_status_t status;
+  uint32_t num_ports = GUID_ARRAY_SIZE;
+  ib_port_attr_t attr_array[GUID_ARRAY_SIZE];
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_bind );
+
+  /*
+   * Call the transport layer for a list of local port
+   * GUID values.
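+   * attr_array provides room for up to GUID_ARRAY_SIZE entries;
+   * num_ports passes the capacity in and carries the number of
+   * ports actually found back out.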
+   */
+  status = osm_vendor_get_all_port_attr( p_osmt->p_vendor,
+                                         attr_array, &num_ports );
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_bind: ERR 0134: "
+             "Failure getting local port attributes (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  if( guid == 0 )
+  {
+    if( num_ports == 0 )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_bind: ERR 0135: "
+               "No local ports. Unable to proceed\n" );
+      status = IB_ERROR;
+      goto Exit;
+    }
+
+    /*
+     * User needs prompting for the local port GUID with which
+     * to bind.
+     */
+    port_index = osmtest_get_user_port( p_osmt, attr_array, num_ports );
+    guid = attr_array[port_index].port_guid;
+  }
+  else
+  {
+    for( port_index = 0; port_index < num_ports; port_index++ )
+    {
+      if( attr_array[port_index].port_guid == guid )
+        break;
+    }
+
+    if( port_index == num_ports )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_bind: ERR 0136: "
+               "No local port with guid 0x%016" PRIx64 "\n",
+               cl_ntoh64( guid ) );
+      status = IB_NOT_FOUND;
+      goto Exit;
+    }
+  }
+
+  /*
+   * Copy the port info for the selected port.
+   */
+  memcpy( &p_osmt->local_port, &attr_array[port_index],
+          sizeof( p_osmt->local_port ) );
+
+  /* bind to the SA */
+  osm_log( &p_osmt->log, OSM_LOG_DEBUG,
+           "osmtest_bind: "
+           "Using port with SM LID:0x%04X\n",
+           p_osmt->local_port.sm_lid);
+  p_osmt->max_lid = max_lid;
+
+  p_osmt->h_bind = osmv_bind_sa(p_osmt->p_vendor, &p_osmt->mad_pool, guid);
+
+  if( p_osmt->h_bind == OSM_BIND_INVALID_HANDLE )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_bind: ERR 0137: "
+             "Unable to bind to SA\n" );
+    status = IB_ERROR;
+    goto Exit;
+  }
+
+  Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
+/**********************************************************************
+ **********************************************************************/
+ib_api_status_t
+osmtest_run( IN osmtest_t * const p_osmt )
+{
+  ib_api_status_t status = IB_SUCCESS;
+
+  OSM_LOG_ENTER( &p_osmt->log, osmtest_run );
+
+  status = osmtest_validate_sa_class_port_info(p_osmt);
+  if( status != IB_SUCCESS )
+  {
+    osm_log( &p_osmt->log, OSM_LOG_ERROR,
+             "osmtest_run: ERR 0138: "
+             "Could not obtain SA ClassPortInfo (%s)\n",
+             ib_get_err_str( status ) );
+    goto Exit;
+  }
+
+  if( p_osmt->opt.flow == 1 )
+  {
+    /*
+     * Creating an inventory file with all nodes, ports and paths
+     */
+    status = osmtest_create_inventory_file( p_osmt );
+    if( status != IB_SUCCESS )
+    {
+      osm_log( &p_osmt->log, OSM_LOG_ERROR,
+               "osmtest_run: ERR 0139: "
+               "Inventory file create failed (%s)\n",
+               ib_get_err_str( status ) );
+      goto Exit;
+    }
+  }
+  else
+  {
+    if( p_osmt->opt.flow == 5 )
+    {
+      /*
+       * Stress SA - flood it with queries
+       */
+      switch ( p_osmt->opt.stress )
+      {
+      case 0:
+      case 1:                 /* small response SA query stress */
+        status = osmtest_stress_small_rmpp( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0140: "
+                   "Small RMPP stress test failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+        break;
+      case 2:                 /* large response SA query stress */
+        status = osmtest_stress_large_rmpp( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0141: "
+                   "Large RMPP stress test failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+        break;
+      case 3:                 /* large response Path Record SA query stress */
+        status = osmtest_create_db( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0142: "
+                   "Database creation failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+
+        status = osmtest_stress_large_rmpp_pr( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0143: "
+                   "Large RMPP stress test failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+        break;
+      default:
+        osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                 "osmtest_run: ERR 0144: "
+                 "Unknown stress test value %u\n",
+                 p_osmt->opt.stress );
+        break;
+      }
+    }
+    else
+    {
+
+      /*
+       * Run normal validation tests.
+       */
+      if (p_osmt->opt.flow == 0 || p_osmt->opt.flow == 2)
+      {
+        /*
+         * Only validate the given inventory file
+         */
+        status = osmtest_create_db( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0145: "
+                   "Database creation failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+
+        status = osmtest_validate_against_db( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0146: "
+                   "SA validation database failure (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+      }
+
+      if (p_osmt->opt.flow == 0)
+      {
+        status = osmtest_wrong_sm_key_ignored( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0147: "
+                   "Try wrong SM_Key failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+      }
+
+      if (p_osmt->opt.flow == 0 || p_osmt->opt.flow == 3)
+      {
+        /*
+         * run service registration, deregistration, and lease test
+         */
+        status = osmt_run_service_records_flow( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0148: "
+                   "Service Flow failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+      }
+
+      if (p_osmt->opt.flow == 0 || p_osmt->opt.flow == 4)
+      {
+        /*
+         * Run event forwarding test
+         */
+#ifdef OSM_VENDOR_INTF_MTL
+        status = osmt_run_inform_info_flow( p_osmt );
+
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0149: "
+                   "Inform Info Flow failed: (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+#else
+        osm_log (&p_osmt->log, OSM_LOG_INFO,
+                 "osmtest_run: The event forwarding flow "
+                 "is not implemented yet!\n");
+        status = IB_SUCCESS;
+        goto Exit;
+#endif
+      }
+
+      if (p_osmt->opt.flow == 7)
+      {
+        /*
+         * QoS info: dump VLArb and SLtoVL tables.
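+         * The flow issues an SA query for the SLtoVL and VL
+         * Arbitration records of every port in the inventory.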
+         * Since it generates a huge file, we run it only
+         * if explicitly required to.
+         */
+        status = osmtest_create_db( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 014A: "
+                   "Database creation failed (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+
+        status = osmt_run_slvl_and_vlarb_records_flow(p_osmt);
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0150: "
+                   "Failed to get SLtoVL and VL Arbitration Tables (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+      }
+
+      if (p_osmt->opt.flow == 8)
+      {
+        /*
+         * Run trap 64/65 flow (this flow requires running an external tool)
+         */
+#ifdef OSM_VENDOR_INTF_MTL
+        status = osmt_run_trap64_65_flow( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0151: "
+                   "Trap 64/65 Flow failed: (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+#else
+        osm_log (&p_osmt->log, OSM_LOG_INFO,
+                 "osmtest_run: The trap 64/65 flow "
+                 "is not implemented yet!\n");
+        status = IB_SUCCESS;
+        goto Exit;
+#endif
+      }
+
+      if (p_osmt->opt.flow == 0 || p_osmt->opt.flow == 6)
+      {
+        /*
+         * Multicast flow
+         */
+        status = osmt_run_mcast_flow( p_osmt );
+        if( status != IB_SUCCESS )
+        {
+          osm_log( &p_osmt->log, OSM_LOG_ERROR,
+                   "osmtest_run: ERR 0152: "
+                   "Multicast Flow failed: (%s)\n",
+                   ib_get_err_str( status ) );
+          goto Exit;
+        }
+      }
+
+      osm_log( &p_osmt->log, OSM_LOG_INFO,
+               "osmtest_run: "
+               "\n\n***************** ALL TESTS PASS *****************\n\n" );
+
+    }
+  }
+
+  Exit:
+  OSM_LOG_EXIT( &p_osmt->log );
+  return ( status );
+}
+
diff --git a/branches/Ndi/ulp/srp/dirs b/branches/Ndi/ulp/srp/dirs
new file mode 100644
index 00000000..ddf0ed7d
--- /dev/null
+++ b/branches/Ndi/ulp/srp/dirs
@@ -0,0 +1,3 @@
+DIRS=\
+	user \
+	kernel
diff --git a/branches/Ndi/ulp/srp/kernel/SOURCES b/branches/Ndi/ulp/srp/kernel/SOURCES
new file mode 100644
index 00000000..ebc1f4cf
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/SOURCES
@@ -0,0 +1,56 @@
+TARGETNAME=ibsrp
+TARGETPATH=..\..\..\bin\kernel\obj$(BUILD_ALT_DIR)
+TARGETTYPE=MINIPORT
+
+!if $(FREEBUILD)
+ENABLE_EVENT_TRACING=1
+!else
+#ENABLE_EVENT_TRACING=1
+!endif
+
+
+SOURCES= ibsrp.rc \
+	srp_connection.c \
+	srp_data_path.c \
+	srp_descriptors.c \
+	srp_driver.c \
+	srp_event.c \
+	srp_hba.c \
+	srp_hca.c \
+	srp_session.c
+
+INCLUDES=..\..\..\inc;..\..\..\inc\kernel;
+
+!if defined(DDK_TARGET_OS) && "$(DDK_TARGET_OS)"=="WinXP"
+# storport.h in the WinXP DDK already has the "..._ALIASES" definitions
+C_DEFINES=$(C_DEFINES) -DDEPRECATE_DDK_FUNCTIONS -DWinXP -DNEED_CL_OBJ
+!else
+C_DEFINES=$(C_DEFINES) -DDEPRECATE_DDK_FUNCTIONS -DSTOR_USE_SCSI_ALIASES \
+	-DNEED_CL_OBJ
+!endif
+
+TARGETLIBS= \
+	$(TARGETPATH)\*\complib.lib \
+	$(DDK_LIB_PATH)\scsiwmi.lib \
+	$(DDK_LIB_PATH)\ntoskrnl.lib \
+	$(DDK_LIB_PATH)\hal.lib
+
+#!if !defined(DDK_TARGET_OS) || "$(DDK_TARGET_OS)"=="Win2K"
+# Win2k doesn't support StorPort.
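+# To target Win2K, re-enable this block and comment out the
+# storport.lib line below.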
+#TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\scsiport.lib +#!else +TARGETLIBS= $(TARGETLIBS) $(DDK_LIB_PATH)\storport.lib +#!endif + +!IFDEF ENABLE_EVENT_TRACING + +C_DEFINES = $(C_DEFINES) -DEVENT_TRACING + +RUN_WPP = $(SOURCES) -km -ext: .c .h .C .H \ + -scan:srp_debug.h \ + -func:SRP_PRINT(LEVEL,FLAGS,(MSG,...)) \ + -func:SRP_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) + +!ENDIF + +MSC_WARNING_LEVEL= /W4 diff --git a/branches/Ndi/ulp/srp/kernel/ib_srp.inf b/branches/Ndi/ulp/srp/kernel/ib_srp.inf new file mode 100644 index 00000000..ceb1eb2a --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/ib_srp.inf @@ -0,0 +1,135 @@ +; OpenIB InfiniBand SRP Miniport. +; Copyright 2005 SilverStorm Technologies all Rights Reserved. + +[Version] +Signature="$Windows NT$" +Class=SCSIAdapter +ClassGUID={4D36E97B-E325-11CE-BFC1-08002BE10318} +Provider=%OPENIB% +DriverVer=03/08/2006,1.0.0000.614 + + +; ================= Device Install section ===================== + +[DestinationDirs] +DefaultDestDir=12 + +[SourceDisksNames.x86] +1=%DiskId%,,,"" + +[SourceDisksNames.amd64] +1=%DiskId%,,,"" + +[SourceDisksNames.ia64] +1=%DiskId%,,,"" + +[SourceDisksFiles] +ibsrp.sys=1 + +[Manufacturer] +%OPENIB% = SRP.DeviceSection,ntx86...0x1,ntx86,ntamd64,ntia64 +%SST% = VFx.DeviceSection,ntx86...0x1,ntx86,ntamd64,ntia64 + +[SRP.DeviceSection] +; empty since we don't support W9x/Me + +[SRP.DeviceSection.ntx86...0x1] +; empty since we don't yet support XP. + +[SRP.DeviceSection.ntx86] +%SRP.DeviceDesc% = SRP.DDInstall,IBA\C0100c609ep0108r0001, \ + IBA\Cff00c609ep0108r0001, \ + IBA\C0100c609ep0108, \ + IBA\Cff00c609ep0108 + +[SRP.DeviceSection.ntamd64] +%SRP.DeviceDesc% = SRP.DDInstall,IBA\C0100c609ep0108r0001, \ + IBA\Cff00c609ep0108r0001, \ + IBA\C0100c609ep0108, \ + IBA\Cff00c609ep0108 + +[SRP.DeviceSection.ntia64] +%SRP.DeviceDesc% = SRP.DDInstall,IBA\C0100c609ep0108r0001, \ + IBA\Cff00c609ep0108r0001, \ + IBA\C0100c609ep0108, \ + IBA\Cff00c609ep0108 + +[VFx.DeviceSection] +; empty since we don't support W9x/Me + +[VFx.DeviceSection.ntx86...0x1] +; empty since we don't yet support XP. + +[VFx.DeviceSection.ntx86] +%VFx.DeviceDesc% = SRP.DDInstall,IBA\V00066aP00000038S00066as00000038v0001, \ + IBA\V00066aP00000038S00066as00000038, \ + IBA\V00066aP00000038v0001, \ + IBA\V00066aP00000038 + +[VFx.DeviceSection.ntamd64] +%VFx.DeviceDesc% = SRP.DDInstall,IBA\V00066aP00000038S00066as00000038v0001, \ + IBA\V00066aP00000038S00066as00000038, \ + IBA\V00066aP00000038v0001, \ + IBA\V00066aP00000038 + +[VFx.DeviceSection.ntia64] +%VFx.DeviceDesc% = SRP.DDInstall,IBA\V00066aP00000038S00066as00000038v0001, \ + IBA\V00066aP00000038S00066as00000038, \ + IBA\V00066aP00000038v0001, \ + IBA\V00066aP00000038 + +[SRP.DDInstall.nt] +CopyFiles = SRP.CopyFiles + +[SRP.DDInstall.nt.Services] +AddService = ibsrp,%SPSVCINST_ASSOCSERVICE%,SRP.ServiceInstall,SRP.EventLogInstall + +[SRP.CopyFiles] +ibsrp.sys + +; +; ============= Service Install section ============== +; + +[SRP.ServiceInstall] +DisplayName = %SRP.ServiceDesc% +ServiceType = %SERVICE_KERNEL_DRIVER% +StartType = %SERVICE_DEMAND_START% +ErrorControl = %SERVICE_ERROR_NORMAL% +ServiceBinary = %12%\ibsrp.sys +LoadOrderGroup = SCSI Miniport +AddReg = SRP.ParamsReg + +[SRP.ParamsReg] +HKR,"Parameters\PnpInterface",%InternalBus%,%REG_DWORD%,1 +HKR,"Parameters\PnpInterface",%PNPBus%,%REG_DWORD%,1 +HKR,"Parameters","DebugLevel",%REG_DWORD%,2 +HKR,"Parameters","DebugFlags",%REG_DWORD%,0x00ffffff +HKR,"Parameters","ModeFlags",%REG_DWORD%,0 + +; +; == The NT EventLog entries are the same for all SCSI miniports. 
== +; +[SRP.EventLogInstall] +AddReg = SRP.EventLogAddReg + +[SRP.EventLogAddReg] +HKR,,EventMessageFile,0x00020000,"%%SystemRoot%%\System32\IoLogMsg.dll" +HKR,,TypesSupported,0x00010001,7 + +[Strings] +OPENIB = "OpenIB Alliance" +SST = "SilverStorm Technologies" +SRP.DeviceDesc = "InfiniBand SRP Miniport" +VFx.DeviceDesc = "SilverStorm VFx I/O Controller" +SRP.ServiceDesc = "OpenIB InfiniBand SRP Miniport" +DiskId = "OpenIB InfiniBand SRP installation disk" +InternalBus = 0 +PNPBus = 15 +SPSVCINST_NULL = 0x0 +SPSVCINST_ASSOCSERVICE = 0x00000002 +SERVICE_KERNEL_DRIVER = 1 +SERVICE_DEMAND_START = 3 +SERVICE_ERROR_NORMAL = 1 +REG_DWORD = 0x00010001 +REG_DWORD_NO_CLOBBER = 0x00010003 diff --git a/branches/Ndi/ulp/srp/kernel/ibsrp.rc b/branches/Ndi/ulp/srp/kernel/ibsrp.rc new file mode 100644 index 00000000..5de864e8 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/ibsrp.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DRV +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "InfiniBand SRP Miniport (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "InfiniBand SRP Miniport" +#endif + +#define VER_INTERNALNAME_STR "ibsrp.sys" +#define VER_ORIGINALFILENAME_STR "ibsrp.sys" + +#include diff --git a/branches/Ndi/ulp/srp/kernel/makefile b/branches/Ndi/ulp/srp/kernel/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/srp/kernel/srp.h b/branches/Ndi/ulp/srp/kernel/srp.h new file mode 100644 index 00000000..626af3ec --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp.h @@ -0,0 +1,370 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _SRP_H_INCLUDED_ +#define _SRP_H_INCLUDED_ + +#include +#include +#include + +/* + SRP Service Definitions + */ +#define SRP_IO_CLASS CL_HTON16(0x0100) /* T10 changed */ +#define SRP_IO_CLASS_R10 CL_HTON16(0xff00) /* FF + high 8 bits of NCITS OUI */ +#define SRP_IO_SUBCLASS CL_HTON16(0x609e) /* Low 16 bits of NCITS OUI */ +#define SRP_PROTOCOL 0x0108 /* T10 administered identifier */ +#define SRP_PROTOCOL_VER 0x0001 /* Approved standard version */ +#define SRP_SERVICE_NAME_PREFIX "SRP.T10:" +#define SRP_EXTENSION_ID_LENGTH 16 /* Service name extension ID length */ + +#define SRP_MIN_IU_SIZE 64 +#define SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER 257 /* it was 16 */ +#define SRP_MAX_IU_SIZE (SRP_MIN_IU_SIZE + 20 + 16*SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER) + +#define SRP_MIN_INI_TO_TGT_IU 64 // Minimum initiator message size +#define SRP_MIN_TGT_TO_INI_IU 56 // Minimum target message size +#define SRP_MIN_TGT_TO_INI_DMA 512 // At least one sector! 
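+
+/*
+ * Sizing arithmetic: an indirect data buffer can carry up to
+ * SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER (257) descriptors of 16 bytes
+ * each (srp_memory_descriptor_t below), plus 20 bytes for the table
+ * descriptor and its total length, so
+ *
+ *   SRP_MAX_IU_SIZE = 64 + 20 + ( 16 * 257 ) = 4196 bytes
+ *
+ * while srp_login_req_t below packs to exactly SRP_MIN_IU_SIZE (64).
+ */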
+
+/* Requests sent from SRP initiator ports to SRP target ports */
+#define SRP_LOGIN_REQ   0x00
+#define SRP_TSK_MGMT    0x01
+#define SRP_CMD         0x02
+#define SRP_I_LOGOUT    0x03
+
+/* Responses sent from SRP target ports to SRP initiator ports */
+#define SRP_LOGIN_RSP   0xC0
+#define SRP_RSP         0xC1
+#define SRP_LOGIN_REJ   0xC2
+
+/* Requests sent from SRP target ports to SRP initiator ports */
+#define SRP_T_LOGOUT    0x80
+#define SRP_CRED_REQ    0x81
+#define SRP_AER_REQ     0x82
+
+/* Responses sent from SRP initiator ports to SRP target ports */
+#define SRP_CRED_RSP    0x41
+#define SRP_AER_RSP     0x42
+
+typedef struct _srp_information_unit
+{
+	uint8_t		type;
+	uint8_t		reserved[7];
+	uint64_t	tag;
+} PACK_SUFFIX srp_information_unit_t;
+
+/* Mask values applied to bit fields for access */
+#define DATA_BUFFER_DESCRIPTOR_FORMAT_MASK	0x06
+#define MULTI_CHANNEL_ACTION_MASK		0x03
+#define MULTI_CHANNEL_RESULT_MASK		0x03
+
+/* Allowable values for the Data Buffer Descriptor Formats */
+typedef enum data_buffer_descriptor_format_enum
+{
+	DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT = 0x00,
+	DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR     = 0x01,
+	DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS  = 0x02
+} DATA_BUFFER_DESCRIPTOR_FORMAT;
+
+/* Requested Supported Data Buffer Format flag values */
+#define DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED		0x02
+#define INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED	0x04
+
+typedef struct _srp_req_sup_db_fmt
+{
+	uint8_t		reserved;
+	uint8_t		flags;	/* IDBD/DDBD */
+} PACK_SUFFIX srp_req_sup_db_fmt_t;
+
+/*
+ * The SRP spec r10 defines the port identifiers as
+ * GUID:ExtensionID, while the SRP 2.0 spec defines them
+ * as ExtensionID:GUID. Lucky for us the IO_CLASS in the
+ * IOC profile changed from 0xFF00 to 0x0100.
+ */
+typedef struct _srp_ib_port_id
+{
+	net64_t		field1;
+	net64_t		field2;
+
+} PACK_SUFFIX srp_ib_port_id_t;
+
+/* Allowable values for the MultiChannel Action field */
+typedef enum multi_channel_action_enum
+{
+	MCA_TERMINATE_EXISTING    = 0x00,
+	MCA_INDEPENDENT_OPERATION = 0x01
+} MULTI_CHANNEL_ACTION;
+
+typedef struct _srp_login_req
+{
+	uint8_t			type;
+	uint8_t			reserved1[7];
+	uint64_t		tag;
+	uint32_t		req_max_init_to_targ_iu;
+	uint8_t			reserved2[4];
+	srp_req_sup_db_fmt_t	req_buffer_fmts;
+	uint8_t			flags;	/* MULTI-CHANNEL ACTION */
+	uint8_t			reserved3;
+	uint8_t			reserved4[4];
+	srp_ib_port_id_t	initiator_port_id;
+	srp_ib_port_id_t	target_port_id;
+} PACK_SUFFIX srp_login_req_t;
+
+/* Allowable values for the MultiChannel Result field */
+typedef enum multi_channel_result_enum
+{
+	MCR_NO_EXISTING_TERMINATED = 0x00,
+	MCR_EXISTING_TERMINATED    = 0x01,
+	MCR_EXISTING_CONTINUED     = 0x02
+} MULTI_CHANNEL_RESULT;
+
+typedef struct _srp_login_rsp
+{
+	uint8_t			type;
uint8_t reserved1[3]; + uint32_t reason; + uint64_t tag; + uint8_t reserved2[8]; + srp_req_sup_db_fmt_t sup_buffer_fmts; + uint8_t reserved3[6]; +} PACK_SUFFIX srp_login_rej_t; + +typedef struct _srp_i_logout +{ + uint8_t type; + uint8_t reserved[7]; + uint64_t tag; +} PACK_SUFFIX srp_i_logout_t; + +/* Srp Target Logout Reason Codes */ +typedef enum target_logout_reason_code_enum +{ + TLO_NO_REASON = 0x0000, + TLO_INACTIVE_RDMA_CHANNEL = 0x0001, + TLO_INVALID_IU_TYPE_RECEIVED_BY_TARGET = 0x0002, + TLO_RESPONSE_WITH_NO_OUTSTANDING_TARGET_PORT_REQUEST = 0x0003, + TLO_DISCONNECT_DUE_TO_MULTI_CHANNEL_ACTION_ON_NEW_LOGIN = 0x0004, + TLO_UNSUPPORTED_FORMAT_FOR_DATA_OUT_BUFFER_DESCRIPTOR = 0x0006, + TLO_UNSUPPORTED_FORMAT_FOR_DATA_IN_BUFFER_DESCRIPTOR = 0x0007, + TLO_INVALID_COUNT_VALUE_IN_DATA_OUT_BUFFER_DESCRIPTOR_COUNT = 0x0008, + TLO_INVALID_COUNT_VALUE_IN_DATA_IN_BUFFER_DESCRIPTOR_COUNT = 0x0009 +} TARGET_LOGOUT_REASON_CODE; + +typedef struct _srp_t_logout +{ + uint8_t type; + uint8_t reserved[3]; + uint32_t reason; + uint64_t m_tag; +} PACK_SUFFIX srp_t_logout_t; + +/* Srp Task Management Flags */ +#define TMF_ABORT_TASK 0x01 +#define TMF_ABORT_TASK_SET 0x02 +#define TMF_CLEAR_TASK_SET 0x04 +#define TMF_LOGICAL_UNIT_RESET 0x08 +#define TMF_RESTRICTED 0x20 +#define TMF_CLEAR_ACA 0x40 + +typedef struct _srp_tsk_mgmt +{ + uint8_t type; + uint8_t reserved1[7]; + uint64_t tag; + uint8_t reserved2[4]; + uint64_t logical_unit_number; + uint8_t reserved3; + uint8_t reserved4; + uint8_t task_management_flags; + uint8_t reserved5; + uint64_t managed_task_tag; + uint8_t reserved6[8]; +} PACK_SUFFIX srp_tsk_mgmt_t; + +/* Srp TASK ATTRIBUTE VALUES */ +typedef enum task_attribute_value_enum +{ + TAV_SIMPLE_TASK = 0x00, + TAV_HEAD_OF_QUEUE_TASK = 0x01, + TAV_ORDERED = 0x02, + TAV_AUTOMATIC_CONTINGENT_ALLIANCE_TASK = 0x04 +} TASK_ATTRIBUTE_VALUE; + +typedef struct _srp_memory_descriptor +{ + uint64_t virtual_address; + uint32_t memory_handle; + uint32_t data_length; +} PACK_SUFFIX srp_memory_descriptor_t; + +typedef struct _srp_memory_table_descriptor +{ + srp_memory_descriptor_t descriptor; + uint32_t total_length; +} PACK_SUFFIX srp_memory_table_descriptor_t; + +typedef struct _srp_cmd +{ + uint8_t type; + uint8_t reserved1[4]; + uint8_t data_out_in_buffer_desc_fmt; + uint8_t data_out_buffer_desc_count; + uint8_t data_in_buffer_desc_count; + uint64_t tag; + uint8_t reserved2[4]; + uint64_t logical_unit_number; + uint8_t reserved3; + uint8_t flags1; /* TASK ATTRIBUTE */ + uint8_t reserved4; + uint8_t flags2; /* ADDITIONAL CDB LENGTH in 4 byte words */ + uint8_t cdb[16]; + uint8_t additional_cdb[1]; /* place holder, may not be present */ + /* srp_memory_descriptor_t data_out_buffer_desc[] */ + /* srp_memory_descriptor_t data_in_buffer_desc[] */ +} PACK_SUFFIX srp_cmd_t; + +/* Srp Response Code values */ +typedef enum response_code_value_enum +{ + RC_NO_FAILURE_OR_TSK_MGMT_FUNC_COMPLETE = 0x00, + RC_REQUEST_FIELDS_INVALID = 0x02, + RC_TSK_MGMT_FUNCTION_NOT_SUPPORTED = 0x04, + RC_TSK_MGMT_FUNCTION_FAILED = 0x05 +} RESPONSE_CODE_VALUE; + +typedef struct _srp_response_data +{ + uint8_t reserved[3]; + uint8_t response_code; +} PACK_SUFFIX srp_response_data_t; + +typedef struct _srp_rsp +{ + uint8_t type; + uint8_t reserved1[3]; + int32_t request_limit_delta; + uint64_t tag; + uint8_t reserved2[2]; + uint8_t flags; /* DIUNDER DIOVER DOUNDER DOOVER SNSVALID RSPVALID */ + uint8_t status; + uint32_t data_out_residual_count; + uint32_t data_in_residual_count; + uint32_t sense_data_list_length; + uint32_t 
response_data_list_length; + srp_response_data_t response_data[1]; /* place holder. may not be present */ + /* uint8_t sense_data[] */ +} PACK_SUFFIX srp_rsp_t; + +typedef struct _srp_cred_req +{ + uint8_t type; + uint8_t reserved[3]; + int32_t request_limit_delta; + uint64_t tag; +} PACK_SUFFIX srp_cred_req_t; + +typedef struct _srp_cred_rsp +{ + uint8_t type; + uint8_t reserved[7]; + uint64_t tag; +} PACK_SUFFIX srp_cred_rsp_t; + +typedef struct _srp_aer_req +{ + uint8_t type; + uint8_t reserved1[3]; + int32_t request_limit_delta; + uint64_t tag; + uint8_t reserved2[4]; + uint64_t logical_unit_number; + uint32_t sense_data_list_length; + uint8_t reserved3[4]; + uint8_t sense_data[1]; /* actually a place holder may not be present */ +} PACK_SUFFIX srp_aer_req_t; + +typedef struct _srp_aer_rsp +{ + uint8_t type; + uint8_t reserved[7]; + uint64_t tag; +} PACK_SUFFIX srp_aer_rsp_t; + +typedef union _srp_iu_buffer +{ + uint64_t alignment_dummy; + uint8_t iu_buffer[SRP_MAX_IU_SIZE]; + srp_information_unit_t information_unit; + srp_login_req_t login_request; + srp_login_rsp_t login_response; + srp_login_rej_t login_reject; + srp_i_logout_t initiator_logout; + srp_t_logout_t target_logout; + srp_tsk_mgmt_t task_management; + srp_cmd_t command; + srp_rsp_t response; + srp_cred_req_t credit_request; + srp_cred_rsp_t credit_response; + srp_aer_req_t async_event_request; + srp_aer_rsp_t async_event_response; +} PACK_SUFFIX srp_iu_buffer_t; + +#include + +#endif /* SRP_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_aer_req.h b/branches/Ndi/ulp/srp/kernel/srp_aer_req.h new file mode 100644 index 00000000..540c41ce --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_aer_req.h @@ -0,0 +1,295 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_AER_REQ_H_INCLUDED +#define SRP_AER_REQ_H_INCLUDED + +#include "srp.h" +#include "srp_iu_buffer.h" +#include "srp_Information_unit.h" + +/* set_srp_async_event_request_tag */ +/*! 
+/* set_srp_async_event_request_tag */
+/*!
+Sets the tag field of an AsyncEvent request information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_async_event_request_tag(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN uint64_t iu_tag )
+{
+ set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_async_event_request */
+/*!
+Initializes the AsyncEvent request IU to zeroes,
+sets the IU type to Srp AsyncEvent request,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_async_event_request(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN uint64_t iu_tag )
+{
+ init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_AER_REQ );
+ set_srp_async_event_request_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_async_event_request_request_limit_delta */
+/*!
+Sets the request limit delta for the AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+@param request_limit_delta - flow control request limit delta
+
+@return - none
+*/
+static inline
+void
+set_srp_async_event_request_request_limit_delta(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN int32_t request_limit_delta )
+{
+ p_information_unit->request_limit_delta = request_limit_delta;
+}
+
+/* set_srp_async_event_request_logical_unit_number */
+/*!
+Sets the logical unit number for the AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+@param logical_unit_number - logical unit number for request
+
+@return - none
+*/
+static inline
+void
+set_srp_async_event_request_logical_unit_number(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN uint64_t logical_unit_number )
+{
+ p_information_unit->logical_unit_number = logical_unit_number;
+}
+
+/* set_srp_async_event_request_sense_data_list_length */
+/*!
+Sets the sense data list length for the AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+@param sense_data_list_length - length of sense data
+
+@return - none
+*/
+static inline
+void
+set_srp_async_event_request_sense_data_list_length(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN uint32_t sense_data_list_length )
+{
+ p_information_unit->sense_data_list_length = sense_data_list_length;
+}
+
+/* setup_srp_async_event_request */
+/*!
+Initializes and sets the Srp AsyncEvent request IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param request_limit_delta - flow control request limit delta
+@param logical_unit_number - logical unit number for request
+@param sense_data_list_length - length of sense data
+
+@return - pointer to the sense data area
+*/
+static inline
+uint8_t*
+setup_srp_async_event_request(
+ IN OUT srp_aer_req_t *p_information_unit,
+ IN uint64_t iu_tag,
+ IN int32_t request_limit_delta,
+ IN uint64_t logical_unit_number,
+ IN uint32_t sense_data_list_length )
+{
+ init_srp_async_event_request( p_information_unit, iu_tag );
+ set_srp_async_event_request_request_limit_delta( p_information_unit, request_limit_delta );
+ set_srp_async_event_request_logical_unit_number( p_information_unit, logical_unit_number );
+ set_srp_async_event_request_sense_data_list_length( p_information_unit, sense_data_list_length );
+ return( p_information_unit->sense_data );
+}
+
+/* get_srp_async_event_request_tag */
+/*!
+Returns the value of the tag field of an AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_async_event_request_tag(
+ IN srp_aer_req_t *p_information_unit )
+{
+ return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_async_event_request_request_limit_delta */
+/*!
+Returns the value of the request limit delta field of an AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - request limit delta value
+*/
+static inline
+int32_t
+get_srp_async_event_request_request_limit_delta(
+ IN srp_aer_req_t *p_information_unit )
+{
+ return( p_information_unit->request_limit_delta );
+}
+
+/* get_srp_async_event_request_logical_unit_number */
+/*!
+Returns the value of the logical unit number field of an AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - logical unit number value
+*/
+static inline
+uint64_t
+get_srp_async_event_request_logical_unit_number(
+ IN srp_aer_req_t *p_information_unit )
+{
+ return( p_information_unit->logical_unit_number );
+}
+
+/* get_srp_async_event_request_sense_data_list_length */
+/*!
+Returns the value of the sense data list length field of an AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - sense data list length value
+*/
+static inline
+uint32_t
+get_srp_async_event_request_sense_data_list_length(
+ IN srp_aer_req_t *p_information_unit )
+{
+ return( p_information_unit->sense_data_list_length );
+}
+
+/* get_srp_async_event_request_sense_data */
+/*!
+Returns a pointer to the sense data field of an AsyncEvent request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to the sense data
+*/
+static inline
+uint8_t*
+get_srp_async_event_request_sense_data(
+ IN srp_aer_req_t *p_information_unit )
+{
+ return( p_information_unit->sense_data );
+}
+
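The length helper that follows uses a common C idiom for variable-length wire structures: subtract the one-byte placeholder array from sizeof the struct, then add the actual payload length. A self-contained sketch of that arithmetic, with simplified packed fields and an example 18-byte sense payload:

#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
typedef struct _aer_req_like
{
    uint8_t  type;
    uint8_t  reserved[7];
    uint64_t tag;
    uint32_t sense_data_list_length;
    uint8_t  sense_data[1];  /* placeholder; real data may follow the struct */
} aer_req_like_t;
#pragma pack(pop)

static uint32_t used_length( const aer_req_like_t *p )
{
    /* fixed part without the placeholder byte, plus the real sense length */
    return (uint32_t)( sizeof(*p) - sizeof(p->sense_data) ) + p->sense_data_list_length;
}

int main( void )
{
    aer_req_like_t req = { 0 };
    req.sense_data_list_length = 18;  /* e.g. fixed-format SCSI sense data */
    /* 20 fixed bytes + 18 sense bytes = 38 bytes on the wire */
    printf( "IU length = %u bytes\n", used_length( &req ) );
    return 0;
}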
+/* get_srp_async_event_request_length */
+/*!
+Returns the size in bytes of the Srp AsyncEvent request IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size of the IU in bytes
+*/
+static inline
+uint32_t
+get_srp_async_event_request_length(
+ IN srp_aer_req_t *p_information_unit )
+{
+ /* do not include the sense data placeholder in the sizeof the IU;
+    add its length to the structure size */
+ return( ( sizeof( *p_information_unit ) - sizeof( p_information_unit->sense_data ) ) + p_information_unit->sense_data_list_length );
+}
+
+/* set_srp_async_event_request_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_async_event_request_from_host_to_network(
+ IN OUT srp_aer_req_t *p_information_unit )
+{
+ set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+ p_information_unit->request_limit_delta = cl_hton32( p_information_unit->request_limit_delta );
+ p_information_unit->logical_unit_number = cl_hton64( p_information_unit->logical_unit_number );
+ p_information_unit->sense_data_list_length = cl_hton32( p_information_unit->sense_data_list_length );
+}
+
+/* set_srp_async_event_request_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_async_event_request_from_network_to_host(
+ IN OUT srp_aer_req_t *p_information_unit )
+{
+ set_srp_async_event_request_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_AER_REQ_H_INCLUDED */
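Note how set_srp_async_event_request_from_network_to_host above simply calls its host-to-network counterpart: a fixed-width byte swap is its own inverse, so one routine serves both directions (and on a big-endian host both are no-ops). A standalone illustration, with a local helper standing in for complib's cl_hton32:

#include <stdint.h>
#include <stdio.h>

/* stand-in for cl_hton32/cl_ntoh32 on a little-endian host: full byte reversal */
static uint32_t bswap32( uint32_t v )
{
    return ( v >> 24 ) | ( ( v >> 8 ) & 0x0000FF00 ) |
           ( ( v << 8 ) & 0x00FF0000 ) | ( v << 24 );
}

int main( void )
{
    uint32_t host = 0x12345678;
    uint32_t wire = bswap32( host );  /* host-to-network */
    uint32_t back = bswap32( wire );  /* network-to-host: the same function */
    printf( "host %#x -> wire %#x -> back %#x\n", host, wire, back );
    return 0;
}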
diff --git a/branches/Ndi/ulp/srp/kernel/srp_aer_rsp.h b/branches/Ndi/ulp/srp/kernel/srp_aer_rsp.h
new file mode 100644
index 00000000..78d9deeb
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_aer_rsp.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_AER_RSP_H_INCLUDED
+#define SRP_AER_RSP_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_async_event_response_tag */
+/*!
+Sets the tag field of an Async Event response information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_async_event_response_tag(
+ IN OUT srp_aer_rsp_t *p_information_unit,
+ IN uint64_t iu_tag )
+{
+ set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_async_event_response */
+/*!
+Initializes the Async Event response IU to zeroes,
+sets the IU type to Srp Async Event Response,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_async_event_response(
+ IN OUT srp_aer_rsp_t *p_information_unit,
+ IN uint64_t iu_tag )
+{
+ init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_AER_RSP );
+ set_srp_async_event_response_tag( p_information_unit, iu_tag );
+}
+
+/* setup_srp_async_event_response */
+/*!
+Initializes and sets the Srp Async Event Response IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+setup_srp_async_event_response(
+ IN OUT srp_aer_rsp_t *p_information_unit,
+ IN uint64_t iu_tag )
+{
+ init_srp_async_event_response( p_information_unit, iu_tag );
+}
+
+/* get_srp_async_event_response_tag */
+/*!
+Returns the value of the tag field of an Async Event response
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_async_event_response_tag(
+ IN srp_aer_rsp_t *p_information_unit )
+{
+ return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_async_event_response_length */
+/*!
+Returns the size in bytes of the Srp AsyncEvent Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size of the IU in bytes
+*/
+static inline
+uint32_t
+get_srp_async_event_response_length(
+ IN srp_aer_rsp_t *p_information_unit )
+{
+ return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_async_event_response_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_async_event_response_from_host_to_network(
+ IN OUT srp_aer_rsp_t *p_information_unit )
+{
+ set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+}
+
+/* set_srp_async_event_response_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_async_event_response_from_network_to_host(
+ IN OUT srp_aer_rsp_t *p_information_unit )
+{
+ set_srp_async_event_response_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_AER_RSP_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_cmd.h b/branches/Ndi/ulp/srp/kernel/srp_cmd.h
new file mode 100644
index 00000000..3c1e76e3
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_cmd.h
@@ -0,0 +1,648 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ *   copyright notice, this list of conditions and the following
+ *   disclaimer in the documentation and/or other materials
+ *   provided with the distribution.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_CMD_H_INCLUDED +#define SRP_CMD_H_INCLUDED + +#include "srp.h" +#include "srp_iu_buffer.h" +#include "srp_information_unit.h" + +/* set_srp_command_tag */ +/*! +Sets the tag field of a command information unit + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value of IU + +@return - none +*/ +static inline +void +set_srp_command_tag( + IN OUT srp_cmd_t *p_information_unit, + IN uint64_t iu_tag ) +{ + set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag ); +} + +/* init_srp_command */ +/*! +Initializes the command IU to zeroes +and sets the IU type to command +and sets the tag to the value supplied + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value to be used for the req/rsp pair + +@return - none +*/ +static inline +void +init_srp_command( + IN OUT srp_cmd_t *p_information_unit, + IN uint64_t iu_tag ) +{ + init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_CMD ) ; + set_srp_command_tag( p_information_unit, iu_tag ); +} + +/* set_srp_command_data_out_buffer_desc_fmt */ +/*! +sets the data out buffer descriptor format value + +@param p_information_unit - pointer to the IU structure +@param data_out_buffer_desc_fmt - buffer descriptor format value + +@return - none +*/ +static inline +void +set_srp_command_data_out_buffer_desc_fmt( + IN OUT srp_cmd_t *p_information_unit, + IN DATA_BUFFER_DESCRIPTOR_FORMAT data_out_buffer_desc_fmt ) +{ + p_information_unit->data_out_in_buffer_desc_fmt = + ( p_information_unit->data_out_in_buffer_desc_fmt & 0x0F ) | ( (uint8_t)data_out_buffer_desc_fmt << 4 ); +} + +/* set_srp_command_data_in_buffer_desc_fmt */ +/*! +sets the data in buffer descriptor format value + +@param p_information_unit - pointer to the IU structure +@param data_in_buffer_desc_fmt - buffer descriptor format value + +@return - none +*/ +static inline +void +set_srp_command_data_in_buffer_desc_fmt( + IN OUT srp_cmd_t *p_information_unit, + IN DATA_BUFFER_DESCRIPTOR_FORMAT data_in_buffer_desc_fmt ) +{ + p_information_unit->data_out_in_buffer_desc_fmt = ( p_information_unit->data_out_in_buffer_desc_fmt & 0xF0 ) | (uint8_t)data_in_buffer_desc_fmt; +} + +/* set_srp_command_data_out_buffer_desc_count */ +/*! +sets the data out buffer descriptor count value + +@param p_information_unit - pointer to the IU structure +@param data_out_buffer_desc_count - buffer descriptor count value + +@return - none +*/ +static inline +void +set_srp_command_data_out_buffer_desc_count( + IN OUT srp_cmd_t *p_information_unit, + IN uint8_t data_out_buffer_desc_count ) +{ + p_information_unit->data_out_buffer_desc_count = data_out_buffer_desc_count; +} + +/* set_srp_command_data_in_buffer_desc_count */ +/*! 
+sets the data in buffer descriptor count value + +@param p_information_unit - pointer to the IU structure +@param data_in_buffer_desc_count - buffer descriptor count value + +@return - none +*/ +static inline +void +set_srp_command_data_in_buffer_desc_count( + IN OUT srp_cmd_t *p_information_unit, + IN uint8_t data_in_buffer_desc_count ) +{ + p_information_unit->data_in_buffer_desc_count = data_in_buffer_desc_count; +} + +/* set_srp_command_logical_unit_number */ +/*! +Sets the logical unit number for the command IU + +@param p_information_unit - pointer to the IU structure +@param logical_unit_number - logical unit number for request + +@return - none +*/ +static inline +void +set_srp_command_logical_unit_number( + IN OUT srp_cmd_t *p_information_unit, + IN uint64_t logical_unit_number ) +{ + p_information_unit->logical_unit_number = logical_unit_number; +} + +/* set_srp_command_task_attribute */ +/*! +Sets the task attribute for the command IU + +@param p_information_unit - pointer to the IU structure +@param task_attribute - task attribute for the request + +@return - none +*/ +static inline +void +set_srp_command_task_attribute( + IN OUT srp_cmd_t *p_information_unit, + IN TASK_ATTRIBUTE_VALUE task_attribute ) +{ + p_information_unit->flags1 = ( p_information_unit->flags1 & 0xF8 ) | ( (uint8_t)task_attribute ); +} + +/* set_srp_command_additional_cdb_length */ +/*! +Sets the additional CDB length for the command IU + +@param p_information_unit - pointer to the IU structure +@param additional_cdb_length - additional CDB length for the request + +@return - none +*/ +static inline +void +set_srp_command_additional_cdb_length( + IN OUT srp_cmd_t *p_information_unit, + IN uint8_t additional_cdb_length ) +{ + p_information_unit->flags2 = ( p_information_unit->flags2 & 0x03 ) | ( additional_cdb_length << 2 ); +} + +/* setup_srp_command */ +/*! 
+Initializes and sets the Srp command IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param data_out_buffer_desc_fmt - buffer descriptor format value
+@param data_in_buffer_desc_fmt - buffer descriptor format value
+@param data_out_buffer_desc_count - buffer descriptor count value
+@param data_in_buffer_desc_count - buffer descriptor count value
+@param logical_unit_number - logical unit number for request
+@param task_attribute - task attribute for the request
+@param additional_cdb_length - additional CDB length for the request
+
+@return - pointer to the CDB
+*/
+static inline
+uint8_t*
+setup_srp_command(
+ IN OUT srp_cmd_t *p_information_unit,
+ IN uint64_t iu_tag,
+ IN DATA_BUFFER_DESCRIPTOR_FORMAT data_out_buffer_desc_fmt,
+ IN DATA_BUFFER_DESCRIPTOR_FORMAT data_in_buffer_desc_fmt,
+ IN uint8_t data_out_buffer_desc_count,
+ IN uint8_t data_in_buffer_desc_count,
+ IN uint64_t logical_unit_number,
+ IN TASK_ATTRIBUTE_VALUE task_attribute,
+ IN uint8_t additional_cdb_length )
+{
+ init_srp_command( p_information_unit, iu_tag );
+ set_srp_command_data_out_buffer_desc_fmt( p_information_unit, data_out_buffer_desc_fmt );
+ set_srp_command_data_in_buffer_desc_fmt( p_information_unit, data_in_buffer_desc_fmt );
+ set_srp_command_data_out_buffer_desc_count( p_information_unit, data_out_buffer_desc_count );
+ set_srp_command_data_in_buffer_desc_count( p_information_unit, data_in_buffer_desc_count );
+ set_srp_command_logical_unit_number( p_information_unit, logical_unit_number );
+ set_srp_command_task_attribute( p_information_unit, task_attribute );
+ set_srp_command_additional_cdb_length( p_information_unit, additional_cdb_length );
+ return( p_information_unit->cdb );
+}
+
+/* get_srp_command_tag */
+/*!
+Returns the value of the tag field of a command IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_command_tag(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_command_data_out_buffer_desc_fmt */
+/*!
+Returns the value of the data out buffer descriptor format field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - data out buffer descriptor format value
+*/
+static inline
+DATA_BUFFER_DESCRIPTOR_FORMAT
+get_srp_command_data_out_buffer_desc_fmt(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( ( DATA_BUFFER_DESCRIPTOR_FORMAT ) ( p_information_unit->data_out_in_buffer_desc_fmt >> 4 ) );
+}
+
+/* get_srp_command_data_in_buffer_desc_fmt */
+/*!
+Returns the value of the data in buffer descriptor format field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - data in buffer descriptor format value
+*/
+static inline
+DATA_BUFFER_DESCRIPTOR_FORMAT
+get_srp_command_data_in_buffer_desc_fmt(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( ( DATA_BUFFER_DESCRIPTOR_FORMAT ) ( p_information_unit->data_out_in_buffer_desc_fmt & 0x0F ) );
+}
+
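The format setters earlier in this header pack two 4-bit descriptor-format codes into a single byte (data-out in the high nibble, data-in in the low nibble), and the getters above unpack them the same way. A minimal round-trip sketch of that packing, using illustrative format codes rather than the real DBDF_* values:

#include <stdint.h>
#include <stdio.h>

enum fmt { FMT_NONE = 0, FMT_DIRECT = 1, FMT_INDIRECT = 2 };  /* illustrative codes */

static void set_out_fmt( uint8_t *p_byte, enum fmt f )
{
    *p_byte = ( *p_byte & 0x0F ) | ( (uint8_t)f << 4 );  /* high nibble */
}

static void set_in_fmt( uint8_t *p_byte, enum fmt f )
{
    *p_byte = ( *p_byte & 0xF0 ) | (uint8_t)f;           /* low nibble */
}

int main( void )
{
    uint8_t fmt_byte = 0;

    set_out_fmt( &fmt_byte, FMT_DIRECT );
    set_in_fmt( &fmt_byte, FMT_INDIRECT );
    /* unpacking mirrors the getters: >> 4 and & 0x0F */
    printf( "byte %#04x, out %u, in %u\n",
        fmt_byte, fmt_byte >> 4, fmt_byte & 0x0F );
    return 0;
}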
+/* get_srp_command_data_out_buffer_desc_count */
+/*!
+Returns the value of the data out buffer descriptor count field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - data out buffer descriptor count value
+*/
+static inline
+uint8_t
+get_srp_command_data_out_buffer_desc_count(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( p_information_unit->data_out_buffer_desc_count );
+}
+
+/* get_srp_command_data_in_buffer_desc_count */
+/*!
+Returns the value of the data in buffer descriptor count field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - data in buffer descriptor count value
+*/
+static inline
+uint8_t
+get_srp_command_data_in_buffer_desc_count(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( p_information_unit->data_in_buffer_desc_count );
+}
+
+/* get_srp_command_logical_unit_number */
+/*!
+Returns the value of the logical unit number field of a command IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - logical unit number value
+*/
+static inline
+uint64_t
+get_srp_command_logical_unit_number(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( p_information_unit->logical_unit_number );
+}
+
+/* get_srp_command_task_attribute */
+/*!
+Returns the value of the task attribute field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - task attribute value
+*/
+static inline
+TASK_ATTRIBUTE_VALUE
+get_srp_command_task_attribute(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( ( TASK_ATTRIBUTE_VALUE ) ( p_information_unit->flags1 & 0x07 ) );
+}
+
+/* get_srp_command_additional_cdb_length */
+/*!
+Returns the value of the additional CDB length field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - additional CDB length value
+*/
+static inline
+uint8_t
+get_srp_command_additional_cdb_length(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( ( uint8_t ) ( p_information_unit->flags2 & 0xFC ) >> 2 );
+}
+
+/* get_srp_command_cdb */
+/*!
+Returns a pointer to the CDB field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to the CDB
+*/
+static inline
+uint8_t*
+get_srp_command_cdb(
+ IN srp_cmd_t *p_information_unit )
+{
+ return( p_information_unit->cdb );
+}
+
+/* get_srp_command_additional_cdb */
+/*!
+Returns a pointer to the additional CDB field of a command
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to the additional CDB
+*/
+static inline
+uint8_t*
+get_srp_command_additional_cdb(
+ IN srp_cmd_t *p_information_unit )
+{
+ if( get_srp_command_additional_cdb_length( p_information_unit ) == 0 )
+ {
+ return( NULL );
+ }
+
+ return( p_information_unit->additional_cdb );
+}
+
+/* get_srp_command_data_out_buffer_desc */
+/*!
+Returns a pointer to the data out buffer desc field of a command
+
+WARNING!!!! Set the additional CDB length before this call so the
+ offset can be correctly calculated
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to data out buffer desc
+*/
+static inline
+srp_memory_descriptor_t*
+get_srp_command_data_out_buffer_desc(
+ IN srp_cmd_t *p_information_unit )
+{
+ if( get_srp_command_data_out_buffer_desc_fmt( p_information_unit ) == DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT )
+ {
+ return( NULL );
+ }
+
+ return( ( srp_memory_descriptor_t* ) ( p_information_unit->additional_cdb + ( get_srp_command_additional_cdb_length( p_information_unit ) * 4 ) ) );
+}
+
+/* get_srp_command_data_in_buffer_desc */
+/*!
+Returns a pointer to the data in buffer desc field of a command + +WARNING!!!! Set the additional CDB length and data out buffer descriptor count + before this call so the offset can be correctly calculated + +@param p_information_unit - pointer to the IU structure + +@return - pointer to data in buffer desc +*/ +static inline +srp_memory_descriptor_t* +get_srp_command_data_in_buffer_desc( + IN srp_cmd_t *p_information_unit ) +{ + if( get_srp_command_data_in_buffer_desc_fmt( p_information_unit ) == DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT ) + { + return( NULL ); + } + + return( ( srp_memory_descriptor_t* ) ( p_information_unit->additional_cdb + + ( get_srp_command_additional_cdb_length( p_information_unit ) * 4 ) + + ( get_srp_command_data_out_buffer_desc_count( p_information_unit ) * sizeof( srp_memory_descriptor_t ) ) ) ); +} + +/* get_srp_command_buffer_desc */ +/*! +Returns a pointer to the start of the data buffer descs of a command + +WARNING!!!! Set the additional CDB length before this call so the + offset can be correctly calculated + +@param p_information_unit - pointer to the IU structure + +@return - pointer to start of data buffer descs block +*/ +static inline +srp_memory_descriptor_t* +get_srp_command_buffer_desc( + IN srp_cmd_t *p_information_unit ) +{ + return( ( srp_memory_descriptor_t* ) ( p_information_unit->additional_cdb + ( get_srp_command_additional_cdb_length( p_information_unit ) * 4 ) ) ); +} + + +/* get_srp_command_Length */ +/*! +Returns the size in bytes of the Srp command IU + +@param p_information_unit - pointer to the IU structure + +@return - used length of command IU buffer +*/ +static inline +uint32_t +get_srp_command_length( + IN srp_cmd_t *p_information_unit ) +{ + int buffer_desc_count; + uint32_t srp_cmd_length = ( sizeof( *p_information_unit ) - sizeof( p_information_unit->additional_cdb ) ) + + ( get_srp_command_additional_cdb_length( p_information_unit ) * 4 ); + + switch ( get_srp_command_data_out_buffer_desc_fmt ( p_information_unit )) + { + case DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR: + buffer_desc_count = get_srp_command_data_out_buffer_desc_count( p_information_unit ); + srp_cmd_length += ( buffer_desc_count == 0)? sizeof(srp_memory_descriptor_t): + ( buffer_desc_count * sizeof(srp_memory_descriptor_t )); + break; + case DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS: + buffer_desc_count = get_srp_command_data_out_buffer_desc_count( p_information_unit ); + srp_cmd_length += sizeof(srp_memory_table_descriptor_t) + ( buffer_desc_count * sizeof(srp_memory_descriptor_t)); + break; + default: + break; + } + + switch ( get_srp_command_data_in_buffer_desc_fmt ( p_information_unit )) + { + case DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR: + buffer_desc_count = get_srp_command_data_in_buffer_desc_count( p_information_unit ); + srp_cmd_length += ( buffer_desc_count == 0)? sizeof(srp_memory_descriptor_t): + ( buffer_desc_count * sizeof(srp_memory_descriptor_t )); + break; + case DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS: + buffer_desc_count = get_srp_command_data_in_buffer_desc_count( p_information_unit ); + srp_cmd_length += sizeof(srp_memory_table_descriptor_t) + ( buffer_desc_count * sizeof(srp_memory_descriptor_t)); + break; + default: + break; + } + return ( srp_cmd_length ); +} + +/* set_srp_command_from_host_to_network */ +/*! 
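A note on the pointer arithmetic used by the descriptor getters above: the variable-length trailer starts at additional_cdb, the additional CDB occupies ADDITIONAL CDB LENGTH 4-byte words, the data-out descriptors follow it, and the data-in descriptors follow those; hence the warnings to set the lengths and counts first. A standalone sketch of the offset math (the 48-byte fixed prefix is an assumption mirroring srp_cmd_t up to the end of its 16-byte CDB):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(push, 1)
typedef struct _mem_desc { uint64_t va; uint32_t handle; uint32_t len; } mem_desc_t;  /* 16 bytes */
typedef struct _cmd_like
{
    uint8_t fixed[48];          /* stands in for the fixed srp_cmd_t fields + 16-byte CDB */
    uint8_t additional_cdb[1];  /* placeholder; the trailer really starts here */
} cmd_like_t;
#pragma pack(pop)

int main( void )
{
    uint8_t acdb_words = 2;     /* ADDITIONAL CDB LENGTH, in 4-byte words */
    uint8_t out_count  = 3;     /* data-out descriptor count */

    size_t out_off = offsetof( cmd_like_t, additional_cdb ) + acdb_words * 4;
    size_t in_off  = out_off + out_count * sizeof( mem_desc_t );

    /* 48 + 8 = 56; 56 + 3*16 = 104 */
    printf( "data-out descriptors at offset %zu, data-in at %zu\n", out_off, in_off );
    return 0;
}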
+Swaps the IU fields from Host to Network ordering + +@param p_information_unit - pointer to the IU structure + +@return - none +*/ + +static inline +void +set_srp_command_from_host_to_network( + IN OUT srp_cmd_t *p_information_unit ) +{ + srp_memory_descriptor_t *p_memory_descriptor; + srp_memory_table_descriptor_t *p_table_descriptor; + int buffer_desc_count; + int i; + + set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit ); + p_information_unit->logical_unit_number = cl_hton64( p_information_unit->logical_unit_number ); + + p_memory_descriptor = get_srp_command_buffer_desc( p_information_unit ); + + switch (get_srp_command_data_out_buffer_desc_fmt(p_information_unit) ) + { + case DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR: + buffer_desc_count = get_srp_command_data_out_buffer_desc_count( p_information_unit ); + if ( p_memory_descriptor != NULL ) + { + for ( i=0; i < buffer_desc_count; i++) + { + p_memory_descriptor->virtual_address = cl_hton64( p_memory_descriptor->virtual_address ); + p_memory_descriptor->data_length = cl_hton32 ( p_memory_descriptor->data_length ); + p_memory_descriptor++; + } + } + break; + case DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS: + buffer_desc_count = get_srp_command_data_out_buffer_desc_count( p_information_unit ); + if ( p_memory_descriptor != NULL ) + { + p_table_descriptor = ( srp_memory_table_descriptor_t *)p_memory_descriptor; + p_memory_descriptor = ( srp_memory_descriptor_t *)( p_table_descriptor + 1); + + p_table_descriptor->descriptor.virtual_address = cl_hton64( p_table_descriptor->descriptor.virtual_address ); + p_table_descriptor->descriptor.data_length = cl_hton32( p_table_descriptor->descriptor.data_length ); + p_table_descriptor->total_length = cl_hton32( p_table_descriptor->total_length ); + + for ( i=0; i < buffer_desc_count; i++) + { + p_memory_descriptor->virtual_address = cl_hton64( p_memory_descriptor->virtual_address ); + p_memory_descriptor->data_length = cl_hton32( p_memory_descriptor->data_length ); + p_memory_descriptor++; + } + } + break; + case DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT: + default: + break; + } + + switch (get_srp_command_data_in_buffer_desc_fmt(p_information_unit) ) + { + case DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR: + buffer_desc_count = get_srp_command_data_in_buffer_desc_count( p_information_unit ); + if ( p_memory_descriptor != NULL ) + { + for ( i=0; i < buffer_desc_count; i++) + { + p_memory_descriptor->virtual_address = cl_hton64( p_memory_descriptor->virtual_address ); + p_memory_descriptor->data_length = cl_hton32 ( p_memory_descriptor->data_length ); + p_memory_descriptor++; + } + } + break; + case DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS: + buffer_desc_count = get_srp_command_data_in_buffer_desc_count( p_information_unit ); + if ( p_memory_descriptor != NULL ) + { + p_table_descriptor = ( srp_memory_table_descriptor_t *)p_memory_descriptor; + p_memory_descriptor = ( srp_memory_descriptor_t *)( p_table_descriptor + 1); + + p_table_descriptor->descriptor.virtual_address = cl_hton64( p_table_descriptor->descriptor.virtual_address ); + p_table_descriptor->descriptor.data_length = cl_hton32( p_table_descriptor->descriptor.data_length ); + p_table_descriptor->total_length = cl_hton32( p_table_descriptor->total_length ); + + for ( i=0; i < buffer_desc_count; i++) + { + p_memory_descriptor->virtual_address = cl_hton64( p_memory_descriptor->virtual_address ); + p_memory_descriptor->data_length = cl_hton32( p_memory_descriptor->data_length ); + p_memory_descriptor++; + } + } + break; + case 
DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT: + default: + break; + } +} + +/* set_srp_command_from_network_to_host */ +/*! +Swaps the IU fields from Network to Host ordering + +@param p_information_unit - pointer to the IU structure + +@return - none +*/ + +static inline +void +set_srp_command_from_network_to_host( + IN OUT srp_cmd_t *p_information_unit ) +{ + set_srp_command_from_host_to_network ( p_information_unit ); +} + +#endif /* SRP_CMD_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_connection.c b/branches/Ndi/ulp/srp/kernel/srp_connection.c new file mode 100644 index 00000000..e1caea7b --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_connection.c @@ -0,0 +1,940 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "srp_data_path.h" +#include "srp_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_connection.tmh" +#endif +#include "srp_event.h" +#include "srp_hca.h" +#include "srp_session.h" + +#include "srp.h" +#include "srp_login_req.h" +#include "srp_login_rsp.h" +#include "srp_login_rej.h" + +#include "srp_connection.h" + +#include + +/* __srp_create_cqs */ +/*! +Creates the send/recv completion queues to be used by this connection + +@param p_srp_connection - pointer to the connection structure +@param p_hca - pointer to the hca structure used by the connection +@param p_session - context passed to callback functions + +@return - result of cq creation +*/ +static +ib_api_status_t +__srp_create_cqs( + IN OUT srp_connection_t *p_srp_connection, + IN srp_hca_t *p_hca, + IN p_srp_session_t p_session ) +{ + ib_api_status_t status; + ib_cq_create_t cq_create; + ib_al_ifc_t *p_ifc; + + SRP_ENTER( SRP_DBG_PNP ); + + p_ifc = &p_hca->p_hba->ifc; + + // Create Send CQ + cq_create.size = SRP_DEFAULT_SEND_Q_DEPTH; + cq_create.pfn_comp_cb = srp_send_completion_cb; + cq_create.h_wait_obj = NULL; + + status = p_ifc->create_cq( p_hca->h_ca, + &cq_create, + p_session, + srp_async_event_handler_cb, + &p_srp_connection->h_send_cq ); + if( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Create Send Completion Queue. 
Status = %d\n", status) ); + goto exit; + } + + // Create Receive CQ + cq_create.size = SRP_DEFAULT_RECV_Q_DEPTH; + cq_create.pfn_comp_cb = srp_recv_completion_cb; + cq_create.h_wait_obj = NULL; + + status = p_ifc->create_cq( p_hca->h_ca, + &cq_create, + p_session, + srp_async_event_handler_cb, + &p_srp_connection->h_recv_cq ); + if( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Create Receive Completion Queue. Status = %d\n", status) ); + } + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +/* __srp_create_qp */ +/*! +Creates the queue pair to be used by this connection + +@param p_srp_connection - pointer to the connection structure +@param p_hca - pointer to the hca structure used by the connection +@param p_session - context passed to callback functions + +@return - result of qp creation +*/ +static +ib_api_status_t +__srp_create_qp( + IN OUT srp_connection_t *p_srp_connection, + IN srp_hca_t *p_hca, + IN p_srp_session_t p_session ) +{ + ib_api_status_t status; + ib_qp_create_t qp_create; + ib_al_ifc_t *p_ifc; + + SRP_ENTER( SRP_DBG_PNP ); + + p_ifc = &p_hca->p_hba->ifc; + + // Create QP + cl_memclr( &qp_create, sizeof(qp_create) ); + qp_create.qp_type = IB_QPT_RELIABLE_CONN; + qp_create.sq_depth = SRP_DEFAULT_SEND_Q_DEPTH; + qp_create.rq_depth = SRP_DEFAULT_RECV_Q_DEPTH; + qp_create.sq_sge = 1; + qp_create.rq_sge = 1; + qp_create.h_sq_cq = p_srp_connection->h_send_cq; + qp_create.h_rq_cq = p_srp_connection->h_recv_cq; + qp_create.sq_signaled = FALSE;//TRUE; + + status = p_ifc->create_qp( p_hca->h_pd, + &qp_create, + p_session, + srp_async_event_handler_cb, + &p_srp_connection->h_qp ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Create Queue Pair. Status = %d\n", status) ); + } + + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +static +cl_status_t +__srp_create_wc_free_list( + IN OUT srp_connection_t *p_connection, + IN uint32_t completion_count ) +{ + cl_status_t status = CL_SUCCESS; + ib_wc_t *p_wc; + uint32_t i; + + SRP_ENTER( SRP_DBG_PNP ); + + p_connection->p_wc_array = cl_zalloc( sizeof( ib_wc_t ) * completion_count ); + if ( p_connection->p_wc_array == NULL ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to allocate %d work completions.\n", completion_count) ); + status = CL_INSUFFICIENT_MEMORY; + goto exit; + } + + p_wc = p_connection->p_wc_array; + + for ( i = 1; i < completion_count; i++, p_wc++ ) + { + p_wc->p_next = (p_wc + 1); + } + + p_connection->p_wc_free_list = p_connection->p_wc_array; + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + + +/* __srp_cm_request_cb */ +/*! +Callback for a connect request - not used by SRP - We initiate connections + +@param p_cm_request - pointer to the connect request structure + +@return - none +*/ +static +void +__srp_cm_request_cb( + IN ib_cm_req_rec_t *p_cm_request) +{ + SRP_ENTER( SRP_DBG_PNP ); + + UNUSED_PARAM ( p_cm_request ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* __srp_cm_apr_cb */ +/*! +Callback for alternate path response - not used by SRP + +@param p_cm_apr_rec - pointer to the alternate path response structure + +@return - none +*/ +static +void +__srp_cm_apr_cb( + IN ib_cm_apr_rec_t *p_cm_apr_rec ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + UNUSED_PARAM( p_cm_apr_rec ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* __srp_cm_mra_cb */ +/*! 
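__srp_create_wc_free_list above threads one zeroed allocation into a singly linked free list of work completions; because the loop starts at index 1, the last element keeps the NULL p_next it got from cl_zalloc. The same pattern in a standalone sketch, with generic nodes instead of ib_wc_t:

#include <stdio.h>
#include <stdlib.h>

typedef struct _node { struct _node *p_next; } node_t;

int main( void )
{
    const unsigned count = 4;
    node_t *p_array = calloc( count, sizeof(node_t) );  /* zeroed, like cl_zalloc */
    node_t *p_free_list, *p;
    unsigned i;

    if( p_array == NULL )
        return 1;

    /* chain element k to k+1; element count-1 keeps its NULL p_next */
    p = p_array;
    for( i = 1; i < count; i++, p++ )
        p->p_next = p + 1;

    p_free_list = p_array;  /* head of the free list */
    for( p = p_free_list; p != NULL; p = p->p_next )
        printf( "node %ld\n", (long)( p - p_array ) );

    free( p_array );
    return 0;
}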
+Callback for message received acknowledgement - ignored by SRP - wait for connect reply + +@param p_cm_mra_rec - pointer to the message received acknowledgement structure + +@return - none +*/ +static +void +__srp_cm_mra_cb( + IN ib_cm_mra_rec_t *p_cm_mra_rec) +{ + SRP_ENTER( SRP_DBG_PNP ); + + UNUSED_PARAM ( p_cm_mra_rec ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* __srp_cm_dreq_cb */ +/*! +Callback for disconnect request from the target +Initiates the disconnect for the session + +TODO: + +@param p_cm_dreq_rec - pointer to the disconnect request structure + +@return - none +*/ +static +void +__srp_cm_dreq_cb( + IN ib_cm_dreq_rec_t *p_cm_dreq_rec ) +{ + srp_session_t *p_srp_session = (srp_session_t* __ptr64)p_cm_dreq_rec->qp_context; + srp_hba_t *p_hba = p_srp_session->p_hba; + ib_cm_drep_t cm_drep; + ib_api_status_t status; + int i; + int retry_count = 0; + + SRP_ENTER( SRP_DBG_PNP ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Target has issued a disconnect request.\n") ); + + if ( p_hba->adapter_paused == FALSE ) + { + p_hba->adapter_paused = TRUE; + StorPortBusy( p_hba->p_ext, (ULONG)-1 ); + StorPortCompleteRequest( p_hba->p_ext, + SP_UNTAGGED, + SP_UNTAGGED, + SP_UNTAGGED, + SRB_STATUS_BUSY ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, ("Pausing Adapter for %s.\n", p_hba->ioc_info.profile.id_string) ); + } + + cl_obj_lock( &p_srp_session->obj ); + p_srp_session->connection.state = SRP_TARGET_DISCONNECTING; + cl_obj_unlock( &p_srp_session->obj ); + + cm_drep.p_drep_pdata = NULL; + cm_drep.drep_length = 0; + + status = p_hba->ifc.cm_drep( p_cm_dreq_rec->h_cm_dreq, &cm_drep ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot respond to target disconnect request. Status = %d\n", status) ); + } + + cl_obj_lock( &p_hba->obj ); + + for ( i = 0; i < SRP_MAX_SERVICE_ENTRIES; i++ ) + { + if ( p_srp_session == p_hba->session_list[i] ) + { + p_hba->session_list[i] = NULL; + break; + } + } + + cl_obj_unlock( &p_hba->obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Session Object ref_cnt = %d\n", p_srp_session->obj.ref_cnt) ); + cl_obj_destroy( &p_srp_session->obj ); + + do + { + retry_count++; + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Attempting to reconnect %s. 
Connection Attempt Count = %d.\n", + p_hba->ioc_info.profile.id_string, + retry_count) ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Creating New Session For Service Entry Index %d.\n", + p_hba->ioc_info.profile.num_svc_entries)); + p_srp_session = srp_new_session( + p_hba, &p_hba->p_svc_entries[i], &status ); + if ( p_srp_session == NULL ) + { + status = IB_INSUFFICIENT_MEMORY; + break; + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("New Session For Service Entry Index %d Created.\n", + p_hba->ioc_info.profile.num_svc_entries)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Logging Into Session.\n")); + status = srp_session_login( p_srp_session ); + if ( status == IB_SUCCESS ) + { + if ( p_hba->max_sg > p_srp_session->connection.max_scatter_gather_entries ) + { + p_hba->max_sg = p_srp_session->connection.max_scatter_gather_entries; + } + + if ( p_hba->max_srb_ext_sz > p_srp_session->connection.init_to_targ_iu_sz ) + { + p_hba->max_srb_ext_sz = + sizeof( srp_send_descriptor_t ) - + SRP_MAX_IU_SIZE + + p_srp_session->connection.init_to_targ_iu_sz; + } + + cl_obj_lock( &p_hba->obj ); + p_hba->session_list[i] = p_srp_session; + cl_obj_unlock( &p_hba->obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Session Login Issued Successfully.\n")); + } + else + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Session Login Failure Status = %d.\n", status)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Session Object ref_cnt = %d\n", p_srp_session->obj.ref_cnt) ); + cl_obj_destroy( &p_srp_session->obj ); + } + } while ( (status != IB_SUCCESS) && (retry_count < 3) ); + + if ( status == IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Resuming Adapter for %s.\n", p_hba->ioc_info.profile.id_string) ); + p_hba->adapter_paused = FALSE; + StorPortReady( p_hba->p_ext ); +// StorPortNotification( BusChangeDetected, p_hba->p_ext, 0 ); + } + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* __srp_cm_reply_cb */ +/*! 
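The disconnect handler above tears the old session down and then retries the login in a bounded do/while loop (at most three attempts). Its control flow, reduced to a skeleton in which session_login is a stub standing in for the srp_new_session + srp_session_login pair:

#include <stdio.h>

typedef enum { STATUS_SUCCESS, STATUS_FAILURE } status_t;

/* stub: pretend the login succeeds on the second attempt */
static status_t session_login( int attempt )
{
    return ( attempt >= 2 ) ? STATUS_SUCCESS : STATUS_FAILURE;
}

int main( void )
{
    status_t status;
    int retry_count = 0;

    do
    {
        retry_count++;
        printf( "connection attempt %d\n", retry_count );
        status = session_login( retry_count );
    } while( ( status != STATUS_SUCCESS ) && ( retry_count < 3 ) );

    printf( "%s\n", status == STATUS_SUCCESS ? "reconnected" : "giving up" );
    return 0;
}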
+Callback for connect reply from the target +The target has accepted our connect/login request + +@param p_cm_reply - pointer to the connect reply structure + +@return - none +*/ +static +void +__srp_cm_reply_cb( + IN ib_cm_rep_rec_t *p_cm_reply) +{ + srp_session_t *p_srp_session = (srp_session_t* __ptr64)p_cm_reply->qp_context; + srp_connection_t *p_connection; + srp_login_rsp_t *p_srp_login_rsp = (srp_login_rsp_t* __ptr64)p_cm_reply->p_rep_pdata; + ib_api_status_t status; + union + { + ib_cm_mra_t cm_mra; + ib_cm_rtu_t cm_rtu; + ib_cm_rej_t cm_rej; + + } u; + cl_status_t cl_status; + ib_al_ifc_t *p_ifc; + + SRP_ENTER( SRP_DBG_PNP ); + + p_ifc = &p_srp_session->p_hba->ifc; + p_connection = &p_srp_session->connection; + + set_srp_login_response_from_network_to_host( p_srp_login_rsp ); + p_connection->descriptor_format = get_srp_login_response_supported_data_buffer_formats( p_srp_login_rsp ); + + p_connection->request_limit = + MIN( get_srp_login_response_request_limit_delta( p_srp_login_rsp ), SRP_DEFAULT_RECV_Q_DEPTH ); + + p_connection->request_threashold = 2; +#if DBG + p_srp_session->x_req_limit = p_connection->request_limit; +#endif + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ( "request_limit_delta %d, SRP_DEFAULT_RECV_Q_DEPTH %d, request_threashold %d\n", + get_srp_login_response_request_limit_delta( p_srp_login_rsp ), + SRP_DEFAULT_RECV_Q_DEPTH, p_connection->request_threashold )); + + p_connection->send_queue_depth = p_connection->request_limit; + p_connection->recv_queue_depth = p_connection->request_limit; + p_connection->init_to_targ_iu_sz = get_srp_login_response_max_init_to_targ_iu( p_srp_login_rsp ); + p_connection->targ_to_init_iu_sz = get_srp_login_response_max_targ_to_init_iu( p_srp_login_rsp ); + + p_connection->signaled_send_completion_count = p_connection->send_queue_depth / 2; + + if (( p_connection->descriptor_format & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) == DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) + { + p_connection->max_scatter_gather_entries = + ( MIN( SRP_MAX_IU_SIZE, p_connection->init_to_targ_iu_sz ) - offsetof( srp_cmd_t, additional_cdb )- sizeof(srp_memory_table_descriptor_t)) / sizeof( srp_memory_descriptor_t ); + } + else if (( p_connection->descriptor_format & DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR ) == DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR ) + { + p_connection->max_scatter_gather_entries = + (p_connection->init_to_targ_iu_sz - offsetof( srp_cmd_t, additional_cdb ))/ sizeof( srp_memory_descriptor_t ); + } + else /* not reported any descriptor format */ + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Target does not support valid descriptor formats\n") ); + goto rej; + } + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Request Limit = %d, SendQ Depth = %d, RecvQDepth = %d, " + "ItoT size = %d, TtoI size = %d, Max S/G = %d\n", + p_connection->request_limit, + p_connection->send_queue_depth, + p_connection->recv_queue_depth, + p_connection->init_to_targ_iu_sz, + p_connection->targ_to_init_iu_sz, + p_connection->max_scatter_gather_entries) ); + + /* will be used in srp_find_adapter to calculate NumberOfPhysicalBreaks */ + p_srp_session->p_hba->max_sg = p_connection->max_scatter_gather_entries; + + u.cm_mra.svc_timeout = 0x08; + u.cm_mra.p_mra_pdata = NULL; + u.cm_mra.mra_length = 0; + + status = p_ifc->cm_mra( p_cm_reply->h_cm_rep, &u.cm_mra ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Send MRA. 
Status = %d\n", status) ); + goto rej; + } + + status = p_ifc->modify_cq( p_connection->h_send_cq, &p_connection->send_queue_depth ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + ("Cannot Modify Send Completion Queue Depth. Status = %d\n", status) ); + } + + status = p_ifc->modify_cq( p_connection->h_recv_cq, &p_connection->recv_queue_depth ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + ("Cannot Modify Recv Completion Queue Depth. Status = %d\n", status) ); + } + + cl_status = __srp_create_wc_free_list( p_connection, (p_connection->request_limit * 2) );/* Send/Recv */ + if ( cl_status != CL_SUCCESS ) + { + goto rej; + } + + status = srp_init_descriptors( &p_srp_session->descriptors, + p_connection->request_limit, + p_connection->targ_to_init_iu_sz, + &p_srp_session->p_hba->ifc, + p_srp_session->hca.h_pd, + p_connection->h_qp ); + if ( status != IB_SUCCESS ) + { + goto err_init_desc; + } + + u.cm_rtu.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE; + + /* Have to set to 0 to indicate not to modify because Tavor doesn't support this */ + u.cm_rtu.sq_depth = 0 /*p_connection->request_limit*/; + u.cm_rtu.rq_depth = 0 /*p_connection->request_limit*/; + + u.cm_rtu.p_rtu_pdata = NULL; + u.cm_rtu.rtu_length = 0; + u.cm_rtu.pfn_cm_apr_cb = __srp_cm_apr_cb; + u.cm_rtu.pfn_cm_dreq_cb = __srp_cm_dreq_cb; + + status = p_ifc->cm_rtu( p_cm_reply->h_cm_rep, &u.cm_rtu ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Send RTU. Status = %d\n", status) ); + goto err_send_rtu; + } + + p_connection->state = SRP_CONNECTED; + + status = p_ifc->rearm_cq( p_connection->h_send_cq, FALSE ); + if ( status != IB_SUCCESS) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("ib_rearm_cq() for send cq failed!, status 0x%x", status) ); + + // TODO: Kill session and inform port driver link down storportnotification + goto err_send_rtu; + } + + status = p_ifc->rearm_cq( p_connection->h_recv_cq, FALSE ); + if ( status != IB_SUCCESS) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("ib_rearm_cq() for recv failed!, status 0x%x", status) ); + + // TODO: Kill session and inform port driver link down storportnotification + goto err_send_rtu; + } + goto exit; + +err_send_rtu: + // the rest will be cleaned up in srp_session_login + +err_init_desc: + cl_free( p_connection->p_wc_array ); + p_connection->p_wc_array = NULL; + p_connection->p_wc_free_list = NULL; + +rej: + p_connection->state = SRP_CONNECT_FAILURE; + cl_memclr( &u.cm_rej, sizeof(u.cm_rej) ); + u.cm_rej.rej_status = IB_REJ_INSUF_RESOURCES; + p_ifc->cm_rej( p_cm_reply->h_cm_rep, &u.cm_rej ); + +exit: + cl_status = cl_event_signal( &p_connection->conn_req_event ); + + SRP_EXIT( SRP_DBG_PNP ); +} + + +/* __srp_cm_rej_cb */ +/*! +Callback for connect reject from the target +The target has rejected our connect/login request + +@param p_cm_reject - pointer to the connect reject structure + +@return - none +*/ +static +void +__srp_cm_rej_cb( + IN ib_cm_rej_rec_t *p_cm_reject) +{ + srp_session_t *p_srp_session = (srp_session_t* __ptr64)p_cm_reject->qp_context; + srp_connection_t *p_connection; + srp_login_rej_t *p_srp_login_rej = (srp_login_rej_t* __ptr64)p_cm_reject->p_rej_pdata; + cl_status_t cl_status; + + SRP_ENTER( SRP_DBG_PNP ); + + p_connection = &p_srp_session->connection; + + if( p_srp_login_rej ) + { + set_srp_login_reject_from_network_to_host( p_srp_login_rej ); // <-- Is this coming back NULL? 
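	/*
	 * A worked example of the max_scatter_gather_entries arithmetic in
	 * __srp_cm_reply_cb above, under stated assumptions: a hypothetical
	 * negotiated init_to_targ_iu_sz of 320 bytes (with SRP_MAX_IU_SIZE
	 * assumed >= 320), srp_memory_descriptor_t at 16 bytes on the wire,
	 * srp_memory_table_descriptor_t at 20, and additional_cdb at packed
	 * offset 48 within srp_cmd_t:
	 *
	 *   indirect: ( 320 - 48 - 20 ) / 16 = 15 descriptors
	 *   direct:   ( 320 - 48 ) / 16      = 17 descriptors
	 *
	 * so the indirect path, when supported, yields slightly fewer
	 * scatter/gather entries per command than the direct path.
	 */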
+ p_connection->reject_reason = get_srp_login_reject_reason( p_srp_login_rej ); + + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Login Rejected. IBT Code = 0x%x, SRP Code = 0x%x\n", + p_cm_reject->rej_status, p_connection->reject_reason ) ); + switch( p_connection->reject_reason ) + { + case LIREJ_INIT_TO_TARG_IU_LENGTH_TOO_LARGE: + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("REQUESTED IU_SIZE %d\n", + p_connection->req_max_iu_msg_size )); + break; + case LIREJ_UNSUPPORTED_DATA_BUFFER_DESCRIPTOR_FORMAT: + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("REQUESTED DESC FORMAT: %#x, SUPPORTED FORMAT %#x\n", + p_connection->descriptor_format, + get_srp_login_reject_supported_data_buffer_formats(p_srp_login_rej) )); + __srp_issue_session_login( p_connection, (srp_hca_t *)&p_srp_session->hca, p_connection->ioc_max_send_msg_depth ); + return; + default: + break; + } + } + else + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Login Rejected. IBT Code = 0x%x\n", + p_cm_reject->rej_status) ); +} + p_connection->state = SRP_CONNECT_FAILURE; + + cl_status = cl_event_signal( &p_connection->conn_req_event ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* __srp_issue_session_login */ +/*! +Initializes and issues a login/cm connect request to the target + +@param p_connection - pointer to the connection structure +@param p_hca - pointer to the hca structure used by this connection +@param send_msg_depth - initial request limit delta value + +@return - result of login/cm connect request operations +*/ +#pragma optimize( "", off ) +static +ib_api_status_t +__srp_issue_session_login( + IN OUT srp_connection_t *p_connection, + IN srp_hca_t *p_hca, + IN uint8_t send_msg_depth ) +{ + ib_api_status_t status; + ib_cm_req_t cm_req; + srp_login_req_t login_req; + + SRP_ENTER( SRP_DBG_PNP ); + + cl_memclr( &cm_req, sizeof(ib_cm_req_t) ); + + cm_req.svc_id = p_connection->service_id; + + cm_req.flags = 0; // event used instead of IB_FLAGS_SYNC + cm_req.max_cm_retries = 8; + cm_req.p_primary_path = p_connection->p_path_rec; + + /*already tried to login before and failed ? */ + if ( !p_connection->reject_reason ) + { + p_connection->descriptor_format = DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR | DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS; + } + else if ( p_connection->reject_reason == LIREJ_UNSUPPORTED_DATA_BUFFER_DESCRIPTOR_FORMAT ) + { + p_connection->descriptor_format = DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR; + } + else + { + p_connection->state = SRP_CONNECT_FAILURE; + status = IB_ERROR; + goto exit; + } + p_connection->req_max_iu_msg_size = ( p_connection->ioc_max_send_msg_size >= SRP_MAX_IU_SIZE ) ? SRP_MAX_IU_SIZE: p_connection->ioc_max_send_msg_size; + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_ERROR, + ( "(init_to_targ_iu_sz requested) req_max_iu_msg_size %d, (from profile) ioc_max_send_msg_size %d\n", + p_connection->req_max_iu_msg_size, p_connection->ioc_max_send_msg_size )); + /* + Build SRP Login request + */ + setup_srp_login_request( &login_req, + 0, /* tag */ + p_connection->req_max_iu_msg_size, + p_connection->descriptor_format, + MCA_TERMINATE_EXISTING, + &p_connection->init_port_id, + &p_connection->targ_port_id ); + set_srp_login_request_from_host_to_network(&login_req); + + cm_req.p_req_pdata = (const uint8_t *)&login_req; + cm_req.req_length = (uint8_t)get_srp_login_request_length( &login_req ); + + cm_req.qp_type = IB_QPT_RELIABLE_CONN; + cm_req.h_qp = p_connection->h_qp; + + /* The maximum number of outstanding RDMA read/atomic operations. 
*/ + status = srp_get_responder_resources( p_hca, &cm_req.resp_res ); + if ( status != IB_SUCCESS ) + { + goto exit; + } + + cm_req.init_depth = send_msg_depth; + + cm_req.remote_resp_timeout = 15; + cm_req.flow_ctrl = FALSE; + cm_req.local_resp_timeout = 16; + cm_req.retry_cnt = 1; + cm_req.rnr_nak_timeout = 0; /* 655.36 ms */ + cm_req.rnr_retry_cnt = 6; + + cm_req.pfn_cm_rep_cb = __srp_cm_reply_cb; + cm_req.pfn_cm_req_cb = NULL; /* Set only for P2P */ + cm_req.pfn_cm_mra_cb = __srp_cm_mra_cb; + cm_req.pfn_cm_rej_cb = __srp_cm_rej_cb; + + cm_req.pkey = p_connection->p_path_rec->pkey; + + status = p_hca->p_hba->ifc.cm_req( &cm_req ); + if ( status == IB_SUCCESS ) + { + p_connection->state = SRP_CONNECT_REQUESTED; + } + else + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Send Connect Request. Status = %d\n", status) ); + p_connection->state = SRP_CONNECT_FAILURE; + } + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} +#pragma optimize( "", on ) + +/* srp_init_connection */ +/*! +Initializes a connection structure + +@param p_connection - pointer to the connection structure +@param p_profile - Pointer to IOC profile. +@param ca_guid - Local CA GUID to use in as initiator GUID. +@param ext_id - Initiator and target extension ID. +@param p_path_rec - pointer to the path to the target +@param service_id - service id to which we want to connect + +@return - always success (for now) +*/ +ib_api_status_t +srp_init_connection( + IN OUT srp_connection_t *p_connection, + IN ib_ioc_profile_t* const p_profile, + IN net64_t ca_guid, + IN net64_t ext_id, + IN ib_path_rec_t *p_path_rec, + IN ib_net64_t service_id ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + cl_memclr( p_connection, sizeof(*p_connection) );\ + + p_connection->initialized = TRUE; + + p_connection->state = SRP_NOT_CONNECTED; + + p_connection->p_path_rec = p_path_rec; + switch( p_profile->io_class ) + { + case SRP_IO_CLASS_R10: + p_connection->init_port_id.field1 = ca_guid; + p_connection->init_port_id.field2 = ext_id; + p_connection->targ_port_id.field1 = p_profile->ioc_guid; + p_connection->targ_port_id.field2 = ext_id; + break; + + case SRP_IO_CLASS: + p_connection->init_port_id.field1 = ext_id; + p_connection->init_port_id.field2 = ca_guid; + p_connection->targ_port_id.field1 = ext_id; + p_connection->targ_port_id.field2 = p_profile->ioc_guid; + break; + + default: + return IB_INVALID_PARAMETER; + } + p_connection->service_id = service_id; + p_connection->send_queue_depth = SRP_DEFAULT_SEND_Q_DEPTH; + p_connection->recv_queue_depth = SRP_DEFAULT_RECV_Q_DEPTH; + + SRP_EXIT( SRP_DBG_PNP ); + + return ( IB_SUCCESS ); +} + +/* srp_connect */ +/*! 
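__srp_issue_session_login above encodes a simple negotiation fallback: the first attempt advertises both direct and indirect descriptor support, a reject with LIREJ_UNSUPPORTED_DATA_BUFFER_DESCRIPTOR_FORMAT triggers one retry advertising direct only, and any other recorded reject reason fails the connection. A reduced sketch of that decision, with illustrative local constants rather than the real enum values:

#include <stdio.h>

enum { FMT_DIRECT = 0x02, FMT_INDIRECT = 0x04 };               /* illustrative flags */
enum { REJ_NONE = 0, REJ_UNSUPPORTED_FMT = 6, REJ_OTHER = 99 };

/* returns the descriptor formats to advertise, or 0 to give up */
static int choose_formats( int last_reject_reason )
{
    if( last_reject_reason == REJ_NONE )
        return FMT_DIRECT | FMT_INDIRECT;  /* first attempt: offer both */
    if( last_reject_reason == REJ_UNSUPPORTED_FMT )
        return FMT_DIRECT;                 /* retry with the lowest common format */
    return 0;                              /* any other reject is fatal */
}

int main( void )
{
    printf( "first try: %#x\n", choose_formats( REJ_NONE ) );
    printf( "after format reject: %#x\n", choose_formats( REJ_UNSUPPORTED_FMT ) );
    printf( "after other reject: %#x\n", choose_formats( REJ_OTHER ) );
    return 0;
}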
+Orchestrates the processing required to connect to a target device + +@param p_connection - pointer to the connection structure +@param p_hca - pointer to the hca structure used by this connection +@param send_msg_depth - initial request limit delta value +@param p_session - context passed to callback functions + +@return - result of connect operations +*/ +ib_api_status_t +srp_connect( + IN OUT srp_connection_t *p_connection, + IN srp_hca_t *p_hca, + IN uint8_t send_msg_depth, + IN p_srp_session_t p_session ) +{ + ib_api_status_t status; + cl_status_t cl_status; + + SRP_ENTER( SRP_DBG_PNP ); + + p_connection->ioc_max_send_msg_size = + cl_ntoh32 (p_session->p_hba->ioc_info.profile.send_msg_size); + p_connection->ioc_max_send_msg_depth = send_msg_depth; + p_connection->reject_reason = 0; + + status = __srp_create_cqs( p_connection, p_hca, p_session ); + if ( status != IB_SUCCESS ) + { + goto exit; + } + + status = __srp_create_qp( p_connection, p_hca, p_session ); + if ( status != IB_SUCCESS ) + { + goto exit; + } + + cl_status = cl_event_init( &p_connection->conn_req_event, TRUE ); + if ( cl_status != CL_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Initialize Connect Request Event. Status = %d\n", cl_status) ); + status = cl_status; + goto exit; + } + + status = __srp_issue_session_login( p_connection, p_hca, send_msg_depth ); + if ( status != IB_SUCCESS ) + { + cl_event_destroy( &p_connection->conn_req_event ); + goto exit; + } + + cl_status = cl_event_wait_on( &p_connection->conn_req_event, EVENT_NO_TIMEOUT, FALSE ); + if ( cl_status != CL_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Wait On Connect Request Event Failed. Status = %d\n", cl_status) ); + status = cl_status; + cl_event_destroy( &p_connection->conn_req_event ); + goto exit; + } + + cl_event_destroy( &p_connection->conn_req_event ); + + if ( p_connection->state != SRP_CONNECTED ) + { + status = IB_ERROR; + goto exit; + } + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +/* srp_free_connection */ +/*! +Frees connection resources + +@param p_connection - pointer to the connection structure + +@return - none +*/ +void +srp_free_connection( + IN srp_connection_t *p_srp_connection ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + if ( p_srp_connection->initialized == TRUE ) + { + if ( p_srp_connection->p_wc_array != NULL ) + { + cl_free( p_srp_connection->p_wc_array ); + } + + cl_event_destroy( &p_srp_connection->conn_req_event ); + + cl_memclr( p_srp_connection, sizeof( *p_srp_connection ) ); + } + + SRP_EXIT( SRP_DBG_PNP ); +} + + + diff --git a/branches/Ndi/ulp/srp/kernel/srp_connection.h b/branches/Ndi/ulp/srp/kernel/srp_connection.h new file mode 100644 index 00000000..d9810f8d --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_connection.h @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _SRP_CONNECTION_H_ +#define _SRP_CONNECTION_H_ + + +#include +#include +#include + +#include "srp.h" +#include "srp_connection.h" +#include "srp_hca.h" + +/* Default Max Inflight IO Depth Commands */ +#define SRP_DEFAULT_SEND_Q_DEPTH 1000 +/* Default Max Inflight IO Depth Responses */ +#define SRP_DEFAULT_RECV_Q_DEPTH 1000 + +typedef enum +{ + SRP_NOT_CONNECTED, + SRP_CONNECT_REQUESTED, + SRP_CONNECTED, + SRP_CONNECT_FAILURE, + SRP_CONNECTION_CLOSING, + SRP_TARGET_DISCONNECTING +} srp_connection_state_t; + +typedef struct _srp_session *p_srp_session_t; + +/* Connection information. */ +typedef struct _srp_connection +{ + BOOLEAN initialized; + + srp_connection_state_t state; + + ib_cq_handle_t h_send_cq; + ib_cq_handle_t h_recv_cq; + ib_qp_handle_t h_qp; + + ib_path_rec_t *p_path_rec; + srp_ib_port_id_t init_port_id; + srp_ib_port_id_t targ_port_id; + ib_net64_t service_id; + + uint32_t send_queue_depth; + uint32_t recv_queue_depth; + + atomic32_t tag; + atomic32_t request_limit; + int32_t request_threashold; + uint32_t init_to_targ_iu_sz; + uint32_t targ_to_init_iu_sz; + + ib_wc_t *p_wc_array; + ib_wc_t *p_wc_free_list; + + uint32_t signaled_send_completion_count; + uint32_t max_scatter_gather_entries; + uint32_t ioc_max_send_msg_size; + uint32_t req_max_iu_msg_size; + cl_event_t conn_req_event; + DATA_BUFFER_DESCRIPTOR_FORMAT descriptor_format; + LOGIN_REJECT_CODE reject_reason; + uint8_t ioc_max_send_msg_depth; +} srp_connection_t; + +ib_api_status_t +srp_init_connection( + IN OUT srp_connection_t *p_connection, + IN ib_ioc_profile_t* const p_profile, + IN net64_t ca_guid, + IN net64_t ext_id, + IN ib_path_rec_t *p_path_rec, + IN ib_net64_t service_id ); + +ib_api_status_t +srp_connect( + IN OUT srp_connection_t *p_connection, + IN srp_hca_t *p_hca, + IN uint8_t send_msg_depth, + IN p_srp_session_t p_session ); + +void +srp_free_connection( + IN srp_connection_t *p_srp_connection ); +static +ib_api_status_t +__srp_issue_session_login( + IN OUT srp_connection_t *p_connection, + IN srp_hca_t *p_hca, + IN uint8_t send_msg_depth ); + +#endif /* _SRP_CONNECTION_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_cred_req.h b/branches/Ndi/ulp/srp/kernel/srp_cred_req.h new file mode 100644 index 00000000..6f47638a --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_cred_req.h @@ -0,0 +1,201 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_CRED_REQ_H_INCLUDED
+#define SRP_CRED_REQ_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_credit_request_tag */
+/*!
+Sets the tag field of a credit request information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_request_tag(
+	IN OUT	srp_cred_req_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_credit_request */
+/*!
+Initializes the credit request IU to zeroes
+and sets the IU type to Srp credit request
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_credit_request(
+	IN OUT	srp_cred_req_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_CRED_REQ );
+	set_srp_credit_request_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_credit_request_request_limit_delta */
+/*!
+Sets the request limit delta for the credit request
+
+@param p_information_unit - pointer to the IU structure
+@param request_limit_delta - flow control request limit delta
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_request_request_limit_delta(
+	IN OUT	srp_cred_req_t	*p_information_unit,
+	IN		int32_t			request_limit_delta )
+{
+	p_information_unit->request_limit_delta = request_limit_delta;
+}
+
+/* setup_srp_credit_request */
+/*!
+Initializes and sets the Srp credit request IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param request_limit_delta - flow control request limit delta
+
+@return - none
+*/
+static inline
+void
+setup_srp_credit_request(
+	IN OUT	srp_cred_req_t	*p_information_unit,
+	IN		uint64_t		iu_tag,
+	IN		int32_t			request_limit_delta )
+{
+	init_srp_credit_request( p_information_unit, iu_tag );
+	set_srp_credit_request_request_limit_delta( p_information_unit, request_limit_delta );
+}
+
+/* get_srp_credit_request_tag */
+/*!
+Returns the value of the tag field of a credit request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_credit_request_tag(
+	IN	srp_cred_req_t	*p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
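The helpers above carry SRP's credit-based flow control: the target grants the initiator a request limit, every posted IU consumes one credit, and each response (or CRED_REQ) carries a signed request_limit_delta that replenishes it — the same accounting __srp_fix_request_limit performs in srp_data_path.c later in this patch. A minimal, driver-independent sketch of that idea; the names here are illustrative, not from the patch:

#include <stdint.h>

/* Simplified SRP credit accounting. */
typedef struct flow_ctrl
{
    int32_t request_limit;      /* credits currently available */
} flow_ctrl_t;

/* mirror of the driver's SRP_REQUEST_LIMIT_THRESHOLD check */
static int can_send( const flow_ctrl_t *fc, int32_t threshold )
{
    return fc->request_limit > threshold;
}

static void on_send( flow_ctrl_t *fc )                      { fc->request_limit--; }
static void on_response( flow_ctrl_t *fc, int32_t delta )   { fc->request_limit += delta; }

int main( void )
{
    flow_ctrl_t fc = { 1000 };          /* e.g. a SEND_Q_DEPTH-sized grant */
    if ( can_send( &fc, 2 ) )
        on_send( &fc );                 /* one IU posted, one credit gone */
    on_response( &fc, +1 );             /* target tops the credit back up */
    return fc.request_limit == 1000 ? 0 : 1;
}
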
+/* get_srp_credit_request_request_limit_delta */
+/*!
+Returns the value of the request limit delta field of a credit request
+
+@param p_information_unit - pointer to the IU structure
+
+@return - request limit delta value
+*/
+static inline
+int32_t
+get_srp_credit_request_request_limit_delta(
+	IN	srp_cred_req_t	*p_information_unit )
+{
+	return( p_information_unit->request_limit_delta );
+}
+
+/* get_srp_credit_request_length */
+/*!
+Returns the size in bytes of the Srp credit request IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size of the credit request IU in bytes
+*/
+static inline
+uint32_t
+get_srp_credit_request_length(
+	IN	srp_cred_req_t	*p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_credit_request_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_request_from_host_to_network(
+	IN OUT	srp_cred_req_t	*p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->request_limit_delta = cl_hton32( p_information_unit->request_limit_delta );
+}
+
+/* set_srp_credit_request_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_request_from_network_to_host(
+	IN OUT	srp_cred_req_t	*p_information_unit )
+{
+	set_srp_credit_request_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_CRED_REQ_H_INCLUDED */
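Note that the network-to-host converter above simply reuses the host-to-network one: a byte swap is an involution, so applying it twice restores the original value, and one function serves both directions. A quick stand-alone check, with a hand-rolled 32-bit swap standing in for cl_hton32 on a little-endian host:

#include <assert.h>
#include <stdint.h>

/* Plain 32-bit byte reversal, the operation cl_hton32 performs on x86. */
static uint32_t swap32( uint32_t v )
{
    return (v >> 24) | ((v >> 8) & 0x0000ff00) |
           ((v << 8) & 0x00ff0000) | (v << 24);
}

int main( void )
{
    uint32_t delta = 0x12345678;
    assert( swap32( swap32( delta ) ) == delta );   /* swap is its own inverse */
    return 0;
}
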
diff --git a/branches/Ndi/ulp/srp/kernel/srp_cred_rsp.h b/branches/Ndi/ulp/srp/kernel/srp_cred_rsp.h
new file mode 100644
index 00000000..70b3ca9f
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_cred_rsp.h
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_CRED_RSP_H_INCLUDED
+#define SRP_CRED_RSP_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_credit_response_tag */
+/*!
+Sets the tag field of a credit response information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_response_tag(
+	IN OUT	srp_cred_rsp_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_credit_response */
+/*!
+Initializes the credit response IU to zeroes
+and sets the IU type to Srp credit response
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_credit_response(
+	IN OUT	srp_cred_rsp_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_CRED_RSP );
+	set_srp_credit_response_tag( p_information_unit, iu_tag );
+}
+
+/* setup_srp_credit_response */
+/*!
+Initializes and sets the Srp Credit Response IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+setup_srp_credit_response(
+	IN OUT	srp_cred_rsp_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	init_srp_credit_response( p_information_unit, iu_tag );
+}
+
+/* get_srp_credit_response_tag */
+/*!
+Returns the value of the tag field of a credit response
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_credit_response_tag(
+	IN	srp_cred_rsp_t	*p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_credit_response_length */
+/*!
+Returns the size in bytes of the Srp Credit Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size of the credit response IU in bytes
+*/
+static inline
+uint32_t
+get_srp_credit_response_length(
+	IN	srp_cred_rsp_t	*p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_credit_response_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_response_from_host_to_network(
+	IN OUT	srp_cred_rsp_t	*p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+}
+
+/* set_srp_credit_response_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_credit_response_from_network_to_host(
+	IN OUT	srp_cred_rsp_t	*p_information_unit )
+{
+	set_srp_credit_response_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_CRED_RSP_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_data.h b/branches/Ndi/ulp/srp/kernel/srp_data.h
new file mode 100644
index 00000000..87c5d1bc
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_data.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _SRP_DATA_H_ +#define _SRP_DATA_H_ + + +//#define SRP_SCSI_MINIPORT + + +#include +#pragma warning( push, 3 ) +//#if WINVER == 0x500 || defined( SRP_SCSI_MINIPORT ) +//#include +//#include +//#else /* WINVER == 0x500 */ + +// WinXP typo workaround +#if defined (WinXP) +#define RaidPortReady StorPortReady +#endif + +#include +//#endif /* WINVER == 0x500 */ +#pragma warning( pop ) + +#define SRP_OBJ_TYPE_DRV 0x10000000 +#define SRP_OBJ_TYPE_HBA 0x10000001 +#define SRP_OBJ_TYPE_SESSION 0x10000002 + + +/* Device extension */ +typedef struct _srp_ext +{ + struct _srp_hba *p_hba; + +} srp_ext_t; +/* +* NOTES +* The device extension only contains a pointer to our dynamically +* allocated HBA structure. This is done since we don't have control +* over the destruction of the device extension, but we need to be able to +* control the creation and destruction of the HBA object. We hook the driver +* unload routine so that we can clean up any remaining objects. +*********/ + +extern BOOLEAN g_srp_system_shutdown; + + +#endif /* _SRP_DATA_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_data_path.c b/branches/Ndi/ulp/srp/kernel/srp_data_path.c new file mode 100644 index 00000000..cc41615c --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_data_path.c @@ -0,0 +1,1538 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include
+#include
+
+#include "srp_cmd.h"
+#include "srp_data_path.h"
+#include "srp_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "srp_data_path.tmh"
+#endif
+#include "srp_descriptors.h"
+#include "srp_rsp.h"
+#include "srp_session.h"
+#include "srp_tsk_mgmt.h"
+
+//#include "srp_aer_req.h"
+//#include "srp_aer_rsp.h"
+
+//#include "srp_cred_req.h"
+//#include "srp_cred_rsp.h"
+
+//#include "srp_i_logout.h"
+//#include "srp_t_logout.h"
+
+// Final address is of the form 0b00ttttttllllllll
+#define BUILD_SCSI_ADDRESS(lun) ((uint64_t)lun << 48)
+
+#define SRP_REQUEST_LIMIT_THRESHOLD 2
+
+static ib_api_status_t
+__srp_map_fmr(
+	IN		PVOID						p_dev_ext,
+	IN		PSTOR_SCATTER_GATHER_LIST	p_scatter_gather_list,
+	IN		srp_send_descriptor_t		*p_send_descriptor,
+	IN OUT	srp_memory_descriptor_t		*p_memory_descriptor)
+{
+	srp_hba_t		*p_hba = ((srp_ext_t *)p_dev_ext)->p_hba;
+	PSTOR_SCATTER_GATHER_ELEMENT	p_sg_element;
+	uint32_t		total_len = 0;
+	uint32_t		i,j,list_len = 0;
+	uint64_t		*p_addr_list;
+	uint64_t		vaddr=0;
+	ib_api_status_t	status;
+	srp_hca_t		hca;
+	uint64_t		fmr_page_mask;
+	net32_t			lkey;
+	net32_t			rkey;
+	srp_session_t	*p_srp_session;
+	mlnx_fmr_pool_el_t	p_fmr_el;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	if (g_srp_mode_flags & SRP_MODE_NO_FMR_POOL)
+		return IB_UNSUPPORTED;
+
+	p_srp_session = p_hba->session_list[p_send_descriptor->p_srb->TargetId];
+	if ( p_srp_session == NULL )
+		return IB_INVALID_STATE;
+
+	hca = p_srp_session->hca;
+	fmr_page_mask = ~(hca.fmr_page_size-1);
+
+	for ( i = 0, p_sg_element = p_scatter_gather_list->List;
+		i < p_scatter_gather_list->NumberOfElements;
+		i++, p_sg_element++ )
+	{
+		uint32_t	dma_len = p_sg_element->Length;
+
+		if (p_sg_element->PhysicalAddress.QuadPart & ~fmr_page_mask) {
+			if (i > 0)
+			{	/* a buffer that does not start on a page boundary is allowed only for the first SG element */
+				SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+					("Unaligned address at the beginning of the list\n") );
+				return IB_INVALID_PARAMETER;
+			}
+		}
+
+		if ((p_sg_element->PhysicalAddress.QuadPart + dma_len) & ~fmr_page_mask) {
+			if (i < (uint32_t)p_scatter_gather_list->NumberOfElements -1)
+			{	/* a buffer that does not end on a page boundary is allowed only for the last SG element */
+				SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+					("Unaligned address at the end of the list\n") );
+				return IB_INVALID_PARAMETER;
+			}
+		}
+
+		total_len += p_sg_element->Length;
+		list_len += (p_sg_element->Length + (hca.fmr_page_size-1)) >> hca.fmr_page_shift;
+	}
+
+
+	p_addr_list = cl_zalloc(sizeof(uint64_t)*list_len);
+	if(!p_addr_list)
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,("Failed to allocate page list\n"));
+		return IB_INSUFFICIENT_MEMORY;
+	}
+
+	list_len = 0;
+	for ( i = 0, p_sg_element = p_scatter_gather_list->List;
+		i < p_scatter_gather_list->NumberOfElements;
+		i++, p_sg_element++ )
+	{
+		uint32_t	dma_len = p_sg_element->Length;
+		for( j = 0; j < dma_len; j+=PAGE_SIZE)
+		{
+			p_addr_list[list_len++] = (p_sg_element->PhysicalAddress.QuadPart & fmr_page_mask) + j;
+		}
+	}
+
+	p_send_descriptor->p_fmr_el = NULL;
+	status = p_hba->ifc.map_phys_mlnx_fmr_pool
+		(hca.h_fmr_pool, p_addr_list, list_len, &vaddr, &lkey, &rkey, &p_fmr_el );
+
+	cl_free( p_addr_list );
+
+	if(status != IB_SUCCESS)
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,("Failed to map fmr\n"));
+		return status;
+	}
+
+	p_send_descriptor->p_fmr_el = p_fmr_el;
+	p_sg_element = p_scatter_gather_list->List;
+
+	p_memory_descriptor->virtual_address = cl_hton64( p_sg_element->PhysicalAddress.QuadPart & ~fmr_page_mask);
+	p_memory_descriptor->memory_handle = rkey;
+	p_memory_descriptor->data_length = cl_hton32( total_len);
+
+#if DBG
+	/* statistics */
+	p_srp_session->x_pkt_fmr++;
+#endif
+
+	SRP_EXIT( SRP_DBG_DATA );
+	return IB_SUCCESS;
+}
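__srp_map_fmr above builds one page-aligned entry per page spanned by each SG element, then hands the list to the FMR pool. A stand-alone illustration of the masking and rounding, assuming a 4 KB FMR page (fmr_page_size = 4096, fmr_page_shift = 12); the sample address and length are made up:

#include <stdio.h>
#include <stdint.h>

int main( void )
{
    const uint64_t page_size  = 4096;
    const uint32_t page_shift = 12;
    const uint64_t page_mask  = ~(page_size - 1);   /* the fmr_page_mask above */

    uint64_t phys = 0x12345ABCULL;   /* sample SG element physical address */
    uint32_t len  = 10000;           /* sample SG element length in bytes  */

    /* pages spanned, rounding the length up to whole pages (list_len above) */
    uint32_t pages = (uint32_t)((len + page_size - 1) >> page_shift);

    printf( "page base 0x%llx, offset in page 0x%llx, %u page-list entries\n",
        (unsigned long long)(phys & page_mask),
        (unsigned long long)(phys & ~page_mask),
        pages );                      /* 10000 bytes -> 3 entries */
    return 0;
}
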
+
+
+
+static inline
+void
+__srp_dump_srb_info(srp_send_descriptor_t* p_send_descriptor)
+{
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Srb Address = %p\n",
+		p_send_descriptor->p_srb) );
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Srb DataBuffer Address = %p\n",
+		p_send_descriptor->p_srb->DataBuffer) );
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Srb DataTransferLength = %d\n",
+		p_send_descriptor->p_srb->DataTransferLength) );
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Returning SrbStatus %s(0x%x) for "
+		"Function = %s(0x%x), Path = 0x%x, Target = 0x%x, "
+		"Lun = 0x%x, tag 0x%I64x\n",
+		g_srb_status_name[p_send_descriptor->p_srb->SrbStatus],
+		p_send_descriptor->p_srb->SrbStatus,
+		g_srb_function_name[p_send_descriptor->p_srb->Function],
+		p_send_descriptor->p_srb->Function,
+		p_send_descriptor->p_srb->PathId,
+		p_send_descriptor->p_srb->TargetId,
+		p_send_descriptor->p_srb->Lun,
+		get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+}
+
+
+static inline
+void
+__srp_process_session_send_completions(
+	IN	srp_session_t	*p_srp_session )
+{
+	ib_api_status_t	status;
+	ib_wc_t			*p_wc_done_list = NULL;
+	ib_wc_t			*p_wc;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	cl_obj_lock( &p_srp_session->obj );
+
+	if ( p_srp_session->connection.state != SRP_CONNECTED )
+	{
+		cl_obj_unlock( &p_srp_session->obj );
+		SRP_EXIT( SRP_DBG_DATA );
+		return;
+	}
+
+	status = p_srp_session->p_hba->ifc.poll_cq(
+		p_srp_session->connection.h_send_cq,
+		&p_srp_session->connection.p_wc_free_list,
+		&p_wc_done_list );
+	if ( status != IB_SUCCESS )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("ib_poll_cq() failed!, status 0x%x\n", status) );
+
+		// TODO: Kill session and inform port driver link down scsiportnotification
+		cl_obj_unlock( &p_srp_session->obj );
+		return;
+	}
+
+	cl_obj_ref( &p_srp_session->obj );
+	cl_obj_unlock( &p_srp_session->obj );
+
+	while ( (p_wc = p_wc_done_list) != NULL )
+	{
+		srp_send_descriptor_t	*p_send_descriptor;
+
+		p_send_descriptor = (srp_send_descriptor_t *)((uintn_t)p_wc->wr_id);
+
+		/* Remove head from list */
+		p_wc_done_list = p_wc->p_next;
+		p_wc->p_next = NULL;
+
+		switch ( p_wc->status)
+		{
+		case IB_WCS_SUCCESS:
+			break;
+		case IB_WCS_WR_FLUSHED_ERR:
+			// TODO: Kill session and inform port driver link down scsiportnotification
+			SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,
+				("Send Completion Status %s Vendor Status = 0x%x, \n",
+				p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),
+				(int)p_wc->vendor_specific));
+
+			SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,
+				("Send Completion Received for Function = %s(0x%x), "
+				"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+				g_srb_function_name[p_send_descriptor->p_srb->Function],
+				p_send_descriptor->p_srb->Function,
+				p_send_descriptor->p_srb->PathId,
+				p_send_descriptor->p_srb->TargetId,
+				p_send_descriptor->p_srb->Lun,
+				get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+
+			break;
+		default:
+			// TODO: Kill session and inform port driver link down scsiportnotification
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Send Completion Status %s Vendor Status = 0x%x, \n",
+				p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),
+				(int)p_wc->vendor_specific));
+
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Send Completion Received for Function = %s(0x%x), "
+				"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+				g_srb_function_name[p_send_descriptor->p_srb->Function],
+				p_send_descriptor->p_srb->Function,
+				p_send_descriptor->p_srb->PathId,
+				p_send_descriptor->p_srb->TargetId,
+				p_send_descriptor->p_srb->Lun,
+				get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+			break;
+		}
+
+		/* Put onto head of free list */
+		cl_obj_lock( &p_srp_session->obj );
+		p_wc->p_next = p_srp_session->connection.p_wc_free_list;
+		p_srp_session->connection.p_wc_free_list = p_wc;
+		cl_obj_unlock( &p_srp_session->obj );
+
+		/* Get next completion */
+		p_wc = p_wc_done_list;
+	}
+
+	/* Re-arm the CQ for more completions */
+	status = p_srp_session->p_hba->ifc.rearm_cq(
+		p_srp_session->connection.h_send_cq, FALSE );
+	if ( status != IB_SUCCESS)
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("ib_rearm_cq() failed!, status 0x%x\n", status) );
+
+		// TODO: Kill session and inform port driver link down scsiportnotification
+	}
+
+	cl_obj_deref( &p_srp_session->obj );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* srp_send_completion_cb */
+/*!
+Set Timer to Process Send Completions - Usually bad if we get here
+
+@param p_context - context pointer to the owning session
+
+@return - none
+*/
+void
+srp_send_completion_cb(
+	IN	const	ib_cq_handle_t	h_cq,
+	IN			void			*p_context )
+{
+	srp_session_t	*p_srp_session = (srp_session_t *)p_context;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	UNUSED_PARAM( h_cq );
+
+	__srp_process_session_send_completions( p_srp_session );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+
+static inline ib_api_status_t
+__srp_clean_send_descriptor(
+	IN	srp_send_descriptor_t	*p_send_descriptor,
+	IN	srp_session_t			*p_srp_session )
+{
+	ib_api_status_t	status = IB_SUCCESS;
+
+	if(p_srp_session && p_send_descriptor && p_send_descriptor->p_fmr_el)
+	{
+		status = p_srp_session->p_hba->ifc.unmap_mlnx_fmr_pool(p_send_descriptor->p_fmr_el);
+		p_send_descriptor->p_fmr_el = NULL;
+	}
+	return status;
+}
+
+void
+__srp_post_io_request(
+	IN		PVOID				p_dev_ext,
+	IN OUT	PSCSI_REQUEST_BLOCK	p_srb,
+			srp_session_t		*p_srp_session )
+{
+	ib_api_status_t			status;
+	srp_send_descriptor_t	*p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	status = srp_post_send_descriptor( &p_srp_session->descriptors,
+		p_send_descriptor,
+		p_srp_session );
+
+	if ( status == IB_SUCCESS )
+	{
+		cl_atomic_dec( &p_srp_session->connection.request_limit );
+#if DBG
+		{ /* statistics */
+			uint32_t size = (uint32_t)cl_qlist_count(&p_srp_session->descriptors.sent_descriptors);
+			p_srp_session->x_sent_num++;
+			p_srp_session->x_sent_total += size;
+			if ( p_srp_session->x_sent_max < size )
+				p_srp_session->x_sent_max = size;
+		}
+#endif
+		goto exit;
+	}
+	else
+	{
+		p_srb->SrbStatus = SRB_STATUS_NO_HBA;
+
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), Path = 0x%x, "
+			"Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+			g_srb_status_name[p_srb->SrbStatus],
+			p_srb->SrbStatus,
+			g_srb_function_name[p_srb->Function],
+			p_srb->Function,
+			p_srb->PathId,
+			p_srb->TargetId,
+			p_srb->Lun,
+			get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+
+		status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );
+		if ( status != IB_SUCCESS )
+		{
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Failed to unmap FMR Status = %d.\n", status) );
+			// TODO: Kill session and inform port driver link down storportnotification
+		}
+
+		StorPortNotification( RequestComplete, p_dev_ext, p_srb );
+	}
+
+exit:
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+static
+void
+__srp_repost_io_request(
+	IN	srp_session_t	*p_srp_session )
+{
+	srp_hba_t				*p_hba;
+	srp_send_descriptor_t	*p_send_descriptor = NULL;
+	srp_descriptors_t		*p_descriptors = &p_srp_session->descriptors;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	if ( !cl_qlist_count(&p_descriptors->pending_descriptors) ||
+		(p_srp_session->connection.request_limit <= p_srp_session->connection.request_threashold) )
+		goto exit;
+
+#if DBG
+	{ /* statistics */
+		uint32_t size = (uint32_t)cl_qlist_count(&p_descriptors->pending_descriptors);
+		p_srp_session->x_pend_num++;
+		p_srp_session->x_pend_total += size;
+		if ( p_srp_session->x_pend_max < size )
+			p_srp_session->x_pend_max = size;
+	}
+#endif
+
+	/* If the loop below releases the last pending request for sending, it can
+	   race with StorPort, which may call srp_post_io_request at just that
+	   moment. In the "worst" case the order of the two postings is swapped.
+	   The 'repost_is_on' flag prevents this case. */
+	cl_atomic_inc( &p_srp_session->repost_is_on );
+
+	while (p_srp_session->connection.request_limit > p_srp_session->connection.request_threashold)
+	{
+		cl_list_item_t	*p_list_item;
+
+		/* extract a pending descriptor, if any */
+		cl_spinlock_acquire ( &p_descriptors->pending_list_lock );
+		p_list_item = cl_qlist_remove_head( &p_descriptors->pending_descriptors );
+		if ( p_list_item == cl_qlist_end( &p_descriptors->pending_descriptors ) )
+		{
+			cl_spinlock_release ( &p_descriptors->pending_list_lock );
+			break;
+		}
+		cl_spinlock_release ( &p_descriptors->pending_list_lock );
+
+		/* post the request */
+		p_hba = p_srp_session->p_hba;
+		p_send_descriptor = PARENT_STRUCT(p_list_item, srp_send_descriptor_t,list_item);
+		__srp_post_io_request( p_hba->p_ext, p_send_descriptor->p_srb, p_srp_session );
+	}
+
+	cl_atomic_dec( &p_srp_session->repost_is_on );
+
+exit:
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+static inline void
+__srp_fix_request_limit(
+	IN	srp_session_t	*p_srp_session,
+	IN	srp_rsp_t		*p_srp_rsp )
+{
+	int32_t rld = get_srp_response_request_limit_delta( p_srp_rsp );
+	cl_atomic_add( &p_srp_session->connection.request_limit, rld );
+#if DBG
+	/* statistics */
+	p_srp_session->x_rld_num++;
+	p_srp_session->x_rld_total += rld;
+	if ( p_srp_session->x_rld_max < rld )
+		p_srp_session->x_rld_max = rld;
+	if ( p_srp_session->x_rld_min > rld )
+		p_srp_session->x_rld_min = rld;
+#endif
+}
+
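Responses are paired with outstanding requests purely by the 64-bit IU tag that srp_find_matching_send_descriptor looks up in __srp_process_recv_completion below. A minimal sketch of that idea — a flat array scan here, whereas the driver keeps proper descriptor lists; all names in this snippet are illustrative:

#include <stddef.h>
#include <stdint.h>

#define MAX_OUTSTANDING 16

typedef struct sent_entry
{
    uint64_t tag;       /* unique per request, echoed back in the SRP RSP */
    int      in_use;
} sent_entry_t;

static sent_entry_t sent[MAX_OUTSTANDING];

/* Return the outstanding request matching a response tag, or NULL. */
static sent_entry_t *find_matching( uint64_t tag )
{
    for ( int i = 0; i < MAX_OUTSTANDING; i++ )
        if ( sent[i].in_use && sent[i].tag == tag )
            return &sent[i];
    return NULL;        /* late/duplicate response: recv buffer is reposted anyway */
}

int main( void )
{
    sent[3].tag = 0x1234;
    sent[3].in_use = 1;
    return find_matching( 0x1234 ) == &sent[3] ? 0 : 1;
}
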
+static inline
+ib_api_status_t
+__srp_process_recv_completion(
+	IN	srp_recv_descriptor_t	*p_recv_descriptor,
+	IN	srp_session_t			*p_srp_session )
+{
+	ib_api_status_t			status = IB_SUCCESS;
+	srp_rsp_t				*p_srp_rsp;
+	uint8_t					response_status;
+	srp_send_descriptor_t	*p_send_descriptor;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	p_srp_rsp = (srp_rsp_t *)p_recv_descriptor->p_data_segment;
+
+	set_srp_response_from_network_to_host( p_srp_rsp );
+
+	response_status = get_srp_response_status( p_srp_rsp );
+
+	p_send_descriptor = srp_find_matching_send_descriptor(
+		&p_srp_session->descriptors,
+		get_srp_response_tag( (srp_rsp_t *)p_recv_descriptor->p_data_segment ) );
+	if ( p_send_descriptor == NULL )
+	{
+		/* Repost the recv descriptor */
+		status = p_srp_session->p_hba->ifc.post_recv(
+			p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );
+		if ( status != IB_SUCCESS )
+		{
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Failed to post recv descriptor. Status = %d.\n", status) );
+			// TODO: Kill session and inform port driver link down scsiportnotification
+		}
+
+		__srp_fix_request_limit( p_srp_session, p_srp_rsp );
+		__srp_repost_io_request( p_srp_session );
+
+		SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+			("Matching Send Descriptor Not Found.\n") );
+
+		goto exit;
+	}
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Recv Completion Received for Function = %s(0x%x), "
+		"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+		g_srb_function_name[p_send_descriptor->p_srb->Function],
+		p_send_descriptor->p_srb->Function,
+		p_send_descriptor->p_srb->PathId,
+		p_send_descriptor->p_srb->TargetId,
+		p_send_descriptor->p_srb->Lun,
+		get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+
+	switch ( get_srp_iu_buffer_type( (srp_iu_buffer_t *)p_send_descriptor->data_segment ) )
+	{
+	case SRP_TSK_MGMT:
+	{
+		srp_tsk_mgmt_t *p_srp_tsk_mgmt = (srp_tsk_mgmt_t *)p_send_descriptor->data_segment;
+
+		set_srp_tsk_mgmt_from_network_to_host( p_srp_tsk_mgmt );
+
+
+		if(response_status == SCSISTAT_GOOD)
+		{
+			p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_SUCCESS;
+		}
+		else
+		{
+			p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_ABORT_FAILED;
+			SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+				("Scsi Error %s (%#x) Received for Function = %s(0x%x), "
+				"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+				g_srb_scsi_status_name[response_status],
+				response_status,
+				g_srb_function_name[p_send_descriptor->p_srb->Function],
+				p_send_descriptor->p_srb->Function,
+				p_send_descriptor->p_srb->PathId,
+				p_send_descriptor->p_srb->TargetId,
+				p_send_descriptor->p_srb->Lun,
+				get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+		}
+
+		if ( get_srp_tsk_mgmt_task_management_flags( p_srp_tsk_mgmt ) == TMF_ABORT_TASK )
+		{
+			/* Repost the recv descriptor */
+			status = p_srp_session->p_hba->ifc.post_recv(
+				p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );
+			if ( status != IB_SUCCESS )
+			{
+				SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+					("Failed to post recv descriptor. Status = %d.\n", status) );
+				// TODO: Kill session and inform port driver link down storportnotification
+			}
+
+			__srp_fix_request_limit( p_srp_session, p_srp_rsp );
+			__srp_repost_io_request( p_srp_session );
+
+			__srp_dump_srb_info( p_send_descriptor);
+
+			status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );
+			if ( status != IB_SUCCESS )
+			{
+				SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+					("Failed to unmap FMR Status = %d.\n", status) );
+				// TODO: Kill session and inform port driver link down storportnotification
+			}
+
+			StorPortNotification( RequestComplete, p_srp_session->p_hba->p_ext, p_send_descriptor->p_srb );
+		}
+
+
+
+		break;
+	}
+
+	case SRP_CMD:
+		p_send_descriptor->p_srb->ScsiStatus = response_status;
+		if(response_status == SCSISTAT_GOOD)
+		{
+			p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_SUCCESS;
+		}
+		else
+		{
+			p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_ABORT_FAILED;
+			SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+				("Scsi Error %s (%#x) Received for Function = %s(0x%x), "
+				"Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+				g_srb_scsi_status_name[response_status],
+				response_status,
+				g_srb_function_name[p_send_descriptor->p_srb->Function],
+				p_send_descriptor->p_srb->Function,
+				p_send_descriptor->p_srb->PathId,
+				p_send_descriptor->p_srb->TargetId,
+				p_send_descriptor->p_srb->Lun,
+				get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) );
+		}
+
+		if ( get_srp_response_flags( p_srp_rsp ) != 0 )
+		{
+			uint32_t resid;
+
+			if ( (response_status != SCSISTAT_CHECK_CONDITION) && get_srp_response_di_under( p_srp_rsp ) )
+			{
+				resid = get_srp_response_data_in_residual_count( p_srp_rsp );
+
+				SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+					("DI Underflow in response: expected %d got %d.\n",
+					p_send_descriptor->p_srb->DataTransferLength,
+					p_send_descriptor->p_srb->DataTransferLength - resid) );
+
+				p_send_descriptor->p_srb->DataTransferLength -= resid;
+
+				if ( p_send_descriptor->p_srb->SrbStatus == SRB_STATUS_SUCCESS )
+				{
+					p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_DATA_OVERRUN; /* Also for underrun, see DDK */
+				}
+			}
+
+			if ( (response_status != SCSISTAT_CHECK_CONDITION) && get_srp_response_do_under( p_srp_rsp ) )
+			{
+				resid = get_srp_response_data_out_residual_count( p_srp_rsp );
+
+				SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+					("DO Underflow in response: expected %d got %d.\n",
+					p_send_descriptor->p_srb->DataTransferLength,
+					p_send_descriptor->p_srb->DataTransferLength - resid) );
+
+				p_send_descriptor->p_srb->DataTransferLength -= resid;
+
+				if ( p_send_descriptor->p_srb->SrbStatus == SRB_STATUS_SUCCESS )
+				{
+					p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_DATA_OVERRUN; /* Also for underrun, see DDK */
+				}
+			}
+
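The residual handling above shortens the SRB's transfer length by whatever the target did not move. A worked example of the arithmetic, with made-up numbers:

#include <assert.h>
#include <stdint.h>

int main( void )
{
    uint32_t data_transfer_length = 4096;   /* what the SRB requested          */
    uint32_t resid = 512;                   /* di_resid reported in the SRP RSP */

    data_transfer_length -= resid;          /* the adjustment performed above  */
    assert( data_transfer_length == 3584 ); /* completed as a short transfer   */
    return 0;
}
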
+			if ( get_srp_response_sns_valid( p_srp_rsp ) )
+			{
+				uint8_t *p_sense_data = get_srp_response_sense_data( p_srp_rsp );
+
+				/* Copy only as much of the sense data as we can hold. */
+				cl_memcpy( p_send_descriptor->p_srb->SenseInfoBuffer,
+					p_sense_data,
+					MIN( get_srp_response_sense_data_list_length( p_srp_rsp ),
+					p_send_descriptor->p_srb->SenseInfoBufferLength ) );
+				SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+					("Sense Data SENSE_KEY 0x%02x ADDITIONAL_SENSE_CODE "
+					"0x%02x ADDITIONAL_SENSE_QUALIFIER 0x%02x.\n",
+					p_sense_data[2],p_sense_data[12],p_sense_data[13]) );
+
+				if ( ((p_sense_data[2]&0xf) == 0x0b /*ABORTED_COMMAND*/) &&
+					(p_sense_data[12] == 0x08) &&
+					(p_sense_data[13] == 0x00) )
+				{
+					/* probably a problem with the Vfx FC san, like a wire pull */
+					/* initiate session recovery */
+					SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+						("Sense Data indicates FC link connectivity has been lost.\n") );
+					StorPortPauseDevice( p_srp_session->p_hba->p_ext,
+						p_send_descriptor->p_srb->PathId,
+						p_send_descriptor->p_srb->TargetId,
+						p_send_descriptor->p_srb->Lun,
+						5 );
+				}
+
+			}
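The sense-buffer offsets peeked at above (bytes 2, 12 and 13) come from the fixed-format SCSI sense layout defined in SPC: byte 2's low nibble is the sense key, byte 12 the additional sense code (ASC) and byte 13 its qualifier (ASCQ). A small decoder sketch of just those fields, for illustration:

#include <stdint.h>

typedef struct sense_info
{
    uint8_t key;    /* e.g. 0x0b = ABORTED COMMAND, as tested above */
    uint8_t asc;    /* additional sense code */
    uint8_t ascq;   /* additional sense code qualifier */
} sense_info_t;

/* Decode the key/ASC/ASCQ fields of a fixed-format sense buffer. */
static sense_info_t decode_fixed_sense( const uint8_t *sense )
{
    sense_info_t info;
    info.key  = sense[2] & 0x0f;
    info.asc  = sense[12];
    info.ascq = sense[13];
    return info;
}

int main( void )
{
    uint8_t sense[18] = { 0 };
    sense[2] = 0x0b;            /* ABORTED COMMAND */
    sense[12] = 0x08;           /* ASC 0x08, the case special-cased above */
    sense_info_t info = decode_fixed_sense( sense );
    return ( info.key == 0x0b && info.asc == 0x08 && info.ascq == 0x00 ) ? 0 : 1;
}
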
+
+			if ( get_srp_response_di_over( p_srp_rsp ) || get_srp_response_do_over( p_srp_rsp ) )
+			{
+				SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA,
+					("Overflow error in response.\n") );
+				if ( p_send_descriptor->p_srb->SrbStatus == SRB_STATUS_SUCCESS )
+				{
+					p_send_descriptor->p_srb->SrbStatus = SRB_STATUS_DATA_OVERRUN;
+				}
+			}
+		}
+
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("DataBuffer = 0x%I64x.\n", MmGetPhysicalAddress(
+			p_send_descriptor->p_srb->DataBuffer ).QuadPart) );
+
+		/* Repost the recv descriptor */
+		status = p_srp_session->p_hba->ifc.post_recv(
+			p_srp_session->connection.h_qp, &p_recv_descriptor->wr, NULL );
+		if ( status != IB_SUCCESS )
+		{
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Failed to post recv descriptor. Status = %d.\n", status) );
+			// TODO: Kill session and inform port driver link down storportnotification
+		}
+
+		__srp_fix_request_limit( p_srp_session, p_srp_rsp );
+		__srp_repost_io_request( p_srp_session );
+
+		__srp_dump_srb_info( p_send_descriptor);
+
+		status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session );
+		if ( status != IB_SUCCESS )
+		{
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Failed to unmap FMR Status = %d.\n", status) );
+			// TODO: Kill session and inform port driver link down storportnotification
+		}
+
+		StorPortNotification( RequestComplete, p_srp_session->p_hba->p_ext, p_send_descriptor->p_srb );
+		break;
+
+	case SRP_LOGIN_REQ:
+	case SRP_I_LOGOUT:
+	case SRP_CRED_RSP:
+	case SRP_AER_RSP:
+	default:
+		CL_ASSERT ( 0 );
+		break;
+	}
+
+exit:
+	SRP_EXIT( SRP_DBG_DATA );
+
+	return ( status );
+}
+
+static inline
+void
+__srp_process_session_recv_completions(
+	IN	srp_session_t	*p_srp_session )
+{
+	ib_api_status_t	status;
+	ib_wc_t			*p_wc_done_list;
+	ib_wc_t			*p_wc;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	cl_obj_lock( &p_srp_session->obj );
+
+	if ( p_srp_session->connection.state != SRP_CONNECTED )
+	{
+		cl_obj_unlock( &p_srp_session->obj );
+		SRP_EXIT( SRP_DBG_DATA );
+		return;
+	}
+
+	status = p_srp_session->p_hba->ifc.poll_cq(
+		p_srp_session->connection.h_recv_cq,
+		&p_srp_session->connection.p_wc_free_list,
+		&p_wc_done_list );
+	if ( status != IB_SUCCESS )
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("ib_poll_cq() failed!, status 0x%x\n", status) );
+
+		// TODO: Kill session and inform port driver link down scsiportnotification
+		SRP_EXIT( SRP_DBG_DATA );
+		cl_obj_unlock( &p_srp_session->obj );
+		return;
+	}
+
+	cl_obj_ref( &p_srp_session->obj );
+	cl_obj_unlock( &p_srp_session->obj );
+
+	while ( (p_wc = p_wc_done_list) != NULL )
+	{
+		srp_recv_descriptor_t	*p_recv_descriptor;
+
+		/* Remove head from list */
+		p_wc_done_list = p_wc->p_next;
+		p_wc->p_next = NULL;
+
+		p_recv_descriptor = (srp_recv_descriptor_t *)((uintn_t)p_wc->wr_id);
+
+		if ( p_wc->status == IB_WCS_SUCCESS )
+		{
+			status = __srp_process_recv_completion( p_recv_descriptor, p_srp_session );
+			if ( status != IB_SUCCESS )
+			{
+				// TODO: Kill session and inform port driver link down scsiportnotification
+			}
+		}
+		else
+		{
+			if( p_wc->status != IB_WCS_WR_FLUSHED_ERR )
+			{
+				SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+					("Recv Completion with Error Status %s (vendor specific %#x)\n",
+					p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status ),
+					(int)p_wc->vendor_specific) );
+			}
+			else
+			{
+				SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,
+					("Recv Completion Flushed in Error Status: %s\n",
+					p_srp_session->p_hba->ifc.get_wc_status_str( p_wc->status )));
+
+			}
+		}
+
+		/* Put onto head of free list */
+		cl_obj_lock( &p_srp_session->obj );
+		p_wc->p_next = p_srp_session->connection.p_wc_free_list;
+		p_srp_session->connection.p_wc_free_list = p_wc;
+		cl_obj_unlock( &p_srp_session->obj );
+
+		/* Get next completion */
+		p_wc = p_wc_done_list;
+	}
+
+	/* Re-arm the CQ for more completions */
+	status = p_srp_session->p_hba->ifc.rearm_cq(
+		p_srp_session->connection.h_recv_cq, FALSE );
+	if ( status != IB_SUCCESS)
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("ib_rearm_cq() failed!, status 0x%x\n", status) );
+
+		// TODO: Kill session and inform port driver link down scsiportnotification
+	}
+
+	cl_obj_deref( &p_srp_session->obj );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* srp_recv_completion_cb */
+/*!
+Set Timer to Process Receive Completions - Responses to our requests
+
+@param p_context - context pointer to the owning session
+
+@return - none
+*/
+void
+srp_recv_completion_cb(
+	IN	const	ib_cq_handle_t	h_cq,
+	IN			void			*p_context )
+{
+	srp_session_t	*p_srp_session = (srp_session_t *)p_context;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	UNUSED_PARAM( h_cq );
+
+	__srp_process_session_recv_completions( p_srp_session );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* __srp_build_cmd */
+/*!
+Build the SRP Cmd to be sent to the VFx target + +@param p_dev_ext - our context pointer +@param p_srb - scsi request to send to target + +@return - none +*/ +static inline +void +__srp_build_cmd( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb, + IN srp_conn_info_t *p_srp_conn_info ) +{ + srp_send_descriptor_t *p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension; + srp_cmd_t *p_srp_cmd = (srp_cmd_t *)p_send_descriptor->data_segment; + UCHAR *p_cdb; + PSTOR_SCATTER_GATHER_LIST p_scatter_gather_list = NULL; + uint8_t scatter_gather_count = 0; + srp_memory_descriptor_t *p_memory_descriptor = NULL; + srp_memory_table_descriptor_t *p_table_descriptor = NULL; + uint32_t i; + ULONG scsi_direction = p_srb->SrbFlags & ( SRB_FLAGS_DATA_IN | SRB_FLAGS_DATA_OUT ); + DATA_BUFFER_DESCRIPTOR_FORMAT format = p_srp_conn_info->descriptor_format & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS; + ULONG length; +#if DBG + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + srp_session_t *p_srp_session; +#endif + + SRP_ENTER( SRP_DBG_DATA ); + +#if DBG + /* statistics */ + p_srp_session = p_hba->session_list[p_send_descriptor->p_srb->TargetId]; + p_srp_session->x_pkt_built++; +#endif + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Sending I/O to Path = 0x%x, Target = 0x%x, Lun = 0x%x\n", + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + + setup_srp_command( p_srp_cmd, + p_send_descriptor->tag, + DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT, + DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT, + 0, + 0, + BUILD_SCSI_ADDRESS( p_srb->Lun ), + TAV_SIMPLE_TASK, + 0 ); + + p_cdb = get_srp_command_cdb( p_srp_cmd ); + cl_memcpy( p_cdb, p_srb->Cdb, p_srb->CdbLength ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("CDB Length = %d.\n", p_srb->CdbLength) ); +#if DBG + { + char* cmd; + cmd = cl_zalloc(p_srb->CdbLength +1); + if(cmd) + { + for ( i = 0; i < p_srb->CdbLength; i++ ) + { + cmd[i] = p_srb->Cdb[i]; + } + cmd[i] = '\0'; + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, ("CDB = 0x%s\n",cmd) ); + + cl_free(cmd); + } + + } +#endif + + if ( !format ) + { + format = DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR; + } + + if ( scsi_direction ) + { + p_scatter_gather_list = StorPortGetScatterGatherList( p_dev_ext, p_srb ); + CL_ASSERT( p_scatter_gather_list != NULL ); + scatter_gather_count = (uint8_t)p_scatter_gather_list->NumberOfElements; + } + + /* Cap the length of the inline descriptors to the maximum IU size. 
*/ + if( p_srp_conn_info->max_scatter_gather_entries < scatter_gather_count ) + { + scatter_gather_count = + (uint8_t)p_srp_conn_info->max_scatter_gather_entries; + } + + if ( scsi_direction == SRB_FLAGS_DATA_IN ) + { + set_srp_command_data_in_buffer_desc_fmt( p_srp_cmd, format ); + set_srp_command_data_in_buffer_desc_count( p_srp_cmd, scatter_gather_count ); + p_memory_descriptor = get_srp_command_data_in_buffer_desc( p_srp_cmd ); + } + + else if ( scsi_direction == SRB_FLAGS_DATA_OUT ) + { + set_srp_command_data_out_buffer_desc_fmt( p_srp_cmd, format ); + set_srp_command_data_out_buffer_desc_count( p_srp_cmd, scatter_gather_count ); + p_memory_descriptor = get_srp_command_data_out_buffer_desc( p_srp_cmd ); + } + +#if DBG + { /* print max SG list, gotten from the StorPort */ + static ULONG s_sg_max = 0; + if ( p_scatter_gather_list && s_sg_max < p_scatter_gather_list->NumberOfElements ) + { + uint32_t total = 0; + PSTOR_SCATTER_GATHER_ELEMENT p_sg_el; + for ( i = 0, p_sg_el = p_scatter_gather_list->List; + i < scatter_gather_count; i++, p_sg_el++ ) + { + total += p_sg_el->Length; + } + s_sg_max = p_scatter_gather_list->NumberOfElements; + SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_DATA, + ( "StorPort sg_cnt %d, total %#x, max sg_cnt %d, direction %s\n", + s_sg_max, total, p_srp_conn_info->max_scatter_gather_entries, + ( scsi_direction == SRB_FLAGS_DATA_IN ) ? "IN" : "OUT" )); + } + } +#endif + + if ( p_memory_descriptor != NULL ) + { + PSTOR_SCATTER_GATHER_ELEMENT p_sg_element; + uint32_t totalLength; + uint64_t buf_addr; + if ( format == DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) + { + p_table_descriptor = (srp_memory_table_descriptor_t *)p_memory_descriptor; + p_memory_descriptor = ( srp_memory_descriptor_t *)(p_table_descriptor + 1 ); + + buf_addr = (StorPortGetPhysicalAddress( p_dev_ext,p_srb, p_memory_descriptor, &length)).QuadPart; + + /* we don't swap rkey - it is already in network order*/ + p_table_descriptor->descriptor.virtual_address = cl_hton64( buf_addr ); + p_table_descriptor->descriptor.memory_handle = p_srp_conn_info->rkey; + + if((p_scatter_gather_list->NumberOfElements > 1) && !__srp_map_fmr(p_dev_ext,p_scatter_gather_list,p_send_descriptor,p_memory_descriptor)) + { + /* Set the discriptor list len */ + p_table_descriptor->descriptor.data_length = + cl_hton32( sizeof(srp_memory_descriptor_t) *1); + p_table_descriptor->total_length = p_memory_descriptor->data_length; + if ( scsi_direction == SRB_FLAGS_DATA_IN ) + set_srp_command_data_in_buffer_desc_count( p_srp_cmd, 1 ); + else if ( scsi_direction == SRB_FLAGS_DATA_OUT ) + set_srp_command_data_out_buffer_desc_count( p_srp_cmd, 1 ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("virtual_address[%d] = 0x%I64x.\n", + 0, cl_ntoh64(p_memory_descriptor->virtual_address) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("memory_handle[%d] = 0x%x.\n", + 0, cl_ntoh32( p_memory_descriptor->memory_handle) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("data_length[%d] = %d.\n", + 0, cl_ntoh32( p_memory_descriptor->data_length) ) ); + } + else + { + CL_ASSERT( scatter_gather_count == + p_scatter_gather_list->NumberOfElements ); + + /* Set the descriptor list len */ + p_table_descriptor->descriptor.data_length = + cl_hton32( sizeof(srp_memory_descriptor_t) * + p_scatter_gather_list->NumberOfElements ); + + for ( i = 0, totalLength = 0, p_sg_element = p_scatter_gather_list->List; + i < scatter_gather_count; + i++, p_memory_descriptor++, p_sg_element++ ) + { + buf_addr = p_srp_conn_info->vaddr + 
p_sg_element->PhysicalAddress.QuadPart; + + p_memory_descriptor->virtual_address = cl_hton64( buf_addr ); + p_memory_descriptor->memory_handle = p_srp_conn_info->rkey; + p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length ); + totalLength += p_sg_element->Length; + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("virtual_address[%d] = 0x%I64x.\n", + i, cl_ntoh64(p_memory_descriptor->virtual_address) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("memory_handle[%d] = 0x%x.\n", + i, cl_ntoh32( p_memory_descriptor->memory_handle) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("data_length[%d] = %d.\n", + i, cl_ntoh32( p_memory_descriptor->data_length) ) ); + } + p_table_descriptor->total_length = cl_hton32( totalLength ); + } + } + else if ( format == DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR ) + { + CL_ASSERT( scatter_gather_count == + p_scatter_gather_list->NumberOfElements ); + if((p_scatter_gather_list->NumberOfElements > 1) && !__srp_map_fmr(p_dev_ext,p_scatter_gather_list,p_send_descriptor,p_memory_descriptor)) + { + if ( scsi_direction == SRB_FLAGS_DATA_IN ) + set_srp_command_data_in_buffer_desc_count( p_srp_cmd, 1 ); + else if ( scsi_direction == SRB_FLAGS_DATA_OUT ) + set_srp_command_data_out_buffer_desc_count( p_srp_cmd, 1 ); + } + else + { + for ( i = 0, p_sg_element = p_scatter_gather_list->List; + i < scatter_gather_count; i++, p_memory_descriptor++, p_sg_element++ ) + { + buf_addr = p_srp_conn_info->vaddr + p_sg_element->PhysicalAddress.QuadPart; + p_memory_descriptor->virtual_address = cl_hton64( buf_addr ); + p_memory_descriptor->memory_handle = p_srp_conn_info->rkey; + p_memory_descriptor->data_length = cl_hton32( p_sg_element->Length ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("virtual_address[%d] = 0x%I64x.\n", + i, cl_ntoh64(p_memory_descriptor->virtual_address) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("memory_handle[%d] = 0x%x.\n", + i, cl_ntoh32( p_memory_descriptor->memory_handle) ) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("data_length[%d] = %d.\n", + i, cl_ntoh32( p_memory_descriptor->data_length) ) ); + } + } + } + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("scatter/gather count = %d.\n", scatter_gather_count)); + } + + p_srp_cmd->logical_unit_number = cl_hton64( p_srp_cmd->logical_unit_number ); + + //set_srp_command_from_host_to_network( p_srp_cmd ); + + SRP_EXIT( SRP_DBG_DATA ); +} + +/* srp_format_io_request */ +/*! 
+Format the SRP Cmd for the VFx target + +@param p_dev_ext - our context pointer +@param p_srb - scsi request to send to target + +@return - TRUE for success, FALSE for failure +*/ +BOOLEAN +srp_format_io_request( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ) +{ + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + BOOLEAN result = TRUE; + srp_session_t *p_srp_session; + + SRP_ENTER( SRP_DBG_DATA ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Device Extension Address = %p\n", p_dev_ext) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Srb Address = %p\n", p_srb) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Srb DataBuffer Address = %p\n", p_srb->DataBuffer) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Srb DataTransferLength = %d\n", p_srb->DataTransferLength) ); + + cl_obj_lock( &p_hba->obj ); + + p_srp_session = p_hba->session_list[p_srb->TargetId]; + + if ( p_srp_session != NULL ) + { + srp_conn_info_t srp_conn_info; + + cl_obj_ref( &p_srp_session->obj ); + cl_obj_unlock( &p_hba->obj ); + + srp_conn_info.vaddr = p_srp_session->hca.vaddr; + srp_conn_info.lkey = p_srp_session->hca.lkey; + srp_conn_info.rkey = p_srp_session->hca.rkey; + srp_conn_info.descriptor_format = p_srp_session->connection.descriptor_format; + srp_conn_info.init_to_targ_iu_sz = p_srp_session->connection.init_to_targ_iu_sz; + srp_conn_info.max_scatter_gather_entries = p_srp_session->connection.max_scatter_gather_entries; + srp_conn_info.tag = cl_atomic_inc( &p_srp_session->connection.tag ); + srp_conn_info.signal_send_completion = + ((srp_conn_info.tag % p_srp_session->connection.signaled_send_completion_count) == 0) ? TRUE : FALSE; + + cl_obj_deref( &p_srp_session->obj ); + + srp_build_send_descriptor( p_dev_ext, p_srb, &srp_conn_info ); + + __srp_build_cmd( p_dev_ext, p_srb, &srp_conn_info ); + } + else + { + // Handle the error case here + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Find Session For Target ID = %d\n", p_srb->TargetId) ); + cl_obj_unlock( &p_hba->obj ); + p_srb->SrbStatus = SRB_STATUS_INVALID_TARGET_ID; + result = FALSE; + } + + + SRP_EXIT( SRP_DBG_DATA ); + return ( result ); +} + +void +srp_post_io_request( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ) +{ + ib_api_status_t status = IB_SUCCESS; + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + srp_send_descriptor_t *p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension; + srp_session_t *p_srp_session; + srp_descriptors_t *p_descriptors; + + SRP_ENTER( SRP_DBG_DATA ); + + cl_obj_lock( &p_hba->obj ); + + p_srp_session = p_hba->session_list[p_srb->TargetId]; + + if ( p_srp_session != NULL ) + { + cl_obj_ref( &p_srp_session->obj ); + cl_obj_unlock( &p_hba->obj ); + + p_descriptors = &p_srp_session->descriptors; + + cl_spinlock_acquire ( &p_descriptors->pending_list_lock ); + if ( (p_srp_session->connection.request_limit <= p_srp_session->connection.request_threashold) || + !cl_is_qlist_empty( &p_descriptors->pending_descriptors ) || + p_srp_session->repost_is_on ) + { + cl_spinlock_release ( &p_descriptors->pending_list_lock ); + srp_add_pending_descriptor( p_descriptors, p_send_descriptor ); + cl_obj_deref( &p_srp_session->obj ); + goto exit; + } + cl_spinlock_release ( &p_descriptors->pending_list_lock ); + + __srp_post_io_request( p_dev_ext, p_srb, p_srp_session ); + cl_obj_deref( &p_srp_session->obj ); + goto exit; + } + else + { + cl_obj_unlock( &p_hba->obj ); + p_srb->SrbStatus = SRB_STATUS_NO_HBA; + goto err; + } + +err: + SRP_PRINT( TRACE_LEVEL_ERROR, 
SRP_DBG_ERROR, + ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x, tag 0x%I64xn", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun, + get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) ); + + status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to unmap FMR Status = %d.\n", status) ); + // TODO: Kill session and inform port driver link down storportnotification + } + + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + +exit: + SRP_EXIT( SRP_DBG_DATA ); +} + +void +srp_abort_command( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ) +{ + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + srp_session_t *p_srp_session; + uint64_t iu_tag; + srp_send_descriptor_t *p_srb_send_descriptor; + srp_send_descriptor_t *p_send_descriptor; + srp_conn_info_t srp_conn_info; + srp_tsk_mgmt_t *p_srp_tsk_mgmt; + + SRP_ENTER( SRP_DBG_DATA ); + + cl_obj_lock( &p_hba->obj ); + + p_srp_session = p_hba->session_list[p_srb->TargetId]; + if ( p_srp_session == NULL ) + { + /* If the session is NULL there is no connection and cannot be aborted */ + p_srb->SrbStatus = SRB_STATUS_ABORT_FAILED; + goto exit; + } + + p_srb_send_descriptor = (srp_send_descriptor_t *)p_srb->NextSrb->SrbExtension; + + iu_tag = get_srp_information_unit_tag( (srp_information_unit_t *)p_srb_send_descriptor->data_segment ); + + p_send_descriptor = srp_find_matching_send_descriptor( &p_srp_session->descriptors, iu_tag ); + if ( p_send_descriptor == NULL ) + { + /* Cannot find the command so it must have been completed */ + p_srb->SrbStatus = SRB_STATUS_ABORT_FAILED; + goto exit; + } + + CL_ASSERT( p_srb_send_descriptor == p_send_descriptor ); + + p_srb->NextSrb->SrbStatus = SRB_STATUS_ABORTED; + + /* create and send abort request to the VFx */ + + srp_conn_info.vaddr = p_srp_session->hca.vaddr; + srp_conn_info.lkey = p_srp_session->hca.lkey; + srp_conn_info.rkey = p_srp_session->hca.rkey; + + srp_conn_info.init_to_targ_iu_sz = p_srp_session->connection.init_to_targ_iu_sz; + srp_conn_info.max_scatter_gather_entries = p_srp_session->connection.max_scatter_gather_entries; + srp_conn_info.tag = cl_atomic_inc( &p_srp_session->connection.tag ); + srp_conn_info.signal_send_completion = + ((srp_conn_info.tag % p_srp_session->connection.signaled_send_completion_count) == 0) ? 
TRUE : FALSE; + + srp_build_send_descriptor( p_dev_ext, p_srb, &srp_conn_info ); + + p_srp_tsk_mgmt = (srp_tsk_mgmt_t *)p_send_descriptor->data_segment; + + setup_srp_tsk_mgmt( p_srp_tsk_mgmt, + p_send_descriptor->tag, + BUILD_SCSI_ADDRESS( p_srb->Lun ), + TMF_ABORT_TASK, + iu_tag ); + + set_srp_tsk_mgmt_from_host_to_network( p_srp_tsk_mgmt ); + + srp_post_io_request( p_dev_ext, p_srb ); + +exit: + cl_obj_unlock( &p_hba->obj ); + if ( p_srb->SrbStatus == SRB_STATUS_ABORT_FAILED ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), " + "Path = 0x%x, Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + } + + SRP_EXIT( SRP_DBG_DATA ); +} + +void +srp_lun_reset( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ) +{ + + ib_api_status_t status = IB_SUCCESS; + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + srp_session_t *p_srp_session; + + SRP_ENTER( SRP_DBG_DATA ); + + cl_obj_lock( &p_hba->obj ); + + p_srp_session = p_hba->session_list[p_srb->TargetId]; + if ( p_srp_session != NULL ) + { + srp_send_descriptor_t *p_send_descriptor; + UCHAR path_id = p_srb->PathId; + UCHAR target_id = p_srb->TargetId; + UCHAR lun = p_srb->Lun; + + StorPortPauseDevice( p_dev_ext, p_srb->PathId, p_srb->TargetId, p_srb->Lun, 10 ); + + /* release this device' descriptors from the pending_list */ + while ( (p_send_descriptor = srp_remove_lun_head_pending_descriptor( &p_srp_session->descriptors, p_srb->Lun )) != NULL ) + { + status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to unmap FMR Status = %d.\n", status) ); + // TODO: Kill session and inform port driver link down storportnotification + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), " + "Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64xn", + g_srb_status_name[SRB_STATUS_BUS_RESET], + SRB_STATUS_BUS_RESET, + g_srb_function_name[p_send_descriptor->p_srb->Function], + p_send_descriptor->p_srb->Function, + p_send_descriptor->p_srb->PathId, + p_send_descriptor->p_srb->TargetId, + p_send_descriptor->p_srb->Lun, + get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) ); + } + + /* release this device' descriptors from the sent_list */ + while ( (p_send_descriptor = srp_remove_lun_head_send_descriptor( &p_srp_session->descriptors, p_srb->Lun )) != NULL ) + { + status = __srp_clean_send_descriptor( p_send_descriptor, p_srp_session ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to unmap FMR Status = %d.\n", status) ); + // TODO: Kill session and inform port driver link down storportnotification + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), " + "Path = 0x%x, Target = 0x%x, Lun = 0x%x, tag 0x%I64xn", + g_srb_status_name[SRB_STATUS_BUS_RESET], + SRB_STATUS_BUS_RESET, + g_srb_function_name[p_send_descriptor->p_srb->Function], + p_send_descriptor->p_srb->Function, + p_send_descriptor->p_srb->PathId, + p_send_descriptor->p_srb->TargetId, + p_send_descriptor->p_srb->Lun, + get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) ); + } + + + p_srb->SrbStatus = SRB_STATUS_SUCCESS; + 
+
+		SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,
+			("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), "
+			"Path = 0x%x, Target = 0x%x, Lun = 0x%x\n",
+			g_srb_status_name[p_srb->SrbStatus],
+			p_srb->SrbStatus,
+			g_srb_function_name[p_srb->Function],
+			p_srb->Function,
+			p_srb->PathId,
+			p_srb->TargetId,
+			p_srb->Lun) );
+
+		StorPortNotification( RequestComplete, p_dev_ext, p_srb );
+
+		StorPortCompleteRequest( p_dev_ext, path_id, target_id, lun, SRB_STATUS_BUS_RESET );
+
+		StorPortResumeDevice( p_dev_ext, path_id, target_id, lun );
+	}
+	else
+	{
+		// Handle the error case here
+		p_srb->SrbStatus = SRB_STATUS_INVALID_TARGET_ID;
+		SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA,
+			("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), "
+			"Path = 0x%x, Target = 0x%x, Lun = 0x%x\n",
+			g_srb_status_name[p_srb->SrbStatus],
+			p_srb->SrbStatus,
+			g_srb_function_name[p_srb->Function],
+			p_srb->Function,
+			p_srb->PathId,
+			p_srb->TargetId,
+			p_srb->Lun) );
+
+		StorPortNotification( RequestComplete, p_dev_ext, p_srb );
+	}
+
+	cl_obj_unlock( &p_hba->obj );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+#if DBG
+
+/* statistics */
+
+void
+srp_x_clean(
+	IN void *p_session )
+{
+	srp_session_t *p_srp_session = p_session;
+
+	if (p_srp_session == NULL)
+		return;
+
+	p_srp_session->x_pkt_fmr = 0;
+	p_srp_session->x_pkt_built = 0;
+	p_srp_session->x_rld_total = 0;
+	p_srp_session->x_rld_num = 0;
+	p_srp_session->x_rld_max = 0;
+	p_srp_session->x_rld_min = p_srp_session->x_req_limit;
+	p_srp_session->x_pend_total = 0;
+	p_srp_session->x_pend_num = 0;
+	p_srp_session->x_pend_max = 0;
+	p_srp_session->x_sent_total = 0;
+	p_srp_session->x_sent_num = 0;
+	p_srp_session->x_sent_max = 0;
+}
+
+void
+srp_x_print(
+	IN void *p_session )
+{
+	srp_session_t *p_srp_session = p_session;
+
+	if (p_srp_session == NULL)
+		return;
+
+	SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,
+		("req_limit %d, pkt_built %d, pkt_fmr'ed %d\n",
+		p_srp_session->x_req_limit,
+		p_srp_session->x_pkt_built,
+		p_srp_session->x_pkt_fmr ));
+
+	SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,
+		("request_limit_delta: max %d, min %d, average %d, num %d\n",
+		p_srp_session->x_rld_max, p_srp_session->x_rld_min,
+		(p_srp_session->x_rld_num) ? p_srp_session->x_rld_total / p_srp_session->x_rld_num : 0,
+		p_srp_session->x_rld_num ));
+
+	SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,
+		("pending_desc: max %d, average %d, num %d\n",
+		p_srp_session->x_pend_max,
+		(p_srp_session->x_pend_num) ? p_srp_session->x_pend_total / p_srp_session->x_pend_num : 0,
+		p_srp_session->x_pend_num ));
+
+	SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_DATA,
+		("sent_desc: max %d, average %d, num %d\n",
+		p_srp_session->x_sent_max,
+		(p_srp_session->x_sent_num) ? p_srp_session->x_sent_total / p_srp_session->x_sent_num : 0,
+		p_srp_session->x_sent_num ));
+
+}
+
+#endif
diff --git a/branches/Ndi/ulp/srp/kernel/srp_data_path.h b/branches/Ndi/ulp/srp/kernel/srp_data_path.h
new file mode 100644
index 00000000..4bdbae1a
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_data_path.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _SRP_DATA_PATH_H_ +#define _SRP_DATA_PATH_H_ + + +#include +#include +#include "srp.h" +#include "srp_data.h" + +typedef struct _srp_conn_info +{ + uint64_t vaddr; + net32_t lkey; + net32_t rkey; + uint32_t init_to_targ_iu_sz; + uint64_t tag; + uint32_t max_scatter_gather_entries; + BOOLEAN signal_send_completion; + DATA_BUFFER_DESCRIPTOR_FORMAT descriptor_format; +} srp_conn_info_t; + +void +srp_send_completion_cb( + IN const ib_cq_handle_t h_cq, + IN void *p_context ); + +void +srp_recv_completion_cb( + IN const ib_cq_handle_t h_cq, + IN void *p_context ); + +BOOLEAN +srp_format_io_request( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ); + +void +srp_post_io_request( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ); + +void +srp_abort_command( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ); + +void +srp_lun_reset( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb ); + +#endif /* _SRP_DATA_PATH_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_debug.h b/branches/Ndi/ulp/srp/kernel/srp_debug.h new file mode 100644 index 00000000..9a5ff7a1 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_debug.h @@ -0,0 +1,149 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ *
+ * $Id$
+ */
+
+
+
+#ifndef _SRP_DEBUG_H_
+#define _SRP_DEBUG_H_
+
+
+#include
+
+
+extern uint32_t g_srp_dbg_level;
+extern uint32_t g_srp_dbg_flags;
+extern uint32_t g_srp_mode_flags;
+
+// mode flags
+#define SRP_MODE_NO_FMR_POOL (1 << 0) /* don't use FMR_POOL - for tuning purposes */
+#define SRP_MODE_SG_UNLIMITED (1 << 1) /* ignore the DDK limitation against enlarging the StorPort max SG list */
+
+#if defined(EVENT_TRACING)
+//
+// Software Tracing Definitions
+//
+
+
+#define WPP_CONTROL_GUIDS \
+	WPP_DEFINE_CONTROL_GUID(SRPCtlGuid,(5AF07B3C,D119,4233,9C81,C07EF481CBE6), \
+	WPP_DEFINE_BIT( SRP_DBG_ERROR) \
+	WPP_DEFINE_BIT( SRP_DBG_PNP) \
+	WPP_DEFINE_BIT( SRP_DBG_DATA) \
+	WPP_DEFINE_BIT( SRP_DBG_SESSION) \
+	WPP_DEFINE_BIT( SRP_DBG_DEBUG))
+
+
+
+#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl)
+#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags)
+#define WPP_FLAG_ENABLED(flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE)
+#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags)
+
+
+// begin_wpp config
+// SRP_ENTER(FLAG);
+// SRP_EXIT(FLAG);
+// USEPREFIX(SRP_PRINT, "%!STDPREFIX! [SRP] :%!FUNC!() :");
+// USEPREFIX(SRP_PRINT_EXIT, "%!STDPREFIX! [SRP] :%!FUNC!() :");
+// USESUFFIX(SRP_ENTER, " [SRP] :%!FUNC!():[");
+// USESUFFIX(SRP_EXIT, " [SRP] :%!FUNC!():]");
+// end_wpp
+
+
+#else
+
+
+#include
+
+/*
+ * Debug macros
+ */
+
+
+#define SRP_DBG_ERR (1 << 0)
+#define SRP_DBG_PNP (1 << 1)
+#define SRP_DBG_DATA (1 << 2)
+#define SRP_DBG_SESSION (1 << 3)
+#define SRP_DBG_DEBUG (1 << 4)
+
+#define SRP_DBG_ERROR (CL_DBG_ERROR | SRP_DBG_ERR)
+#define SRP_DBG_ALL CL_DBG_ALL
+
+#if DBG
+
+// assignment of _level_ is needed to overcome warning C4127
+#define SRP_PRINT(_level_,_flag_,_msg_) \
+	{ \
+	if( g_srp_dbg_level >= (_level_) ) \
+		CL_TRACE( _flag_, g_srp_dbg_flags, _msg_ ); \
+	}
+
+#define SRP_PRINT_EXIT(_level_,_flag_,_msg_) \
+	{ \
+	if( g_srp_dbg_level >= (_level_) ) \
+		CL_TRACE( _flag_, g_srp_dbg_flags, _msg_ );\
+	SRP_EXIT(_flag_);\
+	}
+
+#define SRP_ENTER(_flag_) \
+	{ \
+	if( g_srp_dbg_level >= TRACE_LEVEL_VERBOSE ) \
+		CL_ENTER( _flag_, g_srp_dbg_flags ); \
+	}
+
+#define SRP_EXIT(_flag_)\
+	{ \
+	if( g_srp_dbg_level >= TRACE_LEVEL_VERBOSE ) \
+		CL_EXIT( _flag_, g_srp_dbg_flags ); \
+	}
+
+
+#else
+
+#define SRP_PRINT(lvl, flags, msg)
+
+#define SRP_PRINT_EXIT(_level_,_flag_,_msg_)
+
+#define SRP_ENTER(_flag_)
+
+#define SRP_EXIT(_flag_)
+
+
+#endif
+
+
+#endif //EVENT_TRACING
+
+extern char g_srb_function_name[][32];
+extern char g_srb_status_name[][32];
+extern char g_srb_scsi_status_name[][32];
+
+#endif /* _SRP_DEBUG_H_ */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_descriptors.c b/branches/Ndi/ulp/srp/kernel/srp_descriptors.c
new file mode 100644
index 00000000..c77571a5
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_descriptors.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "srp_connection.h" +#include "srp_cmd.h" +#include "srp_data_path.h" +#include "srp_descriptors.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_descriptors.tmh" +#endif +#include "srp_rsp.h" +#include "srp_session.h" +#include "srp_tsk_mgmt.h" + +/* __srp_create_recv_descriptors */ +/*! +Creates the receive descriptors and posts them to the receive queue + +@param p_descriptors - pointer to the work requests structure +@param h_pd - protection domain used for registration of memory +@param h_qp - queue pair used to post work requests + +@return - result of operations +*/ +static ib_api_status_t +__srp_create_recv_descriptors( + IN OUT srp_descriptors_t *p_descriptors, + IN ib_al_ifc_t* const p_ifc, + IN ib_pd_handle_t h_pd, + IN ib_qp_handle_t h_qp ) +{ + ib_api_status_t status = IB_SUCCESS; + srp_recv_descriptor_t *p_descriptor; + uint8_t *p_data_segment; + ib_mr_create_t mr_create; + uint32_t i; + + SRP_ENTER( SRP_DBG_PNP ); + + /* Create the array of recv descriptors */ + p_descriptors->p_recv_descriptors_array = + (srp_recv_descriptor_t *)cl_zalloc( p_descriptors->recv_descriptor_count * sizeof(srp_recv_descriptor_t) ); + if ( p_descriptors->p_recv_descriptors_array == NULL ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to allocate %d recv descriptors.\n", p_descriptors->recv_descriptor_count) ); + status = IB_INSUFFICIENT_MEMORY; + goto exit; + } + + /* Create the array of recv data segments */ + p_descriptors->p_recv_data_segments_array = + cl_zalloc( p_descriptors->recv_descriptor_count * p_descriptors->recv_data_segment_size ); + if ( p_descriptors->p_recv_data_segments_array == NULL ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to allocate %d recv data segments of %d length.\n", + p_descriptors->recv_descriptor_count, + p_descriptors->recv_data_segment_size) ); + + cl_free( p_descriptors->p_recv_descriptors_array ); + p_descriptors->p_recv_descriptors_array = NULL; + status = IB_INSUFFICIENT_MEMORY; + goto exit; + } + + /* Register the data segments array memory */ + mr_create.vaddr = p_descriptors->p_recv_data_segments_array; + mr_create.length = p_descriptors->recv_descriptor_count * p_descriptors->recv_data_segment_size; + mr_create.access_ctrl = IB_AC_LOCAL_WRITE; + + status = p_ifc->reg_mem( h_pd, + &mr_create, + &p_descriptors->recv_lkey, + &p_descriptors->recv_rkey, + &p_descriptors->h_recv_mr ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to register recv data segments. 
Status = %d.\n", status) ); + + cl_free( p_descriptors->p_recv_data_segments_array ); + p_descriptors->p_recv_data_segments_array = NULL; + + cl_free( p_descriptors->p_recv_descriptors_array ); + p_descriptors->p_recv_descriptors_array = NULL; + goto exit; + } + + /* Initialize them and post to receive queue */ + p_descriptor = p_descriptors->p_recv_descriptors_array; + p_data_segment = p_descriptors->p_recv_data_segments_array; + + for ( i = 0; i < p_descriptors->recv_descriptor_count; i++ ) + { + p_descriptor->wr.p_next = NULL; + p_descriptor->wr.wr_id = (uint64_t)((void* __ptr64)p_descriptor); + p_descriptor->wr.num_ds = 1; + p_descriptor->wr.ds_array = p_descriptor->ds; + + p_descriptor->ds[0].vaddr = (uint64_t)((void* __ptr64)p_data_segment); + p_descriptor->ds[0].length = p_descriptors->recv_data_segment_size; + p_descriptor->ds[0].lkey = p_descriptors->recv_lkey; + + p_descriptors->p_recv_descriptors_array[i].p_data_segment = p_data_segment; + + status = p_ifc->post_recv( h_qp, &p_descriptor->wr, NULL ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to post recv descriptor. Status = %d.\n", status) ); + goto exit; + } + + p_descriptor++; + p_data_segment += p_descriptors->recv_data_segment_size; + } + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +/* srp_init_descriptors */ +/*! +Orchestrates creation of receive descriptor buffers and sent list + +@param p_descriptors - pointer to the descriptors structure +@param recv descriptor_count - number of receive descriptors to create +@param recv data_segment_size - size of each receive descriptor's data area +@param h_pd - protection domain used for registration of memory +@param h_qp - queue pair used to post work requests + +@return - result of operations +*/ +ib_api_status_t +srp_init_descriptors( + IN OUT srp_descriptors_t *p_descriptors, + IN uint32_t recv_descriptor_count, + IN uint32_t recv_data_segment_size, + IN ib_al_ifc_t* const p_ifc, + IN ib_pd_handle_t h_pd, + IN ib_qp_handle_t h_qp ) +{ + ib_api_status_t status; + + SRP_ENTER( SRP_DBG_PNP ); + + CL_ASSERT( p_descriptors != NULL ); + + cl_memclr( p_descriptors, sizeof(*p_descriptors) ); + + cl_spinlock_init ( &p_descriptors->sent_list_lock ); + cl_spinlock_init ( &p_descriptors->pending_list_lock ); + cl_qlist_init( &p_descriptors->sent_descriptors ); + cl_qlist_init( &p_descriptors->pending_descriptors ); + + p_descriptors->initialized = TRUE; + + p_descriptors->recv_descriptor_count = recv_descriptor_count; + p_descriptors->recv_data_segment_size = recv_data_segment_size; + + status = __srp_create_recv_descriptors( p_descriptors, p_ifc, h_pd, h_qp ); + if ( status != IB_SUCCESS ) + { + srp_destroy_descriptors( p_descriptors ); + } + + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +/* srp_destroy_descriptors */ +/*! 
+Destroys the receive work request buffers
+
+@param p_descriptors - pointer to the descriptors structure
+
+@return - result of operations
+*/
+ib_api_status_t
+srp_destroy_descriptors(
+	IN OUT srp_descriptors_t *p_descriptors )
+{
+	SRP_ENTER( SRP_DBG_PNP );
+
+	if ( p_descriptors->initialized == TRUE )
+	{
+		cl_spinlock_destroy ( &p_descriptors->sent_list_lock );
+		cl_spinlock_destroy ( &p_descriptors->pending_list_lock );
+
+		if ( p_descriptors->p_recv_data_segments_array != NULL )
+		{
+			cl_free( p_descriptors->p_recv_data_segments_array );
+		}
+
+		if ( p_descriptors->p_recv_descriptors_array != NULL )
+		{
+			cl_free( p_descriptors->p_recv_descriptors_array );
+		}
+
+		cl_memclr( p_descriptors, sizeof( *p_descriptors ) );
+	}
+
+	SRP_EXIT( SRP_DBG_PNP );
+
+	return IB_SUCCESS;
+}
+
+/* __srp_add_descriptor */
+/*!
+Puts descriptor at tail of the list
+
+@param p_descriptor - pointer to the descriptor to add
+@param descriptors_list - pointer to the list
+@param p_lock - pointer to the lock protecting the list
+
+@return - none
+*/
+inline
+void
+__srp_add_descriptor(
+	IN srp_send_descriptor_t *p_descriptor,
+	IN cl_qlist_t *descriptors_list,
+	IN cl_spinlock_t *p_lock)
+{
+	SRP_ENTER( SRP_DBG_DATA );
+
+	cl_spinlock_acquire ( p_lock );
+
+	cl_qlist_insert_tail( descriptors_list, &p_descriptor->list_item );
+	CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );
+
+	cl_spinlock_release ( p_lock );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* srp_add_send_descriptor */
+/*!
+Puts send descriptor at tail of the sent list
+
+@param p_descriptors - pointer to the descriptors structure
+@param p_descriptor - pointer to the descriptor to add
+
+@return - none
+*/
+inline
+void
+srp_add_send_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN srp_send_descriptor_t *p_descriptor )
+{
+	SRP_ENTER( SRP_DBG_DATA );
+	__srp_add_descriptor( p_descriptor,
+		&p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* srp_add_pending_descriptor */
+/*!
+Puts pending send descriptor at tail of the pending list
+
+@param p_descriptors - pointer to the descriptors structure
+@param p_descriptor - pointer to the descriptor to add
+
+@return - none
+*/
+void
+srp_add_pending_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN srp_send_descriptor_t *p_descriptor )
+{
+	SRP_ENTER( SRP_DBG_DATA );
+	__srp_add_descriptor( p_descriptor,
+		&p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* __srp_remove_send_descriptor */
+/*!
+Removes a send descriptor from the given list
+
+@param p_descriptor - pointer to the descriptor to remove
+@param descriptors_list - pointer to the list
+@param p_lock - pointer to the lock protecting the list
+
+@return - none
+*/
+inline
+void
+__srp_remove_send_descriptor(
+	IN srp_send_descriptor_t *p_descriptor,
+	IN cl_qlist_t *descriptors_list,
+	IN cl_spinlock_t *p_lock)
+{
+	SRP_ENTER( SRP_DBG_DATA );
+
+	cl_spinlock_acquire ( p_lock );
+
+	CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );
+	cl_qlist_remove_item( descriptors_list, &p_descriptor->list_item );
+
+	cl_spinlock_release ( p_lock );
+
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+
+/* srp_remove_send_descriptor */
+/*!
+Removes send descriptor from the sent list
+
+@param p_descriptors - pointer to the descriptors structure
+@param p_descriptor - pointer to the descriptor to remove
+
+@return - none
+*/
+inline
+void
+srp_remove_send_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN srp_send_descriptor_t *p_descriptor )
+{
+	SRP_ENTER( SRP_DBG_DATA );
+	__srp_remove_send_descriptor( p_descriptor,
+		&p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* srp_remove_pending_descriptor */
+/*!
+Removes pending send descriptor from the pending list
+
+@param p_descriptors - pointer to the descriptors structure
+@param p_descriptor - pointer to the descriptor to remove
+
+@return - none
+*/
+inline
+void
+srp_remove_pending_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN srp_send_descriptor_t *p_descriptor )
+{
+	SRP_ENTER( SRP_DBG_DATA );
+	__srp_remove_send_descriptor( p_descriptor,
+		&p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+}
+
+/* __srp_remove_lun_head_send_descriptor */
+/*!
+Removes and returns the first send descriptor on the given list for the LUN specified
+
+@param lun - LUN for which to remove the first matching send descriptor
+@param descriptors_list - pointer to the list
+@param p_lock - pointer to the lock protecting the list
+
+@return - first matching srp_send_descriptor, or NULL if none is found
+*/
+srp_send_descriptor_t*
+__srp_remove_lun_head_send_descriptor(
+	IN UCHAR lun,
+	IN cl_qlist_t *descriptors_list,
+	IN cl_spinlock_t *p_lock)
+{
+	srp_send_descriptor_t *p_descriptor;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	cl_spinlock_acquire ( p_lock );
+
+	p_descriptor = (srp_send_descriptor_t *)cl_qlist_head( descriptors_list );
+	CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );
+
+	while ( p_descriptor != (srp_send_descriptor_t *)cl_qlist_end( descriptors_list ) )
+	{
+		if ( p_descriptor->p_srb->Lun == lun )
+		{
+			CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );
+			cl_qlist_remove_item( descriptors_list, &p_descriptor->list_item );
+			break;
+		}
+
+		p_descriptor = (srp_send_descriptor_t *)cl_qlist_next( &p_descriptor->list_item );
+		CL_ASSERT( descriptors_list == p_descriptor->list_item.p_list );
+	}
+
+	if ( p_descriptor == (srp_send_descriptor_t *)cl_qlist_end( descriptors_list ) )
+	{
+		p_descriptor = NULL;
+	}
+
+	cl_spinlock_release ( p_lock );
+
+	SRP_EXIT( SRP_DBG_DATA );
+
+	return ( p_descriptor );
+}
+
+
+/* srp_remove_lun_head_send_descriptor */
+/*!
+Removes and returns the send descriptor from the head of the sent list for the LUN specified
+
+@param p_descriptors - pointer to the descriptors structure
+@param lun - LUN for which to remove the first matching send descriptor
+
+@return - first matching srp_send_descriptor on the sent list, or NULL if none is found
+*/
+srp_send_descriptor_t*
+srp_remove_lun_head_send_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN UCHAR lun )
+{
+	srp_send_descriptor_t *p_descriptor;
+
+	SRP_ENTER( SRP_DBG_DATA );
+	p_descriptor = __srp_remove_lun_head_send_descriptor(
+		lun, &p_descriptors->sent_descriptors, &p_descriptors->sent_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+
+	return ( p_descriptor );
+}
+
+/* srp_remove_lun_head_pending_descriptor */
+/*!
+Removes and returns the send descriptor from the head of the pending list for the LUN specified
+
+@param p_descriptors - pointer to the descriptors structure
+@param lun - LUN for which to remove the first matching send descriptor
+
+@return - first matching srp_send_descriptor on the pending list, or NULL if none is found
+*/
+srp_send_descriptor_t*
+srp_remove_lun_head_pending_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN UCHAR lun )
+{
+	srp_send_descriptor_t *p_descriptor;
+
+	SRP_ENTER( SRP_DBG_DATA );
+	p_descriptor = __srp_remove_lun_head_send_descriptor(
+		lun, &p_descriptors->pending_descriptors, &p_descriptors->pending_list_lock );
+	SRP_EXIT( SRP_DBG_DATA );
+
+	return ( p_descriptor );
+}
+
+/* srp_post_send_descriptor */
+/*!
+Posts send descriptor across the connection specified and,
+if successful, adds it to the sent descriptors list
+
+@param p_descriptors - pointer to the descriptors structure
+@param p_descriptor - pointer to the descriptor to send
+@param p_session - pointer to the session used to send
+
+@return - result of post operation, or IB_ERROR if not connected
+*/
+ib_api_status_t
+srp_post_send_descriptor(
+	IN srp_descriptors_t *p_descriptors,
+	IN srp_send_descriptor_t *p_descriptor,
+	IN srp_session_t *p_session )
+{
+	ib_api_status_t status = IB_ERROR;
+	srp_connection_t *p_connection;
+	ib_al_ifc_t *p_ifc;
+
+	SRP_ENTER( SRP_DBG_DATA );
+
+	p_connection = &p_session->connection;
+	p_ifc = &p_session->hca.p_hba->ifc;
+
+	if ( p_connection->state == SRP_CONNECTED )
+	{
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("wr_id = 0x%I64x.\n", p_descriptor->wr.wr_id) );
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("wr_type = 0x%x.\n", p_descriptor->wr.wr_type) );
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("send_opt = 0x%x.\n", p_descriptor->wr.send_opt) );
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("num_ds = 0x%x.\n", p_descriptor->wr.num_ds) );
+
+		SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+			("Posting I/O for Function = %s(0x%x), Path = 0x%x, "
+			"Target = 0x%x, Lun = 0x%x, tag 0x%I64x\n",
+			g_srb_function_name[p_descriptor->p_srb->Function],
+			p_descriptor->p_srb->Function,
+			p_descriptor->p_srb->PathId,
+			p_descriptor->p_srb->TargetId,
+			p_descriptor->p_srb->Lun,
+			get_srp_command_tag( (srp_cmd_t *)p_descriptor->data_segment )) );
+
+		if ( get_srp_iu_buffer_type( (srp_iu_buffer_t *)p_descriptor->data_segment ) == SRP_CMD )
+		{
+			p_descriptor->ds[0].length = get_srp_command_length( (srp_cmd_t *)p_descriptor->data_segment );
+		}
+		else /* task type */
+		{
+			p_descriptor->ds[0].length = get_srp_tsk_mgmt_length( (srp_tsk_mgmt_t *)p_descriptor->data_segment );
+		}
+
+		ASSERT( p_descriptor->ds[0].length <= p_connection->init_to_targ_iu_sz );
+
+		srp_add_send_descriptor( p_descriptors, p_descriptor );
+
+		status = p_ifc->post_send(
+			p_connection->h_qp, &p_descriptor->wr, NULL );
+		if ( status != IB_SUCCESS )
+		{
+			/* Remove From Sent List */
+			srp_remove_send_descriptor( p_descriptors, p_descriptor );
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Failed to post send descriptor. "
+				"ib_post_send status = 0x%x tag = 0x%I64x\n",
+				status,
+				get_srp_command_tag( (srp_cmd_t *)p_descriptor->data_segment )) );
+		}
+	}
+	else
+	{
+		SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Attempting to post to an unconnected session.\n") );
+	}
+
+	SRP_EXIT( SRP_DBG_DATA );
+
+	return ( status );
+}
+
+/* srp_find_matching_send_descriptor */
+/*!
+Given a received response find the matching send descriptor +which originated the request to the VFx and remove it from +the sent descriptor list + +@param p_descriptors - pointer to the descriptors structure +@param tag - tag of descriptor to find + +@return - pointer to send descriptor or NULL if not found +*/ +srp_send_descriptor_t* +srp_find_matching_send_descriptor( + IN srp_descriptors_t *p_descriptors, + IN uint64_t tag ) +{ + srp_send_descriptor_t *p_send_descriptor; + + SRP_ENTER( SRP_DBG_DATA ); + + cl_spinlock_acquire( &p_descriptors->sent_list_lock ); + + p_send_descriptor = (srp_send_descriptor_t *)cl_qlist_head( &p_descriptors->sent_descriptors ); + CL_ASSERT( &p_descriptors->sent_descriptors == p_send_descriptor->list_item.p_list ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("rsp tag = 0x%I64x.\n", tag) ); + + while ( p_send_descriptor != (srp_send_descriptor_t *)cl_qlist_end( &p_descriptors->sent_descriptors ) ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, ("cmd tag = 0x%I64x.\n", + get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment )) ); + + if ( get_srp_command_tag( (srp_cmd_t *)p_send_descriptor->data_segment ) == tag ) + { + CL_ASSERT( &p_descriptors->sent_descriptors == p_send_descriptor->list_item.p_list ); + cl_qlist_remove_item( &p_descriptors->sent_descriptors, &p_send_descriptor->list_item ); + goto exit; + } + + p_send_descriptor = (srp_send_descriptor_t *)cl_qlist_next( &p_send_descriptor->list_item ); + } + + /* This is not an error. The request may have been aborted */ + p_send_descriptor = NULL; + +exit: + cl_spinlock_release( &p_descriptors->sent_list_lock ); + + SRP_EXIT( SRP_DBG_DATA ); + + return ( p_send_descriptor ); +} + +/* srp_build_send_descriptor */ +/*! +Initializes a send descriptor's fields + +@param p_dev_ext - our context pointer +@param p_srb - scsi request to send to target +@param p_srp_conn_info - information about our connection to the VFx + +@return - none +*/ +void +srp_build_send_descriptor( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb, + IN p_srp_conn_info_t p_srp_conn_info ) +{ + srp_send_descriptor_t *p_send_descriptor = (srp_send_descriptor_t *)p_srb->SrbExtension; + STOR_PHYSICAL_ADDRESS physical_address; + ULONG length; + + SRP_ENTER( SRP_DBG_DATA ); + + cl_memclr( p_send_descriptor, (sizeof ( srp_send_descriptor_t ) - SRP_MAX_IU_SIZE) ); + + physical_address = StorPortGetPhysicalAddress( p_dev_ext, p_srb, p_send_descriptor->data_segment, &length ); + + p_send_descriptor->wr.wr_id = (uint64_t)((uintn_t)p_send_descriptor); + p_send_descriptor->wr.wr_type = WR_SEND; + p_send_descriptor->wr.send_opt = (p_srp_conn_info->signal_send_completion == TRUE) ? 
IB_SEND_OPT_SIGNALED : 0; + p_send_descriptor->wr.num_ds = 1; + p_send_descriptor->wr.ds_array = p_send_descriptor->ds; + p_send_descriptor->tag = p_srp_conn_info->tag; + p_send_descriptor->p_srb = p_srb; + p_send_descriptor->ds[0].vaddr = p_srp_conn_info->vaddr + physical_address.QuadPart; + p_send_descriptor->ds[0].length = p_srp_conn_info->init_to_targ_iu_sz; + p_send_descriptor->ds[0].lkey = p_srp_conn_info->lkey; + p_send_descriptor->p_fmr_el = NULL; + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("hca vaddr = 0x%I64x.\n", p_srp_conn_info->vaddr)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("physical_address = 0x%I64x.\n", physical_address.QuadPart)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("IU vaddr = 0x%I64x.\n", p_send_descriptor->ds[0].vaddr)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("length = %d.\n", p_send_descriptor->ds[0].length)); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("lkey = 0x%x.\n", p_send_descriptor->ds[0].lkey)); + + SRP_EXIT( SRP_DBG_DATA ); +} diff --git a/branches/Ndi/ulp/srp/kernel/srp_descriptors.h b/branches/Ndi/ulp/srp/kernel/srp_descriptors.h new file mode 100644 index 00000000..9f7c60d7 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_descriptors.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_DESCRIPTORS_H_INCLUDED +#define SRP_DESCRIPTORS_H_INCLUDED + + +#include +#include +#include + +#include "srp_connection.h" +#include "srp_data.h" +#include "srp_debug.h" + +/* + * Number of data segments. + */ +#define SRP_NUM_SGE 1 + +typedef struct _srp_session *p_srp_session_t; +typedef struct _srp_conn_info *p_srp_conn_info_t; + +/* SRP SCSI request block extension. 
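+ * StorPort allocates one of these per request: DriverEntry registers
+ * sizeof(srp_send_descriptor_t) as the SRB extension size (raised to the
+ * HBA maximum in srp_find_adapter). The IU is built in data_segment,
+ * which must stay the last member; srp_build_send_descriptor takes its
+ * physical address via StorPortGetPhysicalAddress for the gather entry.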
*/ +typedef struct _srp_send_descriptor +{ + /* Leave this as first member variable */ + cl_list_item_t list_item; + ib_send_wr_t wr; + uint64_t tag; + SCSI_REQUEST_BLOCK *p_srb; + mlnx_fmr_pool_el_t p_fmr_el; + ib_local_ds_t ds[SRP_NUM_SGE]; + /* must be the last*/ + uint8_t data_segment[SRP_MAX_IU_SIZE]; +}srp_send_descriptor_t; + +typedef struct _srp_recv_descriptor +{ + ib_recv_wr_t wr; + ib_local_ds_t ds[SRP_NUM_SGE]; + uint8_t *p_data_segment; +}srp_recv_descriptor_t; + +typedef struct _srp_descriptors +{ + BOOLEAN initialized; + + cl_spinlock_t sent_list_lock; + cl_qlist_t sent_descriptors; + cl_spinlock_t pending_list_lock; + cl_qlist_t pending_descriptors; + + uint32_t recv_descriptor_count; + srp_recv_descriptor_t *p_recv_descriptors_array; + + uint32_t recv_data_segment_size; + void *p_recv_data_segments_array; + + net32_t recv_lkey; + net32_t recv_rkey; + ib_mr_handle_t h_recv_mr; + +} srp_descriptors_t; + +ib_api_status_t +srp_init_descriptors( + IN OUT srp_descriptors_t *p_descriptors, + IN uint32_t recv_descriptor_count, + IN uint32_t recv_data_segment_size, + IN ib_al_ifc_t* const p_ifc, + IN ib_pd_handle_t h_pd, + IN ib_qp_handle_t h_qp ); + +ib_api_status_t +srp_destroy_descriptors( + IN OUT srp_descriptors_t *p_descriptors ); + +srp_send_descriptor_t* +srp_remove_lun_head_send_descriptor( + IN srp_descriptors_t *p_descriptors, + IN UCHAR lun ); + +srp_send_descriptor_t* +srp_remove_lun_head_pending_descriptor( + IN srp_descriptors_t *p_descriptors, + IN UCHAR lun ); + +void +srp_add_pending_descriptor( + IN srp_descriptors_t *p_descriptors, + IN srp_send_descriptor_t *p_descriptor ); + +ib_api_status_t +srp_post_send_descriptor( + IN srp_descriptors_t *p_descriptors, + IN srp_send_descriptor_t *p_descriptor, + IN struct _srp_session *p_session ); + +srp_send_descriptor_t* +srp_find_matching_send_descriptor( + IN srp_descriptors_t *p_descriptors, + IN uint64_t tag ) ; + +void +srp_build_send_descriptor( + IN PVOID p_dev_ext, + IN OUT PSCSI_REQUEST_BLOCK p_srb, + IN p_srp_conn_info_t p_srp_conn_info ); + +#endif /* SRP_DESCRIPTORS_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_driver.c b/branches/Ndi/ulp/srp/kernel/srp_driver.c new file mode 100644 index 00000000..c5d893a3 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_driver.c @@ -0,0 +1,945 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "srp_data.h" +#include "srp_data_path.h" +#include "srp_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_driver.tmh" +#endif +#include "srp_descriptors.h" +#include "srp_hba.h" +#include "srp_session.h" + +#include +#include +#include + + +#define SCSI_MAXIMUM_TRANSFER_SIZE (1024 * 1024) + +BOOLEAN g_srp_system_shutdown = FALSE; + + +uint32_t g_srp_dbg_level = TRACE_LEVEL_ERROR; +uint32_t g_srp_dbg_flags = 0x0000ffff; +uint32_t g_srp_mode_flags = 0; + +char g_srb_function_name[][32] = +{ + "EXECUTE_SCSI", // 0x00 + "CLAIM_DEVICE", // 0x01 + "IO_CONTROL", // 0x02 + "RECEIVE_EVENT", // 0x03 + "RELEASE_QUEUE", // 0x04 + "ATTACH_DEVICE", // 0x05 + "RELEASE_DEVICE", // 0x06 + "SHUTDOWN", // 0x07 + "FLUSH", // 0x08 + "", // 0x09 + "", // 0x0A + "", // 0x0B + "", // 0x0C + "", // 0x0D + "", // 0x0E + "", // 0x0F + "ABORT_COMMAND", // 0x10 + "RELEASE_RECOVERY", // 0x11 + "RESET_BUS", // 0x12 + "RESET_DEVICE", // 0x13 + "TERMINATE_IO", // 0x14 + "FLUSH_QUEUE", // 0x15 + "REMOVE_DEVICE", // 0x16 + "WMI", // 0x17 + "LOCK_QUEUE", // 0x18 + "UNLOCK_QUEUE", // 0x19 + "", // 0x1A + "", // 0x1B + "", // 0x1C + "", // 0x1D + "", // 0x1E + "", // 0x1F + "RESET_LOGICAL_UNIT", // 0x20 + "SET_LINK_TIMEOUT", // 0x21 + "LINK_TIMEOUT_OCCURRED", // 0x22 + "LINK_TIMEOUT_COMPLETE" // 0x23 +}; + +char g_srb_status_name[][32] = +{ + "PENDING", // 0x00 + "SUCCESS", // 0x01 + "ABORTED", // 0x02 + "ABORT_FAILED", // 0x03 + "ERROR", // 0x04 + "BUSY", // 0x05 + "INVALID_REQUEST", // 0x06 + "INVALID_PATH_ID", // 0x07 + "NO_DEVICE", // 0x08 + "TIMEOUT", // 0x09 + "SELECTION_TIMEOUT", // 0x0A + "COMMAND_TIMEOUT", // 0x0B + "", // 0x0C + "MESSAGE_REJECTED", // 0x0D + "BUS_RESET", // 0x0E + "PARITY_ERROR", // 0x0F + "REQUEST_SENSE_FAILED", // 0x10 + "NO_HBA", // 0x11 + "DATA_OVERRUN", // 0x12 + "UNEXPECTED_BUS_FREE", // 0x13 + "PHASE_SEQUENCE_FAILURE", // 0x14 + "BAD_SRB_BLOCK_LENGTH", // 0x15 + "REQUEST_FLUSHED", // 0x16 + "", // 0x17 + "", // 0x18 + "", // 0x19 + "", // 0x1A + "", // 0x1B + "", // 0x1C + "", // 0x1D + "", // 0x1E + "", // 0x1F + "INVALID_LUN", // 0x20 + "INVALID_TARGET_ID", // 0x21 + "BAD_FUNCTION", // 0x22 + "ERROR_RECOVERY", // 0x23 + "NOT_POWERED", // 0x24 + "LINK_DOWN" // 0x25 +}; + +char g_srb_scsi_status_name[][32] = +{ + "SCSISTAT_GOOD", //0x00 + "", //0x01 + " SCSISTAT_CHECK_CONDITION", //0x02 + "", //0x03 + " SCSISTAT_CONDITION_MET", //0x04 + "", //0x05 + "", //0x06 + "", //0x07 + " SCSISTAT_BUSY", //0x08 + "", //0x09 + "", //0x0A + "", //0x0B + "", //0x0C + "", //0x0D + "", //0x0E + "", //0x0F + " SCSISTAT_INTERMEDIATE", //0x10 + "", //0x11 + "", //0x12 + "", //0x13 + " SCSISTAT_INTERMEDIATE_COND_MET", //0x14 + "", //0x15 + "", //0x16 + "", //0x17 + " SCSISTAT_RESERVATION_CONFLICT", //0x18 + "", //0x19 + "", // 0x1A + "", // 0x1B + "", // 0x1C + "", // 0x1D + "", // 0x1E + "", // 0x1F + "", //0x20 + "", //0x21 + " SCSISTAT_COMMAND_TERMINATED", //0x22 + "", //0x23 + "", //0x24 + "", //0x25 + "", //0x26 + "", //0x27 + " SCSISTAT_QUEUE_FULL", //0x28 +}; + +DRIVER_OBJECT *gp_drv_obj; +cl_obj_t g_drv_obj; + +/* Mutex protecting the next lower device object pointer. 
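+ * srp_dispatch_pnp acquires it around IRP_MN_START_DEVICE, so only one
+ * adapter start (and one valid gp_self_do) is in flight at any time.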
*/
+KMUTEX g_srp_pnp_mutex;
+
+PDRIVER_DISPATCH gpfn_pnp;
+PDRIVER_ADD_DEVICE gpfn_add_device;
+PDRIVER_UNLOAD gpfn_unload;
+
+
+static NTSTATUS
+__read_registry(
+	IN UNICODE_STRING* const p_Param_Path );
+
+NTSTATUS
+srp_add_device(
+	IN DRIVER_OBJECT *p_drv_obj,
+	IN DEVICE_OBJECT *p_pdo );
+
+NTSTATUS
+srp_dispatch_pnp(
+	IN DEVICE_OBJECT *p_dev_obj,
+	IN IRP *p_irp );
+
+BOOLEAN
+srp_init(
+	IN PVOID p_dev_ext );
+
+BOOLEAN
+srp_start_io(
+	IN PVOID p_dev_ext,
+	IN PSCSI_REQUEST_BLOCK p_srb );
+
+BOOLEAN
+srp_isr(
+	IN PVOID p_dev_ext );
+
+ULONG
+srp_find_adapter(
+	IN PVOID p_dev_ext,
+	IN PVOID resv1,
+	IN PVOID resv2,
+	IN PCHAR arg_str,
+	IN OUT PPORT_CONFIGURATION_INFORMATION p_config,
+	OUT PBOOLEAN resv3 );
+
+BOOLEAN
+srp_reset(
+	IN PVOID p_dev_ext,
+	IN ULONG path_id );
+
+SCSI_ADAPTER_CONTROL_STATUS
+srp_adapter_ctrl(
+	IN PVOID p_dev_ext,
+	IN SCSI_ADAPTER_CONTROL_TYPE ctrl_type,
+	IN PVOID params );
+
+BOOLEAN
+srp_build_io(
+	IN PVOID p_dev_ext,
+	IN PSCSI_REQUEST_BLOCK p_srb );
+
+static void
+srp_unload(
+	IN DRIVER_OBJECT *p_drv_obj );
+
+static void
+__srp_free(
+	IN cl_obj_t *p_obj );
+
+#if DBG
+
+void
+srp_x_print(
+	IN void *p_session );
+
+void
+srp_x_clean(
+	IN void *p_session );
+
+void *gp_session = NULL;
+
+#endif
+
+
+static NTSTATUS
+__read_registry(
+	IN UNICODE_STRING* const p_registry_path )
+{
+	NTSTATUS status;
+	/* Remember the terminating entry in the table below. */
+	RTL_QUERY_REGISTRY_TABLE table[4];
+	UNICODE_STRING param_path;
+
+	SRP_ENTER( SRP_DBG_PNP );
+
+	RtlInitUnicodeString( &param_path, NULL );
+	param_path.MaximumLength = p_registry_path->Length +
+		sizeof(L"\\Parameters");
+	param_path.Buffer = cl_zalloc( param_path.MaximumLength );
+	if( !param_path.Buffer )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Failed to allocate parameters path buffer.\n") );
+		return STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	RtlAppendUnicodeStringToString( &param_path, p_registry_path );
+	RtlAppendUnicodeToString( &param_path, L"\\Parameters" );
+
+	/*
+	 * Clear the table. This clears all the query callback pointers,
+	 * and sets up the terminating table entry.
+	 */
+	cl_memclr( table, sizeof(table) );
+
+	/* Setup the table entries. */
+	table[0].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[0].Name = L"DebugLevel";
+	table[0].EntryContext = &g_srp_dbg_level;
+	table[0].DefaultType = REG_DWORD;
+	table[0].DefaultData = &g_srp_dbg_level;
+	table[0].DefaultLength = sizeof(ULONG);
+
+	table[1].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[1].Name = L"DebugFlags";
+	table[1].EntryContext = &g_srp_dbg_flags;
+	table[1].DefaultType = REG_DWORD;
+	table[1].DefaultData = &g_srp_dbg_flags;
+	table[1].DefaultLength = sizeof(ULONG);
+
+	table[2].Flags = RTL_QUERY_REGISTRY_DIRECT;
+	table[2].Name = L"ModeFlags";
+	table[2].EntryContext = &g_srp_mode_flags;
+	table[2].DefaultType = REG_DWORD;
+	table[2].DefaultData = &g_srp_mode_flags;
+	table[2].DefaultLength = sizeof(ULONG);
+
+
+	/* Have at it!
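+	 * RTL_QUERY_REGISTRY_DIRECT stores each value straight into the
+	 * EntryContext buffer; when the key or an individual value is
+	 * absent, the defaults wired up above (the globals' current
+	 * values) are kept.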
*/ + status = RtlQueryRegistryValues( RTL_REGISTRY_ABSOLUTE, + param_path.Buffer, table, NULL, NULL ); + +#ifndef EVENT_TRACING + if( g_srp_dbg_flags & SRP_DBG_ERR ) + g_srp_dbg_flags |= CL_DBG_ERROR; +#endif + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("debug level %d debug flags 0x%.8x\n", + g_srp_dbg_level, + g_srp_dbg_flags) ); + + cl_free( param_path.Buffer ); + SRP_EXIT( SRP_DBG_PNP ); + return status; +} + + +ULONG +DriverEntry( + IN DRIVER_OBJECT *p_drv_obj, + IN UNICODE_STRING *p_registry_path ) +{ + ULONG status; + HW_INITIALIZATION_DATA hw_data; + cl_status_t cl_status; + + SRP_ENTER( SRP_DBG_PNP ); + +#if defined(EVENT_TRACING) + WPP_INIT_TRACING( p_drv_obj, p_registry_path ); +#endif + + status = CL_INIT; + if( !NT_SUCCESS(status) ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("cl_init returned %08X.\n", status) ); + return status; + } + + gp_drv_obj = p_drv_obj; + + /* Get the registry values. */ + status = __read_registry( p_registry_path ); + if( !NT_SUCCESS(status) ) + { + CL_DEINIT; + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("__read_registry returned %08x.\n", status) ); + return status; + } + + cl_obj_construct( &g_drv_obj, SRP_OBJ_TYPE_DRV ); + + KeInitializeMutex( &g_srp_pnp_mutex, 0 ); + + cl_memclr( &hw_data, sizeof(HW_INITIALIZATION_DATA) ); + + hw_data.HwInitializationDataSize = sizeof(HW_INITIALIZATION_DATA); + + hw_data.AdapterInterfaceType = Internal; + + /* Miniport driver routines */ + hw_data.HwInitialize = srp_init; + hw_data.HwStartIo = srp_start_io; +// hw_data.HwInterrupt = srp_isr; + hw_data.HwInterrupt = NULL; + hw_data.HwFindAdapter = srp_find_adapter; + hw_data.HwResetBus = srp_reset; + hw_data.HwAdapterControl = srp_adapter_ctrl; + hw_data.HwBuildIo = srp_build_io; + hw_data.HwDmaStarted = NULL; + hw_data.HwAdapterState = NULL; + + /* Extension sizes. */ + hw_data.DeviceExtensionSize = sizeof(srp_ext_t); + /* TODO: Do we need per-LU data? */ + hw_data.SpecificLuExtensionSize = 0; + hw_data.SrbExtensionSize = sizeof(srp_send_descriptor_t); + + /* Driver parameters. */ + hw_data.NumberOfAccessRanges = 1; + /* TODO: Can this be STOR_MAP_NO_BUFFERS? */ + hw_data.MapBuffers = STOR_MAP_NON_READ_WRITE_BUFFERS; + + hw_data.NeedPhysicalAddresses = TRUE; + hw_data.TaggedQueuing = TRUE; + hw_data.AutoRequestSense = TRUE; + hw_data.MultipleRequestPerLu = TRUE; + + cl_status = + cl_obj_init( &g_drv_obj, CL_DESTROY_SYNC, NULL, NULL, __srp_free ); + if( cl_status == CL_SUCCESS ) + { + // Invoke the port initialization function. + status = StorPortInitialize(p_drv_obj, p_registry_path, &hw_data, NULL); + if( NT_SUCCESS( status ) ) + { + /* + * Overwrite the PnP entrypoint, but save the original + * so we can call it. 
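+			 * Chaining through the saved pointers lets the miniport
+			 * observe PnP, AddDevice and unload activity while still
+			 * passing everything on to the handlers StorPort installed.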
+			 */
+			gpfn_pnp = p_drv_obj->MajorFunction[IRP_MJ_PNP];
+			p_drv_obj->MajorFunction[IRP_MJ_PNP] = srp_dispatch_pnp;
+			gpfn_add_device = p_drv_obj->DriverExtension->AddDevice;
+			p_drv_obj->DriverExtension->AddDevice = srp_add_device;
+			gpfn_unload = p_drv_obj->DriverUnload;
+			p_drv_obj->DriverUnload = srp_unload;
+		}
+		else
+		{
+			CL_DEINIT;
+			SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("StorPortInitialize returned 0x%x.\n", status) );
+		}
+	}
+	else
+	{
+		CL_DEINIT;
+		status = (ULONG)STATUS_INSUFFICIENT_RESOURCES;
+	}
+
+	SRP_PRINT_EXIT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+		("DriverEntry returning status of 0x%x.\n", status) );
+	return status;
+}
+
+static void
+srp_unload(
+	IN DRIVER_OBJECT *p_drv_obj )
+{
+	SRP_ENTER( SRP_DBG_PNP );
+#if defined(EVENT_TRACING)
+	WPP_CLEANUP( p_drv_obj );
+#endif
+
+	/* Kill all SRP objects. */
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG,
+		("Destroying all SRP objects.\n") );
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		("Driver Object ref_cnt = %d\n", g_drv_obj.ref_cnt) );
+	cl_obj_destroy( &g_drv_obj );
+
+	CL_DEINIT;
+
+	/* Invoke the port driver's unload routine. */
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG,
+		("Invoking the port driver's unload routine.\n") );
+	gpfn_unload( p_drv_obj );
+
+	SRP_EXIT( SRP_DBG_PNP );
+}
+
+
+static void
+__srp_free(
+	IN cl_obj_t *p_obj )
+{
+//	CL_ASSERT( p_obj == &g_drv_obj );
+	UNUSED_PARAM ( p_obj );
+	cl_obj_deinit( &g_drv_obj );
+}
+
+
+NTSTATUS
+srp_add_device(
+	IN DRIVER_OBJECT *p_drv_obj,
+	IN DEVICE_OBJECT *p_pdo )
+{
+	NTSTATUS status;
+
+	SRP_ENTER( SRP_DBG_PNP );
+
+	status = gpfn_add_device( p_drv_obj, p_pdo );
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+		("srp_add_device status = 0x%x.\n", status) );
+
+	SRP_EXIT( SRP_DBG_PNP );
+	return status;
+}
+
+
+NTSTATUS
+srp_dispatch_pnp(
+	IN DEVICE_OBJECT *p_dev_obj,
+	IN IRP *p_irp )
+{
+	NTSTATUS status;
+	IO_STACK_LOCATION *p_stack;
+	UCHAR minor;
+	SRP_ENTER( SRP_DBG_PNP );
+
+	p_stack = IoGetCurrentIrpStackLocation( p_irp );
+	minor = p_stack->MinorFunction;
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+		("Minor PNP Function = %d.\n", minor) );
+
+	if( minor == IRP_MN_START_DEVICE )
+	{
+		NTSTATUS wait_status;
+
+		wait_status = KeWaitForMutexObject(
+			&g_srp_pnp_mutex, Executive, KernelMode, FALSE, NULL );
+
+		SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+			("KeWaitForMutexObject status = 0x%x.\n", wait_status) );
+		gp_self_do = p_dev_obj;
+	}
+	status = gpfn_pnp( p_dev_obj, p_irp );
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+		("gpfn_pnp status = 0x%x.\n", status) );
+
+	if( minor == IRP_MN_START_DEVICE )
+	{
+		LONG release_status;
+		gp_self_do = NULL;
+		release_status = KeReleaseMutex( &g_srp_pnp_mutex, FALSE );
+		SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+			("KeReleaseMutex status = %d.\n", release_status) );
+	}
+
+	#if DBG
+	/* statistics */
+
+	/* this function is sometimes called at the beginning of the test with an
+	   IRP_MN_QUERY_DEVICE_RELATIONS (7) request.
Use this fact to print the statistics */
+	{
+		/* it is sometimes called about once every 50 msec, so we print once every 40 calls */
+		static int interval = 40; /* 2 sec */
+		static int cnt = 0;
+		if (++cnt >= interval)
+		{
+			cnt = 0;
+			srp_x_print( gp_session );
+			srp_x_clean( gp_session );
+		}
+	}
+
+	#endif
+
+	SRP_EXIT( SRP_DBG_PNP );
+	return status;
+}
+
+
+ULONG
+srp_find_adapter(
+	IN PVOID p_dev_ext,
+	IN PVOID resv1,
+	IN PVOID resv2,
+	IN PCHAR arg_str,
+	IN OUT PPORT_CONFIGURATION_INFORMATION p_config,
+	OUT PBOOLEAN resv3 )
+{
+	srp_ext_t *p_ext;
+	ib_api_status_t ib_status;
+
+	SRP_ENTER( SRP_DBG_PNP );
+
+	UNUSED_PARAM( resv1 );
+	UNUSED_PARAM( resv2 );
+	UNUSED_PARAM( resv3 );
+	UNUSED_PARAM( arg_str );
+
+	if( KeGetCurrentIrql() >= DISPATCH_LEVEL )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Improper IRQL!\n") );
+		return SP_RETURN_ERROR;
+	}
+
+	p_ext = (srp_ext_t*)p_dev_ext;
+
+	ib_status = srp_hba_create( &g_drv_obj, p_ext );
+	if( ib_status != IB_SUCCESS )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("srp_hba_create returned %d\n", ib_status) );
+		return SP_RETURN_ERROR;
+	}
+
+	p_config->SrbExtensionSize = MAX( p_ext->p_hba->max_srb_ext_sz, sizeof( srp_send_descriptor_t ));
+	//CL_ASSERT( p_config->SrbExtensionSize >= sizeof( srp_send_descriptor_t ) );
+
+	p_config->MaximumTransferLength = SCSI_MAXIMUM_TRANSFER_SIZE;
+	p_config->AlignmentMask = 0; /* byte alignment */
+	p_config->NumberOfBuses = 1;
+	p_config->ScatterGather = TRUE;
+	p_config->Master = TRUE; // The HBA is a "bus" master.
+//	p_config->CachesData = TRUE; // Assume the HBA does cache data.
+	p_config->CachesData = FALSE; // Assume the HBA does not cache data.
+	p_config->MaximumNumberOfTargets = p_ext->p_hba->ioc_info.profile.num_svc_entries;
+	p_config->MaximumNumberOfLogicalUnits = SCSI_MAXIMUM_LUNS_PER_TARGET;
+	p_config->MultipleRequestPerLu = TRUE;
+	p_config->SynchronizationModel = StorSynchronizeFullDuplex;
+	p_config->MapBuffers = STOR_MAP_NON_READ_WRITE_BUFFERS;
+	p_config->ResetTargetSupported = FALSE;
+
+//	p_config->InitiatorBusId[0] = 127;
+//	p_config->DeviceExtensionSize = sizeof( srp_ext_t );
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		("NumberOfPhysicalBreaks passed in = %d.\n", p_config->NumberOfPhysicalBreaks) );
+
+	if ( p_config->NumberOfPhysicalBreaks == SP_UNINITIALIZED_VALUE )
+	{
+		p_config->NumberOfPhysicalBreaks = p_ext->p_hba->max_sg - 1;
+	}
+	else
+	{
+		if (g_srp_mode_flags & SRP_MODE_SG_UNLIMITED)
+			// This is prohibited by the DDK, but seems to work
+			p_config->NumberOfPhysicalBreaks = p_ext->p_hba->max_sg - 1;
+		else
+			p_config->NumberOfPhysicalBreaks = MIN( p_ext->p_hba->max_sg - 1, p_config->NumberOfPhysicalBreaks );
+	}
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		( "max_sg %d, New NumberOfPhysicalBreaks %d\n",
+		p_ext->p_hba->max_sg, p_config->NumberOfPhysicalBreaks));
+
+	SRP_EXIT( SRP_DBG_PNP );
+	return SP_RETURN_FOUND;
+}
+
+BOOLEAN
+srp_init(
+	IN PVOID p_dev_ext )
+{
+	SRP_ENTER( SRP_DBG_PNP );
+
+	UNUSED_PARAM( p_dev_ext );
+
+	SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP,
+		("called at IRQL %d\n", KeGetCurrentIrql()) );
+
+	SRP_EXIT( SRP_DBG_PNP );
+	return TRUE;
+}
+
+BOOLEAN
+srp_start_io(
+	IN PVOID p_dev_ext,
+	IN PSCSI_REQUEST_BLOCK p_srb )
+{
+	SRP_ENTER( SRP_DBG_DATA );
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA,
+		("Starting I/O for Function = %s(0x%x), Path = 0x%x, "
+		"Target = 0x%x, Lun = 0x%x\n",
+		g_srb_function_name[p_srb->Function],
+		p_srb->Function,
+		p_srb->PathId,
+
p_srb->TargetId, + p_srb->Lun) ); + + CL_ASSERT( p_srb->SrbExtension != NULL ); + + // Check the operation here + switch ( p_srb->Function ) + { + case SRB_FUNCTION_EXECUTE_SCSI: + srp_post_io_request( p_dev_ext, p_srb ); + break; + + case SRB_FUNCTION_ABORT_COMMAND: + srp_abort_command( p_dev_ext, p_srb ); + break; +#if !defined(WinXP) + case SRB_FUNCTION_RESET_LOGICAL_UNIT: +#endif + case SRB_FUNCTION_RESET_DEVICE: + srp_lun_reset( p_dev_ext, p_srb ); + break; + + case SRB_FUNCTION_SHUTDOWN: /* Only receive this if CachesData is TRUE in PORT_CONFIGURATION_INFORMATION */ + { + srp_hba_t *p_hba = ((srp_ext_t *)p_dev_ext)->p_hba; + srp_session_t *p_srp_session = p_hba->session_list[p_srb->TargetId]; + + g_srp_system_shutdown = TRUE; + + if ( (p_srb->Lun == 0) && (p_srp_session != NULL) ) + { + p_hba->session_list[p_srb->TargetId] = NULL; + + CL_ASSERT( p_srp_session != NULL ); + + p_srp_session->p_shutdown_srb = p_srb; + cl_obj_destroy( &p_srp_session->obj ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for " + "Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + } + else + { + p_srb->SrbStatus = SRB_STATUS_SUCCESS; + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for " + "Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + } + break; + } + + case SRB_FUNCTION_FLUSH: /* Only receive this if CachesData is TRUE in PORT_CONFIGURATION_INFORMATION */ + p_srb->SrbStatus = SRB_STATUS_SUCCESS; + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for " + "Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + break; + + case SRB_FUNCTION_IO_CONTROL: /***** May Need To Support *****/ + + case SRB_FUNCTION_RESET_BUS: + case SRB_FUNCTION_TERMINATE_IO: + case SRB_FUNCTION_RELEASE_RECOVERY: + case SRB_FUNCTION_RECEIVE_EVENT: + case SRB_FUNCTION_LOCK_QUEUE: + case SRB_FUNCTION_UNLOCK_QUEUE: + case SRB_FUNCTION_CLAIM_DEVICE: + case SRB_FUNCTION_RELEASE_QUEUE: + case SRB_FUNCTION_ATTACH_DEVICE: + case SRB_FUNCTION_RELEASE_DEVICE: + case SRB_FUNCTION_FLUSH_QUEUE: + case SRB_FUNCTION_REMOVE_DEVICE: + case SRB_FUNCTION_WMI: +#if !defined(WinXP) + case SRB_FUNCTION_SET_LINK_TIMEOUT: + case SRB_FUNCTION_LINK_TIMEOUT_OCCURRED: + case SRB_FUNCTION_LINK_TIMEOUT_COMPLETE: +#endif + default: + p_srb->SrbStatus = SRB_STATUS_INVALID_REQUEST; + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for " + "Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + + } + + SRP_EXIT( SRP_DBG_DATA ); + + return ( TRUE ); +} + +BOOLEAN +srp_isr( + IN PVOID p_dev_ext ) +{ + 
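+	/*
+	 * DriverEntry currently registers HwInterrupt = NULL (the srp_isr
+	 * assignment above it is commented out), so StorPort never calls
+	 * this stub.
+	 */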
SRP_ENTER( SRP_DBG_PNP ); + + UNUSED_PARAM( p_dev_ext ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("called at IRQL %d\n", KeGetCurrentIrql()) ); + + SRP_EXIT( SRP_DBG_PNP ); + return TRUE; +} + +BOOLEAN +srp_reset( + IN PVOID p_dev_ext, + IN ULONG path_id ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + UNUSED_PARAM( p_dev_ext ); + UNUSED_PARAM( path_id ); + + SRP_EXIT( SRP_DBG_PNP ); + return FALSE; +} + +SCSI_ADAPTER_CONTROL_STATUS +srp_adapter_ctrl( + IN PVOID p_dev_ext, + IN SCSI_ADAPTER_CONTROL_TYPE ctrl_type, + IN PVOID params ) +{ + srp_ext_t *p_ext; + SCSI_SUPPORTED_CONTROL_TYPE_LIST *p_ctrl_list; + + SRP_ENTER( SRP_DBG_PNP ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("called at IRQL %d\n", KeGetCurrentIrql()) ); + + p_ext = (srp_ext_t*)p_dev_ext; + + switch( ctrl_type ) + { + case ScsiQuerySupportedControlTypes: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("ScsiQuerySupportedControlTypes\n") ); + p_ctrl_list = (SCSI_SUPPORTED_CONTROL_TYPE_LIST*)params; + p_ctrl_list->SupportedTypeList[ScsiQuerySupportedControlTypes] = TRUE; + p_ctrl_list->SupportedTypeList[ScsiStopAdapter] = TRUE; + p_ctrl_list->SupportedTypeList[ScsiRestartAdapter] = FALSE; + p_ctrl_list->SupportedTypeList[ScsiSetBootConfig] = FALSE; + p_ctrl_list->SupportedTypeList[ScsiSetRunningConfig] = FALSE; + break; + + case ScsiStopAdapter: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("ScsiStopAdapter\n") ); + if( p_ext->p_hba ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("HBA Object ref_cnt = %d\n", p_ext->p_hba->obj.ref_cnt) ); + cl_obj_destroy( &p_ext->p_hba->obj ); + p_ext->p_hba = NULL; + } + break; + + case ScsiRestartAdapter: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("ScsiRestartAdapter\n") ); + break; + + case ScsiSetBootConfig: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("ScsiSetBootConfig\n") ); + break; + + case ScsiSetRunningConfig: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("ScsiSetRunningConfig\n") ); + break; + } + + SRP_EXIT( SRP_DBG_PNP ); + return ScsiAdapterControlSuccess; +} + +BOOLEAN +srp_build_io( + IN PVOID p_dev_ext, + IN PSCSI_REQUEST_BLOCK p_srb ) +{ + SRP_ENTER( SRP_DBG_DATA ); + + if ( p_srb->Function == SRB_FUNCTION_EXECUTE_SCSI ) + { + + CL_ASSERT( p_srb->SrbExtension != NULL ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DATA, + ("Building I/O for Function = %s(0x%x), " + "Path = 0x%x, Target = 0x%x, Lun = 0x%x\n", + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + + if ( srp_format_io_request( p_dev_ext, p_srb ) == FALSE ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_DATA, + ("Returning SrbStatus %s(0x%x) for " + "Function = %s(0x%x), Path = 0x%x, " + "Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srb->SrbStatus], + p_srb->SrbStatus, + g_srb_function_name[p_srb->Function], + p_srb->Function, + p_srb->PathId, + p_srb->TargetId, + p_srb->Lun) ); + + StorPortNotification( RequestComplete, p_dev_ext, p_srb ); + + return ( FALSE ); + } + } + + SRP_EXIT( SRP_DBG_DATA ); + + return ( TRUE ); +} diff --git a/branches/Ndi/ulp/srp/kernel/srp_event.c b/branches/Ndi/ulp/srp/kernel/srp_event.c new file mode 100644 index 00000000..c015c9c8 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_event.c @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "srp_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_event.tmh" +#endif +#include "srp_event.h" +#include "srp_session.h" + +/* srp_async_event_handler_cb */ +/*! +Handles asynchronous events from ib + +@param p_event_rec - pointer to the async event + +@return - none +*/ +void +srp_async_event_handler_cb( + IN ib_async_event_rec_t *p_event_rec ) +{ + srp_session_t *p_srp_session = (srp_session_t * __ptr64)p_event_rec->context; + + SRP_ENTER( SRP_DBG_PNP ); + + switch ( p_event_rec->code ) + { + case IB_AE_PORT_ACTIVE: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Async Event IB_AE_PORT_ACTIVE (%d) received for %s.\n", + p_event_rec->code, + p_srp_session->p_hba->ioc_info.profile.id_string) ); + break; + + case IB_AE_PORT_DOWN: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Async Event IB_AE_PORT_DOWN (%d) received for %s.\n", + p_event_rec->code, + p_srp_session->p_hba->ioc_info.profile.id_string) ); + break; + + default: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Async Event %d received.\n", p_event_rec->code) ); + break; + } + + SRP_EXIT( SRP_DBG_PNP ); +} diff --git a/branches/Ndi/ulp/srp/kernel/srp_event.h b/branches/Ndi/ulp/srp/kernel/srp_event.h new file mode 100644 index 00000000..2aad4eaf --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_event.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
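
srp_async_event_handler_cb above is a pure logging callback: it recovers the session from the opaque context it registered with the channel adapter and switches on the event code. A toy version of the same shape; the struct layouts are illustrative, not the real ib_async_event_rec_t:

#include <stdio.h>

/* Illustrative model of the IBAL async-event callback: an event code plus
 * the opaque context the consumer registered (here, the session). */
typedef struct event_rec { int code; void *context; } event_rec_t;
typedef struct session { const char *id_string; } session_t;

#define AE_PORT_ACTIVE 1
#define AE_PORT_DOWN   2

static void async_event_cb( event_rec_t *p_rec )
{
    session_t *p_session = (session_t *)p_rec->context;

    switch( p_rec->code )
    {
    case AE_PORT_ACTIVE: printf( "port active for %s\n", p_session->id_string ); break;
    case AE_PORT_DOWN:   printf( "port down for %s\n",   p_session->id_string ); break;
    default:             printf( "event %d ignored\n",   p_rec->code );          break;
    }
}

int main( void )
{
    session_t s = { "SRP.T10:0001" };
    event_rec_t r = { AE_PORT_DOWN, &s };
    async_event_cb( &r );
    return 0;
}
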
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef _SRP_EVENT_H_ +#define _SRP_EVENT_H_ + +#include + +void +srp_async_event_handler_cb( + IN ib_async_event_rec_t *p_event_rec ); + +#endif /* _SRP_EVENT_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_hba.c b/branches/Ndi/ulp/srp/kernel/srp_hba.c new file mode 100644 index 00000000..f760ce0d --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_hba.c @@ -0,0 +1,1114 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + + + +#include "srp_hba.h" +#include "srp_data.h" +#include "srp_data_path.h" +#include "srp_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_hba.tmh" +#endif +#include "srp_session.h" + +#include +#include +#include +#include + + +static void +__srp_destroying_hba( + IN cl_obj_t *p_obj ); + +static void +__srp_cleanup_hba( + IN cl_obj_t *p_obj ); + +static void +__srp_free_hba( + IN cl_obj_t *p_obj ); + +static ib_api_status_t +__srp_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ); + +void +__srp_dump_ioc_info( const ib_ioc_info_t *p_ioc_info ) +{ + UNUSED_PARAM( p_ioc_info ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Dumping IOC Info\n") ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tchassis_guid\t= 0x%I64x\n", + cl_ntoh64( p_ioc_info->chassis_guid )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tchassis_slot\t= %d\n", + p_ioc_info->chassis_slot) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tiou_guid\t= 0x%I64x\n", + cl_ntoh64( p_ioc_info->iou_guid )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tiou_slot\t= %d\n", + p_ioc_info->iou_slot) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, ("\n") ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Dumping IOC Info Profile\n") ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tioc_guid\t= 0x%I64x\n", + cl_ntoh64( p_ioc_info->profile.ioc_guid )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tvend_id\t= 0x%x\n", + cl_ntoh32( p_ioc_info->profile.vend_id )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tdev_id\t= 0x%x\n", + cl_ntoh32( p_ioc_info->profile.dev_id )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tdev_ver\t= 0x%x\n", + cl_ntoh16( p_ioc_info->profile.dev_ver )) ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tsubsys_vend_id\t= 0x%x\n", + cl_ntoh32( p_ioc_info->profile.subsys_vend_id )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tsubsys_id\t= 0x%x\n", + cl_ntoh32( p_ioc_info->profile.subsys_id )) ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tio_class\t= 0x%x\n", + cl_ntoh16( p_ioc_info->profile.io_class )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tio_subclass\t= 0x%x\n", + cl_ntoh16( p_ioc_info->profile.io_subclass )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tprotocol\t= 0x%x\n", + cl_ntoh16( p_ioc_info->profile.protocol )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tprotocol_ver\t= 0x%x\n", + cl_ntoh16( p_ioc_info->profile.protocol_ver )) ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tsend_msg_depth\t= %d\n", + cl_ntoh16( p_ioc_info->profile.send_msg_depth )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\trdma_read_depth\t= %d\n", + p_ioc_info->profile.rdma_read_depth) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tsend_msg_size\t= %d\n", + cl_ntoh32( p_ioc_info->profile.send_msg_size )) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\trdma_size\t = %d\n", + cl_ntoh32( p_ioc_info->profile.rdma_size )) ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tctrl_ops_cap\t= 0x%X\n", + p_ioc_info->profile.ctrl_ops_cap) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tnum_svc_entries\t= 0x%X\n", + p_ioc_info->profile.num_svc_entries) ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("\tid_string\t= %s\n", + p_ioc_info->profile.id_string) ); +} + + +static boolean_t +__get_ioc_ifc( 
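
__srp_dump_ioc_info above runs every multi-byte profile field through cl_ntoh16/32/64 because the IOC profile arrives in network (big-endian) byte order. A self-contained illustration of that conversion, with hand-rolled swap helpers standing in for the complib ones (which compile to no-ops on big-endian hosts):

#include <stdio.h>
#include <stdint.h>

/* Stand-ins for cl_ntoh16/cl_ntoh64 on a little-endian host. */
static uint16_t my_ntoh16( uint16_t v ) { return (uint16_t)((v >> 8) | (v << 8)); }
static uint64_t my_ntoh64( uint64_t v )
{
    uint64_t r = 0;
    for( int i = 0; i < 8; i++ )
        r = (r << 8) | ((v >> (8 * i)) & 0xFF);  /* byte-reverse */
    return r;
}

int main( void )
{
    /* Profile fields as a little-endian host reads them off the wire. */
    uint64_t ioc_guid_be = 0x0100000000000000ULL; /* big-endian 0x1 */
    uint16_t io_class_be = 0x0001;                /* big-endian 0x0100 */

    printf( "ioc_guid = 0x%llx\n", (unsigned long long)my_ntoh64( ioc_guid_be ) );
    printf( "io_class = 0x%x\n", my_ntoh16( io_class_be ) );
    return 0;
}
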
+ IN srp_hba_t* const p_hba ) +{ + NTSTATUS status; + ib_al_ifc_data_t data; + IO_STACK_LOCATION io_stack; + + SRP_ENTER( SRP_DBG_PNP ); + + /* Query for our interface. */ + data.size = sizeof(ioc_ifc_data_t); + data.version = IOC_INTERFACE_DATA_VERSION; + data.type = &GUID_IOC_INTERFACE_DATA; + data.p_data = &p_hba->info; + + io_stack.MinorFunction = IRP_MN_QUERY_INTERFACE; + io_stack.Parameters.QueryInterface.Version = AL_INTERFACE_VERSION; + io_stack.Parameters.QueryInterface.Size = sizeof(ib_al_ifc_t); + io_stack.Parameters.QueryInterface.Interface = (INTERFACE*)&p_hba->ifc; + io_stack.Parameters.QueryInterface.InterfaceSpecificData = &data; + io_stack.Parameters.QueryInterface.InterfaceType = &GUID_IB_AL_INTERFACE; + + status = cl_fwd_query_ifc( gp_self_do, &io_stack ); + if( !NT_SUCCESS( status ) ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Query interface for IOU parameters returned %08x.\n", status) ); + return FALSE; + } + else + { + /* + * Dereference the interface now so that the bus driver doesn't fail a + * query remove IRP. We will always get unloaded before the bus driver + * since we're a child device. + */ + p_hba->ifc.wdm.InterfaceDereference( p_hba->ifc.wdm.Context ); + SRP_EXIT( SRP_DBG_PNP ); + return TRUE; + } +} + + +ib_api_status_t +srp_hba_create( + IN cl_obj_t* const p_drv_obj, + OUT srp_ext_t* const p_ext ) +{ + srp_hba_t *p_hba; + cl_status_t cl_status; + ib_api_status_t ib_status; + ib_pnp_req_t pnp_req; + uint32_t i; + + SRP_ENTER( SRP_DBG_PNP ); + + p_hba = (srp_hba_t*)cl_zalloc( sizeof(srp_hba_t) ); + if( !p_hba ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to allocate srp_hba_t structure.\n") ); + return IB_INSUFFICIENT_MEMORY; + } + + cl_qlist_init( &p_hba->path_record_list ); + cl_spinlock_init( &p_hba->path_record_list_lock ); + + /* Store instance parameters. */ + p_hba->p_ext = p_ext; + p_hba->max_sg = 0xFFFFFFFF; + p_hba->max_srb_ext_sz = 0xFFFFFFFF; + + if( !__get_ioc_ifc( p_hba ) ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("__get_ioc_ifc failed.\n") ); + return IB_ERROR; + } + + for ( i = 0; i < SRP_MAX_SERVICE_ENTRIES; i++ ) + { + p_hba->session_list[i] = NULL; + } + + cl_obj_construct( &p_hba->obj, SRP_OBJ_TYPE_HBA ); + cl_status = cl_obj_init( &p_hba->obj, CL_DESTROY_ASYNC, + __srp_destroying_hba, __srp_cleanup_hba, __srp_free_hba ); + if( cl_status != CL_SUCCESS ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("cl_obj_init returned %s\n", cl_status_text[cl_status]) ); + return IB_ERROR; + } + + ib_status = p_hba->ifc.open_al( &p_hba->h_al ); + if( ib_status != IB_SUCCESS ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("ib_open_al returned %s\n", p_hba->ifc.get_err_str( ib_status )) ); + goto err; + } + + /* Register for IOC events */ + pnp_req.pfn_pnp_cb = __srp_pnp_cb; + pnp_req.pnp_class = IB_PNP_IOC | IB_PNP_FLAG_REG_SYNC; + pnp_req.pnp_context = p_hba; + /* Reference the HBA object before registering for PnP notifications. 
*/ + cl_obj_ref( &p_hba->obj ); + + cl_obj_insert_rel( &p_hba->rel, p_drv_obj, &p_hba->obj ); + + ib_status = p_hba->ifc.reg_pnp( p_hba->h_al, &pnp_req, &p_hba->h_pnp ); + if( ib_status != IB_SUCCESS ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("ib_reg_pnp returned %s\n", p_hba->ifc.get_err_str( ib_status )) ); + goto err; + } + ib_status = IB_ERROR; + for ( i = 0; i < p_hba->ioc_info.profile.num_svc_entries; i++ ) + { + if ( p_hba->session_list[i] != NULL ) + ib_status = IB_SUCCESS; + } + + if( ib_status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Session Connection Failure.\n") ); + +err: + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("HBA Object ref_cnt = %d\n", p_hba->obj.ref_cnt) ); + cl_obj_destroy( &p_hba->obj ); + + return ib_status; + } + + /* + * Add the HBA to the driver object's child list. This will cause + * everything to clean up properly in case we miss an unload notification. + */ + p_ext->p_hba = p_hba; + + SRP_EXIT( SRP_DBG_PNP ); + return ib_status; +} + + +static void +__srp_destroying_hba( + IN cl_obj_t *p_obj ) +{ + srp_hba_t *p_hba; + + SRP_ENTER( SRP_DBG_PNP ); + + p_hba = PARENT_STRUCT( p_obj, srp_hba_t, obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Before dereg pnp HBA Object ref_cnt = %d\n", p_hba->obj.ref_cnt) ); + + if( p_hba->h_pnp ) + { + p_hba->ifc.dereg_pnp( p_hba->h_pnp, cl_obj_deref ); + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("After dereg pnp HBA Object ref_cnt = %d\n", p_hba->obj.ref_cnt) ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +static void +__srp_remove_path_records( + IN srp_hba_t *p_hba ) +{ + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + p_srp_path_record = (srp_path_record_t *)cl_qlist_remove_head( &p_hba->path_record_list ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Removing any remaining path records.\n") ); + + while ( p_srp_path_record != (srp_path_record_t *)cl_qlist_end( &p_hba->path_record_list ) ) + { + cl_free( p_srp_path_record ); + p_srp_path_record = (srp_path_record_t *)cl_qlist_remove_head( &p_hba->path_record_list ); + } + + cl_spinlock_release( &p_hba->path_record_list_lock ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +static void +__srp_cleanup_hba( + IN cl_obj_t *p_obj ) +{ + srp_hba_t *p_hba; + + SRP_ENTER( SRP_DBG_PNP ); + + p_hba = PARENT_STRUCT( p_obj, srp_hba_t, obj ); + + if( p_hba->h_al ) + p_hba->ifc.close_al( p_hba->h_al ); + + __srp_remove_path_records( p_hba ); + + cl_spinlock_destroy( &p_hba->path_record_list_lock ); + + if ( p_hba->p_svc_entries ) + cl_free( p_hba->p_svc_entries ); + + SRP_EXIT( SRP_DBG_PNP ); +} + + +static void +__srp_free_hba( + IN cl_obj_t *p_obj ) +{ + srp_hba_t *p_hba; + + SRP_ENTER( SRP_DBG_PNP ); + + p_hba = PARENT_STRUCT( p_obj, srp_hba_t, obj ); + + cl_obj_deinit( p_obj ); + cl_free( p_hba ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +static BOOLEAN +__srp_validate_ioc( + IN ib_pnp_ioc_rec_t *p_ioc_rec ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + // Is this really an SRP device? + if ( ( p_ioc_rec->info.profile.io_class != SRP_IO_CLASS && + p_ioc_rec->info.profile.io_class != SRP_IO_CLASS_R10 ) || + p_ioc_rec->info.profile.io_subclass != SRP_IO_SUBCLASS ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Not an SRP CLASS(0x%x)/SUBCLASS(0x%x).\n", + cl_ntoh16( p_ioc_rec->info.profile.io_class ), + cl_ntoh16( p_ioc_rec->info.profile.io_subclass )) ); + return FALSE; + } + + // Does it have the required features? 
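
srp_hba_create hands cl_obj_init three callbacks that fire in a fixed order during teardown: __srp_destroying_hba cancels the PnP registration (which drops a reference once deregistration completes), __srp_cleanup_hba releases AL and the path records after the reference count reaches zero, and __srp_free_hba releases the memory. A toy synchronous model of that three-phase lifecycle; the real cl_obj is asynchronous and thread safe, and all names below are stand-ins:

#include <stdio.h>
#include <stdlib.h>

typedef struct obj
{
    int ref_cnt;
    void (*pfn_destroying)( struct obj* );
    void (*pfn_cleanup)( struct obj* );
    void (*pfn_free)( struct obj* );
} obj_t;

static void obj_deref( obj_t *p_obj )
{
    if( --p_obj->ref_cnt == 0 )
    {
        p_obj->pfn_cleanup( p_obj );  /* release resources */
        p_obj->pfn_free( p_obj );     /* then release memory */
    }
}

static void obj_destroy( obj_t *p_obj )
{
    p_obj->pfn_destroying( p_obj );   /* e.g. dereg_pnp, which derefs later */
    obj_deref( p_obj );               /* drop the constructor's reference */
}

static void on_destroying( obj_t *o ) { (void)o; printf( "destroying: cancel async users\n" ); }
static void on_cleanup( obj_t *o )    { (void)o; printf( "cleanup: release resources\n" ); }
static void on_free( obj_t *o )       { printf( "free\n" ); free( o ); }

int main( void )
{
    obj_t *o = malloc( sizeof(*o) );
    if( !o ) return 1;
    o->ref_cnt = 1;
    o->pfn_destroying = on_destroying;
    o->pfn_cleanup = on_cleanup;
    o->pfn_free = on_free;
    obj_destroy( o );
    return 0;
}

The split matters because PnP deregistration completes asynchronously: cleanup must not run until every outstanding callback has dropped its reference.
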
+ if ( cl_ntoh16( p_ioc_rec->info.profile.protocol ) != SRP_PROTOCOL || + cl_ntoh16( p_ioc_rec->info.profile.protocol_ver ) != SRP_PROTOCOL_VER || + !(p_ioc_rec->info.profile.ctrl_ops_cap & CTRL_OPS_CAP_ST) || + !(p_ioc_rec->info.profile.ctrl_ops_cap & CTRL_OPS_CAP_SF) || + !(p_ioc_rec->info.profile.ctrl_ops_cap & CTRL_OPS_CAP_RF) || + !(p_ioc_rec->info.profile.ctrl_ops_cap & CTRL_OPS_CAP_WF) ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Not an SRP PROTOCOL/PROTOCOL_VER.\n") ); + return FALSE; + } + + // Can it handle our IO requirements? + if ( cl_ntoh32( p_ioc_rec->info.profile.send_msg_size ) < SRP_MIN_TGT_TO_INI_IU || + cl_ntoh16( p_ioc_rec->info.profile.send_msg_depth ) == 0 || + cl_ntoh32( p_ioc_rec->info.profile.rdma_size ) < SRP_MIN_TGT_TO_INI_DMA ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Device Not Capable.\n") ); + return FALSE; + } + + SRP_EXIT( SRP_DBG_PNP ); + + return TRUE; +} + +static BOOLEAN +__srp_path_rec_equal( + IN const ib_path_rec_t *p_path_rec_1, + IN const ib_path_rec_t *p_path_rec_2, + IN BOOLEAN check_num_path, + IN BOOLEAN check_preference ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + if ( p_path_rec_1->dgid.unicast.prefix != p_path_rec_2->dgid.unicast.prefix ) + return ( FALSE ); + + if ( p_path_rec_1->dgid.unicast.interface_id != p_path_rec_2->dgid.unicast.interface_id ) + return ( FALSE ); + + if ( p_path_rec_1->sgid.unicast.prefix != p_path_rec_2->sgid.unicast.prefix ) + return ( FALSE ); + + if ( p_path_rec_1->sgid.unicast.interface_id != p_path_rec_2->sgid.unicast.interface_id ) + return ( FALSE ); + + if ( p_path_rec_1->dlid != p_path_rec_2->dlid ) + return ( FALSE ); + + if ( p_path_rec_1->slid != p_path_rec_2->slid ) + return ( FALSE ); + + if ( p_path_rec_1->hop_flow_raw.val != p_path_rec_2->hop_flow_raw.val ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("hop_flow_raw.val does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->tclass != p_path_rec_2->tclass ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("tclass does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->num_path != p_path_rec_2->num_path ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("num_path does not match.\n") ); + if ( check_num_path == TRUE ) + { + return ( FALSE ); + } + } + + if ( p_path_rec_1->pkey != p_path_rec_2->pkey ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("pkey does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->sl != p_path_rec_2->sl ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("sl does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->mtu != p_path_rec_2->mtu ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("mtu does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->rate != p_path_rec_2->rate ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("rate does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->pkt_life != p_path_rec_2->pkt_life ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("pkt_life does not match.\n") ); + return ( FALSE ); + } + + if ( p_path_rec_1->preference != p_path_rec_2->preference ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("preference does not match.\n") ); + if ( check_preference == TRUE ) + { + return ( FALSE ); + } + } + +#if defined( _DEBUG_ ) + + if ( p_path_rec_1->resv0 != p_path_rec_2->resv0 ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("resv0 does not match.\n") ); + } + + if ( 
p_path_rec_1->resv1 != p_path_rec_2->resv1 ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("resv1 does not match.\n") ); + } + + if ( p_path_rec_1->resv2 != p_path_rec_2->resv2 ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("resv2 does not match.\n") ); + } + + if ( cl_memcmp( p_path_rec_1, p_path_rec_2, sizeof( ib_path_rec_t ) ) != 0 ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("p_path_rec_1 does not match p_path_rec_2.\n") ); + } + else + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("p_path_rec_1 matches p_path_rec_2.\n") ); + } + +#endif + + SRP_EXIT( SRP_DBG_PNP ); + + return ( TRUE ); +} + +static +srp_path_record_t* +__srp_find_path( + IN srp_hba_t *p_hba, + IN const ib_path_rec_t *p_path_rec, + IN BOOLEAN check_num_path, + IN BOOLEAN check_preference ) +{ + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Finding path record (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_path_rec->slid), + cl_ntoh16(p_path_rec->dlid), + p_hba->ioc_info.profile.id_string) ); + + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + + p_srp_path_record = (srp_path_record_t *)cl_qlist_head( &p_hba->path_record_list ); + + while ( p_srp_path_record != (srp_path_record_t *)cl_qlist_end( &p_hba->path_record_list ) ) + { + if ( __srp_path_rec_equal( (const ib_path_rec_t *)&p_srp_path_record->path_rec, + p_path_rec, + check_num_path, + check_preference ) == TRUE ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Found path record (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_path_rec->slid), + cl_ntoh16(p_path_rec->dlid), + p_hba->ioc_info.profile.id_string) ); + break; + } + + p_srp_path_record = (srp_path_record_t *)cl_qlist_next( &p_srp_path_record->list_item ); + } + + if ( p_srp_path_record == (srp_path_record_t *)cl_qlist_end( &p_hba->path_record_list ) ) + { + p_srp_path_record = NULL; + } + + cl_spinlock_release( &p_hba->path_record_list_lock ); + + SRP_EXIT( SRP_DBG_PNP ); + + return p_srp_path_record; +} + +static +srp_path_record_t* +__srp_remove_path( + IN srp_hba_t *p_hba, + IN const ib_path_rec_t *p_path_rec ) +{ + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + p_srp_path_record = __srp_find_path( p_hba, p_path_rec, TRUE, TRUE ); + if ( p_srp_path_record != NULL ) + { + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Removing path record (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_path_rec->slid), + cl_ntoh16(p_path_rec->dlid), + p_hba->ioc_info.profile.id_string) ); + + cl_qlist_remove_item( &p_hba->path_record_list, &p_srp_path_record->list_item ); + + cl_spinlock_release( &p_hba->path_record_list_lock ); + } + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Current Path count for %s = %d \n", + p_hba->ioc_info.profile.id_string, + (int)cl_qlist_count( &p_hba->path_record_list )) ); + + SRP_EXIT( SRP_DBG_PNP ); + + return p_srp_path_record; +} + +static +srp_path_record_t* +__srp_add_path( + IN srp_hba_t *p_hba, + IN const ib_path_rec_t *p_path_rec ) +{ + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + p_srp_path_record = __srp_find_path( p_hba, p_path_rec, FALSE, FALSE ); + if ( p_srp_path_record != NULL ) + { + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + p_srp_path_record->path_rec = *p_path_rec; + cl_spinlock_release( &p_hba->path_record_list_lock ); + + SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + 
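
__srp_find_path, __srp_remove_path and __srp_add_path all follow one pattern: take path_record_list_lock, walk the qlist comparing records, and treat the list-end sentinel as "not found". The same search-under-lock shape in portable C, with a pthread mutex standing in for cl_spinlock_t and NULL for cl_qlist_end (a sketch; build with -pthread):

#include <pthread.h>
#include <stdio.h>

/* Singly linked list guarded by a mutex; the equality test is a stub for
 * the field-by-field __srp_path_rec_equal comparison above. */
typedef struct node { struct node *next; int slid, dlid; } node_t;

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static node_t *list_head = NULL;

static node_t* find_path( int slid, int dlid )
{
    node_t *n;
    pthread_mutex_lock( &list_lock );
    for( n = list_head; n != NULL; n = n->next )
        if( n->slid == slid && n->dlid == dlid )
            break;
    pthread_mutex_unlock( &list_lock );
    return n;   /* NULL plays the role of cl_qlist_end() */
}

int main( void )
{
    node_t a = { NULL, 0x11, 0x22 };
    list_head = &a;
    printf( "found: %p\n", (void*)find_path( 0x11, 0x22 ) );
    return 0;
}
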
("Discarding/Updating duplicate path record (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_path_rec->slid), + cl_ntoh16(p_path_rec->dlid), + p_hba->ioc_info.profile.id_string) ); + + goto exit; + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Adding path record (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_path_rec->slid), + cl_ntoh16(p_path_rec->dlid), + p_hba->ioc_info.profile.id_string) ); + + + p_srp_path_record = cl_zalloc( sizeof( srp_path_record_t ) ); + if ( p_srp_path_record == NULL ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Insufficient Memory.\n") ); + } + else + { + p_srp_path_record->path_rec = *p_path_rec; + + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + cl_qlist_insert_tail( &p_hba->path_record_list, &p_srp_path_record->list_item ); + cl_spinlock_release( &p_hba->path_record_list_lock ); + } + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Current Path count for %s = %d \n", + p_hba->ioc_info.profile.id_string, + (int)cl_qlist_count( &p_hba->path_record_list )) ); + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return p_srp_path_record; +} + +static ib_api_status_t +__srp_connect_sessions( + IN OUT srp_hba_t *p_hba ) +{ + uint32_t i; + srp_session_t *p_session; + ib_api_status_t status = IB_ERROR; + BOOLEAN any_ioc_connected = FALSE; + + SRP_ENTER( SRP_DBG_PNP ); + + /* Create the session(s). */ + for ( i = 0; i < p_hba->ioc_info.profile.num_svc_entries; i++ ) + { + int retry_count = 0; + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Creating New Session For Service Entry Index %d.\n", + p_hba->ioc_info.profile.num_svc_entries)); + + p_session = srp_new_session( + p_hba, &p_hba->p_svc_entries[i], &status ); + if( p_session == NULL ) + { + status = IB_INSUFFICIENT_MEMORY; + continue; + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("New Session For Service Entry Index %d Created.\n", + p_hba->ioc_info.profile.num_svc_entries)); + + do + { + retry_count++; + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Attempting to connect %s. 
Connection Attempt Count = %d.\n", + p_hba->ioc_info.profile.id_string, + retry_count) ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Logging Into Session.\n")); + status = srp_session_login( p_session ); + if ( status == IB_SUCCESS ) + { + any_ioc_connected = TRUE; + + if ( (p_hba->max_sg > + p_session->connection.max_scatter_gather_entries) + && !(p_session->connection.descriptor_format & + DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS) ) + { + p_hba->max_sg = p_session->connection.max_scatter_gather_entries; + } + + if ( p_hba->max_srb_ext_sz > p_session->connection.init_to_targ_iu_sz ) + { + p_hba->max_srb_ext_sz = + sizeof( srp_send_descriptor_t ) - + SRP_MAX_IU_SIZE + + p_session->connection.init_to_targ_iu_sz; + } + + cl_obj_lock( &p_hba->obj ); + p_hba->session_list[i] = p_session; + cl_obj_unlock( &p_hba->obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Session Login Issued Successfully.\n")); + } + else + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_PNP, + ("Session Login Failure Status = %d.\n", status)); + } + } while ( (status != IB_SUCCESS) && (retry_count < 3) ); + + if( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Session Object ref_cnt = %d\n", p_session->obj.ref_cnt) ); + cl_obj_destroy( &p_session->obj ); + } + } + + if ( any_ioc_connected == TRUE ) + { + status = IB_SUCCESS; + if ( p_hba->adapter_paused == TRUE ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Resuming Adapter for %s.\n", + p_hba->ioc_info.profile.id_string) ); + p_hba->adapter_paused = FALSE; + StorPortReady( p_hba->p_ext ); + //StorPortNotification( BusChangeDetected, p_hba->p_ext, 0 ); + } + } + + SRP_EXIT( SRP_DBG_PNP ); + + return status; +} + +static void +__srp_disconnect_sessions( + IN srp_hba_t *p_hba, + IN BOOLEAN pause_adapter ) +{ + uint32_t i; + srp_session_t *p_session; + + SRP_ENTER( SRP_DBG_PNP ); + + cl_obj_lock( &p_hba->obj ); + + for ( i = 0; i < p_hba->ioc_info.profile.num_svc_entries; i++ ) + { + if ( p_hba->session_list[i] != NULL ) + { + break; + } + } + + cl_obj_unlock( &p_hba->obj ); + + if ( i == p_hba->ioc_info.profile.num_svc_entries ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("No current connections to %s.\n", + p_hba->ioc_info.profile.id_string) ); + goto exit; + } + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Current path to %s has been lost.\n", + p_hba->ioc_info.profile.id_string) ); + + p_hba->p_srp_path_record = NULL; + + if ( pause_adapter == TRUE ) + { + if ( p_hba->adapter_paused == FALSE ) + { + p_hba->adapter_paused = TRUE; + StorPortBusy( p_hba->p_ext, (ULONG)-1 ); + StorPortCompleteRequest( p_hba->p_ext, + SP_UNTAGGED, + SP_UNTAGGED, + SP_UNTAGGED, + SRB_STATUS_BUSY ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Pausing Adapter for %s.\n", + p_hba->ioc_info.profile.id_string) ); + } + } + + /* Destroy all the connections. 
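
On path loss __srp_disconnect_sessions parks the adapter: StorPortBusy stops new requests, StorPortCompleteRequest bounces everything in flight with SRB_STATUS_BUSY so upper layers retry it, and the adapter_paused flag guarantees the matching StorPortReady in __srp_connect_sessions is issued exactly once. A toy model of that paired gating; port_busy/port_ready are stand-ins here, not the StorPort API:

#include <stdio.h>

/* The flag keeps one "busy" per "ready", mirroring adapter_paused above. */
static int adapter_paused = 0;

static void port_busy( void )  { printf( "port: hold new requests, bounce in-flight as BUSY\n" ); }
static void port_ready( void ) { printf( "port: resume issuing requests\n" ); }

static void on_path_lost( void )
{
    if( !adapter_paused )
    {
        adapter_paused = 1;
        port_busy();
    }
}

static void on_session_connected( void )
{
    if( adapter_paused )
    {
        adapter_paused = 0;
        port_ready();
    }
}

int main( void )
{
    on_path_lost();
    on_path_lost();          /* second loss is a no-op */
    on_session_connected();
    return 0;
}
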
*/ + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Destroy all connections to %s.\n", + p_hba->ioc_info.profile.id_string) ); + + for ( i = 0; i < p_hba->ioc_info.profile.num_svc_entries; i++ ) + { + cl_obj_lock( &p_hba->obj ); + p_session = p_hba->session_list[i]; + p_hba->session_list[i] = NULL; + cl_obj_unlock( &p_hba->obj ); + + if ( p_session != NULL ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Session Object ref_cnt = %d\n", p_session->obj.ref_cnt) ); + __srp_cleanup_session ( &p_session->obj ); + cl_obj_destroy( &p_session->obj ); + } + else + { + SRP_PRINT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + ("Session for Target ID %d on %s is NULL.\n", + i, + p_hba->ioc_info.profile.id_string) ); // <-- OK in a shutdown or target disconnect + } + } + +exit: + SRP_EXIT( SRP_DBG_PNP ); +} + +static ib_api_status_t +__srp_connect_path( + IN srp_hba_t *p_hba ) +{ + ib_api_status_t status = IB_ERROR; + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + while ( g_srp_system_shutdown == FALSE ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Searching for path to %s.\n", + p_hba->ioc_info.profile.id_string) ); + + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + p_srp_path_record = (srp_path_record_t *)cl_qlist_head( &p_hba->path_record_list ); + cl_spinlock_release( &p_hba->path_record_list_lock ); + if ( p_srp_path_record == (srp_path_record_t *)cl_qlist_end( &p_hba->path_record_list ) ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("No paths to %s found.\n", + p_hba->ioc_info.profile.id_string) ); + break; + } + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Connecting path to %s.\n", + p_hba->ioc_info.profile.id_string) ); + + p_hba->p_srp_path_record = p_srp_path_record; + status = __srp_connect_sessions( p_hba ); + if ( status == IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Path to %s has connected.\n", + p_hba->ioc_info.profile.id_string) ); + break; + } + + p_hba->p_srp_path_record = NULL; + cl_spinlock_acquire( &p_hba->path_record_list_lock ); + cl_qlist_remove_item( &p_hba->path_record_list, &p_srp_path_record->list_item ); + cl_spinlock_release( &p_hba->path_record_list_lock ); + cl_free( p_srp_path_record ); + } + + SRP_EXIT( SRP_DBG_PNP ); + + return status; +} + +static ib_api_status_t +__srp_pnp_cb( + IN ib_pnp_rec_t *p_pnp_rec ) +{ + ib_api_status_t status = IB_SUCCESS; + ib_pnp_ioc_rec_t *p_ioc_rec; + ib_pnp_ioc_path_rec_t *p_ioc_path; + srp_hba_t *p_hba; + srp_path_record_t *p_srp_path_record; + + SRP_ENTER( SRP_DBG_PNP ); + + p_hba = (srp_hba_t* __ptr64)p_pnp_rec->pnp_context; + p_ioc_rec = (ib_pnp_ioc_rec_t*)p_pnp_rec; + p_ioc_path = (ib_pnp_ioc_path_rec_t*)p_pnp_rec; + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("p_pnp_rec->pnp_event = 0x%x (%s)\n", + p_pnp_rec->pnp_event, ib_get_pnp_event_str( p_pnp_rec->pnp_event )) ); + + + switch( p_pnp_rec->pnp_event ) + { + case IB_PNP_IOC_ADD: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("IB_PNP_IOC_ADD for %s.\n", + p_ioc_rec->info.profile.id_string) ); + + __srp_dump_ioc_info( &p_ioc_rec->info ); + + /* + * Trap our CA GUID so we filter path notifications + * for our bound CA only. + */ + if( p_ioc_rec->ca_guid != p_hba->info.ca_guid ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + ("Ignoring CA GUID.\n") ); + status = IB_INVALID_GUID; + break; + } + + /* Trap our IOC GUID so we can get path notification events. 
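
__srp_connect_path above is a plain failover loop: take the head of the path list, try to bring the sessions up through it, and on failure unlink and free that record and move to the next, until a path connects, the list is empty, or g_srp_system_shutdown is set. The control flow reduces to this sketch, where connect_via is a stub for the session login:

#include <stdio.h>

static int connect_via( int path ) { return path != 0; /* path 0 is dead */ }

int main( void )
{
    int paths[] = { 0, 1, 2 };
    int count = 3, i = 0, connected = 0;

    while( !connected && i < count )
    {
        if( connect_via( paths[i] ) )
            connected = 1;   /* keep paths[i] as the current path */
        else
            i++;             /* discard the dead path, try the next */
    }

    if( connected )
        printf( "connected via path %d\n", paths[i] );
    else
        printf( "no usable path\n" );
    return 0;
}
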
*/ + if( p_ioc_rec->info.profile.ioc_guid != p_hba->info.guid ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_WARNING, SRP_DBG_PNP, + ("Ignoring GUID.\n") ); + status = IB_INVALID_GUID; + break; + } + + if ( __srp_validate_ioc( p_ioc_rec ) == FALSE ) + { + status = IB_INVALID_GUID; + break; + } + + p_hba->ioc_info = p_ioc_rec->info; + p_hba->p_svc_entries = cl_zalloc( sizeof(ib_svc_entry_t) * p_hba->ioc_info.profile.num_svc_entries ); + if ( p_hba->p_svc_entries == NULL ) + { + SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Insufficient Memory.\n") ); + status = IB_INSUFFICIENT_MEMORY; + break; + } + + cl_memcpy ( p_hba->p_svc_entries, + p_ioc_rec->svc_entry_array, + sizeof(ib_svc_entry_t) * p_hba->ioc_info.profile.num_svc_entries); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Found %d Service Entries.\n", + p_hba->ioc_info.profile.num_svc_entries)); + break; + + case IB_PNP_IOC_REMOVE: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("IB_PNP_IOC_REMOVE for %s.\n", + p_hba->ioc_info.profile.id_string) ); + + CL_ASSERT( p_pnp_rec->guid == p_hba->info.guid ); + + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("Hey!!! Our IOC went away.\n") ); + + __srp_disconnect_sessions( p_hba, FALSE ); + __srp_remove_path_records( p_hba ); + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("HBA Object ref_cnt = %d\n", p_hba->obj.ref_cnt) ); + break; + + case IB_PNP_IOC_PATH_ADD: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("IB_PNP_IOC_PATH_ADD (slid:0x%x dlid:0x%x) for %s.\n", + cl_ntoh16(p_ioc_path->path.slid), + cl_ntoh16(p_ioc_path->path.dlid), + p_hba->ioc_info.profile.id_string)); + + p_srp_path_record = __srp_add_path( p_hba, &p_ioc_path->path ); + if ( p_srp_path_record == NULL ) + { + status = IB_INSUFFICIENT_MEMORY; + break; + } + + if ( p_hba->p_srp_path_record == NULL ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Connecting new path to %s.\n", + p_hba->ioc_info.profile.id_string) ); + status = __srp_connect_path( p_hba ); + } + break; + + case IB_PNP_IOC_PATH_REMOVE: + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_PNP, + ("IB_PNP_IOC_PATH_REMOVE (slid:%x dlid:%x) for %s.\n", + cl_ntoh16(p_ioc_path->path.slid), + cl_ntoh16(p_ioc_path->path.dlid), + p_hba->ioc_info.profile.id_string)); + + p_srp_path_record = __srp_remove_path( p_hba, &p_ioc_path->path ); + if ( p_srp_path_record != NULL ) + { + if ( p_srp_path_record == p_hba->p_srp_path_record ) + { + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_PNP, + ("Current path to %s has been lost.\n", + p_hba->ioc_info.profile.id_string) ); + + if ( g_srp_system_shutdown == FALSE ) + { + __srp_disconnect_sessions( p_hba, TRUE ); + status = __srp_connect_path( p_hba ); + } + else + { + __srp_disconnect_sessions( p_hba, FALSE ); + } + } + + cl_free( p_srp_path_record ); + } + break; + + default: + CL_ASSERT( p_pnp_rec->pnp_event == IB_PNP_IOC_ADD || + p_pnp_rec->pnp_event == IB_PNP_IOC_REMOVE || + p_pnp_rec->pnp_event == IB_PNP_IOC_PATH_ADD || + p_pnp_rec->pnp_event == IB_PNP_IOC_PATH_REMOVE ); + break; + } + + SRP_EXIT( SRP_DBG_PNP ); + return status; +} diff --git a/branches/Ndi/ulp/srp/kernel/srp_hba.h b/branches/Ndi/ulp/srp/kernel/srp_hba.h new file mode 100644 index 00000000..96d9e3dd --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_hba.h @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. 
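
__srp_pnp_cb is effectively a four-event state machine. Returning a failure status from IB_PNP_IOC_ADD (IB_INVALID_GUID for a foreign CA or IOC, IB_INSUFFICIENT_MEMORY when the service entry copy fails) rejects the IOC so no path events follow; IB_PNP_IOC_PATH_ADD only initiates a connect when no current path exists; IB_PNP_IOC_PATH_REMOVE reconnects over surviving paths unless the system is shutting down. A condensed model; the event names and result strings are illustrative:

#include <stdio.h>

enum pnp_event { IOC_ADD, IOC_REMOVE, PATH_ADD, PATH_REMOVE };

static int have_current_path = 0;

static const char* on_pnp( enum pnp_event ev, int guid_matches )
{
    switch( ev )
    {
    case IOC_ADD:
        return guid_matches ? "claim IOC, cache profile and services"
                            : "reject (invalid guid)";
    case PATH_ADD:
        if( !have_current_path ) { have_current_path = 1; return "connect new path"; }
        return "remember path for failover";
    case PATH_REMOVE:
        have_current_path = 0;
        return "disconnect, retry remaining paths";
    case IOC_REMOVE:
        return "tear down sessions and path records";
    }
    return "?";
}

int main( void )
{
    printf( "%s\n", on_pnp( IOC_ADD, 0 ) );
    printf( "%s\n", on_pnp( PATH_ADD, 1 ) );
    return 0;
}
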
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _SRP_HBA_H_ +#define _SRP_HBA_H_ + +#include +#include +#include +#include + +#define SRP_MAX_SERVICE_ENTRIES 255 + +typedef struct _srp_session *p_srp_session_t; + + +#pragma warning(disable:4324) +typedef struct _srp_path_record +{ + cl_list_item_t list_item; + ib_path_rec_t path_rec; + +} srp_path_record_t; +#pragma warning(default:4324) + + +typedef struct _srp_hba +{ + cl_obj_t obj; + cl_obj_rel_t rel; + + /* The extension is needed for StorPort calls. */ + struct _srp_ext *p_ext; + + ib_al_handle_t h_al; + ib_pnp_handle_t h_pnp; + + ib_al_ifc_t ifc; + ioc_ifc_data_t info; + + ib_ioc_info_t ioc_info; + ib_svc_entry_t *p_svc_entries; + + srp_path_record_t *p_srp_path_record; + cl_qlist_t path_record_list; + cl_spinlock_t path_record_list_lock; + BOOLEAN adapter_paused; + + uint32_t max_sg; + uint32_t max_srb_ext_sz; + + /* List of sessions indexed by target id */ + p_srp_session_t session_list[SRP_MAX_SERVICE_ENTRIES]; +} srp_hba_t; + + +/* Pointer to the PDO for an instance being initialized. */ +DEVICE_OBJECT *gp_self_do; + + +ib_api_status_t +srp_hba_create( + IN cl_obj_t* const p_drv_obj, + OUT struct _srp_ext* const p_ext ); + +#endif /* _SRP_HBA_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_hca.c b/branches/Ndi/ulp/srp/kernel/srp_hca.c new file mode 100644 index 00000000..8ea75891 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_hca.c @@ -0,0 +1,256 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
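
In srp_hba_t the session list is a fixed array of SRP_MAX_SERVICE_ENTRIES pointers indexed directly by the SCSI TargetId, so a NULL entry means that target is not connected. A sketch of that lookup; the bounds check is an addition for illustration, since the driver itself indexes directly and presumably relies on the maximum target count it reported to StorPort:

#include <stdio.h>

#define SRP_MAX_SERVICE_ENTRIES 255

/* session_t stands in for the driver's srp_session_t. */
typedef struct session { int connected; } session_t;

static session_t *session_list[SRP_MAX_SERVICE_ENTRIES];

static session_t* session_from_target_id( unsigned target_id )
{
    if( target_id >= SRP_MAX_SERVICE_ENTRIES )
        return NULL;                 /* out of range: no such target */
    return session_list[target_id];  /* NULL when not connected */
}

int main( void )
{
    session_t s = { 1 };
    session_list[3] = &s;
    printf( "target 3 -> %p, target 300 -> %p\n",
        (void*)session_from_target_id( 3 ),
        (void*)session_from_target_id( 300 ) );
    return 0;
}
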
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "srp_debug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "srp_hca.tmh" +#endif +#include "srp_event.h" +#include "srp_hca.h" +#include "srp_session.h" + +/* Amount of physical memory to register. */ +#define MEM_REG_SIZE 0xFFFFFFFFFFFFFFFF + + +/* srp_open_ca */ +/*! +Open the channel adapter associated with the SRP initiator +Allocates a protection domain and +Registers all of physical memory + +@param p_hca - pointer to the hca structure +@param p_context - context pointer passed back to callback functions + +@return - result of operation +*/ +ib_api_status_t +srp_open_ca( + IN OUT srp_hca_t *p_hca, + IN void *p_context ) +{ + ib_api_status_t status; + ib_phys_create_t phys_create; + ib_phys_range_t phys_range; + mlnx_fmr_pool_create_t fmr_pool_create; + + SRP_ENTER( SRP_DBG_PNP ); + + status = p_hca->p_hba->ifc.open_ca( p_hca->p_hba->h_al, + p_hca->p_hba->info.ca_guid, srp_async_event_handler_cb, + p_context, &p_hca->h_ca ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to open Channel Adapter. Status = %d\n", status) ); + goto exit; + } + + status = p_hca->p_hba->ifc.alloc_pd( p_hca->h_ca, + IB_PDT_NORMAL, + p_context, + &p_hca->h_pd ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Failed to create Protection Domain. Status = %d\n", status) ); + goto exit; + } + + /* Register all of physical memory */ + phys_create.length = MEM_REG_SIZE; + phys_create.num_ranges = 1; + phys_create.range_array = &phys_range; + phys_create.buf_offset = 0; + phys_create.hca_page_size = PAGE_SIZE; + phys_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_MW_BIND; + + phys_range.base_addr = 0; + phys_range.size = MEM_REG_SIZE; + + p_hca->vaddr = 0; + + + status = p_hca->p_hba->ifc.reg_phys( p_hca->h_pd, + &phys_create, + &p_hca->vaddr, + &p_hca->lkey, + &p_hca->rkey, + &p_hca->h_mr ); + + if( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Physical Memory Registration Failure. Status = %d\n", status) ); + goto exit; + } + + fmr_pool_create.max_pages_per_fmr = SRP_MAX_SG_IN_INDIRECT_DATA_BUFFER; + fmr_pool_create.page_size = 12; + fmr_pool_create.access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE; + fmr_pool_create.pool_size = 100; + fmr_pool_create.dirty_watermark = 2; + fmr_pool_create.flush_function = NULL; + fmr_pool_create.flush_arg = NULL; + fmr_pool_create.cache = TRUE; + + status = p_hca->p_hba->ifc.create_mlnx_fmr_pool(p_hca->h_pd, &fmr_pool_create, &p_hca->h_fmr_pool); + + if( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("FMR pool creation Failure. Status = %d\n", status) ); + goto exit; + } + + p_hca->fmr_page_size = 1<< fmr_pool_create.page_size; + p_hca->fmr_page_shift = (uint32_t)fmr_pool_create.page_size; + + SRP_EXIT( SRP_DBG_PNP ); + return IB_SUCCESS; +exit: + srp_close_ca( p_hca ); + + SRP_EXIT( SRP_DBG_PNP ); + return ( status ); +} + +/* srp_close_ca */ +/*! 
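
Note that in srp_open_ca the fmr_pool_create.page_size field is a log2 value: 12 means 4 KB pages, which is why the code derives fmr_page_size as 1 << page_size and keeps fmr_page_shift alongside it for the data path. A worked example of the shift arithmetic a mapping routine can then use:

#include <stdio.h>
#include <stdint.h>

int main( void )
{
    /* The pool above is created with page_size = 12, a log2 value. */
    uint32_t fmr_page_shift = 12;
    uint32_t fmr_page_size  = 1u << fmr_page_shift;   /* 4096 bytes */

    /* Pages an FMR mapping needs for a 10000-byte buffer at offset 100:
     * round the start down and the end up to page boundaries. */
    uint64_t offset = 100, length = 10000;
    uint64_t first = offset >> fmr_page_shift;
    uint64_t last  = (offset + length - 1) >> fmr_page_shift;

    printf( "page size %u, pages needed %llu\n",
            fmr_page_size, (unsigned long long)(last - first + 1) );
    return 0;
}
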
+Closes the channel adapter + +@param p_hca - pointer to the hca structure + +@return - none +*/ +void +srp_close_ca( + IN OUT srp_hca_t *p_hca ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + if( p_hca->h_ca ) + { + p_hca->p_hba->ifc.close_ca( p_hca->h_ca, ib_sync_destroy ); + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Closed Channel Adapter.\n") ); + } + + cl_memclr( p_hca, sizeof( *p_hca ) ); + + SRP_EXIT( SRP_DBG_PNP ); +} + +/* srp_get_responder_resources */ +/*! +Queries the channel adapter for the number of +outstanding atomic/rdma operations it supports + +@param p_hca - pointer to the hca structure +@param p_responder_resources - value to hold responder resource count + +@return - result of operation +*/ +ib_api_status_t +srp_get_responder_resources( + IN srp_hca_t *p_hca, + OUT uint8_t *p_responder_resources ) +{ + ib_api_status_t status; + ib_ca_attr_t *p_ca_attr = NULL; + uint32_t ca_attr_size = 0; + + SRP_ENTER( SRP_DBG_PNP ); + + status = p_hca->p_hba->ifc.query_ca( p_hca->h_ca, NULL, &ca_attr_size ); + if ( status != IB_INSUFFICIENT_MEMORY ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Query Channel Adapter. Status = %d\n", status) ); + goto exit; + } + + p_ca_attr = cl_zalloc( ca_attr_size ); + if ( p_ca_attr == NULL ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Memory Allocation Error: Cannot Create CA Attributes.\n") ); + goto exit; + } + + status = p_hca->p_hba->ifc.query_ca( p_hca->h_ca, p_ca_attr, &ca_attr_size ); + if ( status != IB_SUCCESS ) + { + SRP_PRINT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR, + ("Cannot Query Channel Adapter. Status = %d\n", status) ); + } + else + { + *p_responder_resources = p_ca_attr->max_qp_resp_res; + } + + cl_free ( p_ca_attr ); + +exit: + SRP_EXIT( SRP_DBG_PNP ); + + return ( status ); +} + +/* srp_init_hca */ +/*! +Initializes hca resources + +@param p_hca - pointer to the hca structure + +@return - result of initialization +*/ +ib_api_status_t +srp_init_hca( + IN OUT srp_hca_t *p_hca, + IN srp_hba_t *p_hba ) +{ + SRP_ENTER( SRP_DBG_PNP ); + + cl_memclr( p_hca, sizeof( *p_hca ) ); + + p_hca->p_hba = p_hba; + + SRP_EXIT( SRP_DBG_PNP ); + + return ( IB_SUCCESS ); +} + + diff --git a/branches/Ndi/ulp/srp/kernel/srp_hca.h b/branches/Ndi/ulp/srp/kernel/srp_hca.h new file mode 100644 index 00000000..897a3e9d --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_hca.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
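
srp_get_responder_resources uses the classic two-call sizing idiom: the first query_ca with a NULL attribute buffer is expected to fail with IB_INSUFFICIENT_MEMORY while reporting the required size, and any other status at that step is treated as a hard error. The same contract in a standalone sketch; query_attr and the status codes are stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define E_OK    0
#define E_NOMEM 1   /* stand-in for IB_INSUFFICIENT_MEMORY */

/* Stub query with the ifc.query_ca contract: given no buffer (or one too
 * small) it reports the needed size and fails with "insufficient memory". */
static int query_attr( char *buf, size_t *size )
{
    const char *attr = "max_qp_resp_res=4";
    if( buf == NULL || *size < strlen( attr ) + 1 )
    {
        *size = strlen( attr ) + 1;
        return E_NOMEM;
    }
    strcpy( buf, attr );
    return E_OK;
}

int main( void )
{
    size_t size = 0;
    char *buf;

    if( query_attr( NULL, &size ) != E_NOMEM )    /* 1st call: size probe */
        return 1;
    buf = malloc( size );
    if( buf && query_attr( buf, &size ) == E_OK ) /* 2nd call: real query */
        printf( "%s\n", buf );
    free( buf );
    return 0;
}
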
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + + +#ifndef _SRP_HCA_H_ +#define _SRP_HCA_H_ + + +#include +#include "srp_hba.h" + +typedef struct _srp_hca +{ + srp_hba_t *p_hba; + + ib_ca_handle_t h_ca; + ib_pd_handle_t h_pd; + ib_mr_handle_t h_mr; + mlnx_fmr_pool_handle_t h_fmr_pool; + uint32_t fmr_page_size; + uint32_t fmr_page_shift; + uint64_t vaddr; + net32_t lkey; + net32_t rkey; + +} srp_hca_t; + +ib_api_status_t +srp_open_ca( + IN OUT srp_hca_t *p_hca, + IN void *p_context ); + +void +srp_close_ca( + IN OUT srp_hca_t *p_hca ); + +ib_api_status_t +srp_get_responder_resources( + IN srp_hca_t *p_hca, + OUT uint8_t *p_responder_resources ); + +ib_api_status_t +srp_init_hca( + IN OUT srp_hca_t *p_hca, + IN srp_hba_t *p_hba ); + +#endif /* _SRP_HCA_H_ */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_i_logout.h b/branches/Ndi/ulp/srp/kernel/srp_i_logout.h new file mode 100644 index 00000000..55c0188c --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_i_logout.h @@ -0,0 +1,163 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_I_LOGOUT_H_INCLUDED +#define SRP_I_LOGOUT_H_INCLUDED + +#include "srp.h" +#include "srp_iu_buffer.h" +#include "srp_information_unit.h" + +/* set_srp_i_logout_tag */ +/*! +Sets the tag field of an initiator logout information unit + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value of IU + +@return - none +*/ +static inline +void +set_srp_i_logout_tag( + IN OUT srp_i_logout_t *p_information_unit, + IN uint64_t iu_tag ) +{ + set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag ); +} + +/* init_srp_i_logout */ +/*! 
+Initializes the initiator logout IU to zeroes +and sets the IU type to Srp Initiator Logout +and sets the tag to the value supplied + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value to be used for the req/rsp pair + +@return - none +*/ +static inline +void +init_srp_i_logout( + IN OUT srp_i_logout_t *p_information_unit, + IN uint64_t iu_tag ) +{ + init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_I_LOGOUT ) ; + set_srp_i_logout_tag( p_information_unit, iu_tag ); +} + +/* setup_srp_i_logout */ +/*! +Initializes and sets the Srp Initiator Logout IU to the values supplied + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value to be used for the req/rsp pair + +@return - none +*/ +static inline +void +setup_srp_i_logout( + IN OUT srp_i_logout_t *p_information_unit, + IN uint64_t iu_tag ) +{ + init_srp_i_logout( p_information_unit, iu_tag ); +} + +/* get_srp_i_logout_tag */ +/*! +Returns the value of the tag field of an initiator logout + +@param p_information_unit - pointer to the IU structure + +@return - tag value +*/ +static inline +uint64_t +get_srp_i_logout_tag( + IN srp_i_logout_t *p_information_unit ) +{ + return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) ); +} + +/* get_srp_i_logout_length */ +/*! +Returns the size in bytes of the Srp Initiator Logout IU + +@param p_information_unit - pointer to the IU structure + +@return - tag value +*/ +static inline +uint32_t +get_srp_i_logout_length( + IN srp_i_logout_t *p_information_unit ) +{ + return( sizeof( *p_information_unit ) ); +} + +/* set_srp_i_logout_from_host_to_network */ +/*! +Swaps the IU fields from Host to Network ordering + +@param p_information_unit - pointer to the IU structure + +@return - none +*/ + +static inline +void +set_srp_i_logout_from_host_to_network( + IN OUT srp_i_logout_t *p_information_unit ) +{ + set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit ); +} + +/* set_srp_i_logout_from_network_to_host */ +/*! +Swaps the IU fields from Network to Host ordering + +@param p_information_unit - pointer to the IU structure + +@return - none +*/ + +static inline +void +set_srp_i_logout_from_network_to_host( + IN OUT srp_i_logout_t *p_information_unit ) +{ + set_srp_i_logout_from_host_to_network ( p_information_unit ); +} + +#endif /* SRP_I_LOGOUT_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_information_unit.h b/branches/Ndi/ulp/srp/kernel/srp_information_unit.h new file mode 100644 index 00000000..6a6e5d67 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_information_unit.h @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_INFORMATION_UNIT_H_INCLUDED +#define SRP_INFORMATION_UNIT_H_INCLUDED + +#include "srp_iu_buffer.h" +#include + +/* set_srp_information_unit_tag */ +/*! +Set the Information Unit tag for the Srp buffer + +@param p_information_unit - pointer to the IU structure +@param iuTag - IU tag value +*/ +static inline +void +set_srp_information_unit_tag( + IN OUT srp_information_unit_t *p_information_unit, + uint64_t iu_tag ) +{ + p_information_unit->tag = iu_tag; +} + +/* get_srp_information_unit_tag */ +/*! +Returns the Information Unit tag for the Srp buffer + +@param p_information_unit - pointer to the IU structure + +@return - IU tag field value +*/ +static inline +uint64_t +get_srp_information_unit_tag( + IN srp_information_unit_t *p_information_unit ) +{ + return( p_information_unit->tag ); +} + +/* set_srp_information_unit_from_host_to_network */ +/*! +Swaps the tag field bytes from Host to Network ordering + +@param p_information_unit - pointer to the IU structure + +@return - NONE +*/ + +static inline +void +set_srp_information_unit_from_host_to_network( + IN OUT srp_information_unit_t *p_information_unit ) +{ + UNUSED_PARAM( p_information_unit ); +// p_information_unit->tag = cl_hton64( p_information_unit->tag ); +} + +/* set_srp_information_unit_from_network_to_host */ +/*! +Swaps the tag field bytes from Network To Host ordering + +@param p_information_unit - pointer to the IU structure + +@return - NONE +*/ + +static inline +void +set_srp_information_unit_from_network_to_host( + IN OUT srp_information_unit_t *p_information_unit ) +{ + set_srp_information_unit_from_host_to_network ( p_information_unit ); +} + +#endif /* SRP_INFORMATION_UNIT_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_iu_buffer.h b/branches/Ndi/ulp/srp/kernel/srp_iu_buffer.h new file mode 100644 index 00000000..5939e4b7 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_iu_buffer.h @@ -0,0 +1,142 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
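
Note that set_srp_information_unit_from_host_to_network above is deliberately a no-op, with the cl_hton64 call left commented out. That is sound if, as appears to be the assumption, the tag is an opaque cookie the target echoes back verbatim: the initiator only ever compares it against its own host-order value, so no canonical byte order is needed. A toy round trip; target_echo stands in for the remote side:

#include <stdio.h>
#include <stdint.h>

static uint64_t target_echo( uint64_t wire_tag ) { return wire_tag; }

int main( void )
{
    uint64_t tag = 0x0123456789ABCDEFULL;   /* host-order cookie */
    uint64_t rsp = target_echo( tag );      /* comes back bit-identical */
    printf( "match = %d\n", rsp == tag );
    return 0;
}

Keeping the empty swap function preserves symmetric call sites, so a real conversion could be reinstated without touching callers.
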
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#ifndef SRP_IU_BUFFER_H_INCLUDED +#define SRP_IU_BUFFER_H_INCLUDED + +#include "srp.h" + +/* set_srp_iu_buffer_type */ +/*! +Set the Information Unit type for the Srp IU buffer + +@param p_buffer - pointer to the IU buffer +@param iu_type - IU structure type +*/ +static inline +void +set_srp_iu_buffer_type( + IN OUT srp_iu_buffer_t *p_buffer, + IN uint8_t iu_type ) +{ + p_buffer->information_unit.type = iu_type; +} + +/* init_srp_iu_buffer */ +/*! +Initialize the Srp IU buffer to 0 and set it's type + +@param p_buffer - pointer to the IU buffer +@param iu_type - IU structure type +*/ +static inline +void +init_srp_iu_buffer( + IN OUT srp_iu_buffer_t *p_buffer, + IN uint8_t iu_type ) +{ + size_t iu_size = 0; + + switch( iu_type ) + { + case SRP_RSP: + /* don't dirty the second cache line */ + iu_size = offsetof( srp_rsp_t, data_out_residual_count ); + break; + + case SRP_LOGIN_REQ: + iu_size = sizeof( srp_login_req_t ); + break; + + case SRP_TSK_MGMT: + iu_size = sizeof( srp_tsk_mgmt_t ); + break; + + case SRP_CMD: + iu_size = sizeof( srp_cmd_t ); + break; + + case SRP_I_LOGOUT: + iu_size = sizeof( srp_i_logout_t ); + break; + + case SRP_LOGIN_RSP: + iu_size = sizeof( srp_login_rsp_t ); + break; + + case SRP_LOGIN_REJ: + iu_size = sizeof( srp_login_rej_t ); + break; + + case SRP_T_LOGOUT: + iu_size = sizeof( srp_t_logout_t ); + break; + + case SRP_CRED_REQ: + iu_size = sizeof( srp_cred_req_t ); + break; + + case SRP_AER_REQ: + iu_size = sizeof( srp_aer_req_t ); + break; + + case SRP_CRED_RSP: + iu_size = sizeof( srp_cred_rsp_t ); + break; + + case SRP_AER_RSP: + iu_size = sizeof( srp_aer_rsp_t ); + break; + } + + memset( p_buffer, 0, iu_size ); + + set_srp_iu_buffer_type( p_buffer, iu_type ); +} + +/* get_srp_iu_buffer_type */ +/*! +Returns the Information Unit type for the Srp buffer + +@param p_buffer - pointer to the IU structure + +@return - IU type value +*/ +static inline +uint8_t +get_srp_iu_buffer_type( + IN srp_iu_buffer_t *p_buffer ) +{ + return( p_buffer->information_unit.type ); +} + +#endif /* SRP_IU_BUFFER_H_INCLUDED */ diff --git a/branches/Ndi/ulp/srp/kernel/srp_login_rej.h b/branches/Ndi/ulp/srp/kernel/srp_login_rej.h new file mode 100644 index 00000000..99b66653 --- /dev/null +++ b/branches/Ndi/ulp/srp/kernel/srp_login_rej.h @@ -0,0 +1,262 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. 
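
init_srp_iu_buffer above sizes its memset per IU type, and for SRP_RSP it stops at offsetof( srp_rsp_t, data_out_residual_count ) so the rarely used residual and sense area in the second cache line is never written. A standalone demonstration of that partial-zeroing trick; the struct layout is illustrative, not the real srp_rsp_t:

#include <stdio.h>
#include <stddef.h>
#include <string.h>
#include <stdint.h>

/* Illustrative layout: a hot header that is always zeroed and a cold tail
 * (residuals/sense) left untouched, like the SRP_RSP case above. */
typedef struct rsp
{
    uint8_t  type;
    uint8_t  flags;
    uint64_t tag;
    uint32_t data_out_residual_count;  /* zeroing stops here */
    uint8_t  sense_data[64];
} rsp_t;

int main( void )
{
    rsp_t iu;
    size_t head = offsetof( rsp_t, data_out_residual_count );

    memset( &iu, 0xAA, sizeof(iu) );   /* simulate stale contents */
    memset( &iu, 0, head );            /* zero only the hot first bytes */

    printf( "zeroed %zu of %zu bytes; sense[0]=0x%X (untouched)\n",
            head, sizeof(iu), iu.sense_data[0] );
    return 0;
}
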
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_LOGIN_REJ_H_INCLUDED
+#define SRP_LOGIN_REJ_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_login_reject_tag */
+/*!
+Sets the tag field of a login reject information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_login_reject_tag(
+	IN OUT srp_login_rej_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_login_reject */
+/*!
+Initializes the login reject IU to zeroes,
+sets the IU type to Srp Login Reject,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_login_reject(
+	IN OUT srp_login_rej_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_LOGIN_REJ );
+	set_srp_login_reject_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_login_reject_reason */
+/*!
+Sets the reason for the rejection
+
+@param p_information_unit - pointer to the IU structure
+@param reason - rejection reason code
+
+@return - none
+*/
+static inline
+void
+set_srp_login_reject_reason(
+	IN OUT srp_login_rej_t *p_information_unit,
+	IN LOGIN_REJECT_CODE reason )
+{
+	p_information_unit->reason = reason;
+}
+
+/* set_srp_login_reject_supported_data_buffer_formats */
+/*!
+Sets the flags indicating the type of data buffer descriptors
+which are supported by the target on this channel
+
+@param p_information_unit - pointer to the IU structure
+@param data_buffer_descriptor_formats - usage indicator values
+
+@return - none
+*/
+static inline
+void
+set_srp_login_reject_supported_data_buffer_formats(
+	IN OUT srp_login_rej_t *p_information_unit,
+	IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats )
+{
+	p_information_unit->sup_buffer_fmts.flags = 0;
+
+	if ( data_buffer_descriptor_formats & DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR )
+	{
+		p_information_unit->sup_buffer_fmts.flags |= DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+
+	if ( data_buffer_descriptor_formats & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS )
+	{
+		p_information_unit->sup_buffer_fmts.flags |= INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+}
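+
+/*
+ * Usage sketch (illustrative addition): a target building and byte-swapping
+ * a login reject before posting it.  iu_tag and reject_reason are
+ * placeholders chosen by the caller, not values from the surrounding code.
+ *
+ *	srp_login_rej_t login_rej;
+ *	LOGIN_REJECT_CODE reject_reason;	// chosen by the target's login logic
+ *
+ *	init_srp_login_reject( &login_rej, iu_tag );
+ *	set_srp_login_reject_reason( &login_rej, reject_reason );
+ *	set_srp_login_reject_supported_data_buffer_formats( &login_rej,
+ *		DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR );
+ *	set_srp_login_reject_from_host_to_network( &login_rej );	// defined below
+ */
+
+/* setup_srp_login_reject */
+/*!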
+Initializes and sets the Srp Login Reject IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param reason - reason code for login rejection
+@param data_buffer_descriptor_formats - usage indicator values
+
+@return - none
+*/
+static inline
+void
+setup_srp_login_reject(
+	IN OUT srp_login_rej_t *p_information_unit,
+	IN uint64_t iu_tag,
+	IN LOGIN_REJECT_CODE reason,
+	IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats )
+{
+	init_srp_login_reject( p_information_unit, iu_tag );
+	set_srp_login_reject_reason( p_information_unit, reason );
+	set_srp_login_reject_supported_data_buffer_formats( p_information_unit, data_buffer_descriptor_formats );
+}
+
+/* get_srp_login_reject_tag */
+/*!
+Returns the value of the tag field of a login reject
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_login_reject_tag(
+	IN srp_login_rej_t *p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_login_reject_reason */
+/*!
+Returns the value of the reason code field of a login reject
+
+@param p_information_unit - pointer to the IU structure
+
+@return - reason code value
+*/
+static inline
+LOGIN_REJECT_CODE
+get_srp_login_reject_reason(
+	IN srp_login_rej_t *p_information_unit )
+{
+	return( ( LOGIN_REJECT_CODE ) p_information_unit->reason );
+}
+
+/* get_srp_login_reject_supported_data_buffer_formats */
+/*!
+Returns the supported data buffer formats that can be used on the channel
+
+@param p_information_unit - pointer to the IU structure
+
+@return - supported data buffer formats settings
+*/
+static inline
+DATA_BUFFER_DESCRIPTOR_FORMAT
+get_srp_login_reject_supported_data_buffer_formats(
+	IN srp_login_rej_t *p_information_unit )
+{
+	switch ( p_information_unit->sup_buffer_fmts.flags & DATA_BUFFER_DESCRIPTOR_FORMAT_MASK )
+	{
+	case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR );
+
+	case INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS );
+
+	case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED | INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( ( DATA_BUFFER_DESCRIPTOR_FORMAT ) ( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR | DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) );
+
+	default:
+		return( DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT );
+	}
+}
+
+/* get_srp_login_reject_length */
+/*!
+Returns the size in bytes of the Srp Login Reject IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size in bytes of the IU
+*/
+static inline
+uint32_t
+get_srp_login_reject_length(
+	IN srp_login_rej_t *p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_login_reject_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_reject_from_host_to_network(
+	IN OUT srp_login_rej_t *p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->reason = cl_hton32( p_information_unit->reason );
+}
+
+/* set_srp_login_reject_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_reject_from_network_to_host(
+	IN OUT srp_login_rej_t *p_information_unit )
+{
+	set_srp_login_reject_from_host_to_network( p_information_unit );
+}
+
+#endif /* SRP_LOGIN_REJ_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_login_req.h b/branches/Ndi/ulp/srp/kernel/srp_login_req.h
new file mode 100644
index 00000000..24487ba9
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_login_req.h
@@ -0,0 +1,364 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_LOGIN_REQ_H_INCLUDED
+#define SRP_LOGIN_REQ_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+#include <complib/cl_byteswap.h>	/* cl_hton32 (assumed include target) */
+
+/* set_srp_login_request_tag */
+/*!
+Sets the tag field of the login request IU to the supplied value
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_tag(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_login_request */
+/*!
+Initializes the login request IU to zeroes,
+sets the IU type to Srp Login Request,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_login_request(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_LOGIN_REQ );
+	set_srp_login_request_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_login_request_req_max_init_to_targ_iu */
+/*!
+Sets the maximum sized IU to be sent on this channel from initiator to target
+
+@param p_information_unit - pointer to the IU structure
+@param req_max_init_to_targ_iu - max initiator to target IU size (64 or greater)
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_req_max_init_to_targ_iu(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN uint32_t req_max_init_to_targ_iu )
+{
+	p_information_unit->req_max_init_to_targ_iu = req_max_init_to_targ_iu;
+}
+
+/* set_srp_login_request_required_data_buffer_formats */
+/*!
+Sets the flags indicating whether or not the initiator will use
+target support of direct/indirect data buffer descriptors on this channel
+
+@param p_information_unit - pointer to the IU structure
+@param data_buffer_descriptor_formats - usage indicator values
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_required_data_buffer_formats(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats )
+{
+	p_information_unit->req_buffer_fmts.flags = 0;
+
+	if ( data_buffer_descriptor_formats & DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR )
+	{
+		p_information_unit->req_buffer_fmts.flags |= DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+
+	if ( data_buffer_descriptor_formats & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS )
+	{
+		p_information_unit->req_buffer_fmts.flags |= INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+}
+
+/* set_srp_login_request_multi_channel_action */
+/*!
+Sets the value indicating how existing RDMA channels associated with the
+same I_T nexus specified by the Initiator Port Identifier and Target Port
+Identifier fields are to be treated.  They can either be terminated or
+allowed to continue processing.
+
+@param p_information_unit - pointer to the IU structure
+@param multi_channel_action - value indicating action to be applied to
+       existing RDMA channels
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_multi_channel_action(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN MULTI_CHANNEL_ACTION multi_channel_action )
+{
+	p_information_unit->flags |= multi_channel_action;
+}
+
+/* set_srp_login_request_it_nexus */
+/*!
+Sets the I_T nexus value
+
+@param p_information_unit - pointer to the IU structure
+@param p_initiator_port_id - initiator's port id value
+@param p_target_port_id - target's port id value
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_it_nexus(
+	IN OUT srp_login_req_t *p_information_unit,
+	IN srp_ib_port_id_t *p_initiator_port_id,
+	IN srp_ib_port_id_t *p_target_port_id )
+{
+	RtlCopyMemory( &p_information_unit->initiator_port_id,
+		p_initiator_port_id, sizeof(srp_ib_port_id_t) );
+	RtlCopyMemory( &p_information_unit->target_port_id,
+		p_target_port_id, sizeof(srp_ib_port_id_t) );
+}
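+
+/*
+ * Usage sketch (illustrative addition): an initiator filling in a login
+ * request with the setters above.  iu_tag, multi_channel_action,
+ * init_port_id and targ_port_id are caller-supplied placeholders; 64 is the
+ * minimum IU size the documentation above allows.
+ *
+ *	srp_login_req_t login_req;
+ *
+ *	init_srp_login_request( &login_req, iu_tag );
+ *	set_srp_login_request_req_max_init_to_targ_iu( &login_req, 64 );
+ *	set_srp_login_request_required_data_buffer_formats( &login_req,
+ *		DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS );
+ *	set_srp_login_request_multi_channel_action( &login_req, multi_channel_action );
+ *	set_srp_login_request_it_nexus( &login_req, &init_port_id, &targ_port_id );
+ */
+
+/* setup_srp_login_request */
+/*!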
+Initializes and sets the Srp Login Request IU to the values supplied + +@param p_information_unit - pointer to the IU structure +@param iu_tag - tag value to be used for the req/rsp pair +@param req_max_init_to_targ_iu - max initiator to target IU size (64 or greater) +@param data_buffer_descriptor_formats - usage indicator values +@param multi_channel_action - value indicating action to be applied to existing RDMA channels +@param p_initiator_port_id - initiator's port id value (for I_T nexus) +@param p_target_port_id - target's port id value (for I_T nexus) + +@return - none +*/ +static inline +void +setup_srp_login_request( + IN OUT srp_login_req_t *p_information_unit, + IN uint64_t iu_tag, + IN uint32_t req_max_init_to_targ_iu, + IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats, + IN MULTI_CHANNEL_ACTION multi_channel_action, + IN srp_ib_port_id_t *p_initiator_port_id, + IN srp_ib_port_id_t *p_target_port_id ) +{ + init_srp_login_request( p_information_unit, iu_tag ); + set_srp_login_request_req_max_init_to_targ_iu( p_information_unit, req_max_init_to_targ_iu ); + set_srp_login_request_required_data_buffer_formats( p_information_unit, data_buffer_descriptor_formats ); + set_srp_login_request_multi_channel_action( p_information_unit, multi_channel_action ); + set_srp_login_request_it_nexus( p_information_unit, p_initiator_port_id, p_target_port_id ); +} + +/* get_srp_login_request_tag */ +/*! +Returns the value of the tag field of a login request + +@param p_information_unit - pointer to the IU structure + +@return - tag value +*/ +static inline +uint64_t +get_srp_login_request_tag( + IN srp_login_req_t *p_information_unit ) +{ + return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) ); +} + +/* get_srp_login_request_req_max_init_to_targ_iu */ +/*! +Returns the requested max initiator to target information unit size + +@param p_information_unit - pointer to the IU structure + +@return - requested max initiator to target information unit size value +*/ +static inline +uint32_t +get_srp_login_request_req_max_init_to_targ_iu( + IN srp_login_req_t *p_information_unit ) +{ + return( p_information_unit->req_max_init_to_targ_iu ); +} + +/* get_srp_login_request_required_data_buffer_formats */ +/*! +Returns the required data buffer formats to be used on the channel + +@param p_information_unit - pointer to the IU structure + +@return - required data buffer formats settings +*/ +static inline +DATA_BUFFER_DESCRIPTOR_FORMAT +get_srp_login_request_required_data_buffer_formats( + IN srp_login_req_t *p_information_unit ) +{ + switch ( p_information_unit->req_buffer_fmts.flags & DATA_BUFFER_DESCRIPTOR_FORMAT_MASK ) + { + case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED: + return( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR ); + + case INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED: + return( DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ); + + case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED | INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED: + return( ( DATA_BUFFER_DESCRIPTOR_FORMAT ) ( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR | DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) ); + + default: + return( DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT ); + } +} + +/* get_srp_login_request_multi_channel_action */ +/*! 
+Returns the multi channel action setting
+
+@param p_information_unit - pointer to the IU structure
+
+@return - multi channel action setting
+*/
+static inline
+MULTI_CHANNEL_ACTION
+get_srp_login_request_multi_channel_action(
+	IN srp_login_req_t *p_information_unit )
+{
+	return( ( MULTI_CHANNEL_ACTION ) ( p_information_unit->flags & MULTI_CHANNEL_ACTION_MASK ) );
+}
+
+/* get_srp_login_request_initiator_port_id */
+/*!
+Returns the initiator port identifier
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to initiator port id value
+*/
+static inline
+srp_ib_port_id_t*
+get_srp_login_request_initiator_port_id(
+	IN srp_login_req_t *p_information_unit )
+{
+	return( &p_information_unit->initiator_port_id );
+}
+
+/* get_srp_login_request_target_port_id */
+/*!
+Returns the target port identifier
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to target port id value
+*/
+static inline
+srp_ib_port_id_t*
+get_srp_login_request_target_port_id(
+	IN srp_login_req_t *p_information_unit )
+{
+	return( &p_information_unit->target_port_id );
+}
+
+/* get_srp_login_request_length */
+/*!
+Returns the size in bytes of the Srp Login Request IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size in bytes of the IU
+*/
+static inline
+uint32_t
+get_srp_login_request_length(
+	IN srp_login_req_t *p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_login_request_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_from_host_to_network(
+	IN OUT srp_login_req_t *p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->req_max_init_to_targ_iu = cl_hton32( p_information_unit->req_max_init_to_targ_iu );
+}
+
+/* set_srp_login_request_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_request_from_network_to_host(
+	IN OUT srp_login_req_t *p_information_unit )
+{
+	set_srp_login_request_from_host_to_network( p_information_unit );
+}
+
+#endif /* SRP_LOGIN_REQ_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_login_rsp.h b/branches/Ndi/ulp/srp/kernel/srp_login_rsp.h
new file mode 100644
index 00000000..38561c14
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_login_rsp.h
@@ -0,0 +1,382 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_LOGIN_RSP_H_INCLUDED
+#define SRP_LOGIN_RSP_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+#include <complib/cl_byteswap.h>	/* cl_hton32 (assumed include target) */
+
+/* set_srp_login_response_tag */
+/*!
+Sets the tag field of a login response information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_tag(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_login_response */
+/*!
+Initializes the login response IU to zeroes,
+sets the IU type to Srp Login Response,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_login_response(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_LOGIN_RSP );
+	set_srp_login_response_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_login_response_request_limit_delta */
+/*!
+Sets the request limit delta value for flow control
+
+@param p_information_unit - pointer to the IU structure
+@param request_limit_delta - flow control request limit delta value
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_request_limit_delta(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN int32_t request_limit_delta )
+{
+	p_information_unit->request_limit_delta = request_limit_delta;
+}
+
+/* set_srp_login_response_max_init_to_targ_iu */
+/*!
+Sets the maximum sized IU to be sent on this channel from initiator to target
+
+@param p_information_unit - pointer to the IU structure
+@param max_init_to_targ_iu - max initiator to target IU size (64 or greater)
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_max_init_to_targ_iu(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN uint32_t max_init_to_targ_iu )
+{
+	p_information_unit->max_init_to_targ_iu = max_init_to_targ_iu;
+}
+
+/* set_srp_login_response_max_targ_to_init_iu */
+/*!
+Sets the maximum sized IU to be sent on this channel from target to initiator
+
+@param p_information_unit - pointer to the IU structure
+@param max_targ_to_init_iu - max target to initiator IU size (64 or greater)
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_max_targ_to_init_iu(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN uint32_t max_targ_to_init_iu )
+{
+	p_information_unit->max_targ_to_init_iu = max_targ_to_init_iu;
+}
+
+/* set_srp_login_response_supported_data_buffer_formats */
+/*!
+Sets the flags indicating whether or not the target can and will use
+direct/indirect data buffer descriptors on this channel
+
+@param p_information_unit - pointer to the IU structure
+@param data_buffer_descriptor_formats - usage indicator values
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_supported_data_buffer_formats(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats )
+{
+	p_information_unit->sup_buffer_fmts.flags = 0;
+
+	if ( data_buffer_descriptor_formats & DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR )
+	{
+		p_information_unit->sup_buffer_fmts.flags |= DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+
+	if ( data_buffer_descriptor_formats & DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS )
+	{
+		p_information_unit->sup_buffer_fmts.flags |= INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED;
+	}
+}
+
+/* set_srp_login_response_multi_channel_result */
+/*!
+Sets the value indicating how existing RDMA channels associated with the
+same I_T nexus specified by the Initiator Port Identifier and Target Port
+Identifier fields were treated.  They can either be terminated or allowed
+to continue processing.
+
+@param p_information_unit - pointer to the IU structure
+@param multi_channel_result - value indicating action applied to
+       existing RDMA channels
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_multi_channel_result(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN MULTI_CHANNEL_RESULT multi_channel_result )
+{
+	p_information_unit->flags |= multi_channel_result;
+}
+
+/* setup_srp_login_response */
+/*!
+Initializes and sets the Srp Login Response IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param request_limit_delta - flow control request limit delta value
+@param max_init_to_targ_iu - max initiator to target IU size (64 or greater)
+@param max_targ_to_init_iu - max target to initiator IU size (64 or greater)
+@param data_buffer_descriptor_formats - usage indicator values
+@param multi_channel_result - value indicating action applied to existing RDMA channels
+
+@return - none
+*/
+static inline
+void
+setup_srp_login_response(
+	IN OUT srp_login_rsp_t *p_information_unit,
+	IN uint64_t iu_tag,
+	IN int32_t request_limit_delta,
+	IN uint32_t max_init_to_targ_iu,
+	IN uint32_t max_targ_to_init_iu,
+	IN DATA_BUFFER_DESCRIPTOR_FORMAT data_buffer_descriptor_formats,
+	IN MULTI_CHANNEL_RESULT multi_channel_result )
+{
+	init_srp_login_response( p_information_unit, iu_tag );
+	set_srp_login_response_request_limit_delta( p_information_unit, request_limit_delta );
+	set_srp_login_response_max_init_to_targ_iu( p_information_unit, max_init_to_targ_iu );
+	set_srp_login_response_max_targ_to_init_iu( p_information_unit, max_targ_to_init_iu );
+	set_srp_login_response_supported_data_buffer_formats( p_information_unit, data_buffer_descriptor_formats );
+	set_srp_login_response_multi_channel_result( p_information_unit, multi_channel_result );
+}
+
+/* get_srp_login_response_tag */
+/*!
+Returns the value of the tag field of a login response
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_login_response_tag(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
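+
+/*
+ * Usage sketch (illustrative addition): a target accepting a login.  The
+ * numeric values and multi_channel_result are placeholders chosen by the
+ * caller; request_limit_delta seeds the initiator's flow-control credit and
+ * the two IU sizes must be 64 or greater.
+ *
+ *	srp_login_rsp_t login_rsp;
+ *
+ *	setup_srp_login_response( &login_rsp, iu_tag,
+ *		16,			// request_limit_delta
+ *		4096, 4096,	// max init-to-targ / targ-to-init IU sizes
+ *		DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR,
+ *		multi_channel_result );
+ *	set_srp_login_response_from_host_to_network( &login_rsp );	// defined below
+ */
+
+/* get_srp_login_response_request_limit_delta */
+/*!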
+Returns the value of the request limit delta field of a login response
+
+@param p_information_unit - pointer to the IU structure
+
+@return - request limit delta value
+*/
+static inline
+int32_t
+get_srp_login_response_request_limit_delta(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( p_information_unit->request_limit_delta );
+}
+
+/* get_srp_login_response_max_init_to_targ_iu */
+/*!
+Returns the value of the max initiator to target IU size value
+
+@param p_information_unit - pointer to the IU structure
+
+@return - max initiator to target IU value
+*/
+static inline
+uint32_t
+get_srp_login_response_max_init_to_targ_iu(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( p_information_unit->max_init_to_targ_iu );
+}
+
+/* get_srp_login_response_max_targ_to_init_iu */
+/*!
+Returns the value of the max target to initiator IU size value
+
+@param p_information_unit - pointer to the IU structure
+
+@return - max target to initiator IU value
+*/
+static inline
+uint32_t
+get_srp_login_response_max_targ_to_init_iu(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( p_information_unit->max_targ_to_init_iu );
+}
+
+/* get_srp_login_response_supported_data_buffer_formats */
+/*!
+Returns the supported data buffer formats to be used on the channel
+
+@param p_information_unit - pointer to the IU structure
+
+@return - supported data buffer formats settings
+*/
+static inline
+DATA_BUFFER_DESCRIPTOR_FORMAT
+get_srp_login_response_supported_data_buffer_formats(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	switch ( p_information_unit->sup_buffer_fmts.flags & DATA_BUFFER_DESCRIPTOR_FORMAT_MASK )
+	{
+	case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR );
+
+	case INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS );
+
+	case DIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED | INDIRECT_DATA_BUFFER_DESCRIPTOR_REQUESTED:
+		return( ( DATA_BUFFER_DESCRIPTOR_FORMAT ) ( DBDF_DIRECT_DATA_BUFFER_DESCRIPTOR | DBDF_INDIRECT_DATA_BUFFER_DESCRIPTORS ) );
+
+	default:
+		return( DBDF_NO_DATA_BUFFER_DESCRIPTOR_PRESENT );
+	}
+}
+
+/* get_srp_login_response_multi_channel_result */
+/*!
+Returns the multi channel result setting
+
+@param p_information_unit - pointer to the IU structure
+
+@return - multi channel result setting
+*/
+static inline
+MULTI_CHANNEL_RESULT
+get_srp_login_response_multi_channel_result(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( ( MULTI_CHANNEL_RESULT ) ( p_information_unit->flags & MULTI_CHANNEL_RESULT_MASK ) );
+}
+
+/* get_srp_login_response_length */
+/*!
+Returns the size in bytes of the Srp Login Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size in bytes of the IU
+*/
+static inline
+uint32_t
+get_srp_login_response_length(
+	IN srp_login_rsp_t *p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_login_response_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_from_host_to_network(
+	IN OUT srp_login_rsp_t *p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->request_limit_delta = cl_hton32( p_information_unit->request_limit_delta );
+	p_information_unit->max_init_to_targ_iu = cl_hton32( p_information_unit->max_init_to_targ_iu );
+	p_information_unit->max_targ_to_init_iu = cl_hton32( p_information_unit->max_targ_to_init_iu );
+}
+
+/* set_srp_login_response_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_login_response_from_network_to_host(
+	IN OUT srp_login_rsp_t *p_information_unit )
+{
+	set_srp_login_response_from_host_to_network( p_information_unit );
+}
+
+#endif /* SRP_LOGIN_RSP_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_rsp.h b/branches/Ndi/ulp/srp/kernel/srp_rsp.h
new file mode 100644
index 00000000..5e3e526b
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_rsp.h
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_RSP_H_INCLUDED
+#define SRP_RSP_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_response_tag */
+/*!
+Sets the tag field of a Response information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_response_tag(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_response */
+/*!
+Initializes the Response IU to zeroes,
+sets the IU type to Response,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_response(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint64_t iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_RSP );
+	set_srp_response_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_response_request_limit_delta */
+/*!
+Sets the request limit delta value for flow control
+
+@param p_information_unit - pointer to the IU structure
+@param request_limit_delta - flow control request limit delta value
+
+@return - none
+*/
+static inline
+void
+set_srp_response_request_limit_delta(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN int32_t request_limit_delta )
+{
+	p_information_unit->request_limit_delta = request_limit_delta;
+}
+
+/* set_srp_response_flags */
+/*!
+Sets the flags field
+
+@param p_information_unit - pointer to the IU structure
+@param flags - the DIUNDER, DIOVER, DOUNDER, DOOVER, SNSVALID and RSPVALID bits
+
+@return - none
+*/
+static inline
+void
+set_srp_response_flags(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t flags )
+{
+	p_information_unit->flags = flags & 0x3F;
+}
+
+/* set_srp_response_di_under */
+/*!
+Sets the DIUNDER flag
+
+@param p_information_unit - pointer to the IU structure
+@param di_under - DIUNDER flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_di_under(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t di_under )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xDF ) | ( di_under == 0 ? 0 : 0x20 );
+}
+
+/* set_srp_response_di_over */
+/*!
+Sets the DIOVER flag
+
+@param p_information_unit - pointer to the IU structure
+@param di_over - DIOVER flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_di_over(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t di_over )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xEF ) | ( di_over == 0 ? 0 : 0x10 );
+}
+
+/* set_srp_response_do_under */
+/*!
+Sets the DOUNDER flag
+
+@param p_information_unit - pointer to the IU structure
+@param do_under - DOUNDER flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_do_under(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t do_under )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xF7 ) | ( do_under == 0 ? 0 : 0x08 );
+}
+
+/* set_srp_response_do_over */
+/*!
+Sets the DOOVER flag
+
+@param p_information_unit - pointer to the IU structure
+@param do_over - DOOVER flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_do_over(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t do_over )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xFB ) | ( do_over == 0 ? 0 : 0x04 );
+}
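+
+/*
+ * Flag byte layout assumed by the set/get helpers in this file (bits 7:6 are
+ * reserved and masked off by set_srp_response_flags):
+ *
+ *	0x20 DIUNDER   0x10 DIOVER    0x08 DOUNDER
+ *	0x04 DOOVER    0x02 SNSVALID  0x01 RSPVALID
+ *
+ * e.g. set_srp_response_do_over( p, 1 ) computes ( flags & 0xFB ) | 0x04,
+ * which sets only the DOOVER bit and leaves every other bit untouched.
+ */
+
+/* set_srp_response_sns_valid */
+/*!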
+Sets the SNSVALID flag
+
+@param p_information_unit - pointer to the IU structure
+@param sns_valid - SNSVALID flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_sns_valid(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t sns_valid )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xFD ) | ( sns_valid == 0 ? 0 : 0x02 );
+}
+
+/* set_srp_response_rsp_valid */
+/*!
+Sets the RSPVALID flag
+
+@param p_information_unit - pointer to the IU structure
+@param rsp_valid - RSPVALID flag; any non-zero value is true, zero is false
+
+@return - none
+*/
+static inline
+void
+set_srp_response_rsp_valid(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t rsp_valid )
+{
+	p_information_unit->flags = ( p_information_unit->flags & 0xFE ) | ( rsp_valid == 0 ? 0 : 0x01 );
+}
+
+/* set_srp_response_status */
+/*!
+Sets the Status value
+
+@param p_information_unit - pointer to the IU structure
+@param status - Status value
+
+@return - none
+*/
+static inline
+void
+set_srp_response_status(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint8_t status )
+{
+	p_information_unit->status = status;
+}
+
+/* set_srp_response_data_out_residual_count */
+/*!
+Sets the data out residual count for the Response IU
+
+@param p_information_unit - pointer to the IU structure
+@param data_out_residual_count - data out residual count for the request
+
+@return - none
+*/
+static inline
+void
+set_srp_response_data_out_residual_count(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint32_t data_out_residual_count )
+{
+	p_information_unit->data_out_residual_count = data_out_residual_count;
+}
+
+/* set_srp_response_data_in_residual_count */
+/*!
+Sets the data in residual count for the Response IU
+
+@param p_information_unit - pointer to the IU structure
+@param data_in_residual_count - data in residual count for the request
+
+@return - none
+*/
+static inline
+void
+set_srp_response_data_in_residual_count(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint32_t data_in_residual_count )
+{
+	p_information_unit->data_in_residual_count = data_in_residual_count;
+}
+
+/* set_srp_response_sense_data_list_length */
+/*!
+Sets the sense data list length for the Response IU
+
+@param p_information_unit - pointer to the IU structure
+@param sense_data_list_length - sense data list length for the request
+
+@return - none
+*/
+static inline
+void
+set_srp_response_sense_data_list_length(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint32_t sense_data_list_length )
+{
+	p_information_unit->sense_data_list_length = sense_data_list_length;
+}
+
+/* set_srp_response_response_data_list_length */
+/*!
+Sets the response data list length for the Response IU
+
+@param p_information_unit - pointer to the IU structure
+@param response_data_list_length - response data list length for the request
+
+@return - none
+*/
+static inline
+void
+set_srp_response_response_data_list_length(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint32_t response_data_list_length )
+{
+	p_information_unit->response_data_list_length = response_data_list_length;
+}
+
+/* setup_srp_response */
+/*!
+Initializes and sets the Srp Response IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param request_limit_delta - flow control request limit delta value
+@param di_under - DIUNDER flag
+@param di_over - DIOVER flag
+@param do_under - DOUNDER flag
+@param do_over - DOOVER flag
+@param sns_valid - SNSVALID flag
+@param rsp_valid - RSPVALID flag
+@param status - Status value
+@param data_out_residual_count - data out residual count for the request
+@param data_in_residual_count - data in residual count for the request
+@param sense_data_list_length - sense data list length for the request
+@param response_data_list_length - response data list length for the request
+
+@return - none
+*/
+static inline
+void
+setup_srp_response(
+	IN OUT srp_rsp_t *p_information_unit,
+	IN uint64_t iu_tag,
+	IN int32_t request_limit_delta,
+	IN uint8_t di_under,
+	IN uint8_t di_over,
+	IN uint8_t do_under,
+	IN uint8_t do_over,
+	IN uint8_t sns_valid,
+	IN uint8_t rsp_valid,
+	IN uint8_t status,
+	IN uint32_t data_out_residual_count,
+	IN uint32_t data_in_residual_count,
+	IN uint32_t sense_data_list_length,
+	IN uint32_t response_data_list_length )
+{
+	init_srp_response( p_information_unit, iu_tag );
+	set_srp_response_request_limit_delta( p_information_unit, request_limit_delta );
+	set_srp_response_di_under( p_information_unit, di_under );
+	set_srp_response_di_over( p_information_unit, di_over );
+	set_srp_response_do_under( p_information_unit, do_under );
+	set_srp_response_do_over( p_information_unit, do_over );
+	set_srp_response_sns_valid( p_information_unit, sns_valid );
+	set_srp_response_rsp_valid( p_information_unit, rsp_valid );
+	set_srp_response_status( p_information_unit, status );
+	set_srp_response_data_out_residual_count( p_information_unit, data_out_residual_count );
+	set_srp_response_data_in_residual_count( p_information_unit, data_in_residual_count );
+	set_srp_response_sense_data_list_length( p_information_unit, sense_data_list_length );
+	set_srp_response_response_data_list_length( p_information_unit, response_data_list_length );
+}
+
+/* get_srp_response_tag */
+/*!
+Returns the value of the tag field of a response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_response_tag(
+	IN srp_rsp_t *p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_response_request_limit_delta */
+/*!
+Returns the value of the request limit delta field of a Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - request limit delta value
+*/
+static inline
+int32_t
+get_srp_response_request_limit_delta(
+	IN srp_rsp_t *p_information_unit )
+{
+	return( p_information_unit->request_limit_delta );
+}
+
+/* get_srp_response_flags */
+/*!
+Returns the flags field of a Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - value of the flags field
+*/
+static inline
+uint8_t
+get_srp_response_flags(
+	IN srp_rsp_t *p_information_unit )
+{
+	return( p_information_unit->flags );
+}
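+
+/*
+ * Usage sketch (illustrative addition): a target completing a command with a
+ * SCSI status and 18 bytes of sense data, no response data and no residuals.
+ * p_rsp, iu_tag and the sense buffer are caller-supplied placeholders; 0x02
+ * is the standard SCSI CHECK CONDITION status.
+ *
+ *	setup_srp_response( p_rsp, iu_tag, 1,
+ *		0, 0, 0, 0,	// di_under, di_over, do_under, do_over
+ *		1, 0,		// sns_valid, rsp_valid
+ *		0x02,		// SCSI CHECK CONDITION
+ *		0, 0,		// data out / data in residual counts
+ *		18, 0 );	// sense / response data list lengths
+ *	cl_memcpy( get_srp_response_sense_data( p_rsp ), sense, 18 );
+ *	set_srp_response_from_host_to_network( p_rsp );	// swap only after the
+ *													// lengths have been read
+ */
+
+/* get_srp_response_di_under */
+/*!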
+Returns the DIUNDER flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_di_under( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x20 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_di_over */ +/*! +Returns the DIOVER flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_di_over( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x10 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_do_under */ +/*! +Returns the DOUNDER flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_do_under( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x08 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_do_over */ +/*! +Returns the DOOVER flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_do_over( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x04 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_sns_valid */ +/*! +Returns the SNSVALID flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_sns_valid( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x02 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_rsp_valid */ +/*! +Returns the RSPVALID flag setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - set/clear setting + one indicates set while zero indicates not set +*/ +static inline +uint8_t +get_srp_response_rsp_valid( + IN srp_rsp_t *p_information_unit ) +{ + return( ( p_information_unit->flags & 0x01 ) != 0 ? 1 : 0 ); +} + +/* get_srp_response_status */ +/*! +Returns the Status field setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - Status value setting +*/ +static inline +uint8_t +get_srp_response_status( + IN srp_rsp_t *p_information_unit ) +{ + return( p_information_unit->status ); +} + +/* get_srp_response_data_out_residual_count */ +/*! +Returns the data out residual count field setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - data out residual count value setting +*/ +static inline +uint32_t +get_srp_response_data_out_residual_count( + IN srp_rsp_t *p_information_unit ) +{ + return( p_information_unit->data_out_residual_count ); +} + +/* get_srp_response_data_in_residual_count */ +/*! +Returns the data in residual count field setting of a Response IU + +@param p_information_unit - pointer to the IU structure + +@return - data in residual count value setting +*/ +static inline +uint32_t +get_srp_response_data_in_residual_count( + IN srp_rsp_t *p_information_unit ) +{ + return( p_information_unit->data_in_residual_count ); +} + +/* get_srp_response_sense_data_list_length */ +/*! 
+Returns the sense data list length field setting of a Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - sense data list length value setting
+*/
+static inline
+uint32_t
+get_srp_response_sense_data_list_length(
+	IN srp_rsp_t *p_information_unit )
+{
+	return( p_information_unit->sense_data_list_length );
+}
+
+/* get_srp_response_response_data_list_length */
+/*!
+Returns the response data list length field setting of a Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - response data list length value setting
+*/
+static inline
+uint32_t
+get_srp_response_response_data_list_length(
+	IN srp_rsp_t *p_information_unit )
+{
+	return( p_information_unit->response_data_list_length );
+}
+
+/* get_srp_response_response_data */
+/*!
+Returns a pointer to the response data field of a Response
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to the response data, or NULL if RSPVALID is not set
+*/
+static inline
+srp_response_data_t*
+get_srp_response_response_data(
+	IN srp_rsp_t *p_information_unit )
+{
+	if ( get_srp_response_rsp_valid( p_information_unit ) )
+		return( p_information_unit->response_data );
+
+	return( NULL );
+}
+
+/* get_srp_response_sense_data */
+/*!
+Returns a pointer to the sense data field of a Response
+
+WARNING!!!! Set the response data list length before this call so the
+            offset can be correctly calculated
+
+@param p_information_unit - pointer to the IU structure
+
+@return - pointer to sense data, or NULL if SNSVALID is not set
+*/
+static inline
+uint8_t*
+get_srp_response_sense_data(
+	IN srp_rsp_t *p_information_unit )
+{
+	if ( get_srp_response_sns_valid( p_information_unit ) )
+	{
+		if ( get_srp_response_response_data( p_information_unit ) != NULL )
+		{
+			return( ( ( uint8_t* ) p_information_unit->response_data ) + get_srp_response_response_data_list_length( p_information_unit ) );
+		}
+		else
+		{
+			return( ( uint8_t* ) p_information_unit->response_data );
+		}
+	}
+
+	return( NULL );
+}
+
+/* get_srp_response_length */
+/*!
+Returns the size in bytes of the Srp Response IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - used length of Response IU buffer
+*/
+static inline
+uint32_t
+get_srp_response_length(
+	IN srp_rsp_t *p_information_unit )
+{
+	/* Do not include the response data field in the sizeof of the IU; add its
+	   length and the sense data list length to the structure size instead. */
+	return( ( sizeof( *p_information_unit ) - sizeof( p_information_unit->response_data ) ) +
+		( get_srp_response_rsp_valid( p_information_unit ) ? get_srp_response_response_data_list_length( p_information_unit ) : 0 ) +
+		( get_srp_response_sns_valid( p_information_unit ) ? get_srp_response_sense_data_list_length( p_information_unit ) : 0 ) );
+}
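+
+/*
+ * Worked example (illustrative addition) of get_srp_response_length: for a
+ * response with SNSVALID set, 18 bytes of sense data and no response data,
+ * the used length is
+ *
+ *	( sizeof( srp_rsp_t ) - sizeof( p->response_data ) ) + 0 + 18
+ *
+ * i.e. the fixed portion plus only the variable-length data actually present.
+ */
+
+/* set_srp_response_from_host_to_network */
+/*!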
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_response_from_host_to_network(
+	IN OUT srp_rsp_t *p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->request_limit_delta = cl_hton32( p_information_unit->request_limit_delta );
+	/* The count/length fields only carry data when at least one flag is set. */
+	if ( get_srp_response_flags( p_information_unit ) != 0 )
+	{
+		p_information_unit->data_out_residual_count = cl_hton32( p_information_unit->data_out_residual_count );
+		p_information_unit->data_in_residual_count = cl_hton32( p_information_unit->data_in_residual_count );
+		p_information_unit->sense_data_list_length = cl_hton32( p_information_unit->sense_data_list_length );
+		p_information_unit->response_data_list_length = cl_hton32( p_information_unit->response_data_list_length );
+	}
+}
+
+/* set_srp_response_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+static inline
+void
+set_srp_response_from_network_to_host(
+	IN OUT srp_rsp_t *p_information_unit )
+{
+	set_srp_response_from_host_to_network( p_information_unit );
+}
+
+#endif /* SRP_RSP_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_session.c b/branches/Ndi/ulp/srp/kernel/srp_session.c
new file mode 100644
index 00000000..d2deb56d
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_session.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#include "srp_debug.h"
+#if defined(EVENT_TRACING)
+#ifdef offsetof
+#undef offsetof
+#endif
+#include "srp_session.tmh"
+#endif
+#include "srp_session.h"
+#include <complib/cl_byteswap.h>	/* cl_hton64 (assumed include target) */
+
+#if DBG
+extern void *gp_session;
+#endif
+
+/* __srp_destroying_session */
+/*!
+Called when session has been marked for destruction + +@param p_obj - pointer to a session object + +@return - none +*/ +static void +__srp_destroying_session( + IN cl_obj_t *p_obj ) +{ + srp_session_t *p_srp_session; + + SRP_ENTER( SRP_DBG_SESSION ); + + p_srp_session = PARENT_STRUCT( p_obj, srp_session_t, obj ); + + p_srp_session->connection.state = SRP_CONNECTION_CLOSING; + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Session Object ref_cnt = %d\n", p_srp_session->obj.ref_cnt) ); + + SRP_EXIT( SRP_DBG_SESSION ); +} + + +/* __srp_cleanup_session */ +/*! +Called when session is being destroyed +in order to perform resource deallocation + +@param p_obj - pointer to a session object + +@return - none +*/ +void +__srp_cleanup_session( + IN cl_obj_t *p_obj ) +{ + srp_session_t *p_srp_session; + + SRP_ENTER( SRP_DBG_SESSION ); + + p_srp_session = PARENT_STRUCT( p_obj, srp_session_t, obj ); + + srp_close_ca( &p_srp_session->hca ); + + if ( p_srp_session->p_shutdown_srb != NULL ) + { + p_srp_session->p_shutdown_srb->SrbStatus = SRB_STATUS_SUCCESS; + SRP_PRINT( TRACE_LEVEL_INFORMATION, SRP_DBG_DEBUG, + ("Returning SrbStatus %s(0x%x) for Function = %s(0x%x), " + "Path = 0x%x, Target = 0x%x, Lun = 0x%x\n", + g_srb_status_name[p_srp_session->p_shutdown_srb->SrbStatus], + p_srp_session->p_shutdown_srb->SrbStatus, + g_srb_function_name[p_srp_session->p_shutdown_srb->Function], + p_srp_session->p_shutdown_srb->Function, + p_srp_session->p_shutdown_srb->PathId, + p_srp_session->p_shutdown_srb->TargetId, + p_srp_session->p_shutdown_srb->Lun) ); + StorPortNotification( RequestComplete, p_srp_session->p_hba->p_ext, + p_srp_session->p_shutdown_srb ); + } + + srp_free_connection( &p_srp_session->connection ); + srp_destroy_descriptors( &p_srp_session->descriptors ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Session Object ref_cnt = %d\n", p_srp_session->obj.ref_cnt) ); + + SRP_EXIT( SRP_DBG_SESSION ); +} + +/* __srp_free_session */ +/*! +Called when session has been destroyed +and is ready for deallocation + +@param p_obj - pointer to a session object + +@return - none +*/ +static void +__srp_free_session( + IN cl_obj_t *p_obj ) +{ + srp_session_t *p_srp_session; + + SRP_ENTER( SRP_DBG_SESSION ); + + p_srp_session = PARENT_STRUCT( p_obj, srp_session_t, obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("Before DeInit Session Object ref_cnt = %d\n", + p_srp_session->obj.ref_cnt) ); + + cl_obj_deinit( p_obj ); + + SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG, + ("After DeInit Session Object ref_cnt = %d\n", + p_srp_session->obj.ref_cnt) ); + + cl_free( p_srp_session ); + + SRP_EXIT( SRP_DBG_SESSION ); +} + +/* __srp_validate_service_entry */ +/*! 
+Validates the format of the Service Name and
+converts and returns the id extension encoded
+within the service name string
+
+@param p_svc_entry - pointer to the service entry
+@param p_target_id_extension - pointer to a value to hold the returned id extension
+
+@return - result of operation
+*/
+static ib_api_status_t
+__srp_validate_service_entry(
+	IN ib_svc_entry_t *p_svc_entry,
+	OUT uint64_t *p_target_id_extension )
+{
+	ib_api_status_t status = IB_SUCCESS;
+	char target_id_extension[SRP_EXTENSION_ID_LENGTH + 1];
+	size_t target_id_extension_size;
+	uint64_t multiplier = 1;
+	ULONG id_extension;
+
+	SRP_ENTER( SRP_DBG_SESSION );
+
+	if ( cl_memcmp( p_svc_entry->name, SRP_SERVICE_NAME_PREFIX, strlen(SRP_SERVICE_NAME_PREFIX)) != 0 )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Service Name Not Properly Formatted.\n") );
+		status = IB_INVALID_SERVICE_TYPE;
+		goto exit;
+	}
+
+	*p_target_id_extension = 0;
+
+	cl_memclr( target_id_extension, sizeof(target_id_extension) );
+	cl_memcpy( target_id_extension, &p_svc_entry->name[strlen(SRP_SERVICE_NAME_PREFIX)], 16 );
+
+	target_id_extension_size = strlen( target_id_extension );
+
+	/* Convert the hex string to a value, least significant digit first. */
+	while ( target_id_extension_size != 0 )
+	{
+		char current_digit[2] = {'\0', '\0'};
+		NTSTATUS ntstatus;
+
+		target_id_extension_size--;
+
+		current_digit[0] = target_id_extension[target_id_extension_size];
+
+		ntstatus = RtlCharToInteger( current_digit, 16, &id_extension );
+		if ( ntstatus != STATUS_SUCCESS )
+		{
+			SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+				("Target Id Extension INVALID.\n") );
+			status = IB_INVALID_PARAMETER;
+			break;
+		}
+
+		(*p_target_id_extension) += ( id_extension * multiplier );
+
+		multiplier <<= 4;
+	}
+
+	/* Swap to network order now. */
+	*p_target_id_extension = cl_hton64( *p_target_id_extension );
+
+exit:
+	SRP_EXIT( SRP_DBG_SESSION );
+
+	return ( status );
+}
+
+/* srp_new_session */
+/*!
+Allocates and initializes a session structure and its sub-structures
+
+@param p_hba - pointer to the hba associated with the new session
+@param p_svc_entry - pointer to the service entry
+@param p_status - pointer to the reason code
+
+@return - Pointer to new session or NULL if failure. See p_status for reason code.
+*/
+srp_session_t*
+srp_new_session(
+	IN srp_hba_t *p_hba,
+	IN ib_svc_entry_t *p_svc_entry,
+	OUT ib_api_status_t *p_status )
+{
+	uint64_t target_id_extension;
+	srp_session_t *p_srp_session = NULL;
+	cl_status_t cl_status;
+
+	SRP_ENTER( SRP_DBG_SESSION );
+
+	*p_status = __srp_validate_service_entry( p_svc_entry, &target_id_extension );
+	if ( *p_status != IB_SUCCESS )
+	{
+		goto exit;
+	}
+
+	p_srp_session = (srp_session_t*)cl_zalloc( sizeof(srp_session_t) );
+	if ( p_srp_session == NULL )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("Failed to allocate srp_session_t structure.\n") );
+		*p_status = IB_INSUFFICIENT_MEMORY;
+		goto exit;
+	}
+
+	p_srp_session->p_hba = p_hba;
+
+	*p_status = srp_init_connection( &p_srp_session->connection,
+		&p_hba->ioc_info.profile,
+		p_hba->info.ca_guid,
+		target_id_extension,
+		&p_hba->p_srp_path_record->path_rec,
+		p_svc_entry->id );
+	if ( *p_status != IB_SUCCESS )
+	{
+		cl_free( p_srp_session );
+		p_srp_session = NULL;
+		goto exit;
+	}
+
+	cl_obj_construct( &p_srp_session->obj, SRP_OBJ_TYPE_SESSION );
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		("After Construct Session Object ref_cnt = %d\n",
+		p_srp_session->obj.ref_cnt) );
+	cl_status = cl_obj_init( &p_srp_session->obj,
+		CL_DESTROY_ASYNC,
+		__srp_destroying_session,
+		__srp_cleanup_session,
+		__srp_free_session );
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		("After Init Session Object ref_cnt = %d\n",
+		p_srp_session->obj.ref_cnt) );
+	if( cl_status != CL_SUCCESS )
+	{
+		SRP_PRINT_EXIT( TRACE_LEVEL_ERROR, SRP_DBG_ERROR,
+			("cl_obj_init returned %s\n", cl_status_text[cl_status]) );
+
+		cl_free( p_srp_session );
+		p_srp_session = NULL;
+		*p_status = IB_ERROR;
+		goto exit;
+	}
+
+	cl_obj_insert_rel( &p_srp_session->rel,
+		&p_srp_session->p_hba->obj,
+		&p_srp_session->obj );
+
+	SRP_PRINT( TRACE_LEVEL_VERBOSE, SRP_DBG_DEBUG,
+		("After Insert Rel Session Object ref_cnt = %d\n",
+		p_srp_session->obj.ref_cnt) );
+
+#if DBG
+	gp_session = p_srp_session;
+#endif
+
+exit:
+	SRP_EXIT( SRP_DBG_SESSION );
+
+	return ( p_srp_session );
+}
+
+/* srp_session_login */
+/*!
+Orchestrates the connection process for a session to a target
+
+@param p_srp_session - pointer to the session to connect to the target
+
+@return - result of operation
+*/
+ib_api_status_t
+srp_session_login(
+	IN srp_session_t *p_srp_session )
+{
+	ib_api_status_t status;
+
+	SRP_ENTER( SRP_DBG_SESSION );
+
+	status = srp_init_hca( &p_srp_session->hca, p_srp_session->p_hba );
+	if ( status != IB_SUCCESS )
+	{
+		goto exit;
+	}
+
+	status = srp_open_ca( &p_srp_session->hca, p_srp_session );
+	if ( status != IB_SUCCESS )
+		goto exit;
+
+	status = srp_connect( &p_srp_session->connection,
+		&p_srp_session->hca,
+		(uint8_t)p_srp_session->p_hba->ioc_info.profile.send_msg_depth,
+		p_srp_session );
+
+	if ( status != IB_SUCCESS )
+	{	// clean up resources taken at login
+		srp_close_ca( &p_srp_session->hca );
+		srp_destroy_descriptors( &p_srp_session->descriptors );
+	}
+
+exit:
+	SRP_EXIT( SRP_DBG_SESSION );
+	return ( status );
+}
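+
+/*
+ * Usage sketch (illustrative addition): the intended bring-up order, as a
+ * caller in the HBA code might drive it.  p_hba and p_svc_entry come from
+ * the caller's context.
+ *
+ *	ib_api_status_t status;
+ *	srp_session_t *p_session;
+ *
+ *	p_session = srp_new_session( p_hba, p_svc_entry, &status );
+ *	if ( p_session != NULL )
+ *		status = srp_session_login( p_session );
+ */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_session.h b/branches/Ndi/ulp/srp/kernel/srp_session.h
new file mode 100644
index 00000000..925ab689
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_session.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.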
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+
+#ifndef _SRP_SESSION_H_
+#define _SRP_SESSION_H_
+
+
+#include
+#include
+#include
+
+#include "srp_hba.h"
+#include "srp_hca.h"
+#include "srp_connection.h"
+#include "srp_descriptors.h"
+
+/* Session information. */
+typedef struct _srp_session
+{
+	cl_obj_t			obj;
+	cl_obj_rel_t		rel;
+
+	srp_hba_t			*p_hba;
+	atomic32_t			repost_is_on;
+	srp_hca_t			hca;
+	srp_connection_t	connection;
+	srp_descriptors_t	descriptors;
+
+	SCSI_REQUEST_BLOCK	*p_shutdown_srb;
+
+#if DBG
+	/* statistics */
+
+	/* packets, built */
+	uint64_t	x_pkt_fmr;			/* number of packets, mapped by fmr_pool */
+	uint64_t	x_pkt_built;		/* number of packets, built */
+
+	/* request_limit_delta */
+	int64_t		x_rld_total;		/* sum of req_limit_delta values */
+	int32_t		x_rld_num;			/* number of req_limit_delta values */
+	int32_t		x_rld_max;			/* max req_limit_delta value */
+	int32_t		x_rld_min;			/* min req_limit_delta value */
+	int32_t		x_rld_zeroes;		/* number of zeroes */
+
+	int32_t		x_rld_zeroes_cur;		/* number of zeroes */
+	int32_t		x_rld_zeroes_cur_min;	/* number of zeroes */
+	int32_t		x_rld_busy_success;
+	int32_t		x_rld_busy_fail;
+
+	/* pending queue */
+	uint64_t	x_pend_total;		/* sum of pending_descriptors queue sizes */
+	uint32_t	x_pend_num;			/* number of pending_descriptors queue sizes */
+	uint32_t	x_pend_max;			/* max pending_descriptors queue size */
+
+	/* sent queue */
+	uint64_t	x_sent_total;		/* sum of sent_descriptors queue sizes */
+	uint32_t	x_sent_num;			/* number of sent_descriptors queue sizes */
+	uint32_t	x_sent_max;			/* max sent_descriptors queue size */
+
+	uint32_t	x_req_limit;		/* max number of in-flight packets */
+#endif
+} srp_session_t;
+
+srp_session_t*
+srp_new_session(
+	IN		srp_hba_t			*p_hba,
+	IN		ib_svc_entry_t		*p_svc_entry,
+	OUT		ib_api_status_t		*p_status );
+
+ib_api_status_t
+srp_session_login(
+	IN		srp_session_t	*p_srp_session );
+
+void
+__srp_cleanup_session(
+	IN		cl_obj_t	*p_obj );
+
+#endif /* _SRP_SESSION_H_ */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_t_logout.h b/branches/Ndi/ulp/srp/kernel/srp_t_logout.h
new file mode 100644
index 00000000..e284fbea
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_t_logout.h
@@ -0,0 +1,200 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_T_LOGOUT_H_INCLUDED
+#define SRP_T_LOGOUT_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_t_logout_tag */
+/*!
+Sets the tag field of a target logout information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_t_logout_tag(
+	IN OUT	srp_t_logout_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_t_logout */
+/*!
+Initializes the target logout IU to zeroes,
+sets the IU type to Srp Target Logout,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_t_logout(
+	IN OUT	srp_t_logout_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_T_LOGOUT );
+	set_srp_t_logout_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_t_logout_reason */
+/*!
+Sets the reason for the target logout
+
+@param p_information_unit - pointer to the IU structure
+@param reason - target logout reason code
+
+@return - none
+*/
+static inline
+void
+set_srp_t_logout_reason(
+	IN OUT	srp_t_logout_t				*p_information_unit,
+	IN		TARGET_LOGOUT_REASON_CODE	reason )
+{
+	p_information_unit->reason = reason;
+}
+
+/* setup_srp_t_logout */
+/*!
+Initializes and sets the Srp Target Logout IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param reason - target logout reason code
+
+@return - none
+*/
+static inline
+void
+setup_srp_t_logout(
+	IN OUT	srp_t_logout_t				*p_information_unit,
+	IN		uint64_t					iu_tag,
+	IN		TARGET_LOGOUT_REASON_CODE	reason )
+{
+	init_srp_t_logout( p_information_unit, iu_tag );
+	set_srp_t_logout_reason( p_information_unit, reason );
+}
+
+/* get_srp_t_logout_tag */
+/*!
+Returns the value of the tag field of a target logout
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_t_logout_tag(
+	IN	srp_t_logout_t	*p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_t_logout_reason */
+/*!
+Returns the value of the reason code field of a target logout
+
+@param p_information_unit - pointer to the IU structure
+
+@return - reason code value
+*/
+static inline
+TARGET_LOGOUT_REASON_CODE
+get_srp_t_logout_reason(
+	IN	srp_t_logout_t	*p_information_unit )
+{
+	return( ( TARGET_LOGOUT_REASON_CODE ) p_information_unit->reason );
+}
+
+/* get_srp_t_logout_length */
+/*!
+Returns the size in bytes of the Srp Target Logout IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size in bytes of the IU
+*/
+static inline
+uint32_t
+get_srp_t_logout_length(
+	IN	srp_t_logout_t	*p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_t_logout_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_t_logout_from_host_to_network(
+	IN OUT	srp_t_logout_t	*p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->reason = cl_hton32( p_information_unit->reason );
+}
+
+/* set_srp_t_logout_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_t_logout_from_network_to_host(
+	IN OUT	srp_t_logout_t	*p_information_unit )
+{
+	set_srp_t_logout_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_T_LOGOUT_H_INCLUDED */
diff --git a/branches/Ndi/ulp/srp/kernel/srp_tsk_mgmt.h b/branches/Ndi/ulp/srp/kernel/srp_tsk_mgmt.h
new file mode 100644
index 00000000..3eacd1c8
--- /dev/null
+++ b/branches/Ndi/ulp/srp/kernel/srp_tsk_mgmt.h
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+
+#ifndef SRP_TSK_MGMT_H_INCLUDED
+#define SRP_TSK_MGMT_H_INCLUDED
+
+#include "srp.h"
+#include "srp_iu_buffer.h"
+#include "srp_information_unit.h"
+
+/* set_srp_tsk_mgmt_tag */
+/*!
+Sets the tag field of a task management information unit
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value of IU
+
+@return - none
+*/
+static inline
+void
+set_srp_tsk_mgmt_tag(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	set_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit, iu_tag );
+}
+
+/* init_srp_tsk_mgmt */
+/*!
+Initializes the task management IU to zeroes,
+sets the IU type to Srp Task Management,
+and sets the tag to the value supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+
+@return - none
+*/
+static inline
+void
+init_srp_tsk_mgmt(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint64_t		iu_tag )
+{
+	init_srp_iu_buffer( ( srp_iu_buffer_t* ) p_information_unit, SRP_TSK_MGMT );
+	set_srp_tsk_mgmt_tag( p_information_unit, iu_tag );
+}
+
+/* set_srp_tsk_mgmt_logical_unit_number */
+/*!
+Sets the logical unit number for the task management request IU
+
+@param p_information_unit - pointer to the IU structure
+@param logical_unit_number - logical unit number
+
+@return - none
+*/
+static inline
+void
+set_srp_tsk_mgmt_logical_unit_number(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint64_t		logical_unit_number )
+{
+	p_information_unit->logical_unit_number = logical_unit_number;
+}
+
+/* set_srp_tsk_mgmt_task_management_flags */
+/*!
+Sets the task management flags for a task management IU
+
+@param p_information_unit - pointer to the IU structure
+@param task_management_flags - task management flags
+
+@return - none
+*/
+static inline
+void
+set_srp_tsk_mgmt_task_management_flags(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint8_t			task_management_flags )
+{
+	p_information_unit->task_management_flags = task_management_flags;
+}
+
+/* set_srp_tsk_mgmt_managed_task_tag */
+/*!
+Sets the managed task tag for a task management IU
+
+@param p_information_unit - pointer to the IU structure
+@param managed_task_tag - id of task to be managed
+
+@return - none
+*/
+static inline
+void
+set_srp_tsk_mgmt_managed_task_tag(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint64_t		managed_task_tag )
+{
+	p_information_unit->managed_task_tag = managed_task_tag;
+}
+
+/* setup_srp_tsk_mgmt */
+/*!
+Initializes and sets the Srp Task Management IU to the values supplied
+
+@param p_information_unit - pointer to the IU structure
+@param iu_tag - tag value to be used for the req/rsp pair
+@param logical_unit_number - logical unit number
+@param task_management_flags - task management flags
+@param managed_task_tag - id of task to be managed
+
+@return - none
+*/
+static inline
+void
+setup_srp_tsk_mgmt(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit,
+	IN		uint64_t		iu_tag,
+	IN		uint64_t		logical_unit_number,
+	IN		uint8_t			task_management_flags,
+	IN		uint64_t		managed_task_tag )
+{
+	init_srp_tsk_mgmt( p_information_unit, iu_tag );
+	set_srp_tsk_mgmt_logical_unit_number( p_information_unit, logical_unit_number );
+	set_srp_tsk_mgmt_task_management_flags( p_information_unit, task_management_flags );
+	set_srp_tsk_mgmt_managed_task_tag( p_information_unit, managed_task_tag );
+}
+
+/* get_srp_tsk_mgmt_tag */
+/*!
+Returns the value of the tag field of a task management IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - tag value
+*/
+static inline
+uint64_t
+get_srp_tsk_mgmt_tag(
+	IN	srp_tsk_mgmt_t	*p_information_unit )
+{
+	return( get_srp_information_unit_tag( ( srp_information_unit_t* ) p_information_unit ) );
+}
+
+/* get_srp_tsk_mgmt_logical_unit_number */
+/*!
+Returns the value of the logical unit number field of a task management IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - logical unit number
+*/
+static inline
+uint64_t
+get_srp_tsk_mgmt_logical_unit_number(
+	IN	srp_tsk_mgmt_t	*p_information_unit )
+{
+	return( p_information_unit->logical_unit_number );
+}
+
+/* get_srp_tsk_mgmt_task_management_flags */
+/*!
+Returns the value of the task management flags field of a task management IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - task management flags
+*/
+static inline
+uint8_t
+get_srp_tsk_mgmt_task_management_flags(
+	IN	srp_tsk_mgmt_t	*p_information_unit )
+{
+	return( p_information_unit->task_management_flags );
+}
+
+/* get_srp_tsk_mgmt_managed_task_tag */
+/*!
+Returns the value of the managed task tag field of a task management IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - managed task tag
+*/
+static inline
+uint64_t
+get_srp_tsk_mgmt_managed_task_tag(
+	IN	srp_tsk_mgmt_t	*p_information_unit )
+{
+	return( p_information_unit->managed_task_tag );
+}
+
+/* get_srp_tsk_mgmt_length */
+/*!
+Returns the size in bytes of the Srp Task Management IU
+
+@param p_information_unit - pointer to the IU structure
+
+@return - size in bytes of the IU
+*/
+static inline
+uint32_t
+get_srp_tsk_mgmt_length(
+	IN	srp_tsk_mgmt_t	*p_information_unit )
+{
+	return( sizeof( *p_information_unit ) );
+}
+
+/* set_srp_tsk_mgmt_from_host_to_network */
+/*!
+Swaps the IU fields from Host to Network ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_tsk_mgmt_from_host_to_network(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit )
+{
+	set_srp_information_unit_from_host_to_network( ( srp_information_unit_t* ) p_information_unit );
+	p_information_unit->logical_unit_number = cl_hton64( p_information_unit->logical_unit_number );
+	p_information_unit->managed_task_tag = cl_hton64( p_information_unit->managed_task_tag );
+}
+
+/* set_srp_tsk_mgmt_from_network_to_host */
+/*!
+Swaps the IU fields from Network to Host ordering
+
+@param p_information_unit - pointer to the IU structure
+
+@return - none
+*/
+
+static inline
+void
+set_srp_tsk_mgmt_from_network_to_host(
+	IN OUT	srp_tsk_mgmt_t	*p_information_unit )
+{
+	set_srp_tsk_mgmt_from_host_to_network ( p_information_unit );
+}
+
+#endif /* SRP_TSK_MGMT_H_INCLUDED */
diff --git a/branches/Ndi/ulp/wsd/dirs b/branches/Ndi/ulp/wsd/dirs
new file mode 100644
index 00000000..389156fd
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/dirs
@@ -0,0 +1,2 @@
+DIRS=\
+	user
diff --git a/branches/Ndi/ulp/wsd/user/README b/branches/Ndi/ulp/wsd/user/README
new file mode 100644
index 00000000..dd75d261
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/README
@@ -0,0 +1,38 @@
+TODO
+====
+
+- use pd_handle or pd, but be consistent.
+
+- try to inline FindSocketContext and more.
+
+- use more than one wqc with ib_poll_cq.
+
+- I think WSPRecv can be called before the connection is
+established. Therefore the check of the state of the socket is
+incorrect.
+
+- protect the socket state transition. Use a function to check for valid transitions?
+
+- finish filling out ibsp_pnp.c to manage dynamic IP addresses.
+
+- ib_cm_rep can be sent synchronously. Should solve the accept dilemma.
+
+- some error paths are leaking resources, especially in IBSPAccept
+
+- socket_info_list is not protected
+
+- it might be possible to not get a completion event when the event bit is not set -> optimization. Disable solicitation bit in that case.
+
+- duplicating listen socket is not implemented. Is that possible? Will the switch do it?
+
+- replace listen.mutex by mutex?
+
+- maybe create a pool of created qps to speed up connections? Or keep existing QPs in a pool after they are moved to RESET.
+
+- is it a good idea to hold socket_info->mutex while calling CM?
+
+- do something about socket_info->port->hca
+
+
+
diff --git a/branches/Ndi/ulp/wsd/user/SOURCES b/branches/Ndi/ulp/wsd/user/SOURCES
new file mode 100644
index 00000000..9da1354a
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/SOURCES
@@ -0,0 +1,61 @@
+TARGETNAME=ibwsd
+TARGETPATH=..\..\..\bin\user\obj$(BUILD_ALT_DIR)
+TARGETTYPE=DYNLINK
+DLLENTRY=DllMain
+DLLDEF=ibspdll.def
+USE_NTDLL=1
+
+!if $(FREEBUILD)
+ENABLE_EVENT_TRACING=1
+!endif
+
+
+SOURCES=\
+	ibspdll.rc \
+	extensions.c \
+	ib_cm.c \
+	ibsp_iblow.c \
+	ibsp_ip.c \
+	ibsp_mem.c \
+	ibsp_pnp.c \
+	ibspdebug.c \
+	ibspdll.c \
+	misc.c \
+	sockinfo.c \
+	ibsp_duplicate.c \
+	ibsp_perfmon.c
+
+INCLUDES=..\..\..\inc;..\..\..\inc\user;$(DDK_INC_PATH);
+
+USER_C_FLAGS=$(USER_C_FLAGS) -DCL_NO_TRACK_MEM -DPERFMON_ENABLED -DWPP_OLDCC
+
+
+TARGETLIBS=\
+	$(SDK_LIB_PATH)\kernel32.lib \
+	$(SDK_LIB_PATH)\ws2_32.lib \
+	$(SDK_LIB_PATH)\rpcrt4.lib \
+	$(SDK_LIB_PATH)\Advapi32.lib \
+	$(SDK_LIB_PATH)\LoadPerf.lib \
+!if $(FREEBUILD)
+	$(TARGETPATH)\*\complib.lib \
+	$(TARGETPATH)\*\ibal.lib
+!else
+	$(TARGETPATH)\*\complibd.lib \
+	$(TARGETPATH)\*\ibald.lib
+!endif
+
+!IFDEF ENABLE_EVENT_TRACING
+
+C_DEFINES = $(C_DEFINES) -DEVENT_TRACING
+
+RUN_WPP= $(SOURCES) -ext: .c .h -dll \
+	-scan:ibspdebug.h\
+	-func:IBSP_PRINT(LEVEL,FLAGS,(MSG,...)) \
+	-func:IBSP_PRINT_EXIT(LEVEL,FLAGS,(MSG,...)) \
+	-func:IBSP_ERROR{LEVEL=TRACE_LEVEL_ERROR,FLAGS=IBSP_DBG_ERROR}((MSG,...)) \
+	-func:IBSP_ERROR_EXIT{LEVEL=TRACE_LEVEL_ERROR,FLAGS=IBSP_DBG_ERROR}((MSG,...))
+
+!ENDIF
+
+
+MSC_WARNING_LEVEL= /W4
diff --git a/branches/Ndi/ulp/wsd/user/extensions.c b/branches/Ndi/ulp/wsd/user/extensions.c
new file mode 100644
index 00000000..2839ce8c
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/extensions.c
@@ -0,0 +1,650 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ *  - Redistributions of source code must retain the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer.
+ *
+ *  - Redistributions in binary form must reproduce the above
+ *    copyright notice, this list of conditions and the following
+ *    disclaimer in the documentation and/or other materials
+ *    provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "ibspdebug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "extensions.tmh" +#endif + +#include "ibspdll.h" + + +/* Function: IBSPRegisterMemory + * Description: + * Registers buffer memory + */ +HANDLE WSPAPI +IBSPRegisterMemory( + IN SOCKET s, + IN PVOID lpBuffer, + IN DWORD dwBufferLength, + IN DWORD dwFlags, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + ib_access_t access_ctrl; + struct memory_node *node; + + IBSP_ENTER( IBSP_DBG_MEM ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + if( lpBuffer == NULL ) + { + IBSP_ERROR_EXIT( ( "invalid buffer %p\n", lpBuffer ) ); + *lpErrno = WSAEFAULT; + return NULL; + } + + if( dwBufferLength > socket_info->socket_options.max_msg_size ) + { + IBSP_ERROR_EXIT( ( "invalid buffer length %d\n", dwBufferLength ) ); + *lpErrno = WSAEFAULT; + return NULL; + } + + switch( dwFlags ) + { + case MEM_READ: + access_ctrl = 0; + break; + + case MEM_WRITE: + access_ctrl = IB_AC_LOCAL_WRITE; + break; + + case MEM_READWRITE: + access_ctrl = IB_AC_LOCAL_WRITE; + break; + + default: + IBSP_ERROR_EXIT( ("invalid flags %x\n", dwFlags) ); + *lpErrno = WSAEINVAL; + return NULL; + } + + node = ibsp_reg_mem( socket_info, socket_info->hca_pd, + lpBuffer, dwBufferLength, access_ctrl, lpErrno ); + + fzprint(("%s():%d:0x%x:0x%x: registering MEM from %p to %p, len %d, handle %p\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), + lpBuffer, (unsigned char *)lpBuffer + dwBufferLength, dwBufferLength, node)); + + + if( node == NULL ) + { + IBSP_ERROR( ("ibsp_reg_mem failed (pd=%p)\n", socket_info->hca_pd) ); + *lpErrno = WSAENOBUFS; + } + else + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, + IBSP_DBG_MEM, ("returning node %p\n", node) ); + *lpErrno = 0; + } + + IBSP_EXIT( IBSP_DBG_MEM ); + + return (HANDLE) node; +} + +/* Function: IBSPDeregisterMemory + * Description: + * This is our provider's DeregisterMemory function. + */ +int WSPAPI +IBSPDeregisterMemory( + IN SOCKET s, + IN HANDLE handle, + OUT LPINT lpErrno ) +{ + struct memory_node *node = handle; + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + int ret; + + IBSP_ENTER( IBSP_DBG_MEM ); + + fzprint(("%s():%d:0x%x:0x%x: handle=0x%p socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), handle, s)); + + if( s == INVALID_SOCKET ) + { + IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) ); + *lpErrno = WSAENOTSOCK; + return SOCKET_ERROR; + } + + ret = ibsp_dereg_mem( socket_info, node, lpErrno ); + + fzprint(("%s():%d:0x%x:0x%x: unregistering MEM %p, mr_num=%d, ret=%d\n", + __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), node, g_ibsp.mr_num, ret)); + + IBSP_EXIT( IBSP_DBG_MEM ); + return ret; +} + +/* Function: IBSPRegisterRdmaMemory + * Description: + * This is our provider's RegisterRdmaMemory function. 
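+ *
+ * Note: the descriptor written to lpRdmaBufferDescriptor is meant to travel
+ * to the remote peer, which hands it back down as the
+ * lpTargetBufferDescriptor argument of IBSPRdmaRead/IBSPRdmaWrite. A hedged
+ * sketch of the exchange (the out-of-band transport of the descriptor is
+ * handled by the WSD switch and is not shown):
+ *
+ *   struct rdma_memory_desc desc;
+ *   DWORD desc_len = sizeof(desc);
+ *   int err;
+ *   IBSPRegisterRdmaMemory( s, buf, buf_len, MEM_READWRITE,
+ *       &desc, &desc_len, &err );
+ *   ...send desc to the peer, which then targets this buffer with
+ *   IBSPRdmaWrite()/IBSPRdmaRead()...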
+*/
+int WSPAPI
+IBSPRegisterRdmaMemory(
+	IN	SOCKET					s,
+	IN	PVOID					lpBuffer,
+	IN	DWORD					dwBufferLength,
+	IN	DWORD					dwFlags,
+	OUT	LPVOID					lpRdmaBufferDescriptor,
+	IN OUT	LPDWORD				lpdwDescriptorLength,
+	OUT	LPINT					lpErrno )
+{
+	struct memory_node		*node2;
+	struct rdma_memory_desc	*desc;
+	struct ibsp_socket_info	*socket_info = (struct ibsp_socket_info *)s;
+	ib_access_t				access_ctrl;
+	struct ibsp_hca			*hca;
+
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__,
+			 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s));
+
+	if( *lpdwDescriptorLength < sizeof(struct rdma_memory_desc) )
+	{
+		/* This is the probe from the switch to learn the length of the descriptor. */
+		IBSP_ERROR_EXIT( ("invalid descriptor length %d (usually not an error)\n",
+			*lpdwDescriptorLength) );
+		*lpdwDescriptorLength = sizeof(struct rdma_memory_desc);
+		*lpErrno = WSAEFAULT;
+		return SOCKET_ERROR;
+	}
+
+	if( lpBuffer == NULL )
+	{
+		IBSP_ERROR_EXIT( ("invalid buffer %p\n", lpBuffer) );
+		*lpErrno = WSAEFAULT;
+		return SOCKET_ERROR;
+	}
+
+	if( dwBufferLength > socket_info->socket_options.max_msg_size )
+	{
+		IBSP_ERROR_EXIT( ("invalid buffer length %d\n", dwBufferLength) );
+		*lpErrno = WSAEFAULT;
+		return SOCKET_ERROR;
+	}
+
+	switch( dwFlags )
+	{
+	case MEM_READ:
+		access_ctrl = IB_AC_RDMA_READ;
+		break;
+
+	case MEM_WRITE:
+		access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_WRITE;
+		break;
+
+	case MEM_READWRITE:
+		access_ctrl = IB_AC_LOCAL_WRITE | IB_AC_RDMA_READ | IB_AC_RDMA_WRITE;
+		break;
+
+	default:
+		IBSP_ERROR_EXIT( ("invalid flags %x\n", dwFlags) );
+		*lpErrno = WSAEINVAL;
+		return SOCKET_ERROR;
+	}
+
+	hca = socket_info->port->hca;
+
+	/** TODO: Fix locking so we don't dereference node outside of mutex. */
+	node2 = ibsp_reg_mem( socket_info, hca->pd,
+		lpBuffer, dwBufferLength, access_ctrl, lpErrno );
+
+	if( !node2 )
+	{
+		IBSP_ERROR_EXIT( ("ibsp_reg_mem failed %d\n", *lpErrno) );
+		*lpErrno = WSAENOBUFS;
+		return SOCKET_ERROR;
+	}
+
+	desc = lpRdmaBufferDescriptor;
+
+	desc->iova = (uint64_t) (uintptr_t) lpBuffer;
+	desc->lkey = node2->p_reg->lkey;
+	desc->rkey = node2->p_reg->rkey;
+	desc->node1 = node2;
+
+	*lpErrno = 0;
+
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM,
+		("Socket %Ix registered RDMA MEM at %p, len %d, for access %d, "
+		"returning handle %p, rkey %08x\n",
+		s, lpBuffer, dwBufferLength, dwFlags, node2, desc->rkey));
+
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+
+	return 0;
+}
+
+/* Function: IBSPDeregisterRdmaMemory
+ * Description:
+ *   This is our provider's DeregisterRdmaMemory function.
+ */
+int WSPAPI
+IBSPDeregisterRdmaMemory(
+	IN	SOCKET					s,
+	IN	LPVOID					lpRdmaBufferDescriptor,
+	IN	DWORD					dwDescriptorLength,
+	OUT	LPINT					lpErrno )
+{
+	struct rdma_memory_desc	*desc;
+	struct ibsp_socket_info	*socket_info = (struct ibsp_socket_info *)s;
+	int						ret;
+
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__,
+			 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s));
+
+	if( s == INVALID_SOCKET )
+	{
+		/* Seen in real life with overlap/client test.
+		 * The switch closes a socket then calls this. Why?
+		 */
+		IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) );
+		*lpErrno = WSAENOTSOCK;
+		return SOCKET_ERROR;
+	}
+
+	CL_ASSERT( lpRdmaBufferDescriptor );
+
+	if( dwDescriptorLength < sizeof(struct rdma_memory_desc) )
+	{
+		IBSP_ERROR_EXIT( ("invalid descriptor length %d\n", dwDescriptorLength) );
+		*lpErrno = WSAEINVAL;
+		return SOCKET_ERROR;
+	}
+
+	desc = lpRdmaBufferDescriptor;
+
+	ret = ibsp_dereg_mem( socket_info, desc->node1, lpErrno );
+
+	fzprint(("%s():%d:0x%x:0x%x: Unregistering RDMA MEM %p\n",
+			 __FUNCTION__, __LINE__, GetCurrentProcessId(),
+			 GetCurrentThreadId(), desc->node1));
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+	return ret;
+}
+
+
+/*
+ * Do an RDMA read or write operation, since the code for both is very similar.
+ */
+static int
+do_rdma_op(
+	IN	SOCKET					s,
+	IN	LPWSABUFEX				lpBuffers,
+	IN	DWORD					dwBufferCount,
+	IN	LPVOID					lpTargetBufferDescriptor,
+	IN	DWORD					dwTargetDescriptorLength,
+	IN	DWORD					dwTargetBufferOffset,
+	IN	LPWSAOVERLAPPED			lpOverlapped,
+	IN	ib_wr_type_t			wr_type,
+	OUT	LPINT					lpErrno )
+{
+	struct ibsp_socket_info	*socket_info = (struct ibsp_socket_info *)s;
+	ib_api_status_t			status;
+	struct rdma_memory_desc	*desc;	/* remote descriptor */
+	struct _wr				*wr;
+	ib_send_wr_t			send_wr;
+	ib_local_ds_t			local_ds[QP_ATTRIB_SQ_SGE];
+	DWORD					ds_idx;
+
+	IBSP_ENTER( IBSP_DBG_IO );
+
+	CL_ASSERT( wr_type == WR_RDMA_WRITE || wr_type == WR_RDMA_READ );
+
+	cl_spinlock_acquire( &socket_info->mutex1 );
+	switch( socket_info->socket_state )
+	{
+	case IBSP_CONNECTED:
+	case IBSP_DISCONNECTED:
+		break;
+
+	default:
+		cl_spinlock_release( &socket_info->mutex1 );
+		IBSP_ERROR_EXIT( ("Socket is not in a connected state (socket_state=%s)\n",
+			IBSP_SOCKET_STATE_STR( socket_info->socket_state )) );
+		*lpErrno = WSAENOTCONN;
+		return SOCKET_ERROR;
+	}
+	cl_spinlock_release( &socket_info->mutex1 );
+
+	if( socket_info->qp_error )
+	{
+		IBSP_ERROR_EXIT( ("QP is in error state %d\n", socket_info->qp_error) );
+		*lpErrno = socket_info->qp_error;
+		return SOCKET_ERROR;
+	}
+
+	/* We can only handle up to QP_ATTRIB_SQ_SGE gather entries per request. */
+	if( dwBufferCount > QP_ATTRIB_SQ_SGE )
+	{
+		CL_ASSERT( dwBufferCount <= QP_ATTRIB_SQ_SGE );
+		/* TODO - support splitting large requests into multiple RDMA operations. */
+		IBSP_ERROR_EXIT(
+			("dwBufferCount is greater than %d\n", QP_ATTRIB_SQ_SGE) );
+		*lpErrno = WSAEINVAL;
+		return SOCKET_ERROR;
+	}
+
+	if( dwTargetDescriptorLength != sizeof(struct rdma_memory_desc) )
+	{
+		IBSP_ERROR_EXIT( (
+			"invalid descriptor length %d\n", dwTargetDescriptorLength) );
+		*lpErrno = WSAEINVAL;
+		return SOCKET_ERROR;
+	}
+
+	desc = lpTargetBufferDescriptor;
+
+	/* The send lock is only used to serialize posting. */
+	cl_spinlock_acquire( &socket_info->send_lock );
+	if( socket_info->send_cnt == QP_ATTRIB_SQ_DEPTH )
+	{
+		/* TODO: queue requests. */
+		cl_spinlock_release( &socket_info->send_lock );
+		IBSP_ERROR_EXIT( ("not enough wr on the free list\n") );
+		*lpErrno = WSAENETDOWN;
+		return SOCKET_ERROR;
+	}
+
+	wr = &socket_info->send_wr[socket_info->send_idx];
+
+	wr->lpOverlapped = lpOverlapped;
+	wr->socket_info = socket_info;
+
+	/* Format the send work request and post.
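+	 * A single ib_send_wr_t describes the whole transfer: ds_array gets one
+	 * ib_local_ds_t gather entry per WSABUFEX element, and remote_ops holds
+	 * the peer's virtual address and rkey taken from the exchanged
+	 * rdma_memory_desc.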
+	 */
+	send_wr.p_next = NULL;
+	send_wr.wr_id = (uint64_t)(void* __ptr64)wr;
+	send_wr.wr_type = wr_type;
+	send_wr.send_opt = 0;
+	send_wr.num_ds = dwBufferCount;
+	send_wr.ds_array = local_ds;
+
+	send_wr.remote_ops.vaddr = desc->iova + dwTargetBufferOffset;
+	send_wr.remote_ops.rkey = desc->rkey;
+
+	lpOverlapped->InternalHigh = 0;
+	for( ds_idx = 0; ds_idx < dwBufferCount; ds_idx++ )
+	{
+		local_ds[ds_idx].vaddr = (uint64_t)(void* __ptr64)lpBuffers[ds_idx].buf;
+		local_ds[ds_idx].length = lpBuffers[ds_idx].len;
+		local_ds[ds_idx].lkey =
+			((struct memory_node*)lpBuffers[ds_idx].handle)->p_reg->lkey;
+
+		lpOverlapped->InternalHigh += lpBuffers[ds_idx].len;
+	}
+
+	if( wr_type == WR_RDMA_READ )
+	{
+		/*
+		 * Next send must be fenced since it could indicate that this
+		 * RDMA READ is complete.
+		 */
+		socket_info->send_opt = IB_SEND_OPT_FENCE;
+	}
+	else if( lpOverlapped->InternalHigh <= socket_info->max_inline )
+	{
+		send_wr.send_opt |= IB_SEND_OPT_INLINE;
+	}
+
+	/*
+	 * We must set this now, because the operation could complete
+	 * before ib_post_send returns.
+	 */
+	lpOverlapped->Internal = WSS_OPERATION_IN_PROGRESS;
+
+	cl_atomic_inc( &socket_info->send_cnt );
+
+#ifdef _DEBUG_
+	if( lpOverlapped->hEvent == 0 )
+	{
+		cl_atomic_inc( &g_ibsp.overlap_h0_count );
+	}
+	else
+	{
+		cl_atomic_inc( &g_ibsp.overlap_h1_count );
+		cl_atomic_inc( &g_ibsp.overlap_h1_comp_count );
+	}
+
+	fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0_cnt=%d h1_cnt=%d\n",
+			 __FUNCTION__, __LINE__, GetCurrentProcessId(),
+			 GetCurrentThreadId(), lpOverlapped,
+			 g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count));
+
+#endif
+
+	status = ib_post_send( socket_info->qp, &send_wr, NULL );
+
+	if( status == IB_SUCCESS )
+	{
+		/* Update the index and wrap as needed */
+#if QP_ATTRIB_SQ_DEPTH == 256 || QP_ATTRIB_SQ_DEPTH == 128 || \
+	QP_ATTRIB_SQ_DEPTH == 64 || QP_ATTRIB_SQ_DEPTH == 32 || \
+	QP_ATTRIB_SQ_DEPTH == 16 || QP_ATTRIB_SQ_DEPTH == 8
+		socket_info->send_idx++;
+		socket_info->send_idx &= (QP_ATTRIB_SQ_DEPTH - 1);
+#else
+		if( ++socket_info->send_idx == QP_ATTRIB_SQ_DEPTH )
+			socket_info->send_idx = 0;
+#endif
+
+		*lpErrno = WSA_IO_PENDING;
+
+		IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO,
+			("Posted RDMA: socket=%Ix, ov=%p, type=%d, local=%p, len=%d, "
+			"dest=%016I64x, rkey=%08x\n",
+			s, lpOverlapped, wr_type, lpBuffers[0].buf, lpBuffers[0].len,
+			send_wr.remote_ops.vaddr, send_wr.remote_ops.rkey) );
+
+		fzprint(("posted RDMA %p, len=%d, op=%d, wr=%p\n",
+				 lpOverlapped, lpBuffers[0].len, wr_type, wr));
+	}
+	else
+	{
+		IBSP_ERROR( ("ib_post_send returned %s\n", ib_get_err_str( status )) );
+
+#ifdef _DEBUG_
+
+		if( lpOverlapped->hEvent == 0 )
+		{
+			cl_atomic_dec( &g_ibsp.overlap_h0_count );
+		}
+		else
+		{
+			cl_atomic_dec( &g_ibsp.overlap_h1_count );
+			cl_atomic_dec( &g_ibsp.overlap_h1_comp_count );
+		}
+
+		memset( wr, 0x44, sizeof(struct _wr) );
+#endif
+		cl_atomic_dec( &socket_info->send_cnt );
+
+		*lpErrno = ibal_to_wsa_error( status );
+	}
+
+	cl_spinlock_release( &socket_info->send_lock );
+
+	/* We never complete the operation here. */
+	IBSP_EXIT( IBSP_DBG_IO );
+	return SOCKET_ERROR;
+}
+
+
+/* Function: IBSPRdmaWrite
+   Description:
+   This is our provider's RdmaWrite function. When an app calls WSAIoctl
+   to request the function pointer to RdmaWrite, we return a pointer to this
+   function, and the application then calls this function directly using the
+   function pointer.
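+
+   A hedged sketch of how a caller might obtain the pointer (the GUID and
+   typedef names below are assumed from the SAN extension header ws2san.h;
+   error handling elided):
+
+       GUID guid = WSAID_RDMAWRITE;
+       LPFN_WSPRDMAWRITE pfnRdmaWrite;
+       DWORD bytes;
+       WSAIoctl( s, SIO_GET_EXTENSION_FUNCTION_POINTER,
+           &guid, sizeof(guid), &pfnRdmaWrite, sizeof(pfnRdmaWrite),
+           &bytes, NULL, NULL );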
+*/
+int WSPAPI
+IBSPRdmaWrite(
+	IN	SOCKET								s,
+	IN	LPWSABUFEX							lpBuffers,
+	IN	DWORD								dwBufferCount,
+	IN	LPVOID								lpTargetBufferDescriptor,
+	IN	DWORD								dwTargetDescriptorLength,
+	IN	DWORD								dwTargetBufferOffset,
+	OUT	LPDWORD								lpdwNumberOfBytesWritten,
+	IN	DWORD								dwFlags,
+	IN	LPWSAOVERLAPPED						lpOverlapped,
+	IN	LPWSAOVERLAPPED_COMPLETION_ROUTINE	lpCompletionRoutine,
+	IN	LPWSATHREADID						lpThreadId,
+	OUT	LPINT								lpErrno )
+{
+	int ret;
+
+	IBSP_ENTER( IBSP_DBG_IO );
+
+	UNUSED_PARAM( lpThreadId );
+	UNUSED_PARAM( lpCompletionRoutine );
+	UNUSED_PARAM( lpdwNumberOfBytesWritten );
+
+	if( s == INVALID_SOCKET )
+	{
+		IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) );
+		*lpErrno = WSAENOTSOCK;
+		return SOCKET_ERROR;
+	}
+
+	fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p\n", __FUNCTION__,
+			 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped));
+
+	/* Store the flags for reporting back in IBSPGetOverlappedResult */
+	lpOverlapped->Offset = dwFlags;
+
+	ret = do_rdma_op( s, lpBuffers, dwBufferCount, lpTargetBufferDescriptor,
+		dwTargetDescriptorLength, dwTargetBufferOffset,
+		lpOverlapped, WR_RDMA_WRITE, lpErrno );
+
+	IBSP_EXIT( IBSP_DBG_IO );
+
+	return ret;
+}
+
+
+/* Function: IBSPRdmaRead
+   Description:
+   This is our provider's RdmaRead function. When an app calls WSAIoctl
+   to request the function pointer to RdmaRead, we return a pointer to this
+   function, and the application then calls this function directly using the
+   function pointer.
+*/
+int WSPAPI
+IBSPRdmaRead(
+	IN	SOCKET								s,
+	IN	LPWSABUFEX							lpBuffers,
+	IN	DWORD								dwBufferCount,
+	IN	LPVOID								lpTargetBufferDescriptor,
+	IN	DWORD								dwTargetDescriptorLength,
+	IN	DWORD								dwTargetBufferOffset,
+	OUT	LPDWORD								lpdwNumberOfBytesRead,
+	IN	DWORD								dwFlags,
+	IN	LPWSAOVERLAPPED						lpOverlapped,
+	IN	LPWSAOVERLAPPED_COMPLETION_ROUTINE	lpCompletionRoutine,
+	IN	LPWSATHREADID						lpThreadId,
+	OUT	LPINT								lpErrno )
+{
+	int ret;
+
+	IBSP_ENTER( IBSP_DBG_IO );
+
+	UNUSED_PARAM( lpThreadId );
+	UNUSED_PARAM( lpCompletionRoutine );
+	UNUSED_PARAM( lpdwNumberOfBytesRead );
+
+	fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p \n", __FUNCTION__,
+			 __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped));
+
+	/* Store the flags for reporting back in IBSPGetOverlappedResult */
+	lpOverlapped->Offset = dwFlags;
+
+	ret = do_rdma_op( s, lpBuffers, dwBufferCount, lpTargetBufferDescriptor,
+		dwTargetDescriptorLength, dwTargetBufferOffset,
+		lpOverlapped, WR_RDMA_READ, lpErrno );
+
+	IBSP_EXIT( IBSP_DBG_IO );
+
+	return ret;
+}
+
+
+/* Function: IBSPMemoryRegistrationCacheCallback
+ * Description:
+ *   This is our provider's MemoryRegistrationCacheCallback
+ *   function. When an app calls WSAIoctl to request the function
+ *   pointer to MemoryRegistrationCacheCallback, we return a pointer to
+ *   this function, and the application then calls this function directly
+ *   using the function pointer.
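+ *
+ *   The switch invokes this callback when a region of user memory is being
+ *   freed, so any cached registrations overlapping
+ *   [lpvAddress, lpvAddress + Size) must be invalidated before the virtual
+ *   range can be reused; the loop below walks every HCA and flushes the
+ *   matching cache entries.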
+ */ +int WSPAPI +IBSPMemoryRegistrationCacheCallback( + IN LPVOID lpvAddress, + IN SIZE_T Size, + OUT LPINT lpErrno ) +{ + cl_list_item_t *p_item; + + IBSP_ENTER( IBSP_DBG_MEM ); + + UNUSED_PARAM( lpErrno ); + + cl_spinlock_acquire( &g_ibsp.hca_mutex ); + for( p_item = cl_qlist_head( &g_ibsp.hca_list ); + p_item != cl_qlist_end( &g_ibsp.hca_list ); + p_item = cl_qlist_next( p_item ) ) + { + ibsp_hca_flush_mr_cache( + PARENT_STRUCT( p_item, struct ibsp_hca, item ), lpvAddress, Size ); + } + cl_spinlock_release( &g_ibsp.hca_mutex ); + + IBSP_EXIT( IBSP_DBG_MEM ); + return 0; +} diff --git a/branches/Ndi/ulp/wsd/user/ib_cm.c b/branches/Ndi/ulp/wsd/user/ib_cm.c new file mode 100644 index 00000000..118234ae --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ib_cm.c @@ -0,0 +1,981 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#include "ib_cm.tmh" +#endif + +#include "ibspdll.h" + +static void AL_API cm_req_callback(IN ib_cm_req_rec_t * p_cm_req_rec); +static void AL_API cm_rep_callback(IN ib_cm_rep_rec_t * p_cm_rep_rec); +static void AL_API cm_rtu_callback(IN ib_cm_rtu_rec_t * p_cm_rtu_rec); +static void AL_API cm_rej_callback(IN ib_cm_rej_rec_t * p_cm_rej_rec); +static void AL_API cm_mra_callback(IN ib_cm_mra_rec_t * p_cm_mra_rec); +static void AL_API cm_dreq_callback(IN ib_cm_dreq_rec_t * p_cm_dreq_rec); +static void AL_API listen_err_callback(IN ib_listen_err_rec_t * p_listen_err_rec); +static void AL_API cm_apr_callback(IN ib_cm_apr_rec_t * p_cm_apr_rec); + + +/* Computes a service ID for a port. */ +static inline ib_net64_t +get_service_id_for_port( + ib_net16_t ip_port) +{ + return BASE_LISTEN_ID | ip_port; +} + + +/* Signals a select event to the switch. 
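+ *
+ * The event bit is OR'ed into socket_info->network_events and, if the
+ * application registered an event handle whose mask includes this event,
+ * that handle is signaled so the switch can pick up the FD_CONNECT or
+ * FD_ACCEPT notification. The error argument is only meaningful for
+ * FD_CONNECT, where it is stored in errno_connect.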
*/ +void +ibsp_post_select_event( + struct ibsp_socket_info *socket_info, + int event, + int error ) +{ + HANDLE h_event; + + IBSP_ENTER( IBSP_DBG_NEV ); + + CL_ASSERT( socket_info ); + CL_ASSERT( event ); + + switch( event ) + { + case FD_CONNECT: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("socket %p FD_CONNECT\n", socket_info) ); + socket_info->errno_connect = error; + break; + + case FD_ACCEPT: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("socket %p FD_ACCEPT\n", socket_info) ); + break; + + default: + CL_ASSERT( 0 ); + break; + } + + _InterlockedOr( &socket_info->network_events, event ); + + h_event = InterlockedCompareExchangePointer( + &socket_info->event_select, NULL, NULL ); + /* Check for event notification request and signal as needed. */ + if( (socket_info->event_mask & event) && h_event ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("Signaling eventHandle %p at time %I64d.\n", + h_event, cl_get_time_stamp() ) ); + SetEvent( h_event ); + } + + IBSP_EXIT( IBSP_DBG_NEV ); +} + + +/* + * A user-specified callback that is invoked after receiving a connection + * request message (REQ). + */ +static void AL_API +cm_req_callback( + IN ib_cm_req_rec_t *p_cm_req_rec ) +{ + struct ibsp_socket_info *socket_info = + (struct ibsp_socket_info * __ptr64)p_cm_req_rec->context; + struct listen_incoming *incoming; + + IBSP_ENTER( IBSP_DBG_CM ); + + CL_ASSERT( socket_info ); + CL_ASSERT( p_cm_req_rec->p_req_pdata ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + + switch( socket_info->socket_state ) + { + case IBSP_LISTEN: + if( cl_qlist_count( &socket_info->listen.list ) >= + socket_info->listen.backlog ) + { + /* Already too many connection requests are queued */ + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, + ("already too many incoming connections, rejecting\n") ); + ib_reject( p_cm_req_rec->h_cm_req, IB_REJ_USER_DEFINED ); + break; + } + + incoming = HeapAlloc( g_ibsp.heap, 0, sizeof(struct listen_incoming) ); + if( !incoming ) + { + /* Low on memory. */ + IBSP_ERROR( ("HeapAlloc failed, rejecting\n") ); + ib_reject( p_cm_req_rec->h_cm_req, IB_REJ_INSUF_RESOURCES ); + IBSP_EXIT( IBSP_DBG_CM ); + return; + } + + incoming->cm_req_received = *p_cm_req_rec; + cl_memcpy( &incoming->params, p_cm_req_rec->p_req_pdata, + sizeof(struct cm_req_params) ); + incoming->cm_req_received.p_req_pdata = (const uint8_t*)&incoming->params; + + /* Add to the waiting list */ + cl_qlist_insert_tail( &socket_info->listen.list, &incoming->item ); + + ibsp_post_select_event( socket_info, FD_ACCEPT, 0 ); + break; + + case IBSP_DUPLICATING_REMOTE: + { + int ret; + + /* Non-blocking cancel since we're in CM callback context */ + ib_cm_cancel( socket_info->listen.handle, NULL ); + socket_info->listen.handle = NULL; + cl_spinlock_release( &socket_info->mutex1 ); + + wait_cq_drain( socket_info ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + ret = ib_accept( socket_info, p_cm_req_rec ); + if( ret ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR( ( + "ib_accept for duplicate socket returned %d, rejecting\n", + ret) ); + /* Call ib_destroy_socket for above ib_create_socket() call */ + ib_destroy_socket( socket_info ); + ib_reject( p_cm_req_rec->h_cm_req, IB_REJ_USER_DEFINED ); + ibsp_dup_overlap_abort( socket_info ); + IBSP_EXIT( IBSP_DBG_CM ); + return; + } + } + break; + + default: + IBSP_ERROR( ("socket is not listening anymore\n") ); + /* We're closing down - let some other listen match. 
*/ + ib_reject( p_cm_req_rec->h_cm_req, IB_REJ_INVALID_SID ); + break; + } + + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* + * A user-specified callback that is invoked after receiving a connection + * request reply message (REP). + */ +static void AL_API +cm_rep_callback( + IN ib_cm_rep_rec_t *p_cm_rep_rec ) +{ + struct ibsp_socket_info *socket_info = + (struct ibsp_socket_info * __ptr64)p_cm_rep_rec->qp_context; + ib_cm_rtu_t cm_rtu; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_CM ); + + memset( &cm_rtu, 0, sizeof(cm_rtu) ); + + cm_rtu.access_ctrl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; +#if 0 + // Bug in TAVOR + cm_rtu.sq_depth = QP_ATTRIB_SQ_DEPTH; + cm_rtu.rq_depth = QP_ATTRIB_RQ_DEPTH; +#endif + cm_rtu.pfn_cm_apr_cb = cm_apr_callback; + cm_rtu.pfn_cm_dreq_cb = cm_dreq_callback; + + cl_spinlock_acquire( &socket_info->mutex1 ); + + switch( socket_info->socket_state ) + { + case IBSP_CONNECT: + status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &cm_rtu ); + if( status != IB_SUCCESS ) + { + /* Note: a REJ has been automatically sent. */ + IBSP_ERROR( ("ib_cm_rtu returned %s\n", ib_get_err_str( status )) ); + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + + /* We changed the state - remove from connection map. */ + ibsp_conn_remove( socket_info ); + + ibsp_post_select_event( socket_info, FD_CONNECT, WSAETIMEDOUT ); + } + else + { + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECTED ); + ibsp_post_select_event( socket_info, FD_CONNECT, 0 ); + } + break; + + case IBSP_DUPLICATING_NEW: + status = ib_cm_rtu( p_cm_rep_rec->h_cm_rep, &cm_rtu ); + if( status != IB_SUCCESS ) + { + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + + /* We changed the state - remove from connection map. */ + ibsp_conn_remove( socket_info ); + + /* Note: a REJ has been automatically sent. */ + IBSP_ERROR( ("ib_cm_rtu returned %s\n", ib_get_err_str( status )) ); + } + else + { + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECTED ); + } + SetEvent( socket_info->h_event ); + break; + + default: + /* The socket might be closing */ + IBSP_ERROR( ("socket %p not in connecting state (%s)\n", + socket_info, IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + + ib_reject( p_cm_rep_rec->h_cm_rep, IB_REJ_USER_DEFINED ); + } + + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* + * A user-specified callback that is invoked after receiving a connection + * ready to use message (RTU). + */ +static void AL_API +cm_rtu_callback( + IN ib_cm_rtu_rec_t *p_cm_rtu_rec ) +{ + struct ibsp_socket_info *socket_info = + (struct ibsp_socket_info * __ptr64)p_cm_rtu_rec->qp_context; + + IBSP_ENTER( IBSP_DBG_CM ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + + if( socket_info->socket_state == IBSP_DUPLICATING_REMOTE ) + { + struct _recv_wr *wr; + ib_api_status_t status; + uint8_t idx; + + /* Repost all the WR to the new QP */ + cl_spinlock_acquire( &socket_info->recv_lock ); + + while( socket_info->dup_cnt ) + { + if( (socket_info->recv_cnt + socket_info->dup_cnt) > + QP_ATTRIB_RQ_DEPTH ) + { + CL_ASSERT( (socket_info->recv_cnt + socket_info->dup_cnt) <= + QP_ATTRIB_RQ_DEPTH ); + /* TODO: Flag the socket as having failed. */ + break; + } + + + /* Figure out the starting index in the duplicate array. */ + idx = socket_info->dup_idx - (uint8_t)socket_info->dup_cnt; + if( idx >= QP_ATTRIB_RQ_DEPTH ) + { + /* The duplicates wrap over the end of the array. 
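+				 * Because idx is a uint8_t, the subtraction above wrapped
+				 * modulo 256, so an underflow shows up as a large value and
+				 * adding RQ_DEPTH brings it back into range. Worked example
+				 * with a hypothetical QP_ATTRIB_RQ_DEPTH of 8: dup_idx = 2,
+				 * dup_cnt = 5 gives idx = 2 - 5 = 253; 253 >= 8, so
+				 * idx += 8 truncates 261 to 5, i.e. (2 - 5) mod 8. This
+				 * assumes dup_cnt never exceeds QP_ATTRIB_RQ_DEPTH.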
*/ + idx += QP_ATTRIB_RQ_DEPTH; + } + + /* + * Copy the duplicate work request from the duplicate array + * to the receive array. + */ + socket_info->recv_wr[socket_info->recv_idx] = + socket_info->dup_wr[idx]; + + wr = &socket_info->recv_wr[socket_info->recv_idx]; + + /* Update the work request ID. */ + wr->recv.wr_id = (uint64_t)(void* __ptr64)wr; + + /* + * Increment the count before posting so it doesn't go + * negative in the completion path. + */ + cl_atomic_inc( &socket_info->recv_cnt ); + + status = ib_post_recv( socket_info->qp, &wr->recv, NULL ); + + if( status == IB_SUCCESS ) + { + /* Update the index and wrap as needed */ +#if QP_ATTRIB_RQ_DEPTH == 256 || QP_ATTRIB_RQ_DEPTH == 128 || \ + QP_ATTRIB_RQ_DEPTH == 64 || QP_ATTRIB_RQ_DEPTH == 32 || \ + QP_ATTRIB_RQ_DEPTH == 16 || QP_ATTRIB_RQ_DEPTH == 8 + socket_info->recv_idx++; + socket_info->recv_idx &= (QP_ATTRIB_RQ_DEPTH - 1); +#else + if( ++socket_info->recv_idx == QP_ATTRIB_RQ_DEPTH ) + socket_info->recv_idx = 0; +#endif + + cl_atomic_dec( &socket_info->dup_cnt ); + } + else + { + IBSP_ERROR( ( + "ib_post_recv returned %s for reposted buffer\n", + ib_get_err_str( status )) ); + + cl_atomic_dec( &socket_info->recv_cnt ); + CL_ASSERT( status == IB_SUCCESS ); + /* TODO: Flag the socket as having failed. */ + break; + } + } + + cl_spinlock_release( &socket_info->recv_lock ); + + socket_info->qp_error = 0; + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECTED ); + } + else if( socket_info->socket_state != IBSP_CONNECTED ) + { + /* The Socket might be closing */ + IBSP_ERROR( ("Got RTU while in socket_state %s - ignoring\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + } + + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* Force the QP to error state to flush posted work requests. */ +static inline void +__flush_qp( + IN struct ibsp_socket_info *p_socket ) +{ + ib_qp_mod_t qp_mod; + ib_api_status_t status; + + memset( &qp_mod, 0, sizeof(qp_mod) ); + qp_mod.req_state = IB_QPS_ERROR; + status = ib_modify_qp( p_socket->qp, &qp_mod ); + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_modify_qp returned %s\n", ib_get_err_str( status )) ); + p_socket->send_cnt = 0; + p_socket->recv_cnt = 0; + } +} + + +/* + * A user-specified callback that is invoked after receiving a connection + * rejection message (REJ). + */ +static void AL_API +cm_rej_callback( + IN ib_cm_rej_rec_t *p_cm_rej_rec ) +{ + struct ibsp_socket_info *socket_info = + (struct ibsp_socket_info * __ptr64)p_cm_rej_rec->qp_context; + + IBSP_ENTER( IBSP_DBG_CM ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, ("socket %p connect reject, reason=%d\n", + socket_info, cl_ntoh16(p_cm_rej_rec->rej_status)) ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + + switch( socket_info->socket_state ) + { + case IBSP_CONNECT: + /* Remove from connection map. */ + ibsp_conn_remove( socket_info ); + + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + if( p_cm_rej_rec->rej_status == IB_REJ_TIMEOUT ) + ibsp_post_select_event( socket_info, FD_CONNECT, WSAETIMEDOUT ); + else + ibsp_post_select_event( socket_info, FD_CONNECT, WSAECONNREFUSED ); + break; + + case IBSP_CONNECTED: + /* + * DISCONNECTED is a terminal state. We'll remove the connection + * when the socket gets destroyed. + */ + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_DISCONNECTED ); + + socket_info->qp_error = WSAECONNABORTED; + + __flush_qp( socket_info ); + break; + + case IBSP_DUPLICATING_NEW: + /* Leave in that state. 
IBSPSocket will eventually return
+		 * an error because the socket is not connected. */
+		ibsp_conn_remove( socket_info );
+		SetEvent( socket_info->h_event );
+		break;
+
+	default:
+		IBSP_ERROR( ("socket %p got a REJ reason %d in state %s\n",
+			socket_info, cl_ntoh16( p_cm_rej_rec->rej_status ),
+			IBSP_SOCKET_STATE_STR(socket_info->socket_state)) );
+		break;
+	}
+
+	cl_spinlock_release( &socket_info->mutex1 );
+
+	IBSP_EXIT( IBSP_DBG_CM );
+}
+
+
+/*
+ * A user-specified callback that is invoked after receiving a message
+ * received acknowledgement.
+ */
+static void AL_API
+cm_mra_callback(
+	IN	ib_cm_mra_rec_t			*p_cm_mra_rec )
+{
+	/* TODO */
+	IBSP_ENTER( IBSP_DBG_CM );
+
+	UNUSED_PARAM( p_cm_mra_rec );
+
+	IBSP_EXIT( IBSP_DBG_CM );
+}
+
+
+/*
+ * A user-specified callback that is invoked after receiving a disconnect
+ * request message (DREQ).
+ */
+static void AL_API
+cm_dreq_callback(
+	IN	ib_cm_dreq_rec_t			*p_cm_dreq_rec )
+{
+	ib_api_status_t	status;
+	ib_cm_drep_t	cm_drep;
+	struct disconnect_reason *reason;
+	struct ibsp_socket_info *socket_info =
+		(struct ibsp_socket_info * __ptr64)p_cm_dreq_rec->qp_context;
+
+	IBSP_ENTER( IBSP_DBG_CM );
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM,
+		("socket=%p state=%s\n",
+		socket_info, IBSP_SOCKET_STATE_STR( socket_info->socket_state )) );
+
+	reason = (struct disconnect_reason * __ptr64)p_cm_dreq_rec->p_dreq_pdata;
+
+	cl_spinlock_acquire( &socket_info->mutex1 );
+
+	if( socket_info->socket_state == IBSP_CONNECTED )
+	{
+		switch( reason->type )
+		{
+		case DISC_DUPLICATING:
+			{
+				int ret;
+
+				IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_DUPLICATING_REMOTE );
+				socket_info->qp_error = -1;
+				socket_info->duplicate.identifier = reason->duplicating.identifier;
+				socket_info->duplicate.dwProcessId = reason->duplicating.dwProcessId;
+
+				/* Now, setup our listening callback. */
+				socket_info->listen.listen_req_param.dwProcessId =
+					reason->duplicating.dwProcessId;
+				socket_info->listen.listen_req_param.identifier =
+					reason->duplicating.identifier;
+
+				ret = ib_listen( socket_info );
+				if( !ret )
+				{
+					/* We changed the state - remove from connection map. */
+					ibsp_conn_remove( socket_info );
+					break;
+				}
+
+				IBSP_ERROR_EXIT( ("ib_listen failed with %d\n", ret) );
+				/* Fall through. */
+			}
+		default:
+			/* Right now, treat anything as a normal disconnect. */
+			IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_DISCONNECTED );
+			/*
+			 * DISCONNECTED is a terminal state. We'll remove the connection
+			 * when the socket gets destroyed.
+			 */
+			socket_info->qp_error = WSAECONNRESET;
+		}
+
+		memset( &cm_drep, 0, sizeof(cm_drep) );
+
+		status = ib_cm_drep( p_cm_dreq_rec->h_cm_dreq, &cm_drep );
+		if( status != IB_SUCCESS )
+			IBSP_ERROR( ("ib_cm_drep returned %s\n", ib_get_err_str( status )) );
+	}
+	cl_spinlock_release( &socket_info->mutex1 );
+
+	IBSP_EXIT( IBSP_DBG_CM );
+}
+
+
+/*
+ * A user-specified callback that is invoked after receiving a disconnect
+ * reply message.
+ */
+static void AL_API
+cm_drep_callback(
+	IN	ib_cm_drep_rec_t			*p_cm_drep_rec )
+{
+	IBSP_ENTER( IBSP_DBG_CM );
+	UNUSED_PARAM( p_cm_drep_rec );
+	IBSP_EXIT( IBSP_DBG_CM );
+}
+
+
+/*
+ * A user-specified callback that is invoked after an error has occurred on
+ * a listen request.
+ */ +static void AL_API +listen_err_callback( + IN ib_listen_err_rec_t *p_listen_err_rec ) +{ + /* TODO */ + IBSP_ENTER( IBSP_DBG_CM ); + + UNUSED_PARAM( p_listen_err_rec ); + + IBSP_ERROR( ("not implemented") ); + + CL_ASSERT( 0 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* + * A user-specified callback that is invoked after receiving a load + * alternate path response message. + */ +static void AL_API +cm_apr_callback( + IN ib_cm_apr_rec_t *p_cm_apr_rec ) +{ + /* TODO */ + IBSP_ENTER( IBSP_DBG_CM ); + + UNUSED_PARAM( p_cm_apr_rec ); + + IBSP_ERROR( ("not implemented") ); + + CL_ASSERT( 0 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* + * A user-specified callback that is invoked after receiving a load + * alternate path message. + * + * SYNOPSIS + */ +static void AL_API +cm_lap_callback( + IN ib_cm_lap_rec_t *p_cm_lap_rec ) +{ + /* TODO */ + IBSP_ENTER( IBSP_DBG_CM ); + + UNUSED_PARAM( p_cm_lap_rec ); + + IBSP_ERROR( ("not implemented") ); + + CL_ASSERT( 0 ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +/* Listen for an incoming connection. */ +int +ib_listen( + IN struct ibsp_socket_info *socket_info ) +{ + ib_cm_listen_t param; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_CM ); + + memset( ¶m, 0, sizeof(param) ); + + param.svc_id = get_service_id_for_port( socket_info->local_addr.sin_port ); + if( socket_info->port ) + { + /* The socket is bound to an IP address */ + param.ca_guid = socket_info->port->hca->guid; + param.port_guid = socket_info->port->guid; + } + else + { + /* The socket is bound to INADDR_ANY */ + param.ca_guid = IB_ALL_CAS; + param.port_guid = IB_ALL_PORTS; + } + param.lid = IB_ALL_LIDS; + + param.p_compare_buffer = (uint8_t *) & socket_info->listen.listen_req_param; + param.compare_length = sizeof(struct listen_req_param); + param.compare_offset = offsetof(struct cm_req_params, listen_req_param); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p params: %x %x\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), socket_info, + socket_info->listen.listen_req_param.dwProcessId, + socket_info->listen.listen_req_param.identifier)); + + param.pfn_cm_req_cb = cm_req_callback; + + param.qp_type = IB_QPT_RELIABLE_CONN; + + status = ib_cm_listen( g_ibsp.al_handle, ¶m, listen_err_callback, socket_info, /* context */ + &socket_info->listen.handle ); + + if( status != IB_SUCCESS ) + { + IBSP_ERROR_EXIT( ("ib_cm_listen failed (0x%d)\n", status) ); + return ibal_to_wsa_error( status ); + } + + STAT_INC( listen_num ); + + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, + ("started listening for port %d\n", + CL_HTON16( socket_info->local_addr.sin_port )) ); + + return 0; +} + + +/* Reject all the queued incoming connection requests. */ +void +ib_listen_backlog( + IN struct ibsp_socket_info *socket_info, + IN int backlog ) +{ + cl_list_item_t *item; + struct listen_incoming *incoming; + + socket_info->listen.backlog = backlog; + + while( + cl_qlist_count( &socket_info->listen.list ) > (uint32_t)backlog ) + { + item = cl_qlist_remove_tail( &socket_info->listen.list ); + + incoming = PARENT_STRUCT(item, struct listen_incoming, item); + + ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_USER_DEFINED ); + + HeapFree( g_ibsp.heap, 0, incoming ); + } +} + + +/* Stop listening on the socket. 
*/ +void +ib_listen_cancel( + IN struct ibsp_socket_info *socket_info ) +{ + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_CM ); + + status = ib_cm_cancel( socket_info->listen.handle, ib_sync_destroy ); + if( status ) + { + IBSP_ERROR( ( + "ib_cm_cancel returned %s\n", ib_get_err_str( status )) ); + } + else + { + STAT_DEC( listen_num ); + } + + /* We can empty the queue now. Since we are closing, + * no new entry will be added. */ + cl_spinlock_acquire( &socket_info->mutex1 ); + ib_listen_backlog( socket_info, 0 ); + cl_spinlock_release( &socket_info->mutex1 ); + + socket_info->listen.handle = NULL; + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +int +ib_connect( + IN struct ibsp_socket_info *socket_info, + IN ib_path_rec_t *path_rec ) +{ + ib_cm_req_t cm_req; + ib_api_status_t status; + struct cm_req_params params; + + IBSP_ENTER( IBSP_DBG_CM ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info)); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, ("From:\n") ); + DebugPrintSockAddr( IBSP_DBG_CM, &socket_info->local_addr ); + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, ("To:\n") ); + DebugPrintSockAddr( IBSP_DBG_CM, &socket_info->peer_addr ); + + /* Insert into the connection map. */ + if( !ibsp_conn_insert( socket_info ) ) + { + IBSP_EXIT( IBSP_DBG_CM ); + return WSAEADDRINUSE; + } + + memset( &cm_req, 0, sizeof(cm_req) ); + + cm_req.svc_id = get_service_id_for_port( socket_info->peer_addr.sin_port ); + cm_req.max_cm_retries = g_max_cm_retries; + cm_req.p_primary_path = path_rec; + cm_req.pfn_cm_rep_cb = cm_rep_callback; + + cm_req.p_req_pdata = (uint8_t *) & params; + params.source = socket_info->local_addr; + params.dest = socket_info->peer_addr; + params.listen_req_param.dwProcessId = socket_info->duplicate.dwProcessId; + params.listen_req_param.identifier = socket_info->duplicate.identifier; + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, + ("ib_connect listen params: %x \n", params.listen_req_param.dwProcessId + /*params.listen_req_param.identifier*/)); + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CM, + ("connecting to port %d, SID=%016I64x\n", socket_info->peer_addr.sin_port, + cm_req.svc_id) ); + + cm_req.req_length = sizeof(struct cm_req_params); + + cm_req.qp_type = IB_QPT_RELIABLE_CONN; + cm_req.h_qp = socket_info->qp; + cm_req.resp_res = QP_ATTRIB_RESPONDER_RESOURCES; + cm_req.init_depth = QP_ATTRIB_INITIATOR_DEPTH; + + cm_req.remote_resp_timeout = + ib_path_rec_pkt_life( path_rec ) + CM_REMOTE_TIMEOUT; + if( cm_req.remote_resp_timeout > 0x1F ) + cm_req.remote_resp_timeout = 0x1F; + else if( cm_req.remote_resp_timeout < CM_MIN_REMOTE_TIMEOUT ) + cm_req.remote_resp_timeout = CM_MIN_REMOTE_TIMEOUT; + + cm_req.flow_ctrl = TRUE; /* HCAs must support end-to-end flow control. */ + + cm_req.local_resp_timeout = + ib_path_rec_pkt_life( path_rec ) + CM_LOCAL_TIMEOUT; + if( cm_req.local_resp_timeout > 0x1F ) + cm_req.local_resp_timeout = 0x1F; + else if( cm_req.local_resp_timeout < CM_MIN_LOCAL_TIMEOUT ) + cm_req.local_resp_timeout = CM_MIN_LOCAL_TIMEOUT; + + cm_req.rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT; + cm_req.rnr_retry_cnt = QP_ATTRIB_RNR_RETRY; + cm_req.retry_cnt = g_qp_retries; + cm_req.p_alt_path = NULL; + cm_req.pfn_cm_mra_cb = cm_mra_callback; + cm_req.pfn_cm_rej_cb = cm_rej_callback; + + status = ib_cm_req( &cm_req ); + if( status != IB_SUCCESS ) + { + /* Remove from connection map. 
*/ + ibsp_conn_remove( socket_info ); + + IBSP_ERROR_EXIT( ("ib_cm_req failed (%d)\n", status) ); + return WSAEHOSTUNREACH; + } + + IBSP_EXIT( IBSP_DBG_CM ); + /* Operation is pending */ + return WSAEWOULDBLOCK; +} + + +void +ib_reject( + IN const ib_cm_handle_t h_cm, + IN const ib_rej_status_t rej_status ) +{ + ib_cm_rej_t cm_rej; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_CM ); + + memset( &cm_rej, 0, sizeof(cm_rej) ); + cm_rej.rej_status = rej_status; + + status = ib_cm_rej( h_cm, &cm_rej ); + if( status != IB_SUCCESS ) + IBSP_ERROR( ("ib_cm_rej returned %s\n", ib_get_err_str( status )) ); + + IBSP_EXIT( IBSP_DBG_CM ); +} + + +int +ib_accept( + IN struct ibsp_socket_info *socket_info, + IN ib_cm_req_rec_t *cm_req_received ) +{ + ib_cm_rep_t cm_rep; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_CM ); + + /* Insert into the connection map. */ + if( !ibsp_conn_insert( socket_info ) ) + { + IBSP_EXIT( IBSP_DBG_CM ); + return WSAEADDRINUSE; + } + + memset( &cm_rep, 0, sizeof(cm_rep) ); + + cm_rep.qp_type = IB_QPT_RELIABLE_CONN; + cm_rep.h_qp = socket_info->qp; + cm_rep.access_ctrl = IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE; +#if 0 + // Bug in TAVOR + cm_rep.sq_depth = QP_ATTRIB_SQ_DEPTH; + cm_rep.rq_depth = QP_ATTRIB_RQ_DEPTH; +#endif + cm_rep.init_depth = QP_ATTRIB_INITIATOR_DEPTH; + cm_rep.target_ack_delay = 10; + cm_rep.failover_accepted = IB_FAILOVER_ACCEPT_UNSUPPORTED; + cm_rep.flow_ctrl = cm_req_received->flow_ctrl; + cm_rep.rnr_nak_timeout = QP_ATTRIB_RNR_NAK_TIMEOUT; + cm_rep.rnr_retry_cnt = cm_req_received->rnr_retry_cnt; + cm_rep.pfn_cm_mra_cb = cm_mra_callback; + cm_rep.pfn_cm_rej_cb = cm_rej_callback; + cm_rep.pfn_cm_rtu_cb = cm_rtu_callback; + cm_rep.pfn_cm_lap_cb = cm_lap_callback; + cm_rep.pfn_cm_dreq_cb = cm_dreq_callback; + + fzprint(("%s():%d:0x%x:0x%x: flow_ctrl=%d rnr_retry_cnt=%d\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), cm_rep.flow_ctrl, cm_rep.rnr_retry_cnt)); + + status = ib_cm_rep( cm_req_received->h_cm_req, &cm_rep ); + if( status != IB_SUCCESS ) + { + /* Remove from connection map. */ + ibsp_conn_remove( socket_info ); + + IBSP_ERROR_EXIT( ("ib_cm_rep failed (%s) at time %I64d\n", + ib_get_err_str( status ), cl_get_time_stamp()) ); + return WSAEACCES; + } + + IBSP_EXIT( IBSP_DBG_CM ); + return 0; +} + + +void +ib_disconnect( + IN struct ibsp_socket_info *socket_info, + IN struct disconnect_reason *reason ) +{ + ib_api_status_t status; + ib_cm_dreq_t cm_dreq; + + IBSP_ENTER( IBSP_DBG_CM ); + + memset( &cm_dreq, 0, sizeof(cm_dreq) ); + + cm_dreq.qp_type = IB_QPT_RELIABLE_CONN; + cm_dreq.h_qp = socket_info->qp; + cm_dreq.pfn_cm_drep_cb = cm_drep_callback; + + cm_dreq.p_dreq_pdata = (uint8_t *) reason; + cm_dreq.dreq_length = sizeof(struct disconnect_reason); + + status = ib_cm_dreq( &cm_dreq ); + + /* + * If both sides initiate disconnection, we might get + * an invalid state or handle here. + */ + if( status != IB_SUCCESS && status != IB_INVALID_STATE && + status != IB_INVALID_HANDLE ) + { + IBSP_ERROR( ("ib_cm_dreq returned %s\n", ib_get_err_str( status )) ); + } + + /* + * Note that we don't care about getting the DREP - we move the QP to the + * error state now and flush all posted work requests. If the + * disconnection was graceful, we'll only have the pre-posted receives to + * flush. If the disconnection is ungraceful, we don't care if we + * interrupt transfers. + */ + + /* Move the QP to error to flush any work requests.
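
The remote_resp_timeout and local_resp_timeout clamps in ib_connect above follow the 5-bit IB timeout encoding, where a field value t represents roughly 4.096 us * 2^t, so 0x1F is the largest legal exponent. The repeated pattern could be factored as in this sketch (the CM_* constants exist in this patch; the helper itself is hypothetical):

	/* Clamp pkt_life + margin into a 5-bit CM timeout field. */
	static inline uint8_t
	clamp_cm_timeout(
		IN	const uint8_t	pkt_life,
		IN	const uint8_t	margin,
		IN	const uint8_t	min_val )
	{
		uint8_t	t = (uint8_t)(pkt_life + margin);

		if( t > 0x1F )
			t = 0x1F;		/* 5-bit field: maximum exponent */
		else if( t < min_val )
			t = min_val;
		return t;
	}

so that, for example, cm_req.remote_resp_timeout = clamp_cm_timeout( ib_path_rec_pkt_life( path_rec ), CM_REMOTE_TIMEOUT, CM_MIN_REMOTE_TIMEOUT );
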
*/ + __flush_qp( socket_info ); + + IBSP_EXIT( IBSP_DBG_CM ); +} diff --git a/branches/Ndi/ulp/wsd/user/ibsp_duplicate.c b/branches/Ndi/ulp/wsd/user/ibsp_duplicate.c new file mode 100644 index 00000000..8cac6bac --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_duplicate.c @@ -0,0 +1,322 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) + +#include "ibsp_duplicate.tmh" +#endif + +#include "ibspdll.h" +#include "rpc.h" + +/* +http://msdn.microsoft.com/library/default.asp?url=/library/en-us/dllproc/base/using_shared_memory_in_a_dynamic_link_library.asp +*/ + + +static void +create_name( + OUT char *fname, + IN const DWORD dwProcessId, + IN const GUID *p_guid ) +{ + sprintf( fname, "Global\\OpenIB-WSD-%08lx-" + "%08lx-%04hx-%04hx-%02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x", + dwProcessId, p_guid->Data1, p_guid->Data2, p_guid->Data3, + (int)p_guid->Data4[0], (int)p_guid->Data4[1], + (int)p_guid->Data4[2], (int)p_guid->Data4[3], + (int)p_guid->Data4[4], (int)p_guid->Data4[5], + (int)p_guid->Data4[6], (int)p_guid->Data4[7] ); +} + + +/* Create a duplicated socket. param is given by the other process through the + * lpProtocolInfo->dwProviderReserved field. + * This function is called by the next-controlling process. */ +int +setup_duplicate_socket( + IN struct ibsp_socket_info *socket_info, + IN HANDLE h_dup_info ) +{ + int ret, err; + struct ibsp_duplicate_info *dup_info; + ib_net64_t dest_port_guid; + ib_path_rec_t path_rec; + + IBSP_ENTER( IBSP_DBG_DUP ); + + CL_ASSERT( socket_info->socket_state == IBSP_CREATE ); + + /* Get a pointer to the file-mapped shared memory. 
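
Both processes derive the same section name from the target process ID plus a fresh GUID, so the duplicating process can create the mapping and the target process can open the view through the duplicated handle. A usage sketch of the naming side (error handling elided; create_name is the helper above):

	char	fname[100];
	GUID	guid;
	HANDLE	h_map;

	UuidCreate( &guid );
	create_name( fname, dwProcessId, &guid );	/* "Global\\OpenIB-WSD-<pid>-<guid>" */
	h_map = CreateFileMapping( INVALID_HANDLE_VALUE, NULL, PAGE_READWRITE,
		0, sizeof(struct ibsp_duplicate_info), fname );
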
*/ + dup_info = MapViewOfFile( h_dup_info, FILE_MAP_READ, 0, 0, 0 ); + if( dup_info == NULL ) + { + IBSP_ERROR( ("MapViewOfFile failed with %d\n", GetLastError()) ); + ret = WSAENETDOWN; + goto err1; + } + + socket_info->peer_addr = dup_info->peer_addr; + socket_info->local_addr = dup_info->local_addr; + socket_info->socket_options = dup_info->socket_options; + socket_info->duplicate.dwProcessId = dup_info->dwProcessId; + socket_info->duplicate.identifier = dup_info->identifier; + + socket_info->port = get_port_from_ip_address( dup_info->local_addr.sin_addr ); + if( socket_info->port == NULL ) + { + IBSP_ERROR( ("incoming destination IP address not local (%s)\n", + inet_ntoa( dup_info->local_addr.sin_addr )) ); + ret = WSAENETDOWN; + goto err1; + } + + /* Get the GUID for the remote IP address. */ + ret = query_guid_address( socket_info->port, + socket_info->peer_addr.sin_addr.S_un.S_addr, &dest_port_guid ); + if( ret ) + { + IBSP_ERROR( ("query_guid_address failed for IP %08x\n", + socket_info->peer_addr.sin_addr.s_addr) ); + ret = WSAENETDOWN; + goto err1; + } + + /* Get the path record */ + ret = query_pr( socket_info->port, dest_port_guid, &path_rec ); + if( ret ) + { + IBSP_ERROR( ("query_pr failed for IP %08x\n", + socket_info->peer_addr.sin_addr.s_addr) ); + ret = WSAENETDOWN; + goto err1; + } + + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_DUPLICATING_NEW ); + socket_info->h_event = CreateEvent( NULL, FALSE, FALSE, NULL ); + if( !socket_info->h_event ) + { + IBSP_ERROR( ("CreateEvent failed (%d)\n", GetLastError()) ); + ret = WSAENETDOWN; + goto err1; + } + + ret = ib_create_socket( socket_info ); + if( ret ) + { + IBSP_ERROR( ("ib_create_socket failed with %d\n", ret) ); + goto err1; + } + + /* Connects the QP. */ + ret = ib_connect( socket_info, &path_rec ); + if( ret != WSAEWOULDBLOCK ) + { + IBSP_ERROR( ("ib_connect failed (%d)\n", ret) ); + goto err2; + } + + if( WaitForSingleObject( socket_info->h_event, INFINITE ) != WAIT_OBJECT_0 ) + IBSP_ERROR( ("WaitForSingleObject failed\n") ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + if( socket_info->socket_state != IBSP_CONNECTED ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR( ("Failed to connect\n") ); + ret = WSAENETDOWN; +err2: + g_ibsp.up_call_table.lpWPUCloseSocketHandle( + socket_info->switch_socket, &err ); + socket_info->switch_socket = INVALID_SOCKET; + STAT_DEC( wpusocket_num ); + + ib_destroy_socket( socket_info ); + } + else + { + ret = 0; + cl_spinlock_release( &socket_info->mutex1 ); + } + +err1: + if( socket_info->h_event ) + { + CloseHandle( socket_info->h_event ); + socket_info->h_event = NULL; + } + + CloseHandle( h_dup_info ); + + IBSP_EXIT( IBSP_DBG_DUP ); + return ret; +} + + +/* Function: IBSPDuplicateSocket + + Description: + This function provides a WSAPROTOCOL_INFOW structure which can be passed + to another process to open a handle to the same socket. First we need + to translate the user socket into the provider socket and call the underlying + WSPDuplicateSocket. Note that the lpProtocolInfo structure passed into us + is an out parameter only!
+*/ +int WSPAPI +IBSPDuplicateSocket( + SOCKET s, + DWORD dwProcessId, + LPWSAPROTOCOL_INFOW lpProtocolInfo, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + struct ibsp_duplicate_info *dup_info = NULL; + char fname[100]; + GUID guid; + HANDLE h_dup_info, h_target_process, h_target_dup_info; + struct disconnect_reason reason; + + IBSP_ENTER( IBSP_DBG_DUP ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DUP, + ("Duplicating socket=0x%p to dwProcessId=0x%x \n", + socket_info, dwProcessId) ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + if( socket_info->socket_state != IBSP_CONNECTED ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DUP, + ("Socket state not IBSP_CONNECTED, state=%s.\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAENOTCONN; + return SOCKET_ERROR; + } + + /* Create a GUID to use as unique identifier for this duplication. */ + UuidCreate( &guid ); + create_name( fname, dwProcessId, &guid ); + + h_dup_info = CreateFileMapping( INVALID_HANDLE_VALUE, NULL, + PAGE_READWRITE, 0, sizeof(struct ibsp_duplicate_info), fname ); + if( !h_dup_info ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("CreateFileMapping for %s failed with %d\n", + fname, GetLastError()) ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + /* Get a pointer to the file-mapped shared memory. */ + dup_info = MapViewOfFile( h_dup_info, FILE_MAP_WRITE, 0, 0, 0 ); + if( !dup_info ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("MapViewOfFile failed with %d\n", GetLastError()) ); + CloseHandle( h_dup_info ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + /* + * Store addressing information so that the duplicating + * process can reconnect. + */ + dup_info->identifier = guid; + dup_info->socket_options = socket_info->socket_options; + dup_info->peer_addr = socket_info->peer_addr; + dup_info->local_addr = socket_info->local_addr; + dup_info->dwProcessId = dwProcessId; + + /* Release the reference on the underlying file */ + UnmapViewOfFile( dup_info ); + + /* Open the target process. */ + h_target_process = OpenProcess( PROCESS_DUP_HANDLE, FALSE, dwProcessId ); + if( !h_target_process ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("OpenProcess failed with %d\n", GetLastError()) ); + CloseHandle( h_dup_info ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + if( !DuplicateHandle( GetCurrentProcess(), h_dup_info, + h_target_process, &h_target_dup_info, 0, TRUE, + DUPLICATE_CLOSE_SOURCE | DUPLICATE_SAME_ACCESS ) ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("DuplicateHandle failed with %d\n", GetLastError()) ); + CloseHandle( h_target_process ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + CloseHandle( h_target_process ); + + CL_ASSERT( !((ULONG_PTR)h_target_dup_info >> 32) ); + lpProtocolInfo->dwProviderReserved = (DWORD)(ULONG_PTR)h_target_dup_info; + + socket_info->duplicate.identifier = guid; + + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_DUPLICATING_OLD ); + + memset( &reason, 0, sizeof(reason) ); + reason.type = DISC_DUPLICATING; + reason.duplicating.identifier = guid; + reason.duplicating.dwProcessId = dwProcessId; + + /* + * Flush all the receive buffers. There should be no + * send/rdma buffers left. + */ + ib_disconnect( socket_info, &reason ); + + /* We changed the state - remove from connection map. 
*/ + ibsp_conn_remove( socket_info ); + + cl_spinlock_release( &socket_info->mutex1 ); + + wait_cq_drain( socket_info ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + ib_destroy_socket( socket_info ); + cl_spinlock_release( &socket_info->mutex1 ); + + /* And that's it */ + IBSP_EXIT( IBSP_DBG_DUP ); + *lpErrno = 0; + return 0; +} diff --git a/branches/Ndi/ulp/wsd/user/ibsp_iblow.c b/branches/Ndi/ulp/wsd/user/ibsp_iblow.c new file mode 100644 index 00000000..ff980ef6 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_iblow.c @@ -0,0 +1,1319 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ +#include "ibspdebug.h" + +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ibsp_iblow.tmh" +#endif +#include +#include "ibspdll.h" + +#ifdef PERFMON_ENABLED +#include "ibsp_perfmon.h" +#endif + + +typedef struct _io_comp_info +{ + struct ibsp_socket_info *p_socket; + LPWSAOVERLAPPED p_ov; + +} io_comp_info_t; + + +/* Work queue entry completion routine. */ +static void +complete_wq( + IN const ib_wc_t *wc, + OUT io_comp_info_t *p_io_info ) +{ + struct _wr *wr = NULL; + struct _recv_wr *p_recv_wr = NULL; + LPWSAOVERLAPPED lpOverlapped = NULL; + struct ibsp_socket_info *socket_info = NULL; + + IBSP_ENTER( IBSP_DBG_IO ); + + wr = (struct _wr * __ptr64)wc->wr_id; + p_recv_wr = (struct _recv_wr * __ptr64)wc->wr_id; + + CL_ASSERT( wr ); + + socket_info = wr->socket_info; + p_io_info->p_socket = socket_info; + + lpOverlapped = wr->lpOverlapped; + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("socket %p, ov %p, work completion status=%s, wc_type=%s\n", + socket_info, lpOverlapped, ib_get_wc_status_str( wc->status ), + ib_get_wc_type_str( wc->wc_type )) ); + + /* Set the windows error code. It's not easy to find an easy + * correspondence between the IBAL error codes and windows error + * codes; but it probably does not matter, as long as it returns an + * error. */ + switch( wc->status ) + { + case IB_WCS_SUCCESS: + /* + * Set the length of the operation. Under Infiniband, the work + * completion length is only valid for a receive + * operation. Fortunately we had already set the length during the + * send operation. 
+ * + * lpWPUCompleteOverlappedRequest is supposed to store the length + * into InternalHigh, however it will not be called if the low + * order bit of lpOverlapped->hEvent is set. So we do it and hope + * for the best. + * + * NOTE: Without a valid length, the switch doesn't seem to call + * GetOverlappedResult() even if we call lpWPUCompleteOverlappedRequest() + */ + switch ( wc->wc_type ) + { + case IB_WC_RECV: + CL_ASSERT(wc->length != 0); + lpOverlapped->InternalHigh = wc->length; +#ifdef IBSP_LOGGING + cl_spinlock_acquire( &socket_info->recv_lock ); + DataLogger_WriteData(&socket_info->RecvDataLogger, + p_recv_wr->idx, (void * __ptr64)p_recv_wr->ds_array[0].vaddr, + wc->length); + cl_spinlock_release( &socket_info->recv_lock ); +#endif +#ifdef PERFMON_ENABLED + InterlockedIncrement64( &g_pm_stat.pdata[COMP_RECV] ); + InterlockedExchangeAdd64( &g_pm_stat.pdata[BYTES_RECV], + lpOverlapped->InternalHigh ); +#endif +#ifdef _DEBUG_ + cl_atomic_inc(&g_ibsp.total_recv_compleated); +#endif + break; + + case IB_WC_RDMA_READ: + CL_ASSERT(wc->length != 0); + lpOverlapped->InternalHigh = wc->length; +#ifdef PERFMON_ENABLED + InterlockedIncrement64( &g_pm_stat.pdata[COMP_RECV] ); + InterlockedExchangeAdd64( &g_pm_stat.pdata[BYTES_READ], + lpOverlapped->InternalHigh ); +#endif /* PERFMON_ENABLED */ + break; + +#ifdef PERFMON_ENABLED + case IB_WC_SEND: + InterlockedIncrement64( &g_pm_stat.pdata[COMP_SEND] ); + InterlockedExchangeAdd64( &g_pm_stat.pdata[BYTES_SEND], + lpOverlapped->InternalHigh ); + break; + + case IB_WC_RDMA_WRITE: + InterlockedIncrement64( &g_pm_stat.pdata[COMP_SEND] ); + InterlockedExchangeAdd64( &g_pm_stat.pdata[BYTES_WRITE], + lpOverlapped->InternalHigh ); +#endif /* PERFMON_ENABLED */ + default: + break; + } + + + lpOverlapped->OffsetHigh = 0; + break; + + case IB_WCS_WR_FLUSHED_ERR: + cl_spinlock_acquire( &socket_info->mutex1 ); + + if( socket_info->socket_state == IBSP_DUPLICATING_REMOTE && + wc->wc_type == IB_WC_RECV ) + { + /* + * Take the wr off the wr_list, and place onto the + * dup_wr_list. We will post them later on the new QP. + */ + cl_spinlock_acquire( &socket_info->recv_lock ); + + /* Copy to the duplicate WR array. */ + socket_info->dup_wr[socket_info->dup_idx] = *p_recv_wr; + +#if QP_ATTRIB_RQ_DEPTH == 256 || QP_ATTRIB_RQ_DEPTH == 128 || \ + QP_ATTRIB_RQ_DEPTH == 64 || QP_ATTRIB_RQ_DEPTH == 32 || \ + QP_ATTRIB_RQ_DEPTH == 16 || QP_ATTRIB_RQ_DEPTH == 8 + socket_info->dup_idx++; + socket_info->dup_idx &= (QP_ATTRIB_RQ_DEPTH - 1); +#else + if( ++socket_info->dup_idx == QP_ATTRIB_RQ_DEPTH ) + socket_info->dup_idx = 0; +#endif + + cl_atomic_inc( &socket_info->dup_cnt ); + /* ib_cq_comp will decrement the receive count. */ + cl_atomic_dec( &socket_info->recv_cnt ); + + cl_spinlock_release( &socket_info->recv_lock ); + + cl_spinlock_release( &socket_info->mutex1 ); + p_io_info->p_ov = NULL; + IBSP_EXIT( IBSP_DBG_IO ); + return; + } + + /* Check for flushing the receive buffers on purpose. */ + if( socket_info->socket_state == IBSP_DUPLICATING_OLD ) + wr->lpOverlapped->OffsetHigh = 0; + else + wr->lpOverlapped->OffsetHigh = WSA_OPERATION_ABORTED; + + cl_spinlock_release( &socket_info->mutex1 ); + + /* Override the length, as per the WSD specs. 
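
The #if chain above picks the cheap mask-based wrap only when QP_ATTRIB_RQ_DEPTH is one of the listed powers of two, and otherwise falls back to a compare. The same decision can be made generically at run time, as in this sketch (not part of the patch):

	/* Advance a ring index over 'depth' slots. The mask form is only
	 * valid when depth is a power of two; the branch form always works. */
	static inline uint8_t
	ring_advance(
		IN	const uint8_t	idx,
		IN	const uint32_t	depth )
	{
		if( (depth & (depth - 1)) == 0 )
			return (uint8_t)((idx + 1) & (depth - 1));
		return (uint8_t)(((uint32_t)idx + 1 == depth) ? 0 : idx + 1);
	}
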
*/ + wr->lpOverlapped->InternalHigh = 0; + break; + + case IB_WCS_LOCAL_LEN_ERR: + case IB_WCS_LOCAL_OP_ERR: + case IB_WCS_LOCAL_PROTECTION_ERR: + case IB_WCS_MEM_WINDOW_BIND_ERR: + case IB_WCS_REM_ACCESS_ERR: + case IB_WCS_REM_OP_ERR: + case IB_WCS_RNR_RETRY_ERR: + case IB_WCS_TIMEOUT_RETRY_ERR: + case IB_WCS_REM_INVALID_REQ_ERR: + default: + { + char comp_name[MAX_COMPUTERNAME_LENGTH + 1] = {0}; + DWORD len = sizeof(comp_name); + GetComputerName( comp_name, &len ); + IBSP_ERROR( ("%s (%s:%d to ", + comp_name, inet_ntoa( socket_info->local_addr.sin_addr ), + socket_info->local_addr.sin_port) ); + IBSP_ERROR( ("%s:%d) %s error: %s (vendor specific %I64x)\n", + inet_ntoa( socket_info->peer_addr.sin_addr ), + socket_info->peer_addr.sin_port, + ib_get_wc_type_str( wc->wc_type ), + ib_get_wc_status_str( wc->status ), + wc->vendor_specific) ); + lpOverlapped->OffsetHigh = WSAECONNABORTED; + wr->lpOverlapped->InternalHigh = 0; + socket_info->qp_error = WSAECONNABORTED; + break; + } + } + +#ifdef PERFMON_ENABLED + InterlockedIncrement64( &g_pm_stat.pdata[COMP_TOTAL] ); +#endif + +#ifdef _DEBUG_ + if( wc->wc_type == IB_WC_RECV ) + { + // This code requires the recv count to be decremented here, but it needs + // to be decremented after any callbacks are invoked so socket destruction + // gets delayed until all callbacks have been invoked. + //{ + // uint8_t idx; + + // cl_spinlock_acquire( &socket_info->recv_lock ); + // idx = socket_info->recv_idx - (uint8_t)socket_info->recv_cnt; + // if( idx >= QP_ATTRIB_RQ_DEPTH ) + // idx += QP_ATTRIB_RQ_DEPTH; + + // CL_ASSERT( wc->wr_id == (uint64_t)(void* __ptr64)&socket_info->recv_wr[idx] ); + // cl_atomic_dec( &socket_info->recv_cnt ); + // cl_spinlock_release( &socket_info->recv_lock ); + //} + + if( wc->status == IB_SUCCESS && p_recv_wr->ds_array[0].length >= 40 ) + { + debug_dump_buffer( IBSP_DBG_WQ, "RECV", + (void * __ptr64)p_recv_wr->ds_array[0].vaddr, 40 ); + } + + cl_atomic_dec( &g_ibsp.recv_count ); + cl_atomic_inc( &socket_info->recv_comp ); + + memset( p_recv_wr, 0x33, sizeof(struct _recv_wr) ); + } + else + { + // This code requires the send count to be decremented here, but it needs + // to be decremented after any callbacks are invoked so socket destruction + // gets delayed until all callbacks have been invoked. + //{ + // uint8_t idx; + + // cl_spinlock_acquire( &socket_info->send_lock ); + // idx = socket_info->send_idx - (uint8_t)socket_info->send_cnt; + // if( idx >= QP_ATTRIB_SQ_DEPTH ) + // idx += QP_ATTRIB_SQ_DEPTH; + // CL_ASSERT( wc->wr_id == (uint64_t)(void* __ptr64)&socket_info->send_wr[idx] ); + // cl_atomic_dec( &socket_info->send_cnt ); + // cl_spinlock_release( &socket_info->send_lock ); + //} + + if( wc->wc_type == IB_WC_SEND ) + { + cl_atomic_dec( &g_ibsp.send_count ); + cl_atomic_inc( &socket_info->send_comp ); + + fzprint(("%s():%d:0x%x:0x%x: send_count=%d\n", + __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), g_ibsp.send_count)); + } + + memset( wr, 0x33, sizeof(struct _wr) ); + } +#endif + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("overlapped=%p, InternalHigh=%Id, hEvent=%p\n", + lpOverlapped, lpOverlapped->InternalHigh, + lpOverlapped->hEvent) ); + + + + /* Don't notify the switch for that completion only if: + * - the switch don't want a notification + * - the wq completed with success + * - the socket is still connected + */ + if( ((uintptr_t) lpOverlapped->hEvent) & 0x00000001 ) + { + /* Indicate this operation is complete. 
The switch will poll + * with calls to WSPGetOverlappedResult(). */ + +#ifdef _DEBUG_ + cl_atomic_dec( &g_ibsp.overlap_h1_comp_count ); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); +#endif + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("Not calling lpWPUCompleteOverlappedRequest: " + "socket=%p, ov=%p OffsetHigh=%d, InternalHigh=%Id hEvent=%p\n", + socket_info, lpOverlapped, lpOverlapped->OffsetHigh, + lpOverlapped->InternalHigh, lpOverlapped->hEvent) ); + + lpOverlapped->Internal = 0; + p_io_info->p_ov = NULL; + } + else + { +#ifdef _DEBUG_ + cl_atomic_dec( &g_ibsp.overlap_h0_count ); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); +#endif + + p_io_info->p_ov = lpOverlapped; + cl_atomic_inc( &socket_info->ref_cnt1 ); + } + + if( wc->wc_type == IB_WC_RECV ) + { + cl_atomic_dec( &socket_info->recv_cnt ); + } + else + { + cl_atomic_dec( &socket_info->send_cnt ); + } + + IBSP_EXIT( IBSP_DBG_IO ); +} + + +/* CQ completion handler. */ +int +ib_cq_comp( + void *cq_context ) +{ + struct cq_thread_info *cq_tinfo = cq_context; + ib_api_status_t status; + ib_wc_t wclist[WC_LIST_SIZE]; + ib_wc_t *free_wclist; + ib_wc_t *done_wclist; + io_comp_info_t info[WC_LIST_SIZE]; + int cb_idx; + int i; + int n_comp = 0; +#ifdef _DEBUG_ + int comp_count; +#endif + + IBSP_ENTER( IBSP_DBG_WQ ); + + CL_ASSERT( WC_LIST_SIZE >= 1 ); + + do + { + /* Try to retrieve up to WC_LIST_SIZE completions at a time. */ + for( i = 0; i < (WC_LIST_SIZE - 1); i++ ) + { + wclist[i].p_next = &wclist[i + 1]; + } + wclist[(WC_LIST_SIZE - 1)].p_next = NULL; + + free_wclist = &wclist[0]; + done_wclist = NULL; + + status = ib_poll_cq( cq_tinfo->cq, &free_wclist, &done_wclist ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_WQ, + ("poll CQ got status %d, free=%p, done=%p\n", + status, free_wclist, done_wclist) ); + + switch( status ) + { + case IB_NOT_FOUND: + case IB_SUCCESS: + break; + + case IB_INVALID_CQ_HANDLE: + /* This happens when the switch closes the socket while the + * execution thread was calling lpWPUCompleteOverlappedRequest. */ + IBSP_ERROR( ( + "ib_poll_cq returned IB_INVALID_CQ_HANDLE\n") ); + goto done; + + default: + IBSP_ERROR( ( + "ib_poll_cq returned %s\n", ib_get_err_str( status )) ); + break; + } + +#ifdef _DEBUG_ + comp_count = 0; +#endif + + /* We have some completions.
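
ib_poll_cq() consumes entries from a caller-linked free list and returns completed ones on a separate done list, which is why the loop above re-chains the wclist array on every pass. Reduced to its core (h_cq stands for the CQ handle; everything else is as used in this file):

	ib_wc_t	wclist[WC_LIST_SIZE];
	ib_wc_t	*p_free, *p_done;
	int		i;

	for( i = 0; i < WC_LIST_SIZE - 1; i++ )
		wclist[i].p_next = &wclist[i + 1];	/* chain the free entries */
	wclist[WC_LIST_SIZE - 1].p_next = NULL;
	p_free = wclist;
	p_done = NULL;

	if( ib_poll_cq( h_cq, &p_free, &p_done ) == IB_SUCCESS )
	{
		for( ; p_done; p_done = p_done->p_next )
			;	/* consume one completion per iteration */
	}
	/* p_free now chains the entries that were not filled in. */
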
*/ + cb_idx = 0; + while( done_wclist ) + { +#ifdef _DEBUG_ + comp_count++; +#endif + complete_wq( done_wclist, &info[cb_idx++] ); + + done_wclist = done_wclist->p_next; + } + + for( i = 0; i < cb_idx; i++ ) + { + int error; + int ret; + + if( info[i].p_ov ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("Calling lpWPUCompleteOverlappedRequest: " + "socket=%p, ov=%p OffsetHigh=%d " + "InternalHigh=%Id hEvent=%p\n", + info[i].p_socket, info[i].p_ov, info[i].p_ov->OffsetHigh, + info[i].p_ov->InternalHigh, info[i].p_ov->hEvent) ); + + ret = g_ibsp.up_call_table.lpWPUCompleteOverlappedRequest( + info[i].p_socket->switch_socket, info[i].p_ov, + info[i].p_ov->OffsetHigh, + (DWORD)info[i].p_ov->InternalHigh, &error ); + if( ret != 0 ) + { + IBSP_ERROR( ("WPUCompleteOverlappedRequest for ov=%p " + "returned %d err %d\n", info[i].p_ov, ret, error) ); + } + deref_socket_info( info[i].p_socket ); + } + } + + n_comp += i; + +#ifdef _DEBUG_ + if( comp_count > g_ibsp.max_comp_count ) + { + g_ibsp.max_comp_count = comp_count; + } +#endif + } while( !free_wclist ); + +done: + +#ifdef _DEBUG_ + fzprint(("%s():%d:0x%x:0x%x: overlap_h0_count=%d overlap_h1_count=%d\n", + __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count)); +#endif + + IBSP_EXIT( IBSP_DBG_WQ ); + return n_comp; +} + + +/* IB completion thread */ +static DWORD WINAPI +ib_cq_thread( + LPVOID lpParameter ) +{ + struct cq_thread_info *cq_tinfo = (struct cq_thread_info *)lpParameter; + cl_status_t cl_status; + ib_api_status_t status; + int i; + DWORD_PTR old_affinity; + + IBSP_ENTER( IBSP_DBG_HW ); + + fzprint(("%s():%d:0x%x:0x%x: cq_tinfo=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), cq_tinfo)); + + old_affinity = SetThreadAffinityMask( GetCurrentThread(), g_dwPollThreadAffinityMask ); + if( old_affinity == 0 ) + { + IBSP_ERROR( ("SetThreadAffinityMask failed\n") ); + } + else + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DLL, ("SetThreadAffinityMask succeeded\n") ); + } + + + do + { + cl_status = cl_waitobj_wait_on( cq_tinfo->cq_waitobj, EVENT_NO_TIMEOUT, TRUE ); + if( cl_status != CL_SUCCESS ) + { + IBSP_ERROR( ( + "cl_waitobj_wait_on() failed (%d)\n", cl_status) ); + } + + /* + * TODO: By rearranging thread creation and cq creation, this check + * may be eliminated. + */ + if( cq_tinfo->cq != NULL ) + { + fzprint(("%s():%d:0x%x:0x%x: Calling ib_cq_comp().\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + +#ifdef PERFMON_ENABLED + InterlockedIncrement64( &g_pm_stat.pdata[INTR_TOTAL] ); +#endif + i = g_max_poll; + do + { + if( ib_cq_comp( cq_tinfo ) ) + i = g_max_poll; + + } while( i-- ); + + fzprint(("%s():%d:0x%x:0x%x: Done calling ib_cq_comp().\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + + status = ib_rearm_cq( cq_tinfo->cq, FALSE ); + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ( + "ib_rearm_cq returned %s\n", ib_get_err_str( status )) ); + } + } + + } while( !cq_tinfo->ib_cq_thread_exit_wanted ); + + cl_status = cl_waitobj_destroy( cq_tinfo->cq_waitobj ); + if( cl_status != CL_SUCCESS ) + { + IBSP_ERROR( ( + "cl_waitobj_destroy() returned %s\n", CL_STATUS_MSG(cl_status)) ); + } + HeapFree( g_ibsp.heap, 0, cq_tinfo ); + + /* No special exit code, even on errors. */ + IBSP_EXIT( IBSP_DBG_HW ); + ExitThread( 0 ); +} + + +/* Called with the HCA's CQ lock held.
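
Note the order in ib_cq_thread() above: drain the CQ, re-arm it, then keep draining for up to g_max_poll extra rounds. The extra rounds close the race where a completion arrives between the last empty poll and the re-arm and therefore never generates a wait-object event. As a sketch (wait_for_event and drain_cq are placeholders, not functions in this patch):

	for( ;; )
	{
		wait_for_event();		/* cl_waitobj_wait_on() above */
		while( drain_cq( cq ) )
			;			/* empty the CQ */
		ib_rearm_cq( cq, FALSE );
		while( drain_cq( cq ) )
			;			/* catch completions that raced the re-arm */
	}
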
*/ +static struct cq_thread_info * +ib_alloc_cq_tinfo( + struct ibsp_hca *hca ) +{ + struct cq_thread_info *cq_tinfo = NULL; + ib_cq_create_t cq_create; + ib_api_status_t status; + cl_status_t cl_status; + + IBSP_ENTER( IBSP_DBG_HW ); + + cq_tinfo = HeapAlloc( + g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct cq_thread_info) ); + + if( !cq_tinfo ) + { + IBSP_ERROR_EXIT( ("HeapAlloc() Failed.\n") ); + return NULL; + } + + cl_status = cl_waitobj_create( FALSE, &cq_tinfo->cq_waitobj ); + if( cl_status != CL_SUCCESS ) + { + cq_tinfo->cq_waitobj = NULL; + ib_destroy_cq_tinfo( cq_tinfo ); + IBSP_ERROR_EXIT( ( + "cl_waitobj_create() returned %s\n", CL_STATUS_MSG(cl_status)) ); + return NULL; + } + + cq_tinfo->hca = hca; + cq_tinfo->ib_cq_thread_exit_wanted = FALSE; + + cq_tinfo->ib_cq_thread = CreateThread( NULL, 0, ib_cq_thread, cq_tinfo, 0, + (LPDWORD)&cq_tinfo->ib_cq_thread_id ); + + if( cq_tinfo->ib_cq_thread == NULL ) + { + ib_destroy_cq_tinfo( cq_tinfo ); + IBSP_ERROR_EXIT( ("CreateThread failed (%d)", GetLastError()) ); + return NULL; + } + + STAT_INC( thread_num ); + + /* Completion queue */ + cq_create.size = IB_INIT_CQ_SIZE; + + cq_create.pfn_comp_cb = NULL; + cq_create.h_wait_obj = cq_tinfo->cq_waitobj; + + status = ib_create_cq( hca->hca_handle, &cq_create, cq_tinfo, + NULL, &cq_tinfo->cq ); + if( status ) + { + ib_destroy_cq_tinfo( cq_tinfo ); + IBSP_ERROR_EXIT( ( + "ib_create_cq returned %s\n", ib_get_err_str( status )) ); + return NULL; + } + + STAT_INC( cq_num ); + + status = ib_rearm_cq( cq_tinfo->cq, FALSE ); + if( status ) + { + ib_destroy_cq_tinfo( cq_tinfo ); + IBSP_ERROR_EXIT( ( + "ib_rearm_cq returned %s\n", ib_get_err_str( status )) ); + return NULL; + } + + cq_tinfo->cqe_size = cq_create.size; + + if( hca->cq_tinfo ) + { + __cl_primitive_insert( + &hca->cq_tinfo->list_item, &cq_tinfo->list_item ); + } + else + { + /* Setup the list entry to point to itself. */ + cq_tinfo->list_item.p_next = &cq_tinfo->list_item; + cq_tinfo->list_item.p_prev = &cq_tinfo->list_item; + } + + /* We will be assigned to a QP - set the QP count. */ + cq_tinfo->qp_count = 1; + + /* Upon allocation, the new CQ becomes the primary. */ + hca->cq_tinfo = cq_tinfo; + + IBSP_EXIT( IBSP_DBG_HW ); + return (cq_tinfo); +} + + +void +ib_destroy_cq_tinfo( + struct cq_thread_info *cq_tinfo ) +{ + ib_wc_t wclist; + ib_wc_t *free_wclist; + ib_wc_t *done_wclist; + ib_api_status_t status; + HANDLE h_cq_thread; + DWORD cq_thread_id; + + IBSP_ENTER( IBSP_DBG_HW ); + + CL_ASSERT( cq_tinfo ); + CL_ASSERT( cq_tinfo->qp_count == 0 ); + + if( cq_tinfo->cq ) + { + wclist.p_next = NULL; + free_wclist = &wclist; + + while( ib_poll_cq( + cq_tinfo->cq, &free_wclist, &done_wclist ) == IB_SUCCESS ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_WQ, + ("free=%p, done=%p\n", free_wclist, done_wclist) ); + } + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_WQ, ("ib_destroy_cq() start..\n") ); + + /* + * Called from cleanup thread, okay to block. + */ + status = ib_destroy_cq( cq_tinfo->cq, ib_sync_destroy ); + if( status ) + { + IBSP_ERROR( ( + "ib_destroy_cq returned %s\n", ib_get_err_str( status )) ); + } + else + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_WQ, ("ib_destroy_cq() finished.\n") ); + + cq_tinfo->cq = NULL; + + STAT_DEC( cq_num ); + } + } + + if( cq_tinfo->ib_cq_thread ) + { + /* ib_cq_thread() will release the cq_tinfo before exit. 
Don't + reference cq_tinfo after signaling */ + h_cq_thread = cq_tinfo->ib_cq_thread; + cq_tinfo->ib_cq_thread = NULL; + cq_thread_id = cq_tinfo->ib_cq_thread_id; + + cq_tinfo->ib_cq_thread_exit_wanted = TRUE; + cl_waitobj_signal( cq_tinfo->cq_waitobj ); + + /* Wait for ib_cq_thread to die, if we are not running on it */ + if( GetCurrentThreadId() != cq_thread_id ) + { + fzprint(("%s():%d:0x%x:0x%x: Waiting for ib_cq_thread=0x%x to die\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), + cq_thread_id )); + if( WaitForSingleObject( h_cq_thread, INFINITE ) != WAIT_OBJECT_0 ) + { + IBSP_ERROR( ("WaitForSingleObject failed\n") ); + } + else + { + STAT_DEC( thread_num ); + } + } + else + { + fzprint(("%s():%d:0x%x:0x%x: Currently on ib_cq_thread.\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + STAT_DEC( thread_num ); + } + CloseHandle( h_cq_thread ); + } + else + { + /* There was no thread created, destroy cq_waitobj and + free memory */ + if( cq_tinfo->cq_waitobj ) + { + cl_waitobj_destroy( cq_tinfo->cq_waitobj ); + cq_tinfo->cq_waitobj = NULL; + } + HeapFree( g_ibsp.heap, 0, cq_tinfo ); + } + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +static struct cq_thread_info * +ib_acquire_cq_tinfo( + struct ibsp_hca *hca ) +{ + struct cq_thread_info *cq_tinfo = NULL, *cq_end; + uint32_t cqe_size; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_HW ); + + cl_spinlock_acquire( &hca->cq_lock ); + + if( !hca->cq_tinfo ) + { + cq_tinfo = ib_alloc_cq_tinfo( hca ); + if( !cq_tinfo ) + IBSP_ERROR( ("ib_alloc_cq_tinfo() failed\n") ); + cl_spinlock_release( &hca->cq_lock ); + IBSP_EXIT( IBSP_DBG_HW ); + return cq_tinfo; + } + + cq_tinfo = hca->cq_tinfo; + cq_end = cq_tinfo; + cqe_size = (cq_tinfo->qp_count + 1) * IB_CQ_SIZE; + + do + { + if( cq_tinfo->cqe_size >= cqe_size ) + { + cq_tinfo->qp_count++; + cl_spinlock_release( &hca->cq_lock ); + IBSP_EXIT( IBSP_DBG_HW ); + return (cq_tinfo); + } + + status = ib_modify_cq( cq_tinfo->cq, &cqe_size ); + switch( status ) + { + case IB_SUCCESS: + cq_tinfo->cqe_size = cqe_size; + cq_tinfo->qp_count++; + break; + + default: + IBSP_ERROR_EXIT( ( + "ib_modify_cq() returned %s\n", ib_get_err_str(status)) ); + case IB_INVALID_CQ_SIZE: + case IB_UNSUPPORTED: + cq_tinfo = PARENT_STRUCT( + cl_qlist_next( &cq_tinfo->list_item ), struct cq_thread_info, + list_item ); + cqe_size = (cq_tinfo->qp_count + 1) * IB_CQ_SIZE; + } + + } while( cq_tinfo != cq_end ); + + if( cq_tinfo == cq_end ) + cq_tinfo = ib_alloc_cq_tinfo( hca ); + + cl_spinlock_release( &hca->cq_lock ); + IBSP_EXIT( IBSP_DBG_HW ); + return (cq_tinfo); +} + +void +ib_release_cq_tinfo( + struct cq_thread_info *cq_tinfo ) +{ + IBSP_ENTER( IBSP_DBG_HW ); + + CL_ASSERT( cq_tinfo ); + CL_ASSERT( cq_tinfo->hca ); + + cl_spinlock_acquire( &cq_tinfo->hca->cq_lock ); + /* If this CQ now has fewer QPs than the primary, make it the primary. */ + if( --cq_tinfo->qp_count < cq_tinfo->hca->cq_tinfo->qp_count ) + cq_tinfo->hca->cq_tinfo = cq_tinfo; + cl_spinlock_release( &cq_tinfo->hca->cq_lock ); + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +/* Release IB ressources. 
*/ +void +ib_release(void) +{ + cl_fmap_item_t *p_item; + + IBSP_ENTER( IBSP_DBG_HW ); + + if( g_ibsp.al_handle ) + { + cl_list_item_t *item; + ib_api_status_t status; + + unregister_pnp(); + + while( (item = cl_qlist_head( &g_ibsp.hca_list )) != cl_qlist_end( &g_ibsp.hca_list ) ) + { + struct ibsp_hca *hca = PARENT_STRUCT(item, struct ibsp_hca, item); + + pnp_ca_remove( hca ); + } + + fzprint(("%s():%d:0x%x:0x%x: Calling ib_close_al...\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + + status = ib_close_al( g_ibsp.al_handle ); + + fzprint(("%s():%d:0x%x:0x%x: Done calling ib_close_al, status=%d.\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), + status)); + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ( + "ib_close_al returned %s\n", ib_get_err_str( status )) ); + } + else + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("ib_close_al success\n") ); + STAT_DEC( al_num ); + } + g_ibsp.al_handle = NULL; + } + + for( p_item = cl_fmap_head( &g_ibsp.ip_map ); + p_item != cl_fmap_end( &g_ibsp.ip_map ); + p_item = cl_fmap_head( &g_ibsp.ip_map ) ) + { + cl_fmap_remove_item( &g_ibsp.ip_map, p_item ); + + HeapFree( g_ibsp.heap, 0, + PARENT_STRUCT(p_item, struct ibsp_ip_addr, item) ); + } + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +/* Initialize IB ressources. */ +int +ibsp_initialize(void) +{ + ib_api_status_t status; + int ret; + + IBSP_ENTER( IBSP_DBG_HW ); + + CL_ASSERT( g_ibsp.al_handle == NULL ); + CL_ASSERT( cl_qlist_count( &g_ibsp.hca_list ) == 0 ); + + /* Open the IB library */ + status = ib_open_al( &g_ibsp.al_handle ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("open is %d %p\n", status, g_ibsp.al_handle) ); + + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_open_al failed (%d)\n", status) ); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + STAT_INC( al_num ); + + /* Register for PNP events */ + status = register_pnp(); + if( status ) + { + IBSP_ERROR( ("register_pnp failed (%d)\n", status) ); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + STAT_INC( thread_num ); + + ret = 0; +done: + if( ret ) + { + /* Free up resources. */ + ib_release(); + } + + IBSP_EXIT( IBSP_DBG_HW ); + + return ret; +} + + +/* Destroys the infiniband ressources of a socket. */ +void +ib_destroy_socket( + IN OUT struct ibsp_socket_info *socket_info ) +{ + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_EP ); + + if( socket_info->qp ) + { + + + ib_qp_mod_t qp_mod; + + cl_atomic_inc( &socket_info->ref_cnt1 ); + + cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) ); + qp_mod.req_state = IB_QPS_ERROR; + status = ib_modify_qp(socket_info->qp, &qp_mod); + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_modify_qp returned %s\n", + ib_get_err_str( status )) ); + deref_socket_info( socket_info ); + } + + + /* Wait for all work requests to get flushed. 
*/ + while( socket_info->send_cnt || socket_info->recv_cnt ) + cl_thread_suspend( 0 ); + + status = ib_destroy_qp( socket_info->qp, deref_socket_info ); + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_destroy_qp returned %s\n", + ib_get_err_str( status )) ); + deref_socket_info( socket_info ); + } + + ib_release_cq_tinfo( socket_info->cq_tinfo ); + + socket_info->qp = NULL; + } + + IBSP_EXIT( IBSP_DBG_EP ); +} + + +/* + * Creates the necessary IB resources for a socket + */ +int +ib_create_socket( + IN OUT struct ibsp_socket_info *socket_info) +{ + ib_qp_create_t qp_create; + ib_api_status_t status; + ib_qp_attr_t qp_attr; + + IBSP_ENTER( IBSP_DBG_EP ); + + CL_ASSERT( socket_info != NULL ); + CL_ASSERT( socket_info->port != NULL ); + CL_ASSERT( socket_info->qp == NULL ); + + socket_info->hca_pd = socket_info->port->hca->pd; + + /* Get the completion queue and thread info for this socket */ + socket_info->cq_tinfo = ib_acquire_cq_tinfo( socket_info->port->hca ); + if( !socket_info->cq_tinfo ) + { + IBSP_ERROR_EXIT( ("ib_acquire_cq_tinfo failed\n") ); + return WSAENOBUFS; + } + + /* Queue pair */ + cl_memclr(&qp_create, sizeof(ib_qp_create_t)); + qp_create.qp_type = IB_QPT_RELIABLE_CONN; + qp_create.sq_depth = QP_ATTRIB_SQ_DEPTH; + qp_create.rq_depth = QP_ATTRIB_RQ_DEPTH; + qp_create.sq_sge = QP_ATTRIB_SQ_SGE; + qp_create.rq_sge = 1; + qp_create.h_rq_cq = socket_info->cq_tinfo->cq; + qp_create.h_sq_cq = socket_info->cq_tinfo->cq; + qp_create.sq_signaled = TRUE; + + status = ib_create_qp( socket_info->hca_pd, &qp_create, socket_info, /* context */ + NULL, /* async handler */ + &socket_info->qp ); + if( status ) + { + ib_release_cq_tinfo( socket_info->cq_tinfo ); + IBSP_ERROR_EXIT( ( + "ib_create_qp returned %s\n", ib_get_err_str( status )) ); + return WSAENOBUFS; + } + + status = ib_query_qp( socket_info->qp, &qp_attr ); + if( status == IB_SUCCESS ) + { + socket_info->max_inline = min( g_max_inline, qp_attr.sq_max_inline ); + } + else + { + IBSP_ERROR( ("ib_query_qp returned %s\n", ib_get_err_str( status )) ); + socket_info->max_inline = 0; + } + + STAT_INC( qp_num ); + + IBSP_EXIT( IBSP_DBG_EP ); + return 0; +} + + +void +wait_cq_drain( + IN OUT struct ibsp_socket_info *socket_info ) +{ + IBSP_ENTER( IBSP_DBG_EP ); + + if( socket_info->cq_tinfo == NULL ) + { + IBSP_EXIT( IBSP_DBG_EP ); + return; + } + + /* Wait for the QP to be drained. */ + while( socket_info->send_cnt || socket_info->recv_cnt ) + { + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr_list_count=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), + socket_info, cl_qlist_count(&socket_info->wr_list))); + + Sleep(100); + } + + IBSP_EXIT( IBSP_DBG_EP ); +} + + +void +ibsp_dup_overlap_abort( + IN OUT struct ibsp_socket_info *socket_info ) +{ + LPWSAOVERLAPPED lpOverlapped = NULL; + int error; + int ret; + uint8_t idx; + + IBSP_ENTER( IBSP_DBG_EP ); + CL_ASSERT( !socket_info->send_cnt && !socket_info->recv_cnt ); + + /* Browse the list of all posted overlapped structures + * to mark them as aborted.
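
ib_destroy_socket() above encodes a strict teardown order: force the QP to IB_QPS_ERROR so the HCA flushes every outstanding work request, wait for the send/receive counters to drain, then issue the asynchronous destroy whose callback drops the socket reference. Condensed (all names are from this file):

	ib_qp_mod_t	qp_mod;

	cl_memclr( &qp_mod, sizeof(ib_qp_mod_t) );
	qp_mod.req_state = IB_QPS_ERROR;	/* 1: flush all posted WRs */
	ib_modify_qp( socket_info->qp, &qp_mod );

	while( socket_info->send_cnt || socket_info->recv_cnt )
		cl_thread_suspend( 0 );		/* 2: wait for the flush completions */

	ib_destroy_qp( socket_info->qp, deref_socket_info );	/* 3: deref on destroy */
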
*/ + idx = socket_info->dup_idx - (uint8_t)socket_info->dup_cnt; + if( idx >= QP_ATTRIB_RQ_DEPTH ) + idx += QP_ATTRIB_RQ_DEPTH; + + while( socket_info->dup_cnt ) + { + lpOverlapped = socket_info->dup_wr[idx].wr.lpOverlapped; + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p wr=0x%p overlapped=0x%p Internal=%d InternalHigh=%d hEvent=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info, &socket_info->dup_wr[idx], lpOverlapped, lpOverlapped->Internal, lpOverlapped->InternalHigh, lpOverlapped->hEvent)); + + lpOverlapped->OffsetHigh = WSAECONNABORTED; + lpOverlapped->InternalHigh = 0; + + if( ((uintptr_t) lpOverlapped->hEvent) & 0x00000001 ) + { + /* Indicate this operation is complete. The switch will poll + * with calls to WSPGetOverlappedResult(). */ +#ifdef _DEBUG_ + cl_atomic_dec(&g_ibsp.overlap_h1_comp_count); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); +#endif + + IBSP_PRINT(TRACE_LEVEL_INFORMATION,IBSP_DBG_WQ, + ("set internal overlapped=0x%p Internal=%Id OffsetHigh=%d\n", + lpOverlapped, lpOverlapped->Internal, + lpOverlapped->OffsetHigh)); + + lpOverlapped->Internal = 0; + } + else + { +#ifdef _DEBUG_ + cl_atomic_dec(&g_ibsp.overlap_h0_count); + + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); +#endif + IBSP_PRINT(TRACE_LEVEL_INFORMATION,IBSP_DBG_WQ, + (" calls lpWPUCompleteOverlappedRequest, overlapped=0x%p OffsetHigh=%d " + "InternalHigh=%Id hEvent=%p\n", + lpOverlapped, lpOverlapped->OffsetHigh, + lpOverlapped->InternalHigh, lpOverlapped->hEvent)); + + ret = g_ibsp.up_call_table.lpWPUCompleteOverlappedRequest + (socket_info->switch_socket, + lpOverlapped, + lpOverlapped->OffsetHigh, (DWORD) lpOverlapped->InternalHigh, &error); + + if( ret != 0 ) + { + IBSP_ERROR( ("lpWPUCompleteOverlappedRequest failed with %d/%d\n", ret, error) ); + } + } + cl_atomic_dec( &socket_info->dup_cnt ); + } + + IBSP_EXIT( IBSP_DBG_EP ); +} + + +/* Closes a connection and release its ressources. */ +void +shutdown_and_destroy_socket_info( + IN OUT struct ibsp_socket_info *socket_info ) +{ + enum ibsp_socket_state old_state; + + IBSP_ENTER( IBSP_DBG_EP ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + old_state = socket_info->socket_state; + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CLOSED ); + cl_spinlock_release( &socket_info->mutex1 ); + + if( socket_info->listen.handle ) + { + /* Stop listening and reject queued connections. */ + ib_listen_cancel( socket_info ); + } + + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + cl_qlist_remove_item( &g_ibsp.socket_info_list, &socket_info->item ); + + switch( old_state ) + { + case IBSP_CREATE: + case IBSP_LISTEN: + /* Nothing to do. */ + break; + + case IBSP_CONNECTED: + { + struct disconnect_reason reason; + + memset( &reason, 0, sizeof(reason) ); + reason.type = DISC_SHUTDOWN; + ib_disconnect( socket_info, &reason ); + } + /* Fall through. */ + + case IBSP_CONNECT: + case IBSP_DISCONNECTED: + /* We changed the state - remove from connection map. 
*/ + CL_ASSERT( socket_info->conn_item.p_map ); + cl_rbmap_remove_item( &g_ibsp.conn_map, &socket_info->conn_item ); + break; + } + cl_spinlock_release( &g_ibsp.socket_info_mutex ); + + /* Flush all completions. */ + if( socket_info->dup_cnt ) + ibsp_dup_overlap_abort( socket_info ); + + while( socket_info->send_cnt || socket_info->recv_cnt ) + ib_cq_comp( socket_info->cq_tinfo ); + + ibsp_dereg_socket( socket_info ); + + ib_destroy_socket( socket_info ); + +#ifdef IBSP_LOGGING + DataLogger_Shutdown(&socket_info->SendDataLogger); + DataLogger_Shutdown(&socket_info->RecvDataLogger); +#endif + + /* Release the initial reference and clean up. */ + deref_socket_info( socket_info ); + + IBSP_EXIT( IBSP_DBG_EP ); +} + + +boolean_t +ibsp_conn_insert( + IN struct ibsp_socket_info *s ) +{ + struct ibsp_socket_info *p_sock; + cl_rbmap_item_t *p_item, *p_insert_at; + boolean_t left = TRUE; + + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + p_item = cl_rbmap_root( &g_ibsp.conn_map ); + p_insert_at = p_item; + + CL_ASSERT( !s->conn_item.p_map ); + while( p_item != cl_rbmap_end( &g_ibsp.conn_map ) ) + { + p_insert_at = p_item; + p_sock = PARENT_STRUCT( p_item, struct ibsp_socket_info, conn_item ); + if( p_sock->local_addr.sin_family < s->local_addr.sin_family ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->local_addr.sin_family > s->local_addr.sin_family ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_sock->local_addr.sin_addr.S_un.S_addr < s->local_addr.sin_addr.S_un.S_addr ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->local_addr.sin_addr.S_un.S_addr > s->local_addr.sin_addr.S_un.S_addr ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_sock->local_addr.sin_port < s->local_addr.sin_port ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->local_addr.sin_port > s->local_addr.sin_port ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_sock->peer_addr.sin_family < s->peer_addr.sin_family ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->peer_addr.sin_family > s->peer_addr.sin_family ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_sock->peer_addr.sin_addr.S_un.S_addr < s->peer_addr.sin_addr.S_un.S_addr ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->peer_addr.sin_addr.S_un.S_addr > s->peer_addr.sin_addr.S_un.S_addr ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else if( p_sock->peer_addr.sin_port < s->peer_addr.sin_port ) + p_item = cl_rbmap_left( p_item ), left = TRUE; + else if( p_sock->peer_addr.sin_port > s->peer_addr.sin_port ) + p_item = cl_rbmap_right( p_item ), left = FALSE; + else + goto done; + } + + cl_rbmap_insert( &g_ibsp.conn_map, p_insert_at, &s->conn_item, left ); + +done: + cl_spinlock_release( &g_ibsp.socket_info_mutex ); + return p_item == cl_rbmap_end( &g_ibsp.conn_map ); +} + + +void +ibsp_conn_remove( + IN struct ibsp_socket_info *s ) +{ + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + CL_ASSERT( s->conn_item.p_map ); + cl_rbmap_remove_item( &g_ibsp.conn_map, &s->conn_item ); + cl_spinlock_release( &g_ibsp.socket_info_mutex ); +} diff --git a/branches/Ndi/ulp/wsd/user/ibsp_ip.c b/branches/Ndi/ulp/wsd/user/ibsp_ip.c new file mode 100644 index 00000000..6fb1e2c7 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_ip.c @@ -0,0 +1,645 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. 
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* Builds and returns the list of IP addresses available from all + * adapters. */ + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ibsp_ip.tmh" +#endif + +#include "ibspdll.h" + +/*--------------------------------------------------------------------------*/ + +/* + * Query an IP address from a GUID + */ + +struct ip_query_context +{ + cl_fmap_t *p_ip_map; + struct ibsp_port *p_port; +}; + + +intn_t CL_API +ip_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ) +{ + struct ibsp_ip_addr *p_ip1, *p_ip2; + + p_ip1 = (struct ibsp_ip_addr*)p_key1; + p_ip2 = (struct ibsp_ip_addr*)p_key2; + + if( p_ip1->ip_addr.S_un.S_addr < p_ip2->ip_addr.S_un.S_addr ) + return -1; + else if( p_ip1->ip_addr.S_un.S_addr > p_ip2->ip_addr.S_un.S_addr ) + return 1; + + /* IP addresses match. See if we need a port match too. */ + if( !p_ip1->p_port || !p_ip2->p_port ) + return 0; + + /* We need a port match too. */ + return cl_memcmp( + &p_ip1->p_port->guid, &p_ip2->p_port->guid, sizeof(net64_t) ); +} + + +/* Synchronously query the SA for an IP address. 
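
Note that ip_cmp() above only compares port GUIDs when both keys carry a port, so a NULL p_port acts as a wildcard. That is what lets get_port_from_ip_address() at the end of this file probe the map by IP alone:

	struct ibsp_ip_addr	key;

	key.ip_addr = sin_addr;		/* the IP being looked up */
	key.p_port = NULL;		/* wildcard: match whichever port owns it */
	p_item = cl_fmap_get( &g_ibsp.ip_map, &key );
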
*/ +int +query_ip_address( + IN struct ibsp_port *p_port, + IN OUT cl_fmap_t *p_ip_map ) +{ + IOCTL_IBAT_IP_ADDRESSES_IN in; + IOCTL_IBAT_IP_ADDRESSES_OUT *p_out; + DWORD size; + LONG i; + cl_fmap_item_t *p_item; + + IBSP_ENTER( IBSP_DBG_HW ); + + /* The list must be initialized and empty */ + CL_ASSERT( !cl_fmap_count( p_ip_map ) ); + + in.Version = IBAT_IOCTL_VERSION; + in.PortGuid = p_port->guid; + + cl_spinlock_acquire( &g_ibsp.ip_mutex ); + if( g_ibsp.h_ibat_dev == INVALID_HANDLE_VALUE ) + { + g_ibsp.h_ibat_dev = CreateFileW( IBAT_WIN32_NAME, + MAXIMUM_ALLOWED, 0, NULL, + OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL ); + } + cl_spinlock_release( &g_ibsp.ip_mutex ); + + size = sizeof(IOCTL_IBAT_IP_ADDRESSES_OUT); + + do + { + p_out = HeapAlloc( g_ibsp.heap, 0, size ); + + if( !p_out ) + { + IBSP_ERROR_EXIT( ("Failed to allocate output buffer.\n") ); + return -1; + } + + if( !DeviceIoControl( g_ibsp.h_ibat_dev, IOCTL_IBAT_IP_ADDRESSES, + &in, sizeof(in), p_out, size, &size, NULL ) ) + { + HeapFree( g_ibsp.heap, 0, p_out ); + IBSP_ERROR_EXIT( ( + "IOCTL_IBAT_IP_ADDRESSES for port %I64x failed (%x).\n", + p_port->guid, GetLastError()) ); + return -1; + } + + if( p_out->Size > size ) + { + size = p_out->Size; + HeapFree( g_ibsp.heap, 0, p_out ); + p_out = NULL; + } + + } while( !p_out ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("Port %I64x has %d IP addresses.\n", + p_port->guid, p_out->AddressCount) ); + + for( i = 0; i < p_out->AddressCount; i++ ) + { + struct ibsp_ip_addr *ip_addr; + + ip_addr = HeapAlloc( + g_ibsp.heap, 0, sizeof(struct ibsp_ip_addr) ); + if( !ip_addr ) + { + IBSP_ERROR_EXIT( ("no memory\n") ); + break; + } + + /* Copy the IP address */ + ip_addr->ip_addr.S_un.S_addr = + *(ib_net32_t *) &p_out->Address[i].Address[ATS_IPV4_OFFSET]; + ip_addr->p_port = p_port; + + p_item = cl_fmap_insert( p_ip_map, ip_addr, &ip_addr->item ); + if( p_item != &ip_addr->item ) + { + /* Duplicate! Should never happen. */ + IBSP_ERROR( ( + "Got duplicate addr %s\n", inet_ntoa( ip_addr->ip_addr )) ); + HeapFree( g_ibsp.heap, 0, ip_addr ); + continue; + } + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, + ("Got addr %s\n", inet_ntoa( ip_addr->ip_addr )) ); + } + + HeapFree( g_ibsp.heap, 0, p_out ); + + IBSP_EXIT( IBSP_DBG_HW ); + return 0; +} + + +/* Query a port for it list of supported IP addresses, and update the port and global lists. + * The port mutex must be taken. */ +static int +update_ip_addresses( + struct ibsp_port *port ) +{ + cl_fmap_t new_ip, old_ip, dup_ip; + cl_fmap_item_t *p_item; + int ret; + + cl_fmap_init( &new_ip, ip_cmp ); + cl_fmap_init( &old_ip, ip_cmp ); + cl_fmap_init( &dup_ip, ip_cmp ); + + /* Get the list of new addresses */ + ret = query_ip_address( port, &dup_ip ); + if( ret ) + { + IBSP_ERROR_EXIT( ( + "query_ip_address failed (%d)\n", ret) ); + return 1; + } + + cl_spinlock_acquire( &g_ibsp.ip_mutex ); + + /* Insert the new list of IP into the global list of IP addresses. */ + cl_fmap_delta( &g_ibsp.ip_map, &dup_ip, &new_ip, &old_ip ); + cl_fmap_merge( &g_ibsp.ip_map, &new_ip ); + CL_ASSERT( !cl_fmap_count( &new_ip ) ); + + /* + * Note that the map delta operation will have moved all IP addresses + * for other ports into the old list. Move them back. 
+ */ + for( p_item = cl_fmap_head( &old_ip ); + p_item != cl_fmap_end( &old_ip ); + p_item = cl_fmap_head( &old_ip ) ) + { + struct ibsp_ip_addr *p_ip = + PARENT_STRUCT( p_item, struct ibsp_ip_addr, item ); + + cl_fmap_remove_item( &old_ip, p_item ); + + if( p_ip->p_port != port ) + { + p_item = cl_fmap_insert( &g_ibsp.ip_map, p_ip, &p_ip->item ); + CL_ASSERT( p_item == &p_ip->item ); + } + else + { + HeapFree( g_ibsp.heap, 0, p_ip ); + } + } + + cl_spinlock_release( &g_ibsp.ip_mutex ); + + /* Now clean up duplicates entries. */ + for( p_item = cl_fmap_head( &dup_ip ); + p_item != cl_fmap_end( &dup_ip ); + p_item = cl_fmap_head( &dup_ip ) ) + { + struct ibsp_ip_addr *p_ip = + PARENT_STRUCT( p_item, struct ibsp_ip_addr, item ); + + cl_fmap_remove_item( &dup_ip, p_item ); + + HeapFree( g_ibsp.heap, 0, p_ip ); + } + + return 0; +} + +/*--------------------------------------------------------------------------*/ + +/* + * Query a GUID from an IP address + */ + +struct query_guid_context +{ + ib_api_status_t status; + ib_net64_t guid; +}; + +static void AL_API +query_guid_callback( + ib_query_rec_t *p_query_rec ) +{ + struct query_guid_context *query_context = + (struct query_guid_context * __ptr64)p_query_rec->query_context; + ib_service_record_t *service_record; + + IBSP_ENTER( IBSP_DBG_HW ); + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, + ("status is %d, count is %d\n", p_query_rec->status, + p_query_rec->result_cnt) ); + + if( p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt ) + { + query_context->status = IB_SUCCESS; + + service_record = ib_get_query_svc_rec( p_query_rec->p_result_mad, 0 ); + + CL_ASSERT( service_record != NULL ); + + query_context->guid = ib_gid_get_guid( &service_record->service_gid ); + } + else + { + query_context->status = IB_ERROR; + } + + if( p_query_rec->p_result_mad ) + ib_put_mad( p_query_rec->p_result_mad ); + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +/* Synchronously query the SA for a GUID. Return 0 on success. */ +int +query_guid_address( + IN struct ibsp_port *port, + IN ib_net32_t ip_addr, + OUT ib_net64_t *port_guid ) +{ + ib_user_query_t user_query; + struct query_guid_context query_context; + ib_service_record_t service_record; + ib_query_handle_t query_handle; + ib_query_req_t query_req; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_HW ); + + query_req.query_type = IB_QUERY_USER_DEFINED; + query_req.p_query_input = &user_query; + query_req.port_guid = port->guid; + query_req.timeout_ms = g_sa_timeout; + query_req.retry_cnt = g_sa_retries; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = &query_context; + query_req.pfn_query_cb = query_guid_callback; + + /* TODO: which method one is correct? 
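
The query below is an ATS (address translation service) style lookup: the IPv4 address rides in the last four bytes of the service record's service_data8 array, and comp_mask restricts the match to the pkey, lease, service name, and those four bytes. Since ip_addr is already in network byte order, the right-shifts on a little-endian host emit the octets in wire order (this byte-order reading is an inference, not stated in the patch):

	/* Byte layout assumed for ATS (service_data8[12..15]): */
	service_record.service_data8[12] = (uint8_t)(ip_addr >> 0);	/* first octet */
	service_record.service_data8[13] = (uint8_t)(ip_addr >> 8);
	service_record.service_data8[14] = (uint8_t)(ip_addr >> 16);
	service_record.service_data8[15] = (uint8_t)(ip_addr >> 24);	/* last octet */
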
*/ + user_query.method = IB_MAD_METHOD_GETTABLE; + //user_query.method = IB_MAD_METHOD_GET; + user_query.attr_id = IB_MAD_ATTR_SERVICE_RECORD; + user_query.attr_size = sizeof(ib_service_record_t); + user_query.comp_mask = + IB_SR_COMPMASK_SPKEY | + IB_SR_COMPMASK_SLEASE | + IB_SR_COMPMASK_SNAME | + IB_SR_COMPMASK_SDATA8_12 | + IB_SR_COMPMASK_SDATA8_13 | IB_SR_COMPMASK_SDATA8_14 | IB_SR_COMPMASK_SDATA8_15; + + user_query.p_attr = &service_record; + + memset( &service_record, 0, sizeof(service_record) ); + service_record.service_pkey = cl_hton16( IB_DEFAULT_PKEY ); + service_record.service_data8[12] = (uint8_t) (ip_addr >> 0); + service_record.service_data8[13] = (uint8_t) (ip_addr >> 8); + service_record.service_data8[14] = (uint8_t) (ip_addr >> 16); + service_record.service_data8[15] = (uint8_t) (ip_addr >> 24); + service_record.service_lease = 0xFFFFFFFF; + strcpy( (void*)service_record.service_name, ATS_NAME ); + + status = ib_query( g_ibsp.al_handle, &query_req, &query_handle ); + + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_query failed (%d)\n", status) ); + goto error; + } + + if( query_context.status != IB_SUCCESS ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, + ("query success, but no GUID for IP address %x (query status %d)\n", + ip_addr, query_context.status) ); + goto error; + } + + *port_guid = query_context.guid; + + IBSP_EXIT( IBSP_DBG_HW ); + return 0; + +error: + IBSP_ERROR_EXIT( ("query_ip_address failed\n") ); + return 1; +} + +/*--------------------------------------------------------------------------*/ + +/* + * Get a path record from a GUID + */ +struct query_pr_context +{ + ib_api_status_t status; + ib_path_rec_t *path_rec; +}; + + +static void AL_API +query_pr_callback( + ib_query_rec_t *p_query_rec ) +{ + struct query_pr_context *query_context = + (struct query_pr_context * __ptr64)p_query_rec->query_context; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_HW ); + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("status is %d\n", p_query_rec->status) ); + + if( p_query_rec->status == IB_SUCCESS && p_query_rec->result_cnt ) + { + ib_path_rec_t *path_rec; + + query_context->status = IB_SUCCESS; + + path_rec = ib_get_query_path_rec( p_query_rec->p_result_mad, 0 ); + + CL_ASSERT( path_rec ); + + /* Copy the path record */ + *query_context->path_rec = *path_rec; + } + else + { + query_context->status = IB_ERROR; + } + + if( p_query_rec->p_result_mad ) + status = ib_put_mad( p_query_rec->p_result_mad ); + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +/* Synchronously query the SA for a GUID. Return 0 on success. 
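
Although ib_query() is callback-driven, IB_FLAGS_SYNC makes the call block until the callback has run, so the stack-allocated context is safe to read as soon as ib_query() returns. Both SA queries in this file rely on that pattern; a sketch:

	struct query_pr_context	ctx;
	ib_query_req_t		req;
	ib_query_handle_t	h_query;

	req.flags = IB_FLAGS_SYNC;	/* ib_query() returns after the callback */
	req.query_context = &ctx;
	req.pfn_query_cb = query_pr_callback;
	/* ... fill in the remaining req fields as below ... */

	if( ib_query( g_ibsp.al_handle, &req, &h_query ) == IB_SUCCESS &&
		ctx.status == IB_SUCCESS )
	{
		/* ctx now holds the result copied by the callback. */
	}
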
*/ +int +query_pr( + IN struct ibsp_port *port, + IN ib_net64_t dest_port_guid, + OUT ib_path_rec_t *path_rec ) +{ + ib_gid_pair_t user_query; + struct query_pr_context query_context; + ib_query_handle_t query_handle; + ib_query_req_t query_req; + ib_api_status_t status; + uint8_t pkt_life; + + IBSP_ENTER( IBSP_DBG_HW ); + + query_req.query_type = IB_QUERY_PATH_REC_BY_GIDS; + query_req.p_query_input = &user_query; + query_req.port_guid = port->guid; + query_req.timeout_ms = g_sa_timeout; + query_req.retry_cnt = g_sa_retries; + query_req.flags = IB_FLAGS_SYNC; + query_req.query_context = &query_context; + query_req.pfn_query_cb = query_pr_callback; + + ib_gid_set_default( &user_query.src_gid, port->guid ); + ib_gid_set_default( &user_query.dest_gid, dest_port_guid ); + + query_context.path_rec = path_rec; + + fzprint(("%s():%d:0x%x:0x%x: Calling ib_query()..\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, + ("Query for path from %I64x to %I64x\n", + port->guid, dest_port_guid) ); + + status = ib_query( g_ibsp.al_handle, &query_req, &query_handle ); + + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_query failed (%d)\n", status) ); + goto error; + } + + fzprint(("%s():%d:0x%x:0x%x: Done calling ib_query()..\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId())); + + if( query_context.status != IB_SUCCESS ) + { + IBSP_ERROR( ("query failed (%d)\n", query_context.status) ); + goto error; + } + + if( (port->hca->dev_id == 0x5A44) && + (ib_path_rec_mtu( path_rec ) > IB_MTU_LEN_1024) ) + { + /* Local endpoint is Tavor - cap MTU to 1K for extra bandwidth. */ + path_rec->mtu &= IB_PATH_REC_SELECTOR_MASK; + path_rec->mtu |= IB_MTU_LEN_1024; + } + + pkt_life = ib_path_rec_pkt_life( path_rec ) + g_pkt_life_modifier; + if( pkt_life > 0x1F ) + pkt_life = 0x1F; + + path_rec->pkt_life &= IB_PATH_REC_SELECTOR_MASK; + path_rec->pkt_life |= pkt_life; + + IBSP_EXIT( IBSP_DBG_HW ); + return 0; + +error: + IBSP_ERROR_EXIT( ("query_ip_address failed\n") ); + return 1; +} + +/*--------------------------------------------------------------------------*/ + +/* Builds the list of all IP addresses supported. */ +int +build_ip_list( + IN OUT LPSOCKET_ADDRESS_LIST ip_list, + IN OUT LPDWORD ip_list_size, + OUT LPINT lpErrno ) +{ + size_t size_req; + size_t num_ip; + cl_list_item_t *p_hca_item, *p_port_item; + cl_fmap_item_t *p_item; + struct ibsp_hca *p_hca; + struct ibsp_port *p_port; + struct sockaddr_in *addr; + int i; + + IBSP_ENTER( IBSP_DBG_HW ); + + cl_spinlock_acquire( &g_ibsp.hca_mutex ); + for( p_hca_item = cl_qlist_head( &g_ibsp.hca_list ); + p_hca_item != cl_qlist_end( &g_ibsp.hca_list ); + p_hca_item = cl_qlist_next( p_hca_item ) ) + { + p_hca = PARENT_STRUCT( p_hca_item, struct ibsp_hca, item ); + + cl_spinlock_acquire( &p_hca->port_lock ); + for( p_port_item = cl_qlist_head( &p_hca->port_list ); + p_port_item != cl_qlist_end( &p_hca->port_list ); + p_port_item = cl_qlist_next( p_port_item ) ) + { + p_port = PARENT_STRUCT( p_port_item, struct ibsp_port, item ); + update_ip_addresses( p_port ); + } + cl_spinlock_release( &p_hca->port_lock ); + } + cl_spinlock_release( &g_ibsp.hca_mutex ); + + cl_spinlock_acquire( &g_ibsp.ip_mutex ); + + num_ip = cl_fmap_count( &g_ibsp.ip_map ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, (" num ip = %Id\n", num_ip) ); + + /* Note: the required size computed is a few bytes larger than necessary, + * but that keeps the code clean. 
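+	 *
+	 * The target layout is the standard WSA address list: the
+	 * SOCKET_ADDRESS_LIST header already embeds Address[0], each further
+	 * address needs another SOCKET_ADDRESS slot, and the SOCKADDR storage
+	 * for all entries is packed after the pointer array.  For num_ip > 0:
+	 *
+	 *   size = sizeof(SOCKET_ADDRESS_LIST)                      // header + Address[0]
+	 *        + (num_ip - 1) * (sizeof(SOCKET_ADDRESS) + sizeof(SOCKADDR))
+	 *        + sizeof(SOCKADDR);                                // storage for entry 0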
+	 */
+	size_req = sizeof(SOCKET_ADDRESS_LIST);
+
+	switch( num_ip )
+	{
+	case 0:
+		/* Note: ip_mutex is already held at this point. */
+		if( g_ibsp.h_ibat_dev != INVALID_HANDLE_VALUE )
+		{
+			CloseHandle( g_ibsp.h_ibat_dev );
+			g_ibsp.h_ibat_dev = INVALID_HANDLE_VALUE;
+		}
+		break;
+
+	default:
+		size_req +=
+			(num_ip - 1) * (sizeof(SOCKET_ADDRESS) + sizeof(SOCKADDR));
+		/* Fall through. */
+
+	case 1:
+		/* Add the space for the first address. */
+		size_req += sizeof(SOCKADDR);
+		break;
+	}
+
+	if( size_req > *ip_list_size )
+	{
+		cl_spinlock_release( &g_ibsp.ip_mutex );
+		*ip_list_size = (DWORD) size_req;
+		*lpErrno = WSAEFAULT;
+		IBSP_ERROR_EXIT( (
+			"returning default, size %Id (usually not an error)\n", size_req) );
+		return SOCKET_ERROR;
+	}
+
+	memset( ip_list, 0, *ip_list_size );
+
+	/* We store the array of addresses after the last address pointer. */
+	addr = (struct sockaddr_in *)(&(ip_list->Address[num_ip]));
+	*ip_list_size = (DWORD) size_req;
+
+	ip_list->iAddressCount = (INT) num_ip;
+
+	for( i = 0, p_item = cl_fmap_head( &g_ibsp.ip_map );
+		p_item != cl_fmap_end( &g_ibsp.ip_map );
+		i++, p_item = cl_fmap_next( p_item ) )
+	{
+		struct ibsp_ip_addr *ip_addr =
+			PARENT_STRUCT(p_item, struct ibsp_ip_addr, item);
+
+		ip_list->Address[i].iSockaddrLength = sizeof(struct sockaddr_in);
+		ip_list->Address[i].lpSockaddr = (LPSOCKADDR) addr;
+
+		addr->sin_family = AF_INET;
+		addr->sin_port = 0;
+		addr->sin_addr = ip_addr->ip_addr;
+
+		addr++;
+	}
+
+	cl_spinlock_release( &g_ibsp.ip_mutex );
+
+	IBSP_EXIT( IBSP_DBG_HW );
+
+	*lpErrno = 0;
+	return 0;
+}
+
+
+/* Find a port associated with an IP address. */
+struct ibsp_port *
+get_port_from_ip_address(
+	IN		const struct in_addr		sin_addr )
+{
+	cl_fmap_item_t		*p_item;
+	struct ibsp_ip_addr	ip;
+	struct ibsp_port	*p_port = NULL;
+
+	IBSP_ENTER( IBSP_DBG_HW );
+
+	ip.ip_addr = sin_addr;
+	ip.p_port = NULL;
+
+	cl_spinlock_acquire( &g_ibsp.ip_mutex );
+
+	p_item = cl_fmap_get( &g_ibsp.ip_map, &ip );
+	if( p_item != cl_fmap_end( &g_ibsp.ip_map ) )
+		p_port = PARENT_STRUCT(p_item, struct ibsp_ip_addr, item)->p_port;
+	else
+		IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("not found\n") );
+
+	cl_spinlock_release( &g_ibsp.ip_mutex );
+
+	IBSP_EXIT( IBSP_DBG_HW );
+	return p_port;
+}
diff --git a/branches/Ndi/ulp/wsd/user/ibsp_mem.c b/branches/Ndi/ulp/wsd/user/ibsp_mem.c
new file mode 100644
index 00000000..375bbe90
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/ibsp_mem.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * Copyright (c) 2006 Mellanox Technologies.  All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* Registers a memory region */ +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ibsp_mem.tmh" +#endif + +#include "ibspdll.h" + + +__forceinline boolean_t +__check_mr( + IN struct memory_reg *p_reg, + IN ib_access_t acl_mask, + IN void *start, + IN size_t len ) +{ + return( (p_reg->type.access_ctrl & acl_mask) == acl_mask && + start >= p_reg->type.vaddr && + ((uintn_t)start) + len <= + ((uintn_t)(uint64_t)p_reg->type.vaddr) + p_reg->type.length ); +} + + +/* Find the first registered mr that matches the given region. + * mem_list is either socket_info->buf_mem_list or socket_info->rdma_mem_list. + */ +struct memory_node * +lookup_partial_mr( + IN struct ibsp_socket_info *s, + IN ib_access_t acl_mask, + IN void *start, + IN size_t len ) +{ + struct memory_node *p_node; + cl_list_item_t *p_item; + + IBSP_ENTER( IBSP_DBG_MEM ); + + cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex ); + + for( p_item = cl_qlist_head( &s->mr_list ); + p_item != cl_qlist_end( &s->mr_list ); + p_item = cl_qlist_next( p_item ) ) + { + p_node = PARENT_STRUCT( p_item, struct memory_node, socket_item ); + if(p_node->p_reg && + __check_mr( p_node->p_reg, acl_mask, start, len ) ) + { + cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex ); + IBSP_EXIT( IBSP_DBG_MEM ); + return p_node; + } + } + + cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex ); + + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM, ("mr not found\n") ); + return NULL; +} + + +/* Registers a memory region. The memory region might be cached. + * mem_list is either socket_info->buf_mem_list or hca->rdma_mem_list. + */ +struct memory_node * +ibsp_reg_mem( + IN struct ibsp_socket_info *s, + IN ib_pd_handle_t pd, + IN void *start, + IN size_t len, + IN ib_access_t access_ctrl, + OUT LPINT lpErrno ) +{ + struct memory_node *p_node; + struct memory_reg *p_reg; + cl_list_item_t *p_item; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_MEM ); + + CL_ASSERT( start != NULL ); + CL_ASSERT( len != 0 ); + CL_ASSERT( (access_ctrl & ~(IB_AC_RDMA_READ | IB_AC_RDMA_WRITE | IB_AC_LOCAL_WRITE)) == + 0 ); + + /* Optimistically allocate a tracking structure. */ + p_node = HeapAlloc( g_ibsp.heap, 0, sizeof(struct memory_node) ); + if( !p_node ) + { + IBSP_ERROR_EXIT( ("AllocateOverlappedBuf:HeapAlloc() failed: %d\n", + GetLastError()) ); + *lpErrno = WSAENOBUFS; + return NULL; + } + + /* First, try to find a suitable MR */ + cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex ); + + /* Find the first registered mr that matches the given region. */ + for( p_item = cl_qlist_head( &s->port->hca->rdma_mem_list.list ); + p_item != cl_qlist_end( &s->port->hca->rdma_mem_list.list ); + p_item = cl_qlist_next( p_item ) ) + { + p_reg = PARENT_STRUCT(p_item, struct memory_reg, item); + + if( __check_mr( p_reg, access_ctrl, start, len ) ) + { + p_node->p_reg = p_reg; + p_node->s = s; + cl_qlist_insert_tail( &p_reg->node_list, &p_node->mr_item ); + cl_qlist_insert_head( + &s->mr_list, &p_node->socket_item ); + cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex ); + IBSP_EXIT( IBSP_DBG_MEM ); + return p_node; + } + } + + /* No corresponding MR has been found. Create a new one. 
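+	 * A cached registration was reusable only when it covered the request
+	 * with at least the requested rights; the __check_mr() predicate that
+	 * just failed for every cache entry amounts to:
+	 *
+	 *   (reg->access_ctrl & acl_mask) == acl_mask       // rights superset
+	 *   && start >= reg->vaddr                          // starts inside
+	 *   && start + len <= reg->vaddr + reg->length      // ends inside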
+	 */
+	p_reg = HeapAlloc( g_ibsp.heap, 0, sizeof(struct memory_reg) );
+
+	if( !p_reg )
+	{
+		IBSP_ERROR_EXIT( ("ibsp_reg_mem: HeapAlloc() failed: %d\n",
+			GetLastError()) );
+		cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
+		HeapFree( g_ibsp.heap, 0, p_node );
+		*lpErrno = WSAENOBUFS;
+		return NULL;
+	}
+
+	/* The node is not initialized yet.  All the parameters given are
+	 * supposed to be valid so we don't check them. */
+	cl_qlist_init( &p_reg->node_list );
+	p_reg->type.vaddr = start;
+	p_reg->type.length = len;
+	p_reg->type.access_ctrl = access_ctrl;
+
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM, ("pinning memory node %p\n", p_node) );
+	status = ib_reg_mem(
+		pd, &p_reg->type, &p_reg->lkey, &p_reg->rkey, &p_reg->mr_handle );
+
+	if( status )
+	{
+		cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
+		HeapFree( g_ibsp.heap, 0, p_reg );
+		HeapFree( g_ibsp.heap, 0, p_node );
+
+		IBSP_ERROR_EXIT( ("ib_reg_mem returned %s\n", ib_get_err_str(status)) );
+
+		*lpErrno = WSAEFAULT;
+		return NULL;
+	}
+
+	STAT_INC( mr_num );
+
+	p_node->p_reg = p_reg;
+	p_node->s = s;
+
+	/* Link to the list of nodes. */
+	cl_qlist_insert_head( &s->port->hca->rdma_mem_list.list, &p_reg->item );
+	cl_qlist_insert_head( &s->mr_list, &p_node->socket_item );
+	cl_qlist_insert_tail( &p_reg->node_list, &p_node->mr_item );
+	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+
+	*lpErrno = 0;
+	return p_node;
+}
+
+
+static inline int
+__ibsp_dereg_mem_mr(
+	IN		struct memory_node			*node )
+{
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	/* The underlying registration could have been freed before the node. */
+	if( node->p_reg )
+		cl_qlist_remove_item( &node->p_reg->node_list, &node->mr_item );
+
+	cl_qlist_remove_item( &node->s->mr_list, &node->socket_item );
+
+	/* Poison the whole node, not just sizeof(pointer) bytes. */
+	memset( node, 0x45, sizeof(struct memory_node) );
+	HeapFree( g_ibsp.heap, 0, node );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+	return 0;
+}
+
+
+/* Deregisters a memory region */
+int
+ibsp_dereg_mem(
+	IN		struct ibsp_socket_info		*s,
+	IN		struct memory_node			*node,
+		OUT	LPINT						lpErrno )
+{
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );
+	*lpErrno = __ibsp_dereg_mem_mr( node );
+	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+	return (*lpErrno? SOCKET_ERROR : 0);
+}
+
+
+/*
+ * Deregister the remaining memory regions on an HCA.  This function should
+ * only be called before destroying the PD.  In the normal case, the list
+ * should be empty because the switch should have done it.
+ */
+void
+ibsp_dereg_hca(
+	IN		struct mr_list				*mem_list )
+{
+	cl_list_item_t		*item;
+	cl_list_item_t		*item1;
+
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	cl_spinlock_acquire( &mem_list->mutex );
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM,
+		("%Id registrations.\n", cl_qlist_count( &mem_list->list )) );
+
+	for( item = cl_qlist_remove_head( &mem_list->list );
+		item != cl_qlist_end( &mem_list->list );
+		item = cl_qlist_remove_head( &mem_list->list ) )
+	{
+		struct memory_reg *p_reg = PARENT_STRUCT(item, struct memory_reg, item);
+		ib_api_status_t status;
+
+		/*
+		 * Clear the pointer from the node to this registration.  No need
+		 * to remove from the list as we're about to free the registration.
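+		 *
+		 * Sockets may still hold memory_node entries after this HCA-wide
+		 * teardown; NULLing p_node->p_reg is what lets a later
+		 * __ibsp_dereg_mem_mr() skip the node_list unlink safely:
+		 *
+		 *   if( node->p_reg )   // cleared here if the registration went first
+		 *       cl_qlist_remove_item( &node->p_reg->node_list, &node->mr_item );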
+		 */
+		for( item1 = cl_qlist_head( &p_reg->node_list );
+			item1 != cl_qlist_end( &p_reg->node_list );
+			item1 = cl_qlist_next( item1 ) )
+		{
+			struct memory_node *p_node =
+				PARENT_STRUCT( item1, struct memory_node, mr_item );
+			p_node->p_reg = NULL;
+		}
+
+		IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM, ("unpinning memory reg %p\n", p_reg) );
+		status = ib_dereg_mr( p_reg->mr_handle );
+		if( status )
+		{
+			IBSP_ERROR( (
+				"ib_dereg_mr returned %s\n", ib_get_err_str( status )) );
+		}
+		else
+		{
+			STAT_DEC( mr_num );
+		}
+
+		HeapFree( g_ibsp.heap, 0, p_reg );
+	}
+
+	cl_spinlock_release( &mem_list->mutex );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+}
+
+
+/* Deregister the remaining memory regions.  This function should only
+ * be called when destroying the socket.  In the normal case, the list
+ * should be empty because the switch should have done it. */
+void
+ibsp_dereg_socket(
+	IN		struct ibsp_socket_info		*s )
+{
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	if( !s->port )
+	{
+		CL_ASSERT( !cl_qlist_count( &s->mr_list ) );
+		IBSP_EXIT( IBSP_DBG_MEM );
+		return;
+	}
+
+	cl_spinlock_acquire( &s->port->hca->rdma_mem_list.mutex );
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_MEM,
+		("%Id registrations.\n", cl_qlist_count( &s->mr_list )) );
+
+	while( cl_qlist_count( &s->mr_list ) )
+	{
+		__ibsp_dereg_mem_mr( PARENT_STRUCT( cl_qlist_head( &s->mr_list ),
+			struct memory_node, socket_item) );
+	}
+
+	cl_spinlock_release( &s->port->hca->rdma_mem_list.mutex );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+}
+
+
+/*
+ * Loop through all the memory registrations on an HCA and release
+ * all that fall within the specified range.
+ */
+void
+ibsp_hca_flush_mr_cache(
+	IN		struct ibsp_hca				*p_hca,
+	IN		LPVOID						lpvAddress,
+	IN		SIZE_T						Size )
+{
+	struct memory_reg	*p_reg;
+	cl_list_item_t		*p_item;
+	cl_list_item_t		*p_item1;
+	ib_api_status_t		status;
+
+	IBSP_ENTER( IBSP_DBG_MEM );
+
+	cl_spinlock_acquire( &p_hca->rdma_mem_list.mutex );
+	p_item = cl_qlist_head( &p_hca->rdma_mem_list.list );
+	while( p_item != cl_qlist_end( &p_hca->rdma_mem_list.list ) )
+	{
+		p_reg = PARENT_STRUCT( p_item, struct memory_reg, item );
+
+		/* Move to the next item now so we can remove the current. */
+		p_item = cl_qlist_next( p_item );
+
+		if( lpvAddress > p_reg->type.vaddr ||
+			((uintn_t)lpvAddress) + Size <
+			((uintn_t)(uint64_t)p_reg->type.vaddr) + p_reg->type.length )
+		{
+			continue;
+		}
+
+		/*
+		 * Clear the pointer from all sockets' nodes to this registration.
+		 * No need to remove from the list as we're about to free the
+		 * registration.
+		 */
+		for( p_item1 = cl_qlist_head( &p_reg->node_list );
+			p_item1 != cl_qlist_end( &p_reg->node_list );
+			p_item1 = cl_qlist_next( p_item1 ) )
+		{
+			struct memory_node *p_node =
+				PARENT_STRUCT( p_item1, struct memory_node, mr_item );
+
+			p_node->p_reg = NULL;
+		}
+
+		cl_qlist_remove_item( &p_hca->rdma_mem_list.list, &p_reg->item );
+
+		status = ib_dereg_mr( p_reg->mr_handle );
+		if( status != IB_SUCCESS )
+		{
+			IBSP_ERROR( (
+				"ib_dereg_mr returned %s\n", ib_get_err_str(status)) );
+		}
+
+		HeapFree( g_ibsp.heap, 0, p_reg );
+	}
+	cl_spinlock_release( &p_hca->rdma_mem_list.mutex );
+
+	IBSP_EXIT( IBSP_DBG_MEM );
+}
diff --git a/branches/Ndi/ulp/wsd/user/ibsp_mem.h b/branches/Ndi/ulp/wsd/user/ibsp_mem.h
new file mode 100644
index 00000000..39861287
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/ibsp_mem.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies.  All rights reserved.
+ * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ diff --git a/branches/Ndi/ulp/wsd/user/ibsp_mngt.c b/branches/Ndi/ulp/wsd/user/ibsp_mngt.c new file mode 100644 index 00000000..4e221ce1 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_mngt.c @@ -0,0 +1,289 @@ +/* + * FileName: ibsp_mngt.c + * + * Copyright (c) + * + * Abstract: Hardware ressource management (HCA and ports). + * + * Author: + * + * Revision History: + * + */ + +#include "ibspdll.h" + +/* Build a list of IP addresses associated with a port */ +int +build_port_ip_list(IN struct ibsp_port *port) +{ + struct ibsp_ip_addr *ip_addr; + cl_list_item_t *item; + int ret; + + CL_ENTER(IBSP_DBG_HW, gdbg_lvl); + CL_TRACE(IBSP_DBG_HW, gdbg_lvl, + ("build_port_ip_list for port %UI64x\n", cl_ntoh64(port->guid))); + + cl_qlist_init(&port->ip_list); + + ret = query_ip_address(port, &port->ip_list); + if (ret) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("query_ip_address failed (%d)\n", ret)); + goto error; + } + + CL_EXIT(IBSP_DBG_HW, gdbg_lvl); + return 0; + + error: + /* Free the list */ + while((item = cl_qlist_remove_head(&port->ip_list)) != cl_qlist_end(&port->ip_list)) { + + ip_addr = PARENT_STRUCT(item, struct ibsp_ip_addr, item); + + HeapFree(g_ibsp.heap, 0, ip_addr); + } + + CL_EXIT_ERROR(IBSP_DBG_HW, gdbg_lvl, + ("Failed to build list of IP addr for port %016UI64x\n", + CL_HTON64(port->guid))); + + return 1; +} + +/* Get the info from a port. Link it to the parent HCA. */ +int +build_port_info(IN struct ibsp_hca *hca, + IN ib_net64_t port_guid, + IN uint8_t port_num, OUT struct ibsp_port **port_out) +{ + int ret; + struct ibsp_port *port; + cl_list_item_t *item_ip; + + CL_ENTER(IBSP_DBG_HW, gdbg_lvl); + + port = HeapAlloc(g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct ibsp_port)); + + if (port == NULL) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, + ("HeapAlloc failed (%d)\n", sizeof(struct ibsp_port))); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + port->guid = port_guid; + port->port_num = port_num; + port->hca = hca; + + ret = build_port_ip_list(port); + + if (ret) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("build_port_ip_list failed (%d)\n", ret)); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + /* Insert the new list of IP into the global list of IP addresses. 
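+	 * Each ibsp_ip_addr carries two intrusive list items so one allocation
+	 * can sit on both lists at once (layout sketched from usage here; the
+	 * header holds the authoritative definition):
+	 *
+	 *   struct ibsp_ip_addr {
+	 *       cl_list_item_t item;          // links port->ip_list
+	 *       cl_list_item_t item_global;   // links g_ibsp.ip_list
+	 *       ...
+	 *   };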
*/ + for(item_ip = cl_qlist_head(&port->ip_list); + item_ip != cl_qlist_end(&port->ip_list); item_ip = cl_qlist_next(item_ip)) { + + struct ibsp_ip_addr *ip = PARENT_STRUCT(item_ip, struct ibsp_ip_addr, item); + + cl_qlist_insert_tail(&g_ibsp.ip_list, &ip->item_global); + } + + *port_out = port; + + CL_EXIT(IBSP_DBG_HW, gdbg_lvl); + + ret = 0; + + done: + if (ret) { + HeapFree(g_ibsp.heap, 0, port); + } + + CL_EXIT(IBSP_DBG_HW, gdbg_lvl); + + return ret; +} + +/* Open and query the HCA for its ports */ +int +build_hca_info(IN ib_net64_t hca_guid, OUT struct ibsp_hca **hca_out) +{ + struct ibsp_hca *hca = NULL; + ib_ca_attr_t *ca_attr = NULL; + size_t ca_attr_size = 0; + uint8_t port_num; + int ret; + ib_api_status_t status; + + CL_ENTER(IBSP_DBG_HW, gdbg_lvl); + + hca = HeapAlloc(g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct ibsp_hca)); + if (hca == NULL) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, + ("can't get enough memory (%d)\n", sizeof(struct ibsp_hca))); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + hca->guid = hca_guid; + cl_qlist_init(&hca->ports_list); + + status = ib_open_ca(g_ibsp.al_handle, hca->guid, NULL, /* event handler */ + NULL, /* context */ + &hca->hca_handle); + + if (status != IB_SUCCESS) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("ib_open_ca failed (%d)\n", status)); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + /* Build the list of ports of each HCAs */ + query_ca_again: + status = ib_query_ca(hca->hca_handle, ca_attr, &ca_attr_size); + + if (status == IB_INSUFFICIENT_MEMORY) { + + CL_TRACE(IBSP_DBG_HW, gdbg_lvl, ("ib_query_ca needs %d bytes\n", ca_attr_size)); + + /* Allocate more memory */ + if (ca_attr) { + HeapFree(g_ibsp.heap, 0, ca_attr); + } + + ca_attr = HeapAlloc(g_ibsp.heap, 0, ca_attr_size); + + if (ca_attr) + goto query_ca_again; + else { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("HeapAlloc failed\n")); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + } else if (status != IB_SUCCESS) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("ib_query_ca failed (%d)\n", status)); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + CL_TRACE(IBSP_DBG_HW, gdbg_lvl, ("found %d port on that HCA\n", ca_attr->num_ports)); + + for(port_num = 0; port_num < ca_attr->num_ports; port_num++) { + struct ibsp_port *port; + + ret = build_port_info(hca, ca_attr->p_port_attr[port_num].port_guid, port_num + 1, /* TODO: correct or should query port info? */ + &port); + if (ret) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("build_port_info failed (%d)\n", ret)); + goto done; + } + + cl_qlist_insert_tail(&hca->ports_list, &port->item); + } + + *hca_out = hca; + + ret = 0; + + done: + if (ca_attr) { + HeapFree(g_ibsp.heap, 0, ca_attr); + } + + if (ret) { + if (hca) { + + if (hca->hca_handle) { + status = ib_close_ca(hca->hca_handle, NULL); + + if (status != IB_SUCCESS) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, + ("ib_close_ca failed (%d)\n", status)); + } + } + + HeapFree(g_ibsp.heap, 0, hca); + } + } + + CL_TRACE_EXIT(IBSP_DBG_HW, gdbg_lvl, ("return code is %d\n", ret)); + + return ret; +} + +/* Build the HCA tree. This allows for hotplug. Each HCA is + * discovered, as well as each ports. 
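+ *
+ * Discovery uses the usual two-call pattern for ib_get_ca_guids(): a
+ * first call with a NULL buffer that is expected to fail with
+ * IB_INSUFFICIENT_MEMORY while reporting the count, then a second call
+ * with a buffer of that size (abridged sketch, error checks omitted):
+ *
+ *   size_t n;
+ *   ib_get_ca_guids( al_handle, NULL, &n );            // n = adapter count
+ *   guids = HeapAlloc( heap, 0, n * sizeof(ib_net64_t) );
+ *   ib_get_ca_guids( al_handle, guids, &n );           // fills the array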
*/ +int +build_hca_tree(void) +{ + ib_net64_t *guid_list = NULL; + ib_api_status_t status; + int ret; + unsigned int hca_num; + size_t adapter_count; + + CL_ENTER(IBSP_DBG_HW, gdbg_lvl); + + /* Get the GUIDS of the adapters, so we can open them */ + status = ib_get_ca_guids(g_ibsp.al_handle, NULL, &adapter_count); + if (status != IB_INSUFFICIENT_MEMORY) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("first ib_get_ca_guids failed\n")); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + /* Make sure we have a reasonable number of HCAs */ + CL_ASSERT(adapter_count < 10); + + guid_list = HeapAlloc(g_ibsp.heap, 0, sizeof(ib_net64_t) * adapter_count); + if (guid_list == NULL) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, + ("can't get enough memory (%d, %d)\n", sizeof(ib_net64_t), + adapter_count)); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + status = ib_get_ca_guids(g_ibsp.al_handle, guid_list, &adapter_count); + if (status != IB_SUCCESS) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("second ib_get_ca_guids failed (%d)\n", status)); + ret = WSAEPROVIDERFAILEDINIT; + goto done; + } + + CL_TRACE(IBSP_DBG_HW, gdbg_lvl, ("got %d adapter guid(s)\n", adapter_count)); + + for(hca_num = 0; hca_num < adapter_count; hca_num++) { + + struct ibsp_hca *hca; + + ret = build_hca_info(guid_list[hca_num], &hca); + if (ret) { + CL_ERROR(IBSP_DBG_HW, gdbg_lvl, ("build_hca_info failed (%d)\n", ret)); + goto done; + } + + cl_qlist_insert_tail(&g_ibsp.hca_list, &hca->item); + } + + CL_ASSERT(adapter_count == cl_qlist_count(&g_ibsp.hca_list)); + + CL_EXIT(IBSP_DBG_HW, gdbg_lvl); + + ret = 0; + + done: + if (guid_list) { + HeapFree(g_ibsp.heap, 0, guid_list); + } + + CL_EXIT(IBSP_DBG_HW, gdbg_lvl); + + return ret; +} diff --git a/branches/Ndi/ulp/wsd/user/ibsp_perfmon.c b/branches/Ndi/ulp/wsd/user/ibsp_perfmon.c new file mode 100644 index 00000000..2b0bdcc1 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_perfmon.c @@ -0,0 +1,560 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#include "ibsp_perfmon.tmh" +#endif + +#include +#include "ibspdll.h" +#include "ibsp_perfmon.h" + + +struct _ibsp_pm_definition g_ibsp_pm_def; /* IB WSD performance object */ + + +void +IBSPPmInit( void ) +{ + HANDLE h_mapping; + BOOL just_created; + SECURITY_ATTRIBUTES sec_attr; + + IBSP_ENTER( IBSP_DBG_PERFMON ); + + g_pm_stat.idx = INVALID_IDX; + g_pm_stat.p_shmem = NULL; + + sec_attr.nLength = sizeof(SECURITY_ATTRIBUTES); + sec_attr.bInheritHandle = FALSE; + + if( !ConvertStringSecurityDescriptorToSecurityDescriptor( + IBSP_PM_SEC_STRING, SDDL_REVISION_1, + &(sec_attr.lpSecurityDescriptor), NULL ) ) + { + IBSP_ERROR( ("SecurityDescriptor error %d\n", GetLastError()) ); + return; + } + + h_mapping = CreateFileMapping( + INVALID_HANDLE_VALUE, // use paging file + &sec_attr, // security attributes + PAGE_READWRITE, // read/write access + 0, // size: high 32-bits + sizeof(pm_shmem_t), // size: low 32-bits + IBSP_PM_MAPPED_OBJ_NAME ); + + just_created = (GetLastError() != ERROR_ALREADY_EXISTS); + + LocalFree( sec_attr.lpSecurityDescriptor ); + + if( h_mapping == NULL ) + { + IBSP_ERROR_EXIT( ("CreateFileMapping error %d\n", GetLastError()) ); + return; + } + + /* Get a pointer to the shared memory. */ + g_pm_stat.p_shmem = MapViewOfFile( + h_mapping, // object handle + FILE_MAP_ALL_ACCESS, + 0, // high offset: map from + 0, // low offset: beginning + 0); // num bytes to map + + /* Now that we have the view mapped, we don't need the mapping handle. */ + g_pm_stat.h_mapping = h_mapping; + + if( g_pm_stat.p_shmem == NULL ) + { + IBSP_ERROR( ("MapViewOfFile returned %d\n", GetLastError()) ); + return; + } + + if( just_created ) + { + /* + * Reserve instance 0 for fallback counters + * Apps that can't get a dedicated slot will share this one. + */ + wcscpy( g_pm_stat.p_shmem->obj[0].app_name, + IBSP_PM_TOTAL_COUNTER_NAME ); + g_pm_stat.p_shmem->obj[0].taken = 1; + } + + IBSP_EXIT( IBSP_DBG_PERFMON ); +} + + +/* + * We always get a slot - either an individual one, or fall back on the + * common one. + */ +void +IBSPPmGetSlot( void ) +{ + WCHAR mod_path[MAX_PATH]; + WCHAR* buf; + int idx; + size_t name_len; + WCHAR id_str[12]; + mem_obj_t *p_slot; + pm_shmem_t* p_mem = g_pm_stat.p_shmem; + + IBSP_ENTER( IBSP_DBG_PERFMON ); + + if( g_pm_stat.p_shmem == NULL ) + { + g_pm_stat.pdata = g_pm_stat.fall_back_data; + return; + } + + GetModuleFileNameW( NULL, mod_path, MAX_PATH ); + + buf = wcsrchr( mod_path, L'\\' ); + if( !buf ) + buf = mod_path; + else + buf++; + + /* The max length is 11, one for the ':', and 10 for the process ID. */ + id_str[0] = ':'; + _ultow( GetCurrentProcessId(), &id_str[1], 10 ); + + /* Cap the length of the application. */ + name_len = min( wcslen( buf ), + IBSP_PM_APP_NAME_SIZE - 1 - wcslen( id_str ) ); + + /* instance 0 is taken for "Total" counters, so don't try it */ + for( idx = 1; idx < IBSP_PM_NUM_INSTANCES; idx++) + { + /* Compare with 0, exchange with 1 */ + if( InterlockedCompareExchange( + &g_pm_stat.p_shmem->obj[idx].taken, 1, 0 ) ) + { + continue; + } + + p_slot = &g_pm_stat.p_shmem->obj[idx]; + + /* Copy the app name. 
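+		 * The instance name becomes "<module>:<pid>", truncated so the
+		 * name, the ':' separator, up to 10 PID digits and the
+		 * terminating NUL all fit in IBSP_PM_APP_NAME_SIZE wide chars:
+		 *
+		 *   name_len = min( wcslen( buf ),
+		 *                   IBSP_PM_APP_NAME_SIZE - 1 - wcslen( id_str ) );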
*/ + CopyMemory( p_slot->app_name, buf, name_len * sizeof(WCHAR) ); + CopyMemory( &p_slot->app_name[name_len], id_str, + (wcslen( id_str ) + 1) * sizeof(WCHAR) ); + + g_pm_stat.idx = idx; + g_pm_stat.pdata = g_pm_stat.p_shmem->obj[idx].data; + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_PERFMON, + ("%S got slot %d\n", p_slot->app_name, idx) ); + break; + } + + if( idx == IBSP_PM_NUM_INSTANCES ) + { + /* + * Assign "Total" slot for this process to avoid loosing precious + * statistic. Keep saved idx INVALID so data won't be flushed during + * process closeout. + */ + g_pm_stat.pdata = p_mem->obj[0].data; + } + + IBSP_EXIT( IBSP_DBG_PERFMON ); +} + + +void +IBSPPmReleaseSlot( void ) +{ + mem_obj_t *p_slot; + int idx; + + /* perfmon never get registered itself in shared mem buffer */ + if ( g_pm_stat.idx == INVALID_IDX ) + return; + + if( g_pm_stat.p_shmem == NULL ) + return; + + p_slot = &g_pm_stat.p_shmem->obj[g_pm_stat.idx]; + + /* Add all the data to the "Total" bin (0) */ + for( idx = 0; idx < IBSP_PM_NUM_COUNTERS; idx++ ) + { + InterlockedExchangeAdd64( &g_pm_stat.p_shmem->obj[0].data[idx], + InterlockedExchange64( &g_pm_stat.pdata[idx], 0 ) ); + } + ZeroMemory( p_slot->app_name, sizeof(p_slot->app_name) ); + InterlockedExchange( &p_slot->taken, 0 ); + + g_pm_stat.idx = INVALID_IDX; + + IBSP_EXIT( IBSP_DBG_PERFMON ); +} + + +static BOOL +__PmIsQuerySupported( + IN WCHAR* p_query_str ) +{ + if( p_query_str == NULL ) + return TRUE; + + if( *p_query_str == 0 ) + return TRUE; + + if( wcsstr( p_query_str, L"Global" ) != NULL ) + return TRUE; + + if( wcsstr( p_query_str, L"Foreign" ) != NULL ) + return FALSE; + + if( wcsstr( p_query_str, L"Costly" ) != NULL ) + return FALSE; + + else + return TRUE; +} + + +/* + * http://msdn.microsoft.com/library/en-us/perfctrs/perf/openperformancedata.asp + */ +DWORD APIENTRY +IBSPPmOpen( + IN LPWSTR lpDeviceNames ) +{ + DWORD status = ERROR_SUCCESS; + HKEY pm_hkey = NULL; + DWORD data_size; + DWORD data_type; + DWORD first_counter = 0; + DWORD first_help = 0; + int num = 0; + int num_offset; + + IBSP_ENTER( IBSP_DBG_PERFMON ); + + UNUSED_PARAM(lpDeviceNames); + + if( g_pm_stat.threads++ ) + { + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; + } + + /* open Registry and query for the first and last keys */ + status = RegOpenKeyEx( HKEY_LOCAL_MACHINE, + IBSP_PM_REGISTRY_PATH IBSP_PM_SUBKEY_PERF, + 0L, KEY_READ, &pm_hkey); + + if( status != ERROR_SUCCESS ) + { + g_pm_stat.threads--; + IBSP_ERROR_EXIT( ("RegOpenKeyEx for perf information returned %d.\n", status) ); + return status; + } + + data_size = sizeof(DWORD); + status = RegQueryValueEx( pm_hkey, "First Counter", 0L, + &data_type, (LPBYTE)&first_counter, &data_size ); + + if( status != ERROR_SUCCESS ) + { + RegCloseKey(pm_hkey); + g_pm_stat.threads--; + IBSP_ERROR_EXIT( ("RegQueryValueEx for \"First Counter\" returned %d.\n", status) ); + return status; + } + + data_size = sizeof(DWORD); + status = RegQueryValueEx( pm_hkey, "First Help", 0L, + &data_type, (LPBYTE)&first_help, &data_size ); + + RegCloseKey( pm_hkey ); + + if( status != ERROR_SUCCESS ) + { + g_pm_stat.threads--; + IBSP_ERROR_EXIT( ("RegQueryValueEx for \"First Help\" returned %d.\n", status) ); + return status; + } + + /* perf_obj */ + g_ibsp_pm_def.perf_obj.ObjectNameTitleIndex = IBSP_PM_OBJ + first_counter; + g_ibsp_pm_def.perf_obj.ObjectHelpTitleIndex = IBSP_PM_OBJ + first_help; + g_ibsp_pm_def.perf_obj.TotalByteLength = + sizeof(ibsp_pm_definition_t) + sizeof(ibsp_pm_counters_t); + g_ibsp_pm_def.perf_obj.DefinitionLength = 
sizeof(ibsp_pm_definition_t); + g_ibsp_pm_def.perf_obj.HeaderLength = sizeof(PERF_OBJECT_TYPE); + + g_ibsp_pm_def.perf_obj.ObjectNameTitle = 0; + g_ibsp_pm_def.perf_obj.ObjectHelpTitle = 0; + + g_ibsp_pm_def.perf_obj.DetailLevel = PERF_DETAIL_NOVICE; + g_ibsp_pm_def.perf_obj.NumCounters = IBSP_PM_NUM_COUNTERS; + g_ibsp_pm_def.perf_obj.DefaultCounter = 0; + g_ibsp_pm_def.perf_obj.NumInstances = 0; + g_ibsp_pm_def.perf_obj.CodePage = 0; + + QueryPerformanceFrequency( &g_ibsp_pm_def.perf_obj.PerfFreq ); + + /* initialize all counter definitions */ + num_offset = IBSP_PM_OBJ + 2; + for ( num = 0; num < IBSP_PM_NUM_COUNTERS ; num++, num_offset += 2) + { + g_ibsp_pm_def.counter[num].CounterNameTitleIndex = num_offset + first_counter; + g_ibsp_pm_def.counter[num].CounterHelpTitleIndex = num_offset + first_help; + g_ibsp_pm_def.counter[num].ByteLength = sizeof(PERF_COUNTER_DEFINITION); + g_ibsp_pm_def.counter[num].CounterNameTitle = 0; + g_ibsp_pm_def.counter[num].CounterHelpTitle = 0; + g_ibsp_pm_def.counter[num].DefaultScale = 0; + g_ibsp_pm_def.counter[num].DetailLevel = PERF_DETAIL_NOVICE; + g_ibsp_pm_def.counter[num].CounterType = PERF_COUNTER_BULK_COUNT; + /* All counters should be kept to 64-bits for consistency and simplicity. */ + g_ibsp_pm_def.counter[num].CounterSize = sizeof(LONG64); + g_ibsp_pm_def.counter[num].CounterOffset = + (DWORD)offsetof( ibsp_pm_counters_t, data[num] ); + } + + g_pm_stat.h_evlog = RegisterEventSource( NULL, IBSP_PM_SUBKEY_NAME ); + if( !g_pm_stat.h_evlog ) + { + g_pm_stat.threads--; + status = GetLastError(); + IBSP_ERROR_EXIT( ("RegisterEventSource failed with %d\n", status) ); + return status; + } + + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; +} + + +/* + * http://msdn.microsoft.com/library/en-us/perfctrs/perf/closeperformancedata.asp + */ +DWORD APIENTRY +IBSPPmClose( void ) +{ + BOOL status; + + IBSP_ENTER( IBSP_DBG_PERFMON ); + + if( --g_pm_stat.threads ) + { + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; + } + + IBSPPmReleaseSlot(); + + /* avoid double closing */ + if( g_pm_stat.p_shmem != NULL ) + { + status = UnmapViewOfFile( g_pm_stat.p_shmem ); + g_pm_stat.p_shmem = NULL; + } + + if( g_pm_stat.h_evlog != NULL ) + { + DeregisterEventSource( g_pm_stat.h_evlog ); + g_pm_stat.h_evlog = NULL; + } + + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; +} + + + +/* + * http://msdn.microsoft.com/library/en-us/perfctrs/perf/collectperformancedata.asp + */ +DWORD WINAPI +IBSPPmCollectData( + IN LPWSTR lpValueName, + IN OUT LPVOID* lppData, + IN OUT LPDWORD lpcbTotalBytes, + IN OUT LPDWORD lpNumObjectTypes ) +{ + int32_t sh_num; + int32_t num_instances, max_instances; + uint32_t use_bytes; + ibsp_pm_definition_t *p_obj_def; + ibsp_pm_counters_t *p_count_def; + PERF_INSTANCE_DEFINITION *p_inst_def; + pm_shmem_t *p_mem; + LONG64 total_data[IBSP_PM_NUM_COUNTERS]; + + IBSP_ENTER( IBSP_DBG_PERFMON ); + + p_mem = (pm_shmem_t * __ptr64 )g_pm_stat.p_shmem; + + if( p_mem == NULL ) + { + IBSP_ERROR( ("No shared memory object\n") ); + goto done; + } + + if( !__PmIsQuerySupported(lpValueName ) ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_PERFMON, ("Unsupported query\n") ); + goto done; + } + + if( !g_pm_stat.threads ) + { + IBSP_ERROR( ("Initialization was not completed\n") ); +done: + *lpcbTotalBytes = 0; + *lpNumObjectTypes = 0; + + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; + } + + ZeroMemory( &total_data, sizeof(total_data) ); + num_instances = 0; + /* sum total counters that were not filled in completion routine */ + for( 
sh_num = 0; sh_num < IBSP_PM_NUM_INSTANCES; sh_num++ ) + { + if( !InterlockedCompareExchange( &p_mem->obj[sh_num].taken, 1, 1 ) ) + continue; + + total_data[BYTES_SEND] += p_mem->obj[sh_num].data[BYTES_SEND]; + total_data[BYTES_RECV] += p_mem->obj[sh_num].data[BYTES_RECV]; + total_data[BYTES_WRITE] += p_mem->obj[sh_num].data[BYTES_WRITE]; + total_data[BYTES_READ] += p_mem->obj[sh_num].data[BYTES_READ]; + /* Update total for current slot. */ + p_mem->obj[sh_num].data[BYTES_TOTAL] = + p_mem->obj[sh_num].data[BYTES_SEND] + + p_mem->obj[sh_num].data[BYTES_RECV] + + p_mem->obj[sh_num].data[BYTES_WRITE] + + p_mem->obj[sh_num].data[BYTES_READ]; + total_data[BYTES_TOTAL] += p_mem->obj[sh_num].data[BYTES_TOTAL]; + total_data[COMP_SEND] += p_mem->obj[sh_num].data[COMP_SEND]; + total_data[COMP_RECV] += p_mem->obj[sh_num].data[COMP_RECV]; + total_data[COMP_TOTAL] += p_mem->obj[sh_num].data[COMP_TOTAL]; + total_data[INTR_TOTAL] += p_mem->obj[sh_num].data[INTR_TOTAL]; + + num_instances++; + } + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_PERFMON, ("%d instances.\n", num_instances) ); + + /* calc buffer size required for data return */ + use_bytes = sizeof(ibsp_pm_definition_t) + \ + (sizeof(PERF_INSTANCE_DEFINITION) + \ + sizeof(ibsp_pm_counters_t) + \ + (sizeof(WCHAR) * IBSP_PM_APP_NAME_SIZE)) * num_instances; + + if( *lpcbTotalBytes < use_bytes ) + { + *lpcbTotalBytes = 0; + *lpNumObjectTypes = 0; + return ERROR_MORE_DATA; + } + + p_obj_def = (ibsp_pm_definition_t*)*lppData; + use_bytes = sizeof(ibsp_pm_definition_t); + + /* Copy counter definition */ + CopyMemory( p_obj_def, &g_ibsp_pm_def, sizeof(ibsp_pm_definition_t) ); + + p_obj_def->perf_obj.NumInstances = num_instances; + QueryPerformanceCounter( &p_obj_def->perf_obj.PerfTime ); + + max_instances = num_instances; + + /* Assign pointers for the first instance */ + p_inst_def = (PERF_INSTANCE_DEFINITION*)(p_obj_def + 1); + + for( sh_num = 0; sh_num < IBSP_PM_NUM_INSTANCES; sh_num++ ) + { + if( !InterlockedCompareExchange( &p_mem->obj[sh_num].taken, 1, 1 ) ) + continue; + + /* Make sure we don't overrun the buffer! */ + if( max_instances-- == 0 ) + break; + + p_inst_def->ByteLength = sizeof(PERF_INSTANCE_DEFINITION) + + (sizeof(WCHAR) * IBSP_PM_APP_NAME_SIZE); + p_inst_def->ParentObjectTitleIndex = 0; + p_inst_def->ParentObjectInstance = 0; + p_inst_def->UniqueID = -1; /* using module names */ + p_inst_def->NameOffset = sizeof(PERF_INSTANCE_DEFINITION); + + /* Length in bytes of Unicode name string, including terminating NULL */ + p_inst_def->NameLength = + (DWORD)wcslen( p_mem->obj[sh_num].app_name ) + 1; + p_inst_def->NameLength *= sizeof(WCHAR); + + CopyMemory( (WCHAR*)(p_inst_def + 1), + p_mem->obj[sh_num].app_name, p_inst_def->NameLength ); + + use_bytes += p_inst_def->ByteLength; + + /* advance to counter definition */ + p_count_def = (ibsp_pm_counters_t*) + (((BYTE*)p_inst_def) + p_inst_def->ByteLength); + + p_count_def->pm_block.ByteLength = sizeof(ibsp_pm_counters_t); + use_bytes += sizeof(ibsp_pm_counters_t); + + /* Here we report actual counter values. 
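+		 * For reference, each instance in the returned buffer follows
+		 * the perfmon layout built above:
+		 *
+		 *   [ibsp_pm_definition_t]                  // object + counter defs, once
+		 *   [PERF_INSTANCE_DEFINITION][name WCHARs] // per instance...
+		 *   [ibsp_pm_counters_t]                    // ...then its counter block
+		 *
+		 * Slot 0 ("_Total") is special-cased just below: it reports the
+		 * summed total_data computed earlier rather than its own raw data.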
*/ + if( sh_num == 0 ) + { + CopyMemory( p_count_def->data, total_data, sizeof(total_data) ); + } + else + { + CopyMemory( p_count_def->data, p_mem->obj[sh_num].data, + sizeof(p_mem->obj[sh_num].data) ); + } + + /* Advance pointers for the next instance definition */ + p_inst_def = (PERF_INSTANCE_DEFINITION*)(p_count_def + 1); + } + + p_obj_def->perf_obj.TotalByteLength = (DWORD)use_bytes; + + *lppData = ((BYTE*)*lppData) + use_bytes; + *lpNumObjectTypes = 1; + *lpcbTotalBytes = (DWORD)use_bytes; + + IBSP_EXIT( IBSP_DBG_PERFMON ); + return ERROR_SUCCESS; +} diff --git a/branches/Ndi/ulp/wsd/user/ibsp_perfmon.h b/branches/Ndi/ulp/wsd/user/ibsp_perfmon.h new file mode 100644 index 00000000..d3098689 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_perfmon.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef _IBSP_PERFMON_H_ +#define _IBSP_PERFMON_H_ + + +#include +#include +#include "wsd/ibsp_regpath.h" + + +/* invalid instance index value to initialize */ +#define INVALID_IDX 0xffffffff + +#define IBSP_PM_SEC_STRING \ + TEXT("D:(A;CIOI;GAFA;;;WD)") /* SDDL_EVERYONE */ + +#define IBSP_PM_NUM_OBJECT_TYPES 1 +#define IBSP_PM_NUM_INSTANCES 100 /* how many processes we can handle */ + +#define IBSP_PM_APP_NAME_SIZE 24 /* Must be multiple of 8 */ +#define IBSP_PM_TOTAL_COUNTER_NAME L"_Total" +#define IBSP_PM_MAPPED_OBJ_NAME TEXT("Global\\ibwsd_perfmon_data") + + +/* Structures used to report counter information to perfmon. 
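+ * The definition block is emitted once per collect call; each counter's
+ * CounterOffset must point at its LONG64 slot inside the counter block
+ * that follows every instance, which is why IBSPPmOpen() computes:
+ *
+ *   g_ibsp_pm_def.counter[n].CounterOffset =
+ *       (DWORD)offsetof( ibsp_pm_counters_t, data[n] );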
*/ +typedef struct _ibsp_pm_definition +{ + PERF_OBJECT_TYPE perf_obj; + PERF_COUNTER_DEFINITION counter[IBSP_PM_NUM_COUNTERS]; + +} ibsp_pm_definition_t; + +typedef struct _ibsp_pm_counters +{ + PERF_COUNTER_BLOCK pm_block; + LONG64 data[IBSP_PM_NUM_COUNTERS]; + +} ibsp_pm_counters_t; + + +/* Structures used to manage counters internally */ +typedef struct _mem_obj +{ + volatile LONG taken; + WCHAR app_name[IBSP_PM_APP_NAME_SIZE]; + LONG64 data[IBSP_PM_NUM_COUNTERS]; + +} mem_obj_t; + +typedef struct _pm_shmem +{ + mem_obj_t obj[IBSP_PM_NUM_INSTANCES]; + +} pm_shmem_t; + + +/* global data for every process linked to this DLL */ +struct _pm_stat +{ + struct _pm_shmem* p_shmem; /* base pointer to shared memory for this process */ + volatile LONG64* pdata; /* pointer to data collected */ + HANDLE h_mapping; + HANDLE h_evlog; /* event log handle */ + DWORD threads; /* number of threads open */ + DWORD idx; /* slot index assigned for this process */ + LONG64 fall_back_data[IBSP_PM_NUM_COUNTERS]; + +} g_pm_stat; + + +void +IBSPPmInit( void ); + +DWORD APIENTRY +IBSPPmClose( void ); + +void +IBSPPmGetSlot( void ); + +void +IBSPPmReleaseSlot( void ); + +#endif /* _IBSP_PERFMON_H_ */ diff --git a/branches/Ndi/ulp/wsd/user/ibsp_pnp.c b/branches/Ndi/ulp/wsd/user/ibsp_pnp.c new file mode 100644 index 00000000..72593bb1 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibsp_pnp.c @@ -0,0 +1,442 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* TODO: right now, hotplug is not supported. 
*/ + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "ibsp_pnp.tmh" +#endif + +#include "ibspdll.h" + + +static void pnp_port_remove( + IN struct ibsp_port* const port ); + + +/* Find a HCA in the list based on its GUID */ +static struct ibsp_hca * +lookup_hca( + ib_net64_t ca_guid ) +{ + cl_list_item_t *item; + + cl_spinlock_acquire( &g_ibsp.hca_mutex ); + + for( item = cl_qlist_head( &g_ibsp.hca_list ); + item != cl_qlist_end( &g_ibsp.hca_list ); + item = cl_qlist_next( item ) ) + { + struct ibsp_hca *hca = PARENT_STRUCT(item, struct ibsp_hca, item); + if( hca->guid == ca_guid ) + { + /* Found */ + cl_spinlock_release( &g_ibsp.hca_mutex ); + return hca; + } + } + + cl_spinlock_release( &g_ibsp.hca_mutex ); + + return NULL; +} + + +/* Add a new adapter */ +ib_api_status_t +pnp_ca_add( + IN ib_pnp_ca_rec_t* const p_ca_rec ) +{ + struct ibsp_hca *hca; + ib_api_status_t status; + + IBSP_ENTER( IBSP_DBG_HW ); + + hca = HeapAlloc( g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct ibsp_hca) ); + if( hca == NULL ) + { + IBSP_ERROR( ("can't get enough memory (%d)\n", sizeof(struct ibsp_hca)) ); + status = IB_INSUFFICIENT_MEMORY; + goto pnp_ca_add_err1; + } + + hca->guid = p_ca_rec->p_ca_attr->ca_guid; + hca->dev_id = p_ca_rec->p_ca_attr->dev_id; + cl_qlist_init( &hca->port_list ); + cl_spinlock_init( &hca->port_lock ); + cl_qlist_init( &hca->rdma_mem_list.list ); + cl_spinlock_init( &hca->rdma_mem_list.mutex ); + cl_spinlock_init( &hca->cq_lock ); + + /* HCA handle */ + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, + ("handle is %p %016I64x\n", g_ibsp.al_handle, hca->guid) ); + status = + ib_open_ca( g_ibsp.al_handle, hca->guid, NULL, hca, &hca->hca_handle ); + + if( status != IB_SUCCESS ) + { + IBSP_ERROR( ("ib_open_ca failed (%d)\n", status) ); + goto pnp_ca_add_err2; + } + + STAT_INC( ca_num ); + + /* Protection domain for the HCA */ + status = ib_alloc_pd( hca->hca_handle, IB_PDT_NORMAL, hca, &hca->pd ); + if( status == IB_SUCCESS ) + { + STAT_INC( pd_num ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_EP, ("allocated PD %p for HCA\n", hca->pd) ); + + /* Success */ + cl_spinlock_acquire( &g_ibsp.hca_mutex ); + cl_qlist_insert_tail( &g_ibsp.hca_list, &hca->item ); + cl_spinlock_release( &g_ibsp.hca_mutex ); + + p_ca_rec->pnp_rec.context = hca; + } + else + { + IBSP_ERROR( ("ib_alloc_pd failed (%d)\n", status) ); + if( ib_close_ca( hca->hca_handle, NULL ) == IB_SUCCESS ) + STAT_DEC( ca_num ); + +pnp_ca_add_err2: + HeapFree( g_ibsp.heap, 0, hca ); + + } +pnp_ca_add_err1: + + IBSP_EXIT( IBSP_DBG_HW ); + return status; +} + + +/* Remove an adapter and its ports. 
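+ * pnp_ca_add() stashed the hca pointer in pnp_rec.context, and AL hands
+ * that same context back with the matching remove event; that is how
+ * pnp_callback() below recovers the object to tear down:
+ *
+ *   case IB_PNP_CA_REMOVE:
+ *       pnp_ca_remove( (struct ibsp_hca* __ptr64)pnp_rec->context );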
*/ +void +pnp_ca_remove( + struct ibsp_hca *hca ) +{ + ib_api_status_t status; + cl_list_item_t *p_item; + struct cq_thread_info *p_cq_tinfo; + + IBSP_ENTER( IBSP_DBG_HW ); + + /* + * Remove all the ports + */ + cl_spinlock_acquire( &hca->port_lock ); + while( cl_qlist_count( &hca->port_list ) ) + { + p_item = cl_qlist_remove_head( &hca->port_list ); + + HeapFree( g_ibsp.heap, 0, + PARENT_STRUCT(p_item, struct ibsp_port, item) ); + } + cl_spinlock_release( &hca->port_lock ); + + cl_spinlock_acquire( &hca->cq_lock ); + while( hca->cq_tinfo ) + { + p_cq_tinfo = hca->cq_tinfo; + + hca->cq_tinfo = PARENT_STRUCT( + cl_qlist_next( &hca->cq_tinfo->list_item ), + struct cq_thread_info, list_item ); + + __cl_primitive_remove( &p_cq_tinfo->list_item ); + + if( hca->cq_tinfo == p_cq_tinfo ) + break; + + cl_spinlock_release( &hca->cq_lock ); + ib_destroy_cq_tinfo( p_cq_tinfo ); + cl_spinlock_acquire( &hca->cq_lock ); + } + cl_spinlock_release( &hca->cq_lock ); + + if( hca->pd ) + { + ibsp_dereg_hca( &hca->rdma_mem_list ); + + /* + * No need to wait for PD destruction - CA destruction will block + * until all child resources are released. + */ + status = ib_dealloc_pd( hca->pd, NULL ); + if( status ) + { + IBSP_ERROR( ("ib_dealloc_pd failed (%d)\n", status) ); + } + else + { + STAT_DEC( pd_num ); + } + hca->pd = NULL; + } + + if( hca->hca_handle ) + { + status = ib_close_ca( hca->hca_handle, ib_sync_destroy ); + if( status != IB_SUCCESS ) + IBSP_ERROR( ("ib_close_ca failed (%d)\n", status) ); + + hca->hca_handle = NULL; + } + + /* Remove the HCA from the HCA list and free it. */ + cl_spinlock_acquire( &g_ibsp.hca_mutex ); + cl_qlist_remove_item( &g_ibsp.hca_list, &hca->item ); + cl_spinlock_release( &g_ibsp.hca_mutex ); + + cl_spinlock_destroy( &hca->port_lock ); + cl_spinlock_destroy( &hca->rdma_mem_list.mutex ); + + cl_spinlock_destroy( &hca->cq_lock ); + + HeapFree( g_ibsp.heap, 0, hca ); + + IBSP_EXIT( IBSP_DBG_HW ); +} + + +/* Add a new port to an adapter */ +static ib_api_status_t +pnp_port_add( + IN OUT ib_pnp_port_rec_t* const p_port_rec ) +{ + struct ibsp_hca *hca; + struct ibsp_port *port; + + IBSP_ENTER( IBSP_DBG_HW ); + + hca = lookup_hca( p_port_rec->p_ca_attr->ca_guid ); + if( !hca ) + { + IBSP_ERROR( ("Failed to lookup HCA (%016I64x) for new port (%016I64x)\n", + p_port_rec->p_ca_attr->ca_guid, p_port_rec->p_port_attr->port_guid) ); + IBSP_EXIT( IBSP_DBG_HW ); + return IB_INVALID_GUID; + } + + port = HeapAlloc( g_ibsp.heap, HEAP_ZERO_MEMORY, sizeof(struct ibsp_port) ); + if( port == NULL ) + { + IBSP_ERROR( ("HeapAlloc failed (%d)\n", sizeof(struct ibsp_port)) ); + IBSP_EXIT( IBSP_DBG_HW ); + return IB_INSUFFICIENT_MEMORY; + } + + port->guid = p_port_rec->p_port_attr->port_guid; + port->port_num = p_port_rec->p_port_attr->port_num; + port->hca = hca; + + cl_spinlock_acquire( &hca->port_lock ); + cl_qlist_insert_tail( &hca->port_list, &port->item ); + cl_spinlock_release( &hca->port_lock ); + p_port_rec->pnp_rec.context = port; + + IBSP_EXIT( IBSP_DBG_HW ); + return IB_SUCCESS; +} + + +/* Remove a port. The IP addresses should have already been removed. 
 */
+static void
+pnp_port_remove(
+	IN				struct ibsp_port* const		port )
+{
+	IBSP_ENTER( IBSP_DBG_HW );
+
+	if( !port )
+		goto done;
+
+	CL_ASSERT( port->hca );
+
+	/* Remove the port from the HCA list */
+	cl_spinlock_acquire( &port->hca->port_lock );
+	cl_qlist_remove_item( &port->hca->port_list, &port->item );
+	cl_spinlock_release( &port->hca->port_lock );
+
+	HeapFree( g_ibsp.heap, 0, port );
+
+done:
+	IBSP_EXIT( IBSP_DBG_HW );
+}
+
+
+static ib_api_status_t AL_API
+pnp_callback(
+	IN				ib_pnp_rec_t				*pnp_rec )
+{
+	ib_api_status_t		status = IB_SUCCESS;
+	ib_pnp_port_rec_t*	p_port_rec = (ib_pnp_port_rec_t*)pnp_rec;
+
+	IBSP_ENTER( IBSP_DBG_HW );
+	IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_HW, ("event is %x\n", pnp_rec->pnp_event) );
+
+	switch( pnp_rec->pnp_event )
+	{
+	/* CA events */
+	case IB_PNP_CA_ADD:
+		status = pnp_ca_add( (ib_pnp_ca_rec_t*)pnp_rec );
+		break;
+
+	case IB_PNP_CA_REMOVE:
+		pnp_ca_remove( (struct ibsp_hca* __ptr64)pnp_rec->context );
+		break;
+
+	/* Port events */
+	case IB_PNP_PORT_ADD:
+		status = pnp_port_add( p_port_rec );
+		break;
+
+	case IB_PNP_PORT_INIT:
+	case IB_PNP_PORT_ARMED:
+	case IB_PNP_PORT_ACTIVE:
+	case IB_PNP_PORT_DOWN:
+		/* Nothing to do. */
+		break;
+
+	case IB_PNP_PORT_REMOVE:
+		pnp_port_remove( (struct ibsp_port* __ptr64)pnp_rec->context );
+		break;
+
+	case IB_PNP_PKEY_CHANGE:
+	case IB_PNP_SM_CHANGE:
+	case IB_PNP_GID_CHANGE:
+	case IB_PNP_LID_CHANGE:
+	case IB_PNP_SUBNET_TIMEOUT_CHANGE:
+		IBSP_ERROR( ("pnp_callback: unsupported event %x\n", pnp_rec->pnp_event) );
+		break;
+
+	/* Discovery complete event */
+	case IB_PNP_REG_COMPLETE:
+		break;
+
+	default:
+		IBSP_ERROR( ("pnp_callback: unsupported event %x\n", pnp_rec->pnp_event) );
+		break;
+	}
+
+	IBSP_EXIT( IBSP_DBG_HW );
+
+	return status;
+}
+
+
+/* Registers for PNP events and starts the hardware discovery */
+ib_api_status_t
+register_pnp(void)
+{
+	ib_api_status_t		status;
+	ib_pnp_req_t		pnp_req;
+
+	IBSP_ENTER( IBSP_DBG_HW );
+
+	pnp_req.pnp_class = IB_PNP_CA;
+	pnp_req.pnp_context = NULL;
+	pnp_req.pfn_pnp_cb = pnp_callback;
+	status = ib_reg_pnp( g_ibsp.al_handle, &pnp_req, &g_ibsp.pnp_handle_ca );
+	if( status != IB_SUCCESS )
+	{
+		IBSP_ERROR( ("register_pnp: ib_reg_pnp for CA failed (%d)\n", status) );
+		goto done;
+	}
+
+	pnp_req.pnp_class = IB_PNP_PORT | IB_PNP_FLAG_REG_SYNC;
+	pnp_req.pnp_context = NULL;
+	pnp_req.pfn_pnp_cb = pnp_callback;
+	status = ib_reg_pnp( g_ibsp.al_handle, &pnp_req, &g_ibsp.pnp_handle_port );
+	if( status != IB_SUCCESS )
+	{
+		IBSP_ERROR( ("register_pnp: ib_reg_pnp for PORT failed (%d)\n", status) );
+		goto done;
+	}
+
+	STAT_INC( pnp_num );
+
+done:
+	if( status != IB_SUCCESS )
+	{
+		unregister_pnp();
+	}
+
+	IBSP_EXIT( IBSP_DBG_HW );
+
+	return status;
+}
+
+
+/* Unregisters the PNP events */
+void
+unregister_pnp(void)
+{
+	ib_api_status_t		status;
+
+	IBSP_ENTER( IBSP_DBG_HW );
+
+	if( g_ibsp.pnp_handle_port )
+	{
+		status = ib_dereg_pnp( g_ibsp.pnp_handle_port, ib_sync_destroy );
+		if( status != IB_SUCCESS )
+		{
+			IBSP_ERROR( ("unregister_pnp: ib_dereg_pnp for PORT failed (%d)\n",
+				status) );
+		}
+
+		g_ibsp.pnp_handle_port = NULL;
+	}
+
+	if( g_ibsp.pnp_handle_ca )
+	{
+		status = ib_dereg_pnp( g_ibsp.pnp_handle_ca, ib_sync_destroy );
+		if( status != IB_SUCCESS )
+		{
+			IBSP_ERROR( ("unregister_pnp: ib_dereg_pnp for CA failed (%d)\n",
+				status) );
+		}
+
+		g_ibsp.pnp_handle_ca = NULL;
+	}
+
+	IBSP_EXIT( IBSP_DBG_HW );
+}
diff --git a/branches/Ndi/ulp/wsd/user/ibspdebug.c b/branches/Ndi/ulp/wsd/user/ibspdebug.c
new file mode 100644
index
00000000..c67a84d3 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspdebug.c @@ -0,0 +1,291 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ibspdll.h" + +#ifdef _DEBUG_ + + +void +DebugPrintIBSPIoctlParams( + uint32_t flags, + DWORD dwIoControlCode, + LPVOID lpvInBuffer, + DWORD cbInBuffer, + LPVOID lpvOutBuffer, + DWORD cbOutBuffer, + LPWSAOVERLAPPED lpOverlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + LPWSATHREADID lpThreadId ) +{ + UNUSED_PARAM( lpThreadId ); + + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tdwIoControlCode :") ); + switch( dwIoControlCode ) + { + case SIO_GET_QOS: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("SIO_GET_QOS\n") ); + break; + case SIO_GET_GROUP_QOS: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("SIO_GET_GROUP_QOS\n") ); + break; + case SIO_SET_QOS: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("SIO_SET_QOS\n") ); + break; + case SIO_SET_GROUP_QOS: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("SIO_SET_GROUP_QOS\n") ); + break; + case SIO_ADDRESS_LIST_QUERY: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("SIO_ADDRESS_LIST_QUERY\n") ); + break; + default: + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("UNKNOWN control code 0x%x)\n", dwIoControlCode) ); + break; + } + + if( lpvInBuffer == NULL ) + { + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tInput Buffer pointer is NULL\n") ); + } + else + { + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tInput buffer len (%d)\n", cbInBuffer) ); + } + if( lpvOutBuffer == NULL ) + { + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tOutput Buffer pointer is NULL\n") ); + } + else + { + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tOutput buffer len (%d)\n", cbOutBuffer) ); + } + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, + ("\tOverlapped IO is (%s)\n", ( lpOverlapped == NULL) ? "FALSE" : "TRUE") ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, + ("\tCompletion Routine is %s\n", + ( lpCompletionRoutine == NULL) ? 
"NULL" : "non NULL") ); +} + + +void +DebugPrintSockAddr( + uint32_t flags, + struct sockaddr_in *pSockAddr ) +{ + + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tAddressFamily (0x%x)\n", pSockAddr->sin_family) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tPortNumber (0x%x)\n", pSockAddr->sin_port) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags,("\tIPAddress (%s)\n", inet_ntoa(pSockAddr->sin_addr )) ); +} + + +void +debug_dump_buffer( + uint32_t flags, + const char *name, + void *buf, + size_t len ) +{ + unsigned char *p = buf; + size_t i; + char str[100]; + char *s; + + s = str; + *s = 0; + + IBSP_PRINT( TRACE_LEVEL_VERBOSE, flags,("HEX for %s:\n", name) ); + + for( i = 0; i < len; i++ ) + { + s += sprintf( s, "%02x ", p[i] ); + if( i % 16 == 15 ) + { + IBSP_PRINT( TRACE_LEVEL_VERBOSE, flags, ("HEX:%s: %s\n", name, str) ); + s = str; + *s = 0; + } + } + IBSP_PRINT( TRACE_LEVEL_VERBOSE, flags, ("HEX:%s: %s\n", name, str) ); +} + + +void +debug_dump_overlapped( + uint32_t flags, + const char *name, + LPWSAOVERLAPPED lpOverlapped ) +{ + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, ("dumping OVERLAPPED %s:\n", name) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" lpOverlapped = %p\n", lpOverlapped) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" Internal = %x\n", lpOverlapped->Internal) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" InternalHigh = %d\n", lpOverlapped->InternalHigh) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" Offset = %d\n", lpOverlapped->Offset) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" OffsetHigh = %d %\n", lpOverlapped->OffsetHigh) ); + IBSP_PRINT( TRACE_LEVEL_INFORMATION, flags, (" hEvent = %x\n", (uintptr_t) lpOverlapped->hEvent) ); +} + +#endif /* _DEBUG_ */ + + +#ifdef IBSP_LOGGING + +VOID DataLogger_Init( + DataLogger *pLogger, + char *prefix, + struct sockaddr_in *addr1, + struct sockaddr_in *addr2 ) +{ + HANDLE hFile; + HANDLE hMapFile; + + char Name[100]; + DWORD DataSize = 20 * 1024 * 1024; + + sprintf(Name,"c:\\%s_%d.%d.%d.%d_%d_%d.%d.%d.%d_%d", + prefix, + addr1->sin_addr.S_un.S_un_b.s_b1, + addr1->sin_addr.S_un.S_un_b.s_b2, + addr1->sin_addr.S_un.S_un_b.s_b3, + addr1->sin_addr.S_un.S_un_b.s_b4, + CL_NTOH16(addr1->sin_port), + addr2->sin_addr.S_un.S_un_b.s_b1, + addr2->sin_addr.S_un.S_un_b.s_b2, + addr2->sin_addr.S_un.S_un_b.s_b3, + addr2->sin_addr.S_un.S_un_b.s_b4, + CL_NTOH16(addr2->sin_port) + ); + + pLogger->NextPrint = NULL; + pLogger->BufferStart = NULL; + pLogger->ShutdownClosed = FALSE; + pLogger->ToatalPrinted = 0; + pLogger->TotalSize = DataSize; + + hFile = CreateFile( Name, GENERIC_READ|GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, + FILE_ATTRIBUTE_NORMAL, NULL ); + + if (hFile == INVALID_HANDLE_VALUE) + { + IBSP_ERROR( ("CreateFile failed with error %d\n", GetLastError()) ); + return; + } + + hMapFile = CreateFileMapping(hFile, // current file handle + NULL, // default security + PAGE_READWRITE, // read/write permission + 0, // max. object size + DataSize, // size of hFile + NULL); // name of mapping object + + CloseHandle( hFile ); + + if (hMapFile == NULL) + { + IBSP_ERROR( ("Could not create file mapping object.\n") ); + return; + } + + pLogger->BufferStart = MapViewOfFile(hMapFile, // handle to mapping object + FILE_MAP_ALL_ACCESS, // read/write permission + 0, // max. 
object size + 0, // size of hFile + 0); // map entire file + + CloseHandle( hMapFile ); + + if( pLogger->BufferStart == NULL ) + { + IBSP_ERROR( ("Could not MapViewOfFile.\n") ); + return; + } + + pLogger->NextPrint = pLogger->BufferStart; + cl_memclr(pLogger->NextPrint, DataSize); +} + + +VOID DataLogger_WriteData( + DataLogger *pLogger, + long Idx, + char *Data, + DWORD Len ) +{ + char MessageHeader[16]; + CL_ASSERT(Len < 64000); + CL_ASSERT(pLogger->ShutdownClosed == FALSE); + CL_ASSERT(Len < pLogger->TotalSize / 3); + + if( !pLogger->BufferStart ) + return; + + cl_memset( MessageHeader, 0xff, sizeof(MessageHeader) ); + cl_memcpy( MessageHeader+4, &Idx, sizeof(Idx) ); + cl_memcpy( MessageHeader+8, &Len, sizeof(Len) ); + + pLogger->ToatalPrinted += Len; + + if( pLogger->NextPrint + Len + (2 * sizeof (MessageHeader)) > + pLogger->BufferStart + pLogger->TotalSize ) + { + /* We will now zero the remaing of the buffer, and restart */ + cl_memclr( pLogger->NextPrint, + pLogger->TotalSize - (pLogger->NextPrint - pLogger->BufferStart) ); + pLogger->NextPrint = pLogger->BufferStart; + } + + /* Just simple copy */ + cl_memcpy( pLogger->NextPrint, MessageHeader, sizeof(MessageHeader) ); + pLogger->NextPrint += sizeof(MessageHeader); + + cl_memcpy( pLogger->NextPrint, Data, Len ); + pLogger->NextPrint += Len; + + /* + * Add the end marker but don't update NextPrint so the next message + * overwrites the previous message's end marker. + */ + cl_memset( pLogger->NextPrint, 0xff, sizeof(MessageHeader) ); +} + + +VOID DataLogger_Shutdown( + DataLogger *pLogger ) +{ + if( !pLogger->BufferStart ) + return; + + UnmapViewOfFile( pLogger->BufferStart ); +} + +#endif /* IBSP_LOGGING */ diff --git a/branches/Ndi/ulp/wsd/user/ibspdebug.h b/branches/Ndi/ulp/wsd/user/ibspdebug.h new file mode 100644 index 00000000..98d95a1a --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspdebug.h @@ -0,0 +1,267 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
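An aside on the DataLogger format above, not from the patch: each record in the mapped file is a 16-byte header (bytes 0-3 and 12-15 left as 0xff fill, the long record index at offset 4, the DWORD payload length at offset 8) followed by the payload, and a header whose length field still reads 0xffffffff is the end marker. A hypothetical offline parser under that assumed layout:

#include <stdio.h>
#include <string.h>
#include <windows.h>

/* Walk one pass over a DataLogger buffer read back from disk. */
static void
sketch_parse_log(
	const unsigned char		*buf,
	size_t					size )
{
	size_t	pos = 0;
	long	idx;
	DWORD	len;

	while( pos + 16 <= size )
	{
		memcpy( &idx, buf + pos + 4, sizeof(idx) );
		memcpy( &len, buf + pos + 8, sizeof(len) );

		/* An untouched (still all-0xff) or zeroed header ends the valid data. */
		if( len == 0xFFFFFFFF || len == 0 )
			break;

		if( pos + 16 + len > size )
			break;		/* truncated record */

		printf( "record %ld: %lu bytes at offset %lu\n",
			idx, (unsigned long)len, (unsigned long)(pos + 16) );
		pos += 16 + len;
	}
}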
+ * + * $Id$ + */ + +#ifndef _IBSP_DEBUG_H_ +#define _IBSP_DEBUG_H_ + + +#include "ibspdll.h" +#include +#include + +#ifndef __MODULE__ +#define __MODULE__ "[IBSP]" +#endif + + + + +extern uint32_t g_ibsp_dbg_level; +extern uint32_t g_ibsp_dbg_flags; + +#if defined(EVENT_TRACING) +// +// Software Tracing Definitions +// + + +#define WPP_CONTROL_GUIDS \ + WPP_DEFINE_CONTROL_GUID(IBSPCtlGuid,(156A98A5,8FDC,4d00,A673,0638123DF336), \ + WPP_DEFINE_BIT( IBSP_DBG_ERROR) \ + WPP_DEFINE_BIT( IBSP_DBG_DLL) \ + WPP_DEFINE_BIT( IBSP_DBG_SI) \ + WPP_DEFINE_BIT( IBSP_DBG_INIT) \ + WPP_DEFINE_BIT( IBSP_DBG_WQ) \ + WPP_DEFINE_BIT( IBSP_DBG_EP) \ + WPP_DEFINE_BIT( IBSP_DBG_MEM) \ + WPP_DEFINE_BIT( IBSP_DBG_CM) \ + WPP_DEFINE_BIT( IBSP_DBG_CONN) \ + WPP_DEFINE_BIT( IBSP_DBG_OPT) \ + WPP_DEFINE_BIT( IBSP_DBG_NEV) \ + WPP_DEFINE_BIT( IBSP_DBG_HW) \ + WPP_DEFINE_BIT( IBSP_DBG_IO) \ + WPP_DEFINE_BIT( IBSP_DBG_DUP) \ + WPP_DEFINE_BIT( IBSP_DBG_PERFMON)) + + + +#define WPP_LEVEL_FLAGS_ENABLED(lvl, flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= lvl) +#define WPP_LEVEL_FLAGS_LOGGER(lvl,flags) WPP_LEVEL_LOGGER(flags) +#define WPP_FLAG_ENABLED(flags) (WPP_LEVEL_ENABLED(flags) && WPP_CONTROL(WPP_BIT_ ## flags).Level >= TRACE_LEVEL_VERBOSE) +#define WPP_FLAG_LOGGER(flags) WPP_LEVEL_LOGGER(flags) + + +// begin_wpp config +// IBSP_ENTER( FLAG ); +// IBSP_EXIT( FLAG ); +// USEPREFIX(IBSP_PRINT, "%!STDPREFIX! %!FUNC!() :"); +// USEPREFIX(IBSP_PRINT_EXIT, "%!STDPREFIX! %!FUNC!() :"); +// USEPREFIX(IBSP_ERROR, "%!STDPREFIX! %!FUNC!() :ERR***"); +// USEPREFIX(IBSP_ERROR_EXIT, "%!STDPREFIX! %!FUNC!() :ERR***"); +// USESUFFIX(IBSP_ENTER, " %!FUNC!():["); +// USESUFFIX(IBSP_EXIT, " %!FUNC!():]"); +// end_wpp + + + +#define STAT_INC(name) +#define STAT_DEC(name) +#define BREAKPOINT(x) +#define DebugPrintIBSPIoctlParams(a,b,c,d,e,f,g,h,i) +#define DebugPrintSockAddr(a,b) +#define fzprint(a) +#define STATS(expr) + +#else + +#include +#include + +/* + * Debug macros + */ + + + +#define IBSP_DBG_ERR 0x00000001 /* error */ +#define IBSP_DBG_DLL 0x00000002 /* DLL */ +#define IBSP_DBG_SI 0x00000004 /* socket info */ +#define IBSP_DBG_INIT 0x00000008 /* initialization code */ +#define IBSP_DBG_WQ 0x00000010 /* WQ related functions */ +#define IBSP_DBG_EP 0x00000020 /* Endpoints related functions */ +#define IBSP_DBG_MEM 0x00000040 /* memory registration */ +#define IBSP_DBG_CM 0x00000080 /* CM */ +#define IBSP_DBG_CONN 0x00000100 /* connections */ +#define IBSP_DBG_OPT 0x00000200 /* socket options */ +#define IBSP_DBG_NEV 0x00000400 /* network events */ +#define IBSP_DBG_HW 0x00000800 /* Hardware */ +#define IBSP_DBG_IO 0x00001000 /* Overlapped I/O request */ +#define IBSP_DBG_DUP 0x00002000 /* Socket Duplication */ +#define IBSP_DBG_PERFMON 0x00004000 /* Performance Monitoring */ + +#define IBSP_DBG_ERROR (CL_DBG_ERROR | IBSP_DBG_ERR) + + + +#if DBG + +// assignment of _level_ is needed to overcome warning C4127 +#define IBSP_PRINT( _level_,_flag_,_msg_) \ + { \ + if( g_ibsp_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_ibsp_dbg_flags, _msg_ ); \ + } + + +#define IBSP_PRINT_EXIT( _level_,_flag_,_msg_) \ + { \ + if( g_ibsp_dbg_level >= (_level_) ) \ + CL_TRACE( _flag_, g_ibsp_dbg_flags, _msg_ );\ + IBSP_EXIT( _flag_ );\ + } + +#define IBSP_ENTER( _flag_) \ + { \ + if( g_ibsp_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_ENTER( _flag_, g_ibsp_dbg_flags ); \ + } + +#define IBSP_EXIT( _flag_)\ + { \ + if( g_ibsp_dbg_level >= TRACE_LEVEL_VERBOSE ) \ + CL_EXIT( _flag_, g_ibsp_dbg_flags ); \ + } + + +//#define
fzprint(a) CL_PRINT(IBSP_DBG_USER, IBSP_DBG_USER, a) +#define fzprint(a) + +//#define BREAKPOINT(x) if( gCurrentDebugLevel & x ) { DebugBreak(); } +void +DebugPrintIBSPIoctlParams( + uint32_t flags, + DWORD dwIoControlCode, + LPVOID lpvInBuffer, + DWORD cbInBuffer, + LPVOID lpvOutBuffer, + DWORD cbOutBuffer, + LPWSAOVERLAPPED lpOverlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + LPWSATHREADID lpThreadId ); + + +void +DebugPrintSockAddr( + uint32_t flags, + struct sockaddr_in *sockaddr ); + +void +debug_dump_buffer( + uint32_t flags, + const char *name, + void *buf, + size_t len ); + +void +debug_dump_overlapped( + uint32_t flags, + const char *name, + LPWSAOVERLAPPED lpOverlapped ); + +/* Activate memory tracking debugging */ +#define HeapAlloc(a,b,c) cl_zalloc(c) +#define HeapFree(a,b,c) (cl_free(c), TRUE) +#define HeapCreate(a,b,c) ((HANDLE)(-1)) +#define HeapDestroy(a) + +#define STAT_INC(name) cl_atomic_inc( &g_ibsp.name ) +#define STAT_DEC(name) cl_atomic_dec( &g_ibsp.name ) + +#else + + +#define IBSP_PRINT( _level_,_flag_,_msg_) +#define IBSP_PRINT_EXIT( _level_,_flag_,_msg_) +#define IBSP_ENTER( _flag_) +#define IBSP_EXIT( _flag_) +#define fzprint(a) +#endif /* DBG */ + + +#define IBSP_ERROR( _msg_) \ + IBSP_PRINT( TRACE_LEVEL_ERROR, IBSP_DBG_ERROR, _msg_) + +#define IBSP_ERROR_EXIT( _msg_) \ + IBSP_PRINT_EXIT( TRACE_LEVEL_ERROR, IBSP_DBG_ERROR, _msg_) + + +#endif /* EVENT_TRACING */ + +/* + * To enable logging of all Send/Receive data for each socket + * uncomment the following line. + */ +//#define IBSP_LOGGING + +#ifdef IBSP_LOGGING + +typedef struct _DataLogger +{ + char *BufferStart; + size_t TotalSize; + char *NextPrint; + size_t ToatalPrinted; + BOOL ShutdownClosed; + HANDLE hMapFile; + +} DataLogger; + + +VOID DataLogger_Init( + DataLogger *pLoger, + char *prefix, + struct sockaddr_in *addr1, + struct sockaddr_in *addr2 ); + + +VOID DataLogger_WriteData( + DataLogger *pLoger, + long Idx, + char *Data, + DWORD Len ); + +VOID DataLogger_Shutdown( + DataLogger *pLoger ); + +#endif /* IBSP_LOGGING */ + +#endif /* _IBSP_DEBUG_H_ */ diff --git a/branches/Ndi/ulp/wsd/user/ibspdefines.h b/branches/Ndi/ulp/wsd/user/ibspdefines.h new file mode 100644 index 00000000..3af02c3e --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspdefines.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + + +/* Message sizes. 
*/ +/* TODO: these sizes are arbitrary */ +#define IB_MAX_MSG_SIZE 0xFFFFFFFF //(32*1024*1024) +#define IB_MAX_RDMA_SIZE 0xFFFFFFFF //(16*1024*1024) +#define IB_RDMA_THRESHOLD_SIZE (4*1024) +/* TODO: Change back */ +//#define IB_RDMA_THRESHOLD_SIZE (16*1024) + +/* Number of work completion to poll at a time */ +#define WC_LIST_SIZE 8 + +/* QP creation parameters */ +#define QP_ATTRIB_RESPONDER_RESOURCES 4 +#define QP_ATTRIB_INITIATOR_DEPTH 4 +#define QP_ATTRIB_RETRY_COUNT 6 +#define QP_ATTRIB_RNR_RETRY 7 +#define QP_ATTRIB_RNR_NAK_TIMEOUT 8 /* 16 ms */ + +#define QP_ATTRIB_SQ_DEPTH 16 + +/* + * Only the RDMA calls can have more than one SGE - the send and receive always + * have just one. For send and receives, the switch always uses its internal + * buffers. For RDMAs the switch will issue requests with at most 4 SGEs. + * We support twice that for good measure. + */ +#define QP_ATTRIB_SQ_SGE 8 + +/* Our indexes are single-byte, so make sure we don't screw up. */ +C_ASSERT( QP_ATTRIB_SQ_DEPTH <= 256 ); + +/* + * TODO: During testing, the switch has been observed to post + * 12 receive buffers. It would be nice to know what the max is. + */ +#define QP_ATTRIB_RQ_DEPTH 16 +#define QP_ATTRIB_RQ_SGE 1 + +/* Our indexes are single-byte, so make sure we don't screw up. */ +C_ASSERT( QP_ATTRIB_RQ_DEPTH <= 256 ); + +/* Number of entries in a CQ */ +/* + * TODO: Workaround until MTHCA driver supports resize CQ, pre-allocate + * for 100 QPs per CQ. + */ +#define IB_CQ_SIZE (QP_ATTRIB_SQ_DEPTH + QP_ATTRIB_RQ_DEPTH) +#define IB_INIT_CQ_SIZE (IB_CQ_SIZE * 500) + +/* CM timeouts */ +#define CM_MIN_LOCAL_TIMEOUT (18) +#define CM_LOCAL_TIMEOUT (1) +#define CM_MIN_REMOTE_TIMEOUT (18) +#define CM_REMOTE_TIMEOUT (2) +#define CM_RETRIES 4 + +/* Base service ID for listen */ +#define BASE_LISTEN_ID (CL_CONST64(0xb6e36efb8eda0000)) diff --git a/branches/Ndi/ulp/wsd/user/ibspdll.c b/branches/Ndi/ulp/wsd/user/ibspdll.c new file mode 100644 index 00000000..ae427640 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspdll.c @@ -0,0 +1,2326 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
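A short worked example of the sizing above, not part of the patch: each connected socket consumes at most QP_ATTRIB_SQ_DEPTH (16) send completions plus QP_ATTRIB_RQ_DEPTH (16) receive completions, so IB_CQ_SIZE is 32 CQ entries per socket and IB_INIT_CQ_SIZE pre-allocates 32 * 500 = 16000 entries up front. The C_ASSERTs hold because a single-byte index can address at most 256 slots, and keeping the depths powers of two lets such an index wrap with a mask instead of a compare (the same trick IBSPRecv uses further down). A minimal sketch; the SKETCH_ names are illustrative:

#define SKETCH_RQ_DEPTH		16						/* mirrors QP_ATTRIB_RQ_DEPTH */
#define SKETCH_RQ_MASK		(SKETCH_RQ_DEPTH - 1)	/* valid only for power-of-two depths */

/* Advance a single-byte ring index, wrapping with a mask instead of a compare. */
static __inline uint8_t
sketch_next_rq_idx(
	IN				uint8_t						idx )
{
	return (uint8_t)( (idx + 1) & SKETCH_RQ_MASK );
}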
+ * + * $Id$ + */ + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#include "ibspdll.tmh" +#endif + + +#include +#include +#include "ibspdll.h" + +#ifdef PERFMON_ENABLED +#include "ibsp_perfmon.h" +#endif /* PERFMON_ENABLED */ + +/* Globals */ +struct ibspdll_globals g_ibsp; + +/* Defines */ +static const WCHAR *Description = L"Winsock Service Provider for Infiniband Transport"; + +/* Unique provider GUID generated with "uuidgen -s". Same as in installsp.c. */ +static const GUID provider_guid = { + /* c943654d-2c84-4db7-af3e-fdf1c5322458 */ + 0xc943654d, 0x2c84, 0x4db7, + {0xaf, 0x3e, 0xfd, 0xf1, 0xc5, 0x32, 0x24, 0x58} +}; + +static DWORD no_read = 0; +uint32_t g_max_inline = 0xFFFFFFFF; +uint32_t g_max_poll = 500; +uint32_t g_sa_timeout = 500; +uint32_t g_sa_retries = 4; +int g_connect_err = WSAEADDRNOTAVAIL; +uint8_t g_max_cm_retries = CM_RETRIES; +uint8_t g_pkt_life_modifier = 0; +uint8_t g_qp_retries = QP_ATTRIB_RETRY_COUNT; +DWORD_PTR g_dwPollThreadAffinityMask = 0; + +uint32_t g_ibsp_dbg_level = TRACE_LEVEL_ERROR; +uint32_t g_ibsp_dbg_flags = 0x1; + +/* + * Function: DllMain + * + * Description: + * Provides initialization when the ibspdll DLL is loaded. + */ +#pragma auto_inline( off ) +static BOOL +_DllMain( + IN HINSTANCE hinstDll, + IN DWORD dwReason, + IN LPVOID lpvReserved ) +{ + TCHAR env_var[16]; + DWORD i; + + IBSP_ENTER( IBSP_DBG_DLL ); + + UNUSED_PARAM( hinstDll ); + UNUSED_PARAM( lpvReserved ); + + fzprint(("%s():%d:0x%x:0x%x: hinstDll=%d dwReason=%d lpvReserved=0x%p\n", + __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), hinstDll, dwReason, lpvReserved)); + +//#ifdef _DEBUG_ +#if 0 + { + char buf[64]; + if( GetEnvironmentVariable( "IBSPLOAD", buf, sizeof(buf) ) == 0 ) + { + IBSP_ERROR_EXIT( ("IBSPLOAD not defined:\n") ); + + return FALSE; + } + } +#endif + + switch( dwReason ) + { + case DLL_PROCESS_ATTACH: + + +#if defined(EVENT_TRACING) +#if DBG + WPP_INIT_TRACING(L"ibspdll.dll"); +#else + WPP_INIT_TRACING(L"ibspdll.dll"); +#endif +#endif + + + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DLL, ("DllMain: DLL_PROCESS_ATTACH\n") ); + + +#if !defined(EVENT_TRACING) +#if DBG + + i = GetEnvironmentVariable( "IBWSD_DBG_LEVEL", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + { + g_ibsp_dbg_level = _tcstoul( env_var, NULL, 16 ); + } + + i = GetEnvironmentVariable( "IBWSD_DBG_FLAGS", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + { + g_ibsp_dbg_flags = _tcstoul( env_var, NULL, 16 ); + } + + if( g_ibsp_dbg_flags & IBSP_DBG_ERR ) + g_ibsp_dbg_flags |= CL_DBG_ERROR; + + IBSP_PRINT(TRACE_LEVEL_INFORMATION ,IBSP_DBG_DLL , + ("Given IBAL_UAL_DBG debug level:%d debug flags 0x%x\n", + g_ibsp_dbg_level ,g_ibsp_dbg_flags) ); + +#endif +#endif + + + /* See if the user wants to disable RDMA reads. 
*/ + no_read = GetEnvironmentVariable( "IBWSD_NO_READ", NULL, 0 ); + + i = GetEnvironmentVariable( "IBWSD_INLINE", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + g_max_inline = _tcstoul( env_var, NULL, 10 ); + + i = GetEnvironmentVariable( "IBWSD_POLL", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + g_max_poll = _tcstoul( env_var, NULL, 10 ); + + i = GetEnvironmentVariable( "IBWSD_POLL_THREAD_AFFINITY_MASK", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + g_dwPollThreadAffinityMask = _tcstoul( env_var, NULL, 10 ); + else + { + DWORD_PTR xx; + BOOL ret = GetProcessAffinityMask(GetCurrentProcess(), &g_dwPollThreadAffinityMask, &xx); + CL_ASSERT(ret != 0); + if (ret == 0) { + IBSP_ERROR( ("GetProcessAffinityMask failed (not a fatal error)\n") ); + } + ret = ret; + } + + i = GetEnvironmentVariable( "IBWSD_SA_RETRY", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + g_sa_retries = _tcstoul( env_var, NULL, 10 ); + + i = GetEnvironmentVariable( "IBWSD_SA_TIMEOUT", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + g_sa_timeout = _tcstoul( env_var, NULL, 10 ); + + i = GetEnvironmentVariable( "IBWSD_NO_IPOIB", env_var, sizeof(env_var) ); + if( i ) + g_connect_err = WSAEHOSTUNREACH; + + i = GetEnvironmentVariable( "IBWSD_CM_RETRY", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + { + g_max_cm_retries = (uint8_t)_tcstoul( env_var, NULL, 0 ); + if( g_max_cm_retries < 4 ) + g_max_cm_retries = 4; + else if( g_max_cm_retries > 0xF ) + g_max_cm_retries = 0xF; + } + + i = GetEnvironmentVariable( "IBWSD_PKT_LIFE", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + { + g_pkt_life_modifier = (uint8_t)_tcstoul( env_var, NULL, 0 ); + if( g_pkt_life_modifier > 0x1F ) + g_pkt_life_modifier = 0x1F; + } + + i = GetEnvironmentVariable( "IBWSD_QP_RETRY", env_var, sizeof(env_var) ); + if( i && i <= 16 ) + { + g_qp_retries = (uint8_t)_tcstoul( env_var, NULL, 0 ); + if( g_qp_retries > 7 ) + g_qp_retries = 7; + } + + if( init_globals() ) + return FALSE; + +#ifdef PERFMON_ENABLED + IBSPPmInit(); +#endif + break; + + case DLL_THREAD_ATTACH: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DLL, ("DllMain: DLL_THREAD_ATTACH\n") ); + break; + + case DLL_THREAD_DETACH: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DLL, ("DllMain: DLL_THREAD_DETACH\n") ); + break; + + case DLL_PROCESS_DETACH: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_DLL, ("DllMain: DLL_PROCESS_DETACH\n") ); + +#ifdef _DEBUG_ + { + cl_list_item_t *socket_item = NULL; + + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + + for( socket_item = cl_qlist_head( &g_ibsp.socket_info_list ); + socket_item != cl_qlist_end( &g_ibsp.socket_info_list ); + socket_item = cl_qlist_next( socket_item ) ) + { + struct ibsp_socket_info *socket_info = NULL; + socket_info = PARENT_STRUCT(socket_item, struct ibsp_socket_info, item); + +#ifdef IBSP_LOGGING + DataLogger_Shutdown(&socket_info->SendDataLogger); + DataLogger_Shutdown(&socket_info->RecvDataLogger); +#endif + } + + cl_spinlock_release( &g_ibsp.socket_info_mutex ); + + IBSP_ERROR( ("Statistics:\n") ); + IBSP_ERROR( ( + " overlap_h0_count = %d\n", g_ibsp.overlap_h0_count) ); + IBSP_ERROR( ( + " max_comp_count = %d\n", g_ibsp.max_comp_count) ); + IBSP_ERROR( ( + " overlap_h1_count = %d\n", g_ibsp.overlap_h1_count) ); + + IBSP_ERROR( (" send_count = %d\n", g_ibsp.send_count) ); + + IBSP_ERROR( (" total_send_count = %d\n", g_ibsp.total_send_count) ); + + IBSP_ERROR( (" total_recv_count = %d\n", g_ibsp.total_recv_count) ); + + IBSP_ERROR( (" total_recv_compleated = %d\n", g_ibsp.total_recv_compleated) ); + 
IBSP_ERROR( ( + " number of QPs left = %d\n", g_ibsp.qp_num) ); + IBSP_ERROR( ( + " number of CQs left = %d\n", g_ibsp.cq_num) ); + IBSP_ERROR( ( + " number of PDs left = %d\n", g_ibsp.pd_num) ); + IBSP_ERROR( ( + " number of ALs left = %d\n", g_ibsp.al_num) ); + IBSP_ERROR( ( + " number of MRs left = %d\n", g_ibsp.mr_num) ); + IBSP_ERROR( ( + " number of listens left = %d\n", g_ibsp.listen_num) ); + IBSP_ERROR( ( + " number of PNPs left = %d\n", g_ibsp.pnp_num) ); + IBSP_ERROR( ( + " number of threads left = %d\n", g_ibsp.thread_num) ); + IBSP_ERROR( ( + " number of WPU sockets left = %d\n", g_ibsp.wpusocket_num) ); + + IBSP_ERROR( ( + " CloseSocket_count = %d\n", g_ibsp.CloseSocket_count) ); + + } +#endif + release_globals(); +#ifdef PERFMON_ENABLED + IBSPPmClose(); +#endif + + +#if defined(EVENT_TRACING) + WPP_CLEANUP(); +#endif + break; + } + + IBSP_EXIT( IBSP_DBG_DLL ); + + return TRUE; +} +#pragma auto_inline( on ) + + +extern BOOL APIENTRY +_DllMainCRTStartupForGS( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ); + + +BOOL APIENTRY +DllMain( + IN HINSTANCE h_module, + IN DWORD ul_reason_for_call, + IN LPVOID lp_reserved ) +{ + switch( ul_reason_for_call ) + { + case DLL_PROCESS_ATTACH: + if( !_DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ) ) + { + return FALSE; + } + + return _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + case DLL_PROCESS_DETACH: + _DllMain( h_module, ul_reason_for_call, lp_reserved ); + + return _DllMainCRTStartupForGS( + h_module, ul_reason_for_call, lp_reserved ); + } + return TRUE; +} + + +static SOCKET +accept_socket( + IN struct ibsp_socket_info *p_socket, + IN struct listen_incoming *p_incoming, + IN struct ibsp_port *p_port, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *new_socket_info; + int ret; + + IBSP_ENTER( IBSP_DBG_CONN ); + + /* Create a new socket here */ + new_socket_info = create_socket_info( lpErrno ); + if( !new_socket_info ) + { + ib_reject( + p_incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_RESOURCES ); + + IBSP_ERROR_EXIT( ("create_socket_info failed (%d)\n", *lpErrno) ); + return INVALID_SOCKET; + } + + /* Time to allocate our IB QP */ + new_socket_info->port = p_port; + *lpErrno = ib_create_socket( new_socket_info ); + if( *lpErrno ) + { + deref_socket_info( new_socket_info ); + + ib_reject( + p_incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP ); + + IBSP_ERROR_EXIT( ("ib_create_socket failed (%d)\n", *lpErrno) ); + return INVALID_SOCKET; + } + + /* Store the IP address and port number in the socket context */ + new_socket_info->local_addr = p_incoming->params.dest; + + /* Copy the socket context info from parent socket context */ + new_socket_info->socket_options = p_socket->socket_options; + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("The socket address of connecting entity is\n") ); + DebugPrintSockAddr( IBSP_DBG_CONN, &p_incoming->params.source ); + + new_socket_info->peer_addr = p_incoming->params.source; + +#ifdef IBSP_LOGGING + DataLogger_Init( &new_socket_info->SendDataLogger, "Send", + &new_socket_info->peer_addr, &new_socket_info->local_addr ); + DataLogger_Init( &new_socket_info->RecvDataLogger, "Recv", + &new_socket_info->local_addr, &new_socket_info->peer_addr ); +#endif + + cl_spinlock_acquire( &new_socket_info->mutex1 ); + /* Update the state of the socket context */ + IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_CONNECTED ); + + *lpErrno = ib_accept( new_socket_info, &p_incoming->cm_req_received ); + if( *lpErrno ) + { + 
IBSP_CHANGE_SOCKET_STATE( new_socket_info, IBSP_CREATE ); + cl_spinlock_release( &new_socket_info->mutex1 ); + + if( *lpErrno == WSAEADDRINUSE ) + { + /* Be nice and reject that connection. */ + ib_reject( p_incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP ); + } + + g_ibsp.up_call_table.lpWPUCloseSocketHandle( + new_socket_info->switch_socket, &ret ); + new_socket_info->switch_socket = INVALID_SOCKET; + STAT_DEC( wpusocket_num ); + + ib_destroy_socket( new_socket_info ); + deref_socket_info( new_socket_info ); + return INVALID_SOCKET; + } + + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + cl_qlist_insert_tail( + &g_ibsp.socket_info_list, &new_socket_info->item ); + cl_spinlock_release( &g_ibsp.socket_info_mutex ); + + cl_spinlock_release( &new_socket_info->mutex1 ); + + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("returns new socket (0x%p)\n", new_socket_info) ); + return (SOCKET)new_socket_info; +} + + +/* Function: IBSPAccept + * + * Description: + * Handle the WSAAccept function. The only special consideration here + * is the conditional accept callback. You can choose to intercept + * this by substituting your own callback (you'll need to keep track + * of the user supplied callback so you can trigger that once your + * substituted function is triggered). + */ +static SOCKET WSPAPI +IBSPAccept( + IN SOCKET s, + OUT struct sockaddr FAR *addr, + IN OUT LPINT addrlen, + IN LPCONDITIONPROC lpfnCondition, + IN DWORD_PTR dwCallbackData, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + WSABUF caller_id; + WSABUF callee_id; + struct listen_incoming *incoming; + struct ibsp_port *port; + ib_cm_mra_t mra; + + IBSP_ENTER( IBSP_DBG_CONN ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + CL_ASSERT( lpfnCondition ); + + if( *addrlen < sizeof(struct sockaddr_in) ) + { + IBSP_ERROR_EXIT( ("invalid addrlen (%d, %d)\n", + *addrlen, sizeof(struct sockaddr_in)) ); + *lpErrno = WSAEFAULT; + return INVALID_SOCKET; + } + + /* Check if there is any pending connection for this socket. If + * there is one, create a socket, and then query the switch about + * the pending connection */ + + cl_spinlock_acquire( &socket_info->mutex1 ); + + /* Verify the state of the socket */ + if( socket_info->socket_state != IBSP_LISTEN ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("Socket is not in right socket_state (%s)\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAEINVAL; + return INVALID_SOCKET; + } + + if( cl_qlist_count( &socket_info->listen.list ) == 0 ) + { + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_ERROR_EXIT( ("No pending connection found for this socket\n") ); + *lpErrno = WSAEWOULDBLOCK; + return INVALID_SOCKET; + } + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("IBSPAccept: Found pending connection on this socket\n") ); + + incoming = PARENT_STRUCT(cl_qlist_remove_head( &socket_info->listen.list ), + struct listen_incoming, item); + + /* Signal the event again if there are more connection requests. */ + if( cl_qlist_count( &socket_info->listen.list ) ) + ibsp_post_select_event( socket_info, FD_ACCEPT, 0 ); + cl_spinlock_release( &socket_info->mutex1 ); + + port = socket_info->port; + + /* Find the destination IP address */ + if( !port ) + { + /* The socket was bound to INADDR_ANY. We must find the correct port + * for the new socket. 
*/ + port = get_port_from_ip_address( incoming->params.dest.sin_addr ); + if( !port ) + { + IBSP_ERROR( ("incoming destination IP address not local (%s)\n", + inet_ntoa( incoming->params.dest.sin_addr )) ); + goto reject; + } + } + + /* Cross-check with the path info to make sure we are connecting correctly */ + if( port->guid != ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid ) ) + { + IBSP_ERROR( ( + "GUIDs of port for destination IP address and primary path do not match (%016I64x, %016I64x)\n", + port->guid, + ib_gid_get_guid( &incoming->cm_req_received.primary_path.sgid )) ); + +reject: + ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_INSUF_QP ); + + HeapFree( g_ibsp.heap, 0, incoming ); + IBSP_ERROR_EXIT( ("bad incoming parameter\n") ); + *lpErrno = WSAECONNREFUSED; + return INVALID_SOCKET; + } + + /* + * Check against the conditional routine if socket can be created + * or not + */ + + /* Set the caller and callee data buffer */ + caller_id.buf = (char *)&incoming->params.source; + caller_id.len = sizeof(incoming->params.source); + + callee_id.buf = (char *)&incoming->params.dest; + callee_id.len = sizeof(incoming->params.dest); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("Got incoming conn from %s/%d-%d to %s/%d-%d\n", + inet_ntoa( incoming->params.source.sin_addr ), + cl_ntoh16( incoming->params.source.sin_port ), + incoming->params.source.sin_family, + inet_ntoa( incoming->params.dest.sin_addr ), + cl_ntoh16( incoming->params.dest.sin_port ), + incoming->params.dest.sin_family) ); + + /* Call the conditional function */ + switch( lpfnCondition( &caller_id, NULL, NULL, NULL, + &callee_id, NULL, NULL, dwCallbackData ) ) + { + default: + /* Should never happen */ + IBSP_ERROR( ("Conditional routine returned undocumented code\n") ); + /* Fall through. */ + + case CF_REJECT: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("Conditional routine returned CF_REJECT\n") ); + + ib_reject( incoming->cm_req_received.h_cm_req, IB_REJ_USER_DEFINED ); + + HeapFree( g_ibsp.heap, 0, incoming ); + *lpErrno = WSAECONNREFUSED; + IBSP_EXIT( IBSP_DBG_CONN ); + return INVALID_SOCKET; + + case CF_DEFER: + /* Send MRA */ + mra.mra_length = 0; + mra.p_mra_pdata = NULL; + mra.svc_timeout = 0x15; + ib_cm_mra( incoming->cm_req_received.h_cm_req, &mra ); + + /* Put the item back at the head of the list. */ + cl_spinlock_acquire( &socket_info->mutex1 ); + cl_qlist_insert_head( &socket_info->listen.list, &incoming->item ); + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("Conditional routine returned CF_DEFER\n") ); + + *lpErrno = WSATRY_AGAIN; + IBSP_EXIT( IBSP_DBG_CONN ); + return INVALID_SOCKET; + + case CF_ACCEPT: + break; + } + + s = accept_socket( socket_info, incoming, port, lpErrno ); + if( s != INVALID_SOCKET ) + { + /* Store the client socket address information */ + memcpy( addr, &incoming->params.source, sizeof(struct sockaddr_in) ); + *addrlen = sizeof(struct sockaddr_in); + } + + HeapFree( g_ibsp.heap, 0, incoming ); + + IBSP_EXIT( IBSP_DBG_CONN ); + return s; +} + + +/* Function: IBSPBind + * + * Description: + * Bind the socket to a local address.
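For context on the conditional-accept handling above (not from the patch): lpfnCondition is the application's WSAAccept callback, and CF_DEFER maps onto the CM MRA plus WSATRY_AGAIN path. A hypothetical application-side condition function with the documented LPCONDITIONPROC signature; the filtering policy is invented purely for illustration:

static int CALLBACK
sketch_accept_condition(
	IN				LPWSABUF					lpCallerId,
	IN				LPWSABUF					lpCallerData,
	IN	OUT			LPQOS						lpSQOS,
	IN	OUT			LPQOS						lpGQOS,
	IN				LPWSABUF					lpCalleeId,
	IN				LPWSABUF					lpCalleeData,
		OUT			GROUP FAR					*g,
	IN				DWORD_PTR					dwCallbackData )
{
	struct sockaddr_in	*peer = (struct sockaddr_in*)lpCallerId->buf;

	UNUSED_PARAM( lpCallerData );
	UNUSED_PARAM( lpSQOS );
	UNUSED_PARAM( lpGQOS );
	UNUSED_PARAM( lpCalleeId );
	UNUSED_PARAM( lpCalleeData );
	UNUSED_PARAM( g );
	UNUSED_PARAM( dwCallbackData );

	/* Accept peers on the 10.x.x.x fabric, refuse everyone else. */
	if( peer->sin_addr.S_un.S_un_b.s_b1 == 10 )
		return CF_ACCEPT;
	return CF_REJECT;
}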
+ * +*/ +static int WSPAPI +IBSPBind( + IN SOCKET s, + IN const struct sockaddr FAR *name, + IN int namelen, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + struct sockaddr_in *addr = (struct sockaddr_in *)name; + struct ibsp_port *port; + int ret; + + IBSP_ENTER( IBSP_DBG_CONN ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("Address to bind to:\n") ); + DebugPrintSockAddr( IBSP_DBG_CONN, addr ); + + fzprint(("binding to IP %s\n", inet_ntoa( addr->sin_addr ))); + + /* Sanity checks */ + if( namelen != sizeof(struct sockaddr_in) ) + { + IBSP_ERROR( ("invalid namelen (%d instead of %d)\n", + namelen, sizeof(struct sockaddr_in)) ); + *lpErrno = WSAEFAULT; + goto error; + } + + if( addr->sin_family != AF_INET ) + { + IBSP_ERROR( ("bad family for socket\n") ); + *lpErrno = WSAEFAULT; + goto error; + } + + /* Check if the ip address is assigned to one of our IBoIB HCA. */ + if( addr->sin_addr.S_un.S_addr != INADDR_ANY ) + { + port = get_port_from_ip_address( addr->sin_addr ); + if( port == NULL ) + { + IBSP_ERROR( ( + "This IP address does not belong to that host (%08x)\n", + addr->sin_addr.S_un.S_addr) ); + *lpErrno = WSAEADDRNOTAVAIL; + goto error; + } + } + else + { + port = NULL; + } + + /* We are going to take this mutex for some time, + * but at this stage, it shouldn't impact anything. */ + cl_spinlock_acquire( &socket_info->mutex1 ); + + /* Verify the state of the socket */ + if( socket_info->socket_state != IBSP_CREATE ) + { + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR( ( + "Invalid socket state (%s)\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAEINVAL; + goto error; + } + + if( addr->sin_addr.S_un.S_addr != INADDR_ANY ) + { + /* Time to allocate our IB QP */ + socket_info->port = port; + ret = ib_create_socket( socket_info ); + if( ret ) + { + socket_info->port = NULL; + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR( ("ib_create socket failed with %d\n", ret) ); + *lpErrno = WSAENOBUFS; + goto error; + } + } + + /* Success */ + socket_info->local_addr = *addr; + + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + + cl_spinlock_release( &socket_info->mutex1 ); + + IBSP_EXIT( IBSP_DBG_CONN ); + return 0; + +error: + CL_ASSERT( *lpErrno != 0 ); + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("failed with error %d\n", *lpErrno) ); + return SOCKET_ERROR; +} + + +/* Function: IBSPCloseSocket + * + * Description: + * Close the socket handle of the app socket as well as the provider socket. + * However, if there are outstanding async IO requests on the app socket + * we only close the provider socket. Only when all the IO requests complete + * (with error) will we then close the app socket. 
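A caller-side sketch for IBSPBind above, not part of the patch: the provider insists on an exact sizeof(struct sockaddr_in) name length, AF_INET, and either INADDR_ANY (which defers QP creation until connect or accept time) or an address owned by an IB port. The address and port below are illustrative only:

static int
sketch_bind_ipoib(
	SOCKET						s )
{
	struct sockaddr_in	local;

	cl_memclr( &local, sizeof(local) );
	local.sin_family = AF_INET;
	local.sin_port = htons( 5001 );						/* arbitrary example port */
	local.sin_addr.s_addr = inet_addr( "10.0.0.1" );	/* hypothetical IPoIB address */

	/* Through the SAN path this request ends up in IBSPBind above. */
	return bind( s, (struct sockaddr*)&local, sizeof(local) );
}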
+ */ +static int WSPAPI +IBSPCloseSocket( + SOCKET s, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + + IBSP_ENTER( IBSP_DBG_CONN ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p \n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + if( s == INVALID_SOCKET ) + { + IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) ); + *lpErrno = WSAENOTSOCK; + return SOCKET_ERROR; + } +#ifdef _DEBUG_ + cl_atomic_inc( &g_ibsp.CloseSocket_count ); +#endif + + shutdown_and_destroy_socket_info( socket_info ); + + IBSP_EXIT( IBSP_DBG_CONN ); + + *lpErrno = 0; + return 0; +} + + +/* Function: IBSPConnect + * + * Description: + * Performs a connect call. The only thing we need to do is translate + * the socket handle. + */ +static int WSPAPI +IBSPConnect( + SOCKET s, + const struct sockaddr FAR *name, + int namelen, + LPWSABUF lpCallerData, + LPWSABUF lpCalleeData, + LPQOS lpSQOS, + LPQOS lpGQOS, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + struct sockaddr_in *addr = (struct sockaddr_in *)name; + int ret; + ib_net64_t dest_port_guid; + ib_path_rec_t path_rec; + + IBSP_ENTER( IBSP_DBG_CONN ); + + UNUSED_PARAM( lpCalleeData ); + UNUSED_PARAM( lpSQOS ); + UNUSED_PARAM( lpGQOS ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p state=%s\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), s, IBSP_SOCKET_STATE_STR( socket_info->socket_state ))); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, + ("lpCallerData=%p, lpCalleeData=%p\n", lpCallerData, lpCalleeData) ); + + /* Sanity checks */ + if( lpCallerData ) + { + /* We don't support that. The current switch does not use it. */ + IBSP_ERROR_EXIT( ("lpCallerData.len=%d\n", lpCallerData->len) ); + *lpErrno = WSAEINVAL; + return SOCKET_ERROR; + } + + if( namelen < sizeof(struct sockaddr_in) ) + { + IBSP_ERROR_EXIT( ( + "invalid remote address (%d)\n", socket_info->socket_state) ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + /* Check if the name (actually address) of peer entity is correct */ + if( addr->sin_family != AF_INET || + addr->sin_port == 0 || addr->sin_addr.s_addr == INADDR_ANY ) + { + IBSP_ERROR_EXIT( ("peer entity address is invalid (%d, %d, %x)\n", + addr->sin_family, addr->sin_port, addr->sin_addr.s_addr) ); + *lpErrno = WSAEADDRNOTAVAIL; + return SOCKET_ERROR; + } + + if( socket_info->local_addr.sin_addr.S_un.S_addr == addr->sin_addr.S_un.S_addr ) + { + /* Loopback - let the regular stack take care of that. */ + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("Loopback!\n") ); + *lpErrno = WSAEADDRNOTAVAIL; + return SOCKET_ERROR; + } + + /* Get the GUID for that IP address. 
*/ + ret = query_guid_address( socket_info->port, addr->sin_addr.s_addr, &dest_port_guid ); + if( ret ) + { + IBSP_ERROR_EXIT( ("query_guid_address failed for IP %08x\n", + addr->sin_addr.s_addr) ); + *lpErrno = g_connect_err; + return SOCKET_ERROR; + } + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("got GUID %I64x for IP %s\n", + CL_NTOH64( dest_port_guid ), inet_ntoa( addr->sin_addr )) ); + + if( dest_port_guid == socket_info->port->guid ) + { + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("Loopback!\n") ); + *lpErrno = WSAEADDRNOTAVAIL; + return SOCKET_ERROR; + } + + /* Get the path record */ + ret = query_pr( socket_info->port, dest_port_guid, &path_rec ); + if( ret ) + { + IBSP_ERROR_EXIT( ( + "query_pr failed for IP %08x\n", addr->sin_addr.s_addr) ); + *lpErrno = g_connect_err; + return SOCKET_ERROR; + } + + cl_spinlock_acquire( &socket_info->mutex1 ); + + /* Verify the state of the socket */ + switch( socket_info->socket_state ) + { + case IBSP_BIND: + /* Good. That's the only valid state we want. */ + break; + + case IBSP_CONNECTED: + IBSP_ERROR( ("Socket is already connected\n") ); + *lpErrno = WSAEISCONN; + goto done; + + case IBSP_LISTEN: + IBSP_ERROR( ("Socket is a listening socket\n") ); + *lpErrno = WSAEINVAL; + goto done; + + default: + IBSP_ERROR( ("Socket is not in the bound state (%s)\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAEINVAL; + goto done; + } + + /* Store the peer entity's address in socket context */ + socket_info->peer_addr = *addr; + +#ifdef IBSP_LOGGING + DataLogger_Init( &socket_info->SendDataLogger, "Send", + &socket_info->peer_addr, &socket_info->local_addr ); + DataLogger_Init( &socket_info->RecvDataLogger, "Recv", + &socket_info->local_addr, &socket_info->peer_addr ); +#endif + + /* Update the socket state */ + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_CONNECT ); + + /* Connect */ + *lpErrno = ib_connect( socket_info, &path_rec ); + if( *lpErrno != WSAEWOULDBLOCK ) + { + /* We must be sure none destroyed our socket */ + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + memset( &socket_info->peer_addr, 0, sizeof(struct sockaddr_in) ); + + IBSP_ERROR( ("ib_connect failed (%d)\n", *lpErrno) ); + } + +done: + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_EXIT( IBSP_DBG_CONN ); + return SOCKET_ERROR; +} + + +/* Function: IBSPEnumNetworkEvents + * + * Description: + * Enumerate the network events for a socket. We only need to + * translate the socket handle. 
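A sketch, not from the patch, of the hand-off the connect path above depends on: ib_connect returns WSAEWOULDBLOCK and the CM callback later records the outcome that IBSPEnumNetworkEvents (below) reports through errno_connect and FD_CONNECT. This is modeled on the ibsp_post_select_event() helper the accept path uses; the function name and the exact interlocked sequence are assumptions, not the patch's actual code:

static void
sketch_post_connect_event(
	IN				struct ibsp_socket_info		*socket_info,
	IN				int							error )
{
	long	events;

	/* Remember the result for the FD_CONNECT_BIT slot read below. */
	socket_info->errno_connect = error;

	/* Latch the FD_CONNECT bit; IBSPEnumNetworkEvents exchanges it to 0. */
	do
	{
		events = socket_info->network_events;
	} while( InterlockedCompareExchange( &socket_info->network_events,
		events | FD_CONNECT, events ) != events );

	/* Wake anyone registered through IBSPEventSelect. */
	if( (socket_info->event_mask & FD_CONNECT) && socket_info->event_select )
		SetEvent( socket_info->event_select );
}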
+*/ +static int WSPAPI +IBSPEnumNetworkEvents( + SOCKET s, + WSAEVENT hEventObject, + LPWSANETWORKEVENTS lpNetworkEvents, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + + IBSP_ENTER( IBSP_DBG_NEV ); + + if( hEventObject != NULL ) + { + ResetEvent( hEventObject ); + } + + lpNetworkEvents->lNetworkEvents = + InterlockedExchange( &socket_info->network_events, 0 ); + + if( lpNetworkEvents->lNetworkEvents & FD_ACCEPT ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("socket %p notify FD_ACCEPT at time %I64d\n", + socket_info, cl_get_time_stamp()) ); + lpNetworkEvents->iErrorCode[FD_ACCEPT_BIT] = 0; + } + + if( lpNetworkEvents->lNetworkEvents & FD_CONNECT ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("socket %p notify FD_CONNECT %d at time %I64d\n", + socket_info, socket_info->errno_connect, cl_get_time_stamp()) ); + lpNetworkEvents->iErrorCode[FD_CONNECT_BIT] = socket_info->errno_connect; + } + + *lpErrno = 0; + IBSP_EXIT( IBSP_DBG_NEV ); + return 0; +} + + +/* Function: IBSPEventSelect + * + * Description: + * Register the specified events on the socket with the given event handle. + * All we need to do is translate the socket handle. + */ +static int WSPAPI +IBSPEventSelect( + SOCKET s, + WSAEVENT hEventObject, + long lNetworkEvents, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + long events; + + IBSP_ENTER( IBSP_DBG_NEV ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("Socket %Ix requesting notification of %d on event %p.\n", + s, lNetworkEvents, hEventObject)); + + if( (lNetworkEvents & ~(FD_ACCEPT | FD_CONNECT)) != 0 ) + { + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION,IBSP_DBG_NEV, + ("Unknown lNetworkEvents flag given (%x)\n", lNetworkEvents) ); + *lpErrno = WSAEINVAL; + return SOCKET_ERROR; + } + + CL_ASSERT( lpErrno ); + + socket_info->event_mask = lNetworkEvents; + InterlockedExchangePointer( &socket_info->event_select, hEventObject ); + + events = InterlockedCompareExchange( &socket_info->network_events, 0, 0 ); + /* Check for existing events and signal as appropriate. */ + if( (socket_info->event_mask & events) && hEventObject ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_NEV, + ("Signaling eventHandle %p.\n", socket_info->event_select) ); + SetEvent( hEventObject ); + } + + IBSP_EXIT( IBSP_DBG_NEV ); + return 0; +} + + +/* Function: IBSPGetOverlappedResult + * + * Description: + * This function reports whether the specified overlapped call has + * completed. If it has, return the requested information. If not, + * and fWait is true, wait until completion. Otherwise return an + * error immediately. + */ +static BOOL WSPAPI +IBSPGetOverlappedResult( + IN SOCKET s, + IN LPWSAOVERLAPPED lpOverlapped, + OUT LPDWORD lpcbTransfer, + IN BOOL fWait, + OUT LPDWORD lpdwFlags, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *p_socket_info; + BOOL rc; + + IBSP_ENTER( IBSP_DBG_IO ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p Internal=%d InternalHigh=%d OffsetHigh=%d hEvent=%d\n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped, lpOverlapped->Internal, lpOverlapped->InternalHigh, lpOverlapped->OffsetHigh, lpOverlapped->hEvent)); + + CL_ASSERT( fWait == FALSE ); + if( fWait == TRUE ) + { + IBSP_ERROR_EXIT( ("fWait not supported\n") ); + *lpErrno = WSAENETDOWN; + return FALSE; + } + + if( s == INVALID_SOCKET ) + { + /* Seen in real life with overlap/client test. + * The switch closes a socket then calls this.
Why? */ + IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) ); + *lpErrno = WSAENOTSOCK; + return FALSE; + } + + if( lpOverlapped->Internal == WSS_OPERATION_IN_PROGRESS ) + { + p_socket_info = (struct ibsp_socket_info*)s; + /* Poll just in case it's done. */ + ib_cq_comp( p_socket_info->cq_tinfo ); + } + + if( lpOverlapped->Internal != WSS_OPERATION_IN_PROGRESS ) + { + /* Operation has completed, perhaps with error */ + *lpdwFlags = lpOverlapped->Offset; + *lpErrno = lpOverlapped->OffsetHigh; + +#ifdef _DEBUG_ + if( ((uintptr_t) lpOverlapped->hEvent) & 0x00000001 ) + { + cl_atomic_dec( &g_ibsp.overlap_h1_count ); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), + lpOverlapped, g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); + } + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p completed overlap=0x%x overlap_h0_count=%d overlap_h1_count=%d\n", __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped, g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count)); +#endif + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("socket=%p completed ov=%p\n", (void*)s, lpOverlapped)); + } + else + { + /* Operation is still in progress */ + *lpErrno = WSA_IO_INCOMPLETE; + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("socket=%p ov=%p hEvent=%p, operation in progress\n", + (void*)s, lpOverlapped, lpOverlapped->hEvent)); + } + + *lpcbTransfer = (DWORD)lpOverlapped->InternalHigh; + + if( *lpErrno == 0 ) + rc = TRUE; + else + rc = FALSE; + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlapped=0x%p lpErrno=%d rc=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, + lpOverlapped, *lpErrno, rc)); + + if (rc == TRUE) CL_ASSERT(*lpcbTransfer > 0); + + IBSP_EXIT( IBSP_DBG_IO ); + return rc; +} + + +/* Function: IBSPGetSockOpt + * + * Description: + * Get the specified socket option. All we need to do is translate the + * socket handle.
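A usage sketch for the overlapped protocol above, not part of the patch: fWait must stay FALSE, and WSA_IO_INCOMPLETE is reported while lpOverlapped->Internal still holds WSS_OPERATION_IN_PROGRESS, so callers poll. A hypothetical application-side loop through the user-mode API that funnels into this routine:

static BOOL
sketch_poll_overlapped(
	SOCKET						s,
	LPWSAOVERLAPPED				p_ov,
	LPDWORD						p_bytes )
{
	DWORD	flags;

	/* fWait must stay FALSE for this provider; poll instead of blocking. */
	while( !WSAGetOverlappedResult( s, p_ov, p_bytes, FALSE, &flags ) )
	{
		if( WSAGetLastError() != WSA_IO_INCOMPLETE )
			return FALSE;		/* the operation really failed */
		SwitchToThread();		/* yield the CPU, then poll again */
	}
	return TRUE;
}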
+ */ +static int WSPAPI +IBSPGetSockOpt( + SOCKET s, + int level, + int optname, + char FAR *optval, + LPINT optlen, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + + IBSP_ENTER( IBSP_DBG_OPT ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + if( level != SOL_SOCKET ) + { + IBSP_ERROR_EXIT( ("invalid level %d", level) ); + *lpErrno = WSAENOPROTOOPT; + return SOCKET_ERROR; + } + + if( optval == NULL || optlen == NULL ) + { + IBSP_ERROR_EXIT( ("invalid optval=%p or optlen=%p", optval, optlen) ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + switch( optname ) + { + case SO_DEBUG: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_DEBUG\n") ); + if( *optlen < sizeof(BOOL) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(BOOL); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.debug, sizeof(BOOL) ); + *optlen = sizeof(BOOL); + break; + + case SO_GROUP_ID: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_GROUP_ID\n") ); + if( *optlen < sizeof(GROUP) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(GROUP); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.group_id, sizeof(GROUP) ); + *optlen = sizeof(GROUP); + break; + + case SO_GROUP_PRIORITY: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_GROUP_PRIORITY\n") ); + + if( *optlen < sizeof(int) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(int); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.group_priority, sizeof(int) ); + *optlen = sizeof(int); + break; + + case SO_MAX_MSG_SIZE: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_MAX_MSG_SIZE\n") ); + + if( *optlen < sizeof(unsigned int) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(unsigned int); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.max_msg_size, sizeof(unsigned int) ); + *optlen = sizeof(unsigned int); + break; + + case SO_MAX_RDMA_SIZE: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_MAX_RDMA_SIZE\n") ); + + if( *optlen < sizeof(unsigned int) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(unsigned int); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.max_rdma_size, sizeof(unsigned int) ); + *optlen = sizeof(unsigned int); + break; + + case SO_RDMA_THRESHOLD_SIZE: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_RDMA_THRESHOLD_SIZE\n") ); + + if( *optlen < sizeof(unsigned int) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", *optlen) ); + *optlen = sizeof(unsigned int); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + memcpy( optval, &socket_info->socket_options.rdma_threshold_size, + sizeof(unsigned int) ); + *optlen = sizeof(unsigned int); + break; + + default: + *lpErrno = WSAENOPROTOOPT; + + IBSP_ERROR_EXIT( ("unknown option 0x%x\n", optname) ); + + return SOCKET_ERROR; + break; + } + + IBSP_EXIT( IBSP_DBG_OPT ); + return 0; +} + + +/* Function: IBSPGetQOSByName + * + * Description: + * Get a QOS template by name. 
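A caller-side sketch for the SAN-specific options above, not from the patch, assuming ws2san.h supplies the SO_RDMA_THRESHOLD_SIZE constant; each of these options is answered as a plain unsigned int:

static int
sketch_query_rdma_threshold(
	SOCKET						s,
	unsigned int				*p_threshold )
{
	int		len = sizeof(unsigned int);

	/* The switch forwards this to the SO_RDMA_THRESHOLD_SIZE case above. */
	return getsockopt( s, SOL_SOCKET, SO_RDMA_THRESHOLD_SIZE,
		(char*)p_threshold, &len );
}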
All we need to do is translate the socket + * handle. + */ +static BOOL WSPAPI +IBSPGetQOSByName( + SOCKET s, + LPWSABUF lpQOSName, + LPQOS lpQOS, + LPINT lpErrno ) +{ + IBSP_ENTER( IBSP_DBG_OPT ); + + UNUSED_PARAM( s ); + UNUSED_PARAM( lpQOSName ); + UNUSED_PARAM( lpQOS ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + *lpErrno = WSAEOPNOTSUPP; + + IBSP_ERROR_EXIT( ("not supported\n") ); + + return FALSE; +} + + +/* Function: IBSPIoctl + * + * Description: + * Invoke an ioctl. In most cases, we just need to translate the socket + * handle. However, if the dwIoControlCode is SIO_GET_EXTENSION_FUNCTION_POINTER, + * we'll need to intercept this and return our own function pointers. + */ +static int WSPAPI +IBSPIoctl( + IN SOCKET s, + IN DWORD dwIoControlCode, + IN LPVOID lpvInBuffer, + IN DWORD cbInBuffer, + OUT LPVOID lpvOutBuffer, + IN DWORD cbOutBuffer, + OUT LPDWORD lpcbBytesReturned, + IN LPWSAOVERLAPPED lpOverlapped, + IN LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + IN LPWSATHREADID lpThreadId, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info; + GUID SANRegisterMemory = WSAID_REGISTERMEMORY; + GUID SANDeregisterMemory = WSAID_DEREGISTERMEMORY; + GUID SANRegisterRDMAMemory = WSAID_REGISTERRDMAMEMORY; + GUID SANDeregisterRDMAMemory = WSAID_DEREGISTERRDMAMEMORY; + GUID SANRDMAWrite = WSAID_RDMAWRITE; + GUID SANRDMARead = WSAID_RDMAREAD; + GUID SANMemoryRegistrationCacheCallback = WSAID_MEMORYREGISTRATIONCACHECALLBACK; + + IBSP_ENTER( IBSP_DBG_OPT ); + + UNUSED_PARAM( cbInBuffer ); + UNUSED_PARAM( lpOverlapped ); + UNUSED_PARAM( lpCompletionRoutine ); + UNUSED_PARAM( lpThreadId ); + + if( dwIoControlCode == SIO_GET_EXTENSION_FUNCTION_POINTER ) + { + /* This a special case. The socket handle passed is not valid. 
*/ + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Get extension function pointer\n") ); + + if( memcmp( lpvInBuffer, &SANRegisterMemory, sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPREGISTERMEMORY *) lpvOutBuffer) = IBSPRegisterMemory; + } + else if( memcmp( lpvInBuffer, &SANDeregisterMemory, sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPDEREGISTERMEMORY *) lpvOutBuffer) = IBSPDeregisterMemory; + } + else if( memcmp( lpvInBuffer, &SANRegisterRDMAMemory, sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPREGISTERRDMAMEMORY *) lpvOutBuffer) = IBSPRegisterRdmaMemory; + } + else if( memcmp( lpvInBuffer, &SANDeregisterRDMAMemory, sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPDEREGISTERRDMAMEMORY *) lpvOutBuffer) = IBSPDeregisterRdmaMemory; + } + else if( memcmp( lpvInBuffer, &SANRDMAWrite, sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPRDMAWRITE *) lpvOutBuffer ) = IBSPRdmaWrite; + } + else if( memcmp( lpvInBuffer, &SANRDMARead, sizeof(GUID) ) == 0 ) + { + if( no_read ) + { + IBSP_PRINT(TRACE_LEVEL_WARNING, IBSP_DBG_OPT, + ("RDMA_READ disabled.\n") ); + *lpErrno = WSAEOPNOTSUPP; + return SOCKET_ERROR; + } + else + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPRDMAREAD *) lpvOutBuffer ) = IBSPRdmaRead; + } + } + else if( memcmp( lpvInBuffer, &SANMemoryRegistrationCacheCallback, + sizeof(GUID) ) == 0 ) + { + /* Return a pointer to our intermediate extension function */ + *((LPFN_WSPMEMORYREGISTRATIONCACHECALLBACK *) lpvOutBuffer ) = + IBSPMemoryRegistrationCacheCallback; + } + else + { + IBSP_ERROR_EXIT( ("invalid extension GUID\n") ); + *lpErrno = WSAEINVAL; + return SOCKET_ERROR; + } + IBSP_EXIT( IBSP_DBG_OPT ); + return 0; + } + + socket_info = (struct ibsp_socket_info *)s; + + /* Verify the state of the socket */ + /* Not sure which state socket should be in to receive this call */ + DebugPrintIBSPIoctlParams( IBSP_DBG_OPT, + dwIoControlCode, + lpvInBuffer, + cbInBuffer, + lpvOutBuffer, + cbOutBuffer, lpOverlapped, lpCompletionRoutine, lpThreadId ); + + switch( dwIoControlCode ) + { + case SIO_GET_QOS: + case SIO_GET_GROUP_QOS: + case SIO_SET_QOS: + case SIO_SET_GROUP_QOS: + /* We don't support that. dwServiceFlags1 in installSP + * wasn't set. */ + IBSP_ERROR_EXIT( ("unsupported dwIoControlCode %d\n", dwIoControlCode) ); + *lpErrno = WSAENOPROTOOPT; + return SOCKET_ERROR; + break; + + case SIO_ADDRESS_LIST_QUERY: + { + int ret; + + *lpcbBytesReturned = cbOutBuffer; + ret = build_ip_list( (LPSOCKET_ADDRESS_LIST)lpvOutBuffer, + lpcbBytesReturned, lpErrno ); + + IBSP_EXIT( IBSP_DBG_OPT ); + return ret; + } + break; + + default: + IBSP_ERROR_EXIT( ("invalid dwIoControlCode %d\n", dwIoControlCode) ); + + *lpErrno = WSAENOPROTOOPT; + return SOCKET_ERROR; + break; + } + + /* unreachable */ +} + + +/* Function: IBSPListen + * + * Description: + * This function establishes a socket to listen for incoming connections. It sets + * the backlog value on a listening socket. 
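For context on SIO_GET_EXTENSION_FUNCTION_POINTER above (not part of the patch): this is how the SAN switch obtains the provider's RDMA entry points, and the socket handle is ignored for that control code. A sketch of the switch-side lookup through the dispatch table returned by WSPStartup; the names are illustrative:

static LPFN_WSPRDMAWRITE
sketch_get_rdma_write(
	SOCKET						s,
	LPWSPPROC_TABLE				proc_table )
{
	GUID				guid = WSAID_RDMAWRITE;
	LPFN_WSPRDMAWRITE	pfn = NULL;
	DWORD				cb;
	INT					err;

	if( proc_table->lpWSPIoctl( s, SIO_GET_EXTENSION_FUNCTION_POINTER,
		&guid, sizeof(guid), &pfn, sizeof(pfn), &cb,
		NULL, NULL, NULL, &err ) == SOCKET_ERROR )
	{
		return NULL;	/* e.g. WSAEINVAL for an unknown GUID */
	}
	return pfn;
}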
+ */ +static int WSPAPI +IBSPListen( + SOCKET s, + int backlog, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + int ret; + + IBSP_ENTER( IBSP_DBG_CONN ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + cl_spinlock_acquire( &socket_info->mutex1 ); + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_CONN, ("socket_state is %s\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + + /* Verify the state of the socket */ + switch( socket_info->socket_state ) + { + case IBSP_BIND: + + /* Store the backlog value in the context */ + socket_info->listen.backlog = backlog; + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_LISTEN ); + + socket_info->listen.listen_req_param.dwProcessId = 0; + cl_memclr( &socket_info->listen.listen_req_param.identifier, + sizeof(socket_info->listen.listen_req_param.identifier) ); + + ret = ib_listen( socket_info ); + if( ret ) + { + IBSP_CHANGE_SOCKET_STATE( socket_info, IBSP_BIND ); + IBSP_ERROR_EXIT( ("ib_listen failed with %d\n", ret) ); + } + break; + + case IBSP_LISTEN: + /* Change the backlog */ + ib_listen_backlog( socket_info, backlog ); + ret = 0; + break; + + default: + IBSP_ERROR( ("Invalid socket_state (%s)\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + ret = WSAEINVAL; + break; + } + + cl_spinlock_release( &socket_info->mutex1 ); + + *lpErrno = ret; + + IBSP_EXIT( IBSP_DBG_CONN ); + if( ret ) + return SOCKET_ERROR; + else + return 0; +} + + +/* Function: IBSPRecv + * + * Description: + * This function receives data on a given socket and also allows for asynchronous + * (overlapped) operation. First translate the socket handle to the lower provider + * handle and then make the receive call. If called with overlap, post the operation + * to our IOCP or completion routine. +*/ +static int WSPAPI +IBSPRecv( + SOCKET s, + LPWSABUF lpBuffers, + DWORD dwBufferCount, + LPDWORD lpNumberOfBytesRecvd, + LPDWORD lpFlags, + LPWSAOVERLAPPED lpOverlapped, + LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + LPWSATHREADID lpThreadId, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + ib_api_status_t status; + struct memory_node *node; + struct _recv_wr *wr; + DWORD ds_idx; + + IBSP_ENTER( IBSP_DBG_IO ); + + UNUSED_PARAM( lpNumberOfBytesRecvd ); + UNUSED_PARAM( lpCompletionRoutine ); + UNUSED_PARAM( lpThreadId ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + CL_ASSERT( lpCompletionRoutine == NULL ); + CL_ASSERT( lpOverlapped != NULL ); + + if( s == INVALID_SOCKET ) + { + /* Seen in real life with overlap/client test. + * The switch closes a socket then calls this. Why? 
*/ + IBSP_PRINT_EXIT(TRACE_LEVEL_WARNING, IBSP_DBG_IO, + ("invalid socket handle %Ix\n", s) ); + *lpErrno = WSAENOTSOCK; + return SOCKET_ERROR; + } + + cl_spinlock_acquire( &socket_info->mutex1 ); + switch( socket_info->socket_state ) + { + case IBSP_CONNECTED: + case IBSP_DISCONNECTED: + break; + + default: + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("Socket is not in connected socket_state state=%s\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAENOTCONN; + return SOCKET_ERROR; + } + cl_spinlock_release( &socket_info->mutex1 ); + + if( socket_info->qp_error != 0 ) + { + IBSP_ERROR_EXIT( ("QP is in error state %d\n", socket_info->qp_error) ); + *lpErrno = socket_info->qp_error; + return SOCKET_ERROR; + } + + /* This function only works for that case. Right now the switch is + * only using that. */ + if( dwBufferCount > QP_ATTRIB_RQ_SGE ) + { + CL_ASSERT( dwBufferCount <= QP_ATTRIB_RQ_SGE ); + IBSP_ERROR_EXIT( ("dwBufferCount is greater than %d\n", + QP_ATTRIB_RQ_SGE) ); + *lpErrno = WSAEINVAL; + return SOCKET_ERROR; + } + + cl_spinlock_acquire( &socket_info->recv_lock ); + if( socket_info->recv_cnt == QP_ATTRIB_RQ_DEPTH ) + { + /* This should never happen */ + cl_spinlock_release( &socket_info->recv_lock ); + IBSP_ERROR_EXIT( ("not enough wr on the free list\n") ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + wr = &socket_info->recv_wr[socket_info->recv_idx]; + + wr->wr.lpOverlapped = lpOverlapped; + wr->wr.socket_info = socket_info; + + /* Looks good. Post the receive buffer. */ + wr->recv.p_next = NULL; + wr->recv.wr_id = (uint64_t)(void* __ptr64)wr; + wr->recv.num_ds = dwBufferCount; + wr->recv.ds_array = wr->ds_array; + + for( ds_idx = 0; ds_idx < dwBufferCount; ds_idx++ ) + { + /* Get the memory region node */ + node = lookup_partial_mr( socket_info, IB_AC_LOCAL_WRITE, + lpBuffers[ds_idx].buf, lpBuffers[ds_idx].len ); + if( !node ) + { + cl_spinlock_release( &socket_info->recv_lock ); + /* + * No mr fits. This should never happen. This error is not + * official, but seems to be the closest. + */ + IBSP_ERROR_EXIT( ("no MR found\n") ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + wr->ds_array[ds_idx].vaddr = + (uint64_t)(void* __ptr64)lpBuffers[ds_idx].buf; + wr->ds_array[ds_idx].length = lpBuffers[ds_idx].len; + wr->ds_array[ds_idx].lkey = node->p_reg->lkey; + } + + /* + * We must set this now, because the operation could complete + * before ib_post_Recv returns. 
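+ * (The completion upcall can run on the CQ thread as soon as the WQE
+ * is posted, so lpOverlapped->Internal must already read
+ * WSS_OPERATION_IN_PROGRESS by that point.)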
+ */ + lpOverlapped->Internal = WSS_OPERATION_IN_PROGRESS; + + /* Store the flags for reporting back in IBSPGetOverlappedResult */ + lpOverlapped->Offset = *lpFlags; + + cl_atomic_inc( &socket_info->recv_cnt ); + +#ifdef _DEBUG_ + if( lpOverlapped->hEvent == 0 ) + { + cl_atomic_inc( &g_ibsp.overlap_h0_count ); + } + else + { + cl_atomic_inc( &g_ibsp.overlap_h1_count ); + cl_atomic_inc( &g_ibsp.overlap_h1_comp_count ); + } + + cl_atomic_inc( &g_ibsp.recv_count ); + cl_atomic_inc( &g_ibsp.total_recv_count ); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); +#endif + +#ifdef IBSP_LOGGING + wr->idx = socket_info->recv_log_idx++; +#endif + + fzprint(("%s():%d:0x%x:0x%x: posting RECV socket=0x%p overlap=%p wr=0x%p\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, + lpOverlapped, wr)); + + status = ib_post_recv( socket_info->qp, &wr->recv, NULL ); + + if( status == IB_SUCCESS ) + { + /* Update the index and wrap as needed */ +#if QP_ATTRIB_RQ_DEPTH == 256 || QP_ATTRIB_RQ_DEPTH == 128 || \ + QP_ATTRIB_RQ_DEPTH == 64 || QP_ATTRIB_RQ_DEPTH == 32 || \ + QP_ATTRIB_RQ_DEPTH == 16 || QP_ATTRIB_RQ_DEPTH == 8 + socket_info->recv_idx++; + socket_info->recv_idx &= (QP_ATTRIB_RQ_DEPTH - 1); +#else + if( ++socket_info->recv_idx == QP_ATTRIB_RQ_DEPTH ) + socket_info->recv_idx = 0; +#endif + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("Posted RECV: socket=%p, ov=%p, addr=%p, len=%d\n", + (void*)s, lpOverlapped, lpBuffers[0].buf, lpBuffers[0].len)); + + *lpErrno = WSA_IO_PENDING; + } + else + { + IBSP_ERROR( ("ib_post_recv returned %s\n", ib_get_err_str( status )) ); +#ifdef _DEBUG_ + if( lpOverlapped->hEvent == 0 ) + { + cl_atomic_dec( &g_ibsp.overlap_h0_count ); + } + else + { + cl_atomic_dec( &g_ibsp.overlap_h1_count ); + cl_atomic_dec( &g_ibsp.overlap_h1_comp_count ); + } + + cl_atomic_dec( &g_ibsp.recv_count ); + + memset( wr, 0x33, sizeof(struct _recv_wr) ); +#endif + + cl_atomic_dec( &socket_info->recv_cnt ); + *lpErrno = ibal_to_wsa_error( status ); + } + + cl_spinlock_release( &socket_info->recv_lock ); + + /* We never complete the operation here. */ + IBSP_EXIT( IBSP_DBG_IO ); + return SOCKET_ERROR; +} + + +/* Function: IBSPSend + * + * Description: + * This function sends data on a given socket and also allows for asynchronous + * (overlapped) operation. First translate the socket handle to the lower provider + * handle and then make the send call. 
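+ * Sends whose total length fits within the QP's max_inline limit are
+ * posted with IB_SEND_OPT_INLINE and skip the memory-registration
+ * lookup (see below).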
+*/ +static int WSPAPI +IBSPSend( + IN SOCKET s, + IN LPWSABUF lpBuffers, + IN DWORD dwBufferCount, + OUT LPDWORD lpNumberOfBytesSent, + IN DWORD dwFlags, + IN LPWSAOVERLAPPED lpOverlapped, + IN LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + IN LPWSATHREADID lpThreadId, + OUT LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + ib_api_status_t status; + struct memory_node *node; + struct _wr *wr; + ib_send_wr_t send_wr; + ib_local_ds_t local_ds[QP_ATTRIB_SQ_SGE]; + DWORD ds_idx; + + IBSP_ENTER( IBSP_DBG_IO ); + + UNUSED_PARAM( lpNumberOfBytesSent ); + UNUSED_PARAM( lpCompletionRoutine ); + UNUSED_PARAM( lpThreadId ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p overlap=%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s, lpOverlapped)); + + if( s == INVALID_SOCKET ) + { + IBSP_ERROR_EXIT( ("invalid socket handle %Ix\n", s) ); + *lpErrno = WSAENOTSOCK; + return SOCKET_ERROR; + } + + CL_ASSERT( lpCompletionRoutine == NULL ); + CL_ASSERT( lpOverlapped != NULL ); + + cl_spinlock_acquire( &socket_info->mutex1 ); + switch( socket_info->socket_state ) + { + case IBSP_CONNECTED: + case IBSP_DISCONNECTED: + break; + + default: + cl_spinlock_release( &socket_info->mutex1 ); + IBSP_ERROR_EXIT( ("Socket is not in connected socket_state state=%s\n", + IBSP_SOCKET_STATE_STR( socket_info->socket_state )) ); + *lpErrno = WSAENOTCONN; + return SOCKET_ERROR; + } + cl_spinlock_release( &socket_info->mutex1 ); + + if( socket_info->qp_error ) + { + IBSP_ERROR_EXIT( ("QP is in error state %d\n", socket_info->qp_error) ); + *lpErrno = socket_info->qp_error; + return SOCKET_ERROR; + } + + /* This function only works for that case. */ + if( dwBufferCount > QP_ATTRIB_SQ_SGE ) + { + CL_ASSERT( dwBufferCount <= QP_ATTRIB_SQ_SGE ); + IBSP_ERROR_EXIT( ("dwBufferCount is greater than %d\n", QP_ATTRIB_SQ_SGE) ); + *lpErrno = WSAEINVAL; + return SOCKET_ERROR; + } + + /* The send lock is only used to serialize posting. */ + cl_spinlock_acquire( &socket_info->send_lock ); + if( socket_info->send_cnt == QP_ATTRIB_SQ_DEPTH ) + { + /* This should never happen */ + cl_spinlock_release( &socket_info->send_lock ); + IBSP_ERROR_EXIT( ("not enough wr on the free list\n") ); + *lpErrno = WSAENETDOWN; + return SOCKET_ERROR; + } + + wr = &socket_info->send_wr[socket_info->send_idx]; + + wr->lpOverlapped = lpOverlapped; + wr->socket_info = socket_info; + + /* Looks good. Post the send buffer. */ + send_wr.p_next = NULL; + send_wr.wr_id = (uint64_t) (uintptr_t) wr; + send_wr.wr_type = WR_SEND; + send_wr.send_opt = socket_info->send_opt; + socket_info->send_opt = 0; + + send_wr.num_ds = dwBufferCount; + send_wr.ds_array = local_ds; + + lpOverlapped->InternalHigh = 0; + for( ds_idx = 0; ds_idx < dwBufferCount; ds_idx++ ) + { + local_ds[ds_idx].vaddr = (uint64_t)(void* __ptr64)lpBuffers[ds_idx].buf; + local_ds[ds_idx].length = lpBuffers[ds_idx].len; + + lpOverlapped->InternalHigh += lpBuffers[ds_idx].len; + } + + if( lpOverlapped->InternalHigh <= socket_info->max_inline ) + { + send_wr.send_opt |= IB_SEND_OPT_INLINE; + } + else + { + for( ds_idx = 0; ds_idx < dwBufferCount; ds_idx++ ) + { + /* Get the memory region node */ + node = lookup_partial_mr( socket_info, 0, /* READ */ + lpBuffers[ds_idx].buf, lpBuffers[ds_idx].len ); + if( !node ) + { + cl_spinlock_release( &socket_info->send_lock ); + /* + * No mr fits. This error is not official, but seems to be the + * closest. 
+ */ + IBSP_ERROR_EXIT( ("mr lookup failed\n") ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + local_ds[ds_idx].lkey = node->p_reg->lkey; + } + } + + /* + * We must set this now, because the operation could complete + * before ib_post_send returns. + */ + lpOverlapped->Internal = WSS_OPERATION_IN_PROGRESS; + + /* Store the flags for reporting back in IBSPGetOverlappedResult */ + lpOverlapped->Offset = dwFlags; + + cl_atomic_inc( &socket_info->send_cnt ); + +#ifdef _DEBUG_ + if( lpOverlapped->hEvent == 0) + { + cl_atomic_inc( &g_ibsp.overlap_h0_count ); + } + else + { + cl_atomic_inc( &g_ibsp.overlap_h1_count ); + cl_atomic_inc( &g_ibsp.overlap_h1_comp_count ); + } + + cl_atomic_inc( &g_ibsp.send_count ); + cl_atomic_inc( &g_ibsp.total_send_count ); + + fzprint(("%s():%d:0x%x:0x%x: ov=0x%p h0=%d h1=%d h1_c=%d send=%d recv=%d\n", + __FUNCTION__, __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), lpOverlapped, + g_ibsp.overlap_h0_count, g_ibsp.overlap_h1_count, + g_ibsp.overlap_h1_comp_count, g_ibsp.send_count, g_ibsp.recv_count)); + + +#endif + + + fzprint(("%s():%d:0x%x:0x%x: posting SEND %p, mr handle=%p, addr=%p, len=%d\n", + __FUNCTION__, + __LINE__, GetCurrentProcessId(), + GetCurrentThreadId(), + lpOverlapped, node, lpBuffers[0].buf, lpBuffers[0].len)); + +#ifdef _DEBUG_ + if( lpBuffers[0].len >= 40 ) + { + debug_dump_buffer( IBSP_DBG_WQ , "SEND", + lpBuffers[0].buf, 40 ); + } +#endif + +#ifdef IBSP_LOGGING + { + DWORD i; + + for( i=0; i < dwBufferCount; i++ ) + { + DataLogger_WriteData( &socket_info->SendDataLogger, + socket_info->send_log_idx++, lpBuffers[i].buf, + lpBuffers[i].len); + } + } +#endif + + status = ib_post_send( socket_info->qp, &send_wr, NULL ); + + if( status == IB_SUCCESS ) + { + /* Update the index and wrap as needed */ +#if QP_ATTRIB_SQ_DEPTH == 256 || QP_ATTRIB_SQ_DEPTH == 128 || \ + QP_ATTRIB_SQ_DEPTH == 64 || QP_ATTRIB_SQ_DEPTH == 32 || \ + QP_ATTRIB_SQ_DEPTH == 16 || QP_ATTRIB_SQ_DEPTH == 8 + socket_info->send_idx++; + socket_info->send_idx &= (QP_ATTRIB_SQ_DEPTH - 1); +#else + if( ++socket_info->send_idx == QP_ATTRIB_SQ_DEPTH ) + socket_info->send_idx = 0; +#endif + + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_IO, + ("Posted SEND: socket=%p, ov=%p, addr=%p, len=%d\n", + (void*)s, lpOverlapped, lpBuffers[0].buf, lpBuffers[0].len)); + + *lpErrno = WSA_IO_PENDING; + } + else + { + IBSP_ERROR( ("ib_post_send returned %s\n", ib_get_err_str( status )) ); + +#ifdef _DEBUG_ + if( lpOverlapped->hEvent == 0 ) + { + cl_atomic_dec( &g_ibsp.overlap_h0_count ); + } + else + { + cl_atomic_dec( &g_ibsp.overlap_h1_count ); + cl_atomic_dec( &g_ibsp.overlap_h1_comp_count ); + } + cl_atomic_dec( &g_ibsp.send_count ); + + memset( wr, 0x37, sizeof(struct _wr) ); +#endif + cl_atomic_dec( &socket_info->send_cnt ); + + *lpErrno = ibal_to_wsa_error( status ); + } + cl_spinlock_release( &socket_info->send_lock ); + + /* We never complete the operation here. */ + IBSP_EXIT( IBSP_DBG_IO ); + return SOCKET_ERROR; +} + + +/* Function: IBSPSetSockOpt + * + * Description: + * Set a socket option. For most all options we just have to translate the + * socket option and call the lower provider. The only special case is for + * SO_UPDATE_ACCEPT_CONTEXT in which case a socket handle is passed as the + * argument which we need to translate before calling the lower provider. 
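+ * (In this provider only SO_DEBUG and SO_GROUP_PRIORITY are actually
+ * stored; every other option is rejected with WSAENOPROTOOPT below.)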
+ */ +static int WSPAPI +IBSPSetSockOpt( + SOCKET s, + int level, + int optname, + const char FAR *optval, + int optlen, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = (struct ibsp_socket_info *)s; + + IBSP_ENTER( IBSP_DBG_OPT ); + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), s)); + + if( level != SOL_SOCKET ) + { + IBSP_ERROR_EXIT( ("invalid level %d", level) ); + *lpErrno = WSAENOPROTOOPT; + return SOCKET_ERROR; + } + + if( optval == NULL ) + { + IBSP_ERROR_EXIT( ("invalid optval=%p", optval) ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + + switch( optname ) + { + case SO_DEBUG: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_DEBUG\n") ); + if( optlen != sizeof(BOOL) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", optlen) ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + memcpy( &socket_info->socket_options.debug, optval, sizeof(BOOL) ); + break; + + case SO_GROUP_PRIORITY: + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_OPT, ("Option name SO_GROUP_PRIORITY\n") ); + if( optlen != sizeof(int) ) + { + IBSP_ERROR_EXIT( ("option len is invalid (0x%x)\n", optlen) ); + *lpErrno = WSAEFAULT; + return SOCKET_ERROR; + } + memcpy( &socket_info->socket_options.group_priority, optval, sizeof(int) ); + break; + + default: + IBSP_ERROR_EXIT( ("invalid option %x\n", optname) ); + *lpErrno = WSAENOPROTOOPT; + return SOCKET_ERROR; + break; + } + + IBSP_EXIT( IBSP_DBG_OPT ); + + return 0; +} + + +/* Function: IBSPSocket + * + * Description: + * This function creates a socket. There are two sockets created. The first + * socket is created by calling the lower providers WSPSocket. This is the + * handle that we use internally within our LSP. We then create a second + * socket with WPUCreateSocketHandle which will be returned to the calling + * application. We will also create a socket context structure which will + * maintain information on each socket. This context is associated with the + * socket handle passed to the application. +*/ +static SOCKET WSPAPI +IBSPSocket( + int af, + int type, + int protocol, + LPWSAPROTOCOL_INFOW lpProtocolInfo, + GROUP g, + DWORD dwFlags, + LPINT lpErrno ) +{ + struct ibsp_socket_info *socket_info = NULL; + + IBSP_ENTER( IBSP_DBG_SI ); + + UNUSED_PARAM( g ); + + if( af != AF_INET ) + { + IBSP_ERROR_EXIT( ("bad family %d instead of %d\n", af, AF_INET) ); + *lpErrno = WSAEAFNOSUPPORT; + return INVALID_SOCKET; + } + + if( type != SOCK_STREAM ) + { + IBSP_ERROR_EXIT( ("bad type %d instead of %d\n", type, SOCK_STREAM) ); + *lpErrno = WSAEPROTOTYPE; + return INVALID_SOCKET; + } + + if( protocol != IPPROTO_TCP ) + { + IBSP_ERROR_EXIT( ("bad protocol %d instead of %d\n", protocol, IPPROTO_TCP) ); + *lpErrno = WSAEPROTONOSUPPORT; + return INVALID_SOCKET; + } + + if( (dwFlags != WSA_FLAG_OVERLAPPED) ) + { + IBSP_ERROR_EXIT( ("dwFlags is not WSA_FLAG_OVERLAPPED (%x)\n", dwFlags) ); + *lpErrno = WSAEINVAL; + return INVALID_SOCKET; + } + + socket_info = create_socket_info( lpErrno ); + if( socket_info == NULL ) + { + IBSP_ERROR_EXIT( ("create_socket_info return NULL\n") ); + return INVALID_SOCKET; + } + + if( lpProtocolInfo->dwProviderReserved != 0 ) + { + /* This is a duplicate socket. 
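+ * The dwProviderReserved field carries the duplication handle set by
+ * the originating process; setup_duplicate_socket() uses it to recover
+ * the connection state.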
*/ + *lpErrno = setup_duplicate_socket( socket_info, + (HANDLE)(ULONG_PTR)lpProtocolInfo->dwProviderReserved ); + if( *lpErrno ) + { + deref_socket_info( socket_info ); + IBSP_ERROR( ("setup_duplicate_socket failed with %d\n", *lpErrno) ); + return INVALID_SOCKET; + } + } + else + { + socket_info->socket_state = IBSP_CREATE; + + /* Set the (non-zero) default socket options for that socket */ + socket_info->socket_options.max_msg_size = IB_MAX_MSG_SIZE; + socket_info->socket_options.max_rdma_size = IB_MAX_RDMA_SIZE; + socket_info->socket_options.rdma_threshold_size = IB_RDMA_THRESHOLD_SIZE; + } + + cl_spinlock_acquire( &g_ibsp.socket_info_mutex ); + cl_qlist_insert_tail( &g_ibsp.socket_info_list, &socket_info->item ); + cl_spinlock_release( &g_ibsp.socket_info_mutex ); + + *lpErrno = 0; + + fzprint(("%s():%d:0x%x:0x%x: socket=0x%p\n", __FUNCTION__, + __LINE__, GetCurrentProcessId(), GetCurrentThreadId(), socket_info)); + + IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_SI, + ("returning socket handle %p\n", socket_info) ); + + return (SOCKET) socket_info; +} + + +/* Function: IBSPCleanup + * + * Description: + * Decrement the entry count. If equal to zero then we can prepare to have us + * unloaded. Close any outstanding sockets and free up allocated memory. + */ +static int WSPAPI +IBSPCleanup( + LPINT lpErrno ) +{ + int ret = 0; + + IBSP_ENTER( IBSP_DBG_INIT ); + + cl_spinlock_acquire( &g_ibsp.mutex ); + + if( !g_ibsp.entry_count ) + { + cl_spinlock_release( &g_ibsp.mutex ); + + *lpErrno = WSANOTINITIALISED; + + IBSP_ERROR_EXIT( ("returning WSAENOTINITIALISED\n") ); + + return SOCKET_ERROR; + } + + /* Decrement the entry count */ + g_ibsp.entry_count--; + + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_INIT, ("WSPCleanup: %d\n", g_ibsp.entry_count) ); + + if( g_ibsp.entry_count == 0 ) + { + IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_INIT, ("entry_count is 0 => cleaning up\n") ); + ib_release(); + +#ifdef PERFMON_ENABLED + IBSPPmReleaseSlot(); +#endif + } + + cl_spinlock_release( &g_ibsp.mutex ); + + IBSP_EXIT( IBSP_DBG_INIT ); + + return ret; +} + + +/* + * Function: WSPStartupEx + * + * Description: + * This function intializes the service provider. We maintain a ref count to keep track + * of how many times this function has been called. + */ +int WSPAPI +WSPStartupEx( + WORD wVersion, + LPWSPDATA lpWSPData, + LPWSAPROTOCOL_INFOW lpProtocolInfo, + LPWSPUPCALLTABLEEX UpCallTable, + LPWSPPROC_TABLE lpProcTable ) +{ + static WSPPROC_TABLE gProcTable; + static WSPDATA gWSPData; + + IBSP_ENTER( IBSP_DBG_INIT ); + + /* Make sure that the version requested is >= 2.2. The low byte is the + major version and the high byte is the minor version. 
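+ For example, MAKEWORD(2, 2) == 0x0202 passes the check below, while
+ 0x0101 (version 1.1) is rejected.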
*/
+ if( (LOBYTE(wVersion) < 2) || ((LOBYTE(wVersion) == 2) && (HIBYTE(wVersion) < 2)) )
+ {
+ IBSP_ERROR_EXIT( ("Invalid Winsock version requested %x\n", wVersion) );
+
+ return WSAVERNOTSUPPORTED;
+ }
+
+ IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_INIT, ("entry_count=%d\n", g_ibsp.entry_count) );
+
+ cl_spinlock_acquire( &g_ibsp.mutex );
+
+ if( g_ibsp.entry_count == 0 )
+ {
+ int ret;
+
+ /* Save the global WSPData */
+ gWSPData.wVersion = MAKEWORD(2, 2);
+ gWSPData.wHighVersion = MAKEWORD(2, 2);
+ wcscpy( gWSPData.szDescription, Description );
+
+ /* Provide the service provider's entry points in the proc table */
+ memset( &gProcTable, 0, sizeof(gProcTable) );
+ gProcTable.lpWSPAccept = IBSPAccept;
+ gProcTable.lpWSPBind = IBSPBind;
+ gProcTable.lpWSPCleanup = IBSPCleanup;
+ gProcTable.lpWSPCloseSocket = IBSPCloseSocket;
+ gProcTable.lpWSPConnect = IBSPConnect;
+ gProcTable.lpWSPDuplicateSocket = IBSPDuplicateSocket;
+ gProcTable.lpWSPEnumNetworkEvents = IBSPEnumNetworkEvents;
+ gProcTable.lpWSPEventSelect = IBSPEventSelect;
+ gProcTable.lpWSPGetOverlappedResult = IBSPGetOverlappedResult;
+ gProcTable.lpWSPGetSockOpt = IBSPGetSockOpt;
+ gProcTable.lpWSPGetQOSByName = IBSPGetQOSByName;
+ gProcTable.lpWSPIoctl = IBSPIoctl;
+ gProcTable.lpWSPListen = IBSPListen;
+ gProcTable.lpWSPRecv = IBSPRecv;
+ gProcTable.lpWSPSend = IBSPSend;
+ gProcTable.lpWSPSetSockOpt = IBSPSetSockOpt;
+ gProcTable.lpWSPSocket = IBSPSocket;
+
+ /* Since we only support 2.2, set both wVersion and wHighVersion to 2.2. */
+ lpWSPData->wVersion = MAKEWORD(2, 2);
+ lpWSPData->wHighVersion = MAKEWORD(2, 2);
+ wcscpy( lpWSPData->szDescription, Description );
+
+#ifdef LATER
+ /* TODO: remove? */
+ cl_qlist_init( &g_ibsp.cq_thread_info_list );
+ cl_spinlock_init( &g_ibsp.cq_thread_info_mutex );
+#endif
+
+ g_ibsp.protocol_info = *lpProtocolInfo;
+
+ /* Initialize InfiniBand */
+ ret = ibsp_initialize();
+ if( ret )
+ {
+ IBSP_ERROR_EXIT( ("ibsp_initialize failed (%d)\n", ret) );
+ /* Don't return with the startup mutex held. */
+ cl_spinlock_release( &g_ibsp.mutex );
+ return ret;
+ }
+ }
+ g_ibsp.entry_count++;
+
+ cl_spinlock_release( &g_ibsp.mutex );
+
+ /* Set the return parameters */
+ *lpWSPData = gWSPData;
+ *lpProcTable = gProcTable;
+
+ /* Store the upcall function table */
+ g_ibsp.up_call_table = *UpCallTable;
+
+ IBSP_EXIT( IBSP_DBG_INIT );
+
+#ifdef PERFMON_ENABLED
+ /* Register the socket application with perfmon */
+ IBSPPmGetSlot();
+#endif /* PERFMON_ENABLED */
+
+ return 0;
+}
+
+
diff --git a/branches/Ndi/ulp/wsd/user/ibspdll.def b/branches/Ndi/ulp/wsd/user/ibspdll.def
new file mode 100644
index 00000000..8932928b
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/ibspdll.def
@@ -0,0 +1,6 @@
+LIBRARY ibwsd
+EXPORTS
+WSPStartupEx
+IBSPPmOpen
+IBSPPmCollectData
+IBSPPmClose
diff --git a/branches/Ndi/ulp/wsd/user/ibspdll.h b/branches/Ndi/ulp/wsd/user/ibspdll.h
new file mode 100644
index 00000000..570a64ae
--- /dev/null
+++ b/branches/Ndi/ulp/wsd/user/ibspdll.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2005 SilverStorm Technologies. All rights reserved.
+ *
+ * This software is available to you under the OpenIB.org BSD license
+ * below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer. 
+ * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#ifndef IBSPDLL_H +#define IBSPDLL_H + + +#ifdef __GNUC__ +#include +#endif + +#include +#include +#include +#include +#include + +#include +#include +#include + +#include "ibspdefines.h" +#include "ibspdebug.h" +#include "ibspstruct.h" +#include "ibspproto.h" +#include "ibsp_mem.h" + +/*--------------------------------------------------------------------------*/ + +extern struct ibspdll_globals g_ibsp; + +extern uint32_t g_max_inline; +extern uint32_t g_max_poll; +extern uint32_t g_sa_timeout; +extern uint32_t g_sa_retries; +extern DWORD_PTR g_dwPollThreadAffinityMask; + +/* Allow users to control SA timeouts behavior - fall back on IPoIB or fail. */ +extern int g_connect_err; +extern uint8_t g_max_cm_retries; +extern uint8_t g_pkt_life_modifier; +extern uint8_t g_qp_retries; + +#endif /* IBSPDLL_H */ diff --git a/branches/Ndi/ulp/wsd/user/ibspdll.rc b/branches/Ndi/ulp/wsd/user/ibspdll.rc new file mode 100644 index 00000000..3cbc9d8e --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspdll.rc @@ -0,0 +1,47 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ * + * $Id$ + */ + + +#include + +#define VER_FILETYPE VFT_DLL +#define VER_FILESUBTYPE VFT2_UNKNOWN + +#ifdef _DEBUG_ +#define VER_FILEDESCRIPTION_STR "Winsock Direct for InfiniBand (Debug)" +#else +#define VER_FILEDESCRIPTION_STR "Winsock Direct for InfiniBand" +#endif + +#define VER_INTERNALNAME_STR "ibwsd.dll" +#define VER_ORIGINALFILENAME_STR "ibwsd.dll" + +#include diff --git a/branches/Ndi/ulp/wsd/user/ibspproto.h b/branches/Ndi/ulp/wsd/user/ibspproto.h new file mode 100644 index 00000000..98ad7eaa --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspproto.h @@ -0,0 +1,297 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +/* protos from socketinfo.c */ +struct ibsp_socket_info * +create_socket_info( + OUT LPINT lpErrno ); + +void AL_API +deref_socket_info( + IN struct ibsp_socket_info *p_socket ); + +/* protos from extension.c */ +HANDLE WSPAPI +IBSPRegisterMemory( + IN SOCKET s, + IN PVOID lpBuffer, + IN DWORD dwBufferLength, + IN DWORD dwFlags, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPDeregisterMemory( + IN SOCKET s, + IN HANDLE handle, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPRegisterRdmaMemory( + IN SOCKET s, + IN PVOID lpBuffer, + IN DWORD dwBufferLength, + IN DWORD dwFlags, + OUT LPVOID lpRdmaBufferDescriptor, + IN OUT LPDWORD lpdwDescriptorLength, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPDeregisterRdmaMemory( + IN SOCKET s, + IN LPVOID lpRdmaBufferDescriptor, + IN DWORD dwDescriptorLength, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPRdmaWrite( + IN SOCKET s, + IN LPWSABUFEX lpBuffers, + IN DWORD dwBufferCount, + IN LPVOID lpTargetBufferDescriptor, + IN DWORD dwTargetDescriptorLength, + IN DWORD dwTargetBufferOffset, + OUT LPDWORD lpdwNumberOfBytesWritten, + IN DWORD dwFlags, + IN LPWSAOVERLAPPED lpOverlapped, + IN LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + IN LPWSATHREADID lpThreadId, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPRdmaRead( + IN SOCKET s, + IN LPWSABUFEX lpBuffers, + IN DWORD dwBufferCount, + IN LPVOID lpTargetBufferDescriptor, + IN DWORD dwTargetDescriptorLength, + IN DWORD dwTargetBufferOffset, + OUT LPDWORD lpdwNumberOfBytesRead, + IN DWORD dwFlags, + IN LPWSAOVERLAPPED lpOverlapped, + IN LPWSAOVERLAPPED_COMPLETION_ROUTINE lpCompletionRoutine, + IN LPWSATHREADID lpThreadId, + OUT LPINT lpErrno ); + +int WSPAPI +IBSPMemoryRegistrationCacheCallback( + IN LPVOID lpvAddress, + IN SIZE_T Size, + OUT LPINT lpErrno ); 
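+
+/*
+ * Illustrative sketch (an editor's addition, not part of the original
+ * sources): the WinSock switch obtains the SAN extension functions above
+ * via WSPIoctl with SIO_GET_EXTENSION_FUNCTION_POINTER, which IBSPIoctl
+ * (ibspdll.c) answers by matching the GUID in lpvInBuffer. A hypothetical
+ * caller, where "table" is this provider's WSPPROC_TABLE and "s", "buf"
+ * and "len" are assumed to exist:
+ *
+ *	LPFN_WSPREGISTERMEMORY pfnRegMem;
+ *	DWORD bytes;
+ *	INT err;
+ *
+ *	if( table.lpWSPIoctl( s, SIO_GET_EXTENSION_FUNCTION_POINTER,
+ *		(LPVOID)&SANRegisterMemory, sizeof(GUID),
+ *		&pfnRegMem, sizeof(pfnRegMem), &bytes,
+ *		NULL, NULL, NULL, &err ) == 0 )
+ *	{
+ *		HANDLE h = pfnRegMem( s, buf, len, 0, &err );
+ *	}
+ */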
+ +/* Protos from ibsp_iblow.c */ +extern void +ib_release( void ); + +extern int +ibsp_initialize( void ); + +void +ib_release_cq_tinfo( + struct cq_thread_info *cq_tinfo ); + +void +ib_destroy_cq_tinfo( + struct cq_thread_info *cq_tinfo ); + +int +ib_create_socket( + IN OUT struct ibsp_socket_info *socket_info ); + +void +ib_destroy_socket( + IN OUT struct ibsp_socket_info *socket_info ); + +void +shutdown_and_destroy_socket_info( + IN OUT struct ibsp_socket_info *socket_info ); + +int +ib_cq_comp( + void *cq_context ); + +void +wait_cq_drain( + IN OUT struct ibsp_socket_info *socket_info ); + +void +ibsp_dup_overlap_abort( + IN OUT struct ibsp_socket_info *socket_info ); + +/* Protos from misc.c */ +extern int +ibal_to_wsa_error( + IN const ib_api_status_t status ); + +/* Protos from ibsp_ip.c */ +intn_t CL_API +ip_cmp( + IN const void* const p_key1, + IN const void* const p_key2 ); + +int +query_guid_address( + IN struct ibsp_port *port, + IN ib_net32_t ip_addr, + OUT ib_net64_t *port_guid ); + +int +query_pr( + IN struct ibsp_port *port, + IN ib_net64_t dest_port_guid, + OUT ib_path_rec_t *path_rec ); + +int +build_ip_list( + IN OUT LPSOCKET_ADDRESS_LIST ip_list, + IN OUT LPDWORD ip_list_size, + OUT LPINT lpErrno ); + +struct ibsp_port* +get_port_from_ip_address( + IN const struct in_addr sin_addr ); + +/* Protos from ibsp_cm.c */ +extern int +ib_listen( + IN struct ibsp_socket_info *socket_info ); + +void +ib_listen_cancel( + IN struct ibsp_socket_info *socket_info ); + +void +ib_reject( + IN const ib_cm_handle_t h_cm, + IN const ib_rej_status_t rej_status ); + +int +ib_accept( + IN struct ibsp_socket_info *socket_info, + IN ib_cm_req_rec_t *cm_req_received ); + +int +ib_connect( + IN struct ibsp_socket_info *socket_info, + IN ib_path_rec_t *path_rec ); + +void +ib_disconnect( + IN struct ibsp_socket_info *socket_info, + IN struct disconnect_reason *reason ); + +void +ib_listen_backlog( + IN struct ibsp_socket_info *socket_info, + IN int backlog ); + +/* ibsp_pnp.h */ +ib_api_status_t +register_pnp( void ); + +void +unregister_pnp( void ); + +void +pnp_ca_remove( + struct ibsp_hca *hca); + +/* ibsp_duplicate.c */ +int +setup_duplicate_socket( + IN struct ibsp_socket_info *socket_info, + IN HANDLE h_dup_info ); + +int WSPAPI +IBSPDuplicateSocket( + SOCKET s, + DWORD dwProcessId, + LPWSAPROTOCOL_INFOW lpProtocolInfo, + LPINT lpErrno ); + +/* ibsp_mem.c */ + + +struct memory_node * +lookup_partial_mr( + IN struct ibsp_socket_info *s, + IN ib_access_t acl_mask, + IN void *start, + IN size_t len ); + +struct memory_node * +ibsp_reg_mem( + IN struct ibsp_socket_info *s, + IN ib_pd_handle_t pd, + IN void *start, + IN size_t len, + IN ib_access_t access_ctrl, + OUT LPINT lpErrno ); + +int +ibsp_dereg_mem( + IN struct ibsp_socket_info *s, + IN struct memory_node *node, + OUT LPINT lpErrno ); + +void +ibsp_dereg_hca( + IN struct mr_list *mem_list ); + +void +ibsp_dereg_socket( + IN struct ibsp_socket_info *s ); + +void +ibsp_hca_flush_mr_cache( + IN struct ibsp_hca *p_hca, + IN LPVOID lpvAddress, + IN SIZE_T Size ); + +int +ibsp_conn_insert( + IN struct ibsp_socket_info *socket_info ); + +void +ibsp_conn_remove( + IN struct ibsp_socket_info *socket_info ); + +void +ibsp_post_select_event( + struct ibsp_socket_info *socket_info, + int event, + int error ); + +/* ibspdll.c */ +extern int +init_globals( void ); + +extern void +release_globals( void ); diff --git a/branches/Ndi/ulp/wsd/user/ibspstruct.h b/branches/Ndi/ulp/wsd/user/ibspstruct.h new file mode 100644 index 00000000..867267d7 --- 
/dev/null +++ b/branches/Ndi/ulp/wsd/user/ibspstruct.h @@ -0,0 +1,467 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include +#include +#include +#include +#include + + + +enum ibsp_socket_state +{ + IBSP_CREATE = 0, + IBSP_BIND, + IBSP_CONNECT, + IBSP_LISTEN, + IBSP_CONNECTED, + IBSP_DUPLICATING_OLD, /* duplicating socket on the original controlling process */ + IBSP_DUPLICATING_NEW, /* duplicating socket on the new controlling process */ + IBSP_DUPLICATING_REMOTE, /* duplicating socket on the remote side */ + IBSP_DISCONNECTED, + IBSP_CLOSED, + IBSP_NUM_STATES +}; + +extern char *ibsp_socket_state_str[IBSP_NUM_STATES]; + +#define IBSP_SOCKET_STATE_STR(state) \ + (state < IBSP_NUM_STATES)?ibsp_socket_state_str[state]:"INVALID" + +/* Socket Options structure */ +struct ibsp_socket_options +{ + BOOL debug; /* SO_DEBUG */ + GROUP group_id; /* SO_GROUP_ID */ + int group_priority; /* SO_GROUP_PRIORITY */ + unsigned int max_msg_size; /* SO_MAX_MSG_SIZE */ + int max_rdma_size; /* SO_MAX_RDMA_SIZE */ + int rdma_threshold_size; /* SO_RDMA_THRESHOLD_SIZE */ +}; + +/* Used to discriminate between various listen on the same ports. + * We need this for socket duplication. + * { 0, 0 } is a standard connection request. */ +struct listen_req_param +{ + DWORD dwProcessId; + GUID identifier; +}; + +/* Parameters given to establish a connection */ +struct cm_req_params +{ + struct listen_req_param listen_req_param; + struct sockaddr_in dest; + struct sockaddr_in source; /* Source of connect() */ +}; + +/* Listen structure. + * Used to remember an incoming connection. 
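+ * Each entry queues the ib_cm_req_rec_t of a pending CM request until
+ * the accept path consumes it (or it is rejected).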
*/ +struct listen_incoming +{ + cl_list_item_t item; + + ib_cm_req_rec_t cm_req_received; + struct cm_req_params params; +}; + + +/* Keeps track of the posted WR */ +struct _wr +{ + LPWSAOVERLAPPED lpOverlapped; + struct ibsp_socket_info *socket_info; +}; + + +/* Keeps track of the posted WR */ +struct _recv_wr +{ + struct _wr wr; + ib_recv_wr_t recv; + ib_local_ds_t ds_array[QP_ATTRIB_RQ_SGE]; +#ifdef IBSP_LOGGING + LONG idx; +#endif +}; + + +/* Keeps track of the registered MRs */ +struct mr_list +{ + cl_qlist_t list; /* Regions registered through IBSPRegisterMemory */ + cl_spinlock_t mutex; /* Protects the list */ +}; + +/* Information necessary to duplicate sockets */ +struct ibsp_duplicate_info +{ + GUID identifier; + struct ibsp_socket_options socket_options; + struct sockaddr_in local_addr; + struct sockaddr_in peer_addr; + DWORD dwProcessId; +}; + +/* Give the reason for disconnections */ +struct disconnect_reason +{ + enum + { + DISC_INVALID, + DISC_SHUTDOWN, /* normal shutdown */ + DISC_DUPLICATING /* socket duplication */ + } type; + + struct _disconnect_reason_dup + { + GUID identifier; + DWORD dwProcessId; + + } duplicating; +}; + + +/* Internal node describing a registered region. */ +struct memory_reg +{ + cl_list_item_t item; + /* + * List count serves as reference count. The memory registration + * can be released when the list is empty. + */ + cl_qlist_t node_list; + +#ifdef _DEBUG_ +#define MR_NODE_MAGIC 0x7fba43ce + int magic; +#endif + + /* Characteristics of that region. */ + ib_mr_create_t type; + + /* Memory registration parameters, returned by ib_reg_mem. */ + uint32_t lkey; + uint32_t rkey; + ib_mr_handle_t mr_handle; +}; + + +struct memory_node +{ + /* List item to track within a socket structure. */ + cl_list_item_t socket_item; + struct ibsp_socket_info *s; + /* List item to track within the registration structure. */ + cl_list_item_t mr_item; + struct memory_reg *p_reg; +}; + + + +/* Descriptor given back to WSPRegisterRdmaMemory */ +struct rdma_memory_desc +{ + uint64_t iova; + uint32_t lkey; + uint32_t rkey; + struct memory_node *node1; /* valid only on registering node */ +}; + +struct cq_thread_info +{ + cl_list_item_t list_item; + + cl_waitobj_handle_t cq_waitobj; + ib_cq_handle_t cq; + + /* Number of qp's using this cq */ + atomic32_t qp_count; + + /* Current cqe size */ + uint32_t cqe_size; + + HANDLE ib_cq_thread; + DWORD ib_cq_thread_id; + BOOL ib_cq_thread_exit_wanted; + + struct ibsp_hca *hca; /* HCA to which this cq belongs. */ +}; + +/* Structure representing the context information stored for each + * socket created */ +struct ibsp_socket_info +{ + cl_list_item_t item; /* Link to next SOCKET_INFO in the global list */ + cl_rbmap_item_t conn_item; + cl_spinlock_t mutex1; /* protect this structure */ + + /* Switch socket handle created by WPUCreateSocketHandle. */ + SOCKET switch_socket; + + /* IP address and port this socket is bound to. Set by WSPBind */ + struct sockaddr_in local_addr; + + /* Remote address of peer entity after connect/accept is complete */ + struct sockaddr_in peer_addr; + + /* Back pointer to the port to which this socket is + * bound. It is NULL until the socket is bound, except if the listen + * binds to INADDR_ANY. 
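+ * (A listen bound to INADDR_ANY spans every port, so no single port
+ * back-pointer applies in that case.)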
*/
+ struct ibsp_port *port;
+
+ enum ibsp_socket_state socket_state; /* represents the current socket state */
+
+ struct
+ {
+ /* Listening socket */
+ unsigned int backlog; /* Maximum number of pending connections */
+ cl_qlist_t list; /* list of pending connections */
+ ib_listen_handle_t handle;
+ struct listen_req_param listen_req_param;
+ } listen;
+
+ /* Event for blocking accept and connect */
+ HANDLE h_event;
+
+ /* Variables associated with IBSPSelectEvent */
+ WSAEVENT event_select; /* Handle to Event Object */
+ long event_mask; /* Events we care about */
+ long network_events; /* Events that happened */
+ int errno_connect; /* errno code (if any) returned by connect */
+
+ struct ibsp_socket_options socket_options; /* Socket Options */
+
+ /* InfiniBand resources */
+ ib_pd_handle_t hca_pd; /* Copy of the HCA PD, for faster access. */
+
+ /* Pointer to completion queue and thread assigned to this socket */
+ struct cq_thread_info *cq_tinfo;
+
+ ib_qp_handle_t qp;
+ uint32_t max_inline;
+
+ /* State of the QP. This is only valid when the socket state is IBSP_CONNECTED.
+ * 0 : good
+ * <0 : an error occurred; holds a Windows error (i.e. WSAExxxx)
+ * -1 : disconnected, for the duplication process.
+ */
+ int qp_error;
+
+ /* Send request processing. */
+ cl_spinlock_t send_lock;
+ ib_send_opt_t send_opt;
+ struct _wr send_wr[QP_ATTRIB_SQ_DEPTH];
+ uint8_t send_idx;
+ atomic32_t send_cnt; /* Used to limit access to send_wr array. */
+
+ /* Receive request processing. */
+ cl_spinlock_t recv_lock;
+ struct _recv_wr recv_wr[QP_ATTRIB_RQ_DEPTH];
+ uint8_t recv_idx;
+ atomic32_t recv_cnt; /* Used to limit access to recv_wr array. */
+
+ /*
+ * Used to stall destruction of the switch socket until all completion
+ * upcalls have unwound.
+ */
+ atomic32_t ref_cnt1;
+
+#ifdef _DEBUG_
+ atomic32_t send_comp;
+ atomic32_t recv_comp;
+#endif
+
+ struct _recv_wr dup_wr[QP_ATTRIB_RQ_DEPTH];
+ uint8_t dup_idx;
+ atomic32_t dup_cnt;
+
+ /*
+ * The switch will register local and RDMA memory for use in RDMA
+ * transfers. All RDMA registrations are cached in the HCA structure,
+ * and have memory_node structures referencing them stored here in the
+ * socket structures.
+ */
+ cl_qlist_t mr_list;
+
+ /* Socket duplication state */
+ struct
+ {
+ GUID identifier; /* Unique identifier */
+ DWORD dwProcessId;
+ } duplicate;
+
+#ifdef IBSP_LOGGING
+ DataLogger SendDataLogger;
+ DataLogger RecvDataLogger;
+ long recv_log_idx;
+ long send_log_idx;
+#endif
+};
+
+
+inline void
+ibsp_css(
+ char *calling_func,
+ int line,
+ struct ibsp_socket_info *s,
+ enum ibsp_socket_state new_state )
+{
+ enum ibsp_socket_state old_state;
+
+ UNUSED_PARAM( calling_func );
+ UNUSED_PARAM( line );
+
+ old_state = s->socket_state;
+
+ if( old_state == new_state )
+ {
+ /* Nothing to change */
+ return;
+ }
+
+ /* IBSP_CLOSED is a dead-end state */
+ if( old_state == IBSP_CLOSED )
+ {
+ return;
+ }
+
+ s->socket_state = new_state;
+}
+
+#define IBSP_CHANGE_SOCKET_STATE(socket_info, new_state) \
+ ibsp_css(__FUNCTION__, __LINE__, socket_info, new_state)
+
+
+/*--------------------------------------------------------------------------*/
+
+/* Describes an IP address */
+struct ibsp_ip_addr
+{
+ cl_fmap_item_t item; /* next IP for that port */
+
+ struct ibsp_port *p_port; /* port that owns this IP address */
+ struct in_addr ip_addr; /* IPv4 address */
+};
+
+/* Describes a port */
+struct ibsp_port
+{
+ cl_list_item_t item;
+
+ struct ibsp_hca *hca; /* HCA to which this port belongs. 
*/ + + ib_net64_t guid; + uint8_t port_num; +}; + +/* Describes a hca */ +struct ibsp_hca +{ + cl_list_item_t item; + + ib_net64_t guid; + uint16_t dev_id; /* Device ID to selectively cap MTU to 1K for Tavor. */ + ib_ca_handle_t hca_handle; + + ib_pd_handle_t pd; + + /* Memory management */ + struct mr_list rdma_mem_list; /* Regions registered through IBSPRegisterRdmaMemory */ + + cl_spinlock_t port_lock; + cl_qlist_t port_list; + + /* + * The CQ list is a circular list without an end. The pointer here + * points to the entry that should be used for the next allocation. + */ + cl_spinlock_t cq_lock; + struct cq_thread_info *cq_tinfo; +}; + +/* There is only one instance of that structure. */ +struct ibspdll_globals +{ + /* Special values. Keep first and in this order. These are not reset + * between WSAStartupEx and WSACleanup calls. */ + cl_spinlock_t mutex; + UINT entry_count; + + /* Provider */ + WSPUPCALLTABLEEX up_call_table; /* MUST keep afetr entry_count */ + HANDLE heap; + cl_qlist_t socket_info_list; /* List of all the created sockets */ + cl_rbmap_t conn_map; /* rb tree of all connections to ensure unique 4-tuple */ + cl_spinlock_t socket_info_mutex; + + WSAPROTOCOL_INFOW protocol_info; + + /* Infiniband */ + ib_al_handle_t al_handle; + ib_pnp_handle_t pnp_handle_ca; + ib_pnp_handle_t pnp_handle_port; + + cl_qlist_t hca_list; + cl_spinlock_t hca_mutex; + + HANDLE h_ibat_dev; + cl_fmap_t ip_map; /* list of all IP addresses supported by all the ports. */ + cl_spinlock_t ip_mutex; + +#ifdef _DEBUG_ + /* Statistics */ + atomic32_t qp_num; + atomic32_t cq_num; + atomic32_t pd_num; + atomic32_t al_num; + atomic32_t mr_num; + atomic32_t ca_num; + atomic32_t listen_num; + atomic32_t pnp_num; + atomic32_t thread_num; + atomic32_t wpusocket_num; + + atomic32_t overlap_h0_count; + atomic32_t overlap_h1_comp_count; + atomic32_t overlap_h1_count; + atomic32_t max_comp_count; + atomic32_t send_count; + atomic32_t recv_count; + atomic32_t total_send_count; + atomic32_t total_recv_count; + atomic32_t total_recv_compleated; + atomic32_t CloseSocket_count; +#endif +}; diff --git a/branches/Ndi/ulp/wsd/user/makefile b/branches/Ndi/ulp/wsd/user/makefile new file mode 100644 index 00000000..bffacaa7 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/makefile @@ -0,0 +1,7 @@ +# +# DO NOT EDIT THIS FILE!!! Edit .\sources. if you want to add a new source +# file to this component. This file merely indirects to the real make file +# that is shared by all the driver components of the OpenIB Windows project. +# + +!INCLUDE ..\..\..\inc\openib.def diff --git a/branches/Ndi/ulp/wsd/user/misc.c b/branches/Ndi/ulp/wsd/user/misc.c new file mode 100644 index 00000000..5d81bb52 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/misc.c @@ -0,0 +1,134 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + * + * $Id$ + */ + +#include "ibspdebug.h" +#if defined(EVENT_TRACING) +#ifdef offsetof +#undef offsetof +#endif +#include "misc.tmh" +#endif + +#include "ibspdll.h" + +char *ibsp_socket_state_str[IBSP_NUM_STATES] = { + "IBSP_CREATE", + "IBSP_BIND", + "IBSP_CONNECT", + "IBSP_LISTEN", + "IBSP_CONNECTED", + "IBSP_DUPLICATING_OLD", + "IBSP_DUPLICATING_NEW", + "IBSP_DUPLICATING_REMOTE", + "IBSP_DISCONNECTED", + "IBSP_CLOSED" +}; + +/* Convert an IBAL error to a Winsock error. */ +int +ibal_to_wsa_error( + const ib_api_status_t status ) +{ + switch( status ) + { + case IB_SUCCESS: + return 0; + + case IB_INVALID_QP_HANDLE: + return WSAENOTCONN; + + case IB_INVALID_PARAMETER: + return WSAEINVAL; + + case IB_INSUFFICIENT_RESOURCES: + return WSAENOBUFS; + + case IB_INVALID_WR_TYPE: + return WSAEINVAL; + + case IB_INVALID_QP_STATE: + return WSAENOTCONN; + + default: + return WSAEINVAL; + } +} + + +/* Initialize the global structure. Only the mutex and entry_count fields + * have been initialized so far. */ +int +init_globals( void ) +{ + /* Set everything to 0 */ + memset( &g_ibsp, 0, sizeof(g_ibsp) ); + + /* Create our private heap */ + g_ibsp.heap = HeapCreate( 0, 128000, 0 ); + if( g_ibsp.heap == NULL) + { + IBSP_ERROR_EXIT( ("HeapCreate() failed: %d\n", GetLastError()) ); + return 1; + } + + /* Initialize our various lock and lists */ + cl_spinlock_init( &g_ibsp.mutex ); + + cl_qlist_init( &g_ibsp.hca_list ); + cl_spinlock_init( &g_ibsp.hca_mutex ); + + g_ibsp.h_ibat_dev = INVALID_HANDLE_VALUE; + cl_fmap_init( &g_ibsp.ip_map, ip_cmp ); + cl_spinlock_init( &g_ibsp.ip_mutex ); + + cl_qlist_init( &g_ibsp.socket_info_list ); + cl_rbmap_init( &g_ibsp.conn_map ); + cl_spinlock_init( &g_ibsp.socket_info_mutex ); + + return 0; +} + + +/* Free ressources allocated in our global structure. */ +void +release_globals( void ) +{ + HeapDestroy( g_ibsp.heap ); + + if( g_ibsp.h_ibat_dev != INVALID_HANDLE_VALUE ) + CloseHandle( g_ibsp.h_ibat_dev ); + + cl_spinlock_destroy( &g_ibsp.socket_info_mutex ); + cl_spinlock_destroy( &g_ibsp.hca_mutex ); + cl_spinlock_destroy( &g_ibsp.ip_mutex ); + cl_spinlock_destroy( &g_ibsp.mutex ); +} diff --git a/branches/Ndi/ulp/wsd/user/sockinfo.c b/branches/Ndi/ulp/wsd/user/sockinfo.c new file mode 100644 index 00000000..e837e429 --- /dev/null +++ b/branches/Ndi/ulp/wsd/user/sockinfo.c @@ -0,0 +1,176 @@ +/* + * Copyright (c) 2005 SilverStorm Technologies. All rights reserved. + * Copyright (c) 2006 Mellanox Technologies. All rights reserved. + * + * This software is available to you under the OpenIB.org BSD license + * below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. 
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * $Id$
+ */
+
+#include "ibspdebug.h"
+#if defined(EVENT_TRACING)
+#include "sockinfo.tmh"
+#endif
+
+#include "ibspdll.h"
+
+
+static void
+free_socket_info(
+ IN struct ibsp_socket_info *socket_info );
+
+/*
+ * Function: create_socket_info
+ *
+ * Description:
+ * Allocates a new socket info context structure and initializes some fields.
+*/
+struct ibsp_socket_info *
+create_socket_info(
+ OUT LPINT lpErrno )
+{
+ struct ibsp_socket_info *socket_info;
+
+ IBSP_ENTER( IBSP_DBG_SI );
+
+ socket_info = HeapAlloc( g_ibsp.heap,
+ HEAP_ZERO_MEMORY, sizeof(struct ibsp_socket_info) );
+ if( socket_info == NULL )
+ {
+ IBSP_PRINT_EXIT(TRACE_LEVEL_INFORMATION, IBSP_DBG_SI,
+ ("HeapAlloc() failed: %d\n", GetLastError()) );
+ *lpErrno = WSAENOBUFS;
+ return NULL;
+ }
+
+ cl_spinlock_construct( &socket_info->mutex1 );
+ cl_spinlock_construct( &socket_info->send_lock );
+ cl_spinlock_construct( &socket_info->recv_lock );
+ cl_qlist_init( &socket_info->mr_list );
+ cl_qlist_init( &socket_info->listen.list );
+
+ if( cl_spinlock_init( &socket_info->mutex1 ) != CL_SUCCESS )
+ goto err;
+
+ if( cl_spinlock_init( &socket_info->send_lock ) != CL_SUCCESS )
+ goto err;
+
+ if( cl_spinlock_init( &socket_info->recv_lock ) != CL_SUCCESS )
+ goto err;
+
+#ifdef _DEBUG_
+ /* Debug-fill each WR array so stale entries are recognizable. */
+ memset( socket_info->send_wr, 0x38, sizeof(socket_info->send_wr) );
+ memset( socket_info->recv_wr, 0x38, sizeof(socket_info->recv_wr) );
+ memset( socket_info->dup_wr, 0x38, sizeof(socket_info->dup_wr) );
+#endif
+
+ socket_info->switch_socket =
+ g_ibsp.up_call_table.lpWPUCreateSocketHandle(
+ 0, (DWORD_PTR)socket_info, lpErrno );
+
+ if( socket_info->switch_socket == INVALID_SOCKET )
+ {
+ IBSP_ERROR( ("WPUCreateSocketHandle() failed: %d\n", *lpErrno) );
+err:
+ free_socket_info( socket_info );
+ IBSP_EXIT( IBSP_DBG_SI );
+ return NULL;
+ }
+
+ STAT_INC( wpusocket_num );
+
+ /*
+ * Preset to 1; IBSPCloseSocket will decrement it, and the switch socket
+ * will be freed once it goes to zero.
+ */
+ socket_info->ref_cnt1 = 1;
+
+ IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_SI, ("socket_info (%p), switch socket (%p)\n",
+ socket_info, (void*)socket_info->switch_socket) );
+
+ IBSP_EXIT( IBSP_DBG_SI );
+ return socket_info;
+}
+
+
+static void
+free_socket_info(
+ IN struct ibsp_socket_info *p_socket )
+{
+ int ret, error;
+
+ if( p_socket->switch_socket != INVALID_SOCKET )
+ {
+ /* ref_cnt hit zero - destroy the switch socket. 
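+ * (free_socket_info is also reached directly from create_socket_info's
+ * failure path, before any reference was handed out.)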
*/
+ IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_SI,
+ ("socket=0x%p calling lpWPUCloseSocketHandle=0x%p\n",
+ p_socket, (void*)p_socket->switch_socket) );
+
+ ret = g_ibsp.up_call_table.lpWPUCloseSocketHandle(
+ p_socket->switch_socket, &error );
+ if( ret == SOCKET_ERROR )
+ {
+ IBSP_ERROR( ("WPUCloseSocketHandle failed: %d\n", error) );
+ }
+ else
+ {
+ STAT_DEC( wpusocket_num );
+ }
+ p_socket->switch_socket = INVALID_SOCKET;
+ }
+
+ CL_ASSERT( !p_socket->qp );
+ CL_ASSERT( !p_socket->conn_item.p_map );
+ CL_ASSERT( !p_socket->send_cnt && !p_socket->recv_cnt );
+ cl_spinlock_destroy( &p_socket->mutex1 );
+
+ cl_spinlock_destroy( &p_socket->send_lock );
+ cl_spinlock_destroy( &p_socket->recv_lock );
+
+ HeapFree( g_ibsp.heap, 0, p_socket );
+}
+
+
+/*
+ * Function: deref_socket_info
+ *
+ * Description:
+ * This routine decrements a socket context's reference count and, if
+ * it reaches zero, frees the socket context structure.
+ */
+void AL_API
+deref_socket_info(
+ IN struct ibsp_socket_info *p_socket )
+{
+ IBSP_ENTER( IBSP_DBG_SI );
+
+ if( !cl_atomic_dec( &p_socket->ref_cnt1 ) )
+ {
+ free_socket_info( p_socket );
+ IBSP_PRINT(TRACE_LEVEL_INFORMATION, IBSP_DBG_SI, ("Freed socket_info (%p)\n", p_socket) );
+ }
+ IBSP_EXIT( IBSP_DBG_SI );
+}
-- 
2.41.0